author     dotnet-bot <dotnet-bot@microsoft.com>  2015-01-30 14:14:42 -0800
committer  dotnet-bot <dotnet-bot@microsoft.com>  2015-01-30 14:14:42 -0800
commit     ef1e2ab328087c61a6878c1e84f4fc5d710aebce (patch)
tree       dee1bbb89e9d722e16b0d1485e3cdd1b6c8e2cfa /src/vm
Initial commit to populate CoreCLR repo
[tfs-changeset: 1407945]
Diffstat (limited to 'src/vm')
-rw-r--r-- src/vm/.gitmirror | 1
-rw-r--r-- src/vm/CMakeLists.txt | 391
-rw-r--r-- src/vm/ClrEtwAll.man | 6746
-rw-r--r-- src/vm/ClrEtwAllMeta.lst | 595
-rw-r--r-- src/vm/amd64/.gitmirror | 1
-rw-r--r-- src/vm/amd64/AsmHelpers.asm | 793
-rw-r--r-- src/vm/amd64/AsmMacros.inc | 443
-rw-r--r-- src/vm/amd64/CLRErrorReporting.vrg | 5
-rw-r--r-- src/vm/amd64/CallDescrWorkerAMD64.asm | 133
-rw-r--r-- src/vm/amd64/ComCallPreStub.asm | 159
-rw-r--r-- src/vm/amd64/CrtHelpers.asm | 528
-rw-r--r-- src/vm/amd64/ExternalMethodFixupThunk.asm | 109
-rw-r--r-- src/vm/amd64/GenericComCallStubs.asm | 305
-rw-r--r-- src/vm/amd64/GenericComPlusCallStubs.asm | 149
-rw-r--r-- src/vm/amd64/InstantiatingStub.asm | 154
-rw-r--r-- src/vm/amd64/JitHelpers_Fast.asm | 981
-rw-r--r-- src/vm/amd64/JitHelpers_FastWriteBarriers.asm | 276
-rw-r--r-- src/vm/amd64/JitHelpers_InlineGetAppDomain.asm | 124
-rw-r--r-- src/vm/amd64/JitHelpers_InlineGetThread.asm | 1332
-rw-r--r-- src/vm/amd64/JitHelpers_Slow.asm | 1809
-rw-r--r-- src/vm/amd64/PInvokeStubs.asm | 282
-rw-r--r-- src/vm/amd64/RedirectedHandledJITCase.asm | 240
-rw-r--r-- src/vm/amd64/RemotingThunksAMD64.asm | 304
-rw-r--r-- src/vm/amd64/ThePreStubAMD64.asm | 37
-rw-r--r-- src/vm/amd64/TlsGetters.asm | 121
-rw-r--r-- src/vm/amd64/UMThunkStub.asm | 609
-rw-r--r-- src/vm/amd64/VirtualCallStubAMD64.asm | 110
-rw-r--r-- src/vm/amd64/asmconstants.h | 747
-rw-r--r-- src/vm/amd64/calldescrworkeramd64.S | 132
-rw-r--r-- src/vm/amd64/cgenamd64.cpp | 1152
-rw-r--r-- src/vm/amd64/cgencpu.h | 519
-rw-r--r-- src/vm/amd64/excepamd64.cpp | 599
-rw-r--r-- src/vm/amd64/excepcpu.h | 92
-rw-r--r-- src/vm/amd64/getstate.S | 48
-rw-r--r-- src/vm/amd64/getstate.asm | 86
-rw-r--r-- src/vm/amd64/gmsamd64.cpp | 127
-rw-r--r-- src/vm/amd64/gmscpu.h | 203
-rw-r--r-- src/vm/amd64/jithelpers_fast.S | 246
-rw-r--r-- src/vm/amd64/jithelpers_fastwritebarriers.S | 235
-rw-r--r-- src/vm/amd64/jithelpers_slow.S | 100
-rw-r--r-- src/vm/amd64/jithelpersamd64.cpp | 52
-rw-r--r-- src/vm/amd64/jitinterfaceamd64.cpp | 574
-rw-r--r-- src/vm/amd64/profiler.cpp | 368
-rw-r--r-- src/vm/amd64/remotingamd64.cpp | 673
-rw-r--r-- src/vm/amd64/stublinkeramd64.cpp | 9
-rw-r--r-- src/vm/amd64/stublinkeramd64.h | 11
-rw-r--r-- src/vm/amd64/theprestubamd64.S | 31
-rw-r--r-- src/vm/amd64/unixasmhelpers.S | 175
-rw-r--r-- src/vm/amd64/unixasmmacros.inc | 283
-rw-r--r-- src/vm/amd64/unixstubs.cpp | 158
-rw-r--r-- src/vm/amd64/virtualcallstubamd64.S | 90
-rw-r--r-- src/vm/amd64/virtualcallstubcpu.hpp | 791
-rw-r--r-- src/vm/appdomain.cpp | 14994
-rw-r--r-- src/vm/appdomain.hpp | 5463
-rw-r--r-- src/vm/appdomain.inl | 342
-rw-r--r-- src/vm/appdomainconfigfactory.hpp | 241
-rw-r--r-- src/vm/appdomainhelper.cpp | 547
-rw-r--r-- src/vm/appdomainhelper.h | 372
-rw-r--r-- src/vm/appdomainnative.cpp | 1778
-rw-r--r-- src/vm/appdomainnative.hpp | 154
-rw-r--r-- src/vm/appdomainstack.cpp | 196
-rw-r--r-- src/vm/appdomainstack.h | 232
-rw-r--r-- src/vm/appdomainstack.inl | 444
-rw-r--r-- src/vm/appxutil.cpp | 243
-rw-r--r-- src/vm/appxutil.h | 32
-rw-r--r-- src/vm/aptca.cpp | 1364
-rw-r--r-- src/vm/aptca.h | 111
-rw-r--r-- src/vm/argslot.h | 44
-rw-r--r-- src/vm/arm/.gitmirror | 1
-rw-r--r-- src/vm/arm/CrtHelpers.asm | 163
-rw-r--r-- src/vm/arm/PInvokeStubs.asm | 143
-rw-r--r-- src/vm/arm/armsinglestepper.cpp | 1197
-rw-r--r-- src/vm/arm/asmconstants.h | 305
-rw-r--r-- src/vm/arm/asmhelpers.asm | 2756
-rw-r--r-- src/vm/arm/asmmacros.h | 162
-rw-r--r-- src/vm/arm/cgencpu.h | 1334
-rw-r--r-- src/vm/arm/ehhelpers.asm | 183
-rw-r--r-- src/vm/arm/exceparm.cpp | 113
-rw-r--r-- src/vm/arm/excepcpu.h | 51
-rw-r--r-- src/vm/arm/gmscpu.h | 175
-rw-r--r-- src/vm/arm/jithelpersarm.cpp | 55
-rw-r--r-- src/vm/arm/memcpy.asm | 285
-rw-r--r-- src/vm/arm/memcpy_crt.asm | 1002
-rw-r--r-- src/vm/arm/patchedcode.asm | 603
-rw-r--r-- src/vm/arm/profiler.cpp | 359
-rw-r--r-- src/vm/arm/stubs.cpp | 3903
-rw-r--r-- src/vm/arm/virtualcallstubcpu.hpp | 385
-rw-r--r-- src/vm/arm64/.gitmirror | 1
-rw-r--r-- src/vm/arm64/CallDescrWorkerARM64.asm | 139
-rw-r--r-- src/vm/arm64/PInvokeStubs.asm | 138
-rw-r--r-- src/vm/arm64/asmconstants.h | 151
-rw-r--r-- src/vm/arm64/asmhelpers.asm | 1024
-rw-r--r-- src/vm/arm64/asmmacros.h | 269
-rw-r--r-- src/vm/arm64/cgenarm64.cpp | 39
-rw-r--r-- src/vm/arm64/cgencpu.h | 691
-rw-r--r-- src/vm/arm64/crthelpers.asm | 304
-rw-r--r-- src/vm/arm64/excepcpu.h | 52
-rw-r--r-- src/vm/arm64/gmscpu.h | 96
-rw-r--r-- src/vm/arm64/stubs.cpp | 1744
-rw-r--r-- src/vm/arm64/virtualcallstubcpu.hpp | 473
-rw-r--r-- src/vm/armsinglestepper.h | 153
-rw-r--r-- src/vm/array.cpp | 1440
-rw-r--r-- src/vm/array.h | 114
-rw-r--r-- src/vm/assembly.cpp | 5131
-rw-r--r-- src/vm/assembly.hpp | 1111
-rw-r--r-- src/vm/assemblyname.cpp | 304
-rw-r--r-- src/vm/assemblyname.hpp | 32
-rw-r--r-- src/vm/assemblynamelist.h | 111
-rw-r--r-- src/vm/assemblynamesconfigfactory.cpp | 265
-rw-r--r-- src/vm/assemblynamesconfigfactory.h | 73
-rw-r--r-- src/vm/assemblynative.cpp | 2616
-rw-r--r-- src/vm/assemblynative.hpp | 288
-rw-r--r-- src/vm/assemblynativeresource.cpp | 586
-rw-r--r-- src/vm/assemblynativeresource.h | 135
-rw-r--r-- src/vm/assemblysink.cpp | 154
-rw-r--r-- src/vm/assemblysink.h | 60
-rw-r--r-- src/vm/assemblyspec.cpp | 2484
-rw-r--r-- src/vm/assemblyspec.hpp | 687
-rw-r--r-- src/vm/assemblyspecbase.h | 29
-rw-r--r-- src/vm/baseassemblyspec.cpp | 750
-rw-r--r-- src/vm/baseassemblyspec.h | 306
-rw-r--r-- src/vm/baseassemblyspec.inl | 720
-rw-r--r-- src/vm/binder.cpp | 1336
-rw-r--r-- src/vm/binder.h | 501
-rw-r--r-- src/vm/cachelinealloc.cpp | 296
-rw-r--r-- src/vm/cachelinealloc.h | 147
-rw-r--r-- src/vm/callhelpers.cpp | 684
-rw-r--r-- src/vm/callhelpers.h | 654
-rw-r--r-- src/vm/callingconvention.h | 1508
-rw-r--r-- src/vm/ceeload.cpp | 16163
-rw-r--r-- src/vm/ceeload.h | 3866
-rw-r--r-- src/vm/ceeload.inl | 658
-rw-r--r-- src/vm/ceemain.cpp | 5012
-rw-r--r-- src/vm/ceemain.h | 252
-rw-r--r-- src/vm/certificatecache.cpp | 86
-rw-r--r-- src/vm/certificatecache.h | 41
-rw-r--r-- src/vm/cgensys.h | 181
-rw-r--r-- src/vm/class.cpp | 4746
-rw-r--r-- src/vm/class.h | 2690
-rw-r--r-- src/vm/class.inl | 60
-rw-r--r-- src/vm/classcompat.cpp | 3711
-rw-r--r-- src/vm/classcompat.h | 827
-rw-r--r-- src/vm/classfactory.cpp | 1000
-rw-r--r-- src/vm/classhash.cpp | 1104
-rw-r--r-- src/vm/classhash.h | 149
-rw-r--r-- src/vm/classhash.inl | 68
-rw-r--r-- src/vm/classloadlevel.h | 89
-rw-r--r-- src/vm/classnames.h | 167
-rw-r--r-- src/vm/clrex.cpp | 2873
-rw-r--r-- src/vm/clrex.h | 1313
-rw-r--r-- src/vm/clrex.inl | 52
-rw-r--r-- src/vm/clrprivbinderappx.cpp | 1058
-rw-r--r-- src/vm/clrprivbinderappx.h | 365
-rw-r--r-- src/vm/clrprivbinderfusion.cpp | 820
-rw-r--r-- src/vm/clrprivbinderfusion.h | 229
-rw-r--r-- src/vm/clrprivbinderloadfile.cpp | 265
-rw-r--r-- src/vm/clrprivbinderloadfile.h | 149
-rw-r--r-- src/vm/clrprivbinderreflectiononlywinrt.cpp | 498
-rw-r--r-- src/vm/clrprivbinderreflectiononlywinrt.h | 298
-rw-r--r-- src/vm/clrprivbinderutil.cpp | 836
-rw-r--r-- src/vm/clrprivbinderwinrt.cpp | 1777
-rw-r--r-- src/vm/clrprivbinderwinrt.h | 472
-rw-r--r-- src/vm/clrprivtypecachereflectiononlywinrt.cpp | 261
-rw-r--r-- src/vm/clrprivtypecachereflectiononlywinrt.h | 61
-rw-r--r-- src/vm/clrprivtypecachewinrt.cpp | 247
-rw-r--r-- src/vm/clrprivtypecachewinrt.h | 105
-rw-r--r-- src/vm/clrtocomcall.cpp | 1182
-rw-r--r-- src/vm/clrtocomcall.h | 75
-rw-r--r-- src/vm/clrvarargs.cpp | 115
-rw-r--r-- src/vm/clrvarargs.h | 28
-rw-r--r-- src/vm/clsload.cpp | 6637
-rw-r--r-- src/vm/clsload.hpp | 1081
-rw-r--r-- src/vm/clsload.inl | 157
-rw-r--r-- src/vm/codeman.cpp | 6516
-rw-r--r-- src/vm/codeman.h | 1830
-rw-r--r-- src/vm/codeman.inl | 17
-rw-r--r-- src/vm/comcache.cpp | 1627
-rw-r--r-- src/vm/comcache.h | 308
-rw-r--r-- src/vm/comcallablewrapper.cpp | 6770
-rw-r--r-- src/vm/comcallablewrapper.h | 2690
-rw-r--r-- src/vm/comconnectionpoints.cpp | 1309
-rw-r--r-- src/vm/comconnectionpoints.h | 255
-rw-r--r-- src/vm/comdatetime.cpp | 126
-rw-r--r-- src/vm/comdatetime.h | 50
-rw-r--r-- src/vm/comdelegate.cpp | 4000
-rw-r--r-- src/vm/comdelegate.h | 243
-rw-r--r-- src/vm/comdependenthandle.cpp | 77
-rw-r--r-- src/vm/comdependenthandle.h | 52
-rw-r--r-- src/vm/comdynamic.cpp | 1898
-rw-r--r-- src/vm/comdynamic.h | 202
-rw-r--r-- src/vm/cominterfacemarshaler.cpp | 1335
-rw-r--r-- src/vm/cominterfacemarshaler.h | 112
-rw-r--r-- src/vm/comisolatedstorage.cpp | 1070
-rw-r--r-- src/vm/comisolatedstorage.h | 203
-rw-r--r-- src/vm/commemoryfailpoint.cpp | 44
-rw-r--r-- src/vm/commemoryfailpoint.h | 29
-rw-r--r-- src/vm/commethodrental.cpp | 121
-rw-r--r-- src/vm/commethodrental.h | 30
-rw-r--r-- src/vm/commodule.cpp | 1325
-rw-r--r-- src/vm/commodule.h | 144
-rw-r--r-- src/vm/common.cpp | 8
-rw-r--r-- src/vm/common.h | 528
-rw-r--r-- src/vm/commtmemberinfomap.cpp | 1583
-rw-r--r-- src/vm/commtmemberinfomap.h | 221
-rw-r--r-- src/vm/compactlayoutwriter.cpp | 4116
-rw-r--r-- src/vm/compactlayoutwriter.h | 333
-rw-r--r-- src/vm/compatibilityswitch.cpp | 108
-rw-r--r-- src/vm/compatibilityswitch.h | 27
-rw-r--r-- src/vm/compile.cpp | 8171
-rw-r--r-- src/vm/compile.h | 919
-rw-r--r-- src/vm/comreflectioncache.hpp | 270
-rw-r--r-- src/vm/comreflectioncache.inl | 32
-rw-r--r-- src/vm/comsynchronizable.cpp | 2243
-rw-r--r-- src/vm/comsynchronizable.h | 157
-rw-r--r-- src/vm/comthreadpool.cpp | 1018
-rw-r--r-- src/vm/comthreadpool.h | 84
-rw-r--r-- src/vm/comtoclrcall.cpp | 2075
-rw-r--r-- src/vm/comtoclrcall.h | 483
-rw-r--r-- src/vm/comtypelibconverter.cpp | 792
-rw-r--r-- src/vm/comtypelibconverter.h | 108
-rw-r--r-- src/vm/comutilnative.cpp | 3102
-rw-r--r-- src/vm/comutilnative.h | 306
-rw-r--r-- src/vm/comwaithandle.cpp | 453
-rw-r--r-- src/vm/comwaithandle.h | 29
-rw-r--r-- src/vm/confighelper.cpp | 310
-rw-r--r-- src/vm/confighelper.h | 204
-rw-r--r-- src/vm/constrainedexecutionregion.cpp | 2266
-rw-r--r-- src/vm/constrainedexecutionregion.h | 568
-rw-r--r-- src/vm/context.h | 231
-rw-r--r-- src/vm/contexts.cpp | 940
-rw-r--r-- src/vm/contractimpl.cpp | 716
-rw-r--r-- src/vm/contractimpl.h | 1028
-rw-r--r-- src/vm/coreassemblyspec.cpp | 693
-rw-r--r-- src/vm/corebindresult.cpp | 71
-rw-r--r-- src/vm/coreclr/.gitmirror | 1
-rw-r--r-- src/vm/coreclr/corebindresult.h | 62
-rw-r--r-- src/vm/coreclr/corebindresult.inl | 130
-rw-r--r-- src/vm/corhost.cpp | 8936
-rw-r--r-- src/vm/coverage.cpp | 56
-rw-r--r-- src/vm/coverage.h | 20
-rw-r--r-- src/vm/crossdomaincalls.cpp | 2590
-rw-r--r-- src/vm/crossdomaincalls.h | 273
-rw-r--r-- src/vm/crossgen/.gitmirror | 1
-rw-r--r-- src/vm/crossgen/wks_crossgen.nativeproj | 163
-rw-r--r-- src/vm/crossgen_mscorlib/.gitmirror | 1
-rw-r--r-- src/vm/crossgen_mscorlib/mscorlib_crossgen.nativeproj | 27
-rw-r--r-- src/vm/crossgencompile.cpp | 464
-rw-r--r-- src/vm/crossgenroparsetypename.cpp | 495
-rw-r--r-- src/vm/crossgenroresolvenamespace.cpp | 195
-rw-r--r-- src/vm/crossgenroresolvenamespace.h | 28
-rw-r--r-- src/vm/crst.cpp | 995
-rw-r--r-- src/vm/crst.h | 566
-rw-r--r-- src/vm/ctxtcall.h | 411
-rw-r--r-- src/vm/customattribute.cpp | 1694
-rw-r--r-- src/vm/customattribute.h | 240
-rw-r--r-- src/vm/custommarshalerinfo.cpp | 642
-rw-r--r-- src/vm/custommarshalerinfo.h | 321
-rw-r--r-- src/vm/dac/.gitmirror | 1
-rw-r--r-- src/vm/dac/CMakeLists.txt | 4
-rw-r--r-- src/vm/dac/dacwks.targets | 166
-rw-r--r-- src/vm/dac/dirs.proj | 18
-rw-r--r-- src/vm/dangerousapis.h | 72
-rw-r--r-- src/vm/dataimage.cpp | 2576
-rw-r--r-- src/vm/dataimage.h | 464
-rw-r--r-- src/vm/dataimagesection.h | 105
-rw-r--r-- src/vm/dbggcinfodecoder.cpp | 933
-rw-r--r-- src/vm/dbginterface.h | 420
-rw-r--r-- src/vm/debugdebugger.cpp | 1733
-rw-r--r-- src/vm/debugdebugger.h | 385
-rw-r--r-- src/vm/debughelp.cpp | 1246
-rw-r--r-- src/vm/debuginfostore.cpp | 750
-rw-r--r-- src/vm/debuginfostore.h | 130
-rw-r--r-- src/vm/decodemd.cpp | 518
-rw-r--r-- src/vm/decodemd.h | 80
-rw-r--r-- src/vm/delegateinfo.h | 86
-rw-r--r-- src/vm/dirs.proj | 20
-rw-r--r-- src/vm/dispatchinfo.cpp | 3772
-rw-r--r-- src/vm/dispatchinfo.h | 411
-rw-r--r-- src/vm/dispparammarshaler.cpp | 646
-rw-r--r-- src/vm/dispparammarshaler.h | 228
-rw-r--r-- src/vm/dllimport.cpp | 7559
-rw-r--r-- src/vm/dllimport.h | 785
-rw-r--r-- src/vm/dllimportcallback.cpp | 1522
-rw-r--r-- src/vm/dllimportcallback.h | 595
-rw-r--r-- src/vm/domainfile.cpp | 4484
-rw-r--r-- src/vm/domainfile.h | 947
-rw-r--r-- src/vm/domainfile.inl | 137
-rw-r--r-- src/vm/dwbucketmanager.hpp | 1495
-rw-r--r-- src/vm/dwreport.cpp | 3285
-rw-r--r-- src/vm/dwreport.h | 106
-rw-r--r-- src/vm/dynamicmethod.cpp | 1590
-rw-r--r-- src/vm/dynamicmethod.h | 381
-rw-r--r-- src/vm/ecall.cpp | 792
-rw-r--r-- src/vm/ecall.h | 143
-rw-r--r-- src/vm/ecalllist.h | 2478
-rw-r--r-- src/vm/eeconfig.cpp | 2186
-rw-r--r-- src/vm/eeconfig.h | 1360
-rw-r--r-- src/vm/eeconfigfactory.cpp | 399
-rw-r--r-- src/vm/eeconfigfactory.h | 150
-rw-r--r-- src/vm/eecontract.cpp | 273
-rw-r--r-- src/vm/eecontract.h | 116
-rw-r--r-- src/vm/eedbginterface.h | 380
-rw-r--r-- src/vm/eedbginterfaceimpl.cpp | 1688
-rw-r--r-- src/vm/eedbginterfaceimpl.h | 348
-rw-r--r-- src/vm/eedbginterfaceimpl.inl | 123
-rw-r--r-- src/vm/eehash.cpp | 537
-rw-r--r-- src/vm/eehash.h | 612
-rw-r--r-- src/vm/eehash.inl | 878
-rw-r--r-- src/vm/eemessagebox.cpp | 182
-rw-r--r-- src/vm/eemessagebox.h | 71
-rw-r--r-- src/vm/eepolicy.cpp | 1581
-rw-r--r-- src/vm/eepolicy.h | 192
-rw-r--r-- src/vm/eeprofinterfaces.h | 67
-rw-r--r-- src/vm/eeprofinterfaces.inl | 28
-rw-r--r-- src/vm/eetoprofinterfaceimpl.cpp | 6249
-rw-r--r-- src/vm/eetoprofinterfaceimpl.h | 676
-rw-r--r-- src/vm/eetoprofinterfaceimpl.inl | 253
-rw-r--r-- src/vm/eetoprofinterfacewrapper.inl | 243
-rw-r--r-- src/vm/eetwain.cpp | 5871
-rw-r--r-- src/vm/encee.cpp | 1752
-rw-r--r-- src/vm/encee.h | 449
-rw-r--r-- src/vm/eventreporter.cpp | 768
-rw-r--r-- src/vm/eventreporter.h | 78
-rw-r--r-- src/vm/eventstore.cpp | 220
-rw-r--r-- src/vm/eventstore.hpp | 33
-rw-r--r-- src/vm/eventtrace.cpp | 6827
-rw-r--r-- src/vm/eventtracepriv.h | 410
-rw-r--r-- src/vm/excep.cpp | 14085
-rw-r--r-- src/vm/excep.h | 966
-rw-r--r-- src/vm/exceptionhandling.cpp | 6051
-rw-r--r-- src/vm/exceptionhandling.h | 779
-rw-r--r-- src/vm/exceptmacros.h | 592
-rw-r--r-- src/vm/exinfo.cpp | 307
-rw-r--r-- src/vm/exinfo.h | 184
-rw-r--r-- src/vm/exstate.cpp | 648
-rw-r--r-- src/vm/exstate.h | 374
-rw-r--r-- src/vm/exstatecommon.h | 531
-rw-r--r-- src/vm/extensibleclassfactory.cpp | 131
-rw-r--r-- src/vm/extensibleclassfactory.h | 36
-rw-r--r-- src/vm/fcall.cpp | 413
-rw-r--r-- src/vm/fcall.h | 1371
-rw-r--r-- src/vm/field.cpp | 1025
-rw-r--r-- src/vm/field.h | 1004
-rw-r--r-- src/vm/fieldmarshaler.cpp | 4751
-rw-r--r-- src/vm/fieldmarshaler.h | 1956
-rw-r--r-- src/vm/finalizerthread.cpp | 1448
-rw-r--r-- src/vm/finalizerthread.h | 95
-rw-r--r-- src/vm/formattype.cpp | 10
-rw-r--r-- src/vm/fptrstubs.cpp | 168
-rw-r--r-- src/vm/fptrstubs.h | 84
-rw-r--r-- src/vm/frames.cpp | 2154
-rw-r--r-- src/vm/frames.h | 3851
-rw-r--r-- src/vm/frameworkexceptionloader.cpp | 104
-rw-r--r-- src/vm/frameworkexceptionloader.h | 27
-rw-r--r-- src/vm/fusionbind.cpp | 662
-rw-r--r-- src/vm/fusioninit.cpp | 625
-rw-r--r-- src/vm/fusionsink.cpp | 216
-rw-r--r-- src/vm/gc.h | 6
-rw-r--r-- src/vm/gccover.cpp | 1683
-rw-r--r-- src/vm/gccover.h | 112
-rw-r--r-- src/vm/gcdecode.cpp | 16
-rw-r--r-- src/vm/gcdesc.h | 6
-rw-r--r-- src/vm/gcenv.cpp | 563
-rw-r--r-- src/vm/gcenv.h | 108
-rw-r--r-- src/vm/gchelpers.cpp | 1334
-rw-r--r-- src/vm/gchelpers.h | 124
-rw-r--r-- src/vm/gchost.cpp | 277
-rw-r--r-- src/vm/gcinfodecoder.cpp | 1853
-rw-r--r-- src/vm/gcscan.h | 6
-rw-r--r-- src/vm/gcstress.h | 555
-rw-r--r-- src/vm/genericdict.cpp | 970
-rw-r--r-- src/vm/genericdict.h | 302
-rw-r--r-- src/vm/generics.cpp | 1146
-rw-r--r-- src/vm/generics.h | 181
-rw-r--r-- src/vm/generics.inl | 107
-rw-r--r-- src/vm/genmeth.cpp | 1790
-rw-r--r-- src/vm/gms.h | 7
-rw-r--r-- src/vm/h2inc.pl | 65
-rw-r--r-- src/vm/h2inc.ps1 | 65
-rw-r--r-- src/vm/handletable.h | 6
-rw-r--r-- src/vm/handletable.inl | 6
-rw-r--r-- src/vm/hash.cpp | 1235
-rw-r--r-- src/vm/hash.h | 786
-rw-r--r-- src/vm/hillclimbing.cpp | 440
-rw-r--r-- src/vm/hillclimbing.h | 98
-rw-r--r-- src/vm/hostexecutioncontext.cpp | 231
-rw-r--r-- src/vm/hostexecutioncontext.h | 30
-rw-r--r-- src/vm/hosting.cpp | 1906
-rw-r--r-- src/vm/hosting.h | 65
-rw-r--r-- src/vm/i386/.gitmirror | 1
-rw-r--r-- src/vm/i386/CLRErrorReporting.vrg | 5
-rw-r--r-- src/vm/i386/RedirectedHandledJITCase.asm | 137
-rw-r--r-- src/vm/i386/asmconstants.h | 491
-rw-r--r-- src/vm/i386/asmhelpers.asm | 2446
-rw-r--r-- src/vm/i386/cgencpu.h | 577
-rw-r--r-- src/vm/i386/cgenx86.cpp | 2146
-rw-r--r-- src/vm/i386/excepcpu.h | 88
-rw-r--r-- src/vm/i386/excepx86.cpp | 3734
-rw-r--r-- src/vm/i386/fptext.asm | 278
-rw-r--r-- src/vm/i386/gmsasm.asm | 38
-rw-r--r-- src/vm/i386/gmscpu.h | 140
-rw-r--r-- src/vm/i386/gmsx86.cpp | 1238
-rw-r--r-- src/vm/i386/jithelp.asm | 2575
-rw-r--r-- src/vm/i386/jitinterfacex86.cpp | 1918
-rw-r--r-- src/vm/i386/profiler.cpp | 339
-rw-r--r-- src/vm/i386/remotingx86.cpp | 226
-rw-r--r-- src/vm/i386/stublinkerx86.cpp | 6741
-rw-r--r-- src/vm/i386/stublinkerx86.h | 802
-rw-r--r-- src/vm/i386/virtualcallstubcpu.hpp | 1078
-rw-r--r-- src/vm/ibclogger.cpp | 1198
-rw-r--r-- src/vm/ibclogger.h | 622
-rw-r--r-- src/vm/ildump.h | 223
-rw-r--r-- src/vm/ilmarshalers.cpp | 6140
-rw-r--r-- src/vm/ilmarshalers.h | 3367
-rw-r--r-- src/vm/ilstubcache.cpp | 971
-rw-r--r-- src/vm/ilstubcache.h | 244
-rw-r--r-- src/vm/ilstubresolver.cpp | 521
-rw-r--r-- src/vm/ilstubresolver.h | 125
-rw-r--r-- src/vm/inlinetracking.cpp | 430
-rw-r--r-- src/vm/inlinetracking.h | 233
-rw-r--r-- src/vm/instmethhash.cpp | 441
-rw-r--r-- src/vm/instmethhash.h | 177
-rw-r--r-- src/vm/interopconverter.cpp | 985
-rw-r--r-- src/vm/interopconverter.h | 184
-rw-r--r-- src/vm/interoputil.cpp | 7225
-rw-r--r-- src/vm/interoputil.h | 531
-rw-r--r-- src/vm/interoputil.inl | 80
-rw-r--r-- src/vm/interpreter.cpp | 12272
-rw-r--r-- src/vm/interpreter.h | 2053
-rw-r--r-- src/vm/interpreter.hpp | 482
-rw-r--r-- src/vm/invalidoverlappedwrappers.h | 71
-rw-r--r-- src/vm/invokeutil.cpp | 2128
-rw-r--r-- src/vm/invokeutil.h | 335
-rw-r--r-- src/vm/iterator_util.h | 334
-rw-r--r-- src/vm/jithelpers.cpp | 6764
-rw-r--r-- src/vm/jitinterface.cpp | 14178
-rw-r--r-- src/vm/jitinterface.h | 1654
-rw-r--r-- src/vm/jitinterfacegen.cpp | 291
-rw-r--r-- src/vm/jupiterobject.h | 92
-rw-r--r-- src/vm/listlock.cpp | 97
-rw-r--r-- src/vm/listlock.h | 358
-rw-r--r-- src/vm/listlock.inl | 52
-rw-r--r-- src/vm/loaderallocator.cpp | 1668
-rw-r--r-- src/vm/loaderallocator.hpp | 520
-rw-r--r-- src/vm/loaderallocator.inl | 184
-rw-r--r-- src/vm/managedmdimport.cpp | 723
-rw-r--r-- src/vm/managedmdimport.hpp | 123
-rw-r--r-- src/vm/marshalnative.cpp | 2708
-rw-r--r-- src/vm/marshalnative.h | 242
-rw-r--r-- src/vm/marvin32.cpp | 267
-rw-r--r-- src/vm/mda.cpp | 4018
-rw-r--r-- src/vm/mda.h | 1515
-rw-r--r-- src/vm/mda.inl | 15
-rw-r--r-- src/vm/mdaBoilerplate.exe.mda.config | 1134
-rw-r--r-- src/vm/mdaassistants.cpp | 2351
-rw-r--r-- src/vm/mdaassistants.h | 933
-rw-r--r-- src/vm/mdaassistantschemas.inl | 640
-rw-r--r-- src/vm/mdadac.cpp | 49
-rw-r--r-- src/vm/mdagroups.inl | 74
-rw-r--r-- src/vm/mdamacroscrubber.inl | 296
-rw-r--r-- src/vm/mdaschema.inl | 576
-rw-r--r-- src/vm/memberload.cpp | 1567
-rw-r--r-- src/vm/memberload.h | 266
-rw-r--r-- src/vm/message.cpp | 1172
-rw-r--r-- src/vm/message.h | 201
-rw-r--r-- src/vm/metasig.h | 722
-rw-r--r-- src/vm/method.cpp | 5849
-rw-r--r-- src/vm/method.hpp | 3775
-rw-r--r-- src/vm/method.inl | 222
-rw-r--r-- src/vm/methodimpl.cpp | 286
-rw-r--r-- src/vm/methodimpl.h | 132
-rw-r--r-- src/vm/methoditer.cpp | 371
-rw-r--r-- src/vm/methoditer.h | 128
-rw-r--r-- src/vm/methodtable.cpp | 9103
-rw-r--r-- src/vm/methodtable.h | 4298
-rw-r--r-- src/vm/methodtable.inl | 1911
-rw-r--r-- src/vm/methodtablebuilder.cpp | 13345
-rw-r--r-- src/vm/methodtablebuilder.h | 3052
-rw-r--r-- src/vm/methodtablebuilder.inl | 524
-rw-r--r-- src/vm/microsoft.comservices.h | 278
-rw-r--r-- src/vm/microsoft.comservices_i.c | 176
-rw-r--r-- src/vm/mixedmode.cpp | 237
-rw-r--r-- src/vm/mixedmode.hpp | 140
-rw-r--r-- src/vm/mlinfo.cpp | 5384
-rw-r--r-- src/vm/mlinfo.h | 997
-rw-r--r-- src/vm/mngstdinterfaces.cpp | 1031
-rw-r--r-- src/vm/mngstdinterfaces.h | 399
-rw-r--r-- src/vm/mngstditflist.h | 141
-rw-r--r-- src/vm/mscorlib.cpp | 487
-rw-r--r-- src/vm/mscorlib.h | 2201
-rw-r--r-- src/vm/mscoruefwrapper.h | 20
-rw-r--r-- src/vm/mtypes.h | 121
-rw-r--r-- src/vm/multicorejit.cpp | 1669
-rw-r--r-- src/vm/multicorejit.h | 278
-rw-r--r-- src/vm/multicorejitimpl.h | 498
-rw-r--r-- src/vm/multicorejitplayer.cpp | 1493
-rw-r--r-- src/vm/namespace.h | 84
-rw-r--r-- src/vm/nativeformatreader.h | 213
-rw-r--r-- src/vm/nativeoverlapped.cpp | 535
-rw-r--r-- src/vm/nativeoverlapped.h | 157
-rw-r--r-- src/vm/newcompressedstack.cpp | 1075
-rw-r--r-- src/vm/newcompressedstack.h | 198
-rw-r--r-- src/vm/ngenhash.h | 493
-rw-r--r-- src/vm/ngenhash.inl | 1523
-rw-r--r-- src/vm/ngenoptout.cpp | 38
-rw-r--r-- src/vm/ngenoptout.h | 35
-rw-r--r-- src/vm/notifyexternals.cpp | 282
-rw-r--r-- src/vm/notifyexternals.h | 27
-rw-r--r-- src/vm/nsenumhandleallcases.h | 44
-rw-r--r-- src/vm/nsenums.h | 77
-rw-r--r-- src/vm/object.cpp | 3510
-rw-r--r-- src/vm/object.h | 4680
-rw-r--r-- src/vm/object.inl | 307
-rw-r--r-- src/vm/objectclone.cpp | 3866
-rw-r--r-- src/vm/objectclone.h | 1269
-rw-r--r-- src/vm/objecthandle.h | 6
-rw-r--r-- src/vm/objectlist.cpp | 210
-rw-r--r-- src/vm/objectlist.h | 101
-rw-r--r-- src/vm/olecontexthelpers.cpp | 172
-rw-r--r-- src/vm/olecontexthelpers.h | 30
-rw-r--r-- src/vm/oletls.h | 211
-rw-r--r-- src/vm/olevariant.cpp | 5272
-rw-r--r-- src/vm/olevariant.h | 606
-rw-r--r-- src/vm/packedfields.inl | 346
-rw-r--r-- src/vm/pefile.cpp | 5310
-rw-r--r-- src/vm/pefile.h | 1239
-rw-r--r-- src/vm/pefile.inl | 2137
-rw-r--r-- src/vm/pefingerprint.cpp | 624
-rw-r--r-- src/vm/pefingerprint.h | 127
-rw-r--r-- src/vm/peimage.cpp | 2154
-rw-r--r-- src/vm/peimage.h | 504
-rw-r--r-- src/vm/peimage.inl | 952
-rw-r--r-- src/vm/peimagelayout.cpp | 855
-rw-r--r-- src/vm/peimagelayout.h | 200
-rw-r--r-- src/vm/peimagelayout.inl | 121
-rw-r--r-- src/vm/pendingload.cpp | 255
-rw-r--r-- src/vm/pendingload.h | 259
-rw-r--r-- src/vm/perfdefaults.cpp | 148
-rw-r--r-- src/vm/perfdefaults.h | 91
-rw-r--r-- src/vm/precode.cpp | 795
-rw-r--r-- src/vm/precode.h | 370
-rw-r--r-- src/vm/prestub.cpp | 2657
-rw-r--r-- src/vm/profattach.cpp | 1337
-rw-r--r-- src/vm/profattach.h | 426
-rw-r--r-- src/vm/profattach.inl | 567
-rw-r--r-- src/vm/profattachclient.cpp | 949
-rw-r--r-- src/vm/profattachclient.h | 79
-rw-r--r-- src/vm/profattachserver.cpp | 1297
-rw-r--r-- src/vm/profattachserver.h | 110
-rw-r--r-- src/vm/profattachserver.inl | 130
-rw-r--r-- src/vm/profdetach.cpp | 714
-rw-r--r-- src/vm/profdetach.h | 79
-rw-r--r-- src/vm/profilermetadataemitvalidator.cpp | 1788
-rw-r--r-- src/vm/profilermetadataemitvalidator.h | 1027
-rw-r--r-- src/vm/profilingenumerators.cpp | 693
-rw-r--r-- src/vm/profilingenumerators.h | 531
-rw-r--r-- src/vm/profilinghelper.cpp | 1479
-rw-r--r-- src/vm/profilinghelper.h | 137
-rw-r--r-- src/vm/profilinghelper.inl | 277
-rw-r--r-- src/vm/proftoeeinterfaceimpl.cpp | 9888
-rw-r--r-- src/vm/proftoeeinterfaceimpl.h | 617
-rw-r--r-- src/vm/proftoeeinterfaceimpl.inl | 196
-rw-r--r-- src/vm/qcall.cpp | 108
-rw-r--r-- src/vm/qcall.h | 348
-rw-r--r-- src/vm/rcwrefcache.cpp | 291
-rw-r--r-- src/vm/rcwrefcache.h | 104
-rw-r--r-- src/vm/rcwwalker.cpp | 983
-rw-r--r-- src/vm/rcwwalker.h | 151
-rw-r--r-- src/vm/readytoruninfo.cpp | 317
-rw-r--r-- src/vm/readytoruninfo.h | 131
-rw-r--r-- src/vm/reflectclasswriter.cpp | 248
-rw-r--r-- src/vm/reflectclasswriter.h | 104
-rw-r--r-- src/vm/reflectioninvocation.cpp | 3902
-rw-r--r-- src/vm/reflectioninvocation.h | 125
-rw-r--r-- src/vm/registration.h | 26
-rw-r--r-- src/vm/rejit.cpp | 3989
-rw-r--r-- src/vm/rejit.h | 570
-rw-r--r-- src/vm/rejit.inl | 346
-rw-r--r-- src/vm/remoting.cpp | 3774
-rw-r--r-- src/vm/remoting.h | 958
-rw-r--r-- src/vm/rexcep.h | 356
-rw-r--r-- src/vm/rtlfunctions.cpp | 117
-rw-r--r-- src/vm/rtlfunctions.h | 80
-rw-r--r-- src/vm/runtimecallablewrapper.cpp | 5616
-rw-r--r-- src/vm/runtimecallablewrapper.h | 2234
-rw-r--r-- src/vm/runtimeexceptionkind.h | 32
-rw-r--r-- src/vm/runtimehandles.cpp | 3618
-rw-r--r-- src/vm/runtimehandles.h | 677
-rw-r--r-- src/vm/rwlock.cpp | 2947
-rw-r--r-- src/vm/rwlock.h | 286
-rw-r--r-- src/vm/safehandle.cpp | 507
-rw-r--r-- src/vm/security.cpp | 78
-rw-r--r-- src/vm/security.h | 381
-rw-r--r-- src/vm/security.inl | 804
-rw-r--r-- src/vm/securityattributes.cpp | 2765
-rw-r--r-- src/vm/securityattributes.h | 147
-rw-r--r-- src/vm/securityattributes.inl | 45
-rw-r--r-- src/vm/securityconfig.cpp | 2182
-rw-r--r-- src/vm/securityconfig.h | 123
-rw-r--r-- src/vm/securitydeclarative.cpp | 1793
-rw-r--r-- src/vm/securitydeclarative.h | 199
-rw-r--r-- src/vm/securitydeclarative.inl | 135
-rw-r--r-- src/vm/securitydeclarativecache.cpp | 358
-rw-r--r-- src/vm/securitydeclarativecache.h | 139
-rw-r--r-- src/vm/securitydescriptor.cpp | 479
-rw-r--r-- src/vm/securitydescriptor.h | 197
-rw-r--r-- src/vm/securitydescriptor.inl | 108
-rw-r--r-- src/vm/securitydescriptorappdomain.cpp | 787
-rw-r--r-- src/vm/securitydescriptorappdomain.h | 187
-rw-r--r-- src/vm/securitydescriptorappdomain.inl | 128
-rw-r--r-- src/vm/securitydescriptorassembly.cpp | 1094
-rw-r--r-- src/vm/securitydescriptorassembly.h | 204
-rw-r--r-- src/vm/securitydescriptorassembly.inl | 117
-rw-r--r-- src/vm/securityhostprotection.cpp | 103
-rw-r--r-- src/vm/securityhostprotection.h | 15
-rw-r--r-- src/vm/securityimperative.cpp | 120
-rw-r--r-- src/vm/securityimperative.h | 37
-rw-r--r-- src/vm/securitymeta.cpp | 2356
-rw-r--r-- src/vm/securitymeta.h | 674
-rw-r--r-- src/vm/securitymeta.inl | 1280
-rw-r--r-- src/vm/securitypolicy.cpp | 1267
-rw-r--r-- src/vm/securitypolicy.h | 350
-rw-r--r-- src/vm/securityprincipal.cpp | 238
-rw-r--r-- src/vm/securityprincipal.h | 41
-rw-r--r-- src/vm/securitystackwalk.cpp | 2440
-rw-r--r-- src/vm/securitystackwalk.h | 295
-rw-r--r-- src/vm/securitytransparentassembly.cpp | 1849
-rw-r--r-- src/vm/securitytransparentassembly.h | 250
-rw-r--r-- src/vm/securitytransparentassembly.inl | 260
-rw-r--r-- src/vm/sha1.cpp | 409
-rw-r--r-- src/vm/sha1.h | 55
-rw-r--r-- src/vm/sigformat.cpp | 648
-rw-r--r-- src/vm/sigformat.h | 44
-rw-r--r-- src/vm/siginfo.cpp | 5603
-rw-r--r-- src/vm/siginfo.hpp | 1194
-rw-r--r-- src/vm/simplerwlock.cpp | 308
-rw-r--r-- src/vm/simplerwlock.hpp | 266
-rw-r--r-- src/vm/sourceline.cpp | 335
-rw-r--r-- src/vm/sourceline.h | 45
-rw-r--r-- src/vm/specialstatics.h | 41
-rw-r--r-- src/vm/spinlock.cpp | 513
-rw-r--r-- src/vm/spinlock.h | 307
-rw-r--r-- src/vm/stackbuildersink.cpp | 700
-rw-r--r-- src/vm/stackbuildersink.h | 50
-rw-r--r-- src/vm/stackcompressor.cpp | 379
-rw-r--r-- src/vm/stackcompressor.h | 40
-rw-r--r-- src/vm/stackingallocator.cpp | 376
-rw-r--r-- src/vm/stackingallocator.h | 316
-rw-r--r-- src/vm/stackprobe.cpp | 1793
-rw-r--r-- src/vm/stackprobe.h | 1008
-rw-r--r-- src/vm/stackprobe.inl | 136
-rw-r--r-- src/vm/stacksampler.cpp | 468
-rw-r--r-- src/vm/stacksampler.h | 82
-rw-r--r-- src/vm/stackwalk.cpp | 3393
-rw-r--r-- src/vm/stackwalk.h | 697
-rw-r--r-- src/vm/stackwalktypes.h | 244
-rw-r--r-- src/vm/staticallocationhelpers.inl | 240
-rw-r--r-- src/vm/stdinterfaces.cpp | 3717
-rw-r--r-- src/vm/stdinterfaces.h | 562
-rw-r--r-- src/vm/stdinterfaces_internal.h | 377
-rw-r--r-- src/vm/stdinterfaces_wrapper.cpp | 3270
-rw-r--r-- src/vm/stringliteralmap.cpp | 677
-rw-r--r-- src/vm/stringliteralmap.h | 295
-rw-r--r-- src/vm/stubcache.cpp | 303
-rw-r--r-- src/vm/stubcache.h | 140
-rw-r--r-- src/vm/stubgen.cpp | 2908
-rw-r--r-- src/vm/stubgen.h | 739
-rw-r--r-- src/vm/stubhelpers.cpp | 2149
-rw-r--r-- src/vm/stubhelpers.h | 167
-rw-r--r-- src/vm/stublink.cpp | 2598
-rw-r--r-- src/vm/stublink.h | 1231
-rw-r--r-- src/vm/stublink.inl | 117
-rw-r--r-- src/vm/stubmgr.cpp | 2752
-rw-r--r-- src/vm/stubmgr.h | 995
-rw-r--r-- src/vm/syncblk.cpp | 3664
-rw-r--r-- src/vm/syncblk.h | 1395
-rw-r--r-- src/vm/syncblk.inl | 292
-rw-r--r-- src/vm/syncclean.cpp | 104
-rw-r--r-- src/vm/syncclean.hpp | 31
-rw-r--r-- src/vm/synch.cpp | 1059
-rw-r--r-- src/vm/synch.h | 209
-rw-r--r-- src/vm/synchronizationcontextnative.cpp | 161
-rw-r--r-- src/vm/synchronizationcontextnative.h | 33
-rw-r--r-- src/vm/testhookmgr.cpp | 779
-rw-r--r-- src/vm/testhookmgr.h | 102
-rw-r--r-- src/vm/threaddebugblockinginfo.cpp | 91
-rw-r--r-- src/vm/threaddebugblockinginfo.h | 81
-rw-r--r-- src/vm/threadpoolrequest.cpp | 788
-rw-r--r-- src/vm/threadpoolrequest.h | 359
-rw-r--r-- src/vm/threads.cpp | 13602
-rw-r--r-- src/vm/threads.h | 7792
-rw-r--r-- src/vm/threads.inl | 297
-rw-r--r-- src/vm/threadstatics.cpp | 710
-rw-r--r-- src/vm/threadstatics.h | 689
-rw-r--r-- src/vm/threadsuspend.cpp | 8507
-rw-r--r-- src/vm/threadsuspend.h | 265
-rw-r--r-- src/vm/tlbexport.cpp | 6342
-rw-r--r-- src/vm/tlbexport.h | 486
-rw-r--r-- src/vm/typectxt.cpp | 336
-rw-r--r-- src/vm/typectxt.h | 192
-rw-r--r-- src/vm/typedesc.cpp | 2459
-rw-r--r-- src/vm/typedesc.h | 708
-rw-r--r-- src/vm/typedesc.inl | 71
-rw-r--r-- src/vm/typeequivalencehash.cpp | 101
-rw-r--r-- src/vm/typeequivalencehash.hpp | 117
-rw-r--r-- src/vm/typehandle.cpp | 2086
-rw-r--r-- src/vm/typehandle.h | 838
-rw-r--r-- src/vm/typehandle.inl | 286
-rw-r--r-- src/vm/typehash.cpp | 874
-rw-r--r-- src/vm/typehash.h | 161
-rw-r--r-- src/vm/typekey.h | 308
-rw-r--r-- src/vm/typeparse.cpp | 1974
-rw-r--r-- src/vm/typeparse.h | 476
-rw-r--r-- src/vm/typestring.cpp | 1674
-rw-r--r-- src/vm/typestring.h | 268
-rw-r--r-- src/vm/umthunkhash.cpp | 172
-rw-r--r-- src/vm/umthunkhash.h | 88
-rw-r--r-- src/vm/util.cpp | 4000
-rw-r--r-- src/vm/util.hpp | 1372
-rw-r--r-- src/vm/validator.cpp | 947
-rw-r--r-- src/vm/vars.cpp | 363
-rw-r--r-- src/vm/vars.hpp | 923
-rw-r--r-- src/vm/verifier.cpp | 470
-rw-r--r-- src/vm/verifier.hpp | 112
-rw-r--r-- src/vm/veropcodes.hpp | 31
-rw-r--r-- src/vm/vertable.h | 381
-rw-r--r-- src/vm/virtualcallstub.cpp | 4198
-rw-r--r-- src/vm/virtualcallstub.h | 1618
-rw-r--r-- src/vm/vm.settings | 71
-rw-r--r-- src/vm/vm.targets | 59
-rw-r--r-- src/vm/vmholder.h | 27
-rw-r--r-- src/vm/weakreferencenative.cpp | 982
-rw-r--r-- src/vm/weakreferencenative.h | 43
-rw-r--r-- src/vm/win32threadpool.cpp | 5597
-rw-r--r-- src/vm/win32threadpool.h | 1392
-rw-r--r-- src/vm/winrthelpers.cpp | 165
-rw-r--r-- src/vm/winrthelpers.h | 31
-rw-r--r-- src/vm/winrtredirector.h | 153
-rw-r--r-- src/vm/winrtredirector.inl | 71
-rw-r--r-- src/vm/winrttypenameconverter.cpp | 940
-rw-r--r-- src/vm/winrttypenameconverter.h | 127
-rw-r--r-- src/vm/wks/.gitmirror | 1
-rw-r--r-- src/vm/wks/CMakeLists.txt | 44
-rw-r--r-- src/vm/wks/wks.nativeproj | 22
-rw-r--r-- src/vm/wks/wks.targets | 386
-rw-r--r-- src/vm/wrappers.h | 351
-rw-r--r-- src/vm/zapsig.cpp | 1431
-rw-r--r-- src/vm/zapsig.h | 227
748 files changed, 799338 insertions, 0 deletions
diff --git a/src/vm/.gitmirror b/src/vm/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/vm/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror.
\ No newline at end of file
diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt
new file mode 100644
index 0000000000..489b5c7f0b
--- /dev/null
+++ b/src/vm/CMakeLists.txt
@@ -0,0 +1,391 @@
+set(CMAKE_INCLUDE_CURRENT_DIR ON)
+
+# WINTODO: Conditionalize the next check
+# AMD64 specific sources subdirectory
+set(AMD64_SOURCES_DIR amd64)
+
+# Needed because the cmunged files live in the binary folders; set(CMAKE_INCLUDE_CURRENT_DIR ON) alone is not enough
+include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR})
+
+include_directories(${CLR_DIR}/src/gc)
+include_directories(${AMD64_SOURCES_DIR})
+
+add_definitions(-D_TARGET_AMD64_=1)
+add_definitions(-DDBG_TARGET_64BIT=1)
+add_definitions(-DDBG_TARGET_AMD64=1)
+add_definitions(-DDBG_TARGET_WIN64=1)
+
+add_definitions(-DFEATURE_LEAVE_RUNTIME_HOLDER=1)
+
+add_definitions(-DUNICODE)
+add_definitions(-D_UNICODE)
+
+# Adding the Merge flag here is needed
+add_definitions(-DFEATURE_MERGE_JIT_AND_ENGINE)
+
+if(CLR_CMAKE_PLATFORM_UNIX)
+ add_compile_options(-fPIC)
+ add_definitions(-DFEATURE_COREFX_GLOBALIZATION)
+endif(CLR_CMAKE_PLATFORM_UNIX)
+
+set(VM_SOURCES_DAC_AND_WKS_COMMON
+ appdomain.cpp
+ array.cpp
+ assembly.cpp
+ baseassemblyspec.cpp
+ binder.cpp
+ ceeload.cpp
+ certificatecache.cpp
+ class.cpp
+ classhash.cpp
+ clsload.cpp
+ codeman.cpp
+ comdelegate.cpp
+ contexts.cpp
+ contractimpl.cpp
+ coreassemblyspec.cpp
+ corebindresult.cpp
+ corhost.cpp
+ crst.cpp
+ debugdebugger.cpp
+ debughelp.cpp
+ debuginfostore.cpp
+ decodemd.cpp
+ dllimport.cpp
+ domainfile.cpp
+ dynamicmethod.cpp
+ ecall.cpp
+ eedbginterfaceimpl.cpp
+ eehash.cpp
+ eetwain.cpp
+ encee.cpp
+ excep.cpp
+ exstate.cpp
+ field.cpp
+ formattype.cpp
+ fptrstubs.cpp
+ frames.cpp
+ ../gc/gccommon.cpp
+ gcdecode.cpp
+ ../gc/gceesvr.cpp
+ ../gc/gceewks.cpp
+ ../gc/gcscan.cpp
+ ../gc/gcsvr.cpp
+ ../gc/gcwks.cpp
+ genericdict.cpp
+ generics.cpp
+ ../gc/handletable.cpp
+ ../gc/handletablecore.cpp
+ ../gc/handletablescan.cpp
+ hash.cpp
+ hillclimbing.cpp
+ ilstubcache.cpp
+ ilstubresolver.cpp
+ inlinetracking.cpp
+ instmethhash.cpp
+ jitinterface.cpp
+ loaderallocator.cpp
+ memberload.cpp
+ method.cpp
+ methodimpl.cpp
+ methoditer.cpp
+ methodtable.cpp
+ object.cpp
+ ../gc/objecthandle.cpp
+ pefile.cpp
+ peimage.cpp
+ peimagelayout.cpp
+ precode.cpp
+ prestub.cpp
+ rejit.cpp
+ securitydescriptor.cpp
+ securitydescriptorassembly.cpp
+ sigformat.cpp
+ siginfo.cpp
+ stackwalk.cpp
+ stublink.cpp
+ stubmgr.cpp
+ syncblk.cpp
+ threadpoolrequest.cpp
+ threads.cpp
+ threadstatics.cpp
+ typectxt.cpp
+ typedesc.cpp
+ typehandle.cpp
+ typehash.cpp
+ typestring.cpp
+ util.cpp
+ vars.cpp
+ virtualcallstub.cpp
+ win32threadpool.cpp
+ zapsig.cpp
+)
+
+set(VM_SOURCES_DAC
+ ${VM_SOURCES_DAC_AND_WKS_COMMON}
+ threaddebugblockinginfo.cpp
+)
+
+set(VM_SOURCES_WKS
+ ${VM_SOURCES_DAC_AND_WKS_COMMON}
+ appdomainhelper.cpp
+ appdomainnative.cpp
+ appdomainstack.cpp
+ assemblyname.cpp
+ assemblynative.cpp
+ assemblysink.cpp
+ assemblyspec.cpp
+ cachelinealloc.cpp
+ callhelpers.cpp
+ ceemain.cpp
+ clrex.cpp
+ clrprivbinderutil.cpp
+ clrvarargs.cpp
+ comdatetime.cpp
+ comdependenthandle.cpp
+ comdynamic.cpp
+ comisolatedstorage.cpp
+ commemoryfailpoint.cpp
+ commethodrental.cpp
+ commodule.cpp
+ compatibilityswitch.cpp
+ comsynchronizable.cpp
+ comthreadpool.cpp
+ comutilnative.cpp
+ comwaithandle.cpp
+ constrainedexecutionregion.cpp
+ coverage.cpp
+ crossdomaincalls.cpp
+ customattribute.cpp
+ custommarshalerinfo.cpp
+ dbggcinfodecoder.cpp
+ dllimportcallback.cpp
+ eeconfig.cpp
+ eecontract.cpp
+ eemessagebox.cpp
+ eepolicy.cpp
+ eetoprofinterfaceimpl.cpp
+ eventstore.cpp
+ exceptionhandling.cpp
+ fcall.cpp
+ fieldmarshaler.cpp
+ finalizerthread.cpp
+ frameworkexceptionloader.cpp
+ gccover.cpp
+ gcenv.cpp
+ gchelpers.cpp
+ gchost.cpp
+ gcinfodecoder.cpp
+ genmeth.cpp
+ ../gc/handletablecache.cpp
+ hostexecutioncontext.cpp
+ hosting.cpp
+ ibclogger.cpp
+ ilmarshalers.cpp
+ interopconverter.cpp
+ interoputil.cpp
+ interpreter.cpp
+ invokeutil.cpp
+ jithelpers.cpp
+ listlock.cpp
+ managedmdimport.cpp
+ marshalnative.cpp
+ mdaassistants.cpp
+ message.cpp
+ methodtablebuilder.cpp
+ mlinfo.cpp
+ mscorlib.cpp # <DisablePrecompiledHeaders>true</DisablePrecompiledHeaders>
+ multicorejit.cpp # Condition="'$(FeatureMulticoreJIT)' == 'true'
+ multicorejitplayer.cpp # Condition="'$(FeatureMulticoreJIT)' == 'true'
+ nativeoverlapped.cpp
+ objectlist.cpp
+ olevariant.cpp
+ pefingerprint.cpp
+ pendingload.cpp
+ perfdefaults.cpp
+ profattach.cpp
+ profattachclient.cpp
+ profattachserver.cpp
+ profdetach.cpp
+ profilingenumerators.cpp
+ profilinghelper.cpp
+ proftoeeinterfaceimpl.cpp
+ qcall.cpp
+ reflectclasswriter.cpp
+ reflectioninvocation.cpp
+ runtimehandles.cpp
+ rwlock.cpp
+ safehandle.cpp
+ security.cpp
+ securityattributes.cpp
+ securityconfig.cpp
+ securitydeclarative.cpp
+ securitydeclarativecache.cpp
+ securitydescriptorappdomain.cpp
+ securityhostprotection.cpp
+ securitymeta.cpp
+ securitypolicy.cpp
+ securitytransparentassembly.cpp
+ sha1.cpp
+ simplerwlock.cpp
+ sourceline.cpp
+ spinlock.cpp
+ stackingallocator.cpp
+ stringliteralmap.cpp
+ stubcache.cpp
+ stubgen.cpp
+ stubhelpers.cpp
+ syncclean.cpp
+ synch.cpp
+ synchronizationcontextnative.cpp
+ testhookmgr.cpp
+ threaddebugblockinginfo.cpp
+ threadsuspend.cpp
+ typeequivalencehash.cpp
+ typeparse.cpp
+ verifier.cpp
+ weakreferencenative.cpp
+)
+
+if(WIN32)
+
+set(VM_SOURCES_DAC_AND_WKS_WIN32
+ clrtocomcall.cpp
+ comtoclrcall.cpp
+ rcwwalker.cpp
+ umthunkhash.cpp # Condition="'$(FeatureMixedMode)' == 'true'
+ winrttypenameconverter.cpp
+)
+
+set(VM_SOURCES_WKS
+ ${VM_SOURCES_WKS}
+ ${VM_SOURCES_DAC_AND_WKS_WIN32}
+ marvin32.cpp # move out of win32 when FEATURE_RANDOMIZED_STRING_HASHING is enabled for linux
+ # These should not be included for Linux
+ appxutil.cpp
+ assemblynativeresource.cpp
+ classcompat.cpp
+ classfactory.cpp
+ clrprivbinderwinrt.cpp
+ clrprivtypecachewinrt.cpp
+ comcache.cpp
+ comcallablewrapper.cpp
+ comconnectionpoints.cpp
+ cominterfacemarshaler.cpp
+ commtmemberinfomap.cpp
+ dispatchinfo.cpp
+ dispparammarshaler.cpp
+ dwreport.cpp
+ eventreporter.cpp
+ eventtrace.cpp
+ extensibleclassfactory.cpp
+ microsoft.comservices_i.c
+ mngstdinterfaces.cpp
+ notifyexternals.cpp
+ olecontexthelpers.cpp
+ profilermetadataemitvalidator.cpp
+ rcwrefcache.cpp
+ rtlfunctions.cpp
+ runtimecallablewrapper.cpp
+ securityprincipal.cpp
+ stacksampler.cpp
+ stdinterfaces.cpp
+ stdinterfaces_wrapper.cpp
+ winrthelpers.cpp
+)
+
+set(VM_SOURCES_DAC
+ ${VM_SOURCES_DAC}
+ ${VM_SOURCES_DAC_AND_WKS_WIN32}
+ # These should not be included for Linux
+ clrprivbinderwinrt.cpp
+ clrprivtypecachewinrt.cpp
+)
+
+# AMD64 specific asm sources
+set(VM_SOURCES_WKS_AMD64_ASM
+ ${AMD64_SOURCES_DIR}/AsmHelpers.asm
+ ${AMD64_SOURCES_DIR}/CallDescrWorkerAMD64.asm
+ ${AMD64_SOURCES_DIR}/ComCallPreStub.asm
+ ${AMD64_SOURCES_DIR}/CrtHelpers.asm
+ ${AMD64_SOURCES_DIR}/GenericComCallStubs.asm
+ ${AMD64_SOURCES_DIR}/GenericComPlusCallStubs.asm
+ ${AMD64_SOURCES_DIR}/getstate.asm
+ ${AMD64_SOURCES_DIR}/InstantiatingStub.asm
+ ${AMD64_SOURCES_DIR}/JitHelpers_Fast.asm
+ ${AMD64_SOURCES_DIR}/JitHelpers_FastWriteBarriers.asm
+ ${AMD64_SOURCES_DIR}/JitHelpers_InlineGetAppDomain.asm
+ ${AMD64_SOURCES_DIR}/JitHelpers_InlineGetThread.asm
+ ${AMD64_SOURCES_DIR}/JitHelpers_Slow.asm
+ ${AMD64_SOURCES_DIR}/PInvokeStubs.asm
+ ${AMD64_SOURCES_DIR}/RedirectedHandledJITCase.asm
+ ${AMD64_SOURCES_DIR}/ThePreStubAMD64.asm
+ ${AMD64_SOURCES_DIR}/ExternalMethodFixupThunk.asm
+ ${AMD64_SOURCES_DIR}/TlsGetters.asm # Condition="'$(FeatureImplicitTls)' != 'true'
+ ${AMD64_SOURCES_DIR}/UMThunkStub.asm
+ ${AMD64_SOURCES_DIR}/VirtualCallStubAMD64.asm
+)
+
+else(WIN32)
+set(VM_SOURCES_WKS_AMD64_ASM
+ ${AMD64_SOURCES_DIR}/jithelpers_fastwritebarriers.S
+ ${AMD64_SOURCES_DIR}/jithelpers_slow.S
+ ${AMD64_SOURCES_DIR}/jithelpers_fast.S
+ ${AMD64_SOURCES_DIR}/getstate.S
+ ${AMD64_SOURCES_DIR}/calldescrworkeramd64.S
+ ${AMD64_SOURCES_DIR}/unixasmhelpers.S
+ ${AMD64_SOURCES_DIR}/theprestubamd64.S
+ ${AMD64_SOURCES_DIR}/virtualcallstubamd64.S
+)
+endif(WIN32)
+
+# AMD64 specific cpp sources
+
+set(VM_SOURCES_DAC_AND_WKS_AMD64
+ ${AMD64_SOURCES_DIR}/cgenamd64.cpp
+ ${AMD64_SOURCES_DIR}/excepamd64.cpp
+ ${AMD64_SOURCES_DIR}/gmsamd64.cpp
+ ${AMD64_SOURCES_DIR}/stublinkeramd64.cpp
+)
+
+set(VM_SOURCES_WKS_AMD64
+#The following files need to be ported to Linux
+ ${AMD64_SOURCES_DIR}/jithelpersamd64.cpp
+ ${AMD64_SOURCES_DIR}/jitinterfaceamd64.cpp
+ ${AMD64_SOURCES_DIR}/profiler.cpp
+ jitinterfacegen.cpp
+)
+
+if(CLR_CMAKE_PLATFORM_UNIX)
+
+set(VM_SOURCES_WKS_AMD64
+ ${VM_SOURCES_WKS_AMD64}
+ ${AMD64_SOURCES_DIR}/unixstubs.cpp
+)
+
+endif(CLR_CMAKE_PLATFORM_UNIX)
+
+set(VM_SOURCES_DAC_AMD64
+ gcinfodecoder.cpp
+ dbggcinfodecoder.cpp
+ exceptionhandling.cpp
+)
+
+set(VM_SOURCES_WKS
+ ${VM_SOURCES_WKS}
+ ${VM_SOURCES_WKS_AMD64}
+ ${VM_SOURCES_DAC_AND_WKS_AMD64}
+)
+
+set(VM_SOURCES_DAC
+ ${VM_SOURCES_DAC}
+ ${VM_SOURCES_DAC_AMD64}
+ ${VM_SOURCES_DAC_AND_WKS_AMD64}
+)
+
+convert_to_absolute_path(VM_SOURCES_WKS ${VM_SOURCES_WKS})
+convert_to_absolute_path(VM_SOURCES_WKS_AMD64_ASM ${VM_SOURCES_WKS_AMD64_ASM})
+convert_to_absolute_path(VM_SOURCES_DAC ${VM_SOURCES_DAC})
+
+add_subdirectory(dac)
+add_subdirectory(wks)
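The preprocessor definitions added near the top of this CMakeLists (_TARGET_AMD64_, DBG_TARGET_64BIT, FEATURE_MERGE_JIT_AND_ENGINE, and so on) reach the C++ sources as macros, and the WKS and DAC source lists are consumed by the dac and wks subdirectories added at the end. A minimal sketch of how such a define typically gates target-specific code; the macro names are taken from this file, but the code itself is illustrative and not part of this commit:

    // Illustrative sketch, not from this commit: the -D flags above become
    // preprocessor macros that select target-specific code paths.
    #include <cstdio>

    #if defined(_TARGET_AMD64_) && defined(DBG_TARGET_64BIT)
    static const unsigned kTargetPointerSize = 8;  // 64-bit AMD64 build
    #else
    static const unsigned kTargetPointerSize = 4;  // hypothetical 32-bit fallback
    #endif

    int main() {
        std::printf("target pointer size: %u bytes\n", kTargetPointerSize);
        return 0;
    }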
diff --git a/src/vm/ClrEtwAll.man b/src/vm/ClrEtwAll.man
new file mode 100644
index 0000000000..944f8985d6
--- /dev/null
+++ b/src/vm/ClrEtwAll.man
@@ -0,0 +1,6746 @@
+<instrumentationManifest xmlns="http://schemas.microsoft.com/win/2004/08/events">
+ <instrumentation xmlns:xs="http://www.w3.org/2001/XMLSchema"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:win="http://manifests.microsoft.com/win/2004/08/windows/events">
+
+ <events xmlns="http://schemas.microsoft.com/win/2004/08/events">
+ <!--CLR Runtime Publisher-->
+ <provider name="Microsoft-Windows-DotNETRuntime"
+ guid="{e13c0d23-ccbc-4e12-931b-d9cc2eee27e4}"
+ symbol="MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER"
+ resourceFileName="%INSTALL_PATH%\clretwrc.dll"
+ messageFileName="%INSTALL_PATH%\clretwrc.dll">
+
+ <!--Keywords-->
+ <keywords>
+ <keyword name="GCKeyword" mask="0x1"
+ message="$(string.RuntimePublisher.GCKeywordMessage)" symbol="CLR_GC_KEYWORD"/>
+ <keyword name="GCHandleKeyword" mask="0x2"
+ message="$(string.RuntimePublisher.GCHandleKeywordMessage)" symbol="CLR_GCHANDLE_KEYWORD"/>
+ <keyword name="FusionKeyword" mask="0x4"
+ message="$(string.RuntimePublisher.FusionKeywordMessage)" symbol="CLR_FUSION_KEYWORD"/>
+ <keyword name="LoaderKeyword" mask="0x8"
+ message="$(string.RuntimePublisher.LoaderKeywordMessage)" symbol="CLR_LOADER_KEYWORD"/>
+ <keyword name="JitKeyword" mask="0x10"
+ message="$(string.RuntimePublisher.JitKeywordMessage)" symbol="CLR_JIT_KEYWORD"/>
+ <keyword name="NGenKeyword" mask="0x20"
+ message="$(string.RuntimePublisher.NGenKeywordMessage)" symbol="CLR_NGEN_KEYWORD"/>
+ <keyword name="StartEnumerationKeyword" mask="0x40"
+ message="$(string.RuntimePublisher.StartEnumerationKeywordMessage)" symbol="CLR_STARTENUMERATION_KEYWORD"/>
+ <keyword name="EndEnumerationKeyword" mask="0x80"
+ message="$(string.RuntimePublisher.EndEnumerationKeywordMessage)" symbol="CLR_ENDENUMERATION_KEYWORD"/>
+ <!-- Keyword mask 0x100 is now defunct -->
+ <!-- Keyword mask 0x200 is now defunct -->
+ <keyword name="SecurityKeyword" mask="0x400"
+ message="$(string.RuntimePublisher.SecurityKeywordMessage)" symbol="CLR_SECURITY_KEYWORD"/>
+ <keyword name="AppDomainResourceManagementKeyword" mask="0x800"
+ message="$(string.RuntimePublisher.AppDomainResourceManagementKeywordMessage)" symbol="CLR_APPDOMAINRESOURCEMANAGEMENT_KEYWORD"/>
+ <keyword name="JitTracingKeyword" mask="0x1000"
+ message="$(string.RuntimePublisher.JitTracingKeywordMessage)" symbol="CLR_JITTRACING_KEYWORD"/>
+ <keyword name="InteropKeyword" mask="0x2000"
+ message="$(string.RuntimePublisher.InteropKeywordMessage)" symbol="CLR_INTEROP_KEYWORD"/>
+ <keyword name="ContentionKeyword" mask="0x4000"
+ message="$(string.RuntimePublisher.ContentionKeywordMessage)" symbol="CLR_CONTENTION_KEYWORD"/>
+ <keyword name="ExceptionKeyword" mask="0x8000"
+ message="$(string.RuntimePublisher.ExceptionKeywordMessage)" symbol="CLR_EXCEPTION_KEYWORD"/>
+ <keyword name="ThreadingKeyword" mask="0x10000"
+ message="$(string.RuntimePublisher.ThreadingKeywordMessage)" symbol="CLR_THREADING_KEYWORD"/>
+ <keyword name="JittedMethodILToNativeMapKeyword" mask="0x20000"
+ message="$(string.RuntimePublisher.JittedMethodILToNativeMapKeywordMessage)" symbol="CLR_JITTEDMETHODILTONATIVEMAP_KEYWORD"/>
+ <keyword name="OverrideAndSuppressNGenEventsKeyword" mask="0x40000"
+ message="$(string.RuntimePublisher.OverrideAndSuppressNGenEventsKeywordMessage)" symbol="CLR_OVERRIDEANDSUPPRESSNGENEVENTS_KEYWORD"/>
+ <keyword name="TypeKeyword" mask="0x80000"
+ message="$(string.RuntimePublisher.TypeKeywordMessage)" symbol="CLR_TYPE_KEYWORD"/>
+ <keyword name="GCHeapDumpKeyword" mask="0x100000"
+ message="$(string.RuntimePublisher.GCHeapDumpKeywordMessage)" symbol="CLR_GCHEAPDUMP_KEYWORD"/>
+ <keyword name="GCSampledObjectAllocationHighKeyword" mask="0x200000"
+ message="$(string.RuntimePublisher.GCSampledObjectAllocationHighKeywordMessage)" symbol="CLR_GCHEAPALLOCHIGH_KEYWORD"/>
+ <keyword name="GCHeapSurvivalAndMovementKeyword" mask="0x400000"
+ message="$(string.RuntimePublisher.GCHeapSurvivalAndMovementKeywordMessage)" symbol="CLR_GCHEAPSURVIVALANDMOVEMENT_KEYWORD"/>
+ <keyword name="GCHeapCollectKeyword" mask="0x800000"
+ message="$(string.RuntimePublisher.GCHeapCollectKeyword)" symbol="CLR_GCHEAPCOLLECT_KEYWORD"/>
+ <keyword name="GCHeapAndTypeNamesKeyword" mask="0x1000000"
+ message="$(string.RuntimePublisher.GCHeapAndTypeNamesKeyword)" symbol="CLR_GCHEAPANDTYPENAMES_KEYWORD"/>
+ <keyword name="GCSampledObjectAllocationLowKeyword" mask="0x2000000"
+ message="$(string.RuntimePublisher.GCSampledObjectAllocationLowKeywordMessage)" symbol="CLR_GCHEAPALLOCLOW_KEYWORD"/>
+ <keyword name="PerfTrackKeyword" mask="0x20000000"
+ message="$(string.RuntimePublisher.PerfTrackKeywordMessage)" symbol="CLR_PERFTRACK_KEYWORD"/>
+ <keyword name="StackKeyword" mask="0x40000000"
+ message="$(string.RuntimePublisher.StackKeywordMessage)" symbol="CLR_STACK_KEYWORD"/>
+ <keyword name="ThreadTransferKeyword" mask="0x80000000"
+ message="$(string.RuntimePublisher.ThreadTransferKeywordMessage)" symbol="CLR_THREADTRANSFER_KEYWORD"/>
+ <keyword name="DebuggerKeyword" mask="0x100000000"
+ message="$(string.RuntimePublisher.DebuggerKeywordMessage)" symbol="CLR_DEBUGGER_KEYWORD" />
+ </keywords>
+ <!--Tasks-->
+ <tasks>
+ <task name="GarbageCollection" symbol="CLR_GC_TASK"
+ value="1" eventGUID="{044973cd-251f-4dff-a3e9-9d6307286b05}"
+ message="$(string.RuntimePublisher.GarbageCollectionTaskMessage)">
+ <opcodes>
+
+          <!-- These opcodes used to be 4 through 9, but we added 128 to them (giving 132 through 137) to avoid using the reserved range 0-10 -->
+ <opcode name="GCRestartEEEnd" message="$(string.RuntimePublisher.GCRestartEEEndOpcodeMessage)" symbol="CLR_GC_RESTARTEEEND_OPCODE" value="132"> </opcode>
+ <opcode name="GCHeapStats" message="$(string.RuntimePublisher.GCHeapStatsOpcodeMessage)" symbol="CLR_GC_HEAPSTATS_OPCODE" value="133"> </opcode>
+ <opcode name="GCCreateSegment" message="$(string.RuntimePublisher.GCCreateSegmentOpcodeMessage)" symbol="CLR_GC_CREATESEGMENT_OPCODE" value="134"> </opcode>
+ <opcode name="GCFreeSegment" message="$(string.RuntimePublisher.GCFreeSegmentOpcodeMessage)" symbol="CLR_GC_FREESEGMENT_OPCODE" value="135"> </opcode>
+ <opcode name="GCRestartEEBegin" message="$(string.RuntimePublisher.GCRestartEEBeginOpcodeMessage)" symbol="CLR_GC_RESTARTEEBEING_OPCODE" value="136"> </opcode>
+ <opcode name="GCSuspendEEEnd" message="$(string.RuntimePublisher.GCSuspendEEEndOpcodeMessage)" symbol="CLR_GC_SUSPENDEEND_OPCODE" value="137"> </opcode>
+ <opcode name="GCSuspendEEBegin" message="$(string.RuntimePublisher.GCSuspendEEBeginOpcodeMessage)" symbol="CLR_GC_SUSPENDEEBEGIN_OPCODE" value="10"> </opcode>
+ <opcode name="GCAllocationTick" message="$(string.RuntimePublisher.GCAllocationTickOpcodeMessage)" symbol="CLR_GC_ALLOCATIONTICK_OPCODE" value="11"> </opcode>
+ <opcode name="GCCreateConcurrentThread" message="$(string.RuntimePublisher.GCCreateConcurrentThreadOpcodeMessage)" symbol="CLR_GC_CREATECONCURRENTTHREAD_OPCODE" value="12"> </opcode>
+ <opcode name="GCTerminateConcurrentThread" message="$(string.RuntimePublisher.GCTerminateConcurrentThreadOpcodeMessage)" symbol="CLR_GC_TERMINATECONCURRENTTHREAD_OPCODE" value="13"> </opcode>
+ <opcode name="GCFinalizersEnd" message="$(string.RuntimePublisher.GCFinalizersEndOpcodeMessage)" symbol="CLR_GC_FINALIZERSEND_OPCODE" value="15"> </opcode>
+ <opcode name="GCFinalizersBegin" message="$(string.RuntimePublisher.GCFinalizersBeginOpcodeMessage)" symbol="CLR_GC_FINALIZERSBEGIN_OPCODE" value="19"> </opcode>
+ <opcode name="GCBulkRootEdge" message="$(string.RuntimePublisher.GCBulkRootEdgeOpcodeMessage)" symbol="CLR_GC_BULKROOTEDGE_OPCODE" value="20"> </opcode>
+ <opcode name="GCBulkRootConditionalWeakTableElementEdge" message="$(string.RuntimePublisher.GCBulkRootConditionalWeakTableElementEdgeOpcodeMessage)" symbol="CLR_GC_BULKROOTCONDITIONALWEAKTABLEELEMENTEDGE_OPCODE" value="21"> </opcode>
+ <opcode name="GCBulkNode" message="$(string.RuntimePublisher.GCBulkNodeOpcodeMessage)" symbol="CLR_GC_BULKNODE_OPCODE" value="22"> </opcode>
+ <opcode name="GCBulkEdge" message="$(string.RuntimePublisher.GCBulkEdgeOpcodeMessage)" symbol="CLR_GC_BULKEDGE_OPCODE" value="23"> </opcode>
+ <opcode name="GCSampledObjectAllocation" message="$(string.RuntimePublisher.GCSampledObjectAllocationOpcodeMessage)" symbol="CLR_GC_OBJECTALLOCATION_OPCODE" value="24"> </opcode>
+ <opcode name="GCBulkSurvivingObjectRanges" message="$(string.RuntimePublisher.GCBulkSurvivingObjectRangesOpcodeMessage)" symbol="CLR_GC_BULKSURVIVINGOBJECTRANGES_OPCODE" value="25"> </opcode>
+ <opcode name="GCBulkMovedObjectRanges" message="$(string.RuntimePublisher.GCBulkMovedObjectRangesOpcodeMessage)" symbol="CLR_GC_BULKMOVEDOBJECTRANGES_OPCODE" value="26"> </opcode>
+ <opcode name="GCGenerationRange" message="$(string.RuntimePublisher.GCGenerationRangeOpcodeMessage)" symbol="CLR_GC_GENERATIONRANGE_OPCODE" value="27"> </opcode>
+ <opcode name="GCMarkStackRoots" message="$(string.RuntimePublisher.GCMarkStackRootsOpcodeMessage)" symbol="CLR_GC_MARKSTACKROOTS_OPCODE" value="28"> </opcode>
+ <opcode name="GCMarkFinalizeQueueRoots" message="$(string.RuntimePublisher.GCMarkFinalizeQueueRootsOpcodeMessage)" symbol="CLR_GC_MARKFINALIZEQUEUEROOTS_OPCODE" value="29"> </opcode>
+ <opcode name="GCMarkHandles" message="$(string.RuntimePublisher.GCMarkHandlesOpcodeMessage)" symbol="CLR_GC_MARKHANDLES_OPCODE" value="30"> </opcode>
+ <opcode name="GCMarkOlderGenerationRoots" message="$(string.RuntimePublisher.GCMarkOlderGenerationRootsOpcodeMessage)" symbol="CLR_GC_MARKCARDS_OPCODE" value="31"> </opcode>
+ <opcode name="FinalizeObject" message="$(string.RuntimePublisher.FinalizeObjectOpcodeMessage)" symbol="CLR_GC_FINALIZEOBJECT_OPCODE" value="32"> </opcode>
+ <opcode name="SetGCHandle" message="$(string.RuntimePublisher.SetGCHandleOpcodeMessage)" symbol="CLR_GC_SETGCHANDLE_OPCODE" value="33"> </opcode>
+ <opcode name="DestroyGCHandle" message="$(string.RuntimePublisher.DestroyGCHandleOpcodeMessage)" symbol="CLR_GC_DESTROYGCHANDLE_OPCODE" value="34"> </opcode>
+ <opcode name="Triggered" message="$(string.RuntimePublisher.TriggeredOpcodeMessage)" symbol="CLR_GC_TRIGGERED_OPCODE" value="35"> </opcode>
+ <opcode name="PinObjectAtGCTime" message="$(string.RuntimePublisher.PinObjectAtGCTimeOpcodeMessage)" symbol="CLR_GC_PINGCOBJECT_OPCODE" value="36"> </opcode>
+ <opcode name="GCBulkRootCCW" message="$(string.RuntimePublisher.GCBulkRootCCWOpcodeMessage)" symbol="CLR_GC_BULKROOTCCW_OPCODE" value="38"> </opcode>
+ <opcode name="GCBulkRCW" message="$(string.RuntimePublisher.GCBulkRCWOpcodeMessage)" symbol="CLR_GC_BULKRCW_OPCODE" value="39"> </opcode>
+ <opcode name="GCBulkRootStaticVar" message="$(string.RuntimePublisher.GCBulkRootStaticVarOpcodeMessage)" symbol="CLR_GC_BULKROOTSTATICVAR_OPCODE" value="40"> </opcode>
+ <opcode name="IncreaseMemoryPressure" message="$(string.RuntimePublisher.IncreaseMemoryPressureOpcodeMessage)" symbol="CLR_GC_INCREASEMEMORYPRESSURE_OPCODE" value="200"> </opcode>
+ <opcode name="DecreaseMemoryPressure" message="$(string.RuntimePublisher.DecreaseMemoryPressureOpcodeMessage)" symbol="CLR_GC_DECREASEMEMORYPRESSURE_OPCODE" value="201"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="WorkerThreadCreation" symbol="CLR_WORKERTHREADCREATE_TASK"
+ value="2" eventGUID="{cfc4ba53-fb42-4757-8b70-5f5d51fee2f4}"
+ message="$(string.RuntimePublisher.WorkerThreadCreationTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+
+ <task name="IOThreadCreation" symbol="CLR_IOTHREADCREATE_TASK"
+ value="3" eventGUID="{c71408de-42cc-4f81-9c93-b8912abf2a0f}"
+ message="$(string.RuntimePublisher.IOThreadCreationTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+
+ <task name="WorkerThreadRetirement" symbol="CLR_WORKERTHREADRETIRE_TASK"
+ value="4" eventGUID="{efdf1eac-1d5d-4e84-893a-19b80f692176}"
+ message="$(string.RuntimePublisher.WorkerThreadRetirementTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+
+ <task name="IOThreadRetirement" symbol="CLR_IOTHREADRETIRE_TASK"
+ value="5" eventGUID="{840c8456-6457-4eb7-9cd0-d28f01c64f5e}"
+ message="$(string.RuntimePublisher.IOThreadRetirementTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+
+ <task name="ThreadpoolSuspension" symbol="CLR_THREADPOOLSUSPEND_TASK"
+ value="6" eventGUID="{c424b3e3-2ae0-416e-a039-410c5d8e5f14}"
+ message="$(string.RuntimePublisher.ThreadpoolSuspensionTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+
+ <task name="Exception" symbol="CLR_EXCEPTION_TASK"
+ value="7" eventGUID="{300ce105-86d1-41f8-b9d2-83fcbff32d99}"
+ message="$(string.RuntimePublisher.ExceptionTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+
+ <task name="Contention" symbol="CLR_CONTENTION_TASK"
+ value="8" eventGUID="{561410f5-a138-4ab3-945e-516483cddfbc}"
+ message="$(string.RuntimePublisher.ContentionTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+
+ <task name="CLRMethod" symbol="CLR_METHOD_TASK"
+ value="9" eventGUID="{3044F61A-99B0-4c21-B203-D39423C73B00}"
+ message="$(string.RuntimePublisher.MethodTaskMessage)">
+ <opcodes>
+ <!-- The following 2 opcodes are now defunct -->
+ <opcode name="DCStartComplete" message="$(string.RuntimePublisher.DCStartCompleteOpcodeMessage)" symbol="CLR_METHOD_DCSTARTCOMPLETE_OPCODE" value="14"> </opcode>
+ <opcode name="DCEndComplete" message="$(string.RuntimePublisher.DCEndCompleteOpcodeMessage)" symbol="CLR_METHOD_DCENDCOMPLETE_OPCODE" value="15"> </opcode>
+ <opcode name="MethodLoad" message="$(string.RuntimePublisher.MethodLoadOpcodeMessage)" symbol="CLR_METHOD_METHODLOAD_OPCODE" value="33"> </opcode>
+ <opcode name="MethodUnload" message="$(string.RuntimePublisher.MethodUnloadOpcodeMessage)" symbol="CLR_METHOD_METHODUNLOAD_OPCODE" value="34"> </opcode>
+ <!-- The following 2 opcodes are now defunct -->
+ <opcode name="MethodDCStart" message="$(string.RuntimePublisher.MethodDCStartOpcodeMessage)" symbol="CLR_METHOD_METHODDCSTART_OPCODE" value="35"> </opcode>
+ <opcode name="MethodDCEnd" message="$(string.RuntimePublisher.MethodDCEndOpcodeMessage)" symbol="CLR_METHOD_METHODDCEND_OPCODE" value="36"> </opcode>
+ <opcode name="MethodLoadVerbose" message="$(string.RuntimePublisher.MethodLoadVerboseOpcodeMessage)" symbol="CLR_METHOD_METHODLOADVERBOSE_OPCODE" value="37"> </opcode>
+ <opcode name="MethodUnloadVerbose" message="$(string.RuntimePublisher.MethodUnloadVerboseOpcodeMessage)" symbol="CLR_METHOD_METHODUNLOADVERBOSE_OPCODE" value="38"> </opcode>
+ <!-- The following 2 opcodes are now defunct -->
+ <opcode name="MethodDCStartVerbose" message="$(string.RuntimePublisher.MethodDCStartVerboseOpcodeMessage)" symbol="CLR_METHOD_METHODDCSTARTVERBOSE_OPCODE" value="39"> </opcode>
+ <opcode name="MethodDCEndVerbose" message="$(string.RuntimePublisher.MethodDCEndVerboseOpcodeMessage)" symbol="CLR_METHOD_METHODDCENDVERBOSE_OPCODE" value="40"> </opcode>
+ <opcode name="MethodJittingStarted" message="$(string.RuntimePublisher.MethodJittingStartedOpcodeMessage)" symbol="CLR_METHOD_METHODJITTINGSTARTED_OPCODE" value="42"> </opcode>
+ <opcode name="JitInliningSucceeded" message="$(string.RuntimePublisher.JitInliningSucceededOpcodeMessage)" symbol="CLR_JITINLININGSUCCEEDED_OPCODE" value="83"> </opcode>
+ <opcode name="JitInliningFailed" message="$(string.RuntimePublisher.JitInliningFailedOpcodeMessage)" symbol="CLR_JITINLININGFAILED_OPCODE" value="84"> </opcode>
+ <opcode name="JitTailCallSucceeded" message="$(string.RuntimePublisher.JitTailCallSucceededOpcodeMessage)" symbol="CLR_JITTAILCALLSUCCEEDED_OPCODE" value="85"> </opcode>
+ <opcode name="JitTailCallFailed" message="$(string.RuntimePublisher.JitTailCallFailedOpcodeMessage)" symbol="CLR_JITTAILCALLFAILED_OPCODE" value="86"> </opcode>
+ <opcode name="MethodILToNativeMap" message="$(string.RuntimePublisher.MethodILToNativeMapOpcodeMessage)" symbol="CLR_METHODILTONATIVEMAP_OPCODE" value="87"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="CLRLoader" symbol="CLR_LOADER_TASK"
+ value="10" eventGUID="{D00792DA-07B7-40f5-97EB-5D974E054740}"
+ message="$(string.RuntimePublisher.LoaderTaskMessage)">
+ <opcodes>
+ <opcode name="DomainModuleLoad" message="$(string.RuntimePublisher.DomainModuleLoadOpcodeMessage)" symbol="CLR_DOMAINMODULELOAD_OPCODE" value="45"> </opcode>
+ <opcode name="ModuleLoad" message="$(string.RuntimePublisher.ModuleLoadOpcodeMessage)" symbol="CLR_MODULELOAD_OPCODE" value="33"> </opcode>
+ <opcode name="ModuleUnload" message="$(string.RuntimePublisher.ModuleUnloadOpcodeMessage)" symbol="CLR_MODULEUNLOAD_OPCODE" value="34"> </opcode>
+ <!-- The following 2 opcodes are now defunct -->
+ <opcode name="ModuleDCStart" message="$(string.RuntimePublisher.ModuleDCStartOpcodeMessage)" symbol="CLR_MODULEDCSTART_OPCODE" value="35"> </opcode>
+ <opcode name="ModuleDCEnd" message="$(string.RuntimePublisher.ModuleDCEndOpcodeMessage)" symbol="CLR_MODULEDCEND_OPCODE" value="36"> </opcode>
+ <opcode name="AssemblyLoad" message="$(string.RuntimePublisher.AssemblyLoadOpcodeMessage)" symbol="CLR_ASSEMBLYLOAD_OPCODE" value="37"> </opcode>
+ <opcode name="AssemblyUnload" message="$(string.RuntimePublisher.AssemblyUnloadOpcodeMessage)" symbol="CLR_ASSEMBLYUNLOAD_OPCODE" value="38"> </opcode>
+ <opcode name="AppDomainLoad" message="$(string.RuntimePublisher.AppDomainLoadOpcodeMessage)" symbol="CLR_APPDOMAINLOAD_OPCODE" value="41"> </opcode>
+ <opcode name="AppDomainUnload" message="$(string.RuntimePublisher.AppDomainUnloadOpcodeMessage)" symbol="CLR_APPDOMAINUNLOAD_OPCODE" value="42"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="CLRStack" symbol="CLR_STACK_TASK"
+ value="11" eventGUID="{d3363dc0-243a-4620-a4d0-8a07d772f533}"
+ message="$(string.RuntimePublisher.StackTaskMessage)" >
+ <opcodes>
+ <opcode name="CLRStackWalk" message="$(string.RuntimePublisher.CLRStackWalkOpcodeMessage)" symbol="CLR_STACK_STACKWALK_OPCODE" value="82"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="CLRStrongNameVerification" symbol="CLR_STRONGNAMEVERIFICATION_TASK"
+ value="12" eventGUID="{15447A14-B523-46ae-B75B-023F900B4393}"
+ message="$(string.RuntimePublisher.StrongNameVerificationTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+
+ <task name="CLRAuthenticodeVerification" symbol="CLR_AUTHENTICODEVERIFICATION_TASK"
+ value="13" eventGUID="{B17304D9-5AFA-4da6-9F7B-5A4FA73129B6}"
+ message="$(string.RuntimePublisher.AuthenticodeVerificationTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+
+ <task name="AppDomainResourceManagement" symbol="CLR_APPDOMAINRESOURCEMANAGEMENT_TASK"
+ value="14" eventGUID="{88e83959-6185-4e0b-95b8-0e4a35df6122}"
+ message="$(string.RuntimePublisher.AppDomainResourceManagementTaskMessage)">
+ <opcodes>
+
+ <opcode name="AppDomainMemAllocated" message="$(string.RuntimePublisher.AppDomainMemAllocatedOpcodeMessage)" symbol="CLR_APPDOMAINRESOURCEMANAGEMENT_APPDOMAINMEMALLOCATED_OPCODE" value="48"> </opcode>
+ <opcode name="AppDomainMemSurvived" message="$(string.RuntimePublisher.AppDomainMemSurvivedOpcodeMessage)" symbol="CLR_APPDOMAINRESOURCEMANAGEMENT_APPDOMAINMEMSURVIVED_OPCODE" value="49"> </opcode>
+ <opcode name="ThreadCreated" message="$(string.RuntimePublisher.ThreadCreatedOpcodeMessage)" symbol="CLR_APPDOMAINRESOURCEMANAGEMENT_THREADCREATED_OPCODE" value="50"> </opcode>
+ <opcode name="ThreadTerminated" message="$(string.RuntimePublisher.ThreadTerminatedOpcodeMessage)" symbol="CLR_APPDOMAINRESOURCEMANAGEMENT_THREADTERMINATED_OPCODE" value="51"> </opcode>
+ <opcode name="ThreadDomainEnter" message="$(string.RuntimePublisher.ThreadDomainEnterOpcodeMessage)" symbol="CLR_APPDOMAINRESOURCEMANAGEMENT_THREADDOMAINENTER_OPCODE" value="52"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="CLRILStub" symbol="CLR_IL_STUB"
+ value="15" eventGUID="{D00792DA-07B7-40f5-0000-5D974E054740}"
+ message="$(string.RuntimePublisher.ILStubTaskMessage)">
+ <opcodes>
+ <opcode name="ILStubGenerated" message="$(string.RuntimePublisher.ILStubGeneratedOpcodeMessage)" symbol="CLR_ILSTUB_ILSTUBGENERATED_OPCODE" value="88"> </opcode>
+ <opcode name="ILStubCacheHit" message="$(string.RuntimePublisher.ILStubCacheHitOpcodeMessage)" symbol="CLR_ILSTUB_ILSTUBCACHEHIT_OPCODE" value="89"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="ThreadPoolWorkerThread" symbol="CLR_THREADPOOLWORKERTHREAD_TASK"
+ value="16" eventGUID="{8a9a44ab-f681-4271-8810-830dab9f5621}"
+ message="$(string.RuntimePublisher.ThreadPoolWorkerThreadTaskMessage)">
+ <opcodes>
+ <opcode name="Wait" message="$(string.RuntimePublisher.WaitOpcodeMessage)" symbol="CLR_WAIT_OPCODE" value="90"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="ThreadPoolWorkerThreadRetirement" symbol="CLR_THREADPOOLWORKERTHREADRETIREMENT_TASK"
+ value="17" eventGUID="{402ee399-c137-4dc0-a5ab-3c2dea64ac9c}"
+ message="$(string.RuntimePublisher.ThreadPoolWorkerThreadRetirementTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+
+ <task name="ThreadPoolWorkerThreadAdjustment" symbol="CLR_THREADPOOLWORKERTHREADADJUSTMENT_TASK"
+ value="18" eventGUID="{94179831-e99a-4625-8824-23ca5e00ca7d}"
+ message="$(string.RuntimePublisher.ThreadPoolWorkerThreadAdjustmentTaskMessage)">
+ <opcodes>
+ <opcode name="Sample" message="$(string.RuntimePublisher.SampleOpcodeMessage)" symbol="CLR_THREADPOOL_WORKERTHREADADJUSTMENT_SAMPLE_OPCODE" value="100"> </opcode>
+ <opcode name="Adjustment" message="$(string.RuntimePublisher.AdjustmentOpcodeMessage)" symbol="CLR_THREADPOOL_WORKERTHREADADJUSTMENT_ADJUSTMENT_OPCODE" value="101"> </opcode>
+ <opcode name="Stats" message="$(string.RuntimePublisher.StatsOpcodeMessage)" symbol="CLR_THREADPOOL_WORKERTHREADADJUSTMENT_STATS_OPCODE" value="102"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="CLRRuntimeInformation" symbol="CLR_EEStartup_TASK"
+ value="19" eventGUID="{CD7D3E32-65FE-40cd-9225-A2577D203FC3}"
+ message="$(string.RuntimePublisher.EEStartupTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+ <task name="CLRPerfTrack" symbol="CLR_PERFTRACK_TASK"
+ value="20" eventGUID="{EAC685F6-2104-4dec-88FD-91E4254221EC}"
+ message="$(string.RuntimePublisher.PerfTrackTaskMessage)">
+ <opcodes>
+ <opcode name="ModuleRangeLoad" message="$(string.RuntimePublisher.ModuleRangeLoadOpcodeMessage)" symbol="CLR_PERFTRACK_MODULERANGELOAD_OPCODE" value="10"> </opcode>
+ </opcodes>
+ </task>
+ <task name="Type" symbol="CLR_TYPE_TASK"
+ value="21" eventGUID="{003E5A9B-4757-4d3e-B4A1-E47BFB489408}"
+ message="$(string.RuntimePublisher.TypeTaskMessage)">
+ <opcodes>
+ <opcode name="BulkType" message="$(string.RuntimePublisher.BulkTypeOpcodeMessage)" symbol="CLR_BULKTYPE_OPCODE" value="10"> </opcode>
+ </opcodes>
+ </task>
+ <task name="ThreadPoolWorkingThreadCount" symbol="CLR_THREADPOOLWORKINGTHREADCOUNT_TASK"
+ value="22" eventGUID="{1b032b96-767c-42e4-8481-cb528a66d7bd}"
+ message="$(string.RuntimePublisher.ThreadPoolWorkingThreadCountTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+ <task name="ThreadPool" symbol="CLR_THREADPOOL_TASK"
+ value="23" eventGUID="{EAD685F6-2104-4dec-88FD-91E4254221E9}"
+ message="$(string.RuntimePublisher.ThreadPoolTaskMessage)">
+ <opcodes>
+ <opcode name="Enqueue" message="$(string.RuntimePublisher.EnqueueOpcodeMessage)" symbol="CLR_ENQUEUE_OPCODE" value="11"> </opcode>
+ <opcode name="Dequeue" message="$(string.RuntimePublisher.DequeueOpcodeMessage)" symbol="CLR_DEQUEUE_OPCODE" value="12"> </opcode>
+ <opcode name="IOEnqueue" message="$(string.RuntimePublisher.IOEnqueueOpcodeMessage)" symbol="CLR_IOENQUEUE_OPCODE" value="13"> </opcode>
+ <opcode name="IODequeue" message="$(string.RuntimePublisher.IODequeueOpcodeMessage)" symbol="CLR_IODEQUEUE_OPCODE" value="14"> </opcode>
+ <opcode name="IOPack" message="$(string.RuntimePublisher.IOPackOpcodeMessage)" symbol="CLR_IOPACK_OPCODE" value="15"> </opcode>
+ </opcodes>
+ </task>
+ <task name="Thread" symbol="CLR_THREADING_TASK"
+ value="24" eventGUID="{641994C5-16F2-4123-91A7-A2999DD7BFC3}"
+ message="$(string.RuntimePublisher.ThreadTaskMessage)">
+ <opcodes>
+ <opcode name="Creating" message="$(string.RuntimePublisher.ThreadCreatingOpcodeMessage)" symbol="CLR_THREAD_CREATING_OPCODE" value="11"> </opcode>
+ <opcode name="Running" message="$(string.RuntimePublisher.ThreadRunningOpcodeMessage)" symbol="CLR_THREAD_RUNNING_OPCODE" value="12"> </opcode>
+ </opcodes>
+ </task>
+ <task name="DebugIPCEvent" symbol="CLR_DEBUG_IPC_EVENT_TASK"
+ value="25" eventGUID="{EC2F3703-8321-4301-BD51-2CB9A09F31B1}"
+ message="$(string.RuntimePublisher.DebugIPCEventTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+ <task name="DebugExceptionProcessing" symbol="CLR_EXCEPTION_PROCESSING_TASK"
+ value="26" eventGUID="{C4412198-EF03-47F1-9BD1-11C6637A2062}"
+ message="$(string.RuntimePublisher.DebugExceptionProcessingTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+ </tasks>
+ <!--Maps-->
+ <maps>
+ <!-- ValueMaps -->
+ <valueMap name="GCSegmentTypeMap">
+ <map value="0x0" message="$(string.RuntimePublisher.GCSegment.SmallObjectHeapMapMessage)"/>
+ <map value="0x1" message="$(string.RuntimePublisher.GCSegment.LargeObjectHeapMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.GCSegment.ReadOnlyHeapMapMessage)"/>
+ </valueMap>
+ <valueMap name="GCAllocationKindMap">
+ <map value="0x0" message="$(string.RuntimePublisher.GCAllocation.SmallMapMessage)"/>
+ <map value="0x1" message="$(string.RuntimePublisher.GCAllocation.LargeMapMessage)"/>
+ </valueMap>
+ <valueMap name="GCTypeMap">
+ <map value="0x0" message="$(string.RuntimePublisher.GCType.NonConcurrentGCMapMessage)"/>
+ <map value="0x1" message="$(string.RuntimePublisher.GCType.BackgroundGCMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.GCType.ForegroundGCMapMessage)"/>
+ </valueMap>
+ <valueMap name="GCReasonMap">
+ <map value="0x0" message="$(string.RuntimePublisher.GCReason.AllocSmallMapMessage)"/>
+ <map value="0x1" message="$(string.RuntimePublisher.GCReason.InducedMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.GCReason.LowMemoryMapMessage)"/>
+ <map value="0x3" message="$(string.RuntimePublisher.GCReason.EmptyMapMessage)"/>
+ <map value="0x4" message="$(string.RuntimePublisher.GCReason.AllocLargeMapMessage)"/>
+ <map value="0x5" message="$(string.RuntimePublisher.GCReason.OutOfSpaceSmallObjectHeapMapMessage)"/>
+ <map value="0x6" message="$(string.RuntimePublisher.GCReason.OutOfSpaceLargeObjectHeapMapMessage)"/>
+ <map value="0x7" message="$(string.RuntimePublisher.GCReason.InducedNoForceMapMessage)"/>
+ <map value="0x8" message="$(string.RuntimePublisher.GCReason.StressMapMessage)"/>
+ <map value="0x9" message="$(string.RuntimePublisher.GCReason.InducedLowMemoryMapMessage)"/>
+ </valueMap>
+ <valueMap name="GCSuspendEEReasonMap">
+ <map value="0x0" message="$(string.RuntimePublisher.GCSuspendEEReason.SuspendOtherMapMessage)"/>
+ <map value="0x1" message="$(string.RuntimePublisher.GCSuspendEEReason.SuspendForGCMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.GCSuspendEEReason.SuspendForAppDomainShutdownMapMessage)"/>
+ <map value="0x3" message="$(string.RuntimePublisher.GCSuspendEEReason.SuspendForCodePitchingMapMessage)"/>
+ <map value="0x4" message="$(string.RuntimePublisher.GCSuspendEEReason.SuspendForShutdownMapMessage)"/>
+ <map value="0x5" message="$(string.RuntimePublisher.GCSuspendEEReason.SuspendForDebuggerMapMessage)"/>
+ <map value="0x6" message="$(string.RuntimePublisher.GCSuspendEEReason.SuspendForGCPrepMapMessage)"/>
+ <map value="0x7" message="$(string.RuntimePublisher.GCSuspendEEReason.SuspendForDebuggerSweepMapMessage)"/>
+ </valueMap>
+ <valueMap name="ContentionFlagsMap">
+ <map value="0x0" message="$(string.RuntimePublisher.Contention.ManagedMapMessage)"/>
+ <map value="0x1" message="$(string.RuntimePublisher.Contention.NativeMapMessage)"/>
+ </valueMap>
+ <valueMap name="TailCallTypeMap">
+ <map value="0x0" message="$(string.RuntimePublisher.TailCallType.OptimizedMapMessage)"/>
+ <map value="0x1" message="$(string.RuntimePublisher.TailCallType.RecursiveMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.TailCallType.HelperMapMessage)"/>
+ </valueMap>
+ <valueMap name="ThreadAdjustmentReasonMap">
+ <map value="0x0" message="$(string.RuntimePublisher.ThreadAdjustmentReason.WarmupMapMessage)"/>
+ <map value="0x1" message="$(string.RuntimePublisher.ThreadAdjustmentReason.InitializingMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.ThreadAdjustmentReason.RandomMoveMapMessage)"/>
+ <map value="0x3" message="$(string.RuntimePublisher.ThreadAdjustmentReason.ClimbingMoveMapMessage)"/>
+ <map value="0x4" message="$(string.RuntimePublisher.ThreadAdjustmentReason.ChangePointMapMessage)"/>
+ <map value="0x5" message="$(string.RuntimePublisher.ThreadAdjustmentReason.StabilizingMapMessage)"/>
+ <map value="0x6" message="$(string.RuntimePublisher.ThreadAdjustmentReason.StarvationMapMessage)"/>
+ <map value="0x7" message="$(string.RuntimePublisher.ThreadAdjustmentReason.ThreadTimedOutMapMessage)"/>
+ </valueMap>
+ <valueMap name="GCRootKindMap">
+ <map value="0" message="$(string.RuntimePublisher.GCRootKind.Stack)"/>
+ <map value="1" message="$(string.RuntimePublisher.GCRootKind.Finalizer)"/>
+ <map value="2" message="$(string.RuntimePublisher.GCRootKind.Handle)"/>
+ <map value="3" message="$(string.RuntimePublisher.GCRootKind.Other)"/>
+ </valueMap>
+ <valueMap name="GCHandleKindMap">
+ <map value="0x0" message="$(string.RuntimePublisher.GCHandleKind.WeakShortMessage)"/>
+ <map value="0x1" message="$(string.RuntimePublisher.GCHandleKind.WeakLongMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.GCHandleKind.StrongMessage)"/>
+ <map value="0x3" message="$(string.RuntimePublisher.GCHandleKind.PinnedMessage)"/>
+ <map value="0x4" message="$(string.RuntimePublisher.GCHandleKind.VariableMessage)"/>
+ <map value="0x5" message="$(string.RuntimePublisher.GCHandleKind.RefCountedMessage)"/>
+ <map value="0x6" message="$(string.RuntimePublisher.GCHandleKind.DependentMessage)"/>
+ <map value="0x7" message="$(string.RuntimePublisher.GCHandleKind.AsyncPinnedMessage)"/>
+ <map value="0x8" message="$(string.RuntimePublisher.GCHandleKind.SizedRefMessage)"/>
+ </valueMap>
+
+ <!-- BitMaps -->
+ <bitMap name="ModuleRangeTypeMap">
+ <map value="0x4" message="$(string.RuntimePublisher.ModuleRangeTypeMap.ColdRangeMessage)"/>
+ </bitMap>
+
+ <bitMap name="AppDomainFlagsMap">
+ <map value="0x1" message="$(string.RuntimePublisher.AppDomain.DefaultMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.AppDomain.ExecutableMapMessage)"/>
+ <map value="0x4" message="$(string.RuntimePublisher.AppDomain.SharedMapMessage)"/>
+ </bitMap>
+ <bitMap name="AssemblyFlagsMap">
+ <map value="0x1" message="$(string.RuntimePublisher.Assembly.DomainNeutralMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.Assembly.DynamicMapMessage)"/>
+ <map value="0x4" message="$(string.RuntimePublisher.Assembly.NativeMapMessage)"/>
+ <map value="0x8" message="$(string.RuntimePublisher.Assembly.CollectibleMapMessage)"/>
+ </bitMap>
+ <bitMap name="ModuleFlagsMap">
+ <map value="0x1" message="$(string.RuntimePublisher.Module.DomainNeutralMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.Module.NativeMapMessage)"/>
+ <map value="0x4" message="$(string.RuntimePublisher.Module.DynamicMapMessage)"/>
+ <map value="0x8" message="$(string.RuntimePublisher.Module.ManifestMapMessage)"/>
+ </bitMap>
+ <bitMap name="MethodFlagsMap">
+ <map value="0x1" message="$(string.RuntimePublisher.Method.DynamicMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.Method.GenericMapMessage)"/>
+ <map value="0x4" message="$(string.RuntimePublisher.Method.HasSharedGenericCodeMapMessage)"/>
+ <map value="0x8" message="$(string.RuntimePublisher.Method.JittedMapMessage)"/>
+ </bitMap>
+ <bitMap name="StartupModeMap">
+ <map value="0x1" message="$(string.RuntimePublisher.StartupMode.ManagedExeMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.StartupMode.HostedCLRMapMessage)"/>
+ <map value="0x4" message="$(string.RuntimePublisher.StartupMode.IjwDllMapMessage)"/>
+ <map value="0x8" message="$(string.RuntimePublisher.StartupMode.ComActivatedMapMessage)"/>
+ <map value="0x10" message="$(string.RuntimePublisher.StartupMode.OtherMapMessage)"/>
+ </bitMap>
+ <bitMap name="RuntimeSkuMap">
+ <map value="0x1" message="$(string.RuntimePublisher.RuntimeSku.DesktopCLRMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.RuntimeSku.CoreCLRMapMessage)"/>
+ </bitMap>
+ <bitMap name="ExceptionThrownFlagsMap">
+ <map value="0x1" message="$(string.RuntimePublisher.ExceptionThrown.HasInnerExceptionMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.ExceptionThrown.NestedMapMessage)"/>
+ <map value="0x4" message="$(string.RuntimePublisher.ExceptionThrown.ReThrownMapMessage)"/>
+ <map value="0x8" message="$(string.RuntimePublisher.ExceptionThrown.CorruptedStateMapMessage)"/>
+ <map value="0x10" message="$(string.RuntimePublisher.ExceptionThrown.CLSCompliantMapMessage)"/>
+ </bitMap>
+ <bitMap name="ILStubGeneratedFlagsMap">
+ <map value="0x1" message="$(string.RuntimePublisher.ILStubGenerated.ReverseInteropMapMessage)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.ILStubGenerated.COMInteropMapMessage)"/>
+ <map value="0x4" message="$(string.RuntimePublisher.ILStubGenerated.NGenedStubMapMessage)"/>
+ <map value="0x8" message="$(string.RuntimePublisher.ILStubGenerated.DelegateMapMessage)"/>
+ <map value="0x10" message="$(string.RuntimePublisher.ILStubGenerated.VarArgMapMessage)"/>
+ <map value="0x20" message="$(string.RuntimePublisher.ILStubGenerated.UnmanagedCalleeMapMessage)"/>
+ </bitMap>
+ <bitMap name="StartupFlagsMap">
+ <map value="0x000001" message="$(string.RuntimePublisher.Startup.CONCURRENT_GCMapMessage)"/>
+ <map value="0x000002" message="$(string.RuntimePublisher.Startup.LOADER_OPTIMIZATION_SINGLE_DOMAINMapMessage)"/>
+ <map value="0x000004" message="$(string.RuntimePublisher.Startup.LOADER_OPTIMIZATION_MULTI_DOMAINMapMessage)"/>
+ <map value="0x000010" message="$(string.RuntimePublisher.Startup.LOADER_SAFEMODEMapMessage)"/>
+ <map value="0x000100" message="$(string.RuntimePublisher.Startup.LOADER_SETPREFERENCEMapMessage)"/>
+ <map value="0x001000" message="$(string.RuntimePublisher.Startup.SERVER_GCMapMessage)"/>
+ <map value="0x002000" message="$(string.RuntimePublisher.Startup.HOARD_GC_VMMapMessage)"/>
+ <map value="0x004000" message="$(string.RuntimePublisher.Startup.SINGLE_VERSION_HOSTING_INTERFACEMapMessage)"/>
+ <map value="0x010000" message="$(string.RuntimePublisher.Startup.LEGACY_IMPERSONATIONMapMessage)"/>
+ <map value="0x020000" message="$(string.RuntimePublisher.Startup.DISABLE_COMMITTHREADSTACKMapMessage)"/>
+ <map value="0x040000" message="$(string.RuntimePublisher.Startup.ALWAYSFLOW_IMPERSONATIONMapMessage)"/>
+ <map value="0x080000" message="$(string.RuntimePublisher.Startup.TRIM_GC_COMMITMapMessage)"/>
+ <map value="0x100000" message="$(string.RuntimePublisher.Startup.ETWMapMessage)"/>
+ <map value="0x200000" message="$(string.RuntimePublisher.Startup.SERVER_BUILDMapMessage)"/>
+ <map value="0x400000" message="$(string.RuntimePublisher.Startup.ARMMapMessage)"/>
+ </bitMap>
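+ <!-- Editorial note: these values appear to mirror the unmanaged hosting
+ STARTUP_FLAGS enumeration (e.g. CONCURRENT_GC = 0x1, SERVER_GC = 0x1000),
+ which is why the bit positions are sparse rather than contiguous. -->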
+ <bitMap name="TypeFlagsMap">
+ <map value="0x1" message="$(string.RuntimePublisher.TypeFlags.Delegate)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.TypeFlags.Finalizable)"/>
+ <map value="0x4" message="$(string.RuntimePublisher.TypeFlags.ExternallyImplementedCOMObject)"/>
+ <map value="0x8" message="$(string.RuntimePublisher.TypeFlags.Array)"/>
+ </bitMap>
+ <bitMap name="GCRootFlagsMap">
+ <map value="0x1" message="$(string.RuntimePublisher.GCRootFlags.Pinning)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.GCRootFlags.WeakRef)"/>
+ <map value="0x4" message="$(string.RuntimePublisher.GCRootFlags.Interior)"/>
+ <map value="0x8" message="$(string.RuntimePublisher.GCRootFlags.RefCounted)"/>
+ </bitMap>
+ <bitMap name="GCRootStaticVarFlagsMap">
+ <map value="0x1" message="$(string.RuntimePublisher.GCRootStaticVarFlags.ThreadLocal)"/>
+ </bitMap>
+ <bitMap name="GCRootCCWFlagsMap">
+ <map value="0x1" message="$(string.RuntimePublisher.GCRootCCWFlags.Strong)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.GCRootCCWFlags.Pegged)"/>
+ </bitMap>
+ <bitMap name="ThreadFlagsMap">
+ <map value="0x1" message="$(string.RuntimePublisher.ThreadFlags.GCSpecial)"/>
+ <map value="0x2" message="$(string.RuntimePublisher.ThreadFlags.Finalizer)"/>
+ <map value="0x4" message="$(string.RuntimePublisher.ThreadFlags.ThreadPoolWorker)"/>
+ </bitMap>
+ </maps>
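+
+ <!-- Editorial note: the two map kinds above decode differently. A valueMap
+ translates one exact payload value into one string, while a bitMap tests
+ each bit and emits every label whose bit is set. A minimal consumer-side
+ C++ sketch of the distinction (illustrative only; these helper names are
+ hypothetical and not generated from this manifest):
+
+ #include <cstdint>
+ #include <string>
+
+ // valueMap semantics: exact match, one label per value (GCTypeMap).
+ const char* DecodeGCType(uint32_t v) {
+     switch (v) {
+         case 0x0: return "Non-concurrent GC";
+         case 0x1: return "Background GC";
+         case 0x2: return "Foreground GC";
+         default:  return "Unknown";
+     }
+ }
+
+ // bitMap semantics: each set bit contributes a label (ModuleFlagsMap).
+ std::string DecodeModuleFlags(uint32_t v) {
+     std::string s;
+     if (v & 0x1) s += "DomainNeutral ";
+     if (v & 0x2) s += "Native ";
+     if (v & 0x4) s += "Dynamic ";
+     if (v & 0x8) s += "Manifest ";
+     return s;
+ }
+ -->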
+
+ <!--Templates-->
+ <templates>
+ <template tid="StrongNameVerification">
+ <data name="VerificationFlags" inType="win:UInt32" outType="win:HexInt32"/>
+ <data name="ErrorCode" inType="win:UInt32" outType="win:HexInt32"/>
+ <data name="FullyQualifiedAssemblyName" inType="win:UnicodeString" />
+
+ <UserData>
+ <StrongNameVerification xmlns="myNs">
+ <VerificationFlags> %1 </VerificationFlags>
+ <ErrorCode> %2 </ErrorCode>
+ <FullyQualifiedAssemblyName> %3 </FullyQualifiedAssemblyName>
+ </StrongNameVerification>
+ </UserData>
+ </template>
+
+ <template tid="StrongNameVerification_V1">
+ <data name="VerificationFlags" inType="win:UInt32" outType="win:HexInt32"/>
+ <data name="ErrorCode" inType="win:UInt32" outType="win:HexInt32"/>
+ <data name="FullyQualifiedAssemblyName" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <StrongNameVerification_V1 xmlns="myNs">
+ <VerificationFlags> %1 </VerificationFlags>
+ <ErrorCode> %2 </ErrorCode>
+ <FullyQualifiedAssemblyName> %3 </FullyQualifiedAssemblyName>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </StrongNameVerification_V1>
+ </UserData>
+ </template>
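+
+ <!-- Editorial note: throughout this manifest, a _V1/_V2/_V3 suffix marks a
+ newer version of a template that appends fields (here ClrInstanceID) to the
+ end of the payload, so consumers that parse only the original fields keep
+ working unchanged against newer runtimes. -->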
+
+ <template tid="AuthenticodeVerification">
+ <data name="VerificationFlags" inType="win:UInt32" outType="win:HexInt32"/>
+ <data name="ErrorCode" inType="win:UInt32" outType="win:HexInt32"/>
+ <data name="ModulePath" inType="win:UnicodeString" />
+
+ <UserData>
+ <AuthenticodeVerification xmlns="myNs">
+ <VerificationFlags> %1 </VerificationFlags>
+ <ErrorCode> %2 </ErrorCode>
+ <ModulePath> %3 </ModulePath>
+ </AuthenticodeVerification>
+ </UserData>
+ </template>
+
+ <template tid="AuthenticodeVerification_V1">
+ <data name="VerificationFlags" inType="win:UInt32" outType="win:HexInt32"/>
+ <data name="ErrorCode" inType="win:UInt32" outType="win:HexInt32"/>
+ <data name="ModulePath" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <AuthenticodeVerification_V1 xmlns="myNs">
+ <VerificationFlags> %1 </VerificationFlags>
+ <ErrorCode> %2 </ErrorCode>
+ <ModulePath> %3 </ModulePath>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </AuthenticodeVerification_V1>
+ </UserData>
+ </template>
+
+ <template tid="RuntimeInformation">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="Sku" inType="win:UInt16" map="RuntimeSkuMap" />
+ <data name="BclMajorVersion" inType="win:UInt16" />
+ <data name="BclMinorVersion" inType="win:UInt16" />
+ <data name="BclBuildNumber" inType="win:UInt16" />
+ <data name="BclQfeNumber" inType="win:UInt16" />
+ <data name="VMMajorVersion" inType="win:UInt16" />
+ <data name="VMMinorVersion" inType="win:UInt16" />
+ <data name="VMBuildNumber" inType="win:UInt16" />
+ <data name="VMQfeNumber" inType="win:UInt16" />
+ <data name="StartupFlags" inType="win:UInt32" map="StartupFlagsMap" />
+ <data name="StartupMode" inType="win:UInt8" map="StartupModeMap" />
+ <data name="CommandLine" inType="win:UnicodeString" />
+ <data name="ComObjectGuid" inType="win:GUID" />
+ <data name="RuntimeDllPath" inType="win:UnicodeString" />
+
+ <UserData>
+ <RuntimeInformation xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <Sku> %2 </Sku>
+ <BclMajorVersion> %3 </BclMajorVersion>
+ <BclMinorVersion> %4 </BclMinorVersion>
+ <BclBuildNumber> %5 </BclBuildNumber>
+ <BclQfeNumber> %6 </BclQfeNumber>
+ <VMMajorVersion> %7 </VMMajorVersion>
+ <VMMinorVersion> %8 </VMMinorVersion>
+ <VMBuildNumber> %9 </VMBuildNumber>
+ <VMQfeNumber> %10 </VMQfeNumber>
+ <StartupFlags> %11 </StartupFlags>
+ <StartupMode> %12 </StartupMode>
+ <CommandLine> %13 </CommandLine>
+ <ComObjectGuid> %14 </ComObjectGuid>
+ <RuntimeDllPath> %15 </RuntimeDllPath>
+ </RuntimeInformation>
+ </UserData>
+ </template>
+
+ <template tid="GCStart">
+ <data name="Count" inType="win:UInt32" outType="xs:unsignedInt"/>
+ <data name="Reason" inType="win:UInt32" map="GCReasonMap" />
+
+ <UserData>
+ <GCStart xmlns="myNs">
+ <Count> %1 </Count>
+ <Reason> %2 </Reason>
+ </GCStart>
+ </UserData>
+ </template>
+
+ <template tid="GCStart_V1">
+ <data name="Count" inType="win:UInt32" outType="xs:unsignedInt"/>
+ <data name="Depth" inType="win:UInt32" outType="xs:unsignedInt"/>
+ <data name="Reason" inType="win:UInt32" map="GCReasonMap" />
+ <data name="Type" inType="win:UInt32" map="GCTypeMap" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCStart_V1 xmlns="myNs">
+ <Count> %1 </Count>
+ <Depth> %2 </Depth>
+ <Reason> %3 </Reason>
+ <Type> %4 </Type>
+ <ClrInstanceID> %5 </ClrInstanceID>
+ </GCStart_V1>
+ </UserData>
+ </template>
+
+ <template tid="GCStart_V2">
+ <data name="Count" inType="win:UInt32" outType="xs:unsignedInt"/>
+ <data name="Depth" inType="win:UInt32" outType="xs:unsignedInt"/>
+ <data name="Reason" inType="win:UInt32" map="GCReasonMap" />
+ <data name="Type" inType="win:UInt32" map="GCTypeMap" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="ClientSequenceNumber" inType="win:UInt64" />
+
+ <UserData>
+ <GCStart_V2 xmlns="myNs">
+ <Count> %1 </Count>
+ <Depth> %2 </Depth>
+ <Reason> %3 </Reason>
+ <Type> %4 </Type>
+ <ClrInstanceID> %5 </ClrInstanceID>
+ <ClientSequenceNumber> %6 </ClientSequenceNumber>
+ </GCStart_V2>
+ </UserData>
+ </template>
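+
+ <!-- Editorial note: an event's binary payload must be laid out in exactly
+ the order its template declares the <data> elements. A minimal sketch of
+ hand-firing GCStart_V2 with the user-mode ETW API (the provider handle and
+ event descriptor parameters here are hypothetical; the runtime actually
+ calls helpers generated from this manifest):
+
+ #include <windows.h>
+ #include <evntprov.h>
+
+ void FireGCStartV2Sketch(REGHANDLE hProvider,
+                          const EVENT_DESCRIPTOR* pGCStartV2Descriptor,
+                          UINT32 count, UINT32 depth, UINT32 reason,
+                          UINT32 type, UINT16 clrInstanceId,
+                          UINT64 clientSequenceNumber)
+ {
+     // One descriptor per <data> element, in declaration order.
+     EVENT_DATA_DESCRIPTOR data[6];
+     EventDataDescCreate(&data[0], &count,                sizeof(count));
+     EventDataDescCreate(&data[1], &depth,                sizeof(depth));
+     EventDataDescCreate(&data[2], &reason,               sizeof(reason));
+     EventDataDescCreate(&data[3], &type,                 sizeof(type));
+     EventDataDescCreate(&data[4], &clrInstanceId,        sizeof(clrInstanceId));
+     EventDataDescCreate(&data[5], &clientSequenceNumber, sizeof(clientSequenceNumber));
+     EventWrite(hProvider, pGCStartV2Descriptor, 6, data);
+ }
+ -->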
+
+ <template tid="GCEnd">
+ <data name="Count" inType="win:UInt32" />
+ <data name="Depth" inType="win:UInt16" />
+
+ <UserData>
+ <GCEnd xmlns="myNs">
+ <Count> %1 </Count>
+ <Depth> %2 </Depth>
+ </GCEnd>
+ </UserData>
+ </template>
+
+ <template tid="GCEnd_V1">
+ <data name="Count" inType="win:UInt32" />
+ <data name="Depth" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCEnd_V1 xmlns="myNs">
+ <Count> %1 </Count>
+ <Depth> %2 </Depth>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ </GCEnd_V1>
+ </UserData>
+ </template>
+
+ <template tid="GCHeapStats">
+ <data name="GenerationSize0" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="TotalPromotedSize0" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="GenerationSize1" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="TotalPromotedSize1" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="GenerationSize2" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="TotalPromotedSize2" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="GenerationSize3" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="TotalPromotedSize3" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="FinalizationPromotedSize" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="FinalizationPromotedCount" inType="win:UInt64" />
+ <data name="PinnedObjectCount" inType="win:UInt32" />
+ <data name="SinkBlockCount" inType="win:UInt32" />
+ <data name="GCHandleCount" inType="win:UInt32" />
+
+ <UserData>
+ <GCHeapStats xmlns="myNs">
+ <GenerationSize0> %1 </GenerationSize0>
+ <TotalPromotedSize0> %2 </TotalPromotedSize0>
+ <GenerationSize1> %3 </GenerationSize1>
+ <TotalPromotedSize1> %4 </TotalPromotedSize1>
+ <GenerationSize2> %5 </GenerationSize2>
+ <TotalPromotedSize2> %6 </TotalPromotedSize2>
+ <GenerationSize3> %7 </GenerationSize3>
+ <TotalPromotedSize3> %8 </TotalPromotedSize3>
+ <FinalizationPromotedSize> %9 </FinalizationPromotedSize>
+ <FinalizationPromotedCount> %10 </FinalizationPromotedCount>
+ <PinnedObjectCount> %11 </PinnedObjectCount>
+ <SinkBlockCount> %12 </SinkBlockCount>
+ <GCHandleCount> %13 </GCHandleCount>
+ </GCHeapStats>
+ </UserData>
+ </template>
+
+ <template tid="GCHeapStats_V1">
+ <data name="GenerationSize0" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="TotalPromotedSize0" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="GenerationSize1" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="TotalPromotedSize1" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="GenerationSize2" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="TotalPromotedSize2" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="GenerationSize3" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="TotalPromotedSize3" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="FinalizationPromotedSize" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="FinalizationPromotedCount" inType="win:UInt64" />
+ <data name="PinnedObjectCount" inType="win:UInt32" />
+ <data name="SinkBlockCount" inType="win:UInt32" />
+ <data name="GCHandleCount" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCHeapStats_V1 xmlns="myNs">
+ <GenerationSize0> %1 </GenerationSize0>
+ <TotalPromotedSize0> %2 </TotalPromotedSize0>
+ <GenerationSize1> %3 </GenerationSize1>
+ <TotalPromotedSize1> %4 </TotalPromotedSize1>
+ <GenerationSize2> %5 </GenerationSize2>
+ <TotalPromotedSize2> %6 </TotalPromotedSize2>
+ <GenerationSize3> %7 </GenerationSize3>
+ <TotalPromotedSize3> %8 </TotalPromotedSize3>
+ <FinalizationPromotedSize> %9 </FinalizationPromotedSize>
+ <FinalizationPromotedCount> %10 </FinalizationPromotedCount>
+ <PinnedObjectCount> %11 </PinnedObjectCount>
+ <SinkBlockCount> %12 </SinkBlockCount>
+ <GCHandleCount> %13 </GCHandleCount>
+ <ClrInstanceID> %14 </ClrInstanceID>
+ </GCHeapStats_V1>
+ </UserData>
+ </template>
+
+ <template tid="GCCreateSegment">
+ <data name="Address" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="Size" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="Type" inType="win:UInt32" map="GCSegmentTypeMap" />
+
+ <UserData>
+ <GCCreateSegment xmlns="myNs">
+ <Address> %1 </Address>
+ <Size> %2 </Size>
+ <Type> %3 </Type>
+ </GCCreateSegment>
+ </UserData>
+ </template>
+
+ <template tid="GCCreateSegment_V1">
+ <data name="Address" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="Size" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="Type" inType="win:UInt32" map="GCSegmentTypeMap" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCCreateSegment_V1 xmlns="myNs">
+ <Address> %1 </Address>
+ <Size> %2 </Size>
+ <Type> %3 </Type>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </GCCreateSegment_V1>
+ </UserData>
+ </template>
+
+ <template tid="GCFreeSegment">
+ <data name="Address" inType="win:UInt64" outType="win:HexInt64" />
+
+ <UserData>
+ <GCFreeSegment xmlns="myNs">
+ <Address> %1 </Address>
+ </GCFreeSegment>
+ </UserData>
+ </template>
+
+ <template tid="GCFreeSegment_V1">
+ <data name="Address" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCFreeSegment_V1 xmlns="myNs">
+ <Address> %1 </Address>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </GCFreeSegment_V1>
+ </UserData>
+ </template>
+
+ <template tid="GCNoUserData">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <GCNoUserData xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ </GCNoUserData>
+ </UserData>
+ </template>
+
+ <template tid="GCSuspendEE">
+ <data name="Reason" inType="win:UInt16" map="GCSuspendEEReasonMap" />
+
+ <UserData>
+ <GCSuspendEE xmlns="myNs">
+ <Reason> %1 </Reason>
+ </GCSuspendEE>
+ </UserData>
+ </template>
+
+ <template tid="GCSuspendEE_V1">
+ <data name="Reason" inType="win:UInt32" map="GCSuspendEEReasonMap" />
+ <data name="Count" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCSuspendEE_V1 xmlns="myNs">
+ <Reason> %1 </Reason>
+ <Count> %2 </Count>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ </GCSuspendEE_V1>
+ </UserData>
+ </template>
+
+ <template tid="GCAllocationTick">
+ <data name="AllocationAmount" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="AllocationKind" inType="win:UInt32" map="GCAllocationKindMap" />
+
+ <UserData>
+ <GCAllocationTick xmlns="myNs">
+ <AllocationAmount> %1 </AllocationAmount>
+ <AllocationKind> %2 </AllocationKind>
+ </GCAllocationTick>
+ </UserData>
+ </template>
+
+ <template tid="GCAllocationTick_V1">
+ <data name="AllocationAmount" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="AllocationKind" inType="win:UInt32" map="GCAllocationKindMap" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCAllocationTick_V1 xmlns="myNs">
+ <AllocationAmount> %1 </AllocationAmount>
+ <AllocationKind> %2 </AllocationKind>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ </GCAllocationTick_V1>
+ </UserData>
+ </template>
+
+ <template tid="GCAllocationTick_V2">
+ <data name="AllocationAmount" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="AllocationKind" inType="win:UInt32" map="GCAllocationKindMap" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="AllocationAmount64" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="TypeID" inType="win:Pointer" />
+ <data name="TypeName" inType="win:UnicodeString" />
+ <data name="HeapIndex" inType="win:UInt32" />
+
+ <UserData>
+ <GCAllocationTick_V2 xmlns="myNs">
+ <AllocationAmount> %1 </AllocationAmount>
+ <AllocationKind> %2 </AllocationKind>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ <AllocationAmount64> %4 </AllocationAmount64>
+ <TypeID> %5 </TypeID>
+ <TypeName> %6 </TypeName>
+ <HeapIndex> %7 </HeapIndex>
+ </GCAllocationTick_V2>
+ </UserData>
+ </template>
+
+ <template tid="GCAllocationTick_V3">
+ <data name="AllocationAmount" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="AllocationKind" inType="win:UInt32" map="GCAllocationKindMap" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="AllocationAmount64" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="TypeID" inType="win:Pointer" />
+ <data name="TypeName" inType="win:UnicodeString" />
+ <data name="HeapIndex" inType="win:UInt32" />
+ <data name="Address" inType="win:Pointer" />
+
+ <UserData>
+ <GCAllocationTick_V3 xmlns="myNs">
+ <AllocationAmount> %1 </AllocationAmount>
+ <AllocationKind> %2 </AllocationKind>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ <AllocationAmount64> %4 </AllocationAmount64>
+ <TypeID> %5 </TypeID>
+ <TypeName> %6 </TypeName>
+ <HeapIndex> %7 </HeapIndex>
+ <Address> %8 </Address>
+ </GCAllocationTick_V3>
+ </UserData>
+ </template>
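+
+ <!-- Editorial note: AllocationAmount is kept as a 32-bit field for
+ compatibility with V0/V1 consumers; AllocationAmount64 (added in V2)
+ carries the full value so amounts that exceed the 32-bit range are not
+ truncated. Consumers should prefer the 64-bit field when it is present. -->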
+
+ <template tid="GCCreateConcurrentThread">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <GCCreateConcurrentThread xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ </GCCreateConcurrentThread>
+ </UserData>
+ </template>
+
+ <template tid="GCTerminateConcurrentThread">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <GCTerminateConcurrentThread xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ </GCTerminateConcurrentThread>
+ </UserData>
+ </template>
+
+ <template tid="GCFinalizersEnd">
+ <data name="Count" inType="win:UInt32" />
+ <UserData>
+ <GCFinalizersEnd xmlns="myNs">
+ <Count> %1 </Count>
+ </GCFinalizersEnd>
+ </UserData>
+ </template>
+
+ <template tid="GCFinalizersEnd_V1">
+ <data name="Count" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <GCFinalizersEnd_V1 xmlns="myNs">
+ <Count> %1 </Count>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </GCFinalizersEnd_V1>
+ </UserData>
+ </template>
+
+ <template tid="GCMark">
+ <data name="HeapNum" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCMark xmlns="myNs">
+ <HeapNum> %1 </HeapNum>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </GCMark>
+ </UserData>
+ </template>
+
+ <template tid="FinalizeObject">
+ <data name="TypeID" inType="win:Pointer" />
+ <data name="ObjectID" inType="win:Pointer" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <FinalizeObject xmlns="myNs">
+ <TypeID> %1 </TypeID>
+ <ObjectID> %2 </ObjectID>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ </FinalizeObject>
+ </UserData>
+ </template>
+
+ <template tid="DestroyGCHandle">
+ <data name="HandleID" inType="win:Pointer" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <DestroyGCHandle xmlns="myNs">
+ <HandleID> %1 </HandleID>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </DestroyGCHandle>
+ </UserData>
+ </template>
+
+ <template tid="SetGCHandle">
+ <data name="HandleID" inType="win:Pointer" />
+ <data name="ObjectID" inType="win:Pointer" />
+ <data name="Kind" map="GCHandleKindMap" inType="win:UInt32" />
+ <data name="Generation" inType="win:UInt32" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <SetGCHandle xmlns="myNs">
+ <HandleID> %1 </HandleID>
+ <ObjectID> %2 </ObjectID>
+ <Kind> %3 </Kind>
+ <Generation> %4 </Generation>
+ <AppDomainID> %5 </AppDomainID>
+ <ClrInstanceID> %6 </ClrInstanceID>
+ </SetGCHandle>
+ </UserData>
+ </template>
+
+ <template tid="GCTriggered">
+ <data name="Reason" inType="win:UInt32" map="GCReasonMap" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCTriggered xmlns="myNs">
+ <Reason> %1 </Reason>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </GCTriggered>
+ </UserData>
+ </template>
+
+ <template tid="PinObjectAtGCTime">
+ <data name="HandleID" inType="win:Pointer" />
+ <data name="ObjectID" inType="win:Pointer" />
+ <data name="ObjectSize" inType="win:UInt64" />
+ <data name="TypeName" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ </template>
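+
+ <!-- Editorial note: the UserData element is optional; it only shapes the
+ rendered XML view of an event and does not affect the binary payload,
+ which is why this template and the two memory-pressure templates below
+ omit it. -->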
+
+ <template tid="IncreaseMemoryPressure">
+ <data name="BytesAllocated" inType="win:UInt64" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ </template>
+
+ <template tid="DecreaseMemoryPressure">
+ <data name="BytesFreed" inType="win:UInt64" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ </template>
+
+ <template tid="ClrWorkerThread">
+ <data name="WorkerThreadCount" inType="win:UInt32" />
+ <data name="RetiredWorkerThreads" inType="win:UInt32" />
+
+ <UserData>
+ <WorkerThread xmlns="myNs">
+ <WorkerThreadCount> %1 </WorkerThreadCount>
+ <RetiredWorkerThreads> %2 </RetiredWorkerThreads>
+ </WorkerThread>
+ </UserData>
+ </template>
+
+ <template tid="IOThread">
+ <data name="IOThreadCount" inType="win:UInt32" />
+ <data name="RetiredIOThreads" inType="win:UInt32" />
+
+ <UserData>
+ <IOThread xmlns="myNs">
+ <IOThreadCount> %1 </IOThreadCount>
+ <RetiredIOThreads> %2 </RetiredIOThreads>
+ </IOThread>
+ </UserData>
+ </template>
+
+ <template tid="IOThread_V1">
+ <data name="IOThreadCount" inType="win:UInt32" />
+ <data name="RetiredIOThreads" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <IOThread_V1 xmlns="myNs">
+ <IOThreadCount> %1 </IOThreadCount>
+ <RetiredIOThreads> %2 </RetiredIOThreads>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ </IOThread_V1>
+ </UserData>
+ </template>
+
+ <template tid="ClrThreadPoolSuspend">
+ <data name="ClrThreadID" inType="win:UInt32" />
+ <data name="CpuUtilization" inType="win:UInt32" />
+
+ <UserData>
+ <CLRThreadPoolSuspend xmlns="myNs">
+ <ClrThreadID> %1 </ClrThreadID>
+ <CpuUtilization> %2 </CpuUtilization>
+ </CLRThreadPoolSuspend>
+ </UserData>
+ </template>
+
+ <template tid="ThreadPoolWorkerThread">
+ <data name="ActiveWorkerThreadCount" inType="win:UInt32" />
+ <data name="RetiredWorkerThreadCount" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <ThreadPoolWorkerThread xmlns="myNs">
+ <ActiveWorkerThreadCount> %1 </ActiveWorkerThreadCount>
+ <RetiredWorkerThreadCount> %2 </RetiredWorkerThreadCount>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ </ThreadPoolWorkerThread>
+ </UserData>
+ </template>
+
+ <template tid="ThreadPoolWorkerThreadAdjustmentSample">
+ <data name="Throughput" inType="win:Double" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <ThreadPoolWorkerThreadAdjustmentSample xmlns="myNs">
+ <Throughput> %1 </Throughput>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </ThreadPoolWorkerThreadAdjustmentSample>
+ </UserData>
+ </template>
+
+ <template tid="ThreadPoolWorkerThreadAdjustmentAdjustment">
+ <data name="AverageThroughput" inType="win:Double" />
+ <data name="NewWorkerThreadCount" inType="win:UInt32" />
+ <data name="Reason" inType="win:UInt32" map="ThreadAdjustmentReasonMap"/>
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <ThreadPoolWorkerThreadAdjustmentAdjustment xmlns="myNs">
+ <AverageThroughput> %1 </AverageThroughput>
+ <NewWorkerThreadCount> %2 </NewWorkerThreadCount>
+ <Reason> %3 </Reason>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </ThreadPoolWorkerThreadAdjustmentAdjustment>
+ </UserData>
+ </template>
+
+ <template tid="ThreadPoolWorkerThreadAdjustmentStats">
+ <data name="Duration" inType="win:Double" />
+ <data name="Throughput" inType="win:Double" />
+ <data name="ThreadWave" inType="win:Double"/>
+ <data name="ThroughputWave" inType="win:Double"/>
+ <data name="ThroughputErrorEstimate" inType="win:Double"/>
+ <data name="AverageThroughputErrorEstimate" inType="win:Double"/>
+ <data name="ThroughputRatio" inType="win:Double" />
+ <data name="Confidence" inType="win:Double" />
+ <data name="NewControlSetting" inType="win:Double" />
+ <data name="NewThreadWaveMagnitude" inType="win:UInt16" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <ThreadPoolWorkerThreadAdjustmentStats xmlns="myNs">
+ <Duration> %1 </Duration>
+ <Throughput> %2 </Throughput>
+ <ThreadWave> %3 </ThreadWave>
+ <ThroughputWave> %4 </ThroughputWave>
+ <ThroughputErrorEstimate> %5 </ThroughputErrorEstimate>
+ <AverageThroughputErrorEstimate> %6 </AverageThroughputErrorEstimate>
+ <ThroughputRatio> %7 </ThroughputRatio>
+ <Confidence> %8 </Confidence>
+ <NewControlSetting> %9 </NewControlSetting>
+ <NewThreadWaveMagnitude> %10 </NewThreadWaveMagnitude>
+ <ClrInstanceID> %11 </ClrInstanceID>
+ </ThreadPoolWorkerThreadAdjustmentStats>
+ </UserData>
+ </template>
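+
+ <!-- Editorial note: the Sample/Adjustment/Stats templates above feed from
+ the thread pool's hill-climbing concurrency controller, which perturbs the
+ worker count in a wave and measures throughput to decide whether adding or
+ removing threads helps; these Double-typed fields are its raw inputs and
+ outputs. -->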
+
+ <template tid="ThreadPoolWork">
+ <data name="WorkID" inType="win:Pointer" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ </template>
+
+ <template tid="ThreadPoolIOWork">
+ <data name="NativeOverlapped" inType="win:Pointer" />
+ <data name="Overlapped" inType="win:Pointer" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ </template>
+
+ <template tid="ThreadPoolIOWorkEnqueue">
+ <data name="NativeOverlapped" inType="win:Pointer" />
+ <data name="Overlapped" inType="win:Pointer" />
+ <data name="MultiDequeues" inType="win:Boolean" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ </template>
+
+ <template tid="ThreadPoolWorkingThreadCount">
+ <data name="Count" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <ThreadPoolWorkingThreadCount xmlns="myNs">
+ <Count> %1 </Count>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </ThreadPoolWorkingThreadCount>
+ </UserData>
+ </template>
+
+ <template tid="ThreadStartWork">
+ <data name="ID" inType="win:Pointer" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ </template>
+
+ <template tid="Exception">
+ <data name="ExceptionType" inType="win:UnicodeString" />
+ <data name="ExceptionMessage" inType="win:UnicodeString" />
+ <data name="ExceptionEIP" inType="win:Pointer" />
+ <data name="ExceptionHRESULT" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="ExceptionFlags" inType="win:UInt16" map="ExceptionThrownFlagsMap" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <Exception xmlns="myNs">
+ <ExceptionType> %1 </ExceptionType>
+ <ExceptionMessage> %2 </ExceptionMessage>
+ <ExceptionEIP> %3 </ExceptionEIP>
+ <ExceptionHRESULT> %4 </ExceptionHRESULT>
+ <ExceptionFlags> %5 </ExceptionFlags>
+ <ClrInstanceID> %6 </ClrInstanceID>
+ </Exception>
+ </UserData>
+ </template>
+
+ <template tid="Contention">
+ <data name="ContentionFlags" inType="win:UInt8" map="ContentionFlagsMap" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <Contention xmlns="myNs">
+ <ContentionFlags> %1 </ContentionFlags>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </Contention>
+ </UserData>
+ </template>
+
+ <template tid="DomainModuleLoadUnload">
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AssemblyID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleFlags" inType="win:UInt32" map="ModuleFlagsMap" />
+ <data name="Reserved1" inType="win:UInt32" />
+ <data name="ModuleILPath" inType="win:UnicodeString" />
+ <data name="ModuleNativePath" inType="win:UnicodeString" />
+ <UserData>
+ <DomainModuleLoadUnload xmlns="myNs">
+ <ModuleID> %1 </ModuleID>
+ <AssemblyID> %2 </AssemblyID>
+ <AppDomainID> %3 </AppDomainID>
+ <ModuleFlags> %4 </ModuleFlags>
+ <ModuleILPath> %5 </ModuleILPath>
+ <ModuleNativePath> %6 </ModuleNativePath>
+ </DomainModuleLoadUnload>
+ </UserData>
+ </template>
+
+ <template tid="DomainModuleLoadUnload_V1">
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AssemblyID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleFlags" inType="win:UInt32" map="ModuleFlagsMap" />
+ <data name="Reserved1" inType="win:UInt32" />
+ <data name="ModuleILPath" inType="win:UnicodeString" />
+ <data name="ModuleNativePath" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <DomainModuleLoadUnload_V1 xmlns="myNs">
+ <ModuleID> %1 </ModuleID>
+ <AssemblyID> %2 </AssemblyID>
+ <AppDomainID> %3 </AppDomainID>
+ <ModuleFlags> %4 </ModuleFlags>
+ <ModuleILPath> %5 </ModuleILPath>
+ <ModuleNativePath> %6 </ModuleNativePath>
+ <ClrInstanceID> %7 </ClrInstanceID>
+ </DomainModuleLoadUnload_V1>
+ </UserData>
+ </template>
+
+ <template tid="ModuleLoadUnload">
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AssemblyID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleFlags" inType="win:UInt32" map="ModuleFlagsMap" />
+ <data name="Reserved1" inType="win:UInt32" />
+ <data name="ModuleILPath" inType="win:UnicodeString" />
+ <data name="ModuleNativePath" inType="win:UnicodeString" />
+ <UserData>
+ <ModuleLoadUnload xmlns="myNs">
+ <ModuleID> %1 </ModuleID>
+ <AssemblyID> %2 </AssemblyID>
+ <ModuleFlags> %3 </ModuleFlags>
+ <ModuleILPath> %4 </ModuleILPath>
+ <ModuleNativePath> %5 </ModuleNativePath>
+ </ModuleLoadUnload>
+ </UserData>
+ </template>
+
+ <template tid="ModuleLoadUnload_V1">
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AssemblyID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleFlags" inType="win:UInt32" map="ModuleFlagsMap" />
+ <data name="Reserved1" inType="win:UInt32" />
+ <data name="ModuleILPath" inType="win:UnicodeString" />
+ <data name="ModuleNativePath" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <ModuleLoadUnload_V1 xmlns="myNs">
+ <ModuleID> %1 </ModuleID>
+ <AssemblyID> %2 </AssemblyID>
+ <ModuleFlags> %3 </ModuleFlags>
+ <ModuleILPath> %4 </ModuleILPath>
+ <ModuleNativePath> %5 </ModuleNativePath>
+ <ClrInstanceID> %6 </ClrInstanceID>
+ </ModuleLoadUnload_V1>
+ </UserData>
+ </template>
+
+ <template tid="ModuleLoadUnload_V2">
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AssemblyID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleFlags" inType="win:UInt32" map="ModuleFlagsMap" />
+ <data name="Reserved1" inType="win:UInt32" />
+ <data name="ModuleILPath" inType="win:UnicodeString" />
+ <data name="ModuleNativePath" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="ManagedPdbSignature" inType="win:GUID" />
+ <data name="ManagedPdbAge" inType="win:UInt32" />
+ <data name="ManagedPdbBuildPath" inType="win:UnicodeString" />
+ <data name="NativePdbSignature" inType="win:GUID" />
+ <data name="NativePdbAge" inType="win:UInt32" />
+ <data name="NativePdbBuildPath" inType="win:UnicodeString" />
+ <UserData>
+ <ModuleLoadUnload_V2 xmlns="myNs">
+ <ModuleID> %1 </ModuleID>
+ <AssemblyID> %2 </AssemblyID>
+ <ModuleFlags> %3 </ModuleFlags>
+ <ModuleILPath> %4 </ModuleILPath>
+ <ModuleNativePath> %5 </ModuleNativePath>
+ <ClrInstanceID> %6 </ClrInstanceID>
+ <ManagedPdbSignature> %7 </ManagedPdbSignature>
+ <ManagedPdbAge> %8 </ManagedPdbAge>
+ <ManagedPdbBuildPath> %9 </ManagedPdbBuildPath>
+ <NativePdbSignature> %10 </NativePdbSignature>
+ <NativePdbAge> %11 </NativePdbAge>
+ <NativePdbBuildPath> %12 </NativePdbBuildPath>
+ </ModuleLoadUnload_V2>
+ </UserData>
+ </template>
+
+ <template tid="AssemblyLoadUnload">
+ <data name="AssemblyID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AssemblyFlags" inType="win:UInt32" map="AssemblyFlagsMap" />
+ <data name="FullyQualifiedAssemblyName" inType="win:UnicodeString" />
+ <UserData>
+ <AssemblyLoadUnload xmlns="myNs">
+ <AssemblyID> %1 </AssemblyID>
+ <AppDomainID> %2 </AppDomainID>
+ <AssemblyFlags> %3 </AssemblyFlags>
+ <FullyQualifiedAssemblyName> %4 </FullyQualifiedAssemblyName>
+ </AssemblyLoadUnload>
+ </UserData>
+ </template>
+
+ <template tid="AssemblyLoadUnload_V1">
+ <data name="AssemblyID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="BindingID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AssemblyFlags" inType="win:UInt32" map="AssemblyFlagsMap" />
+ <data name="FullyQualifiedAssemblyName" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <AssemblyLoadUnload_V1 xmlns="myNs">
+ <AssemblyID> %1 </AssemblyID>
+ <AppDomainID> %2 </AppDomainID>
+ <BindingID> %3 </BindingID>
+ <AssemblyFlags> %4 </AssemblyFlags>
+ <FullyQualifiedAssemblyName> %5 </FullyQualifiedAssemblyName>
+ <ClrInstanceID> %6 </ClrInstanceID>
+ </AssemblyLoadUnload_V1>
+ </UserData>
+ </template>
+
+ <template tid="AppDomainLoadUnload">
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainFlags" inType="win:UInt32" map="AppDomainFlagsMap" />
+ <data name="AppDomainName" inType="win:UnicodeString" />
+ <UserData>
+ <AppDomainLoadUnload xmlns="myNs">
+ <AppDomainID> %1 </AppDomainID>
+ <AppDomainFlags> %2 </AppDomainFlags>
+ <AppDomainName> %3 </AppDomainName>
+ </AppDomainLoadUnload>
+ </UserData>
+ </template>
+
+ <template tid="AppDomainLoadUnload_V1">
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainFlags" inType="win:UInt32" map="AppDomainFlagsMap" />
+ <data name="AppDomainName" inType="win:UnicodeString" />
+ <data name="AppDomainIndex" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <AppDomainLoadUnload_V1 xmlns="myNs">
+ <AppDomainID> %1 </AppDomainID>
+ <AppDomainFlags> %2 </AppDomainFlags>
+ <AppDomainName> %3 </AppDomainName>
+ <AppDomainIndex> %4 </AppDomainIndex>
+ <ClrInstanceID> %5 </ClrInstanceID>
+ </AppDomainLoadUnload_V1>
+ </UserData>
+ </template>
+
+ <template tid="MethodLoadUnload">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodStartAddress" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodSize" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodFlags" inType="win:UInt32" map="MethodFlagsMap" />
+
+ <UserData>
+ <MethodLoadUnload xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ModuleID> %2 </ModuleID>
+ <MethodStartAddress> %3 </MethodStartAddress>
+ <MethodSize> %4 </MethodSize>
+ <MethodToken> %5 </MethodToken>
+ <MethodFlags> %6 </MethodFlags>
+ </MethodLoadUnload>
+ </UserData>
+ </template>
+
+ <template tid="MethodLoadUnload_V1">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodStartAddress" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodSize" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodFlags" inType="win:UInt32" map="MethodFlagsMap" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <MethodLoadUnload_V1 xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ModuleID> %2 </ModuleID>
+ <MethodStartAddress> %3 </MethodStartAddress>
+ <MethodSize> %4 </MethodSize>
+ <MethodToken> %5 </MethodToken>
+ <MethodFlags> %6 </MethodFlags>
+ <ClrInstanceID> %7 </ClrInstanceID>
+ </MethodLoadUnload_V1>
+ </UserData>
+ </template>
+
+ <template tid="MethodLoadUnload_V2">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodStartAddress" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodSize" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodFlags" inType="win:UInt32" map="MethodFlagsMap" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="ReJITID" inType="win:UInt64" outType="win:HexInt64" />
+
+ <UserData>
+ <MethodLoadUnload_V2 xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ModuleID> %2 </ModuleID>
+ <MethodStartAddress> %3 </MethodStartAddress>
+ <MethodSize> %4 </MethodSize>
+ <MethodToken> %5 </MethodToken>
+ <MethodFlags> %6 </MethodFlags>
+ <ClrInstanceID> %7 </ClrInstanceID>
+ <ReJITID> %8 </ReJITID>
+ </MethodLoadUnload_V2>
+ </UserData>
+ </template>
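+
+ <!-- Editorial note: ReJITID (added in V2) distinguishes multiple jitted
+ code bodies for the same MethodID when a profiler re-instruments a method
+ via the ReJIT feature; conventionally it is 0 for the original body. -->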
+
+ <template tid="MethodLoadUnloadVerbose">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodStartAddress" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodSize" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodFlags" inType="win:UInt32" map="MethodFlagsMap" />
+ <data name="MethodNamespace" inType="win:UnicodeString" />
+ <data name="MethodName" inType="win:UnicodeString" />
+ <data name="MethodSignature" inType="win:UnicodeString" />
+ <UserData>
+ <MethodLoadUnloadVerbose xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ModuleID> %2 </ModuleID>
+ <MethodStartAddress> %3 </MethodStartAddress>
+ <MethodSize> %4 </MethodSize>
+ <MethodToken> %5 </MethodToken>
+ <MethodFlags> %6 </MethodFlags>
+ <MethodNamespace> %7 </MethodNamespace>
+ <MethodName> %8 </MethodName>
+ <MethodSignature> %9 </MethodSignature>
+ </MethodLoadUnloadVerbose>
+ </UserData>
+ </template>
+
+ <template tid="MethodLoadUnloadVerbose_V1">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodStartAddress" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodSize" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodFlags" inType="win:UInt32" map="MethodFlagsMap" />
+ <data name="MethodNamespace" inType="win:UnicodeString" />
+ <data name="MethodName" inType="win:UnicodeString" />
+ <data name="MethodSignature" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <MethodLoadUnloadVerbose_V1 xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ModuleID> %2 </ModuleID>
+ <MethodStartAddress> %3 </MethodStartAddress>
+ <MethodSize> %4 </MethodSize>
+ <MethodToken> %5 </MethodToken>
+ <MethodFlags> %6 </MethodFlags>
+ <MethodNamespace> %7 </MethodNamespace>
+ <MethodName> %8 </MethodName>
+ <MethodSignature> %9 </MethodSignature>
+ <ClrInstanceID> %10 </ClrInstanceID>
+ </MethodLoadUnloadVerbose_V1>
+ </UserData>
+ </template>
+
+ <template tid="MethodLoadUnloadVerbose_V2">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodStartAddress" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodSize" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodFlags" inType="win:UInt32" map="MethodFlagsMap" />
+ <data name="MethodNamespace" inType="win:UnicodeString" />
+ <data name="MethodName" inType="win:UnicodeString" />
+ <data name="MethodSignature" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="ReJITID" inType="win:UInt64" outType="win:HexInt64" />
+ <UserData>
+ <MethodLoadUnloadVerbose_V2 xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ModuleID> %2 </ModuleID>
+ <MethodStartAddress> %3 </MethodStartAddress>
+ <MethodSize> %4 </MethodSize>
+ <MethodToken> %5 </MethodToken>
+ <MethodFlags> %6 </MethodFlags>
+ <MethodNamespace> %7 </MethodNamespace>
+ <MethodName> %8 </MethodName>
+ <MethodSignature> %9 </MethodSignature>
+ <ClrInstanceID> %10 </ClrInstanceID>
+ <ReJITID> %11 </ReJITID>
+ </MethodLoadUnloadVerbose_V2>
+ </UserData>
+ </template>
+
+ <template tid="MethodJittingStarted">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodILSize" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodNamespace" inType="win:UnicodeString" />
+ <data name="MethodName" inType="win:UnicodeString" />
+ <data name="MethodSignature" inType="win:UnicodeString" />
+
+ <UserData>
+ <MethodJittingStarted xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ModuleID> %2 </ModuleID>
+ <MethodToken> %3 </MethodToken>
+ <MethodILSize> %4 </MethodILSize>
+ <MethodNamespace> %5 </MethodNamespace>
+ <MethodName> %6 </MethodName>
+ <MethodSignature> %7 </MethodSignature>
+ </MethodJittingStarted>
+ </UserData>
+ </template>
+
+ <template tid="MethodJittingStarted_V1">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodILSize" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodNamespace" inType="win:UnicodeString" />
+ <data name="MethodName" inType="win:UnicodeString" />
+ <data name="MethodSignature" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <MethodJittingStarted_V1 xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ModuleID> %2 </ModuleID>
+ <MethodToken> %3 </MethodToken>
+ <MethodILSize> %4 </MethodILSize>
+ <MethodNamespace> %5 </MethodNamespace>
+ <MethodName> %6 </MethodName>
+ <MethodSignature> %7 </MethodSignature>
+ <ClrInstanceID> %8 </ClrInstanceID>
+ </MethodJittingStarted_V1>
+ </UserData>
+ </template>
+
+ <template tid="MethodJitInliningSucceeded">
+ <data name="MethodBeingCompiledNamespace" inType="win:UnicodeString" />
+ <data name="MethodBeingCompiledName" inType="win:UnicodeString" />
+ <data name="MethodBeingCompiledNameSignature" inType="win:UnicodeString" />
+ <data name="InlinerNamespace" inType="win:UnicodeString" />
+ <data name="InlinerName" inType="win:UnicodeString" />
+ <data name="InlinerNameSignature" inType="win:UnicodeString" />
+ <data name="InlineeNamespace" inType="win:UnicodeString" />
+ <data name="InlineeName" inType="win:UnicodeString" />
+ <data name="InlineeNameSignature" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <MethodJitInliningSucceeded xmlns="myNs">
+ <MethodBeingCompiledNamespace> %1 </MethodBeingCompiledNamespace>
+ <MethodBeingCompiledName> %2 </MethodBeingCompiledName>
+ <MethodBeingCompiledNameSignature> %3 </MethodBeingCompiledNameSignature>
+ <InlinerNamespace> %4 </InlinerNamespace>
+ <InlinerName> %5 </InlinerName>
+ <InlinerNameSignature> %6 </InlinerNameSignature>
+ <InlineeNamespace> %7 </InlineeNamespace>
+ <InlineeName> %8 </InlineeName>
+ <InlineeNameSignature> %9 </InlineeNameSignature>
+ <ClrInstanceID> %10 </ClrInstanceID>
+ </MethodJitInliningSucceeded>
+ </UserData>
+ </template>
+
+ <template tid="MethodJitInliningFailed">
+ <data name="MethodBeingCompiledNamespace" inType="win:UnicodeString" />
+ <data name="MethodBeingCompiledName" inType="win:UnicodeString" />
+ <data name="MethodBeingCompiledNameSignature" inType="win:UnicodeString" />
+ <data name="InlinerNamespace" inType="win:UnicodeString" />
+ <data name="InlinerName" inType="win:UnicodeString" />
+ <data name="InlinerNameSignature" inType="win:UnicodeString" />
+ <data name="InlineeNamespace" inType="win:UnicodeString" />
+ <data name="InlineeName" inType="win:UnicodeString" />
+ <data name="InlineeNameSignature" inType="win:UnicodeString" />
+ <data name="FailAlways" inType="win:Boolean" />
+ <data name="FailReason" inType="win:AnsiString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <MethodJitInliningFailed xmlns="myNs">
+ <MethodBeingCompiledNamespace> %1 </MethodBeingCompiledNamespace>
+ <MethodBeingCompiledName> %2 </MethodBeingCompiledName>
+ <MethodBeingCompiledNameSignature> %3 </MethodBeingCompiledNameSignature>
+ <InlinerNamespace> %4 </InlinerNamespace>
+ <InlinerName> %5 </InlinerName>
+ <InlinerNameSignature> %6 </InlinerNameSignature>
+ <InlineeNamespace> %7 </InlineeNamespace>
+ <InlineeName> %8 </InlineeName>
+ <InlineeNameSignature> %9 </InlineeNameSignature>
+ <FailAlways> %10 </FailAlways>
+ <FailReason> %11 </FailReason>
+ <ClrInstanceID> %12 </ClrInstanceID>
+ </MethodJitInliningFailed>
+ </UserData>
+ </template>
+
+ <template tid="MethodJitTailCallSucceeded">
+ <data name="MethodBeingCompiledNamespace" inType="win:UnicodeString" />
+ <data name="MethodBeingCompiledName" inType="win:UnicodeString" />
+ <data name="MethodBeingCompiledNameSignature" inType="win:UnicodeString" />
+ <data name="CallerNamespace" inType="win:UnicodeString" />
+ <data name="CallerName" inType="win:UnicodeString" />
+ <data name="CallerNameSignature" inType="win:UnicodeString" />
+ <data name="CalleeNamespace" inType="win:UnicodeString" />
+ <data name="CalleeName" inType="win:UnicodeString" />
+ <data name="CalleeNameSignature" inType="win:UnicodeString" />
+ <data name="TailPrefix" inType="win:Boolean" />
+ <data name="TailCallType" inType="win:UInt32" map="TailCallTypeMap" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <MethodJitTailCallSucceeded xmlns="myNs">
+ <MethodBeingCompiledNamespace> %1 </MethodBeingCompiledNamespace>
+ <MethodBeingCompiledName> %2 </MethodBeingCompiledName>
+ <MethodBeingCompiledNameSignature> %3 </MethodBeingCompiledNameSignature>
+ <CallerNamespace> %4 </CallerNamespace>
+ <CallerName> %5 </CallerName>
+ <CallerNameSignature> %6 </CallerNameSignature>
+ <CalleeNamespace> %7 </CalleeNamespace>
+ <CalleeName> %8 </CalleeName>
+ <CalleeNameSignature> %9 </CalleeNameSignature>
+ <TailPrefix> %10 </TailPrefix>
+ <TailCallType> %11 </TailCallType>
+ <ClrInstanceID> %12 </ClrInstanceID>
+ </MethodJitTailCallSucceeded>
+ </UserData>
+ </template>
+
+ <template tid="MethodJitTailCallFailed">
+ <data name="MethodBeingCompiledNamespace" inType="win:UnicodeString" />
+ <data name="MethodBeingCompiledName" inType="win:UnicodeString" />
+ <data name="MethodBeingCompiledNameSignature" inType="win:UnicodeString" />
+ <data name="CallerNamespace" inType="win:UnicodeString" />
+ <data name="CallerName" inType="win:UnicodeString" />
+ <data name="CallerNameSignature" inType="win:UnicodeString" />
+ <data name="CalleeNamespace" inType="win:UnicodeString" />
+ <data name="CalleeName" inType="win:UnicodeString" />
+ <data name="CalleeNameSignature" inType="win:UnicodeString" />
+ <data name="TailPrefix" inType="win:Boolean" />
+ <data name="FailReason" inType="win:AnsiString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <MethodJitTailCallFailed xmlns="myNs">
+ <MethodBeingCompiledNamespace> %1 </MethodBeingCompiledNamespace>
+ <MethodBeingCompiledName> %2 </MethodBeingCompiledName>
+ <MethodBeingCompiledNameSignature> %3 </MethodBeingCompiledNameSignature>
+ <CallerNamespace> %4 </CallerNamespace>
+ <CallerName> %5 </CallerName>
+ <CallerNameSignature> %6 </CallerNameSignature>
+ <CalleeNamespace> %7 </CalleeNamespace>
+ <CalleeName> %8 </CalleeName>
+ <CalleeNameSignature> %9 </CalleeNameSignature>
+ <TailPrefix> %10 </TailPrefix>
+ <FailReason> %11 </FailReason>
+ <ClrInstanceID> %12 </ClrInstanceID>
+ </MethodJitTailCallFailed>
+ </UserData>
+ </template>
+
+ <template tid="MethodILToNativeMap">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ReJITID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodExtent" inType="win:UInt8" />
+ <data name="CountOfMapEntries" inType="win:UInt16" />
+ <data name="ILOffsets" count="CountOfMapEntries" inType="win:UInt32" />
+ <data name="NativeOffsets" count="CountOfMapEntries" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <MethodILToNativeMap xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ReJITID> %2 </ReJITID>
+ <MethodExtent> %3 </MethodExtent>
+ <CountOfMapEntries> %4 </CountOfMapEntries>
+ <ClrInstanceID> %5 </ClrInstanceID>
+ </MethodILToNativeMap>
+ </UserData>
+ </template>
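+
+ <!-- The map is serialized as two parallel arrays sized by CountOfMapEntries.
+      A minimal C++ decode sketch, assuming fields appear in declaration order
+      (hypothetical helper, not a supported decoder API):
+
+        #include <cstdint>
+        #include <cstring>
+        #include <vector>
+        struct Entry { uint32_t il; uint32_t native; };
+        std::vector<Entry> DecodeILToNativeMap(const uint8_t* p)
+        {
+            uint16_t count;
+            p += 8 + 8 + 1;                       // MethodID, ReJITID, MethodExtent
+            std::memcpy(&count, p, 2); p += 2;    // CountOfMapEntries
+            std::vector<Entry> map(count);
+            for (uint16_t i = 0; i < count; ++i) std::memcpy(&map[i].il, p + 4 * i, 4);
+            p += 4 * count;                       // end of ILOffsets
+            for (uint16_t i = 0; i < count; ++i) std::memcpy(&map[i].native, p + 4 * i, 4);
+            return map;                           // ClrInstanceID follows NativeOffsets
+        }
+ -->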
+
+ <template tid="ClrStackWalk">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="Reserved1" inType="win:UInt8" />
+ <data name="Reserved2" inType="win:UInt8" />
+ <data name="FrameCount" inType="win:UInt32" />
+ <data name="Stack" count="2" inType="win:Pointer" />
+ </template>
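+
+ <!-- Note: Stack is declared with a fixed count of 2 only because the manifest
+      cannot express a count that depends on FrameCount; the payload actually
+      carries FrameCount pointer-sized addresses (4 or 8 bytes each, matching the
+      traced process), so decoders should honor FrameCount rather than the
+      declared count. Sketch, with 'p' positioned just past Reserved2 and a
+      hypothetical pointer-size-aware read helper:
+
+        uint32_t frameCount; std::memcpy(&frameCount, p, 4); p += 4;
+        for (uint32_t i = 0; i < frameCount; ++i)
+            addresses.push_back(ReadPointer(p));
+ -->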
+
+ <template tid="AppDomainMemAllocated">
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="Allocated" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <AppDomainMemAllocated xmlns="myNs">
+ <AppDomainID> %1 </AppDomainID>
+ <Allocated> %2 </Allocated>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ </AppDomainMemAllocated>
+ </UserData>
+ </template>
+
+ <template tid="AppDomainMemSurvived">
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="Survived" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ProcessSurvived" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <AppDomainMemSurvived xmlns="myNs">
+ <AppDomainID> %1 </AppDomainID>
+ <Survived> %2 </Survived>
+ <ProcessSurvived> %3 </ProcessSurvived>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </AppDomainMemSurvived>
+ </UserData>
+ </template>
+
+ <template tid="ThreadCreated">
+ <data name="ManagedThreadID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="Flags" inType="win:UInt32" map="ThreadFlagsMap" />
+ <data name="ManagedThreadIndex" inType="win:UInt32" />
+ <data name="OSThreadID" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <ThreadCreated xmlns="myNs">
+ <ManagedThreadID> %1 </ManagedThreadID>
+ <AppDomainID> %2 </AppDomainID>
+ <Flags> %3 </Flags>
+ <ManagedThreadIndex> %4 </ManagedThreadIndex>
+ <OSThreadID> %5 </OSThreadID>
+ <ClrInstanceID> %6 </ClrInstanceID>
+ </ThreadCreated>
+ </UserData>
+ </template>
+
+ <template tid="ThreadTerminatedOrTransition">
+ <data name="ManagedThreadID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <ThreadTerminatedOrTransition xmlns="myNs">
+ <ManagedThreadID> %1 </ManagedThreadID>
+ <AppDomainID> %2 </AppDomainID>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ </ThreadTerminatedOrTransition>
+ </UserData>
+ </template>
+
+ <template tid="ILStubGenerated">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="StubMethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="StubFlags" inType="win:UInt32" map="ILStubGeneratedFlagsMap" />
+ <data name="ManagedInteropMethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="ManagedInteropMethodNamespace" inType="win:UnicodeString" />
+ <data name="ManagedInteropMethodName" inType="win:UnicodeString" />
+ <data name="ManagedInteropMethodSignature" inType="win:UnicodeString" />
+ <data name="NativeMethodSignature" inType="win:UnicodeString" />
+ <data name="StubMethodSignature" inType="win:UnicodeString" />
+ <data name="StubMethodILCode" inType="win:UnicodeString" />
+ <UserData>
+ <ILStubGenerated xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <ModuleID> %2 </ModuleID>
+ <StubMethodID> %3 </StubMethodID>
+ <StubFlags> %4 </StubFlags>
+ <ManagedInteropMethodToken> %5 </ManagedInteropMethodToken>
+ <ManagedInteropMethodNamespace> %6 </ManagedInteropMethodNamespace>
+ <ManagedInteropMethodName> %7 </ManagedInteropMethodName>
+ <ManagedInteropMethodSignature> %8 </ManagedInteropMethodSignature>
+ <NativeMethodSignature> %9 </NativeMethodSignature>
+ <StubMethodSignature> %10 </StubMethodSignature>
+ <StubMethodILCode> %11 </StubMethodILCode>
+ </ILStubGenerated>
+ </UserData>
+ </template>
+
+ <template tid="ILStubCacheHit">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="StubMethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ManagedInteropMethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="ManagedInteropMethodNamespace" inType="win:UnicodeString" />
+ <data name="ManagedInteropMethodName" inType="win:UnicodeString" />
+ <data name="ManagedInteropMethodSignature" inType="win:UnicodeString" />
+ <UserData>
+ <ILStubCacheHit xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <ModuleID> %2 </ModuleID>
+ <StubMethodID> %3 </StubMethodID>
+ <ManagedInteropMethodToken> %4 </ManagedInteropMethodToken>
+ <ManagedInteropMethodNamespace> %5 </ManagedInteropMethodNamespace>
+ <ManagedInteropMethodName> %6 </ManagedInteropMethodName>
+ <ManagedInteropMethodSignature> %7 </ManagedInteropMethodSignature>
+ </ILStubCacheHit>
+ </UserData>
+ </template>
+
+ <template tid="ModuleRange">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64"/>
+ <data name="RangeBegin" inType="win:UInt32" outType="win:HexInt32"/>
+ <data name="RangeSize" inType="win:UInt32" outType="win:HexInt32"/>
+ <data name="RangeType" map="ModuleRangeTypeMap" inType="win:UInt8"/>
+ <UserData>
+ <ModuleRange xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <ModuleID> %2 </ModuleID>
+ <RangeBegin> %3 </RangeBegin>
+ <RangeSize> %4 </RangeSize>
+ <RangeType> %5 </RangeType>
+ </ModuleRange>
+ </UserData>
+ </template>
+
+ <template tid="BulkType">
+ <data name="Count" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <struct name="Values" count="Count" >
+ <data name="TypeID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="TypeNameID" inType="win:UInt32" />
+ <data name="Flags" inType="win:UInt32" map="TypeFlagsMap"/>
+ <data name="CorElementType" inType="win:UInt8" />
+ <data name="Name" inType="win:UnicodeString" />
+ <data name="TypeParameterCount" inType="win:UInt32" />
+ <data name="TypeParameters" count="TypeParameterCount" inType="win:UInt64" outType="win:HexInt64" />
+ </struct>
+ <UserData>
+ <Type xmlns="myNs">
+ <Count> %1 </Count>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </Type>
+ </UserData>
+ </template>
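+
+ <!-- Each Values element is variable-length (the Name string and the trailing
+      TypeParameters array), so the Count elements must be walked sequentially;
+      they cannot be indexed at random. C++ sketch of one element walk, assuming
+      declaration-order serialization and NUL-terminated UTF-16 strings
+      (hypothetical helpers, illustrative only):
+
+        const uint8_t* SkipUnicodeZ(const uint8_t* p)
+        {
+            uint16_t ch;
+            do { std::memcpy(&ch, p, 2); p += 2; } while (ch != 0);
+            return p;
+        }
+        const uint8_t* SkipBulkTypeValue(const uint8_t* p)
+        {
+            p += 8 + 8 + 4 + 4 + 1;   // TypeID, ModuleID, TypeNameID, Flags, CorElementType
+            p = SkipUnicodeZ(p);      // Name
+            uint32_t nParams;
+            std::memcpy(&nParams, p, 4); p += 4;
+            return p + 8ull * nParams;  // TypeParameters
+        }
+ -->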
+
+ <template tid="GCBulkRootEdge">
+ <data name="Index" inType="win:UInt32" />
+ <data name="Count" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <struct name="Values" count="Count" >
+ <data name="RootedNodeAddress" inType="win:Pointer" />
+ <data name="GCRootKind" inType="win:UInt8" map="GCRootKindMap" />
+ <data name="GCRootFlag" inType="win:UInt32" map="GCRootFlagsMap" />
+ <data name="GCRootID" inType="win:Pointer" />
+ </struct>
+ <UserData>
+ <GCBulkRootEdge xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <Index> %2 </Index>
+ <Count> %3 </Count>
+ </GCBulkRootEdge>
+ </UserData>
+ </template>
+
+ <template tid="GCBulkRootCCW">
+ <data name="Count" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <struct name="Values" count="Count" >
+ <data name="GCRootID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ObjectID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="TypeID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="IUnknown" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="RefCount" inType="win:UInt32"/>
+ <data name="PeggedRefCount" inType="win:UInt32"/>
+ <data name="Flags" inType="win:UInt32" map="GCRootCCWFlagsMap"/>
+ </struct>
+ <UserData>
+ <GCBulkRootCCW xmlns="myNs">
+ <Count> %1 </Count>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </GCBulkRootCCW>
+ </UserData>
+ </template>
+
+ <template tid="GCBulkRCW">
+ <data name="Count" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <struct name="Values" count="Count" >
+ <data name="ObjectID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="TypeID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="IUnknown" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="VTable" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="RefCount" inType="win:UInt32"/>
+ <data name="Flags" inType="win:UInt32"/>
+ </struct>
+ <UserData>
+ <GCBulkRCW xmlns="myNs">
+ <Count> %1 </Count>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </GCBulkRCW>
+ </UserData>
+ </template>
+
+ <template tid="GCBulkRootStaticVar">
+ <data name="Count" inType="win:UInt32" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <struct name="Values" count="Count" >
+ <data name="GCRootID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ObjectID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="TypeID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="Flags" inType="win:UInt32" map="GCRootStaticVarFlagsMap" />
+ <data name="FieldName" inType="win:UnicodeString" />
+ </struct>
+ <UserData>
+ <GCBulkRootStaticVar xmlns="myNs">
+ <Count> %1 </Count>
+ <AppDomainID> %2 </AppDomainID>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ </GCBulkRootStaticVar>
+ </UserData>
+ </template>
+
+ <template tid="GCBulkRootConditionalWeakTableElementEdge">
+ <data name="Index" inType="win:UInt32" />
+ <data name="Count" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <struct name="Values" count="Count" >
+ <data name="GCKeyNodeID" inType="win:Pointer" />
+ <data name="GCValueNodeID" inType="win:Pointer" />
+ <data name="GCRootID" inType="win:Pointer" />
+ </struct>
+ <UserData>
+ <GCBulkRootConditionalWeakTableElementEdge xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <Index> %2 </Index>
+ <Count> %3 </Count>
+ </GCBulkRootConditionalWeakTableElementEdge>
+ </UserData>
+ </template>
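+
+ <!-- Each element records one ConditionalWeakTable entry as a dependent
+      (key, value) pair: the value node is kept alive only while the key node
+      remains reachable, and GCRootID identifies the root (typically the
+      dependent handle) holding the pair. -->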
+
+ <template tid="GCBulkNode">
+ <data name="Index" inType="win:UInt32" />
+ <data name="Count" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <struct name="Values" count="Count" >
+ <data name="Address" inType="win:Pointer" />
+ <data name="Size" inType="win:UInt64" />
+ <data name="TypeID" inType="win:UInt64" />
+ <data name="EdgeCount" inType="win:UInt64" />
+ </struct>
+ <UserData>
+ <GCBulkNode xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <Index> %2 </Index>
+ <Count> %3 </Count>
+ </GCBulkNode>
+ </UserData>
+ </template>
+
+ <template tid="GCBulkEdge">
+ <data name="Index" inType="win:UInt32" />
+ <data name="Count" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <struct name="Values" count="Count" >
+ <data name="Value" inType="win:Pointer" outType="win:HexInt64" />
+ <data name="ReferencingFieldID" inType="win:UInt32" />
+ </struct>
+ <UserData>
+ <GCBulkEdge xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <Index> %2 </Index>
+ <Count> %3 </Count>
+ </GCBulkEdge>
+ </UserData>
+ </template>
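+
+ <!-- GCBulkNode and GCBulkEdge together describe the heap graph: nodes and
+      their edges are emitted in the same order, chunked across events (the
+      Index/Count fields stitch the chunks back into one stream), so a node's
+      targets are the next EdgeCount entries of the concatenated edge stream.
+      C++ sketch (hypothetical Node/edges containers, illustrative only):
+
+        size_t cursor = 0;
+        for (const Node& n : nodes)                     // from GCBulkNode, in order
+            for (uint64_t e = 0; e < n.EdgeCount; ++e)
+                Connect(n.Address, edges[cursor++].Value);  // from GCBulkEdge
+ -->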
+
+ <template tid="GCSampledObjectAllocation">
+ <data name="Address" inType="win:Pointer" />
+ <data name="TypeID" inType="win:Pointer" />
+ <data name="ObjectCountForTypeSample" inType="win:UInt32" />
+ <data name="TotalSizeForTypeSample" inType="win:UInt64" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <GCSampledObjectAllocation xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <Address> %2 </Address>
+ <TypeID> %3 </TypeID>
+ <ObjectCountForTypeSample> %4 </ObjectCountForTypeSample>
+ <TotalSizeForTypeSample> %5 </TotalSizeForTypeSample>
+ </GCSampledObjectAllocation>
+ </UserData>
+ </template>
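+
+ <!-- Under sampling (see the High/Low keyword variants below), one event stands
+      in for ObjectCountForTypeSample allocations totalling TotalSizeForTypeSample
+      bytes of the given type since the previous sample, so per-type totals are
+      estimated by summing those fields rather than by counting events. Sketch
+      (hypothetical 'stats' map, illustrative only):
+
+        stats[evt.TypeID].objects += evt.ObjectCountForTypeSample;
+        stats[evt.TypeID].bytes   += evt.TotalSizeForTypeSample;
+ -->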
+
+ <template tid="GCBulkSurvivingObjectRanges">
+ <data name="Index" inType="win:UInt32" />
+ <data name="Count" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <struct name="Values" count="Count" >
+ <data name="RangeBase" inType="win:Pointer" outType="win:HexInt64" />
+ <data name="RangeLength" inType="win:UInt64" />
+ </struct>
+ <UserData>
+ <GCBulkSurvivingObjectRanges xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <Index> %2 </Index>
+ <Count> %3 </Count>
+ </GCBulkSurvivingObjectRanges>
+ </UserData>
+ </template>
+
+ <template tid="GCBulkMovedObjectRanges">
+ <data name="Index" inType="win:UInt32" />
+ <data name="Count" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <struct name="Values" count="Count" >
+ <data name="OldRangeBase" inType="win:Pointer" outType="win:HexInt64" />
+ <data name="NewRangeBase" inType="win:Pointer" outType="win:HexInt64" />
+ <data name="RangeLength" inType="win:UInt64" />
+ </struct>
+ <UserData>
+ <GCBulkMovedObjectRanges xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <Index> %2 </Index>
+ <Count> %3 </Count>
+ </GCBulkMovedObjectRanges>
+ </UserData>
+ </template>
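+
+ <!-- During a compacting GC these ranges map old addresses to new ones: an
+      address inside OldRangeBase..OldRangeBase+RangeLength moved by the same
+      offset as its range. C++ sketch (hypothetical MovedRange struct gathered
+      from these events, illustrative only):
+
+        uint64_t Remap(uint64_t addr, const std::vector<MovedRange>& ranges)
+        {
+            for (const MovedRange& r : ranges)
+                if (addr >= r.OldRangeBase && addr < r.OldRangeBase + r.RangeLength)
+                    return r.NewRangeBase + (addr - r.OldRangeBase);
+            return addr;  // not moved (or reported via the surviving ranges)
+        }
+ -->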
+
+ <template tid="GCGenerationRange">
+ <data name="Generation" inType="win:UInt8" />
+ <data name="RangeStart" inType="win:Pointer" outType="win:HexInt64" />
+ <data name="RangeUsedLength" inType="win:UInt64" />
+ <data name="RangeReservedLength" inType="win:UInt64" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <GCGenerationRange xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <Generation> %2 </Generation>
+ <RangeStart> %3 </RangeStart>
+ <RangeUsedLength> %4 </RangeUsedLength>
+ <RangeReservedLength> %5 </RangeReservedLength>
+ </GCGenerationRange>
+ </UserData>
+ </template>
+
+ </templates>
+
+ <events>
+ <!-- CLR GC events; values reserved from 0 to 39 and 200 to 239 -->
+ <!-- Note that the opcodes for GC events include 0 to 9 for backward compatibility, even though
+      they don't carry the meaning those predefined opcodes are normally supposed to convey -->
+ <event value="1" version="0" level="win:Informational" template="GCStart"
+ keywords="GCKeyword" opcode="win:Start"
+ task="GarbageCollection"
+ symbol="GCStart" message="$(string.RuntimePublisher.GCStartEventMessage)"/>
+
+ <event value="1" version="1" level="win:Informational" template="GCStart_V1"
+ keywords="GCKeyword" opcode="win:Start"
+ task="GarbageCollection"
+ symbol="GCStart_V1" message="$(string.RuntimePublisher.GCStart_V1EventMessage)"/>
+
+ <event value="1" version="2" level="win:Informational" template="GCStart_V2"
+ keywords="GCKeyword" opcode="win:Start"
+ task="GarbageCollection"
+ symbol="GCStart_V2" message="$(string.RuntimePublisher.GCStart_V2EventMessage)"/>
+
+ <event value="2" version="0" level="win:Informational" template="GCEnd"
+ keywords="GCKeyword" opcode="win:Stop"
+ task="GarbageCollection"
+ symbol="GCEnd" message="$(string.RuntimePublisher.GCEndEventMessage)"/>
+
+ <event value="2" version="1" level="win:Informational" template="GCEnd_V1"
+ keywords ="GCKeyword" opcode="win:Stop"
+ task="GarbageCollection"
+ symbol="GCEnd_V1" message="$(string.RuntimePublisher.GCEnd_V1EventMessage)"/>
+
+ <event value="3" version="0" level="win:Informational"
+ keywords ="GCKeyword" opcode="GCRestartEEEnd"
+ task="GarbageCollection"
+ symbol="GCRestartEEEnd" message="$(string.RuntimePublisher.GCRestartEEEndEventMessage)"/>
+
+ <event value="3" version="1" level="win:Informational" template="GCNoUserData"
+ keywords ="GCKeyword" opcode="GCRestartEEEnd"
+ task="GarbageCollection"
+ symbol="GCRestartEEEnd_V1" message="$(string.RuntimePublisher.GCRestartEEEnd_V1EventMessage)"/>
+
+ <event value="4" version="0" level="win:Informational" template="GCHeapStats"
+ keywords ="GCKeyword" opcode="GCHeapStats"
+ task="GarbageCollection"
+ symbol="GCHeapStats" message="$(string.RuntimePublisher.GCHeapStatsEventMessage)"/>
+
+ <event value="4" version="1" level="win:Informational" template="GCHeapStats_V1"
+ keywords ="GCKeyword" opcode="GCHeapStats"
+ task="GarbageCollection"
+ symbol="GCHeapStats_V1" message="$(string.RuntimePublisher.GCHeapStats_V1EventMessage)"/>
+
+ <event value="5" version="0" level="win:Informational" template="GCCreateSegment"
+ keywords ="GCKeyword" opcode="GCCreateSegment"
+ task="GarbageCollection"
+ symbol="GCCreateSegment" message="$(string.RuntimePublisher.GCCreateSegmentEventMessage)"/>
+
+ <event value="5" version="1" level="win:Informational" template="GCCreateSegment_V1"
+ keywords ="GCKeyword" opcode="GCCreateSegment"
+ task="GarbageCollection"
+ symbol="GCCreateSegment_V1" message="$(string.RuntimePublisher.GCCreateSegment_V1EventMessage)"/>
+
+ <event value="6" version="0" level="win:Informational" template="GCFreeSegment"
+ keywords ="GCKeyword" opcode="GCFreeSegment"
+ task="GarbageCollection"
+ symbol="GCFreeSegment" message="$(string.RuntimePublisher.GCFreeSegmentEventMessage)"/>
+
+ <event value="6" version="1" level="win:Informational" template="GCFreeSegment_V1"
+ keywords ="GCKeyword" opcode="GCFreeSegment"
+ task="GarbageCollection"
+ symbol="GCFreeSegment_V1" message="$(string.RuntimePublisher.GCFreeSegment_V1EventMessage)"/>
+
+ <event value="7" version="0" level="win:Informational"
+ keywords ="GCKeyword" opcode="GCRestartEEBegin"
+ task="GarbageCollection"
+ symbol="GCRestartEEBegin" message="$(string.RuntimePublisher.GCRestartEEBeginEventMessage)"/>
+
+ <event value="7" version="1" level="win:Informational" template="GCNoUserData"
+ keywords ="GCKeyword" opcode="GCRestartEEBegin"
+ task="GarbageCollection"
+ symbol="GCRestartEEBegin_V1" message="$(string.RuntimePublisher.GCRestartEEBegin_V1EventMessage)"/>
+
+ <event value="8" version="0" level="win:Informational"
+ keywords ="GCKeyword" opcode="GCSuspendEEEnd"
+ task="GarbageCollection"
+ symbol="GCSuspendEEEnd" message="$(string.RuntimePublisher.GCSuspendEEEndEventMessage)"/>
+
+ <event value="8" version="1" level="win:Informational" template="GCNoUserData"
+ keywords ="GCKeyword" opcode="GCSuspendEEEnd"
+ task="GarbageCollection"
+ symbol="GCSuspendEEEnd_V1" message="$(string.RuntimePublisher.GCSuspendEEEnd_V1EventMessage)"/>
+
+ <event value="9" version="0" level="win:Informational" template="GCSuspendEE"
+ keywords ="GCKeyword" opcode="GCSuspendEEBegin"
+ task="GarbageCollection"
+ symbol="GCSuspendEEBegin" message="$(string.RuntimePublisher.GCSuspendEEEventMessage)"/>
+
+ <event value="9" version="1" level="win:Informational" template="GCSuspendEE_V1"
+ keywords ="GCKeyword" opcode="GCSuspendEEBegin"
+ task="GarbageCollection"
+ symbol="GCSuspendEEBegin_V1" message="$(string.RuntimePublisher.GCSuspendEE_V1EventMessage)"/>
+
+ <event value="10" version="0" level="win:Verbose" template="GCAllocationTick"
+ keywords="GCKeyword" opcode="GCAllocationTick"
+ task="GarbageCollection"
+ symbol="GCAllocationTick" message="$(string.RuntimePublisher.GCAllocationTickEventMessage)"/>
+
+ <event value="10" version="1" level="win:Verbose" template="GCAllocationTick_V1"
+ keywords="GCKeyword" opcode="GCAllocationTick"
+ task="GarbageCollection"
+ symbol="GCAllocationTick_V1" message="$(string.RuntimePublisher.GCAllocationTick_V1EventMessage)"/>
+
+ <event value="10" version="2" level="win:Verbose" template="GCAllocationTick_V2"
+ keywords="GCKeyword" opcode="GCAllocationTick"
+ task="GarbageCollection"
+ symbol="GCAllocationTick_V2" message="$(string.RuntimePublisher.GCAllocationTick_V2EventMessage)"/>
+
+ <event value="10" version="3" level="win:Verbose" template="GCAllocationTick_V3"
+ keywords="GCKeyword" opcode="GCAllocationTick"
+ task="GarbageCollection"
+ symbol="GCAllocationTick_V3" message="$(string.RuntimePublisher.GCAllocationTick_V3EventMessage)"/>
+
+ <event value="11" version="0" level="win:Informational"
+ keywords ="GCKeyword" opcode="GCCreateConcurrentThread"
+ task="GarbageCollection"
+ symbol="GCCreateConcurrentThread" message="$(string.RuntimePublisher.GCCreateConcurrentThreadEventMessage)"/>
+
+ <event value="11" version="1" level="win:Informational" template="GCCreateConcurrentThread"
+ keywords ="GCKeyword ThreadingKeyword" opcode="GCCreateConcurrentThread"
+ task="GarbageCollection"
+ symbol="GCCreateConcurrentThread_V1" message="$(string.RuntimePublisher.GCCreateConcurrentThread_V1EventMessage)"/>
+
+ <event value="12" version="0" level="win:Informational"
+ keywords ="GCKeyword" opcode="GCTerminateConcurrentThread"
+ task="GarbageCollection"
+ symbol="GCTerminateConcurrentThread" message="$(string.RuntimePublisher.GCTerminateConcurrentThreadEventMessage)"/>
+
+ <event value="12" version="1" level="win:Informational" template="GCTerminateConcurrentThread"
+ keywords ="GCKeyword ThreadingKeyword" opcode="GCTerminateConcurrentThread"
+ task="GarbageCollection"
+ symbol="GCTerminateConcurrentThread_V1" message="$(string.RuntimePublisher.GCTerminateConcurrentThread_V1EventMessage)"/>
+
+ <event value="13" version="0" level="win:Informational" template="GCFinalizersEnd"
+ keywords ="GCKeyword" opcode="GCFinalizersEnd"
+ task="GarbageCollection"
+ symbol="GCFinalizersEnd" message="$(string.RuntimePublisher.GCFinalizersEndEventMessage)"/>
+
+ <event value="13" version="1" level="win:Informational" template="GCFinalizersEnd_V1"
+ keywords ="GCKeyword" opcode="GCFinalizersEnd"
+ task="GarbageCollection"
+ symbol="GCFinalizersEnd_V1" message="$(string.RuntimePublisher.GCFinalizersEnd_V1EventMessage)"/>
+
+ <event value="14" version="0" level="win:Informational"
+ keywords ="GCKeyword" opcode="GCFinalizersBegin"
+ task="GarbageCollection"
+ symbol="GCFinalizersBegin" message="$(string.RuntimePublisher.GCFinalizersBeginEventMessage)"/>
+
+ <event value="14" version="1" level="win:Informational" template="GCNoUserData"
+ keywords ="GCKeyword" opcode="GCFinalizersBegin"
+ task="GarbageCollection"
+ symbol="GCFinalizersBegin_V1" message="$(string.RuntimePublisher.GCFinalizersBegin_V1EventMessage)"/>
+
+ <event value="15" version="0" level="win:Informational" template="BulkType"
+ keywords ="TypeKeyword" opcode="BulkType"
+ task="Type"
+ symbol="BulkType" message="$(string.RuntimePublisher.BulkTypeEventMessage)"/>
+
+ <event value="16" version="0" level="win:Informational" template="GCBulkRootEdge"
+ keywords ="GCHeapDumpKeyword" opcode="GCBulkRootEdge"
+ task="GarbageCollection"
+ symbol="GCBulkRootEdge" message="$(string.RuntimePublisher.GCBulkRootEdgeEventMessage)"/>
+
+ <event value="17" version="0" level="win:Informational" template="GCBulkRootConditionalWeakTableElementEdge"
+ keywords ="GCHeapDumpKeyword" opcode="GCBulkRootConditionalWeakTableElementEdge"
+ task="GarbageCollection"
+ symbol="GCBulkRootConditionalWeakTableElementEdge" message="$(string.RuntimePublisher.GCBulkRootConditionalWeakTableElementEdgeEventMessage)"/>
+
+ <event value="18" version="0" level="win:Informational" template="GCBulkNode"
+ keywords ="GCHeapDumpKeyword" opcode="GCBulkNode"
+ task="GarbageCollection"
+ symbol="GCBulkNode" message="$(string.RuntimePublisher.GCBulkNodeEventMessage)"/>
+
+ <event value="19" version="0" level="win:Informational" template="GCBulkEdge"
+ keywords ="GCHeapDumpKeyword" opcode="GCBulkEdge"
+ task="GarbageCollection"
+ symbol="GCBulkEdge" message="$(string.RuntimePublisher.GCBulkEdgeEventMessage)"/>
+
+ <event value="20" version="0" level="win:Informational" template="GCSampledObjectAllocation"
+ keywords ="GCSampledObjectAllocationHighKeyword" opcode="GCSampledObjectAllocation"
+ task="GarbageCollection"
+ symbol="GCSampledObjectAllocationHigh" message="$(string.RuntimePublisher.GCSampledObjectAllocationHighEventMessage)"/>
+
+ <event value="21" version="0" level="win:Informational" template="GCBulkSurvivingObjectRanges"
+ keywords ="GCHeapSurvivalAndMovementKeyword" opcode="GCBulkSurvivingObjectRanges"
+ task="GarbageCollection"
+ symbol="GCBulkSurvivingObjectRanges" message="$(string.RuntimePublisher.GCBulkSurvivingObjectRangesEventMessage)"/>
+
+ <event value="22" version="0" level="win:Informational" template="GCBulkMovedObjectRanges"
+ keywords ="GCHeapSurvivalAndMovementKeyword" opcode="GCBulkMovedObjectRanges"
+ task="GarbageCollection"
+ symbol="GCBulkMovedObjectRanges" message="$(string.RuntimePublisher.GCBulkMovedObjectRangesEventMessage)"/>
+
+ <event value="23" version="0" level="win:Informational" template="GCGenerationRange"
+ keywords ="GCKeyword" opcode="GCGenerationRange"
+ task="GarbageCollection"
+ symbol="GCGenerationRange" message="$(string.RuntimePublisher.GCGenerationRangeEventMessage)"/>
+
+ <event value="25" version="0" level="win:Informational" template="GCMark"
+ keywords ="GCKeyword" opcode="GCMarkStackRoots"
+ task="GarbageCollection"
+ symbol="GCMarkStackRoots" message="$(string.RuntimePublisher.GCMarkStackRootsEventMessage)"/>
+
+ <event value="26" version="0" level="win:Informational" template="GCMark"
+ keywords ="GCKeyword" opcode="GCMarkFinalizeQueueRoots"
+ task="GarbageCollection"
+ symbol="GCMarkFinalizeQueueRoots" message="$(string.RuntimePublisher.GCMarkFinalizeQueueRootsEventMessage)"/>
+
+ <event value="27" version="0" level="win:Informational" template="GCMark"
+ keywords ="GCKeyword" opcode="GCMarkHandles"
+ task="GarbageCollection"
+ symbol="GCMarkHandles" message="$(string.RuntimePublisher.GCMarkHandlesEventMessage)"/>
+
+ <event value="28" version="0" level="win:Informational" template="GCMark"
+ keywords ="GCKeyword" opcode="GCMarkOlderGenerationRoots"
+ task="GarbageCollection"
+ symbol="GCMarkOlderGenerationRoots" message="$(string.RuntimePublisher.GCMarkOlderGenerationRootsEventMessage)"/>
+
+ <event value="29" version="0" level="win:Verbose" template="FinalizeObject"
+ keywords ="GCKeyword"
+ opcode="FinalizeObject"
+ task="GarbageCollection"
+ symbol="FinalizeObject" message="$(string.RuntimePublisher.FinalizeObjectEventMessage)"/>
+
+ <event value="30" version="0" level="win:Informational" template="SetGCHandle"
+ keywords="GCHandleKeyword"
+ opcode="SetGCHandle"
+ task="GarbageCollection"
+ symbol="SetGCHandle" message="$(string.RuntimePublisher.SetGCHandleEventMessage)"/>
+
+ <event value="31" version="0" level="win:Informational" template="DestroyGCHandle"
+ keywords="GCHandleKeyword"
+ opcode="DestroyGCHandle"
+ task="GarbageCollection"
+ symbol="DestroyGCHandle" message="$(string.RuntimePublisher.DestroyGCHandleEventMessage)"/>
+
+ <event value="32" version="0" level="win:Informational" template="GCSampledObjectAllocation"
+ keywords ="GCSampledObjectAllocationLowKeyword" opcode="GCSampledObjectAllocation"
+ task="GarbageCollection"
+ symbol="GCSampledObjectAllocationLow" message="$(string.RuntimePublisher.GCSampledObjectAllocationLowEventMessage)"/>
+
+ <event value="33" version="0" level="win:Verbose" template="PinObjectAtGCTime"
+ keywords="GCKeyword"
+ opcode="PinObjectAtGCTime"
+ task="GarbageCollection"
+ symbol="PinObjectAtGCTime" message="$(string.RuntimePublisher.PinObjectAtGCTimeEventMessage)"/>
+
+ <event value="35" version="0" level="win:Informational" template="GCTriggered"
+ keywords="GCKeyword" opcode="Triggered"
+ task="GarbageCollection"
+ symbol="GCTriggered" message="$(string.RuntimePublisher.GCTriggeredEventMessage)"/>
+
+ <event value="36" version="0" level="win:Informational" template="GCBulkRootCCW"
+ keywords ="GCHeapDumpKeyword" opcode="GCBulkRootCCW"
+ task="GarbageCollection"
+ symbol="GCBulkRootCCW" message="$(string.RuntimePublisher.GCBulkRootCCWEventMessage)"/>
+
+ <event value="37" version="0" level="win:Informational" template="GCBulkRCW"
+ keywords ="GCHeapDumpKeyword" opcode="GCBulkRCW"
+ task="GarbageCollection"
+ symbol="GCBulkRCW" message="$(string.RuntimePublisher.GCBulkRCWEventMessage)"/>
+
+ <event value="38" version="0" level="win:Informational" template="GCBulkRootStaticVar"
+ keywords ="GCHeapDumpKeyword" opcode="GCBulkRootStaticVar"
+ task="GarbageCollection"
+ symbol="GCBulkRootStaticVar" message="$(string.RuntimePublisher.GCBulkRootStaticVarEventMessage)"/>
+
+ <!-- CLR Threading events, values reserved from 40 to 79 -->
+ <event value="40" version="0" level="win:Informational" template="ClrWorkerThread"
+ keywords="ThreadingKeyword" opcode="win:Start"
+ task="WorkerThreadCreation"
+ symbol="WorkerThreadCreate" message="$(string.RuntimePublisher.WorkerThreadCreateEventMessage)"/>
+
+ <event value="41" version="0" level="win:Informational" template="ClrWorkerThread"
+ keywords="ThreadingKeyword" opcode="win:Stop"
+ task="WorkerThreadCreation"
+ symbol="WorkerThreadTerminate" message="$(string.RuntimePublisher.WorkerThreadTerminateEventMessage)"/>
+
+ <event value="42" version="0" level="win:Informational" template="ClrWorkerThread"
+ keywords="ThreadingKeyword" opcode="win:Start"
+ task="WorkerThreadRetirement"
+ symbol="WorkerThreadRetire" message="$(string.RuntimePublisher.WorkerThreadRetirementRetireThreadEventMessage)"/>
+
+ <event value="43" version="0" level="win:Informational" template="ClrWorkerThread"
+ keywords="ThreadingKeyword" opcode="win:Stop"
+ task="WorkerThreadRetirement"
+ symbol="WorkerThreadUnretire" message="$(string.RuntimePublisher.WorkerThreadRetirementUnretireThreadEventMessage)"/>
+
+ <event value="44" version="0" level="win:Informational" template="IOThread"
+ keywords="ThreadingKeyword" opcode="win:Start"
+ task="IOThreadCreation"
+ symbol="IOThreadCreate" message="$(string.RuntimePublisher.IOThreadCreateEventMessage)"/>
+
+ <event value="44" version="1" level="win:Informational" template="IOThread_V1"
+ keywords="ThreadingKeyword" opcode="win:Start"
+ task="IOThreadCreation"
+ symbol="IOThreadCreate_V1" message="$(string.RuntimePublisher.IOThreadCreate_V1EventMessage)"/>
+
+ <event value="45" version="0" level="win:Informational" template="IOThread"
+ keywords="ThreadingKeyword" opcode="win:Stop"
+ task="IOThreadCreation"
+ symbol="IOThreadTerminate" message="$(string.RuntimePublisher.IOThreadTerminateEventMessage)"/>
+
+ <event value="45" version="1" level="win:Informational" template="IOThread_V1"
+ keywords="ThreadingKeyword" opcode="win:Stop"
+ task="IOThreadCreation"
+ symbol="IOThreadTerminate_V1" message="$(string.RuntimePublisher.IOThreadTerminate_V1EventMessage)"/>
+
+ <event value="46" version="0" level="win:Informational" template="IOThread"
+ keywords="ThreadingKeyword" opcode="win:Start"
+ task="IOThreadRetirement"
+ symbol="IOThreadRetire" message="$(string.RuntimePublisher.IOThreadRetirementRetireThreadEventMessage)"/>
+
+ <event value="46" version="1" level="win:Informational" template="IOThread_V1"
+ keywords="ThreadingKeyword" opcode="win:Start"
+ task="IOThreadRetirement"
+ symbol="IOThreadRetire_V1" message="$(string.RuntimePublisher.IOThreadRetirementRetireThread_V1EventMessage)"/>
+
+ <event value="47" version="0" level="win:Informational" template="IOThread"
+ keywords="ThreadingKeyword" opcode="win:Stop"
+ task="IOThreadRetirement"
+ symbol="IOThreadUnretire" message="$(string.RuntimePublisher.IOThreadRetirementUnretireThreadEventMessage)"/>
+
+ <event value="47" version="1" level="win:Informational" template="IOThread_V1"
+ keywords="ThreadingKeyword" opcode="win:Stop"
+ task="IOThreadRetirement"
+ symbol="IOThreadUnretire_V1" message="$(string.RuntimePublisher.IOThreadRetirementUnretireThread_V1EventMessage)"/>
+
+ <event value="48" version="0" level="win:Informational" template="ClrThreadPoolSuspend"
+ keywords ="ThreadingKeyword" opcode="win:Start"
+ task="ThreadpoolSuspension"
+ symbol="ThreadpoolSuspensionSuspendThread" message="$(string.RuntimePublisher.ThreadPoolSuspendSuspendThreadEventMessage)"/>
+
+ <event value="49" version="0" level="win:Informational" template="ClrThreadPoolSuspend"
+ keywords ="ThreadingKeyword" opcode="win:Stop"
+ task="ThreadpoolSuspension"
+ symbol="ThreadpoolSuspensionResumeThread" message="$(string.RuntimePublisher.ThreadPoolSuspendResumeThreadEventMessage)"/>
+
+ <event value="50" version="0" level="win:Informational" template="ThreadPoolWorkerThread"
+ keywords ="ThreadingKeyword" opcode="win:Start"
+ task="ThreadPoolWorkerThread"
+ symbol="ThreadPoolWorkerThreadStart" message="$(string.RuntimePublisher.ThreadPoolWorkerThreadEventMessage)"/>
+
+ <event value="51" version="0" level="win:Informational" template="ThreadPoolWorkerThread"
+ keywords ="ThreadingKeyword" opcode="win:Stop"
+ task="ThreadPoolWorkerThread"
+ symbol="ThreadPoolWorkerThreadStop" message="$(string.RuntimePublisher.ThreadPoolWorkerThreadEventMessage)"/>
+
+ <event value="52" version="0" level="win:Informational" template="ThreadPoolWorkerThread"
+ keywords ="ThreadingKeyword" opcode="win:Start"
+ task="ThreadPoolWorkerThreadRetirement"
+ symbol="ThreadPoolWorkerThreadRetirementStart" message="$(string.RuntimePublisher.ThreadPoolWorkerThreadEventMessage)"/>
+
+ <event value="53" version="0" level="win:Informational" template="ThreadPoolWorkerThread"
+ keywords ="ThreadingKeyword" opcode="win:Stop"
+ task="ThreadPoolWorkerThreadRetirement"
+ symbol="ThreadPoolWorkerThreadRetirementStop" message="$(string.RuntimePublisher.ThreadPoolWorkerThreadEventMessage)"/>
+
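+ <!-- Events 54 to 56 trace the thread pool's hill-climbing concurrency
+      controller: Sample reports the measured throughput of one interval,
+      Adjustment reports each change to the worker-thread count together with
+      its reason, and Stats periodically dumps the controller's state. -->
+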
+ <event value="54" version="0" level="win:Informational" template="ThreadPoolWorkerThreadAdjustmentSample"
+ keywords ="ThreadingKeyword" opcode="Sample"
+ task="ThreadPoolWorkerThreadAdjustment"
+ symbol="ThreadPoolWorkerThreadAdjustmentSample" message="$(string.RuntimePublisher.ThreadPoolWorkerThreadAdjustmentSampleEventMessage)"/>
+
+ <event value="55" version="0" level="win:Informational" template="ThreadPoolWorkerThreadAdjustmentAdjustment"
+ keywords ="ThreadingKeyword" opcode="Adjustment"
+ task="ThreadPoolWorkerThreadAdjustment"
+ symbol="ThreadPoolWorkerThreadAdjustmentAdjustment" message="$(string.RuntimePublisher.ThreadPoolWorkerThreadAdjustmentAdjustmentEventMessage)"/>
+
+ <event value="56" version="0" level="win:Verbose" template="ThreadPoolWorkerThreadAdjustmentStats"
+ keywords ="ThreadingKeyword" opcode="Stats"
+ task="ThreadPoolWorkerThreadAdjustment"
+ symbol="ThreadPoolWorkerThreadAdjustmentStats" message="$(string.RuntimePublisher.ThreadPoolWorkerThreadAdjustmentStatsEventMessage)"/>
+
+ <event value="57" version="0" level="win:Informational" template="ThreadPoolWorkerThread"
+ keywords ="ThreadingKeyword" opcode="Wait"
+ task="ThreadPoolWorkerThread"
+ symbol="ThreadPoolWorkerThreadWait" message="$(string.RuntimePublisher.ThreadPoolWorkerThreadEventMessage)"/>
+
+ <!-- CLR private ThreadPool events -->
+ <event value="60" version="0" level="win:Verbose" template="ThreadPoolWorkingThreadCount"
+ keywords="ThreadingKeyword"
+ opcode="win:Start"
+ task="ThreadPoolWorkingThreadCount"
+ symbol="ThreadPoolWorkingThreadCount" message="$(string.RuntimePublisher.ThreadPoolWorkingThreadCountEventMessage)"/>
+
+ <event value="61" version="0" level="win:Verbose" template="ThreadPoolWork"
+ keywords="ThreadingKeyword ThreadTransferKeyword"
+ task="ThreadPool"
+ opcode="Enqueue"
+ symbol="ThreadPoolEnqueue"
+ message="$(string.RuntimePublisher.ThreadPoolEnqueueEventMessage)"/>
+
+ <event value="62" version="0" level="win:Verbose" template="ThreadPoolWork"
+ keywords="ThreadingKeyword ThreadTransferKeyword"
+ task="ThreadPool"
+ opcode="Dequeue"
+ symbol="ThreadPoolDequeue"
+ message="$(string.RuntimePublisher.ThreadPoolDequeueEventMessage)"/>
+
+ <event value="63" version="0" level="win:Verbose" template="ThreadPoolIOWorkEnqueue"
+ keywords="ThreadingKeyword ThreadTransferKeyword"
+ task="ThreadPool"
+ opcode="IOEnqueue"
+ symbol="ThreadPoolIOEnqueue"
+ message="$(string.RuntimePublisher.ThreadPoolIOEnqueueEventMessage)"/>
+
+ <event value="64" version="0" level="win:Verbose" template="ThreadPoolIOWork"
+ keywords="ThreadingKeyword ThreadTransferKeyword"
+ task="ThreadPool"
+ opcode="IODequeue"
+ symbol="ThreadPoolIODequeue"
+ message="$(string.RuntimePublisher.ThreadPoolIODequeueEventMessage)"/>
+
+ <event value="65" version="0" level="win:Verbose" template="ThreadPoolIOWork"
+ keywords="ThreadingKeyword"
+ task="ThreadPool"
+ opcode="IOPack"
+ symbol="ThreadPoolIOPack"
+ message="$(string.RuntimePublisher.ThreadPoolIOPackEventMessage)"/>
+
+ <event value="70" version="0" level="win:Informational" template="ThreadStartWork"
+ keywords="ThreadingKeyword ThreadTransferKeyword"
+ task="Thread"
+ opcode="Creating"
+ symbol="ThreadCreating"
+ message="$(string.RuntimePublisher.ThreadCreatingEventMessage)"/>
+
+ <event value="71" version="0" level="win:Informational" template="ThreadStartWork"
+ keywords="ThreadingKeyword ThreadTransferKeyword"
+ task="Thread"
+ opcode="Running"
+ symbol="ThreadRunning"
+ message="$(string.RuntimePublisher.ThreadRunningEventMessage)"/>
+
+ <!-- CLR Exception events -->
+ <event value="80" version="0" level="win:Informational"
+ opcode="win:Start"
+ task="Exception"
+ symbol="ExceptionThrown" message="$(string.RuntimePublisher.ExceptionExceptionThrownEventMessage)"/>
+
+ <event value="80" version="1" level="win:Error" template="Exception"
+ keywords ="ExceptionKeyword" opcode="win:Start"
+ task="Exception"
+ symbol="ExceptionThrown_V1" message="$(string.RuntimePublisher.ExceptionExceptionThrown_V1EventMessage)"/>
+
+ <!-- CLR Contention events -->
+ <event value="81" version="0" level="win:Informational"
+ opcode="win:Start"
+ task="Contention"
+ symbol="Contention" message="$(string.RuntimePublisher.ContentionStartEventMessage)"/>
+
+ <event value="81" version="1" level="win:Informational" template="Contention"
+ keywords ="ContentionKeyword" opcode="win:Start"
+ task="Contention"
+ symbol="ContentionStart_V1" message="$(string.RuntimePublisher.ContentionStart_V1EventMessage)"/>
+
+ <event value="91" version="0" level="win:Informational" template="Contention"
+ keywords ="ContentionKeyword" opcode="win:Stop"
+ task="Contention"
+ symbol="ContentionStop" message="$(string.RuntimePublisher.ContentionStopEventMessage)"/>
+
+ <!-- CLR Stack events -->
+ <event value="82" version="0" level="win:LogAlways" template="ClrStackWalk"
+ keywords ="StackKeyword" opcode="CLRStackWalk"
+ task="CLRStack"
+ symbol="CLRStackWalk" message="$(string.RuntimePublisher.StackEventMessage)"/>
+
+ <!-- CLR AppDomainResourceManagement events -->
+ <event value="83" version="0" level="win:Informational" template="AppDomainMemAllocated"
+ keywords ="AppDomainResourceManagementKeyword" opcode="AppDomainMemAllocated"
+ task="AppDomainResourceManagement"
+ symbol="AppDomainMemAllocated" message="$(string.RuntimePublisher.AppDomainMemAllocatedEventMessage)"/>
+
+ <event value="84" version="0" level="win:Informational" template="AppDomainMemSurvived"
+ keywords ="AppDomainResourceManagementKeyword" opcode="AppDomainMemSurvived"
+ task="AppDomainResourceManagement"
+ symbol="AppDomainMemSurvived" message="$(string.RuntimePublisher.AppDomainMemSurvivedEventMessage)"/>
+
+ <event value="85" version="0" level="win:Informational" template="ThreadCreated"
+ keywords ="AppDomainResourceManagementKeyword ThreadingKeyword" opcode="ThreadCreated"
+ task="AppDomainResourceManagement"
+ symbol="ThreadCreated" message="$(string.RuntimePublisher.ThreadCreatedEventMessage)"/>
+
+ <event value="86" version="0" level="win:Informational" template="ThreadTerminatedOrTransition"
+ keywords ="AppDomainResourceManagementKeyword ThreadingKeyword" opcode="ThreadTerminated"
+ task="AppDomainResourceManagement"
+ symbol="ThreadTerminated" message="$(string.RuntimePublisher.ThreadTerminatedEventMessage)"/>
+
+ <event value="87" version="0" level="win:Informational" template="ThreadTerminatedOrTransition"
+ keywords ="AppDomainResourceManagementKeyword ThreadingKeyword" opcode="ThreadDomainEnter"
+ task="AppDomainResourceManagement"
+ symbol="ThreadDomainEnter" message="$(string.RuntimePublisher.ThreadDomainEnterEventMessage)"/>
+
+ <!-- CLR Interop events -->
+ <event value="88" version="0" level="win:Informational" template="ILStubGenerated"
+ keywords ="InteropKeyword" opcode="ILStubGenerated"
+ task="CLRILStub"
+ symbol="ILStubGenerated" message="$(string.RuntimePublisher.ILStubGeneratedEventMessage)"/>
+
+ <event value="89" version="0" level="win:Informational" template="ILStubCacheHit"
+ keywords ="InteropKeyword" opcode="ILStubCacheHit"
+ task="CLRILStub"
+ symbol="ILStubCacheHit" message="$(string.RuntimePublisher.ILStubCacheHitEventMessage)"/>
+
+ <!-- CLR Method events -->
+ <!-- The following 6 events are now defunct -->
+ <event value="135" version="0" level="win:Informational"
+ keywords ="JitKeyword NGenKeyword" opcode="DCStartComplete"
+ task="CLRMethod"
+ symbol="DCStartCompleteV2" message="$(string.RuntimePublisher.DCStartCompleteEventMessage)"/>
+
+ <event value="136" version="0" level="win:Informational"
+ keywords ="JitKeyword NGenKeyword" opcode="DCEndComplete"
+ task="CLRMethod"
+ symbol="DCEndCompleteV2" message="$(string.RuntimePublisher.DCEndCompleteEventMessage)"/>
+
+ <event value="137" version="0" level="win:Informational" template="MethodLoadUnload"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodDCStart"
+ task="CLRMethod"
+ symbol="MethodDCStartV2" message="$(string.RuntimePublisher.MethodDCStartEventMessage)"/>
+
+ <event value="138" version="0" level="win:Informational" template="MethodLoadUnload"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodDCEnd"
+ task="CLRMethod"
+ symbol="MethodDCEndV2" message="$(string.RuntimePublisher.MethodDCEndEventMessage)"/>
+
+ <event value="139" version="0" level="win:Informational" template="MethodLoadUnloadVerbose"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodDCStartVerbose"
+ task="CLRMethod"
+ symbol="MethodDCStartVerboseV2" message="$(string.RuntimePublisher.MethodDCStartEventMessage)"/>
+
+ <event value="140" version="0" level="win:Informational" template="MethodLoadUnloadVerbose"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodDCEndVerbose"
+ task="CLRMethod"
+ symbol="MethodDCEndVerboseV2" message="$(string.RuntimePublisher.MethodDCEndVerboseEventMessage)"/>
+
+ <event value="141" version="0" level="win:Informational" template="MethodLoadUnload"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodLoad"
+ task="CLRMethod"
+ symbol="MethodLoad" message="$(string.RuntimePublisher.MethodLoadEventMessage)"/>
+
+ <event value="141" version="1" level="win:Informational" template="MethodLoadUnload_V1"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodLoad"
+ task="CLRMethod"
+ symbol="MethodLoad_V1" message="$(string.RuntimePublisher.MethodLoad_V1EventMessage)"/>
+
+ <event value="141" version="2" level="win:Informational" template="MethodLoadUnload_V2"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodLoad"
+ task="CLRMethod"
+ symbol="MethodLoad_V2" message="$(string.RuntimePublisher.MethodLoad_V2EventMessage)"/>
+
+ <event value="142" version="0" level="win:Informational" template="MethodLoadUnload"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodUnload"
+ task="CLRMethod"
+ symbol="MethodUnload" message="$(string.RuntimePublisher.MethodUnloadEventMessage)"/>
+
+ <event value="142" version="1" level="win:Informational" template="MethodLoadUnload_V1"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodUnload"
+ task="CLRMethod"
+ symbol="MethodUnload_V1" message="$(string.RuntimePublisher.MethodUnload_V1EventMessage)"/>
+
+ <event value="142" version="2" level="win:Informational" template="MethodLoadUnload_V2"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodUnload"
+ task="CLRMethod"
+ symbol="MethodUnload_V2" message="$(string.RuntimePublisher.MethodUnload_V2EventMessage)"/>
+
+ <event value="143" version="0" level="win:Informational" template="MethodLoadUnloadVerbose"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodLoadVerbose"
+ task="CLRMethod"
+ symbol="MethodLoadVerbose" message="$(string.RuntimePublisher.MethodLoadVerboseEventMessage)"/>
+
+ <event value="143" version="1" level="win:Informational" template="MethodLoadUnloadVerbose_V1"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodLoadVerbose"
+ task="CLRMethod"
+ symbol="MethodLoadVerbose_V1" message="$(string.RuntimePublisher.MethodLoadVerbose_V1EventMessage)"/>
+
+ <event value="143" version="2" level="win:Informational" template="MethodLoadUnloadVerbose_V2"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodLoadVerbose"
+ task="CLRMethod"
+ symbol="MethodLoadVerbose_V2" message="$(string.RuntimePublisher.MethodLoadVerbose_V2EventMessage)"/>
+
+ <event value="144" version="0" level="win:Informational" template="MethodLoadUnloadVerbose"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodUnloadVerbose"
+ task="CLRMethod"
+ symbol="MethodUnloadVerbose" message="$(string.RuntimePublisher.MethodUnloadVerboseEventMessage)"/>
+
+ <event value="144" version="1" level="win:Informational" template="MethodLoadUnloadVerbose_V1"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodUnloadVerbose"
+ task="CLRMethod"
+ symbol="MethodUnloadVerbose_V1" message="$(string.RuntimePublisher.MethodUnloadVerbose_V1EventMessage)"/>
+
+ <event value="144" version="2" level="win:Informational" template="MethodLoadUnloadVerbose_V2"
+ keywords ="JitKeyword NGenKeyword" opcode="MethodUnloadVerbose"
+ task="CLRMethod"
+ symbol="MethodUnloadVerbose_V2" message="$(string.RuntimePublisher.MethodUnloadVerbose_V2EventMessage)"/>
+
+ <event value="145" version="0" level="win:Verbose" template="MethodJittingStarted"
+ keywords ="JitKeyword" opcode="MethodJittingStarted"
+ task="CLRMethod"
+ symbol="MethodJittingStarted" message="$(string.RuntimePublisher.MethodJittingStartedEventMessage)"/>
+
+ <event value="145" version="1" level="win:Verbose" template="MethodJittingStarted_V1"
+ keywords ="JitKeyword" opcode="MethodJittingStarted"
+ task="CLRMethod"
+ symbol="MethodJittingStarted_V1" message="$(string.RuntimePublisher.MethodJittingStarted_V1EventMessage)"/>
+
+ <event value="185" version="0" level="win:Verbose" template="MethodJitInliningSucceeded"
+ keywords ="JitTracingKeyword" opcode="JitInliningSucceeded"
+ task="CLRMethod"
+ symbol="MethodJitInliningSucceeded"
+ message="$(string.RuntimePublisher.MethodJitInliningSucceededEventMessage)"/>
+
+ <event value="186" version="0" level="win:Verbose" template="MethodJitInliningFailed"
+ keywords ="JitTracingKeyword" opcode="JitInliningFailed"
+ task="CLRMethod"
+ symbol="MethodJitInliningFailed"
+ message="$(string.RuntimePublisher.MethodJitInliningFailedEventMessage)"/>
+
+ <event value="188" version="0" level="win:Verbose" template="MethodJitTailCallSucceeded"
+ keywords ="JitTracingKeyword" opcode="JitTailCallSucceeded"
+ task="CLRMethod"
+ symbol="MethodJitTailCallSucceeded"
+ message="$(string.RuntimePublisher.MethodJitTailCallSucceededEventMessage)"/>
+
+ <event value="189" version="0" level="win:Verbose" template="MethodJitTailCallFailed"
+ keywords ="JitTracingKeyword" opcode="JitTailCallFailed"
+ task="CLRMethod"
+ symbol="MethodJitTailCallFailed"
+ message="$(string.RuntimePublisher.MethodJitTailCallFailedEventMessage)"/>
+
+ <event value="190" version="0" level="win:Verbose" template="MethodILToNativeMap"
+ keywords ="JittedMethodILToNativeMapKeyword" opcode="MethodILToNativeMap"
+ task="CLRMethod"
+ symbol="MethodILToNativeMap"
+ message="$(string.RuntimePublisher.MethodILToNativeMapEventMessage)"/>
+
+ <!-- CLR Loader events -->
+ <!-- The following 2 events are now defunct -->
+ <event value="149" version="0" level="win:Informational" template="ModuleLoadUnload"
+ keywords ="LoaderKeyword" opcode="ModuleDCStart"
+ task="CLRLoader"
+ symbol="ModuleDCStartV2" message="$(string.RuntimePublisher.ModuleDCStartEventMessage)"/>
+
+ <event value="150" version="0" level="win:Informational" template="ModuleLoadUnload"
+ keywords ="LoaderKeyword" opcode="ModuleDCEnd"
+ task="CLRLoader"
+ symbol="ModuleDCEndV2" message="$(string.RuntimePublisher.ModuleDCEndEventMessage)"/>
+
+ <event value="151" version="0" level="win:Informational" template="DomainModuleLoadUnload"
+ keywords ="LoaderKeyword" opcode="DomainModuleLoad"
+ task="CLRLoader"
+ symbol="DomainModuleLoad" message="$(string.RuntimePublisher.DomainModuleLoadEventMessage)"/>
+
+ <event value="151" version="1" level="win:Informational" template="DomainModuleLoadUnload_V1"
+ keywords ="LoaderKeyword" opcode="DomainModuleLoad"
+ task="CLRLoader"
+ symbol="DomainModuleLoad_V1" message="$(string.RuntimePublisher.DomainModuleLoad_V1EventMessage)"/>
+
+ <event value="152" version="0" level="win:Informational" template="ModuleLoadUnload"
+ keywords ="LoaderKeyword" opcode="ModuleLoad"
+ task="CLRLoader"
+ symbol="ModuleLoad" message="$(string.RuntimePublisher.ModuleLoadEventMessage)"/>
+
+ <event value="152" version="1" level="win:Informational" template="ModuleLoadUnload_V1"
+ keywords ="LoaderKeyword PerfTrackKeyword" opcode="ModuleLoad"
+ task="CLRLoader"
+ symbol="ModuleLoad_V1" message="$(string.RuntimePublisher.ModuleLoad_V1EventMessage)"/>
+
+ <event value="152" version="2" level="win:Informational" template="ModuleLoadUnload_V2"
+ keywords ="LoaderKeyword PerfTrackKeyword" opcode="ModuleLoad"
+ task="CLRLoader"
+ symbol="ModuleLoad_V2" message="$(string.RuntimePublisher.ModuleLoad_V2EventMessage)"/>
+
+ <event value="153" version="0" level="win:Informational" template="ModuleLoadUnload"
+ keywords ="LoaderKeyword" opcode="ModuleUnload"
+ task="CLRLoader"
+ symbol="ModuleUnload" message="$(string.RuntimePublisher.ModuleUnloadEventMessage)"/>
+
+ <event value="153" version="1" level="win:Informational" template="ModuleLoadUnload_V1"
+ keywords ="LoaderKeyword PerfTrackKeyword" opcode="ModuleUnload"
+ task="CLRLoader"
+ symbol="ModuleUnload_V1" message="$(string.RuntimePublisher.ModuleUnload_V1EventMessage)"/>
+
+ <event value="153" version="2" level="win:Informational" template="ModuleLoadUnload_V2"
+ keywords ="LoaderKeyword PerfTrackKeyword" opcode="ModuleUnload"
+ task="CLRLoader"
+ symbol="ModuleUnload_V2" message="$(string.RuntimePublisher.ModuleUnload_V2EventMessage)"/>
+
+ <event value="154" version="0" level="win:Informational" template="AssemblyLoadUnload"
+ keywords ="LoaderKeyword" opcode="AssemblyLoad"
+ task="CLRLoader"
+ symbol="AssemblyLoad" message="$(string.RuntimePublisher.AssemblyLoadEventMessage)"/>
+
+ <event value="154" version="1" level="win:Informational" template="AssemblyLoadUnload_V1"
+ keywords ="LoaderKeyword" opcode="AssemblyLoad"
+ task="CLRLoader"
+ symbol="AssemblyLoad_V1" message="$(string.RuntimePublisher.AssemblyLoad_V1EventMessage)"/>
+
+ <event value="155" version="0" level="win:Informational" template="AssemblyLoadUnload"
+ keywords ="LoaderKeyword" opcode="AssemblyUnload"
+ task="CLRLoader"
+ symbol="AssemblyUnload" message="$(string.RuntimePublisher.AssemblyUnloadEventMessage)"/>
+
+ <event value="155" version="1" level="win:Informational" template="AssemblyLoadUnload_V1"
+ keywords ="LoaderKeyword" opcode="AssemblyUnload"
+ task="CLRLoader"
+ symbol="AssemblyUnload_V1" message="$(string.RuntimePublisher.AssemblyUnload_V1EventMessage)"/>
+
+ <event value="156" version="0" level="win:Informational" template="AppDomainLoadUnload"
+ keywords ="LoaderKeyword" opcode="AppDomainLoad"
+ task="CLRLoader"
+ symbol="AppDomainLoad" message="$(string.RuntimePublisher.AppDomainLoadEventMessage)"/>
+
+ <event value="156" version="1" level="win:Informational" template="AppDomainLoadUnload_V1"
+ keywords ="LoaderKeyword" opcode="AppDomainLoad"
+ task="CLRLoader"
+ symbol="AppDomainLoad_V1" message="$(string.RuntimePublisher.AppDomainLoad_V1EventMessage)"/>
+
+ <event value="157" version="0" level="win:Informational" template="AppDomainLoadUnload"
+ keywords ="LoaderKeyword" opcode="AppDomainUnload"
+ task="CLRLoader"
+ symbol="AppDomainUnload" message="$(string.RuntimePublisher.AppDomainUnloadEventMessage)"/>
+
+ <event value="157" version="1" level="win:Informational" template="AppDomainLoadUnload_V1"
+ keywords ="LoaderKeyword" opcode="AppDomainUnload"
+ task="CLRLoader"
+ symbol="AppDomainUnload_V1" message="$(string.RuntimePublisher.AppDomainUnload_V1EventMessage)"/>
+
+ <event value="158" version="0" level="win:Informational" template="ModuleRange"
+ keywords ="PerfTrackKeyword" opcode="ModuleRangeLoad"
+ task="CLRPerfTrack"
+ symbol="ModuleRangeLoad" message="$(string.RuntimePublisher.ModuleRangeLoadEventMessage)"/>
+
+ <!-- CLR Security events -->
+ <event value="181" version="0" level="win:Verbose" template="StrongNameVerification"
+ keywords ="SecurityKeyword" opcode="win:Start"
+ task="CLRStrongNameVerification"
+ symbol="StrongNameVerificationStart" message="$(string.RuntimePublisher.StrongNameVerificationStartEventMessage)"/>
+
+ <event value="181" version="1" level="win:Verbose" template="StrongNameVerification_V1"
+ keywords ="SecurityKeyword" opcode="win:Start"
+ task="CLRStrongNameVerification"
+ symbol="StrongNameVerificationStart_V1" message="$(string.RuntimePublisher.StrongNameVerificationStart_V1EventMessage)"/>
+
+ <event value="182" version="0" level="win:Informational" template="StrongNameVerification"
+ keywords ="SecurityKeyword" opcode="win:Stop"
+ task="CLRStrongNameVerification"
+ symbol="StrongNameVerificationStop" message="$(string.RuntimePublisher.StrongNameVerificationEndEventMessage)"/>
+
+ <event value="182" version="1" level="win:Informational" template="StrongNameVerification_V1"
+ keywords ="SecurityKeyword" opcode="win:Stop"
+ task="CLRStrongNameVerification"
+ symbol="StrongNameVerificationStop_V1" message="$(string.RuntimePublisher.StrongNameVerificationEnd_V1EventMessage)"/>
+
+ <event value="183" version="0" level="win:Verbose" template="AuthenticodeVerification"
+ keywords ="SecurityKeyword" opcode="win:Start"
+ task="CLRAuthenticodeVerification"
+ symbol="AuthenticodeVerificationStart" message="$(string.RuntimePublisher.AuthenticodeVerificationStartEventMessage)"/>
+
+ <event value="183" version="1" level="win:Verbose" template="AuthenticodeVerification_V1"
+ keywords ="SecurityKeyword" opcode="win:Start"
+ task="CLRAuthenticodeVerification"
+ symbol="AuthenticodeVerificationStart_V1" message="$(string.RuntimePublisher.AuthenticodeVerificationStart_V1EventMessage)"/>
+
+ <event value="184" version="0" level="win:Informational" template="AuthenticodeVerification"
+ keywords ="SecurityKeyword" opcode="win:Stop"
+ task="CLRAuthenticodeVerification"
+ symbol="AuthenticodeVerificationStop" message="$(string.RuntimePublisher.AuthenticodeVerificationEndEventMessage)"/>
+
+ <event value="184" version="1" level="win:Informational" template="AuthenticodeVerification_V1"
+ keywords ="SecurityKeyword" opcode="win:Stop"
+ task="CLRAuthenticodeVerification"
+ symbol="AuthenticodeVerificationStop_V1" message="$(string.RuntimePublisher.AuthenticodeVerificationEnd_V1EventMessage)"/>
+
+ <!-- CLR RuntimeInformation events -->
+ <event value="187" version="0" level="win:Informational" template="RuntimeInformation"
+ opcode="win:Start"
+ task="CLRRuntimeInformation"
+ symbol="RuntimeInformationStart" message="$(string.RuntimePublisher.RuntimeInformationEventMessage)"/>
+
+ <!-- Additional GC events 200-239 -->
+ <event value="200" version="0" level="win:Verbose" template="IncreaseMemoryPressure"
+ keywords="GCKeyword" opcode="IncreaseMemoryPressure"
+ task="GarbageCollection"
+ symbol="IncreaseMemoryPressure" message="$(string.RuntimePublisher.IncreaseMemoryPressureEventMessage)"/>
+
+ <event value="201" version="0" level="win:Verbose" template="DecreaseMemoryPressure"
+ keywords="GCKeyword" opcode="DecreaseMemoryPressure"
+ task="GarbageCollection"
+ symbol="DecreaseMemoryPressure" message="$(string.RuntimePublisher.DecreaseMemoryPressureEventMessage)"/>
+
+ <!-- CLR Debugger events 240-249 -->
+ <event value="240" version="0" level="win:Informational"
+ keywords="DebuggerKeyword" opcode="win:Start"
+ task="DebugIPCEvent"
+ symbol="DebugIPCEventStart" />
+ <event value="241" version="0" level="win:Informational"
+ keywords="DebuggerKeyword" opcode="win:Stop"
+ task="DebugIPCEvent"
+ symbol="DebugIPCEventEnd" />
+ <event value="242" version="0" level="win:Informational"
+ keywords="DebuggerKeyword" opcode="win:Start"
+ task="DebugExceptionProcessing"
+ symbol="DebugExceptionProcessingStart" />
+ <event value="243" version="0" level="win:Informational"
+ keywords="DebuggerKeyword" opcode="win:Stop"
+ task="DebugExceptionProcessing"
+ symbol="DebugExceptionProcessingEnd" />
+ </events>
+ </provider>
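
The providers in this manifest are consumed through ordinary ETW controller sessions. As a minimal C++ sketch of creating a real-time session that later sketches can enable providers on (the session name and helper are illustrative, not part of this manifest):

#include <windows.h>
#include <evntrace.h>
#include <cstdlib>
#include <cwchar>

// Allocates the EVENT_TRACE_PROPERTIES block with room for the session
// name appended, as StartTrace requires, and opens a real-time session.
TRACEHANDLE StartClrSession(const wchar_t* name)
{
    ULONG size = (ULONG)(sizeof(EVENT_TRACE_PROPERTIES)
                         + (wcslen(name) + 1) * sizeof(wchar_t));
    EVENT_TRACE_PROPERTIES* props = (EVENT_TRACE_PROPERTIES*)calloc(1, size);
    if (!props) return 0;
    props->Wnode.BufferSize = size;
    props->Wnode.Flags = WNODE_FLAG_TRACED_GUID;
    props->Wnode.ClientContext = 1;               // QPC timestamps
    props->LogFileMode = EVENT_TRACE_REAL_TIME_MODE;
    props->LoggerNameOffset = sizeof(EVENT_TRACE_PROPERTIES);

    TRACEHANDLE session = 0;
    ULONG status = StartTraceW(&session, name, props);
    free(props);
    return (status == ERROR_SUCCESS) ? session : 0;
}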
+
+
+ <!--CLR Rundown Publisher-->
+ <provider name="Microsoft-Windows-DotNETRuntimeRundown"
+ guid="{A669021C-C450-4609-A035-5AF59AF4DF18}"
+ symbol="MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER"
+ resourceFileName="%INSTALL_PATH%\clretwrc.dll"
+ messageFileName="%INSTALL_PATH%\clretwrc.dll">
+
+ <!--Keywords-->
+ <keywords>
+ <keyword name="LoaderRundownKeyword" mask="0x8"
+ message="$(string.RundownPublisher.LoaderKeywordMessage)" symbol="CLR_RUNDOWNLOADER_KEYWORD"/>
+ <keyword name="JitRundownKeyword" mask="0x10"
+ message="$(string.RundownPublisher.JitKeywordMessage)" symbol="CLR_RUNDOWNJIT_KEYWORD"/>
+ <keyword name="NGenRundownKeyword" mask="0x20"
+ message="$(string.RundownPublisher.NGenKeywordMessage)" symbol="CLR_RUNDOWNNGEN_KEYWORD"/>
+ <keyword name="StartRundownKeyword" mask="0x40"
+ message="$(string.RundownPublisher.StartRundownKeywordMessage)" symbol="CLR_RUNDOWNSTART_KEYWORD"/>
+ <keyword name="EndRundownKeyword" mask="0x100"
+ message="$(string.RundownPublisher.EndRundownKeywordMessage)" symbol="CLR_RUNDOWNEND_KEYWORD"/>
+ <!-- Keyword mask 0x200 is now defunct -->
+ <keyword name="AppDomainResourceManagementRundownKeyword" mask="0x800"
+ message="$(string.RuntimePublisher.AppDomainResourceManagementRundownKeywordMessage)" symbol="CLR_RUNDOWNAPPDOMAINRESOURCEMANAGEMENT_KEYWORD"/>
+ <keyword name="ThreadingKeyword" mask="0x10000"
+ message="$(string.RundownPublisher.ThreadingKeywordMessage)" symbol="CLR_RUNDOWNTHREADING_KEYWORD"/>
+ <keyword name="JittedMethodILToNativeMapRundownKeyword" mask="0x20000"
+ message="$(string.RundownPublisher.JittedMethodILToNativeMapRundownKeywordMessage)" symbol="CLR_RUNDOWNJITTEDMETHODILTONATIVEMAP_KEYWORD"/>
+ <keyword name="OverrideAndSuppressNGenEventsRundownKeyword" mask="0x40000"
+ message="$(string.RundownPublisher.OverrideAndSuppressNGenEventsRundownKeywordMessage)" symbol="CLR_RUNDOWNOVERRIDEANDSUPPRESSNGENEVENTS_KEYWORD"/>
+ <keyword name="PerfTrackRundownKeyword" mask="0x20000000"
+ message="$(string.RundownPublisher.PerfTrackRundownKeywordMessage)" symbol="CLR_RUNDOWNPERFTRACK_KEYWORD"/>
+ <keyword name="StackKeyword" mask="0x40000000"
+ message="$(string.RundownPublisher.StackKeywordMessage)" symbol="CLR_RUNDOWNSTACK_KEYWORD"/>
+ </keywords>
+
+ <!--Tasks-->
+ <tasks>
+ <task name="CLRMethodRundown" symbol="CLR_METHODRUNDOWN_TASK"
+ value="1" eventGUID="{0BCD91DB-F943-454a-A662-6EDBCFBB76D2}"
+ message="$(string.RundownPublisher.MethodTaskMessage)">
+ <opcodes>
+ <opcode name="MethodDCStart" message="$(string.RundownPublisher.MethodDCStartOpcodeMessage)" symbol="CLR_METHODDC_METHODDCSTART_OPCODE" value="35"> </opcode>
+ <opcode name="MethodDCEnd" message="$(string.RundownPublisher.MethodDCEndOpcodeMessage)" symbol="CLR_METHODDC_METHODDCEND_OPCODE" value="36"> </opcode>
+ <opcode name="MethodDCStartVerbose" message="$(string.RundownPublisher.MethodDCStartVerboseOpcodeMessage)" symbol="CLR_METHODDC_METHODDCSTARTVERBOSE_OPCODE" value="39"> </opcode>
+ <opcode name="MethodDCEndVerbose" message="$(string.RundownPublisher.MethodDCEndVerboseOpcodeMessage)" symbol="CLR_METHODDC_METHODDCENDVERBOSE_OPCODE" value="40"> </opcode>
+ <opcode name="MethodDCStartILToNativeMap" message="$(string.RundownPublisher.MethodDCStartILToNativeMapOpcodeMessage)" symbol="CLR_METHODDC_METHODDCSTARTILTONATIVEMAP_OPCODE" value="41"> </opcode>
+ <opcode name="MethodDCEndILToNativeMap" message="$(string.RundownPublisher.MethodDCEndILToNativeMapOpcodeMessage)" symbol="CLR_METHODDC_METHODDCENDILTONATIVEMAP_OPCODE" value="42"> </opcode>
+ <opcode name="DCStartComplete" message="$(string.RundownPublisher.DCStartCompleteOpcodeMessage)" symbol="CLR_METHODDC_DCSTARTCOMPLETE_OPCODE" value="14"> </opcode>
+ <opcode name="DCEndComplete" message="$(string.RundownPublisher.DCEndCompleteOpcodeMessage)" symbol="CLR_METHODDC_DCENDCOMPLETE_OPCODE" value="15"> </opcode>
+ <opcode name="DCStartInit" message="$(string.RundownPublisher.DCStartInitOpcodeMessage)" symbol="CLR_METHODDC_DCSTARTINIT_OPCODE" value="16"> </opcode>
+ <opcode name="DCEndInit" message="$(string.RundownPublisher.DCEndInitOpcodeMessage)" symbol="CLR_METHODDC_DCENDINIT_OPCODE" value="17"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="CLRLoaderRundown" symbol="CLR_LOADERRUNDOWN_TASK"
+ value="2" eventGUID="{5A54F4DF-D302-4fee-A211-6C2C0C1DCB1A}"
+ message="$(string.RundownPublisher.LoaderTaskMessage)">
+ <opcodes>
+ <opcode name="ModuleDCStart" message="$(string.RundownPublisher.ModuleDCStartOpcodeMessage)" symbol="CLR_LOADERDC_MODULEDCSTART_OPCODE" value="35"> </opcode>
+ <opcode name="ModuleDCEnd" message="$(string.RundownPublisher.ModuleDCEndOpcodeMessage)" symbol="CLR_LOADERDC_MODULEDCEND_OPCODE" value="36"> </opcode>
+ <opcode name="AssemblyDCStart" message="$(string.RundownPublisher.AssemblyDCStartOpcodeMessage)" symbol="CLR_LOADERDC_ASSEMBLYDCSTART_OPCODE" value="39"> </opcode>
+ <opcode name="AssemblyDCEnd" message="$(string.RundownPublisher.AssemblyDCEndOpcodeMessage)" symbol="CLR_LOADERDC_ASSEMBLYDCEND_OPCODE" value="40"> </opcode>
+ <opcode name="AppDomainDCStart" message="$(string.RundownPublisher.AppDomainDCStartOpcodeMessage)" symbol="CLR_LOADERDC_APPDOMAINDCSTART_OPCODE" value="43"> </opcode>
+ <opcode name="AppDomainDCEnd" message="$(string.RundownPublisher.AppDomainDCEndOpcodeMessage)" symbol="CLR_LOADERDC_APPDOMAINDCEND_OPCODE" value="44"> </opcode>
+ <opcode name="DomainModuleDCStart" message="$(string.RundownPublisher.DomainModuleDCStartOpcodeMessage)" symbol="CLR_LOADERDC_DOMAINMODULEDCSTART_OPCODE" value="46"> </opcode>
+ <opcode name="DomainModuleDCEnd" message="$(string.RundownPublisher.DomainModuleDCEndOpcodeMessage)" symbol="CLR_LOADERDC_DOMAINMODULEDCEND_OPCODE" value="47"> </opcode>
+ <opcode name="ThreadDC" message="$(string.RundownPublisher.ThreadDCOpcodeMessage)" symbol="CLR_LOADERDC_THREADDC_OPCODE" value="48"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="CLRStackRundown" symbol="CLR_STACKRUNDOWN_TASK"
+ value="11" eventGUID="{d3363dc0-243a-4620-a4d0-8a07d772f533}"
+ message="$(string.RundownPublisher.StackTaskMessage)">
+ <opcodes>
+ <opcode name="CLRStackWalk" message="$(string.RundownPublisher.CLRStackWalkOpcodeMessage)" symbol="CLR_RUNDOWNSTACK_STACKWALK_OPCODE" value="82"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="CLRRuntimeInformationRundown" symbol="CLR_RuntimeInformation_TASK"
+ value="19" eventGUID="{CD7D3E32-65FE-40cd-9225-A2577D203FC3}"
+ message="$(string.RundownPublisher.EEStartupTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+
+ <task name="CLRPerfTrackRundown" symbol="CLR_PERFTRACKRUNDOWN_TASK"
+ value="20" eventGUID="{EAC685F6-2104-4dec-88FD-91E4254221EC}"
+ message="$(string.RundownPublisher.PerfTrackTaskMessage)">
+ <opcodes>
+ <opcode name="ModuleRangeDCStart" message="$(string.RundownPublisher.ModuleRangeDCStartOpcodeMessage)" symbol="CLR_PERFTRACKRUNDOWN_MODULERANGEDCSTART_OPCODE" value="10"> </opcode>
+ <opcode name="ModuleRangeDCEnd" message="$(string.RundownPublisher.ModuleRangeDCEndOpcodeMessage)" symbol="CLR_PERFTRACKRUNDOWN_MODULERANGEDCEND_OPCODE" value="11"> </opcode>
+ </opcodes>
+ </task>
+ </tasks>
+
+ <maps>
+     <!-- BitMaps -->
+     <bitMap name="ModuleRangeTypeMap">
+      <map value="0x4" message="$(string.RundownPublisher.ModuleRangeTypeMap.ColdRangeMessage)"/>
+     </bitMap>
+
+ <bitMap name="AppDomainFlagsMap">
+ <map value="0x1" message="$(string.RundownPublisher.AppDomain.DefaultMapMessage)"/>
+ <map value="0x2" message="$(string.RundownPublisher.AppDomain.ExecutableMapMessage)"/>
+ <map value="0x4" message="$(string.RundownPublisher.AppDomain.SharedMapMessage)"/>
+ </bitMap>
+ <bitMap name="AssemblyFlagsMap">
+ <map value="0x1" message="$(string.RundownPublisher.Assembly.DomainNeutralMapMessage)"/>
+ <map value="0x2" message="$(string.RundownPublisher.Assembly.DynamicMapMessage)"/>
+ <map value="0x4" message="$(string.RundownPublisher.Assembly.NativeMapMessage)"/>
+ <map value="0x8" message="$(string.RundownPublisher.Assembly.CollectibleMapMessage)"/>
+ </bitMap>
+ <bitMap name="ModuleFlagsMap">
+ <map value= "0x1" message="$(string.RundownPublisher.Module.DomainNeutralMapMessage)"/>
+ <map value= "0x2" message="$(string.RundownPublisher.Module.NativeMapMessage)"/>
+ <map value= "0x4" message="$(string.RundownPublisher.Module.DynamicMapMessage)"/>
+ <map value= "0x8" message="$(string.RundownPublisher.Module.ManifestMapMessage)"/>
+ </bitMap>
+ <bitMap name="MethodFlagsMap">
+ <map value="0x1" message="$(string.RundownPublisher.Method.DynamicMapMessage)"/>
+ <map value="0x2" message="$(string.RundownPublisher.Method.GenericMapMessage)"/>
+ <map value="0x4" message="$(string.RundownPublisher.Method.HasSharedGenericCodeMapMessage)"/>
+ <map value="0x8" message="$(string.RundownPublisher.Method.JittedMapMessage)"/>
+ </bitMap>
+ <bitMap name="StartupModeMap">
+ <map value="0x1" message="$(string.RundownPublisher.StartupMode.ManagedExeMapMessage)"/>
+ <map value="0x2" message="$(string.RundownPublisher.StartupMode.HostedCLRMapMessage)"/>
+ <map value="0x4" message="$(string.RundownPublisher.StartupMode.IjwDllMapMessage)"/>
+ <map value="0x8" message="$(string.RundownPublisher.StartupMode.ComActivatedMapMessage)"/>
+ <map value="0x10" message="$(string.RundownPublisher.StartupMode.OtherMapMessage)"/>
+ </bitMap>
+ <bitMap name="RuntimeSkuMap">
+ <map value="0x1" message="$(string.RundownPublisher.RuntimeSku.DesktopCLRMapMessage)"/>
+ <map value="0x2" message="$(string.RundownPublisher.RuntimeSku.CoreCLRMapMessage)"/>
+ </bitMap>
+ <bitMap name="StartupFlagsMap">
+ <map value="0x000001" message="$(string.RundownPublisher.Startup.CONCURRENT_GCMapMessage)"/>
+ <map value="0x000002" message="$(string.RundownPublisher.Startup.LOADER_OPTIMIZATION_SINGLE_DOMAINMapMessage)"/>
+ <map value="0x000004" message="$(string.RundownPublisher.Startup.LOADER_OPTIMIZATION_MULTI_DOMAINMapMessage)"/>
+ <map value="0x000010" message="$(string.RundownPublisher.Startup.LOADER_SAFEMODEMapMessage)"/>
+ <map value="0x000100" message="$(string.RundownPublisher.Startup.LOADER_SETPREFERENCEMapMessage)"/>
+ <map value="0x001000" message="$(string.RundownPublisher.Startup.SERVER_GCMapMessage)"/>
+ <map value="0x002000" message="$(string.RundownPublisher.Startup.HOARD_GC_VMMapMessage)"/>
+ <map value="0x004000" message="$(string.RundownPublisher.Startup.SINGLE_VERSION_HOSTING_INTERFACEMapMessage)"/>
+ <map value="0x010000" message="$(string.RundownPublisher.Startup.LEGACY_IMPERSONATIONMapMessage)"/>
+ <map value="0x020000" message="$(string.RundownPublisher.Startup.DISABLE_COMMITTHREADSTACKMapMessage)"/>
+ <map value="0x040000" message="$(string.RundownPublisher.Startup.ALWAYSFLOW_IMPERSONATIONMapMessage)"/>
+ <map value="0x080000" message="$(string.RundownPublisher.Startup.TRIM_GC_COMMITMapMessage)"/>
+ <map value="0x100000" message="$(string.RundownPublisher.Startup.ETWMapMessage)"/>
+ <map value="0x200000" message="$(string.RundownPublisher.Startup.SERVER_BUILDMapMessage)"/>
+ <map value="0x400000" message="$(string.RundownPublisher.Startup.ARMMapMessage)"/>
+ </bitMap>
+ <bitMap name="ThreadFlagsMap">
+ <map value="0x1" message="$(string.RundownPublisher.ThreadFlags.GCSpecial)"/>
+ <map value="0x2" message="$(string.RundownPublisher.ThreadFlags.Finalizer)"/>
+ <map value="0x4" message="$(string.RundownPublisher.ThreadFlags.ThreadPoolWorker)"/>
+ </bitMap>
+ </maps>
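
The bitmaps above drive how flag fields are rendered in event messages; a consumer holding only the raw integer can test the same bits directly. A small C++ sketch using three of the StartupFlagsMap masks defined above (the constant names are illustrative):

#include <cstdint>
#include <cstdio>

// Bit values copied from the StartupFlagsMap definition above.
constexpr uint32_t STARTUP_CONCURRENT_GC = 0x000001;
constexpr uint32_t STARTUP_SERVER_GC     = 0x001000;
constexpr uint32_t STARTUP_ETW           = 0x100000;

void PrintStartupFlags(uint32_t startupFlags)
{
    if (startupFlags & STARTUP_CONCURRENT_GC) puts("CONCURRENT_GC");
    if (startupFlags & STARTUP_SERVER_GC)     puts("SERVER_GC");
    if (startupFlags & STARTUP_ETW)           puts("ETW");
}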
+
+ <!--Templates-->
+ <templates>
+ <template tid="RuntimeInformationRundown">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="Sku" inType="win:UInt16" map="RuntimeSkuMap" />
+ <data name="BclMajorVersion" inType="win:UInt16" />
+ <data name="BclMinorVersion" inType="win:UInt16" />
+ <data name="BclBuildNumber" inType="win:UInt16" />
+ <data name="BclQfeNumber" inType="win:UInt16" />
+ <data name="VMMajorVersion" inType="win:UInt16" />
+ <data name="VMMinorVersion" inType="win:UInt16" />
+ <data name="VMBuildNumber" inType="win:UInt16" />
+ <data name="VMQfeNumber" inType="win:UInt16" />
+ <data name="StartupFlags" inType="win:UInt32" map="StartupFlagsMap" />
+ <data name="StartupMode" inType="win:UInt8" map="StartupModeMap" />
+ <data name="CommandLine" inType="win:UnicodeString" />
+ <data name="ComObjectGuid" inType="win:GUID" />
+ <data name="RuntimeDllPath" inType="win:UnicodeString" />
+
+ <UserData>
+ <RuntimeInformationRundown xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <Sku> %2 </Sku>
+ <BclMajorVersion> %3 </BclMajorVersion>
+ <BclMinorVersion> %4 </BclMinorVersion>
+ <BclBuildNumber> %5 </BclBuildNumber>
+ <BclQfeNumber> %6 </BclQfeNumber>
+ <VMMajorVersion> %7 </VMMajorVersion>
+ <VMMinorVersion> %8 </VMMinorVersion>
+ <VMBuildNumber> %9 </VMBuildNumber>
+ <VMQfeNumber> %10 </VMQfeNumber>
+ <StartupFlags> %11 </StartupFlags>
+ <StartupMode> %12 </StartupMode>
+ <CommandLine> %13 </CommandLine>
+ <ComObjectGuid> %14 </ComObjectGuid>
+ <RuntimeDllPath> %15 </RuntimeDllPath>
+ </RuntimeInformationRundown>
+ </UserData>
+ </template>
+
+ <template tid="DomainModuleLoadUnloadRundown">
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AssemblyID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleFlags" inType="win:UInt32" map="ModuleFlagsMap" />
+ <data name="Reserved1" inType="win:UInt32" />
+ <data name="ModuleILPath" inType="win:UnicodeString" />
+ <data name="ModuleNativePath" inType="win:UnicodeString" />
+
+ <UserData>
+ <DomainModuleLoadUnloadRundown xmlns="myNs">
+ <ModuleID> %1 </ModuleID>
+ <AssemblyID> %2 </AssemblyID>
+ <AppDomainID> %3 </AppDomainID>
+ <ModuleFlags> %4 </ModuleFlags>
+ <ModuleILPath> %5 </ModuleILPath>
+ <ModuleNativePath> %6 </ModuleNativePath>
+ </DomainModuleLoadUnloadRundown>
+ </UserData>
+ </template>
+
+ <template tid="DomainModuleLoadUnloadRundown_V1">
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AssemblyID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleFlags" inType="win:UInt32" map="ModuleFlagsMap" />
+ <data name="Reserved1" inType="win:UInt32" />
+ <data name="ModuleILPath" inType="win:UnicodeString" />
+ <data name="ModuleNativePath" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <DomainModuleLoadUnloadRundown_V1 xmlns="myNs">
+ <ModuleID> %1 </ModuleID>
+ <AssemblyID> %2 </AssemblyID>
+ <AppDomainID> %3 </AppDomainID>
+ <ModuleFlags> %4 </ModuleFlags>
+ <ModuleILPath> %5 </ModuleILPath>
+ <ModuleNativePath> %6 </ModuleNativePath>
+ <ClrInstanceID> %7 </ClrInstanceID>
+ </DomainModuleLoadUnloadRundown_V1>
+ </UserData>
+ </template>
+
+ <template tid="ModuleLoadUnloadRundown">
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AssemblyID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleFlags" inType="win:UInt32" map="ModuleFlagsMap" />
+ <data name="Reserved1" inType="win:UInt32" />
+ <data name="ModuleILPath" inType="win:UnicodeString" />
+ <data name="ModuleNativePath" inType="win:UnicodeString" />
+
+ <UserData>
+ <ModuleLoadUnloadRundown xmlns="myNs">
+ <ModuleID> %1 </ModuleID>
+ <AssemblyID> %2 </AssemblyID>
+ <ModuleFlags> %3 </ModuleFlags>
+ <ModuleILPath> %4 </ModuleILPath>
+ <ModuleNativePath> %5 </ModuleNativePath>
+ </ModuleLoadUnloadRundown>
+ </UserData>
+ </template>
+
+ <template tid="ModuleLoadUnloadRundown_V1">
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AssemblyID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleFlags" inType="win:UInt32" map="ModuleFlagsMap" />
+ <data name="Reserved1" inType="win:UInt32" />
+ <data name="ModuleILPath" inType="win:UnicodeString" />
+ <data name="ModuleNativePath" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <ModuleLoadUnloadRundown_V1 xmlns="myNs">
+ <ModuleID> %1 </ModuleID>
+ <AssemblyID> %2 </AssemblyID>
+ <ModuleFlags> %3 </ModuleFlags>
+ <ModuleILPath> %4 </ModuleILPath>
+ <ModuleNativePath> %5 </ModuleNativePath>
+ <ClrInstanceID> %6 </ClrInstanceID>
+ </ModuleLoadUnloadRundown_V1>
+ </UserData>
+ </template>
+
+ <template tid="ModuleLoadUnloadRundown_V2">
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AssemblyID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleFlags" inType="win:UInt32" map="ModuleFlagsMap" />
+ <data name="Reserved1" inType="win:UInt32" />
+ <data name="ModuleILPath" inType="win:UnicodeString" />
+ <data name="ModuleNativePath" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="ManagedPdbSignature" inType="win:GUID" />
+ <data name="ManagedPdbAge" inType="win:UInt32" />
+ <data name="ManagedPdbBuildPath" inType="win:UnicodeString" />
+ <data name="NativePdbSignature" inType="win:GUID" />
+ <data name="NativePdbAge" inType="win:UInt32" />
+ <data name="NativePdbBuildPath" inType="win:UnicodeString" />
+
+ <UserData>
+ <ModuleLoadUnloadRundown_V2 xmlns="myNs">
+ <ModuleID> %1 </ModuleID>
+ <AssemblyID> %2 </AssemblyID>
+ <ModuleFlags> %3 </ModuleFlags>
+ <ModuleILPath> %4 </ModuleILPath>
+ <ModuleNativePath> %5 </ModuleNativePath>
+ <ClrInstanceID> %6 </ClrInstanceID>
+ <ManagedPdbSignature> %7 </ManagedPdbSignature>
+ <ManagedPdbAge> %8 </ManagedPdbAge>
+ <ManagedPdbBuildPath> %9 </ManagedPdbBuildPath>
+ <NativePdbSignature> %10 </NativePdbSignature>
+ <NativePdbAge> %11 </NativePdbAge>
+ <NativePdbBuildPath> %12 </NativePdbBuildPath>
+ </ModuleLoadUnloadRundown_V2>
+ </UserData>
+ </template>
+
+ <template tid="AssemblyLoadUnloadRundown">
+ <data name="AssemblyID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AssemblyFlags" inType="win:UInt32" map="AssemblyFlagsMap" />
+ <data name="FullyQualifiedAssemblyName" inType="win:UnicodeString" />
+
+ <UserData>
+ <AssemblyLoadUnloadRundown xmlns="myNs">
+ <AssemblyID> %1 </AssemblyID>
+ <AppDomainID> %2 </AppDomainID>
+ <AssemblyFlags> %3 </AssemblyFlags>
+ <FullyQualifiedAssemblyName> %4 </FullyQualifiedAssemblyName>
+ </AssemblyLoadUnloadRundown>
+ </UserData>
+ </template>
+
+ <template tid="AssemblyLoadUnloadRundown_V1">
+ <data name="AssemblyID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="BindingID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AssemblyFlags" inType="win:UInt32" map="AssemblyFlagsMap" />
+ <data name="FullyQualifiedAssemblyName" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <AssemblyLoadUnloadRundown_V1 xmlns="myNs">
+ <AssemblyID> %1 </AssemblyID>
+ <AppDomainID> %2 </AppDomainID>
+ <BindingID> %3 </BindingID>
+ <AssemblyFlags> %4 </AssemblyFlags>
+ <FullyQualifiedAssemblyName> %5 </FullyQualifiedAssemblyName>
+ <ClrInstanceID> %6 </ClrInstanceID>
+ </AssemblyLoadUnloadRundown_V1>
+ </UserData>
+ </template>
+
+ <template tid="AppDomainLoadUnloadRundown">
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainFlags" inType="win:UInt32" map="AppDomainFlagsMap" />
+ <data name="AppDomainName" inType="win:UnicodeString" />
+
+ <UserData>
+ <AppDomainLoadUnloadRundown xmlns="myNs">
+ <AppDomainID> %1 </AppDomainID>
+ <AppDomainFlags> %2 </AppDomainFlags>
+ <AppDomainName> %3 </AppDomainName>
+ </AppDomainLoadUnloadRundown>
+ </UserData>
+ </template>
+
+ <template tid="AppDomainLoadUnloadRundown_V1">
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainFlags" inType="win:UInt32" map="AppDomainFlagsMap" />
+ <data name="AppDomainName" inType="win:UnicodeString" />
+ <data name="AppDomainIndex" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <AppDomainLoadUnloadRundown_V1 xmlns="myNs">
+ <AppDomainID> %1 </AppDomainID>
+ <AppDomainFlags> %2 </AppDomainFlags>
+ <AppDomainName> %3 </AppDomainName>
+ <AppDomainIndex> %4 </AppDomainIndex>
+        <ClrInstanceID> %5 </ClrInstanceID>
+ </AppDomainLoadUnloadRundown_V1>
+ </UserData>
+ </template>
+
+ <template tid="MethodLoadUnloadRundown">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodStartAddress" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodSize" inType="win:UInt32" />
+ <data name="MethodToken" inType="win:UInt32" />
+ <data name="MethodFlags" inType="win:UInt32" map="MethodFlagsMap" />
+
+ <UserData>
+ <MethodLoadUnloadRundown xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ModuleID> %2 </ModuleID>
+ <MethodStartAddress> %3 </MethodStartAddress>
+ <MethodSize> %4 </MethodSize>
+ <MethodToken> %5 </MethodToken>
+ <MethodFlags> %6 </MethodFlags>
+ </MethodLoadUnloadRundown>
+ </UserData>
+ </template>
+
+ <template tid="MethodLoadUnloadRundown_V1">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodStartAddress" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodSize" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodFlags" inType="win:UInt32" map="MethodFlagsMap" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <MethodLoadUnloadRundown_V1 xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ModuleID> %2 </ModuleID>
+ <MethodStartAddress> %3 </MethodStartAddress>
+ <MethodSize> %4 </MethodSize>
+ <MethodToken> %5 </MethodToken>
+ <MethodFlags> %6 </MethodFlags>
+ <ClrInstanceID> %7 </ClrInstanceID>
+ </MethodLoadUnloadRundown_V1>
+ </UserData>
+ </template>
+
+ <template tid="MethodLoadUnloadRundown_V2">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodStartAddress" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodSize" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodFlags" inType="win:UInt32" map="MethodFlagsMap" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="ReJITID" inType="win:UInt64" outType="win:HexInt64" />
+
+ <UserData>
+ <MethodLoadUnloadRundown_V2 xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ModuleID> %2 </ModuleID>
+ <MethodStartAddress> %3 </MethodStartAddress>
+ <MethodSize> %4 </MethodSize>
+ <MethodToken> %5 </MethodToken>
+ <MethodFlags> %6 </MethodFlags>
+ <ClrInstanceID> %7 </ClrInstanceID>
+ <ReJITID> %8 </ReJITID>
+ </MethodLoadUnloadRundown_V2>
+ </UserData>
+ </template>
+
+ <template tid="MethodLoadUnloadRundownVerbose">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodStartAddress" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodSize" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodFlags" inType="win:UInt32" map="MethodFlagsMap" />
+ <data name="MethodNamespace" inType="win:UnicodeString" />
+ <data name="MethodName" inType="win:UnicodeString" />
+ <data name="MethodSignature" inType="win:UnicodeString" />
+ <UserData>
+ <MethodLoadUnloadRundownVerbose xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ModuleID> %2 </ModuleID>
+ <MethodStartAddress> %3 </MethodStartAddress>
+ <MethodSize> %4 </MethodSize>
+ <MethodToken> %5 </MethodToken>
+ <MethodFlags> %6 </MethodFlags>
+ <MethodNamespace> %7 </MethodNamespace>
+ <MethodName> %8 </MethodName>
+ <MethodSignature> %9 </MethodSignature>
+ </MethodLoadUnloadRundownVerbose>
+ </UserData>
+ </template>
+
+ <template tid="MethodLoadUnloadRundownVerbose_V1">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodStartAddress" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodSize" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodFlags" inType="win:UInt32" map="MethodFlagsMap" />
+ <data name="MethodNamespace" inType="win:UnicodeString" />
+ <data name="MethodName" inType="win:UnicodeString" />
+ <data name="MethodSignature" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <MethodLoadUnloadRundownVerbose_V1 xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ModuleID> %2 </ModuleID>
+ <MethodStartAddress> %3 </MethodStartAddress>
+ <MethodSize> %4 </MethodSize>
+ <MethodToken> %5 </MethodToken>
+ <MethodFlags> %6 </MethodFlags>
+ <MethodNamespace> %7 </MethodNamespace>
+ <MethodName> %8 </MethodName>
+ <MethodSignature> %9 </MethodSignature>
+ <ClrInstanceID> %10 </ClrInstanceID>
+ </MethodLoadUnloadRundownVerbose_V1>
+ </UserData>
+ </template>
+
+ <template tid="MethodLoadUnloadRundownVerbose_V2">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodStartAddress" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodSize" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodToken" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="MethodFlags" inType="win:UInt32" map="MethodFlagsMap" />
+ <data name="MethodNamespace" inType="win:UnicodeString" />
+ <data name="MethodName" inType="win:UnicodeString" />
+ <data name="MethodSignature" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="ReJITID" inType="win:UInt64" outType="win:HexInt64" />
+ <UserData>
+ <MethodLoadUnloadRundownVerbose_V2 xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ModuleID> %2 </ModuleID>
+ <MethodStartAddress> %3 </MethodStartAddress>
+ <MethodSize> %4 </MethodSize>
+ <MethodToken> %5 </MethodToken>
+ <MethodFlags> %6 </MethodFlags>
+ <MethodNamespace> %7 </MethodNamespace>
+ <MethodName> %8 </MethodName>
+ <MethodSignature> %9 </MethodSignature>
+ <ClrInstanceID> %10 </ClrInstanceID>
+ <ReJITID> %11 </ReJITID>
+ </MethodLoadUnloadRundownVerbose_V2>
+ </UserData>
+ </template>
+
+ <template tid="MethodILToNativeMapRundown">
+ <data name="MethodID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ReJITID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="MethodExtent" inType="win:UInt8" />
+ <data name="CountOfMapEntries" inType="win:UInt16" />
+ <data name="ILOffsets" count="CountOfMapEntries" inType="win:UInt32" />
+ <data name="NativeOffsets" count="CountOfMapEntries" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <MethodILToNativeMap xmlns="myNs">
+ <MethodID> %1 </MethodID>
+ <ReJITID> %2 </ReJITID>
+ <MethodExtent> %3 </MethodExtent>
+ <CountOfMapEntries> %4 </CountOfMapEntries>
+ <ClrInstanceID> %5 </ClrInstanceID>
+ </MethodILToNativeMap>
+ </UserData>
+ </template>
+
+ <template tid="DCStartEnd">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <DCStartEnd xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ </DCStartEnd>
+ </UserData>
+ </template>
+
+ <template tid="ClrStackWalk">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="Reserved1" inType="win:UInt8" />
+ <data name="Reserved2" inType="win:UInt8" />
+ <data name="FrameCount" inType="win:UInt32" />
+ <data name="Stack" count="2" inType="win:Pointer" />
+ </template>
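
Note that the ClrStackWalk template declares Stack with a fixed count of 2, but the payload actually carries FrameCount return addresses; decoders appear to be expected to trust FrameCount rather than the declared count. A hedged C++ decoding sketch, assuming 64-bit pointers (a real decoder takes the pointer size from the event header):

#include <cstdint>
#include <cstring>
#include <vector>

struct ClrStackWalkPayload {
    uint16_t clrInstanceId;
    std::vector<uint64_t> frames;
};

// Parses a payload laid out per the ClrStackWalk template above.
bool ParseClrStackWalk(const uint8_t* data, size_t size, ClrStackWalkPayload& out)
{
    if (size < 8) return false;            // fixed part: 2 + 1 + 1 + 4 bytes
    std::memcpy(&out.clrInstanceId, data, 2);
    uint32_t frameCount = 0;
    std::memcpy(&frameCount, data + 4, 4); // Reserved1/Reserved2 skipped
    size_t avail = (size - 8) / 8;         // pointers actually present
    for (size_t i = 0; i < frameCount && i < avail; ++i) {
        uint64_t addr;
        std::memcpy(&addr, data + 8 + i * 8, 8);
        out.frames.push_back(addr);
    }
    return true;
}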
+
+ <template tid="ThreadCreatedRundown">
+ <data name="ManagedThreadID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="Flags" inType="win:UInt32" map="ThreadFlagsMap" />
+ <data name="ManagedThreadIndex" inType="win:UInt32" />
+ <data name="OSThreadID" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <ThreadCreatedRundown xmlns="myNs">
+ <ManagedThreadID> %1 </ManagedThreadID>
+ <AppDomainID> %2 </AppDomainID>
+ <Flags> %3 </Flags>
+ <ManagedThreadIndex> %4 </ManagedThreadIndex>
+ <OSThreadID> %5 </OSThreadID>
+        <ClrInstanceID> %6 </ClrInstanceID>
+ </ThreadCreatedRundown>
+ </UserData>
+ </template>
+
+ <template tid="ModuleRangeRundown">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64"/>
+ <data name="RangeBegin" count="1" inType="win:UInt32" outType="win:HexInt32"/>
+ <data name="RangeSize" count="1" inType="win:UInt32" outType="win:HexInt32"/>
+ <data name="RangeType" map="ModuleRangeTypeMap" inType="win:UInt8"/>
+ <UserData>
+ <ModuleRangeRundown xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <ModuleID> %2 </ModuleID>
+ <RangeBegin> %3 </RangeBegin>
+ <RangeSize> %4 </RangeSize>
+ <RangeType> %5 </RangeType>
+ </ModuleRangeRundown>
+ </UserData>
+ </template>
+ </templates>
+
+ <events>
+ <!-- CLR StackWalk Rundown Events -->
+ <event value="0" version="0" level="win:LogAlways" template="ClrStackWalk"
+ keywords ="StackKeyword" opcode="CLRStackWalk"
+ task="CLRStackRundown"
+ symbol="CLRStackWalkDCStart" message="$(string.RundownPublisher.StackEventMessage)"/>
+
+ <!-- CLR Method Rundown Events -->
+ <event value="141" version="0" level="win:Informational" template="MethodLoadUnloadRundown"
+ keywords ="JitRundownKeyword NGenRundownKeyword" opcode="MethodDCStart"
+ task="CLRMethodRundown"
+ symbol="MethodDCStart" message="$(string.RundownPublisher.MethodDCStartEventMessage)"/>
+
+ <event value="141" version="1" level="win:Informational" template="MethodLoadUnloadRundown_V1"
+ keywords ="JitRundownKeyword NGenRundownKeyword" opcode="MethodDCStart"
+ task="CLRMethodRundown"
+ symbol="MethodDCStart_V1" message="$(string.RundownPublisher.MethodDCStart_V1EventMessage)"/>
+
+ <event value="141" version="2" level="win:Informational" template="MethodLoadUnloadRundown_V2"
+ keywords ="JitRundownKeyword NGenRundownKeyword" opcode="MethodDCStart"
+ task="CLRMethodRundown"
+ symbol="MethodDCStart_V2" message="$(string.RundownPublisher.MethodDCStart_V2EventMessage)"/>
+
+ <event value="142" version="0" level="win:Informational" template="MethodLoadUnloadRundown"
+ keywords ="JitRundownKeyword NGenRundownKeyword" opcode="MethodDCEnd"
+ task="CLRMethodRundown"
+ symbol="MethodDCEnd" message="$(string.RundownPublisher.MethodDCEndEventMessage)"/>
+
+ <event value="142" version="1" level="win:Informational" template="MethodLoadUnloadRundown_V1"
+ keywords ="JitRundownKeyword NGenRundownKeyword" opcode="MethodDCEnd"
+ task="CLRMethodRundown"
+ symbol="MethodDCEnd_V1" message="$(string.RundownPublisher.MethodDCEnd_V1EventMessage)"/>
+
+ <event value="142" version="2" level="win:Informational" template="MethodLoadUnloadRundown_V2"
+ keywords ="JitRundownKeyword NGenRundownKeyword" opcode="MethodDCEnd"
+ task="CLRMethodRundown"
+ symbol="MethodDCEnd_V2" message="$(string.RundownPublisher.MethodDCEnd_V2EventMessage)"/>
+
+ <event value="143" version="0" level="win:Informational" template="MethodLoadUnloadRundownVerbose"
+ keywords ="JitRundownKeyword NGenRundownKeyword" opcode="MethodDCStartVerbose"
+ task="CLRMethodRundown"
+ symbol="MethodDCStartVerbose" message="$(string.RundownPublisher.MethodDCStartVerboseEventMessage)"/>
+
+ <event value="143" version="1" level="win:Informational" template="MethodLoadUnloadRundownVerbose_V1"
+ keywords ="JitRundownKeyword NGenRundownKeyword" opcode="MethodDCStartVerbose"
+ task="CLRMethodRundown"
+ symbol="MethodDCStartVerbose_V1" message="$(string.RundownPublisher.MethodDCStartVerbose_V1EventMessage)"/>
+
+ <event value="143" version="2" level="win:Informational" template="MethodLoadUnloadRundownVerbose_V2"
+ keywords ="JitRundownKeyword NGenRundownKeyword" opcode="MethodDCStartVerbose"
+ task="CLRMethodRundown"
+ symbol="MethodDCStartVerbose_V2" message="$(string.RundownPublisher.MethodDCStartVerbose_V2EventMessage)"/>
+
+ <event value="144" version="0" level="win:Informational" template="MethodLoadUnloadRundownVerbose"
+ keywords ="JitRundownKeyword NGenRundownKeyword" opcode="MethodDCEndVerbose"
+ task="CLRMethodRundown"
+ symbol="MethodDCEndVerbose" message="$(string.RundownPublisher.MethodDCEndVerboseEventMessage)"/>
+
+ <event value="144" version="1" level="win:Informational" template="MethodLoadUnloadRundownVerbose_V1"
+ keywords ="JitRundownKeyword NGenRundownKeyword" opcode="MethodDCEndVerbose"
+ task="CLRMethodRundown"
+ symbol="MethodDCEndVerbose_V1" message="$(string.RundownPublisher.MethodDCEndVerbose_V1EventMessage)"/>
+
+ <event value="144" version="2" level="win:Informational" template="MethodLoadUnloadRundownVerbose_V2"
+ keywords ="JitRundownKeyword NGenRundownKeyword" opcode="MethodDCEndVerbose"
+ task="CLRMethodRundown"
+ symbol="MethodDCEndVerbose_V2" message="$(string.RundownPublisher.MethodDCEndVerbose_V2EventMessage)"/>
+
+ <event value="145" version="0" level="win:Informational"
+ keywords ="JitRundownKeyword JittedMethodILToNativeMapRundownKeyword NGenRundownKeyword LoaderRundownKeyword" opcode="DCStartComplete"
+ task="CLRMethodRundown"
+ symbol="DCStartComplete"/>
+
+ <event value="145" version="1" level="win:Informational" template="DCStartEnd"
+ keywords ="JitRundownKeyword JittedMethodILToNativeMapRundownKeyword NGenRundownKeyword LoaderRundownKeyword" opcode="DCStartComplete"
+ task="CLRMethodRundown"
+ symbol="DCStartComplete_V1" message="$(string.RundownPublisher.DCStartCompleteEventMessage)"/>
+
+ <event value="146" version="0" level="win:Informational"
+ keywords ="JitRundownKeyword JittedMethodILToNativeMapRundownKeyword NGenRundownKeyword LoaderRundownKeyword" opcode="DCEndComplete"
+ task="CLRMethodRundown"
+ symbol="DCEndComplete"/>
+
+ <event value="146" version="1" level="win:Informational" template="DCStartEnd"
+ keywords ="JitRundownKeyword JittedMethodILToNativeMapRundownKeyword NGenRundownKeyword LoaderRundownKeyword" opcode="DCEndComplete"
+ task="CLRMethodRundown"
+ symbol="DCEndComplete_V1" message="$(string.RundownPublisher.DCEndCompleteEventMessage)"/>
+
+ <event value="147" version="0" level="win:Informational"
+ keywords ="JitRundownKeyword JittedMethodILToNativeMapRundownKeyword NGenRundownKeyword LoaderRundownKeyword" opcode="DCStartInit"
+ task="CLRMethodRundown"
+ symbol="DCStartInit"/>
+
+ <event value="147" version="1" level="win:Informational" template="DCStartEnd"
+ keywords ="JitRundownKeyword JittedMethodILToNativeMapRundownKeyword NGenRundownKeyword LoaderRundownKeyword" opcode="DCStartInit"
+ task="CLRMethodRundown"
+ symbol="DCStartInit_V1" message="$(string.RundownPublisher.DCStartInitEventMessage)"/>
+
+ <event value="148" version="0" level="win:Informational"
+ keywords ="JitRundownKeyword JittedMethodILToNativeMapRundownKeyword NGenRundownKeyword LoaderRundownKeyword" opcode="DCEndInit"
+ task="CLRMethodRundown"
+ symbol="DCEndInit"/>
+
+ <event value="148" version="1" level="win:Informational" template="DCStartEnd"
+ keywords ="JitRundownKeyword JittedMethodILToNativeMapRundownKeyword NGenRundownKeyword LoaderRundownKeyword" opcode="DCEndInit"
+ task="CLRMethodRundown"
+ symbol="DCEndInit_V1" message="$(string.RundownPublisher.DCEndInitEventMessage)"/>
+
+ <event value="149" version="0" level="win:Verbose" template="MethodILToNativeMapRundown"
+ keywords ="JittedMethodILToNativeMapRundownKeyword" opcode="MethodDCStartILToNativeMap"
+ task="CLRMethodRundown"
+ symbol="MethodDCStartILToNativeMap"
+ message="$(string.RundownPublisher.MethodDCStartILToNativeMapEventMessage)"/>
+
+ <event value="150" version="0" level="win:Verbose" template="MethodILToNativeMapRundown"
+ keywords ="JittedMethodILToNativeMapRundownKeyword" opcode="MethodDCEndILToNativeMap"
+ task="CLRMethodRundown"
+ symbol="MethodDCEndILToNativeMap"
+ message="$(string.RundownPublisher.MethodDCEndILToNativeMapEventMessage)"/>
+
+ <!-- CLR Loader Rundown Events -->
+ <event value="151" version="0" level="win:Informational" template="DomainModuleLoadUnloadRundown"
+ keywords ="LoaderRundownKeyword" opcode="DomainModuleDCStart"
+ task="CLRLoaderRundown"
+ symbol="DomainModuleDCStart" message="$(string.RundownPublisher.DomainModuleDCStartEventMessage)"/>
+
+ <event value="151" version="1" level="win:Informational" template="DomainModuleLoadUnloadRundown_V1"
+ keywords ="LoaderRundownKeyword" opcode="DomainModuleDCStart"
+ task="CLRLoaderRundown"
+ symbol="DomainModuleDCStart_V1" message="$(string.RundownPublisher.DomainModuleDCStart_V1EventMessage)"/>
+
+ <event value="152" version="0" level="win:Informational" template="DomainModuleLoadUnloadRundown"
+ keywords ="LoaderRundownKeyword" opcode="DomainModuleDCEnd"
+ task="CLRLoaderRundown"
+ symbol="DomainModuleDCEnd" message="$(string.RundownPublisher.DomainModuleDCEndEventMessage)"/>
+
+ <event value="152" version="1" level="win:Informational" template="DomainModuleLoadUnloadRundown_V1"
+ keywords ="LoaderRundownKeyword" opcode="DomainModuleDCEnd"
+ task="CLRLoaderRundown"
+ symbol="DomainModuleDCEnd_V1" message="$(string.RundownPublisher.DomainModuleDCEnd_V1EventMessage)"/>
+
+ <event value="153" version="0" level="win:Informational" template="ModuleLoadUnloadRundown"
+ keywords ="LoaderRundownKeyword" opcode="ModuleDCStart"
+ task="CLRLoaderRundown"
+ symbol="ModuleDCStart" message="$(string.RundownPublisher.ModuleDCStartEventMessage)"/>
+
+ <event value="153" version="1" level="win:Informational" template="ModuleLoadUnloadRundown_V1"
+ keywords ="LoaderRundownKeyword PerfTrackRundownKeyword" opcode="ModuleDCStart"
+ task="CLRLoaderRundown"
+ symbol="ModuleDCStart_V1" message="$(string.RundownPublisher.ModuleDCStart_V1EventMessage)"/>
+
+ <event value="153" version="2" level="win:Informational" template="ModuleLoadUnloadRundown_V2"
+ keywords ="LoaderRundownKeyword PerfTrackRundownKeyword" opcode="ModuleDCStart"
+ task="CLRLoaderRundown"
+ symbol="ModuleDCStart_V2" message="$(string.RundownPublisher.ModuleDCStart_V2EventMessage)"/>
+
+ <event value="154" version="0" level="win:Informational" template="ModuleLoadUnloadRundown"
+ keywords ="LoaderRundownKeyword" opcode="ModuleDCEnd"
+ task="CLRLoaderRundown"
+ symbol="ModuleDCEnd" message="$(string.RundownPublisher.ModuleDCEndEventMessage)"/>
+
+ <event value="154" version="1" level="win:Informational" template="ModuleLoadUnloadRundown_V1"
+ keywords ="LoaderRundownKeyword PerfTrackRundownKeyword" opcode="ModuleDCEnd"
+ task="CLRLoaderRundown"
+ symbol="ModuleDCEnd_V1" message="$(string.RundownPublisher.ModuleDCEnd_V1EventMessage)"/>
+
+ <event value="154" version="2" level="win:Informational" template="ModuleLoadUnloadRundown_V2"
+ keywords ="LoaderRundownKeyword PerfTrackRundownKeyword" opcode="ModuleDCEnd"
+ task="CLRLoaderRundown"
+ symbol="ModuleDCEnd_V2" message="$(string.RundownPublisher.ModuleDCEnd_V2EventMessage)"/>
+
+ <event value="155" version="0" level="win:Informational" template="AssemblyLoadUnloadRundown"
+ keywords ="LoaderRundownKeyword" opcode="AssemblyDCStart"
+ task="CLRLoaderRundown"
+ symbol="AssemblyDCStart" message="$(string.RundownPublisher.AssemblyDCStartEventMessage)"/>
+
+ <event value="155" version="1" level="win:Informational" template="AssemblyLoadUnloadRundown_V1"
+ keywords ="LoaderRundownKeyword" opcode="AssemblyDCStart"
+ task="CLRLoaderRundown"
+ symbol="AssemblyDCStart_V1" message="$(string.RundownPublisher.AssemblyDCStart_V1EventMessage)"/>
+
+ <event value="156" version="0" level="win:Informational" template="AssemblyLoadUnloadRundown"
+ keywords ="LoaderRundownKeyword" opcode="AssemblyDCEnd"
+ task="CLRLoaderRundown"
+ symbol="AssemblyDCEnd" message="$(string.RundownPublisher.AssemblyDCEndEventMessage)"/>
+
+ <event value="156" version="1" level="win:Informational" template="AssemblyLoadUnloadRundown_V1"
+ keywords ="LoaderRundownKeyword" opcode="AssemblyDCEnd"
+ task="CLRLoaderRundown"
+ symbol="AssemblyDCEnd_V1" message="$(string.RundownPublisher.AssemblyDCEnd_V1EventMessage)"/>
+
+ <event value="157" version="0" level="win:Informational" template="AppDomainLoadUnloadRundown"
+ keywords ="LoaderRundownKeyword" opcode="AppDomainDCStart"
+ task="CLRLoaderRundown"
+ symbol="AppDomainDCStart" message="$(string.RundownPublisher.AppDomainDCStartEventMessage)"/>
+
+ <event value="157" version="1" level="win:Informational" template="AppDomainLoadUnloadRundown_V1"
+ keywords ="LoaderRundownKeyword" opcode="AppDomainDCStart"
+ task="CLRLoaderRundown"
+ symbol="AppDomainDCStart_V1" message="$(string.RundownPublisher.AppDomainDCStart_V1EventMessage)"/>
+
+ <event value="158" version="0" level="win:Informational" template="AppDomainLoadUnloadRundown"
+ keywords ="LoaderRundownKeyword" opcode="AppDomainDCEnd"
+ task="CLRLoaderRundown"
+ symbol="AppDomainDCEnd" message="$(string.RundownPublisher.AppDomainDCEndEventMessage)"/>
+
+ <event value="158" version="1" level="win:Informational" template="AppDomainLoadUnloadRundown_V1"
+ keywords ="LoaderRundownKeyword" opcode="AppDomainDCEnd"
+ task="CLRLoaderRundown"
+ symbol="AppDomainDCEnd_V1" message="$(string.RundownPublisher.AppDomainDCEnd_V1EventMessage)"/>
+
+ <event value="159" version="0" level="win:Informational" template="ThreadCreatedRundown"
+ keywords ="AppDomainResourceManagementRundownKeyword ThreadingKeyword" opcode="ThreadDC"
+ task="CLRLoaderRundown"
+ symbol="ThreadDC" message="$(string.RundownPublisher.ThreadCreatedEventMessage)"/>
+
+ <event value="160" version="0" level="win:Informational" template="ModuleRangeRundown"
+ keywords ="PerfTrackRundownKeyword" opcode="ModuleRangeDCStart"
+ task="CLRPerfTrackRundown"
+ symbol="ModuleRangeDCStart" message="$(string.RundownPublisher.ModuleRangeDCStartEventMessage)"/>
+
+ <event value="161" version="0" level="win:Informational" template="ModuleRangeRundown"
+ keywords ="PerfTrackRundownKeyword" opcode="ModuleRangeDCEnd"
+ task="CLRPerfTrackRundown"
+ symbol="ModuleRangeDCEnd" message="$(string.RundownPublisher.ModuleRangeDCEndEventMessage)"/>
+
+ <!-- CLR Runtime Information events for rundown -->
+ <event value="187" version="0" level="win:Informational" template="RuntimeInformationRundown"
+ opcode="win:Start"
+ task="CLRRuntimeInformationRundown"
+ symbol="RuntimeInformationDCStart" message="$(string.RundownPublisher.RuntimeInformationEventMessage)"/>
+ </events>
+ </provider>
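
A controller triggers the DCEnd rundown events above by enabling this provider on an existing session with EndRundownKeyword combined with the payload keywords it wants (StartRundownKeyword works the same way for the DCStart events). A minimal C++ sketch using the GUID and keyword masks defined above; the session handle would come from a StartTrace call like the earlier sketch:

#include <windows.h>
#include <evntrace.h>

// Provider GUID from the <provider> element above.
static const GUID RundownProviderGuid =
    { 0xA669021C, 0xC450, 0x4609,
      { 0xA0, 0x35, 0x5A, 0xF5, 0x9A, 0xF4, 0xDF, 0x18 } };

// Keyword masks from the <keywords> block above.
static const ULONGLONG LoaderRundownKeyword = 0x8;
static const ULONGLONG JitRundownKeyword    = 0x10;
static const ULONGLONG EndRundownKeyword    = 0x100;

ULONG EnableClrRundown(TRACEHANDLE session)
{
    return EnableTraceEx2(
        session, &RundownProviderGuid,
        EVENT_CONTROL_CODE_ENABLE_PROVIDER,
        TRACE_LEVEL_INFORMATIONAL,
        EndRundownKeyword | LoaderRundownKeyword | JitRundownKeyword,
        0,          // MatchAllKeyword: none required
        0,          // Timeout: return without waiting
        nullptr);   // no extra enable parameters
}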
+
+  <!-- CLR Stress Publisher-->
+  <provider name="Microsoft-Windows-DotNETRuntimeStress"
+      guid="{CC2BCBBA-16B6-4cf3-8990-D74C2E8AF500}"
+      symbol="MICROSOFT_WINDOWS_DOTNETRUNTIME_STRESS_PROVIDER"
+      resourceFileName="%INSTALL_PATH%\clretwrc.dll"
+      messageFileName="%INSTALL_PATH%\clretwrc.dll">
+
+ <!--Keywords-->
+ <keywords>
+ <keyword name="StackKeyword" mask="0x40000000"
+ message="$(string.StressPublisher.StackKeywordMessage)" symbol="CLR_STRESSSTACK_KEYWORD"/>
+ </keywords>
+
+ <!--Tasks-->
+ <tasks>
+ <task name="StressLogTask" symbol="CLR_STRESSLOG_TASK" value="1"
+ eventGUID="{EA40C74D-4F65-4561-BB26-656231C8967F}"
+ message="$(string.StressPublisher.StressTaskMessage)">
+ <opcodes>
+ </opcodes>
+ </task>
+
+ <task name="CLRStackStress" symbol="CLR_STACKSTRESS_TASK"
+ value="11" eventGUID="{d3363dc0-243a-4620-a4d0-8a07d772f533}"
+ message="$(string.StressPublisher.StackTaskMessage)">
+ <opcodes>
+ <opcode name="CLRStackWalk" message="$(string.StressPublisher.CLRStackWalkOpcodeMessage)" symbol="CLR_STRESSSTACK_STACKWALK_OPCODE" value="82"> </opcode>
+ </opcodes>
+ </task>
+ </tasks>
+
+ <!--Templates-->
+ <templates>
+ <template tid="StressLog">
+ <data name="Facility" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="LogLevel" inType="win:UInt8" />
+ <data name="Message" inType="win:AnsiString" />
+
+ <UserData>
+ <StressLog xmlns="myNs">
+ <Facility> %1 </Facility>
+ <LogLevel> %2 </LogLevel>
+ <Message> %3 </Message>
+ </StressLog>
+ </UserData>
+ </template>
+
+ <template tid="StressLog_V1">
+ <data name="Facility" inType="win:UInt32" outType="win:HexInt32" />
+ <data name="LogLevel" inType="win:UInt8" />
+ <data name="Message" inType="win:AnsiString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <StressLog_V1 xmlns="myNs">
+ <Facility> %1 </Facility>
+ <LogLevel> %2 </LogLevel>
+ <Message> %3 </Message>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </StressLog_V1>
+ </UserData>
+ </template>
+
+ <template tid="ClrStackWalk">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="Reserved1" inType="win:UInt8" />
+ <data name="Reserved2" inType="win:UInt8" />
+ <data name="FrameCount" inType="win:UInt32" />
+ <data name="Stack" count="2" inType="win:Pointer" />
+ </template>
+ </templates>
+
+ <!--Events-->
+ <events>
+ <event value="0" version="0" level="win:Informational" template="StressLog"
+ task="StressLogTask"
+ opcode="win:Start"
+ symbol="StressLogEvent" message="$(string.StressPublisher.StressLogEventMessage)"/>
+
+ <event value="0" version="1" level="win:Informational" template="StressLog_V1"
+ task="StressLogTask"
+ opcode="win:Start"
+ symbol="StressLogEvent_V1" message="$(string.StressPublisher.StressLog_V1EventMessage)"/>
+
+ <event value="1" version="0" level="win:LogAlways" template="ClrStackWalk"
+ keywords ="StackKeyword" opcode="CLRStackWalk"
+ task="CLRStackStress"
+ symbol="CLRStackWalkStress" message="$(string.StressPublisher.StackEventMessage)"/>
+ </events>
+ </provider>
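
For illustration, the provider side would emit the StressLog_V1 event defined above by passing the payload fields in template order through the manifest-based ETW write APIs. A self-contained C++ sketch (registering per call only to keep it short; a real provider registers once at startup):

#include <windows.h>
#include <evntprov.h>
#include <cstring>

// Provider GUID from the <provider> element above.
static const GUID StressProviderGuid =
    { 0xCC2BCBBA, 0x16B6, 0x4CF3,
      { 0x89, 0x90, 0xD7, 0x4C, 0x2E, 0x8A, 0xF5, 0x00 } };

ULONG WriteStressLogV1(UINT32 facility, UINT8 logLevel,
                       const char* message, UINT16 clrInstanceId)
{
    REGHANDLE reg = 0;
    ULONG status = EventRegister(&StressProviderGuid, nullptr, nullptr, &reg);
    if (status != ERROR_SUCCESS) return status;

    // Matches the event declaration above: value=0, version=1,
    // level=win:Informational (4), opcode=win:Start (1),
    // task=StressLogTask (1), no keywords.
    EVENT_DESCRIPTOR desc = { 0, 1, 0, 4, 1, 1, 0 };

    // Payload fields in the exact order the StressLog_V1 template declares.
    EVENT_DATA_DESCRIPTOR data[4];
    EventDataDescCreate(&data[0], &facility, sizeof(facility));
    EventDataDescCreate(&data[1], &logLevel, sizeof(logLevel));
    EventDataDescCreate(&data[2], message, (ULONG)(strlen(message) + 1));
    EventDataDescCreate(&data[3], &clrInstanceId, sizeof(clrInstanceId));

    status = EventWrite(reg, &desc, 4, data);
    EventUnregister(reg);
    return status;
}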
+
+ <!-- CLR Private Publisher-->
+ <provider name="Microsoft-Windows-DotNETRuntimePrivate"
+ guid="{763FD754-7086-4dfe-95EB-C01A46FAF4CA}"
+ symbol="MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER"
+ resourceFileName="%INSTALL_PATH%\clretwrc.dll"
+ messageFileName="%INSTALL_PATH%\clretwrc.dll">
+
+ <!--Keywords-->
+ <keywords>
+ <keyword name="GCPrivateKeyword" mask="0x00000001"
+ message="$(string.PrivatePublisher.GCPrivateKeywordMessage)" symbol="CLR_PRIVATEGC_KEYWORD"/>
+ <keyword name="BindingKeyword" mask="0x00000002"
+ message="$(string.PrivatePublisher.BindingKeywordMessage)" symbol="CLR_PRIVATEBINDING_KEYWORD"/>
+ <keyword name="NGenForceRestoreKeyword" mask="0x00000004"
+ message="$(string.PrivatePublisher.NGenForceRestoreKeywordMessage)" symbol="CLR_PRIVATENGENFORCERESTORE_KEYWORD"/>
+ <keyword name="PrivateFusionKeyword" mask="0x00000008"
+ message="$(string.PrivatePublisher.PrivateFusionKeywordMessage)" symbol="CLR_PRIVATEFUSION_KEYWORD"/>
+ <keyword name="LoaderHeapPrivateKeyword" mask="0x00000010"
+ message="$(string.PrivatePublisher.LoaderHeapPrivateKeywordMessage)" symbol="CLR_PRIVATELOADERHEAP_KEYWORD"/>
+ <keyword name="SecurityPrivateKeyword" mask="0x00000400"
+ message="$(string.PrivatePublisher.SecurityPrivateKeywordMessage)" symbol="CLR_PRIVATESECURITY_KEYWORD"/>
+ <keyword name="InteropPrivateKeyword" mask="0x2000"
+ message="$(string.PrivatePublisher.InteropPrivateKeywordMessage)" symbol="CLR_INTEROP_KEYWORD"/>
+ <keyword name="GCHandlePrivateKeyword" mask="0x4000"
+ message="$(string.PrivatePublisher.GCHandlePrivateKeywordMessage)" symbol="CLR_PRIVATEGCHANDLE_KEYWORD"/>
+ <keyword name="MulticoreJitPrivateKeyword" mask="0x20000"
+ message="$(string.PrivatePublisher.MulticoreJitPrivateKeywordMessage)" symbol="CLR_PRIVATEMULTICOREJIT_KEYWORD"/>
+ <keyword name="StackKeyword" mask="0x40000000"
+ message="$(string.PrivatePublisher.StackKeywordMessage)" symbol="CLR_PRIVATESTACK_KEYWORD"/>
+ <keyword name="StartupKeyword" mask="0x80000000"
+ message="$(string.PrivatePublisher.StartupKeywordMessage)" symbol="CLR_PRIVATESTARTUP_KEYWORD"/>
+ <keyword name="PerfTrackPrivateKeyword" mask="0x20000000"
+ message="$(string.PrivatePublisher.PerfTrackKeywordMessage)" symbol="CLR_PERFTRACK_PRIVATE_KEYWORD"/>
+
+      <!-- NOTE: This keyword is no longer used; it is kept for backcompat with traces that may already contain it -->
+ <keyword name="DynamicTypeUsageKeyword" mask="0x00000020"
+ message="$(string.PrivatePublisher.DynamicTypeUsageMessage)" symbol="CLR_PRIVATE_DYNAMICTYPEUSAGE_KEYWORD"/>
+ </keywords>
+
+ <!--Tasks-->
+ <tasks>
+ <task name="GarbageCollectionPrivate" symbol="CLR_GCPRIVATE_TASK"
+ value="1" eventGUID="{2f1b6bf6-18ff-4645-9501-15df6c64c2cf}"
+ message="$(string.PrivatePublisher.GarbageCollectionTaskMessage)">
+ <opcodes>
+ <opcode name="GCDecision" message="$(string.PrivatePublisher.GCDecisionOpcodeMessage)" symbol="CLR_PRIVATEGC_GCDECISION_OPCODE" value="132"> </opcode>
+ <opcode name="GCSettings" message="$(string.PrivatePublisher.GCSettingsOpcodeMessage)" symbol="CLR_PRIVATEGC_GCSETTINGS_OPCODE" value="14"> </opcode>
+ <opcode name="GCOptimized" message="$(string.PrivatePublisher.GCOptimizedOpcodeMessage)" symbol="CLR_PRIVATEGC_GCOPTIMIZED_OPCODE" value="16"> </opcode>
+ <opcode name="GCPerHeapHistory" message="$(string.PrivatePublisher.GCPerHeapHistoryOpcodeMessage)" symbol="CLR_PRIVATEGC_GCPERHEAPHISTORY_OPCODE" value="17"> </opcode>
+ <opcode name="GCGlobalHeapHistory" message="$(string.PrivatePublisher.GCGlobalHeapHistoryOpcodeMessage)" symbol="CLR_PRIVATEGC_GCGLOBALHEAPHISTORY_OPCODE" value="18"> </opcode>
+ <opcode name="GCFullNotify" message="$(string.PrivatePublisher.GCFullNotifyOpcodeMessage)" symbol="CLR_PRIVATEGC_GCFULLNOTIFY_OPCODE" value="19"> </opcode>
+ <opcode name="GCJoin" message="$(string.PrivatePublisher.GCJoinOpcodeMessage)" symbol="CLR_PRIVATEGC_JOIN_OPCODE" value="20"> </opcode>
+ <opcode name="PrvGCMarkStackRoots" message="$(string.PrivatePublisher.GCMarkStackRootsOpcodeMessage)" symbol="CLR_PRIVATEGC_MARKSTACKROOTS_OPCODE" value="21"> </opcode>
+ <opcode name="PrvGCMarkFinalizeQueueRoots" message="$(string.PrivatePublisher.GCMarkFinalizeQueueRootsOpcodeMessage)" symbol="CLR_PRIVATEGC_MARKFINALIZEQUEUEROOTS_OPCODE" value="22"> </opcode>
+ <opcode name="PrvGCMarkHandles" message="$(string.PrivatePublisher.GCMarkHandlesOpcodeMessage)" symbol="CLR_PRIVATEGC_MARKHANDLES_OPCODE" value="23"> </opcode>
+ <opcode name="PrvGCMarkCards" message="$(string.PrivatePublisher.GCMarkCardsOpcodeMessage)" symbol="CLR_PRIVATEGC_MARKCARDS_OPCODE" value="24"> </opcode>
+ <opcode name="BGCBegin" message="$(string.PrivatePublisher.BGCBeginOpcodeMessage)" symbol="CLR_PRIVATEGC_BGCBEGIN_OPCODE" value="25"> </opcode>
+ <opcode name="BGC1stNonConEnd" message="$(string.PrivatePublisher.BGC1stNonCondEndOpcodeMessage)" symbol="CLR_PRIVATEGC_BGC1STNONCONEND_OPCODE" value="26"> </opcode>
+ <opcode name="BGC1stConEnd" message="$(string.PrivatePublisher.BGC1stConEndOpcodeMessage)" symbol="CLR_PRIVATEGC_BGC1STCONEND_OPCODE" value="27"> </opcode>
+ <opcode name="BGC2ndNonConBegin" message="$(string.PrivatePublisher.BGC2ndNonConBeginOpcodeMessage)" symbol="CLR_PRIVATEGC_BGC2NDNONCONBEGIN_OPCODE" value="28"> </opcode>
+ <opcode name="BGC2ndNonConEnd" message="$(string.PrivatePublisher.BGC2ndNonConEndOpcodeMessage)" symbol="CLR_PRIVATEGC_BGC2NDNONCONEND_OPCODE" value="29"> </opcode>
+ <opcode name="BGC2ndConBegin" message="$(string.PrivatePublisher.BGC2ndConBeginOpcodeMessage)" symbol="CLR_PRIVATEGC_BGC2NDCONBEGIN_OPCODE" value="30"> </opcode>
+ <opcode name="BGC2ndConEnd" message="$(string.PrivatePublisher.BGC2ndConEndOpcodeMessage)" symbol="CLR_PRIVATEGC_BGC2NDCONEND_OPCODE" value="31"> </opcode>
+ <opcode name="BGCPlanEnd" message="$(string.PrivatePublisher.BGCPlanEndOpcodeMessage)" symbol="CLR_PRIVATEGC_BGCPLANEND_OPCODE" value="32"> </opcode>
+ <opcode name="BGCSweepEnd" message="$(string.PrivatePublisher.BGCSweepEndOpcodeMessage)" symbol="CLR_PRIVATEGC_BGCSWEEPEND_OPCODE" value="33"> </opcode>
+ <opcode name="BGCDrainMark" message="$(string.PrivatePublisher.BGCDrainMarkOpcodeMessage)" symbol="CLR_PRIVATEGC_BGCDRAINMARK_OPCODE" value="34"> </opcode>
+ <opcode name="BGCRevisit" message="$(string.PrivatePublisher.BGCRevisitOpcodeMessage)" symbol="CLR_PRIVATEGC_BGCREVISIT_OPCODE" value="35"> </opcode>
+ <opcode name="BGCOverflow" message="$(string.PrivatePublisher.BGCOverflowOpcodeMessage)" symbol="CLR_PRIVATEGC_BGCOVERFLOW_OPCODE" value="36"> </opcode>
+ <opcode name="BGCAllocWaitBegin" message="$(string.PrivatePublisher.BGCAllocWaitBeginOpcodeMessage)" symbol="CLR_PRIVATEGC_BGCALLOCWAITBEGIN_OPCODE" value="37"> </opcode>
+ <opcode name="BGCAllocWaitEnd" message="$(string.PrivatePublisher.BGCAllocWaitEndOpcodeMessage)" symbol="CLR_PRIVATEGC_BGCALLOCWAITEND_OPCODE" value="38"> </opcode>
+ <opcode name="PrvFinalizeObject" message="$(string.PrivatePublisher.FinalizeObjectOpcodeMessage)" symbol="CLR_PRIVATEGC_FINALIZEOBJECT_OPCODE" value="39"> </opcode>
+ <opcode name="CCWRefCountChange" message="$(string.PrivatePublisher.CCWRefCountChangeOpcodeMessage)" symbol="CLR_PRIVATEGC_CCWREFCOUNTCHANGE_OPCODE" value="40"> </opcode>
+ <opcode name="SetGCHandle" message="$(string.PrivatePublisher.SetGCHandleOpcodeMessage)" symbol="CLR_PRIVATEGC_SETGCHANDLE_OPCODE" value="42"> </opcode>
+ <opcode name="DestroyGCHandle" message="$(string.PrivatePublisher.DestroyGCHandleOpcodeMessage)" symbol="CLR_PRIVATEGC_DESTROYGCHANDLE_OPCODE" value="43"> </opcode>
+ <opcode name="PinPlugAtGCTime" message="$(string.PrivatePublisher.PinPlugAtGCTimeOpcodeMessage)" symbol="CLR_PRIVATEGC_PINGCPLUG_OPCODE" value="44"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="CLRFailFast" symbol="CLR_FAILFAST_TASK"
+ value="2" eventGUID="{EE9EDE12-C5F5-4995-81A2-DCFB5F6B80C8}"
+ message="$(string.PrivatePublisher.FailFastTaskMessage)">
+ <opcodes>
+ <opcode name="FailFast" message="$(string.PrivatePublisher.FailFastOpcodeMessage)" symbol="CLR_FAILFAST_FAILFAST_OPCODE" value="52"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="Startup" symbol="CLR_STARTUP_TASK"
+ value="9" eventGUID="{02D08A4F-FD01-4538-989B-03E437B950F4}"
+ message="$(string.PrivatePublisher.StartupTaskMessage)">
+ <opcodes>
+ <opcode name="EEStartupStart" message="$(string.PrivatePublisher.EEStartupStartOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_EESTARTUPSTART_OPCODE" value="128"> </opcode>
+ <opcode name="EEStartupEnd" message="$(string.PrivatePublisher.EEStartupEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_EESTARTUPEND_OPCODE" value="129"> </opcode>
+ <opcode name="EEConfigSetup" message="$(string.PrivatePublisher.EEConfigSetupOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_EECONFIGSETUP_OPCODE" value="130"> </opcode>
+ <opcode name="EEConfigSetupEnd" message="$(string.PrivatePublisher.EEConfigSetupEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_EECONFIGSETUPEND_OPCODE" value="131"> </opcode>
+ <opcode name="LoadSystemBases" message="$(string.PrivatePublisher.LoadSystemBasesOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_LOADSYSTEMBASES_OPCODE" value="132"> </opcode>
+ <opcode name="LoadSystemBasesEnd" message="$(string.PrivatePublisher.LoadSystemBasesEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_LOADSYSTEMBASESEND_OPCODE" value="133"> </opcode>
+ <opcode name="ExecExe" message="$(string.PrivatePublisher.ExecExeOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_EXEEXE_OPCODE" value="134"> </opcode>
+ <opcode name="ExecExeEnd" message="$(string.PrivatePublisher.ExecExeEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_EXEEXEEND_OPCODE" value="135"> </opcode>
+ <opcode name="Main" message="$(string.PrivatePublisher.MainOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_MAIN_OPCODE" value="136"> </opcode>
+ <opcode name="MainEnd" message="$(string.PrivatePublisher.MainEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_MAINEND_OPCODE" value="137"> </opcode>
+
+ <opcode name="ApplyPolicyStart" message="$(string.PrivatePublisher.ApplyPolicyStartOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_APPLYPOLICYSTART_OPCODE" value="10"> </opcode>
+ <opcode name="ApplyPolicyEnd" message="$(string.PrivatePublisher.ApplyPolicyEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_APPLYPOLICYEND_OPCODE" value="11"> </opcode>
+ <opcode name="LdLibShFolder" message="$(string.PrivatePublisher.LdLibShFolderOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_LDLIBSHFOLDER_OPCODE" value="12"> </opcode>
+ <opcode name="LdLibShFolderEnd" message="$(string.PrivatePublisher.LdLibShFolderEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_LDLIBSHFOLDEREND_OPCODE" value="13"> </opcode>
+ <opcode name="PrestubWorker" message="$(string.PrivatePublisher.PrestubWorkerOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_PRESTUBWORKER_OPCODE" value="14"> </opcode>
+ <opcode name="PrestubWorkerEnd" message="$(string.PrivatePublisher.PrestubWorkerEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_PRESTUBWORKEREND_OPCODE" value="15"> </opcode>
+ <opcode name="GetInstallationStart" message="$(string.PrivatePublisher.GetInstallationStartOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_GETINSTALLATIONSTART_OPCODE" value="16"> </opcode>
+ <opcode name="GetInstallationEnd" message="$(string.PrivatePublisher.GetInstallationEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_GETINSTALLATIONEND_OPCODE" value="17"> </opcode>
+ <opcode name="OpenHModule" message="$(string.PrivatePublisher.OpenHModuleOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_OPENHMODULE_OPCODE" value="18"> </opcode>
+ <opcode name="OpenHModuleEnd" message="$(string.PrivatePublisher.OpenHModuleEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_OPENHMODULEEND_OPCODE" value="19"> </opcode>
+ <opcode name="ExplicitBindStart" message="$(string.PrivatePublisher.ExplicitBindStartOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_EXPLICITBINDSTART_OPCODE" value="20"> </opcode>
+ <opcode name="ExplicitBindEnd" message="$(string.PrivatePublisher.ExplicitBindEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_EXPLICITBINDEND_OPCODE" value="21"> </opcode>
+ <opcode name="ParseXml" message="$(string.PrivatePublisher.ParseXmlOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_PARSEXML_OPCODE" value="22"> </opcode>
+ <opcode name="ParseXmlEnd" message="$(string.PrivatePublisher.ParseXmlEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_PARSEXMLEND_OPCODE" value="23"> </opcode>
+ <opcode name="InitDefaultDomain" message="$(string.PrivatePublisher.InitDefaultDomainOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_INITDEFAULTDOMAIN_OPCODE" value="24"> </opcode>
+ <opcode name="InitDefaultDomainEnd" message="$(string.PrivatePublisher.InitDefaultDomainEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_INITDEFAULTDOMAINEND_OPCODE" value="25"> </opcode>
+ <opcode name="InitSecurity" message="$(string.PrivatePublisher.InitSecurityOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_INITSECURITY_OPCODE" value="26"> </opcode>
+ <opcode name="InitSecurityEnd" message="$(string.PrivatePublisher.InitSecurityEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_INITSECURITYEND_OPCODE" value="27"> </opcode>
+ <opcode name="AllowBindingRedirs" message="$(string.PrivatePublisher.AllowBindingRedirsOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_ALLOWBINDINGREDIRS_OPCODE" value="28"> </opcode>
+ <opcode name="AllowBindingRedirsEnd" message="$(string.PrivatePublisher.AllowBindingRedirsEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_ALLOWBINDINGREDIRSEND_OPCODE" value="29"> </opcode>
+ <opcode name="EEConfigSync" message="$(string.PrivatePublisher.EEConfigSyncOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_EECONFIGSYNC_OPCODE" value="30"> </opcode>
+ <opcode name="EEConfigSyncEnd" message="$(string.PrivatePublisher.EEConfigSyncEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_EECONFIGSYNCEND_OPCODE" value="31"> </opcode>
+ <opcode name="FusionBinding" message="$(string.PrivatePublisher.FusionBindingOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_FUSIONBINDING_OPCODE" value="32"> </opcode>
+ <opcode name="FusionBindingEnd" message="$(string.PrivatePublisher.FusionBindingEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_FUSIONBINDINGEND_OPCODE" value="33"> </opcode>
+ <opcode name="LoaderCatchCall" message="$(string.PrivatePublisher.LoaderCatchCallOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_LOADERCATCHCALL_OPCODE" value="34"> </opcode>
+ <opcode name="LoaderCatchCallEnd" message="$(string.PrivatePublisher.LoaderCatchCallEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_LOADERCATCHCALLEND_OPCODE" value="35"> </opcode>
+ <opcode name="FusionInit" message="$(string.PrivatePublisher.FusionInitOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_FUSIONINIT_OPCODE" value="36"> </opcode>
+ <opcode name="FusionInitEnd" message="$(string.PrivatePublisher.FusionInitEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_FUSIONINITEND_OPCODE" value="37"> </opcode>
+ <opcode name="FusionAppCtx" message="$(string.PrivatePublisher.FusionAppCtxOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_FUSIONAPPCTX_OPCODE" value="38"> </opcode>
+ <opcode name="FusionAppCtxEnd" message="$(string.PrivatePublisher.FusionAppCtxEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_FUSIONAPPCTXEND_OPCODE" value="39"> </opcode>
+ <opcode name="Fusion2EE" message="$(string.PrivatePublisher.Fusion2EEOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_FUSION2EE_OPCODE" value="40"> </opcode>
+ <opcode name="Fusion2EEEnd" message="$(string.PrivatePublisher.Fusion2EEEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_FUSION2EEEND_OPCODE" value="41"> </opcode>
+ <opcode name="SecurityCatchCall" message="$(string.PrivatePublisher.SecurityCatchCallOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_SECURITYCATCHCALL_OPCODE" value="42"> </opcode>
+ <opcode name="SecurityCatchCallEnd" message="$(string.PrivatePublisher.SecurityCatchCallEndOpcodeMessage)" symbol="CLR_PRIVATESTARTUP_SECURITYCATCHCALLEND_OPCODE" value="43"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="Binding" symbol="CLR_BINDING_TASK"
+ value="10" eventGUID="{E90E32BA-E396-4e6a-A790-0A08C6C925DC}"
+ message="$(string.PrivatePublisher.BindingTaskMessage)">
+ <opcodes>
+ <opcode name="BindingPolicyPhaseStart" message="$(string.PrivatePublisher.BindingPolicyPhaseStartOpcodeMessage)" symbol="CLR_PRIVATEBINDING_BINDINGPOLICYPHASESTART_OPCODE" value="51"> </opcode>
+ <opcode name="BindingPolicyPhaseEnd" message="$(string.PrivatePublisher.BindingPolicyPhaseEndOpcodeMessage)" symbol="CLR_PRIVATEBINDING_BINDINGPOLICYPHASEEND_OPCODE" value="52"> </opcode>
+ <opcode name="BindingNgenPhaseStart" message="$(string.PrivatePublisher.BindingNgenPhaseStartOpcodeMessage)" symbol="CLR_PRIVATEBINDING_BINDINGNGENPHASESTART_OPCODE" value="53"> </opcode>
+ <opcode name="BindingNgenPhaseEnd" message="$(string.PrivatePublisher.BindingNgenPhaseEndOpcodeMessage)" symbol="CLR_PRIVATEBINDING_BINDINGNGENPHASEEND_OPCODE" value="54"> </opcode>
+ <opcode name="BindingLookupAndProbingPhaseStart" message="$(string.PrivatePublisher.BindingLoopupAndProbingPhaseStartOpcodeMessage)" symbol="CLR_PRIVATEBINDING_BINDINGLOOKUPANDPROBINGPHASESTART_OPCODE" value="55"> </opcode>
+ <opcode name="BindingLookupAndProbingPhaseEnd" message="$(string.PrivatePublisher.BindingLookupAndProbingPhaseEndOpcodeMessage)" symbol="CLR_PRIVATEBINDING_BINDINGLOOKUPANDPROBINGPHASEEND_OPCODE" value="56"> </opcode>
+ <opcode name="LoaderPhaseStart" message="$(string.PrivatePublisher.LoaderPhaseStartOpcodeMessage)" symbol="CLR_PRIVATEBINDING_LOADERPHASESTART_OPCODE" value="57"> </opcode>
+ <opcode name="LoaderPhaseEnd" message="$(string.PrivatePublisher.LoaderPhaseEndOpcodeMessage)" symbol="CLR_PRIVATEBINDING_LOADERPHASEEND_OPCODE" value="58"> </opcode>
+ <opcode name="BindingPhaseStart" message="$(string.PrivatePublisher.BindingPhaseStartOpcodeMessage)" symbol="CLR_PRIVATEBINDING_BINDINGPHASESTART_OPCODE" value="59"> </opcode>
+ <opcode name="BindingPhaseEnd" message="$(string.PrivatePublisher.BindingPhaseEndOpcodeMessage)" symbol="CLR_PRIVATEBINDING_BINDINGPHASEEND_OPCODE" value="60"> </opcode>
+ <opcode name="BindingDownloadPhaseStart" message="$(string.PrivatePublisher.BindingDownloadPhaseStartOpcodeMessage)" symbol="CLR_PRIVATEBINDING_BINDINGDOWNLOADPHASESTART_OPCODE" value="61"> </opcode>
+ <opcode name="BindingDownloadPhaseEnd" message="$(string.PrivatePublisher.BindingDownloadPhaseEndOpcodeMessage)" symbol="CLR_PRIVATEBINDING_BINDINGDOWNLOADPHASEEND_OPCODE" value="62"> </opcode>
+ <opcode name="LoaderAssemblyInitPhaseStart" message="$(string.PrivatePublisher.LoaderAssemblyInitPhaseStartOpcodeMessage)" symbol="CLR_PRIVATEBINDING_LOADERASSEMBLYINITPHASESTART_OPCODE" value="63"> </opcode>
+ <opcode name="LoaderAssemblyInitPhaseEnd" message="$(string.PrivatePublisher.LoaderAssemblyInitPhaseEndOpcodeMessage)" symbol="CLR_PRIVATEBINDING_LOADERASSEMBLYINITPHASEEND_OPCODE" value="64"> </opcode>
+ <opcode name="LoaderMappingPhaseStart" message="$(string.PrivatePublisher.LoaderMappingPhaseStartOpcodeMessage)" symbol="CLR_PRIVATEBINDING_LOADERMAPPINGPHASESTART_OPCODE" value="65"> </opcode>
+ <opcode name="LoaderMappingPhaseEnd" message="$(string.PrivatePublisher.LoaderMappingPhaseEndOpcodeMessage)" symbol="CLR_PRIVATEBINDING_LOADERMAPPINGPHASEEND_OPCODE" value="66"> </opcode>
+ <opcode name="LoaderDeliverEventsPhaseStart" message="$(string.PrivatePublisher.LoaderDeliverEventPhaseStartOpcodeMessage)" symbol="CLR_PRIVATEBINDING_LOADERDELIVERYEVENTSPHASESTART_OPCODE" value="67"> </opcode>
+ <opcode name="LoaderDeliverEventsPhaseEnd" message="$(string.PrivatePublisher.LoaderDeliverEventsPhaseEndOpcodeMessage)" symbol="CLR_PRIVATEBINDING_LOADERDELIVERYEVENTSPHASEEND_OPCODE" value="68"> </opcode>
+ <opcode name="FusionMessage" message="$(string.PrivatePublisher.FusionMessageOpcodeMessage)" symbol="CLR_PRIVATEBINDING_FUSIONMESSAGE_OPCODE" value="70"> </opcode>
+ <opcode name="FusionErrorCode" message="$(string.PrivatePublisher.FusionErrorCodeOpcodeMessage)" symbol="CLR_PRIVATEBINDING_FUSIONERRORCODE_OPCODE" value="71"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="CLRStackPrivate" symbol="CLR_STACKPRIVATE_TASK"
+ value="11" eventGUID="{d3363dc0-243a-4620-a4d0-8a07d772f533}"
+ message="$(string.PrivatePublisher.StackTaskMessage)">
+ <opcodes>
+ <opcode name="CLRStackWalk" message="$(string.PrivatePublisher.CLRStackWalkOpcodeMessage)" symbol="CLR_PRIVATESTACK_STACKWALK_OPCODE" value="82"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="EvidenceGeneratedTask" symbol="CLR_EVIDENCE_GENERATED_TASK"
+ value="12" eventGUID="{24333617-5ae4-4f9e-a5c5-5ede1bc59207}"
+ message="$(string.PrivatePublisher.EvidenceGeneratedTaskMessage)">
+ <opcodes>
+ <opcode name="EvidenceGenerated" message="$(string.PrivatePublisher.EvidenceGeneratedMessage)" symbol="CLR_EVIDENCEGENERATED_OPCODE" value="10"/>
+ </opcodes>
+ </task>
+
+ <task name="CLRNgenBinder" symbol="CLR_NGEN_BINDER_TASK"
+ value="13" eventGUID="{861f5339-19d6-4873-b350-7b03228bda7c}"
+ message="$(string.PrivatePublisher.NgenBinderTaskMessage)">
+ <opcodes>
+ <opcode name="NgenBind" message="$(string.PrivatePublisher.NgenBindOpcodeMessage)" symbol="CLR_NGEN_BINDER_OPCODE" value="69"></opcode>
+ </opcodes>
+ </task>
+
+ <task name="TransparencyComputation" symbol="CLR_TRANSPARENCY_COMPUTATION_TASK"
+ value="14" eventGUID="{e2444377-ddf9-4589-a885-08d6092521df}"
+ message="$(string.PrivatePublisher.TransparencyComputationMessage)">
+ <opcodes>
+ <opcode name="ModuleTransparencyComputationStart" message="$(string.PrivatePublisher.ModuleTransparencyComputationStartMessage)" symbol="CLR_MODULE_TRANSPARENCY_COMPUTATION_START_OPCODE" value="83"/>
+ <opcode name="ModuleTransparencyComputationEnd" message="$(string.PrivatePublisher.ModuleTransparencyComputationEndMessage)" symbol="CLR_MODULE_TRANSPARENCY_COMPUTATION_END_OPCODE" value="84"/>
+ <opcode name="TypeTransparencyComputationStart" message="$(string.PrivatePublisher.TypeTransparencyComputationStartMessage)" symbol="CLR_TYPE_TRANSPARENCY_COMPUTATION_START_OPCODE" value="85"/>
+ <opcode name="TypeTransparencyComputationEnd" message="$(string.PrivatePublisher.TypeTransparencyComputationEndMessage)" symbol="CLR_TYPE_TRANSPARENCY_COMPUTATION_END_OPCODE" value="86"/>
+ <opcode name="MethodTransparencyComputationStart" message="$(string.PrivatePublisher.MethodTransparencyComputationStartMessage)" symbol="CLR_METHOD_TRANSPARENCY_COMPUTATION_START_OPCODE" value="87"/>
+ <opcode name="MethodTransparencyComputationEnd" message="$(string.PrivatePublisher.MethodTransparencyComputationEndMessage)" symbol="CLR_METHOD_TRANSPARENCY_COMPUTATION_END_OPCODE" value="88"/>
+ <opcode name="FieldTransparencyComputationStart" message="$(string.PrivatePublisher.FieldTransparencyComputationStartMessage)" symbol="CLR_FIELD_TRANSPARENCY_COMPUTATION_START_OPCODE" value="89"/>
+ <opcode name="FieldTransparencyComputationEnd" message="$(string.PrivatePublisher.FieldTransparencyComputationEndMessage)" symbol="CLR_FIELD_TRANSPARENCY_COMPUTATION_END_OPCODE" value="90"/>
+ <opcode name="TokenTransparencyComputationStart" message="$(string.PrivatePublisher.TokenTransparencyComputationStartMessage)" symbol="CLR_TOKEN_TRANSPARENCY_COMPUTATION_START_OPCODE" value="91"/>
+ <opcode name="TokenTransparencyComputationEnd" message="$(string.PrivatePublisher.TokenTransparencyComputationEndMessage)" symbol="CLR_TOKEN_TRANSPARENCY_COMPUTATION_END_OPCODE" value="92"/>
+ </opcodes>
+ </task>
+
+ <task name="LoaderHeapAllocation" symbol="CLR_LOADERHEAPALLOCATIONPRIVATE_TASK"
+ value="16" eventGUID="{87f1e966-d604-41ba-b1ab-183849dff29d}"
+ message="$(string.PrivatePublisher.LoaderHeapAllocationPrivateTaskMessage)">
+ <opcodes>
+ <opcode name="AllocRequest" message="$(string.PrivatePublisher.LoaderHeapPrivateAllocRequestMessage)" symbol="CLR_LOADERHEAP_ALLOCREQUEST_OPCODE" value="97"/>
+ </opcodes>
+ </task>
+
+ <task name="CLRMulticoreJit" symbol="CLR_MULTICOREJIT_TASK"
+ value="17" eventGUID="{B85AD9E5-658B-4215-8DDB-834040F4BC10}"
+ message="$(string.PrivatePublisher.MulticoreJitTaskMessage)">
+ <opcodes>
+ <opcode name="Common" message="$(string.PrivatePublisher.MulticoreJitOpcodeMessage)" symbol="CLR_MULTICOREJIT_COMMON_OPCODE" value="10"> </opcode>
+ <opcode name="MethodCodeReturned" message="$(string.PrivatePublisher.MulticoreJitOpcodeMethodCodeReturnedMessage)" symbol="CLR_MULTICOREJIT_METHODCODERETURNED_OPCODE" value="11"> </opcode>
+ </opcodes>
+ </task>
+
+ <task name="CLRPerfTrackPrivate" symbol="CLR_PERFTRACK_PRIVATE_TASK"
+ value="20" eventGUID="{EAC685F6-2104-4dec-88FD-91E4254221EC}"
+ message="$(string.PrivatePublisher.PerfTrackTaskMessage)">
+ <opcodes>
+ <opcode name="ModuleRangeLoadPrivate" message="$(string.PrivatePublisher.ModuleRangeLoadOpcodeMessage)" symbol="CLR_PERFTRACK_PRIVATE_MODULE_RANGE_LOAD_OPCODE" value="10"> </opcode>
+ </opcodes>
+ </task>
+
+      <!-- NOTE: These are no longer used; they are kept for backward compatibility with traces that may already contain these events. -->
+ <task name="DynamicTypeUsage" symbol="CLR_DYNAMICTYPEUSAGE_TASK"
+ value="22" eventGUID="{4F67E18D-EEDD-4056-B8CE-DD822FE54553}"
+ message="$(string.PrivatePublisher.DynamicTypeUsageTaskMessage)">
+ <opcodes>
+ <opcode name="IInspectableRuntimeClassName" message="$(string.PrivatePublisher.IInspectableRuntimeClassNameOpcodeMessage)" symbol="CLR_DYNAMICTYPEUSAGE_IINSPECTABLERUNTIMECLASSNAME_OPCODE" value="11"> </opcode>
+ <opcode name="WinRTUnbox" message="$(string.PrivatePublisher.WinRTUnboxOpcodeMessage)" symbol="CLR_DYNAMICTYPEUSAGE_WINRTUNBOX_OPCODE" value="12"> </opcode>
+ <opcode name="CreateRCW" message="$(string.PrivatePublisher.CreateRCWOpcodeMessage)" symbol="CLR_DYNAMICTYPEUSAGE_CREATERCW_OPCODE" value="13"> </opcode>
+ <opcode name="RCWVariance" message="$(string.PrivatePublisher.RCWVarianceOpcodeMessage)" symbol="CLR_DYNAMICTYPEUSAGE_RCWVARIANCE_OPCODE" value="14"> </opcode>
+ <opcode name="RCWIEnumerableCasting" message="$(string.PrivatePublisher.RCWIEnumerableCastingOpcodeMessage)" symbol="CLR_DYNAMICTYPEUSAGE_RCWIENUMERABLECASTING_OPCODE" value="15"> </opcode>
+ <opcode name="CreateCCW" message="$(string.PrivatePublisher.CreateCCWOpcodeMessage)" symbol="CLR_DYNAMICTYPEUSAGE_CREATECCW_OPCODE" value="16"> </opcode>
+ <opcode name="CCWVariance" message="$(string.PrivatePublisher.CCWVarianceOpcodeMessage)" symbol="CLR_DYNAMICTYPEUSAGE_CCWVARIANCE_OPCODE" value="17"> </opcode>
+ <opcode name="ObjectVariantMarshallingToNative" message="$(string.PrivatePublisher.ObjectVariantMarshallingToNativeOpcodeMessage)" symbol="CLR_DYNAMICTYPEUSAGE_OBJECTVARIANTMARSHALLINGTONATIVE_OPCODE" value="18"> </opcode>
+ <opcode name="GetTypeFromGUID" message="$(string.PrivatePublisher.GetTypeFromGUIDOpcodeMessage)" symbol="CLR_DYNAMICTYPEUSAGE_GETTYPEFROMGUID_OPCODE" value="19"> </opcode>
+ <opcode name="GetTypeFromProgID" message="$(string.PrivatePublisher.GetTypeFromProgIDOpcodeMessage)" symbol="CLR_DYNAMICTYPEUSAGE_GETTYPEFROMPROGID_OPCODE" value="20"> </opcode>
+ <opcode name="ConvertToCallbackEtw" message="$(string.PrivatePublisher.ConvertToCallbackEtwOpcodeMessage)" symbol="CLR_DYNAMICTYPEUSAGE_CONVERTTOCALLBACKETW_OPCODE" value="21"> </opcode>
+ <opcode name="BeginCreateManagedReference" message="$(string.PrivatePublisher.BeginCreateManagedReferenceOpcodeMessage)" symbol="CLR_DYNAMICTYPEUSAGE_BEGINCREATEMANAGEDREFERENCE_OPCODE" value="22"> </opcode>
+ <opcode name="EndCreateManagedReference" message="$(string.PrivatePublisher.EndCreateManagedReferenceOpcodeMessage)" symbol="CLR_DYNAMICTYPEUSAGE_ENDCREATEMANAGEDREFERENCE_OPCODE" value="23"> </opcode>
+ <opcode name="ObjectVariantMarshallingToManaged" message="$(string.PrivatePublisher.ObjectVariantMarshallingToManagedOpcodeMessage)" symbol="CLR_DYNAMICTYPEUSAGE_OBJECTVARIANTMARSHALLINGTOMANAGED_OPCODE" value="24"> </opcode>
+ </opcodes>
+ </task>
+ </tasks>
+
+ <maps>
+ <valueMap name="ModuleRangeSectionTypeMap">
+ <map value="0x1" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.ModuleSection)"/>
+ <map value="0x2" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.EETableSection)"/>
+ <map value="0x3" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.WriteDataSection)"/>
+ <map value="0x4" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.WriteableDataSection)"/>
+ <map value="0x5" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.DataSection)"/>
+ <map value="0x6" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.RVAStaticsSection)"/>
+ <map value="0x7" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.EEDataSection)"/>
+ <map value="0x8" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.DelayLoadInfoTableEagerSection)"/>
+ <map value="0x9" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.DelayLoadInfoTableSection)"/>
+ <map value="0xA" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.EEReadonlyData)"/>
+ <map value="0xB" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.ReadonlyData)"/>
+ <map value="0xC" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.ClassSection)"/>
+ <map value="0xD" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.CrossDomainInfoSection)"/>
+ <map value="0xE" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.MethodDescSection)"/>
+ <map value="0xF" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.MethodDescWriteableSection)"/>
+ <map value="0x10" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.ExceptionSection)"/>
+ <map value="0x11" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.InstrumentSection)"/>
+ <map value="0x12" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.VirtualImportThunkSection)"/>
+ <map value="0x13" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.ExternalMethodThunkSection)"/>
+ <map value="0x14" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.HelperTableSection)"/>
+ <map value="0x15" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.MethodPrecodeWriteableSection)"/>
+ <map value="0x16" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.MethodPrecodeWriteSection)"/>
+ <map value="0x17" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.MethodPrecodeSection)"/>
+ <map value="0x18" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.Win32ResourcesSection)"/>
+ <map value="0x19" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.HeaderSection)"/>
+ <map value="0x1A" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.MetadataSection)"/>
+ <map value="0x1B" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.DelayLoadInfoSection)"/>
+ <map value="0x1C" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.ImportTableSection)"/>
+ <map value="0x1D" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.CodeSection)"/>
+ <map value="0x1E" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.CodeHeaderSection)"/>
+ <map value="0x1F" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.CodeManagerSection)"/>
+ <map value="0x20" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.UnwindDataSection)"/>
+ <map value="0x21" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.RuntimeFunctionSection)"/>
+ <map value="0x22" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.StubsSection)"/>
+ <map value="0x23" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.StubDispatchDataSection)"/>
+ <map value="0x24" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.ExternalMethodDataSection)"/>
+ <map value="0x25" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.DelayLoadInfoDelayListSection)"/>
+ <map value="0x26" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.ReadonlySharedSection)"/>
+ <map value="0x27" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.ReadonlySection)"/>
+ <map value="0x28" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.ILSection)"/>
+ <map value="0x29" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.GCInfoSection)"/>
+ <map value="0x2A" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.ILMetadataSection)"/>
+ <map value="0x2B" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.ResourcesSection)"/>
+ <map value="0x2C" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.CompressedMapsSection)"/>
+ <map value="0x2D" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.DebugSection)"/>
+ <map value="0x2E" message="$(string.PrivatePublisher.ModuleRangeSectionTypeMap.BaseRelocsSection)"/>
+ </valueMap>
+
+ <valueMap name="GCHandleKindMap">
+ <map value="0x0" message="$(string.PrivatePublisher.GCHandleKind.WeakShortMessage)"/>
+ <map value="0x1" message="$(string.PrivatePublisher.GCHandleKind.WeakLongMessage)"/>
+ <map value="0x2" message="$(string.PrivatePublisher.GCHandleKind.StrongMessage)"/>
+ <map value="0x3" message="$(string.PrivatePublisher.GCHandleKind.PinnedMessage)"/>
+ <map value="0x4" message="$(string.PrivatePublisher.GCHandleKind.VariableMessage)"/>
+ <map value="0x5" message="$(string.PrivatePublisher.GCHandleKind.RefCountedMessage)"/>
+ <map value="0x6" message="$(string.PrivatePublisher.GCHandleKind.DependentMessage)"/>
+ <map value="0x7" message="$(string.PrivatePublisher.GCHandleKind.AsyncPinnedMessage)"/>
+ <map value="0x8" message="$(string.PrivatePublisher.GCHandleKind.SizedRefMessage)"/>
+ </valueMap>
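+        <!-- Annotation (editor's note, not part of the manifest): the map above
+             tags the Kind field of the PrvSetGCHandle template. A hedged C++
+             mirror of these values, with names taken from the map messages:
+
+               // Hypothetical mirror of GCHandleKindMap; values follow the map above.
+               enum GCHandleKind : unsigned int {
+                   WeakShort   = 0x0,
+                   WeakLong    = 0x1,
+                   Strong      = 0x2,
+                   Pinned      = 0x3,
+                   Variable    = 0x4,
+                   RefCounted  = 0x5,
+                   Dependent   = 0x6,
+                   AsyncPinned = 0x7,
+                   SizedRef    = 0x8,
+               };
+        -->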
+
+ <bitMap name="ModuleRangeIBCTypeMap">
+ <map value="0x1" message="$(string.PrivatePublisher.ModuleRangeIBCTypeMap.IBCUnprofiledSectionMessage)"/>
+ <map value="0x2" message="$(string.PrivatePublisher.ModuleRangeIBCTypeMap.IBCProfiledSectionMessage)"/>
+ </bitMap>
+ <bitMap name="ModuleRangeTypeMap">
+ <map value="0x1" message="$(string.PrivatePublisher.ModuleRangeTypeMap.HotRangeMessage)"/>
+ <map value="0x2" message="$(string.PrivatePublisher.ModuleRangeTypeMap.WarmRangeMessage)"/>
+ <map value="0x4" message="$(string.PrivatePublisher.ModuleRangeTypeMap.ColdRangeMessage)"/>
+ <map value="0x8" message="$(string.PrivatePublisher.ModuleRangeTypeMap.HotColdRangeMessage)"/>
+ </bitMap>
+ </maps>
+
+ <!--Templates-->
+ <templates>
+ <!--Private Templates-->
+ <template tid="ClrStackWalk">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="Reserved1" inType="win:UInt8" />
+ <data name="Reserved2" inType="win:UInt8" />
+ <data name="FrameCount" inType="win:UInt32" />
+ <data name="Stack" count="2" inType="win:Pointer" />
+ </template>
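+      <!-- Annotation (editor's note): the Stack field above declares count="2",
+           but FrameCount carries the real number of frames; decoders generally
+           read FrameCount pointer-sized addresses from the tail of the payload
+           rather than trusting the declared count. A hedged layout sketch,
+           assuming a 64-bit trace and the Windows integer typedefs:
+
+             #include <windows.h>
+
+             // Hypothetical fixed-size header preceding the frame addresses.
+             struct ClrStackWalkHeader {
+                 UINT16 ClrInstanceID;
+                 UINT8  Reserved1;
+                 UINT8  Reserved2;
+                 UINT32 FrameCount;   // number of addresses that follow
+             };
+
+             // Frames start immediately after the header (8 bytes each here).
+             inline const UINT64* Frames(const ClrStackWalkHeader* p) {
+                 return reinterpret_cast<const UINT64*>(p + 1);
+             }
+      -->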
+
+ <template tid="EvidenceGenerated">
+ <data name="Type" inType="win:UInt32" outType="xs:unsignedInt"/>
+ <data name="AppDomain" inType="win:UInt32" outType="xs:unsignedInt"/>
+ <data name="ILImage" inType="win:UnicodeString"/>
+ <data name="ClrInstanceID" inType="win:UInt16"/>
+
+ <UserData>
+ <EvidenceGenerated xmlns="myNs">
+ <Type> %1 </Type>
+ <AppDomain> %2 </AppDomain>
+ <ILImage> %3 </ILImage>
+ </EvidenceGenerated>
+ </UserData>
+ </template>
+
+ <template tid="GCDecision">
+ <data name="DoCompact" inType="win:Boolean" />
+
+ <UserData>
+ <GCDecision xmlns="myNs">
+ <DoCompact> %1 </DoCompact>
+ </GCDecision>
+ </UserData>
+ </template>
+
+ <template tid="GCDecision_V1">
+ <data name="DoCompact" inType="win:Boolean" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCDecision_V1 xmlns="myNs">
+ <DoCompact> %1 </DoCompact>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </GCDecision_V1>
+ </UserData>
+ </template>
+
+ <template tid="PrvGCMark">
+ <data name="HeapNum" inType="win:UInt32" />
+
+ <UserData>
+ <PrvGCMark xmlns="myNs">
+ <HeapNum> %1 </HeapNum>
+ </PrvGCMark>
+ </UserData>
+ </template>
+
+ <template tid="PrvGCMark_V1">
+ <data name="HeapNum" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <PrvGCMark_V1 xmlns="myNs">
+ <HeapNum> %1 </HeapNum>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </PrvGCMark_V1>
+ </UserData>
+ </template>
+
+ <template tid="GCPerHeapHistory">
+ </template>
+
+ <template tid="GCPerHeapHistory_V1">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCPerHeapHistory_V1 xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ </GCPerHeapHistory_V1>
+ </UserData>
+ </template>
+
+ <template tid="GCGlobalHeap">
+ <data name="FinalYoungestDesired" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="NumHeaps" inType="win:Int32" />
+ <data name="CondemnedGeneration" inType="win:UInt32" />
+ <data name="Gen0ReductionCount" inType="win:UInt32" />
+ <data name="Reason" inType="win:UInt32" />
+ <data name="GlobalMechanisms" inType="win:UInt32" />
+
+ <UserData>
+ <GCGlobalHeap xmlns="myNs">
+ <FinalYoungestDesired> %1 </FinalYoungestDesired>
+ <NumHeaps> %2 </NumHeaps>
+ <CondemnedGeneration> %3 </CondemnedGeneration>
+ <Gen0ReductionCount> %4 </Gen0ReductionCount>
+ <Reason> %5 </Reason>
+ <GlobalMechanisms> %6 </GlobalMechanisms>
+ </GCGlobalHeap>
+ </UserData>
+ </template>
+
+ <template tid="GCGlobalHeap_V1">
+ <data name="FinalYoungestDesired" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="NumHeaps" inType="win:Int32" />
+ <data name="CondemnedGeneration" inType="win:UInt32" />
+ <data name="Gen0ReductionCount" inType="win:UInt32" />
+ <data name="Reason" inType="win:UInt32" />
+ <data name="GlobalMechanisms" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCGlobalHeap_V1 xmlns="myNs">
+ <FinalYoungestDesired> %1 </FinalYoungestDesired>
+ <NumHeaps> %2 </NumHeaps>
+ <CondemnedGeneration> %3 </CondemnedGeneration>
+ <Gen0ReductionCount> %4 </Gen0ReductionCount>
+ <Reason> %5 </Reason>
+ <GlobalMechanisms> %6 </GlobalMechanisms>
+ <ClrInstanceID> %7 </ClrInstanceID>
+ </GCGlobalHeap_V1>
+ </UserData>
+ </template>
+
+ <template tid="GCJoin">
+ <data name="Heap" inType="win:UInt32" />
+ <data name="JoinTime" inType="win:UInt32" />
+ <data name="JoinType" inType="win:UInt32" />
+
+ <UserData>
+ <GCJoin xmlns="myNs">
+ <Heap> %1 </Heap>
+ <JoinTime> %2 </JoinTime>
+ <JoinType> %3 </JoinType>
+ </GCJoin>
+ </UserData>
+ </template>
+
+ <template tid="GCJoin_V1">
+ <data name="Heap" inType="win:UInt32" />
+ <data name="JoinTime" inType="win:UInt32" />
+ <data name="JoinType" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCJoin_V1 xmlns="myNs">
+ <Heap> %1 </Heap>
+ <JoinTime> %2 </JoinTime>
+ <JoinType> %3 </JoinType>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </GCJoin_V1>
+ </UserData>
+ </template>
+
+ <template tid="GCOptimized">
+ <data name="DesiredAllocation" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="NewAllocation" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="GenerationNumber" inType="win:UInt32" />
+
+ <UserData>
+ <GCOptimized xmlns="myNs">
+ <DesiredAllocation> %1 </DesiredAllocation>
+ <NewAllocation> %2 </NewAllocation>
+ <GenerationNumber> %3 </GenerationNumber>
+ </GCOptimized>
+ </UserData>
+ </template>
+
+ <template tid="GCOptimized_V1">
+ <data name="DesiredAllocation" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="NewAllocation" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="GenerationNumber" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCOptimized_V1 xmlns="myNs">
+ <DesiredAllocation> %1 </DesiredAllocation>
+ <NewAllocation> %2 </NewAllocation>
+ <GenerationNumber> %3 </GenerationNumber>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </GCOptimized_V1>
+ </UserData>
+ </template>
+
+ <template tid="GCSettings">
+ <data name="SegmentSize" inType="win:UInt64" />
+ <data name="LargeObjectSegmentSize" inType="win:UInt64" />
+ <data name="ServerGC" inType="win:Boolean" />
+
+ <UserData>
+ <GCSettings xmlns="myNs">
+ <SegmentSize> %1 </SegmentSize>
+ <LargeObjectSegmentSize> %2 </LargeObjectSegmentSize>
+ <ServerGC> %3 </ServerGC>
+ </GCSettings>
+ </UserData>
+ </template>
+
+ <template tid="GCSettings_V1">
+ <data name="SegmentSize" inType="win:UInt64" />
+ <data name="LargeObjectSegmentSize" inType="win:UInt64" />
+ <data name="ServerGC" inType="win:Boolean" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCSettings_V1 xmlns="myNs">
+ <SegmentSize> %1 </SegmentSize>
+ <LargeObjectSegmentSize> %2 </LargeObjectSegmentSize>
+ <ServerGC> %3 </ServerGC>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </GCSettings_V1>
+ </UserData>
+ </template>
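+      <!-- Annotation (editor's note): a minimal sketch, assuming the standard
+           manifested-event APIs from evntprov.h, of how a writer could emit a
+           payload matching GCSettings_V1; the provider handle, event descriptor,
+           and function name are illustrative, not from this file:
+
+             #include <windows.h>
+             #include <evntprov.h>
+
+             // Hypothetical emitter; one EVENT_DATA_DESCRIPTOR per <data>
+             // element, in template order.
+             void FireGCSettingsV1(REGHANDLE hProvider, const EVENT_DESCRIPTOR* pDesc,
+                                   UINT64 segSize, UINT64 lohSegSize,
+                                   BOOL fServerGC, UINT16 clrInstanceId)
+             {
+                 EVENT_DATA_DESCRIPTOR d[4];
+                 EventDataDescCreate(&d[0], &segSize,       sizeof(segSize));
+                 EventDataDescCreate(&d[1], &lohSegSize,    sizeof(lohSegSize));
+                 EventDataDescCreate(&d[2], &fServerGC,     sizeof(fServerGC));     // win:Boolean is 4 bytes
+                 EventDataDescCreate(&d[3], &clrInstanceId, sizeof(clrInstanceId));
+                 EventWrite(hProvider, pDesc, 4, d);
+             }
+      -->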
+
+ <template tid="BGCDrainMark">
+ <data name="Objects" inType="win:UInt64" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <BGCDrainMark xmlns="myNs">
+ <Objects> %1 </Objects>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </BGCDrainMark>
+ </UserData>
+ </template>
+
+ <template tid="BGCRevisit">
+ <data name="Pages" inType="win:UInt64" />
+ <data name="Objects" inType="win:UInt64" />
+ <data name="IsLarge" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <BGCRevisit xmlns="myNs">
+ <Pages> %1 </Pages>
+ <Objects> %2 </Objects>
+ <IsLarge> %3 </IsLarge>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </BGCRevisit>
+ </UserData>
+ </template>
+
+ <template tid="BGCOverflow">
+ <data name="Min" inType="win:UInt64" />
+ <data name="Max" inType="win:UInt64" />
+ <data name="Objects" inType="win:UInt64" />
+ <data name="IsLarge" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <BGCOverflow xmlns="myNs">
+ <Min> %1 </Min>
+ <Max> %2 </Max>
+ <Objects> %3 </Objects>
+ <IsLarge> %4 </IsLarge>
+ <ClrInstanceID> %5 </ClrInstanceID>
+ </BGCOverflow>
+ </UserData>
+ </template>
+
+ <template tid="BGCAllocWait">
+ <data name="Reason" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <BGCAllocWait xmlns="myNs">
+ <Reason> %1 </Reason>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </BGCAllocWait>
+ </UserData>
+ </template>
+
+ <template tid="GCFullNotify">
+ <data name="GenNumber" inType="win:UInt32" />
+ <data name="IsAlloc" inType="win:UInt32" />
+
+ <UserData>
+ <GCFullNotify xmlns="myNs">
+ <GenNumber> %1 </GenNumber>
+ <IsAlloc> %2 </IsAlloc>
+ </GCFullNotify>
+ </UserData>
+ </template>
+
+ <template tid="GCFullNotify_V1">
+ <data name="GenNumber" inType="win:UInt32" />
+ <data name="IsAlloc" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <GCFullNotify_V1 xmlns="myNs">
+ <GenNumber> %1 </GenNumber>
+ <IsAlloc> %2 </IsAlloc>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ </GCFullNotify_V1>
+ </UserData>
+ </template>
+
+ <template tid="GCNoUserData">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <GCNoUserData xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ </GCNoUserData>
+ </UserData>
+ </template>
+
+ <template tid="Startup">
+ <UserData>
+ <Startup xmlns="myNs">
+ </Startup>
+ </UserData>
+ </template>
+
+ <template tid="Startup_V1">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <Startup_V1 xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ </Startup_V1>
+ </UserData>
+ </template>
+
+ <template tid="FusionMessage">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="Prepend" inType="win:Boolean" />
+ <data name="Message" inType="win:UnicodeString"/>
+ <UserData>
+ <FusionMessage xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <Prepend> %2 </Prepend>
+ <Message> %3 </Message>
+ </FusionMessage>
+ </UserData>
+ </template>
+
+ <template tid="FusionErrorCode">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="Category" inType="win:UInt32" />
+ <data name="ErrorCode" inType="win:UInt32" />
+ <UserData>
+ <FusionErrorCode xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <Category> %2 </Category>
+ <ErrorCode> %3 </ErrorCode>
+ </FusionErrorCode>
+ </UserData>
+ </template>
+
+ <template tid="Binding">
+ <data name="AppDomainID" inType="win:UInt32" />
+ <data name="LoadContextID" inType="win:UInt32" />
+ <data name="FromLoaderCache" inType="win:UInt32" />
+ <data name="DynamicLoad" inType="win:UInt32" />
+ <data name="AssemblyCodebase" inType="win:UnicodeString" />
+ <data name="AssemblyName" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <Binding xmlns="myNs">
+ <AppDomainID> %1 </AppDomainID>
+ <LoadContextID> %2 </LoadContextID>
+ <FromLoaderCache> %3 </FromLoaderCache>
+ <DynamicLoad> %4 </DynamicLoad>
+ <AssemblyCodebase> %5 </AssemblyCodebase>
+ <AssemblyName> %6 </AssemblyName>
+ <ClrInstanceID> %7 </ClrInstanceID>
+ </Binding>
+ </UserData>
+ </template>
+
+ <template tid="NgenBindEvent">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="BindingID" inType="win:UInt64" />
+ <data name="ReasonCode" inType="win:UInt32" />
+ <data name="AssemblyName" inType="win:UnicodeString" />
+
+ <UserData>
+ <NgenBindEvent xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <BindingID> %2 </BindingID>
+ <ReasonCode> %3 </ReasonCode>
+ <AssemblyName> %4 </AssemblyName>
+ </NgenBindEvent>
+ </UserData>
+ </template>
+
+ <template tid="ModuleTransparencyCalculation">
+ <data name="Module" inType="win:UnicodeString" />
+ <data name="AppDomainID" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <ModuleTransparencyCalculation xmlns="myNs">
+ <Module> %1 </Module>
+ <AppDomainID> %2 </AppDomainID>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ </ModuleTransparencyCalculation>
+ </UserData>
+ </template>
+
+ <template tid="TypeTransparencyCalculation">
+ <data name="Type" inType="win:UnicodeString" />
+ <data name="Module" inType="win:UnicodeString" />
+ <data name="AppDomainID" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <TypeTransparencyCalculation xmlns="myNs">
+ <Type> %1 </Type>
+ <Module> %2 </Module>
+ <AppDomainID> %3 </AppDomainID>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </TypeTransparencyCalculation>
+ </UserData>
+ </template>
+
+ <template tid="MethodTransparencyCalculation">
+ <data name="Method" inType="win:UnicodeString" />
+ <data name="Module" inType="win:UnicodeString" />
+ <data name="AppDomainID" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <MethodTransparencyCalculation xmlns="myNs">
+ <Method> %1 </Method>
+ <Module> %2 </Module>
+ <AppDomainID> %3 </AppDomainID>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </MethodTransparencyCalculation>
+ </UserData>
+ </template>
+
+ <template tid="FieldTransparencyCalculation">
+ <data name="Field" inType="win:UnicodeString" />
+ <data name="Module" inType="win:UnicodeString" />
+ <data name="AppDomainID" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <FieldTransparencyCalculation xmlns="myNs">
+ <Field> %1 </Field>
+ <Module> %2 </Module>
+ <AppDomainID> %3 </AppDomainID>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </FieldTransparencyCalculation>
+ </UserData>
+ </template>
+
+ <template tid="TokenTransparencyCalculation">
+ <data name="Token" inType="win:UInt32" />
+ <data name="Module" inType="win:UnicodeString" />
+ <data name="AppDomainID" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <TokenTransparencyCalculation xmlns="myNs">
+ <Token> %1 </Token>
+ <Module> %2 </Module>
+ <AppDomainID> %3 </AppDomainID>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </TokenTransparencyCalculation>
+ </UserData>
+ </template>
+
+ <template tid="ModuleTransparencyCalculationResult">
+ <data name="Module" inType="win:UnicodeString" />
+ <data name="AppDomainID" inType="win:UInt32" />
+ <data name="IsAllCritical" inType="win:Boolean" />
+ <data name="IsAllTransparent" inType="win:Boolean" />
+ <data name="IsTreatAsSafe" inType="win:Boolean" />
+ <data name="IsOpportunisticallyCritical" inType="win:Boolean" />
+ <data name="SecurityRuleSet" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <ModuleTransparencyCalculationResult xmlns="myNs">
+ <Module> %1 </Module>
+ <AppDomainID> %2 </AppDomainID>
+ <IsAllCritical> %3 </IsAllCritical>
+ <IsAllTransparent> %4 </IsAllTransparent>
+ <IsTreatAsSafe> %5 </IsTreatAsSafe>
+ <IsOpportunisticallyCritical> %6 </IsOpportunisticallyCritical>
+ <SecurityRuleSet> %7 </SecurityRuleSet>
+ <ClrInstanceID> %8 </ClrInstanceID>
+ </ModuleTransparencyCalculationResult>
+ </UserData>
+ </template>
+
+ <template tid="TypeTransparencyCalculationResult">
+ <data name="Type" inType="win:UnicodeString" />
+ <data name="Module" inType="win:UnicodeString" />
+ <data name="AppDomainID" inType="win:UInt32" />
+ <data name="IsAllCritical" inType="win:Boolean" />
+ <data name="IsAllTransparent" inType="win:Boolean" />
+ <data name="IsCritical" inType="win:Boolean" />
+ <data name="IsTreatAsSafe" inType="win:Boolean" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <TypeTransparencyCalculationResult xmlns="myNs">
+ <Type> %1 </Type>
+ <Module> %2 </Module>
+          <AppDomainID> %3 </AppDomainID>
+ <IsAllCritical> %4 </IsAllCritical>
+ <IsAllTransparent> %5 </IsAllTransparent>
+ <IsCritical> %6 </IsCritical>
+ <IsTreatAsSafe> %7 </IsTreatAsSafe>
+ <ClrInstanceID> %8 </ClrInstanceID>
+ </TypeTransparencyCalculationResult>
+ </UserData>
+ </template>
+
+ <template tid="MethodTransparencyCalculationResult">
+ <data name="Method" inType="win:UnicodeString" />
+ <data name="Module" inType="win:UnicodeString" />
+ <data name="AppDomainID" inType="win:UInt32" />
+ <data name="IsCritical" inType="win:Boolean" />
+ <data name="IsTreatAsSafe" inType="win:Boolean" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <MethodTransparencyCalculationResult xmlns="myNs">
+ <Method> %1 </Method>
+ <Module> %2 </Module>
+ <AppDomainID> %3 </AppDomainID>
+ <IsCritical> %4 </IsCritical>
+ <IsTreatAsSafe> %5 </IsTreatAsSafe>
+ <ClrInstanceID> %6 </ClrInstanceID>
+ </MethodTransparencyCalculationResult>
+ </UserData>
+ </template>
+
+ <template tid="FieldTransparencyCalculationResult">
+ <data name="Field" inType="win:UnicodeString" />
+ <data name="Module" inType="win:UnicodeString" />
+ <data name="AppDomainID" inType="win:UInt32" />
+ <data name="IsCritical" inType="win:Boolean" />
+ <data name="IsTreatAsSafe" inType="win:Boolean" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <FieldTransparencyCalculationResult xmlns="myNs">
+ <Field> %1 </Field>
+ <Module> %2 </Module>
+ <AppDomainID> %3 </AppDomainID>
+ <IsCritical> %4 </IsCritical>
+ <IsTreatAsSafe> %5 </IsTreatAsSafe>
+ <ClrInstanceID> %6 </ClrInstanceID>
+ </FieldTransparencyCalculationResult>
+ </UserData>
+ </template>
+
+ <template tid="TokenTransparencyCalculationResult">
+ <data name="Token" inType="win:UInt32" />
+ <data name="Module" inType="win:UnicodeString" />
+ <data name="AppDomainID" inType="win:UInt32" />
+ <data name="IsCritical" inType="win:Boolean" />
+ <data name="IsTreatAsSafe" inType="win:Boolean" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+
+ <UserData>
+ <TokenTransparencyCalculationResult xmlns="myNs">
+ <Token> %1 </Token>
+ <Module> %2 </Module>
+ <AppDomainID> %3 </AppDomainID>
+ <IsCritical> %4 </IsCritical>
+ <IsTreatAsSafe> %5 </IsTreatAsSafe>
+ <ClrInstanceID> %6 </ClrInstanceID>
+ </TokenTransparencyCalculationResult>
+ </UserData>
+ </template>
+
+ <template tid="FailFast">
+ <data name="FailFastUserMessage" inType="win:UnicodeString" />
+ <data name="FailedEIP" inType="win:Pointer" />
+ <data name="OSExitCode" inType="win:UInt32" />
+ <data name="ClrExitCode" inType="win:UInt32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <FailFast xmlns="myNs">
+ <FailFastUserMessage> %1 </FailFastUserMessage>
+ <FailedEIP> %2 </FailedEIP>
+ <OSExitCode> %3 </OSExitCode>
+ <ClrExitCode> %4 </ClrExitCode>
+ <ClrInstanceID> %5 </ClrInstanceID>
+ </FailFast>
+ </UserData>
+ </template>
+
+ <template tid="PrvFinalizeObject">
+ <data name="TypeID" inType="win:Pointer" />
+ <data name="ObjectID" inType="win:Pointer" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="TypeName" inType="win:UnicodeString" />
+ </template>
+
+ <template tid="CCWRefCountChange">
+ <data name="HandleID" inType="win:Pointer" />
+ <data name="ObjectID" inType="win:Pointer" />
+ <data name="COMInterfacePointer" inType="win:Pointer" />
+ <data name="NewRefCount" inType="win:UInt32" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ClassName" inType="win:AnsiString" />
+ <data name="NameSpace" inType="win:AnsiString" />
+ <data name="Operation" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ </template>
+
+ <template tid="PinPlugAtGCTime">
+ <data name="PlugStart" inType="win:Pointer" />
+ <data name="PlugEnd" inType="win:Pointer" />
+ <data name="GapBeforeSize" inType="win:Pointer" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ </template>
+
+ <template tid="PrvDestroyGCHandle">
+ <data name="HandleID" inType="win:Pointer" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ </template>
+
+ <template tid="PrvSetGCHandle">
+ <data name="HandleID" inType="win:Pointer" />
+ <data name="ObjectID" inType="win:Pointer" />
+ <data name="Kind" map="GCHandleKindMap" inType="win:UInt32" />
+ <data name="Generation" inType="win:UInt32" />
+ <data name="AppDomainID" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ </template>
+
+ <template tid="LoaderHeapPrivate">
+ <data name="LoaderHeapPtr" inType="win:Pointer" />
+ <data name="MemoryAddress" inType="win:Pointer" />
+ <data name="RequestSize" inType="win:UInt32" />
+      <!-- We had a weird problem where the EtwCallout callback (which does stack
+           traces) was not being called for this event only. Adding these fields,
+           which make the signature of this event the same as SetGCHandle, avoids
+           the problem. Ideally this padding gets ripped out at some point. -->
+ <data name="Unused1" inType="win:UInt32" />
+ <data name="Unused2" inType="win:UInt64" outType="win:HexInt64" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <LoaderHeapPrivate xmlns="myNs">
+ <LoaderHeapPtr> %1 </LoaderHeapPtr>
+ <MemoryAddress> %2 </MemoryAddress>
+ <RequestSize> %3 </RequestSize>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </LoaderHeapPrivate>
+ </UserData>
+ </template>
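+      <!-- Annotation (editor's note): a writer for this template would pass
+           Unused1 = 0 and Unused2 = 0 so the six-field payload shape matches
+           PrvSetGCHandle, per the comment above; the EventWrite pattern is the
+           same as in the GCSettings_V1 sketch earlier, with the two zeroed
+           fields inserted before ClrInstanceID. -->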
+
+ <template tid="ModuleRangePrivate">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="ModuleID" inType="win:UInt64" outType="win:HexInt64"/>
+ <data name="RangeBegin" count="1" inType="win:UInt32" outType="win:HexInt32"/>
+ <data name="RangeSize" count="1" inType="win:UInt32" outType="win:HexInt32"/>
+ <data name="RangeType" map="ModuleRangeTypeMap" inType="win:UInt8"/>
+ <data name="IBCType" map="ModuleRangeIBCTypeMap" inType="win:UInt8"/>
+ <data name="SectionType" map="ModuleRangeSectionTypeMap" inType="win:UInt16" />
+ <UserData>
+ <ModuleRange xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <ModuleID> %2 </ModuleID>
+ <RangeBegin> %3 </RangeBegin>
+ <RangeSize> %4 </RangeSize>
+ <RangeType> %5 </RangeType>
+ <IBCType> %6 </IBCType>
+ <SectionType> %7 </SectionType>
+ </ModuleRange>
+ </UserData>
+ </template>
+
+ <template tid="MulticoreJitPrivate">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="String1" inType="win:UnicodeString" />
+ <data name="String2" inType="win:UnicodeString" />
+ <data name="Int1" inType="win:Int32" />
+ <data name="Int2" inType="win:Int32" />
+ <data name="Int3" inType="win:Int32" />
+ <UserData>
+ <MulticoreJit xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <String1> %2 </String1>
+ <String2> %3 </String2>
+ <Int1> %4 </Int1>
+ <Int2> %5 </Int2>
+ <Int3> %6 </Int3>
+ </MulticoreJit>
+ </UserData>
+ </template>
+
+ <template tid="MulticoreJitMethodCodeReturnedPrivate">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <data name="ModuleID" inType="win:UInt64" />
+ <data name="MethodID" inType="win:UInt64" />
+ <UserData>
+ <MulticoreJitMethodCodeReturned xmlns="myNs">
+ <ClrInstanceID> %1 </ClrInstanceID>
+ <ModuleID> %2 </ModuleID>
+ <MethodID> %3 </MethodID>
+ </MulticoreJitMethodCodeReturned>
+ </UserData>
+ </template>
+
+ <template tid="DynamicTypeUsePrivate">
+ <data name="TypeName" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <DynamicTypeUse xmlns="myNs">
+ <TypeName> %1 </TypeName>
+ <ClrInstanceID> %2 </ClrInstanceID>
+ </DynamicTypeUse>
+ </UserData>
+ </template>
+
+ <template tid="DynamicTypeUseTwoParametersPrivate">
+ <data name="TypeName" inType="win:UnicodeString" />
+ <data name="SecondTypeName" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <DynamicTypeUseTwoParameters xmlns="myNs">
+ <TypeName> %1 </TypeName>
+ <SecondTypeName> %2 </SecondTypeName>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ </DynamicTypeUseTwoParameters>
+ </UserData>
+ </template>
+
+ <template tid="DynamicTypeUsePrivateVariance">
+ <data name="TypeName" inType="win:UnicodeString" />
+ <data name="InterfaceTypeName" inType="win:UnicodeString" />
+ <data name="VariantInterfaceTypeName" inType="win:UnicodeString" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <DynamicTypeVariance xmlns="myNs">
+ <TypeName> %1 </TypeName>
+ <InterfaceTypeName> %2 </InterfaceTypeName>
+ <VariantInterfaceTypeName> %3 </VariantInterfaceTypeName>
+ <ClrInstanceID> %4 </ClrInstanceID>
+ </DynamicTypeVariance>
+ </UserData>
+ </template>
+
+ <template tid="DynamicTypeUseStringAndIntPrivate">
+ <data name="TypeName" inType="win:UnicodeString" />
+ <data name="Int1" inType="win:Int32" />
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+ <DynamicTypeUseStringAndInt xmlns="myNs">
+ <TypeName> %1 </TypeName>
+ <Int1> %2 </Int1>
+ <ClrInstanceID> %3 </ClrInstanceID>
+ </DynamicTypeUseStringAndInt>
+ </UserData>
+ </template>
+
+ <template tid="DynamicTypeUseNoParametersPrivate">
+ <data name="ClrInstanceID" inType="win:UInt16" />
+ <UserData>
+          <DynamicTypeUseNoParameters xmlns="myNs">
+            <ClrInstanceID> %1 </ClrInstanceID>
+          </DynamicTypeUseNoParameters>
+ </UserData>
+ </template>
+
+ </templates>
+
+ <!--Events-->
+ <events>
+ <!--Private GC events, value reserved from 0 to 79-->
+ <event value="1" version="0" level="win:Informational" template="GCDecision"
+ keywords ="GCPrivateKeyword" opcode="GCDecision"
+ task="GarbageCollectionPrivate"
+ symbol="GCDecision" message="$(string.PrivatePublisher.GCDecisionEventMessage)"/>
+
+ <event value="1" version="1" level="win:Informational" template="GCDecision_V1"
+ keywords ="GCPrivateKeyword" opcode="GCDecision"
+ task="GarbageCollectionPrivate"
+ symbol="GCDecision_V1" message="$(string.PrivatePublisher.GCDecision_V1EventMessage)"/>
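+    <!-- Annotation (editor's note): as the pair above illustrates, most private
+         events are declared twice with the same value and opcode: version 0 with
+         the original payload, and version 1 (the _V1 templates) appending
+         ClrInstanceID so events can be attributed to a runtime instance. -->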
+
+ <event value="2" version="0" level="win:Informational" template="GCSettings"
+ keywords ="GCPrivateKeyword" opcode="GCSettings"
+ task="GarbageCollectionPrivate"
+ symbol="GCSettings" message="$(string.PrivatePublisher.GCSettingsEventMessage)"/>
+
+ <event value="2" version="1" level="win:Informational" template="GCSettings_V1"
+ keywords ="GCPrivateKeyword" opcode="GCSettings"
+ task="GarbageCollectionPrivate"
+ symbol="GCSettings_V1" message="$(string.PrivatePublisher.GCSettings_V1EventMessage)"/>
+
+ <event value="3" version="0" level="win:Verbose" template="GCOptimized"
+ keywords ="GCPrivateKeyword" opcode="GCOptimized"
+ task="GarbageCollectionPrivate"
+ symbol="GCOptimized" message="$(string.PrivatePublisher.GCOptimizedEventMessage)"/>
+
+ <event value="3" version="1" level="win:Verbose" template="GCOptimized_V1"
+ keywords ="GCPrivateKeyword" opcode="GCOptimized"
+ task="GarbageCollectionPrivate"
+ symbol="GCOptimized_V1" message="$(string.PrivatePublisher.GCOptimized_V1EventMessage)"/>
+
+ <event value="4" version="2" level="win:Informational" template="GCPerHeapHistory"
+ keywords ="GCPrivateKeyword" opcode="GCPerHeapHistory"
+ task="GarbageCollectionPrivate"
+ symbol="GCPerHeapHistory" message="$(string.PrivatePublisher.GCPerHeapHistoryEventMessage)"/>
+
+ <event value="4" version="1" level="win:Informational" template="GCPerHeapHistory_V1"
+ keywords ="GCPrivateKeyword" opcode="GCPerHeapHistory"
+ task="GarbageCollectionPrivate"
+ symbol="GCPerHeapHistory_V1" message="$(string.PrivatePublisher.GCPerHeapHistory_V1EventMessage)"/>
+
+ <event value="5" version="0" level="win:Informational" template="GCGlobalHeap"
+ keywords ="GCPrivateKeyword" opcode="GCGlobalHeapHistory"
+ task="GarbageCollectionPrivate"
+ symbol="GCGlobalHeapHistory" message="$(string.PrivatePublisher.GCGlobalHeapEventMessage)"/>
+
+ <event value="5" version="1" level="win:Informational" template="GCGlobalHeap_V1"
+ keywords ="GCPrivateKeyword" opcode="GCGlobalHeapHistory"
+ task="GarbageCollectionPrivate"
+ symbol="GCGlobalHeapHistory_V1" message="$(string.PrivatePublisher.GCGlobalHeap_V1EventMessage)"/>
+
+ <event value="6" version="0" level="win:Verbose" template="GCJoin"
+ keywords ="GCPrivateKeyword" opcode="GCJoin"
+ task="GarbageCollectionPrivate"
+ symbol="GCJoin" message="$(string.PrivatePublisher.GCJoinEventMessage)"/>
+
+ <event value="6" version="1" level="win:Verbose" template="GCJoin_V1"
+ keywords ="GCPrivateKeyword" opcode="GCJoin"
+ task="GarbageCollectionPrivate"
+ symbol="GCJoin_V1" message="$(string.PrivatePublisher.GCJoin_V1EventMessage)"/>
+
+ <event value="7" version="0" level="win:Informational" template="PrvGCMark"
+ keywords ="GCPrivateKeyword" opcode="PrvGCMarkStackRoots"
+ task="GarbageCollectionPrivate"
+ symbol="PrvGCMarkStackRoots" message="$(string.PrivatePublisher.GCMarkStackRootsEventMessage)"/>
+
+ <event value="7" version="1" level="win:Informational" template="PrvGCMark_V1"
+ keywords ="GCPrivateKeyword" opcode="PrvGCMarkStackRoots"
+ task="GarbageCollectionPrivate"
+ symbol="PrvGCMarkStackRoots_V1" message="$(string.PrivatePublisher.GCMarkStackRoots_V1EventMessage)"/>
+
+ <event value="8" version="0" level="win:Informational" template="PrvGCMark"
+ keywords ="GCPrivateKeyword" opcode="PrvGCMarkFinalizeQueueRoots"
+ task="GarbageCollectionPrivate"
+ symbol="PrvGCMarkFinalizeQueueRoots" message="$(string.PrivatePublisher.GCMarkFinalizeQueueRootsEventMessage)"/>
+
+ <event value="8" version="1" level="win:Informational" template="PrvGCMark_V1"
+ keywords ="GCPrivateKeyword" opcode="PrvGCMarkFinalizeQueueRoots"
+ task="GarbageCollectionPrivate"
+ symbol="PrvGCMarkFinalizeQueueRoots_V1" message="$(string.PrivatePublisher.GCMarkFinalizeQueueRoots_V1EventMessage)"/>
+
+ <event value="9" version="0" level="win:Informational" template="PrvGCMark"
+ keywords ="GCPrivateKeyword" opcode="PrvGCMarkHandles"
+ task="GarbageCollectionPrivate"
+ symbol="PrvGCMarkHandles" message="$(string.PrivatePublisher.GCMarkHandlesEventMessage)"/>
+
+ <event value="9" version="1" level="win:Informational" template="PrvGCMark_V1"
+ keywords ="GCPrivateKeyword" opcode="PrvGCMarkHandles"
+ task="GarbageCollectionPrivate"
+ symbol="PrvGCMarkHandles_V1" message="$(string.PrivatePublisher.GCMarkHandles_V1EventMessage)"/>
+
+ <event value="10" version="0" level="win:Informational" template="PrvGCMark"
+ keywords ="GCPrivateKeyword" opcode="PrvGCMarkCards"
+ task="GarbageCollectionPrivate"
+ symbol="PrvGCMarkCards" message="$(string.PrivatePublisher.GCMarkCardsEventMessage)"/>
+
+ <event value="10" version="1" level="win:Informational" template="PrvGCMark_V1"
+ keywords ="GCPrivateKeyword" opcode="PrvGCMarkCards"
+ task="GarbageCollectionPrivate"
+ symbol="PrvGCMarkCards_V1" message="$(string.PrivatePublisher.GCMarkCards_V1EventMessage)"/>
+
+ <event value="11" version="0" level="win:Informational" template="GCNoUserData"
+ keywords ="GCPrivateKeyword" opcode="BGCBegin"
+ task="GarbageCollectionPrivate"
+ symbol="BGCBegin" message="$(string.PrivatePublisher.BGCBeginEventMessage)"/>
+
+ <event value="12" version="0" level="win:Informational" template="GCNoUserData"
+ keywords ="GCPrivateKeyword" opcode="BGC1stNonConEnd"
+ task="GarbageCollectionPrivate"
+ symbol="BGC1stNonConEnd" message="$(string.PrivatePublisher.BGC1stNonConEndEventMessage)"/>
+
+ <event value="13" version="0" level="win:Informational" template="GCNoUserData"
+ keywords ="GCPrivateKeyword" opcode="BGC1stConEnd"
+ task="GarbageCollectionPrivate"
+ symbol="BGC1stConEnd" message="$(string.PrivatePublisher.BGC1stConEndEventMessage)"/>
+
+ <event value="14" version="0" level="win:Informational" template="GCNoUserData"
+ keywords ="GCPrivateKeyword" opcode="BGC2ndNonConBegin"
+ task="GarbageCollectionPrivate"
+ symbol="BGC2ndNonConBegin" message="$(string.PrivatePublisher.BGC2ndNonConBeginEventMessage)"/>
+
+ <event value="15" version="0" level="win:Informational" template="GCNoUserData"
+ keywords ="GCPrivateKeyword" opcode="BGC2ndNonConEnd"
+ task="GarbageCollectionPrivate"
+ symbol="BGC2ndNonConEnd" message="$(string.PrivatePublisher.BGC2ndNonConEndEventMessage)"/>
+
+ <event value="16" version="0" level="win:Informational" template="GCNoUserData"
+ keywords ="GCPrivateKeyword" opcode="BGC2ndConBegin"
+ task="GarbageCollectionPrivate"
+ symbol="BGC2ndConBegin" message="$(string.PrivatePublisher.BGC2ndConBeginEventMessage)"/>
+
+ <event value="17" version="0" level="win:Informational" template="GCNoUserData"
+ keywords ="GCPrivateKeyword" opcode="BGC2ndConEnd"
+ task="GarbageCollectionPrivate"
+ symbol="BGC2ndConEnd" message="$(string.PrivatePublisher.BGC2ndConEndEventMessage)"/>
+
+ <event value="18" version="0" level="win:Informational" template="GCNoUserData"
+ keywords ="GCPrivateKeyword" opcode="BGCPlanEnd"
+ task="GarbageCollectionPrivate"
+ symbol="BGCPlanEnd" message="$(string.PrivatePublisher.BGCPlanEndEventMessage)"/>
+
+ <event value="19" version="0" level="win:Informational" template="GCNoUserData"
+ keywords ="GCPrivateKeyword" opcode="BGCSweepEnd"
+ task="GarbageCollectionPrivate"
+ symbol="BGCSweepEnd" message="$(string.PrivatePublisher.BGCSweepEndEventMessage)"/>
+
+ <event value="20" version="0" level="win:Informational" template="BGCDrainMark"
+ keywords ="GCPrivateKeyword" opcode="BGCDrainMark"
+ task="GarbageCollectionPrivate"
+ symbol="BGCDrainMark" message="$(string.PrivatePublisher.BGCDrainMarkEventMessage)"/>
+
+ <event value="21" version="0" level="win:Informational" template="BGCRevisit"
+ keywords ="GCPrivateKeyword" opcode="BGCRevisit"
+ task="GarbageCollectionPrivate"
+ symbol="BGCRevisit" message="$(string.PrivatePublisher.BGCRevisitEventMessage)"/>
+
+ <event value="22" version="0" level="win:Informational" template="BGCOverflow"
+ keywords ="GCPrivateKeyword" opcode="BGCOverflow"
+ task="GarbageCollectionPrivate"
+ symbol="BGCOverflow" message="$(string.PrivatePublisher.BGCOverflowEventMessage)"/>
+
+ <event value="23" version="0" level="win:Informational" template="BGCAllocWait"
+ keywords ="GCPrivateKeyword" opcode="BGCAllocWaitBegin"
+ task="GarbageCollectionPrivate"
+ symbol="BGCAllocWaitBegin" message="$(string.PrivatePublisher.BGCAllocWaitEventMessage)"/>
+
+ <event value="24" version="0" level="win:Informational" template="BGCAllocWait"
+ keywords ="GCPrivateKeyword" opcode="BGCAllocWaitEnd"
+ task="GarbageCollectionPrivate"
+ symbol="BGCAllocWaitEnd" message="$(string.PrivatePublisher.BGCAllocWaitEventMessage)"/>
+
+ <event value="25" version="0" level="win:Informational" template="GCFullNotify"
+ keywords ="GCPrivateKeyword" opcode="GCFullNotify"
+ task="GarbageCollectionPrivate"
+ symbol="GCFullNotify" message="$(string.PrivatePublisher.GCFullNotifyEventMessage)"/>
+
+ <event value="25" version="1" level="win:Informational" template="GCFullNotify_V1"
+ keywords ="GCPrivateKeyword" opcode="GCFullNotify"
+ task="GarbageCollectionPrivate"
+ symbol="GCFullNotify_V1" message="$(string.PrivatePublisher.GCFullNotify_V1EventMessage)"/>
+
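+ <!--
+ Each private GC event above keeps a fixed event value across versions; only
+ the version number and template change, with the _V1 templates typically
+ appending ClrInstanceID so events from side-by-side runtimes can be told
+ apart. Decoders match on the (value, version) pair, which is why both
+ declarations stay in the manifest. As a rough illustration, here is a minimal
+ C++ sketch of what the GCJoin_V1 declaration (value 6, version 1, win:Verbose)
+ means at the raw ETW API level; the runtime itself uses generated FireEtw*
+ helpers, and the opcode/task/keyword constants plus the payload field list
+ below are stand-ins for the authoritative definitions elsewhere in this
+ manifest:
+
+ #include <windows.h>
+ #include <evntprov.h>
+
+ // Stand-in values; the real ones come from the opcodes/tasks/keywords
+ // sections of this manifest.
+ static const UCHAR     OpcodeGCJoin     = 0;
+ static const USHORT    TaskGCPrivate    = 0;
+ static const ULONGLONG KeywordGCPrivate = 0;
+
+ ULONG WriteGCJoinV1(REGHANDLE hProvider, UINT32 heap, UINT32 joinTime,
+                     UINT32 joinType, UINT16 clrInstanceId)
+ {
+     EVENT_DESCRIPTOR desc = {};
+     desc.Id      = 6;                  // value="6"
+     desc.Version = 1;                  // version="1"
+     desc.Level   = 5;                  // win:Verbose
+     desc.Opcode  = OpcodeGCJoin;
+     desc.Task    = TaskGCPrivate;
+     desc.Keyword = KeywordGCPrivate;
+
+     // Illustrative payload; the authoritative order and field set are
+     // defined by the GCJoin_V1 template.
+     EVENT_DATA_DESCRIPTOR data[4];
+     EventDataDescCreate(&data[0], &heap,          sizeof(heap));
+     EventDataDescCreate(&data[1], &joinTime,      sizeof(joinTime));
+     EventDataDescCreate(&data[2], &joinType,      sizeof(joinType));
+     EventDataDescCreate(&data[3], &clrInstanceId, sizeof(clrInstanceId));
+     return EventWrite(hProvider, &desc, 4, data);
+ }
+ -->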
+ <!-- Private events from other CLR components, starting at value 80 -->
+ <event value="80" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="EEStartupStart"
+ task="Startup"
+ symbol="EEStartupStart" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="80" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="EEStartupStart"
+ task="Startup"
+ symbol="EEStartupStart_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="81" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="EEStartupEnd"
+ task="Startup"
+ symbol="EEStartupEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="81" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="EEStartupEnd"
+ task="Startup"
+ symbol="EEStartupEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="82" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="EEConfigSetup"
+ task="Startup"
+ symbol="EEConfigSetup" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="82" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="EEConfigSetup"
+ task="Startup"
+ symbol="EEConfigSetup_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="83" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="EEConfigSetupEnd"
+ task="Startup"
+ symbol="EEConfigSetupEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="83" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="EEConfigSetupEnd"
+ task="Startup"
+ symbol="EEConfigSetupEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="84" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="LoadSystemBases"
+ task="Startup"
+ symbol="LdSysBases" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="84" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="LoadSystemBases"
+ task="Startup"
+ symbol="LdSysBases_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="85" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="LoadSystemBasesEnd"
+ task="Startup"
+ symbol="LdSysBasesEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="85" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="LoadSystemBasesEnd"
+ task="Startup"
+ symbol="LdSysBasesEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="86" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="ExecExe"
+ task="Startup"
+ symbol="ExecExe" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="86" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="ExecExe"
+ task="Startup"
+ symbol="ExecExe_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="87" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="ExecExeEnd"
+ task="Startup"
+ symbol="ExecExeEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="87" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="ExecExeEnd"
+ task="Startup"
+ symbol="ExecExeEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="88" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="Main"
+ task="Startup"
+ symbol="Main" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="88" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="Main"
+ task="Startup"
+ symbol="Main_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="89" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="MainEnd"
+ task="Startup"
+ symbol="MainEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="89" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="MainEnd"
+ task="Startup"
+ symbol="MainEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="90" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="ApplyPolicyStart"
+ task="Startup"
+ symbol="ApplyPolicyStart" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="90" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="ApplyPolicyStart"
+ task="Startup"
+ symbol="ApplyPolicyStart_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="91" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="ApplyPolicyEnd"
+ task="Startup"
+ symbol="ApplyPolicyEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="91" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="ApplyPolicyEnd"
+ task="Startup"
+ symbol="ApplyPolicyEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="92" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="LdLibShFolder"
+ task="Startup"
+ symbol="LdLibShFolder" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="92" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="LdLibShFolder"
+ task="Startup"
+ symbol="LdLibShFolder_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="93" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="LdLibShFolderEnd"
+ task="Startup"
+ symbol="LdLibShFolderEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="93" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="LdLibShFolderEnd"
+ task="Startup"
+ symbol="LdLibShFolderEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="94" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="PrestubWorker"
+ task="Startup"
+ symbol="PrestubWorker" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="94" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="PrestubWorker"
+ task="Startup"
+ symbol="PrestubWorker_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="95" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="PrestubWorkerEnd"
+ task="Startup"
+ symbol="PrestubWorkerEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="95" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="PrestubWorkerEnd"
+ task="Startup"
+ symbol="PrestubWorkerEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="96" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="GetInstallationStart"
+ task="Startup"
+ symbol="GetInstallationStart" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="96" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="GetInstallationStart"
+ task="Startup"
+ symbol="GetInstallationStart_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="97" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="GetInstallationEnd"
+ task="Startup"
+ symbol="GetInstallationEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="97" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="GetInstallationEnd"
+ task="Startup"
+ symbol="GetInstallationEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="98" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="OpenHModule"
+ task="Startup"
+ symbol="OpenHModule" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="98" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="OpenHModule"
+ task="Startup"
+ symbol="OpenHModule_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="99" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="OpenHModuleEnd"
+ task="Startup"
+ symbol="OpenHModuleEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="99" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="OpenHModuleEnd"
+ task="Startup"
+ symbol="OpenHModuleEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="100" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="ExplicitBindStart"
+ task="Startup"
+ symbol="ExplicitBindStart" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="100" version="1" level="win:Informational" template="Startup_V1"
+ task="Startup"
+ keywords ="StartupKeyword" opcode="ExplicitBindStart"
+ symbol="ExplicitBindStart_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="101" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="ExplicitBindEnd"
+ task="Startup"
+ symbol="ExplicitBindEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="101" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="ExplicitBindEnd"
+ task="Startup"
+ symbol="ExplicitBindEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="102" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="ParseXml"
+ task="Startup"
+ symbol="ParseXml" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="102" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="ParseXml"
+ task="Startup"
+ symbol="ParseXml_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="103" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="ParseXmlEnd"
+ task="Startup"
+ symbol="ParseXmlEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="103" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="ParseXmlEnd"
+ task="Startup"
+ symbol="ParseXmlEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="104" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="InitDefaultDomain"
+ task="Startup"
+ symbol="InitDefaultDomain" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="104" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="InitDefaultDomain"
+ task="Startup"
+ symbol="InitDefaultDomain_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="105" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="InitDefaultDomainEnd"
+ task="Startup"
+ symbol="InitDefaultDomainEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="105" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="InitDefaultDomainEnd"
+ task="Startup"
+ symbol="InitDefaultDomainEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="106" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="InitSecurity"
+ task="Startup"
+ symbol="InitSecurity" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="106" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="InitSecurity"
+ task="Startup"
+ symbol="InitSecurity_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="107" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="InitSecurityEnd"
+ task="Startup"
+ symbol="InitSecurityEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="107" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="InitSecurityEnd"
+ task="Startup"
+ symbol="InitSecurityEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="108" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="AllowBindingRedirs"
+ task="Startup"
+ symbol="AllowBindingRedirs" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="108" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="AllowBindingRedirs"
+ task="Startup"
+ symbol="AllowBindingRedirs_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="109" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="AllowBindingRedirsEnd"
+ task="Startup"
+ symbol="AllowBindingRedirsEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="109" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="AllowBindingRedirsEnd"
+ task="Startup"
+ symbol="AllowBindingRedirsEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="110" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="EEConfigSync"
+ task="Startup"
+ symbol="EEConfigSync" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="110" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="EEConfigSync"
+ task="Startup"
+ symbol="EEConfigSync_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="111" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="EEConfigSyncEnd"
+ task="Startup"
+ symbol="EEConfigSyncEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="111" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="EEConfigSyncEnd"
+ task="Startup"
+ symbol="EEConfigSyncEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="112" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="FusionBinding"
+ task="Startup"
+ symbol="FusionBinding" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="112" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="FusionBinding"
+ task="Startup"
+ symbol="FusionBinding_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="113" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="FusionBindingEnd"
+ task="Startup"
+ symbol="FusionBindingEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="113" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="FusionBindingEnd"
+ task="Startup"
+ symbol="FusionBindingEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="114" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="LoaderCatchCall"
+ task="Startup"
+ symbol="LoaderCatchCall" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="114" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="LoaderCatchCall"
+ task="Startup"
+ symbol="LoaderCatchCall_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="115" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="LoaderCatchCallEnd"
+ task="Startup"
+ symbol="LoaderCatchCallEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="115" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="LoaderCatchCallEnd"
+ task="Startup"
+ symbol="LoaderCatchCallEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="116" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="FusionInit"
+ task="Startup"
+ symbol="FusionInit" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="116" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="FusionInit"
+ task="Startup"
+ symbol="FusionInit_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="117" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="FusionInitEnd"
+ task="Startup"
+ symbol="FusionInitEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="117" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="FusionInitEnd"
+ task="Startup"
+ symbol="FusionInitEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="118" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="FusionAppCtx"
+ task="Startup"
+ symbol="FusionAppCtx" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="118" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="FusionAppCtx"
+ task="Startup"
+ symbol="FusionAppCtx_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="119" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="FusionAppCtxEnd"
+ task="Startup"
+ symbol="FusionAppCtxEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="119" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="FusionAppCtxEnd"
+ task="Startup"
+ symbol="FusionAppCtxEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="120" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="Fusion2EE"
+ task="Startup"
+ symbol="Fusion2EE" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="120" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="Fusion2EE"
+ task="Startup"
+ symbol="Fusion2EE_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="121" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="Fusion2EEEnd"
+ task="Startup"
+ symbol="Fusion2EEEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="121" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="Fusion2EEEnd"
+ task="Startup"
+ symbol="Fusion2EEEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="122" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="SecurityCatchCall"
+ task="Startup"
+ symbol="SecurityCatchCall" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="122" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="SecurityCatchCall"
+ task="Startup"
+ symbol="SecurityCatchCall_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
+ <event value="123" version="0" level="win:Informational" template="Startup"
+ keywords ="StartupKeyword" opcode="SecurityCatchCallEnd"
+ task="Startup"
+ symbol="SecurityCatchCallEnd" message="$(string.PrivatePublisher.StartupEventMessage)"/>
+
+ <event value="123" version="1" level="win:Informational" template="Startup_V1"
+ keywords ="StartupKeyword" opcode="SecurityCatchCallEnd"
+ task="Startup"
+ symbol="SecurityCatchCallEnd_V1" message="$(string.PrivatePublisher.Startup_V1EventMessage)"/>
+
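+ <!-- Startup events 80 through 123 come in consecutive Start/End pairs that
+ share the Startup template (the _V1 rows add ClrInstanceID): for example,
+ the interval from EEStartupStart (80) to EEStartupEnd (81) brackets overall
+ EE startup, so a consumer can derive each phase's duration by pairing an
+ event with its immediate successor. -->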
+ <event value="151" version="0" level="win:LogAlways" template="ClrStackWalk"
+ keywords ="StackKeyword" opcode="CLRStackWalk"
+ task="CLRStackPrivate"
+ symbol="CLRStackWalkPrivate" message="$(string.PrivatePublisher.StackEventMessage)"/>
+
+ <event value="158" version="0" level="win:Informational" template="ModuleRangePrivate"
+ keywords ="PerfTrackPrivateKeyword" opcode="ModuleRangeLoadPrivate"
+ task="CLRPerfTrackPrivate"
+ symbol="ModuleRangeLoadPrivate" message="$(string.PrivatePublisher.ModuleRangeLoadEventMessage)"/>
+
+ <event value="159" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="BindingPolicyPhaseStart"
+ task="Binding"
+ symbol="BindingPolicyPhaseStart" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="160" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="BindingPolicyPhaseEnd"
+ task="Binding"
+ symbol="BindingPolicyPhaseEnd" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="161" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="BindingNgenPhaseStart"
+ task="Binding"
+ symbol="BindingNgenPhaseStart" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="162" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="BindingNgenPhaseEnd"
+ task="Binding"
+ symbol="BindingNgenPhaseEnd" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="163" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="BindingLookupAndProbingPhaseStart"
+ task="Binding"
+ symbol="BindingLookupAndProbingPhaseStart" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="164" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="BindingLookupAndProbingPhaseEnd"
+ task="Binding"
+ symbol="BindingLookupAndProbingPhaseEnd" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="165" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="LoaderPhaseStart"
+ task="Binding"
+ symbol="LoaderPhaseStart" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="166" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="LoaderPhaseEnd"
+ task="Binding"
+ symbol="LoaderPhaseEnd" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="167" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="BindingPhaseStart"
+ task="Binding"
+ symbol="BindingPhaseStart" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="168" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="BindingPhaseEnd"
+ task="Binding"
+ symbol="BindingPhaseEnd" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="169" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="BindingDownloadPhaseStart"
+ task="Binding"
+ symbol="BindingDownloadPhaseStart" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="170" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="BindingDownloadPhaseEnd"
+ task="Binding"
+ symbol="BindingDownloadPhaseEnd" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="171" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="LoaderAssemblyInitPhaseStart"
+ task="Binding"
+ symbol="LoaderAssemblyInitPhaseStart" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="172" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="LoaderAssemblyInitPhaseEnd"
+ task="Binding"
+ symbol="LoaderAssemblyInitPhaseEnd" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="173" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="LoaderMappingPhaseStart"
+ task="Binding"
+ symbol="LoaderMappingPhaseStart" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="174" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="LoaderMappingPhaseEnd"
+ task="Binding"
+ symbol="LoaderMappingPhaseEnd" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="175" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="LoaderDeliverEventsPhaseStart"
+ task="Binding"
+ symbol="LoaderDeliverEventsPhaseStart" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
+ <event value="176" version="0" level="win:Informational" template="Binding"
+ keywords ="BindingKeyword" opcode="LoaderDeliverEventsPhaseEnd"
+ task="Binding"
+ symbol="LoaderDeliverEventsPhaseEnd" message="$(string.PrivatePublisher.BindingEventMessage)"/>
+
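+ <!-- Binding events 159 through 176 likewise form Start/End pairs over the
+ shared Binding template, each adjacent pair bracketing one phase of assembly
+ binding (policy, ngen lookup, probing, download, mapping, event delivery). -->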
+ <event value="177" version="0" level="win:Informational" template="EvidenceGenerated"
+ keywords="SecurityPrivateKeyword" opcode="EvidenceGenerated"
+ task="EvidenceGeneratedTask"
+ symbol="EvidenceGenerated" message="$(string.PrivatePublisher.EvidenceGeneratedEventMessage)"/>
+
+ <event value="178" version="0" level="win:Informational" template="ModuleTransparencyCalculation"
+ keywords="SecurityPrivateKeyword" opcode="ModuleTransparencyComputationStart"
+ task="TransparencyComputation"
+ symbol="ModuleTransparencyComputationStart" message="$(string.PrivatePublisher.ModuleTransparencyComputationStartEventMessage)" />
+
+ <event value="179" version="0" level="win:Informational" template="ModuleTransparencyCalculationResult"
+ keywords="SecurityPrivateKeyword" opcode="ModuleTransparencyComputationEnd"
+ task="TransparencyComputation"
+ symbol="ModuleTransparencyComputationEnd" message="$(string.PrivatePublisher.ModuleTransparencyComputationEndEventMessage)" />
+
+ <event value="180" version="0" level="win:Informational" template="TypeTransparencyCalculation"
+ keywords="SecurityPrivateKeyword" opcode="TypeTransparencyComputationStart"
+ task="TransparencyComputation"
+ symbol="TypeTransparencyComputationStart" message="$(string.PrivatePublisher.TypeTransparencyComputationStartEventMessage)" />
+
+ <event value="181" version="0" level="win:Informational" template="TypeTransparencyCalculationResult"
+ keywords="SecurityPrivateKeyword" opcode="TypeTransparencyComputationEnd"
+ task="TransparencyComputation"
+ symbol="TypeTransparencyComputationEnd" message="$(string.PrivatePublisher.TypeTransparencyComputationEndEventMessage)" />
+
+ <event value="182" version="0" level="win:Informational" template="MethodTransparencyCalculation"
+ keywords="SecurityPrivateKeyword" opcode="MethodTransparencyComputationStart"
+ task="TransparencyComputation"
+ symbol="MethodTransparencyComputationStart" message="$(string.PrivatePublisher.MethodTransparencyComputationStartEventMessage)" />
+
+ <event value="183" version="0" level="win:Informational" template="MethodTransparencyCalculationResult"
+ keywords="SecurityPrivateKeyword" opcode="MethodTransparencyComputationEnd"
+ task="TransparencyComputation"
+ symbol="MethodTransparencyComputationEnd" message="$(string.PrivatePublisher.MethodTransparencyComputationEndEventMessage)" />
+
+ <event value="184" version="0" level="win:Informational" template="FieldTransparencyCalculation"
+ keywords="SecurityPrivateKeyword" opcode="FieldTransparencyComputationStart"
+ task="TransparencyComputation"
+ symbol="FieldTransparencyComputationStart" message="$(string.PrivatePublisher.FieldTransparencyComputationStartEventMessage)" />
+
+ <event value="185" version="0" level="win:Informational" template="FieldTransparencyCalculationResult"
+ keywords="SecurityPrivateKeyword" opcode="FieldTransparencyComputationEnd"
+ task="TransparencyComputation"
+ symbol="FieldTransparencyComputationEnd" message="$(string.PrivatePublisher.FieldTransparencyComputationEndEventMessage)" />
+
+ <event value="186" version="0" level="win:Informational" template="TokenTransparencyCalculation"
+ keywords="SecurityPrivateKeyword" opcode="TokenTransparencyComputationStart"
+ task="TransparencyComputation"
+ symbol="TokenTransparencyComputationStart" message="$(string.PrivatePublisher.TokenTransparencyComputationStartEventMessage)" />
+
+ <event value="187" version="0" level="win:Informational" template="TokenTransparencyCalculationResult"
+ keywords="SecurityPrivateKeyword" opcode="TokenTransparencyComputationEnd"
+ task="TransparencyComputation"
+ symbol="TokenTransparencyComputationEnd" message="$(string.PrivatePublisher.TokenTransparencyComputationEndEventMessage)" />
+
+ <event value="188" version="0" level="win:Informational" template="NgenBindEvent"
+ keywords="PrivateFusionKeyword" opcode="NgenBind"
+ task="CLRNgenBinder"
+ symbol="NgenBindEvent" message="$(string.PrivatePublisher.NgenBinderMessage)"/>
+
+ <!-- CLR FailFast events -->
+ <event value="191" version="0" level="win:Critical" template="FailFast"
+ opcode="FailFast"
+ task="CLRFailFast"
+ symbol="FailFast" message="$(string.PrivatePublisher.FailFastEventMessage)"/>
+
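+ <!-- FailFast declares no keyword; under ETW keyword-matching rules a
+ zero-keyword event is delivered to any session that enables this provider
+ at win:Critical or above, regardless of the session's keyword mask. -->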
+ <event value="192" version="0" level="win:Verbose" template="PrvFinalizeObject"
+ keywords ="GCPrivateKeyword"
+ opcode="PrvFinalizeObject"
+ task="GarbageCollectionPrivate"
+ symbol="PrvFinalizeObject" message="$(string.PrivatePublisher.FinalizeObjectEventMessage)"/>
+
+ <event value="193" version="0" level="win:Verbose" template="CCWRefCountChange"
+ keywords="InteropPrivateKeyword"
+ opcode="CCWRefCountChange"
+ task="GarbageCollectionPrivate"
+ symbol="CCWRefCountChange" message="$(string.PrivatePublisher.CCWRefCountChangeEventMessage)"/>
+
+ <event value="194" version="0" level="win:Verbose" template="PrvSetGCHandle"
+ keywords="GCHandlePrivateKeyword"
+ opcode="SetGCHandle"
+ task="GarbageCollectionPrivate"
+ symbol="PrvSetGCHandle" message="$(string.PrivatePublisher.SetGCHandleEventMessage)"/>
+
+ <event value="195" version="0" level="win:Verbose" template="PrvDestroyGCHandle"
+ keywords="GCHandlePrivateKeyword"
+ opcode="DestroyGCHandle"
+ task="GarbageCollectionPrivate"
+ symbol="PrvDestroyGCHandle" message="$(string.PrivatePublisher.DestroyGCHandleEventMessage)"/>
+
+ <event value="196" version="0" level="win:Informational" template="FusionMessage"
+ keywords="BindingKeyword" opcode="FusionMessage"
+ task="Binding"
+ symbol="FusionMessageEvent" message="$(string.PrivatePublisher.FusionMessageEventMessage)"/>
+
+ <event value="197" version="0" level="win:Informational" template="FusionErrorCode"
+ keywords="BindingKeyword" opcode="FusionErrorCode"
+ task="Binding"
+ symbol="FusionErrorCodeEvent" message="$(string.PrivatePublisher.FusionErrorCodeEventMessage)"/>
+
+ <event value="199" version="0" level="win:Verbose" template="PinPlugAtGCTime"
+ keywords="GCPrivateKeyword"
+ opcode="PinPlugAtGCTime"
+ task="GarbageCollectionPrivate"
+ symbol="PinPlugAtGCTime" message="$(string.PrivatePublisher.PinPlugAtGCTimeEventMessage)"/>
+
+ <event value="310" version="0" level="win:Verbose" template="LoaderHeapPrivate"
+ keywords="LoaderHeapPrivateKeyword" opcode="AllocRequest"
+ task="LoaderHeapAllocation" symbol="AllocRequest" message="$(string.PrivatePublisher.AllocRequestEventMessage)" />
+
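+ <!-- Event values in this manifest need not be contiguous or appear in
+ numeric order (AllocRequest above is 310, the Multicore JIT events below
+ are 201 and 202); decoders identify events purely by the value/version
+ pair within the provider. -->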
+ <!-- CLR Private Multicore JIT events -->
+ <event value="201" version="0" level="win:Informational" template="MulticoreJitPrivate"
+ keywords="MulticoreJitPrivateKeyword" opcode="Common"
+ task="CLRMulticoreJit" symbol="MulticoreJit" message="$(string.PrivatePublisher.MulticoreJitCommonEventMessage)" />
+ <event value="202" version="0" level="win:Informational" template="MulticoreJitMethodCodeReturnedPrivate"
+ keywords="MulticoreJitPrivateKeyword" opcode="MethodCodeReturned"
+ task="CLRMulticoreJit" symbol="MulticoreJitMethodCodeReturned" message="$(string.PrivatePublisher.MulticoreJitMethodCodeReturnedMessage)" />
+
+ <!-- CLR Private Dynamic Type Usage events. NOTE: these events are no longer emitted; they are kept for backward compatibility with traces that may already contain them. -->
+ <event value="400" version="0" level="win:Informational" template="DynamicTypeUsePrivate"
+ keywords="DynamicTypeUsageKeyword"
+ symbol="IInspectableRuntimeClassName"
+ task="DynamicTypeUsage"
+ opcode="IInspectableRuntimeClassName"
+ message="$(string.PrivatePublisher.IInspectableRuntimeClassNameMessage)" />
+
+ <event value="401" version="0" level="win:Informational" template="DynamicTypeUseTwoParametersPrivate"
+ keywords="DynamicTypeUsageKeyword"
+ symbol="WinRTUnbox"
+ task="DynamicTypeUsage"
+ opcode="WinRTUnbox"
+ message="$(string.PrivatePublisher.WinRTUnboxMessage)" />
+
+ <event value="402" version="0" level="win:Informational" template="DynamicTypeUsePrivate"
+ keywords="DynamicTypeUsageKeyword"
+ symbol="CreateRCW"
+ task="DynamicTypeUsage"
+ opcode="CreateRCW"
+ message="$(string.PrivatePublisher.CreateRcwMessage)" />
+
+ <event value="403" version="0" level="win:Informational" template="DynamicTypeUsePrivateVariance"
+ keywords="DynamicTypeUsageKeyword"
+ symbol="RCWVariance"
+ task="DynamicTypeUsage"
+ opcode="RCWVariance"
+ message="$(string.PrivatePublisher.RcwVarianceMessage)" />
+
+ <event value="404" version="0" level="win:Informational" template="DynamicTypeUseTwoParametersPrivate"
+ keywords="DynamicTypeUsageKeyword"
+ symbol="RCWIEnumerableCasting"
+ task="DynamicTypeUsage"
+ opcode="RCWIEnumerableCasting"
+ message="$(string.PrivatePublisher.RCWIEnumerableCastingMessage)" />
+
+ <event value="405" version="0" level="win:Informational" template="DynamicTypeUsePrivate"
+ keywords="DynamicTypeUsageKeyword"
+ symbol="CreateCCW"
+ task="DynamicTypeUsage"
+ opcode="CreateCCW"
+ message="$(string.PrivatePublisher.CreateCCWMessage)" />
+
+ <event value="406" version="0" level="win:Informational" template="DynamicTypeUsePrivateVariance"
+ keywords="DynamicTypeUsageKeyword"
+ symbol="CCWVariance"
+ task="DynamicTypeUsage"
+ opcode="CCWVariance"
+ message="$(string.PrivatePublisher.CCWVarianceMessage)" />
+
+ <event value="407" version="0" level="win:Informational" template="DynamicTypeUseStringAndIntPrivate"
+ keywords="DynamicTypeUsageKeyword"
+ symbol="ObjectVariantMarshallingToNative"
+ task="DynamicTypeUsage"
+ opcode="ObjectVariantMarshallingToNative"
+ message="$(string.PrivatePublisher.ObjectVariantMarshallingMessage)" />
+
+ <event value="408" version="0" level="win:Informational" template="DynamicTypeUseTwoParametersPrivate"
+ keywords="DynamicTypeUsageKeyword"
+ symbol="GetTypeFromGUID"
+ task="DynamicTypeUsage"
+ opcode="GetTypeFromGUID"
+ message="$(string.PrivatePublisher.GetTypeFromGUIDMessage)" />
+
+ <event value="409" version="0" level="win:Informational" template="DynamicTypeUseTwoParametersPrivate"
+ keywords="DynamicTypeUsageKeyword"
+ symbol="GetTypeFromProgID"
+ task="DynamicTypeUsage"
+ opcode="GetTypeFromProgID"
+ message="$(string.PrivatePublisher.GetTypeFromProgIDMessage)" />
+
+ <event value="410" version="0" level="win:Informational" template="DynamicTypeUseTwoParametersPrivate"
+ keywords="DynamicTypeUsageKeyword"
+ symbol="ConvertToCallbackEtw"
+ task="DynamicTypeUsage"
+ opcode="ConvertToCallbackEtw"
+ message="$(string.PrivatePublisher.ConvertToCallbackMessage)" />
+
+ <event value="411" version="0" level="win:Informational" template="DynamicTypeUseNoParametersPrivate"
+ keywords="DynamicTypeUsageKeyword"
+ symbol="BeginCreateManagedReference"
+ task="DynamicTypeUsage"
+ opcode="BeginCreateManagedReference"
+ message="$(string.PrivatePublisher.BeginCreateManagedReferenceMessage)" />
+
+ <event value="412" version="0" level="win:Informational" template="DynamicTypeUseNoParametersPrivate"
+ keywords="DynamicTypeUsageKeyword"
+ symbol="EndCreateManagedReference"
+ task="DynamicTypeUsage"
+ opcode="EndCreateManagedReference"
+ message="$(string.PrivatePublisher.EndCreateManagedReferenceMessage)" />
+
+ <event value="413" version="0" level="win:Informational" template="DynamicTypeUseStringAndIntPrivate"
+ keywords="DynamicTypeUsageKeyword"
+ symbol="ObjectVariantMarshallingToManaged"
+ task="DynamicTypeUsage"
+ opcode="ObjectVariantMarshallingToManaged"
+ message="$(string.PrivatePublisher.ObjectVariantMarshallingMessage)" />
+ </events>
+ </provider>
+
+ </events>
+ </instrumentation>
+
+ <localization>
+ <resources culture="en-US">
+ <stringTable>
+ <!-- Message Strings -->
+ <!-- Event Messages -->
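+ <!-- In these message strings, %1 through %N substitute the Nth payload
+ field of the event's template, and %n renders as a line break; e.g. the
+ GCStart_V1 message below, given Count=42 and Depth=2, renders as
+ "Count=42;" on one line followed by "Depth=2;" on the next. -->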
+ <string id="RuntimePublisher.GCStartEventMessage" value="Count=%1;%nReason=%2" />
+ <string id="RuntimePublisher.GCStart_V1EventMessage" value="Count=%1;%nDepth=%2;%nReason=%3;%nType=%4;%nClrInstanceID=%5" />
+ <string id="RuntimePublisher.GCStart_V2EventMessage" value="Count=%1;%nDepth=%2;%nReason=%3;%nType=%4;%nClrInstanceID=%5;%nClientSequenceNumber=%6" />
+ <string id="RuntimePublisher.GCEndEventMessage" value="Count=%1;%nDepth=%2" />
+ <string id="RuntimePublisher.GCEnd_V1EventMessage" value="Count=%1;%nDepth=%2;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.GCHeapStatsEventMessage" value="GenerationSize0=%1;%nTotalPromotedSize0=%2;%nGenerationSize1=%3;%nTotalPromotedSize1=%4;%nGenerationSize2=%5;%nTotalPromotedSize2=%6;%nGenerationSize3=%7;%nTotalPromotedSize3=%8;%nFinalizationPromotedSize=%9;%nFinalizationPromotedCount=%10;%nPinnedObjectCount=%11;%nSinkBlockCount=%12;%nGCHandleCount=%13" />
+ <string id="RuntimePublisher.GCHeapStats_V1EventMessage" value="GenerationSize0=%1;%nTotalPromotedSize0=%2;%nGenerationSize1=%3;%nTotalPromotedSize1=%4;%nGenerationSize2=%5;%nTotalPromotedSize2=%6;%nGenerationSize3=%7;%nTotalPromotedSize3=%8;%nFinalizationPromotedSize=%9;%nFinalizationPromotedCount=%10;%nPinnedObjectCount=%11;%nSinkBlockCount=%12;%nGCHandleCount=%13;%nClrInstanceID=%14" />
+ <string id="RuntimePublisher.GCCreateSegmentEventMessage" value="Address=%1;%nSize=%2;%nType=%3" />
+ <string id="RuntimePublisher.GCCreateSegment_V1EventMessage" value="Address=%1;%nSize=%2;%nType=%3;%nClrInstanceID=%4" />
+ <string id="RuntimePublisher.GCFreeSegmentEventMessage" value="Address=%1" />
+ <string id="RuntimePublisher.GCFreeSegment_V1EventMessage" value="Address=%1;%nClrInstanceID=%2" />
+ <string id="RuntimePublisher.GCRestartEEBeginEventMessage" value="NONE" />
+ <string id="RuntimePublisher.GCRestartEEBegin_V1EventMessage" value="ClrInstanceID=%1" />
+ <string id="RuntimePublisher.GCRestartEEEndEventMessage" value="NONE" />
+ <string id="RuntimePublisher.GCRestartEEEnd_V1EventMessage" value="ClrInstanceID=%1" />
+ <string id="RuntimePublisher.GCSuspendEEEventMessage" value="Reason=%1" />
+ <string id="RuntimePublisher.GCSuspendEE_V1EventMessage" value="Reason=%1;%nCount=%2;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.GCSuspendEEEndEventMessage" value="NONE" />
+ <string id="RuntimePublisher.GCSuspendEEEnd_V1EventMessage" value="ClrInstanceID=%1" />
+ <string id="RuntimePublisher.GCAllocationTickEventMessage" value="Amount=%1;%nKind=%2" />
+ <string id="RuntimePublisher.GCAllocationTick_V1EventMessage" value="Amount=%1;%nKind=%2;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.GCAllocationTick_V2EventMessage" value="Amount=%1;%nKind=%2;%nClrInstanceID=%3;Amount64=%4;%nTypeID=%5;%nTypeName=%6;%nHeapIndex=%7" />
+ <string id="RuntimePublisher.GCAllocationTick_V3EventMessage" value="Amount=%1;%nKind=%2;%nClrInstanceID=%3;Amount64=%4;%nTypeID=%5;%nTypeName=%6;%nHeapIndex=%7;%nAddress=%8" />
+ <string id="RuntimePublisher.GCCreateConcurrentThreadEventMessage" value="NONE" />
+ <string id="RuntimePublisher.GCCreateConcurrentThread_V1EventMessage" value="ClrInstanceID=%1" />
+ <string id="RuntimePublisher.GCTerminateConcurrentThreadEventMessage" value="NONE" />
+ <string id="RuntimePublisher.GCTerminateConcurrentThread_V1EventMessage" value="ClrInstanceID=%1" />
+ <string id="RuntimePublisher.GCFinalizersEndEventMessage" value="Count=%1" />
+ <string id="RuntimePublisher.GCFinalizersEnd_V1EventMessage" value="Count=%1;%nClrInstanceID=%2" />
+ <string id="RuntimePublisher.GCFinalizersBeginEventMessage" value="NONE" />
+ <string id="RuntimePublisher.GCFinalizersBegin_V1EventMessage" value="ClrInstanceID=%1" />
+ <string id="RuntimePublisher.BulkTypeEventMessage" value="Count=%1;%nClrInstanceID=%2" />
+ <string id="RuntimePublisher.GCBulkRootEdgeEventMessage" value="ClrInstanceID=%1;%nIndex=%2;%nCount=%3" />
+ <string id="RuntimePublisher.GCBulkRootCCWEventMessage" value="ClrInstanceID=%1;%nIndex=%2;%nCount=%3" />
+ <string id="RuntimePublisher.GCBulkRCWEventMessage" value="ClrInstanceID=%1;%nIndex=%2;%nCount=%3" />
+ <string id="RuntimePublisher.GCBulkRootStaticVarEventMessage" value="ClrInstanceID=%1;%nIndex=%2;%nCount=%3" />
+ <string id="RuntimePublisher.GCBulkRootConditionalWeakTableElementEdgeEventMessage" value="ClrInstanceID=%1;%nIndex=%2;%nCount=%3" />
+ <string id="RuntimePublisher.GCBulkNodeEventMessage" value="ClrInstanceID=%1;%nIndex=%2;%nCount=%3" />
+ <string id="RuntimePublisher.GCBulkEdgeEventMessage" value="ClrInstanceID=%1;%nIndex=%2;%nCount=%3" />
+ <string id="RuntimePublisher.GCSampledObjectAllocationHighEventMessage" value="High:ClrInstanceID=%1;%nAddress=%2;%nTypeID=%3;%nObjectCountForTypeSample=%4;%nTotalSizeForTypeSample=%5" />
+ <string id="RuntimePublisher.GCSampledObjectAllocationLowEventMessage" value="Low:ClrInstanceID=%1;%nAddress=%2;%nTypeID=%3;%nObjectCountForTypeSample=%4;%nTotalSizeForTypeSample=%5" />
+ <string id="RuntimePublisher.GCBulkSurvivingObjectRangesEventMessage" value="ClrInstanceID=%1;%nIndex=%2;%nCount=%3" />
+ <string id="RuntimePublisher.GCBulkMovedObjectRangesEventMessage" value="ClrInstanceID=%1;%nIndex=%2;%nCount=%3" />
+ <string id="RuntimePublisher.GCGenerationRangeEventMessage" value="ClrInstanceID=%1;%nGeneration=%2;%nRangeStart=%3;%nRangeUsedLength=%4;%nRangeReservedLength=%5" />
+ <string id="RuntimePublisher.GCMarkStackRootsEventMessage" value="HeapNum=%1;%nClrInstanceID=%2"/>
+ <string id="RuntimePublisher.GCMarkFinalizeQueueRootsEventMessage" value="HeapNum=%1;%nClrInstanceID=%2"/>
+ <string id="RuntimePublisher.GCMarkHandlesEventMessage" value="HeapNum=%1;%nClrInstanceID=%2"/>
+ <string id="RuntimePublisher.GCMarkOlderGenerationRootsEventMessage" value="HeapNum=%1;%nClrInstanceID=%2"/>
+ <string id="RuntimePublisher.FinalizeObjectEventMessage" value="TypeID=%1;%nObjectID=%2;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.GCTriggeredEventMessage" value="Reason=%1" />
+ <string id="RuntimePublisher.PinObjectAtGCTimeEventMessage" value="HandleID=%1;%nObjectID=%2;%nObjectSize=%3;%nTypeName=%4;%n;%nClrInstanceID=%5" />
+ <string id="RuntimePublisher.IncreaseMemoryPressureEventMessage" value="BytesAllocated=%1;%n;%nClrInstanceID=%2" />
+ <string id="RuntimePublisher.DecreaseMemoryPressureEventMessage" value="BytesFreed=%1;%n;%nClrInstanceID=%2" />
+ <string id="RuntimePublisher.WorkerThreadCreateEventMessage" value="WorkerThreadCount=%1;%nRetiredWorkerThreads=%2" />
+ <string id="RuntimePublisher.WorkerThreadTerminateEventMessage" value="WorkerThreadCount=%1;%nRetiredWorkerThreads=%2" />
+ <string id="RuntimePublisher.WorkerThreadRetirementRetireThreadEventMessage" value="WorkerThreadCount=%1;%nRetiredWorkerThreads=%2" />
+ <string id="RuntimePublisher.WorkerThreadRetirementUnretireThreadEventMessage" value="WorkerThreadCount=%1;%nRetiredWorkerThreads=%2" />
+ <string id="RuntimePublisher.ThreadPoolWorkerThreadEventMessage" value="WorkerThreadCount=%1;%nRetiredWorkerThreadCount=%2;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.ThreadPoolWorkerThreadAdjustmentSampleEventMessage" value="Throughput=%1;%nClrInstanceID=%2" />
+ <string id="RuntimePublisher.ThreadPoolWorkerThreadAdjustmentAdjustmentEventMessage" value="AverageThroughput=%1;%nNewWorkerThreadCount=%2;%nReason=%3;%nClrInstanceID=%4" />
+ <string id="RuntimePublisher.ThreadPoolWorkerThreadAdjustmentStatsEventMessage" value="Duration=%1;%nThroughput=%2;%nThreadWave=%3;%nThroughputWave=%4;%nThroughputErrorEstimate=%5;%nAverageThroughputErrorEstimate=%6;%nThroughputRatio=%7;%nConfidence=%8;%nNewControlSetting=%9;%nNewThreadWaveMagnitude=%10;%nClrInstanceID=%11" />
+ <string id="RuntimePublisher.IOThreadCreateEventMessage" value="IOThreadCount=%1;%nRetiredIOThreads=%2" />
+ <string id="RuntimePublisher.IOThreadCreate_V1EventMessage" value="IOThreadCount=%1;%nRetiredIOThreads=%2;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.IOThreadTerminateEventMessage" value="IOThreadCount=%1;%nRetiredIOThreads=%2" />
+ <string id="RuntimePublisher.IOThreadTerminate_V1EventMessage" value="IOThreadCount=%1;%nRetiredIOThreads=%2;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.IOThreadRetirementRetireThreadEventMessage" value="IOThreadCount=%1;%nRetiredIOThreads=%2" />
+ <string id="RuntimePublisher.IOThreadRetirementRetireThread_V1EventMessage" value="IOThreadCount=%1;%nRetiredIOThreads=%2;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.IOThreadRetirementUnretireThreadEventMessage" value="IOThreadCount=%1;%nRetiredIOThreads=%2" />
+ <string id="RuntimePublisher.IOThreadRetirementUnretireThread_V1EventMessage" value="IOThreadCount=%1;%nRetiredIOThreads=%2;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.ThreadPoolSuspendSuspendThreadEventMessage" value="ClrThreadID=%1;%nCPUUtilization=%2" />
+ <string id="RuntimePublisher.ThreadPoolSuspendResumeThreadEventMessage" value="ClrThreadID=%1;%nCPUUtilization=%2" />
+ <string id="RuntimePublisher.ThreadPoolWorkingThreadCountEventMessage" value="Count=%1;%nClrInstanceID=%2" />
+ <string id="RuntimePublisher.ThreadPoolEnqueueEventMessage" value="WorkID=%1;%nClrInstanceID=%2" />
+ <string id="RuntimePublisher.ThreadPoolDequeueEventMessage" value="WorkID=%1;%nClrInstanceID=%2" />
+ <string id="RuntimePublisher.ThreadPoolIOEnqueueEventMessage" value="WorkID=%1;%nMultiDequeues=%4%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.ThreadPoolIOEnqueue_V1EventMessage" value="WorkID=%1;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.ThreadPoolIODequeueEventMessage" value="WorkID=%1;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.ThreadPoolIOPackEventMessage" value="WorkID=%1;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.ThreadCreatingEventMessage" value="ID=%1;%nClrInstanceID=%s" />
+ <string id="RuntimePublisher.ThreadRunningEventMessage" value="ID=%1;%nClrInstanceID=%s" />
+ <string id="RuntimePublisher.ExceptionExceptionThrownEventMessage" value="NONE" />
+ <string id="RuntimePublisher.ExceptionExceptionThrown_V1EventMessage" value="ExceptionType=%1;%nExceptionMessage=%2;%nExceptionEIP=%3;%nExceptionHRESULT=%4;%nExceptionFlags=%5;%nClrInstanceID=%6" />
+ <string id="RuntimePublisher.ContentionStartEventMessage" value="NONE" />
+ <string id="RuntimePublisher.ContentionStart_V1EventMessage" value="ContentionFlags=%1;%nClrInstanceID=%2"/>
+ <string id="RuntimePublisher.ContentionStopEventMessage" value="ContentionFlags=%1;%nClrInstanceID=%2"/>
+ <string id="RuntimePublisher.DCStartCompleteEventMessage" value="NONE" />
+ <string id="RuntimePublisher.DCEndCompleteEventMessage" value="NONE" />
+ <string id="RuntimePublisher.MethodDCStartEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6" />
+ <string id="RuntimePublisher.MethodDCEndEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6" />
+ <string id="RuntimePublisher.MethodDCStartVerboseEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nMethodNamespace=%7;%nMethodName=%8;%nMethodSignature=%9" />
+ <string id="RuntimePublisher.MethodDCEndVerboseEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nMethodNamespace=%7;%nMethodName=%8;%nMethodSignature=%9" />
+ <string id="RuntimePublisher.MethodLoadEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6" />
+ <string id="RuntimePublisher.MethodLoad_V1EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nClrInstanceID=%7" />
+ <string id="RuntimePublisher.MethodLoad_V2EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nClrInstanceID=%7;%nReJITID=%8" />
+ <string id="RuntimePublisher.MethodLoadVerboseEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nMethodNamespace=%7;%nMethodName=%8;%nMethodSignature=%9" />
+ <string id="RuntimePublisher.MethodLoadVerbose_V1EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nMethodNamespace=%7;%nMethodName=%8;%nMethodSignature=%9;%nClrInstanceID=%10" />
+ <string id="RuntimePublisher.MethodLoadVerbose_V2EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nMethodNamespace=%7;%nMethodName=%8;%nMethodSignature=%9;%nClrInstanceID=%10;%nReJITID=%11" />
+ <string id="RuntimePublisher.MethodUnloadEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6" />
+ <string id="RuntimePublisher.MethodUnload_V1EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nClrInstanceID=%7" />
+ <string id="RuntimePublisher.MethodUnload_V2EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nClrInstanceID=%7;%nReJITID=%8" />
+ <string id="RuntimePublisher.MethodUnloadVerboseEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nMethodNamespace=%7;%nMethodName=%8;%nMethodSignature=%9" />
+ <string id="RuntimePublisher.MethodUnloadVerbose_V1EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nMethodNamespace=%7;%nMethodName=%8;%nMethodSignature=%9;%nClrInstanceID=%10" />
+ <string id="RuntimePublisher.MethodUnloadVerbose_V2EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nMethodNamespace=%7;%nMethodName=%8;%nMethodSignature=%9;%nClrInstanceID=%10;%nReJITID=%11" />
+ <string id="RuntimePublisher.MethodJittingStartedEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodToken=%3;%nMethodILSize=%4;%nMethodNamespace=%5;%nMethodName=%6;%nMethodSignature=%7" />
+ <string id="RuntimePublisher.MethodJittingStarted_V1EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodToken=%3;%nMethodILSize=%4;%nMethodNamespace=%5;%nMethodName=%6;%nMethodSignature=%7;%nClrInstanceID=%8" />
+ <string id="RuntimePublisher.MethodILToNativeMapEventMessage" value="MethodID=%1;%nReJITID=%2;%nMethodExtent=%3;%nCountOfMapEntries=%4;%nClrInstanceID=%5" />
+ <string id="RuntimePublisher.DomainModuleLoadEventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nAppDomainID=%3;%nModuleFlags=%4;%nModuleILPath=%5;ModuleNativePath=%6" />
+ <string id="RuntimePublisher.DomainModuleLoad_V1EventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nAppDomainID=%3;%nModuleFlags=%4;%nModuleILPath=%5;ModuleNativePath=%6;%nClrInstanceID=%7" />
+ <string id="RuntimePublisher.DomainModuleUnloadEventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nAppDomainID=%3;%nModuleFlags=%4;%nModuleILPath=%5;ModuleNativePath=%6" />
+ <string id="RuntimePublisher.DomainModuleUnload_V1EventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nAppDomainID=%3;%nModuleFlags=%4;%nModuleILPath=%5;ModuleNativePath=%6;%nClrInstanceID=%7" />
+ <string id="RuntimePublisher.ModuleDCStartEventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nModuleFlags=%3;ModuleILPath=%4;%nModuleNativePath=%5" />
+ <string id="RuntimePublisher.ModuleDCEndEventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nModuleFlags=%3;ModuleILPath=%4;%nModuleNativePath=%5" />
+ <string id="RuntimePublisher.ModuleLoadEventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nModuleFlags=%3;ModuleILPath=%4;%nModuleNativePath=%5" />
+ <string id="RuntimePublisher.ModuleLoad_V1EventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nModuleFlags=%3;%nModuleILPath=%4;%nModuleNativePath=%5;%nClrInstanceID=%6" />
+ <string id="RuntimePublisher.ModuleLoad_V2EventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nModuleFlags=%3;%nModuleILPath=%4;%nModuleNativePath=%5;%nClrInstanceID=%6;%nManagedPdbSignature=%7;%nManagedPdbAge=%8;%nManagedPdbBuildPath=%9;%nNativePdbSignature=%10;%nNativePdbAge=%11;%nNativePdbBuildPath=%12" />
+ <string id="RuntimePublisher.ModuleUnloadEventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nModuleFlags=%3;ModuleILPath=%4;%nModuleNativePath=%5" />
+ <string id="RuntimePublisher.ModuleUnload_V1EventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nModuleFlags=%3;%nModuleILPath=%4;%nModuleNativePath=%5;%nClrInstanceID=%6" />
+ <string id="RuntimePublisher.ModuleUnload_V2EventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nModuleFlags=%3;%nModuleILPath=%4;%nModuleNativePath=%5;%nClrInstanceID=%6;%nManagedPdbSignature=%7;%nManagedPdbAge=%8;%nManagedPdbBuildPath=%9;%nNativePdbSignature=%10;%nNativePdbAge=%11;%nNativePdbBuildPath=%12" />
+ <string id="RuntimePublisher.AssemblyLoadEventMessage" value="AssemblyID=%1;%nAppDomainID=%2;%nAssemblyFlags=%3;%nFullyQualifiedAssemblyName=%4" />
+ <string id="RuntimePublisher.AssemblyLoad_V1EventMessage" value="AssemblyID=%1;%nAppDomainID=%2;%nAssemblyFlags=%3;%nFullyQualifiedAssemblyName=%4;%nClrInstanceID=%5" />
+ <string id="RuntimePublisher.AssemblyUnloadEventMessage" value="AssemblyID=%1;%nAppDomainID=%2;%nAssemblyFlags=%3;%nFullyQualifiedAssemblyName=%4" />
+ <string id="RuntimePublisher.AssemblyUnload_V1EventMessage" value="AssemblyID=%1;%nAppDomainID=%2;%nAssemblyFlags=%3;%nFullyQualifiedAssemblyName=%4;%nClrInstanceID=%5" />
+ <string id="RuntimePublisher.AppDomainLoadEventMessage" value="AppDomainID=%1;%nAppDomainFlags=%2;%nAppDomainName=%3" />
+ <string id="RuntimePublisher.AppDomainLoad_V1EventMessage" value="AppDomainID=%1;%nAppDomainFlags=%2;%nAppDomainName=%3;%nAppDomainIndex=%4;%nClrInstanceID=%5" />
+ <string id="RuntimePublisher.AppDomainUnloadEventMessage" value="AppDomainID=%1;%nAppDomainFlags=%2;%nAppDomainName=%3" />
+ <string id="RuntimePublisher.AppDomainUnload_V1EventMessage" value="AppDomainID=%1;%nAppDomainFlags=%2;%nAppDomainName=%3;%nAppDomainIndex=%4;%nClrInstanceID=%5" />
+ <string id="RuntimePublisher.StackEventMessage" value="ClrInstanceID=%1;%nReserved1=%2;%nReserved2=%3;%nFrameCount=%4;%nStack=%5" />
+ <string id="RuntimePublisher.AppDomainMemAllocatedEventMessage" value="AppDomainID=%1;%nAllocated=%2;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.AppDomainMemSurvivedEventMessage" value="AppDomainID=%1;%nSurvived=%2;%nProcessSurvived=%3;%nClrInstanceID=%4" />
+ <string id="RuntimePublisher.ThreadCreatedEventMessage" value="ManagedThreadID=%1;%nAppDomainID=%2;%nFlags=%3;%nManagedThreadIndex=%4;%nOSThreadID=%5;%nClrInstanceID=%6" />
+ <string id="RuntimePublisher.ThreadTerminatedEventMessage" value="ManagedThreadID=%1;%nAppDomainID=%2;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.ThreadDomainEnterEventMessage" value="ManagedThreadID=%1;%nAppDomainID=%2;%nClrInstanceID=%3" />
+ <string id="RuntimePublisher.ILStubGeneratedEventMessage" value="ClrInstanceID=%1;%nModuleID=%2;%nStubMethodID=%3;%nStubFlags=%4;%nManagedInteropMethodToken=%5;%nManagedInteropMethodNamespace=%6;%nManagedInteropMethodName=%7;%nManagedInteropMethodSignature=%8;%nNativeMethodSignature=%9;%nStubMethodSignature=%10;%nStubMethodILCode=%11" />
+ <string id="RuntimePublisher.ILStubCacheHitEventMessage" value="ClrInstanceID=%1;%nModuleID=%2;%nStubMethodID=%3;%nManagedInteropMethodToken=%4;%nManagedInteropMethodNamespace=%5;%nManagedInteropMethodName=%6;%nManagedInteropMethodSignature=%7" />
+ <string id="RuntimePublisher.StrongNameVerificationStartEventMessage" value="VerificationFlags=%1;%nErrorCode=%2;%nFullyQualifiedAssemblyName=%3"/>
+ <string id="RuntimePublisher.StrongNameVerificationStart_V1EventMessage" value="VerificationFlags=%1;%nErrorCode=%2;%nFullyQualifiedAssemblyName=%3;%nClrInstanceID=%4"/>
+ <string id="RuntimePublisher.StrongNameVerificationEndEventMessage" value="VerificationFlags=%1;%nErrorCode=%2;%nFullyQualifiedAssemblyName=%3"/>
+ <string id="RuntimePublisher.StrongNameVerificationEnd_V1EventMessage" value="VerificationFlags=%1;%nErrorCode=%2;%nFullyQualifiedAssemblyName=%3;%nClrInstanceID=%4"/>
+ <string id="RuntimePublisher.AuthenticodeVerificationStartEventMessage" value="VerificationFlags=%1;%nErrorCode=%2;%nModulePath=%3"/>
+ <string id="RuntimePublisher.AuthenticodeVerificationStart_V1EventMessage" value="VerificationFlags=%1;%nErrorCode=%2;%nModulePath=%3;%nClrInstanceID=%4"/>
+ <string id="RuntimePublisher.AuthenticodeVerificationEndEventMessage" value="VerificationFlags=%1;%nErrorCode=%2;%nModulePath=%3"/>
+ <string id="RuntimePublisher.AuthenticodeVerificationEnd_V1EventMessage" value="VerificationFlags=%1;%nErrorCode=%2;%nModulePath=%3;%nClrInstanceID=%4"/>
+ <string id="RuntimePublisher.EEStartupStartEventMessage" value="VerificationFlags=%1;%nErrorCode=%2;%nModulePath=%3;%nClrInstanceID=%4"/>
+ <string id="RuntimePublisher.RuntimeInformationEventMessage" value="ClrInstanceID=%1;%nSKU=%2;%nBclMajorVersion=%3;%nBclMinorVersion=%4;%nBclBuildNumber=%5;%nBclQfeNumber=%6;%nVMMajorVersion=%7;%nVMMinorVersion=%8;%nVMBuildNumber=%9;%nVMQfeNumber=%10;%nStartupFlags=%11;%nStartupMode=%12;%nCommandLine=%13;%nComObjectGUID=%14;%nRuntimeDllPath=%15"/>
+ <string id="RuntimePublisher.MethodJitInliningFailedEventMessage" value="MethodBeingCompiledNamespace=%1;%nMethodBeingCompiledName=%2;%nMethodBeingCompiledNameSignature=%3;%nInlinerNamespace=%4;%nInlinerName=%5;%nInlinerNameSignature=%6;%nInlineeNamespace=%7;%nInlineeName=%8;%nInlineeNameSignature=%9;%nFailAlways=%10;%nFailReason=%11;%nClrInstanceID=%12" />
+ <string id="RuntimePublisher.MethodJitInliningSucceededEventMessage" value="MethodBeingCompiledNamespace=%1;%nMethodBeingCompiledName=%2;%nMethodBeingCompiledNameSignature=%3;%nInlinerNamespace=%4;%nInlinerName=%5;%nInlinerNameSignature=%6;%nInlineeNamespace=%7;%nInlineeName=%8;%nInlineeNameSignature=%9;%nClrInstanceID=%10" />
+ <string id="RuntimePublisher.MethodJitTailCallFailedEventMessage" value="MethodBeingCompiledNamespace=%1;%nMethodBeingCompiledName=%2;%nMethodBeingCompiledNameSignature=%3;%nCallerNamespace=%4;%nCallerName=%5;%nCallerNameSignature=%6;%nCalleeNamespace=%7;%nCalleeName=%8;%nCalleeNameSignature=%9;%nTailPrefix=%10;%nFailReason=%11;%nClrInstanceID=%12" />
+ <string id="RuntimePublisher.MethodJitTailCallSucceededEventMessage" value="MethodBeingCompiledNamespace=%1;%nMethodBeingCompiledName=%2;%nMethodBeingCompiledNameSignature=%3;%nCallerNamespace=%4;%nCallerName=%5;%nCallerNameSignature=%6;%nCalleeNamespace=%7;%nCalleeName=%8;%nCalleeNameSignature=%9;%nTailPrefix=%10;%nTailCallType=%11;%nClrInstanceID=%12" />
+ <string id="RuntimePublisher.SetGCHandleEventMessage" value="HandleID=%1;%nObjectID=%2;%nKind=%3;%nGeneration=%4;%nAppDomainID=%5;%nClrInstanceID=%6" />
+ <string id="RuntimePublisher.DestroyGCHandleEventMessage" value="HandleID=%1;%nClrInstanceID=%2" />
+ <string id="RundownPublisher.MethodDCStartEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6" />
+ <string id="RundownPublisher.MethodDCStart_V1EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nClrInstanceID=%7" />
+ <string id="RundownPublisher.MethodDCStart_V2EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nClrInstanceID=%7;%nReJITID=%8" />
+ <string id="RuntimePublisher.ModuleRangeLoadEventMessage" value="ClrInstanceID=%1;%ModuleID=%2;%nRangeBegin=%3;%nRangeSize=%4;%nRangeType=%5" />
+ <string id="RundownPublisher.MethodDCEndEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6" />
+ <string id="RundownPublisher.MethodDCEnd_V1EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nClrInstanceID=%7" />
+ <string id="RundownPublisher.MethodDCEnd_V2EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nClrInstanceID=%7;%nReJITID=%8" />
+ <string id="RundownPublisher.MethodDCStartVerboseEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nMethodNamespace=%7;%nMethodName=%8;%nMethodSignature=%9" />
+ <string id="RundownPublisher.MethodDCStartVerbose_V1EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nMethodNamespace=%7;%nMethodName=%8;%nMethodSignature=%9;%nClrInstanceID=%10" />
+ <string id="RundownPublisher.MethodDCStartVerbose_V2EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nMethodNamespace=%7;%nMethodName=%8;%nMethodSignature=%9;%nClrInstanceID=%10;%nReJITID=%11" />
+ <string id="RundownPublisher.MethodDCEndVerboseEventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nMethodNamespace=%7;%nMethodName=%8;%nMethodSignature=%9" />
+ <string id="RundownPublisher.MethodDCEndVerbose_V1EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nMethodNamespace=%7;%nMethodName=%8;%nMethodSignature=%9;%nClrInstanceID=%10" />
+ <string id="RundownPublisher.MethodDCEndVerbose_V2EventMessage" value="MethodID=%1;%nModuleID=%2;%nMethodStartAddress=%3;%nMethodSize=%4;%nMethodToken=%5;%nMethodFlags=%6;%nMethodNamespace=%7;%nMethodName=%8;%nMethodSignature=%9;%nClrInstanceID=%10;%nReJITID=%11" />
+ <string id="RundownPublisher.MethodDCStartILToNativeMapEventMessage" value="MethodID=%1;%nReJITID=%2;%nMethodExtent=%3;%nCountOfMapEntries=%4;%nClrInstanceID=%5" />
+ <string id="RundownPublisher.MethodDCEndILToNativeMapEventMessage" value="MethodID=%1;%nReJITID=%2;%nMethodExtent=%3;%nCountOfMapEntries=%4;%nClrInstanceID=%5" />
+ <string id="RundownPublisher.DomainModuleDCStartEventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nAppDomainID=%3;%nModuleFlags=%4;ModuleILPath=%5;ModuleNativePath=%6" />
+ <string id="RundownPublisher.DomainModuleDCStart_V1EventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nAppDomainID=%3;%nModuleFlags=%4;ModuleILPath=%5;ModuleNativePath=%6;%nClrInstanceID=%7" />
+ <string id="RundownPublisher.DomainModuleDCEndEventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nAppDomainID=%3;%nModuleFlags=%4;ModuleILPath=%5;ModuleNativePath=%6" />
+ <string id="RundownPublisher.DomainModuleDCEnd_V1EventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nAppDomainID=%3;%nModuleFlags=%4;ModuleILPath=%5;ModuleNativePath=%6;%nClrInstanceID=%7" />
+ <string id="RundownPublisher.ModuleDCStartEventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nModuleFlags=%3;ModuleILPath=%4;ModuleNativePath=%5" />
+ <string id="RundownPublisher.ModuleDCStart_V1EventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nModuleFlags=%3;ModuleILPath=%4;ModuleNativePath=%5;%nClrInstanceID=%6" />
+ <string id="RundownPublisher.ModuleDCStart_V2EventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nModuleFlags=%3;%nModuleILPath=%4;%nModuleNativePath=%5;%nClrInstanceID=%6;%nManagedPdbSignature=%7;%nManagedPdbAge=%8;%nManagedPdbBuildPath=%9;%nNativePdbSignature=%10;%nNativePdbAge=%11;%nNativePdbBuildPath=%12" />
+ <string id="RundownPublisher.ModuleDCEndEventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nModuleFlags=%3;ModuleILPath=%4;ModuleNativePath=%5" />
+ <string id="RundownPublisher.ModuleDCEnd_V1EventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nModuleFlags=%3;ModuleILPath=%4;ModuleNativePath=%5;%nClrInstanceID=%6" />
+ <string id="RundownPublisher.ModuleDCEnd_V2EventMessage" value="ModuleID=%1;%nAssemblyID=%2;%nModuleFlags=%3;%nModuleILPath=%4;%nModuleNativePath=%5;%nClrInstanceID=%6;%nManagedPdbSignature=%7;%nManagedPdbAge=%8;%nManagedPdbBuildPath=%9;%nNativePdbSignature=%10;%nNativePdbAge=%11;%nNativePdbBuildPath=%12" />
+ <string id="RundownPublisher.AssemblyDCStartEventMessage" value="AssemblyID=%1;%nAppDomainID=%2;%nAssemblyFlags=%3;FullyQualifiedAssemblyName=%4" />
+ <string id="RundownPublisher.AssemblyDCStart_V1EventMessage" value="AssemblyID=%1;%nAppDomainID=%2;%nAssemblyFlags=%3;FullyQualifiedAssemblyName=%4;%nClrInstanceID=%5" />
+ <string id="RundownPublisher.AssemblyDCEndEventMessage" value="AssemblyID=%1;%nAppDomainID=%2;%nAssemblyFlags=%3;FullyQualifiedAssemblyName=%4" />
+ <string id="RundownPublisher.AssemblyDCEnd_V1EventMessage" value="AssemblyID=%1;%nAppDomainID=%2;%nAssemblyFlags=%3;FullyQualifiedAssemblyName=%4;%nClrInstanceID=%5" />
+ <string id="RundownPublisher.AppDomainDCStartEventMessage" value="AppDomainID=%1;%nAppDomainFlags=%2;%nAppDomainName=%3" />
+ <string id="RundownPublisher.AppDomainDCStart_V1EventMessage" value="AppDomainID=%1;%nAppDomainFlags=%2;%nAppDomainName=%3;%nAppDomainIndex=%4;%nClrInstanceID=%5" />
+ <string id="RundownPublisher.AppDomainDCEndEventMessage" value="AppDomainID=%1;%nAppDomainFlags=%2;%nAppDomainName=%3" />
+ <string id="RundownPublisher.AppDomainDCEnd_V1EventMessage" value="AppDomainID=%1;%nAppDomainFlags=%2;%nAppDomainName=%3;%nAppDomainIndex=%4;%nClrInstanceID=%5" />
+ <string id="RundownPublisher.DCStartCompleteEventMessage" value="ClrInstanceID=%1" />
+ <string id="RundownPublisher.DCEndCompleteEventMessage" value="ClrInstanceID=%1" />
+ <string id="RundownPublisher.DCStartInitEventMessage" value="ClrInstanceID=%1" />
+ <string id="RundownPublisher.DCEndInitEventMessage" value="ClrInstanceID=%1" />
+ <string id="RundownPublisher.ThreadCreatedEventMessage" value="ManagedThreadID=%1;%nAppDomainID=%2;%nFlags=%3;%nManagedThreadIndex=%4;%nOSThreadID=%5;%nClrInstanceID=%6" />
+ <string id="RundownPublisher.RuntimeInformationEventMessage" value="ClrInstanceID=%1;%nSKU=%2;%nBclMajorVersion=%3;%nBclMinorVersion=%4;%nBclBuildNumber=%5;%nBclQfeNumber=%6;%nVMMajorVersion=%7;%nVMMinorVersion=%8;%nVMBuildNumber=%9;%nVMQfeNumber=%10;%nStartupFlags=%11;%nStartupMode=%12;%nCommandLine=%13;%nComObjectGUID=%14;%nRuntimeDllPath=%15"/>
+ <string id="RundownPublisher.StackEventMessage" value="ClrInstanceID=%1;%nReserved1=%2;%nReserved2=%3;%nFrameCount=%4;%nStack=%5" />
+ <string id="RundownPublisher.ModuleRangeDCStartEventMessage" value="ClrInstanceID=%1;%ModuleID=%2;%nRangeBegin=%3;%nRangeSize=%4;%nRangeType=%5" />
+ <string id="RundownPublisher.ModuleRangeDCEndEventMessage" value= "ClrInstanceID=%1;%ModuleID=%2;%nRangeBegin=%3;%nRangeSize=%4;%nRangeType=%5" />
+ <string id="StressPublisher.StressLogEventMessage" value="Facility=%1;%nLevel=%2;%nMessage=%3" />
+ <string id="StressPublisher.StressLog_V1EventMessage" value="Facility=%1;%nLevel=%2;%nMessage=%3;%nClrInstanceID=%4" />
+ <string id="StressPublisher.StackEventMessage" value="ClrInstanceID=%1;%nReserved1=%2;%nReserved2=%3;%nFrameCount=%4;%nStack=%5" />
+ <string id="PrivatePublisher.FailFastEventMessage" value="FailFastUserMessage=%1;%nFailedEIP=%2;%nOSExitCode=%3;%nClrExitCode=%4;%nClrInstanceID=%5" />
+ <string id="PrivatePublisher.FinalizeObjectEventMessage" value="TypeName=%1;%nTypeID=%2;%nObjectID=%3;%nClrInstanceID=%4" />
+ <string id="PrivatePublisher.SetGCHandleEventMessage" value="HandleID=%1;%nObjectID=%2;%n;%nClrInstanceID=%3" />
+ <string id="PrivatePublisher.DestroyGCHandleEventMessage" value="HandleID=%1;%nClrInstanceID=%2" />
+ <string id="PrivatePublisher.PinPlugAtGCTimeEventMessage" value="PlugStart=%1;%nPlugEnd=%2;%n;%nClrInstanceID=%3" />
+ <string id="PrivatePublisher.CCWRefCountChangeEventMessage" value="HandleID=%1;%nObjectID=%2;%nCOMInterfacePointer=%3;%nNewRefCount=%4;%nTypeName=%5;%nOperation=%6;%nClrInstanceID=%7" />
+ <string id="PrivatePublisher.GCDecisionEventMessage" value="DoCompact=%1" />
+ <string id="PrivatePublisher.GCDecision_V1EventMessage" value="DoCompact=%1;%nClrInstanceID=%2" />
+ <string id="PrivatePublisher.GCSettingsEventMessage" value="SegmentSize=%1;%nLargeObjectSegmentSize=%2;%nServerGC=%3"/>
+ <string id="PrivatePublisher.GCSettings_V1EventMessage" value="SegmentSize=%1;%nLargeObjectSegmentSize=%2;%nServerGC=%3;%nClrInstanceID=%4"/>
+ <string id="PrivatePublisher.GCOptimizedEventMessage" value="DesiredAllocation=%1;%nNewAllocation=%2;%nGenerationNumber=%3"/>
+ <string id="PrivatePublisher.GCOptimized_V1EventMessage" value="DesiredAllocation=%1;%nNewAllocation=%2;%nGenerationNumber=%3;%nClrInstanceID=%4"/>
+ <string id="PrivatePublisher.GCPerHeapHistoryEventMessage" value="NONE"/>
+ <string id="PrivatePublisher.GCPerHeapHistory_V1EventMessage" value="ClrInstanceID=%1"/>
+ <string id="PrivatePublisher.GCGlobalHeapEventMessage" value="FinalYoungestDesired=%1;%nNumHeaps=%2;%nCondemnedGeneration=%3;%nGen0ReductionCount=%4;%nReason=%5;%nGlobalMechanisms=%6"/>
+ <string id="PrivatePublisher.GCGlobalHeap_V1EventMessage" value="FinalYoungestDesired=%1;%nNumHeaps=%2;%nCondemnedGeneration=%3;%nGen0ReductionCount=%4;%nReason=%5;%nGlobalMechanisms=%6;%nClrInstanceID=%7"/>
+ <string id="PrivatePublisher.GCJoinEventMessage" value="Heap=%1;%nJoinTime=%2;%nJoinType=%3"/>
+ <string id="PrivatePublisher.GCJoin_V1EventMessage" value="Heap=%1;%nJoinTime=%2;%nJoinType=%3;%nClrInstanceID=%4"/>
+ <string id="PrivatePublisher.GCMarkStackRootsEventMessage" value="HeapNum=%1"/>
+ <string id="PrivatePublisher.GCMarkStackRoots_V1EventMessage" value="HeapNum=%1;%nClrInstanceID=%2"/>
+ <string id="PrivatePublisher.GCMarkFinalizeQueueRootsEventMessage" value="HeapNum=%1"/>
+ <string id="PrivatePublisher.GCMarkFinalizeQueueRoots_V1EventMessage" value="HeapNum=%1;%nClrInstanceID=%2"/>
+ <string id="PrivatePublisher.GCMarkHandlesEventMessage" value="HeapNum=%1"/>
+ <string id="PrivatePublisher.GCMarkHandles_V1EventMessage" value="HeapNum=%1;%nClrInstanceID=%2"/>
+ <string id="PrivatePublisher.GCMarkCardsEventMessage" value="HeapNum=%1"/>
+ <string id="PrivatePublisher.GCMarkCards_V1EventMessage" value="HeapNum=%1;%nClrInstanceID=%2"/>
+ <string id="PrivatePublisher.BGCBeginEventMessage" value="ClrInstanceID=%1"/>
+ <string id="PrivatePublisher.BGC1stNonConEndEventMessage" value="ClrInstanceID=%1"/>
+ <string id="PrivatePublisher.BGC1stConEndEventMessage" value="ClrInstanceID=%1"/>
+ <string id="PrivatePublisher.BGC2ndNonConBeginEventMessage" value="ClrInstanceID=%1"/>
+ <string id="PrivatePublisher.BGC2ndNonConEndEventMessage" value="ClrInstanceID=%1"/>
+ <string id="PrivatePublisher.BGC2ndConBeginEventMessage" value="ClrInstanceID=%1"/>
+ <string id="PrivatePublisher.BGC2ndConEndEventMessage" value="ClrInstanceID=%1"/>
+ <string id="PrivatePublisher.BGCPlanEndEventMessage" value="ClrInstanceID=%1"/>
+ <string id="PrivatePublisher.BGCSweepEndEventMessage" value="ClrInstanceID=%1"/>
+ <string id="PrivatePublisher.BGCDrainMarkEventMessage" value="Objects=%1;%nClrInstanceID=%2"/>
+ <string id="PrivatePublisher.BGCRevisitEventMessage" value="Pages=%1;%nObjects=%2;%nIsLarge=%3;%nClrInstanceID=%4"/>
+ <string id="PrivatePublisher.BGCOverflowEventMessage" value="Min=%1;%nMax=%2;%Objects=%3;%nIsLarge=%4;%nClrInstanceID=%5"/>
+ <string id="PrivatePublisher.BGCAllocWaitEventMessage" value="Reason=%1;%nClrInstanceID=%2"/>
+ <string id="PrivatePublisher.GCFullNotifyEventMessage" value="GenNumber=%1;%nIsAlloc=%2"/>
+ <string id="PrivatePublisher.GCFullNotify_V1EventMessage" value="GenNumber=%1;%nIsAlloc=%2;%nClrInstanceID=%3"/>
+ <string id="PrivatePublisher.StartupEventMessage" value="NONE"/>
+ <string id="PrivatePublisher.Startup_V1EventMessage" value="ClrInstanceID=%1"/>
+ <string id="PrivatePublisher.StackEventMessage" value="ClrInstanceID=%1;%nReserved1=%2;%nReserved2=%3;%nFrameCount=%4;%nStack=%5" />
+ <string id="PrivatePublisher.BindingEventMessage" value="%AppDomainID=%1;%nLoadContextID=%2;%nFromLoaderCache=%3;%nDynamicLoad=%4;%nAssemblyCodebase=%5;%nAssemblyName=%6;%nClrInstanceID=%6"/>
+ <string id="PrivatePublisher.EvidenceGeneratedEventMessage" value="EvidenceType=%1;%nAppDomainID=%2;%nILImage=%3;%nClrInstanceID=%4" />
+ <string id="PrivatePublisher.NgenBinderMessage" value="ClrInstanceID=%1;%nBindingID=%2;%nReason=%3;%nAssembly=%4" />
+ <string id="PrivatePublisher.FusionMessageEventMessage" value="ClrInstanceID=%1;%nMessage=%2;" />
+ <string id="PrivatePublisher.FusionErrorCodeEventMessage" value="ClrInstanceID=%1;%nCategory=%2%nErrorCode=%3" />
+ <string id="PrivatePublisher.ModuleTransparencyComputationStartEventMessage" value="Module=%1;%nAppDomainID=%2;%nClrInstanceID=%3" />
+ <string id="PrivatePublisher.ModuleTransparencyComputationEndEventMessage" value="Module=%1;%nAppDomainID=%2;%nIsAllCritical=%3;%nIsAllTransparent=%4;%nIsTreatAsSafe=%5;%nIsOpportunisticallyCritical=%6;%nSecurityRuleSet=%7;%nClrInstanceID=%8" />
+ <string id="PrivatePublisher.TypeTransparencyComputationStartEventMessage" value="Type=%1;%nModule=%2;%nAppDomainID=%3;%nClrInstanceID=%4" />
+ <string id="PrivatePublisher.TypeTransparencyComputationEndEventMessage" value="Type=%1;%nModule=%2;%nAppDomainID=%3;%nIsAllCritical=%4;%nIsAllTransparent=%5;%nIsCritical=%6;%nIsTreatAsSafe=%7;%nClrInstanceID=%8" />
+ <string id="PrivatePublisher.MethodTransparencyComputationStartEventMessage" value="Method=%1;%nModule=%2;%nAppDomainID=%3;%nClrInstanceID=%4" />
+ <string id="PrivatePublisher.MethodTransparencyComputationEndEventMessage" value="Method=%1;%nModule=%2;%nAppDomainID=%3;%nIsCritical=%4;%nIsTreatAsSafe=%5;%nClrInstanceID=%6" />
+ <string id="PrivatePublisher.FieldTransparencyComputationStartEventMessage" value="Field=%1;%nModule=%2;%nAppDomainID=%3;%nClrInstanceID=%4" />
+ <string id="PrivatePublisher.FieldTransparencyComputationEndEventMessage" value="Field=%1;%nModule=%2;%nAppDomainID=%3;%nIsCritical=%4;%nIsTreatAsSafe=%5;%nClrInstanceID=%6" />
+ <string id="PrivatePublisher.TokenTransparencyComputationStartEventMessage" value="Token=%1;%nModule=%2;%nAppDomainID=%3;%nClrInstanceID=%4" />
+ <string id="PrivatePublisher.TokenTransparencyComputationEndEventMessage" value="Token=%1;%nModule=%2;%nAppDomainID=%3;%nIsCritical=%4;%nIsTreatAsSafe=%5;%nClrInstanceID=%6" />
+
+ <string id="PrivatePublisher.AllocRequestEventMessage" value="LoaderHeapPtr=%1;%nMemoryAddress=%2;%nRequestSize=%3;%nClrInstanceID=%4" />
+ <string id="PrivatePublisher.ModuleRangeLoadEventMessage" value="ClrInstanceID=%1;%ModuleID=%2;%nRangeBegin=%3;%nRangeSize=%4;%nRangeType=%5;%nIBCType=%6;%nSectionType=%7" />
+ <string id="PrivatePublisher.MulticoreJitCommonEventMessage" value="ClrInstanceID=%1;%String1=%2;%nString2=%3;%nInt1=%4;%nInt2=%5;%nInt3=%6" />
+ <string id="PrivatePublisher.MulticoreJitMethodCodeReturnedMessage" value="ClrInstanceID=%1;%nModuleID=%2;%nMethodID=%3" />
+
+ <string id="PrivatePublisher.IInspectableRuntimeClassNameMessage" value="TypeName=%1;%nClrInstanceID=%2" />
+ <string id="PrivatePublisher.WinRTUnboxMessage" value="TypeName=%1;%nObject=%2;%nClrInstanceID=%3" />
+ <string id="PrivatePublisher.CreateRcwMessage" value="TypeName=%1;%nClrInstanceID=%2" />
+ <string id="PrivatePublisher.RcwVarianceMessage" value="RcwTypeName=%1;%nInterface=%2;%nVariantInterface=%3;%nClrInstanceID=%4" />
+ <string id="PrivatePublisher.RCWIEnumerableCastingMessage" value="TypeName=%1;%nSecondType=%2;%nClrInstanceID=%3" />
+ <string id="PrivatePublisher.CreateCCWMessage" value="TypeName=%1;%nClrInstanceID=%2" />
+ <string id="PrivatePublisher.CCWVarianceMessage" value="RcwTypeName=%1;%nInterface=%2;%nVariantInterface=%3;%nClrInstanceID=%4" />
+ <string id="PrivatePublisher.ObjectVariantMarshallingMessage" value="TypeName=%1;%nInt1=%2;%nClrInstanceID=%3" />
+ <string id="PrivatePublisher.GetTypeFromGUIDMessage" value="TypeName=%1;%nSecondType=%2;%nClrInstanceID=%3" />
+ <string id="PrivatePublisher.GetTypeFromProgIDMessage" value="TypeName=%1;%nSecondType=%2;%nClrInstanceID=%3" />
+ <string id="PrivatePublisher.ConvertToCallbackMessage" value="TypeName=%1;%nSecondType=%2;%nClrInstanceID=%3" />
+ <string id="PrivatePublisher.BeginCreateManagedReferenceMessage" value="ClrInstanceID=%1" />
+ <string id="PrivatePublisher.EndCreateManagedReferenceMessage" value="ClrInstanceID=%1" />
+
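+ <!-- Note on the EventMessage strings above: %1-%N are positional inserts
+      that consumers replace with the event's payload fields in template
+      order, and %n renders as a line break. For example,
+      "HandleID=%1;%nClrInstanceID=%2" (DestroyGCHandle) displays roughly as:
+          HandleID=<handle>
+          ClrInstanceID=<instance>
+      Exact rendering depends on the ETW consumer. -->
+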
+ <!-- Task Messages -->
+ <string id="RuntimePublisher.GarbageCollectionTaskMessage" value="GC" />
+ <string id="RuntimePublisher.WorkerThreadCreationTaskMessage" value="WorkerThreadCreationV2" />
+ <string id="RuntimePublisher.WorkerThreadRetirementTaskMessage" value="WorkerThreadRetirementV2" />
+ <string id="RuntimePublisher.IOThreadCreationTaskMessage" value="IOThreadCreation" />
+ <string id="RuntimePublisher.IOThreadRetirementTaskMessage" value="IOThreadRetirement" />
+ <string id="RuntimePublisher.ThreadpoolSuspensionTaskMessage" value="ThreadpoolSuspensionV2" />
+ <string id="RuntimePublisher.ThreadPoolWorkerThreadTaskMessage" value="ThreadPoolWorkerThread" />
+ <string id="RuntimePublisher.ThreadPoolWorkerThreadRetirementTaskMessage" value="ThreadPoolWorkerThreadRetirement" />
+ <string id="RuntimePublisher.ThreadPoolWorkerThreadAdjustmentTaskMessage" value="ThreadPoolWorkerThreadAdjustment" />
+ <string id="RuntimePublisher.ExceptionTaskMessage" value="Exception" />
+ <string id="RuntimePublisher.ContentionTaskMessage" value="Contention" />
+ <string id="RuntimePublisher.MethodTaskMessage" value="Method" />
+ <string id="RuntimePublisher.LoaderTaskMessage" value="Loader" />
+ <string id="RuntimePublisher.StackTaskMessage" value="ClrStack" />
+ <string id="RuntimePublisher.StrongNameVerificationTaskMessage" value="StrongNameVerification" />
+ <string id="RuntimePublisher.AuthenticodeVerificationTaskMessage" value="AuthenticodeVerification" />
+ <string id="RuntimePublisher.AppDomainResourceManagementTaskMessage" value="AppDomainResourceManagement" />
+ <string id="RuntimePublisher.ILStubTaskMessage" value="ILStub" />
+ <string id="RuntimePublisher.EEStartupTaskMessage" value="Runtime" />
+ <string id="RuntimePublisher.PerfTrackTaskMessage" value="ClrPerfTrack" />
+ <string id="RuntimePublisher.TypeTaskMessage" value="Type" />
+ <string id="RuntimePublisher.ThreadPoolWorkingThreadCountTaskMessage" value="ThreadPoolWorkingThreadCount" />
+ <string id="RuntimePublisher.ThreadPoolTaskMessage" value="ThreadPool" />
+ <string id="RuntimePublisher.ThreadTaskMessage" value="Thread" />
+ <string id="RuntimePublisher.DebugIPCEventTaskMessage" value="DebugIPCEvent" />
+ <string id="RuntimePublisher.DebugExceptionProcessingTaskMessage" value="DebugExceptionProcessing" />
+ <string id="RundownPublisher.EEStartupTaskMessage" value="Runtime" />
+ <string id="RundownPublisher.MethodTaskMessage" value="Method" />
+ <string id="RundownPublisher.LoaderTaskMessage" value="Loader" />
+ <string id="RundownPublisher.StackTaskMessage" value="ClrStack" />
+ <string id="RundownPublisher.PerfTrackTaskMessage" value="ClrPerfTrack" />
+ <string id="PrivatePublisher.GarbageCollectionTaskMessage" value="GC" />
+ <string id="PrivatePublisher.StartupTaskMessage" value="Startup"/>
+ <string id="PrivatePublisher.StackTaskMessage" value="ClrStack" />
+ <string id="PrivatePublisher.BindingTaskMessage" value="Binding"/>
+ <string id="PrivatePublisher.EvidenceGeneratedTaskMessage" value="EvidenceGeneration"/>
+ <string id="PrivatePublisher.TransparencyComputationMessage" value="Transparency"/>
+ <string id="PrivatePublisher.NgenBinderTaskMessage" value="NgenBinder" />
+ <string id="PrivatePublisher.FailFastTaskMessage" value="FailFast" />
+
+ <string id="PrivatePublisher.LoaderHeapAllocationPrivateTaskMessage" value="LoaderHeap" />
+ <string id="PrivatePublisher.PerfTrackTaskMessage" value="ClrPerfTrack" />
+ <string id="PrivatePublisher.MulticoreJitTaskMessage" value="ClrMulticoreJit" />
+ <string id="PrivatePublisher.DynamicTypeUsageTaskMessage" value="ClrDynamicTypeUsage" />
+
+ <string id="StressPublisher.StressTaskMessage" value="StressLog" />
+ <string id="StressPublisher.StackTaskMessage" value="ClrStack" />
+
+ <!-- Map Messages -->
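+ <!-- Map messages supply display names for valueMap/bitMap members defined
+      elsewhere in this manifest; when an event field references a map,
+      consumers show these strings instead of the raw numeric value (a
+      ModuleFlags field with the DomainNeutral bit set renders as
+      "DomainNeutral" rather than a hex constant). -->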
+ <string id="RuntimePublisher.AppDomain.DefaultMapMessage" value="Default" />
+ <string id="RuntimePublisher.AppDomain.ExecutableMapMessage" value="Executable" />
+ <string id="RuntimePublisher.AppDomain.SharedMapMessage" value="Shared" />
+ <string id="RuntimePublisher.Assembly.DomainNeutralMapMessage" value="DomainNeutral" />
+ <string id="RuntimePublisher.Assembly.DynamicMapMessage" value="Dynamic" />
+ <string id="RuntimePublisher.Assembly.NativeMapMessage" value="Native" />
+ <string id="RuntimePublisher.Assembly.CollectibleMapMessage" value="Collectible" />
+ <string id="RuntimePublisher.Module.DomainNeutralMapMessage" value="DomainNeutral" />
+ <string id="RuntimePublisher.Module.NativeMapMessage" value="Native" />
+ <string id="RuntimePublisher.Module.DynamicMapMessage" value="Dynamic" />
+ <string id="RuntimePublisher.Module.ManifestMapMessage" value="Manifest" />
+ <string id="RuntimePublisher.Method.DynamicMapMessage" value="Dynamic" />
+ <string id="RuntimePublisher.Method.GenericMapMessage" value="Generic" />
+ <string id="RuntimePublisher.Method.HasSharedGenericCodeMapMessage" value="HasSharedGenericCode" />
+ <string id="RuntimePublisher.Method.JittedMapMessage" value="Jitted" />
+ <string id="RuntimePublisher.GCSegment.SmallObjectHeapMapMessage" value="SmallObjectHeap" />
+ <string id="RuntimePublisher.GCSegment.LargeObjectHeapMapMessage" value="LargeObjectHeap" />
+ <string id="RuntimePublisher.GCSegment.ReadOnlyHeapMapMessage" value="ReadOnlyHeap" />
+ <string id="RuntimePublisher.GCAllocation.SmallMapMessage" value="Small" />
+ <string id="RuntimePublisher.GCAllocation.LargeMapMessage" value="Large" />
+ <string id="RuntimePublisher.GCType.NonConcurrentGCMapMessage" value="NonConcurrentGC" />
+ <string id="RuntimePublisher.GCType.BackgroundGCMapMessage" value="BackgroundGC" />
+ <string id="RuntimePublisher.GCType.ForegroundGCMapMessage" value="ForegroundGC" />
+ <string id="RuntimePublisher.GCReason.AllocSmallMapMessage" value="AllocSmall" />
+ <string id="RuntimePublisher.GCReason.InducedMapMessage" value="Induced" />
+ <string id="RuntimePublisher.GCReason.LowMemoryMapMessage" value="LowMemory" />
+ <string id="RuntimePublisher.GCReason.EmptyMapMessage" value="Empty" />
+ <string id="RuntimePublisher.GCReason.AllocLargeMapMessage" value="AllocLarge" />
+ <string id="RuntimePublisher.GCReason.OutOfSpaceSmallObjectHeapMapMessage" value="OutOfSpaceSmallObjectHeap" />
+ <string id="RuntimePublisher.GCReason.OutOfSpaceLargeObjectHeapMapMessage" value="OutOfSpaceLargeObjectHeap" />
+ <string id="RuntimePublisher.GCReason.InducedNoForceMapMessage" value="InducedNoForce" />
+ <string id="RuntimePublisher.GCReason.StressMapMessage" value="Stress" />
+ <string id="RuntimePublisher.GCReason.InducedLowMemoryMapMessage" value="InducedLowMemory" />
+ <string id="RuntimePublisher.GCSuspendEEReason.SuspendOtherMapMessage" value="SuspendOther" />
+ <string id="RuntimePublisher.GCSuspendEEReason.SuspendForGCMapMessage" value="SuspendForGC" />
+ <string id="RuntimePublisher.GCSuspendEEReason.SuspendForAppDomainShutdownMapMessage" value="SuspendForAppDomainShutdown" />
+ <string id="RuntimePublisher.GCSuspendEEReason.SuspendForCodePitchingMapMessage" value="SuspendForCodePitching" />
+ <string id="RuntimePublisher.GCSuspendEEReason.SuspendForShutdownMapMessage" value="SuspendForShutdown" />
+ <string id="RuntimePublisher.GCSuspendEEReason.SuspendForDebuggerMapMessage" value="SuspendForDebugger" />
+ <string id="RuntimePublisher.GCSuspendEEReason.SuspendForGCPrepMapMessage" value="SuspendForGCPrep" />
+ <string id="RuntimePublisher.GCSuspendEEReason.SuspendForDebuggerSweepMapMessage" value="SuspendForDebuggerSweep" />
+ <string id="RuntimePublisher.StartupMode.ManagedExeMapMessage" value="ManagedExe" />
+ <string id="RuntimePublisher.StartupMode.HostedCLRMapMessage" value="HostedClr" />
+ <string id="RuntimePublisher.StartupMode.IjwDllMapMessage" value="IjwDll" />
+ <string id="RuntimePublisher.StartupMode.ComActivatedMapMessage" value="ComActivated" />
+ <string id="RuntimePublisher.StartupMode.OtherMapMessage" value="Other" />
+ <string id="RuntimePublisher.RuntimeSku.DesktopCLRMapMessage" value="DesktopClr" />
+ <string id="RuntimePublisher.RuntimeSku.CoreCLRMapMessage" value="CoreClr" />
+ <string id="RuntimePublisher.ExceptionThrown.HasInnerExceptionMapMessage" value="HasInnerException" />
+ <string id="RuntimePublisher.ExceptionThrown.NestedMapMessage" value="Nested" />
+ <string id="RuntimePublisher.ExceptionThrown.ReThrownMapMessage" value="ReThrown" />
+ <string id="RuntimePublisher.ExceptionThrown.CorruptedStateMapMessage" value="CorruptedState" />
+ <string id="RuntimePublisher.ExceptionThrown.CLSCompliantMapMessage" value="CLSCompliant" />
+ <string id="RuntimePublisher.ILStubGenerated.ReverseInteropMapMessage" value="ReverseInterop" />
+ <string id="RuntimePublisher.ILStubGenerated.COMInteropMapMessage" value="ComInterop" />
+ <string id="RuntimePublisher.ILStubGenerated.NGenedStubMapMessage" value="NGenedStub" />
+ <string id="RuntimePublisher.ILStubGenerated.DelegateMapMessage" value="Delegate" />
+ <string id="RuntimePublisher.ILStubGenerated.VarArgMapMessage" value="VarArg" />
+ <string id="RuntimePublisher.ILStubGenerated.UnmanagedCalleeMapMessage" value="UnmanagedCallee" />
+ <string id="RuntimePublisher.Contention.ManagedMapMessage" value="Managed" />
+ <string id="RuntimePublisher.Contention.NativeMapMessage" value="Native" />
+ <string id="RuntimePublisher.TailCallType.OptimizedMapMessage" value="OptimizedTailCall" />
+ <string id="RuntimePublisher.TailCallType.RecursiveMapMessage" value="RecursiveLoop" />
+ <string id="RuntimePublisher.TailCallType.HelperMapMessage" value="HelperAssistedTailCall" />
+ <string id="RuntimePublisher.ThreadAdjustmentReason.WarmupMapMessage" value="Warmup" />
+ <string id="RuntimePublisher.ThreadAdjustmentReason.InitializingMapMessage" value="Initializing" />
+ <string id="RuntimePublisher.ThreadAdjustmentReason.RandomMoveMapMessage" value="RandomMove" />
+ <string id="RuntimePublisher.ThreadAdjustmentReason.ClimbingMoveMapMessage" value="ClimbingMove" />
+ <string id="RuntimePublisher.ThreadAdjustmentReason.ChangePointMapMessage" value="ChangePoint" />
+ <string id="RuntimePublisher.ThreadAdjustmentReason.StabilizingMapMessage" value="Stabilizing" />
+ <string id="RuntimePublisher.ThreadAdjustmentReason.StarvationMapMessage" value="Starvation" />
+ <string id="RuntimePublisher.ThreadAdjustmentReason.ThreadTimedOutMapMessage" value="ThreadTimedOut" />
+ <string id="RuntimePublisher.GCRootKind.Stack" value="Stack" />
+ <string id="RuntimePublisher.GCRootKind.Finalizer" value="Finalizer" />
+ <string id="RuntimePublisher.GCRootKind.Handle" value="Handle" />
+ <string id="RuntimePublisher.GCRootKind.Other" value="Other" />
+ <string id="RuntimePublisher.Startup.CONCURRENT_GCMapMessage" value="CONCURRENT_GC" />
+ <string id="RuntimePublisher.Startup.LOADER_OPTIMIZATION_SINGLE_DOMAINMapMessage" value="LOADER_OPTIMIZATION_SINGLE_DOMAIN" />
+ <string id="RuntimePublisher.Startup.LOADER_OPTIMIZATION_MULTI_DOMAINMapMessage" value="LOADER_OPTIMIZATION_MULTI_DOMAIN" />
+ <string id="RuntimePublisher.Startup.LOADER_SAFEMODEMapMessage" value="LOADER_SAFEMODE" />
+ <string id="RuntimePublisher.Startup.LOADER_SETPREFERENCEMapMessage" value="LOADER_SETPREFERENCE" />
+ <string id="RuntimePublisher.Startup.SERVER_GCMapMessage" value="SERVER_GC" />
+ <string id="RuntimePublisher.Startup.HOARD_GC_VMMapMessage" value="HOARD_GC_VM" />
+ <string id="RuntimePublisher.Startup.SINGLE_VERSION_HOSTING_INTERFACEMapMessage" value="SINGLE_VERSION_HOSTING_INTERFACE" />
+ <string id="RuntimePublisher.Startup.LEGACY_IMPERSONATIONMapMessage" value="LEGACY_IMPERSONATION" />
+ <string id="RuntimePublisher.Startup.DISABLE_COMMITTHREADSTACKMapMessage" value="DISABLE_COMMITTHREADSTACK" />
+ <string id="RuntimePublisher.Startup.ALWAYSFLOW_IMPERSONATIONMapMessage" value="ALWAYSFLOW_IMPERSONATION" />
+ <string id="RuntimePublisher.Startup.TRIM_GC_COMMITMapMessage" value="TRIM_GC_COMMIT" />
+ <string id="RuntimePublisher.Startup.ETWMapMessage" value="ETW" />
+ <string id="RuntimePublisher.Startup.SERVER_BUILDMapMessage" value="SERVER_BUILD" />
+ <string id="RuntimePublisher.Startup.ARMMapMessage" value="ARM" />
+ <string id="RuntimePublisher.ModuleRangeTypeMap.ColdRangeMessage" value="ColdRange"/>
+ <string id="RuntimePublisher.TypeFlags.Delegate" value="Delegate"/>
+ <string id="RuntimePublisher.TypeFlags.Finalizable" value="Finalizable"/>
+ <string id="RuntimePublisher.TypeFlags.ExternallyImplementedCOMObject" value="ExternallyImplementedCOMObject"/>
+ <string id="RuntimePublisher.TypeFlags.Array" value="Array"/>
+ <string id="RuntimePublisher.GCRootFlags.Pinning" value="Pinning"/>
+ <string id="RuntimePublisher.GCRootFlags.WeakRef" value="WeakRef"/>
+ <string id="RuntimePublisher.GCRootFlags.Interior" value="Interior"/>
+ <string id="RuntimePublisher.GCRootFlags.RefCounted" value="RefCounted"/>
+ <string id="RuntimePublisher.GCRootStaticVarFlags.ThreadLocal" value="ThreadLocal"/>
+ <string id="RuntimePublisher.GCRootCCWFlags.Strong" value="Strong"/>
+ <string id="RuntimePublisher.GCRootCCWFlags.Pegged" value="Pegged"/>
+ <string id="RundownPublisher.AppDomain.DefaultMapMessage" value="Default" />
+ <string id="RuntimePublisher.ThreadFlags.GCSpecial" value="GCSpecial"/>
+ <string id="RuntimePublisher.ThreadFlags.Finalizer" value="Finalizer"/>
+ <string id="RuntimePublisher.ThreadFlags.ThreadPoolWorker" value="ThreadPoolWorker"/>
+ <string id="RuntimePublisher.GCHandleKind.WeakShortMessage" value="WeakShort" />
+ <string id="RuntimePublisher.GCHandleKind.WeakLongMessage" value="WeakLong" />
+ <string id="RuntimePublisher.GCHandleKind.StrongMessage" value="Strong" />
+ <string id="RuntimePublisher.GCHandleKind.PinnedMessage" value="Pinned" />
+ <string id="RuntimePublisher.GCHandleKind.VariableMessage" value="Variable" />
+ <string id="RuntimePublisher.GCHandleKind.RefCountedMessage" value="RefCounted" />
+ <string id="RuntimePublisher.GCHandleKind.DependentMessage" value="Dependent" />
+ <string id="RuntimePublisher.GCHandleKind.AsyncPinnedMessage" value="AsyncPinned" />
+ <string id="RuntimePublisher.GCHandleKind.SizedRefMessage" value="SizedRef" />
+ <string id="RundownPublisher.AppDomain.ExecutableMapMessage" value="Executable" />
+ <string id="RundownPublisher.AppDomain.SharedMapMessage" value="Shared" />
+ <string id="RundownPublisher.Assembly.DomainNeutralMapMessage" value="DomainNeutral" />
+ <string id="RundownPublisher.Assembly.DynamicMapMessage" value="Dynamic" />
+ <string id="RundownPublisher.Assembly.NativeMapMessage" value="Native" />
+ <string id="RundownPublisher.Assembly.CollectibleMapMessage" value="Collectible" />
+ <string id="RundownPublisher.Module.DomainNeutralMapMessage" value="DomainNeutral" />
+ <string id="RundownPublisher.Module.NativeMapMessage" value="Native" />
+ <string id="RundownPublisher.Module.DynamicMapMessage" value="Dynamic" />
+ <string id="RundownPublisher.Module.ManifestMapMessage" value="Manifest" />
+ <string id="RundownPublisher.Method.DynamicMapMessage" value="Dynamic" />
+ <string id="RundownPublisher.Method.GenericMapMessage" value="Generic" />
+ <string id="RundownPublisher.Method.HasSharedGenericCodeMapMessage" value="HasSharedGenericCode" />
+ <string id="RundownPublisher.Method.JittedMapMessage" value="Jitted" />
+ <string id="RundownPublisher.StartupMode.ManagedExeMapMessage" value="ManagedExe" />
+ <string id="RundownPublisher.StartupMode.HostedCLRMapMessage" value="HostedClr" />
+ <string id="RundownPublisher.StartupMode.IjwDllMapMessage" value="IjwDll" />
+ <string id="RundownPublisher.StartupMode.ComActivatedMapMessage" value="ComActivated" />
+ <string id="RundownPublisher.StartupMode.OtherMapMessage" value="Other" />
+ <string id="RundownPublisher.RuntimeSku.DesktopCLRMapMessage" value="DesktopClr" />
+ <string id="RundownPublisher.RuntimeSku.CoreCLRMapMessage" value="CoreClr" />
+ <string id="RundownPublisher.Startup.CONCURRENT_GCMapMessage" value="CONCURRENT_GC" />
+ <string id="RundownPublisher.Startup.LOADER_OPTIMIZATION_SINGLE_DOMAINMapMessage" value="LOADER_OPTIMIZATION_SINGLE_DOMAIN" />
+ <string id="RundownPublisher.Startup.LOADER_OPTIMIZATION_MULTI_DOMAINMapMessage" value="LOADER_OPTIMIZATION_MULTI_DOMAIN" />
+ <string id="RundownPublisher.Startup.LOADER_SAFEMODEMapMessage" value="LOADER_SAFEMODE" />
+ <string id="RundownPublisher.Startup.LOADER_SETPREFERENCEMapMessage" value="LOADER_SETPREFERENCE" />
+ <string id="RundownPublisher.Startup.SERVER_GCMapMessage" value="SERVER_GC" />
+ <string id="RundownPublisher.Startup.HOARD_GC_VMMapMessage" value="HOARD_GC_VM" />
+ <string id="RundownPublisher.Startup.SINGLE_VERSION_HOSTING_INTERFACEMapMessage" value="SINGLE_VERSION_HOSTING_INTERFACE" />
+ <string id="RundownPublisher.Startup.LEGACY_IMPERSONATIONMapMessage" value="LEGACY_IMPERSONATION" />
+ <string id="RundownPublisher.Startup.DISABLE_COMMITTHREADSTACKMapMessage" value="DISABLE_COMMITTHREADSTACK" />
+ <string id="RundownPublisher.Startup.ALWAYSFLOW_IMPERSONATIONMapMessage" value="ALWAYSFLOW_IMPERSONATION" />
+ <string id="RundownPublisher.Startup.TRIM_GC_COMMITMapMessage" value="TRIM_GC_COMMIT" />
+ <string id="RundownPublisher.Startup.ETWMapMessage" value="ETW" />
+ <string id="RundownPublisher.Startup.SERVER_BUILDMapMessage" value="SERVER_BUILD" />
+ <string id="RundownPublisher.Startup.ARMMapMessage" value="ARM" />
+ <string id="RundownPublisher.ModuleRangeTypeMap.ColdRangeMessage" value="ColdRange"/>
+ <string id="RundownPublisher.ThreadFlags.GCSpecial" value="GCSpecial"/>
+ <string id="RundownPublisher.ThreadFlags.Finalizer" value="Finalizer"/>
+ <string id="RundownPublisher.ThreadFlags.ThreadPoolWorker" value="ThreadPoolWorker"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.ModuleSection" value="ModuleSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.EETableSection" value="EETableSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.WriteDataSection" value="WriteDataSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.WriteableDataSection" value="WriteableDataSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.DataSection" value="DataSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.RVAStaticsSection" value="RVAStaticsSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.EEDataSection" value="EEDataSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.DelayLoadInfoTableEagerSection" value="DelayLoadInfoTableEagerSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.DelayLoadInfoTableSection" value="DelayLoadInfoTableSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.EEReadonlyData" value="EEReadonlyData"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.ReadonlyData" value="ReadonlyData"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.ClassSection" value="ClassSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.CrossDomainInfoSection" value="CrossDomainInfoSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.MethodDescSection" value="MethodDescSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.MethodDescWriteableSection" value="MethodDescWriteableSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.ExceptionSection" value="ExceptionSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.InstrumentSection" value="InstrumentSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.VirtualImportThunkSection" value="VirtualImportThunkSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.ExternalMethodThunkSection" value="ExternalMethodThunkSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.HelperTableSection" value="HelperTableSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.MethodPrecodeWriteableSection" value="MethodPrecodeWriteableSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.MethodPrecodeWriteSection" value="MethodPrecodeWriteSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.MethodPrecodeSection" value="MethodPrecodeSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.Win32ResourcesSection" value="Win32ResourcesSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.HeaderSection" value="HeaderSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.MetadataSection" value="MetadataSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.DelayLoadInfoSection" value="DelayLoadInfoSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.ImportTableSection" value="ImportTableSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.CodeSection" value="CodeSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.CodeHeaderSection" value="CodeHeaderSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.CodeManagerSection" value="CodeManagerSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.UnwindDataSection" value="UnwindDataSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.RuntimeFunctionSection" value="RuntimeFunctionSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.StubsSection" value="StubsSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.StubDispatchDataSection" value="StubDispatchDataSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.ExternalMethodDataSection" value="ExternalMethodDataSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.DelayLoadInfoDelayListSection" value="DelayLoadInfoDelayListSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.ReadonlySharedSection" value="ReadonlySharedSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.ReadonlySection" value="ReadonlySection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.ILSection" value="ILSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.GCInfoSection" value="GCInfoSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.ILMetadataSection" value="ILMetadataSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.ResourcesSection" value="ResourcesSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.CompressedMapsSection" value="CompressedMapsSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.DebugSection" value="DebugSection"/>
+ <string id="PrivatePublisher.ModuleRangeSectionTypeMap.BaseRelocsSection" value="BaseRelocsSection"/>
+ <string id="PrivatePublisher.ModuleRangeIBCTypeMap.IBCUnprofiledSectionMessage" value="IBCUnprofiledSection"/>
+ <string id="PrivatePublisher.ModuleRangeIBCTypeMap.IBCProfiledSectionMessage" value="IBCProfiledSection"/>
+ <string id="PrivatePublisher.ModuleRangeTypeMap.HotRangeMessage" value="HotRange"/>
+ <string id="PrivatePublisher.ModuleRangeTypeMap.WarmRangeMessage" value="WarmRange"/>
+ <string id="PrivatePublisher.ModuleRangeTypeMap.ColdRangeMessage" value="ColdRange"/>
+ <string id="PrivatePublisher.ModuleRangeTypeMap.HotColdRangeMessage" value="HotColdSortedRange"/>
+ <string id="PrivatePublisher.GCHandleKind.WeakShortMessage" value="WeakShort" />
+ <string id="PrivatePublisher.GCHandleKind.WeakLongMessage" value="WeakLong" />
+ <string id="PrivatePublisher.GCHandleKind.StrongMessage" value="Strong" />
+ <string id="PrivatePublisher.GCHandleKind.PinnedMessage" value="Pinned" />
+ <string id="PrivatePublisher.GCHandleKind.VariableMessage" value="Variable" />
+ <string id="PrivatePublisher.GCHandleKind.RefCountedMessage" value="RefCounted" />
+ <string id="PrivatePublisher.GCHandleKind.DependentMessage" value="Dependent" />
+ <string id="PrivatePublisher.GCHandleKind.AsyncPinnedMessage" value="AsyncPinned" />
+ <string id="PrivatePublisher.GCHandleKind.SizedRefMessage" value="SizedRef" />
+
+ <!-- Keyword Messages -->
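+ <!-- Keyword messages name the provider's 64-bit keyword bits. A session
+      passes a keyword mask when enabling the provider to filter which event
+      groups are emitted; tools such as logman and PerfView display these
+      names when listing a provider's keywords. -->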
+ <string id="RuntimePublisher.GCKeywordMessage" value="GC" />
+ <string id="RuntimePublisher.ThreadingKeywordMessage" value="Threading" />
+ <string id="RuntimePublisher.FusionKeywordMessage" value="Binder" />
+ <string id="RuntimePublisher.LoaderKeywordMessage" value="Loader" />
+ <string id="RuntimePublisher.JitKeywordMessage" value="Jit" />
+ <string id="RuntimePublisher.JittedMethodILToNativeMapKeywordMessage" value="JittedMethodILToNativeMap" />
+ <string id="RuntimePublisher.NGenKeywordMessage" value="NGen" />
+ <string id="RuntimePublisher.StartEnumerationKeywordMessage" value="StartEnumeration" />
+ <string id="RuntimePublisher.EndEnumerationKeywordMessage" value="StopEnumeration" />
+ <string id="RuntimePublisher.SecurityKeywordMessage" value="Security" />
+ <string id="RuntimePublisher.AppDomainResourceManagementKeywordMessage" value="AppDomainResourceManagement" />
+ <string id="RuntimePublisher.InteropKeywordMessage" value="Interop" />
+ <string id="RuntimePublisher.ContentionKeywordMessage" value="Contention" />
+ <string id="RuntimePublisher.ExceptionKeywordMessage" value="Exception" />
+ <string id="RuntimePublisher.PerfTrackKeywordMessage" value="PerfTrack" />
+ <string id="RuntimePublisher.StackKeywordMessage" value="Stack" />
+ <string id="RuntimePublisher.JitTracingKeywordMessage" value="JitTracing" />
+ <string id="RuntimePublisher.OverrideAndSuppressNGenEventsKeywordMessage" value="OverrideAndSuppressNGenEvents" />
+ <string id="RuntimePublisher.TypeKeywordMessage" value="Type" />
+ <string id="RuntimePublisher.GCHeapDumpKeywordMessage" value="GCHeapDump" />
+ <string id="RuntimePublisher.GCSampledObjectAllocationHighKeywordMessage" value="GCSampledObjectAllocationHigh" />
+ <string id="RuntimePublisher.GCSampledObjectAllocationLowKeywordMessage" value="GCSampledObjectAllocationLow" />
+ <string id="RuntimePublisher.GCHeapSurvivalAndMovementKeywordMessage" value="GCHeapSurvivalAndMovement" />
+ <string id="RuntimePublisher.GCHeapCollectKeyword" value="GCHeapCollect" />
+ <string id="RuntimePublisher.GCHeapAndTypeNamesKeyword" value="GCHeapAndTypeNames" />
+ <string id="RuntimePublisher.GCHandleKeywordMessage" value="GCHandle" />
+ <string id="RuntimePublisher.ThreadTransferKeywordMessage" value="ThreadTransfer" />
+ <string id="RuntimePublisher.DebuggerKeywordMessage" value="Debugger" />
+ <string id="RundownPublisher.LoaderKeywordMessage" value="Loader" />
+ <string id="RundownPublisher.JitKeywordMessage" value="Jit" />
+ <string id="RundownPublisher.JittedMethodILToNativeMapRundownKeywordMessage" value="JittedMethodILToNativeMapRundown" />
+ <string id="RundownPublisher.NGenKeywordMessage" value="NGen" />
+ <string id="RundownPublisher.StartRundownKeywordMessage" value="Start" />
+ <string id="RundownPublisher.EndRundownKeywordMessage" value="End" />
+ <string id="RuntimePublisher.AppDomainResourceManagementRundownKeywordMessage" value="AppDomainResourceManagement" />
+ <string id="RundownPublisher.ThreadingKeywordMessage" value="Threading" />
+ <string id="RundownPublisher.OverrideAndSuppressNGenEventsRundownKeywordMessage" value="OverrideAndSuppressNGenEvents" />
+ <string id="RundownPublisher.PerfTrackRundownKeywordMessage" value="PerfTrack" />
+ <string id="RundownPublisher.StackKeywordMessage" value="Stack" />
+ <string id="PrivatePublisher.GCPrivateKeywordMessage" value="GC" />
+ <string id="PrivatePublisher.StartupKeywordMessage" value="Startup" />
+ <string id="PrivatePublisher.StackKeywordMessage" value="Stack" />
+ <string id="PrivatePublisher.BindingKeywordMessage" value="Binding" />
+ <string id="PrivatePublisher.NGenForceRestoreKeywordMessage" value="NGenForceRestore" />
+ <string id="PrivatePublisher.SecurityPrivateKeywordMessage" value="Security" />
+ <string id="PrivatePublisher.PrivateFusionKeywordMessage" value="Fusion" />
+ <string id="PrivatePublisher.LoaderHeapPrivateKeywordMessage" value="LoaderHeap" />
+ <string id="PrivatePublisher.PerfTrackKeywordMessage" value="PerfTrack" />
+ <string id="PrivatePublisher.DynamicTypeUsageMessage" value="DynamicTypeUsage" />
+ <string id="PrivatePublisher.MulticoreJitPrivateKeywordMessage" value="MulticoreJit" />
+ <string id="PrivatePublisher.InteropPrivateKeywordMessage" value="Interop" />
+ <string id="PrivatePublisher.GCHandlePrivateKeywordMessage" value="GCHandle" />
+
+ <string id="StressPublisher.StackKeywordMessage" value="Stack" />
+
+ <!-- Opcode messages -->
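+ <!-- Opcode messages name task-specific opcodes; consumers identify an event
+      by its task/opcode pair, e.g. the GC task with opcode "SuspendEEStart"
+      corresponds to the GCSuspendEEBegin event. -->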
+ <string id="RuntimePublisher.GCRestartEEEndOpcodeMessage" value="RestartEEStop" />
+ <string id="RuntimePublisher.GCHeapStatsOpcodeMessage" value="HeapStats" />
+ <string id="RuntimePublisher.GCCreateSegmentOpcodeMessage" value="CreateSegment" />
+ <string id="RuntimePublisher.GCFreeSegmentOpcodeMessage" value="FreeSegment" />
+ <string id="RuntimePublisher.GCRestartEEBeginOpcodeMessage" value="RestartEEStart" />
+ <string id="RuntimePublisher.GCSuspendEEEndOpcodeMessage" value="SuspendEEStop" />
+ <string id="RuntimePublisher.GCSuspendEEBeginOpcodeMessage" value="SuspendEEStart" />
+ <string id="RuntimePublisher.GCAllocationTickOpcodeMessage" value="AllocationTick" />
+ <string id="RuntimePublisher.GCCreateConcurrentThreadOpcodeMessage" value="CreateConcurrentThread" />
+ <string id="RuntimePublisher.GCTerminateConcurrentThreadOpcodeMessage" value="TerminateConcurrentThread" />
+ <string id="RuntimePublisher.GCFinalizersEndOpcodeMessage" value="FinalizersStop" />
+ <string id="RuntimePublisher.GCFinalizersBeginOpcodeMessage" value="FinalizersStart" />
+ <string id="RuntimePublisher.GCBulkRootEdgeOpcodeMessage" value="GCBulkRootEdge" />
+ <string id="RuntimePublisher.GCBulkRootCCWOpcodeMessage" value="GCBulkRootCCW" />
+ <string id="RuntimePublisher.GCBulkRCWOpcodeMessage" value="GCBulkRCW" />
+ <string id="RuntimePublisher.GCBulkRootStaticVarOpcodeMessage" value="GCBulkRootStaticVar" />
+ <string id="RuntimePublisher.GCBulkRootConditionalWeakTableElementEdgeOpcodeMessage" value="GCBulkRootConditionalWeakTableElementEdge" />
+ <string id="RuntimePublisher.GCBulkNodeOpcodeMessage" value="GCBulkNode" />
+ <string id="RuntimePublisher.GCBulkEdgeOpcodeMessage" value="GCBulkEdge" />
+ <string id="RuntimePublisher.GCSampledObjectAllocationOpcodeMessage" value="GCSampledObjectAllocation" />
+ <string id="RuntimePublisher.GCBulkSurvivingObjectRangesOpcodeMessage" value="GCBulkSurvivingObjectRanges" />
+ <string id="RuntimePublisher.GCBulkMovedObjectRangesOpcodeMessage" value="GCBulkMovedObjectRanges" />
+ <string id="RuntimePublisher.GCGenerationRangeOpcodeMessage" value="GCGenerationRange" />
+ <string id="RuntimePublisher.GCMarkStackRootsOpcodeMessage" value="MarkStackRoots" />
+ <string id="RuntimePublisher.GCMarkHandlesOpcodeMessage" value="MarkHandles" />
+ <string id="RuntimePublisher.GCMarkFinalizeQueueRootsOpcodeMessage" value="MarkFinalizeQueueRoots" />
+ <string id="RuntimePublisher.GCMarkOlderGenerationRootsOpcodeMessage" value="MarkCards" />
+ <string id="RuntimePublisher.FinalizeObjectOpcodeMessage" value="FinalizeObject" />
+ <string id="RuntimePublisher.BulkTypeOpcodeMessage" value="BulkType" />
+ <string id="RuntimePublisher.MethodLoadOpcodeMessage" value="Load" />
+ <string id="RuntimePublisher.MethodUnloadOpcodeMessage" value="Unload" />
+ <string id="RuntimePublisher.MethodLoadVerboseOpcodeMessage" value="LoadVerbose" />
+ <string id="RuntimePublisher.MethodUnloadVerboseOpcodeMessage" value="UnloadVerbose" />
+ <string id="RuntimePublisher.DCStartCompleteOpcodeMessage" value="DCStartCompleteV2" />
+ <string id="RuntimePublisher.DCEndCompleteOpcodeMessage" value="DCEndCompleteV2" />
+ <string id="RuntimePublisher.MethodDCStartOpcodeMessage" value="DCStartV2" />
+ <string id="RuntimePublisher.MethodDCEndOpcodeMessage" value="DCStopV2" />
+ <string id="RuntimePublisher.MethodDCStartVerboseOpcodeMessage" value="DCStartVerboseV2" />
+ <string id="RuntimePublisher.MethodDCEndVerboseOpcodeMessage" value="DCStopVerboseV2" />
+ <string id="RuntimePublisher.MethodJittingStartedOpcodeMessage" value="JittingStarted" />
+ <string id="RuntimePublisher.JitInliningSucceededOpcodeMessage" value="InliningSucceeded" />
+ <string id="RuntimePublisher.JitInliningFailedOpcodeMessage" value="InliningFailed" />
+ <string id="RuntimePublisher.JitTailCallSucceededOpcodeMessage" value="TailCallSucceeded" />
+ <string id="RuntimePublisher.JitTailCallFailedOpcodeMessage" value="TailCallFailed" />
+ <string id="RuntimePublisher.MethodILToNativeMapOpcodeMessage" value="MethodILToNativeMap" />
+ <string id="RuntimePublisher.DomainModuleLoadOpcodeMessage" value="DomainModuleLoad" />
+ <string id="RuntimePublisher.ModuleLoadOpcodeMessage" value="ModuleLoad" />
+ <string id="RuntimePublisher.ModuleUnloadOpcodeMessage" value="ModuleUnload" />
+ <string id="RuntimePublisher.ModuleDCStartOpcodeMessage" value="ModuleDCStartV2" />
+ <string id="RuntimePublisher.ModuleDCEndOpcodeMessage" value="ModuleDCStopV2" />
+ <string id="RuntimePublisher.AssemblyLoadOpcodeMessage" value="AssemblyLoad" />
+ <string id="RuntimePublisher.AssemblyUnloadOpcodeMessage" value="AssemblyUnload" />
+ <string id="RuntimePublisher.AppDomainLoadOpcodeMessage" value="AppDomainLoad" />
+ <string id="RuntimePublisher.AppDomainUnloadOpcodeMessage" value="AppDomainUnload" />
+ <string id="RuntimePublisher.CLRStackWalkOpcodeMessage" value="Walk" />
+ <string id="RuntimePublisher.AppDomainMemAllocatedOpcodeMessage" value="MemAllocated" />
+ <string id="RuntimePublisher.AppDomainMemSurvivedOpcodeMessage" value="MemSurvived" />
+ <string id="RuntimePublisher.ThreadCreatedOpcodeMessage" value="ThreadCreated" />
+ <string id="RuntimePublisher.ThreadTerminatedOpcodeMessage" value="ThreadTerminated" />
+ <string id="RuntimePublisher.ThreadDomainEnterOpcodeMessage" value="DomainEnter" />
+ <string id="RuntimePublisher.ILStubGeneratedOpcodeMessage" value="StubGenerated" />
+ <string id="RuntimePublisher.ILStubCacheHitOpcodeMessage" value="StubCacheHit" />
+ <string id="RuntimePublisher.WaitOpcodeMessage" value="Wait" />
+ <string id="RuntimePublisher.SampleOpcodeMessage" value="Sample" />
+ <string id="RuntimePublisher.AdjustmentOpcodeMessage" value="Adjustment" />
+ <string id="RuntimePublisher.StatsOpcodeMessage" value="Stats" />
+ <string id="RuntimePublisher.ModuleRangeLoadOpcodeMessage" value="ModuleRangeLoad" />
+ <string id="RuntimePublisher.SetGCHandleOpcodeMessage" value="SetGCHandle" />
+ <string id="RuntimePublisher.DestroyGCHandleOpcodeMessage" value="DestoryGCHandle" />
+ <string id="RuntimePublisher.TriggeredOpcodeMessage" value="Triggered" />
+ <string id="RuntimePublisher.PinObjectAtGCTimeOpcodeMessage" value="PinObjectAtGCTime" />
+ <string id="RuntimePublisher.IncreaseMemoryPressureOpcodeMessage" value="IncreaseMemoryPressure" />
+ <string id="RuntimePublisher.DecreaseMemoryPressureOpcodeMessage" value="DecreaseMemoryPressure" />
+
+ <string id="RuntimePublisher.EnqueueOpcodeMessage" value="Enqueue" />
+ <string id="RuntimePublisher.DequeueOpcodeMessage" value="Dequeue" />
+ <string id="RuntimePublisher.IOEnqueueOpcodeMessage" value="IOEnqueue" />
+ <string id="RuntimePublisher.IODequeueOpcodeMessage" value="IODequeue" />
+ <string id="RuntimePublisher.IOPackOpcodeMessage" value="IOPack" />
+
+ <string id="RuntimePublisher.ThreadCreatingOpcodeMessage" value="Creating" />
+ <string id="RuntimePublisher.ThreadRunningOpcodeMessage" value="Running" />
+
+ <string id="RuntimePublisher.DebugIPCEventStartOpcodeMessage" value="IPCEventStart" />
+ <string id="RuntimePublisher.DebugIPCEventEndOpcodeMessage" value="IPCEventEnd" />
+ <string id="RuntimePublisher.DebugExceptionProcessingStartOpcodeMessage" value="ExceptionProcessingStart" />
+ <string id="RuntimePublisher.DebugExceptionProcessingEndOpcodeMessage" value="ExceptionProcessingEnd" />
+
+ <string id="RundownPublisher.MethodDCStartOpcodeMessage" value="DCStart" />
+ <string id="RundownPublisher.MethodDCEndOpcodeMessage" value="DCStop" />
+ <string id="RundownPublisher.MethodDCStartVerboseOpcodeMessage" value="DCStartVerbose" />
+ <string id="RundownPublisher.MethodDCEndVerboseOpcodeMessage" value="DCStopVerbose" />
+ <string id="RundownPublisher.MethodDCStartILToNativeMapOpcodeMessage" value="MethodDCStartILToNativeMap" />
+ <string id="RundownPublisher.MethodDCEndILToNativeMapOpcodeMessage" value="MethodDCEndILToNativeMap" />
+ <string id="RundownPublisher.DCStartCompleteOpcodeMessage" value="DCStartComplete" />
+ <string id="RundownPublisher.DCEndCompleteOpcodeMessage" value="DCStopComplete" />
+ <string id="RundownPublisher.DCStartInitOpcodeMessage" value="DCStartInit" />
+ <string id="RundownPublisher.DCEndInitOpcodeMessage" value="DCStopInit" />
+ <string id="RundownPublisher.ModuleDCStartOpcodeMessage" value="ModuleDCStart" />
+ <string id="RundownPublisher.ModuleDCEndOpcodeMessage" value="ModuleDCStop" />
+ <string id="RundownPublisher.AssemblyDCStartOpcodeMessage" value="AssemblyDCStart" />
+ <string id="RundownPublisher.AssemblyDCEndOpcodeMessage" value="AssemblyDCStop" />
+ <string id="RundownPublisher.AppDomainDCStartOpcodeMessage" value="AppDomainDCStart" />
+ <string id="RundownPublisher.AppDomainDCEndOpcodeMessage" value="AppDomainDCStop" />
+ <string id="RundownPublisher.DomainModuleDCStartOpcodeMessage" value="DomainModuleDCStart" />
+ <string id="RundownPublisher.DomainModuleDCEndOpcodeMessage" value="DomainModuleDCStop" />
+ <string id="RundownPublisher.ThreadDCOpcodeMessage" value="ThreadDCStop" />
+ <string id="RundownPublisher.CLRStackWalkOpcodeMessage" value="Walk" />
+ <string id="RundownPublisher.ModuleRangeDCStartOpcodeMessage" value="ModuleRangeDCStart" />
+ <string id="RundownPublisher.ModuleRangeDCEndOpcodeMessage" value="ModuleRangeDCEnd" />
+ <string id="PrivatePublisher.FailFastOpcodeMessage" value="FailFast" />
+
+
+ <string id="PrivatePublisher.GCDecisionOpcodeMessage" value="Decision" />
+ <string id="PrivatePublisher.GCSettingsOpcodeMessage" value="Settings" />
+ <string id="PrivatePublisher.GCOptimizedOpcodeMessage" value="Optimized" />
+ <string id="PrivatePublisher.GCPerHeapHistoryOpcodeMessage" value="PerHeapHistory" />
+ <string id="PrivatePublisher.GCGlobalHeapHistoryOpcodeMessage" value="GlobalHeapHistory" />
+ <string id="PrivatePublisher.GCFullNotifyOpcodeMessage" value="FullNotify" />
+ <string id="PrivatePublisher.GCJoinOpcodeMessage" value="Join" />
+ <string id="PrivatePublisher.GCMarkStackRootsOpcodeMessage" value="MarkStackRoots" />
+ <string id="PrivatePublisher.GCMarkHandlesOpcodeMessage" value="MarkHandles" />
+ <string id="PrivatePublisher.GCMarkFinalizeQueueRootsOpcodeMessage" value="MarkFinalizeQueueRoots" />
+ <string id="PrivatePublisher.GCMarkCardsOpcodeMessage" value="MarkCards" />
+ <string id="PrivatePublisher.BGCBeginOpcodeMessage" value="BGCStart" />
+ <string id="PrivatePublisher.BGC1stNonCondEndOpcodeMessage" value="BGC1stNonCondStop" />
+ <string id="PrivatePublisher.BGC2ndNonConBeginOpcodeMessage" value="BGC2ndNonConStart" />
+ <string id="PrivatePublisher.BGC1stConEndOpcodeMessage" value="BGC1stConStop" />
+ <string id="PrivatePublisher.BGC2ndNonConEndOpcodeMessage" value="BGC2ndNonConStop" />
+ <string id="PrivatePublisher.BGC2ndConBeginOpcodeMessage" value="BGC2ndConStart" />
+ <string id="PrivatePublisher.BGC2ndConEndOpcodeMessage" value="BGC2ndConStop" />
+ <string id="PrivatePublisher.BGCPlanEndOpcodeMessage" value="BGCPlanStop" />
+ <string id="PrivatePublisher.BGCSweepEndOpcodeMessage" value="BGCSweepStop" />
+ <string id="PrivatePublisher.BGCDrainMarkOpcodeMessage" value="BGCDrainMark" />
+ <string id="PrivatePublisher.BGCRevisitOpcodeMessage" value="BGCRevisit" />
+ <string id="PrivatePublisher.BGCOverflowOpcodeMessage" value="BGCOverflow" />
+ <string id="PrivatePublisher.BGCAllocWaitBeginOpcodeMessage" value="BGCAllocWaitStart" />
+ <string id="PrivatePublisher.BGCAllocWaitEndOpcodeMessage" value="BGCAllocWaitStop" />
+ <string id="PrivatePublisher.FinalizeObjectOpcodeMessage" value="FinalizeObject" />
+ <string id="PrivatePublisher.SetGCHandleOpcodeMessage" value="SetGCHandle" />
+ <string id="PrivatePublisher.DestroyGCHandleOpcodeMessage" value="DestoryGCHandle" />
+ <string id="PrivatePublisher.PinPlugAtGCTimeOpcodeMessage" value="PinPlugAtGCTime" />
+ <string id="PrivatePublisher.CCWRefCountChangeOpcodeMessage" value="CCWRefCountChange" />
+ <string id="PrivatePublisher.EEStartupStartOpcodeMessage" value="EEStartupStart" />
+ <string id="PrivatePublisher.EEStartupEndOpcodeMessage" value="EEStartupStop" />
+ <string id="PrivatePublisher.EEConfigSetupOpcodeMessage" value="EEConfigSetupStart" />
+ <string id="PrivatePublisher.EEConfigSetupEndOpcodeMessage" value="EEConfigSetupStop" />
+ <string id="PrivatePublisher.LoadSystemBasesOpcodeMessage" value="LoadSystemBasesStart" />
+ <string id="PrivatePublisher.LoadSystemBasesEndOpcodeMessage" value="LoadSystemBasesStop" />
+ <string id="PrivatePublisher.ExecExeOpcodeMessage" value="ExecExeStart" />
+ <string id="PrivatePublisher.ExecExeEndOpcodeMessage" value="ExecExeStop" />
+ <string id="PrivatePublisher.MainOpcodeMessage" value="MainStart" />
+ <string id="PrivatePublisher.MainEndOpcodeMessage" value="MainStop" />
+ <string id="PrivatePublisher.ApplyPolicyStartOpcodeMessage" value="ApplyPolicyStart" />
+ <string id="PrivatePublisher.ApplyPolicyEndOpcodeMessage" value="ApplyPolicyStop" />
+ <string id="PrivatePublisher.LdLibShFolderOpcodeMessage" value="LdLibShFolderStart" />
+ <string id="PrivatePublisher.LdLibShFolderEndOpcodeMessage" value="LdLibShFolderStop" />
+ <string id="PrivatePublisher.PrestubWorkerOpcodeMessage" value="PrestubWorkerStart" />
+ <string id="PrivatePublisher.PrestubWorkerEndOpcodeMessage" value="PrestubWorkerStop" />
+ <string id="PrivatePublisher.GetInstallationStartOpcodeMessage" value="GetInstallationStart" />
+ <string id="PrivatePublisher.GetInstallationEndOpcodeMessage" value="GetInstallationStop" />
+ <string id="PrivatePublisher.OpenHModuleOpcodeMessage" value="OpenHModuleStart" />
+ <string id="PrivatePublisher.OpenHModuleEndOpcodeMessage" value="OpenHModuleStop" />
+ <string id="PrivatePublisher.ExplicitBindStartOpcodeMessage" value="ExplicitBindStart" />
+ <string id="PrivatePublisher.ExplicitBindEndOpcodeMessage" value="ExplicitBindStop" />
+ <string id="PrivatePublisher.ParseXmlOpcodeMessage" value="ParseXmlStart" />
+ <string id="PrivatePublisher.ParseXmlEndOpcodeMessage" value="ParseXmlStop" />
+ <string id="PrivatePublisher.InitDefaultDomainOpcodeMessage" value="InitDefaultDomainStart" />
+ <string id="PrivatePublisher.InitDefaultDomainEndOpcodeMessage" value="InitDefaultDomainStop" />
+ <string id="PrivatePublisher.InitSecurityOpcodeMessage" value="InitSecurityStart" />
+ <string id="PrivatePublisher.InitSecurityEndOpcodeMessage" value="InitSecurityStop" />
+ <string id="PrivatePublisher.AllowBindingRedirsOpcodeMessage" value="AllowBindingRedirsStart" />
+ <string id="PrivatePublisher.AllowBindingRedirsEndOpcodeMessage" value="AllowBindingRedirsStop" />
+ <string id="PrivatePublisher.EEConfigSyncOpcodeMessage" value="EEConfigSyncStart" />
+ <string id="PrivatePublisher.EEConfigSyncEndOpcodeMessage" value="EEConfigSyncStop" />
+ <string id="PrivatePublisher.FusionBindingOpcodeMessage" value="BindingStart" />
+ <string id="PrivatePublisher.FusionBindingEndOpcodeMessage" value="BindingStop" />
+ <string id="PrivatePublisher.LoaderCatchCallOpcodeMessage" value="LoaderCatchCallStart" />
+ <string id="PrivatePublisher.LoaderCatchCallEndOpcodeMessage" value="LoaderCatchCallStop" />
+ <string id="PrivatePublisher.FusionInitOpcodeMessage" value="FusionInitStart" />
+ <string id="PrivatePublisher.FusionInitEndOpcodeMessage" value="FusionInitStop" />
+ <string id="PrivatePublisher.FusionAppCtxOpcodeMessage" value="FusionAppCtxStart" />
+ <string id="PrivatePublisher.FusionAppCtxEndOpcodeMessage" value="FusionAppCtxStop" />
+ <string id="PrivatePublisher.Fusion2EEOpcodeMessage" value="Fusion2EEStart" />
+ <string id="PrivatePublisher.Fusion2EEEndOpcodeMessage" value="Fusion2EEStop" />
+ <string id="PrivatePublisher.SecurityCatchCallOpcodeMessage" value="SecurityCatchCallStart" />
+ <string id="PrivatePublisher.SecurityCatchCallEndOpcodeMessage" value="SecurityCatchCallStop" />
+ <string id="PrivatePublisher.BindingPolicyPhaseStartOpcodeMessage" value="PolicyPhaseStart" />
+ <string id="PrivatePublisher.BindingPolicyPhaseEndOpcodeMessage" value="PolicyPhaseStop" />
+ <string id="PrivatePublisher.BindingNgenPhaseStartOpcodeMessage" value="NgenPhaseStart" />
+ <string id="PrivatePublisher.BindingNgenPhaseEndOpcodeMessage" value="NgenPhaseStop" />
+ <string id="PrivatePublisher.BindingLoopupAndProbingPhaseStartOpcodeMessage" value="LoopupAndProbingPhaseStart" />
+ <string id="PrivatePublisher.BindingLookupAndProbingPhaseEndOpcodeMessage" value="LookupAndProbingPhaseStop" />
+ <string id="PrivatePublisher.LoaderPhaseStartOpcodeMessage" value="LoaderPhaseStart" />
+ <string id="PrivatePublisher.LoaderPhaseEndOpcodeMessage" value="LoaderPhaseStop" />
+ <string id="PrivatePublisher.BindingPhaseStartOpcodeMessage" value="PhaseStart" />
+ <string id="PrivatePublisher.BindingPhaseEndOpcodeMessage" value="PhaseStop" />
+ <string id="PrivatePublisher.BindingDownloadPhaseStartOpcodeMessage" value="DownloadPhaseStart" />
+ <string id="PrivatePublisher.BindingDownloadPhaseEndOpcodeMessage" value="DownloadPhaseStop" />
+ <string id="PrivatePublisher.LoaderAssemblyInitPhaseStartOpcodeMessage" value="LoaderAssemblyInitPhaseStart" />
+ <string id="PrivatePublisher.LoaderAssemblyInitPhaseEndOpcodeMessage" value="LoaderAssemblyInitPhaseStop" />
+ <string id="PrivatePublisher.LoaderMappingPhaseStartOpcodeMessage" value="LoaderMappingPhaseStart" />
+ <string id="PrivatePublisher.LoaderMappingPhaseEndOpcodeMessage" value="LoaderMappingPhaseStop" />
+ <string id="PrivatePublisher.NgenBindOpcodeMessage" value="NgenBind" />
+ <string id="PrivatePublisher.LoaderDeliverEventPhaseStartOpcodeMessage" value="LoaderDeliverEventPhaseStart" />
+ <string id="PrivatePublisher.LoaderDeliverEventsPhaseEndOpcodeMessage" value="LoaderDeliverEventsPhaseStop" />
+ <string id="PrivatePublisher.FusionMessageOpcodeMessage" value="FusionMessage" />
+ <string id="PrivatePublisher.FusionErrorCodeOpcodeMessage" value="FusionErrorCode" />
+
+ <string id="PrivatePublisher.IInspectableRuntimeClassNameOpcodeMessage" value="IInspectableRuntimeClassName" />
+ <string id="PrivatePublisher.WinRTUnboxOpcodeMessage" value="WinRTUnbox" />
+ <string id="PrivatePublisher.CreateRCWOpcodeMessage" value="CreateRCW" />
+ <string id="PrivatePublisher.RCWVarianceOpcodeMessage" value="RCWVariance" />
+ <string id="PrivatePublisher.RCWIEnumerableCastingOpcodeMessage" value="RCWIEnumerableCasting" />
+ <string id="PrivatePublisher.CreateCCWOpcodeMessage" value="CreateCCW" />
+ <string id="PrivatePublisher.CCWVarianceOpcodeMessage" value="CCWVariance" />
+ <string id="PrivatePublisher.ObjectVariantMarshallingToNativeOpcodeMessage" value="ObjectVariantMarshallingToNative" />
+ <string id="PrivatePublisher.GetTypeFromGUIDOpcodeMessage" value="GetTypeFromGUID" />
+ <string id="PrivatePublisher.GetTypeFromProgIDOpcodeMessage" value="GetTypeFromProgID" />
+ <string id="PrivatePublisher.ConvertToCallbackEtwOpcodeMessage" value="ConvertToCallbackEtw" />
+ <string id="PrivatePublisher.BeginCreateManagedReferenceOpcodeMessage" value="BeginCreateManagedReference" />
+ <string id="PrivatePublisher.EndCreateManagedReferenceOpcodeMessage" value="EndCreateManagedReference" />
+ <string id="PrivatePublisher.ObjectVariantMarshallingToManagedOpcodeMessage" value="ObjectVariantMarshallingToManaged" />
+
+
+ <string id="PrivatePublisher.CLRStackWalkOpcodeMessage" value="Walk" />
+ <string id="PrivatePublisher.MulticoreJitOpcodeMessage" value="Common" />
+ <string id="PrivatePublisher.MulticoreJitOpcodeMethodCodeReturnedMessage" value="MethodCodeReturned" />
+ <string id="StressPublisher.CLRStackWalkOpcodeMessage" value="Walk" />
+
+ <string id="PrivatePublisher.EvidenceGeneratedMessage" value="EvidenceGenerated" />
+ <string id="PrivatePublisher.ModuleTransparencyComputationStartMessage" value="ModuleTransparencyComputationStart" />
+ <string id="PrivatePublisher.ModuleTransparencyComputationEndMessage" value="ModuleTransparencyComputationStop" />
+ <string id="PrivatePublisher.TypeTransparencyComputationStartMessage" value="TypeTransparencyComputationStart" />
+ <string id="PrivatePublisher.TypeTransparencyComputationEndMessage" value="TypeTransparencyComputationStop" />
+ <string id="PrivatePublisher.MethodTransparencyComputationStartMessage" value="MethodTransparencyComputationStart" />
+ <string id="PrivatePublisher.MethodTransparencyComputationEndMessage" value="MethodTransparencyComputationStop" />
+ <string id="PrivatePublisher.FieldTransparencyComputationStartMessage" value="FieldTransparencyComputationStart" />
+ <string id="PrivatePublisher.FieldTransparencyComputationEndMessage" value="FieldTransparencyComputationStop" />
+ <string id="PrivatePublisher.TokenTransparencyComputationStartMessage" value="TokenTransparencyComputationStart" />
+ <string id="PrivatePublisher.TokenTransparencyComputationEndMessage" value="TokenTransparencyComputationStop" />
+
+ <string id="PrivatePublisher.LoaderHeapPrivateAllocRequestMessage" value="LoaderHeapAllocRequest" />
+ <string id="PrivatePublisher.ModuleRangeLoadOpcodeMessage" value="ModuleRangeLoad" />
+ </stringTable>
+ </resources>
+ </localization>
+</instrumentationManifest>
diff --git a/src/vm/ClrEtwAllMeta.lst b/src/vm/ClrEtwAllMeta.lst
new file mode 100644
index 0000000000..355fe08601
--- /dev/null
+++ b/src/vm/ClrEtwAllMeta.lst
@@ -0,0 +1,595 @@
+#
+# This list specifies the events that are not supported on Mac.
+# The format of this file is <action>:[eventtask]:[eventprovider]:[eventversion]:[eventsymbol]
+# where <action> could be one of nostack, nomac, stack, noclrinstanceid
+# [eventtask] is the task of the event
+# [eventprovider] is the provider of the event
+# [eventversion] is the version of the event
+# [eventsymbol] is the symbol of the event
+# in the src\VM\ClrEtwAll.man manifest file
+#
+# <action> is mandatory
+# * can be used as a wildcard in place of [eventtask], [eventprovider], [eventversion], [eventsymbol]
+# if [eventprovider] is specified, then the action is applied to the entire provider unless [eventtask] or [eventsymbol] is specified
+# if [eventtask] is specified, then the action is applied to all the events with that task unless the [eventsymbol] is also specified. [eventprovider] is ignored at this time
+# if [eventsymbol] is specified, then the action is applied to only that event. [eventprovider] is ignored at this time
+# [eventversion] is currently unused and will act as NOP
+#
+# If we do not want an event to have a stack, there must be nostack entries for all versions of that event.
+# Stack support is tracked as a single bit per event value: '1' if the event has a stack, '0' otherwise.
+# Because only one bit is kept per event value, an event with multiple versions still has just one bit.
+# Logical OR rules apply when combining entries: if an event is marked both 'stack' and 'nostack',
+# the OR of the two gives a final result of 'stack'.
+#
+# Whenever a new version of an event comes up such that
+# its older version is no longer used on Mac, the older
+# version's event entry must be added here
+#
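+#
+# For example (these entries appear below), the pair
+#
+#   noclrinstanceid:GarbageCollection:::GCStart
+#   nostack:GarbageCollection:::GCStart
+#
+# applies only to the GCStart event, since an event symbol is given, and
+# marks it as carrying no CLR instance id and collecting no stack.
+#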
+
+##################################
+# Events from the runtime provider
+##################################
+
+##########################
+# GarbageCollection events
+##########################
+noclrinstanceid:GarbageCollection:::GCStart
+nostack:GarbageCollection:::GCStart
+nomac:GarbageCollection:::GCStart_V1
+nostack:GarbageCollection:::GCStart_V1
+nomac:GarbageCollection:::GCStart_V2
+nostack:GarbageCollection:::GCStart_V2
+nomac:GarbageCollection:::GCEnd
+noclrinstanceid:GarbageCollection:::GCEnd
+nostack:GarbageCollection:::GCEnd
+nostack:GarbageCollection:::GCEnd_V1
+nomac:GarbageCollection:::GCRestartEEEnd
+noclrinstanceid:GarbageCollection:::GCRestartEEEnd
+nostack:GarbageCollection:::GCRestartEEEnd
+nostack:GarbageCollection:::GCRestartEEEnd_V1
+nomac:GarbageCollection:::GCHeapStats
+noclrinstanceid:GarbageCollection:::GCHeapStats
+nostack:GarbageCollection:::GCHeapStats
+nostack:GarbageCollection:::GCHeapStats_V1
+nomac:GarbageCollection:::GCHeapStats_V1
+nomac:GarbageCollection:::GCCreateSegment
+nostack:GarbageCollection:::GCCreateSegment
+noclrinstanceid:GarbageCollection:::GCCreateSegment
+nostack:GarbageCollection:::GCCreateSegment_V1
+nomac:GarbageCollection:::GCFreeSegment
+noclrinstanceid:GarbageCollection:::GCFreeSegment
+nostack:GarbageCollection:::GCFreeSegment
+nostack:GarbageCollection:::GCFreeSegment_V1
+nomac:GarbageCollection:::GCRestartEEBegin
+noclrinstanceid:GarbageCollection:::GCRestartEEBegin
+nostack:GarbageCollection:::GCRestartEEBegin
+nostack:GarbageCollection:::GCRestartEEBegin_V1
+nomac:GarbageCollection:::GCSuspendEEEnd
+noclrinstanceid:GarbageCollection:::GCSuspendEEEnd
+nostack:GarbageCollection:::GCSuspendEEEnd
+nostack:GarbageCollection:::GCSuspendEEEnd_V1
+nomac:GarbageCollection:::GCSuspendEEBegin
+noclrinstanceid:GarbageCollection:::GCSuspendEEBegin
+nostack:GarbageCollection:::GCSuspendEEBegin
+nostack:GarbageCollection:::GCSuspendEEBegin_V1
+nomac:GarbageCollection:::GCAllocationTick
+noclrinstanceid:GarbageCollection:::GCAllocationTick
+nomac:GarbageCollection:::GCCreateConcurrentThread
+noclrinstanceid:GarbageCollection:::GCCreateConcurrentThread
+nostack:GarbageCollection:::GCCreateConcurrentThread
+nostack:GarbageCollection:::GCCreateConcurrentThread_V1
+nomac:GarbageCollection:::GCCreateConcurrentThread_V1
+nomac:GarbageCollection:::GCTerminateConcurrentThread
+noclrinstanceid:GarbageCollection:::GCTerminateConcurrentThread
+nostack:GarbageCollection:::GCTerminateConcurrentThread
+nomac:GarbageCollection:::GCTerminateConcurrentThread_V1
+nostack:GarbageCollection:::GCTerminateConcurrentThread_V1
+nomac:GarbageCollection:::GCFinalizersEnd
+noclrinstanceid:GarbageCollection:::GCFinalizersEnd
+nostack:GarbageCollection:::GCFinalizersEnd
+nostack:GarbageCollection:::GCFinalizersEnd_V1
+nomac:GarbageCollection:::GCFinalizersBegin
+noclrinstanceid:GarbageCollection:::GCFinalizersBegin
+nostack:GarbageCollection:::GCFinalizersBegin
+nostack:GarbageCollection:::GCFinalizersBegin_V1
+nomac:GarbageCollection:::GCMarkStackRoots
+nostack:GarbageCollection:::GCMarkStackRoots
+nomac:GarbageCollection:::GCMarkFinalizeQueueRoots
+nostack:GarbageCollection:::GCMarkFinalizeQueueRoots
+nomac:GarbageCollection:::GCMarkHandles
+nostack:GarbageCollection:::GCMarkHandles
+nomac:GarbageCollection:::GCMarkOlderGenerationRoots
+nostack:GarbageCollection:::GCMarkOlderGenerationRoots
+nostack:GarbageCollection:::PinObjectAtGCTime
+nostack:GarbageCollection:::FinalizeObject
+nostack:GarbageCollection:::GCGenerationRange
+nostack:GarbageCollection:::GCBulkRootEdge
+nostack:GarbageCollection:::GCBulkRootConditionalWeakTableElementEdge
+nostack:GarbageCollection:::GCBulkNode
+nostack:GarbageCollection:::GCBulkEdge
+nostack:GarbageCollection:::GCBulkSurvivingObjectRanges
+nostack:GarbageCollection:::GCBulkMovedObjectRanges
+nostack:GarbageCollection:::GCBulkRootCCW
+nostack:GarbageCollection:::GCBulkRCW
+nostack:GarbageCollection:::GCBulkRootStaticVar
+
+#############
+# Type events
+#############
+
+nostack:Type:::BulkType
+
+###################
+# Threadpool events
+###################
+nomac:WorkerThreadCreation:::WorkerThreadCreate
+noclrinstanceid:WorkerThreadCreation:::WorkerThreadCreate
+nomac:WorkerThreadCreation:::WorkerThreadTerminate
+noclrinstanceid:WorkerThreadCreation:::WorkerThreadTerminate
+nomac:WorkerThreadRetirement:::WorkerThreadRetire
+noclrinstanceid:WorkerThreadRetirement:::WorkerThreadRetire
+nomac:WorkerThreadRetirement:::WorkerThreadUnretire
+noclrinstanceid:WorkerThreadRetirement:::WorkerThreadUnretire
+nomac:IOThreadCreation:::IOThreadCreate
+noclrinstanceid:IOThreadCreation:::IOThreadCreate
+nomac:IOThreadCreation:::IOThreadTerminate
+noclrinstanceid:IOThreadCreation:::IOThreadTerminate
+nomac:IOThreadRetirement:::IOThreadRetire
+noclrinstanceid:IOThreadRetirement:::IOThreadRetire
+nomac:IOThreadRetirement:::IOThreadUnretire
+noclrinstanceid:IOThreadRetirement:::IOThreadUnretire
+nomac:ThreadpoolSuspension:::ThreadpoolSuspensionSuspendThread
+noclrinstanceid:ThreadpoolSuspension:::ThreadpoolSuspensionSuspendThread
+nomac:ThreadpoolSuspension:::ThreadpoolSuspensionResumeThread
+noclrinstanceid:ThreadpoolSuspension:::ThreadpoolSuspensionResumeThread
+nomac:ThreadPoolWorkerThread:::ThreadPoolWorkerThreadStart
+nostack:ThreadPoolWorkerThread:::ThreadPoolWorkerThreadStart
+nostack:ThreadPoolWorkerThread:::ThreadPoolWorkerThreadWait
+nomac:ThreadPoolWorkerThread:::ThreadPoolWorkerThreadStop
+nostack:ThreadPoolWorkerThread:::ThreadPoolWorkerThreadStop
+nomac:ThreadPoolWorkerThreadRetirement:::ThreadPoolWorkerThreadRetirementStart
+nostack:ThreadPoolWorkerThreadRetirement:::ThreadPoolWorkerThreadRetirementStart
+nomac:ThreadPoolWorkerThreadRetirement:::ThreadPoolWorkerThreadRetirementStop
+nostack:ThreadPoolWorkerThreadRetirement:::ThreadPoolWorkerThreadRetirementStop
+nomac:ThreadPoolWorkerThreadAdjustment:::ThreadPoolWorkerThreadAdjustmentSample
+nostack:ThreadPoolWorkerThreadAdjustment:::ThreadPoolWorkerThreadAdjustmentSample
+nomac:ThreadPoolWorkerThreadAdjustment:::ThreadPoolWorkerThreadAdjustmentAdjustment
+nostack:ThreadPoolWorkerThreadAdjustment:::ThreadPoolWorkerThreadAdjustmentAdjustment
+
+##################
+# Exception events
+##################
+nomac:Exception:::ExceptionThrown
+noclrinstanceid:Exception:::ExceptionThrown
+
+###################
+# Contention events
+###################
+nomac:Contention:::Contention
+noclrinstanceid:Contention:::Contention
+nomac:Contention:::ContentionStart_V1
+nostack:Contention:::ContentionStop
+nomac:Contention:::ContentionStop
+
+##################
+# StackWalk events
+##################
+nomac:CLRStack:::CLRStackWalk
+nostack:CLRStack:::CLRStackWalk
+
+####################################
+# AppDomainResourceManagement events
+####################################
+nomac:AppDomainResourceManagement:::AppDomainMemAllocated
+nomac:AppDomainResourceManagement:::AppDomainMemSurvived
+nomac:AppDomainResourceManagement:::ThreadCreated
+nomac:AppDomainResourceManagement:::ThreadTerminated
+nomac:AppDomainResourceManagement:::ThreadDomainEnter
+
+################
+# Interop events
+################
+nomac:CLRILStub:::ILStubGenerated
+nomac:CLRILStub:::ILStubCacheHit
+
+###############
+# Method events
+###############
+nomac:CLRMethod:::MethodLoad
+noclrinstanceid:CLRMethod:::MethodLoad
+nostack:CLRMethod:::MethodLoad
+nostack:CLRMethod:::MethodLoad_V1
+nostack:CLRMethod:::MethodLoad_V2
+nomac:CLRMethod:::MethodUnload
+noclrinstanceid:CLRMethod:::MethodUnload
+nostack:CLRMethod:::MethodUnload
+nostack:CLRMethod:::MethodUnload_V1
+nostack:CLRMethod:::MethodUnload_V2
+nomac:CLRMethod:::MethodLoadVerbose
+noclrinstanceid:CLRMethod:::MethodLoadVerbose
+nostack:CLRMethod:::MethodLoadVerbose
+nostack:CLRMethod:::MethodLoadVerbose_V1
+nostack:CLRMethod:::MethodLoadVerbose_V2
+nomac:CLRMethod:::MethodUnloadVerbose
+nostack:CLRMethod:::MethodUnloadVerbose
+nostack:CLRMethod:::MethodUnloadVerbose_V1
+nostack:CLRMethod:::MethodUnloadVerbose_V2
+noclrinstanceid:CLRMethod:::MethodUnloadVerbose
+nomac:CLRMethod:::MethodJittingStarted
+noclrinstanceid:CLRMethod:::MethodJittingStarted
+nomac:CLRMethod:::MethodJitInliningSucceeded
+nostack:CLRMethod:::MethodJitInliningSucceeded
+nomac:CLRMethod:::MethodJitInliningFailed
+nostack:CLRMethod:::MethodJitInliningFailed
+nostack:CLRMethod:::MethodJitTailCallSucceeded
+nostack:CLRMethod:::MethodJitTailCallFailed
+noclrinstanceid:CLRMethod:::MethodDCStartV2
+noclrinstanceid:CLRMethod:::MethodDCEndV2
+noclrinstanceid:CLRMethod:::MethodDCStartVerboseV2
+noclrinstanceid:CLRMethod:::MethodDCEndVerboseV2
+noclrinstanceid:CLRMethod:::DCStartCompleteV2
+noclrinstanceid:CLRMethod:::DCEndCompleteV2
+nomac:CLRMethod:::MethodILToNativeMap
+
+###############
+# Loader events
+###############
+nomac:CLRLoader:::ModuleLoad
+noclrinstanceid:CLRLoader:::ModuleLoad
+nomac:CLRLoader:::ModuleUnload
+noclrinstanceid:CLRLoader:::ModuleUnload
+nomac:CLRLoader:::AssemblyLoad
+noclrinstanceid:CLRLoader:::AssemblyLoad
+nomac:CLRLoader:::AssemblyUnload
+noclrinstanceid:CLRLoader:::AssemblyUnload
+nomac:CLRLoader:::AppDomainLoad
+noclrinstanceid:CLRLoader:::AppDomainLoad
+nomac:CLRLoader:::AppDomainUnload
+noclrinstanceid:CLRLoader:::AppDomainUnload
+nomac:CLRLoader:::DomainModuleLoad
+noclrinstanceid:CLRLoader:::DomainModuleLoad
+noclrinstanceid:CLRLoader:::ModuleDCStartV2
+noclrinstanceid:CLRLoader:::ModuleDCEndV2
+nomac:CLRPerfTrack:::ModuleRangeLoad
+nostack:CLRPerfTrack:::ModuleRangeLoad
+nomac:CLRLoader:::ModuleLoad_V2
+nomac:CLRLoader:::ModuleUnload_V2
+nomac:CLRLoaderRundown:::ModuleDCStart_V2
+nomac:CLRLoaderRundown:::ModuleDCEnd_V2
+
+#################
+# Security events
+#################
+nomac:CLRStrongNameVerification:::StrongNameVerificationStart
+noclrinstanceid:CLRStrongNameVerification:::StrongNameVerificationStart
+nostack:CLRStrongNameVerification:::StrongNameVerificationStart
+nostack:CLRStrongNameVerification:::StrongNameVerificationStart_V1
+nomac:CLRStrongNameVerification:::StrongNameVerificationStart_V1
+nomac:CLRStrongNameVerification:::StrongNameVerificationStop
+noclrinstanceid:CLRStrongNameVerification:::StrongNameVerificationStop
+nomac:CLRStrongNameVerification:::StrongNameVerificationStop_V1
+nomac:CLRAuthenticodeVerification:::AuthenticodeVerificationStart
+noclrinstanceid:CLRAuthenticodeVerification:::AuthenticodeVerificationStart
+nostack:CLRAuthenticodeVerification:::AuthenticodeVerificationStart
+nomac:CLRAuthenticodeVerification:::AuthenticodeVerificationStart_V1
+nostack:CLRAuthenticodeVerification:::AuthenticodeVerificationStart_V1
+nomac:CLRAuthenticodeVerification:::AuthenticodeVerificationStop
+noclrinstanceid:CLRAuthenticodeVerification:::AuthenticodeVerificationStop
+nomac:CLRAuthenticodeVerification:::AuthenticodeVerificationStop_V1
+
+####################
+# RuntimeInfo events
+####################
+nostack:CLRRuntimeInformation:::RuntimeInformationStart
+
+##################################
+# Events from the rundown provider
+##################################
+nostack::Microsoft-Windows-DotNETRuntimeRundown::
+
+##################
+# StackWalk events
+##################
+nomac:CLRStackRundown:::CLRStackWalkDCStart
+
+###############
+# Method events
+###############
+nomac:CLRMethodRundown:::MethodDCStart
+noclrinstanceid:CLRMethodRundown:::MethodDCStart
+nomac:CLRMethodRundown:::MethodDCStart_V1
+nomac:CLRMethodRundown:::MethodDCEnd
+noclrinstanceid:CLRMethodRundown:::MethodDCEnd
+nomac:CLRMethodRundown:::MethodDCEnd_V1
+nomac:CLRMethodRundown:::MethodDCStartVerbose
+noclrinstanceid:CLRMethodRundown:::MethodDCStartVerbose
+nomac:CLRMethodRundown:::MethodDCStartVerbose_V1
+nomac:CLRMethodRundown:::MethodDCEndVerbose
+noclrinstanceid:CLRMethodRundown:::MethodDCEndVerbose
+nomac:CLRMethodRundown:::MethodDCEndVerbose_V1
+nomac:CLRMethodRundown:::DCStartComplete
+noclrinstanceid:CLRMethodRundown:::DCStartComplete
+nomac:CLRMethodRundown:::DCStartComplete_V1
+nomac:CLRMethodRundown:::DCEndComplete
+noclrinstanceid:CLRMethodRundown:::DCEndComplete
+nomac:CLRMethodRundown:::DCEndComplete_V1
+nomac:CLRMethodRundown:::DCStartInit
+noclrinstanceid:CLRMethodRundown:::DCStartInit
+nomac:CLRMethodRundown:::DCStartInit_V1
+nomac:CLRMethodRundown:::DCEndInit
+noclrinstanceid:CLRMethodRundown:::DCEndInit
+nomac:CLRMethodRundown:::DCEndInit_V1
+nomac:CLRMethodRundown:::MethodDCStartILToNativeMap
+nomac:CLRMethodRundown:::MethodDCEndILToNativeMap
+
+###############
+# Loader events
+###############
+nomac:CLRLoaderRundown:::DomainModuleDCStart
+noclrinstanceid:CLRLoaderRundown:::DomainModuleDCStart
+nomac:CLRLoaderRundown:::DomainModuleDCStart_V1
+nomac:CLRLoaderRundown:::DomainModuleDCEnd
+noclrinstanceid:CLRLoaderRundown:::DomainModuleDCEnd
+nomac:CLRLoaderRundown:::DomainModuleDCEnd_V1
+nomac:CLRLoaderRundown:::ModuleDCStart
+noclrinstanceid:CLRLoaderRundown:::ModuleDCStart
+nomac:CLRLoaderRundown:::ModuleDCStart_V1
+nomac:CLRLoaderRundown:::ModuleDCEnd
+noclrinstanceid:CLRLoaderRundown:::ModuleDCEnd
+nomac:CLRLoaderRundown:::ModuleDCEnd_V1
+nomac:CLRLoaderRundown:::AssemblyDCStart
+noclrinstanceid:CLRLoaderRundown:::AssemblyDCStart
+nomac:CLRLoaderRundown:::AssemblyDCStart_V1
+nomac:CLRLoaderRundown:::AssemblyDCEnd
+noclrinstanceid:CLRLoaderRundown:::AssemblyDCEnd
+nomac:CLRLoaderRundown:::AssemblyDCEnd_V1
+nomac:CLRLoaderRundown:::AppDomainDCStart
+noclrinstanceid:CLRLoaderRundown:::AppDomainDCStart
+nomac:CLRLoaderRundown:::AppDomainDCStart_V1
+nomac:CLRLoaderRundown:::AppDomainDCEnd
+noclrinstanceid:CLRLoaderRundown:::AppDomainDCEnd
+nomac:CLRLoaderRundown:::AppDomainDCEnd_V1
+nomac:CLRLoaderRundown:::ThreadDC
+nomac:CLRPerfTrack:::ModuleRangeDCStart
+nostack:CLRPerfTrack:::ModuleRangeDCStart
+nomac:CLRPerfTrack:::ModuleRangeDCEnd
+nostack:CLRPerfTrack:::ModuleRangeDCEnd
+
+####################
+# RuntimeInfo events
+####################
+nomac:CLRRuntimeInformationRundown:::RuntimeInformationDCStart
+
+##################################
+# Events from the private provider
+##################################
+nostack::Microsoft-Windows-DotNETRuntimePrivate::
+
+##########################
+# GarbageCollection events
+##########################
+nomac:GarbageCollectionPrivate:::GCDecision
+noclrinstanceid:GarbageCollectionPrivate:::GCDecision
+nomac:GarbageCollectionPrivate:::GCDecision_V1
+nomac:GarbageCollectionPrivate:::GCSettings
+noclrinstanceid:GarbageCollectionPrivate:::GCSettings
+nomac:GarbageCollectionPrivate:::GCSettings_V1
+nomac:GarbageCollectionPrivate:::GCOptimized
+noclrinstanceid:GarbageCollectionPrivate:::GCOptimized
+nomac:GarbageCollectionPrivate:::GCPerHeapHistory
+noclrinstanceid:GarbageCollectionPrivate:::GCPerHeapHistory
+nomac:GarbageCollectionPrivate:::GCPerHeapHistory_V1
+nomac:GarbageCollectionPrivate:::GCGlobalHeapHistory
+noclrinstanceid:GarbageCollectionPrivate:::GCGlobalHeapHistory
+nomac:GarbageCollectionPrivate:::GCGlobalHeapHistory_V1
+nomac:GarbageCollectionPrivate:::GCJoin
+noclrinstanceid:GarbageCollectionPrivate:::GCJoin
+nomac:GarbageCollectionPrivate:::GCJoin_V1
+nomac:GarbageCollectionPrivate:::PrvGCMarkStackRoots
+noclrinstanceid:GarbageCollectionPrivate:::PrvGCMarkStackRoots
+nomac:GarbageCollectionPrivate:::PrvGCMarkStackRoots_V1
+nomac:GarbageCollectionPrivate:::PrvGCMarkFinalizeQueueRoots
+noclrinstanceid:GarbageCollectionPrivate:::PrvGCMarkFinalizeQueueRoots
+nomac:GarbageCollectionPrivate:::PrvGCMarkFinalizeQueueRoots_V1
+nomac:GarbageCollectionPrivate:::PrvGCMarkHandles
+noclrinstanceid:GarbageCollectionPrivate:::PrvGCMarkHandles
+nomac:GarbageCollectionPrivate:::PrvGCMarkHandles_V1
+nomac:GarbageCollectionPrivate:::PrvGCMarkCards
+noclrinstanceid:GarbageCollectionPrivate:::PrvGCMarkCards
+nomac:GarbageCollectionPrivate:::PrvGCMarkCards_V1
+nomac:GarbageCollectionPrivate:::BGCBegin
+nomac:GarbageCollectionPrivate:::BGC1stNonConEnd
+nomac:GarbageCollectionPrivate:::BGC1stConEnd
+nomac:GarbageCollectionPrivate:::BGC2ndNonConBegin
+nomac:GarbageCollectionPrivate:::BGC2ndNonConEnd
+nomac:GarbageCollectionPrivate:::BGC2ndConBegin
+nomac:GarbageCollectionPrivate:::BGC2ndConEnd
+nomac:GarbageCollectionPrivate:::BGCPlanEnd
+nomac:GarbageCollectionPrivate:::BGCSweepEnd
+nomac:GarbageCollectionPrivate:::BGCDrainMark
+nomac:GarbageCollectionPrivate:::BGCRevisit
+nomac:GarbageCollectionPrivate:::BGCOverflow
+nomac:GarbageCollectionPrivate:::BGCAllocWaitBegin
+nomac:GarbageCollectionPrivate:::BGCAllocWaitEnd
+nomac:GarbageCollectionPrivate:::GCFullNotify
+noclrinstanceid:GarbageCollectionPrivate:::GCFullNotify
+stack:GarbageCollectionPrivate:Microsoft-Windows-DotNETRuntimePrivate::SetGCHandle
+stack:GarbageCollectionPrivate:Microsoft-Windows-DotNETRuntimePrivate::DestroyGCHandle
+stack:GarbageCollectionPrivate:Microsoft-Windows-DotNETRuntimePrivate::CCWRefCountChange
+
+################
+# Startup events
+################
+nomac:Startup:::EEStartupStart
+noclrinstanceid:Startup:::EEStartupStart
+nomac:Startup:::EEStartupEnd
+noclrinstanceid:Startup:::EEStartupEnd
+nomac:Startup:::EEStartupEnd_V1
+nomac:Startup:::EEConfigSetup
+noclrinstanceid:Startup:::EEConfigSetup
+nomac:Startup:::EEConfigSetupEnd
+noclrinstanceid:Startup:::EEConfigSetupEnd
+nomac:Startup:::LdSysBases
+noclrinstanceid:Startup:::LdSysBases
+nomac:Startup:::LdSysBasesEnd
+noclrinstanceid:Startup:::LdSysBasesEnd
+nomac:Startup:::ExecExe
+noclrinstanceid:Startup:::ExecExe
+nomac:Startup:::ExecExe_V1
+nomac:Startup:::ExecExeEnd
+noclrinstanceid:Startup:::ExecExeEnd
+nomac:Startup:::ExecExeEnd_V1
+nomac:Startup:::Main
+noclrinstanceid:Startup:::Main
+nomac:Startup:::MainEnd
+noclrinstanceid:Startup:::MainEnd
+nomac:Startup:::ApplyPolicyStart
+noclrinstanceid:Startup:::ApplyPolicyStart
+nomac:Startup:::ApplyPolicyStart_V1
+nomac:Startup:::ApplyPolicyEnd
+noclrinstanceid:Startup:::ApplyPolicyEnd
+nomac:Startup:::ApplyPolicyEnd_V1
+nomac:Startup:::LdLibShFolder
+noclrinstanceid:Startup:::LdLibShFolder
+nomac:Startup:::LdLibShFolder_V1
+nomac:Startup:::LdLibShFolderEnd
+noclrinstanceid:Startup:::LdLibShFolderEnd
+nomac:Startup:::LdLibShFolderEnd_V1
+nomac:Startup:::PrestubWorker
+noclrinstanceid:Startup:::PrestubWorker
+nomac:Startup:::PrestubWorkerEnd
+noclrinstanceid:Startup:::PrestubWorkerEnd
+nomac:Startup:::PrestubWorkerEnd_V1
+nomac:Startup:::GetInstallationStart
+noclrinstanceid:Startup:::GetInstallationStart
+nomac:Startup:::GetInstallationStart_V1
+nomac:Startup:::GetInstallationEnd
+noclrinstanceid:Startup:::GetInstallationEnd
+nomac:Startup:::GetInstallationEnd_V1
+nomac:Startup:::OpenHModule
+noclrinstanceid:Startup:::OpenHModule
+nomac:Startup:::OpenHModule_V1
+nomac:Startup:::OpenHModuleEnd
+noclrinstanceid:Startup:::OpenHModuleEnd
+nomac:Startup:::OpenHModuleEnd_V1
+nomac:Startup:::ExplicitBindStart
+noclrinstanceid:Startup:::ExplicitBindStart
+nomac:Startup:::ExplicitBindStart_V1
+nomac:Startup:::ExplicitBindEnd
+noclrinstanceid:Startup:::ExplicitBindEnd
+nomac:Startup:::ExplicitBindEnd_V1
+nomac:Startup:::ParseXml
+noclrinstanceid:Startup:::ParseXml
+nomac:Startup:::ParseXml_V1
+nomac:Startup:::ParseXmlEnd
+noclrinstanceid:Startup:::ParseXmlEnd
+nomac:Startup:::ParseXmlEnd_V1
+nomac:Startup:::InitDefaultDomain
+noclrinstanceid:Startup:::InitDefaultDomain
+nomac:Startup:::InitDefaultDomainEnd
+noclrinstanceid:Startup:::InitDefaultDomainEnd
+nomac:Startup:::InitSecurity
+noclrinstanceid:Startup:::InitSecurity
+nomac:Startup:::InitSecurity_V1
+nomac:Startup:::InitSecurityEnd
+noclrinstanceid:Startup:::InitSecurityEnd
+nomac:Startup:::InitSecurityEnd_V1
+nomac:Startup:::AllowBindingRedirs
+noclrinstanceid:Startup:::AllowBindingRedirs
+nomac:Startup:::AllowBindingRedirs_V1
+nomac:Startup:::AllowBindingRedirsEnd
+noclrinstanceid:Startup:::AllowBindingRedirsEnd
+nomac:Startup:::AllowBindingRedirsEnd_V1
+nomac:Startup:::EEConfigSync
+noclrinstanceid:Startup:::EEConfigSync
+nomac:Startup:::EEConfigSyncEnd
+noclrinstanceid:Startup:::EEConfigSyncEnd
+nomac:Startup:::FusionBinding
+noclrinstanceid:Startup:::FusionBinding
+nomac:Startup:::FusionBindingEnd
+noclrinstanceid:Startup:::FusionBindingEnd
+nomac:Startup:::LoaderCatchCall
+noclrinstanceid:Startup:::LoaderCatchCall
+nomac:Startup:::LoaderCatchCallEnd
+noclrinstanceid:Startup:::LoaderCatchCallEnd
+nomac:Startup:::FusionInit
+noclrinstanceid:Startup:::FusionInit
+nomac:Startup:::FusionInit_V1
+nomac:Startup:::FusionInitEnd
+noclrinstanceid:Startup:::FusionInitEnd
+nomac:Startup:::FusionInitEnd_V1
+nomac:Startup:::FusionAppCtx
+noclrinstanceid:Startup:::FusionAppCtx
+nomac:Startup:::FusionAppCtxEnd
+noclrinstanceid:Startup:::FusionAppCtxEnd
+nomac:Startup:::Fusion2EE
+noclrinstanceid:Startup:::Fusion2EE
+nomac:Startup:::Fusion2EE_V1
+nomac:Startup:::Fusion2EEEnd
+noclrinstanceid:Startup:::Fusion2EEEnd
+nomac:Startup:::Fusion2EEEnd_V1
+nomac:Startup:::SecurityCatchCall
+noclrinstanceid:Startup:::SecurityCatchCall
+nomac:Startup:::SecurityCatchCallEnd
+noclrinstanceid:Startup:::SecurityCatchCallEnd
+
+##################
+# Loader events
+##################
+stack:LoaderHeapAllocation:Microsoft-Windows-DotNETRuntimePrivate::AllocRequest
+
+##################
+# StackWalk events
+##################
+nomac:CLRStackPrivate:::CLRStackWalkPrivate
+
+################
+# Binding events
+################
+nomac:Binding:::BindingPolicyPhaseStart
+nomac:Binding:::BindingPolicyPhaseEnd
+nomac:Binding:::BindingNgenPhaseStart
+nomac:Binding:::BindingNgenPhaseEnd
+nomac:Binding:::BindingLookupAndProbingPhaseStart
+nomac:Binding:::BindingLookupAndProbingPhaseEnd
+nomac:Binding:::LoaderPhaseStart
+nomac:Binding:::LoaderPhaseEnd
+nomac:Binding:::BindingPhaseStart
+nomac:Binding:::BindingPhaseEnd
+nomac:Binding:::BindingDownloadPhaseStart
+nomac:Binding:::BindingDownloadPhaseEnd
+nomac:Binding:::LoaderAssemblyInitPhaseStart
+nomac:Binding:::LoaderAssemblyInitPhaseEnd
+nomac:Binding:::LoaderMappingPhaseStart
+nomac:Binding:::LoaderMappingPhaseEnd
+nomac:Binding:::LoaderDeliverEventsPhaseStart
+nomac:Binding:::LoaderDeliverEventsPhaseEnd
+nomac:Binding:::EvidenceGenerated
+nomac:Binding:::FusionMessage
+stack:Binding:::FusionMessage
+nomac:Binding:::FusionErrorCode
+stack:Binding:::FusionErrorCode
+
+################
+# ModuleRange event
+################
+nomac:CLRPerfTrackPrivate:::ModuleRangeLoadPrivate
+nostack:CLRPerfTrackPrivate:::ModuleRangeLoadPrivate
+
+#################################
+# Events from the stress provider
+#################################
+nostack::Microsoft-Windows-DotNETRuntimeStress::
+
+##################
+# StressLog events
+##################
+nomac:StressLogTask:::StressLogEvent
+noclrinstanceid:StressLogTask:::StressLogEvent
+nomac:StressLogTask:::StressLogEvent_V1
+
+##################
+# StackWalk events
+##################
+nomac:CLRStackStress:::CLRStackWalkStress
diff --git a/src/vm/amd64/.gitmirror b/src/vm/amd64/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/vm/amd64/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror. \ No newline at end of file
diff --git a/src/vm/amd64/AsmHelpers.asm b/src/vm/amd64/AsmHelpers.asm
new file mode 100644
index 0000000000..cede8467dc
--- /dev/null
+++ b/src/vm/amd64/AsmHelpers.asm
@@ -0,0 +1,793 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+;
+; FILE: asmhelpers.asm
+;
+
+;
+; ======================================================================================
+
+include AsmMacros.inc
+include asmconstants.inc
+
+extern JIT_InternalThrow:proc
+extern NDirectImportWorker:proc
+extern ThePreStub:proc
+extern ProfileEnter:proc
+extern ProfileLeave:proc
+extern ProfileTailcall:proc
+extern OnHijackObjectWorker:proc
+extern OnHijackInteriorPointerWorker:proc
+extern OnHijackScalarWorker:proc
+extern JIT_RareDisableHelperWorker:proc
+
+ifdef _DEBUG
+extern DebugCheckStubUnwindInfoWorker:proc
+endif
+
+
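+; The macro below emits four variants of an array-op exception-throwing
+; stub. The variants differ only in what the caller has already pushed
+; (rsi/rdi and/or a 28h callee scratch area), so that each variant's
+; unwind info and epilogue match the stack state at the throw site.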
+GenerateArrayOpStubExceptionCase macro ErrorCaseName, ExceptionName
+
+NESTED_ENTRY ErrorCaseName&_RSIRDI_ScratchArea, _TEXT
+
+ ; account for scratch area, rsi, rdi already on the stack
+ .allocstack 38h
+ END_PROLOGUE
+
+ mov rcx, CORINFO_&ExceptionName&_ASM
+
+ ; begin epilogue
+
+ add rsp, 28h ; pop callee scratch area
+ pop rdi
+ pop rsi
+ jmp JIT_InternalThrow
+
+NESTED_END ErrorCaseName&_RSIRDI_ScratchArea, _TEXT
+
+NESTED_ENTRY ErrorCaseName&_ScratchArea, _TEXT
+
+ ; account for scratch area already on the stack
+ .allocstack 28h
+ END_PROLOGUE
+
+ mov rcx, CORINFO_&ExceptionName&_ASM
+
+ ; begin epilogue
+
+ add rsp, 28h ; pop callee scratch area
+ jmp JIT_InternalThrow
+
+NESTED_END ErrorCaseName&_ScratchArea, _TEXT
+
+NESTED_ENTRY ErrorCaseName&_RSIRDI, _TEXT
+
+ ; account for rsi, rdi already on the stack
+ .allocstack 10h
+ END_PROLOGUE
+
+ mov rcx, CORINFO_&ExceptionName&_ASM
+
+ ; begin epilogue
+
+ pop rdi
+ pop rsi
+ jmp JIT_InternalThrow
+
+NESTED_END ErrorCaseName&_RSIRDI, _TEXT
+
+LEAF_ENTRY ErrorCaseName, _TEXT
+
+ mov rcx, CORINFO_&ExceptionName&_ASM
+
+ ; begin epilogue
+
+ jmp JIT_InternalThrow
+
+LEAF_END ErrorCaseName, _TEXT
+
+ endm
+
+
+GenerateArrayOpStubExceptionCase ArrayOpStubNullException, NullReferenceException
+GenerateArrayOpStubExceptionCase ArrayOpStubRangeException, IndexOutOfRangeException
+GenerateArrayOpStubExceptionCase ArrayOpStubTypeMismatchException, ArrayTypeMismatchException
+
+
+; EXTERN_C int __fastcall HelperMethodFrameRestoreState(
+; INDEBUG_COMMA(HelperMethodFrame *pFrame)
+; MachState *pState
+; )
+LEAF_ENTRY HelperMethodFrameRestoreState, _TEXT
+
+ifdef _DEBUG
+ mov rcx, rdx
+endif
+
+ ; Check if the MachState is valid
+ xor eax, eax
+ cmp qword ptr [rcx + OFFSETOF__MachState___pRetAddr], rax
+ jne @F
+ REPRET
+@@:
+
+ ;
+    ; If a preserved register was pushed onto the stack between
+    ; the managed caller and the H_M_F, m_pReg points to its
+    ; location on the stack. The GC has already updated that copy
+    ; on the stack, and it will be popped back into the register
+    ; when the appropriate epilog is run.
+ ;
+ ; Otherwise, the register is preserved across all the code
+ ; in this HCALL or FCALL, so we need to update those registers
+ ; here because the GC will have updated our copies in the
+ ; frame.
+ ;
+ ; So, if m_pReg points into the MachState, we need to update
+ ; the register here. That's what this macro does.
+ ;
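+    ; Roughly, in C terms (an illustrative sketch of the Rdi case;
+    ; the field names follow the OFFSETOF__MachState__* symbols):
+    ;
+    ;     if (pState->m_pRdi == &pState->m_CaptureRdi)
+    ;         rdi = pState->m_CaptureRdi;
+    ;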
+RestoreReg macro reg
+ lea rax, [rcx + OFFSETOF__MachState__m_Capture&reg]
+ mov rdx, [rcx + OFFSETOF__MachState__m_p&reg]
+ cmp rax, rdx
+ cmove reg, [rax]
+ endm
+
+ RestoreReg Rdi
+ RestoreReg Rsi
+ RestoreReg Rbx
+ RestoreReg Rbp
+ RestoreReg R12
+ RestoreReg R13
+ RestoreReg R14
+ RestoreReg R15
+
+ xor eax, eax
+ ret
+
+LEAF_END HelperMethodFrameRestoreState, _TEXT
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; NDirectImportThunk
+;;
+;; In addition to being called by the EE, this function can be called
+;; directly from code generated by JIT64 for CRT optimized direct
+;; P/Invoke calls. If it is modified, the JIT64 compiler's code
+;; generation will need to be altered accordingly.
+;;
+; EXTERN_C VOID __stdcall NDirectImportThunk();
+NESTED_ENTRY NDirectImportThunk, _TEXT
+
+ ;
+ ; Allocate space for XMM parameter registers and callee scratch area.
+ ;
+ alloc_stack 68h
+
+ ;
+ ; Save integer parameter registers.
+        ; Make sure to preserve r11 as well, since it is used to pass the stack argument size from the JIT
+ ;
+ save_reg_postrsp rcx, 70h
+ save_reg_postrsp rdx, 78h
+ save_reg_postrsp r8, 80h
+ save_reg_postrsp r9, 88h
+ save_reg_postrsp r11, 60h
+
+ save_xmm128_postrsp xmm0, 20h
+ save_xmm128_postrsp xmm1, 30h
+ save_xmm128_postrsp xmm2, 40h
+ save_xmm128_postrsp xmm3, 50h
+ END_PROLOGUE
+
+ ;
+ ; Call NDirectImportWorker w/ the NDirectMethodDesc*
+ ;
+ mov rcx, METHODDESC_REGISTER
+ call NDirectImportWorker
+
+ ;
+ ; Restore parameter registers
+ ;
+ mov rcx, [rsp + 70h]
+ mov rdx, [rsp + 78h]
+ mov r8, [rsp + 80h]
+ mov r9, [rsp + 88h]
+ mov r11, [rsp + 60h]
+ movdqa xmm0, [rsp + 20h]
+ movdqa xmm1, [rsp + 30h]
+ movdqa xmm2, [rsp + 40h]
+ movdqa xmm3, [rsp + 50h]
+
+ ;
+ ; epilogue, rax contains the native target address
+ ;
+ add rsp, 68h
+
+ TAILJMP_RAX
+NESTED_END NDirectImportThunk, _TEXT
+
+
+;------------------------------------------------
+; JIT_RareDisableHelper
+;
+; The JIT expects this helper to preserve registers used for return values
+;
+
+NESTED_ENTRY JIT_RareDisableHelper, _TEXT
+
+ push rax
+ alloc_stack 30h
+ END_PROLOGUE
+ movdqu [rsp+20h], xmm0
+
+ call JIT_RareDisableHelperWorker
+
+ movdqu xmm0, [rsp+20h]
+ add rsp, 30h
+ pop rax
+ ret
+
+NESTED_END JIT_RareDisableHelper, _TEXT
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; PrecodeFixupThunk
+;;
+;; The call in a fixup precode initially points to this function.
+;; The purpose of this function is to load the MethodDesc and forward the call to the prestub.
+;;
+; EXTERN_C VOID __stdcall PrecodeFixupThunk();
+LEAF_ENTRY PrecodeFixupThunk, _TEXT
+
+ pop rax ; Pop the return address. It points right after the call instruction in the precode.
+
+ ; Inline computation done by FixupPrecode::GetMethodDesc()
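+        ; m_PrecodeChunkIndex (at [rax+2]) selects the qword at the end of the
+        ; precode chunk holding a base pointer, and m_MethodDescChunkIndex (at
+        ; [rax+1]) is scaled by 8 to locate the MethodDesc from that base --
+        ; a sketch of the layout as inferred from the loads below.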
+ movzx r10,byte ptr [rax+2] ; m_PrecodeChunkIndex
+ movzx r11,byte ptr [rax+1] ; m_MethodDescChunkIndex
+ mov rax,qword ptr [rax+r10*8+3]
+ lea METHODDESC_REGISTER,[rax+r11*8]
+
+ ; Tail call to prestub
+ jmp ThePreStub
+
+LEAF_END PrecodeFixupThunk, _TEXT
+
+
+; extern "C" void setFPReturn(int fpSize, INT64 retVal);
+LEAF_ENTRY setFPReturn, _TEXT
+ cmp ecx, 4
+ je setFPReturn4
+ cmp ecx, 8
+ jne setFPReturnNot8
+ mov [rsp+10h], rdx
+ movsd xmm0, real8 ptr [rsp+10h]
+setFPReturnNot8:
+ REPRET
+
+setFPReturn4:
+ mov [rsp+10h], rdx
+ movss xmm0, real4 ptr [rsp+10h]
+ ret
+LEAF_END setFPReturn, _TEXT
+
+
+; extern "C" void getFPReturn(int fpSize, INT64 *retval);
+LEAF_ENTRY getFPReturn, _TEXT
+ cmp ecx, 4
+ je getFPReturn4
+ cmp ecx, 8
+ jne getFPReturnNot8
+ movsd real8 ptr [rdx], xmm0
+getFPReturnNot8:
+ REPRET
+
+getFPReturn4:
+ movss real4 ptr [rdx], xmm0
+ ret
+LEAF_END getFPReturn, _TEXT
+
+
+ifdef _DEBUG
+NESTED_ENTRY DebugCheckStubUnwindInfo, _TEXT
+
+ ;
+ ; rax is pushed on the stack before being trashed by the "mov rax,
+ ; target/jmp rax" code generated by X86EmitNearJump. This stack slot
+ ; will be reused later in the epilogue. This slot is left there to
+ ; align rsp.
+ ;
+
+ .allocstack 8
+
+ mov rax, [rsp]
+
+ ;
+ ; Create a CONTEXT structure. DebugCheckStubUnwindInfoWorker will
+ ; fill in the flags.
+ ;
+
+ alloc_stack 20h + SIZEOF__CONTEXT
+
+ mov r10, rbp
+
+ set_frame rbp, 20h
+
+ mov [rbp + OFFSETOF__CONTEXT__Rbp], r10
+ .savereg rbp, OFFSETOF__CONTEXT__Rbp
+
+ save_reg_frame rbx, rbp, OFFSETOF__CONTEXT__Rbx
+ save_reg_frame rsi, rbp, OFFSETOF__CONTEXT__Rsi
+ save_reg_frame rdi, rbp, OFFSETOF__CONTEXT__Rdi
+ save_reg_frame r12, rbp, OFFSETOF__CONTEXT__R12
+ save_reg_frame r13, rbp, OFFSETOF__CONTEXT__R13
+ save_reg_frame r14, rbp, OFFSETOF__CONTEXT__R14
+ save_reg_frame r15, rbp, OFFSETOF__CONTEXT__R15
+ save_xmm128_frame xmm6, rbp, OFFSETOF__CONTEXT__Xmm6
+ save_xmm128_frame xmm7, rbp, OFFSETOF__CONTEXT__Xmm7
+ save_xmm128_frame xmm8, rbp, OFFSETOF__CONTEXT__Xmm8
+ save_xmm128_frame xmm9, rbp, OFFSETOF__CONTEXT__Xmm9
+ save_xmm128_frame xmm10, rbp, OFFSETOF__CONTEXT__Xmm10
+ save_xmm128_frame xmm11, rbp, OFFSETOF__CONTEXT__Xmm11
+ save_xmm128_frame xmm12, rbp, OFFSETOF__CONTEXT__Xmm12
+ save_xmm128_frame xmm13, rbp, OFFSETOF__CONTEXT__Xmm13
+ save_xmm128_frame xmm14, rbp, OFFSETOF__CONTEXT__Xmm14
+ save_xmm128_frame xmm15, rbp, OFFSETOF__CONTEXT__Xmm15
+ END_PROLOGUE
+
+ mov [rbp + OFFSETOF__CONTEXT__Rax], rax
+ mov [rbp + OFFSETOF__CONTEXT__Rcx], rcx
+ mov [rbp + OFFSETOF__CONTEXT__Rdx], rdx
+ mov [rbp + OFFSETOF__CONTEXT__R8], r8
+ mov [rbp + OFFSETOF__CONTEXT__R9], r9
+ mov [rbp + OFFSETOF__CONTEXT__R10], r10
+ mov [rbp + OFFSETOF__CONTEXT__R11], r11
+ movdqa [rbp + OFFSETOF__CONTEXT__Xmm0], xmm0
+ movdqa [rbp + OFFSETOF__CONTEXT__Xmm1], xmm1
+ movdqa [rbp + OFFSETOF__CONTEXT__Xmm2], xmm2
+ movdqa [rbp + OFFSETOF__CONTEXT__Xmm3], xmm3
+ movdqa [rbp + OFFSETOF__CONTEXT__Xmm4], xmm4
+ movdqa [rbp + OFFSETOF__CONTEXT__Xmm5], xmm5
+
+ mov rax, [rbp+SIZEOF__CONTEXT+8]
+ mov [rbp+OFFSETOF__CONTEXT__Rip], rax
+
+ lea rax, [rbp+SIZEOF__CONTEXT+8+8]
+ mov [rbp+OFFSETOF__CONTEXT__Rsp], rax
+
+ ;
+ ; Align rsp
+ ;
+ and rsp, -16
+
+ ;
+ ; Verify that unwinding works from the stub's CONTEXT.
+ ;
+
+ mov rcx, rbp
+ call DebugCheckStubUnwindInfoWorker
+
+ ;
+ ; Restore stub's registers. rbp will be restored using "pop" in the
+ ; epilogue.
+ ;
+
+ mov rax, [rbp+OFFSETOF__CONTEXT__Rbp]
+ mov [rbp+SIZEOF__CONTEXT], rax
+
+ mov rax, [rbp+OFFSETOF__CONTEXT__Rax]
+ mov rbx, [rbp+OFFSETOF__CONTEXT__Rbx]
+ mov rcx, [rbp+OFFSETOF__CONTEXT__Rcx]
+ mov rdx, [rbp+OFFSETOF__CONTEXT__Rdx]
+ mov rsi, [rbp+OFFSETOF__CONTEXT__Rsi]
+ mov rdi, [rbp+OFFSETOF__CONTEXT__Rdi]
+ mov r8, [rbp+OFFSETOF__CONTEXT__R8]
+ mov r9, [rbp+OFFSETOF__CONTEXT__R9]
+ mov r10, [rbp+OFFSETOF__CONTEXT__R10]
+ mov r11, [rbp+OFFSETOF__CONTEXT__R11]
+ mov r12, [rbp+OFFSETOF__CONTEXT__R12]
+ mov r13, [rbp+OFFSETOF__CONTEXT__R13]
+ mov r14, [rbp+OFFSETOF__CONTEXT__R14]
+ mov r15, [rbp+OFFSETOF__CONTEXT__R15]
+ movdqa xmm0, [rbp+OFFSETOF__CONTEXT__Xmm0]
+ movdqa xmm1, [rbp+OFFSETOF__CONTEXT__Xmm1]
+ movdqa xmm2, [rbp+OFFSETOF__CONTEXT__Xmm2]
+ movdqa xmm3, [rbp+OFFSETOF__CONTEXT__Xmm3]
+ movdqa xmm4, [rbp+OFFSETOF__CONTEXT__Xmm4]
+ movdqa xmm5, [rbp+OFFSETOF__CONTEXT__Xmm5]
+ movdqa xmm6, [rbp+OFFSETOF__CONTEXT__Xmm6]
+ movdqa xmm7, [rbp+OFFSETOF__CONTEXT__Xmm7]
+ movdqa xmm8, [rbp+OFFSETOF__CONTEXT__Xmm8]
+ movdqa xmm9, [rbp+OFFSETOF__CONTEXT__Xmm9]
+ movdqa xmm10, [rbp+OFFSETOF__CONTEXT__Xmm10]
+ movdqa xmm11, [rbp+OFFSETOF__CONTEXT__Xmm11]
+ movdqa xmm12, [rbp+OFFSETOF__CONTEXT__Xmm12]
+ movdqa xmm13, [rbp+OFFSETOF__CONTEXT__Xmm13]
+ movdqa xmm14, [rbp+OFFSETOF__CONTEXT__Xmm14]
+ movdqa xmm15, [rbp+OFFSETOF__CONTEXT__Xmm15]
+
+ ;
+ ; epilogue
+ ;
+
+ lea rsp, [rbp + SIZEOF__CONTEXT]
+ pop rbp
+ ret
+
+NESTED_END DebugCheckStubUnwindInfo, _TEXT
+endif ; _DEBUG
+
+
+; A JITted method's return address was hijacked to return to us here.
+;
+;VOID __stdcall OnHijackObjectTripThread();
+NESTED_ENTRY OnHijackObjectTripThread, _TEXT
+
+ ; Don't fiddle with this unless you change HijackFrame::UpdateRegDisplay
+ ; and HijackObjectArgs
+ push rax ; make room for the real return address (Rip)
+ PUSH_CALLEE_SAVED_REGISTERS
+ push_vol_reg rax
+ mov rcx, rsp
+
+ alloc_stack 20h
+
+ END_PROLOGUE
+
+ call OnHijackObjectWorker
+
+ add rsp, 20h
+ pop rax
+ POP_CALLEE_SAVED_REGISTERS
+ ret ; return to the correct place, adjusted by our caller
+NESTED_END OnHijackObjectTripThread, _TEXT
+
+
+; VOID OnHijackInteriorPointerTripThread()
+NESTED_ENTRY OnHijackInteriorPointerTripThread, _TEXT
+
+ ; Don't fiddle with this unless you change HijackFrame::UpdateRegDisplay
+ ; and HijackObjectArgs
+ push rax ; make room for the real return address (Rip)
+ PUSH_CALLEE_SAVED_REGISTERS
+ push_vol_reg rax
+ mov rcx, rsp
+
+ alloc_stack 20h
+
+ END_PROLOGUE
+
+ call OnHijackInteriorPointerWorker
+
+ add rsp, 20h
+ pop rax
+ POP_CALLEE_SAVED_REGISTERS
+ ret ; return to the correct place, adjusted by our caller
+NESTED_END OnHijackInteriorPointerTripThread, _TEXT
+
+; VOID OnHijackScalarTripThread()
+NESTED_ENTRY OnHijackScalarTripThread, _TEXT
+
+ ; Don't fiddle with this unless you change HijackFrame::UpdateRegDisplay
+ ; and HijackObjectArgs
+ push rax ; make room for the real return address (Rip)
+ PUSH_CALLEE_SAVED_REGISTERS
+ push_vol_reg rax
+ mov rcx, rsp
+
+ alloc_stack 30h ; make extra room for xmm0
+ save_xmm128_postrsp xmm0, 20h
+
+
+ END_PROLOGUE
+
+ call OnHijackScalarWorker
+
+ movdqa xmm0, [rsp + 20h]
+
+ add rsp, 30h
+ pop rax
+ POP_CALLEE_SAVED_REGISTERS
+ ret ; return to the correct place, adjusted by our caller
+NESTED_END OnHijackScalarTripThread, _TEXT
+
+
+;
+; typedef struct _PROFILE_PLATFORM_SPECIFIC_DATA
+; {
+; FunctionID *functionId; // function ID comes in the r11 register
+; void *rbp;
+; void *probersp;
+; void *ip;
+; void *profiledRsp;
+; UINT64 rax;
+; LPVOID hiddenArg;
+; UINT64 flt0;
+; UINT64 flt1;
+; UINT64 flt2;
+; UINT64 flt3;
+; UINT32 flags;
+; } PROFILE_PLATFORM_SPECIFIC_DATA, *PPROFILE_PLATFORM_SPECIFIC_DATA;
+;
+SIZEOF_PROFILE_PLATFORM_SPECIFIC_DATA equ 8h*11 + 4h*2 ; includes fudge to make FP_SPILL right
+SIZEOF_OUTGOING_ARGUMENT_HOMES equ 8h*4
+SIZEOF_FP_ARG_SPILL equ 10h*1
+
+; Need to be careful to keep the stack 16-byte aligned here, since we are pushing 3
+; arguments that will align the stack and we just want to keep it aligned with our
+; SIZEOF_STACK_FRAME
+
+OFFSETOF_PLATFORM_SPECIFIC_DATA equ SIZEOF_OUTGOING_ARGUMENT_HOMES
+
+; we'll just spill into the PROFILE_PLATFORM_SPECIFIC_DATA structure
+OFFSETOF_FP_ARG_SPILL equ SIZEOF_OUTGOING_ARGUMENT_HOMES + \
+ SIZEOF_PROFILE_PLATFORM_SPECIFIC_DATA
+
+SIZEOF_STACK_FRAME equ SIZEOF_OUTGOING_ARGUMENT_HOMES + \
+ SIZEOF_PROFILE_PLATFORM_SPECIFIC_DATA + \
+                                            SIZEOF_FP_ARG_SPILL
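+
+; Resulting frame layout after alloc_stack SIZEOF_STACK_FRAME (a sketch
+; derived from the offsets above; low addresses first):
+;
+;   rsp + 00h                               : outgoing argument homes (8h*4)
+;   rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA   : PROFILE_PLATFORM_SPECIFIC_DATA
+;   rsp + OFFSETOF_FP_ARG_SPILL             : xmm0 spill (10h)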
+
+PROFILE_ENTER equ 1h
+PROFILE_LEAVE equ 2h
+PROFILE_TAILCALL equ 4h
+
+; ***********************************************************
+; NOTE:
+;
+; Register preservation scheme:
+;
+; Preserved:
+; - all non-volatile registers
+; - rax
+; - xmm0
+;
+; Not Preserved:
+; - integer argument registers (rcx, rdx, r8, r9)
+; - floating point argument registers (xmm1-3)
+; - volatile integer registers (r10, r11)
+; - volatile floating point registers (xmm4-5)
+;
+; ***********************************************************
+
+; void JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle)
+LEAF_ENTRY JIT_ProfilerEnterLeaveTailcallStub, _TEXT
+ REPRET
+LEAF_END JIT_ProfilerEnterLeaveTailcallStub, _TEXT
+
+;EXTERN_C void ProfileEnterNaked(FunctionIDOrClientID functionIDOrClientID, size_t profiledRsp);
+NESTED_ENTRY ProfileEnterNaked, _TEXT
+ push_nonvol_reg rax
+
+; Upon entry :
+; rcx = clientInfo
+; rdx = profiledRsp
+
+ lea rax, [rsp + 10h] ; caller rsp
+ mov r10, [rax - 8h] ; return address
+
+ alloc_stack SIZEOF_STACK_FRAME
+
+ ; correctness of return value in structure doesn't matter for enter probe
+
+
+ ; setup ProfilePlatformSpecificData structure
+ xor r8, r8;
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 0h], r8 ; r8 is null -- struct functionId field
+ save_reg_postrsp rbp, OFFSETOF_PLATFORM_SPECIFIC_DATA + 8h ; -- struct rbp field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 10h], rax ; caller rsp -- struct probeRsp field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 18h], r10 ; return address -- struct ip field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 20h], rdx ; -- struct profiledRsp field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 28h], r8 ; r8 is null -- struct rax field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 30h], r8 ; r8 is null -- struct hiddenArg field
+ movsd real8 ptr [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 38h], xmm0 ; -- struct flt0 field
+ movsd real8 ptr [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 40h], xmm1 ; -- struct flt1 field
+ movsd real8 ptr [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 48h], xmm2 ; -- struct flt2 field
+ movsd real8 ptr [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 50h], xmm3 ; -- struct flt3 field
+ mov r10, PROFILE_ENTER
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 58h], r10d ; flags ; -- struct flags field
+
+ ; we need to be able to restore the fp return register
+ save_xmm128_postrsp xmm0, OFFSETOF_FP_ARG_SPILL + 0h
+ END_PROLOGUE
+
+ ; rcx already contains the clientInfo
+ lea rdx, [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA]
+ call ProfileEnter
+
+ ; restore fp return register
+ movdqa xmm0, [rsp + OFFSETOF_FP_ARG_SPILL + 0h]
+
+ ; begin epilogue
+ add rsp, SIZEOF_STACK_FRAME
+ pop rax
+ ret
+NESTED_END ProfileEnterNaked, _TEXT
+
+;EXTERN_C void ProfileLeaveNaked(FunctionIDOrClientID functionIDOrClientID, size_t profiledRsp);
+NESTED_ENTRY ProfileLeaveNaked, _TEXT
+ push_nonvol_reg rax
+
+; Upon entry :
+; rcx = clientInfo
+; rdx = profiledRsp
+
+ ; need to be careful with rax here because it contains the return value which we want to harvest
+
+ lea r10, [rsp + 10h] ; caller rsp
+ mov r11, [r10 - 8h] ; return address
+
+ alloc_stack SIZEOF_STACK_FRAME
+
+ ; correctness of argument registers in structure doesn't matter for leave probe
+
+ ; setup ProfilePlatformSpecificData structure
+ xor r8, r8;
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 0h], r8 ; r8 is null -- struct functionId field
+ save_reg_postrsp rbp, OFFSETOF_PLATFORM_SPECIFIC_DATA + 8h ; -- struct rbp field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 10h], r10 ; caller rsp -- struct probeRsp field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 18h], r11 ; return address -- struct ip field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 20h], rdx ; -- struct profiledRsp field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 28h], rax ; return value -- struct rax field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 30h], r8 ; r8 is null -- struct hiddenArg field
+ movsd real8 ptr [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 38h], xmm0 ; -- struct flt0 field
+ movsd real8 ptr [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 40h], xmm1 ; -- struct flt1 field
+ movsd real8 ptr [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 48h], xmm2 ; -- struct flt2 field
+ movsd real8 ptr [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 50h], xmm3 ; -- struct flt3 field
+ mov r10, PROFILE_LEAVE
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 58h], r10d ; flags -- struct flags field
+
+ ; we need to be able to restore the fp return register
+ save_xmm128_postrsp xmm0, OFFSETOF_FP_ARG_SPILL + 0h
+ END_PROLOGUE
+
+ ; rcx already contains the clientInfo
+ lea rdx, [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA]
+ call ProfileLeave
+
+ ; restore fp return register
+ movdqa xmm0, [rsp + OFFSETOF_FP_ARG_SPILL + 0h]
+
+ ; begin epilogue
+ add rsp, SIZEOF_STACK_FRAME
+ pop rax
+ ret
+NESTED_END ProfileLeaveNaked, _TEXT
+
+;EXTERN_C void ProfileTailcallNaked(FunctionIDOrClientID functionIDOrClientID, size_t profiledRsp);
+NESTED_ENTRY ProfileTailcallNaked, _TEXT
+ push_nonvol_reg rax
+
+; Upon entry :
+; rcx = clientInfo
+; rdx = profiledRsp
+
+ lea rax, [rsp + 10h] ; caller rsp
+ mov r11, [rax - 8h] ; return address
+
+ alloc_stack SIZEOF_STACK_FRAME
+
+ ; correctness of return values and argument registers in structure
+ ; doesn't matter for tailcall probe
+
+
+ ; setup ProfilePlatformSpecificData structure
+ xor r8, r8;
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 0h], r8 ; r8 is null -- struct functionId field
+ save_reg_postrsp rbp, OFFSETOF_PLATFORM_SPECIFIC_DATA + 8h ; -- struct rbp field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 10h], rax ; caller rsp -- struct probeRsp field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 18h], r11 ; return address -- struct ip field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 20h], rdx ; -- struct profiledRsp field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 28h], r8 ; r8 is null -- struct rax field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 30h], r8 ; r8 is null -- struct hiddenArg field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 38h], r8 ; r8 is null -- struct flt0 field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 40h], r8 ; r8 is null -- struct flt1 field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 48h], r8 ; r8 is null -- struct flt2 field
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 50h], r8 ; r8 is null -- struct flt3 field
+ mov r10, PROFILE_TAILCALL
+ mov [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA + 58h], r10d ; flags -- struct flags field
+
+ ; we need to be able to restore the fp return register
+ save_xmm128_postrsp xmm0, OFFSETOF_FP_ARG_SPILL + 0h
+ END_PROLOGUE
+
+ ; rcx already contains the clientInfo
+ lea rdx, [rsp + OFFSETOF_PLATFORM_SPECIFIC_DATA]
+ call ProfileTailcall
+
+ ; restore fp return register
+ movdqa xmm0, [rsp + OFFSETOF_FP_ARG_SPILL + 0h]
+
+ ; begin epilogue
+ add rsp, SIZEOF_STACK_FRAME
+ pop rax
+ ret
+NESTED_END ProfileTailcallNaked, _TEXT
+
+
+;; extern "C" DWORD __stdcall getcpuid(DWORD arg, unsigned char result[16]);
+NESTED_ENTRY getcpuid, _TEXT
+
+ push_nonvol_reg rbx
+ push_nonvol_reg rsi
+ END_PROLOGUE
+
+ mov eax, ecx ; first arg
+ mov rsi, rdx ; second arg (result)
+ xor ecx, ecx ; clear ecx - needed for "Structured Extended Feature Flags"
+ cpuid
+ mov [rsi+ 0], eax
+ mov [rsi+ 4], ebx
+ mov [rsi+ 8], ecx
+ mov [rsi+12], edx
+ pop rsi
+ pop rbx
+ ret
+NESTED_END getcpuid, _TEXT
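+
+; Illustrative use from C++ (a sketch; the buffer layout follows the stores above):
+;
+;   unsigned char buffer[16];
+;   DWORD maxLeaf = getcpuid(0, buffer);  // leaf 0: EAX (max standard leaf) lands in
+;                                         // buffer[0..3]; the vendor string is in
+;                                         // EBX (buffer[4..7]), EDX (buffer[12..15]),
+;                                         // ECX (buffer[8..11])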
+
+;The following function uses the Deterministic Cache Parameters leaf to determine the cache hierarchy information on Prescott and above platforms.
+; This function takes 3 arguments:
+; Arg1 is an input to ECX. Used as an index to specify which cache level to return information on by CPUID.
+; Arg1 is already passed in ECX on the call to getextcpuid, so no explicit assignment is required.
+; Arg2 is an input to EAX. For deterministic cache enumeration, we pass in 4h in Arg2.
+; Arg3 is a pointer to the return dwbuffer
+NESTED_ENTRY getextcpuid, _TEXT
+ push_nonvol_reg rbx
+ push_nonvol_reg rsi
+ END_PROLOGUE
+
+ mov eax, edx ; second arg (input to EAX)
+ mov rsi, r8 ; third arg (pointer to return dwbuffer)
+ cpuid
+ mov [rsi+ 0], eax
+ mov [rsi+ 4], ebx
+ mov [rsi+ 8], ecx
+ mov [rsi+12], edx
+ pop rsi
+ pop rbx
+
+ ret
+NESTED_END getextcpuid, _TEXT
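+
+; Illustrative call (a sketch): getextcpuid(cacheIndex, 4, buffer) executes CPUID
+; leaf 4 (Deterministic Cache Parameters) with ECX = cacheIndex and stores
+; EAX/EBX/ECX/EDX into the 16-byte buffer, one cache level per index value.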
+
+
+; EXTERN_C void moveOWord(LPVOID* src, LPVOID* target);
+; <NOTE>
+; MOVDQA is not an atomic operation.  You need to call this function within a Crst (critical section).
+; </NOTE>
+LEAF_ENTRY moveOWord, _TEXT
+ movdqa xmm0, [rcx]
+ movdqa [rdx], xmm0
+
+ ret
+LEAF_END moveOWord, _TEXT
+
+
+extern JIT_InternalThrowFromHelper:proc
+
+LEAF_ENTRY SinglecastDelegateInvokeStub, _TEXT
+
+ test rcx, rcx
+ jz NullObject
+
+
+ mov rax, [rcx + OFFSETOF__DelegateObject___methodPtr]
+ mov rcx, [rcx + OFFSETOF__DelegateObject___target] ; replace "this" pointer
+
+ jmp rax
+
+NullObject:
+ mov rcx, CORINFO_NullReferenceException_ASM
+ jmp JIT_InternalThrow
+
+LEAF_END SinglecastDelegateInvokeStub, _TEXT
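+
+; Equivalent C-like pseudocode (illustrative):
+;
+;   if (pThis == NULL)
+;       JIT_InternalThrow(CORINFO_NullReferenceException);
+;   return (pThis->_methodPtr)(pThis->_target, /* remaining args untouched */);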
+
+ end
+
diff --git a/src/vm/amd64/AsmMacros.inc b/src/vm/amd64/AsmMacros.inc
new file mode 100644
index 0000000000..327a0f0de7
--- /dev/null
+++ b/src/vm/amd64/AsmMacros.inc
@@ -0,0 +1,443 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+;
+; Define macros to build unwind data for prologues.
+;
+
+push_nonvol_reg macro Reg
+
+ .errnz ___STACK_ADJUSTMENT_FORBIDDEN, <push_nonvol_reg cannot be used after save_reg_postrsp>
+
+ push Reg
+ .pushreg Reg
+
+ endm
+
+push_vol_reg macro Reg
+
+ .errnz ___STACK_ADJUSTMENT_FORBIDDEN, push_vol_reg cannot be used after save_reg_postrsp
+
+ push Reg
+ .allocstack 8
+
+ endm
+
+push_eflags macro
+
+ .errnz ___STACK_ADJUSTMENT_FORBIDDEN, push_eflags cannot be used after save_reg_postrsp
+
+ pushfq
+ .allocstack 8
+
+ endm
+
+alloc_stack macro Size
+
+ .errnz ___STACK_ADJUSTMENT_FORBIDDEN, alloc_stack cannot be used after save_reg_postrsp
+
+ sub rsp, Size
+ .allocstack Size
+
+ endm
+
+save_reg_frame macro Reg, FrameReg, Offset
+
+ .erre ___FRAME_REG_SET, save_reg_frame cannot be used before set_frame
+
+ mov Offset[FrameReg], Reg
+ .savereg Reg, Offset
+
+ endm
+
+save_reg_postrsp macro Reg, Offset
+
+ .errnz ___FRAME_REG_SET, save_reg_postrsp cannot be used after set_frame
+
+ mov Offset[rsp], Reg
+ .savereg Reg, Offset
+
+ ___STACK_ADJUSTMENT_FORBIDDEN = 1
+
+ endm
+
+save_xmm128_frame macro Reg, FrameReg, Offset
+
+ .erre ___FRAME_REG_SET, save_xmm128_frame cannot be used before set_frame
+
+ movdqa Offset[FrameReg], Reg
+ .savexmm128 Reg, Offset
+
+ endm
+
+save_xmm128_postrsp macro Reg, Offset
+
+        .errnz ___FRAME_REG_SET, save_xmm128_postrsp cannot be used after set_frame
+
+ movdqa Offset[rsp], Reg
+ .savexmm128 Reg, Offset
+
+ ___STACK_ADJUSTMENT_FORBIDDEN = 1
+
+ endm
+
+set_frame macro Reg, Offset
+
+ .errnz ___FRAME_REG_SET, set_frame cannot be used more than once
+
+if Offset
+
+ lea Reg, Offset[rsp]
+
+else
+
+ mov reg, rsp
+
+endif
+
+ .setframe Reg, Offset
+ ___FRAME_REG_SET = 1
+
+ endm
+
+END_PROLOGUE macro
+
+ .endprolog
+
+ endm
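+
+; Illustrative use of the macros above (a sketch, not a stub in this file):
+;
+;   NESTED_ENTRY MyHelper, _TEXT
+;       push_nonvol_reg rbx             ; emits push rbx and .pushreg rbx
+;       alloc_stack     20h             ; emits sub rsp, 20h and .allocstack 20h
+;       END_PROLOGUE
+;       ...
+;       add rsp, 20h                    ; epilogue mirrors the prologue
+;       pop rbx
+;       ret
+;   NESTED_END MyHelper, _TEXT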
+
+
+;
+; Define function entry/end macros.
+;
+
+LEAF_ENTRY macro Name, Section
+
+Section segment para 'CODE'
+
+ align 16
+
+ public Name
+Name proc
+
+ endm
+
+LEAF_END macro Name, section
+
+Name endp
+
+Section ends
+
+ endm
+
+LEAF_END_MARKED macro Name, section
+ public Name&_End
+Name&_End label qword
+ ; this nop is important to keep the label in
+ ; the right place in the face of BBT
+ nop
+
+Name endp
+
+Section ends
+
+ endm
+
+
+NESTED_ENTRY macro Name, Section, Handler
+
+Section segment para 'CODE'
+
+ align 16
+
+ public Name
+
+ifb <Handler>
+
+Name proc frame
+
+else
+
+Name proc frame:Handler
+
+endif
+
+ ___FRAME_REG_SET = 0
+ ___STACK_ADJUSTMENT_FORBIDDEN = 0
+
+ endm
+
+NESTED_END macro Name, section
+
+Name endp
+
+Section ends
+
+ endm
+
+NESTED_END_MARKED macro Name, section
+ public Name&_End
+Name&_End label qword
+
+Name endp
+
+Section ends
+
+ endm
+
+
+;
+; Macro to Call GetThread() correctly whether it is indirect or direct
+;
+CALL_GETTHREAD macro
+ifndef GetThread
+extern GetThread:proc
+endif
+ call GetThread
+ endm
+
+CALL_GETAPPDOMAIN macro
+ifndef GetAppDomain
+extern GetAppDomain:proc
+endif
+ call GetAppDomain
+ endm
+
+;
+; if you change this code, there is corresponding code in JITInterfaceGen.cpp which will need to be changed as well
+;
+
+; DEFAULT_TARGET always needs to be further away than the fixed-up target will be
+
+
+JIT_HELPER_MONITOR_THUNK macro THUNK_NAME, Section
+Section segment para 'CODE'
+ align 16
+ public THUNK_NAME
+THUNK_NAME proc
+ xor edx, edx
+THUNK_NAME endp
+Section ends
+ endm
+
+;
+; Useful for enabling C++ to know where to patch code at runtime.
+;
+PATCH_LABEL macro Name
+ public Name
+Name::
+ endm
+
+;
+; Define alternate entry macro.
+;
+ALTERNATE_ENTRY macro Name
+ public Name
+Name label proc
+ endm
+
+;
+; Appropriate instructions for certain specific scenarios:
+; - REPRET: should be used as the return instruction when the return is a branch
+; target or immediately follows a conditional branch
+; - TAILJMP_RAX: ("jmp rax") should be used for tailcalls, this emits an instruction
+; sequence which is recognized by the unwinder as a valid epilogue terminator
+;
+REPRET TEXTEQU <DB 0F3h, 0C3h>
+TAILJMP_RAX TEXTEQU <DB 048h, 0FFh, 0E0h>
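+
+; (REPRET emits 0F3h 0C3h, i.e. "rep ret", which sidesteps the branch-misprediction
+; penalty of a lone one-byte ret at a branch target; TAILJMP_RAX emits 048h 0FFh 0E0h,
+; i.e. "jmp rax" carrying a REX.W prefix, the form the unwinder matches.)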
+
+NOP_2_BYTE macro
+
+ xchg ax,ax
+
+ endm
+
+NOP_3_BYTE macro
+
+ nop dword ptr [rax]
+
+ endm
+
+PUSH_CALLEE_SAVED_REGISTERS macro
+
+ push_nonvol_reg r15
+ push_nonvol_reg r14
+ push_nonvol_reg r13
+ push_nonvol_reg r12
+ push_nonvol_reg rbp
+ push_nonvol_reg rbx
+ push_nonvol_reg rsi
+ push_nonvol_reg rdi
+
+ endm
+
+SAVE_CALLEE_SAVED_REGISTERS macro ofs
+
+ save_reg_postrsp rdi, ofs + 0h
+ save_reg_postrsp rsi, ofs + 8h
+ save_reg_postrsp rbx, ofs + 10h
+ save_reg_postrsp rbp, ofs + 18h
+ save_reg_postrsp r12, ofs + 20h
+ save_reg_postrsp r13, ofs + 28h
+ save_reg_postrsp r14, ofs + 30h
+ save_reg_postrsp r15, ofs + 38h
+
+ endm
+
+POP_CALLEE_SAVED_REGISTERS macro
+
+ pop rdi
+ pop rsi
+ pop rbx
+ pop rbp
+ pop r12
+ pop r13
+ pop r14
+ pop r15
+
+ endm
+
+SAVE_ARGUMENT_REGISTERS macro ofs
+
+ save_reg_postrsp rcx, ofs + 0h
+ save_reg_postrsp rdx, ofs + 8h
+ save_reg_postrsp r8, ofs + 10h
+ save_reg_postrsp r9, ofs + 18h
+
+ endm
+
+RESTORE_ARGUMENT_REGISTERS macro ofs
+
+ mov rcx, [rsp + ofs + 0h]
+ mov rdx, [rsp + ofs + 8h]
+ mov r8, [rsp + ofs + 10h]
+ mov r9, [rsp + ofs + 18h]
+
+ endm
+
+SAVE_FLOAT_ARGUMENT_REGISTERS macro ofs
+
+ save_xmm128_postrsp xmm0, ofs
+ save_xmm128_postrsp xmm1, ofs + 10h
+ save_xmm128_postrsp xmm2, ofs + 20h
+ save_xmm128_postrsp xmm3, ofs + 30h
+
+ endm
+
+RESTORE_FLOAT_ARGUMENT_REGISTERS macro ofs
+
+ movdqa xmm0, [rsp + ofs]
+ movdqa xmm1, [rsp + ofs + 10h]
+ movdqa xmm2, [rsp + ofs + 20h]
+ movdqa xmm3, [rsp + ofs + 30h]
+
+ endm
+
+
+; Stack layout:
+;
+; (stack parameters)
+; ...
+; r9
+; r8
+; rdx
+; rcx <- __PWTB_ArgumentRegisters
+; return address
+; CalleeSavedRegisters::r15
+; CalleeSavedRegisters::r14
+; CalleeSavedRegisters::r13
+; CalleeSavedRegisters::r12
+; CalleeSavedRegisters::rbp
+; CalleeSavedRegisters::rbx
+; CalleeSavedRegisters::rsi
+; CalleeSavedRegisters::rdi <- __PWTB_StackAlloc
+; padding to align xmm save area
+; xmm3
+; xmm2
+; xmm1
+; xmm0 <- __PWTB_FloatArgumentRegisters
+; extra locals + padding to qword align
+; callee's r9
+; callee's r8
+; callee's rdx
+; callee's rcx
+
+PROLOG_WITH_TRANSITION_BLOCK macro extraLocals := <0>, stackAllocOnEntry := <0>, stackAllocSpill1, stackAllocSpill2, stackAllocSpill3
+
+ __PWTB_FloatArgumentRegisters = SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES + extraLocals
+
+ if (__PWTB_FloatArgumentRegisters mod 16) ne 0
+ __PWTB_FloatArgumentRegisters = __PWTB_FloatArgumentRegisters + 8
+ endif
+
+ __PWTB_StackAlloc = __PWTB_FloatArgumentRegisters + 4 * 16 + 8
+ __PWTB_TransitionBlock = __PWTB_StackAlloc
+ __PWTB_ArgumentRegisters = __PWTB_StackAlloc + 9 * 8
+
+ .errnz stackAllocOnEntry ge 4*8, Max supported stackAllocOnEntry is 3*8
+
+ if stackAllocOnEntry gt 0
+ .allocstack stackAllocOnEntry
+ endif
+
+ ; PUSH_CALLEE_SAVED_REGISTERS expanded here
+
+ if stackAllocOnEntry lt 8
+ push_nonvol_reg r15
+ endif
+
+ if stackAllocOnEntry lt 2*8
+ push_nonvol_reg r14
+ endif
+
+ if stackAllocOnEntry lt 3*8
+ push_nonvol_reg r13
+ endif
+
+ push_nonvol_reg r12
+ push_nonvol_reg rbp
+ push_nonvol_reg rbx
+ push_nonvol_reg rsi
+ push_nonvol_reg rdi
+
+ alloc_stack __PWTB_StackAlloc
+ SAVE_ARGUMENT_REGISTERS __PWTB_ArgumentRegisters
+ SAVE_FLOAT_ARGUMENT_REGISTERS __PWTB_FloatArgumentRegisters
+
+ if stackAllocOnEntry ge 3*8
+ mov stackAllocSpill3, [rsp + __PWTB_StackAlloc + 28h]
+ save_reg_postrsp r13, __PWTB_StackAlloc + 28h
+ endif
+
+ if stackAllocOnEntry ge 2*8
+ mov stackAllocSpill2, [rsp + __PWTB_StackAlloc + 30h]
+ save_reg_postrsp r14, __PWTB_StackAlloc + 30h
+ endif
+
+ if stackAllocOnEntry ge 8
+ mov stackAllocSpill1, [rsp + __PWTB_StackAlloc + 38h]
+ save_reg_postrsp r15, __PWTB_StackAlloc + 38h
+ endif
+
+ END_PROLOGUE
+
+ endm
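+
+; Worked example (assuming SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES = 20h, i.e. four
+; register homes) with the default extraLocals = 0:
+;
+;   __PWTB_FloatArgumentRegisters = 20h              (20h mod 16 = 0, no padding)
+;   __PWTB_StackAlloc             = 20h + 40h + 8h  = 68h
+;   __PWTB_TransitionBlock        = 68h
+;   __PWTB_ArgumentRegisters      = 68h + 9*8h      = 0B0h
+;
+; so the argument register homes sit just above the eight pushed callee-saved
+; registers and the return address, matching the layout pictured above.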
+
+EPILOG_WITH_TRANSITION_BLOCK_RETURN macro
+
+ add rsp, __PWTB_StackAlloc
+ POP_CALLEE_SAVED_REGISTERS
+ ret
+
+ endm
+
+EPILOG_WITH_TRANSITION_BLOCK_TAILCALL macro
+
+ RESTORE_FLOAT_ARGUMENT_REGISTERS __PWTB_FloatArgumentRegisters
+ RESTORE_ARGUMENT_REGISTERS __PWTB_ArgumentRegisters
+ add rsp, __PWTB_StackAlloc
+ POP_CALLEE_SAVED_REGISTERS
+
+ endm
diff --git a/src/vm/amd64/CLRErrorReporting.vrg b/src/vm/amd64/CLRErrorReporting.vrg
new file mode 100644
index 0000000000..f398185aff
--- /dev/null
+++ b/src/vm/amd64/CLRErrorReporting.vrg
@@ -0,0 +1,5 @@
+VSREG 7
+
+[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Eventlog\Application\.NET Runtime 4.0 Error Reporting]
+"EventMessageFile"="[DWFolder.F0DF3458_A845_11D3_8D0A_0050046416B9]DW20.EXE"
+"TypesSupported"=dword:00000007
diff --git a/src/vm/amd64/CallDescrWorkerAMD64.asm b/src/vm/amd64/CallDescrWorkerAMD64.asm
new file mode 100644
index 0000000000..ed51e57fe3
--- /dev/null
+++ b/src/vm/amd64/CallDescrWorkerAMD64.asm
@@ -0,0 +1,133 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+include <AsmMacros.inc>
+include <AsmConstants.inc>
+
+extern CallDescrWorkerUnwindFrameChainHandler:proc
+
+;;
+;; EXTERN_C void FastCallFinalizeWorker(Object *obj, PCODE funcPtr);
+;;
+ NESTED_ENTRY FastCallFinalizeWorker, _TEXT, CallDescrWorkerUnwindFrameChainHandler
+ alloc_stack 28h ;; alloc callee scratch and align the stack
+ END_PROLOGUE
+
+ ;
+ ; RCX: already contains obj*
+ ; RDX: address of finalizer method to call
+ ;
+
+ ; !!!!!!!!!
+ ; NOTE: you cannot tail call here because we must have the CallDescrWorkerUnwindFrameChainHandler
+ ; personality routine on the stack.
+ ; !!!!!!!!!
+ call rdx
+ xor rax, rax
+
+ ; epilog
+ add rsp, 28h
+ ret
+
+
+ NESTED_END FastCallFinalizeWorker, _TEXT
+
+;;extern "C" void CallDescrWorkerInternal(CallDescrData * pCallDescrData);
+
+ NESTED_ENTRY CallDescrWorkerInternal, _TEXT, CallDescrWorkerUnwindFrameChainHandler
+
+ push_nonvol_reg rbx ; save nonvolatile registers
+ push_nonvol_reg rsi ;
+ push_nonvol_reg rbp ;
+ set_frame rbp, 0 ; set frame pointer
+
+ END_PROLOGUE
+
+ mov rbx, rcx ; save pCallDescrData in rbx
+
+ mov ecx, dword ptr [rbx + CallDescrData__numStackSlots]
+
+ test ecx, 1
+ jz StackAligned
+ push rax
+StackAligned:
+
+ mov rsi, [rbx + CallDescrData__pSrc] ; set source argument list address
+ lea rsi, [rsi + 8 * rcx]
+
+StackCopyLoop: ; copy the arguments to stack top-down to carefully probe for sufficient stack space
+ sub rsi, 8
+ push qword ptr [rsi]
+ dec ecx
+ jnz StackCopyLoop
+
+ ;
+ ; N.B. All four argument registers are loaded regardless of the actual number
+ ; of arguments.
+ ;
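+        ; The register type map packs one ASM_ELEMENT_TYPE per byte, one byte per
+        ; argument register; AL, AH and the shr below walk those bytes, re-loading
+        ; a slot with movsd whenever its element type is R8 (double).
+        ;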
+
+ mov rax, [rbx + CallDescrData__dwRegTypeMap] ; save the reg (arg) type map
+
+ mov rcx, 0[rsp] ; load first four argument registers
+ movss xmm0, real4 ptr 0[rsp] ;
+ cmp al, ASM_ELEMENT_TYPE_R8 ;
+ jnz Arg2 ;
+ movsd xmm0, real8 ptr 0[rsp] ;
+Arg2:
+ mov rdx, 8[rsp] ;
+ movss xmm1, real4 ptr 8[rsp] ;
+ cmp ah, ASM_ELEMENT_TYPE_R8 ;
+ jnz Arg3 ;
+ movsd xmm1, real8 ptr 8[rsp] ;
+Arg3:
+ mov r8, 10h[rsp] ;
+ movss xmm2, real4 ptr 10h[rsp];
+ shr eax, 16 ;
+ cmp al, ASM_ELEMENT_TYPE_R8 ;
+ jnz Arg4 ;
+ movsd xmm2, real8 ptr 10h[rsp];
+Arg4:
+ mov r9, 18h[rsp] ;
+ movss xmm3, real4 ptr 18h[rsp];
+ cmp ah, ASM_ELEMENT_TYPE_R8 ;
+ jnz DoCall ;
+ movsd xmm3, real8 ptr 18h[rsp];
+DoCall:
+ call qword ptr [rbx+CallDescrData__pTarget] ; call target function
+
+ ; Save FP return value
+
+ mov ecx, dword ptr [rbx+CallDescrData__fpReturnSize]
+ test ecx, ecx
+ jz ReturnsInt
+
+ cmp ecx, 4
+ je ReturnsFloat
+ cmp ecx, 8
+ je ReturnsDouble
+ ; unexpected
+ jmp Epilog
+
+ReturnsInt:
+ mov [rbx+CallDescrData__returnValue], rax
+
+Epilog:
+ lea rsp, 0[rbp] ; deallocate argument list
+ pop rbp ; restore nonvolatile register
+ pop rsi ;
+ pop rbx ;
+ ret
+
+ReturnsFloat:
+ movss real4 ptr [rbx+CallDescrData__returnValue], xmm0
+ jmp Epilog
+
+ReturnsDouble:
+ movsd real8 ptr [rbx+CallDescrData__returnValue], xmm0
+ jmp Epilog
+
+ NESTED_END CallDescrWorkerInternal, _TEXT
+
+ end
diff --git a/src/vm/amd64/ComCallPreStub.asm b/src/vm/amd64/ComCallPreStub.asm
new file mode 100644
index 0000000000..bb582c5363
--- /dev/null
+++ b/src/vm/amd64/ComCallPreStub.asm
@@ -0,0 +1,159 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+
+ifdef FEATURE_COMINTEROP
+
+include AsmMacros.inc
+include asmconstants.inc
+
+; extern "C" const BYTE* ComPreStubWorker(ComPrestubMethodFrame *pPFrame, UINT64 *pErrorResult)
+extern ComPreStubWorker:proc
+extern JIT_FailFast:proc
+extern s_gsCookie:qword
+
+
+; extern "C" VOID ComCallPreStub()
+NESTED_ENTRY ComCallPreStub, _TEXT
+
+;
+; Stack layout:
+;
+; (stack parameters)
+; ...
+; r9
+; r8
+; rdx
+; rcx
+; ComPrestubMethodFrame::m_ReturnAddress
+; ComPrestubMethodFrame::m_pFuncDesc
+; Frame::m_Next
+; __VFN_table <-- rsp + ComCallPreStub_ComPrestubMethodFrame_OFFSET
+; gsCookie
+; HRESULT <-- rsp + ComCallPreStub_HRESULT_OFFSET
+; (optional padding to qword align xmm save area)
+; xmm3
+; xmm2
+; xmm1
+; xmm0 <-- rsp + ComCallPreStub_XMM_SAVE_OFFSET
+; callee's r9
+; callee's r8
+; callee's rdx
+; callee's rcx
+
+ComCallPreStub_STACK_FRAME_SIZE = 0
+
+; ComPrestubMethodFrame MUST be the highest part of the stack frame,
+; immediately below the return address, so that
+; ComPrestubMethodFrame::m_ReturnAddress and m_FuncDesc are in the right place.
+ComCallPreStub_STACK_FRAME_SIZE = ComCallPreStub_STACK_FRAME_SIZE + SIZEOF__ComPrestubMethodFrame - 8
+ComCallPreStub_ComPrestubMethodFrame_NEGOFFSET = ComCallPreStub_STACK_FRAME_SIZE
+
+; CalleeSavedRegisters MUST be immediately below ComPrestubMethodFrame
+ComCallPreStub_STACK_FRAME_SIZE = ComCallPreStub_STACK_FRAME_SIZE + 8*8
+ComCallPreStub_CalleeSavedRegisters_NEGOFFSET = ComCallPreStub_STACK_FRAME_SIZE
+
+; GSCookie MUST be immediately below CalleeSavedRegisters
+ComCallPreStub_STACK_FRAME_SIZE = ComCallPreStub_STACK_FRAME_SIZE + SIZEOF_GSCookie
+
+; UINT64 (out param to ComPreStubWorker)
+ComCallPreStub_STACK_FRAME_SIZE = ComCallPreStub_STACK_FRAME_SIZE + 8
+ComCallPreStub_ERRORRETVAL_NEGOFFSET = ComCallPreStub_STACK_FRAME_SIZE
+
+; Ensure that the offset of the XMM save area will be 16-byte aligned.
+if ((ComCallPreStub_STACK_FRAME_SIZE + SIZEOF__Frame + 8) mod 16) ne 0
+ComCallPreStub_STACK_FRAME_SIZE = ComCallPreStub_STACK_FRAME_SIZE + 8
+endif
+
+; FP parameters (xmm0-xmm3)
+ComCallPreStub_STACK_FRAME_SIZE = ComCallPreStub_STACK_FRAME_SIZE + 40h
+ComCallPreStub_XMM_SAVE_NEGOFFSET = ComCallPreStub_STACK_FRAME_SIZE
+
+; Callee scratch area
+ComCallPreStub_STACK_FRAME_SIZE = ComCallPreStub_STACK_FRAME_SIZE + 20h
+
+; Now we have the full size of the stack frame. The offsets have been computed relative to the
+; top, so negate them to make them relative to the post-prologue rsp.
+ComCallPreStub_ComPrestubMethodFrame_OFFSET = ComCallPreStub_STACK_FRAME_SIZE - ComCallPreStub_ComPrestubMethodFrame_NEGOFFSET
+OFFSETOF_GSCookie = ComCallPreStub_ComPrestubMethodFrame_OFFSET - SIZEOF_GSCookie
+ComCallPreStub_ERRORRETVAL_OFFSET = ComCallPreStub_STACK_FRAME_SIZE - ComCallPreStub_ERRORRETVAL_NEGOFFSET
+ComCallPreStub_XMM_SAVE_OFFSET = ComCallPreStub_STACK_FRAME_SIZE - ComCallPreStub_XMM_SAVE_NEGOFFSET
+
+ .allocstack 8 ; ComPrestubMethodFrame::m_pFuncDesc, pushed by prepad
+
+ alloc_stack SIZEOF__ComPrestubMethodFrame - 2*8
+
+ ;
+ ; Save ComPrestubMethodFrame* to pass to ComPreStubWorker
+ ;
+ mov r10, rsp
+
+ ;
+ ; Allocate callee scratch area and save FP parameters
+ ;
+ alloc_stack ComCallPreStub_ComPrestubMethodFrame_OFFSET
+
+ ;
+ ; Save argument registers
+ ;
+ SAVE_ARGUMENT_REGISTERS ComCallPreStub_STACK_FRAME_SIZE + 8h
+
+ ;
+ ; spill the fp args
+ ;
+ SAVE_FLOAT_ARGUMENT_REGISTERS ComCallPreStub_XMM_SAVE_OFFSET
+
+ END_PROLOGUE
+
+ mov rcx, s_gsCookie
+ mov [rsp + OFFSETOF_GSCookie], rcx
+ ;
+ ; Resolve target.
+ ;
+ mov rcx, r10
+ lea rdx, [rsp + ComCallPreStub_ERRORRETVAL_OFFSET]
+ call ComPreStubWorker
+ test rax, rax
+ jz ExitError
+
+ifdef _DEBUG
+ mov rcx, s_gsCookie
+ cmp [rsp + OFFSETOF_GSCookie], rcx
+ je GoodGSCookie
+ call JIT_FailFast
+GoodGSCookie:
+endif ; _DEBUG
+
+ ;
+ ; Restore FP parameters
+ ;
+ RESTORE_FLOAT_ARGUMENT_REGISTERS ComCallPreStub_XMM_SAVE_OFFSET
+
+ ;
+ ; Restore integer parameters
+ ;
+ RESTORE_ARGUMENT_REGISTERS ComCallPreStub_STACK_FRAME_SIZE + 8h
+
+ add rsp, ComCallPreStub_ComPrestubMethodFrame_OFFSET + SIZEOF__ComPrestubMethodFrame - 8
+
+ TAILJMP_RAX
+
+ExitError:
+ mov rax, [rsp + ComCallPreStub_ERRORRETVAL_OFFSET]
+ add rsp, ComCallPreStub_ComPrestubMethodFrame_OFFSET + SIZEOF__ComPrestubMethodFrame - 8
+
+ ret
+
+NESTED_END ComCallPreStub, _TEXT
+
+endif ; FEATURE_COMINTEROP
+
+ end
+
diff --git a/src/vm/amd64/CrtHelpers.asm b/src/vm/amd64/CrtHelpers.asm
new file mode 100644
index 0000000000..4c4361ce77
--- /dev/null
+++ b/src/vm/amd64/CrtHelpers.asm
@@ -0,0 +1,528 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+; ***********************************************************************
+; File: CrtHelpers.asm, see history in asmhelpers.asm
+;
+; ***********************************************************************
+
+include AsmMacros.inc
+include asmconstants.inc
+
+; JIT_MemSet/JIT_MemCpy
+;
+; It is IMPORTANT that the exception handling code is able to find these guys
+; on the stack, but to keep them from being tailcalled by VC++ we would need to
+; turn off optimization, and that ends up being a wasteful implementation.
+;
+; Hence these assembly helpers.
+;
+
+
+;***
+;memset.asm - set a section of memory to all one byte
+;
+; Copyright (c) 1985-2001, Microsoft Corporation. All rights reserved.
+;
+;
+;*******************************************************************************
+
+;***
+;char *memset(dst, value, count) - sets "count" bytes at "dst" to "value"
+;
+;Purpose:
+; Sets the first "count" bytes of the memory starting
+; at "dst" to the character value "value".
+;
+; Algorithm:
+; char *
+; memset (dst, value, count)
+; char *dst;
+; char value;
+; unsigned int count;
+; {
+; char *start = dst;
+;
+; while (count--)
+; *dst++ = value;
+; return(start);
+; }
+;
+;Entry:
+; char *dst - pointer to memory to fill with value
+; char value - value to put in dst bytes
+; int count - number of bytes of dst to fill
+;
+;Exit:
+; returns dst, with filled bytes
+;
+;Uses:
+;
+;Exceptions:
+;
+;*******************************************************************************
+
+CACHE_LIMIT_MEMSET equ 070000h ; limit for nontemporal fill
+
+LEAF_ENTRY JIT_MemSet, _TEXT
+
+ mov rax, rcx ; save destination address
+ cmp r8, 8 ; check if 8 bytes to fill
+ jb short mset40 ; if b, less than 8 bytes to fill
+ movzx edx, dl ; set fill pattern
+ mov r9, 0101010101010101h ; replicate fill over 8 bytes
+ imul rdx, r9 ;
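+                                        ; (e.g. a fill value of 0ABh becomes 0ABABABABABABABABh)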
+ cmp r8, 64 ; check if 64 bytes to fill
+ jb short mset20 ; if b, less than 64 bytes
+
+;
+; Large block - fill alignment bytes.
+;
+
+mset00: neg rcx ; compute bytes to alignment
+ and ecx, 7 ;
+ jz short mset10 ; if z, no alignment required
+ sub r8, rcx ; adjust remaining bytes by alignment
+ mov [rax], rdx ; fill alignment bytes
+mset10: add rcx, rax ; compute aligned destination address
+
+;
+; Attempt to fill 64-byte blocks
+;
+
+ mov r9, r8 ; copy count of bytes remaining
+ and r8, 63 ; compute remaining byte count
+ shr r9, 6 ; compute number of 64-byte blocks
+ test r9, r9 ; remove partial flag stall caused by shr
+ jnz short mset70 ; if nz, 64-byte blocks to fill
+
+;
+; Fill 8-byte bytes.
+;
+
+mset20: mov r9, r8 ; copy count of bytes remaining
+ and r8, 7 ; compute remaining byte count
+ shr r9, 3 ; compute number of 8-byte blocks
+ test r9, r9 ; remove partial flag stall caused by shr
+ jz short mset40 ; if z, no 8-byte blocks
+
+        align                           ; simpler way to align instructions
+
+mset30: mov [rcx], rdx ; fill 8-byte blocks
+ add rcx, 8 ; advance to next 8-byte block
+ dec r9 ; decrement loop count
+ jnz short mset30 ; if nz, more 8-byte blocks
+
+;
+; Fill residual bytes.
+;
+
+mset40: test r8, r8 ; test if any bytes to fill
+ jz short mset60 ; if z, no bytes to fill
+mset50: mov [rcx], dl ; fill byte
+ inc rcx ; advance to next byte
+ dec r8 ; decrement loop count
+ jnz short mset50 ; if nz, more bytes to fill
+mset60:
+ ; for some reason the assembler doesn't like the REPRET macro on the same line as a label
+ REPRET ; return
+
+;
+; Fill 64-byte blocks.
+;
+
+ align 16
+
+ db 066h, 066h, 066h, 090h
+ db 066h, 066h, 090h
+
+mset70: cmp r9, CACHE_LIMIT_MEMSET / 64 ; check if large fill
+ jae short mset90 ; if ae, large fill
+mset80: mov [rcx], rdx ; fill 64-byte block
+ mov 8[rcx], rdx ;
+ mov 16[rcx], rdx ;
+ add rcx, 64 ; advance to next block
+ mov (24 - 64)[rcx], rdx ;
+ mov (32 - 64)[rcx], rdx ;
+ dec r9 ; decrement loop count
+ mov (40 - 64)[rcx], rdx ;
+ mov (48 - 64)[rcx], rdx ;
+ mov (56 - 64)[rcx], rdx ;
+ jnz short mset80 ; if nz, more 64-byte blocks
+ jmp short mset20 ; finish in common code
+
+;
+; Fill 64-byte blocks nontemporal.
+;
+
+ align
+
+mset90: movnti [rcx], rdx ; fill 64-byte block
+ movnti 8[rcx], rdx ;
+ movnti 16[rcx], rdx ;
+ add rcx, 64 ; advance to next block
+ movnti (24 - 64)[rcx], rdx ;
+ movnti (32 - 64)[rcx], rdx ;
+ dec r9 ; decrement loop count
+ movnti (40 - 64)[rcx], rdx ;
+ movnti (48 - 64)[rcx], rdx ;
+ movnti (56 - 64)[rcx], rdx ;
+ jnz short mset90 ; if nz, move 64-byte blocks
+ lock or byte ptr [rsp], 0 ; flush data to memory
+ jmp mset20 ; finish in common code
+
+LEAF_END_MARKED JIT_MemSet, _TEXT
+
+;*******************************************************************************
+;***
+;JIT_MemCpy - Copy source buffer to destination buffer
+;
+;Purpose:
+;       JIT_MemCpy() copies a source memory buffer to a destination memory
+;       buffer. This routine recognizes overlapping buffers to avoid propagation.
+;       For cases where propagation is not a problem, memcpy() can be used.
+;
+;Entry:
+; void *dst = pointer to destination buffer
+; const void *src = pointer to source buffer
+; size_t count = number of bytes to copy
+;
+;Exit:
+;       Returns a pointer to the destination buffer in RAX
+;
+;Uses:
+;       RCX, RDX, R8, R9, R10, R11
+;
+;Exceptions:
+;*******************************************************************************
+; This ensures that atomic updates of aligned fields will stay atomic.
+
+CACHE_LIMIT_MEMMOV equ 040000h ; limit for nontemporal fill
+CACHE_BLOCK equ 01000h ; nontemporal move block size
+
+
+LEAF_ENTRY JIT_MemCpy, _TEXT
+
+ mov r11, rcx ; save destination address
+ sub rdx, rcx ; compute offset to source buffer
+ jb mmov10 ; if b, destination may overlap
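+                                        ; (rdx now holds src - dst; every load below
+                                        ; uses [rcx + rdx], and a borrow here means
+                                        ; src < dst, so the copy must run backwards
+                                        ; to avoid propagation)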
+ cmp r8, 8 ; check if 8 bytes to move
+ jb short mcpy40 ; if b, less than 8 bytes to move
+
+;
+; Move alignment bytes.
+;
+
+ test cl, 7 ; test if destination aligned
+ jz short mcpy20 ; if z, destination aligned
+ test cl, 1 ; test if byte move needed
+ jz short mcpy00 ; if z, byte move not needed
+ mov al, [rcx + rdx] ; move byte
+ dec r8 ; decrement byte count
+ mov [rcx], al ;
+ inc rcx ; increment destination address
+mcpy00: test cl, 2 ; test if word move needed
+ jz short mcpy10 ; if z, word move not needed
+ mov ax, [rcx + rdx] ; move word
+ sub r8, 2 ; reduce byte count
+ mov [rcx], ax ;
+ add rcx, 2 ; advance destination address
+mcpy10: test cl, 4 ; test if dword move needed
+ jz short mcpy20 ; if z, dword move not needed
+ mov eax, [rcx + rdx] ; move dword
+ sub r8, 4 ; reduce byte count
+ mov [rcx], eax ;
+ add rcx, 4 ; advance destination address
+
+;
+; Attempt to move 32-byte blocks.
+;
+
+mcpy20: mov r9, r8 ; copy count of bytes remaining
+ shr r9, 5 ; compute number of 32-byte blocks
+        test    r9, r9                  ; remove partial flag stall caused by shr
+ jnz short mcpy60 ; if nz, 32-byte blocks to fill
+
+ align
+;
+; Move 8-byte blocks.
+;
+
+mcpy25: mov r9, r8 ; copy count of bytes remaining
+ shr r9, 3 ; compute number of 8-byte blocks
+        test    r9, r9                  ; remove partial flag stall caused by shr
+ jz short mcpy40 ; if z, no 8-byte blocks
+ align
+
+mcpy30: mov rax, [rcx + rdx] ; move 8-byte blocks
+ mov [rcx], rax ;
+ add rcx, 8 ; advance destination address
+ dec r9 ; decrement loop count
+ jnz short mcpy30 ; if nz, more 8-byte blocks
+ and r8, 7 ; compute remaining byte count
+
+;
+; Test for residual bytes.
+;
+
+mcpy40: test r8, r8 ; test if any bytes to move
+ jnz short mcpy50 ; if nz, residual bytes to move
+ mov rax, r11 ; set destination address
+ ret ;
+
+;
+; Move residual bytes.
+;
+
+ align
+
+mcpy50: mov al, [rcx + rdx] ; move byte
+ mov [rcx], al ;
+        inc     rcx                     ; increment destination address
+ dec r8 ; decrement loop count
+ jnz short mcpy50 ; if nz, more bytes to fill
+ mov rax, r11 ; set destination address
+ ret ; return
+
+;
+; Move 32 byte blocks
+;
+
+ align 16
+
+ db 066h, 066h, 066h, 090h
+ db 066h, 066h, 090h
+
+mcpy60: cmp r9, CACHE_LIMIT_MEMMOV / 32 ; check if large move
+ jae short mcpy80 ; if ae, large move
+mcpy70: mov rax, [rcx + rdx] ; move 32-byte block
+ mov r10, 8[rcx + rdx] ;
+ add rcx, 32 ; advance destination address
+ mov (-32)[rcx], rax ;
+ mov (-24)[rcx], r10 ;
+ mov rax, (-16)[rcx + rdx] ;
+ mov r10, (-8)[rcx + rdx] ;
+ dec r9 ;
+ mov (-16)[rcx], rax ;
+ mov (-8)[rcx], r10 ;
+ jnz short mcpy70 ; if nz, more 32-byte blocks
+ and r8, 31 ; compute remaining byte count
+ jmp mcpy25 ;
+
+;
+; Move 64-byte blocks nontemporal.
+;
+
+ align
+
+ db 066h, 090h
+
+mcpy80: cmp rdx, CACHE_BLOCK ; check if cache block spacing
+ jb short mcpy70 ; if b, not cache block spaced
+mcpy81: mov eax, CACHE_BLOCK / 128 ; set loop count
+mcpy85: prefetchnta [rcx + rdx] ; prefetch 128 bytes
+ prefetchnta 64[rcx + rdx] ;
+ add rcx, 128 ; advance source address
+ dec eax ; decrement loop count
+ jnz short mcpy85 ; if nz, more to prefetch
+ sub rcx, CACHE_BLOCK ; reset source address
+ mov eax, CACHE_BLOCK / 64 ; set loop count
+mcpy90: mov r9, [rcx + rdx] ; move 64-byte block
+ mov r10, 8[rcx + rdx] ;
+ movnti [rcx], r9 ;
+ movnti 8[rcx], r10 ;
+ mov r9, 16[rcx + rdx] ;
+ mov r10, 24[rcx + rdx] ;
+ movnti 16[rcx], r9 ;
+ movnti 24[rcx], r10 ;
+ mov r9, 32[rcx + rdx] ;
+ mov r10, 40[rcx + rdx] ;
+ add rcx, 64 ; advance destination address
+ movnti (32 - 64)[rcx], r9 ;
+ movnti (40 - 64)[rcx], r10 ;
+ mov r9, (48 - 64)[rcx + rdx] ;
+ mov r10, (56 - 64)[rcx + rdx] ;
+ dec eax ;
+ movnti (48 - 64)[rcx], r9 ;
+ movnti (56 - 64)[rcx], r10 ;
+ jnz short mcpy90 ; if nz, more 32-byte blocks
+ sub r8, CACHE_BLOCK ; reduce remaining length
+ cmp r8, CACHE_BLOCK ; check if cache block remains
+ jae mcpy81 ; if ae, cache block remains
+ lock or byte ptr [rsp], 0 ; flush data to memory
+ jmp mcpy20 ;
+
+;
+; The source address is less than the destination address.
+;
+
+ align
+
+ db 066h, 066h, 066h, 090h
+ db 066h, 066h, 066h, 090h
+ db 066h, 090h
+
+mmov10: add rcx, r8 ; compute ending destination address
+ cmp r8, 8 ; check if 8 bytes to move
+ jb short mmov60 ; if b, less than 8 bytes to move
+
+;
+; Move alignment bytes.
+;
+
+ test cl, 7 ; test if destination aligned
+ jz short mmov30 ; if z, destination aligned
+ test cl, 1 ; test if byte move needed
+ jz short mmov15 ; if z, byte move not needed
+ dec rcx ; decrement destination address
+ mov al, [rcx + rdx] ; move byte
+ dec r8 ; decrement byte count
+ mov [rcx], al ;
+mmov15: test cl, 2 ; test if word move needed
+ jz short mmov20 ; if z, word move not needed
+ sub rcx, 2 ; reduce destination address
+ mov ax, [rcx + rdx] ; move word
+ sub r8, 2 ; reduce byte count
+ mov [rcx], ax ;
+mmov20: test cl, 4 ; test if dword move needed
+ jz short mmov30 ; if z, dword move not needed
+ sub rcx, 4 ; reduce destination address
+ mov eax, [rcx + rdx] ; move dword
+ sub r8, 4 ; reduce byte count
+ mov [rcx], eax ;
+
+;
+; Attempt to move 32-byte blocks
+;
+
+mmov30: mov r9, r8 ; copy count of bytes remaining
+ shr r9, 5 ; compute number of 32-byte blocks
+        test    r9, r9                  ; remove partial flag stall caused by shr
+ jnz short mmov80 ; if nz, 32-byte blocks to fill
+
+;
+; Move 8-byte blocks.
+;
+ align
+
+mmov40: mov r9, r8 ; copy count of bytes remaining
+ shr r9, 3 ; compute number of 8-byte blocks
+        test    r9, r9                  ; remove partial flag stall caused by shr
+ jz short mmov60 ; if z, no 8-byte blocks
+
+ align
+
+mmov50: sub rcx, 8 ; reduce destination address
+ mov rax, [rcx + rdx] ; move 8-byte blocks
+ dec r9 ; decrement loop count
+ mov [rcx], rax ;
+ jnz short mmov50 ; if nz, more 8-byte blocks
+ and r8, 7 ; compute remaining byte count
+
+;
+; Test for residual bytes.
+;
+
+mmov60: test r8, r8 ; test if any bytes to move
+ jnz short mmov70 ; if nz, residual bytes to move
+ mov rax, r11 ; set destination address
+ ret ;
+
+;
+; Move residual bytes.
+;
+
+ align
+
+mmov70: dec rcx ; decrement destination address
+ mov al, [rcx + rdx] ; move byte
+ dec r8 ; decrement loop count
+ mov [rcx], al ;
+ jnz short mmov70 ; if nz, more bytes to fill
+ mov rax, r11 ; set destination address
+ ret ; return
+
+;
+; Move 32 byte blocks
+;
+
+ align 16
+
+ db 066h, 066h, 066h, 090h
+ db 066h, 066h, 090h
+
+mmov80: cmp r9, CACHE_LIMIT_MEMMOV / 32 ; check if large move
+ jae short mmov93 ; if ae, large move
+mmov90: mov rax, (-8)[rcx + rdx] ; move 32-byte block
+ mov r10, (-16)[rcx + rdx] ;
+ sub rcx, 32 ; reduce destination address
+ mov 24[rcx], rax ;
+ mov 16[rcx], r10 ;
+ mov rax, 8[rcx + rdx] ;
+ mov r10, [rcx + rdx] ;
+ dec r9 ;
+ mov 8[rcx], rax ;
+ mov [rcx], r10 ;
+ jnz short mmov90 ; if nz, more 32-byte blocks
+ and r8, 31 ; compute remaining byte count
+ jmp mmov40 ;
+
+;
+; Move 64-byte blocks nontemporal.
+;
+
+ align
+
+ db 066h, 090h
+
+mmov93: cmp rdx, -CACHE_BLOCK ; check if cache block spacing
+ ja short mmov90 ; if a, not cache block spaced
+mmov94: mov eax, CACHE_BLOCK / 128 ; set loop count
+mmov95: sub rcx, 128 ; reduce destination address
+ prefetchnta [rcx + rdx] ; prefetch 128 bytes
+ prefetchnta 64[rcx + rdx] ;
+ dec eax ; decrement loop count
+ jnz short mmov95 ; if nz, more to prefetch
+ add rcx, CACHE_BLOCK ; reset source address
+ mov eax, CACHE_BLOCK / 64 ; set loop count
+mmov97: mov r9, (-8)[rcx + rdx] ; move 64-byte block
+ mov r10, (-16)[rcx + rdx] ;
+ movnti (-8)[rcx], r9 ;
+ movnti (-16)[rcx], r10 ;
+ mov r9, (-24)[rcx + rdx] ;
+ mov r10, (-32)[rcx + rdx] ;
+ movnti (-24)[rcx], r9 ;
+ movnti (-32)[rcx], r10 ;
+ mov r9, (-40)[rcx + rdx] ;
+ mov r10, (-48)[rcx + rdx] ;
+ sub rcx, 64 ; reduce destination address
+ movnti (64 - 40)[rcx], r9 ;
+ movnti (64 - 48)[rcx], r10 ;
+ mov r9, (64 - 56)[rcx + rdx] ;
+ mov r10, (64 - 64)[rcx + rdx] ;
+ dec eax ; decrement loop count
+ movnti (64 - 56)[rcx], r9 ;
+ movnti (64 - 64)[rcx], r10 ;
+ jnz short mmov97 ; if nz, more 32-byte blocks
+ sub r8, CACHE_BLOCK ; reduce remaining length
+ cmp r8, CACHE_BLOCK ; check if cache block remains
+ jae mmov94 ; if ae, cache block remains
+ lock or byte ptr [rsp], 0 ; flush data to memory
+ jmp mmov30 ;
+
+LEAF_END_MARKED JIT_MemCpy, _TEXT
+
+
+ end
+
diff --git a/src/vm/amd64/ExternalMethodFixupThunk.asm b/src/vm/amd64/ExternalMethodFixupThunk.asm
new file mode 100644
index 0000000000..f3ed9473d8
--- /dev/null
+++ b/src/vm/amd64/ExternalMethodFixupThunk.asm
@@ -0,0 +1,109 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+include <AsmMacros.inc>
+include AsmConstants.inc
+
+ extern ExternalMethodFixupWorker:proc
+ extern ProcessCLRException:proc
+ extern VirtualMethodFixupWorker:proc
+
+ifdef FEATURE_READYTORUN
+ extern DynamicHelperWorker:proc
+endif
+
+;============================================================================================
+;; EXTERN_C VOID __stdcall ExternalMethodFixupStub()
+
+NESTED_ENTRY ExternalMethodFixupStub, _TEXT, ProcessCLRException
+
+ PROLOG_WITH_TRANSITION_BLOCK 0, 8, rdx
+
+ lea rcx, [rsp + __PWTB_TransitionBlock] ; pTransitionBlock
+ sub rdx, 5 ; pThunk
+ mov r8, 0 ; sectionIndex
+ mov r9, 0 ; pModule
+
+ call ExternalMethodFixupWorker
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+PATCH_LABEL ExternalMethodFixupPatchLabel
+ TAILJMP_RAX
+
+NESTED_END ExternalMethodFixupStub, _TEXT
+
+
+ifdef FEATURE_READYTORUN
+
+NESTED_ENTRY DelayLoad_MethodCall, _TEXT
+
+ PROLOG_WITH_TRANSITION_BLOCK 0, 10h, r8, r9
+
+ lea rcx, [rsp + __PWTB_TransitionBlock] ; pTransitionBlock
+ mov rdx, rax ; pIndirection
+
+ call ExternalMethodFixupWorker
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+
+ ; Share the patch label
+ jmp ExternalMethodFixupPatchLabel
+
+NESTED_END DelayLoad_MethodCall, _TEXT
+
+;============================================================================================
+
+DYNAMICHELPER macro frameFlags, suffix
+
+NESTED_ENTRY DelayLoad_Helper&suffix, _TEXT
+
+ PROLOG_WITH_TRANSITION_BLOCK 8h, 10h, r8, r9
+
+ mov rcx, frameFlags
+ mov [rsp], rcx
+ lea rcx, [rsp + __PWTB_TransitionBlock] ; pTransitionBlock
+ mov rdx, rax ; pIndirection
+
+ call DynamicHelperWorker
+
+ test rax,rax
+ jnz @F
+
+ mov rax, [rsp + __PWTB_ArgumentRegisters] ; The result is stored in the argument area of the transition block
+
+ EPILOG_WITH_TRANSITION_BLOCK_RETURN
+
+@@:
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ TAILJMP_RAX
+
+NESTED_END DelayLoad_Helper&suffix, _TEXT
+
+ endm
+
+DYNAMICHELPER DynamicHelperFrameFlags_Default
+DYNAMICHELPER DynamicHelperFrameFlags_ObjectArg, _Obj
+DYNAMICHELPER <DynamicHelperFrameFlags_ObjectArg OR DynamicHelperFrameFlags_ObjectArg2>, _ObjObj
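+
+; The three expansions above generate DelayLoad_Helper, DelayLoad_Helper_Obj and
+; DelayLoad_Helper_ObjObj, identical except for the frame flags value stored at [rsp].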
+
+endif ; FEATURE_READYTORUN
+
+;============================================================================================
+;; EXTERN_C VOID __stdcall VirtualMethodFixupStub()
+
+NESTED_ENTRY VirtualMethodFixupStub, _TEXT, ProcessCLRException
+
+ PROLOG_WITH_TRANSITION_BLOCK 0, 8, rdx
+
+ lea rcx, [rsp + __PWTB_TransitionBlock] ; pTransitionBlock
+ sub rdx, 5 ; pThunk
+ call VirtualMethodFixupWorker
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+PATCH_LABEL VirtualMethodFixupPatchLabel
+ TAILJMP_RAX
+
+NESTED_END VirtualMethodFixupStub, _TEXT
+
+ end
diff --git a/src/vm/amd64/GenericComCallStubs.asm b/src/vm/amd64/GenericComCallStubs.asm
new file mode 100644
index 0000000000..d102a7105f
--- /dev/null
+++ b/src/vm/amd64/GenericComCallStubs.asm
@@ -0,0 +1,305 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+
+ifdef FEATURE_COMINTEROP
+
+include AsmMacros.inc
+include asmconstants.inc
+
+extern CallDescrWorkerUnwindFrameChainHandler:proc
+extern ReverseComUnwindFrameChainHandler:proc
+extern COMToCLRWorker:proc
+extern JIT_FailFast:proc
+extern s_gsCookie:qword
+
+
+NESTED_ENTRY GenericComCallStub, _TEXT, ReverseComUnwindFrameChainHandler
+
+;
+; Set up a ComMethodFrame and call COMToCLRWorker.
+;
+; Stack frame layout:
+;
+; (stack parameters)
+; ...
+; r9
+; r8
+; rdx
+; rcx
+; UnmanagedToManagedFrame::m_ReturnAddress
+; UnmanagedToManagedFrame::m_Datum
+; Frame::m_Next
+; __VFN_table <-- rsp + GenericComCallStub_ComMethodFrame_OFFSET
+; GSCookie
+; (optional padding to qword align xmm save area)
+; xmm3
+; xmm2
+; xmm1
+; xmm0 <-- rsp + GenericComCallStub_XMM_SAVE_OFFSET
+; r12
+; r13
+; r14
+; (optional padding to qword align rsp)
+; callee's r9
+; callee's r8
+; callee's rdx
+; callee's rcx
+
+GenericComCallStub_STACK_FRAME_SIZE = 0
+
+; ComMethodFrame MUST be the highest part of the stack frame, immediately
+; below the return address and MethodDesc*, so that
+; UnmanagedToManagedFrame::m_ReturnAddress and
+; UnmanagedToManagedFrame::m_Datum are the right place.
+GenericComCallStub_STACK_FRAME_SIZE = GenericComCallStub_STACK_FRAME_SIZE + (SIZEOF__ComMethodFrame - 8)
+GenericComCallStub_ComMethodFrame_NEGOFFSET = GenericComCallStub_STACK_FRAME_SIZE
+
+GenericComCallStub_STACK_FRAME_SIZE = GenericComCallStub_STACK_FRAME_SIZE + SIZEOF_GSCookie
+
+; Ensure that the offset of the XMM save area will be 16-byte aligned.
+if ((GenericComCallStub_STACK_FRAME_SIZE + 8) MOD 16) ne 0
+GenericComCallStub_STACK_FRAME_SIZE = GenericComCallStub_STACK_FRAME_SIZE + 8
+endif
+
+; XMM save area MUST be immediately below GenericComCallStub
+; (w/ alignment padding)
+GenericComCallStub_STACK_FRAME_SIZE = GenericComCallStub_STACK_FRAME_SIZE + 4*16
+GenericComCallStub_XMM_SAVE_NEGOFFSET = GenericComCallStub_STACK_FRAME_SIZE
+
+; Add in the callee scratch area size.
+GenericComCallStub_CALLEE_SCRATCH_SIZE = 4*8
+GenericComCallStub_STACK_FRAME_SIZE = GenericComCallStub_STACK_FRAME_SIZE + GenericComCallStub_CALLEE_SCRATCH_SIZE
+
+; Now we have the full size of the stack frame. The offsets have been computed relative to the
+; top, so negate them to make them relative to the post-prologue rsp.
+GenericComCallStub_ComMethodFrame_OFFSET = GenericComCallStub_STACK_FRAME_SIZE - GenericComCallStub_ComMethodFrame_NEGOFFSET
+GenericComCallStub_XMM_SAVE_OFFSET = GenericComCallStub_STACK_FRAME_SIZE - GenericComCallStub_XMM_SAVE_NEGOFFSET
+OFFSETOF_GSCookie = GenericComCallStub_ComMethodFrame_OFFSET - SIZEOF_GSCookie
+
+ .allocstack 8 ; UnmanagedToManagedFrame::m_Datum, pushed by prepad
+
+ ;
+ ; Allocate the remainder of the ComMethodFrame. The fields
+ ; will be filled in by COMToCLRWorker
+ ;
+ alloc_stack SIZEOF__ComMethodFrame - 10h
+
+ ;
+ ; Save ComMethodFrame* to pass to COMToCLRWorker
+ ;
+ mov r10, rsp
+
+ alloc_stack GenericComCallStub_ComMethodFrame_OFFSET
+
+ ;
+ ; Save argument registers
+ ;
+ SAVE_ARGUMENT_REGISTERS GenericComCallStub_STACK_FRAME_SIZE + 8h
+
+ ;
+ ; spill the fp args
+ ;
+ SAVE_FLOAT_ARGUMENT_REGISTERS GenericComCallStub_XMM_SAVE_OFFSET
+
+ END_PROLOGUE
+
+ mov rcx, s_gsCookie
+ mov [rsp + OFFSETOF_GSCookie], rcx
+
+ ;
+ ; Call COMToCLRWorker. Note that the first parameter (pThread) is
+ ; filled in by callee.
+ ;
+
+ifdef _DEBUG
+ mov rcx, 0cccccccccccccccch
+endif
+ mov rdx, r10
+ call COMToCLRWorker
+
+ifdef _DEBUG
+ mov rcx, s_gsCookie
+ cmp [rsp + OFFSETOF_GSCookie], rcx
+ je GoodGSCookie
+ call JIT_FailFast
+GoodGSCookie:
+endif ; _DEBUG
+
+ ;
+ ; epilogue
+ ;
+ add rsp, GenericComCallStub_STACK_FRAME_SIZE
+ ret
+
+NESTED_END GenericComCallStub, _TEXT
+
+
+; ARG_SLOT COMToCLRDispatchHelperWithStack(DWORD dwStackSlots, // rcx
+; ComMethodFrame *pFrame, // rdx
+; PCODE pTarget, // r8
+; PCODE pSecretArg, // r9
+; INT_PTR pDangerousThis // rbp+40h
+; );
+NESTED_ENTRY COMToCLRDispatchHelperWithStack, _TEXT, CallDescrWorkerUnwindFrameChainHandler
+
+ComMethodFrame_Arguments_OFFSET = SIZEOF__ComMethodFrame
+ComMethodFrame_XMM_SAVE_OFFSET = GenericComCallStub_XMM_SAVE_OFFSET - GenericComCallStub_ComMethodFrame_OFFSET
+
+ push_nonvol_reg rdi ; save nonvolatile registers
+ push_nonvol_reg rsi ;
+ push_nonvol_reg rbp ;
+ set_frame rbp, 0 ; set frame pointer
+
+ END_PROLOGUE
+
+
+ ;
+ ; copy stack
+ ;
+ lea rsi, [rdx + ComMethodFrame_Arguments_OFFSET]
+ add ecx, 4 ; outgoing argument homes
+ mov eax, ecx ; number of stack slots
+ shl eax, 3 ; compute number of argument bytes
+ add eax, 8h ; alignment padding
+        and     rax, 0FFFFFFFFFFFFFFF0h ; for proper stack alignment; using rax avoids a partial register stall
+ sub rsp, rax ; allocate argument list
+ mov rdi, rsp ; set destination argument list address
+ rep movsq ; copy arguments to the stack
+
+
+ ; Stack layout:
+ ;
+ ; callee's rcx (to be loaded into rcx) <- rbp+40h
+ ; r9 (to be loaded into r10)
+ ; r8 (IL stub entry point)
+ ; rdx (ComMethodFrame ptr)
+ ; rcx (number of stack slots to repush)
+ ; return address
+ ; saved rdi
+ ; saved rsi
+ ; saved rbp <- rbp
+ ; alignment
+ ; (stack parameters)
+ ; callee's r9
+ ; callee's r8
+ ; callee's rdx
+ ; callee's rcx (not loaded into rcx) <- rsp
+
+ ;
+ ; load fp registers
+ ;
+ movdqa xmm0, [rdx + ComMethodFrame_XMM_SAVE_OFFSET + 00h]
+ movdqa xmm1, [rdx + ComMethodFrame_XMM_SAVE_OFFSET + 10h]
+ movdqa xmm2, [rdx + ComMethodFrame_XMM_SAVE_OFFSET + 20h]
+ movdqa xmm3, [rdx + ComMethodFrame_XMM_SAVE_OFFSET + 30h]
+
+ ;
+ ; load secret arg and target
+ ;
+ mov r10, r9
+ mov rax, r8
+
+ ;
+ ; load argument registers
+ ;
+ mov rcx, [rbp + 40h] ; ignoring the COM IP at [rsp]
+ mov rdx, [rsp + 08h]
+ mov r8, [rsp + 10h]
+ mov r9, [rsp + 18h]
+
+ ;
+ ; call the target
+ ;
+ call rax
+
+ ; It is important to have an instruction between the previous call and the epilog.
+ ; If the return address is in epilog, OS won't call personality routine because
+ ; it thinks personality routine does not help in this case.
+ nop
+
+ ;
+ ; epilog
+ ;
+ lea rsp, 0[rbp] ; deallocate argument list
+ pop rbp ; restore nonvolatile register
+ pop rsi ;
+ pop rdi ;
+ ret
+
+NESTED_END COMToCLRDispatchHelperWithStack, _TEXT
+
+; ARG_SLOT COMToCLRDispatchHelper(DWORD dwStackSlots, // rcx
+; ComMethodFrame *pFrame, // rdx
+; PCODE pTarget, // r8
+; PCODE pSecretArg, // r9
+; INT_PTR pDangerousThis // rsp + 28h on entry
+; );
+NESTED_ENTRY COMToCLRDispatchHelper, _TEXT, CallDescrWorkerUnwindFrameChainHandler
+
+ ;
+ ; Check to see if we have stack to copy and, if so, tail call to
+ ; the routine that can handle that.
+ ;
+ test ecx, ecx
+ jnz COMToCLRDispatchHelperWithStack
+
+        alloc_stack 28h                 ; alloc scratch space + alignment; pDangerousThis moves to [rsp + 50h]
+ END_PROLOGUE
+
+
+ ; get pointer to arguments
+ lea r11, [rdx + ComMethodFrame_Arguments_OFFSET]
+
+ ;
+ ; load fp registers
+ ;
+ movdqa xmm0, [rdx + ComMethodFrame_XMM_SAVE_OFFSET + 00h]
+ movdqa xmm1, [rdx + ComMethodFrame_XMM_SAVE_OFFSET + 10h]
+ movdqa xmm2, [rdx + ComMethodFrame_XMM_SAVE_OFFSET + 20h]
+ movdqa xmm3, [rdx + ComMethodFrame_XMM_SAVE_OFFSET + 30h]
+
+ ;
+ ; load secret arg and target
+ ;
+ mov r10, r9
+ mov rax, r8
+
+ ;
+ ; load argument registers
+ ;
+ mov rcx, [rsp + 50h] ; ignoring the COM IP at [r11 + 00h]
+ mov rdx, [r11 + 08h]
+ mov r8, [r11 + 10h]
+ mov r9, [r11 + 18h]
+
+ ;
+ ; call the target
+ ;
+ call rax
+
+ ; It is important to have an instruction between the previous call and the epilog.
+ ; If the return address is in epilog, OS won't call personality routine because
+ ; it thinks personality routine does not help in this case.
+ nop
+
+ ;
+ ; epilog
+ ;
+ add rsp, 28h
+ ret
+NESTED_END COMToCLRDispatchHelper, _TEXT
+
+
+
+endif ; FEATURE_COMINTEROP
+
+ end
+
diff --git a/src/vm/amd64/GenericComPlusCallStubs.asm b/src/vm/amd64/GenericComPlusCallStubs.asm
new file mode 100644
index 0000000000..9c6bb735fa
--- /dev/null
+++ b/src/vm/amd64/GenericComPlusCallStubs.asm
@@ -0,0 +1,149 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+
+ifdef FEATURE_COMINTEROP
+
+include AsmMacros.inc
+include asmconstants.inc
+
+CTPMethodTable__s_pThunkTable equ ?s_pThunkTable@CTPMethodTable@@0PEAVMethodTable@@EA
+InstantiatedMethodDesc__IMD_GetComPlusCallInfo equ ?IMD_GetComPlusCallInfo@InstantiatedMethodDesc@@QEAAPEAUComPlusCallInfo@@XZ
+
+ifdef FEATURE_REMOTING
+extern CRemotingServices__DispatchInterfaceCall:proc
+extern CTPMethodTable__s_pThunkTable:qword
+extern InstantiatedMethodDesc__IMD_GetComPlusCallInfo:proc
+endif
+
+extern CLRToCOMWorker:proc
+extern ProcessCLRException:proc
+
+ifdef FEATURE_REMOTING
+;
+; in:
+; r10: MethodDesc*
+; rcx: 'this' object
+;
+; out:
+; METHODDESC_REGISTER (r10) = MethodDesc* (for IL stubs)
+;
+LEAF_ENTRY GenericComPlusCallStub, _TEXT
+
+ ;
+ ; check for a null 'this' pointer and
+ ; then see if this is a TransparentProxy
+ ;
+
+ test rcx, rcx
+ jz do_com_call
+
+ mov rax, [CTPMethodTable__s_pThunkTable]
+ cmp [rcx], rax
+ jne do_com_call
+
+ ;
+ ; 'this' is a TransparentProxy
+ ;
+ jmp CRemotingServices__DispatchInterfaceCall
+
+do_com_call:
+
+ ;
+ ; Check if the call is being made on an InstantiatedMethodDesc.
+ ;
+
+ mov ax, [r10 + OFFSETOF__MethodDesc__m_wFlags]
+ and ax, MethodDescClassification__mdcClassification
+ cmp ax, MethodDescClassification__mcInstantiated
+ je GenericComPlusCallWorkerInstantiated
+
+ ;
+ ; Check if there is an IL stub.
+ ;
+
+ mov rax, [r10 + OFFSETOF__ComPlusCallMethodDesc__m_pComPlusCallInfo]
+ mov rax, [rax + OFFSETOF__ComPlusCallInfo__m_pILStub]
+ test rax, rax
+ jz GenericComPlusCallStubSlow
+
+ TAILJMP_RAX
+
+LEAF_END GenericComPlusCallStub, _TEXT
+
+; We could inline IMD_GetComPlusCallInfo here but it would be ugly.
+NESTED_ENTRY GenericComPlusCallWorkerInstantiated, _TEXT, ProcessCLRException
+ alloc_stack 68h
+
+ save_reg_postrsp r10, 60h
+
+ SAVE_ARGUMENT_REGISTERS 70h
+
+ SAVE_FLOAT_ARGUMENT_REGISTERS 20h
+
+ END_PROLOGUE
+
+ mov rcx, r10
+ call InstantiatedMethodDesc__IMD_GetComPlusCallInfo
+
+ RESTORE_FLOAT_ARGUMENT_REGISTERS 20h
+
+ RESTORE_ARGUMENT_REGISTERS 70h
+
+ mov r10, [rsp + 60h]
+
+ mov rax, [rax + OFFSETOF__ComPlusCallInfo__m_pILStub]
+
+ add rsp, 68h
+ TAILJMP_RAX
+NESTED_END GenericComPlusCallWorkerInstantiated, _TEXT
+endif
+
+
+ifdef FEATURE_REMOTING
+NESTED_ENTRY GenericComPlusCallStubSlow, _TEXT, ProcessCLRException
+else
+NESTED_ENTRY GenericComPlusCallStub, _TEXT, ProcessCLRException
+endif
+
+ PROLOG_WITH_TRANSITION_BLOCK 8
+
+ ;
+ ; Call CLRToCOMWorker.
+ ;
+ lea rcx, [rsp + __PWTB_TransitionBlock] ; pTransitionBlock
+ mov rdx, r10 ; MethodDesc *
+ call CLRToCOMWorker
+
+ ; handle FP return values
+
+ lea rcx, [rsp + __PWTB_FloatArgumentRegisters - 8]
+ cmp rax, 4
+ jne @F
+ movss xmm0, real4 ptr [rcx]
+@@:
+ cmp rax, 8
+ jne @F
+ movsd xmm0, real8 ptr [rcx]
+@@:
+ ; load return value
+ mov rax, [rcx]
+
+ EPILOG_WITH_TRANSITION_BLOCK_RETURN
+
+ifdef FEATURE_REMOTING
+NESTED_END GenericComPlusCallStubSlow, _TEXT
+else
+NESTED_END GenericComPlusCallStub, _TEXT
+endif
+
+endif ; FEATURE_COMINTEROP
+
+ end
diff --git a/src/vm/amd64/InstantiatingStub.asm b/src/vm/amd64/InstantiatingStub.asm
new file mode 100644
index 0000000000..d0c3c6402c
--- /dev/null
+++ b/src/vm/amd64/InstantiatingStub.asm
@@ -0,0 +1,154 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+
+include <AsmMacros.inc>
+include AsmConstants.inc
+
+SHF_GETMETHODFRAMEVPTR equ ?GetMethodFrameVPtr@StubHelperFrame@@SA_KXZ
+
+extern SHF_GETMETHODFRAMEVPTR:proc
+extern JIT_FailFast:proc
+extern s_gsCookie:qword
+
+
+OFFSETOF_SECRET_PARAMS equ 0h
+OFFSETOF_GSCOOKIE equ OFFSETOF_SECRET_PARAMS + \
+ 18h + 8h ; +8 for stack alignment padding
+OFFSETOF_FRAME equ OFFSETOF_GSCOOKIE + \
+ 8h
+OFFSETOF_FRAME_REGISTERS equ OFFSETOF_FRAME + \
+ SIZEOF__Frame
+SIZEOF_FIXED_FRAME equ OFFSETOF_FRAME_REGISTERS + \
+ SIZEOF_CalleeSavedRegisters + 8h ; +8 for return address
+
+.errnz SIZEOF_FIXED_FRAME mod 16, SIZEOF_FIXED_FRAME not aligned
+
+;
+; This method takes three secret parameters on the stack:
+;
+; incoming:
+;
+; rsp -> nStackSlots
+; entrypoint of shared MethodDesc
+; extra stack param
+; <space for StubHelperFrame>
+; return address
+; rcx home
+; rdx home
+; :
+;
+;
+; Stack Layout:
+;
+; rsp-> callee scratch
+; + 8h callee scratch
+; +10h callee scratch
+; +18h callee scratch
+; :
+; stack arguments
+; :
+; rbp-> nStackSlots
+; + 8h entrypoint of shared MethodDesc
+; +10h extra stack param
+; +18h padding
+; +20h gsCookie
+; +28h __VFN_table
+; +30h m_Next
+; +38h m_calleeSavedRegisters
+; +98h m_ReturnAddress
+; +a0h rcx home
+; +a8h rdx home
+; +b0h r8 home
+; +b8h r9 home
+;
+NESTED_ENTRY InstantiatingMethodStubWorker, _TEXT
+ .allocstack SIZEOF_FIXED_FRAME - 8h ; -8 for return address
+
+ SAVE_CALLEE_SAVED_REGISTERS OFFSETOF_FRAME_REGISTERS
+
+ SAVE_ARGUMENT_REGISTERS SIZEOF_FIXED_FRAME
+
+ set_frame rbp, 0
+ END_PROLOGUE
+
+ sub rsp, SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES
+
+ ;
+ ; fully initialize the StubHelperFrame
+ ;
+ call SHF_GETMETHODFRAMEVPTR
+ mov [rbp + OFFSETOF_FRAME], rax
+
+ mov rax, s_gsCookie
+ mov [rbp + OFFSETOF_GSCOOKIE], rax
+
+ ;
+ ; link the StubHelperFrame
+ ;
+ CALL_GETTHREAD
+ mov rdx, [rax + OFFSETOF__Thread__m_pFrame]
+ mov [rbp + OFFSETOF_FRAME + OFFSETOF__Frame__m_Next], rdx
+ lea rcx, [rbp + OFFSETOF_FRAME]
+ mov [rax + OFFSETOF__Thread__m_pFrame], rcx
+
+ mov r12, rax ; store the Thread pointer
+
+ add rsp, SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES
+
+ mov rcx, [rbp + OFFSETOF_SECRET_PARAMS + 0h] ; nStackSlots (includes padding for stack alignment)
+
+ lea rsi, [rbp + SIZEOF_FIXED_FRAME + SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES + 8 * rcx]
+
+StackCopyLoop: ; copy the arguments to stack top-down to carefully probe for sufficient stack space
+ sub rsi, 8
+ push qword ptr [rsi]
+ dec rcx
+ jnz StackCopyLoop
+
+ push qword ptr [rbp+OFFSETOF_SECRET_PARAMS + 10h] ; push extra stack arg
+ sub rsp, SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES
+
+ mov rcx, [rbp + SIZEOF_FIXED_FRAME + 00h]
+ mov rdx, [rbp + SIZEOF_FIXED_FRAME + 08h]
+ mov r8, [rbp + SIZEOF_FIXED_FRAME + 10h]
+ mov r9, [rbp + SIZEOF_FIXED_FRAME + 18h]
+
+ call qword ptr [rbp+OFFSETOF_SECRET_PARAMS + 8h] ; call target
+
+ifdef _DEBUG
+ mov rcx, s_gsCookie
+ cmp [rbp + OFFSETOF_GSCOOKIE], rcx
+ je GoodGSCookie
+ call JIT_FailFast
+GoodGSCookie:
+endif ; _DEBUG
+
+ ;
+ ; unlink the StubHelperFrame
+ ;
+ mov rcx, [rbp + OFFSETOF_FRAME + OFFSETOF__Frame__m_Next]
+ mov [r12 + OFFSETOF__Thread__m_pFrame], rcx
+
+ ;
+ ; epilog
+ ;
+
+ lea rsp, [rbp + OFFSETOF_FRAME_REGISTERS]
+
+ POP_CALLEE_SAVED_REGISTERS
+
+ ret
+
+NESTED_END InstantiatingMethodStubWorker, _TEXT
+
+
+ end
+
diff --git a/src/vm/amd64/JitHelpers_Fast.asm b/src/vm/amd64/JitHelpers_Fast.asm
new file mode 100644
index 0000000000..840bd675b6
--- /dev/null
+++ b/src/vm/amd64/JitHelpers_Fast.asm
@@ -0,0 +1,981 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+; ***********************************************************************
+; File: JitHelpers_Fast.asm, see jithelp.asm for history
+;
+; Notes: routines which we believe to be on the hot path for managed
+; code in most scenarios.
+; ***********************************************************************
+
+
+include AsmMacros.inc
+include asmconstants.inc
+
+; Min amount of stack space that a nested function should allocate.
+MIN_SIZE equ 28h
+
+EXTERN g_ephemeral_low:QWORD
+EXTERN g_ephemeral_high:QWORD
+EXTERN g_lowest_address:QWORD
+EXTERN g_highest_address:QWORD
+EXTERN g_card_table:QWORD
+
+ifdef WRITE_BARRIER_CHECK
+; These global variables are always defined, but should be 0 for Server GC
+g_GCShadow TEXTEQU <?g_GCShadow@@3PEAEEA>
+g_GCShadowEnd TEXTEQU <?g_GCShadowEnd@@3PEAEEA>
+EXTERN g_GCShadow:QWORD
+EXTERN g_GCShadowEnd:QWORD
+endif
+
+INVALIDGCVALUE equ 0CCCCCCCDh
+
+ifdef _DEBUG
+extern JIT_WriteBarrier_Debug:proc
+endif
+
+extern JIT_InternalThrow:proc
+
+extern JITutil_ChkCastInterface:proc
+extern JITutil_IsInstanceOfInterface:proc
+extern JITutil_ChkCastAny:proc
+extern JITutil_IsInstanceOfAny:proc
+
+;EXTERN_C Object* JIT_IsInstanceOfClass(MethodTable* pMT, Object* pObject);
+LEAF_ENTRY JIT_IsInstanceOfClass, _TEXT
+ ; move rdx into rax in case of a match or null
+ mov rax, rdx
+
+ ; check if the instance is null
+ test rdx, rdx
+ je IsNullInst
+
+ ; check if the MethodTable for the instance matches pMT
+ cmp rcx, qword ptr [rdx]
+ jne JIT_IsInstanceOfClass2
+
+ IsNullInst:
+ REPRET
+LEAF_END JIT_IsInstanceOfClass, _TEXT
+
+LEAF_ENTRY JIT_IsInstanceOfClass2, _TEXT
+ ; check if the parent class matches.
+ ; start by putting the MethodTable for the instance in rdx
+ mov rdx, qword ptr [rdx]
+
+ align 16
+ CheckParent:
+ ; NULL parent MethodTable* indicates that we're at the top of the hierarchy
+
+ ; unroll 0
+ mov rdx, qword ptr [rdx + OFFSETOF__MethodTable__m_pParentMethodTable]
+ cmp rcx, rdx
+ je IsInst
+ test rdx, rdx
+ je DoneWithLoop
+
+ ; unroll 1
+ mov rdx, qword ptr [rdx + OFFSETOF__MethodTable__m_pParentMethodTable]
+ cmp rcx, rdx
+ je IsInst
+ test rdx, rdx
+ je DoneWithLoop
+
+ ; unroll 2
+ mov rdx, qword ptr [rdx + OFFSETOF__MethodTable__m_pParentMethodTable]
+ cmp rcx, rdx
+ je IsInst
+ test rdx, rdx
+ je DoneWithLoop
+
+ ; unroll 3
+ mov rdx, qword ptr [rdx + OFFSETOF__MethodTable__m_pParentMethodTable]
+ cmp rcx, rdx
+ je IsInst
+ test rdx, rdx
+ jne CheckParent
+
+ align 16
+ DoneWithLoop:
+if METHODTABLE_EQUIVALENCE_FLAGS gt 0
+ ; check if the instance is a proxy or has type equivalence
+ ; get the MethodTable of the original Object (stored earlier in rax)
+ mov rdx, [rax]
+ test dword ptr [rdx + OFFSETOF__MethodTable__m_dwFlags], METHODTABLE_EQUIVALENCE_FLAGS
+ jne SlowPath
+endif ; METHODTABLE_EQUIVALENCE_FLAGS gt 0
+
+ ; we didn't find a match in the ParentMethodTable hierarchy
+ ; and it isn't a proxy and doesn't have type equivalence, return NULL
+ xor eax, eax
+ ret
+if METHODTABLE_EQUIVALENCE_FLAGS gt 0
+ SlowPath:
+ ; Set up the args to call JITutil_IsInstanceOfAny. Note that rcx already contains
+ ; the MethodTable*
+ mov rdx, rax ; rdx = Object*
+
+ ; Call out to JITutil_IsInstanceOfAny to handle the proxy/equivalence case.
+ jmp JITutil_IsInstanceOfAny
+endif ; METHODTABLE_EQUIVALENCE_FLAGS gt 0
+ ; if it is a null instance then rax is null
+ ; if they match then rax contains the instance
+ align 16
+ IsInst:
+ REPRET
+LEAF_END JIT_IsInstanceOfClass2, _TEXT
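+
+; A hedged C sketch of the cast logic above (equivalent behavior, not the
+; actual implementation; the 4x loop unrolling and the type-equivalence slow
+; path are elided):
+;
+;   Object* IsInstanceOfClass(MethodTable* pMT, Object* obj) {
+;       if (obj == NULL) return NULL;
+;       for (MethodTable* mt = *(MethodTable**)obj; mt != NULL;
+;            mt = mt->m_pParentMethodTable)
+;           if (mt == pMT) return obj;
+;       return NULL;    // no match anywhere in the parent chain
+;   }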
+
+; TODO: this is not necessary... we will be calling JIT_ChkCastClass2 all of the time
+; now that the JIT inlines the null check and the exact MT comparison... Or are
+; they only doing it on the IBC hot path??? Look into that. If it will turn out
+; to be cold then put it down at the bottom.
+
+;EXTERN_C Object* JIT_ChkCastClass(MethodTable* pMT, Object* pObject);
+LEAF_ENTRY JIT_ChkCastClass, _TEXT
+ ; check if the instance is null
+ test rdx, rdx
+ je IsNullInst
+
+ ; check if the MethodTable for the instance matches pMT
+ cmp rcx, qword ptr [rdx]
+ jne JIT_ChkCastClassSpecial
+
+ IsNullInst:
+ ; set up the return value for a match or null
+ mov rax, rdx
+ ret
+LEAF_END JIT_ChkCastClass, _TEXT
+
+LEAF_ENTRY JIT_ChkCastClassSpecial, _TEXT
+ ; save off the instance in case it is a proxy, and to set up
+ ; our return value for a match
+ mov rax, rdx
+
+ ; check if the parent class matches.
+ ; start by putting the MethodTable for the instance in rdx
+ mov rdx, qword ptr [rdx]
+ align 16
+ CheckParent:
+ ; NULL parent MethodTable* indicates that we're at the top of the hierarchy
+
+ ; unroll 0
+ mov rdx, qword ptr [rdx + OFFSETOF__MethodTable__m_pParentMethodTable]
+ cmp rcx, rdx
+ je IsInst
+ test rdx, rdx
+ je DoneWithLoop
+
+ ; unroll 1
+ mov rdx, qword ptr [rdx + OFFSETOF__MethodTable__m_pParentMethodTable]
+ cmp rcx, rdx
+ je IsInst
+ test rdx, rdx
+ je DoneWithLoop
+
+ ; unroll 2
+ mov rdx, qword ptr [rdx + OFFSETOF__MethodTable__m_pParentMethodTable]
+ cmp rcx, rdx
+ je IsInst
+ test rdx, rdx
+ je DoneWithLoop
+
+ ; unroll 3
+ mov rdx, qword ptr [rdx + OFFSETOF__MethodTable__m_pParentMethodTable]
+ cmp rcx, rdx
+ je IsInst
+ test rdx, rdx
+ jne CheckParent
+
+ align 16
+ DoneWithLoop:
+ ; Set up the args to call JITutil_ChkCastAny. Note that rcx already contains the MethodTable*
+ mov rdx, rax ; rdx = Object*
+
+ ; Call out to JITutil_ChkCastAny to handle the proxy case and throw a rich
+ ; InvalidCastException in case of failure.
+ jmp JITutil_ChkCastAny
+
+ ; if it is a null instance then rax is null
+ ; if they match then rax contains the instance
+ align 16
+ IsInst:
+ REPRET
+LEAF_END JIT_ChkCastClassSpecial, _TEXT
+
+FIX_INDIRECTION macro Reg
+ifdef FEATURE_PREJIT
+ test Reg, 1
+ jz @F
+ mov Reg, [Reg-1]
+ @@:
+endif
+endm
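+
+; In FEATURE_PREJIT images the MethodTable slot may hold a tagged indirection
+; cell instead of the pointer itself; a set low bit marks the cell. A minimal
+; C sketch of what the macro does (illustrative only):
+;
+;   uintptr_t FixIndirection(uintptr_t p) {
+;       return (p & 1) ? *(uintptr_t*)(p - 1) : p;
+;   }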
+
+; PERF TODO: consider prefetching the entire interface map into the cache
+
+; For all bizarre casts this quickly fails and falls back onto the JITutil_IsInstanceOfInterface
+; helper; this means that all failure cases take the slow path as well.
+;
+; This can trash r10/r11
+LEAF_ENTRY JIT_IsInstanceOfInterface, _TEXT
+ test rdx, rdx
+ jz IsNullInst
+
+ ; get methodtable
+ mov rax, [rdx]
+ mov r11w, word ptr [rax + OFFSETOF__MethodTable__m_wNumInterfaces]
+
+ test r11w, r11w
+ jz DoBizarre
+
+ ; fetch interface map ptr
+ mov rax, [rax + OFFSETOF__MethodTable__m_pInterfaceMap]
+
+ ; r11 holds number of interfaces
+ ; rax is pointer to beginning of interface map list
+ align 16
+ Top:
+ ; rax -> InterfaceInfo_t* into the interface map, aligned to 4 entries
+ ; use offsets of SIZEOF__InterfaceInfo_t to get at entry 1, 2, 3 in this
+ ; block. If we make it through the full 4 without a hit we'll move to
+ ; the next block of 4 and try again.
+
+ ; unroll 0
+ifdef FEATURE_PREJIT
+ mov r10, [rax + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+ FIX_INDIRECTION r10
+ cmp rcx, r10
+else
+ cmp rcx, [rax + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+endif
+ je Found
+ ; move to next entry in list
+ dec r11w
+ jz DoBizarre
+
+ ; unroll 1
+ifdef FEATURE_PREJIT
+ mov r10, [rax + SIZEOF__InterfaceInfo_t + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+ FIX_INDIRECTION r10
+ cmp rcx, r10
+else
+ cmp rcx, [rax + SIZEOF__InterfaceInfo_t + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+endif
+ je Found
+ ; move to next entry in list
+ dec r11w
+ jz DoBizarre
+
+ ; unroll 2
+ifdef FEATURE_PREJIT
+ mov r10, [rax + 2 * SIZEOF__InterfaceInfo_t + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+ FIX_INDIRECTION r10
+ cmp rcx, r10
+else
+ cmp rcx, [rax + 2 * SIZEOF__InterfaceInfo_t + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+endif
+ je Found
+ ; move to next entry in list
+ dec r11w
+ jz DoBizarre
+
+ ; unroll 3
+ifdef FEATURE_PREJIT
+ mov r10, [rax + 3 * SIZEOF__InterfaceInfo_t + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+ FIX_INDIRECTION r10
+ cmp rcx, r10
+else
+ cmp rcx, [rax + 3 * SIZEOF__InterfaceInfo_t + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+endif
+ je Found
+ ; move to next entry in list
+ dec r11w
+ jz DoBizarre
+
+ ; if we didn't find the entry in this loop jump to the next 4 entries in the map
+ add rax, 4 * SIZEOF__InterfaceInfo_t
+ jmp Top
+
+ DoBizarre:
+ mov rax, [rdx]
+ test dword ptr [rax + OFFSETOF__MethodTable__m_dwFlags], METHODTABLE_NONTRIVIALINTERFACECAST_FLAGS
+ jnz NonTrivialCast
+ xor rax,rax
+ ret
+
+ align 16
+ Found:
+ IsNullInst:
+ ; return the successful instance
+ mov rax, rdx
+ ret
+
+ NonTrivialCast:
+ jmp JITutil_IsInstanceOfInterface
+LEAF_END JIT_IsInstanceOfInterface, _TEXT
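+
+; A hedged C sketch of the interface map scan above (the 4x unrolling and the
+; FEATURE_PREJIT fixups are elided; field names follow the offsets used here):
+;
+;   Object* IsInstanceOfInterface(MethodTable* pIntfMT, Object* obj) {
+;       if (obj == NULL) return NULL;
+;       MethodTable* mt = *(MethodTable**)obj;
+;       InterfaceInfo_t* map = mt->m_pInterfaceMap;
+;       for (unsigned i = 0; i < mt->m_wNumInterfaces; i++)
+;           if (map[i].m_pMethodTable == pIntfMT) return obj;
+;       return NULL;    // non-trivial casts go to JITutil_IsInstanceOfInterface instead
+;   }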
+
+; For all bizarre casts this quickly fails and falls back onto the JITutil_ChkCastInterface
+; helper; this means that all failure cases take the slow path as well.
+;
+; This can trash r10/r11
+LEAF_ENTRY JIT_ChkCastInterface, _TEXT
+ test rdx, rdx
+ jz IsNullInst
+
+ ; get methodtable
+ mov rax, [rdx]
+ mov r11w, word ptr [rax + OFFSETOF__MethodTable__m_wNumInterfaces]
+
+ ; speculatively fetch interface map ptr
+ mov rax, [rax + OFFSETOF__MethodTable__m_pInterfaceMap]
+
+ test r11w, r11w
+ jz DoBizarre
+
+ ; r11 holds number of interfaces
+ ; rax is pointer to beginning of interface map list
+ align 16
+ Top:
+ ; rax -> InterfaceInfo_t* into the interface map, aligned to 4 entries
+ ; use offsets of SIZEOF__InterfaceInfo_t to get at entry 1, 2, 3 in this
+ ; block. If we make it through the full 4 without a hit we'll move to
+ ; the next block of 4 and try again.
+
+ ; unroll 0
+ifdef FEATURE_PREJIT
+ mov r10, [rax + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+ FIX_INDIRECTION r10
+ cmp rcx, r10
+else
+ cmp rcx, [rax + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+endif
+ je Found
+ ; move to next entry in list
+ dec r11w
+ jz DoBizarre
+
+ ; unroll 1
+ifdef FEATURE_PREJIT
+ mov r10, [rax + SIZEOF__InterfaceInfo_t + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+ FIX_INDIRECTION r10
+ cmp rcx, r10
+else
+ cmp rcx, [rax + SIZEOF__InterfaceInfo_t + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+endif
+ je Found
+ ; move to next entry in list
+ dec r11w
+ jz DoBizarre
+
+ ; unroll 2
+ifdef FEATURE_PREJIT
+ mov r10, [rax + 2 * SIZEOF__InterfaceInfo_t + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+ FIX_INDIRECTION r10
+ cmp rcx, r10
+else
+ cmp rcx, [rax + 2 * SIZEOF__InterfaceInfo_t + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+endif
+ je Found
+ ; move to next entry in list
+ dec r11w
+ jz DoBizarre
+
+ ; unroll 3
+ifdef FEATURE_PREJIT
+ mov r10, [rax + 3 * SIZEOF__InterfaceInfo_t + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+ FIX_INDIRECTION r10
+ cmp rcx, r10
+else
+ cmp rcx, [rax + 3 * SIZEOF__InterfaceInfo_t + OFFSETOF__InterfaceInfo_t__m_pMethodTable]
+endif
+ je Found
+ ; move to next entry in list
+ dec r11w
+ jz DoBizarre
+
+ ; if we didn't find the entry in this loop jump to the next 4 entries in the map
+ add rax, 4 * SIZEOF__InterfaceInfo_t
+ jmp Top
+
+ DoBizarre:
+ jmp JITutil_ChkCastInterface
+
+ align 16
+ Found:
+ IsNullInst:
+ ; return either NULL or the successful instance
+ mov rax, rdx
+ ret
+LEAF_END JIT_ChkCastInterface, _TEXT
+
+; There is an even more optimized version of these helpers possible which takes
+; advantage of knowledge of which way the ephemeral heap is growing to only do 1/2
+; that check (this is more significant in the JIT_WriteBarrier case).
+;
+; Additionally we can look into providing helpers which take the src/dest in
+; specific registers (like x86), which _could_ (??) make for easier register
+; allocation for the JIT64; however, it might force some nasty code that treats
+; these helpers as special cases... :(.
+;
+; Version that does the move, checks whether the destination is in the GC heap,
+; and whether its card needs to be updated
+;
+; void JIT_CheckedWriteBarrier(Object** dst, Object* src)
+LEAF_ENTRY JIT_CheckedWriteBarrier, _TEXT
+
+ ; When WRITE_BARRIER_CHECK is defined _NotInHeap will write the reference
+ ; but if it isn't then it will just return.
+ ;
+ ; See if this is in GCHeap
+ cmp rcx, [g_lowest_address]
+ jb NotInHeap
+ cmp rcx, [g_highest_address]
+ jnb NotInHeap
+
+ jmp JIT_WriteBarrier
+
+ NotInHeap:
+ ; It is correct to take an AV here; see the comment in JIT_WriteBarrier below
+ mov [rcx], rdx
+ ret
+LEAF_END_MARKED JIT_CheckedWriteBarrier, _TEXT
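+
+; A hedged C sketch of the checked barrier above (illustrative; the g_*
+; globals are the ones declared at the top of this file, treated as byte
+; pointers):
+;
+;   void CheckedWriteBarrier(Object** dst, Object* src) {
+;       if ((uint8_t*)dst < g_lowest_address || (uint8_t*)dst >= g_highest_address) {
+;           *dst = src;             // not a GC heap location: plain store
+;           return;
+;       }
+;       WriteBarrier(dst, src);     // heap store: full barrier
+;   }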
+
+; Mark start of the code region that we patch at runtime
+LEAF_ENTRY JIT_PatchedCodeStart, _TEXT
+ ret
+LEAF_END JIT_PatchedCodeStart, _TEXT
+
+
+; This is used by the mechanism to hold either the JIT_WriteBarrier_PreGrow
+; or JIT_WriteBarrier_PostGrow code (depending on the state of the GC). It _WILL_
+; change at runtime as the GC changes. Initially it should simply be a copy of the
+; larger of the two functions (JIT_WriteBarrier_PostGrow) to ensure we have created
+; enough space to copy that code in.
+LEAF_ENTRY JIT_WriteBarrier, _TEXT
+ align 16
+
+ifdef _DEBUG
+ ; In debug builds, this just contains jump to the debug version of the write barrier by default
+ jmp JIT_WriteBarrier_Debug
+endif
+
+ ; Do the move into the GC heap. It is correct to take an AV here; the EH code
+ ; figures out that this came from a WriteBarrier and correctly maps it back
+ ; to the managed method which called the WriteBarrier (see setup in
+ ; InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rcx], rdx
+
+ NOP_3_BYTE ; padding for alignment of constant
+
+ ; Can't compare a 64 bit immediate, so we have to move them into a
+ ; register. Values of these immediates will be patched at runtime.
+ ; By using two registers we can pipeline better. Should we decide to use
+ ; a special non-volatile calling convention, this should be changed to
+ ; just one.
+
+ mov rax, 0F0F0F0F0F0F0F0F0h
+
+ ; Check the lower and upper ephemeral region bounds
+ cmp rdx, rax
+ jb Exit
+
+ nop ; padding for alignment of constant
+
+ mov r8, 0F0F0F0F0F0F0F0F0h
+
+ cmp rdx, r8
+ jae Exit
+
+ nop ; padding for alignment of constant
+
+ mov rax, 0F0F0F0F0F0F0F0F0h
+
+ ; Touch the card table entry, if not already dirty.
+ shr rcx, 0Bh
+ cmp byte ptr [rcx + rax], 0FFh
+ jne UpdateCardTable
+ REPRET
+
+ UpdateCardTable:
+ mov byte ptr [rcx + rax], 0FFh
+ ret
+
+ align 16
+ Exit:
+ REPRET
+ ; make sure this stub is bigger than any of the barrier bodies copied over it
+ align 16
+ nop
+LEAF_END_MARKED JIT_WriteBarrier, _TEXT
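+
+; A hedged C sketch of the barrier logic above (the 0F0F0F0F0F0F0F0F0h
+; immediates are placeholders that get patched with the real ephemeral bounds
+; and card table address):
+;
+;   void WriteBarrier(Object** dst, Object* src) {
+;       *dst = src;
+;       if ((uint8_t*)src < g_ephemeral_low || (uint8_t*)src >= g_ephemeral_high)
+;           return;                             // target not ephemeral: no card needed
+;       uint8_t* card = g_card_table + ((size_t)dst >> 11);
+;       if (*card != 0xFF) *card = 0xFF;        // dirty the card only when needed
+;   }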
+
+ifndef FEATURE_IMPLICIT_TLS
+LEAF_ENTRY GetThread, _TEXT
+ ; the default implementation will just jump to one that returns null until
+ ; MakeOptimizedTlsGetter is run which will overwrite this with the actual
+ ; implementation.
+ jmp short GetTLSDummy
+
+ ;
+ ; insert enough NOPs to be able to insert the largest optimized TLS getter
+ ; that we might need; it is important that the TLS getter doesn't overwrite
+ ; the dummy getter.
+ ;
+ db (TLS_GETTER_MAX_SIZE_ASM - 2) DUP (0CCh)
+
+LEAF_END GetThread, _TEXT
+
+LEAF_ENTRY GetAppDomain, _TEXT
+ ; the default implementation will just jump to one that returns null until
+ ; MakeOptimizedTlsGetter is run which will overwrite this with the actual
+ ; implementation.
+ jmp short GetTLSDummy
+
+ ;
+ ; insert enough NOPs to be able to insert the largest optimized TLS getter
+ ; that we might need; it is important that the TLS getter doesn't overwrite
+ ; the dummy getter.
+ ;
+ db (TLS_GETTER_MAX_SIZE_ASM - 2) DUP (0CCh)
+
+LEAF_END GetAppDomain, _TEXT
+
+LEAF_ENTRY GetTLSDummy, _TEXT
+ xor rax, rax
+ ret
+LEAF_END GetTLSDummy, _TEXT
+
+LEAF_ENTRY ClrFlsGetBlock, _TEXT
+ ; the default implementation will just jump to one that returns null until
+ ; MakeOptimizedTlsGetter is run which will overwrite this with the actual
+ ; implementation.
+ jmp short GetTLSDummy
+
+ ;
+ ; insert enough NOPs to be able to insert the largest optimized TLS getter
+ ; that we might need; it is important that the TLS getter doesn't overwrite
+ ; the dummy getter.
+ ;
+ db (TLS_GETTER_MAX_SIZE_ASM - 2) DUP (0CCh)
+
+LEAF_END ClrFlsGetBlock, _TEXT
+endif
+
+; Mark start of the code region that we patch at runtime
+LEAF_ENTRY JIT_PatchedCodeLast, _TEXT
+ ret
+LEAF_END JIT_PatchedCodeLast, _TEXT
+
+; JIT_ByRefWriteBarrier has weird semantics; see usage in StubLinkerX86.cpp
+;
+; Entry:
+; RDI - address of ref-field (assigned to)
+; RSI - address of the data (source)
+; RCX can be trashed
+; Exit:
+; RDI, RSI are incremented by SIZEOF(LPVOID)
+LEAF_ENTRY JIT_ByRefWriteBarrier, _TEXT
+ mov rcx, [rsi]
+
+; If !WRITE_BARRIER_CHECK do the write first, otherwise we might have to do some ShadowGC stuff
+ifndef WRITE_BARRIER_CHECK
+ ; rcx is [rsi]
+ mov [rdi], rcx
+endif
+
+ ; When WRITE_BARRIER_CHECK is defined _NotInHeap will write the reference
+ ; but if it isn't then it will just return.
+ ;
+ ; See if this is in GCHeap
+ cmp rdi, [g_lowest_address]
+ jb NotInHeap
+ cmp rdi, [g_highest_address]
+ jnb NotInHeap
+
+ifdef WRITE_BARRIER_CHECK
+ ; we can only trash rcx in this function so in _DEBUG we need to save
+ ; some scratch registers.
+ push r10
+ push r11
+ push rax
+
+ ; **ALSO update the shadow GC heap if that is enabled**
+ ; Do not perform the work if g_GCShadow is 0
+ cmp g_GCShadow, 0
+ je NoShadow
+
+ ; If we end up outside of the heap don't corrupt random memory
+ mov r10, rdi
+ sub r10, [g_lowest_address]
+ jb NoShadow
+
+ ; Check that our adjusted destination is somewhere in the shadow gc
+ add r10, [g_GCShadow]
+ cmp r10, [g_GCShadowEnd]
+ ja NoShadow
+
+ ; Write ref into real GC
+ mov [rdi], rcx
+ ; Write ref into shadow GC
+ mov [r10], rcx
+
+ ; Ensure that the write to the shadow heap occurs before the read from
+ ; the GC heap so that race conditions are caught by INVALIDGCVALUE
+ mfence
+
+ ; Check that GC/ShadowGC values match
+ mov r11, [rdi]
+ mov rax, [r10]
+ cmp rax, r11
+ je DoneShadow
+ mov r11, INVALIDGCVALUE
+ mov [r10], r11
+
+ jmp DoneShadow
+
+ ; If we don't have a shadow GC we won't have done the write yet
+ NoShadow:
+ mov [rdi], rcx
+
+ ; If we had a shadow GC then we already wrote to the real GC at the same time
+ ; as the shadow GC so we want to jump over the real write immediately above.
+ ; Additionally we know for sure that we are inside the heap and therefore don't
+ ; need to replicate the above checks.
+ DoneShadow:
+ pop rax
+ pop r11
+ pop r10
+endif
+
+ ; See if we can just quick out
+ cmp rcx, [g_ephemeral_low]
+ jb Exit
+ cmp rcx, [g_ephemeral_high]
+ jnb Exit
+
+ ; move current rdi value into rcx and then increment the pointers
+ mov rcx, rdi
+ add rsi, 8h
+ add rdi, 8h
+
+ ; Check if we need to update the card table
+ ; Calc pCardByte
+ shr rcx, 0Bh
+ add rcx, [g_card_table]
+
+ ; Check if this card is dirty
+ cmp byte ptr [rcx], 0FFh
+ jne UpdateCardTable
+ REPRET
+
+ UpdateCardTable:
+ mov byte ptr [rcx], 0FFh
+ ret
+
+ align 16
+ NotInHeap:
+; If WRITE_BARRIER_CHECK then we won't have done the mov yet and should do it here.
+; If !WRITE_BARRIER_CHECK we want _NotInHeap and _Leave to be the same, with both
+; 16-byte aligned.
+ifdef WRITE_BARRIER_CHECK
+ ; rcx is [rsi]
+ mov [rdi], rcx
+endif
+ Exit:
+ ; Increment the pointers before leaving
+ add rdi, 8h
+ add rsi, 8h
+ ret
+LEAF_END JIT_ByRefWriteBarrier, _TEXT
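+
+; A hedged C sketch of the contract above (shadow-GC checking and the
+; heap-range check on the destination are elided; the pointer increments are
+; what let the JIT emit back-to-back copies):
+;
+;   void ByRefWriteBarrier(Object*** ppDst, Object** ppSrc) {
+;       Object* val = **ppSrc;
+;       **ppDst = val;
+;       if ((uint8_t*)val >= g_ephemeral_low && (uint8_t*)val < g_ephemeral_high) {
+;           uint8_t* card = g_card_table + ((size_t)*ppDst >> 11);
+;           if (*card != 0xFF) *card = 0xFF;
+;       }
+;       ++*ppDst; ++*ppSrc;                     // advance both pointers
+;   }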
+
+
+g_pObjectClass equ ?g_pObjectClass@@3PEAVMethodTable@@EA
+
+EXTERN g_pObjectClass:qword
+extern ArrayStoreCheck:proc
+extern ObjIsInstanceOfNoGC:proc
+
+; TODO: put definition for this in asmconstants.h
+CanCast equ 1
+
+;__declspec(naked) void F_CALL_CONV JIT_Stelem_Ref(PtrArray* array, unsigned idx, Object* val)
+LEAF_ENTRY JIT_Stelem_Ref, _TEXT
+ ; check for null PtrArray*
+ test rcx, rcx
+ je ThrowNullReferenceException
+
+ ; we only want the lower 32 bits of the index; the upper half of rdx might be dirty
+ or edx, edx
+
+ ; check that index is in bounds
+ cmp edx, dword ptr [rcx + OFFSETOF__PtrArray__m_NumComponents] ; 8h -> array size offset
+ jae ThrowIndexOutOfRangeException
+
+ ; r10 = Array MT
+ mov r10, [rcx]
+
+ ; if we're assigning a null object* then we don't need a write barrier
+ test r8, r8
+ jz AssigningNull
+
+ifdef CHECK_APP_DOMAIN_LEAKS
+ ; get Array TypeHandle
+ mov r9, [r10 + OFFSETOF__MethodTable__m_ElementType] ; 10h -> typehandle offset
+ ; check for non-MT
+ test r9, 2
+ jnz NoCheck
+
+ ; Check VMflags of element type
+ mov r9, [r9 + OFFSETOF__MethodTable__m_pEEClass]
+ mov r9d, dword ptr [r9 + OFFSETOF__EEClass__m_wAuxFlags]
+ test r9d, EEClassFlags
+ jnz ArrayStoreCheck_Helper
+
+ NoCheck:
+endif
+
+ mov r9, [r10 + OFFSETOF__MethodTable__m_ElementType] ; 10h -> typehandle offset
+
+ ; check for exact match
+ cmp r9, [r8]
+ jne NotExactMatch
+
+ DoWrite:
+ lea rcx, [rcx + 8*rdx + OFFSETOF__PtrArray__m_Array]
+ mov rdx, r8
+
+ ; JIT_WriteBarrier(Object** dst, Object* src)
+ jmp JIT_WriteBarrier
+
+ AssigningNull:
+ ; write barrier is not needed for assignment of NULL references
+ mov [rcx + 8*rdx + OFFSETOF__PtrArray__m_Array], r8
+ ret
+
+ NotExactMatch:
+ cmp r9, [g_pObjectClass]
+ je DoWrite
+
+ jmp JIT_Stelem_Ref__ObjIsInstanceOfNoGC_Helper
+
+ ThrowNullReferenceException:
+ mov rcx, CORINFO_NullReferenceException_ASM
+ jmp JIT_InternalThrow
+
+ ThrowIndexOutOfRangeException:
+ mov rcx, CORINFO_IndexOutOfRangeException_ASM
+ jmp JIT_InternalThrow
+LEAF_END JIT_Stelem_Ref, _TEXT
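+
+; A hedged C sketch of the store checks above (the CHECK_APP_DOMAIN_LEAKS path
+; is elided; the helper names below are illustrative, not the real entry points):
+;
+;   void Stelem_Ref(PtrArray* a, unsigned idx, Object* val) {
+;       if (a == NULL)                 ThrowNullReference();
+;       if (idx >= a->m_NumComponents) ThrowIndexOutOfRange();
+;       if (val == NULL) { a->m_Array[idx] = NULL; return; }   // null store: no barrier
+;       TypeHandle elem = ArrayElementTypeHandle(a);
+;       if (elem == TypeHandleOf(val) || elem == g_pObjectClass)
+;           WriteBarrier(&a->m_Array[idx], val);
+;       else
+;           StoreSlowPath(a, idx, val);   // ObjIsInstanceOfNoGC, then ArrayStoreCheck
+;   }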
+
+NESTED_ENTRY JIT_Stelem_Ref__ObjIsInstanceOfNoGC_Helper, _TEXT
+ alloc_stack MIN_SIZE
+ save_reg_postrsp rcx, MIN_SIZE + 8h
+ save_reg_postrsp rdx, MIN_SIZE + 10h
+ save_reg_postrsp r8, MIN_SIZE + 18h
+ END_PROLOGUE
+
+ ; need to get TypeHandle before setting rcx to be the Obj* because that trashes the PtrArray*
+ mov rdx, r9
+ mov rcx, r8
+
+ ; TypeHandle::CastResult ObjIsInstanceOfNoGC(Object *pElement, TypeHandle toTypeHnd)
+ call ObjIsInstanceOfNoGC
+
+ mov rcx, [rsp + MIN_SIZE + 8h]
+ mov rdx, [rsp + MIN_SIZE + 10h]
+ mov r8, [rsp + MIN_SIZE + 18h]
+
+ cmp eax, CanCast
+ jne NeedCheck
+
+ lea rcx, [rcx + 8*rdx + OFFSETOF__PtrArray__m_Array]
+ mov rdx, r8
+ add rsp, MIN_SIZE
+
+ ; JIT_WriteBarrier(Object** dst, Object* src)
+ jmp JIT_WriteBarrier
+
+ NeedCheck:
+ add rsp, MIN_SIZE
+ jmp JIT_Stelem_Ref__ArrayStoreCheck_Helper
+NESTED_END JIT_Stelem_Ref__ObjIsInstanceOfNoGC_Helper, _TEXT
+
+; Need to save r8 to provide a stack address for the Object*
+NESTED_ENTRY JIT_Stelem_Ref__ArrayStoreCheck_Helper, _TEXT
+ alloc_stack MIN_SIZE
+ save_reg_postrsp rcx, MIN_SIZE + 8h
+ save_reg_postrsp rdx, MIN_SIZE + 10h
+ save_reg_postrsp r8, MIN_SIZE + 18h
+ END_PROLOGUE
+
+ lea rcx, [rsp + MIN_SIZE + 18h]
+ lea rdx, [rsp + MIN_SIZE + 8h]
+
+ ; HCIMPL2(FC_INNER_RET, ArrayStoreCheck, Object** pElement, PtrArray** pArray)
+ call ArrayStoreCheck
+
+ mov rcx, [rsp + MIN_SIZE + 8h]
+ mov rdx, [rsp + MIN_SIZE + 10h]
+ mov r8, [rsp + MIN_SIZE + 18h]
+
+ lea rcx, [rcx + 8*rdx + OFFSETOF__PtrArray__m_Array]
+ mov rdx, r8
+ add rsp, MIN_SIZE
+
+ ; JIT_WriteBarrier(Object** dst, Object* src)
+ jmp JIT_WriteBarrier
+
+NESTED_END JIT_Stelem_Ref__ArrayStoreCheck_Helper, _TEXT
+
+
+; Equivalent of x86's C++ /fp:fast sin/cos/tan helpers, on x64
+
+; public: static double __fastcall COMDouble::Sin(double)
+LEAF_ENTRY ?Sin@COMDouble@@SANN@Z, _TEXT
+ movsd qword ptr [rsp + 8h], xmm0
+ fld qword ptr [rsp + 8h]
+ fsin
+ fstp qword ptr [rsp + 8h]
+ movsd xmm0, qword ptr [rsp + 8h]
+ ret
+LEAF_END ?Sin@COMDouble@@SANN@Z, _TEXT
+
+; public: static double __fastcall COMDouble::Cos(double)
+LEAF_ENTRY ?Cos@COMDouble@@SANN@Z, _TEXT
+ movsd qword ptr [rsp + 8h], xmm0
+ fld qword ptr [rsp + 8h]
+ fcos
+ fstp qword ptr [rsp + 8h]
+ movsd xmm0, qword ptr [rsp + 8h]
+ ret
+LEAF_END ?Cos@COMDouble@@SANN@Z, _TEXT
+
+; public: static double __fastcall COMDouble::Tan(double)
+LEAF_ENTRY ?Tan@COMDouble@@SANN@Z, _TEXT
+ movsd qword ptr [rsp + 8h], xmm0
+ fld qword ptr [rsp + 8h]
+ fptan
+ fstp st(0)
+ fstp qword ptr [rsp + 8h]
+ movsd xmm0, qword ptr [rsp + 8h]
+ ret
+LEAF_END ?Tan@COMDouble@@SANN@Z, _TEXT
+
+
+extern JIT_FailFast:proc
+extern s_gsCookie:qword
+
+OFFSETOF_GSCOOKIE equ 0h
+OFFSETOF_FRAME equ OFFSETOF_GSCOOKIE + \
+ 8h
+
+;
+; incoming:
+;
+; rsp -> return address
+; :
+;
+; Stack Layout:
+;
+; rsp-> callee scratch
+; + 8h callee scratch
+; +10h callee scratch
+; +18h callee scratch
+; :
+; stack arguments
+; :
+; r13-> gsCookie
+; + 8h __VFN_table
+; +10h m_Next
+; +18h m_pGCLayout
+; +20h m_padding
+; +28h m_rdi
+; +30h m_rsi
+; +38h m_rbx
+; +40h m_rbp
+; +48h m_r12
+; +50h m_r13
+; +58h m_r14
+; +60h m_r15
+; +68h m_ReturnAddress
+; r12 -> // Caller's SP
+;
+; r14 = GetThread();
+; r15 = GetThread()->GetFrame(); // For restoring/popping the frame
+;
+NESTED_ENTRY TailCallHelperStub, _TEXT
+ PUSH_CALLEE_SAVED_REGISTERS
+
+ alloc_stack 48h ; m_padding, m_pGCLayout, m_Next, __VFN_table, gsCookie, outgoing shadow area
+
+ set_frame r13, 20h
+ END_PROLOGUE
+
+ ;
+ ; This part is never executed, but we keep it here for reference
+ ;
+ int 3
+
+if 0 ne 0
+ ; Save the caller's SP
+ mov r12, rsp + ...
+
+ ;
+ ; fully initialize the TailCallFrame
+ ;
+ call TCF_GETMETHODFRAMEVPTR
+ mov [r13 + OFFSETOF_FRAME], rax
+
+ mov rax, s_gsCookie
+ mov [r13 + OFFSETOF_GSCOOKIE], rax
+
+ ;
+ ; link the TailCallFrame
+ ;
+ CALL_GETTHREAD
+ mov r14, rax
+ mov r15, [rax + OFFSETOF__Thread__m_pFrame]
+ mov [r13 + OFFSETOF_FRAME + OFFSETOF__Frame__m_Next], r15
+ lea r10, [r13 + OFFSETOF_FRAME]
+ mov [rax + OFFSETOF__Thread__m_pFrame], r10
+endif
+
+ ; the pretend call would be here
+ ; with the return address pointing to this real epilog
+
+PATCH_LABEL JIT_TailCallHelperStub_ReturnAddress
+
+ ; our epilog (which also unlinks the TailCallFrame)
+
+ifdef _DEBUG
+ mov rcx, s_gsCookie
+ cmp [r13 + OFFSETOF_GSCOOKIE], rcx
+ je GoodGSCookie
+ call JIT_FailFast
+GoodGSCookie:
+endif ; _DEBUG
+
+ ;
+ ; unlink the TailCallFrame
+ ;
+ mov [r14 + OFFSETOF__Thread__m_pFrame], r15
+
+ ;
+ ; epilog
+ ;
+
+ lea rsp, [r13 + 28h]
+ POP_CALLEE_SAVED_REGISTERS
+ ret
+
+NESTED_END TailCallHelperStub, _TEXT
+
+ end
+
diff --git a/src/vm/amd64/JitHelpers_FastWriteBarriers.asm b/src/vm/amd64/JitHelpers_FastWriteBarriers.asm
new file mode 100644
index 0000000000..bef2c09b97
--- /dev/null
+++ b/src/vm/amd64/JitHelpers_FastWriteBarriers.asm
@@ -0,0 +1,276 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+; ***********************************************************************
+; File: JitHelpers_FastWriteBarriers.asm, see jithelp.asm for history
+;
+; Notes: these are the fast write barriers which are copied into the
+; JIT_WriteBarrier buffer (found in JitHelpers_Fast.asm).
+; This code should never be executed at runtime and should end
+; up effectively being treated as data.
+; ***********************************************************************
+
+include AsmMacros.inc
+include asmconstants.inc
+
+
+; Two super fast helpers that together do the work of JIT_WriteBarrier. These
+; use inlined ephemeral region bounds and an inlined pointer to the card table.
+;
+; Until the GC does some major reshuffling, the ephemeral region will always be
+; at the top of the heap, so given that we know the reference is inside the
+; heap, we don't have to check against the upper bound of the ephemeral region
+; (PreGrow version). Once the GC moves the ephemeral region, this will no longer
+; be valid, so we use the PostGrow version to check both the upper and lower
+; bounds. The inlined bounds and card table pointers have to be patched
+; whenever they change.
+;
+; At any one time, the memory pointed to by JIT_WriteBarrier will contain one
+; of these functions. See StompWriteBarrierResize and StompWriteBarrierEphemeral
+; in VM\AMD64\JITInterfaceAMD64.cpp and InitJITHelpers1 in VM\JITInterfaceGen.cpp
+; for more info.
+;
+; READ THIS!!!!!!
+; It is imperative that the addresses of the values that we overwrite
+; (card table, ephemeral region ranges, etc) are naturally aligned since
+; there are codepaths that will overwrite these values while the EE is running.
+;
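+; For reference, a hedged C-style sketch of how one of these bodies is
+; installed (names are illustrative; see the Stomp* functions referenced
+; above for the real logic):
+;
+;   void InstallWriteBarrier(void* body_start, void* body_end) {
+;       // copy the chosen barrier body over the JIT_WriteBarrier buffer,
+;       // then patch the inlined immediates (bounds, card table) in place
+;       memcpy(JIT_WriteBarrier, body_start, (char*)body_end - (char*)body_start);
+;   }
+;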
+LEAF_ENTRY JIT_WriteBarrier_PreGrow32, _TEXT
+ align 4
+ ; Do the move into the GC heap. It is correct to take an AV here; the EH code
+ ; figures out that this came from a WriteBarrier and correctly maps it back
+ ; to the managed method which called the WriteBarrier (see setup in
+ ; InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rcx], rdx
+
+ NOP_2_BYTE ; padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_PreGrow32_PatchLabel_Lower
+ cmp rdx, 0F0F0F0F0h
+ jb Exit
+
+ shr rcx, 0Bh
+PATCH_LABEL JIT_WriteBarrier_PreGrow32_PatchLabel_CardTable_Check
+ cmp byte ptr [rcx + 0F0F0F0F0h], 0FFh
+ jne UpdateCardTable
+ REPRET
+
+ nop ; padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_PreGrow32_PatchLabel_CardTable_Update
+ UpdateCardTable:
+ mov byte ptr [rcx + 0F0F0F0F0h], 0FFh
+ ret
+
+ align 16
+ Exit:
+ REPRET
+LEAF_END_MARKED JIT_WriteBarrier_PreGrow32, _TEXT
+
+
+LEAF_ENTRY JIT_WriteBarrier_PreGrow64, _TEXT
+ align 8
+ ; Do the move into the GC heap. It is correct to take an AV here; the EH code
+ ; figures out that this came from a WriteBarrier and correctly maps it back
+ ; to the managed method which called the WriteBarrier (see setup in
+ ; InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rcx], rdx
+
+ NOP_3_BYTE ; padding for alignment of constant
+
+ ; Can't compare a 64 bit immediate, so we have to move it into a
+ ; register. Value of this immediate will be patched at runtime.
+PATCH_LABEL JIT_WriteBarrier_PreGrow64_Patch_Label_Lower
+ mov rax, 0F0F0F0F0F0F0F0F0h
+
+ ; Check the lower ephemeral region bound.
+ cmp rdx, rax
+ jb Exit
+
+ nop ; padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_PreGrow64_Patch_Label_CardTable
+ mov rax, 0F0F0F0F0F0F0F0F0h
+
+ ; Touch the card table entry, if not already dirty.
+ shr rcx, 0Bh
+ cmp byte ptr [rcx + rax], 0FFh
+ jne UpdateCardTable
+ REPRET
+
+ UpdateCardTable:
+ mov byte ptr [rcx + rax], 0FFh
+ ret
+
+ align 16
+ Exit:
+ REPRET
+LEAF_END_MARKED JIT_WriteBarrier_PreGrow64, _TEXT
+
+
+; See comments for JIT_WriteBarrier_PreGrow (above).
+LEAF_ENTRY JIT_WriteBarrier_PostGrow64, _TEXT
+ align 8
+ ; Do the move into the GC heap. It is correct to take an AV here; the EH code
+ ; figures out that this came from a WriteBarrier and correctly maps it back
+ ; to the managed method which called the WriteBarrier (see setup in
+ ; InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rcx], rdx
+
+ NOP_3_BYTE ; padding for alignment of constant
+
+ ; Can't compare a 64 bit immediate, so we have to move them into a
+ ; register. Values of these immediates will be patched at runtime.
+ ; By using two registers we can pipeline better. Should we decide to use
+ ; a special non-volatile calling convention, this should be changed to
+ ; just one.
+PATCH_LABEL JIT_WriteBarrier_PostGrow64_Patch_Label_Lower
+ mov rax, 0F0F0F0F0F0F0F0F0h
+
+ ; Check the lower and upper ephemeral region bounds
+ cmp rdx, rax
+ jb Exit
+
+ nop ; padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_PostGrow64_Patch_Label_Upper
+ mov r8, 0F0F0F0F0F0F0F0F0h
+
+ cmp rdx, r8
+ jae Exit
+
+ nop ; padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_PostGrow64_Patch_Label_CardTable
+ mov rax, 0F0F0F0F0F0F0F0F0h
+
+ ; Touch the card table entry, if not already dirty.
+ shr rcx, 0Bh
+ cmp byte ptr [rcx + rax], 0FFh
+ jne UpdateCardTable
+ REPRET
+
+ UpdateCardTable:
+ mov byte ptr [rcx + rax], 0FFh
+ ret
+
+ align 16
+ Exit:
+ REPRET
+LEAF_END_MARKED JIT_WriteBarrier_PostGrow64, _TEXT
+
+LEAF_ENTRY JIT_WriteBarrier_PostGrow32, _TEXT
+ align 4
+ ; Do the move into the GC heap. It is correct to take an AV here; the EH code
+ ; figures out that this came from a WriteBarrier and correctly maps it back
+ ; to the managed method which called the WriteBarrier (see setup in
+ ; InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rcx], rdx
+
+ NOP_2_BYTE ; padding for alignment of constant
+
+ ; Check the lower and upper ephemeral region bounds
+
+PATCH_LABEL JIT_WriteBarrier_PostGrow32_PatchLabel_Lower
+ cmp rdx, 0F0F0F0F0h
+ jb Exit
+
+ NOP_3_BYTE ; padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_PostGrow32_PatchLabel_Upper
+ cmp rdx, 0F0F0F0F0h
+ jae Exit
+
+ ; Touch the card table entry, if not already dirty.
+ shr rcx, 0Bh
+
+PATCH_LABEL JIT_WriteBarrier_PostGrow32_PatchLabel_CheckCardTable
+ cmp byte ptr [rcx + 0F0F0F0F0h], 0FFh
+ jne UpdateCardTable
+ REPRET
+
+ nop ; padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_PostGrow32_PatchLabel_UpdateCardTable
+ UpdateCardTable:
+ mov byte ptr [rcx + 0F0F0F0F0h], 0FFh
+ ret
+
+ align 16
+ Exit:
+ REPRET
+LEAF_END_MARKED JIT_WriteBarrier_PostGrow32, _TEXT
+
+
+LEAF_ENTRY JIT_WriteBarrier_SVR32, _TEXT
+ align 4
+ ;
+ ; SVR GC has multiple heaps, so it cannot provide one single
+ ; ephemeral region to bounds check against, so we just skip the
+ ; bounds checking all together and do our card table update
+ ; unconditionally.
+ ;
+
+ ; Do the move into the GC heap. It is correct to take an AV here; the EH code
+ ; figures out that this came from a WriteBarrier and correctly maps it back
+ ; to the managed method which called the WriteBarrier (see setup in
+ ; InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rcx], rdx
+
+ shr rcx, 0Bh
+
+ NOP_3_BYTE ; padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_SVR32_PatchLabel_CheckCardTable
+ cmp byte ptr [rcx + 0F0F0F0F0h], 0FFh
+ jne UpdateCardTable
+ REPRET
+
+ nop ; padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_SVR32_PatchLabel_UpdateCardTable
+ UpdateCardTable:
+ mov byte ptr [rcx + 0F0F0F0F0h], 0FFh
+ ret
+LEAF_END_MARKED JIT_WriteBarrier_SVR32, _TEXT
+
+LEAF_ENTRY JIT_WriteBarrier_SVR64, _TEXT
+ align 8
+ ;
+ ; SVR GC has multiple heaps, so it cannot provide one single
+ ; ephemeral region to bounds check against, so we just skip the
+ ; bounds checking all together and do our card table update
+ ; unconditionally.
+ ;
+
+ ; Do the move into the GC heap. It is correct to take an AV here; the EH code
+ ; figures out that this came from a WriteBarrier and correctly maps it back
+ ; to the managed method which called the WriteBarrier (see setup in
+ ; InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rcx], rdx
+
+ NOP_3_BYTE ; padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_SVR64_PatchLabel_CardTable
+ mov rax, 0F0F0F0F0F0F0F0F0h
+
+ shr rcx, 0Bh
+
+ cmp byte ptr [rcx + rax], 0FFh
+ jne UpdateCardTable
+ REPRET
+
+ UpdateCardTable:
+ mov byte ptr [rcx + rax], 0FFh
+ ret
+LEAF_END_MARKED JIT_WriteBarrier_SVR64, _TEXT
+
+ end
+
diff --git a/src/vm/amd64/JitHelpers_InlineGetAppDomain.asm b/src/vm/amd64/JitHelpers_InlineGetAppDomain.asm
new file mode 100644
index 0000000000..217b433e56
--- /dev/null
+++ b/src/vm/amd64/JitHelpers_InlineGetAppDomain.asm
@@ -0,0 +1,124 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+; ***********************************************************************
+; File: JitHelpers_InlineGetAppDomain.asm, see history in jithelp.asm
+;
+; Notes: These routines will be patched at runtime with the location in
+; the TLS to find the AppDomain* and are the fastest implementation
+; of their specific functionality.
+; ***********************************************************************
+
+include AsmMacros.inc
+include asmconstants.inc
+
+; Min amount of stack space that a nested function should allocate.
+MIN_SIZE equ 28h
+
+; Macro to create a patchable inline GetAppDomain. If we decide to create patchable
+; high-TLS inline versions then just change this macro to make sure to create enough
+; space in the asm to patch the high-TLS getter instructions.
+PATCHABLE_INLINE_GETAPPDOMAIN macro Reg, PatchLabel
+PATCH_LABEL PatchLabel
+ mov Reg, gs:[OFFSET__TEB__TlsSlots]
+ endm
+
+extern JIT_GetSharedNonGCStaticBase_Helper:proc
+extern JIT_GetSharedGCStaticBase_Helper:proc
+
+LEAF_ENTRY JIT_GetSharedNonGCStaticBase_InlineGetAppDomain, _TEXT
+ ; Check if rcx (moduleDomainID) is not a moduleID
+ mov rax, rcx
+ test rax, 1
+ jz HaveLocalModule
+
+ PATCHABLE_INLINE_GETAPPDOMAIN rax, JIT_GetSharedNonGCStaticBase__PatchTLSLabel
+
+ ; Get the LocalModule, rcx will always be odd, so: rcx * 4 - 4 <=> (rcx >> 1) * 8
+ mov rax, [rax + OFFSETOF__AppDomain__m_sDomainLocalBlock + OFFSETOF__DomainLocalBlock__m_pModuleSlots]
+ mov rax, [rax + rcx * 4 - 4]
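+ ; Worked example: an odd moduleDomainID 2k+1 encodes slot index k, and
+ ; (2k+1)*4 - 4 = 8k = k * sizeof(void*); e.g. rcx = 5 -> slot 2 -> offset 16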
+
+ HaveLocalModule:
+ ; If class is not initialized, bail to C++ helper
+ test byte ptr [rax + OFFSETOF__DomainLocalModule__m_pDataBlob + rdx], 1
+ jz CallHelper
+ REPRET
+
+ align 16
+ CallHelper:
+ ; Tail call JIT_GetSharedNonGCStaticBase_Helper
+ mov rcx, rax
+ jmp JIT_GetSharedNonGCStaticBase_Helper
+LEAF_END JIT_GetSharedNonGCStaticBase_InlineGetAppDomain, _TEXT
+
+LEAF_ENTRY JIT_GetSharedNonGCStaticBaseNoCtor_InlineGetAppDomain, _TEXT
+ ; Check if rcx (moduleDomainID) is not a moduleID
+ mov rax, rcx
+ test rax, 1
+ jz HaveLocalModule
+
+ PATCHABLE_INLINE_GETAPPDOMAIN rax, JIT_GetSharedNonGCStaticBaseNoCtor__PatchTLSLabel
+
+ ; Get the LocalModule, rcx will always be odd, so: rcx * 4 - 4 <=> (rcx >> 1) * 8
+ mov rax, [rax + OFFSETOF__AppDomain__m_sDomainLocalBlock + OFFSETOF__DomainLocalBlock__m_pModuleSlots]
+ mov rax, [rax + rcx * 4 - 4]
+ ret
+
+ align 16
+ HaveLocalModule:
+ REPRET
+LEAF_END JIT_GetSharedNonGCStaticBaseNoCtor_InlineGetAppDomain, _TEXT
+
+LEAF_ENTRY JIT_GetSharedGCStaticBase_InlineGetAppDomain, _TEXT
+ ; Check if rcx (moduleDomainID) is not a moduleID
+ mov rax, rcx
+ test rax, 1
+ jz HaveLocalModule
+
+ PATCHABLE_INLINE_GETAPPDOMAIN rax, JIT_GetSharedGCStaticBase__PatchTLSLabel
+
+ ; Get the LocalModule, rcx will always be odd, so: rcx * 4 - 4 <=> (rcx >> 1) * 8
+ mov rax, [rax + OFFSETOF__AppDomain__m_sDomainLocalBlock + OFFSETOF__DomainLocalBlock__m_pModuleSlots]
+ mov rax, [rax + rcx * 4 - 4]
+
+ HaveLocalModule:
+ ; If class is not initialized, bail to C++ helper
+ test byte ptr [rax + OFFSETOF__DomainLocalModule__m_pDataBlob + rdx], 1
+ jz CallHelper
+
+ mov rax, [rax + OFFSETOF__DomainLocalModule__m_pGCStatics]
+ ret
+
+ align 16
+ CallHelper:
+ ; Tail call JIT_GetSharedGCStaticBase_Helper
+ mov rcx, rax
+ jmp JIT_GetSharedGCStaticBase_Helper
+LEAF_END JIT_GetSharedGCStaticBase_InlineGetAppDomain, _TEXT
+
+LEAF_ENTRY JIT_GetSharedGCStaticBaseNoCtor_InlineGetAppDomain, _TEXT
+ ; Check if rcx (moduleDomainID) is not a moduleID
+ mov rax, rcx
+ test rax, 1
+ jz HaveLocalModule
+
+ PATCHABLE_INLINE_GETAPPDOMAIN rax, JIT_GetSharedGCStaticBaseNoCtor__PatchTLSLabel
+
+ ; Get the LocalModule, rcx will always be odd, so: rcx * 4 - 4 <=> (rcx >> 1) * 8
+ mov rax, [rax + OFFSETOF__AppDomain__m_sDomainLocalBlock + OFFSETOF__DomainLocalBlock__m_pModuleSlots]
+ mov rax, [rax + rcx * 4 - 4]
+
+ HaveLocalModule:
+ mov rax, [rax + OFFSETOF__DomainLocalModule__m_pGCStatics]
+ ret
+LEAF_END JIT_GetSharedGCStaticBaseNoCtor_InlineGetAppDomain, _TEXT
+
+ end
+
diff --git a/src/vm/amd64/JitHelpers_InlineGetThread.asm b/src/vm/amd64/JitHelpers_InlineGetThread.asm
new file mode 100644
index 0000000000..05353e8a2f
--- /dev/null
+++ b/src/vm/amd64/JitHelpers_InlineGetThread.asm
@@ -0,0 +1,1332 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+; ***********************************************************************
+; File: JitHelpers_InlineGetThread.asm, see history in jithelp.asm
+;
+; Notes: These routines will be patched at runtime with the location in
+; the TLS to find the Thread* and are the fastest implementation
+; of their specific functionality.
+; ***********************************************************************
+
+include AsmMacros.inc
+include asmconstants.inc
+
+; Min amount of stack space that a nested function should allocate.
+MIN_SIZE equ 28h
+
+; Macro to create a patchable inline GetThread. If we decide to create patchable
+; high-TLS inline versions then just change this macro to make sure to create enough
+; space in the asm to patch the high-TLS getter instructions.
+PATCHABLE_INLINE_GETTHREAD macro Reg, PatchLabel
+PATCH_LABEL PatchLabel
+ mov Reg, gs:[OFFSET__TEB__TlsSlots]
+ endm
+
+
+JIT_NEW equ ?JIT_New@@YAPEAVObject@@PEAUCORINFO_CLASS_STRUCT_@@@Z
+Object__DEBUG_SetAppDomain equ ?DEBUG_SetAppDomain@Object@@QEAAXPEAVAppDomain@@@Z
+CopyValueClassUnchecked equ ?CopyValueClassUnchecked@@YAXPEAX0PEAVMethodTable@@@Z
+JIT_Box equ ?JIT_Box@@YAPEAVObject@@PEAUCORINFO_CLASS_STRUCT_@@PEAX@Z
+g_pStringClass equ ?g_pStringClass@@3PEAVMethodTable@@EA
+FramedAllocateString equ ?FramedAllocateString@@YAPEAVStringObject@@K@Z
+JIT_NewArr1 equ ?JIT_NewArr1@@YAPEAVObject@@PEAUCORINFO_CLASS_STRUCT_@@_J@Z
+
+INVALIDGCVALUE equ 0CCCCCCCDh
+
+extern JIT_NEW:proc
+extern CopyValueClassUnchecked:proc
+extern JIT_Box:proc
+extern g_pStringClass:QWORD
+extern FramedAllocateString:proc
+extern JIT_NewArr1:proc
+
+extern JIT_InternalThrow:proc
+
+ifdef _DEBUG
+extern DEBUG_TrialAllocSetAppDomain:proc
+extern DEBUG_TrialAllocSetAppDomain_NoScratchArea:proc
+endif
+
+; IN: rcx: MethodTable*
+; OUT: rax: new object
+LEAF_ENTRY JIT_TrialAllocSFastMP_InlineGetThread, _TEXT
+ mov edx, [rcx + OFFSET__MethodTable__m_BaseSize]
+
+ ; m_BaseSize is guaranteed to be a multiple of 8.
+
+ PATCHABLE_INLINE_GETTHREAD r11, JIT_TrialAllocSFastMP_InlineGetThread__PatchTLSOffset
+ mov r10, [r11 + OFFSET__Thread__m_alloc_context__alloc_limit]
+ mov rax, [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr]
+
+ add rdx, rax
+
+ cmp rdx, r10
+ ja AllocFailed
+
+ mov [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr], rdx
+ mov [rax], rcx
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain_NoScratchArea
+endif ; _DEBUG
+
+ ret
+
+ AllocFailed:
+ jmp JIT_NEW
+LEAF_END JIT_TrialAllocSFastMP_InlineGetThread, _TEXT
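+
+; A hedged C sketch of the bump-allocation fast path above (field names
+; mirror the alloc_context offsets used in the asm):
+;
+;   Object* TrialAllocSFast(MethodTable* pMT, Thread* t) {
+;       uint8_t* obj = t->alloc_context.alloc_ptr;
+;       uint8_t* end = obj + pMT->m_BaseSize;      // BaseSize is a multiple of 8
+;       if (end > t->alloc_context.alloc_limit)
+;           return JIT_New(pMT);                   // slow path
+;       t->alloc_context.alloc_ptr = end;
+;       *(MethodTable**)obj = pMT;                 // install the method table
+;       return (Object*)obj;
+;   }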
+
+; HCIMPL2(Object*, JIT_Box, CORINFO_CLASS_HANDLE type, void* unboxedData)
+NESTED_ENTRY JIT_BoxFastMP_InlineGetThread, _TEXT
+ mov rax, [rcx + OFFSETOF__MethodTable__m_pWriteableData]
+
+ ; Check whether the class has not been initialized
+ test dword ptr [rax + OFFSETOF__MethodTableWriteableData__m_dwFlags], MethodTableWriteableData__enum_flag_Unrestored
+ jnz ClassNotInited
+
+ mov r8d, [rcx + OFFSET__MethodTable__m_BaseSize]
+
+ ; m_BaseSize is guaranteed to be a multiple of 8.
+
+ PATCHABLE_INLINE_GETTHREAD r11, JIT_BoxFastMPIGT__PatchTLSLabel
+ mov r10, [r11 + OFFSET__Thread__m_alloc_context__alloc_limit]
+ mov rax, [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr]
+
+ add r8, rax
+
+ cmp r8, r10
+ ja AllocFailed
+
+ mov [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr], r8
+ mov [rax], rcx
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain_NoScratchArea
+endif ; _DEBUG
+
+ ; Check whether the object contains pointers
+ test dword ptr [rcx + OFFSETOF__MethodTable__m_dwFlags], MethodTable__enum_flag_ContainsPointers
+ jnz ContainsPointers
+
+ ; We have no pointers - emit a simple inline copy loop
+ ; Copy the contents from the end
+ mov ecx, [rcx + OFFSET__MethodTable__m_BaseSize]
+ sub ecx, 18h ; sizeof(ObjHeader) + sizeof(Object) + last slot
+
+align 16
+ CopyLoop:
+ mov r8, [rdx+rcx]
+ mov [rax+rcx+8], r8
+ sub ecx, 8
+ jge CopyLoop
+ REPRET
+
+ ContainsPointers:
+ ; Do call to CopyValueClassUnchecked(object, data, pMT)
+ push_vol_reg rax
+ alloc_stack 20h
+ END_PROLOGUE
+
+ mov r8, rcx
+ lea rcx, [rax + 8]
+ call CopyValueClassUnchecked
+
+ add rsp, 20h
+ pop rax
+ ret
+
+ ClassNotInited:
+ AllocFailed:
+ jmp JIT_Box
+NESTED_END JIT_BoxFastMP_InlineGetThread, _TEXT
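+
+; A hedged C sketch of the boxing fast path above (allocation as in the
+; trial-alloc helper; the names are illustrative and the backwards copy loop
+; is shown as a plain memcpy):
+;
+;   Object* BoxFast(MethodTable* pMT, void* data, Thread* t) {
+;       if (ClassNotInited(pMT)) return JIT_Box(pMT, data);    // slow path
+;       Object* obj = BumpAllocate(pMT, t);                    // as in TrialAllocSFast
+;       void* payload = (uint8_t*)obj + sizeof(void*);         // just past the MT slot
+;       if (ContainsPointers(pMT))
+;           CopyValueClassUnchecked(payload, data, pMT);       // GC-aware copy
+;       else
+;           memcpy(payload, data, pMT->m_BaseSize - 0x10);     // minus ObjHeader + MT slot
+;       return obj;
+;   }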
+
+FIX_INDIRECTION macro Reg
+ifdef FEATURE_PREJIT
+ test Reg, 1
+ jz @F
+ mov Reg, [Reg-1]
+ @@:
+endif
+endm
+
+LEAF_ENTRY AllocateStringFastMP_InlineGetThread, _TEXT
+ ; We were passed the number of characters in ECX
+
+ ; we need to load the method table for string from the global
+ mov r9, [g_pStringClass]
+
+ ; Instead of doing elaborate overflow checks, we just limit the number of elements
+ ; to (LARGE_OBJECT_SIZE - 256)/sizeof(WCHAR) or less.
+ ; This will avoid all overflow problems, as well as making sure
+ ; big string objects are correctly allocated in the big object heap.
+
+ cmp ecx, (ASM_LARGE_OBJECT_SIZE - 256)/2
+ jae OversizedString
+
+ mov edx, [r9 + OFFSET__MethodTable__m_BaseSize]
+
+ ; Calculate the final size to allocate.
+ ; We need to calculate baseSize + cnt*2, then round that up by adding 7 and ANDing with ~7.
+
+ lea edx, [edx + ecx*2 + 7]
+ and edx, -8
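+ ; e.g. with a hypothetical baseSize of 20h and cnt = 3:
+ ; 20h + 2*3 + 7 = 2Dh, and ANDing with -8 gives 28h (a multiple of 8)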
+
+ PATCHABLE_INLINE_GETTHREAD r11, AllocateStringFastMP_InlineGetThread__PatchTLSOffset
+ mov r10, [r11 + OFFSET__Thread__m_alloc_context__alloc_limit]
+ mov rax, [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr]
+
+ add rdx, rax
+
+ cmp rdx, r10
+ ja AllocFailed
+
+ mov [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr], rdx
+ mov [rax], r9
+
+ mov [rax + OFFSETOF__StringObject__m_StringLength], ecx
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain_NoScratchArea
+endif ; _DEBUG
+
+ ret
+
+ OversizedString:
+ AllocFailed:
+ jmp FramedAllocateString
+LEAF_END AllocateStringFastMP_InlineGetThread, _TEXT
+
+; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
+LEAF_ENTRY JIT_NewArr1VC_MP_InlineGetThread, _TEXT
+ ; We were passed a type descriptor in RCX, which contains the (shared)
+ ; array method table and the element type.
+
+ ; The element count is in RDX
+
+ ; NOTE: if this code is ported for CORINFO_HELP_NEWSFAST_ALIGN8, it will need
+ ; to emulate the double-specific behavior of JIT_TrialAlloc::GenAllocArray.
+
+ ; Do a conservative check here. This is to avoid overflow while doing the calculations. We don't
+ ; have to worry about "large" objects, since the allocation quantum is never big enough for
+ ; LARGE_OBJECT_SIZE.
+
+ ; For value classes, this needs to be 2^16 - slack (2^32 / max component size).
+ ; The slack includes the size of the array header and round-up for alignment. Use 256 for the
+ ; slack value out of laziness.
+
+ ; In both cases we do a final overflow check after adding to the alloc_ptr.
+
+ ; we need to load the true method table from the type desc
+ mov r9, [rcx + OFFSETOF__ArrayTypeDesc__m_TemplateMT - 2]
+
+ FIX_INDIRECTION r9
+
+ cmp rdx, (65535 - 256)
+ jae OversizedArray
+
+ movzx r8d, word ptr [r9 + OFFSETOF__MethodTable__m_dwFlags] ; component size is low 16 bits
+ imul r8d, edx
+ add r8d, dword ptr [r9 + OFFSET__MethodTable__m_BaseSize]
+
+ ; round the size to a multiple of 8
+
+ add r8d, 7
+ and r8d, -8
+
+
+ PATCHABLE_INLINE_GETTHREAD r11, JIT_NewArr1VC_MP_InlineGetThread__PatchTLSOffset
+ mov r10, [r11 + OFFSET__Thread__m_alloc_context__alloc_limit]
+ mov rax, [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr]
+
+ add r8, rax
+ jc AllocFailed
+
+ cmp r8, r10
+ ja AllocFailed
+
+ mov [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr], r8
+ mov [rax], r9
+
+ mov dword ptr [rax + OFFSETOF__ArrayBase__m_NumComponents], edx
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain_NoScratchArea
+endif ; _DEBUG
+
+ ret
+
+ OversizedArray:
+ AllocFailed:
+ jmp JIT_NewArr1
+LEAF_END JIT_NewArr1VC_MP_InlineGetThread, _TEXT
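+
+; A hedged C sketch of the size computation and checks above (bump allocation
+; as in the earlier helpers; 32-bit arithmetic is safe because the element
+; count was capped first):
+;
+;   Object* NewArr1VC(MethodTable* pMT, uint64_t cnt, Thread* t) {
+;       if (cnt >= 65535 - 256) return SlowPath();   // conservative cap
+;       uint32_t size = (ComponentSize(pMT) * (uint32_t)cnt + pMT->m_BaseSize + 7) & ~7u;
+;       uint8_t* obj = t->alloc_context.alloc_ptr;
+;       uint8_t* end = obj + size;
+;       if (end < obj || end > t->alloc_context.alloc_limit)
+;           return SlowPath();                       // carry or limit exceeded
+;       t->alloc_context.alloc_ptr = end;
+;       *(MethodTable**)obj = pMT;
+;       ((ArrayBase*)obj)->m_NumComponents = (uint32_t)cnt;
+;       return (Object*)obj;
+;   }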
+
+
+; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
+LEAF_ENTRY JIT_NewArr1OBJ_MP_InlineGetThread, _TEXT
+ ; We were passed a type descriptor in RCX, which contains the (shared)
+ ; array method table and the element type.
+
+ ; The element count is in RDX
+
+ ; NOTE: if this code is ported for CORINFO_HELP_NEWSFAST_ALIGN8, it will need
+ ; to emulate the double-specific behavior of JIT_TrialAlloc::GenAllocArray.
+
+ ; Verifies that LARGE_OBJECT_SIZE fits in 32 bits. This allows us to do array size
+ ; arithmetic using 32-bit registers.
+ .erre ASM_LARGE_OBJECT_SIZE lt 100000000h
+
+ cmp rdx, (ASM_LARGE_OBJECT_SIZE - 256)/8 ; sizeof(void*)
+ jae OversizedArray
+
+ ; we need to load the true method table from the type desc
+ mov r9, [rcx + OFFSETOF__ArrayTypeDesc__m_TemplateMT - 2]
+
+ FIX_INDIRECTION r9
+
+ ; In this case we know the element size is sizeof(void *), or 8 for x64
+ ; This helps us in two ways - we can shift instead of multiplying, and
+ ; there's no need to align the size either
+
+ mov r8d, dword ptr [r9 + OFFSET__MethodTable__m_BaseSize]
+ lea r8d, [r8d + edx * 8]
+
+ ; No need for rounding in this case - element size is 8, and m_BaseSize is guaranteed
+ ; to be a multiple of 8.
+
+ PATCHABLE_INLINE_GETTHREAD r11, JIT_NewArr1OBJ_MP_InlineGetThread__PatchTLSOffset
+ mov r10, [r11 + OFFSET__Thread__m_alloc_context__alloc_limit]
+ mov rax, [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr]
+
+ add r8, rax
+
+ cmp r8, r10
+ ja AllocFailed
+
+ mov [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr], r8
+ mov [rax], r9
+
+ mov dword ptr [rax + OFFSETOF__ArrayBase__m_NumComponents], edx
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain_NoScratchArea
+endif ; _DEBUG
+
+ ret
+
+ OversizedArray:
+ AllocFailed:
+ jmp JIT_NewArr1
+LEAF_END JIT_NewArr1OBJ_MP_InlineGetThread, _TEXT
+
+
+MON_ENTER_STACK_SIZE equ 00000020h
+MON_EXIT_STACK_SIZE equ 00000068h
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+MON_ENTER_STACK_SIZE_INLINEGETTHREAD equ 00000020h
+MON_EXIT_STACK_SIZE_INLINEGETTHREAD equ 00000068h
+endif
+endif
+
+BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX equ 08000000h ; syncblk.h
+BIT_SBLK_IS_HASHCODE equ 04000000h ; syncblk.h
+BIT_SBLK_SPIN_LOCK equ 10000000h ; syncblk.h
+
+SBLK_MASK_LOCK_THREADID equ 000003FFh ; syncblk.h
+SBLK_LOCK_RECLEVEL_INC equ 00000400h ; syncblk.h
+SBLK_MASK_LOCK_RECLEVEL equ 0000FC00h ; syncblk.h
+
+MASK_SYNCBLOCKINDEX equ 03FFFFFFh ; syncblk.h
+STATE_CHECK equ 0FFFFFFFEh
+
+MT_CTX_PROXY_FLAG equ 10000000h
+
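+; Object header dword layout implied by the masks above (a sketch; syncblk.h
+; is the authoritative definition):
+;
+;   bit  28    : BIT_SBLK_SPIN_LOCK              header is transitioning
+;   bit  27    : BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX
+;   bit  26    : BIT_SBLK_IS_HASHCODE            (meaningful when bit 27 is set)
+;   bits 0-25  : hash code or sync block index (MASK_SYNCBLOCKINDEX), or,
+;                for the thin-lock layout (bit 27 clear):
+;   bits 10-15 : recursion level (SBLK_MASK_LOCK_RECLEVEL)
+;   bits 0-9   : owning thread id (SBLK_MASK_LOCK_THREADID)
+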
+g_pSyncTable equ ?g_pSyncTable@@3PEAVSyncTableEntry@@EA
+g_SystemInfo equ ?g_SystemInfo@@3U_SYSTEM_INFO@@A
+g_SpinConstants equ ?g_SpinConstants@@3USpinConstants@@A
+
+extern g_pSyncTable:QWORD
+extern g_SystemInfo:QWORD
+extern g_SpinConstants:QWORD
+
+; JITutil_MonEnterWorker(Object* obj, BYTE* pbLockTaken)
+extern JITutil_MonEnterWorker:proc
+; JITutil_MonTryEnter(Object* obj, INT32 timeout, BYTE* pbLockTaken)
+extern JITutil_MonTryEnter:proc
+; JITutil_MonExitWorker(Object* obj, BYTE* pbLockTaken)
+extern JITutil_MonExitWorker:proc
+; JITutil_MonSignal(AwareLock* lock, BYTE* pbLockTaken)
+extern JITutil_MonSignal:proc
+; JITutil_MonContention(AwareLock* lock, BYTE* pbLockTaken)
+extern JITutil_MonContention:proc
+
+ifdef _DEBUG
+MON_DEBUG equ 1
+endif
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+extern EnterSyncHelper:proc
+extern LeaveSyncHelper:proc
+endif
+endif
+
+
+MON_ENTER_EPILOG_ADJUST_STACK macro
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ add rsp, MON_ENTER_STACK_SIZE_INLINEGETTHREAD
+endif
+endif
+ endm
+
+
+MON_ENTER_RETURN_SUCCESS macro
+ ; This is sensitive to the potential that pbLockTaken is NULL
+ test rsi, rsi
+ jz @F
+ mov byte ptr [rsi], 1
+ @@:
+ MON_ENTER_EPILOG_ADJUST_STACK
+ pop rsi
+ ret
+
+ endm
+
+
+; The worker versions of these functions are smart about the potential for pbLockTaken
+; to be NULL, and if it is then they treat it as if they don't have a state variable.
+; This is because when locking is not inserted by the JIT (instead by explicit calls to
+; Monitor.Enter() and Monitor.Exit()) we will call these guys.
+;
+; This is a frameless helper for entering a monitor on an object.
+; The object is in ARGUMENT_REG1. This tries the normal case (no
+; blocking or object allocation) in line and calls a framed helper
+; for the other cases.
+;
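+; A hedged C sketch of the thin-lock fast path below (spinning, recursion and
+; the sync-block path are elided; CompareExchange is illustrative and returns
+; true on success):
+;
+;   bool MonEnterFast(Object* obj, Thread* t) {
+;       uint32_t* hdr = SyncBlockDword(obj);   // sits just below the object
+;       uint32_t old = *hdr;
+;       if (old & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_SPIN_LOCK |
+;                  SBLK_MASK_LOCK_THREADID | SBLK_MASK_LOCK_RECLEVEL))
+;           return false;                      // not the simple case
+;       if (t->m_ThreadId > SBLK_MASK_LOCK_THREADID)
+;           return false;                      // id too large for a thin lock
+;       if (!CompareExchange(hdr, old | t->m_ThreadId, old))
+;           return false;                      // contention: retry/backoff path
+;       t->m_dwLockCount++;
+;       return true;                           // lock taken
+;   }
+;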
+; EXTERN_C void JIT_MonEnterWorker_InlineGetThread(Object* obj, /*OUT*/ BYTE* pbLockTaken)
+JIT_HELPER_MONITOR_THUNK JIT_MonEnter, _TEXT
+NESTED_ENTRY JIT_MonEnterWorker_InlineGetThread, _TEXT
+ push_nonvol_reg rsi
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ alloc_stack MON_ENTER_STACK_SIZE_INLINEGETTHREAD
+
+ save_reg_postrsp rcx, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 10h + 0h
+ save_reg_postrsp rdx, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 10h + 8h
+ save_reg_postrsp r8, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 10h + 10h
+ save_reg_postrsp r9, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 10h + 18h
+endif
+endif
+ END_PROLOGUE
+
+ ; Put pbLockTaken in rsi, this can be null
+ mov rsi, rdx
+
+ ; Check if the instance is NULL
+ test rcx, rcx
+ jz FramedLockHelper
+
+ PATCHABLE_INLINE_GETTHREAD r11, JIT_MonEnterWorker_InlineGetThread_GetThread_PatchLabel
+
+ ; Initialize delay value for retry with exponential backoff
+ mov r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwInitialDuration]
+
+ ; Check if we can abort here
+ mov eax, dword ptr [r11 + OFFSETOF__Thread__m_State]
+ and eax, THREAD_CATCHATSAFEPOINT_BITS
+ ; Go through the slow code path to initiate ThreadAbort
+ jnz FramedLockHelper
+
+ ; r8 will hold the syncblockindex address
+ lea r8, [rcx - OFFSETOF__ObjHeader__SyncBlkIndex]
+
+ RetryThinLock:
+ ; Fetch the syncblock dword
+ mov eax, dword ptr [r8]
+
+ ; Check whether we have the "thin lock" layout, the lock is free and the spin lock bit is not set
+ test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL
+ jnz NeedMoreTests
+
+ ; Everything is fine - get the thread id to store in the lock
+ mov edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId]
+
+ ; If the thread id is too large, we need a syncblock for sure
+ cmp edx, SBLK_MASK_LOCK_THREADID
+ ja FramedLockHelper
+
+ ; We want to store a new value with the current thread id set in the low 10 bits
+ or edx, eax
+ lock cmpxchg dword ptr [r8], edx
+ jnz PrepareToWaitThinLock
+
+ ; Everything went fine and we're done
+ add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1
+
+ ; Done, leave and set pbLockTaken if we have it
+ MON_ENTER_RETURN_SUCCESS
+
+ NeedMoreTests:
+ ; OK, not the simple case, find out which case it is
+ test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX
+ jnz HaveHashOrSyncBlockIndex
+
+ ; The header is transitioning (the spin lock bit is set); treat this as if the lock was taken
+ test eax, BIT_SBLK_SPIN_LOCK
+ jnz PrepareToWaitThinLock
+
+ ; Here we know we have the "thin lock" layout, but the lock is not free.
+ ; It could still be the recursion case, compare the thread id to check
+ mov edx, eax
+ and edx, SBLK_MASK_LOCK_THREADID
+ cmp edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId]
+ jne PrepareToWaitThinLock
+
+ ; Ok, the thread id matches, it's the recursion case.
+ ; Bump up the recursion level and check for overflow
+ lea edx, [eax + SBLK_LOCK_RECLEVEL_INC]
+ test edx, SBLK_MASK_LOCK_RECLEVEL
+ jz FramedLockHelper
+
+ ; Try to put the new recursion level back. If the header was changed in the meantime
+ ; we need a full retry, because the layout could have changed
+ lock cmpxchg dword ptr [r8], edx
+ jnz RetryHelperThinLock
+
+ ; Done, leave and set pbLockTaken if we have it
+ MON_ENTER_RETURN_SUCCESS
+
+ PrepareToWaitThinLock:
+ ; If we are on an MP system, we try spinning for a certain number of iterations
+ cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1
+ jle FramedLockHelper
+
+ ; Exponential backoff; delay by approximately 2*r10 clock cycles
+ mov eax, r10d
+ delayLoopThinLock:
+ pause ; indicate to the CPU that we are spin waiting
+ sub eax, 1
+ jnz delayLoopThinLock
+
+ ; Next time, wait a factor longer
+ imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor]
+
+ cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration]
+ jle RetryHelperThinLock
+
+ jmp FramedLockHelper
+
+ RetryHelperThinLock:
+ jmp RetryThinLock
+
+ HaveHashOrSyncBlockIndex:
+ ; If we have a hash code already, we need to create a sync block
+ test eax, BIT_SBLK_IS_HASHCODE
+ jnz FramedLockHelper
+
+ ; OK, we have a sync block index; just mask off the top bits and grab the syncblock index
+ and eax, MASK_SYNCBLOCKINDEX
+
+ ; Get the sync block pointer
+ mov rdx, qword ptr [g_pSyncTable]
+ shl eax, 4h
+ mov rdx, [rdx + rax + OFFSETOF__SyncTableEntry__m_SyncBlock]
+
+ ; Check if the sync block has been allocated
+ test rdx, rdx
+ jz FramedLockHelper
+
+ ; Get a pointer to the lock object
+ lea rdx, [rdx + OFFSETOF__SyncBlock__m_Monitor]
+
+ ; Attempt to acquire the lock
+ RetrySyncBlock:
+ mov eax, dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld]
+ test eax, eax
+ jne HaveWaiters
+
+ ; Common case, lock isn't held and there are no waiters. Attempt to
+ ; gain ownership ourselves
+ xor ecx, ecx
+ inc ecx
+
+ lock cmpxchg dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld], ecx
+ jnz RetryHelperSyncBlock
+
+ ; Success. Save the thread object in the lock and increment the use count
+ mov qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11
+ add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1
+ add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov rcx, [rsp + MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 8h] ; return address
+ ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock)
+ call EnterSyncHelper
+endif
+endif
+
+ ; Done, leave and set pbLockTaken if we have it
+ MON_ENTER_RETURN_SUCCESS
+
+ ; It's possible to get here with waiters but no lock held, but in this
+ ; case a signal is about to be fired which will wake up the waiter. So
+ ; for fairness' sake we should wait too.
+ ; Check first for recursive lock attempts on the same thread.
+ HaveWaiters:
+ ; Is mutex already owned by current thread?
+ cmp [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11
+ jne PrepareToWait
+
+ ; Yes, bump our use count.
+ add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov rcx, [rsp + MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 8h] ; return address
+ ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock)
+ call EnterSyncHelper
+endif
+endif
+ ; Done, leave and set pbLockTaken if we have it
+ MON_ENTER_RETURN_SUCCESS
+
+ PrepareToWait:
+ ; If we are on a MP system we try spinning for a certain number of iterations
+ cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1
+ jle HaveWaiters1
+
+ ; Exponential backoff: delay by approximately 2*r10 clock cycles
+ mov eax, r10d
+ delayLoop:
+ pause ; indicate to the CPU that we are spin waiting
+ sub eax, 1
+ jnz delayLoop
+
+ ; Next time, wait a factor longer
+ imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor]
+
+ cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration]
+ jle RetrySyncBlock
+
+ HaveWaiters1:
+ mov rcx, rdx
+ mov rdx, rsi
+ MON_ENTER_EPILOG_ADJUST_STACK
+ pop rsi
+ ; void JITutil_MonContention(AwareLock* lock, BYTE* pbLockTaken)
+ jmp JITutil_MonContention
+
+ RetryHelperSyncBlock:
+ jmp RetrySyncBlock
+
+ FramedLockHelper:
+ mov rdx, rsi
+ MON_ENTER_EPILOG_ADJUST_STACK
+ pop rsi
+ ; void JITutil_MonEnterWorker(Object* obj, BYTE* pbLockTaken)
+ jmp JITutil_MonEnterWorker
+
+NESTED_END JIT_MonEnterWorker_InlineGetThread, _TEXT
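+
+; The thin-lock fast path above reads the object header dword and, when no
+; hash/syncblock, spin lock, owner or recursion bits are set, installs the
+; current thread id with one interlocked compare-exchange. Approximately, in
+; C (bit names from syncblk.h; the rest is a sketch):
+;
+;   DWORD bits = obj->m_SyncBlockValue;              // eax
+;   if ((bits & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_SPIN_LOCK |
+;                SBLK_MASK_LOCK_THREADID | SBLK_MASK_LOCK_RECLEVEL)) == 0 &&
+;       thread->m_ThreadId <= SBLK_MASK_LOCK_THREADID &&
+;       InterlockedCompareExchange(&obj->m_SyncBlockValue,
+;                                  bits | thread->m_ThreadId, bits) == bits)
+;   {
+;       thread->m_dwLockCount++;                     // lock acquired
+;   }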
+
+
+MON_EXIT_EPILOG_ADJUST_STACK macro
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ add rsp, MON_EXIT_STACK_SIZE_INLINEGETTHREAD
+endif
+endif
+ endm
+
+MON_EXIT_RETURN_SUCCESS macro
+ ; This is sensitive to the potential that pbLockTaken is null
+ test r10, r10
+ jz @F
+ mov byte ptr [r10], 0
+ @@:
+ MON_EXIT_EPILOG_ADJUST_STACK
+ ret
+
+ endm
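+
+; In C terms the macro above is just a guarded store (r10 carries pbLockTaken):
+;
+;   if (pbLockTaken != NULL)
+;       *pbLockTaken = 0;       // the lock is no longer held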
+
+
+; The worker versions of these functions are smart about the potential for pbLockTaken
+; to be NULL, and if it is then they treat it as if they don't have a state variable.
+; This is because when locking is not inserted by the JIT (instead by explicit calls to
+; Monitor.Enter() and Monitor.Exit()) we will call these helpers.
+;
+; This is a frameless helper for exiting a monitor on an object.
+; The object is in ARGUMENT_REG1. This tries the normal case (no
+; blocking or object allocation) in line and calls a framed helper
+; for the other cases.
+;
+; void JIT_MonExitWorker_InlineGetThread(Object* obj, BYTE* pbLockTaken)
+JIT_HELPER_MONITOR_THUNK JIT_MonExit, _TEXT
+NESTED_ENTRY JIT_MonExitWorker_InlineGetThread, _TEXT
+ .savereg rcx, 0
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ alloc_stack MON_EXIT_STACK_SIZE_INLINEGETTHREAD
+
+ save_reg_postrsp rcx, MON_EXIT_STACK_SIZE_INLINEGETTHREAD + 8h + 0h
+ save_reg_postrsp rdx, MON_EXIT_STACK_SIZE_INLINEGETTHREAD + 8h + 8h
+ save_reg_postrsp r8, MON_EXIT_STACK_SIZE_INLINEGETTHREAD + 8h + 10h
+ save_reg_postrsp r9, MON_EXIT_STACK_SIZE_INLINEGETTHREAD + 8h + 18h
+endif
+endif
+ END_PROLOGUE
+
+ ; pbLockTaken is stored in r10; it can be null
+ mov r10, rdx
+
+ ; if pbLockTaken is NULL then we got here without a state variable, avoid the
+ ; next comparison in that case as it will AV
+ test rdx, rdx
+ jz Null_pbLockTaken
+
+ ; If the lock wasn't taken then we bail quickly without doing anything
+ cmp byte ptr [rdx], 0
+ je LockNotTaken
+
+ Null_pbLockTaken:
+ ; Check if the instance is null
+ test rcx, rcx
+ jz FramedLockHelper
+
+ PATCHABLE_INLINE_GETTHREAD r11, JIT_MonExitWorker_InlineGetThread_GetThread_PatchLabel
+
+ ; r8 will hold the syncblockindex address
+ lea r8, [rcx - OFFSETOF__ObjHeader__SyncBlkIndex]
+
+ RetryThinLock:
+ ; Fetch the syncblock dword
+ mov eax, dword ptr [r8]
+ test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK
+ jnz NeedMoreTests
+
+ ; Ok, we have a "thin lock" layout - check whether the thread id matches
+ mov edx, eax
+ and edx, SBLK_MASK_LOCK_THREADID
+ cmp edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId]
+ jne FramedLockHelper
+
+ ; check the recursion level
+ test eax, SBLK_MASK_LOCK_RECLEVEL
+ jne DecRecursionLevel
+
+ ; It's zero -- we're leaving the lock.
+ ; So try to put back a zero thread id.
+ ; edx and eax match in the thread id bits, and edx is zero elsewhere, so the xor is sufficient
+ xor edx, eax
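+ ; (Worked example: if eax = 20000005h - an unrelated header bit plus
+ ; thread id 5 - then edx = 5 after the mask, and edx xor eax = 20000000h,
+ ; the original header with the thread id field cleared.)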
+ lock cmpxchg dword ptr [r8], edx
+ jnz RetryThinLockHelper1 ; forward jump to avoid mispredict on success
+
+ ; Dec the dwLockCount on the thread
+ sub dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1
+
+ ; Done, leave and set pbLockTaken if we have it
+ MON_EXIT_RETURN_SUCCESS
+
+ RetryThinLockHelper1:
+ jmp RetryThinLock
+
+ DecRecursionLevel:
+ lea edx, [eax - SBLK_LOCK_RECLEVEL_INC]
+ lock cmpxchg dword ptr [r8], edx
+ jnz RetryThinLockHelper2 ; forward jump to avoid mispredict on success
+
+ ; We're done, leave and set pbLockTaken if we have it
+ MON_EXIT_RETURN_SUCCESS
+
+ RetryThinLockHelper2:
+ jmp RetryThinLock
+
+ NeedMoreTests:
+ ; Forward all special cases to the slow helper
+ test eax, BIT_SBLK_IS_HASHCODE + BIT_SBLK_SPIN_LOCK
+ jnz FramedLockHelper
+
+ ; Get the sync block index and use it to compute the sync block pointer
+ mov rdx, qword ptr [g_pSyncTable]
+ and eax, MASK_SYNCBLOCKINDEX
+ shl eax, 4
+ mov rdx, [rdx + rax + OFFSETOF__SyncTableEntry__m_SyncBlock]
+
+ ; Was there a sync block?
+ test rdx, rdx
+ jz FramedLockHelper
+
+ ; Get a pointer to the lock object.
+ lea rdx, [rdx + OFFSETOF__SyncBlock__m_Monitor]
+
+ ; Check if the lock is held.
+ cmp qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11
+ jne FramedLockHelper
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov [rsp + 28h], rcx
+ mov [rsp + 30h], rdx
+ mov [rsp + 38h], r10
+ mov [rsp + 40h], r11
+
+ mov rcx, [rsp + MON_EXIT_STACK_SIZE_INLINEGETTHREAD ] ; return address
+ ; void LeaveSyncHelper(UINT_PTR caller, AwareLock* lock)
+ call LeaveSyncHelper
+
+ mov rcx, [rsp + 28h]
+ mov rdx, [rsp + 30h]
+ mov r10, [rsp + 38h]
+ mov r11, [rsp + 40h]
+endif
+endif
+
+ ; Reduce our recursion count
+ sub dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1
+ jz LastRecursion
+
+ ; Done, leave and set pbLockTaken if we have it
+ MON_EXIT_RETURN_SUCCESS
+
+ RetryHelperThinLock:
+ jmp RetryThinLock
+
+ FramedLockHelper:
+ mov rdx, r10
+ MON_EXIT_EPILOG_ADJUST_STACK
+ ; void JITutil_MonExitWorker(Object* obj, BYTE* pbLockTaken)
+ jmp JITutil_MonExitWorker
+
+ LastRecursion:
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov rax, [rdx + OFFSETOF__AwareLock__m_HoldingThread]
+endif
+endif
+
+ sub dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1
+ mov qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], 0
+
+ Retry:
+ mov eax, dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld]
+ lea r9d, [eax - 1]
+ lock cmpxchg dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld], r9d
+ jne RetryHelper
+
+ test eax, STATE_CHECK
+ jne MustSignal
+
+ ; Done, leave and set pbLockTaken if we have it
+ MON_EXIT_RETURN_SUCCESS
+
+ MustSignal:
+ mov rcx, rdx
+ mov rdx, r10
+ MON_EXIT_EPILOG_ADJUST_STACK
+ ; void JITutil_MonSignal(AwareLock* lock, BYTE* pbLockTaken)
+ jmp JITutil_MonSignal
+
+ RetryHelper:
+ jmp Retry
+
+ LockNotTaken:
+ MON_EXIT_EPILOG_ADJUST_STACK
+ REPRET
+NESTED_END JIT_MonExitWorker_InlineGetThread, _TEXT
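+
+; The syncblock release above relies on m_MonitorHeld keeping the "held" flag
+; in bit 0 with the waiter count in the bits above it (hence STATE_CHECK equ
+; 0FFFFFFFEh). A rough C equivalent of the Retry loop:
+;
+;   for (;;) {
+;       LONG held = lock->m_MonitorHeld;
+;       if (InterlockedCompareExchange(&lock->m_MonitorHeld,
+;                                      held - 1, held) == held) {
+;           if (held & STATE_CHECK)                    // any waiters?
+;               JITutil_MonSignal(lock, pbLockTaken);  // wake one up
+;           break;
+;       }
+;   }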
+
+
+; This is a frameless helper for trying to enter a monitor on an object.
+; The object is in ARGUMENT_REG1 and a timeout in ARGUMENT_REG2. This tries the
+; normal case (no object allocation) in line and calls a framed helper for the
+; other cases.
+;
+; void JIT_MonTryEnter_InlineGetThread(Object* obj, INT32 timeOut, BYTE* pbLockTaken)
+NESTED_ENTRY JIT_MonTryEnter_InlineGetThread, _TEXT
+ ; save rcx, rdx (timeout) in the shadow space
+ .savereg rcx, 8h
+ mov [rsp + 8h], rcx
+ .savereg rdx, 10h
+ mov [rsp + 10h], rdx
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ alloc_stack MON_ENTER_STACK_SIZE_INLINEGETTHREAD
+
+; rcx has already been saved
+; save_reg_postrsp rcx, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 8h + 0h
+; rdx has already been saved
+; save_reg_postrsp rdx, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 8h + 8h
+ save_reg_postrsp r8, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 8h + 10h
+ save_reg_postrsp r9, MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 8h + 18h
+endif
+endif
+ END_PROLOGUE
+
+ ; Check if the instance is NULL
+ test rcx, rcx
+ jz FramedLockHelper
+
+ ; Check if the timeout looks valid
+ cmp edx, -1
+ jl FramedLockHelper
+
+ PATCHABLE_INLINE_GETTHREAD r11, JIT_MonTryEnter_GetThread_PatchLabel
+
+ ; Initialize delay value for retry with exponential backoff
+ mov r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwInitialDuration]
+
+ ; Check if we can abort here
+ mov eax, dword ptr [r11 + OFFSETOF__Thread__m_State]
+ and eax, THREAD_CATCHATSAFEPOINT_BITS
+ ; Go through the slow code path to initiate ThreadAbort
+ jnz FramedLockHelper
+
+ ; r9 will hold the syncblockindex address
+ lea r9, [rcx - OFFSETOF__ObjHeader__SyncBlkIndex]
+
+ RetryThinLock:
+ ; Fetch the syncblock dword
+ mov eax, dword ptr [r9]
+
+ ; Check whether we have the "thin lock" layout, the lock is free and the spin lock bit is not set
+ test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL
+ jne NeedMoreTests
+
+ ; Everything is fine - get the thread id to store in the lock
+ mov edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId]
+
+ ; If the thread id is too large, we need a syncblock for sure
+ cmp edx, SBLK_MASK_LOCK_THREADID
+ ja FramedLockHelper
+
+ ; We want to store a new value with the current thread id set in the low 10 bits
+ or edx, eax
+ lock cmpxchg dword ptr [r9], edx
+ jnz RetryHelperThinLock
+
+ ; Got the lock, everything is fine
+ add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1
+ ; Return TRUE
+ mov byte ptr [r8], 1
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ add rsp, MON_ENTER_STACK_SIZE_INLINEGETTHREAD
+endif
+endif
+ ret
+
+ NeedMoreTests:
+ ; OK, not the simple case, find out which case it is
+ test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX
+ jnz HaveHashOrSyncBlockIndex
+
+ ; If the spin lock bit is set the header is transitioning, so retry the thin lock
+ test eax, BIT_SBLK_SPIN_LOCK
+ jnz RetryHelperThinLock
+
+ ; Here we know we have the "thin lock" layout, but the lock is not free.
+ ; It could still be the recursion case, compare the thread id to check
+ mov edx, eax
+ and edx, SBLK_MASK_LOCK_THREADID
+ cmp edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId]
+ jne PrepareToWaitThinLock
+
+ ; Ok, the thread id matches, it's the recursion case.
+ ; Bump up the recursion level and check for overflow
+ lea edx, [eax + SBLK_LOCK_RECLEVEL_INC]
+ test edx, SBLK_MASK_LOCK_RECLEVEL
+ jz FramedLockHelper
+
+ ; Try to put the new recursion level back. If the header was changed in the meantime
+ ; we need a full retry, because the layout could have changed
+ lock cmpxchg dword ptr [r9], edx
+ jnz RetryHelperThinLock
+
+ ; Everything went fine and we're done, return TRUE
+ mov byte ptr [r8], 1
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ add rsp, MON_ENTER_STACK_SIZE_INLINEGETTHREAD
+endif
+endif
+ ret
+
+ PrepareToWaitThinLock:
+ ; If we are on an MP system, we try spinning for a certain number of iterations
+ cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1
+ jle FramedLockHelper
+
+ ; Exponential backoff; delay by approximately 2*r10d clock cycles
+ mov eax, r10d
+ DelayLoopThinLock:
+ pause ; indicate to the CPU that we are spin waiting
+ sub eax, 1
+ jnz DelayLoopThinLock
+
+ ; Next time, wait a factor longer
+ imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor]
+
+ cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration]
+ jle RetryHelperThinLock
+
+ jmp FramedLockHelper
+
+ RetryHelperThinLock:
+ jmp RetryThinLock
+
+ HaveHashOrSyncBlockIndex:
+ ; If we have a hash code already, we need to create a sync block
+ test eax, BIT_SBLK_IS_HASHCODE
+ jnz FramedLockHelper
+
+ ; OK, we have a sync block index, so just AND out the top bits and grab the syncblock index
+ and eax, MASK_SYNCBLOCKINDEX
+
+ ; Get the sync block pointer
+ mov rdx, qword ptr [g_pSyncTable]
+ shl eax, 4
+ mov rdx, [rdx + rax + OFFSETOF__SyncTableEntry__m_SyncBlock]
+
+ ; Check if the sync block has been allocated
+ test rdx, rdx
+ jz FramedLockHelper
+
+ ; Get a pointer to the lock object
+ lea rdx, [rdx + OFFSETOF__SyncBlock__m_Monitor]
+
+ RetrySyncBlock:
+ ; Attempt to acquire the lock
+ mov eax, dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld]
+ test eax, eax
+ jne HaveWaiters
+
+ ; Common case, lock isn't held and there are no waiters. Attempt to
+ ; gain ownership ourselves
+ xor ecx, ecx
+ inc ecx
+ lock cmpxchg dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld], ecx
+ jnz RetryHelperSyncBlock
+
+ ; Success. Save the thread object in the lock and increment the use count
+ mov qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11
+ add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1
+ add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov rcx, [rsp + MON_ENTER_STACK_SIZE_INLINEGETTHREAD] ; return address
+ ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock)
+ call EnterSyncHelper
+endif
+endif
+
+ ; Return TRUE
+ mov byte ptr [r8], 1
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ add rsp, MON_ENTER_STACK_SIZE_INLINEGETTHREAD
+endif
+endif
+ ret
+
+ ; It's possible to get here with waiters but no lock held, but in this
+ ; case a signal is about to be fired which will wake up the waiter. So
+ ; for fairness' sake we should wait too.
+ ; Check first for recursive lock attempts on the same thread.
+ HaveWaiters:
+ ; Is mutex already owned by current thread?
+ cmp [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11
+ jne PrepareToWait
+
+ ; Yes, bump our use count.
+ add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov rcx, [rsp + MON_ENTER_STACK_SIZE_INLINEGETTHREAD] ; return address
+ ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock)
+ call EnterSyncHelper
+endif
+endif
+
+ ; Return TRUE
+ mov byte ptr [r8], 1
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ add rsp, MON_ENTER_STACK_SIZE_INLINEGETTHREAD
+endif
+endif
+ ret
+
+ PrepareToWait:
+ ; If we are on an MP system, we try spinning for a certain number of iterations
+ cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1
+ jle WouldBlock
+
+ ; Exponential backoff; delay by approximately 2*r10d clock cycles
+ mov eax, r10d
+ DelayLoop:
+ pause ; indicate to the CPU that we are spin waiting
+ sub eax, 1
+ jnz DelayLoop
+
+ ; Next time, wait a factor longer
+ imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor]
+
+ cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration]
+ jle RetrySyncBlock
+
+ ; We would need to block to enter the section. Return failure if
+ ; timeout is zero, else call the framed helper to do the blocking
+ ; form of TryEnter.
+ WouldBlock:
+ mov rdx, [rsp + 10h]
+ ; if we are using the _DEBUG stuff then rsp has been adjusted, so
+ ; just overwrite the wrong RDX value that we already retrieved;
+ ; there's really little harm in the extra stack read
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov rdx, [rsp + MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 10h]
+endif
+endif
+ test rdx, rdx
+ jnz Block
+ ; Return FALSE
+ mov byte ptr [r8], 0
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ add rsp, MON_ENTER_STACK_SIZE_INLINEGETTHREAD
+endif
+endif
+ ret
+
+ RetryHelperSyncBlock:
+ jmp RetrySyncBlock
+
+ Block:
+ ; In the Block case we've trashed RCX, restore it
+ mov rcx, [rsp + 8h]
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ ; if we're tracking this stuff then rcx is at a different offset from RSP; we just
+ ; overwrite the wrong value which we just got. This is for debug purposes only,
+ ; so there's really no performance issue here
+ mov rcx, [rsp + MON_ENTER_STACK_SIZE_INLINEGETTHREAD + 8h]
+endif
+endif
+ FramedLockHelper:
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ add rsp, MON_ENTER_STACK_SIZE_INLINEGETTHREAD
+endif
+endif
+ mov rdx, [rsp + 10h]
+ ; void JITutil_MonTryEnter(Object* obj, INT32 timeout)
+ jmp JITutil_MonTryEnter
+
+NESTED_END JIT_MonTryEnter_InlineGetThread, _TEXT
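+
+; Summarizing the TryEnter contract implemented above (a sketch, not a spec):
+;
+;   // result is written through pbLockTaken (r8): 1 = taken, 0 = not taken
+;   // timeout == -1 means wait forever; timeout == 0 means never block;
+;   // timeout < -1 is handed to the framed helper, which validates it
+;   void JIT_MonTryEnter_InlineGetThread(Object* obj, INT32 timeOut, BYTE* pbLockTaken);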
+
+
+MON_ENTER_STATIC_RETURN_SUCCESS macro
+ ; pbLockTaken is never null for static helpers
+ test rdx, rdx
+ mov byte ptr [rdx], 1
+ REPRET
+
+ endm
+
+MON_EXIT_STATIC_RETURN_SUCCESS macro
+ ; pbLockTaken is never null for static helpers
+ mov byte ptr [rdx], 0
+ REPRET
+
+ endm
+
+
+; This is a frameless helper for entering a static monitor on a class.
+; The methoddesc is in ARGUMENT_REG1. This tries the normal case (no
+; blocking or object allocation) in line and calls a framed helper
+; for the other cases.
+;
+; void JIT_MonEnterStatic_InlineGetThread(AwareLock *lock, BYTE *pbLockTaken)
+NESTED_ENTRY JIT_MonEnterStatic_InlineGetThread, _TEXT
+ .savereg rcx, 0
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ alloc_stack MIN_SIZE
+ save_reg_postrsp rcx, MIN_SIZE + 8h + 0h
+endif
+endif
+ END_PROLOGUE
+
+ ; Attempt to acquire the lock
+ Retry:
+ mov eax, dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld]
+ test eax, eax
+ jne HaveWaiters
+
+ ; Common case; lock isn't held and there are no waiters. Attempt to
+ ; gain ownership by ourselves.
+ mov r10d, 1
+
+ lock cmpxchg dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld], r10d
+ jnz RetryHelper
+
+ PATCHABLE_INLINE_GETTHREAD rax, JIT_MonEnterStaticWorker_InlineGetThread_GetThread_PatchLabel_1
+
+ mov qword ptr [rcx + OFFSETOF__AwareLock__m_HoldingThread], rax
+ add dword ptr [rcx + OFFSETOF__AwareLock__m_Recursion], 1
+ add dword ptr [rax + OFFSETOF__Thread__m_dwLockCount], 1
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov rdx, rcx
+ mov rcx, [rsp]
+ add rsp, MIN_SIZE
+ ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock)
+ jmp EnterSyncHelper
+endif
+endif
+ MON_ENTER_STATIC_RETURN_SUCCESS
+
+ ; It's possible to get here with waiters but no lock held; in this
+ ; case a signal is about to be fired which will wake up a waiter. So
+ ; for fairness' sake we should wait too.
+ ; Check first for recursive lock attempts on the same thread.
+ HaveWaiters:
+ PATCHABLE_INLINE_GETTHREAD rax, JIT_MonEnterStaticWorker_InlineGetThread_GetThread_PatchLabel_2
+
+ ; Is mutex already owned by current thread?
+ cmp [rcx + OFFSETOF__AwareLock__m_HoldingThread], rax
+ jne PrepareToWait
+
+ ; Yes, bump our use count.
+ add dword ptr [rcx + OFFSETOF__AwareLock__m_Recursion], 1
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov rdx, rcx
+ mov rcx, [rsp + MIN_SIZE]
+ add rsp, MIN_SIZE
+ ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock)
+ jmp EnterSyncHelper
+endif
+endif
+ ret
+
+ PrepareToWait:
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ add rsp, MIN_SIZE
+endif
+endif
+ ; void JITutil_MonContention(AwareLock* obj, BYTE* pbLockTaken)
+ jmp JITutil_MonContention
+
+ RetryHelper:
+ jmp Retry
+NESTED_END JIT_MonEnterStatic_InlineGetThread, _TEXT
+
+; A frameless helper for exiting a static monitor on a class.
+; The methoddesc is in ARGUMENT_REG1. This tries the normal case (no
+; blocking or object allocation) in line and calls a framed helper
+; for the other cases.
+;
+; void JIT_MonExitStatic_InlineGetThread(AwareLock *lock, BYTE *pbLockTaken)
+NESTED_ENTRY JIT_MonExitStatic_InlineGetThread, _TEXT
+ .savereg rcx, 0
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ alloc_stack MIN_SIZE
+ save_reg_postrsp rcx, MIN_SIZE + 8h + 0h
+endif
+endif
+ END_PROLOGUE
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ push rsi
+ push rdi
+ mov rsi, rcx
+ mov rdi, rdx
+ mov rdx, [rsp + 8]
+ call LeaveSyncHelper
+ mov rcx, rsi
+ mov rdx, rdi
+ pop rdi
+ pop rsi
+endif
+endif
+ PATCHABLE_INLINE_GETTHREAD rax, JIT_MonExitStaticWorker_InlineGetThread_GetThread_PatchLabel
+
+ ; Check if lock is held
+ cmp [rcx + OFFSETOF__AwareLock__m_HoldingThread], rax
+ jne LockError
+
+ ; Reduce our recursion count
+ sub dword ptr [rcx + OFFSETOF__AwareLock__m_Recursion], 1
+ jz LastRecursion
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ add rsp, MIN_SIZE
+ ret
+endif
+endif
+ REPRET
+
+ ; This is the last count we held on this lock, so release the lock
+ LastRecursion:
+ ; Thread* is in rax
+ sub dword ptr [rax + OFFSETOF__Thread__m_dwLockCount], 1
+ mov qword ptr [rcx + OFFSETOF__AwareLock__m_HoldingThread], 0
+
+ Retry:
+ mov eax, dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld]
+ lea r10d, [eax - 1]
+ lock cmpxchg dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld], r10d
+ jne RetryHelper
+ test eax, STATE_CHECK
+ jne MustSignal
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ add rsp, MIN_SIZE
+ ret
+endif
+endif
+ MON_EXIT_STATIC_RETURN_SUCCESS
+
+ MustSignal:
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ add rsp, MIN_SIZE
+endif
+endif
+ ; void JITutil_MonSignal(AwareLock* lock, BYTE* pbLockTaken)
+ jmp JITutil_MonSignal
+
+ RetryHelper:
+ jmp Retry
+
+ LockError:
+ mov rcx, CORINFO_SynchronizationLockException_ASM
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ add rsp, MIN_SIZE
+endif
+endif
+ ; void JIT_InternalThrow(unsigned exceptNum)
+ jmp JIT_InternalThrow
+NESTED_END JIT_MonExitStatic_InlineGetThread, _TEXT
+
+ end
+
diff --git a/src/vm/amd64/JitHelpers_Slow.asm b/src/vm/amd64/JitHelpers_Slow.asm
new file mode 100644
index 0000000000..202ea43dcf
--- /dev/null
+++ b/src/vm/amd64/JitHelpers_Slow.asm
@@ -0,0 +1,1809 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+; ***********************************************************************
+; File: JitHelpers_Slow.asm, see history in jithelp.asm
+;
+; Notes: These are ASM routines which we believe to be cold in normal
+;        AMD64 scenarios, mainly because more performant versions of
+;        them exist and will be used wherever possible.
+; ***********************************************************************
+
+include AsmMacros.inc
+include asmconstants.inc
+
+; Min amount of stack space that a nested function should allocate.
+MIN_SIZE equ 28h
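+; (28h = 20h of register home ("shadow") space the Win64 ABI requires a caller
+; to provide for its callees, plus 8 bytes so rsp stays 16-byte aligned after
+; the call instruction pushes a return address.)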
+
+EXTERN g_ephemeral_low:QWORD
+EXTERN g_ephemeral_high:QWORD
+EXTERN g_lowest_address:QWORD
+EXTERN g_highest_address:QWORD
+EXTERN g_card_table:QWORD
+
+ifdef WRITE_BARRIER_CHECK
+; These global variables are always defined, but should be 0 for Server GC
+g_GCShadow TEXTEQU <?g_GCShadow@@3PEAEEA>
+g_GCShadowEnd TEXTEQU <?g_GCShadowEnd@@3PEAEEA>
+EXTERN g_GCShadow:QWORD
+EXTERN g_GCShadowEnd:QWORD
+endif
+
+JIT_NEW equ ?JIT_New@@YAPEAVObject@@PEAUCORINFO_CLASS_STRUCT_@@@Z
+Object__DEBUG_SetAppDomain equ ?DEBUG_SetAppDomain@Object@@QEAAXPEAVAppDomain@@@Z
+CopyValueClassUnchecked equ ?CopyValueClassUnchecked@@YAXPEAX0PEAVMethodTable@@@Z
+JIT_Box equ ?JIT_Box@@YAPEAVObject@@PEAUCORINFO_CLASS_STRUCT_@@PEAX@Z
+g_pStringClass equ ?g_pStringClass@@3PEAVMethodTable@@EA
+FramedAllocateString equ ?FramedAllocateString@@YAPEAVStringObject@@K@Z
+JIT_NewArr1 equ ?JIT_NewArr1@@YAPEAVObject@@PEAUCORINFO_CLASS_STRUCT_@@_J@Z
+
+INVALIDGCVALUE equ 0CCCCCCCDh
+
+extern JIT_NEW:proc
+extern CopyValueClassUnchecked:proc
+extern JIT_Box:proc
+extern g_pStringClass:QWORD
+extern FramedAllocateString:proc
+extern JIT_NewArr1:proc
+
+extern JIT_GetSharedNonGCStaticBase_Helper:proc
+extern JIT_GetSharedGCStaticBase_Helper:proc
+
+extern JIT_InternalThrow:proc
+
+ifdef _DEBUG
+; Version for when we're sure to be in the GC, checks whether or not the card
+; needs to be updated
+;
+; void JIT_WriteBarrier_Debug(Object** dst, Object* src)
+LEAF_ENTRY JIT_WriteBarrier_Debug, _TEXT
+
+ifdef WRITE_BARRIER_CHECK
+ ; **ALSO update the shadow GC heap if that is enabled**
+ ; Do not perform the work if g_GCShadow is 0
+ cmp g_GCShadow, 0
+ je NoShadow
+
+ ; If we end up outside of the heap don't corrupt random memory
+ mov r10, rcx
+ sub r10, [g_lowest_address]
+ jb NoShadow
+
+ ; Check that our adjusted destination is somewhere in the shadow gc
+ add r10, [g_GCShadow]
+ cmp r10, [g_GCShadowEnd]
+ ja NoShadow
+
+ ; Write ref into real GC; see comment below about possibility of AV
+ mov [rcx], rdx
+ ; Write ref into shadow GC
+ mov [r10], rdx
+
+ ; Ensure that the write to the shadow heap occurs before the read from
+ ; the GC heap so that race conditions are caught by INVALIDGCVALUE
+ mfence
+
+ ; Check that GC/ShadowGC values match
+ mov r11, [rcx]
+ mov rax, [r10]
+ cmp rax, r11
+ je DoneShadow
+ mov r11, INVALIDGCVALUE
+ mov [r10], r11
+
+ jmp DoneShadow
+
+ ; If we don't have a shadow GC we won't have done the write yet
+ NoShadow:
+endif
+
+ mov rax, rdx
+
+ ; Do the move. It is correct to possibly take an AV here, the EH code
+ ; figures out that this came from a WriteBarrier and correctly maps it back
+ ; to the managed method which called the WriteBarrier (see setup in
+ ; InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rcx], rax
+
+ifdef WRITE_BARRIER_CHECK
+ ; If we had a shadow GC then we already wrote to the real GC at the same time
+ ; as the shadow GC so we want to jump over the real write immediately above
+ DoneShadow:
+endif
+
+ ; See if we can just quick out
+ cmp rax, [g_ephemeral_low]
+ jb Exit
+ cmp rax, [g_ephemeral_high]
+ jnb Exit
+
+ ; Check if we need to update the card table
+ ; Calc pCardByte
+ shr rcx, 0Bh
+ add rcx, [g_card_table]
+
+ ; Check if this card is dirty
+ cmp byte ptr [rcx], 0FFh
+ jne UpdateCardTable
+ REPRET
+
+ UpdateCardTable:
+ mov byte ptr [rcx], 0FFh
+ ret
+
+ align 16
+ Exit:
+ REPRET
+LEAF_END_MARKED JIT_WriteBarrier_Debug, _TEXT
+endif
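+
+; Ignoring the shadow-heap checking, the barrier above boils down to this C
+; sketch (globals as declared at the top of this file):
+;
+;   *dst = ref;                                      // the actual store
+;   if (ref >= g_ephemeral_low && ref < g_ephemeral_high) {
+;       BYTE* card = (BYTE*)g_card_table + ((size_t)dst >> 11);
+;       if (*card != 0xFF)                           // don't dirty the cache
+;           *card = 0xFF;                            // line unnecessarily
+;   }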
+
+NESTED_ENTRY JIT_TrialAllocSFastMP, _TEXT
+ alloc_stack MIN_SIZE
+ END_PROLOGUE
+
+ CALL_GETTHREAD
+ mov r11, rax
+
+ mov r8d, [rcx + OFFSET__MethodTable__m_BaseSize]
+
+ ; m_BaseSize is guaranteed to be a multiple of 8.
+
+ mov r10, [r11 + OFFSET__Thread__m_alloc_context__alloc_limit]
+ mov rax, [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr]
+
+ add r8, rax
+
+ cmp r8, r10
+ ja AllocFailed
+
+ mov [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr], r8
+ mov [rax], rcx
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain
+endif ; _DEBUG
+
+ ; epilog
+ add rsp, MIN_SIZE
+ ret
+
+ AllocFailed:
+ add rsp, MIN_SIZE
+ jmp JIT_NEW
+NESTED_END JIT_TrialAllocSFastMP, _TEXT
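+
+; The allocation fast path above is a bump allocator over the thread's
+; allocation context; approximately, in C (field names as used in the
+; offsets above, slow path = JIT_New):
+;
+;   BYTE* obj  = thread->m_alloc_context.alloc_ptr;
+;   BYTE* next = obj + pMT->m_BaseSize;      // BaseSize is a multiple of 8
+;   if (next > thread->m_alloc_context.alloc_limit)
+;       return JIT_New(pMT);                 // take the framed slow path
+;   thread->m_alloc_context.alloc_ptr = next;
+;   *(MethodTable**)obj = pMT;               // install the method table
+;   return (Object*)obj;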
+
+
+; HCIMPL2(Object*, JIT_Box, CORINFO_CLASS_HANDLE type, void* unboxedData)
+NESTED_ENTRY JIT_BoxFastMP, _TEXT
+ alloc_stack MIN_SIZE
+ END_PROLOGUE
+
+ mov rax, [rcx + OFFSETOF__MethodTable__m_pWriteableData]
+
+ ; Check whether the class has not been initialized
+ test dword ptr [rax + OFFSETOF__MethodTableWriteableData__m_dwFlags], MethodTableWriteableData__enum_flag_Unrestored
+ jnz ClassNotInited
+
+ CALL_GETTHREAD
+ mov r11, rax
+
+ mov r8d, [rcx + OFFSET__MethodTable__m_BaseSize]
+
+ ; m_BaseSize is guaranteed to be a multiple of 8.
+
+ mov r10, [r11 + OFFSET__Thread__m_alloc_context__alloc_limit]
+ mov rax, [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr]
+
+ add r8, rax
+
+ cmp r8, r10
+ ja AllocFailed
+
+ mov [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr], r8
+ mov [rax], rcx
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain
+endif ; _DEBUG
+
+ ; Check whether the object contains pointers
+ test dword ptr [rcx + OFFSETOF__MethodTable__m_dwFlags], MethodTable__enum_flag_ContainsPointers
+ jnz ContainsPointers
+
+ ; We have no pointers - emit a simple inline copy loop
+
+ mov ecx, [rcx + OFFSET__MethodTable__m_BaseSize]
+ sub ecx, 18h ; sizeof(ObjHeader) + sizeof(Object) + last slot
+
+ CopyLoop:
+ mov r8, [rdx+rcx]
+ mov [rax+rcx+8], r8
+
+ sub ecx, 8
+ jge CopyLoop
+
+ add rsp, MIN_SIZE
+ ret
+
+ ContainsPointers:
+ ; Do call to CopyValueClassUnchecked(object, data, pMT)
+
+ mov [rsp+20h], rax
+
+ mov r8, rcx
+ lea rcx, [rax + 8]
+ call CopyValueClassUnchecked
+
+ mov rax, [rsp+20h]
+
+ add rsp, MIN_SIZE
+ ret
+
+ ClassNotInited:
+ AllocFailed:
+ add rsp, MIN_SIZE
+ jmp JIT_Box
+NESTED_END JIT_BoxFastMP, _TEXT
+
+
+NESTED_ENTRY AllocateStringFastMP, _TEXT
+ alloc_stack MIN_SIZE
+ END_PROLOGUE
+
+ ; Instead of doing elaborate overflow checks, we just limit the number of elements
+ ; to (LARGE_OBJECT_SIZE - 256)/sizeof(WCHAR) or less.
+ ; This will avoid all overflow problems, as well as making sure
+ ; big string objects are correctly allocated in the big object heap.
+
+ cmp ecx, (ASM_LARGE_OBJECT_SIZE - 256)/2
+ jae OversizedString
+
+ CALL_GETTHREAD
+ mov r11, rax
+
+ mov rdx, [g_pStringClass]
+ mov r8d, [rdx + OFFSET__MethodTable__m_BaseSize]
+
+ ; Calculate the final size to allocate.
+ ; We need to calculate baseSize + cnt*2, then round that up by adding 7 and anding ~7.
+
+ lea r8d, [r8d + ecx*2 + 7]
+ and r8d, -8
+
+ mov r10, [r11 + OFFSET__Thread__m_alloc_context__alloc_limit]
+ mov rax, [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr]
+
+ add r8, rax
+
+ cmp r8, r10
+ ja AllocFailed
+
+ mov [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr], r8
+ mov [rax], rdx
+
+ mov [rax + OFFSETOF__StringObject__m_StringLength], ecx
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain
+endif ; _DEBUG
+
+ add rsp, MIN_SIZE
+ ret
+
+ OversizedString:
+ AllocFailed:
+ add rsp, MIN_SIZE
+ jmp FramedAllocateString
+NESTED_END AllocateStringFastMP, _TEXT
+
+FIX_INDIRECTION macro Reg
+ifdef FEATURE_PREJIT
+ test Reg, 1
+ jz @F
+ mov Reg, [Reg-1]
+ @@:
+endif
+endm
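+
+; FIX_INDIRECTION covers NGen'ed (FEATURE_PREJIT) images, where the
+; m_TemplateMT slot may hold a tagged pointer to an indirection cell rather
+; than the MethodTable itself. In C, what the macro does:
+;
+;   if (reg & 1)                 // low bit tags an indirection
+;       reg = *(UINT_PTR*)(reg - 1);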
+
+; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
+NESTED_ENTRY JIT_NewArr1VC_MP, _TEXT
+ alloc_stack MIN_SIZE
+ END_PROLOGUE
+
+ ; We were passed a type descriptor in RCX, which contains the (shared)
+ ; array method table and the element type.
+
+ ; The element count is in RDX
+
+ ; NOTE: if this code is ported for CORINFO_HELP_NEWSFAST_ALIGN8, it will need
+ ; to emulate the double-specific behavior of JIT_TrialAlloc::GenAllocArray.
+
+ ; Do a conservative check here. This is to avoid overflow while doing the calculations. We don't
+ ; have to worry about "large" objects, since the allocation quantum is never big enough for
+ ; LARGE_OBJECT_SIZE.
+
+ ; For Value Classes, this needs to be 2^16 - slack (2^32 / max component size),
+ ; The slack includes the size for the array header and round-up for alignment. Use 256 for the
+ ; slack value out of laziness.
+
+ ; In both cases we do a final overflow check after adding to the alloc_ptr.
+
+ CALL_GETTHREAD
+ mov r11, rax
+
+ ; we need to load the true method table from the type desc
+ mov r9, [rcx + OFFSETOF__ArrayTypeDesc__m_TemplateMT - 2]
+
+ FIX_INDIRECTION r9
+
+ cmp rdx, (65535 - 256)
+ jae OversizedArray
+
+ movzx r8d, word ptr [r9 + OFFSETOF__MethodTable__m_dwFlags] ; component size is low 16 bits
+ imul r8d, edx ; signed mul, but won't overflow due to length restriction above
+ add r8d, dword ptr [r9 + OFFSET__MethodTable__m_BaseSize]
+
+ ; round the size to a multiple of 8
+
+ add r8d, 7
+ and r8d, -8
+
+ mov r10, [r11 + OFFSET__Thread__m_alloc_context__alloc_limit]
+ mov rax, [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr]
+
+ add r8, rax
+ jc AllocFailed
+
+ cmp r8, r10
+ ja AllocFailed
+
+ mov [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr], r8
+ mov [rax], r9
+
+ mov dword ptr [rax + OFFSETOF__ArrayBase__m_NumComponents], edx
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain
+endif ; _DEBUG
+
+ add rsp, MIN_SIZE
+ ret
+
+ OversizedArray:
+ AllocFailed:
+ add rsp, MIN_SIZE
+ jmp JIT_NewArr1
+NESTED_END JIT_NewArr1VC_MP, _TEXT
+
+
+; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
+NESTED_ENTRY JIT_NewArr1OBJ_MP, _TEXT
+ alloc_stack MIN_SIZE
+ END_PROLOGUE
+
+ ; We were passed a type descriptor in RCX, which contains the (shared)
+ ; array method table and the element type.
+
+ ; The element count is in RDX
+
+ ; NOTE: if this code is ported for CORINFO_HELP_NEWSFAST_ALIGN8, it will need
+ ; to emulate the double-specific behavior of JIT_TrialAlloc::GenAllocArray.
+
+ ; Verifies that LARGE_OBJECT_SIZE fits in 32-bit. This allows us to do array size
+ ; arithmetic using 32-bit registers.
+ .erre ASM_LARGE_OBJECT_SIZE lt 100000000h
+
+ cmp rdx, (ASM_LARGE_OBJECT_SIZE - 256)/8
+ jae OversizedArray
+
+ CALL_GETTHREAD
+ mov r11, rax
+
+ ; we need to load the true method table from the type desc
+ mov r9, [rcx + OFFSETOF__ArrayTypeDesc__m_TemplateMT - 2]
+
+ FIX_INDIRECTION r9
+
+ ; In this case we know the element size is sizeof(void *), or 8 for x64
+ ; This helps us in two ways - we can shift instead of multiplying, and
+ ; there's no need to align the size either
+
+ mov r8d, dword ptr [r9 + OFFSET__MethodTable__m_BaseSize]
+ lea r8d, [r8d + edx * 8]
+
+ ; No need for rounding in this case - element size is 8, and m_BaseSize is guaranteed
+ ; to be a multiple of 8.
+
+ mov r10, [r11 + OFFSET__Thread__m_alloc_context__alloc_limit]
+ mov rax, [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr]
+
+ add r8, rax
+
+ cmp r8, r10
+ ja AllocFailed
+
+ mov [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr], r8
+ mov [rax], r9
+
+ mov dword ptr [rax + OFFSETOF__ArrayBase__m_NumComponents], edx
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain
+endif ; _DEBUG
+
+ add rsp, MIN_SIZE
+ ret
+
+ OversizedArray:
+ AllocFailed:
+ add rsp, MIN_SIZE
+ jmp JIT_NewArr1
+NESTED_END JIT_NewArr1OBJ_MP, _TEXT
+
+
+
+; <TODO> this m_GCLock should be a size_t so we don't have a store-forwarding penalty in the code below.
+; Unfortunately, the compiler intrinsic for InterlockedExchangePointer seems to be broken and we
+; get bad code gen in gc.cpp on IA64. </TODO>
+
+M_GCLOCK equ ?m_GCLock@@3JC
+extern M_GCLOCK:dword
+extern generation_table:qword
+
+LEAF_ENTRY JIT_TrialAllocSFastSP, _TEXT
+
+ mov r8d, [rcx + OFFSET__MethodTable__m_BaseSize]
+
+ ; m_BaseSize is guaranteed to be a multiple of 8.
+
+ inc [M_GCLOCK]
+ jnz JIT_NEW
+
+ mov rax, [generation_table + 0] ; alloc_ptr
+ mov r10, [generation_table + 8] ; limit_ptr
+
+ add r8, rax
+
+ cmp r8, r10
+ ja AllocFailed
+
+ mov qword ptr [generation_table + 0], r8 ; update the alloc ptr
+ mov [rax], rcx
+ mov [M_GCLOCK], -1
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain_NoScratchArea
+endif ; _DEBUG
+
+ ret
+
+ AllocFailed:
+ mov [M_GCLOCK], -1
+ jmp JIT_NEW
+LEAF_END JIT_TrialAllocSFastSP, _TEXT
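+
+; Note on the m_GCLock idiom used by these UP (uniprocessor) helpers: the lock
+; word rests at -1, so 'inc' takes it to 0 (setting ZF) exactly when the lock
+; was free; any other result diverts to the framed helper, and the lock is
+; released with 'mov [M_GCLOCK], -1'. With only one processor in play, the
+; unprefixed inc cannot be interleaved with another CPU's access, so no
+; 'lock' prefix is needed.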
+
+; HCIMPL2(Object*, JIT_Box, CORINFO_CLASS_HANDLE type, void* unboxedData)
+NESTED_ENTRY JIT_BoxFastUP, _TEXT
+
+ mov rax, [rcx + OFFSETOF__MethodTable__m_pWriteableData]
+
+ ; Check whether the class has not been initialized
+ test dword ptr [rax + OFFSETOF__MethodTableWriteableData__m_dwFlags], MethodTableWriteableData__enum_flag_Unrestored
+ jnz JIT_Box
+
+ mov r8d, [rcx + OFFSET__MethodTable__m_BaseSize]
+
+ ; m_BaseSize is guaranteed to be a multiple of 8.
+
+ inc [M_GCLOCK]
+ jnz JIT_Box
+
+ mov rax, [generation_table + 0] ; alloc_ptr
+ mov r10, [generation_table + 8] ; limit_ptr
+
+ add r8, rax
+
+ cmp r8, r10
+ ja NoAlloc
+
+
+ mov qword ptr [generation_table + 0], r8 ; update the alloc ptr
+ mov [rax], rcx
+ mov [M_GCLOCK], -1
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain_NoScratchArea
+endif ; _DEBUG
+
+ ; Check whether the object contains pointers
+ test dword ptr [rcx + OFFSETOF__MethodTable__m_dwFlags], MethodTable__enum_flag_ContainsPointers
+ jnz ContainsPointers
+
+ ; We have no pointers - emit a simple inline copy loop
+
+ mov ecx, [rcx + OFFSET__MethodTable__m_BaseSize]
+ sub ecx, 18h ; sizeof(ObjHeader) + sizeof(Object) + last slot
+
+ CopyLoop:
+ mov r8, [rdx+rcx]
+ mov [rax+rcx+8], r8
+
+ sub ecx, 8
+ jge CopyLoop
+ REPRET
+
+ ContainsPointers:
+
+ ; Do call to CopyValueClassUnchecked(object, data, pMT)
+
+ push_vol_reg rax
+ alloc_stack 20h
+ END_PROLOGUE
+
+ mov r8, rcx
+ lea rcx, [rax + 8]
+ call CopyValueClassUnchecked
+
+ add rsp, 20h
+ pop rax
+ ret
+
+ NoAlloc:
+ mov [M_GCLOCK], -1
+ jmp JIT_Box
+NESTED_END JIT_BoxFastUP, _TEXT
+
+LEAF_ENTRY AllocateStringFastUP, _TEXT
+
+ ; We were passed the number of characters in ECX
+
+ ; we need to load the method table for string from the global
+
+ mov r11, [g_pStringClass]
+
+ ; Instead of doing elaborate overflow checks, we just limit the number of elements
+ ; to (LARGE_OBJECT_SIZE - 256)/sizeof(WCHAR) or less.
+ ; This will avoid all overflow problems, as well as making sure
+ ; big string objects are correctly allocated in the big object heap.
+
+ cmp ecx, (ASM_LARGE_OBJECT_SIZE - 256)/2
+ jae FramedAllocateString
+
+ mov r8d, [r11 + OFFSET__MethodTable__m_BaseSize]
+
+ ; Calculate the final size to allocate.
+ ; We need to calculate baseSize + cnt*2, then round that up by adding 7 and anding ~7.
+
+ lea r8d, [r8d + ecx*2 + 7]
+ and r8d, -8
+
+ inc [M_GCLOCK]
+ jnz FramedAllocateString
+
+ mov rax, [generation_table + 0] ; alloc_ptr
+ mov r10, [generation_table + 8] ; limit_ptr
+
+ add r8, rax
+
+ cmp r8, r10
+ ja AllocFailed
+
+ mov qword ptr [generation_table + 0], r8 ; update the alloc ptr
+ mov [rax], r11
+ mov [M_GCLOCK], -1
+
+ mov [rax + OFFSETOF__StringObject__m_StringLength], ecx
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain_NoScratchArea
+endif ; _DEBUG
+
+ ret
+
+ AllocFailed:
+ mov [M_GCLOCK], -1
+ jmp FramedAllocateString
+LEAF_END AllocateStringFastUP, _TEXT
+
+; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
+LEAF_ENTRY JIT_NewArr1VC_UP, _TEXT
+
+ ; We were passed a type descriptor in RCX, which contains the (shared)
+ ; array method table and the element type.
+
+ ; The element count is in RDX
+
+ ; NOTE: if this code is ported for CORINFO_HELP_NEWSFAST_ALIGN8, it will need
+ ; to emulate the double-specific behavior of JIT_TrialAlloc::GenAllocArray.
+
+ ; Do a conservative check here. This is to avoid overflow while doing the calculations. We don't
+ ; have to worry about "large" objects, since the allocation quantum is never big enough for
+ ; LARGE_OBJECT_SIZE.
+
+ ; For Value Classes, this needs to be 2^16 - slack (2^32 / max component size),
+ ; The slack includes the size for the array header and round-up for alignment. Use 256 for the
+ ; slack value out of laziness.
+
+ ; In both cases we do a final overflow check after adding to the alloc_ptr.
+
+ ; we need to load the true method table from the type desc
+ mov r9, [rcx + OFFSETOF__ArrayTypeDesc__m_TemplateMT - 2]
+
+ FIX_INDIRECTION r9
+
+ cmp rdx, (65535 - 256)
+ jae JIT_NewArr1
+
+ movzx r8d, word ptr [r9 + OFFSETOF__MethodTable__m_dwFlags] ; component size is low 16 bits
+ imul r8d, edx ; signed mul, but won't overflow due to length restriction above
+ add r8d, dword ptr [r9 + OFFSET__MethodTable__m_BaseSize]
+
+ ; round the size to a multiple of 8
+
+ add r8d, 7
+ and r8d, -8
+
+ inc [M_GCLOCK]
+ jnz JIT_NewArr1
+
+ mov rax, [generation_table + 0] ; alloc_ptr
+ mov r10, [generation_table + 8] ; limit_ptr
+
+ add r8, rax
+ jc AllocFailed
+
+ cmp r8, r10
+ ja AllocFailed
+
+ mov qword ptr [generation_table + 0], r8 ; update the alloc ptr
+ mov [rax], r9
+ mov [M_GCLOCK], -1
+
+ mov dword ptr [rax + OFFSETOF__ArrayBase__m_NumComponents], edx
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain_NoScratchArea
+endif ; _DEBUG
+
+ ret
+
+ AllocFailed:
+ mov [M_GCLOCK], -1
+ jmp JIT_NewArr1
+LEAF_END JIT_NewArr1VC_UP, _TEXT
+
+
+; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
+LEAF_ENTRY JIT_NewArr1OBJ_UP, _TEXT
+
+ ; We were passed a type descriptor in RCX, which contains the (shared)
+ ; array method table and the element type.
+
+ ; The element count is in RDX
+
+ ; NOTE: if this code is ported for CORINFO_HELP_NEWSFAST_ALIGN8, it will need
+ ; to emulate the double-specific behavior of JIT_TrialAlloc::GenAllocArray.
+
+ ; Verifies that LARGE_OBJECT_SIZE fits in 32-bit. This allows us to do array size
+ ; arithmetic using 32-bit registers.
+ .erre ASM_LARGE_OBJECT_SIZE lt 100000000h
+
+ cmp rdx, (ASM_LARGE_OBJECT_SIZE - 256)/8 ; sizeof(void*)
+ jae OversizedArray
+
+ ; we need to load the true method table from the type desc
+ mov r9, [rcx + OFFSETOF__ArrayTypeDesc__m_TemplateMT - 2]
+
+ FIX_INDIRECTION r9
+
+ ; In this case we know the element size is sizeof(void *), or 8 for x64
+ ; This helps us in two ways - we can shift instead of multiplying, and
+ ; there's no need to align the size either
+
+ mov r8d, dword ptr [r9 + OFFSET__MethodTable__m_BaseSize]
+ lea r8d, [r8d + edx * 8]
+
+ ; No need for rounding in this case - element size is 8, and m_BaseSize is guaranteed
+ ; to be a multiple of 8.
+
+ inc [M_GCLOCK]
+ jnz JIT_NewArr1
+
+ mov rax, [generation_table + 0] ; alloc_ptr
+ mov r10, [generation_table + 8] ; limit_ptr
+
+ add r8, rax
+
+ cmp r8, r10
+ ja AllocFailed
+
+ mov qword ptr [generation_table + 0], r8 ; update the alloc ptr
+ mov [rax], r9
+ mov [M_GCLOCK], -1
+
+ mov dword ptr [rax + OFFSETOF__ArrayBase__m_NumComponents], edx
+
+ifdef _DEBUG
+ call DEBUG_TrialAllocSetAppDomain_NoScratchArea
+endif ; _DEBUG
+
+ ret
+
+ AllocFailed:
+ mov [M_GCLOCK], -1
+
+ OversizedArray:
+ jmp JIT_NewArr1
+LEAF_END JIT_NewArr1OBJ_UP, _TEXT
+
+
+NESTED_ENTRY JIT_GetSharedNonGCStaticBase_Slow, _TEXT
+ alloc_stack MIN_SIZE
+ END_PROLOGUE
+
+ ; Check if rcx (moduleDomainID) is not a moduleID
+ test rcx, 1
+ jz HaveLocalModule
+
+ CALL_GETAPPDOMAIN
+
+ ; Get the LocalModule
+ mov rax, [rax + OFFSETOF__AppDomain__m_sDomainLocalBlock + OFFSETOF__DomainLocalBlock__m_pModuleSlots]
+ ; rcx will always be odd, so: rcx * 4 - 4 <=> (rcx >> 1) * 8
+ mov rcx, [rax + rcx * 4 - 4]
+
+ HaveLocalModule:
+ ; If class is not initialized, bail to C++ helper
+ test [rcx + OFFSETOF__DomainLocalModule__m_pDataBlob + rdx], 1
+ jz CallHelper
+
+ mov rax, rcx
+ add rsp, MIN_SIZE
+ ret
+
+ align 16
+ CallHelper:
+ ; Tail call Jit_GetSharedNonGCStaticBase_Helper
+ add rsp, MIN_SIZE
+ jmp JIT_GetSharedNonGCStaticBase_Helper
+NESTED_END JIT_GetSharedNonGCStaticBase_Slow, _TEXT
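+
+; The moduleDomainID convention used above and in the helpers that follow: an
+; even rcx is already a DomainLocalModule*, while an odd rcx is a tagged slot
+; index. Decoded approximately as:
+;
+;   if (id & 1)     // tagged index; note (id * 4 - 4) == (id >> 1) * 8
+;       pModule = appDomain->m_sDomainLocalBlock.m_pModuleSlots[id >> 1];
+;   else
+;       pModule = (DomainLocalModule*)id;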
+
+NESTED_ENTRY JIT_GetSharedNonGCStaticBaseNoCtor_Slow, _TEXT
+ alloc_stack MIN_SIZE
+ END_PROLOGUE
+
+ ; Check if rcx (moduleDomainID) is not a moduleID
+ test rcx, 1
+ jz HaveLocalModule
+
+ CALL_GETAPPDOMAIN
+
+ ; Get the LocalModule
+ mov rax, [rax + OFFSETOF__AppDomain__m_sDomainLocalBlock + OFFSETOF__DomainLocalBlock__m_pModuleSlots]
+ ; rcx will always be odd, so: rcx * 4 - 4 <=> (rcx >> 1) * 8
+ mov rax, [rax + rcx * 4 - 4]
+
+ add rsp, MIN_SIZE
+ ret
+
+ align 16
+ HaveLocalModule:
+ mov rax, rcx
+ add rsp, MIN_SIZE
+ ret
+NESTED_END JIT_GetSharedNonGCStaticBaseNoCtor_Slow, _TEXT
+
+NESTED_ENTRY JIT_GetSharedGCStaticBase_Slow, _TEXT
+ alloc_stack MIN_SIZE
+ END_PROLOGUE
+
+ ; Check if rcx (moduleDomainID) is not a moduleID
+ test rcx, 1
+ jz HaveLocalModule
+
+ CALL_GETAPPDOMAIN
+
+ ; Get the LocalModule
+ mov rax, [rax + OFFSETOF__AppDomain__m_sDomainLocalBlock + OFFSETOF__DomainLocalBlock__m_pModuleSlots]
+ ; rcx will always be odd, so: rcx * 4 - 4 <=> (rcx >> 1) * 8
+ mov rcx, [rax + rcx * 4 - 4]
+
+ HaveLocalModule:
+ ; If class is not initialized, bail to C++ helper
+ test [rcx + OFFSETOF__DomainLocalModule__m_pDataBlob + rdx], 1
+ jz CallHelper
+
+ mov rax, [rcx + OFFSETOF__DomainLocalModule__m_pGCStatics]
+
+ add rsp, MIN_SIZE
+ ret
+
+ align 16
+ CallHelper:
+ ; Tail call Jit_GetSharedGCStaticBase_Helper
+ add rsp, MIN_SIZE
+ jmp JIT_GetSharedGCStaticBase_Helper
+NESTED_END JIT_GetSharedGCStaticBase_Slow, _TEXT
+
+NESTED_ENTRY JIT_GetSharedGCStaticBaseNoCtor_Slow, _TEXT
+ alloc_stack MIN_SIZE
+ END_PROLOGUE
+
+ ; Check if rcx (moduleDomainID) is not a moduleID
+ test rcx, 1
+ jz HaveLocalModule
+
+ CALL_GETAPPDOMAIN
+
+ ; Get the LocalModule
+ mov rax, [rax + OFFSETOF__AppDomain__m_sDomainLocalBlock + OFFSETOF__DomainLocalBlock__m_pModuleSlots]
+ ; rcx will always be odd, so: rcx * 4 - 4 <=> (rcx >> 1) * 8
+ mov rcx, [rax + rcx * 4 - 4]
+
+ HaveLocalModule:
+ mov rax, [rcx + OFFSETOF__DomainLocalModule__m_pGCStatics]
+
+ add rsp, MIN_SIZE
+ ret
+NESTED_END JIT_GetSharedGCStaticBaseNoCtor_Slow, _TEXT
+
+
+MON_ENTER_STACK_SIZE equ 00000020h
+MON_EXIT_STACK_SIZE equ 00000068h
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+MON_ENTER_STACK_SIZE_INLINEGETTHREAD equ 00000020h
+MON_EXIT_STACK_SIZE_INLINEGETTHREAD equ 00000068h
+endif
+endif
+
+BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX equ 08000000h ; syncblk.h
+BIT_SBLK_IS_HASHCODE equ 04000000h ; syncblk.h
+BIT_SBLK_SPIN_LOCK equ 10000000h ; syncblk.h
+
+SBLK_MASK_LOCK_THREADID equ 000003FFh ; syncblk.h
+SBLK_LOCK_RECLEVEL_INC equ 00000400h ; syncblk.h
+SBLK_MASK_LOCK_RECLEVEL equ 0000FC00h ; syncblk.h
+
+MASK_SYNCBLOCKINDEX equ 03FFFFFFh ; syncblk.h
+STATE_CHECK equ 0FFFFFFFEh
+
+MT_CTX_PROXY_FLAG equ 10000000h
+
+g_pSyncTable equ ?g_pSyncTable@@3PEAVSyncTableEntry@@EA
+g_SystemInfo equ ?g_SystemInfo@@3U_SYSTEM_INFO@@A
+g_SpinConstants equ ?g_SpinConstants@@3USpinConstants@@A
+
+extern g_pSyncTable:QWORD
+extern g_SystemInfo:QWORD
+extern g_SpinConstants:QWORD
+
+; JITutil_MonEnterWorker(Object* obj, BYTE* pbLockTaken)
+extern JITutil_MonEnterWorker:proc
+; JITutil_MonTryEnter(Object* obj, INT32 timeout, BYTE* pbLockTaken)
+extern JITutil_MonTryEnter:proc
+; JITutil_MonExitWorker(Object* obj, BYTE* pbLockTaken)
+extern JITutil_MonExitWorker:proc
+; JITutil_MonSignal(AwareLock* lock, BYTE* pbLockTaken)
+extern JITutil_MonSignal:proc
+; JITutil_MonContention(AwareLock* lock, BYTE* pbLockTaken)
+extern JITutil_MonContention:proc
+
+ifdef _DEBUG
+MON_DEBUG equ 1
+endif
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+extern EnterSyncHelper:proc
+extern LeaveSyncHelper:proc
+endif
+endif
+
+
+; This is a frameless helper for entering a monitor on an object.
+; The object is in ARGUMENT_REG1. This tries the normal case (no
+; blocking or object allocation) in line and calls a framed helper
+; for the other cases.
+;
+; EXTERN_C void JIT_MonEnterWorker_Slow(Object* obj, /*OUT*/ BYTE* pbLockTaken)
+NESTED_ENTRY JIT_MonEnterWorker_Slow, _TEXT
+ push_nonvol_reg rsi
+
+ alloc_stack MON_ENTER_STACK_SIZE
+
+ save_reg_postrsp rcx, MON_ENTER_STACK_SIZE + 10h + 0h
+ save_reg_postrsp rdx, MON_ENTER_STACK_SIZE + 10h + 8h
+ save_reg_postrsp r8, MON_ENTER_STACK_SIZE + 10h + 10h
+ save_reg_postrsp r9, MON_ENTER_STACK_SIZE + 10h + 18h
+
+ END_PROLOGUE
+
+ ; Check if the instance is NULL
+ test rcx, rcx
+ jz FramedLockHelper
+
+ ; Put pbLockTaken in rsi, this can be null
+ mov rsi, rdx
+
+ ; We store the thread object in r11
+ CALL_GETTHREAD
+ mov r11, rax
+
+ ; Initialize delay value for retry with exponential backoff
+ mov r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwInitialDuration]
+
+ ; Check if we can abort here
+ mov eax, dword ptr [r11 + OFFSETOF__Thread__m_State]
+ and eax, THREAD_CATCHATSAFEPOINT_BITS
+ ; Go through the slow code path to initiate ThreadAbort
+ jnz FramedLockHelper
+
+ ; r8 will hold the syncblockindex address
+ lea r8, [rcx - OFFSETOF__ObjHeader__SyncBlkIndex]
+
+ RetryThinLock:
+ ; Fetch the syncblock dword
+ mov eax, dword ptr [r8]
+
+ ; Check whether we have the "thin lock" layout, the lock is free and the spin lock bit is not set
+ test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL
+ jnz NeedMoreTests
+
+ ; Everything is fine - get the thread id to store in the lock
+ mov edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId]
+
+ ; If the thread id is too large, we need a syncblock for sure
+ cmp edx, SBLK_MASK_LOCK_THREADID
+ ja FramedLockHelper
+
+ ; We want to store a new value with the current thread id set in the low 10 bits
+ or edx, eax
+ lock cmpxchg dword ptr [r8], edx
+ jnz PrepareToWaitThinLock
+
+ ; Everything went fine and we're done
+ add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1
+
+ ; Done, leave and set pbLockTaken if we have it
+ jmp LockTaken
+
+ NeedMoreTests:
+ ; OK, not the simple case, find out which case it is
+ test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX
+ jnz HaveHashOrSyncBlockIndex
+
+ ; If the spin lock bit is set the header is transitioning; treat this as if the lock was taken
+ test eax, BIT_SBLK_SPIN_LOCK
+ jnz PrepareToWaitThinLock
+
+ ; Here we know we have the "thin lock" layout, but the lock is not free.
+ ; It could still be the recursion case, compare the thread id to check
+ mov edx, eax
+ and edx, SBLK_MASK_LOCK_THREADID
+ cmp edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId]
+ jne PrepareToWaitThinLock
+
+ ; Ok, the thread id matches, it's the recursion case.
+ ; Bump up the recursion level and check for overflow
+ lea edx, [eax + SBLK_LOCK_RECLEVEL_INC]
+ test edx, SBLK_MASK_LOCK_RECLEVEL
+ jz FramedLockHelper
+
+ ; Try to put the new recursion level back. If the header was changed in the meantime
+ ; we need a full retry, because the layout could have changed
+ lock cmpxchg dword ptr [r8], edx
+ jnz RetryHelperThinLock
+
+ ; Done, leave and set pbLockTaken if we have it
+ jmp LockTaken
+
+ PrepareToWaitThinLock:
+ ; If we are on an MP system, we try spinning for a certain number of iterations
+ cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1
+ jle FramedLockHelper
+
+ ; Exponential backoff; delay by approximately 2*r10 clock cycles
+ mov eax, r10d
+ delayLoopThinLock:
+ pause ; indicate to the CPU that we are spin waiting
+ sub eax, 1
+ jnz delayLoopThinLock
+
+ ; Next time, wait a factor longer
+ imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor]
+
+ cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration]
+ jle RetryHelperThinLock
+
+ jmp FramedLockHelper
+
+ RetryHelperThinLock:
+ jmp RetryThinLock
+
+ HaveHashOrSyncBlockIndex:
+ ; If we have a hash code already, we need to create a sync block
+ test eax, BIT_SBLK_IS_HASHCODE
+ jnz FramedLockHelper
+
+ ; OK, we have a sync block index, so just AND out the top bits and grab the syncblock index
+ and eax, MASK_SYNCBLOCKINDEX
+
+ ; Get the sync block pointer
+ mov rdx, qword ptr [g_pSyncTable]
+ shl eax, 4h
+ mov rdx, [rdx + rax + OFFSETOF__SyncTableEntry__m_SyncBlock]
+
+ ; Check if the sync block has been allocated
+ test rdx, rdx
+ jz FramedLockHelper
+
+ ; Get a pointer to the lock object
+ lea rdx, [rdx + OFFSETOF__SyncBlock__m_Monitor]
+
+ ; Attempt to acquire the lock
+ RetrySyncBlock:
+ mov eax, dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld]
+ test eax, eax
+ jne HaveWaiters
+
+ ; Common case, lock isn't held and there are no waiters. Attempt to
+ ; gain ownership ourselves
+ xor ecx, ecx
+ inc ecx
+ lock cmpxchg dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld], ecx
+ jnz RetryHelperSyncBlock
+
+ ; Success. Save the thread object in the lock and increment the use count
+ mov qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11
+ add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1
+ add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov rcx, [rsp + MON_ENTER_STACK_SIZE + 8h] ; return address
+ ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock)
+ call EnterSyncHelper
+endif
+endif
+
+ ; Done, leave and set pbLockTaken if we have it
+ jmp LockTaken
+
+ ; It's possible to get here with waiters but no lock held, but in this
+ ; case a signal is about to be fired which will wake up the waiter. So
+ ; for fairness' sake we should wait too.
+ ; Check first for recursive lock attempts on the same thread.
+ HaveWaiters:
+ ; Is mutex already owned by current thread?
+ cmp [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11
+ jne PrepareToWait
+
+ ; Yes, bump our use count.
+ add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov rcx, [rsp + MON_ENTER_STACK_SIZE + 8h] ; return address
+ ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock)
+ call EnterSyncHelper
+endif
+endif
+ ; Done, leave and set pbLockTaken if we have it
+ jmp LockTaken
+
+ PrepareToWait:
+ ; If we are on a MP system we try spinning for a certain number of iterations
+ cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1
+ jle HaveWaiters1
+
+ ; Exponential backoff: delay by approximately 2*r10 clock cycles
+ mov eax, r10d
+ delayLoop:
+ pause ; indicate to the CPU that we are spin waiting
+ sub eax, 1
+ jnz delayLoop
+
+ ; Next time, wait a factor longer
+ imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor]
+
+ cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration]
+ jle RetrySyncBlock
+
+ HaveWaiters1:
+ mov rcx, rdx
+ mov rdx, rsi
+ add rsp, MON_ENTER_STACK_SIZE
+ pop rsi
+ ; void JITutil_MonContention(AwareLock* lock, BYTE* pbLockTaken)
+ jmp JITutil_MonContention
+
+ RetryHelperSyncBlock:
+ jmp RetrySyncBlock
+
+ FramedLockHelper:
+ mov rdx, rsi
+ add rsp, MON_ENTER_STACK_SIZE
+ pop rsi
+ ; void JITutil_MonEnterWorker(Object* obj, BYTE* pbLockTaken)
+ jmp JITutil_MonEnterWorker
+
+ align 16
+ ; This is sensitive to the potential that pbLockTaken is NULL
+ LockTaken:
+ test rsi, rsi
+ jz LockTaken_Exit
+ mov byte ptr [rsi], 1
+ LockTaken_Exit:
+ add rsp, MON_ENTER_STACK_SIZE
+ pop rsi
+ ret
+NESTED_END JIT_MonEnterWorker_Slow, _TEXT
+
+; This is a frameless helper for exiting a monitor on an object.
+; The object is in ARGUMENT_REG1. This tries the normal case (no
+; blocking or object allocation) in line and calls a framed helper
+; for the other cases.
+;
+; void JIT_MonExitWorker_Slow(Object* obj, BYTE* pbLockTaken)
+NESTED_ENTRY JIT_MonExitWorker_Slow, _TEXT
+ alloc_stack MON_EXIT_STACK_SIZE
+
+ save_reg_postrsp rcx, MON_EXIT_STACK_SIZE + 8h + 0h
+ save_reg_postrsp rdx, MON_EXIT_STACK_SIZE + 8h + 8h
+ save_reg_postrsp r8, MON_EXIT_STACK_SIZE + 8h + 10h
+ save_reg_postrsp r9, MON_EXIT_STACK_SIZE + 8h + 18h
+
+ END_PROLOGUE
+
+ ; pbLockTaken is stored in r10
+ mov r10, rdx
+
+ ; if pbLockTaken is NULL then we got here without a state variable, avoid the
+ ; next comparison in that case as it will AV
+ test rdx, rdx
+ jz Null_pbLockTaken
+
+ ; If the lock wasn't taken then we bail quickly without doing anything
+ cmp byte ptr [rdx], 0
+ je LockNotTaken
+
+ Null_pbLockTaken:
+ ; Check if the instance is null
+ test rcx, rcx
+ jz FramedLockHelper
+
+ ; The Thread obj address is stored in r11
+ CALL_GETTHREAD
+ mov r11, rax
+
+ ; r8 will hold the syncblockindex address
+ lea r8, [rcx - OFFSETOF__ObjHeader__SyncBlkIndex]
+
+ RetryThinLock:
+ ; Fetch the syncblock dword
+ mov eax, dword ptr [r8]
+ test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK
+ jnz NeedMoreTests
+
+ ; Ok, we have a "thin lock" layout - check whether the thread id matches
+ mov edx, eax
+ and edx, SBLK_MASK_LOCK_THREADID
+ cmp edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId]
+ jne FramedLockHelper
+
+ ; check the recursion level
+ test eax, SBLK_MASK_LOCK_RECLEVEL
+ jne DecRecursionLevel
+
+ ; It's zero -- we're leaving the lock.
+ ; So try to put back a zero thread id.
+ ; edx and eax match in the thread id bits, and edx is zero elsewhere, so the xor is sufficient
+ xor edx, eax
+ lock cmpxchg dword ptr [r8], edx
+ jnz RetryHelperThinLock
+
+ ; Dec the dwLockCount on the thread
+ sub dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1
+
+ ; Done, leave and set pbLockTaken if we have it
+ jmp LockReleased
+
+ DecRecursionLevel:
+ lea edx, [eax - SBLK_LOCK_RECLEVEL_INC]
+ lock cmpxchg dword ptr [r8], edx
+ jnz RetryHelperThinLock
+
+ ; We're done, leave and set pbLockTaken if we have it
+ jmp LockReleased
+
+ NeedMoreTests:
+ ; Forward all special cases to the slow helper
+ test eax, BIT_SBLK_IS_HASHCODE + BIT_SBLK_SPIN_LOCK
+ jnz FramedLockHelper
+
+ ; Get the sync block index and use it to compute the sync block pointer
+ mov rdx, qword ptr [g_pSyncTable]
+ and eax, MASK_SYNCBLOCKINDEX
+ shl eax, 4
+ mov rdx, [rdx + rax + OFFSETOF__SyncTableEntry__m_SyncBlock]
+
+ ; Was there a sync block?
+ test rdx, rdx
+ jz FramedLockHelper
+
+ ; Get a pointer to the lock object.
+ lea rdx, [rdx + OFFSETOF__SyncBlock__m_Monitor]
+
+ ; Check if the lock is held.
+ cmp qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11
+ jne FramedLockHelper
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov [rsp + 28h], rcx
+ mov [rsp + 30h], rdx
+ mov [rsp + 38h], r10
+ mov [rsp + 40h], r11
+
+ mov rcx, [rsp + MON_EXIT_STACK_SIZE ] ; return address
+ ; void LeaveSyncHelper(UINT_PTR caller, AwareLock* lock)
+ call LeaveSyncHelper
+
+ mov rcx, [rsp + 28h]
+ mov rdx, [rsp + 30h]
+ mov r10, [rsp + 38h]
+ mov r11, [rsp + 40h]
+endif
+endif
+
+ ; Reduce our recursion count
+ sub dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1
+ jz LastRecursion
+
+ ; Done, leave and set pbLockTaken if we have it
+ jmp LockReleased
+
+ RetryHelperThinLock:
+ jmp RetryThinLock
+
+ FramedLockHelper:
+ mov rdx, r10
+ add rsp, MON_EXIT_STACK_SIZE
+ ; void JITutil_MonExitWorker(Object* obj, BYTE* pbLockTaken)
+ jmp JITutil_MonExitWorker
+
+ LastRecursion:
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov rax, [rdx + OFFSETOF__AwareLock__m_HoldingThread]
+endif
+endif
+
+ sub dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1
+ mov qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], 0
+
+ Retry:
+ mov eax, dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld]
+ lea r9d, [eax - 1]
+ lock cmpxchg dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld], r9d
+ jne RetryHelper
+
+ test eax, STATE_CHECK
+ jne MustSignal
+
+ ; Done, leave and set pbLockTaken if we have it
+ jmp LockReleased
+
+ MustSignal:
+ mov rcx, rdx
+ mov rdx, r10
+ add rsp, MON_EXIT_STACK_SIZE
+ ; void JITutil_MonSignal(AwareLock* lock, BYTE* pbLockTaken)
+ jmp JITutil_MonSignal
+
+ RetryHelper:
+ jmp Retry
+
+ LockNotTaken:
+ add rsp, MON_EXIT_STACK_SIZE
+ ret
+
+ align 16
+ ; This is sensitive to the potential that pbLockTaken is null
+ LockReleased:
+ test r10, r10
+ jz LockReleased_Exit
+ mov byte ptr [r10], 0
+ LockReleased_Exit:
+ add rsp, MON_EXIT_STACK_SIZE
+ ret
+NESTED_END JIT_MonExitWorker_Slow, _TEXT
+
+; This is a frameless helper for trying to enter a monitor on an object.
+; The object is in ARGUMENT_REG1 and a timeout in ARGUMENT_REG2. This tries the
+; normal case (no object allocation) in line and calls a framed helper for the
+; other cases.
+;
+; void JIT_MonTryEnter_Slow(Object* obj, INT32 timeOut, BYTE* pbLockTaken)
+NESTED_ENTRY JIT_MonTryEnter_Slow, _TEXT
+ push_nonvol_reg rsi
+
+ alloc_stack MON_ENTER_STACK_SIZE
+
+ save_reg_postrsp rcx, MON_ENTER_STACK_SIZE + 10h + 0h
+ save_reg_postrsp rdx, MON_ENTER_STACK_SIZE + 10h + 8h
+ save_reg_postrsp r8, MON_ENTER_STACK_SIZE + 10h + 10h
+ save_reg_postrsp r9, MON_ENTER_STACK_SIZE + 10h + 18h
+
+ END_PROLOGUE
+
+ mov rsi, rdx
+
+ ; Check if the instance is NULL
+ test rcx, rcx
+ jz FramedLockHelper
+
+ ; Check if the timeout looks valid
+ cmp rdx, -1
+ jl FramedLockHelper
+
+ ; We store the thread object in r11
+ CALL_GETTHREAD
+ mov r11, rax
+
+ ; Initialize delay value for retry with exponential backoff
+ mov r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwInitialDuration]
+
+ ; Check if we can abort here
+ mov eax, dword ptr [r11 + OFFSETOF__Thread__m_State]
+ and eax, THREAD_CATCHATSAFEPOINT_BITS
+ ; Go through the slow code path to initiate ThreadAbort
+ jnz FramedLockHelper
+
+ ; r9 will hold the syncblockindex address
+ lea r9, [rcx - OFFSETOF__ObjHeader__SyncBlkIndex]
+
+ RetryThinLock:
+ ; Fetch the syncblock dword
+ mov eax, dword ptr [r9]
+
+ ; Check whether we have the "thin lock" layout, the lock is free and the spin lock bit is not set
+ test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL
+ jne NeedMoreTests
+
+ ; Everything is fine - get the thread id to store in the lock
+ mov edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId]
+
+ ; If the thread id is too large, we need a syncblock for sure
+ cmp edx, SBLK_MASK_LOCK_THREADID
+ ja FramedLockHelper
+
+ ; We want to store a new value with the current thread id set in the low 10 bits
+ or edx, eax
+ lock cmpxchg dword ptr [r9], edx
+ jnz RetryHelperThinLock
+
+ ; Got the lock, everything is fine
+ add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1
+ ; Return TRUE
+ mov byte ptr [r8], 1
+ add rsp, MON_ENTER_STACK_SIZE
+ pop rsi
+ ret
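+
+ ; Illustrative aside - the thin-lock fast path above as a C-style sketch
+ ; (pSyncBlockWord and tid are hypothetical names; the masks are the real
+ ; constants):
+ ;
+ ;   DWORD old = *pSyncBlockWord;
+ ;   if ((old & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_SPIN_LOCK |
+ ;               SBLK_MASK_LOCK_THREADID | SBLK_MASK_LOCK_RECLEVEL)) == 0 &&
+ ;       tid <= SBLK_MASK_LOCK_THREADID &&
+ ;       InterlockedCompareExchange(pSyncBlockWord, old | tid, old) == old)
+ ;   {
+ ;       pThread->m_dwLockCount++;
+ ;       *pbLockTaken = 1;        // return TRUE
+ ;   }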
+
+ NeedMoreTests:
+ ; OK, not the simple case, find out which case it is
+ test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX
+ jnz HaveHashOrSyncBlockIndex
+
+ ; The header is transitioning (the spin lock bit is set) - retry
+ test eax, BIT_SBLK_SPIN_LOCK
+ jnz RetryHelperThinLock
+
+ ; Here we know we have the "thin lock" layout, but the lock is not free.
+ ; It could still be the recursion case, compare the thread id to check
+ mov edx, eax
+ and edx, SBLK_MASK_LOCK_THREADID
+ cmp edx, dword ptr [r11 + OFFSETOF__Thread__m_ThreadId]
+ jne PrepareToWaitThinLock
+
+ ; Ok, the thread id matches, it's the recursion case.
+ ; Bump up the recursion level and check for overflow
+ lea edx, [eax + SBLK_LOCK_RECLEVEL_INC]
+ test edx, SBLK_MASK_LOCK_RECLEVEL
+ jz FramedLockHelper
+
+ ; Try to put the new recursion level back. If the header was changed in the meantime
+ ; we need a full retry, because the layout could have changed
+ lock cmpxchg dword ptr [r9], edx
+ jnz RetryHelperThinLock
+
+ ; Everything went fine and we're done, return TRUE
+ mov byte ptr [r8], 1
+ add rsp, MON_ENTER_STACK_SIZE
+ pop rsi
+ ret
+
+ PrepareToWaitThinLock:
+ ; If we are on an MP system, we try spinning for a certain number of iterations
+ cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1
+ jle FramedLockHelper
+
+ ; Exponential backoff; delay by approximately 2*r10d clock cycles
+ mov eax, r10d
+ DelayLoopThinLock:
+ pause ; indicate to the CPU that we are spin waiting
+ sub eax, 1
+ jnz DelayLoopThinLock
+
+ ; Next time, wait a factor longer
+ imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor]
+
+ cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration]
+ jle RetryHelperThinLock
+
+ jmp FramedLockHelper
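+
+ ; Illustrative aside - the spin-wait above follows this shape (sketch;
+ ; TryThinLock is a hypothetical stand-in for the retry path):
+ ;
+ ;   for (DWORD d = g_SpinConstants.dwInitialDuration; ; )
+ ;   {
+ ;       for (DWORD i = d; i != 0; i--) YieldProcessor();   // pause
+ ;       d *= g_SpinConstants.dwBackoffFactor;
+ ;       if (d > g_SpinConstants.dwMaximumDuration)
+ ;           break;                                          // give up, framed helper
+ ;       if (TryThinLock())
+ ;           return;
+ ;   }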
+
+ RetryHelperThinLock:
+ jmp RetryThinLock
+
+ HaveHashOrSyncBlockIndex:
+ ; If we have a hash code already, we need to create a sync block
+ test eax, BIT_SBLK_IS_HASHCODE
+ jnz FramedLockHelper
+
+ ; OK, we have a sync block index; just AND out the top bits and grab the syncblock index
+ and eax, MASK_SYNCBLOCKINDEX
+
+ ; Get the sync block pointer
+ mov rdx, qword ptr [g_pSyncTable]
+ shl eax, 4
+ mov rdx, [rdx + rax + OFFSETOF__SyncTableEntry__m_SyncBlock]
+
+ ; Check if the sync block has been allocated
+ test rdx, rdx
+ jz FramedLockHelper
+
+ ; Get a pointer to the lock object
+ lea rdx, [rdx + OFFSETOF__SyncBlock__m_Monitor]
+
+ RetrySyncBlock:
+ ; Attempt to acquire the lock
+ mov eax, dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld]
+ test eax, eax
+ jne HaveWaiters
+
+ ; Common case, lock isn't held and there are no waiters. Attempt to
+ ; gain ownership ourselves
+ xor ecx, ecx
+ inc ecx
+ lock cmpxchg dword ptr [rdx + OFFSETOF__AwareLock__m_MonitorHeld], ecx
+ jnz RetryHelperSyncBlock
+
+ ; Success. Save the thread object in the lock and increment the use count
+ mov qword ptr [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11
+ add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1
+ add dword ptr [r11 + OFFSETOF__Thread__m_dwLockCount], 1
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov rcx, [rsp + MON_ENTER_STACK_SIZE + 8h] ; return address
+ ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock)
+ call EnterSyncHelper
+endif
+endif
+
+ ; Return TRUE
+ mov byte ptr [r8], 1
+ add rsp, MON_ENTER_STACK_SIZE
+ pop rsi
+ ret
+
+ ; It's possible to get here with waiters but no lock held, but in this
+ ; case a signal is about to be fired which will wake up the waiter. So
+ ; for fairness' sake we should wait too.
+ ; Check first for recursive lock attempts on the same thread.
+ HaveWaiters:
+ ; Is mutex already owned by current thread?
+ cmp [rdx + OFFSETOF__AwareLock__m_HoldingThread], r11
+ jne PrepareToWait
+
+ ; Yes, bump our use count.
+ add dword ptr [rdx + OFFSETOF__AwareLock__m_Recursion], 1
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov rcx, [rsp + MON_ENTER_STACK_SIZE + 8h] ; return address
+ ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock)
+ call EnterSyncHelper
+endif
+endif
+
+ ; Return TRUE
+ mov byte ptr [r8], 1
+ add rsp, MON_ENTER_STACK_SIZE
+ pop rsi
+ ret
+
+ PrepareToWait:
+ ; If we are on an MP system, we try spinning for a certain number of iterations
+ cmp dword ptr [g_SystemInfo + OFFSETOF__g_SystemInfo__dwNumberOfProcessors], 1
+ jle WouldBlock
+
+ ; Exponential backoff; delay by approximately 2*r10d clock cycles
+ mov eax, r10d
+ DelayLoop:
+ pause ; indicate to the CPU that we are spin waiting
+ sub eax, 1
+ jnz DelayLoop
+
+ ; Next time, wait a factor longer
+ imul r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwBackoffFactor]
+
+ cmp r10d, dword ptr [g_SpinConstants + OFFSETOF__g_SpinConstants__dwMaximumDuration]
+ jle RetrySyncBlock
+
+ ; We would need to block to enter the section. Return failure if
+ ; timeout is zero, else call the framed helper to do the blocking
+ ; form of TryEnter.
+ WouldBlock:
+ test rsi, rsi
+ jnz Block
+
+ ; Return FALSE
+ mov byte ptr [r8], 0
+ add rsp, MON_ENTER_STACK_SIZE
+ pop rsi
+ ret
+
+ RetryHelperSyncBlock:
+ jmp RetrySyncBlock
+
+ Block:
+ ; In the Block case we've trashed RCX, restore it
+ mov rcx, [rsp + MON_ENTER_STACK_SIZE + 10h]
+ FramedLockHelper:
+ mov rdx, rsi
+ add rsp, MON_ENTER_STACK_SIZE
+ pop rsi
+ ; void JITutil_MonTryEnter(Object* obj, UINT32 timeout, BYTE* pbLockTaken)
+ jmp JITutil_MonTryEnter
+
+NESTED_END JIT_MonTryEnter_Slow, _TEXT
+
+MON_ENTER_STATIC_RETURN_SUCCESS macro
+ ; pbLockTaken is never null for static helpers
+ mov byte ptr [rdx], 1
+ add rsp, MIN_SIZE
+ ret
+
+ endm
+
+MON_EXIT_STATIC_RETURN_SUCCESS macro
+ ; pbLockTaken is never null for static helpers
+ mov byte ptr [rdx], 0
+ add rsp, MIN_SIZE
+ ret
+
+ endm
+
+
+; This is a frameless helper for entering a static monitor on a class.
+; The methoddesc is in ARGUMENT_REG1. This tries the normal case (no
+; blocking or object allocation) in line and calls a framed helper
+; for the other cases.
+;
+; void JIT_MonEnterStatic_Slow(AwareLock *lock, BYTE *pbLockTaken)
+NESTED_ENTRY JIT_MonEnterStatic_Slow, _TEXT
+ alloc_stack MIN_SIZE
+ END_PROLOGUE
+
+ ; Attempt to acquire the lock
+ Retry:
+ mov eax, dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld]
+ test eax, eax
+ jne HaveWaiters
+
+ ; Common case; lock isn't held and there are no waiters. Attempt to
+ ; gain ownership by ourselves.
+ mov r10d, 1
+ lock cmpxchg dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld], r10d
+ jnz RetryHelper
+
+ ; Success. Save the thread object in the lock and increment the use count.
+ CALL_GETTHREAD
+
+ mov qword ptr [rcx + OFFSETOF__AwareLock__m_HoldingThread], rax
+ add dword ptr [rcx + OFFSETOF__AwareLock__m_Recursion], 1
+ add dword ptr [rax + OFFSETOF__Thread__m_dwLockCount], 1
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ add rsp, MIN_SIZE
+ mov rdx, rcx
+ mov rcx, [rsp]
+ ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock)
+ jmp EnterSyncHelper
+endif
+endif
+ MON_ENTER_STATIC_RETURN_SUCCESS
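+
+ ; Illustrative aside - the fast path above in C-style form (sketch; a zero
+ ; m_MonitorHeld means no owner and no waiters):
+ ;
+ ;   if (InterlockedCompareExchange(&lock->m_MonitorHeld, 1, 0) == 0)
+ ;   {
+ ;       lock->m_HoldingThread = pThread;
+ ;       lock->m_Recursion++;
+ ;       pThread->m_dwLockCount++;
+ ;       *pbLockTaken = 1;
+ ;       return;
+ ;   }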
+
+ ; It's possible to get here with waiters but with no lock held; in this
+ ; case a signal is about to be fired which will wake up a waiter. So
+ ; for fairness' sake we should wait too.
+ ; Check first for recursive lock attempts on the same thread.
+ HaveWaiters:
+ CALL_GETTHREAD
+
+ ; Is mutex already owned by current thread?
+ cmp [rcx + OFFSETOF__AwareLock__m_HoldingThread], rax
+ jne PrepareToWait
+
+ ; Yes, bump our use count.
+ add dword ptr [rcx + OFFSETOF__AwareLock__m_Recursion], 1
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ mov rdx, rcx
+ mov rcx, [rsp]
+ ; void EnterSyncHelper(UINT_PTR caller, AwareLock* lock)
+ add rsp, MIN_SIZE
+ jmp EnterSyncHelper
+endif
+endif
+ MON_ENTER_STATIC_RETURN_SUCCESS
+
+ PrepareToWait:
+ add rsp, MIN_SIZE
+ ; void JITutil_MonContention(AwareLock* obj, BYTE* pbLockTaken)
+ jmp JITutil_MonContention
+
+ RetryHelper:
+ jmp Retry
+NESTED_END JIT_MonEnterStatic_Slow, _TEXT
+
+; A frameless helper for exiting a static monitor on a class.
+; The methoddesc is in ARGUMENT_REG1. This tries the normal case (no
+; blocking or object allocation) in line and calls a framed helper
+; for the other cases.
+;
+; void JIT_MonExitStatic_Slow(AwareLock *lock, BYTE *pbLockTaken)
+NESTED_ENTRY JIT_MonExitStatic_Slow, _TEXT
+ alloc_stack MIN_SIZE
+ END_PROLOGUE
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ push rsi
+ push rdi
+ mov rsi, rcx
+ mov rdi, rdx
+ mov rdx, [rsp + 8]
+ call LeaveSyncHelper
+ mov rcx, rsi
+ mov rdx, rdi
+ pop rdi
+ pop rsi
+endif
+endif
+
+ ; Check if lock is held
+ CALL_GETTHREAD
+
+ cmp [rcx + OFFSETOF__AwareLock__m_HoldingThread], rax
+ jne LockError
+
+ ; Reduce our recursion count
+ sub dword ptr [rcx + OFFSETOF__AwareLock__m_Recursion], 1
+ jz LastRecursion
+
+ MON_EXIT_STATIC_RETURN_SUCCESS
+
+ ; This is the last count we held on this lock, so release the lock
+ LastRecursion:
+ ; Thread* is in rax
+ sub dword ptr [rax + OFFSETOF__Thread__m_dwLockCount], 1
+ mov qword ptr [rcx + OFFSETOF__AwareLock__m_HoldingThread], 0
+
+ Retry:
+ mov eax, dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld]
+ lea r10d, [eax - 1]
+ lock cmpxchg dword ptr [rcx + OFFSETOF__AwareLock__m_MonitorHeld], r10d
+ jne RetryHelper
+ test eax, STATE_CHECK
+ jne MustSignal
+
+ MON_EXIT_STATIC_RETURN_SUCCESS
+
+ MustSignal:
+ add rsp, MIN_SIZE
+ ; void JITutil_MonSignal(AwareLock* lock, BYTE* pbLockTaken)
+ jmp JITutil_MonSignal
+
+ RetryHelper:
+ jmp Retry
+
+ LockError:
+ mov rcx, CORINFO_SynchronizationLockException_ASM
+ add rsp, MIN_SIZE
+ ; void JIT_InternalThrow(unsigned exceptNum)
+ jmp JIT_InternalThrow
+NESTED_END JIT_MonExitStatic_Slow, _TEXT
+
+
+ifdef _DEBUG
+
+extern Object__DEBUG_SetAppDomain:proc
+
+;
+; IN: rax: new object needing the AppDomain ID set.
+; OUT: rax, returns original value at entry
+;
+; all integer register state is preserved
+;
+DEBUG_TrialAllocSetAppDomain_STACK_SIZE equ MIN_SIZE + 10h
+NESTED_ENTRY DEBUG_TrialAllocSetAppDomain, _TEXT
+ push_vol_reg rax
+ push_vol_reg rcx
+ push_vol_reg rdx
+ push_vol_reg r8
+ push_vol_reg r9
+ push_vol_reg r10
+ push_vol_reg r11
+ push_nonvol_reg rbx
+ alloc_stack MIN_SIZE
+ END_PROLOGUE
+
+ mov rbx, rax
+
+ ; get the app domain ptr
+ CALL_GETAPPDOMAIN
+
+ ; set the sync block app domain ID
+ mov rcx, rbx
+ mov rdx, rax
+ call Object__DEBUG_SetAppDomain
+
+ ; epilog
+ add rsp, MIN_SIZE
+ pop rbx
+ pop r11
+ pop r10
+ pop r9
+ pop r8
+ pop rdx
+ pop rcx
+ pop rax
+ ret
+NESTED_END DEBUG_TrialAllocSetAppDomain, _TEXT
+
+NESTED_ENTRY DEBUG_TrialAllocSetAppDomain_NoScratchArea, _TEXT
+
+ push_nonvol_reg rbp
+ set_frame rbp, 0
+ END_PROLOGUE
+
+ sub rsp, 20h
+ and rsp, -16
+
+ call DEBUG_TrialAllocSetAppDomain
+
+ lea rsp, [rbp+0]
+ pop rbp
+ ret
+NESTED_END DEBUG_TrialAllocSetAppDomain_NoScratchArea, _TEXT
+
+endif
+
+
+ end
+
diff --git a/src/vm/amd64/PInvokeStubs.asm b/src/vm/amd64/PInvokeStubs.asm
new file mode 100644
index 0000000000..a9cd1c0829
--- /dev/null
+++ b/src/vm/amd64/PInvokeStubs.asm
@@ -0,0 +1,282 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+
+include AsmMacros.inc
+include AsmConstants.inc
+
+PInvokeStubForHostWorker equ ?PInvokeStubForHostWorker@@YAXKPEAX0@Z
+extern PInvokeStubForHostWorker:proc
+
+extern GenericPInvokeCalliStubWorker:proc
+extern VarargPInvokeStubWorker:proc
+
+PInvokeStubForHost_CALLEE_SCRATCH_SIZE = 20h
+
+PInvokeStubForHost_STACK_FRAME_SIZE = PInvokeStubForHost_CALLEE_SCRATCH_SIZE
+
+; 4 FP parameter registers
+PInvokeStubForHost_XMM_SAVE_OFFSET = PInvokeStubForHost_STACK_FRAME_SIZE
+PInvokeStubForHost_STACK_FRAME_SIZE = PInvokeStubForHost_STACK_FRAME_SIZE + 40h
+
+; Ensure that the new rsp will be 16-byte aligned.
+if ((PInvokeStubForHost_STACK_FRAME_SIZE + 8) MOD 16) ne 0
+PInvokeStubForHost_STACK_FRAME_SIZE = PInvokeStubForHost_STACK_FRAME_SIZE + 8
+endif
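+
+; Illustrative aside - the MOD-16 test above implements the usual x64 rule: the
+; caller's call instruction leaves rsp at 8 mod 16, so the frame size is padded
+; until (size + 8) is a multiple of 16. Equivalently (sketch):
+;
+;   if ((frameSize + 8) % 16 != 0) frameSize += 8;   // rsp % 16 == 0 after alloc_stack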
+
+; Return address is immediately above the local variables.
+PInvokeStubForHost_RETURN_ADDRESS_OFFSET = PInvokeStubForHost_STACK_FRAME_SIZE
+PInvokeStubForHost_PARAM_REGISTERS_OFFSET = PInvokeStubForHost_RETURN_ADDRESS_OFFSET + 8
+
+NESTED_ENTRY PInvokeStubForHost, _TEXT
+ alloc_stack PInvokeStubForHost_STACK_FRAME_SIZE
+ END_PROLOGUE
+
+ ; spill args
+ mov [rsp + PInvokeStubForHost_PARAM_REGISTERS_OFFSET + 0h], rcx
+ mov [rsp + PInvokeStubForHost_PARAM_REGISTERS_OFFSET + 8h], rdx
+ mov [rsp + PInvokeStubForHost_PARAM_REGISTERS_OFFSET + 10h], r8
+ mov [rsp + PInvokeStubForHost_PARAM_REGISTERS_OFFSET + 18h], r9
+ movdqa [rsp + PInvokeStubForHost_XMM_SAVE_OFFSET + 0h], xmm0
+ movdqa [rsp + PInvokeStubForHost_XMM_SAVE_OFFSET + 10h], xmm1
+ movdqa [rsp + PInvokeStubForHost_XMM_SAVE_OFFSET + 20h], xmm2
+ movdqa [rsp + PInvokeStubForHost_XMM_SAVE_OFFSET + 30h], xmm3
+
+ ; PInvokeStubForHostWorker(#stack args, stack frame, this)
+ mov r8, rcx
+ mov rcx, r11
+ mov rdx, rsp
+ call PInvokeStubForHostWorker
+
+ ; unspill return value
+ mov rax, [rsp + PInvokeStubForHost_XMM_SAVE_OFFSET + 0h]
+ movdqa xmm0, [rsp + PInvokeStubForHost_XMM_SAVE_OFFSET + 10h]
+
+ add rsp, PInvokeStubForHost_STACK_FRAME_SIZE
+ ret
+NESTED_END PInvokeStubForHost, _TEXT
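+
+; Illustrative aside - viewed from C, the stub above is roughly (sketch; the
+; worker stores the call's return value back into the XMM save area, which is
+; why rax and xmm0 are reloaded from its first two slots):
+;
+;   SpillArgRegistersAndXmm0To3(frame);                         // hypothetical helper
+;   PInvokeStubForHostWorker(cbStackArgs, frame, originalRcx);  // rcx, rdx, r8
+;   // reload rax and xmm0 from the XMM save area, then return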
+
+
+PInvokeStubForHostInner_STACK_FRAME_SIZE = 0
+
+; integer registers saved in prologue
+PInvokeStubForHostInner_NUM_REG_PUSHES = 2
+PInvokeStubForHostInner_STACK_FRAME_SIZE = PInvokeStubForHostInner_STACK_FRAME_SIZE + PInvokeStubForHostInner_NUM_REG_PUSHES*8
+
+; Ensure that the new rsp will be 16-byte aligned.
+if ((PInvokeStubForHostInner_STACK_FRAME_SIZE + 8) MOD 16) ne 0
+PInvokeStubForHostInner_STACK_FRAME_SIZE = PInvokeStubForHostInner_STACK_FRAME_SIZE + 8
+endif
+
+; Return address is immediately above the local variables.
+PInvokeStubForHostInner_RETURN_ADDRESS_OFFSET = PInvokeStubForHostInner_STACK_FRAME_SIZE
+PInvokeStubForHostInner_PARAM_REGISTERS_OFFSET = PInvokeStubForHostInner_RETURN_ADDRESS_OFFSET + 8
+
+PInvokeStubForHostInner_FRAME_OFFSET = PInvokeStubForHost_CALLEE_SCRATCH_SIZE
+
+; RCX - #stack args
+; RDX - PInvokeStubForHost's stack frame
+; R8 - target address
+NESTED_ENTRY PInvokeStubForHostInner, _TEXT
+
+ push_nonvol_reg rbp
+ push_nonvol_reg r12
+ alloc_stack PInvokeStubForHostInner_FRAME_OFFSET + PInvokeStubForHostInner_STACK_FRAME_SIZE - PInvokeStubForHostInner_NUM_REG_PUSHES*8
+ set_frame rbp, PInvokeStubForHostInner_FRAME_OFFSET
+ END_PROLOGUE
+
+ mov r10, r8
+ mov r12, rdx
+
+ test rcx, rcx
+ jnz HandleStackArgs
+
+ ;
+ ; Allocate space for scratch area if there are no stack args.
+ ;
+ sub rsp, PInvokeStubForHost_CALLEE_SCRATCH_SIZE
+
+DoneStackArgs:
+ ; unspill args
+ mov rcx, [r12 + PInvokeStubForHost_PARAM_REGISTERS_OFFSET + 0h]
+ mov rdx, [r12 + PInvokeStubForHost_PARAM_REGISTERS_OFFSET + 8h]
+ mov r8, [r12 + PInvokeStubForHost_PARAM_REGISTERS_OFFSET + 10h]
+ mov r9, [r12 + PInvokeStubForHost_PARAM_REGISTERS_OFFSET + 18h]
+ movdqa xmm0, [r12 + PInvokeStubForHost_XMM_SAVE_OFFSET + 0h]
+ movdqa xmm1, [r12 + PInvokeStubForHost_XMM_SAVE_OFFSET + 10h]
+ movdqa xmm2, [r12 + PInvokeStubForHost_XMM_SAVE_OFFSET + 20h]
+ movdqa xmm3, [r12 + PInvokeStubForHost_XMM_SAVE_OFFSET + 30h]
+
+ call r10
+
+ ; spill return value
+ mov [r12 + PInvokeStubForHost_XMM_SAVE_OFFSET + 0h], rax
+ movdqa [r12 + PInvokeStubForHost_XMM_SAVE_OFFSET + 10h], xmm0
+
+ ; epilogue
+ lea rsp, [rbp + PInvokeStubForHostInner_RETURN_ADDRESS_OFFSET - PInvokeStubForHostInner_NUM_REG_PUSHES*8]
+ pop r12
+ pop rbp
+ ret
+
+; INPUTS:
+; RDX - number of stack bytes
+; R12 - the outer method's frame pointer
+; RSP -
+; RBP -
+;
+HandleStackArgs:
+ ;
+ ; Allocate space for stack parameters + scratch area.
+ ;
+ sub rsp, rcx
+ and rsp, -16
+ sub rsp, PInvokeStubForHost_CALLEE_SCRATCH_SIZE
+
+ ;
+ ; Copy stack parameters
+ ;
+ shr rcx, 3 ; setup count
+
+ mov r8, rdi
+ mov r9, rsi
+
+ lea rdi, [rsp + PInvokeStubForHost_CALLEE_SCRATCH_SIZE] ; rdi -> above callee scratch area
+ lea rsi, [r12 + PInvokeStubForHost_PARAM_REGISTERS_OFFSET + PInvokeStubForHost_CALLEE_SCRATCH_SIZE]
+ rep movsq
+
+ mov rsi, r9 ; restore rsi
+ mov rdi, r8 ; restore rdi
+ jmp DoneStackArgs
+NESTED_END PInvokeStubForHostInner, _TEXT
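+
+; Illustrative aside - the stack-argument copy in HandleStackArgs is, in effect
+; (sketch; dst/src are hypothetical names for the rdi/rsi set up above):
+;
+;   size_t count = cbStackArgs / 8;          // shr rcx, 3
+;   UINT64 *dst = newFrameArgs, *src = outerFrameArgs;
+;   while (count--) *dst++ = *src++;         // rep movsq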
+
+
+;
+; in:
+; PINVOKE_CALLI_TARGET_REGISTER (r10) = unmanaged target
+; PINVOKE_CALLI_SIGTOKEN_REGNUM (r11) = sig token
+;
+; out:
+; METHODDESC_REGISTER (r10) = unmanaged target
+;
+LEAF_ENTRY GenericPInvokeCalliHelper, _TEXT
+
+ ;
+ ; check for existing IL stub
+ ;
+ mov rax, [PINVOKE_CALLI_SIGTOKEN_REGISTER + OFFSETOF__VASigCookie__pNDirectILStub]
+ test rax, rax
+ jz GenericPInvokeCalliGenILStub
+
+ ;
+ ; We need to distinguish between a MethodDesc* and an unmanaged target in PInvokeStubForHost().
+; The way we do this is to shift the unmanaged target to the left by one bit and then set the
+ ; least significant bit to 1. This works because MethodDesc* are always 8-byte aligned.
+ ;
+ shl PINVOKE_CALLI_TARGET_REGISTER, 1
+ or PINVOKE_CALLI_TARGET_REGISTER, 1
+
+ ;
+ ; jump to existing IL stub
+ ;
+ jmp rax
+
+LEAF_END GenericPInvokeCalliHelper, _TEXT
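+
+; Illustrative aside - the tagging scheme above as a C-style sketch (function
+; names hypothetical). It relies on MethodDesc* values being 8-byte aligned,
+; so a tagged unmanaged target is always distinguishable by its low bit:
+;
+;   UINT_PTR Tag(UINT_PTR target)  { return (target << 1) | 1; }
+;   BOOL     IsTagged(UINT_PTR v)  { return (v & 1) != 0; }
+;   UINT_PTR Untag(UINT_PTR v)     { return v >> 1; }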
+
+NESTED_ENTRY GenericPInvokeCalliGenILStub, _TEXT
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ ;
+ ; save target
+ ;
+ mov r12, METHODDESC_REGISTER
+ mov r13, PINVOKE_CALLI_SIGTOKEN_REGISTER
+
+ ;
+ ; GenericPInvokeCalliStubWorker(TransitionBlock * pTransitionBlock, VASigCookie * pVASigCookie, PCODE pUnmanagedTarget)
+ ;
+ lea rcx, [rsp + __PWTB_TransitionBlock] ; pTransitionBlock*
+ mov rdx, PINVOKE_CALLI_SIGTOKEN_REGISTER ; pVASigCookie
+ mov r8, METHODDESC_REGISTER ; pUnmanagedTarget
+ call GenericPInvokeCalliStubWorker
+
+ ;
+ ; restore target
+ ;
+ mov METHODDESC_REGISTER, r12
+ mov PINVOKE_CALLI_SIGTOKEN_REGISTER, r13
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ jmp GenericPInvokeCalliHelper
+
+NESTED_END GenericPInvokeCalliGenILStub, _TEXT
+
+LEAF_ENTRY VarargPInvokeStub, _TEXT
+ mov PINVOKE_CALLI_SIGTOKEN_REGISTER, rcx
+ jmp VarargPInvokeStubHelper
+LEAF_END VarargPInvokeStub, _TEXT
+
+LEAF_ENTRY VarargPInvokeStub_RetBuffArg, _TEXT
+ mov PINVOKE_CALLI_SIGTOKEN_REGISTER, rdx
+ jmp VarargPInvokeStubHelper
+LEAF_END VarargPInvokeStub_RetBuffArg, _TEXT
+
+LEAF_ENTRY VarargPInvokeStubHelper, _TEXT
+ ;
+ ; check for existing IL stub
+ ;
+ mov rax, [PINVOKE_CALLI_SIGTOKEN_REGISTER + OFFSETOF__VASigCookie__pNDirectILStub]
+ test rax, rax
+ jz VarargPInvokeGenILStub
+
+ ;
+ ; jump to existing IL stub
+ ;
+ jmp rax
+
+LEAF_END VarargPInvokeStubHelper, _TEXT
+
+;
+; IN: METHODDESC_REGISTER (R10) stub secret param
+; PINVOKE_CALLI_SIGTOKEN_REGISTER (R11) VASigCookie*
+;
+; ASSUMES: we already checked for an existing stub to use
+;
+NESTED_ENTRY VarargPInvokeGenILStub, _TEXT
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ ;
+ ; save target
+ ;
+ mov r12, METHODDESC_REGISTER
+ mov r13, PINVOKE_CALLI_SIGTOKEN_REGISTER
+
+ ;
+ ; VarargPInvokeStubWorker(TransitionBlock * pTransitionBlock, VASigCookie *pVASigCookie, MethodDesc *pMD)
+ ;
+ lea rcx, [rsp + __PWTB_TransitionBlock] ; pTransitionBlock*
+ mov rdx, PINVOKE_CALLI_SIGTOKEN_REGISTER ; pVASigCookie
+ mov r8, METHODDESC_REGISTER ; pMD
+ call VarargPInvokeStubWorker
+
+ ;
+ ; restore target
+ ;
+ mov METHODDESC_REGISTER, r12
+ mov PINVOKE_CALLI_SIGTOKEN_REGISTER, r13
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ jmp VarargPInvokeStubHelper
+
+NESTED_END VarargPInvokeGenILStub, _TEXT
+
+ end
diff --git a/src/vm/amd64/RedirectedHandledJITCase.asm b/src/vm/amd64/RedirectedHandledJITCase.asm
new file mode 100644
index 0000000000..1e393a93b1
--- /dev/null
+++ b/src/vm/amd64/RedirectedHandledJITCase.asm
@@ -0,0 +1,240 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+
+include AsmMacros.inc
+include asmconstants.inc
+
+Thread__GetAbortContext equ ?GetAbortContext@Thread@@QEAAPEAU_CONTEXT@@XZ
+
+extern FixContextHandler:proc
+extern LinkFrameAndThrow:proc
+extern GetCurrentSavedRedirectContext:proc
+extern Thread__GetAbortContext:proc
+extern HijackHandler:proc
+extern ThrowControlForThread:proc
+extern FixRedirectContextHandler:proc
+
+;
+; WARNING!! These functions immediately ruin thread unwindability. This is
+; WARNING!! OK as long as there is a mechanism for saving the thread context
+; WARNING!! prior to running these functions as well as a mechanism for
+; WARNING!! restoring the context prior to any stackwalk. This means that
+; WARNING!! we need to ensure that no GC can occur while the stack is
+; WARNING!! unwalkable. This further means that we cannot allow any exception
+; WARNING!! to occur when the stack is unwalkable
+;
+
+
+; If you edit this macro, make sure you update GetCONTEXTFromRedirectedStubStackFrame.
+; This function is used by both the personality routine and the debugger to retrieve the original CONTEXT.
+GenerateRedirectedHandledJITCaseStub macro reason
+
+extern ?RedirectedHandledJITCaseFor&reason&@Thread@@CAXXZ:proc
+
+NESTED_ENTRY RedirectedHandledJITCaseFor&reason&_Stub, _TEXT, FixRedirectContextHandler
+
+ ;
+ ; To aid debugging, we'll fake a call to this function. Allocate an
+ ; extra stack slot that is hidden from the unwind info, where we can
+ ; stuff the "return address". If we wanted to preserve full
+ ; unwindability, we would need to copy all preserved registers.
+ ; Ordinarily, rbp is used for the frame pointer, so since we're only
+ ; interested in debuggability, we'll just handle that common case.
+ ;
+
+ push rax ; where to stuff the fake return address
+ push_nonvol_reg rbp ; save interrupted rbp for stack walk
+ alloc_stack 28h ; CONTEXT*, callee scratch area
+ set_frame rbp, 0
+
+.errnz REDIRECTSTUB_ESTABLISHER_OFFSET_RBP, REDIRECTSTUB_ESTABLISHER_OFFSET_RBP has changed - update asm stubs
+
+ END_PROLOGUE
+
+ ;
+ ; Align rsp. rsp must be misaligned at entry to any C function.
+ ;
+ and rsp, -16
+
+ ;
+ ; Save a copy of the redirect CONTEXT* in case an exception occurs.
+ ; The personality routine will use this to restore unwindability for
+ ; the exception dispatcher.
+ ;
+ call GetCurrentSavedRedirectContext
+
+ mov [rbp+20h], rax
+.errnz REDIRECTSTUB_RBP_OFFSET_CONTEXT - 20h, REDIRECTSTUB_RBP_OFFSET_CONTEXT has changed - update asm stubs
+
+ ;
+ ; Fetch the interrupted rip and save it as our return address.
+ ;
+ mov rax, [rax + OFFSETOF__CONTEXT__Rip]
+ mov [rbp+30h], rax
+
+ ;
+ ; Call target, which will do whatever we needed to do in the context
+ ; of the target thread, and will RtlRestoreContext when it is done.
+ ;
+ call ?RedirectedHandledJITCaseFor&reason&@Thread@@CAXXZ
+
+ int 3 ; target shouldn't return.
+
+; Put a label here to tell the debugger where the end of this function is.
+PATCH_LABEL RedirectedHandledJITCaseFor&reason&_StubEnd
+
+NESTED_END RedirectedHandledJITCaseFor&reason&_Stub, _TEXT
+
+ endm
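+
+; Illustrative aside - in outline, each generated stub does the following
+; (sketch; the target restores the saved CONTEXT and never returns):
+;
+;   CONTEXT *pCtx = GetCurrentSavedRedirectContext();
+;   fakeReturnAddress = pCtx->Rip;                  // aids debugger stack walks
+;   Thread::RedirectedHandledJITCaseFor<reason>();  // ends in RtlRestoreContext
+;   __debugbreak();                                 // unreachable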
+
+
+GenerateRedirectedHandledJITCaseStub GCThreadControl
+GenerateRedirectedHandledJITCaseStub DbgThreadControl
+GenerateRedirectedHandledJITCaseStub UserSuspend
+GenerateRedirectedHandledJITCaseStub YieldTask
+
+ifdef _DEBUG
+ifdef HAVE_GCCOVER
+GenerateRedirectedHandledJITCaseStub GCStress
+endif
+endif
+
+
+; scratch area; padding; GSCookie
+OFFSET_OF_FRAME = SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES + 8 + SIZEOF_GSCookie
+
+; force evaluation to avoid "expression is too complex errors"
+SIZEOF__FaultingExceptionFrame = SIZEOF__FaultingExceptionFrame
+
+GenerateRedirectedStubWithFrame macro STUB, FILTER, TARGET
+
+altentry STUB&_RspAligned
+
+NESTED_ENTRY STUB, _TEXT, FILTER
+
+ ;
+ ; IN: rcx: original IP before redirect
+ ;
+
+ mov rdx, rsp
+
+ ; This push of the return address must not be recorded in the unwind
+ ; info. After this push, unwinding will work.
+ push rcx
+
+ test rsp, 0fh
+ jnz STUB&_FixRsp
+
+STUB&_RspAligned:
+
+ ; Any stack operations hereafter must be recorded in the unwind info, but
+ ; only nonvolatile register locations are needed. Anything else is only
+ ; a "sub rsp, 8" to the unwinder.
+
+ ; m_ctx must be 16-byte aligned
+.errnz (OFFSET_OF_FRAME + SIZEOF__FaultingExceptionFrame) MOD 16
+
+ alloc_stack OFFSET_OF_FRAME + SIZEOF__FaultingExceptionFrame
+
+.errnz THROWSTUB_ESTABLISHER_OFFSET_FaultingExceptionFrame - OFFSET_OF_FRAME, THROWSTUB_ESTABLISHER_OFFSET_FaultingExceptionFrame has changed - update asm stubs
+
+ END_PROLOGUE
+
+ lea rcx, [rsp + OFFSET_OF_FRAME]
+
+ mov dword ptr [rcx], 0 ; Initialize vtbl (it is not strictly necessary)
+ mov dword ptr [rcx + OFFSETOF__FaultingExceptionFrame__m_fFilterExecuted], 0 ; Initialize BOOL for personality routine
+
+ call TARGET
+
+ ; Target should not return.
+ int 3
+
+NESTED_END STUB, _TEXT
+
+; This function is used by the stub above to adjust the stack alignment. The
+; stub can't conditionally push something on the stack because the unwind
+; encodings have no way to express that.
+;
+; CONSIDER: we could move the frame pointer above the FaultingExceptionFrame,
+; and detect the misalignment adjustment in
+; GetFrameFromRedirectedStubStackFrame. This is probably less code and more
+; straightforward.
+LEAF_ENTRY STUB&_FixRsp, _TEXT
+
+ call STUB&_RspAligned
+
+ ; Target should not return.
+ int 3
+
+LEAF_END STUB&_FixRsp, _TEXT
+
+ endm
+
+
+REDIRECT_FOR_THROW_CONTROL_FRAME_SIZE = SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES + 8
+
+NESTED_ENTRY RedirectForThrowControl2, _TEXT
+
+ ; On entry
+ ; rcx -> FaultingExceptionFrame
+ ; rdx -> Original RSP
+
+ alloc_stack REDIRECT_FOR_THROW_CONTROL_FRAME_SIZE
+
+ save_reg_postrsp rcx, REDIRECT_FOR_THROW_CONTROL_FRAME_SIZE + 8h ; FaultingExceptionFrame
+ save_reg_postrsp rdx, REDIRECT_FOR_THROW_CONTROL_FRAME_SIZE + 10h ; Original RSP
+
+ END_PROLOGUE
+
+ ; Fetch rip from a CONTEXT, and store it as our return address.
+ CALL_GETTHREAD
+
+ mov rcx, rax
+ call Thread__GetAbortContext
+
+ mov rax, [rax + OFFSETOF__CONTEXT__Rip]
+ mov rdx, [rsp + REDIRECT_FOR_THROW_CONTROL_FRAME_SIZE + 10h] ; Original RSP
+ mov [rdx - 8], rax
+
+ mov rcx, [rsp + REDIRECT_FOR_THROW_CONTROL_FRAME_SIZE + 8h] ; FaultingExceptionFrame
+ call ThrowControlForThread
+
+ ; ThrowControlForThread doesn't return.
+ int 3
+
+NESTED_END RedirectForThrowControl2, _TEXT
+
+GenerateRedirectedStubWithFrame RedirectForThrowControl, HijackHandler, RedirectForThrowControl2
+
+
+NAKED_THROW_HELPER_FRAME_SIZE = SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES + 8
+
+NESTED_ENTRY NakedThrowHelper2, _TEXT
+
+ ; On entry
+ ; rcx -> FaultingExceptionFrame
+
+ alloc_stack NAKED_THROW_HELPER_FRAME_SIZE
+ END_PROLOGUE
+
+ call LinkFrameAndThrow
+
+ ; LinkFrameAndThrow doesn't return.
+ int 3
+
+NESTED_END NakedThrowHelper2, _TEXT
+
+GenerateRedirectedStubWithFrame NakedThrowHelper, FixContextHandler, NakedThrowHelper2
+
+
+ end
+
diff --git a/src/vm/amd64/RemotingThunksAMD64.asm b/src/vm/amd64/RemotingThunksAMD64.asm
new file mode 100644
index 0000000000..bea067dfdb
--- /dev/null
+++ b/src/vm/amd64/RemotingThunksAMD64.asm
@@ -0,0 +1,304 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+include AsmMacros.inc
+include AsmConstants.inc
+ifdef FEATURE_REMOTING
+
+extern CallDescrWorkerUnwindFrameChainHandler:proc
+
+extern TransparentProxyStubWorker:proc
+
+; Stack frame layout:
+;
+; (stack parameters)
+; ...
+; r9
+; r8
+; rdx
+; rcx <- TPSCC_PARAMS_OFFSET
+; return address <- TPSCC_STACK_FRAME_SIZE
+; r10 <- TPSCC_R10_OFFSET
+; xmm3
+; xmm2
+; xmm1
+; xmm0 <- TPSCC_XMM_SAVE_OFFSET
+; callee's r9
+; callee's r8
+; callee's rdx
+; callee's rcx
+
+TPSCC_XMM_SAVE_OFFSET = 20h
+TPSCC_R10_OFFSET = 60h
+TPSCC_STACK_FRAME_SIZE = 68h
+TPSCC_PARAMS_OFFSET = 70h
+
+TRANSPARENT_PROXY_STUB_PROLOGUE macro
+ alloc_stack TPSCC_STACK_FRAME_SIZE
+
+ save_reg_postrsp r10, TPSCC_R10_OFFSET
+
+ SAVE_ARGUMENT_REGISTERS TPSCC_PARAMS_OFFSET
+ SAVE_FLOAT_ARGUMENT_REGISTERS TPSCC_XMM_SAVE_OFFSET
+
+ END_PROLOGUE
+
+ endm
+
+NESTED_ENTRY TransparentProxyStub, _TEXT, CallDescrWorkerUnwindFrameChainHandler
+
+ TRANSPARENT_PROXY_STUB_PROLOGUE
+
+ ;; rcx: this
+ ;; [rsp]: slot number
+
+ mov rax, [rcx + TransparentProxyObject___stub]
+ mov rcx, [rcx + TransparentProxyObject___stubData]
+ call rax
+
+ RESTORE_ARGUMENT_REGISTERS TPSCC_PARAMS_OFFSET
+ RESTORE_FLOAT_ARGUMENT_REGISTERS TPSCC_XMM_SAVE_OFFSET
+
+ mov r10, [rsp + TPSCC_R10_OFFSET]
+
+ test rax, rax
+ jnz CrossContext
+
+ mov r11, [rcx + TransparentProxyObject___pMT]
+
+ ; Convert the slot number (r10) into the code address (in rax)
+ ; See MethodTable.h for details on vtable layout
+ shr r10, MethodTable_VtableSlotsPerChunkLog2
+ mov rax, [r11 + r10*8 + METHODTABLE_OFFSET_VTABLE]
+
+ mov r10, [rsp + TPSCC_R10_OFFSET] ; Reload the slot
+ and r10, MethodTable_VtableSlotsPerChunk-1
+ mov rax, [rax + r10*8]
+
+ add rsp, TPSCC_STACK_FRAME_SIZE
+ TAILJMP_RAX
+
+CrossContext:
+ add rsp, TPSCC_STACK_FRAME_SIZE
+ jmp TransparentProxyStub_CrossContext
+
+NESTED_END TransparentProxyStub, _TEXT
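+
+; Illustrative aside - the slot-to-code conversion above, given the chunked
+; vtable layout (sketch; vtableChunks is a hypothetical field name, the
+; MethodTable_* constants are real):
+;
+;   size_t chunk = slot >> MethodTable_VtableSlotsPerChunkLog2;
+;   size_t index = slot & (MethodTable_VtableSlotsPerChunk - 1);
+;   PCODE target = pMT->vtableChunks[chunk][index];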
+
+
+NESTED_ENTRY TransparentProxyStub_CrossContext, _TEXT
+
+ PROLOG_WITH_TRANSITION_BLOCK 8
+
+ ;
+ ; Call TransparentProxyStubWorker.
+ ;
+ lea rcx, [rsp + __PWTB_TransitionBlock] ; pTransitionBlock
+ mov rdx, r10 ; MethodDesc *
+ call TransparentProxyStubWorker
+
+ ; handle FP return values
+
+ lea rcx, [rsp + __PWTB_FloatArgumentRegisters - 8]
+ cmp rax, 4
+ jne @F
+ movss xmm0, real4 ptr [rcx]
+@@:
+ cmp rax, 8
+ jne @F
+ movsd xmm0, real8 ptr [rcx]
+@@:
+ ; load return value
+ mov rax, [rcx]
+
+ EPILOG_WITH_TRANSITION_BLOCK_RETURN
+
+NESTED_END TransparentProxyStub_CrossContext, _TEXT
+
+LEAF_ENTRY TransparentProxyStubPatch, _TEXT
+ ; make sure that the basic block is unique
+ test eax,12
+PATCH_LABEL TransparentProxyStubPatchLabel
+ ret
+LEAF_END TransparentProxyStubPatch, _TEXT
+
+;+----------------------------------------------------------------------------
+;
+; Method: CRemotingServices::CallFieldGetter private
+;
+; Synopsis: Calls the field getter function (Object::__FieldGetter) in
+; managed code by setting up the stack and calling the target
+;
+;+----------------------------------------------------------------------------
+; extern "C"
+;void __stdcall CRemotingServices__CallFieldGetter( MethodDesc *pMD,
+; LPVOID pThis,
+; LPVOID pFirst,
+; LPVOID pSecond,
+; LPVOID pThird
+; )
+LEAF_ENTRY CRemotingServices__CallFieldGetter, _TEXT
+
+; +28 pThird
+; +20 scratch area
+; +18 scratch area
+; +10 scratch area
+; + 8 scratch area
+; rsp return address
+
+ mov METHODDESC_REGISTER, rcx
+ mov rcx, rdx
+ mov rdx, r8
+ mov r8, r9
+ mov r9, [rsp + 28h]
+ jmp TransparentProxyStub
+
+LEAF_END CRemotingServices__CallFieldGetter, _TEXT
+
+
+;+----------------------------------------------------------------------------
+;
+; Method: CRemotingServices::CallFieldSetter private
+;
+; Synopsis: Calls the field setter function (Object::__FieldSetter) in
+; managed code by setting up the stack and calling the target
+;
+;+----------------------------------------------------------------------------
+; extern "C"
+;void __stdcall CRemotingServices__CallFieldSetter( MethodDesc *pMD,
+; LPVOID pThis,
+; LPVOID pFirst,
+; LPVOID pSecond,
+; LPVOID pThird
+; )
+LEAF_ENTRY CRemotingServices__CallFieldSetter, _TEXT
+
+; +28 pThird
+; +20 scratch area
+; +18 scratch area
+; +10 scratch area
+; + 8 scratch area
+; rsp return address
+
+ mov METHODDESC_REGISTER, rcx
+ mov rcx, rdx
+ mov rdx, r8
+ mov r8, r9
+ mov r9, [rsp + 28h]
+ jmp TransparentProxyStub
+
+LEAF_END CRemotingServices__CallFieldSetter, _TEXT
+
+
+;; extern "C" ARG_SLOT __stdcall CTPMethodTable__CallTargetHelper2(const void *pTarget,
+;; LPVOID pvFirst,
+;; LPVOID pvSecond);
+NESTED_ENTRY CTPMethodTable__CallTargetHelper2, _TEXT, CallDescrWorkerUnwindFrameChainHandler
+ alloc_stack 28h ;; alloc callee scratch and align the stack
+ END_PROLOGUE
+
+ mov rax, rcx ; rax <- call target
+ mov rcx, rdx ; rcx <- first arg
+ mov rdx, r8 ; rdx <- second arg
+
+ call rax
+ ;; It is important to have an instruction between the previous call and the epilog.
+ ;; If the return address is in the epilog, the OS won't call the personality
+ ;; routine because it assumes the personality routine cannot help in this case.
+ nop
+
+ ; epilog
+ add rsp, 28h
+ ret
+NESTED_END CTPMethodTable__CallTargetHelper2, _TEXT
+
+;; extern "C" ARG_SLOT __stdcall CTPMethodTable__CallTargetHelper2(const void *pTarget,
+;; LPVOID pvFirst,
+;; LPVOID pvSecond,
+;; LPVOID pvThird);
+NESTED_ENTRY CTPMethodTable__CallTargetHelper3, _TEXT, CallDescrWorkerUnwindFrameChainHandler
+ alloc_stack 28h ;; alloc callee scratch and align the stack
+ END_PROLOGUE
+
+ mov rax, rcx ; rax <- call target
+ mov rcx, rdx ; rcx <- first arg
+ mov rdx, r8 ; rdx <- second arg
+ mov r8, r9 ; r8 <- third arg
+
+ call rax
+
+ ;; It is important to have an instruction between the previous call and the epilog.
+ ;; If the return address is in the epilog, the OS won't call the personality
+ ;; routine because it assumes the personality routine cannot help in this case.
+ nop
+
+ ; epilog
+ add rsp, 28h
+ ret
+NESTED_END CTPMethodTable__CallTargetHelper3, _TEXT
+
+NESTED_ENTRY CRemotingServices__DispatchInterfaceCall, _TEXT, CallDescrWorkerUnwindFrameChainHandler
+
+ TRANSPARENT_PROXY_STUB_PROLOGUE
+
+ ;
+ ; 'this' is a TransparentProxy. Call the stub to see if we need to cross contexts.
+ ;
+
+ mov rax, [rcx + TransparentProxyObject___stub]
+ mov rcx, [rcx + TransparentProxyObject___stubData]
+ call rax
+
+ test rax, rax
+ jnz CrossContext
+
+extern VSD_GetTargetForTPWorkerQuick:proc
+ mov rcx, [rsp + TPSCC_PARAMS_OFFSET] ; rcx <- this
+ mov rdx, [rsp + TPSCC_R10_OFFSET] ; rdx <- Get the MethodDesc* or slot number
+ call VSD_GetTargetForTPWorkerQuick
+
+ RESTORE_ARGUMENT_REGISTERS TPSCC_PARAMS_OFFSET
+ RESTORE_FLOAT_ARGUMENT_REGISTERS TPSCC_XMM_SAVE_OFFSET
+
+ mov r10, [rsp + TPSCC_R10_OFFSET]
+
+ test rax, rax ; Did we find a target?
+ jz SlowDispatch
+
+ add rsp, TPSCC_STACK_FRAME_SIZE
+ TAILJMP_RAX
+
+SlowDispatch:
+ add rsp, TPSCC_STACK_FRAME_SIZE
+ jmp InContextTPDispatchAsmStub
+
+CrossContext:
+ RESTORE_ARGUMENT_REGISTERS TPSCC_PARAMS_OFFSET
+ RESTORE_FLOAT_ARGUMENT_REGISTERS TPSCC_XMM_SAVE_OFFSET
+
+ mov r10, [rsp + TPSCC_R10_OFFSET]
+
+ add rsp, TPSCC_STACK_FRAME_SIZE
+ jmp TransparentProxyStub_CrossContext
+
+NESTED_END CRemotingServices__DispatchInterfaceCall, _TEXT
+
+NESTED_ENTRY InContextTPDispatchAsmStub, _TEXT
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+extern VSD_GetTargetForTPWorker:proc
+ lea rcx, [rsp + __PWTB_TransitionBlock] ; pTransitionBlock
+ mov rdx, r10 ; token
+ call VSD_GetTargetForTPWorker
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ TAILJMP_RAX
+
+NESTED_END InContextTPDispatchAsmStub, _TEXT
+
+endif ; FEATURE_REMOTING
+
+ end
+
diff --git a/src/vm/amd64/ThePreStubAMD64.asm b/src/vm/amd64/ThePreStubAMD64.asm
new file mode 100644
index 0000000000..1fbc9190c9
--- /dev/null
+++ b/src/vm/amd64/ThePreStubAMD64.asm
@@ -0,0 +1,37 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+include <AsmMacros.inc>
+include AsmConstants.inc
+
+ extern PreStubWorker:proc
+ extern ProcessCLRException:proc
+
+NESTED_ENTRY ThePreStub, _TEXT, ProcessCLRException
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ ;
+ ; call PreStubWorker
+ ;
+ lea rcx, [rsp + __PWTB_TransitionBlock] ; pTransitionBlock*
+ mov rdx, METHODDESC_REGISTER
+ call PreStubWorker
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ TAILJMP_RAX
+
+NESTED_END ThePreStub, _TEXT
+
+LEAF_ENTRY ThePreStubPatch, _TEXT
+ ; make sure that the basic block is unique
+ test eax,34
+PATCH_LABEL ThePreStubPatchLabel
+ ret
+LEAF_END ThePreStubPatch, _TEXT
+
+
+
+end
diff --git a/src/vm/amd64/TlsGetters.asm b/src/vm/amd64/TlsGetters.asm
new file mode 100644
index 0000000000..65e49b286f
--- /dev/null
+++ b/src/vm/amd64/TlsGetters.asm
@@ -0,0 +1,121 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+; ***********************************************************************
+; File: TlsGetters.asm, see history in jithelp.asm
+;
+; Notes: These TlsGetters (GetAppDomain(), GetThread()) are implemented
+; in a generic fashion, but might be patched at runtime to contain
+; a much faster implementation which goes straight to the TLS for
+; the Thread* or AppDomain*.
+;
+; Note that the macro takes special care to not have these become
+; non-unwindable after the patching has overwritten the prologue of
+; the generic getter.
+; ***********************************************************************
+
+include AsmMacros.inc
+include asmconstants.inc
+
+; Min amount of stack space that a nested function should allocate.
+MIN_SIZE equ 28h
+
+
+; These generic TLS getters are used for GetThread() and GetAppDomain(). They do a little
+; extra work to ensure that certain registers are preserved; those include the following
+; volatile registers:
+;
+; rcx
+; rdx
+; r8
+; r9
+; r10
+; r11
+;
+; The return value is in rax as usual
+;
+; They DO NOT save the scratch floating-point registers; if you need those preserved, save them yourself.
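+
+; Illustrative aside - when no full-check hook is involved, each generic getter
+; is logically just (sketch):
+;
+;   void* GetThreadGeneric() { return TlsGetValue(gThreadTLSIndex); }
+;
+; with the extra work being the preservation of the registers listed above
+; across the TlsGetValue call.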
+
+ifdef ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+GetThreadGenericFullCheck equ ?GetThreadGenericFullCheck@@YAPEAVThread@@XZ
+extern GetThreadGenericFullCheck:proc
+endif ; ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+
+; Creates a generic TLS getter using the value from TLS slot gTLSIndex. Set GenerateGetThread
+; when using this macro to generate GetThread, as that will cause special code to be generated which
+; enables additional debug-only checking, such as enforcement of EE_THREAD_NOT_REQUIRED contracts
+GenerateOptimizedTLSGetter macro name, GenerateGetThread
+
+extern g&name&TLSIndex:dword
+extern __imp_TlsGetValue:qword
+
+SIZEOF_PUSHED_ARGS equ 10h
+
+NESTED_ENTRY Get&name&Generic, _TEXT
+ push_vol_reg r10
+ push_vol_reg r11
+ alloc_stack MIN_SIZE
+
+ ; save argument registers in shadow space
+ save_reg_postrsp rcx, MIN_SIZE + 8h + SIZEOF_PUSHED_ARGS
+ save_reg_postrsp rdx, MIN_SIZE + 10h + SIZEOF_PUSHED_ARGS
+ save_reg_postrsp r8, MIN_SIZE + 18h + SIZEOF_PUSHED_ARGS
+ save_reg_postrsp r9, MIN_SIZE + 20h + SIZEOF_PUSHED_ARGS
+ END_PROLOGUE
+
+ifdef _DEBUG
+ cmp dword ptr [g&name&TLSIndex], -1
+ jnz @F
+ int 3
+@@:
+endif ; _DEBUG
+
+CALL_GET_THREAD_GENERIC_FULL_CHECK=0
+
+ifdef ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+if GenerateGetThread
+
+; We are generating the GetThread() TLS getter, and GetThreadGenericFullCheck is
+; defined in native code (threads.cpp), so we'll want to delegate directly to
+; GetThreadGenericFullCheck, which may choose to do additional checking, such
+; as enforcing EE_THREAD_NOT_REQUIRED contracts
+CALL_GET_THREAD_GENERIC_FULL_CHECK=1
+
+endif ; GenerateGetThread
+endif ; ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+
+if CALL_GET_THREAD_GENERIC_FULL_CHECK
+ call GetThreadGenericFullCheck
+else
+ ; Not generating the GetThread() tlsgetter (or there is no GetThreadGenericFullCheck
+ ; to call), so do nothing special--just look up the value stored at TLS slot gTLSIndex
+ mov ecx, [g&name&TLSIndex]
+ call [__imp_TlsGetValue]
+endif
+
+ ; restore arguments from shadow space
+ mov rcx, [rsp + MIN_SIZE + 8h + SIZEOF_PUSHED_ARGS]
+ mov rdx, [rsp + MIN_SIZE + 10h + SIZEOF_PUSHED_ARGS]
+ mov r8, [rsp + MIN_SIZE + 18h + SIZEOF_PUSHED_ARGS]
+ mov r9, [rsp + MIN_SIZE + 20h + SIZEOF_PUSHED_ARGS]
+
+ ; epilog
+ add rsp, MIN_SIZE
+ pop r11
+ pop r10
+ ret
+NESTED_END Get&name&Generic, _TEXT
+
+ endm
+
+GenerateOptimizedTLSGetter Thread, 1
+GenerateOptimizedTLSGetter AppDomain, 0
+
+ end
diff --git a/src/vm/amd64/UMThunkStub.asm b/src/vm/amd64/UMThunkStub.asm
new file mode 100644
index 0000000000..05e25da156
--- /dev/null
+++ b/src/vm/amd64/UMThunkStub.asm
@@ -0,0 +1,609 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+
+include <AsmMacros.inc>
+include AsmConstants.inc
+
+ifdef FEATURE_MIXEDMODE
+IJWNOADThunk__MakeCall equ ?MakeCall@IJWNOADThunk@@KAXXZ
+IJWNOADThunk__FindThunkTarget equ ?FindThunkTarget@IJWNOADThunk@@QEAAPEBXXZ
+endif
+gfHostConfig equ ?g_fHostConfig@@3KA
+NDirect__IsHostHookEnabled equ ?IsHostHookEnabled@NDirect@@SAHXZ
+
+extern CreateThreadBlockThrow:proc
+extern TheUMEntryPrestubWorker:proc
+ifdef FEATURE_MIXEDMODE
+extern IJWNOADThunk__FindThunkTarget:proc
+endif
+extern UMEntryPrestubUnwindFrameChainHandler:proc
+extern UMThunkStubUnwindFrameChainHandler:proc
+extern g_TrapReturningThreads:dword
+extern UM2MDoADCallBack:proc
+extern ReverseEnterRuntimeHelper:proc
+extern ReverseLeaveRuntimeHelper:proc
+extern gfHostConfig:dword
+extern NDirect__IsHostHookEnabled:proc
+extern UMThunkStubRareDisableWorker:proc
+
+
+;
+; METHODDESC_REGISTER: UMEntryThunk*
+;
+NESTED_ENTRY TheUMEntryPrestub, _TEXT, UMEntryPrestubUnwindFrameChainHandler
+
+TheUMEntryPrestub_STACK_FRAME_SIZE = SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES
+
+; XMM save area
+TheUMEntryPrestub_XMM_SAVE_OFFSET = TheUMEntryPrestub_STACK_FRAME_SIZE
+TheUMEntryPrestub_STACK_FRAME_SIZE = TheUMEntryPrestub_STACK_FRAME_SIZE + SIZEOF_MAX_FP_ARG_SPILL
+
+; Ensure that the new rsp will be 16-byte aligned. Note that the caller has
+; already pushed the return address.
+if ((TheUMEntryPrestub_STACK_FRAME_SIZE + 8) MOD 16) ne 0
+TheUMEntryPrestub_STACK_FRAME_SIZE = TheUMEntryPrestub_STACK_FRAME_SIZE + 8
+endif
+
+ alloc_stack TheUMEntryPrestub_STACK_FRAME_SIZE
+
+ save_reg_postrsp rcx, TheUMEntryPrestub_STACK_FRAME_SIZE + 8h
+ save_reg_postrsp rdx, TheUMEntryPrestub_STACK_FRAME_SIZE + 10h
+ save_reg_postrsp r8, TheUMEntryPrestub_STACK_FRAME_SIZE + 18h
+ save_reg_postrsp r9, TheUMEntryPrestub_STACK_FRAME_SIZE + 20h
+
+ save_xmm128_postrsp xmm0, TheUMEntryPrestub_XMM_SAVE_OFFSET
+ save_xmm128_postrsp xmm1, TheUMEntryPrestub_XMM_SAVE_OFFSET + 10h
+ save_xmm128_postrsp xmm2, TheUMEntryPrestub_XMM_SAVE_OFFSET + 20h
+ save_xmm128_postrsp xmm3, TheUMEntryPrestub_XMM_SAVE_OFFSET + 30h
+
+ END_PROLOGUE
+
+ ;
+ ; Do prestub-specific stuff
+ ;
+ mov rcx, METHODDESC_REGISTER
+ call TheUMEntryPrestubWorker
+
+ ;
+ ; we're going to tail call to the exec stub that we just setup
+ ;
+
+ mov rcx, [rsp + TheUMEntryPrestub_STACK_FRAME_SIZE + 8h]
+ mov rdx, [rsp + TheUMEntryPrestub_STACK_FRAME_SIZE + 10h]
+ mov r8, [rsp + TheUMEntryPrestub_STACK_FRAME_SIZE + 18h]
+ mov r9, [rsp + TheUMEntryPrestub_STACK_FRAME_SIZE + 20h]
+
+ movdqa xmm0, [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET]
+ movdqa xmm1, [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 10h]
+ movdqa xmm2, [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 20h]
+ movdqa xmm3, [rsp + TheUMEntryPrestub_XMM_SAVE_OFFSET + 30h]
+
+ ;
+ ; epilogue
+ ;
+ add rsp, TheUMEntryPrestub_STACK_FRAME_SIZE
+ TAILJMP_RAX
+
+NESTED_END TheUMEntryPrestub, _TEXT
+
+
+;
+; METHODDESC_REGISTER: UMEntryThunk*
+;
+NESTED_ENTRY UMThunkStub, _TEXT, UMThunkStubUnwindFrameChainHandler
+
+UMThunkStubAMD64_STACK_FRAME_SIZE = 0
+
+; number of integer registers saved in prologue
+UMThunkStubAMD64_NUM_REG_PUSHES = 2
+UMThunkStubAMD64_STACK_FRAME_SIZE = UMThunkStubAMD64_STACK_FRAME_SIZE + (UMThunkStubAMD64_NUM_REG_PUSHES * 8)
+
+; rare path spill area
+UMThunkStubAMD64_RARE_PATH_SPILL_SIZE = 10h
+UMThunkStubAMD64_STACK_FRAME_SIZE = UMThunkStubAMD64_STACK_FRAME_SIZE + UMThunkStubAMD64_RARE_PATH_SPILL_SIZE
+UMThunkStubAMD64_RARE_PATH_SPILL_NEGOFFSET = UMThunkStubAMD64_STACK_FRAME_SIZE
+
+
+
+; HOST_NOTIFY_FLAG
+UMThunkStubAMD64_STACK_FRAME_SIZE = UMThunkStubAMD64_STACK_FRAME_SIZE + 8
+UMThunkStubAMD64_HOST_NOTIFY_FLAG_NEGOFFSET = UMThunkStubAMD64_STACK_FRAME_SIZE
+
+; XMM save area
+UMThunkStubAMD64_STACK_FRAME_SIZE = UMThunkStubAMD64_STACK_FRAME_SIZE + SIZEOF_MAX_FP_ARG_SPILL
+
+; Ensure that the offset of the XMM save area will be 16-byte aligned.
+if ((UMThunkStubAMD64_STACK_FRAME_SIZE + 8) MOD 16) ne 0 ; +8 for caller-pushed return address
+UMThunkStubAMD64_STACK_FRAME_SIZE = UMThunkStubAMD64_STACK_FRAME_SIZE + 8
+endif
+
+UMThunkStubAMD64_XMM_SAVE_NEGOFFSET = UMThunkStubAMD64_STACK_FRAME_SIZE
+
+; Add in the callee scratch area size.
+UMThunkStubAMD64_CALLEE_SCRATCH_SIZE = SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES
+UMThunkStubAMD64_STACK_FRAME_SIZE = UMThunkStubAMD64_STACK_FRAME_SIZE + UMThunkStubAMD64_CALLEE_SCRATCH_SIZE
+
+; Now we have the full size of the stack frame. The offsets have been computed relative to the
+; top, so negate them to make them relative to the post-prologue rsp.
+UMThunkStubAMD64_FRAME_OFFSET = UMThunkStubAMD64_CALLEE_SCRATCH_SIZE
+UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET = UMThunkStubAMD64_STACK_FRAME_SIZE - UMThunkStubAMD64_FRAME_OFFSET - UMThunkStubAMD64_RARE_PATH_SPILL_NEGOFFSET
+UMThunkStubAMD64_HOST_NOTIFY_FLAG_OFFSET = UMThunkStubAMD64_STACK_FRAME_SIZE - UMThunkStubAMD64_FRAME_OFFSET - UMThunkStubAMD64_HOST_NOTIFY_FLAG_NEGOFFSET
+UMThunkStubAMD64_XMM_SAVE_OFFSET = UMThunkStubAMD64_STACK_FRAME_SIZE - UMThunkStubAMD64_FRAME_OFFSET - UMThunkStubAMD64_XMM_SAVE_NEGOFFSET
+UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET = UMThunkStubAMD64_STACK_FRAME_SIZE + 8 - UMThunkStubAMD64_FRAME_OFFSET ; +8 for return address
+UMThunkStubAMD64_FIXED_STACK_ALLOC_SIZE = UMThunkStubAMD64_STACK_FRAME_SIZE - (UMThunkStubAMD64_NUM_REG_PUSHES * 8)
+
+.errnz UMTHUNKSTUB_HOST_NOTIFY_FLAG_RBPOFFSET - UMThunkStubAMD64_HOST_NOTIFY_FLAG_OFFSET, update UMTHUNKSTUB_HOST_NOTIFY_FLAG_RBPOFFSET
+
+
+;
+; [ callee scratch ] <-- new RSP
+; [ callee scratch ]
+; [ callee scratch ]
+; [ callee scratch ]
+; {optional stack args passed to callee}
+; xmm0 <-- RBP
+; xmm1
+; xmm2
+; xmm3
+; {optional padding to align xmm regs}
+; HOST_NOTIFY_FLAG (set when a matching ReverseLeaveRuntime call must be made)
+; [rare path spill area]
+; [rare path spill area]
+; rbp save
+; r12 save
+; return address <-- entry RSP
+; [rcx home]
+; [rdx home]
+; [r8 home]
+; [r9 home]
+; stack arg 0
+; stack arg 1
+; ...
+
+ push_nonvol_reg r12
+ push_nonvol_reg rbp ; stack_args
+ alloc_stack UMThunkStubAMD64_FIXED_STACK_ALLOC_SIZE
+ set_frame rbp, UMThunkStubAMD64_FRAME_OFFSET ; stack_args
+ mov byte ptr [rbp + UMThunkStubAMD64_HOST_NOTIFY_FLAG_OFFSET], 0 ; hosted
+ END_PROLOGUE
+
+ ;
+ ; Call GetThread()
+ ;
+ CALL_GETTHREAD ; will not trash r10
+ test rax, rax
+ jz DoThreadSetup
+
+HaveThread:
+
+ mov r12, rax ; r12 <- Thread*
+
+ ;
+ ; disable preemptive GC
+ ;
+ mov dword ptr [r12 + OFFSETOF__Thread__m_fPreemptiveGCDisabled], 1
+
+ ;
+ ; catch returning thread here if a GC is in progress
+ ;
+ cmp [g_TrapReturningThreads], 0
+ jnz DoTrapReturningThreadsTHROW
+
+InCooperativeMode:
+
+ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ test [gfHostConfig], ASM_CLRTASKHOSTED ; inlined NDirect::IsHostHookEnabled ; hosted
+ifdef _DEBUG
+ call IsHostHookEnabledHelper
+ test eax, eax
+endif ; _DEBUG
+ jnz NotifyHost_ReverseEnterRuntime ; hosted
+Done_NotifyHost_ReverseEnterRuntime:
+endif
+
+ mov rax, [r12 + OFFSETOF__Thread__m_pDomain]
+ mov eax, [rax + OFFSETOF__AppDomain__m_dwId]
+
+ mov r11d, [METHODDESC_REGISTER + OFFSETOF__UMEntryThunk__m_dwDomainId]
+
+ cmp rax, r11
+ jne WrongAppDomain
+
+ mov r11, [METHODDESC_REGISTER + OFFSETOF__UMEntryThunk__m_pUMThunkMarshInfo]
+ mov eax, [r11 + OFFSETOF__UMThunkMarshInfo__m_cbActualArgSize] ; stack_args
+ test rax, rax ; stack_args
+ jnz CopyStackArgs ; stack_args
+
+ArgumentsSetup:
+
+ mov rax, [r11 + OFFSETOF__UMThunkMarshInfo__m_pILStub] ; rax <- Stub*
+ call rax
+
+PostCall:
+ ;
+ ; enable preemptive GC
+ ;
+ mov dword ptr [r12 + OFFSETOF__Thread__m_fPreemptiveGCDisabled], 0
+
+ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ cmp byte ptr [rbp + UMThunkStubAMD64_HOST_NOTIFY_FLAG_OFFSET], 0 ; hosted
+ jnz NotifyHost_ReverseLeaveRuntime ; hosted
+Done_NotifyHost_ReverseLeaveRuntime:
+endif
+
+ ; epilog
+ lea rsp, [rbp - UMThunkStubAMD64_FRAME_OFFSET + UMThunkStubAMD64_FIXED_STACK_ALLOC_SIZE]
+ pop rbp ; stack_args
+ pop r12
+ ret
+
+
+DoThreadSetup:
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h], rcx
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 8h], rdx
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 10h], r8
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h], r9
+
+ ; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
+ ; initial measurements indicate that this could be worth about a 5% savings in reverse
+ ; pinvoke overhead.
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
+
+ mov [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET], METHODDESC_REGISTER
+ call CreateThreadBlockThrow
+ mov METHODDESC_REGISTER, [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET]
+
+ mov rcx, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
+ mov rdx, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 8h]
+ mov r8, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 10h]
+ mov r9, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h]
+
+ ; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
+ movdqa xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+ movdqa xmm1, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
+ movdqa xmm2, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
+ movdqa xmm3, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
+
+ jmp HaveThread
+
+DoTrapReturningThreadsTHROW:
+
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h], rcx
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 8h], rdx
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 10h], r8
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h], r9
+
+ ; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
+ ; initial measurements indicate that this could be worth about a 5% savings in reverse
+ ; pinvoke overhead.
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
+
+ mov [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET], METHODDESC_REGISTER
+ mov rcx, r12 ; Thread* pThread
+ mov rdx, METHODDESC_REGISTER ; UMEntryThunk* pUMEntry
+ call UMThunkStubRareDisableWorker
+ mov METHODDESC_REGISTER, [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET]
+
+ mov rcx, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
+ mov rdx, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 8h]
+ mov r8, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 10h]
+ mov r9, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h]
+
+ ; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
+ movdqa xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+ movdqa xmm1, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
+ movdqa xmm2, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
+ movdqa xmm3, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
+
+ jmp InCooperativeMode
+
+CopyStackArgs:
+ ; rax = cbStackArgs (with 20h for register args subtracted out already)
+
+ sub rsp, rax
+ and rsp, -16
+
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h], rcx
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 8h], rdx
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 10h], r8
+
+ ; rax = number of bytes
+
+ lea rcx, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES]
+ lea rdx, [rsp + UMThunkStubAMD64_CALLEE_SCRATCH_SIZE]
+
+CopyLoop:
+ ; rax = number of bytes
+ ; rcx = src
+ ; rdx = dest
+ ; r8 = scratch
+
+ add rax, -8
+ mov r8, [rcx + rax]
+ mov [rdx + rax], r8
+ jnz CopyLoop
+
+ mov rcx, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
+ mov rdx, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 8h]
+ mov r8, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 10h]
+
+ jmp ArgumentsSetup
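+
+ ; Illustrative aside - CopyLoop above walks backwards through the homed
+ ; arguments, 8 bytes at a time (sketch; dst/src are hypothetical names for
+ ; the rdx/rcx bases):
+ ;
+ ;   size_t i = cbStackArgs;
+ ;   while (i != 0) { i -= 8; *(UINT64 *)(dst + i) = *(UINT64 *)(src + i); }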
+
+ifdef FEATURE_INCLUDE_ALL_INTERFACES
+NotifyHost_ReverseEnterRuntime:
+ mov [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET], METHODDESC_REGISTER
+
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h], rcx
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 8h], rdx
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 10h], r8
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h], r9
+
+ ; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
+ ; initial measurements indicate that this could be worth about a 5% savings in reverse
+ ; pinvoke overhead.
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
+
+ mov rcx, r12
+ call ReverseEnterRuntimeHelper
+ mov byte ptr [rbp + UMThunkStubAMD64_HOST_NOTIFY_FLAG_OFFSET], 1
+
+ mov rcx, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
+ mov rdx, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 8h]
+ mov r8, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 10h]
+ mov r9, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h]
+
+ ; @CONSIDER: mark UMEntryThunks that have FP params and only save/restore xmm regs on those calls
+ movdqa xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+ movdqa xmm1, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h]
+ movdqa xmm2, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h]
+ movdqa xmm3, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h]
+
+ mov METHODDESC_REGISTER, [rbp + UMThunkStubAMD64_RARE_PATH_SPILL_OFFSET]
+
+ jmp Done_NotifyHost_ReverseEnterRuntime
+
+NotifyHost_ReverseLeaveRuntime:
+
+ ; save rax, xmm0
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h], rax
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+
+ mov rcx, r12
+ call ReverseLeaveRuntimeHelper
+ mov byte ptr [rbp + UMThunkStubAMD64_HOST_NOTIFY_FLAG_OFFSET], 0
+
+ ; restore rax, xmm0
+ mov rax, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
+ movdqa xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+
+ jmp Done_NotifyHost_ReverseLeaveRuntime
+endif
+
+WrongAppDomain:
+ ;
+ ; home register args to the stack
+ ;
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h], rcx
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 8h], rdx
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 10h], r8
+ mov [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 18h], r9
+
+ ;
+ ; save off xmm registers
+ ;
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h], xmm0
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 10h], xmm1
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 20h], xmm2
+ movdqa [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 30h], xmm3
+
+ ;
+ ; call our helper to perform the AD transition
+ ;
+ mov rcx, METHODDESC_REGISTER
+ lea r8, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET]
+ mov rax, [METHODDESC_REGISTER + OFFSETOF__UMEntryThunk__m_pUMThunkMarshInfo]
+ mov r9d, [rax + OFFSETOF__UMThunkMarshInfo__m_cbActualArgSize]
+ call UM2MDoADCallBack
+
+ ; restore return value
+ mov rax, [rbp + UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
+ movdqa xmm0, [rbp + UMThunkStubAMD64_XMM_SAVE_OFFSET + 0h]
+
+ jmp PostCall
+
+NESTED_END UMThunkStub, _TEXT
+
+;
+; EXTERN_C void __stdcall UM2MThunk_WrapperHelper(
+; void *pThunkArgs, ; rcx
+; int argLen, ; rdx
+; void *pAddr, ; r8 (not used)
+; UMEntryThunk *pEntryThunk, ; r9
+; Thread *pThread); ; [entry_sp + 28h]
+;
+NESTED_ENTRY UM2MThunk_WrapperHelper, _TEXT
+
+
+UM2MThunk_WrapperHelper_STACK_FRAME_SIZE = 0
+
+; number of integer registers saved in prologue
+UM2MThunk_WrapperHelper_NUM_REG_PUSHES = 3
+UM2MThunk_WrapperHelper_STACK_FRAME_SIZE = UM2MThunk_WrapperHelper_STACK_FRAME_SIZE + (UM2MThunk_WrapperHelper_NUM_REG_PUSHES * 8)
+
+UM2MThunk_WrapperHelper_CALLEE_SCRATCH_SIZE = SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES
+UM2MThunk_WrapperHelper_STACK_FRAME_SIZE = UM2MThunk_WrapperHelper_STACK_FRAME_SIZE + UM2MThunk_WrapperHelper_CALLEE_SCRATCH_SIZE
+
+; Ensure that rsp remains 16-byte aligned
+if ((UM2MThunk_WrapperHelper_STACK_FRAME_SIZE + 8) MOD 16) ne 0 ; +8 for caller-pushed return address
+UM2MThunk_WrapperHelper_STACK_FRAME_SIZE = UM2MThunk_WrapperHelper_STACK_FRAME_SIZE + 8
+endif
+
+UM2MThunk_WrapperHelper_FRAME_OFFSET = UM2MThunk_WrapperHelper_CALLEE_SCRATCH_SIZE
+UM2MThunk_WrapperHelper_FIXED_STACK_ALLOC_SIZE = UM2MThunk_WrapperHelper_STACK_FRAME_SIZE - (UM2MThunk_WrapperHelper_NUM_REG_PUSHES * 8)
+
+ push_nonvol_reg rsi
+ push_nonvol_reg rdi
+ push_nonvol_reg rbp
+ alloc_stack UM2MThunk_WrapperHelper_FIXED_STACK_ALLOC_SIZE
+ set_frame rbp, UM2MThunk_WrapperHelper_FRAME_OFFSET
+ END_PROLOGUE
+
+ ;
+ ; We are in cooperative mode and in the correct domain.
+ ; The host has also been notified that we've entered the
+ ; runtime. All we have left to do is to copy the stack,
+ ; setup the register args and then call the managed target
+ ;
+
+ test rdx, rdx
+ jg CopyStackArgs
+
+ArgumentsSetup:
+ mov METHODDESC_REGISTER, r9
+
+ mov rsi, rcx ; rsi <- pThunkArgs
+ mov rcx, [rsi + 0h]
+ mov rdx, [rsi + 8h]
+ mov r8, [rsi + 10h]
+ mov r9, [rsi + 18h]
+
+ movdqa xmm0, [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h]
+ movdqa xmm1, [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 10h]
+ movdqa xmm2, [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 20h]
+ movdqa xmm3, [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 30h]
+
+ mov rax, [METHODDESC_REGISTER + OFFSETOF__UMEntryThunk__m_pUMThunkMarshInfo] ; rax <- UMThunkMarshInfo*
+ mov rax, [rax + OFFSETOF__UMThunkMarshInfo__m_pILStub] ; rax <- Stub*
+ call rax
+
+ ; make sure we don't trash the return value
+ mov [rsi + 0h], rax
+ movdqa [rsi + UMThunkStubAMD64_XMM_SAVE_OFFSET - UMThunkStubAMD64_ARGUMENTS_STACK_HOME_OFFSET + 0h], xmm0
+
+ lea rsp, [rbp - UM2MThunk_WrapperHelper_FRAME_OFFSET + UM2MThunk_WrapperHelper_FIXED_STACK_ALLOC_SIZE]
+ pop rbp
+ pop rdi
+ pop rsi
+ ret
+
+
+CopyStackArgs:
+ ; rdx = cbStackArgs (with 20h for register args subtracted out already)
+ ; rcx = pSrcArgStack
+
+ sub rsp, rdx
+ and rsp, -16
+
+ mov r8, rcx
+
+ lea rsi, [rcx + SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES]
+ lea rdi, [rsp + UM2MThunk_WrapperHelper_CALLEE_SCRATCH_SIZE]
+
+ mov rcx, rdx
+ shr rcx, 3
+
+ rep movsq
+
+ mov rcx, r8
+
+ jmp ArgumentsSetup
+
+NESTED_END UM2MThunk_WrapperHelper, _TEXT
+
+ifdef _DEBUG
+ifdef FEATURE_INCLUDE_ALL_INTERFACES
+
+NESTED_ENTRY IsHostHookEnabledHelper, _TEXT
+
+ push_nonvol_reg rcx
+ push_nonvol_reg rdx
+ push_nonvol_reg r8
+ push_nonvol_reg r9
+ push_nonvol_reg r10
+
+IsHostHookEnabledHelper_FIXED_STACK_ALLOC_SIZE = 20h + 40h
+
+ alloc_stack IsHostHookEnabledHelper_FIXED_STACK_ALLOC_SIZE
+
+ END_PROLOGUE
+
+ movdqa [rsp + 20h + 0h], xmm0
+ movdqa [rsp + 20h + 10h], xmm1
+ movdqa [rsp + 20h + 20h], xmm2
+ movdqa [rsp + 20h + 30h], xmm3
+
+ call NDirect__IsHostHookEnabled
+
+ movdqa xmm0, [rsp + 20h + 0h]
+ movdqa xmm1, [rsp + 20h + 10h]
+ movdqa xmm2, [rsp + 20h + 20h]
+ movdqa xmm3, [rsp + 20h + 30h]
+
+ ; epilog
+ add rsp, IsHostHookEnabledHelper_FIXED_STACK_ALLOC_SIZE
+ pop r10
+ pop r9
+ pop r8
+ pop rdx
+ pop rcx
+ ret
+NESTED_END IsHostHookEnabledHelper, _TEXT
+
+endif ; FEATURE_INCLUDE_ALL_INTERFACES
+endif ; _DEBUG
+
+ifdef FEATURE_MIXEDMODE
+NESTED_ENTRY IJWNOADThunk__MakeCall, _TEXT
+ ; METHODDESC_REGISTER = IJWNOADThunk*
+
+ alloc_stack 68h
+
+ save_reg_postrsp rcx, 70h
+ save_reg_postrsp rdx, 78h
+ save_reg_postrsp r8, 80h
+ save_reg_postrsp r9, 88h
+
+ save_xmm128_postrsp xmm0, 20h
+ save_xmm128_postrsp xmm1, 30h
+ save_xmm128_postrsp xmm2, 40h
+ save_xmm128_postrsp xmm3, 50h
+ END_PROLOGUE
+
+ mov rcx, METHODDESC_REGISTER
+ call IJWNOADThunk__FindThunkTarget
+
+ movdqa xmm0, [rsp + 20h]
+ movdqa xmm1, [rsp + 30h]
+ movdqa xmm2, [rsp + 40h]
+ movdqa xmm3, [rsp + 50h]
+
+ mov rcx, [rsp + 70h]
+ mov rdx, [rsp + 78h]
+ mov r8, [rsp + 80h]
+ mov r9 , [rsp + 88h]
+
+ ; The target is in rax
+ add rsp, 68h
+ TAILJMP_RAX
+NESTED_END IJWNOADThunk__MakeCall, _TEXT
+endif ; FEATURE_MIXEDMODE
+
+ end
+
diff --git a/src/vm/amd64/VirtualCallStubAMD64.asm b/src/vm/amd64/VirtualCallStubAMD64.asm
new file mode 100644
index 0000000000..6c97e53d68
--- /dev/null
+++ b/src/vm/amd64/VirtualCallStubAMD64.asm
@@ -0,0 +1,110 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+include <AsmMacros.inc>
+include AsmConstants.inc
+
+CHAIN_SUCCESS_COUNTER equ ?g_dispatch_cache_chain_success_counter@@3_KA
+
+ extern VSD_ResolveWorker:proc
+ extern CHAIN_SUCCESS_COUNTER:dword
+
+ extern StubDispatchFixupWorker:proc
+ extern ProcessCLRException:proc
+
+BACKPATCH_FLAG equ 1 ;; Also known as SDF_ResolveBackPatch in the EE
+PROMOTE_CHAIN_FLAG equ 2 ;; Also known as SDF_ResolvePromoteChain in the EE
+INITIAL_SUCCESS_COUNT equ 100h
+
+;; On Input:
+;; r11 contains the address of the indirection cell (with the flags in the low bits)
+;; [rsp+0] m_Datum: contains the dispatch token (slot number or MethodDesc) for the target
+;; or the ResolveCacheElem when r11 has the PROMOTE_CHAIN_FLAG set
+;; [rsp+8] m_ReturnAddress: contains the return address of caller to stub
+
+NESTED_ENTRY ResolveWorkerAsmStub, _TEXT
+
+ PROLOG_WITH_TRANSITION_BLOCK 0, 8, r8
+
+ ; token stored in r8 by prolog
+
+ lea rcx, [rsp + __PWTB_TransitionBlock] ; pTransitionBlock
+ mov rdx, r11 ; indirection cell + flags
+ mov r9, rdx
+ and r9, 7 ; flags
+ sub rdx, r9 ; indirection cell
+
+ call VSD_ResolveWorker
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ TAILJMP_RAX
+
+NESTED_END ResolveWorkerAsmStub, _TEXT
+
+;; extern void ResolveWorkerChainLookupAsmStub()
+LEAF_ENTRY ResolveWorkerChainLookupAsmStub, _TEXT
+;; This will perform a quick chained lookup of the entry if the initial cache lookup fails
+;; On Input:
+;; rdx contains our type (MethodTable)
+;; r10 contains our contract (DispatchToken)
+;; r11 contains the address of the indirection (and the flags in the low two bits)
+;; [rsp+0x00] contains the pointer to the ResolveCacheElem
+;; [rsp+0x08] contains the saved value of rdx
+;; [rsp+0x10] contains the return address of caller to stub
+;;
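+;;
+;; A rough C sketch of the walk below (field names are inferred from the
+;; offsets used here, not from the authoritative ResolveCacheElem declaration):
+;;
+;;     while ((elem = elem->pNext) != NULL)               // [rax+18h]
+;;     {
+;;         if (elem->pMT == type && elem->token == token) // [rax+00h], [rax+08h]
+;;             return elem->target;                       // [rax+10h]
+;;     }
+;;     // miss: fall through to Fail and on to ResolveWorkerAsmStub
+;;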
+ mov rax, BACKPATCH_FLAG ;; First we check if r11 has the BACKPATCH_FLAG set
+ and rax, r11 ;; Set the flags based on (BACKPATCH_FLAG and r11)
+ pop rax ;; pop the pointer to the ResolveCacheElem from the top of stack (leaving the flags unchanged)
+ jnz Fail ;; If the BACKPATCH_FLAG is set we will go directly to ResolveWorkerAsmStub
+
+MainLoop:
+ mov rax, [rax+18h] ;; get the next entry in the chain (don't bother checking the first entry again)
+ test rax,rax ;; test if we hit a terminating NULL
+ jz Fail
+
+ cmp rdx, [rax+00h] ;; compare our MT with the one in the ResolveCacheElem
+ jne MainLoop
+ cmp r10, [rax+08h] ;; compare our DispatchToken with one in the ResolveCacheElem
+ jne MainLoop
+Success:
+ sub [CHAIN_SUCCESS_COUNTER],1 ;; decrement success counter
+ jl Promote
+ mov rax, [rax+10h] ;; get the ImplTarget
+ pop rdx
+ jmp rax
+
+Promote: ;; Move this entry to the head position of the chain
+ ;; be quick to reset the counter so we don't get a bunch of contending threads
+ mov [CHAIN_SUCCESS_COUNTER], INITIAL_SUCCESS_COUNT
+ or r11, PROMOTE_CHAIN_FLAG
+ mov r10, rax ;; We pass the ResolveCacheElem to ResolveWorkerAsmStub instead of the DispatchToken
+Fail:
+ pop rdx ;; Restore the original saved rdx value
+ push r10 ;; pass the DispatchToken or ResolveCacheElem to promote to ResolveWorkerAsmStub
+
+ jmp ResolveWorkerAsmStub
+
+LEAF_END ResolveWorkerChainLookupAsmStub, _TEXT
+
+
+NESTED_ENTRY StubDispatchFixupStub, _TEXT, ProcessCLRException
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ lea rcx, [rsp + __PWTB_TransitionBlock] ; pTransitionBlock
+ mov rdx, r11 ; indirection cell address
+
+ mov r8,0 ; sectionIndex
+ mov r9,0 ; pModule
+
+ call StubDispatchFixupWorker
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+PATCH_LABEL StubDispatchFixupPatchLabel
+ TAILJMP_RAX
+
+NESTED_END StubDispatchFixupStub, _TEXT
+
+ end
diff --git a/src/vm/amd64/asmconstants.h b/src/vm/amd64/asmconstants.h
new file mode 100644
index 0000000000..d7e872cd41
--- /dev/null
+++ b/src/vm/amd64/asmconstants.h
@@ -0,0 +1,747 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// See makefile.inc. During the build, this file is converted into a .inc
+// file for inclusion by .asm files. The #defines are converted into EQUs.
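+// For example, a definition such as
+//     #define METHODDESC_REGNUM 10
+// is expected to come out of that conversion roughly as
+//     METHODDESC_REGNUM EQU 10
+// (illustrative; the exact translation is owned by makefile.inc).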
+//
+// Allow multiple inclusion.
+
+
+#ifndef _TARGET_AMD64_
+#error this file should only be used on an AMD64 platform
+#endif // _TARGET_AMD64_
+
+#include "../../inc/switches.h"
+
+#ifndef ASMCONSTANTS_C_ASSERT
+#define ASMCONSTANTS_C_ASSERT(cond)
+#endif
+
+#ifndef ASMCONSTANTS_RUNTIME_ASSERT
+#define ASMCONSTANTS_RUNTIME_ASSERT(cond)
+#endif
+
+
+// Some constants are different in _DEBUG builds. This macro factors out the
+// ifdefs from the definitions below.
+#ifdef _DEBUG
+#define DBG_FRE(dbg,fre) dbg
+#else
+#define DBG_FRE(dbg,fre) fre
+#endif
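+// For example, DBG_FRE(0x30, 0x08) yields 0x30 in a _DEBUG build and 0x08 in
+// a free build (see OFFSETOF__ComPlusCallMethodDesc__m_pComPlusCallInfo below).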
+
+#define DynamicHelperFrameFlags_Default 0
+#define DynamicHelperFrameFlags_ObjectArg 1
+#define DynamicHelperFrameFlags_ObjectArg2 2
+
+#define ASMCONSTANT_OFFSETOF_ASSERT(struct, member) \
+ASMCONSTANTS_C_ASSERT(OFFSETOF__##struct##__##member == offsetof(struct, member));
+
+#define ASMCONSTANT_SIZEOF_ASSERT(classname) \
+ASMCONSTANTS_C_ASSERT(SIZEOF__##classname == sizeof(classname));
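+// For example, ASMCONSTANT_OFFSETOF_ASSERT(SyncBlock, m_Monitor) expands to
+// ASMCONSTANTS_C_ASSERT(OFFSETOF__SyncBlock__m_Monitor == offsetof(SyncBlock, m_Monitor));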
+
+#define ASM_ELEMENT_TYPE_R4 0xC
+ASMCONSTANTS_C_ASSERT(ASM_ELEMENT_TYPE_R4 == ELEMENT_TYPE_R4);
+
+#define ASM_ELEMENT_TYPE_R8 0xD
+ASMCONSTANTS_C_ASSERT(ASM_ELEMENT_TYPE_R8 == ELEMENT_TYPE_R8);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+#define ASM_CLRTASKHOSTED 0x2
+ASMCONSTANTS_C_ASSERT(ASM_CLRTASKHOSTED == CLRTASKHOSTED);
+#endif
+
+#define METHODDESC_REGNUM 10
+#define METHODDESC_REGISTER r10
+
+#define PINVOKE_CALLI_TARGET_REGNUM 10
+#define PINVOKE_CALLI_TARGET_REGISTER r10
+
+#define PINVOKE_CALLI_SIGTOKEN_REGNUM 11
+#define PINVOKE_CALLI_SIGTOKEN_REGISTER r11
+
+// rcx, rdx, r8, r9
+#define SIZEOF_MAX_OUTGOING_ARGUMENT_HOMES 0x20
+
+// xmm0...xmm3
+#define SIZEOF_MAX_FP_ARG_SPILL 0x40
+
+#ifndef UNIX_AMD64_ABI
+#define SIZEOF_CalleeSavedRegisters 0x40
+ASMCONSTANTS_C_ASSERT(SIZEOF_CalleeSavedRegisters == sizeof(CalleeSavedRegisters));
+#else
+#define SIZEOF_CalleeSavedRegisters 0x30
+ASMCONSTANTS_C_ASSERT(SIZEOF_CalleeSavedRegisters == sizeof(CalleeSavedRegisters));
+#endif
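+// (Windows saves rbx, rbp, rsi, rdi and r12-r15 = 8 registers; under
+// UNIX_AMD64_ABI rsi and rdi are volatile, leaving 6.)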
+
+#define SIZEOF_GSCookie 0x8
+ASMCONSTANTS_C_ASSERT(SIZEOF_GSCookie == sizeof(GSCookie));
+
+#define OFFSETOF__Frame____VFN_table 0
+
+#define OFFSETOF__Frame__m_Next 0x8
+ASMCONSTANTS_C_ASSERT(OFFSETOF__Frame__m_Next
+ == offsetof(Frame, m_Next));
+
+#define SIZEOF__Frame 0x10
+
+#ifdef FEATURE_COMINTEROP
+#define SIZEOF__ComPrestubMethodFrame 0x20
+ASMCONSTANTS_C_ASSERT(SIZEOF__ComPrestubMethodFrame
+ == sizeof(ComPrestubMethodFrame));
+
+#define SIZEOF__ComMethodFrame 0x20
+ASMCONSTANTS_C_ASSERT(SIZEOF__ComMethodFrame
+ == sizeof(ComMethodFrame));
+#endif // FEATURE_COMINTEROP
+
+#define OFFSETOF__UMEntryThunk__m_pUMThunkMarshInfo 0x18
+ASMCONSTANTS_C_ASSERT(OFFSETOF__UMEntryThunk__m_pUMThunkMarshInfo
+ == offsetof(UMEntryThunk, m_pUMThunkMarshInfo));
+
+#define OFFSETOF__UMEntryThunk__m_dwDomainId 0x20
+ASMCONSTANTS_C_ASSERT(OFFSETOF__UMEntryThunk__m_dwDomainId
+ == offsetof(UMEntryThunk, m_dwDomainId));
+
+#define OFFSETOF__UMThunkMarshInfo__m_pILStub 0x00
+ASMCONSTANTS_C_ASSERT(OFFSETOF__UMThunkMarshInfo__m_pILStub
+ == offsetof(UMThunkMarshInfo, m_pILStub));
+
+#define OFFSETOF__UMThunkMarshInfo__m_cbActualArgSize 0x08
+ASMCONSTANTS_C_ASSERT(OFFSETOF__UMThunkMarshInfo__m_cbActualArgSize
+ == offsetof(UMThunkMarshInfo, m_cbActualArgSize));
+
+#ifdef FEATURE_COMINTEROP
+
+#define OFFSETOF__ComPlusCallMethodDesc__m_pComPlusCallInfo DBG_FRE(0x30, 0x08)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__ComPlusCallMethodDesc__m_pComPlusCallInfo
+ == offsetof(ComPlusCallMethodDesc, m_pComPlusCallInfo));
+
+#define OFFSETOF__ComPlusCallInfo__m_pILStub 0x0
+ASMCONSTANTS_C_ASSERT(OFFSETOF__ComPlusCallInfo__m_pILStub
+ == offsetof(ComPlusCallInfo, m_pILStub));
+
+#endif // FEATURE_COMINTEROP
+
+#define OFFSETOF__Thread__m_fPreemptiveGCDisabled 0x0C
+#ifndef CROSSGEN_COMPILE
+ASMCONSTANTS_C_ASSERT(OFFSETOF__Thread__m_fPreemptiveGCDisabled
+ == offsetof(Thread, m_fPreemptiveGCDisabled));
+#endif
+#define Thread_m_fPreemptiveGCDisabled OFFSETOF__Thread__m_fPreemptiveGCDisabled
+
+#define OFFSETOF__Thread__m_pFrame 0x10
+#ifndef CROSSGEN_COMPILE
+ASMCONSTANTS_C_ASSERT(OFFSETOF__Thread__m_pFrame
+ == offsetof(Thread, m_pFrame));
+#endif
+#define Thread_m_pFrame OFFSETOF__Thread__m_pFrame
+
+#ifndef CROSSGEN_COMPILE
+#define OFFSETOF__Thread__m_State 0x8
+ASMCONSTANTS_C_ASSERT(OFFSETOF__Thread__m_State
+ == offsetof(Thread, m_State));
+
+#define OFFSETOF__Thread__m_pDomain 0x20
+ASMCONSTANTS_C_ASSERT(OFFSETOF__Thread__m_pDomain
+ == offsetof(Thread, m_pDomain));
+
+#define OFFSETOF__Thread__m_dwLockCount 0x28
+ASMCONSTANTS_C_ASSERT(OFFSETOF__Thread__m_dwLockCount
+ == offsetof(Thread, m_dwLockCount));
+
+#define OFFSETOF__Thread__m_ThreadId 0x2C
+ASMCONSTANTS_C_ASSERT(OFFSETOF__Thread__m_ThreadId
+ == offsetof(Thread, m_ThreadId));
+
+#define OFFSET__Thread__m_alloc_context__alloc_ptr 0x60
+ASMCONSTANTS_C_ASSERT(OFFSET__Thread__m_alloc_context__alloc_ptr == offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr));
+
+#define OFFSET__Thread__m_alloc_context__alloc_limit 0x68
+ASMCONSTANTS_C_ASSERT(OFFSET__Thread__m_alloc_context__alloc_limit == offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_limit));
+
+#define OFFSETOF__ThreadExceptionState__m_pCurrentTracker 0x000
+ASMCONSTANTS_C_ASSERT(OFFSETOF__ThreadExceptionState__m_pCurrentTracker
+ == offsetof(ThreadExceptionState, m_pCurrentTracker));
+
+#define THREAD_CATCHATSAFEPOINT_BITS 0x5F
+ASMCONSTANTS_C_ASSERT(THREAD_CATCHATSAFEPOINT_BITS == Thread::TS_CatchAtSafePoint);
+#endif // CROSSGEN_COMPILE
+
+
+#ifdef FEATURE_REMOTING
+#define TransparentProxyObject___stubData 0x10
+ASMCONSTANTS_C_ASSERT(TransparentProxyObject___stubData == offsetof(TransparentProxyObject, _stubData))
+
+#define TransparentProxyObject___stub 0x28
+ASMCONSTANTS_C_ASSERT(TransparentProxyObject___stub == offsetof(TransparentProxyObject, _stub))
+
+#define TransparentProxyObject___pMT 0x18
+ASMCONSTANTS_C_ASSERT(TransparentProxyObject___pMT == offsetof(TransparentProxyObject, _pMT))
+#endif // FEATURE_REMOTING
+
+#define OFFSETOF__NDirectMethodDesc__m_pWriteableData DBG_FRE(0x48, 0x20)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__NDirectMethodDesc__m_pWriteableData == offsetof(NDirectMethodDesc, ndirect.m_pWriteableData));
+
+#define OFFSETOF__ObjHeader__SyncBlkIndex 0x4
+ASMCONSTANTS_C_ASSERT(OFFSETOF__ObjHeader__SyncBlkIndex
+ == (sizeof(ObjHeader) - offsetof(ObjHeader, m_SyncBlockValue)));
+
+#define SIZEOF__SyncTableEntry 0x10
+ASMCONSTANT_SIZEOF_ASSERT(SyncTableEntry);
+
+#define OFFSETOF__SyncTableEntry__m_SyncBlock 0x0
+ASMCONSTANT_OFFSETOF_ASSERT(SyncTableEntry, m_SyncBlock);
+
+#define OFFSETOF__SyncBlock__m_Monitor 0x0
+ASMCONSTANT_OFFSETOF_ASSERT(SyncBlock, m_Monitor);
+
+#define OFFSETOF__DelegateObject___methodPtr 0x18
+ASMCONSTANT_OFFSETOF_ASSERT(DelegateObject, _methodPtr);
+
+#define OFFSETOF__DelegateObject___target 0x08
+ASMCONSTANT_OFFSETOF_ASSERT(DelegateObject, _target);
+
+#define OFFSETOF__AwareLock__m_MonitorHeld 0x0
+ASMCONSTANTS_C_ASSERT(OFFSETOF__AwareLock__m_MonitorHeld
+ == offsetof(AwareLock, m_MonitorHeld));
+
+#define OFFSETOF__AwareLock__m_Recursion 0x4
+ASMCONSTANTS_C_ASSERT(OFFSETOF__AwareLock__m_Recursion
+ == offsetof(AwareLock, m_Recursion));
+
+#define OFFSETOF__AwareLock__m_HoldingThread 0x8
+ASMCONSTANTS_C_ASSERT(OFFSETOF__AwareLock__m_HoldingThread
+ == offsetof(AwareLock, m_HoldingThread));
+
+#define OFFSETOF__g_SystemInfo__dwNumberOfProcessors 0x20
+ASMCONSTANTS_C_ASSERT(OFFSETOF__g_SystemInfo__dwNumberOfProcessors
+ == offsetof(SYSTEM_INFO, dwNumberOfProcessors));
+
+#define OFFSETOF__g_SpinConstants__dwInitialDuration 0x0
+ASMCONSTANTS_C_ASSERT(OFFSETOF__g_SpinConstants__dwInitialDuration
+ == offsetof(SpinConstants, dwInitialDuration));
+
+#define OFFSETOF__g_SpinConstants__dwMaximumDuration 0x4
+ASMCONSTANTS_C_ASSERT(OFFSETOF__g_SpinConstants__dwMaximumDuration
+ == offsetof(SpinConstants, dwMaximumDuration));
+
+#define OFFSETOF__g_SpinConstants__dwBackoffFactor 0x8
+ASMCONSTANTS_C_ASSERT(OFFSETOF__g_SpinConstants__dwBackoffFactor
+ == offsetof(SpinConstants, dwBackoffFactor));
+
+#define OFFSETOF__MethodTable__m_dwFlags 0x00
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MethodTable__m_dwFlags
+ == offsetof(MethodTable, m_dwFlags));
+
+#define OFFSET__MethodTable__m_BaseSize 0x04
+ASMCONSTANTS_C_ASSERT(OFFSET__MethodTable__m_BaseSize
+ == offsetof(MethodTable, m_BaseSize));
+
+#define OFFSETOF__MethodTable__m_wNumInterfaces 0x0E
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MethodTable__m_wNumInterfaces
+ == offsetof(MethodTable, m_wNumInterfaces));
+
+#define OFFSETOF__MethodTable__m_pParentMethodTable DBG_FRE(0x18, 0x10)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MethodTable__m_pParentMethodTable
+ == offsetof(MethodTable, m_pParentMethodTable));
+
+#define OFFSETOF__MethodTable__m_pWriteableData DBG_FRE(0x28, 0x20)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MethodTable__m_pWriteableData
+ == offsetof(MethodTable, m_pWriteableData));
+
+#define OFFSETOF__MethodTable__m_pEEClass DBG_FRE(0x30, 0x28)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MethodTable__m_pEEClass
+ == offsetof(MethodTable, m_pEEClass));
+
+#define METHODTABLE_OFFSET_VTABLE DBG_FRE(0x48, 0x40)
+ASMCONSTANTS_C_ASSERT(METHODTABLE_OFFSET_VTABLE == sizeof(MethodTable));
+
+#define OFFSETOF__MethodTable__m_ElementType DBG_FRE(0x38, 0x30)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MethodTable__m_ElementType
+ == offsetof(MethodTable, m_pMultipurposeSlot1));
+
+#define OFFSETOF__MethodTable__m_pInterfaceMap DBG_FRE(0x40, 0x38)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MethodTable__m_pInterfaceMap
+ == offsetof(MethodTable, m_pMultipurposeSlot2));
+
+
+#define MethodTable_VtableSlotsPerChunk 8
+ASMCONSTANTS_C_ASSERT(MethodTable_VtableSlotsPerChunk == VTABLE_SLOTS_PER_CHUNK)
+
+#define MethodTable_VtableSlotsPerChunkLog2 3
+ASMCONSTANTS_C_ASSERT(MethodTable_VtableSlotsPerChunkLog2 == VTABLE_SLOTS_PER_CHUNK_LOG2)
+
+#if defined(FEATURE_TYPEEQUIVALENCE) || defined(FEATURE_REMOTING)
+#define METHODTABLE_EQUIVALENCE_FLAGS 0x02000000
+ASMCONSTANTS_C_ASSERT(METHODTABLE_EQUIVALENCE_FLAGS
+ == MethodTable::enum_flag_HasTypeEquivalence);
+#else
+#define METHODTABLE_EQUIVALENCE_FLAGS 0x0
+#endif
+
+#ifdef FEATURE_COMINTEROP
+#define METHODTABLE_NONTRIVIALINTERFACECAST_FLAGS 0x40080000
+ASMCONSTANTS_C_ASSERT(METHODTABLE_NONTRIVIALINTERFACECAST_FLAGS
+ == MethodTable::enum_flag_NonTrivialInterfaceCast);
+#else
+#define METHODTABLE_NONTRIVIALINTERFACECAST_FLAGS 0x00080000
+ASMCONSTANTS_C_ASSERT(METHODTABLE_NONTRIVIALINTERFACECAST_FLAGS
+ == MethodTable::enum_flag_NonTrivialInterfaceCast);
+#endif
+
+#define MethodTable__enum_flag_ContainsPointers 0x01000000
+ASMCONSTANTS_C_ASSERT(MethodTable__enum_flag_ContainsPointers
+ == MethodTable::enum_flag_ContainsPointers);
+
+#define OFFSETOF__MethodTableWriteableData__m_dwFlags 0
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MethodTableWriteableData__m_dwFlags
+ == offsetof(MethodTableWriteableData, m_dwFlags));
+
+#define MethodTableWriteableData__enum_flag_Unrestored 0x04
+ASMCONSTANTS_C_ASSERT(MethodTableWriteableData__enum_flag_Unrestored
+ == MethodTableWriteableData::enum_flag_Unrestored);
+
+#define OFFSETOF__InterfaceInfo_t__m_pMethodTable 0
+ASMCONSTANTS_C_ASSERT(OFFSETOF__InterfaceInfo_t__m_pMethodTable
+ == offsetof(InterfaceInfo_t, m_pMethodTable));
+
+#define SIZEOF__InterfaceInfo_t 0x8
+ASMCONSTANTS_C_ASSERT(SIZEOF__InterfaceInfo_t
+ == sizeof(InterfaceInfo_t));
+
+#define OFFSETOF__AppDomain__m_dwId 0x8
+ASMCONSTANTS_C_ASSERT(OFFSETOF__AppDomain__m_dwId
+ == offsetof(AppDomain, m_dwId));
+
+#define OFFSETOF__AppDomain__m_sDomainLocalBlock DBG_FRE(0x10, 0x10)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__AppDomain__m_sDomainLocalBlock
+ == offsetof(AppDomain, m_sDomainLocalBlock));
+
+#define OFFSETOF__DomainLocalBlock__m_pModuleSlots 0x8
+ASMCONSTANTS_C_ASSERT(OFFSETOF__DomainLocalBlock__m_pModuleSlots
+ == offsetof(DomainLocalBlock, m_pModuleSlots));
+
+#define OFFSETOF__DomainLocalModule__m_pDataBlob 0x030
+ASMCONSTANTS_C_ASSERT(OFFSETOF__DomainLocalModule__m_pDataBlob
+ == offsetof(DomainLocalModule, m_pDataBlob));
+
+// If this changes then we can't just test one bit in the assembly code.
+ASMCONSTANTS_C_ASSERT(ClassInitFlags::INITIALIZED_FLAG == 1);
+
+// End for JIT_GetSharedNonGCStaticBaseWorker
+
+// For JIT_GetSharedGCStaticBaseWorker
+
+#define OFFSETOF__DomainLocalModule__m_pGCStatics 0x020
+ASMCONSTANTS_C_ASSERT(OFFSETOF__DomainLocalModule__m_pGCStatics
+ == offsetof(DomainLocalModule, m_pGCStatics));
+
+// End for JIT_GetSharedGCStaticBaseWorker
+
+#define CORINFO_NullReferenceException_ASM 0
+ASMCONSTANTS_C_ASSERT( CORINFO_NullReferenceException_ASM
+ == CORINFO_NullReferenceException);
+
+#define CORINFO_InvalidCastException_ASM 2
+ASMCONSTANTS_C_ASSERT( CORINFO_InvalidCastException_ASM
+ == CORINFO_InvalidCastException);
+
+#define CORINFO_IndexOutOfRangeException_ASM 3
+ASMCONSTANTS_C_ASSERT( CORINFO_IndexOutOfRangeException_ASM
+ == CORINFO_IndexOutOfRangeException);
+
+#define CORINFO_SynchronizationLockException_ASM 5
+ASMCONSTANTS_C_ASSERT( CORINFO_SynchronizationLockException_ASM
+ == CORINFO_SynchronizationLockException);
+
+#define CORINFO_ArrayTypeMismatchException_ASM 6
+ASMCONSTANTS_C_ASSERT( CORINFO_ArrayTypeMismatchException_ASM
+ == CORINFO_ArrayTypeMismatchException);
+
+#define CORINFO_ArgumentNullException_ASM 8
+ASMCONSTANTS_C_ASSERT( CORINFO_ArgumentNullException_ASM
+ == CORINFO_ArgumentNullException);
+
+#define CORINFO_ArgumentException_ASM 9
+ASMCONSTANTS_C_ASSERT( CORINFO_ArgumentException_ASM
+ == CORINFO_ArgumentException);
+
+
+// MachState offsets (AMD64\gmscpu.h)
+
+#define OFFSETOF__MachState__m_Rip 0x00
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_Rip
+ == offsetof(MachState, m_Rip));
+
+#define OFFSETOF__MachState__m_Rsp 0x08
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_Rsp
+ == offsetof(MachState, m_Rsp));
+
+#define OFFSETOF__MachState__m_CaptureRdi 0x10
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_CaptureRdi
+ == offsetof(MachState, m_CaptureRdi));
+
+#define OFFSETOF__MachState__m_CaptureRsi 0x18
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_CaptureRsi
+ == offsetof(MachState, m_CaptureRsi));
+
+#define OFFSETOF__MachState__m_CaptureRbx 0x20
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_CaptureRbx
+ == offsetof(MachState, m_CaptureRbx));
+
+#define OFFSETOF__MachState__m_CaptureRbp 0x28
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_CaptureRbp
+ == offsetof(MachState, m_CaptureRbp));
+
+#define OFFSETOF__MachState__m_CaptureR12 0x30
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_CaptureR12
+ == offsetof(MachState, m_CaptureR12));
+
+#define OFFSETOF__MachState__m_CaptureR13 0x38
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_CaptureR13
+ == offsetof(MachState, m_CaptureR13));
+
+#define OFFSETOF__MachState__m_CaptureR14 0x40
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_CaptureR14
+ == offsetof(MachState, m_CaptureR14));
+
+#define OFFSETOF__MachState__m_CaptureR15 0x48
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_CaptureR15
+ == offsetof(MachState, m_CaptureR15));
+
+#define OFFSETOF__MachState__m_pRdi 0x50
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_pRdi
+ == offsetof(MachState, m_pRdi));
+
+#define OFFSETOF__MachState__m_pRsi 0x58
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_pRsi
+ == offsetof(MachState, m_pRsi));
+
+#define OFFSETOF__MachState__m_pRbx 0x60
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_pRbx
+ == offsetof(MachState, m_pRbx));
+
+#define OFFSETOF__MachState__m_pRbp 0x68
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_pRbp
+ == offsetof(MachState, m_pRbp));
+
+#define OFFSETOF__MachState__m_pR12 0x70
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_pR12
+ == offsetof(MachState, m_pR12));
+
+#define OFFSETOF__MachState__m_pR13 0x78
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_pR13
+ == offsetof(MachState, m_pR13));
+
+#define OFFSETOF__MachState__m_pR14 0x80
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_pR14
+ == offsetof(MachState, m_pR14));
+
+#define OFFSETOF__MachState__m_pR15 0x88
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState__m_pR15
+ == offsetof(MachState, m_pR15));
+
+#define OFFSETOF__MachState___pRetAddr 0x90
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MachState___pRetAddr
+ == offsetof(MachState, _pRetAddr));
+
+#define OFFSETOF__LazyMachState__m_CaptureRip 0x98
+ASMCONSTANTS_C_ASSERT(OFFSETOF__LazyMachState__m_CaptureRip
+ == offsetof(LazyMachState, m_CaptureRip));
+
+#define OFFSETOF__LazyMachState__m_CaptureRsp 0xA0
+ASMCONSTANTS_C_ASSERT(OFFSETOF__LazyMachState__m_CaptureRsp
+ == offsetof(LazyMachState, m_CaptureRsp));
+
+#define OFFSETOF__MethodDesc__m_wFlags DBG_FRE(0x2E, 0x06)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__MethodDesc__m_wFlags == offsetof(MethodDesc, m_wFlags));
+
+#define OFFSETOF__VASigCookie__pNDirectILStub 0x8
+ASMCONSTANTS_C_ASSERT(OFFSETOF__VASigCookie__pNDirectILStub
+ == offsetof(VASigCookie, pNDirectILStub));
+
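+// The expression below mirrors the Win64 CONTEXT layout term by term:
+// P1Home..P6Home (8*6), ContextFlags+MxCsr (4*2), SegCs..SegSs (2*6),
+// EFlags (4), Dr0-Dr3/Dr6/Dr7 (8*6), Rax..R15 (8*16), Rip (8), the 512-byte
+// XMM_SAVE_AREA32, VectorRegister[26] (26*16), VectorControl (8), and the
+// five DebugControl/LastBranch*/LastException* fields (8*5).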
+#define SIZEOF__CONTEXT (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + /*XMM_SAVE_AREA32*/(2*2 + 1*2 + 2 + 4 + 2*2 + 4 + 2*2 + 4*2 + 16*8 + 16*16 + 1*96) + 26*16 + 8 + 8*5)
+ASMCONSTANTS_C_ASSERT(SIZEOF__CONTEXT
+ == sizeof(CONTEXT));
+
+#define OFFSETOF__CONTEXT__Rax (8*6 + 4*2 + 2*6 + 4 + 8*6)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Rax
+ == offsetof(CONTEXT, Rax));
+
+#define OFFSETOF__CONTEXT__Rcx (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Rcx
+ == offsetof(CONTEXT, Rcx));
+
+#define OFFSETOF__CONTEXT__Rdx (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*2)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Rdx
+ == offsetof(CONTEXT, Rdx));
+
+#define OFFSETOF__CONTEXT__Rbx (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*3)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Rbx
+ == offsetof(CONTEXT, Rbx));
+
+#define OFFSETOF__CONTEXT__Rsp (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*4)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Rsp
+ == offsetof(CONTEXT, Rsp));
+
+#define OFFSETOF__CONTEXT__Rbp (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*5)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Rbp
+ == offsetof(CONTEXT, Rbp));
+
+#define OFFSETOF__CONTEXT__Rsi (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*6)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Rsi
+ == offsetof(CONTEXT, Rsi));
+
+#define OFFSETOF__CONTEXT__Rdi (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*7)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Rdi
+ == offsetof(CONTEXT, Rdi));
+
+#define OFFSETOF__CONTEXT__R8 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*8)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__R8
+ == offsetof(CONTEXT, R8));
+
+#define OFFSETOF__CONTEXT__R9 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*9)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__R9
+ == offsetof(CONTEXT, R9));
+
+#define OFFSETOF__CONTEXT__R10 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*10)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__R10
+ == offsetof(CONTEXT, R10));
+
+#define OFFSETOF__CONTEXT__R11 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*11)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__R11
+ == offsetof(CONTEXT, R11));
+
+#define OFFSETOF__CONTEXT__R12 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*12)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__R12
+ == offsetof(CONTEXT, R12));
+
+#define OFFSETOF__CONTEXT__R13 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*13)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__R13
+ == offsetof(CONTEXT, R13));
+
+#define OFFSETOF__CONTEXT__R14 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*14)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__R14
+ == offsetof(CONTEXT, R14));
+
+#define OFFSETOF__CONTEXT__R15 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*15)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__R15
+ == offsetof(CONTEXT, R15));
+
+#define OFFSETOF__CONTEXT__Rip (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Rip
+ == offsetof(CONTEXT, Rip));
+
+#define OFFSETOF__CONTEXT__Xmm0 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm0
+ == offsetof(CONTEXT, Xmm0));
+
+#define OFFSETOF__CONTEXT__Xmm1 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm1
+ == offsetof(CONTEXT, Xmm1));
+
+#define OFFSETOF__CONTEXT__Xmm2 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16*2)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm2
+ == offsetof(CONTEXT, Xmm2));
+
+#define OFFSETOF__CONTEXT__Xmm3 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16*3)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm3
+ == offsetof(CONTEXT, Xmm3));
+
+#define OFFSETOF__CONTEXT__Xmm4 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16*4)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm4
+ == offsetof(CONTEXT, Xmm4));
+
+#define OFFSETOF__CONTEXT__Xmm5 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16*5)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm5
+ == offsetof(CONTEXT, Xmm5));
+
+#define OFFSETOF__CONTEXT__Xmm6 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16*6)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm6
+ == offsetof(CONTEXT, Xmm6));
+
+#define OFFSETOF__CONTEXT__Xmm7 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16*7)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm7
+ == offsetof(CONTEXT, Xmm7));
+
+#define OFFSETOF__CONTEXT__Xmm8 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16*8)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm8
+ == offsetof(CONTEXT, Xmm8));
+
+#define OFFSETOF__CONTEXT__Xmm9 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16*9)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm9
+ == offsetof(CONTEXT, Xmm9));
+
+#define OFFSETOF__CONTEXT__Xmm10 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16*10)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm10
+ == offsetof(CONTEXT, Xmm10));
+
+#define OFFSETOF__CONTEXT__Xmm11 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16*11)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm11
+ == offsetof(CONTEXT, Xmm11));
+
+#define OFFSETOF__CONTEXT__Xmm12 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16*12)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm12
+ == offsetof(CONTEXT, Xmm12));
+
+#define OFFSETOF__CONTEXT__Xmm13 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16*13)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm13
+ == offsetof(CONTEXT, Xmm13));
+
+#define OFFSETOF__CONTEXT__Xmm14 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16*14)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm14
+ == offsetof(CONTEXT, Xmm14));
+
+#define OFFSETOF__CONTEXT__Xmm15 (8*6 + 4*2 + 2*6 + 4 + 8*6 + 8*16 + 8 + 2*16 + 8*16 + 16*15)
+ASMCONSTANTS_C_ASSERT(OFFSETOF__CONTEXT__Xmm15
+ == offsetof(CONTEXT, Xmm15));
+
+#define SIZEOF__FaultingExceptionFrame (0x20 + SIZEOF__CONTEXT)
+ASMCONSTANTS_C_ASSERT(SIZEOF__FaultingExceptionFrame
+ == sizeof(FaultingExceptionFrame));
+
+#define OFFSETOF__FaultingExceptionFrame__m_fFilterExecuted 0x10
+ASMCONSTANTS_C_ASSERT(OFFSETOF__FaultingExceptionFrame__m_fFilterExecuted
+ == offsetof(FaultingExceptionFrame, m_fFilterExecuted));
+
+#define OFFSETOF__PtrArray__m_NumComponents 0x8
+ASMCONSTANTS_C_ASSERT(OFFSETOF__PtrArray__m_NumComponents
+ == offsetof(PtrArray, m_NumComponents));
+
+#define OFFSETOF__PtrArray__m_Array 0x10
+ASMCONSTANTS_C_ASSERT(OFFSETOF__PtrArray__m_Array
+ == offsetof(PtrArray, m_Array));
+
+
+#define MethodDescClassification__mdcClassification 0x7
+ASMCONSTANTS_C_ASSERT(MethodDescClassification__mdcClassification == mdcClassification);
+
+#define MethodDescClassification__mcInstantiated 0x5
+ASMCONSTANTS_C_ASSERT(MethodDescClassification__mcInstantiated == mcInstantiated);
+
+#ifndef FEATURE_PAL
+
+#define OFFSET__TEB__TlsSlots 0x1480
+ASMCONSTANTS_C_ASSERT(OFFSET__TEB__TlsSlots == offsetof(TEB, TlsSlots));
+
+#define OFFSETOF__TEB__LastErrorValue 0x68
+ASMCONSTANTS_C_ASSERT(OFFSETOF__TEB__LastErrorValue == offsetof(TEB, LastErrorValue));
+
+#endif // !FEATURE_PAL
+
+#ifdef _DEBUG
+#define TLS_GETTER_MAX_SIZE_ASM 0x30
+#else
+#define TLS_GETTER_MAX_SIZE_ASM 0x18
+#endif
+ASMCONSTANTS_C_ASSERT(TLS_GETTER_MAX_SIZE_ASM == TLS_GETTER_MAX_SIZE)
+
+
+// If you change these constants, you need to update code in
+// RedirectedHandledJITCase.asm and excepamd64.cpp.
+#define REDIRECTSTUB_ESTABLISHER_OFFSET_RBP 0
+#define REDIRECTSTUB_RBP_OFFSET_CONTEXT 0x20
+
+#define THROWSTUB_ESTABLISHER_OFFSET_FaultingExceptionFrame 0x30
+
+
+#define UMTHUNKSTUB_HOST_NOTIFY_FLAG_RBPOFFSET (0x40) // xmm save size
+
+#define Thread__ObjectRefFlush ?ObjectRefFlush@Thread@@SAXPEAV1@@Z
+
+
+#define DELEGATE_FIELD_OFFSET__METHOD_AUX 0x20
+ASMCONSTANTS_RUNTIME_ASSERT(DELEGATE_FIELD_OFFSET__METHOD_AUX == Object::GetOffsetOfFirstField() +
+ MscorlibBinder::GetFieldOffset(FIELD__DELEGATE__METHOD_PTR_AUX));
+
+
+#define ASM_LARGE_OBJECT_SIZE 85000
+ASMCONSTANTS_C_ASSERT(ASM_LARGE_OBJECT_SIZE == LARGE_OBJECT_SIZE);
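+// (objects of this size and larger are allocated on the large object heap)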
+
+#define OFFSETOF__ArrayBase__m_NumComponents 8
+ASMCONSTANTS_C_ASSERT(OFFSETOF__ArrayBase__m_NumComponents
+ == offsetof(ArrayBase, m_NumComponents));
+
+#define OFFSETOF__StringObject__m_StringLength 0x8
+ASMCONSTANTS_C_ASSERT(OFFSETOF__StringObject__m_StringLength
+ == offsetof(StringObject, m_StringLength));
+
+#define OFFSETOF__ArrayTypeDesc__m_TemplateMT 8
+ASMCONSTANTS_C_ASSERT(OFFSETOF__ArrayTypeDesc__m_TemplateMT
+ == offsetof(ArrayTypeDesc, m_TemplateMT));
+
+#define OFFSETOF__ArrayTypeDesc__m_Arg 0x10
+ASMCONSTANTS_C_ASSERT(OFFSETOF__ArrayTypeDesc__m_Arg
+ == offsetof(ArrayTypeDesc, m_Arg));
+
+#define SYNCBLOCKINDEX_OFFSET 0x4
+ASMCONSTANTS_C_ASSERT(SYNCBLOCKINDEX_OFFSET
+ == (sizeof(ObjHeader) - offsetof(ObjHeader, m_SyncBlockValue)));
+
+#define CallDescrData__pSrc 0x00
+#define CallDescrData__numStackSlots 0x08
+#ifdef UNIX_AMD64_ABI
+#define CallDescrData__pArgumentRegisters 0x10
+#define CallDescrData__pFloatArgumentRegisters 0x18
+#define CallDescrData__fpReturnSize 0x20
+#define CallDescrData__pTarget 0x28
+#define CallDescrData__returnValue 0x30
+#else
+#define CallDescrData__dwRegTypeMap 0x10
+#define CallDescrData__fpReturnSize 0x18
+#define CallDescrData__pTarget 0x20
+#define CallDescrData__returnValue 0x28
+#endif
+
+ASMCONSTANTS_C_ASSERT(CallDescrData__pSrc == offsetof(CallDescrData, pSrc))
+ASMCONSTANTS_C_ASSERT(CallDescrData__numStackSlots == offsetof(CallDescrData, numStackSlots))
+#ifdef UNIX_AMD64_ABI
+ASMCONSTANTS_C_ASSERT(CallDescrData__pArgumentRegisters == offsetof(CallDescrData, pArgumentRegisters))
+ASMCONSTANTS_C_ASSERT(CallDescrData__pFloatArgumentRegisters == offsetof(CallDescrData, pFloatArgumentRegisters))
+#else
+ASMCONSTANTS_C_ASSERT(CallDescrData__dwRegTypeMap == offsetof(CallDescrData, dwRegTypeMap))
+#endif
+ASMCONSTANTS_C_ASSERT(CallDescrData__fpReturnSize == offsetof(CallDescrData, fpReturnSize))
+ASMCONSTANTS_C_ASSERT(CallDescrData__pTarget == offsetof(CallDescrData, pTarget))
+ASMCONSTANTS_C_ASSERT(CallDescrData__returnValue == offsetof(CallDescrData, returnValue))
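+
+// For reference, a sketch of the Windows layout these offsets imply (field
+// names come from the asserts above; the exact field types and the 4 bytes
+// of padding after numStackSlots are inferred, not authoritative):
+//
+//     struct CallDescrData {
+//         PVOID     pSrc;          // 0x00
+//         DWORD     numStackSlots; // 0x08 (padded to 0x10)
+//         DWORD_PTR dwRegTypeMap;  // 0x10
+//         DWORD_PTR fpReturnSize;  // 0x18
+//         PVOID     pTarget;       // 0x20
+//         UINT64    returnValue;   // 0x28
+//     };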
+
+#undef ASMCONSTANTS_RUNTIME_ASSERT
+#undef ASMCONSTANTS_C_ASSERT
+#undef DBG_FRE
+
+
+//#define USE_COMPILE_TIME_CONSTANT_FINDER // Uncomment this line to use the constant finder
+#if defined(__cplusplus) && defined(USE_COMPILE_TIME_CONSTANT_FINDER)
+// This class causes the compiler to emit an error with the constant we're interested in
+// in the error message. This is useful if a size or offset changes. To use, comment out
+// the compile-time assert that is firing, enable the constant finder, add the appropriate
+// constant to find to BogusFunction(), and build.
+//
+// Here's a sample compiler error:
+// d:\dd\clr\src\ndp\clr\src\vm\i386\asmconstants.h(326) : error C2248: 'FindCompileTimeConstant<N>::FindCompileTimeConstant' : cannot access private member declared in class 'FindCompileTimeConstant<N>'
+// with
+// [
+// N=1520
+// ]
+// d:\dd\clr\src\ndp\clr\src\vm\i386\asmconstants.h(321) : see declaration of 'FindCompileTimeConstant<N>::FindCompileTimeConstant'
+// with
+// [
+// N=1520
+// ]
+template<size_t N>
+class FindCompileTimeConstant
+{
+private:
+ FindCompileTimeConstant();
+};
+
+void BogusFunction()
+{
+ // Sample usage to generate the error
+ FindCompileTimeConstant<offsetof(Thread, m_pDomain)> bogus_variable;
+ FindCompileTimeConstant<offsetof(Thread, m_ExceptionState)> bogus_variable2;
+}
+#endif // defined(__cplusplus) && defined(USE_COMPILE_TIME_CONSTANT_FINDER)
diff --git a/src/vm/amd64/calldescrworkeramd64.S b/src/vm/amd64/calldescrworkeramd64.S
new file mode 100644
index 0000000000..a126ad18d7
--- /dev/null
+++ b/src/vm/amd64/calldescrworkeramd64.S
@@ -0,0 +1,132 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+#define real4 dword
+#define real8 qword
+
+//extern CallDescrWorkerUnwindFrameChainHandler:proc
+
+//
+// EXTERN_C void FastCallFinalizeWorker(Object *obj, PCODE funcPtr);
+//
+NESTED_ENTRY FastCallFinalizeWorker, _TEXT, CallDescrWorkerUnwindFrameChainHandler
+ alloc_stack 0x28 // alloc callee scratch and align the stack
+ END_PROLOGUE
+
+ //
+ // RDI: already contains obj*
+ // RSI: address of finalizer method to call
+ //
+
+ // !!!!!!!!!
+ // NOTE: you cannot tail call here because we must have the CallDescrWorkerUnwindFrameChainHandler
+ // personality routine on the stack.
+ // !!!!!!!!!
+ call rsi
+ xor rax, rax
+
+ // epilog
+ add rsp, 0x28
+ ret
+
+
+NESTED_END FastCallFinalizeWorker, _TEXT
+
+//extern "C" void CallDescrWorkerInternal(CallDescrData * pCallDescrData);
+
+NESTED_ENTRY CallDescrWorkerInternal, _TEXT, CallDescrWorkerUnwindFrameChainHandler
+ push_nonvol_reg rbx // save nonvolatile registers
+ push_nonvol_reg rbp //
+ set_frame rbp, 0 // set frame pointer
+ lea rsp, [rsp - 8] // ensure proper alignment of the rsp
+
+ END_PROLOGUE
+
+ mov rbx, rdi // save pCallDescrData in rbx
+
+ mov ecx, dword ptr [rbx + CallDescrData__numStackSlots]
+
+ and ecx, ecx
+ jz NoStackArguments
+
+ test ecx, 1
+ jz StackAligned
+ push rax
+StackAligned:
+
+ mov rsi, [rbx + CallDescrData__pSrc] // set source argument list address
+ lea rsi, [rsi + 8 * rcx]
+
+StackCopyLoop: // copy the arguments to stack top-down to carefully probe for sufficient stack space
+ sub rsi, 8
+ push qword ptr [rsi]
+ dec ecx
+ jnz StackCopyLoop
+NoStackArguments:
+ // All argument registers are loaded regardless of the actual number
+ // of arguments.
+
+ mov rax, [rbx + CallDescrData__pArgumentRegisters]
+ mov rdi, [rax + 0]
+ mov rsi, [rax + 8]
+ mov rdx, [rax + 16]
+ mov rcx, [rax + 24]
+ mov r8, [rax + 32]
+ mov r9, [rax + 40]
+
+ // All float argument registers are loaded regardless of the actual number
+ // of arguments.
+
+ mov rax, [rbx + CallDescrData__pFloatArgumentRegisters]
+ and rax, rax
+ jz NoFloatArguments
+ movsd xmm0, [rax + 0]
+ movsd xmm1, [rax + 16]
+ movsd xmm2, [rax + 32]
+ movsd xmm3, [rax + 48]
+ movsd xmm4, [rax + 64]
+ movsd xmm5, [rax + 80]
+ movsd xmm6, [rax + 96]
+ movsd xmm7, [rax + 112]
+NoFloatArguments:
+ call qword ptr [rbx + CallDescrData__pTarget] // call target function
+
+ // Save FP return value
+
+ mov ecx, dword ptr [rbx + CallDescrData__fpReturnSize]
+ test ecx, ecx
+ jz ReturnsInt
+
+ cmp ecx, 4
+ je ReturnsFloat
+ cmp ecx, 8
+ je ReturnsDouble
+ // unexpected
+ jmp Epilog
+
+ReturnsInt:
+ mov [rbx+CallDescrData__returnValue], rax
+
+Epilog:
+ lea rsp, 0[rbp] // deallocate argument list
+ pop rbp // restore nonvolatile register
+ pop rbx //
+ ret
+
+ReturnsFloat:
+ movss real4 ptr [rbx+CallDescrData__returnValue], xmm0
+ jmp Epilog
+
+ReturnsDouble:
+ movsd real8 ptr [rbx+CallDescrData__returnValue], xmm0
+ jmp Epilog
+
+NESTED_END CallDescrWorkerInternal, _TEXT
+
+
diff --git a/src/vm/amd64/cgenamd64.cpp b/src/vm/amd64/cgenamd64.cpp
new file mode 100644
index 0000000000..c02095d0d7
--- /dev/null
+++ b/src/vm/amd64/cgenamd64.cpp
@@ -0,0 +1,1152 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// Various helper routines for generating AMD64 assembly code.
+//
+
+// Precompiled Header
+
+#include "common.h"
+
+#include "stublink.h"
+#include "cgensys.h"
+#include "siginfo.hpp"
+#include "excep.h"
+#include "ecall.h"
+#include "dllimport.h"
+#include "dllimportcallback.h"
+#include "dbginterface.h"
+#include "fcall.h"
+#include "array.h"
+#include "virtualcallstub.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "clrtocomcall.h"
+#endif // FEATURE_COMINTEROP
+
+void UpdateRegDisplayFromCalleeSavedRegisters(REGDISPLAY * pRD, CalleeSavedRegisters * pRegs)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ T_CONTEXT * pContext = pRD->pCurrentContext;
+ pContext->Rbp = pRegs->rbp;
+#ifndef UNIX_AMD64_ABI
+ pContext->Rsi = pRegs->rsi;
+ pContext->Rdi = pRegs->rdi;
+#endif
+ pContext->R12 = pRegs->r12;
+ pContext->R13 = pRegs->r13;
+ pContext->R14 = pRegs->r14;
+ pContext->R15 = pRegs->r15;
+
+ KNONVOLATILE_CONTEXT_POINTERS * pContextPointers = pRD->pCurrentContextPointers;
+ pContextPointers->Rbx = (PULONG64)&pRegs->rbx;
+#ifndef UNIX_AMD64_ABI
+ pContextPointers->Rsi = (PULONG64)&pRegs->rsi;
+ pContextPointers->Rdi = (PULONG64)&pRegs->rdi;
+#endif
+ pContextPointers->Rbp = (PULONG64)&pRegs->rbp;
+ pContextPointers->R12 = (PULONG64)&pRegs->r12;
+ pContextPointers->R13 = (PULONG64)&pRegs->r13;
+ pContextPointers->R14 = (PULONG64)&pRegs->r14;
+ pContextPointers->R15 = (PULONG64)&pRegs->r15;
+}
+
+void ClearRegDisplayArgumentAndScratchRegisters(REGDISPLAY * pRD)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ KNONVOLATILE_CONTEXT_POINTERS * pContextPointers = pRD->pCurrentContextPointers;
+ pContextPointers->Rax = NULL;
+#ifdef UNIX_AMD64_ABI
+ pContextPointers->Rsi = NULL;
+ pContextPointers->Rdi = NULL;
+#endif
+ pContextPointers->Rcx = NULL;
+ pContextPointers->Rdx = NULL;
+ pContextPointers->R8 = NULL;
+ pContextPointers->R9 = NULL;
+ pContextPointers->R10 = NULL;
+ pContextPointers->R11 = NULL;
+}
+
+void TransitionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+
+ pRD->pCurrentContext->Rip = GetReturnAddress();
+ pRD->pCurrentContext->Rsp = GetSP();
+
+ UpdateRegDisplayFromCalleeSavedRegisters(pRD, GetCalleeSavedRegisters());
+ ClearRegDisplayArgumentAndScratchRegisters(pRD);
+
+ SyncRegDisplayToCurrentContext(pRD);
+
+ LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK TransitionFrame::UpdateRegDisplay(rip:%p, rsp:%p)\n", pRD->ControlPC, pRD->SP));
+}
+
+#ifndef DACCESS_COMPILE
+
+void TailCallFrame::InitFromContext(T_CONTEXT * pContext)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifndef UNIX_AMD64_ABI
+ m_calleeSavedRegisters.rdi = pContext->Rdi;
+ m_calleeSavedRegisters.rsi = pContext->Rsi;
+#endif
+ m_calleeSavedRegisters.rbx = pContext->Rbx;
+ m_calleeSavedRegisters.rbp = pContext->Rbp;
+ m_calleeSavedRegisters.r12 = pContext->R12;
+ m_calleeSavedRegisters.r13 = pContext->R13;
+ m_calleeSavedRegisters.r14 = pContext->R14;
+ m_calleeSavedRegisters.r15 = pContext->R15;
+ m_pGCLayout = 0;
+ m_ReturnAddress = pContext->Rip;
+}
+
+#endif // !DACCESS_COMPILE
+
+void TailCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+
+ pRD->pCurrentContext->Rip = m_ReturnAddress;
+ pRD->pCurrentContext->Rsp = dac_cast<TADDR>(this) + sizeof(*this);
+
+ UpdateRegDisplayFromCalleeSavedRegisters(pRD, &m_calleeSavedRegisters);
+ ClearRegDisplayArgumentAndScratchRegisters(pRD);
+
+ SyncRegDisplayToCurrentContext(pRD);
+
+ LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK TransitionFrame::UpdateRegDisplay(rip:%p, rsp:%p)\n", pRD->ControlPC, pRD->SP));
+}
+
+void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+#ifdef PROFILING_SUPPORTED
+ PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this));
+#endif
+ HOST_NOCALLS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (!InlinedCallFrame::FrameHasActiveCall(this))
+ {
+ LOG((LF_CORDB, LL_ERROR, "WARNING: InlinedCallFrame::UpdateRegDisplay called on inactive frame %p\n", this));
+ return;
+ }
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+
+ pRD->pCurrentContext->Rip = *(DWORD64 *)&m_pCallerReturnAddress;
+ pRD->pCurrentContext->Rsp = *(DWORD64 *)&m_pCallSiteSP;
+ pRD->pCurrentContext->Rbp = *(DWORD64 *)&m_pCalleeSavedFP;
+
+ ClearRegDisplayArgumentAndScratchRegisters(pRD);
+
+ pRD->pCurrentContextPointers->Rbx = NULL;
+#ifndef UNIX_AMD64_ABI
+ pRD->pCurrentContextPointers->Rsi = NULL;
+ pRD->pCurrentContextPointers->Rdi = NULL;
+#endif
+ pRD->pCurrentContextPointers->Rbp = (DWORD64 *)&m_pCalleeSavedFP;
+ pRD->pCurrentContextPointers->R12 = NULL;
+ pRD->pCurrentContextPointers->R13 = NULL;
+ pRD->pCurrentContextPointers->R14 = NULL;
+ pRD->pCurrentContextPointers->R15 = NULL;
+
+ SyncRegDisplayToCurrentContext(pRD);
+
+ LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK InlinedCallFrame::UpdateRegDisplay(rip:%p, rsp:%p)\n", pRD->ControlPC, pRD->SP));
+}
+
+void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_MachState._pRetAddr == PTR_TADDR(&m_MachState.m_Rip));
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+
+ //
+ // Copy the saved state from the frame to the current context.
+ //
+
+ LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK HelperMethodFrame::UpdateRegDisplay cached ip:%p, sp:%p\n", m_MachState.m_Rip, m_MachState.m_Rsp));
+
+#if defined(DACCESS_COMPILE)
+ // For DAC, we may get here when the HMF is still uninitialized.
+ // So we may need to unwind here.
+ if (!m_MachState.isValid())
+ {
+ // This allocation throws on OOM.
+ MachState* pUnwoundState = (MachState*)DacAllocHostOnlyInstance(sizeof(*pUnwoundState), true);
+
+ InsureInit(false, pUnwoundState);
+
+ pRD->pCurrentContext->Rip = pRD->ControlPC = pUnwoundState->m_Rip;
+ pRD->pCurrentContext->Rsp = pRD->SP = pUnwoundState->m_Rsp;
+
+#ifndef UNIX_AMD64_ABI
+ pRD->pCurrentContext->Rdi = pUnwoundState->m_CaptureRdi;
+ pRD->pCurrentContext->Rsi = pUnwoundState->m_CaptureRsi;
+#endif
+ pRD->pCurrentContext->Rbx = pUnwoundState->m_CaptureRbx;
+ pRD->pCurrentContext->Rbp = pUnwoundState->m_CaptureRbp;
+ pRD->pCurrentContext->R12 = pUnwoundState->m_CaptureR12;
+ pRD->pCurrentContext->R13 = pUnwoundState->m_CaptureR13;
+ pRD->pCurrentContext->R14 = pUnwoundState->m_CaptureR14;
+ pRD->pCurrentContext->R15 = pUnwoundState->m_CaptureR15;
+
+ return;
+ }
+#endif // DACCESS_COMPILE
+
+ pRD->pCurrentContext->Rip = pRD->ControlPC = m_MachState.m_Rip;
+ pRD->pCurrentContext->Rsp = pRD->SP = m_MachState.m_Rsp;
+
+#ifndef UNIX_AMD64_ABI
+ pRD->pCurrentContext->Rdi = *m_MachState.m_pRdi;
+ pRD->pCurrentContext->Rsi = *m_MachState.m_pRsi;
+#endif
+ pRD->pCurrentContext->Rbx = *m_MachState.m_pRbx;
+ pRD->pCurrentContext->Rbp = *m_MachState.m_pRbp;
+ pRD->pCurrentContext->R12 = *m_MachState.m_pR12;
+ pRD->pCurrentContext->R13 = *m_MachState.m_pR13;
+ pRD->pCurrentContext->R14 = *m_MachState.m_pR14;
+ pRD->pCurrentContext->R15 = *m_MachState.m_pR15;
+#ifndef UNIX_AMD64_ABI
+ pRD->pCurrentContextPointers->Rdi = m_MachState.m_pRdi;
+ pRD->pCurrentContextPointers->Rsi = m_MachState.m_pRsi;
+#endif
+ pRD->pCurrentContextPointers->Rbx = m_MachState.m_pRbx;
+ pRD->pCurrentContextPointers->Rbp = m_MachState.m_pRbp;
+ pRD->pCurrentContextPointers->R12 = m_MachState.m_pR12;
+ pRD->pCurrentContextPointers->R13 = m_MachState.m_pR13;
+ pRD->pCurrentContextPointers->R14 = m_MachState.m_pR14;
+ pRD->pCurrentContextPointers->R15 = m_MachState.m_pR15;
+
+ //
+ // Clear all knowledge of scratch registers. We're skipping to any
+ // arbitrary point on the stack, and frames aren't required to preserve or
+ // keep track of these anyway.
+ //
+
+ ClearRegDisplayArgumentAndScratchRegisters(pRD);
+}
+
+void FaultingExceptionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ memcpy(pRD->pCurrentContext, &m_ctx, sizeof(CONTEXT));
+
+ pRD->ControlPC = m_ctx.Rip;
+
+ pRD->SP = m_ctx.Rsp;
+
+ pRD->pCurrentContextPointers->Rax = &m_ctx.Rax;
+ pRD->pCurrentContextPointers->Rcx = &m_ctx.Rcx;
+ pRD->pCurrentContextPointers->Rdx = &m_ctx.Rdx;
+ pRD->pCurrentContextPointers->Rbx = &m_ctx.Rbx;
+ pRD->pCurrentContextPointers->Rbp = &m_ctx.Rbp;
+ pRD->pCurrentContextPointers->Rsi = &m_ctx.Rsi;
+ pRD->pCurrentContextPointers->Rdi = &m_ctx.Rdi;
+ pRD->pCurrentContextPointers->R8 = &m_ctx.R8;
+ pRD->pCurrentContextPointers->R9 = &m_ctx.R9;
+ pRD->pCurrentContextPointers->R10 = &m_ctx.R10;
+ pRD->pCurrentContextPointers->R11 = &m_ctx.R11;
+ pRD->pCurrentContextPointers->R12 = &m_ctx.R12;
+ pRD->pCurrentContextPointers->R13 = &m_ctx.R13;
+ pRD->pCurrentContextPointers->R14 = &m_ctx.R14;
+ pRD->pCurrentContextPointers->R15 = &m_ctx.R15;
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+}
+
+#ifdef FEATURE_HIJACK
+TADDR ResumableFrame::GetReturnAddressPtr()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<TADDR>(m_Regs) + offsetof(CONTEXT, Rip);
+}
+
+void ResumableFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ CopyMemory(pRD->pCurrentContext, m_Regs, sizeof(CONTEXT));
+
+ pRD->ControlPC = m_Regs->Rip;
+
+ pRD->SP = m_Regs->Rsp;
+
+ pRD->pCurrentContextPointers->Rax = &m_Regs->Rax;
+ pRD->pCurrentContextPointers->Rcx = &m_Regs->Rcx;
+ pRD->pCurrentContextPointers->Rdx = &m_Regs->Rdx;
+ pRD->pCurrentContextPointers->Rbx = &m_Regs->Rbx;
+ pRD->pCurrentContextPointers->Rbp = &m_Regs->Rbp;
+ pRD->pCurrentContextPointers->Rsi = &m_Regs->Rsi;
+ pRD->pCurrentContextPointers->Rdi = &m_Regs->Rdi;
+ pRD->pCurrentContextPointers->R8 = &m_Regs->R8;
+ pRD->pCurrentContextPointers->R9 = &m_Regs->R9;
+ pRD->pCurrentContextPointers->R10 = &m_Regs->R10;
+ pRD->pCurrentContextPointers->R11 = &m_Regs->R11;
+ pRD->pCurrentContextPointers->R12 = &m_Regs->R12;
+ pRD->pCurrentContextPointers->R13 = &m_Regs->R13;
+ pRD->pCurrentContextPointers->R14 = &m_Regs->R14;
+ pRD->pCurrentContextPointers->R15 = &m_Regs->R15;
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+
+ RETURN;
+}
+
+// The HijackFrame has to know the registers that are pushed by OnHijackObjectTripThread
+// and OnHijackScalarTripThread, so all three are implemented together.
+void HijackFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+
+ pRD->pCurrentContext->Rip = m_ReturnAddress;
+ pRD->pCurrentContext->Rsp = PTR_TO_MEMBER_TADDR(HijackArgs, m_Args, Rip) + sizeof(void *);
+
+ UpdateRegDisplayFromCalleeSavedRegisters(pRD, &(m_Args->Regs));
+
+ pRD->pCurrentContextPointers->Rcx = NULL;
+ pRD->pCurrentContextPointers->Rdx = NULL;
+ pRD->pCurrentContextPointers->R8 = NULL;
+ pRD->pCurrentContextPointers->R9 = NULL;
+ pRD->pCurrentContextPointers->R10 = NULL;
+ pRD->pCurrentContextPointers->R11 = NULL;
+
+ pRD->pCurrentContextPointers->Rax = (PULONG64)&m_Args->Rax;
+
+ SyncRegDisplayToCurrentContext(pRD);
+
+/*
+ // This only describes the top-most frame
+ pRD->pContext = NULL;
+
+
+ pRD->PCTAddr = dac_cast<TADDR>(m_Args) + offsetof(HijackArgs, Rip);
+ //pRD->pPC = PTR_SLOT(pRD->PCTAddr);
+ pRD->SP = (ULONG64)(pRD->PCTAddr + sizeof(TADDR));
+*/
+}
+#endif // FEATURE_HIJACK
+
+BOOL isJumpRel32(PCODE pCode)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ PTR_BYTE pbCode = PTR_BYTE(pCode);
+
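+ // E9 <rel32> == jmp rel32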
+ return 0xE9 == pbCode[0];
+}
+
+//
+// Given the same pBuffer that was used by emitJump this
+// method decodes the instructions and returns the jump target
+//
+PCODE decodeJump32(PCODE pBuffer)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ // jmp rel32
+ _ASSERTE(isJumpRel32(pBuffer));
+
+ return rel32Decode(pBuffer+1);
+}
+
+BOOL isJumpRel64(PCODE pCode)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ PTR_BYTE pbCode = PTR_BYTE(pCode);
+
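+ // 48 B8 <imm64> == mov rax, imm64 / FF E0 == jmp rax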
+ return 0x48 == pbCode[0] &&
+ 0xB8 == pbCode[1] &&
+ 0xFF == pbCode[10] &&
+ 0xE0 == pbCode[11];
+}
+
+PCODE decodeJump64(PCODE pBuffer)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ // mov rax, xxx
+ // jmp rax
+ _ASSERTE(isJumpRel64(pBuffer));
+
+ return *PTR_UINT64(pBuffer+2);
+}
+
+#ifdef DACCESS_COMPILE
+BOOL GetAnyThunkTarget (CONTEXT *pctx, TADDR *pTarget, TADDR *pTargetMethodDesc)
+{
+ TADDR pThunk = GetIP(pctx);
+
+ *pTargetMethodDesc = NULL;
+
+ //
+ // Check for something generated by emitJump.
+ //
+ if (isJumpRel64(pThunk))
+ {
+ *pTarget = decodeJump64(pThunk);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+#endif // DACCESS_COMPILE
+
+
+#ifndef DACCESS_COMPILE
+
+// Note: This is only used on server GC on Windows.
+//
+// This function returns the number of logical processors on a given physical chip. If it cannot
+// determine the number of logical CPUs, or if the machine is not populated uniformly with the
+// same type of processors, this function returns 1.
+
+extern "C" DWORD __stdcall getcpuid(DWORD arg, unsigned char result[16]);
+
+// fix this if/when AMD does multicore or SMT
+DWORD GetLogicalCpuCount()
+{
+ // No CONTRACT possible because GetLogicalCpuCount uses SEH
+
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ static DWORD val = 0;
+
+ // cache value for later re-use
+ if (val)
+ {
+ return val;
+ }
+
+ struct Param : DefaultCatchFilterParam
+ {
+ DWORD retVal;
+ } param;
+ param.pv = COMPLUS_EXCEPTION_EXECUTE_HANDLER;
+ param.retVal = 1;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+
+ unsigned char buffer[16];
+ DWORD maxCpuId = getcpuid(0, buffer);
+ DWORD* dwBuffer = (DWORD*)buffer;
+
+ if (maxCpuId < 1)
+ goto qExit;
+
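+ // The vendor check below matches the "GenuineIntel" identifier that CPUID
+ // leaf 0 returns in EBX:EDX:ECX ("Genu"/"ineI"/"ntel"). Assuming getcpuid
+ // stores EAX,EBX,ECX,EDX into the buffer in that order, dwBuffer[1]/[3]/[2]
+ // hold EBX/EDX/ECX; the multi-character constants compare against
+ // little-endian DWORDs, so each four-byte chunk reads reversed:
+ // 'uneG', 'Ieni', 'letn'.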
+ if (dwBuffer[1] == 'uneG') {
+ if (dwBuffer[3] == 'Ieni') {
+ if (dwBuffer[2] == 'letn') { // get SMT/multicore enumeration for Intel EM64T
+
+
+ // TODO: Currently GetLogicalCpuCountFromOS() and GetLogicalCpuCountFallback() are broken on
+ // multi-core processors, but we never call into those two functions since we don't halve the
+ // gen0 size on Prescott and later processors. We keep the old version here for earlier
+ // generation (Northwood-based) systems; perf data suggests that on those systems halving the
+ // gen0 size still boosts performance (e.g., BizTalk improves by about 17%). So on those
+ // earlier Northwood-based systems we still go ahead and halve the gen0 size. The logic in
+ // GetLogicalCpuCountFromOS() and GetLogicalCpuCountFallback() works fine for those earlier
+ // generation systems.
+ // On Prescott and later processors, and on multi-core, perf data suggests that not halving
+ // the gen0 size at all gives better overall performance.
+ // This is going to be fixed with a new version in the Orcas time frame.
+
+ if( (maxCpuId > 3) && (maxCpuId < 0x80000000) )
+ goto qExit;
+
+ val = GetLogicalCpuCountFromOS(); //try to obtain HT enumeration from OS API
+ if (val )
+ {
+ pParam->retVal = val; // OS API HT enumeration successful, we are Done
+ goto qExit;
+ }
+
+ val = GetLogicalCpuCountFallback(); // Fallback to HT enumeration using CPUID
+ if( val )
+ pParam->retVal = val;
+ }
+ }
+ }
+qExit: ;
+ }
+
+ PAL_EXCEPT_FILTER(DefaultCatchFilter)
+ {
+ }
+ PAL_ENDTRY
+
+ if (val == 0)
+ {
+ val = param.retVal;
+ }
+
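+ // Note: at this point val and param.retVal should agree (benign races on the
+ // static cache aside): every path that sets val to a nonzero count also copies
+ // it into retVal, and a zero val was just seeded from retVal above.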
+ return param.retVal;
+}
+
+void EncodeLoadAndJumpThunk (LPBYTE pBuffer, LPVOID pv, LPVOID pTarget)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pBuffer));
+ }
+ CONTRACTL_END;
+
+ // mov r10, pv 49 ba xx xx xx xx xx xx xx xx
+
+ pBuffer[0] = 0x49;
+ pBuffer[1] = 0xBA;
+
+ *((UINT64 UNALIGNED *)&pBuffer[2]) = (UINT64)pv;
+
+ // mov rax, pTarget 48 b8 xx xx xx xx xx xx xx xx
+
+ pBuffer[10] = 0x48;
+ pBuffer[11] = 0xB8;
+
+ *((UINT64 UNALIGNED *)&pBuffer[12]) = (UINT64)pTarget;
+
+ // jmp rax ff e0
+
+ pBuffer[20] = 0xFF;
+ pBuffer[21] = 0xE0;
+
+ _ASSERTE(DbgIsExecutable(pBuffer, 22));
+}
+
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ BYTE *pBuffer = (BYTE*)pCOMMethod - COMMETHOD_CALL_PRESTUB_SIZE;
+
+ // We need the target to be in a 64-bit aligned memory location and the call instruction
+ // to immediately precede the ComCallMethodDesc. We'll generate an indirect call to avoid
+ // consuming 3 qwords for this (mov rax, | target | nops & call rax).
+
+ // dq 123456789abcdef0h
+ // nop 90
+ // nop 90
+ // call [$ - 10] ff 15 f0 ff ff ff
+
+ *((UINT64 *)&pBuffer[COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET]) = (UINT64)target;
+
+ pBuffer[-2] = 0x90;
+ pBuffer[-1] = 0x90;
+
+ pBuffer[0] = 0xFF;
+ pBuffer[1] = 0x15;
+ *((UINT32 UNALIGNED *)&pBuffer[2]) = (UINT32)(COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET - COMMETHOD_CALL_PRESTUB_SIZE);
+
+ _ASSERTE(DbgIsExecutable(pBuffer, COMMETHOD_CALL_PRESTUB_SIZE));
+
+ RETURN;
+}
+
+void emitJump(LPBYTE pBuffer, LPVOID target)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pBuffer));
+ }
+ CONTRACTL_END;
+
+ // mov rax, 123456789abcdef0h 48 b8 xx xx xx xx xx xx xx xx
+ // jmp rax ff e0
+
+ pBuffer[0] = 0x48;
+ pBuffer[1] = 0xB8;
+
+ *((UINT64 UNALIGNED *)&pBuffer[2]) = (UINT64)target;
+
+ pBuffer[10] = 0xFF;
+ pBuffer[11] = 0xE0;
+
+ _ASSERTE(DbgIsExecutable(pBuffer, 12));
+}
+
+void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // padding // CC CC CC CC
+ // mov r10, pUMEntryThunk // 49 ba xx xx xx xx xx xx xx xx // METHODDESC_REGISTER
+ // mov rax, pJmpDest // 48 b8 xx xx xx xx xx xx xx xx // need to ensure this imm64 is qword aligned
+ // TAILJMP_RAX // 48 FF E0
+
+#ifdef _DEBUG
+ m_padding[0] = X86_INSTR_INT3;
+ m_padding[1] = X86_INSTR_INT3;
+ m_padding[2] = X86_INSTR_INT3;
+ m_padding[3] = X86_INSTR_INT3;
+#endif // _DEBUG
+ m_movR10[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT | REX_OPCODE_REG_EXT;
+ m_movR10[1] = 0xBA;
+ m_uet = pvSecretParam;
+ m_movRAX[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ m_movRAX[1] = 0xB8;
+ m_execstub = pTargetCode;
+ m_jmpRAX[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ m_jmpRAX[1] = 0xFF;
+ m_jmpRAX[2] = 0xE0;
+
+ _ASSERTE(DbgIsExecutable(&m_movR10[0], &m_jmpRAX[3]-&m_movR10[0]));
+}
+
+UMEntryThunk* UMEntryThunk::Decode(LPVOID pCallback)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ UMEntryThunkCode *pThunkCode = (UMEntryThunkCode*)((BYTE*)pCallback - UMEntryThunkCode::GetEntryPointOffset());
+
+ return (UMEntryThunk*)pThunkCode->m_uet;
+}
+
+INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMethod, LoaderAllocator *pLoaderAllocator /* = NULL */)
+{
+ CONTRACTL
+ {
+ THROWS; // Creating a JumpStub could throw OutOfMemory
+ GC_NOTRIGGER;
+
+ PRECONDITION(pMethod != NULL || pLoaderAllocator != NULL);
+ // If a loader allocator isn't explicitly provided, we must be able to get one via the MethodDesc.
+ PRECONDITION(pLoaderAllocator != NULL || pMethod->GetLoaderAllocator() != NULL);
+ // If a loader allocator is provided, the MethodDesc mustn't yet be set up to have one, or it must match the
+ // MethodDesc's loader allocator, unless we're in a compilation process (NGen loads assemblies as domain-bound
+ // but compiles them as domain neutral).
+ PRECONDITION(!pLoaderAllocator || !pMethod || pMethod->GetMethodDescChunk()->GetMethodTablePtr()->IsNull() ||
+ pLoaderAllocator == pMethod->GetMethodDescChunk()->GetFirstMethodDesc()->GetLoaderAllocatorForCode() || IsCompilationProcess());
+ }
+ CONTRACTL_END;
+
+ TADDR baseAddr = (TADDR)pRel32 + 4;
+
+ INT_PTR offset = target - baseAddr;
+
+ if (!FitsInI4(offset) INDEBUG(|| PEDecoder::GetForceRelocs()))
+ {
+ TADDR loAddr = baseAddr + INT32_MIN;
+ if (loAddr > baseAddr) loAddr = UINT64_MIN; // overflow
+
+ TADDR hiAddr = baseAddr + INT32_MAX;
+ if (hiAddr < baseAddr) hiAddr = UINT64_MAX; // overflow
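+
+ // [loAddr, hiAddr] is the rel32-reachable window around the call site; the
+ // jump stub requested below must land inside it so that the displacement
+ // computed against baseAddr is guaranteed to fit in 32 bits.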
+
+ PCODE jumpStubAddr = ExecutionManager::jumpStub(pMethod,
+ target,
+ (BYTE *)loAddr,
+ (BYTE *)hiAddr,
+ pLoaderAllocator);
+
+ offset = jumpStubAddr - baseAddr;
+
+ if (!FitsInI4(offset))
+ {
+ _ASSERTE(!"jump stub was not in expected range");
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+ }
+
+ _ASSERTE(FitsInI4(offset));
+ return static_cast<INT32>(offset);
+}
+
+BOOL DoesSlotCallPrestub(PCODE pCode)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION(pCode != GetPreStubEntryPoint());
+ } CONTRACTL_END;
+
+ // AMD64 has the following possible sequences for prestub logic:
+ // 1. slot -> temporary entrypoint -> prestub
+ // 2. slot -> precode -> prestub
+ // 3. slot -> precode -> jumprel64 (jump stub) -> prestub
+ // 4. slot -> precode -> jumprel64 (NGEN case) -> prestub
+
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ if (MethodDescChunk::GetMethodDescFromCompactEntryPoint(pCode, TRUE) != NULL)
+ {
+ return TRUE;
+ }
+#endif
+
+ if (!IS_ALIGNED(pCode, PRECODE_ALIGNMENT))
+ {
+ return FALSE;
+ }
+
+#ifdef HAS_FIXUP_PRECODE
+ if (*PTR_BYTE(pCode) == X86_INSTR_CALL_REL32)
+ {
+ // Note that call could have been patched to jmp in the meantime
+ pCode = rel32Decode(pCode+1);
+
+#ifdef FEATURE_PREJIT
+ // NGEN helper
+ if (*PTR_BYTE(pCode) == X86_INSTR_JMP_REL32) {
+ pCode = (TADDR)rel32Decode(pCode+1);
+ }
+#endif
+
+ // JumpStub
+ if (isJumpRel64(pCode)) {
+ pCode = decodeJump64(pCode);
+ }
+
+ return pCode == (TADDR)PrecodeFixupThunk;
+ }
+#endif
+
+ if (*PTR_USHORT(pCode) != X86_INSTR_MOV_R10_IMM64 || // mov r10,XXXX
+ *PTR_BYTE(pCode+10) != X86_INSTR_NOP || // nop
+ *PTR_BYTE(pCode+11) != X86_INSTR_JMP_REL32) // jmp rel32
+ {
+ return FALSE;
+ }
+ pCode = rel32Decode(pCode+12);
+
+#ifdef FEATURE_PREJIT
+ // NGEN helper
+ if (*PTR_BYTE(pCode) == X86_INSTR_JMP_REL32) {
+ pCode = (TADDR)rel32Decode(pCode+1);
+ }
+#endif
+
+ // JumpStub
+ if (isJumpRel64(pCode)) {
+ pCode = decodeJump64(pCode);
+ }
+
+ return pCode == GetPreStubEntryPoint();
+}
+
+//
+// Some AMD64 assembly functions have one or more DWORDS at the end of the function
+// that specify the offsets where significant instructions are
+// we use this function to get at these offsets
+//
+DWORD GetOffsetAtEndOfFunction(ULONGLONG uImageBase,
+ PRUNTIME_FUNCTION pFunctionEntry,
+ int offsetNum /* = 1*/)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION((offsetNum > 0) && (offsetNum < 20)); /* we only allow reasonable offsetNums 1..19 */
+ }
+ CONTRACTL_END;
+
+ DWORD functionSize = pFunctionEntry->EndAddress - pFunctionEntry->BeginAddress;
+ BYTE* pEndOfFunction = (BYTE*) (uImageBase + pFunctionEntry->EndAddress);
+ DWORD* pOffset = (DWORD*) (pEndOfFunction) - offsetNum;
+ DWORD offsetInFunc = *pOffset;
+
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/cGenAMD64.cpp", (offsetInFunc >= 0) && (offsetInFunc < functionSize));
+
+ return offsetInFunc;
+}
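+
+// For example (illustrative): offsetNum == 1 returns the DWORD stored immediately
+// before the function's EndAddress, offsetNum == 2 the DWORD before that, and so
+// on; the trailing offsets are read last-to-first.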
+
+//==========================================================================================
+// In NGen image, virtual slots inherited from cross-module dependencies point to jump thunks.
+// These jump thunk initially point to VirtualMethodFixupStub which transfers control here.
+// This method 'VirtualMethodFixupWorker' will patch the jump thunk to point to the actual
+// inherited method body after we have execute the precode and a stable entry point.
+//
+EXTERN_C PCODE VirtualMethodFixupWorker(TransitionBlock * pTransitionBlock, CORCOMPILE_VIRTUAL_IMPORT_THUNK * pThunk)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS; // GC not allowed until we call pEMFrame->SetFunction(pMD);
+
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ MAKE_CURRENT_THREAD_AVAILABLE();
+
+ PCODE pCode = NULL;
+ MethodDesc * pMD = NULL;
+
+#ifdef _DEBUG
+ Thread::ObjectRefFlush(CURRENT_THREAD);
+#endif
+
+ BEGIN_SO_INTOLERANT_CODE(CURRENT_THREAD);
+
+ _ASSERTE(IS_ALIGNED((size_t)pThunk, sizeof(INT64)));
+
+ FrameWithCookie<ExternalMethodFrame> frame(pTransitionBlock);
+ ExternalMethodFrame * pEMFrame = &frame;
+
+ OBJECTREF pThisPtr = pEMFrame->GetThis();
+ _ASSERTE(pThisPtr != NULL);
+ VALIDATEOBJECT(pThisPtr);
+
+ MethodTable * pMT = pThisPtr->GetTrueMethodTable();
+
+ WORD slotNumber = pThunk->slotNum;
+ _ASSERTE(slotNumber != (WORD)-1);
+
+ pCode = pMT->GetRestoredSlot(slotNumber);
+
+ if (!DoesSlotCallPrestub(pCode))
+ {
+ pMD = MethodTable::GetMethodDescForSlotAddress(pCode);
+
+ pEMFrame->SetFunction(pMD); // We will use the pMD to enumerate the GC refs in the arguments
+ pEMFrame->Push(CURRENT_THREAD);
+
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE;
+
+ // Skip fixup precode jump for better perf
+ PCODE pDirectTarget = Precode::TryToSkipFixupPrecode(pCode);
+ if (pDirectTarget != NULL)
+ pCode = pDirectTarget;
+
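+ // The thunk is an 8-byte-aligned qword that starts life as a "call" to the
+ // fixup stub. Below we rewrite it to a "jmp" to the stable entry point with
+ // a single interlocked compare-exchange on the whole qword, so racing
+ // threads observe either the original call or the fully patched jump,
+ // never a torn instruction.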
+ INT64 oldValue = *(INT64*)pThunk;
+ BYTE* pOldValue = (BYTE*)&oldValue;
+
+ if (pOldValue[0] == X86_INSTR_CALL_REL32)
+ {
+ INT64 newValue = oldValue;
+ BYTE* pNewValue = (BYTE*)&newValue;
+ pNewValue[0] = X86_INSTR_JMP_REL32;
+
+ *(INT32 *)(pNewValue+1) = rel32UsingJumpStub((INT32*)(&pThunk->callJmp[1]), pCode, pMD, NULL);
+
+ _ASSERTE(IS_ALIGNED(pThunk, sizeof(INT64)));
+ EnsureWritableExecutablePages(pThunk, sizeof(INT64));
+ FastInterlockCompareExchangeLong((INT64*)pThunk, newValue, oldValue);
+
+ FlushInstructionCache(GetCurrentProcess(), pThunk, 8);
+ }
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE;
+ pEMFrame->Pop(CURRENT_THREAD);
+ }
+
+ // Ready to return
+
+ END_SO_INTOLERANT_CODE;
+
+ return pCode;
+}
+
+#ifdef FEATURE_READYTORUN
+
+//
+// Allocation of dynamic helpers
+//
+
+#define DYNAMIC_HELPER_ALIGNMENT sizeof(TADDR)
+
+#define BEGIN_DYNAMIC_HELPER_EMIT(size) \
+ SIZE_T cb = size; \
+ SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \
+ BYTE * pStart = (BYTE *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
+ BYTE * p = pStart;
+
+#define END_DYNAMIC_HELPER_EMIT() \
+ _ASSERTE(pStart + cb == p); \
+ while (p < pStart + cbAligned) *p++ = X86_INSTR_INT3; \
+ ClrFlushInstructionCache(pStart, cbAligned); \
+ return (PCODE)pStart
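+
+// Usage sketch (illustrative only): each DynamicHelpers::CreateXxx below brackets
+// its byte emission with this macro pair, e.g.
+//
+//     BEGIN_DYNAMIC_HELPER_EMIT(15);  // reserve 15 bytes, rounded up to alignment
+//     ... emit exactly 15 bytes, advancing 'p' ...
+//     END_DYNAMIC_HELPER_EMIT();      // int3-pad the tail, flush icache, return pStart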
+
+PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+{
+ STANDARD_VM_CONTRACT;
+
+ BEGIN_DYNAMIC_HELPER_EMIT(15);
+
+#ifdef UNIX_AMD64_ABI
+ *(UINT16 *)p = 0xBF48; // mov rdi, XXXXXX
+#else
+ *(UINT16 *)p = 0xB948; // mov rcx, XXXXXX
+#endif
+ p += 2;
+ *(TADDR *)p = arg;
+ p += 8;
+
+ *p++ = X86_INSTR_JMP_REL32; // jmp rel32
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ p += 4;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(15);
+
+#ifdef UNIX_AMD64_ABI
+ *(UINT16 *)p = 0xBE48; // mov rsi, XXXXXX
+#else
+ *(UINT16 *)p = 0xBA48; // mov rdx, XXXXXX
+#endif
+ p += 2;
+ *(TADDR *)p = arg;
+ p += 8;
+
+ *p++ = X86_INSTR_JMP_REL32; // jmp rel32
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ p += 4;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(25);
+
+#ifdef UNIX_AMD64_ABI
+ *(UINT16 *)p = 0xBF48; // mov rdi, XXXXXX
+#else
+ *(UINT16 *)p = 0xB948; // mov rcx, XXXXXX
+#endif
+ p += 2;
+ *(TADDR *)p = arg;
+ p += 8;
+
+#ifdef UNIX_AMD64_ABI
+ *(UINT16 *)p = 0xBE48; // mov rsi, XXXXXX
+#else
+ *(UINT16 *)p = 0xBA48; // mov rdx, XXXXXX
+#endif
+ p += 2;
+ *(TADDR *)p = arg2;
+ p += 8;
+
+ *p++ = X86_INSTR_JMP_REL32; // jmp rel32
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ p += 4;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelperArgMove(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(18);
+
+#ifdef UNIX_AMD64_ABI
+ *p++ = 0x48; // mov rsi, rdi
+ *(UINT16 *)p = 0xF78B;
+#else
+ *p++ = 0x48; // mov rdx, rcx
+ *(UINT16 *)p = 0xD18B;
+#endif
+ p += 2;
+
+#ifdef UNIX_AMD64_ABI
+ *(UINT16 *)p = 0xBF48; // mov rdi, XXXXXX
+#else
+ *(UINT16 *)p = 0xB948; // mov rcx, XXXXXX
+#endif
+ p += 2;
+ *(TADDR *)p = arg;
+ p += 8;
+
+ *p++ = X86_INSTR_JMP_REL32; // jmp rel32
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ p += 4;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateReturn(LoaderAllocator * pAllocator)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(1);
+
+ *p++ = 0xC3; // ret
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateReturnConst(LoaderAllocator * pAllocator, TADDR arg)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(11);
+
+ *(UINT16 *)p = 0xB848; // mov rax, XXXXXX
+ p += 2;
+ *(TADDR *)p = arg;
+ p += 8;
+
+ *p++ = 0xC3; // ret
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateReturnIndirConst(LoaderAllocator * pAllocator, TADDR arg, INT8 offset)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT((offset != 0) ? 15 : 11);
+
+ *(UINT16 *)p = 0xA148; // mov rax, [XXXXXX]
+ p += 2;
+ *(TADDR *)p = arg;
+ p += 8;
+
+ if (offset != 0)
+ {
+ // add rax, <offset>
+ *p++ = 0x48;
+ *p++ = 0x83;
+ *p++ = 0xC0;
+ *p++ = offset;
+ }
+
+ *p++ = 0xC3; // ret
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(15);
+
+#ifdef UNIX_AMD64_ABI
+ *(UINT16 *)p = 0xBA48; // mov rdx, XXXXXX
+#else
+ *(UINT16 *)p = 0xB849; // mov r8, XXXXXX
+#endif
+ p += 2;
+ *(TADDR *)p = arg;
+ p += 8;
+
+ *p++ = X86_INSTR_JMP_REL32; // jmp rel32
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ p += 4;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(25);
+
+#ifdef UNIX_AMD64_ABI
+ *(UINT16 *)p = 0xBA48; // mov rdx, XXXXXX
+#else
+ *(UINT16 *)p = 0xB849; // mov r8, XXXXXX
+#endif
+ p += 2;
+ *(TADDR *)p = arg;
+ p += 8;
+
+#ifdef UNIX_AMD64_ABI
+ *(UINT16 *)p = 0xB948; // mov rcx, XXXXXX
+#else
+ *(UINT16 *)p = 0xB949; // mov r9, XXXXXX
+#endif
+ p += 2;
+ *(TADDR *)p = arg2;
+ p += 8;
+
+ *p++ = X86_INSTR_JMP_REL32; // jmp rel32
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target, NULL, pAllocator);
+ p += 4;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+#endif // FEATURE_READYTORUN
+
+#endif // DACCESS_COMPILE
diff --git a/src/vm/amd64/cgencpu.h b/src/vm/amd64/cgencpu.h
new file mode 100644
index 0000000000..ccc7d519e9
--- /dev/null
+++ b/src/vm/amd64/cgencpu.h
@@ -0,0 +1,519 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// CGENCPU.H -
+//
+// Various helper routines for generating AMD64 assembly code.
+//
+// DO NOT INCLUDE THIS FILE DIRECTLY - ALWAYS USE CGENSYS.H INSTEAD
+//
+
+
+
+#ifndef _TARGET_AMD64_
+#error Should only include "AMD64\cgencpu.h" for AMD64 builds
+#endif
+
+#ifndef __cgencpu_h__
+#define __cgencpu_h__
+
+#include "xmmintrin.h"
+
+// Given a return address retrieved during stackwalk,
+// this is the offset by which it should be decremented to land somewhere within the call instruction.
+#define STACKWALK_CONTROLPC_ADJUST_OFFSET 1
+
+// preferred alignment for data
+#define DATA_ALIGNMENT 8
+
+class MethodDesc;
+class FramedMethodFrame;
+class Module;
+struct VASigCookie;
+class ComCallMethodDesc;
+
+//
+// functions implemented in AMD64 assembly
+//
+EXTERN_C void InstantiatingMethodStubWorker(void);
+EXTERN_C void SinglecastDelegateInvokeStub();
+EXTERN_C void FastCallFinalizeWorker(Object *obj, PCODE funcPtr);
+
+#define COMMETHOD_PREPAD 16 // # extra bytes to allocate in addition to sizeof(ComCallMethodDesc)
+#define COMMETHOD_CALL_PRESTUB_SIZE 6 // 32-bit indirect relative call
+#define COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET -10 // the offset of the call target address inside the prestub
+
+#define STACK_ALIGN_SIZE 16
+
+#define JUMP_ALLOCATE_SIZE 12 // # bytes to allocate for a 64-bit jump instruction
+#define BACK_TO_BACK_JUMP_ALLOCATE_SIZE 12 // # bytes to allocate for a back to back 64-bit jump instruction
+#define SIZEOF_LOAD_AND_JUMP_THUNK 22 // # bytes to mov r10, X; jmp Z
+#define SIZEOF_LOAD2_AND_JUMP_THUNK 32 // # bytes to mov r10, X; mov r11, Y; jmp Z
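+// (12 matches the "mov rax, imm64; jmp rax" sequence emitted by emitJump; 22
+// matches EncodeLoadAndJumpThunk's "mov r10, imm64; mov rax, imm64; jmp rax".)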
+
+// Also in Zapper.h, CorCompile.h, FnTableAccess.h
+#define USE_INDIRECT_CODEHEADER // use CodeHeader, RealCodeHeader construct
+
+#define HAS_NDIRECT_IMPORT_PRECODE 1
+//#define HAS_REMOTING_PRECODE 1 // TODO: Implement
+#define HAS_FIXUP_PRECODE 1
+#define HAS_FIXUP_PRECODE_CHUNKS 1
+
+// ThisPtrRetBufPrecode is necessary for closed delegates over static methods with a return buffer
+#define HAS_THISPTR_RETBUF_PRECODE 1
+
+#define CODE_SIZE_ALIGN 16 // must alloc code blocks on 8-byte boundaries; for perf reasons we use 16-byte boundaries
+#define CACHE_LINE_SIZE 64 // Current AMD64 processors have 64-byte cache lines as per the AMD64 optimization manual
+#define LOG2SLOT LOG2_PTRSIZE
+
+#define ENREGISTERED_RETURNTYPE_MAXSIZE 8 // bytes
+#define ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE 8 // bytes
+#define ENREGISTERED_PARAMTYPE_MAXSIZE 8 // bytes
+
+#ifdef UNIX_AMD64_ABI
+#define CALLDESCR_ARGREGS 1 // CallDescrWorker has ArgumentRegister parameter
+#define CALLDESCR_FPARGREGS 1 // CallDescrWorker has FloatArgumentRegisters parameter
+#else
+#define COM_STUBS_SEPARATE_FP_LOCATIONS
+#define CALLDESCR_REGTYPEMAP 1
+#endif
+
+#define INSTRFMT_K64SMALL
+#define INSTRFMT_K64
+
+#define USE_REDIRECT_FOR_GCSTRESS
+
+//
+// REX prefix byte
+//
+#define REX_PREFIX_BASE 0x40 // 0100xxxx
+#define REX_OPERAND_SIZE_64BIT 0x08 // xxxx1xxx
+#define REX_MODRM_REG_EXT 0x04 // xxxxx1xx // use for 'middle' 3 bit field of mod/r/m
+#define REX_SIB_INDEX_EXT 0x02 // xxxxxx10
+#define REX_MODRM_RM_EXT 0x01 // XXXXXXX1 // use for low 3 bit field of mod/r/m
+#define REX_SIB_BASE_EXT 0x01 // XXXXXXX1
+#define REX_OPCODE_REG_EXT 0x01 // XXXXXXX1
+
+#define X86_REGISTER_MASK 0x7
+
+#define X86RegFromAMD64Reg(extended_reg) \
+ ((X86Reg)(((int)extended_reg) & X86_REGISTER_MASK))
+
+// Max size of optimized TLS helpers
+#ifdef _DEBUG
+// Debug build needs extra space for last error trashing
+#define TLS_GETTER_MAX_SIZE 0x30
+#else
+#define TLS_GETTER_MAX_SIZE 0x18
+#endif
+
+//=======================================================================
+// IMPORTANT: This value is used to figure out how much to allocate
+// for a fixed array of FieldMarshaler's. That means it must be at least
+// as large as the largest FieldMarshaler subclass. This requirement
+// is guarded by an assert.
+//=======================================================================
+#define MAXFIELDMARSHALERSIZE 40
+
+
+// Why is the return value ARG_SLOT? On 64-bit systems, that is 64-bits
+// and much bigger than necessary for R4, requiring explicit downcasts.
+inline
+ARG_SLOT FPSpillToR4(void* pSpillSlot)
+{
+ LIMITED_METHOD_CONTRACT;
+ return *(DWORD*)pSpillSlot;
+}
+
+inline
+ARG_SLOT FPSpillToR8(void* pSpillSlot)
+{
+ LIMITED_METHOD_CONTRACT;
+ return *(SIZE_T*)pSpillSlot;
+}
+
+inline
+void R4ToFPSpill(void* pSpillSlot, DWORD srcFloatAsDWORD)
+{
+ LIMITED_METHOD_CONTRACT;
+ *(SIZE_T*)pSpillSlot = (SIZE_T)srcFloatAsDWORD;
+ *((SIZE_T*)pSpillSlot + 1) = 0;
+}
+
+inline
+void R8ToFPSpill(void* pSpillSlot, SIZE_T srcDoubleAsSIZE_T)
+{
+ LIMITED_METHOD_CONTRACT;
+ *(SIZE_T*)pSpillSlot = srcDoubleAsSIZE_T;
+ *((SIZE_T*)pSpillSlot + 1) = 0;
+}
+
+
+#ifdef CROSSGEN_COMPILE
+#define GetEEFuncEntryPoint(pfn) 0x1001
+#else
+#define GetEEFuncEntryPoint(pfn) GFN_TADDR(pfn)
+#endif
+
+
+//**********************************************************************
+// Parameter size
+//**********************************************************************
+
+typedef INT64 StackElemType;
+#define STACK_ELEM_SIZE sizeof(StackElemType)
+
+// !! This expression assumes STACK_ELEM_SIZE is a power of 2.
+#define StackElemSize(parmSize) (((parmSize) + STACK_ELEM_SIZE - 1) & ~((ULONG)(STACK_ELEM_SIZE - 1)))
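+// e.g. StackElemSize(1) == 8, StackElemSize(8) == 8, StackElemSize(9) == 16.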
+
+//**********************************************************************
+// Frames
+//**********************************************************************
+//--------------------------------------------------------------------
+// This represents some of the TransitionFrame fields that are
+// stored at negative offsets.
+//--------------------------------------------------------------------
+typedef DPTR(struct CalleeSavedRegisters) PTR_CalleeSavedRegisters;
+struct CalleeSavedRegisters {
+#ifndef UNIX_AMD64_ABI
+ INT_PTR rdi;
+ INT_PTR rsi;
+#endif
+ INT_PTR rbx;
+ INT_PTR rbp;
+ INT_PTR r12;
+ INT_PTR r13;
+ INT_PTR r14;
+ INT_PTR r15;
+};
+
+struct REGDISPLAY;
+
+void UpdateRegDisplayFromCalleeSavedRegisters(REGDISPLAY * pRD, CalleeSavedRegisters * pRegs);
+
+//--------------------------------------------------------------------
+// This represents the arguments that are stored in volatile registers.
+// This should not overlap the CalleeSavedRegisters since those are already
+// saved separately and it would be wasteful to save the same register twice.
+// If we do use a non-volatile register as an argument, then the ArgIterator
+// will probably have to communicate this back to the PromoteCallerStack
+// routine to avoid a double promotion.
+//--------------------------------------------------------------------
+#ifdef UNIX_AMD64_ABI
+
+#define ENUM_ARGUMENT_REGISTERS() \
+ ARGUMENT_REGISTER(RDI) \
+ ARGUMENT_REGISTER(RSI) \
+ ARGUMENT_REGISTER(RDX) \
+ ARGUMENT_REGISTER(RCX) \
+ ARGUMENT_REGISTER(R8) \
+ ARGUMENT_REGISTER(R9)
+
+#define NUM_ARGUMENT_REGISTERS 6
+
+#else // UNIX_AMD64_ABI
+
+#define ENUM_ARGUMENT_REGISTERS() \
+ ARGUMENT_REGISTER(RCX) \
+ ARGUMENT_REGISTER(RDX) \
+ ARGUMENT_REGISTER(R8) \
+ ARGUMENT_REGISTER(R9)
+
+#define NUM_ARGUMENT_REGISTERS 4
+
+#endif // UNIX_AMD64_ABI
+
+typedef DPTR(struct ArgumentRegisters) PTR_ArgumentRegisters;
+struct ArgumentRegisters {
+ #define ARGUMENT_REGISTER(regname) INT_PTR regname;
+ ENUM_ARGUMENT_REGISTERS();
+ #undef ARGUMENT_REGISTER
+};
+
+#define SCRATCH_REGISTER_X86REG kRAX
+
+#ifdef UNIX_AMD64_ABI
+#define THIS_REG RDI
+#define THIS_kREG kRDI
+#else
+#define THIS_REG RCX
+#define THIS_kREG kRCX
+#endif
+
+#ifdef UNIX_AMD64_ABI
+
+typedef DPTR(struct FloatArgumentRegisters) PTR_FloatArgumentRegisters;
+struct FloatArgumentRegisters {
+ M128A d[8]; // xmm0-xmm7
+};
+
+#endif
+
+
+// Sufficient context for Try/Catch restoration.
+struct EHContext {
+ // Not used
+};
+
+#define ARGUMENTREGISTERS_SIZE sizeof(ArgumentRegisters)
+
+
+#include "stublinkeramd64.h"
+
+
+
+//**********************************************************************
+// Exception handling
+//**********************************************************************
+
+inline PCODE GetIP(const CONTEXT * context)
+{
+ CONTRACTL
+ {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+
+ PRECONDITION(CheckPointer(context));
+ }
+ CONTRACTL_END;
+
+ return PCODE(context->Rip);
+}
+
+inline void SetIP(CONTEXT* context, PCODE rip)
+{
+ CONTRACTL
+ {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+
+ PRECONDITION(CheckPointer(context));
+ }
+ CONTRACTL_END;
+
+ context->Rip = (DWORD64) rip;
+}
+
+inline TADDR GetSP(const CONTEXT * context)
+{
+ CONTRACTL
+ {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+
+ PRECONDITION(CheckPointer(context));
+ }
+ CONTRACTL_END;
+
+ return (TADDR)context->Rsp;
+}
+inline void SetSP(CONTEXT *context, TADDR rsp)
+{
+ CONTRACTL
+ {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+
+ PRECONDITION(CheckPointer(context));
+ }
+ CONTRACTL_END;
+
+ context->Rsp = rsp;
+}
+
+#define SetFP(context, ebp)
+inline TADDR GetFP(const CONTEXT * context)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (TADDR)(context->Rbp);
+}
+
+extern "C" TADDR GetCurrentSP();
+
+// Emits:
+// mov r10, pv
+// mov rax, pTarget
+// jmp rax
+void EncodeLoadAndJumpThunk (LPBYTE pBuffer, LPVOID pv, LPVOID pTarget);
+
+
+// Get Rel32 destination, emit jumpStub if necessary
+INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMethod, LoaderAllocator *pLoaderAllocator = NULL);
+
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target);
+
+void emitJump(LPBYTE pBuffer, LPVOID target);
+
+BOOL isJumpRel32(PCODE pCode);
+PCODE decodeJump32(PCODE pCode);
+
+BOOL isJumpRel64(PCODE pCode);
+PCODE decodeJump64(PCODE pCode);
+
+//
+// On IA64 back to back jumps should be separated by a nop bundle to get
+// the best performance from the hardware's branch prediction logic.
+// For all other platforms back to back jumps don't require anything special.
+// That is why we have these two wrapper functions that call emitJump and decodeJump.
+//
+inline void emitBackToBackJump(LPBYTE pBuffer, LPVOID target)
+{
+ WRAPPER_NO_CONTRACT;
+
+ emitJump(pBuffer, target);
+}
+
+inline BOOL isBackToBackJump(PCODE pCode)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return isJumpRel32(pCode) || isJumpRel64(pCode);
+}
+
+inline PCODE decodeBackToBackJump(PCODE pCode)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ if (isJumpRel32(pCode))
+ return decodeJump32(pCode);
+ else
+ if (isJumpRel64(pCode))
+ return decodeJump64(pCode);
+ else
+ return NULL;
+}
+
+extern "C" void setFPReturn(int fpSize, INT64 retVal);
+extern "C" void getFPReturn(int fpSize, INT64 *retval);
+
+
+struct ComToManagedExRecord; // defined in cgencpu.cpp
+
+inline BOOL IsUnmanagedValueTypeReturnedByRef(UINT sizeofvaluetype)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (sizeofvaluetype > ENREGISTERED_RETURNTYPE_MAXSIZE)
+ {
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+}
+
+#include <pshpack1.h>
+DECLSPEC_ALIGN(8) struct UMEntryThunkCode
+{
+ // padding // CC CC CC CC
+ // mov r10, pUMEntryThunk // 49 ba xx xx xx xx xx xx xx xx // METHODDESC_REGISTER
+ // mov rax, pJmpDest // 48 b8 xx xx xx xx xx xx xx xx // need to ensure this imm64 is qword aligned
+ // TAILJMP_RAX // 48 FF E0
+
+ BYTE m_padding[4];
+ BYTE m_movR10[2]; // MOV R10,
+ LPVOID m_uet; // pointer to start of this structure
+ BYTE m_movRAX[2]; // MOV RAX,
+ DECLSPEC_ALIGN(8)
+ const BYTE* m_execstub; // pointer to destination code // ensure this is qword aligned
+ BYTE m_jmpRAX[3]; // JMP RAX
+ BYTE m_padding2[5];
+
+ void Encode(BYTE* pTargetCode, void* pvSecretParam);
+
+ LPCBYTE GetEntryPoint() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (LPCBYTE)&m_movR10;
+ }
+
+ static int GetEntryPointOffset()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return offsetof(UMEntryThunkCode, m_movR10);
+ }
+};
+#include <poppack.h>
+
+struct HijackArgs
+{
+ union
+ {
+ ULONG64 Rax;
+ ULONG64 ReturnValue;
+ };
+ CalleeSavedRegisters Regs;
+ union
+ {
+ ULONG64 Rip;
+ size_t ReturnAddress;
+ };
+};
+
+#ifndef DACCESS_COMPILE
+
+DWORD GetOffsetAtEndOfFunction(ULONGLONG uImageBase,
+ PRUNTIME_FUNCTION pFunctionEntry,
+ int offsetNum = 1);
+
+#endif // DACCESS_COMPILE
+
+// ClrFlushInstructionCache is used when we want to call FlushInstructionCache
+// for a specific architecture in the common code, but not for other architectures.
+// We call ClrFlushInstructionCache whenever we create or modify code in the heap.
+// Currently ClrFlushInstructionCache has no effect on AMD64
+//
+
+inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode)
+{
+ // FlushInstructionCache(GetCurrentProcess(), pCodeAddr, sizeOfCode);
+ MemoryBarrier();
+ return TRUE;
+}
+
+#ifndef FEATURE_IMPLICIT_TLS
+//
+// JIT HELPER ALIASING FOR PORTABILITY.
+//
+// Create alias for optimized implementations of helpers provided on this platform
+//
+#define JIT_MonEnter JIT_MonEnter
+#define JIT_MonEnterWorker JIT_MonEnterWorker_InlineGetThread
+#define JIT_MonReliableEnter JIT_MonEnterWorker
+#define JIT_MonTryEnter JIT_MonTryEnter_InlineGetThread
+#define JIT_MonExit JIT_MonExit
+#define JIT_MonExitWorker JIT_MonExitWorker_InlineGetThread
+#define JIT_MonEnterStatic JIT_MonEnterStatic_InlineGetThread
+#define JIT_MonExitStatic JIT_MonExitStatic_InlineGetThread
+
+#define JIT_GetSharedGCStaticBase JIT_GetSharedGCStaticBase_InlineGetAppDomain
+#define JIT_GetSharedNonGCStaticBase JIT_GetSharedNonGCStaticBase_InlineGetAppDomain
+#define JIT_GetSharedGCStaticBaseNoCtor JIT_GetSharedGCStaticBaseNoCtor_InlineGetAppDomain
+#define JIT_GetSharedNonGCStaticBaseNoCtor JIT_GetSharedNonGCStaticBaseNoCtor_InlineGetAppDomain
+
+#endif // FEATURE_IMPLICIT_TLS
+
+#ifndef FEATURE_PAL
+
+#define JIT_ChkCastClass JIT_ChkCastClass
+#define JIT_ChkCastClassSpecial JIT_ChkCastClassSpecial
+#define JIT_IsInstanceOfClass JIT_IsInstanceOfClass
+#define JIT_ChkCastInterface JIT_ChkCastInterface
+#define JIT_IsInstanceOfInterface JIT_IsInstanceOfInterface
+#define JIT_Stelem_Ref JIT_Stelem_Ref
+
+#endif // FEATURE_PAL
+
+#endif // __cgencpu_h__
diff --git a/src/vm/amd64/excepamd64.cpp b/src/vm/amd64/excepamd64.cpp
new file mode 100644
index 0000000000..b69645dbbf
--- /dev/null
+++ b/src/vm/amd64/excepamd64.cpp
@@ -0,0 +1,599 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+/* EXCEP.CPP: Copyright (C) 1998 Microsoft Corporation
+ *
+ */
+//
+
+//
+
+#include "common.h"
+
+#include "frames.h"
+#include "threads.h"
+#include "excep.h"
+#include "object.h"
+#include "field.h"
+#include "dbginterface.h"
+#include "cgensys.h"
+#include "comutilnative.h"
+#include "sigformat.h"
+#include "siginfo.hpp"
+#include "gc.h"
+#include "eedbginterfaceimpl.h" //so we can clearexception in COMPlusThrow
+#include "perfcounters.h"
+#include "asmconstants.h"
+
+#include "exceptionhandling.h"
+
+
+
+#if !defined(DACCESS_COMPILE)
+
+VOID ResetCurrentContext()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+bool IsInstrModifyFault(PEXCEPTION_POINTERS pExceptionInfo)
+{
+ return false;
+}
+
+LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv)
+{
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+#endif // !DACCESS_COMPILE
+
+inline PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrameWorker(UINT_PTR establisherFrame)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ SIZE_T rbp = establisherFrame + REDIRECTSTUB_ESTABLISHER_OFFSET_RBP;
+ PTR_PTR_CONTEXT ppContext = dac_cast<PTR_PTR_CONTEXT>((TADDR)rbp + REDIRECTSTUB_RBP_OFFSET_CONTEXT);
+ return *ppContext;
+}
+
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(DISPATCHER_CONTEXT * pDispatcherContext)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return GetCONTEXTFromRedirectedStubStackFrameWorker(pDispatcherContext->EstablisherFrame);
+}
+
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(CONTEXT * pContext)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return GetCONTEXTFromRedirectedStubStackFrameWorker(pContext->Rbp);
+}
+
+#if !defined(DACCESS_COMPILE)
+
+FaultingExceptionFrame *GetFrameFromRedirectedStubStackFrame (DISPATCHER_CONTEXT *pDispatcherContext)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (FaultingExceptionFrame*)(pDispatcherContext->EstablisherFrame + THROWSTUB_ESTABLISHER_OFFSET_FaultingExceptionFrame);
+}
+
+#endif // !DACCESS_COMPILE
+
+#if !defined(DACCESS_COMPILE)
+
+#define AMD64_SIZE64_PREFIX 0x48
+#define AMD64_ADD_IMM8_OP 0x83
+#define AMD64_ADD_IMM32_OP 0x81
+#define AMD64_JMP_IMM8_OP 0xeb
+#define AMD64_JMP_IMM32_OP 0xe9
+#define AMD64_JMP_IND_OP 0xff
+#define AMD64_JMP_IND_RAX 0x20
+#define AMD64_LEA_OP 0x8d
+#define AMD64_POP_OP 0x58
+#define AMD64_RET_OP 0xc3
+#define AMD64_RET_OP_2 0xc2
+#define AMD64_REP_PREFIX 0xf3
+#define AMD64_NOP 0x90
+#define AMD64_INT3 0xCC
+
+#define AMD64_IS_REX_PREFIX(x) (((x) & 0xf0) == 0x40)
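+// e.g. AMD64_IS_REX_PREFIX(0x48) is true (REX.W), AMD64_IS_REX_PREFIX(0x90) is false.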
+
+#define FAKE_PROLOG_SIZE 1
+#define FAKE_FUNCTION_CODE_SIZE 1
+
+#ifdef DEBUGGING_SUPPORTED
+//
+// If there is an Int3 opcode at the Address then this tries to get the
+// correct Opcode for the address from the managed patch table. If this is
+// called on an address which doesn't currently have an Int3 then the current
+// opcode is returned. If there is no managed patch in the patch table
+// corresponding to this address then the current opcode (0xCC) at Address
+// is returned. If a 0xCC is returned from this function it indicates an
+// unmanaged patch at the address.
+//
+// If there is a managed patch at the address, HasManagedBreakpoint is set to true.
+//
+// If there is a 0xCC at the address before the call to GetPatchedOpcode and
+// still a 0xCC when we return, then this is considered an unmanaged patch and
+// HasUnmanagedBreakpoint is set to true.
+//
+UCHAR GetOpcodeFromManagedBPForAddress(ULONG64 Address, BOOL* HasManagedBreakpoint, BOOL* HasUnmanagedBreakpoint)
+{
+ // If we don't see a breakpoint then quickly return.
+ if (((UCHAR)*(BYTE*)Address) != AMD64_INT3)
+ {
+ return ((UCHAR)*(BYTE*)Address);
+ }
+
+ UCHAR PatchedOpcode;
+ PatchedOpcode = (UCHAR)g_pDebugInterface->GetPatchedOpcode((CORDB_ADDRESS_TYPE*)(BYTE*)Address);
+
+ // If a non Int3 opcode is returned from GetPatchedOpcode then
+ // this function has a managed breakpoint
+ if (PatchedOpcode != AMD64_INT3)
+ {
+ (*HasManagedBreakpoint) = TRUE;
+ }
+ else
+ {
+ (*HasUnmanagedBreakpoint) = TRUE;
+ }
+
+ return PatchedOpcode;
+}
+#endif // DEBUGGING_SUPPORTED
+
+PEXCEPTION_ROUTINE
+RtlVirtualUnwind (
+ IN ULONG HandlerType,
+ IN ULONG64 ImageBase,
+ IN ULONG64 ControlPc,
+ IN PRUNTIME_FUNCTION FunctionEntry,
+ IN OUT PCONTEXT ContextRecord,
+ OUT PVOID *HandlerData,
+ OUT PULONG64 EstablisherFrame,
+ IN OUT PKNONVOLATILE_CONTEXT_POINTERS ContextPointers OPTIONAL
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // The indirection should be taken care of by the caller
+ _ASSERTE((FunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0);
+
+#ifndef FEATURE_PAL
+
+#ifdef DEBUGGING_SUPPORTED
+ if (CORDebuggerAttached())
+ {
+ return RtlVirtualUnwind_Worker(HandlerType, ImageBase, ControlPc, FunctionEntry, ContextRecord, HandlerData, EstablisherFrame, ContextPointers);
+ }
+ else
+#endif // DEBUGGING_SUPPORTED
+ {
+ return RtlVirtualUnwind_Unsafe(HandlerType, ImageBase, ControlPc, FunctionEntry, ContextRecord, HandlerData, EstablisherFrame, ContextPointers);
+ }
+
+#else // !FEATURE_PAL
+ PORTABILITY_ASSERT("UNIXTODO: Implement unwinding for PAL");
+ return NULL;
+#endif // !FEATURE_PAL
+}
+
+#ifndef FEATURE_PAL
+#ifdef DEBUGGING_SUPPORTED
+PEXCEPTION_ROUTINE
+RtlVirtualUnwind_Worker (
+ IN ULONG HandlerType,
+ IN ULONG64 ImageBase,
+ IN ULONG64 ControlPc,
+ IN PRUNTIME_FUNCTION FunctionEntry,
+ IN OUT PCONTEXT ContextRecord,
+ OUT PVOID *HandlerData,
+ OUT PULONG64 EstablisherFrame,
+ IN OUT PKNONVOLATILE_CONTEXT_POINTERS ContextPointers OPTIONAL
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // Because we're only called by the safe RtlVirtualUnwind, we are guaranteed
+ // that the debugger is attached when we get here.
+ _ASSERTE(CORDebuggerAttached());
+
+ LOG((LF_CORDB, LL_EVERYTHING, "RVU_CBSW: in RtlVirtualUnwind_ClrDbgSafeWorker, ControlPc=0x%p\n", ControlPc));
+
+ BOOL InEpilogue = FALSE;
+ BOOL HasManagedBreakpoint = FALSE;
+ BOOL HasUnmanagedBreakpoint = FALSE;
+ UCHAR TempOpcode = NULL;
+ PUCHAR NextByte;
+ ULONG CurrentOffset;
+ ULONG FrameRegister;
+ ULONG64 BranchTarget;
+ PUNWIND_INFO UnwindInfo;
+
+ // 64bit Whidbey does NOT support interop debugging, so if this
+ // is not managed code, normal unwind
+ if (!ExecutionManager::IsManagedCode((PCODE) ControlPc))
+ {
+ goto NORMAL_UNWIND;
+ }
+
+ UnwindInfo = (PUNWIND_INFO)(FunctionEntry->UnwindData + ImageBase);
+ CurrentOffset = (ULONG)(ControlPc - (FunctionEntry->BeginAddress + ImageBase));
+
+ // control stopped in prologue, normal unwind
+ if (CurrentOffset < UnwindInfo->SizeOfProlog)
+ {
+ goto NORMAL_UNWIND;
+ }
+
+ // ASSUMPTION: only the first byte of an opcode will be patched by the CLR debugging code
+
+ // determine if we're in an epilog and if there is at least one managed breakpoint
+ NextByte = (PUCHAR)ControlPc;
+
+ TempOpcode = GetOpcodeFromManagedBPForAddress((ULONG64)NextByte, &HasManagedBreakpoint, &HasUnmanagedBreakpoint);
+
+ // TempOpcode == NextByte[0] unless NextByte[0] is a breakpoint
+ _ASSERTE(TempOpcode == NextByte[0] || NextByte[0] == AMD64_INT3);
+
+ // Check for an indication of the start of an epilogue:
+ // add rsp, imm8
+ // add rsp, imm32
+ // lea rsp, -disp8[fp]
+ // lea rsp, -disp32[fp]
+ if ((TempOpcode == AMD64_SIZE64_PREFIX)
+ && (NextByte[1] == AMD64_ADD_IMM8_OP)
+ && (NextByte[2] == 0xc4))
+ {
+ // add rsp, imm8.
+ NextByte += 4;
+ }
+ else if ((TempOpcode == AMD64_SIZE64_PREFIX)
+ && (NextByte[1] == AMD64_ADD_IMM32_OP)
+ && (NextByte[2] == 0xc4))
+ {
+ // add rsp, imm32.
+ NextByte += 7;
+ }
+ else if (((TempOpcode & 0xf8) == AMD64_SIZE64_PREFIX)
+ && (NextByte[1] == AMD64_LEA_OP))
+ {
+ FrameRegister = ((TempOpcode & 0x7) << 3) | (NextByte[2] & 0x7);
+
+ if ((FrameRegister != 0)
+ && (FrameRegister == UnwindInfo->FrameRegister))
+ {
+ if ((NextByte[2] & 0xf8) == 0x60)
+ {
+ // lea rsp, disp8[fp].
+ NextByte += 4;
+ }
+ else if ((NextByte[2] &0xf8) == 0xa0)
+ {
+ // lea rsp, disp32[fp].
+ NextByte += 7;
+ }
+ }
+ }
+
+ // if we haven't eaten any of the code stream detecting a stack adjustment
+ // then TempOpcode is still valid
+ if (((ULONG64)NextByte) != ControlPc)
+ {
+ TempOpcode = GetOpcodeFromManagedBPForAddress((ULONG64)NextByte, &HasManagedBreakpoint, &HasUnmanagedBreakpoint);
+ }
+
+ // TempOpcode == NextByte[0] unless NextByte[0] is a breakpoint
+ _ASSERTE(TempOpcode == NextByte[0] || NextByte[0] == AMD64_INT3);
+
+ // Check for any number of:
+ // pop nonvolatile-integer-register[0..15].
+ while (TRUE)
+ {
+ if ((TempOpcode & 0xf8) == AMD64_POP_OP)
+ {
+ NextByte += 1;
+ }
+ else if (AMD64_IS_REX_PREFIX(TempOpcode)
+ && ((NextByte[1] & 0xf8) == AMD64_POP_OP))
+ {
+ NextByte += 2;
+ }
+ else
+ {
+ // when we break out here TempOpcode will hold the next Opcode so there
+ // is no need to call GetOpcodeFromManagedBPForAddress again
+ break;
+ }
+ TempOpcode = GetOpcodeFromManagedBPForAddress((ULONG64)NextByte, &HasManagedBreakpoint, &HasUnmanagedBreakpoint);
+
+ // TempOpcode == NextByte[0] unless NextByte[0] is a breakpoint
+ _ASSERTE(TempOpcode == NextByte[0] || NextByte[0] == AMD64_INT3);
+ }
+
+ // TempOpcode == NextByte[0] unless NextByte[0] is a breakpoint
+ _ASSERTE(TempOpcode == NextByte[0] || NextByte[0] == AMD64_INT3);
+
+ // If the next instruction is a return, then control is currently in
+ // an epilogue and execution of the epilogue should be emulated.
+ // Otherwise, execution is not in an epilogue and the prologue should
+ // be unwound.
+ if (TempOpcode == AMD64_RET_OP || TempOpcode == AMD64_RET_OP_2)
+ {
+ // A return is an unambiguous indication of an epilogue
+ InEpilogue = TRUE;
+ NextByte += 1;
+ }
+ else if (TempOpcode == AMD64_REP_PREFIX && NextByte[1] == AMD64_RET_OP)
+ {
+ // A return is an unambiguous indication of an epilogue
+ InEpilogue = TRUE;
+ NextByte += 2;
+ }
+ else if (TempOpcode == AMD64_JMP_IMM8_OP || TempOpcode == AMD64_JMP_IMM32_OP)
+ {
+ // An unconditional branch to a target that is equal to the start of
+ // or outside of this routine is logically a call to another function.
+ BranchTarget = (ULONG64)NextByte - ImageBase;
+
+ if (TempOpcode == AMD64_JMP_IMM8_OP)
+ {
+ BranchTarget += 2 + (CHAR)NextByte[1];
+ NextByte += 2;
+ }
+ else
+ {
+ BranchTarget += 5 + *((LONG UNALIGNED *)&NextByte[1]);
+ NextByte += 5;
+ }
+
+ // Now determine whether the branch target refers to code within this
+ // function. If not, then it is an epilogue indicator.
+ //
+ // A branch to the start of self implies a recursive call, so
+ // is treated as an epilogue.
+ if (BranchTarget <= FunctionEntry->BeginAddress ||
+ BranchTarget >= FunctionEntry->EndAddress)
+ {
+ _ASSERTE((UnwindInfo->Flags & UNW_FLAG_CHAININFO) == 0);
+ InEpilogue = TRUE;
+ }
+ }
+ else if ((TempOpcode == AMD64_JMP_IND_OP) && (NextByte[1] == 0x25))
+ {
+ // An unconditional jump indirect.
+
+ // This is a jmp outside of the function, probably a tail call
+ // to an import function.
+ InEpilogue = TRUE;
+ NextByte += 2;
+ }
+ else if (((TempOpcode & 0xf8) == AMD64_SIZE64_PREFIX)
+ && (NextByte[1] == AMD64_JMP_IND_OP)
+ && (NextByte[2] & 0x38) == AMD64_JMP_IND_RAX)
+ {
+ //
+ // This is an indirect jump opcode: 0x48 0xff /4. The 64-bit
+ // flag (REX.W) is always redundant here, so its presence is
+ // overloaded to indicate a branch out of the function - a tail
+ // call.
+ //
+ // Such an opcode is an unambiguous epilogue indication.
+ //
+ InEpilogue = TRUE;
+ NextByte += 3;
+ }
+
+ if (InEpilogue && HasUnmanagedBreakpoint)
+ {
+ STRESS_LOG1(LF_CORDB, LL_ERROR, "RtlVirtualUnwind is about to fail b/c the ControlPc (0x%p) is in the epilog of a function which has a 0xCC in its epilog.", ControlPc);
+ _ASSERTE(!"RtlVirtualUnwind is about to fail b/c you are unwinding through\n"
+ "the epilogue of a function and have a 0xCC in the codestream. This is\n"
+ "probably caused by having set that breakpoint yourself in the debugger,\n"
+ "you might try to remove the bp and ignore this assert.");
+ }
+
+ if (!(InEpilogue && HasManagedBreakpoint))
+ {
+ goto NORMAL_UNWIND;
+ }
+ else
+ {
+ // InEpilogue && HasManagedBreakpoint, this means we have to make the fake code buffer
+
+ // We explicitly handle the case where the new below can't allocate, but we're still
+ // getting an assert from inside new b/c we can be called within a FAULT_FORBID scope.
+ //
+ // If new does fail we will still end up crashing, but the debugger doesn't have to
+ // be OOM hardened in Whidbey and this is a debugger only code path so we're ok in
+ // that department.
+ FAULT_NOT_FATAL();
+
+ LOG((LF_CORDB, LL_EVERYTHING, "RVU_CBSW: Function has >1 managed bp in the epilogue, and we are in the epilogue, need a code buffer for RtlVirtualUnwind\n"));
+
+ // IMPLEMENTATION NOTE:
+ // Note that we are significantly pruning the function here in making the fake
+ // code buffer; all that we are making room for is 1 byte for the prologue, 1 byte for
+ // the function code, and what is left of the epilogue to be executed. This is _very_ closely
+ // tied to the implementation of RtlVirtualUnwind and the knowledge that, having passed
+ // the test above with InEpilogue==TRUE, the code path which will be followed
+ // through RtlVirtualUnwind is known.
+ //
+ // In making this fake code buffer we need to ensure that we don't mess with the outcome
+ // of the test in RtlVirtualUnwind to determine that control stopped within a function
+ // epilogue, or the unwinding that will happen when that test comes out TRUE. To that end
+ // we have preserved a single byte representing the Prologue as a section of the buffer
+ // as well as a single byte representation of the Function code so that tests to make sure
+ // that we're out of the prologue will not fail.
+
+ RUNTIME_FUNCTION FakeFunctionEntry;
+
+ //
+ // The buffer contains 4 sections
+ //
+ // UNWIND_INFO: The fake UNWIND_INFO comes first; we make a copy within the
+ // buffer because, like the fake code buffer, it needs to be
+ // addressable through a 32-bit offset from NewImageBase.
+ //
+ // Prologue: A single byte representing the function Prologue
+ //
+ // Function Code: A single byte representing the Function's code
+ //
+ // Epilogue: This contains what is left to be executed of the Epilogue which control
+ // stopped in, it can be as little as a "return" type statement or as much
+ // as the whole Epilogue containing a stack adjustment, pops and "return"
+ // type statement.
+ //
+ //
+ // Here is the layout of the buffer:
+ //
+ // UNWIND_INFO copy:
+ // pBuffer[0]
+ // ...
+ // pBuffer[sizeof(UNWIND_INFO) - 1]
+ // PROLOGUE:
+ // pBuffer[sizeof(UNWIND_INFO) + 0] <----------------- THIS IS THE START OF pCodeBuffer
+ // FUNCTION CODE:
+ // pBuffer[sizeof(UNWIND_INFO) + FAKE_PROLOG_SIZE]
+ // EPILOGUE
+ // pBuffer[sizeof(UNWIND_INFO) + FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE]
+ // ...
+ // pBuffer[sizeof(UNWIND_INFO) + FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE + SizeOfEpilogue]
+ //
+ ULONG SizeOfEpilogue = (ULONG)((ULONG64)NextByte - ControlPc);
+ ULONG SizeOfBuffer = (ULONG)(sizeof(UNWIND_INFO) + FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE + SizeOfEpilogue);
+ BYTE *pBuffer = (BYTE*) new (nothrow) BYTE[SizeOfBuffer];
+ BYTE *pCodeBuffer;
+ ULONG64 NewImageBase;
+ ULONG64 NewControlPc;
+
+ // <TODO> This WILL fail during unwind because we KNOW there is a managed breakpoint
+ // in the epilog and we're in the epilog, but we could not allocate a buffer to
+ // put our cleaned up code into, what to do? </TODO>
+ if (pBuffer == NULL)
+ {
+ // TODO: can we throw OOM here? or will we just go recursive b/c that will eventually get to the same place?
+ _ASSERTE(!"OOM when trying to allocate buffer for virtual unwind cleaned code, BIG PROBLEM!!");
+ goto NORMAL_UNWIND;
+ }
+
+ NewImageBase = ((((ULONG64)pBuffer) >> 32) << 32);
+ pCodeBuffer = pBuffer + sizeof(UNWIND_INFO);
+
+#if defined(_DEBUG)
+ // Fill the buffer up to the rest of the epilogue to be executed with Int3
+ for (int i=0; i<(FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE); i++)
+ {
+ pCodeBuffer[i] = AMD64_INT3;
+ }
+#endif
+
+ // Copy the UNWIND_INFO and the Epilogue into the buffer
+ memcpy(pBuffer, (const void*)UnwindInfo, sizeof(UNWIND_INFO));
+ memcpy(&(pCodeBuffer[FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE]), (const void*)(BYTE*)ControlPc, SizeOfEpilogue);
+
+ _ASSERTE((UCHAR)*(BYTE*)ControlPc == (UCHAR)pCodeBuffer[FAKE_PROLOG_SIZE+FAKE_FUNCTION_CODE_SIZE]);
+
+ HasManagedBreakpoint = FALSE;
+ HasUnmanagedBreakpoint = FALSE;
+
+ // The buffer cleaning implementation here just runs through the buffer byte by byte trying
+ // to get a real opcode from the patch table for any 0xCC that it finds. There is the
+ // possibility that the epilogue will contain a 0xCC in an immediate value for which a
+ // patch won't be found and this will report a false positive for HasUnmanagedBreakpoint.
+ BYTE* pCleanCodePc = pCodeBuffer + FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE;
+ BYTE* pRealCodePc = (BYTE*)ControlPc;
+ while (pCleanCodePc < (pCodeBuffer + FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE + SizeOfEpilogue))
+ {
+ // If we have a breakpoint at the address then try to get the correct opcode from
+ // the managed patch using GetOpcodeFromManagedBPForAddress.
+ if (AMD64_INT3 == ((UCHAR)*pCleanCodePc))
+ {
+ (*pCleanCodePc) = GetOpcodeFromManagedBPForAddress((ULONG64)pRealCodePc, &HasManagedBreakpoint, &HasUnmanagedBreakpoint);
+ }
+
+ pCleanCodePc++;
+ pRealCodePc++;
+ }
+
+ // On the second pass through the epilogue assuming things are working as
+ // they should we should once again have at least one managed breakpoint...
+ // otherwise why are we here?
+ _ASSERTE(HasManagedBreakpoint == TRUE);
+
+ // This would be nice to assert, but we can't w/ current buffer cleaning implementation, see note above.
+ // _ASSERTE(HasUnmanagedBreakpoint == FALSE);
+
+ ((PUNWIND_INFO)pBuffer)->SizeOfProlog = FAKE_PROLOG_SIZE;
+
+ FakeFunctionEntry.BeginAddress = (ULONG)((ULONG64)pCodeBuffer - NewImageBase);
+ FakeFunctionEntry.EndAddress = (ULONG)((ULONG64)(pCodeBuffer + (FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE + SizeOfEpilogue)) - NewImageBase);
+ FakeFunctionEntry.UnwindData = (ULONG)((ULONG64)pBuffer - NewImageBase);
+
+ NewControlPc = (ULONG64)(pCodeBuffer + FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE);
+
+ RtlVirtualUnwind_Unsafe((ULONG)HandlerType, (ULONG64)NewImageBase, (ULONG64)NewControlPc, &FakeFunctionEntry, ContextRecord, HandlerData, EstablisherFrame, ContextPointers);
+
+ // Make sure to delete the whole buffer and not just the code buffer
+ delete[] pBuffer;
+
+ return NULL; // if control left in the epilog then RtlVirtualUnwind will not return an exception handler
+ }
+
+NORMAL_UNWIND:
+ return RtlVirtualUnwind_Unsafe(HandlerType, ImageBase, ControlPc, FunctionEntry, ContextRecord, HandlerData, EstablisherFrame, ContextPointers);
+}
+#endif // DEBUGGING_SUPPORTED
+#endif // !FEATURE_PAL
+
+#undef FAKE_PROLOG_SIZE
+#undef FAKE_FUNCTION_CODE_SIZE
+
+#undef AMD64_SIZE64_PREFIX
+#undef AMD64_ADD_IMM8_OP
+#undef AMD64_ADD_IMM32_OP
+#undef AMD64_JMP_IMM8_OP
+#undef AMD64_JMP_IMM32_OP
+#undef AMD64_JMP_IND_OP
+#undef AMD64_JMP_IND_RAX
+#undef AMD64_POP_OP
+#undef AMD64_RET_OP
+#undef AMD64_RET_OP_2
+#undef AMD64_NOP
+#undef AMD64_INT3
+
+#endif // !DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+// Returns TRUE if caller should resume execution.
+BOOL
+AdjustContextForVirtualStub(
+ EXCEPTION_RECORD *pExceptionRecord,
+ CONTEXT *pContext)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Nothing to adjust
+
+ return FALSE;
+}
+
+#endif
+
diff --git a/src/vm/amd64/excepcpu.h b/src/vm/amd64/excepcpu.h
new file mode 100644
index 0000000000..dc7002d4d8
--- /dev/null
+++ b/src/vm/amd64/excepcpu.h
@@ -0,0 +1,92 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// EXCEPCPU.H -
+//
+// This header file is included from Excep.h if the target platform is AMD64
+//
+
+
+#ifndef __excepamd64_h__
+#define __excepamd64_h__
+
+#include "corerror.h" // HResults for the COM+ Runtime
+
+#include "../dlls/mscorrc/resource.h"
+
+class FaultingExceptionFrame;
+
+
+#define THROW_CONTROL_FOR_THREAD_FUNCTION RedirectForThrowControl
+
+EXTERN_C void RedirectForThrowControl();
+
+#define STATUS_CLR_GCCOVER_CODE STATUS_PRIVILEGED_INSTRUCTION
+
+//
+// No FS:0, nothing to do.
+//
+#define INSTALL_EXCEPTION_HANDLING_RECORD(record)
+#define UNINSTALL_EXCEPTION_HANDLING_RECORD(record)
+
+//
+// On Win64, the COMPlusFrameHandler's work is done by our personality routine.
+//
+#define DECLARE_CPFH_EH_RECORD(pCurThread)
+
+//
+// Retrieves the redirected CONTEXT* from the stack frame of one of the
+// RedirectedHandledJITCaseForXXX_Stub's.
+//
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(DISPATCHER_CONTEXT * pDispatcherContext);
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(CONTEXT * pContext);
+
+//
+// Retrieves the FaultingExceptionFrame* from the stack frame of
+// RedirectForThrowControl or NakedThrowHelper.
+//
+FaultingExceptionFrame *GetFrameFromRedirectedStubStackFrame (DISPATCHER_CONTEXT *pDispatcherContext);
+
+//
+// Functions that wrap RtlVirtualUnwind to make sure that in the AMD64 case all the
+// breakpoints have been removed from the Epilogue if RtlVirtualUnwind is going to
+// try and disassemble it.
+//
+#if !defined(DACCESS_COMPILE)
+UCHAR GetOpcodeFromManagedBPForAddress(ULONG64 Address, BOOL* HasManagedBreakpoint, BOOL* HasUnmanagedBreakpoint);
+
+#define RtlVirtualUnwind RtlVirtualUnwind_Wrapper
+
+PEXCEPTION_ROUTINE
+RtlVirtualUnwind (
+ IN ULONG HandlerType,
+ IN ULONG64 ImageBase,
+ IN ULONG64 ControlPc,
+ IN PRUNTIME_FUNCTION FunctionEntry,
+ IN OUT PCONTEXT ContextRecord,
+ OUT PVOID *HandlerData,
+ OUT PULONG64 EstablisherFrame,
+ IN OUT PKNONVOLATILE_CONTEXT_POINTERS ContextPointers OPTIONAL
+ );
+
+PEXCEPTION_ROUTINE
+RtlVirtualUnwind_Worker (
+ IN ULONG HandlerType,
+ IN ULONG64 ImageBase,
+ IN ULONG64 ControlPc,
+ IN PRUNTIME_FUNCTION FunctionEntry,
+ IN OUT PCONTEXT ContextRecord,
+ OUT PVOID *HandlerData,
+ OUT PULONG64 EstablisherFrame,
+ IN OUT PKNONVOLATILE_CONTEXT_POINTERS ContextPointers OPTIONAL
+ );
+#endif // !DACCESS_COMPILE
+
+BOOL AdjustContextForVirtualStub(EXCEPTION_RECORD *pExceptionRecord, CONTEXT *pContext);
+
+#endif // __excepamd64_h__
+
diff --git a/src/vm/amd64/getstate.S b/src/vm/amd64/getstate.S
new file mode 100644
index 0000000000..25ba115bd0
--- /dev/null
+++ b/src/vm/amd64/getstate.S
@@ -0,0 +1,48 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+LEAF_ENTRY GetCurrentSP, _TEXT
+
+ mov rax, rsp
+ add rax, 8
+ ret
+
+LEAF_END GetCurrentSP, _TEXT
+
+
+LEAF_ENTRY GetCurrentIP, _TEXT
+
+ mov rax, [rsp]
+ ret
+
+LEAF_END GetCurrentIP, _TEXT
+
+
+// EXTERN_C void LazyMachStateCaptureState(struct LazyMachState *pState)
+LEAF_ENTRY LazyMachStateCaptureState, _TEXT
+
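+ // rdi holds pState, the first argument under the System V AMD64 ABI
+ // (the Windows flavor in getstate.asm receives it in rcx instead).
+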
+ mov rdx, [rsp] // get the return address
+
+ mov [rdi + OFFSETOF__MachState__m_CaptureRdi], rdi
+ mov [rdi + OFFSETOF__MachState__m_CaptureRsi], rsi
+ mov [rdi + OFFSETOF__MachState__m_CaptureRbx], rbx
+ mov [rdi + OFFSETOF__MachState__m_CaptureRbp], rbp
+ mov [rdi + OFFSETOF__MachState__m_CaptureR12], r12
+ mov [rdi + OFFSETOF__MachState__m_CaptureR13], r13
+ mov [rdi + OFFSETOF__MachState__m_CaptureR14], r14
+ mov [rdi + OFFSETOF__MachState__m_CaptureR15], r15
+
+ mov qword ptr [rdi + OFFSETOF__MachState___pRetAddr], 0
+
+ mov [rdi + OFFSETOF__LazyMachState__m_CaptureRip], rdx
+ mov [rdi + OFFSETOF__LazyMachState__m_CaptureRsp], rsp
+
+ ret
+
+LEAF_END LazyMachStateCaptureState, _TEXT
diff --git a/src/vm/amd64/getstate.asm b/src/vm/amd64/getstate.asm
new file mode 100644
index 0000000000..e6efb51263
--- /dev/null
+++ b/src/vm/amd64/getstate.asm
@@ -0,0 +1,86 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+
+include AsmMacros.inc
+include AsmConstants.inc
+
+
+LEAF_ENTRY GetCurrentSP, _TEXT
+
+ mov rax, rsp
+ add rax, 8
+ ret
+
+LEAF_END GetCurrentSP, _TEXT
+
+
+LEAF_ENTRY GetCurrentIP, _TEXT
+
+ mov rax, [rsp]
+ ret
+
+LEAF_END GetCurrentIP, _TEXT
+
+
+LEAF_ENTRY GetRBP, _TEXT
+
+ mov rax, rbp
+ ret
+
+LEAF_END GetRBP, _TEXT
+
+; this is the same implementation as the function of the same name in di\amd64\floatconversion.asm and they must
+; remain in sync.
+
+; @dbgtodo inspection: remove this function when we remove the ipc event to load the float state
+
+; extern "C" double FPFillR8(void* fpContextSlot);
+LEAF_ENTRY FPFillR8, _TEXT
+ movdqa xmm0, [rcx]
+ ret
+LEAF_END FPFillR8, _TEXT
+
+
+LEAF_ENTRY get_cycle_count, _TEXT
+
+ rdtsc ; time stamp count ret'd in edx:eax
+ shl rdx, 32 ; shift the high half into the upper 32 bits
+ or rax, rdx ; combine with the low half; return tsc in rax
+ ret
+LEAF_END get_cycle_count, _TEXT
+
+
+; EXTERN_C void LazyMachStateCaptureState(struct LazyMachState *pState)
+LEAF_ENTRY LazyMachStateCaptureState, _TEXT
+
+ mov rdx, [rsp] ; get the return address
+
+ mov [rcx + OFFSETOF__MachState__m_CaptureRdi], rdi
+ mov [rcx + OFFSETOF__MachState__m_CaptureRsi], rsi
+ mov [rcx + OFFSETOF__MachState__m_CaptureRbx], rbx
+ mov [rcx + OFFSETOF__MachState__m_CaptureRbp], rbp
+ mov [rcx + OFFSETOF__MachState__m_CaptureR12], r12
+ mov [rcx + OFFSETOF__MachState__m_CaptureR13], r13
+ mov [rcx + OFFSETOF__MachState__m_CaptureR14], r14
+ mov [rcx + OFFSETOF__MachState__m_CaptureR15], r15
+
+ mov qword ptr [rcx + OFFSETOF__MachState___pRetAddr], 0
+
+ mov [rcx + OFFSETOF__LazyMachState__m_CaptureRip], rdx
+ mov [rcx + OFFSETOF__LazyMachState__m_CaptureRsp], rsp
+
+ ret
+
+LEAF_END LazyMachStateCaptureState, _TEXT
+
+
+ end
diff --git a/src/vm/amd64/gmsamd64.cpp b/src/vm/amd64/gmsamd64.cpp
new file mode 100644
index 0000000000..8f59388410
--- /dev/null
+++ b/src/vm/amd64/gmsamd64.cpp
@@ -0,0 +1,127 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/**************************************************************/
+/* gmsAMD64.cpp */
+/**************************************************************/
+
+#include "common.h"
+#include "gmscpu.h"
+
+void LazyMachState::unwindLazyState(LazyMachState* baseState,
+ MachState* unwoundState,
+ int funCallDepth /* = 1 */,
+ HostCallPreference hostCallPreference /* = (HostCallPreference)(-1) */)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ CONTEXT ctx;
+ KNONVOLATILE_CONTEXT_POINTERS nonVolRegPtrs;
+
+ ctx.Rip = baseState->m_CaptureRip;
+ ctx.Rsp = baseState->m_CaptureRsp + 8; // +8 for return addr pushed before calling LazyMachStateCaptureState
+
+ ctx.Rdi = unwoundState->m_CaptureRdi = baseState->m_CaptureRdi;
+ ctx.Rsi = unwoundState->m_CaptureRsi = baseState->m_CaptureRsi;
+ ctx.Rbx = unwoundState->m_CaptureRbx = baseState->m_CaptureRbx;
+ ctx.Rbp = unwoundState->m_CaptureRbp = baseState->m_CaptureRbp;
+ ctx.R12 = unwoundState->m_CaptureR12 = baseState->m_CaptureR12;
+ ctx.R13 = unwoundState->m_CaptureR13 = baseState->m_CaptureR13;
+ ctx.R14 = unwoundState->m_CaptureR14 = baseState->m_CaptureR14;
+ ctx.R15 = unwoundState->m_CaptureR15 = baseState->m_CaptureR15;
+
+#if !defined(DACCESS_COMPILE)
+ // Context pointers are only set up in non-DAC builds. The API used to unwind
+ // in DAC is StackWalk64(), which does not support context pointers, so the
+ // DAC build instead updates the unwound registers directly at the end of
+ // this function.
+ nonVolRegPtrs.Rdi = &unwoundState->m_CaptureRdi;
+ nonVolRegPtrs.Rsi = &unwoundState->m_CaptureRsi;
+ nonVolRegPtrs.Rbx = &unwoundState->m_CaptureRbx;
+ nonVolRegPtrs.Rbp = &unwoundState->m_CaptureRbp;
+ nonVolRegPtrs.R12 = &unwoundState->m_CaptureR12;
+ nonVolRegPtrs.R13 = &unwoundState->m_CaptureR13;
+ nonVolRegPtrs.R14 = &unwoundState->m_CaptureR14;
+ nonVolRegPtrs.R15 = &unwoundState->m_CaptureR15;
+#endif // !DACCESS_COMPILE
+
+ LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK LazyMachState::unwindLazyState(ip:%p,sp:%p)\n", baseState->m_CaptureRip, baseState->m_CaptureRsp));
+
+ PCODE pvControlPc;
+
+ do
+ {
+ pvControlPc = Thread::VirtualUnwindCallFrame(&ctx, &nonVolRegPtrs);
+
+ if (funCallDepth > 0)
+ {
+ --funCallDepth;
+ if (funCallDepth == 0)
+ break;
+ }
+ else
+ {
+ // Determine whether the given IP resides in JITted code (IsManagedCode
+ // returns nonzero in that case) to see if we've unwound to managed code yet.
+ BOOL fFailedReaderLock = FALSE;
+ BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock);
+ if (fFailedReaderLock)
+ {
+ // We don't know if we would have been able to find a JIT
+ // manager, because we couldn't enter the reader lock without
+ // yielding (and our caller doesn't want us to yield). So abort
+ // now.
+
+ // Invalidate the lazyState we're returning, so the caller knows
+ // we aborted before we could fully unwind
+ unwoundState->_pRetAddr = NULL;
+ return;
+ }
+
+ if (fIsManagedCode)
+ break;
+ }
+ }
+ while(TRUE);
+
+ //
+ // Update unwoundState so that HelperMethodFrameRestoreState knows which
+ // registers have been potentially modified.
+ //
+
+ unwoundState->m_Rip = ctx.Rip;
+ unwoundState->m_Rsp = ctx.Rsp;
+
+ // For DAC, the return value of this function may be used after unwoundState goes out of scope, so we cannot do
+ // "unwoundState->_pRetAddr = PTR_TADDR(&unwoundState->m_Rip)".
+ unwoundState->_pRetAddr = PTR_TADDR(unwoundState->m_Rsp - 8);
+
+#if defined(DACCESS_COMPILE)
+ // For DAC, we have to update the registers directly, since we don't have context pointers.
+ unwoundState->m_CaptureRdi = ctx.Rdi;
+ unwoundState->m_CaptureRsi = ctx.Rsi;
+ unwoundState->m_CaptureRbx = ctx.Rbx;
+ unwoundState->m_CaptureRbp = ctx.Rbp;
+ unwoundState->m_CaptureR12 = ctx.R12;
+ unwoundState->m_CaptureR13 = ctx.R13;
+ unwoundState->m_CaptureR14 = ctx.R14;
+ unwoundState->m_CaptureR15 = ctx.R15;
+
+#else // !DACCESS_COMPILE
+ unwoundState->m_pRdi = PTR_ULONG64(nonVolRegPtrs.Rdi);
+ unwoundState->m_pRsi = PTR_ULONG64(nonVolRegPtrs.Rsi);
+ unwoundState->m_pRbx = PTR_ULONG64(nonVolRegPtrs.Rbx);
+ unwoundState->m_pRbp = PTR_ULONG64(nonVolRegPtrs.Rbp);
+ unwoundState->m_pR12 = PTR_ULONG64(nonVolRegPtrs.R12);
+ unwoundState->m_pR13 = PTR_ULONG64(nonVolRegPtrs.R13);
+ unwoundState->m_pR14 = PTR_ULONG64(nonVolRegPtrs.R14);
+ unwoundState->m_pR15 = PTR_ULONG64(nonVolRegPtrs.R15);
+#endif // DACCESS_COMPILE
+}
diff --git a/src/vm/amd64/gmscpu.h b/src/vm/amd64/gmscpu.h
new file mode 100644
index 0000000000..3eff547287
--- /dev/null
+++ b/src/vm/amd64/gmscpu.h
@@ -0,0 +1,203 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/**************************************************************/
+/* gmscpu.h */
+/**************************************************************/
+/* HelperFrame defines the 'GET_STATE(machState)' macro, which
+ figures out what the state of the machine will be when the
+ current method returns. It then stores that state in the
+ MachState structure. */
+
+/**************************************************************/
+
+#ifndef __gmsAMD64_h__
+#define __gmsAMD64_h__
+
+#ifdef _DEBUG
+class HelperMethodFrame;
+struct MachState;
+EXTERN_C MachState* __stdcall HelperMethodFrameConfirmState(HelperMethodFrame* frame, void* esiVal, void* ediVal, void* ebxVal, void* ebpVal);
+#endif // _DEBUG
+
+// A MachState indicates the register state of the processor at some point in time (usually
+// just before or after a call is made). It can be made one of two ways. Either explicitly
+// (when you for some reason know the values of all the registers), or implicitly using the
+// GET_STATE macros.
+
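+// As a hedged illustration of the implicit pattern (helper-frame plumbing
+// omitted; CAPTURE_STATE and unwindLazyState are declared later in this file):
+//
+//     LazyMachState lazyState;
+//     CAPTURE_STATE(&lazyState, ret);     // cheap capture at helper entry
+//     ...
+//     MachState unwound;                  // pay for a real unwind only if
+//     LazyMachState::unwindLazyState(&lazyState, &unwound);  // state is needed
+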
+typedef DPTR(struct MachState) PTR_MachState;
+struct MachState
+{
+ MachState()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ INDEBUG(memset(this, 0xCC, sizeof(MachState));)
+ }
+
+ bool isValid() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(dac_cast<TADDR>(_pRetAddr) != INVALID_POINTER_CC); return(_pRetAddr != 0); }
+ TADDR* pRetAddr() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(isValid()); return(_pRetAddr); }
+ TADDR GetRetAddr() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(isValid()); return *_pRetAddr; }
+#ifndef DACCESS_COMPILE
+ void SetRetAddr(TADDR* addr) { _ASSERTE(isValid()); _pRetAddr = addr; }
+#endif
+
+ friend class HelperMethodFrame;
+ friend class CheckAsmOffsets;
+ friend struct LazyMachState;
+#ifdef _DEBUG
+ friend MachState* __stdcall HelperMethodFrameConfirmState(HelperMethodFrame* frame, void* esiVal, void* ediVal, void* ebxVal, void* ebpVal);
+#endif
+
+protected:
+ PCODE m_Rip;
+ TADDR m_Rsp;
+
+ //
+ // These "capture" fields are READ ONLY once initialized by
+ // LazyMachStateCaptureState because we are racing to update
+ // the MachState when we do a stackwalk so, we must not update
+ // any state used to initialize the unwind from the captured
+ // state to the managed caller.
+ //
+ // Note also, that these fields need to be in the base struct
+ // because the context pointers below may point up to these
+ // fields.
+ //
+ ULONG64 m_CaptureRdi;
+ ULONG64 m_CaptureRsi;
+ ULONG64 m_CaptureRbx;
+ ULONG64 m_CaptureRbp;
+ ULONG64 m_CaptureR12;
+ ULONG64 m_CaptureR13;
+ ULONG64 m_CaptureR14;
+ ULONG64 m_CaptureR15;
+
+ // context pointers for preserved registers
+ PTR_ULONG64 m_pRdi;
+ PTR_ULONG64 m_pRsi;
+ PTR_ULONG64 m_pRbx;
+ PTR_ULONG64 m_pRbp;
+ PTR_ULONG64 m_pR12;
+ PTR_ULONG64 m_pR13;
+ PTR_ULONG64 m_pR14;
+ PTR_ULONG64 m_pR15;
+
+ PTR_TADDR _pRetAddr;
+};
+
+/********************************************************************/
+/* This allows you to defer the computation of the machine state
+ until later. Note that we don't reuse slots, because we want
+ this to be thread-safe without locks */
+
+EXTERN_C void LazyMachStateCaptureState(struct LazyMachState *pState);
+
+typedef DPTR(struct LazyMachState) PTR_LazyMachState;
+struct LazyMachState : public MachState
+{
+ // Compute the machine state of the processor as it will exist just
+ // after the return from at most 'funCallDepth' functions.
+ // If 'testFtn' is non-NULL, the return address is tested at each
+ // return instruction encountered. If this test returns non-NULL,
+ // then stack walking stops (thus you can walk up to the point where the
+ // return address matches some criteria).
+
+ // Normally this is called with funCallDepth=1 and testFtn = 0 so that
+ // it returns the state of the processor after the function that called 'captureState()'.
+ void setLazyStateFromUnwind(MachState* copy);
+ static void unwindLazyState(LazyMachState* baseState,
+ MachState* lazyState,
+ int funCallDepth = 1,
+ HostCallPreference hostCallPreference = AllowHostCalls);
+
+ friend class HelperMethodFrame;
+ friend class CheckAsmOffsets;
+
+ //
+ // These "capture" fields are READ ONLY once initialized by
+ // LazyMachStateCaptureState because we are racing to update
+ // the MachState when we do a stackwalk so, we must not update
+ // any state used to initialize the unwind from the captured
+ // state to the managed caller.
+ //
+ ULONG64 m_CaptureRip;
+ ULONG64 m_CaptureRsp;
+};
+
+// rdi, rsi, rbx, rbp, r12, r13, r14, r15
+#define NUM_NONVOLATILE_CONTEXT_POINTERS 8
+
+inline void LazyMachState::setLazyStateFromUnwind(MachState* copy)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#if defined(DACCESS_COMPILE)
+ // This function cannot be called in DAC because DAC cannot update target memory.
+ DacError(E_FAIL);
+ return;
+
+#else // !DACCESS_COMPILE
+ this->m_Rip = copy->m_Rip;
+ this->m_Rsp = copy->m_Rsp;
+
+ // Capture* has already been set, so there is no need to touch it
+
+ // loop over the nonvolatile context pointers for
+ // rdi, rsi, rbx, rbp, r12, r13, r14, r15 and make
+ // sure to properly copy interior pointers into the
+ // new struct
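+ //
+ // For example (illustrative): if copy->m_pRdi points at copy->m_CaptureRdi
+ // (an interior pointer produced by unwindLazyState), the copied value is
+ // rebased so that this->m_pRdi ends up pointing at this->m_CaptureRdi.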
+
+ PULONG64* pSrc = &copy->m_pRdi;
+ PULONG64* pDst = &this->m_pRdi;
+
+ const PULONG64 LowerBoundDst = (PULONG64) this;
+ const PULONG64 LowerBoundSrc = (PULONG64) copy;
+
+ const PULONG64 UpperBoundSrc = (PULONG64) (((BYTE*)LowerBoundSrc) + sizeof(*copy) - sizeof(_pRetAddr));
+
+#ifdef _DEBUG
+ int count = 0;
+#endif // _DEBUG
+
+ while (((PULONG64)pSrc) < UpperBoundSrc)
+ {
+#ifdef _DEBUG
+ count++;
+#endif // _DEBUG
+
+ PULONG64 valueSrc = *pSrc++;
+
+ if ((LowerBoundSrc <= valueSrc) && (valueSrc < UpperBoundSrc))
+ {
+ // make any pointer interior to 'src' interior to 'dst'
+ valueSrc = (PULONG64)((BYTE*)valueSrc - (BYTE*)LowerBoundSrc + (BYTE*)LowerBoundDst);
+ }
+
+ *pDst++ = valueSrc;
+ }
+
+ CONSISTENCY_CHECK_MSGF(count == NUM_NONVOLATILE_CONTEXT_POINTERS, ("count != NUM_NONVOLATILE_CONTEXT_POINTERS, actually = %d", count));
+
+ // this has to be last because we depend on write ordering to
+ // synchronize the race implicit in updating this struct
+ VolatileStore(&_pRetAddr, (PTR_TADDR)(TADDR)&m_Rip);
+
+#endif // !DACCESS_COMPILE
+}
+
+// Do the initial capture of the machine state. This is meant to be
+// as lightweight as possible, as we may never need the state that
+// we capture. To complete the process, call 'getMachState()',
+// which finishes the unwind.
+EXTERN_C void LazyMachStateCaptureState(struct LazyMachState *pState);
+
+// CAPTURE_STATE captures just enough register state so that the state of the
+// processor can be deterined just after the the routine that has CAPTURE_STATE in
+// it returns.
+
+#define CAPTURE_STATE(machState, ret) \
+ LazyMachStateCaptureState(machState)
+
+#endif // __gmsAMD64_h__
diff --git a/src/vm/amd64/jithelpers_fast.S b/src/vm/amd64/jithelpers_fast.S
new file mode 100644
index 0000000000..457d96e62e
--- /dev/null
+++ b/src/vm/amd64/jithelpers_fast.S
@@ -0,0 +1,246 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+
+// Mark start of the code region that we patch at runtime
+LEAF_ENTRY JIT_PatchedCodeStart, _TEXT
+ ret
+LEAF_END JIT_PatchedCodeStart, _TEXT
+
+// This is used by the mechanism to hold either the JIT_WriteBarrier_PreGrow
+// or JIT_WriteBarrier_PostGrow code (depending on the state of the GC). It _WILL_
+// change at runtime as the GC changes. Initially it should simply be a copy of the
+// larger of the two functions (JIT_WriteBarrier_PostGrow) to ensure we have created
+// enough space to copy that code in.
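+//
+// In C-like pseudocode (illustrative only, using the global names referenced
+// elsewhere in this directory), the patched fast path is roughly:
+//
+//     *dst = ref;                                            // the store that may AV
+//     if (ref >= g_ephemeral_low && ref < g_ephemeral_high)  // patched immediates
+//         if (g_card_table[(size_t)dst >> 0xB] != 0xFF)
+//             g_card_table[(size_t)dst >> 0xB] = 0xFF;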
+ .align 16
+LEAF_ENTRY JIT_WriteBarrier, _TEXT
+
+#ifdef _DEBUG
+ // In debug builds, this just contains jump to the debug version of the write barrier by default
+ jmp JIT_WriteBarrier_Debug
+#endif
+
+ // Do the move into the GC heap. It is correct to take an AV here; the EH code
+ // figures out that this came from a WriteBarrier and correctly maps it back
+ // to the managed method which called the WriteBarrier (see setup in
+ // InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rdi], rsi
+
+ NOP_3_BYTE // padding for alignment of constant
+
+ // Can't compare a 64 bit immediate, so we have to move them into a
+ // register. Values of these immediates will be patched at runtime.
+ // By using two registers we can pipeline better. Should we decide to use
+ // a special non-volatile calling convention, this should be changed to
+ // just one.
+
+ movabs rax, 0F0F0F0F0F0F0F0F0h
+
+ // Check the lower and upper ephemeral region bounds
+ cmp rsi, rax
+ jb Exit
+
+ nop // padding for alignment of constant
+
+ movabs r8, 0F0F0F0F0F0F0F0F0h
+
+ cmp rsi, r8
+ jae Exit
+
+ nop // padding for alignment of constant
+
+ movabs rax, 0F0F0F0F0F0F0F0F0h
+
+ // Touch the card table entry, if not already dirty.
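+ // (the shift by 0Bh means one card byte covers 2^11 = 2048 bytes of heap)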
+ shr rdi, 0Bh
+ cmp byte ptr [rdi + rax], 0FFh
+ jne UpdateCardTable
+ REPRET
+
+ UpdateCardTable:
+ mov byte ptr [rdi + rax], 0FFh
+ ret
+
+ .align 16
+ Exit:
+ REPRET
+ // make sure this guy is bigger than any of the other guys
+ .align 16
+ nop
+LEAF_END_MARKED JIT_WriteBarrier, _TEXT
+
+// Mark end of the code region that we patch at runtime
+LEAF_ENTRY JIT_PatchedCodeLast, _TEXT
+ ret
+LEAF_END JIT_PatchedCodeLast, _TEXT
+
+// There is an even more optimized version of these helpers possible which takes
+// advantage of knowledge of which way the ephemeral heap is growing to only do 1/2
+// that check (this is more significant in the JIT_WriteBarrier case).
+//
+// Additionally we can look into providing helpers which will take the src/dest from
+// specific registers (like x86) which _could_ (??) make for easier register allocation
+// for the JIT64, however it might lead to having to have some nasty code that treats
+// these guys really special like... :(.
+//
+// Version that does the move, checks whether or not it's in the GC heap, and
+// whether or not its card needs to be updated
+//
+// void JIT_CheckedWriteBarrier(Object** dst, Object* src)
+LEAF_ENTRY JIT_CheckedWriteBarrier, _TEXT
+
+ // When WRITE_BARRIER_CHECK is defined _NotInHeap will write the reference
+ // but if it isn't then it will just return.
+ //
+ // See if this is in GCHeap
+ PREPARE_EXTERNAL_VAR g_lowest_address, rax
+ cmp rdi, [rax]
+ jb NotInHeap
+ PREPARE_EXTERNAL_VAR g_highest_address, rax
+ cmp rdi, [rax]
+ jnb NotInHeap
+
+ jmp JIT_WriteBarrier
+
+ NotInHeap:
+ // See comment above about possible AV
+ mov [rdi], rsi
+ ret
+LEAF_END_MARKED JIT_CheckedWriteBarrier, _TEXT
+
+// JIT_ByRefWriteBarrier has weird semantics; see usage in StubLinkerX86.cpp
+//
+// Entry:
+// RDI - address of ref-field (assigned to)
+// RSI - address of the data (source)
+// RCX can be trashed
+// Exit:
+// RDI, RSI are incremented by SIZEOF(LPVOID)
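+//
+// In C-like pseudocode (illustrative): val = *src; *dst = val; if val is in
+// the ephemeral range, dirty the card byte for dst; then dst += 8, src += 8.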
+LEAF_ENTRY JIT_ByRefWriteBarrier, _TEXT
+ push rax
+ mov rcx, [rsi]
+
+// If !WRITE_BARRIER_CHECK do the write first, otherwise we might have to do some ShadowGC stuff
+#ifndef WRITE_BARRIER_CHECK
+ // rcx is [rsi]
+ mov [rdi], rcx
+#endif
+
+ // When WRITE_BARRIER_CHECK is defined _NotInHeap will write the reference
+ // but if it isn't then it will just return.
+ //
+ // See if this is in GCHeap
+ PREPARE_EXTERNAL_VAR g_lowest_address, rax
+ cmp rdi, [rax]
+ jb NotInHeap_ByRefWriteBarrier
+ PREPARE_EXTERNAL_VAR g_highest_address, rax
+ cmp rdi, [rax]
+ jnb NotInHeap_ByRefWriteBarrier
+
+#ifdef WRITE_BARRIER_CHECK
+ // we can only trash rcx in this function so in _DEBUG we need to save
+ // some scratch registers.
+ push r10
+ push r11
+
+ // **ALSO update the shadow GC heap if that is enabled**
+ // Do not perform the work if g_GCShadow is 0
+ PREPARE_EXTERNAL_VAR g_GCShadow, rax
+ cmp [rax], 0
+ je NoShadow_ByRefWriteBarrier
+
+ // If we end up outside of the heap don't corrupt random memory
+ mov r10, rdi
+ PREPARE_EXTERNAL_VAR g_lowest_address, rax
+ sub r10, [rax]
+ jb NoShadow_ByRefWriteBarrier
+
+ // Check that our adjusted destination is somewhere in the shadow gc
+ PREPARE_EXTERNAL_VAR g_GCShadow, rax
+ add r10, [rax]
+ PREPARE_EXTERNAL_VAR g_GCShadowEnd, rax
+ cmp r10, [rax]
+ ja NoShadow_ByRefWriteBarrier
+
+ // Write ref into real GC
+ mov [rdi], rcx
+ // Write ref into shadow GC
+ mov [r10], rcx
+
+ // Ensure that the write to the shadow heap occurs before the read from
+ // the GC heap so that race conditions are caught by INVALIDGCVALUE
+ mfence
+
+ // Check that GC/ShadowGC values match
+ mov r11, [rdi]
+ mov rax, [r10]
+ cmp rax, r11
+ je DoneShadow_ByRefWriteBarrier
+ mov r11, INVALIDGCVALUE
+ mov [r10], r11
+
+ jmp DoneShadow_ByRefWriteBarrier
+
+ // If we don't have a shadow GC we won't have done the write yet
+ NoShadow_ByRefWriteBarrier:
+ mov [rdi], rcx
+
+ // If we had a shadow GC then we already wrote to the real GC at the same time
+ // as the shadow GC so we want to jump over the real write immediately above.
+ // Additionally we know for sure that we are inside the heap and therefore don't
+ // need to replicate the above checks.
+ DoneShadow_ByRefWriteBarrier:
+ pop r11
+ pop r10
+#endif
+
+ // See if we can just quick out
+ PREPARE_EXTERNAL_VAR g_ephemeral_low, rax
+ cmp rcx, [rax]
+ jb Exit_ByRefWriteBarrier
+ PREPARE_EXTERNAL_VAR g_ephemeral_high, rax
+ cmp rcx, [rax]
+ jnb Exit_ByRefWriteBarrier
+
+ // move current rdi value into rcx and then increment the pointers
+ mov rcx, rdi
+ add rsi, 8h
+ add rdi, 8h
+
+ // Check if we need to update the card table
+ // Calc pCardByte
+ shr rcx, 0Bh
+ PREPARE_EXTERNAL_VAR g_card_table, rax
+ add rcx, [rax]
+
+ pop rax
+
+ // Check if this card is dirty
+ cmp byte ptr [rcx], 0FFh
+ jne UpdateCardTable_ByRefWriteBarrier
+ REPRET
+
+ UpdateCardTable_ByRefWriteBarrier:
+ mov byte ptr [rcx], 0FFh
+ ret
+
+ .align 16
+ NotInHeap_ByRefWriteBarrier:
+// If WRITE_BARRIER_CHECK then we won't have already done the mov and should do it here.
+// If !WRITE_BARRIER_CHECK we want _NotInHeap and _Leave to be the same, and both
+// 16-byte aligned.
+#ifdef WRITE_BARRIER_CHECK
+ // rcx is [rsi]
+ mov [rdi], rcx
+#endif
+ Exit_ByRefWriteBarrier:
+ // Increment the pointers before leaving
+ add rdi, 8h
+ add rsi, 8h
+ pop rax
+ ret
+LEAF_END JIT_ByRefWriteBarrier, _TEXT
diff --git a/src/vm/amd64/jithelpers_fastwritebarriers.S b/src/vm/amd64/jithelpers_fastwritebarriers.S
new file mode 100644
index 0000000000..d1366e825b
--- /dev/null
+++ b/src/vm/amd64/jithelpers_fastwritebarriers.S
@@ -0,0 +1,235 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+
+ .align 4
+LEAF_ENTRY JIT_WriteBarrier_PreGrow32, _TEXT
+ // Do the move into the GC heap. It is correct to take an AV here; the EH code
+ // figures out that this came from a WriteBarrier and correctly maps it back
+ // to the managed method which called the WriteBarrier (see setup in
+ // InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rdi], rsi
+
+ NOP_2_BYTE // padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_PreGrow32_PatchLabel_Lower
+ cmp rsi, -0F0F0F10h // 0F0F0F0F0h
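+ // (a 32-bit cmp immediate is sign-extended to 64 bits, so the placeholder
+ // 0F0F0F0F0h is written in its negative form; this variant is only used when
+ // the patched bound itself fits in a sign-extended 32-bit immediate)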
+ jb Exit_PreGrow32
+
+ shr rdi, 0Bh
+PATCH_LABEL JIT_WriteBarrier_PreGrow32_PatchLabel_CardTable_Check
+ cmp byte ptr [rdi + 0F0F0F0F0h], 0FFh
+ jne UpdateCardTable_PreGrow32
+ REPRET
+
+ nop // padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_PreGrow32_PatchLabel_CardTable_Update
+ UpdateCardTable_PreGrow32:
+ mov byte ptr [rdi + 0F0F0F0F0h], 0FFh
+ ret
+
+ .align 16
+ Exit_PreGrow32:
+ REPRET
+LEAF_END_MARKED JIT_WriteBarrier_PreGrow32, _TEXT
+
+ .align 8
+LEAF_ENTRY JIT_WriteBarrier_PreGrow64, _TEXT
+ // Do the move into the GC heap. It is correct to take an AV here; the EH code
+ // figures out that this came from a WriteBarrier and correctly maps it back
+ // to the managed method which called the WriteBarrier (see setup in
+ // InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rdi], rsi
+
+ NOP_3_BYTE // padding for alignment of constant
+
+ // Can't compare a 64 bit immediate, so we have to move it into a
+ // register. Value of this immediate will be patched at runtime.
+PATCH_LABEL JIT_WriteBarrier_PreGrow64_Patch_Label_Lower
+ movabs rax, 0F0F0F0F0F0F0F0F0h
+
+ // Check the lower ephemeral region bound.
+ cmp rsi, rax
+ jb Exit_PreGrow64
+
+ nop // padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_PreGrow64_Patch_Label_CardTable
+ movabs rax, 0F0F0F0F0F0F0F0F0h
+
+ // Touch the card table entry, if not already dirty.
+ shr rdi, 0Bh
+ cmp byte ptr [rdi + rax], 0FFh
+ jne UpdateCardTable_PreGrow64
+ REPRET
+
+ UpdateCardTable_PreGrow64:
+ mov byte ptr [rdi + rax], 0FFh
+ ret
+
+ .align 16
+ Exit_PreGrow64:
+ REPRET
+LEAF_END_MARKED JIT_WriteBarrier_PreGrow64, _TEXT
+
+ .align 8
+// See comments for JIT_WriteBarrier_PreGrow64 (above).
+LEAF_ENTRY JIT_WriteBarrier_PostGrow64, _TEXT
+ // Do the move into the GC heap. It is correct to take an AV here; the EH code
+ // figures out that this came from a WriteBarrier and correctly maps it back
+ // to the managed method which called the WriteBarrier (see setup in
+ // InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rdi], rsi
+
+ NOP_3_BYTE // padding for alignment of constant
+
+ // Can't compare a 64 bit immediate, so we have to move them into a
+ // register. Values of these immediates will be patched at runtime.
+ // By using two registers we can pipeline better. Should we decide to use
+ // a special non-volatile calling convention, this should be changed to
+ // just one.
+PATCH_LABEL JIT_WriteBarrier_PostGrow64_Patch_Label_Lower
+ movabs rax, 0F0F0F0F0F0F0F0F0h
+
+ // Check the lower and upper ephemeral region bounds
+ cmp rsi, rax
+ jb Exit_PostGrow64
+
+ nop // padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_PostGrow64_Patch_Label_Upper
+ movabs r8, 0F0F0F0F0F0F0F0F0h
+
+ cmp rsi, r8
+ jae Exit_PostGrow64
+
+ nop // padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_PostGrow64_Patch_Label_CardTable
+ movabs rax, 0F0F0F0F0F0F0F0F0h
+
+ // Touch the card table entry, if not already dirty.
+ shr rdi, 0Bh
+ cmp byte ptr [rdi + rax], 0FFh
+ jne UpdateCardTable_PostGrow64
+ REPRET
+
+ UpdateCardTable_PostGrow64:
+ mov byte ptr [rdi + rax], 0FFh
+ ret
+
+ .align 16
+ Exit_PostGrow64:
+ REPRET
+LEAF_END_MARKED JIT_WriteBarrier_PostGrow64, _TEXT
+
+ .align 4
+LEAF_ENTRY JIT_WriteBarrier_PostGrow32, _TEXT
+ // Do the move into the GC heap. It is correct to take an AV here; the EH code
+ // figures out that this came from a WriteBarrier and correctly maps it back
+ // to the managed method which called the WriteBarrier (see setup in
+ // InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rdi], rsi
+
+ NOP_2_BYTE // padding for alignment of constant
+
+ // Check the lower and upper ephemeral region bounds
+
+PATCH_LABEL JIT_WriteBarrier_PostGrow32_PatchLabel_Lower
+ cmp rsi, -0F0F0F10h // 0F0F0F0F0h
+ jb Exit_PostGrow32
+
+ NOP_3_BYTE // padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_PostGrow32_PatchLabel_Upper
+ cmp rsi, -0F0F0F10h // 0F0F0F0F0h
+ jae Exit_PostGrow32
+
+ // Touch the card table entry, if not already dirty.
+ shr rdi, 0Bh
+
+PATCH_LABEL JIT_WriteBarrier_PostGrow32_PatchLabel_CheckCardTable
+ cmp byte ptr [rdi + 0F0F0F0F0h], 0FFh
+ jne UpdateCardTable_PostGrow32
+ REPRET
+
+ nop // padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_PostGrow32_PatchLabel_UpdateCardTable
+ UpdateCardTable_PostGrow32:
+ mov byte ptr [rdi + 0F0F0F0F0h], 0FFh
+ ret
+
+ .align 16
+ Exit_PostGrow32:
+ REPRET
+LEAF_END_MARKED JIT_WriteBarrier_PostGrow32, _TEXT
+
+
+ .align 4
+LEAF_ENTRY JIT_WriteBarrier_SVR32, _TEXT
+ //
+ // SVR GC has multiple heaps, so it cannot provide one single
+ // ephemeral region to bounds check against, so we just skip the
+ // bounds checking all together and do our card table update
+ // unconditionally.
+ //
+
+ // Do the move into the GC heap. It is correct to take an AV here; the EH code
+ // figures out that this came from a WriteBarrier and correctly maps it back
+ // to the managed method which called the WriteBarrier (see setup in
+ // InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rdi], rsi
+
+ shr rdi, 0Bh
+
+ NOP_3_BYTE // padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_SVR32_PatchLabel_CheckCardTable
+ cmp byte ptr [rdi + 0F0F0F0F0h], 0FFh
+ jne UpdateCardTable_SVR32
+ REPRET
+
+ nop // padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_SVR32_PatchLabel_UpdateCardTable
+ UpdateCardTable_SVR32:
+ mov byte ptr [rdi + 0F0F0F0F0h], 0FFh
+ ret
+LEAF_END_MARKED JIT_WriteBarrier_SVR32, _TEXT
+
+ .align 8
+LEAF_ENTRY JIT_WriteBarrier_SVR64, _TEXT
+ //
+ // SVR GC has multiple heaps, so it cannot provide one single
+ // ephemeral region to bounds check against, so we just skip the
+ // bounds checking all together and do our card table update
+ // unconditionally.
+ //
+
+ // Do the move into the GC heap. It is correct to take an AV here; the EH code
+ // figures out that this came from a WriteBarrier and correctly maps it back
+ // to the managed method which called the WriteBarrier (see setup in
+ // InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rdi], rsi
+
+ NOP_3_BYTE // padding for alignment of constant
+
+PATCH_LABEL JIT_WriteBarrier_SVR64_PatchLabel_CardTable
+ movabs rax, 0F0F0F0F0F0F0F0F0h
+
+ shr rdi, 0Bh
+
+ cmp byte ptr [rdi + rax], 0FFh
+ jne UpdateCardTable_SVR64
+ REPRET
+
+ UpdateCardTable_SVR64:
+ mov byte ptr [rdi + rax], 0FFh
+ ret
+LEAF_END_MARKED JIT_WriteBarrier_SVR64, _TEXT
diff --git a/src/vm/amd64/jithelpers_slow.S b/src/vm/amd64/jithelpers_slow.S
new file mode 100644
index 0000000000..1aed8b99e8
--- /dev/null
+++ b/src/vm/amd64/jithelpers_slow.S
@@ -0,0 +1,100 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+
+#ifdef _DEBUG
+// Version for when we're sure to be in the GC, checks whether or not the card
+// needs to be updated
+//
+// void JIT_WriteBarrier_Debug(Object** dst, Object* src)
+LEAF_ENTRY JIT_WriteBarrier_Debug, _TEXT
+
+#ifdef WRITE_BARRIER_CHECK
+ // **ALSO update the shadow GC heap if that is enabled**
+ // Do not perform the work if g_GCShadow is 0
+ PREPARE_EXTERNAL_VAR g_GCShadow, rax
+ cmp [rax], 0
+ je NoShadow
+
+ // If we end up outside of the heap don't corrupt random memory
+ mov r10, rdi
+ PREPARE_EXTERNAL_VAR g_lowest_address, r11
+ sub r10, [r11]
+ jb NoShadow
+
+ // Check that our adjusted destination is somewhere in the shadow gc
+ add r10, [rax]
+ PREPARE_EXTERNAL_VAR g_GCShadowEnd, r11
+ cmp r10, [r11]
+ ja NoShadow
+
+ // Write ref into real GC // see comment below about possibility of AV
+ mov [rdi], rsi
+ // Write ref into shadow GC
+ mov [r10], rsi
+
+ // Ensure that the write to the shadow heap occurs before the read from
+ // the GC heap so that race conditions are caught by INVALIDGCVALUE
+ mfence
+
+ // Check that GC/ShadowGC values match
+ mov r11, [rdi]
+ mov rax, [r10]
+ cmp rax, r11
+ je DoneShadow
+ mov r11, INVALIDGCVALUE
+ mov [r10], r11
+
+ jmp DoneShadow
+
+ // If we don't have a shadow GC we won't have done the write yet
+ NoShadow:
+#endif
+
+ mov rax, rsi
+
+ // Do the move. It is correct to possibly take an AV here; the EH code
+ // figures out that this came from a WriteBarrier and correctly maps it back
+ // to the managed method which called the WriteBarrier (see setup in
+ // InitializeExceptionHandling, vm\exceptionhandling.cpp).
+ mov [rdi], rax
+
+#ifdef WRITE_BARRIER_CHECK
+ // If we had a shadow GC then we already wrote to the real GC at the same time
+ // as the shadow GC so we want to jump over the real write immediately above
+ DoneShadow:
+#endif
+
+ // See if we can just quick out
+ PREPARE_EXTERNAL_VAR g_ephemeral_low, r10
+ cmp rax, [r10]
+ jb Exit_Debug
+ PREPARE_EXTERNAL_VAR g_ephemeral_high, r10
+ cmp rax, [r10]
+ jnb Exit_Debug
+
+ // Check if we need to update the card table
+ // Calc pCardByte
+ shr rdi, 0Bh
+ PREPARE_EXTERNAL_VAR g_card_table, r10
+ add rdi, [r10]
+
+ // Check if this card is dirty
+ cmp byte ptr [rdi], 0FFh
+ jne UpdateCardTable_Debug
+ REPRET
+
+ UpdateCardTable_Debug:
+ mov byte ptr [rdi], 0FFh
+ ret
+
+ .align 16
+ Exit_Debug:
+ REPRET
+LEAF_END_MARKED JIT_WriteBarrier_Debug, _TEXT
+#endif
+
diff --git a/src/vm/amd64/jithelpersamd64.cpp b/src/vm/amd64/jithelpersamd64.cpp
new file mode 100644
index 0000000000..b4845b9e7a
--- /dev/null
+++ b/src/vm/amd64/jithelpersamd64.cpp
@@ -0,0 +1,52 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: JITHelpers.CPP
+//
+// ===========================================================================
+
+// This contains JITinterface routines that are specific to the
+// AMD64 platform. They are modeled after the X86 specific routines
+// found in JIThelp.asm
+
+
+#include "common.h"
+#include "jitinterface.h"
+#include "eeconfig.h"
+#include "excep.h"
+#include "ecall.h"
+#include "asmconstants.h"
+
+EXTERN_C void JIT_TailCallHelperStub_ReturnAddress();
+
+TailCallFrame * TailCallFrame::GetFrameFromContext(CONTEXT * pContext)
+{
+ _ASSERTE((void*)::GetIP(pContext) == JIT_TailCallHelperStub_ReturnAddress);
+ return (TailCallFrame*)(pContext->R13 + sizeof(GSCookie));
+}
+
+// Assuming pContext is a plain generic call-site, adjust it to look like
+// it called into TailCallHelperStub, and is at the point of the call.
+TailCallFrame * TailCallFrame::AdjustContextForTailCallHelperStub(CONTEXT * pContext, size_t cbNewArgArea, Thread * pThread)
+{
+ TailCallFrame * pNewFrame = (TailCallFrame *)(GetSP(pContext) - sizeof(TailCallFrame));
+
+ // R13 is the frame pointer (for popping the stack)
+ pContext->R13 = (size_t)pNewFrame - sizeof(GSCookie);
+ // R12 is the previous stack pointer, so we can determine if a return buffer from the
+ // immediate caller (and thus being discarded via the tail call), or someplace else
+ pContext->R12 = GetSP(pContext);
+ // for the args and pushed return address of the 'call'
+ SetSP(pContext, (size_t)pNewFrame - (cbNewArgArea + sizeof(void*) + sizeof(GSCookie)));
+
+ // For popping the Frame, store the Thread
+ pContext->R14 = (DWORD_PTR)pThread;
+ // And the current head/top
+ pContext->R15 = (DWORD_PTR)pThread->GetFrame(); // m_Next
+
+ return (TailCallFrame *) pNewFrame;
+}
diff --git a/src/vm/amd64/jitinterfaceamd64.cpp b/src/vm/amd64/jitinterfaceamd64.cpp
new file mode 100644
index 0000000000..de92ad467b
--- /dev/null
+++ b/src/vm/amd64/jitinterfaceamd64.cpp
@@ -0,0 +1,574 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// ===========================================================================
+// File: JITinterfaceCpu.CPP
+//
+// ===========================================================================
+
+// This contains JITinterface routines that are specific to the
+// AMD64 platform. They are modeled after the X86 specific routines
+// found in JITinterfaceX86.cpp or JIThelp.asm
+
+
+#include "common.h"
+#include "jitinterface.h"
+#include "eeconfig.h"
+#include "excep.h"
+#include "threadsuspend.h"
+
+extern BYTE* g_ephemeral_low;
+extern BYTE* g_ephemeral_high;
+extern DWORD* g_card_table;
+
+// Patch Labels for the various write barriers
+EXTERN_C void JIT_WriteBarrier_End();
+
+EXTERN_C void JIT_WriteBarrier_PreGrow32(Object **dst, Object *ref);
+EXTERN_C void JIT_WriteBarrier_PreGrow32_PatchLabel_Lower();
+EXTERN_C void JIT_WriteBarrier_PreGrow32_PatchLabel_CardTable_Check();
+EXTERN_C void JIT_WriteBarrier_PreGrow32_PatchLabel_CardTable_Update();
+EXTERN_C void JIT_WriteBarrier_PreGrow32_End();
+
+EXTERN_C void JIT_WriteBarrier_PreGrow64(Object **dst, Object *ref);
+EXTERN_C void JIT_WriteBarrier_PreGrow64_Patch_Label_Lower();
+EXTERN_C void JIT_WriteBarrier_PreGrow64_Patch_Label_CardTable();
+EXTERN_C void JIT_WriteBarrier_PreGrow64_End();
+
+EXTERN_C void JIT_WriteBarrier_PostGrow32(Object **dst, Object *ref);
+EXTERN_C void JIT_WriteBarrier_PostGrow32_PatchLabel_Lower();
+EXTERN_C void JIT_WriteBarrier_PostGrow32_PatchLabel_Upper();
+EXTERN_C void JIT_WriteBarrier_PostGrow32_PatchLabel_CheckCardTable();
+EXTERN_C void JIT_WriteBarrier_PostGrow32_PatchLabel_UpdateCardTable();
+EXTERN_C void JIT_WriteBarrier_PostGrow32_End();
+
+EXTERN_C void JIT_WriteBarrier_PostGrow64(Object **dst, Object *ref);
+EXTERN_C void JIT_WriteBarrier_PostGrow64_Patch_Label_Lower();
+EXTERN_C void JIT_WriteBarrier_PostGrow64_Patch_Label_Upper();
+EXTERN_C void JIT_WriteBarrier_PostGrow64_Patch_Label_CardTable();
+EXTERN_C void JIT_WriteBarrier_PostGrow64_End();
+
+#ifdef FEATURE_SVR_GC
+EXTERN_C void JIT_WriteBarrier_SVR32(Object **dst, Object *ref);
+EXTERN_C void JIT_WriteBarrier_SVR32_PatchLabel_CheckCardTable();
+EXTERN_C void JIT_WriteBarrier_SVR32_PatchLabel_UpdateCardTable();
+EXTERN_C void JIT_WriteBarrier_SVR32_End();
+
+EXTERN_C void JIT_WriteBarrier_SVR64(Object **dst, Object *ref);
+EXTERN_C void JIT_WriteBarrier_SVR64_PatchLabel_CardTable();
+EXTERN_C void JIT_WriteBarrier_SVR64_End();
+#endif
+
+WriteBarrierManager g_WriteBarrierManager;
+
+// Use this somewhat hokey macro to concatenate the function start with the patch
+// label; this allows the code below to look relatively nice, but relies on the
+// naming convention which we have established for these helpers.
+#define CALC_PATCH_LOCATION(func,label,offset) CalculatePatchLocation((PVOID)func, (PVOID)func##_##label, offset)
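+//
+// For example, CALC_PATCH_LOCATION(JIT_WriteBarrier_PreGrow64, Patch_Label_Lower, 2)
+// expands to CalculatePatchLocation((PVOID)JIT_WriteBarrier_PreGrow64,
+// (PVOID)JIT_WriteBarrier_PreGrow64_Patch_Label_Lower, 2).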
+
+WriteBarrierManager::WriteBarrierManager() :
+ m_currentWriteBarrier(WRITE_BARRIER_UNINITIALIZED)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+#ifndef CODECOVERAGE // Deactivate alignment validation for code coverage builds
+ // because the instrumentation tool will not preserve alignment constraints and we will fail.
+
+void WriteBarrierManager::Validate()
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ // We have an invariant that the addresses of all the values that we update in our write barrier
+ // helpers must be naturally aligned; this is so that the update can happen atomically, since there
+ // are places where these values are updated while the EE is running.
+ // NOTE: we can't call this from the ctor since our infrastructure isn't ready for assert dialogs
+
+ PBYTE pLowerBoundImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PreGrow32, PatchLabel_Lower, 3);
+ PBYTE pCardTableImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PreGrow32, PatchLabel_CardTable_Check, 2);
+ PBYTE pCardTableImmediate2 = CALC_PATCH_LOCATION(JIT_WriteBarrier_PreGrow32, PatchLabel_CardTable_Update, 2);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pLowerBoundImmediate) & 0x3) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pCardTableImmediate) & 0x3) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pCardTableImmediate2) & 0x3) == 0);
+
+ pLowerBoundImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PreGrow64, Patch_Label_Lower, 2);
+ pCardTableImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PreGrow64, Patch_Label_CardTable, 2);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pLowerBoundImmediate) & 0x7) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pCardTableImmediate) & 0x7) == 0);
+
+ PBYTE pUpperBoundImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PostGrow32, PatchLabel_Upper, 3);
+ pLowerBoundImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PostGrow32, PatchLabel_Lower, 3);
+ pCardTableImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PostGrow32, PatchLabel_CheckCardTable, 2);
+ pCardTableImmediate2 = CALC_PATCH_LOCATION(JIT_WriteBarrier_PostGrow32, PatchLabel_UpdateCardTable, 2);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pUpperBoundImmediate) & 0x3) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pLowerBoundImmediate) & 0x3) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pCardTableImmediate) & 0x3) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pCardTableImmediate2) & 0x3) == 0);
+
+
+ pLowerBoundImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PostGrow64, Patch_Label_Lower, 2);
+ pUpperBoundImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PostGrow64, Patch_Label_Upper, 2);
+ pCardTableImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PostGrow64, Patch_Label_CardTable, 2);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pLowerBoundImmediate) & 0x7) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pUpperBoundImmediate) & 0x7) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pCardTableImmediate) & 0x7) == 0);
+
+#ifdef FEATURE_SVR_GC
+ pCardTableImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_SVR32, PatchLabel_CheckCardTable, 2);
+ pCardTableImmediate2 = CALC_PATCH_LOCATION(JIT_WriteBarrier_SVR32, PatchLabel_UpdateCardTable, 2);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pCardTableImmediate) & 0x3) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pCardTableImmediate2) & 0x3) == 0);
+
+ pCardTableImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_SVR64, PatchLabel_CardTable, 2);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (reinterpret_cast<UINT64>(pCardTableImmediate) & 0x7) == 0);
+#endif
+}
+
+#endif // CODECOVERAGE
+
+
+PCODE WriteBarrierManager::GetCurrentWriteBarrierCode()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch (m_currentWriteBarrier)
+ {
+ case WRITE_BARRIER_PREGROW32:
+ return GetEEFuncEntryPoint(JIT_WriteBarrier_PreGrow32);
+ case WRITE_BARRIER_PREGROW64:
+ return GetEEFuncEntryPoint(JIT_WriteBarrier_PreGrow64);
+ case WRITE_BARRIER_POSTGROW32:
+ return GetEEFuncEntryPoint(JIT_WriteBarrier_PostGrow32);
+ case WRITE_BARRIER_POSTGROW64:
+ return GetEEFuncEntryPoint(JIT_WriteBarrier_PostGrow64);
+#ifdef FEATURE_SVR_GC
+ case WRITE_BARRIER_SVR32:
+ return GetEEFuncEntryPoint(JIT_WriteBarrier_SVR32);
+ case WRITE_BARRIER_SVR64:
+ return GetEEFuncEntryPoint(JIT_WriteBarrier_SVR64);
+#endif
+ default:
+ UNREACHABLE_MSG("unexpected m_currentWriteBarrier!");
+ };
+}
+
+size_t WriteBarrierManager::GetSpecificWriteBarrierSize(WriteBarrierType writeBarrier)
+{
+// marked asm functions are those which use the LEAF_END_MARKED macro to end them which
+// creates a public Name_End label which can be used to figure out their size without
+// having to create unwind info.
+#define MARKED_FUNCTION_SIZE(pfn) (size_t)((LPBYTE)GetEEFuncEntryPoint(pfn##_End) - (LPBYTE)GetEEFuncEntryPoint(pfn))
+
+ switch (writeBarrier)
+ {
+ case WRITE_BARRIER_PREGROW32:
+ return MARKED_FUNCTION_SIZE(JIT_WriteBarrier_PreGrow32);
+ case WRITE_BARRIER_PREGROW64:
+ return MARKED_FUNCTION_SIZE(JIT_WriteBarrier_PreGrow64);
+ case WRITE_BARRIER_POSTGROW32:
+ return MARKED_FUNCTION_SIZE(JIT_WriteBarrier_PostGrow32);
+ case WRITE_BARRIER_POSTGROW64:
+ return MARKED_FUNCTION_SIZE(JIT_WriteBarrier_PostGrow64);
+#ifdef FEATURE_SVR_GC
+ case WRITE_BARRIER_SVR32:
+ return MARKED_FUNCTION_SIZE(JIT_WriteBarrier_SVR32);
+ case WRITE_BARRIER_SVR64:
+ return MARKED_FUNCTION_SIZE(JIT_WriteBarrier_SVR64);
+#endif
+ case WRITE_BARRIER_BUFFER:
+ return MARKED_FUNCTION_SIZE(JIT_WriteBarrier);
+ default:
+ UNREACHABLE_MSG("unexpected m_currentWriteBarrier!");
+ };
+#undef MARKED_FUNCTION_SIZE
+}
+
+size_t WriteBarrierManager::GetCurrentWriteBarrierSize()
+{
+ return GetSpecificWriteBarrierSize(m_currentWriteBarrier);
+}
+
+PBYTE WriteBarrierManager::CalculatePatchLocation(LPVOID base, LPVOID label, int offset)
+{
+ // the label should always come after the entrypoint for this function
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (LPBYTE)label > (LPBYTE)base);
+
+ return ((LPBYTE)GetEEFuncEntryPoint(JIT_WriteBarrier) + ((LPBYTE)GetEEFuncEntryPoint(label) - (LPBYTE)GetEEFuncEntryPoint(base) + offset));
+}
+
+void WriteBarrierManager::ChangeWriteBarrierTo(WriteBarrierType newWriteBarrier)
+{
+ GCX_MAYBE_COOP_NO_THREAD_BROKEN((GetThread() != NULL));
+ BOOL bEESuspended = FALSE;
+ if(m_currentWriteBarrier != WRITE_BARRIER_UNINITIALIZED && !IsGCThread())
+ {
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_GC_PREP);
+ bEESuspended = TRUE;
+ }
+
+ _ASSERTE(m_currentWriteBarrier != newWriteBarrier);
+ m_currentWriteBarrier = newWriteBarrier;
+
+ // the memcpy must come before the switch statement because the asserts inside the switch
+ // are actually looking into the JIT_WriteBarrier buffer
+ memcpy((PVOID)JIT_WriteBarrier, (LPVOID)GetCurrentWriteBarrierCode(), GetCurrentWriteBarrierSize());
+
+ switch (newWriteBarrier)
+ {
+ case WRITE_BARRIER_PREGROW32:
+ {
+ m_pLowerBoundImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PreGrow32, PatchLabel_Lower, 3);
+ m_pCardTableImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PreGrow32, PatchLabel_CardTable_Check, 2);
+ m_pCardTableImmediate2 = CALC_PATCH_LOCATION(JIT_WriteBarrier_PreGrow32, PatchLabel_CardTable_Update, 2);
+
+ // Make sure that we will be bashing the right places (immediates should be hardcoded to 0x0f0f0f0f0f0f0f0f0).
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0 == *(DWORD*)m_pLowerBoundImmediate);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0 == *(DWORD*)m_pCardTableImmediate);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0 == *(DWORD*)m_pCardTableImmediate2);
+ break;
+ }
+
+ case WRITE_BARRIER_PREGROW64:
+ {
+ m_pLowerBoundImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PreGrow64, Patch_Label_Lower, 2);
+ m_pCardTableImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PreGrow64, Patch_Label_CardTable, 2);
+
+ // Make sure that we will be bashing the right places (immediates should be hardcoded to 0x0f0f0f0f0f0f0f0f0).
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0f0f0f0f0 == *(UINT64*)m_pLowerBoundImmediate);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0f0f0f0f0 == *(UINT64*)m_pCardTableImmediate);
+ break;
+ }
+
+ case WRITE_BARRIER_POSTGROW32:
+ {
+ m_pUpperBoundImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PostGrow32, PatchLabel_Upper, 3);
+ m_pLowerBoundImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PostGrow32, PatchLabel_Lower, 3);
+ m_pCardTableImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PostGrow32, PatchLabel_CheckCardTable, 2);
+ m_pCardTableImmediate2 = CALC_PATCH_LOCATION(JIT_WriteBarrier_PostGrow32, PatchLabel_UpdateCardTable, 2);
+
+ // Make sure that we will be bashing the right places (immediates should be hardcoded to 0x0f0f0f0f0f0f0f0f0).
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0 == *(DWORD*)m_pUpperBoundImmediate);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0 == *(DWORD*)m_pLowerBoundImmediate);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0 == *(DWORD*)m_pCardTableImmediate);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0 == *(DWORD*)m_pCardTableImmediate2);
+ break;
+ }
+
+ case WRITE_BARRIER_POSTGROW64:
+ {
+ m_pLowerBoundImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PostGrow64, Patch_Label_Lower, 2);
+ m_pUpperBoundImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PostGrow64, Patch_Label_Upper, 2);
+ m_pCardTableImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PostGrow64, Patch_Label_CardTable, 2);
+
+ // Make sure that we will be bashing the right places (immediates should be hardcoded to 0x0f0f0f0f0f0f0f0f0).
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0f0f0f0f0 == *(UINT64*)m_pLowerBoundImmediate);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0f0f0f0f0 == *(UINT64*)m_pCardTableImmediate);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0f0f0f0f0 == *(UINT64*)m_pUpperBoundImmediate);
+ break;
+ }
+
+#ifdef FEATURE_SVR_GC
+ case WRITE_BARRIER_SVR32:
+ {
+ m_pCardTableImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_SVR32, PatchLabel_CheckCardTable, 2);
+ m_pCardTableImmediate2 = CALC_PATCH_LOCATION(JIT_WriteBarrier_SVR32, PatchLabel_UpdateCardTable, 2);
+
+ // Make sure that we will be bashing the right places (immediates should be hardcoded to 0x0f0f0f0f0f0f0f0f0).
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0 == *(DWORD*)m_pCardTableImmediate);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0 == *(DWORD*)m_pCardTableImmediate2);
+ break;
+ }
+
+ case WRITE_BARRIER_SVR64:
+ {
+ m_pCardTableImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_SVR64, PatchLabel_CardTable, 2);
+
+ // Make sure that we will be bashing the right places (immediates should be hardcoded to 0x0f0f0f0f0f0f0f0f0).
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", 0xf0f0f0f0f0f0f0f0 == *(UINT64*)m_pCardTableImmediate);
+ break;
+ }
+#endif
+
+ default:
+ UNREACHABLE_MSG("unexpected write barrier type!");
+ }
+
+ UpdateEphemeralBounds();
+ UpdateCardTableLocation(FALSE);
+
+ if(bEESuspended)
+ {
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+ }
+}
+
+#undef CALC_PATCH_LOCATION
+
+void WriteBarrierManager::Initialize()
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+
+ // Ensure that the generic JIT_WriteBarrier function buffer is large enough to hold any of the more specific
+ // write barrier implementations.
+ size_t cbWriteBarrierBuffer = GetSpecificWriteBarrierSize(WRITE_BARRIER_BUFFER);
+
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", cbWriteBarrierBuffer >= GetSpecificWriteBarrierSize(WRITE_BARRIER_PREGROW32));
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", cbWriteBarrierBuffer >= GetSpecificWriteBarrierSize(WRITE_BARRIER_PREGROW64));
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", cbWriteBarrierBuffer >= GetSpecificWriteBarrierSize(WRITE_BARRIER_POSTGROW32));
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", cbWriteBarrierBuffer >= GetSpecificWriteBarrierSize(WRITE_BARRIER_POSTGROW64));
+#ifdef FEATURE_SVR_GC
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", cbWriteBarrierBuffer >= GetSpecificWriteBarrierSize(WRITE_BARRIER_SVR32));
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", cbWriteBarrierBuffer >= GetSpecificWriteBarrierSize(WRITE_BARRIER_SVR64));
+#endif
+
+#if !defined(CODECOVERAGE)
+ Validate();
+#endif
+}
+
+bool WriteBarrierManager::NeedDifferentWriteBarrier(BOOL bReqUpperBoundsCheck, WriteBarrierType* pNewWriteBarrierType)
+{
+ // Init code for the JIT_WriteBarrier assembly routine. Since it will be bashed every time the GC Heap
+ // changes size, we want to do most of the work just once.
+ //
+ // The actual JIT_WriteBarrier routine will only be called in free builds, but we keep this code (that
+ // modifies it) around in debug builds to check that it works (with assertions).
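+ //
+ // Note that selection only ever moves toward more general variants: a
+ // 32-bit-immediate barrier is upgraded to its 64-bit form once the card
+ // table or ephemeral bounds no longer fit in a sign-extended 32-bit
+ // immediate, and a pre-grow barrier is upgraded to post-grow once an upper
+ // bounds check is requested. There is no downgrade path.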
+
+
+ WriteBarrierType writeBarrierType = m_currentWriteBarrier;
+
+ for(;;)
+ {
+ switch (writeBarrierType)
+ {
+ case WRITE_BARRIER_UNINITIALIZED:
+#ifdef _DEBUG
+ // Use the default slow write barrier some of the time in debug builds because it contains some good asserts
+ if ((g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK) || DbgRandomOnExe(0.5)) {
+ break;
+ }
+#endif
+
+ writeBarrierType = GCHeap::IsServerHeap() ? WRITE_BARRIER_SVR32 : WRITE_BARRIER_PREGROW32;
+ continue;
+
+ case WRITE_BARRIER_PREGROW32:
+ if (bReqUpperBoundsCheck)
+ {
+ writeBarrierType = WRITE_BARRIER_POSTGROW32;
+ continue;
+ }
+
+ if (!FitsInI4((size_t)g_card_table) || !FitsInI4((size_t)g_ephemeral_low))
+ {
+ writeBarrierType = WRITE_BARRIER_PREGROW64;
+ }
+ break;
+
+ case WRITE_BARRIER_PREGROW64:
+ if (bReqUpperBoundsCheck)
+ {
+ writeBarrierType = WRITE_BARRIER_POSTGROW64;
+ }
+ break;
+
+ case WRITE_BARRIER_POSTGROW32:
+ if (!FitsInI4((size_t)g_card_table) || !FitsInI4((size_t)g_ephemeral_low) || !FitsInI4((size_t)g_ephemeral_high))
+ {
+ writeBarrierType = WRITE_BARRIER_POSTGROW64;
+ }
+ break;
+
+ case WRITE_BARRIER_POSTGROW64:
+ break;
+
+#ifdef FEATURE_SVR_GC
+ case WRITE_BARRIER_SVR32:
+ if (!FitsInI4((size_t)g_card_table))
+ {
+ writeBarrierType = WRITE_BARRIER_SVR64;
+ }
+ break;
+
+ case WRITE_BARRIER_SVR64:
+ break;
+#endif
+
+ default:
+ UNREACHABLE_MSG("unexpected write barrier type!");
+ }
+ break;
+ }
+
+ *pNewWriteBarrierType = writeBarrierType;
+ return m_currentWriteBarrier != writeBarrierType;
+}
+
+void WriteBarrierManager::UpdateEphemeralBounds()
+{
+ bool needToFlushCache = false;
+
+ WriteBarrierType newType;
+ if (NeedDifferentWriteBarrier(FALSE, &newType))
+ {
+ ChangeWriteBarrierTo(newType);
+ return;
+ }
+
+#ifdef _DEBUG
+ // Using debug-only write barrier?
+ if (m_currentWriteBarrier == WRITE_BARRIER_UNINITIALIZED)
+ return;
+#endif
+
+ switch (m_currentWriteBarrier)
+ {
+
+ case WRITE_BARRIER_POSTGROW32:
+ {
+ // Change immediate if different from new g_ephemeral_high.
+ if (*(INT32*)m_pUpperBoundImmediate != (INT32)(size_t)g_ephemeral_high)
+ {
+ *(INT32*)m_pUpperBoundImmediate = (INT32)(size_t)g_ephemeral_high;
+ needToFlushCache = true;
+ }
+ }
+ //
+ // INTENTIONAL FALL-THROUGH!
+ //
+ case WRITE_BARRIER_PREGROW32:
+ {
+ // Change immediate if different from new g_ephemeral_low.
+ if (*(INT32*)m_pLowerBoundImmediate != (INT32)(size_t)g_ephemeral_low)
+ {
+ *(INT32*)m_pLowerBoundImmediate = (INT32)(size_t)g_ephemeral_low;
+ needToFlushCache = true;
+ }
+ break;
+ }
+
+ case WRITE_BARRIER_POSTGROW64:
+ {
+ // Change immediate if different from new g_ephemeral_high.
+ if (*(UINT64*)m_pUpperBoundImmediate != (size_t)g_ephemeral_high)
+ {
+ *(UINT64*)m_pUpperBoundImmediate = (size_t)g_ephemeral_high;
+ needToFlushCache = true;
+ }
+ }
+ //
+ // INTENTIONAL FALL-THROUGH!
+ //
+ case WRITE_BARRIER_PREGROW64:
+ {
+ // Change immediate if different from new g_ephemeral_low.
+ if (*(UINT64*)m_pLowerBoundImmediate != (size_t)g_ephemeral_low)
+ {
+ *(UINT64*)m_pLowerBoundImmediate = (size_t)g_ephemeral_low;
+ needToFlushCache = true;
+ }
+ break;
+ }
+
+#ifdef FEATURE_SVR_GC
+ case WRITE_BARRIER_SVR32:
+ case WRITE_BARRIER_SVR64:
+ {
+ break;
+ }
+#endif
+
+ default:
+ UNREACHABLE_MSG("unexpected m_currentWriteBarrier in UpdateEphemeralBounds");
+ }
+
+ if (needToFlushCache)
+ {
+ FlushInstructionCache(GetCurrentProcess(), (PVOID)JIT_WriteBarrier, GetCurrentWriteBarrierSize());
+ }
+}
+
+void WriteBarrierManager::UpdateCardTableLocation(BOOL bReqUpperBoundsCheck)
+{
+ // If we are told that we require an upper bounds check (GC did some heap
+ // reshuffling), we need to switch to the WriteBarrier_PostGrow function for
+ // good.
+
+ WriteBarrierType newType;
+ if (NeedDifferentWriteBarrier(bReqUpperBoundsCheck, &newType))
+ {
+ ChangeWriteBarrierTo(newType);
+ return;
+ }
+
+#ifdef _DEBUG
+ // Using debug-only write barrier?
+ if (m_currentWriteBarrier == WRITE_BARRIER_UNINITIALIZED)
+ return;
+#endif
+
+ bool fFlushCache = false;
+
+ if (m_currentWriteBarrier == WRITE_BARRIER_PREGROW32 ||
+ m_currentWriteBarrier == WRITE_BARRIER_POSTGROW32 ||
+ m_currentWriteBarrier == WRITE_BARRIER_SVR32)
+ {
+ if (*(INT32*)m_pCardTableImmediate != (INT32)(size_t)g_card_table)
+ {
+ *(INT32*)m_pCardTableImmediate = (INT32)(size_t)g_card_table;
+ *(INT32*)m_pCardTableImmediate2 = (INT32)(size_t)g_card_table;
+ fFlushCache = true;
+ }
+ }
+ else
+ {
+ if (*(UINT64*)m_pCardTableImmediate != (size_t)g_card_table)
+ {
+ *(UINT64*)m_pCardTableImmediate = (size_t)g_card_table;
+ fFlushCache = true;
+ }
+ }
+
+ if (fFlushCache)
+ {
+ FlushInstructionCache(GetCurrentProcess(), (LPVOID)JIT_WriteBarrier, GetCurrentWriteBarrierSize());
+ }
+}
+
+
+// This function bashes the super fast amd64 version of the JIT_WriteBarrier
+// helper. It should be called by the GC whenever the ephemeral region bounds
+// change but the region still remains at the top of the GC heap.
+void StompWriteBarrierEphemeral()
+{
+ WRAPPER_NO_CONTRACT;
+
+ g_WriteBarrierManager.UpdateEphemeralBounds();
+}
+
+// This function bashes the super fast amd64 versions of the JIT_WriteBarrier
+// helpers. It should be called by the GC whenever the ephemeral region moves
+// from the top of the GC heap, and/or when the card table gets moved.
+void StompWriteBarrierResize(BOOL bReqUpperBoundsCheck)
+{
+ WRAPPER_NO_CONTRACT;
+
+ g_WriteBarrierManager.UpdateCardTableLocation(bReqUpperBoundsCheck);
+}
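+
+// For illustration only (the real call sites live in the GC code), the GC is
+// expected to drive these entry points roughly as follows:
+//
+//     if (only the ephemeral bounds changed, region still at the heap top)
+//         StompWriteBarrierEphemeral();
+//     else // segments or the card table were reallocated
+//         StompWriteBarrierResize(TRUE /* bReqUpperBoundsCheck */);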
diff --git a/src/vm/amd64/profiler.cpp b/src/vm/amd64/profiler.cpp
new file mode 100644
index 0000000000..605dda8dc9
--- /dev/null
+++ b/src/vm/amd64/profiler.cpp
@@ -0,0 +1,368 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// FILE: profiler.cpp
+//
+
+//
+
+//
+// ======================================================================================
+
+#include "common.h"
+
+#ifdef PROFILING_SUPPORTED
+#include "proftoeeinterfaceimpl.h"
+
+MethodDesc *FunctionIdToMethodDesc(FunctionID functionID);
+
+// TODO: move these to some common.h file
+// FLAGS
+#define PROFILE_ENTER 0x1
+#define PROFILE_LEAVE 0x2
+#define PROFILE_TAILCALL 0x4
+
+typedef struct _PROFILE_PLATFORM_SPECIFIC_DATA
+{
+ FunctionID functionId;
+ void *rbp;
+ void *probeRsp;
+ void *ip;
+ void *profiledRsp;
+ UINT64 rax;
+ LPVOID hiddenArg;
+ UINT64 flt0; // floats stored as doubles
+ UINT64 flt1;
+ UINT64 flt2;
+ UINT64 flt3;
+ UINT32 flags;
+} PROFILE_PLATFORM_SPECIFIC_DATA, *PPROFILE_PLATFORM_SPECIFIC_DATA;
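+
+// The flags field holds the PROFILE_* bits defined above; the ProfileArgIterator
+// constructor below asserts that PROFILE_ENTER is never combined with
+// PROFILE_LEAVE or PROFILE_TAILCALL.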
+
+
+/*
+ * ProfileGetIPFromPlatformSpecificHandle
+ *
+ * This routine takes the platformSpecificHandle and retrieves from it the
+ * IP value.
+ *
+ * Parameters:
+ * handle - the platformSpecificHandle passed to ProfileEnter/Leave/Tailcall
+ *
+ * Returns:
+ * The IP value stored in the handle.
+ */
+UINT_PTR ProfileGetIPFromPlatformSpecificHandle(void *handle)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)handle;
+ return (UINT_PTR)pData->ip;
+}
+
+
+/*
+ * ProfileSetFunctionIDInPlatformSpecificHandle
+ *
+ * This routine takes the platformSpecificHandle and functionID, and assigns
+ * functionID to the functionId field of the platformSpecificHandle.
+ *
+ * Parameters:
+ * pPlatformSpecificHandle - the platformSpecificHandle passed to ProfileEnter/Leave/Tailcall
+ * functionID - the FunctionID to be assigned
+ *
+ * Returns:
+ * None
+ */
+void ProfileSetFunctionIDInPlatformSpecificHandle(void * pPlatformSpecificHandle, FunctionID functionID)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pPlatformSpecificHandle != NULL);
+ _ASSERTE(functionID != NULL);
+
+ PROFILE_PLATFORM_SPECIFIC_DATA * pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA *>(pPlatformSpecificHandle);
+ pData->functionId = functionID;
+}
+
+/*
+ * ProfileArgIterator::ProfileArgIterator
+ *
+ * Constructor. Initializes for arg iteration.
+ *
+ * Parameters:
+ * pMetaSig - The signature of the method we are going to iterate over
+ * platformSpecificHandle - the value passed to ProfileEnter/Leave/Tailcall
+ *
+ * Returns:
+ * None.
+ */
+ProfileArgIterator::ProfileArgIterator(MetaSig * pSig, void * platformSpecificHandle) :
+ m_argIterator(pSig)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(pSig != NULL);
+ _ASSERTE(platformSpecificHandle != NULL);
+
+ m_handle = platformSpecificHandle;
+ PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)m_handle;
+
+ // unwind a frame and get the Rsp for the profiled method to make sure it matches
+ // what the JIT gave us
+#ifdef _DEBUG
+ {
+ // setup the context to represent the frame that called ProfileEnterNaked
+ CONTEXT ctx;
+ memset(&ctx, 0, sizeof(CONTEXT));
+ ctx.Rsp = (UINT64)pData->probeRsp;
+ ctx.Rbp = (UINT64)pData->rbp;
+ ctx.Rip = (UINT64)pData->ip;
+
+ // walk up a frame to the caller frame (called the managed method which
+ // called ProfileEnterNaked)
+ Thread::VirtualUnwindCallFrame(&ctx);
+
+ _ASSERTE(pData->profiledRsp == (void*)ctx.Rsp);
+ }
+#endif // _DEBUG
+
+ // Get the hidden arg if there is one
+ MethodDesc * pMD = FunctionIdToMethodDesc(pData->functionId);
+
+ if ( (pData->hiddenArg == NULL) &&
+ (pMD->RequiresInstArg() || pMD->AcquiresInstMethodTableFromThis()) )
+ {
+ // In the enter probe, the JIT may not have pushed the generics token onto the stack yet.
+ // Luckily, we can inspect the registers reliably at this point.
+ if (pData->flags & PROFILE_ENTER)
+ {
+ _ASSERTE(!((pData->flags & PROFILE_LEAVE) || (pData->flags & PROFILE_TAILCALL)));
+
+ if (pMD->AcquiresInstMethodTableFromThis())
+ {
+ pData->hiddenArg = GetThis();
+ }
+ else
+ {
+ // The param type arg comes after the return buffer argument and the "this" pointer.
+ int index = 0;
+
+ if (m_argIterator.HasThis())
+ {
+ index++;
+ }
+
+ if (m_argIterator.HasRetBuffArg())
+ {
+ index++;
+ }
+
+ pData->hiddenArg = *(LPVOID*)((LPBYTE)pData->profiledRsp + (index * sizeof(SIZE_T)));
+ }
+ }
+ else
+ {
+ EECodeInfo codeInfo((PCODE)pData->ip);
+
+ // We want to pass the caller SP here.
+ pData->hiddenArg = EECodeManager::GetExactGenericsToken((SIZE_T)(pData->profiledRsp), &codeInfo);
+ }
+ }
+}
+
+/*
+ * ProfileArgIterator::~ProfileArgIterator
+ *
+ * Destructor, releases all resources.
+ *
+ */
+ProfileArgIterator::~ProfileArgIterator()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_handle = NULL;
+}
+
+/*
+ * ProfileArgIterator::GetNextArgAddr
+ *
+ * After initialization, this method is called repeatedly until it
+ * returns NULL to get the address of each arg. Note: this address
+ * could be anywhere on the stack.
+ *
+ * Returns:
+ * Address of the argument, or NULL if iteration is complete.
+ */
+LPVOID ProfileArgIterator::GetNextArgAddr()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(m_handle != NULL);
+
+ PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)m_handle;
+
+ if ((pData->flags & PROFILE_LEAVE) || (pData->flags & PROFILE_TAILCALL))
+ {
+ _ASSERTE(!"GetNextArgAddr() - arguments are not available in leave and tailcall probes");
+ return NULL;
+ }
+
+ int argOffset = m_argIterator.GetNextOffset();
+
+ // argOffset of TransitionBlock::InvalidOffset indicates that we're done
+ if (argOffset == TransitionBlock::InvalidOffset)
+ {
+ return NULL;
+ }
+
+ // stack args are offset against the profiledRsp
+ if (TransitionBlock::IsStackArgumentOffset(argOffset))
+ {
+ LPVOID pArg = ((LPBYTE)pData->profiledRsp) + (argOffset - TransitionBlock::GetOffsetOfArgs());
+
+ if (m_argIterator.IsArgPassedByRef())
+ pArg = *(LPVOID *)pArg;
+
+ return pArg;
+ }
+
+ // if we're here we have an enregistered argument
+ int regStructOfs = (argOffset - TransitionBlock::GetOffsetOfArgumentRegisters());
+ _ASSERTE(regStructOfs < ARGUMENTREGISTERS_SIZE);
+
+ CorElementType t = m_argIterator.GetArgType();
+ _ASSERTE(IS_ALIGNED(regStructOfs, sizeof(SLOT)));
+ if (t == ELEMENT_TYPE_R4 || t == ELEMENT_TYPE_R8)
+ {
+ return (LPBYTE)&pData->flt0 + regStructOfs;
+ }
+ else
+ {
+ // enregistered args (which are really stack homed) are offset against profiledRsp
+ LPVOID pArg = ((LPBYTE)pData->profiledRsp + regStructOfs);
+
+ if (m_argIterator.IsArgPassedByRef())
+ pArg = *(LPVOID *)pArg;
+
+ return pArg;
+ }
+
+ return NULL;
+}
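+
+// A minimal usage sketch (hypothetical caller, not part of this file): an
+// enter probe that wants every argument address would drain the iterator
+// until it returns NULL, assuming pSig and handle came from the probe:
+//
+//     ProfileArgIterator argIt(pSig, handle);
+//     for (LPVOID pArgAddr = argIt.GetNextArgAddr();
+//          pArgAddr != NULL;
+//          pArgAddr = argIt.GetNextArgAddr())
+//     {
+//         // inspect the argument bytes at pArgAddr
+//     }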
+
+/*
+ * ProfileArgIterator::GetHiddenArgValue
+ *
+ * Called after initialization, any number of times, to retrieve any
+ * hidden argument, so that resolution for Generics can be done.
+ *
+ * Parameters:
+ * None.
+ *
+ * Returns:
+ * Value of the hidden parameter, or NULL if none exists.
+ */
+LPVOID ProfileArgIterator::GetHiddenArgValue(void)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)m_handle;
+
+ return pData->hiddenArg;
+}
+
+/*
+ * ProfileArgIterator::GetThis
+ *
+ * Called after initialization, any number of times, to retrieve any
+ * 'this' pointer.
+ *
+ * Parameters:
+ * None.
+ *
+ * Returns:
+ * Address of the 'this', or NULL if none exists.
+ */
+LPVOID ProfileArgIterator::GetThis(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)m_handle;
+ MethodDesc * pMD = FunctionIdToMethodDesc(pData->functionId);
+
+ // We guarantee to return the correct "this" pointer in the enter probe.
+ // For the leave and tailcall probes, we only return a valid "this" pointer if it is the generics token.
+ if (pData->hiddenArg != NULL)
+ {
+ if (pMD->AcquiresInstMethodTableFromThis())
+ {
+ return pData->hiddenArg;
+ }
+ }
+
+ if (pData->flags & PROFILE_ENTER)
+ {
+ if (m_argIterator.HasThis())
+ {
+ return *(LPVOID*)((LPBYTE)pData->profiledRsp);
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * ProfileArgIterator::GetReturnBufferAddr
+ *
+ * Called after initialization, any number of times, to retrieve the
+ * address of the return buffer. NULL indicates no return value.
+ *
+ * Parameters:
+ * None.
+ *
+ * Returns:
+ * Address of the return buffer, or NULL if none exists.
+ */
+LPVOID ProfileArgIterator::GetReturnBufferAddr(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)m_handle;
+
+ if (m_argIterator.HasRetBuffArg())
+ {
+ // JIT64 makes sure that in ret-buf-arg cases where the method is being profiled,
+ // rax is set up with the address of the caller-passed buffer. This is
+ // _questionably_ required by our calling convention, but is required by our profiler spec.
+ return (LPVOID)pData->rax;
+ }
+
+ CorElementType t = m_argIterator.GetSig()->GetReturnType();
+ if (ELEMENT_TYPE_VOID != t)
+ {
+ if (ELEMENT_TYPE_R4 == t || ELEMENT_TYPE_R8 == t)
+ pData->rax = pData->flt0;
+
+ return &(pData->rax);
+ }
+ else
+ return NULL;
+}
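+
+// Note: for ELEMENT_TYPE_R4/R8 returns the value lives in xmm0 (captured in
+// flt0), so it is copied into the rax slot above purely so that callers can
+// always read the return value through the single address returned here.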
+
+#undef PROFILE_ENTER
+#undef PROFILE_LEAVE
+#undef PROFILE_TAILCALL
+
+#endif // PROFILING_SUPPORTED
+
diff --git a/src/vm/amd64/remotingamd64.cpp b/src/vm/amd64/remotingamd64.cpp
new file mode 100644
index 0000000000..c909adb792
--- /dev/null
+++ b/src/vm/amd64/remotingamd64.cpp
@@ -0,0 +1,673 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+/*===========================================================================
+**
+** File: RemotingAmd64.cpp
+**
+**
+**
+** Purpose: Defines various remoting related functions for the AMD64 architecture
+**
+**
+** See code:EEStartup#TableOfContents for EE overview
+**
+=============================================================================*/
+
+#include "common.h"
+
+#ifdef FEATURE_REMOTING
+
+#include "excep.h"
+#include "comdelegate.h"
+#include "remoting.h"
+#include "field.h"
+#include "siginfo.hpp"
+#include "stackbuildersink.h"
+#include "threads.h"
+#include "method.hpp"
+
+#include "asmconstants.h"
+
+// External variables
+extern DWORD g_dwNonVirtualThunkRemotingLabelOffset;
+extern DWORD g_dwNonVirtualThunkReCheckLabelOffset;
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::CheckForContextMatch public
+//
+// Synopsis: This code generates a check to see if the current context and
+// the context of the proxy match.
+//
+//+----------------------------------------------------------------------------
+//
+// returns zero if contexts match
+// returns non-zero if contexts don't match
+//
+extern "C" UINT_PTR __stdcall CRemotingServices__CheckForContextMatch(Object* pStubData)
+{
+ // This method cannot have a contract because CreateStubForNonVirtualMethod assumes
+ // it won't trash XMM registers. The code generated for contracts by recent compilers
+ // is trashing XMM registers.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_COOPERATIVE; // due to the Object parameter
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ UINT_PTR contextID = *(UINT_PTR*)pStubData->UnBox();
+ UINT_PTR contextCur = (UINT_PTR)GetThread()->m_Context;
+ return (contextCur != contextID); // chosen to match x86 convention
+}
+
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::CreateThunkForVirtualMethod private
+//
+// Synopsis: Creates the thunk that pushes the supplied slot number and jumps
+// to TP Stub
+//
+//+----------------------------------------------------------------------------
+PCODE CTPMethodTable::CreateThunkForVirtualMethod(DWORD dwSlot, BYTE* pbCode)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ BYTE *pbCodeStart = pbCode;
+
+ // NOTE: if you change the code generated here, update
+ // CVirtualThunkMgr::IsThunkByASM, CVirtualThunkMgr::GetMethodDescByASM
+
+ //
+ // mov r10, <dwSlot>
+ // mov rax, TransparentProxyStub
+ // jmp rax
+ //
+ *pbCode++ = 0x49;
+ *pbCode++ = 0xc7;
+ *pbCode++ = 0xc2;
+ *((DWORD*)pbCode) = dwSlot;
+ pbCode += sizeof(DWORD);
+ *pbCode++ = 0x48;
+ *pbCode++ = 0xB8;
+ *((UINT64*)pbCode) = (UINT64)(TransparentProxyStub);
+ pbCode += sizeof(UINT64);
+ *pbCode++ = 0xFF;
+ *pbCode++ = 0xE0;
+
+ _ASSERTE(pbCode - pbCodeStart == ConstVirtualThunkSize);
+ _ASSERTE(CVirtualThunkMgr::IsThunkByASM((PCODE)pbCodeStart));
+
+ return (PCODE)pbCodeStart;
+}
+
+
+#ifdef HAS_REMOTING_PRECODE
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::ActivatePrecodeRemotingThunk private
+//
+// Synopsis: Patch the precode remoting thunk to begin interception
+//
+//+----------------------------------------------------------------------------
+void CTPMethodTable::ActivatePrecodeRemotingThunk()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PORTABILITY_WARNING("CTPMethodTable::ActivatePrecodeRemotingThunk");
+}
+
+#else // HAS_REMOTING_PRECODE
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::CreateStubForNonVirtualMethod public
+//
+// Synopsis: Create a stub for a non virtual method
+//
+//+----------------------------------------------------------------------------
+Stub* CTPMethodTable::CreateStubForNonVirtualMethod(MethodDesc* pMD, CPUSTUBLINKER* psl,
+ LPVOID pvAddrOfCode, Stub* pInnerStub)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Sanity check
+
+ Stub *pStub = NULL;
+
+ // we need a hash table only for virtual methods
+ _ASSERTE(!pMD->IsVirtual());
+
+ // Ensure the TP MethodTable's fields have been initialized.
+ EnsureFieldsInitialized();
+
+ /*
+ NonVirtualMethodStub<thisReg, pvAddrOfCode, pTPMethodTable, pvTPStub>
+ {
+ ;; thisReg: this
+
+ sub rsp, 0x28
+
+ test thisReg, thisReg
+ je JmpAddrLabel
+
+ mov rax, [thisReg]
+ mov r10, <pTPMethodTable>
+ cmp rax, r10
+ jne JmpAddrLabel
+
+ mov [rsp+0x30], rcx ;|
+ mov [rsp+0x38], rdx ;|
+ mov [rsp+0x40], r8 ;|
+ mov [rsp+0x48], r9 ;|
+ ;|
+ mov rax, [thisReg + TransparentProxyObject___stubData] ;|
+ call [thisReg + TransparentProxyObject___stub] ;| EmitCallToStub<pCtxMismatch>
+ ;|
+ mov rcx, [rsp+0x30] ;|
+ mov rdx, [rsp+0x38] ;|
+ mov r8, [rsp+0x40] ;|
+ mov r9, [rsp+0x48] ;|
+ ;|
+ test rax, rax ;|
+ jnz RemotingLabel ;|
+
+ JmpAddrLabel:
+ mov rax, <pvAddrOfCode>
+ add rsp, 0x28
+ jmp rax
+
+ RemotingLabel:
+ mov r10, <pMD>
+ mov rax, <pvTPStub>
+ add rsp, 0x28
+ jmp rax
+ }
+ */
+
+ X86Reg thisReg = kRCX;
+ void* pvTPStub = TransparentProxyStub_CrossContext;
+
+ // Generate label where a null reference exception will be thrown
+ CodeLabel *pJmpAddrLabel = psl->NewCodeLabel();
+ // Generate label where remoting code will execute
+ CodeLabel *pRemotingLabel = psl->NewCodeLabel();
+
+ // NOTE: if you change any of this code, you must update
+ // CNonVirtualThunkMgr::IsThunkByASM.
+
+ // Allocate callee scratch area
+ // sub rsp, 0x28
+ psl->X86EmitSubEsp(0x28);
+
+ // test thisReg, thisReg
+ psl->X86EmitR2ROp(0x85, thisReg, thisReg);
+ // je JmpAddrLabel
+ psl->X86EmitCondJump(pJmpAddrLabel, X86CondCode::kJE);
+
+ // Emit a label here for the debugger. A breakpoint will
+ // be set at the next instruction and the debugger will
+ // call CNonVirtualThunkMgr::TraceManager when the
+ // breakpoint is hit with the thread's context.
+ CodeLabel *pRecheckLabel = psl->NewCodeLabel();
+ psl->EmitLabel(pRecheckLabel);
+
+ // mov rax, [thisReg]
+ psl->X86EmitIndexRegLoad(kRAX, thisReg, 0);
+
+ // mov r10, CTPMethodTable::GetMethodTable()
+ psl->X86EmitRegLoad(kR10, (UINT_PTR)CTPMethodTable::GetMethodTable());
+ // cmp rax, r10
+ psl->X86EmitR2ROp(0x3B, kRAX, kR10);
+
+ // jne JmpAddrLabel
+ psl->X86EmitCondJump(pJmpAddrLabel, X86CondCode::kJNE);
+
+ // CONSIDER: write all possible stubs in asm to ensure param registers are not trashed
+
+ // mov [rsp+0x30], rcx
+ // mov [rsp+0x38], rdx
+ // mov [rsp+0x40], r8
+ // mov [rsp+0x48], r9
+ psl->X86EmitRegSave(kRCX, 0x30);
+ psl->X86EmitRegSave(kRDX, 0x38);
+ psl->X86EmitRegSave(kR8, 0x40);
+ psl->X86EmitRegSave(kR9, 0x48);
+
+ // mov rax, [thisReg + TransparentProxyObject___stub]
+ psl->X86EmitIndexRegLoad(kRAX, thisReg, TransparentProxyObject___stub);
+
+ // mov rcx, [thisReg + TransparentProxyObject___stubData]
+ psl->X86EmitIndexRegLoad(kRCX, thisReg, TransparentProxyObject___stubData);
+
+ // call rax
+ psl->Emit16(0xd0ff);
+
+ // mov rcx, [rsp+0x30]
+ // mov rdx, [rsp+0x38]
+ // mov r8, [rsp+0x40]
+ // mov r9, [rsp+0x48]
+ psl->X86EmitEspOffset(0x8b, kRCX, 0x30);
+ psl->X86EmitEspOffset(0x8b, kRDX, 0x38);
+ psl->X86EmitEspOffset(0x8b, kR8, 0x40);
+ psl->X86EmitEspOffset(0x8b, kR9, 0x48);
+
+ // test rax, rax
+ psl->X86EmitR2ROp(0x85, kRAX, kRAX);
+ // jnz RemotingLabel
+ psl->X86EmitCondJump(pRemotingLabel, X86CondCode::kJNZ);
+
+// pJmpAddrLabel:
+ psl->EmitLabel(pJmpAddrLabel);
+
+ // Make sure that the actual code does not require MethodDesc in r10
+ _ASSERTE(!pMD->RequiresMethodDescCallingConvention());
+
+ // mov rax, <pvAddrOfCode>
+ // add rsp, 0x28
+ // REX.W jmp rax
+ psl->X86EmitTailcallWithESPAdjust(psl->NewExternalCodeLabel(pvAddrOfCode), 0x28);
+
+// pRemotingLabel:
+ psl->EmitLabel(pRemotingLabel);
+
+ // mov r10, <pMD>
+ psl->X86EmitRegLoad(kR10, (UINT_PTR)pMD);
+
+ // mov rax, <pvTPStub>
+ // add rsp, 0x28
+ // REX.W jmp rax
+ psl->X86EmitTailcallWithESPAdjust(psl->NewExternalCodeLabel(pvTPStub), 0x28);
+
+ // Link and produce the stub
+ pStub = psl->LinkInterceptor(pMD->GetLoaderAllocator()->GetStubHeap(),
+ pInnerStub, pvAddrOfCode);
+
+ return pStub;
+}
+
+
+//+----------------------------------------------------------------------------
+//
+// Synopsis: Find an existing thunk or create a new one for the given
+// method descriptor. NOTE: This is used for the methods that do
+// not go through the vtable such as constructors, private and
+// final methods.
+//
+//+----------------------------------------------------------------------------
+PCODE CTPMethodTable::CreateNonVirtualThunkForVirtualMethod(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ CPUSTUBLINKER sl;
+ CPUSTUBLINKER* psl = &sl;
+
+ Stub *pStub = NULL;
+
+ // The thunk has not been created yet. Go ahead and create it.
+ // Compute the address of the slot
+ LPVOID pvEntryPoint = (LPVOID)pMD->GetMethodEntryPoint();
+
+ X86Reg thisReg = kRCX;
+ void* pvStub = CRemotingServices__DispatchInterfaceCall;
+
+ // Generate label where a null reference exception will be thrown
+ CodeLabel *pExceptionLabel = psl->NewCodeLabel();
+
+ // !!! WARNING WARNING WARNING WARNING WARNING !!!
+ //
+ // DO NOT CHANGE this code without changing the thunk recognition
+ // code in CNonVirtualThunkMgr::IsThunkByASM
+ // & CNonVirtualThunkMgr::GetMethodDescByASM
+ //
+ // !!! WARNING WARNING WARNING WARNING WARNING !!!
+
+ // NOTE: constant mov's should use an extended register to force a REX
+ // prefix and the full 64-bit immediate value, so that
+ // g_dwNonVirtualThunkRemotingLabelOffset and
+ // g_dwNonVirtualThunkReCheckLabelOffset are the same for all
+ // generated code.
+
+ // if this == NULL throw NullReferenceException
+ // test rcx, rcx
+ psl->X86EmitR2ROp(0x85, thisReg, thisReg);
+
+ // je ExceptionLabel
+ psl->X86EmitCondJump(pExceptionLabel, X86CondCode::kJE);
+
+ // Generate label where remoting code will execute
+ CodeLabel *pRemotingLabel = psl->NewCodeLabel();
+
+ // Emit a label here for the debugger. A breakpoint will
+ // be set at the next instruction and the debugger will
+ // call CNonVirtualThunkMgr::TraceManager when the
+ // breakpoint is hit with the thread's context.
+ CodeLabel *pRecheckLabel = psl->NewCodeLabel();
+ psl->EmitLabel(pRecheckLabel);
+
+ // If this.MethodTable == TPMethodTable then do RemotingCall
+ // mov rax, [thisReg]
+ psl->X86EmitIndexRegLoad(kRAX, thisReg, 0);
+ // mov r10, CTPMethodTable::GetMethodTable()
+ psl->X86EmitRegLoad(kR10, (UINT_PTR)CTPMethodTable::GetMethodTable());
+ // cmp rax, r10
+ psl->X86EmitR2ROp(0x3B, kRAX, kR10);
+ // je RemotingLabel
+ psl->X86EmitCondJump(pRemotingLabel, X86CondCode::kJE);
+
+ // Exception handling and non-remoting share the
+ // same codepath
+ psl->EmitLabel(pExceptionLabel);
+
+ // Non-RemotingCode
+ // Jump to the vtable slot of the method
+ // mov rax, pvEntryPoint
+ // Encode the mov manually so that it always uses the 64-bit form.
+ //psl->X86EmitRegLoad(kRAX, (UINT_PTR)pvEntryPoint);
+ psl->Emit8(REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT);
+ psl->Emit8(0xb8);
+ psl->EmitBytes((BYTE*)&pvEntryPoint, 8);
+ // jmp rax
+ psl->Emit8(0xff);
+ psl->Emit8(0xe0);
+
+ // Remoting code. Note: CNonVirtualThunkMgr::TraceManager
+ // relies on this label being right after the jmp pvEntryPoint
+ // instruction above. If you move this label, update
+ // CNonVirtualThunkMgr::DoTraceStub.
+ psl->EmitLabel(pRemotingLabel);
+
+ // Save the MethodDesc and go to the TPStub
+ // mov r10, <pMD>
+ psl->X86EmitRegLoad(kR10, (UINT_PTR)pMD);
+
+ // jmp TPStub
+ psl->X86EmitNearJump(psl->NewExternalCodeLabel(pvStub));
+
+ // Link and produce the stub
+ // FUTURE: Do we have to provide the loader heap?
+ pStub = psl->Link(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap());
+
+ // Grab the offset of the RemotingLabel and RecheckLabel
+ // for use in CNonVirtualThunkMgr::DoTraceStub and
+ // TraceManager.
+ DWORD dwOffset;
+
+ dwOffset = psl->GetLabelOffset(pRemotingLabel);
+ ASSERT(!g_dwNonVirtualThunkRemotingLabelOffset || g_dwNonVirtualThunkRemotingLabelOffset == dwOffset);
+ g_dwNonVirtualThunkRemotingLabelOffset = dwOffset;
+
+ dwOffset = psl->GetLabelOffset(pRecheckLabel);
+ ASSERT(!g_dwNonVirtualThunkReCheckLabelOffset || g_dwNonVirtualThunkReCheckLabelOffset == dwOffset);
+ g_dwNonVirtualThunkReCheckLabelOffset = dwOffset;
+
+ return (pStub->GetEntryPoint());
+}
+
+#endif // HAS_REMOTING_PRECODE
+
+//+----------------------------------------------------------------------------
+//
+// Method: CVirtualThunkMgr::DoTraceStub public
+//
+// Synopsis: Traces the stub given the starting address
+//
+//+----------------------------------------------------------------------------
+BOOL CVirtualThunkMgr::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // <TODO> implement this </TODO>
+ return FALSE;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CVirtualThunkMgr::IsThunkByASM public
+//
+// Synopsis: Check assembly to see if this one of our thunks
+//
+//+----------------------------------------------------------------------------
+BOOL CVirtualThunkMgr::IsThunkByASM(PCODE startaddr)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PTR_BYTE pbCode = PTR_BYTE(startaddr);
+
+ // NOTE: this depends on the code generated by
+ // CTPMethodTable::CreateThunkForVirtualMethod.
+
+ // mov r10, <dwSlot>
+ return 0x49 == pbCode[0]
+ && 0xc7 == pbCode[1]
+ && 0xc2 == pbCode[2]
+ // mov rax, TransparentProxyStub
+ && 0x48 == pbCode[7]
+ && 0xb8 == pbCode[8]
+ && (TADDR)TransparentProxyStub == *PTR_TADDR(pbCode+9)
+ // jmp rax
+ && 0xff == pbCode[17]
+ && 0xe0 == pbCode[18];
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CVirtualThunkMgr::GetMethodDescByASM public
+//
+// Synopsis: Parses MethodDesc out of assembly code
+//
+//+----------------------------------------------------------------------------
+MethodDesc *CVirtualThunkMgr::GetMethodDescByASM(PCODE pbThunkCode, MethodTable *pMT)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // NOTE: this depends on the code generated by
+ // CTPMethodTable::CreateThunkForVirtualMethod.
+
+ return pMT->GetMethodDescForSlot(*((DWORD *) (pbThunkCode + 3)));
+}
+
+
+#ifndef HAS_REMOTING_PRECODE
+
+//+----------------------------------------------------------------------------
+//
+// Method: CNonVirtualThunkMgr::TraceManager public
+//
+// Synopsis: Traces the stub given the current context
+//
+//+----------------------------------------------------------------------------
+BOOL CNonVirtualThunkMgr::TraceManager(Thread* thread,
+ TraceDestination* trace,
+ CONTEXT* pContext,
+ BYTE** pRetAddr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(thread, NULL_OK));
+ PRECONDITION(CheckPointer(trace));
+ PRECONDITION(CheckPointer(pContext));
+ PRECONDITION(CheckPointer(pRetAddr));
+ }
+ CONTRACTL_END;
+
+ BOOL bRet = FALSE;
+
+ MethodDesc * pMD = GetMethodDescByASM(GetIP(pContext) - g_dwNonVirtualThunkReCheckLabelOffset);
+
+ LPBYTE pThis = (LPBYTE)pContext->Rcx;
+
+ if ((pThis != NULL) &&
+ (*(LPBYTE*)(SIZE_T)pThis == (LPBYTE)(SIZE_T)CTPMethodTable::GetMethodTable()))
+ {
+ // <TODO>We know that we've got a proxy
+ // in the way. If the proxy is to a remote call, with no
+ // managed code in between, then the debugger doesn't care and
+ // we should just be able to return FALSE.
+ //
+ // </TODO>
+ bRet = FALSE;
+ }
+ else
+ {
+ // No proxy in the way, so figure out where we're really going
+ // to and let the stub manager try to pickup the trace from
+ // there.
+ LPBYTE stubStartAddress = (LPBYTE)GetIP(pContext) -
+ g_dwNonVirtualThunkReCheckLabelOffset;
+
+ // Extract the address of the destination
+ BYTE* pbAddr = (BYTE *)(SIZE_T)(stubStartAddress +
+ g_dwNonVirtualThunkRemotingLabelOffset - 2 - sizeof(void *));
+
+ SIZE_T destAddress = *(SIZE_T *)pbAddr;
+
+ // Ask the stub manager to trace the destination address
+ bRet = StubManager::TraceStub((PCODE)(BYTE *)(size_t)destAddress, trace);
+ }
+
+ // While we may have made it this far, further tracing may reveal
+ // that the debugger can't continue on. Therefore, since there is
+ // no frame currently pushed, we need to tell the debugger where
+ // we're returning to just in case it hits such a situation. We
+ // know that the return address is on the top of the thread's
+ // stack.
+ (*pRetAddr) = *((BYTE**)(size_t)(GetSP(pContext)));
+
+ return bRet;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CNonVirtualThunkMgr::DoTraceStub public
+//
+// Synopsis: Traces the stub given the starting address
+//
+//+----------------------------------------------------------------------------
+BOOL CNonVirtualThunkMgr::DoTraceStub(PCODE stubStartAddress,
+ TraceDestination* trace)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(stubStartAddress != NULL);
+ PRECONDITION(CheckPointer(trace));
+ }
+ CONTRACTL_END;
+
+ BOOL bRet = FALSE;
+
+ if (!IsThunkByASM(stubStartAddress))
+ return FALSE;
+
+ CNonVirtualThunk* pThunk = FindThunk((const BYTE *)stubStartAddress);
+
+ if(NULL != pThunk)
+ {
+ // We can either jump to
+ // (1) a slot in the transparent proxy table (UNMANAGED)
+ // (2) a slot in the non virtual part of the vtable
+ // ... so, we need to return TRACE_MGR_PUSH with the address
+ // at which we want to be called back with the thread's context
+ // so we can figure out which way we're gonna go.
+ if((const BYTE *)stubStartAddress == pThunk->GetThunkCode())
+ {
+ trace->InitForManagerPush(
+ (PCODE) (stubStartAddress + g_dwNonVirtualThunkReCheckLabelOffset),
+ this);
+ bRet = TRUE;
+ }
+ }
+
+ return bRet;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CNonVirtualThunkMgr::IsThunkByASM public
+//
+// Synopsis: Check assembly to see if this one of our thunks
+//
+//+----------------------------------------------------------------------------
+BOOL CNonVirtualThunkMgr::IsThunkByASM(PCODE startaddr)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PTR_BYTE pbCode = PTR_BYTE(startaddr);
+
+ // test rcx, rcx ; 3 bytes
+ return 0x48 == pbCode[0]
+ && 0x85 == pbCode[1]
+ && 0xc9 == pbCode[2]
+ // je ... ; 2 bytes
+ && 0x74 == pbCode[3]
+ // mov rax, [rcx] ; 3 bytes
+ // mov r10, CTPMethodTable::GetMethodTable() ; 2 bytes + MethodTable*
+ && (TADDR)CTPMethodTable::GetMethodTable() == *PTR_TADDR(pbCode + 10);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CNonVirtualThunkMgr::GetMethodDescByASM public
+//
+// Synopsis: Parses MethodDesc out of assembly code
+//
+//+----------------------------------------------------------------------------
+MethodDesc* CNonVirtualThunkMgr::GetMethodDescByASM(PCODE pbThunkCode)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return *((MethodDesc **) (pbThunkCode + g_dwNonVirtualThunkRemotingLabelOffset + 2));
+}
+
+#endif // HAS_REMOTING_PRECODE
+
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::GenericCheckForContextMatch private
+//
+// Synopsis: Calls the stub in the TP & returns TRUE if the contexts
+// match, FALSE otherwise.
+//
+// Note: 1. Called during FieldSet/Get, used for proxy extensibility
+//
+//+----------------------------------------------------------------------------
+BOOL __stdcall CTPMethodTable__GenericCheckForContextMatch(Object* orTP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE; // due to the Object parameter
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Object *StubData = OBJECTREFToObject(((TransparentProxyObject*)orTP)->GetStubData());
+ CTPMethodTable::CheckContextCrossingProc *pfnCheckContextCrossing =
+ (CTPMethodTable::CheckContextCrossingProc*)(((TransparentProxyObject*)orTP)->GetStub());
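+
+ // The stub returns zero when the contexts match (see
+ // CRemotingServices__CheckForContextMatch above), hence the == 0 below.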
+ return pfnCheckContextCrossing(StubData) == 0;
+}
+
+#endif // FEATURE_REMOTING
+
+
diff --git a/src/vm/amd64/stublinkeramd64.cpp b/src/vm/amd64/stublinkeramd64.cpp
new file mode 100644
index 0000000000..719315de3e
--- /dev/null
+++ b/src/vm/amd64/stublinkeramd64.cpp
@@ -0,0 +1,9 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+#include "common.h"
+#include "asmconstants.h"
+
+#include "../i386/stublinkerx86.cpp"
+
diff --git a/src/vm/amd64/stublinkeramd64.h b/src/vm/amd64/stublinkeramd64.h
new file mode 100644
index 0000000000..a18f8e3ddb
--- /dev/null
+++ b/src/vm/amd64/stublinkeramd64.h
@@ -0,0 +1,11 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#ifndef _STUBLINKERAMD64_H_
+#define _STUBLINKERAMD64_H_
+
+#include "../i386/stublinkerx86.h"
+
+#endif // _STUBLINKERAMD64_H_
diff --git a/src/vm/amd64/theprestubamd64.S b/src/vm/amd64/theprestubamd64.S
new file mode 100644
index 0000000000..7966b2c87e
--- /dev/null
+++ b/src/vm/amd64/theprestubamd64.S
@@ -0,0 +1,31 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+NESTED_ENTRY ThePreStub, _TEXT, ProcessCLRException
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ //
+ // call PreStubWorker
+ //
+ lea rdi, [rsp + __PWTB_TransitionBlock] // pTransitionBlock*
+ mov rsi, METHODDESC_REGISTER
+ call PreStubWorker
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ TAILJMP_RAX
+
+NESTED_END ThePreStub, _TEXT
+
+LEAF_ENTRY ThePreStubPatch, _TEXT
+ // make sure that the basic block is unique
+ test eax,34
+PATCH_LABEL ThePreStubPatchLabel
+ ret
+LEAF_END ThePreStubPatch, _TEXT
+
diff --git a/src/vm/amd64/unixasmhelpers.S b/src/vm/amd64/unixasmhelpers.S
new file mode 100644
index 0000000000..cc271ea938
--- /dev/null
+++ b/src/vm/amd64/unixasmhelpers.S
@@ -0,0 +1,175 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+
+//////////////////////////////////////////////////////////////////////////
+//
+// PrecodeFixupThunk
+//
+// The call in the fixup precode initially points to this function.
+// The purpose of this function is to load the MethodDesc and forward the call to the prestub.
+//
+// EXTERN_C VOID __stdcall PrecodeFixupThunk()
+LEAF_ENTRY PrecodeFixupThunk, _TEXT
+
+ pop rax // Pop the return address. It points right after the call instruction in the precode.
+
+ // Inline computation done by FixupPrecode::GetMethodDesc()
+ movzx r10,byte ptr [rax+2] // m_PrecodeChunkIndex
+ movzx r11,byte ptr [rax+1] // m_MethodDescChunkIndex
+ mov rax,qword ptr [rax+r10*8+3]
+ lea METHODDESC_REGISTER,[rax+r11*8]
+
+ // Tail call to prestub
+ jmp ThePreStub
+
+LEAF_END PrecodeFixupThunk, _TEXT
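+
+// For reference, the computation above corresponds roughly to this C-style
+// sketch (variable names are illustrative, taken from the field comments
+// above; the +3 skips past the two index bytes to the chunk base table):
+//
+//     BYTE* pPrecodePostCall = returnAddress;
+//     TADDR chunkBase = *(TADDR*)(pPrecodePostCall + precodeChunkIndex * 8 + 3);
+//     MethodDesc* pMD = (MethodDesc*)(chunkBase + methodDescChunkIndex * 8);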
+
+// EXTERN_C int __fastcall HelperMethodFrameRestoreState(
+// INDEBUG_COMMA(HelperMethodFrame *pFrame)
+// MachState *pState
+// )
+LEAF_ENTRY HelperMethodFrameRestoreState, _TEXT
+
+#ifdef _DEBUG
+ mov rdi, rsi
+#endif
+
+ // Check if the MachState is valid
+ xor eax, eax
+ cmp qword ptr [rdi + OFFSETOF__MachState___pRetAddr], rax
+ jne DoRestore
+ REPRET
+DoRestore:
+
+ //
+ // If a preserved register was pushed onto the stack between
+ // the managed caller and the H_M_F, m_pReg will point to its
+ // location on the stack; the GC has already updated that stack
+ // copy, and it will be popped back into the appropriate
+ // register when the appropriate epilog is run.
+ //
+ // Otherwise, the register is preserved across all the code
+ // in this HCALL or FCALL, so we need to update those registers
+ // here because the GC will have updated our copies in the
+ // frame.
+ //
+ // So, if m_pReg points into the MachState, we need to update
+ // the register here. That's what this macro does.
+ //
+#define RestoreReg(reg) \
+ lea rax, [rdi + OFFSETOF__MachState__m_Capture##reg]; \
+ mov rdx, [rdi + OFFSETOF__MachState__m_p##reg]; \
+ cmp rax, rdx; \
+ cmove reg, [rax];
+// .endm
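+
+// For example, RestoreReg(Rbx) expands to roughly:
+// lea rax, [rdi + OFFSETOF__MachState__m_CaptureRbx]
+// mov rdx, [rdi + OFFSETOF__MachState__m_pRbx]
+// cmp rax, rdx
+// cmove Rbx, [rax]
+// i.e. rbx is reloaded from the captured copy only when m_pRbx still points
+// at the capture slot inside the MachState itself.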
+
+ //RestoreReg(Rdi)
+ //RestoreReg(Rsi)
+ RestoreReg(Rbx)
+ RestoreReg(Rbp)
+ RestoreReg(R12)
+ RestoreReg(R13)
+ RestoreReg(R14)
+ RestoreReg(R15)
+
+ xor eax, eax
+ ret
+
+LEAF_END HelperMethodFrameRestoreState, _TEXT
+
+//////////////////////////////////////////////////////////////////////////
+//
+// NDirectImportThunk
+//
+// In addition to being called by the EE, this function can be called
+// directly from code generated by JIT64 for CRT optimized direct
+// P/Invoke calls. If it is modified, the JIT64 compiler's code
+// generation will need to be altered accordingly.
+//
+// EXTERN_C VOID __stdcall NDirectImportThunk()//
+NESTED_ENTRY NDirectImportThunk, _TEXT
+
+ //
+ // Save integer parameter registers.
+ // Make sure to preserve r11 as well as it is used to pass the stack argument size from JIT
+ //
+ PUSH_ARGUMENT_REGISTERS
+ push r11
+
+ //
+ // Allocate space for XMM parameter registers
+ //
+ alloc_stack 0x80
+
+ SAVE_FLOAT_ARGUMENT_REGISTERS 0
+
+ END_PROLOGUE
+
+ //
+ // Call NDirectImportWorker w/ the NDirectMethodDesc*
+ //
+ mov rdi, METHODDESC_REGISTER
+ call NDirectImportWorker
+
+ RESTORE_FLOAT_ARGUMENT_REGISTERS 0
+
+ //
+ // epilogue, rax contains the native target address
+ //
+ add rsp, 0x80
+
+ //
+ // Restore integer parameter registers and r11
+ //
+ pop r11
+ POP_ARGUMENT_REGISTERS
+
+ TAILJMP_RAX
+NESTED_END NDirectImportThunk, _TEXT
+
+// EXTERN_C void moveOWord(LPVOID* src, LPVOID* target);
+// <NOTE>
+// MOVDQA is not an atomic operation. You need to call this function in a crst.
+// </NOTE>
+LEAF_ENTRY moveOWord, _TEXT
+ movdqa xmm0, [rdi]
+ movdqa [rsi], xmm0
+
+ ret
+LEAF_END moveOWord, _TEXT
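+
+// A hypothetical call-site sketch (both pointers must be 16-byte aligned,
+// since movdqa faults on unaligned addresses):
+//
+//     alignas(16) LPVOID src[2], dst[2];
+//     moveOWord(src, dst); // copies one 16-byte oword from src to dst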
+
+//------------------------------------------------
+// JIT_RareDisableHelper
+//
+// The JIT expects this helper to preserve registers used for return values
+//
+NESTED_ENTRY JIT_RareDisableHelper, _TEXT
+
+ // First integer return register
+ push rax
+ // Second integer return register
+ push rdx
+ alloc_stack 0x28
+ END_PROLOGUE
+ // First float return register
+ movdqa [rsp], xmm0
+ // Second float return register
+ movdqa [rsp+0x10], xmm1
+
+ call JIT_RareDisableHelperWorker
+
+ movdqa xmm0, [rsp]
+ movdqa xmm1, [rsp+0x10]
+ add rsp, 0x28
+ pop rdx
+ pop rax
+ ret
+
+NESTED_END JIT_RareDisableHelper, _TEXT
diff --git a/src/vm/amd64/unixasmmacros.inc b/src/vm/amd64/unixasmmacros.inc
new file mode 100644
index 0000000000..ded46bddce
--- /dev/null
+++ b/src/vm/amd64/unixasmmacros.inc
@@ -0,0 +1,283 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#define INVALIDGCVALUE -0x33333333 // 0CCCCCCCDh - the assembler considers it to be a signed integer constant
+
+.macro NOP_3_BYTE
+ nop dword ptr [rax]
+.endm
+
+.macro NOP_2_BYTE
+ xchg ax, ax
+.endm
+
+.macro REPRET
+ .byte 0xf3
+ .byte 0xc3
+.endm
+
+.macro TAILJMP_RAX
+ .byte 0x48
+ .byte 0xFF
+ .byte 0xE0
+.endm
+
+.macro PATCH_LABEL Name
+ .global \Name
+\Name:
+.endm
+
+.macro LEAF_ENTRY Name, Section
+ .global \Name
+ .type \Name, %function
+\Name:
+.endm
+
+.macro LEAF_END_MARKED Name, Section
+\Name\()_End:
+ .global \Name\()_End
+ .size \Name, .-\Name
+.endm
+
+.macro LEAF_END Name, Section
+ LEAF_END_MARKED \Name, \Section
+.endm
+
+.macro PREPARE_EXTERNAL_VAR Name, HelperReg
+ mov \HelperReg, [rip + \Name@GOTPCREL]
+.endm
+
+.macro push_nonvol_reg Register
+ push \Register
+ //.pushreg \Register
+.endm
+
+.macro NESTED_ENTRY Name, Section, Handler
+ LEAF_ENTRY \Name, \Section
+.endm
+
+.macro NESTED_END Name, Section
+ LEAF_END \Name, \Section
+.endm
+
+.macro END_PROLOGUE
+.endm
+
+.macro alloc_stack Size
+.att_syntax
+ lea -\Size(%rsp), %rsp
+.intel_syntax noprefix
+ //.allocstack \Size
+.endm
+
+.macro set_frame Reg, Offset
+ lea \Reg, \Offset[rsp]
+.endm
+
+.macro save_reg_postrsp Reg, Offset
+
+ .ifdef ___FRAME_REG_SET
+ .error "save_reg_postrsp cannot be used after set_frame"
+ .endif
+
+ __Offset = \Offset
+ mov qword ptr [rsp + __Offset], \Reg
+
+//
+// TODO: find the right directive
+// this one gives an "unknown directive" error
+//
+// .savereg \Reg, \Offset
+
+ ___STACK_ADJUSTMENT_FORBIDDEN = 1
+
+.endm
+
+.macro restore_reg Reg, Offset
+ __Offset = \Offset
+ mov \Reg, [rsp + __Offset]
+.endm
+
+.macro save_xmm128_postrsp Reg, Offset
+
+ .ifdef ___FRAME_REG_SET
+ .error "save_reg_postrsp cannot be used after set_frame"
+ .endif
+
+ __Offset = \Offset
+ movdqa [rsp + __Offset], \Reg
+
+//
+// TODO: find the right directive
+// this one gives an "unknown directive" error
+//
+// .savexmm128 \Reg, \Offset
+
+ ___STACK_ADJUSTMENT_FORBIDDEN = 1
+
+.endm
+
+.macro restore_xmm128 Reg, ofs
+ __Offset = \ofs
+ movdqa \Reg, [rsp + __Offset]
+.endm
+
+.macro POP_CALLEE_SAVED_REGISTERS
+
+ pop rbx
+ pop rbp
+ pop r12
+ pop r13
+ pop r14
+ pop r15
+
+.endm
+
+.macro PUSH_ARGUMENT_REGISTERS
+
+ push r9
+ push r8
+ push rdx
+ push rcx
+ push rsi
+ push rdi
+
+.endm
+
+.macro POP_ARGUMENT_REGISTERS
+
+ pop rdi
+ pop rsi
+ pop rcx
+ pop rdx
+ pop r8
+ pop r9
+
+.endm
+
+.macro SAVE_FLOAT_ARGUMENT_REGISTERS ofs
+
+ save_xmm128_postrsp xmm0, \ofs
+ save_xmm128_postrsp xmm1, \ofs + 0x10
+ save_xmm128_postrsp xmm2, \ofs + 0x20
+ save_xmm128_postrsp xmm3, \ofs + 0x30
+ save_xmm128_postrsp xmm4, \ofs + 0x40
+ save_xmm128_postrsp xmm5, \ofs + 0x50
+ save_xmm128_postrsp xmm6, \ofs + 0x60
+ save_xmm128_postrsp xmm7, \ofs + 0x70
+
+.endm
+
+.macro RESTORE_FLOAT_ARGUMENT_REGISTERS ofs
+
+ restore_xmm128 xmm0, \ofs
+ restore_xmm128 xmm1, \ofs + 0x10
+ restore_xmm128 xmm2, \ofs + 0x20
+ restore_xmm128 xmm3, \ofs + 0x30
+ restore_xmm128 xmm4, \ofs + 0x40
+ restore_xmm128 xmm5, \ofs + 0x50
+ restore_xmm128 xmm6, \ofs + 0x60
+ restore_xmm128 xmm7, \ofs + 0x70
+
+.endm
+
+// Stack layout:
+//
+// (stack parameters)
+// ...
+// return address
+// CalleeSavedRegisters::r15
+// CalleeSavedRegisters::r14
+// CalleeSavedRegisters::r13
+// CalleeSavedRegisters::r12
+// CalleeSavedRegisters::rbp
+// CalleeSavedRegisters::rbx
+// ArgumentRegisters::r9
+// ArgumentRegisters::r8
+// ArgumentRegisters::rcx
+// ArgumentRegisters::rdx
+// ArgumentRegisters::rsi
+// ArgumentRegisters::rdi <- __PWTB_StackAlloc, __PWTB_TransitionBlock
+// padding to align xmm save area
+// xmm7
+// xmm6
+// xmm5
+// xmm4
+// xmm3
+// xmm2
+// xmm1
+// xmm0 <- __PWTB_FloatArgumentRegisters
+// extra locals + padding to qword align
+.macro PROLOG_WITH_TRANSITION_BLOCK extraLocals = 0, stackAllocOnEntry = 0, stackAllocSpill1, stackAllocSpill2, stackAllocSpill3
+
+ __PWTB_FloatArgumentRegisters = \extraLocals
+
+ .if ((__PWTB_FloatArgumentRegisters % 16) != 0)
+ __PWTB_FloatArgumentRegisters = __PWTB_FloatArgumentRegisters + 8
+ .endif
+
+ __PWTB_StackAlloc = __PWTB_FloatArgumentRegisters + 8 * 16 + 8 // 8 floating point registers
+ __PWTB_TransitionBlock = __PWTB_StackAlloc
+
+ .if \stackAllocOnEntry >= 4*8
+ .error "Max supported stackAllocOnEntry is 3*8"
+ .endif
+
+ .if \stackAllocOnEntry > 0
+ //.allocstack \stackAllocOnEntry
+ .endif
+
+ // PUSH_CALLEE_SAVED_REGISTERS expanded here
+
+ .if \stackAllocOnEntry < 8
+ push_nonvol_reg r15
+ .endif
+
+ .if \stackAllocOnEntry < 2*8
+ push_nonvol_reg r14
+ .endif
+
+ .if \stackAllocOnEntry < 3*8
+ push_nonvol_reg r13
+ .endif
+
+ push_nonvol_reg r12
+ push_nonvol_reg rbp
+ push_nonvol_reg rbx
+
+ // ArgumentRegisters
+ PUSH_ARGUMENT_REGISTERS
+
+ .if \stackAllocOnEntry >= 3*8
+ mov \stackAllocSpill3, [rsp + 0x48]
+ save_reg_postrsp r13, 0x48
+ .endif
+
+ .if \stackAllocOnEntry >= 2*8
+ mov \stackAllocSpill2, [rsp + 0x50]
+ save_reg_postrsp r14, 0x50
+ .endif
+
+ .if \stackAllocOnEntry >= 8
+ mov \stackAllocSpill1, [rsp + 0x58]
+ save_reg_postrsp r15, 0x58
+ .endif
+
+ alloc_stack __PWTB_StackAlloc
+ SAVE_FLOAT_ARGUMENT_REGISTERS __PWTB_FloatArgumentRegisters
+
+ END_PROLOGUE
+
+.endm
+
+.macro EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+
+ RESTORE_FLOAT_ARGUMENT_REGISTERS __PWTB_FloatArgumentRegisters
+ lea rsp, [rsp + __PWTB_StackAlloc]
+ POP_ARGUMENT_REGISTERS
+ POP_CALLEE_SAVED_REGISTERS
+
+.endm
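+
+// A usage sketch (hypothetical stub; ThePreStub in theprestubamd64.S in this
+// change follows the same shape):
+//
+// NESTED_ENTRY SomeStub, _TEXT, ProcessCLRException
+//     PROLOG_WITH_TRANSITION_BLOCK
+//     lea rdi, [rsp + __PWTB_TransitionBlock]
+//     // ... call the C++ worker, target address returned in rax ...
+//     EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+//     TAILJMP_RAX
+// NESTED_END SomeStub, _TEXT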
+
diff --git a/src/vm/amd64/unixstubs.cpp b/src/vm/amd64/unixstubs.cpp
new file mode 100644
index 0000000000..2615bdb715
--- /dev/null
+++ b/src/vm/amd64/unixstubs.cpp
@@ -0,0 +1,158 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "common.h"
+
+extern "C"
+{
+ void RedirectForThrowControl()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void ErectWriteBarrier_ASM(Object** dst, Object* ref)
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void ExternalMethodFixupPatchLabel()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void ExternalMethodFixupStub()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void GenericPInvokeCalliHelper()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void NakedThrowHelper()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void PInvokeStubForHost()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void PInvokeStubForHostInner(DWORD dwStackSize, LPVOID pStackFrame, LPVOID pTarget)
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void SinglecastDelegateInvokeStub()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void TheUMEntryPrestub()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void UMThunkStub()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void VarargPInvokeStub()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void STDCALL UM2MThunk_WrapperHelper(void *pThunkArgs,
+ int argLen,
+ void *pAddr,
+ UMEntryThunk *pEntryThunk,
+ Thread *pThread)
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void VarargPInvokeStub_RetBuffArg()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void VirtualMethodFixupPatchLabel()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void VirtualMethodFixupStub()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ DWORD getcpuid(DWORD arg, unsigned char result[16])
+ {
+ DWORD eax;
+ __asm(" xor %%ecx, %%ecx\n" \
+ " cpuid\n" \
+ " mov %%eax, 0(%[result])\n" \
+ " mov %%ebx, 4(%[result])\n" \
+ " mov %%ecx, 8(%[result])\n" \
+ " mov %%edx, 12(%[result])\n" \
+ : "=a"(eax) /*output in eax*/\
+ : "a"(arg), [result]"r"(result) /*inputs - arg in eax, result in any register*/\
+ : "eax", "rbx", "ecx", "edx" /* registers that are clobbered*/
+ );
+ return eax;
+ }
+
+ DWORD getextcpuid(DWORD arg1, DWORD arg2, unsigned char result[16])
+ {
+ DWORD eax;
+ __asm(" cpuid\n" \
+ " mov %%eax, 0(%[result])\n" \
+ " mov %%ebx, 4(%[result])\n" \
+ " mov %%ecx, 8(%[result])\n" \
+ " mov %%edx, 12(%[result])\n" \
+ : "=a"(eax) /*output in eax*/\
+ : "c"(arg1), "a"(arg2), [result]"r"(result) /*inputs - arg1 in ecx, arg2 in eax, result in any register*/\
+ : "eax", "rbx", "ecx", "edx" /* registers that are clobbered*/
+ );
+ return eax;
+ }
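+
+ // A usage sketch: for leaf 0, CPUID reports the maximum supported standard
+ // leaf in EAX and the vendor id in EBX/ECX/EDX, which the code above stores
+ // at offsets 0/4/8/12 of the result buffer:
+ //
+ // unsigned char buffer[16];
+ // DWORD maxLeaf = getcpuid(0, buffer);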
+
+ void STDCALL JIT_MemCpy(void *dest, const void *src, SIZE_T count)
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void STDCALL JIT_MemCpy_End()
+ {
+ }
+
+ void STDCALL JIT_MemSet(void *dest, int c, SIZE_T count)
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void STDCALL JIT_MemSet_End()
+ {
+ }
+
+ void STDCALL JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle)
+ {
+ }
+
+#ifdef FEATURE_PREJIT
+ void StubDispatchFixupStub()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+#endif
+
+ void StubDispatchFixupPatchLabel()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+};
diff --git a/src/vm/amd64/virtualcallstubamd64.S b/src/vm/amd64/virtualcallstubamd64.S
new file mode 100644
index 0000000000..025e165c17
--- /dev/null
+++ b/src/vm/amd64/virtualcallstubamd64.S
@@ -0,0 +1,90 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+
+// This is the number of times a successful chain lookup will occur before the
+// entry is promoted to the front of the chain. This is declared as extern because
+// the default value (CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT) is defined in the header.
+// extern size_t g_dispatch_cache_chain_success_counter;
+CHAIN_SUCCESS_COUNTER = g_dispatch_cache_chain_success_counter
+
+// The reason for not using .equ or '=' here is that otherwise the assembler compiles e.g.
+// mov rax, BACKPATCH_FLAG as mov rax, [BACKPATCH_FLAG]
+#define BACKPATCH_FLAG 1 // Also known as SDF_ResolveBackPatch in the EE
+#define PROMOTE_CHAIN_FLAG 2 // Also known as SDF_ResolvePromoteChain in the EE
+#define INITIAL_SUCCESS_COUNT 0x100
+
+// On Input:
+// r11 contains the address of the indirection cell (with the flags in the low bits)
+// [rsp+0] m_Datum: contains the dispatch token (slot number or MethodDesc) for the target
+// or the ResolveCacheElem when r11 has the PROMOTE_CHAIN_FLAG set
+// [rsp+8] m_ReturnAddress: contains the return address of caller to stub
+
+NESTED_ENTRY ResolveWorkerAsmStub, _TEXT
+
+ PROLOG_WITH_TRANSITION_BLOCK 0, 8, rdx
+
+ // token stored in rdx by prolog
+
+ lea rdi, [rsp + __PWTB_TransitionBlock] // pTransitionBlock
+ mov rsi, r11 // indirection cell + flags
+ mov rcx, rsi
+ and rcx, 7 // flags
+ sub rsi, rcx // indirection cell
+
+ call VSD_ResolveWorker
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ TAILJMP_RAX
+
+NESTED_END ResolveWorkerAsmStub, _TEXT
+
+// extern void ResolveWorkerChainLookupAsmStub()
+LEAF_ENTRY ResolveWorkerChainLookupAsmStub, _TEXT
+// This will perform a quick chained lookup of the entry if the initial cache lookup fails
+// On Input:
+// rsi contains our type (MethodTable)
+// r10 contains our contract (DispatchToken)
+// r11 contains the address of the indirection (and the flags in the low two bits)
+// [rsp+0x00] contains the pointer to the ResolveCacheElem
+// [rsp+0x08] contains the saved value of rsi
+// [rsp+0x10] contains the return address of caller to stub
+//
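+// The offsets used below imply this ResolveCacheElem layout:
+// +0x00 MethodTable* (compared against rsi)
+// +0x08 DispatchToken (compared against r10)
+// +0x10 ImplTarget (jumped to on a hit)
+// +0x18 pNext (chain link followed by MainLoop_RWCLAS)
+//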
+ mov rax, BACKPATCH_FLAG // First we check if r11 has the BACKPATCH_FLAG set
+ and rax, r11 // Set the flags based on (BACKPATCH_FLAG and r11)
+ pop rax // pop the pointer to the ResolveCacheElem from the top of stack (leaving the flags unchanged)
+ jnz Fail_RWCLAS // If the BACKPATCH_FLAG is set we will go directly to the ResolveWorkerAsmStub
+
+MainLoop_RWCLAS:
+ mov rax, [rax+18h] // get the next entry in the chain (don't bother checking the first entry again)
+ test rax,rax // test if we hit a terminating NULL
+ jz Fail_RWCLAS
+
+ cmp rsi, [rax+00h] // compare our MT with the one in the ResolveCacheElem
+ jne MainLoop_RWCLAS
+ cmp r10, [rax+08h] // compare our DispatchToken with one in the ResolveCacheElem
+ jne MainLoop_RWCLAS
+Success_RWCLAS:
+ PREPARE_EXTERNAL_VAR CHAIN_SUCCESS_COUNTER, rsi
+ sub qword ptr [rsi],1 // decrement success counter
+ jl Promote_RWCLAS
+ mov rax, [rax+10h] // get the ImplTarget
+ pop rsi
+ jmp rax
+
+Promote_RWCLAS: // Move this entry to the head position of the chain
+ // be quick to reset the counter so we don't get a bunch of contending threads
+ mov qword ptr [rsi], INITIAL_SUCCESS_COUNT
+ or r11, PROMOTE_CHAIN_FLAG
+ mov r10, rax // We pass the ResolveCacheElem to ResolveWorkerAsmStub instead of the DispatchToken
+Fail_RWCLAS:
+ pop rsi // Restore the original saved rsi value
+ push r10 // pass the DispatchToken or ResolveCacheElem to promote to ResolveWorkerAsmStub
+
+ jmp ResolveWorkerAsmStub
+
+LEAF_END ResolveWorkerChainLookupAsmStub, _TEXT
diff --git a/src/vm/amd64/virtualcallstubcpu.hpp b/src/vm/amd64/virtualcallstubcpu.hpp
new file mode 100644
index 0000000000..3841d6f45f
--- /dev/null
+++ b/src/vm/amd64/virtualcallstubcpu.hpp
@@ -0,0 +1,791 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: AMD64/VirtualCallStubCpu.hpp
+//
+
+
+
+//
+
+// See code:VirtualCallStubManager for details
+//
+// ============================================================================
+
+#ifndef _VIRTUAL_CALL_STUB_AMD64_H
+#define _VIRTUAL_CALL_STUB_AMD64_H
+
+#include "dbginterface.h"
+
+//#define STUB_LOGGING
+
+#pragma pack(push, 1)
+// since we are placing code, we want byte packing of the structs
+
+#define USES_LOOKUP_STUBS 1
+
+/*********************************************************************************************
+Stubs that contain code are all part of larger structs called Holders. There is a
+Holder for each kind of stub, i.e. XXXStub is contained within XXXHolder. Holders are
+essentially an implementation trick that allowed rearranging the code sequences more
+easily while trying out different alternatives, and for dealing with any alignment
+issues in a way that was mostly immune to the actual code sequences. These Holders
+should be revisited when the stub code sequences are fixed, since in many cases they
+add extra space to a stub that is not really needed.
+
+Stubs are placed in cache and hash tables. Since unaligned access of data in memory
+is very slow, the keys used in those tables should be aligned. The things used as keys
+typically also occur in the generated code, e.g. a token as an immediate part of an instruction.
+For now, to avoid alignment computations as different code strategies are tried out, the key
+fields are all in the Holders. Eventually, many of these fields should be dropped, and the instruction
+streams aligned so that the immediate fields fall on aligned boundaries.
+*/
+
+#if USES_LOOKUP_STUBS
+
+struct LookupStub;
+struct LookupHolder;
+
+/*LookupStub**************************************************************************************
+Virtual and interface call sites are initially setup to point at LookupStubs.
+This is because the runtime type of the <this> pointer is not yet known,
+so the target cannot be resolved. Note: if the jit is able to determine the runtime type
+of the <this> pointer, it should be generating a direct call not a virtual or interface call.
+This stub pushes a lookup token onto the stack to identify the sought after method, and then
+jumps into the EE (VirtualCallStubManager::ResolveWorkerStub) to effectuate the lookup and
+transfer of control to the appropriate target method implementation, perhaps patching of the call site
+along the way to point to a more appropriate stub. Hence callsites that point to LookupStubs
+get quickly changed to point to another kind of stub.
+*/
+struct LookupStub
+{
+ inline PCODE entryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_entryPoint[0]; }
+
+ inline size_t token() { LIMITED_METHOD_CONTRACT; return _token; }
+ inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(LookupStub); }
+
+private:
+ friend struct LookupHolder;
+
+ // The lookup entry point starts with a nop in order to allow us to quickly see
+ // if the stub is a lookup stub or a dispatch stub. We can read the first byte
+ // of a stub to find out what kind of a stub we have.
+
+ BYTE _entryPoint [3]; // 90 nop
+ // 48 B8 mov rax,
+ size_t _token; // xx xx xx xx xx xx xx xx 64-bit address
+ BYTE part2 [3]; // 50 push rax
+ // 48 B8 mov rax,
+ size_t _resolveWorkerAddr; // xx xx xx xx xx xx xx xx 64-bit address
+ BYTE part3 [2]; // FF E0 jmp rax
+};
+
+/* LookupHolders are the containers for LookupStubs, they provide for any alignment of
+stubs as necessary. In the case of LookupStubs, alignment is necessary since
+LookupStubs are placed in a hash table keyed by token. */
+struct LookupHolder
+{
+ static void InitializeStatic();
+
+ void Initialize(PCODE resolveWorkerTarget, size_t dispatchToken);
+
+ LookupStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
+
+ static LookupHolder* FromLookupEntry(PCODE lookupEntry);
+
+private:
+ friend struct LookupStub;
+
+ LookupStub _stub;
+};
+
+#endif // USES_LOOKUP_STUBS
+
+struct DispatchStub;
+struct DispatchStubShort;
+struct DispatchStubLong;
+struct DispatchHolder;
+
+/*DispatchStub**************************************************************************************
+The structure of a full dispatch stub in memory is a DispatchStub followed contiguously in memory
+by either a DispatchStubShort or a DispatchStubLong. DispatchStubShort is used when the resolve
+stub (failTarget()) is reachable by a rel32 (DISPL) jump. We make a pretty good effort to make sure
+that the stub heaps are set up so that this is the case. If we allocate enough stubs that the heap
+ends up allocating in a new block that is further away than a DISPL jump can go, then we end up using
+a DispatchStubLong, which is bigger but uses a full 64-bit jump. */
+
+/*DispatchStubShort*********************************************************************************
+This is the logical continuation of DispatchStub for the case when the failure target is within
+a rel32 jump (DISPL). */
+struct DispatchStubShort
+{
+ friend struct DispatchHolder;
+ friend struct DispatchStub;
+
+ static BOOL isShortStub(LPCBYTE pCode);
+ inline PCODE implTarget() const { LIMITED_METHOD_CONTRACT; return (PCODE) _implTarget; }
+ inline PCODE failTarget() const { LIMITED_METHOD_CONTRACT; return (PCODE) &_failDispl + sizeof(DISPL) + _failDispl; }
+
+private:
+ BYTE part1 [2]; // 0f 85 jne
+ DISPL _failDispl; // xx xx xx xx failEntry ;must be forward jmp for perf reasons
+ BYTE part2 [2]; // 48 B8 mov rax,
+ size_t _implTarget; // xx xx xx xx xx xx xx xx 64-bit address
+ BYTE part3 [2]; // FF E0 jmp rax
+
+ // 31 bytes long, need 1 byte of padding to 8-byte align.
+ BYTE alignPad [1]; // cc
+};
+
+inline BOOL DispatchStubShort::isShortStub(LPCBYTE pCode)
+{
+ LIMITED_METHOD_CONTRACT;
+ return reinterpret_cast<DispatchStubShort const *>(pCode)->part1[0] == 0x0f;
+}
+
+
+/*DispatchStubLong**********************************************************************************
+This is the logical continuation of DispatchStub for the case when the failure target is not
+reachable by a rel32 jump (DISPL). */
+struct DispatchStubLong
+{
+ friend struct DispatchHolder;
+ friend struct DispatchStub;
+
+ static inline BOOL isLongStub(LPCBYTE pCode);
+ inline PCODE implTarget() const { LIMITED_METHOD_CONTRACT; return (PCODE) _implTarget; }
+ inline PCODE failTarget() const { LIMITED_METHOD_CONTRACT; return (PCODE) _failTarget; }
+
+private:
+ BYTE part1 [1]; // 75 jne
+ BYTE _failDispl; // xx failLabel
+ BYTE part2 [2]; // 48 B8 mov rax,
+ size_t _implTarget; // xx xx xx xx xx xx xx xx 64-bit address
+ BYTE part3 [2]; // FF E0 jmp rax
+ // failLabel:
+ BYTE part4 [2]; // 48 B8 mov rax,
+ size_t _failTarget; // xx xx xx xx xx xx xx xx 64-bit address
+ BYTE part5 [2]; // FF E0 jmp rax
+
+ // 39 bytes long, need 1 byte of padding to 8-byte align.
+ BYTE alignPad [1]; // cc
+};
+
+inline BOOL DispatchStubLong::isLongStub(LPCBYTE pCode)
+{
+ LIMITED_METHOD_CONTRACT;
+ return reinterpret_cast<DispatchStubLong const *>(pCode)->part1[0] == 0x75;
+}
+
+/*DispatchStub**************************************************************************************
+Monomorphic and mostly monomorphic call sites eventually point to DispatchStubs.
+A dispatch stub has an expected type (expectedMT), target address (target) and fail address (failure).
+If the <this> object at the call site is in fact of the expected type, then
+control is transferred to the target address, the method implementation. If not,
+then control is transferred to the fail address, a fail stub (see below) where a polymorphic
+lookup is done to find the correct address to go to.
+
+implementation note: Order, choice of instructions, and branch directions
+should be carefully tuned since it can have an inordinate effect on performance. Particular
+attention needs to be paid to the effects on the BTB and branch prediction, both in the small
+and in the large, i.e. it needs to run well in the face of BTB overflow--using static predictions.
+Note that since this stub is only used for mostly monomorphic callsites (ones that stop being
+mostly monomorphic get patched to something else), the conditional jump "jne failure" is mostly not taken; hence it is important
+that branch prediction statically predicts this, which means it must be a forward jump. The alternative
+is to reverse the order of the jumps and make sure that the resulting conditional jump "je implTarget"
+is statically predicted as taken, i.e. a backward jump. The current choice was taken since it was easier
+to control the placement of the stubs than control the placement of the jitted code and the stubs. */
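+
+// A sketch of the control flow a dispatch stub encodes (THIS_REG holds the
+// <this> pointer; expectedMT, implTarget and failTarget stand for the fields
+// patched in at initialization):
+//
+//     if (*(size_t*)this_ptr == expectedMT)  // mov rax, expectedMT / cmp [THIS_REG], rax
+//         goto implTarget;                   // mov rax, implTarget / jmp rax
+//     else
+//         goto failTarget;                   // jne failure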
+struct DispatchStub
+{
+ friend struct DispatchHolder;
+
+ enum DispatchStubType
+ {
+ e_TYPE_SHORT,
+ e_TYPE_LONG,
+ };
+
+ inline DispatchStubType const type() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(DispatchStubShort::isShortStub(reinterpret_cast<LPCBYTE>(this + 1))
+ || DispatchStubLong::isLongStub(reinterpret_cast<LPCBYTE>(this + 1)));
+ return DispatchStubShort::isShortStub((BYTE *)(this + 1)) ? e_TYPE_SHORT : e_TYPE_LONG;
+ }
+
+ inline static size_t size(DispatchStubType type)
+ {
+ STATIC_CONTRACT_LEAF;
+ return sizeof(DispatchStub) +
+ ((type == e_TYPE_SHORT) ? sizeof(DispatchStubShort) : sizeof(DispatchStubLong));
+ }
+
+ inline PCODE entryPoint() const { LIMITED_METHOD_CONTRACT; return (PCODE)&_entryPoint[0]; }
+ inline size_t expectedMT() const { LIMITED_METHOD_CONTRACT; return _expectedMT; }
+ inline size_t size() const { WRAPPER_NO_CONTRACT; return size(type()); }
+
+ inline PCODE implTarget() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (type() == e_TYPE_SHORT)
+ return getShortStub()->implTarget();
+ else
+ return getLongStub()->implTarget();
+ }
+
+ inline PCODE failTarget() const
+ {
+ if (type() == e_TYPE_SHORT)
+ return getShortStub()->failTarget();
+ else
+ return getLongStub()->failTarget();
+ }
+
+private:
+ inline DispatchStubShort const *getShortStub() const
+ { LIMITED_METHOD_CONTRACT; return reinterpret_cast<DispatchStubShort const *>(this + 1); }
+
+ inline DispatchStubLong const *getLongStub() const
+ { LIMITED_METHOD_CONTRACT; return reinterpret_cast<DispatchStubLong const *>(this + 1); }
+
+ BYTE _entryPoint [2]; // 48 B8 mov rax,
+ size_t _expectedMT; // xx xx xx xx xx xx xx xx 64-bit address
+ BYTE part1 [3]; // 48 39 XX cmp [THIS_REG], rax
+
+ // Followed by either DispatchStubShort or DispatchStubLong, depending
+ // on whether we were able to make a rel32 or had to make an abs64 jump
+ // to the resolve stub on failure.
+
+};
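+
+// A complete dispatch stub is therefore laid out in memory as one of:
+//
+//     [DispatchStub : 13 bytes][DispatchStubShort : 19 bytes] = 32 bytes
+//     [DispatchStub : 13 bytes][DispatchStubLong  : 27 bytes] = 40 bytes
+//
+// (sizes include the int3 alignment pad; the static_assert_no_msg checks in
+// DispatchHolder::InitializeStatic below verify the 8-byte alignment).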
+
+/* DispatchHolders are the containers for DispatchStubs, they provide for any alignment of
+stubs as necessary. DispatchStubs are placed in a hashtable and in a cache. The keys for both
+are the pair expectedMT and token. Efficiency of the hash table is not a big issue,
+since lookups in it are fairly rare. Efficiency of the cache is paramount since it is accessed frequently
+(see ResolveStub below). Currently we are storing both of these fields in the DispatchHolder to simplify
+alignment issues. If inlineMT in the stub itself was aligned, then it could be the expectedMT field.
+While the token field can be logically gotten by following the failure target to the failEntryPoint
+of the ResolveStub and then to the token over there, for perf reasons of cache access it is duplicated here.
+This allows us to use DispatchStubs in the cache. The alternative is to provide some other immutable struct
+for the cache composed of the triplet (expectedMT, token, target) and some sort of reclamation scheme when
+they are thrown out of the cache via overwrites (since concurrency will make the obvious approaches invalid).
+*/
+
+/* @workaround for ee resolution - Since the EE does not currently have a resolver function that
+does what we want (see notes in the implementation of VirtualCallStubManager::Resolver), we are
+using dispatch stubs to simulate what we want. That means that inlineTarget, which should be immutable,
+is in fact written. Hence we have moved target out into the holder and aligned it so we can
+atomically update it. When we get a resolver function that does what we want, we can drop this field,
+and live with just the inlineTarget field in the stub itself, since immutability will hold.*/
+struct DispatchHolder
+{
+ static void InitializeStatic();
+
+ void Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT,
+ DispatchStub::DispatchStubType type);
+
+ static size_t GetHolderSize(DispatchStub::DispatchStubType type)
+ { STATIC_CONTRACT_WRAPPER; return DispatchStub::size(type); }
+
+ static BOOL CanShortJumpDispatchStubReachFailTarget(PCODE failTarget, LPCBYTE stubMemory)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ LPCBYTE pFrom = stubMemory + sizeof(DispatchStub) + offsetof(DispatchStubShort, part2[0]);
+ size_t cbRelJump = failTarget - (PCODE)pFrom;
+ return FitsInI4(cbRelJump);
+ }
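+
+ // Note: pFrom above is the first byte after the rel32 displacement field
+ // (_failDispl is immediately followed by part2), which is the base the CPU
+ // uses for a rel32 jump. The short form is usable only when the displacement
+ // to failTarget fits in a signed 32 bits -- the same quantity that
+ // DispatchHolder::Initialize later writes into _failDispl.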
+
+ DispatchStub* stub() { LIMITED_METHOD_CONTRACT; return reinterpret_cast<DispatchStub *>(this); }
+
+ static DispatchHolder* FromDispatchEntry(PCODE dispatchEntry);
+
+private:
+ // DispatchStub follows here. It is dynamically sized on allocation
+ // because it could be a DispatchStubLong or a DispatchStubShort
+};
+
+struct ResolveStub;
+struct ResolveHolder;
+
+/*ResolveStub**************************************************************************************
+Polymorphic call sites and monomorphic calls that fail end up in a ResolveStub. There is only
+one resolve stub built for any given token, even though there may be many call sites that
+use that token and many distinct <this> types that are used in the calling call frames. A resolve stub
+actually has two entry points, one for polymorphic call sites and one for dispatch stubs that fail on their
+expectedMT test. There is a third part of the resolve stub that enters the ee when a decision should
+be made about changing the callsite. Therefore, we have defined the resolve stub as three distinct pieces,
+even though they are actually allocated as a single contiguous block of memory. These pieces are:
+
+FailEntry - where the dispatch stub goes if the expectedMT test fails. This piece of the stub does
+a check to see how often we are actually failing. If failures are frequent, control transfers to the
+patch piece to cause the call site to be changed from a mostly monomorphic callsite
+(calls dispatch stub) to a polymorphic callsite (calls resolve stub). If failures are rare, control
+transfers to the resolve piece (see ResolveStub). The failEntryPoint decrements a counter
+every time it is entered. The ee at various times will add a large chunk to the counter.
+
+ResolveEntry - does a lookup in a cache by hashing the actual type of the calling frame's
+<this> and the token identifying the (contract,method) pair desired. If found, control is transferred
+to the method implementation. If not found in the cache, the token is pushed and the ee is entered via
+the ResolveWorkerStub to do a full lookup and eventual transfer to the correct method implementation. Since
+there is a different resolve stub for every token, the token can be inlined and the token can be pre-hashed.
+The effectiveness of this approach is highly sensitive to the effectiveness of the hashing algorithm used,
+as well as its speed. It turns out it is very important to make the hash function sensitive to all
+of the bits of the method table, as method tables are laid out in memory in a very non-random way. Before
+making any changes to the code sequences here, it is very important to measure and tune them as perf
+can vary greatly, in unexpected ways, with seemingly minor changes.
+
+Implementation note - Order, choice of instructions, and branch directions
+should be carefully tuned since they can have an inordinate effect on performance. Particular
+attention needs to be paid to the effects on the BTB and branch prediction, both in the small
+and in the large, i.e. it needs to run well in the face of BTB overflow--using static predictions.
+Note that this stub is called in highly polymorphic cases, but the cache should have been sized
+and the hash function chosen to maximize the cache hit case. Hence the cmp/jcc instructions should
+mostly be going down the cache hit route, and it is important that this be statically predicted as such.
+Hence the 3 jcc instrs need to be forward jumps. As structured, there is only one jmp/jcc that typically
+gets put in the BTB since all the others typically fall straight thru. Minimizing potential BTB entries
+is important. */
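+
+// A sketch of the cache probe the resolveEntryPoint bytes below encode
+// (prehash is the pre-shifted _hashedToken; mask is
+// CALL_STUB_CACHE_MASK*sizeof(void*), so the result is a byte offset into
+// the cache):
+//
+//     size_t mt  = *(size_t*)this_ptr;                                  // MethodTable*
+//     size_t off = ((mt + (mt >> CALL_STUB_CACHE_NUM_BITS)) ^ prehash) & mask;
+//     ResolveCacheElem *e = *(ResolveCacheElem**)(_cacheAddress + off);
+//     if (e->pMT == mt && e->token == _token)
+//         goto e->target;                                               // cache hit
+//     goto miss;                                                        // full lookup via ResolveWorkerStub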
+
+struct ResolveStub
+{
+ inline PCODE failEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_failEntryPoint[0]; }
+ inline PCODE resolveEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_resolveEntryPoint[0]; }
+ inline PCODE slowEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_slowEntryPoint[0]; }
+
+ inline INT32* pCounter() { LIMITED_METHOD_CONTRACT; return _pCounter; }
+ inline UINT32 hashedToken() { LIMITED_METHOD_CONTRACT; return _hashedToken >> LOG2_PTRSIZE; }
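+ // Note: _hashedToken is stored pre-shifted left by LOG2_PTRSIZE (see
+ // ResolveHolder::Initialize) so the stub's xor/and sequence yields a byte
+ // offset into the cache; this accessor undoes the shift.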
+ inline size_t cacheAddress() { LIMITED_METHOD_CONTRACT; return _cacheAddress; }
+ inline size_t token() { LIMITED_METHOD_CONTRACT; return _token; }
+ inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(ResolveStub); }
+
+private:
+ friend struct ResolveHolder;
+
+ BYTE _resolveEntryPoint[3];// resolveStub:
+ // 52 push rdx
+ // 49 BA mov r10,
+ size_t _cacheAddress; // xx xx xx xx xx xx xx xx 64-bit address
+ BYTE part1 [15]; // 48 8B XX mov rax, [THIS_REG] ; Compute hash = ((MT + MT>>12) ^ prehash)
+ // 48 8B D0 mov rdx, rax ; rdx <- current MethodTable
+ // 48 C1 E8 0C shr rax, 12
+ // 48 03 C2 add rax, rdx
+ // 48 35 xor rax,
+ UINT32 _hashedToken; // xx xx xx xx hashedtoken ; xor with pre-hashed token
+ BYTE part2 [2]; // 48 25 and rax,
+ UINT32 mask; // xx xx xx xx cache_mask ; and with cache mask
+ BYTE part3 [6]; // 4A 8B 04 10 mov rax, [r10 + rax] ; get cache entry address
+ // 49 BA mov r10,
+ size_t _token; // xx xx xx xx xx xx xx xx 64-bit address
+ BYTE part4 [3]; // 48 3B 50 cmp rdx, [rax+ ; compare our MT vs. cache MT
+ BYTE mtOffset; // xx ResolverCacheElem.pMT]
+ BYTE part5 [1]; // 75 jne
+ BYTE toMiss1; // xx miss ; must be forward jump, for perf reasons
+ BYTE part6 [3]; // 4C 3B 50 cmp r10, [rax+ ; compare our token vs. cache token
+ BYTE tokenOffset; // xx ResolverCacheElem.token]
+ BYTE part7 [1]; // 75 jne
+ BYTE toMiss2; // xx miss ; must be forward jump, for perf reasons
+ BYTE part8 [3]; // 48 8B 40 mov rax, [rax+ ; setup rax with method impl address
+ BYTE targetOffset; // xx ResolverCacheElem.target]
+ BYTE part9 [3]; // 5A pop rdx
+ // FF E0 jmp rax
+ // failStub:
+ BYTE _failEntryPoint [2]; // 48 B8 mov rax,
+ INT32* _pCounter; // xx xx xx xx xx xx xx xx 64-bit address
+ BYTE part11 [4]; // 83 00 FF add dword ptr [rax], -1
+ // 7d jnl
+ BYTE toResolveStub1; // xx resolveStub
+ BYTE part12 [4]; // 49 83 CB 01 or r11, 1
+ BYTE _slowEntryPoint [3]; // 52 slow: push rdx
+ // 49 BA mov r10,
+ size_t _tokenSlow; // xx xx xx xx xx xx xx xx 64-bit address
+// BYTE miss [5]; // 5A miss: pop rdx ; don't pop rdx
+// // 41 52 push r10 ; don't push r10 leave it setup with token
+ BYTE miss [3]; // 50 push rax ; push ptr to cache elem
+ // 48 B8 mov rax,
+ size_t _resolveWorker; // xx xx xx xx xx xx xx xx 64-bit address
+ BYTE part10 [2]; // FF E0 jmp rax
+};
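+
+// A sketch of the failEntryPoint logic encoded above: decrement the shared
+// counter and, while it stays non-negative, keep using the resolve path; once
+// it goes negative, set the low bit of r11 as a hint and enter the slow path
+// so the ee can consider repatching the callsite:
+//
+//     if (--(*_pCounter) >= 0)
+//         goto resolveEntryPoint;   // add dword ptr [rax], -1 / jnl
+//     // r11 |= 1                   // or r11, 1
+//     goto slowEntryPoint;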
+
+/* ResolveHolders are the containers for ResolveStubs. They provide
+for any alignment of the stubs as necessary. The stubs are placed in a hash table keyed by
+the token for which they are built. Efficiency of access requires that this token be aligned.
+For now, we have copied that field into the ResolveHolder itself; if the resolve stub is arranged such that
+any of its inlined tokens (non-prehashed) is aligned, then the token field in the ResolveHolder
+is not needed. */
+struct ResolveHolder
+{
+ static void InitializeStatic();
+
+ void Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+ size_t dispatchToken, UINT32 hashedToken,
+ void * cacheAddr, INT32* counterAddr);
+
+ ResolveStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
+
+ static ResolveHolder* FromFailEntry(PCODE resolveEntry);
+ static ResolveHolder* FromResolveEntry(PCODE resolveEntry);
+
+private:
+ ResolveStub _stub;
+};
+#pragma pack(pop)
+
+#ifdef DECLARE_DATA
+
+LookupStub lookupInit;
+DispatchStub dispatchInit;
+DispatchStubShort dispatchShortInit;
+DispatchStubLong dispatchLongInit;
+ResolveStub resolveInit;
+
+#define INSTR_INT3 0xcc
+#define INSTR_NOP 0x90
+
+#ifndef DACCESS_COMPILE
+
+#include "asmconstants.h"
+
+#ifdef STUB_LOGGING
+extern size_t g_lookup_inline_counter;
+extern size_t g_call_inline_counter;
+extern size_t g_miss_inline_counter;
+extern size_t g_call_cache_counter;
+extern size_t g_miss_cache_counter;
+#endif
+
+/* Template used to generate the stub. We generate a stub by allocating a block of
+ memory and copy the template over it and just update the specific fields that need
+ to be changed.
+*/
+
+void LookupHolder::InitializeStatic()
+{
+ static_assert_no_msg((sizeof(LookupHolder) % sizeof(void*)) == 0);
+
+ // The first instruction of a LookupStub is nop
+ // and we use it in order to differentiate the first two bytes
+ // of a LookupStub and a ResolveStub
+ lookupInit._entryPoint [0] = INSTR_NOP;
+ lookupInit._entryPoint [1] = 0x48;
+ lookupInit._entryPoint [2] = 0xB8;
+ lookupInit._token = 0xcccccccccccccccc;
+ lookupInit.part2 [0] = 0x50;
+ lookupInit.part2 [1] = 0x48;
+ lookupInit.part2 [2] = 0xB8;
+ lookupInit._resolveWorkerAddr = 0xcccccccccccccccc;
+ lookupInit.part3 [0] = 0xFF;
+ lookupInit.part3 [1] = 0xE0;
+}
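+
+// The assembled LookupStub template is thus:
+//
+//     90                        nop            ; distinguishes lookup stubs from resolve stubs
+//     48 B8 <8-byte token>      mov rax, _token
+//     50                        push rax
+//     48 B8 <8-byte address>    mov rax, _resolveWorkerAddr
+//     FF E0                     jmp rax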
+
+void LookupHolder::Initialize(PCODE resolveWorkerTarget, size_t dispatchToken)
+{
+ _stub = lookupInit;
+
+ //fill in the stub specific fields
+ _stub._token = dispatchToken;
+ _stub._resolveWorkerAddr = (size_t) resolveWorkerTarget;
+}
+
+/* Template used to generate the stub. We generate a stub by allocating a block of
+ memory and copy the template over it and just update the specific fields that need
+ to be changed.
+*/
+
+void DispatchHolder::InitializeStatic()
+{
+ // Check that _expectedMT is aligned in the DispatchHolder
+ static_assert_no_msg(((sizeof(DispatchStub)+sizeof(DispatchStubShort)) % sizeof(void*)) == 0);
+ static_assert_no_msg(((sizeof(DispatchStub)+sizeof(DispatchStubLong)) % sizeof(void*)) == 0);
+ CONSISTENCY_CHECK((offsetof(DispatchStubLong, part4[0]) - offsetof(DispatchStubLong, part2[0])) < INT8_MAX);
+
+ // Common dispatch stub initialization
+ dispatchInit._entryPoint [0] = 0x48;
+ dispatchInit._entryPoint [1] = 0xB8;
+ dispatchInit._expectedMT = 0xcccccccccccccccc;
+ dispatchInit.part1 [0] = 0x48;
+ dispatchInit.part1 [1] = 0x39;
+#ifdef UNIX_AMD64_ABI
+ dispatchInit.part1 [2] = 0x07; // RDI
+#else
+ dispatchInit.part1 [2] = 0x01; // RCX
+#endif
+
+ // Short dispatch stub initialization
+ dispatchShortInit.part1 [0] = 0x0F;
+ dispatchShortInit.part1 [1] = 0x85;
+ dispatchShortInit._failDispl = 0xcccccccc;
+ dispatchShortInit.part2 [0] = 0x48;
+ dispatchShortInit.part2 [1] = 0xb8;
+ dispatchShortInit._implTarget = 0xcccccccccccccccc;
+ dispatchShortInit.part3 [0] = 0xFF;
+ dispatchShortInit.part3 [1] = 0xE0;
+ dispatchShortInit.alignPad [0] = INSTR_INT3;
+
+ // Long dispatch stub initialization
+ dispatchLongInit.part1 [0] = 0x75;
+ dispatchLongInit._failDispl = BYTE(&dispatchLongInit.part4[0] - &dispatchLongInit.part2[0]);
+ dispatchLongInit.part2 [0] = 0x48;
+ dispatchLongInit.part2 [1] = 0xb8;
+ dispatchLongInit._implTarget = 0xcccccccccccccccc;
+ dispatchLongInit.part3 [0] = 0xFF;
+ dispatchLongInit.part3 [1] = 0xE0;
+ // failLabel:
+ dispatchLongInit.part4 [0] = 0x48;
+ dispatchLongInit.part4 [1] = 0xb8;
+ dispatchLongInit._failTarget = 0xcccccccccccccccc;
+ dispatchLongInit.part5 [0] = 0xFF;
+ dispatchLongInit.part5 [1] = 0xE0;
+ dispatchLongInit.alignPad [0] = INSTR_INT3;
+}
+
+void DispatchHolder::Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT,
+ DispatchStub::DispatchStubType type)
+{
+ //
+ // Initialize the common area
+ //
+
+ // initialize the static data
+ *stub() = dispatchInit;
+
+ // fill in the dynamic data
+ stub()->_expectedMT = expectedMT;
+
+ //
+ // Initialize the short/long areas
+ //
+ if (type == DispatchStub::e_TYPE_SHORT)
+ {
+ DispatchStubShort *shortStub = const_cast<DispatchStubShort *>(stub()->getShortStub());
+
+ // initialize the static data
+ *shortStub = dispatchShortInit;
+
+ // fill in the dynamic data
+ size_t displ = (failTarget - ((PCODE) &shortStub->_failDispl + sizeof(DISPL)));
+ CONSISTENCY_CHECK(FitsInI4(displ));
+ shortStub->_failDispl = (DISPL) displ;
+ shortStub->_implTarget = (size_t) implTarget;
+ CONSISTENCY_CHECK((PCODE)&shortStub->_failDispl + sizeof(DISPL) + shortStub->_failDispl == failTarget);
+ }
+ else
+ {
+ CONSISTENCY_CHECK(type == DispatchStub::e_TYPE_LONG);
+ DispatchStubLong *longStub = const_cast<DispatchStubLong *>(stub()->getLongStub());
+
+ // initialize the static data
+ *longStub = dispatchLongInit;
+
+ // fill in the dynamic data
+ longStub->_implTarget = implTarget;
+ longStub->_failTarget = failTarget;
+ }
+}
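+
+// A sketch of how a caller is expected to size and initialize a holder (the
+// allocation of stubMemory itself is up to the caller):
+//
+//     DispatchStub::DispatchStubType type = DispatchStub::e_TYPE_LONG;
+//     if (DispatchHolder::CanShortJumpDispatchStubReachFailTarget(failTarget, stubMemory))
+//         type = DispatchStub::e_TYPE_SHORT;
+//     DispatchHolder *pHolder = (DispatchHolder *) stubMemory;
+//     pHolder->Initialize(implTarget, failTarget, expectedMT, type);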
+
+/* Template used to generate the stub. We generate a stub by allocating a block of
+ memory and copy the template over it and just update the specific fields that need
+ to be changed.
+*/
+
+void ResolveHolder::InitializeStatic()
+{
+ static_assert_no_msg((sizeof(ResolveHolder) % sizeof(void*)) == 0);
+
+ resolveInit._resolveEntryPoint [0] = 0x52;
+ resolveInit._resolveEntryPoint [1] = 0x49;
+ resolveInit._resolveEntryPoint [2] = 0xBA;
+ resolveInit._cacheAddress = 0xcccccccccccccccc;
+ resolveInit.part1 [ 0] = 0x48;
+ resolveInit.part1 [ 1] = 0x8B;
+#ifdef UNIX_AMD64_ABI
+ resolveInit.part1 [ 2] = 0x07; // RDI
+#else
+ resolveInit.part1 [ 2] = 0x01; // RCX
+#endif
+ resolveInit.part1 [ 3] = 0x48;
+ resolveInit.part1 [ 4] = 0x8B;
+ resolveInit.part1 [ 5] = 0xD0;
+ resolveInit.part1 [ 6] = 0x48;
+ resolveInit.part1 [ 7] = 0xC1;
+ resolveInit.part1 [ 8] = 0xE8;
+ resolveInit.part1 [ 9] = CALL_STUB_CACHE_NUM_BITS;
+ resolveInit.part1 [10] = 0x48;
+ resolveInit.part1 [11] = 0x03;
+ resolveInit.part1 [12] = 0xC2;
+ resolveInit.part1 [13] = 0x48;
+ resolveInit.part1 [14] = 0x35;
+// Review truncation from unsigned __int64 to UINT32 of a constant value.
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable:4305 4309)
+#endif // defined(_MSC_VER)
+
+ resolveInit._hashedToken = 0xcccccccccccccccc;
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif // defined(_MSC_VER)
+
+ resolveInit.part2 [ 0] = 0x48;
+ resolveInit.part2 [ 1] = 0x25;
+ resolveInit.mask = CALL_STUB_CACHE_MASK*sizeof(void *);
+ resolveInit.part3 [0] = 0x4A;
+ resolveInit.part3 [1] = 0x8B;
+ resolveInit.part3 [2] = 0x04;
+ resolveInit.part3 [3] = 0x10;
+ resolveInit.part3 [4] = 0x49;
+ resolveInit.part3 [5] = 0xBA;
+ resolveInit._token = 0xcccccccccccccccc;
+ resolveInit.part4 [0] = 0x48;
+ resolveInit.part4 [1] = 0x3B;
+ resolveInit.part4 [2] = 0x50;
+ resolveInit.mtOffset = offsetof(ResolveCacheElem,pMT) & 0xFF;
+ resolveInit.part5 [0] = 0x75;
+ resolveInit.toMiss1 = (offsetof(ResolveStub,miss)-(offsetof(ResolveStub,toMiss1)+1)) & 0xFF;
+ resolveInit.part6 [0] = 0x4C;
+ resolveInit.part6 [1] = 0x3B;
+ resolveInit.part6 [2] = 0x50;
+ resolveInit.tokenOffset = offsetof(ResolveCacheElem,token) & 0xFF;
+ resolveInit.part7 [0] = 0x75;
+ resolveInit.toMiss2 = (offsetof(ResolveStub,miss)-(offsetof(ResolveStub,toMiss2)+1)) & 0xFF;
+ resolveInit.part8 [0] = 0x48;
+ resolveInit.part8 [1] = 0x8B;
+ resolveInit.part8 [2] = 0x40;
+ resolveInit.targetOffset = offsetof(ResolveCacheElem,target) & 0xFF;
+ resolveInit.part9 [0] = 0x5A;
+ resolveInit.part9 [1] = 0xFF;
+ resolveInit.part9 [2] = 0xE0;
+ resolveInit._failEntryPoint [0] = 0x48;
+ resolveInit._failEntryPoint [1] = 0xB8;
+ resolveInit._pCounter = (INT32*) (size_t) 0xcccccccccccccccc;
+ resolveInit.part11 [0] = 0x83;
+ resolveInit.part11 [1] = 0x00;
+ resolveInit.part11 [2] = 0xFF;
+ resolveInit.part11 [3] = 0x7D;
+ resolveInit.toResolveStub1 = (offsetof(ResolveStub, _resolveEntryPoint) - (offsetof(ResolveStub, toResolveStub1)+1)) & 0xFF;
+ resolveInit.part12 [0] = 0x49;
+ resolveInit.part12 [1] = 0x83;
+ resolveInit.part12 [2] = 0xCB;
+ resolveInit.part12 [3] = 0x01;
+ resolveInit._slowEntryPoint [0] = 0x52;
+ resolveInit._slowEntryPoint [1] = 0x49;
+ resolveInit._slowEntryPoint [2] = 0xBA;
+ resolveInit._tokenSlow = 0xcccccccccccccccc;
+ resolveInit.miss [0] = 0x50;
+ resolveInit.miss [1] = 0x48;
+ resolveInit.miss [2] = 0xB8;
+ resolveInit._resolveWorker = 0xcccccccccccccccc;
+ resolveInit.part10 [0] = 0xFF;
+ resolveInit.part10 [1] = 0xE0;
+}
+
+void ResolveHolder::Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+ size_t dispatchToken, UINT32 hashedToken,
+ void * cacheAddr, INT32* counterAddr)
+{
+ _stub = resolveInit;
+
+ //fill in the stub specific fields
+ _stub._cacheAddress = (size_t) cacheAddr;
+ _stub._hashedToken = hashedToken << LOG2_PTRSIZE;
+ _stub._token = dispatchToken;
+ _stub._tokenSlow = dispatchToken;
+ _stub._resolveWorker = (size_t) resolveWorkerTarget;
+ _stub._pCounter = counterAddr;
+}
+
+ResolveHolder* ResolveHolder::FromFailEntry(PCODE failEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+ ResolveHolder* resolveHolder = (ResolveHolder*) ( failEntry - offsetof(ResolveHolder, _stub) - offsetof(ResolveStub, _failEntryPoint) );
+ _ASSERTE(resolveHolder->_stub._resolveEntryPoint[1] == resolveInit._resolveEntryPoint[1]);
+ return resolveHolder;
+}
+
+#endif // DACCESS_COMPILE
+
+LookupHolder* LookupHolder::FromLookupEntry(PCODE lookupEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+ LookupHolder* lookupHolder = (LookupHolder*) ( lookupEntry - offsetof(LookupHolder, _stub) - offsetof(LookupStub, _entryPoint) );
+ _ASSERTE(lookupHolder->_stub._entryPoint[2] == lookupInit._entryPoint[2]);
+ return lookupHolder;
+}
+
+
+DispatchHolder* DispatchHolder::FromDispatchEntry(PCODE dispatchEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+ DispatchHolder* dispatchHolder = (DispatchHolder*) ( dispatchEntry - offsetof(DispatchStub, _entryPoint) );
+ _ASSERTE(dispatchHolder->stub()->_entryPoint[1] == dispatchInit._entryPoint[1]);
+ return dispatchHolder;
+}
+
+
+ResolveHolder* ResolveHolder::FromResolveEntry(PCODE resolveEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+ ResolveHolder* resolveHolder = (ResolveHolder*) ( resolveEntry - offsetof(ResolveHolder, _stub) - offsetof(ResolveStub, _resolveEntryPoint) );
+ _ASSERTE(resolveHolder->_stub._resolveEntryPoint[1] == resolveInit._resolveEntryPoint[1]);
+ return resolveHolder;
+}
+
+VirtualCallStubManager::StubKind VirtualCallStubManager::predictStubKind(PCODE stubStartAddress)
+{
+#ifdef DACCESS_COMPILE
+ return SK_BREAKPOINT; // Dac always uses the slower lookup
+#else
+ StubKind stubKind = SK_UNKNOWN;
+
+ EX_TRY
+ {
+ // If stubStartAddress is completely bogus, then this might AV,
+ // so we protect it with SEH. An AV here is OK.
+ AVInRuntimeImplOkayHolder AVOkay;
+
+ WORD firstWord = *((WORD*) stubStartAddress);
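+
+ // These WORD compares read the first two bytes little-endian: 0xB848
+ // matches 48 B8 (mov rax, imm64 - a DispatchStub), 0x4890 matches 90 48
+ // (nop then mov rax - a LookupStub), and 0x4952 matches 52 49 (push rdx
+ // then mov r10 - a ResolveStub); see the templates above.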
+
+ if (firstWord == 0xB848)
+ {
+ stubKind = SK_DISPATCH;
+ }
+ else if (firstWord == 0x4890)
+ {
+ stubKind = SK_LOOKUP;
+ }
+ else if (firstWord == 0x4952)
+ {
+ stubKind = SK_RESOLVE;
+ }
+ else if (firstWord == 0x48F8)
+ {
+ stubKind = SK_LOOKUP;
+ }
+ else
+ {
+ BYTE firstByte = ((BYTE*) stubStartAddress)[0];
+ BYTE secondByte = ((BYTE*) stubStartAddress)[1];
+
+ if ((firstByte == INSTR_INT3) || (secondByte == INSTR_INT3))
+ {
+ stubKind = SK_BREAKPOINT;
+ }
+ }
+ }
+ EX_CATCH
+ {
+ stubKind = SK_UNKNOWN;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return stubKind;
+
+#endif // DACCESS_COMPILE
+}
+
+#endif //DECLARE_DATA
+
+#endif // _VIRTUAL_CALL_STUB_AMD64_H
diff --git a/src/vm/appdomain.cpp b/src/vm/appdomain.cpp
new file mode 100644
index 0000000000..de343848be
--- /dev/null
+++ b/src/vm/appdomain.cpp
@@ -0,0 +1,14994 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+
+#include "appdomain.hpp"
+#include "peimagelayout.inl"
+#include "field.h"
+#include "security.h"
+#include "strongnameinternal.h"
+#include "excep.h"
+#include "eeconfig.h"
+#include "gc.h"
+#include "gcenv.h"
+#include "eventtrace.h"
+#ifdef FEATURE_FUSION
+#include "assemblysink.h"
+#include "fusion.h"
+#include "fusionbind.h"
+#include "fusionlogging.h"
+#endif
+#include "perfcounters.h"
+#include "assemblyname.hpp"
+#include "eeprofinterfaces.h"
+#include "dbginterface.h"
+#ifndef DACCESS_COMPILE
+#include "eedbginterfaceimpl.h"
+#endif
+#include "comdynamic.h"
+#include "mlinfo.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "posterror.h"
+#include "assemblynative.hpp"
+#include "shimload.h"
+#include "stringliteralmap.h"
+#include "codeman.h"
+#include "comcallablewrapper.h"
+#include "apithreadstress.h"
+#include "eventtrace.h"
+#include "comdelegate.h"
+#include "siginfo.hpp"
+#ifdef FEATURE_REMOTING
+#include "appdomainhelper.h"
+#include "objectclone.h"
+#endif
+#include "typekey.h"
+
+#include "caparser.h"
+#include "ecall.h"
+#include "finalizerthread.h"
+#include "threadsuspend.h"
+
+#ifdef FEATURE_PREJIT
+#include "corcompile.h"
+#include "compile.h"
+#endif // FEATURE_PREJIT
+
+#ifdef FEATURE_COMINTEROP
+#include "comtoclrcall.h"
+#include "sxshelpers.h"
+#include "runtimecallablewrapper.h"
+#include "mngstdinterfaces.h"
+#include "olevariant.h"
+#include "rcwrefcache.h"
+#include "olecontexthelpers.h"
+#endif // FEATURE_COMINTEROP
+#ifdef FEATURE_TYPEEQUIVALENCE
+#include "typeequivalencehash.hpp"
+#endif
+
+#include "listlock.inl"
+#include "appdomain.inl"
+#include "typeparse.h"
+#include "mdaassistants.h"
+#include "stackcompressor.h"
+#ifdef FEATURE_REMOTING
+#include "mscorcfg.h"
+#include "appdomainconfigfactory.hpp"
+#include "crossdomaincalls.h"
+#endif
+#include "threadpoolrequest.h"
+
+#include "nativeoverlapped.h"
+
+#include "compatibilityflags.h"
+
+#ifndef FEATURE_PAL
+#include "dwreport.h"
+#endif // !FEATURE_PAL
+
+#include "stringarraylist.h"
+
+#ifdef FEATURE_VERSIONING
+#include "../binder/inc/clrprivbindercoreclr.h"
+#endif
+
+#if defined(FEATURE_APPX_BINDER) && defined(FEATURE_HOSTED_BINDER)
+#include "appxutil.h"
+#include "clrprivbinderappx.h"
+#endif
+
+#ifdef FEATURE_HOSTED_BINDER
+#include "clrprivtypecachewinrt.h"
+#endif
+
+#ifndef FEATURE_CORECLR
+#include "nlsinfo.h"
+#endif
+
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+#pragma warning(push)
+#pragma warning(disable:4324)
+#include "marvin32.h"
+#pragma warning(pop)
+#endif
+
+// this file handles string conversion errors for itself
+#undef MAKE_TRANSLATIONFAILED
+
+// Define these macros to do strict validation for jit lock and class
+// init entry leaks. These defines determine whether the asserts that
+// verify for these leaks are enabled or not. These asserts can
+// sometimes go off even if no entries have been leaked, so these
+// defines should be used with caution.
+//
+// If we are inside a .cctor when the application shuts down, then the
+// class init lock's head will be set and this will cause the assert
+// to go off.
+//
+// If we are jitting a method when the application shuts down, then
+// the jit lock's head will be set, causing the assert to go off.
+
+//#define STRICT_CLSINITLOCK_ENTRY_LEAK_DETECTION
+
+static const WCHAR DEFAULT_DOMAIN_FRIENDLY_NAME[] = W("DefaultDomain");
+static const WCHAR OTHER_DOMAIN_FRIENDLY_NAME_PREFIX[] = W("Domain");
+
+#define STATIC_OBJECT_TABLE_BUCKET_SIZE 1020
+
+#define MAX_URL_LENGTH 2084 // same as INTERNET_MAX_URL_LENGTH
+
+//#define _DEBUG_ADUNLOAD 1
+
+HRESULT RunDllMain(MethodDesc *pMD, HINSTANCE hInst, DWORD dwReason, LPVOID lpReserved); // clsload.cpp
+
+
+
+
+
+// Statics
+
+SPTR_IMPL(SystemDomain, SystemDomain, m_pSystemDomain);
+SVAL_IMPL(ArrayListStatic, SystemDomain, m_appDomainIndexList);
+SPTR_IMPL(SharedDomain, SharedDomain, m_pSharedDomain);
+SVAL_IMPL(BOOL, SystemDomain, s_fForceDebug);
+SVAL_IMPL(BOOL, SystemDomain, s_fForceProfiling);
+SVAL_IMPL(BOOL, SystemDomain, s_fForceInstrument);
+
+#ifndef DACCESS_COMPILE
+
+// Base Domain Statics
+CrstStatic BaseDomain::m_SpecialStaticsCrst;
+
+int BaseDomain::m_iNumberOfProcessors = 0;
+
+// Shared Domain Statics
+static BYTE g_pSharedDomainMemory[sizeof(SharedDomain)];
+
+// System Domain Statics
+GlobalStringLiteralMap* SystemDomain::m_pGlobalStringLiteralMap = NULL;
+
+static BYTE g_pSystemDomainMemory[sizeof(SystemDomain)];
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+size_t SystemDomain::m_totalSurvivedBytes = 0;
+#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+CrstStatic SystemDomain::m_SystemDomainCrst;
+CrstStatic SystemDomain::m_DelayedUnloadCrst;
+
+ULONG SystemDomain::s_dNumAppDomains = 0;
+
+AppDomain * SystemDomain::m_pAppDomainBeingUnloaded = NULL;
+ADIndex SystemDomain::m_dwIndexOfAppDomainBeingUnloaded;
+Thread *SystemDomain::m_pAppDomainUnloadRequestingThread = 0;
+Thread *SystemDomain::m_pAppDomainUnloadingThread = 0;
+
+ArrayListStatic SystemDomain::m_appDomainIdList;
+
+DWORD SystemDomain::m_dwLowestFreeIndex = 0;
+
+
+
+// comparison function to be used for matching clsids in our clsid hash table
+BOOL CompareCLSID(UPTR u1, UPTR u2)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
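+ // The hash map stores pointer keys shifted right by one bit (the map
+ // reserves the low bit for its own use), so shift left to recover the
+ // original GUID pointer.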
+ GUID *pguid = (GUID *)(u1 << 1);
+ _ASSERTE(pguid != NULL);
+
+ MethodTable *pMT= (MethodTable *)u2;
+ _ASSERTE(pMT!= NULL);
+
+ GUID guid;
+ pMT->GetGuid(&guid, TRUE);
+ if (!IsEqualIID(guid, *pguid))
+ return FALSE;
+
+ return TRUE;
+}
+
+#ifndef CROSSGEN_COMPILE
+// Constructor for the LargeHeapHandleBucket class.
+LargeHeapHandleBucket::LargeHeapHandleBucket(LargeHeapHandleBucket *pNext, DWORD Size, BaseDomain *pDomain, BOOL bCrossAD)
+: m_pNext(pNext)
+, m_ArraySize(Size)
+, m_CurrentPos(0)
+, m_CurrentEmbeddedFreePos(0) // hint for where to start a search for an embedded free item
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDomain));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ PTRARRAYREF HandleArrayObj;
+
+ // Allocate the array in the large object heap.
+ if (!bCrossAD)
+ {
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+ HandleArrayObj = (PTRARRAYREF)AllocateObjectArray(Size, g_pObjectClass, TRUE);
+ }
+ else
+ {
+ // During AD creation we don't want to assign the handle array to the currently running AD but
+ // to the AD being created. Ensure that AllocateArrayEx doesn't set the AD and then set it here.
+ AppDomain *pAD = pDomain->AsAppDomain();
+ _ASSERTE(pAD);
+ _ASSERTE(pAD->IsBeingCreated());
+
+ OBJECTREF array;
+ {
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+ array = AllocateArrayEx(
+ ClassLoader::LoadArrayTypeThrowing(g_pObjectClass),
+ (INT32 *)(&Size),
+ 1,
+ TRUE
+ DEBUG_ARG(TRUE));
+ }
+
+ array->SetAppDomain(pAD);
+
+ HandleArrayObj = (PTRARRAYREF)array;
+ }
+
+ // Retrieve the pointer to the data inside the array. This is legal since the array
+ // is located in the large object heap and is guaranteed not to move.
+ m_pArrayDataPtr = (OBJECTREF *)HandleArrayObj->GetDataPtr();
+
+ // Store the array in a strong handle to keep it alive.
+ m_hndHandleArray = pDomain->CreatePinningHandle((OBJECTREF)HandleArrayObj);
+}
+
+
+// Destructor for the LargeHeapHandleBucket class.
+LargeHeapHandleBucket::~LargeHeapHandleBucket()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (m_hndHandleArray)
+ {
+ DestroyPinningHandle(m_hndHandleArray);
+ m_hndHandleArray = NULL;
+ }
+}
+
+
+// Allocate handles from the bucket.
+OBJECTREF *LargeHeapHandleBucket::AllocateHandles(DWORD nRequested)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(nRequested > 0 && nRequested <= GetNumRemainingHandles());
+ _ASSERTE(m_pArrayDataPtr == (OBJECTREF*)((PTRARRAYREF)ObjectFromHandle(m_hndHandleArray))->GetDataPtr());
+
+ // Store the handles in the buffer that was passed in
+ OBJECTREF* ret = &m_pArrayDataPtr[m_CurrentPos];
+ m_CurrentPos += nRequested;
+
+ return ret;
+}
+
+// look for a free item embedded in the table
+OBJECTREF *LargeHeapHandleBucket::TryAllocateEmbeddedFreeHandle()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pPreallocatedSentinalObject = ObjectFromHandle(g_pPreallocatedSentinelObject);
+ _ASSERTE(pPreallocatedSentinalObject != NULL);
+
+ for (int i = m_CurrentEmbeddedFreePos; i < m_CurrentPos; i++)
+ {
+ if (m_pArrayDataPtr[i] == pPreallocatedSentinalObject)
+ {
+ m_CurrentEmbeddedFreePos = i;
+ m_pArrayDataPtr[i] = NULL;
+ return &m_pArrayDataPtr[i];
+ }
+ }
+
+ // didn't find it (we don't bother wrapping around for a full search, it's not worth it to try that hard, we'll get it next time)
+
+ m_CurrentEmbeddedFreePos = 0;
+ return NULL;
+}
+
+
+// Maximum bucket size will be 64K on 32-bit and 128K on 64-bit.
+// We subtract out a small amount to leave room for the object
+// header and length of the array.
+
+#define MAX_BUCKETSIZE (16384 - 4)
+
+// Constructor for the LargeHeapHandleTable class.
+LargeHeapHandleTable::LargeHeapHandleTable(BaseDomain *pDomain, DWORD InitialBucketSize)
+: m_pHead(NULL)
+, m_pDomain(pDomain)
+, m_NextBucketSize(InitialBucketSize)
+, m_pFreeSearchHint(NULL)
+, m_cEmbeddedFree(0)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDomain));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ m_pCrstDebug = NULL;
+#endif
+}
+
+
+// Destructor for the LargeHeapHandleTable class.
+LargeHeapHandleTable::~LargeHeapHandleTable()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Delete the buckets.
+ while (m_pHead)
+ {
+ LargeHeapHandleBucket *pOld = m_pHead;
+ m_pHead = pOld->GetNext();
+ delete pOld;
+ }
+}
+
+//*****************************************************************************
+//
+// LOCKING RULES FOR AllocateHandles() and ReleaseHandles() 12/08/2004
+//
+//
+// These functions are not protected by any locking in this location but rather the callers are
+// assumed to be doing suitable locking for the handle table. The handle table itself is
+// behaving rather like a thread-agnostic collection class -- it doesn't want to know
+// much about the outside world and so it is just doing its job with no awareness of
+// thread notions.
+//
+// There are two locations where you can find a LargeHeapHandleTable:
+// 1) there is one in every BaseDomain, it is used to keep track of the static members
+// in that domain
+// 2) there is one in the System Domain that is used for the GlobalStringLiteralMap
+//
+// the one in (2) is not the same as the one that is in the BaseDomain object that corresponds
+// to the SystemDomain -- that one is basically stillborn because the string literals don't go
+// there and of course the System Domain has no code loaded into it -- only regular
+// AppDomains (like Domain 0) actually execute code. As a result handle tables are in
+// practice used either for string literals or for static members but never for both.
+// At least not at this writing.
+//
+// Now it's useful to consider what the locking discipline is for these classes.
+//
+// ---------
+//
+// First case: (easiest) is the statics members
+//
+// Each BaseDomain has its own critical section
+//
+// BaseDomain::AllocateObjRefPtrsInLargeTable takes a lock with
+// CrstHolder ch(&m_LargeHeapHandleTableCrst);
+//
+// it does this before it calls AllocateHandles which suffices. It does not call ReleaseHandles
+// at any time (although ReleaseHandles may be called via AllocateHandles if the request
+// doesn't fit in the current block, the remaining handles at the end of the block are released
+// automatically as part of allocation/recycling)
+//
+// note: Recycled handles are only used during String Literal allocation because we only try
+// to recycle handles if the allocation request is for exactly one handle.
+//
+// The handles in the BaseDomain handle table are released when the Domain is unloaded
+// as the GC objects become rootless at that time.
+//
+// This dispenses with all of the Handle tables except the one that is used for string literals
+//
+// ---------
+//
+// Second case: Allocation for use in a string literal
+//
+// AppDomainStringLiteralMap::GetStringLiteral
+// leads to calls to
+// LargeHeapHandleBlockHolder constructor
+// leads to calls to
+// m_Data = pOwner->AllocateHandles(nCount);
+//
+// before doing this AppDomainStringLiteralMap::GetStringLiteral takes this lock
+//
+// CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal));
+//
+// which is the lock for the hash table that it owns
+//
+// STRINGREF *AppDomainStringLiteralMap::GetInternedString
+//
+// has a similar call path and uses the same approach and the same lock
+// this covers all the paths which allocate
+//
+// ---------
+//
+// Third case: Releases for use in a string literal entry
+//
+// CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal));
+// taken in the AppDomainStringLiteralMap functions below protects the 4 ways that this can happen
+//
+// case 3a)
+//
+// in an appdomain unload case
+//
+// AppDomainStringLiteralMap::~AppDomainStringLiteralMap() takes the lock then
+// leads to calls to
+// StringLiteralEntry::Release
+// which leads to
+// SystemDomain::GetGlobalStringLiteralMapNoCreate()->RemoveStringLiteralEntry(this)
+// which leads to
+// m_LargeHeapHandleTable.ReleaseHandles((OBJECTREF*)pObjRef, 1);
+//
+// case 3b)
+//
+// AppDomainStringLiteralMap::GetStringLiteral() can call StringLiteralEntry::Release in some
+// error cases, leading to the same stack as above
+//
+// case 3c)
+//
+// AppDomainStringLiteralMap::GetInternedString() can call StringLiteralEntry::Release in some
+// error cases, leading to the same stack as above
+//
+// case 3d)
+//
+// The same code paths as in 3b and 3c can also end up releasing if an exception is thrown
+// during their processing. Both these paths use a StringLiteralEntryHolder to assist in cleanup:
+// the StaticRelease method of the StringLiteralEntry gets called, which in turn calls the
+// Release method.
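+//
+// In code, the allocation-side pattern is simply (see
+// BaseDomain::AllocateObjRefPtrsInLargeTable later in this file):
+//
+//     CrstHolder ch(&m_LargeHeapHandleTableCrst);    // the lock registered for this table
+//     OBJECTREF* refs = m_pLargeHeapHandleTable->AllocateHandles(nRequested, bCrossAD);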
+
+
+// Allocate handles from the large heap handle table.
+OBJECTREF* LargeHeapHandleTable::AllocateHandles(DWORD nRequested, BOOL bCrossAD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(nRequested > 0);
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // SEE "LOCKING RULES FOR AllocateHandles() and ReleaseHandles()" above
+
+ // the lock must be registered and already held by the caller per contract
+#ifdef _DEBUG
+ _ASSERTE(m_pCrstDebug != NULL);
+ _ASSERTE(m_pCrstDebug->OwnedByCurrentThread());
+#endif
+
+ if (nRequested == 1 && m_cEmbeddedFree != 0)
+ {
+ // special casing singleton requests to look for slots that can be re-used
+
+ // we need to do this because string literals are allocated one at a time and then sometimes
+ // released. we do not wish for the number of handles consumed by string literals to
+ // increase forever as assemblies are loaded and unloaded
+
+ if (m_pFreeSearchHint == NULL)
+ m_pFreeSearchHint = m_pHead;
+
+ while (m_pFreeSearchHint)
+ {
+ OBJECTREF* pObjRef = m_pFreeSearchHint->TryAllocateEmbeddedFreeHandle();
+ if (pObjRef != NULL)
+ {
+ // the slot should have been prepared with a null ready to go
+ _ASSERTE(*pObjRef == NULL);
+ m_cEmbeddedFree--;
+ return pObjRef;
+ }
+ m_pFreeSearchHint = m_pFreeSearchHint->GetNext();
+ }
+
+ // the search doesn't wrap around so it's possible that we might have embedded free items
+ // and not find them but that's ok, we'll get them on the next alloc... all we're trying to do
+ // is to not have big leaks over time.
+ }
+
+
+ // Retrieve the remaining number of handles in the bucket.
+ DWORD NumRemainingHandlesInBucket = (m_pHead != NULL) ? m_pHead->GetNumRemainingHandles() : 0;
+
+ // create a new block if this request doesn't fit in the current block
+ if (nRequested > NumRemainingHandlesInBucket)
+ {
+ if (m_pHead != NULL)
+ {
+ // mark the handles in that remaining region as available for re-use
+ ReleaseHandles(m_pHead->CurrentPos(), NumRemainingHandlesInBucket);
+
+ // mark what's left as having been used
+ m_pHead->ConsumeRemaining();
+ }
+
+ // create a new bucket for this allocation
+
+ // We need a block big enough to hold the requested handles
+ DWORD NewBucketSize = max(m_NextBucketSize, nRequested);
+
+ m_pHead = new LargeHeapHandleBucket(m_pHead, NewBucketSize, m_pDomain, bCrossAD);
+
+ m_NextBucketSize = min(m_NextBucketSize * 2, MAX_BUCKETSIZE);
+ }
+
+ return m_pHead->AllocateHandles(nRequested);
+}
+
+//*****************************************************************************
+// Release object handles allocated using AllocateHandles().
+void LargeHeapHandleTable::ReleaseHandles(OBJECTREF *pObjRef, DWORD nReleased)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pObjRef));
+ }
+ CONTRACTL_END;
+
+ // SEE "LOCKING RULES FOR AllocateHandles() and ReleaseHandles()" above
+
+ // the lock must be registered and already held by the caller per contract
+#ifdef _DEBUG
+ _ASSERTE(m_pCrstDebug != NULL);
+ _ASSERTE(m_pCrstDebug->OwnedByCurrentThread());
+#endif
+
+ OBJECTREF pPreallocatedSentinalObject = ObjectFromHandle(g_pPreallocatedSentinelObject);
+ _ASSERTE(pPreallocatedSentinalObject != NULL);
+
+
+ // Add the released handles to the list of available handles.
+ for (DWORD i = 0; i < nReleased; i++)
+ {
+ SetObjectReference(&pObjRef[i], pPreallocatedSentinalObject, NULL);
+ }
+
+ m_cEmbeddedFree += nReleased;
+}
+
+
+
+
+// Constructor for the ThreadStaticHandleBucket class.
+ThreadStaticHandleBucket::ThreadStaticHandleBucket(ThreadStaticHandleBucket *pNext, DWORD Size, BaseDomain *pDomain)
+: m_pNext(pNext)
+, m_ArraySize(Size)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDomain));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ PTRARRAYREF HandleArrayObj;
+
+ // Allocate the array on the GC heap.
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+ HandleArrayObj = (PTRARRAYREF)AllocateObjectArray(Size, g_pObjectClass, FALSE);
+
+ // Store the array in a strong handle to keep it alive.
+ m_hndHandleArray = pDomain->CreateStrongHandle((OBJECTREF)HandleArrayObj);
+}
+
+// Destructor for the ThreadStaticHandleBucket class.
+ThreadStaticHandleBucket::~ThreadStaticHandleBucket()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (m_hndHandleArray)
+ {
+ DestroyStrongHandle(m_hndHandleArray);
+ m_hndHandleArray = NULL;
+ }
+}
+
+// Allocate handles from the bucket.
+OBJECTHANDLE ThreadStaticHandleBucket::GetHandles()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ return m_hndHandleArray;
+}
+
+// Constructor for the ThreadStaticHandleTable class.
+ThreadStaticHandleTable::ThreadStaticHandleTable(BaseDomain *pDomain)
+: m_pHead(NULL)
+, m_pDomain(pDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDomain));
+ }
+ CONTRACTL_END;
+}
+
+// Destructor for the ThreadStaticHandleTable class.
+ThreadStaticHandleTable::~ThreadStaticHandleTable()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Delete the buckets.
+ while (m_pHead)
+ {
+ ThreadStaticHandleBucket *pOld = m_pHead;
+ m_pHead = pOld->GetNext();
+ delete pOld;
+ }
+}
+
+// Allocate handles from the large heap handle table.
+OBJECTHANDLE ThreadStaticHandleTable::AllocateHandles(DWORD nRequested)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(nRequested > 0);
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // create a new bucket for this allocation
+ m_pHead = new ThreadStaticHandleBucket(m_pHead, nRequested, m_pDomain);
+
+ return m_pHead->GetHandles();
+}
+
+#endif // CROSSGEN_COMPILE
+
+
+//*****************************************************************************
+// BaseDomain
+//*****************************************************************************
+void BaseDomain::Attach()
+{
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+#ifdef FEATURE_CORECLR
+ // Randomized string hashing is on by default for String.GetHashCode in coreclr.
+ COMNlsHashProvider::s_NlsHashProvider.SetUseRandomHashing((CorHost2::GetStartupFlags() & STARTUP_DISABLE_RANDOMIZED_STRING_HASHING) == 0);
+#endif // FEATURE_CORECLR
+#endif // FEATURE_RANDOMIZED_STRING_HASHING
+ m_SpecialStaticsCrst.Init(CrstSpecialStatics);
+}
+
+BaseDomain::BaseDomain()
+{
+ // initialize fields so the domain can be safely destructed
+ // shouldn't call anything that can fail here - use ::Init instead
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ m_fDisableInterfaceCache = FALSE;
+
+ m_pFusionContext = NULL;
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+ m_pTPABinderContext = NULL;
+#endif
+
+ // Make sure the large heap handle table pointer is set to NULL so that the table gets created lazily when it is first used.
+ m_pLargeHeapHandleTable = NULL;
+
+#ifndef CROSSGEN_COMPILE
+ // Note that m_hHandleTableBucket is overridden by app domains
+ m_hHandleTableBucket = g_HandleTableMap.pBuckets[0];
+#else
+ m_hHandleTableBucket = NULL;
+#endif
+
+ m_pMarshalingData = NULL;
+
+ m_dwContextStatics = 0;
+#ifdef FEATURE_COMINTEROP
+ m_pMngStdInterfacesInfo = NULL;
+ m_pWinRtBinder = NULL;
+#endif
+ m_FileLoadLock.PreInit();
+ m_JITLock.PreInit();
+ m_ClassInitLock.PreInit();
+ m_ILStubGenLock.PreInit();
+
+#ifdef FEATURE_REJIT
+ m_reJitMgr.PreInit(this == (BaseDomain *) g_pSharedDomainMemory);
+#endif
+
+#ifdef FEATURE_CORECLR
+ m_CompatMode = APPDOMAINCOMPAT_NONE;
+#endif
+
+} //BaseDomain::BaseDomain
+
+//*****************************************************************************
+void BaseDomain::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ //
+ // Initialize the domain locks
+ //
+
+ if (this == reinterpret_cast<BaseDomain*>(&g_pSharedDomainMemory[0]))
+ m_DomainCrst.Init(CrstSharedBaseDomain);
+ else if (this == reinterpret_cast<BaseDomain*>(&g_pSystemDomainMemory[0]))
+ m_DomainCrst.Init(CrstSystemBaseDomain);
+ else
+ m_DomainCrst.Init(CrstBaseDomain);
+
+ m_DomainCacheCrst.Init(CrstAppDomainCache);
+ m_DomainLocalBlockCrst.Init(CrstDomainLocalBlock);
+
+ m_InteropDataCrst.Init(CrstInteropData, CRST_REENTRANCY);
+
+ m_WinRTFactoryCacheCrst.Init(CrstWinRTFactoryCache, CRST_UNSAFE_COOPGC);
+
+ // NOTE: CRST_UNSAFE_COOPGC prevents a GC mode switch to preemptive when entering this crst.
+ // If you remove this flag, we will switch to preemptive mode when entering
+ // m_FileLoadLock, which means all functions that enter it will become
+ // GC_TRIGGERS. (This includes all uses of PEFileListLockHolder, LoadLockHolder, etc.) So be sure
+ // to update the contracts if you remove this flag.
+ m_FileLoadLock.Init(CrstAssemblyLoader,
+ CrstFlags(CRST_HOST_BREAKABLE), TRUE);
+
+ //
+ // The JIT lock and the CCtor locks are at the same level (and marked as
+ // UNSAFE_SAME_LEVEL) because they are all part of the same deadlock detection mechanism. We
+ // see through cycles of JITting and .cctor execution and then explicitly allow the cycle to
+ // be broken by giving access to uninitialized classes. If there is no cycle or if the cycle
+ // involves other locks that aren't part of this special deadlock-breaking semantics, then
+ // we continue to block.
+ //
+ m_JITLock.Init(CrstJit, CrstFlags(CRST_REENTRANCY | CRST_UNSAFE_SAMELEVEL), TRUE);
+ m_ClassInitLock.Init(CrstClassInit, CrstFlags(CRST_REENTRANCY | CRST_UNSAFE_SAMELEVEL), TRUE);
+
+ m_ILStubGenLock.Init(CrstILStubGen, CrstFlags(CRST_REENTRANCY), TRUE);
+
+ // Large heap handle table CRST.
+ m_LargeHeapHandleTableCrst.Init(CrstAppDomainHandleTable);
+
+ m_crstLoaderAllocatorReferences.Init(CrstLoaderAllocatorReferences);
+ // Has to switch thread to GC_NOTRIGGER while being held (see code:BaseDomain#AssemblyListLock)
+ m_crstAssemblyList.Init(CrstAssemblyList, CrstFlags(
+ CRST_GC_NOTRIGGER_WHEN_TAKEN | CRST_DEBUGGER_THREAD | CRST_TAKEN_DURING_SHUTDOWN));
+
+ // Initialize the EE marshaling data to NULL.
+ m_pMarshalingData = NULL;
+
+#ifdef FEATURE_COMINTEROP
+ // Allocate the managed standard interfaces information.
+ m_pMngStdInterfacesInfo = new MngStdInterfacesInfo();
+
+#if defined(FEATURE_APPX_BINDER)
+ if (!AppX::IsAppXProcess())
+#endif
+ {
+ CLRPrivBinderWinRT::NamespaceResolutionKind fNamespaceResolutionKind = CLRPrivBinderWinRT::NamespaceResolutionKind_WindowsAPI;
+ if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_DesignerNamespaceResolutionEnabled) != FALSE)
+ {
+ fNamespaceResolutionKind = CLRPrivBinderWinRT::NamespaceResolutionKind_DesignerResolveEvent;
+ }
+ CLRPrivTypeCacheWinRT * pWinRtTypeCache = CLRPrivTypeCacheWinRT::GetOrCreateTypeCache();
+ m_pWinRtBinder = CLRPrivBinderWinRT::GetOrCreateBinder(pWinRtTypeCache, fNamespaceResolutionKind);
+ }
+#endif // FEATURE_COMINTEROP
+
+ // Init the COM Interop data hash
+ {
+ LockOwner lock = {&m_InteropDataCrst, IsOwnerOfCrst};
+ m_interopDataHash.Init(0, NULL, false, &lock);
+ }
+
+ m_dwSizedRefHandles = 0;
+ if (!m_iNumberOfProcessors)
+ {
+ m_iNumberOfProcessors = GetCurrentProcessCpuCount();
+ }
+}
+
+#undef LOADERHEAP_PROFILE_COUNTER
+
+#ifndef CROSSGEN_COMPILE
+//*****************************************************************************
+void BaseDomain::Terminate()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_crstLoaderAllocatorReferences.Destroy();
+ m_DomainCrst.Destroy();
+ m_DomainCacheCrst.Destroy();
+ m_DomainLocalBlockCrst.Destroy();
+ m_InteropDataCrst.Destroy();
+
+ ListLockEntry* pElement;
+
+ // All the threads that are in this domain had better be stopped by this
+ // point.
+ //
+ // We might be jitting or running a .cctor so we need to empty that queue.
+ pElement = m_JITLock.Pop(TRUE);
+ while (pElement)
+ {
+#ifdef STRICT_JITLOCK_ENTRY_LEAK_DETECTION
+ _ASSERTE ((m_JITLock.m_pHead->m_dwRefCount == 1
+ && m_JITLock.m_pHead->m_hrResultCode == E_FAIL) ||
+ dbg_fDrasticShutdown || g_fInControlC);
+#endif // STRICT_JITLOCK_ENTRY_LEAK_DETECTION
+ delete(pElement);
+ pElement = m_JITLock.Pop(TRUE);
+
+ }
+ m_JITLock.Destroy();
+
+ pElement = m_ClassInitLock.Pop(TRUE);
+ while (pElement)
+ {
+#ifdef STRICT_CLSINITLOCK_ENTRY_LEAK_DETECTION
+ _ASSERTE (dbg_fDrasticShutdown || g_fInControlC);
+#endif
+ delete(pElement);
+ pElement = m_ClassInitLock.Pop(TRUE);
+ }
+ m_ClassInitLock.Destroy();
+
+ FileLoadLock* pFileElement;
+ pFileElement = (FileLoadLock*) m_FileLoadLock.Pop(TRUE);
+ while (pFileElement)
+ {
+#ifdef STRICT_CLSINITLOCK_ENTRY_LEAK_DETECTION
+ _ASSERTE (dbg_fDrasticShutdown || g_fInControlC);
+#endif
+ pFileElement->Release();
+ pFileElement = (FileLoadLock*) m_FileLoadLock.Pop(TRUE);
+ }
+ m_FileLoadLock.Destroy();
+
+ pElement = m_ILStubGenLock.Pop(TRUE);
+ while (pElement)
+ {
+#ifdef STRICT_JITLOCK_ENTRY_LEAK_DETECTION
+ _ASSERTE ((m_ILStubGenLock.m_pHead->m_dwRefCount == 1
+ && m_ILStubGenLock.m_pHead->m_hrResultCode == E_FAIL) ||
+ dbg_fDrasticShutdown || g_fInControlC);
+#endif // STRICT_JITLOCK_ENTRY_LEAK_DETECTION
+ delete(pElement);
+ pElement = m_ILStubGenLock.Pop(TRUE);
+ }
+ m_ILStubGenLock.Destroy();
+
+ m_LargeHeapHandleTableCrst.Destroy();
+
+ if (m_pLargeHeapHandleTable != NULL)
+ {
+ delete m_pLargeHeapHandleTable;
+ m_pLargeHeapHandleTable = NULL;
+ }
+
+ if (!IsAppDomain())
+ {
+ // Kind of a workaround - during unloading, we need the EE to be halted
+ // while deleting this stuff. So it gets deleted in AppDomain::Terminate()
+ // for those things (because there is a convenient place there.)
+ GetLoaderAllocator()->CleanupStringLiteralMap();
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (m_pMngStdInterfacesInfo)
+ {
+ delete m_pMngStdInterfacesInfo;
+ m_pMngStdInterfacesInfo = NULL;
+ }
+
+ if (m_pWinRtBinder != NULL)
+ {
+ m_pWinRtBinder->Release();
+ }
+#endif // FEATURE_COMINTEROP
+
+ ClearFusionContext();
+
+ m_dwSizedRefHandles = 0;
+}
+#endif // CROSSGEN_COMPILE
+
+void BaseDomain::InitVSD()
+{
+ STANDARD_VM_CONTRACT;
+
+ // This is a workaround for gcc, since it fails to successfully resolve
+ // "TypeIDMap::STARTING_SHARED_DOMAIN_ID" when used within the ?: operator.
+ UINT32 startingId;
+ if (IsSharedDomain())
+ {
+ startingId = TypeIDMap::STARTING_SHARED_DOMAIN_ID;
+ }
+ else
+ {
+ startingId = TypeIDMap::STARTING_UNSHARED_DOMAIN_ID;
+ }
+
+ // By passing false as the last parameter, interfaces loaded in the
+ // shared domain will not be given fat type ids if RequiresFatDispatchTokens
+ // is set. This is correct, as the fat dispatch tokens are only needed to solve
+ // uniqueness problems involving domain specific types.
+ m_typeIDMap.Init(startingId, 2, !IsSharedDomain());
+
+#ifndef CROSSGEN_COMPILE
+ GetLoaderAllocator()->InitVirtualCallStubManager(this);
+#endif
+}
+
+#ifndef CROSSGEN_COMPILE
+BOOL BaseDomain::ContainsOBJECTHANDLE(OBJECTHANDLE handle)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return Ref_ContainHandle(m_hHandleTableBucket,handle);
+}
+
+DWORD BaseDomain::AllocateContextStaticsOffset(DWORD* pOffsetSlot)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ CrstHolder ch(&m_SpecialStaticsCrst);
+
+ DWORD dwOffset = *pOffsetSlot;
+
+ if (dwOffset == (DWORD)-1)
+ {
+ // Allocate the slot
+ dwOffset = m_dwContextStatics++;
+ *pOffsetSlot = dwOffset;
+ }
+
+ return dwOffset;
+}
+
+void BaseDomain::ClearFusionContext()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ if(m_pFusionContext) {
+ m_pFusionContext->Release();
+ m_pFusionContext = NULL;
+ }
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+ if (m_pTPABinderContext) {
+ m_pTPABinderContext->Release();
+ m_pTPABinderContext = NULL;
+ }
+#endif
+}
+
+#ifdef FEATURE_PREJIT
+void AppDomain::DeleteNativeCodeRanges()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ // Fast path to skip using the assembly iterator when the appdomain has not yet completely been initialized
+ // and yet we are destroying it. (This is the case if we OOM during AppDomain creation.)
+ if (m_Assemblies.IsEmpty())
+ return;
+
+ // Shutdown assemblies
+ AssemblyIterator i = IterateAssembliesEx( (AssemblyIterationFlags)(kIncludeLoaded | kIncludeLoading | kIncludeExecution | kIncludeIntrospection | kIncludeFailedToLoad) );
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (i.Next(pDomainAssembly.This()))
+ {
+ Assembly * assembly = pDomainAssembly->m_pAssembly;
+ if ((assembly != NULL) && !assembly->IsDomainNeutral())
+ assembly->DeleteNativeCodeRanges();
+ }
+}
+#endif
+
+void AppDomain::ShutdownAssemblies()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Fast path to skip using the assembly iterator when the appdomain has not yet completely been initialized
+ // and yet we are destroying it. (This is the case if we OOM during AppDomain creation.)
+ if (m_Assemblies.IsEmpty())
+ return;
+
+ // Shutdown assemblies
+ // proceeds in stages because Terminate needs info from the Assembly's dependencies
+
+ // Stage 1: call code:Assembly::Terminate
+ AssemblyIterator i = IterateAssembliesEx((AssemblyIterationFlags)(
+ kIncludeLoaded | kIncludeLoading | kIncludeExecution | kIncludeIntrospection | kIncludeFailedToLoad | kIncludeCollected));
+ DomainAssembly * pDomainAssembly = NULL;
+
+ while (i.Next_UnsafeNoAddRef(&pDomainAssembly))
+ {
+ // Note: cannot use DomainAssembly::GetAssembly() here as it asserts that the assembly has been
+ // loaded to at least the FILE_LOAD_ALLOCATE level. Since domain shutdown can take place
+ // asynchronously this property cannot be guaranteed. Access the m_pAssembly field directly instead.
+ Assembly * assembly = pDomainAssembly->m_pAssembly;
+ if (assembly && !assembly->IsDomainNeutral())
+ assembly->Terminate();
+ }
+
+ // Stage 2: Clear the list of assemblies
+ i = IterateAssembliesEx((AssemblyIterationFlags)(
+ kIncludeLoaded | kIncludeLoading | kIncludeExecution | kIncludeIntrospection | kIncludeFailedToLoad | kIncludeCollected));
+ while (i.Next_UnsafeNoAddRef(&pDomainAssembly))
+ {
+ // We are in shutdown path, no one else can get to the list anymore
+ delete pDomainAssembly;
+ }
+ m_Assemblies.Clear(this);
+
+ // Stage 3: Clear the loader allocators registered for deletion from code:Assembly::Terminate calls in
+ // stage 1
+ // Note: It is not clear to me why we cannot delete the loader allocator from within
+ // code:DomainAssembly::~DomainAssembly
+ ShutdownFreeLoaderAllocators(FALSE);
+} // AppDomain::ShutdownAssemblies
+
+void AppDomain::ShutdownFreeLoaderAllocators(BOOL bFromManagedCode)
+{
+ // If we're called from managed code (i.e. the finalizer thread) we take a lock in
+ // LoaderAllocator::CleanupFailedTypeInit, which may throw. Otherwise we're called
+ // from the app-domain shutdown path in which we can avoid taking the lock.
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ if (bFromManagedCode) THROWS; else NOTHROW;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ CrstHolder ch(GetLoaderAllocatorReferencesLock());
+
+ // Shutdown the LoaderAllocators associated with collectible assemblies
+ while (m_pDelayedLoaderAllocatorUnloadList != NULL)
+ {
+ LoaderAllocator * pCurrentLoaderAllocator = m_pDelayedLoaderAllocatorUnloadList;
+ // Remove next loader allocator from the list
+ m_pDelayedLoaderAllocatorUnloadList = m_pDelayedLoaderAllocatorUnloadList->m_pLoaderAllocatorDestroyNext;
+
+ if (bFromManagedCode)
+ {
+ // For loader allocator finalization, we need to be careful about cleaning up per-appdomain allocations
+ // and about synchronizing with the GC using the delayed unload list. We need to wait for the next Gen2 GC
+ // to finish to ensure that the GC heap holds no references to the MethodTables being unloaded.
+
+ pCurrentLoaderAllocator->CleanupFailedTypeInit();
+
+ pCurrentLoaderAllocator->CleanupHandles();
+
+ GCX_COOP();
+ SystemDomain::System()->AddToDelayedUnloadList(pCurrentLoaderAllocator);
+ }
+ else
+ {
+ // For appdomain unload, delete the loader allocator right away
+ delete pCurrentLoaderAllocator;
+ }
+ }
+} // AppDomain::ShutdownFreeLoaderAllocators
+
+//---------------------------------------------------------------------------------------
+//
+// Register the loader allocator for deletion in code:AppDomain::ShutdownFreeLoaderAllocators.
+//
+void AppDomain::RegisterLoaderAllocatorForDeletion(LoaderAllocator * pLoaderAllocator)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ CrstHolder ch(GetLoaderAllocatorReferencesLock());
+
+ pLoaderAllocator->m_pLoaderAllocatorDestroyNext = m_pDelayedLoaderAllocatorUnloadList;
+ m_pDelayedLoaderAllocatorUnloadList = pLoaderAllocator;
+}
+
+#ifdef FEATURE_CORECLR
+void AppDomain::ShutdownNativeDllSearchDirectories()
+{
+ LIMITED_METHOD_CONTRACT;
+ // Free the native DLL search directory paths
+ PathIterator i = IterateNativeDllSearchDirectories();
+
+ while (i.Next())
+ {
+ delete i.GetPath();
+ }
+
+ m_NativeDllSearchDirectories.Clear();
+}
+#endif
+
+void AppDomain::ReleaseDomainBoundInfo()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Notify the assembly cache that the appdomain is unloading
+ m_AssemblyCache.OnAppDomainUnload();
+
+ AssemblyIterator i = IterateAssembliesEx( (AssemblyIterationFlags)(kIncludeFailedToLoad) );
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (i.Next(pDomainAssembly.This()))
+ {
+ pDomainAssembly->ReleaseManagedData();
+ }
+}
+
+void AppDomain::ReleaseFiles()
+{
+ STANDARD_VM_CONTRACT;
+
+ // Release files for all assemblies in this domain
+ AssemblyIterator i = IterateAssembliesEx((AssemblyIterationFlags)(
+ kIncludeLoaded | kIncludeExecution | kIncludeIntrospection | kIncludeFailedToLoad | kIncludeLoading));
+ CollectibleAssemblyHolder<DomainAssembly *> pAsm;
+
+ while (i.Next(pAsm.This()))
+ {
+ if (pAsm->GetCurrentAssembly() == NULL)
+ {
+ // Might be domain neutral or not, but should have no live objects as it has not been
+ // really loaded yet. Just reset it.
+ _ASSERTE(FitsIn<DWORD>(i.GetIndex()));
+ m_Assemblies.Set(this, static_cast<DWORD>(i.GetIndex()), NULL);
+ delete pAsm.Extract();
+ }
+ else
+ {
+ if (!pAsm->GetCurrentAssembly()->IsDomainNeutral())
+ pAsm->ReleaseFiles();
+ }
+ }
+} // AppDomain::ReleaseFiles
+
+
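+//---------------------------------------------------------------------------------------
+//
+// Hand out nRequested OBJECTREF slots from the large heap handle table, lazily creating
+// the table on first use. The unlocked probe of *ppLazyAllocate is a double-checked
+// locking fast path: if another thread has already published a result, we return it
+// without taking m_LargeHeapHandleTableCrst.
+//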
+OBJECTREF* BaseDomain::AllocateObjRefPtrsInLargeTable(int nRequested, OBJECTREF** ppLazyAllocate, BOOL bCrossAD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION((nRequested > 0));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (ppLazyAllocate && *ppLazyAllocate)
+ {
+ // Allocation already happened
+ return *ppLazyAllocate;
+ }
+
+ // Enter preemptive state, take the lock and go back to cooperative mode.
+ {
+ CrstHolder ch(&m_LargeHeapHandleTableCrst);
+ GCX_COOP();
+
+ if (ppLazyAllocate && *ppLazyAllocate)
+ {
+ // Allocation already happened
+ return *ppLazyAllocate;
+ }
+
+ // Make sure the large heap handle table is initialized.
+ if (!m_pLargeHeapHandleTable)
+ InitLargeHeapHandleTable();
+
+ // Allocate the handles.
+ OBJECTREF* result = m_pLargeHeapHandleTable->AllocateHandles(nRequested, bCrossAD);
+
+ if (ppLazyAllocate)
+ {
+ *ppLazyAllocate = result;
+ }
+
+ return result;
+ }
+}
+#endif // CROSSGEN_COMPILE
+
+#endif // !DACCESS_COMPILE
+
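+//---------------------------------------------------------------------------------------
+//
+// Compute the domain that owns a generic instantiation: the defining domain if it is an
+// AppDomain, otherwise the first class or method type argument that lives in an
+// AppDomain, otherwise the defining domain itself, falling back to the SystemDomain.
+// In effect, an instantiation is no more shareable than its least shareable component.
+//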
+/*static*/
+PTR_BaseDomain BaseDomain::ComputeBaseDomain(
+ BaseDomain * pGenericDefinitionDomain, // the domain that owns the generic type or method
+ Instantiation classInst, // the type arguments to the type (if any)
+ Instantiation methodInst) // the type arguments to the method (if any)
+{
+ CONTRACT(PTR_BaseDomain)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ SUPPORTS_DAC;
+ SO_TOLERANT;
+ }
+ CONTRACT_END
+
+ if (pGenericDefinitionDomain && pGenericDefinitionDomain->IsAppDomain())
+ RETURN PTR_BaseDomain(pGenericDefinitionDomain);
+
+ for (DWORD i = 0; i < classInst.GetNumArgs(); i++)
+ {
+ PTR_BaseDomain pArgDomain = classInst[i].GetDomain();
+ if (pArgDomain->IsAppDomain())
+ RETURN pArgDomain;
+ }
+
+ for (DWORD i = 0; i < methodInst.GetNumArgs(); i++)
+ {
+ PTR_BaseDomain pArgDomain = methodInst[i].GetDomain();
+ if (pArgDomain->IsAppDomain())
+ RETURN pArgDomain;
+ }
+ RETURN (pGenericDefinitionDomain ?
+ PTR_BaseDomain(pGenericDefinitionDomain) :
+ PTR_BaseDomain(SystemDomain::System()));
+}
+
+PTR_BaseDomain BaseDomain::ComputeBaseDomain(TypeKey * pKey)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+
+ if (pKey->GetKind() == ELEMENT_TYPE_CLASS)
+ return BaseDomain::ComputeBaseDomain(pKey->GetModule()->GetDomain(),
+ pKey->GetInstantiation());
+ else if (pKey->GetKind() != ELEMENT_TYPE_FNPTR)
+ return pKey->GetElementType().GetDomain();
+ else
+ return BaseDomain::ComputeBaseDomain(NULL,Instantiation(pKey->GetRetAndArgTypes(), pKey->GetNumArgs()+1));
+}
+
+#ifndef DACCESS_COMPILE
+
+// Insert class in the hash table
+void AppDomain::InsertClassForCLSID(MethodTable* pMT, BOOL fForceInsert /*=FALSE*/)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_ANY;
+ THROWS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ CVID cvid;
+
+ // Ensure that registered classes are activated for allocation
+ pMT->EnsureInstanceActive();
+
+ // Note that it is possible for multiple classes to claim the same CLSID, and in such a
+ // case it is arbitrary which one we will return for a future query for a given app domain.
+
+ pMT->GetGuid(&cvid, fForceInsert);
+
+ if (!IsEqualIID(cvid, GUID_NULL))
+ {
+ //<TODO>@todo get a better key</TODO>
+ LPVOID val = (LPVOID)pMT;
+ {
+ LockHolder lh(this);
+
+ if (LookupClass(cvid) != pMT)
+ {
+ m_clsidHash.InsertValue(GetKeyFromGUID(&cvid), val);
+ }
+ }
+ }
+}
+
+void AppDomain::InsertClassForCLSID(MethodTable* pMT, GUID *pGuid)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(pGuid));
+ }
+ CONTRACT_END;
+
+ LPVOID val = (LPVOID)pMT;
+ {
+ LockHolder lh(this);
+
+ CVID* cvid = pGuid;
+ if (LookupClass(*cvid) != pMT)
+ {
+ m_clsidHash.InsertValue(GetKeyFromGUID(pGuid), val);
+ }
+ }
+
+ RETURN;
+}
+#endif // DACCESS_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+
+#ifndef DACCESS_COMPILE
+void AppDomain::CacheTypeByName(const SString &ssClassName, const UINT vCacheVersion, TypeHandle typeHandle, BYTE bFlags, BOOL bReplaceExisting /*= FALSE*/)
+{
+ WRAPPER_NO_CONTRACT;
+ LockHolder lh(this);
+ CacheTypeByNameWorker(ssClassName, vCacheVersion, typeHandle, bFlags, bReplaceExisting);
+}
+
+void AppDomain::CacheTypeByNameWorker(const SString &ssClassName, const UINT vCacheVersion, TypeHandle typeHandle, BYTE bFlags, BOOL bReplaceExisting /*= FALSE*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(!typeHandle.IsNull());
+ }
+ CONTRACTL_END;
+
+ NewArrayHolder<WCHAR> wzClassName(DuplicateStringThrowing(ssClassName.GetUnicode()));
+
+ if (m_vNameToTypeMapVersion != vCacheVersion)
+ return;
+
+ if (m_pNameToTypeMap == nullptr)
+ {
+ m_pNameToTypeMap = new NameToTypeMapTable();
+ }
+
+ NameToTypeMapEntry e;
+ e.m_key.m_wzName = wzClassName;
+ e.m_key.m_cchName = ssClassName.GetCount();
+ e.m_typeHandle = typeHandle;
+ e.m_nEpoch = this->m_nEpoch;
+ e.m_bFlags = bFlags;
+ if (!bReplaceExisting)
+ m_pNameToTypeMap->Add(e);
+ else
+ m_pNameToTypeMap->AddOrReplace(e);
+
+ wzClassName.SuppressRelease();
+}
+#endif // DACCESS_COMPILE
+
+TypeHandle AppDomain::LookupTypeByName(const SString &ssClassName, UINT* pvCacheVersion, BYTE *pbFlags)
+{
+ WRAPPER_NO_CONTRACT;
+ LockHolder lh(this);
+ return LookupTypeByNameWorker(ssClassName, pvCacheVersion, pbFlags);
+}
+
+TypeHandle AppDomain::LookupTypeByNameWorker(const SString &ssClassName, UINT* pvCacheVersion, BYTE *pbFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pbFlags, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ *pvCacheVersion = m_vNameToTypeMapVersion;
+
+ if (m_pNameToTypeMap == nullptr)
+ return TypeHandle(); // a null TypeHandle
+
+ NameToTypeMapEntry::Key key;
+ key.m_cchName = ssClassName.GetCount();
+ key.m_wzName = ssClassName.GetUnicode();
+
+ const NameToTypeMapEntry * pEntry = m_pNameToTypeMap->LookupPtr(key);
+ if (pEntry == NULL)
+ return TypeHandle(); // a null TypeHandle
+
+ if (pbFlags != NULL)
+ *pbFlags = pEntry->m_bFlags;
+
+ return pEntry->m_typeHandle;
+}
+
+PTR_MethodTable AppDomain::LookupTypeByGuid(const GUID & guid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ SString sGuid;
+ {
+ WCHAR wszGuid[64];
+ GuidToLPWSTR(guid, wszGuid, _countof(wszGuid));
+ sGuid.Append(wszGuid);
+ }
+ UINT ver;
+ TypeHandle th = LookupTypeByName(sGuid, &ver, NULL);
+
+ if (!th.IsNull())
+ {
+ _ASSERTE(!th.IsTypeDesc());
+ return th.AsMethodTable();
+ }
+
+#ifdef FEATURE_PREJIT
+ else
+ {
+ // Next look in each ngen'ed image in turn
+ AssemblyIterator assemblyIterator = IterateAssembliesEx((AssemblyIterationFlags)(
+ kIncludeLoaded | kIncludeExecution));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+ while (assemblyIterator.Next(pDomainAssembly.This()))
+ {
+ CollectibleAssemblyHolder<Assembly *> pAssembly = pDomainAssembly->GetLoadedAssembly();
+
+ DomainAssembly::ModuleIterator i = pDomainAssembly->IterateModules(kModIterIncludeLoaded);
+ while (i.Next())
+ {
+ Module * pModule = i.GetLoadedModule();
+ if (!pModule->HasNativeImage())
+ continue;
+ _ASSERTE(!pModule->IsCollectible());
+ PTR_MethodTable pMT = pModule->LookupTypeByGuid(guid);
+ if (pMT != NULL)
+ {
+ return pMT;
+ }
+ }
+ }
+ }
+#endif // FEATURE_PREJIT
+ return NULL;
+}
+
+#ifndef DACCESS_COMPILE
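+// Cache the GUID -> MethodTable mapping for a WinRT type. The GUID is stored in its
+// "{...}" string form so the entry can share the name-to-type map used by
+// LookupTypeByName; an existing entry for a native (projected-from-WinRT) type with the
+// same GUID is replaced, otherwise the new mapping is simply added.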
+void AppDomain::CacheWinRTTypeByGuid(TypeHandle typeHandle)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(!typeHandle.IsTypeDesc());
+ PRECONDITION(CanCacheWinRTTypeByGuid(typeHandle));
+ }
+ CONTRACTL_END;
+
+ PTR_MethodTable pMT = typeHandle.AsMethodTable();
+
+ GUID guid;
+ if (pMT->GetGuidForWinRT(&guid))
+ {
+ SString sGuid;
+
+ {
+ WCHAR wszGuid[64];
+ GuidToLPWSTR(guid, wszGuid, _countof(wszGuid));
+ sGuid.Append(wszGuid);
+ }
+
+ BYTE bFlags = 0x80;
+ TypeHandle th;
+ UINT vCacheVersion;
+ {
+ LockHolder lh(this);
+ th = LookupTypeByNameWorker(sGuid, &vCacheVersion, &bFlags);
+
+ if (th.IsNull())
+ {
+ // no other entry with the same GUID exists in the cache
+ CacheTypeByNameWorker(sGuid, vCacheVersion, typeHandle, bFlags);
+ }
+ else if (typeHandle.AsMethodTable() != th.AsMethodTable() && th.IsProjectedFromWinRT())
+ {
+ // If we found a native WinRT type cached with the same GUID, replace it.
+ // Otherwise simply add the new mapping to the cache.
+ CacheTypeByNameWorker(sGuid, vCacheVersion, typeHandle, bFlags, TRUE);
+ }
+ }
+ }
+}
+#endif // DACCESS_COMPILE
+
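+// Return the WinRT types (and their GUIDs) cached since minEpoch, gathered from this
+// domain's name-to-type map and from any loaded ngen images. The epoch counter is
+// bumped on each call, so a caller can use the returned *pCurEpoch for incremental
+// queries.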
+void AppDomain::GetCachedWinRTTypes(
+ SArray<PTR_MethodTable> * pTypes,
+ SArray<GUID> * pGuids,
+ UINT minEpoch,
+ UINT * pCurEpoch)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ LockHolder lh(this);
+
+ for (auto it = m_pNameToTypeMap->Begin(), end = m_pNameToTypeMap->End();
+ it != end;
+ ++it)
+ {
+ NameToTypeMapEntry entry = (NameToTypeMapEntry)(*it);
+ TypeHandle th = entry.m_typeHandle;
+ if (th.AsMethodTable() != NULL &&
+ entry.m_key.m_wzName[0] == W('{') &&
+ entry.m_nEpoch >= minEpoch)
+ {
+ _ASSERTE(!th.IsTypeDesc());
+ PTR_MethodTable pMT = th.AsMethodTable();
+ // we're parsing the GUID value from the cache, because projected types do not cache the
+ // COM GUID in their GetGuid() but rather the legacy GUID
+ GUID iid;
+ if (LPWSTRToGuid(&iid, entry.m_key.m_wzName, 38) && iid != GUID_NULL)
+ {
+ pTypes->Append(pMT);
+ pGuids->Append(iid);
+ }
+ }
+ }
+
+#ifdef FEATURE_PREJIT
+ // Next look in each ngen'ed image in turn
+ AssemblyIterator assemblyIterator = IterateAssembliesEx((AssemblyIterationFlags)(
+ kIncludeLoaded | kIncludeExecution));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+ while (assemblyIterator.Next(pDomainAssembly.This()))
+ {
+ CollectibleAssemblyHolder<Assembly *> pAssembly = pDomainAssembly->GetLoadedAssembly();
+
+ DomainAssembly::ModuleIterator i = pDomainAssembly->IterateModules(kModIterIncludeLoaded);
+ while (i.Next())
+ {
+ Module * pModule = i.GetLoadedModule();
+ if (!pModule->HasNativeImage())
+ continue;
+ _ASSERTE(!pModule->IsCollectible());
+
+ pModule->GetCachedWinRTTypes(pTypes, pGuids);
+ }
+ }
+#endif // FEATURE_PREJIT
+
+ if (pCurEpoch != NULL)
+ *pCurEpoch = m_nEpoch;
+ ++m_nEpoch;
+}
+
+#ifndef CROSSGEN_COMPILE
+#ifndef DACCESS_COMPILE
+// static
+void WinRTFactoryCacheTraits::OnDestructPerEntryCleanupAction(const WinRTFactoryCacheEntry& e)
+{
+ WRAPPER_NO_CONTRACT;
+ if (e.m_pCtxEntry != NULL)
+ {
+ e.m_pCtxEntry->Release();
+ }
+ // the AD is going away, no need to destroy the OBJECTHANDLE
+}
+
+void AppDomain::CacheWinRTFactoryObject(MethodTable *pClassMT, OBJECTREF *refFactory, LPVOID lpCtxCookie)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pClassMT));
+ }
+ CONTRACTL_END;
+
+ CtxEntryHolder pNewCtxEntry;
+ if (lpCtxCookie != NULL)
+ {
+ // We don't want to insert the context cookie in the cache because it's just an address
+ // of an internal COM data structure which will be freed when the apartment is torn down.
+ // What's worse, if another apartment is later created, its context cookie may have exactly
+ // the same value leading to incorrect cache hits. We'll use our CtxEntry instead which
+ // is ref-counted and keeps the COM data structure alive even after the apartment ceases
+ // to exist.
+ pNewCtxEntry = CtxEntryCache::GetCtxEntryCache()->FindCtxEntry(lpCtxCookie, GetThread());
+ }
+
+ WinRTFactoryCacheLockHolder lh(this);
+
+ if (m_pWinRTFactoryCache == nullptr)
+ {
+ m_pWinRTFactoryCache = new WinRTFactoryCache();
+ }
+
+ WinRTFactoryCacheEntry *pEntry = const_cast<WinRTFactoryCacheEntry*>(m_pWinRTFactoryCache->LookupPtr(pClassMT));
+ if (!pEntry)
+ {
+ //
+ // No existing entry for this cache
+ // Create a new one
+ //
+ WinRTFactoryCacheEntry e;
+
+ OBJECTHANDLEHolder ohNewHandle(CreateHandle(*refFactory));
+
+ e.key = pClassMT;
+ e.m_pCtxEntry = pNewCtxEntry;
+ e.m_ohFactoryObject = ohNewHandle;
+
+ m_pWinRTFactoryCache->Add(e);
+
+ // suppress release of the CtxEntry and handle after we successfully inserted the new entry
+ pNewCtxEntry.SuppressRelease();
+ ohNewHandle.SuppressRelease();
+ }
+ else
+ {
+ //
+ // Existing entry
+ //
+ // release the old CtxEntry and update the entry
+ CtxEntry *pTemp = pNewCtxEntry.Extract();
+ pNewCtxEntry = pEntry->m_pCtxEntry;
+ pEntry->m_pCtxEntry = pTemp;
+
+ HndAssignHandle(pEntry->m_ohFactoryObject, *refFactory);
+ }
+}
+
+OBJECTREF AppDomain::LookupWinRTFactoryObject(MethodTable *pClassMT, LPVOID lpCtxCookie)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pClassMT));
+ PRECONDITION(CheckPointer(m_pWinRTFactoryCache, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (m_pWinRTFactoryCache == nullptr)
+ return NULL;
+
+ //
+ // Retrieve cached factory
+ //
+ WinRTFactoryCacheLockHolder lh(this);
+
+ const WinRTFactoryCacheEntry *pEntry = m_pWinRTFactoryCache->LookupPtr(pClassMT);
+ if (pEntry == NULL)
+ return NULL;
+
+ //
+ // Ignore factories from a different context, unless lpCtxCookie == NULL,
+ // which means the factory is free-threaded
+ // Note that we cannot touch the RCW to retrieve cookie at this point
+ // because the RCW might belong to a STA thread and that STA thread might die
+ // and take the RCW with it. Therefore we have to save cookie in this cache
+ //
+ if (pEntry->m_pCtxEntry == NULL || pEntry->m_pCtxEntry->GetCtxCookie() == lpCtxCookie)
+ return ObjectFromHandle(pEntry->m_ohFactoryObject);
+
+ return NULL;
+}
+
+void AppDomain::RemoveWinRTFactoryObjects(LPVOID pCtxCookie)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pWinRTFactoryCache == nullptr)
+ return;
+
+ // helper class for delayed CtxEntry cleanup
+ class CtxEntryListReleaseHolder
+ {
+ public:
+ CQuickArrayList<CtxEntry *> m_list;
+
+ ~CtxEntryListReleaseHolder()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for (SIZE_T i = 0; i < m_list.Size(); i++)
+ {
+ m_list[i]->Release();
+ }
+ }
+ } ctxEntryListReleaseHolder;
+
+ GCX_COOP();
+ {
+ WinRTFactoryCacheLockHolder lh(this);
+
+ // Go through the hash table and remove items in the given context
+ for (WinRTFactoryCache::Iterator it = m_pWinRTFactoryCache->Begin(); it != m_pWinRTFactoryCache->End(); it++)
+ {
+ if (it->m_pCtxEntry != NULL && it->m_pCtxEntry->GetCtxCookie() == pCtxCookie)
+ {
+ // Releasing the CtxEntry may trigger GC which we can't do under the lock so we push
+ // it on our local list and release them all after we're done iterating the hashtable.
+ ctxEntryListReleaseHolder.m_list.Push(it->m_pCtxEntry);
+
+ DestroyHandle(it->m_ohFactoryObject);
+ m_pWinRTFactoryCache->Remove(it);
+ }
+ }
+ }
+}
+
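+// Return the managed System.Reflection.Missing.Value sentinel, lazily creating a handle
+// to it on first use. A race between initializers is settled by the interlocked
+// compare-exchange; the losing thread destroys its redundant handle.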
+OBJECTREF AppDomain::GetMissingObject()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (!m_hndMissing)
+ {
+ // Get the field
+ FieldDesc *pValueFD = MscorlibBinder::GetField(FIELD__MISSING__VALUE);
+
+ pValueFD->CheckRunClassInitThrowing();
+
+ // Retrieve the value static field and store it.
+ OBJECTHANDLE hndMissing = CreateHandle(pValueFD->GetStaticOBJECTREF());
+
+ if (FastInterlockCompareExchangePointer(&m_hndMissing, hndMissing, NULL) != NULL)
+ {
+ // Exchanged failed. The m_hndMissing did not equal NULL and was returned.
+ DestroyHandle(hndMissing);
+ }
+ }
+
+ return ObjectFromHandle(m_hndMissing);
+}
+
+#endif // DACCESS_COMPILE
+#endif //CROSSGEN_COMPILE
+#endif // FEATURE_COMINTEROP
+
+#ifndef DACCESS_COMPILE
+
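+// Return this domain's interop marshaling data, lazily allocating it on the loader
+// allocator's low-frequency heap. The double-check under m_InteropDataCrst keeps the
+// common already-allocated path lock-free.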
+EEMarshalingData *BaseDomain::GetMarshalingData()
+{
+ CONTRACT (EEMarshalingData*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(m_pMarshalingData));
+ }
+ CONTRACT_END;
+
+ if (!m_pMarshalingData)
+ {
+ // Take the lock
+ CrstHolder holder(&m_InteropDataCrst);
+
+ if (!m_pMarshalingData)
+ {
+ LoaderHeap* pHeap = GetLoaderAllocator()->GetLowFrequencyHeap();
+ m_pMarshalingData = new (pHeap) EEMarshalingData(this, pHeap, &m_DomainCrst);
+ }
+ }
+
+ RETURN m_pMarshalingData;
+}
+
+void BaseDomain::DeleteMarshalingData()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // We are in shutdown - no need to take any lock
+ if (m_pMarshalingData)
+ {
+ delete m_pMarshalingData;
+ m_pMarshalingData = NULL;
+ }
+}
+
+#ifndef CROSSGEN_COMPILE
+
+STRINGREF *BaseDomain::IsStringInterned(STRINGREF *pString)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pString));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ return GetLoaderAllocator()->IsStringInterned(pString);
+}
+
+STRINGREF *BaseDomain::GetOrInternString(STRINGREF *pString)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pString));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ return GetLoaderAllocator()->GetOrInternString(pString);
+}
+
+void BaseDomain::InitLargeHeapHandleTable()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_pLargeHeapHandleTable==NULL);
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ m_pLargeHeapHandleTable = new LargeHeapHandleTable(this, STATIC_OBJECT_TABLE_BUCKET_SIZE);
+
+#ifdef _DEBUG
+ m_pLargeHeapHandleTable->RegisterCrstDebug(&m_LargeHeapHandleTableCrst);
+#endif
+}
+
+#ifdef FEATURE_COMINTEROP
+MethodTable* AppDomain::GetLicenseInteropHelperMethodTable()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if(m_pLicenseInteropHelperMT == NULL)
+ {
+ // Do this work outside of the lock so we don't have an unbreakable lock condition
+
+ TypeHandle licenseMgrTypeHnd;
+ MethodDescCallSite loadLM(METHOD__MARSHAL__LOAD_LICENSE_MANAGER);
+
+ licenseMgrTypeHnd = (MethodTable*) loadLM.Call_RetLPVOID((ARG_SLOT*)NULL);
+
+ //
+ // Look up this method by name, because the type is actually declared in System.dll. <TODO>@todo: why?</TODO>
+ //
+
+ MethodDesc *pGetLIHMD = MemberLoader::FindMethod(licenseMgrTypeHnd.AsMethodTable(),
+ "GetLicenseInteropHelperType", &gsig_SM_Void_RetIntPtr);
+ _ASSERTE(pGetLIHMD);
+
+ TypeHandle lihTypeHnd;
+
+ MethodDescCallSite getLIH(pGetLIHMD);
+ lihTypeHnd = (MethodTable*) getLIH.Call_RetLPVOID((ARG_SLOT*)NULL);
+
+ BaseDomain::LockHolder lh(this);
+
+ if(m_pLicenseInteropHelperMT == NULL)
+ m_pLicenseInteropHelperMT = lihTypeHnd.AsMethodTable();
+ }
+ return m_pLicenseInteropHelperMT;
+}
+
+COMorRemotingFlag AppDomain::GetComOrRemotingFlag()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // 0. check if the value has already been set
+ if (m_COMorRemotingFlag != COMorRemoting_NotInitialized)
+ return m_COMorRemotingFlag;
+
+ // 1. check whether the process is AppX
+ if (AppX::IsAppXProcess())
+ {
+ // do not use Remoting in AppX
+ m_COMorRemotingFlag = COMorRemoting_COM;
+ return m_COMorRemotingFlag;
+ }
+
+ // 2. check the xml file
+ m_COMorRemotingFlag = GetPreferComInsteadOfManagedRemotingFromConfigFile();
+ if (m_COMorRemotingFlag != COMorRemoting_NotInitialized)
+ {
+ return m_COMorRemotingFlag;
+ }
+
+ // 3. check the global setting
+ if (NULL != g_pConfig && g_pConfig->ComInsteadOfManagedRemoting())
+ {
+ m_COMorRemotingFlag = COMorRemoting_COM;
+ }
+ else
+ {
+ m_COMorRemotingFlag = COMorRemoting_Remoting;
+ }
+
+ return m_COMorRemotingFlag;
+}
+
+BOOL AppDomain::GetPreferComInsteadOfManagedRemoting()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return (GetComOrRemotingFlag() == COMorRemoting_COM);
+}
+
+STDAPI GetXMLObjectEx(IXMLParser **ppv);
+
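+// Probe the application configuration file for the COM-versus-managed-remoting
+// preference, returning COMorRemoting_NotInitialized when no preference could be read
+// (missing or unparsable file, or any failure along the way). Without FEATURE_REMOTING
+// the answer is always COM.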
+COMorRemotingFlag AppDomain::GetPreferComInsteadOfManagedRemotingFromConfigFile()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_REMOTING
+ COMorRemotingFlag res = COMorRemoting_NotInitialized;
+ NonVMComHolder<IXMLParser> pIXMLParser(NULL);
+ NonVMComHolder<IStream> pFile(NULL);
+ NonVMComHolder<AppDomainConfigFactory> factory(NULL);
+
+ EX_TRY
+ {
+ HRESULT hr;
+ CQuickBytes qb;
+
+ // get config file URL which is a combination of app base and config file name
+ IfFailGo(m_pFusionContext->PrefetchAppConfigFile());
+
+ LPWSTR wzConfigFileUrl = (LPWSTR)qb.AllocThrows(MAX_URL_LENGTH * sizeof(WCHAR));
+ DWORD dwSize = static_cast<DWORD>(qb.Size());
+
+ IfFailGo(m_pFusionContext->Get(ACTAG_APP_CFG_LOCAL_FILEPATH, wzConfigFileUrl, &dwSize, 0));
+
+ IfFailGo(CreateConfigStream(wzConfigFileUrl, &pFile));
+
+ IfFailGo(GetXMLObjectEx(&pIXMLParser));
+
+ factory = new (nothrow) AppDomainConfigFactory();
+
+ if (!factory) {
+ goto ErrExit;
+ }
+ factory->AddRef(); // RefCount = 1
+
+
+ IfFailGo(pIXMLParser->SetInput(pFile)); // filestream's RefCount=2
+
+ IfFailGo(pIXMLParser->SetFactory(factory)); // factory's RefCount=2
+
+ IfFailGo(pIXMLParser->Run(-1));
+
+ res = factory->GetCOMorRemotingFlag();
+ErrExit: ;
+
+ }
+ EX_CATCH
+ {
+ ;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return res;
+#else // FEATURE_REMOTING
+ return COMorRemoting_COM;
+#endif // FEATURE_REMOTING
+}
+#endif // FEATURE_COMINTEROP
+
+#endif // CROSSGEN_COMPILE
+
+//*****************************************************************************
+//*****************************************************************************
+//*****************************************************************************
+
+void *SystemDomain::operator new(size_t size, void *pInPlace)
+{
+ LIMITED_METHOD_CONTRACT;
+ return pInPlace;
+}
+
+
+void SystemDomain::operator delete(void *pMem)
+{
+ LIMITED_METHOD_CONTRACT;
+ // Do nothing - new() was in-place
+}
+
+
+void SystemDomain::SetCompilationOverrides(BOOL fForceDebug,
+ BOOL fForceProfiling,
+ BOOL fForceInstrument)
+{
+ LIMITED_METHOD_CONTRACT;
+ s_fForceDebug = fForceDebug;
+ s_fForceProfiling = fForceProfiling;
+ s_fForceInstrument = fForceInstrument;
+}
+
+#endif //!DACCESS_COMPILE
+
+void SystemDomain::GetCompilationOverrides(BOOL * fForceDebug,
+ BOOL * fForceProfiling,
+ BOOL * fForceInstrument)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ *fForceDebug = s_fForceDebug;
+ *fForceProfiling = s_fForceProfiling;
+ *fForceInstrument = s_fForceInstrument;
+}
+
+#ifndef DACCESS_COMPILE
+
+void SystemDomain::Attach()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_pSystemDomain == NULL);
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+#ifndef CROSSGEN_COMPILE
+ // Initialize stub managers
+ PrecodeStubManager::Init();
+ DelegateInvokeStubManager::Init();
+ JumpStubStubManager::Init();
+ RangeSectionStubManager::Init();
+ ILStubManager::Init();
+ InteropDispatchStubManager::Init();
+ StubLinkStubManager::Init();
+
+ ThunkHeapStubManager::Init();
+
+ TailCallStubManager::Init();
+
+ PerAppDomainTPCountList::InitAppDomainIndexList();
+#endif // CROSSGEN_COMPILE
+
+ m_appDomainIndexList.Init();
+ m_appDomainIdList.Init();
+
+ m_SystemDomainCrst.Init(CrstSystemDomain, (CrstFlags)(CRST_REENTRANCY | CRST_TAKEN_DURING_SHUTDOWN));
+ m_DelayedUnloadCrst.Init(CrstSystemDomainDelayedUnloadList, CRST_UNSAFE_COOPGC);
+
+ // Initialize the ID dispenser that is used for domain neutral module IDs
+ g_pModuleIndexDispenser = new IdDispenser();
+
+ // Create the global SystemDomain and initialize it.
+ m_pSystemDomain = new (&g_pSystemDomainMemory[0]) SystemDomain();
+ // No way it can fail since g_pSystemDomainMemory is a static array.
+ CONSISTENCY_CHECK(CheckPointer(m_pSystemDomain));
+
+ LOG((LF_CLASSLOADER,
+ LL_INFO10,
+ "Created system domain at %p\n",
+ m_pSystemDomain));
+
+ // We need to initialize the memory pools etc. for the system domain.
+ m_pSystemDomain->BaseDomain::Init(); // Setup the memory heaps
+
+ // Create the default domain
+ m_pSystemDomain->CreateDefaultDomain();
+ SharedDomain::Attach();
+
+ // Each domain gets its own ReJitManager, and ReJitManager has its own static
+ // initialization to run
+ ReJitManager::InitStatic();
+}
+
+#ifndef CROSSGEN_COMPILE
+
+void SystemDomain::DetachBegin()
+{
+ WRAPPER_NO_CONTRACT;
+ // Shut down the domain and its children (but don't deallocate anything just
+ // yet).
+
+ // TODO: we should really not be running managed DllMain during process detach.
+ if (GetThread() == NULL)
+ {
+ return;
+ }
+
+ if(m_pSystemDomain)
+ m_pSystemDomain->Stop();
+}
+
+void SystemDomain::DetachEnd()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ // Shut down the domain and its children (but don't deallocate anything just
+ // yet).
+ if(m_pSystemDomain)
+ {
+ GCX_PREEMP();
+ m_pSystemDomain->ClearFusionContext();
+ if (m_pSystemDomain->m_pDefaultDomain)
+ m_pSystemDomain->m_pDefaultDomain->ClearFusionContext();
+ }
+}
+
+void SystemDomain::Stop()
+{
+ WRAPPER_NO_CONTRACT;
+ AppDomainIterator i(TRUE);
+
+ while (i.Next())
+ if (i.GetDomain()->m_Stage < AppDomain::STAGE_CLEARED)
+ i.GetDomain()->Stop();
+}
+
+
+void SystemDomain::Terminate() // bNotifyProfiler is ignored
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // This ignores the references and terminates the appdomains
+ AppDomainIterator i(FALSE);
+
+ while (i.Next())
+ {
+ delete i.GetDomain();
+ // Keep the iterator from Releasing the current domain
+ i.m_pCurrent = NULL;
+ }
+
+ if (m_pSystemFile != NULL) {
+ m_pSystemFile->Release();
+ m_pSystemFile = NULL;
+ }
+
+ m_pSystemAssembly = NULL;
+
+ if(m_pwDevpath) {
+ delete[] m_pwDevpath;
+ m_pwDevpath = NULL;
+ }
+ m_dwDevpath = 0;
+ m_fDevpath = FALSE;
+
+ if (m_pGlobalStringLiteralMap) {
+ delete m_pGlobalStringLiteralMap;
+ m_pGlobalStringLiteralMap = NULL;
+ }
+
+
+ SharedDomain::Detach();
+
+ BaseDomain::Terminate();
+
+#ifdef FEATURE_COMINTEROP
+ if (g_pRCWCleanupList != NULL)
+ delete g_pRCWCleanupList;
+#endif // FEATURE_COMINTEROP
+ m_GlobalAllocator.Terminate();
+}
+
+
+void SystemDomain::PreallocateSpecialObjects()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(g_pPreallocatedSentinelObject == NULL);
+
+ OBJECTREF pPreallocatedSentinelObject = AllocateObject(g_pObjectClass);
+#if CHECK_APP_DOMAIN_LEAKS
+ pPreallocatedSentinelObject->SetSyncBlockAppDomainAgile();
+#endif
+ g_pPreallocatedSentinelObject = CreatePinningHandle( pPreallocatedSentinelObject );
+
+#ifdef FEATURE_PREJIT
+ if (SystemModule()->HasNativeImage())
+ {
+ CORCOMPILE_EE_INFO_TABLE *pEEInfo = SystemModule()->GetNativeImage()->GetNativeEEInfoTable();
+ pEEInfo->emptyString = (CORINFO_Object **)StringObject::GetEmptyStringRefPtr();
+ }
+#endif
+}
+
+void SystemDomain::CreatePreallocatedExceptions()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ EXCEPTIONREF pBaseException = (EXCEPTIONREF)AllocateObject(g_pExceptionClass);
+ pBaseException->SetHResult(COR_E_EXCEPTION);
+ pBaseException->SetXCode(EXCEPTION_COMPLUS);
+ _ASSERTE(g_pPreallocatedBaseException == NULL);
+ g_pPreallocatedBaseException = CreateHandle(pBaseException);
+
+
+ EXCEPTIONREF pOutOfMemory = (EXCEPTIONREF)AllocateObject(g_pOutOfMemoryExceptionClass);
+ pOutOfMemory->SetHResult(COR_E_OUTOFMEMORY);
+ pOutOfMemory->SetXCode(EXCEPTION_COMPLUS);
+ _ASSERTE(g_pPreallocatedOutOfMemoryException == NULL);
+ g_pPreallocatedOutOfMemoryException = CreateHandle(pOutOfMemory);
+
+
+ EXCEPTIONREF pStackOverflow = (EXCEPTIONREF)AllocateObject(g_pStackOverflowExceptionClass);
+ pStackOverflow->SetHResult(COR_E_STACKOVERFLOW);
+ pStackOverflow->SetXCode(EXCEPTION_COMPLUS);
+ _ASSERTE(g_pPreallocatedStackOverflowException == NULL);
+ g_pPreallocatedStackOverflowException = CreateHandle(pStackOverflow);
+
+
+ EXCEPTIONREF pExecutionEngine = (EXCEPTIONREF)AllocateObject(g_pExecutionEngineExceptionClass);
+ pExecutionEngine->SetHResult(COR_E_EXECUTIONENGINE);
+ pExecutionEngine->SetXCode(EXCEPTION_COMPLUS);
+ _ASSERTE(g_pPreallocatedExecutionEngineException == NULL);
+ g_pPreallocatedExecutionEngineException = CreateHandle(pExecutionEngine);
+
+
+ EXCEPTIONREF pRudeAbortException = (EXCEPTIONREF)AllocateObject(g_pThreadAbortExceptionClass);
+#if CHECK_APP_DOMAIN_LEAKS
+ pRudeAbortException->SetSyncBlockAppDomainAgile();
+#endif
+ pRudeAbortException->SetHResult(COR_E_THREADABORTED);
+ pRudeAbortException->SetXCode(EXCEPTION_COMPLUS);
+ _ASSERTE(g_pPreallocatedRudeThreadAbortException == NULL);
+ g_pPreallocatedRudeThreadAbortException = CreateHandle(pRudeAbortException);
+
+
+ EXCEPTIONREF pAbortException = (EXCEPTIONREF)AllocateObject(g_pThreadAbortExceptionClass);
+#if CHECK_APP_DOMAIN_LEAKS
+ pAbortException->SetSyncBlockAppDomainAgile();
+#endif
+ pAbortException->SetHResult(COR_E_THREADABORTED);
+ pAbortException->SetXCode(EXCEPTION_COMPLUS);
+ _ASSERTE(g_pPreallocatedThreadAbortException == NULL);
+ g_pPreallocatedThreadAbortException = CreateHandle( pAbortException );
+}
+#endif // CROSSGEN_COMPILE
+
+void SystemDomain::Init()
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+#ifdef _DEBUG
+ LOG((
+ LF_EEMEM,
+ LL_INFO10,
+ "sizeof(EEClass) = %d\n"
+ "sizeof(MethodTable) = %d\n"
+ "sizeof(MethodDesc)= %d\n"
+ "sizeof(FieldDesc) = %d\n"
+ "sizeof(Module) = %d\n",
+ sizeof(EEClass),
+ sizeof(MethodTable),
+ sizeof(MethodDesc),
+ sizeof(FieldDesc),
+ sizeof(Module)
+ ));
+#endif // _DEBUG
+
+ // The base domain is initialized in SystemDomain::Attach()
+ // to allow stub caches to use the memory pool. Do not
+ // initialize it here!
+
+#ifndef CROSSGEN_COMPILE
+#ifdef _DEBUG
+ Context *curCtx = GetCurrentContext();
+#endif
+ _ASSERTE(curCtx);
+ _ASSERTE(curCtx->GetDomain() != NULL);
+#endif
+
+#ifdef _DEBUG
+ g_fVerifierOff = g_pConfig->IsVerifierOff();
+#endif
+
+#ifdef FEATURE_PREJIT
+ if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ZapDisable) != 0)
+ g_fAllowNativeImages = false;
+#endif
+
+ m_pSystemFile = NULL;
+ m_pSystemAssembly = NULL;
+
+ DWORD size = 0;
+
+#ifdef FEATURE_VERSIONING
+
+ // Get the install directory so we can find mscorlib
+ hr = GetInternalSystemDirectory(NULL, &size);
+ if (hr != HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ ThrowHR(hr);
+
+ // GetInternalSystemDirectory returns a size, including the null!
+ WCHAR *buffer = m_SystemDirectory.OpenUnicodeBuffer(size-1);
+ IfFailThrow(GetInternalSystemDirectory(buffer, &size));
+ m_SystemDirectory.CloseBuffer();
+ m_SystemDirectory.Normalize();
+
+ // At this point m_SystemDirectory should already be canonicalized
+
+#else
+
+ m_SystemDirectory = GetInternalSystemDirectory(&size);
+
+#endif // FEATURE_VERSIONING
+
+ m_BaseLibrary.Append(m_SystemDirectory);
+ m_BaseLibrary.Append(g_pwBaseLibrary);
+ m_BaseLibrary.Normalize();
+
+ LoadBaseSystemClasses();
+
+ {
+ // We are about to start allocating objects, so we must be in cooperative mode.
+ // However, many of the entrypoints to the system (DllGetClassObject and all
+ // N/Direct exports) get called multiple times. Sometimes they initialize the EE,
+ // but generally they remain in preemptive mode. So we really want to push/pop
+ // the state here:
+ GCX_COOP();
+
+#ifndef CROSSGEN_COMPILE
+ if (!NingenEnabled())
+ {
+ CreatePreallocatedExceptions();
+
+ PreallocateSpecialObjects();
+ }
+#endif
+
+ // Finish loading mscorlib now.
+ m_pSystemAssembly->GetDomainAssembly()->EnsureActive();
+#ifdef FEATURE_FUSION
+ // disable fusion log for m_pSystemFile, because m_pSystemFile will get reused
+ m_pSystemFile->DisableFusionLogging();
+#endif
+ }
+
+#ifdef _DEBUG
+ BOOL fPause = EEConfig::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_PauseOnLoad, FALSE);
+
+ while(fPause)
+ {
+ ClrSleepEx(20, TRUE);
+ }
+#endif // _DEBUG
+}
+
+#ifndef CROSSGEN_COMPILE
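+// Create the process-wide string literal map on first use. The map is built outside any
+// lock; the winner of the InterlockedCompareExchangeT publishes its instance and
+// suppresses the holder's release, while a losing thread's map is freed by its holder.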
+void SystemDomain::LazyInitGlobalStringLiteralMap()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Allocate the global string literal map.
+ NewHolder<GlobalStringLiteralMap> pGlobalStringLiteralMap(new GlobalStringLiteralMap());
+
+ // Initialize the global string literal map.
+ pGlobalStringLiteralMap->Init();
+
+ if (InterlockedCompareExchangeT<GlobalStringLiteralMap *>(&m_pGlobalStringLiteralMap, pGlobalStringLiteralMap, NULL) == NULL)
+ {
+ pGlobalStringLiteralMap.SuppressRelease();
+ }
+}
+
+void AppDomain::CreateADUnloadStartEvent()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ g_pUnloadStartEvent = new CLREvent();
+ g_pUnloadStartEvent->CreateAutoEvent(FALSE);
+}
+
+/*static*/ void SystemDomain::EnumAllStaticGCRefs(promote_func* fn, ScanContext* sc)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACT_END;
+
+ // We don't do a normal AppDomainIterator because we can't take the SystemDomain lock from
+ // here.
+ // We're only supposed to call this from a server GC. We're walking m_appDomainIdList here;
+ // each entry holds an AppDomain* or NULL. So the only danger is if we fetch an AppDomain
+ // and then, on some other thread, that AppDomain is deleted.
+ //
+ // If the thread deleting the AppDomain (AppDomain::~AppDomain) was in preemptive mode
+ // while we are doing SystemDomain::EnumAllStaticGCRefs, it will issue a GCX_COOP(), which
+ // will wait for the GC to finish, so we are safe.
+ //
+ // If the thread is in cooperative mode, it must have been suspended for the GC, so a delete
+ // can't happen.
+
+ _ASSERTE(GCHeap::IsGCInProgress() &&
+ GCHeap::IsServerHeap() &&
+ IsGCSpecialThread());
+
+ SystemDomain* sysDomain = SystemDomain::System();
+ if (sysDomain)
+ {
+ DWORD i;
+ DWORD count = (DWORD) m_appDomainIdList.GetCount();
+ for (i = 0 ; i < count ; i++)
+ {
+ AppDomain* pAppDomain = (AppDomain *)m_appDomainIdList.Get(i);
+ if (pAppDomain && pAppDomain->IsActive() && !pAppDomain->IsUnloading())
+ {
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ if (g_fEnableARM)
+ {
+ sc->pCurrentDomain = pAppDomain;
+ }
+#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ pAppDomain->EnumStaticGCRefs(fn, sc);
+ }
+ }
+ }
+
+ RETURN;
+}
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+void SystemDomain::ResetADSurvivedBytes()
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ _ASSERTE(GCHeap::IsGCInProgress());
+
+ SystemDomain* sysDomain = SystemDomain::System();
+ if (sysDomain)
+ {
+ DWORD i;
+ DWORD count = (DWORD) m_appDomainIdList.GetCount();
+ for (i = 0 ; i < count ; i++)
+ {
+ AppDomain* pAppDomain = (AppDomain *)m_appDomainIdList.Get(i);
+ if (pAppDomain && pAppDomain->IsUserActive())
+ {
+ pAppDomain->ResetSurvivedBytes();
+ }
+ }
+ }
+
+ RETURN;
+}
+
+ULONGLONG SystemDomain::GetADSurvivedBytes()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SystemDomain* sysDomain = SystemDomain::System();
+ ULONGLONG ullTotalADSurvived = 0;
+ if (sysDomain)
+ {
+ DWORD i;
+ DWORD count = (DWORD) m_appDomainIdList.GetCount();
+ for (i = 0 ; i < count ; i++)
+ {
+ AppDomain* pAppDomain = (AppDomain *)m_appDomainIdList.Get(i);
+ if (pAppDomain && pAppDomain->IsUserActive())
+ {
+ ULONGLONG ullSurvived = pAppDomain->GetSurvivedBytes();
+ ullTotalADSurvived += ullSurvived;
+ }
+ }
+ }
+
+ return ullTotalADSurvived;
+}
+
+void SystemDomain::RecordTotalSurvivedBytes(size_t totalSurvivedBytes)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ m_totalSurvivedBytes = totalSurvivedBytes;
+
+ SystemDomain* sysDomain = SystemDomain::System();
+ if (sysDomain)
+ {
+ DWORD i;
+ DWORD count = (DWORD) m_appDomainIdList.GetCount();
+ for (i = 0 ; i < count ; i++)
+ {
+ AppDomain* pAppDomain = (AppDomain *)m_appDomainIdList.Get(i);
+ if (pAppDomain && pAppDomain->IsUserActive())
+ {
+ FireEtwAppDomainMemSurvived((ULONGLONG)pAppDomain, pAppDomain->GetSurvivedBytes(), totalSurvivedBytes, GetClrInstanceId());
+ }
+ }
+ }
+
+ RETURN;
+}
+#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+// Only called when EE is suspended.
+DWORD SystemDomain::GetTotalNumSizedRefHandles()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SystemDomain* sysDomain = SystemDomain::System();
+ DWORD dwTotalNumSizedRefHandles = 0;
+ if (sysDomain)
+ {
+ DWORD i;
+ DWORD count = (DWORD) m_appDomainIdList.GetCount();
+ for (i = 0 ; i < count ; i++)
+ {
+ AppDomain* pAppDomain = (AppDomain *)m_appDomainIdList.Get(i);
+ if (pAppDomain && pAppDomain->IsActive() && !pAppDomain->IsUnloading())
+ {
+ dwTotalNumSizedRefHandles += pAppDomain->GetNumSizedRefHandles();
+ }
+ }
+ }
+
+ return dwTotalNumSizedRefHandles;
+}
+#endif // CROSSGEN_COMPILE
+
+void SystemDomain::LoadBaseSystemClasses()
+{
+ STANDARD_VM_CONTRACT;
+
+ ETWOnStartup(LdSysBases_V1, LdSysBasesEnd_V1);
+
+ {
+#ifdef FEATURE_FUSION
+ ETWOnStartup (FusionAppCtx_V1, FusionAppCtxEnd_V1);
+ // Setup fusion context for the system domain - this is used for binding mscorlib.
+ IfFailThrow(FusionBind::SetupFusionContext(m_SystemDirectory, NULL, &m_pFusionContext));
+
+ m_pSystemFile = PEAssembly::OpenSystem(m_pFusionContext);
+#else
+ m_pSystemFile = PEAssembly::OpenSystem(NULL);
+#endif // FEATURE_FUSION
+ }
+ // Only partially load the system assembly. Other parts of the code will want to access
+ // the globals in this function before finishing the load.
+ m_pSystemAssembly = DefaultDomain()->LoadDomainAssembly(NULL, m_pSystemFile, FILE_LOAD_POST_LOADLIBRARY, NULL)->GetCurrentAssembly();
+
+ // Set up binder for mscorlib
+ MscorlibBinder::AttachModule(m_pSystemAssembly->GetManifestModule());
+
+ // Load Object
+ g_pObjectClass = MscorlibBinder::GetClass(CLASS__OBJECT);
+
+ // get the Object::.ctor method desc so we can special-case it
+ g_pObjectCtorMD = MscorlibBinder::GetMethod(METHOD__OBJECT__CTOR);
+
+ // Now that ObjectClass is loaded, we can set up
+ // the system for finalizers. There is no point in deferring this, since we need
+ // to know this before we allocate our first object.
+ g_pObjectFinalizerMD = MscorlibBinder::GetMethod(METHOD__OBJECT__FINALIZE);
+
+
+ g_pCanonMethodTableClass = MscorlibBinder::GetClass(CLASS____CANON);
+
+ // NOTE: !!!IMPORTANT!!! ValueType and Enum MUST be loaded one immediately after
+ // the other, because we have coded MethodTable::IsChildValueType
+ // in such a way that it depends on this behaviour.
+ // Load the ValueType class
+ g_pValueTypeClass = MscorlibBinder::GetClass(CLASS__VALUE_TYPE);
+
+ // Load the enum class
+ g_pEnumClass = MscorlibBinder::GetClass(CLASS__ENUM);
+ _ASSERTE(!g_pEnumClass->IsValueType());
+
+ // Load System.RuntimeType
+ // We need to load this after ValueType and Enum because RuntimeType now
+ // contains an enum field (m_invocationFlags). Otherwise INVOCATION_FLAGS
+ // would be treated as a reference type and clr!SigPointer::GetTypeHandleThrowing
+ // throws an exception.
+ g_pRuntimeTypeClass = MscorlibBinder::GetClass(CLASS__CLASS);
+ _ASSERTE(g_pRuntimeTypeClass->IsFullyLoaded());
+
+ // Load Array class
+ g_pArrayClass = MscorlibBinder::GetClass(CLASS__ARRAY);
+
+ // Calling a method on IList<T> for an array requires redirection to a method on
+ // the SZArrayHelper class. Retrieving such methods means calling
+ // GetActualImplementationForArrayGenericIListMethod, which calls FetchMethod for
+ // the corresponding method on SZArrayHelper. This basically results in a class
+ // load due to a method call, which the debugger cannot handle, so we pre-load
+ // the SZArrayHelper class here.
+ g_pSZArrayHelperClass = MscorlibBinder::GetClass(CLASS__SZARRAYHELPER);
+
+ // Load Nullable class
+ g_pNullableClass = MscorlibBinder::GetClass(CLASS__NULLABLE);
+
+ // Load the Object array class.
+ g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT] = ClassLoader::LoadArrayTypeThrowing(TypeHandle(g_pObjectClass)).AsArray();
+
+ // We have delayed allocation of mscorlib's static handles until we load the object class
+ MscorlibBinder::GetModule()->AllocateRegularStaticHandles(DefaultDomain());
+
+ // used by MethodTable::ContainsStackPtr
+ g_TypedReferenceMT = MscorlibBinder::GetClass(CLASS__TYPED_REFERENCE);
+ g_ArgumentHandleMT = MscorlibBinder::GetClass(CLASS__ARGUMENT_HANDLE);
+ g_ArgIteratorMT = MscorlibBinder::GetClass(CLASS__ARG_ITERATOR);
+
+ // Make sure all primitive types are loaded
+ for (int et = ELEMENT_TYPE_VOID; et <= ELEMENT_TYPE_R8; et++)
+ MscorlibBinder::LoadPrimitiveType((CorElementType)et);
+
+ MscorlibBinder::LoadPrimitiveType(ELEMENT_TYPE_I);
+ MscorlibBinder::LoadPrimitiveType(ELEMENT_TYPE_U);
+
+ // Unfortunately, the following cannot be delay loaded, since the jit
+ // uses it to compute method attributes within a function that cannot
+ // handle a COM+ exception, and the following call goes through a path
+ // where a COM+ exception can be thrown. It is unfortunate, because
+ // we know that the delegate class and multicast delegate class are always
+ // guaranteed to be found.
+ g_pDelegateClass = MscorlibBinder::GetClass(CLASS__DELEGATE);
+ g_pMulticastDelegateClass = MscorlibBinder::GetClass(CLASS__MULTICAST_DELEGATE);
+
+ // used by IsImplicitInterfaceOfSZArray
+ MscorlibBinder::GetClass(CLASS__IENUMERABLEGENERIC);
+ MscorlibBinder::GetClass(CLASS__ICOLLECTIONGENERIC);
+ MscorlibBinder::GetClass(CLASS__ILISTGENERIC);
+#if !defined(FEATURE_CORECLR) || defined(FEATURE_COMINTEROP)
+ MscorlibBinder::GetClass(CLASS__IREADONLYCOLLECTIONGENERIC);
+ MscorlibBinder::GetClass(CLASS__IREADONLYLISTGENERIC);
+#endif
+
+ // Load String
+ g_pStringClass = MscorlibBinder::LoadPrimitiveType(ELEMENT_TYPE_STRING);
+ _ASSERTE(g_pStringClass->GetBaseSize() == ObjSizeOf(StringObject)+sizeof(WCHAR));
+ _ASSERTE(g_pStringClass->GetComponentSize() == 2);
+
+#ifndef CROSSGEN_COMPILE
+ ECall::PopulateManagedStringConstructors();
+
+ if (CLRIoCompletionHosted())
+ {
+ g_pOverlappedDataClass = MscorlibBinder::GetClass(CLASS__OVERLAPPEDDATA);
+ _ASSERTE (g_pOverlappedDataClass);
+ if (CorHost2::GetHostOverlappedExtensionSize() != 0)
+ {
+ // Overlapped may have an extension if a host hosts IO completion subsystem
+ DWORD instanceFieldBytes = g_pOverlappedDataClass->GetNumInstanceFieldBytes() + CorHost2::GetHostOverlappedExtensionSize();
+ _ASSERTE (instanceFieldBytes + ObjSizeOf(Object) >= MIN_OBJECT_SIZE);
+ DWORD baseSize = (DWORD) (instanceFieldBytes + ObjSizeOf(Object));
+ baseSize = (baseSize + ALLOC_ALIGN_CONSTANT) & ~ALLOC_ALIGN_CONSTANT; // m_BaseSize must be aligned
+ DWORD adjustSize = baseSize - g_pOverlappedDataClass->GetBaseSize();
+ CGCDesc* map = CGCDesc::GetCGCDescFromMT(g_pOverlappedDataClass);
+ CGCDescSeries * cur = map->GetHighestSeries();
+ _ASSERTE ((SSIZE_T)map->GetNumSeries() == 1);
+ cur->SetSeriesSize(cur->GetSeriesSize() - adjustSize);
+ g_pOverlappedDataClass->SetBaseSize(baseSize);
+ }
+ }
+#endif // CROSSGEN_COMPILE
+
+ g_pExceptionClass = MscorlibBinder::GetClass(CLASS__EXCEPTION);
+ g_pOutOfMemoryExceptionClass = MscorlibBinder::GetException(kOutOfMemoryException);
+ g_pStackOverflowExceptionClass = MscorlibBinder::GetException(kStackOverflowException);
+ g_pExecutionEngineExceptionClass = MscorlibBinder::GetException(kExecutionEngineException);
+ g_pThreadAbortExceptionClass = MscorlibBinder::GetException(kThreadAbortException);
+
+ // Used for determining whether a class has a critical finalizer
+ // To determine whether a class has a critical finalizer, we
+ // currently will simply see if it's parent class has a critical
+ // finalizer. To introduce a class with a critical finalizer,
+ // we'll explicitly load CriticalFinalizerObject and set the bit
+ // here.
+ g_pCriticalFinalizerObjectClass = MscorlibBinder::GetClass(CLASS__CRITICAL_FINALIZER_OBJECT);
+ _ASSERTE(g_pCriticalFinalizerObjectClass->HasCriticalFinalizer());
+
+ // used by gc to handle predefined agility checking
+ g_pThreadClass = MscorlibBinder::GetClass(CLASS__THREAD);
+
+#ifdef FEATURE_COMINTEROP
+ g_pBaseCOMObject = MscorlibBinder::GetClass(CLASS__COM_OBJECT);
+ g_pBaseRuntimeClass = MscorlibBinder::GetClass(CLASS__RUNTIME_CLASS);
+
+ MscorlibBinder::GetClass(CLASS__IDICTIONARYGENERIC);
+ MscorlibBinder::GetClass(CLASS__IREADONLYDICTIONARYGENERIC);
+ MscorlibBinder::GetClass(CLASS__ATTRIBUTE);
+ MscorlibBinder::GetClass(CLASS__EVENT_HANDLERGENERIC);
+
+ MscorlibBinder::GetClass(CLASS__IENUMERABLE);
+ MscorlibBinder::GetClass(CLASS__ICOLLECTION);
+ MscorlibBinder::GetClass(CLASS__ILIST);
+ MscorlibBinder::GetClass(CLASS__IDISPOSABLE);
+
+#ifdef _DEBUG
+ WinRTInterfaceRedirector::VerifyRedirectedInterfaceStubs();
+#endif // _DEBUG
+#endif
+
+ // Load a special marker method used to detect Constrained Execution Regions
+ // at jit time.
+ g_pPrepareConstrainedRegionsMethod = MscorlibBinder::GetMethod(METHOD__RUNTIME_HELPERS__PREPARE_CONSTRAINED_REGIONS);
+ g_pExecuteBackoutCodeHelperMethod = MscorlibBinder::GetMethod(METHOD__RUNTIME_HELPERS__EXECUTE_BACKOUT_CODE_HELPER);
+
+ // Make sure that FCall mapping for Monitor.Enter is initialized. We need it in case Monitor.Enter is used only as JIT helper.
+ // For more details, see comment in code:JITutil_MonEnterWorker around "__me = GetEEFuncEntryPointMacro(JIT_MonEnter)".
+ ECall::GetFCallImpl(MscorlibBinder::GetMethod(METHOD__MONITOR__ENTER));
+
+#ifdef PROFILING_SUPPORTED
+ // Note that g_profControlBlock.fBaseSystemClassesLoaded must be set to TRUE only after
+ // all base system classes are loaded. Profilers are not allowed to call any type-loading
+ // APIs until g_profControlBlock.fBaseSystemClassesLoaded is TRUE. It is important that
+ // all base system classes be loaded before profilers can trigger any type loading.
+ g_profControlBlock.fBaseSystemClassesLoaded = TRUE;
+#endif // PROFILING_SUPPORTED
+
+#if defined(_DEBUG) && !defined(CROSSGEN_COMPILE)
+ if (!NingenEnabled())
+ {
+ g_Mscorlib.Check();
+ }
+#endif
+
+#if defined(HAVE_GCCOVER) && defined(FEATURE_PREJIT)
+ if (GCStress<cfg_instr_ngen>::IsEnabled())
+ {
+ // Setting up gc coverage requires the base system classes
+ // to be initialized. So we have deferred it until now for mscorlib.
+ Module *pModule = MscorlibBinder::GetModule();
+ _ASSERTE(pModule->IsSystem());
+ if(pModule->HasNativeImage())
+ {
+ SetupGcCoverageForNativeImage(pModule);
+ }
+ }
+#endif // defined(HAVE_GCCOVER) && defined(FEATURE_PREJIT)
+}
+
+/*static*/
+void SystemDomain::LoadDomain(AppDomain *pDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(System()));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ pDomain->SetCanUnload(); // by default can unload any domain
+ SystemDomain::System()->AddDomain(pDomain);
+}
+
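+// Assign a 1-based index to a new appdomain (index 0 means domain agile). Retail builds
+// recycle the lowest free slot in m_appDomainIndexList; checked builds avoid reusing
+// indexes (up to the first 2000) so that a stale index reliably names a dead domain.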
+ADIndex SystemDomain::GetNewAppDomainIndex(AppDomain *pAppDomain)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD count = m_appDomainIndexList.GetCount();
+ DWORD i;
+
+#ifdef _DEBUG
+ if (count < 2000)
+ {
+ // So that we can keep AD index inside object header.
+ // We do not want to create syncblock unless needed.
+ i = count;
+ }
+ else
+ {
+#endif // _DEBUG
+ //
+ // Look for an unused index. Note that in a checked build,
+ // we never reuse indexes - this makes it easier to tell
+ // when we are looking at a stale app domain.
+ //
+
+ i = m_appDomainIndexList.FindElement(m_dwLowestFreeIndex, NULL);
+ if (i == (DWORD) ArrayList::NOT_FOUND)
+ i = count;
+ m_dwLowestFreeIndex = i+1;
+#ifdef _DEBUG
+ if (m_dwLowestFreeIndex >= 2000)
+ {
+ m_dwLowestFreeIndex = 0;
+ }
+ }
+#endif // _DEBUG
+
+ if (i == count)
+ IfFailThrow(m_appDomainIndexList.Append(pAppDomain));
+ else
+ m_appDomainIndexList.Set(i, pAppDomain);
+
+ _ASSERTE(i < m_appDomainIndexList.GetCount());
+
+ // Note that index 0 means domain agile.
+ return ADIndex(i+1);
+}
+
+void SystemDomain::ReleaseAppDomainIndex(ADIndex index)
+{
+ WRAPPER_NO_CONTRACT;
+ SystemDomain::LockHolder lh;
+ // Note that index 0 means domain agile.
+ index.m_dwIndex--;
+
+ _ASSERTE(m_appDomainIndexList.Get(index.m_dwIndex) != NULL);
+
+ m_appDomainIndexList.Set(index.m_dwIndex, NULL);
+
+#ifndef _DEBUG
+ if (index.m_dwIndex < m_dwLowestFreeIndex)
+ m_dwLowestFreeIndex = index.m_dwIndex;
+#endif // !_DEBUG
+}
+
+#endif // !DACCESS_COMPILE
+
+PTR_AppDomain SystemDomain::GetAppDomainAtIndex(ADIndex index)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ _ASSERTE(index.m_dwIndex != 0);
+
+ PTR_AppDomain pAppDomain = TestGetAppDomainAtIndex(index);
+
+ _ASSERTE(pAppDomain || !"Attempt to access unloaded app domain");
+
+ return pAppDomain;
+}
+
+PTR_AppDomain SystemDomain::TestGetAppDomainAtIndex(ADIndex index)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ _ASSERTE(index.m_dwIndex != 0);
+ index.m_dwIndex--;
+
+#ifndef DACCESS_COMPILE
+ _ASSERTE(index.m_dwIndex < (DWORD)m_appDomainIndexList.GetCount());
+ AppDomain *pAppDomain = (AppDomain*) m_appDomainIndexList.Get(index.m_dwIndex);
+#else // DACCESS_COMPILE
+ PTR_ArrayListStatic pList = &m_appDomainIndexList;
+ AppDomain *pAppDomain = dac_cast<PTR_AppDomain>(pList->Get(index.m_dwIndex));
+#endif // DACCESS_COMPILE
+ return PTR_AppDomain(pAppDomain);
+}
+
+#ifndef DACCESS_COMPILE
+
+// See also code:SystemDomain::ReleaseAppDomainId
+ADID SystemDomain::GetNewAppDomainId(AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ DWORD i = m_appDomainIdList.GetCount();
+
+ IfFailThrow(m_appDomainIdList.Append(pAppDomain));
+
+ _ASSERTE(i < m_appDomainIdList.GetCount());
+
+ return ADID(i+1);
+}
+
+AppDomain *SystemDomain::GetAppDomainAtId(ADID index)
+{
+ CONTRACTL
+ {
+#ifdef _DEBUG
+ if (!SystemDomain::IsUnderDomainLock() && !IsGCThread()) { MODE_COOPERATIVE;} else { DISABLED(MODE_ANY);}
+#endif
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ if(index.m_dwId == 0)
+ return NULL;
+ DWORD requestedID = index.m_dwId - 1;
+
+ if(requestedID >= (DWORD)m_appDomainIdList.GetCount())
+ return NULL;
+
+ AppDomain * result = (AppDomain *)m_appDomainIdList.Get(requestedID);
+
+#ifndef CROSSGEN_COMPILE
+ if(result==NULL && GetThread() == FinalizerThread::GetFinalizerThread() &&
+ SystemDomain::System()->AppDomainBeingUnloaded()!=NULL &&
+ SystemDomain::System()->AppDomainBeingUnloaded()->GetId()==index)
+ result=SystemDomain::System()->AppDomainBeingUnloaded();
+ // If the current thread can't enter the AppDomain, then don't return it.
+ if (!result || !result->CanThreadEnter(GetThread()))
+ return NULL;
+#endif // CROSSGEN_COMPILE
+
+ return result;
+}
+
+// Releases an appdomain ID. Note that today we have code that depends on these
+// IDs not being recycled, so we don't actually shrink m_appDomainIdList, but
+// simply zero out an entry. Thus we 'leak' the memory associated with the slot in
+// m_appDomainIdList.
+//
+// TODO make this a sparse structure so that we avoid that leak.
+//
+void SystemDomain::ReleaseAppDomainId(ADID index)
+{
+ LIMITED_METHOD_CONTRACT;
+ index.m_dwId--;
+
+ _ASSERTE(index.m_dwId < (DWORD)m_appDomainIdList.GetCount());
+
+ m_appDomainIdList.Set(index.m_dwId, NULL);
+}
+
+#if defined(FEATURE_COMINTEROP_APARTMENT_SUPPORT) && !defined(CROSSGEN_COMPILE)
+
+#ifdef _DEBUG
+int g_fMainThreadApartmentStateSet = 0;
+#endif
+
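+// Determine the COM apartment state requested by the entrypoint's custom attributes.
+// In managed source these are typically spelled [STAThread] / [MTAThread] on Main;
+// specifying both is a custom attribute format error, and specifying neither yields
+// AS_Unknown.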
+Thread::ApartmentState SystemDomain::GetEntryPointThreadAptState(IMDInternalImport* pScope, mdMethodDef mdMethod)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr;
+ IfFailThrow(hr = pScope->GetCustomAttributeByName(mdMethod,
+ DEFAULTDOMAIN_MTA_TYPE,
+ NULL,
+ NULL));
+ BOOL fIsMTA = FALSE;
+ if(hr == S_OK)
+ fIsMTA = TRUE;
+
+ IfFailThrow(hr = pScope->GetCustomAttributeByName(mdMethod,
+ DEFAULTDOMAIN_STA_TYPE,
+ NULL,
+ NULL));
+ BOOL fIsSTA = FALSE;
+ if (hr == S_OK)
+ fIsSTA = TRUE;
+
+ if (fIsSTA && fIsMTA)
+ COMPlusThrowHR(COR_E_CUSTOMATTRIBUTEFORMAT);
+
+ if (fIsSTA)
+ return Thread::AS_InSTA;
+ else if (fIsMTA)
+ return Thread::AS_InMTA;
+
+ return Thread::AS_Unknown;
+}
+
+void SystemDomain::SetThreadAptState (IMDInternalImport* pScope, Thread::ApartmentState state)
+{
+ STANDARD_VM_CONTRACT;
+
+ BOOL fIsLegacy = FALSE;
+
+ // Check for legacy behavior regarding COM Apartment state of the main thread.
+
+#define METAMODEL_MAJOR_VER_WITH_NEW_BEHAVIOR 2
+#define METAMODEL_MINOR_VER_WITH_NEW_BEHAVIOR 0
+
+ LPCSTR pVer;
+ IfFailThrow(pScope->GetVersionString(&pVer));
+
+ // Does this look like a version?
+ if (pVer != NULL)
+ {
+ // Is it 'vN.' where N is a digit?
+ if ((pVer[0] == 'v' || pVer[0] == 'V') &&
+ IS_DIGIT(pVer[1]) &&
+ (pVer[2] == '.') )
+ {
+ // Looks like a version. Is it less than the v2.0 major version, where we start using the new behavior?
+ fIsLegacy = DIGIT_TO_INT(pVer[1]) < METAMODEL_MAJOR_VER_WITH_NEW_BEHAVIOR;
+ }
+ }
+
+ if (!fIsLegacy && g_pConfig != NULL)
+ {
+ fIsLegacy = g_pConfig->LegacyApartmentInitPolicy();
+ }
+
+
+ Thread* pThread = GetThread();
+ _ASSERTE(pThread);
+
+ if(state == Thread::AS_InSTA)
+ {
+ Thread::ApartmentState pState = pThread->SetApartment(Thread::AS_InSTA, TRUE);
+ _ASSERTE(pState == Thread::AS_InSTA);
+ }
+ else if ((state == Thread::AS_InMTA) || (!fIsLegacy))
+ {
+ // If MTAThreadAttribute is specified, or if no attribute is specified and we are not
+ // running in legacy mode, then we will set the apartment state to MTA. The reason for
+ // this is to ensure the apartment state is consistent and reliably set. Without this,
+ // the apartment state for the main thread would be undefined and would actually depend
+ // on whether the assembly was ngen'd, which other types were loaded, etc.
+ Thread::ApartmentState pState = pThread->SetApartment(Thread::AS_InMTA, TRUE);
+ _ASSERTE(pState == Thread::AS_InMTA);
+ }
+
+#ifdef _DEBUG
+ g_fMainThreadApartmentStateSet++;
+#endif
+}
+#endif // defined(FEATURE_COMINTEROP_APARTMENT_SUPPORT) && !defined(CROSSGEN_COMPILE)
+
+// Looks in all the modules for the DefaultDomain attribute.
+// The order is the assembly and then its modules. It is first
+// come, first served.
+BOOL SystemDomain::SetGlobalSharePolicyUsingAttribute(IMDInternalImport* pScope, mdMethodDef mdMethod)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_FUSION
+ HRESULT hr;
+
+ //
+ // Check to see if the assembly has the LoaderOptimization attribute set.
+ //
+
+ DWORD cbVal;
+ BYTE *pVal;
+ IfFailThrow(hr = pScope->GetCustomAttributeByName(mdMethod,
+ DEFAULTDOMAIN_LOADEROPTIMIZATION_TYPE,
+ (const void**)&pVal, &cbVal));
+
+ if (hr == S_OK) {
+ CustomAttributeParser cap(pVal, cbVal);
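+ // Illustrative blob layout, assuming the LoaderOptimizationAttribute(byte)
+ // constructor: a 2-byte prolog 0x0001, one byte carrying the
+ // LoaderOptimization value (e.g. 0x02 for MultiDomain), then a 2-byte
+ // named-argument count of zero -- i.e. 01 00 02 00 00.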
+ IfFailThrow(cap.SkipProlog());
+
+ UINT8 u1;
+ IfFailThrow(cap.GetU1(&u1));
+
+ g_dwGlobalSharePolicy = u1 & AppDomain::SHARE_POLICY_MASK;
+
+ return TRUE;
+ }
+#endif
+
+ return FALSE;
+}
+
+void SystemDomain::SetupDefaultDomain()
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+
+ AppDomain *pDomain;
+ pDomain = pThread->GetDomain();
+ _ASSERTE(pDomain);
+
+ GCX_COOP();
+
+ ENTER_DOMAIN_PTR(SystemDomain::System()->DefaultDomain(),ADV_DEFAULTAD)
+ {
+ // Push this frame around loading the main assembly to ensure the
+ // debugger can properly recognize any managed code that gets run
+ // as "class initialization" code.
+ FrameWithCookie<DebuggerClassInitMarkFrame> __dcimf;
+
+ {
+ GCX_PREEMP();
+ InitializeDefaultDomain(TRUE);
+ }
+
+ __dcimf.Pop();
+ }
+ END_DOMAIN_TRANSITION;
+
+ RETURN;
+}
+
+HRESULT SystemDomain::SetupDefaultDomainNoThrow()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ SystemDomain::SetupDefaultDomain();
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+#ifdef _DEBUG
+int g_fInitializingInitialAD = 0;
+#endif
+
+// This routine completes the initialization of the default domain.
+// After this call managed code can be executed.
+void SystemDomain::InitializeDefaultDomain(
+ BOOL allowRedirects
+#ifdef FEATURE_HOSTED_BINDER
+ , ICLRPrivBinder * pBinder
+#endif
+ )
+{
+ STANDARD_VM_CONTRACT;
+
+ WCHAR* pwsConfig = NULL;
+ WCHAR* pwsPath = NULL;
+
+ ETWOnStartup (InitDefaultDomain_V1, InitDefaultDomainEnd_V1);
+
+#if defined(FEATURE_FUSION) // SxS
+ // Determine the application base and the configuration file name
+ CQuickWSTR sPathName;
+ CQuickWSTR sConfigName;
+
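+ // Both lookups below follow the standard two-call pattern: probe with the
+ // current buffer, and on ERROR_INSUFFICIENT_BUFFER grow the buffer to the
+ // reported size and call again.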
+ SIZE_T dwSize;
+ HRESULT hr = GetConfigFileFromWin32Manifest(sConfigName.Ptr(),
+ sConfigName.MaxSize(),
+ &dwSize);
+ if(FAILED(hr))
+ {
+ if(hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ {
+ sConfigName.ReSizeThrows(dwSize);
+ hr = GetConfigFileFromWin32Manifest(sConfigName.Ptr(),
+ sConfigName.MaxSize(),
+ &dwSize);
+ }
+ IfFailThrow(hr);
+ }
+ else
+ sConfigName.ReSizeThrows(dwSize);
+
+ hr = GetApplicationPathFromWin32Manifest(sPathName.Ptr(),
+ sPathName.MaxSize(),
+ &dwSize);
+ if(FAILED(hr))
+ {
+ if(hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ {
+ sPathName.ReSizeThrows(dwSize);
+ hr = GetApplicationPathFromWin32Manifest(sPathName.Ptr(),
+ sPathName.MaxSize(),
+ &dwSize);
+ }
+ IfFailThrow(hr);
+ }
+ else
+ sPathName.ReSizeThrows(dwSize);
+
+ pwsConfig = (sConfigName.Size() > 0 ? sConfigName.Ptr() : NULL);
+ pwsPath = (sPathName.Size() > 0 ? sPathName.Ptr() : NULL);
+#endif // defined(FEATURE_FUSION) // SxS
+
+ // Setup the default AppDomain.
+
+#ifdef _DEBUG
+ g_fInitializingInitialAD++;
+#endif
+
+ AppDomain* pDefaultDomain = SystemDomain::System()->DefaultDomain();
+
+#ifdef FEATURE_HOSTED_BINDER
+ if (pBinder != nullptr)
+ {
+ pDefaultDomain->SetLoadContextHostBinder(pBinder);
+ }
+ #ifdef FEATURE_APPX_BINDER
+ else if (AppX::IsAppXProcess())
+ {
+ CLRPrivBinderAppX * pAppXBinder = CLRPrivBinderAppX::GetOrCreateBinder();
+ pDefaultDomain->SetLoadContextHostBinder(pAppXBinder);
+ }
+ #endif
+#endif
+
+ {
+ GCX_COOP();
+
+#ifndef CROSSGEN_COMPILE
+ if (!NingenEnabled())
+ {
+#ifndef FEATURE_CORECLR
+ pDefaultDomain->InitializeHashing(NULL);
+ pDefaultDomain->InitializeSorting(NULL);
+#endif // FEATURE_CORECLR
+ }
+#endif // CROSSGEN_COMPILE
+
+ pDefaultDomain->InitializeDomainContext(allowRedirects, pwsPath, pwsConfig);
+
+#ifndef CROSSGEN_COMPILE
+ if (!NingenEnabled())
+ {
+#ifdef FEATURE_CLICKONCE
+ pDefaultDomain->InitializeDefaultClickOnceDomain();
+#endif // FEATURE_CLICKONCE
+
+ if (!IsSingleAppDomain())
+ {
+ pDefaultDomain->InitializeDefaultDomainManager();
+ pDefaultDomain->InitializeDefaultDomainSecurity();
+ }
+ }
+#endif // CROSSGEN_COMPILE
+ }
+
+ // DefaultDomain Load event
+ ETW::LoaderLog::DomainLoad(pDefaultDomain);
+
+#ifdef _DEBUG
+ g_fInitializingInitialAD--;
+#endif
+
+ TESTHOOKCALL(RuntimeStarted(RTS_DEFAULTADREADY));
+}
+
+
+
+#ifndef CROSSGEN_COMPILE
+
+#ifdef _DEBUG
+Volatile<LONG> g_fInExecuteMainMethod = 0;
+#endif
+
+#ifndef FEATURE_CORECLR
+void SystemDomain::ExecuteMainMethod(HMODULE hMod, __in_opt LPWSTR path /*=NULL*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(hMod, NULL_OK));
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ CounterHolder counter(&g_fInExecuteMainMethod);
+#endif
+
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+
+ GCX_COOP();
+
+ //
+ // There is no EH protecting this transition!
+ // This is generally ok in this method because if we throw out of here, it becomes unhandled anyway.
+ //
+ FrameWithCookie<ContextTransitionFrame> frame;
+ pThread->EnterContextRestricted(SystemDomain::System()->DefaultDomain()->GetDefaultContext(), &frame);
+ _ASSERTE(pThread->GetDomain());
+
+ AppDomain *pDomain = GetAppDomain();
+ _ASSERTE(pDomain);
+
+ // Push this frame around loading the main assembly to ensure the
+ // debugger can properly recognize any managed code that gets run
+ // as "class initializaion" code.
+ FrameWithCookie<DebuggerClassInitMarkFrame> __dcimf;
+ {
+ GCX_PREEMP();
+
+ PEImageHolder pTempImage(PEImage::LoadImage(hMod));
+
+ PEFileHolder pTempFile(PEFile::Open(pTempImage.Extract()));
+
+ // Check for CustomAttributes - Set up the DefaultDomain and the main thread
+ // Note that this has to be done before ExplicitBind() as it
+ // affects the bind
+ mdToken tkEntryPoint = pTempFile->GetEntryPointToken();
+ // <TODO>@TODO: What if the entrypoint is in another file of the assembly?</TODO>
+ ReleaseHolder<IMDInternalImport> scope(pTempFile->GetMDImportWithRef());
+ // In theory, we should have a valid executable image and scope should never be NULL, but we've been
+ // getting Watson failures for AVs here due to ISVs modifying image headers and some new OS loader
+ // checks (see Dev10# 718530 and Windows 7# 615596)
+ if (scope == NULL)
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ Thread::ApartmentState state = Thread::AS_Unknown;
+
+ if((!IsNilToken(tkEntryPoint)) && (TypeFromToken(tkEntryPoint) == mdtMethodDef)) {
+ if (scope->IsValidToken(tkEntryPoint))
+ state = SystemDomain::GetEntryPointThreadAptState(scope, tkEntryPoint);
+ else
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ // If the entry point has an explicit thread apartment state, set it
+ // before running the AppDomainManager initialization code.
+ if (state == Thread::AS_InSTA || state == Thread::AS_InMTA)
+ SystemDomain::SetThreadAptState(scope, state);
+#endif // FEATURE_COMINTEROP
+
+ BOOL fSetGlobalSharePolicyUsingAttribute = FALSE;
+
+ if((!IsNilToken(tkEntryPoint)) && (TypeFromToken(tkEntryPoint) == mdtMethodDef))
+ {
+ // The global share policy needs to be set before initializing the default domain
+ // so that it is in place when the appdomain manager is loaded.
+ fSetGlobalSharePolicyUsingAttribute = SystemDomain::SetGlobalSharePolicyUsingAttribute(scope, tkEntryPoint);
+ }
+
+ // This can potentially run managed code.
+ InitializeDefaultDomain(FALSE);
+
+#ifdef FEATURE_COMINTEROP
+ // If we haven't set an explicit thread apartment state, set it after the
+ // AppDomainManager has had a chance to set it in InitializeNewDomain.
+ if (state != Thread::AS_InSTA && state != Thread::AS_InMTA)
+ SystemDomain::SetThreadAptState(scope, state);
+#endif // FEATURE_COMINTEROP
+
+ if (fSetGlobalSharePolicyUsingAttribute)
+ SystemDomain::System()->DefaultDomain()->SetupLoaderOptimization(g_dwGlobalSharePolicy);
+
+ NewHolder<IPEFileSecurityDescriptor> pSecDesc(Security::CreatePEFileSecurityDescriptor(pDomain, pTempFile));
+
+ {
+ GCX_COOP();
+ pSecDesc->Resolve();
+ if (pSecDesc->AllowBindingRedirects())
+ pDomain->TurnOnBindingRedirects();
+ }
+
+ PEAssemblyHolder pFile(pDomain->BindExplicitAssembly(hMod, TRUE));
+
+ pDomain->m_pRootAssembly = GetAppDomain()->LoadAssembly(NULL, pFile, FILE_ACTIVE);
+
+ {
+ GCX_COOP();
+
+ // Reuse the evidence that was generated for the PEFile for the assembly so we don't have to
+ // regenerate evidence of the same type again if it is requested later.
+ pDomain->m_pRootAssembly->GetSecurityDescriptor()->SetEvidenceFromPEFile(pSecDesc);
+ }
+
+ // If the AppDomainManager for the default domain was specified in the application config file then
+ // we require that the assembly be trusted in order to set the manager
+ if (pDomain->HasAppDomainManagerInfo() && pDomain->AppDomainManagerSetFromConfig())
+ {
+ Assembly *pEntryAssembly = pDomain->GetAppDomainManagerEntryAssembly();
+ if (!pEntryAssembly->GetSecurityDescriptor()->AllowApplicationSpecifiedAppDomainManager())
+ {
+ COMPlusThrow(kTypeLoadException, IDS_E_UNTRUSTED_APPDOMAIN_MANAGER);
+ }
+ }
+
+ if (CorCommandLine::m_pwszAppFullName == NULL) {
+ StackSString friendlyName;
+ StackSString assemblyPath = pFile->GetPath();
+ SString::Iterator i = assemblyPath.End();
+
+ if (PEAssembly::FindLastPathSeparator(assemblyPath, i)) {
+ i++;
+ friendlyName.Set(assemblyPath, i, assemblyPath.End());
+ }
+ else
+ friendlyName.Set(assemblyPath);
+
+ pDomain->SetFriendlyName(friendlyName, TRUE);
+ }
+ }
+ __dcimf.Pop();
+
+ {
+ GCX_PREEMP();
+
+ LOG((LF_CLASSLOADER | LF_CORDB,
+ LL_INFO10,
+ "Created domain for an executable at %p\n",
+ (pDomain->m_pRootAssembly ? pDomain->m_pRootAssembly->Parent() : NULL)));
+ TESTHOOKCALL(RuntimeStarted(RTS_CALLINGENTRYPOINT));
+
+#ifdef FEATURE_MULTICOREJIT
+ pDomain->GetMulticoreJitManager().AutoStartProfile(pDomain);
+#endif
+
+ pDomain->m_pRootAssembly->ExecuteMainMethod(NULL);
+ }
+
+ pThread->ReturnToContext(&frame);
+
+#ifdef FEATURE_TESTHOOKS
+ TESTHOOKCALL(LeftAppDomain(DefaultADID));
+#endif
+}
+#endif //!FEATURE_CORECLR
+
+#ifdef FEATURE_CLICKONCE
+void SystemDomain::ActivateApplication(int *pReturnValue)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ struct _gc {
+ OBJECTREF orThis;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCX_COOP();
+ GCPROTECT_BEGIN(gc);
+
+ gc.orThis = SystemDomain::System()->DefaultDomain()->GetExposedObject();
+
+ MethodDescCallSite activateApp(METHOD__APP_DOMAIN__ACTIVATE_APPLICATION, &gc.orThis);
+
+ ARG_SLOT args[] = {
+ ObjToArgSlot(gc.orThis),
+ };
+ int retval = activateApp.Call_RetI4(args);
+ if (pReturnValue)
+ *pReturnValue = retval;
+
+ GCPROTECT_END();
+}
+#endif // FEATURE_CLICKONCE
+
+#ifdef FEATURE_MIXEDMODE
+static HRESULT RunDllMainHelper(HINSTANCE hInst, DWORD dwReason, LPVOID lpReserved, Thread* pThread, bool bReenablePreemptive)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_FAULT;
+
+ MethodDesc *pMD;
+ AppDomain *pDomain;
+ Module *pModule;
+ HRESULT hr = S_FALSE; // Assume no entry point.
+
+ // Setup the thread state to cooperative to run managed code.
+
+ // Get the old domain from the thread. Legacy dll entry points must always
+ // be run from the default domain.
+ //
+ // We cannot support legacy dlls getting loaded into all domains!!
+ EX_TRY
+ {
+ ENTER_DOMAIN_PTR(SystemDomain::System()->DefaultDomain(),ADV_DEFAULTAD)
+ {
+ pDomain = pThread->GetDomain();
+
+ // The module needs to be in the current list if you are coming here.
+ pModule = pDomain->GetIJWModule(hInst);
+ if (!pModule)
+ goto ErrExit;
+
+ // See if there even is an entry point.
+ pMD = pModule->GetDllEntryPoint();
+ if (!pMD)
+ goto ErrExit;
+
+ // We're actually going to run some managed code. There may be a customer
+ // debug probe enabled, that prevents execution in the loader lock.
+ CanRunManagedCode(hInst);
+
+ {
+ // Enter cooperative mode
+ GCX_COOP_NO_DTOR();
+ }
+
+ // Run through the helper which will do exception handling for us.
+ hr = ::RunDllMain(pMD, hInst, dwReason, lpReserved);
+
+ {
+ // Update thread state for the case where we are returning to unmanaged code.
+ GCX_MAYBE_PREEMP_NO_DTOR(bReenablePreemptive);
+ }
+
+ErrExit: ;
+ // does not throw exception
+ }
+ END_DOMAIN_TRANSITION;
+
+ }
+ EX_CATCH
+ {
+ hr = GetExceptionHResult(GET_THROWABLE());
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return (hr);
+}
+
+//*****************************************************************************
+// This function sets up the proper thread state, looks up the module for the
+// given hinstance, and then runs the entry point if there is one.
+//*****************************************************************************
+HRESULT SystemDomain::RunDllMain(HINSTANCE hInst, DWORD dwReason, LPVOID lpReserved)
+{
+
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread() && !lpReserved) {MODE_PREEMPTIVE;} else {DISABLED(MODE_PREEMPTIVE);};
+ if(GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);};
+ }
+ CONTRACTL_END;
+
+
+ Thread *pThread = NULL;
+ BOOL fEnterCoop = FALSE;
+ HRESULT hr = S_FALSE; // Assume no entry point.
+
+ pThread = GetThread();
+ if ((!pThread && (dwReason == DLL_PROCESS_DETACH || dwReason == DLL_THREAD_DETACH)) ||
+ g_fEEShutDown)
+ return S_OK;
+
+ // ExitProcess is called while a thread is doing GC.
+ if (dwReason == DLL_PROCESS_DETACH && GCHeap::IsGCInProgress())
+ return S_OK;
+
+ // ExitProcess is called on a thread that we don't know about
+ if (dwReason == DLL_PROCESS_DETACH && GetThread() == NULL)
+ return S_OK;
+
+ // Need to setup the thread since this might be the first time the EE has
+ // seen it if the thread was created in unmanaged code and this is a thread
+ // attach event.
+ if (pThread)
+ fEnterCoop = pThread->PreemptiveGCDisabled();
+ else {
+ pThread = SetupThreadNoThrow(&hr);
+ if (pThread == NULL)
+ return hr;
+ }
+
+ return RunDllMainHelper(hInst, dwReason, lpReserved, pThread, !fEnterCoop);
+}
+#endif // FEATURE_MIXEDMODE
+
+#endif // CROSSGEN_COMPILE
+
+
+
+// Helper function to load an assembly. This is called from LoadCOMClass.
+/* static */
+
+Assembly *AppDomain::LoadAssemblyHelper(LPCWSTR wszAssembly,
+ LPCWSTR wszCodeBase)
+{
+ CONTRACT(Assembly *)
+ {
+ THROWS;
+ POSTCONDITION(CheckPointer(RETVAL));
+ PRECONDITION(wszAssembly || wszCodeBase);
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ AssemblySpec spec;
+ if(wszAssembly) {
+ #define MAKE_TRANSLATIONFAILED { ThrowOutOfMemory(); }
+ MAKE_UTF8PTR_FROMWIDE(szAssembly,wszAssembly);
+ #undef MAKE_TRANSLATIONFAILED
+
+ IfFailThrow(spec.Init(szAssembly));
+ }
+
+ if (wszCodeBase) {
+ spec.SetCodeBase(wszCodeBase);
+ }
+ RETURN spec.LoadAssembly(FILE_LOADED);
+}
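+
+// Illustrative only: wszAssembly is a fusion display name such as
+//     L"System.Xml, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089"
+// and wszCodeBase is a location such as L"file:///C:/SomeDir/SomeLib.dll";
+// LoadCOMClass below passes along whichever of the two the COM registration provided.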
+
+#if defined(FEATURE_CLASSIC_COMINTEROP) && !defined(CROSSGEN_COMPILE)
+
+#ifdef FEATURE_CORECLR
+MethodTable *AppDomain::LoadCOMClass(GUID clsid,
+ BOOL bLoadRecord/*=FALSE*/,
+ BOOL* pfAssemblyInReg/*=NULL*/)
+{
+ // @CORESYSTODO: what to do here?
+ return NULL;
+}
+#else // FEATURE_CORECLR
+
+static BOOL IsSameRuntimeVersion(ICLRRuntimeInfo *pInfo1, ICLRRuntimeInfo *pInfo2)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ WCHAR wszVersion1[_MAX_PATH];
+ WCHAR wszVersion2[_MAX_PATH];
+ DWORD cchVersion;
+
+ cchVersion = COUNTOF(wszVersion1);
+ IfFailThrow(pInfo1->GetVersionString(wszVersion1, &cchVersion));
+
+ cchVersion = COUNTOF(wszVersion2);
+ IfFailThrow(pInfo2->GetVersionString(wszVersion2, &cchVersion));
+
+ return SString::_wcsicmp(wszVersion1, wszVersion2) == 0;
+}
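+
+// For example, two runtime infos that both report "v4.0.30319" compare equal
+// here, while "v2.0.50727" vs "v4.0.30319" do not -- in which case LoadCOMClass
+// below declines to load the assembly into this runtime.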
+
+MethodTable *AppDomain::LoadCOMClass(GUID clsid,
+ BOOL bLoadRecord/*=FALSE*/,
+ BOOL* pfAssemblyInReg/*=NULL*/)
+{
+ CONTRACT (MethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+
+ MethodTable* pMT = NULL;
+
+ NewArrayHolder<WCHAR> wszClassName = NULL;
+ NewArrayHolder<WCHAR> wszAssemblyString = NULL;
+ NewArrayHolder<WCHAR> wszCodeBaseString = NULL;
+
+ DWORD cbAssembly = 0;
+ DWORD cbCodeBase = 0;
+ Assembly *pAssembly = NULL;
+ BOOL fFromRegistry = FALSE;
+ BOOL fRegFreePIA = FALSE;
+
+ HRESULT hr = S_OK;
+
+ if (pfAssemblyInReg != NULL)
+ *pfAssemblyInReg = FALSE;
+
+ // with sxs.dll help
+ hr = FindShimInfoFromWin32(clsid, bLoadRecord, NULL, NULL, &wszClassName, &wszAssemblyString, &fRegFreePIA);
+
+ if(FAILED(hr))
+ {
+ hr = FindShimInfoFromRegistry(clsid, bLoadRecord, VER_ASSEMBLYMAJORVERSION, VER_ASSEMBLYMINORVERSION,
+ &wszClassName, &wszAssemblyString, &wszCodeBaseString);
+ if (FAILED(hr))
+ RETURN NULL;
+
+ fFromRegistry = TRUE;
+ }
+
+ // Skip the GetRuntimeForManagedCOMObject check for value types since they cannot be activated and are
+ // always used for wrapping existing instances coming from COM.
+ if (!bLoadRecord)
+ {
+ // We will load the assembly only if it is a PIA or if unmanaged activation would load the currently running
+ // runtime. Otherwise we return NULL which will result in using the default System.__ComObject type.
+
+ // the type is a PIA type if mscoree.dll is not its inproc server dll or it was specified as <clrSurrogate> in the manifest
+ BOOL fPIA = (fFromRegistry ? !Clr::Util::Com::CLSIDHasMscoreeAsInprocServer32(clsid) : fRegFreePIA);
+ if (!fPIA)
+ {
+ // this isn't a PIA, so we must determine which runtime it would load
+ ReleaseHolder<ICLRRuntimeHostInternal> pRuntimeHostInternal;
+ IfFailThrow(g_pCLRRuntime->GetInterface(CLSID_CLRRuntimeHostInternal,
+ IID_ICLRRuntimeHostInternal,
+ &pRuntimeHostInternal));
+
+ // we call the shim to see which runtime this would be activated in
+ ReleaseHolder<ICLRRuntimeInfo> pRuntimeInfo;
+ if (FAILED(pRuntimeHostInternal->GetRuntimeForManagedCOMObject(clsid, IID_ICLRRuntimeInfo, &pRuntimeInfo)))
+ {
+ // the requested runtime is not loadable - don't load the assembly
+ RETURN NULL;
+ }
+
+ if (!IsSameRuntimeVersion(g_pCLRRuntime, pRuntimeInfo))
+ {
+ // the requested runtime is different from this runtime - don't load the assembly
+ RETURN NULL;
+ }
+ }
+ }
+
+ if (pfAssemblyInReg != NULL)
+ *pfAssemblyInReg = TRUE;
+
+ if (wszAssemblyString != NULL) {
+ pAssembly = LoadAssemblyHelper(wszAssemblyString, wszCodeBaseString);
+ pMT = TypeName::GetTypeFromAssembly(wszClassName, pAssembly).GetMethodTable();
+ if (!pMT)
+ goto ErrExit;
+ }
+
+ if (pMT == NULL) {
+ ErrExit:
+ // Convert the GUID to its string representation.
+ WCHAR szClsid[64];
+ if (GuidToLPWSTR(clsid, szClsid, NumItems(szClsid)) == 0)
+ szClsid[0] = 0;
+
+ // Throw an exception indicating we failed to load the type with
+ // the requested CLSID.
+ COMPlusThrow(kTypeLoadException, IDS_CLASSLOAD_NOCLSIDREG, szClsid);
+ }
+
+ RETURN pMT;
+}
+
+#endif // FEATURE_CORECLR
+
+#endif // FEATURE_CLASSIC_COMINTEROP && !CROSSGEN_COMPILE
+
+
+/*static*/
+bool SystemDomain::IsReflectionInvocationMethod(MethodDesc* pMeth)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodTable* pCaller = pMeth->GetMethodTable();
+
+ // All Reflection Invocation methods are defined in mscorlib.dll
+ if (!pCaller->GetModule()->IsSystem())
+ return false;
+
+ /* List of types that should be skipped to identify true caller */
+ static const BinderClassID reflectionInvocationTypes[] = {
+ CLASS__METHOD,
+ CLASS__METHOD_BASE,
+ CLASS__METHOD_INFO,
+ CLASS__CONSTRUCTOR,
+ CLASS__CONSTRUCTOR_INFO,
+ CLASS__CLASS,
+ CLASS__TYPE_HANDLE,
+ CLASS__METHOD_HANDLE,
+ CLASS__FIELD_HANDLE,
+ CLASS__TYPE,
+ CLASS__FIELD,
+ CLASS__RT_FIELD_INFO,
+ CLASS__FIELD_INFO,
+ CLASS__EVENT,
+ CLASS__EVENT_INFO,
+ CLASS__PROPERTY,
+ CLASS__PROPERTY_INFO,
+ CLASS__ACTIVATOR,
+ CLASS__ARRAY,
+ CLASS__ASSEMBLYBASE,
+ CLASS__ASSEMBLY,
+ CLASS__TYPE_DELEGATOR,
+ CLASS__RUNTIME_HELPERS,
+#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORECLR)
+ CLASS__ITYPE,
+ CLASS__IASSEMBLY,
+ CLASS__IMETHODBASE,
+ CLASS__IMETHODINFO,
+ CLASS__ICONSTRUCTORINFO,
+ CLASS__IFIELDINFO,
+ CLASS__IPROPERTYINFO,
+ CLASS__IEVENTINFO,
+ CLASS__IAPPDOMAIN,
+#endif // FEATURE_COMINTEROP && !FEATURE_CORECLR
+ CLASS__LAZY_INITIALIZER,
+ CLASS__DYNAMICMETHOD,
+ CLASS__DELEGATE,
+ CLASS__MULTICAST_DELEGATE
+ };
+
+ static const BinderClassID genericReflectionInvocationTypes[] = {
+ CLASS__LAZY_HELPERS,
+ CLASS__LAZY
+ };
+
+ static mdTypeDef genericReflectionInvocationTypeDefs[NumItems(genericReflectionInvocationTypes)];
+
+ static bool fInited = false;
+
+ if (!VolatileLoad(&fInited))
+ {
+ // Make sure all types are loaded so that we can use faster GetExistingClass()
+ for (unsigned i = 0; i < NumItems(reflectionInvocationTypes); i++)
+ {
+ MscorlibBinder::GetClass(reflectionInvocationTypes[i]);
+ }
+
+ // Make sure all types are loaded so that we can use faster GetExistingClass()
+ for (unsigned i = 0; i < NumItems(genericReflectionInvocationTypes); i++)
+ {
+ genericReflectionInvocationTypeDefs[i] = MscorlibBinder::GetClass(genericReflectionInvocationTypes[i])->GetCl();
+ }
+
+ MscorlibBinder::GetClass(CLASS__APP_DOMAIN);
+
+ VolatileStore(&fInited, true);
+ }
+
+ if (pCaller->HasInstantiation())
+ {
+ // For generic types, pCaller will be an instantiated type and never equal to the type definition.
+ // So we compare their TypeDef tokens instead.
+ for (unsigned i = 0; i < NumItems(genericReflectionInvocationTypeDefs); i++)
+ {
+ if (pCaller->GetCl() == genericReflectionInvocationTypeDefs[i])
+ return true;
+ }
+ }
+ else
+ {
+ for (unsigned i = 0; i < NumItems(reflectionInvocationTypes); i++)
+ {
+ if (MscorlibBinder::GetExistingClass(reflectionInvocationTypes[i]) == pCaller)
+ return true;
+ }
+
+ // AppDomain is an example of a type that is both used in the implementation of
+ // reflection, and also a type that contains methods that are clients of reflection
+ // (i.e., they instigate their own CreateInstance). Skip all AppDomain frames that
+ // are NOT known clients of reflection. NOTE: The ever-increasing complexity of this
+ // exclusion list is a sign that we need a better way--this is error-prone and
+ // unmaintainable as more changes are made to BCL types.
+ if ((pCaller == MscorlibBinder::GetExistingClass(CLASS__APP_DOMAIN))
+ && (pMeth != MscorlibBinder::GetMethod(METHOD__APP_DOMAIN__CREATE_APP_DOMAIN_MANAGER)) // This uses reflection to create an AppDomainManager
+ #ifdef FEATURE_CLICKONCE
+ && (pMeth != MscorlibBinder::GetMethod(METHOD__APP_DOMAIN__ACTIVATE_APPLICATION)) // This uses reflection to create an ActivationContext
+ #endif
+ )
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
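+
+// Illustrative only (hypothetical call chain): for a stack such as
+//     UserCode.Foo -> Activator.CreateInstance -> internal reflection frames
+// every frame whose declaring type appears in the lists above is skipped, so
+// the stack-mark walk below reports UserCode.Foo as the true caller.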
+
+#ifndef CROSSGEN_COMPILE
+struct CallersDataWithStackMark
+{
+ StackCrawlMark* stackMark;
+ BOOL foundMe;
+#ifdef FEATURE_REMOTING
+ BOOL skippingRemoting;
+#endif
+ MethodDesc* pFoundMethod;
+ MethodDesc* pPrevMethod;
+ AppDomain* pAppDomain;
+};
+
+/*static*/
+MethodDesc* SystemDomain::GetCallersMethod(StackCrawlMark* stackMark,
+ AppDomain **ppAppDomain/*=NULL*/)
+
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ CallersDataWithStackMark cdata;
+ ZeroMemory(&cdata, sizeof(CallersDataWithStackMark));
+ cdata.stackMark = stackMark;
+
+ GetThread()->StackWalkFrames(CallersMethodCallbackWithStackMark, &cdata, FUNCTIONSONLY | LIGHTUNWIND);
+
+ if(cdata.pFoundMethod) {
+ if (ppAppDomain)
+ *ppAppDomain = cdata.pAppDomain;
+ return cdata.pFoundMethod;
+ } else
+ return NULL;
+}
+
+/*static*/
+MethodTable* SystemDomain::GetCallersType(StackCrawlMark* stackMark,
+ AppDomain **ppAppDomain/*=NULL*/)
+
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ CallersDataWithStackMark cdata;
+ ZeroMemory(&cdata, sizeof(CallersDataWithStackMark));
+ cdata.stackMark = stackMark;
+
+ GetThread()->StackWalkFrames(CallersMethodCallbackWithStackMark, &cdata, FUNCTIONSONLY | LIGHTUNWIND);
+
+ if(cdata.pFoundMethod) {
+ if (ppAppDomain)
+ *ppAppDomain = cdata.pAppDomain;
+ return cdata.pFoundMethod->GetMethodTable();
+ } else
+ return NULL;
+}
+
+/*static*/
+Module* SystemDomain::GetCallersModule(StackCrawlMark* stackMark,
+ AppDomain **ppAppDomain/*=NULL*/)
+
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ CallersDataWithStackMark cdata;
+ ZeroMemory(&cdata, sizeof(CallersDataWithStackMark));
+ cdata.stackMark = stackMark;
+
+ GetThread()->StackWalkFrames(CallersMethodCallbackWithStackMark, &cdata, FUNCTIONSONLY | LIGHTUNWIND);
+
+ if(cdata.pFoundMethod) {
+ if (ppAppDomain)
+ *ppAppDomain = cdata.pAppDomain;
+ return cdata.pFoundMethod->GetModule();
+ } else
+ return NULL;
+}
+
+struct CallersData
+{
+ int skip;
+ MethodDesc* pMethod;
+};
+
+/*static*/
+Assembly* SystemDomain::GetCallersAssembly(StackCrawlMark *stackMark,
+ AppDomain **ppAppDomain/*=NULL*/)
+{
+ WRAPPER_NO_CONTRACT;
+ Module* mod = GetCallersModule(stackMark, ppAppDomain);
+ if (mod)
+ return mod->GetAssembly();
+ return NULL;
+}
+
+/*static*/
+Module* SystemDomain::GetCallersModule(int skip)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ CallersData cdata;
+ ZeroMemory(&cdata, sizeof(CallersData));
+ cdata.skip = skip;
+
+ StackWalkFunctions(GetThread(), CallersMethodCallback, &cdata);
+
+ if(cdata.pMethod)
+ return cdata.pMethod->GetModule();
+ else
+ return NULL;
+}
+
+/*private static*/
+StackWalkAction SystemDomain::CallersMethodCallbackWithStackMark(CrawlFrame* pCf, VOID* data)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+
+ MethodDesc *pFunc = pCf->GetFunction();
+
+ /* We asked to be called back only for functions */
+ _ASSERTE(pFunc);
+
+ CallersDataWithStackMark* pCaller = (CallersDataWithStackMark*) data;
+ if (pCaller->stackMark)
+ {
+ if (!pCf->IsInCalleesFrames(pCaller->stackMark))
+ {
+ // save the current in case it is the one we want
+ pCaller->pPrevMethod = pFunc;
+ pCaller->pAppDomain = pCf->GetAppDomain();
+ return SWA_CONTINUE;
+ }
+
+ // LookForMe stack crawl marks needn't worry about reflection or
+ // remoting frames on the stack. Each frame above (newer than) the
+ // target will be captured by the logic above. Once we transition to
+ // finding the stack mark below the AofRA, we know that we hit the
+ // target last time round and immediately exit with the cached result.
+
+ if (*(pCaller->stackMark) == LookForMe)
+ {
+ pCaller->pFoundMethod = pCaller->pPrevMethod;
+ return SWA_ABORT;
+ }
+ }
+
+ // Skip reflection and remoting frames that could lie between a stack marked
+ // method and its true caller (or that caller and its own caller). These
+ // frames are infrastructure and logically transparent to the stack crawling
+ // algorithm.
+
+ // Skipping remoting frames. We always skip entire client to server spans
+ // (though we see them in the order server then client during a stack crawl
+ // obviously).
+
+ // We spot the server dispatcher end because all calls are dispatched
+ // through a single method: StackBuilderSink._PrivateProcessMessage.
+
+ Frame* frame = pCf->GetFrame();
+ _ASSERTE(pCf->IsFrameless() || frame);
+
+#ifdef FEATURE_REMOTING
+ if (pFunc == MscorlibBinder::GetMethod(METHOD__STACK_BUILDER_SINK__PRIVATE_PROCESS_MESSAGE))
+ {
+ _ASSERTE(!pCaller->skippingRemoting);
+ pCaller->skippingRemoting = true;
+ return SWA_CONTINUE;
+ }
+ // And we spot the client end because there's a transparent proxy transition
+ // frame pushed.
+ if (frame && frame->GetFrameType() == Frame::TYPE_TP_METHOD_FRAME)
+ {
+ pCaller->skippingRemoting = false;
+ return SWA_CONTINUE;
+ }
+
+ // Skip any frames in between the server and client remoting endpoints.
+ if (pCaller->skippingRemoting)
+ return SWA_CONTINUE;
+#endif
+
+
+ // Skipping reflection frames. We don't need to be quite as exhaustive here
+ // as the security or reflection stack walking code since we know this logic
+ // is only invoked for selected methods in mscorlib itself. So we're
+ // reasonably sure we won't have any sensitive methods late bound invoked on
+ // constructors, properties or events. This leaves being invoked via
+ // MethodInfo, Type or Delegate (and depending on which invoke overload is
+ // being used, several different reflection classes may be involved).
+
+ g_IBCLogger.LogMethodDescAccess(pFunc);
+
+ if (SystemDomain::IsReflectionInvocationMethod(pFunc))
+ return SWA_CONTINUE;
+
+ if (frame && frame->GetFrameType() == Frame::TYPE_MULTICAST)
+ {
+ // This must be either a secure delegate frame or a true multicast delegate invocation.
+
+ _ASSERTE(pFunc->GetMethodTable()->IsDelegate());
+
+ DELEGATEREF del = (DELEGATEREF)((SecureDelegateFrame*)frame)->GetThis(); // This can throw.
+
+ if (COMDelegate::IsSecureDelegate(del))
+ {
+ if (del->IsWrapperDelegate())
+ {
+ // On ARM, we use secure delegate infrastructure to preserve R4 register.
+ return SWA_CONTINUE;
+ }
+ // For a secure delegate frame, we should return the delegate creator instead
+ // of the delegate method itself.
+ pFunc = (MethodDesc*) del->GetMethodPtrAux();
+ }
+ else
+ {
+ _ASSERTE(COMDelegate::IsTrueMulticastDelegate(del));
+ return SWA_CONTINUE;
+ }
+ }
+
+ // Return the first non-reflection/remoting frame if no stack mark was
+ // supplied.
+ if (!pCaller->stackMark)
+ {
+ pCaller->pFoundMethod = pFunc;
+ pCaller->pAppDomain = pCf->GetAppDomain();
+ return SWA_ABORT;
+ }
+
+ // If we got here, we must already be in the frame containing the stack mark and we are not looking for "me".
+ _ASSERTE(pCaller->stackMark &&
+ pCf->IsInCalleesFrames(pCaller->stackMark) &&
+ *(pCaller->stackMark) != LookForMe);
+
+ // When looking for caller's caller, we delay returning results for another
+ // round (the way this is structured, we will still be able to skip
+ // reflection and remoting frames between the caller and the caller's
+ // caller).
+
+ if ((*(pCaller->stackMark) == LookForMyCallersCaller) &&
+ (pCaller->pFoundMethod == NULL))
+ {
+ pCaller->pFoundMethod = pFunc;
+ return SWA_CONTINUE;
+ }
+
+#ifndef FEATURE_REMOTING
+ // If remoting is not available, we only set the caller if the crawl frame is from the same domain.
+ // Why? Because if the caller's domain is different from the current domain,
+ // there have to be interop/native frames in between.
+ // For example, in CoreCLR, if we find the caller to be in a different domain, then the
+ // call into reflection is due to an unmanaged call into mscorlib. In that
+ // case, the caller really is an INTEROP method.
+ // In general, if the caller is INTEROP, we leave the caller/callerdomain as NULL
+ // (to be precise: they are already NULL and we don't change them).
+ if (pCf->GetAppDomain() == GetAppDomain())
+#endif // FEATURE_REMOTING
+ // We must either be looking for the caller, or the caller's caller when
+ // we've already found the caller (we used a non-null value in pFoundMethod
+ // simply as a flag, the correct method to return in both case is the
+ // current method).
+ {
+ pCaller->pFoundMethod = pFunc;
+ pCaller->pAppDomain = pCf->GetAppDomain();
+ }
+
+ return SWA_ABORT;
+}
+
+/*private static*/
+StackWalkAction SystemDomain::CallersMethodCallback(CrawlFrame* pCf, VOID* data)
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ MethodDesc *pFunc = pCf->GetFunction();
+
+ /* We asked to be called back only for functions */
+ _ASSERTE(pFunc);
+
+ // Ignore intercepted frames
+ if(pFunc->IsInterceptedForDeclSecurity())
+ return SWA_CONTINUE;
+
+ CallersData* pCaller = (CallersData*) data;
+ if(pCaller->skip == 0) {
+ pCaller->pMethod = pFunc;
+ return SWA_ABORT;
+ }
+ else {
+ pCaller->skip--;
+ return SWA_CONTINUE;
+ }
+
+}
+#endif // CROSSGEN_COMPILE
+
+#ifdef CROSSGEN_COMPILE
+// defined in compile.cpp
+extern CompilationDomain * theDomain;
+#endif
+
+void SystemDomain::CreateDefaultDomain()
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef CROSSGEN_COMPILE
+ AppDomainRefHolder pDomain(theDomain);
+#else
+ AppDomainRefHolder pDomain(new AppDomain());
+#endif
+
+ SystemDomain::LockHolder lh;
+ pDomain->Init();
+
+ Security::SetDefaultAppDomainProperty(pDomain->GetSecurityDescriptor());
+
+ // Need to make this assignment here since we'll be releasing
+ // the lock before calling AddDomain, so any other thread
+ // grabbing this lock after we release it will find that
+ // the default domain has already been created.
+ m_pDefaultDomain = pDomain;
+ _ASSERTE (pDomain->GetId().m_dwId == DefaultADID);
+
+ // allocate a Virtual Call Stub Manager for the default domain
+ m_pDefaultDomain->InitVSD();
+
+ pDomain->SetStage(AppDomain::STAGE_OPEN);
+ pDomain.SuppressRelease();
+
+ LOG((LF_CLASSLOADER | LF_CORDB,
+ LL_INFO10,
+ "Created default domain at %p\n", m_pDefaultDomain));
+}
+
+#ifdef DEBUGGING_SUPPORTED
+
+void SystemDomain::PublishAppDomainAndInformDebugger (AppDomain *pDomain)
+{
+ CONTRACTL
+ {
+ if(!g_fEEInit) {THROWS;} else {DISABLED(NOTHROW);};
+ if(!g_fEEInit) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);};
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO100, "SD::PADAID: Adding 0x%x\n", pDomain));
+
+ // Call the publisher API to add this appdomain entry to the list
+ // The publisher will handle failures, so we don't care if this succeeds or fails.
+ if (g_pDebugInterface != NULL)
+ {
+ g_pDebugInterface->AddAppDomainToIPC(pDomain);
+ }
+}
+
+#endif // DEBUGGING_SUPPORTED
+
+void SystemDomain::AddDomain(AppDomain* pDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer((pDomain)));
+ }
+ CONTRACTL_END;
+
+ {
+ LockHolder lh;
+
+ _ASSERTE (pDomain->m_Stage != AppDomain::STAGE_CREATING);
+ if (pDomain->m_Stage == AppDomain::STAGE_READYFORMANAGEDCODE ||
+ pDomain->m_Stage == AppDomain::STAGE_ACTIVE)
+ {
+ pDomain->SetStage(AppDomain::STAGE_OPEN);
+ IncrementNumAppDomains(); // Maintain a count of app domains added to the list.
+ }
+ }
+
+ // Note that if you add another path that can reach here without calling
+ // PublishAppDomainAndInformDebugger, then you should go back & make sure
+ // that PADAID gets called. Right after this call, if not sooner.
+ LOG((LF_CORDB, LL_INFO1000, "SD::AD:Would have added domain here! 0x%x\n",
+ pDomain));
+}
+
+BOOL SystemDomain::RemoveDomain(AppDomain* pDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDomain));
+ PRECONDITION(!pDomain->IsDefaultDomain());
+ }
+ CONTRACTL_END;
+
+ // You cannot remove the default domain.
+
+
+ if (!pDomain->IsActive())
+ return FALSE;
+
+ pDomain->Release();
+
+ return TRUE;
+}
+
+
+#ifdef PROFILING_SUPPORTED
+void SystemDomain::NotifyProfilerStartup()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ _ASSERTE(System());
+ g_profControlBlock.pProfInterface->AppDomainCreationStarted((AppDomainID) System());
+ END_PIN_PROFILER();
+ }
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ _ASSERTE(System());
+ g_profControlBlock.pProfInterface->AppDomainCreationFinished((AppDomainID) System(), S_OK);
+ END_PIN_PROFILER();
+ }
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ _ASSERTE(System()->DefaultDomain());
+ g_profControlBlock.pProfInterface->AppDomainCreationStarted((AppDomainID) System()->DefaultDomain());
+ END_PIN_PROFILER();
+ }
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ _ASSERTE(System()->DefaultDomain());
+ g_profControlBlock.pProfInterface->AppDomainCreationFinished((AppDomainID) System()->DefaultDomain(), S_OK);
+ END_PIN_PROFILER();
+ }
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ _ASSERTE(SharedDomain::GetDomain());
+ g_profControlBlock.pProfInterface->AppDomainCreationStarted((AppDomainID) SharedDomain::GetDomain());
+ END_PIN_PROFILER();
+ }
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ _ASSERTE(SharedDomain::GetDomain());
+ g_profControlBlock.pProfInterface->AppDomainCreationFinished((AppDomainID) SharedDomain::GetDomain(), S_OK);
+ END_PIN_PROFILER();
+ }
+}
+
+HRESULT SystemDomain::NotifyProfilerShutdown()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ _ASSERTE(System());
+ g_profControlBlock.pProfInterface->AppDomainShutdownStarted((AppDomainID) System());
+ END_PIN_PROFILER();
+ }
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ _ASSERTE(System());
+ g_profControlBlock.pProfInterface->AppDomainShutdownFinished((AppDomainID) System(), S_OK);
+ END_PIN_PROFILER();
+ }
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ _ASSERTE(System()->DefaultDomain());
+ g_profControlBlock.pProfInterface->AppDomainShutdownStarted((AppDomainID) System()->DefaultDomain());
+ END_PIN_PROFILER();
+ }
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ _ASSERTE(System()->DefaultDomain());
+ g_profControlBlock.pProfInterface->AppDomainShutdownFinished((AppDomainID) System()->DefaultDomain(), S_OK);
+ END_PIN_PROFILER();
+ }
+ return (S_OK);
+}
+#endif // PROFILING_SUPPORTED
+
+#ifdef FEATURE_FUSION
+static HRESULT GetVersionPath(HKEY root, __in LPWSTR key, __out LPWSTR* pDevpath, DWORD* pdwDevpath)
+{
+ CONTRACTL
+ {
+ MODE_PREEMPTIVE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ DWORD rtn;
+ RegKeyHolder versionKey;
+ rtn = WszRegOpenKeyEx(root, key, 0, KEY_READ, &versionKey);
+ if(rtn == ERROR_SUCCESS) {
+ DWORD type;
+ DWORD cbDevpath;
+ if(WszRegQueryValueEx(versionKey, W("devpath"), 0, &type, (LPBYTE) NULL, &cbDevpath) == ERROR_SUCCESS && type == REG_SZ) {
+ *pDevpath = (LPWSTR) new (nothrow) BYTE[cbDevpath];
+ if(*pDevpath == NULL)
+ return E_OUTOFMEMORY;
+ else {
+ rtn = WszRegQueryValueEx(versionKey, W("devpath"), 0, &type, (LPBYTE) *pDevpath, &cbDevpath);
+ if ((rtn == ERROR_SUCCESS) && (type == REG_SZ))
+ *pdwDevpath = (DWORD) wcslen(*pDevpath);
+ }
+ }
+ else
+ return REGDB_E_INVALIDVALUE;
+ }
+
+ return HRESULT_FROM_WIN32(rtn);
+}
+
+// Get the developer's path from the environment. This can only be set through the environment and
+// cannot be added through configuration files, the registry, etc., since that would make it too
+// easy for developers to deploy apps that are not side by side. The environment variable should
+// only be used on developer machines, where exact matching to versions makes building and
+// testing too difficult.
+void SystemDomain::GetDevpathW(__out_ecount_opt(1) LPWSTR* pDevpath, DWORD* pdwDevpath)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ if(g_pConfig->DeveloperInstallation() && m_fDevpath == FALSE) {
+
+ LockHolder lh;
+
+ if(m_fDevpath == FALSE) {
+ DWORD dwPath = 0;
+ dwPath = WszGetEnvironmentVariable(APPENV_DEVPATH, 0, 0);
+ if(dwPath) {
+ m_pwDevpath = (WCHAR*) new WCHAR[dwPath];
+ m_dwDevpath = WszGetEnvironmentVariable(APPENV_DEVPATH,
+ m_pwDevpath,
+ dwPath);
+ }
+ else {
+ RegKeyHolder userKey;
+ RegKeyHolder machineKey;
+
+ WCHAR pVersion[MAX_PATH];
+ DWORD dwVersion = MAX_PATH;
+ HRESULT hr = S_OK;
+ hr = FusionBind::GetVersion(pVersion, &dwVersion);
+ if(SUCCEEDED(hr)) {
+ LONG rslt;
+ rslt = WszRegOpenKeyEx(HKEY_CURRENT_USER, FRAMEWORK_REGISTRY_KEY_W,0,KEY_READ, &userKey);
+ hr = HRESULT_FROM_WIN32(rslt);
+ if (SUCCEEDED(hr)) {
+ hr = GetVersionPath(userKey, pVersion, &m_pwDevpath, &m_dwDevpath);
+ }
+
+ if (FAILED(hr) && WszRegOpenKeyEx(HKEY_LOCAL_MACHINE, FRAMEWORK_REGISTRY_KEY_W,0,KEY_READ, &machineKey) == ERROR_SUCCESS) {
+ hr = GetVersionPath(machineKey, pVersion, &m_pwDevpath, &m_dwDevpath);
+ }
+ }
+ if (Assembly::FileNotFound(hr))
+ hr = S_FALSE;
+ else
+ IfFailThrow(hr);
+ }
+
+ m_fDevpath = TRUE;
+ }
+ // lh out of scope here
+ }
+
+ if(pDevpath) *pDevpath = m_pwDevpath;
+ if(pdwDevpath) *pdwDevpath = m_dwDevpath;
+ return;
+}
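+
+// Illustrative only: on desktop CLR the DEVPATH feature is typically enabled by
+// setting the environment variable, e.g.
+//     set DEVPATH=C:\MyAssemblies
+// together with <developmentMode developerInstallation="true"/> in configuration,
+// which is roughly what g_pConfig->DeveloperInstallation() above reflects.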
+#endif // FEATURE_FUSION
+
+#ifdef _DEBUG
+struct AppDomain::ThreadTrackInfo {
+ Thread *pThread;
+ CDynArray<Frame *> frameStack;
+};
+#endif // _DEBUG
+
+AppDomain::AppDomain()
+{
+ // initialize fields so the appdomain can be safely destructed
+ // shouldn't call anything that can fail here - use ::Init instead
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ m_cRef=1;
+ m_pNextInDelayedUnloadList = NULL;
+ m_pSecContext = NULL;
+ m_fRudeUnload = FALSE;
+ m_pUnloadRequestThread = NULL;
+ m_ADUnloadSink=NULL;
+
+#ifndef FEATURE_CORECLR
+ m_bUseOsSorting = RunningOnWin8();
+ m_sortVersion = DEFAULT_SORT_VERSION;
+ m_pCustomSortLibrary = NULL;
+#if _DEBUG
+ m_bSortingInitialized = FALSE;
+#endif // _DEBUG
+ m_pNlsHashProvider = NULL;
+#endif //!FEATURE_CORECLR
+
+ // Initialize Shared state. Assemblies are loaded
+ // into each domain by default.
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ m_SharePolicy = SHARE_POLICY_UNSPECIFIED;
+#endif
+
+ m_pRootAssembly = NULL;
+
+ m_pwDynamicDir = NULL;
+
+ m_dwFlags = 0;
+ m_pSecDesc = NULL;
+ m_pDefaultContext = NULL;
+#ifdef FEATURE_COMINTEROP
+ m_pComCallWrapperCache = NULL;
+ m_pRCWCache = NULL;
+ m_pRCWRefCache = NULL;
+ m_pLicenseInteropHelperMT = NULL;
+ m_COMorRemotingFlag = COMorRemoting_NotInitialized;
+ memset(m_rpCLRTypes, 0, sizeof(m_rpCLRTypes));
+ m_pSystemDll = nullptr;
+ m_pSystemRuntimeWindowsRuntimeDll = nullptr;
+ m_pSystemRuntimeWindowsRuntimeUIXamlDll = nullptr;
+ m_pSystemNumericsVectors = nullptr;
+#endif // FEATURE_COMINTEROP
+
+ m_pUMEntryThunkCache = NULL;
+
+ m_pAsyncPool = NULL;
+ m_hHandleTableBucket = NULL;
+
+ m_ExposedObject = NULL;
+ m_pComIPForExposedObject = NULL;
+
+#ifdef _DEBUG
+ m_pThreadTrackInfoList = NULL;
+ m_TrackSpinLock = 0;
+ m_Assemblies.Debug_SetAppDomain(this);
+#endif // _DEBUG
+
+ m_dwThreadEnterCount = 0;
+ m_dwThreadsStillInAppDomain = (ULONG)-1;
+
+ m_pSecDesc = NULL;
+ m_hHandleTableBucket=NULL;
+
+ m_ExposedObject = NULL;
+
+#ifdef FEATURE_COMINTEROP
+ m_pRefDispIDCache = NULL;
+ m_hndMissing = NULL;
+#endif
+
+ m_pRefClassFactHash = NULL;
+ m_anonymouslyHostedDynamicMethodsAssembly = NULL;
+
+ m_ReversePInvokeCanEnter=TRUE;
+ m_ForceTrivialWaitOperations = false;
+ m_Stage=STAGE_CREATING;
+
+ m_bForceGCOnUnload=FALSE;
+ m_bUnloadingFromUnloadEvent=FALSE;
+#ifdef _DEBUG
+ m_dwIterHolders=0;
+ m_dwRefTakers=0;
+ m_dwCreationHolders=0;
+#endif
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ m_ullTotalProcessorUsage = 0;
+ m_pullAllocBytes = NULL;
+ m_pullSurvivedBytes = NULL;
+#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+ m_pTypeEquivalenceTable = NULL;
+#endif // FEATURE_TYPEEQUIVALENCE
+
+#ifdef FEATURE_COMINTEROP
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+ m_pReflectionOnlyWinRtBinder = NULL;
+ m_pReflectionOnlyWinRtTypeCache = NULL;
+#endif // FEATURE_REFLECTION_ONLY_LOAD
+ m_pNameToTypeMap = NULL;
+ m_vNameToTypeMapVersion = 0;
+ m_nEpoch = 0;
+ m_pWinRTFactoryCache = NULL;
+#endif // FEATURE_COMINTEROP
+
+ m_fAppDomainManagerSetInConfig = FALSE;
+ m_dwAppDomainManagerInitializeDomainFlags = eInitializeNewDomainFlags_None;
+
+#ifdef FEATURE_PREJIT
+ m_pDomainFileWithNativeImageList = NULL;
+#endif
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+ m_fIsBindingModelLocked.Store(FALSE);
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+} // AppDomain::AppDomain
+
+AppDomain::~AppDomain()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef CROSSGEN_COMPILE
+
+ _ASSERTE(m_dwCreationHolders == 0);
+
+ // release the TPIndex. note that since TPIndex values are recycled the TPIndex
+ // can only be released once all threads in the AppDomain have exited.
+ if (GetTPIndex().m_dwIndex != 0)
+ PerAppDomainTPCountList::ResetAppDomainIndex(GetTPIndex());
+
+ if (m_dwId.m_dwId!=0)
+ SystemDomain::ReleaseAppDomainId(m_dwId);
+
+ m_AssemblyCache.Clear();
+
+ if (m_ADUnloadSink)
+ m_ADUnloadSink->Release();
+
+ if (m_pSecContext)
+ delete m_pSecContext;
+
+ if(!g_fEEInit)
+ Terminate();
+
+#ifndef FEATURE_CORECLR
+ if (m_pCustomSortLibrary)
+ delete m_pCustomSortLibrary;
+
+ if (m_pNlsHashProvider)
+ delete m_pNlsHashProvider;
+#endif
+
+
+#ifdef FEATURE_REMOTING
+ if (!g_fEEInit)
+ {
+ GCX_COOP(); // See SystemDomain::EnumAllStaticGCRefs if you are removing this
+ CrossDomainTypeMap::FlushStaleEntries();
+ CrossDomainFieldMap::FlushStaleEntries();
+ }
+#endif // FEATURE_REMOTING
+
+#ifdef FEATURE_COMINTEROP
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+ if (m_pReflectionOnlyWinRtBinder != NULL)
+ {
+ m_pReflectionOnlyWinRtBinder->Release();
+ }
+ if (m_pReflectionOnlyWinRtTypeCache != NULL)
+ {
+ m_pReflectionOnlyWinRtTypeCache->Release();
+ }
+#endif // FEATURE_REFLECTION_ONLY_LOAD
+ if (m_pNameToTypeMap != nullptr)
+ {
+ delete m_pNameToTypeMap;
+ m_pNameToTypeMap = nullptr;
+ }
+ if (m_pWinRTFactoryCache != nullptr)
+ {
+ delete m_pWinRTFactoryCache;
+ m_pWinRTFactoryCache = nullptr;
+ }
+#endif //FEATURE_COMINTEROP
+
+#ifdef _DEBUG
+ // If we were tracking thread AD transitions, cleanup the list on shutdown
+ if (m_pThreadTrackInfoList)
+ {
+ while (m_pThreadTrackInfoList->Count() > 0)
+ {
+ // Get the very last element
+ ThreadTrackInfo *pElem = *(m_pThreadTrackInfoList->Get(m_pThreadTrackInfoList->Count() - 1));
+ _ASSERTE(pElem);
+
+ // Free the memory
+ delete pElem;
+
+ // Remove pointer entry from the list
+ m_pThreadTrackInfoList->Delete(m_pThreadTrackInfoList->Count() - 1);
+ }
+
+ // Now delete the list itself
+ delete m_pThreadTrackInfoList;
+ m_pThreadTrackInfoList = NULL;
+ }
+#endif // _DEBUG
+
+#endif // CROSSGEN_COMPILE
+}
+
+//*****************************************************************************
+//*****************************************************************************
+//*****************************************************************************
+#ifdef _DEBUG
+#include "handletablepriv.h"
+#endif
+
+
+
+void AppDomain::Init()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(SystemDomain::IsUnderDomainLock());
+ }
+ CONTRACTL_END;
+
+ m_pDelayedLoaderAllocatorUnloadList = NULL;
+
+ SetStage( STAGE_CREATING);
+
+
+#ifdef FEATURE_HOSTED_BINDER
+ // The lock is taken also during stack walking (GC or profiler)
+ // - To prevent deadlock with GC thread, we cannot trigger GC while holding the lock
+ // - To prevent deadlock with profiler thread, we cannot allow thread suspension
+ m_crstHostAssemblyMap.Init(
+ CrstHostAssemblyMap,
+ (CrstFlags)(CRST_GC_NOTRIGGER_WHEN_TAKEN
+ | CRST_DEBUGGER_THREAD
+ INDEBUG(| CRST_DEBUG_ONLY_CHECK_FORBID_SUSPEND_THREAD)));
+ m_crstHostAssemblyMapAdd.Init(CrstHostAssemblyMapAdd);
+#endif //FEATURE_HOSTED_BINDER
+
+ m_dwId = SystemDomain::GetNewAppDomainId(this);
+
+ m_LoaderAllocator.Init(this);
+
+#ifndef CROSSGEN_COMPILE
+ // Allocate the threadpool entry before the appdomain id list. Otherwise,
+ // the thread pool list will be out of sync if insertion of the id into
+ // the appdomain list fails.
+ m_tpIndex = PerAppDomainTPCountList::AddNewTPIndex();
+#endif // CROSSGEN_COMPILE
+
+ m_dwIndex = SystemDomain::GetNewAppDomainIndex(this);
+
+#ifndef CROSSGEN_COMPILE
+ PerAppDomainTPCountList::SetAppDomainId(m_tpIndex, m_dwId);
+
+ m_ADUnloadSink=new ADUnloadSink();
+#endif
+
+ BaseDomain::Init();
+
+ // Set up the IL stub cache
+ m_ILStubCache.Init(GetLoaderAllocator()->GetHighFrequencyHeap());
+
+ m_pSecContext = new SecurityContext (GetLowFrequencyHeap());
+
+ // Set up the binding caches
+ m_AssemblyCache.Init(&m_DomainCacheCrst, GetHighFrequencyHeap());
+ m_UnmanagedCache.InitializeTable(this, &m_DomainCacheCrst);
+
+ m_MemoryPressure = 0;
+
+ m_sDomainLocalBlock.Init(this);
+
+#ifndef CROSSGEN_COMPILE
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ // NOTE: it's important that we initialize ARM data structures before calling
+ // Ref_CreateHandleTableBucket, this is because AD::Init() can race with GC
+ // and once we add ourselves to the handle table map the GC can start walking
+ // our handles and calling AD::RecordSurvivedBytes() which touches ARM data.
+ if (GCHeap::IsServerHeap())
+ m_dwNumHeaps = CPUGroupInfo::CanEnableGCCPUGroups() ?
+ CPUGroupInfo::GetNumActiveProcessors() :
+ GetCurrentProcessCpuCount();
+ else
+ m_dwNumHeaps = 1;
+ m_pullAllocBytes = new ULONGLONG [m_dwNumHeaps * ARM_CACHE_LINE_SIZE_ULL];
+ m_pullSurvivedBytes = new ULONGLONG [m_dwNumHeaps * ARM_CACHE_LINE_SIZE_ULL];
+ for (DWORD i = 0; i < m_dwNumHeaps; i++)
+ {
+ m_pullAllocBytes[i * ARM_CACHE_LINE_SIZE_ULL] = 0;
+ m_pullSurvivedBytes[i * ARM_CACHE_LINE_SIZE_ULL] = 0;
+ }
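+ // Note: the ARM_CACHE_LINE_SIZE_ULL stride used above presumably keeps each
+ // heap's counters on a separate cache line so that concurrent server-GC
+ // heaps do not false-share them.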
+ m_ullLastEtwAllocBytes = 0;
+#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+ // The default domain reuses the handle table bucket that was created during
+ // EEStartup, since the default domain cannot be unloaded.
+ if (GetId().m_dwId == DefaultADID)
+ {
+ m_hHandleTableBucket = g_HandleTableMap.pBuckets[0];
+ }
+ else
+ {
+ m_hHandleTableBucket = Ref_CreateHandleTableBucket(m_dwIndex);
+ }
+
+#ifdef _DEBUG
+ if (((HandleTable *)(m_hHandleTableBucket->pTable[0]))->uADIndex != m_dwIndex)
+ _ASSERTE (!"AD index mismatch");
+#endif // _DEBUG
+
+#endif // CROSSGEN_COMPILE
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+ m_TypeEquivalenceCrst.Init(CrstTypeEquivalenceMap);
+#endif
+
+ m_ReflectionCrst.Init(CrstReflection, CRST_UNSAFE_ANYMODE);
+ m_RefClassFactCrst.Init(CrstClassFactInfoHash);
+
+ {
+ LockOwner lock = {&m_DomainCrst, IsOwnerOfCrst};
+ m_clsidHash.Init(0,&CompareCLSID,true, &lock); // init hash table
+ }
+
+ CreateSecurityDescriptor();
+ SetStage(STAGE_READYFORMANAGEDCODE);
+
+#ifndef CROSSGEN_COMPILE
+ m_pDefaultContext = new Context(this);
+
+ m_ExposedObject = CreateHandle(NULL);
+
+ // Create the Application Security Descriptor
+
+ COUNTER_ONLY(GetPerfCounters().m_Loading.cAppDomains++);
+
+#ifdef FEATURE_COMINTEROP
+ if (!AppX::IsAppXProcess())
+ {
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+ m_pReflectionOnlyWinRtTypeCache = clr::SafeAddRef(new CLRPrivTypeCacheReflectionOnlyWinRT());
+ m_pReflectionOnlyWinRtBinder = clr::SafeAddRef(new CLRPrivBinderReflectionOnlyWinRT(m_pReflectionOnlyWinRtTypeCache));
+#endif
+ }
+#ifdef FEATURE_APPX_BINDER
+ else if (g_fEEStarted && !IsDefaultDomain())
+ { // Non-default domain in an AppX process. This exists only for designers and we'd better be in dev mode.
+ _ASSERTE(IsCompilationProcess() || AppX::IsAppXDesignMode());
+
+ // Inherit AppX binder from default domain.
+ SetLoadContextHostBinder(SystemDomain::System()->DefaultDomain()->GetLoadContextHostBinder());
+
+ // Note: LoadFrom, LoadFile, Load(byte[], ...), ReflectionOnlyLoad, LoadWithPartialName,
+ // etc. are not supported and are actively blocked.
+ }
+#endif //FEATURE_APPX_BINDER
+#endif //FEATURE_COMINTEROP
+
+#endif // CROSSGEN_COMPILE
+} // AppDomain::Init
+
+
+/*********************************************************************/
+
+BOOL AppDomain::IsCompilationDomain()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ BOOL isCompilationDomain = (m_dwFlags & COMPILATION_DOMAIN) != 0;
+#ifdef FEATURE_PREJIT
+ _ASSERTE(!isCompilationDomain ||
+ (IsCompilationProcess() && IsPassiveDomain()));
+#endif // FEATURE_PREJIT
+ return isCompilationDomain;
+}
+
+#ifndef CROSSGEN_COMPILE
+
+extern int g_fADUnloadWorkerOK;
+
+// Notes:
+// This helper will send the AppDomain creation notifications for profiler / debugger.
+// If it throws, its backout code will also send a notification.
+// If it succeeds, then we still need to send a AppDomainCreateFinished notification.
+void AppDomain::CreateUnmanagedObject(AppDomainCreationHolder<AppDomain>& pDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ pDomain.Assign(new AppDomain());
+ if (g_fADUnloadWorkerOK<0)
+ {
+ AppDomain::CreateADUnloadWorker();
+ }
+
+ //@todo: B#25921
+ // We AddRef the AppDomain object here and notify the profiler that appdomain
+ // creation has started, then return to managed code, which will call
+ // the function that releases the appdomain and notifies the profiler that we
+ // finished creating the appdomain. If an exception is raised while we're in
+ // that managed code, we will leak memory and the profiler will not be
+ // notified about the failure.
+
+#ifdef PROFILING_SUPPORTED
+ // Signal profile if present.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ g_profControlBlock.pProfInterface->AppDomainCreationStarted((AppDomainID) (AppDomain*) pDomain);
+ END_PIN_PROFILER();
+ }
+ EX_TRY
+#endif // PROFILING_SUPPORTED
+ {
+ {
+ SystemDomain::LockHolder lh;
+ pDomain->Init();
+ // allocate a Virtual Call Stub Manager for this domain
+ pDomain->InitVSD();
+ }
+
+ pDomain->SetCanUnload(); // by default can unload any domain
+
+ #ifdef DEBUGGING_SUPPORTED
+ // Notify the debugger here, before the thread transitions into the
+ // AD to finish the setup, and before any assemblies are loaded into it.
+ SystemDomain::PublishAppDomainAndInformDebugger(pDomain);
+ #endif // DEBUGGING_SUPPORTED
+
+ STRESS_LOG2 (LF_APPDOMAIN, LL_INFO100, "Create domain [%d] %p\n", pDomain->GetId().m_dwId, (AppDomain*)pDomain);
+ pDomain->LoadSystemAssemblies();
+ pDomain->SetupSharedStatics();
+
+ pDomain->SetStage(AppDomain::STAGE_ACTIVE);
+ }
+#ifdef PROFILING_SUPPORTED
+ EX_HOOK
+ {
+ // Need the first assembly loaded in to get any data on an app domain.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ g_profControlBlock.pProfInterface->AppDomainCreationFinished((AppDomainID)(AppDomain*) pDomain, GET_EXCEPTION()->GetHR());
+ END_PIN_PROFILER();
+ }
+ }
+ EX_END_HOOK;
+
+ // On success, caller must still send the AppDomainCreationFinished notification.
+#endif // PROFILING_SUPPORTED
+}
+
+void AppDomain::Stop()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_MULTICOREJIT
+ GetMulticoreJitManager().StopProfile(true);
+#endif
+
+ // Set the unloaded flag before notifying the debugger
+ GetLoaderAllocator()->SetIsUnloaded();
+
+#ifdef DEBUGGING_SUPPORTED
+ if (IsDebuggerAttached())
+ NotifyDebuggerUnload();
+#endif // DEBUGGING_SUPPORTED
+
+ m_pRootAssembly = NULL; // This assembly is in the assembly list.
+
+ if (m_pSecDesc != NULL)
+ {
+ delete m_pSecDesc;
+ m_pSecDesc = NULL;
+ }
+
+#ifdef DEBUGGING_SUPPORTED
+ if (NULL != g_pDebugInterface)
+ {
+ // Call the publisher API to delete this appdomain entry from the list
+ CONTRACT_VIOLATION(ThrowsViolation);
+ g_pDebugInterface->RemoveAppDomainFromIPC (this);
+ }
+#endif // DEBUGGING_SUPPORTED
+}
+
+void AppDomain::Terminate()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+
+ _ASSERTE(m_dwThreadEnterCount == 0 || IsDefaultDomain());
+
+ if (m_pComIPForExposedObject)
+ {
+ m_pComIPForExposedObject->Release();
+ m_pComIPForExposedObject = NULL;
+ }
+
+ delete m_pDefaultContext;
+ m_pDefaultContext = NULL;
+
+ if (m_pUMEntryThunkCache)
+ {
+ delete m_pUMEntryThunkCache;
+ m_pUMEntryThunkCache = NULL;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (m_pRCWCache)
+ {
+ delete m_pRCWCache;
+ m_pRCWCache = NULL;
+ }
+
+ if (m_pRCWRefCache)
+ {
+ delete m_pRCWRefCache;
+ m_pRCWRefCache = NULL;
+ }
+
+ if (m_pComCallWrapperCache)
+ {
+ m_pComCallWrapperCache->Neuter();
+ m_pComCallWrapperCache->Release();
+ }
+
+ // If the above released the wrapper cache, then it will call back and reset our
+ // m_pComCallWrapperCache to null. If it is not null, we need to set its domain
+ // pointer to null.
+ if (! m_pComCallWrapperCache)
+ {
+ LOG((LF_APPDOMAIN, LL_INFO10, "AppDomain::Terminate ComCallWrapperCache released\n"));
+ }
+#ifdef _DEBUG
+ else
+ {
+ m_pComCallWrapperCache = NULL;
+ LOG((LF_APPDOMAIN, LL_INFO10, "AppDomain::Terminate ComCallWrapperCache not released\n"));
+ }
+#endif // _DEBUG
+
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_FUSION
+ if(m_pAsyncPool != NULL)
+ {
+ delete m_pAsyncPool;
+ m_pAsyncPool = NULL;
+ }
+#endif
+
+ if (!IsAtProcessExit())
+ {
+ // If we're not shutting down everything, then clean up the string literals associated
+ // with this appdomain -- note that this no longer needs to happen while suspended,
+ // because the appropriate locks are taken in the GlobalStringLiteralMap.
+ // This is important, as these locks have a higher lock number than does the
+ // thread-store lock which is taken when we suspend.
+ GetLoaderAllocator()->CleanupStringLiteralMap();
+
+ // Suspend the EE to do some clean up that can only occur
+ // while no threads are running.
+ GCX_COOP (); // SuspendEE may require current thread to be in Coop mode
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_APPDOMAIN_SHUTDOWN);
+ }
+
+ // Note that this must be performed before restarting the EE. It will clean
+ // the cache and prevent others from using stale cache entries.
+ //@TODO: Would be nice to get this back to BaseDomain, but need larger fix for that.
+ // NOTE: Must have the runtime suspended to unlink managers
+ // NOTE: May be NULL due to OOM during initialization. Can skip in that case.
+ GetLoaderAllocator()->UninitVirtualCallStubManager();
+ MethodTable::ClearMethodDataCache();
+ ClearJitGenericHandleCache(this);
+
+ // @TODO s_TPMethodTableCrst prevents us from keeping the whole
+ // assembly shutdown logic here. See if we can do better in the next milestone
+#ifdef FEATURE_PREJIT
+ DeleteNativeCodeRanges();
+#endif
+
+ if (!IsAtProcessExit())
+ {
+ // Resume the EE.
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+ }
+
+ ShutdownAssemblies();
+#ifdef FEATURE_CORECLR
+ ShutdownNativeDllSearchDirectories();
+#endif
+
+ if (m_pRefClassFactHash)
+ {
+ m_pRefClassFactHash->Destroy();
+ // storage for m_pRefClassFactHash itself is allocated on the loader heap
+ }
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+ m_TypeEquivalenceCrst.Destroy();
+#endif
+
+ m_ReflectionCrst.Destroy();
+ m_RefClassFactCrst.Destroy();
+
+ m_LoaderAllocator.Terminate();
+
+ BaseDomain::Terminate();
+
+#ifdef _DEBUG
+ if (m_hHandleTableBucket &&
+ m_hHandleTableBucket->pTable &&
+ ((HandleTable *)(m_hHandleTableBucket->pTable[0]))->uADIndex != m_dwIndex)
+ _ASSERTE (!"AD index mismatch");
+#endif // _DEBUG
+
+ if (m_hHandleTableBucket) {
+ Ref_DestroyHandleTableBucket(m_hHandleTableBucket);
+ m_hHandleTableBucket = NULL;
+ }
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ if (m_pullAllocBytes)
+ {
+ delete [] m_pullAllocBytes;
+ }
+ if (m_pullSurvivedBytes)
+ {
+ delete [] m_pullSurvivedBytes;
+ }
+#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+ if(m_dwIndex.m_dwIndex != 0)
+ SystemDomain::ReleaseAppDomainIndex(m_dwIndex);
+} // AppDomain::Terminate
+
+void AppDomain::CloseDomain()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ BOOL bADRemoved = FALSE;
+
+ AddRef(); // Hold a reference
+ AppDomainRefHolder AdHolder(this);
+ {
+ SystemDomain::LockHolder lh;
+
+ SystemDomain::System()->DecrementNumAppDomains(); // Maintain a count of app domains added to the list.
+ bADRemoved = SystemDomain::System()->RemoveDomain(this);
+ }
+
+ if(bADRemoved)
+ Stop();
+}
+
+/*********************************************************************/
+
+struct GetExposedObject_Args
+{
+ AppDomain *pDomain;
+ OBJECTREF *ref;
+};
+
+static void GetExposedObject_Wrapper(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ GetExposedObject_Args *args = (GetExposedObject_Args *) ptr;
+ *(args->ref) = args->pDomain->GetExposedObject();
+}
+
+
+OBJECTREF AppDomain::GetExposedObject()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ OBJECTREF ref = GetRawExposedObject();
+ if (ref == NULL)
+ {
+ APPDOMAINREF obj = NULL;
+
+ Thread *pThread = GetThread();
+ if (pThread->GetDomain() != this)
+ {
+ GCPROTECT_BEGIN(ref);
+ GetExposedObject_Args args = {this, &ref};
+ // call through DoCallBack with a domain transition
+ pThread->DoADCallBack(this,GetExposedObject_Wrapper, &args,ADV_CREATING|ADV_RUNNINGIN);
+ GCPROTECT_END();
+ return ref;
+ }
+ MethodTable *pMT = MscorlibBinder::GetClass(CLASS__APP_DOMAIN);
+
+ // Create the module object
+ obj = (APPDOMAINREF) AllocateObject(pMT);
+ obj->SetDomain(this);
+
+ if(StoreFirstObjectInHandle(m_ExposedObject, (OBJECTREF) obj) == FALSE) {
+ obj = (APPDOMAINREF) GetRawExposedObject();
+ _ASSERTE(obj);
+ }
+
+ return (OBJECTREF) obj;
+ }
+
+ return ref;
+}
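+
+// An aside on the publication pattern above: the managed object is created
+// optimistically and published via StoreFirstObjectInHandle; if another thread
+// won the race, the freshly allocated object is dropped (the GC reclaims it)
+// and the winner's object is used instead. A minimal sketch of this
+// first-store-wins initialization with std::atomic; all names below are
+// hypothetical and not part of this file.
+#if 0 // illustration only, not compiled
+#include <atomic>
+
+struct ManagedObject { };  // stand-in for the exposed object
+
+static std::atomic<ManagedObject*> g_exposed{nullptr};
+
+ManagedObject* GetOrCreateExposed()
+{
+    if (ManagedObject* cur = g_exposed.load(std::memory_order_acquire))
+        return cur;                              // fast path: already published
+
+    ManagedObject* fresh = new ManagedObject();
+    ManagedObject* expected = nullptr;
+    // Publish only if nobody else has; this mirrors StoreFirstObjectInHandle.
+    if (g_exposed.compare_exchange_strong(expected, fresh,
+                                          std::memory_order_acq_rel))
+        return fresh;                            // we won the race
+
+    delete fresh;                                // lost the race: discard ours
+    return expected;                             // use the winner's object
+}
+#endif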
+
+#ifndef FEATURE_CORECLR
+void AppDomain::InitializeSorting(OBJECTREF* ppAppdomainSetup)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(ppAppdomainSetup == NULL || IsProtectedByGCFrame(ppAppdomainSetup));
+ }
+ CONTRACTL_END;
+
+ DWORD sortVersionFromConfig = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CompatSortNLSVersion);
+
+ if(sortVersionFromConfig != 0)
+ {
+ m_bUseOsSorting = FALSE;
+ m_sortVersion = sortVersionFromConfig;
+ }
+
+ if(ppAppdomainSetup != NULL)
+ {
+ APPDOMAINSETUPREF adSetup = (APPDOMAINSETUPREF) *ppAppdomainSetup;
+ APPDOMAINSORTINGSETUPINFOREF sortingSetup = adSetup->GetAppDomainSortingSetupInfo();
+
+ if(sortingSetup != NULL)
+ {
+ if(sortingSetup->UseV2LegacySorting() || sortingSetup->UseV4LegacySorting())
+ {
+
+ m_bUseOsSorting = FALSE;
+
+ if(sortingSetup->UseV2LegacySorting())
+ {
+ m_sortVersion = SORT_VERSION_WHIDBEY;
+ }
+
+ if(sortingSetup->UseV4LegacySorting())
+ {
+ m_sortVersion = SORT_VERSION_V4;
+ }
+ }
+ else if(sortingSetup->GetPFNIsNLSDefinedString() != NULL
+ && sortingSetup->GetPFNCompareStringEx() != NULL
+ && sortingSetup->GetPFNLCMapStringEx() != NULL
+ && sortingSetup->GetPFNFindNLSStringEx() != NULL
+ && sortingSetup->GetPFNCompareStringOrdinal() != NULL
+ && sortingSetup->GetPFNGetNLSVersionEx() != NULL
+ && sortingSetup->GetPFNFindStringOrdinal() != NULL)
+ {
+ m_pCustomSortLibrary = new COMNlsCustomSortLibrary;
+ m_pCustomSortLibrary->pIsNLSDefinedString = (PFN_IS_NLS_DEFINED_STRING) sortingSetup->GetPFNIsNLSDefinedString();
+ m_pCustomSortLibrary->pCompareStringEx = (PFN_COMPARE_STRING_EX) sortingSetup->GetPFNCompareStringEx();
+ m_pCustomSortLibrary->pLCMapStringEx = (PFN_LC_MAP_STRING_EX) sortingSetup->GetPFNLCMapStringEx();
+ m_pCustomSortLibrary->pFindNLSStringEx = (PFN_FIND_NLS_STRING_EX) sortingSetup->GetPFNFindNLSStringEx();
+ m_pCustomSortLibrary->pCompareStringOrdinal = (PFN_COMPARE_STRING_ORDINAL) sortingSetup->GetPFNCompareStringOrdinal();
+ m_pCustomSortLibrary->pGetNLSVersionEx = (PFN_GET_NLS_VERSION_EX) sortingSetup->GetPFNGetNLSVersionEx();
+ m_pCustomSortLibrary->pFindStringOrdinal = (PFN_FIND_STRING_ORDINAL) sortingSetup->GetPFNFindStringOrdinal();
+ }
+ }
+ }
+
+ if(m_bUseOsSorting == FALSE && m_sortVersion == DEFAULT_SORT_VERSION)
+ {
+ // If we are using the legacy sorting dlls, the default version for sorting is SORT_VERSION_V4. Note that
+ // we don't expect this to change in the future (even when V5 or V6 of the runtime comes out).
+ m_sortVersion = SORT_VERSION_V4;
+ }
+
+ if(RunningOnWin8() && m_bUseOsSorting == FALSE)
+ {
+ // We need to ensure that the versioned sort DLL can load so we don't crash later. This ensures we have
+ // the same behavior as Windows 7, where even if we couldn't load the correct versioned sort dll, we would
+ // provide the default sorting behavior.
+ INT_PTR sortOrigin;
+ if(COMNlsInfo::InternalInitVersionedSortHandle(W(""), &sortOrigin, m_sortVersion) == NULL)
+ {
+ LOG((LF_APPDOMAIN, LL_WARNING, "AppDomain::InitializeSorting failed to load legacy sort DLL for AppDomain.\n"));
+ // We couldn't load a sort DLL. Fall back to default sorting using the OS.
+ m_bUseOsSorting = TRUE;
+ m_sortVersion = DEFAULT_SORT_VERSION;
+ }
+ }
+
+#if _DEBUG
+ m_bSortingInitialized = TRUE;
+#endif
+}
+#endif
+
+#ifndef FEATURE_CORECLR
+void AppDomain::InitializeHashing(OBJECTREF* ppAppdomainSetup)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(ppAppdomainSetup == NULL || IsProtectedByGCFrame(ppAppdomainSetup));
+ }
+ CONTRACTL_END;
+
+ m_pNlsHashProvider = new COMNlsHashProvider;
+
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+ BOOL fUseRandomizedHashing = (BOOL) CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_UseRandomizedStringHashAlgorithm);
+
+ if(ppAppdomainSetup != NULL)
+ {
+ APPDOMAINSETUPREF adSetup = (APPDOMAINSETUPREF) *ppAppdomainSetup;
+ fUseRandomizedHashing |= adSetup->UseRandomizedStringHashing();
+ }
+
+ m_pNlsHashProvider->SetUseRandomHashing(fUseRandomizedHashing);
+#endif // FEATURE_RANDOMIZED_STRING_HASHING
+}
+#endif // FEATURE_CORECLR
+
+OBJECTREF AppDomain::DoSetup(OBJECTREF* setupInfo)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ ADID adid=GetAppDomain()->GetId();
+
+ OBJECTREF retval=NULL;
+ GCPROTECT_BEGIN(retval);
+
+ ENTER_DOMAIN_PTR(this,ADV_CREATING);
+
+ MethodDescCallSite setup(METHOD__APP_DOMAIN__SETUP);
+
+ ARG_SLOT args[1];
+
+ args[0]=ObjToArgSlot(*setupInfo);
+
+ OBJECTREF activator;
+ activator=setup.Call_RetOBJECTREF(args);
+#ifdef FEATURE_REMOTING
+ if (activator != NULL)
+ {
+ GCPROTECT_BEGIN(activator);
+ retval=AppDomainHelper::CrossContextCopyTo(adid,&activator);
+ GCPROTECT_END();
+ }
+#else
+ _ASSERTE(activator==NULL);
+#endif
+
+#if defined(FEATURE_MULTICOREJIT)
+ // Disable AutoStartProfile in the default domain from this code path.
+ // It's called from SystemDomain::ExecuteMainMethod for normal programs; it is not needed for Silverlight and ASP.NET.
+ if (! IsDefaultDomain())
+ {
+ GCX_PREEMP();
+
+ GetMulticoreJitManager().AutoStartProfile(this);
+ }
+#endif
+
+ END_DOMAIN_TRANSITION;
+ GCPROTECT_END();
+ return retval;
+}
+
+#endif // !CROSSGEN_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+#ifndef CROSSGEN_COMPILE
+HRESULT AppDomain::GetComIPForExposedObject(IUnknown **pComIP)
+{
+ // Assumption: This function is called for AppDomains that the current
+ // thread is in or has entered, or the AppDomain is kept alive.
+ //
+ // Assumption: This function can now throw. The caller is responsible for any
+ // BEGIN_EXTERNAL_ENTRYPOINT, EX_TRY, or other
+ // techniques to convert to a COM HRESULT protocol.
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ Thread *pThread = GetThread();
+ if (m_pComIPForExposedObject)
+ {
+ GCX_PREEMP_THREAD_EXISTS(pThread);
+ m_pComIPForExposedObject->AddRef();
+ *pComIP = m_pComIPForExposedObject;
+ return S_OK;
+ }
+
+ IUnknown* punk = NULL;
+
+ OBJECTREF ref = NULL;
+ GCPROTECT_BEGIN(ref);
+
+ EnsureComStarted();
+
+ ENTER_DOMAIN_PTR(this,ADV_DEFAULTAD)
+ {
+ ref = GetExposedObject();
+ punk = GetComIPFromObjectRef(&ref);
+ if (FastInterlockCompareExchangePointer(&m_pComIPForExposedObject, punk, NULL) == NULL)
+ {
+ GCX_PREEMP_THREAD_EXISTS(pThread);
+ m_pComIPForExposedObject->AddRef();
+ }
+ }
+ END_DOMAIN_TRANSITION;
+
+ GCPROTECT_END();
+
+ if(SUCCEEDED(hr))
+ {
+ *pComIP = m_pComIPForExposedObject;
+ }
+
+ return hr;
+}
+#endif //#ifndef CROSSGEN_COMPILE
+
+MethodTable *AppDomain::GetRedirectedType(WinMDAdapter::RedirectedTypeIndex index)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // If we have the type loaded already, use that
+ if (m_rpCLRTypes[index] != nullptr)
+ {
+ return m_rpCLRTypes[index];
+ }
+
+ WinMDAdapter::FrameworkAssemblyIndex frameworkAssemblyIndex;
+ WinMDAdapter::GetRedirectedTypeInfo(index, nullptr, nullptr, nullptr, &frameworkAssemblyIndex, nullptr, nullptr);
+ MethodTable * pMT = LoadRedirectedType(index, frameworkAssemblyIndex);
+ m_rpCLRTypes[index] = pMT;
+ return pMT;
+}
+
+MethodTable* AppDomain::LoadRedirectedType(WinMDAdapter::RedirectedTypeIndex index, WinMDAdapter::FrameworkAssemblyIndex assembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(index < WinMDAdapter::RedirectedTypeIndex_Count);
+ }
+ CONTRACTL_END;
+
+ LPCSTR szClrNamespace;
+ LPCSTR szClrName;
+ LPCSTR szFullWinRTName;
+ WinMDAdapter::FrameworkAssemblyIndex nFrameworkAssemblyIndex;
+
+ WinMDAdapter::GetRedirectedTypeInfo(index, &szClrNamespace, &szClrName, &szFullWinRTName, &nFrameworkAssemblyIndex, nullptr, nullptr);
+
+ _ASSERTE(nFrameworkAssemblyIndex >= WinMDAdapter::FrameworkAssembly_Mscorlib &&
+ nFrameworkAssemblyIndex < WinMDAdapter::FrameworkAssembly_Count);
+
+ if (assembly != nFrameworkAssemblyIndex)
+ {
+ // The framework type does not live in the assembly we were requested to load redirected types from
+ return nullptr;
+ }
+ else if (nFrameworkAssemblyIndex == WinMDAdapter::FrameworkAssembly_Mscorlib)
+ {
+ return ClassLoader::LoadTypeByNameThrowing(MscorlibBinder::GetModule()->GetAssembly(),
+ szClrNamespace,
+ szClrName,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::LoadTypes,
+ CLASS_LOAD_EXACTPARENTS).GetMethodTable();
+ }
+ else
+ {
+ LPCSTR pSimpleName;
+ AssemblyMetaDataInternal context;
+ const BYTE * pbKeyToken;
+ DWORD cbKeyTokenLength;
+ DWORD dwFlags;
+
+ WinMDAdapter::GetExtraAssemblyRefProps(nFrameworkAssemblyIndex,
+ &pSimpleName,
+ &context,
+ &pbKeyToken,
+ &cbKeyTokenLength,
+ &dwFlags);
+
+ Assembly* pAssembly = AssemblySpec::LoadAssembly(pSimpleName,
+ &context,
+ pbKeyToken,
+ cbKeyTokenLength,
+ dwFlags);
+
+ return ClassLoader::LoadTypeByNameThrowing(
+ pAssembly,
+ szClrNamespace,
+ szClrName,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::LoadTypes,
+ CLASS_LOAD_EXACTPARENTS).GetMethodTable();
+ }
+}
+
+bool AppDomain::FindRedirectedAssembly(Assembly* pAssembly, WinMDAdapter::FrameworkAssemblyIndex* pIndex)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(CheckPointer(pIndex));
+ }
+ CONTRACTL_END;
+
+ DomainAssembly *pDomainAssembly = pAssembly->GetDomainAssembly();
+
+ if (pDomainAssembly->IsSystem())
+ {
+ *pIndex = WinMDAdapter::FrameworkAssembly_Mscorlib;
+ return true;
+ }
+ else if (pDomainAssembly == m_pSystemDll)
+ {
+ *pIndex = WinMDAdapter::FrameworkAssembly_System;
+ return true;
+ }
+ else if (pDomainAssembly == m_pSystemRuntimeWindowsRuntimeDll)
+ {
+ *pIndex = WinMDAdapter::FrameworkAssembly_SystemRuntimeWindowsRuntime;
+ return true;
+ }
+ else if (pDomainAssembly == m_pSystemRuntimeWindowsRuntimeUIXamlDll)
+ {
+ *pIndex = WinMDAdapter::FrameworkAssembly_SystemRuntimeWindowsRuntimeUIXaml;
+ return true;
+ }
+ else if (pDomainAssembly == m_pSystemNumericsVectors)
+ {
+ *pIndex = WinMDAdapter::FrameworkAssembly_SystemNumericsVectors;
+ return true;
+ }
+
+ return false;
+}
+
+#endif //FEATURE_COMINTEROP
+
+#endif //!DACCESS_COMPILE
+
+
+#ifdef FEATURE_COMINTEROP
+BOOL AppDomain::FindRedirectedAssemblyFromIndexIfLoaded(WinMDAdapter::FrameworkAssemblyIndex index, Assembly ** ppAssembly)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // If new redirected assemblies are added, this function probably needs to be updated
+ C_ASSERT(WinMDAdapter::FrameworkAssembly_Count == 5);
+
+ DomainAssembly * pDomainAssembly = NULL;
+
+ if (index == WinMDAdapter::FrameworkAssembly_Mscorlib)
+ {
+ *ppAssembly = SystemDomain::SystemAssembly();
+ return TRUE;
+ }
+ else if (index == WinMDAdapter::FrameworkAssembly_System)
+ {
+ pDomainAssembly = m_pSystemDll;
+ }
+ else if (index == WinMDAdapter::FrameworkAssembly_SystemRuntimeWindowsRuntime)
+ {
+ pDomainAssembly = m_pSystemRuntimeWindowsRuntimeDll;
+ }
+ else if (index == WinMDAdapter::FrameworkAssembly_SystemRuntimeWindowsRuntimeUIXaml)
+ {
+ pDomainAssembly = m_pSystemRuntimeWindowsRuntimeUIXamlDll;
+ }
+ else if (index == WinMDAdapter::FrameworkAssembly_SystemNumericsVectors)
+ {
+ pDomainAssembly = m_pSystemNumericsVectors;
+ }
+
+ if (pDomainAssembly != NULL)
+ {
+ *ppAssembly = pDomainAssembly->GetAssembly();
+ return TRUE;
+ }
+
+ *ppAssembly = NULL;
+ return FALSE;
+}
+
+#endif // FEATURE_COMINTEROP
+
+#ifndef DACCESS_COMPILE
+
+void AppDomain::CreateSecurityDescriptor()
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(m_pSecDesc == NULL);
+
+ m_pSecDesc = Security::CreateApplicationSecurityDescriptor(this);
+}
+
+bool IsPlatformAssembly(LPCSTR szName, DomainAssembly *pDomainAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(szName));
+ PRECONDITION(CheckPointer(pDomainAssembly));
+ }
+ CONTRACTL_END;
+
+ PEAssembly *pPEAssembly = pDomainAssembly->GetFile();
+
+ if (strcmp(szName, pPEAssembly->GetSimpleName()) != 0)
+ {
+ return false;
+ }
+
+ DWORD cbPublicKey;
+ const BYTE *pbPublicKey = static_cast<const BYTE *>(pPEAssembly->GetPublicKey(&cbPublicKey));
+ if (pbPublicKey == nullptr)
+ {
+ return false;
+ }
+
+#ifdef FEATURE_CORECLR
+ return StrongNameIsSilverlightPlatformKey(pbPublicKey, cbPublicKey);
+#else
+ return StrongNameIsEcmaKey(pbPublicKey, cbPublicKey);
+#endif
+}
+
+void AppDomain::AddAssembly(DomainAssembly * assem)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ {
+ CrstHolder ch(GetAssemblyListLock());
+
+ // Attempt to find empty space in assemblies list
+ DWORD asmCount = m_Assemblies.GetCount_Unlocked();
+ for (DWORD i = 0; i < asmCount; ++i)
+ {
+ if (m_Assemblies.Get_UnlockedNoReference(i) == NULL)
+ {
+ m_Assemblies.Set_Unlocked(i, assem);
+ return;
+ }
+ }
+
+ // If empty space not found, simply add to end of list
+ IfFailThrow(m_Assemblies.Append_Unlocked(assem));
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // See if this is one of the well-known assemblies that we look for
+ if (m_pSystemDll == nullptr && IsPlatformAssembly("System", assem))
+ {
+ m_pSystemDll = assem;
+ return;
+ }
+
+ if (m_pSystemRuntimeWindowsRuntimeDll == nullptr && IsPlatformAssembly("System.Runtime.WindowsRuntime", assem))
+ {
+ m_pSystemRuntimeWindowsRuntimeDll = assem;
+ return;
+ }
+ if (m_pSystemRuntimeWindowsRuntimeUIXamlDll == nullptr && IsPlatformAssembly("System.Runtime.WindowsRuntime.UI.Xaml", assem))
+ {
+ m_pSystemRuntimeWindowsRuntimeUIXamlDll = assem;
+ return;
+ }
+ if (m_pSystemNumericsVectors == nullptr)
+ {
+ PEAssembly *pPEAssembly = assem->GetFile();
+
+ if (strcmp("System.Numerics.Vectors", pPEAssembly->GetSimpleName()) == 0)
+ {
+ DWORD cbPublicKey;
+ const BYTE *pbPublicKey = static_cast<const BYTE *>(pPEAssembly->GetPublicKey(&cbPublicKey));
+
+ if (cbPublicKey == sizeof(s_pbContractPublicKey) && memcmp(pbPublicKey, s_pbContractPublicKey, cbPublicKey) == 0)
+ {
+ m_pSystemNumericsVectors = assem;
+ }
+ }
+ }
+#endif // FEATURE_COMINTEROP
+}
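+
+// An aside on the list discipline above: RemoveAssembly_Unlocked (below) NULLs
+// out a slot rather than compacting the array, so surviving entries keep their
+// indices, and AddAssembly refills the first hole before growing the list.
+// A minimal sketch of the slot-reuse pattern; this is a simplification, as the
+// real list is also lock- and iterator-aware.
+#if 0 // illustration only, not compiled
+#include <vector>
+
+template <typename T>
+void AddToSlotList(std::vector<T*>& list, T* item)
+{
+    for (auto& slot : list)
+        if (slot == nullptr) { slot = item; return; }   // reuse a hole
+    list.push_back(item);                               // no hole: append
+}
+
+template <typename T>
+void RemoveFromSlotList(std::vector<T*>& list, T* item)
+{
+    for (auto& slot : list)
+        if (slot == item) { slot = nullptr; return; }   // leave a hole behind
+}
+#endif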
+
+void AppDomain::RemoveAssembly_Unlocked(DomainAssembly * pAsm)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(GetAssemblyListLock()->OwnedByCurrentThread());
+
+ DWORD asmCount = m_Assemblies.GetCount_Unlocked();
+ for (DWORD i = 0; i < asmCount; ++i)
+ {
+ if (m_Assemblies.Get_UnlockedNoReference(i) == pAsm)
+ {
+ m_Assemblies.Set_Unlocked(i, NULL);
+ return;
+ }
+ }
+
+ _ASSERTE(!"Unreachable");
+}
+
+BOOL AppDomain::ContainsAssembly(Assembly * assem)
+{
+ WRAPPER_NO_CONTRACT;
+ AssemblyIterator i = IterateAssembliesEx((AssemblyIterationFlags)(
+ kIncludeLoaded |
+ (assem->IsIntrospectionOnly() ? kIncludeIntrospection : kIncludeExecution)));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (i.Next(pDomainAssembly.This()))
+ {
+ CollectibleAssemblyHolder<Assembly *> pAssembly = pDomainAssembly->GetLoadedAssembly();
+ if (pAssembly == assem)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+BOOL AppDomain::HasSetSecurityPolicy()
+{
+ CONTRACT(BOOL)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ GCX_COOP();
+
+ if (NingenEnabled())
+ {
+ return FALSE;
+ }
+ RETURN ((APPDOMAINREF)GetExposedObject())->HasSetPolicy();
+}
+
+#if defined (FEATURE_LOADER_OPTIMIZATION) && !defined(FEATURE_CORECLR)
+// Returns true if the user has declared the desire to load an
+// assembly domain-neutral. This is done either by specifying System.LoaderOptimizationAttribute
+// on the entry routine or by the host setting this loader-optimization flag.
+BOOL AppDomain::ApplySharePolicy(DomainAssembly *pFile)
+{
+ CONTRACT(BOOL)
+ {
+ PRECONDITION(CheckPointer(pFile));
+ THROWS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ if (!pFile->GetFile()->IsShareable())
+ RETURN FALSE;
+
+ if (ApplySharePolicyFlag(pFile))
+ RETURN TRUE;
+
+ RETURN FALSE;
+}
+
+BOOL AppDomain::ApplySharePolicyFlag(DomainAssembly *pFile)
+{
+ CONTRACT(BOOL)
+ {
+ PRECONDITION(CheckPointer(pFile));
+ THROWS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ switch(GetSharePolicy()) {
+ case SHARE_POLICY_ALWAYS:
+ RETURN (!pFile->MayHaveUnknownDependencies());
+
+ case SHARE_POLICY_GAC:
+ RETURN (pFile->IsClosedInGAC());
+
+ case SHARE_POLICY_NEVER:
+ RETURN pFile->IsSystem();
+
+ default:
+ UNREACHABLE_MSG("Unknown share policy");
+ }
+}
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+EEClassFactoryInfoHashTable* AppDomain::SetupClassFactHash()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ CrstHolder ch(&m_ReflectionCrst);
+
+ if (m_pRefClassFactHash == NULL)
+ {
+ AllocMemHolder<void> pCache(GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof (EEClassFactoryInfoHashTable))));
+ EEClassFactoryInfoHashTable *tmp = new (pCache) EEClassFactoryInfoHashTable;
+ LockOwner lock = {&m_RefClassFactCrst,IsOwnerOfCrst};
+ if (!tmp->Init(20, &lock))
+ COMPlusThrowOM();
+ pCache.SuppressRelease();
+ m_pRefClassFactHash = tmp;
+ }
+
+ return m_pRefClassFactHash;
+}
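+
+// The shape of SetupClassFactHash above is lock-protected lazy initialization:
+// check the member under m_ReflectionCrst, build the table off to the side,
+// and publish it only once fully initialized. A minimal sketch with std::mutex;
+// names are hypothetical, and the real code allocates from the loader heap.
+#if 0 // illustration only, not compiled
+#include <mutex>
+#include <new>
+
+struct HashTable { bool Init(int buckets) { return buckets > 0; } };
+
+static std::mutex g_reflectionLock;
+static HashTable* g_classFactHash = nullptr;
+
+HashTable* GetClassFactHash()
+{
+    std::lock_guard<std::mutex> hold(g_reflectionLock);
+    if (g_classFactHash == nullptr)
+    {
+        HashTable* tmp = new HashTable();
+        if (!tmp->Init(20))
+        {
+            delete tmp;             // publish nothing on failure
+            throw std::bad_alloc();
+        }
+        g_classFactHash = tmp;      // publish only when fully initialized
+    }
+    return g_classFactHash;
+}
+#endif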
+
+#ifdef FEATURE_COMINTEROP
+DispIDCache* AppDomain::SetupRefDispIDCache()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ CrstHolder ch(&m_ReflectionCrst);
+
+ if (m_pRefDispIDCache == NULL)
+ {
+ AllocMemHolder<void> pCache = GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof (DispIDCache)));
+
+ DispIDCache *tmp = new (pCache) DispIDCache;
+ tmp->Init();
+
+ pCache.SuppressRelease();
+ m_pRefDispIDCache = tmp;
+ }
+
+ return m_pRefDispIDCache;
+}
+
+#endif // FEATURE_COMINTEROP
+
+FileLoadLock *FileLoadLock::Create(PEFileListLock *pLock, PEFile *pFile, DomainFile *pDomainFile)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(pLock->HasLock());
+ PRECONDITION(pLock->FindFileLock(pFile) == NULL);
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ NewHolder<FileLoadLock> result(new FileLoadLock(pLock, pFile, pDomainFile));
+
+ pLock->AddElement(result);
+ result->AddRef(); // Add one ref on behalf of the ListLock's reference. The corresponding Release() happens in FileLoadLock::CompleteLoadLevel.
+ return result.Extract();
+}
+
+FileLoadLock::~FileLoadLock()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ ((PEFile *) m_pData)->Release();
+}
+
+DomainFile *FileLoadLock::GetDomainFile()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pDomainFile;
+}
+
+FileLoadLevel FileLoadLock::GetLoadLevel()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_level;
+}
+
+ADID FileLoadLock::GetAppDomainId()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_AppDomainId;
+}
+
+// Acquire will return FALSE and not take the lock if the file
+// has already been loaded to the target level. Otherwise,
+// it will return TRUE and take the lock.
+//
+// Note that the taker must publish the new level via CompleteLoadLevel and then release the lock via Leave.
+
+BOOL FileLoadLock::Acquire(FileLoadLevel targetLevel)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // If we are already loaded to the desired level, the lock is "free".
+ if (m_level >= targetLevel)
+ return FALSE;
+
+ if (!DeadlockAwareEnter())
+ {
+ // We failed to get the lock due to a deadlock.
+ return FALSE;
+ }
+
+ if (m_level >= targetLevel)
+ {
+ Leave();
+ return FALSE;
+ }
+
+ return TRUE;
+}
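+
+// A sketch of how a caller drives this protocol (simplified; the real loop in
+// AppDomain::LoadDomainFile below also deals with lock holders, reentrancy,
+// and error poisoning). FALSE from Acquire means "nothing to do, or deadlock";
+// TRUE means the caller owns the lock and must advance and publish the level.
+#if 0 // illustration only, not compiled
+BOOL DoWorkFor(FileLoadLevel level);               // hypothetical work step
+
+void AdvanceOneLevel(FileLoadLock *pLock, FileLoadLevel target)
+{
+    if (!pLock->Acquire(target))
+        return;                                    // already there, or deadlock
+
+    FileLoadLevel workLevel = (FileLoadLevel)(pLock->GetLoadLevel() + 1);
+    BOOL success = DoWorkFor(workLevel);
+    pLock->CompleteLoadLevel(workLevel, success);  // publish the new level
+    pLock->Leave();
+}
+#endif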
+
+BOOL FileLoadLock::CanAcquire(FileLoadLevel targetLevel)
+{
+ // If we are already loaded to the desired level, the lock is "free".
+ if (m_level >= targetLevel)
+ return FALSE;
+
+ return CanDeadlockAwareEnter();
+}
+
+#if !defined(DACCESS_COMPILE) && (defined(LOGGING) || defined(STRESS_LOG))
+static const char *fileLoadLevelName[] =
+{
+ "CREATE", // FILE_LOAD_CREATE
+ "BEGIN", // FILE_LOAD_BEGIN
+ "FIND_NATIVE_IMAGE", // FILE_LOAD_FIND_NATIVE_IMAGE
+ "VERIFY_NATIVE_IMAGE_DEPENDENCIES", // FILE_LOAD_VERIFY_NATIVE_IMAGE_DEPENDENCIES
+ "ALLOCATE", // FILE_LOAD_ALLOCATE
+ "ADD_DEPENDENCIES", // FILE_LOAD_ADD_DEPENDENCIES
+ "PRE_LOADLIBRARY", // FILE_LOAD_PRE_LOADLIBRARY
+ "LOADLIBRARY", // FILE_LOAD_LOADLIBRARY
+ "POST_LOADLIBRARY", // FILE_LOAD_POST_LOADLIBRARY
+ "EAGER_FIXUPS", // FILE_LOAD_EAGER_FIXUPS
+ "VTABLE FIXUPS", // FILE_LOAD_VTABLE_FIXUPS
+ "DELIVER_EVENTS", // FILE_LOAD_DELIVER_EVENTS
+ "LOADED", // FILE_LOADED
+ "VERIFY_EXECUTION", // FILE_LOAD_VERIFY_EXECUTION
+ "ACTIVE", // FILE_ACTIVE
+};
+#endif // !DACCESS_COMPILE && (LOGGING || STRESS_LOG)
+
+BOOL FileLoadLock::CompleteLoadLevel(FileLoadLevel level, BOOL success)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ THROWS;
+ PRECONDITION(HasLock());
+ }
+ CONTRACTL_END;
+
+ // Increment may happen more than once if reentrancy occurs (e.g. LoadLibrary)
+ if (level > m_level)
+ {
+ // Must complete each level in turn, unless we have an error
+ CONSISTENCY_CHECK(m_pDomainFile->IsError() || (level == (m_level+1)));
+ // Remove the lock from the list if the load is completed
+ if (level >= FILE_ACTIVE)
+ {
+ {
+ GCX_COOP();
+ PEFileListLockHolder lock((PEFileListLock*)m_pList);
+
+#if _DEBUG
+ BOOL fDbgOnly_SuccessfulUnlink =
+#endif
+ m_pList->Unlink(this);
+ _ASSERTE(fDbgOnly_SuccessfulUnlink);
+
+ m_pDomainFile->ClearLoading();
+
+ CONSISTENCY_CHECK(m_dwRefCount >= 2); // Caller (LoadDomainFile) should have 1 refcount and m_pList should have another which was acquired in FileLoadLock::Create.
+
+ m_level = (FileLoadLevel)level;
+
+ // Dev11 bug 236344
+ // In AppDomain::IsLoading, if the lock is taken on m_pList and then FindFileLock returns NULL,
+ // we depend on the DomainFile's load level being up to date. Hence we must update the load
+ // level while the m_pList lock is held.
+ if (success)
+ m_pDomainFile->SetLoadLevel(level);
+ }
+
+
+ Release(); // Release m_pList's refcount on this lock, which was acquired in FileLoadLock::Create
+
+ }
+ else
+ {
+ m_level = (FileLoadLevel)level;
+
+ if (success)
+ m_pDomainFile->SetLoadLevel(level);
+ }
+
+#ifndef DACCESS_COMPILE
+ switch(level)
+ {
+ case FILE_LOAD_ALLOCATE:
+ case FILE_LOAD_ADD_DEPENDENCIES:
+ case FILE_LOAD_DELIVER_EVENTS:
+ case FILE_LOADED:
+ case FILE_ACTIVE: // The timing of stress logs is not critical, so even for the FILE_ACTIVE stage we need not do it while the m_pList lock is held.
+ STRESS_LOG4(LF_CLASSLOADER, LL_INFO100, "Completed Load Level %s for DomainFile %p in AD %i - success = %i\n", fileLoadLevelName[level], m_pDomainFile, m_AppDomainId.m_dwId, success);
+ break;
+ default:
+ break;
+ }
+#endif
+
+ return TRUE;
+ }
+ else
+ return FALSE;
+}
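+
+// Why the final level must be published while the m_pList lock is held (the
+// Dev11 bug cited above): a reader takes the list lock, and when FindFileLock
+// returns NULL it falls back to the DomainFile's load level. Unlinking first
+// and publishing later would open a window where the reader sees neither.
+// A minimal sketch of the invariant; names are hypothetical.
+#if 0 // illustration only, not compiled
+#include <mutex>
+
+struct File { int level = 0; bool inList = true; };
+
+static std::mutex g_listLock;
+
+void FinishLoad(File& f, int finalLevel)
+{
+    std::lock_guard<std::mutex> hold(g_listLock);
+    f.inList = false;       // unlink from the load list...
+    f.level  = finalLevel;  // ...and publish under the same lock
+}
+
+int QueryLevel(File& f)
+{
+    std::lock_guard<std::mutex> hold(g_listLock);
+    return f.inList ? -1 : f.level;   // -1: still loading, use the lock entry
+}
+#endif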
+
+void FileLoadLock::SetError(Exception *ex)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ THROWS;
+ PRECONDITION(CheckPointer(ex));
+ PRECONDITION(HasLock());
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ m_cachedHR = ex->GetHR();
+
+ LOG((LF_LOADER, LL_WARNING, "LOADER: %x:***%s*\t!!!Non-transient error 0x%x\n",
+ m_pDomainFile->GetAppDomain(), m_pDomainFile->GetSimpleName(), m_cachedHR));
+
+ m_pDomainFile->SetError(ex);
+
+ CompleteLoadLevel(FILE_ACTIVE, FALSE);
+}
+
+void FileLoadLock::AddRef()
+{
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockIncrement((LONG *) &m_dwRefCount);
+}
+
+UINT32 FileLoadLock::Release()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LONG count = FastInterlockDecrement((LONG *) &m_dwRefCount);
+ if (count == 0)
+ delete this;
+
+ return count;
+}
+
+FileLoadLock::FileLoadLock(PEFileListLock *pLock, PEFile *pFile, DomainFile *pDomainFile)
+ : ListLockEntry(pLock, pFile, "File load lock"),
+ m_level((FileLoadLevel) (FILE_LOAD_CREATE)),
+ m_pDomainFile(pDomainFile),
+ m_cachedHR(S_OK),
+ m_AppDomainId(pDomainFile->GetAppDomain()->GetId())
+{
+ WRAPPER_NO_CONTRACT;
+ pFile->AddRef();
+}
+
+void FileLoadLock::HolderLeave(FileLoadLock *pThis)
+{
+ LIMITED_METHOD_CONTRACT;
+ pThis->Leave();
+}
+
+//
+// Assembly loading:
+//
+// Assembly loading is carefully layered to avoid deadlocks in the
+// presence of circular loading dependencies.
+// A LoadLevel is associated with each assembly as it is being loaded. During the
+// act of loading (abstractly, increasing its load level), its lock is
+// held, and the current load level is stored on the thread. Any
+// recursive loads during that period are automatically restricted to
+// only partially load the dependent assembly to the same level as the
+// caller (or to one short of that level in the presence of a deadlock
+// loop.)
+//
+// Each loading stage must be carefully constructed so that
+// this constraint is expected and can be dealt with.
+//
+// Note that there is one case where this still doesn't handle recursion, and that is the
+// security subsystem. The security system runs managed code, and thus must typically fully
+// initialize assemblies of permission sets it is trying to use. (And of course, these may be used
+// while those assemblies are initializing.) This is dealt with in the historical manner - namely
+// the security system passes in a special flag which says that it will deal with null return values
+// in the case where a load cannot be safely completed due to such issues.
+//
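+//
+// A minimal sketch of the per-thread limiter idea described above (conceptual
+// only; the real mechanism is LoadLevelLimiter, used further down in
+// LoadDomainFile):
+#if 0 // illustration only, not compiled
+#include <algorithm>
+
+enum Level { CREATE = 0, ALLOCATE, LOADED, ACTIVE };
+
+// One limit per thread; nested loads may not exceed the level being worked on.
+static thread_local Level t_limit = ACTIVE;
+
+struct ScopedLevelLimit
+{
+    Level m_saved;
+    explicit ScopedLevelLimit(Level working) : m_saved(t_limit)
+    {
+        // Clamp recursive loads on this thread to the level currently being
+        // worked at; this is what breaks circular-dependency deadlocks.
+        t_limit = std::min(t_limit, working);
+    }
+    ~ScopedLevelLimit() { t_limit = m_saved; }
+};
+
+Level ClampRequest(Level requested) { return std::min(requested, t_limit); }
+#endif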
+
+void AppDomain::LoadSystemAssemblies()
+{
+ STANDARD_VM_CONTRACT;
+
+ // The only reason to make an assembly a "system assembly" is if the EE is caching
+ // pointers to stuff in the assembly. Because this is going on, we need to preserve
+ // the invariant that the assembly is loaded into every app domain.
+ //
+ // Right now we have only one system assembly. We shouldn't need to add any more.
+
+ LoadAssembly(NULL, SystemDomain::System()->SystemFile(), FILE_ACTIVE);
+}
+
+FileLoadLevel AppDomain::GetDomainFileLoadLevel(DomainFile *pFile)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ LoadLockHolder lock(this);
+
+ FileLoadLock* pLockEntry = (FileLoadLock *) lock->FindFileLock(pFile->GetFile());
+
+ if (pLockEntry == NULL)
+ return pFile->GetLoadLevel();
+ else
+ return pLockEntry->GetLoadLevel();
+}
+
+// This checks if the thread has initiated (or completed) loading at the given level. A false result guarantees that
+// (a) The current thread (or a thread blocking on the current thread) has not started loading the file
+// at the given level, and
+// (b) No other thread had started loading the file at this level at the start of this function call.
+
+// Note that another thread may start loading the file at that level in a race with the completion of
+// this function. However, the caller still has the guarantee that such a load started after this
+// function was called (and e.g. any state in place before the function call will be seen by the other thread.)
+//
+// Conversely, a true guarantees that either the current thread has started the load step, or another
+// thread has completed the load step.
+//
+
+BOOL AppDomain::IsLoading(DomainFile *pFile, FileLoadLevel level)
+{
+ // Cheap out
+ if (pFile->GetLoadLevel() < level)
+ {
+ FileLoadLock *pLock = NULL;
+ {
+ LoadLockHolder lock(this);
+
+ pLock = (FileLoadLock *) lock->FindFileLock(pFile->GetFile());
+
+ if (pLock == NULL)
+ {
+ // No thread involved with loading
+ return pFile->GetLoadLevel() >= level;
+ }
+
+ pLock->AddRef();
+ }
+
+ FileLoadLockRefHolder lockRef(pLock);
+
+ if (pLock->Acquire(level))
+ {
+ // We got the lock - therefore no other thread has started this loading step yet.
+ pLock->Leave();
+ return FALSE;
+ }
+
+ // We didn't get the lock - either this thread is already doing the load,
+ // or else the load has already finished.
+ }
+ return TRUE;
+}
+
+// CheckLoading is a weaker form of IsLoading, which will not block on
+// other threads waiting for their status. This is appropriate for asserts.
+CHECK AppDomain::CheckLoading(DomainFile *pFile, FileLoadLevel level)
+{
+ // Cheap out
+ if (pFile->GetLoadLevel() < level)
+ {
+ FileLoadLock *pLock = NULL;
+
+ LoadLockHolder lock(this);
+
+ pLock = (FileLoadLock *) lock->FindFileLock(pFile->GetFile());
+
+ if (pLock != NULL
+ && pLock->CanAcquire(level))
+ {
+ // We can get the lock - therefore no other thread has started this loading step yet.
+ CHECK_FAILF(("Loading step %d has not been initiated yet", level));
+ }
+
+ // We didn't get the lock - either this thread is already doing the load,
+ // or else the load has already finished.
+ }
+
+ CHECK_OK;
+}
+
+CHECK AppDomain::CheckCanLoadTypes(Assembly *pAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ CHECK_MSG(CheckValidModule(pAssembly->GetManifestModule()),
+ "Type loading can occur only when executing in the assembly's app domain");
+ CHECK_OK;
+}
+
+CHECK AppDomain::CheckCanExecuteManagedCode(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Module* pModule=pMD->GetModule();
+
+ CHECK_MSG(CheckValidModule(pModule),
+ "Managed code can only run when executing in the module's app domain");
+
+ if (!pMD->IsInterface() || pMD->IsStatic()) // interfaces require no activation for instance methods
+ {
+ // A cctor could have been interrupted by an appdomain unload (ADU).
+ CHECK_MSG(HasUnloadStarted() || pModule->CheckActivated(),
+ "Managed code can only run when its module has been activated in the current app domain");
+ }
+
+ CHECK_MSG(!IsPassiveDomain() || pModule->CanExecuteCode(),
+ "Executing managed code from an unsafe assembly in a Passive AppDomain");
+
+ CHECK_OK;
+}
+
+#endif // !DACCESS_COMPILE
+
+void AppDomain::LoadDomainFile(DomainFile *pFile,
+ FileLoadLevel targetLevel)
+{
+ CONTRACTL
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM();); }
+ }
+ CONTRACTL_END;
+
+ // Quick exit if finished
+ if (pFile->GetLoadLevel() >= targetLevel)
+ return;
+
+ // Handle the error case
+ pFile->ThrowIfError(targetLevel);
+
+
+#ifndef DACCESS_COMPILE
+
+ if (pFile->IsLoading())
+ {
+ GCX_PREEMP();
+
+ // Load some more if appropriate
+ LoadLockHolder lock(this);
+
+ FileLoadLock* pLockEntry = (FileLoadLock *) lock->FindFileLock(pFile->GetFile());
+ if (pLockEntry == NULL)
+ {
+ _ASSERTE (!pFile->IsLoading());
+ return;
+ }
+
+ pLockEntry->AddRef();
+
+ lock.Release();
+
+ LoadDomainFile(pLockEntry, targetLevel);
+ }
+
+#else // DACCESS_COMPILE
+ DacNotImpl();
+#endif // DACCESS_COMPILE
+}
+
+#ifndef DACCESS_COMPILE
+
+FileLoadLevel AppDomain::GetThreadFileLoadLevel()
+{
+ WRAPPER_NO_CONTRACT;
+ if (GetThread()->GetLoadLevelLimiter() == NULL)
+ return FILE_ACTIVE;
+ else
+ return (FileLoadLevel)(GetThread()->GetLoadLevelLimiter()->GetLoadLevel()-1);
+}
+
+
+Assembly *AppDomain::LoadAssembly(AssemblySpec* pIdentity,
+ PEAssembly *pFile,
+ FileLoadLevel targetLevel,
+ AssemblyLoadSecurity *pLoadSecurity /* = NULL */)
+{
+ CONTRACT(Assembly *)
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pFile));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); // May be NULL in recursive load case
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ DomainAssembly *pAssembly = LoadDomainAssembly(pIdentity, pFile, targetLevel, pLoadSecurity);
+ PREFIX_ASSUME(pAssembly != NULL);
+
+ RETURN pAssembly->GetAssembly();
+}
+
+#ifndef CROSSGEN_COMPILE
+// Thread stress
+class LoadDomainAssemblyStress : APIThreadStress
+{
+public:
+ AppDomain *pThis;
+ AssemblySpec* pSpec;
+ PEAssembly *pFile;
+ AssemblyLoadSecurity *pLoadSecurity;
+ FileLoadLevel targetLevel;
+
+ LoadDomainAssemblyStress(AppDomain *pThis, AssemblySpec* pSpec, PEAssembly *pFile, FileLoadLevel targetLevel, AssemblyLoadSecurity *pLoadSecurity)
+ : pThis(pThis), pSpec(pSpec), pFile(pFile), pLoadSecurity(pLoadSecurity), targetLevel(targetLevel) {LIMITED_METHOD_CONTRACT;}
+
+ void Invoke()
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_INTOLERANT;
+ SetupThread();
+ pThis->LoadDomainAssembly(pSpec, pFile, targetLevel, pLoadSecurity);
+ }
+};
+#endif // CROSSGEN_COMPILE
+
+extern BOOL AreSameBinderInstance(ICLRPrivBinder *pBinderA, ICLRPrivBinder *pBinderB);
+
+DomainAssembly* AppDomain::LoadDomainAssembly( AssemblySpec* pSpec,
+ PEAssembly *pFile,
+ FileLoadLevel targetLevel,
+ AssemblyLoadSecurity *pLoadSecurity /* = NULL */)
+{
+ STATIC_CONTRACT_THROWS;
+
+ if (pSpec == nullptr)
+ {
+ // skip caching, since we don't have anything to base it on
+ return LoadDomainAssemblyInternal(pSpec, pFile, targetLevel, pLoadSecurity);
+ }
+
+ DomainAssembly* pRetVal = NULL;
+ EX_TRY
+ {
+ pRetVal = LoadDomainAssemblyInternal(pSpec, pFile, targetLevel, pLoadSecurity);
+ }
+ EX_HOOK
+ {
+ Exception* pEx=GET_EXCEPTION();
+ if (!pEx->IsTransient())
+ {
+#if defined(FEATURE_CORECLR)
+ // Set up the binder reference in AssemblySpec from the PEAssembly if one is not already set.
+ ICLRPrivBinder* pCurrentBindingContext = pSpec->GetBindingContext();
+ ICLRPrivBinder* pBindingContextFromPEAssembly = pFile->GetBindingContext();
+
+ if (pCurrentBindingContext == NULL)
+ {
+ // Set the binding context we got from the PEAssembly if AssemblySpec does not
+ // have that information
+ _ASSERTE(pBindingContextFromPEAssembly != NULL);
+ pSpec->SetBindingContext(pBindingContextFromPEAssembly);
+ }
+#if defined(_DEBUG)
+ else
+ {
+ // Binding context in the spec should be the same as the binding context in the PEAssembly
+ _ASSERTE(AreSameBinderInstance(pCurrentBindingContext, pBindingContextFromPEAssembly));
+ }
+#endif // _DEBUG
+#endif // defined(FEATURE_CORECLR)
+
+ if (!EEFileLoadException::CheckType(pEx))
+ {
+ StackSString name;
+ pSpec->GetFileOrDisplayName(0, name);
+ pEx=new EEFileLoadException(name, pEx->GetHR(), NULL, pEx);
+ AddExceptionToCache(pSpec, pEx);
+ PAL_CPP_THROW(Exception *, pEx);
+ }
+ else
+ AddExceptionToCache(pSpec, pEx);
+ }
+ }
+ EX_END_HOOK;
+
+ return pRetVal;
+}
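+
+// The EX_HOOK above implements a wrap-and-cache error policy: non-transient
+// failures are normalized to EEFileLoadException and recorded against the spec
+// so later binds of the same identity fail fast. A portable sketch of the
+// pattern with standard C++ exceptions; all names are hypothetical, and the
+// transient-exception check is elided.
+#if 0 // illustration only, not compiled
+#include <map>
+#include <stdexcept>
+#include <string>
+
+struct BindFailure : std::runtime_error
+{
+    explicit BindFailure(const std::string& msg) : std::runtime_error(msg) {}
+};
+
+void ReallyLoad(const std::string& spec);                 // hypothetical load step
+
+static std::map<std::string, std::string> g_failureCache; // spec -> message
+
+void LoadWithFailureCache(const std::string& spec)
+{
+    auto cached = g_failureCache.find(spec);
+    if (cached != g_failureCache.end())
+        throw BindFailure(cached->second);   // fail fast from the cache
+
+    try
+    {
+        ReallyLoad(spec);
+    }
+    catch (const std::exception& e)
+    {
+        g_failureCache[spec] = e.what();     // remember the failure
+        throw BindFailure(e.what());         // rethrow in a uniform type
+    }
+}
+#endif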
+
+
+DomainAssembly *AppDomain::LoadDomainAssemblyInternal(AssemblySpec* pIdentity,
+ PEAssembly *pFile,
+ FileLoadLevel targetLevel,
+ AssemblyLoadSecurity *pLoadSecurity /* = NULL */)
+{
+ CONTRACT(DomainAssembly *)
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pFile));
+ PRECONDITION(CheckPointer(pLoadSecurity, NULL_OK));
+ PRECONDITION(pFile->IsSystem() || ::GetAppDomain()==this);
+ POSTCONDITION(CheckPointer(RETVAL));
+ POSTCONDITION(RETVAL->GetLoadLevel() >= GetThreadFileLoadLevel()
+ || RETVAL->GetLoadLevel() >= targetLevel);
+ POSTCONDITION(RETVAL->CheckNoError(targetLevel));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+
+ DomainAssembly * result;
+
+#ifndef CROSSGEN_COMPILE
+ LoadDomainAssemblyStress ts (this, pIdentity, pFile, targetLevel, pLoadSecurity);
+#endif
+
+ // Go into preemptive mode since this may take a while.
+ GCX_PREEMP();
+
+ // Check for existing fully loaded assembly, or for an assembly which has failed during the loading process.
+ result = FindAssembly(pFile, FindAssemblyOptions_IncludeFailedToLoad);
+
+ if (result == NULL)
+ {
+ // Allocate the DomainAssembly a bit early to avoid GC mode problems. We could potentially avoid
+ // a rare redundant allocation by moving this closer to FileLoadLock::Create, but it's not worth it.
+
+ NewHolder<DomainAssembly> pDomainAssembly;
+ pDomainAssembly = new DomainAssembly(this, pFile, pLoadSecurity, this->GetLoaderAllocator());
+
+ LoadLockHolder lock(this);
+
+ // Find the list lock entry
+ FileLoadLock * fileLock = (FileLoadLock *)lock->FindFileLock(pFile);
+ if (fileLock == NULL)
+ {
+ // Check again in case we were racing
+ result = FindAssembly(pFile, FindAssemblyOptions_IncludeFailedToLoad);
+ if (result == NULL)
+ {
+ // We are the first one in - create the DomainAssembly
+ fileLock = FileLoadLock::Create(lock, pFile, pDomainAssembly);
+ pDomainAssembly.SuppressRelease();
+ }
+ }
+ else
+ {
+ fileLock->AddRef();
+ }
+
+ lock.Release();
+
+ if (result == NULL)
+ {
+ // We pass our ref on fileLock to LoadDomainFile to release.
+
+ // Note that if we throw here, we will poison fileLock with an error condition,
+ // so it will not be removed until app domain unload. So there is no need
+ // to release our ref count.
+ result = (DomainAssembly *)LoadDomainFile(fileLock, targetLevel);
+ }
+ else
+ {
+ result->EnsureLoadLevel(targetLevel);
+ }
+ }
+ else
+ result->EnsureLoadLevel(targetLevel);
+
+ // Malformed metadata may contain a Module reference to what is actually
+ // an Assembly. In this case we need to throw an exception, since returning
+ // a DomainModule as a DomainAssembly is a type safety violation.
+ if (!result->IsAssembly())
+ {
+ ThrowHR(COR_E_ASSEMBLYEXPECTED);
+ }
+
+ // Cache result in all cases, since found pFile could be from a different AssemblyRef than pIdentity
+ // Do not cache WindowsRuntime assemblies; they are cached in code:CLRPrivTypeCacheWinRT.
+ if ((pIdentity != NULL) && (pIdentity->CanUseWithBindingCache()) && (result->CanUseWithBindingCache()))
+ GetAppDomain()->AddAssemblyToCache(pIdentity, result);
+
+ RETURN result;
+} // AppDomain::LoadDomainAssembly
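+
+// The find-or-create shape above allocates the DomainAssembly optimistically
+// before taking the load lock, re-checks for a racing creator inside the lock,
+// and discards the speculative object if it lost. A minimal sketch of that
+// pattern; names are hypothetical, and the real code also layers FileLoadLocks
+// on top of the table.
+#if 0 // illustration only, not compiled
+#include <map>
+#include <memory>
+#include <mutex>
+
+struct Entry { };
+
+static std::mutex g_lock;
+static std::map<int, std::shared_ptr<Entry>> g_table;   // key -> entry
+
+std::shared_ptr<Entry> FindOrCreate(int key)
+{
+    // Allocate before taking the lock so the critical section stays short.
+    auto fresh = std::make_shared<Entry>();
+
+    std::lock_guard<std::mutex> hold(g_lock);
+    auto it = g_table.find(key);
+    if (it != g_table.end())
+        return it->second;      // raced: another thread created it first
+
+    g_table[key] = fresh;       // we are first in: publish ours
+    return fresh;
+}
+#endif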
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+
+#ifndef CROSSGEN_COMPILE
+// Thread stress
+class LoadDomainModuleStress : APIThreadStress
+{
+public:
+ AppDomain *pThis;
+ DomainAssembly *pAssembly;
+ PEModule *pFile;
+ FileLoadLevel targetLevel;
+
+ LoadDomainModuleStress(AppDomain *pThis, DomainAssembly *pAssembly, PEModule *pFile, FileLoadLevel targetLevel)
+ : pThis(pThis), pAssembly(pAssembly), pFile(pFile), targetLevel(targetLevel) {LIMITED_METHOD_CONTRACT;}
+
+ void Invoke()
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_INTOLERANT;
+ SetupThread();
+ pThis->LoadDomainModule(pAssembly, pFile, targetLevel);
+ }
+};
+#endif // CROSSGEN_COMPILE
+
+DomainModule *AppDomain::LoadDomainModule(DomainAssembly *pAssembly, PEModule *pFile,
+ FileLoadLevel targetLevel)
+{
+ CONTRACT(DomainModule *)
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(CheckPointer(pFile));
+ POSTCONDITION(CheckPointer(RETVAL));
+ POSTCONDITION(RETVAL->GetLoadLevel() >= GetThreadFileLoadLevel()
+ || RETVAL->GetLoadLevel() >= targetLevel);
+ POSTCONDITION(RETVAL->CheckNoError(targetLevel));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ GCX_PREEMP();
+
+#ifndef CROSSGEN_COMPILE
+ // Thread stress
+ LoadDomainModuleStress ts (this, pAssembly, pFile, targetLevel);
+#endif
+
+ // Check for existing fully loaded assembly
+ DomainModule *result = pAssembly->FindModule(pFile);
+ if (result == NULL)
+ {
+ LoadLockHolder lock(this);
+
+ // Check again in case we were racing
+ result = pAssembly->FindModule(pFile);
+ if (result == NULL)
+ {
+ // Find the list lock entry
+ FileLoadLock *fileLock = (FileLoadLock *) lock->FindFileLock(pFile);
+ if (fileLock == NULL)
+ {
+ // We are the first one in - create the DomainModule
+ NewHolder<DomainModule> pDomainModule(new DomainModule(this, pAssembly, pFile));
+ fileLock = FileLoadLock::Create(lock, pFile, pDomainModule);
+ pDomainModule.SuppressRelease();
+ }
+ else
+ fileLock->AddRef();
+
+ lock.Release();
+
+ // We pass our ref on fileLock to LoadDomainFile to release.
+
+ // Note that if we throw here, we will poison fileLock with an error condition,
+ // so it will not be removed until app domain unload. So there is no need
+ // to release our ref count.
+
+ result = (DomainModule *) LoadDomainFile(fileLock, targetLevel);
+ }
+ else
+ {
+ lock.Release();
+ result->EnsureLoadLevel(targetLevel);
+ }
+
+ }
+ else
+ result->EnsureLoadLevel(targetLevel);
+
+ // Malformed metadata may contain an Assembly reference to what is actually
+ // a Module. In this case we need to throw an exception, since returning a
+ // DomainAssembly as a DomainModule is a type safety violation.
+ if (result->IsAssembly())
+ {
+ ThrowHR(COR_E_ASSEMBLY_NOT_EXPECTED);
+ }
+
+ RETURN result;
+}
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+struct LoadFileArgs
+{
+ FileLoadLock *pLock;
+ FileLoadLevel targetLevel;
+ DomainFile *result;
+};
+
+static void LoadDomainFile_Wrapper(void *ptr)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_INTOLERANT;
+ GCX_PREEMP();
+ LoadFileArgs *args = (LoadFileArgs *) ptr;
+ args->result = GetAppDomain()->LoadDomainFile(args->pLock, args->targetLevel);
+}
+
+DomainFile *AppDomain::LoadDomainFile(FileLoadLock *pLock, FileLoadLevel targetLevel)
+{
+ CONTRACT(DomainFile *)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pLock));
+ PRECONDITION(pLock->GetDomainFile()->GetAppDomain() == this);
+ POSTCONDITION(RETVAL->GetLoadLevel() >= GetThreadFileLoadLevel()
+ || RETVAL->GetLoadLevel() >= targetLevel);
+ POSTCONDITION(RETVAL->CheckNoError(targetLevel));
+ }
+ CONTRACT_END;
+
+
+ if(!CanLoadCode())
+ COMPlusThrow(kAppDomainUnloadedException);
+
+ // Thread stress
+ APIThreadStress::SyncThreadStress();
+
+ DomainFile *pFile = pLock->GetDomainFile();
+
+ // Make sure we release the lock on exit
+ FileLoadLockRefHolder lockRef(pLock);
+
+ // We need to perform the early steps of loading mscorlib without a domain transition. This is
+ // important for bootstrapping purposes - we need to get mscorlib at least partially loaded
+ // into a domain before we can run serialization code to do the transition.
+ //
+ // Note that we cannot do this in general for all assemblies, because some of the security computations
+ // require the managed exposed object, which must be created in the correct app domain.
+
+ if (this != GetAppDomain()
+ && pFile->GetFile()->IsSystem()
+ && targetLevel > FILE_LOAD_ALLOCATE)
+ {
+ // Re-call the routine with a limited load level. This will cause the first part of the load to
+ // get performed in the current app domain.
+
+ pLock->AddRef();
+ LoadDomainFile(pLock, targetLevel > FILE_LOAD_ALLOCATE ? FILE_LOAD_ALLOCATE : targetLevel);
+
+ // Now continue on to complete the rest of the load, if any.
+ }
+
+ // Do a quick out check for the already loaded case.
+ if (pLock->GetLoadLevel() >= targetLevel)
+ {
+ pFile->ThrowIfError(targetLevel);
+
+ RETURN pFile;
+ }
+
+#ifndef CROSSGEN_COMPILE
+ // Make sure we are in the right domain. Many of the load operations require the target domain
+ // to be the current app domain, most notably anything involving managed code or managed object
+ // creation.
+ if (this != GetAppDomain()
+ && (!pFile->GetFile()->IsSystem() || targetLevel > FILE_LOAD_ALLOCATE))
+ {
+ // Transition to the correct app domain and perform the load there.
+ GCX_COOP();
+
+ // we will release the lock in the other app domain
+ lockRef.SuppressRelease();
+
+ if(!CanLoadCode() || GetDefaultContext() ==NULL)
+ COMPlusThrow(kAppDomainUnloadedException);
+ LoadFileArgs args = {pLock, targetLevel, NULL};
+ GetThread()->DoADCallBack(this, LoadDomainFile_Wrapper, (void *) &args, ADV_CREATING);
+
+ RETURN args.result;
+ }
+#endif // CROSSGEN_COMPILE
+
+ // Initialize a loading queue. This will hold any loads which are triggered recursively but
+ // which cannot be immediately satisfied due to anti-deadlock constraints.
+
+ // PendingLoadQueues are allocated on the stack during a load, and
+ // shared with all nested loads on the same thread. (Note that the
+ // newly-constructed limiter goes unused if we are in a recursive load;
+ // that's OK since they are cheap to construct.)
+ FileLoadLevel immediateTargetLevel = targetLevel;
+ {
+ LoadLevelLimiter limit;
+ limit.Activate();
+
+ // We cannot set a target level higher than that allowed by the limiter currently.
+ // This is because of anti-deadlock constraints.
+ if (immediateTargetLevel > limit.GetLoadLevel())
+ immediateTargetLevel = limit.GetLoadLevel();
+
+ LOG((LF_LOADER, LL_INFO100, "LOADER: %x:***%s*\t>>>Load initiated, %s/%s\n",
+ pFile->GetAppDomain(), pFile->GetSimpleName(),
+ fileLoadLevelName[immediateTargetLevel], fileLoadLevelName[targetLevel]));
+
+ // Now loop and do the load incrementally to the target level.
+ if (pLock->GetLoadLevel() < immediateTargetLevel)
+ {
+ // Thread stress
+ APIThreadStress::SyncThreadStress();
+
+ while (pLock->Acquire(immediateTargetLevel))
+ {
+ FileLoadLevel workLevel;
+ {
+ FileLoadLockHolder fileLock(pLock);
+
+ // Work level is next step to do
+ workLevel = (FileLoadLevel)(fileLock->GetLoadLevel()+1);
+
+ // Set up the anti-deadlock constraint: we cannot safely recursively load any assemblies
+ // on this thread to a higher level than this assembly is being loaded now.
+ // Note that we do allow work at a parallel level; any deadlocks caused here will
+ // be resolved by the deadlock detection in the FileLoadLocks.
+ limit.SetLoadLevel(workLevel);
+
+ LOG((LF_LOADER,
+ (workLevel == FILE_LOAD_BEGIN
+ || workLevel == FILE_LOADED
+ || workLevel == FILE_ACTIVE)
+ ? LL_INFO10 : LL_INFO1000,
+ "LOADER: %p:***%s*\t loading at level %s\n",
+ this, pFile->GetSimpleName(), fileLoadLevelName[workLevel]));
+
+ TryIncrementalLoad(pFile, workLevel, fileLock);
+ }
+ TESTHOOKCALL(CompletedFileLoadLevel(GetId().m_dwId,pFile,workLevel));
+ }
+
+ if (pLock->GetLoadLevel() == immediateTargetLevel-1)
+ {
+ LOG((LF_LOADER, LL_INFO100, "LOADER: %x:***%s*\t<<<Load limited due to detected deadlock, %s\n",
+ pFile->GetAppDomain(), pFile->GetSimpleName(),
+ fileLoadLevelName[immediateTargetLevel-1]));
+ }
+ }
+
+ LOG((LF_LOADER, LL_INFO100, "LOADER: %x:***%s*\t<<<Load completed, %s\n",
+ pFile->GetAppDomain(), pFile->GetSimpleName(),
+ fileLoadLevelName[pLock->GetLoadLevel()]));
+
+ }
+
+ // There may have been an error stored on the domain file by another thread, or from a previous load
+ pFile->ThrowIfError(targetLevel);
+
+ // There are two normal results from the above loop.
+ //
+ // 1. We succeeded in loading the file to the current thread's load level.
+ // 2. We succeeded in loading the file to the current thread's load level - 1, due
+ // to deadlock condition with another thread loading the same assembly.
+ //
+ // Either of these is considered a satisfactory result, as code inside a load must expect
+ // a partial load result.
+ //
+ // However, if load level elevation has occurred, then it is possible for a deadlock to
+ // prevent us from loading an assembly which was loading before the elevation at a radically
+ // lower level. In such a case, we throw an exception which transiently fails the current
+ // load, since it is likely we have not satisfied the caller.
+ // (An alternate, and possibly preferable, strategy here would be for all callers to explicitly
+ // identify the minimum load level acceptable via CheckLoadDomainFile and throw from there.)
+
+ pFile->RequireLoadLevel((FileLoadLevel)(immediateTargetLevel-1));
+
+
+ RETURN pFile;
+}
+
+void AppDomain::TryIncrementalLoad(DomainFile *pFile, FileLoadLevel workLevel, FileLoadLockHolder &lockHolder)
+{
+ STANDARD_VM_CONTRACT;
+
+ // This is factored out so we don't call EX_TRY in a loop (EX_TRY can _alloca)
+
+ BOOL released = FALSE;
+ FileLoadLock* pLoadLock = lockHolder.GetValue();
+
+ EX_TRY
+ {
+#ifndef FEATURE_CORECLR
+ // Event Tracing for Windows is used to log data for performance and functional testing purposes.
+ // The events below are used to measure the performance of two steps in the assembly loader, namely assembly initialization and delivering events.
+ StackSString ETWAssemblySimpleName;
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_PRIVATEBINDING_KEYWORD))
+ {
+ LPCUTF8 simpleName = pFile->GetSimpleName();
+ ETWAssemblySimpleName.AppendUTF8(simpleName ? simpleName : "NULL"); // Gather data used by ETW events later in this function.
+ }
+#endif // FEATURE_CORECLR
+
+ // Special case: for LoadLibrary, we cannot hold the lock during the
+ // actual LoadLibrary call, because we might get a callback from _CorDllMain on any
+ // other thread. (Note that this requires DomainFile's LoadLibrary to be independently threadsafe.)
+
+ if (workLevel == FILE_LOAD_LOADLIBRARY)
+ {
+ lockHolder.Release();
+ released = TRUE;
+ }
+#ifndef FEATURE_CORECLR
+ else if (workLevel == FILE_LOAD_DELIVER_EVENTS)
+ {
+ FireEtwLoaderDeliverEventsPhaseStart(GetId().m_dwId, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable, NULL, ETWAssemblySimpleName, GetClrInstanceId());
+ }
+#endif // FEATURE_CORECLR
+
+ // Do the work
+ TESTHOOKCALL(NextFileLoadLevel(GetId().m_dwId,pFile,workLevel));
+#ifndef FEATURE_CORECLR
+ if (workLevel == FILE_LOAD_ALLOCATE)
+ {
+ FireEtwLoaderAssemblyInitPhaseStart(GetId().m_dwId, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable, NULL, ETWAssemblySimpleName, GetClrInstanceId());
+ }
+#endif // FEATURE_CORECLR
+ BOOL success = pFile->DoIncrementalLoad(workLevel);
+#ifndef FEATURE_CORECLR
+ if (workLevel == FILE_LOAD_ALLOCATE)
+ {
+ FireEtwLoaderAssemblyInitPhaseEnd(GetId().m_dwId, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable, NULL, ETWAssemblySimpleName, GetClrInstanceId());
+ }
+#endif // FEATURE_CORECLR
+ TESTHOOKCALL(CompletingFileLoadLevel(GetId().m_dwId,pFile,workLevel));
+ if (released)
+ {
+ // Reobtain lock to increment level. (Note that another thread may
+ // have already done it, which is OK.)
+ if (pLoadLock->Acquire(workLevel))
+ {
+ // note lockHolder.Acquire isn't wired up to actually take the lock
+ lockHolder = pLoadLock;
+ released = FALSE;
+ }
+ }
+
+ if (!released)
+ {
+ // Complete the level.
+ if (pLoadLock->CompleteLoadLevel(workLevel, success) &&
+ pLoadLock->GetLoadLevel()==FILE_LOAD_DELIVER_EVENTS)
+ {
+ lockHolder.Release();
+ released = TRUE;
+ pFile->DeliverAsyncEvents();
+#ifndef FEATURE_CORECLR
+ FireEtwLoaderDeliverEventsPhaseEnd(GetId().m_dwId, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable, NULL, ETWAssemblySimpleName, GetClrInstanceId());
+#endif // FEATURE_CORECLR
+ };
+ }
+ }
+ EX_HOOK
+ {
+ Exception *pEx = GET_EXCEPTION();
+
+
+ // We will cache this error and wire this load to fail forever,
+ // unless the exception is transient or the file is loaded OK but just cannot execute.
+ if (!pEx->IsTransient() && !pFile->IsLoaded())
+ {
+
+ if (released)
+ {
+ // Reobtain lock to increment level. (Note that another thread may
+ // have already done it, which is OK.)
+ if (pLoadLock->Acquire(workLevel))
+ {
+ // note lockHolder.Acquire isn't wired up to actually take the lock
+ lockHolder = pLoadLock;
+ released = FALSE;
+ }
+ }
+
+ if (!released)
+ {
+ // Report the error in the lock
+ pLoadLock->SetError(pEx);
+ }
+
+ if (!EEFileLoadException::CheckType(pEx))
+ EEFileLoadException::Throw(pFile->GetFile(), pEx->GetHR(), pEx);
+ }
+
+ // Otherwise, we simply abort this load, and can retry later on.
+ // @todo cleanup: make sure that each level is restartable after an exception, and
+ // leaves no bad side effects
+ }
+ EX_END_HOOK;
+}
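+
+// The lock juggling above follows one rule: never hold the load lock across a
+// call that can re-enter the loader (LoadLibrary's _CorDllMain callback, or
+// delivering load events). Drop the lock, do the work, then reacquire before
+// publishing the result. A minimal sketch; names are hypothetical.
+#if 0 // illustration only, not compiled
+#include <mutex>
+
+static std::mutex g_loadLock;
+
+void LoadOneLevel(bool (*reentrantWork)(), int& level, int workLevel)
+{
+    std::unique_lock<std::mutex> hold(g_loadLock);
+    hold.unlock();              // never hold the lock across re-entrant work
+    bool success = reentrantWork();
+    hold.lock();                // reacquire to publish the result
+    if (success && level < workLevel)
+        level = workLevel;      // a racing thread may have advanced it already
+}
+#endif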
+
+// Checks whether the module is valid to be in the given app domain (need not be yet loaded)
+CHECK AppDomain::CheckValidModule(Module * pModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pModule->FindDomainFile(this) != NULL)
+ CHECK_OK;
+
+ CCHECK_START
+ {
+ Assembly * pAssembly = pModule->GetAssembly();
+
+ CCHECK(pAssembly->IsDomainNeutral());
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ Assembly * pSharedAssembly = NULL;
+ _ASSERTE(this == ::GetAppDomain());
+ {
+ SharedAssemblyLocator locator(pAssembly->GetManifestFile());
+ pSharedAssembly = SharedDomain::GetDomain()->FindShareableAssembly(&locator);
+ }
+
+ CCHECK(pAssembly == pSharedAssembly);
+#endif
+ }
+ CCHECK_END;
+
+ CHECK_OK;
+}
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+// Loads an existing Module into an AppDomain
+// WARNING: this can only be done in a very limited scenario - the Module must be an unloaded domain neutral
+// dependency in the app domain in question. Normal code should not call this!
+DomainFile *AppDomain::LoadDomainNeutralModuleDependency(Module *pModule, FileLoadLevel targetLevel)
+{
+ CONTRACT(DomainFile *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(::GetAppDomain()==this);
+ PRECONDITION(CheckPointer(pModule));
+ POSTCONDITION(CheckValidModule(pModule));
+ POSTCONDITION(CheckPointer(RETVAL));
+ POSTCONDITION(RETVAL->GetModule() == pModule);
+ }
+ CONTRACT_END;
+
+ DomainFile *pDomainFile = pModule->FindDomainFile(this);
+
+ STRESS_LOG3(LF_CLASSLOADER, LL_INFO100,"LDNMD: DomainFile %p for module %p in AppDomain %i\n",pDomainFile,pModule,GetId().m_dwId);
+
+ if (pDomainFile == NULL)
+ {
+ GCX_PREEMP();
+
+ Assembly *pAssembly = pModule->GetAssembly();
+
+ DomainAssembly *pDomainAssembly = pAssembly->FindDomainAssembly(this);
+ if (pDomainAssembly == NULL)
+ {
+ AssemblySpec spec(this);
+ spec.InitializeSpec(pAssembly->GetManifestFile());
+
+ pDomainAssembly = spec.LoadDomainAssembly(targetLevel);
+ }
+ else
+ {
+ // If the domain assembly already exists, we need to load it to the target level.
+ pDomainAssembly->EnsureLoadLevel (targetLevel);
+ }
+
+ if(pAssembly != pDomainAssembly->GetAssembly())
+ {
+ ThrowHR(SECURITY_E_INCOMPATIBLE_SHARE);
+ }
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ if (pModule == pAssembly->GetManifestModule())
+ pDomainFile = pDomainAssembly;
+ else
+ {
+ pDomainFile = LoadDomainModule(pDomainAssembly, (PEModule*) pModule->GetFile(), targetLevel);
+ STRESS_LOG4(LF_CLASSLOADER, LL_INFO100,"LDNMD: DF: for %p[%p/%p] is %p",
+ pModule,pDomainAssembly,pModule->GetFile(),pDomainFile);
+ }
+#else
+ _ASSERTE (pModule == pAssembly->GetManifestModule());
+ pDomainFile = pDomainAssembly;
+#endif
+ }
+ else
+ {
+ // If the DomainFile already exists, we need to load it to the target level.
+ pDomainFile->EnsureLoadLevel (targetLevel);
+ }
+
+ RETURN pDomainFile;
+}
+
+void AppDomain::SetSharePolicy(SharePolicy policy)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if ((int)policy > SHARE_POLICY_COUNT)
+ COMPlusThrow(kArgumentException,W("Argument_InvalidValue"));
+
+ // We cannot make all code domain neutral and still provide complete compatibility with regard
+ // to using custom security policy and assembly evidence.
+ //
+ // In particular, if you try to do either of the above AFTER loading a domain neutral assembly
+ // out of the GAC, we will now throw an exception. The remedy would be to either not use SHARE_POLICY_ALWAYS
+ // (change LoaderOptimizationMultiDomain to LoaderOptimizationMultiDomainHost), or change the loading order
+ // in the app domain to do the policy set or evidence load earlier (which BTW will have the effect of
+ // automatically using MDH rather than MD, for the same result.)
+ //
+ // We include a compatibility flag here to preserve old functionality if necessary - this has the effect
+ // of never using SHARE_POLICY_ALWAYS.
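+    //
+    // A minimal illustration of the remedy above: an unmanaged host opting into MDH
+    // at startup (illustrative sketch only; pHost is a hypothetical ICLRRuntimeHost*):
+    //
+    //     CorBindToRuntimeEx(NULL, NULL,
+    //         STARTUP_LOADER_OPTIMIZATION_MULTI_DOMAIN_HOST | STARTUP_CONCURRENT_GC,
+    //         CLSID_CLRRuntimeHost, IID_ICLRRuntimeHost, (PVOID *)&pHost);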
+ if (policy == SHARE_POLICY_ALWAYS &&
+ (HasSetSecurityPolicy()
+ || GetCompatibilityFlag(compatOnlyGACDomainNeutral)))
+ {
+ // Never share assemblies not in the GAC
+ policy = SHARE_POLICY_GAC;
+ }
+
+ if (policy != m_SharePolicy)
+ {
+
+#ifdef FEATURE_PREJIT
+
+#ifdef FEATURE_FUSION
+ GCX_PREEMP();
+
+ // Update the native image config flags
+ FusionBind::SetApplicationContextDWORDProperty(m_pFusionContext, ACTAG_ZAP_CONFIG_FLAGS,
+ PEFile::GetNativeImageConfigFlags());
+#endif //FEATURE_FUSION
+
+#endif // FEATURE_PREJIT
+
+ m_SharePolicy = policy;
+ }
+
+ return;
+}
+
+#ifdef FEATURE_FUSION
+BOOL AppDomain::ReduceSharePolicyFromAlways()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // We may have already committed to always sharing - this is the case if
+ // we have already loaded non-GAC-bound assemblies as domain neutral.
+
+ if (GetSharePolicy() == SHARE_POLICY_ALWAYS)
+ {
+ AppDomain::AssemblyIterator i = IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded | kIncludeLoading | kIncludeExecution));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ // If we have loaded any non-GAC assemblies, we cannot set app domain policy as we have
+ // already committed to the process-wide policy.
+
+ while (i.Next(pDomainAssembly.This()))
+ {
+ if (pDomainAssembly->GetAssembly() &&
+ pDomainAssembly->GetAssembly()->IsDomainNeutral() &&
+ !pDomainAssembly->IsClosedInGAC())
+ {
+ // This assembly has been loaded domain neutral because of SHARE_POLICY_ALWAYS. We
+ // can't reverse that decision now, so we have to fail the sharing policy change.
+ return FALSE;
+ }
+ }
+
+ // We haven't loaded any non-GAC assemblies yet - scale back to SHARE_POLICY_GAC so
+ // future non-GAC assemblies won't be loaded as domain neutral.
+ SetSharePolicy(SHARE_POLICY_GAC);
+ }
+
+ return TRUE;
+}
+#endif // FEATURE_FUSION
+
+AppDomain::SharePolicy AppDomain::GetSharePolicy()
+{
+ LIMITED_METHOD_CONTRACT;
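+
+    // Resolution order, most to least specific: explicit per-domain policy,
+    // config-file default, host-requested global policy, hardwired default.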
+ // If the policy has been explicitly set for
+ // the domain, use that.
+ SharePolicy policy = m_SharePolicy;
+
+    // Pick up any config-specified policy.
+ if (policy == SHARE_POLICY_UNSPECIFIED)
+ policy = (SharePolicy) g_pConfig->DefaultSharePolicy();
+
+ // Next, honor a host's request for global policy.
+ if (policy == SHARE_POLICY_UNSPECIFIED)
+ policy = (SharePolicy) g_dwGlobalSharePolicy;
+
+ // If all else fails, use the hardwired default policy.
+ if (policy == SHARE_POLICY_UNSPECIFIED)
+ policy = SHARE_POLICY_DEFAULT;
+
+ return policy;
+}
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+
+#ifdef FEATURE_CORECLR
+void AppDomain::CheckForMismatchedNativeImages(AssemblySpec * pSpec, const GUID * pGuid)
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+    // Native images are only ever used for trusted images in CoreCLR.
+    // We don't wish to open the IL file at runtime, so we forgo any
+    // eager consistency checking. But we still want to prevent mismatched
+    // NGen images from being used. We record all mappings between assembly
+    // names and MVIDs, and fail once we detect a mismatch.
+ //
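+    // Illustrative bookkeeping (hypothetical identity and MVIDs): the first bind of
+    // "Lib, PublicKeyToken=..." records its MVID, say {AAAA-...}; if a later
+    // native-image dependency claims the same identity with MVID {BBBB-...}, the
+    // lookup below detects the mismatch and throws a FileLoadException.
+    //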
+
+ if (pSpec->IsStrongNamed() && pSpec->HasPublicKey())
+ {
+ pSpec->ConvertPublicKeyToToken();
+ }
+
+ //
+ // CoreCLR binder unifies assembly versions. Ignore assembly version here to
+ // detect more types of potential mismatches.
+ //
+ AssemblyMetaDataInternal * pContext = pSpec->GetContext();
+ pContext->usMajorVersion = (USHORT)-1;
+ pContext->usMinorVersion = (USHORT)-1;
+ pContext->usBuildNumber = (USHORT)-1;
+ pContext->usRevisionNumber = (USHORT)-1;
+
+ // Ignore the WinRT type while considering if two assemblies have the same identity.
+ pSpec->SetWindowsRuntimeType(NULL, NULL);
+
+ CrstHolder ch(&m_DomainCrst);
+
+ const NativeImageDependenciesEntry * pEntry = m_NativeImageDependencies.Lookup(pSpec);
+
+ if (pEntry != NULL)
+ {
+ if (*pGuid != pEntry->m_guidMVID)
+ {
+ SString msg;
+ msg.Printf("ERROR: Native images generated against multiple versions of assembly %s. ", pSpec->GetName());
+ WszOutputDebugString(msg.GetUnicode());
+ COMPlusThrowNonLocalized(kFileLoadException, msg.GetUnicode());
+ }
+ }
+ else
+ {
+ //
+ // No entry yet - create one
+ //
+ AllocMemTracker amTracker;
+ AllocMemTracker *pamTracker = &amTracker;
+
+ NativeImageDependenciesEntry * pNewEntry =
+ new (pamTracker->Track(GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(NativeImageDependenciesEntry)))))
+ NativeImageDependenciesEntry();
+
+ pNewEntry->m_AssemblySpec.CopyFrom(pSpec);
+ pNewEntry->m_AssemblySpec.CloneFieldsToLoaderHeap(AssemblySpec::ALL_OWNED, GetLowFrequencyHeap(), pamTracker);
+
+ pNewEntry->m_guidMVID = *pGuid;
+
+ m_NativeImageDependencies.Add(pNewEntry);
+ amTracker.SuppressRelease();
+ }
+}
+#endif // FEATURE_CORECLR
+
+
+void AppDomain::SetupSharedStatics()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+#ifndef CROSSGEN_COMPILE
+ if (NingenEnabled())
+ return;
+
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: SetupSharedStatics()"));
+
+    // Don't do any work in the init stage. If not in init, only do work in the non-shared case if we are the default domain.
+ _ASSERTE(!g_fEEInit);
+
+ // Because we are allocating/referencing objects, need to be in cooperative mode
+ GCX_COOP();
+
+ static OBJECTHANDLE hSharedStaticsHandle = NULL;
+
+ if (hSharedStaticsHandle == NULL) {
+ // Note that there is no race here since the default domain is always set up first
+ _ASSERTE(IsDefaultDomain());
+
+ MethodTable *pMT = MscorlibBinder::GetClass(CLASS__SHARED_STATICS);
+ _ASSERTE(pMT->IsClassPreInited());
+
+ hSharedStaticsHandle = CreateGlobalHandle(AllocateObject(pMT));
+ }
+
+ DomainLocalModule *pLocalModule;
+
+ if (IsSingleAppDomain())
+ {
+ pLocalModule = MscorlibBinder::GetModule()->GetDomainLocalModule();
+ }
+ else
+ {
+ pLocalModule = GetDomainLocalBlock()->GetModuleSlot(
+ MscorlibBinder::GetModule()->GetModuleIndex());
+ }
+
+ FieldDesc *pFD = MscorlibBinder::GetField(FIELD__SHARED_STATICS__SHARED_STATICS);
+
+ OBJECTREF* pHandle = (OBJECTREF*)
+ ((TADDR)pLocalModule->GetPrecomputedGCStaticsBasePointer()+pFD->GetOffset());
+ SetObjectReference( pHandle, ObjectFromHandle(hSharedStaticsHandle), this );
+
+ // This is a convenient place to initialize String.Empty.
+    // It is treated as intrinsic by the JIT, so the static constructor would never run.
+ // Leaving it uninitialized would confuse debuggers.
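+    //
+    // Net effect (illustrative): managed reads of String.Empty observe the interned
+    // empty string even though String's static constructor never runs.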
+
+ // String should not have any static constructors.
+ _ASSERTE(g_pStringClass->IsClassPreInited());
+
+ FieldDesc * pEmptyStringFD = MscorlibBinder::GetField(FIELD__STRING__EMPTY);
+ OBJECTREF* pEmptyStringHandle = (OBJECTREF*)
+ ((TADDR)pLocalModule->GetPrecomputedGCStaticsBasePointer()+pEmptyStringFD->GetOffset());
+ SetObjectReference( pEmptyStringHandle, StringObject::GetEmptyString(), this );
+#endif // CROSSGEN_COMPILE
+}
+
+DomainAssembly * AppDomain::FindAssembly(PEAssembly * pFile, FindAssemblyOptions options/* = FindAssemblyOptions_None*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ const bool includeFailedToLoad = (options & FindAssemblyOptions_IncludeFailedToLoad) != 0;
+
+#ifdef FEATURE_HOSTED_BINDER
+ if (pFile->HasHostAssembly())
+ {
+ DomainAssembly * pDA = FindAssembly(pFile->GetHostAssembly());
+ if (pDA != nullptr && (pDA->IsLoaded() || (includeFailedToLoad && pDA->IsError())))
+ {
+ return pDA;
+ }
+ return nullptr;
+ }
+#endif
+
+ AssemblyIterator i = IterateAssembliesEx((AssemblyIterationFlags)(
+ kIncludeLoaded |
+ (includeFailedToLoad ? kIncludeFailedToLoad : 0) |
+ (pFile->IsIntrospectionOnly() ? kIncludeIntrospection : kIncludeExecution)));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (i.Next(pDomainAssembly.This()))
+ {
+ PEFile * pManifestFile = pDomainAssembly->GetFile();
+ if (pManifestFile &&
+ !pManifestFile->IsResource() &&
+ pManifestFile->Equals(pFile))
+ {
+ // Caller already has PEAssembly, so we can give DomainAssembly away freely without AddRef
+ return pDomainAssembly.Extract();
+ }
+ }
+ return NULL;
+}
+
+static const AssemblyIterationFlags STANDARD_IJW_ITERATOR_FLAGS =
+ (AssemblyIterationFlags)(kIncludeLoaded | kIncludeLoading | kIncludeExecution | kExcludeCollectible);
+
+#ifdef FEATURE_MIXEDMODE
+Module * AppDomain::GetIJWModule(HMODULE hMod)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ AssemblyIterator i = IterateAssembliesEx(STANDARD_IJW_ITERATOR_FLAGS);
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (i.Next(pDomainAssembly.This()))
+ {
+ _ASSERTE(!pDomainAssembly->IsCollectible());
+ DomainFile * result = pDomainAssembly->FindIJWModule(hMod);
+
+ if (result == NULL)
+ continue;
+ result->EnsureAllocated();
+ return result->GetLoadedModule();
+ }
+
+ return NULL;
+}
+
+DomainFile * AppDomain::FindIJWDomainFile(HMODULE hMod, const SString & path)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ AssemblyIterator i = IterateAssembliesEx(STANDARD_IJW_ITERATOR_FLAGS);
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (i.Next(pDomainAssembly.This()))
+ {
+ _ASSERTE(!pDomainAssembly->IsCollectible());
+ if (pDomainAssembly->GetCurrentAssembly() == NULL)
+ continue;
+
+ DomainFile * result = pDomainAssembly->GetCurrentAssembly()->FindIJWDomainFile(hMod, path);
+
+ if (result != NULL)
+ return result;
+ }
+
+ return NULL;
+}
+#endif // FEATURE_MIXEDMODE
+
+void AppDomain::SetFriendlyName(LPCWSTR pwzFriendlyName, BOOL fDebuggerCares/*=TRUE*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+    // Do all computations into a temporary until we're assured of success
+ SString tmpFriendlyName;
+
+
+ if (pwzFriendlyName)
+ tmpFriendlyName.Set(pwzFriendlyName);
+ else
+ {
+ // If there is an assembly, try to get the name from it.
+ // If no assembly, but if it's the DefaultDomain, then give it a name
+
+ if (m_pRootAssembly)
+ {
+ tmpFriendlyName.SetUTF8(m_pRootAssembly->GetSimpleName());
+
+ SString::Iterator i = tmpFriendlyName.End();
+ if (tmpFriendlyName.FindBack(i, '.'))
+ tmpFriendlyName.Truncate(i);
+ }
+ else
+ {
+ if (IsDefaultDomain())
+ tmpFriendlyName.Set(DEFAULT_DOMAIN_FRIENDLY_NAME);
+
+ // This is for the profiler - if they call GetFriendlyName on an AppdomainCreateStarted
+ // event, then we want to give them a temporary name they can use.
+ else if (GetId().m_dwId != 0)
+ {
+ tmpFriendlyName.Clear();
+ tmpFriendlyName.Printf(W("%s %d"), OTHER_DOMAIN_FRIENDLY_NAME_PREFIX, GetId().m_dwId);
+ }
+ }
+
+ }
+
+ tmpFriendlyName.Normalize();
+
+
+ m_friendlyName = tmpFriendlyName;
+ m_friendlyName.Normalize();
+
+ if(g_pDebugInterface)
+ {
+ // update the name in the IPC publishing block
+ if (SUCCEEDED(g_pDebugInterface->UpdateAppDomainEntryInIPC(this)))
+ {
+ // inform the attached debugger that the name of this appdomain has changed.
+ if (IsDebuggerAttached() && fDebuggerCares)
+ g_pDebugInterface->NameChangeEvent(this, NULL);
+ }
+ }
+}
+
+void AppDomain::ResetFriendlyName(BOOL fDebuggerCares/*=TRUE*/)
+{
+ WRAPPER_NO_CONTRACT;
+ SetFriendlyName(NULL, fDebuggerCares);
+}
+
+LPCWSTR AppDomain::GetFriendlyName(BOOL fDebuggerCares/*=TRUE*/)
+{
+ CONTRACT (LPCWSTR)
+ {
+ THROWS;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+#if _DEBUG
+ // Handle NULL this pointer - this happens sometimes when printing log messages
+ // but in general shouldn't occur in real code
+ if (this == NULL)
+ RETURN NULL;
+#endif // _DEBUG
+
+ if (m_friendlyName.IsEmpty())
+ SetFriendlyName(NULL, fDebuggerCares);
+
+ RETURN m_friendlyName;
+}
+
+LPCWSTR AppDomain::GetFriendlyNameForLogging()
+{
+    CONTRACT (LPCWSTR)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL,NULL_OK));
+ }
+ CONTRACT_END;
+#if _DEBUG
+ // Handle NULL this pointer - this happens sometimes when printing log messages
+ // but in general shouldn't occur in real code
+ if (this == NULL)
+ RETURN NULL;
+#endif // _DEBUG
+    RETURN (m_friendlyName.IsEmpty() ? W("") : (LPCWSTR)m_friendlyName);
+}
+
+LPCWSTR AppDomain::GetFriendlyNameForDebugger()
+{
+ CONTRACT (LPCWSTR)
+ {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+
+ if (m_friendlyName.IsEmpty())
+ {
+ BOOL fSuccess = FALSE;
+
+ EX_TRY
+ {
+ SetFriendlyName(NULL);
+
+ fSuccess = TRUE;
+ }
+ EX_CATCH
+ {
+ // Gobble all exceptions.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (!fSuccess)
+ {
+ RETURN W("");
+ }
+ }
+
+ RETURN m_friendlyName;
+}
+
+
+#endif // !DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+PVOID AppDomain::GetFriendlyNameNoSet(bool* isUtf8)
+{
+ SUPPORTS_DAC;
+
+ if (!m_friendlyName.IsEmpty())
+ {
+ *isUtf8 = false;
+ return m_friendlyName.DacGetRawContent();
+ }
+ else if (m_pRootAssembly)
+ {
+ *isUtf8 = true;
+ return (PVOID)m_pRootAssembly->GetSimpleName();
+ }
+ else if (dac_cast<TADDR>(this) ==
+ dac_cast<TADDR>(SystemDomain::System()->DefaultDomain()))
+ {
+ *isUtf8 = false;
+ return (PVOID)DEFAULT_DOMAIN_FRIENDLY_NAME;
+ }
+ else
+ {
+ return NULL;
+ }
+}
+
+#endif // DACCESS_COMPILE
+
+void AppDomain::CacheStringsForDAC()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //
+ // If the application base, private bin paths, and configuration file are
+ // available, cache them so DAC can read them out of memory
+ //
+#ifdef FEATURE_FUSION
+ if (m_pFusionContext)
+ {
+ CQuickBytes qb;
+ LPWSTR ssz = (LPWSTR) qb.AllocThrows(MAX_URL_LENGTH * sizeof(WCHAR));
+
+ DWORD dwSize;
+
+ // application base
+ ssz[0] = '\0';
+ dwSize = MAX_URL_LENGTH * sizeof(WCHAR);
+ m_pFusionContext->Get(ACTAG_APP_BASE_URL, ssz, &dwSize, 0);
+ m_applicationBase.Set(ssz);
+
+ // private bin paths
+ ssz[0] = '\0';
+ dwSize = MAX_URL_LENGTH * sizeof(WCHAR);
+ m_pFusionContext->Get(ACTAG_APP_PRIVATE_BINPATH, ssz, &dwSize, 0);
+ m_privateBinPaths.Set(ssz);
+
+ // configuration file
+ ssz[0] = '\0';
+ dwSize = MAX_URL_LENGTH * sizeof(WCHAR);
+ m_pFusionContext->Get(ACTAG_APP_CONFIG_FILE, ssz, &dwSize, 0);
+ m_configFile.Set(ssz);
+ }
+#endif // FEATURE_FUSION
+}
+
+#ifndef DACCESS_COMPILE
+
+BOOL AppDomain::AddFileToCache(AssemblySpec* pSpec, PEAssembly *pFile, BOOL fAllowFailure)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pSpec));
+ // Hosted fusion binder makes an exception here, so we cannot assert.
+ //PRECONDITION(pSpec->CanUseWithBindingCache());
+ //PRECONDITION(pFile->CanUseWithBindingCache());
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ CrstHolder holder(&m_DomainCacheCrst);
+ // !!! suppress exceptions
+ if(!m_AssemblyCache.StoreFile(pSpec, pFile) && !fAllowFailure)
+ {
+        // TODO: The assertion below is disabled because we currently experience
+        // inconsistency when resolving Microsoft.Office.Interop.MSProject.dll.
+        // This causes the assertion to fire and crashes VS. The issue is
+        // tracked by Dev10 Bug 658555. Bring the assertion back when that bug
+        // is fixed.
+ // _ASSERTE(FALSE);
+
+ EEFileLoadException::Throw(pSpec, FUSION_E_CACHEFILE_FAILED, NULL);
+ }
+
+ return TRUE;
+}
+
+BOOL AppDomain::AddAssemblyToCache(AssemblySpec* pSpec, DomainAssembly *pAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pSpec));
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(pSpec->CanUseWithBindingCache());
+ PRECONDITION(pAssembly->CanUseWithBindingCache());
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ CrstHolder holder(&m_DomainCacheCrst);
+ // !!! suppress exceptions
+ BOOL bRetVal = m_AssemblyCache.StoreAssembly(pSpec, pAssembly);
+#ifdef FEATURE_FUSION
+ // check for context propagation
+ if (bRetVal && pSpec->GetParentLoadContext() == LOADCTX_TYPE_LOADFROM && pAssembly->GetFile()->GetLoadContext() == LOADCTX_TYPE_DEFAULT)
+ {
+        // LoadFrom propagation occurred, so store it in a way reachable by Load() (the "post-policy" one)
+ AssemblySpec loadSpec;
+ loadSpec.CopyFrom(pSpec);
+ loadSpec.SetParentAssembly(NULL);
+ bRetVal = m_AssemblyCache.StoreAssembly(&loadSpec, pAssembly);
+ }
+#endif
+ return bRetVal;
+}
+
+BOOL AppDomain::AddExceptionToCache(AssemblySpec* pSpec, Exception *ex)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pSpec));
+ PRECONDITION(pSpec->CanUseWithBindingCache());
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (ex->IsTransient())
+ return TRUE;
+
+ CrstHolder holder(&m_DomainCacheCrst);
+ // !!! suppress exceptions
+ return m_AssemblyCache.StoreException(pSpec, ex);
+}
+
+void AppDomain::AddUnmanagedImageToCache(LPCWSTR libraryName, HMODULE hMod)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(libraryName));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+ if (libraryName)
+ {
+ AssemblySpec spec;
+ spec.SetCodeBase(libraryName);
+ m_UnmanagedCache.InsertEntry(&spec, hMod);
+ }
+    return;
+}
+
+
+HMODULE AppDomain::FindUnmanagedImageInCache(LPCWSTR libraryName)
+{
+ CONTRACT(HMODULE)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(libraryName,NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL,NULL_OK));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+ if(libraryName == NULL) RETURN NULL;
+
+ AssemblySpec spec;
+ spec.SetCodeBase(libraryName);
+ RETURN (HMODULE) m_UnmanagedCache.LookupEntry(&spec, 0);
+}
+
+
+BOOL AppDomain::IsCached(AssemblySpec *pSpec)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Check to see if this fits our rather loose idea of a reference to mscorlib.
+ // If so, don't use fusion to bind it - do it ourselves.
+ if (pSpec->IsMscorlib())
+ return TRUE;
+
+ return m_AssemblyCache.Contains(pSpec);
+}
+
+
+PEAssembly* AppDomain::FindCachedFile(AssemblySpec* pSpec, BOOL fThrow /*=TRUE*/)
+{
+ CONTRACTL
+ {
+ if (fThrow) {
+ GC_TRIGGERS;
+ THROWS;
+ }
+ else {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Check to see if this fits our rather loose idea of a reference to mscorlib.
+ // If so, don't use fusion to bind it - do it ourselves.
+ if (fThrow && pSpec->IsMscorlib())
+ {
+ CONSISTENCY_CHECK(SystemDomain::System()->SystemAssembly() != NULL);
+ PEAssembly *pFile = SystemDomain::System()->SystemFile();
+ pFile->AddRef();
+ return pFile;
+ }
+
+ return m_AssemblyCache.LookupFile(pSpec, fThrow);
+}
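+
+// A sketch (illustrative, not new behavior) of the race-tolerant protocol the cache
+// helpers above support, mirroring BindAssemblySpec below: concurrent binds may race
+// to populate the cache, so a failed store is benign and the cache is re-queried for
+// the canonical result.
+//
+//     if (!IsCached(pSpec)) {
+//         PEAssemblyHolder result(/* ... bind ... */);
+//         AddFileToCache(pSpec, result, TRUE /* fAllowFailure: losing the race is OK */);
+//     }
+//     result = FindCachedFile(pSpec); // all racers converge on the cached winner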
+
+
+BOOL AppDomain::PostBindResolveAssembly(AssemblySpec *pPrePolicySpec,
+ AssemblySpec *pPostPolicySpec,
+ HRESULT hrBindResult,
+ AssemblySpec **ppFailedSpec)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pPrePolicySpec));
+ PRECONDITION(CheckPointer(pPostPolicySpec));
+ PRECONDITION(CheckPointer(ppFailedSpec));
+
+ BOOL fFailure = TRUE;
+ *ppFailedSpec = pPrePolicySpec;
+
+#ifdef FEATURE_FUSION
+    // Fusion policy could have been applied,
+    // so the failed assembly may not be exactly what we requested.
+
+ IAssemblyName *pIPostPolicyName = pPrePolicySpec->GetNameAfterPolicy();
+
+ // Get post-policy assembly name
+ if (pIPostPolicyName != NULL)
+ {
+ pPostPolicySpec->InitializeSpec(pIPostPolicyName,
+ NULL,
+ pPrePolicySpec->IsIntrospectionOnly());
+ pPrePolicySpec->ReleaseNameAfterPolicy();
+
+ if (!pPostPolicySpec->CompareEx(pPrePolicySpec))
+ {
+ *ppFailedSpec = pPostPolicySpec;
+ }
+ }
+#endif //FEATURE_FUSION
+
+ PEAssemblyHolder result;
+
+ if ((EEFileLoadException::GetFileLoadKind(hrBindResult) == kFileNotFoundException) ||
+ (hrBindResult == FUSION_E_REF_DEF_MISMATCH) ||
+ (hrBindResult == FUSION_E_INVALID_NAME))
+ {
+ result = TryResolveAssembly(*ppFailedSpec, FALSE /* fPreBind */);
+
+ if (result != NULL && pPrePolicySpec->CanUseWithBindingCache() && result->CanUseWithBindingCache())
+ {
+ fFailure = FALSE;
+
+            // Given the post-policy resolve event construction of the CLR binder,
+            // chained managed resolve events can race with each other, so we allow
+            // the addition of the result to fail. Checking for already-cached specs
+            // is not an option, as it would introduce another race window.
+            // The binder re-fetches the original binding spec and therefore will not
+            // cause inconsistency here.
+            // For the purposes of the resolve event, failure to add to the cache is still a success.
+ AddFileToCache(pPrePolicySpec, result, TRUE /* fAllowFailure */);
+ if (*ppFailedSpec != pPrePolicySpec && pPostPolicySpec->CanUseWithBindingCache())
+ {
+ AddFileToCache(pPostPolicySpec, result, TRUE /* fAllowFailure */ );
+ }
+ }
+ }
+
+ return fFailure;
+}
+
+#ifdef FEATURE_HOSTED_BINDER
+//----------------------------------------------------------------------------------------
+// Helper class for hosted binder
+
+class PEAssemblyAsPrivAssemblyInfo : public IUnknownCommon<ICLRPrivAssemblyInfo>
+{
+public:
+ //------------------------------------------------------------------------------------
+ // Ctor
+
+ PEAssemblyAsPrivAssemblyInfo(PEAssembly *pPEAssembly)
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_THROWS;
+
+ if (pPEAssembly == nullptr)
+ ThrowHR(E_UNEXPECTED);
+
+ pPEAssembly->AddRef();
+ m_pPEAssembly = pPEAssembly;
+ }
+
+ //------------------------------------------------------------------------------------
+ // ICLRPrivAssemblyInfo methods
+
+ //------------------------------------------------------------------------------------
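+    // Callers are expected to use the usual two-call pattern (illustrative): call
+    // with cchBuffer == 0 to learn the required size via *pcchBuffer (signalled by
+    // ERROR_INSUFFICIENT_BUFFER), then call again with a large-enough buffer.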
+ STDMETHOD(GetAssemblyName)(
+ __in DWORD cchBuffer,
+ __out_opt LPDWORD pcchBuffer,
+ __out_ecount_part_opt(cchBuffer, *pcchBuffer) LPWSTR wzBuffer)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if ((cchBuffer == 0) != (wzBuffer == nullptr))
+ {
+ return E_INVALIDARG;
+ }
+
+ LPCUTF8 szName = m_pPEAssembly->GetSimpleName();
+
+ bool bIsAscii;
+ DWORD cchName;
+ IfFailRet(FString::Utf8_Unicode_Length(szName, &bIsAscii, &cchName));
+
+ if (cchBuffer < cchName + 1)
+ {
+ if (pcchBuffer != nullptr)
+ {
+ *pcchBuffer = cchName + 1;
+ }
+ return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+        else
+        {
+            // Use the caller-supplied buffer size; pcchBuffer is __out_opt and may
+            // be null on this path, so don't dereference it for the conversion length.
+            IfFailRet(FString::Utf8_Unicode(szName, bIsAscii, wzBuffer, cchBuffer));
+            if (pcchBuffer != nullptr)
+                *pcchBuffer = cchName;
+            return S_OK;
+        }
+ }
+
+ //------------------------------------------------------------------------------------
+ STDMETHOD(GetAssemblyVersion)(
+ USHORT *pMajor,
+ USHORT *pMinor,
+ USHORT *pBuild,
+ USHORT *pRevision)
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_pPEAssembly->GetVersion(pMajor, pMinor, pBuild, pRevision);
+ }
+
+ //------------------------------------------------------------------------------------
+ STDMETHOD(GetAssemblyPublicKey)(
+ DWORD cbBuffer,
+ LPDWORD pcbBuffer,
+ BYTE *pbBuffer)
+ {
+ STATIC_CONTRACT_LIMITED_METHOD;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+ VALIDATE_PTR_RET(pcbBuffer);
+ VALIDATE_CONDITION((pbBuffer == nullptr) == (cbBuffer == 0), return E_INVALIDARG);
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+            // Note: PEAssembly::GetPublicKey will return a bogus data pointer when *pcbBuffer == 0
+ LPCVOID pbKey = m_pPEAssembly->GetPublicKey(pcbBuffer);
+
+ if (*pcbBuffer != 0)
+ {
+ if (pbBuffer != nullptr && cbBuffer >= *pcbBuffer)
+ {
+ memcpy(pbBuffer, pbKey, *pcbBuffer);
+ hr = S_OK;
+ }
+ else
+ {
+ hr = HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+ }
+ else
+ {
+ hr = S_FALSE; // ==> No public key
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+ }
+
+private:
+ ReleaseHolder<PEAssembly> m_pPEAssembly;
+};
+
+//-----------------------------------------------------------------------------------------------------------------
+static HRESULT VerifyBindHelper(
+ ICLRPrivAssembly *pPrivAssembly,
+ IAssemblyName *pAssemblyName,
+ PEAssembly *pPEAssembly)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ HRESULT hr = S_OK;
+ // Create an ICLRPrivAssemblyInfo to call to ICLRPrivAssembly::VerifyBind
+ NewHolder<PEAssemblyAsPrivAssemblyInfo> pPrivAssemblyInfoImpl = new PEAssemblyAsPrivAssemblyInfo(pPEAssembly);
+ ReleaseHolder<ICLRPrivAssemblyInfo> pPrivAssemblyInfo;
+ IfFailRet(pPrivAssemblyInfoImpl->QueryInterface(__uuidof(ICLRPrivAssemblyInfo), (LPVOID *)&pPrivAssemblyInfo));
+ pPrivAssemblyInfoImpl.SuppressRelease();
+
+ // Call VerifyBind to give the host a chance to reject the bind based on assembly image contents.
+ IfFailRet(pPrivAssembly->VerifyBind(pAssemblyName, pPrivAssembly, pPrivAssemblyInfo));
+
+ return hr;
+}
+
+//-----------------------------------------------------------------------------------------------------------------
+HRESULT AppDomain::BindAssemblySpecForHostedBinder(
+ AssemblySpec * pSpec,
+ IAssemblyName * pAssemblyName,
+ ICLRPrivBinder * pBinder,
+ PEAssembly ** ppAssembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ PRECONDITION(CheckPointer(pSpec));
+ PRECONDITION(pSpec->GetAppDomain() == this);
+ PRECONDITION(CheckPointer(ppAssembly));
+ PRECONDITION(pSpec->GetCodeBase() == nullptr);
+
+ HRESULT hr = S_OK;
+
+#ifdef FEATURE_FUSION
+ StackSString wszAssemblyName;
+
+ if (fusion::logging::LoggingEnabled())
+ { // Don't perform computation if logging is not enabled.
+ FusionBind::GetAssemblyNameDisplayName(pAssemblyName, wszAssemblyName, ASM_DISPLAYF_FULL);
+ }
+
+ // Fire ETW Start event.
+ FireEtwBindingPhaseStart(
+ GetId().m_dwId, LOADCTX_TYPE_HOSTED, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable,
+ pSpec->m_wszCodeBase, wszAssemblyName.GetUnicode(), GetClrInstanceId());
+#endif
+
+    // The Fusion binder can throw (to preserve compat, since it will actually perform an assembly
+    // load as part of its bind), so we need to be careful here to catch any FileNotFoundException
+    // objects if the caller asked us not to throw on file-not-found.
+ ReleaseHolder<ICLRPrivAssembly> pPrivAssembly;
+
+ // We return HRESULTs here on failure instead of throwing as failures here are not necessarily indicative
+ // of an actual application problem. Returning an error code is substantially faster than throwing, and
+ // should be used when possible.
+ IfFailRet(pBinder->BindAssemblyByName(pAssemblyName, &pPrivAssembly));
+
+ IfFailRet(BindHostedPrivAssembly(nullptr, pPrivAssembly, pAssemblyName, ppAssembly));
+
+#ifdef FEATURE_FUSION
+ // Fire ETW End event.
+ FireEtwBindingPhaseEnd(
+ GetId().m_dwId, LOADCTX_TYPE_HOSTED, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable,
+ pSpec->m_wszCodeBase, wszAssemblyName.GetUnicode(), GetClrInstanceId());
+
+ #endif
+
+ return S_OK;
+}
+
+//-----------------------------------------------------------------------------------------------------------------
+HRESULT
+AppDomain::BindHostedPrivAssembly(
+ PEAssembly * pParentAssembly,
+ ICLRPrivAssembly * pPrivAssembly,
+ IAssemblyName * pAssemblyName,
+ PEAssembly ** ppAssembly,
+ BOOL fIsIntrospectionOnly) // = FALSE
+{
+ STANDARD_VM_CONTRACT;
+
+ PRECONDITION(CheckPointer(pPrivAssembly));
+ PRECONDITION(CheckPointer(ppAssembly));
+
+ HRESULT hr = S_OK;
+
+ *ppAssembly = nullptr;
+
+ // See if result has been previously loaded.
+ {
+ DomainAssembly* pDomainAssembly = FindAssembly(pPrivAssembly);
+ if (pDomainAssembly != nullptr)
+ {
+ *ppAssembly = clr::SafeAddRef(pDomainAssembly->GetFile());
+ }
+ }
+
+ if (*ppAssembly != nullptr)
+ { // Already exists: ask the binder to verify and return the assembly.
+ return VerifyBindHelper(pPrivAssembly, pAssemblyName, *ppAssembly);
+ }
+
+ // Get the IL PEFile.
+ PEImageHolder pPEImageIL;
+ {
+ // Does not already exist, so get the resource for the assembly and load it.
+ DWORD dwImageType;
+ ReleaseHolder<ICLRPrivResource> pIResourceIL;
+
+ IfFailRet(pPrivAssembly->GetImageResource(ASSEMBLY_IMAGE_TYPE_IL, &dwImageType, &pIResourceIL));
+ _ASSERTE(dwImageType == ASSEMBLY_IMAGE_TYPE_IL);
+
+ pPEImageIL = PEImage::OpenImage(pIResourceIL, MDInternalImport_Default);
+ }
+
+ // See if an NI is available.
+ DWORD dwAvailableImages;
+ IfFailRet(pPrivAssembly->GetAvailableImageTypes(&dwAvailableImages));
+ _ASSERTE(dwAvailableImages & ASSEMBLY_IMAGE_TYPE_IL); // Just double checking that IL bit is always set.
+
+ // Get the NI PEFile if available.
+ PEImageHolder pPEImageNI;
+ if (dwAvailableImages & ASSEMBLY_IMAGE_TYPE_NATIVE)
+ {
+ DWORD dwImageType;
+ ReleaseHolder<ICLRPrivResource> pIResourceNI;
+
+ IfFailRet(pPrivAssembly->GetImageResource(ASSEMBLY_IMAGE_TYPE_NATIVE, &dwImageType, &pIResourceNI));
+ _ASSERTE(dwImageType == ASSEMBLY_IMAGE_TYPE_NATIVE || FAILED(hr));
+
+ pPEImageNI = PEImage::OpenImage(pIResourceNI, MDInternalImport_TrustedNativeImage);
+ }
+ _ASSERTE(pPEImageIL != nullptr);
+
+ // Create a PEAssembly using the IL and NI images.
+ PEAssemblyHolder pPEAssembly = PEAssembly::Open(pParentAssembly, pPEImageIL, pPEImageNI, pPrivAssembly, fIsIntrospectionOnly);
+
+#ifdef FEATURE_FUSION
+ // Ensure that the assembly found can be loaded for execution in the process.
+ if (!fIsIntrospectionOnly)
+ IfFailRet(RuntimeIsValidAssemblyOnThisPlatform_CheckProcessorArchitecture(pPEAssembly->GetFusionProcessorArchitecture(), FALSE));
+#endif
+
+ // Ask the binder to verify.
+ IfFailRet(VerifyBindHelper(pPrivAssembly, pAssemblyName, pPEAssembly));
+
+ // The result.
+ *ppAssembly = pPEAssembly.Extract();
+
+ return S_OK;
+} // AppDomain::BindHostedPrivAssembly
+#endif // FEATURE_HOSTED_BINDER
+
+//---------------------------------------------------------------------------------------------------------------------
+PEAssembly * AppDomain::BindAssemblySpec(
+ AssemblySpec * pSpec,
+ BOOL fThrowOnFileNotFound,
+ BOOL fRaisePrebindEvents,
+ StackCrawlMark * pCallerStackMark,
+ AssemblyLoadSecurity * pLoadSecurity,
+ BOOL fUseHostBinderIfAvailable)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pSpec));
+ PRECONDITION(pSpec->GetAppDomain() == this);
+ PRECONDITION(this==::GetAppDomain());
+
+ GCX_PREEMP();
+
+ BOOL fForceReThrow = FALSE;
+
+#if defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_APPX_BINDER)
+ //
+ // If there is a host binder available and this is an unparented bind within the
+ // default load context, then the bind will be delegated to the domain-wide host
+ // binder. If there is a parent assembly, then a bind will occur only if it has
+ // an associated ICLRPrivAssembly to serve as the binder.
+ //
+ // fUseHostBinderIfAvailable can be false if this method is called by
+ // CLRPrivBinderFusion::BindAssemblyByName, which explicitly indicates that it
+ // wants to use the fusion binder.
+ //
+
+ if (AppX::IsAppXProcess() &&
+ fUseHostBinderIfAvailable &&
+ (
+ ( pSpec->HasParentAssembly()
+ ? // Parent assembly is hosted
+ pSpec->GetParentAssembly()->GetFile()->HasHostAssembly()
+ : // Non-parented default context bind
+ ( HasLoadContextHostBinder() &&
+ !pSpec->IsIntrospectionOnly()
+ )
+ ) ||
+ (pSpec->GetHostBinder() != nullptr)
+ )
+ )
+ {
+ HRESULT hr = S_OK;
+
+ if (pSpec->GetCodeBase() != nullptr)
+ { // LoadFrom is not supported in AppX (we should never even get here)
+ IfFailThrow(E_INVALIDARG);
+ }
+
+ // Get the assembly display name.
+ ReleaseHolder<IAssemblyName> pAssemblyName;
+ IfFailThrow(pSpec->CreateFusionName(&pAssemblyName, TRUE, TRUE));
+
+ // Create new binding scope for fusion logging.
+ fusion::logging::BindingScope defaultScope(pAssemblyName, FUSION_BIND_LOG_CATEGORY_DEFAULT);
+
+ PEAssemblyHolder pAssembly;
+ EX_TRY
+ {
+            // If there is a specified binder, then it is used.
+            // Otherwise, if a parent assembly exists, it provides the binding context.
+            // Otherwise, the domain's root-level binder is used.
+ ICLRPrivBinder * pBinder = nullptr;
+
+ if (pSpec->GetHostBinder() != nullptr)
+ {
+ pBinder = pSpec->GetHostBinder();
+ }
+ else
+ {
+ PEAssembly * pParentAssembly =
+ (pSpec->GetParentAssembly() == nullptr) ? nullptr : pSpec->GetParentAssembly()->GetFile();
+
+ if ((pParentAssembly != nullptr) && (pParentAssembly->HasHostAssembly()))
+ {
+ BOOL fMustUseOriginalLoadContextBinder = FALSE;
+ if (pSpec->IsContentType_WindowsRuntime())
+ {
+                        // Ugly, but we need to handle Framework assemblies that contain WinRT type references,
+                        // and the Fusion binder won't resolve these in AppX processes. The shareable flag is currently
+                        // a reasonable proxy for these cases. (It also catches first-party WinMD files, but dependencies
+                        // of those can also be resolved by the original load context binder.)
+                        // TODO! Update the fusion binder to resolve WinMD references correctly.
+ IfFailThrow(pParentAssembly->GetHostAssembly()->IsShareable(&fMustUseOriginalLoadContextBinder));
+ }
+
+ if (fMustUseOriginalLoadContextBinder)
+ {
+ pBinder = GetLoadContextHostBinder();
+ }
+ else
+ {
+ pBinder = pParentAssembly->GetHostAssembly();
+ }
+ }
+ else
+ {
+ pBinder = GetCurrentLoadContextHostBinder();
+ }
+ }
+ _ASSERTE(pBinder != nullptr);
+
+ hr = BindAssemblySpecForHostedBinder(pSpec, pAssemblyName, pBinder, &pAssembly);
+ if (FAILED(hr))
+ {
+ goto EndTry1;
+ }
+EndTry1:;
+ }
+        // The combination of this conditional catch and the following if statement (which throws)
+        // reduces the number of exceptions thrown in scenarios where the exception does not escape
+        // the method. We cannot get rid of the try/catch block, as there are code paths within some
+        // of the clrpriv binders that throw.
+        // Note: In theory, FileNotFound should always come here as an HRESULT, never as an exception.
+ EX_CATCH_HRESULT_IF(hr,
+ !fThrowOnFileNotFound && Assembly::FileNotFound(hr))
+
+ if (FAILED(hr) && (fThrowOnFileNotFound || !Assembly::FileNotFound(hr)))
+ {
+ if (Assembly::FileNotFound(hr))
+ {
+ _ASSERTE(fThrowOnFileNotFound);
+ // Uses defaultScope
+ EEFileLoadException::Throw(pSpec, fusion::logging::GetCurrentFusionBindLog(), hr);
+ }
+ if ((hr == CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT) && pSpec->IsContentType_WindowsRuntime())
+ { // Error returned e.g. for WinRT type name without namespace
+ if (fThrowOnFileNotFound)
+ { // Throw ArgumentException (with the HRESULT) wrapped by TypeLoadException to give user type name for diagnostics
+ // Note: TypeLoadException is equivalent of FileNotFound in WinRT world
+ EEMessageException ex(hr);
+ EX_THROW_WITH_INNER(EETypeLoadException, (pSpec->GetWinRtTypeNamespace(), pSpec->GetWinRtTypeClassName(), nullptr, nullptr, IDS_EE_WINRT_LOADFAILURE), &ex);
+ }
+ }
+ else
+ {
+ IfFailThrow(hr);
+ }
+ }
+
+ _ASSERTE((pAssembly != nullptr) || (FAILED(hr) && !fThrowOnFileNotFound));
+ return pAssembly.Extract();
+ }
+ else
+#endif //FEATURE_HOSTED_BINDER && FEATURE_APPX_BINDER
+#if defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_COMINTEROP)
+ // Handle WinRT assemblies in the classic/hybrid scenario. If this is an AppX process,
+ // then this case will be handled by the previous block as part of the full set of
+ // available binding hosts.
+#ifndef FEATURE_APPX_BINDER
+ if (pSpec->IsContentType_WindowsRuntime())
+#else
+ if (!AppX::IsAppXProcess() && pSpec->IsContentType_WindowsRuntime())
+#endif
+ {
+ HRESULT hr = S_OK;
+
+ // Get the assembly display name.
+ ReleaseHolder<IAssemblyName> pAssemblyName;
+
+ IfFailThrow(pSpec->CreateFusionName(&pAssemblyName, TRUE, TRUE));
+
+#ifdef FEATURE_FUSION
+ // Create new binding scope for fusion logging.
+ fusion::logging::BindingScope defaultScope(pAssemblyName, FUSION_BIND_LOG_CATEGORY_DEFAULT);
+#endif
+
+ PEAssemblyHolder pAssembly;
+
+ EX_TRY
+ {
+ hr = BindAssemblySpecForHostedBinder(pSpec, pAssemblyName, m_pWinRtBinder, &pAssembly);
+ if (FAILED(hr))
+ goto EndTry2; // Goto end of try block.
+EndTry2:;
+ }
+        // The combination of this conditional catch and the following if statement (which throws)
+        // reduces the number of exceptions thrown in scenarios where the exception does not escape
+        // the method. We cannot get rid of the try/catch block, as there are code paths within some
+        // of the clrpriv binders that throw.
+        // Note: In theory, FileNotFound should always come here as an HRESULT, never as an exception.
+ EX_CATCH_HRESULT_IF(hr,
+ !fThrowOnFileNotFound && Assembly::FileNotFound(hr))
+
+ if (FAILED(hr) && (fThrowOnFileNotFound || !Assembly::FileNotFound(hr)))
+ {
+ if (Assembly::FileNotFound(hr))
+ {
+ _ASSERTE(fThrowOnFileNotFound);
+ // Uses defaultScope
+#ifdef FEATURE_FUSION
+ EEFileLoadException::Throw(pSpec, fusion::logging::GetCurrentFusionBindLog(), hr);
+#else
+ EEFileLoadException::Throw(pSpec, hr);
+#endif // FEATURE_FUSION
+ }
+
+ // WinRT type bind failures
+ _ASSERTE(pSpec->IsContentType_WindowsRuntime());
+ if (hr == HRESULT_FROM_WIN32(APPMODEL_ERROR_NO_PACKAGE)) // Returned by RoResolveNamespace when using 3rd party WinRT types in classic process
+ {
+ if (fThrowOnFileNotFound)
+ { // Throw NotSupportedException (with custom message) wrapped by TypeLoadException to give user type name for diagnostics
+ // Note: TypeLoadException is equivalent of FileNotFound in WinRT world
+ EEMessageException ex(kNotSupportedException, IDS_EE_WINRT_THIRDPARTY_NOTSUPPORTED);
+ EX_THROW_WITH_INNER(EETypeLoadException, (pSpec->GetWinRtTypeNamespace(), pSpec->GetWinRtTypeClassName(), nullptr, nullptr, IDS_EE_WINRT_LOADFAILURE), &ex);
+ }
+ }
+ else if ((hr == CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT) || // Returned e.g. for WinRT type name without namespace
+ (hr == COR_E_PLATFORMNOTSUPPORTED)) // Using WinRT on pre-Win8 OS
+ {
+ if (fThrowOnFileNotFound)
+ { // Throw ArgumentException/PlatformNotSupportedException wrapped by TypeLoadException to give user type name for diagnostics
+ // Note: TypeLoadException is equivalent of FileNotFound in WinRT world
+ EEMessageException ex(hr);
+ EX_THROW_WITH_INNER(EETypeLoadException, (pSpec->GetWinRtTypeNamespace(), pSpec->GetWinRtTypeClassName(), nullptr, nullptr, IDS_EE_WINRT_LOADFAILURE), &ex);
+ }
+ }
+ else
+ {
+ IfFailThrow(hr);
+ }
+ }
+ _ASSERTE((FAILED(hr) && !fThrowOnFileNotFound) || pAssembly != nullptr);
+
+ return pAssembly.Extract();
+ }
+ else
+#endif // FEATURE_HOSTED_BINDER && FEATURE_COMINTEROP
+ if (pSpec->HasUniqueIdentity())
+ {
+ HRESULT hrBindResult = S_OK;
+ PEAssemblyHolder result;
+
+#if defined(FEATURE_COMINTEROP) && defined(FEATURE_REFLECTION_ONLY_LOAD)
+ // We want to keep this holder around to avoid closing and remapping the file again - calls to Fusion further down will open the file again
+ ReleaseHolder<IMetaDataAssemblyImport> pMetaDataAssemblyImport;
+
+ // Special case ReflectionOnlyLoadFrom on .winmd (WinRT) assemblies
+ if (pSpec->IsIntrospectionOnly() && (pSpec->m_wszCodeBase != NULL))
+        { // This is a LoadFrom request - we need to find out if it is a .winmd file or a classic managed assembly
+ HRESULT hr = S_OK;
+
+ StackSString sPath(pSpec->GetCodeBase());
+ PEAssembly::UrlToPath(sPath);
+
+ // Open MetaData of the file
+ hr = GetAssemblyMDInternalImportEx(
+ sPath,
+ IID_IMetaDataAssemblyImport,
+ MDInternalImport_Default,
+ (IUnknown **)&pMetaDataAssemblyImport);
+ if (SUCCEEDED(hr))
+ {
+ DWORD dwAssemblyFlags = 0;
+ hr = pMetaDataAssemblyImport->GetAssemblyProps(
+ TokenFromRid(1, mdtAssembly),
+ nullptr, // ppbPublicKey
+ nullptr, // pcbPublicKey
+ nullptr, // pulHashAlgId
+ nullptr, // szName
+ 0, // cchName
+ nullptr, // pchName
+ nullptr, // pMetaData
+ &dwAssemblyFlags);
+ if (SUCCEEDED(hr) && IsAfContentType_WindowsRuntime(dwAssemblyFlags))
+ { // It is .winmd file
+ _ASSERTE(!AppX::IsAppXProcess());
+
+ ReleaseHolder<ICLRPrivAssembly> pPrivAssembly;
+ ReleaseHolder<PEAssembly> pAssembly;
+
+ hr = m_pReflectionOnlyWinRtBinder->BindAssemblyExplicit(sPath, &pPrivAssembly);
+ if (SUCCEEDED(hr))
+ {
+ hr = BindHostedPrivAssembly(nullptr, pPrivAssembly, nullptr, &pAssembly, TRUE);
+ _ASSERTE(FAILED(hr) || (pAssembly != nullptr));
+ }
+ if (FAILED(hr))
+ {
+ if (fThrowOnFileNotFound)
+ {
+ ThrowHR(hr);
+ }
+ return nullptr;
+ }
+ return pAssembly.Extract();
+ }
+ }
+ }
+#endif //FEATURE_COMINTEROP && FEATURE_REFLECTION_ONLY_LOAD
+
+ EX_TRY
+ {
+ if (!IsCached(pSpec))
+ {
+
+#ifdef FEATURE_FUSION
+ if (fRaisePrebindEvents
+ && (result = TryResolveAssembly(pSpec, TRUE /*fPreBind*/)) != NULL
+ && result->CanUseWithBindingCache())
+ {
+ // Failure to add simply means someone else beat us to it. In that case
+ // the FindCachedFile call below (after catch block) will update result
+ // to the cached value.
+ AddFileToCache(pSpec, result, TRUE /*fAllowFailure*/);
+ }
+ else
+#endif
+ {
+ bool fAddFileToCache = false;
+
+ BOOL fIsWellKnown = FALSE;
+
+#ifdef FEATURE_FUSION
+ SafeComHolderPreemp<IAssembly> pIAssembly;
+ SafeComHolderPreemp<IBindResult> pNativeFusionAssembly;
+ SafeComHolderPreemp<IHostAssembly> pIHostAssembly;
+ SafeComHolderPreemp<IFusionBindLog> pFusionLog;
+
+ // Event Tracing for Windows is used to log data for performance and functional testing purposes.
+ // The events below are used to measure the performance of assembly binding as a whole.
+ FireEtwBindingPhaseStart(GetId().m_dwId, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable, pSpec->m_wszCodeBase, NULL, GetClrInstanceId());
+ fIsWellKnown = pSpec->FindAssemblyFile(this,
+ fThrowOnFileNotFound,
+ &pIAssembly,
+ &pIHostAssembly,
+ &pNativeFusionAssembly,
+ &pFusionLog,
+ &hrBindResult,
+ pCallerStackMark,
+ pLoadSecurity);
+ FireEtwBindingPhaseEnd(GetId().m_dwId, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable, pSpec->m_wszCodeBase, NULL, GetClrInstanceId());
+ if (pIAssembly || pIHostAssembly)
+ {
+
+ if (fIsWellKnown &&
+ m_pRootAssembly &&
+ pIAssembly == m_pRootAssembly->GetFusionAssembly())
+ {
+ // This is a shortcut to avoid opening another copy of the process exe.
+ // In fact, we have other similar cases where we've called
+ // ExplicitBind() rather than normal binding, which aren't covered here.
+
+ // <TODO>@todo: It would be nice to populate the cache with those assemblies
+ // to avoid getting in this situation.</TODO>
+
+ result = m_pRootAssembly->GetManifestFile();
+ result.SuppressRelease(); // Didn't get a refcount
+ }
+ else
+ {
+ BOOL isSystemAssembly = pSpec->IsMscorlib(); // can use SystemDomain::m_pSystemAssembly
+ BOOL isIntrospectionOnly = pSpec->IsIntrospectionOnly();
+ if (pIAssembly)
+ result = PEAssembly::Open(pIAssembly, pNativeFusionAssembly, pFusionLog,
+ isSystemAssembly, isIntrospectionOnly);
+ else
+ result = PEAssembly::Open(pIHostAssembly, isSystemAssembly,
+ isIntrospectionOnly);
+ }
+ fAddFileToCache = true;
+ }
+ else if (!fIsWellKnown)
+ {
+                    // Trigger the resolve event for the non-throw situation as well.
+                    // However, this code path behaves as if the resolve handler had thrown,
+                    // that is, it does not trigger an MDA.
+ _ASSERTE(fThrowOnFileNotFound == FALSE);
+
+ AssemblySpec NewSpec(this);
+ AssemblySpec *pFailedSpec = NULL;
+
+ fForceReThrow = TRUE; // Managed resolve event handler can throw
+
+                    // Purposely ignore the return value
+ PostBindResolveAssembly(pSpec, &NewSpec, hrBindResult, &pFailedSpec);
+ }
+#else //!FEATURE_FUSION
+ // Use CoreClr's fusion alternative
+ CoreBindResult bindResult;
+
+ pSpec->Bind(this, fThrowOnFileNotFound, &bindResult, FALSE /* fNgenExplicitBind */, FALSE /* fExplicitBindToNativeImage */, pCallerStackMark);
+ hrBindResult = bindResult.GetHRBindResult();
+
+ if (bindResult.Found())
+ {
+ if (SystemDomain::SystemFile() && bindResult.IsMscorlib())
+ {
+ // Avoid rebinding to another copy of mscorlib
+ result = SystemDomain::SystemFile();
+ result.SuppressRelease(); // Didn't get a refcount
+ }
+ else
+ {
+ // IsSystem on the PEFile should be false, even for mscorlib satellites
+ result = PEAssembly::Open(&bindResult,
+ FALSE, pSpec->IsIntrospectionOnly());
+ }
+ fAddFileToCache = true;
+
+#if defined(FEATURE_CORECLR)
+ // Setup the reference to the binder, which performed the bind, into the AssemblySpec
+ ICLRPrivBinder* pBinder = result->GetBindingContext();
+ _ASSERTE(pBinder != NULL);
+ pSpec->SetBindingContext(pBinder);
+#endif // defined(FEATURE_CORECLR)
+ }
+
+#endif //!FEATURE_FUSION
+
+ if (fAddFileToCache)
+ {
+
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+ // <TODO> PERF: This doesn't scale... </TODO>
+ if (pSpec->IsIntrospectionOnly() && (pSpec->GetCodeBase() != NULL))
+ {
+ IAssemblyName * pIAssemblyName = result->GetFusionAssemblyName();
+
+ AppDomain::AssemblyIterator i = IterateAssembliesEx((AssemblyIterationFlags)(
+ kIncludeLoaded | kIncludeIntrospection));
+ CollectibleAssemblyHolder<DomainAssembly *> pCachedDomainAssembly;
+ while (i.Next(pCachedDomainAssembly.This()))
+ {
+ IAssemblyName * pCachedAssemblyName = pCachedDomainAssembly->GetAssembly()->GetFusionAssemblyName();
+ if (pCachedAssemblyName->IsEqual(pIAssemblyName, ASM_CMPF_IL_ALL) == S_OK)
+ {
+ if (!pCachedDomainAssembly->GetAssembly()->GetManifestModule()->GetFile()->Equals(result))
+ {
+ COMPlusThrow(kFileLoadException, IDS_EE_REFLECTIONONLY_LOADFROM, pSpec->GetCodeBase());
+ }
+ }
+ }
+ }
+#endif //FEATURE_REFLECTION_ONLY_LOAD
+
+ if (pSpec->CanUseWithBindingCache() && result->CanUseWithBindingCache())
+ {
+ // Failure to add simply means someone else beat us to it. In that case
+ // the FindCachedFile call below (after catch block) will update result
+ // to the cached value.
+ AddFileToCache(pSpec, result, TRUE /*fAllowFailure*/);
+ }
+ }
+ else if (!fIsWellKnown)
+ {
+                // Trigger the resolve event for the non-throw situation as well.
+                // However, this code path behaves as if the resolve handler had thrown,
+                // that is, it does not trigger an MDA.
+ _ASSERTE(fThrowOnFileNotFound == FALSE);
+
+ AssemblySpec NewSpec(this);
+ AssemblySpec *pFailedSpec = NULL;
+
+ fForceReThrow = TRUE; // Managed resolve event handler can throw
+
+                // Purposely ignore the return value
+ PostBindResolveAssembly(pSpec, &NewSpec, hrBindResult, &pFailedSpec);
+ }
+ }
+ }
+ }
+ EX_CATCH
+ {
+ Exception *ex = GET_EXCEPTION();
+
+ AssemblySpec NewSpec(this);
+ AssemblySpec *pFailedSpec = NULL;
+
+ // Let transient exceptions or managed resolve event handler exceptions propagate
+ if (ex->IsTransient() || fForceReThrow)
+ {
+ EX_RETHROW;
+ }
+
+ {
+ // This is not executed for SO exceptions so we need to disable the backout
+ // stack validation to prevent false violations from being reported.
+ DISABLE_BACKOUT_STACK_VALIDATION;
+
+ BOOL fFailure = PostBindResolveAssembly(pSpec, &NewSpec, ex->GetHR(), &pFailedSpec);
+ if (fFailure)
+ {
+ BOOL bFileNotFoundException =
+ (EEFileLoadException::GetFileLoadKind(ex->GetHR()) == kFileNotFoundException);
+
+ if (!bFileNotFoundException)
+ {
+ fFailure = AddExceptionToCache(pFailedSpec, ex);
+ } // else, fFailure stays TRUE
+ // Effectively, fFailure == bFileNotFoundException || AddExceptionToCache(pFailedSpec, ex)
+
+ // Only throw this exception if we are the first in the cache
+ if (fFailure)
+ {
+ //
+ // If the BindingFailure MDA is enabled, trigger one for this failure
+ // Note: TryResolveAssembly() can also throw if an AssemblyResolve event subscriber throws
+ // and the MDA isn't sent in this case (or for transient failure cases)
+ //
+#ifdef MDA_SUPPORTED
+ MdaBindingFailure* pProbe = MDA_GET_ASSISTANT(BindingFailure);
+ if (pProbe)
+ {
+ // Transition to cooperative GC mode before using any OBJECTREFs.
+ GCX_COOP();
+
+ OBJECTREF exceptionObj = GET_THROWABLE();
+ GCPROTECT_BEGIN(exceptionObj)
+ {
+ pProbe->BindFailed(pFailedSpec, &exceptionObj);
+ }
+ GCPROTECT_END();
+ }
+#endif
+
+ // In the same cases as for the MDA, store the failure information for DAC to read
+ if (IsDebuggerAttached()) {
+ FailedAssembly *pFailed = new FailedAssembly();
+ pFailed->Initialize(pFailedSpec, ex);
+ IfFailThrow(m_failedAssemblies.Append(pFailed));
+ }
+
+ if (!bFileNotFoundException || fThrowOnFileNotFound)
+ {
+
+ // V1.1 App-compatibility workaround. See VSW530166 if you want to whine about it.
+ //
+ // In Everett, if we failed to download an assembly because of a broken network cable,
+ // we returned a FileNotFoundException with a COR_E_FILENOTFOUND hr embedded inside
+ // (which would be exposed when marshaled to native.)
+ //
+ // In Whidbey, we now set the more appropriate INET_E_RESOURCE_NOT_FOUND hr. But
+ // the online/offline switch code in VSTO for Everett hardcoded a check for
+ // COR_E_FILENOTFOUND.
+ //
+ // So now, to keep that code from breaking, we have to remap INET_E_RESOURCE_NOT_FOUND
+ // back to COR_E_FILENOTFOUND. We're doing it here rather down in Fusion so as to affect
+ // the least number of callers.
+
+ if (ex->GetHR() == INET_E_RESOURCE_NOT_FOUND)
+ {
+ EEFileLoadException::Throw(pFailedSpec, COR_E_FILENOTFOUND, ex);
+ }
+
+ if (EEFileLoadException::CheckType(ex))
+ {
+ if (pFailedSpec == pSpec)
+ {
+ EX_RETHROW; //preserve the information
+ }
+ else
+ {
+ StackSString exceptionDisplayName, failedSpecDisplayName;
+
+ ((EEFileLoadException*)ex)->GetName(exceptionDisplayName);
+ pFailedSpec->GetFileOrDisplayName(0, failedSpecDisplayName);
+
+ if (exceptionDisplayName.CompareCaseInsensitive(failedSpecDisplayName) == 0)
+ {
+ EX_RETHROW; // Throw the original exception. Otherwise, we'd throw an exception that contains the same message twice.
+ }
+ }
+ }
+
+ EEFileLoadException::Throw(pFailedSpec, ex->GetHR(), ex);
+ }
+
+ }
+ }
+ }
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ // Now, if it's a cacheable bind we need to re-fetch the result from the cache, as we may have been racing with another
+ // thread to store our result. Note that we may throw from here, if there is a cached exception.
+ // This will release the refcount of the current result holder (if any), and will replace
+ // it with a non-addref'ed result
+ if (pSpec->CanUseWithBindingCache() && (result== NULL || result->CanUseWithBindingCache()))
+ {
+ result = FindCachedFile(pSpec);
+
+ if (result != NULL)
+ result->AddRef();
+ }
+
+ return result.Extract();
+ }
+ else
+ {
+ // Unsupported content type
+ if (fThrowOnFileNotFound)
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ return nullptr;
+ }
+} // AppDomain::BindAssemblySpec
+
+
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+DomainAssembly *
+AppDomain::BindAssemblySpecForIntrospectionDependencies(
+ AssemblySpec * pSpec)
+{
+ STANDARD_VM_CONTRACT;
+
+ PRECONDITION(CheckPointer(pSpec));
+ PRECONDITION(pSpec->GetAppDomain() == this);
+ PRECONDITION(pSpec->IsIntrospectionOnly());
+ PRECONDITION(this == ::GetAppDomain());
+
+ PEAssemblyHolder result;
+ HRESULT hr;
+
+ if (!pSpec->HasUniqueIdentity())
+ {
+ if (!pSpec->HasBindableIdentity())
+ {
+ COMPlusThrowHR(E_UNEXPECTED);
+ }
+
+        // In classic (non-AppX) processes, this is initialized by the AppDomain constructor
+ _ASSERTE(m_pReflectionOnlyWinRtBinder != NULL);
+
+ ReleaseHolder<ICLRPrivAssembly> pPrivAssembly;
+ hr = m_pReflectionOnlyWinRtBinder->BindWinRtType(
+ pSpec->GetWinRtTypeNamespace(),
+ pSpec->GetWinRtTypeClassName(),
+ pSpec->GetParentAssembly(),
+ &pPrivAssembly);
+ if (FAILED(hr))
+ {
+ if (hr == CLR_E_BIND_TYPE_NOT_FOUND)
+ { // We could not find the type - throw TypeLoadException to give user type name for diagnostics
+ EX_THROW(EETypeLoadException, (pSpec->GetWinRtTypeNamespace(), pSpec->GetWinRtTypeClassName(), nullptr, nullptr, IDS_EE_REFLECTIONONLY_WINRT_LOADFAILURE));
+ }
+ if (!Exception::IsTransient(hr))
+ { // Throw the HRESULT as exception wrapped by TypeLoadException to give user type name for diagnostics
+ EEMessageException ex(hr);
+ EX_THROW_WITH_INNER(EETypeLoadException, (pSpec->GetWinRtTypeNamespace(), pSpec->GetWinRtTypeClassName(), nullptr, nullptr, IDS_EE_REFLECTIONONLY_WINRT_LOADFAILURE), &ex);
+ }
+ IfFailThrow(hr);
+ }
+
+ IfFailThrow(BindHostedPrivAssembly(nullptr, pPrivAssembly, nullptr, &result, TRUE));
+ _ASSERTE(result != nullptr);
+ return LoadDomainAssembly(pSpec, result, FILE_LOADED);
+ }
+
+ EX_TRY
+ {
+ if (!IsCached(pSpec))
+ {
+ result = TryResolveAssembly(pSpec, TRUE /*fPreBind*/);
+ if (result != NULL && result->CanUseWithBindingCache())
+ {
+ // Failure to add simply means someone else beat us to it. In that case
+ // the FindCachedFile call below (after catch block) will update result
+ // to the cached value.
+ AddFileToCache(pSpec, result, TRUE /*fAllowFailure*/);
+ }
+ }
+ }
+ EX_CATCH
+ {
+ Exception *ex = GET_EXCEPTION();
+ AssemblySpec NewSpec(this);
+ AssemblySpec *pFailedSpec = NULL;
+
+ // Let transient exceptions propagate
+ if (ex->IsTransient())
+ {
+ EX_RETHROW;
+ }
+
+ // Non-"file not found" exception also propagate
+ BOOL fFailure = PostBindResolveAssembly(pSpec, &NewSpec, ex->GetHR(), &pFailedSpec);
+ if(fFailure)
+ {
+ if (AddExceptionToCache(pFailedSpec, ex))
+ {
+ if ((pFailedSpec == pSpec) && EEFileLoadException::CheckType(ex))
+ {
+ EX_RETHROW; //preserve the information
+ }
+ else
+ EEFileLoadException::Throw(pFailedSpec, ex->GetHR(), ex);
+ }
+ }
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ result = FindCachedFile(pSpec);
+ result.SuppressRelease();
+
+
+ if (result)
+ {
+ // It was either already in the spec cache or the prebind event returned a result.
+ return LoadDomainAssembly(pSpec, result, FILE_LOADED);
+ }
+
+
+ // Otherwise, look in the list of assemblies already loaded for reflectiononly.
+ IAssemblyName * ptmp = NULL;
+ hr = pSpec->CreateFusionName(&ptmp);
+ if (FAILED(hr))
+ {
+ COMPlusThrowHR(hr);
+ }
+ SafeComHolder<IAssemblyName> pIAssemblyName(ptmp);
+
+ // Note: We do not support introspection-only collectible assemblies (yet)
+ AppDomain::AssemblyIterator i = IterateAssembliesEx((AssemblyIterationFlags)(
+ kIncludeLoaded | kIncludeIntrospection | kExcludeCollectible));
+ CollectibleAssemblyHolder<DomainAssembly *> pCachedDomainAssembly;
+
+ while (i.Next(pCachedDomainAssembly.This()))
+ {
+ _ASSERTE(!pCachedDomainAssembly->IsCollectible());
+ IAssemblyName * pCachedAssemblyName = pCachedDomainAssembly->GetAssembly()->GetFusionAssemblyName();
+ if (pCachedAssemblyName->IsEqual(pIAssemblyName, ASM_CMPF_IL_ALL) == S_OK)
+ {
+ return pCachedDomainAssembly;
+ }
+ }
+ // If not found in that list, it is an ERROR. Yes, this is by design.
+ StackSString name;
+ pSpec->GetFileOrDisplayName(0, name);
+ COMPlusThrow(kFileLoadException, IDS_EE_REFLECTIONONLY_LOADFAILURE,name);
+} // AppDomain::BindAssemblySpecForIntrospectionDependencies
+#endif // FEATURE_REFLECTION_ONLY_LOAD
+
+PEAssembly *AppDomain::TryResolveAssembly(AssemblySpec *pSpec, BOOL fPreBind)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ PEAssembly *result = NULL;
+
+ EX_TRY
+ {
+ result = pSpec->ResolveAssemblyFile(this, fPreBind);
+ }
+ EX_HOOK
+ {
+ Exception *pEx = GET_EXCEPTION();
+
+ if (!pEx->IsTransient())
+ {
+ AddExceptionToCache(pSpec, pEx);
+ if (!EEFileLoadException::CheckType(pEx))
+ EEFileLoadException::Throw(pSpec, pEx->GetHR(), pEx);
+ }
+ }
+ EX_END_HOOK;
+
+ return result;
+}
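+
+// TryResolveAssembly surfaces the managed assembly-resolve events (AppDomain.AssemblyResolve
+// and friends): if a managed handler returns an assembly for the failed spec, that file
+// becomes the bind result; non-transient handler failures are cached via AddExceptionToCache.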
+
+#ifdef FEATURE_FUSION
+void AppDomain::GetFileFromFusion(IAssembly *pIAssembly, LPCWSTR wszModuleName,
+ SString &path)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ SafeComHolder<IAssemblyModuleImport> pImport;
+ IfFailThrow(pIAssembly->GetModuleByName(wszModuleName, &pImport));
+
+ if (!pImport->IsAvailable()) {
+ AssemblySink* pSink = AllocateAssemblySink(NULL);
+ SafeComHolder<IAssemblyBindSink> sinkholder(pSink);
+ SafeComHolder<IAssemblyModuleImport> pResult;
+
+ IfFailThrow(FusionBind::RemoteLoadModule(GetFusionContext(),
+ pImport,
+ pSink,
+ &pResult));
+ pResult->AddRef();
+ pImport.Assign(pResult);
+ }
+
+ DWORD dwPath = 0;
+ pImport->GetModulePath(NULL, &dwPath);
+
+ LPWSTR buffer = path.OpenUnicodeBuffer(dwPath-1);
+ IfFailThrow(pImport->GetModulePath(buffer, &dwPath));
+ path.CloseBuffer();
+}
+
+PEAssembly *AppDomain::BindExplicitAssembly(HMODULE hMod, BOOL bindable)
+{
+ CONTRACT(PEAssembly *)
+ {
+ PRECONDITION(CheckPointer(hMod));
+ GC_TRIGGERS;
+ THROWS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ SafeComHolder<IAssembly> pFusionAssembly;
+ SafeComHolder<IBindResult> pNativeFusionAssembly;
+ SafeComHolder<IFusionBindLog> pFusionLog;
+
+ StackSString path;
+ PEImage::GetPathFromDll(hMod, path);
+
+ HRESULT hr = ExplicitBind(path, GetFusionContext(),
+ bindable ? EXPLICITBIND_FLAGS_EXE : EXPLICITBIND_FLAGS_NON_BINDABLE,
+ NULL, &pFusionAssembly, &pNativeFusionAssembly,&pFusionLog);
+ if (FAILED(hr))
+ EEFileLoadException::Throw(path, hr);
+
+ RETURN PEAssembly::OpenHMODULE(hMod, pFusionAssembly,pNativeFusionAssembly, pFusionLog, FALSE);
+}
+
+Assembly *AppDomain::LoadExplicitAssembly(HMODULE hMod, BOOL bindable)
+{
+ CONTRACT(Assembly *)
+ {
+ PRECONDITION(CheckPointer(hMod));
+ GC_TRIGGERS;
+ THROWS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ PEAssemblyHolder pFile(BindExplicitAssembly(hMod, bindable));
+
+ RETURN LoadAssembly(NULL, pFile, FILE_ACTIVE);
+}
+#endif // FEATURE_FUSION
+
+ULONG AppDomain::AddRef()
+{
+ LIMITED_METHOD_CONTRACT;
+ return InterlockedIncrement(&m_cRef);
+}
+
+ULONG AppDomain::Release()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_cRef > 0);
+ }
+ CONTRACTL_END;
+
+ ULONG cRef = InterlockedDecrement(&m_cRef);
+ if (!cRef)
+ {
+ _ASSERTE (m_Stage == STAGE_CREATING || m_Stage == STAGE_CLOSED);
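+ // Capture the ID before 'delete this'; the test hook below still needs it
+ // after the object is gone.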
+ ADID adid=GetId();
+ delete this;
+ TESTHOOKCALL(AppDomainDestroyed(adid.m_dwId));
+ }
+ return (cRef);
+}
+
+#ifdef FEATURE_FUSION
+AssemblySink* AppDomain::AllocateAssemblySink(AssemblySpec* pSpec)
+{
+ CONTRACT(AssemblySink *)
+ {
+ THROWS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
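+ // Try to grab the single pooled sink with an interlocked exchange; if the
+ // pool is empty, allocate a fresh one, otherwise take an extra reference.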
+ AssemblySink* ret = FastInterlockExchangePointer(&m_pAsyncPool, NULL);
+
+ if(ret == NULL)
+ ret = new AssemblySink(this);
+ else
+ ret->AddRef();
+ ret->SetAssemblySpec(pSpec);
+ RETURN ret;
+}
+#endif
+
+AppDomain* AppDomain::s_pAppDomainToRaiseUnloadEvent;
+BOOL AppDomain::s_fProcessUnloadDomainEvent = FALSE;
+
+#ifndef CROSSGEN_COMPILE
+
+void AppDomain::RaiseUnloadDomainEvent_Wrapper(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ AppDomain* pDomain = (AppDomain *) ptr;
+ pDomain->RaiseUnloadDomainEvent();
+}
+
+void AppDomain::ProcessUnloadDomainEventOnFinalizeThread()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread && IsFinalizerThread());
+
+ // if we are not unloading a domain right now, do not process the event
+ if (SystemDomain::AppDomainBeingUnloaded() == NULL)
+ {
+ s_pAppDomainToRaiseUnloadEvent->SetStage(STAGE_UNLOAD_REQUESTED);
+ s_pAppDomainToRaiseUnloadEvent->EnableADUnloadWorker(
+ s_pAppDomainToRaiseUnloadEvent->IsRudeUnload()?EEPolicy::ADU_Rude:EEPolicy::ADU_Safe);
+ FastInterlockExchangePointer(&s_pAppDomainToRaiseUnloadEvent, NULL);
+ return;
+ }
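+ // Raise the managed DomainUnload event on the finalizer thread. The flag and
+ // the pending-domain pointer are toggled with interlocked operations so the
+ // unloading thread (spinning in Exit) can tell when the event has been delivered.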
+ FastInterlockExchange((LONG*)&s_fProcessUnloadDomainEvent, TRUE);
+ AppDomain::EnableADUnloadWorkerForFinalizer();
+ pThread->SetThreadStateNC(Thread::TSNC_RaiseUnloadEvent);
+ s_pAppDomainToRaiseUnloadEvent->RaiseUnloadDomainEvent();
+ pThread->ResetThreadStateNC(Thread::TSNC_RaiseUnloadEvent);
+ s_pAppDomainToRaiseUnloadEvent->EnableADUnloadWorker(
+ s_pAppDomainToRaiseUnloadEvent->IsRudeUnload()?EEPolicy::ADU_Rude:EEPolicy::ADU_Safe);
+ FastInterlockExchangePointer(&s_pAppDomainToRaiseUnloadEvent, NULL);
+ FastInterlockExchange((LONG*)&s_fProcessUnloadDomainEvent, FALSE);
+
+ if (pThread->IsAbortRequested())
+ {
+ pThread->UnmarkThreadForAbort(Thread::TAR_Thread);
+ }
+}
+
+void AppDomain::RaiseUnloadDomainEvent()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ Thread *pThread = GetThread();
+ if (this != pThread->GetDomain())
+ {
+ pThread->DoADCallBack(this, AppDomain::RaiseUnloadDomainEvent_Wrapper, this,ADV_FINALIZER|ADV_COMPILATION);
+ }
+ else
+ {
+ struct _gc
+ {
+ APPDOMAINREF Domain;
+ OBJECTREF Delegate;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ gc.Domain = (APPDOMAINREF) GetRawExposedObject();
+ if (gc.Domain != NULL)
+ {
+ gc.Delegate = gc.Domain->m_pDomainUnloadEventHandler;
+ if (gc.Delegate != NULL)
+ DistributeEventReliably(&gc.Delegate, (OBJECTREF *) &gc.Domain);
+ }
+ GCPROTECT_END();
+ }
+ }
+ EX_CATCH
+ {
+ //@TODO call a MDA here
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+void AppDomain::RaiseLoadingAssemblyEvent(DomainAssembly *pAssembly)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ PRECONDITION(this == GetAppDomain());
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+ FAULT_NOT_FATAL();
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ EX_TRY
+ {
+ struct _gc {
+ APPDOMAINREF AppDomainRef;
+ OBJECTREF orThis;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ if ((gc.AppDomainRef = (APPDOMAINREF) GetRawExposedObject()) != NULL) {
+ if (gc.AppDomainRef->m_pAssemblyEventHandler != NULL)
+ {
+ ARG_SLOT args[2];
+ GCPROTECT_BEGIN(gc);
+
+ gc.orThis = pAssembly->GetExposedAssemblyObject();
+
+ MethodDescCallSite onAssemblyLoad(METHOD__APP_DOMAIN__ON_ASSEMBLY_LOAD, &gc.orThis);
+
+ // GetExposedAssemblyObject may cause a gc, so call this before filling args[0]
+ args[1] = ObjToArgSlot(gc.orThis);
+ args[0] = ObjToArgSlot(gc.AppDomainRef);
+
+ onAssemblyLoad.Call(args);
+
+ GCPROTECT_END();
+ }
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+
+BOOL AppDomain::OnUnhandledException(OBJECTREF *pThrowable, BOOL isTerminating/*=TRUE*/)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ BOOL retVal= FALSE;
+
+ GCX_COOP();
+
+ // The Everett behavior was to send the unhandled exception event only to the Default
+ // AppDomain (since that's the only place that exceptions actually went unhandled).
+ //
+ // During Whidbey development, we broadcast the event to all AppDomains in the process.
+ //
+ // But the official shipping Whidbey behavior is that the unhandled exception event is
+ // sent to the Default AppDomain and to whatever AppDomain the exception went unhandled
+ // in. To achieve this, we declare the exception to be unhandled *BEFORE* we marshal
+ // it back to the Default AppDomain at the base of the Finalizer, threadpool and managed
+ // threads.
+ //
+ // The rationale for sending the event to the Default AppDomain as well as the one the
+ // exception went unhandled in is:
+ //
+ // 1) This is compatible with the pre-Whidbey behavior, where only the Default AppDomain
+ // received the notification.
+ //
+ // 2) This is convenient for hosts, which don't want to bother injecting listeners into
+ // every single AppDomain.
+
+ AppDomain *pAppDomain = GetAppDomain();
+ OBJECTREF orSender = 0;
+
+ GCPROTECT_BEGIN(orSender);
+
+ orSender = pAppDomain->GetRawExposedObject();
+
+ retVal = pAppDomain->RaiseUnhandledExceptionEventNoThrow(&orSender, pThrowable, isTerminating);
+#ifndef FEATURE_CORECLR
+// CoreCLR#520:
+// To make this work correctly we need the changes for coreclr 473
+ if (pAppDomain != SystemDomain::System()->DefaultDomain())
+ retVal |= SystemDomain::System()->DefaultDomain()->RaiseUnhandledExceptionEventNoThrow
+ (&orSender, pThrowable, isTerminating);
+#endif
+
+ GCPROTECT_END();
+
+ return retVal;
+}
+
+
+// Move outside of the AppDomain iteration, to avoid issues with the GC Frames being outside
+// the domain transition. This is a chronic issue that causes us to report roots for an AppDomain
+// after we have left it. This causes problems with AppDomain unloading that we only find
+// with stress coverage.
+void AppDomain::RaiseOneExitProcessEvent()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ struct _gc
+ {
+ APPDOMAINREF Domain;
+ OBJECTREF Delegate;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ EX_TRY {
+
+ GCPROTECT_BEGIN(gc);
+ gc.Domain = (APPDOMAINREF) SystemDomain::GetCurrentDomain()->GetRawExposedObject();
+ if (gc.Domain != NULL)
+ {
+ gc.Delegate = gc.Domain->m_pProcessExitEventHandler;
+ if (gc.Delegate != NULL)
+ DistributeEventReliably(&gc.Delegate, (OBJECTREF *) &gc.Domain);
+ }
+ GCPROTECT_END();
+
+ } EX_CATCH {
+ } EX_END_CATCH(SwallowAllExceptions);
+}
+
+// Local wrapper used in AppDomain::RaiseExitProcessEvent,
+// introduced solely to avoid stack overflow because of _alloca in the loop.
+// It's just the factored-out body of the loop, but it has to be a member method of AppDomain,
+// because it calls private RaiseOneExitProcessEvent
+/*static*/ void AppDomain::RaiseOneExitProcessEvent_Wrapper(AppDomainIterator* pi)
+{
+
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ EX_TRY {
+ ENTER_DOMAIN_PTR(pi->GetDomain(),ADV_ITERATOR)
+ AppDomain::RaiseOneExitProcessEvent();
+ END_DOMAIN_TRANSITION;
+ } EX_CATCH {
+ } EX_END_CATCH(SwallowAllExceptions);
+}
+
+static LONG s_ProcessedExitProcessEventCount = 0;
+
+LONG GetProcessedExitProcessEventCount()
+{
+ LIMITED_METHOD_CONTRACT;
+ return s_ProcessedExitProcessEventCount;
+}
+
+void AppDomain::RaiseExitProcessEvent()
+{
+ if (!g_fEEStarted)
+ return;
+
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ // Only finalizer thread during shutdown can call this function.
+ _ASSERTE ((g_fEEShutDown&ShutDown_Finalize1) && GetThread() == FinalizerThread::GetFinalizerThread());
+
+ _ASSERTE (GetThread()->PreemptiveGCDisabled());
+
+ _ASSERTE (GetThread()->GetDomain()->IsDefaultDomain());
+
+ AppDomainIterator i(TRUE);
+ while (i.Next())
+ {
+ RaiseOneExitProcessEvent_Wrapper(&i);
+ FastInterlockIncrement(&s_ProcessedExitProcessEventCount);
+ }
+}
+
+#ifndef FEATURE_CORECLR
+void AppDomain::RaiseUnhandledExceptionEvent_Wrapper(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+ AppDomain::RaiseUnhandled_Args *args = (AppDomain::RaiseUnhandled_Args *) ptr;
+
+ struct _gc {
+ OBJECTREF orThrowable;
+ OBJECTREF orSender;
+ } gc;
+
+ ZeroMemory(&gc, sizeof(gc));
+
+ _ASSERTE(args->pTargetDomain == GetAppDomain());
+ GCPROTECT_BEGIN(gc);
+ EX_TRY
+ {
+ SetObjectReference(&gc.orThrowable,
+ AppDomainHelper::CrossContextCopyFrom(args->pExceptionDomain,
+ args->pThrowable),
+ args->pTargetDomain);
+
+ SetObjectReference(&gc.orSender,
+ AppDomainHelper::CrossContextCopyFrom(args->pExceptionDomain,
+ args->pSender),
+ args->pTargetDomain);
+ }
+ EX_CATCH
+ {
+ SetObjectReference(&gc.orThrowable, GET_THROWABLE(), args->pTargetDomain);
+ SetObjectReference(&gc.orSender, GetAppDomain()->GetRawExposedObject(), args->pTargetDomain);
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ *(args->pResult) = args->pTargetDomain->RaiseUnhandledExceptionEvent(&gc.orSender,
+ &gc.orThrowable,
+ args->isTerminating);
+ GCPROTECT_END();
+
+}
+#endif //!FEATURE_CORECLR
+
+BOOL
+AppDomain::RaiseUnhandledExceptionEventNoThrow(OBJECTREF *pSender, OBJECTREF *pThrowable, BOOL isTerminating)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ BOOL bRetVal=FALSE;
+
+ EX_TRY
+ {
+ bRetVal = RaiseUnhandledExceptionEvent(pSender, pThrowable, isTerminating);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions) // Swallow any errors.
+ return bRetVal;
+
+}
+
+BOOL
+AppDomain::HasUnhandledExceptionEventHandler()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER; //essential
+ NOTHROW;
+ }
+ CONTRACTL_END;
+ if (!CanThreadEnter(GetThread()))
+ return FALSE;
+ if (GetRawExposedObject()==NULL)
+ return FALSE;
+ return (((APPDOMAINREF)GetRawExposedObject())->m_pUnhandledExceptionEventHandler!=NULL);
+}
+
+BOOL
+AppDomain::RaiseUnhandledExceptionEvent(OBJECTREF *pSender, OBJECTREF *pThrowable, BOOL isTerminating)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (!HasUnhandledExceptionEventHandler())
+ return FALSE;
+
+ BOOL result = FALSE;
+
+ _ASSERTE(pThrowable != NULL && IsProtectedByGCFrame(pThrowable));
+ _ASSERTE(pSender != NULL && IsProtectedByGCFrame(pSender));
+
+#ifndef FEATURE_CORECLR
+ Thread *pThread = GetThread();
+ if (this != pThread->GetDomain())
+ {
+ RaiseUnhandled_Args args = {pThread->GetDomain(), this, pSender, pThrowable, isTerminating, &result};
+ // call through DoCallBack with a domain transition
+ pThread->DoADCallBack(this, AppDomain::RaiseUnhandledExceptionEvent_Wrapper, &args, ADV_DEFAULTAD);
+ return result;
+ }
+#else
+ _ASSERTE(this == GetThread()->GetDomain());
+#endif
+
+
+ OBJECTREF orDelegate = NULL;
+
+ GCPROTECT_BEGIN(orDelegate);
+
+ APPDOMAINREF orAD = (APPDOMAINREF) GetAppDomain()->GetRawExposedObject();
+
+ if (orAD != NULL)
+ {
+ orDelegate = orAD->m_pUnhandledExceptionEventHandler;
+ if (orDelegate != NULL)
+ {
+ result = TRUE;
+ DistributeUnhandledExceptionReliably(&orDelegate, pSender, pThrowable, isTerminating);
+ }
+ }
+ GCPROTECT_END();
+ return result;
+}
+
+
+#ifndef FEATURE_CORECLR
+// Create a domain based on a string name
+AppDomain* AppDomain::CreateDomainContext(LPCWSTR fileName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if(fileName == NULL) return NULL;
+
+ AppDomain* pDomain = NULL;
+
+ MethodDescCallSite valCreateDomain(METHOD__APP_DOMAIN__VAL_CREATE_DOMAIN);
+
+ STRINGREF pFilePath = NULL;
+ GCPROTECT_BEGIN(pFilePath);
+ pFilePath = StringObject::NewString(fileName);
+
+ ARG_SLOT args[1] =
+ {
+ ObjToArgSlot(pFilePath),
+ };
+
+ APPDOMAINREF pDom = (APPDOMAINREF) valCreateDomain.Call_RetOBJECTREF(args);
+ if(pDom != NULL)
+ {
+ Context* pContext = Context::GetExecutionContext(pDom);
+ if(pContext)
+ {
+ pDomain = pContext->GetDomain();
+ }
+ }
+ GCPROTECT_END();
+
+ return pDomain;
+}
+#endif // !FEATURE_CORECLR
+
+#endif // CROSSGEN_COMPILE
+
+// You must be in the correct context before calling this
+// routine. Therefore, it is only good for initializing the
+// default domain.
+void AppDomain::InitializeDomainContext(BOOL allowRedirects,
+ LPCWSTR pwszPath,
+ LPCWSTR pwszConfig)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (NingenEnabled())
+ {
+#ifdef FEATURE_FUSION
+ CreateFusionContext();
+#endif // FEATURE_FUSION
+
+#ifdef FEATURE_VERSIONING
+ CreateFusionContext();
+#endif // FEATURE_VERSIONING
+
+ return;
+ }
+
+#ifndef CROSSGEN_COMPILE
+ struct _gc {
+ STRINGREF pFilePath;
+ STRINGREF pConfig;
+ OBJECTREF ref;
+ PTRARRAYREF propertyNames;
+ PTRARRAYREF propertyValues;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ if(pwszPath)
+ {
+ gc.pFilePath = StringObject::NewString(pwszPath);
+ }
+
+ if(pwszConfig)
+ {
+ gc.pConfig = StringObject::NewString(pwszConfig);
+ }
+
+#ifndef FEATURE_CORECLR
+ StringArrayList *pPropertyNames;
+ StringArrayList *pPropertyValues;
+ CorHost2::GetDefaultAppDomainProperties(&pPropertyNames, &pPropertyValues);
+
+ _ASSERTE(pPropertyNames->GetCount() == pPropertyValues->GetCount());
+
+ if (pPropertyNames->GetCount() > 0)
+ {
+ gc.propertyNames = (PTRARRAYREF)AllocateObjectArray(pPropertyNames->GetCount(), g_pStringClass);
+ gc.propertyValues = (PTRARRAYREF)AllocateObjectArray(pPropertyValues->GetCount(), g_pStringClass);
+
+ for (DWORD i = 0; i < pPropertyNames->GetCount(); ++i)
+ {
+ STRINGREF propertyName = StringObject::NewString(pPropertyNames->Get(i));
+ gc.propertyNames->SetAt(i, propertyName);
+
+ STRINGREF propertyValue = StringObject::NewString(pPropertyValues->Get(i));
+ gc.propertyValues->SetAt(i, propertyValue);
+ }
+ }
+#endif // !FEATURE_CORECLR
+
+ if ((gc.ref = GetExposedObject()) != NULL)
+ {
+ MethodDescCallSite setupDomain(METHOD__APP_DOMAIN__SETUP_DOMAIN);
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(gc.ref),
+ BoolToArgSlot(allowRedirects),
+ ObjToArgSlot(gc.pFilePath),
+ ObjToArgSlot(gc.pConfig),
+ ObjToArgSlot(gc.propertyNames),
+ ObjToArgSlot(gc.propertyValues)
+ };
+ setupDomain.Call(args);
+ }
+ GCPROTECT_END();
+
+ CacheStringsForDAC();
+#endif // CROSSGEN_COMPILE
+}
+
+#ifdef FEATURE_FUSION
+
+void AppDomain::SetupLoaderOptimization(DWORD optimization)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ if ((GetExposedObject()) != NULL)
+ {
+ MethodDescCallSite setupLoaderOptimization(METHOD__APP_DOMAIN__SETUP_LOADER_OPTIMIZATION);
+
+ ARG_SLOT args[2] =
+ {
+ ObjToArgSlot(GetExposedObject()),
+ optimization
+ };
+ setupLoaderOptimization.Call(args);
+ }
+}
+
+// The fusion context should only be null when the appdomain is being set up,
+// and there should be no reason to protect the creation.
+IApplicationContext *AppDomain::CreateFusionContext()
+{
+ CONTRACT(IApplicationContext *)
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ if (m_pFusionContext == NULL)
+ {
+ ETWOnStartup (FusionAppCtx_V1, FusionAppCtxEnd_V1);
+
+ GCX_PREEMP();
+
+ SafeComHolderPreemp<IApplicationContext> pFusionContext;
+
+ IfFailThrow(FusionBind::CreateFusionContext(NULL, &pFusionContext));
+
+#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORECLR)
+ CLRPrivBinderWinRT * pWinRtBinder;
+ if (AppX::IsAppXProcess())
+ { // Note: Fusion binder is used in AppX to bind .NET Fx assemblies - some of them depend on .winmd files (e.g. System.Runtime.WindowsRuntime.dll)
+ CLRPrivBinderAppX * pAppXBinder = CLRPrivBinderAppX::GetOrCreateBinder();
+ pWinRtBinder = pAppXBinder->GetWinRtBinder();
+ }
+ else
+ {
+ pWinRtBinder = m_pWinRtBinder;
+ }
+ _ASSERTE(pWinRtBinder != nullptr);
+
+ IfFailThrow(SetApplicationContext_WinRTBinder(
+ pFusionContext,
+ static_cast<IBindContext *>(pWinRtBinder)));
+#endif
+
+#ifdef FEATURE_PREJIT
+ if (NGENImagesAllowed())
+ {
+ // Set the native image settings so fusion will bind native images
+ SString zapString(g_pConfig->ZapSet());
+ FusionBind::SetApplicationContextStringProperty(pFusionContext, ACTAG_ZAP_STRING, zapString);
+ FusionBind::SetApplicationContextDWORDProperty(pFusionContext, ACTAG_ZAP_CONFIG_FLAGS,
+ PEFile::GetNativeImageConfigFlags());
+ }
+#endif // FEATURE_PREJIT
+
+ pFusionContext.SuppressRelease();
+ m_pFusionContext = pFusionContext;
+
+ DWORD dwId = m_dwId.m_dwId;
+ IfFailThrow(m_pFusionContext->Set(ACTAG_APP_DOMAIN_ID, &dwId, sizeof(DWORD), 0));
+
+ if (HasLoadContextHostBinder())
+ FusionBind::SetApplicationContextDWORDProperty(pFusionContext, ACTAG_FX_ONLY,1);
+
+ }
+
+ RETURN m_pFusionContext;
+}
+
+void AppDomain::TurnOnBindingRedirects()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+
+ if ((GetExposedObject()) != NULL)
+ {
+ MethodDescCallSite turnOnBindingRedirects(METHOD__APP_DOMAIN__TURN_ON_BINDING_REDIRECTS);
+ ARG_SLOT args[1] =
+ {
+ ObjToArgSlot(GetExposedObject()),
+ };
+ turnOnBindingRedirects.Call(args);
+ }
+
+ IfFailThrow(m_pFusionContext->Set(ACTAG_DISALLOW_APP_BINDING_REDIRECTS,
+ NULL,
+ 0,
+ 0));
+}
+
+void AppDomain::SetupExecutableFusionContext(LPCWSTR exePath)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(GetAppDomain() == this);
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ struct _gc {
+ STRINGREF pFilePath;
+ OBJECTREF ref;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ gc.pFilePath = StringObject::NewString(exePath);
+
+ if ((gc.ref = GetExposedObject()) != NULL)
+ {
+ MethodDescCallSite setDomainContext(METHOD__APP_DOMAIN__SET_DOMAIN_CONTEXT, &gc.ref);
+ ARG_SLOT args[2] =
+ {
+ ObjToArgSlot(gc.ref),
+ ObjToArgSlot(gc.pFilePath),
+ };
+ setDomainContext.Call(args);
+ }
+
+ GCPROTECT_END();
+
+}
+
+BOOL AppDomain::SetContextProperty(IApplicationContext* pFusionContext,
+ LPCWSTR pProperty, OBJECTREF* obj)
+
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (GetAppDomain()->HasLoadContextHostBinder())
+ COMPlusThrow(kNotSupportedException);
+
+
+ if(obj) {
+ if ((*obj) != NULL){
+ MethodTable* pMT = (*obj)->GetMethodTable();
+ DWORD lgth;
+
+ if(MscorlibBinder::IsClass(pMT, CLASS__STRING)) {
+
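+ // Copy the managed string into a temporary buffer, stripping a single
+ // trailing '/' before handing the value to the Fusion context.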
+ lgth = (ObjectToSTRINGREF(*(StringObject**)obj))->GetStringLength();
+ CQuickBytes qb;
+ LPWSTR wszValue = (LPWSTR) qb.AllocThrows((lgth+1)*sizeof(WCHAR));
+ memcpy(wszValue, (ObjectToSTRINGREF(*(StringObject**)obj))->GetBuffer(), lgth*sizeof(WCHAR));
+ if(lgth > 0 && wszValue[lgth-1] == '/')
+ lgth--;
+ wszValue[lgth] = W('\0');
+
+ LOG((LF_LOADER,
+ LL_INFO10,
+ "Set: %S: *%S*.\n",
+ pProperty, wszValue));
+
+ IfFailThrow(pFusionContext->Set(pProperty,
+ wszValue,
+ (lgth+1) * sizeof(WCHAR),
+ 0));
+ }
+ else {
+ // Pin byte array for loading
+ Wrapper<OBJECTHANDLE, DoNothing, DestroyPinningHandle> handle(
+ GetAppDomain()->CreatePinningHandle(*obj));
+
+ const BYTE *pbArray = ((U1ARRAYREF)(*obj))->GetDirectConstPointerToNonObjectElements();
+ DWORD cbArray = (*obj)->GetNumComponents();
+
+ IfFailThrow(pFusionContext->Set(pProperty,
+ (LPVOID) pbArray,
+ cbArray,
+ 0));
+ }
+ }
+ else { // Un-set the property
+ IfFailThrow(pFusionContext->Set(pProperty,
+ NULL,
+ 0,
+ 0));
+ }
+ }
+
+ return TRUE;
+}
+#endif // FEATURE_FUSION
+
+#ifdef FEATURE_VERSIONING
+IUnknown *AppDomain::CreateFusionContext()
+{
+ CONTRACT(IUnknown *)
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ if (!m_pFusionContext)
+ {
+ ETWOnStartup (FusionAppCtx_V1, FusionAppCtxEnd_V1);
+ CLRPrivBinderCoreCLR *pTPABinder = NULL;
+
+ GCX_PREEMP();
+
+ // Initialize the assembly binder for the default context loads for CoreCLR.
+ IfFailThrow(CCoreCLRBinderHelper::DefaultBinderSetupContext(GetId().m_dwId, &pTPABinder));
+ m_pFusionContext = reinterpret_cast<IUnknown *>(pTPABinder);
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+ // By default, initial binding context setup for CoreCLR is also the TPABinding context
+ (m_pTPABinderContext = pTPABinder)->AddRef();
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+ }
+
+ RETURN m_pFusionContext;
+}
+#endif // FEATURE_VERSIONING
+
+#ifdef FEATURE_FUSION
+LPWSTR AppDomain::GetDynamicDir()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
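+ // The dynamic directory is computed lazily with double-checked locking:
+ // the quick NULL check is confirmed under the domain lock before querying
+ // Fusion and caching the result on the low-frequency heap.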
+ if (m_pwDynamicDir == NULL) {
+
+ BaseDomain::LockHolder lh(this);
+
+ if(m_pwDynamicDir == NULL) {
+ IApplicationContext* pFusionContext = GetFusionContext();
+ _ASSERTE(pFusionContext);
+
+ HRESULT hr = S_OK;
+ DWORD dwSize = 0;
+ hr = pFusionContext->GetDynamicDirectory(NULL, &dwSize);
+ AllocMemHolder<WCHAR> tempDynamicDir;
+
+ if(hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)) {
+ tempDynamicDir = GetLowFrequencyHeap()->AllocMem(S_SIZE_T(dwSize) * S_SIZE_T(sizeof(WCHAR)));
+ hr = pFusionContext->GetDynamicDirectory(tempDynamicDir, &dwSize);
+ }
+ if(hr==HRESULT_FROM_WIN32(ERROR_NOT_FOUND))
+ return NULL;
+ IfFailThrow(hr);
+
+ tempDynamicDir.SuppressRelease();
+ m_pwDynamicDir = tempDynamicDir;
+ }
+ // lh out of scope here
+ }
+
+ return m_pwDynamicDir;
+}
+#endif //FEATURE_FUSION
+
+
+//---------------------------------------------------------------------------------------
+//
+// AppDomain::IsDebuggerAttached - is a debugger attached to this process
+//
+// Arguments:
+// None
+//
+// Return Value:
+// TRUE if a debugger is attached to this process, FALSE otherwise.
+//
+// Notes:
+// This is identical to CORDebuggerAttached. This exists independently for legacy reasons - we used to
+// support attaching to individual AppDomains. This should probably go away eventually.
+//
+
+BOOL AppDomain::IsDebuggerAttached()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (CORDebuggerAttached())
+ {
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+}
+
+#ifdef DEBUGGING_SUPPORTED
+
+// This is called from the debugger to request notification events from
+// Assemblies, Modules, Types in this appdomain.
+BOOL AppDomain::NotifyDebuggerLoad(int flags, BOOL attaching)
+{
+ WRAPPER_NO_CONTRACT;
+ BOOL result = FALSE;
+
+ if (!attaching && !IsDebuggerAttached())
+ return FALSE;
+
+ AssemblyIterator i;
+
+ // Attach to our assemblies
+ LOG((LF_CORDB, LL_INFO100, "AD::NDA: Iterating assemblies\n"));
+ i = IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded | kIncludeLoading | kIncludeExecution));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+ while (i.Next(pDomainAssembly.This()))
+ {
+ result = (pDomainAssembly->NotifyDebuggerLoad(flags, attaching) ||
+ result);
+ }
+
+ return result;
+}
+
+void AppDomain::NotifyDebuggerUnload()
+{
+ WRAPPER_NO_CONTRACT;
+ if (!IsDebuggerAttached())
+ return;
+
+ LOG((LF_CORDB, LL_INFO10, "AD::NDD domain [%d] %#08x %ls\n",
+ GetId().m_dwId, this, GetFriendlyNameForLogging()));
+
+ LOG((LF_CORDB, LL_INFO100, "AD::NDD: Interating domain bound assemblies\n"));
+ AssemblyIterator i = IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded | kIncludeLoading | kIncludeExecution));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ // Detach from our assemblies
+ while (i.Next(pDomainAssembly.This()))
+ {
+ LOG((LF_CORDB, LL_INFO100, "AD::NDD: Iterating assemblies\n"));
+ pDomainAssembly->NotifyDebuggerUnload();
+ }
+}
+#endif // DEBUGGING_SUPPORTED
+
+void AppDomain::SetSystemAssemblyLoadEventSent(BOOL fFlag)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (fFlag == TRUE)
+ m_dwFlags |= LOAD_SYSTEM_ASSEMBLY_EVENT_SENT;
+ else
+ m_dwFlags &= ~LOAD_SYSTEM_ASSEMBLY_EVENT_SENT;
+}
+
+BOOL AppDomain::WasSystemAssemblyLoadEventSent(void)
+{
+ LIMITED_METHOD_CONTRACT;
+ return ((m_dwFlags & LOAD_SYSTEM_ASSEMBLY_EVENT_SENT) == 0) ? FALSE : TRUE;
+}
+
+#ifndef CROSSGEN_COMPILE
+// U->M thunks created in this domain and not associated with a delegate.
+UMEntryThunkCache *AppDomain::GetUMEntryThunkCache()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (!m_pUMEntryThunkCache)
+ {
+ UMEntryThunkCache *pUMEntryThunkCache = new UMEntryThunkCache(this);
+
+ if (FastInterlockCompareExchangePointer(&m_pUMEntryThunkCache, pUMEntryThunkCache, NULL) != NULL)
+ {
+ // some thread swooped in and set the field
+ delete pUMEntryThunkCache;
+ }
+ }
+ _ASSERTE(m_pUMEntryThunkCache);
+ return m_pUMEntryThunkCache;
+}
+
+#ifdef FEATURE_COMINTEROP
+
+ComCallWrapperCache *AppDomain::GetComCallWrapperCache()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (! m_pComCallWrapperCache)
+ {
+ BaseDomain::LockHolder lh(this);
+
+ if (! m_pComCallWrapperCache)
+ m_pComCallWrapperCache = ComCallWrapperCache::Create(this);
+ }
+ _ASSERTE(m_pComCallWrapperCache);
+ return m_pComCallWrapperCache;
+}
+
+RCWRefCache *AppDomain::GetRCWRefCache()
+{
+ CONTRACT(RCWRefCache*)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
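+ // Publish the cache lock-free: whoever wins the interlocked compare-exchange
+ // keeps its instance; the loser's holder frees the redundant allocation.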
+ if (!m_pRCWRefCache) {
+ NewHolder<RCWRefCache> pRCWRefCache = new RCWRefCache(this);
+ if (FastInterlockCompareExchangePointer(&m_pRCWRefCache, (RCWRefCache *)pRCWRefCache, NULL) == NULL)
+ {
+ pRCWRefCache.SuppressRelease();
+ }
+ }
+ RETURN m_pRCWRefCache;
+}
+
+RCWCache *AppDomain::CreateRCWCache()
+{
+ CONTRACT(RCWCache*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // Initialize the global RCW cleanup list here as well. This is so that it
+ // is guaranteed to exist if any RCWs are created, but it is not created
+ // unconditionally.
+ if (!g_pRCWCleanupList)
+ {
+ SystemDomain::LockHolder lh;
+
+ if (!g_pRCWCleanupList)
+ g_pRCWCleanupList = new RCWCleanupList();
+ }
+ _ASSERTE(g_pRCWCleanupList);
+
+ {
+ BaseDomain::LockHolder lh(this);
+
+ if (!m_pRCWCache)
+ m_pRCWCache = new RCWCache(this);
+ }
+
+ RETURN m_pRCWCache;
+}
+
+void AppDomain::ReleaseRCWs(LPVOID pCtxCookie)
+{
+ WRAPPER_NO_CONTRACT;
+ if (m_pRCWCache)
+ m_pRCWCache->ReleaseWrappersWorker(pCtxCookie);
+
+ RemoveWinRTFactoryObjects(pCtxCookie);
+}
+
+void AppDomain::DetachRCWs()
+{
+ WRAPPER_NO_CONTRACT;
+ if (m_pRCWCache)
+ m_pRCWCache->DetachWrappersWorker();
+}
+
+#endif // FEATURE_COMINTEROP
+
+BOOL AppDomain::CanThreadEnter(Thread *pThread)
+{
+ WRAPPER_NO_CONTRACT;
+
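+ // Entry is progressively restricted as the domain shuts down: everyone may
+ // enter before STAGE_EXITED, then only the unloading thread (until
+ // finalization starts) and the finalizer thread (until finalization completes).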
+ if (m_Stage < STAGE_EXITED)
+ return TRUE;
+
+ if (pThread == SystemDomain::System()->GetUnloadingThread())
+ return m_Stage < STAGE_FINALIZING;
+ if (pThread == FinalizerThread::GetFinalizerThread())
+ return m_Stage < STAGE_FINALIZED;
+
+ return FALSE;
+}
+
+void AppDomain::AllowThreadEntrance(AppDomain * pApp)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(pApp));
+ }
+ CONTRACTL_END;
+
+ if (pApp->GetUnloadRequestThread() == NULL)
+ {
+ // This is an asynchronous unload, either by a host, or by AppDomain.Unload from the AD unload event.
+ if (!pApp->IsUnloadingFromUnloadEvent())
+ {
+ pApp->SetStage(STAGE_UNLOAD_REQUESTED);
+ pApp->EnableADUnloadWorker(
+ pApp->IsRudeUnload()?EEPolicy::ADU_Rude:EEPolicy::ADU_Safe);
+ return;
+ }
+ }
+
+ SystemDomain::LockHolder lh; // we don't want to reopen the appdomain while another thread may be preparing to unload it
+
+#ifdef FEATURE_COMINTEROP
+ if (pApp->m_pComCallWrapperCache)
+ pApp->m_pComCallWrapperCache->ResetDomainIsUnloading();
+#endif // FEATURE_COMINTEROP
+
+ pApp->SetStage(STAGE_OPEN);
+}
+
+void AppDomain::RestrictThreadEntrance(AppDomain * pApp)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ DISABLED(GC_TRIGGERS);
+ MODE_ANY;
+ DISABLED(FORBID_FAULT);
+ PRECONDITION(CheckPointer(pApp));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_COMINTEROP
+ // Set the flag on our CCW cache so stubs won't enter
+ if (pApp->m_pComCallWrapperCache)
+ pApp->m_pComCallWrapperCache->SetDomainIsUnloading();
+#endif // FEATURE_COMINTEROP
+
+ SystemDomain::LockHolder lh; // we don't want to reopen the appdomain while another thread may be preparing to unload it
+ // Release our ID so remoting and thread pool won't enter
+ pApp->SetStage(STAGE_EXITED);
+};
+
+void AppDomain::Exit(BOOL fRunFinalizers, BOOL fAsyncExit)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_APPDOMAIN | LF_CORDB, LL_INFO10, "AppDomain::Exiting domain [%d] %#08x %ls\n",
+ GetId().m_dwId, this, GetFriendlyNameForLogging()));
+
+ RestrictEnterHolder RestrictEnter(this);
+
+ {
+ SystemDomain::LockHolder lh; // we don't want to close the appdomain while another thread may be preparing to unload it
+ SetStage(STAGE_EXITING); // Note that we're trying to exit
+ }
+
+ // Raise the event indicating the domain is being unloaded.
+ if (GetDefaultContext())
+ {
+ FastInterlockExchangePointer(&s_pAppDomainToRaiseUnloadEvent, this);
+
+ DWORD timeout = GetEEPolicy()->GetTimeout(m_fRudeUnload?OPR_AppDomainRudeUnload : OPR_AppDomainUnload);
+ //if (timeout == INFINITE)
+ //{
+ // timeout = 20000; // 20 seconds
+ //}
+ DWORD timeoutForFinalizer = GetEEPolicy()->GetTimeout(OPR_FinalizerRun);
+ ULONGLONG curTime = CLRGetTickCount64();
+ ULONGLONG endTime = 0;
+ if (timeout != INFINITE)
+ {
+ endTime = curTime + timeout;
+ // We will try to kill the AD unload event handler if it takes too long, and then move on to the next registered handler.
+ timeout /= 5;
+ }
+
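+ // Wait for the finalizer thread to deliver the unload event; if the deadline
+ // passes while the event is still pending, fail the unload with
+ // CannotUnloadAppDomainException.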
+ while (s_pAppDomainToRaiseUnloadEvent != NULL)
+ {
+ FinalizerThread::FinalizerThreadWait(s_fProcessUnloadDomainEvent?timeout:timeoutForFinalizer);
+ if (endTime != 0 && s_pAppDomainToRaiseUnloadEvent != NULL)
+ {
+ if (CLRGetTickCount64() >= endTime)
+ {
+ SString sThreadId;
+ sThreadId.Printf(W("%x"), FinalizerThread::GetFinalizerThread()->GetThreadId());
+ COMPlusThrow(kCannotUnloadAppDomainException,
+ IDS_EE_ADUNLOAD_CANT_UNWIND_THREAD,
+ sThreadId);
+ }
+ }
+ }
+ }
+
+ //
+ // Set up blocks so no threads can enter except for the finalizer and the thread
+ // doing the unload.
+ //
+
+ RestrictThreadEntrance(this);
+
+ // Cause existing threads to abort out of this domain. This should ensure all
+ // normal threads are outside the domain, and we've already ensured that no new threads
+ // can enter.
+
+ PerAppDomainTPCountList::AppDomainUnloadingHolder tpAdUnloadHolder(GetTPIndex());
+
+
+ if (!NingenEnabled())
+ {
+ UnwindThreads();
+ }
+
+ TESTHOOKCALL(UnwoundThreads(GetId().m_dwId)) ;
+ ProcessEventForHost(Event_DomainUnload, (PVOID)(UINT_PTR)GetId().m_dwId);
+
+ RestrictEnter.SuppressRelease(); //after this point we don't guarantee appdomain consistency
+#ifdef PROFILING_SUPPORTED
+ // Signal profile if present.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->AppDomainShutdownStarted((AppDomainID) this);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+ COUNTER_ONLY(GetPerfCounters().m_Loading.cAppDomains--);
+ COUNTER_ONLY(GetPerfCounters().m_Loading.cAppDomainsUnloaded++);
+
+ LOG((LF_APPDOMAIN | LF_CORDB, LL_INFO10, "AppDomain::Domain [%d] %#08x %ls is exited.\n",
+ GetId().m_dwId, this, GetFriendlyNameForLogging()));
+
+ ReJitManager::OnAppDomainExit(this);
+
+ // Send ETW events for this domain's unload and potentially iterate through this
+ // domain's modules & assemblies to send events for their unloads as well. This
+ // needs to occur before STAGE_FINALIZED (to ensure everything is there), so we do
+ // this before any finalization occurs at all.
+ ETW::LoaderLog::DomainUnload(this);
+
+ //
+ // Spin running finalizers until we flush them all. We need to make multiple passes
+ // in case the finalizers create more finalizable objects. This is important to clear
+ // the finalizable objects as roots, as well as to actually execute the finalizers. This
+ // will only finalize instances of types that aren't potentially agile because we can't
+ // risk finalizing agile objects. So we will be left with instances of potentially agile types
+ // in handles or statics.
+ //
+ // <TODO>@todo: Need to ensure this will terminate in a reasonable amount of time. Eventually
+ // we should probably start passing FALSE for fRunFinalizers. Also I'm not sure we
+ // guarantee that FinalizerThreadWait will ever terminate in general.</TODO>
+ //
+
+ SetStage(STAGE_FINALIZING);
+
+ // Flush finalizers now.
+ FinalizerThread::UnloadAppDomain(this, fRunFinalizers);
+
+ DWORD timeout = GetEEPolicy()->GetTimeout(m_fRudeUnload?OPR_AppDomainRudeUnload : OPR_AppDomainUnload);
+ ULONGLONG startTime = CLRGetTickCount64();
+ ULONGLONG elapsedTime = 0;
+ DWORD finalizerWait = 0;
+
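+ // Drain this domain's finalizable objects; if the configured timeout elapses
+ // first, escalate to a rude unload and keep waiting without a deadline.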
+ while (FinalizerThread::GetUnloadingAppDomain() != NULL)
+ {
+
+ if (timeout != INFINITE)
+ {
+ elapsedTime = CLRGetTickCount64() - startTime;
+ }
+ if (timeout > elapsedTime)
+ {
+ finalizerWait = timeout - static_cast<DWORD>(elapsedTime);
+ }
+ FinalizerThread::FinalizerThreadWait(finalizerWait); //will set stage to finalized
+ if (timeout != INFINITE && FinalizerThread::GetUnloadingAppDomain() != NULL)
+ {
+ elapsedTime = CLRGetTickCount64() - startTime;
+ if (timeout <= elapsedTime)
+ {
+ SetRudeUnload();
+ // TODO: Consider escalation from RudeAppDomain
+ timeout = INFINITE;
+ }
+ }
+ }
+
+ tpAdUnloadHolder.SuppressRelease();
+ PerAppDomainTPCountList::ResetAppDomainTPCounts(GetTPIndex());
+
+ LOG((LF_APPDOMAIN | LF_CORDB, LL_INFO10, "AppDomain::Domain [%d] %#08x %ls is finalized.\n",
+ GetId().m_dwId, this, GetFriendlyNameForLogging()));
+
+
+ AppDomainRefHolder This(this);
+ AddRef(); // Hold a reference so CloseDomain won't delete us yet
+ CloseDomain(); // Remove ourself from the list of app domains
+
+ // This needs to be done prior to destroying the handle tables below.
+ ReleaseDomainBoundInfo();
+
+ //
+ // It should be impossible to run non-mscorlib code in this domain now.
+ // Cleanup all of our roots except the handles. We do this to allow as many
+ // finalizers as possible to run correctly. If we delete the handles, they
+ // can't run.
+ //
+ if (!NingenEnabled())
+ {
+#ifdef FEATURE_REMOTING
+ EX_TRY
+ {
+ ADID domainId = GetId();
+ MethodDescCallSite domainUnloaded(METHOD__REMOTING_SERVICES__DOMAIN_UNLOADED);
+
+ ARG_SLOT args[1];
+ args[0] = domainId.m_dwId;
+ domainUnloaded.Call(args);
+ }
+ EX_CATCH
+ {
+ //we don't care if it fails
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+#endif // FEATURE_REMOTING
+ }
+
+ ClearGCRoots();
+ ClearGCHandles();
+
+ LOG((LF_APPDOMAIN | LF_CORDB, LL_INFO10, "AppDomain::Domain [%d] %#08x %ls is cleared.\n",
+ GetId().m_dwId, this, GetFriendlyNameForLogging()));
+
+ if (fAsyncExit && fRunFinalizers)
+ {
+ GCX_PREEMP();
+ m_AssemblyCache.Clear();
+ ClearFusionContext();
+ ReleaseFiles();
+ if (!NingenEnabled())
+ {
+ AddMemoryPressure();
+ }
+ }
+ SystemDomain::System()->AddToDelayedUnloadList(this, fAsyncExit);
+ SystemDomain::SetUnloadDomainCleared();
+ if (m_dwId.m_dwId!=0)
+ SystemDomain::ReleaseAppDomainId(m_dwId);
+#ifdef PROFILING_SUPPORTED
+ // Always signal profile if present, even when failed.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->AppDomainShutdownFinished((AppDomainID) this, S_OK);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+}
+
+void AppDomain::Close()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_APPDOMAIN | LF_CORDB, LL_INFO10, "AppDomain::Domain [%d] %#08x %ls is collected.\n",
+ GetId().m_dwId, this, GetFriendlyNameForLogging()));
+
+
+#if CHECK_APP_DOMAIN_LEAKS
+ if (g_pConfig->AppDomainLeaks())
+ // at this point shouldn't have any non-agile objects in the heap because we finalized all the non-agile ones.
+ SyncBlockCache::GetSyncBlockCache()->CheckForUnloadedInstances(GetIndex());
+#endif // CHECK_APP_DOMAIN_LEAKS
+ {
+ GCX_PREEMP();
+ RemoveMemoryPressure();
+ }
+ _ASSERTE(m_cRef>0); //should be alive at this point otherwise iterator can revive us and crash
+ {
+ SystemDomain::LockHolder lh; // Avoid races with AppDomainIterator
+ SetStage(STAGE_CLOSED);
+ }
+
+ // CONSIDER: move releasing remoting cache from managed code to here.
+}
+
+
+void AppDomain::ResetUnloadRequestThread(ADID Id)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ PRECONDITION(!IsADUnloadHelperThread());
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+ AppDomainFromIDHolder ad(Id, TRUE);
+ if(!ad.IsUnloaded() && ad->m_Stage < STAGE_UNLOAD_REQUESTED)
+ {
+ Thread *pThread = ad->GetUnloadRequestThread();
+ if(pThread==GetThread())
+ {
+ ad->m_dwThreadsStillInAppDomain=(ULONG)-1;
+
+ if(pThread)
+ {
+ if (pThread->GetUnloadBoundaryFrame() && pThread->IsBeingAbortedForADUnload())
+ {
+ pThread->UnmarkThreadForAbort(Thread::TAR_ADUnload);
+ }
+ ad->GetUnloadRequestThread()->ResetUnloadBoundaryFrame();
+ pThread->ResetBeginAbortedForADUnload();
+ }
+
+ ad->SetUnloadRequestThread(NULL);
+ }
+ }
+}
+
+
+int g_fADUnloadWorkerOK = -1;
+
+HRESULT AppDomain::UnloadById(ADID dwId, BOOL fSync,BOOL fExceptionsPassThrough)
+{
+ CONTRACTL
+ {
+ if(fExceptionsPassThrough) {THROWS;} else {NOTHROW;}
+ MODE_ANY;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_TRIGGERS);}
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ if (dwId==(ADID)DefaultADID)
+ return COR_E_CANNOTUNLOADAPPDOMAIN;
+
+ Thread *pThread = GetThread();
+
+ // The finalizer thread cannot wait until AD unload is done,
+ // because AD unload is going to wait for Finalizer Thread.
+ if (fSync && pThread == FinalizerThread::GetFinalizerThread() &&
+ !pThread->HasThreadStateNC(Thread::TSNC_RaiseUnloadEvent))
+ return COR_E_CANNOTUNLOADAPPDOMAIN;
+
+
+ // AD unload helper thread should have been created.
+ _ASSERTE (g_fADUnloadWorkerOK == 1);
+
+ _ASSERTE (!IsADUnloadHelperThread());
+
+ BOOL fIsRaisingUnloadEvent = (pThread != NULL && pThread->HasThreadStateNC(Thread::TSNC_RaiseUnloadEvent));
+
+ if (fIsRaisingUnloadEvent)
+ {
+ AppDomainFromIDHolder pApp(dwId, TRUE, AppDomainFromIDHolder::SyncType_GC);
+
+ if (pApp.IsUnloaded() || ! pApp->CanLoadCode() || pApp->GetId().m_dwId == 0)
+ return COR_E_APPDOMAINUNLOADED;
+
+ pApp->EnableADUnloadWorker();
+
+ return S_FALSE;
+ }
+
+
+ ADUnloadSinkHolder pSink;
+
+ {
+ SystemDomain::LockHolder ulh;
+
+ AppDomainFromIDHolder pApp(dwId, TRUE, AppDomainFromIDHolder::SyncType_ADLock);
+
+ if (pApp.IsUnloaded() || ! pApp->CanLoadCode() || pApp->GetId().m_dwId == 0)
+ return COR_E_APPDOMAINUNLOADED;
+
+ if (g_fADUnloadWorkerOK != 1)
+ {
+ _ASSERTE(FALSE);
+ return E_UNEXPECTED;
+ }
+
+ if (!fSync)
+ {
+ pApp->EnableADUnloadWorker();
+ return S_OK;
+ }
+
+ pSink = pApp->PrepareForWaitUnloadCompletion();
+
+ pApp->EnableADUnloadWorker();
+
+ // release the holders - we don't care anymore if the appdomain is gone
+ }
+
+#ifdef FEATURE_TESTHOOKS
+ if (fExceptionsPassThrough)
+ {
+ CONTRACT_VIOLATION(FaultViolation);
+ return UnloadWaitNoCatch(dwId,pSink);
+ }
+#endif
+
+ return UnloadWait(dwId,pSink);
+}
+
+HRESULT AppDomain::UnloadWait(ADID Id, ADUnloadSink * pSink)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_TRIGGERS);}
+ }
+ CONTRACTL_END;
+
+ HRESULT hr=S_OK;
+ EX_TRY
+ {
+ // IF you ever try to change this to something not using events, please address the fact that
+ // AppDomain::StopEEAndUnwindThreads relies on events being used.
+
+ pSink->WaitUnloadCompletion();
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (SUCCEEDED(hr))
+ hr=pSink->GetUnloadResult();
+
+ if (FAILED(hr))
+ {
+ ResetUnloadRequestThread(Id);
+ }
+ return hr;
+}
+
+#ifdef FEATURE_TESTHOOKS
+HRESULT AppDomain::UnloadWaitNoCatch(ADID Id, ADUnloadSink * pSink)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ Holder<ADID, DoNothing<ADID>, AppDomain::ResetUnloadRequestThread> resetUnloadHolder(Id);
+
+ // IF you ever try to change this to something not using events, please address the fact that
+ // AppDomain::StopEEAndUnwindThreads relies on events being used.
+ pSink->WaitUnloadCompletion();
+
+ HRESULT hr = pSink->GetUnloadResult();
+
+ if (SUCCEEDED(hr))
+ resetUnloadHolder.SuppressRelease();
+
+ return hr;
+}
+#endif
+
+void AppDomain::Unload(BOOL fForceUnload)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_MULTICOREJIT
+
+ // Stop the profile early so the profiling file is not left partially written in ASP.NET scenarios.
+ GetMulticoreJitManager().StopProfile(true);
+
+#endif
+
+ Thread *pThread = GetThread();
+
+
+ if (! fForceUnload && !g_pConfig->AppDomainUnload())
+ return;
+
+ EPolicyAction action;
+ EClrOperation operation;
+ if (!IsRudeUnload())
+ {
+ operation = OPR_AppDomainUnload;
+ }
+ else
+ {
+ operation = OPR_AppDomainRudeUnload;
+ }
+ action = GetEEPolicy()->GetDefaultAction(operation,NULL);
+ GetEEPolicy()->NotifyHostOnDefaultAction(operation,action);
+
+ switch (action)
+ {
+ case eUnloadAppDomain:
+ break;
+ case eRudeUnloadAppDomain:
+ SetRudeUnload();
+ break;
+ case eExitProcess:
+ case eFastExitProcess:
+ case eRudeExitProcess:
+ case eDisableRuntime:
+ EEPolicy::HandleExitProcessFromEscalation(action, HOST_E_EXITPROCESS_ADUNLOAD);
+ _ASSERTE (!"Should not get here");
+ break;
+ default:
+ break;
+ }
+
+#if (defined(_DEBUG) || defined(BREAK_ON_UNLOAD) || defined(AD_LOG_MEMORY) || defined(AD_SNAPSHOT))
+ static int unloadCount = 0;
+#endif
+
+#ifdef AD_LOG_MEMORY
+ {
+ GCX_PREEMP();
+ static int logMemory = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ADLogMemory);
+ typedef void (__cdecl *LogItFcn) ( int );
+ static LogItFcn pLogIt = NULL;
+
+ if (logMemory && ! pLogIt)
+ {
+ HMODULE hMod = CLRLoadLibrary(W("mpdh.dll"));
+ if (hMod)
+ {
+ pLogIt = (LogItFcn)GetProcAddress(hMod, "logIt");
+ if (pLogIt)
+ {
+ pLogIt(9999);
+ pLogIt(9999);
+ }
+ }
+ }
+ }
+#endif // AD_LOG_MEMORY
+
+ if (IsDefaultDomain() && !IsSingleAppDomain())
+ COMPlusThrow(kCannotUnloadAppDomainException, IDS_EE_ADUNLOAD_DEFAULT);
+
+ _ASSERTE(CanUnload());
+
+ if (pThread == FinalizerThread::GetFinalizerThread() || GetUnloadRequestThread() == FinalizerThread::GetFinalizerThread())
+ COMPlusThrow(kCannotUnloadAppDomainException, IDS_EE_ADUNLOAD_IN_FINALIZER);
+
+ _ASSERTE(! SystemDomain::AppDomainBeingUnloaded());
+
+ // should not be running in this AD because unload spawned thread in default domain
+ if (!NingenEnabled())
+ {
+ _ASSERTE(!pThread->IsRunningIn(this, NULL));
+ }
+
+
+#ifdef APPDOMAIN_STATE
+ _ASSERTE_ALL_BUILDS("clr/src/VM/AppDomain.cpp", pThread->GetDomain()->IsDefaultDomain());
+#endif
+
+ LOG((LF_APPDOMAIN | LF_CORDB, LL_INFO10, "AppDomain::Unloading domain [%d] %#08x %ls\n", GetId().m_dwId, this, GetFriendlyName()));
+
+ STRESS_LOG3 (LF_APPDOMAIN, LL_INFO100, "Unload domain [%d, %d] %p\n", GetId().m_dwId, GetIndex().m_dwIndex, this);
+
+ UnloadHolder hold(this);
+
+ SystemDomain::System()->SetUnloadRequestingThread(GetUnloadRequestThread());
+ SystemDomain::System()->SetUnloadingThread(pThread);
+
+
+#ifdef _DEBUG
+ static int dumpSB = -1;
+
+ if (dumpSB == -1)
+ dumpSB = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ADDumpSB);
+
+ if (dumpSB > 1)
+ {
+ LogSpewAlways("Starting unload %3.3d\n", unloadCount);
+ DumpSyncBlockCache();
+ }
+#endif // _DEBUG
+
+ BOOL bForceGC=m_bForceGCOnUnload;
+
+#ifdef AD_LOG_MEMORY
+ if (pLogIt)
+ bForceGC=TRUE;
+#endif // AD_LOG_MEMORY
+
+#ifdef AD_SNAPSHOT
+ static int takeSnapShot = -1;
+
+ if (takeSnapShot == -1)
+ takeSnapShot = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ADTakeSnapShot);
+
+ if (takeSnapShot)
+ bForceGC=TRUE;
+#endif // AD_SNAPSHOT
+
+#ifdef _DEBUG
+ if (dumpSB > 0)
+ bForceGC=TRUE;
+#endif // _DEBUG
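+ // cfgForceGC reflects the ADULazyMemoryRelease config setting: when lazy
+ // memory release is disabled, a GC is forced as part of the unload.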
+ static int cfgForceGC = -1;
+
+ if (cfgForceGC == -1)
+ cfgForceGC =!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ADULazyMemoryRelease);
+
+ bForceGC=bForceGC||cfgForceGC;
+ AppDomainRefHolder This(this);
+ AddRef();
+
+ // Do the actual unloading
+ {
+ // We do not want other threads to abort the current one.
+ ThreadPreventAsyncHolder preventAsync;
+ Exit(TRUE, !bForceGC);
+ }
+ if(bForceGC)
+ {
+ GCHeap::GetGCHeap()->GarbageCollect();
+ FinalizerThread::FinalizerThreadWait();
+ SetStage(STAGE_COLLECTED);
+ Close(); //NOTHROW!
+ }
+
+#ifdef AD_LOG_MEMORY
+ if (pLogIt)
+ {
+ GCX_PREEMP();
+ pLogIt(unloadCount);
+ }
+#endif // AD_LOG_MEMORY
+
+#ifdef AD_SNAPSHOT
+ if (takeSnapShot)
+ {
+ char buffer[1024];
+ sprintf(buffer, "vadump -p %d -o > vadump.%d", GetCurrentProcessId(), unloadCount);
+ system(buffer);
+ sprintf(buffer, "umdh -p:%d -d -i:1 -f:umdh.%d", GetCurrentProcessId(), unloadCount);
+ system(buffer);
+ int takeDHSnapShot = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ADTakeDHSnapShot);
+ if (takeDHSnapShot)
+ {
+ sprintf(buffer, "dh -p %d -s -g -h -b -f dh.%d", GetCurrentProcessId(), unloadCount);
+ system(buffer);
+ }
+ }
+#endif // AD_SNAPSHOT
+
+#ifdef _DEBUG
+ if (dumpSB > 0)
+ {
+ // do extra finalizer wait to remove any leftover sb entries
+ FinalizerThread::FinalizerThreadWait();
+ GCHeap::GetGCHeap()->GarbageCollect();
+ FinalizerThread::FinalizerThreadWait();
+ LogSpewAlways("Done unload %3.3d\n", unloadCount);
+ DumpSyncBlockCache();
+ ShutdownLogging();
+ WCHAR buffer[128];
+ swprintf_s(buffer, NumItems(buffer), W("DumpSB.%d"), unloadCount);
+ _ASSERTE(WszMoveFileEx(W("COMPLUS.LOG"), buffer, MOVEFILE_REPLACE_EXISTING));
+ // this will open a new file
+ InitLogging();
+ }
+#endif // _DEBUG
+}
+
+void AppDomain::ExceptionUnwind(Frame *pFrame)
+{
+ CONTRACTL
+ {
+ DISABLED(GC_TRIGGERS); // EEResourceException
+ DISABLED(THROWS); // EEResourceException
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_APPDOMAIN, LL_INFO10, "AppDomain::ExceptionUnwind for %8.8x\n", pFrame));
+#if _DEBUG_ADUNLOAD
+ printf("%x AppDomain::ExceptionUnwind for %8.8p\n", GetThread()->GetThreadId(), pFrame);
+#endif
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+
+ if (! pThread->ShouldChangeAbortToUnload(pFrame))
+ {
+ LOG((LF_APPDOMAIN, LL_INFO10, "AppDomain::ExceptionUnwind: not first transition or abort\n"));
+ return;
+ }
+
+ LOG((LF_APPDOMAIN, LL_INFO10, "AppDomain::ExceptionUnwind: changing to unload\n"));
+
+ GCX_COOP();
+ OBJECTREF throwable = NULL;
+ EEResourceException e(kAppDomainUnloadedException, W("Remoting_AppDomainUnloaded_ThreadUnwound"));
+ throwable = e.GetThrowable();
+
+ // reset the exception to an AppDomainUnloadedException
+ if (throwable != NULL)
+ {
+ GetThread()->SafeSetThrowables(throwable);
+ }
+}
+
+BOOL AppDomain::StopEEAndUnwindThreads(unsigned int retryCount, BOOL *pFMarkUnloadRequestThread)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ Thread *pThread = NULL;
+ DWORD nThreadsNeedMoreWork=0;
+ if (retryCount != (unsigned int)-1 && retryCount < g_pConfig->AppDomainUnloadRetryCount())
+ {
+ Thread *pCurThread = GetThread();
+ if (pCurThread->CatchAtSafePoint())
+ pCurThread->PulseGCMode();
+
+ {
+ // We know which thread is not in the domain now. We just need to
+ // work on those threads. We do not need to suspend the runtime.
+ ThreadStoreLockHolder tsl;
+
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ {
+ if (pThread == pCurThread)
+ {
+ continue;
+ }
+
+ if (pThread == FinalizerThread::GetFinalizerThread())
+ {
+ continue;
+ }
+
+ if (pThread->GetUnloadBoundaryFrame() == NULL)
+ {
+ continue;
+ }
+
+ // A thread may have UnloadBoundaryFrame set if
+ // 1. Being unloaded by AD unload helper thread
+ // 2. Escalation from OOM or SO triggers AD unload
+ // Here we only need to work on threads that are in the domain. If we work on other threads,
+ // those threads may be stuck in a finally, and we will not be able to escalate for them,
+ // therefore AD unload is blocked.
+ if (pThread->IsBeingAbortedForADUnload() ||
+ pThread == SystemDomain::System()->GetUnloadRequestingThread())
+ {
+ nThreadsNeedMoreWork++;
+ }
+
+ if (!(IsRudeUnload() ||
+ (pThread != SystemDomain::System()->GetUnloadRequestingThread() || OnlyOneThreadLeft())))
+ {
+ continue;
+ }
+
+ if ((pThread == SystemDomain::System()->GetUnloadRequestingThread()) && *pFMarkUnloadRequestThread)
+ {
+ // Mark thread for abortion only once; later on interrupt only
+ *pFMarkUnloadRequestThread = FALSE;
+ pThread->SetAbortRequest(m_fRudeUnload? EEPolicy::TA_Rude : EEPolicy::TA_V1Compatible);
+ }
+ else
+ {
+ if (pThread->m_State & Thread::TS_Interruptible)
+ {
+ pThread->UserInterrupt(Thread::TI_Abort);
+ }
+ }
+
+ if (pThread->PreemptiveGCDisabledOther())
+ {
+ #ifdef FEATURE_HIJACK
+ Thread::SuspendThreadResult str = pThread->SuspendThread();
+ if (str == Thread::STR_Success)
+ {
+ if (pThread->PreemptiveGCDisabledOther() &&
+ (!pThread->IsAbortInitiated() || pThread->IsRudeAbort()))
+ {
+ pThread->HandleJITCaseForAbort();
+ }
+ pThread->ResumeThread();
+ }
+ #endif
+ }
+ }
+ } // ThreadStoreLockHolder
+
+ if (nThreadsNeedMoreWork && CLRTaskHosted())
+ {
+ // In case a thread in the domain is blocked because its scheduler is
+ // occupied by another thread.
+ Thread::ThreadAbortWatchDog();
+ }
+ m_dwThreadsStillInAppDomain=nThreadsNeedMoreWork;
+ return !nThreadsNeedMoreWork;
+ }
+
+ // For now piggyback on the GC's suspend EE mechanism
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_APPDOMAIN_SHUTDOWN);
+#ifdef _DEBUG
+ // <TODO>@todo: what to do with any threads that didn't stop?</TODO>
+ _ASSERTE(ThreadStore::s_pThreadStore->DbgBackgroundThreadCount() > 0);
+#endif // _DEBUG
+
+ int totalADCount = 0;
+ int finalizerADCount = 0;
+ pThread = NULL;
+
+ RuntimeExceptionKind reKind = kLastException;
+ UINT resId = 0;
+ SmallStackSString ssThreadId;
+
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ {
+ // we already checked that we're not running in the unload domain
+ if (pThread == GetThread())
+ {
+ continue;
+ }
+
+#ifdef _DEBUG
+ void PrintStackTraceWithADToLog(Thread *pThread);
+ if (LoggingOn(LF_APPDOMAIN, LL_INFO100)) {
+ LOG((LF_APPDOMAIN, LL_INFO100, "\nStackTrace for %x\n", pThread->GetThreadId()));
+ PrintStackTraceWithADToLog(pThread);
+ }
+#endif // _DEBUG
+ int count = 0;
+ Frame *pFrame = pThread->GetFirstTransitionInto(this, &count);
+ if (! pFrame) {
+ _ASSERTE(count == 0);
+ if (pThread->IsBeingAbortedForADUnload())
+ {
+ pThread->ResetBeginAbortedForADUnload();
+ }
+ continue;
+ }
+
+ if (pThread != FinalizerThread::GetFinalizerThread())
+ {
+ totalADCount += count;
+ nThreadsNeedMoreWork++;
+ pThread->SetUnloadBoundaryFrame(pFrame);
+ }
+ else
+ {
+ finalizerADCount = count;
+ }
+
+ // don't setup the exception info for the unloading thread unless it's the last one in
+ if (retryCount != ((unsigned int) -1) && retryCount > g_pConfig->AppDomainUnloadRetryCount() && reKind == kLastException &&
+ (pThread != SystemDomain::System()->GetUnloadRequestingThread() || OnlyOneThreadLeft()))
+ {
+#ifdef AD_BREAK_ON_CANNOT_UNLOAD
+ static int breakOnCannotUnload = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ADBreakOnCannotUnload);
+ if (breakOnCannotUnload)
+ _ASSERTE(!"Cannot unload AD");
+#endif // AD_BREAK_ON_CANNOT_UNLOAD
+ reKind = kCannotUnloadAppDomainException;
+ resId = IDS_EE_ADUNLOAD_CANT_UNWIND_THREAD;
+ ssThreadId.Printf(W("%x"), pThread->GetThreadId());
+ STRESS_LOG2(LF_APPDOMAIN, LL_INFO10, "AppDomain::UnwindThreads cannot stop thread %x with %d transitions\n", pThread->GetThreadId(), count);
+ // don't break out of this early or the assert totalADCount == (int)m_dwThreadEnterCount below will fire
+ // it's better to chew a little extra time here and make sure our counts are consistent
+ }
+ // only abort the thread requesting the unload if it's the last one in, that way it will get
+ // notification that the unload failed for some other thread not being aborted. And don't abort
+ // the finalizer thread - let it finish its work, as it's allowed to be in there. If it won't finish,
+ // then we will eventually get a CannotUnloadException on it.
+
+ if (pThread != FinalizerThread::GetFinalizerThread() &&
+ // If the domain is rudely unloaded, we will unwind the requesting thread out
+ // Rude unload is going to succeed, or escalated to disable runtime or higher.
+ (IsRudeUnload() ||
+ (pThread != SystemDomain::System()->GetUnloadRequestingThread() || OnlyOneThreadLeft())
+ )
+ )
+ {
+
+ STRESS_LOG2(LF_APPDOMAIN, LL_INFO100, "AppDomain::UnwindThreads stopping %x with %d transitions\n", pThread->GetThreadId(), count);
+ LOG((LF_APPDOMAIN, LL_INFO100, "AppDomain::UnwindThreads stopping %x with %d transitions\n", pThread->GetThreadId(), count));
+#if _DEBUG_ADUNLOAD
+ printf("AppDomain::UnwindThreads %x stopping %x with first frame %8.8p\n", GetThread()->GetThreadId(), pThread->GetThreadId(), pFrame);
+#endif
+ if (pThread == SystemDomain::System()->GetUnloadRequestingThread())
+ {
+ // Mark thread for abortion only once; later on interrupt only
+ *pFMarkUnloadRequestThread = FALSE;
+ }
+ pThread->SetAbortRequest(m_fRudeUnload? EEPolicy::TA_Rude : EEPolicy::TA_V1Compatible);
+ }
+ TESTHOOKCALL(UnwindingThreads(GetId().m_dwId)) ;
+ }
+ _ASSERTE(totalADCount + finalizerADCount == (int)m_dwThreadEnterCount);
+
+ //@TODO: This is intended to catch a stress bug. Remove when no longer needed.
+ if (totalADCount + finalizerADCount != (int)m_dwThreadEnterCount)
+ FreeBuildDebugBreak();
+
+ // if our count did get messed up, set it to whatever count we actually found in the domain to avoid looping
+ // or other problems related to incorrect count. This is very much a bug if this happens - a thread should always
+ // exit the domain gracefully.
+ // m_dwThreadEnterCount = totalADCount;
+
+ if (reKind != kLastException)
+ {
+ pThread = NULL;
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ {
+ if (pThread->IsBeingAbortedForADUnload())
+ {
+ pThread->ResetBeginAbortedForADUnload();
+ }
+ }
+ }
+
+ // CommonTripThread will handle the abort for any threads that we've marked
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+ if (reKind != kLastException)
+ COMPlusThrow(reKind, resId, ssThreadId.GetUnicode());
+
+ _ASSERTE((totalADCount==0 && nThreadsNeedMoreWork==0) ||(totalADCount!=0 && nThreadsNeedMoreWork!=0));
+
+ m_dwThreadsStillInAppDomain=nThreadsNeedMoreWork;
+ return (totalADCount == 0);
+}
+
+void AppDomain::UnwindThreads()
+{
+ // This function should guarantee appdomain
+ // consistency even if it fails. Everything that is going
+ // to make the appdomain impossible to reenter
+ // should be factored out.
+
+ // <TODO>@todo: need real synchronization here!!!</TODO>
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ int retryCount = -1;
+ m_dwThreadsStillInAppDomain=(ULONG)-1;
+ ULONGLONG startTime = CLRGetTickCount64();
+
+ if (GetEEPolicy()->GetDefaultAction(OPR_AppDomainUnload, NULL) == eRudeUnloadAppDomain &&
+ !IsRudeUnload())
+ {
+ GetEEPolicy()->NotifyHostOnDefaultAction(OPR_AppDomainUnload, eRudeUnloadAppDomain);
+ SetRudeUnload();
+ }
+
+ // Force threads to go through slow path during AD unload.
+ TSSuspendHolder shTrap;
+
+ BOOL fCurrentUnloadMode = IsRudeUnload();
+ BOOL fMarkUnloadRequestThread = TRUE;
+
+ // now wait for all the threads running in our AD to get out
+ do
+ {
+ DWORD timeout = GetEEPolicy()->GetTimeout(m_fRudeUnload?OPR_AppDomainRudeUnload : OPR_AppDomainUnload);
+ EPolicyAction action = GetEEPolicy()->GetActionOnTimeout(m_fRudeUnload?OPR_AppDomainRudeUnload : OPR_AppDomainUnload, NULL);
+ if (timeout != INFINITE && action > eUnloadAppDomain) {
+ // Escalation policy specified.
+ ULONGLONG curTime = CLRGetTickCount64();
+ ULONGLONG elapseTime = curTime - startTime;
+ if (elapseTime > timeout)
+ {
+ // Escalate
+ switch (action)
+ {
+ case eRudeUnloadAppDomain:
+ GetEEPolicy()->NotifyHostOnTimeout(m_fRudeUnload?OPR_AppDomainRudeUnload : OPR_AppDomainUnload, action);
+ SetRudeUnload();
+ STRESS_LOG1(LF_APPDOMAIN, LL_INFO100,"Escalating to RADU, adid=%d",GetId().m_dwId);
+ break;
+ case eExitProcess:
+ case eFastExitProcess:
+ case eRudeExitProcess:
+ case eDisableRuntime:
+ GetEEPolicy()->NotifyHostOnTimeout(m_fRudeUnload?OPR_AppDomainRudeUnload : OPR_AppDomainUnload, action);
+ EEPolicy::HandleExitProcessFromEscalation(action, HOST_E_EXITPROCESS_TIMEOUT);
+ _ASSERTE (!"Should not reach here");
+ break;
+ default:
+ break;
+ }
+ }
+ }
+#ifdef _DEBUG
+ if (LoggingOn(LF_APPDOMAIN, LL_INFO100))
+ DumpADThreadTrack();
+#endif // _DEBUG
+ BOOL fNextUnloadMode = IsRudeUnload();
+ if (fCurrentUnloadMode != fNextUnloadMode)
+ {
+ // We have changed from normal unload to rude unload. We need to mark the thread
+ // with RudeAbort, but we can only do this safely if the runtime is suspended.
+ fCurrentUnloadMode = fNextUnloadMode;
+ retryCount = -1;
+ }
+ if (StopEEAndUnwindThreads(retryCount, &fMarkUnloadRequestThread))
+ break;
+ if (timeout != INFINITE)
+ {
+ // Turn off the timeout used by AD.
+ retryCount = 1;
+ }
+ else
+ {
+ // GCStress takes a long time to unwind, due to expensive creation of
+ // a threadabort exception.
+ if (!GCStress<cfg_any>::IsEnabled())
+ ++retryCount;
+ LOG((LF_APPDOMAIN, LL_INFO10, "AppDomain::UnwindThreads iteration %d waiting on thread count %d\n", retryCount, m_dwThreadEnterCount));
+#if _DEBUG_ADUNLOAD
+ printf("AppDomain::UnwindThreads iteration %d waiting on thread count %d\n", retryCount, m_dwThreadEnterCount);
+#endif
+ }
+
+ if (m_dwThreadEnterCount != 0)
+ {
+#ifdef _DEBUG
+ GetThread()->UserSleep(20);
+#else // !_DEBUG
+ GetThread()->UserSleep(10);
+#endif // !_DEBUG
+ }
+ }
+ while (TRUE) ;
+}
+
+void AppDomain::ClearGCHandles()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ SetStage(STAGE_HANDLETABLE_NOACCESS);
+
+ GCHeap::GetGCHeap()->WaitUntilConcurrentGCComplete();
+
+ // Keep async pin handles alive by moving them to default domain
+ HandleAsyncPinHandles();
+
+ // Remove our handle table as a source of GC roots
+ HandleTableBucket *pBucket = m_hHandleTableBucket;
+
+#ifdef _DEBUG
+ if (((HandleTable *)(pBucket->pTable[0]))->uADIndex != m_dwIndex)
+ _ASSERTE (!"AD index mismatch");
+#endif // _DEBUG
+
+ Ref_RemoveHandleTableBucket(pBucket);
+}
+
+// When an AD is unloaded, we will release all objects in this AD.
+// If a future asynchronous operation, like an io completion port callback, still
+// references any of these objects, we need to keep the memory space pinned so that
+// the gc heap is not corrupted.
+void AppDomain::HandleAsyncPinHandles()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ HandleTableBucket *pBucket = m_hHandleTableBucket;
+    // The IO completion port picks up IO jobs in FIFO order. Here is how we know which AsyncPinHandles can be freed:
+    // 1. We mark all non-pending AsyncPinHandles with READYTOCLEAN.
+    // 2. We queue a dummy Overlapped to the IO completion port as a marker.
+    // 3. When the Overlapped is picked up by the completion port, all previous IO jobs are guaranteed to have been processed.
+    // 4. Then we can delete all AsyncPinHandles marked with READYTOCLEAN.
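+    // Relocate this domain's async pin handles into the default domain's handle
+    // table bucket so they stay live and pinned after this domain's bucket is freed.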
+ HandleTableBucket *pBucketInDefault = SystemDomain::System()->DefaultDomain()->m_hHandleTableBucket;
+ Ref_RelocateAsyncPinHandles(pBucket, pBucketInDefault);
+
+ OverlappedDataObject::RequestCleanup();
+}
+
+void AppDomain::ClearGCRoots()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ Thread *pThread = NULL;
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_APPDOMAIN_SHUTDOWN);
+
+    // Tell the JIT managers to delete any entries in their structures. All the cooperative mode threads are stopped at
+    // this point, so we only need to synchronize the preemptive mode threads.
+ ExecutionManager::Unload(GetLoaderAllocator());
+
+ while ((pThread = ThreadStore::GetAllThreadList(pThread, 0, 0)) != NULL)
+ {
+ // Delete the thread local static store
+ pThread->DeleteThreadStaticData(this);
+
+#ifdef FEATURE_LEAK_CULTURE_INFO
+ pThread->ResetCultureForDomain(GetId());
+#endif // FEATURE_LEAK_CULTURE_INFO
+
+ // <TODO>@TODO: A pre-allocated AppDomainUnloaded exception might be better.</TODO>
+ if (m_hHandleTableBucket->Contains(pThread->m_LastThrownObjectHandle))
+ {
+ // Never delete a handle to a preallocated exception object.
+ if (!CLRException::IsPreallocatedExceptionHandle(pThread->m_LastThrownObjectHandle))
+ {
+ DestroyHandle(pThread->m_LastThrownObjectHandle);
+ }
+
+ pThread->m_LastThrownObjectHandle = NULL;
+ }
+
+ // Clear out the exceptions objects held by a thread.
+ pThread->GetExceptionState()->ClearThrowablesForUnload(m_hHandleTableBucket);
+ }
+
+    // Delete these while we still have the runtime suspended.
+    // They must be deleted before the loader heaps are deleted.
+ if (m_pMarshalingData != NULL)
+ {
+ delete m_pMarshalingData;
+ m_pMarshalingData = NULL;
+ }
+
+ if (m_pLargeHeapHandleTable != NULL)
+ {
+ delete m_pLargeHeapHandleTable;
+ m_pLargeHeapHandleTable = NULL;
+ }
+
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+}
+
+#ifdef _DEBUG
+
+void AppDomain::TrackADThreadEnter(Thread *pThread, Frame *pFrame)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // REENTRANT
+ PRECONDITION(CheckPointer(pThread));
+ PRECONDITION(pFrame != (Frame*)(size_t) INVALID_POINTER_CD);
+ }
+ CONTRACTL_END;
+
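+    // Simple test-and-set spin lock: loop until we atomically flip m_TrackSpinLock from 0 to 1.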
+ while (FastInterlockCompareExchange((LONG*)&m_TrackSpinLock, 1, 0) != 0)
+ ;
+ if (m_pThreadTrackInfoList == NULL)
+ m_pThreadTrackInfoList = new (nothrow) ThreadTrackInfoList;
+ // If we don't assert here, we will AV in the for loop below
+ _ASSERTE(m_pThreadTrackInfoList);
+
+ ThreadTrackInfoList *pTrackList= m_pThreadTrackInfoList;
+
+ ThreadTrackInfo *pTrack = NULL;
+ int i;
+ for (i=0; i < pTrackList->Count(); i++) {
+ if ((*(pTrackList->Get(i)))->pThread == pThread) {
+ pTrack = *(pTrackList->Get(i));
+ break;
+ }
+ }
+ if (! pTrack) {
+ pTrack = new (nothrow) ThreadTrackInfo;
+ // If we don't assert here, we will AV in the for loop below.
+ _ASSERTE(pTrack);
+ pTrack->pThread = pThread;
+ ThreadTrackInfo **pSlot = pTrackList->Append();
+ *pSlot = pTrack;
+ }
+
+ InterlockedIncrement((LONG*)&m_dwThreadEnterCount);
+ Frame **pSlot;
+ if (pTrack)
+ {
+ pSlot = pTrack->frameStack.Insert(0);
+ *pSlot = pFrame;
+ }
+ int totThreads = 0;
+ for (i=0; i < pTrackList->Count(); i++)
+ totThreads += (*(pTrackList->Get(i)))->frameStack.Count();
+ _ASSERTE(totThreads == (int)m_dwThreadEnterCount);
+
+ InterlockedExchange((LONG*)&m_TrackSpinLock, 0);
+}
+
+
+void AppDomain::TrackADThreadExit(Thread *pThread, Frame *pFrame)
+{
+ CONTRACTL
+ {
+ if (GetThread()) {MODE_COOPERATIVE;}
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ while (FastInterlockCompareExchange((LONG*)&m_TrackSpinLock, 1, 0) != 0)
+ ;
+ ThreadTrackInfoList *pTrackList= m_pThreadTrackInfoList;
+ _ASSERTE(pTrackList);
+ ThreadTrackInfo *pTrack = NULL;
+ int i;
+ for (i=0; i < pTrackList->Count(); i++)
+ {
+ if ((*(pTrackList->Get(i)))->pThread == pThread)
+ {
+ pTrack = *(pTrackList->Get(i));
+ break;
+ }
+ }
+ _ASSERTE(pTrack);
+ _ASSERTE(*(pTrack->frameStack.Get(0)) == pFrame);
+ pTrack->frameStack.Delete(0);
+ InterlockedDecrement((LONG*)&m_dwThreadEnterCount);
+
+ int totThreads = 0;
+ for (i=0; i < pTrackList->Count(); i++)
+ totThreads += (*(pTrackList->Get(i)))->frameStack.Count();
+ _ASSERTE(totThreads == (int)m_dwThreadEnterCount);
+
+ InterlockedExchange((LONG*)&m_TrackSpinLock, 0);
+}
+
+void AppDomain::DumpADThreadTrack()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ while (FastInterlockCompareExchange((LONG*)&m_TrackSpinLock, 1, 0) != 0)
+ ;
+ ThreadTrackInfoList *pTrackList= m_pThreadTrackInfoList;
+ if (!pTrackList)
+ goto end;
+
+ {
+ LOG((LF_APPDOMAIN, LL_INFO10000, "\nThread dump of %d threads for [%d] %#08x %S\n",
+ m_dwThreadEnterCount, GetId().m_dwId, this, GetFriendlyNameForLogging()));
+ int totThreads = 0;
+ for (int i=0; i < pTrackList->Count(); i++)
+ {
+ ThreadTrackInfo *pTrack = *(pTrackList->Get(i));
+ if (pTrack->frameStack.Count()==0)
+ continue;
+ LOG((LF_APPDOMAIN, LL_INFO100, " ADEnterCount for %x is %d\n", pTrack->pThread->GetThreadId(), pTrack->frameStack.Count()));
+ totThreads += pTrack->frameStack.Count();
+ for (int j=0; j < pTrack->frameStack.Count(); j++)
+ LOG((LF_APPDOMAIN, LL_INFO100, " frame %8.8x\n", *(pTrack->frameStack.Get(j))));
+ }
+ _ASSERTE(totThreads == (int)m_dwThreadEnterCount);
+ }
+end:
+ InterlockedExchange((LONG*)&m_TrackSpinLock, 0);
+}
+#endif // _DEBUG
+
+#ifdef FEATURE_REMOTING
+OBJECTREF AppDomain::GetAppDomainProxy()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ OBJECTREF orProxy = CRemotingServices::CreateProxyForDomain(this);
+
+ _ASSERTE(orProxy->IsTransparentProxy());
+
+ return orProxy;
+}
+#endif
+
+#endif // CROSSGEN_COMPILE
+
+void *SharedDomain::operator new(size_t size, void *pInPlace)
+{
+ LIMITED_METHOD_CONTRACT;
+ return pInPlace;
+}
+
+void SharedDomain::operator delete(void *pMem)
+{
+ LIMITED_METHOD_CONTRACT;
+ // Do nothing - new() was in-place
+}
+
+
+void SharedDomain::Attach()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Create the global SharedDomain and initialize it.
+ m_pSharedDomain = new (&g_pSharedDomainMemory[0]) SharedDomain();
+ SystemDomain::GetGlobalLoaderAllocator()->m_pDomain = m_pSharedDomain;
+ // This cannot fail since g_pSharedDomainMemory is a static array.
+ CONSISTENCY_CHECK(CheckPointer(m_pSharedDomain));
+
+ LOG((LF_CLASSLOADER,
+ LL_INFO10,
+ "Created shared domain at %p\n",
+ m_pSharedDomain));
+
+ // We need to initialize the memory pools etc. for the system domain.
+ m_pSharedDomain->Init(); // Setup the memory heaps
+
+ // allocate a Virtual Call Stub Manager for the shared domain
+ m_pSharedDomain->InitVSD();
+}
+
+#ifndef CROSSGEN_COMPILE
+void SharedDomain::Detach()
+{
+ if (m_pSharedDomain)
+ {
+ m_pSharedDomain->Terminate();
+ delete m_pSharedDomain;
+ m_pSharedDomain = NULL;
+ }
+}
+#endif // CROSSGEN_COMPILE
+
+#endif // !DACCESS_COMPILE
+
+SharedDomain *SharedDomain::GetDomain()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_pSharedDomain;
+}
+
+#ifndef DACCESS_COMPILE
+
+#define INITIAL_ASSEMBLY_MAP_SIZE 17
+void SharedDomain::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ BaseDomain::Init();
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ m_FileCreateLock.Init(CrstSharedAssemblyCreate, CRST_DEFAULT,TRUE);
+
+ LockOwner lock = { &m_DomainCrst, IsOwnerOfCrst };
+ m_assemblyMap.Init(INITIAL_ASSEMBLY_MAP_SIZE, CompareSharedAssembly, TRUE, &lock);
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+ ETW::LoaderLog::DomainLoad(this);
+}
+
+#ifndef CROSSGEN_COMPILE
+void SharedDomain::Terminate()
+{
+ // make sure we delete the StringLiteralMap before unloading
+    // the assemblies since the string literal map entries can
+ // point to metadata string literals.
+ GetLoaderAllocator()->CleanupStringLiteralMap();
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ PtrHashMap::PtrIterator i = m_assemblyMap.begin();
+
+ while (!i.end())
+ {
+ Assembly *pAssembly = (Assembly*) i.GetValue();
+ delete pAssembly;
+ ++i;
+ }
+
+ ListLockEntry* pElement;
+ pElement = m_FileCreateLock.Pop(TRUE);
+ while (pElement)
+ {
+#ifdef STRICT_CLSINITLOCK_ENTRY_LEAK_DETECTION
+ _ASSERTE (dbg_fDrasticShutdown || g_fInControlC);
+#endif
+ delete(pElement);
+ pElement = (FileLoadLock*) m_FileCreateLock.Pop(TRUE);
+ }
+ m_FileCreateLock.Destroy();
+#endif // FEATURE_LOADER_OPTIMIZATION
+ BaseDomain::Terminate();
+}
+#endif // CROSSGEN_COMPILE
+
+
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+
+BOOL SharedDomain::CompareSharedAssembly(UPTR u1, UPTR u2)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // This is the input to the lookup
+ SharedAssemblyLocator *pLocator = (SharedAssemblyLocator *) (u1<<1);
+
+ // This is the value stored in the table
+ Assembly *pAssembly = (Assembly *) u2;
+ if (pLocator->GetType()==SharedAssemblyLocator::DOMAINASSEMBLY)
+ {
+ if (!pAssembly->GetManifestFile()->Equals(pLocator->GetDomainAssembly()->GetFile()))
+ return FALSE;
+
+ return pAssembly->CanBeShared(pLocator->GetDomainAssembly());
+ }
+ else
+ if (pLocator->GetType()==SharedAssemblyLocator::PEASSEMBLY)
+ return pAssembly->GetManifestFile()->Equals(pLocator->GetPEAssembly());
+ else
+ if (pLocator->GetType()==SharedAssemblyLocator::PEASSEMBLYEXACT)
+ return pAssembly->GetManifestFile() == pLocator->GetPEAssembly();
+ _ASSERTE(!"Unexpected type of assembly locator");
+ return FALSE;
+}
+
+DWORD SharedAssemblyLocator::Hash()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+ if (m_type==DOMAINASSEMBLY)
+ return GetDomainAssembly()->HashIdentity();
+ if (m_type==PEASSEMBLY||m_type==PEASSEMBLYEXACT)
+ return GetPEAssembly()->HashIdentity();
+ _ASSERTE(!"Unexpected type of assembly locator");
+ return 0;
+}
+
+Assembly * SharedDomain::FindShareableAssembly(SharedAssemblyLocator * pLocator)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ Assembly * match= (Assembly *) m_assemblyMap.LookupValue(pLocator->Hash(), pLocator);
+ if (match != (Assembly *) INVALIDENTRY)
+ return match;
+ else
+ return NULL;
+}
+
+SIZE_T SharedDomain::GetShareableAssemblyCount()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_assemblyMap.GetCount();
+}
+
+void SharedDomain::AddShareableAssembly(Assembly * pAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // We have a lock on the file. There should be no races to add the same assembly.
+
+ {
+ LockHolder holder(this);
+
+ EX_TRY
+ {
+ pAssembly->SetIsTenured();
+ m_assemblyMap.InsertValue(pAssembly->HashIdentity(), pAssembly);
+ }
+ EX_HOOK
+ {
+ // There was an error adding the assembly to the assembly hash (probably an OOM),
+ // so we need to unset the tenured bit so that correct cleanup can happen.
+ pAssembly->UnsetIsTenured();
+ }
+ EX_END_HOOK
+ }
+
+ LOG((LF_CODESHARING,
+ LL_INFO100,
+ "Successfully added shareable assembly \"%s\".\n",
+ pAssembly->GetManifestFile()->GetSimpleName()));
+}
+
+#endif // FEATURE_LOADER_OPTIMIZATION
+#endif // !DACCESS_COMPILE
+
+DWORD DomainLocalModule::GetClassFlags(MethodTable* pMT, DWORD iClassIndex /*=(DWORD)-1*/)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ { // SO tolerance exception for debug-only assertion.
+ CONTRACT_VIOLATION(SOToleranceViolation);
+ CONSISTENCY_CHECK(GetDomainFile()->GetModule() == pMT->GetModuleForStatics());
+ }
+
+ if (pMT->IsDynamicStatics())
+ {
+ _ASSERTE(!pMT->ContainsGenericVariables());
+ DWORD dynamicClassID = pMT->GetModuleDynamicEntryID();
+ if(m_aDynamicEntries <= dynamicClassID)
+ return FALSE;
+ return (m_pDynamicClassTable[dynamicClassID].m_dwFlags);
+ }
+ else
+ {
+ if (iClassIndex == (DWORD)-1)
+ iClassIndex = pMT->GetClassIndex();
+ return GetPrecomputedStaticsClassData()[iClassIndex];
+ }
+}
+
+#ifndef DACCESS_COMPILE
+
+void DomainLocalModule::SetClassInitialized(MethodTable* pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BaseDomain::DomainLocalBlockLockHolder lh(GetDomainFile()->GetAppDomain());
+
+ _ASSERTE(!IsClassInitialized(pMT));
+ _ASSERTE(!IsClassInitError(pMT));
+
+ SetClassFlags(pMT, ClassInitFlags::INITIALIZED_FLAG);
+}
+
+void DomainLocalModule::SetClassInitError(MethodTable* pMT)
+{
+ WRAPPER_NO_CONTRACT;
+
+ BaseDomain::DomainLocalBlockLockHolder lh(GetDomainFile()->GetAppDomain());
+
+ SetClassFlags(pMT, ClassInitFlags::ERROR_FLAG);
+}
+
+void DomainLocalModule::SetClassFlags(MethodTable* pMT, DWORD dwFlags)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(GetDomainFile()->GetModule() == pMT->GetModuleForStatics());
+ // Assumes BaseDomain::DomainLocalBlockLockHolder is taken
+ PRECONDITION(GetDomainFile()->GetAppDomain()->OwnDomainLocalBlockLock());
+ } CONTRACTL_END;
+
+ if (pMT->IsDynamicStatics())
+ {
+ _ASSERTE(!pMT->ContainsGenericVariables());
+ DWORD dwID = pMT->GetModuleDynamicEntryID();
+ EnsureDynamicClassIndex(dwID);
+ m_pDynamicClassTable[dwID].m_dwFlags |= dwFlags;
+ }
+ else
+ {
+ GetPrecomputedStaticsClassData()[pMT->GetClassIndex()] |= dwFlags;
+ }
+}
+
+void DomainLocalModule::EnsureDynamicClassIndex(DWORD dwID)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ // Assumes BaseDomain::DomainLocalBlockLockHolder is taken
+ PRECONDITION(GetDomainFile()->GetAppDomain()->OwnDomainLocalBlockLock());
+ }
+ CONTRACTL_END;
+
+ if (dwID < m_aDynamicEntries)
+ {
+ _ASSERTE(m_pDynamicClassTable.Load() != NULL);
+ return;
+ }
+
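+    // Grow geometrically: start with at least 16 entries and double until dwID fits.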
+ SIZE_T aDynamicEntries = max(16, m_aDynamicEntries.Load());
+ while (aDynamicEntries <= dwID)
+ {
+ aDynamicEntries *= 2;
+ }
+
+ DynamicClassInfo* pNewDynamicClassTable;
+ pNewDynamicClassTable = (DynamicClassInfo*)
+ (void*)GetDomainFile()->GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(
+ S_SIZE_T(sizeof(DynamicClassInfo)) * S_SIZE_T(aDynamicEntries));
+
+ memcpy(pNewDynamicClassTable, m_pDynamicClassTable, sizeof(DynamicClassInfo) * m_aDynamicEntries);
+
+ // Note: Memory allocated on loader heap is zero filled
+ // memset(pNewDynamicClassTable + m_aDynamicEntries, 0, (aDynamicEntries - m_aDynamicEntries) * sizeof(DynamicClassInfo));
+
+ _ASSERTE(m_aDynamicEntries%2 == 0);
+
+    // Commit the new dynamic table. The lock-free helpers depend on the order:
+    // the new (larger) table pointer must become visible before the new size, so a
+    // reader that observes the new size never indexes past the old allocation.
+ MemoryBarrier();
+ m_pDynamicClassTable = pNewDynamicClassTable;
+ MemoryBarrier();
+ m_aDynamicEntries = aDynamicEntries;
+}
+
+#ifndef CROSSGEN_COMPILE
+void DomainLocalModule::AllocateDynamicClass(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ // Assumes BaseDomain::DomainLocalBlockLockHolder is taken
+ PRECONDITION(GetDomainFile()->GetAppDomain()->OwnDomainLocalBlockLock());
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!pMT->ContainsGenericVariables());
+ _ASSERTE(!pMT->IsSharedByGenericInstantiations());
+ _ASSERTE(GetDomainFile()->GetModule() == pMT->GetModuleForStatics());
+ _ASSERTE(pMT->IsDynamicStatics());
+
+ DWORD dynamicEntryIDIndex = pMT->GetModuleDynamicEntryID();
+
+ EnsureDynamicClassIndex(dynamicEntryIDIndex);
+
+ _ASSERTE(m_aDynamicEntries > dynamicEntryIDIndex);
+
+ EEClass *pClass = pMT->GetClass();
+
+ DWORD dwStaticBytes = pClass->GetNonGCRegularStaticFieldBytes();
+ DWORD dwNumHandleStatics = pClass->GetNumHandleRegularStatics();
+
+ _ASSERTE(!IsClassAllocated(pMT));
+ _ASSERTE(!IsClassInitialized(pMT));
+ _ASSERTE(!IsClassInitError(pMT));
+
+ DynamicEntry *pDynamicStatics = m_pDynamicClassTable[dynamicEntryIDIndex].m_pDynamicEntry;
+
+    // We need this check because a class may have a cctor but no statics
+ if (dwStaticBytes > 0 || dwNumHandleStatics > 0)
+ {
+ if (pDynamicStatics == NULL)
+ {
+ LoaderHeap * pLoaderAllocator = GetDomainFile()->GetLoaderAllocator()->GetHighFrequencyHeap();
+
+ if (pMT->Collectible())
+ {
+ pDynamicStatics = (DynamicEntry*)(void*)pLoaderAllocator->AllocMem(S_SIZE_T(sizeof(CollectibleDynamicEntry)));
+ }
+ else
+ {
+ SIZE_T dynamicEntrySize = DynamicEntry::GetOffsetOfDataBlob() + dwStaticBytes;
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+ // Allocate memory with extra alignment only if it is really necessary
+ if (dwStaticBytes >= MAX_PRIMITIVE_FIELD_SIZE)
+ {
+ static_assert_no_msg(sizeof(NormalDynamicEntry) % MAX_PRIMITIVE_FIELD_SIZE == 0);
+ pDynamicStatics = (DynamicEntry*)(void*)pLoaderAllocator->AllocAlignedMem(dynamicEntrySize, MAX_PRIMITIVE_FIELD_SIZE);
+ }
+ else
+#endif
+ pDynamicStatics = (DynamicEntry*)(void*)pLoaderAllocator->AllocMem(S_SIZE_T(dynamicEntrySize));
+ }
+
+ // Note: Memory allocated on loader heap is zero filled
+
+ m_pDynamicClassTable[dynamicEntryIDIndex].m_pDynamicEntry = pDynamicStatics;
+ }
+
+ if (pMT->Collectible() && (dwStaticBytes != 0))
+ {
+ GCX_COOP();
+ OBJECTREF nongcStaticsArray = NULL;
+ GCPROTECT_BEGIN(nongcStaticsArray);
+#ifdef FEATURE_64BIT_ALIGNMENT
+ // Allocate memory with extra alignment only if it is really necessary
+ if (dwStaticBytes >= MAX_PRIMITIVE_FIELD_SIZE)
+ nongcStaticsArray = AllocatePrimitiveArray(ELEMENT_TYPE_I8, (dwStaticBytes + (sizeof(CLR_I8)-1)) / (sizeof(CLR_I8)));
+ else
+#endif
+ nongcStaticsArray = AllocatePrimitiveArray(ELEMENT_TYPE_U1, dwStaticBytes);
+ ((CollectibleDynamicEntry *)pDynamicStatics)->m_hNonGCStatics = GetDomainFile()->GetModule()->GetLoaderAllocator()->AllocateHandle(nongcStaticsArray);
+ GCPROTECT_END();
+ }
+ if (dwNumHandleStatics > 0)
+ {
+ if (!pMT->Collectible())
+ {
+ GetAppDomain()->AllocateStaticFieldObjRefPtrs(dwNumHandleStatics,
+ &((NormalDynamicEntry *)pDynamicStatics)->m_pGCStatics);
+ }
+ else
+ {
+ GCX_COOP();
+ OBJECTREF gcStaticsArray = NULL;
+ GCPROTECT_BEGIN(gcStaticsArray);
+ gcStaticsArray = AllocateObjectArray(dwNumHandleStatics, g_pObjectClass);
+ ((CollectibleDynamicEntry *)pDynamicStatics)->m_hGCStatics = GetDomainFile()->GetModule()->GetLoaderAllocator()->AllocateHandle(gcStaticsArray);
+ GCPROTECT_END();
+ }
+ }
+ }
+}
+
+
+void DomainLocalModule::PopulateClass(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!pMT->ContainsGenericVariables());
+
+ // <todo> the only work actually done here for non-dynamics is the freezing related work.
+ // See if we can eliminate this and make this a dynamic-only path </todo>
+ DWORD iClassIndex = pMT->GetClassIndex();
+
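+    // Double-checked locking: test outside the lock for the common fast path, then
+    // re-test under the domain local block lock before doing the allocation.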
+ if (!IsClassAllocated(pMT, iClassIndex))
+ {
+ BaseDomain::DomainLocalBlockLockHolder lh(GetDomainFile()->GetAppDomain());
+
+ if (!IsClassAllocated(pMT, iClassIndex))
+ {
+ // Allocate dynamic space if necessary
+ if (pMT->IsDynamicStatics())
+ AllocateDynamicClass(pMT);
+
+ // determine flags to set on the statics block
+ DWORD dwFlags = ClassInitFlags::ALLOCATECLASS_FLAG;
+
+ if (!pMT->HasClassConstructor() && !pMT->HasBoxedRegularStatics())
+ {
+ _ASSERTE(!IsClassInitialized(pMT));
+ _ASSERTE(!IsClassInitError(pMT));
+ dwFlags |= ClassInitFlags::INITIALIZED_FLAG;
+ }
+
+ if (pMT->Collectible())
+ {
+ dwFlags |= ClassInitFlags::COLLECTIBLE_FLAG;
+ }
+
+ // Set all flags at the same time to avoid races
+ SetClassFlags(pMT, dwFlags);
+ }
+ }
+
+ return;
+}
+#endif // CROSSGEN_COMPILE
+
+void DomainLocalBlock::EnsureModuleIndex(ModuleIndex index)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ // Assumes BaseDomain::DomainLocalBlockLockHolder is taken
+ PRECONDITION(m_pDomain->OwnDomainLocalBlockLock());
+ }
+ CONTRACTL_END;
+
+ if (m_aModuleIndices > index.m_dwIndex)
+ {
+ _ASSERTE(m_pModuleSlots != NULL);
+ return;
+ }
+
+ SIZE_T aModuleIndices = max(16, m_aModuleIndices);
+ while (aModuleIndices <= index.m_dwIndex)
+ {
+ aModuleIndices *= 2;
+ }
+
+ PTR_DomainLocalModule* pNewModuleSlots = (PTR_DomainLocalModule*) (void*)m_pDomain->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(PTR_DomainLocalModule)) * S_SIZE_T(aModuleIndices));
+
+ memcpy(pNewModuleSlots, m_pModuleSlots, sizeof(SIZE_T)*m_aModuleIndices);
+
+ // Note: Memory allocated on loader heap is zero filled
+ // memset(pNewModuleSlots + m_aModuleIndices, 0 , (aModuleIndices - m_aModuleIndices)*sizeof(PTR_DomainLocalModule) );
+
+    // Commit the new table. The lock-free helpers depend on the order: the new
+    // module-slot pointer must become visible before the new size.
+ MemoryBarrier();
+ m_pModuleSlots = pNewModuleSlots;
+ MemoryBarrier();
+ m_aModuleIndices = aModuleIndices;
+
+}
+
+void DomainLocalBlock::SetModuleSlot(ModuleIndex index, PTR_DomainLocalModule pLocalModule)
+{
+ // Need to synchronize with table growth in this domain
+ BaseDomain::DomainLocalBlockLockHolder lh(m_pDomain);
+
+ EnsureModuleIndex(index);
+
+ _ASSERTE(index.m_dwIndex < m_aModuleIndices);
+
+    // We would like this assert here. Unfortunately, loading a module in this appdomain can fail
+    // after this point; in that case we keep the module around and reuse the slot when we retry
+    // (if the failure happened due to a transient error, such as OOM), so the slot won't
+    // be null.
+ //_ASSERTE(m_pModuleSlots[index.m_dwIndex] == 0);
+
+ m_pModuleSlots[index.m_dwIndex] = pLocalModule;
+}
+
+#ifndef CROSSGEN_COMPILE
+
+DomainAssembly* AppDomain::RaiseTypeResolveEventThrowing(DomainAssembly* pAssembly, LPCSTR szName, ASSEMBLYREF *pResultingAssemblyRef)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ THROWS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+
+ DomainAssembly* pResolvedAssembly = NULL;
+ _ASSERTE(strcmp(szName, g_AppDomainClassName));
+
+ GCX_COOP();
+
+ struct _gc {
+ OBJECTREF AppDomainRef;
+ OBJECTREF AssemblyRef;
+ STRINGREF str;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ if ((gc.AppDomainRef = GetRawExposedObject()) != NULL)
+ {
+ if (pAssembly != NULL)
+ gc.AssemblyRef = pAssembly->GetExposedAssemblyObject();
+
+ MethodDescCallSite onTypeResolve(METHOD__APP_DOMAIN__ON_TYPE_RESOLVE, &gc.AppDomainRef);
+
+ gc.str = StringObject::NewString(szName);
+ ARG_SLOT args[3] =
+ {
+ ObjToArgSlot(gc.AppDomainRef),
+ ObjToArgSlot(gc.AssemblyRef),
+ ObjToArgSlot(gc.str)
+ };
+ ASSEMBLYREF ResultingAssemblyRef = (ASSEMBLYREF) onTypeResolve.Call_RetOBJECTREF(args);
+
+ if (ResultingAssemblyRef != NULL)
+ {
+ pResolvedAssembly = ResultingAssemblyRef->GetDomainAssembly();
+
+ if (pResultingAssemblyRef)
+ *pResultingAssemblyRef = ResultingAssemblyRef;
+ else
+ {
+ if (pResolvedAssembly->IsCollectible())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleBoundNonCollectible"));
+ }
+ }
+ }
+ }
+ GCPROTECT_END();
+
+ return pResolvedAssembly;
+}
+
+
+Assembly* AppDomain::RaiseResourceResolveEvent(DomainAssembly* pAssembly, LPCSTR szName)
+{
+ CONTRACT(Assembly*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ Assembly* pResolvedAssembly = NULL;
+
+ GCX_COOP();
+
+ struct _gc {
+ OBJECTREF AppDomainRef;
+ OBJECTREF AssemblyRef;
+ STRINGREF str;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ if ((gc.AppDomainRef = GetRawExposedObject()) != NULL)
+ {
+ if (pAssembly != NULL)
+ gc.AssemblyRef=pAssembly->GetExposedAssemblyObject();
+
+ MethodDescCallSite onResourceResolve(METHOD__APP_DOMAIN__ON_RESOURCE_RESOLVE, &gc.AppDomainRef);
+ gc.str = StringObject::NewString(szName);
+ ARG_SLOT args[3] =
+ {
+ ObjToArgSlot(gc.AppDomainRef),
+ ObjToArgSlot(gc.AssemblyRef),
+ ObjToArgSlot(gc.str)
+ };
+ ASSEMBLYREF ResultingAssemblyRef = (ASSEMBLYREF) onResourceResolve.Call_RetOBJECTREF(args);
+ if (ResultingAssemblyRef != NULL)
+ {
+ pResolvedAssembly = ResultingAssemblyRef->GetAssembly();
+ if (pResolvedAssembly->IsCollectible())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleAssemblyResolve"));
+ }
+ }
+ }
+ GCPROTECT_END();
+
+ RETURN pResolvedAssembly;
+}
+
+
+Assembly *
+AppDomain::RaiseAssemblyResolveEvent(
+ AssemblySpec * pSpec,
+ BOOL fIntrospection,
+ BOOL fPreBind)
+{
+ CONTRACT(Assembly*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ BinderMethodID methodId;
+ StackSString ssName;
+ pSpec->GetFileOrDisplayName(0, ssName);
+
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+ if ( (!fIntrospection) && (!fPreBind) )
+ {
+ methodId = METHOD__APP_DOMAIN__ON_ASSEMBLY_RESOLVE; // post-bind execution event (the classic V1.0 event)
+ }
+ else if ((!fIntrospection) && fPreBind)
+ {
+ RETURN NULL; // There is currently no prebind execution resolve event
+ }
+ else if (fIntrospection && !fPreBind)
+ {
+ RETURN NULL; // There is currently no post-bind introspection resolve event
+ }
+ else
+ {
+ _ASSERTE( fIntrospection && fPreBind );
+ methodId = METHOD__APP_DOMAIN__ON_REFLECTION_ONLY_ASSEMBLY_RESOLVE; // event for introspection assemblies
+ }
+#else // FEATURE_REFLECTION_ONLY_LOAD
+ if (!fPreBind)
+ {
+ methodId = METHOD__APP_DOMAIN__ON_ASSEMBLY_RESOLVE; // post-bind execution event (the classic V1.0 event)
+ }
+ else
+ {
+ RETURN NULL;
+ }
+
+#endif // FEATURE_REFLECTION_ONLY_LOAD
+
+    // Elevate the thread's allowed loading level. This allows the host to load an assembly even in a restricted
+ // condition. Note, however, that this exposes us to possible recursion failures, if the host tries to
+ // load the assemblies currently being loaded. (Such cases would then throw an exception.)
+
+ OVERRIDE_LOAD_LEVEL_LIMIT(FILE_ACTIVE);
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ GCX_COOP();
+
+ Assembly* pAssembly = NULL;
+
+ struct _gc {
+ OBJECTREF AppDomainRef;
+ OBJECTREF AssemblyRef;
+ STRINGREF str;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ if ((gc.AppDomainRef = GetRawExposedObject()) != NULL)
+ {
+ if (pSpec->GetParentAssembly() != NULL)
+ {
+ if ( pSpec->IsIntrospectionOnly()
+#ifdef FEATURE_FUSION
+ || pSpec->GetParentLoadContext() == LOADCTX_TYPE_UNKNOWN
+#endif
+ )
+ {
+ gc.AssemblyRef=pSpec->GetParentAssembly()->GetExposedAssemblyObject();
+ }
+ }
+ MethodDescCallSite onAssemblyResolve(methodId, &gc.AppDomainRef);
+
+ gc.str = StringObject::NewString(ssName);
+ ARG_SLOT args[3] = {
+ ObjToArgSlot(gc.AppDomainRef),
+ ObjToArgSlot(gc.AssemblyRef),
+ ObjToArgSlot(gc.str)
+ };
+
+ ASSEMBLYREF ResultingAssemblyRef = (ASSEMBLYREF) onAssemblyResolve.Call_RetOBJECTREF(args);
+
+ if (ResultingAssemblyRef != NULL)
+ {
+ pAssembly = ResultingAssemblyRef->GetAssembly();
+ if (pAssembly->IsCollectible())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleAssemblyResolve"));
+ }
+ }
+ }
+ GCPROTECT_END();
+
+ if (pAssembly != NULL)
+ {
+ if ((!(pAssembly->IsIntrospectionOnly())) != (!fIntrospection))
+ {
+ // Cannot return an introspection assembly from an execution callback or vice-versa
+ COMPlusThrow(kFileLoadException, pAssembly->IsIntrospectionOnly() ? IDS_CLASSLOAD_ASSEMBLY_RESOLVE_RETURNED_INTROSPECTION : IDS_CLASSLOAD_ASSEMBLY_RESOLVE_RETURNED_EXECUTION);
+ }
+
+ // Check that the public key token matches the one specified in the spec
+ // MatchPublicKeys throws as appropriate
+ pSpec->MatchPublicKeys(pAssembly);
+ }
+
+ RETURN pAssembly;
+} // AppDomain::RaiseAssemblyResolveEvent
+
+#ifndef FEATURE_CORECLR
+
+//---------------------------------------------------------------------------------------
+//
+// Ask the AppDomainManager for the entry assembly of the application
+//
+// Note:
+// Most AppDomainManagers will fall back on the root assembly for the domain, so we need
+// to make sure this is set before we call through to the AppDomainManager itself.
+//
+
+Assembly *AppDomain::GetAppDomainManagerEntryAssembly()
+{
+ CONTRACT(Assembly *)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(HasAppDomainManagerInfo());
+ PRECONDITION(CheckPointer(m_pRootAssembly));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ GCX_COOP();
+
+ Assembly *pEntryAssembly = NULL;
+
+ struct
+ {
+ APPDOMAINREF orDomain;
+ OBJECTREF orAppDomainManager;
+ ASSEMBLYREF orEntryAssembly;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.orDomain = static_cast<APPDOMAINREF>(GetExposedObject());
+ gc.orAppDomainManager = gc.orDomain->GetAppDomainManager();
+ _ASSERTE(gc.orAppDomainManager != NULL);
+
+ MethodDescCallSite getEntryAssembly(METHOD__APPDOMAIN_MANAGER__GET_ENTRY_ASSEMBLY, &gc.orAppDomainManager);
+ ARG_SLOT argThis = ObjToArgSlot(gc.orAppDomainManager);
+ gc.orEntryAssembly = static_cast<ASSEMBLYREF>(getEntryAssembly.Call_RetOBJECTREF(&argThis));
+
+ if (gc.orEntryAssembly != NULL)
+ {
+ pEntryAssembly = gc.orEntryAssembly->GetAssembly();
+ }
+
+ GCPROTECT_END();
+
+ // If the AppDomainManager did not return an entry assembly, we'll assume the default assembly
+ if (pEntryAssembly == NULL)
+ {
+ pEntryAssembly = m_pRootAssembly;
+ }
+
+ RETURN(pEntryAssembly);
+}
+
+#endif // !FEATURE_CORECLR
+
+//---------------------------------------------------------------------------------------
+//
+// Determine the type of AppDomainManager to use for the default AppDomain
+//
+// Notes:
+// v2.0 of the CLR used environment variables APPDOMAIN_MANAGER_ASM and APPDOMAIN_MANAGER_TYPE to set the
+// domain manager. For compatibility these are still supported, along with appDomainManagerAsm and
+// appDomainManagerType config file switches. If the config switches are supplied, the entry point must be
+// fully trusted.
+//
+
+void AppDomain::InitializeDefaultDomainManager()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(GetId().m_dwId == DefaultADID);
+ PRECONDITION(!HasAppDomainManagerInfo());
+ }
+ CONTRACTL_END;
+
+ //
+ // The AppDomainManager for the default domain can be specified by:
+ // 1. Native hosting API
+ // 2. Application config file if the application is fully trusted
+ // 3. Environment variables
+ //
+
+
+ if (CorHost2::HasAppDomainManagerInfo())
+ {
+ SetAppDomainManagerInfo(CorHost2::GetAppDomainManagerAsm(),
+ CorHost2::GetAppDomainManagerType(),
+ CorHost2::GetAppDomainManagerInitializeNewDomainFlags());
+ m_fAppDomainManagerSetInConfig = FALSE;
+
+ LOG((LF_APPDOMAIN, LL_INFO10, "Setting default AppDomainManager '%S', '%S' from hosting API.\n", GetAppDomainManagerAsm(), GetAppDomainManagerType()));
+ }
+#ifndef FEATURE_CORECLR
+ else
+ {
+ CLRConfigStringHolder wszConfigAppDomainManagerAssembly(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AppDomainManagerAsm));
+ CLRConfigStringHolder wszConfigAppDomainManagerType(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AppDomainManagerType));
+
+ if (wszConfigAppDomainManagerAssembly != NULL &&
+ wszConfigAppDomainManagerType != NULL)
+ {
+ SetAppDomainManagerInfo(wszConfigAppDomainManagerAssembly,
+ wszConfigAppDomainManagerType,
+ eInitializeNewDomainFlags_None);
+ m_fAppDomainManagerSetInConfig = TRUE;
+
+ LOG((LF_APPDOMAIN, LL_INFO10, "Setting default AppDomainManager '%S', '%S' from application config file.\n", GetAppDomainManagerAsm(), GetAppDomainManagerType()));
+ }
+ else
+ {
+ CLRConfigStringHolder wszEnvironmentAppDomainManagerAssembly(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_LEGACY_APPDOMAIN_MANAGER_ASM));
+ CLRConfigStringHolder wszEnvironmentAppDomainManagerType(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_LEGACY_APPDOMAIN_MANAGER_TYPE));
+
+ if (wszEnvironmentAppDomainManagerAssembly != NULL &&
+ wszEnvironmentAppDomainManagerType != NULL)
+ {
+ SetAppDomainManagerInfo(wszEnvironmentAppDomainManagerAssembly,
+ wszEnvironmentAppDomainManagerType,
+ eInitializeNewDomainFlags_None);
+ m_fAppDomainManagerSetInConfig = FALSE;
+
+ LOG((LF_APPDOMAIN, LL_INFO10, "Setting default AppDomainManager '%S', '%S' from environment variables.\n", GetAppDomainManagerAsm(), GetAppDomainManagerType()));
+
+                // Reset the environment variables so that child processes do not inherit our domain manager
+ // by default.
+ WszSetEnvironmentVariable(CLRConfig::EXTERNAL_LEGACY_APPDOMAIN_MANAGER_ASM.name, NULL);
+ WszSetEnvironmentVariable(CLRConfig::EXTERNAL_LEGACY_APPDOMAIN_MANAGER_TYPE.name, NULL);
+ }
+ }
+ }
+#endif // !FEATURE_CORECLR
+
+ // If we found an AppDomain manager to use, create and initialize it
+ // Otherwise, initialize the config flags.
+ if (HasAppDomainManagerInfo())
+ {
+ // If the initialization flags promise that the domain manager isn't going to modify security, then do a
+ // pre-resolution of the domain now so that we can do some basic verification of the state later. We
+ // don't care about the actual result now, just that the resolution took place to compare against later.
+ if (GetAppDomainManagerInitializeNewDomainFlags() & eInitializeNewDomainFlags_NoSecurityChanges)
+ {
+ BOOL fIsFullyTrusted;
+ BOOL fIsHomogeneous;
+ GetSecurityDescriptor()->PreResolve(&fIsFullyTrusted, &fIsHomogeneous);
+ }
+
+ OBJECTREF orThis = GetExposedObject();
+ GCPROTECT_BEGIN(orThis);
+
+ MethodDescCallSite createDomainManager(METHOD__APP_DOMAIN__CREATE_APP_DOMAIN_MANAGER);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(orThis)
+ };
+
+ createDomainManager.Call(args);
+
+ GCPROTECT_END();
+ }
+ else
+ {
+ OBJECTREF orThis = GetExposedObject();
+ GCPROTECT_BEGIN(orThis);
+
+ MethodDescCallSite initCompatFlags(METHOD__APP_DOMAIN__INITIALIZE_COMPATIBILITY_FLAGS);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(orThis)
+ };
+
+ initCompatFlags.Call(args);
+
+ GCPROTECT_END();
+ }
+}
+
+#ifdef FEATURE_CLICKONCE
+
+//---------------------------------------------------------------------------------------
+//
+// If we are launching a ClickOnce application, set up the default domain with the details
+// of the application.
+//
+
+void AppDomain::InitializeDefaultClickOnceDomain()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ PRECONDITION(GetId().m_dwId == DefaultADID);
+ }
+ CONTRACTL_END;
+
+ //
+ // If the CLR is being started to run a ClickOnce application, then capture the information about the
+    // application to set up the default domain with.
+ //
+
+ if (CorCommandLine::m_pwszAppFullName != NULL)
+ {
+ struct
+ {
+ OBJECTREF orThis;
+ STRINGREF orAppFullName;
+ PTRARRAYREF orManifestPathsArray;
+ PTRARRAYREF orActivationDataArray;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.orAppFullName = StringObject::NewString(CorCommandLine::m_pwszAppFullName);
+
+ // If specific manifests have been pointed at, make a note of them
+ if (CorCommandLine::m_dwManifestPaths > 0)
+ {
+ _ASSERTE(CorCommandLine::m_ppwszManifestPaths != NULL);
+
+ gc.orManifestPathsArray = static_cast<PTRARRAYREF>(AllocateObjectArray(CorCommandLine::m_dwManifestPaths, g_pStringClass));
+ for (DWORD i = 0; i < CorCommandLine::m_dwManifestPaths; ++i)
+ {
+ STRINGREF str = StringObject::NewString(CorCommandLine::m_ppwszManifestPaths[i]);
+ gc.orManifestPathsArray->SetAt(i, str);
+ }
+ }
+
+ // Check for any activation parameters to pass to the ClickOnce application
+ if (CorCommandLine::m_dwActivationData > 0)
+ {
+ _ASSERTE(CorCommandLine::m_ppwszActivationData != NULL);
+
+ gc.orActivationDataArray = static_cast<PTRARRAYREF>(AllocateObjectArray(CorCommandLine::m_dwActivationData, g_pStringClass));
+ for (DWORD i = 0; i < CorCommandLine::m_dwActivationData; ++i)
+ {
+ STRINGREF str = StringObject::NewString(CorCommandLine::m_ppwszActivationData[i]);
+ gc.orActivationDataArray->SetAt(i, str);
+ }
+ }
+
+ gc.orThis = GetExposedObject();
+
+ MethodDescCallSite setupDefaultClickOnceDomain(METHOD__APP_DOMAIN__SETUP_DEFAULT_CLICKONCE_DOMAIN);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(gc.orThis),
+ ObjToArgSlot(gc.orAppFullName),
+ ObjToArgSlot(gc.orManifestPathsArray),
+ ObjToArgSlot(gc.orActivationDataArray),
+ };
+ setupDefaultClickOnceDomain.Call(args);
+
+ GCPROTECT_END();
+ }
+}
+
+BOOL AppDomain::IsClickOnceAppDomain()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ return ((APPDOMAINREF)GetExposedObject())->HasActivationContext();
+}
+
+#endif // FEATURE_CLICKONCE
+
+//---------------------------------------------------------------------------------------
+//
+// Initialize the security settings in the default AppDomain.
+//
+
+void AppDomain::InitializeDefaultDomainSecurity()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ PRECONDITION(GetId().m_dwId == DefaultADID);
+ }
+ CONTRACTL_END;
+
+ OBJECTREF orThis = GetExposedObject();
+ GCPROTECT_BEGIN(orThis);
+
+ MethodDescCallSite initializeSecurity(METHOD__APP_DOMAIN__INITIALIZE_DOMAIN_SECURITY);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(orThis),
+ ObjToArgSlot(NULL),
+ ObjToArgSlot(NULL),
+ static_cast<ARG_SLOT>(FALSE),
+ ObjToArgSlot(NULL),
+ static_cast<ARG_SLOT>(FALSE)
+ };
+
+ initializeSecurity.Call(args);
+
+ GCPROTECT_END();
+}
+
+CLREvent * AppDomain::g_pUnloadStartEvent;
+
+void AppDomain::CreateADUnloadWorker()
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_CORECLR
+ // Do not create adUnload thread if there is only default domain
+ if(IsSingleAppDomain())
+ return;
+#endif
+
+Retry:
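+    // g_fADUnloadWorkerOK acts as a tri-state latch: -1 = no worker, -2 = creation
+    // in progress, 1 = worker running. The first thread to CAS it from -1 to -2
+    // creates the worker thread; everyone else waits below until the state leaves -2.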
+ BOOL fCreator = FALSE;
+ if (FastInterlockCompareExchange((LONG *)&g_fADUnloadWorkerOK,-2,-1)==-1) //we're first
+ {
+#ifdef _TARGET_X86_ // use the smallest possible stack on X86
+ DWORD stackSize = 128 * 1024;
+#else
+ DWORD stackSize = 512 * 1024; // leave X64 unchanged since we have plenty of VM
+#endif
+ Thread *pThread = SetupUnstartedThread();
+ if (pThread->CreateNewThread(stackSize, ADUnloadThreadStart, pThread))
+ {
+ fCreator = TRUE;
+ DWORD dwRet;
+ dwRet = pThread->StartThread();
+
+ // When running under a user mode native debugger there is a race
+ // between the moment we've created the thread (in CreateNewThread) and
+ // the moment we resume it (in StartThread); the debugger may receive
+ // the "ct" (create thread) notification, and it will attempt to
+ // suspend/resume all threads in the process. Now imagine the debugger
+ // resumes this thread first, and only later does it try to resume the
+ // newly created thread (the ADU worker thread). In these conditions our
+            // call to ResumeThread may come before the debugger's call to ResumeThread,
+            // actually causing dwRet to equal 2.
+ // We cannot use IsDebuggerPresent() in the condition below because the
+ // debugger may have been detached between the time it got the notification
+ // and the moment we execute the test below.
+ _ASSERTE(dwRet == 1 || dwRet == 2);
+ }
+ else
+ {
+ pThread->DecExternalCount(FALSE);
+ FastInterlockExchange((LONG *)&g_fADUnloadWorkerOK, -1);
+ ThrowOutOfMemory();
+ }
+ }
+
+ YIELD_WHILE (g_fADUnloadWorkerOK == -2);
+
+ if (g_fADUnloadWorkerOK == -1) {
+ if (fCreator)
+ {
+ ThrowOutOfMemory();
+ }
+ else
+ {
+ goto Retry;
+ }
+ }
+}
+
+/*static*/ void AppDomain::ADUnloadWorkerHelper(AppDomain *pDomain)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ ADUnloadSink* pADUnloadSink=pDomain->GetADUnloadSinkForUnload();
+ HRESULT hr=S_OK;
+
+ EX_TRY
+ {
+ pDomain->Unload(FALSE);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if(pADUnloadSink)
+ {
+ SystemDomain::LockHolder lh;
+ pADUnloadSink->ReportUnloadResult(hr,NULL);
+ pADUnloadSink->Release();
+ }
+}
+
+void AppDomain::DoADUnloadWork()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ DWORD i = 1;
+ while (TRUE) {
+
+ AppDomain *pDomainToUnload = NULL;
+
+ {
+ // Take the lock so that no domain can be added or removed from the system domain
+ SystemDomain::LockHolder lh;
+
+ DWORD numDomain = SystemDomain::GetCurrentAppDomainMaxIndex();
+ for (; i <= numDomain; i ++) {
+ AppDomain * pDomain = SystemDomain::TestGetAppDomainAtIndex(ADIndex(i));
+ //
+ // @todo: We used to also select a domain if pDomain->IsUnload() returned true. But that causes
+ // problems when we've failed to completely unload the AD in the past. If we've reached the CLEARED
+ // stage, for instance, then there will be no default context and AppDomain::Exit() will simply crash.
+ //
+ if (pDomain && pDomain->IsUnloadRequested())
+ {
+ pDomainToUnload = pDomain;
+ i ++;
+ break;
+ }
+ }
+ }
+
+ if (!pDomainToUnload) {
+ break;
+ }
+
+ // We are the only thread that can unload domains so no one else can delete the appdomain
+ ADUnloadWorkerHelper(pDomainToUnload);
+ }
+}
+
+static void DoADUnloadWorkHelper()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ EX_TRY {
+ AppDomain::DoADUnloadWork();
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+ULONGLONG g_ObjFinalizeStartTime = 0;
+Volatile<BOOL> g_FinalizerIsRunning = FALSE;
+Volatile<ULONG> g_FinalizerLoopCount = 0;
+
+ULONGLONG GetObjFinalizeStartTime()
+{
+ LIMITED_METHOD_CONTRACT;
+ return g_ObjFinalizeStartTime;
+}
+
+void FinalizerThreadAbortOnTimeout()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ {
+        // If the finalizer thread is blocked because the scheduler is running another task,
+        // or it is waiting for another thread, we first see if we can get the finalizer
+        // thread running again.
+ Thread::ThreadAbortWatchDog();
+ }
+
+ EX_TRY
+ {
+ Thread *pFinalizerThread = FinalizerThread::GetFinalizerThread();
+ EPolicyAction action = GetEEPolicy()->GetActionOnTimeout(OPR_FinalizerRun, pFinalizerThread);
+ switch (action)
+ {
+ case eAbortThread:
+ GetEEPolicy()->NotifyHostOnTimeout(OPR_FinalizerRun, action);
+ pFinalizerThread->UserAbort(Thread::TAR_Thread,
+ EEPolicy::TA_Safe,
+ INFINITE,
+ Thread::UAC_FinalizerTimeout);
+ break;
+ case eRudeAbortThread:
+ GetEEPolicy()->NotifyHostOnTimeout(OPR_FinalizerRun, action);
+ pFinalizerThread->UserAbort(Thread::TAR_Thread,
+ EEPolicy::TA_Rude,
+ INFINITE,
+ Thread::UAC_FinalizerTimeout);
+ break;
+ case eUnloadAppDomain:
+ {
+ AppDomain *pDomain = pFinalizerThread->GetDomain();
+ pFinalizerThread->UserAbort(Thread::TAR_Thread,
+ EEPolicy::TA_Safe,
+ INFINITE,
+ Thread::UAC_FinalizerTimeout);
+ if (!pDomain->IsDefaultDomain())
+ {
+ GetEEPolicy()->NotifyHostOnTimeout(OPR_FinalizerRun, action);
+ pDomain->EnableADUnloadWorker(EEPolicy::ADU_Safe);
+ }
+ }
+ break;
+ case eRudeUnloadAppDomain:
+ {
+ AppDomain *pDomain = pFinalizerThread->GetDomain();
+ pFinalizerThread->UserAbort(Thread::TAR_Thread,
+ EEPolicy::TA_Rude,
+ INFINITE,
+ Thread::UAC_FinalizerTimeout);
+ if (!pDomain->IsDefaultDomain())
+ {
+ GetEEPolicy()->NotifyHostOnTimeout(OPR_FinalizerRun, action);
+ pDomain->EnableADUnloadWorker(EEPolicy::ADU_Rude);
+ }
+ }
+ break;
+ case eExitProcess:
+ case eFastExitProcess:
+ case eRudeExitProcess:
+ case eDisableRuntime:
+ GetEEPolicy()->NotifyHostOnTimeout(OPR_FinalizerRun, action);
+ EEPolicy::HandleExitProcessFromEscalation(action, HOST_E_EXITPROCESS_TIMEOUT);
+ _ASSERTE (!"Should not get here");
+ break;
+ default:
+ break;
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+enum WorkType
+{
+ WT_UnloadDomain = 0x1,
+ WT_ThreadAbort = 0x2,
+ WT_FinalizerThread = 0x4,
+ WT_ClearCollectedDomains=0x8
+};
+
+static Volatile<DWORD> s_WorkType = 0;
+
+
+DWORD WINAPI AppDomain::ADUnloadThreadStart(void *args)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ DISABLED(GC_TRIGGERS);
+
+ // This function will always be at the very bottom of the stack. The only
+        // user code it calls is the AppDomainUnload notifications, which we will
+        // not be hardening for Whidbey.
+ //
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ ClrFlsSetThreadType (ThreadType_ADUnloadHelper);
+
+ Thread *pThread = (Thread*)args;
+ bool fOK = (pThread->HasStarted() != 0);
+
+ {
+ GCX_MAYBE_PREEMP(fOK);
+
+ if (fOK)
+ {
+ EX_TRY
+ {
+ if (CLRTaskHosted())
+ {
+ // ADUnload helper thread is critical. We do not want it to share scheduler
+ // with other tasks.
+ pThread->LeaveRuntime(0);
+ }
+ }
+ EX_CATCH
+ {
+ fOK = false;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ _ASSERTE (g_fADUnloadWorkerOK == -2);
+
+ FastInterlockExchange((LONG *)&g_fADUnloadWorkerOK,fOK?1:-1);
+
+ if (!fOK)
+ {
+ DestroyThread(pThread);
+ goto Exit;
+ }
+
+ pThread->SetBackground(TRUE);
+
+ pThread->SetThreadStateNC(Thread::TSNC_ADUnloadHelper);
+
+ while (TRUE) {
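+        // Compute how long to wait before re-checking for work: the sooner of the
+        // next thread-abort deadline (TAtimeout) and the finalizer-run timeout.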
+ DWORD TAtimeout = INFINITE;
+ ULONGLONG endTime = Thread::GetNextSelfAbortEndTime();
+ ULONGLONG curTime = CLRGetTickCount64();
+ if (endTime <= curTime) {
+ TAtimeout = 5;
+ }
+ else
+ {
+ ULONGLONG diff = endTime - curTime;
+ if (diff < MAXULONG)
+ {
+ TAtimeout = (DWORD)diff;
+ }
+ }
+ ULONGLONG finalizeStartTime = GetObjFinalizeStartTime();
+ DWORD finalizeTimeout = INFINITE;
+ DWORD finalizeTimeoutSetting = GetEEPolicy()->GetTimeout(OPR_FinalizerRun);
+ if (finalizeTimeoutSetting != INFINITE && g_FinalizerIsRunning)
+ {
+ if (finalizeStartTime == 0)
+ {
+ finalizeTimeout = finalizeTimeoutSetting;
+ }
+ else
+ {
+ endTime = finalizeStartTime + finalizeTimeoutSetting;
+ if (endTime <= curTime) {
+ finalizeTimeout = 0;
+ }
+ else
+ {
+ ULONGLONG diff = endTime - curTime;
+ if (diff < MAXULONG)
+ {
+ finalizeTimeout = (DWORD)diff;
+ }
+ }
+ }
+ }
+
+ if (AppDomain::HasWorkForFinalizerThread())
+ {
+ if (finalizeTimeout > finalizeTimeoutSetting)
+ {
+ finalizeTimeout = finalizeTimeoutSetting;
+ }
+ }
+
+ DWORD timeout = INFINITE;
+ if (finalizeTimeout <= TAtimeout)
+ {
+ timeout = finalizeTimeout;
+ }
+ else
+ {
+ timeout = TAtimeout;
+ }
+
+ if (timeout != 0)
+ {
+ LOG((LF_APPDOMAIN, LL_INFO10, "Waiting to start unload\n"));
+ g_pUnloadStartEvent->Wait(timeout,FALSE);
+ }
+
+ if (finalizeTimeout != INFINITE || (s_WorkType & WT_FinalizerThread) != 0)
+ {
+ STRESS_LOG0(LF_ALWAYS, LL_ALWAYS, "ADUnloadThreadStart work for Finalizer thread\n");
+ FastInterlockAnd(&s_WorkType, ~WT_FinalizerThread);
+            // Only watch the finalizer thread if a finalizer method or the unload event is being processed
+ if (GetObjFinalizeStartTime() == finalizeStartTime && finalizeStartTime != 0 && g_FinalizerIsRunning)
+ {
+ if (CLRGetTickCount64() >= finalizeStartTime+finalizeTimeoutSetting)
+ {
+ GCX_COOP();
+ FinalizerThreadAbortOnTimeout();
+ }
+ }
+ if (s_fProcessUnloadDomainEvent && g_FinalizerIsRunning)
+ {
+ GCX_COOP();
+ FinalizerThreadAbortOnTimeout();
+ }
+ }
+
+ if (TAtimeout != INFINITE || (s_WorkType & WT_ThreadAbort) != 0)
+ {
+ STRESS_LOG0(LF_ALWAYS, LL_ALWAYS, "ADUnloadThreadStart work for thread abort\n");
+ FastInterlockAnd(&s_WorkType, ~WT_ThreadAbort);
+ GCX_COOP();
+ Thread::ThreadAbortWatchDog();
+ }
+
+ if ((s_WorkType & WT_UnloadDomain) != 0 && !AppDomain::HasWorkForFinalizerThread())
+ {
+ STRESS_LOG0(LF_ALWAYS, LL_ALWAYS, "ADUnloadThreadStart work for AD unload\n");
+ FastInterlockAnd(&s_WorkType, ~WT_UnloadDomain);
+ GCX_COOP();
+ DoADUnloadWorkHelper();
+ }
+
+ if ((s_WorkType & WT_ClearCollectedDomains) != 0)
+ {
+ STRESS_LOG0(LF_ALWAYS, LL_ALWAYS, "ADUnloadThreadStart work for AD cleanup\n");
+ FastInterlockAnd(&s_WorkType, ~WT_ClearCollectedDomains);
+ GCX_COOP();
+ SystemDomain::System()->ClearCollectedDomains();
+ }
+
+ }
+Exit:;
+ }
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return 0;
+}
+
+void AppDomain::EnableADUnloadWorker()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT; // Called during a SO
+ }
+ CONTRACTL_END;
+
+ EEPolicy::AppDomainUnloadTypes type = EEPolicy::ADU_Safe;
+
+#ifdef _DEBUG
+ DWORD hostTestADUnload = g_pConfig->GetHostTestADUnload();
+ if (hostTestADUnload == 2) {
+ type = EEPolicy::ADU_Rude;
+ }
+#endif // _DEBUG
+
+ EnableADUnloadWorker(type);
+}
+
+void AppDomain::EnableADUnloadWorker(EEPolicy::AppDomainUnloadTypes type, BOOL fHasStack)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT; // Called during a SO
+ }
+ CONTRACTL_END;
+
+ FastInterlockOr (&s_WorkType, WT_UnloadDomain);
+
+ LONG stage = m_Stage;
+ static_assert_no_msg(sizeof(m_Stage) == sizeof(int));
+
+ _ASSERTE(!IsDefaultDomain());
+
+ // Mark unload requested.
+ if (type == EEPolicy::ADU_Rude) {
+ SetRudeUnload();
+ }
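+    // Advance the stage with a CAS loop so we never move it backwards if another
+    // thread has already pushed the unload further along.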
+ while (stage < STAGE_UNLOAD_REQUESTED) {
+ stage = FastInterlockCompareExchange((LONG*)&m_Stage,STAGE_UNLOAD_REQUESTED,stage);
+ }
+
+ if (!fHasStack)
+ {
+        // Cannot call Set due to limited stack.
+ return;
+ }
+ LOG((LF_APPDOMAIN, LL_INFO10, "Enabling unload worker\n"));
+ g_pUnloadStartEvent->Set();
+}
+
+void AppDomain::EnableADUnloadWorkerForThreadAbort()
+{
+ LIMITED_METHOD_CONTRACT;
+ STRESS_LOG0(LF_ALWAYS, LL_ALWAYS, "Enabling unload worker for thread abort\n");
+ LOG((LF_APPDOMAIN, LL_INFO10, "Enabling unload worker for thread abort\n"));
+ FastInterlockOr (&s_WorkType, WT_ThreadAbort);
+ g_pUnloadStartEvent->Set();
+}
+
+
+void AppDomain::EnableADUnloadWorkerForFinalizer()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (GetEEPolicy()->GetTimeout(OPR_FinalizerRun) != INFINITE)
+ {
+ LOG((LF_APPDOMAIN, LL_INFO10, "Enabling unload worker for Finalizer Thread\n"));
+ FastInterlockOr (&s_WorkType, WT_FinalizerThread);
+ g_pUnloadStartEvent->Set();
+ }
+}
+
+void AppDomain::EnableADUnloadWorkerForCollectedADCleanup()
+{
+ LIMITED_METHOD_CONTRACT;
+ LOG((LF_APPDOMAIN, LL_INFO10, "Enabling unload worker for collected domains\n"));
+ FastInterlockOr (&s_WorkType, WT_ClearCollectedDomains);
+ g_pUnloadStartEvent->Set();
+}
+
+
+void SystemDomain::ClearCollectedDomains()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ AppDomain* pDomainsToClear=NULL;
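+    // Under the lock, splice all STAGE_COLLECTED domains onto a private list so that
+    // Close/Release can run below without holding m_DelayedUnloadCrst.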
+ {
+ CrstHolder lh(&m_DelayedUnloadCrst);
+ for (AppDomain** ppDomain=&m_pDelayedUnloadList;(*ppDomain)!=NULL; )
+ {
+ if ((*ppDomain)->m_Stage==AppDomain::STAGE_COLLECTED)
+ {
+ AppDomain* pAppDomain=*ppDomain;
+ *ppDomain=(*ppDomain)->m_pNextInDelayedUnloadList;
+ pAppDomain->m_pNextInDelayedUnloadList=pDomainsToClear;
+ pDomainsToClear=pAppDomain;
+ }
+ else
+ ppDomain=&((*ppDomain)->m_pNextInDelayedUnloadList);
+ }
+ }
+
+ for (AppDomain* pDomain=pDomainsToClear;pDomain!=NULL;)
+ {
+ AppDomain* pNext=pDomain->m_pNextInDelayedUnloadList;
+ pDomain->Close(); //NOTHROW!
+ pDomain->Release();
+ pDomain=pNext;
+ }
+}
+
+void SystemDomain::ProcessClearingDomains()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ CrstHolder lh(&m_DelayedUnloadCrst);
+
+ for (AppDomain** ppDomain=&m_pDelayedUnloadList;(*ppDomain)!=NULL; )
+ {
+ if ((*ppDomain)->m_Stage==AppDomain::STAGE_HANDLETABLE_NOACCESS)
+ {
+ AppDomain* pAppDomain=*ppDomain;
+ pAppDomain->SetStage(AppDomain::STAGE_CLEARED);
+ }
+ ppDomain=&((*ppDomain)->m_pNextInDelayedUnloadList);
+ }
+
+ if (!m_UnloadIsAsync)
+ {
+ // For synchronous mode, we are now done with the list.
+ m_pDelayedUnloadList = NULL;
+ }
+}
+
+void SystemDomain::ProcessDelayedUnloadDomains()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ int iGCRefPoint=GCHeap::GetGCHeap()->CollectionCount(GCHeap::GetGCHeap()->GetMaxGeneration());
+ if (GCHeap::GetGCHeap()->IsConcurrentGCInProgress())
+ iGCRefPoint--;
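+    // iGCRefPoint now counts only fully completed max-generation GCs; a concurrent
+    // GC still in flight may have started before a domain was cleared, so it does
+    // not prove that the domain's references are gone.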
+
+ BOOL bAppDomainToCleanup = FALSE;
+ LoaderAllocator * pAllocatorsToDelete = NULL;
+
+ {
+ CrstHolder lh(&m_DelayedUnloadCrst);
+
+ for (AppDomain* pDomain=m_pDelayedUnloadList; pDomain!=NULL; pDomain=pDomain->m_pNextInDelayedUnloadList)
+ {
+ if (pDomain->m_Stage==AppDomain::STAGE_CLEARED)
+ {
+ // Compare with 0 to handle overflows gracefully
+ if (0 < iGCRefPoint - pDomain->GetGCRefPoint())
+ {
+ bAppDomainToCleanup=TRUE;
+ pDomain->SetStage(AppDomain::STAGE_COLLECTED);
+ }
+ }
+ }
+
+ LoaderAllocator ** ppAllocator=&m_pDelayedUnloadListOfLoaderAllocators;
+ while (*ppAllocator!= NULL)
+ {
+ LoaderAllocator * pAllocator = *ppAllocator;
+ if (0 < iGCRefPoint - pAllocator->GetGCRefPoint())
+ {
+ *ppAllocator = pAllocator->m_pLoaderAllocatorDestroyNext;
+
+ pAllocator->m_pLoaderAllocatorDestroyNext = pAllocatorsToDelete;
+ pAllocatorsToDelete = pAllocator;
+ }
+ else
+ {
+ ppAllocator = &pAllocator->m_pLoaderAllocatorDestroyNext;
+ }
+ }
+ }
+
+ if (bAppDomainToCleanup)
+ AppDomain::EnableADUnloadWorkerForCollectedADCleanup();
+
+    // Delete collected loader allocators on the finalizer thread. We cannot offload this to the
+    // appdomain unload thread because there is not guaranteed to be one, and it is not that
+    // expensive an operation anyway.
+ while (pAllocatorsToDelete != NULL)
+ {
+ LoaderAllocator * pAllocator = pAllocatorsToDelete;
+ pAllocatorsToDelete = pAllocator->m_pLoaderAllocatorDestroyNext;
+ delete pAllocator;
+ }
+}
+
+#endif // CROSSGEN_COMPILE
+
+AppDomainFromIDHolder::AppDomainFromIDHolder(ADID adId, BOOL bUnsafePoint, SyncType synctype)
+{
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+#ifdef _DEBUG
+ m_bAcquired=false;
+ m_bChecked=false;
+ m_type=synctype;
+
+#endif
+ Assign(adId, bUnsafePoint);
+}
+
+AppDomainFromIDHolder::AppDomainFromIDHolder(SyncType synctype)
+{
+ LIMITED_METHOD_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ m_pDomain=NULL;
+#ifdef _DEBUG
+ m_bAcquired=false;
+ m_bChecked=false;
+ m_type=synctype;
+#endif
+}
+
+#ifndef CROSSGEN_COMPILE
+void ADUnloadSink::ReportUnloadResult (HRESULT hr, OBJECTREF* pException)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(m_UnloadCompleteEvent.IsValid());
+ }
+ CONTRACTL_END;
+
+ //pException is unused;
+ m_UnloadResult=hr;
+ m_UnloadCompleteEvent.Set();
+};
+
+void ADUnloadSink::WaitUnloadCompletion()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(m_UnloadCompleteEvent.IsValid());
+ }
+ CONTRACTL_END;
+
+ CONTRACT_VIOLATION(FaultViolation);
+ m_UnloadCompleteEvent.WaitEx(INFINITE, (WaitMode)(WaitMode_Alertable | WaitMode_ADUnload));
+};
+
+ADUnloadSink* AppDomain::PrepareForWaitUnloadCompletion()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(SystemDomain::IsUnderDomainLock());
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ ADUnloadSink* pADSink=GetADUnloadSink();
+ PREFIX_ASSUME(pADSink!=NULL);
+ if (m_Stage < AppDomain::STAGE_UNLOAD_REQUESTED) //we're first
+ {
+ pADSink->Reset();
+ SetUnloadRequestThread(GetThread());
+ }
+ return pADSink;
+};
+
+ADUnloadSink::ADUnloadSink()
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ m_cRef=1;
+ m_UnloadCompleteEvent.CreateManualEvent(FALSE);
+ m_UnloadResult=S_OK;
+};
+
+ADUnloadSink::~ADUnloadSink()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ m_UnloadCompleteEvent.CloseEvent();
+
+};
+
+
+ULONG ADUnloadSink::AddRef()
+{
+ LIMITED_METHOD_CONTRACT;
+ return InterlockedIncrement(&m_cRef);
+};
+
+ULONG ADUnloadSink::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ ULONG ulRef = InterlockedDecrement(&m_cRef);
+ if (ulRef == 0)
+ {
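+        // A return of 0 means this thread observed the last reference being dropped;
+        // no other thread can still hold one, so deleting here is safe.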
+ delete this;
+ return 0;
+ }
+ return ulRef;
+};
+
+void ADUnloadSink::Reset()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_UnloadResult=S_OK;
+ m_UnloadCompleteEvent.Reset();
+}
+
+ADUnloadSink* AppDomain::GetADUnloadSink()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(SystemDomain::IsUnderDomainLock());
+ if(m_ADUnloadSink)
+ m_ADUnloadSink->AddRef();
+ return m_ADUnloadSink;
+};
+
+ADUnloadSink* AppDomain::GetADUnloadSinkForUnload()
+{
+ // unload thread only. Doesn't need to have AD lock
+ LIMITED_METHOD_CONTRACT;
+ if(m_ADUnloadSink)
+ m_ADUnloadSink->AddRef();
+ return m_ADUnloadSink;
+}
+#endif // CROSSGEN_COMPILE
+
+void AppDomain::EnumStaticGCRefs(promote_func* fn, ScanContext* sc)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACT_END;
+
+ _ASSERTE(GCHeap::IsGCInProgress() &&
+ GCHeap::IsServerHeap() &&
+ IsGCSpecialThread());
+
+ AppDomain::AssemblyIterator asmIterator = IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+ while (asmIterator.Next(pDomainAssembly.This()))
+ {
+ // @TODO: Review when DomainAssemblies get added.
+ _ASSERTE(pDomainAssembly != NULL);
+ pDomainAssembly->EnumStaticGCRefs(fn, sc);
+ }
+
+ RETURN;
+}
+
+#endif // !DACCESS_COMPILE
+
+//------------------------------------------------------------------------
+UINT32 BaseDomain::GetTypeID(PTR_MethodTable pMT) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(pMT->GetDomain() == this);
+ } CONTRACTL_END;
+
+ return m_typeIDMap.GetTypeID(pMT);
+}
+
+//------------------------------------------------------------------------
+// Returns the ID of the type if found. If not found, returns INVALID_TYPE_ID
+UINT32 BaseDomain::LookupTypeID(PTR_MethodTable pMT)
+{
+ CONTRACTL {
+ NOTHROW;
+ SO_TOLERANT;
+ WRAPPER(GC_TRIGGERS);
+ PRECONDITION(pMT->GetDomain() == this);
+ } CONTRACTL_END;
+
+ return m_typeIDMap.LookupTypeID(pMT);
+}
+
+//------------------------------------------------------------------------
+PTR_MethodTable BaseDomain::LookupType(UINT32 id) {
+ CONTRACTL {
+ NOTHROW;
+ SO_TOLERANT;
+ WRAPPER(GC_TRIGGERS);
+ CONSISTENCY_CHECK(id != TYPE_ID_THIS_CLASS);
+ } CONTRACTL_END;
+
+ PTR_MethodTable pMT = m_typeIDMap.LookupType(id);
+ if (pMT == NULL && !IsSharedDomain()) {
+ pMT = SharedDomain::GetDomain()->LookupType(id);
+ }
+
+ CONSISTENCY_CHECK(CheckPointer(pMT));
+ CONSISTENCY_CHECK(pMT->IsInterface());
+ return pMT;
+}
+
+#ifndef DACCESS_COMPILE
+
+#ifndef FEATURE_CORECLR
+//------------------------------------------------------------------------
+DWORD* SetupCompatibilityFlags()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ WCHAR buf[2] = { '\0', '\0' };
+
+ FAULT_NOT_FATAL(); // we can simply give up
+
+ if (WszGetEnvironmentVariable(W("UnsupportedCompatSwitchesEnabled"), buf, COUNTOF(buf)) == 0)
+ return NULL;
+
+ if (buf[0] != '1' || buf[1] != '\0')
+ return NULL;
+
+ static const LPCWSTR rgFlagNames[] = {
+#define COMPATFLAGDEF(name) TEXT(#name),
+#include "compatibilityflagsdef.h"
+ };
+
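+    // (compatCount + 31) / 32 rounds up to the number of 32-bit DWORDs needed to hold
+    // one bit per compatibility flag.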
+ int size = (compatCount+31) / 32;
+ DWORD* pFlags = new (nothrow) DWORD[size];
+ if (pFlags == NULL)
+ return NULL;
+ ZeroMemory(pFlags, size * sizeof(DWORD));
+
+ for (int i = 0; i < COUNTOF(rgFlagNames); i++)
+ {
+ if (WszGetEnvironmentVariable(rgFlagNames[i], buf, COUNTOF(buf)) == 0)
+ continue;
+
+ if (buf[0] != '1' || buf[1] != '\0')
+ continue;
+
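+        // Record flag i at bit (i % 32) of DWORD (i / 32); GetCompatibilityFlag below
+        // reads it back with the same indexing.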
+ pFlags[i / 32] |= 1 << (i % 32);
+ }
+
+ return pFlags;
+}
+
+//------------------------------------------------------------------------
+static VolatilePtr<DWORD> g_pCompatibilityFlags = (DWORD*)(-1);
+
+DWORD* GetGlobalCompatibilityFlags()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ if (g_pCompatibilityFlags == (DWORD*)(-1))
+ {
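+        // First-writer-wins publication: each racing thread may compute its own flag
+        // array, but only one CAS from the (-1) sentinel succeeds; losers delete their copy.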
+ DWORD *pCompatibilityFlags = SetupCompatibilityFlags();
+
+ if (FastInterlockCompareExchangePointer(g_pCompatibilityFlags.GetPointer(), pCompatibilityFlags, reinterpret_cast<DWORD *>(-1)) != (VOID*)(-1))
+ {
+ delete [] pCompatibilityFlags;
+ }
+ }
+
+ return g_pCompatibilityFlags;
+}
+#endif // !FEATURE_CORECLR
+
+//------------------------------------------------------------------------
+BOOL GetCompatibilityFlag(CompatibilityFlag flag)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+#ifndef FEATURE_CORECLR
+ DWORD *pFlags = GetGlobalCompatibilityFlags();
+
+ if (pFlags != NULL)
+ return (pFlags[flag / 32] & (1 << (flag % 32))) ? TRUE : FALSE;
+ else
+ return FALSE;
+#else // !FEATURE_CORECLR
+ return FALSE;
+#endif // !FEATURE_CORECLR
+}
+#endif // !DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+BOOL
+AppDomain::AssemblyIterator::Next(
+ CollectibleAssemblyHolder<DomainAssembly *> * pDomainAssemblyHolder)
+{
+ CONTRACTL {
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS); // Triggers only in MODE_COOPERATIVE (by taking the lock)
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ CrstHolder ch(m_pAppDomain->GetAssemblyListLock());
+ return Next_Unlocked(pDomainAssemblyHolder);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Note: Does not lock the assembly list, but locks collectible assemblies for adding references.
+//
+BOOL
+AppDomain::AssemblyIterator::Next_Unlocked(
+ CollectibleAssemblyHolder<DomainAssembly *> * pDomainAssemblyHolder)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ _ASSERTE(m_pAppDomain->GetAssemblyListLock()->OwnedByCurrentThread());
+#endif
+
+ while (m_Iterator.Next())
+ {
+ // Get element from the list/iterator (without adding reference to the assembly)
+ DomainAssembly * pDomainAssembly = dac_cast<PTR_DomainAssembly>(m_Iterator.GetElement());
+ if (pDomainAssembly == NULL)
+ {
+ continue;
+ }
+
+ if (pDomainAssembly->IsError())
+ {
+ if (m_assemblyIterationFlags & kIncludeFailedToLoad)
+ {
+ *pDomainAssemblyHolder = pDomainAssembly;
+ return TRUE;
+ }
+ continue; // reject
+ }
+
+ // First, reject DomainAssemblies whose load status is not to be included in
+ // the enumeration
+
+ if (pDomainAssembly->IsAvailableToProfilers() &&
+ (m_assemblyIterationFlags & kIncludeAvailableToProfilers))
+ {
+ // The assembly has reached the state at which we would notify profilers,
+ // and we're supposed to include such assemblies in the enumeration. So
+ // don't reject it (i.e., noop here, and don't bother with the rest of
+ // the load status checks). Check for this first, since
+ // kIncludeAvailableToProfilers contains some loaded AND loading
+ // assemblies.
+ }
+ else if (pDomainAssembly->IsLoaded())
+ {
+ // A loaded assembly
+ if (!(m_assemblyIterationFlags & kIncludeLoaded))
+ {
+ continue; // reject
+ }
+ }
+ else
+ {
+ // A loading assembly
+ if (!(m_assemblyIterationFlags & kIncludeLoading))
+ {
+ continue; // reject
+ }
+ }
+
+ // Next, reject DomainAssemblies whose execution / introspection status is
+ // not to be included in the enumeration
+
+ if (pDomainAssembly->IsIntrospectionOnly())
+ {
+ // introspection assembly
+ if (!(m_assemblyIterationFlags & kIncludeIntrospection))
+ {
+ continue; // reject
+ }
+ }
+ else
+ {
+ // execution assembly
+ if (!(m_assemblyIterationFlags & kIncludeExecution))
+ {
+ continue; // reject
+ }
+ }
+
+ // Next, reject collectible assemblies
+ if (pDomainAssembly->IsCollectible())
+ {
+ if (m_assemblyIterationFlags & kExcludeCollectible)
+ {
+ _ASSERTE(!(m_assemblyIterationFlags & kIncludeCollected));
+ continue; // reject
+ }
+
+ // Un-tenured collectible assemblies should not be returned. (This can only happen in a brief
+ // window during collectible assembly creation. No thread should need to have a pointer
+ // to the just allocated DomainAssembly at this stage.)
+ if (!pDomainAssembly->GetAssembly()->GetManifestModule()->IsTenured())
+ {
+ continue; // reject
+ }
+
+ if (pDomainAssembly->GetLoaderAllocator()->AddReferenceIfAlive())
+ { // The assembly is alive
+
+ // Set the holder value (incl. increasing ref-count)
+ *pDomainAssemblyHolder = pDomainAssembly;
+
+ // Now release the reference we took in the if-condition
+ pDomainAssembly->GetLoaderAllocator()->Release();
+ return TRUE;
+ }
+ // The assembly is not alive anymore (and we didn't increase its ref-count in the
+ // if-condition)
+
+ if (!(m_assemblyIterationFlags & kIncludeCollected))
+ {
+ continue; // reject
+ }
+ // Set the holder value to assembly with 0 ref-count without increasing the ref-count (won't
+ // call Release either)
+ pDomainAssemblyHolder->Assign(pDomainAssembly, FALSE);
+ return TRUE;
+ }
+
+ *pDomainAssemblyHolder = pDomainAssembly;
+ return TRUE;
+ }
+
+ *pDomainAssemblyHolder = NULL;
+ return FALSE;
+} // AppDomain::AssemblyIterator::Next_Unlocked
+
+#ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// Can be called only from AppDomain shutdown code:AppDomain::ShutdownAssemblies.
+// Does not add-ref collectible assemblies (as the LoaderAllocator might not be reachable from the
+// DomainAssembly anymore).
+//
+BOOL
+AppDomain::AssemblyIterator::Next_UnsafeNoAddRef(
+ DomainAssembly ** ppDomainAssembly)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ // Make sure we are iterating all assemblies (see the only caller code:AppDomain::ShutdownAssemblies)
+ _ASSERTE(m_assemblyIterationFlags ==
+ (kIncludeLoaded | kIncludeLoading | kIncludeExecution | kIncludeIntrospection | kIncludeFailedToLoad | kIncludeCollected));
+ // It also means that we do not exclude anything
+ _ASSERTE((m_assemblyIterationFlags & kExcludeCollectible) == 0);
+
+    // We are on the shutdown path, so the lock shouldn't be necessary, but all _Unlocked methods on AssemblyList
+    // have asserts that the lock is held, so we take it anyway ...
+ CrstHolder ch(m_pAppDomain->GetAssemblyListLock());
+
+ while (m_Iterator.Next())
+ {
+ // Get element from the list/iterator (without adding reference to the assembly)
+ *ppDomainAssembly = dac_cast<PTR_DomainAssembly>(m_Iterator.GetElement());
+ if (*ppDomainAssembly == NULL)
+ {
+ continue;
+ }
+
+ return TRUE;
+ }
+
+ *ppDomainAssembly = NULL;
+ return FALSE;
+} // AppDomain::AssemblyIterator::Next_UnsafeNoAddRef
+
+#ifdef FEATURE_CORECLR
+
+//---------------------------------------------------------------------------------------
+//
+BOOL AppDomain::IsImageFromTrustedPath(PEImage* pPEImage)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ THROWS;
+ PRECONDITION(CheckPointer(pPEImage));
+ }
+ CONTRACTL_END;
+
+ BOOL fIsInGAC = FALSE;
+ const SString &sImagePath = pPEImage->GetPath();
+
+ if (!sImagePath.IsEmpty())
+ {
+ // If we're not in a sandboxed domain, everything is full trust all the time
+ if (GetSecurityDescriptor()->IsFullyTrusted())
+ {
+ return TRUE;
+ }
+
+ fIsInGAC = GetTPABinderContext()->IsInTpaList(sImagePath);
+ }
+
+ return fIsInGAC;
+}
+
+BOOL AppDomain::IsImageFullyTrusted(PEImage* pPEImage)
+{
+ WRAPPER_NO_CONTRACT;
+ return IsImageFromTrustedPath(pPEImage);
+}
+
+#ifdef FEATURE_LEGACYNETCF
+BOOL RuntimeIsLegacyNetCF(DWORD adid)
+{
+ AppDomain * pAppDomain = GetAppDomain();
+
+ _ASSERTE(adid == 0 || adid == pAppDomain->GetId().m_dwId);
+
+ if (pAppDomain == NULL)
+ return FALSE;
+
+ if (pAppDomain->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ return TRUE;
+
+ return FALSE;
+}
+#endif
+
+#endif //FEATURE_CORECLR
+
+#endif //!DACCESS_COMPILE
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+
+// Returns a BOOL indicating if the binding model has been locked for the AppDomain
+BOOL AppDomain::IsBindingModelLocked()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return m_fIsBindingModelLocked.Load();
+}
+
+// Marks the binding model locked for AppDomain
+BOOL AppDomain::LockBindingModel()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ BOOL fDidWeLockBindingModel = FALSE;
+
+ if (InterlockedCompareExchangeT<BOOL>(&m_fIsBindingModelLocked, TRUE, FALSE) == FALSE)
+ {
+ fDidWeLockBindingModel = TRUE;
+ }
+
+ return fDidWeLockBindingModel;
+}
+
+BOOL AppDomain::IsHostAssemblyResolverInUse()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (GetFusionContext() != GetTPABinderContext());
+}
+
+// Helper used by the assembly binder to check if the specified AppDomain can use apppath assembly resolver
+BOOL RuntimeCanUseAppPathAssemblyResolver(DWORD adid)
+{
+ CONTRACTL
+ {
+        NOTHROW; // Cannot throw since it is invoked by the Binder, which expects to get back an HRESULT
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ADID id(adid);
+
+ // We need to be in COOP mode to get the AppDomain*
+ GCX_COOP();
+
+ AppDomain *pTargetDomain = SystemDomain::GetAppDomainFromId(id, ADV_CURRENTAD);
+ _ASSERTE(pTargetDomain != NULL);
+
+ pTargetDomain->LockBindingModel();
+
+ return !pTargetDomain->IsHostAssemblyResolverInUse();
+}
+
+// Returns S_OK if the assembly was successfully loaded
+HRESULT RuntimeInvokeHostAssemblyResolver(CLRPrivBinderAssemblyLoadContext *pLoadContextToBindWithin, IAssemblyName *pIAssemblyName, ICLRPrivAssembly **ppLoadedAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(ppLoadedAssembly != NULL);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+
+ // DevDiv #933506: Exceptions thrown during AssemblyLoadContext.Load should propagate
+ // EX_TRY
+ {
+ // Switch to COOP mode since we are going to work with managed references
+ GCX_COOP();
+
+ struct
+ {
+ ASSEMBLYNAMEREF oRefAssemblyName;
+ ASSEMBLYREF oRefLoadedAssembly;
+ } _gcRefs;
+
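+        // Zero the struct before GCPROTECT_BEGIN: once these slots are reported to the
+        // GC, stale stack garbage in them would be scanned as object references.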
+ ZeroMemory(&_gcRefs, sizeof(_gcRefs));
+
+ GCPROTECT_BEGIN(_gcRefs);
+
+ // Get the pointer to the managed assembly load context
+ INT_PTR ptrManagedAssemblyLoadContext = pLoadContextToBindWithin->GetManagedAssemblyLoadContext();
+
+ // Prepare to invoke System.Runtime.Loader.AssemblyLoadContext.Resolve method.
+ //
+ // First, initialize an assembly spec for the requested assembly
+ //
+ AssemblySpec spec;
+ hr = spec.Init(pIAssemblyName);
+ if (SUCCEEDED(hr))
+ {
+ // Next, allocate an AssemblyName managed object
+ _gcRefs.oRefAssemblyName = (ASSEMBLYNAMEREF) AllocateObject(MscorlibBinder::GetClass(CLASS__ASSEMBLY_NAME));
+
+ // Initialize the AssemblyName object from the AssemblySpec
+ spec.AssemblyNameInit(&_gcRefs.oRefAssemblyName, NULL);
+
+ // Finally, setup arguments for invocation
+ BinderMethodID idHAR_Resolve = METHOD__ASSEMBLYLOADCONTEXT__RESOLVE;
+ MethodDescCallSite methLoadAssembly(idHAR_Resolve);
+
+ // Setup the arguments for the call
+ ARG_SLOT args[2] =
+ {
+ PtrToArgSlot(ptrManagedAssemblyLoadContext), // IntPtr for managed assembly load context instance
+ ObjToArgSlot(_gcRefs.oRefAssemblyName), // AssemblyName instance
+ };
+
+ // Make the call
+ _gcRefs.oRefLoadedAssembly = (ASSEMBLYREF) methLoadAssembly.Call_RetOBJECTREF(args);
+ if (_gcRefs.oRefLoadedAssembly != NULL)
+ {
+ // We were able to get the assembly loaded. Now, get its name since the host could have
+ // performed the resolution using an assembly with different name.
+ DomainAssembly *pDomainAssembly = _gcRefs.oRefLoadedAssembly->GetDomainAssembly();
+ PEAssembly *pLoadedPEAssembly = NULL;
+ bool fFailLoad = false;
+ if (!pDomainAssembly)
+ {
+ // Reflection emitted assemblies will not have a domain assembly.
+ fFailLoad = true;
+ }
+ else
+ {
+ pLoadedPEAssembly = pDomainAssembly->GetFile();
+                    if (!pLoadedPEAssembly->HasHostAssembly())
+                    {
+                        // Assemblies that were not supplied by a host binder (and thus have no host assembly) cannot be returned either.
+ fFailLoad = true;
+ }
+ }
+
+ // The loaded assembly's ICLRPrivAssembly* is saved as HostAssembly in PEAssembly
+ if (fFailLoad)
+ {
+ SString name;
+ spec.GetFileOrDisplayName(0, name);
+ COMPlusThrowHR(COR_E_INVALIDOPERATION, IDS_HOST_ASSEMBLY_RESOLVER_DYNAMICALLY_EMITTED_ASSEMBLIES_UNSUPPORTED, name);
+ }
+
+ // Is the assembly already bound using a binding context that will be incompatible?
+ // An example is attempting to consume an assembly bound to WinRT binder.
+ ICLRPrivAssembly *pAssemblyBindingContext = pLoadedPEAssembly->GetHostAssembly();
+
+#ifdef FEATURE_COMINTEROP
+ if (AreSameBinderInstance(pAssemblyBindingContext, GetAppDomain()->GetWinRtBinder()))
+ {
+ // It is invalid to return an assembly bound to an incompatible binder
+ *ppLoadedAssembly = NULL;
+ SString name;
+ spec.GetFileOrDisplayName(0, name);
+ COMPlusThrowHR(COR_E_INVALIDOPERATION, IDS_HOST_ASSEMBLY_RESOLVER_INCOMPATIBLE_BINDING_CONTEXT, name);
+ }
+#endif // FEATURE_COMINTEROP
+
+                    // Get the ICLRPrivAssembly reference to return back to the caller.
+ *ppLoadedAssembly = clr::SafeAddRef(pLoadedPEAssembly->GetHostAssembly());
+ hr = S_OK;
+ }
+ }
+
+ GCPROTECT_END();
+ }
+ // EX_CATCH_HRESULT(hr);
+
+ return hr;
+
+}
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+
+// Approximate size of the loader data maintained for each assembly
+#define APPROX_LOADER_DATA_PER_ASSEMBLY 8196
+
+size_t AppDomain::EstimateSize()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ size_t retval = sizeof(AppDomain);
+ retval += GetLoaderAllocator()->EstimateSize();
+ //very rough estimate
+ retval += GetAssemblyCount() * APPROX_LOADER_DATA_PER_ASSEMBLY;
+ return retval;
+}
+
+#ifdef DACCESS_COMPILE
+
+void
+DomainLocalModule::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ // Enumerate the DomainLocalModule itself. DLMs are allocated to be larger than
+ // sizeof(DomainLocalModule) to make room for ClassInit flags and non-GC statics.
+ // "DAC_ENUM_DTHIS()" probably does not account for this, so we might not enumerate
+ // all of the ClassInit flags and non-GC statics.
+ // sizeof(DomainLocalModule) == 0x28
+ DAC_ENUM_DTHIS();
+
+ if (m_pDomainFile.IsValid())
+ {
+ m_pDomainFile->EnumMemoryRegions(flags);
+ }
+
+ if (m_pDynamicClassTable.Load().IsValid())
+ {
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_pDynamicClassTable.Load()),
+ m_aDynamicEntries * sizeof(DynamicClassInfo));
+
+ for (SIZE_T i = 0; i < m_aDynamicEntries; i++)
+ {
+ PTR_DynamicEntry entry = dac_cast<PTR_DynamicEntry>(m_pDynamicClassTable[i].m_pDynamicEntry.Load());
+ if (entry.IsValid())
+ {
+ // sizeof(DomainLocalModule::DynamicEntry) == 8
+ entry.EnumMem();
+ }
+ }
+ }
+}
+
+void
+DomainLocalBlock::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ // Block is contained in AppDomain, don't enum this.
+
+ if (m_pModuleSlots.IsValid())
+ {
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_pModuleSlots),
+ m_aModuleIndices * sizeof(TADDR));
+
+ for (SIZE_T i = 0; i < m_aModuleIndices; i++)
+ {
+ PTR_DomainLocalModule domMod = m_pModuleSlots[i];
+ if (domMod.IsValid())
+ {
+ domMod->EnumMemoryRegions(flags);
+ }
+ }
+ }
+}
+
+void
+BaseDomain::EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
+ bool enumThis)
+{
+ SUPPORTS_DAC;
+ if (enumThis)
+ {
+ // This is wrong. Don't do it.
+ // BaseDomain cannot be instantiated.
+ // The only thing this code can hope to accomplish is to potentially break
+ // memory enumeration walking through the derived class if we
+ // explicitly call the base class enum first.
+// DAC_ENUM_VTHIS();
+ }
+
+ EMEM_OUT(("MEM: %p BaseDomain\n", dac_cast<TADDR>(this)));
+}
+
+void
+AppDomain::EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
+ bool enumThis)
+{
+ SUPPORTS_DAC;
+
+ if (enumThis)
+ {
+ //sizeof(AppDomain) == 0xeb0
+ DAC_ENUM_VTHIS();
+ }
+ BaseDomain::EnumMemoryRegions(flags, false);
+
+ // We don't need AppDomain name in triage dumps.
+ if (flags != CLRDATA_ENUM_MEM_TRIAGE)
+ {
+ m_friendlyName.EnumMemoryRegions(flags);
+ }
+
+ m_Assemblies.EnumMemoryRegions(flags);
+ AssemblyIterator assem = IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution | kIncludeIntrospection));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (assem.Next(pDomainAssembly.This()))
+ {
+ pDomainAssembly->EnumMemoryRegions(flags);
+ }
+
+ m_sDomainLocalBlock.EnumMemoryRegions(flags);
+
+ m_LoaderAllocator.EnumMemoryRegions(flags);
+}
+
+void
+SystemDomain::EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
+ bool enumThis)
+{
+ SUPPORTS_DAC;
+ if (enumThis)
+ {
+ DAC_ENUM_VTHIS();
+ }
+ BaseDomain::EnumMemoryRegions(flags, false);
+
+ if (m_pSystemFile.IsValid())
+ {
+ m_pSystemFile->EnumMemoryRegions(flags);
+ }
+ if (m_pSystemAssembly.IsValid())
+ {
+ m_pSystemAssembly->EnumMemoryRegions(flags);
+ }
+ if (m_pDefaultDomain.IsValid())
+ {
+ m_pDefaultDomain->EnumMemoryRegions(flags, true);
+ }
+
+ m_appDomainIndexList.EnumMem();
+ (&m_appDomainIndexList)->EnumMemoryRegions(flags);
+}
+
+void
+SharedDomain::EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
+ bool enumThis)
+{
+ SUPPORTS_DAC;
+ if (enumThis)
+ {
+ DAC_ENUM_VTHIS();
+ }
+ BaseDomain::EnumMemoryRegions(flags, false);
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ m_assemblyMap.EnumMemoryRegions(flags);
+ SharedAssemblyIterator assem;
+ while (assem.Next())
+ {
+ assem.GetAssembly()->EnumMemoryRegions(flags);
+ }
+#endif
+}
+
+#endif //DACCESS_COMPILE
+
+
+PTR_LoaderAllocator SystemDomain::GetGlobalLoaderAllocator()
+{
+ return PTR_LoaderAllocator(PTR_HOST_MEMBER_TADDR(SystemDomain,System(),m_GlobalAllocator));
+}
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+#ifndef CROSSGEN_COMPILE
+// Return the total processor time (user and kernel) used by threads executing in this AppDomain so far. The
+// result is in 100ns units.
+ULONGLONG AppDomain::QueryProcessorUsage()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ Thread *pThread = NULL;
+
+ // Need to update our accumulated processor time count with current values from each thread that is
+ // currently executing in this domain.
+
+ // Take the thread store lock while we enumerate threads.
+ ThreadStoreLockHolder tsl;
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ {
+ // Skip unstarted and dead threads and those that are currently executing in a different AppDomain.
+ if (pThread->IsUnstarted() || pThread->IsDead() || pThread->GetDomain(INDEBUG(TRUE)) != this)
+ continue;
+
+ // Add the amount of time spent by the thread in the AppDomain since the last time we asked (calling
+ // Thread::QueryThreadProcessorUsage() will reset the thread's counter).
+ UpdateProcessorUsage(pThread->QueryThreadProcessorUsage());
+ }
+#endif // !DACCESS_COMPILE
+
+ // Return the updated total.
+ return m_ullTotalProcessorUsage;
+}
+
+// Add to the current count of processor time used by threads within this AppDomain. This API is called by
+// threads transitioning between AppDomains.
+void AppDomain::UpdateProcessorUsage(ULONGLONG ullAdditionalUsage)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Need to be careful to synchronize here, multiple threads could be racing to update this count.
+ ULONGLONG ullOldValue;
+ ULONGLONG ullNewValue;
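+    // Classic compare-and-swap retry loop: re-read the total, add, and attempt to
+    // publish until no other thread changed the value between the read and the CAS.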
+ do
+ {
+ ullOldValue = m_ullTotalProcessorUsage;
+ ullNewValue = ullOldValue + ullAdditionalUsage;
+ } while (InterlockedCompareExchange64((LONGLONG*)&m_ullTotalProcessorUsage,
+ (LONGLONG)ullNewValue,
+ (LONGLONG)ullOldValue) != (LONGLONG)ullOldValue);
+}
+#endif // CROSSGEN_COMPILE
+
+#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+#if defined(FEATURE_TYPEEQUIVALENCE)
+
+#ifndef DACCESS_COMPILE
+TypeEquivalenceHashTable * AppDomain::GetTypeEquivalenceCache()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Take the critical section all of the time in debug builds to ensure that it is safe to take
+ // the critical section in the unusual times when it may actually be needed in retail builds
+#ifdef _DEBUG
+ CrstHolder ch(&m_TypeEquivalenceCrst);
+#endif
+
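+    // Double-checked locking (retail): the first Load() is the unsynchronized fast path;
+    // the second Load() under m_TypeEquivalenceCrst ensures the table is created only once.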
+ if (m_pTypeEquivalenceTable.Load() == NULL)
+ {
+#ifndef _DEBUG
+ CrstHolder ch(&m_TypeEquivalenceCrst);
+#endif
+ if (m_pTypeEquivalenceTable.Load() == NULL)
+ {
+ m_pTypeEquivalenceTable = TypeEquivalenceHashTable::Create(this, 12, &m_TypeEquivalenceCrst);
+ }
+ }
+ return m_pTypeEquivalenceTable;
+}
+#endif //!DACCESS_COMPILE
+
+#endif //FEATURE_TYPEEQUIVALENCE
+
+#if defined(FEATURE_HOSTED_BINDER)
+#if !defined(DACCESS_COMPILE)
+
+//---------------------------------------------------------------------------------------------------------------------
+void AppDomain::PublishHostedAssembly(
+ DomainAssembly * pDomainAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ if (pDomainAssembly->GetFile()->HasHostAssembly())
+ {
+ // We have to serialize all Add operations
+ CrstHolder lockAdd(&m_crstHostAssemblyMapAdd);
+ _ASSERTE(m_hostAssemblyMap.Lookup(pDomainAssembly->GetFile()->GetHostAssembly()) == nullptr);
+
+ // Wrapper for m_hostAssemblyMap.Add that avoids call out into host
+ HostAssemblyMap::AddPhases addCall;
+
+ // 1. Preallocate one element
+ addCall.PreallocateForAdd(&m_hostAssemblyMap);
+ {
+ // 2. Take the reader lock which can be taken during stack walking
+ // We cannot call out into host from ForbidSuspend region (i.e. no allocations/deallocations)
+ ForbidSuspendThreadHolder suspend;
+ {
+ CrstHolder lock(&m_crstHostAssemblyMap);
+ // 3. Add the element to the hash table (no call out into host)
+ addCall.Add(pDomainAssembly);
+ }
+ }
+ // 4. Cleanup the old memory (if any)
+ addCall.DeleteOldTable();
+ }
+ else
+ {
+#ifdef FEATURE_APPX_BINDER
+        // In AppX processes, all PEAssemblies that reach this stage should have host binders.
+ _ASSERTE(!AppX::IsAppXProcess());
+#endif
+ }
+}
+
+//---------------------------------------------------------------------------------------------------------------------
+void AppDomain::UpdatePublishHostedAssembly(
+ DomainAssembly * pAssembly,
+ PTR_PEFile pFile)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END
+
+ if (pAssembly->GetFile()->HasHostAssembly())
+ {
+ // We have to serialize all Add operations
+ CrstHolder lockAdd(&m_crstHostAssemblyMapAdd);
+ {
+ // Wrapper for m_hostAssemblyMap.Add that avoids call out into host
+ OriginalFileHostAssemblyMap::AddPhases addCall;
+ bool fAddOrigFile = false;
+
+ // For cases where the pefile is being updated
+ // 1. Preallocate one element
+ if (pFile != pAssembly->GetFile())
+ {
+ addCall.PreallocateForAdd(&m_hostAssemblyMapForOrigFile);
+ fAddOrigFile = true;
+ }
+
+ {
+ // We cannot call out into host from ForbidSuspend region (i.e. no allocations/deallocations)
+ ForbidSuspendThreadHolder suspend;
+ {
+ CrstHolder lock(&m_crstHostAssemblyMap);
+
+ // Remove from hash table.
+ _ASSERTE(m_hostAssemblyMap.Lookup(pAssembly->GetFile()->GetHostAssembly()) != nullptr);
+ m_hostAssemblyMap.Remove(pAssembly->GetFile()->GetHostAssembly());
+
+ // Update PEFile on DomainAssembly. (This may cause the key for the hash to change, which is why we need this function)
+ pAssembly->UpdatePEFileWorker(pFile);
+
+ _ASSERTE(fAddOrigFile == (pAssembly->GetOriginalFile() != pAssembly->GetFile()));
+ if (fAddOrigFile)
+ {
+ // Add to the orig file hash table if we might be in a case where we've cached the original pefile and not the final pe file (for use during GetAssemblyIfLoaded)
+ addCall.Add(pAssembly);
+ }
+
+ // Add back to the hashtable (the call to Remove above guarantees that we will not call into host for table reallocation)
+ _ASSERTE(m_hostAssemblyMap.Lookup(pAssembly->GetFile()->GetHostAssembly()) == nullptr);
+ m_hostAssemblyMap.Add(pAssembly);
+ }
+ }
+
+ // 4. Cleanup the old memory (if any)
+ if (fAddOrigFile)
+ addCall.DeleteOldTable();
+ }
+ }
+ else
+ {
+#ifdef FEATURE_APPX_BINDER
+        // In AppX processes, all PEAssemblies that reach this stage should have host binders.
+ _ASSERTE(!AppX::IsAppXProcess());
+#endif
+
+ pAssembly->UpdatePEFileWorker(pFile);
+ }
+}
+
+//---------------------------------------------------------------------------------------------------------------------
+void AppDomain::UnPublishHostedAssembly(
+ DomainAssembly * pAssembly)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END
+
+ if (pAssembly->GetFile()->HasHostAssembly())
+ {
+ ForbidSuspendThreadHolder suspend;
+ {
+ CrstHolder lock(&m_crstHostAssemblyMap);
+ _ASSERTE(m_hostAssemblyMap.Lookup(pAssembly->GetFile()->GetHostAssembly()) != nullptr);
+ m_hostAssemblyMap.Remove(pAssembly->GetFile()->GetHostAssembly());
+
+ // We also have an entry in m_hostAssemblyMapForOrigFile. Handle that case.
+ if (pAssembly->GetOriginalFile() != pAssembly->GetFile())
+ {
+ m_hostAssemblyMapForOrigFile.Remove(pAssembly->GetOriginalFile()->GetHostAssembly());
+ }
+ }
+ }
+ else
+ {
+        // In AppX processes, all PEAssemblies that reach this stage should have host binders.
+ _ASSERTE(!AppX::IsAppXProcess());
+ }
+}
+
+#if defined(FEATURE_CORECLR) && defined(FEATURE_COMINTEROP)
+HRESULT AppDomain::SetWinrtApplicationContext(SString &appLocalWinMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(m_pWinRtBinder != nullptr);
+
+ _ASSERTE(GetTPABinderContext() != NULL);
+ BINDER_SPACE::ApplicationContext *pApplicationContext = GetTPABinderContext()->GetAppContext();
+ _ASSERTE(pApplicationContext != NULL);
+
+ return m_pWinRtBinder->SetApplicationContext(pApplicationContext, appLocalWinMD);
+}
+
+#endif // FEATURE_CORECLR && FEATURE_COMINTEROP
+
+#endif //!DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------------------------------------
+PTR_DomainAssembly AppDomain::FindAssembly(PTR_ICLRPrivAssembly pHostAssembly)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ if (pHostAssembly == nullptr)
+ return NULL;
+
+ {
+ ForbidSuspendThreadHolder suspend;
+ {
+ CrstHolder lock(&m_crstHostAssemblyMap);
+ PTR_DomainAssembly returnValue = m_hostAssemblyMap.Lookup(pHostAssembly);
+ if (returnValue == NULL)
+ {
+                // If not found in m_hostAssemblyMap, look in m_hostAssemblyMapForOrigFile.
+                // This is necessary because, in a second AppDomain, the PEFile first
+                // discovered for the assembly may not end up being the one used by the
+                // DomainFile, yet the CLRPrivBinderFusion will in some cases find the
+                // pHostAssembly associated with that no-longer-used PEFile instead of the
+                // PEFile that was finally decided upon.
+ returnValue = m_hostAssemblyMapForOrigFile.Lookup(pHostAssembly);
+ }
+
+ return returnValue;
+ }
+ }
+}
+
+#endif //FEATURE_HOSTED_BINDER
+
+#if !defined(DACCESS_COMPILE) && defined(FEATURE_CORECLR)
+
+void ZapperSetBindingPaths(ICorCompilationDomain *pDomain, SString &trustedPlatformAssemblies, SString &platformResourceRoots, SString &appPaths, SString &appNiPaths)
+{
+ CLRPrivBinderCoreCLR *pBinder = static_cast<CLRPrivBinderCoreCLR*>(((CompilationDomain *)pDomain)->GetFusionContext());
+ _ASSERTE(pBinder != NULL);
+ pBinder->SetupBindingPaths(trustedPlatformAssemblies, platformResourceRoots, appPaths, appNiPaths);
+#ifdef FEATURE_COMINTEROP
+    SString emptyString;
+    ((CompilationDomain*)pDomain)->SetWinrtApplicationContext(emptyString);
+#endif
+}
+
+#ifdef FEATURE_LEGACYNETCF
+void ZapperSetAppCompatWP8(ICorCompilationDomain *pDomain)
+{
+ ((CompilationDomain*)pDomain)->SetAppDomainCompatMode(BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8);
+}
+#endif
+
+#endif
+
+#if defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+bool IsSingleAppDomain()
+{
+    STARTUP_FLAGS flags = CorHost2::GetStartupFlags();
+    return (flags & STARTUP_SINGLE_APPDOMAIN) != 0;
+}
+#else
+bool IsSingleAppDomain()
+{
+    return false;
+}
+#endif
diff --git a/src/vm/appdomain.hpp b/src/vm/appdomain.hpp
new file mode 100644
index 0000000000..24c86e9ae4
--- /dev/null
+++ b/src/vm/appdomain.hpp
@@ -0,0 +1,5463 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: AppDomain.cpp
+**
+
+**
+** Purpose: Implements AppDomain (loader domain) architecture
+**
+**
+===========================================================*/
+#ifndef _APPDOMAIN_H
+#define _APPDOMAIN_H
+
+#ifndef CLR_STANDALONE_BINDER
+#include "eventtrace.h"
+#include "assembly.hpp"
+#include "clsload.hpp"
+#include "eehash.h"
+#ifdef FEATURE_FUSION
+#include "fusion.h"
+#endif
+#include "arraylist.h"
+#include "comreflectioncache.hpp"
+#include "comutilnative.h"
+#include "domainfile.h"
+#include "objectlist.h"
+#include "fptrstubs.h"
+#include "ilstubcache.h"
+#include "testhookmgr.h"
+#ifdef FEATURE_VERSIONING
+#include "../binder/inc/applicationcontext.hpp"
+#endif // FEATURE_VERSIONING
+#include "rejit.h"
+
+#ifdef FEATURE_MULTICOREJIT
+#include "multicorejit.h"
+#endif
+
+#ifdef FEATURE_COMINTEROP
+#include "clrprivbinderwinrt.h"
+#ifndef FEATURE_CORECLR
+#include "clrprivbinderreflectiononlywinrt.h"
+#include "clrprivtypecachereflectiononlywinrt.h"
+#endif
+#include "..\md\winmd\inc\adapter.h"
+#include "winrttypenameconverter.h"
+#endif // FEATURE_COMINTEROP
+
+#else // CLR_STANDALONE_BINDER
+class DomainFile;
+class CPUSTUBLINKER;
+struct CodeLabel;
+class IdDispenser;
+typedef DPTR(OBJECTREF) PTR_OBJECTREF;
+typedef DPTR(DomainFile) PTR_DomainFile;
+typedef DPTR(IdDispenser) PTR_IdDispenser;
+#include "..\md\winmd\inc\adapter.h"
+#endif // CLR_STANDALONE_BINDER
+
+#include "appxutil.h"
+
+class BaseDomain;
+class SystemDomain;
+class SharedDomain;
+class AppDomain;
+class CompilationDomain;
+class AppDomainEnum;
+class AssemblySink;
+class EEMarshalingData;
+class Context;
+class GlobalStringLiteralMap;
+class StringLiteralMap;
+struct SecurityContext;
+class MngStdInterfacesInfo;
+class DomainModule;
+class DomainAssembly;
+struct InteropMethodTableData;
+class LoadLevelLimiter;
+class UMEntryThunkCache;
+class TypeEquivalenceHashTable;
+class IApplicationSecurityDescriptor;
+class StringArrayList;
+
+typedef VPTR(IApplicationSecurityDescriptor) PTR_IApplicationSecurityDescriptor;
+
+extern INT64 g_PauseTime; // Total time in millisecond the CLR has been paused
+
+#ifdef FEATURE_COMINTEROP
+class ComCallWrapperCache;
+struct SimpleComCallWrapper;
+
+class RCWRefCache;
+
+// This enum is used to specify whether the user wants COM or remoting
+enum COMorRemotingFlag {
+ COMorRemoting_NotInitialized = 0,
+ COMorRemoting_COM = 1, // COM will be used both cross-domain and cross-runtime
+ COMorRemoting_Remoting = 2, // Remoting will be used cross-domain; cross-runtime will use Remoting only if it looks like it's expected (default)
+ COMorRemoting_LegacyMode = 3 // Remoting will be used both cross-domain and cross-runtime
+};
+
+#endif // FEATURE_COMINTEROP
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4200) // Disable zero-sized array warning
+#endif
+
+
+GPTR_DECL(IdDispenser, g_pModuleIndexDispenser);
+
+// This enum is aligned to System.ExceptionCatcherType.
+enum ExceptionCatcher {
+ ExceptionCatcher_ManagedCode = 0,
+ ExceptionCatcher_AppDomainTransition = 1,
+ ExceptionCatcher_COMInterop = 2,
+};
+
+// We would like *ALLOCATECLASS_FLAG to AV (in order to catch errors), so don't change it
+struct ClassInitFlags {
+ enum
+ {
+ INITIALIZED_FLAG_BIT = 0,
+ INITIALIZED_FLAG = 1<<INITIALIZED_FLAG_BIT,
+ ERROR_FLAG_BIT = 1,
+ ERROR_FLAG = 1<<ERROR_FLAG_BIT,
+ ALLOCATECLASS_FLAG_BIT = 2, // Bit to avoid racing for InstantiateStaticHandles
+ ALLOCATECLASS_FLAG = 1<<ALLOCATECLASS_FLAG_BIT,
+ COLLECTIBLE_FLAG_BIT = 3,
+ COLLECTIBLE_FLAG = 1<<COLLECTIBLE_FLAG_BIT
+ };
+};
+
+struct DomainLocalModule
+{
+ friend class ClrDataAccess;
+ friend class CheckAsmOffsets;
+ friend struct ThreadLocalModule;
+
+// After these macros complete, they may have returned an interior pointer into a GC object, cast to a byte pointer.
+// It is critically important that no GC is allowed to occur before this pointer is used.
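+// (In practice the caller must be in cooperative GC mode and must consume the returned
+// byte pointer before any operation that could trigger a GC.)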
+#define GET_DYNAMICENTRY_GCSTATICS_BASEPOINTER(pLoaderAllocator, dynamicClassInfoParam, pGCStatics) \
+ {\
+ DomainLocalModule::PTR_DynamicClassInfo dynamicClassInfo = dac_cast<DomainLocalModule::PTR_DynamicClassInfo>(dynamicClassInfoParam);\
+ DomainLocalModule::PTR_DynamicEntry pDynamicEntry = dac_cast<DomainLocalModule::PTR_DynamicEntry>((DomainLocalModule::DynamicEntry*)dynamicClassInfo->m_pDynamicEntry.Load()); \
+ if ((dynamicClassInfo->m_dwFlags) & ClassInitFlags::COLLECTIBLE_FLAG) \
+ {\
+ PTRARRAYREF objArray;\
+ objArray = (PTRARRAYREF)pLoaderAllocator->GetHandleValueFastCannotFailType2( \
+ (dac_cast<DomainLocalModule::PTR_CollectibleDynamicEntry>(pDynamicEntry))->m_hGCStatics);\
+ *(pGCStatics) = dac_cast<PTR_BYTE>(PTR_READ(PTR_TO_TADDR(OBJECTREFToObject( objArray )) + offsetof(PtrArray, m_Array), objArray->GetNumComponents() * sizeof(void*))) ;\
+ }\
+ else\
+ {\
+ *(pGCStatics) = (dac_cast<DomainLocalModule::PTR_NormalDynamicEntry>(pDynamicEntry))->GetGCStaticsBasePointer();\
+ }\
+ }\
+
+#define GET_DYNAMICENTRY_NONGCSTATICS_BASEPOINTER(pLoaderAllocator, dynamicClassInfoParam, pNonGCStatics) \
+ {\
+ DomainLocalModule::PTR_DynamicClassInfo dynamicClassInfo = dac_cast<DomainLocalModule::PTR_DynamicClassInfo>(dynamicClassInfoParam);\
+ DomainLocalModule::PTR_DynamicEntry pDynamicEntry = dac_cast<DomainLocalModule::PTR_DynamicEntry>((DomainLocalModule::DynamicEntry*)(dynamicClassInfo)->m_pDynamicEntry.Load()); \
+ if (((dynamicClassInfo)->m_dwFlags) & ClassInitFlags::COLLECTIBLE_FLAG) \
+ {\
+ if ((dac_cast<DomainLocalModule::PTR_CollectibleDynamicEntry>(pDynamicEntry))->m_hNonGCStatics != 0) \
+ { \
+ U1ARRAYREF objArray;\
+ objArray = (U1ARRAYREF)pLoaderAllocator->GetHandleValueFastCannotFailType2( \
+ (dac_cast<DomainLocalModule::PTR_CollectibleDynamicEntry>(pDynamicEntry))->m_hNonGCStatics);\
+ *(pNonGCStatics) = dac_cast<PTR_BYTE>(PTR_READ( \
+ PTR_TO_TADDR(OBJECTREFToObject( objArray )) + sizeof(ArrayBase) - DomainLocalModule::DynamicEntry::GetOffsetOfDataBlob(), \
+ objArray->GetNumComponents() * (DWORD)objArray->GetComponentSize() + DomainLocalModule::DynamicEntry::GetOffsetOfDataBlob())); \
+ } else (*pNonGCStatics) = NULL; \
+ }\
+ else\
+ {\
+ *(pNonGCStatics) = dac_cast<DomainLocalModule::PTR_NormalDynamicEntry>(pDynamicEntry)->GetNonGCStaticsBasePointer();\
+ }\
+ }\
+
+ struct DynamicEntry
+ {
+ static DWORD GetOffsetOfDataBlob();
+ };
+ typedef DPTR(DynamicEntry) PTR_DynamicEntry;
+
+ struct CollectibleDynamicEntry : public DynamicEntry
+ {
+ LOADERHANDLE m_hGCStatics;
+ LOADERHANDLE m_hNonGCStatics;
+ };
+ typedef DPTR(CollectibleDynamicEntry) PTR_CollectibleDynamicEntry;
+
+ struct NormalDynamicEntry : public DynamicEntry
+ {
+ PTR_OBJECTREF m_pGCStatics;
+#ifdef FEATURE_64BIT_ALIGNMENT
+ // Padding to make m_pDataBlob aligned at MAX_PRIMITIVE_FIELD_SIZE
+ // code:MethodTableBuilder::PlaceRegularStaticFields assumes that the start of the data blob is aligned
+ SIZE_T m_padding;
+#endif
+ BYTE m_pDataBlob[0];
+
+ inline PTR_BYTE GetGCStaticsBasePointer()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return dac_cast<PTR_BYTE>(m_pGCStatics);
+ }
+ inline PTR_BYTE GetNonGCStaticsBasePointer()
+ {
+ LIMITED_METHOD_CONTRACT
+ SUPPORTS_DAC;
+ return dac_cast<PTR_BYTE>(this);
+ }
+ };
+ typedef DPTR(NormalDynamicEntry) PTR_NormalDynamicEntry;
+
+ struct DynamicClassInfo
+ {
+ VolatilePtr<DynamicEntry, PTR_DynamicEntry> m_pDynamicEntry;
+ Volatile<DWORD> m_dwFlags;
+ };
+ typedef DPTR(DynamicClassInfo) PTR_DynamicClassInfo;
+
+ inline UMEntryThunk * GetADThunkTable()
+ {
+ LIMITED_METHOD_CONTRACT
+ return m_pADThunkTable;
+ }
+
+ inline void SetADThunkTable(UMEntryThunk* pADThunkTable)
+ {
+ LIMITED_METHOD_CONTRACT
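+        // One-time initialization: the CAS installs pADThunkTable only if no table has
+        // been set yet (NULL -> pADThunkTable); a racing second writer is a no-op.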
+ InterlockedCompareExchangeT(m_pADThunkTable.GetPointer(), pADThunkTable, NULL);
+ }
+
+ // Note the difference between:
+ //
+ // GetPrecomputedNonGCStaticsBasePointer() and
+ // GetPrecomputedStaticsClassData()
+ //
+ // GetPrecomputedNonGCStaticsBasePointer returns the pointer that should be added to field offsets to retrieve statics
+ // GetPrecomputedStaticsClassData returns a pointer to the first byte of the precomputed statics block
+ inline TADDR GetPrecomputedNonGCStaticsBasePointer()
+ {
+ LIMITED_METHOD_CONTRACT
+ return dac_cast<TADDR>(this);
+ }
+
+ inline PTR_BYTE GetPrecomputedStaticsClassData()
+ {
+ LIMITED_METHOD_CONTRACT
+ return dac_cast<PTR_BYTE>(this) + offsetof(DomainLocalModule, m_pDataBlob);
+ }
+
+ static SIZE_T GetOffsetOfDataBlob() { return offsetof(DomainLocalModule, m_pDataBlob); }
+ static SIZE_T GetOffsetOfGCStaticPointer() { return offsetof(DomainLocalModule, m_pGCStatics); }
+
+ inline DomainFile* GetDomainFile()
+ {
+ LIMITED_METHOD_CONTRACT
+ SUPPORTS_DAC;
+ return m_pDomainFile;
+ }
+
+#ifndef DACCESS_COMPILE
+ inline void SetDomainFile(DomainFile* pDomainFile)
+ {
+ LIMITED_METHOD_CONTRACT
+ m_pDomainFile = pDomainFile;
+ }
+#endif
+
+ inline PTR_OBJECTREF GetPrecomputedGCStaticsBasePointer()
+ {
+ LIMITED_METHOD_CONTRACT
+ return m_pGCStatics;
+ }
+
+ inline PTR_OBJECTREF * GetPrecomputedGCStaticsBasePointerAddress()
+ {
+ LIMITED_METHOD_CONTRACT
+ return &m_pGCStatics;
+ }
+
+#ifndef CLR_STANDALONE_BINDER
+ // Returns bytes so we can add offsets
+ inline PTR_BYTE GetGCStaticsBasePointer(MethodTable * pMT)
+ {
+ WRAPPER_NO_CONTRACT
+ SUPPORTS_DAC;
+
+ if (pMT->IsDynamicStatics())
+ {
+ _ASSERTE(GetDomainFile()->GetModule() == pMT->GetModuleForStatics());
+ return GetDynamicEntryGCStaticsBasePointer(pMT->GetModuleDynamicEntryID(), pMT->GetLoaderAllocator());
+ }
+ else
+ {
+ return dac_cast<PTR_BYTE>(m_pGCStatics);
+ }
+ }
+
+ inline PTR_BYTE GetNonGCStaticsBasePointer(MethodTable * pMT)
+ {
+ WRAPPER_NO_CONTRACT
+ SUPPORTS_DAC;
+
+ if (pMT->IsDynamicStatics())
+ {
+ _ASSERTE(GetDomainFile()->GetModule() == pMT->GetModuleForStatics());
+ return GetDynamicEntryNonGCStaticsBasePointer(pMT->GetModuleDynamicEntryID(), pMT->GetLoaderAllocator());
+ }
+ else
+ {
+ return dac_cast<PTR_BYTE>(this);
+ }
+ }
+#endif // !CLR_STANDALONE_BINDER
+
+ inline DynamicClassInfo* GetDynamicClassInfo(DWORD n)
+ {
+ LIMITED_METHOD_CONTRACT
+ SUPPORTS_DAC;
+ _ASSERTE(m_pDynamicClassTable.Load() && m_aDynamicEntries > n);
+ dac_cast<PTR_DynamicEntry>(m_pDynamicClassTable[n].m_pDynamicEntry.Load());
+
+ return &m_pDynamicClassTable[n];
+ }
+
+#ifndef CLR_STANDALONE_BINDER
+ // These helpers can now return null, as the debugger may do queries on a type
+ // before the calls to PopulateClass happen
+ inline PTR_BYTE GetDynamicEntryGCStaticsBasePointer(DWORD n, PTR_LoaderAllocator pLoaderAllocator)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+
+ if (n >= m_aDynamicEntries)
+ {
+ return NULL;
+ }
+
+ DynamicClassInfo* pClassInfo = GetDynamicClassInfo(n);
+ if (!pClassInfo->m_pDynamicEntry)
+ {
+ return NULL;
+ }
+
+ PTR_BYTE retval = NULL;
+
+ GET_DYNAMICENTRY_GCSTATICS_BASEPOINTER(pLoaderAllocator, pClassInfo, &retval);
+
+ return retval;
+ }
+
+ inline PTR_BYTE GetDynamicEntryNonGCStaticsBasePointer(DWORD n, PTR_LoaderAllocator pLoaderAllocator)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+
+ if (n >= m_aDynamicEntries)
+ {
+ return NULL;
+ }
+
+ DynamicClassInfo* pClassInfo = GetDynamicClassInfo(n);
+ if (!pClassInfo->m_pDynamicEntry)
+ {
+ return NULL;
+ }
+
+ PTR_BYTE retval = NULL;
+
+ GET_DYNAMICENTRY_NONGCSTATICS_BASEPOINTER(pLoaderAllocator, pClassInfo, &retval);
+
+ return retval;
+ }
+#endif // CLR_STANDALONE_BINDER
+
+ FORCEINLINE PTR_DynamicClassInfo GetDynamicClassInfoIfInitialized(DWORD n)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // m_aDynamicEntries is set last, it needs to be checked first
+ if (n >= m_aDynamicEntries)
+ {
+ return NULL;
+ }
+
+ _ASSERTE(m_pDynamicClassTable.Load() != NULL);
+ PTR_DynamicClassInfo pDynamicClassInfo = (PTR_DynamicClassInfo)(m_pDynamicClassTable.Load() + n);
+
+ // INITIALIZED_FLAG is set last, it needs to be checked first
+ if ((pDynamicClassInfo->m_dwFlags & ClassInitFlags::INITIALIZED_FLAG) == 0)
+ {
+ return NULL;
+ }
+
+ PREFIX_ASSUME(pDynamicClassInfo != NULL);
+ return pDynamicClassInfo;
+ }
+
+ // iClassIndex is slightly expensive to compute, so if we already know
+ // it, we can use this helper
+ inline BOOL IsClassInitialized(MethodTable* pMT, DWORD iClassIndex = (DWORD)-1)
+ {
+ WRAPPER_NO_CONTRACT;
+ return (GetClassFlags(pMT, iClassIndex) & ClassInitFlags::INITIALIZED_FLAG) != 0;
+ }
+
+ inline BOOL IsPrecomputedClassInitialized(DWORD classID)
+ {
+ return GetPrecomputedStaticsClassData()[classID] & ClassInitFlags::INITIALIZED_FLAG;
+ }
+
+ inline BOOL IsClassAllocated(MethodTable* pMT, DWORD iClassIndex = (DWORD)-1)
+ {
+ WRAPPER_NO_CONTRACT;
+ return (GetClassFlags(pMT, iClassIndex) & ClassInitFlags::ALLOCATECLASS_FLAG) != 0;
+ }
+
+ BOOL IsClassInitError(MethodTable* pMT, DWORD iClassIndex = (DWORD)-1)
+ {
+ WRAPPER_NO_CONTRACT;
+ return (GetClassFlags(pMT, iClassIndex) & ClassInitFlags::ERROR_FLAG) != 0;
+ }
+
+ void SetClassInitialized(MethodTable* pMT);
+ void SetClassInitError(MethodTable* pMT);
+
+ void EnsureDynamicClassIndex(DWORD dwID);
+
+ void AllocateDynamicClass(MethodTable *pMT);
+
+ void PopulateClass(MethodTable *pMT);
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ static DWORD OffsetOfDataBlob()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(DomainLocalModule, m_pDataBlob);
+ }
+
+#ifndef CLR_STANDALONE_BINDER
+ FORCEINLINE MethodTable * GetMethodTableFromClassDomainID(DWORD dwClassDomainID)
+ {
+ DWORD rid = (DWORD)(dwClassDomainID) + 1;
+ TypeHandle th = GetDomainFile()->GetModule()->LookupTypeDef(TokenFromRid(rid, mdtTypeDef));
+ _ASSERTE(!th.IsNull());
+ MethodTable * pMT = th.AsMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+ return pMT;
+ }
+#endif // CLR_STANDALONE_BINDER
+
+private:
+ friend void EmitFastGetSharedStaticBase(CPUSTUBLINKER *psl, CodeLabel *init, bool bCCtorCheck);
+
+ void SetClassFlags(MethodTable* pMT, DWORD dwFlags);
+ DWORD GetClassFlags(MethodTable* pMT, DWORD iClassIndex);
+
+ PTR_DomainFile m_pDomainFile;
+ VolatilePtr<DynamicClassInfo, PTR_DynamicClassInfo> m_pDynamicClassTable; // used for generics and reflection.emit in memory
+ Volatile<SIZE_T> m_aDynamicEntries; // number of entries in dynamic table
+ VolatilePtr<UMEntryThunk> m_pADThunkTable;
+ PTR_OBJECTREF m_pGCStatics; // Handle to GC statics of the module
+
+ // In addition to storing the ModuleIndex in the Module class, we also
+ // keep a copy of the ModuleIndex in the DomainLocalModule class. This
+ // allows the thread static JIT helpers to quickly convert a pointer to
+ // a DomainLocalModule into a ModuleIndex.
+ ModuleIndex m_ModuleIndex;
+
+ // Note that the static offset calculation in code:Module::BuildStaticsOffsets takes the offset m_pDataBlob
+ // into consideration for alignment so we do not need any padding to ensure that the start of the data blob is aligned
+
+ BYTE m_pDataBlob[0]; // First byte of the statics blob
+
+ // Layout of m_pDataBlob is:
+ // ClassInit bytes (hold flags for cctor run, cctor error, etc)
+ // Non GC Statics
+
+public:
+
+    // The Module class needs to be able to initialize ModuleIndex,
+    // so for now I will make it a friend.
+ friend class Module;
+
+ FORCEINLINE ModuleIndex GetModuleIndex()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_ModuleIndex;
+ }
+
+}; // struct DomainLocalModule
+
+
+#ifndef CLR_STANDALONE_BINDER
+
+typedef DPTR(class DomainLocalBlock) PTR_DomainLocalBlock;
+class DomainLocalBlock
+{
+ friend class ClrDataAccess;
+ friend class CheckAsmOffsets;
+
+private:
+ PTR_AppDomain m_pDomain;
+ DPTR(PTR_DomainLocalModule) m_pModuleSlots;
+ SIZE_T m_aModuleIndices; // Module entries the shared block has allocated
+
+public: // used by code generators
+ static SIZE_T GetOffsetOfModuleSlotsPointer() { return offsetof(DomainLocalBlock, m_pModuleSlots);}
+
+public:
+
+#ifndef DACCESS_COMPILE
+ DomainLocalBlock()
+ : m_pDomain(NULL), m_pModuleSlots(NULL), m_aModuleIndices(0) {}
+
+ void EnsureModuleIndex(ModuleIndex index);
+
+ void Init(AppDomain *pDomain) { LIMITED_METHOD_CONTRACT; m_pDomain = pDomain; }
+#endif
+
+ void SetModuleSlot(ModuleIndex index, PTR_DomainLocalModule pLocalModule);
+
+ FORCEINLINE PTR_DomainLocalModule GetModuleSlot(ModuleIndex index)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ _ASSERTE(index.m_dwIndex < m_aModuleIndices);
+ return m_pModuleSlots[index.m_dwIndex];
+ }
+
+ inline PTR_DomainLocalModule GetModuleSlot(MethodTable* pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetModuleSlot(pMT->GetModuleForStatics()->GetModuleIndex());
+ }
+
+ DomainFile* TryGetDomainFile(ModuleIndex index)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ // the publishing of m_aModuleIndices and m_pModuleSlots is dependent
+ // on the order of accesses; we must ensure that we read from m_aModuleIndices
+ // before m_pModuleSlots.
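+        // (The MemoryBarrier below is the read side of that protocol; the writer is
+        // expected to publish the new slot array before growing m_aModuleIndices.)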
+ if (index.m_dwIndex < m_aModuleIndices)
+ {
+ MemoryBarrier();
+ if (m_pModuleSlots[index.m_dwIndex])
+ {
+ return m_pModuleSlots[index.m_dwIndex]->GetDomainFile();
+ }
+ }
+
+ return NULL;
+ }
+
+ DomainFile* GetDomainFile(SIZE_T ModuleID)
+ {
+ WRAPPER_NO_CONTRACT;
+ ModuleIndex index = Module::IDToIndex(ModuleID);
+ _ASSERTE(index.m_dwIndex < m_aModuleIndices);
+ return m_pModuleSlots[index.m_dwIndex]->GetDomainFile();
+ }
+
+#ifndef DACCESS_COMPILE
+ void SetDomainFile(ModuleIndex index, DomainFile* pDomainFile)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(index.m_dwIndex < m_aModuleIndices);
+ m_pModuleSlots[index.m_dwIndex]->SetDomainFile(pDomainFile);
+ }
+#endif
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+
+private:
+
+ //
+ // Low level routines to get & set class entries
+ //
+
+};
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+
+// The large heap handle bucket class is used to contain handles allocated
+// from an array contained in the large heap.
+class LargeHeapHandleBucket
+{
+public:
+    // Constructor and destructor.
+ LargeHeapHandleBucket(LargeHeapHandleBucket *pNext, DWORD Size, BaseDomain *pDomain, BOOL bCrossAD = FALSE);
+ ~LargeHeapHandleBucket();
+
+ // This returns the next bucket.
+ LargeHeapHandleBucket *GetNext()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pNext;
+ }
+
+ // This returns the number of remaining handle slots.
+ DWORD GetNumRemainingHandles()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_ArraySize - m_CurrentPos;
+ }
+
+ void ConsumeRemaining()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_CurrentPos = m_ArraySize;
+ }
+
+ OBJECTREF *TryAllocateEmbeddedFreeHandle();
+
+ // Allocate handles from the bucket.
+ OBJECTREF* AllocateHandles(DWORD nRequested);
+ OBJECTREF* CurrentPos()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pArrayDataPtr + m_CurrentPos;
+ }
+
+private:
+ LargeHeapHandleBucket *m_pNext;
+ int m_ArraySize;
+ int m_CurrentPos;
+ int m_CurrentEmbeddedFreePos;
+ OBJECTHANDLE m_hndHandleArray;
+ OBJECTREF *m_pArrayDataPtr;
+};
+
+
+
+// The large heap handle table is used to allocate handles that are pointers
+// to objects stored in an array in the large object heap.
+class LargeHeapHandleTable
+{
+public:
+    // Constructor and destructor.
+ LargeHeapHandleTable(BaseDomain *pDomain, DWORD InitialBucketSize);
+ ~LargeHeapHandleTable();
+
+ // Allocate handles from the large heap handle table.
+ OBJECTREF* AllocateHandles(DWORD nRequested, BOOL bCrossAD = FALSE);
+
+ // Release object handles allocated using AllocateHandles().
+ void ReleaseHandles(OBJECTREF *pObjRef, DWORD nReleased);
+
+private:
+ // The buckets of object handles.
+ LargeHeapHandleBucket *m_pHead;
+
+ // We need to know the containing domain so we know where to allocate handles
+ BaseDomain *m_pDomain;
+
+ // The size of the LargeHeapHandleBuckets.
+ DWORD m_NextBucketSize;
+
+ // for finding and re-using embedded free items in the list
+ LargeHeapHandleBucket *m_pFreeSearchHint;
+ DWORD m_cEmbeddedFree;
+
+#ifdef _DEBUG
+
+ // these functions are present to enforce that there is a locking mechanism in place
+ // for each LargeHeapHandleTable even though the code itself does not do the locking
+ // you must tell the table which lock you intend to use and it will verify that it has
+ // in fact been taken before performing any operations
+
+public:
+ void RegisterCrstDebug(CrstBase *pCrst)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // this function must be called exactly once
+ _ASSERTE(pCrst != NULL);
+ _ASSERTE(m_pCrstDebug == NULL);
+ m_pCrstDebug = pCrst;
+ }
+
+private:
+ // we will assert that this Crst is held before using the object
+ CrstBase *m_pCrstDebug;
+
+#endif
+
+};
+
+class LargeHeapHandleBlockHolder;
+void LargeHeapHandleBlockHolder__StaticFree(LargeHeapHandleBlockHolder*);
+
+
+class LargeHeapHandleBlockHolder : public Holder<LargeHeapHandleBlockHolder*, DoNothing, LargeHeapHandleBlockHolder__StaticFree>
+{
+ LargeHeapHandleTable* m_pTable;
+ DWORD m_Count;
+ OBJECTREF* m_Data;
+public:
+ FORCEINLINE LargeHeapHandleBlockHolder(LargeHeapHandleTable* pOwner, DWORD nCount)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_Data = pOwner->AllocateHandles(nCount);
+ m_Count=nCount;
+ m_pTable=pOwner;
+ };
+
+ FORCEINLINE void FreeData()
+ {
+ WRAPPER_NO_CONTRACT;
+ for (DWORD i=0;i< m_Count;i++)
+ ClearObjectReference(m_Data+i);
+ m_pTable->ReleaseHandles(m_Data, m_Count);
+ };
+ FORCEINLINE OBJECTREF* operator[] (DWORD idx)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(idx<m_Count);
+ return &(m_Data[idx]);
+ }
+};
+
+FORCEINLINE void LargeHeapHandleBlockHolder__StaticFree(LargeHeapHandleBlockHolder* pHolder)
+{
+ WRAPPER_NO_CONTRACT;
+ pHolder->FreeData();
+};
+
+
+
+
+
+// The thread static handle bucket class is used to contain handles allocated
+// from an array contained in the large heap.
+class ThreadStaticHandleBucket
+{
+public:
+    // Constructor and destructor.
+ ThreadStaticHandleBucket(ThreadStaticHandleBucket *pNext, DWORD Size, BaseDomain *pDomain);
+ ~ThreadStaticHandleBucket();
+
+ // This returns the next bucket.
+ ThreadStaticHandleBucket *GetNext()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pNext;
+ }
+
+ // Allocate handles from the bucket.
+ OBJECTHANDLE GetHandles();
+
+private:
+ ThreadStaticHandleBucket *m_pNext;
+ int m_ArraySize;
+ OBJECTHANDLE m_hndHandleArray;
+};
+
+
+// The thread static handle table is used to allocate handles that are pointers
+// to objects stored in an array in the large object heap.
+class ThreadStaticHandleTable
+{
+public:
+    // Constructor and destructor.
+ ThreadStaticHandleTable(BaseDomain *pDomain);
+ ~ThreadStaticHandleTable();
+
+ // Allocate handles from the large heap handle table.
+ OBJECTHANDLE AllocateHandles(DWORD nRequested);
+
+private:
+ // The buckets of object handles.
+ ThreadStaticHandleBucket *m_pHead;
+
+ // We need to know the containing domain so we know where to allocate handles
+ BaseDomain *m_pDomain;
+};
+
+
+
+
+//--------------------------------------------------------------------------------------
+// Base class for domains. It provides an abstract way of finding the first assembly and
+// for creating assemblies in the domain. The system domain only has one assembly; it
+// contains the classes that are logically shared between domains. All other domains can
+// have multiple assemblies. Iteration is done by getting the first assembly and then
+// calling the Next() method on the assembly.
+//
+// The system domain should be as small as possible; it includes object, exceptions, etc.,
+// which are the basic classes required to load other assemblies. All other classes
+// should be loaded into the domain. Of course there is a trade-off between loading the
+// same classes multiple times, requiring all domains to load certain assemblies (working
+// set), and being able to specify specific versions.
+//
+
+#define LOW_FREQUENCY_HEAP_RESERVE_SIZE (3 * PAGE_SIZE)
+#define LOW_FREQUENCY_HEAP_COMMIT_SIZE (1 * PAGE_SIZE)
+
+#define HIGH_FREQUENCY_HEAP_RESERVE_SIZE (10 * PAGE_SIZE)
+#define HIGH_FREQUENCY_HEAP_COMMIT_SIZE (1 * PAGE_SIZE)
+
+#define STUB_HEAP_RESERVE_SIZE (3 * PAGE_SIZE)
+#define STUB_HEAP_COMMIT_SIZE (1 * PAGE_SIZE)
+
+// --------------------------------------------------------------------------------
+// PE File List lock - for creating list locks on PE files
+// --------------------------------------------------------------------------------
+
+class PEFileListLock : public ListLock
+{
+public:
+#ifndef DACCESS_COMPILE
+ ListLockEntry *FindFileLock(PEFile *pFile)
+ {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ PRECONDITION(HasLock());
+
+ ListLockEntry *pEntry;
+
+ for (pEntry = m_pHead;
+ pEntry != NULL;
+ pEntry = pEntry->m_pNext)
+ {
+ if (((PEFile *)pEntry->m_pData)->Equals(pFile))
+ {
+ return pEntry;
+ }
+ }
+
+ return NULL;
+ }
+#endif // DACCESS_COMPILE
+
+ DEBUG_NOINLINE static void HolderEnter(PEFileListLock *pThis) PUB
+ {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+
+ pThis->Enter();
+ }
+
+ DEBUG_NOINLINE static void HolderLeave(PEFileListLock *pThis) PUB
+ {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+
+ pThis->Leave();
+ }
+
+ typedef Wrapper<PEFileListLock*, PEFileListLock::HolderEnter, PEFileListLock::HolderLeave> Holder;
+};
+
+typedef PEFileListLock::Holder PEFileListLockHolder;
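+
+// A minimal usage sketch for the holder above, assuming pFileListLock and pFile are
+// caller-supplied locals (hypothetical names):
+//
+//     {
+//         PEFileListLockHolder lockHolder(pFileListLock);  // Enter() on construction
+//         ListLockEntry *pEntry = pFileListLock->FindFileLock(pFile);
+//         // ... inspect or create the per-file entry while the list lock is held ...
+//     }                                                    // Leave() on destruction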
+
+// Loading infrastructure:
+//
+// a DomainFile is a file being loaded. Files are loaded in layers to enable loading in the
+// presence of dependency loops.
+//
+// FileLoadLevel describes the various levels available. These are implemented slightly
+// differently for assemblies and modules, but the basic structure is the same.
+//
+// LoadLock and FileLoadLock form the ListLock data structures for files. The FileLoadLock
+// is specialized in that it allows taking a lock at a particular level. Basically, any
+// thread may obtain the lock at a level to which the file has previously been loaded, but
+// only one thread may obtain the lock at its current level.
+//
+// The PendingLoadQueue is a per-thread data structure which serves two purposes. First, it
+// holds a "load limit" which automatically restricts the level of recursive loads to be
+// one less than the level of the load currently in progress. This, together with the AppDomain
+// LoadLock level behavior, will prevent any deadlocks from occurring due to circular
+// dependencies. (Note that it is important that the loading logic understands this restriction,
+// and any given level of loading must deal with the fact that any recursive loads will be partially
+// unfulfilled in a specific way.)
+//
+// The second function is to queue up any unfulfilled load requests for the thread. These
+// are then delivered immediately after the current load request is dealt with.
+
+class FileLoadLock : public ListLockEntry
+{
+private:
+ FileLoadLevel m_level;
+ DomainFile *m_pDomainFile;
+ HRESULT m_cachedHR;
+ ADID m_AppDomainId;
+
+public:
+ static FileLoadLock *Create(PEFileListLock *pLock, PEFile *pFile, DomainFile *pDomainFile);
+
+ ~FileLoadLock();
+ DomainFile *GetDomainFile();
+ ADID GetAppDomainId();
+ FileLoadLevel GetLoadLevel();
+
+ // CanAcquire will return FALSE if Acquire will definitely not take the lock due
+ // to levels or deadlock.
+ // (Note that there is a race exiting from the function, where Acquire may end
+ // up not taking the lock anyway if another thread did work in the meantime.)
+ BOOL CanAcquire(FileLoadLevel targetLevel);
+
+ // Acquire will return FALSE and not take the lock if the file
+ // has already been loaded to the target level. Otherwise,
+ // it will return TRUE and take the lock.
+ //
+ // Note that the taker must release the lock via CompleteLoadLevel.
+ BOOL Acquire(FileLoadLevel targetLevel);
+
+ // CompleteLoadLevel can be called after Acquire returns true
+ // returns TRUE if it updated load level, FALSE if the level was set already
+ BOOL CompleteLoadLevel(FileLoadLevel level, BOOL success);
+
+ void SetError(Exception *ex);
+
+ void AddRef();
+ UINT32 Release() DAC_EMPTY_RET(0);
+
+private:
+
+ FileLoadLock(PEFileListLock *pLock, PEFile *pFile, DomainFile *pDomainFile);
+
+ static void HolderLeave(FileLoadLock *pThis);
+
+public:
+ typedef Wrapper<FileLoadLock *, DoNothing, FileLoadLock::HolderLeave> Holder;
+
+};
+
+typedef FileLoadLock::Holder FileLoadLockHolder;
+
+#ifndef DACCESS_COMPILE
+ typedef ReleaseHolder<FileLoadLock> FileLoadLockRefHolder;
+#endif // DACCESS_COMPILE
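+
+// A minimal sketch of the Acquire/CompleteLoadLevel protocol described above, assuming
+// pLock is a FileLoadLock for the file being advanced and DoWorkForLevel is a
+// hypothetical helper that performs the work for one level:
+//
+//     if (pLock->Acquire(targetLevel))
+//     {
+//         // We own the lock at the file's current level; do the work for this
+//         // level, then publish the result (per the note above, the taker
+//         // releases the lock via CompleteLoadLevel).
+//         BOOL fSuccess = DoWorkForLevel(pLock->GetDomainFile(), targetLevel);
+//         pLock->CompleteLoadLevel(targetLevel, fSuccess);
+//     }
+//     // else: the file has already been loaded to targetLevel by another thread.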
+
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning (disable: 4324) //sometimes 64bit compilers complain about alignment
+#endif
+class LoadLevelLimiter
+{
+ FileLoadLevel m_currentLevel;
+ LoadLevelLimiter* m_previousLimit;
+ BOOL m_bActive;
+
+public:
+
+ LoadLevelLimiter()
+ : m_currentLevel(FILE_ACTIVE),
+ m_previousLimit(NULL),
+ m_bActive(FALSE)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ void Activate()
+ {
+ WRAPPER_NO_CONTRACT;
+ m_previousLimit=GetThread()->GetLoadLevelLimiter();
+ if(m_previousLimit)
+ m_currentLevel=m_previousLimit->GetLoadLevel();
+ GetThread()->SetLoadLevelLimiter(this);
+ m_bActive=TRUE;
+ }
+
+ void Deactivate()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (m_bActive)
+ {
+ GetThread()->SetLoadLevelLimiter(m_previousLimit);
+ m_bActive=FALSE;
+ }
+ }
+
+ ~LoadLevelLimiter()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // PendingLoadQueues are allocated on the stack during a load, and
+ // shared with all nested loads on the same thread.
+
+ // Make sure the thread pointer gets reset after the
+ // top level queue goes out of scope.
+ if(m_bActive)
+ {
+ Deactivate();
+ }
+ }
+
+ FileLoadLevel GetLoadLevel()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_currentLevel;
+ }
+
+ void SetLoadLevel(FileLoadLevel level)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_currentLevel = level;
+ }
+};
+#ifdef _MSC_VER
+#pragma warning (pop) //4324
+#endif
+
+#define OVERRIDE_LOAD_LEVEL_LIMIT(newLimit) \
+ LoadLevelLimiter __newLimit; \
+ __newLimit.Activate(); \
+ __newLimit.SetLoadLevel(newLimit);
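+
+// A minimal sketch of how the macro above is meant to be used, assuming FILE_LOADED is
+// a valid FileLoadLevel and SomeLoadStep is a hypothetical caller:
+//
+//     void SomeLoadStep(DomainFile *pFile)
+//     {
+//         OVERRIDE_LOAD_LEVEL_LIMIT(FILE_LOADED);
+//         // Recursive loads triggered on this thread are now limited to
+//         // FILE_LOADED until __newLimit goes out of scope (its destructor
+//         // calls Deactivate()).
+//     }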
+
+// A BaseDomain holds much of the basic information in a code:AppDomain, including
+//
+// * code:#AppdomainHeaps - Heaps for any data structures that will be freed on appdomain unload
+//
+class BaseDomain
+{
+ friend class Assembly;
+ friend class AssemblySpec;
+ friend class AppDomain;
+ friend class AppDomainNative;
+
+ VPTR_BASE_VTABLE_CLASS(BaseDomain)
+ VPTR_UNIQUE(VPTR_UNIQUE_BaseDomain)
+
+protected:
+ // These 2 variables are only used on the AppDomain, but by placing them here
+ // we reduce the cost of keeping the asmconstants file up to date.
+
+ // The creation sequence number of this app domain (starting from 1)
+ // This ID is generated by the code:SystemDomain::GetNewAppDomainId routine
+ // The IDs are recycled.
+ //
+ // see also code:ADID
+ ADID m_dwId;
+
+ DomainLocalBlock m_sDomainLocalBlock;
+
+public:
+
+ class AssemblyIterator;
+ friend class AssemblyIterator;
+
+ // Static initialization.
+ static void Attach();
+
+ //****************************************************************************************
+ //
+ // Initialization/shutdown routines for every instance of a BaseDomain.
+
+ BaseDomain();
+ void Init();
+ void Stop();
+ void Terminate();
+
+ // ID to uniquely identify this AppDomain - used by the AppDomain publishing
+ // service (to publish the list of all appdomains present in the process),
+ // which in turn is used by, e.g., the debugger (to decide which App-
+ // Domain(s) to attach to).
+ // This is also used by Remoting for routing cross-appDomain calls.
+ ADID GetId (void)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return m_dwId;
+ }
+
+ virtual BOOL IsAppDomain() { LIMITED_METHOD_DAC_CONTRACT; return FALSE; }
+ virtual BOOL IsSharedDomain() { LIMITED_METHOD_DAC_CONTRACT; return FALSE; }
+
+ inline BOOL IsDefaultDomain(); // defined later in this file
+ virtual PTR_LoaderAllocator GetLoaderAllocator() = 0;
+ virtual PTR_AppDomain AsAppDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ _ASSERTE(!"Not an AppDomain");
+ return NULL;
+ }
+
+
+ // If one domain is the SharedDomain and one is an AppDomain then
+ // return the AppDomain, i.e. return the domain with the shorter lifetime
+ // of the two given domains.
+ static PTR_BaseDomain ComputeBaseDomain(
+ BaseDomain *pGenericDefinitionDomain, // the domain that owns the generic type or method
+ Instantiation classInst, // the type arguments to the type (if any)
+ Instantiation methodInst = Instantiation()); // the type arguments to the method (if any)
+
+ static PTR_BaseDomain ComputeBaseDomain(TypeKey * pTypeKey);
+
+#ifdef FEATURE_COMINTEROP
+ //****************************************************************************************
+ //
+ // This will look up interop data for a method table
+ //
+
+#ifndef DACCESS_COMPILE
+ // Returns the data pointer if present, NULL otherwise
+ InteropMethodTableData *LookupComInteropData(MethodTable *pMT)
+ {
+ // Take the lock
+ CrstHolder holder(&m_InteropDataCrst);
+
+ // Lookup
+ InteropMethodTableData *pData = (InteropMethodTableData*) m_interopDataHash.LookupValue((UPTR) pMT, (LPVOID) NULL);
+
+ // Not there...
+ if (pData == (InteropMethodTableData*) INVALIDENTRY)
+ return NULL;
+
+ // Found it
+ return pData;
+ }
+
+ // Returns TRUE if successfully inserted, FALSE if this would be a duplicate entry
+ BOOL InsertComInteropData(MethodTable* pMT, InteropMethodTableData *pData)
+ {
+ // We don't keep track of this kind of information for interfaces
+ _ASSERTE(!pMT->IsInterface());
+
+ // Take the lock
+ CrstHolder holder(&m_InteropDataCrst);
+
+ // Check to see that it's not already in there
+ InteropMethodTableData *pDupData = (InteropMethodTableData*) m_interopDataHash.LookupValue((UPTR) pMT, (LPVOID) NULL);
+ if (pDupData != (InteropMethodTableData*) INVALIDENTRY)
+ return FALSE;
+
+ // Not in there, so insert
+ m_interopDataHash.InsertValue((UPTR) pMT, (LPVOID) pData);
+
+ // Success
+ return TRUE;
+ }
+#endif // DACCESS_COMPILE
+#endif // FEATURE_COMINTEROP
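+
+// A minimal sketch of the lookup-then-insert pattern the two methods above support.
+// InsertComInteropData reports a duplicate via FALSE, so a racing creator can discard
+// its copy; BuildInteropData and FreeInteropData are hypothetical helpers:
+//
+//     InteropMethodTableData *pData = pDomain->LookupComInteropData(pMT);
+//     if (pData == NULL)
+//     {
+//         InteropMethodTableData *pNewData = BuildInteropData(pMT);
+//         if (pDomain->InsertComInteropData(pMT, pNewData))
+//             pData = pNewData;
+//         else
+//         {   // Another thread inserted first; use its entry instead.
+//             FreeInteropData(pNewData);
+//             pData = pDomain->LookupComInteropData(pMT);
+//         }
+//     }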
+
+ void SetDisableInterfaceCache()
+ {
+ m_fDisableInterfaceCache = TRUE;
+ }
+ BOOL GetDisableInterfaceCache()
+ {
+ return m_fDisableInterfaceCache;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ MngStdInterfacesInfo * GetMngStdInterfacesInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pMngStdInterfacesInfo;
+ }
+
+ PTR_CLRPrivBinderWinRT GetWinRtBinder()
+ {
+ return m_pWinRtBinder;
+ }
+#endif // FEATURE_COMINTEROP
+
+ //****************************************************************************************
+ // This method returns marshaling data that the EE uses that is stored on a per app domain
+ // basis.
+ EEMarshalingData *GetMarshalingData();
+
+ // Deletes marshaling data at shutdown (which contains cached factories that need to be released)
+ void DeleteMarshalingData();
+
+#ifdef _DEBUG
+ BOOL OwnDomainLocalBlockLock()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return m_DomainLocalBlockCrst.OwnedByCurrentThread();
+ }
+#endif
+
+ //****************************************************************************************
+ //
+
+ virtual IApplicationSecurityDescriptor* GetSecurityDescriptor() { LIMITED_METHOD_CONTRACT; return NULL; }
+
+
+ //****************************************************************************************
+ // Get the class init lock. The method is limited to friends because inappropriate use
+ // will cause deadlocks in the system
+ ListLock* GetClassInitLock()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return &m_ClassInitLock;
+ }
+
+ ListLock* GetJitLock()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_JITLock;
+ }
+
+ ListLock* GetILStubGenLock()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_ILStubGenLock;
+ }
+
+ STRINGREF *IsStringInterned(STRINGREF *pString);
+ STRINGREF *GetOrInternString(STRINGREF *pString);
+
+ virtual BOOL CanUnload() { LIMITED_METHOD_CONTRACT; return FALSE; } // can never unload BaseDomain
+
+ // Returns an array of OBJECTREF* that can be used to store domain-specific data.
+ // Statics and reflection info (Types, MemberInfo, ...) are stored this way.
+ // If ppLazyAllocate != 0, allocation will only take place if *ppLazyAllocate == 0 (i.e. the array
+ // has not been allocated yet), and the allocation will be properly serialized.
+ OBJECTREF *AllocateObjRefPtrsInLargeTable(int nRequested, OBJECTREF** ppLazyAllocate = NULL, BOOL bCrossAD = FALSE);
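+
+// A minimal sketch of the lazy-allocation pattern implied by ppLazyAllocate, assuming
+// s_pSlots is a hypothetical cached pointer owned by the caller:
+//
+//     static OBJECTREF *s_pSlots = NULL;
+//     OBJECTREF *pSlots = pDomain->AllocateObjRefPtrsInLargeTable(4, &s_pSlots);
+//     // If s_pSlots was already non-NULL, the existing array is returned;
+//     // otherwise the allocation is performed and properly serialized.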
+
+#ifdef FEATURE_PREJIT
+ // Ensures that the file for logging profile data is open (we only open it once)
+ // return false on failure
+ static BOOL EnsureNGenLogFileOpen();
+#endif
+
+ //****************************************************************************************
+ // Handles
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) // needs GetCurrentThreadHomeHeapNumber
+ OBJECTHANDLE CreateTypedHandle(OBJECTREF object, int type)
+ {
+ WRAPPER_NO_CONTRACT;
+ return ::CreateTypedHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object, type);
+ }
+
+ OBJECTHANDLE CreateHandle(OBJECTREF object)
+ {
+ WRAPPER_NO_CONTRACT;
+ CONDITIONAL_CONTRACT_VIOLATION(ModeViolation, object == NULL)
+ return ::CreateHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object);
+ }
+
+ OBJECTHANDLE CreateWeakHandle(OBJECTREF object)
+ {
+ WRAPPER_NO_CONTRACT;
+ return ::CreateWeakHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object);
+ }
+
+ OBJECTHANDLE CreateShortWeakHandle(OBJECTREF object)
+ {
+ WRAPPER_NO_CONTRACT;
+ return ::CreateShortWeakHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object);
+ }
+
+ OBJECTHANDLE CreateLongWeakHandle(OBJECTREF object)
+ {
+ WRAPPER_NO_CONTRACT;
+ CONDITIONAL_CONTRACT_VIOLATION(ModeViolation, object == NULL)
+ return ::CreateLongWeakHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object);
+ }
+
+ OBJECTHANDLE CreateStrongHandle(OBJECTREF object)
+ {
+ WRAPPER_NO_CONTRACT;
+ return ::CreateStrongHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object);
+ }
+
+ OBJECTHANDLE CreatePinningHandle(OBJECTREF object)
+ {
+ WRAPPER_NO_CONTRACT;
+#if CHECK_APP_DOMAIN_LEAKS
+ if(IsAppDomain())
+ object->TryAssignAppDomain((AppDomain*)this,TRUE);
+#endif
+ return ::CreatePinningHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object);
+ }
+
+ OBJECTHANDLE CreateSizedRefHandle(OBJECTREF object)
+ {
+ WRAPPER_NO_CONTRACT;
+ OBJECTHANDLE h = ::CreateSizedRefHandle(
+ m_hHandleTableBucket->pTable[GCHeap::IsServerHeap() ? (m_dwSizedRefHandles % m_iNumberOfProcessors) : GetCurrentThreadHomeHeapNumber()],
+ object);
+ InterlockedIncrement((LONG*)&m_dwSizedRefHandles);
+ return h;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ OBJECTHANDLE CreateRefcountedHandle(OBJECTREF object)
+ {
+ WRAPPER_NO_CONTRACT;
+ return ::CreateRefcountedHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object);
+ }
+
+ OBJECTHANDLE CreateWinRTWeakHandle(OBJECTREF object, IWeakReference* pWinRTWeakReference)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ return ::CreateWinRTWeakHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object, pWinRTWeakReference);
+ }
+#endif // FEATURE_COMINTEROP
+
+ OBJECTHANDLE CreateVariableHandle(OBJECTREF object, UINT type)
+ {
+ WRAPPER_NO_CONTRACT;
+ return ::CreateVariableHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], object, type);
+ }
+
+ OBJECTHANDLE CreateDependentHandle(OBJECTREF primary, OBJECTREF secondary)
+ {
+ WRAPPER_NO_CONTRACT;
+ return ::CreateDependentHandle(m_hHandleTableBucket->pTable[GetCurrentThreadHomeHeapNumber()], primary, secondary);
+ }
+#endif // DACCESS_COMPILE && !CROSSGEN_COMPILE
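+
+// A minimal sketch of pairing a handle creation above with its release, assuming
+// objRef is a GC-protected OBJECTREF and ::DestroyHandle is the matching release:
+//
+//     OBJECTHANDLE hObj = pDomain->CreateStrongHandle(objRef);
+//     // ... the object stays reachable through the handle across GCs ...
+//     DestroyHandle(hObj);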
+
+ BOOL ContainsOBJECTHANDLE(OBJECTHANDLE handle);
+
+#ifdef FEATURE_FUSION
+ IApplicationContext *GetFusionContext() {LIMITED_METHOD_CONTRACT; return m_pFusionContext; }
+#else
+ IUnknown *GetFusionContext() {LIMITED_METHOD_CONTRACT; return m_pFusionContext; }
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+ CLRPrivBinderCoreCLR *GetTPABinderContext() {LIMITED_METHOD_CONTRACT; return m_pTPABinderContext; }
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+#endif
+
+ CrstExplicitInit * GetLoaderAllocatorReferencesLock()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_crstLoaderAllocatorReferences;
+ }
+
+protected:
+
+ //****************************************************************************************
+ // Helper method to initialize the large heap handle table.
+ void InitLargeHeapHandleTable();
+
+ //****************************************************************************************
+ // Adds an assembly to the domain.
+ void AddAssemblyNoLock(Assembly* assem);
+
+ //****************************************************************************************
+ //
+ // Hash table that maps a MethodTable to COM Interop compatibility data.
+ PtrHashMap m_interopDataHash;
+
+ // Critical sections & locks
+ PEFileListLock m_FileLoadLock; // Protects the list of assemblies in the domain
+ CrstExplicitInit m_DomainCrst; // General Protection for the Domain
+ CrstExplicitInit m_DomainCacheCrst; // Protects the Assembly and Unmanaged caches
+ CrstExplicitInit m_DomainLocalBlockCrst;
+ CrstExplicitInit m_InteropDataCrst; // Used for COM Interop compatibility
+ // Used to protect the reference lists in the collectible loader allocators attached to this appdomain
+ CrstExplicitInit m_crstLoaderAllocatorReferences;
+ CrstExplicitInit m_WinRTFactoryCacheCrst; // For WinRT factory cache
+
+ //#AssemblyListLock
+ // Used to protect the assembly list. Taken also by GC or debugger thread, therefore we have to avoid
+ // triggering GC while holding this lock (by switching the thread to GC_NOTRIGGER while it is held).
+ CrstExplicitInit m_crstAssemblyList;
+ BOOL m_fDisableInterfaceCache; // RCW COM interface cache
+ ListLock m_ClassInitLock;
+ ListLock m_JITLock;
+ ListLock m_ILStubGenLock;
+
+ // Fusion context, used for adding assemblies to this domain. It defines
+ // fusion properties for finding assemblies such as SharedBinPath,
+ // PrivateBinPath, Application Directory, etc.
+#ifdef FEATURE_FUSION
+ IApplicationContext* m_pFusionContext; // Binding context for the domain
+#else
+ IUnknown *m_pFusionContext; // Current binding context for the domain
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+ CLRPrivBinderCoreCLR *m_pTPABinderContext; // Reference to the binding context that holds TPA list details
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+#endif
+
+ HandleTableBucket *m_hHandleTableBucket;
+
+ // The large heap handle table.
+ LargeHeapHandleTable *m_pLargeHeapHandleTable;
+
+ // The large heap handle table critical section.
+ CrstExplicitInit m_LargeHeapHandleTableCrst;
+
+ EEMarshalingData *m_pMarshalingData;
+
+#ifdef FEATURE_COMINTEROP
+ // Information regarding the managed standard interfaces.
+ MngStdInterfacesInfo *m_pMngStdInterfacesInfo;
+
+ // WinRT binder (only in classic = non-AppX; AppX has the WinRT binder inside code:CLRPrivBinderAppX)
+ PTR_CLRPrivBinderWinRT m_pWinRtBinder;
+#endif // FEATURE_COMINTEROP
+
+ // Number of allocated slots for context local statics of this domain
+ DWORD m_dwContextStatics;
+
+ // Protects allocation of slot IDs for thread and context statics
+ static CrstStatic m_SpecialStaticsCrst;
+
+public:
+ // Lazily allocate offset for context static
+ DWORD AllocateContextStaticsOffset(DWORD* pOffsetSlot);
+
+public:
+ // Only call this routine when you can guarantee there are no
+ // loads in progress.
+ void ClearFusionContext();
+
+public:
+
+ //****************************************************************************************
+ // Synchronization holders.
+
+ class LockHolder : public CrstHolder
+ {
+ public:
+ LockHolder(BaseDomain *pD)
+ : CrstHolder(&pD->m_DomainCrst)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ };
+ friend class LockHolder;
+
+ class CacheLockHolder : public CrstHolder
+ {
+ public:
+ CacheLockHolder(BaseDomain *pD)
+ : CrstHolder(&pD->m_DomainCacheCrst)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ };
+ friend class CacheLockHolder;
+
+ class DomainLocalBlockLockHolder : public CrstHolder
+ {
+ public:
+ DomainLocalBlockLockHolder(BaseDomain *pD)
+ : CrstHolder(&pD->m_DomainLocalBlockCrst)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ };
+ friend class DomainLocalBlockLockHolder;
+
+ class LoadLockHolder : public PEFileListLockHolder
+ {
+ public:
+ LoadLockHolder(BaseDomain *pD, BOOL Take = TRUE)
+ : PEFileListLockHolder(&pD->m_FileLoadLock, Take)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+ }
+ };
+ friend class LoadLockHolder;
+ class WinRTFactoryCacheLockHolder : public CrstHolder
+ {
+ public:
+ WinRTFactoryCacheLockHolder(BaseDomain *pD)
+ : CrstHolder(&pD->m_WinRTFactoryCacheCrst)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ };
+ friend class WinRTFactoryCacheLockHolder;
+
+public:
+ void InitVSD();
+ RangeList *GetCollectibleVSDRanges() { return &m_collVSDRanges; }
+
+private:
+ TypeIDMap m_typeIDMap;
+ // Range list for collectible types. Maps VSD PCODEs back to the VirtualCallStubManager they belong to
+ LockedRangeList m_collVSDRanges;
+
+public:
+ UINT32 GetTypeID(PTR_MethodTable pMT);
+ UINT32 LookupTypeID(PTR_MethodTable pMT);
+ PTR_MethodTable LookupType(UINT32 id);
+
+private:
+ // I have yet to figure out an efficient way to get the number of handles
+ // of a particular type that's currently used by the process without
+ // spending more time looking at the handle table code. We know that
+ // our only customer (asp.net) in Dev10 is not going to create many of
+ // these handles, so I am taking a shortcut for now and keeping the sizedref
+ // handle count on the AD itself.
+ DWORD m_dwSizedRefHandles;
+
+ static int m_iNumberOfProcessors;
+
+public:
+ // Called by DestroySizedRefHandle
+ void DecNumSizedRefHandles()
+ {
+ WRAPPER_NO_CONTRACT;
+ LONG result;
+ result = InterlockedDecrement((LONG*)&m_dwSizedRefHandles);
+ _ASSERTE(result >= 0);
+ }
+
+ DWORD GetNumSizedRefHandles()
+ {
+ return m_dwSizedRefHandles;
+ }
+
+ // Profiler rejit
+private:
+ ReJitManager m_reJitMgr;
+
+public:
+ ReJitManager * GetReJitManager() { return &m_reJitMgr; }
+
+#ifdef DACCESS_COMPILE
+public:
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
+ bool enumThis);
+#endif
+
+#ifdef FEATURE_CORECLR
+public:
+ enum AppDomainCompatMode
+ {
+ APPDOMAINCOMPAT_NONE
+#ifdef FEATURE_LEGACYNETCF
+ , APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8 // for "AppDomainCompatSwitch" == "WindowsPhone_3.7.0.0" or "AppDomainCompatSwitch" == "WindowsPhone_3.8.0.0"
+#endif
+ };
+ void SetAppDomainCompatMode(AppDomainCompatMode compatMode);
+ AppDomainCompatMode GetAppDomainCompatMode();
+
+private:
+ AppDomainCompatMode m_CompatMode;
+#endif // FEATURE_CORECLR
+
+}; // class BaseDomain
+
+enum
+{
+ ATTACH_ASSEMBLY_LOAD = 0x1,
+ ATTACH_MODULE_LOAD = 0x2,
+ ATTACH_CLASS_LOAD = 0x4,
+
+ ATTACH_ALL = 0x7
+};
+
+class ADUnloadSink
+{
+
+protected:
+ ~ADUnloadSink();
+ CLREvent m_UnloadCompleteEvent;
+ HRESULT m_UnloadResult;
+ Volatile<LONG> m_cRef;
+public:
+ ADUnloadSink();
+ void ReportUnloadResult (HRESULT hr, OBJECTREF* pException);
+ void WaitUnloadCompletion();
+ HRESULT GetUnloadResult() {LIMITED_METHOD_CONTRACT; return m_UnloadResult;};
+ void Reset();
+ ULONG AddRef();
+ ULONG Release();
+};
+
+
+FORCEINLINE void ADUnloadSink__Release(ADUnloadSink* pADSink)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (pADSink)
+ pADSink->Release();
+}
+
+typedef Wrapper <ADUnloadSink*,DoNothing,ADUnloadSink__Release,NULL> ADUnloadSinkHolder;
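+
+// A minimal sketch of holding an ADUnloadSink across an unload wait, assuming
+// PrepareForWaitUnloadCompletion (declared later on code:AppDomain) returns an
+// AddRef-ed sink:
+//
+//     ADUnloadSinkHolder pSink(pDomain->PrepareForWaitUnloadCompletion());
+//     pSink->WaitUnloadCompletion();
+//     HRESULT hr = pSink->GetUnloadResult();
+//     // pSink->Release() runs automatically when the holder goes out of scope.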
+
+// This filters the output of IterateAssemblies. This ought to be declared more locally
+// but it would result in really verbose callsites.
+//
+// Assemblies can be categorized by their load status (loaded, loading, or loaded just
+// enough that they would be made available to profilers)
+// Independently, they can also be categorized as execution or introspection.
+//
+// An assembly will be included in the results of IterateAssemblies only if
+// the appropriate bit is set for *both* characterizations.
+//
+// The flags can be combined, so if you want all loaded assemblies, you must specify:
+//
+//     kIncludeLoaded|kIncludeExecution|kIncludeIntrospection
+
+enum AssemblyIterationFlags
+{
+ // load status flags
+ kIncludeLoaded = 0x00000001, // include assemblies that are already loaded
+ // (m_level >= code:FILE_LOAD_DELIVER_EVENTS)
+ kIncludeLoading = 0x00000002, // include assemblies that are still in the process of loading
+ // (all m_level values)
+ kIncludeAvailableToProfilers
+ = 0x00000020, // include assemblies available to profilers
+ // See comment at code:DomainFile::IsAvailableToProfilers
+
+ // Execution / introspection flags
+ kIncludeExecution = 0x00000004, // include assemblies that are loaded for execution only
+ kIncludeIntrospection = 0x00000008, // include assemblies that are loaded for introspection only
+
+ kIncludeFailedToLoad = 0x00000010, // include assemblies that failed to load
+
+ // Collectible assemblies flags
+ kExcludeCollectible = 0x00000040, // Exclude all collectible assemblies
+ kIncludeCollected = 0x00000080,
+ // Include assemblies which were collected and cannot be referenced anymore. Such assemblies are not
+ // AddRef-ed. Any manipulation with them should be protected by code:GetAssemblyListLock.
+ // Should be used only by code:LoaderAllocator::GCLoaderAllocators.
+
+}; // enum AssemblyIterationFlags
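+
+// For example, selecting all loaded assemblies per the note above requires a
+// load-status bit plus both the execution and introspection bits:
+//
+//     AssemblyIterationFlags flags = (AssemblyIterationFlags)
+//         (kIncludeLoaded | kIncludeExecution | kIncludeIntrospection);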
+
+//---------------------------------------------------------------------------------------
+//
+// Base class for holder code:CollectibleAssemblyHolder (see code:HolderBase).
+// Manages AddRef/Release for collectible assemblies. It is a no-op for 'normal' non-collectible assemblies.
+//
+// Each type used as the type parameter needs 2 methods implemented:
+// code:CollectibleAssemblyHolderBase::GetLoaderAllocator
+// code:CollectibleAssemblyHolderBase::IsCollectible
+//
+template<typename _Type>
+class CollectibleAssemblyHolderBase
+{
+protected:
+ _Type m_value;
+public:
+ CollectibleAssemblyHolderBase(const _Type & value = NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_value = value;
+ }
+ void DoAcquire()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // We don't need to keep the assembly alive in DAC - see code:#CAH_DAC
+#ifndef DACCESS_COMPILE
+ if (this->IsCollectible(m_value))
+ {
+ LoaderAllocator * pLoaderAllocator = GetLoaderAllocator(m_value);
+ pLoaderAllocator->AddReference();
+ }
+#endif //!DACCESS_COMPILE
+ }
+ void DoRelease()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ if (this->IsCollectible(m_value))
+ {
+ LoaderAllocator * pLoaderAllocator = GetLoaderAllocator(m_value);
+ pLoaderAllocator->Release();
+ }
+#endif //!DACCESS_COMPILE
+ }
+
+private:
+ LoaderAllocator * GetLoaderAllocator(DomainAssembly * pDomainAssembly)
+ {
+ WRAPPER_NO_CONTRACT;
+ return pDomainAssembly->GetLoaderAllocator();
+ }
+ BOOL IsCollectible(DomainAssembly * pDomainAssembly)
+ {
+ WRAPPER_NO_CONTRACT;
+ return pDomainAssembly->IsCollectible();
+ }
+ LoaderAllocator * GetLoaderAllocator(Assembly * pAssembly)
+ {
+ WRAPPER_NO_CONTRACT;
+ return pAssembly->GetLoaderAllocator();
+ }
+ BOOL IsCollectible(Assembly * pAssembly)
+ {
+ WRAPPER_NO_CONTRACT;
+ return pAssembly->IsCollectible();
+ }
+}; // class CollectibleAssemblyHolderBase<>
+
+//---------------------------------------------------------------------------------------
+//
+// Holder of assembly reference which keeps collectible assembly alive while the holder is valid.
+//
+// A collectible assembly can be collected at any point when a GC happens. Almost instantly all native data
+// structures of the assembly (e.g. code:DomainAssembly, code:Assembly) could be deallocated.
+// Therefore any usage of (collectible) assembly data structures from the native world has to prevent the
+// deallocation by increasing the ref-count on the assembly / associated loader allocator.
+//
+// #CAH_DAC
+// In DAC we don't AddRef/Release as the assembly doesn't have to be kept alive: The process is stopped when
+// DAC is used and therefore the assembly cannot just disappear.
+//
+template<typename _Type>
+class CollectibleAssemblyHolder : public BaseWrapper<_Type, CollectibleAssemblyHolderBase<_Type> >
+{
+public:
+ FORCEINLINE
+ CollectibleAssemblyHolder(const _Type & value = NULL, BOOL fTake = TRUE)
+ : BaseWrapper<_Type, CollectibleAssemblyHolderBase<_Type> >(value, fTake)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ }
+
+ FORCEINLINE
+ CollectibleAssemblyHolder &
+ operator=(const _Type & value)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ BaseWrapper<_Type, CollectibleAssemblyHolderBase<_Type> >::operator=(value);
+ return *this;
+ }
+
+ // Operator & is overloaded in parent, therefore we have to get to 'this' pointer explicitly.
+ FORCEINLINE
+ CollectibleAssemblyHolder<_Type> *
+ This()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return this;
+ }
+}; // class CollectibleAssemblyHolder<>
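+
+// A minimal sketch of keeping a collectible assembly alive while it is used from
+// native code, assuming pDomainAssembly is filled in by a lookup or iterator that
+// takes a CollectibleAssemblyHolder (LookupSomeAssembly is a hypothetical lookup):
+//
+//     CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+//     if (LookupSomeAssembly(&pDomainAssembly))
+//     {
+//         // The holder AddRef-ed the loader allocator on assignment, so the
+//         // assembly cannot be collected while pDomainAssembly is in scope.
+//     }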
+
+//---------------------------------------------------------------------------------------
+//
+#ifdef FEATURE_LOADER_OPTIMIZATION
+class SharedAssemblyLocator
+{
+public:
+ enum
+ {
+ DOMAINASSEMBLY = 1,
+ PEASSEMBLY = 2,
+ PEASSEMBLYEXACT = 3
+ };
+ DWORD GetType() {LIMITED_METHOD_CONTRACT; return m_type;};
+#ifndef DACCESS_COMPILE
+ DomainAssembly* GetDomainAssembly() {LIMITED_METHOD_CONTRACT; _ASSERTE(m_type==DOMAINASSEMBLY); return (DomainAssembly*)m_value;};
+ PEAssembly* GetPEAssembly() {LIMITED_METHOD_CONTRACT; _ASSERTE(m_type==PEASSEMBLY||m_type==PEASSEMBLYEXACT); return (PEAssembly*)m_value;};
+ SharedAssemblyLocator(DomainAssembly* pAssembly)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_type=DOMAINASSEMBLY;
+ m_value=pAssembly;
+ }
+ SharedAssemblyLocator(PEAssembly* pFile, DWORD type = PEASSEMBLY)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_type = type;
+ m_value = pFile;
+ }
+#endif // DACCESS_COMPILE
+
+ DWORD Hash();
+protected:
+ DWORD m_type;
+ LPVOID m_value;
+#if FEATURE_VERSIONING
+ ULONG m_uIdentityHash;
+#endif
+};
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+//
+// Stores binding information about failed assembly loads for DAC
+//
+struct FailedAssembly {
+ SString displayName;
+ SString location;
+#ifdef FEATURE_FUSION
+ LOADCTX_TYPE context;
+#endif
+ HRESULT error;
+
+ void Initialize(AssemblySpec *pSpec, Exception *ex)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ displayName.SetASCII(pSpec->GetName());
+ location.Set(pSpec->GetCodeBase());
+ error = ex->GetHR();
+
+ //
+ // Determine the binding context the assembly would have been in.
+ // If the parent has been set, use its binding context.
+ // If the parent hasn't been set but the code base has, use LoadFrom.
+ // Otherwise, use the default.
+ //
+#ifdef FEATURE_FUSION
+ context = pSpec->GetParentIAssembly() ? pSpec->GetParentIAssembly()->GetFusionLoadContext() : LOADCTX_TYPE_LOADFROM;
+#endif // FEATURE_FUSION
+ }
+};
+
+#ifdef FEATURE_COMINTEROP
+
+// Cache used by COM Interop
+struct NameToTypeMapEntry
+{
+ // Host space representation of the key
+ struct Key
+ {
+ LPCWSTR m_wzName; // The type name or registry string representation of the GUID "{<guid>}"
+ SIZE_T m_cchName; // wcslen(m_wzName) for faster hashtable lookup
+ };
+ struct DacKey
+ {
+ PTR_CWSTR m_wzName; // The type name or registry string representation of the GUID "{<guid>}"
+ SIZE_T m_cchName; // wcslen(m_wzName) for faster hashtable lookup
+ } m_key;
+ TypeHandle m_typeHandle; // Using TypeHandle instead of MethodTable* to avoid losing information when sharing method tables.
+ UINT m_nEpoch; // tracks creation Epoch. This is incremented each time an external reader enumerates the cache
+ BYTE m_bFlags;
+};
+
+typedef DPTR(NameToTypeMapEntry) PTR_NameToTypeMapEntry;
+
+class NameToTypeMapTraits : public NoRemoveSHashTraits< DefaultSHashTraits<NameToTypeMapEntry> >
+{
+public:
+ typedef NameToTypeMapEntry::Key key_t;
+
+ static const NameToTypeMapEntry Null() { NameToTypeMapEntry e; e.m_key.m_wzName = NULL; e.m_key.m_cchName = 0; return e; }
+ static bool IsNull(const NameToTypeMapEntry &e) { return e.m_key.m_wzName == NULL; }
+ static const key_t GetKey(const NameToTypeMapEntry &e)
+ {
+ key_t key;
+ key.m_wzName = (LPCWSTR)(e.m_key.m_wzName); // this cast brings the string over to the host, in a DAC build
+ key.m_cchName = e.m_key.m_cchName;
+
+ return key;
+ }
+ static count_t Hash(const key_t &key) { WRAPPER_NO_CONTRACT; return HashStringN(key.m_wzName, key.m_cchName); }
+
+ static BOOL Equals(const key_t &lhs, const key_t &rhs)
+ {
+ WRAPPER_NO_CONTRACT;
+ return (lhs.m_cchName == rhs.m_cchName) && memcmp(lhs.m_wzName, rhs.m_wzName, lhs.m_cchName * sizeof(WCHAR)) == 0;
+ }
+
+ void OnDestructPerEntryCleanupAction(const NameToTypeMapEntry& e)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(e.m_key.m_cchName == wcslen(e.m_key.m_wzName));
+#ifndef DACCESS_COMPILE
+ delete [] e.m_key.m_wzName;
+#endif // DACCESS_COMPILE
+ }
+ static const bool s_DestructPerEntryCleanupAction = true;
+};
+
+typedef SHash<NameToTypeMapTraits> NameToTypeMapTable;
+
+typedef DPTR(NameToTypeMapTable) PTR_NameToTypeMapTable;
+
+struct WinRTFactoryCacheEntry
+{
+ typedef MethodTable *Key;
+ Key key; // Type as KEY
+
+ CtxEntry *m_pCtxEntry; // Context entry - used to verify whether the cache is a match
+ OBJECTHANDLE m_ohFactoryObject; // Handle to factory object
+};
+
+class WinRTFactoryCacheTraits : public DefaultSHashTraits<WinRTFactoryCacheEntry>
+{
+public:
+ typedef WinRTFactoryCacheEntry::Key key_t;
+ static const WinRTFactoryCacheEntry Null() { WinRTFactoryCacheEntry e; e.key = NULL; return e; }
+ static bool IsNull(const WinRTFactoryCacheEntry &e) { return e.key == NULL; }
+ static const WinRTFactoryCacheEntry::Key GetKey(const WinRTFactoryCacheEntry& e) { return e.key; }
+ static count_t Hash(WinRTFactoryCacheEntry::Key key) { return (count_t)((size_t)key); }
+ static BOOL Equals(WinRTFactoryCacheEntry::Key lhs, WinRTFactoryCacheEntry::Key rhs)
+ { return lhs == rhs; }
+ static const WinRTFactoryCacheEntry Deleted() { WinRTFactoryCacheEntry e; e.key = (MethodTable *)-1; return e; }
+ static bool IsDeleted(const WinRTFactoryCacheEntry &e) { return e.key == (MethodTable *)-1; }
+
+ static void OnDestructPerEntryCleanupAction(const WinRTFactoryCacheEntry& e);
+ static const bool s_DestructPerEntryCleanupAction = true;
+};
+
+typedef SHash<WinRTFactoryCacheTraits> WinRTFactoryCache;
+
+#endif // FEATURE_COMINTEROP
+
+class AppDomainIterator;
+
+const DWORD DefaultADID = 1;
+
+template <class AppDomainType> class AppDomainCreationHolder;
+
+// An Appdomain is the managed equivalent of a process. It is an isolation unit (conceptually you don't
+// have pointers directly from one appdomain to another, but rather go through remoting proxies). It is
+// also a unit of unloading.
+//
+// Threads are always running in the context of a particular AppDomain. See
+// file:threads.h#RuntimeThreadLocals for more details.
+//
+// see code:BaseDomain for much of the meat of an AppDomain (heaps, locks, etc.)
+// * code:AppDomain.m_Assemblies - is a list of code:Assembly in the appdomain
+//
+class AppDomain : public BaseDomain
+{
+ friend class ADUnloadSink;
+ friend class SystemDomain;
+ friend class AssemblySink;
+ friend class AppDomainNative;
+ friend class AssemblyNative;
+ friend class AssemblySpec;
+ friend class ClassLoader;
+ friend class ThreadNative;
+ friend class RCWCache;
+ friend class ClrDataAccess;
+ friend class CheckAsmOffsets;
+ friend class AppDomainFromIDHolder;
+
+ VPTR_VTABLE_CLASS(AppDomain, BaseDomain)
+
+public:
+#ifndef DACCESS_COMPILE
+ AppDomain();
+ virtual ~AppDomain();
+#endif
+ static void DoADUnloadWork();
+ DomainAssembly* FindDomainAssembly(Assembly*);
+ void EnterContext(Thread* pThread, Context* pCtx,ContextTransitionFrame *pFrame);
+
+#ifndef DACCESS_COMPILE
+ //-----------------------------------------------------------------------------------------------------------------
+ // Convenience wrapper for ::GetAppDomain to provide better encapsulation.
+ static AppDomain * GetCurrentDomain()
+ { return ::GetAppDomain(); }
+#endif //!DACCESS_COMPILE
+
+ //-----------------------------------------------------------------------------------------------------------------
+ // Initializes an AppDomain. (this function is not called from the SystemDomain)
+ void Init();
+
+ // creates only the unmanaged part
+ static void CreateUnmanagedObject(AppDomainCreationHolder<AppDomain>& result);
+ inline void SetAppDomainManagerInfo(LPCWSTR szAssemblyName, LPCWSTR szTypeName, EInitializeNewDomainFlags dwInitializeDomainFlags);
+ inline BOOL HasAppDomainManagerInfo();
+ inline LPCWSTR GetAppDomainManagerAsm();
+ inline LPCWSTR GetAppDomainManagerType();
+ inline EInitializeNewDomainFlags GetAppDomainManagerInitializeNewDomainFlags();
+
+#ifndef FEATURE_CORECLR
+ inline BOOL AppDomainManagerSetFromConfig();
+ Assembly *GetAppDomainManagerEntryAssembly();
+#endif // FEATURE_CORECLR
+
+#if defined(FEATURE_CORECLR) && defined(FEATURE_COMINTEROP)
+ HRESULT SetWinrtApplicationContext(SString &appLocalWinMD);
+#endif // FEATURE_CORECLR && FEATURE_COMINTEROP
+
+ BOOL CanReversePInvokeEnter();
+ void SetReversePInvokeCannotEnter();
+ bool MustForceTrivialWaitOperations();
+ void SetForceTrivialWaitOperations();
+
+ //****************************************************************************************
+ //
+ // Stop deletes all the assemblies but does not remove other resources like
+ // the critical sections
+ void Stop();
+
+ // Gets rid of resources
+ void Terminate();
+
+#ifdef FEATURE_PREJIT
+ //assembly cleanup that requires suspended runtime
+ void DeleteNativeCodeRanges();
+#endif
+
+ // final assembly cleanup
+ void ShutdownAssemblies();
+ void ShutdownFreeLoaderAllocators(BOOL bFromManagedCode);
+
+ void ReleaseDomainBoundInfo();
+ void ReleaseFiles();
+
+
+ // Removes the AppDomain from the system and cleans up. This call should not be
+ // made from shutdown code.
+ void CloseDomain();
+
+ virtual BOOL IsAppDomain() { LIMITED_METHOD_DAC_CONTRACT; return TRUE; }
+ virtual PTR_AppDomain AsAppDomain() { LIMITED_METHOD_CONTRACT; return dac_cast<PTR_AppDomain>(this); }
+
+#ifndef FEATURE_CORECLR
+ void InitializeSorting(OBJECTREF* ppAppdomainSetup);
+ void InitializeHashing(OBJECTREF* ppAppdomainSetup);
+#endif
+
+ OBJECTREF DoSetup(OBJECTREF* setupInfo);
+
+ OBJECTREF GetExposedObject();
+ OBJECTREF GetRawExposedObject() {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ if (m_ExposedObject) {
+ return ObjectFromHandle(m_ExposedObject);
+ }
+ else {
+ return NULL;
+ }
+ }
+
+ OBJECTHANDLE GetRawExposedObjectHandleForDebugger() { LIMITED_METHOD_DAC_CONTRACT; return m_ExposedObject; }
+
+#ifdef FEATURE_COMINTEROP
+ HRESULT GetComIPForExposedObject(IUnknown **pComIP);
+
+ MethodTable *GetRedirectedType(WinMDAdapter::RedirectedTypeIndex index);
+ bool FindRedirectedAssembly(Assembly* pAssembly, WinMDAdapter::FrameworkAssemblyIndex* pIndex);
+#endif // FEATURE_COMINTEROP
+
+
+ //****************************************************************************************
+
+protected:
+ // Multi-thread safe access to the list of assemblies
+ class DomainAssemblyList
+ {
+ private:
+ ArrayList m_array;
+#ifdef _DEBUG
+ AppDomain * dbg_m_pAppDomain;
+ public:
+ void Debug_SetAppDomain(AppDomain * pAppDomain)
+ {
+ dbg_m_pAppDomain = pAppDomain;
+ }
+#endif //_DEBUG
+ public:
+ bool IsEmpty()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ // This function can be reliably called without taking the lock, because the first assembly
+ // added to the arraylist is non-collectible, and the ArrayList itself allows lockless read access
+ return (m_array.GetCount() == 0);
+ }
+ void Clear(AppDomain * pAppDomain)
+ {
+ CONTRACTL {
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS); // Triggers only in MODE_COOPERATIVE (by taking the lock)
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ _ASSERTE(dbg_m_pAppDomain == pAppDomain);
+
+ CrstHolder ch(pAppDomain->GetAssemblyListLock());
+ m_array.Clear();
+ }
+
+ DWORD GetCount(AppDomain * pAppDomain)
+ {
+ CONTRACTL {
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS); // Triggers only in MODE_COOPERATIVE (by taking the lock)
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ _ASSERTE(dbg_m_pAppDomain == pAppDomain);
+
+ CrstHolder ch(pAppDomain->GetAssemblyListLock());
+ return GetCount_Unlocked();
+ }
+ DWORD GetCount_Unlocked()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ _ASSERTE(dbg_m_pAppDomain->GetAssemblyListLock()->OwnedByCurrentThread());
+#endif
+ // code:Append_Unlocked guarantees that we do not have more than MAXDWORD items
+ return m_array.GetCount();
+ }
+
+ void Get(AppDomain * pAppDomain, DWORD index, CollectibleAssemblyHolder<DomainAssembly *> * pAssemblyHolder)
+ {
+ CONTRACTL {
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS); // Triggers only in MODE_COOPERATIVE (by taking the lock)
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ _ASSERTE(dbg_m_pAppDomain == pAppDomain);
+
+ CrstHolder ch(pAppDomain->GetAssemblyListLock());
+ Get_Unlocked(index, pAssemblyHolder);
+ }
+ void Get_Unlocked(DWORD index, CollectibleAssemblyHolder<DomainAssembly *> * pAssemblyHolder)
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ _ASSERTE(dbg_m_pAppDomain->GetAssemblyListLock()->OwnedByCurrentThread());
+ *pAssemblyHolder = dac_cast<PTR_DomainAssembly>(m_array.Get(index));
+ }
+ // Doesn't lock the assembly list (caller has to hold the lock already).
+ // Doesn't AddRef the returned assembly (if collectible).
+ DomainAssembly * Get_UnlockedNoReference(DWORD index)
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ _ASSERTE(dbg_m_pAppDomain->GetAssemblyListLock()->OwnedByCurrentThread());
+#endif
+ return dac_cast<PTR_DomainAssembly>(m_array.Get(index));
+ }
+
+#ifndef DACCESS_COMPILE
+ void Set(AppDomain * pAppDomain, DWORD index, DomainAssembly * pAssembly)
+ {
+ CONTRACTL {
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS); // Triggers only in MODE_COOPERATIVE (by taking the lock)
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ _ASSERTE(dbg_m_pAppDomain == pAppDomain);
+
+ CrstHolder ch(pAppDomain->GetAssemblyListLock());
+ return Set_Unlocked(index, pAssembly);
+ }
+ void Set_Unlocked(DWORD index, DomainAssembly * pAssembly)
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ _ASSERTE(dbg_m_pAppDomain->GetAssemblyListLock()->OwnedByCurrentThread());
+ m_array.Set(index, pAssembly);
+ }
+
+ HRESULT Append_Unlocked(DomainAssembly * pAssembly)
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ _ASSERTE(dbg_m_pAppDomain->GetAssemblyListLock()->OwnedByCurrentThread());
+ return m_array.Append(pAssembly);
+ }
+#else //DACCESS_COMPILE
+ void
+ EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ {
+ SUPPORTS_DAC;
+
+ m_array.EnumMemoryRegions(flags);
+ }
+#endif // DACCESS_COMPILE
+
+ // Should be used only by code:AssemblyIterator::Create
+ ArrayList::Iterator GetArrayListIterator()
+ {
+ return m_array.Iterate();
+ }
+ }; // class DomainAssemblyList
+
+ // Conceptually a list of code:Assembly structures, protected by lock code:GetAssemblyListLock
+ DomainAssemblyList m_Assemblies;
+
+public:
+ // Note that this lock switches thread into GC_NOTRIGGER region as GC can take it too.
+ CrstExplicitInit * GetAssemblyListLock()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_crstAssemblyList;
+ }
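+
+// A minimal sketch of the lock discipline for the *_Unlocked accessors above: take
+// the assembly list lock with a CrstHolder, then call the unlocked variants while
+// it is held (usable only from code with access to m_Assemblies):
+//
+//     {
+//         CrstHolder ch(GetAssemblyListLock());
+//         DWORD count = m_Assemblies.GetCount_Unlocked();
+//         // ... read the list while the lock is held ...
+//     }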
+
+public:
+ class AssemblyIterator
+ {
+ // AppDomain context with the assembly list
+ AppDomain * m_pAppDomain;
+ ArrayList::Iterator m_Iterator;
+ AssemblyIterationFlags m_assemblyIterationFlags;
+
+ public:
+ BOOL Next(CollectibleAssemblyHolder<DomainAssembly *> * pDomainAssemblyHolder);
+ // Note: Does not lock the assembly list, but AddRefs collectible assemblies.
+ BOOL Next_Unlocked(CollectibleAssemblyHolder<DomainAssembly *> * pDomainAssemblyHolder);
+#ifndef DACCESS_COMPILE
+ private:
+ // Can be called only from AppDomain shutdown code:AppDomain::ShutdownAssemblies.
+ // Note: Does not lock the assembly list and does not AddRefs collectible assemblies.
+ BOOL Next_UnsafeNoAddRef(DomainAssembly ** ppDomainAssembly);
+#endif
+
+ private:
+ inline DWORD GetIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Iterator.GetIndex();
+ }
+
+ private:
+ friend class AppDomain;
+ // Cannot have constructor so this iterator can be used inside a union
+ static AssemblyIterator Create(AppDomain * pAppDomain, AssemblyIterationFlags assemblyIterationFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ AssemblyIterator i;
+
+ i.m_pAppDomain = pAppDomain;
+ i.m_Iterator = pAppDomain->m_Assemblies.GetArrayListIterator();
+ i.m_assemblyIterationFlags = assemblyIterationFlags;
+ return i;
+ }
+ }; // class AssemblyIterator
+
+ AssemblyIterator IterateAssembliesEx(AssemblyIterationFlags assemblyIterationFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return AssemblyIterator::Create(this, assemblyIterationFlags);
+ }
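+
+// A minimal sketch of a typical iteration over the domain's assemblies using the
+// iterator above; the flag combination follows the AssemblyIterationFlags note
+// earlier in this file:
+//
+//     AppDomain::AssemblyIterator i = pDomain->IterateAssembliesEx(
+//         (AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution));
+//     CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+//     while (i.Next(pDomainAssembly.This()))
+//     {
+//         // The holder keeps a collectible assembly's loader allocator
+//         // referenced for the duration of this loop body.
+//     }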
+
+#ifdef FEATURE_CORECLR
+private:
+ struct NativeImageDependenciesEntry
+ {
+ BaseAssemblySpec m_AssemblySpec;
+ GUID m_guidMVID;
+ };
+
+ class NativeImageDependenciesTraits : public NoRemoveSHashTraits<DefaultSHashTraits<NativeImageDependenciesEntry *> >
+ {
+ public:
+ typedef BaseAssemblySpec *key_t;
+ static key_t GetKey(NativeImageDependenciesEntry * e) { return &(e->m_AssemblySpec); }
+
+ static count_t Hash(key_t k)
+ {
+ return k->Hash();
+ }
+
+ static BOOL Equals(key_t lhs, key_t rhs)
+ {
+ return lhs->CompareEx(rhs);
+ }
+ };
+
+ SHash<NativeImageDependenciesTraits> m_NativeImageDependencies;
+
+public:
+ void CheckForMismatchedNativeImages(AssemblySpec * pSpec, const GUID * pGuid);
+
+public:
+ class PathIterator
+ {
+ friend class AppDomain;
+
+ ArrayList::Iterator m_i;
+
+ public:
+ BOOL Next()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_i.Next();
+ }
+
+ SString* GetPath()
+ {
+ WRAPPER_NO_CONTRACT;
+ return dac_cast<PTR_SString>(m_i.GetElement());
+ }
+ };
+ BOOL BindingByManifestFile();
+
+ PathIterator IterateNativeDllSearchDirectories();
+ void SetNativeDllSearchDirectories(LPCWSTR paths);
+ BOOL HasNativeDllSearchDirectories();
+ void ShutdownNativeDllSearchDirectories();
+#endif // FEATURE_CORECLR
+
+public:
+ SIZE_T GetAssemblyCount()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_Assemblies.GetCount(this);
+ }
+
+ CHECK CheckCanLoadTypes(Assembly *pAssembly);
+ CHECK CheckCanExecuteManagedCode(MethodDesc* pMD);
+ CHECK CheckLoading(DomainFile *pFile, FileLoadLevel level);
+
+ FileLoadLevel GetDomainFileLoadLevel(DomainFile *pFile);
+ BOOL IsLoading(DomainFile *pFile, FileLoadLevel level);
+ static FileLoadLevel GetThreadFileLoadLevel();
+
+ void LoadDomainFile(DomainFile *pFile,
+ FileLoadLevel targetLevel);
+
+ enum FindAssemblyOptions
+ {
+ FindAssemblyOptions_None = 0x0,
+ FindAssemblyOptions_IncludeFailedToLoad = 0x1
+ };
+
+ DomainAssembly * FindAssembly(PEAssembly * pFile, FindAssemblyOptions options = FindAssemblyOptions_None) DAC_EMPTY_RET(NULL);
+
+#ifdef FEATURE_MIXEDMODE
+ // Finds only loaded modules, elevates level if needed
+ Module* GetIJWModule(HMODULE hMod) DAC_EMPTY_RET(NULL);
+ // Finds loading modules
+ DomainFile* FindIJWDomainFile(HMODULE hMod, const SString &path) DAC_EMPTY_RET(NULL);
+#endif // FEATURE_MIXEDMODE
+
+ Assembly *LoadAssembly(AssemblySpec* pIdentity,
+ PEAssembly *pFile,
+ FileLoadLevel targetLevel,
+ AssemblyLoadSecurity *pLoadSecurity = NULL);
+
+ // This function does not provide caching; you must use LoadDomainAssembly
+ // unless the call is guaranteed to succeed or you don't need the caching
+ // (e.g. if you will FailFast or tear down the AppDomain anyway).
+ // The main point is that you should not bypass caching if you might try to load the same file again:
+ // doing so can result in multiple DomainAssembly objects that share the same PEAssembly for an ngen image,
+ // which violates our internal assumptions.
+ DomainAssembly *LoadDomainAssemblyInternal( AssemblySpec* pIdentity,
+ PEAssembly *pFile,
+ FileLoadLevel targetLevel,
+ AssemblyLoadSecurity *pLoadSecurity = NULL);
+
+ DomainAssembly *LoadDomainAssembly( AssemblySpec* pIdentity,
+ PEAssembly *pFile,
+ FileLoadLevel targetLevel,
+ AssemblyLoadSecurity *pLoadSecurity = NULL);
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ DomainModule *LoadDomainModule(DomainAssembly *pAssembly,
+ PEModule *pFile,
+ FileLoadLevel targetLevel);
+#endif
+
+ CHECK CheckValidModule(Module *pModule);
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ DomainFile *LoadDomainNeutralModuleDependency(Module *pModule, FileLoadLevel targetLevel);
+#endif
+
+#ifdef FEATURE_FUSION
+ PEAssembly *BindExplicitAssembly(HMODULE hMod, BOOL bindable);
+ Assembly *LoadExplicitAssembly(HMODULE hMod, BOOL bindable);
+ void GetFileFromFusion(IAssembly *pIAssembly, LPCWSTR wszModuleName,
+ SString &path);
+#endif
+ // private:
+ void LoadSystemAssemblies();
+
+ DomainFile *LoadDomainFile(FileLoadLock *pLock,
+ FileLoadLevel targetLevel);
+
+ void TryIncrementalLoad(DomainFile *pFile, FileLoadLevel workLevel, FileLoadLockHolder &lockHolder);
+
+ Assembly *LoadAssemblyHelper(LPCWSTR wszAssembly,
+ LPCWSTR wszCodeBase);
+
+#ifndef DACCESS_COMPILE // needs AssemblySpec
+ //****************************************************************************************
+ // Returns and Inserts assemblies into a lookup cache based on the binding information
+ // in the AssemblySpec. There can be many AssemblySpecs to a single assembly.
+ DomainAssembly* FindCachedAssembly(AssemblySpec* pSpec, BOOL fThrow=TRUE)
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_AssemblyCache.LookupAssembly(pSpec, fThrow);
+ }
+
+ PEAssembly* FindCachedFile(AssemblySpec* pSpec, BOOL fThrow = TRUE);
+ BOOL IsCached(AssemblySpec *pSpec);
+#endif // DACCESS_COMPILE
+ void CacheStringsForDAC();
+
+ BOOL AddFileToCache(AssemblySpec* pSpec, PEAssembly *pFile, BOOL fAllowFailure = FALSE);
+ BOOL AddAssemblyToCache(AssemblySpec* pSpec, DomainAssembly *pAssembly);
+ BOOL AddExceptionToCache(AssemblySpec* pSpec, Exception *ex);
+ void AddUnmanagedImageToCache(LPCWSTR libraryName, HMODULE hMod);
+ HMODULE FindUnmanagedImageInCache(LPCWSTR libraryName);
+ //****************************************************************************************
+ //
+ // Adds an assembly to the domain.
+ void AddAssembly(DomainAssembly * assem);
+ void RemoveAssembly_Unlocked(DomainAssembly * pAsm);
+
+ BOOL ContainsAssembly(Assembly * assem);
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ enum SharePolicy
+ {
+ // Attributes to control when to use domain neutral assemblies
+ SHARE_POLICY_UNSPECIFIED, // Use the current default policy (LoaderOptimization.NotSpecified)
+ SHARE_POLICY_NEVER, // Do not share anything, except the system assembly (LoaderOptimization.SingleDomain)
+ SHARE_POLICY_ALWAYS, // Share everything possible (LoaderOptimization.MultiDomain)
+ SHARE_POLICY_GAC, // Share only GAC-bound assemblies (LoaderOptimization.MultiDomainHost)
+
+ SHARE_POLICY_COUNT,
+ SHARE_POLICY_MASK = 0x3,
+
+ // NOTE that a bit 0x40 was previously defined and might be set on this value
+ // in custom attributes.
+ SHARE_POLICY_DEFAULT = SHARE_POLICY_NEVER,
+ };
+
+ void SetSharePolicy(SharePolicy policy);
+ SharePolicy GetSharePolicy();
+ BOOL ReduceSharePolicyFromAlways();
+
+ //****************************************************************************************
+ // Determines if the image is to be loaded into the shared domain or an individual
+ // appdomain.
+#ifndef FEATURE_CORECLR
+ BOOL ApplySharePolicy(DomainAssembly *pFile);
+ BOOL ApplySharePolicyFlag(DomainAssembly *pFile);
+#endif
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+ BOOL HasSetSecurityPolicy();
+
+ FORCEINLINE IApplicationSecurityDescriptor* GetSecurityDescriptor()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return static_cast<IApplicationSecurityDescriptor*>(m_pSecDesc);
+ }
+
+ void CreateSecurityDescriptor();
+
+ //****************************************************************************************
+ //
+ // Reference count. When an appdomain is first created, the reference count is bumped
+ // to one when it is added to the list of domains (see SystemDomain). An explicit
+ // removal from the list is necessary before it will be deleted.
+ ULONG AddRef(void);
+ ULONG Release(void) DAC_EMPTY_RET(0);
+
+ //****************************************************************************************
+ LPCWSTR GetFriendlyName(BOOL fDebuggerCares = TRUE);
+ LPCWSTR GetFriendlyNameForDebugger();
+ LPCWSTR GetFriendlyNameForLogging();
+#ifdef DACCESS_COMPILE
+ PVOID GetFriendlyNameNoSet(bool* isUtf8);
+#endif
+ void SetFriendlyName(LPCWSTR pwzFriendlyName, BOOL fDebuggerCares = TRUE);
+ void ResetFriendlyName(BOOL fDebuggerCares = TRUE);
+
+ //****************************************************************************************
+
+ // This can be used to override the binding behavior of the appdomain. It
+ // is overridden in the compilation domain. It is important that all
+ // static binding goes through this path.
+ virtual PEAssembly * BindAssemblySpec(
+ AssemblySpec *pSpec,
+ BOOL fThrowOnFileNotFound,
+ BOOL fRaisePrebindEvents,
+ StackCrawlMark *pCallerStackMark = NULL,
+ AssemblyLoadSecurity *pLoadSecurity = NULL,
+ BOOL fUseHostBinderIfAvailable = TRUE) DAC_EMPTY_RET(NULL);
+
+#ifdef FEATURE_HOSTED_BINDER
+ HRESULT BindAssemblySpecForHostedBinder(
+ AssemblySpec * pSpec,
+ IAssemblyName * pAssemblyName,
+ ICLRPrivBinder * pBinder,
+ PEAssembly ** ppAssembly) DAC_EMPTY_RET(E_FAIL);
+
+ HRESULT BindHostedPrivAssembly(
+ PEAssembly * pParentPEAssembly,
+ ICLRPrivAssembly * pPrivAssembly,
+ IAssemblyName * pAssemblyName,
+ PEAssembly ** ppAssembly,
+ BOOL fIsIntrospectionOnly = FALSE) DAC_EMPTY_RET(S_OK);
+#endif // FEATURE_HOSTED_BINDER
+
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+ virtual DomainAssembly *BindAssemblySpecForIntrospectionDependencies(AssemblySpec *pSpec) DAC_EMPTY_RET(NULL);
+#endif
+
+ PEAssembly *TryResolveAssembly(AssemblySpec *pSpec, BOOL fPreBind);
+
+ // Store a successful binding into the cache. This will keep the file from
+ // being physically unmapped, as well as shortcutting future attempts to bind
+ // the same spec through the cached entry point.
+ //
+ // Right now we only cache assembly binds for "probing" type
+ // binding situations, basically when loading domain neutral assemblies or
+ // zap files.
+ //
+ // <TODO>@todo: We may want to be more aggressive about this if
+ // there are other situations where we are repeatedly binding the
+ // same assembly specs, though.</TODO>
+ //
+ // Returns TRUE if stored
+ // FALSE if it's a duplicate (caller should clean up args)
+ BOOL StoreBindAssemblySpecResult(AssemblySpec *pSpec,
+ PEAssembly *pFile,
+ BOOL clone = TRUE);
+
+ BOOL StoreBindAssemblySpecError(AssemblySpec *pSpec,
+ HRESULT hr,
+ OBJECTREF *pThrowable,
+ BOOL clone = TRUE);
+
+ //****************************************************************************************
+ //
+#ifdef FEATURE_FUSION
+ static BOOL SetContextProperty(IApplicationContext* pFusionContext,
+ LPCWSTR pProperty,
+ OBJECTREF* obj);
+#endif
+ //****************************************************************************************
+ //
+ // Uses the first assembly to add an application base to the Context. This is done
+ // in a lazy fashion so executables do not take the perf hit unless they load other
+ // assemblies.
+#ifdef FEATURE_FUSION
+ LPWSTR GetDynamicDir();
+#endif
+#ifndef DACCESS_COMPILE
+ void OnAssemblyLoad(Assembly *assem);
+ void OnAssemblyLoadUnlocked(Assembly *assem);
+ static BOOL OnUnhandledException(OBJECTREF *pThrowable, BOOL isTerminating = TRUE);
+
+#endif
+
+ // True iff a debugger is attached to the process (same as CORDebuggerAttached)
+ BOOL IsDebuggerAttached (void);
+
+#ifdef DEBUGGING_SUPPORTED
+ // Notify debugger of all assemblies, modules, and possibly classes in this AppDomain
+ BOOL NotifyDebuggerLoad(int flags, BOOL attaching);
+
+ // Send unload notifications to the debugger for all assemblies, modules and classes in this AppDomain
+ void NotifyDebuggerUnload();
+#endif // DEBUGGING_SUPPORTED
+
+ void SetSystemAssemblyLoadEventSent (BOOL fFlag);
+ BOOL WasSystemAssemblyLoadEventSent (void);
+
+#ifndef DACCESS_COMPILE
+ OBJECTREF* AllocateStaticFieldObjRefPtrs(int nRequested, OBJECTREF** ppLazyAllocate = NULL)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return AllocateObjRefPtrsInLargeTable(nRequested, ppLazyAllocate);
+ }
+
+ OBJECTREF* AllocateStaticFieldObjRefPtrsCrossDomain(int nRequested, OBJECTREF** ppLazyAllocate = NULL)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return AllocateObjRefPtrsInLargeTable(nRequested, ppLazyAllocate, TRUE);
+ }
+#endif // DACCESS_COMPILE
+
+ void EnumStaticGCRefs(promote_func* fn, ScanContext* sc);
+
+ DomainLocalBlock *GetDomainLocalBlock()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return &m_sDomainLocalBlock;
+ }
+
+ static SIZE_T GetOffsetOfModuleSlotsPointer()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return offsetof(AppDomain,m_sDomainLocalBlock) + DomainLocalBlock::GetOffsetOfModuleSlotsPointer();
+ }
+
+ void SetupSharedStatics();
+
+ ADUnloadSink* PrepareForWaitUnloadCompletion();
+
+ //****************************************************************************************
+ //
+ // Create a quick lookup for classes loaded into this domain based on their GUID.
+ //
+ void InsertClassForCLSID(MethodTable* pMT, BOOL fForceInsert = FALSE);
+ void InsertClassForCLSID(MethodTable* pMT, GUID *pGuid);
+
+#ifdef FEATURE_COMINTEROP
+private:
+ void CacheTypeByNameWorker(const SString &ssClassName, const UINT vCacheVersion, TypeHandle typeHandle, BYTE flags, BOOL bReplaceExisting = FALSE);
+ TypeHandle LookupTypeByNameWorker(const SString &ssClassName, UINT *pvCacheVersion, BYTE *pbFlags);
+public:
+ // Used by COM Interop for mapping WinRT runtime class names to real types.
+ void CacheTypeByName(const SString &ssClassName, const UINT vCacheVersion, TypeHandle typeHandle, BYTE flags, BOOL bReplaceExisting = FALSE);
+ TypeHandle LookupTypeByName(const SString &ssClassName, UINT *pvCacheVersion, BYTE *pbFlags);
+ PTR_MethodTable LookupTypeByGuid(const GUID & guid);
+
+#ifndef DACCESS_COMPILE
+ inline BOOL CanCacheWinRTTypeByGuid(TypeHandle typeHandle)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Only allow caching guid/types maps for types loaded during
+ // "normal" domain operation
+ if (IsCompilationDomain() || (m_Stage < STAGE_OPEN))
+ return FALSE;
+
+ MethodTable *pMT = typeHandle.GetMethodTable();
+ if (pMT != NULL)
+ {
+ // Don't cache mscorlib-internal declarations of WinRT types.
+ if (pMT->GetModule()->IsSystem() && pMT->IsProjectedFromWinRT())
+ return FALSE;
+
+ // Don't cache redirected WinRT types.
+ if (WinRTTypeNameConverter::IsRedirectedWinRTSourceType(pMT))
+ return FALSE;
+ }
+
+ return TRUE;
+ }
+#endif // !DACCESS_COMPILE
+
+ void CacheWinRTTypeByGuid(TypeHandle typeHandle);
+ void GetCachedWinRTTypes(SArray<PTR_MethodTable> * pTypes, SArray<GUID> * pGuids, UINT minEpoch, UINT * pCurEpoch);
+
+ // Used by COM Interop for caching WinRT factory objects.
+ void CacheWinRTFactoryObject(MethodTable *pClassMT, OBJECTREF *refFactory, LPVOID lpCtxCookie);
+ OBJECTREF LookupWinRTFactoryObject(MethodTable *pClassMT, LPVOID lpCtxCookie);
+ void RemoveWinRTFactoryObjects(LPVOID pCtxCookie);
+
+ MethodTable *LoadCOMClass(GUID clsid, BOOL bLoadRecord = FALSE, BOOL* pfAssemblyInReg = NULL);
+ COMorRemotingFlag GetComOrRemotingFlag();
+ BOOL GetPreferComInsteadOfManagedRemoting();
+ OBJECTREF GetMissingObject(); // DispatchInfo will call function to retrieve the Missing.Value object.
+#endif // FEATURE_COMINTEROP
+
+#ifndef DACCESS_COMPILE
+ MethodTable* LookupClass(REFIID iid)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ MethodTable *pMT = (MethodTable*) m_clsidHash.LookupValue((UPTR) GetKeyFromGUID(&iid), (LPVOID)&iid);
+ return (pMT == (MethodTable*) INVALIDENTRY
+ ? NULL
+ : pMT);
+ }
+#endif // DACCESS_COMPILE
+
+ //<TODO>@todo get a better key</TODO>
+ ULONG GetKeyFromGUID(const GUID *pguid)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return *(ULONG *) pguid;
+ }
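+ // For illustration, the key is simply the GUID's first DWORD (Data1):
+ //
+ //     GUID g = { 0x11223344, 0x5566, 0x7788, { 0 } };
+ //     GetKeyFromGUID(&g); // yields 0x11223344
+ //
+ // hence the <TODO> above about finding a better-distributed key.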
+
+#ifdef FEATURE_COMINTEROP
+ ComCallWrapperCache* GetComCallWrapperCache();
+ RCWCache *GetRCWCache()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (m_pRCWCache)
+ return m_pRCWCache;
+
+ // By separating the cache creation from the common lookup, we
+ // can keep the (x86) EH prolog/epilog off the path.
+ return CreateRCWCache();
+ }
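+ // A minimal usage sketch (hypothetical caller): the common case returns the
+ // existing cache with no EH setup; only the first call pays for creation.
+ //
+ //     RCWCache *pCache = pDomain->GetRCWCache(); // fast path once created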
+private:
+ RCWCache *CreateRCWCache();
+public:
+ RCWCache *GetRCWCacheNoCreate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pRCWCache;
+ }
+
+ RCWRefCache *GetRCWRefCache();
+
+ void ResetComCallWrapperCache()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pComCallWrapperCache = NULL;
+ }
+
+ MethodTable* GetLicenseInteropHelperMethodTable();
+#endif // FEATURE_COMINTEROP
+
+ //****************************************************************************************
+ // Get the proxy for this app domain
+#ifdef FEATURE_REMOTING
+ OBJECTREF GetAppDomainProxy();
+#endif
+
+ ADIndex GetIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return m_dwIndex;
+ }
+
+ TPIndex GetTPIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_tpIndex;
+ }
+
+ void InitializeDomainContext(BOOL allowRedirects, LPCWSTR pwszPath, LPCWSTR pwszConfig);
+
+#ifdef FEATURE_FUSION
+ IApplicationContext *CreateFusionContext();
+ void SetupLoaderOptimization(DWORD optimization);
+#endif
+#ifdef FEATURE_VERSIONING
+ IUnknown *CreateFusionContext();
+#endif // FEATURE_VERSIONING
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+ void OverrideDefaultContextBinder(IUnknown *pOverrideBinder)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pOverrideBinder != NULL);
+ pOverrideBinder->AddRef();
+ m_pFusionContext->Release();
+ m_pFusionContext = pOverrideBinder;
+ }
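+ // Note the order above: the incoming binder is AddRef'ed before the old
+ // m_pFusionContext is released, so the reference count stays correct even
+ // if both arguments alias the same binder.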
+
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+#ifdef FEATURE_PREJIT
+ CorCompileConfigFlags GetNativeConfigFlags();
+#endif // FEATURE_PREJIT
+
+ //****************************************************************************************
+ // Create a domain context rooted at the fileName. The directory containing the file name
+ // is the application base and the configuration file is the fileName appended with
+ // .config. If no name is passed in then no domain is created.
+ static AppDomain* CreateDomainContext(LPCWSTR fileName);
+
+ // Sets up the current domain's fusion context based on the given exe file name
+ // (app base & config file)
+ void SetupExecutableFusionContext(LPCWSTR exePath);
+
+ //****************************************************************************************
+ // Manage a pool of asynchronous objects used to fetch assemblies. When a sink is released,
+ // it places itself back on the pool list. Only one object is kept in the pool.
+#ifdef FEATURE_FUSION
+ AssemblySink* AllocateAssemblySink(AssemblySpec* pSpec);
+#endif
+ void SetIsUserCreatedDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwFlags |= USER_CREATED_DOMAIN;
+ }
+
+ BOOL IsUserCreatedDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_dwFlags & USER_CREATED_DOMAIN);
+ }
+
+ void SetIgnoreUnhandledExceptions()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwFlags |= IGNORE_UNHANDLED_EXCEPTIONS;
+ }
+
+ BOOL IgnoreUnhandledExceptions()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_dwFlags & IGNORE_UNHANDLED_EXCEPTIONS);
+ }
+
+#if defined(FEATURE_CORECLR)
+ void SetEnablePInvokeAndClassicComInterop()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwFlags |= ENABLE_PINVOKE_AND_CLASSIC_COMINTEROP;
+ }
+
+ BOOL EnablePInvokeAndClassicComInterop()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_dwFlags & ENABLE_PINVOKE_AND_CLASSIC_COMINTEROP);
+ }
+
+ void SetAllowPlatformSpecificAppAssemblies()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwFlags |= ENABLE_SKIP_PLAT_CHECKS;
+ }
+
+ BOOL AllowPlatformSpecificAppAssemblies()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if(IsCompilationDomain())
+ return TRUE;
+
+ return (m_dwFlags & ENABLE_SKIP_PLAT_CHECKS);
+ }
+
+ void SetAllowLoadFile()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwFlags |= ENABLE_ASSEMBLY_LOADFILE;
+ }
+
+ BOOL IsLoadFileAllowed()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_dwFlags & ENABLE_ASSEMBLY_LOADFILE);
+ }
+#endif // defined(FEATURE_CORECLR)
+
+ void SetPassiveDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwFlags |= PASSIVE_DOMAIN;
+ }
+
+ BOOL IsPassiveDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_dwFlags & PASSIVE_DOMAIN);
+ }
+
+ void SetVerificationDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwFlags |= VERIFICATION_DOMAIN;
+ }
+
+ BOOL IsVerificationDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_dwFlags & VERIFICATION_DOMAIN);
+ }
+
+ void SetIllegalVerificationDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwFlags |= ILLEGAL_VERIFICATION_DOMAIN;
+ }
+
+ BOOL IsIllegalVerificationDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_dwFlags & ILLEGAL_VERIFICATION_DOMAIN);
+ }
+
+ void SetCompilationDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwFlags |= (PASSIVE_DOMAIN|COMPILATION_DOMAIN);
+ }
+
+ BOOL IsCompilationDomain();
+
+ PTR_CompilationDomain ToCompilationDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsCompilationDomain());
+ return dac_cast<PTR_CompilationDomain>(this);
+ }
+
+#ifdef MDIL
+ void SetMDILCompilationDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsCompilationDomain());
+ m_dwFlags |= MDIL_COMPILATION_DOMAIN;
+ }
+
+ BOOL IsMDILCompilationDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwFlags & MDIL_COMPILATION_DOMAIN;
+ }
+
+ void SetMinimalMDILCompilationDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsCompilationDomain());
+ m_dwFlags |= MINIMAL_MDIL_COMPILATION_DOMAIN;
+ }
+
+ BOOL IsMinimalMDILCompilationDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwFlags & MINIMAL_MDIL_COMPILATION_DOMAIN;
+ }
+
+ void SetNoMDILCompilationDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsCompilationDomain());
+ m_dwFlags |= NO_MDIL_COMPILATION_DOMAIN;
+ }
+
+ BOOL IsNoMDILCompilationDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwFlags & NO_MDIL_COMPILATION_DOMAIN;
+ }
+#endif // MDIL
+
+ void SetCanUnload()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwFlags |= APP_DOMAIN_CAN_BE_UNLOADED;
+ }
+
+ BOOL CanUnload()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return m_dwFlags & APP_DOMAIN_CAN_BE_UNLOADED;
+ }
+
+ void SetRemotingConfigured()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ FastInterlockOr((ULONG*)&m_dwFlags, REMOTING_CONFIGURED_FOR_DOMAIN);
+ }
+
+ BOOL IsRemotingConfigured()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return m_dwFlags & REMOTING_CONFIGURED_FOR_DOMAIN;
+ }
+
+ void SetOrphanedLocks()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ FastInterlockOr((ULONG*)&m_dwFlags, ORPHANED_LOCKS);
+ }
+
+ BOOL HasOrphanedLocks()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return m_dwFlags & ORPHANED_LOCKS;
+ }
+
+ // This function is used to relax asserts in the lock accounting.
+ // It returns TRUE if broken lock accounting is acceptable in this domain
+ // (i.e., the domain has orphaned locks and an unload has at least been requested).
+ BOOL OkToIgnoreOrphanedLocks()
+ {
+ WRAPPER_NO_CONTRACT;
+ return HasOrphanedLocks() && m_Stage >= STAGE_UNLOAD_REQUESTED;
+ }
+
+ static void ExceptionUnwind(Frame *pFrame);
+
+#ifdef _DEBUG
+ void TrackADThreadEnter(Thread *pThread, Frame *pFrame);
+ void TrackADThreadExit(Thread *pThread, Frame *pFrame);
+ void DumpADThreadTrack();
+#endif
+
+#ifndef DACCESS_COMPILE
+ void ThreadEnter(Thread *pThread, Frame *pFrame)
+ {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+#ifdef _DEBUG
+ if (LoggingOn(LF_APPDOMAIN, LL_INFO100))
+ TrackADThreadEnter(pThread, pFrame);
+ else
+#endif
+ {
+ InterlockedIncrement((LONG*)&m_dwThreadEnterCount);
+ LOG((LF_APPDOMAIN, LL_INFO1000, "AppDomain::ThreadEnter %p to [%d] (%8.8x) %S count %d\n",
+ pThread,GetId().m_dwId, this,
+ GetFriendlyNameForLogging(),GetThreadEnterCount()));
+#if _DEBUG_ADUNLOAD
+ printf("AppDomain::ThreadEnter %p to [%d] (%8.8x) %S count %d\n",
+ pThread, GetId().m_dwId, this,
+ GetFriendlyNameForLogging(), GetThreadEnterCount());
+#endif
+ }
+ }
+
+ void ThreadExit(Thread *pThread, Frame *pFrame)
+ {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+#ifdef _DEBUG
+ if (LoggingOn(LF_APPDOMAIN, LL_INFO100)) {
+ TrackADThreadExit(pThread, pFrame);
+ }
+ else
+#endif
+ {
+ LONG result;
+ result = InterlockedDecrement((LONG*)&m_dwThreadEnterCount);
+ _ASSERTE(result >= 0);
+ LOG((LF_APPDOMAIN, LL_INFO1000, "AppDomain::ThreadExit from [%d] (%8.8x) %S count %d\n",
+ GetId().m_dwId, this,
+ GetFriendlyNameForLogging(), GetThreadEnterCount()));
+#if _DEBUG_ADUNLOAD
+ printf("AppDomain::ThreadExit %x from [%d] (%8.8x) %S count %d\n",
+ pThread->GetThreadId(), this, GetId().m_dwId,
+ GetFriendlyNameForLogging(), GetThreadEnterCount());
+#endif
+ }
+ }
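+ // Sketch of the intended pairing (hypothetical transition code): every
+ // ThreadEnter is matched by a ThreadExit, so m_dwThreadEnterCount tracks how
+ // many threads are currently inside the domain, which unload consults via
+ // OnlyOneThreadLeft().
+ //
+ //     pDomain->ThreadEnter(pThread, pFrame);
+ //     // ... run code inside the domain ...
+ //     pDomain->ThreadExit(pThread, pFrame);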
+#endif // DACCESS_COMPILE
+
+ ULONG GetThreadEnterCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwThreadEnterCount;
+ }
+
+ BOOL OnlyOneThreadLeft()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwThreadEnterCount==1 || m_dwThreadsStillInAppDomain ==1;
+ }
+
+ Context *GetDefaultContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pDefaultContext;
+ }
+
+ BOOL CanLoadCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Stage >= STAGE_READYFORMANAGEDCODE && m_Stage < STAGE_CLOSED;
+ }
+
+ void SetAnonymouslyHostedDynamicMethodsAssembly(DomainAssembly * pDomainAssembly)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pDomainAssembly != NULL);
+ _ASSERTE(m_anonymouslyHostedDynamicMethodsAssembly == NULL);
+ m_anonymouslyHostedDynamicMethodsAssembly = pDomainAssembly;
+ }
+
+ DomainAssembly * GetAnonymouslyHostedDynamicMethodsAssembly()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_anonymouslyHostedDynamicMethodsAssembly;
+ }
+
+ BOOL HasUnloadStarted()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Stage>=STAGE_EXITED;
+ }
+ static void RefTakerAcquire(AppDomain* pDomain)
+ {
+ WRAPPER_NO_CONTRACT;
+ if(!pDomain)
+ return;
+ pDomain->AddRef();
+#ifdef _DEBUG
+ FastInterlockIncrement(&pDomain->m_dwRefTakers);
+#endif
+ }
+
+ static void RefTakerRelease(AppDomain* pDomain)
+ {
+ WRAPPER_NO_CONTRACT;
+ if(!pDomain)
+ return;
+#ifdef _DEBUG
+ _ASSERTE(pDomain->m_dwRefTakers);
+ FastInterlockDecrement(&pDomain->m_dwRefTakers);
+#endif
+ pDomain->Release();
+ }
+
+#ifdef _DEBUG
+
+ BOOL IsHeldByIterator()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwIterHolders>0;
+ }
+
+ BOOL IsHeldByRefTaker()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwRefTakers>0;
+ }
+
+ void IteratorRelease()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_dwIterHolders);
+ FastInterlockDecrement(&m_dwIterHolders);
+ }
+
+
+ void IteratorAcquire()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockIncrement(&m_dwIterHolders);
+ }
+
+#endif
+ BOOL IsActive()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_Stage >= STAGE_ACTIVE && m_Stage < STAGE_CLOSED;
+ }
+ // Range for normal execution of code in the appdomain. Currently used by
+ // appdomain resource monitoring, which only updates resource usage while the
+ // domain is within these stages (fields of AppDomain may not be valid
+ // outside them).
+ BOOL IsUserActive()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_Stage >= STAGE_ACTIVE && m_Stage <= STAGE_OPEN;
+ }
+ BOOL IsValid()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef DACCESS_COMPILE
+ // We want to see all appdomains in SOS, even the about to be destructed ones.
+ // There is no risk of races under DAC, so we will pretend to be unconditionally valid.
+ return TRUE;
+#else
+ return m_Stage > STAGE_CREATING && m_Stage < STAGE_CLOSED;
+#endif
+ }
+
+#ifdef _DEBUG
+ BOOL IsBeingCreated()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_dwCreationHolders > 0;
+ }
+
+ void IncCreationCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ FastInterlockIncrement(&m_dwCreationHolders);
+ _ASSERTE(m_dwCreationHolders > 0);
+ }
+
+ void DecCreationCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ FastInterlockDecrement(&m_dwCreationHolders);
+ _ASSERTE(m_dwCreationHolders > -1);
+ }
+#endif
+ BOOL IsRunningIn(Thread* pThread);
+
+ BOOL IsUnloading()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return m_Stage > STAGE_UNLOAD_REQUESTED;
+ }
+
+ BOOL NotReadyForManagedCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_Stage < STAGE_READYFORMANAGEDCODE;
+ }
+
+ void SetFinalized()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetStage(STAGE_FINALIZED);
+ }
+
+ BOOL IsFinalizing()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_Stage >= STAGE_FINALIZING;
+ }
+
+ BOOL IsFinalized()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_Stage >= STAGE_FINALIZED;
+ }
+
+ BOOL NoAccessToHandleTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return m_Stage >= STAGE_HANDLETABLE_NOACCESS;
+ }
+
+ // Checks whether the given thread can enter the app domain
+ BOOL CanThreadEnter(Thread *pThread);
+
+ // Following two are needed for the Holder
+ static void SetUnloadInProgress(AppDomain *pThis) PUB;
+ static void SetUnloadComplete(AppDomain *pThis) PUB;
+ // Predicates for GC asserts
+ BOOL ShouldHaveFinalization()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return ((DWORD) m_Stage) < STAGE_COLLECTED;
+ }
+ BOOL ShouldHaveCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return ((DWORD) m_Stage) < STAGE_COLLECTED;
+ }
+ BOOL ShouldHaveRoots()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return ((DWORD) m_Stage) < STAGE_CLEARED;
+ }
+ BOOL ShouldHaveInstances()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return ((DWORD) m_Stage) < STAGE_COLLECTED;
+ }
+
+
+ static void RaiseExitProcessEvent();
+ Assembly* RaiseResourceResolveEvent(DomainAssembly* pAssembly, LPCSTR szName);
+ DomainAssembly* RaiseTypeResolveEventThrowing(DomainAssembly* pAssembly, LPCSTR szName, ASSEMBLYREF *pResultingAssemblyRef);
+ Assembly* RaiseAssemblyResolveEvent(AssemblySpec *pSpec, BOOL fIntrospection, BOOL fPreBind);
+
+private:
+ CrstExplicitInit m_ReflectionCrst;
+ CrstExplicitInit m_RefClassFactCrst;
+
+
+ EEClassFactoryInfoHashTable *m_pRefClassFactHash; // Hash table that maps a class factory info to a COM comp.
+#ifdef FEATURE_COMINTEROP
+ DispIDCache *m_pRefDispIDCache;
+ COMorRemotingFlag m_COMorRemotingFlag;
+ OBJECTHANDLE m_hndMissing; //Handle points to Missing.Value Object which is used for [Optional] arg scenario during IDispatch CCW Call
+
+ PTR_DomainAssembly m_pSystemDll; // System.dll loaded into this domain
+ PTR_DomainAssembly m_pSystemRuntimeWindowsRuntimeDll; // System.Runtime.WindowsRuntime.dll loaded into this domain
+ PTR_DomainAssembly m_pSystemRuntimeWindowsRuntimeUIXamlDll; // System.Runtime.WindowsRuntime.UI.Xaml.dll loaded into this domain
+ PTR_DomainAssembly m_pSystemNumericsVectors; // System.Numerics.Vectors.dll loaded into this domain
+public:
+ BOOL FindRedirectedAssemblyFromIndexIfLoaded(WinMDAdapter::FrameworkAssemblyIndex index, Assembly** ppAssembly);
+
+ BOOL IsSystemDll(Assembly *pAssembly)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_pSystemDll != NULL && m_pSystemDll->GetCurrentAssembly() == pAssembly);
+ }
+private:
+
+ MethodTable* m_rpCLRTypes[WinMDAdapter::RedirectedTypeIndex_Count];
+
+ MethodTable* LoadRedirectedType(WinMDAdapter::RedirectedTypeIndex index, WinMDAdapter::FrameworkAssemblyIndex assembly);
+#endif // FEATURE_COMINTEROP
+
+public:
+
+ CrstBase *GetRefClassFactCrst()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return &m_RefClassFactCrst;
+ }
+
+#ifndef DACCESS_COMPILE
+ EEClassFactoryInfoHashTable* GetClassFactHash()
+ {
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+
+ if (m_pRefClassFactHash != NULL) {
+ return m_pRefClassFactHash;
+ }
+
+ return SetupClassFactHash();
+ }
+#endif // DACCESS_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+ DispIDCache* GetRefDispIDCache()
+ {
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+
+ if (m_pRefDispIDCache != NULL) {
+ return m_pRefDispIDCache;
+ }
+
+ return SetupRefDispIDCache();
+ }
+#endif // FEATURE_COMINTEROP
+
+ PTR_LoaderHeap GetStubHeap();
+ PTR_LoaderHeap GetLowFrequencyHeap();
+ PTR_LoaderHeap GetHighFrequencyHeap();
+ virtual PTR_LoaderAllocator GetLoaderAllocator();
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ #define ARM_ETW_ALLOC_THRESHOLD (4 * 1024 * 1024)
+ // cache line size expressed in ULONGLONGs: 128 bytes = 16 ULONGLONGs
+ #define ARM_CACHE_LINE_SIZE_ULL 16
+
+ inline ULONGLONG GetAllocBytes()
+ {
+ LIMITED_METHOD_CONTRACT;
+ ULONGLONG ullTotalAllocBytes = 0;
+
+ // Ensure that m_pullAllocBytes is non-null to avoid an AV in a race between GC and AD unload.
+ // A race can occur when a new appdomain is created, but an OOM is thrown when allocating for m_pullAllocBytes, causing the AD unload.
+ if(NULL != m_pullAllocBytes)
+ {
+ for (DWORD i = 0; i < m_dwNumHeaps; i++)
+ {
+ ullTotalAllocBytes += m_pullAllocBytes[i * ARM_CACHE_LINE_SIZE_ULL];
+ }
+ }
+ return ullTotalAllocBytes;
+ }
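+ // Layout sketch: per-heap counters are strided one cache line apart
+ // (ARM_CACHE_LINE_SIZE_ULL ULONGLONGs), so server-GC heaps on different
+ // cores do not false-share a line. Heap i owns slot i * 16:
+ //
+ //     m_pullAllocBytes: [heap0 @ 0] [heap1 @ 16] [heap2 @ 32] ...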
+
+ void RecordAllocBytes(size_t allocatedBytes, DWORD dwHeapNumber)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(dwHeapNumber < m_dwNumHeaps);
+
+ // Ensure that m_pullAllocBytes is non-null to avoid an AV in a race between GC and AD unload.
+ // A race can occur when a new appdomain is created, but an OOM is thrown when allocating for m_pullAllocBytes, causing the AD unload.
+ if(NULL != m_pullAllocBytes)
+ {
+ m_pullAllocBytes[dwHeapNumber * ARM_CACHE_LINE_SIZE_ULL] += allocatedBytes;
+ }
+
+ ULONGLONG ullTotalAllocBytes = GetAllocBytes();
+
+ if ((ullTotalAllocBytes - m_ullLastEtwAllocBytes) >= ARM_ETW_ALLOC_THRESHOLD)
+ {
+ m_ullLastEtwAllocBytes = ullTotalAllocBytes;
+ FireEtwAppDomainMemAllocated((ULONGLONG)this, ullTotalAllocBytes, GetClrInstanceId());
+ }
+ }
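+ // Worked example of the threshold logic above: with the 4MB threshold, if
+ // the last ETW watermark was 12MB and this allocation pushes the total to
+ // 16MB, the event fires and the watermark moves to 16MB; roughly one event
+ // per 4MB allocated across all heaps.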
+
+ inline ULONGLONG GetSurvivedBytes()
+ {
+ LIMITED_METHOD_CONTRACT;
+ ULONGLONG ullTotalSurvivedBytes = 0;
+
+ // Ensure that m_pullSurvivedBytes is non-null to avoid an AV in a race between GC and AD unload.
+ // A race can occur when a new appdomain is created, but an OOM is thrown when allocating for m_pullSurvivedBytes, causing the AD unload.
+ if(NULL != m_pullSurvivedBytes)
+ {
+ for (DWORD i = 0; i < m_dwNumHeaps; i++)
+ {
+ ullTotalSurvivedBytes += m_pullSurvivedBytes[i * ARM_CACHE_LINE_SIZE_ULL];
+ }
+ }
+ return ullTotalSurvivedBytes;
+ }
+
+ void RecordSurvivedBytes(size_t promotedBytes, DWORD dwHeapNumber)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(dwHeapNumber < m_dwNumHeaps);
+
+ // Ensure that m_pullSurvivedBytes is non-null to avoid an AV in a race between GC and AD unload.
+ // A race can occur when a new appdomain is created, but an OOM is thrown when allocating for m_pullSurvivedBytes, causing the AD unload.
+ if(NULL != m_pullSurvivedBytes)
+ {
+ m_pullSurvivedBytes[dwHeapNumber * ARM_CACHE_LINE_SIZE_ULL] += promotedBytes;
+ }
+ }
+
+ inline void ResetSurvivedBytes()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Ensure that m_pullSurvivedBytes is non-null to avoid an AV in a race between GC and AD unload.
+ // A race can occur when a new appdomain is created, but an OOM is thrown when allocating for m_pullSurvivedBytes, causing the AD unload.
+ if(NULL != m_pullSurvivedBytes)
+ {
+ for (DWORD i = 0; i < m_dwNumHeaps; i++)
+ {
+ m_pullSurvivedBytes[i * ARM_CACHE_LINE_SIZE_ULL] = 0;
+ }
+ }
+ }
+
+ // Return the total processor time (user and kernel) used by threads executing in this AppDomain so far.
+ // The result is in 100ns units.
+ ULONGLONG QueryProcessorUsage();
+
+ // Add to the current count of processor time used by threads within this AppDomain. This API is called by
+ // threads transitioning between AppDomains.
+ void UpdateProcessorUsage(ULONGLONG ullAdditionalUsage);
+#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+private:
+ static void RaiseOneExitProcessEvent_Wrapper(AppDomainIterator* pi);
+ static void RaiseOneExitProcessEvent();
+ size_t EstimateSize();
+ EEClassFactoryInfoHashTable* SetupClassFactHash();
+#ifdef FEATURE_COMINTEROP
+ DispIDCache* SetupRefDispIDCache();
+ COMorRemotingFlag GetPreferComInsteadOfManagedRemotingFromConfigFile();
+#endif // FEATURE_COMINTEROP
+
+ void InitializeDefaultDomainManager ();
+
+#ifdef FEATURE_CLICKONCE
+ void InitializeDefaultClickOnceDomain();
+#endif // FEATURE_CLICKONCE
+
+ void InitializeDefaultDomainSecurity();
+public:
+#ifdef FEATURE_CLICKONCE
+ BOOL IsClickOnceAppDomain();
+#endif // FEATURE_CLICKONCE
+
+protected:
+ BOOL PostBindResolveAssembly(AssemblySpec *pPrePolicySpec,
+ AssemblySpec *pPostPolicySpec,
+ HRESULT hrBindResult,
+ AssemblySpec **ppFailedSpec);
+
+#ifdef FEATURE_COMINTEROP
+public:
+ void ReleaseRCWs(LPVOID pCtxCookie);
+ void DetachRCWs();
+
+protected:
+#endif // FEATURE_COMINTEROP
+
+ LPWSTR m_pwDynamicDir;
+
+private:
+ void RaiseLoadingAssemblyEvent(DomainAssembly* pAssembly);
+
+ friend class DomainAssembly;
+
+public:
+ static void ProcessUnloadDomainEventOnFinalizeThread();
+ static BOOL HasWorkForFinalizerThread()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return s_pAppDomainToRaiseUnloadEvent != NULL;
+ }
+
+private:
+ static AppDomain* s_pAppDomainToRaiseUnloadEvent;
+ static BOOL s_fProcessUnloadDomainEvent;
+
+ void RaiseUnloadDomainEvent();
+ static void RaiseUnloadDomainEvent_Wrapper(LPVOID /* AppDomain * */);
+
+ BOOL RaiseUnhandledExceptionEvent(OBJECTREF *pSender, OBJECTREF *pThrowable, BOOL isTerminating);
+ BOOL HasUnhandledExceptionEventHandler();
+ BOOL RaiseUnhandledExceptionEventNoThrow(OBJECTREF *pSender, OBJECTREF *pThrowable, BOOL isTerminating);
+
+ struct RaiseUnhandled_Args
+ {
+ AppDomain *pExceptionDomain;
+ AppDomain *pTargetDomain;
+ OBJECTREF *pSender;
+ OBJECTREF *pThrowable;
+ BOOL isTerminating;
+ BOOL *pResult;
+ };
+ #ifndef FEATURE_CORECLR
+ static void RaiseUnhandledExceptionEvent_Wrapper(LPVOID /* RaiseUnhandled_Args * */);
+ #endif
+
+
+ static void AllowThreadEntrance(AppDomain *pApp);
+ static void RestrictThreadEntrance(AppDomain *pApp);
+
+ typedef Holder<AppDomain*,DoNothing<AppDomain*>,AppDomain::AllowThreadEntrance,NULL> RestrictEnterHolder;
+
+ enum Stage {
+ STAGE_CREATING,
+ STAGE_READYFORMANAGEDCODE,
+ STAGE_ACTIVE,
+ STAGE_OPEN,
+ STAGE_UNLOAD_REQUESTED,
+ STAGE_EXITING,
+ STAGE_EXITED,
+ STAGE_FINALIZING,
+ STAGE_FINALIZED,
+ STAGE_HANDLETABLE_NOACCESS,
+ STAGE_CLEARED,
+ STAGE_COLLECTED,
+ STAGE_CLOSED
+ };
+ void SetStage(Stage stage)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ STRESS_LOG2(LF_APPDOMAIN, LL_INFO100,"Updating AD stage, ADID=%d, stage=%d\n",GetId().m_dwId,stage);
+ TESTHOOKCALL(AppDomainStageChanged(GetId().m_dwId,m_Stage,stage));
+ Stage lastStage=m_Stage;
+ while (lastStage !=stage)
+ lastStage = (Stage)FastInterlockCompareExchange((LONG*)&m_Stage,stage,lastStage);
+ };
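+ // The loop above is a lock-free publish: FastInterlockCompareExchange returns
+ // the value it observed at m_Stage, so the loop retries until it observes the
+ // target stage. A successful exchange returns the *old* value, so one extra
+ // (failing) compare-exchange is what confirms the write. For illustration:
+ //
+ //     m_Stage == A, stage == B: CAS(A->B) returns A, loop; CAS now sees B, done.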
+ void Exit(BOOL fRunFinalizers, BOOL fAsyncExit);
+ void Close();
+ void ClearGCRoots();
+ void ClearGCHandles();
+ void HandleAsyncPinHandles();
+ void UnwindThreads();
+ // Return TRUE if EE is stopped
+ // Return FALSE if more work is needed
+ BOOL StopEEAndUnwindThreads(unsigned int retryCount, BOOL *pFMarkUnloadRequestThread);
+
+ // Use Rude Abort to unload the domain.
+ BOOL m_fRudeUnload;
+
+ Thread *m_pUnloadRequestThread;
+ ADUnloadSink* m_ADUnloadSink;
+ BOOL m_bForceGCOnUnload;
+ BOOL m_bUnloadingFromUnloadEvent;
+ AppDomainLoaderAllocator m_LoaderAllocator;
+
+ // List of unloaded LoaderAllocators, protected by code:GetLoaderAllocatorReferencesLock (for now)
+ LoaderAllocator * m_pDelayedLoaderAllocatorUnloadList;
+
+public:
+
+ // Register the loader allocator for deletion in code:ShutdownFreeLoaderAllocators.
+ void RegisterLoaderAllocatorForDeletion(LoaderAllocator * pLoaderAllocator);
+
+ AppDomain * m_pNextInDelayedUnloadList;
+
+ void SetForceGCOnUnload(BOOL bSet)
+ {
+ m_bForceGCOnUnload=bSet;
+ }
+
+ void SetUnloadingFromUnloadEvent()
+ {
+ m_bUnloadingFromUnloadEvent=TRUE;
+ }
+
+ BOOL IsUnloadingFromUnloadEvent()
+ {
+ return m_bUnloadingFromUnloadEvent;
+ }
+
+ void SetRudeUnload()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_fRudeUnload = TRUE;
+ }
+
+ BOOL IsRudeUnload()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_fRudeUnload;
+ }
+
+ ADUnloadSink* GetADUnloadSink();
+ ADUnloadSink* GetADUnloadSinkForUnload();
+ void SetUnloadRequestThread(Thread *pThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pUnloadRequestThread = pThread;
+ }
+
+ Thread *GetUnloadRequestThread()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pUnloadRequestThread;
+ }
+
+public:
+ void SetGCRefPoint(int gccounter)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_LoaderAllocator.SetGCRefPoint(gccounter);
+ }
+ int GetGCRefPoint()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_LoaderAllocator.GetGCRefPoint();
+ }
+
+ static USHORT GetOffsetOfId()
+ {
+ LIMITED_METHOD_CONTRACT;
+ size_t ofs = offsetof(class AppDomain, m_dwId);
+ _ASSERTE(FitsInI2(ofs));
+ return (USHORT)ofs;
+ }
+
+
+ void AddMemoryPressure();
+ void RemoveMemoryPressure();
+ void Unload(BOOL fForceUnload);
+ static HRESULT UnloadById(ADID Id, BOOL fSync, BOOL fExceptionsPassThrough=FALSE);
+ static HRESULT UnloadWait(ADID Id, ADUnloadSink* pSink);
+#ifdef FEATURE_TESTHOOKS
+ static HRESULT UnloadWaitNoCatch(ADID Id, ADUnloadSink* pSink);
+#endif
+ static void ResetUnloadRequestThread(ADID Id);
+
+ void UnlinkClass(MethodTable *pMT);
+
+ typedef Holder<AppDomain *, AppDomain::SetUnloadInProgress, AppDomain::SetUnloadComplete> UnloadHolder;
+ Assembly *GetRootAssembly()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pRootAssembly;
+ }
+
+#ifndef DACCESS_COMPILE
+ void SetRootAssembly(Assembly *pAssembly)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pRootAssembly = pAssembly;
+ }
+#endif
+
+private:
+ SString m_friendlyName;
+ PTR_Assembly m_pRootAssembly;
+
+ // General purpose flags.
+ DWORD m_dwFlags;
+
+ // When an application domain is created, the ref count is artificially incremented
+ // by one. For it to hit zero an explicit close must have happened.
+ LONG m_cRef; // Ref count.
+
+ PTR_IApplicationSecurityDescriptor m_pSecDesc; // Application Security Descriptor
+
+ OBJECTHANDLE m_ExposedObject;
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ // Indicates where assemblies will be loaded for this domain. By default all
+ // assemblies are loaded into the domain. There are two additional settings:
+ // all assemblies can be loaded into the shared domain, or only strong-named
+ // assemblies are loaded into the shared area.
+ SharePolicy m_SharePolicy;
+#endif
+
+ IUnknown *m_pComIPForExposedObject;
+
+ // Hash table that maps a clsid to a type
+ PtrHashMap m_clsidHash;
+
+#ifdef FEATURE_COMINTEROP
+ // Hash table that maps WinRT class names to MethodTables.
+ PTR_NameToTypeMapTable m_pNameToTypeMap;
+ UINT m_vNameToTypeMapVersion;
+
+ UINT m_nEpoch; // incremented each time m_pNameToTypeMap is enumerated
+
+ // Hash table that remembers the last cached WinRT factory object per type per appdomain.
+ WinRTFactoryCache *m_pWinRTFactoryCache;
+
+ // The wrapper cache for this domain - it has its own CCacheLineAllocator on a per domain basis
+ // to allow the domain to go away and eventually kill the memory when all refs are gone
+ ComCallWrapperCache *m_pComCallWrapperCache;
+
+ // this cache stores the RCWs in this domain
+ RCWCache *m_pRCWCache;
+
+ // this cache stores the RCW -> CCW references in this domain
+ RCWRefCache *m_pRCWRefCache;
+
+ // The method table used for LicenseInteropHelper
+ MethodTable* m_pLicenseInteropHelperMT;
+#endif // FEATURE_COMINTEROP
+
+ AssemblySink* m_pAsyncPool; // asynchronous retrieval object pool (only one is kept)
+
+ // The index of this app domain among existing app domains (starting from 1)
+ ADIndex m_dwIndex;
+
+ // The thread-pool index of this app domain among existing app domains (starting from 1)
+ TPIndex m_tpIndex;
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ ULONGLONG* m_pullAllocBytes;
+ ULONGLONG* m_pullSurvivedBytes;
+ DWORD m_dwNumHeaps;
+ ULONGLONG m_ullLastEtwAllocBytes;
+ // Total processor time (user and kernel) utilized by threads running in this AppDomain so far. May not
+ // account for threads currently executing in the AppDomain until a call to QueryProcessorUsage() is
+ // made.
+ Volatile<ULONGLONG> m_ullTotalProcessorUsage;
+#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+#ifdef _DEBUG
+ struct ThreadTrackInfo;
+ typedef CDynArray<ThreadTrackInfo *> ThreadTrackInfoList;
+ ThreadTrackInfoList *m_pThreadTrackInfoList;
+ DWORD m_TrackSpinLock;
+#endif
+
+
+ // IL stub cache with fabricated MethodTable parented by a random module in this AD.
+ ILStubCache m_ILStubCache;
+
+ // U->M thunks created in this domain and not associated with a delegate.
+ // The cache is keyed by MethodDesc pointers.
+ UMEntryThunkCache *m_pUMEntryThunkCache;
+
+ // The number of times we have entered this AD
+ ULONG m_dwThreadEnterCount;
+ // The number of threads that have entered this AD, for ADU only
+ ULONG m_dwThreadsStillInAppDomain;
+
+ Volatile<Stage> m_Stage;
+
+ // The default context for this domain
+ Context *m_pDefaultContext;
+
+ SString m_applicationBase;
+ SString m_privateBinPaths;
+ SString m_configFile;
+
+ ArrayList m_failedAssemblies;
+
+ DomainAssembly * m_anonymouslyHostedDynamicMethodsAssembly;
+
+#ifdef _DEBUG
+ Volatile<LONG> m_dwIterHolders;
+ Volatile<LONG> m_dwRefTakers;
+ Volatile<LONG> m_dwCreationHolders;
+#endif
+
+ //
+ // DAC iterator for failed assembly loads
+ //
+ class FailedAssemblyIterator
+ {
+ ArrayList::Iterator m_i;
+
+ public:
+ BOOL Next()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_i.Next();
+ }
+ FailedAssembly *GetFailedAssembly()
+ {
+ WRAPPER_NO_CONTRACT;
+ return dac_cast<PTR_FailedAssembly>(m_i.GetElement());
+ }
+ SIZE_T GetIndex()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_i.GetIndex();
+ }
+
+ private:
+ friend class AppDomain;
+ // Cannot have a constructor so this iterator can be used inside a union
+ static FailedAssemblyIterator Create(AppDomain *pDomain)
+ {
+ WRAPPER_NO_CONTRACT;
+ FailedAssemblyIterator i;
+
+ i.m_i = pDomain->m_failedAssemblies.Iterate();
+ return i;
+ }
+ };
+ friend class FailedAssemblyIterator;
+
+ FailedAssemblyIterator IterateFailedAssembliesEx()
+ {
+ WRAPPER_NO_CONTRACT;
+ return FailedAssemblyIterator::Create(this);
+ }
+
+ //---------------------------------------------------------
+ // Stub caches for Method stubs
+ //---------------------------------------------------------
+
+#ifdef FEATURE_FUSION
+ void TurnOnBindingRedirects();
+#endif
+public:
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+private:
+ Volatile<BOOL> m_fIsBindingModelLocked;
+public:
+ BOOL IsHostAssemblyResolverInUse();
+ BOOL IsBindingModelLocked();
+ BOOL LockBindingModel();
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+ UMEntryThunkCache *GetUMEntryThunkCache();
+
+ ILStubCache* GetILStubCache()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_ILStubCache;
+ }
+
+ static AppDomain* GetDomain(ILStubCache* pILStubCache)
+ {
+ return CONTAINING_RECORD(pILStubCache, AppDomain, m_ILStubCache);
+ }
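+ // CONTAINING_RECORD recovers the owning AppDomain from a pointer to its
+ // embedded m_ILStubCache member by subtracting the member's offset; roughly:
+ //
+ //     (AppDomain*)((BYTE*)pILStubCache - offsetof(AppDomain, m_ILStubCache))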
+
+ enum {
+ CONTEXT_INITIALIZED = 0x0001,
+ USER_CREATED_DOMAIN = 0x0002, // created by a call to AppDomain.CreateDomain
+ ALLOCATEDCOM = 0x0008,
+ LOAD_SYSTEM_ASSEMBLY_EVENT_SENT = 0x0040,
+ REMOTING_CONFIGURED_FOR_DOMAIN = 0x0100,
+ COMPILATION_DOMAIN = 0x0400, // Are we ngenning?
+ APP_DOMAIN_CAN_BE_UNLOADED = 0x0800, // if need extra bits, can derive this at runtime
+ ORPHANED_LOCKS = 0x1000, // Orphaned locks exist in this appdomain.
+ PASSIVE_DOMAIN = 0x2000, // Can we execute code in this AppDomain
+ VERIFICATION_DOMAIN = 0x4000, // This is a verification domain
+ ILLEGAL_VERIFICATION_DOMAIN = 0x8000, // This can't be a verification domain
+ IGNORE_UNHANDLED_EXCEPTIONS = 0x10000, // AppDomain was created using the APPDOMAIN_IGNORE_UNHANDLED_EXCEPTIONS flag
+ ENABLE_PINVOKE_AND_CLASSIC_COMINTEROP = 0x20000, // AppDomain was created using the APPDOMAIN_ENABLE_PINVOKE_AND_CLASSIC_COMINTEROP flag
+#ifdef MDIL
+ MDIL_COMPILATION_DOMAIN = 0x040000, // Are we generating MDIL?
+ MINIMAL_MDIL_COMPILATION_DOMAIN = 0x080000, // Are we generating platform MDIL?
+ NO_MDIL_COMPILATION_DOMAIN = 0x100000, // Are we generating a file we believe will fail on the Triton code path
+#endif
+#ifdef FEATURE_CORECLR
+ ENABLE_SKIP_PLAT_CHECKS = 0x200000, // Skip various assembly checks (like platform check)
+ ENABLE_ASSEMBLY_LOADFILE = 0x400000, // Allow Assembly.LoadFile in CoreCLR
+#endif
+ };
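+ // The values above are plain bitmasks over m_dwFlags; for illustration,
+ // SetCompilationDomain() above leaves an ngen domain with both bits set:
+ //
+ //     m_dwFlags |= (PASSIVE_DOMAIN | COMPILATION_DOMAIN);
+ //     (m_dwFlags & PASSIVE_DOMAIN) != 0 // TRUE: domain does not execute code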
+
+ SecurityContext *m_pSecContext;
+
+ AssemblySpecBindingCache m_AssemblyCache;
+ DomainAssemblyCache m_UnmanagedCache;
+ size_t m_MemoryPressure;
+
+ SString m_AppDomainManagerAssembly;
+ SString m_AppDomainManagerType;
+ BOOL m_fAppDomainManagerSetInConfig;
+ EInitializeNewDomainFlags m_dwAppDomainManagerInitializeDomainFlags;
+
+#ifdef FEATURE_CORECLR
+ ArrayList m_NativeDllSearchDirectories;
+#endif
+ BOOL m_ReversePInvokeCanEnter;
+ bool m_ForceTrivialWaitOperations;
+ // Section to support AD unload due to escalation
+public:
+ static void CreateADUnloadWorker();
+
+ static void CreateADUnloadStartEvent();
+
+ static DWORD WINAPI ADUnloadThreadStart(void *args);
+
+ // Default is safe unload with test hook
+ void EnableADUnloadWorker();
+
+ // If called to handle a stack overflow, we cannot set the event, since the thread has limited stack.
+ void EnableADUnloadWorker(EEPolicy::AppDomainUnloadTypes type, BOOL fHasStack = TRUE);
+
+ static void EnableADUnloadWorkerForThreadAbort();
+ static void EnableADUnloadWorkerForFinalizer();
+ static void EnableADUnloadWorkerForCollectedADCleanup();
+
+ BOOL IsUnloadRequested()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_Stage == STAGE_UNLOAD_REQUESTED);
+ }
+
+#ifdef FEATURE_CORECLR
+ BOOL IsImageFromTrustedPath(PEImage* pImage);
+ BOOL IsImageFullyTrusted(PEImage* pImage);
+#endif
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+private:
+ VolatilePtr<TypeEquivalenceHashTable> m_pTypeEquivalenceTable;
+ CrstExplicitInit m_TypeEquivalenceCrst;
+public:
+ TypeEquivalenceHashTable * GetTypeEquivalenceCache();
+#endif
+
+ private:
+ static void ADUnloadWorkerHelper(AppDomain *pDomain);
+ static CLREvent * g_pUnloadStartEvent;
+
+#ifdef DACCESS_COMPILE
+public:
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
+ bool enumThis);
+#endif
+
+#ifdef FEATURE_MULTICOREJIT
+
+private:
+ MulticoreJitManager m_MulticoreJitManager;
+
+public:
+ MulticoreJitManager & GetMulticoreJitManager()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_MulticoreJitManager;
+ }
+
+#endif
+
+#ifdef FEATURE_COMINTEROP
+
+private:
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+ // ReflectionOnly WinRT binder and its TypeCache (only in classic = non-AppX; the scenario is not supported in AppX)
+ CLRPrivBinderReflectionOnlyWinRT * m_pReflectionOnlyWinRtBinder;
+ CLRPrivTypeCacheReflectionOnlyWinRT * m_pReflectionOnlyWinRtTypeCache;
+#endif // FEATURE_REFLECTION_ONLY_LOAD
+
+#endif //FEATURE_COMINTEROP
+
+public:
+#ifndef FEATURE_CORECLR
+ BOOL m_bUseOsSorting;
+ DWORD m_sortVersion;
+ COMNlsCustomSortLibrary *m_pCustomSortLibrary;
+#if _DEBUG
+ BOOL m_bSortingInitialized;
+#endif // _DEBUG
+ COMNlsHashProvider *m_pNlsHashProvider;
+#endif // !FEATURE_CORECLR
+
+#ifdef FEATURE_HOSTED_BINDER
+private:
+ // This is the root-level default load context root binder. If null, then
+ // the Fusion binder is used; otherwise this binder is used.
+ ReleaseHolder<ICLRPrivBinder> m_pLoadContextHostBinder;
+
+ // -------------------------
+ // IMPORTANT!
+ // The shared and designer context binders are ONLY to be used in tool
+ // scenarios. There are known issues where use of these binders will
+ // cause application crashes and interesting behaviors.
+ // -------------------------
+
+ // This is the default designer shared context root binder.
+ // This is used as the parent binder for ImmersiveDesignerContextBinders
+ ReleaseHolder<ICLRPrivBinder> m_pSharedContextHostBinder;
+
+ // This is the current context root binder.
+ // Normally, this variable is immutable for appdomain lifetime, but in designer scenarios
+ // it may be replaced by designer context binders
+ Volatile<ICLRPrivBinder *> m_pCurrentContextHostBinder;
+
+public:
+ // Returns the current hosted binder, or null if none available.
+ inline
+ ICLRPrivBinder * GetCurrentLoadContextHostBinder() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCurrentContextHostBinder;
+ }
+
+ // Returns the shared context binder, or null if none available.
+ inline
+ ICLRPrivBinder * GetSharedContextHostBinder() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pSharedContextHostBinder;
+ }
+
+ // Returns the load context binder, or null if none available.
+ inline
+ ICLRPrivBinder * GetLoadContextHostBinder() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pLoadContextHostBinder;
+ }
+
+#ifndef DACCESS_COMPILE
+
+ // This is only called from the ImmersiveDesignerContext code
+ // It is protected with a managed monitor lock
+ inline
+ void SetSharedContextHostBinder(ICLRPrivBinder * pBinder)
+ {
+ LIMITED_METHOD_CONTRACT;
+ pBinder->AddRef();
+ m_pSharedContextHostBinder = pBinder;
+ }
+
+ // This is called from CorHost2's implementation of ICLRPrivRuntime::CreateAppDomain.
+ // Should only be called during AppDomain creation.
+ inline
+ void SetLoadContextHostBinder(ICLRPrivBinder * pBinder)
+ {
+ LIMITED_METHOD_CONTRACT;
+ pBinder->AddRef();
+ m_pLoadContextHostBinder = m_pCurrentContextHostBinder = pBinder;
+ }
+
+ inline
+ void SetCurrentContextHostBinder(ICLRPrivBinder * pBinder)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ LockHolder lh(this);
+
+#ifdef FEATURE_COMINTEROP
+ if (m_pNameToTypeMap != nullptr)
+ {
+ delete m_pNameToTypeMap;
+ m_pNameToTypeMap = nullptr;
+ }
+
+ m_vNameToTypeMapVersion++;
+#endif
+
+ m_pCurrentContextHostBinder = pBinder;
+ }
+
+#endif // DACCESS_COMPILE
+
+ // Indicates that a hosted binder is present.
+ inline
+ bool HasLoadContextHostBinder()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pLoadContextHostBinder != nullptr;
+ }
+
+ class ComInterfaceReleaseList
+ {
+ SArray<IUnknown *> m_objects;
+ public:
+ ~ComInterfaceReleaseList()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ for (COUNT_T i = 0; i < m_objects.GetCount(); i++)
+ {
+ IUnknown *pItf = *(m_objects.GetElements() + i);
+ if (pItf != nullptr)
+ pItf->Release();
+ }
+ }
+
+ // Append to the list of objects to free. Only use under the AppDomain "LockHolder(pAppDomain)".
+ void Append(IUnknown *pInterfaceToRelease)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_objects.Append(pInterfaceToRelease);
+ }
+ } AppDomainInterfaceReleaseList;
+
+private:
+ //-----------------------------------------------------------
+ // Static ICLRPrivAssembly -> DomainAssembly mapping functions.
+ // This map does not maintain a reference count to either key or value.
+ // PEFile maintains a reference count on the ICLRPrivAssembly through its code:PEFile::m_pHostAssembly field.
+ // It is removed from this hash table by code:DomainAssembly::~DomainAssembly.
+ struct HostAssemblyHashTraits : public DefaultSHashTraits<PTR_DomainAssembly>
+ {
+ public:
+ typedef PTR_ICLRPrivAssembly key_t;
+
+ static key_t GetKey(element_t const & elem)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ return elem->GetFile()->GetHostAssembly();
+ }
+
+ static BOOL Equals(key_t key1, key_t key2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return dac_cast<TADDR>(key1) == dac_cast<TADDR>(key2);
+ }
+
+ static count_t Hash(key_t key)
+ {
+ STATIC_CONTRACT_LIMITED_METHOD;
+ //return reinterpret_cast<count_t>(dac_cast<TADDR>(key));
+ return (count_t)(dac_cast<TADDR>(key));
+ }
+
+ static const element_t Null() { return NULL; }
+ static const element_t Deleted() { return (element_t)(TADDR)-1; }
+ static bool IsNull(const element_t & e) { return e == NULL; }
+ static bool IsDeleted(const element_t & e) { return dac_cast<TADDR>(e) == (TADDR)-1; }
+ };
+
+ struct OriginalFileHostAssemblyHashTraits : public HostAssemblyHashTraits
+ {
+ public:
+ static key_t GetKey(element_t const & elem)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ return elem->GetOriginalFile()->GetHostAssembly();
+ }
+ };
+
+ typedef SHash<HostAssemblyHashTraits> HostAssemblyMap;
+ typedef SHash<OriginalFileHostAssemblyHashTraits> OriginalFileHostAssemblyMap;
+ HostAssemblyMap m_hostAssemblyMap;
+ OriginalFileHostAssemblyMap m_hostAssemblyMapForOrigFile;
+ CrstExplicitInit m_crstHostAssemblyMap;
+ // Lock to serialize all Add operations (in addition to the "read-lock" above)
+ CrstExplicitInit m_crstHostAssemblyMapAdd;
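+ // Locking sketch implied by the comments above (signatures assumed): readers
+ // take m_crstHostAssemblyMap, while writers additionally serialize on
+ // m_crstHostAssemblyMapAdd. A lookup is roughly:
+ //
+ //     CrstHolder lock(&m_crstHostAssemblyMap);
+ //     PTR_DomainAssembly pDA = m_hostAssemblyMap.Lookup(pHostAssembly);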
+
+public:
+ // Returns DomainAssembly.
+ PTR_DomainAssembly FindAssembly(PTR_ICLRPrivAssembly pHostAssembly);
+
+#ifndef DACCESS_COMPILE
+private:
+ friend void DomainAssembly::Allocate();
+ friend DomainAssembly::~DomainAssembly();
+
+ // Called from DomainAssembly::Begin.
+ void PublishHostedAssembly(
+ DomainAssembly* pAssembly);
+
+ // Called from DomainAssembly::UpdatePEFile.
+ void UpdatePublishHostedAssembly(
+ DomainAssembly* pAssembly,
+ PTR_PEFile pFile);
+
+ // Called from DomainAssembly::~DomainAssembly
+ void UnPublishHostedAssembly(
+ DomainAssembly* pAssembly);
+#endif // DACCESS_COMPILE
+
+#endif //FEATURE_HOSTED_BINDER
+#ifdef FEATURE_PREJIT
+ friend void DomainFile::InsertIntoDomainFileWithNativeImageList();
+ Volatile<DomainFile *> m_pDomainFileWithNativeImageList;
+public:
+ DomainFile *GetDomainFilesWithNativeImagesList()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pDomainFileWithNativeImageList;
+ }
+#endif
+}; // class AppDomain
+
+
+// This holder is to be used to take a reference to make sure the AppDomain* is still valid.
+// Please do not use if you are already ADU-safe.
+typedef Wrapper<AppDomain*,AppDomain::RefTakerAcquire,AppDomain::RefTakerRelease,NULL> AppDomainRefTaker;
+
+// Just a ref holder
+typedef ReleaseHolder<AppDomain> AppDomainRefHolder;
+
+// This class provides a way to access AppDomain by ID
+// without risking the appdomain getting invalid in the process
+class AppDomainFromIDHolder
+{
+public:
+ enum SyncType
+ {
+ SyncType_GC, // Prevents AD from being unloaded by forbidding GC for the lifetime of the object
+ SyncType_ADLock // Prevents AD from being unloaded by requiring ownership of DomainLock for the lifetime of the object
+ };
+protected:
+ AppDomain* m_pDomain;
+#ifdef _DEBUG
+ BOOL m_bAcquired;
+ BOOL m_bChecked;
+ SyncType m_type;
+#endif
+public:
+ DEBUG_NOINLINE AppDomainFromIDHolder(ADID adId, BOOL bUnsafePoint, SyncType synctype=SyncType_GC);
+ DEBUG_NOINLINE AppDomainFromIDHolder(SyncType synctype=SyncType_GC);
+ DEBUG_NOINLINE ~AppDomainFromIDHolder();
+
+ void* GetAddress() { return m_pDomain; } // Used to get an identifier for ETW
+ void Assign(ADID adId, BOOL bUnsafePoint);
+ void ThrowIfUnloaded();
+ void Release();
+ BOOL IsUnloaded()
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef _DEBUG
+ m_bChecked=TRUE;
+ if (m_pDomain==NULL)
+ {
+ // no need to enforce anything
+ Release();
+ }
+#endif
+ return m_pDomain==NULL;
+ };
+ AppDomain* operator->();
+}; // class AppDomainFromIDHolder
+
+
+
+typedef VPTR(class SystemDomain) PTR_SystemDomain;
+
+class SystemDomain : public BaseDomain
+{
+ friend class AppDomainNative;
+ friend class AppDomainIterator;
+ friend class UnsafeAppDomainIterator;
+ friend class ClrDataAccess;
+ friend class AppDomainFromIDHolder;
+ friend Frame *Thread::IsRunningIn(AppDomain* pDomain, int *count);
+
+ VPTR_VTABLE_CLASS(SystemDomain, BaseDomain)
+ VPTR_UNIQUE(VPTR_UNIQUE_SystemDomain)
+ static AppDomain *GetAppDomainAtId(ADID indx);
+
+public:
+ static PTR_LoaderAllocator GetGlobalLoaderAllocator();
+ virtual PTR_LoaderAllocator GetLoaderAllocator() { WRAPPER_NO_CONTRACT; return GetGlobalLoaderAllocator(); }
+ static AppDomain* GetAppDomainFromId(ADID indx,DWORD ADValidityKind)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ AppDomain* pRetVal;
+ if (indx.m_dwId==DefaultADID)
+ pRetVal= SystemDomain::System()->DefaultDomain();
+ else
+ pRetVal= GetAppDomainAtId(indx);
+#ifdef _DEBUG
+ // Only call CheckADValidity in DEBUG builds for non-NULL return values
+ if (pRetVal != NULL)
+ CheckADValidity(pRetVal, ADValidityKind);
+#endif
+ return pRetVal;
+ }
+ //****************************************************************************************
+ //
+ // To be run during the initial start up of the EE. This must be
+ // performed prior to any class operations.
+ static void Attach();
+
+ //****************************************************************************************
+ //
+ // To be run during shutdown. This must be done after all operations
+ // that require the use of system classes (i.e., exceptions).
+ // DetachBegin stops all domains, while DetachEnd deallocates domain resources.
+ static void DetachBegin();
+
+ //****************************************************************************************
+ //
+ // To be run during shutdown. This must be done after all operations
+ // that require the use of system classes (i.e., exceptions).
+ // DetachEnd releases resources held by the SystemDomain and the default domain.
+ static void DetachEnd();
+
+ //****************************************************************************************
+ //
+ // Initializes and shuts down the single instance of the SystemDomain
+ // in the EE
+#ifndef DACCESS_COMPILE
+ void *operator new(size_t size, void *pInPlace);
+ void operator delete(void *pMem);
+#endif
+ void Init();
+ void Stop();
+ void Terminate();
+ static void LazyInitGlobalStringLiteralMap();
+
+ //****************************************************************************************
+ //
+ // Load the base system classes, these classes are required before
+ // any other classes are loaded
+ void LoadBaseSystemClasses();
+
+ AppDomain* DefaultDomain()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_pDefaultDomain;
+ }
+
+ // Notification when an assembly is loaded into the system domain
+ void OnAssemblyLoad(Assembly *assem);
+
+ //****************************************************************************************
+ //
+ // Global Static to get the one and only system domain
+ static SystemDomain * System()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_pSystemDomain;
+ }
+
+ static PEAssembly* SystemFile()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(m_pSystemDomain);
+ return System()->m_pSystemFile;
+ }
+
+ static Assembly* SystemAssembly()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return System()->m_pSystemAssembly;
+ }
+
+ static Module* SystemModule()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return SystemAssembly()->GetManifestModule();
+ }
+
+ static BOOL IsSystemLoaded()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return System()->m_pSystemAssembly != NULL;
+ }
+
+#ifndef DACCESS_COMPILE
+ static GlobalStringLiteralMap *GetGlobalStringLiteralMap()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pGlobalStringLiteralMap == NULL)
+ {
+ SystemDomain::LazyInitGlobalStringLiteralMap();
+ }
+ _ASSERTE(m_pGlobalStringLiteralMap);
+ return m_pGlobalStringLiteralMap;
+ }
+ static GlobalStringLiteralMap *GetGlobalStringLiteralMapNoCreate()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(m_pGlobalStringLiteralMap);
+ return m_pGlobalStringLiteralMap;
+ }
+#endif // DACCESS_COMPILE
+
+#ifndef FEATURE_CORECLR
+ static void ExecuteMainMethod(HMODULE hMod, __in_opt LPWSTR path = NULL);
+#endif
+ static void ActivateApplication(int *pReturnValue);
+
+ static void InitializeDefaultDomain(
+ BOOL allowRedirects
+#ifdef FEATURE_HOSTED_BINDER
+ , ICLRPrivBinder * pBinder = NULL
+#endif
+ );
+ static void SetupDefaultDomain();
+ static HRESULT SetupDefaultDomainNoThrow();
+
+#if defined(FEATURE_COMINTEROP_APARTMENT_SUPPORT) && !defined(CROSSGEN_COMPILE)
+ static Thread::ApartmentState GetEntryPointThreadAptState(IMDInternalImport* pScope, mdMethodDef mdMethod);
+ static void SetThreadAptState(IMDInternalImport* pScope, Thread::ApartmentState state);
+#endif
+ static BOOL SetGlobalSharePolicyUsingAttribute(IMDInternalImport* pScope, mdMethodDef mdMethod);
+
+#ifdef FEATURE_MIXEDMODE
+ static HRESULT RunDllMain(HINSTANCE hInst, DWORD dwReason, LPVOID lpReserved);
+#endif // FEATURE_MIXEDMODE
+
+ //****************************************************************************************
+ //
+ // Use an already existing & initialized Application Domain (e.g. a subclass).
+ static void LoadDomain(AppDomain *pDomain);
+
+#ifndef DACCESS_COMPILE
+ static void MakeUnloadable(AppDomain* pApp)
+ {
+ WRAPPER_NO_CONTRACT;
+ System()->AddDomain(pApp);
+ pApp->SetCanUnload();
+ }
+#endif // DACCESS_COMPILE
+
+ //****************************************************************************************
+ // Methods used to get the callers module and hence assembly and app domain.
+ __declspec(deprecated("This method is deprecated, use the version that takes a StackCrawlMark instead"))
+ static Module* GetCallersModule(int skip);
+ static MethodDesc* GetCallersMethod(StackCrawlMark* stackMark, AppDomain **ppAppDomain = NULL);
+ static MethodTable* GetCallersType(StackCrawlMark* stackMark, AppDomain **ppAppDomain = NULL);
+ static Module* GetCallersModule(StackCrawlMark* stackMark, AppDomain **ppAppDomain = NULL);
+ static Assembly* GetCallersAssembly(StackCrawlMark* stackMark, AppDomain **ppAppDomain = NULL);
+
+ static bool IsReflectionInvocationMethod(MethodDesc* pMeth);
+
+#ifndef DACCESS_COMPILE
+ //****************************************************************************************
+ // Returns the domain associated with the current context. (this can only be a child domain)
+ static inline AppDomain * GetCurrentDomain()
+ {
+ WRAPPER_NO_CONTRACT;
+ return ::GetAppDomain();
+ }
+#endif //!DACCESS_COMPILE
+
+#ifdef DEBUGGING_SUPPORTED
+ //****************************************************************************************
+ // Debugger/Publisher helper function to indicate the creation of a new app
+ // domain to the debugger and to publish it in the IPC block
+ static void PublishAppDomainAndInformDebugger (AppDomain *pDomain);
+#endif // DEBUGGING_SUPPORTED
+
+ //****************************************************************************************
+ // Helper function to remove a domain from the system
+ BOOL RemoveDomain(AppDomain* pDomain); // Does not decrement the reference
+
+#ifdef PROFILING_SUPPORTED
+ //****************************************************************************************
+ // Tell profiler about system created domains which are created before the profiler is
+ // actually activated.
+ static void NotifyProfilerStartup();
+
+ //****************************************************************************************
+ // Tell profiler at shutdown that system created domains are going away. They are not
+ // torn down using the normal sequence.
+ static HRESULT NotifyProfilerShutdown();
+#endif // PROFILING_SUPPORTED
+
+ IApplicationSecurityDescriptor* GetSecurityDescriptor()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return NULL;
+ }
+
+ //****************************************************************************************
+ // return the dev path
+#ifdef FEATURE_FUSION
+ void GetDevpathW(__out_ecount_opt(1) LPWSTR* pPath, DWORD* pSize);
+#endif
+
+#ifndef DACCESS_COMPILE
+ void IncrementNumAppDomains ()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ s_dNumAppDomains++;
+ }
+
+ void DecrementNumAppDomains ()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ s_dNumAppDomains--;
+ }
+
+ ULONG GetNumAppDomains ()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return s_dNumAppDomains;
+ }
+#endif // DACCESS_COMPILE
+
+ //
+ // AppDomains currently have both an index and an ID. The
+ // index is "densely" assigned; indices are reused as domains
+ // are unloaded. The IDs, on the other hand, are not reclaimed,
+ // so they may be sparse.
+ //
+ // Another important difference - it's OK to call GetAppDomainAtId for
+ // an unloaded domain (it will return NULL), while GetAppDomainAtIndex
+ // will assert if the domain is unloaded.
+ //<TODO>
+ // @todo:
+ // I'm not really happy with this situation, but
+ // (a) we need an ID for a domain which will last the process lifetime for the
+ // remoting code.
+ // (b) we need a dense ID, for the handle table index.
+ // So for now, I'm leaving both, but hopefully in the future we can come up
+ // with something better.
+ //</TODO>
+
+ static ADIndex GetNewAppDomainIndex(AppDomain * pAppDomain);
+ static void ReleaseAppDomainIndex(ADIndex indx);
+ static PTR_AppDomain GetAppDomainAtIndex(ADIndex indx);
+ static PTR_AppDomain TestGetAppDomainAtIndex(ADIndex indx);
+ static DWORD GetCurrentAppDomainMaxIndex()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ ArrayListStatic* list = (ArrayListStatic *)&m_appDomainIndexList;
+ PREFIX_ASSUME(list!=NULL);
+ return list->GetCount();
+ }
+
+ static ADID GetNewAppDomainId(AppDomain *pAppDomain);
+ static void ReleaseAppDomainId(ADID indx);
+
+#ifndef DACCESS_COMPILE
+ static ADID GetCurrentAppDomainMaxId() { ADID id; id.m_dwId=m_appDomainIdList.GetCount(); return id;}
+#endif // DACCESS_COMPILE
+
+
+#ifndef DACCESS_COMPILE
+ DWORD RequireAppDomainCleanup()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pDelayedUnloadList != 0 || m_pDelayedUnloadListOfLoaderAllocators != 0;
+ }
+
+ void AddToDelayedUnloadList(AppDomain* pDomain, BOOL bAsync)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ m_UnloadIsAsync = bAsync;
+
+ CrstHolder lh(&m_DelayedUnloadCrst);
+ pDomain->m_pNextInDelayedUnloadList=m_pDelayedUnloadList;
+ m_pDelayedUnloadList=pDomain;
+ if (m_UnloadIsAsync)
+ {
+ pDomain->AddRef();
+ int iGCRefPoint=GCHeap::GetGCHeap()->CollectionCount(GCHeap::GetGCHeap()->GetMaxGeneration());
+ if (GCHeap::GetGCHeap()->IsGCInProgress())
+ iGCRefPoint++;
+ pDomain->SetGCRefPoint(iGCRefPoint);
+ }
+ }
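+ // Worked example of the ref-point logic above: if CollectionCount(maxGen) is
+ // 7 and a GC is in flight, that in-flight collection may already have scanned
+ // this domain's roots, so the ref point is recorded as 8 and cleanup waits
+ // for a later collection to complete.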
+
+ void AddToDelayedUnloadList(LoaderAllocator * pAllocator)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ CrstHolder lh(&m_DelayedUnloadCrst);
+ pAllocator->m_pLoaderAllocatorDestroyNext=m_pDelayedUnloadListOfLoaderAllocators;
+ m_pDelayedUnloadListOfLoaderAllocators=pAllocator;
+
+ int iGCRefPoint=GCHeap::GetGCHeap()->CollectionCount(GCHeap::GetGCHeap()->GetMaxGeneration());
+ if (GCHeap::GetGCHeap()->IsGCInProgress())
+ iGCRefPoint++;
+ pAllocator->SetGCRefPoint(iGCRefPoint);
+ }
+
+ void ClearCollectedDomains();
+ void ProcessClearingDomains();
+ void ProcessDelayedUnloadDomains();
+
+ static void SetUnloadInProgress(AppDomain *pDomain)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(m_pAppDomainBeingUnloaded == NULL);
+ m_pAppDomainBeingUnloaded = pDomain;
+ m_dwIndexOfAppDomainBeingUnloaded = pDomain->GetIndex();
+ }
+
+ static void SetUnloadDomainCleared()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // about to delete, so clear this pointer so nobody uses it
+ m_pAppDomainBeingUnloaded = NULL;
+ }
+ static void SetUnloadComplete()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // should have already cleared the AppDomain* prior to delete:
+ // either we successfully unloaded and cleared, or we failed and restored the ID
+ _ASSERTE(m_pAppDomainBeingUnloaded == NULL && m_dwIndexOfAppDomainBeingUnloaded.m_dwIndex != 0
+ || m_pAppDomainBeingUnloaded && SystemDomain::GetAppDomainAtId(m_pAppDomainBeingUnloaded->GetId()) != NULL);
+ m_pAppDomainBeingUnloaded = NULL;
+ m_pAppDomainUnloadingThread = NULL;
+ }
+
+ static AppDomain *AppDomainBeingUnloaded()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pAppDomainBeingUnloaded;
+ }
+
+ static ADIndex IndexOfAppDomainBeingUnloaded()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwIndexOfAppDomainBeingUnloaded;
+ }
+
+ static void SetUnloadRequestingThread(Thread *pRequestingThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pAppDomainUnloadRequestingThread = pRequestingThread;
+ }
+
+ static Thread *GetUnloadRequestingThread()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pAppDomainUnloadRequestingThread;
+ }
+
+ static void SetUnloadingThread(Thread *pUnloadingThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pAppDomainUnloadingThread = pUnloadingThread;
+ }
+
+ static Thread *GetUnloadingThread()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pAppDomainUnloadingThread;
+ }
+
+ static void EnumAllStaticGCRefs(promote_func* fn, ScanContext* sc);
+
+#endif // DACCESS_COMPILE
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ // The *AD* methods return what we gathered by tracing through EE roots;
+ // RecordTotalSurvivedBytes records the total bytes promoted by a GC.
+ static void ResetADSurvivedBytes();
+ static ULONGLONG GetADSurvivedBytes();
+ static void RecordTotalSurvivedBytes(size_t totalSurvivedBytes);
+ static ULONGLONG GetTotalSurvivedBytes()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_totalSurvivedBytes;
+ }
+#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+ //****************************************************************************************
+ // Routines to deal with the base library (currently mscorlib.dll)
+ LPCWSTR BaseLibrary()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return m_BaseLibrary;
+ }
+
+#ifndef DACCESS_COMPILE
+ BOOL IsBaseLibrary(SString &path)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // See if it is the installation path to mscorlib
+ if (path.EqualsCaseInsensitive(m_BaseLibrary, PEImage::GetFileSystemLocale()))
+ return TRUE;
+
+ // Or, it might be the GAC location of mscorlib
+ if (System()->SystemAssembly() != NULL
+ && path.EqualsCaseInsensitive(System()->SystemAssembly()->GetManifestFile()->GetPath(),
+ PEImage::GetFileSystemLocale()))
+ return TRUE;
+
+ return FALSE;
+ }
+
+ BOOL IsBaseLibrarySatellite(SString &path)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // See if it is the installation path to mscorlib.resources
+ SString s(SString::Ascii,g_psBaseLibrarySatelliteAssemblyName);
+ if (path.EqualsCaseInsensitive(s, PEImage::GetFileSystemLocale()))
+ return TRUE;
+
+ // Workaround: we still need code to do this string comparison for
+ // mscorlib.resources in a culture-specific directory in the GAC.
+
+ /*
+ // Or, it might be the GAC location of mscorlib.resources
+ if (System()->SystemAssembly() != NULL
+ && path.EqualsCaseInsensitive(System()->SystemAssembly()->GetManifestFile()->GetPath(),
+ PEImage::GetFileSystemLocale()))
+ return TRUE;
+ */
+
+ return FALSE;
+ }
+#endif // DACCESS_COMPILE
+
+ // Return the system directory
+ LPCWSTR SystemDirectory()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return m_SystemDirectory;
+ }
+
+private:
+
+ //****************************************************************************************
+ // Helper function to create the single COM domain
+ void CreateDefaultDomain();
+
+ //****************************************************************************************
+ // Helper function to add a domain to the global list
+ void AddDomain(AppDomain* pDomain);
+
+ void CreatePreallocatedExceptions();
+
+ void PreallocateSpecialObjects();
+
+ //****************************************************************************************
+ //
+ static StackWalkAction CallersMethodCallback(CrawlFrame* pCrawlFrame, VOID* pClientData);
+ static StackWalkAction CallersMethodCallbackWithStackMark(CrawlFrame* pCrawlFrame, VOID* pClientData);
+
+#ifndef DACCESS_COMPILE
+ // This class is not to be created through normal allocation.
+ SystemDomain()
+ {
+ STANDARD_VM_CONTRACT;
+
+ m_pDefaultDomain = NULL;
+ m_pDelayedUnloadList=NULL;
+ m_pDelayedUnloadListOfLoaderAllocators=NULL;
+ m_UnloadIsAsync = FALSE;
+
+ m_GlobalAllocator.Init(this);
+ }
+#endif
+
+ PTR_PEAssembly m_pSystemFile; // Single assembly (here for quicker reference);
+ PTR_Assembly m_pSystemAssembly; // Single assembly (here for quicker reference);
+ PTR_AppDomain m_pDefaultDomain; // Default domain for COM+ classes exposed through IClassFactory.
+
+ GlobalLoaderAllocator m_GlobalAllocator;
+
+
+ InlineSString<100> m_BaseLibrary;
+
+#ifdef FEATURE_VERSIONING
+
+ InlineSString<100> m_SystemDirectory;
+
+#else
+
+ LPCWSTR m_SystemDirectory;
+
+#endif
+
+ LPWSTR m_pwDevpath;
+ DWORD m_dwDevpath;
+ BOOL m_fDevpath; // have we searched the environment
+
+ // <TODO>@TODO: CTS, we can keep the com modules in a single assembly or in different assemblies.
+ // We are currently using different assemblies, but this is potentially too slow...</TODO>
+
+ // Global domain that every one uses
+ SPTR_DECL(SystemDomain, m_pSystemDomain);
+
+ AppDomain* m_pDelayedUnloadList;
+ BOOL m_UnloadIsAsync;
+
+ LoaderAllocator * m_pDelayedUnloadListOfLoaderAllocators;
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ // This is what gets promoted for the whole GC heap.
+ static size_t m_totalSurvivedBytes;
+#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+ SVAL_DECL(ArrayListStatic, m_appDomainIndexList);
+#ifndef DACCESS_COMPILE
+ static CrstStatic m_DelayedUnloadCrst;
+ static CrstStatic m_SystemDomainCrst;
+
+
+ static ArrayListStatic m_appDomainIdList;
+
+ // only one ad can be unloaded at a time
+ static AppDomain* m_pAppDomainBeingUnloaded;
+ // need this so can determine AD being unloaded after it has been deleted
+ static ADIndex m_dwIndexOfAppDomainBeingUnloaded;
+
+ // If we had to spin off a separate thread to do the unload, this is the
+ // original requesting thread. It lets us delay aborting that thread until
+ // it is the last one left, so that it can receive notification of an
+ // unload failure.
+ static Thread *m_pAppDomainUnloadRequestingThread;
+
+ // This is the thread doing the actual unload. It is allowed to enter the
+ // domain even after unloading has started.
+ static Thread *m_pAppDomainUnloadingThread;
+
+ static GlobalStringLiteralMap *m_pGlobalStringLiteralMap;
+
+ static ULONG s_dNumAppDomains; // Maintain a count of children app domains.
+
+ static DWORD m_dwLowestFreeIndex;
+#endif // DACCESS_COMPILE
+
+protected:
+
+ // These flags let the correct native image of mscorlib be loaded.
+ // This is important for hardbinding to it.
+
+ SVAL_DECL(BOOL, s_fForceDebug);
+ SVAL_DECL(BOOL, s_fForceProfiling);
+ SVAL_DECL(BOOL, s_fForceInstrument);
+
+public:
+ static void SetCompilationOverrides(BOOL fForceDebug,
+ BOOL fForceProfiling,
+ BOOL fForceInstrument);
+
+ static void GetCompilationOverrides(BOOL * fForceDebug,
+ BOOL * fForceProfiling,
+ BOOL * fForceInstrument);
+public:
+ //****************************************************************************************
+ //
+
+#ifndef DACCESS_COMPILE
+#ifdef _DEBUG
+ inline static BOOL IsUnderDomainLock() { LIMITED_METHOD_CONTRACT; return m_SystemDomainCrst.OwnedByCurrentThread(); }
+#endif
+
+ // This lock controls adding and removing domains from the system domain
+ class LockHolder : public CrstHolder
+ {
+ public:
+ LockHolder()
+ : CrstHolder(&m_SystemDomainCrst)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ };
+#endif // DACCESS_COMPILE
+
+public:
+ DWORD GetTotalNumSizedRefHandles();
+
+#ifdef DACCESS_COMPILE
+public:
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
+ bool enumThis);
+#endif
+
+}; // class SystemDomain
+
+
+//
+// An UnsafeAppDomainIterator is used to iterate over all existing domains.
+//
+// The iteration is guaranteed to include all domains that exist at both the
+// start and the end of the iteration. This iterator is considered unsafe
+// because it does not reference count the appdomains it returns, so it may
+// only be used when the runtime is stopped or under external synchronization
+// (that is, when no other thread can cause the appdomain list to change).
+//
+class UnsafeAppDomainIterator
+{
+ friend class SystemDomain;
+public:
+ UnsafeAppDomainIterator(BOOL bOnlyActive)
+ {
+ m_bOnlyActive = bOnlyActive;
+ }
+
+ void Init()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SystemDomain* sysDomain = SystemDomain::System();
+ if (sysDomain)
+ {
+ ArrayListStatic* list = &sysDomain->m_appDomainIndexList;
+ PREFIX_ASSUME(list != NULL);
+ m_i = list->Iterate();
+ }
+ else
+ {
+ m_i.SetEmpty();
+ }
+
+ m_pCurrent = NULL;
+ }
+
+ BOOL Next()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ while (m_i.Next())
+ {
+ m_pCurrent = dac_cast<PTR_AppDomain>(m_i.GetElement());
+ if (m_pCurrent != NULL &&
+ (m_bOnlyActive ?
+ m_pCurrent->IsActive() : m_pCurrent->IsValid()))
+ {
+ return TRUE;
+ }
+ }
+
+ m_pCurrent = NULL;
+ return FALSE;
+ }
+
+ AppDomain * GetDomain()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_pCurrent;
+ }
+
+ private:
+
+ ArrayList::Iterator m_i;
+ AppDomain * m_pCurrent;
+ BOOL m_bOnlyActive;
+}; // class UnsafeAppDomainIterator
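+
+// A minimal usage sketch (assumes the runtime is suspended or the caller
+// holds external synchronization, per the comment above):
+//
+//     UnsafeAppDomainIterator i(TRUE /* only active domains */);
+//     i.Init();
+//     while (i.Next())
+//     {
+//         AppDomain * pDomain = i.GetDomain();
+//         // inspect pDomain; no reference is taken on it
+//     }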
+
+//
+// An AppDomainIterator is used to iterate over all existing domains.
+//
+// The iteration is guaranteed to include all domains that exist at both the
+// start and the end of the iteration. Any domains added or deleted during
+// iteration may or may not be included. The iterator also guarantees that
+// the current iterated appdomain (GetDomain()) will not be deleted.
+//
+
+class AppDomainIterator : public UnsafeAppDomainIterator
+{
+ friend class SystemDomain;
+
+ public:
+ AppDomainIterator(BOOL bOnlyActive) : UnsafeAppDomainIterator(bOnlyActive)
+ {
+ WRAPPER_NO_CONTRACT;
+ Init();
+ }
+
+ ~AppDomainIterator()
+ {
+ WRAPPER_NO_CONTRACT;
+
+#ifndef DACCESS_COMPILE
+ if (GetDomain() != NULL)
+ {
+#ifdef _DEBUG
+ GetDomain()->IteratorRelease();
+#endif
+ GetDomain()->Release();
+ }
+#endif
+ }
+
+ BOOL Next()
+ {
+ WRAPPER_NO_CONTRACT;
+
+#ifndef DACCESS_COMPILE
+ if (GetDomain() != NULL)
+ {
+#ifdef _DEBUG
+ GetDomain()->IteratorRelease();
+#endif
+ GetDomain()->Release();
+ }
+
+ SystemDomain::LockHolder lh;
+#endif
+
+ if (UnsafeAppDomainIterator::Next())
+ {
+#ifndef DACCESS_COMPILE
+ GetDomain()->AddRef();
+#ifdef _DEBUG
+ GetDomain()->IteratorAcquire();
+#endif
+#endif
+ return TRUE;
+ }
+
+ return FALSE;
+ }
+}; // class AppDomainIterator
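+
+// A minimal usage sketch: Next() and the destructor handle the reference
+// counting, so the body may use GetDomain() safely:
+//
+//     AppDomainIterator i(TRUE);
+//     while (i.Next())
+//     {
+//         AppDomain * pDomain = i.GetDomain(); // referenced until the next Next()
+//     }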
+
+typedef VPTR(class SharedDomain) PTR_SharedDomain;
+
+class SharedDomain : public BaseDomain
+{
+
+ VPTR_VTABLE_CLASS(SharedDomain, BaseDomain)
+
+public:
+
+ static void Attach();
+ static void Detach();
+
+ virtual BOOL IsSharedDomain() { LIMITED_METHOD_DAC_CONTRACT; return TRUE; }
+ virtual PTR_LoaderAllocator GetLoaderAllocator() { WRAPPER_NO_CONTRACT; return SystemDomain::GetGlobalLoaderAllocator(); }
+
+ virtual PTR_AppDomain AsAppDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ _ASSERTE(!"Not an AppDomain");
+ return NULL;
+ }
+
+ static SharedDomain * GetDomain();
+
+ void Init();
+ void Terminate();
+
+ // This will also set the tenured bit if and only if the add was successful,
+ // and will make sure that the bit appears atomically set to all readers that
+ // might be accessing the hash on another thread.
+ MethodTable * FindIndexClass(SIZE_T index);
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ void AddShareableAssembly(Assembly * pAssembly);
+
+ class SharedAssemblyIterator
+ {
+ PtrHashMap::PtrIterator i;
+ Assembly * m_pAssembly;
+
+ public:
+ SharedAssemblyIterator() :
+ i(GetDomain() ? GetDomain()->m_assemblyMap.firstBucket() : NULL)
+ { LIMITED_METHOD_DAC_CONTRACT; }
+
+ BOOL Next()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ if (i.end())
+ return FALSE;
+
+ m_pAssembly = PTR_Assembly(dac_cast<TADDR>(i.GetValue()));
+ ++i;
+ return TRUE;
+ }
+
+ Assembly * GetAssembly()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_pAssembly;
+ }
+
+ private:
+ friend class SharedDomain;
+ };
+
+ Assembly * FindShareableAssembly(SharedAssemblyLocator * pLocator);
+ SIZE_T GetShareableAssemblyCount();
+#endif //FEATURE_LOADER_OPTIMIZATION
+
+private:
+ friend class SharedAssemblyIterator;
+ friend class SharedFileLockHolder;
+ friend class ClrDataAccess;
+
+#ifndef DACCESS_COMPILE
+ void *operator new(size_t size, void *pInPlace);
+ void operator delete(void *pMem);
+#endif
+
+ SPTR_DECL(SharedDomain, m_pSharedDomain);
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ PEFileListLock m_FileCreateLock;
+ SIZE_T m_nextClassIndex;
+ PtrHashMap m_assemblyMap;
+#endif
+
+public:
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
+ bool enumThis);
+#endif
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ // Hash map comparison function
+ static BOOL CompareSharedAssembly(UPTR u1, UPTR u2);
+#endif
+};
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+class SharedFileLockHolderBase : protected HolderBase<PEFile *>
+{
+ protected:
+ PEFileListLock *m_pLock;
+ ListLockEntry *m_pLockElement;
+
+ SharedFileLockHolderBase(PEFile *value)
+ : HolderBase<PEFile *>(value)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pLock = NULL;
+ m_pLockElement = NULL;
+ }
+
+#ifndef DACCESS_COMPILE
+ void DoAcquire()
+ {
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+
+ PEFileListLockHolder lockHolder(m_pLock);
+
+ m_pLockElement = m_pLock->FindFileLock(m_value);
+ if (m_pLockElement == NULL)
+ {
+ m_pLockElement = new ListLockEntry(m_pLock, m_value);
+ m_pLock->AddElement(m_pLockElement);
+ }
+ else
+ m_pLockElement->AddRef();
+
+ lockHolder.Release();
+
+ m_pLockElement->Enter();
+ }
+
+ void DoRelease()
+ {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ m_pLockElement->Leave();
+ m_pLockElement->Release();
+ m_pLockElement = NULL;
+ }
+#endif // DACCESS_COMPILE
+};
+
+class SharedFileLockHolder : public BaseHolder<PEFile *, SharedFileLockHolderBase>
+{
+ public:
+ DEBUG_NOINLINE SharedFileLockHolder(SharedDomain *pDomain, PEFile *pFile, BOOL Take = TRUE)
+ : BaseHolder<PEFile *, SharedFileLockHolderBase>(pFile, FALSE)
+ {
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+
+ m_pLock = &pDomain->m_FileCreateLock;
+ if (Take)
+ Acquire();
+ }
+};
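+
+// A usage sketch (hedged): a scoped per-PEFile lock in the shared domain,
+// taken while a shareable assembly is created or published.
+//
+//     SharedFileLockHolder lock(pSharedDomain, pFile); // Take defaults to TRUE
+//     // ... work on pFile under the lock ...
+//     // Leave()/Release() happen when the holder goes out of scope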
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+inline BOOL BaseDomain::IsDefaultDomain()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (SystemDomain::System()->DefaultDomain() == this);
+}
+
+#include "comreflectioncache.inl"
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+// Holds an extra reference, so it needs a special Extract() and must not have
+// SuppressRelease(). Holders/Wrappers have non-virtual methods, so we cannot
+// use them as the base class.
+template <class AppDomainType>
+class AppDomainCreationHolder
+{
+private:
+ // disable the copy ctor
+ AppDomainCreationHolder(const AppDomainCreationHolder<AppDomainType>&) {}
+
+protected:
+ AppDomainType* m_pDomain;
+ BOOL m_bAcquired;
+ void ReleaseAppDomainDuringCreation()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS);
+ PRECONDITION(m_bAcquired);
+ PRECONDITION(CheckPointer(m_pDomain));
+ }
+ CONTRACTL_END;
+
+ if (m_pDomain->NotReadyForManagedCode())
+ {
+ m_pDomain->Release();
+ }
+ else
+ {
+ STRESS_LOG2 (LF_APPDOMAIN, LL_INFO100, "Unload domain during creation [%d] %p\n", m_pDomain->GetId().m_dwId, m_pDomain);
+ SystemDomain::MakeUnloadable(m_pDomain);
+#ifdef _DEBUG
+ DWORD hostTestADUnload = g_pConfig->GetHostTestADUnload();
+ m_pDomain->EnableADUnloadWorker(hostTestADUnload != 2?EEPolicy::ADU_Safe:EEPolicy::ADU_Rude);
+#else
+ m_pDomain->EnableADUnloadWorker(EEPolicy::ADU_Safe);
+#endif
+ }
+ };
+
+public:
+ AppDomainCreationHolder()
+ {
+ m_pDomain=NULL;
+ m_bAcquired=FALSE;
+ };
+ ~AppDomainCreationHolder()
+ {
+ if (m_bAcquired)
+ {
+ Release();
+ }
+ };
+ void Assign(AppDomainType* pDomain)
+ {
+ if(m_bAcquired)
+ Release();
+ m_pDomain=pDomain;
+ if(m_pDomain)
+ {
+ AppDomain::RefTakerAcquire(m_pDomain);
+#ifdef _DEBUG
+ m_pDomain->IncCreationCount();
+#endif // _DEBUG
+ }
+ m_bAcquired=TRUE;
+ };
+
+ void Release()
+ {
+ _ASSERTE(m_bAcquired);
+ if(m_pDomain)
+ {
+#ifdef _DEBUG
+ m_pDomain->DecCreationCount();
+#endif // _DEBUG
+ if(!m_pDomain->IsDefaultDomain())
+ ReleaseAppDomainDuringCreation();
+ AppDomain::RefTakerRelease(m_pDomain);
+ };
+ m_bAcquired=FALSE;
+ };
+
+ AppDomainType* Extract()
+ {
+ _ASSERTE(m_bAcquired);
+ if(m_pDomain)
+ {
+#ifdef _DEBUG
+ m_pDomain->DecCreationCount();
+#endif // _DEBUG
+ AppDomain::RefTakerRelease(m_pDomain);
+ }
+ m_bAcquired=FALSE;
+ return m_pDomain;
+ };
+
+ AppDomainType* operator ->()
+ {
+ _ASSERTE(m_bAcquired);
+ return m_pDomain;
+ }
+
+ operator AppDomainType*()
+ {
+ _ASSERTE(m_bAcquired);
+ return m_pDomain;
+ }
+
+ void DoneCreating()
+ {
+ Extract();
+ }
+};
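+
+// A usage sketch (see AppDomainNative::CreateDomainHelper for a real call
+// site):
+//
+//     AppDomainCreationHolder<AppDomain> pDomain;
+//     AppDomain::CreateUnmanagedObject(pDomain); // Assign()s the new domain
+//     // ... run setup; a throw unwinds via Release(), which queues the
+//     // partially created domain for unload ...
+//     pDomain.DoneCreating(); // Extract()s; the holder gives up ownership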
+#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
+
+
+#endif // !CLR_STANDALONE_BINDER
+
+#endif
diff --git a/src/vm/appdomain.inl b/src/vm/appdomain.inl
new file mode 100644
index 0000000000..5287450265
--- /dev/null
+++ b/src/vm/appdomain.inl
@@ -0,0 +1,342 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+/*============================================================
+**
+** Header: AppDomain.inl
+**
+
+**
+** Purpose: Implements AppDomain (loader domain) architecture
+** inline functions
+**
+**
+===========================================================*/
+#ifndef _APPDOMAIN_I
+#define _APPDOMAIN_I
+
+#ifndef BINDER
+
+#ifndef DACCESS_COMPILE
+
+#include "appdomain.hpp"
+
+#ifdef FEATURE_CORECLR
+inline void BaseDomain::SetAppDomainCompatMode(AppDomainCompatMode compatMode)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_CompatMode = compatMode;
+}
+
+inline BaseDomain::AppDomainCompatMode BaseDomain::GetAppDomainCompatMode()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_CompatMode;
+}
+#endif // FEATURE_CORECLR
+
+inline void AppDomain::SetUnloadInProgress(AppDomain *pThis)
+{
+ WRAPPER_NO_CONTRACT;
+
+ SystemDomain::System()->SetUnloadInProgress(pThis);
+}
+
+inline void AppDomain::SetUnloadComplete(AppDomain *pThis)
+{
+ GCX_COOP();
+
+ SystemDomain::System()->SetUnloadComplete();
+}
+
+inline void AppDomain::EnterContext(Thread* pThread, Context* pCtx,ContextTransitionFrame *pFrame)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pThread));
+ PRECONDITION(CheckPointer(pCtx));
+ PRECONDITION(CheckPointer(pFrame));
+ PRECONDITION(pCtx->GetDomain()==this);
+ }
+ CONTRACTL_END;
+ pThread->EnterContextRestricted(pCtx,pFrame);
+};
+
+
+inline AppDomainFromIDHolder::~AppDomainFromIDHolder()
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef _DEBUG
+ if(m_bAcquired)
+ Release();
+#endif
+}
+
+inline void AppDomainFromIDHolder::Release()
+{
+ //do not use real contract here!
+ WRAPPER_NO_CONTRACT;
+#ifdef _DEBUG
+ if(m_bAcquired)
+ {
+ if (m_type==SyncType_GC)
+#ifdef ENABLE_CONTRACTS_IMPL
+ {
+ if (GetThread())
+ {
+ STRESS_LOG1(LF_APPDOMAIN, LL_INFO10000, "AppDomainFromIDHolder::Release is allowing GC - %08x",this);
+ GetThread()->EndForbidGC();
+ }
+ else
+ {
+ if (!IsGCThread())
+ {
+ _ASSERTE(!"Should not be called from a non GC thread");
+ }
+ }
+ }
+#else
+ m_pDomain=NULL;
+#endif
+ else
+ if (m_type==SyncType_ADLock)
+ SystemDomain::m_SystemDomainCrst.SetCantLeave(FALSE);
+ else
+ {
+ _ASSERTE(!"Unknown type");
+ }
+ m_pDomain=NULL;
+ m_bAcquired=FALSE;
+ }
+#endif
+}
+
+inline void AppDomainFromIDHolder::Assign(ADID id, BOOL bUnsafePoint)
+{
+ //do not use real contract here!
+ WRAPPER_NO_CONTRACT;
+ TESTHOOKCALL(AppDomainCanBeUnloaded(id.m_dwId, bUnsafePoint));
+#ifdef _DEBUG
+ m_bChecked=FALSE;
+ if (m_type==SyncType_GC)
+ {
+#ifdef ENABLE_CONTRACTS_IMPL
+ if (GetThread())
+ {
+ _ASSERTE(GetThread()->PreemptiveGCDisabled());
+ STRESS_LOG1(LF_APPDOMAIN, LL_INFO10000, "AppDomainFromIDHolder::Assign is forbidding GC - %08x",this);
+ GetThread()->BeginForbidGC(__FILE__, __LINE__);
+ }
+ else
+ {
+ if (!IsGCThread())
+ {
+ _ASSERTE(!"Should not be called from a non GC thread");
+ }
+ }
+#endif
+ }
+ else
+ if (m_type==SyncType_ADLock)
+ {
+ _ASSERTE(SystemDomain::m_SystemDomainCrst.OwnedByCurrentThread());
+ SystemDomain::m_SystemDomainCrst.SetCantLeave(TRUE);
+ }
+ else
+ {
+ _ASSERT(!"NI");
+ }
+
+ m_bAcquired=TRUE;
+#endif
+ m_pDomain=SystemDomain::GetAppDomainAtId(id);
+
+}
+
+
+
+inline void AppDomainFromIDHolder::ThrowIfUnloaded()
+{
+ STATIC_CONTRACT_THROWS;
+ if (IsUnloaded())
+ {
+ COMPlusThrow(kAppDomainUnloadedException);
+ }
+#ifdef _DEBUG
+ m_bChecked=TRUE;
+#endif
+}
+
+inline AppDomain* AppDomainFromIDHolder::operator ->()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_bChecked && m_bAcquired);
+ return m_pDomain;
+}
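+
+// A usage sketch (hedged; construction details live in appdomain.hpp). The
+// required order is enforced by the asserts in this file:
+//
+//     AppDomainFromIDHolder ad(...); // bound to SyncType_GC or SyncType_ADLock
+//     ad.Assign(id, TRUE);           // resolves the ADID, forbids GC in debug
+//     ad.ThrowIfUnloaded();          // must run before dereferencing
+//     ad->SomeMethod();              // operator-> asserts the check was made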
+
+inline DomainAssembly* AppDomain::FindDomainAssembly(Assembly* assembly)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(assembly));
+ }
+ CONTRACTL_END;
+ return assembly->FindDomainAssembly(this);
+};
+
+inline BOOL AppDomain::IsRunningIn(Thread* pThread)
+{
+ WRAPPER_NO_CONTRACT;
+ if (IsDefaultDomain())
+ return TRUE;
+ return pThread->IsRunningIn(this, NULL)!=NULL;
+}
+
+
+
+inline void AppDomain::AddMemoryPressure()
+{
+ STANDARD_VM_CONTRACT;
+ m_MemoryPressure=EstimateSize();
+ GCInterface::AddMemoryPressure(m_MemoryPressure);
+}
+
+inline void AppDomain::RemoveMemoryPressure()
+{
+ WRAPPER_NO_CONTRACT;
+
+ GCInterface::RemoveMemoryPressure(m_MemoryPressure);
+}
+
+#endif // DACCESS_COMPILE
+
+inline void AppDomain::SetAppDomainManagerInfo(LPCWSTR szAssemblyName, LPCWSTR szTypeName, EInitializeNewDomainFlags dwInitializeDomainFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ m_AppDomainManagerAssembly=szAssemblyName;
+ m_AppDomainManagerType=szTypeName;
+ m_dwAppDomainManagerInitializeDomainFlags = dwInitializeDomainFlags;
+}
+
+inline BOOL AppDomain::HasAppDomainManagerInfo()
+{
+ WRAPPER_NO_CONTRACT;
+ return !m_AppDomainManagerAssembly.IsEmpty() && !m_AppDomainManagerType.IsEmpty();
+}
+
+inline LPCWSTR AppDomain::GetAppDomainManagerAsm()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_AppDomainManagerAssembly;
+}
+
+
+inline LPCWSTR AppDomain::GetAppDomainManagerType()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_AppDomainManagerType;
+}
+
+#ifndef FEATURE_CORECLR
+inline BOOL AppDomain::AppDomainManagerSetFromConfig()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_fAppDomainManagerSetInConfig;
+}
+#endif // !FEATURE_CORECLR
+
+inline EInitializeNewDomainFlags AppDomain::GetAppDomainManagerInitializeNewDomainFlags()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_dwAppDomainManagerInitializeDomainFlags;
+}
+
+#ifdef FEATURE_CORECLR
+inline AppDomain::PathIterator AppDomain::IterateNativeDllSearchDirectories()
+{
+ WRAPPER_NO_CONTRACT;
+ PathIterator i;
+ i.m_i = m_NativeDllSearchDirectories.Iterate();
+ return i;
+}
+
+inline BOOL AppDomain::HasNativeDllSearchDirectories()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_NativeDllSearchDirectories.GetCount() !=0;
+}
+
+#endif // FEATURE_CORECLR
+
+inline BOOL AppDomain::CanReversePInvokeEnter()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_ReversePInvokeCanEnter;
+}
+
+inline void AppDomain::SetReversePInvokeCannotEnter()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_ReversePInvokeCanEnter=FALSE;
+}
+
+inline bool AppDomain::MustForceTrivialWaitOperations()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_ForceTrivialWaitOperations;
+}
+
+inline void AppDomain::SetForceTrivialWaitOperations()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_ForceTrivialWaitOperations = true;
+}
+
+inline PTR_LoaderHeap AppDomain::GetHighFrequencyHeap()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetLoaderAllocator()->GetHighFrequencyHeap();
+}
+
+inline PTR_LoaderHeap AppDomain::GetLowFrequencyHeap()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetLoaderAllocator()->GetLowFrequencyHeap();
+}
+
+inline PTR_LoaderHeap AppDomain::GetStubHeap()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetLoaderAllocator()->GetStubHeap();
+}
+
+inline PTR_LoaderAllocator AppDomain::GetLoaderAllocator()
+{
+ WRAPPER_NO_CONTRACT;
+ return PTR_LoaderAllocator(PTR_HOST_MEMBER_TADDR(AppDomain,this,m_LoaderAllocator));
+}
+
+#endif // !BINDER
+
+/* static */
+inline DWORD DomainLocalModule::DynamicEntry::GetOffsetOfDataBlob()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(DWORD(offsetof(NormalDynamicEntry, m_pDataBlob)) == offsetof(NormalDynamicEntry, m_pDataBlob));
+ return (DWORD)offsetof(NormalDynamicEntry, m_pDataBlob);
+}
+
+
+#endif // _APPDOMAIN_I
+
diff --git a/src/vm/appdomainconfigfactory.hpp b/src/vm/appdomainconfigfactory.hpp
new file mode 100644
index 0000000000..fc8cbb5f74
--- /dev/null
+++ b/src/vm/appdomainconfigfactory.hpp
@@ -0,0 +1,241 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef APPDOMAINCONFIGFACTORY_H
+#define APPDOMAINCONFIGFACTORY_H
+
+#include <xmlparser.h>
+#include <objbase.h>
+#include "unknwn.h"
+#include "../xmlparser/_reference.h"
+#include "../xmlparser/_unknown.h"
+
+#include "appdomain.hpp"
+
+#define ISWHITE(ch) (((ch) >= 0x09 && (ch) <= 0x0D) || (ch) == 0x20)
+
+#define CONST_STRING_AND_LEN(str) str, NumItems(str)-1
+
+
+extern int EEXMLStringCompare(const WCHAR *pStr1,
+ DWORD cchStr1,
+ const WCHAR *pStr2,
+ DWORD cchStr2);
+
+
+enum APPDOMAINPARSESTATE
+{
+ APPDOMAINPARSESTATE_INITIALIZED,
+ APPDOMAINPARSESTATE_RUNTIME,
+ APPDOMAINPARSESTATE_PREFERCOMINSTEADOFREMOTING,
+ APPDOMAINPARSESTATE_ENABLED,
+ APPDOMAINPARSESTATE_LEGACYMODE
+};
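+
+// The node factory below recognizes a config fragment of this shape
+// (reconstructed from the parse states; unrecognized attributes are ignored
+// for forward compatibility):
+//
+// <configuration>
+//   <runtime>
+//     <PreferComInsteadOfManagedRemoting enabled="true" legacyMode="false" />
+//   </runtime>
+// </configuration>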
+
+
+
+class AppDomainConfigFactory : public _unknown<IXMLNodeFactory, &IID_IXMLNodeFactory>
+{
+
+public:
+ AppDomainConfigFactory() : m_dwDepth(0), comorRemotingFlag(COMorRemoting_NotInitialized), m_appdomainParseState(APPDOMAINPARSESTATE_INITIALIZED)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ ~AppDomainConfigFactory()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ HRESULT STDMETHODCALLTYPE NotifyEvent(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ XML_NODEFACTORY_EVENT iEvt)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return S_OK;
+ }
+
+ HRESULT STDMETHODCALLTYPE BeginChildren(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR pNodeInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwDepth++;
+ return S_OK;
+ }
+
+ HRESULT STDMETHODCALLTYPE EndChildren(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ BOOL fEmptyNode,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR pNodeInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (!fEmptyNode)
+ m_dwDepth--;
+ return S_OK;
+ }
+
+ HRESULT STDMETHODCALLTYPE Error(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ HRESULT hrErrorCode,
+ /* [in] */ USHORT cNumRecs,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR * __RPC_FAR apNodeInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+ /*
+ UNUSED(pSource);
+ UNUSED(hrErrorCode);
+ UNUSED(cNumRecs);
+ UNUSED(apNodeInfo);
+ */
+ return hrErrorCode;
+ }
+
+ HRESULT STDMETHODCALLTYPE CreateNode(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ PVOID pNodeParent,
+ /* [in] */ USHORT cNumRecs,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR * __RPC_FAR apNodeInfo)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ if(m_dwDepth > 2)
+ {
+ return S_OK;
+ }
+
+ HRESULT hr = S_OK;
+ DWORD dwStringSize = 0;
+ WCHAR* pszString = NULL;
+ DWORD i;
+ BOOL fRuntimeKey = FALSE;
+ BOOL fVersion = FALSE;
+
+ for( i = 0; i < cNumRecs; i++) {
+
+ if(apNodeInfo[i]->dwType == XML_ELEMENT ||
+ apNodeInfo[i]->dwType == XML_ATTRIBUTE ||
+ apNodeInfo[i]->dwType == XML_PCDATA) {
+
+ dwStringSize = apNodeInfo[i]->ulLen;
+ pszString = (WCHAR*) apNodeInfo[i]->pwcText;
+ // Trim whitespace from both ends of the value.
+ // Note: never decrement dwStringSize when it is already 0; it is
+ // unsigned and would wrap around.
+ for(;*pszString && ISWHITE(*pszString) && dwStringSize>0; pszString++, dwStringSize--);
+ while( dwStringSize > 0 && ISWHITE(pszString[dwStringSize-1]))
+ dwStringSize--;
+
+ if (m_appdomainParseState == APPDOMAINPARSESTATE_INITIALIZED)
+ {
+ //look forward to <runtime>
+ if (m_dwDepth == 1 &&
+ apNodeInfo[i]->dwType == XML_ELEMENT &&
+ EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("runtime"))) == 0)
+ {
+ m_appdomainParseState = APPDOMAINPARSESTATE_RUNTIME;
+ }
+ return S_OK;
+ }
+ else if (m_appdomainParseState == APPDOMAINPARSESTATE_RUNTIME)
+ {
+ // look forward to <PreferComInsteadOfManagedRemoting enabled="true"/>
+ if (m_dwDepth == 2 &&
+ apNodeInfo[i]->dwType == XML_ELEMENT &&
+ EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("PreferComInsteadOfManagedRemoting"))) == 0)
+ {
+ m_appdomainParseState = APPDOMAINPARSESTATE_PREFERCOMINSTEADOFREMOTING;
+ continue;
+ }
+ // if we have finished parsing <runtime>, abort the rest of the parse
+ if (m_dwDepth <= 1)
+ pSource->Abort(NULL);
+ return S_OK;
+ }
+ else if (m_appdomainParseState == APPDOMAINPARSESTATE_PREFERCOMINSTEADOFREMOTING)
+ {
+ // require enabled="true"/> or legacyMode="true"/>
+ if (m_dwDepth == 2 &&
+ apNodeInfo[i]->dwType == XML_ATTRIBUTE)
+ {
+ if (EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("enabled"))) == 0)
+ {
+ m_appdomainParseState = APPDOMAINPARSESTATE_ENABLED;
+ }
+ if (EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("legacyMode"))) == 0)
+ {
+ m_appdomainParseState = APPDOMAINPARSESTATE_LEGACYMODE;
+ }
+ }
+
+ // ignore unrecognized attributes (forward compat)
+ continue;
+ }
+ else if (m_appdomainParseState == APPDOMAINPARSESTATE_ENABLED || m_appdomainParseState == APPDOMAINPARSESTATE_LEGACYMODE)
+ {
+ // require "true" /> or "false" />
+ if (m_dwDepth == 2 &&
+ apNodeInfo[i]->dwType == XML_PCDATA)
+ {
+ if (EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("true"))) == 0)
+ {
+ if (m_appdomainParseState == APPDOMAINPARSESTATE_LEGACYMODE)
+ {
+ // LegacyMode does not override the "master switch"
+ if (comorRemotingFlag != COMorRemoting_COM)
+ comorRemotingFlag = COMorRemoting_LegacyMode;
+ }
+ else
+ {
+ comorRemotingFlag = COMorRemoting_COM;
+ }
+ }
+ else if (EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("false"))) == 0)
+ {
+ if (m_appdomainParseState == APPDOMAINPARSESTATE_ENABLED)
+ {
+ // we do report that the "master switch" is explicitly false
+ if (comorRemotingFlag == COMorRemoting_NotInitialized)
+ comorRemotingFlag = COMorRemoting_Remoting;
+ }
+ }
+
+ m_appdomainParseState = APPDOMAINPARSESTATE_PREFERCOMINSTEADOFREMOTING;
+ continue;
+ }
+ pSource->Abort(NULL);
+ return S_OK;
+ }
+ }
+ }
+ return hr;
+ }
+
+ COMorRemotingFlag GetCOMorRemotingFlag()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return comorRemotingFlag;
+ }
+
+private:
+ DWORD m_dwDepth;
+ COMorRemotingFlag comorRemotingFlag;
+ APPDOMAINPARSESTATE m_appdomainParseState;
+
+};
+
+#endif // APPDOMAINCONFIGFACTORY_H
diff --git a/src/vm/appdomainhelper.cpp b/src/vm/appdomainhelper.cpp
new file mode 100644
index 0000000000..6a21713f49
--- /dev/null
+++ b/src/vm/appdomainhelper.cpp
@@ -0,0 +1,547 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+//
+
+#include "common.h"
+
+#ifdef FEATURE_REMOTING
+
+#include "appdomainhelper.h"
+#include "appdomain.inl"
+
+void AppDomainHelper::CopyEncodingToByteArray(IN PBYTE pbData,
+ IN DWORD cbData,
+ OUT OBJECTREF* pArray)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(cbData==0 || pbData!=NULL);
+ PRECONDITION(CheckPointer(pArray));
+ }
+ CONTRACTL_END;
+ PREFIX_ASSUME(pArray != NULL);
+
+ U1ARRAYREF pObj;
+
+ if(cbData) {
+ pObj = (U1ARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_U1,cbData);
+ memcpyNoGCRefs(pObj->m_Array, pbData, cbData);
+ *pArray = (OBJECTREF) pObj;
+ } else
+ *pArray = NULL;
+
+ VALIDATEOBJECTREF(*pArray);
+}
+
+
+void AppDomainHelper::CopyByteArrayToEncoding(IN U1ARRAYREF* pArray,
+ OUT PBYTE* ppbData,
+ OUT DWORD* pcbData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(pArray!=NULL);
+ PRECONDITION(ppbData!=NULL);
+ PRECONDITION(pcbData!=NULL);
+ }
+ CONTRACTL_END;
+
+ VALIDATEOBJECTREF(*pArray);
+
+ if (*pArray == NULL) {
+ *ppbData = NULL;
+ *pcbData = 0;
+ return;
+ }
+
+ DWORD size = (*pArray)->GetNumComponents();
+ if(size) {
+ *ppbData = new BYTE[size];
+ *pcbData = size;
+
+ CopyMemory(*ppbData, (*pArray)->GetDirectPointerToNonObjectElements(), size);
+ } else {
+ // Make sure the out parameters are always initialized.
+ *ppbData = NULL;
+ *pcbData = 0;
+ }
+}
+
+
+struct MarshalObjectArgs : public CtxTransitionBaseArgs
+{
+ OBJECTREF* orObject;
+ U1ARRAYREF* porBlob;
+};
+
+void MarshalObjectADCallback(MarshalObjectArgs * args)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ MethodDescCallSite marshalObject(METHOD__APP_DOMAIN__MARSHAL_OBJECT);
+
+ ARG_SLOT argsCall[] = {
+ ObjToArgSlot(*(args->orObject))
+ };
+
+ *(args->porBlob) = (U1ARRAYREF) marshalObject.Call_RetOBJECTREF(argsCall);
+}
+
+
+// Marshal a single object into a serialized blob.
+void AppDomainHelper::MarshalObject(ADID appDomain,
+ IN OBJECTREF *orObject, // Object must be GC protected
+ OUT U1ARRAYREF *porBlob)
+{
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(orObject!=NULL);
+ PRECONDITION(porBlob!=NULL);
+ PRECONDITION(IsProtectedByGCFrame(orObject));
+ }
+ CONTRACTL_END;
+
+ VALIDATEOBJECTREF(*orObject);
+
+ MarshalObjectArgs args;
+ args.orObject = orObject;
+ args.porBlob = porBlob;
+
+ MakeCallWithPossibleAppDomainTransition(appDomain, (FPAPPDOMAINCALLBACK) MarshalObjectADCallback, &args);
+
+ VALIDATEOBJECTREF(*porBlob);
+
+}
+
+void AppDomainHelper::MarshalObject(IN OBJECTREF *orObject, // Object must be GC protected
+ OUT U1ARRAYREF *porBlob)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(orObject!=NULL);
+ PRECONDITION(porBlob!=NULL);
+ PRECONDITION(IsProtectedByGCFrame(orObject));
+ }
+ CONTRACTL_END;
+
+ VALIDATEOBJECTREF(*orObject);
+
+ MethodDescCallSite marshalObject(METHOD__APP_DOMAIN__MARSHAL_OBJECT);
+
+ ARG_SLOT argsCall[] = {
+ ObjToArgSlot(*orObject)
+ };
+
+ *porBlob = (U1ARRAYREF) marshalObject.Call_RetOBJECTREF(argsCall);
+
+ VALIDATEOBJECTREF(*porBlob);
+}
+
+// Marshal a single object into a serialized blob.
+void AppDomainHelper::MarshalObject(IN AppDomain *pDomain,
+ IN OBJECTREF *orObject, // Object must be GC protected
+ OUT BYTE **ppbBlob,
+ OUT DWORD *pcbBlob)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pDomain!=NULL);
+ PRECONDITION(orObject!=NULL);
+ PRECONDITION(ppbBlob!=NULL);
+ PRECONDITION(pcbBlob!=NULL);
+ PRECONDITION(IsProtectedByGCFrame(orObject));
+ }
+ CONTRACTL_END;
+
+ VALIDATEOBJECTREF(*orObject);
+
+ U1ARRAYREF orBlob = NULL;
+
+ GCPROTECT_BEGIN(orBlob);
+
+ MethodDescCallSite marshalObject(METHOD__APP_DOMAIN__MARSHAL_OBJECT);
+
+ ENTER_DOMAIN_PTR(pDomain,ADV_RUNNINGIN)
+ {
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(*orObject)
+ };
+
+ orBlob = (U1ARRAYREF) marshalObject.Call_RetOBJECTREF(args);
+ }
+ END_DOMAIN_TRANSITION;
+
+ if (orBlob != NULL)
+ CopyByteArrayToEncoding(&orBlob,
+ ppbBlob,
+ pcbBlob);
+ GCPROTECT_END();
+}
+
+// Marshal two objects into serialized blobs.
+void AppDomainHelper::MarshalObjects(IN AppDomain *pDomain,
+ IN OBJECTREF *orObject1,
+ IN OBJECTREF *orObject2,
+ OUT BYTE **ppbBlob1,
+ OUT DWORD *pcbBlob1,
+ OUT BYTE **ppbBlob2,
+ OUT DWORD *pcbBlob2)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pDomain!=NULL);
+ PRECONDITION(orObject1!=NULL);
+ PRECONDITION(ppbBlob1!=NULL);
+ PRECONDITION(pcbBlob1!=NULL);
+ PRECONDITION(orObject2!=NULL);
+ PRECONDITION(ppbBlob2!=NULL);
+ PRECONDITION(pcbBlob2!=NULL);
+ PRECONDITION(IsProtectedByGCFrame(orObject1));
+ PRECONDITION(IsProtectedByGCFrame(orObject2));
+ }
+ CONTRACTL_END;
+
+ VALIDATEOBJECTREF(*orObject1);
+ VALIDATEOBJECTREF(*orObject2);
+
+ struct _gc {
+ U1ARRAYREF orBlob1;
+ U1ARRAYREF orBlob2;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ MethodDescCallSite marshalObjects(METHOD__APP_DOMAIN__MARSHAL_OBJECTS);
+
+ ENTER_DOMAIN_PTR(pDomain,ADV_RUNNINGIN)
+ {
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(*orObject1),
+ ObjToArgSlot(*orObject2),
+ PtrToArgSlot(&gc.orBlob2),
+ };
+
+ gc.orBlob1 = (U1ARRAYREF) marshalObjects.Call_RetOBJECTREF(args);
+ }
+ END_DOMAIN_TRANSITION;
+
+ if (gc.orBlob1 != NULL)
+ {
+ CopyByteArrayToEncoding(&gc.orBlob1,
+ ppbBlob1,
+ pcbBlob1);
+ }
+
+ if (gc.orBlob2 != NULL)
+ {
+ CopyByteArrayToEncoding(&gc.orBlob2,
+ ppbBlob2,
+ pcbBlob2);
+ }
+
+ GCPROTECT_END();
+}
+
+// Unmarshal a single object from a serialized blob.
+// Callers must GC protect both porBlob and porObject.
+void AppDomainHelper::UnmarshalObject(IN AppDomain *pDomain,
+ IN U1ARRAYREF *porBlob, // Object must be GC protected
+ OUT OBJECTREF *porObject) // Object must be GC protected
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pDomain!=NULL);
+ PRECONDITION(porBlob!=NULL);
+ PRECONDITION(porObject!=NULL);
+ PRECONDITION(IsProtectedByGCFrame(porBlob));
+ PRECONDITION(IsProtectedByGCFrame(porObject));
+ }
+ CONTRACTL_END;
+
+ VALIDATEOBJECTREF(*porBlob);
+
+ MethodDescCallSite unmarshalObject(METHOD__APP_DOMAIN__UNMARSHAL_OBJECT);
+
+ ENTER_DOMAIN_PTR(pDomain,ADV_RUNNINGIN)
+ {
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(*porBlob)
+ };
+
+ *porObject = unmarshalObject.Call_RetOBJECTREF(args);
+ }
+ END_DOMAIN_TRANSITION;
+
+ VALIDATEOBJECTREF(*porObject);
+}
+
+// Unmarshal a single object from a serialized blob.
+void AppDomainHelper::UnmarshalObject(IN AppDomain *pDomain,
+ IN BYTE *pbBlob,
+ IN DWORD cbBlob,
+ OUT OBJECTREF *porObject)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pDomain!=NULL);
+ PRECONDITION(porObject!=NULL);
+ PRECONDITION(IsProtectedByGCFrame(porObject));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF orBlob = NULL;
+
+ MethodDescCallSite unmarshalObject(METHOD__APP_DOMAIN__UNMARSHAL_OBJECT);
+
+ ENTER_DOMAIN_PTR(pDomain,ADV_RUNNINGIN)
+ {
+ GCPROTECT_BEGIN(orBlob);
+
+ AppDomainHelper::CopyEncodingToByteArray(pbBlob,
+ cbBlob,
+ &orBlob);
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(orBlob)
+ };
+
+ *porObject = unmarshalObject.Call_RetOBJECTREF(args);
+
+ GCPROTECT_END();
+ }
+ END_DOMAIN_TRANSITION;
+
+ VALIDATEOBJECTREF(*porObject);
+}
+
+// Unmarshal two objects from serialized blobs.
+void AppDomainHelper::UnmarshalObjects(IN AppDomain *pDomain,
+ IN BYTE *pbBlob1,
+ IN DWORD cbBlob1,
+ IN BYTE *pbBlob2,
+ IN DWORD cbBlob2,
+ OUT OBJECTREF *porObject1,
+ OUT OBJECTREF *porObject2)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pDomain!=NULL);
+ PRECONDITION(porObject1!=NULL);
+ PRECONDITION(porObject2!=NULL);
+ PRECONDITION(IsProtectedByGCFrame(porObject1));
+ PRECONDITION(IsProtectedByGCFrame(porObject2));
+ }
+ CONTRACTL_END;
+
+ MethodDescCallSite unmarshalObjects(METHOD__APP_DOMAIN__UNMARSHAL_OBJECTS);
+
+ struct _gc {
+ OBJECTREF orBlob1;
+ OBJECTREF orBlob2;
+ OBJECTREF orObject2;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ ENTER_DOMAIN_PTR(pDomain,ADV_RUNNINGIN)
+ {
+
+ GCPROTECT_BEGIN(gc);
+
+ AppDomainHelper::CopyEncodingToByteArray(pbBlob1,
+ cbBlob1,
+ &gc.orBlob1);
+
+ AppDomainHelper::CopyEncodingToByteArray(pbBlob2,
+ cbBlob2,
+ &gc.orBlob2);
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(gc.orBlob1),
+ ObjToArgSlot(gc.orBlob2),
+ PtrToArgSlot(&gc.orObject2),
+ };
+
+ *porObject1 = unmarshalObjects.Call_RetOBJECTREF(args);
+ *porObject2 = gc.orObject2;
+
+ GCPROTECT_END();
+ }
+ END_DOMAIN_TRANSITION;
+
+ VALIDATEOBJECTREF(*porObject1);
+ VALIDATEOBJECTREF(*porObject2);
+}
+
+// Copy an object from the given appdomain into the current appdomain.
+OBJECTREF AppDomainHelper::CrossContextCopyFrom(IN ADID dwDomainId,
+ IN OBJECTREF *orObject) // Object must be GC protected
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(orObject!=NULL);
+ PRECONDITION(IsProtectedByGCFrame(orObject));
+ }
+ CONTRACTL_END;
+
+ struct _gc
+ {
+ U1ARRAYREF orBlob;
+ OBJECTREF pResult;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ AppDomainHelper::MarshalObject(dwDomainId, orObject, &gc.orBlob);
+ AppDomainHelper::UnmarshalObject(GetAppDomain(), &gc.orBlob, &gc.pResult);
+ GCPROTECT_END();
+ VALIDATEOBJECTREF(gc.pResult);
+ return gc.pResult;
+}
+
+// Copy an object from the given appdomain into the current appdomain.
+OBJECTREF AppDomainHelper::CrossContextCopyTo(IN ADID dwDomainId,
+ IN OBJECTREF *orObject) // Object must be GC protected
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(orObject!=NULL);
+ PRECONDITION(IsProtectedByGCFrame(orObject));
+ }
+ CONTRACTL_END;
+
+
+ struct _gc
+ {
+ U1ARRAYREF orBlob;
+ OBJECTREF pResult;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ AppDomainHelper::MarshalObject(orObject, &gc.orBlob);
+ ENTER_DOMAIN_ID(dwDomainId);
+ AppDomainHelper::UnmarshalObject(GetAppDomain(),&gc.orBlob, &gc.pResult);
+ END_DOMAIN_TRANSITION;
+ GCPROTECT_END();
+ VALIDATEOBJECTREF(gc.pResult);
+ return gc.pResult;
+
+}
+
+// Copy an object from the given appdomain into the current appdomain.
+OBJECTREF AppDomainHelper::CrossContextCopyFrom(IN AppDomain *pDomain,
+ IN OBJECTREF *orObject) // Object must be GC protected
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(orObject!=NULL);
+ PRECONDITION(IsProtectedByGCFrame(orObject));
+ PRECONDITION(pDomain!=NULL);
+ PRECONDITION(pDomain != GetAppDomain());
+ }
+ CONTRACTL_END;
+
+ VALIDATEOBJECTREF(*orObject);
+
+ struct _gc {
+ U1ARRAYREF orBlob;
+ OBJECTREF result;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ ENTER_DOMAIN_PTR(pDomain, ADV_RUNNINGIN);
+ AppDomainHelper::MarshalObject(orObject, &gc.orBlob);
+ END_DOMAIN_TRANSITION;
+ AppDomainHelper::UnmarshalObject(GetAppDomain(),&gc.orBlob, &gc.result);
+ GCPROTECT_END();
+
+ VALIDATEOBJECTREF(gc.result);
+
+ return gc.result;
+}
+
+// Copy an object to the given appdomain from the current appdomain.
+OBJECTREF AppDomainHelper::CrossContextCopyTo(IN AppDomain *pDomain,
+ IN OBJECTREF *orObject) // Object must be GC protected
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(orObject!=NULL);
+ PRECONDITION(IsProtectedByGCFrame(orObject));
+ PRECONDITION(pDomain!=NULL);
+ PRECONDITION(pDomain != GetAppDomain());
+ }
+ CONTRACTL_END;
+
+ VALIDATEOBJECTREF(*orObject);
+
+ struct _gc {
+ U1ARRAYREF orBlob;
+ OBJECTREF result;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ AppDomainHelper::MarshalObject(orObject, &gc.orBlob);
+ AppDomainHelper::UnmarshalObject(pDomain, &gc.orBlob, &gc.result);
+ GCPROTECT_END();
+
+ VALIDATEOBJECTREF(gc.result);
+
+ return gc.result;
+}
+
+#endif // FEATURE_REMOTING
+
diff --git a/src/vm/appdomainhelper.h b/src/vm/appdomainhelper.h
new file mode 100644
index 0000000000..26a6524238
--- /dev/null
+++ b/src/vm/appdomainhelper.h
@@ -0,0 +1,372 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _APPDOMAIN_HELPER_H_
+#define _APPDOMAIN_HELPER_H_
+
+#ifndef FEATURE_REMOTING
+#error FEATURE_REMOTING is not set, please do not include appdomainhelper.h
+#endif
+
+// Helpers for marshaling objects across appdomain boundaries by serializing
+// them into blobs and deserializing them in the target domain.
+//
+
+class AppDomainHelper {
+
+ friend class MarshalCache;
+
+ // A pair of helpers to move serialization info between a managed
+ // byte array and an unmanaged blob.
+ static void CopyEncodingToByteArray(IN PBYTE pbData,
+ IN DWORD cbData,
+ OUT OBJECTREF* pArray);
+
+ static void CopyByteArrayToEncoding(IN U1ARRAYREF* pArray,
+ OUT PBYTE* ppbData,
+ OUT DWORD* pcbData);
+
+public:
+ // Marshal a single object into a serialized blob.
+ static void MarshalObject(IN OBJECTREF *orObject,
+ OUT U1ARRAYREF *porBlob);
+
+ static void MarshalObject(IN ADID pDomain,
+ IN OBJECTREF *orObject,
+ OUT U1ARRAYREF *porBlob);
+ // Marshal one object into a serialized blob.
+ static void MarshalObject(IN AppDomain *pDomain,
+ IN OBJECTREF *orObject,
+ OUT BYTE **ppbBlob,
+ OUT DWORD *pcbBlob);
+
+ // Marshal two objects into serialized blobs.
+ static void MarshalObjects(IN AppDomain *pDomain,
+ IN OBJECTREF *orObject1,
+ IN OBJECTREF *orObject2,
+ OUT BYTE **ppbBlob1,
+ OUT DWORD *pcbBlob1,
+ OUT BYTE **ppbBlob2,
+ OUT DWORD *pcbBlob2);
+
+ // Unmarshal a single object from a serialized blob.
+ static void UnmarshalObject(IN AppDomain *pDomain,
+ IN U1ARRAYREF *porBlob,
+ OUT OBJECTREF *porObject);
+
+ // Unmarshal a single object from a serialized blob.
+ static void UnmarshalObject(IN AppDomain *pDomain,
+ IN BYTE *pbBlob,
+ IN DWORD cbBlob,
+ OUT OBJECTREF *porObject);
+
+ // Unmarshal two objects from serialized blobs.
+ static void UnmarshalObjects(IN AppDomain *pDomain,
+ IN BYTE *pbBlob1,
+ IN DWORD cbBlob1,
+ IN BYTE *pbBlob2,
+ IN DWORD cbBlob2,
+ OUT OBJECTREF *porObject1,
+ OUT OBJECTREF *porObject2);
+
+ // Copy an object from the given appdomain into the current appdomain.
+ static OBJECTREF CrossContextCopyFrom(IN AppDomain *pAppDomain,
+ IN OBJECTREF *orObject);
+ // Copy an object to the given appdomain from the current appdomain.
+ static OBJECTREF CrossContextCopyTo(IN AppDomain *pAppDomain,
+ IN OBJECTREF *orObject);
+ // Copy an object from the given appdomain into the current appdomain.
+ static OBJECTREF CrossContextCopyFrom(IN ADID dwDomainId,
+ IN OBJECTREF *orObject);
+ // Copy an object to the given appdomain from the current appdomain.
+ static OBJECTREF CrossContextCopyTo(IN ADID dwDomainId,
+ IN OBJECTREF *orObject);
+
+};
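+
+// A usage sketch (the caller must be in cooperative GC mode with the object
+// GC-protected, per the contracts in appdomainhelper.cpp):
+//
+//     struct _gc { OBJECTREF orObject; OBJECTREF orCopy; } gc;
+//     ZeroMemory(&gc, sizeof(gc));
+//     GCPROTECT_BEGIN(gc);
+//     gc.orObject = ...;
+//     gc.orCopy = AppDomainHelper::CrossContextCopyTo(pTargetDomain, &gc.orObject);
+//     GCPROTECT_END();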
+
+// Cache the bits needed to serialize/deserialize managed objects that will be
+// passed across appdomain boundaries during a stackwalk. The serialization is
+// performed lazily the first time it's needed and remains valid throughout the
+// stackwalk. The last deserialized object is cached and tagged with its
+// appdomain context. It's valid as long as we're walking frames within the same
+// appdomain.
+//
+class MarshalCache
+{
+public:
+ MarshalCache()
+ {
+ LIMITED_METHOD_CONTRACT;
+ ZeroMemory(this, sizeof(*this));
+ }
+
+ ~MarshalCache()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (m_pbObj1)
+ delete [] m_pbObj1;
+ if (m_pbObj2)
+ delete [] m_pbObj2;
+ }
+
+ void EnsureSerializationOK()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ if ((m_sGC.m_orInput1 != NULL && (m_pbObj1 == NULL || m_cbObj1 == 0)) ||
+ (m_sGC.m_orInput2 != NULL && (m_pbObj2 == NULL || m_cbObj2 == 0)))
+ {
+ // Serialization went bad -> Throw exception indicating so.
+ COMPlusThrow(kSecurityException, IDS_UNMARSHALABLE_DEMAND_OBJECT);
+ }
+ }
+
+ void EnsureDeserializationOK()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ if ((m_pbObj1 != NULL && m_sGC.m_orOutput1 == NULL ) ||
+ (m_pbObj2 != NULL && m_sGC.m_orOutput2 == NULL ) )
+ {
+ // DeSerialization went bad -> Throw exception indicating so.
+ COMPlusThrow(kSecurityException, IDS_UNMARSHALABLE_DEMAND_OBJECT);
+ }
+ }
+
+#ifndef DACCESS_COMPILE
+
+ // Set the original value of the first cached object.
+ void SetObject(OBJECTREF orObject)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pOriginalDomain = ::GetAppDomain();
+ m_sGC.m_orInput1 = orObject;
+ }
+
+ // Set the original values of both cached objects.
+ void SetObjects(OBJECTREF orObject1, OBJECTREF orObject2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pOriginalDomain = ::GetAppDomain();
+ m_sGC.m_orInput1 = orObject1;
+ m_sGC.m_orInput2 = orObject2;
+ }
+
+#endif //!DACCESS_COMPILE
+
+ // Get a copy of the first object suitable for use in the given appdomain.
+ OBJECTREF GetObject(AppDomain *pDomain)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ CheckADValidity(pDomain, ADV_RUNNINGIN);
+
+ // No transition -- just return original object.
+ if (pDomain == m_pOriginalDomain) {
+ if (m_fObjectUpdated)
+ UpdateObjectFinish();
+ return m_sGC.m_orInput1;
+ }
+
+ // We've already deserialized the object into the correct context.
+ if (pDomain == m_pCachedDomain)
+ return m_sGC.m_orOutput1;
+
+ // If we've updated the object in a different appdomain from the one we
+ // originally started in, the cached object will be more up to date than
+ // the original. Resync the objects.
+ if (m_fObjectUpdated)
+ UpdateObjectFinish();
+
+ // Check whether we've serialized the original input object yet.
+ if (m_pbObj1 == NULL && m_sGC.m_orInput1 != NULL)
+ {
+ AppDomainHelper::MarshalObject(m_pOriginalDomain,
+ &m_sGC.m_orInput1,
+ &m_pbObj1,
+ &m_cbObj1);
+ EnsureSerializationOK();
+ }
+
+ // Deserialize into the correct context.
+ if (m_pbObj1 != NULL)
+ {
+ AppDomainHelper::UnmarshalObject(pDomain,
+ m_pbObj1,
+ m_cbObj1,
+ &m_sGC.m_orOutput1);
+ EnsureDeserializationOK();
+ }
+ m_pCachedDomain = pDomain;
+
+ return m_sGC.m_orOutput1;
+ }
+
+ // As above, but retrieve both objects.
+ OBJECTREF GetObjects(AppDomain *pDomain, OBJECTREF *porObject2)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ CheckADValidity(pDomain, ADV_RUNNINGIN);
+ // No transition -- just return original objects.
+ if (pDomain == m_pOriginalDomain) {
+ if (m_fObjectUpdated)
+ UpdateObjectFinish();
+ *porObject2 = m_sGC.m_orInput2;
+ return m_sGC.m_orInput1;
+ }
+
+ // We've already deserialized the objects into the correct context.
+ if (pDomain == m_pCachedDomain) {
+ *porObject2 = m_sGC.m_orOutput2;
+ return m_sGC.m_orOutput1;
+ }
+
+ // If we've updated the object in a different appdomain from the one we
+ // originally started in, the cached object will be more up to date than
+ // the original. Resync the objects.
+ if (m_fObjectUpdated)
+ UpdateObjectFinish();
+
+ // Check whether we've serialized the original input objects yet.
+ if ((m_pbObj1 == NULL && m_sGC.m_orInput1 != NULL) ||
+ (m_pbObj2 == NULL && m_sGC.m_orInput2 != NULL))
+ {
+ AppDomainHelper::MarshalObjects(m_pOriginalDomain,
+ &m_sGC.m_orInput1,
+ &m_sGC.m_orInput2,
+ &m_pbObj1,
+ &m_cbObj1,
+ &m_pbObj2,
+ &m_cbObj2);
+ EnsureSerializationOK();
+
+ }
+ if (m_pbObj1 != NULL || m_pbObj2 != NULL)
+ {
+ // Deserialize into the correct context.
+ AppDomainHelper::UnmarshalObjects(pDomain,
+ m_pbObj1,
+ m_cbObj1,
+ m_pbObj2,
+ m_cbObj2,
+ &m_sGC.m_orOutput1,
+ &m_sGC.m_orOutput2);
+ EnsureDeserializationOK();
+ }
+ m_pCachedDomain = pDomain;
+
+ *porObject2 = m_sGC.m_orOutput2;
+ return m_sGC.m_orOutput1;
+ }
+
+ // Change the first object (updating the caching information
+ // appropriately).
+ void UpdateObject(AppDomain *pDomain, OBJECTREF orObject)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // The cached serialized blob is now useless.
+ CheckADValidity(pDomain, ADV_RUNNINGIN);
+ if (m_pbObj1)
+ delete [] m_pbObj1;
+ m_pbObj1 = NULL;
+ m_cbObj1 = 0;
+
+ // The object we have now is valid in its own appdomain, so place that
+ // in the object cache.
+ m_pCachedDomain = pDomain;
+ m_sGC.m_orOutput1 = orObject;
+
+ // If the object is updated in the original context, just use the new
+ // value as is. In this case we have the data to re-marshal the updated
+ // object as normal, so we can consider the cache fully updated and exit
+ // now.
+ if (pDomain == m_pOriginalDomain) {
+ m_sGC.m_orInput1 = orObject;
+ m_fObjectUpdated = false;
+ return;
+ }
+
+ // We want to avoid re-marshaling the updated value as long as possible
+ // (it might be updated again before we need its value in a different
+ // context). So set a flag to indicate that the object must be
+ // re-marshaled when the value is queried in a new context.
+ m_fObjectUpdated = true;
+ }
+
+ // This structure is public only so that it can be GC protected. Do not
+ // access the fields directly; they change in an unpredictable fashion due
+ // to the lazy caching algorithm.
+ struct _gc {
+ OBJECTREF m_orInput1;
+ OBJECTREF m_orInput2;
+ OBJECTREF m_orOutput1;
+ OBJECTREF m_orOutput2;
+ } m_sGC;
+
+private:
+
+ // Called after one or more calls to UpdateObject to marshal the updated
+ // object back into its original context (it's assumed we're called in this
+ // context).
+ void UpdateObjectFinish()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_fObjectUpdated && m_pbObj1 == NULL);
+ }
+ CONTRACTL_END;
+ AppDomainHelper::MarshalObject(m_pCachedDomain,
+ &m_sGC.m_orOutput1,
+ &m_pbObj1,
+ &m_cbObj1);
+ AppDomainHelper::UnmarshalObject(m_pOriginalDomain,
+ m_pbObj1,
+ m_cbObj1,
+ &m_sGC.m_orInput1);
+ m_fObjectUpdated = false;
+ }
+
+ BYTE *m_pbObj1;
+ DWORD m_cbObj1;
+ BYTE *m_pbObj2;
+ DWORD m_cbObj2;
+ AppDomain *m_pCachedDomain;
+ AppDomain *m_pOriginalDomain;
+ bool m_fObjectUpdated;
+};
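+
+// A usage sketch for a cross-appdomain stackwalk (hedged; the cache's managed
+// references must be GC-protected via the public m_sGC member):
+//
+//     MarshalCache cache;
+//     GCPROTECT_BEGIN(cache.m_sGC);
+//     cache.SetObject(orDemand);          // captured in the starting domain
+//     // per frame:
+//     OBJECTREF orCur = cache.GetObject(pFrameDomain); // lazy (de)serialization
+//     GCPROTECT_END();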
+
+#endif
diff --git a/src/vm/appdomainnative.cpp b/src/vm/appdomainnative.cpp
new file mode 100644
index 0000000000..5e4656600e
--- /dev/null
+++ b/src/vm/appdomainnative.cpp
@@ -0,0 +1,1778 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#include "common.h"
+#include "appdomain.hpp"
+#include "appdomainnative.hpp"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#include "appdomainhelper.h"
+#endif
+#include "security.h"
+#include "vars.hpp"
+#include "eeconfig.h"
+#include "appdomain.inl"
+#include "eventtrace.h"
+#ifndef FEATURE_CORECLR
+#include "comutilnative.h"
+#endif // !FEATURE_CORECLR
+#if defined(FEATURE_APPX)
+#include "appxutil.h"
+#endif // FEATURE_APPX
+#if defined(FEATURE_APPX_BINDER) && defined(FEATURE_HOSTED_BINDER)
+#include "clrprivbinderappx.h"
+#include "clrprivtypecachewinrt.h"
+#endif // FEATURE_APPX_BINDER && FEATURE_HOSTED_BINDER
+#ifdef FEATURE_VERSIONING
+#include "../binder/inc/clrprivbindercoreclr.h"
+#endif
+
+
+//************************************************************************
+inline AppDomain *AppDomainNative::ValidateArg(APPDOMAINREF pThis)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ DISABLED(GC_TRIGGERS); // can't use this in an FCALL because we're in forbid-GC mode until we set up a HELPER_METHOD_FRAME.
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ if (pThis == NULL)
+ {
+ COMPlusThrow(kNullReferenceException, W("NullReference_This"));
+ }
+
+ // Should not get here with a transparent proxy for the this pointer;
+ // we should always have called through onto the real object.
+#ifdef FEATURE_REMOTING
+ _ASSERTE(! CRemotingServices::IsTransparentProxy(OBJECTREFToObject(pThis)));
+#endif
+
+ AppDomain* pDomain = (AppDomain*)pThis->GetDomain();
+
+ if(!pDomain)
+ {
+ COMPlusThrow(kNullReferenceException, W("NullReference_This"));
+ }
+
+ // can only be accessed from within current domain
+ _ASSERTE(GetAppDomain() == pDomain);
+
+ // Should not get here with an invalid appdomain. Once we unload it, we
+ // won't let anyone else in, and any threads that are already in will be
+ // unwound.
+ _ASSERTE(SystemDomain::GetAppDomainAtIndex(pDomain->GetIndex()) != NULL);
+ return pDomain;
+}
+
+
+#ifdef FEATURE_REMOTING
+//************************************************************************
+FCIMPL5(Object*, AppDomainNative::CreateDomain, StringObject* strFriendlyNameUNSAFE, Object* appdomainSetupUNSAFE, Object* providedEvidenceUNSAFE, Object* creatorsEvidenceUNSAFE, void* parentSecurityDescriptor)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ OBJECTREF retVal;
+ STRINGREF strFriendlyName;
+ OBJECTREF appdomainSetup;
+ OBJECTREF providedEvidence;
+ OBJECTREF creatorsEvidence;
+ OBJECTREF entryPointProxy;
+ } gc;
+
+ ZeroMemory(&gc, sizeof(gc));
+ gc.strFriendlyName=(STRINGREF)strFriendlyNameUNSAFE;
+ gc.appdomainSetup=(OBJECTREF)appdomainSetupUNSAFE;
+ gc.providedEvidence=(OBJECTREF)providedEvidenceUNSAFE;
+ gc.creatorsEvidence=(OBJECTREF)creatorsEvidenceUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ CreateDomainHelper(&gc.strFriendlyName, &gc.appdomainSetup, &gc.providedEvidence, &gc.creatorsEvidence, parentSecurityDescriptor, &gc.entryPointProxy, &gc.retVal);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(gc.retVal);
+}
+FCIMPLEND
+
+FCIMPL5(Object*, AppDomainNative::CreateInstance, StringObject* strFriendlyNameUNSAFE, Object* appdomainSetupUNSAFE, Object* providedEvidenceUNSAFE, Object* creatorsEvidenceUNSAFE, void* parentSecurityDescriptor)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ OBJECTREF retVal;
+ STRINGREF strFriendlyName;
+ OBJECTREF appdomainSetup;
+ OBJECTREF providedEvidence;
+ OBJECTREF creatorsEvidence;
+ OBJECTREF entryPointProxy;
+ } gc;
+
+ ZeroMemory(&gc, sizeof(gc));
+ gc.strFriendlyName=(STRINGREF)strFriendlyNameUNSAFE;
+ gc.appdomainSetup=(OBJECTREF)appdomainSetupUNSAFE;
+ gc.providedEvidence=(OBJECTREF)providedEvidenceUNSAFE;
+ gc.creatorsEvidence=(OBJECTREF)creatorsEvidenceUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ CreateDomainHelper(&gc.strFriendlyName, &gc.appdomainSetup, &gc.providedEvidence, &gc.creatorsEvidence, parentSecurityDescriptor, &gc.entryPointProxy, &gc.retVal);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(gc.entryPointProxy);
+}
+FCIMPLEND
+
+void AppDomainNative::CreateDomainHelper (STRINGREF* ppFriendlyName, OBJECTREF* ppAppdomainSetup, OBJECTREF* ppProvidedEvidence, OBJECTREF* ppCreatorsEvidence, void* parentSecurityDescriptor, OBJECTREF* pEntryPointProxy, OBJECTREF* pRetVal)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ PRECONDITION(IsProtectedByGCFrame(ppFriendlyName));
+ PRECONDITION(IsProtectedByGCFrame(ppAppdomainSetup));
+ PRECONDITION(IsProtectedByGCFrame(ppProvidedEvidence));
+ PRECONDITION(IsProtectedByGCFrame(ppCreatorsEvidence));
+ PRECONDITION(IsProtectedByGCFrame(pEntryPointProxy));
+ PRECONDITION(IsProtectedByGCFrame(pRetVal));
+ }
+ CONTRACTL_END;
+
+
+ AppDomainCreationHolder<AppDomain> pDomain;
+
+ // This helper will send the AppDomain creation notifications for the profiler / debugger.
+ // If it throws, its backout code will also send a notification.
+ // If it succeeds, then we still need to send an AppDomainCreationFinished notification.
+ AppDomain::CreateUnmanagedObject(pDomain);
+
+#ifdef PROFILING_SUPPORTED
+ EX_TRY
+#endif
+ {
+ OBJECTREF setupInfo=NULL;
+ GCPROTECT_BEGIN(setupInfo);
+
+ MethodDescCallSite prepareDataForSetup(METHOD__APP_DOMAIN__PREPARE_DATA_FOR_SETUP);
+
+ ARG_SLOT args[8];
+ args[0]=ObjToArgSlot(*ppFriendlyName);
+ args[1]=ObjToArgSlot(*ppAppdomainSetup);
+ args[2]=ObjToArgSlot(*ppProvidedEvidence);
+ args[3]=ObjToArgSlot(*ppCreatorsEvidence);
+ args[4]=PtrToArgSlot(parentSecurityDescriptor);
+ args[5]=PtrToArgSlot(NULL);
+ args[6]=PtrToArgSlot(NULL);
+ args[7]=PtrToArgSlot(NULL);
+
+ setupInfo = prepareDataForSetup.Call_RetOBJECTREF(args);
+
+#ifndef FEATURE_CORECLR
+ // We need to setup domain sorting before any other managed code runs in the domain, since that code
+ // could end up caching data based on the sorting mode of the domain.
+ pDomain->InitializeSorting(ppAppdomainSetup);
+ pDomain->InitializeHashing(ppAppdomainSetup);
+#endif
+
+ // We need to ensure that the AppDomainProxy is generated before we call into DoSetup, since
+ // GetAppDomainProxy will ensure that remoting is correctly configured in the domain. DoSetup can
+ // end up loading user assemblies into the domain, and those assemblies may require that remoting be
+ // setup already. For instance, C++/CLI applications may trigger the CRT to try to marshal a
+ // reference to the default domain into the current domain, which won't work correctly without this
+ // setup being done.
+ *pRetVal = pDomain->GetAppDomainProxy();
+
+ *pEntryPointProxy=pDomain->DoSetup(&setupInfo);
+
+
+ GCPROTECT_END();
+
+ pDomain->CacheStringsForDAC();
+ }
+
+#ifdef PROFILING_SUPPORTED
+ EX_HOOK
+ {
+ // Need the first assembly loaded in to get any data on an app domain.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->AppDomainCreationFinished((AppDomainID)(AppDomain *) pDomain, GET_EXCEPTION()->GetHR());
+ END_PIN_PROFILER();
+ }
+ }
+ EX_END_HOOK;
+
+ // Need the first assembly loaded in to get any data on an app domain.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->AppDomainCreationFinished((AppDomainID)(AppDomain*) pDomain, S_OK);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ ETW::LoaderLog::DomainLoad(pDomain, (LPWSTR)(*ppFriendlyName)->GetBuffer());
+
+ // DoneCreating releases ownership of AppDomain. After this call, there should be no access to pDomain.
+ pDomain.DoneCreating();
+}
+#endif // FEATURE_REMOTING
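+// Note on the notification pattern used by CreateDomainHelper above: the
+// profiler's AppDomainCreationFinished callback fires exactly once on either
+// path - with the failure HRESULT if setup throws, or with S_OK if it
+// completes. A minimal sketch of the same shape (hypothetical names):
+//
+//   EX_TRY
+//   {
+//       DoSetupWork();
+//   }
+//   EX_HOOK { NotifyFinished(GET_EXCEPTION()->GetHR()); }  // failure path
+//   EX_END_HOOK;
+//   NotifyFinished(S_OK);                                  // success path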
+
+void QCALLTYPE AppDomainNative::SetupDomainSecurity(QCall::AppDomainHandle pDomain,
+ QCall::ObjectHandleOnStack ohEvidence,
+ IApplicationSecurityDescriptor *pParentSecurityDescriptor,
+ BOOL fPublishAppDomain)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ struct
+ {
+ OBJECTREF orEvidence;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCX_COOP();
+ GCPROTECT_BEGIN(gc)
+ if (ohEvidence.m_ppObject != NULL)
+ {
+ gc.orEvidence = ObjectToOBJECTREF(*ohEvidence.m_ppObject);
+ }
+
+
+ // Set up the default AppDomain property.
+ IApplicationSecurityDescriptor *pSecDesc = pDomain->GetSecurityDescriptor();
+
+ if (!pSecDesc->IsHomogeneous() && pDomain->IsDefaultDomain())
+ {
+ Security::SetDefaultAppDomainProperty(pSecDesc);
+ }
+ // Set up the evidence property in the VM side.
+ else
+ {
+ // If no evidence is provided, this new appdomain gets the same evidence as the creator.
+ //
+ // If no evidence is provided and this AppDomain is not homogeneous, then it is
+ // automatically also a default appdomain (for security grant set purposes).
+ //
+ // If evidence is provided, the new appdomain is not a default appdomain and
+ // we simply use the provided evidence.
+
+ if (gc.orEvidence == NULL)
+ {
+ _ASSERTE(pParentSecurityDescriptor == NULL || pParentSecurityDescriptor->IsDefaultAppDomainEvidence());
+
+ if (pSecDesc->IsHomogeneous())
+ {
+ // New domain gets default AD evidence
+ Security::SetDefaultAppDomainEvidenceProperty(pSecDesc);
+ }
+ else
+ {
+ // New domain gets to be a default AD
+ Security::SetDefaultAppDomainProperty(pSecDesc);
+ }
+ }
+ }
+
+#ifdef FEATURE_CAS_POLICY
+ if (gc.orEvidence != NULL)
+ {
+ pSecDesc->SetEvidence(gc.orEvidence);
+ }
+#endif // FEATURE_CAS_POLICY
+
+ // We need to downgrade the sharing level if the AppDomain is homogeneous and not fully trusted, or
+ // the AppDomain is in legacy mode. Effectively, all assemblies loaded into the domain must be
+ // fully trusted in order to allow non-GAC sharing.
+#ifdef FEATURE_FUSION
+ if (pDomain->GetSharePolicy() == AppDomain::SHARE_POLICY_ALWAYS)
+ {
+ bool fSandboxedHomogenousDomain = false;
+ if (pSecDesc->IsHomogeneous())
+ {
+ pSecDesc->Resolve();
+ fSandboxedHomogenousDomain = !pSecDesc->IsFullyTrusted();
+ }
+
+ if (fSandboxedHomogenousDomain || pSecDesc->IsLegacyCasPolicyEnabled())
+ {
+ // We may not be able to reduce sharing policy at this point, if we have already loaded
+ // some non-GAC assemblies as domain neutral. For this case we must regrettably fail
+ // the whole operation.
+ if (!pDomain->ReduceSharePolicyFromAlways())
+ {
+ ThrowHR(COR_E_CANNOT_SET_POLICY);
+ }
+ }
+ }
+#endif
+
+ // Now finish the initialization.
+ pSecDesc->FinishInitialization();
+
+ // Once the domain is published it is publicly available, so anything that a
+ // list interrogator might need access to (should it get hold of the
+ // appdomain) must be set up before the LoadDomain call below.
+ if (fPublishAppDomain)
+ SystemDomain::LoadDomain(pDomain);
+
+#ifdef _DEBUG
+ LOG((LF_APPDOMAIN, LL_INFO100, "AppDomainNative::CreateDomain domain [%d] %p %S\n", pDomain->GetIndex().m_dwIndex, (AppDomain*)pDomain, pDomain->GetFriendlyName()));
+#endif
+
+ GCPROTECT_END();
+
+ END_QCALL;
+}
+
+FCIMPL2(void, AppDomainNative::SetupFriendlyName, AppDomainBaseObject* refThisUNSAFE, StringObject* strFriendlyNameUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ APPDOMAINREF refThis;
+ STRINGREF strFriendlyName;
+ } gc;
+
+ gc.refThis = (APPDOMAINREF) refThisUNSAFE;
+ gc.strFriendlyName = (STRINGREF) strFriendlyNameUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc)
+
+ AppDomainRefHolder pDomain(ValidateArg(gc.refThis));
+ pDomain->AddRef();
+
+ // If the user created this domain, record that fact so the debugger doesn't
+ // reset the friendly name that was provided.
+ pDomain->SetIsUserCreatedDomain();
+
+ WCHAR* pFriendlyName = NULL;
+ Thread *pThread = GetThread();
+
+ CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+ if (gc.strFriendlyName != NULL) {
+ WCHAR* pString = NULL;
+ int iString;
+ gc.strFriendlyName->RefInterpretGetStringValuesDangerousForGC(&pString, &iString);
+ if (ClrSafeInt<int>::addition(iString, 1, iString))
+ {
+ pFriendlyName = new (&pThread->m_MarshalAlloc) WCHAR[(iString)];
+
+ // Check for a valid string allocation
+ if (pFriendlyName == (WCHAR*)-1)
+ pFriendlyName = NULL;
+ else
+ memcpy(pFriendlyName, pString, iString*sizeof(WCHAR));
+ }
+ }
+
+ pDomain->SetFriendlyName(pFriendlyName);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+#ifdef FEATURE_COMINTEROP
+
+FCIMPL1(void, AppDomainNative::SetDisableInterfaceCache, AppDomainBaseObject* refThisUNSAFE)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ DISABLED(GC_TRIGGERS); // can't use this in an FCALL because we're in forbid gc mode until we set up a H_M_F.
+ SO_TOLERANT;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ struct _gc
+ {
+ APPDOMAINREF refThis;
+ } gc;
+
+ gc.refThis = (APPDOMAINREF) refThisUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc)
+
+ AppDomainRefHolder pDomain(ValidateArg(gc.refThis));
+ pDomain->AddRef();
+
+ pDomain->SetDisableInterfaceCache();
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_FUSION
+FCIMPL1(LPVOID, AppDomainNative::GetFusionContext, AppDomainBaseObject* refThis)
+{
+ FCALL_CONTRACT;
+
+ LPVOID rv = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(rv);
+
+ AppDomain* pApp = ValidateArg((APPDOMAINREF)refThis);
+
+ rv = pApp->CreateFusionContext();
+
+ HELPER_METHOD_FRAME_END();
+
+ return rv;
+}
+FCIMPLEND
+#endif
+
+FCIMPL1(void*, AppDomainNative::GetSecurityDescriptor, AppDomainBaseObject* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ void* pvRetVal = NULL;
+ APPDOMAINREF refThis = (APPDOMAINREF) refThisUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+
+
+ pvRetVal = ValidateArg(refThis)->GetSecurityDescriptor();
+
+ HELPER_METHOD_FRAME_END();
+ return pvRetVal;
+}
+FCIMPLEND
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+FCIMPL2(void, AppDomainNative::UpdateLoaderOptimization, AppDomainBaseObject* refThisUNSAFE, DWORD optimization)
+{
+ FCALL_CONTRACT;
+
+ APPDOMAINREF refThis = (APPDOMAINREF) refThisUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_1(refThis);
+
+ ValidateArg(refThis)->SetSharePolicy((AppDomain::SharePolicy) (optimization & AppDomain::SHARE_POLICY_MASK));
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+#ifdef FEATURE_FUSION
+FCIMPL3(void, AppDomainNative::UpdateContextProperty, LPVOID fusionContext, StringObject* keyUNSAFE, Object* valueUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ STRINGREF key;
+ OBJECTREF value;
+ } gc;
+
+ gc.key = ObjectToSTRINGREF(keyUNSAFE);
+ gc.value = ObjectToOBJECTREF(valueUNSAFE);
+ _ASSERTE(gc.key != NULL);
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ IApplicationContext* pContext = (IApplicationContext*) fusionContext;
+
+ BOOL fFXOnly;
+ DWORD size = sizeof(fFXOnly);
+ HRESULT hr = pContext->Get(ACTAG_FX_ONLY, &fFXOnly, &size, 0);
+ if (hr == HRESULT_FROM_WIN32(ERROR_NOT_FOUND))
+ {
+ fFXOnly = FALSE;
+ hr = S_FALSE;
+ }
+ IfFailThrow(hr);
+
+ if (!fFXOnly)
+ {
+ DWORD lgth = gc.key->GetStringLength();
+ CQuickBytes qb;
+ LPWSTR key = (LPWSTR) qb.AllocThrows((lgth+1)*sizeof(WCHAR));
+ memcpy(key, gc.key->GetBuffer(), lgth*sizeof(WCHAR));
+ key[lgth] = W('\0');
+
+ AppDomain::SetContextProperty(pContext, key, &gc.value);
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+#endif // FEATURE_FUSION
+
+/* static */
+INT32 AppDomainNative::ExecuteAssemblyHelper(Assembly* pAssembly,
+ BOOL bCreatedConsole,
+ PTRARRAYREF *pStringArgs)
+{
+ STATIC_CONTRACT_THROWS;
+
+ struct Param
+ {
+ Assembly* pAssembly;
+ PTRARRAYREF *pStringArgs;
+ INT32 iRetVal;
+ } param;
+ param.pAssembly = pAssembly;
+ param.pStringArgs = pStringArgs;
+ param.iRetVal = 0;
+
+ EE_TRY_FOR_FINALLY(Param *, pParam, &param)
+ {
+ pParam->iRetVal = pParam->pAssembly->ExecuteMainMethod(pParam->pStringArgs);
+ }
+ EE_FINALLY
+ {
+#ifndef FEATURE_PAL
+ if(bCreatedConsole)
+ FreeConsole();
+#endif // !FEATURE_PAL
+ }
+ EE_END_FINALLY
+
+ return param.iRetVal;
+}
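+// ExecuteAssemblyHelper uses the Param-struct idiom common to the EE
+// exception-handling macros: locals needed inside the guarded body are bundled
+// into a single struct passed by pointer, because the macro-based try/finally
+// cannot capture locals directly. The same shape in miniature (hypothetical
+// names, sketch only):
+//
+//   struct Param { Assembly* pAsm; INT32 ret; } param = { pAssembly, 0 };
+//   EE_TRY_FOR_FINALLY(Param *, p, &param)
+//   {
+//       p->ret = Run(p->pAsm);
+//   }
+//   EE_FINALLY { Cleanup(); } EE_END_FINALLY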
+
+static void UpgradeLinkTimeCheckToLateBoundDemand(MethodDesc* pMeth)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ BOOL isEveryoneFullyTrusted = FALSE;
+
+ struct _gc
+ {
+ OBJECTREF refClassNonCasDemands;
+ OBJECTREF refClassCasDemands;
+ OBJECTREF refMethodNonCasDemands;
+ OBJECTREF refMethodCasDemands;
+ OBJECTREF refThrowable;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ isEveryoneFullyTrusted = Security::AllDomainsOnStackFullyTrusted();
+
+ // If all assemblies in the domain are fully trusted then we are not
+ // going to do any security checks anyway..
+ if (isEveryoneFullyTrusted)
+ {
+ goto Exit1;
+ }
+
+
+ if (pMeth->RequiresLinktimeCheck())
+ {
+ // Fetch link demand sets from all the places in metadata where we might
+ // find them (class and method). These might be split into CAS and non-CAS
+ // sets as well.
+ Security::RetrieveLinktimeDemands(pMeth,
+ &gc.refClassCasDemands,
+ &gc.refClassNonCasDemands,
+ &gc.refMethodCasDemands,
+ &gc.refMethodNonCasDemands);
+
+ if (gc.refClassCasDemands == NULL && gc.refClassNonCasDemands == NULL &&
+ gc.refMethodCasDemands == NULL && gc.refMethodNonCasDemands == NULL &&
+ isEveryoneFullyTrusted)
+ {
+ // All code access security demands will pass anyway.
+ goto Exit1;
+ }
+
+ // The following logic turns link demands on the target method into full
+ // stack walks in order to close security holes in poorly written
+ // reflection users.
+
+#ifdef FEATURE_APTCA
+ if (Security::IsUntrustedCallerCheckNeeded(pMeth) )
+ {
+ // Check for an untrusted caller.
+ // It is possible that fully trusted wrappers, such as the VB helper
+ // libraries, make calls to public methods that do not have the
+ // safe-for-untrusted-caller custom attribute set.
+ // Like all other link demands that get transformed into full stack
+ // walks for reflection, calls to public methods also get converted
+ // to full stack walks.
+
+ // NOTE: this will always do the APTCA check, regardless of method caller
+ Security::DoUntrustedCallerChecks(NULL, pMeth, TRUE);
+ }
+#endif
+
+ // CAS Link Demands
+ if (gc.refClassCasDemands != NULL)
+ Security::DemandSet(SSWT_LATEBOUND_LINKDEMAND, gc.refClassCasDemands);
+
+ if (gc.refMethodCasDemands != NULL)
+ Security::DemandSet(SSWT_LATEBOUND_LINKDEMAND, gc.refMethodCasDemands);
+
+ // Non-CAS demands are not applied against a grant
+ // set, they're standalone.
+ if (gc.refClassNonCasDemands != NULL)
+ Security::CheckNonCasDemand(&gc.refClassNonCasDemands);
+
+ if (gc.refMethodNonCasDemands != NULL)
+ Security::CheckNonCasDemand(&gc.refMethodNonCasDemands);
+ }
+
+Exit1:;
+ GCPROTECT_END();
+}
+
+FCIMPL3(INT32, AppDomainNative::ExecuteAssembly, AppDomainBaseObject* refThisUNSAFE,
+ AssemblyBaseObject* assemblyNameUNSAFE, PTRArray* stringArgsUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ INT32 iRetVal = 0;
+
+ struct _gc
+ {
+ APPDOMAINREF refThis;
+ ASSEMBLYREF assemblyName;
+ PTRARRAYREF stringArgs;
+ } gc;
+
+ gc.refThis = (APPDOMAINREF) refThisUNSAFE;
+ gc.assemblyName = (ASSEMBLYREF) assemblyNameUNSAFE;
+ gc.stringArgs = (PTRARRAYREF) stringArgsUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ AppDomain* pDomain = ValidateArg(gc.refThis);
+
+ if (gc.assemblyName == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Generic"));
+
+ if((BaseDomain*) pDomain == SystemDomain::System())
+ COMPlusThrow(kUnauthorizedAccessException, W("UnauthorizedAccess_SystemDomain"));
+
+ Assembly* pAssembly = (Assembly*) gc.assemblyName->GetAssembly();
+
+ if (!pDomain->m_pRootAssembly)
+ pDomain->m_pRootAssembly = pAssembly;
+
+ MethodDesc *pEntryPointMethod;
+ {
+ pEntryPointMethod = pAssembly->GetEntryPoint();
+ if (pEntryPointMethod)
+ {
+ UpgradeLinkTimeCheckToLateBoundDemand(pEntryPointMethod);
+ }
+ }
+
+ BOOL bCreatedConsole = FALSE;
+
+#ifndef FEATURE_PAL
+ if (pAssembly->GetManifestFile()->GetSubsystem() == IMAGE_SUBSYSTEM_WINDOWS_CUI)
+ {
+ {
+ GCX_COOP();
+ Security::CheckBeforeAllocConsole(pDomain, pAssembly);
+ }
+ bCreatedConsole = AllocConsole();
+ StackSString codebase;
+ pAssembly->GetManifestFile()->GetCodeBase(codebase);
+ SetConsoleTitle(codebase);
+ }
+#endif // !FEATURE_PAL
+
+ // This helper will call FreeConsole()
+ iRetVal = ExecuteAssemblyHelper(pAssembly, bCreatedConsole, &gc.stringArgs);
+
+ HELPER_METHOD_FRAME_END();
+
+ return iRetVal;
+}
+FCIMPLEND
+
+#ifdef FEATURE_VERSIONING
+FCIMPL1(void,
+ AppDomainNative::CreateContext,
+ AppDomainBaseObject *refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ APPDOMAINREF refThis;
+ } gc;
+
+ gc.refThis = (APPDOMAINREF) refThisUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ AppDomain* pDomain = ValidateArg(gc.refThis);
+
+ if((BaseDomain*) pDomain == SystemDomain::System())
+ {
+ COMPlusThrow(kUnauthorizedAccessException, W("UnauthorizedAccess_SystemDomain"));
+ }
+
+ pDomain->CreateFusionContext();
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+void QCALLTYPE AppDomainNative::SetupBindingPaths(__in_z LPCWSTR wszTrustedPlatformAssemblies, __in_z LPCWSTR wszPlatformResourceRoots, __in_z LPCWSTR wszAppPaths, __in_z LPCWSTR wszAppNiPaths, __in_z LPCWSTR appLocalWinMD)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ AppDomain* pDomain = GetAppDomain();
+
+ SString sTrustedPlatformAssemblies(wszTrustedPlatformAssemblies);
+ SString sPlatformResourceRoots(wszPlatformResourceRoots);
+ SString sAppPaths(wszAppPaths);
+ SString sAppNiPaths(wszAppNiPaths);
+ SString sappLocalWinMD(appLocalWinMD);
+
+ CLRPrivBinderCoreCLR *pBinder = pDomain->GetTPABinderContext();
+ _ASSERTE(pBinder != NULL);
+ IfFailThrow(pBinder->SetupBindingPaths(sTrustedPlatformAssemblies,
+ sPlatformResourceRoots,
+ sAppPaths,
+ sAppNiPaths));
+
+#ifdef FEATURE_COMINTEROP
+ pDomain->SetWinrtApplicationContext(sappLocalWinMD);
+#endif
+
+ END_QCALL;
+}
+
+#endif // FEATURE_VERSIONING
+
+FCIMPL12(Object*, AppDomainNative::CreateDynamicAssembly, AppDomainBaseObject* refThisUNSAFE, AssemblyNameBaseObject* assemblyNameUNSAFE, Object* identityUNSAFE, StackCrawlMark* stackMark, Object* requiredPsetUNSAFE, Object* optionalPsetUNSAFE, Object* refusedPsetUNSAFE, U1Array *securityRulesBlobUNSAFE, U1Array *aptcaBlobUNSAFE, INT32 access, INT32 dwFlags, SecurityContextSource securityContextSource)
+{
+ FCALL_CONTRACT;
+
+ ASSEMBLYREF refRetVal = NULL;
+
+ //<TODO>
+ // @TODO: there MUST be a better way to do this...
+ //</TODO>
+ CreateDynamicAssemblyArgs args;
+
+ args.refThis = (APPDOMAINREF) refThisUNSAFE;
+ args.assemblyName = (ASSEMBLYNAMEREF) assemblyNameUNSAFE;
+ args.identity = (OBJECTREF) identityUNSAFE;
+ args.requiredPset = (OBJECTREF) requiredPsetUNSAFE;
+ args.optionalPset = (OBJECTREF) optionalPsetUNSAFE;
+ args.refusedPset = (OBJECTREF) refusedPsetUNSAFE;
+ args.securityRulesBlob = (U1ARRAYREF) securityRulesBlobUNSAFE;
+ args.aptcaBlob = (U1ARRAYREF) aptcaBlobUNSAFE;
+ args.loaderAllocator = NULL;
+
+ args.access = access;
+ args.flags = static_cast<DynamicAssemblyFlags>(dwFlags);
+ args.stackMark = stackMark;
+ args.securityContextSource = securityContextSource;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT((CreateDynamicAssemblyArgsGC&)args);
+
+ AppDomain* pAppDomain = ValidateArg(args.refThis);
+
+ Assembly *pAssembly = Assembly::CreateDynamic(pAppDomain, &args);
+
+ refRetVal = (ASSEMBLYREF) pAssembly->GetExposedObject();
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(refRetVal);
+}
+FCIMPLEND
+
+//---------------------------------------------------------------------------------------
+//
+// Returns true if the DisableFusionUpdatesFromADManager config switch is turned on.
+//
+// Arguments:
+//    adhTarget - target AppDomain (unused here; the config switch is process-wide)
+//
+
+// static
+BOOL QCALLTYPE AppDomainNative::DisableFusionUpdatesFromADManager(QCall::AppDomainHandle adhTarget)
+{
+ QCALL_CONTRACT;
+
+ BOOL bUpdatesDisabled = FALSE;
+
+ BEGIN_QCALL;
+
+ bUpdatesDisabled = !!(g_pConfig->DisableFusionUpdatesFromADManager());
+
+ END_QCALL;
+
+ return bUpdatesDisabled;
+}
+
+#ifdef FEATURE_APPX
+
+//
+// Keep in sync with bcl\system\appdomain.cs
+//
+enum
+{
+ APPX_FLAGS_INITIALIZED = 0x01,
+
+ APPX_FLAGS_APPX_MODEL = 0x02,
+ APPX_FLAGS_APPX_DESIGN_MODE = 0x04,
+ APPX_FLAGS_APPX_NGEN = 0x08,
+ APPX_FLAGS_APPX_MASK = APPX_FLAGS_APPX_MODEL |
+ APPX_FLAGS_APPX_DESIGN_MODE |
+ APPX_FLAGS_APPX_NGEN,
+
+ APPX_FLAGS_API_CHECK = 0x10,
+};
+
+// static
+INT32 QCALLTYPE AppDomainNative::GetAppXFlags()
+{
+ QCALL_CONTRACT;
+
+ UINT32 flags = APPX_FLAGS_INITIALIZED;
+
+ BEGIN_QCALL;
+
+ if (AppX::IsAppXProcess())
+ {
+ flags |= APPX_FLAGS_APPX_MODEL;
+
+ if (AppX::IsAppXDesignMode())
+ flags |= APPX_FLAGS_APPX_DESIGN_MODE;
+ else
+ flags |= APPX_FLAGS_API_CHECK;
+
+ if (AppX::IsAppXNGen())
+ flags |= APPX_FLAGS_APPX_NGEN;
+ }
+
+ //
+ // 0: normal (only check in non-dev-mode APPX)
+ // 1: always check
+ // 2: never check
+ //
+ switch (g_pConfig->GetWindows8ProfileAPICheckFlag())
+ {
+ case 1:
+ flags |= APPX_FLAGS_API_CHECK;
+ break;
+ case 2:
+ flags &= ~APPX_FLAGS_API_CHECK;
+ break;
+ default:
+ break;
+ }
+
+ END_QCALL;
+
+ return flags;
+}
+
+#endif // FEATURE_APPX
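+// For reference, a caller would decode the packed result of GetAppXFlags with
+// simple bit tests (illustrative sketch; the real consumer is the managed code
+// in appdomain.cs referenced above):
+//
+//   INT32 flags = AppDomainNative::GetAppXFlags();
+//   bool fAppXModel  = (flags & APPX_FLAGS_APPX_MODEL) != 0;
+//   bool fDesignMode = (flags & APPX_FLAGS_APPX_DESIGN_MODE) != 0;
+//   bool fApiCheck   = (flags & APPX_FLAGS_API_CHECK) != 0;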
+
+//---------------------------------------------------------------------------------------
+//
+// Get the assembly and type containing the AppDomainManager used for the current domain
+//
+// Arguments:
+// adhTarget - AppDomain to get domain manager information about
+// retAssembly - [out] assembly which contains the AppDomainManager
+// retType - [out] AppDomainManger for the domain
+//
+// Notes:
+// If the AppDomain does not have an AppDomainManager, retAssembly and retType will be null on return.
+//
+
+// static
+void QCALLTYPE AppDomainNative::GetAppDomainManagerType(QCall::AppDomainHandle adhTarget,
+ QCall::StringHandleOnStack shRetAssembly,
+ QCall::StringHandleOnStack shRetType)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ if (adhTarget->HasAppDomainManagerInfo())
+ {
+ shRetAssembly.Set(adhTarget->GetAppDomainManagerAsm());
+ shRetType.Set(adhTarget->GetAppDomainManagerType());
+ }
+ else
+ {
+ shRetAssembly.Set(static_cast<LPCWSTR>(NULL));
+ shRetType.Set(static_cast<LPCWSTR>(NULL));
+ }
+
+ END_QCALL;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Set the assembly and type containing the AppDomainManager to be used for the current domain
+//
+// Arguments:
+// adhTarget - AppDomain to set domain manager information for
+// wszAssembly - assembly which contains the AppDomainManager
+// wszType - AppDomainManger for the domain
+//
+
+// static
+void QCALLTYPE AppDomainNative::SetAppDomainManagerType(QCall::AppDomainHandle adhTarget,
+ __in_z LPCWSTR wszAssembly,
+ __in_z LPCWSTR wszType)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(wszAssembly));
+ PRECONDITION(CheckPointer(wszType));
+ PRECONDITION(!GetAppDomain()->HasAppDomainManagerInfo());
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ // If the AppDomainManager type is the same as the domain manager set up by the CLR host, then we can
+ // propagate the host's initialization flags to the new domain as well.
+ EInitializeNewDomainFlags initializationFlags = eInitializeNewDomainFlags_None;
+ if (CorHost2::HasAppDomainManagerInfo())
+ {
+ if (wcscmp(CorHost2::GetAppDomainManagerAsm(), wszAssembly) == 0 &&
+ wcscmp(CorHost2::GetAppDomainManagerType(), wszType) == 0)
+ {
+ initializationFlags = CorHost2::GetAppDomainManagerInitializeNewDomainFlags();
+ }
+ }
+
+ adhTarget->SetAppDomainManagerInfo(wszAssembly, wszType, initializationFlags);
+
+ // If the initialization flags promise that the domain manager isn't going to modify security, then do a
+ // pre-resolution of the domain now so that we can do some basic verification of the state later. We
+ // don't care about the actual result now, just that the resolution took place to compare against later.
+ if (initializationFlags & eInitializeNewDomainFlags_NoSecurityChanges)
+ {
+ BOOL fIsFullyTrusted;
+ BOOL fIsHomogeneous;
+ adhTarget->GetSecurityDescriptor()->PreResolve(&fIsFullyTrusted, &fIsHomogeneous);
+ }
+
+ END_QCALL;
+}
+
+#ifdef FEATURE_APPDOMAINMANAGER_INITOPTIONS
+
+FCIMPL0(FC_BOOL_RET, AppDomainNative::HasHost)
+{
+ FCALL_CONTRACT;
+ FC_RETURN_BOOL(CorHost2::GetHostControl() != NULL);
+}
+FCIMPLEND
+
+//
+// Callback to the CLR host to register an AppDomainManager->AppDomain ID pair with it.
+//
+// Arguments:
+// punkAppDomainManager - COM reference to the AppDomainManager being registered with the host
+//
+
+// static
+void QCALLTYPE AppDomainNative::RegisterWithHost(IUnknown *punkAppDomainManager)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(punkAppDomainManager));
+ PRECONDITION(CheckPointer(CorHost2::GetHostControl()));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ EnsureComStarted();
+
+ IHostControl *pHostControl = CorHost2::GetHostControl();
+ ADID dwDomainId = SystemDomain::GetCurrentDomain()->GetId();
+ HRESULT hr = S_OK;
+
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pHostControl->SetAppDomainManager(dwDomainId.m_dwId, punkAppDomainManager);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ if (FAILED(hr))
+ {
+ ThrowHR(hr);
+ }
+
+ END_QCALL;
+}
+#endif // FEATURE_APPDOMAINMANAGER_INITOPTIONS
+
+FCIMPL1(void, AppDomainNative::SetHostSecurityManagerFlags, DWORD dwFlags)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ GetThread()->GetDomain()->GetSecurityDescriptor()->SetHostSecurityManagerFlags(dwFlags);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+// static
+void QCALLTYPE AppDomainNative::SetSecurityHomogeneousFlag(QCall::AppDomainHandle adhTarget,
+ BOOL fRuntimeSuppliedHomogenousGrantSet)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ IApplicationSecurityDescriptor *pAppSecDesc = adhTarget->GetSecurityDescriptor();
+ pAppSecDesc->SetHomogeneousFlag(fRuntimeSuppliedHomogenousGrantSet);
+
+ END_QCALL;
+}
+
+#ifdef FEATURE_CAS_POLICY
+
+// static
+void QCALLTYPE AppDomainNative::SetLegacyCasPolicyEnabled(QCall::AppDomainHandle adhTarget)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ IApplicationSecurityDescriptor *pAppSecDesc = adhTarget->GetSecurityDescriptor();
+ pAppSecDesc->SetLegacyCasPolicyEnabled();
+
+ END_QCALL;
+}
+
+// static
+BOOL QCALLTYPE AppDomainNative::IsLegacyCasPolicyEnabled(QCall::AppDomainHandle adhTarget)
+{
+ QCALL_CONTRACT;
+
+ BOOL fLegacyCasPolicy = FALSE;
+
+ BEGIN_QCALL;
+
+ IApplicationSecurityDescriptor *pAppSecDesc = adhTarget->GetSecurityDescriptor();
+ fLegacyCasPolicy = !!pAppSecDesc->IsLegacyCasPolicyEnabled();
+
+ END_QCALL;
+
+ return fLegacyCasPolicy;
+}
+
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_APTCA
+
+// static
+void QCALLTYPE AppDomainNative::SetCanonicalConditionalAptcaList(QCall::AppDomainHandle adhTarget,
+ LPCWSTR wszCanonicalConditionalAptcaList)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ IApplicationSecurityDescriptor *pAppSecDesc = adhTarget->GetSecurityDescriptor();
+
+ GCX_COOP();
+ pAppSecDesc->SetCanonicalConditionalAptcaList(wszCanonicalConditionalAptcaList);
+
+ END_QCALL;
+}
+
+#endif // FEATURE_APTCA
+
+FCIMPL1(Object*, AppDomainNative::GetFriendlyName, AppDomainBaseObject* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ STRINGREF str = NULL;
+ APPDOMAINREF refThis = (APPDOMAINREF) refThisUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+
+ AppDomain* pApp = ValidateArg(refThis);
+
+ LPCWSTR wstr = pApp->GetFriendlyName();
+ if (wstr)
+ str = StringObject::NewString(wstr);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(str);
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, AppDomainNative::IsDefaultAppDomainForEvidence, AppDomainBaseObject* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ BOOL retVal = FALSE;
+ APPDOMAINREF refThis = (APPDOMAINREF) refThisUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+
+ AppDomain* pApp = ValidateArg(refThis); // use the GC-protected local, not the UNSAFE copy
+ retVal = pApp->GetSecurityDescriptor()->IsDefaultAppDomainEvidence();
+
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(retVal);
+}
+FCIMPLEND
+
+FCIMPL2(Object*, AppDomainNative::GetAssemblies, AppDomainBaseObject* refThisUNSAFE, CLR_BOOL forIntrospection)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ PTRARRAYREF AsmArray;
+ APPDOMAINREF refThis;
+ } gc;
+
+ gc.AsmArray = NULL;
+ gc.refThis = (APPDOMAINREF) refThisUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ MethodTable * pAssemblyClass = MscorlibBinder::GetClass(CLASS__ASSEMBLY);
+
+ AppDomain * pApp = ValidateArg(gc.refThis);
+
+ // Allocate an array with as many elements as there are assemblies in this
+ // appdomain. This will usually be correct, but there may be assemblies
+ // that are still loading, and those won't be included in the array of
+ // loaded assemblies. When that happens, the array will have some trailing
+ // NULL entries; those entries will need to be trimmed.
+ size_t nArrayElems = pApp->m_Assemblies.GetCount(pApp);
+ gc.AsmArray = (PTRARRAYREF) AllocateObjectArray(
+ (DWORD)nArrayElems,
+ pAssemblyClass);
+
+ size_t numAssemblies = 0;
+ {
+ // Iterate over the loaded assemblies in the appdomain, and add each one
+ // to the array. Quit when the array is full, in case more assemblies
+ // have been loaded into this appdomain on another thread.
+ AppDomain::AssemblyIterator i = pApp->IterateAssembliesEx((AssemblyIterationFlags)(
+ kIncludeLoaded |
+ (forIntrospection ? kIncludeIntrospection : kIncludeExecution)));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (i.Next(pDomainAssembly.This()) && (numAssemblies < nArrayElems))
+ {
+ // Do not change this code. This is done this way to
+ // prevent a GC hole in the SetObjectReference() call. The compiler
+ // is free to pick the order of evaluation.
+ OBJECTREF o = (OBJECTREF)pDomainAssembly->GetExposedAssemblyObject();
+ if (o == NULL)
+ { // The assembly was collected and is not reachable from managed code anymore
+ continue;
+ }
+ gc.AsmArray->SetAt(numAssemblies++, o);
+ // If it is a collectible assembly, it is now referenced from the managed world, so we can
+ // release the native reference in the holder
+ }
+ }
+
+ // If we didn't fill the array, allocate a new array that is exactly the
+ // right size, and copy the data to it.
+ if (numAssemblies < nArrayElems)
+ {
+ PTRARRAYREF AsmArray2;
+ AsmArray2 = (PTRARRAYREF) AllocateObjectArray(
+ (DWORD)numAssemblies,
+ pAssemblyClass);
+
+ for (size_t ix = 0; ix < numAssemblies; ++ix)
+ {
+ AsmArray2->SetAt(ix, gc.AsmArray->GetAt(ix));
+ }
+
+ gc.AsmArray = AsmArray2;
+ }
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(gc.AsmArray);
+} // AppDomainNative::GetAssemblies
+FCIMPLEND
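+// GetAssemblies above follows an "allocate optimistically, trim afterwards"
+// pattern: size the array from a count that may be stale, fill in only what
+// the iterator actually observes, and reallocate when the fill comes up short.
+// A standalone sketch of the pattern (hypothetical names, ordinary C++
+// containers instead of OBJECTREF arrays):
+//
+//   size_t capacity = SnapshotCount();            // may over-count
+//   std::vector<Item> items(capacity);
+//   size_t n = 0;
+//   for (Iterator it = Begin(); n < capacity && it.Next(); )
+//       items[n++] = it.Current();
+//   items.resize(n);                              // drop unused trailing slots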
+
+
+FCIMPL1(void, AppDomainNative::Unload, INT32 dwId)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ IfFailThrow(AppDomain::UnloadById(ADID(dwId),TRUE));
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, AppDomainNative::IsDomainIdValid, INT32 dwId)
+{
+ FCALL_CONTRACT;
+
+ BOOL retVal = FALSE;
+ HELPER_METHOD_FRAME_BEGIN_RET_0()
+
+ AppDomainFromIDHolder ad((ADID)dwId, TRUE);
+ retVal=!ad.IsUnloaded();
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(retVal);
+}
+FCIMPLEND
+
+#ifdef FEATURE_REMOTING
+FCIMPL0(Object*, AppDomainNative::GetDefaultDomain)
+{
+ FCALL_CONTRACT;
+
+ APPDOMAINREF rv = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(rv);
+
+ if (GetThread()->GetDomain()->IsDefaultDomain())
+ rv = (APPDOMAINREF) SystemDomain::System()->DefaultDomain()->GetExposedObject();
+ else
+ rv = (APPDOMAINREF) SystemDomain::System()->DefaultDomain()->GetAppDomainProxy();
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(rv);
+}
+FCIMPLEND
+#endif
+
+FCIMPL1(INT32, AppDomainNative::GetId, AppDomainBaseObject* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ INT32 iRetVal = 0;
+ APPDOMAINREF refThis = (APPDOMAINREF) refThisUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+
+ AppDomain* pApp = ValidateArg(refThis);
+ // can only be accessed from within current domain
+ _ASSERTE(GetThread()->GetDomain() == pApp);
+
+ iRetVal = pApp->GetId().m_dwId;
+
+ HELPER_METHOD_FRAME_END();
+ return iRetVal;
+}
+FCIMPLEND
+
+FCIMPL1(void, AppDomainNative::ChangeSecurityPolicy, AppDomainBaseObject* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ APPDOMAINREF refThis = (APPDOMAINREF) refThisUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(refThis);
+ AppDomain* pApp = ValidateArg(refThis);
+
+#ifdef FEATURE_FUSION
+
+ // We do not support sharing behavior of ALWAYS when using app-domain local security config
+ if (pApp->GetSharePolicy() == AppDomain::SHARE_POLICY_ALWAYS)
+ {
+ // We may not be able to reduce sharing policy at this point, if we have already loaded
+ // some non-GAC assemblies as domain neutral. For this case we must regrettably fail
+ // the whole operation.
+ if (!pApp->ReduceSharePolicyFromAlways())
+ ThrowHR(COR_E_CANNOT_SET_POLICY);
+ }
+#endif
+ pApp->GetSecurityDescriptor()->SetPolicyLevelFlag();
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+FCIMPL2(Object*, AppDomainNative::IsStringInterned, AppDomainBaseObject* refThisUNSAFE, StringObject* pStringUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ APPDOMAINREF refThis = (APPDOMAINREF)ObjectToOBJECTREF(refThisUNSAFE);
+ STRINGREF refString = ObjectToSTRINGREF(pStringUNSAFE);
+ STRINGREF* prefRetVal = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refThis, refString);
+
+ ValidateArg(refThis);
+
+ if (refString == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_String"));
+
+ prefRetVal = refThis->GetDomain()->IsStringInterned(&refString);
+
+ HELPER_METHOD_FRAME_END();
+
+ if (prefRetVal == NULL)
+ return NULL;
+
+ return OBJECTREFToObject(*prefRetVal);
+}
+FCIMPLEND
+
+FCIMPL2(Object*, AppDomainNative::GetOrInternString, AppDomainBaseObject* refThisUNSAFE, StringObject* pStringUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ STRINGREF refRetVal = NULL;
+ APPDOMAINREF refThis = (APPDOMAINREF) refThisUNSAFE;
+ STRINGREF pString = (STRINGREF) pStringUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refThis, pString);
+
+ ValidateArg(refThis);
+
+ if (pString == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_String"));
+
+ STRINGREF* stringVal = refThis->GetDomain()->GetOrInternString(&pString);
+ if (stringVal != NULL)
+ {
+ refRetVal = *stringVal;
+ }
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(refRetVal);
+}
+FCIMPLEND
+
+
+FCIMPL1(Object*, AppDomainNative::GetDynamicDir, AppDomainBaseObject* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ STRINGREF str = NULL;
+#ifdef FEATURE_FUSION
+ APPDOMAINREF refThis = (APPDOMAINREF) refThisUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+
+ AppDomain *pDomain = ValidateArg(refThis);
+ str = StringObject::NewString(pDomain->GetDynamicDir());
+ HELPER_METHOD_FRAME_END();
+#endif
+ return OBJECTREFToObject(str);
+}
+FCIMPLEND
+
+// static
+void QCALLTYPE AppDomainNative::GetGrantSet(QCall::AppDomainHandle adhTarget,
+ QCall::ObjectHandleOnStack retGrantSet)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ IApplicationSecurityDescriptor *pSecDesc = adhTarget->GetSecurityDescriptor();
+
+ GCX_COOP();
+ pSecDesc->Resolve();
+ retGrantSet.Set(pSecDesc->GetGrantedPermissionSet());
+
+ END_QCALL;
+}
+
+
+FCIMPL1(FC_BOOL_RET, AppDomainNative::IsUnloadingForcedFinalize, AppDomainBaseObject* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ BOOL retVal = FALSE;
+ APPDOMAINREF refThis = (APPDOMAINREF) refThisUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+
+ AppDomain* pApp = ValidateArg(refThis);
+ retVal = pApp->IsFinalized();
+
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(retVal);
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, AppDomainNative::IsFinalizingForUnload, AppDomainBaseObject* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ BOOL retVal = FALSE;
+ APPDOMAINREF refThis = (APPDOMAINREF) refThisUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+
+ AppDomain* pApp = ValidateArg(refThis);
+ retVal = pApp->IsFinalizing();
+
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(retVal);
+}
+FCIMPLEND
+
+FCIMPL2(StringObject*, AppDomainNative::nApplyPolicy, AppDomainBaseObject* refThisUNSAFE, AssemblyNameBaseObject* refAssemblyNameUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ APPDOMAINREF refThis;
+ ASSEMBLYNAMEREF assemblyName;
+ STRINGREF rv;
+ } gc;
+
+ gc.refThis = (APPDOMAINREF)refThisUNSAFE;
+ gc.assemblyName = (ASSEMBLYNAMEREF) refAssemblyNameUNSAFE;
+ gc.rv = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ AppDomain* pDomain;
+ pDomain = ValidateArg(gc.refThis);
+
+ if (gc.assemblyName == NULL)
+ {
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_AssemblyName"));
+ }
+ if (gc.assemblyName->GetSimpleName() == NULL)
+ {
+ COMPlusThrow(kArgumentException, W("Format_StringZeroLength"));
+ }
+ Thread *pThread = GetThread();
+ CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+
+ // Initialize spec
+ AssemblySpec spec;
+ spec.InitializeSpec(&(pThread->m_MarshalAlloc),
+ &gc.assemblyName,
+ FALSE, /*fIsStringized*/
+ FALSE /*fForIntrospection*/
+ );
+
+ StackSString sDisplayName;
+
+#ifdef FEATURE_FUSION
+ {
+ GCX_PREEMP();
+
+ SafeComHolderPreemp<IAssemblyName> pAssemblyName(NULL);
+ SafeComHolderPreemp<IAssemblyName> pBoundName(NULL);
+ IfFailThrow(spec.CreateFusionName(&pAssemblyName));
+ HRESULT hr = PreBindAssembly(pDomain->GetFusionContext(),
+ pAssemblyName,
+ NULL, // pAsmParent (only needed to see if parent is loadfrom - in this case, we always want it to load in the normal ctx)
+ &pBoundName,
+ NULL // pvReserved
+ );
+ if (FAILED(hr) && hr != FUSION_E_REF_DEF_MISMATCH)
+ {
+ ThrowHR(hr);
+ }
+
+ FusionBind::GetAssemblyNameDisplayName(pBoundName, /*modifies*/sDisplayName, 0 /*flags*/);
+ }
+#else
+ spec.GetFileOrDisplayName(0,sDisplayName);
+#endif
+
+ gc.rv = StringObject::NewString(sDisplayName);
+
+ HELPER_METHOD_FRAME_END();
+ return (StringObject*)OBJECTREFToObject(gc.rv);
+}
+FCIMPLEND
+
+FCIMPL1(UINT32, AppDomainNative::GetAppDomainId, AppDomainBaseObject* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ FCUnique(0x91);
+
+ UINT32 retVal = 0;
+ APPDOMAINREF domainRef = (APPDOMAINREF) refThisUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(domainRef);
+
+ AppDomain* pDomain = ValidateArg(domainRef);
+ retVal = pDomain->GetId().m_dwId;
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
+
+FCIMPL1(void, AppDomainNative::PublishAnonymouslyHostedDynamicMethodsAssembly, AssemblyBaseObject * pAssemblyUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ ASSEMBLYREF refAssembly = (ASSEMBLYREF)ObjectToOBJECTREF(pAssemblyUNSAFE);
+ if (refAssembly == NULL)
+ FCThrowResVoid(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ DomainAssembly* pDomainAssembly = refAssembly->GetDomainAssembly();
+
+ pDomainAssembly->GetAppDomain()->SetAnonymouslyHostedDynamicMethodsAssembly(pDomainAssembly);
+}
+FCIMPLEND
+
+#ifdef FEATURE_CORECLR
+
+void QCALLTYPE AppDomainNative::SetNativeDllSearchDirectories(__in_z LPCWSTR wszNativeDllSearchDirectories)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(wszNativeDllSearchDirectories));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+ AppDomain *pDomain = GetAppDomain();
+
+ SString sDirectories(wszNativeDllSearchDirectories);
+
+ if(sDirectories.GetCount() > 0)
+ {
+ SString::CIterator start = sDirectories.Begin();
+ SString::CIterator itr = sDirectories.Begin();
+ SString::CIterator end = sDirectories.End();
+
+ while (itr != end)
+ {
+ start = itr;
+ BOOL found = sDirectories.Find(itr, W(';'));
+ if (!found)
+ {
+ itr = end;
+ }
+
+ SString qualifiedPath(sDirectories,start,itr);
+
+ if (found)
+ {
+ itr++;
+ }
+
+ unsigned len = qualifiedPath.GetCount();
+
+ if (len > 0)
+ {
+ if (qualifiedPath[len-1]!='\\')
+ {
+ qualifiedPath.Append('\\');
+ }
+
+ NewHolder<SString> stringHolder (new SString(qualifiedPath));
+ IfFailThrow(pDomain->m_NativeDllSearchDirectories.Append(stringHolder.GetValue()));
+ stringHolder.SuppressRelease();
+ }
+ }
+ }
+ END_QCALL;
+}
+
+#endif // FEATURE_CORECLR
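+// The parsing loop in SetNativeDllSearchDirectories tokenizes a ';'-delimited
+// list, skips empty entries, and normalizes each path to end in a backslash.
+// An equivalent standalone sketch (assumes std::wstring in place of SString):
+//
+//   std::wstringstream stream(dirs);
+//   std::wstring path;
+//   while (std::getline(stream, path, L';'))
+//   {
+//       if (path.empty()) continue;
+//       if (path.back() != L'\\') path += L'\\';
+//       searchDirs.push_back(path);
+//   }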
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+FCIMPL0(void, AppDomainNative::EnableMonitoring)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ EnableARM();
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL0(FC_BOOL_RET, AppDomainNative::MonitoringIsEnabled)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED();
+
+ FC_RETURN_BOOL(g_fEnableARM);
+}
+FCIMPLEND
+
+FCIMPL1(INT64, AppDomainNative::GetTotalProcessorTime, AppDomainBaseObject* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ INT64 i64RetVal = -1;
+
+ if (g_fEnableARM)
+ {
+ APPDOMAINREF refThis = (APPDOMAINREF) refThisUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+
+ AppDomain* pDomain = ValidateArg(refThis);
+ // can only be accessed from within current domain
+ _ASSERTE(GetThread()->GetDomain() == pDomain);
+
+ i64RetVal = (INT64)pDomain->QueryProcessorUsage();
+
+ HELPER_METHOD_FRAME_END();
+ }
+
+ return i64RetVal;
+}
+FCIMPLEND
+
+FCIMPL1(INT64, AppDomainNative::GetTotalAllocatedMemorySize, AppDomainBaseObject* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ INT64 i64RetVal = -1;
+
+ if (g_fEnableARM)
+ {
+ APPDOMAINREF refThis = (APPDOMAINREF) refThisUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+
+ AppDomain* pDomain = ValidateArg(refThis);
+ // can only be accessed from within current domain
+ _ASSERTE(GetThread()->GetDomain() == pDomain);
+
+ i64RetVal = (INT64)pDomain->GetAllocBytes();
+
+ HELPER_METHOD_FRAME_END();
+ }
+
+ return i64RetVal;
+}
+FCIMPLEND
+
+FCIMPL1(INT64, AppDomainNative::GetLastSurvivedMemorySize, AppDomainBaseObject* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ INT64 i64RetVal = -1;
+
+ if (g_fEnableARM)
+ {
+ APPDOMAINREF refThis = (APPDOMAINREF) refThisUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+
+ AppDomain* pDomain = ValidateArg(refThis);
+ // can only be accessed from within current domain
+ _ASSERTE(GetThread()->GetDomain() == pDomain);
+
+ i64RetVal = (INT64)pDomain->GetSurvivedBytes();
+
+ HELPER_METHOD_FRAME_END();
+ }
+
+ return i64RetVal;
+}
+FCIMPLEND
+
+FCIMPL0(INT64, AppDomainNative::GetLastSurvivedProcessMemorySize)
+{
+ FCALL_CONTRACT;
+
+ INT64 i64RetVal = -1;
+
+ if (g_fEnableARM)
+ {
+ i64RetVal = SystemDomain::GetTotalSurvivedBytes();
+ }
+
+ return i64RetVal;
+}
+FCIMPLEND
+#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+#if defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_APPX_BINDER)
+ICLRPrivBinder * QCALLTYPE AppDomainNative::CreateDesignerContext(LPCWSTR *rgPaths,
+ UINT cPaths,
+ BOOL fShared)
+{
+ QCALL_CONTRACT;
+
+ ICLRPrivBinder *pRetVal = nullptr;
+
+ BEGIN_QCALL;
+ ReleaseHolder<ICLRPrivBinder> pBinder;
+
+ // The runtime check is done on the managed side to enable the debugger to use
+ // FuncEval to create designer contexts outside of DesignMode.
+ _ASSERTE(AppX::IsAppXDesignMode() || (AppX::IsAppXProcess() && CORDebuggerAttached()));
+
+ AppDomain *pAppDomain = GetAppDomain();
+
+ pBinder = CLRPrivBinderAppX::CreateParentedBinder(fShared ? pAppDomain->GetLoadContextHostBinder() : pAppDomain->GetSharedContextHostBinder(), CLRPrivTypeCacheWinRT::GetOrCreateTypeCache(), rgPaths, cPaths, fShared /* fCanUseNativeImages */);
+
+ {
+ BaseDomain::LockHolder lh(pAppDomain);
+ pAppDomain->AppDomainInterfaceReleaseList.Append(pBinder); // record the binder just created; pRetVal is still null here
+ }
+ pBinder.SuppressRelease();
+ pRetVal = pBinder;
+
+ END_QCALL;
+
+ return pRetVal;
+}
+
+void QCALLTYPE AppDomainNative::SetCurrentDesignerContext(BOOL fDesignerContext, ICLRPrivBinder *newContext)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ if (fDesignerContext)
+ {
+ GetAppDomain()->SetCurrentContextHostBinder(newContext);
+ }
+ else
+ {
+ // Managed code is responsible for ensuring this isn't called more than once per AppDomain.
+ GetAppDomain()->SetSharedContextHostBinder(newContext);
+ }
+
+ END_QCALL;
+}
+#endif // defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_APPX_BINDER)
+
diff --git a/src/vm/appdomainnative.hpp b/src/vm/appdomainnative.hpp
new file mode 100644
index 0000000000..cbde3c86ff
--- /dev/null
+++ b/src/vm/appdomainnative.hpp
@@ -0,0 +1,154 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+/*============================================================
+**
+** Header: AppDomainNative.hpp
+**
+** Purpose: Implements native methods for AppDomains
+**
+**
+===========================================================*/
+#ifndef _APPDOMAINNATIVE_H
+#define _APPDOMAINNATIVE_H
+
+#include "qcall.h"
+
+class AppDomainNative
+{
+public:
+ static AppDomain *ValidateArg(APPDOMAINREF pThis);
+#ifdef FEATURE_REMOTING
+ static FCDECL5(Object*, CreateDomain, StringObject* strFriendlyNameUNSAFE, Object* appdomainSetup, Object* providedEvidenceUNSAFE, Object* creatorsEvidenceUNSAFE, void* parentSecurityDescriptor);
+ static FCDECL5(Object*, CreateInstance, StringObject* strFriendlyNameUNSAFE, Object* appdomainSetup, Object* providedEvidenceUNSAFE, Object* creatorsEvidenceUNSAFE, void* parentSecurityDescriptor);
+#endif
+ static FCDECL2(void, SetupFriendlyName, AppDomainBaseObject* refThisUNSAFE, StringObject* strFriendlyNameUNSAFE);
+#ifdef FEATURE_COMINTEROP
+ static FCDECL1(void, SetDisableInterfaceCache, AppDomainBaseObject* refThisUNSAFE);
+#endif // FEATURE_COMINTEROP
+ static FCDECL1(void*, GetSecurityDescriptor, AppDomainBaseObject* refThisUNSAFE);
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ static FCDECL2(void, UpdateLoaderOptimization, AppDomainBaseObject* refThisUNSAFE, DWORD optimization);
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+ static FCDECL12(Object*, CreateDynamicAssembly, AppDomainBaseObject* refThisUNSAFE, AssemblyNameBaseObject* assemblyNameUNSAFE, Object* identityUNSAFE, StackCrawlMark* stackMark, Object* requiredPsetUNSAFE, Object* optionalPsetUNSAFE, Object* refusedPsetUNSAFE, U1Array* securityRulesBlobUNSAFE, U1Array* aptcaBlobUNSAFE, INT32 access, INT32 flags, SecurityContextSource securityContextSource);
+#ifdef FEATURE_APPDOMAINMANAGER_INITOPTIONS
+ static FCDECL0(FC_BOOL_RET, HasHost);
+#endif // FEATURE_APPDOMAINMANAGER_INITOPTIONS
+ static FCDECL1(void, SetHostSecurityManagerFlags, DWORD dwFlags);
+ static FCDECL1(Object*, GetFriendlyName, AppDomainBaseObject* refThisUNSAFE);
+ static FCDECL1(FC_BOOL_RET, IsDefaultAppDomainForEvidence, AppDomainBaseObject* refThisUNSAFE);
+ static FCDECL2(Object*, GetAssemblies, AppDomainBaseObject* refThisUNSAFE, CLR_BOOL fForIntrospection);
+ static FCDECL2(Object*, GetOrInternString, AppDomainBaseObject* refThisUNSAFE, StringObject* pStringUNSAFE);
+ static FCDECL3(INT32, ExecuteAssembly, AppDomainBaseObject* refThisUNSAFE, AssemblyBaseObject* assemblyNameUNSAFE, PTRArray* stringArgsUNSAFE);
+#ifdef FEATURE_VERSIONING
+ static FCDECL1(void, CreateContext, AppDomainBaseObject *refThisUNSAFE);
+ static void QCALLTYPE SetupBindingPaths(__in_z LPCWSTR wszTrustedPlatformAssemblies, __in_z LPCWSTR wszPlatformResourceRoots, __in_z LPCWSTR wszAppPaths, __in_z LPCWSTR wszAppNiPaths, __in_z LPCWSTR appLocalWinMD);
+#endif // FEATURE_VERSIONING
+ static FCDECL1(void, Unload, INT32 dwId);
+ static FCDECL1(Object*, GetDynamicDir, AppDomainBaseObject* refThisUNSAFE);
+ static FCDECL1(INT32, GetId, AppDomainBaseObject* refThisUNSAFE);
+ static FCDECL1(INT32, GetIdForUnload, AppDomainBaseObject* refDomainUNSAFE);
+ static FCDECL1(FC_BOOL_RET, IsDomainIdValid, INT32 dwId);
+ static FCDECL1(FC_BOOL_RET, IsFinalizingForUnload, AppDomainBaseObject* refThisUNSAFE);
+ static FCDECL1(void, ForceToSharedDomain, Object* pObjectUNSAFE);
+ static FCDECL1(void, ChangeSecurityPolicy, AppDomainBaseObject* refThisUNSAFE);
+#ifdef FEATURE_REMOTING
+ static FCDECL0(Object*, GetDefaultDomain);
+#endif
+ static FCDECL1(LPVOID, GetFusionContext, AppDomainBaseObject* refThis);
+ static FCDECL2(Object*, IsStringInterned, AppDomainBaseObject* refThis, StringObject* pString);
+ static FCDECL1(FC_BOOL_RET, IsUnloadingForcedFinalize, AppDomainBaseObject* refThis);
+ static FCDECL3(void, UpdateContextProperty, LPVOID fusionContext, StringObject* key, Object* value);
+ static FCDECL2(StringObject*, nApplyPolicy, AppDomainBaseObject* refThisUNSAFE, AssemblyNameBaseObject* assemblyNameUNSAFE);
+ static FCDECL2(FC_BOOL_RET, IsFrameworkAssembly, AppDomainBaseObject* refThisUNSAFE, AssemblyNameBaseObject* refAssemblyNameUNSAFE);
+ static FCDECL1(UINT32, GetAppDomainId, AppDomainBaseObject* refThisUNSAFE);
+ static FCDECL1(void , PublishAnonymouslyHostedDynamicMethodsAssembly, AssemblyBaseObject * pAssemblyUNSAFE);
+#ifdef FEATURE_CORECLR
+ static void QCALLTYPE SetNativeDllSearchDirectories(__in_z LPCWSTR wszAssembly);
+#endif // FEATURE_CORECLR
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ static FCDECL0(void, EnableMonitoring);
+ static FCDECL0(FC_BOOL_RET, MonitoringIsEnabled);
+ static FCDECL1(INT64, GetTotalProcessorTime, AppDomainBaseObject* refThisUNSAFE);
+ static FCDECL1(INT64, GetTotalAllocatedMemorySize, AppDomainBaseObject* refThisUNSAFE);
+ static FCDECL1(INT64, GetLastSurvivedMemorySize, AppDomainBaseObject* refThisUNSAFE);
+ static FCDECL0(INT64, GetLastSurvivedProcessMemorySize);
+#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+private:
+ static INT32 ExecuteAssemblyHelper(Assembly* pAssembly,
+ BOOL bCreatedConsole,
+ PTRARRAYREF *pStringArgs);
+#ifdef FEATURE_REMOTING
+ static void CreateDomainHelper (STRINGREF* ppFriendlyName, OBJECTREF* ppAppdomainSetup, OBJECTREF* ppProvidedEvidence, OBJECTREF* ppCreatorsEvidence, void* parentSecurityDescriptor, OBJECTREF* pEntryPointProxy, OBJECTREF* pRetVal);
+#endif
+
+public:
+ static
+ void QCALLTYPE SetupDomainSecurity(QCall::AppDomainHandle pDomain,
+ QCall::ObjectHandleOnStack ohEvidence,
+ IApplicationSecurityDescriptor *pParentSecurityDescriptor,
+ BOOL fPublishAppDomain);
+
+ static
+ void QCALLTYPE GetGrantSet(QCall::AppDomainHandle adhTarget,
+ QCall::ObjectHandleOnStack retGrantSet);
+
+
+ static
+ BOOL QCALLTYPE DisableFusionUpdatesFromADManager(QCall::AppDomainHandle adhTarget);
+
+#ifdef FEATURE_APPX
+ static
+ INT32 QCALLTYPE GetAppXFlags();
+#endif
+
+ static
+ void QCALLTYPE GetAppDomainManagerType(QCall::AppDomainHandle adhTarget,
+ QCall::StringHandleOnStack shRetAssembly,
+ QCall::StringHandleOnStack shRetType);
+
+ static
+ void QCALLTYPE SetAppDomainManagerType(QCall::AppDomainHandle adhTarget,
+ __in_z LPCWSTR wszAssembly,
+ __in_z LPCWSTR wszType);
+
+ static
+ void QCALLTYPE SetSecurityHomogeneousFlag(QCall::AppDomainHandle adhTarget,
+ BOOL fRuntimeSuppliedHomogenousGrantSet);
+
+#ifdef FEATURE_CAS_POLICY
+ static
+ void QCALLTYPE SetLegacyCasPolicyEnabled(QCall::AppDomainHandle adhTarget);
+
+ static
+ BOOL QCALLTYPE IsLegacyCasPolicyEnabled(QCall::AppDomainHandle adhTarget);
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_APTCA
+ static
+ void QCALLTYPE SetCanonicalConditionalAptcaList(QCall::AppDomainHandle adhTarget,
+ LPCWSTR wszCanonicalConditionalAptcaList);
+#endif // FEATURE_APTCA
+
+#ifdef FEATURE_APPDOMAINMANAGER_INITOPTIONS
+ static
+ void QCALLTYPE RegisterWithHost(IUnknown *punkAppDomainManager);
+#endif // FEATURE_APPDOMAINMANAGER_INITOPTIONS
+
+#if defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_APPX_BINDER)
+ static
+ ICLRPrivBinder * QCALLTYPE CreateDesignerContext(LPCWSTR *rgPaths, UINT cPaths, BOOL fShared);
+
+ static
+ void QCALLTYPE SetCurrentDesignerContext(BOOL fDesignerContext, ICLRPrivBinder *newContext);
+#endif
+};
+
+#endif
diff --git a/src/vm/appdomainstack.cpp b/src/vm/appdomainstack.cpp
new file mode 100644
index 0000000000..0dabefa1be
--- /dev/null
+++ b/src/vm/appdomainstack.cpp
@@ -0,0 +1,196 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+
+
+#include "common.h"
+
+#include "appdomainstack.h"
+#include "appdomainstack.inl"
+#include "security.h"
+#include "securitypolicy.h"
+#include "appdomain.inl"
+#ifdef FEATURE_REMOTING
+#include "crossdomaincalls.h"
+#else
+#include "callhelpers.h"
+#endif
+
+#ifdef _DEBUG
+void AppDomainStack::CheckOverridesAssertCounts()
+{
+ LIMITED_METHOD_CONTRACT;
+ DWORD dwAppDomainIndex = 0;
+ DWORD dwOverrides = 0;
+ DWORD dwAsserts = 0;
+ AppDomainStackEntry *pEntry = NULL;
+ for(dwAppDomainIndex=0;dwAppDomainIndex<m_numEntries;dwAppDomainIndex++)
+ {
+ pEntry = __GetEntryPtr(dwAppDomainIndex);
+ dwOverrides += pEntry->m_dwOverridesCount;
+ dwAsserts += pEntry->m_dwAsserts;
+ }
+ _ASSERTE(dwOverrides == m_dwOverridesCount);
+ _ASSERTE(dwAsserts == m_dwAsserts);
+}
+#endif
+
+BOOL AppDomainStackEntry::IsFullyTrustedWithNoStackModifiers(void)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_domainID.m_dwId == INVALID_APPDOMAIN_ID || m_dwOverridesCount != 0 || m_dwAsserts != 0)
+ return FALSE;
+
+ AppDomainFromIDHolder pDomain(m_domainID, FALSE);
+ if (pDomain.IsUnloaded())
+ return FALSE;
+ IApplicationSecurityDescriptor *currAppSecDesc = pDomain->GetSecurityDescriptor();
+ if (currAppSecDesc == NULL)
+ return FALSE;
+ return Security::CheckDomainWideSpecialFlag(currAppSecDesc, 1 << SECURITY_FULL_TRUST);
+}
+BOOL AppDomainStackEntry::IsHomogeneousWithNoStackModifiers(void)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_domainID.m_dwId == INVALID_APPDOMAIN_ID || m_dwOverridesCount != 0 || m_dwAsserts != 0)
+ return FALSE;
+
+ AppDomainFromIDHolder pDomain(m_domainID, FALSE);
+ if (pDomain.IsUnloaded())
+ return FALSE;
+ IApplicationSecurityDescriptor *currAppSecDesc = pDomain->GetSecurityDescriptor();
+ if (currAppSecDesc == NULL)
+ return FALSE;
+ return (currAppSecDesc->IsHomogeneous() && !currAppSecDesc->ContainsAnyRefusedPermissions());
+}
+
+BOOL AppDomainStackEntry::HasFlagsOrFullyTrustedWithNoStackModifiers(DWORD flags)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_domainID.m_dwId == INVALID_APPDOMAIN_ID || m_dwOverridesCount != 0 || m_dwAsserts != 0)
+ return FALSE;
+
+ AppDomainFromIDHolder pDomain(m_domainID, FALSE);
+ if (pDomain.IsUnloaded())
+ return FALSE;
+ IApplicationSecurityDescriptor *currAppSecDesc = pDomain->GetSecurityDescriptor();
+ if (currAppSecDesc == NULL)
+ return FALSE;
+
+ // either the desired flag (often 0) or fully trusted will do
+ flags |= (1<<SECURITY_FULL_TRUST);
+ return Security::CheckDomainWideSpecialFlag(currAppSecDesc, flags);
+}
+
+
+void AppDomainStackEntry::UpdateHomogeneousPLS(OBJECTREF* homogeneousPLS)
+{
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ AppDomainFromIDHolder domain(m_domainID, TRUE);
+ if (domain.IsUnloaded())
+ return;
+
+ IApplicationSecurityDescriptor *thisAppSecDesc = domain->GetSecurityDescriptor();
+
+ if (thisAppSecDesc->IsHomogeneous())
+ {
+ // update the intersection with the current grant set
+
+ NewArrayHolder<BYTE> pbtmpSerializedObject(NULL);
+
+ struct gc
+ {
+ OBJECTREF refGrantSet;
+ } gc;
+ ZeroMemory( &gc, sizeof( gc ) );
+ AppDomain* pCurrentDomain;
+ pCurrentDomain = GetAppDomain();
+
+ GCPROTECT_BEGIN( gc );
+#ifdef FEATURE_REMOTING // should not be possible without remoting
+ DWORD cbtmpSerializedObject = 0;
+ if (pCurrentDomain->GetId() != m_domainID)
+ {
+ // Unlikely scenario where we have another homogeneous AD on the callstack that's different from
+ // the current one. If there's another AD on the callstack, it's likely to be FT.
+ ENTER_DOMAIN_ID(m_domainID)
+ {
+ // Release the holder to allow GCs. This is safe because we've entered the AD, so it won't go away.
+ domain.Release();
+
+ gc.refGrantSet = thisAppSecDesc->GetGrantedPermissionSet(NULL);
+ AppDomainHelper::MarshalObject(GetAppDomain(), &gc.refGrantSet, &pbtmpSerializedObject, &cbtmpSerializedObject);
+ if (pbtmpSerializedObject == NULL)
+ {
+ // this is an error: possibly an OOM prevented the blob from getting created.
+ // We could return null and let the managed code use a fully restricted object or throw here.
+ // Let's throw here...
+ COMPlusThrow(kSecurityException);
+ }
+ gc.refGrantSet = NULL;
+
+ }
+ END_DOMAIN_TRANSITION
+ AppDomainHelper::UnmarshalObject(pCurrentDomain,pbtmpSerializedObject, cbtmpSerializedObject, &gc.refGrantSet);
+ }
+ else
+#else
+ _ASSERTE(pCurrentDomain->GetId() == m_domainID);
+#endif // FEATURE_REMOTING
+ {
+ // Release the holder to allow GCs. This is safe because we're running in this AD, so it won't go away.
+ domain.Release();
+ gc.refGrantSet = thisAppSecDesc->GetGrantedPermissionSet(NULL);
+ }
+
+ // At this point gc.refGrantSet has the grantSet of pDomain (thisAppSecDesc) in the current domain.
+ // We don't care about refused perms since we established there were
+ // none earlier for this call stack.
+ // Let's intersect with what we've already got.
+
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__PERMISSION_LIST_SET__UPDATE);
+ DECLARE_ARGHOLDER_ARRAY(args, 2);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(*homogeneousPLS); // arg 0
+ args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(gc.refGrantSet); // arg 1
+ CALL_MANAGED_METHOD_NORET(args);
+
+ GCPROTECT_END();
+ }
+}
+
+
+BOOL AppDomainStack::AllDomainsHomogeneousWithNoStackModifiers()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Used primarily by CompressedStack code to decide if a CS has to be constructed
+
+ DWORD dwAppDomainIndex = 0;
+
+
+ InitDomainIteration(&dwAppDomainIndex);
+ while (dwAppDomainIndex != 0)
+ {
+ AppDomainStackEntry* pEntry = GetNextDomainEntryOnStack(&dwAppDomainIndex);
+ _ASSERTE(pEntry != NULL);
+
+ if (!pEntry->IsHomogeneousWithNoStackModifiers() && !pEntry->IsFullyTrustedWithNoStackModifiers())
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
diff --git a/src/vm/appdomainstack.h b/src/vm/appdomainstack.h
new file mode 100644
index 0000000000..9825ea9dda
--- /dev/null
+++ b/src/vm/appdomainstack.h
@@ -0,0 +1,232 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// appdomainstack.h - Tracks the stack of AppDomains executing on the current thread.
+//
+
+#ifndef __appdomainstack_h__
+#define __appdomainstack_h__
+
+#include "vars.hpp"
+#include "util.hpp"
+
+
+// Stack of AppDomains executing on the current thread. Used in security optimization to avoid stackwalks
+#define ADSTACK_BLOCK_SIZE 16
+#define INVALID_APPDOMAIN_ID ((DWORD)-1)
+#define CURRENT_APPDOMAIN_ID ((ADID)(DWORD)0)
+#define __GetADID(index) ((index)<ADSTACK_BLOCK_SIZE?m_pStack[(index)].m_domainID:m_pExtraStack[((index)-ADSTACK_BLOCK_SIZE)].m_domainID)
+#define __GetEntryPtr(index) ((index)<ADSTACK_BLOCK_SIZE?&(m_pStack[(index)]):&(m_pExtraStack[((index)-ADSTACK_BLOCK_SIZE)]))
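+
+// Indexing sketch: entries [0, ADSTACK_BLOCK_SIZE) live in the inline m_pStack
+// array, and anything beyond spills into the heap-allocated m_pExtraStack. With
+// ADSTACK_BLOCK_SIZE == 16, for example, __GetEntryPtr(3) resolves to &m_pStack[3]
+// while __GetEntryPtr(20) resolves to &m_pExtraStack[4].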
+
+struct AppDomainStackEntry
+{
+ ADID m_domainID;
+ DWORD m_dwOverridesCount;
+ DWORD m_dwAsserts;
+ DWORD m_dwPreviousThreadWideSpecialFlags;
+
+ FORCEINLINE bool operator==(const AppDomainStackEntry& entry) const
+ {
+ return (m_domainID == entry.m_domainID &&
+ m_dwOverridesCount == entry.m_dwOverridesCount &&
+ m_dwAsserts == entry.m_dwAsserts);
+ }
+ FORCEINLINE bool operator!=(const AppDomainStackEntry& entry) const
+ {
+ return (m_domainID != entry.m_domainID ||
+ m_dwOverridesCount != entry.m_dwOverridesCount ||
+ m_dwAsserts != entry.m_dwAsserts);
+
+ }
+ BOOL IsFullyTrustedWithNoStackModifiers(void);
+ BOOL IsHomogeneousWithNoStackModifiers(void);
+ BOOL HasFlagsOrFullyTrustedWithNoStackModifiers(DWORD flags);
+#ifndef DACCESS_COMPILE
+ void UpdateHomogeneousPLS(OBJECTREF* homogeneousPLS);
+#endif
+};
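+
+// Note: the comparison operators above include only the domain ID and the
+// assert/override counts; m_dwPreviousThreadWideSpecialFlags is not part of the
+// comparison, so two entries compare equal regardless of the thread-wide flags
+// snapshot captured when they were pushed.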
+
+class AppDomainStack
+{
+public:
+ AppDomainStack() : m_numEntries(0), m_pExtraStack(NULL), m_ExtraStackSize(0), m_dwOverridesCount(0), m_dwAsserts(0), m_dwThreadWideSpecialFlags(0xFFFFFFFF)
+ {
+ WRAPPER_NO_CONTRACT;
+ FillEntries(m_pStack, ADSTACK_BLOCK_SIZE);
+ }
+
+ AppDomainStack(const AppDomainStack& stack):m_numEntries(0), m_pExtraStack(NULL), m_ExtraStackSize(0), m_dwOverridesCount(0), m_dwAsserts(0)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_dwThreadWideSpecialFlags = stack.m_dwThreadWideSpecialFlags;
+ m_numEntries = stack.m_numEntries;
+ m_dwOverridesCount = stack.m_dwOverridesCount;
+ m_dwAsserts = stack.m_dwAsserts;
+ LOG((LF_APPDOMAIN, LL_INFO100, "copy ctor: m_dwAsserts:%d stack.m_dwAsserts:%d\n",m_dwAsserts, stack.m_dwAsserts));
+ memcpy(m_pStack, stack.m_pStack, sizeof( AppDomainStackEntry) * ADSTACK_BLOCK_SIZE);
+ // If there is anything stored in the extra allocated space, copy that over
+ if (m_numEntries > ADSTACK_BLOCK_SIZE)
+ {
+            // #extra blocks to allocate = ceil(numDomains/blocksize) - 1, which equals the
+            // integer division numDomains/blocksize for non-multiples; exact multiples of
+            // blocksize over-allocate one spare block, which is harmless.
+ DWORD numBlocks = m_numEntries/ADSTACK_BLOCK_SIZE;
+ m_ExtraStackSize = numBlocks*ADSTACK_BLOCK_SIZE;
+ m_pExtraStack = new AppDomainStackEntry[m_ExtraStackSize];
+ memcpy(m_pExtraStack, stack.m_pExtraStack, sizeof(AppDomainStackEntry)*(m_numEntries-ADSTACK_BLOCK_SIZE));
+ FillEntries((m_pExtraStack+m_numEntries-ADSTACK_BLOCK_SIZE), (m_ExtraStackSize -(m_numEntries-ADSTACK_BLOCK_SIZE)));
+ }
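+        // Worked example of the block math above (illustrative): with m_numEntries == 20,
+        // numBlocks = 20/16 = 1 and m_ExtraStackSize == 16; the 4 overflow entries are
+        // copied and FillEntries resets the remaining 12 slots.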
+ }
+
+ ~AppDomainStack()
+ {
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ } CONTRACTL_END;
+ if (m_pExtraStack != NULL)
+ delete[] m_pExtraStack;
+ m_pExtraStack = NULL;
+ m_ExtraStackSize = 0;
+ }
+
+ bool operator!= (const AppDomainStack& stack) const
+ {
+ return !(*this == stack);
+ }
+
+ bool operator== (const AppDomainStack& stack) const
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (this == &stack) // degenerate case: comparing with self
+ return true;
+ if (this->m_numEntries != stack.m_numEntries ||
+ this->m_dwAsserts != stack.m_dwAsserts ||
+ this->m_dwOverridesCount != stack.m_dwOverridesCount)
+ return false;
+        for (unsigned i = 0; i < stack.m_numEntries; i++)
+ {
+ if (i < ADSTACK_BLOCK_SIZE)
+ {
+ if (this->m_pStack[i] != stack.m_pStack[i])
+ return false;
+ }
+ else
+ {
+ if (this->m_pExtraStack[i-ADSTACK_BLOCK_SIZE] != stack.m_pExtraStack[i-ADSTACK_BLOCK_SIZE])
+ return false;
+ }
+ }
+ return true;
+ }
+ inline AppDomainStack& operator =(const AppDomainStack& stack)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Degenerate case (assigning x = x)
+ if (this == &stack)
+ return *this;
+
+ m_dwThreadWideSpecialFlags = stack.m_dwThreadWideSpecialFlags;
+ m_numEntries = stack.m_numEntries;
+ m_dwOverridesCount = stack.m_dwOverridesCount;
+ m_dwAsserts = stack.m_dwAsserts;
+ LOG((LF_APPDOMAIN, LL_INFO100, "= operator : m_dwAsserts:%d stack.m_dwAsserts:%d\n",m_dwAsserts, stack.m_dwAsserts));
+ memcpy(m_pStack, stack.m_pStack, sizeof( AppDomainStackEntry) * ADSTACK_BLOCK_SIZE);
+ // If there is anything stored in the extra allocated space, copy that over
+ if (m_numEntries > ADSTACK_BLOCK_SIZE)
+ {
+            // #extra blocks to allocate = ceil(numDomains/blocksize) - 1, which equals the
+            // integer division numDomains/blocksize for non-multiples; exact multiples of
+            // blocksize over-allocate one spare block, which is harmless.
+ DWORD numBlocks = m_numEntries/ADSTACK_BLOCK_SIZE;
+ if (m_ExtraStackSize < numBlocks*ADSTACK_BLOCK_SIZE)
+ {
+ // free ptr if it exists
+ if (m_pExtraStack != NULL)
+ delete[] m_pExtraStack;
+ m_pExtraStack = NULL;
+
+ m_ExtraStackSize = numBlocks*ADSTACK_BLOCK_SIZE;
+ m_pExtraStack = new AppDomainStackEntry[m_ExtraStackSize];
+ }
+
+ memcpy(m_pExtraStack, stack.m_pExtraStack, sizeof(AppDomainStackEntry)*(m_numEntries-ADSTACK_BLOCK_SIZE));
+ FillEntries((m_pExtraStack+m_numEntries-ADSTACK_BLOCK_SIZE), (m_ExtraStackSize -(m_numEntries-ADSTACK_BLOCK_SIZE)));
+ }
+
+ return *this;
+ }
+
+ inline void PushDomain(ADID pDomain);
+ inline ADID PopDomain();
+
+ inline void InitDomainIteration(DWORD *pIndex) const;
+ // Gets the next AD on the stack
+ inline ADID GetNextDomainOnStack(DWORD *pIndex, DWORD *pOverrides, DWORD *pAsserts) const;
+ inline AppDomainStackEntry* GetNextDomainEntryOnStack(DWORD *pIndex);
+ inline AppDomainStackEntry* GetCurrentDomainEntryOnStack(DWORD pIndex);
+ // Updates the asserts/overrides on the next AD on the stack
+ inline void UpdateDomainOnStack(DWORD pIndex, DWORD asserts, DWORD overrides);
+ inline DWORD GetNumDomains() const;
+ inline void ClearDomainStack();
+ inline DWORD GetThreadWideSpecialFlag() const;
+ inline DWORD IncrementOverridesCount();
+ inline DWORD DecrementOverridesCount();
+ inline DWORD GetOverridesCount();
+ inline DWORD GetInnerAppDomainOverridesCount();
+ inline DWORD IncrementAssertCount();
+ inline DWORD DecrementAssertCount();
+ inline DWORD GetAssertCount();
+ inline DWORD GetInnerAppDomainAssertCount();
+ bool IsDefaultSecurityInfo() const;
+ BOOL AllDomainsHomogeneousWithNoStackModifiers();
+
+private:
+ inline void AddMoreDomains(void);
+ inline AppDomainStackEntry* ReadTopOfStack();
+ void UpdateStackFromEntries();
+ static void FillEntries(AppDomainStackEntry ptr[], DWORD size)
+ {
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }CONTRACTL_END;
+ _ASSERTE(ptr != NULL);
+ DWORD i;
+ const AppDomainStackEntry tmp_entry = {ADID(INVALID_APPDOMAIN_ID), 0, 0};
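+        // Note: the initializer omits m_dwPreviousThreadWideSpecialFlags, which
+        // aggregate initialization therefore value-initializes to 0.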
+        for (i = 0; i < size; i++)
+            ptr[i] = tmp_entry;
+ }
+
+#ifdef _DEBUG
+ inline void LogADStackUpdate(void);
+ void CheckOverridesAssertCounts(); // Debug only code to check that assert count/overrides count are always in sync across adstack
+#endif
+
+ DWORD m_numEntries;
+ AppDomainStackEntry m_pStack[ADSTACK_BLOCK_SIZE];
+ AppDomainStackEntry *m_pExtraStack;
+ DWORD m_ExtraStackSize;
+ DWORD m_dwOverridesCount; // across all entries
+ DWORD m_dwAsserts; // across all entries
+ DWORD m_dwThreadWideSpecialFlags; // this flag records the last evaluated thread wide security state
+};
+#endif
diff --git a/src/vm/appdomainstack.inl b/src/vm/appdomainstack.inl
new file mode 100644
index 0000000000..c15f7cb357
--- /dev/null
+++ b/src/vm/appdomainstack.inl
@@ -0,0 +1,444 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+/*============================================================
+**
+** Header: AppDomainStack.inl
+**
+** Purpose: Implements ADStack inline functions
+**
+===========================================================*/
+#ifndef _APPDOMAINSTACK_INL
+#define _APPDOMAINSTACK_INL
+
+#include "threads.h"
+#include "appdomain.hpp"
+#include "appdomainstack.h"
+#include "security.h"
+
+
+#ifndef DACCESS_COMPILE
+
+#ifdef _DEBUG
+#define LogADStackUpdateIfDebug LogADStackUpdate()
+inline void AppDomainStack::LogADStackUpdate(void)
+{
+ LIMITED_METHOD_CONTRACT;
+ for (int i=m_numEntries-1; i >= 0; i--) {
+ AppDomainStackEntry* pEntry = __GetEntryPtr(i);
+
+ LOG((LF_APPDOMAIN, LL_INFO100, " stack[%d]: AppDomain id[%d] Overrides[%d] Asserts[%d] \n", i,
+ pEntry->m_domainID.m_dwId, pEntry->m_dwOverridesCount, pEntry->m_dwAsserts));
+ }
+}
+
+#else
+#define LogADStackUpdateIfDebug
+#endif
+
+inline void AppDomainStack::AddMoreDomains(void)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ // Need to allocate a bigger block for pMoreDomains
+ AppDomainStackEntry *tmp = m_pExtraStack;
+ m_pExtraStack = new AppDomainStackEntry[m_ExtraStackSize + ADSTACK_BLOCK_SIZE];
+ memcpy(m_pExtraStack, tmp, sizeof(AppDomainStackEntry)*(m_ExtraStackSize));
+ FillEntries((m_pExtraStack+m_ExtraStackSize), ADSTACK_BLOCK_SIZE);
+ m_ExtraStackSize+= ADSTACK_BLOCK_SIZE;
+ delete[] tmp; // free the old block
+
+}
+inline void AppDomainStack::PushDomain(ADID pDomain)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ LOG((LF_APPDOMAIN, LL_INFO100, "Thread::PushDomain (%d), count now %d\n", pDomain.m_dwId, m_numEntries+1));
+
+    //
+    // When entering a new AppDomain, we need to update the thread-wide
+    // state with the intersection of the current and the new AppDomain's flags.
+    // This is because the old AppDomain could have loaded new assemblies
+    // that are not yet reflected in the thread-wide state, and the thread
+    // could then execute code in such a new Assembly.
+    // We save the old thread-wide state in the AppDomainStackEntry so we
+    // can restore it when we pop the stack entry.
+    //
+
+    // The pushed domain could be the default AppDomain (which is the starting
+    // AppDomain for all threads), in which case we don't need to intersect
+    // with the flags from the previous AppDomain.
+ Thread* pThread = GetThread();
+ if (pThread)
+ m_dwThreadWideSpecialFlags &= pThread->GetDomain()->GetSecurityDescriptor()->GetDomainWideSpecialFlag();
+
+ if (m_numEntries == ADSTACK_BLOCK_SIZE + m_ExtraStackSize)
+ {
+ AddMoreDomains();
+ }
+
+ _ASSERTE(m_numEntries < ADSTACK_BLOCK_SIZE + m_ExtraStackSize);
+ if (m_numEntries < ADSTACK_BLOCK_SIZE)
+ {
+ m_pStack[m_numEntries].m_domainID = pDomain;
+ m_pStack[m_numEntries].m_dwAsserts = 0;
+ m_pStack[m_numEntries].m_dwOverridesCount = 0;
+ m_pStack[m_numEntries].m_dwPreviousThreadWideSpecialFlags = m_dwThreadWideSpecialFlags;
+ }
+ else
+ {
+ m_pExtraStack[m_numEntries-ADSTACK_BLOCK_SIZE].m_domainID = pDomain ;
+ m_pExtraStack[m_numEntries-ADSTACK_BLOCK_SIZE].m_dwAsserts = 0;
+ m_pExtraStack[m_numEntries-ADSTACK_BLOCK_SIZE].m_dwOverridesCount = 0;
+ m_pExtraStack[m_numEntries-ADSTACK_BLOCK_SIZE].m_dwPreviousThreadWideSpecialFlags = m_dwThreadWideSpecialFlags;
+ }
+
+ if (pThread) {
+ AppDomainFromIDHolder pAppDomain(pDomain, TRUE);
+ if (!pAppDomain.IsUnloaded())
+ m_dwThreadWideSpecialFlags &= pAppDomain->GetSecurityDescriptor()->GetDomainWideSpecialFlag();
+ }
+
+ m_numEntries++;
+
+ LogADStackUpdateIfDebug;
+}
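+
+// Flag bookkeeping sketch: the thread-wide special flags behave like a stack of
+// intersections. PushDomain snapshots the pre-push value into the new entry and
+// ANDs in the entered domain's flags; PopDomain restores the snapshot:
+//
+//     DWORD saved = m_dwThreadWideSpecialFlags;        // stored in the pushed entry
+//     m_dwThreadWideSpecialFlags &= dwNewDomainFlags;  // narrowed while inside
+//     ...
+//     m_dwThreadWideSpecialFlags = saved;              // restored on PopDomain
+//
+// dwNewDomainFlags here stands in for the domain-wide special flag fetched from
+// the entered domain's security descriptor.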
+
+inline ADID AppDomainStack::PopDomain()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ ADID pRet = (ADID)INVALID_APPDOMAIN_ID;
+ _ASSERTE(m_numEntries > 0);
+ if (m_numEntries > 0)
+ {
+ m_numEntries--;
+ AppDomainStackEntry ret_entry;
+ const AppDomainStackEntry reset_entry = {ADID(INVALID_APPDOMAIN_ID), 0, 0};
+
+ if (m_numEntries < ADSTACK_BLOCK_SIZE)
+ {
+ ret_entry = m_pStack[m_numEntries];
+ m_pStack[m_numEntries] = reset_entry;
+ }
+ else
+ {
+ ret_entry = m_pExtraStack[m_numEntries-ADSTACK_BLOCK_SIZE];
+ m_pExtraStack[m_numEntries-ADSTACK_BLOCK_SIZE] = reset_entry;
+ }
+ pRet=ret_entry.m_domainID;
+
+ LOG((LF_APPDOMAIN, LL_INFO100, "PopDomain: Popping pRet.m_dwId [%d] m_dwAsserts:%d ret_entry.m_dwAsserts:%d. New m_dwAsserts:%d\n",
+ pRet.m_dwId, m_dwAsserts,ret_entry.m_dwAsserts, (m_dwAsserts-ret_entry.m_dwAsserts)));
+
+ m_dwAsserts -= ret_entry.m_dwAsserts;
+ m_dwOverridesCount -= ret_entry.m_dwOverridesCount;
+#ifdef _DEBUG
+ CheckOverridesAssertCounts();
+#endif
+
+ //
+ // When leaving an AppDomain, we need to update the thread wide state by
+ // restoring to the state we were in before entering the AppDomain
+ //
+
+ m_dwThreadWideSpecialFlags = ret_entry.m_dwPreviousThreadWideSpecialFlags;
+
+ LOG((LF_APPDOMAIN, LL_INFO100, "Thread::PopDomain popping [%d] count now %d\n",
+ pRet.m_dwId , m_numEntries));
+ }
+ else
+ {
+ LOG((LF_APPDOMAIN, LL_INFO100, "Thread::PopDomain count now %d (error pop)\n", m_numEntries));
+ }
+
+ LogADStackUpdateIfDebug;
+ return pRet;
+}
+#endif // DACCESS_COMPILE
+
+inline DWORD AppDomainStack::GetNumDomains() const
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_numEntries >= 1);
+ return m_numEntries;
+}
+
+inline DWORD AppDomainStack::GetThreadWideSpecialFlag() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_dwThreadWideSpecialFlags;
+}
+
+inline DWORD AppDomainStack::IncrementOverridesCount()
+{
+
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+        // Yes, we update global state here, but at worst we have an incorrect overrides count
+        // that will be updated the next time we run any code that leads to UpdateOverrides.
+        // And I don't even see how that can happen: it doesn't look possible for us to take
+        // an SO between the update and when we return to managed code.
+        SO_TOLERANT;
+    }CONTRACTL_END;
+ AppDomainStackEntry *pEntry = ReadTopOfStack();
+ _ASSERTE(pEntry->m_domainID.m_dwId != INVALID_APPDOMAIN_ID);
+ ++(pEntry->m_dwOverridesCount);
+ return ++m_dwOverridesCount;
+}
+inline DWORD AppDomainStack::DecrementOverridesCount()
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ SO_TOLERANT;
+ }CONTRACTL_END;
+ AppDomainStackEntry *pEntry = ReadTopOfStack();
+ _ASSERTE(pEntry->m_domainID.m_dwId != INVALID_APPDOMAIN_ID);
+ _ASSERTE(pEntry->m_dwOverridesCount > 0);
+ _ASSERTE(m_dwOverridesCount > 0);
+ if (pEntry->m_dwOverridesCount > 0 && m_dwOverridesCount > 0)
+ {
+ --(pEntry->m_dwOverridesCount);
+ return --m_dwOverridesCount;
+ }
+
+ return 0;
+}
+inline DWORD AppDomainStack::GetOverridesCount()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+#ifdef _DEBUG
+ CheckOverridesAssertCounts();
+#endif
+ return m_dwOverridesCount;
+}
+
+inline DWORD AppDomainStack::GetInnerAppDomainOverridesCount()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+#ifdef _DEBUG
+ CheckOverridesAssertCounts();
+#endif
+ AppDomainStackEntry *pEntry = ReadTopOfStack();
+ _ASSERTE(pEntry->m_domainID.m_dwId != INVALID_APPDOMAIN_ID);
+
+ return pEntry->m_dwOverridesCount;
+}
+
+inline DWORD AppDomainStack::IncrementAssertCount()
+{
+ LIMITED_METHOD_CONTRACT;
+ AppDomainStackEntry *pEntry = ReadTopOfStack();
+ _ASSERTE(pEntry->m_domainID.m_dwId != INVALID_APPDOMAIN_ID);
+ LOG((LF_APPDOMAIN, LL_INFO100, "IncrementAssertCount: m_dwAsserts:%d ADID:%d pEntry:%p pEntry->m_dwAsserts:%d.\n",
+ m_dwAsserts, pEntry->m_domainID.m_dwId, pEntry, pEntry->m_dwAsserts));
+ ++(pEntry->m_dwAsserts);
+ return ++m_dwAsserts;
+}
+inline DWORD AppDomainStack::DecrementAssertCount()
+{
+ LIMITED_METHOD_CONTRACT;
+ AppDomainStackEntry *pEntry = ReadTopOfStack();
+ _ASSERTE(pEntry->m_domainID.m_dwId != INVALID_APPDOMAIN_ID);
+ _ASSERTE(pEntry->m_dwAsserts > 0);
+ _ASSERTE(m_dwAsserts > 0);
+ LOG((LF_APPDOMAIN, LL_INFO100, "DecrementAssertCount: m_dwAsserts:%d ADID:%d pEntry:%p pEntry->m_dwAsserts:%d.\n",
+ m_dwAsserts, pEntry->m_domainID.m_dwId, pEntry, pEntry->m_dwAsserts));
+ --(pEntry->m_dwAsserts);
+ return --m_dwAsserts;
+}
+
+inline DWORD AppDomainStack::GetAssertCount()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+#ifdef _DEBUG
+ CheckOverridesAssertCounts();
+#endif
+
+ return m_dwAsserts;
+}
+
+inline DWORD AppDomainStack::GetInnerAppDomainAssertCount()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+#ifdef _DEBUG
+ CheckOverridesAssertCounts();
+#endif
+ AppDomainStackEntry *pEntry = ReadTopOfStack();
+ _ASSERTE(pEntry->m_domainID.m_dwId != INVALID_APPDOMAIN_ID);
+
+ return pEntry->m_dwAsserts;
+}
+
+inline void AppDomainStack::InitDomainIteration(DWORD *pIndex) const
+{
+ LIMITED_METHOD_CONTRACT;
+ *pIndex = m_numEntries;
+}
+
+inline ADID AppDomainStack::GetNextDomainOnStack(DWORD *pIndex, DWORD *pOverrides, DWORD *pAsserts) const
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(*pIndex > 0 && *pIndex <= m_numEntries);
+ (*pIndex) --;
+ const AppDomainStackEntry *pEntry = __GetEntryPtr(*pIndex);
+ if (pOverrides != NULL)
+ *pOverrides = pEntry->m_dwOverridesCount;
+ if (pAsserts != NULL)
+ *pAsserts = pEntry->m_dwAsserts;
+ return (ADID)pEntry->m_domainID.m_dwId;
+}
+
+inline AppDomainStackEntry* AppDomainStack::GetCurrentDomainEntryOnStack(DWORD pIndex)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+    _ASSERTE(pIndex < m_numEntries); // pIndex is a DWORD, so the >= 0 check is implicit
+ return __GetEntryPtr(pIndex);
+}
+
+inline AppDomainStackEntry* AppDomainStack::GetNextDomainEntryOnStack(DWORD *pIndex)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+    _ASSERTE(*pIndex > 0 && *pIndex <= m_numEntries);
+ (*pIndex) --;
+ return __GetEntryPtr(*pIndex);
+}
+
+inline void AppDomainStack::UpdateDomainOnStack(DWORD pIndex, DWORD asserts, DWORD overrides)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+ AppDomainStackEntry* entry;
+    _ASSERTE(pIndex < m_numEntries); // pIndex is a DWORD, so the >= 0 check is implicit
+ entry = __GetEntryPtr(pIndex);
+ _ASSERTE(entry->m_domainID.m_dwId != INVALID_APPDOMAIN_ID);
+ entry->m_dwAsserts = asserts;
+ entry->m_dwOverridesCount = overrides;
+ UpdateStackFromEntries();
+
+}
+
+
+inline void AppDomainStack::UpdateStackFromEntries()
+{
+ LIMITED_METHOD_CONTRACT;
+ DWORD dwAppDomainIndex = 0;
+ DWORD dwOverrides = 0;
+ DWORD dwAsserts = 0;
+ AppDomainStackEntry *pEntry = NULL;
+    for (dwAppDomainIndex = 0; dwAppDomainIndex < m_numEntries; dwAppDomainIndex++)
+ {
+ pEntry = __GetEntryPtr(dwAppDomainIndex);
+ dwOverrides += pEntry->m_dwOverridesCount;
+ dwAsserts += pEntry->m_dwAsserts;
+ }
+ LOG((LF_APPDOMAIN, LL_INFO100, "UpdateStackFromEntries: m_dwAsserts:%d Calculated dwAsserts:%d.\n",m_dwAsserts,dwAsserts));
+
+ m_dwAsserts = dwAsserts;
+ m_dwOverridesCount = dwOverrides;
+ return;
+}
+
+inline AppDomainStackEntry* AppDomainStack::ReadTopOfStack()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_numEntries > 0);
+ AppDomainStackEntry* pEntry = NULL;
+ if (m_numEntries <= ADSTACK_BLOCK_SIZE)
+ {
+ pEntry = &(m_pStack[m_numEntries-1]);
+ }
+ else
+ {
+ pEntry = &(m_pExtraStack[m_numEntries-ADSTACK_BLOCK_SIZE-1]);
+ }
+ return pEntry;
+}
+
+inline bool AppDomainStack::IsDefaultSecurityInfo() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_numEntries == 1 && m_pStack[0].m_domainID == ADID(DefaultADID) &&
+ m_pStack[0].m_dwAsserts == 0 && m_pStack[0].m_dwOverridesCount == 0);
+}
+inline void AppDomainStack::ClearDomainStack()
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }CONTRACTL_END;
+ m_dwThreadWideSpecialFlags = 0xFFFFFFFF;
+ m_numEntries = 1;
+ FillEntries(m_pStack, ADSTACK_BLOCK_SIZE);
+ if (m_pExtraStack != NULL)
+ delete[] m_pExtraStack;
+ m_pExtraStack = NULL;
+ m_ExtraStackSize = 0;
+ m_dwOverridesCount = 0;
+ LOG((LF_APPDOMAIN, LL_INFO100, "ClearDomainStack: m_dwAsserts:%d setting to 0\n",m_dwAsserts));
+ m_dwAsserts = 0;
+ m_pStack[0].m_domainID = ADID(DefaultADID);
+}
+
+#endif
diff --git a/src/vm/appxutil.cpp b/src/vm/appxutil.cpp
new file mode 100644
index 0000000000..a29066d75c
--- /dev/null
+++ b/src/vm/appxutil.cpp
@@ -0,0 +1,243 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+//
+// Provides VM-specific AppX utility code.
+
+#include "common.h"
+
+#include "utilcode.h"
+#include "holder.h"
+#include "volatile.h"
+#include "appxutil.h"
+#include "ex.h"
+
+#include "Windows.ApplicationModel.h"
+#include "Windows.ApplicationModel.Core.h"
+
+namespace AppX
+{
+ //-----------------------------------------------------------------------------------
+ // This is a small helper class designed to ensure that the current thread is
+ // RoInitialized for the lifetime of the holder. Use this holder only if code does
+ // not store any WinRT interfaces in locations that will out-live the holder
+ // itself.
+
+ class RoInitializeHolder
+ {
+ public:
+ enum ThreadingModel
+ {
+ MultiThreaded, // Require multi-threaded model
+ SingleThreaded, // Require single-threaded model
+ AnyThreadedMultiPreferred // Any threading model is ok;
+ // prefer multi-threaded model
+ };
+
+ RoInitializeHolder(
+ ThreadingModel threadingModel) // desired/preferred apartment model
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ HRESULT hr = S_OK;
+
+ {
+ GCX_PREEMP();
+ LeaveRuntimeHolder lrh(::RoInitialize);
+
+ // Prefer MultiThreaded when AnyThreadedMultiPreferred is specified.
+ hr = ::RoInitialize((threadingModel == SingleThreaded) ? RO_INIT_SINGLETHREADED
+ : RO_INIT_MULTITHREADED);
+ }
+
+            // Success means that the thread's RoInitialize ref count has been incremented,
+            // and must be paired with a call to RoUninitialize.
+ _uninitRequired = SUCCEEDED(hr);
+
+ if (FAILED(hr))
+ {
+ // Throw if:
+ // 1. RoInitialize failed for any reason other than RPC_E_CHANGED_MODE
+ // 2. RoInitialize failed with RPC_E_CHANGED_MODE and caller will not
+ // accept a different apartment model.
+ if (hr != RPC_E_CHANGED_MODE || threadingModel != AnyThreadedMultiPreferred)
+ {
+ // Note: throwing here will cause us to skip the dtor, but will only
+ // do so when SUCCEEDED(hr) is FALSE, which means that _uninitRequired
+ // is also FALSE so there is no RoInitialize refcount leak here.
+ _ASSERTE(!_uninitRequired);
+
+ ThrowHR(hr);
+ }
+ }
+ }
+
+ // Ensures RoUninitialize is called (if needed) before holder falls out of scope.
+ ~RoInitializeHolder()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (_uninitRequired)
+ {
+ _uninitRequired = false;
+ ::RoUninitialize();
+ }
+ }
+
+ private:
+        bool _uninitRequired; // Is a call to RoUninitialize required?
+ };
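+
+    // Usage sketch (hypothetical caller): keep the thread RoInitialized for one scope,
+    // tolerating a thread that is already in a different apartment:
+    //
+    //     {
+    //         RoInitializeHolder roInit(RoInitializeHolder::AnyThreadedMultiPreferred);
+    //         // ... call WinRT APIs; do not cache interfaces beyond this scope ...
+    //     }   // ~RoInitializeHolder calls RoUninitialize if RoInitialize succeeded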
+
+ //-----------------------------------------------------------------------------------
+
+ HRESULT IsAppXDesignModeWorker(bool * pfResult)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ HRESULT hr = S_OK;
+
+ boolean fDesignModeEnabled = false;
+
+ // Delayloaded entrypoint may throw.
+ EX_TRY
+ {
+ // Ensure that thread is initialized for WinRT; either apt model will work for this API.
+ RoInitializeHolder hRoInit(RoInitializeHolder::AnyThreadedMultiPreferred);
+
+ ReleaseHolder<ABI::Windows::ApplicationModel::IDesignModeStatics> pIDesignMode;
+ IfFailThrow(clr::winrt::GetActivationFactory(
+ RuntimeClass_Windows_ApplicationModel_DesignMode, pIDesignMode));
+
+ IfFailThrow(pIDesignMode->get_DesignModeEnabled(&fDesignModeEnabled));
+ }
+ EX_CATCH_HRESULT(hr)
+ IfFailRet(hr);
+
+        *pfResult = !!fDesignModeEnabled;
+        return S_OK;
+ }
+
+ //-----------------------------------------------------------------------------------
+ // Returns true if running in an AppX process with DevMode enabled.
+
+ bool IsAppXDesignMode()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+#ifdef FEATURE_CORECLR
+ // CoreCLR does not have proper support for AppX design mode. Once/if it has one, it should not need
+ // any special casing like desktop. Avoid the expensive check completely.
+ return false;
+#else
+ // DevMode does not change over the lifetime of a process and is expensive to compute
+ // Cache the first answer and return it once computed; idempotent so races are fine
+ static enum
+ {
+ CachedAppxMode_Unknown,
+ CachedAppxMode_Normal,
+ CachedAppxMode_Design
+ }
+ s_cachedAppxMode = CachedAppxMode_Unknown;
+
+ bool result = false;
+
+ switch (s_cachedAppxMode)
+ {
+ case CachedAppxMode_Unknown:
+ if (SUCCEEDED(IsAppXDesignModeWorker(&result)))
+ { // Cache the result on success; otherwise use the default value of false.
+ s_cachedAppxMode = result ? CachedAppxMode_Design : CachedAppxMode_Normal;
+ }
+ break;
+
+ case CachedAppxMode_Normal:
+ result = false;
+ break;
+
+ case CachedAppxMode_Design:
+ result = true;
+ break;
+ }
+
+#ifdef _DEBUG
+ bool dbg_result = false;
+ _ASSERTE(FAILED(IsAppXDesignModeWorker(&dbg_result)) || dbg_result == result);
+#endif
+
+ return result;
+#endif // FEATURE_CORECLR
+ }
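+
+    // Caching note (sketch): the switch above is a benign-race cache. Several threads
+    // may observe CachedAppxMode_Unknown and each invoke the worker, but every
+    // successful call computes the same answer, so the racing stores are idempotent:
+    //
+    //     if (s_cachedAppxMode == CachedAppxMode_Unknown && SUCCEEDED(IsAppXDesignModeWorker(&result)))
+    //         s_cachedAppxMode = result ? CachedAppxMode_Design : CachedAppxMode_Normal;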
+
+ HRESULT GetApplicationId(LPCWSTR& rString)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+        // The PRAID is a static value for the life of the process. The reason for caching is
+        // that the Watson bucketing code requires this value during unhandled exception
+        // processing, and due to the contracts in that code it cannot tolerate the switch to
+        // preemptive mode when calling out to WinRT.
+ static LPCWSTR s_wzPraid = nullptr;
+
+ HRESULT hr = S_OK;
+
+ if (s_wzPraid == nullptr)
+ {
+ ReleaseHolder<ABI::Windows::ApplicationModel::Core::ICoreApplication> coreApp;
+
+ hr = clr::winrt::GetActivationFactory(RuntimeClass_Windows_ApplicationModel_Core_CoreApplication, coreApp);
+
+ if (SUCCEEDED(hr))
+ {
+ WinRtString winrtAppId;
+ hr = coreApp->get_Id(winrtAppId.Address());
+
+ if (SUCCEEDED(hr))
+ {
+ LPCWSTR wzPraid = DuplicateString(winrtAppId.GetRawBuffer(), winrtAppId.size());
+ if (wzPraid)
+ {
+ if (InterlockedCompareExchangeT(&s_wzPraid, wzPraid, nullptr) != nullptr)
+ delete[] wzPraid;
+ }
+ else
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ }
+ }
+ }
+
+ rString = s_wzPraid;
+
+ return hr;
+ }
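+
+    // Usage sketch (hypothetical caller):
+    //
+    //     LPCWSTR wzAppId = nullptr;
+    //     if (SUCCEEDED(AppX::GetApplicationId(wzAppId)) && wzAppId != nullptr)
+    //     {
+    //         // wzAppId stays valid for the life of the process; callers must not free it.
+    //     }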
+
+
+}
+
+
diff --git a/src/vm/appxutil.h b/src/vm/appxutil.h
new file mode 100644
index 0000000000..70fd54e780
--- /dev/null
+++ b/src/vm/appxutil.h
@@ -0,0 +1,32 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// Provides VM-specific AppX utility code.
+
+#ifndef vm_AppXUtil_h
+#define vm_AppXUtil_h
+
+#include "../inc/appxutil.h"
+
+namespace AppX
+{
+#if defined(FEATURE_APPX) && !defined(CROSSGEN_COMPILE) && !defined(CLR_STANDALONE_BINDER)
+ //-----------------------------------------------------------------------------------
+ // Returns true if running in an AppX process with Designer Mode enabled.
+ bool IsAppXDesignMode();
+
+ // Return Application.Id
+ HRESULT GetApplicationId(LPCWSTR& rString);
+#else // FEATURE_APPX
+ inline bool IsAppXDesignMode()
+ {
+ return false;
+ }
+#endif // FEATURE_APPX && !CROSSGEN_COMPILE && !CLR_STANDALONE_BINDER
+}
+
+#endif // vm_AppXUtil_h
diff --git a/src/vm/aptca.cpp b/src/vm/aptca.cpp
new file mode 100644
index 0000000000..fdb8869386
--- /dev/null
+++ b/src/vm/aptca.cpp
@@ -0,0 +1,1364 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//--------------------------------------------------------------------------
+// aptca.cpp
+//
+// Functions for handling allow partially trusted callers assemblies
+//
+
+//
+//--------------------------------------------------------------------------
+
+
+#include "common.h"
+#include "aptca.h"
+
+//
+// Conditional APTCA cache implementation
+//
+
+ConditionalAptcaCache::ConditionalAptcaCache(AppDomain *pAppDomain)
+ : m_pAppDomain(pAppDomain),
+ m_canonicalListIsNull(false),
+ m_domainState(kDomainStateUnknown)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(pAppDomain != NULL);
+}
+
+ConditionalAptcaCache::~ConditionalAptcaCache()
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+void ConditionalAptcaCache::SetCachedState(PTR_PEImage pImage, ConditionalAptcaCache::State state)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pImage));
+ PRECONDITION(state != kUnknown);
+ }
+ CONTRACTL_END;
+
+ if (state == kNotCAptca)
+ {
+ pImage->SetIsNotConditionalAptca();
+ }
+}
+
+ConditionalAptcaCache::State ConditionalAptcaCache::GetCachedState(PTR_PEImage pImage)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pImage));
+ }
+ CONTRACTL_END;
+
+ if (!pImage->MayBeConditionalAptca())
+ {
+ return kNotCAptca;
+ }
+
+ return kUnknown;
+}
+
+void ConditionalAptcaCache::SetCanonicalConditionalAptcaList(LPCWSTR wszCanonicalConditionalAptcaList)
+{
+ WRAPPER_NO_CONTRACT;
+ m_canonicalListIsNull = (wszCanonicalConditionalAptcaList == NULL);
+ m_canonicalList.Set(wszCanonicalConditionalAptcaList);
+}
+
+#ifndef CROSSGEN_COMPILE
+ConditionalAptcaCache::DomainState ConditionalAptcaCache::GetConditionalAptcaDomainState()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_domainState == kDomainStateUnknown)
+ {
+ IApplicationSecurityDescriptor *pASD = m_pAppDomain->GetSecurityDescriptor();
+ DomainState domainState = kDomainStateUnknown;
+
+ // In the full trust case we only need to look at the conditional APTCA list in the case that the host
+ // has configured one on the default domain (for instance WPF). Otherwise, all full trust domains have
+ // all conditional APTCA assemblies enabled.
+ bool processFullTrustAptcaList = false;
+ if (m_pAppDomain->IsCompilationDomain())
+ {
+ processFullTrustAptcaList = false;
+ }
+ else if (m_pAppDomain->IsDefaultDomain())
+ {
+ processFullTrustAptcaList = !m_canonicalListIsNull;
+ }
+ else
+ {
+ processFullTrustAptcaList = ConsiderFullTrustConditionalAptcaLists();
+ }
+
+ // Consider the domain to be fully trusted if it really is fully trusted, or if we're currently
+ // setting the domain up, it looks like it will be fully trusted, and the AppDomainManager has
+ // promised that won't change.
+ bool isFullTrustDomain = !m_pAppDomain->GetSecurityDescriptor()->DomainMayContainPartialTrustCode();
+ if (pASD->IsInitializationInProgress() && (m_pAppDomain->GetAppDomainManagerInitializeNewDomainFlags() & eInitializeNewDomainFlags_NoSecurityChanges))
+ {
+ BOOL preResolveFullTrust;
+ BOOL preResolveHomogenous;
+ pASD->PreResolve(&preResolveFullTrust, &preResolveHomogenous);
+
+ isFullTrustDomain = preResolveFullTrust && preResolveHomogenous;
+ }
+
+ if (m_pAppDomain->IsCompilationDomain())
+ {
+ // NGEN always enables all conditional APTCA assemblies
+ domainState = kAllEnabled;
+ }
+ else if (!isFullTrustDomain || processFullTrustAptcaList)
+ {
+ if (m_canonicalList.GetCount() == 0)
+ {
+ // A null or empty conditional APTCA list means that no assemblies are enabled in this domain
+ domainState = kAllDisabled;
+ }
+ else
+ {
+ // We're in a domain that supports conditional APTCA and an interesting list is supplied. In
+ // this domain, some assemblies are enabled.
+ domainState = kSomeEnabled;
+ }
+ }
+ else
+ {
+ domainState = kAllEnabled;
+ }
+
+ _ASSERTE(domainState != kDomainStateUnknown);
+ InterlockedCompareExchange(reinterpret_cast<volatile LONG *>(&m_domainState), domainState, kDomainStateUnknown);
+ }
+
+ return m_domainState;
+}
+
+// static
+bool ConditionalAptcaCache::ConsiderFullTrustConditionalAptcaLists()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (GetAppDomain()->IsCompilationDomain())
+ {
+ return false;
+ }
+
+ IApplicationSecurityDescriptor *pASD = SystemDomain::System()->DefaultDomain()->GetSecurityDescriptor();
+ ConditionalAptcaCache *pDefaultDomainCaptca = pASD->GetConditionalAptcaCache();
+
+ // The only way that we use CAPTCA lists is if the host has configured the default domain to not be all
+ // enabled (that is, the host has setup a CAPTCA list of any sort for the default domain)
+ return pDefaultDomainCaptca->GetConditionalAptcaDomainState() != kAllEnabled;
+}
+
+// APTCA killbit list helper functions
+namespace
+{
+ static const LPCWSTR wszAptcaRootKey = W("SOFTWARE\\Microsoft\\.NETFramework\\Policy\\APTCA");
+
+ //--------------------------------------------------------------------------------------------------------
+ //
+ // The AptcaKillBitList class is responsible for holding the machine wide list of assembly name / file
+ // versions which have been disabled for APTCA on the machine.
+ //
+
+ class AptcaKillBitList
+ {
+ private:
+ ArrayList m_killBitList;
+
+ public:
+ ~AptcaKillBitList();
+
+ bool AreAnyAssembliesKillBitted();
+ bool IsAssemblyKillBitted(PEAssembly *pAssembly);
+ bool IsAssemblyKillBitted(IAssemblyName *pAssemblyName, ULARGE_INTEGER fileVersion);
+
+ static AptcaKillBitList *ReadMachineKillBitList();
+
+ private:
+ AptcaKillBitList();
+ AptcaKillBitList(const AptcaKillBitList &other); // not implemented
+
+ private:
+ static const LPCWSTR wszKillBitValue;
+
+ private:
+ static bool FileVersionsAreEqual(ULARGE_INTEGER targetVersion, IAssemblyName *pKillBitAssemblyName);
+ };
+ const LPCWSTR AptcaKillBitList::wszKillBitValue = W("APTCA_FLAG");
+
+ AptcaKillBitList::AptcaKillBitList()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ AptcaKillBitList::~AptcaKillBitList()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // Release all of the IAssemblyName objects stored in this list
+ for (DWORD i = 0; i < m_killBitList.GetCount(); ++i)
+ {
+ IAssemblyName *pKillBitAssemblyName = reinterpret_cast<IAssemblyName *>(m_killBitList.Get(i));
+ if (pKillBitAssemblyName != NULL)
+ {
+ pKillBitAssemblyName->Release();
+ }
+ }
+ }
+
+ //--------------------------------------------------------------------------------------------------------
+ //
+ // Determine if any assemblies are on the APTCA killbit list
+ //
+
+ bool AptcaKillBitList::AreAnyAssembliesKillBitted()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // We don't consider the killbit for NGEN, as ngened code always assumes that APTCA is enabled.
+ if (GetAppDomain()->IsCompilationDomain())
+ {
+ return false;
+ }
+
+ return m_killBitList.GetCount() > 0;
+ }
+
+ //--------------------------------------------------------------------------------------------------------
+ //
+    // Compare the file versions of an assembly with the version that is being killbitted to see if they
+ // match. For compatibility with v3.5, we assume any failure means that the versions do not match.
+ //
+
+ // static
+ bool AptcaKillBitList::FileVersionsAreEqual(ULARGE_INTEGER targetVersion, IAssemblyName *pKillBitAssemblyName)
+ {
+ DWORD dwKillBitMajorVersion = 0;
+ DWORD dwVersionSize = sizeof(dwKillBitMajorVersion);
+ if (FAILED(pKillBitAssemblyName->GetProperty(ASM_NAME_FILE_MAJOR_VERSION, &dwKillBitMajorVersion, &dwVersionSize)) ||
+ dwVersionSize == 0)
+ {
+ return false;
+ }
+
+ DWORD dwKillBitMinorVersion = 0;
+ dwVersionSize = sizeof(dwKillBitMinorVersion);
+ if (FAILED(pKillBitAssemblyName->GetProperty(ASM_NAME_FILE_MINOR_VERSION, &dwKillBitMinorVersion, &dwVersionSize)) ||
+ dwVersionSize == 0)
+ {
+ return false;
+ }
+
+ DWORD dwKillBitBuildVersion = 0;
+ dwVersionSize = sizeof(dwKillBitBuildVersion);
+ if (FAILED(pKillBitAssemblyName->GetProperty(ASM_NAME_FILE_BUILD_NUMBER, &dwKillBitBuildVersion, &dwVersionSize)) ||
+ dwVersionSize == 0)
+ {
+ return false;
+ }
+
+ DWORD dwKillBitRevisionVersion = 0;
+ dwVersionSize = sizeof(dwKillBitRevisionVersion);
+ if (FAILED(pKillBitAssemblyName->GetProperty(ASM_NAME_FILE_REVISION_NUMBER, &dwKillBitRevisionVersion, &dwVersionSize)) ||
+ dwVersionSize == 0)
+ {
+ return false;
+ }
+
+ DWORD dwTargetMajorVersion = (targetVersion.HighPart & 0xFFFF0000) >> 16;
+ DWORD dwTargetMinorVersion = targetVersion.HighPart & 0x0000FFFF;
+ DWORD dwTargetBuildVersion = (targetVersion.LowPart & 0xFFFF0000) >> 16;
+ DWORD dwTargetRevisionVersion = targetVersion.LowPart & 0x0000FFFF;
+
+ return dwTargetMajorVersion == dwKillBitMajorVersion &&
+ dwTargetMinorVersion == dwKillBitMinorVersion &&
+ dwTargetBuildVersion == dwKillBitBuildVersion &&
+ dwTargetRevisionVersion == dwKillBitRevisionVersion;
+ }
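+
+    // Version packing sketch: GetFileVersion yields the Win32 VS_FIXEDFILEINFO layout,
+    // where a four-part version a.b.c.d is packed as
+    //     HighPart = (a << 16) | b,   LowPart = (c << 16) | d
+    // e.g. 4.0.30319.18408 gives HighPart 0x00040000 and LowPart 0x766F47E8, which the
+    // shifts and masks above unpack back into the four parts.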
+
+ //--------------------------------------------------------------------------------------------------------
+ //
+ // Determine if a specific assembly is on the killbit list
+ //
+
+ bool AptcaKillBitList::IsAssemblyKillBitted(PEAssembly *pAssembly)
+ {
+ STANDARD_VM_CONTRACT;
+
+ IAssemblyName *pTargetAssemblyName = pAssembly->GetFusionAssemblyName();
+
+        // For compat with v3.5, we use the Win32 file version here rather than the Fusion version
+ LPCWSTR pwszPath = pAssembly->GetPath().GetUnicode();
+ if (pwszPath != NULL)
+ {
+ ULARGE_INTEGER fileVersion = { 0, 0 };
+ HRESULT hr = GetFileVersion(pwszPath, &fileVersion);
+ if (SUCCEEDED(hr))
+ {
+ return IsAssemblyKillBitted(pTargetAssemblyName, fileVersion);
+ }
+ }
+
+ return false;
+ }
+
+ //--------------------------------------------------------------------------------------------------------
+ //
+ // Determine if a specific assembly is on the killbit list
+ //
+
+ bool AptcaKillBitList::IsAssemblyKillBitted(IAssemblyName *pTargetAssemblyName, ULARGE_INTEGER fileVersion)
+ {
+ STANDARD_VM_CONTRACT;
+
+ // If nothing is killbitted, then this assembly cannot be killbitted
+ if (!AreAnyAssembliesKillBitted())
+ {
+ return false;
+ }
+
+ for (DWORD i = 0; i < m_killBitList.GetCount(); ++i)
+ {
+ IAssemblyName *pKillBitAssemblyName = reinterpret_cast<IAssemblyName *>(m_killBitList.Get(i));
+
+ // By default, we compare all fields of the assembly's name, however if the culture was neutral,
+ // we strip that out.
+ DWORD dwCmpFlags = ASM_CMPF_IL_ALL;
+
+ DWORD cbCultureSize = 0;
+ SString strCulture;
+ HRESULT hrCulture = pKillBitAssemblyName->GetProperty(ASM_NAME_CULTURE, NULL, &cbCultureSize);
+ if (hrCulture == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ {
+ DWORD cchCulture = (cbCultureSize / sizeof(WCHAR)) - 1;
+ WCHAR *wszCultureBuffer = strCulture.OpenUnicodeBuffer(cchCulture);
+ hrCulture = pKillBitAssemblyName->GetProperty(ASM_NAME_CULTURE, wszCultureBuffer, &cbCultureSize);
+ strCulture.CloseBuffer();
+ }
+
+ if (SUCCEEDED(hrCulture))
+ {
+ if (cbCultureSize == 0 || strCulture.EqualsCaseInsensitive(W("")) || strCulture.EqualsCaseInsensitive(W("neutral")))
+ {
+ dwCmpFlags &= ~ASM_CMPF_CULTURE;
+ }
+ }
+
+ // If the input assembly matches the kill bit assembly's name and file version, then we need to
+ // kill it.
+ if (pTargetAssemblyName->IsEqual(pKillBitAssemblyName, dwCmpFlags) == S_OK &&
+ FileVersionsAreEqual(fileVersion, pKillBitAssemblyName))
+ {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ //--------------------------------------------------------------------------------------------------------
+ //
+ // Read the machine-wide APTCA kill bit list into a kill bit list object. For compatibility with v3.5,
+ // errors during this initialization are ignored - leading to APTCA entries that may not be considered
+ // for kill bitting.
+ //
+
+ // static
+ AptcaKillBitList *AptcaKillBitList::ReadMachineKillBitList()
+ {
+ CONTRACT(AptcaKillBitList *)
+ {
+ STANDARD_VM_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ NewHolder<AptcaKillBitList> pKillBitList(new AptcaKillBitList);
+
+ HKEYHolder hKeyAptca;
+
+ // Open the APTCA subkey in the registry.
+ if (WszRegOpenKeyEx(HKEY_LOCAL_MACHINE, wszAptcaRootKey, 0, KEY_READ, &hKeyAptca) == ERROR_SUCCESS)
+ {
+
+ DWORD cchSubKeySize = 0;
+ if (WszRegQueryInfoKey(hKeyAptca, NULL, NULL, NULL, NULL, &cchSubKeySize, NULL, NULL, NULL, NULL, NULL, NULL) != ERROR_SUCCESS)
+ {
+ cchSubKeySize = MAX_PATH;
+ }
+ ++cchSubKeySize;
+
+ NewArrayHolder<WCHAR> wszSubKey(new WCHAR[cchSubKeySize]);
+
+ DWORD dwKey = 0;
+ DWORD cchWszSubKey = cchSubKeySize;
+ // Assembly specific records are represented as subkeys of the key we've just opened with names
+ // equal to the strong name of the assembly being kill bitted, and a value of APTCA_FLAG = 1.
+ while (WszRegEnumKeyEx(hKeyAptca, dwKey, wszSubKey, &cchWszSubKey, NULL, NULL, NULL, NULL) == ERROR_SUCCESS)
+ {
+ ++dwKey;
+ cchWszSubKey = cchSubKeySize;
+
+ // Open the subkey: the key name is the full name of the assembly to potentially kill-bit
+ HKEYHolder hSubKey;
+ if (WszRegOpenKeyEx(hKeyAptca, wszSubKey, 0, KEY_READ, &hSubKey) != ERROR_SUCCESS)
+ {
+ continue;
+ }
+
+ DWORD dwKillbit = 0;
+ DWORD dwType = REG_DWORD;
+ DWORD dwSize = sizeof(dwKillbit);
+
+ // look for the APTCA flag
+ LONG queryValue = WszRegQueryValueEx(hSubKey,
+ wszKillBitValue,
+ NULL,
+ &dwType,
+ reinterpret_cast<LPBYTE>(&dwKillbit),
+ &dwSize);
+ if (queryValue == ERROR_SUCCESS && dwKillbit == 1)
+ {
+ // We have a strong named assembly with an APTCA killbit value set - parse the key into
+ // an assembly name, and add it to our list
+ ReleaseHolder<IAssemblyName> pKillBitAssemblyName;
+ HRESULT hrAssemblyName = CreateAssemblyNameObject(&pKillBitAssemblyName, wszSubKey, CANOF_PARSE_DISPLAY_NAME, NULL);
+ if (FAILED(hrAssemblyName))
+ {
+ continue;
+ }
+
+ //
+ // For compatibility with v3.5, we only accept kill bit entries which have four part
+ // assembly versions, names, and public key tokens.
+ //
+
+ // Verify the version first
+ bool validVersion = true;
+ for (DWORD dwVersionPartId = ASM_NAME_MAJOR_VERSION; dwVersionPartId <= ASM_NAME_REVISION_NUMBER; ++dwVersionPartId)
+ {
+ DWORD dwVersionPart;
+ DWORD cbVersionPart = sizeof(dwVersionPart);
+ HRESULT hrVersion = pKillBitAssemblyName->GetProperty(dwVersionPartId, &dwVersionPart, &cbVersionPart);
+ if (FAILED(hrVersion) || cbVersionPart == 0)
+ {
+ validVersion = false;
+ }
+ }
+ if (!validVersion)
+ {
+ continue;
+ }
+
+ // Make sure there is a simple name
+ DWORD cbNameSize = 0;
+ HRESULT hrName = pKillBitAssemblyName->GetProperty(ASM_NAME_NAME, NULL, &cbNameSize);
+ if (hrName != HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ {
+ continue;
+ }
+
+ // Verify the killbit assembly has a public key token
+ DWORD cbPublicKeyTokenSize = 0;
+ HRESULT hrPublicKey = pKillBitAssemblyName->GetProperty(ASM_NAME_PUBLIC_KEY_TOKEN, NULL, &cbPublicKeyTokenSize);
+ if (hrPublicKey != HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ {
+ continue;
+ }
+
+ // Verify the killbit assembly has either no culture or a valid culture token
+ DWORD cbCultureSize = 0;
+ HRESULT hrCulture = pKillBitAssemblyName->GetProperty(ASM_NAME_CULTURE, NULL, &cbCultureSize);
+ if (FAILED(hrCulture) && hrCulture != HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ {
+ continue;
+ }
+
+ // The name checks out, so add the kill bit entry
+ LOG((LF_SECURITY,
+ LL_INFO10,
+ "APTCA killbit added for assembly '%S'.\n",
+ wszSubKey));
+ pKillBitList->m_killBitList.Append(pKillBitAssemblyName.Extract());
+ }
+ }
+ }
+
+ RETURN(pKillBitList.Extract());
+ }
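+
+    // Registry layout sketch (illustrative key and values): a kill-bitted assembly
+    // shows up as a subkey whose name is the assembly's display name:
+    //
+    //     HKLM\SOFTWARE\Microsoft\.NETFramework\Policy\APTCA\
+    //         SomeAssembly, Version=1.2.3.4, Culture=neutral, PublicKeyToken=0123456789abcdef
+    //             APTCA_FLAG (REG_DWORD) = 1
+    //
+    // The subkey name is parsed via CreateAssemblyNameObject with CANOF_PARSE_DISPLAY_NAME;
+    // SomeAssembly and its version/token above are made-up example values.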
+
+ VolatilePtr<AptcaKillBitList> g_pAptcaKillBitList(NULL);
+
+ //--------------------------------------------------------------------------------------------------------
+ //
+ // Get the APTCA killbit list
+ //
+
+ AptcaKillBitList *GetKillBitList()
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (g_pAptcaKillBitList.Load() == NULL)
+ {
+ NewHolder<AptcaKillBitList> pAptcaKillBitList(AptcaKillBitList::ReadMachineKillBitList());
+
+ LPVOID pvOldValue = InterlockedCompareExchangeT(g_pAptcaKillBitList.GetPointer(),
+ pAptcaKillBitList.GetValue(),
+ NULL);
+ if (pvOldValue == NULL)
+ {
+ pAptcaKillBitList.SuppressRelease();
+ }
+ }
+
+ _ASSERTE(g_pAptcaKillBitList.Load() != NULL);
+ return g_pAptcaKillBitList.Load();
+ }
+}
+
+// APTCA helper functions
+namespace
+{
+ enum ConditionalAptcaSharingMode
+ {
+ kShareUnknown,
+ kShareIfEnabled, // Share an assembly only if all conditional APTCA assemblies in its closure are enabled
+ kShareIfDisabled, // Share an assembly only if all conditional APTCA assemblies in its closure are disabled
+ };
+
+ //--------------------------------------------------------------------------------------------------------
+ //
+ // Get the name of an assembly as it would appear in the APTCA enabled list of an AppDomain
+ //
+
+ void GetAssemblyNameForConditionalAptca(Assembly *pAssembly, SString *pAssemblyName)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(CheckPointer(pAssemblyName));
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ // Call assembly.GetName().GetNameWithPublicKey() to get the name the user would have to add to the
+ // whitelist to enable this assembly
+ struct
+ {
+ OBJECTREF orAssembly;
+ STRINGREF orAssemblyName;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.orAssembly = pAssembly->GetExposedObject();
+ MethodDescCallSite getAssemblyName(METHOD__ASSEMBLY__GET_NAME_FOR_CONDITIONAL_APTCA, &gc.orAssembly);
+ ARG_SLOT args[1] =
+ {
+ ObjToArgSlot(gc.orAssembly)
+ };
+ gc.orAssemblyName = getAssemblyName.Call_RetSTRINGREF(args);
+
+ // Copy to assemblyName
+ pAssemblyName->Set(gc.orAssemblyName->GetBuffer());
+
+ GCPROTECT_END();
+ }
+
+ //--------------------------------------------------------------------------------------------------------
+ //
+ // Determine which types of conditional APTCA assemblies may be shared
+ //
+
+ ConditionalAptcaSharingMode GetConditionalAptcaSharingMode()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ static ConditionalAptcaSharingMode sharingMode = kShareUnknown;
+
+ if (sharingMode == kShareUnknown)
+ {
+ // If the default domain has any conditional APTCA assemblies enabled in it, then we share in the
+ // enabled direction. Otherwise, the default domain has all conditional APTCA assemblies disabled
+ // so we need to share in the disabled direction
+ ConditionalAptcaCache *pDefaultDomainCache = SystemDomain::System()->DefaultDomain()->GetSecurityDescriptor()->GetConditionalAptcaCache();
+ ConditionalAptcaCache::DomainState domainState = pDefaultDomainCache->GetConditionalAptcaDomainState();
+
+ if (domainState == ConditionalAptcaCache::kAllDisabled)
+ {
+ sharingMode = kShareIfDisabled;
+ }
+ else
+ {
+ sharingMode = kShareIfEnabled;
+ }
+ }
+
+ return sharingMode;
+ }
+
+ /* XXX Fri 7/17/2009
+ * I can't call DomainAssembly::IsConditionalAPTCAVisible() here. That requires an Assembly which means
+ * we have to be at FILE_LOAD_ALLOCATE. There are two problems:
+ * 1) We don't want to load dependencies here if we can avoid it
+ * 2) We can't load them anyway (hard bound dependencies can't get past
+ * FILE_LOAD_VERIFY_NATIVE_IMAGE_DEPENDENCIES.
+ *
+ * We're going to do a relaxed check here. Instead of checking the public key, we're
+ * only going to check the public key token. See
+ * code:AppDomain::IsAssemblyOnAptcaVisibleListRaw for more information.
+ *
+ * pAsmName - The name of the assembly to check.
+ * pDomainAssembly - The Domain Assembly used for logging.
+ */
+ bool IsAssemblyOnAptcaVisibleList(IAssemblyName * pAsmName, DomainAssembly *pDomainAssembly)
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pAsmName));
+ }
+ CONTRACTL_END;
+
+ ConditionalAptcaCache *pDomainCache = pDomainAssembly->GetAppDomain()->GetSecurityDescriptor()->GetConditionalAptcaCache();
+ if (pDomainCache->GetConditionalAptcaDomainState() == ConditionalAptcaCache::kAllEnabled)
+ {
+ return true;
+ }
+
+ CQuickBytes qbName;
+ LPWSTR pszName;
+ DWORD cbName = 0;
+ HRESULT hr = pAsmName->GetProperty(ASM_NAME_NAME, NULL, &cbName);
+ if (hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ {
+ pszName = (LPWSTR)qbName.AllocThrows(cbName);
+ }
+ else
+ {
+ pDomainAssembly->ExternalLog(LL_ERROR, W("Rejecting native image / code sharing because there was an ")
+ W("error checking for conditional APTCA: 0x%x"), hr);
+ return false;
+ }
+ hr = pAsmName->GetProperty(ASM_NAME_NAME, (void *)pszName, &cbName);
+ if (FAILED(hr))
+ {
+ pDomainAssembly->ExternalLog(LL_ERROR, W("Rejecting native image / code sharing because there was an ")
+ W("error checking for conditional APTCA: 0x%x"), hr);
+ return false;
+ }
+ BYTE rgPublicKeyToken[8];
+ DWORD cbPkt = _countof(rgPublicKeyToken);
+ hr = pAsmName->GetProperty(ASM_NAME_PUBLIC_KEY_TOKEN,
+ (void*)rgPublicKeyToken, &cbPkt);
+ if (FAILED(hr))
+ {
+ pDomainAssembly->ExternalLog(LL_ERROR, W("Rejecting native image / code sharing because there was an ")
+ W("error obtaining the public key token for %s: 0x%x"),
+ pszName, hr);
+ return false;
+ }
+
+ GCX_COOP();
+
+ CLR_BOOL isVisible = FALSE;
+
+ struct
+ {
+ OBJECTREF orThis;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+ gc.orThis = pDomainAssembly->GetAppDomain()->GetExposedObject();
+
+ MethodDescCallSite assemblyVisible(METHOD__APP_DOMAIN__IS_ASSEMBLY_ON_APTCA_VISIBLE_LIST_RAW,
+ &gc.orThis);
+ ARG_SLOT args[] = {
+ ObjToArgSlot(gc.orThis),
+ (ARG_SLOT)pszName,
+ (ARG_SLOT)wcslen(pszName),
+ (ARG_SLOT)rgPublicKeyToken,
+ (ARG_SLOT)cbPkt
+ };
+ isVisible = assemblyVisible.Call_RetBool(args);
+ GCPROTECT_END();
+
+ return isVisible;
+ }
+
+ bool IsAssemblyOnAptcaVisibleList(DomainAssembly *pAssembly)
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(GetAppDomain() == pAssembly->GetAppDomain());
+ }
+ CONTRACTL_END;
+
+ ConditionalAptcaCache *pDomainCache = pAssembly->GetAppDomain()->GetSecurityDescriptor()->GetConditionalAptcaCache();
+ if (pDomainCache->GetConditionalAptcaDomainState() == ConditionalAptcaCache::kAllEnabled)
+ {
+ return true;
+ }
+
+ GCX_COOP();
+
+ bool foundInList = false;
+
+ // Otherwise, we need to transition into the BCL code to find out if the assembly is on the list
+ struct
+ {
+ OBJECTREF orAppDomain;
+ OBJECTREF orAssembly;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ MethodDescCallSite isAssemblyOnAptcaVisibleList(METHOD__APP_DOMAIN__IS_ASSEMBLY_ON_APTCA_VISIBLE_LIST);
+ gc.orAppDomain = GetAppDomain()->GetExposedObject();
+ gc.orAssembly = pAssembly->GetAssembly()->GetExposedObject();
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(gc.orAppDomain),
+ ObjToArgSlot(gc.orAssembly)
+ };
+
+ foundInList = isAssemblyOnAptcaVisibleList.Call_RetBool(args);
+
+ GCPROTECT_END();
+
+ return foundInList;
+ }
+
+ //--------------------------------------------------------------------------------------------------------
+ //
+ // Determine if an assembly is APTCA in the current domain or not
+ //
+ // Arguments:
+ // pDomainAssembly - Assembly to check for APTCA-ness
+ // tokenFlags - raw metadata security bits from the assembly
+ //
+ // Return Value:
+ // true if the assembly is APTCA, false if it is not
+ //
+
+ bool IsAssemblyAptcaEnabled(DomainAssembly *pDomainAssembly, TokenSecurityDescriptorFlags tokenFlags)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDomainAssembly));
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ SString strAptcaAssemblyBreak(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Security_AptcaAssemblyBreak));
+ SString strAssemblySimpleName(SString::Utf8, pDomainAssembly->GetSimpleName());
+ if (strAptcaAssemblyBreak.EqualsCaseInsensitive(strAssemblySimpleName))
+ {
+ _ASSERTE(!"Checking APTCA-ness of an APTCA break assembly");
+ }
+#endif // _DEBUG
+
+ // If the assembly is not marked APTCA, then it cannot possibly be APTCA enabled
+ if ((tokenFlags & TokenSecurityDescriptorFlags_APTCA) == TokenSecurityDescriptorFlags_None)
+ {
+ return false;
+ }
+
+ GCX_PREEMP();
+
+ // Additionally, if the assembly is on the APTCA kill list, then no matter what it says in its metadata,
+ // it should not be considered APTCA
+ if (GetKillBitList()->IsAssemblyKillBitted(pDomainAssembly->GetFile()))
+ {
+ return false;
+ }
+
+ // If the assembly is conditionally APTCA, then we need to check the current AppDomain's APTCA enabled
+ // list to figure out if it is APTCA in this domain.
+ if (tokenFlags & TokenSecurityDescriptorFlags_ConditionalAPTCA)
+ {
+ return IsAssemblyOnAptcaVisibleList(pDomainAssembly);
+ }
+
+ // Otherwise, the assembly is APTCA
+ return true;
+ }
+
+ //--------------------------------------------------------------------------------------------------------
+ //
+ // Determine if the assembly matches the conditional APTCA sharing mode. That is, if we are sharing
+ // enabled conditional APTCA assemblies check that this assembly is enabled. Similarly, if we are
+ // sharing disabled conditional APTCA assemblies check that this assembly is disabled.
+ //
+ // This method assumes that the assembly is conditionally APTCA
+ //
+
+ bool AssemblyMatchesShareMode(IAssemblyName *pAsmName, DomainAssembly *pDomainAssembly)
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pAsmName));
+ PRECONDITION(GetConditionalAptcaSharingMode() != kShareUnknown);
+ }
+ CONTRACTL_END;
+
+ if (IsAssemblyOnAptcaVisibleList(pAsmName, pDomainAssembly))
+ {
+ return GetConditionalAptcaSharingMode() == kShareIfEnabled;
+ }
+ else
+ {
+ return GetConditionalAptcaSharingMode() == kShareIfDisabled;
+ }
+ }
+
+ bool AssemblyMatchesShareMode(ConditionalAptcaCache::State state)
+ {
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(state == ConditionalAptcaCache::kEnabled || state == ConditionalAptcaCache::kDisabled);
+
+ if (state == ConditionalAptcaCache::kEnabled)
+ {
+ return GetConditionalAptcaSharingMode() == kShareIfEnabled;
+ }
+ else
+ {
+ return GetConditionalAptcaSharingMode() == kShareIfDisabled;
+ }
+ }
+}
+
+//------------------------------------------------------------------------------------------------------------
+//
+// Determine if the AppDomain can share an assembly or if APTCA restrictions prevent sharing
+//
+
+bool DomainCanShareAptcaAssembly(DomainAssembly *pDomainAssembly)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pDomainAssembly));
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ DWORD dwAptcaAssemblyDomainBreak = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Security_AptcaAssemblySharingDomainBreak);
+ if (dwAptcaAssemblyDomainBreak == 0 || ADID(dwAptcaAssemblyDomainBreak) == pDomainAssembly->GetAppDomain()->GetId())
+ {
+ SString strAptcaAssemblySharingBreak(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Security_AptcaAssemblySharingBreak));
+ SString strAssemblySimpleName(SString::Utf8, pDomainAssembly->GetSimpleName());
+
+ if (strAptcaAssemblySharingBreak.EqualsCaseInsensitive(strAssemblySimpleName))
+ {
+ _ASSERTE(!"Checking code sharing for APTCA break assembly");
+ }
+ }
+#endif // _DEBUG
+
+ //
+ // We can only share an assembly if all conditional APTCA assemblies in its full closure of dependencies
+ // are enabled.
+ //
+
+ // We always allow sharing of mscorlib
+ if (pDomainAssembly->IsSystem())
+ {
+ return true;
+ }
+
+ IApplicationSecurityDescriptor *pDomainSecDesc = pDomainAssembly->GetAppDomain()->GetSecurityDescriptor();
+ ConditionalAptcaCache *pConditionalAptcaCache = pDomainSecDesc->GetConditionalAptcaCache();
+
+ // If all assemblies in the domain match the sharing mode, then we can share the assembly
+ ConditionalAptcaCache::DomainState domainState = pConditionalAptcaCache->GetConditionalAptcaDomainState();
+ if (GetConditionalAptcaSharingMode() == kShareIfEnabled)
+ {
+ if (domainState == ConditionalAptcaCache::kAllEnabled)
+ {
+ return true;
+ }
+ }
+ else
+ {
+ if (domainState == ConditionalAptcaCache::kAllDisabled)
+ {
+ return true;
+ }
+ }
+
+ // If the root assembly is conditionally APTCA, then it needs to be enabled
+ ReleaseHolder<IMDInternalImport> pRootImport(pDomainAssembly->GetFile()->GetMDImportWithRef());
+ TokenSecurityDescriptorFlags rootSecurityAttributes =
+ TokenSecurityDescriptor::ReadSecurityAttributes(pRootImport, TokenFromRid(1, mdtAssembly));
+ if (rootSecurityAttributes & TokenSecurityDescriptorFlags_ConditionalAPTCA)
+ {
+ if (!AssemblyMatchesShareMode(pDomainAssembly->GetFile()->GetFusionAssemblyName(), pDomainAssembly))
+ {
+ return false;
+ }
+ }
+
+ // Now we need to get the full closure of assemblies that this assembly depends upon and ensure that each
+ // one of those is either not conditional APTCA or is enabled in the domain. We get a new assembly
+ // closure object here rather than using DomainAssembly::GetAssemblyBindingClosure because we don't want
+ // to force that closure to walk the full dependency graph (and therefore not be considered equal to
+ // closures which weren't fully walked).
+ IUnknown *pFusionAssembly;
+ if (pDomainAssembly->GetFile()->IsIStream())
+ {
+ pFusionAssembly = pDomainAssembly->GetFile()->GetIHostAssembly();
+ }
+ else
+ {
+ pFusionAssembly = pDomainAssembly->GetFile()->GetFusionAssembly();
+ }
+
+ // Get the closure and force it to do a full dependency walk, not stopping at framework assemblies
+ SafeComHolder<IAssemblyBindingClosure> pClosure;
+
+
+ LPCWSTR pNIPath = NULL;
+ PEAssembly *pPEAsm = pDomainAssembly->GetFile();
+ if (pPEAsm->HasNativeImage())
+ {
+ ReleaseHolder<PEImage> pNIImage = pPEAsm->GetNativeImageWithRef();
+ pNIPath = pNIImage->GetPath().GetUnicode();
+ }
+
+ IfFailThrow(pDomainAssembly->GetAppDomain()->GetFusionContext()->GetAssemblyBindingClosure(pFusionAssembly, pNIPath, &pClosure));
+ IfFailThrow(pClosure->EnsureWalked(pFusionAssembly, pDomainAssembly->GetAppDomain()->GetFusionContext(), LEVEL_FXPROBED));
+
+ // Now iterate the closure looking for conditional APTCA assemblies
+ SafeComHolder<IAssemblyBindingClosureEnumerator> pClosureEnumerator;
+ IfFailThrow(pClosure->EnumerateAssemblies(&pClosureEnumerator));
+ LPCOLESTR szDependentAssemblyPath = NULL;
+ LPCOLESTR szDependentNIAssemblyPath = NULL;
+
+ for (HRESULT hr = pClosureEnumerator->GetNextAssemblyPath(&szDependentAssemblyPath, &szDependentNIAssemblyPath);
+ SUCCEEDED(hr);
+ hr = pClosureEnumerator->GetNextAssemblyPath(&szDependentAssemblyPath, &szDependentNIAssemblyPath))
+ {
+ // Make sure we've successfully enumerated an item
+ if (hr != S_OK && hr != HRESULT_FROM_WIN32(ERROR_NO_MORE_ITEMS))
+ {
+ pDomainAssembly->ExternalLog(LL_ERROR, W("Rejecting code sharing because of an error enumerating dependent assemblies: 0x%x"), hr);
+ return false;
+ }
+ else if (szDependentAssemblyPath == NULL)
+ {
+ // This means we have an assembly but no way to verify the image at this point -- should we get
+ // into this state, we'll be conservative and fail the share
+ pDomainAssembly->ExternalLog(LL_ERROR, W("Rejecting code sharing because an assembly in the closure does not have a path"));
+ return false;
+ }
+ else
+ {
+ // We have successfully found a new item in the closure of assemblies - now check to ensure that
+ // it is either not conditionally APTCA or is enabled in this domain.
+ PEImageHolder pDependentImage;
+
+ // Use the native image if it is loaded.
+ if (szDependentNIAssemblyPath != NULL)
+ {
+ SString strNIAssemblyPath(szDependentNIAssemblyPath);
+ pDependentImage = PEImage::OpenImage(strNIAssemblyPath, MDInternalImport_OnlyLookInCache);
+ if (pDependentImage != NULL && !pDependentImage->HasLoadedLayout())
+ {
+ pDependentImage = NULL;
+ }
+ else
+ {
+#if FEATURE_CORECLR
+#error Coreclr needs to check native image version here.
+#endif
+ }
+ }
+
+ if (pDependentImage == NULL)
+ {
+ SString strAssemblyPath(szDependentAssemblyPath);
+ pDependentImage = PEImage::OpenImage(strAssemblyPath);
+ }
+
+ // See if we already know if this image is enabled in the current domain or not
+ ConditionalAptcaCache::State dependentState = pConditionalAptcaCache->GetCachedState(pDependentImage);
+
+ // We don't know this assembly's conditional APTCA state in this domain, so we need to figure it
+ // out now.
+ if (dependentState == ConditionalAptcaCache::kUnknown)
+ {
+ // First figure out if the assembly is even conditionally APTCA to begin with
+ IMDInternalImport *pDependentImport = pDependentImage->GetMDImport();
+ TokenSecurityDescriptorFlags dependentSecurityAttributes =
+ TokenSecurityDescriptor::ReadSecurityAttributes(pDependentImport, TokenFromRid(1, mdtAssembly));
+
+ if (dependentSecurityAttributes & TokenSecurityDescriptorFlags_ConditionalAPTCA)
+ {
+ // Get the assembly name of the dependent assembly so we can check it against the domain
+ // enabled list
+ ReleaseHolder<IAssemblyName> pDependentAssemblyName;
+ AssemblySpec dependentAssemblySpec(pDomainAssembly->GetAppDomain());
+ dependentAssemblySpec.InitializeSpec(TokenFromRid(1, mdtAssembly), pDependentImport);
+ IfFailThrow(dependentAssemblySpec.CreateFusionName(&pDependentAssemblyName, FALSE));
+
+ // Check the domain list to see if the assembly is on it
+ if (IsAssemblyOnAptcaVisibleList(pDependentAssemblyName, pDomainAssembly))
+ {
+ dependentState = ConditionalAptcaCache::kEnabled;
+ }
+ else
+ {
+ dependentState = ConditionalAptcaCache::kDisabled;
+ }
+ }
+ else
+ {
+ // The dependent assembly doesn't have the conditional APTCA bit set on it, so we don't
+ // need to do any checking to see if it's enabled
+ dependentState = ConditionalAptcaCache::kNotCAptca;
+ }
+
+ // Cache the result of evaluating conditional APTCA on this assembly in the domain
+ pConditionalAptcaCache->SetCachedState(pDependentImage, dependentState);
+ }
+
+ // If the dependent assembly does not match the sharing mode, then we cannot share the
+ // dependency. We can always share dependencies which are not conditionally APTCA, so don't
+ // bother checking the share mode for them.
+ if (dependentState != ConditionalAptcaCache::kNotCAptca)
+ {
+ if (!AssemblyMatchesShareMode(dependentState))
+ {
+ pDomainAssembly->ExternalLog(LL_ERROR, W("Rejecting code sharing because a dependent assembly did not match the conditional APTCA share mode"));
+ return false;
+ }
+ }
+ }
+ }
+
+ // The root assembly and all of its dependents were either on the conditional APTCA list or are not
+ // conditional APTCA, so we can share this assembly
+ return true;
+}
+
+//------------------------------------------------------------------------------------------------------------
+//
+// Get an exception string indicating how to enable a conditional APTCA assembly if it was disabled and
+// caused an exception
+//
+
+SString GetConditionalAptcaAccessExceptionContext(Assembly *pTargetAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTargetAssembly));
+ }
+ CONTRACTL_END;
+
+ SString exceptionContext;
+
+ ModuleSecurityDescriptor *pMSD = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pTargetAssembly);
+
+ if (pMSD->GetTokenFlags() & TokenSecurityDescriptorFlags_ConditionalAPTCA)
+ {
+ GCX_PREEMP();
+
+ if (!IsAssemblyOnAptcaVisibleList(pTargetAssembly->GetDomainAssembly()))
+ {
+ // We have a conditional APTCA assembly which is not on the visible list for the current
+ // AppDomain, provide information on how to enable it.
+ SString assemblyDisplayName;
+ pTargetAssembly->GetDisplayName(assemblyDisplayName);
+
+ SString assemblyConditionalAptcaName;
+ GetAssemblyNameForConditionalAptca(pTargetAssembly, &assemblyConditionalAptcaName);
+
+ EEException::GetResourceMessage(IDS_ACCESS_EXCEPTION_CONTEXT_CONDITIONAL_APTCA,
+ exceptionContext,
+ assemblyDisplayName,
+ assemblyConditionalAptcaName);
+ }
+ }
+
+ return exceptionContext;
+}
+
+//------------------------------------------------------------------------------------------------------------
+//
+// Get an exception string indicating that an assembly was on the kill bit list if it caused an exception
+//
+
+SString GetAptcaKillBitAccessExceptionContext(Assembly *pTargetAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTargetAssembly));
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ SString exceptionContext;
+
+ if (GetKillBitList()->IsAssemblyKillBitted(pTargetAssembly->GetDomainAssembly()->GetFile()))
+ {
+ SString assemblyDisplayName;
+ pTargetAssembly->GetDisplayName(assemblyDisplayName);
+
+ EEException::GetResourceMessage(IDS_ACCESS_EXCEPTION_CONTEXT_APTCA_KILLBIT,
+ exceptionContext,
+ assemblyDisplayName);
+ }
+
+ return exceptionContext;
+}
+
+//------------------------------------------------------------------------------------------------------------
+//
+// Determine if a native image is valid to use from the perspective of APTCA. This means that the image
+// itself and all of its dependencies must:
+// 1. Not be killbitted
+// 2. Be enabled if they are conditionally APTCA
+//
+// Arguments:
+// pNativeImage - native image to accept or reject
+// pDomainAssembly - assembly that is being loaded
+//
+// Return Value:
+// true if the native image can be accepted due to APTCA-ness, false if we need to reject it
+//
+
+bool NativeImageHasValidAptcaDependencies(PEImage *pNativeImage, DomainAssembly *pDomainAssembly)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pNativeImage));
+ PRECONDITION(CheckPointer(pDomainAssembly));
+ }
+ CONTRACTL_END;
+
+ AptcaKillBitList *pKillBitList = GetKillBitList();
+
+ ConditionalAptcaCache *pDomainCache = pDomainAssembly->GetAppDomain()->GetSecurityDescriptor()->GetConditionalAptcaCache();
+ // If we have any killbitted assemblies, then we need to make sure that the current assembly and its
+ // dependencies are not killbitted
+ BOOL aptcaChecks = pKillBitList->AreAnyAssembliesKillBitted();
+ BOOL conditionalAptcaChecks = pDomainCache->GetConditionalAptcaDomainState() != ConditionalAptcaCache::kAllEnabled;
+ if (!aptcaChecks && !conditionalAptcaChecks)
+ return true;
+
+ //
+ // Check to see if the NGEN image itself is APTCA and killbitted
+ //
+
+ ReleaseHolder<IMDInternalImport> pAssemblyMD(pDomainAssembly->GetFile()->GetMDImportWithRef());
+ TokenSecurityDescriptorFlags assemblySecurityAttributes =
+ TokenSecurityDescriptor::ReadSecurityAttributes(pAssemblyMD, TokenFromRid(1, mdtAssembly));
+
+ if (aptcaChecks)
+ {
+ if ((assemblySecurityAttributes & TokenSecurityDescriptorFlags_APTCA) &&
+ pKillBitList->IsAssemblyKillBitted(pDomainAssembly->GetFile()))
+ {
+ return false;
+ }
+ }
+ if (conditionalAptcaChecks
+ && (assemblySecurityAttributes & TokenSecurityDescriptorFlags_ConditionalAPTCA))
+ {
+ //
+ // First check to see if we're disabled.
+ //
+
+ AssemblySpec spec;
+ spec.InitializeSpec(pDomainAssembly->GetFile());
+ ReleaseHolder<IAssemblyName> pAsmName;
+ IfFailThrow(spec.CreateFusionName(&pAsmName, FALSE));
+
+ if (!IsAssemblyOnAptcaVisibleList(pAsmName, pDomainAssembly))
+ {
+ //IsAssemblyOnAptcaVisibleList has already logged an error.
+ return false;
+ }
+ }
+
+ if (aptcaChecks || conditionalAptcaChecks)
+ {
+ //
+ // Also check its dependencies
+ //
+
+ COUNT_T dependencyCount;
+ PEImageLayout *pNativeLayout = pNativeImage->GetLoadedLayout();
+ CORCOMPILE_DEPENDENCY *pDependencies = pNativeLayout->GetNativeDependencies(&dependencyCount);
+
+ for (COUNT_T i = 0; i < dependencyCount; ++i)
+ {
+ CORCOMPILE_DEPENDENCY* pDependency = &(pDependencies[i]);
+ // Look for any dependency which is APTCA
+ if (pDependencies[i].dwAssemblyDef != mdAssemblyRefNil)
+ {
+ AssemblySpec name;
+ name.InitializeSpec(pDependency->dwAssemblyRef,
+ pNativeImage->GetNativeMDImport(),
+ NULL,
+ pDomainAssembly->GetFile()->IsIntrospectionOnly());
+
+ ReleaseHolder<IAssemblyName> pDependencyAssemblyName;
+ HRESULT hr = name.CreateFusionName(&pDependencyAssemblyName, FALSE);
+
+ // If we couldn't build the assembly name, conservatively discard the image
+ if (FAILED(hr))
+ {
+ pDomainAssembly->ExternalLog(LL_ERROR, W("Rejecting native image because could not get ")
+ W("name for assemblyref 0x%x for native image dependency: ")
+ W("hr=0x%x"), pDependency->dwAssemblyRef, hr);
+ return false;
+ }
+
+ if (pDependencies[i].dependencyInfo & (CORCOMPILE_DEPENDENCY_IS_APTCA))
+ {
+ ULARGE_INTEGER fileVersion;
+
+ //This is a workaround for Dev10# 743602
+ fileVersion.QuadPart = GET_UNALIGNED_VAL64(&(pDependencies[i].uliFileVersion));
+ // If the dependency really is killbitted, then discard the image
+ if (pKillBitList->IsAssemblyKillBitted(pDependencyAssemblyName, fileVersion))
+ {
+ pDomainAssembly->ExternalLog(LL_ERROR, W("Rejecting native image because dependency ")
+ W("assemblyref 0x%x is killbitted."),
+ pDependency->dwAssemblyRef);
+ return false;
+ }
+ }
+ if (pDependencies[i].dependencyInfo & (CORCOMPILE_DEPENDENCY_IS_CAPTCA))
+ {
+ if (!IsAssemblyOnAptcaVisibleList(pDependencyAssemblyName, pDomainAssembly))
+ {
+ //IsAssemblyOnAptcaVisibleList has already logged an error.
+ return false;
+ }
+ }
+ }
+ }
+ }
+ return true;
+}
+#else // CROSSGEN_COMPILE
+namespace
+{
+ bool IsAssemblyAptcaEnabled(DomainAssembly *pDomainAssembly, TokenSecurityDescriptorFlags tokenFlags)
+ {
+ // No killbits or conditional APTCA for crossgen. Just check whether the assembly is marked APTCA.
+ return ((tokenFlags & TokenSecurityDescriptorFlags_APTCA) != TokenSecurityDescriptorFlags_None);
+ }
+}
+#endif // CROSSGEN_COMPILE
+
+//------------------------------------------------------------------------------------------------------------
+//
+// Process an assembly's real APTCA flags to determine if the assembly should be considered
+// APTCA or not
+//
+// Arguments:
+// pDomainAssembly - Assembly to check for APTCA-ness
+// tokenFlags - raw metadata security bits from the assembly
+//
+// Return Value:
+// updated token security descriptor flags which indicate the assembly's true APTCA state
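+//
+// For example, a conditionally APTCA assembly that is enabled in its domain comes out of this function
+// with only TokenSecurityDescriptorFlags_APTCA set, while a disabled one comes out with no APTCA bits
+// set at all.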
+//
+
+TokenSecurityDescriptorFlags ProcessAssemblyAptcaFlags(DomainAssembly *pDomainAssembly,
+ TokenSecurityDescriptorFlags tokenFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDomainAssembly));
+ }
+ CONTRACTL_END;
+
+ const TokenSecurityDescriptorFlags aptcaFlags = TokenSecurityDescriptorFlags_APTCA |
+ TokenSecurityDescriptorFlags_ConditionalAPTCA;
+
+ if (IsAssemblyAptcaEnabled(pDomainAssembly, tokenFlags))
+ {
+ // The assembly is APTCA - temporarily remove all of its APTCA bits, and then add back the
+ // unconditional APTCA bit
+ tokenFlags = tokenFlags & ~aptcaFlags;
+ return tokenFlags | TokenSecurityDescriptorFlags_APTCA;
+ }
+ else
+ {
+ // The assembly is not APTCA, so remove all of its APTCA bits from the token security descriptor
+ return tokenFlags & ~aptcaFlags;
+ }
+}
diff --git a/src/vm/aptca.h b/src/vm/aptca.h
new file mode 100644
index 0000000000..9edaf0f7c5
--- /dev/null
+++ b/src/vm/aptca.h
@@ -0,0 +1,111 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//--------------------------------------------------------------------------
+// aptca.h
+//
+// Functions for handling allow partially trusted callers assemblies
+//
+// This should be the only interface for talking about the APTCA-ness of an assembly, and even then should
+// be used only from very select areas of the CLR that absolutely need to know the information. For
+// instance:
+//
+// * the class loader (for code sharing and formatting exception messages)
+// * NGEN (for determining if a native image is valid)
+// * security attribute processing code (for obvious reasons)
+//
+// may use this interface. Nearly every other section of the code should simply be relying on the
+// ModuleSecurityDescriptor for the assembly in question. And no other sections of the code should be
+// directly asking questions like "is this assembly conditional APTCA" ... we explicitly want to hide that
+// information away behind the final assembly security attribute computation as much as possible.
+//
+// In particular, no code should be making security enforcement decisions based upon conditional APTCA, and
+// instead should rely on the existing transparency / legacy APTCA enforcement. This means that once the
+// security system, JIT, and class loader have finished setting up an assembly's APTCA attributes, there
+// should be no further questions asked about the particular APTCA attribute applied to the assembly.
+//
+// Put another way, once an assembly is loaded, the APTCA kill bit and conditional APTCA enabled / disabled
+// decision for an assembly should evaporate away, and all assemblies should look as if they either have a
+// full APTCA attribute (in the not-killbitted / conditional APTCA enabled case) or no APTCA attribute at
+// all (killbitted or conditional APTCA disabled).
+//
+
+//
+//--------------------------------------------------------------------------
+
+
+#ifndef __APTCA_H__
+#define __APTCA_H__
+
+#ifndef FEATURE_APTCA
+#error FEATURE_APTCA is required for this file
+#endif // FEATURE_APTCA
+
+#include "securitymeta.h"
+
+class ConditionalAptcaCache
+{
+public:
+ typedef enum
+ {
+ kUnknown, // No cached state
+ kEnabled, // The assembly is enabled in this domain
+ kDisabled, // The assembly is disabled in this domain
+ kNotCAptca, // The assembly is not conditionally APTCA
+ }
+ State;
+
+ typedef enum
+ {
+ kDomainStateUnknown, // The domain state is not yet initialized
+ kAllEnabled, // All assemblies in the domain are enabled
+ kSomeEnabled, // Some assemblies in the domain are enabled
+ kAllDisabled, // All assemblies in the domain are disabled
+ }
+ DomainState;
+
+ ConditionalAptcaCache(AppDomain *pAppDomain);
+ ~ConditionalAptcaCache();
+
+ State GetCachedState(PTR_PEImage pImage);
+ void SetCachedState(PTR_PEImage pImage, State state);
+
+ DomainState GetConditionalAptcaDomainState();
+ void SetCanonicalConditionalAptcaList(LPCWSTR wszCanonicalConditionalAptcaList);
+
+ static bool ConsiderFullTrustConditionalAptcaLists();
+
+private:
+ ConditionalAptcaCache(ConditionalAptcaCache &other); // not implemented - prevents the compiler from generating a copy constructor
+ ConditionalAptcaCache& operator=(const ConditionalAptcaCache &other); // not implemented - prevents the compiler from generating an assignment operator
+
+private:
+ AppDomain *m_pAppDomain;
+
+ bool m_canonicalListIsNull;
+ SString m_canonicalList;
+ DomainState m_domainState;
+};
+
+// Determine if the AppDomain can share an assembly or if APTCA restrictions prevent sharing
+bool DomainCanShareAptcaAssembly(DomainAssembly *pDomainAssembly);
+
+// Get an exception string indicating how to enable a conditional APTCA assembly if it was disabled and
+// caused an exception
+SString GetConditionalAptcaAccessExceptionContext(Assembly *pTargetAssembly);
+
+// Get an exception string indicating that an assembly could not be shared because of its conditional APTCA state
+SString GetConditionalAptcaSharingExceptionContext(Assembly *pTargetAssembly);
+
+// Get an exception string indicating that an assembly was on the kill bit list if it caused an exception
+SString GetAptcaKillBitAccessExceptionContext(Assembly *pTargetAssembly);
+
+// Determine if a native image is OK to use from an APTCA perspective (it and its dependencies all have the
+// same APTCA-ness now as at NGEN time)
+bool NativeImageHasValidAptcaDependencies(PEImage *pNativeImage, DomainAssembly *pDomainAssembly);
+
+// Process an assembly's real APTCA flags to determine if the assembly should be considered APTCA or not
+TokenSecurityDescriptorFlags ProcessAssemblyAptcaFlags(DomainAssembly *pDomainAssembly, TokenSecurityDescriptorFlags tokenFlags);
+
+#endif // __APTCA_H__
diff --git a/src/vm/argslot.h b/src/vm/argslot.h
new file mode 100644
index 0000000000..57a7afa8c2
--- /dev/null
+++ b/src/vm/argslot.h
@@ -0,0 +1,44 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ============================================================================
+// File: argslot.h
+//
+
+// ============================================================================
+// Contains the ARG_SLOT type.
+
+
+#ifndef __ARG_SLOT_H__
+#define __ARG_SLOT_H__
+
+// The ARG_SLOT must be big enough to represent all pointer and basic types (except for 80-bit fp values).
+// So, it's guaranteed to be at least 64-bit.
+typedef unsigned __int64 ARG_SLOT;
+#define SIZEOF_ARG_SLOT 8
+
+#if BIGENDIAN
+// Returns the address of the payload inside the argslot
+inline BYTE* ArgSlotEndianessFixup(ARG_SLOT* pArg, UINT cbSize) {
+ LIMITED_METHOD_CONTRACT;
+
+ BYTE* pBuf = (BYTE*)pArg;
+ switch (cbSize) {
+ case 1:
+ pBuf += 7;
+ break;
+ case 2:
+ pBuf += 6;
+ break;
+ case 4:
+ pBuf += 4;
+ break;
+ }
+ return pBuf;
+}
+#else
+#define ArgSlotEndianessFixup(pArg, cbSize) ((BYTE *)(pArg))
+#endif
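+
+// Usage sketch (illustrative): callers write an argument's payload through the fixed-up pointer so the
+// value lands in the bytes a callee reads on either endianness, e.g.:
+//
+//   ARG_SLOT slot = 0;
+//   *(WORD*)ArgSlotEndianessFixup(&slot, sizeof(WORD)) = 0x1234;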
+
+#endif // __ARG_SLOT_H__
diff --git a/src/vm/arm/.gitmirror b/src/vm/arm/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/vm/arm/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror. \ No newline at end of file
diff --git a/src/vm/arm/CrtHelpers.asm b/src/vm/arm/CrtHelpers.asm
new file mode 100644
index 0000000000..e20b1bf0bf
--- /dev/null
+++ b/src/vm/arm/CrtHelpers.asm
@@ -0,0 +1,163 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+; ***********************************************************************
+; File: CrtHelpers.asm
+;
+; ***********************************************************************
+
+#include "ksarm.h"
+
+#include "asmconstants.h"
+
+#include "asmmacros.h"
+
+ TEXTAREA
+
+; JIT_MemSet/JIT_MemCpy
+;
+; It is IMPORTANT that the exception handling code is able to find these guys
+; on the stack, but to keep them from being tailcalled by VC++ we need to turn
+; off optimization and it ends up being a wasteful implementation.
+;
+; Hence these assembly helpers.
+;
+;EXTERN_C void __stdcall JIT_MemSet(void* _dest, int c, size_t count)
+ LEAF_ENTRY JIT_MemSet
+
+;
+; The memset function sets the first count bytes of
+; dest to the character c (r1).
+;
+; Doesn't return a value
+;
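+; Register usage (AAPCS argument order): r0 = dest, r1 = c (fill byte), r2 = count
+;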
+
+ subs r2, r2, #4
+ blt ByteSet
+
+ ands r1, r1, #&FF
+ orr r1, r1, r1, lsl #8
+CheckAlign ; 2-3 cycles
+ ands r3, r0, #3 ; Check alignment and fix if possible
+ bne Align
+
+BlockSet ; 6-7 cycles
+ orr r1, r1, r1, lsl #16
+ subs r2, r2, #12
+ mov r3, r1
+ blt BlkSet8
+
+BlkSet16 ; 7 cycles/16 bytes
+ stm r0!, {r1, r3}
+ subs r2, r2, #16
+ stm r0!, {r1, r3}
+ bge BlkSet16
+
+BlkSet8 ; 4 cycles/8 bytes
+ adds r2, r2, #8
+ blt BlkSet4
+ stm r0!, {r1, r3}
+ sub r2, r2, #8
+
+BlkSet4
+ adds r2, r2, #4 ; 4 cycles/4 bytes
+ blt ByteSet
+ str r1, [r0], #4
+ b MaybeExit
+
+ByteSet
+ adds r2, r2, #4
+MaybeExit
+ beq ExitMemSet
+
+ strb r1, [r0] ; 5 cycles/1-3bytes
+ cmp r2, #2
+ blt ExitMemSet
+ strb r1, [r0, #1]
+ strbgt r1, [r0, #2]
+
+ExitMemSet
+
+ bx lr
+
+Align ; 8 cycles/1-3 bytes
+ tst r0, #1 ; Check byte alignment
+ beq AlignHalf
+ subs r2, r2, #1
+ strb r1, [r0], #1
+AlignHalf
+ tst r0, #2 ; Check Half-word alignment
+ beq BlockSet
+ subs r2, r2, #2
+ strh r1, [r0], #2
+ b BlockSet
+
+ LEAF_END_MARKED JIT_MemSet
+
+
+;EXTERN_C void __stdcall JIT_MemCpy(void* _dest, const void *_src, size_t count)
+ LEAF_ENTRY JIT_MemCpy
+;
+; It only requires 4 byte alignment
+; and doesn't return a value
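+;
+; Register usage (AAPCS argument order): r0 = dest, r1 = src, r2 = count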
+
+ cmp r2, #0 ; quick check for 0 length
+ beq ExitMemCpy ; if zero, exit
+
+ tst r0, #3 ; skip directly to aligned if already aligned
+ beq DestAligned ; if 0, we're already aligned; go large
+
+ByteLoop1
+ subs r2, r2, #1 ; decrement byte counter
+ ldrb r3, [r1], #1 ; copy one byte
+ strb r3, [r0], #1
+ beq ExitMemCpy ; if the byte counter hits 0, exit early
+ tst r0, #3 ; are we aligned now?
+ bne ByteLoop1 ; nope, keep going
+
+DestAligned
+ subs r2, r2, #8 ; byte counter -= 8
+ blt AlignedFinished ; if that puts us negative, skip the big copy
+
+ tst r1, #3 ; is the 4-byte source aligned?
+ addne r2, r2, #8 ; if not, fix the byte counter (+= 8)
+ bne ByteLoop2 ; and do all the rest with bytes
+
+QwordLoop
+ subs r2, r2, #8 ; decrement byte counter by 8
+ ldm r1!, {r3,r12} ; copy one qword
+ stm r0!, {r3,r12} ;
+ bge QwordLoop ; loop until the byte counter goes negative
+
+AlignedFinished
+ adds r2, r2, #4 ; add 4 to recover a potential >= 4-byte tail
+ blt AlignedFinished2
+ ldr r3, [r1], #4
+ str r3, [r0], #4
+ b MaybeExitMemCpy
+AlignedFinished2
+ adds r2, r2, #4 ; add 4 more to the byte counter to recover
+
+MaybeExitMemCpy
+ beq ExitMemCpy ; the remaining count
+
+ByteLoop2
+ subs r2, r2, #1 ; decrement the counter
+ ldrb r3, [r1], #1 ; copy one byte
+ strb r3, [r0], #1
+ bne ByteLoop2 ; loop until the counter hits 0
+
+ExitMemCpy
+ bx lr
+
+ LEAF_END_MARKED JIT_MemCpy
+
+ END
+
diff --git a/src/vm/arm/PInvokeStubs.asm b/src/vm/arm/PInvokeStubs.asm
new file mode 100644
index 0000000000..6e0047a17f
--- /dev/null
+++ b/src/vm/arm/PInvokeStubs.asm
@@ -0,0 +1,143 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+;; ==++==
+;;
+
+;;
+;; ==--==
+#include "ksarm.h"
+
+#include "asmconstants.h"
+
+#include "asmmacros.h"
+
+
+ IMPORT VarargPInvokeStubWorker
+ IMPORT GenericPInvokeCalliStubWorker
+
+
+; ------------------------------------------------------------------
+; Macro to generate PInvoke Stubs.
+; $__PInvokeStubFuncName : function which calls the actual stub obtained from VASigCookie
+; $__PInvokeGenStubFuncName : function which generates the IL stubs for PInvoke
+;
+; Params :-
+; $FuncPrefix : prefix of the function name for the stub
+; Eg. VarargPinvoke, GenericPInvokeCalli
+; $VASigCookieReg : register which contains the VASigCookie
+; $SaveFPArgs : "Yes" or "No". For variadic functions FP args are not present in FP regs,
+; so we need not save the FP argument registers for vararg PInvoke
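+;
+; Eg. PINVOKE_STUB VarargPInvoke, r1, {false} generates VarargPInvokeStub_RetBuffArg and
+; VarargPInvokeGenILStub_RetBuffArg, both of which funnel into VarargPInvokeStubWorker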
+ MACRO
+
+ PINVOKE_STUB $FuncPrefix,$VASigCookieReg,$SaveFPArgs
+
+ GBLS __PInvokeStubFuncName
+ GBLS __PInvokeGenStubFuncName
+ GBLS __PInvokeStubWorkerName
+
+ IF "$FuncPrefix" == "GenericPInvokeCalli"
+__PInvokeStubFuncName SETS "$FuncPrefix":CC:"Helper"
+ ELSE
+__PInvokeStubFuncName SETS "$FuncPrefix":CC:"Stub"
+ ENDIF
+__PInvokeGenStubFuncName SETS "$FuncPrefix":CC:"GenILStub"
+__PInvokeStubWorkerName SETS "$FuncPrefix":CC:"StubWorker"
+
+ IF "$VASigCookieReg" == "r1"
+__PInvokeStubFuncName SETS "$__PInvokeStubFuncName":CC:"_RetBuffArg"
+__PInvokeGenStubFuncName SETS "$__PInvokeGenStubFuncName":CC:"_RetBuffArg"
+ ENDIF
+
+ NESTED_ENTRY $__PInvokeStubFuncName
+
+ ; save reg value before using the reg
+ PROLOG_PUSH {$VASigCookieReg}
+
+ ; get the stub
+ ldr $VASigCookieReg, [$VASigCookieReg,#VASigCookie__pNDirectILStub]
+
+ ; if null goto stub generation
+ cbz $VASigCookieReg, %0
+
+ EPILOG_STACK_FREE 4
+
+ EPILOG_BRANCH_REG $VASigCookieReg
+
+0
+
+ EPILOG_POP {$VASigCookieReg}
+ EPILOG_BRANCH $__PInvokeGenStubFuncName
+
+ NESTED_END
+
+
+ NESTED_ENTRY $__PInvokeGenStubFuncName
+
+ PROLOG_WITH_TRANSITION_BLOCK 0, $SaveFPArgs
+
+ ; r2 = UnmanagedTarget / MethodDesc
+ mov r2, r12
+
+ ; r1 = VaSigCookie
+ IF "$VASigCookieReg" != "r1"
+ mov r1, $VASigCookieReg
+ ENDIF
+
+ ; r0 = pTransitionBlock
+ add r0, sp, #__PWTB_TransitionBlock
+
+ ; save hidden arg
+ mov r4, r12
+
+ bl $__PInvokeStubWorkerName
+
+ ; restore hidden arg (method desc or unmanaged target)
+ mov r12, r4
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+
+ EPILOG_BRANCH $__PInvokeStubFuncName
+
+ NESTED_END
+
+ MEND
+
+
+ TEXTAREA
+; ------------------------------------------------------------------
+; VarargPInvokeStub & VarargPInvokeGenILStub
+; There is a separate stub when the method has a hidden return buffer arg.
+;
+; in:
+; r0 = VASigCookie*
+; r12 = MethodDesc *
+;
+ PINVOKE_STUB VarargPInvoke, r0, {false}
+
+
+; ------------------------------------------------------------------
+; GenericPInvokeCalliHelper & GenericPInvokeCalliGenILStub
+; Helper for generic pinvoke calli instruction
+;
+; in:
+; r4 = VASigCookie*
+; r12 = Unmanaged target
+;
+ PINVOKE_STUB GenericPInvokeCalli, r4, {true}
+
+; ------------------------------------------------------------------
+; VarargPInvokeStub_RetBuffArg & VarargPInvokeGenILStub_RetBuffArg
+; Vararg PInvoke Stub when the method has a hidden return buffer arg
+;
+; in:
+; r1 = VASigCookie*
+; r12 = MethodDesc*
+;
+ PINVOKE_STUB VarargPInvoke, r1, {false}
+
+
+; Must be at very end of file
+ END
diff --git a/src/vm/arm/armsinglestepper.cpp b/src/vm/arm/armsinglestepper.cpp
new file mode 100644
index 0000000000..a5b1d68112
--- /dev/null
+++ b/src/vm/arm/armsinglestepper.cpp
@@ -0,0 +1,1197 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// Emulate hardware single-step on ARM.
+//
+
+#include "common.h"
+#include "armsinglestepper.h"
+
+//
+// ITState methods.
+//
+
+ITState::ITState()
+{
+#ifdef _DEBUG
+ m_fValid = false;
+#endif
+}
+
+// Must call Get() (or Init()) to initialize this instance from a specific context before calling any other
+// (non-static) method.
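+//
+// In the CPSR the eight IT bits are split across two fields: IT[1:0] live in bits 26:25 and IT[7:2] in
+// bits 15:10, so Get()/Set() below reassemble and scatter them accordingly.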
+void ITState::Get(T_CONTEXT *pCtx)
+{
+ m_bITState = (BYTE)((BitExtract((WORD)pCtx->Cpsr, 15, 10) << 2) |
+ BitExtract((WORD)(pCtx->Cpsr >> 16), 10, 9));
+#ifdef _DEBUG
+ m_fValid = true;
+#endif
+}
+
+// Must call Init() (or Get()) to initialize this instance from a raw byte value before calling any other
+// (non-static) method.
+void ITState::Init(BYTE bState)
+{
+ m_bITState = bState;
+#ifdef _DEBUG
+ m_fValid = true;
+#endif
+}
+
+// Does the current IT state indicate we're executing within an IT block?
+bool ITState::InITBlock()
+{
+ _ASSERTE(m_fValid);
+ return (m_bITState & 0x1f) != 0;
+}
+
+// Only valid within an IT block. Returns the condition code which will be evaluated for the current
+// instruction.
+DWORD ITState::CurrentCondition()
+{
+ _ASSERTE(m_fValid);
+ _ASSERTE(InITBlock());
+ return BitExtract(m_bITState, 7, 4);
+}
+
+// Transition the IT state to that for the next instruction.
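+// This mirrors the architectural ITAdvance() pseudocode: once ITSTATE<2:0> is all zeroes the block is
+// finished and the state clears; otherwise the low five bits (the condition mask) shift left by one.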
+void ITState::Advance()
+{
+ _ASSERTE(m_fValid);
+ if ((m_bITState & 0x7) == 0)
+ m_bITState = 0;
+ else
+ m_bITState = (m_bITState & 0xe0) | ((m_bITState << 1) & 0x1f);
+}
+
+// Write the current IT state back into the given context.
+void ITState::Set(T_CONTEXT *pCtx)
+{
+ _ASSERTE(m_fValid);
+
+ Clear(pCtx);
+ pCtx->Cpsr |= BitExtract(m_bITState, 1, 0) << 25;
+ pCtx->Cpsr |= BitExtract(m_bITState, 7, 2) << 10;
+}
+
+// Clear IT state (i.e. force execution to be outside of an IT block) in the given context.
+/* static */ void ITState::Clear(T_CONTEXT *pCtx)
+{
+ pCtx->Cpsr &= 0xf9ff03ff;
+}
+
+//
+// ArmSingleStepper methods.
+//
+ArmSingleStepper::ArmSingleStepper()
+ : m_originalPc(0), m_targetPc(0), m_rgCode(0), m_state(Disabled),
+ m_fEmulatedITInstruction(false), m_fRedirectedPc(false), m_fBypass(false), m_fEmulate(false), m_fSkipIT(false)
+{
+ m_opcodes[0] = 0;
+ m_opcodes[1] = 0;
+}
+
+ArmSingleStepper::~ArmSingleStepper()
+{
+#ifndef DACCESS_COMPILE
+ DeleteExecutable(m_rgCode);
+#endif
+}
+
+void ArmSingleStepper::Init()
+{
+#ifndef DACCESS_COMPILE
+ if (m_rgCode == NULL)
+ {
+ m_rgCode = new (executable) WORD[kMaxCodeBuffer];
+ }
+#endif
+}
+
+// Given the context with which a thread will be resumed, modify that context such that resuming the thread
+// will execute a single instruction before raising an EXCEPTION_BREAKPOINT. The thread context must be
+// cleaned up via the Fixup method below before any further exception processing can occur (at which point the
+// caller can behave as though EXCEPTION_SINGLE_STEP was raised).
+void ArmSingleStepper::Enable()
+{
+ _ASSERTE(m_state != Applied);
+
+ if (m_state == Enabled)
+ {
+ // We allow single-stepping to be enabled multiple times before the thread is resumed, but we require
+ // that the thread state is the same in all cases (i.e. additional step requests are treated as
+ // no-ops).
+ _ASSERTE(!m_fBypass);
+ _ASSERTE(m_opcodes[0] == 0);
+ _ASSERTE(m_opcodes[1] == 0);
+
+ return;
+ }
+
+ LOG((LF_CORDB, LL_INFO100000, "ArmSingleStepper::Enable\n"));
+
+ m_fBypass = false;
+ m_opcodes[0] = 0;
+ m_opcodes[1] = 0;
+ m_state = Enabled;
+}
+
+void ArmSingleStepper::Bypass(DWORD ip, WORD opcode1, WORD opcode2)
+{
+ _ASSERTE(m_state != Applied);
+
+ if (m_state == Enabled)
+ {
+ // We allow single-stepping to be enabled multiple times before the thread is resumed, but we require
+ // that the thread state is the same in all cases (i.e. additional step requests are treated as
+ // no-ops).
+ if (m_fBypass)
+ {
+ _ASSERTE(m_opcodes[0] == opcode1);
+ _ASSERTE(m_opcodes[1] == opcode2);
+ _ASSERTE(m_originalPc == ip);
+ return;
+ }
+ }
+
+ LOG((LF_CORDB, LL_INFO100000, "ArmSingleStepper::Bypass(pc=%x, opcode=%x %x)\n", (DWORD)ip, (DWORD)opcode1, (DWORD)opcode2));
+
+ m_fBypass = true;
+ m_originalPc = ip;
+ m_opcodes[0] = opcode1;
+ m_opcodes[1] = opcode2;
+ m_state = Enabled;
+}
+
+void ArmSingleStepper::Apply(T_CONTEXT *pCtx)
+{
+ if (m_rgCode == NULL)
+ {
+ Init();
+
+ // OOM. We will simply ignore the single step.
+ if (m_rgCode == NULL)
+ return;
+ }
+
+ _ASSERTE(pCtx != NULL);
+
+ if (!m_fBypass)
+ {
+ DWORD pc = ((DWORD)pCtx->Pc) & ~THUMB_CODE;
+ m_opcodes[0] = *(WORD*)pc;
+ if (Is32BitInstruction( m_opcodes[0]))
+ m_opcodes[1] = *(WORD*)(pc+2);
+ }
+
+ WORD opcode1 = m_opcodes[0];
+ WORD opcode2 = m_opcodes[1];
+
+ LOG((LF_CORDB, LL_INFO100000, "ArmSingleStepper::Apply(pc=%x, opcode=%x %x)\n",
+ (DWORD)pCtx->Pc, (DWORD)opcode1, (DWORD)opcode2));
+
+#ifdef _DEBUG
+ // Make sure that we aren't trying to step through our own buffer. If this asserts, something is horribly
+ // wrong with the debugging layer. Likely GetManagedStoppedCtx is retrieving a Pc that points to our
+ // buffer, even though the single stepper is disabled.
+ DWORD codestart = (DWORD)(DWORD_PTR)m_rgCode;
+ DWORD codeend = codestart + (kMaxCodeBuffer * sizeof(WORD));
+ _ASSERTE((pCtx->Pc < codestart) || (pCtx->Pc >= codeend));
+#endif
+
+ // All stepping is simulated using a breakpoint instruction. Since other threads are not suspended while
+ // we step, we avoid race conditions and other complexity by redirecting the thread into a thread-local
+ // execution buffer. We can either copy the instruction we wish to step over into the buffer followed by a
+ // breakpoint or we can emulate the instruction (which is useful for instructions that depend on the value
+ // of the PC or that branch or call to an alternate location). Even in the emulation case we still
+ // redirect execution into the buffer and insert a breakpoint; this simplifies our interface since the
+ // rest of the runtime is not set up to expect single stepping to occur inline. Instead there is always a
+ // 1:1 relationship between setting the single-step mode and receiving an exception once the thread is
+ // restarted.
+ //
+ // There are two parts to the emulation:
+ // 1) In this method we either emulate the instruction (updating the input thread context as a result) or
+ // copy the single instruction into the execution buffer. In both cases we copy a breakpoint into the
+ // execution buffer as well then update the thread context to redirect execution into this buffer.
+ // 2) In the runtime's first chance vectored exception handler we perform the necessary fixups to make
+ // the exception look like the result of a single step. This includes resetting the PC to its correct
+ // value (either the instruction following the stepped instruction or the target PC cached in this
+ // object when we emulated an instruction that alters the PC). It also involves switching
+ // EXCEPTION_BREAKPOINT to EXCEPTION_SINGLE_STEP.
+ //
+ // If we encounter an exception while emulating an instruction (currently this can only happen if we A/V
+ // trying to read a value from memory) then we abandon emulation and fall back to the copy instruction
+ // mechanism. When we run the execution buffer the exception should be raised and handled as normal (we
+ // still peform context fixup in this case but we don't attempt to alter any exception code other than
+ // EXCEPTION_BREAKPOINT to EXCEPTION_SINGLE_STEP). There is a very small timing window here where another
+ // thread could alter memory protections to avoid the A/V when we run the instruction for real but the
+ // liklihood of this happening (in managed code no less) is judged sufficiently small that it's not worth
+ // the alternate solution (where we'd have to set the thread up to raise an exception with exactly the
+ // right thread context).
+ //
+ // Matters are complicated by the ARM IT instruction (up to four following instructions are executed
+ // conditionally based on a single condition or its negation). The issues are that the current instruction
+ // may be rendered into a no-op or that a breakpoint immediately following the current instruction may not
+ // be executed. To simplify matters we may modify the IT state to force our instructions to execute. We
+ // cache the real state and re-apply it along with the rest of our fixups when handling the breakpoint
+ // exception. Note that when executing general instructions we can't simply disable any IT state since
+ // many instructions alter their behavior depending on whether they're executing within an IT block
+ // (mostly it's used to determine whether these instructions set condition flags or not).
+
+ // Cache thread's initial PC and IT state since we'll overwrite them as part of the emulation and we need
+ // to get back to the correct values at fixup time. We also cache a target PC (set below) since some
+ // instructions will set the PC directly or otherwise make it difficult for us to compute the final PC
+ // from the original. We still need the original PC however since this is the one we'll use if an
+ // exception (other than a breakpoint) occurs.
+ _ASSERTE(!m_fBypass || (m_originalPc == pCtx->Pc));
+
+ m_originalPc = pCtx->Pc;
+ m_originalITState.Get(pCtx);
+
+ // By default assume the next PC is right after the current instruction.
+ m_targetPc = m_originalPc + (Is32BitInstruction(opcode1) ? 4 : 2);
+ m_fEmulate = false;
+
+ // One more special case: if we attempt to single-step over an IT instruction it's easier to emulate this,
+ // set the new IT state in m_originalITState and set a special flag that lets Fixup() know we don't need
+ // to advance the state (this only works because we know IT will never raise an exception so we don't need
+ // m_originalITState to store the real original IT state, though in truth a legal IT instruction cannot be
+ // executed inside an IT block anyway). This flag (and m_originalITState) will be set inside TryEmulate()
+ // as needed.
+ m_fEmulatedITInstruction = false;
+ m_fSkipIT = false;
+
+ // There are three different scenarios we must deal with (listed in priority order). In all cases we will
+ // redirect the thread to execute code from our buffer and end by raising a breakpoint exception:
+ // 1) We're executing in an IT block and the current instruction doesn't meet the condition requirements.
+ // We leave the state unchanged and in fixup will advance the PC to the next instruction slot.
+ // 2) The current instruction either takes the PC as an input or modifies the PC in a non-trivial manner.
+ // We can't easily run these instructions from the redirect buffer so we emulate their effect (i.e.
+ // update the current context in the same way as executing the instruction would). The breakpoint
+ // fixup logic will restore the PC to the real resultant PC we cache in m_targetPc.
+ // 3) For all other cases (including emulation cases where we aborted due to a memory fault) we copy the
+ // single instruction into the redirect buffer for execution followed by a breakpoint (once we regain
+ // control in the breakpoint fixup logic we can then reset the PC to its proper location).
+
+ DWORD idxNextInstruction = 0;
+
+ if (m_originalITState.InITBlock() && !ConditionHolds(pCtx, m_originalITState.CurrentCondition()))
+ {
+ LOG((LF_CORDB, LL_INFO100000, "ArmSingleStepper: Case 1: ITState::Clear;\n"));
+ // Case 1: The current instruction is a no-op because of the IT instruction. We've already set the
+ // target PC to the next instruction slot. Disable the IT block since we want our breakpoint
+ // to execute. We'll put the correct value back during fixup.
+ ITState::Clear(pCtx);
+ m_fSkipIT = true;
+ m_rgCode[idxNextInstruction++] = kBreakpointOp;
+ }
+ else if (TryEmulate(pCtx, opcode1, opcode2, false))
+ {
+ LOG((LF_CORDB, LL_INFO100000, "ArmSingleStepper: Case 2: Emulate\n"));
+ // Case 2: Successfully emulated an instruction that reads or writes the PC. Cache the new target PC
+ // so upon fixup we'll resume execution there rather than the following instruction. No need
+ // to mess with IT state since we know the next instruction is scheduled to execute (we dealt
+ // with the case where it wasn't above) and we're going to execute a breakpoint in that slot.
+ m_targetPc = pCtx->Pc;
+ m_fEmulate = true;
+
+ // Set breakpoints to stop the execution. This will get us right back here.
+ m_rgCode[idxNextInstruction++] = kBreakpointOp;
+ m_rgCode[idxNextInstruction++] = kBreakpointOp;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO100000, "ArmSingleStepper: Case 3: CopyInstruction. Is32Bit=%d\n", (DWORD)Is32BitInstruction(opcode1)));
+ // Case 3: In all other cases copy the instruction to the buffer and we'll run it directly. If we're
+ // in an IT block there could be up to three instructions following this one whose execution
+ // is skipped. We could try to be clever here and either alter IT state to force the next
+ // instruction to execute or calculate how many filler instructions we need to insert
+ // before we're guaranteed our breakpoint will be respected. But it's easier to just insert
+ // three additional breakpoints here (code below will add the fourth) and that way we'll
+ // guarantee one of them will be hit (we don't care which one -- the fixup code will update
+ // the PC and IT state to make it look as though the CPU just executed the current
+ // instruction).
+ m_rgCode[idxNextInstruction++] = opcode1;
+ if (Is32BitInstruction(opcode1))
+ m_rgCode[idxNextInstruction++] = opcode2;
+
+ m_rgCode[idxNextInstruction++] = kBreakpointOp;
+ m_rgCode[idxNextInstruction++] = kBreakpointOp;
+ m_rgCode[idxNextInstruction++] = kBreakpointOp;
+ }
+
+ // Always terminate the redirection buffer with a breakpoint.
+ m_rgCode[idxNextInstruction++] = kBreakpointOp;
+ _ASSERTE(idxNextInstruction <= kMaxCodeBuffer);
+
+ // Set the thread up so it will redirect to our buffer when execution resumes.
+ pCtx->Pc = ((DWORD)(DWORD_PTR)m_rgCode) | THUMB_CODE;
+
+ // Make sure the CPU sees the updated contents of the buffer.
+ FlushInstructionCache(GetCurrentProcess(), m_rgCode, sizeof(m_rgCode));
+
+ // Done, set the state.
+ m_state = Applied;
+}
+
+void ArmSingleStepper::Disable()
+{
+ _ASSERTE(m_state != Applied);
+ m_state = Disabled;
+}
+
+// When called in response to an exception (preferably in a first chance vectored handler before anyone else
+// has looked at the thread context) this method will (a) determine whether this exception was raised by a
+// call to Enable() above, in which case true will be returned and (b) perform final fixup of the thread
+// context passed in to complete the emulation of a hardware single step. Note that this routine must be
+// called even if the exception code is not EXCEPTION_BREAKPOINT since the instruction stepped might have
+// raised its own exception (e.g. A/V) and we still need to fix the thread context in this case.
+bool ArmSingleStepper::Fixup(T_CONTEXT *pCtx, DWORD dwExceptionCode)
+{
+#ifdef _DEBUG
+ DWORD codestart = (DWORD)(DWORD_PTR)m_rgCode;
+ DWORD codeend = codestart + (kMaxCodeBuffer * sizeof(WORD));
+#endif
+
+ // If we reach fixup, we should either be Disabled or Applied. If we reach here with Enabled it means
+ // that the debugging layer Enabled the single stepper, but we never applied it to a CONTEXT.
+ _ASSERTE(m_state != Enabled);
+
+ // Nothing to do if the stepper is disabled on this thread.
+ if (m_state == Disabled)
+ {
+ // We better not be inside our internal code buffer though.
+ _ASSERTE((pCtx->Pc < codestart) || (pCtx->Pc >= codeend));
+ return false;
+ }
+
+ // Turn off the single stepper after we have executed one instruction.
+ m_state = Disabled;
+
+ // We should always have a PC somewhere in our redirect buffer.
+#ifdef _DEBUG
+ _ASSERTE((pCtx->Pc >= codestart) && (pCtx->Pc < codeend));
+#endif
+
+ if (dwExceptionCode == EXCEPTION_BREAKPOINT)
+ {
+ // The single step went as planned. Set the PC back to its real value (either following the
+ // instruction we stepped or the computed destination we cached after emulating an instruction that
+ // modifies the PC). Advance the IT state from the value we cached before the single step (unless we
+ // stepped an IT instruction itself, in which case m_originalITState holds the new state and we should
+ // just set that).
+ if (!m_fEmulate)
+ {
+ if (m_rgCode[0] != kBreakpointOp)
+ {
+ LOG((LF_CORDB, LL_INFO100000, "ArmSingleStepper::Fixup executed code, ip = %x\n", m_targetPc));
+
+ pCtx->Pc = m_targetPc;
+ if (!m_fEmulatedITInstruction)
+ m_originalITState.Advance();
+
+ m_originalITState.Set(pCtx);
+ }
+ else
+ {
+ if (m_fSkipIT)
+ {
+ // We needed to skip over an instruction due to a false condition in an IT block.
+ LOG((LF_CORDB, LL_INFO100000, "ArmSingleStepper::Fixup skipped instruction due to IT\n"));
+ pCtx->Pc = m_targetPc;
+
+ _ASSERTE(!m_fEmulatedITInstruction);
+ m_originalITState.Advance();
+ m_originalITState.Set(pCtx);
+ }
+ else
+ {
+ // We've hit a breakpoint in the code stream. We will return false here (which causes us to NOT
+ // replace the breakpoint code with a single step), and set the Pc back to the original Pc. The
+ // debugger patch skipping code will move past this breakpoint.
+ LOG((LF_CORDB, LL_INFO100000, "ArmSingleStepper::Fixup emulated breakpoint\n"));
+ pCtx->Pc = m_originalPc;
+
+ _ASSERTE(pCtx->Pc & THUMB_CODE);
+ return false;
+ }
+ }
+ }
+ else
+ {
+ bool res = TryEmulate(pCtx, m_opcodes[0], m_opcodes[1], true);
+ _ASSERTE(res); // We should always successfully emulate since we ran it through TryEmulate already.
+
+ if (!m_fRedirectedPc)
+ pCtx->Pc = m_targetPc;
+
+ LOG((LF_CORDB, LL_INFO100000, "ArmSingleStepper::Fixup emulated, ip = %x\n", pCtx->Pc));
+ }
+ }
+ else
+ {
+ // The stepped instruction caused an exception. Reset the PC and IT state to their original values we
+ // cached before stepping. (We should never see this when stepping an IT instruction, which overwrites
+ // m_originalITState.)
+ _ASSERTE(!m_fEmulatedITInstruction);
+ _ASSERTE(m_fEmulate == false);
+ pCtx->Pc = m_originalPc;
+ m_originalITState.Set(pCtx);
+
+ LOG((LF_CORDB, LL_INFO100000, "ArmSingleStepper::Fixup hit exception pc = %x ex = %x\n", pCtx->Pc, dwExceptionCode));
+ }
+
+ _ASSERTE(pCtx->Pc & THUMB_CODE);
+ return true;
+}
+
+// Count the number of bits set in a DWORD.
+DWORD ArmSingleStepper::BitCount(DWORD dwValue)
+{
+ // There are faster implementations but speed isn't critical here.
+ DWORD cBits = 0;
+ while (dwValue)
+ {
+ cBits += dwValue & 1;
+ dwValue >>= 1;
+ }
+ return cBits;
+}
+
+// Return true if the given flag (C, N, Z or V) is set in the current context.
+#define GET_FLAG(pCtx, _flag) \
+ ((pCtx->Cpsr & (1 << APSR_##_flag)) != 0)
+
+// Returns true if the current context indicates the ARM condition specified holds.
+bool ArmSingleStepper::ConditionHolds(T_CONTEXT *pCtx, DWORD cond)
+{
+ switch (cond)
+ {
+ case 0: // EQ (Z==1)
+ return GET_FLAG(pCtx, Z);
+ case 1: // NE (Z==0)
+ return !GET_FLAG(pCtx, Z);
+ case 2: // CS (C==1)
+ return GET_FLAG(pCtx, C);
+ case 3: // CC (C==0)
+ return !GET_FLAG(pCtx, C);
+ case 4: // MI (N==1)
+ return GET_FLAG(pCtx, N);
+ case 5: // PL (N==0)
+ return !GET_FLAG(pCtx, N);
+ case 6: // VS (V==1)
+ return GET_FLAG(pCtx, V);
+ case 7: // VC (V==0)
+ return !GET_FLAG(pCtx, V);
+ case 8: // HI (C==1 && Z==0)
+ return GET_FLAG(pCtx, C) && !GET_FLAG(pCtx, Z);
+ case 9: // LS (C==0 || Z==1)
+ return !GET_FLAG(pCtx, C) || GET_FLAG(pCtx, Z);
+ case 10: // GE (N==V)
+ return GET_FLAG(pCtx, N) == GET_FLAG(pCtx, V);
+ case 11: // LT (N!=V)
+ return GET_FLAG(pCtx, N) != GET_FLAG(pCtx, V);
+ case 12: // GT (Z==0 && N==V)
+ return !GET_FLAG(pCtx, Z) && (GET_FLAG(pCtx, N) == GET_FLAG(pCtx, V));
+ case 13: // LE (Z==1 || N!=V)
+ return GET_FLAG(pCtx, Z) || (GET_FLAG(pCtx, N) != GET_FLAG(pCtx, V));
+ case 14: // AL
+ return true;
+ case 15:
+ _ASSERTE(!"Unsupported condition code: 15");
+ return false;
+ default:
+// UNREACHABLE();
+ return false;
+ }
+}
+
+// Get the current value of a register. PC (register 15) is always reported as the current instruction PC + 4
+// as per the ARM architecture.
+DWORD ArmSingleStepper::GetReg(T_CONTEXT *pCtx, DWORD reg)
+{
+ _ASSERTE(reg <= 15);
+
+ if (reg == 15)
+ return (m_originalPc + 4) & ~THUMB_CODE;
+
+ return (&pCtx->R0)[reg];
+}
+
+// Set the current value of a register. If the PC (register 15) is set then m_fRedirectedPc is set to true.
+void ArmSingleStepper::SetReg(T_CONTEXT *pCtx, DWORD reg, DWORD value)
+{
+ _ASSERTE(reg <= 15);
+
+ if (reg == 15)
+ {
+ value |= THUMB_CODE;
+ m_fRedirectedPc = true;
+ }
+
+ (&pCtx->R0)[reg] = value;
+}
+
+// Attempt to read a 1, 2 or 4 byte value from memory, zero or sign extend it to a 4-byte value and place that
+// value into the buffer pointed at by pdwResult. Returns false if attempting to read the location caused a
+// fault.
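+// For example, reading the single byte 0x80 with fSignExtend true yields 0xffffff80; with fSignExtend
+// false it yields 0x00000080.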
+bool ArmSingleStepper::GetMem(DWORD *pdwResult, DWORD_PTR pAddress, DWORD cbSize, bool fSignExtend)
+{
+ __try
+ {
+ switch (cbSize)
+ {
+ case 1:
+ *pdwResult = *(BYTE*)pAddress;
+ if (fSignExtend && (*pdwResult & 0x00000080))
+ *pdwResult |= 0xffffff00;
+ break;
+ case 2:
+ *pdwResult = *(WORD*)pAddress;
+ if (fSignExtend && (*pdwResult & 0x00008000))
+ *pdwResult |= 0xffff0000;
+ break;
+ case 4:
+ *pdwResult = *(DWORD*)pAddress;
+ break;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+// Wrapper around GetMem above that will automatically return from TryEmulate() indicating the instruction
+// could not be emulated if we try to read memory and fail due to an exception. This logic works (i.e. we can
+// simply return without worrying whether we've already updated the thread context) due to the fact that we
+// either (a) read memory before updating any registers (the various LDR literal variants) or (b) update the
+// register list before the base register in LDM-like operations (and this should therefore be an idempotent
+// operation when we re-execute the instruction). If this ever changes we will have to store a copy of the
+// original context we can use to revert changes (it gets even more complex if we ever have to emulate an
+// instruction that writes memory).
+#define GET_MEM(_result, _addr, _size, _signextend) \
+ do { \
+ if (!GetMem((_result), (_addr), (_size), (_signextend))) \
+ return false; \
+ } while (false)
+
+// Implements the various LDM-style multi-register load instructions (these include POP).
+#define LDM(ctx, _base, _registerlist, _writeback, _ia) \
+ do { \
+ DWORD _pAddr = GetReg(ctx, _base); \
+ if (!(_ia)) \
+ _pAddr -= BitCount(_registerlist) * sizeof(void*); \
+ DWORD _pStartAddr = _pAddr; \
+ for (DWORD _i = 0; _i < 16; _i++) \
+ { \
+ if ((_registerlist) & (1 << _i)) \
+ { \
+ DWORD _tmpresult; \
+ GET_MEM(&_tmpresult, _pAddr, 4, false); \
+ SetReg(ctx, _i, _tmpresult); \
+ _pAddr += sizeof(void*); \
+ } \
+ } \
+ if (_writeback) \
+ SetReg(ctx, _base, (_ia) ? _pAddr : _pStartAddr); \
+ } while (false)
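+
+// For example, a POP.W {r4-r7, pc} can be modelled as LDM(pCtx, 13, 0x80f0, 1, true): base register 13
+// (SP), register-list bits 4-7 and 15 set, writeback enabled, increment-after. Loading bit 15 goes
+// through SetReg and therefore redirects the PC.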
+
+// Parse the instruction whose first word is given in opcode1 (if the instruction is 32-bit TryEmulate will
+// fetch the second word using the value of the PC stored in the current context). If the instruction reads or
+// writes the PC or is the IT instruction then it will be emulated by updating the thread context
+// appropriately and true will be returned. If the instruction is not one of those cases (or it is but we
+// faulted trying to read memory during the emulation) no state is updated and false is returned instead.
+bool ArmSingleStepper::TryEmulate(T_CONTEXT *pCtx, WORD opcode1, WORD opcode2, bool execute)
+{
+ LOG((LF_CORDB, LL_INFO100000, "ArmSingleStepper::TryEmulate(opcode=%x %x, execute=%s)\n", (DWORD)opcode1, (DWORD)opcode2, execute ? "true" : "false"));
+
+ // Track whether instruction emulation wrote a modified PC.
+ m_fRedirectedPc = false;
+
+ // Track whether we successfully emulated an instruction. If we did and we didn't modify the PC (e.g. a
+ // ADR instruction or a conditional branch not taken) then we'll need to explicitly set the PC to the next
+ // instruction (since our caller expects that whenever we return true m_pCtx->Pc holds the next
+ // instruction address).
+ bool fEmulated = false;
+
+ if (Is32BitInstruction(opcode1))
+ {
+ if (((opcode1 & 0xfbff) == 0xf2af) &&
+ ((opcode2 & 0x8000) == 0x0000))
+ {
+ // ADR.W : T2
+ if (execute)
+ {
+ DWORD Rd = BitExtract(opcode2, 11, 8);
+ DWORD i = BitExtract(opcode1, 10, 10);
+ DWORD imm3 = BitExtract(opcode2, 14, 12);
+ DWORD imm8 = BitExtract(opcode2, 7, 0);
+
+ SetReg(pCtx, Rd, (GetReg(pCtx, 15) & ~3) - ((i << 11) | (imm3 << 8) | imm8));
+ }
+
+ fEmulated = true;
+ }
+ else if (((opcode1 & 0xfbff) == 0xf20f) &&
+ ((opcode2 & 0x8000) == 0x0000))
+ {
+ // ADR.W : T3
+ if (execute)
+ {
+ DWORD Rd = BitExtract(opcode2, 11, 8);
+ DWORD i = BitExtract(opcode1, 10, 10);
+ DWORD imm3 = BitExtract(opcode2, 14, 12);
+ DWORD imm8 = BitExtract(opcode2, 7, 0);
+
+ SetReg(pCtx, Rd, (GetReg(pCtx, 15) & ~3) + ((i << 11) | (imm3 << 8) | imm8));
+ }
+
+ fEmulated = true;
+ }
+ else if (((opcode1 & 0xf800) == 0xf000) &&
+ ((opcode2 & 0xd000) == 0x8000) &&
+ ((opcode1 & 0x0380) != 0x0380))
+ {
+ // B.W : T3
+ if (execute)
+ {
+ DWORD S = BitExtract(opcode1, 10, 10);
+ DWORD cond = BitExtract(opcode1, 9, 6);
+ DWORD imm6 = BitExtract(opcode1, 5, 0);
+ DWORD J1 = BitExtract(opcode2, 13, 13);
+ DWORD J2 = BitExtract(opcode2, 11, 11);
+ DWORD imm11 = BitExtract(opcode2, 10, 0);
+
+ if (ConditionHolds(pCtx, cond) && execute)
+ {
+ DWORD disp = (S ? 0xfff00000 : 0) | (J2 << 19) | (J1 << 18) | (imm6 << 12) | (imm11 << 1);
+ SetReg(pCtx, 15, GetReg(pCtx, 15) + disp);
+ }
+ }
+
+ fEmulated = true;
+ }
+ else if (((opcode1 & 0xf800) == 0xf000) &&
+ ((opcode2 & 0xd000) == 0x9000))
+ {
+ // B.W : T4
+ if (execute)
+ {
+ DWORD S = BitExtract(opcode1, 10, 10);
+ DWORD imm10 = BitExtract(opcode1, 9, 0);
+ DWORD J1 = BitExtract(opcode2, 13, 13);
+ DWORD J2 = BitExtract(opcode2, 11, 11);
+ DWORD imm11 = BitExtract(opcode2, 10, 0);
+
+ DWORD I1 = (J1 ^ S) ^ 1;
+ DWORD I2 = (J2 ^ S) ^ 1;
+
+ DWORD disp = (S ? 0xff000000 : 0) | (I1 << 23) | (I2 << 22) | (imm10 << 12) | (imm11 << 1);
+ SetReg(pCtx, 15, GetReg(pCtx, 15) + disp);
+ }
+
+ fEmulated = true;
+ }
+ else if (((opcode1 & 0xf800) == 0xf000) &&
+ ((opcode2 & 0xd000) == 0xd000))
+ {
+ // BL (immediate) : T1
+ if (execute)
+ {
+ DWORD S = BitExtract(opcode1, 10, 10);
+ DWORD imm10 = BitExtract(opcode1, 9, 0);
+ DWORD J1 = BitExtract(opcode2, 13, 13);
+ DWORD J2 = BitExtract(opcode2, 11, 11);
+ DWORD imm11 = BitExtract(opcode2, 10, 0);
+
+ DWORD I1 = (J1 ^ S) ^ 1;
+ DWORD I2 = (J2 ^ S) ^ 1;
+
+ SetReg(pCtx, 14, GetReg(pCtx, 15) | 1);
+
+ DWORD disp = (S ? 0xff000000 : 0) | (I1 << 23) | (I2 << 22) | (imm10 << 12) | (imm11 << 1);
+ SetReg(pCtx, 15, GetReg(pCtx, 15) + disp);
+ }
+
+ fEmulated = true;
+ }
+ else if (((opcode1 & 0xffd0) == 0xe890) &&
+ ((opcode2 & 0x2000) == 0x0000))
+ {
+ // LDM.W : T2, POP.W : T2
+ if (execute)
+ {
+ DWORD W = BitExtract(opcode1, 5, 5);
+ DWORD Rn = BitExtract(opcode1, 3, 0);
+ DWORD registerList = opcode2;
+
+ LDM(pCtx, Rn, registerList, W, true);
+ fEmulated = true;
+ }
+ else
+ {
+ // We should only emulate this instruction if Pc is set
+ if (opcode2 & (1<<15))
+ fEmulated = true;
+ }
+ }
+ else if (((opcode1 & 0xffd0) == 0xe410) &&
+ ((opcode2 & 0x2000) == 0x0000))
+ {
+ // LDMDB : T1
+ if (execute)
+ {
+ DWORD W = BitExtract(opcode1, 5, 5);
+ DWORD Rn = BitExtract(opcode1, 3, 0);
+ DWORD registerList = opcode2;
+
+ LDM(pCtx, Rn, registerList, W, false);
+ fEmulated = true;
+ }
+ else
+ {
+ // We should only emulate this instruction if Pc is set
+ if (opcode2 & (1<<15))
+ fEmulated = true;
+ }
+ }
+ else if (((opcode1 & 0xfff0) == 0xf8d0) &&
+ ((opcode1 & 0x000f) != 0x000f))
+ {
+ // LDR.W (immediate): T3
+ DWORD Rt = BitExtract(opcode2, 15, 12);
+ DWORD Rn = BitExtract(opcode1, 3, 0);
+ if (execute)
+ {
+ DWORD imm12 = BitExtract(opcode2, 11, 0);
+
+ DWORD value;
+ GET_MEM(&value, GetReg(pCtx, Rn) + imm12, 4, false);
+
+ SetReg(pCtx, Rt, value);
+ fEmulated = true;
+ }
+ else
+ {
+ // We should only emulate this instruction if Pc is used
+ if (Rt == 15 || Rn == 15)
+ fEmulated = true;
+ }
+ }
+ else if (((opcode1 & 0xfff0) == 0xf850) &&
+ ((opcode2 & 0x0800) == 0x0800) &&
+ ((opcode1 & 0x000f) != 0x000f))
+ {
+ // LDR (immediate) : T4, POP : T3
+ DWORD Rn = BitExtract(opcode1, 3, 0);
+ DWORD Rt = BitExtract(opcode2, 15, 12);
+ if (execute)
+ {
+ DWORD P = BitExtract(opcode2, 10, 10);
+ DWORD U = BitExtract(opcode2, 9, 9);
+ DWORD W = BitExtract(opcode2, 8, 8);
+ DWORD imm8 = BitExtract(opcode2, 7, 0);
+
+ DWORD offset_addr = U ? GetReg(pCtx, Rn) + imm8 : GetReg(pCtx, Rn) - imm8;
+ DWORD addr = P ? offset_addr : GetReg(pCtx, Rn);
+
+ DWORD value;
+ GET_MEM(&value, addr, 4, false);
+
+ if (W)
+ SetReg(pCtx, Rn, offset_addr);
+
+ SetReg(pCtx, Rt, value);
+ fEmulated = true;
+ }
+ else
+ {
+ // We should only emulate this instruction if Pc is used
+ if (Rt == 15 || Rn == 15)
+ fEmulated = true;
+ }
+ }
+ else if (((opcode1 & 0xff7f) == 0xf85f))
+ {
+ // LDR.W (literal) : T2
+ DWORD Rt = BitExtract(opcode2, 15, 12);
+ if (execute)
+ {
+ DWORD U = BitExtract(opcode1, 7, 7);
+ DWORD imm12 = BitExtract(opcode2, 11, 0);
+
+ // This instruction always reads relative to R15/PC
+ DWORD addr = GetReg(pCtx, 15) & ~3;
+ addr = U ? addr + imm12 : addr - imm12;
+
+ DWORD value;
+ GET_MEM(&value, addr, 4, false);
+
+ SetReg(pCtx, Rt, value);
+ }
+
+ // We should ALWAYS emulate this instruction, because this instruction
+ // always reads the memory relative to PC
+ fEmulated = true;
+ }
+ else if (((opcode1 & 0xfff0) == 0xf850) &&
+ ((opcode2 & 0x0fc0) == 0x0000) &&
+ ((opcode1 & 0x000f) != 0x000f))
+ {
+ // LDR.W : T2
+ DWORD Rn = BitExtract(opcode1, 3, 0);
+ DWORD Rt = BitExtract(opcode2, 15, 12);
+ DWORD Rm = BitExtract(opcode2, 3, 0);
+ if (execute)
+ {
+ DWORD imm2 = BitExtract(opcode2, 5, 4);
+ DWORD addr = GetReg(pCtx, Rn) + (GetReg(pCtx, Rm) << imm2);
+
+ DWORD value;
+ GET_MEM(&value, addr, 4, false);
+
+ SetReg(pCtx, Rt, value);
+ fEmulated = true;
+ }
+ else
+ {
+ // We should only emulate this instruction if Pc is used
+ if (Rt == 15 || Rn == 15 || Rm == 15)
+ fEmulated = true;
+ }
+ }
+ else if (((opcode1 & 0xff7f) == 0xf81f) &&
+ ((opcode2 & 0xf000) != 0xf000))
+ {
+ // LDRB (literal) : T2
+ if (execute)
+ {
+ DWORD U = BitExtract(opcode1, 7, 7);
+ DWORD Rt = BitExtract(opcode2, 15, 12);
+ DWORD imm12 = BitExtract(opcode2, 11, 0);
+
+ DWORD addr = (GetReg(pCtx, 15) & ~3);
+ addr = U ? addr + imm12 : addr - imm12;
+
+ DWORD value;
+ GET_MEM(&value, addr, 1, false);
+
+ SetReg(pCtx, Rt, value);
+ }
+
+ fEmulated = true;
+ }
+ else if (((opcode1 & 0xfe5f) == 0xe85f) &&
+ ((opcode1 & 0x0120) != 0x0000))
+ {
+ // LDRD (literal) : T1
+ if (execute)
+ {
+ DWORD U = BitExtract(opcode1, 7, 7);
+ DWORD Rt = BitExtract(opcode2, 15, 12);
+ DWORD Rt2 = BitExtract(opcode2, 11, 8);
+ DWORD imm8 = BitExtract(opcode2, 7, 0);
+
+ DWORD addr = (GetReg(pCtx, 15) & ~3);
+ addr = U ? addr + (imm8 << 2) : addr - (imm8 << 2);
+
+ DWORD value1;
+ GET_MEM(&value1, addr, 4, false);
+
+ DWORD value2;
+ GET_MEM(&value2, addr + 4, 4, false);
+
+ SetReg(pCtx, Rt, value1);
+ SetReg(pCtx, Rt2, value2);
+ }
+
+ fEmulated = true;
+ }
+ else if (((opcode1 & 0xff7f) == 0xf83f) &&
+ ((opcode2 & 0xf000) != 0xf000))
+ {
+ // LDRH (literal) : T1
+ if (execute)
+ {
+ DWORD U = BitExtract(opcode1, 7, 7);
+ DWORD Rt = BitExtract(opcode2, 15, 12);
+ DWORD imm12 = BitExtract(opcode2, 11, 0);
+
+ DWORD addr = (GetReg(pCtx, 15) & ~3);
+ addr = U ? addr + imm12 : addr - imm12;
+
+ DWORD value;
+ GET_MEM(&value, addr, 2, false);
+
+ SetReg(pCtx, Rt, value);
+ }
+
+ fEmulated = true;
+ }
+ else if (((opcode1 & 0xff7f) == 0xf91f) &&
+ ((opcode2 & 0xf000) != 0xf000))
+ {
+ // LDRSB (literal) : T1
+ if (execute)
+ {
+ DWORD U = BitExtract(opcode1, 7, 7);
+ DWORD Rt = BitExtract(opcode2, 15, 12);
+ DWORD imm12 = BitExtract(opcode2, 11, 0);
+
+ DWORD addr = (GetReg(pCtx, 15) & ~3);
+ addr = U ? addr + imm12 : addr - imm12;
+
+ DWORD value;
+ GET_MEM(&value, addr, 1, true);
+
+ SetReg(pCtx, Rt, value);
+ }
+
+ fEmulated = true;
+ }
+ else if (((opcode1 & 0xff7f) == 0xf93f) &&
+ ((opcode2 & 0xf000) != 0xf000))
+ {
+ // LDRSH (literal) : T1
+ if (execute)
+ {
+ DWORD U = BitExtract(opcode1, 7, 7);
+ DWORD Rt = BitExtract(opcode2, 15, 12);
+ DWORD imm12 = BitExtract(opcode2, 11, 0);
+
+ DWORD addr = (GetReg(pCtx, 15) & ~3);
+ addr = U ? addr + imm12 : addr - imm12;
+
+ DWORD value;
+ GET_MEM(&value, addr, 2, true);
+
+ SetReg(pCtx, Rt, value);
+ }
+
+ fEmulated = true;
+ }
+ else if (((opcode1 & 0xfff0) == 0xe8d0) &&
+ ((opcode2 & 0xffe0) == 0xf000))
+ {
+ // TBB/TBH : T1
+ if (execute)
+ {
+ DWORD Rn = BitExtract(opcode1, 3, 0);
+ DWORD H = BitExtract(opcode2, 4, 4);
+ DWORD Rm = BitExtract(opcode2, 3, 0);
+
+ DWORD addr = GetReg(pCtx, Rn);
+
+ DWORD value;
+ if (H)
+ GET_MEM(&value, addr + (GetReg(pCtx, Rm) << 1), 2, false);
+ else
+ GET_MEM(&value, addr + GetReg(pCtx, Rm), 1, false);
+
+ SetReg(pCtx, 15, GetReg(pCtx, 15) + (value << 1));
+ }
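+ // (TBB/TBH loads a byte/halfword from a branch table at Rn indexed by Rm
+ // and advances the PC by twice that value; e.g. a byte entry of 3 moves
+ // the PC read above forward by 6 bytes.)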
+
+ fEmulated = true;
+ }
+
+ // If we emulated an instruction but didn't set the PC explicitly we have to do so now (in such cases
+ // the next PC will always point directly after the instruction we just emulated).
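+ // (GetReg(pCtx, 15) returns the Thumb PC, which already points four bytes
+ // past the start of the instruction and is therefore exactly the
+ // fall-through address for a 32-bit encoding; the 16-bit path below
+ // subtracts 2 for the same reason.)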
+ if (execute && fEmulated && !m_fRedirectedPc)
+ SetReg(pCtx, 15, GetReg(pCtx, 15));
+ }
+ else
+ {
+ // Handle 16-bit instructions.
+
+ if ((opcode1 & 0xf800) == 0xa000)
+ {
+ // ADR : T1
+ if (execute)
+ {
+ DWORD Rd = BitExtract(opcode1, 10, 8);
+ DWORD imm8 = BitExtract(opcode1, 7, 0);
+
+ SetReg(pCtx, Rd, (GetReg(pCtx, 15) & ~3) + (imm8 << 2));
+ }
+
+ fEmulated = true;
+ }
+ else if (((opcode1 & 0xf000) == 0xd000) && ((opcode1 & 0x0f00) != 0x0e00))
+ {
+ // B : T1
+
+ // We only emulate this instruction if we take the conditional
+ // jump. If not we'll pass right over the jump and set the
+ // target IP as normal.
+ DWORD cond = BitExtract(opcode1, 11, 8);
+ if (execute)
+ {
+ _ASSERTE(ConditionHolds(pCtx, cond));
+
+ DWORD imm8 = BitExtract(opcode1, 7, 0);
+ DWORD disp = (imm8 << 1) | ((imm8 & 0x80) ? 0xffffff00 : 0);
+
+ SetReg(pCtx, 15, GetReg(pCtx, 15) + disp);
+ fEmulated = true;
+ }
+ else
+ {
+ if (ConditionHolds(pCtx, cond))
+ {
+ fEmulated = true;
+ }
+ }
+ }
+ else if ((opcode1 & 0xf800) == 0xe000)
+ {
+ if (execute)
+ {
+ // B : T2
+ DWORD imm11 = BitExtract(opcode1, 10, 0);
+ DWORD disp = (imm11 << 1) | ((imm11 & 0x400) ? 0xfffff000 : 0);
+
+ SetReg(pCtx, 15, GetReg(pCtx, 15) + disp);
+ }
+
+ fEmulated = true;
+ }
+ else if ((opcode1 & 0xff87) == 0x4780)
+ {
+ // BLX (register) : T1
+ if (execute)
+ {
+ DWORD Rm = BitExtract(opcode1, 6, 3);
+ DWORD addr = GetReg(pCtx, Rm);
+
+ SetReg(pCtx, 14, (GetReg(pCtx, 15) - 2) | 1);
+ SetReg(pCtx, 15, addr);
+ }
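+ // (The return address is PC - 2: the PC reads as the BLX's address + 4,
+ // and the instruction itself is 2 bytes; the low bit keeps Thumb state.)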
+
+ fEmulated = true;
+ }
+ else if ((opcode1 & 0xff87) == 0x4700)
+ {
+ // BX : T1
+ if (execute)
+ {
+ DWORD Rm = BitExtract(opcode1, 6, 3);
+ SetReg(pCtx, 15, GetReg(pCtx, Rm));
+ }
+
+ fEmulated = true;
+ }
+ else if ((opcode1 & 0xf500) == 0xb100)
+ {
+ // CBNZ/CBZ : T1
+ if (execute)
+ {
+ DWORD op = BitExtract(opcode1, 11, 11);
+ DWORD i = BitExtract(opcode1, 9, 9);
+ DWORD imm5 = BitExtract(opcode1, 7, 3);
+ DWORD Rn = BitExtract(opcode1, 2, 0);
+
+ if ((op && (GetReg(pCtx, Rn) != 0)) ||
+ (!op && (GetReg(pCtx, Rn) == 0)))
+ {
+ SetReg(pCtx, 15, GetReg(pCtx, 15) + ((i << 6) | (imm5 << 1)));
+ }
+ }
+
+ fEmulated = true;
+ }
+ else if (((opcode1 & 0xff00) == 0xbf00) &&
+ ((opcode1 & 0x000f) != 0x0000))
+ {
+ // IT : T1
+ if (execute)
+ {
+ DWORD firstcond = BitExtract(opcode1, 7, 4);
+ DWORD mask = BitExtract(opcode1, 3, 0);
+
+ // The IT instruction is special. We compute the IT state bits for the CPSR and cache them in
+ // m_originalITState. We then set m_fEmulatedITInstruction so that Fixup() knows not to advance
+ // this state (simply write it as-is back into the CPSR).
+ m_originalITState.Init((BYTE)((firstcond << 4) | mask));
+ m_originalITState.Set(pCtx);
+ m_fEmulatedITInstruction = true;
+ }
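+ // (Example: firstcond = 0b0000 (EQ) with mask = 0b1000 predicates exactly
+ // one following instruction on EQ.)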
+
+ fEmulated = true;
+ }
+ else if ((opcode1 & 0xf800) == 0x4800)
+ {
+ // LDR (literal) : T1
+ if (execute)
+ {
+ DWORD Rt = BitExtract(opcode1, 10, 8);
+ DWORD imm8 = BitExtract(opcode1, 7, 0);
+
+ DWORD addr = (GetReg(pCtx, 15) & ~3) + (imm8 << 2);
+
+ DWORD value = 0;
+ GET_MEM(&value, addr, 4, false);
+
+ SetReg(pCtx, Rt, value);
+ }
+
+ fEmulated = true;
+ }
+ else if ((opcode1 & 0xff00) == 0x4600)
+ {
+ // MOV (register) : T1
+ DWORD D = BitExtract(opcode1, 7, 7);
+ DWORD Rm = BitExtract(opcode1, 6, 3);
+ DWORD Rd = (D << 3) | BitExtract(opcode1, 2, 0);
+
+ if (execute)
+ {
+ SetReg(pCtx, Rd, GetReg(pCtx, Rm));
+ fEmulated = true;
+ }
+ else
+ {
+ // Only emulate if we change Pc
+ if (Rm == 15 || Rd == 15)
+ fEmulated = true;
+ }
+ }
+ else if ((opcode1 & 0xfe00) == 0xbc00)
+ {
+ // POP : T1
+ DWORD P = BitExtract(opcode1, 8, 8);
+ DWORD registerList = (P << 15) | BitExtract(opcode1, 7, 0);
+ if (execute)
+ {
+ LDM(pCtx, 13, registerList, true, true);
+ fEmulated = true;
+ }
+ else
+ {
+ // Only emulate if Pc is in the register list
+ if (registerList & (1<<15))
+ fEmulated = true;
+ }
+ }
+
+ // If we emulated an instruction but didn't set the PC explicitly we have to do so now (in such cases
+ // the next PC will always point directly after the instruction we just emulated).
+ if (execute && fEmulated && !m_fRedirectedPc)
+ SetReg(pCtx, 15, GetReg(pCtx, 15) - 2);
+ }
+
+ LOG((LF_CORDB, LL_INFO100000, "ArmSingleStepper::TryEmulate(opcode=%x %x) emulated=%s redirectedPc=%s\n",
+ (DWORD)opcode1, (DWORD)opcode2, fEmulated ? "true" : "false", m_fRedirectedPc ? "true" : "false"));
+ return fEmulated;
+}
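+
+// A note on the helper used throughout TryEmulate: BitExtract(w, highbit,
+// lowbit) is understood here to return the inclusive bit field
+// [highbit:lowbit] of a 16-bit opcode word. A minimal sketch of that
+// contract (the real helper is presumably defined earlier in this file):
+//
+//   DWORD BitExtract(WORD wValue, DWORD highbit, DWORD lowbit)
+//   {
+//       return (wValue >> lowbit) & ((1u << (highbit - lowbit + 1)) - 1);
+//   }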
diff --git a/src/vm/arm/asmconstants.h b/src/vm/arm/asmconstants.h
new file mode 100644
index 0000000000..8682f85d98
--- /dev/null
+++ b/src/vm/arm/asmconstants.h
@@ -0,0 +1,305 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// asmconstants.h -
+//
+// This header defines field offsets and constants used by assembly code.
+// Be sure to rebuild clr/src/vm/ceemain.cpp after changing this file to
+// ensure that the constants match the expected C/C++ values.
+
+// #ifndef _ARM_
+// #error this file should only be used on an ARM platform
+// #endif // _ARM_
+
+#include "..\..\inc\switches.h"
+
+//-----------------------------------------------------------------------------
+
+#ifndef ASMCONSTANTS_C_ASSERT
+#define ASMCONSTANTS_C_ASSERT(cond)
+#endif
+
+#ifndef ASMCONSTANTS_RUNTIME_ASSERT
+#define ASMCONSTANTS_RUNTIME_ASSERT(cond)
+#endif
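+
+// When this header is included from C++, the includer is expected to define
+// ASMCONSTANTS_C_ASSERT to a compile-time assertion so that every offset and
+// size below is checked against the real type layout; a minimal sketch of
+// such a definition:
+//
+//   #define ASMCONSTANTS_C_ASSERT(cond) static_assert((cond), #cond)
+//
+// When the header is consumed by the assembler, the empty defaults above make
+// the asserts expand to nothing.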
+
+// Some constants are different in _DEBUG builds. This macro factors out ifdefs from below.
+#ifdef _DEBUG
+#define DBG_FRE(dbg,fre) dbg
+#else
+#define DBG_FRE(dbg,fre) fre
+#endif
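+
+// For example, MethodTable__m_pWriteableData below expands to 0x1c in _DEBUG
+// builds (where the layout includes debug-only fields) and to 0x18 in free
+// builds.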
+
+#define DynamicHelperFrameFlags_Default 0
+#define DynamicHelperFrameFlags_ObjectArg 1
+#define DynamicHelperFrameFlags_ObjectArg2 2
+
+#define REDIRECTSTUB_SP_OFFSET_CONTEXT 0
+
+#define CORINFO_NullReferenceException_ASM 0
+ASMCONSTANTS_C_ASSERT( CORINFO_NullReferenceException_ASM
+ == CORINFO_NullReferenceException);
+
+#define CORINFO_IndexOutOfRangeException_ASM 3
+ASMCONSTANTS_C_ASSERT( CORINFO_IndexOutOfRangeException_ASM
+ == CORINFO_IndexOutOfRangeException);
+
+
+// Offset of the array containing the address of captured registers in MachState
+#define MachState__captureR4_R11 0x0
+ASMCONSTANTS_C_ASSERT(MachState__captureR4_R11 == offsetof(MachState, captureR4_R11))
+
+// Offset of the array containing the address of preserved registers in MachState
+#define MachState___R4_R11 0x20
+ASMCONSTANTS_C_ASSERT(MachState___R4_R11 == offsetof(MachState, _R4_R11))
+
+#define MachState__isValid 0x48
+ASMCONSTANTS_C_ASSERT(MachState__isValid == offsetof(MachState, _isValid))
+
+#define LazyMachState_captureR4_R11 MachState__captureR4_R11
+ASMCONSTANTS_C_ASSERT(LazyMachState_captureR4_R11 == offsetof(LazyMachState, captureR4_R11))
+
+#define LazyMachState_captureSp (MachState__isValid+4)
+ASMCONSTANTS_C_ASSERT(LazyMachState_captureSp == offsetof(LazyMachState, captureSp))
+
+#define LazyMachState_captureIp (LazyMachState_captureSp+4)
+ASMCONSTANTS_C_ASSERT(LazyMachState_captureIp == offsetof(LazyMachState, captureIp))
+
+#define DelegateObject___methodPtr 0x0c
+ASMCONSTANTS_C_ASSERT(DelegateObject___methodPtr == offsetof(DelegateObject, _methodPtr));
+
+#define DelegateObject___target 0x04
+ASMCONSTANTS_C_ASSERT(DelegateObject___target == offsetof(DelegateObject, _target));
+
+#define MethodTable__m_BaseSize 0x04
+ASMCONSTANTS_C_ASSERT(MethodTable__m_BaseSize == offsetof(MethodTable, m_BaseSize));
+
+#define MethodTable__m_dwFlags 0x0
+ASMCONSTANTS_C_ASSERT(MethodTable__m_dwFlags == offsetof(MethodTable, m_dwFlags));
+
+#define MethodTable__m_pWriteableData DBG_FRE(0x1c, 0x18)
+ASMCONSTANTS_C_ASSERT(MethodTable__m_pWriteableData == offsetof(MethodTable, m_pWriteableData));
+
+#define MethodTable__enum_flag_ContainsPointers 0x01000000
+ASMCONSTANTS_C_ASSERT(MethodTable__enum_flag_ContainsPointers == MethodTable::enum_flag_ContainsPointers);
+
+#define MethodTable__m_ElementType DBG_FRE(0x24, 0x20)
+ASMCONSTANTS_C_ASSERT(MethodTable__m_ElementType == offsetof(MethodTable, m_pMultipurposeSlot1));
+
+#define SIZEOF__MethodTable DBG_FRE(0x2c, 0x28)
+ASMCONSTANTS_C_ASSERT(SIZEOF__MethodTable == sizeof(MethodTable));
+
+#define MethodTableWriteableData__m_dwFlags 0x00
+ASMCONSTANTS_C_ASSERT(MethodTableWriteableData__m_dwFlags == offsetof(MethodTableWriteableData, m_dwFlags));
+
+#define MethodTableWriteableData__enum_flag_Unrestored 0x04
+ASMCONSTANTS_C_ASSERT(MethodTableWriteableData__enum_flag_Unrestored == MethodTableWriteableData::enum_flag_Unrestored);
+
+#define StringObject__m_StringLength 0x04
+ASMCONSTANTS_C_ASSERT(StringObject__m_StringLength == offsetof(StringObject, m_StringLength));
+
+#define SIZEOF__BaseStringObject 0xe
+ASMCONSTANTS_C_ASSERT(SIZEOF__BaseStringObject == (ObjSizeOf(StringObject) + sizeof(WCHAR)));
+
+#define SIZEOF__ArrayOfObjectRef 0xc
+ASMCONSTANTS_C_ASSERT(SIZEOF__ArrayOfObjectRef == ObjSizeOf(ArrayBase));
+
+#define SIZEOF__ArrayOfValueType 0xc
+ASMCONSTANTS_C_ASSERT(SIZEOF__ArrayOfValueType == ObjSizeOf(ArrayBase));
+
+#define ArrayBase__m_NumComponents 0x4
+ASMCONSTANTS_C_ASSERT(ArrayBase__m_NumComponents == offsetof(ArrayBase, m_NumComponents));
+
+#define ArrayTypeDesc__m_TemplateMT 0x4
+ASMCONSTANTS_C_ASSERT(ArrayTypeDesc__m_TemplateMT == offsetof(ArrayTypeDesc, m_TemplateMT));
+
+#define ArrayTypeDesc__m_Arg 0x8
+ASMCONSTANTS_C_ASSERT(ArrayTypeDesc__m_Arg == offsetof(ArrayTypeDesc, m_Arg));
+
+#define PtrArray__m_Array 0x8
+ASMCONSTANTS_C_ASSERT(PtrArray__m_Array == offsetof(PtrArray, m_Array));
+
+#define SYSTEM_INFO__dwNumberOfProcessors 0x14
+ASMCONSTANTS_C_ASSERT(SYSTEM_INFO__dwNumberOfProcessors == offsetof(SYSTEM_INFO, dwNumberOfProcessors));
+
+#define TypeHandle_CanCast 0x1 // TypeHandle::CanCast
+
+// Maximum number of characters to be allocated for a string in AllocateStringFast*. Chosen so that we'll
+// never have to check for overflow and will never try to allocate on the regular heap a string that should have
+// gone on the large object heap. Additionally the constant has been chosen such that it can be encoded in a
+// single Thumb2 CMP instruction.
+#define MAX_FAST_ALLOCATE_STRING_SIZE 42240
+ASMCONSTANTS_C_ASSERT(MAX_FAST_ALLOCATE_STRING_SIZE < ((LARGE_OBJECT_SIZE - SIZEOF__BaseStringObject) / 2));
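+// (With the usual 85000-byte large object threshold the bound is
+// (85000 - 0xe) / 2 = 42493; 42240 = 0xa500 = 0xa5 << 8, which fits in a
+// Thumb2 modified immediate.)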
+
+
+// Maximum number of elements for an array of object references allocated in JIT_NewArr1OBJ_MP*. Chosen so that we'll
+// never have to check for overflow and will never try to allocate on the regular heap an array that should have
+// gone on the large object heap. Additionally the constant has been chosen such that it can be encoded in a
+// single Thumb2 CMP instruction.
+#define MAX_FAST_ALLOCATE_ARRAY_OBJECTREF_SIZE 21120
+ASMCONSTANTS_C_ASSERT(MAX_FAST_ALLOCATE_ARRAY_OBJECTREF_SIZE < ((LARGE_OBJECT_SIZE - SIZEOF__ArrayOfObjectRef) / sizeof(void*)));
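+// (Likewise: (85000 - 0xc) / 4 = 21247, and 21120 = 0x5280 = 0xa5 << 7 is
+// Thumb2-encodable.)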
+
+// Maximum number of elements for an array of value types allocated in JIT_NewArr1VC_MP*. Chosen so that we'll
+// never have to check for overflow and will never try to allocate on the regular heap an array that should have
+// gone on the large object heap. Additionally the constant has been chosen such that it can be encoded in a
+// single Thumb2 CMP instruction.
+#define MAX_FAST_ALLOCATE_ARRAY_VC_SIZE 65280
+ASMCONSTANTS_C_ASSERT(MAX_FAST_ALLOCATE_ARRAY_VC_SIZE < ((4294967296 - 1 - SIZEOF__ArrayOfValueType) / 65536));
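+// (The divisor reflects a worst-case 65536-byte element size, giving a bound
+// of (2^32 - 1 - 0xc) / 65536 = 65535; 65280 = 0xff00 = 0xff << 8 is
+// Thumb2-encodable.)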
+
+#define SIZEOF__GSCookie 0x4
+ASMCONSTANTS_C_ASSERT(SIZEOF__GSCookie == sizeof(GSCookie));
+
+#define SIZEOF__Frame 0x8
+ASMCONSTANTS_C_ASSERT(SIZEOF__Frame == sizeof(Frame));
+
+#define SIZEOF__CONTEXT 0x1a0
+ASMCONSTANTS_C_ASSERT(SIZEOF__CONTEXT == sizeof(T_CONTEXT));
+
+#define SIZEOF__CalleeSavedRegisters 0x24
+ASMCONSTANTS_C_ASSERT(SIZEOF__CalleeSavedRegisters == sizeof(CalleeSavedRegisters))
+
+#define SIZEOF__ArgumentRegisters 0x10
+ASMCONSTANTS_C_ASSERT(SIZEOF__ArgumentRegisters == sizeof(ArgumentRegisters))
+
+#define SIZEOF__FloatArgumentRegisters 0x40
+ASMCONSTANTS_C_ASSERT(SIZEOF__FloatArgumentRegisters == sizeof(FloatArgumentRegisters))
+
+#define UMEntryThunk__m_pUMThunkMarshInfo 0x0C
+ASMCONSTANTS_C_ASSERT(UMEntryThunk__m_pUMThunkMarshInfo == offsetof(UMEntryThunk, m_pUMThunkMarshInfo))
+
+#define UMEntryThunk__m_dwDomainId 0x10
+ASMCONSTANTS_C_ASSERT(UMEntryThunk__m_dwDomainId == offsetof(UMEntryThunk, m_dwDomainId))
+
+#define UMThunkMarshInfo__m_pILStub 0x00
+ASMCONSTANTS_C_ASSERT(UMThunkMarshInfo__m_pILStub == offsetof(UMThunkMarshInfo, m_pILStub))
+
+#define UMThunkMarshInfo__m_cbActualArgSize 0x04
+ASMCONSTANTS_C_ASSERT(UMThunkMarshInfo__m_cbActualArgSize == offsetof(UMThunkMarshInfo, m_cbActualArgSize))
+
+#ifdef FEATURE_REMOTING
+
+#define TransparentProxyObject___stubData 0x8
+ASMCONSTANTS_C_ASSERT(TransparentProxyObject___stubData == offsetof(TransparentProxyObject, _stubData))
+
+#define TransparentProxyObject___stub 0x14
+ASMCONSTANTS_C_ASSERT(TransparentProxyObject___stub == offsetof(TransparentProxyObject, _stub))
+
+#define TransparentProxyObject___pMT 0xc
+ASMCONSTANTS_C_ASSERT(TransparentProxyObject___pMT == offsetof(TransparentProxyObject, _pMT))
+
+#define RemotingPrecode__m_pMethodDesc 0x10
+ASMCONSTANTS_C_ASSERT(RemotingPrecode__m_pMethodDesc == offsetof(RemotingPrecode, m_pMethodDesc))
+
+#define REMOTING_PRECODE_RET_OFFSET 0x06
+
+#endif // FEATURE_REMOTING
+
+#define MethodDesc__m_wFlags DBG_FRE(0x1A, 0x06)
+ASMCONSTANTS_C_ASSERT(MethodDesc__m_wFlags == offsetof(MethodDesc, m_wFlags))
+
+#define MethodDesc__mdcClassification 0x7
+ASMCONSTANTS_C_ASSERT(MethodDesc__mdcClassification == mdcClassification)
+
+#ifdef FEATURE_COMINTEROP
+
+#define MethodDesc__mcComInterop 0x6
+ASMCONSTANTS_C_ASSERT(MethodDesc__mcComInterop == mcComInterop)
+
+#define Stub__m_pCode DBG_FRE(0x10, 0x0c)
+ASMCONSTANTS_C_ASSERT(Stub__m_pCode == sizeof(Stub))
+
+#define SIZEOF__ComMethodFrame 0x24
+ASMCONSTANTS_C_ASSERT(SIZEOF__ComMethodFrame == sizeof(ComMethodFrame))
+
+#define UnmanagedToManagedFrame__m_pvDatum 0x08
+ASMCONSTANTS_C_ASSERT(UnmanagedToManagedFrame__m_pvDatum == offsetof(UnmanagedToManagedFrame, m_pvDatum))
+
+// In ComCallPreStub and GenericComPlusCallStub, we set up R12 to contain the address of the ComCallMethodDesc after doing the following:
+//
+// mov r12, pc
+//
+// This constant defines where the ComCallMethodDesc lies relative to R12 after the above instruction executes.
+#define ComCallMethodDesc_Offset_FromR12 0x8
+
+#endif // FEATURE_COMINTEROP
+
+#ifndef CROSSGEN_COMPILE
+#define Thread__m_alloc_context__alloc_limit 0x44
+ASMCONSTANTS_C_ASSERT(Thread__m_alloc_context__alloc_limit == offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_limit));
+
+#define Thread__m_alloc_context__alloc_ptr 0x40
+ASMCONSTANTS_C_ASSERT(Thread__m_alloc_context__alloc_ptr == offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr));
+#endif // CROSSGEN_COMPILE
+
+#define Thread__m_fPreemptiveGCDisabled 0x08
+#ifndef CROSSGEN_COMPILE
+ASMCONSTANTS_C_ASSERT(Thread__m_fPreemptiveGCDisabled == offsetof(Thread, m_fPreemptiveGCDisabled));
+#endif // CROSSGEN_COMPILE
+#define Thread_m_fPreemptiveGCDisabled Thread__m_fPreemptiveGCDisabled
+
+#define Thread__m_pFrame 0x0C
+#ifndef CROSSGEN_COMPILE
+ASMCONSTANTS_C_ASSERT(Thread__m_pFrame == offsetof(Thread, m_pFrame));
+#endif // CROSSGEN_COMPILE
+#define Thread_m_pFrame Thread__m_pFrame
+
+#ifndef CROSSGEN_COMPILE
+#define Thread__m_pDomain 0x14
+ASMCONSTANTS_C_ASSERT(Thread__m_pDomain == offsetof(Thread, m_pDomain));
+
+#define AppDomain__m_dwId 0x04
+ASMCONSTANTS_C_ASSERT(AppDomain__m_dwId == offsetof(AppDomain, m_dwId));
+
+#define AppDomain__m_sDomainLocalBlock 0x08
+ASMCONSTANTS_C_ASSERT(AppDomain__m_sDomainLocalBlock == offsetof(AppDomain, m_sDomainLocalBlock));
+
+#define DomainLocalBlock__m_pModuleSlots 0x04
+ASMCONSTANTS_C_ASSERT(DomainLocalBlock__m_pModuleSlots == offsetof(DomainLocalBlock, m_pModuleSlots));
+
+#define DomainLocalModule__m_pDataBlob 0x18
+ASMCONSTANTS_C_ASSERT(DomainLocalModule__m_pDataBlob == offsetof(DomainLocalModule, m_pDataBlob));
+
+#define DomainLocalModule__m_pGCStatics 0x10
+ASMCONSTANTS_C_ASSERT(DomainLocalModule__m_pGCStatics == offsetof(DomainLocalModule, m_pGCStatics));
+
+#endif
+
+#define ASM__VTABLE_SLOTS_PER_CHUNK 8
+ASMCONSTANTS_C_ASSERT(ASM__VTABLE_SLOTS_PER_CHUNK == VTABLE_SLOTS_PER_CHUNK)
+
+#define ASM__VTABLE_SLOTS_PER_CHUNK_LOG2 3
+ASMCONSTANTS_C_ASSERT(ASM__VTABLE_SLOTS_PER_CHUNK_LOG2 == VTABLE_SLOTS_PER_CHUNK_LOG2)
+
+#define VASigCookie__pNDirectILStub 0x4
+ASMCONSTANTS_C_ASSERT(VASigCookie__pNDirectILStub == offsetof(VASigCookie, pNDirectILStub))
+
+#define CONTEXT_Pc 0x040
+ASMCONSTANTS_C_ASSERT(CONTEXT_Pc == offsetof(T_CONTEXT,Pc))
+
+#define TLS_GETTER_MAX_SIZE_ASM 0x10
+ASMCONSTANTS_C_ASSERT(TLS_GETTER_MAX_SIZE_ASM == TLS_GETTER_MAX_SIZE)
+
+#define CallDescrData__pSrc 0x00
+#define CallDescrData__numStackSlots 0x04
+#define CallDescrData__pArgumentRegisters 0x08
+#define CallDescrData__pFloatArgumentRegisters 0x0C
+#define CallDescrData__fpReturnSize 0x10
+#define CallDescrData__pTarget 0x14
+#define CallDescrData__returnValue 0x18
+
+ASMCONSTANTS_C_ASSERT(CallDescrData__pSrc == offsetof(CallDescrData, pSrc))
+ASMCONSTANTS_C_ASSERT(CallDescrData__numStackSlots == offsetof(CallDescrData, numStackSlots))
+ASMCONSTANTS_C_ASSERT(CallDescrData__pArgumentRegisters == offsetof(CallDescrData, pArgumentRegisters))
+ASMCONSTANTS_C_ASSERT(CallDescrData__pFloatArgumentRegisters == offsetof(CallDescrData, pFloatArgumentRegisters))
+ASMCONSTANTS_C_ASSERT(CallDescrData__fpReturnSize == offsetof(CallDescrData, fpReturnSize))
+ASMCONSTANTS_C_ASSERT(CallDescrData__pTarget == offsetof(CallDescrData, pTarget))
+ASMCONSTANTS_C_ASSERT(CallDescrData__returnValue == offsetof(CallDescrData, returnValue))
+
+#define SIZEOF__FaultingExceptionFrame SIZEOF__Frame + 0x8 + SIZEOF__CONTEXT
+#define FaultingExceptionFrame__m_fFilterExecuted SIZEOF__Frame
+ASMCONSTANTS_C_ASSERT(SIZEOF__FaultingExceptionFrame == sizeof(FaultingExceptionFrame));
+ASMCONSTANTS_C_ASSERT(FaultingExceptionFrame__m_fFilterExecuted == offsetof(FaultingExceptionFrame, m_fFilterExecuted));
+
+#undef ASMCONSTANTS_RUNTIME_ASSERT
+#undef ASMCONSTANTS_C_ASSERT
diff --git a/src/vm/arm/asmhelpers.asm b/src/vm/arm/asmhelpers.asm
new file mode 100644
index 0000000000..82c38088d4
--- /dev/null
+++ b/src/vm/arm/asmhelpers.asm
@@ -0,0 +1,2756 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+;; ==++==
+;;
+
+;;
+;; ==--==
+#include "ksarm.h"
+
+#include "asmconstants.h"
+
+#include "asmmacros.h"
+
+ SETALIAS CTPMethodTable__s_pThunkTable, ?s_pThunkTable@CTPMethodTable@@0PAVMethodTable@@A
+ SETALIAS g_pObjectClass, ?g_pObjectClass@@3PAVMethodTable@@A
+
+ IMPORT GetThread
+ IMPORT JIT_InternalThrow
+ IMPORT JIT_WriteBarrier
+ IMPORT TheUMEntryPrestubWorker
+ IMPORT CreateThreadBlockThrow
+ IMPORT UMThunkStubRareDisableWorker
+ IMPORT UM2MDoADCallBack
+ IMPORT PreStubWorker
+ IMPORT NDirectImportWorker
+ IMPORT ObjIsInstanceOfNoGC
+ IMPORT ArrayStoreCheck
+ IMPORT VSD_ResolveWorker
+ IMPORT $g_pObjectClass
+
+#ifdef WRITE_BARRIER_CHECK
+ SETALIAS g_GCShadow, ?g_GCShadow@@3PAEA
+ SETALIAS g_GCShadowEnd, ?g_GCShadowEnd@@3PAEA
+
+ IMPORT g_lowest_address
+ IMPORT $g_GCShadow
+ IMPORT $g_GCShadowEnd
+#endif // WRITE_BARRIER_CHECK
+
+
+#ifdef FEATURE_REMOTING
+ IMPORT $CTPMethodTable__s_pThunkTable
+ IMPORT VSD_GetTargetForTPWorker
+ IMPORT VSD_GetTargetForTPWorkerQuick
+ IMPORT TransparentProxyStubWorker
+#endif
+#ifdef FEATURE_COMINTEROP
+ IMPORT CLRToCOMWorker
+ IMPORT ComPreStubWorker
+ IMPORT COMToCLRWorker
+#endif
+ IMPORT CallDescrWorkerUnwindFrameChainHandler
+ IMPORT UMEntryPrestubUnwindFrameChainHandler
+ IMPORT UMThunkStubUnwindFrameChainHandler
+#ifdef FEATURE_COMINTEROP
+ IMPORT ReverseComUnwindFrameChainHandler
+#endif
+
+#ifdef FEATURE_HIJACK
+ IMPORT OnHijackObjectWorker
+ IMPORT OnHijackInteriorPointerWorker
+ IMPORT OnHijackScalarWorker
+#endif ;FEATURE_HIJACK
+
+ IMPORT GetCurrentSavedRedirectContext
+
+#ifdef FEATURE_MIXEDMODE
+ SETALIAS IJWNOADThunk__FindThunkTarget, ?FindThunkTarget@IJWNOADThunk@@QAAPBXXZ
+ IMPORT $IJWNOADThunk__FindThunkTarget
+#endif
+
+ ;; Imports to support virtual import fixup for ngen images
+ IMPORT VirtualMethodFixupWorker
+ ;; Import to support cross-module external method invocation in ngen images
+ IMPORT ExternalMethodFixupWorker
+ IMPORT StubDispatchFixupWorker
+
+#ifdef FEATURE_READYTORUN
+ IMPORT DynamicHelperWorker
+#endif
+
+ IMPORT JIT_RareDisableHelperWorker
+ IMPORT DoJITFailFast
+ IMPORT s_gsCookie
+ IMPORT g_TrapReturningThreads
+
+ ;; Imports for singleDomain statics helpers
+ IMPORT JIT_GetSharedNonGCStaticBase_Helper
+ IMPORT JIT_GetSharedGCStaticBase_Helper
+
+ TEXTAREA
+
+;; LPVOID __stdcall GetCurrentIP(void);
+ LEAF_ENTRY GetCurrentIP
+ mov r0, lr
+ bx lr
+ LEAF_END
+
+;; LPVOID __stdcall GetCurrentSP(void);
+ LEAF_ENTRY GetCurrentSP
+ mov r0, sp
+ bx lr
+ LEAF_END
+
+;;-----------------------------------------------------------------------------
+;; This helper routine enregisters the appropriate arguments and makes the
+;; actual call.
+;;-----------------------------------------------------------------------------
+;;void CallDescrWorkerInternal(CallDescrData * pCallDescrData);
+ NESTED_ENTRY CallDescrWorkerInternal,,CallDescrWorkerUnwindFrameChainHandler
+ PROLOG_PUSH {r4,r5,r7,lr}
+ PROLOG_STACK_SAVE r7
+
+ mov r5,r0 ; save pCallDescrData in r5
+
+ ldr r1, [r5,#CallDescrData__numStackSlots]
+ cbz r1, Ldonestack
+
+ ;; Add frame padding to ensure frame size is a multiple of 8 (a requirement of the OS ABI).
+ ;; We push four registers (above) and numStackSlots arguments (below). If this comes to an odd number
+ ;; of slots we must pad with another. This simplifies to "if the low bit of numStackSlots is set,
+ ;; extend the stack another four bytes".
+ lsls r2, r1, #2
+ and r3, r2, #4
+ sub sp, sp, r3
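+ ;; (r2 = numStackSlots * 4, so "and r3, r2, #4" yields 4 exactly when the
+ ;; slot count is odd and 0 otherwise.)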
+
+ ;; This loop copies numStackSlots words
+ ;; from [pSrcEnd-4,pSrcEnd-8,...] to [sp-4,sp-8,...]
+ ldr r0, [r5,#CallDescrData__pSrc]
+ add r0,r0,r2
+Lstackloop
+ ldr r2, [r0,#-4]!
+ str r2, [sp,#-4]!
+ subs r1, r1, #1
+ bne Lstackloop
+Ldonestack
+
+ ;; If FP arguments are supplied in registers (r3 != NULL) then initialize all of them from the pointer
+ ;; given in r3. Do not use "it" since it faults in floating point even when the instruction is not executed.
+ ldr r3, [r5,#CallDescrData__pFloatArgumentRegisters]
+ cbz r3, LNoFloatingPoint
+ vldm r3, {s0-s15}
+LNoFloatingPoint
+
+ ;; Copy [pArgumentRegisters, ..., pArgumentRegisters + 12]
+ ;; into r0, ..., r3
+
+ ldr r4, [r5,#CallDescrData__pArgumentRegisters]
+ ldm r4, {r0-r3}
+
+ CHECK_STACK_ALIGNMENT
+
+ ;; call pTarget
+ ;; Note that remoting expects the target in r4.
+ ldr r4, [r5,#CallDescrData__pTarget]
+ blx r4
+
+ ldr r3, [r5,#CallDescrData__fpReturnSize]
+
+ ;; Save FP return value if appropriate
+ cbz r3, LFloatingPointReturnDone
+
+ ;; Float return case
+ ;; Do not use "it" since it faults in floating point even when the instruction is not executed.
+ cmp r3, #4
+ bne LNoFloatReturn
+ vmov r0, s0
+ b LFloatingPointReturnDone
+LNoFloatReturn
+
+ ;; Double return case
+ ;; Do not use "it" since it faults in floating point even when the instruction is not executed.
+ cmp r3, #8
+ bne LNoDoubleReturn
+ vmov r0, r1, s0, s1
+ b LFloatingPointReturnDone
+LNoDoubleReturn
+
+ add r2, r5, #CallDescrData__returnValue
+
+ cmp r3, #16
+ bne LNoFloatHFAReturn
+ vstm r2, {s0-s3}
+ b LReturnDone
+LNoFloatHFAReturn
+
+ cmp r3, #32
+ bne LNoDoubleHFAReturn
+ vstm r2, {d0-d3}
+ b LReturnDone
+LNoDoubleHFAReturn
+
+ EMIT_BREAKPOINT ; Unreachable
+
+LFloatingPointReturnDone
+
+ ;; Save return value into retbuf
+ str r0, [r5, #(CallDescrData__returnValue + 0)]
+ str r1, [r5, #(CallDescrData__returnValue + 4)]
+
+LReturnDone
+
+#ifdef _DEBUG
+ ;; trash the floating point registers to ensure that the HFA return values
+ ;; won't survive by accident
+ vldm sp, {d0-d3}
+#endif
+
+ EPILOG_STACK_RESTORE r7
+ EPILOG_POP {r4,r5,r7,pc}
+
+ NESTED_END
+
+
+;;-----------------------------------------------------------------------------
+;; This helper routine is where returns for irregular tail calls end up
+;; so they can dynamically pop their stack arguments.
+;;-----------------------------------------------------------------------------
+;
+; Stack Layout (stack grows up, 0 at the top, offsets relative to frame pointer, r7):
+;
+; sp -> callee stack arguments
+; :
+; :
+; -0Ch gsCookie
+; TailCallHelperFrame ->
+; -08h __VFN_table
+; -04h m_Next
+; r7 ->
+ ; +00h m_calleeSavedRegisters.r4
+; +04h .r5
+; +08h .r6
+; +0Ch .r7
+; +10h .r8
+; +14h .r9
+; +18h .r10
+; r11->
+; +1Ch .r11
+; +20h .r14 -or- m_ReturnAddress
+;
+; r6 -> GetThread()
+; r5 -> r6->m_pFrame (old Frame chain head)
+; r11 is used to preserve the ETW call stack
+
+ NESTED_ENTRY TailCallHelperStub
+ ;
+ ; This prolog is never executed, but we keep it here for reference
+ ; and for the unwind data it generates
+ ;
+
+ ; Spill callee saved registers and return address.
+ PROLOG_PUSH {r4-r11,lr}
+
+ PROLOG_STACK_SAVE r7
+
+ ;
+ ; This is the code that would have to run to setup this frame
+ ; like the C++ helper does before calling RtlRestoreContext
+ ;
+ ; Allocate space for the rest of the frame and GSCookie.
+ ; PROLOG_STACK_ALLOC 0x0C
+ ;
+ ; Set r11 for frame chain
+ ;add r11, r7, 0x1C
+ ;
+ ; Set the vtable for TailCallFrame
+ ;bl TCF_GETMETHODFRAMEVPTR
+ ;str r0, [r7, #-8]
+ ;
+ ; Initialize the GSCookie within the Frame
+ ;ldr r0, =s_gsCookie
+ ;str r0, [r7, #-0x0C]
+ ;
+ ; Link the TailCallFrame into the Frame chain
+ ; and initialize r5 & r6 for unlinking later
+ ;CALL_GETTHREAD
+ ;mov r6, r0
+ ;ldr r5, [r6, #Thread__m_pFrame]
+ ;str r5, [r7, #-4]
+ ;sub r0, r7, 8
+ ;str r0, [r6, #Thread__m_pFrame]
+ ;
+ ; None of the previous stuff is ever executed,
+ ; but we keep it here for reference
+ ;
+
+ ;
+ ; Here's the pretend call (make it real so the unwinder
+ ; doesn't think we're in the prolog)
+ ;
+ bl TailCallHelperStub
+ ;
+ ; with the real return address pointing to this real epilog
+ ;
+JIT_TailCallHelperStub_ReturnAddress
+ EXPORT JIT_TailCallHelperStub_ReturnAddress
+
+ ;
+ ; Our epilog (which also unlinks the TailCallFrame)
+ ; Be careful not to trash the return registers
+ ;
+
+#ifdef _DEBUG
+ ldr r3, =s_gsCookie
+ ldr r3, [r3]
+ ldr r2, [r7, #-0x0C]
+ cmp r2, r3
+ beq GoodGSCookie
+ bl DoJITFailFast
+GoodGSCookie
+#endif ; _DEBUG
+
+ ;
+ ; unlink the TailCallFrame
+ ;
+ str r5, [r6, #Thread__m_pFrame]
+
+ ;
+ ; epilog
+ ;
+ EPILOG_STACK_RESTORE r7
+ EPILOG_POP {r4-r11,lr}
+ EPILOG_RETURN
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+
+; void LazyMachStateCaptureState(struct LazyMachState *pState);
+ LEAF_ENTRY LazyMachStateCaptureState
+
+ ;; marks that this is not yet valid
+ mov r1, #0
+ str r1, [r0, #MachState__isValid]
+
+ str lr, [r0, #LazyMachState_captureIp]
+ str sp, [r0, #LazyMachState_captureSp]
+
+ add r1, r0, #LazyMachState_captureR4_R11
+ stm r1, {r4-r11}
+
+ mov pc, lr
+
+ LEAF_END
+
+; void SinglecastDelegateInvokeStub(Delegate *pThis)
+ LEAF_ENTRY SinglecastDelegateInvokeStub
+ cmp r0, #0
+ beq LNullThis
+
+ ldr r12, [r0, #DelegateObject___methodPtr]
+ ldr r0, [r0, #DelegateObject___target]
+
+ bx r12
+
+LNullThis
+ mov r0, #CORINFO_NullReferenceException_ASM
+ b JIT_InternalThrow
+
+ LEAF_END
+
+;
+; r12 = UMEntryThunk*
+;
+ NESTED_ENTRY TheUMEntryPrestub,,UMEntryPrestubUnwindFrameChainHandler
+
+ PROLOG_PUSH {r0-r4,lr}
+ PROLOG_VPUSH {d0-d7}
+
+ CHECK_STACK_ALIGNMENT
+
+ mov r0, r12
+ bl TheUMEntryPrestubWorker
+
+ ; Record real target address in r12.
+ mov r12, r0
+
+ ; Epilog
+ EPILOG_VPOP {d0-d7}
+ EPILOG_POP {r0-r4,lr}
+ EPILOG_BRANCH_REG r12
+
+ NESTED_END
+
+;
+; r12 = UMEntryThunk*
+;
+ NESTED_ENTRY UMThunkStub,,UMThunkStubUnwindFrameChainHandler
+ PROLOG_PUSH {r4,r5,r7,r11,lr}
+ PROLOG_PUSH {r0-r3,r12}
+ PROLOG_STACK_SAVE r7
+
+ GBLA UMThunkStub_HiddenArg ; offset of saved UMEntryThunk *
+ GBLA UMThunkStub_StackArgs ; offset of original stack args (total size of UMThunkStub frame)
+UMThunkStub_HiddenArg SETA 4*4
+UMThunkStub_StackArgs SETA 10*4
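+
+ ; (Derivation: the two prologue pushes leave, from r7 upwards, r0-r3, r12,
+ ; r4, r5, r7, r11, lr; the saved r12 (the UMEntryThunk* hidden argument)
+ ; is therefore at r7 + 4*4 and the caller's stack arguments start at
+ ; r7 + 10*4.)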
+
+ CHECK_STACK_ALIGNMENT
+
+ bl GetThread
+ cbz r0, UMThunkStub_DoThreadSetup
+
+UMThunkStub_HaveThread
+ mov r5, r0 ; r5 = Thread *
+
+ ldr r2, =g_TrapReturningThreads
+
+ mov r4, 1
+ str r4, [r5, #Thread__m_fPreemptiveGCDisabled]
+
+ ldr r3, [r2]
+ cbnz r3, UMThunkStub_DoTrapReturningThreads
+
+UMThunkStub_InCooperativeMode
+ ldr r12, [r7, #UMThunkStub_HiddenArg]
+
+ ldr r0, [r5, #Thread__m_pDomain]
+ ldr r1, [r12, #UMEntryThunk__m_dwDomainId]
+ ldr r0, [r0, #AppDomain__m_dwId]
+ ldr r3, [r12, #UMEntryThunk__m_pUMThunkMarshInfo]
+ cmp r0, r1
+ bne UMThunkStub_WrongAppDomain
+
+ ldr r2, [r3, #UMThunkMarshInfo__m_cbActualArgSize]
+ cbz r2, UMThunkStub_ArgumentsSetup
+
+ add r0, r7, #UMThunkStub_StackArgs ; Source pointer
+ add r0, r0, r2
+ lsr r1, r2, #2 ; Count of stack slots to copy
+
+ and r2, r2, #4 ; Align the stack
+ sub sp, sp, r2
+
+UMThunkStub_StackLoop
+ ldr r2, [r0,#-4]!
+ str r2, [sp,#-4]!
+ subs r1, r1, #1
+ bne UMThunkStub_StackLoop
+
+UMThunkStub_ArgumentsSetup
+ ldr r4, [r3, #UMThunkMarshInfo__m_pILStub]
+
+ ; reload argument registers
+ ldm r7, {r0-r3}
+
+ CHECK_STACK_ALIGNMENT
+
+ blx r4
+
+UMThunkStub_PostCall
+ mov r4, 0
+ str r4, [r5, #Thread__m_fPreemptiveGCDisabled]
+
+ EPILOG_STACK_RESTORE r7
+ EPILOG_STACK_FREE 4 * 5
+ EPILOG_POP {r4,r5,r7,r11,pc}
+
+UMThunkStub_DoThreadSetup
+ sub sp, #SIZEOF__FloatArgumentRegisters
+ vstm sp, {d0-d7}
+ bl CreateThreadBlockThrow
+ vldm sp, {d0-d7}
+ add sp, #SIZEOF__FloatArgumentRegisters
+ b UMThunkStub_HaveThread
+
+UMThunkStub_DoTrapReturningThreads
+ sub sp, #SIZEOF__FloatArgumentRegisters
+ vstm sp, {d0-d7}
+ mov r0, r5 ; Thread* pThread
+ ldr r1, [r7, #UMThunkStub_HiddenArg] ; UMEntryThunk* pUMEntry
+ bl UMThunkStubRareDisableWorker
+ vldm sp, {d0-d7}
+ add sp, #SIZEOF__FloatArgumentRegisters
+ b UMThunkStub_InCooperativeMode
+
+UMThunkStub_WrongAppDomain
+ sub sp, #SIZEOF__FloatArgumentRegisters
+ vstm sp, {d0-d7}
+
+ ldr r0, [r7, #UMThunkStub_HiddenArg] ; UMEntryThunk* pUMEntry
+ mov r2, r7 ; void * pArgs
+ ; remaining arguments are unused
+ bl UM2MDoADCallBack
+
+ ; Restore non-FP return value.
+ ldr r0, [r7, #0]
+ ldr r1, [r7, #4]
+
+ ; Restore FP return value or HFA.
+ vldm sp, {d0-d3}
+ b UMThunkStub_PostCall
+
+ NESTED_END
+
+; UM2MThunk_WrapperHelper(void *pThunkArgs, // r0
+; int cbStackArgs, // r1 (unused)
+; void *pAddr, // r2 (unused)
+; UMEntryThunk *pEntryThunk, // r3
+; Thread *pThread) // [sp, #0]
+
+ NESTED_ENTRY UM2MThunk_WrapperHelper
+
+ PROLOG_PUSH {r4-r7,r11,lr}
+ PROLOG_STACK_SAVE r7
+
+ CHECK_STACK_ALIGNMENT
+
+ mov r12, r3 ; r12 = UMEntryThunk *
+
+ ;
+ ; Note that layout of the arguments is given by UMThunkStub frame
+ ;
+ mov r5, r0 ; r5 = pArgs
+
+ ldr r3, [r12, #UMEntryThunk__m_pUMThunkMarshInfo]
+
+ ldr r2, [r3, #UMThunkMarshInfo__m_cbActualArgSize]
+ cbz r2, UM2MThunk_WrapperHelper_ArgumentsSetup
+
+ add r0, r5, #UMThunkStub_StackArgs ; Source pointer
+ add r0, r0, r2
+ lsr r1, r2, #2 ; Count of stack slots to copy
+
+ and r2, r2, #4 ; Align the stack
+ sub sp, sp, r2
+
+UM2MThunk_WrapperHelper_StackLoop
+ ldr r2, [r0,#-4]!
+ str r2, [sp,#-4]!
+ subs r1, r1, #1
+ bne UM2MThunk_WrapperHelper_StackLoop
+
+UM2MThunk_WrapperHelper_ArgumentsSetup
+ ldr r4, [r3, #UMThunkMarshInfo__m_pILStub]
+
+ ; reload floating point registers
+ sub r6, r5, #SIZEOF__FloatArgumentRegisters
+ vldm r6, {d0-d7}
+
+ ; reload argument registers
+ ldm r5, {r0-r3}
+
+ CHECK_STACK_ALIGNMENT
+
+ blx r4
+
+ ; Save non-floating point return
+ str r0, [r5, #0]
+ str r1, [r5, #4]
+
+ ; Save FP return value or HFA.
+ vstm r6, {d0-d3}
+
+#ifdef _DEBUG
+ ;; trash the floating point registers to ensure that the HFA return values
+ ;; won't survive by accident
+ vldm sp, {d0-d3}
+#endif
+
+ EPILOG_STACK_RESTORE r7
+ EPILOG_POP {r4-r7,r11,pc}
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+;
+; IJWNOADThunk::MakeCall
+;
+; On entry:
+; r12 : IJWNOADThunk *
+;
+; On exit:
+; Tail calls to real managed target
+;
+
+#ifdef FEATURE_MIXEDMODE
+ NESTED_ENTRY IJWNOADThunk__MakeCall
+
+ ; Can't pass C++ mangled names to NESTED_ENTRY and my attempts to use EQU to define an alternate name
+ ; for a symbol didn't work. Just define a label for the decorated name of the method and export it
+ ; manually.
+|?MakeCall@IJWNOADThunk@@KAXXZ|
+ EXPORT |?MakeCall@IJWNOADThunk@@KAXXZ|
+
+ PROLOG_PUSH {r0-r4,lr}
+ PROLOG_VPUSH {d0-d7}
+
+ CHECK_STACK_ALIGNMENT
+
+ mov r0, r12 ; IJWNOADThunk * is this pointer for IJWNOADThunk::FindThunkTarget
+ bl $IJWNOADThunk__FindThunkTarget
+ mov r12, r0 ; Returns real jump target in r0, save this in r12
+
+ EPILOG_VPOP {d0-d7}
+ EPILOG_POP {r0-r4,lr}
+ EPILOG_BRANCH_REG r12
+
+ NESTED_END
+#endif
+
+; ------------------------------------------------------------------
+
+ NESTED_ENTRY ThePreStub
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ add r0, sp, #__PWTB_TransitionBlock ; pTransitionBlock
+ mov r1, r12 ; pMethodDesc
+
+ bl PreStubWorker
+
+ mov r12, r0
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ EPILOG_BRANCH_REG r12
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; This method does nothing. It's just a fixed function for the debugger to put a breakpoint on.
+ LEAF_ENTRY ThePreStubPatch
+ nop
+ThePreStubPatchLabel
+ EXPORT ThePreStubPatchLabel
+ bx lr
+ LEAF_END
+
+; ------------------------------------------------------------------
+; The call in ndirect import precode points to this function.
+ NESTED_ENTRY NDirectImportThunk
+
+ PROLOG_PUSH {r0-r4,lr} ; Spill general argument registers, return address and
+ ; arbitrary register to keep stack aligned
+ PROLOG_VPUSH {d0-d7} ; Spill floating point argument registers
+
+ CHECK_STACK_ALIGNMENT
+
+ mov r0, r12
+ bl NDirectImportWorker
+ mov r12, r0
+
+ EPILOG_VPOP {d0-d7}
+ EPILOG_POP {r0-r4,lr}
+
+ ; If we got back from NDirectImportWorker, the MD has been successfully
+ ; linked. Proceed to execute the original DLL call.
+ EPILOG_BRANCH_REG r12
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; The call in the fixup precode initially points to this function.
+; The purpose of this function is to load the MethodDesc and forward the call to the prestub.
+ NESTED_ENTRY PrecodeFixupThunk
+
+ ; r12 = FixupPrecode *
+
+ PROLOG_PUSH {r0-r1}
+
+ ; Inline computation done by FixupPrecode::GetMethodDesc()
+ ldrb r0, [r12, #3] ; m_PrecodeChunkIndex
+ ldrb r1, [r12, #2] ; m_MethodDescChunkIndex
+
+ add r12,r12,r0,lsl #3
+ add r0,r12,r0,lsl #2
+ ldr r0, [r0,#8]
+ add r12,r0,r1,lsl #2
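+
+ ; The arithmetic above is equivalent to
+ ; pMD = *(pPrecode + m_PrecodeChunkIndex * 12 + 8) + m_MethodDescChunkIndex * 4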
+
+ EPILOG_POP {r0-r1}
+ EPILOG_BRANCH ThePreStub
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; void ResolveWorkerAsmStub(r0, r1, r2, r3, r4:IndirectionCellAndFlags, r12:DispatchToken)
+;
+; The stub dispatch thunk which transfers control to VSD_ResolveWorker.
+ NESTED_ENTRY ResolveWorkerAsmStub
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ add r0, sp, #__PWTB_TransitionBlock ; pTransitionBlock
+ mov r2, r12 ; token
+
+ ; indirection cell in r4 - should be consistent with REG_ARM_STUB_SPECIAL
+ bic r1, r4, #3 ; indirection cell
+ and r3, r4, #3 ; flags
+
+ bl VSD_ResolveWorker
+
+ mov r12, r0
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ EPILOG_BRANCH_REG r12
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; void ResolveWorkerChainLookupAsmStub(r0, r1, r2, r3, r4:IndirectionCellAndFlags, r12:DispatchToken)
+ NESTED_ENTRY ResolveWorkerChainLookupAsmStub
+
+ ; ARMSTUB TODO: implement chained lookup
+ b ResolveWorkerAsmStub
+
+ NESTED_END
+
+#if defined(FEATURE_REMOTING) || defined(FEATURE_COMINTEROP)
+
+; ------------------------------------------------------------------
+; setStubReturnValue
+; r0 - size of floating point return value (MetaSig::GetFPReturnSize())
+; r1 - pointer to the return buffer in the stub frame
+ LEAF_ENTRY setStubReturnValue
+
+ cbz r0, NoFloatingPointRetVal
+
+ ;; Float return case
+ ;; Do not use "it" since it faults in floating point even when the instruction is not executed.
+ cmp r0, #4
+ bne LNoFloatRetVal
+ vldr s0, [r1]
+ bx lr
+LNoFloatRetVal
+
+ ;; Double return case
+ ;; Do not use "it" since it faults in floating point even when the instruction is not executed.
+ cmp r0, #8
+ bne LNoDoubleRetVal
+ vldr d0, [r1]
+ bx lr
+LNoDoubleRetVal
+
+ cmp r0, #16
+ bne LNoFloatHFARetVal
+ vldm r1, {s0-s3}
+ bx lr
+LNoFloatHFARetVal
+
+ cmp r0, #32
+ bne LNoDoubleHFARetVal
+ vldm r1, {d0-d3}
+ bx lr
+LNoDoubleHFARetVal
+
+ EMIT_BREAKPOINT ; Unreachable
+
+NoFloatingPointRetVal
+
+ ;; Restore the return value from retbuf
+ ldr r0, [r1]
+ ldr r1, [r1, #4]
+ bx lr
+
+ LEAF_END
+
+#endif // FEATURE_REMOTING || FEATURE_COMINTEROP
+
+#ifdef FEATURE_REMOTING
+
+; ------------------------------------------------------------------
+; Remoting stub used to dispatch a method invocation. This is the choke point for all remoting calls; all
+; scenarios where we determine we're not a local or a COM call, regardless of whether the dispatch is
+; interface, virtual or direct, will wind up here sooner or later.
+;
+; On entry:
+; r0 : transparent proxy
+; r12 : target MethodDesc or slot number
+; plus user arguments in registers and on the stack
+;
+ NESTED_ENTRY TransparentProxyStub_CrossContext
+
+ PROLOG_WITH_TRANSITION_BLOCK 0x20
+
+ add r0, sp, #__PWTB_TransitionBlock ; pTransitionBlock
+ mov r1, r12 ; pMethodDesc
+
+ bl TransparentProxyStubWorker
+
+ ; r0 = fpRetSize
+
+ ; return value is stored before float argument registers
+ add r1, sp, #(__PWTB_FloatArgumentRegisters - 0x20)
+ bl setStubReturnValue
+
+ EPILOG_WITH_TRANSITION_BLOCK_RETURN
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; This method does nothing. It's just a fixed function for the debugger to put a breakpoint on.
+ LEAF_ENTRY TransparentProxyStubPatch
+ add r0, r1, r2
+TransparentProxyStubPatchLabel
+ EXPORT TransparentProxyStubPatchLabel
+ bx lr
+ LEAF_END
+
+; ------------------------------------------------------------------
+; VSD helper for performing an in-context interface dispatch on a TransparentProxy. This only happens for
+; ContextBoundObjects that are invoked in the correct context, never for general remoting.
+;
+; On entry:
+; r0 : transparent proxy
+; r12 : interface MethodDesc
+; plus user arguments in registers and on the stack
+;
+; On exit:
+; Tail calls to actual target which returns as normal to the caller.
+;
+ NESTED_ENTRY InContextTPQuickDispatchAsmStub
+
+ ; Spill caller's volatile argument registers and some other state we wish to preserve.
+ PROLOG_PUSH {r0-r3,r12,lr}
+ PROLOG_VPUSH {d0-d7}
+
+ CHECK_STACK_ALIGNMENT
+
+ ; Set up arguments for VSD_GetTargetForTPWorkerQuick
+ ; mov r0, r0 ; this
+ mov r1, r12 ; Interface MethodDesc
+
+ bl VSD_GetTargetForTPWorkerQuick
+
+ ; If we didn't find a target, head for the slow path.
+ cbz r0, CacheMiss
+
+ ; Save target address since we're about to restore the value of r0. Can't place it directly into r12
+ ; since that's about to be restored as well. Instead we overwrite the saved version of r12 on the
+ ; stack (we don't need it any more since the lookup succeeded).
+ str r0, [sp, #((16 * 4) + (4 * 4))]
+
+ ; Restore caller's argument registers.
+ EPILOG_VPOP {d0-d7}
+ EPILOG_POP {r0-r3,r12,lr}
+
+ ; Tail call to the real code using the previously computed target address.
+ EPILOG_BRANCH_REG r12
+
+CacheMiss
+ ; Restore caller's argument registers.
+ EPILOG_VPOP {d0-d7}
+ EPILOG_POP {r0-r3,r12,lr}
+
+ EPILOG_BRANCH InContextTPDispatchAsmStub
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+
+ NESTED_ENTRY InContextTPDispatchAsmStub
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ add r0, sp, #__PWTB_TransitionBlock ; pTransitionBlock
+ mov r1, r12 ; pMethodDesc / token
+
+ bl VSD_GetTargetForTPWorker
+
+ mov r12, r0
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ EPILOG_BRANCH_REG r12
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; Macro used to compare a MethodTable with that of __TransparentProxy. Sets the Z condition flag to indicate
+; the result (Z=1 for a match, Z=0 for a mismatch).
+;
+ MACRO
+ TP_TYPE_CHECK $methodTableReg, $scratchReg
+
+ ldr $scratchReg, =$CTPMethodTable__s_pThunkTable
+ ldr $scratchReg, [$scratchReg]
+ cmp $scratchReg, $methodTableReg
+ MEND
+
+; ------------------------------------------------------------------
+; Macro used to perform a context check.
+;
+; Calls a user customizable routine that determines whether the current execution context warrants a context
+; transition for the call. Regular remoting (as opposed to context transitioning based on ContextBoundObjects)
+; always returns a context-mismatch from this call.
+;
+; On entry:
+; r0 : this (TransparentProxy object)
+;
+; On exit:
+; r0 : check result (0 == contexts match, non-zero == contexts mismatch)
+; r1-r3,r12,lr: trashed
+;
+ MACRO
+ TP_CONTEXT_CHECK
+
+ ldr r1, [r0, #TransparentProxyObject___stub]
+ ldr r0, [r0, #TransparentProxyObject___stubData]
+ blx r1
+ MEND
+
+; ------------------------------------------------------------------
+; Used by the remoting precode for non-virtual dispatch to instance methods which might be remoted. Performs a
+; context and transparent proxy check and if both of these are negative (or the call has been made on a null
+; 'this') we simply return and the precode will dispatch the call locally as normal. Otherwise we redirect to
+; the remoting system and never return.
+;
+; On entry:
+; r0 : this (may or may not be a TransparentProxy)
+; r1 : trashed
+; lr : return address into RemotingPrecode (RemotingPrecode* + REMOTING_PRECODE_RET_OFFSET)
+; [sp, #0] : caller's saved r1
+; [sp, #4] : caller's saved lr (i.e. return address into caller of RemotingPrecode)
+; plus user arguments in registers and on the stack
+;
+ LEAF_ENTRY PrecodeRemotingThunk
+
+ ; Send null 'this' case to local dispatch case (else we'd need to handle an A/V from this stub).
+ cbz r0, LocalDispatch ; predicted not taken
+
+ ; Load MethodTable* in r12.
+ ldr r12, [r0]
+
+ ; Compare MethodTable in 'this' with that of __TransparentProxy; if they're not equal we dispatch
+ ; locally.
+ TP_TYPE_CHECK r12, r1 ; r1 is a scratch register
+ beq TransparentProxyDispatch ; predicted not taken
+
+LocalDispatch
+ ; Recover target MethodDesc pointer from the RemotingPrecode (we have the address of this +
+ ; REMOTING_PRECODE_RET_OFFSET in lr). Subtract extra 1 to account for the low-bit being set in LR to
+ ; indicate thumb mode.
+ ; We do this here because even the local case needs r12 initialized.
+ ldr r12, [lr, #(RemotingPrecode__m_pMethodDesc - REMOTING_PRECODE_RET_OFFSET - 1)]
+
+ bx lr
+
+ LEAF_END
+
+; ------------------------------------------------------------------
+; Handles the atypical path for the remoting precode above (typically the non-local dispatch cases). The
+; regular entry point defined by NESTED_ENTRY below is never called directly; it serves only to generate
+; prolog unwind data matching the pushes of the caller's r1 and lr done in the remoting precode so we can
+; unwind out of this frame. The real entry point is TransparentProxyDispatch called directly from
+; PrecodeRemotingThunk.
+;
+ NESTED_ENTRY TransparentProxyDispatch_FakeProlog
+
+ ; Match what the remoting precode has pushed.
+ PROLOG_PUSH {r1,lr}
+
+ ; This is where execution really starts.
+TransparentProxyDispatch
+
+ ; We need some temporary registers and to preserve lr.
+ PROLOG_PUSH {r0,r2-r5,lr}
+
+ CHECK_STACK_ALIGNMENT
+
+ ; Recover target MethodDesc pointer from the RemotingPrecode (we have the address of this +
+ ; REMOTING_PRECODE_RET_OFFSET in lr). Subtract extra 1 to account for the low-bit being set in LR to
+ ; indicate thumb mode. Stash the result in a non-volatile register to preserve it over the call to
+ ; TP_CONTEXT_CHECK below.
+ ldr r4, [lr, #(RemotingPrecode__m_pMethodDesc - REMOTING_PRECODE_RET_OFFSET - 1)]
+
+ ; Check whether the TP is already in the correct context. This can happen for ContextBoundObjects
+ ; only. The following macro will trash volatile registers and lr and return the result in r0 (0 ==
+ ; context match, non-zero for everything else). All other registers are preserved.
+ TP_CONTEXT_CHECK
+
+ ; Place MethodDesc* in r12 ready for wherever we dispatch to next.
+ mov r12, r4
+
+ ; Check the result of TP_CONTEXT_CHECK
+ cbnz r0, ContextMismatch1
+
+ ; At this point we know we're being called on a transparent proxy but the source and destination
+ ; contexts match. This only happens for a ContextBoundObject. For a non-interface dispatch we can
+ ; just return to the local dispatch case; the precode will eventually redirect to the jitted code
+ ; which knows how to handle a TP-wrapped ContextBoundObject. For interface calls we need to hand off
+ ; to VSD so it can resolve to the real target method. The quickest way to determine which of these
+ ; cases we need is to look at the classification of the method desc. All interface methods for which a
+ ; remoting precode is used are marked as mcComInterop, which though non-intuitive is generally OK
+ ; since only COM interop and remoting can dispatch directly on an interface method desc. (Generic
+ ; interface methods are not classified as mcComInterop but we use a different mechanism to intercept
+ ; those).
+ ldrh r0, [r4, #MethodDesc__m_wFlags]
+ and r0, #MethodDesc__mdcClassification
+ cmp r0, #MethodDesc__mcComInterop
+ bne LocalDispatch1
+
+ ; Local interface dispatch case. Restore argument registers saved here and in the RemotingPrecode,
+ ; discard return address into the RemotingPrecode (we're not going back there) and restore the real
+ ; caller's return address to LR before tail calling into the interface dispatch helper.
+ EPILOG_POP {r0,r2-r5,lr} ; Restore arg registers saved by this routine and RemotingPrecode lr
+ EPILOG_POP {r1,lr} ; Restore r1 saved by RemotingPrecode and real return address
+ EPILOG_BRANCH InContextTPQuickDispatchAsmStub
+
+LocalDispatch1
+
+ ; Local dispatch case. Restore argument registers saved here and return to the remoting precode.
+ EPILOG_POP {r0,r2-r5,pc}
+
+ContextMismatch1
+ ; Context-mismatch (remoted) dispatch case. Restore argument registers saved here and in the
+ ; RemotingPrecode, discard return address into the RemotingPrecode (we're not going back there) and
+ ; restore the real caller's return address to LR before tail calling into the cross-context helper.
+ EPILOG_POP {r0,r2-r5,lr} ; Restore arg registers saved by this routine and RemotingPrecode lr
+ EPILOG_POP {r1,lr} ; Restore r1 saved by RemotingPrecode and real return address
+ EPILOG_BRANCH TransparentProxyStub_CrossContext
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; Used to dispatch an interface call that may be cross-context or remoted. Normally this is handled
+; by the remoting precode stub above but there is an edge case for generic interface methods that falls
+; through the cracks (it is not easy to cover since the precode stub makes use of the MethodDesc classification as a quick means
+; to differentiate between interface and non-interface calls in the non-cross context case).
+;
+; On entry:
+; r0 : this (TransparentProxy object)
+; r12 : interface MethodDesc
+; plus user arguments in registers and on the stack
+;
+; On exit:
+; Tail calls to the VSD in-context TP dispatcher or remoting system as appropriate.
+;
+ NESTED_ENTRY CRemotingServices__DispatchInterfaceCall
+
+ PROLOG_PUSH {r0-r3,r12,lr}
+
+ CHECK_STACK_ALIGNMENT
+
+ ; Check whether the TP is already in the correct context. This can happen for ContextBoundObjects
+ ; only. The following macro will trash volatile registers and lr and return the result in r0 (0 ==
+ ; context match, non-zero for everything else). All other registers are preserved.
+ TP_CONTEXT_CHECK
+ cbnz r0, ContextMismatch2
+
+ ; Local interface dispatch case. Tail call to VSD helper specifically for the in-context TP dispatch
+ ; scenario. Interface MethodDesc is restored to r12.
+ EPILOG_POP {r0-r3,r12,lr}
+ EPILOG_BRANCH InContextTPQuickDispatchAsmStub
+
+ContextMismatch2
+ ; Context-mismatch (remoted) dispatch case. Tail call to the general remoting dispatch code. Interface
+ ; MethodDesc is restored to r12.
+ EPILOG_POP {r0-r3,r12,lr}
+ EPILOG_BRANCH TransparentProxyStub_CrossContext
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; Common stub used for vtable dispatch of remoted methods. A small prestub will load the vtable slot index
+; into r12 and then jump here. This stub determines whether we're already in the correct context (which can
+; only happen for ContextBoundObjects). Depending on the answers we'll either dispatch the call locally or
+; re-direct it to the remoting system (via TransparentProxyStub_CrossContext).
+;
+; On entry:
+; r0 : this (TransparentProxy object)
+; r12 : virtual method slot number
+; plus user arguments in registers and on the stack
+;
+; On exit:
+; Tail calls to the VSD in-context TP dispatcher or remoting system as appropriate.
+;
+ NESTED_ENTRY TransparentProxyStub
+
+ PROLOG_PUSH {r0-r3,r12,lr}
+
+ CHECK_STACK_ALIGNMENT
+
+ ; Check whether the TP is already in the correct context. This can happen for ContextBoundObjects
+ ; only. The following macro will trash volatile registers and lr and return the result in r0 (0 ==
+ ; context match, non-zero for everything else). All other registers are preserved.
+ TP_CONTEXT_CHECK
+ cbnz r0, ContextMismatch3
+
+ ; We need to perform a local vtable dispatch on the ContextBoundObject. Obviously this needs to be on
+ ; the real type held in the proxy, not TransparentProxy's MethodTable or we'll just end up back here
+ ; recursively.
+
+ ; Recover 'this' pointer and slot number.
+ ldr r0, [sp]
+ ldr r12, [sp, #0x10]
+
+ ; Extract real type from the TP.
+ ldr r0, [r0, #TransparentProxyObject___pMT]
+
+ ; Vtables are no longer a linear array. Instead they use a two-level indirection with the first level
+ ; consisting of fixed sized chunks of function pointer arrays. R12 has our slot number.
+
+ ; Calculate first level chunk index.
+ lsr r1, r12, #ASM__VTABLE_SLOTS_PER_CHUNK_LOG2
+
+ ; Load the address of the chunk from the MethodTable (the chunk table immediately follows the
+ ; MethodTable structure).
+ add r0, #SIZEOF__MethodTable
+ ldr r2, [r0, r1, lsl #2]
+
+ ; Calculate the slot index within the chunk.
+ and r0, r12, #(ASM__VTABLE_SLOTS_PER_CHUNK - 1)
+
+ ; Load the target address into r12 (we no longer need the slot number and we're about to restore the
+ ; other registers).
+ ldr r12, [r2, r0, lsl #2]
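+
+ ; (Worked example: slot 11 with 8 slots per chunk resolves to chunk
+ ; 11 >> 3 = 1, index 11 & 7 = 3, i.e. chunkTable[1][3].)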
+
+ ; Restore the stack state and tail call to the local target.
+ EPILOG_POP {r0-r3}
+ EPILOG_STACK_FREE 4 ; Skip restore of r12 since we've overwritten it
+ EPILOG_POP {lr}
+ EPILOG_BRANCH_REG r12
+
+ContextMismatch3
+ ; Contexts don't match so we have to dispatch through remoting. Clean up the stack and tail call to
+ ; the helper.
+ EPILOG_POP {r0-r3,r12,lr}
+ EPILOG_BRANCH TransparentProxyStub_CrossContext
+
+ NESTED_END
+
+#endif // FEATURE_REMOTING
+#if defined(FEATURE_REMOTING) || defined(FEATURE_COMINTEROP)
+; ------------------------------------------------------------------
+; Function used by remoting/COM interop to get floating point return value (since it's not in the same
+; register(s) as non-floating point values).
+;
+; On entry:
+; r0 : size of the FP result (4 or 8 bytes)
+; r1 : pointer to 64-bit buffer to receive result
+;
+; On exit:
+; buffer pointed to by r1 on entry contains the float or double argument as appropriate
+;
+ LEAF_ENTRY getFPReturn
+
+ cmp r0, #4
+ bne LgetFP8
+ vmov r2, s0
+ str r2, [r1]
+ bx lr
+LgetFP8
+ vmov r2, r3, d0
+ strd r2, r3, [r1]
+ bx lr
+
+ LEAF_END
+
+; ------------------------------------------------------------------
+; Function used by remoting/COM interop to set floating point return value (since it's not in the same
+; register(s) as non-floating point values).
+;
+; On entry:
+; r0 : size of the FP result (4 or 8 bytes)
+; r2/r3 : 32-bit or 64-bit FP result
+;
+; On exit:
+; s0 : float result if r0 == 4
+; d0 : double result if r0 == 8
+;
+ LEAF_ENTRY setFPReturn
+
+ cmp r0, #4
+ bne LsetFP8
+ vmov s0, r2
+ bx lr
+LsetFP8
+ vmov d0, r2, r3
+ bx lr
+
+ LEAF_END
+
+#endif // defined(FEATURE_REMOTING) || defined(FEATURE_COMINTEROP)
+#ifdef FEATURE_REMOTING
+
+; ------------------------------------------------------------------
+; Tail call Object.FieldGetter remotely with the given arguments.
+;
+; On entry:
+; r0 : pMD (MethodDesc * of the Object.FieldGetter method)
+; r1 : pThis (the transparent proxy)
+; r2 : pFirst
+; r3 : pSecond
+; [sp, #0] : pThird
+;
+; On exit:
+; Tail calls to the managed method
+;
+ LEAF_ENTRY CRemotingServices__CallFieldGetter
+
+ mov r12, r0
+ mov r0, r1
+ mov r1, r2
+ mov r2, r3
+ ldr r3, [sp, #0]
+
+ b TransparentProxyStub_CrossContext
+
+ LEAF_END
+
+; ------------------------------------------------------------------
+; Tail call Object.FieldSetter remotely with the given arguments.
+;
+; On entry:
+; r0 : pMD (MethodDesc * of the Object.FieldSetter method)
+; r1 : pThis (the transparent proxy)
+; r2 : pFirst
+; r3 : pSecond
+; [sp, #0] : pThird
+;
+; On exit:
+; Tail calls to the managed method
+;
+ LEAF_ENTRY CRemotingServices__CallFieldSetter
+
+ mov r12, r0
+ mov r0, r1
+ mov r1, r2
+ mov r2, r3
+ ldr r3, [sp, #0]
+
+ b TransparentProxyStub_CrossContext
+
+ LEAF_END
+
+; ------------------------------------------------------------------
+; General purpose remoting helper used to call given target with two parameters.
+;
+; On entry:
+; r0 : pTarget
+; r1 : pFirst
+; r2 : pSecond
+;
+;
+ NESTED_ENTRY CTPMethodTable__CallTargetHelper2,,CallDescrWorkerUnwindFrameChainHandler
+
+ PROLOG_PUSH {r11, lr}
+
+ mov r12, r0
+ mov r0, r1
+ mov r1, r2
+
+ blx r12
+
+ ; Add a nop so that unwinding does not leave the IP in the epilog.
+ ; This ensures that the OS unwinder looks up the personality routine for this method.
+ nop
+
+ EPILOG_POP {r11, pc}
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; General purpose remoting helper used to call given target with three parameters.
+;
+; On entry:
+; r0 : pTarget
+; r1 : pFirst
+; r2 : pSecond
+; r3 : pThird
+;
+;
+ NESTED_ENTRY CTPMethodTable__CallTargetHelper3,,CallDescrWorkerUnwindFrameChainHandler
+
+ PROLOG_PUSH {r11, lr}
+
+ mov r12, r0
+ mov r0, r1
+ mov r1, r2
+ mov r2, r3
+
+ blx r12
+
+ ; Add a nop so that unwinding does not leave the IP in the epilog.
+ ; This ensures that the OS unwinder looks up the personality routine for this method.
+ nop
+
+ EPILOG_POP {r11, pc}
+
+ NESTED_END
+
+#endif // FEATURE_REMOTING
+
+#ifdef FEATURE_COMINTEROP
+; ------------------------------------------------------------------
+; GenericComPlusCallStub that erects a ComPlusMethodFrame and calls into the runtime
+; (CLRToCOMWorker) to dispatch rare cases of the interface call.
+;
+; On entry:
+; r0 : 'this' object
+; r12 : Interface MethodDesc*
+; plus user arguments in registers and on the stack
+;
+; On exit:
+; r0/r1/s0/d0 set to return value of the call as appropriate
+;
+ NESTED_ENTRY GenericComPlusCallStub
+
+ PROLOG_WITH_TRANSITION_BLOCK 0x20
+
+ add r0, sp, #__PWTB_TransitionBlock ; pTransitionBlock
+ mov r1, r12 ; pMethodDesc
+
+ ; Call CLRToCOMWorker(pTransitionBlock, pMethodDesc). This call will set up the rest of the frame (including the vfptr,
+ ; the GS cookie and linking to the thread), make the client call and return with correct registers set
+ ; (r0/r1/s0-s3/d0-d3 as appropriate).
+
+ bl CLRToCOMWorker
+
+ ; r0 = fpRetSize
+
+ ; return value is stored before float argument registers
+ add r1, sp, #(__PWTB_FloatArgumentRegisters - 0x20)
+ bl setStubReturnValue
+
+ EPILOG_WITH_TRANSITION_BLOCK_RETURN
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; COM to CLR stub called the first time a particular method is invoked.
+;
+; On entry:
+; r12 : (MethodDesc* - ComCallMethodDesc_Offset_FromR12) provided by prepad thunk
+; plus user arguments in registers and on the stack
+;
+; On exit:
+; tail calls to real method
+;
+ NESTED_ENTRY ComCallPreStub
+
+ GBLA ComCallPreStub_FrameSize
+ GBLA ComCallPreStub_FramePad
+ GBLA ComCallPreStub_StackAlloc
+ GBLA ComCallPreStub_Frame
+ GBLA ComCallPreStub_ErrorReturn
+
+; Set the defaults
+ComCallPreStub_FramePad SETA 8 ; error return
+ComCallPreStub_FrameSize SETA (ComCallPreStub_FramePad + SIZEOF__GSCookie + SIZEOF__ComMethodFrame)
+
+ IF ComCallPreStub_FrameSize:MOD:8 != 0
+ComCallPreStub_FramePad SETA ComCallPreStub_FramePad + 4
+ComCallPreStub_FrameSize SETA ComCallPreStub_FrameSize + 4
+ ENDIF
+
+ComCallPreStub_StackAlloc SETA ComCallPreStub_FrameSize - SIZEOF__ArgumentRegisters - 2 * 4
+ComCallPreStub_Frame SETA SIZEOF__FloatArgumentRegisters + ComCallPreStub_FramePad + SIZEOF__GSCookie
+ComCallPreStub_ErrorReturn SETA SIZEOF__FloatArgumentRegisters
+
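+ ; The sizing logic above, as a C sketch (illustrative; the SIZEOF__* values are
+ ; assembler constants):
+ ;
+ ;   unsigned pad  = 8;                                          // error-return slot
+ ;   unsigned size = pad + SIZEOF__GSCookie + SIZEOF__ComMethodFrame;
+ ;   if (size % 8 != 0) { pad += 4; size += 4; }                 // keep SP 8-byte aligned
+ ;   unsigned alloc = size - SIZEOF__ArgumentRegisters - 2 * 4;  // r0-r3 and r11/lr are pushed separately
+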
+ PROLOG_PUSH {r0-r3} ; Spill general argument registers
+ PROLOG_PUSH {r11,lr} ; Save return address
+ PROLOG_STACK_ALLOC ComCallPreStub_StackAlloc ; Alloc non-spill portion of stack frame
+ PROLOG_VPUSH {d0-d7} ; Spill floating point argument registers
+
+ CHECK_STACK_ALIGNMENT
+
+ ; Finish initializing the frame. The C++ helper will fill in the GS cookie and vfptr and link us to
+ ; the Thread frame chain (see ComPrestubMethodFrame::Push). That leaves us with m_pFuncDesc.
+ ; The prepad thunk passes us a value which is MethodDesc* - ComCallMethodDesc_Offset_FromR12 (due to
+ ; encoding limitations in the thunk), so we must correct this by adding ComCallMethodDesc_Offset_FromR12
+ ; back before storing the pointer.
+ add r12, #(ComCallMethodDesc_Offset_FromR12)
+ str r12, [sp, #(ComCallPreStub_Frame + UnmanagedToManagedFrame__m_pvDatum)]
+
+ ; Call the C++ worker: ComPreStubWorker(&Frame)
+ add r0, sp, #(ComCallPreStub_Frame)
+ add r1, sp, #(ComCallPreStub_ErrorReturn)
+ bl ComPreStubWorker
+
+ ; Handle failure case.
+ cbz r0, ErrorExit
+
+ ; Stash real target address where it won't be overwritten by restoring the calling state.
+ mov r12, r0
+
+ EPILOG_VPOP {d0-d7} ; Restore floating point argument registers
+ EPILOG_STACK_FREE ComCallPreStub_StackAlloc
+ EPILOG_POP {r11,lr}
+ EPILOG_POP {r0-r3} ; Restore argument registers
+ ; Tail call the real target. On ARM ComPreStubWorker actually returns the address of the prepad
+ ; thunk, so that we don't run out of volatile registers trying to remember both the new target
+ ; address and the hidden MethodDesc* argument. ComPreStubWorker has patched the prepad, however,
+ ; so the second time through we won't end up here again.
+ EPILOG_BRANCH_REG r12
+
+ErrorExit
+ ; Failed to find a stub to call. Retrieve the return value ComPreStubWorker set for us.
+ ldr r0, [sp, #(ComCallPreStub_ErrorReturn)]
+ ldr r1, [sp, #(ComCallPreStub_ErrorReturn+4)]
+ EPILOG_STACK_FREE ComCallPreStub_StackAlloc + SIZEOF__FloatArgumentRegisters
+ EPILOG_POP {r11,lr}
+ EPILOG_STACK_FREE SIZEOF__ArgumentRegisters
+ EPILOG_RETURN
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; COM to CLR stub which sets up a ComMethodFrame and calls COMToCLRWorker.
+;
+; On entry:
+; r12 : (MethodDesc* - ComCallMethodDesc_Offset_FromR12) provided by prepad thunk
+; plus user arguments in registers and on the stack
+;
+; On exit:
+; Result in r0/r1/s0/d0 as per the real method being called
+;
+ NESTED_ENTRY GenericComCallStub,,ReverseComUnwindFrameChainHandler
+
+; Calculate space needed on stack for alignment padding, a GS cookie and a ComMethodFrame (minus the last
+; field, m_ReturnAddress, which we'll push explicitly).
+
+ GBLA GenericComCallStub_FrameSize
+ GBLA GenericComCallStub_FramePad
+ GBLA GenericComCallStub_StackAlloc
+ GBLA GenericComCallStub_Frame
+
+; Set the defaults
+GenericComCallStub_FramePad SETA 0
+GenericComCallStub_FrameSize SETA (GenericComCallStub_FramePad + SIZEOF__GSCookie + SIZEOF__ComMethodFrame)
+
+ IF GenericComCallStub_FrameSize:MOD:8 != 0
+GenericComCallStub_FramePad SETA 4
+GenericComCallStub_FrameSize SETA GenericComCallStub_FrameSize + GenericComCallStub_FramePad
+ ENDIF
+
+GenericComCallStub_StackAlloc SETA GenericComCallStub_FrameSize - SIZEOF__ArgumentRegisters - 2 * 4
+GenericComCallStub_Frame SETA SIZEOF__FloatArgumentRegisters + GenericComCallStub_FramePad + SIZEOF__GSCookie
+
+ PROLOG_PUSH {r0-r3} ; Spill general argument registers
+ PROLOG_PUSH {r11,lr} ; Save return address
+ PROLOG_STACK_ALLOC GenericComCallStub_StackAlloc ; Alloc non-spill portion of stack frame
+ PROLOG_VPUSH {d0-d7} ; Spill floating point argument registers
+
+ CHECK_STACK_ALIGNMENT
+
+ ; Store MethodDesc* in frame. Due to a limitation of the prepad, r12 actually contains a value
+ ; "ComCallMethodDesc_Offset_FromR12" less than the pointer we want, so fix that up.
+ add r12, r12, #(ComCallMethodDesc_Offset_FromR12)
+ str r12, [sp, #(GenericComCallStub_Frame + UnmanagedToManagedFrame__m_pvDatum)]
+
+ ; Call COMToCLRWorker(pThread, pFrame). Note that pThread is computed inside the method so we don't
+ ; need to set it up here.
+ ;
+ ; Setup R1 to point to the start of the explicit frame. We account for alignment padding and
+ ; space for GSCookie.
+ add r1, sp, #(GenericComCallStub_Frame)
+ bl COMToCLRWorker
+
+ EPILOG_STACK_FREE GenericComCallStub_StackAlloc + SIZEOF__FloatArgumentRegisters
+ EPILOG_POP {r11,lr}
+ EPILOG_STACK_FREE SIZEOF__ArgumentRegisters
+ EPILOG_RETURN
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; COM to CLR stub called from COMToCLRWorker that actually dispatches to the real managed method.
+;
+; On entry:
+; r0 : dwStackSlots, count of argument stack slots to copy
+; r1 : pFrame, ComMethodFrame pushed by GenericComCallStub above
+; r2 : pTarget, address of code to call
+; r3 : pSecretArg, hidden argument passed to target above in r12
+; [sp, #0] : pDangerousThis, managed 'this' reference
+;
+; On exit:
+; Result in r0/r1/s0/d0 as per the real method being called
+;
+ NESTED_ENTRY COMToCLRDispatchHelper,,CallDescrWorkerUnwindFrameChainHandler
+
+ PROLOG_PUSH {r4-r5,r7,lr}
+ PROLOG_STACK_SAVE r7
+
+ ; Copy stack-based arguments. Make sure the eventual SP ends up 8-byte aligned. Note that the
+ ; following calculations assume that the prolog has left the stack already aligned.
+ CHECK_STACK_ALIGNMENT
+
+ cbz r0, COMToCLRDispatchHelper_ArgumentsSetup
+
+ lsl r4, r0, #2 ; r4 = (dwStackSlots * 4)
+ and r5, r4, #4 ; Align the stack
+ sub sp, sp, r5
+
+ add r5, r1, #SIZEOF__ComMethodFrame
+ add r5, r5, r4
+
+COMToCLRDispatchHelper_StackLoop
+ ldr r4, [r5,#-4]!
+ str r4, [sp,#-4]!
+ subs r0, r0, #1
+ bne COMToCLRDispatchHelper_StackLoop
+
+ CHECK_STACK_ALIGNMENT
+
+COMToCLRDispatchHelper_ArgumentsSetup
+ ; Load floating point argument registers.
+ sub r4, r1, #(GenericComCallStub_Frame)
+ vldm r4, {d0-d7}
+
+ ; Prepare the call target and hidden argument prior to overwriting r0-r3.
+ mov r12, r3 ; r12 = hidden argument
+ mov lr, r2 ; lr = target code
+
+ ; Load general argument registers except r0.
+ add r4, r1, #(SIZEOF__ComMethodFrame - SIZEOF__ArgumentRegisters + 4)
+ ldm r4, {r1-r3}
+
+ ; Load r0 from the managed this, not the original incoming IUnknown*.
+ ldr r0, [r7, #(4 * 4)]
+
+ ; Make the call.
+ blx lr
+
+ EPILOG_STACK_RESTORE r7
+ EPILOG_POP {r4-r5,r7,pc}
+
+ NESTED_END
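+
+; The stack argument copy above behaves like this C sketch (illustrative; assumes each
+; stack slot is 32 bits and that stack arguments immediately follow the ComMethodFrame):
+;
+;   UINT32 *pSrc = (UINT32 *)((BYTE *)pFrame + SIZEOF__ComMethodFrame) + dwStackSlots;
+;   UINT32 *pDst = (UINT32 *)sp;     // sp already padded so the final SP is 8-byte aligned
+;   while (dwStackSlots--)
+;       *--pDst = *--pSrc;           // copy top-down onto the new stack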
+
+#endif // FEATURE_COMINTEROP
+
+#ifdef PROFILING_SUPPORTED
+
+PROFILE_ENTER equ 1
+PROFILE_LEAVE equ 2
+PROFILE_TAILCALL equ 4
+
+ ; Define the layout of the PROFILE_PLATFORM_SPECIFIC_DATA we push on the stack for all profiler
+ ; helpers.
+ map 0
+ field 4 ; r0
+ field 4 ; r1
+ field 4 ; r11
+ field 4 ; Pc (caller's PC, i.e. LR)
+ field SIZEOF__FloatArgumentRegisters ; spilled floating point argument registers
+functionId field 4
+probeSp field 4
+profiledSp field 4
+hiddenArg field 4
+flags field 4
+
+SIZEOF__PROFILE_PLATFORM_SPECIFIC_DATA field 0
+
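+; Viewed as a C struct, the field map above corresponds roughly to (illustrative; the
+; authoritative definition is the C++ PROFILE_PLATFORM_SPECIFIC_DATA structure, which
+; this layout must match):
+;
+;   struct PROFILE_PLATFORM_SPECIFIC_DATA {
+;       UINT32 r0, r1, r11, Pc;              // saved registers (Pc = caller's LR)
+;       FloatArgumentRegisters floatArgs;    // spilled d0-d7
+;       UINT32 functionId, probeSp, profiledSp, hiddenArg, flags;
+;   };
+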
+; ------------------------------------------------------------------
+; Macro used to generate profiler helpers. In all cases we push a partially initialized
+; PROFILE_PLATFORM_SPECIFIC_DATA structure on the stack and call into a C++ helper to continue processing.
+;
+; On entry:
+; r0 : clientInfo
+; r1/r2 : return values (in the leave case)
+; frame pointer (r11) must be set (in the enter case)
+; all arguments are on the stack at frame pointer (r11) + 8 bytes (past the saved lr & previous r11)
+;
+; On exit:
+; All register values are preserved including volatile registers
+;
+ MACRO
+ DefineProfilerHelper $HelperName, $Flags
+
+ GBLS __ProfilerHelperFunc
+__ProfilerHelperFunc SETS "$HelperName":CC:"Naked"
+
+ NESTED_ENTRY $__ProfilerHelperFunc
+
+ IMPORT $HelperName ; The C++ helper which does most of the work
+
+ PROLOG_PUSH {r0,r3,r9,r12} ; Save volatile general purpose registers (r1 & r2 are saved below); r9 is needed for virtual unwinding
+ PROLOG_STACK_ALLOC (6*4) ; Reserve space for the tail end of the structure (5*4 bytes); the extra 4 bytes keep the stack 8-byte aligned
+ PROLOG_VPUSH {d0-d7} ; Spill floating point argument registers
+ PROLOG_PUSH {r1,r11,lr} ; Save possible return value in r1, frame pointer and return address
+ PROLOG_PUSH {r2} ; Save possible return value in r0. Before calling the leave hook the JIT moves the
+ ; contents of r0 into r2, so we push r2 instead of r0. This push cannot be combined
+ ; with the one above because r2 would then be pushed before r1.
+
+ CHECK_STACK_ALIGNMENT
+
+ ; Zero r1 for use clearing fields in the PROFILE_PLATFORM_SPECIFIC_DATA.
+ eor r1, r1
+
+ ; Clear functionId.
+ str r1, [sp, #functionId]
+
+ ; Save caller's SP (at the point this helper was called).
+ add r2, sp, #(SIZEOF__PROFILE_PLATFORM_SPECIFIC_DATA + 20)
+ str r2, [sp, #probeSp]
+
+ ; Save caller's SP (at the point where only argument registers have been spilled).
+ ldr r2, [r11]
+ add r2, r2, #8 ; arguments start at frame pointer (r11) + 8 (lr & the previous frame pointer are saved below them)
+ str r2, [sp, #profiledSp]
+
+ ; Clear hiddenArg.
+ str r1, [sp, #hiddenArg]
+
+ ; Set flags to indicate type of helper called.
+ mov r1, #($Flags)
+ str r1, [sp, #flags]
+
+ ; Call C++ portion of helper (<$HelperName>(clientInfo, &profilePlatformSpecificData)).
+ mov r1, sp
+ bl $HelperName
+
+ EPILOG_POP {r2}
+ EPILOG_POP {r1,r11,lr}
+ EPILOG_VPOP {d0-d7}
+ EPILOG_STACK_FREE (6*4)
+ EPILOG_POP {r0,r3,r9,r12}
+
+ EPILOG_RETURN
+
+ NESTED_END
+
+ MEND
+
+ DefineProfilerHelper ProfileEnter, PROFILE_ENTER
+ DefineProfilerHelper ProfileLeave, PROFILE_LEAVE
+ DefineProfilerHelper ProfileTailcall, PROFILE_TAILCALL
+
+#endif // PROFILING_SUPPORTED
+
+ ;
+ ; If a preserved register was pushed onto the stack between
+ ; the managed caller and the H_M_F, _R4_R11 will point to its
+ ; location on the stack; the GC will already have updated that
+ ; stack copy, and it will be popped back into the appropriate
+ ; register when the corresponding epilog is run.
+ ;
+ ; Otherwise, the register is preserved across all the code
+ ; in this HCALL or FCALL, so we need to update the register
+ ; here because the GC will have updated our copy in the
+ ; frame.
+ ;
+ ; So, if _R4_R11 points into the MachState, we need to update
+ ; the register here. That's what this macro does.
+ ;
+
+ MACRO
+ RestoreRegMS $regIndex, $reg
+
+ ; Incoming:
+ ;
+ ; R0 = address of MachState
+ ;
+ ; $regIndex: Index of the register (R4-R11). For R4, index is 4.
+ ; For R5, index is 5, and so on.
+ ;
+ ; $reg: Register name (e.g. R4, R5, etc)
+ ;
+ ; Get the address of the specified captured register from machine state
+ add r2, r0, #(MachState__captureR4_R11 + (($regIndex-4)*4))
+
+ ; Get the address of the specified preserved register from machine state
+ ldr r3, [r0, #(MachState___R4_R11 + (($regIndex-4)*4))]
+
+ cmp r2, r3
+ bne %FT0
+ ldr $reg, [r2]
+0
+
+ MEND
+
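+; The per-register logic of RestoreRegMS, as a C sketch (illustrative; names follow
+; the MachState offsets used above):
+;
+;   TADDR *pCapture = &pState->captureR4_R11[i - 4];
+;   if (pState->_R4_R11[i - 4] == pCapture)  // pointer still into the MachState...
+;       reg[i] = *pCapture;                  // ...so reload the GC-updated copy
+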
+; EXTERN_C int __fastcall HelperMethodFrameRestoreState(
+; INDEBUG_COMMA(HelperMethodFrame *pFrame)
+; MachState *pState
+; )
+ LEAF_ENTRY HelperMethodFrameRestoreState
+
+#ifdef _DEBUG
+ mov r0, r1
+#endif
+
+ ; If machine state is invalid, then simply exit
+ ldr r1, [r0, #MachState__isValid]
+ cmp r1, #0
+ beq Done
+
+ RestoreRegMS 4, R4
+ RestoreRegMS 5, R5
+ RestoreRegMS 6, R6
+ RestoreRegMS 7, R7
+ RestoreRegMS 8, R8
+ RestoreRegMS 9, R9
+ RestoreRegMS 10, R10
+ RestoreRegMS 11, R11
+Done
+ ; It's imperative that the return value of HelperMethodFrameRestoreState is zero,
+ ; as it is used in a state machine that loops until the value becomes zero.
+ ; Refer to the HELPER_METHOD_FRAME_END macro for details.
+ mov r0,#0
+ bx lr
+
+ LEAF_END
+
+#ifdef FEATURE_HIJACK
+
+; ------------------------------------------------------------------
+; Hijack function for functions which return a reference type
+ NESTED_ENTRY OnHijackObjectTripThread
+ PROLOG_PUSH {r0,r4-r11,lr}
+
+ CHECK_STACK_ALIGNMENT
+
+ mov r0, sp
+ bl OnHijackObjectWorker
+
+ EPILOG_POP {r0,r4-r11,pc}
+ NESTED_END
+
+; ------------------------------------------------------------------
+; Hijack function for functions which return an interior pointer within an object allocated in managed heap
+ NESTED_ENTRY OnHijackInteriorPointerTripThread
+ PROLOG_PUSH {r0,r4-r11,lr}
+
+ CHECK_STACK_ALIGNMENT
+
+ mov r0, sp
+ bl OnHijackInteriorPointerWorker
+
+ EPILOG_POP {r0,r4-r11,pc}
+ NESTED_END
+
+; ------------------------------------------------------------------
+; Hijack function for functions which return a value type
+ NESTED_ENTRY OnHijackScalarTripThread
+ PROLOG_PUSH {r0,r4-r11,lr}
+
+ PROLOG_VPUSH {d0-d3} ; save d0-d3 since they can hold the floating point return value
+ PROLOG_PUSH {r1} ; save r1 since it can hold part of a return value wider than 32 bits
+ PROLOG_STACK_ALLOC 4 ; 8 byte align
+
+ CHECK_STACK_ALIGNMENT
+
+ add r0, sp, #40
+ bl OnHijackScalarWorker
+
+ EPILOG_STACK_FREE 4
+ EPILOG_POP {r1}
+ EPILOG_VPOP {d0-d3}
+
+ EPILOG_POP {r0,r4-r11,pc}
+ NESTED_END
+
+#endif // FEATURE_HIJACK
+
+; ------------------------------------------------------------------
+; Macro to generate Redirection Stubs
+;
+; $reason : reason for redirection
+; Eg. GCThreadControl
+; NOTE: If you edit this macro, make sure you update GetCONTEXTFromRedirectedStubStackFrame.
+; This function is used by both the personality routine and the debugger to retrieve the original CONTEXT.
+ MACRO
+ GenerateRedirectedHandledJITCaseStub $reason
+
+ GBLS __RedirectionStubFuncName
+ GBLS __RedirectionStubEndFuncName
+ GBLS __RedirectionFuncName
+__RedirectionStubFuncName SETS "RedirectedHandledJITCaseFor":CC:"$reason":CC:"_Stub"
+__RedirectionStubEndFuncName SETS "RedirectedHandledJITCaseFor":CC:"$reason":CC:"_StubEnd"
+__RedirectionFuncName SETS "|?RedirectedHandledJITCaseFor":CC:"$reason":CC:"@Thread@@CAXXZ|"
+
+ IMPORT $__RedirectionFuncName
+
+ NESTED_ENTRY $__RedirectionStubFuncName
+
+ PROLOG_PUSH {r7,lr} ; return address
+ PROLOG_STACK_ALLOC 4 ; stack slot to save the CONTEXT *
+ PROLOG_STACK_SAVE r7
+
+ ;REDIRECTSTUB_SP_OFFSET_CONTEXT is defined in asmconstants.h
+ ;If CONTEXT is not saved at 0 offset from SP it must be changed as well.
+ ASSERT REDIRECTSTUB_SP_OFFSET_CONTEXT == 0
+
+ ; Runtime check for 8-byte alignment. This check is necessary as this function can be
+ ; entered before complete execution of the prolog of another function.
+ and r0, r7, #4
+ sub sp, sp, r0
+
+ ; stack must be 8 byte aligned
+ CHECK_STACK_ALIGNMENT
+
+ ;
+ ; Save a copy of the redirect CONTEXT*.
+ ; This is needed for the debugger to unwind the stack.
+ ;
+ bl GetCurrentSavedRedirectContext
+ str r0, [r7]
+
+ ;
+ ; Fetch the interrupted pc and save it as our return address.
+ ;
+ ldr r1, [r0, #CONTEXT_Pc]
+ str r1, [r7, #8]
+
+ ;
+ ; Call target, which will do whatever we needed to do in the context
+ ; of the target thread, and will RtlRestoreContext when it is done.
+ ;
+ bl $__RedirectionFuncName
+
+ EMIT_BREAKPOINT ; Unreachable
+
+; Put a label here to tell the debugger where the end of this function is.
+$__RedirectionStubEndFuncName
+ EXPORT $__RedirectionStubEndFuncName
+
+ NESTED_END
+
+ MEND
+
+; ------------------------------------------------------------------
+; Redirection Stub for GC in fully interruptible method
+ GenerateRedirectedHandledJITCaseStub GCThreadControl
+; ------------------------------------------------------------------
+ GenerateRedirectedHandledJITCaseStub DbgThreadControl
+; ------------------------------------------------------------------
+ GenerateRedirectedHandledJITCaseStub UserSuspend
+; ------------------------------------------------------------------
+ GenerateRedirectedHandledJITCaseStub YieldTask
+
+#ifdef _DEBUG
+; ------------------------------------------------------------------
+; Redirection Stub for GC Stress
+ GenerateRedirectedHandledJITCaseStub GCStress
+#endif
+
+; ------------------------------------------------------------------
+; Functions to probe for stack space
+; Input reg r4 = amount of stack to probe for
+; value of reg r4 is preserved on exit from function
+; r12 is trashed
+; The below two functions were copied from vctools\crt\crtw32\startup\arm\chkstk.asm
+
+ NESTED_ENTRY checkStack
+ subs r12,sp,r4
+ mrc p15,#0,r4,c13,c0,#2 ; get TEB *
+ ldr r4,[r4,#8] ; get Stack limit
+ bcc checkStack_neg ; if r12 is less than 0, set it to 0
+checkStack_label1
+ cmp r12, r4
+ bcc stackProbe ; must probe to extend the guard page if r12 is beyond the stack limit
+ sub r4, sp, r12 ; restore value of r4
+ EPILOG_RETURN
+checkStack_neg
+ mov r12, #0
+ b checkStack_label1
+ NESTED_END
+
+ NESTED_ENTRY stackProbe
+ PROLOG_PUSH {r5,r6}
+ mov r6, r12
+ bfc r6, #0, #0xc ; align down (4K)
+stackProbe_loop
+ sub r4,r4,#0x1000 ; dec stack Limit by 4K as page size is 4K
+ ldr r5,[r4] ; try to read ... this should move the guard page
+ cmp r4,r6
+ bne stackProbe_loop
+ EPILOG_POP {r5,r6}
+ EPILOG_NOP sub r4,sp,r12
+ EPILOG_RETURN
+ NESTED_END
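+
+; Together these two functions implement the usual guard-page probe; roughly, in C
+; (a sketch, illustrative only):
+;
+;   UINT_PTR probe = (UINT_PTR)sp > amount ? (UINT_PTR)sp - amount : 0;  // r12, clamped
+;   UINT_PTR stop  = probe & ~(UINT_PTR)0xfff;           // align down to a 4K page
+;   BYTE    *page  = teb->StackLimit;
+;   if (probe < (UINT_PTR)page)                          // only probe below the limit
+;       do { (void)*(volatile BYTE *)(page -= 0x1000); } while ((UINT_PTR)page != stop);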
+
+;------------------------------------------------
+; VirtualMethodFixupStub
+;
+; In NGEN images, virtual slots inherited from cross-module dependencies
+; point to a jump thunk that calls into the following function that will
+; call into a VM helper. The VM helper is responsible for patching up the
+; thunk, upon executing the precode, so that all subsequent calls go directly
+; to the actual method body.
+;
+; This is done lazily for performance reasons.
+;
+; On entry:
+;
+; R0 = "this" pointer
+; R12 = Address of thunk + 4
+
+ NESTED_ENTRY VirtualMethodFixupStub
+
+ ; Save arguments and return address
+ PROLOG_PUSH {r0-r3, lr}
+
+ ; Align stack
+ PROLOG_STACK_ALLOC SIZEOF__FloatArgumentRegisters + 4
+ vstm sp, {d0-d7}
+
+
+ CHECK_STACK_ALIGNMENT
+
+ ; R12 contains an address that is 4 bytes ahead of
+ ; where the thunk starts. Refer to ZapImportVirtualThunk::Save
+ ; for details on this.
+ ;
+ ; Move the correct thunk start address in R1
+ sub r1, r12, #4
+
+ ; Call the helper in the VM to perform the actual fixup
+ ; and tell us where to tail call. R0 already contains
+ ; the this pointer.
+ bl VirtualMethodFixupWorker
+
+ ; On return, R0 contains the target to tailcall to
+ mov r12, r0
+
+ ; pop the stack and restore original register state
+ vldm sp, {d0-d7}
+ EPILOG_STACK_FREE SIZEOF__FloatArgumentRegisters + 4
+ EPILOG_POP {r0-r3, lr}
+
+ PATCH_LABEL VirtualMethodFixupPatchLabel
+
+ ; and tailcall to the actual method
+ EPILOG_BRANCH_REG r12
+
+ NESTED_END
+
+;------------------------------------------------
+; ExternalMethodFixupStub
+;
+; In NGEN images, calls to cross-module external methods initially
+; point to a jump thunk that calls into the following function that will
+; call into a VM helper. The VM helper is responsible for patching up the
+; thunk, upon executing the precode, so that all subsequent calls go directly
+; to the actual method body.
+;
+; This is done lazily for performance reasons.
+;
+; On entry:
+;
+; R12 = Address of thunk + 4
+
+ NESTED_ENTRY ExternalMethodFixupStub
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ add r0, sp, #__PWTB_TransitionBlock ; pTransitionBlock
+
+ ; Adjust (read comment above for details) and pass the address of the thunk
+ sub r1, r12, #4 ; pThunk
+
+ mov r2, #0 ; sectionIndex
+ mov r3, #0 ; pModule
+ bl ExternalMethodFixupWorker
+
+ ; mov the address we patched to in R12 so that we can tail call to it
+ mov r12, r0
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ PATCH_LABEL ExternalMethodFixupPatchLabel
+ EPILOG_BRANCH_REG r12
+
+ NESTED_END
+
+;------------------------------------------------
+; StubDispatchFixupStub
+;
+; In NGEN images, calls to interface methods initially
+; point to a jump thunk that calls into the following function that will
+; call into a VM helper. The VM helper is responsible for patching up the
+; thunk with actual stub dispatch stub.
+;
+; On entry:
+;
+; R4 = Address of indirection cell
+
+ NESTED_ENTRY StubDispatchFixupStub
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ ; address of StubDispatchFrame
+ add r0, sp, #__PWTB_TransitionBlock ; pTransitionBlock
+ mov r1, r4 ; siteAddrForRegisterIndirect
+ mov r2, #0 ; sectionIndex
+ mov r3, #0 ; pModule
+
+ bl StubDispatchFixupWorker
+
+ ; mov the address we patched to in R12 so that we can tail call to it
+ mov r12, r0
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ PATCH_LABEL StubDispatchFixupPatchLabel
+ EPILOG_BRANCH_REG r12
+
+ NESTED_END
+
+;------------------------------------------------
+; JIT_RareDisableHelper
+;
+; The JIT expects this helper to preserve registers used for return values
+;
+ NESTED_ENTRY JIT_RareDisableHelper
+
+ PROLOG_PUSH {r0-r1, r11, lr} ; save integer return value
+ PROLOG_VPUSH {d0-d3} ; floating point return value
+
+ CHECK_STACK_ALIGNMENT
+
+ bl JIT_RareDisableHelperWorker
+
+ EPILOG_VPOP {d0-d3}
+ EPILOG_POP {r0-r1, r11, pc}
+
+ NESTED_END
+
+
+#ifdef FEATURE_CORECLR
+;
+; JIT Static access helpers for single appdomain case
+;
+
+; ------------------------------------------------------------------
+; void* JIT_GetSharedNonGCStaticBase(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedNonGCStaticBase_SingleAppDomain
+
+ ; If class is not initialized, bail to C++ helper
+ add r2, r0, #DomainLocalModule__m_pDataBlob
+ ldrb r2, [r2, r1]
+ tst r2, #1
+ beq CallCppHelper1
+
+ bx lr
+
+CallCppHelper1
+ ; Tail call JIT_GetSharedNonGCStaticBase_Helper
+ b JIT_GetSharedNonGCStaticBase_Helper
+ LEAF_END
+
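+; The fast path above corresponds to this C sketch (illustrative; in the single
+; AppDomain case moduleDomainID is effectively the DomainLocalModule pointer, and the
+; low bit of the class's byte in m_pDataBlob is set once the class is initialized):
+;
+;   void *JIT_GetSharedNonGCStaticBase_SingleAppDomain(DomainLocalModule *pModule,
+;                                                      DWORD dwClassDomainID)
+;   {
+;       if (!(((BYTE *)pModule)[DomainLocalModule__m_pDataBlob + dwClassDomainID] & 1))
+;           return JIT_GetSharedNonGCStaticBase_Helper(pModule, dwClassDomainID);
+;       return pModule;   // the module pointer itself is the non-GC statics base
+;   }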
+
+; ------------------------------------------------------------------
+; void* JIT_GetSharedNonGCStaticBaseNoCtor(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain
+
+ bx lr
+ LEAF_END
+
+
+; ------------------------------------------------------------------
+; void* JIT_GetSharedGCStaticBase(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedGCStaticBase_SingleAppDomain
+
+ ; If class is not initialized, bail to C++ helper
+ add r2, r0, #DomainLocalModule__m_pDataBlob
+ ldrb r2, [r2, r1]
+ tst r2, #1
+ beq CallCppHelper3
+
+ ldr r0, [r0, #DomainLocalModule__m_pGCStatics]
+ bx lr
+
+CallCppHelper3
+ ; Tail call Jit_GetSharedGCStaticBase_Helper
+ b JIT_GetSharedGCStaticBase_Helper
+ LEAF_END
+
+
+; ------------------------------------------------------------------
+; void* JIT_GetSharedGCStaticBaseNoCtor(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain
+
+ ldr r0, [r0, #DomainLocalModule__m_pGCStatics]
+ bx lr
+ LEAF_END
+
+#endif // FEATURE_CORECLR
+
+; ------------------------------------------------------------------
+; __declspec(naked) void F_CALL_CONV JIT_Stelem_Ref(PtrArray* array, unsigned idx, Object* val)
+ LEAF_ENTRY JIT_Stelem_Ref
+
+ ; We retain arguments as they were passed and use r0 == array; r1 == idx; r2 == val
+
+ ; check for null array
+ cbz r0, ThrowNullReferenceException
+
+ ; idx bounds check
+ ldr r3,[r0,#ArrayBase__m_NumComponents]
+ cmp r3,r1
+ bls ThrowIndexOutOfRangeException
+
+ ; fast path for null assignment (doesn't need any write barriers)
+ cbz r2, AssigningNull
+
+ ; Verify the array-type and val-type matches before writing
+ ldr r12, [r0] ; r12 = array MT
+ ldr r3, [r2] ; r3 = val->GetMethodTable()
+ ldr r12, [r12, #MethodTable__m_ElementType] ; array->GetArrayElementTypeHandle()
+ cmp r3, r12
+ beq JIT_Stelem_DoWrite
+
+ ; Types didn't match, but writing into an array of objects is always allowed
+ ldr r3, =$g_pObjectClass
+ ldr r3, [r3] ; r3 = *g_pObjectClass
+ cmp r3, r12 ; array type matches with Object*
+ beq JIT_Stelem_DoWrite
+
+ ; array type and val type do not exactly match. Raise frame and do detailed match
+ b JIT_Stelem_Ref_NotExactMatch
+
+AssigningNull
+ ; Assigning null doesn't need write barrier
+ adds r0, r1, LSL #2 ; r0 = array + (idx * 4); the m_Array offset is applied in the store below
+ str r2, [r0, #PtrArray__m_Array] ; array->m_array[idx] = val
+ bx lr
+
+ThrowNullReferenceException
+ ; Tail call JIT_InternalThrow(NullReferenceException)
+ ldr r0, =CORINFO_NullReferenceException_ASM
+ b JIT_InternalThrow
+
+ThrowIndexOutOfRangeException
+ ; Tail call JIT_InternalThrow(IndexOutOfRangeException)
+ ldr r0, =CORINFO_IndexOutOfRangeException_ASM
+ b JIT_InternalThrow
+
+ LEAF_END
+
+; ------------------------------------------------------------------
+; __declspec(naked) void F_CALL_CONV JIT_Stelem_Ref_NotExactMatch(PtrArray* array,
+; unsigned idx, Object* val)
+; r12 = array->GetArrayElementTypeHandle()
+;
+ NESTED_ENTRY JIT_Stelem_Ref_NotExactMatch
+ PROLOG_PUSH {lr}
+ PROLOG_PUSH {r0-r2}
+
+ CHECK_STACK_ALIGNMENT
+
+ ; allow the store if val can be cast to the array element type
+ ; call ObjIsInstanceOfNoGC(val, array->GetArrayElementTypeHandle())
+ mov r1, r12 ; array->GetArrayElementTypeHandle()
+ mov r0, r2
+ bl ObjIsInstanceOfNoGC
+ cmp r0, TypeHandle_CanCast
+ beq DoWrite ; ObjIsInstance returned TypeHandle::CanCast
+
+ ; check via raising frame
+NeedFrame
+ mov r1, sp ; r1 = &array
+ adds r0, sp, #8 ; r0 = &val
+ bl ArrayStoreCheck ; ArrayStoreCheck(&val, &array)
+
+DoWrite
+ EPILOG_POP {r0-r2}
+ EPILOG_POP {lr}
+ EPILOG_BRANCH JIT_Stelem_DoWrite
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; __declspec(naked) void F_CALL_CONV JIT_Stelem_DoWrite(PtrArray* array, unsigned idx, Object* val)
+ LEAF_ENTRY JIT_Stelem_DoWrite
+
+ ; Setup args for JIT_WriteBarrier. r0 = &array->m_array[idx]; r1 = val
+ adds r0, #PtrArray__m_Array ; r0 = &array->m_array
+ adds r0, r1, LSL #2
+ mov r1, r2 ; r1 = val
+
+ ; Branch to the write barrier (which has already been patched with the
+ ; single- or multi-proc variant appropriate for the current CPU).
+ b JIT_WriteBarrier
+
+ LEAF_END
+
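+; Taken together, the three routines above implement stelem.ref; the overall control
+; flow, as a C sketch (illustrative; helper names are loose stand-ins for the paths
+; shown above):
+;
+;   void Stelem_Ref(PtrArray *a, unsigned idx, Object *val)
+;   {
+;       if (a == NULL)                 JIT_InternalThrow(CORINFO_NullReferenceException_ASM);
+;       if (idx >= a->m_NumComponents) JIT_InternalThrow(CORINFO_IndexOutOfRangeException_ASM);
+;       if (val == NULL) { a->m_Array[idx] = NULL; return; }    // no barrier needed
+;       MethodTable *pElemMT = a->pMT->m_ElementType;
+;       if (val->pMT != pElemMT && pElemMT != g_pObjectClass)
+;           CheckCast(a, idx, val);    // ObjIsInstanceOfNoGC, else ArrayStoreCheck
+;       JIT_WriteBarrier(&a->m_Array[idx], val);
+;   }
+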
+; ------------------------------------------------------------------
+; GC write barrier support.
+;
+; There's some complexity here for a couple of reasons:
+;
+; Firstly, there are a few variations of barrier types (input registers, checked vs unchecked, UP vs MP etc.).
+; So first we define a number of helper macros that perform fundamental pieces of a barrier and then we define
+; the final barrier functions by assembling these macros in various combinations.
+;
+; Secondly, for performance reasons we believe it's advantageous to be able to modify the barrier functions
+; over the lifetime of the CLR. Specifically ARM has real problems reading the values of external globals (we
+; need two memory indirections to do this) so we'd like to be able to directly set the current values of
+; various GC globals (e.g. g_lowest_address and g_card_table) into the barrier code itself and then reset them
+; every time they change (the GC already calls the VM to inform it of these changes). To handle this without
+; creating too much fragility, such as hardcoding instruction offsets in the VM update code, we wrap write
+; barrier creation and GC globals access in a set of macros that create a table of descriptors describing each
+; offset that must be patched.
+;
+
+; Many of the following macros need a scratch register. Define a name for it here so it's easy to modify this
+; in the future.
+ GBLS __wbscratch
+__wbscratch SETS "r3"
+
+;
+; First define the meta-macros used to support dynamically patching write barriers.
+;
+
+ ; WRITEBARRIERAREA
+ ;
+ ; As we assemble each write barrier function we build a descriptor for the offsets within that function
+ ; that need to be patched at runtime. We write these descriptors into a read-only portion of memory. Use a
+ ; specially-named linker section for this to ensure all the descriptors are contiguous and form a table.
+ ; During the final link of the CLR this section should be merged into the regular read-only data section.
+ ;
+ ; This macro handles switching assembler output to the above section (similar to the TEXTAREA or
+ ; RODATAAREA macros defined by kxarm.h).
+ ;
+ MACRO
+ WRITEBARRIERAREA
+ AREA |.clrwb|,DATA,READONLY
+ MEND
+
+ ; BEGIN_WRITE_BARRIERS
+ ;
+ ; This macro must be invoked before any write barriers are defined. It sets up and exports a symbol,
+ ; g_rgWriteBarrierDescriptors, used by the VM to locate the start of the table describing the offsets in
+ ; each write barrier that need to be modified dynamically.
+ ;
+ MACRO
+ BEGIN_WRITE_BARRIERS
+
+ ; Define a global boolean to track whether we're currently in a BEGIN_WRITE_BARRIERS section. This is
+ ; used purely to catch incorrect attempts to define a write barrier outside the section.
+ GBLL __defining_write_barriers
+__defining_write_barriers SETL {true}
+
+ ; Switch to the descriptor table section.
+ WRITEBARRIERAREA
+
+ ; Define and export a symbol pointing to the start of the descriptor table.
+g_rgWriteBarrierDescriptors
+ EXPORT g_rgWriteBarrierDescriptors
+
+ ; Switch back to the code section.
+ TEXTAREA
+ MEND
+
+ ; END_WRITE_BARRIERS
+ ;
+ ; This macro must be invoked after all write barriers have been defined. It finalizes the creation of the
+ ; barrier descriptor table by writing a sentinel value at the end.
+ ;
+ MACRO
+ END_WRITE_BARRIERS
+
+ ASSERT __defining_write_barriers
+__defining_write_barriers SETL {false}
+
+ ; Switch to the descriptor table section.
+ WRITEBARRIERAREA
+
+ ; Write the sentinel value to the end of the descriptor table (a function entrypoint address of zero).
+ DCD 0
+
+ ; Switch back to the code section.
+ TEXTAREA
+ MEND
+
+ ; WRITE_BARRIER_ENTRY
+ ;
+ ; Declare the start of a write barrier function. Use similarly to NESTED_ENTRY. This is the only legal way
+ ; to declare a write barrier function.
+ ;
+ MACRO
+ WRITE_BARRIER_ENTRY $name
+
+ ; Ensure we're called inside a BEGIN_WRITE_BARRIERS section.
+ ASSERT __defining_write_barriers
+
+ ; Do the standard function declaration logic. Unwind info must be available for these
+ ; functions (to cover the case where the barrier AVs and the runtime needs to recover).
+ LEAF_ENTRY $name
+
+ ; Record the function name as it's used as the basis for unique label name creation in some of the
+ ; macros below.
+ GBLS __write_barrier_name
+__write_barrier_name SETS "$name"
+
+ ; Declare globals to collect the values of the offsets of instructions that load GC global values.
+ GBLA __g_lowest_address_offset
+ GBLA __g_highest_address_offset
+ GBLA __g_ephemeral_low_offset
+ GBLA __g_ephemeral_high_offset
+ GBLA __g_card_table_offset
+
+ ; Initialize the above offsets to 0xffff. The default of zero is unsatisfactory because we could
+ ; legally have an offset of zero and we need some way to distinguish unset values (both for debugging
+ ; and because some write barriers don't use all the globals).
+__g_lowest_address_offset SETA 0xffff
+__g_highest_address_offset SETA 0xffff
+__g_ephemeral_low_offset SETA 0xffff
+__g_ephemeral_high_offset SETA 0xffff
+__g_card_table_offset SETA 0xffff
+
+ MEND
+
+ ; WRITE_BARRIER_END
+ ;
+ ; The partner to WRITE_BARRIER_ENTRY, used like NESTED_END.
+ ;
+ MACRO
+ WRITE_BARRIER_END
+
+ LTORG ; force the literal pool to be emitted here so that copy code picks it up
+ ; Use the standard macro to end the function definition.
+ LEAF_END_MARKED $__write_barrier_name
+
+; Define a local string to hold the name of a label identifying the end of the write barrier function.
+ LCLS __EndLabelName
+__EndLabelName SETS "$__write_barrier_name":CC:"_End"
+
+ ; Switch to the descriptor table section.
+ WRITEBARRIERAREA
+
+ ; Emit the descriptor for this write barrier. The order of these fields must be kept in sync with the
+ ; definition of the WriteBarrierDescriptor structure in vm\arm\stubs.cpp.
+ DCD $__write_barrier_name
+ DCD $__EndLabelName
+ DCD __g_lowest_address_offset
+ DCD __g_highest_address_offset
+ DCD __g_ephemeral_low_offset
+ DCD __g_ephemeral_high_offset
+ DCD __g_card_table_offset
+
+ ; Switch back to the code section.
+ TEXTAREA
+
+ MEND
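+
+ ; In C terms each table entry emitted above looks roughly like this (it must mirror
+ ; the WriteBarrierDescriptor structure in vm\arm\stubs.cpp; the exact field names
+ ; there are authoritative):
+ ;
+ ;   struct WriteBarrierDescriptor {
+ ;       BYTE *pFuncStart;                      // barrier entry point
+ ;       BYTE *pFuncEnd;                        // matching <name>_End label
+ ;       DWORD dw_g_lowest_address_offset;      // each offset is 0xffff if unused
+ ;       DWORD dw_g_highest_address_offset;
+ ;       DWORD dw_g_ephemeral_low_offset;
+ ;       DWORD dw_g_ephemeral_high_offset;
+ ;       DWORD dw_g_card_table_offset;
+ ;   };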
+
+ ; LOAD_GC_GLOBAL
+ ;
+ ; Used any time we want to load the value of one of the supported GC globals into a register. This records
+ ; the offset of the instructions used to do this (a movw/movt pair) so we can modify the actual value
+ ; loaded at runtime.
+ ;
+ ; Note that a given write barrier can only load a given global once (which will be compile-time asserted
+ ; below).
+ ;
+ MACRO
+ LOAD_GC_GLOBAL $regName, $globalName
+
+ ; Map the GC global name to the name of the variable tracking the offset for this function.
+ LCLS __offset_name
+__offset_name SETS "__$globalName._offset"
+
+ ; Ensure that we only attempt to load this global at most once in the current barrier function (we
+ ; have this limitation purely because we only record one offset for each GC global).
+ ASSERT $__offset_name == 0xffff
+
+ ; Define a unique name for a label we're about to define used in the calculation of the current
+ ; function offset.
+ LCLS __offset_label_name
+__offset_label_name SETS "$__write_barrier_name$__offset_name"
+
+ ; Define the label.
+$__offset_label_name
+
+ ; Write the current function offset into the tracking variable.
+$__offset_name SETA ($__offset_label_name - $__FuncStartLabel)
+
+ ; Emit the instructions which will be patched to provide the value of the GC global (we start with a
+ ; value of zero, so the write barriers have to be patched at least once before first use).
+ movw $regName, #0
+ movt $regName, #0
+ MEND
+
+;
+; Now define the macros used in the bodies of write barrier implementations.
+;
+
+ ; UPDATE_GC_SHADOW
+ ;
+ ; Update the GC shadow heap to aid debugging (no-op unless WRITE_BARRIER_CHECK is defined). Assumes the
+ ; location being written lies on the GC heap (either we've already performed the dynamic check or this is
+ ; statically asserted by the JIT by calling the unchecked version of the write barrier).
+ ;
+ ; Input:
+ ; $ptrReg : register containing the location (in the real heap) to be updated
+ ; $valReg : register containing the value (an objref) to be written to the location above
+ ;
+ ; Output:
+ ; $__wbscratch : trashed
+ ;
+ MACRO
+ UPDATE_GC_SHADOW $ptrReg, $valReg
+#ifdef WRITE_BARRIER_CHECK
+
+ ; Need one additional temporary register to hold the shadow pointer. Assume r7 is OK for now (and
+ ; assert it). If this becomes a problem in the future the register choice can be parameterized.
+ LCLS pShadow
+pShadow SETS "r7"
+ ASSERT "$ptrReg" != "$pShadow"
+ ASSERT "$valReg" != "$pShadow"
+
+ push {$pShadow}
+
+ ; Compute address of shadow heap location:
+ ; pShadow = g_GCShadow + ($ptrReg - g_lowest_address)
+ ldr $__wbscratch, =g_lowest_address
+ ldr $__wbscratch, [$__wbscratch]
+ sub $pShadow, $ptrReg, $__wbscratch
+ ldr $__wbscratch, =$g_GCShadow
+ ldr $__wbscratch, [$__wbscratch]
+ add $pShadow, $__wbscratch
+
+ ; if (pShadow >= g_GCShadow) goto end
+ ldr $__wbscratch, =$g_GCShadowEnd
+ ldr $__wbscratch, [$__wbscratch]
+ cmp $pShadow, $__wbscratch
+ bhs %FT0
+
+ ; *pShadow = $valReg
+ str $valReg, [$pShadow]
+
+ ; Ensure that the write to the shadow heap occurs before the read from the GC heap so that race
+ ; conditions are caught by INVALIDGCVALUE.
+ dmb
+
+ ; if (*$ptrReg == $valReg) goto end
+ ldr $__wbscratch, [$ptrReg]
+ cmp $__wbscratch, $valReg
+ beq %FT0
+
+ ; *pShadow = INVALIDGCVALUE (0xcccccccd)
+ movw $__wbscratch, #0xcccd
+ movt $__wbscratch, #0xcccc
+ str $__wbscratch, [$pShadow]
+
+0
+ pop {$pShadow}
+#endif // WRITE_BARRIER_CHECK
+ MEND
+
+ ; UPDATE_CARD_TABLE
+ ;
+ ; Update the card table as necessary (if the object reference being assigned in the barrier refers to an
+ ; object in the ephemeral generation). Otherwise this macro is a no-op. Assumes the location being written
+ ; lies on the GC heap (either we've already performed the dynamic check or this is statically asserted by
+ ; the JIT by calling the unchecked version of the write barrier).
+ ;
+ ; Additionally this macro can produce a uni-proc or multi-proc variant of the code. This governs whether
+ ; we bother to check if the card table has been updated before making our own update (on an MP system it
+ ; can be helpful to perform this check to avoid cache line thrashing, on an SP system the code path length
+ ; is more important).
+ ;
+ ; Input:
+ ; $ptrReg : register containing the location to be updated
+ ; $valReg : register containing the value (an objref) to be written to the location above
+ ; $mp : boolean indicating whether the code will run on an MP system
+ ; $tmpReg : additional register that can be trashed (can alias $ptrReg or $valReg if needed)
+ ;
+ ; Output:
+ ; $tmpReg : trashed (defaults to $ptrReg)
+ ; $__wbscratch : trashed
+ ;
+ MACRO
+ UPDATE_CARD_TABLE $ptrReg, $valReg, $mp, $postGrow, $tmpReg
+ ASSERT "$ptrReg" != "$__wbscratch"
+ ASSERT "$valReg" != "$__wbscratch"
+ ASSERT "$tmpReg" != "$__wbscratch"
+
+ ; In most cases the callers of this macro are fine with scratching $ptrReg, the exception being the
+ ; ref write barrier, which wants to scratch $valReg instead. Ideally we could set $ptrReg as the
+ ; default for the $tmpReg parameter, but limitations in armasm won't allow that. Similarly it doesn't
+ ; seem to like us trying to redefine $tmpReg in the body of the macro. Instead we define a new local
+ ; string variable and set that either with the value of $tmpReg or $ptrReg if $tmpReg wasn't
+ ; specified.
+ LCLS tempReg
+ IF "$tmpReg" == ""
+tempReg SETS "$ptrReg"
+ ELSE
+tempReg SETS "$tmpReg"
+ ENDIF
+
+ ; Check whether the value object lies in the ephemeral generations. If not we don't have to update the
+ ; card table.
+ LOAD_GC_GLOBAL $__wbscratch, g_ephemeral_low
+ cmp $valReg, $__wbscratch
+ blo %FT0
+ ; Only in the post-grow case can a higher generation lie beyond the ephemeral segment
+ IF $postGrow
+ LOAD_GC_GLOBAL $__wbscratch, g_ephemeral_high
+ cmp $valReg, $__wbscratch
+ bhs %FT0
+ ENDIF
+
+ ; Update the card table.
+ LOAD_GC_GLOBAL $__wbscratch, g_card_table
+ add $__wbscratch, $__wbscratch, $ptrReg, lsr #10
+
+ ; On MP systems make sure the card hasn't already been set first to avoid thrashing cache lines
+ ; between CPUs.
+ ; @ARMTODO: Check that the conditional store doesn't unconditionally gain exclusive access to the
+ ; cache line anyway. Compare perf with a branch over and verify that omitting the compare on uniproc
+ ; machines really is a perf win.
+ IF $mp
+ ldrb $tempReg, [$__wbscratch]
+ cmp $tempReg, #0xff
+ movne $tempReg, #0xff
+ strbne $tempReg, [$__wbscratch]
+ ELSE
+ mov $tempReg, #0xff
+ strb $tempReg, [$__wbscratch]
+ ENDIF
+0
+ MEND
+
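+ ; The card table update itself is tiny; in C it amounts to (a sketch, assuming the
+ ; 1KB card granularity implied by the lsr #10 above):
+ ;
+ ;   if (val >= g_ephemeral_low && (!postGrow || val < g_ephemeral_high))
+ ;       g_card_table[(UINT_PTR)ptr >> 10] = 0xff;   // one card byte covers 1KB of heap
+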
+ ; CHECK_GC_HEAP_RANGE
+ ;
+ ; Verifies that the given value points into the GC heap range. If so the macro will fall through to the
+ ; following code. Otherwise (if the value points outside the GC heap) a branch to the supplied label will
+ ; be made.
+ ;
+ ; Input:
+ ; $ptrReg : register containing the location to be updated
+ ; $label : label branched to on a range check failure
+ ;
+ ; Output:
+ ; $__wbscratch : trashed
+ ;
+ MACRO
+ CHECK_GC_HEAP_RANGE $ptrReg, $label
+ ASSERT "$ptrReg" != "$__wbscratch"
+
+ LOAD_GC_GLOBAL $__wbscratch, g_lowest_address
+ cmp $ptrReg, $__wbscratch
+ blo $label
+ LOAD_GC_GLOBAL $__wbscratch, g_highest_address
+ cmp $ptrReg, $__wbscratch
+ bhs $label
+ MEND
+
+;
+; Finally define the write barrier functions themselves. Currently we don't provide variations that use
+; different input registers. If the JIT wants this at a later stage in order to improve code quality it would
+; be a relatively simple change to implement via an additional macro parameter to WRITE_BARRIER_ENTRY.
+;
+; The calling convention for the first batch of write barriers is:
+;
+; On entry:
+; r0 : the destination address (LHS of the assignment)
+; r1 : the object reference (RHS of the assignment)
+;
+; On exit:
+; r0 : trashed
+; $__wbscratch : trashed
+;
+
+ ; If you update any of the write barriers be sure to update the sizes of the
+ ; patchable write barriers as well; see ValidateWriteBarriers().
+
+ ; The write barriers are macros taking the following arguments:
+ ; $name: Name of the write barrier
+ ; $mp: {true} for multi-proc, {false} otherwise
+ ; $post: {true} for post-grow version, {false} otherwise
+
+ MACRO
+ JIT_WRITEBARRIER $name, $mp, $post
+ WRITE_BARRIER_ENTRY $name
+ IF $mp
+ dmb ; Perform a memory barrier
+ ENDIF
+ str r1, [r0] ; Write the reference
+ UPDATE_GC_SHADOW r0, r1 ; Update the shadow GC heap for debugging
+ UPDATE_CARD_TABLE r0, r1, $mp, $post ; Update the card table if necessary
+ bx lr
+ WRITE_BARRIER_END
+ MEND
+
+ MACRO
+ JIT_CHECKEDWRITEBARRIER_SP $name, $post
+ WRITE_BARRIER_ENTRY $name
+ str r1, [r0] ; Write the reference
+ CHECK_GC_HEAP_RANGE r0, %F1 ; Check whether the destination is in the GC heap
+ UPDATE_GC_SHADOW r0, r1 ; Update the shadow GC heap for debugging
+ UPDATE_CARD_TABLE r0, r1, {false}, $post ; Update the card table if necessary
+1
+ bx lr
+ WRITE_BARRIER_END
+ MEND
+
+ MACRO
+ JIT_CHECKEDWRITEBARRIER_MP $name, $post
+ WRITE_BARRIER_ENTRY $name
+ CHECK_GC_HEAP_RANGE r0, %F1 ; Check whether the destination is in the GC heap
+ dmb ; Perform a memory barrier
+ str r1, [r0] ; Write the reference
+ UPDATE_GC_SHADOW r0, r1 ; Update the shadow GC heap for debugging
+ UPDATE_CARD_TABLE r0, r1, {true}, $post ; Update the card table if necessary
+ bx lr
+1
+ str r1, [r0] ; Write the reference
+ bx lr
+ WRITE_BARRIER_END
+ MEND
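+
+ ; Net effect of the checked barriers, as a C sketch (illustrative; in the real bodies
+ ; the GC globals are patched directly into the code at runtime):
+ ;
+ ;   void JIT_CheckedWriteBarrier(Object **dst, Object *ref)
+ ;   {
+ ;       *dst = ref;                             // the MP variant issues a dmb first
+ ;       if ((BYTE *)dst < g_lowest_address || (BYTE *)dst >= g_highest_address)
+ ;           return;                             // destination not in the GC heap
+ ;       UpdateGCShadow(dst, ref);               // debug builds only
+ ;       UpdateCardTable(dst, ref);              // as sketched above
+ ;   }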
+
+; The ByRef write barriers have a slightly different interface:
+;
+; On entry:
+; r0 : the destination address (object reference written here)
+; r1 : the source address (points to object reference to write)
+;
+; On exit:
+; r0 : incremented by 4
+; r1 : incremented by 4
+; r2 : trashed
+; $__wbscratch : trashed
+;
+ MACRO
+ JIT_BYREFWRITEBARRIER $name, $mp, $post
+ WRITE_BARRIER_ENTRY $name
+ IF $mp
+ dmb ; Perform a memory barrier
+ ENDIF
+ ldr r2, [r1] ; Load target object ref from source pointer
+ str r2, [r0] ; Write the reference to the destination pointer
+ CHECK_GC_HEAP_RANGE r0, %F1 ; Check whether the destination is in the GC heap
+ UPDATE_GC_SHADOW r0, r2 ; Update the shadow GC heap for debugging
+ UPDATE_CARD_TABLE r0, r2, $mp, $post, r2 ; Update the card table if necessary (trash r2 rather than r0)
+1
+ add r0, #4 ; Increment the destination pointer by 4
+ add r1, #4 ; Increment the source pointer by 4
+ bx lr
+ WRITE_BARRIER_END
+ MEND
+
+ BEGIN_WRITE_BARRIERS
+
+ ; There are 4 versions of each write barrier: a 2x2 combination of multi-proc/single-proc and pre-/post-grow versions
+ JIT_WRITEBARRIER JIT_WriteBarrier_SP_Pre, {false}, {false}
+ JIT_WRITEBARRIER JIT_WriteBarrier_SP_Post, {false}, {true}
+ JIT_WRITEBARRIER JIT_WriteBarrier_MP_Pre, {true}, {false}
+ JIT_WRITEBARRIER JIT_WriteBarrier_MP_Post, {true}, {true}
+
+ JIT_CHECKEDWRITEBARRIER_SP JIT_CheckedWriteBarrier_SP_Pre, {false}
+ JIT_CHECKEDWRITEBARRIER_SP JIT_CheckedWriteBarrier_SP_Post, {true}
+ JIT_CHECKEDWRITEBARRIER_MP JIT_CheckedWriteBarrier_MP_Pre, {false}
+ JIT_CHECKEDWRITEBARRIER_MP JIT_CheckedWriteBarrier_MP_Post, {true}
+
+ JIT_BYREFWRITEBARRIER JIT_ByRefWriteBarrier_SP_Pre, {false}, {false}
+ JIT_BYREFWRITEBARRIER JIT_ByRefWriteBarrier_SP_Post, {false}, {true}
+ JIT_BYREFWRITEBARRIER JIT_ByRefWriteBarrier_MP_Pre, {true}, {false}
+ JIT_BYREFWRITEBARRIER JIT_ByRefWriteBarrier_MP_Post, {true}, {true}
+
+ END_WRITE_BARRIERS
+
+#ifdef FEATURE_READYTORUN
+
+ NESTED_ENTRY DelayLoad_MethodCall_FakeProlog
+
+ ; Match what the lazy thunk has pushed. The actual method arguments will be spilled later.
+ PROLOG_PUSH {r1-r3}
+
+ ; This is where execution really starts.
+DelayLoad_MethodCall
+ EXPORT DelayLoad_MethodCall
+
+ PROLOG_PUSH {r0}
+
+ PROLOG_WITH_TRANSITION_BLOCK 0x0, {true}, DoNotPushArgRegs
+
+ ; Load the helper arguments
+ ldr r5, [sp,#(__PWTB_TransitionBlock+10*4)] ; pModule
+ ldr r6, [sp,#(__PWTB_TransitionBlock+11*4)] ; sectionIndex
+ ldr r7, [sp,#(__PWTB_TransitionBlock+12*4)] ; indirection
+
+ ; Spill the actual method arguments
+ str r1, [sp,#(__PWTB_TransitionBlock+10*4)]
+ str r2, [sp,#(__PWTB_TransitionBlock+11*4)]
+ str r3, [sp,#(__PWTB_TransitionBlock+12*4)]
+
+ add r0, sp, #__PWTB_TransitionBlock ; pTransitionBlock
+
+ mov r1, r7 ; pIndirection
+ mov r2, r6 ; sectionIndex
+ mov r3, r5 ; pModule
+
+ bl ExternalMethodFixupWorker
+
+ ; mov the address we patched to in R12 so that we can tail call to it
+ mov r12, r0
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+
+ ; Share the patch label
+ EPILOG_BRANCH ExternalMethodFixupPatchLabel
+
+ NESTED_END
+
+
+ MACRO
+ DynamicHelper $frameFlags, $suffix
+
+ GBLS __FakePrologName
+__FakePrologName SETS "DelayLoad_Helper":CC:"$suffix":CC:"_FakeProlog"
+
+ NESTED_ENTRY $__FakePrologName
+
+ ; Match what the lazy thunk has pushed. The actual method arguments will be spilled later.
+ PROLOG_PUSH {r1-r3}
+
+ GBLS __RealName
+__RealName SETS "DelayLoad_Helper":CC:"$suffix"
+
+ ; This is where execution really starts.
+$__RealName
+ EXPORT $__RealName
+
+ PROLOG_PUSH {r0}
+
+ PROLOG_WITH_TRANSITION_BLOCK 0x4, {true}, DoNotPushArgRegs
+
+ ; Load the helper arguments
+ ldr r5, [sp,#(__PWTB_TransitionBlock+10*4)] ; pModule
+ ldr r6, [sp,#(__PWTB_TransitionBlock+11*4)] ; sectionIndex
+ ldr r7, [sp,#(__PWTB_TransitionBlock+12*4)] ; indirection
+
+ ; Spill the actual method arguments
+ str r1, [sp,#(__PWTB_TransitionBlock+10*4)]
+ str r2, [sp,#(__PWTB_TransitionBlock+11*4)]
+ str r3, [sp,#(__PWTB_TransitionBlock+12*4)]
+
+ add r0, sp, #__PWTB_TransitionBlock ; pTransitionBlock
+
+ mov r1, r7 ; pIndirection
+ mov r2, r6 ; sectionIndex
+ mov r3, r5 ; pModule
+
+ mov r4, $frameFlags
+ str r4, [sp,#0]
+
+ bl DynamicHelperWorker
+
+ cbnz r0, %FT0
+ ldr r0, [sp,#(__PWTB_TransitionBlock+9*4)] ; The result is stored in the argument area of the transition block
+
+ EPILOG_WITH_TRANSITION_BLOCK_RETURN
+
+0
+ mov r12, r0
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ EPILOG_BRANCH_REG r12
+
+ NESTED_END
+
+ MEND
+
+ DynamicHelper DynamicHelperFrameFlags_Default
+ DynamicHelper DynamicHelperFrameFlags_ObjectArg, _Obj
+ DynamicHelper DynamicHelperFrameFlags_ObjectArg | DynamicHelperFrameFlags_ObjectArg2, _ObjObj
+
+#endif // FEATURE_READYTORUN
+
+; Must be at very end of file
+ END
diff --git a/src/vm/arm/asmmacros.h b/src/vm/arm/asmmacros.h
new file mode 100644
index 0000000000..a70d8f7906
--- /dev/null
+++ b/src/vm/arm/asmmacros.h
@@ -0,0 +1,162 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+;; ==++==
+;;
+
+;;
+;; ==--==
+
+;-----------------------------------------------------------------------------
+; Macro used to assign an alternate name to a symbol containing characters normally disallowed in a symbol
+; name (e.g. C++ decorated names).
+ MACRO
+ SETALIAS $name, $symbol
+ GBLS $name
+$name SETS "|$symbol|"
+ MEND
+
+
+;-----------------------------------------------------------------------------
+; Macro used to end a function with explicit _End label
+ MACRO
+ LEAF_END_MARKED $FuncName
+
+ LCLS __EndLabelName
+__EndLabelName SETS "$FuncName":CC:"_End"
+ EXPORT $__EndLabelName
+$__EndLabelName
+
+ LEAF_END $FuncName
+
+ MEND
+
+;-----------------------------------------------------------------------------
+; Macro use for enabling C++ to know where to patch code at runtime.
+ MACRO
+ PATCH_LABEL $FuncName
+$FuncName
+ EXPORT $FuncName
+
+ MEND
+
+;-----------------------------------------------------------------------------
+; Macro used to check (in debug builds only) whether the stack is 64-bit aligned (a requirement before calling
+; out into C++/OS code). Invoke this directly after your prolog (if the stack frame size is fixed) or directly
+; before a call (if you have a frame pointer and a dynamic stack). A breakpoint will be invoked if the stack
+; is misaligned.
+;
+ MACRO
+ CHECK_STACK_ALIGNMENT
+
+#ifdef _DEBUG
+ push {r0}
+ add r0, sp, #4
+ tst r0, #7
+ pop {r0}
+ beq %F0
+ EMIT_BREAKPOINT
+0
+#endif
+ MEND
+
+;-----------------------------------------------------------------------------
+; The following group of macros assist in implementing prologs and epilogs for methods that set up some
+; subclass of TransitionFrame. They ensure that the SP is 64-bit aligned at the conclusion of the prolog and
+; provide a helper macro to locate the start of the NegInfo (if there is one) for the frame.
+
+;-----------------------------------------------------------------------------
+; Define the prolog for a TransitionFrame-based method. This macro should be called first in the method and
+; comprises the entire prolog (i.e. don't modify SP after calling this). Takes the amount of extra local
+; storage required (which may be zero), whether to spill the floating point argument registers, and whether
+; to push the integer argument registers. No initialization of the frame is done beyond callee saved
+; registers and (non-floating point) argument registers.
+;
+ MACRO
+ PROLOG_WITH_TRANSITION_BLOCK $extraLocals, $SaveFPArgs, $PushArgRegs
+
+ GBLA __PWTB_FloatArgumentRegisters
+ GBLA __PWTB_StackAlloc
+ GBLA __PWTB_TransitionBlock
+ GBLL __PWTB_SaveFPArgs
+
+ IF "$SaveFPArgs" != ""
+__PWTB_SaveFPArgs SETL $SaveFPArgs
+ ELSE
+__PWTB_SaveFPArgs SETL {true}
+ ENDIF
+
+ IF "$extraLocals" != ""
+__PWTB_FloatArgumentRegisters SETA $extraLocals
+ ELSE
+__PWTB_FloatArgumentRegisters SETA 0
+ ENDIF
+
+ IF __PWTB_SaveFPArgs
+
+ IF __PWTB_FloatArgumentRegisters:MOD:8 != 0
+__PWTB_FloatArgumentRegisters SETA __PWTB_FloatArgumentRegisters + 4
+ ENDIF
+__PWTB_TransitionBlock SETA __PWTB_FloatArgumentRegisters + (SIZEOF__FloatArgumentRegisters + 4) ; padding
+
+ ELSE
+
+ IF __PWTB_FloatArgumentRegisters:MOD:8 == 0
+__PWTB_FloatArgumentRegisters SETA __PWTB_FloatArgumentRegisters + 4 ; padding
+ ENDIF
+__PWTB_TransitionBlock SETA __PWTB_FloatArgumentRegisters
+
+ ENDIF
+
+__PWTB_StackAlloc SETA __PWTB_TransitionBlock
+
+ IF "$PushArgRegs" != "DoNotPushArgRegs"
+ ; Spill argument registers.
+ PROLOG_PUSH {r0-r3}
+ ENDIF
+
+ ; Spill callee saved registers and return address.
+ PROLOG_PUSH {r4-r11,lr}
+
+ ; Allocate space for the rest of the frame
+ PROLOG_STACK_ALLOC __PWTB_StackAlloc
+
+ IF __PWTB_SaveFPArgs
+ add r6, sp, #(__PWTB_FloatArgumentRegisters)
+ vstm r6, {s0-s15}
+ ENDIF
+
+ CHECK_STACK_ALIGNMENT
+ MEND
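+
+; The resulting frame layout, from low to high addresses, is roughly (a sketch; the
+; exact padding depends on the values computed above):
+;
+;   sp + 0                             : extra locals ($extraLocals bytes, if any)
+;   sp + __PWTB_FloatArgumentRegisters : spilled s0-s15 (only if $SaveFPArgs), plus padding
+;   sp + __PWTB_TransitionBlock        : r4-r11, lr (callee saved registers + return address)
+;   above that                         : r0-r3 (unless DoNotPushArgRegs)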
+
+;-----------------------------------------------------------------------------
+; Provides a matching epilog to PROLOG_WITH_TRANSITION_BLOCK and ends by preparing for tail-calling.
+; Since this is a tail call, argument registers are restored.
+;
+ MACRO
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+
+ IF __PWTB_SaveFPArgs
+ add r6, sp, #(__PWTB_FloatArgumentRegisters)
+ vldm r6, {s0-s15}
+ ENDIF
+
+ EPILOG_STACK_FREE __PWTB_StackAlloc
+ EPILOG_POP {r4-r11,lr}
+ EPILOG_POP {r0-r3}
+ MEND
+
+;-----------------------------------------------------------------------------
+; Provides a matching epilog to PROLOG_WITH_TRANSITION_BLOCK and ends by returning to the original caller.
+; Since this is not a tail call, argument registers are not restored.
+;
+ MACRO
+ EPILOG_WITH_TRANSITION_BLOCK_RETURN
+
+ EPILOG_STACK_FREE __PWTB_StackAlloc
+ EPILOG_POP {r4-r11,lr}
+ EPILOG_STACK_FREE 16
+ EPILOG_RETURN
+ MEND
+
diff --git a/src/vm/arm/cgencpu.h b/src/vm/arm/cgencpu.h
new file mode 100644
index 0000000000..64619fd19f
--- /dev/null
+++ b/src/vm/arm/cgencpu.h
@@ -0,0 +1,1334 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+#ifndef _TARGET_ARM_
+#error Should only include "cgencpu.h" for ARM builds
+#endif
+
+#ifndef __cgencpu_h__
+#define __cgencpu_h__
+
+#include "utilcode.h"
+#include "tls.h"
+
+// preferred alignment for data
+#define DATA_ALIGNMENT 4
+
+#define DISPATCH_STUB_FIRST_WORD 0xf8d0
+#define RESOLVE_STUB_FIRST_WORD 0xf8d0
+
+class MethodDesc;
+class FramedMethodFrame;
+class Module;
+struct DeclActionInfo;
+class ComCallMethodDesc;
+class BaseDomain;
+class ZapNode;
+struct ArgLocDesc;
+
+#define USE_REDIRECT_FOR_GCSTRESS
+
+// CPU-dependent functions
+Stub * GenerateInitPInvokeFrameHelper();
+
+EXTERN_C void checkStack(void);
+
+#ifdef CROSSGEN_COMPILE
+#define GetEEFuncEntryPoint(pfn) 0x1001
+#else
+#define GetEEFuncEntryPoint(pfn) GFN_TADDR(pfn)
+#endif
+
+//**********************************************************************
+
+#define COMMETHOD_PREPAD 12 // # extra bytes to allocate in addition to sizeof(ComCallMethodDesc)
+#ifdef FEATURE_COMINTEROP
+#define COMMETHOD_CALL_PRESTUB_SIZE 12
+#define COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET 8 // the offset of the call target address inside the prestub
+#endif // FEATURE_COMINTEROP
+
+#define STACK_ALIGN_SIZE 4
+
+#define JUMP_ALLOCATE_SIZE 8 // # bytes to allocate for a jump instruction
+#define BACK_TO_BACK_JUMP_ALLOCATE_SIZE 8 // # bytes to allocate for a back to back jump instruction
+
+//#define HAS_COMPACT_ENTRYPOINTS 1
+
+#define HAS_NDIRECT_IMPORT_PRECODE 1
+
+#define USE_INDIRECT_CODEHEADER
+
+#ifdef FEATURE_REMOTING
+#define HAS_REMOTING_PRECODE 1
+#endif
+
+EXTERN_C void getFPReturn(int fpSize, INT64 *pRetVal);
+EXTERN_C void setFPReturn(int fpSize, INT64 retVal);
+
+#define HAS_FIXUP_PRECODE 1
+#define HAS_FIXUP_PRECODE_CHUNKS 1
+
+// A ThisPtrRetBufPrecode is necessary for closed delegates over static methods with a return buffer
+#define HAS_THISPTR_RETBUF_PRECODE 1
+
+#define CODE_SIZE_ALIGN 4
+#define CACHE_LINE_SIZE 32 // As per Intel Optimization Manual the cache line size is 32 bytes
+#define LOG2SLOT LOG2_PTRSIZE
+
+#define ENREGISTERED_RETURNTYPE_MAXSIZE 32 // bytes (maximum HFA size is 4 doubles)
+#define ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE 4 // bytes
+
+#define CALLDESCR_ARGREGS 1 // CallDescrWorker has ArgumentRegister parameter
+#define CALLDESCR_FPARGREGS 1 // CallDescrWorker has FloatArgumentRegisters parameter
+
+// Max size of optimized TLS helpers
+#define TLS_GETTER_MAX_SIZE 0x10
+
+// Given a return address retrieved during stackwalk,
+// this is the offset by which it should be decremented to arrive at the callsite.
+#define STACKWALK_CONTROLPC_ADJUST_OFFSET 2
+
+//=======================================================================
+// IMPORTANT: This value is used to figure out how much to allocate
+// for a fixed array of FieldMarshaler's. That means it must be at least
+// as large as the largest FieldMarshaler subclass. This requirement
+// is guarded by an assert.
+//=======================================================================
+#define MAXFIELDMARSHALERSIZE 24
+
+//**********************************************************************
+// Parameter size
+//**********************************************************************
+
+typedef INT32 StackElemType;
+#define STACK_ELEM_SIZE sizeof(StackElemType)
+
+// !! This expression assumes STACK_ELEM_SIZE is a power of 2.
+#define StackElemSize(parmSize) (((parmSize) + STACK_ELEM_SIZE - 1) & ~((ULONG)(STACK_ELEM_SIZE - 1)))
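+// For example, with STACK_ELEM_SIZE == 4: StackElemSize(1) == 4,
+// StackElemSize(5) == 8 and StackElemSize(8) == 8 (sizes round up to the
+// next stack-slot boundary).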
+
+//**********************************************************************
+// Frames
+//**********************************************************************
+
+//--------------------------------------------------------------------
+// This represents the callee saved (non-volatile) registers saved as
+// of a FramedMethodFrame.
+//--------------------------------------------------------------------
+typedef DPTR(struct CalleeSavedRegisters) PTR_CalleeSavedRegisters;
+struct CalleeSavedRegisters {
+ INT32 r4, r5, r6, r7, r8, r9, r10;
+ INT32 r11; // frame pointer
+ INT32 r14; // link register
+};
+
+//--------------------------------------------------------------------
+// This represents the arguments that are stored in volatile registers.
+// This should not overlap the CalleeSavedRegisters since those are already
+// saved separately and it would be wasteful to save the same register twice.
+// If we do use a non-volatile register as an argument, then the ArgIterator
+// will probably have to communicate this back to the PromoteCallerStack
+// routine to avoid a double promotion.
+//--------------------------------------------------------------------
+typedef DPTR(struct ArgumentRegisters) PTR_ArgumentRegisters;
+struct ArgumentRegisters {
+ INT32 r[4]; // r0, r1, r2, r3
+};
+#define NUM_ARGUMENT_REGISTERS 4
+
+//--------------------------------------------------------------------
+// This represents the floating point argument registers which are saved
+// as part of the NegInfo for a FramedMethodFrame. Note that these
+// might not be saved by all stubs: typically only those that call into
+// C++ helpers will need to preserve the values in these volatile
+// registers.
+//--------------------------------------------------------------------
+typedef DPTR(struct FloatArgumentRegisters) PTR_FloatArgumentRegisters;
+struct FloatArgumentRegisters {
+ union
+ {
+ float s[16]; // s0-s15
+ double d[8]; // d0-d7
+ };
+};
+
+// forward decl
+struct REGDISPLAY;
+typedef REGDISPLAY *PREGDISPLAY;
+
+// Sufficient context for Try/Catch restoration.
+struct EHContext {
+ INT32 r[16]; // note: includes r15(pc)
+ void Setup(PCODE resumePC, PREGDISPLAY regs);
+
+ inline TADDR GetSP() {
+ LIMITED_METHOD_CONTRACT;
+ return (TADDR)r[13];
+ }
+ inline void SetSP(LPVOID esp) {
+ LIMITED_METHOD_CONTRACT;
+ r[13] = (INT32)(size_t)esp;
+ }
+
+ inline LPVOID GetFP() {
+ LIMITED_METHOD_CONTRACT;
+ return (LPVOID)(UINT_PTR)r[11];
+ }
+
+ inline void SetArg(LPVOID arg) {
+ LIMITED_METHOD_CONTRACT;
+ r[0] = (INT32)(size_t)arg;
+ }
+};
+
+#define ARGUMENTREGISTERS_SIZE sizeof(ArgumentRegisters)
+
+//**********************************************************************
+// Exception handling
+//**********************************************************************
+
+inline PCODE GetIP(const T_CONTEXT * context) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return PCODE(context->Pc);
+}
+
+inline void SetIP(T_CONTEXT *context, PCODE eip) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ context->Pc = DWORD(eip);
+}
+
+inline TADDR GetSP(const T_CONTEXT * context) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TADDR(context->Sp);
+}
+
+inline PCODE GetLR(const T_CONTEXT * context) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return PCODE(context->Lr);
+}
+
+extern "C" LPVOID __stdcall GetCurrentSP();
+
+inline void SetSP(T_CONTEXT *context, TADDR esp) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ context->Sp = DWORD(esp);
+}
+
+inline void SetFP(T_CONTEXT *context, TADDR ebp) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ context->R11 = DWORD(ebp);
+}
+
+inline TADDR GetFP(const T_CONTEXT * context)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (TADDR)(context->R11);
+}
+
+inline void ClearITState(T_CONTEXT *context) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ context->Cpsr = context->Cpsr & 0xf9ff03ff;
+}
+
+#ifdef FEATURE_COMINTEROP
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target);
+#endif // FEATURE_COMINTEROP
+
+//------------------------------------------------------------------------
+inline void emitJump(LPBYTE pBuffer, LPVOID target)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // The PC-relative load we emit below requires 4-byte alignment for the offset to be calculated correctly.
+ _ASSERTE(((UINT_PTR)pBuffer & 3) == 0);
+
+ DWORD * pCode = (DWORD *)pBuffer;
+
+ // ldr pc, [pc, #0]
+ pCode[0] = 0xf000f8df;
+ pCode[1] = (DWORD)target;
+}
+
+//------------------------------------------------------------------------
+// Given the same pBuffer that was used by emitJump this method
+// decodes the instructions and returns the jump target
+inline PCODE decodeJump(PCODE pCode)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ TADDR pInstr = PCODEToPINSTR(pCode);
+
+ return *dac_cast<PTR_PCODE>(pInstr + sizeof(DWORD));
+}
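+
+// The layout produced by emitJump is thus two DWORDs: the "ldr pc, [pc, #0]"
+// instruction at pBuffer and the raw target address at pBuffer + 4 (in Thumb
+// a 32-bit instruction reads PC as its own address + 4, so the load fetches
+// exactly that slot). decodeJump above simply reads that second DWORD back.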
+
+//
+// On IA64 back-to-back jumps should be separated by a nop bundle to get
+// the best performance from the hardware's branch prediction logic.
+// For all other platforms back-to-back jumps don't require anything special.
+// That is why we have these two wrapper functions that call emitJump and decodeJump.
+//
+
+//------------------------------------------------------------------------
+inline BOOL isJump(PCODE pCode)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ TADDR pInstr = PCODEToPINSTR(pCode);
+
+ return *dac_cast<PTR_DWORD>(pInstr) == 0xf000f8df;
+}
+
+//------------------------------------------------------------------------
+inline BOOL isBackToBackJump(PCODE pBuffer)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return isJump(pBuffer);
+}
+
+//------------------------------------------------------------------------
+inline void emitBackToBackJump(LPBYTE pBuffer, LPVOID target)
+{
+ WRAPPER_NO_CONTRACT;
+ emitJump(pBuffer, target);
+}
+
+//------------------------------------------------------------------------
+inline PCODE decodeBackToBackJump(PCODE pBuffer)
+{
+ WRAPPER_NO_CONTRACT;
+ return decodeJump(pBuffer);
+}
+
+//----------------------------------------------------------------------
+#include "stublink.h"
+struct ArrayOpScript;
+
+#define THUMB_CODE 1
+
+inline BOOL IsThumbCode(PCODE pCode)
+{
+ return (pCode & THUMB_CODE) != 0;
+}
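+
+// All code the runtime generates on ARM is Thumb, so a valid PCODE is the
+// instruction address with bit 0 set; e.g. a stub placed at the (hypothetical)
+// address 0x00410000 would be invoked through the PCODE 0x00410001.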
+
+struct ThumbReg
+{
+ int reg;
+ ThumbReg(int reg):reg(reg)
+ {
+ _ASSERTE(0 <= reg && reg < 16);
+ }
+
+ operator int ()
+ {
+ return reg;
+ }
+
+ int operator == (ThumbReg other)
+ {
+ return reg == other.reg;
+ }
+
+ int operator != (ThumbReg other)
+ {
+ return reg != other.reg;
+ }
+
+ WORD Mask() const
+ {
+ return 1 << reg;
+ }
+
+};
+
+struct ThumbCond
+{
+ int cond;
+ ThumbCond(int cond):cond(cond)
+ {
+ _ASSERTE(0 <= cond && cond < 16);
+ }
+};
+
+struct ThumbVFPSingleReg
+{
+ int reg;
+ ThumbVFPSingleReg(int reg):reg(reg)
+ {
+ _ASSERTE(0 <= reg && reg < 31);
+ }
+
+ operator int ()
+ {
+ return reg;
+ }
+
+ int operator == (ThumbVFPSingleReg other)
+ {
+ return reg == other.reg;
+ }
+
+ int operator != (ThumbVFPSingleReg other)
+ {
+ return reg != other.reg;
+ }
+
+ WORD Mask() const
+ {
+ return 1 << reg;
+ }
+
+};
+
+struct ThumbVFPDoubleReg
+{
+ int reg;
+ ThumbVFPDoubleReg(int reg):reg(reg)
+ {
+ _ASSERTE(0 <= reg && reg < 31);
+ }
+
+ operator int ()
+ {
+ return reg;
+ }
+
+ int operator == (ThumbVFPDoubleReg other)
+ {
+ return reg == other.reg;
+ }
+
+ int operator != (ThumbVFPDoubleReg other)
+ {
+ return reg != other.reg;
+ }
+
+ WORD Mask() const
+ {
+ return 1 << reg;
+ }
+};
+
+const ThumbReg thumbRegFp = ThumbReg(11);
+const ThumbReg thumbRegSp = ThumbReg(13);
+const ThumbReg thumbRegLr = ThumbReg(14);
+const ThumbReg thumbRegPc = ThumbReg(15);
+
+const ThumbCond thumbCondEq = ThumbCond(0);
+const ThumbCond thumbCondNe = ThumbCond(1);
+const ThumbCond thumbCondCs = ThumbCond(2);
+const ThumbCond thumbCondCc = ThumbCond(3);
+const ThumbCond thumbCondMi = ThumbCond(4);
+const ThumbCond thumbCondPl = ThumbCond(5);
+const ThumbCond thumbCondVs = ThumbCond(6);
+const ThumbCond thumbCondVc = ThumbCond(7);
+const ThumbCond thumbCondHi = ThumbCond(8);
+const ThumbCond thumbCondLs = ThumbCond(9);
+const ThumbCond thumbCondGe = ThumbCond(10);
+const ThumbCond thumbCondLt = ThumbCond(11);
+const ThumbCond thumbCondGt = ThumbCond(12);
+const ThumbCond thumbCondLe = ThumbCond(13);
+const ThumbCond thumbCondAl = ThumbCond(14);
+
+class StubLinkerCPU : public StubLinker
+{
+public:
+ static void Init();
+
+ void ThumbEmitProlog(UINT cCalleeSavedRegs, UINT cbStackFrame, BOOL fPushArgRegs)
+ {
+ _ASSERTE(!m_fProlog);
+
+ // Record the parameters of this prolog so that we can generate a matching epilog and unwind info.
+ DescribeProlog(cCalleeSavedRegs, cbStackFrame, fPushArgRegs);
+
+ // Trivial prologs (which is all that we support initially) consist of between one and three
+ // instructions.
+
+ // 1) Push argument registers. This is all or nothing (if we push, we push R0-R3).
+ if (fPushArgRegs)
+ {
+ // push {r0-r3}
+ ThumbEmitPush(ThumbReg(0).Mask() | ThumbReg(1).Mask() | ThumbReg(2).Mask() | ThumbReg(3).Mask());
+ }
+
+        // 2) Push callee saved registers. We always start pushing at R4, and only save consecutive registers
+ // from there (max is R11). Additionally we always assume LR is saved for these types of prolog.
+ // push {r4-rX,lr}
+ WORD wRegisters = thumbRegLr.Mask();
+ for (unsigned int i = 4; i < (4 + cCalleeSavedRegs); i++)
+ wRegisters |= ThumbReg(i).Mask();
+ ThumbEmitPush(wRegisters);
+
+ // 3) Reserve space on the stack for the rest of the frame.
+ if (cbStackFrame)
+ {
+ // sub sp, #cbStackFrame
+ ThumbEmitSubSp(cbStackFrame);
+ }
+ }
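+
+    // For example, ThumbEmitProlog(4, 8, TRUE) emits:
+    //     push {r0-r3}
+    //     push {r4-r7,lr}
+    //     sub  sp, #8
+    // and the matching ThumbEmitEpilog emits:
+    //     add sp, #8 / pop {r4-r7,lr} / add sp, #16 / bx lr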
+
+ void ThumbEmitEpilog()
+ {
+ // Generate an epilog matching a prolog generated by ThumbEmitProlog.
+ _ASSERTE(m_fProlog);
+
+ // If additional stack space for a frame was allocated remove it now.
+ if (m_cbStackFrame)
+ {
+ // add sp, #m_cbStackFrame
+ ThumbEmitAddSp(m_cbStackFrame);
+ }
+
+ // Pop callee saved registers (we always have at least LR). If no argument registers were saved then
+ // we can restore LR back into PC and we're done. Otherwise LR needs to be restored into LR.
+ // pop {r4-rX,lr|pc}
+ WORD wRegisters = m_fPushArgRegs ? thumbRegLr.Mask() : thumbRegPc.Mask();
+ for (unsigned int i = 4; i < (4 + m_cCalleeSavedRegs); i++)
+ wRegisters |= ThumbReg(i).Mask();
+ ThumbEmitPop(wRegisters);
+
+ if (!m_fPushArgRegs)
+ return;
+
+ // We pushed the argument registers. These aren't restored, but we need to reclaim the stack space.
+ // add sp, #16
+ ThumbEmitAddSp(16);
+
+ // Return. The return address has been restored into LR at this point.
+ // bx lr
+ ThumbEmitJumpRegister(thumbRegLr);
+ }
+
+ void ThumbEmitGetThread(TLSACCESSMODE mode, ThumbReg dest);
+
+ void ThumbEmitNop()
+ {
+ // nop
+ Emit16(0xbf00);
+ }
+
+ void ThumbEmitBreakpoint()
+ {
+ // Permanently undefined instruction #0xfe (see ARMv7-A A6.2.6). The debugger seems to accept this as
+ // a reasonable breakpoint substitute (it's what DebugBreak uses). Bkpt #0, on the other hand, always
+ // seems to flow directly to the kernel debugger (even if we ignore it there it doesn't seem to be
+ // picked up by the user mode debugger).
+ Emit16(0xdefe);
+ }
+
+ void ThumbEmitMovConstant(ThumbReg dest, int constant)
+ {
+ _ASSERT(dest != thumbRegPc);
+
+        // Emit a 2-byte instruction when dest reg < 8 and 0 <= constant < 256
+ if(dest <= 7 && constant < 256 && constant >= 0)
+ {
+ Emit16((WORD)(0x2000 | dest<<8 | (WORD)constant));
+ }
+ else // emit 4 byte instructions
+ {
+ WORD wConstantLow = (WORD)(constant & 0xffff);
+ WORD wConstantHigh = (WORD)(constant >> 16);
+
+ // movw regDest, #wConstantLow
+ Emit16((WORD)(0xf240 | (wConstantLow >> 12) | ((wConstantLow & 0x0800) ? 0x0400 : 0x0000)));
+ Emit16((WORD)((dest << 8) | (((wConstantLow >> 8) & 0x0007) << 12) | (wConstantLow & 0x00ff)));
+
+ if (wConstantHigh)
+ {
+                // movt regDest, #wConstantHigh
+ Emit16((WORD)(0xf2c0 | (wConstantHigh >> 12) | ((wConstantHigh & 0x0800) ? 0x0400 : 0x0000)));
+ Emit16((WORD)((dest << 8) | (((wConstantHigh >> 8) & 0x0007) << 12) | (wConstantHigh & 0x00ff)));
+ }
+ }
+ }
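+
+    // For example, ThumbEmitMovConstant(ThumbReg(12), 0x12345678) emits:
+    //     f245 6c78   movw r12, #0x5678
+    //     f2c1 2c34   movt r12, #0x1234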
+
+ void ThumbEmitLoadRegIndirect(ThumbReg dest, ThumbReg source, int offset)
+ {
+ _ASSERTE((offset >= 0) && (offset <= 4095));
+
+ // ldr regDest, [regSource + #offset]
+ if ((dest < 8) && (source < 8) && ((offset & 0x3) == 0) && (offset < 125))
+ {
+ // Encoding T1
+ Emit16((WORD)(0x6800 | ((offset >> 2) << 6) | (source << 3) | dest));
+ }
+ else
+ {
+ // Encoding T3
+ Emit16((WORD)(0xf8d0 | source));
+ Emit16((WORD)((dest << 12) | offset));
+ }
+ }
+
+ void ThumbEmitLoadIndirectPostIncrement(ThumbReg dest, ThumbReg source, int offset)
+ {
+ _ASSERTE((offset >= 0) && (offset <= 255));
+
+ // ldr regDest, [regSource], #offset
+ Emit16((WORD)(0xf850 | source));
+ Emit16((WORD)(0x0b00 | (dest << 12) | offset));
+ }
+
+ void ThumbEmitStoreRegIndirect(ThumbReg source, ThumbReg dest, int offset)
+ {
+ _ASSERTE((offset >= -255) && (offset <= 4095));
+
+ // str regSource, [regDest + #offset]
+ if (offset < 0)
+ {
+ Emit16((WORD)(0xf840 | dest));
+ Emit16((WORD)(0x0C00 | (source << 12) | (UINT8)(-offset)));
+ }
+ else
+ if ((dest < 8) && (source < 8) && ((offset & 0x3) == 0) && (offset < 125))
+ {
+ // Encoding T1
+ Emit16((WORD)(0x6000 | ((offset >> 2) << 6) | (dest << 3) | source));
+ }
+ else
+ {
+ // Encoding T3
+ Emit16((WORD)(0xf8c0 | dest));
+ Emit16((WORD)((source << 12) | offset));
+ }
+ }
+
+ void ThumbEmitStoreIndirectPostIncrement(ThumbReg source, ThumbReg dest, int offset)
+ {
+ _ASSERTE((offset >= 0) && (offset <= 255));
+
+ // str regSource, [regDest], #offset
+ Emit16((WORD)(0xf840 | dest));
+ Emit16((WORD)(0x0b00 | (source << 12) | offset));
+ }
+
+ void ThumbEmitLoadOffsetScaledReg(ThumbReg dest, ThumbReg base, ThumbReg offset, int shift)
+ {
+ _ASSERTE(shift >=0 && shift <=3);
+
+ Emit16((WORD)(0xf850 | base));
+ Emit16((WORD)((dest << 12) | (shift << 4) | offset));
+ }
+
+ void ThumbEmitCallRegister(ThumbReg target)
+ {
+ // blx regTarget
+ Emit16((WORD)(0x4780 | (target << 3)));
+ }
+
+ void ThumbEmitJumpRegister(ThumbReg target)
+ {
+ // bx regTarget
+ Emit16((WORD)(0x4700 | (target << 3)));
+ }
+
+ void ThumbEmitMovRegReg(ThumbReg dest, ThumbReg source)
+ {
+ // mov regDest, regSource
+ Emit16((WORD)(0x4600 | ((dest > 7) ? 0x0080 : 0x0000) | (source << 3) | (dest & 0x0007)));
+ }
+
+    // Assumes SP is only subtracted in the prolog
+ void ThumbEmitSubSp(int value)
+ {
+ _ASSERTE(value >= 0);
+ _ASSERTE((value & 0x3) == 0);
+
+ if(value < 512)
+ {
+ // encoding T1
+ // sub sp, sp, #(value >> 2)
+ Emit16((WORD)(0xb080 | (value >> 2)));
+ }
+ else if(value < 4096)
+ {
+ // Using 32-bit encoding
+ Emit16((WORD)(0xf2ad| ((value & 0x0800) >> 1)));
+ Emit16((WORD)(0x0d00| ((value & 0x0700) << 4) | (value & 0x00ff)));
+ }
+ else
+ {
+            // For values >= 4K (the page size) we must probe for the guard page
+
+#ifndef CROSSGEN_COMPILE
+ // mov r4, value
+ ThumbEmitMovConstant(ThumbReg(4), value);
+ // mov r12, checkStack
+ ThumbEmitMovConstant(ThumbReg(12), (int)checkStack);
+            // blx r12
+ ThumbEmitCallRegister(ThumbReg(12));
+#endif
+
+ // sub sp,sp,r4
+ Emit16((WORD)0xebad);
+ Emit16((WORD)0x0d04);
+ }
+ }
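+
+    // For example, ThumbEmitSubSp(16) emits the single halfword 0xb084
+    // ("sub sp, sp, #16"); values of 4096 or more go through the checkStack
+    // probe above before SP is adjusted.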
+
+ void ThumbEmitAddSp(int value)
+ {
+ _ASSERTE(value >= 0);
+ _ASSERTE((value & 0x3) == 0);
+
+ if(value < 512)
+ {
+ // encoding T2
+ // add sp, sp, #(value >> 2)
+ Emit16((WORD)(0xb000 | (value >> 2)));
+ }
+ else if(value < 4096)
+ {
+ // Using 32-bit encoding T4
+ Emit16((WORD)(0xf20d| ((value & 0x0800) >> 1)));
+ Emit16((WORD)(0x0d00| ((value & 0x0700) << 4) | (value & 0x00ff)));
+ }
+ else
+ {
+            // Must use a temp register for values >= 4096
+ ThumbEmitMovConstant(ThumbReg(12), value);
+ // add sp,sp,r12
+ Emit16((WORD)0x44e5);
+ }
+ }
+
+ void ThumbEmitAddReg(ThumbReg dest, ThumbReg source)
+ {
+
+ _ASSERTE(dest != source);
+ Emit16((WORD)(0x4400 | ((dest & 0x8)<<4) | (source<<3) | (dest & 0x7)));
+ }
+
+ void ThumbEmitAdd(ThumbReg dest, ThumbReg source, unsigned int value)
+ {
+
+ if(value<4096)
+ {
+ // addw dest, source, #value
+ unsigned int i = (value & 0x800) >> 11;
+ unsigned int imm3 = (value & 0x700) >> 8;
+ unsigned int imm8 = value & 0xff;
+ Emit16((WORD)(0xf200 | (i << 10) | source));
+ Emit16((WORD)((imm3 << 12) | (dest << 8) | imm8));
+ }
+ else
+ {
+            // If the immediate is 4096 or greater only ADD (register) will work:
+            // move the immediate into the dest reg and use ADD (register).
+            // This will not work if dest is the same as source.
+ _ASSERTE(dest != source);
+ ThumbEmitMovConstant(dest, value);
+ ThumbEmitAddReg(dest, source);
+ }
+ }
+
+ void ThumbEmitSub(ThumbReg dest, ThumbReg source, unsigned int value)
+ {
+ _ASSERTE(value < 4096);
+
+ // subw dest, source, #value
+ unsigned int i = (value & 0x800) >> 11;
+ unsigned int imm3 = (value & 0x700) >> 8;
+ unsigned int imm8 = value & 0xff;
+ Emit16((WORD)(0xf2a0 | (i << 10) | source));
+ Emit16((WORD)((imm3 << 12) | (dest << 8) | imm8));
+ }
+
+ void ThumbEmitCmpReg(ThumbReg reg1, ThumbReg reg2)
+ {
+ if(reg1 < 8 && reg2 <8)
+ {
+ Emit16((WORD)(0x4280 | reg2 << 3 | reg1));
+ }
+ else
+ {
+ _ASSERTE(reg1 != ThumbReg(15) && reg2 != ThumbReg(15));
+ Emit16((WORD)(0x4500 | reg2 << 3 | reg1 & 0x7 | (reg1 & 0x8 ? 0x80 : 0x0)));
+ }
+ }
+
+ void ThumbEmitIncrement(ThumbReg dest, unsigned int value)
+ {
+ while (value)
+ {
+ if (value >= 4095)
+ {
+ // addw <dest>, <dest>, #4095
+ ThumbEmitAdd(dest, dest, 4095);
+ value -= 4095;
+ }
+ else if (value <= 255)
+ {
+ // add <dest>, #value
+ Emit16((WORD)(0x3000 | (dest << 8) | value));
+ break;
+ }
+ else
+ {
+ // addw <dest>, <dest>, #value
+ ThumbEmitAdd(dest, dest, value);
+ break;
+ }
+ }
+ }
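+
+    // For example, ThumbEmitIncrement(dest, 5000) emits "addw <dest>, <dest>, #4095"
+    // followed by "addw <dest>, <dest>, #905".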
+
+ void ThumbEmitPush(WORD registers)
+ {
+ _ASSERTE(registers != 0);
+ _ASSERTE((registers & 0xa000) == 0); // Pushing SP or PC undefined
+
+ // push {registers}
+ if (CountBits(registers) == 1)
+ {
+ // Encoding T3 (exactly one register, high or low)
+ WORD reg = 15;
+ while ((registers & (WORD)(1 << reg)) == 0)
+ {
+ reg--;
+ }
+ Emit16(0xf84d);
+ Emit16(0x0d04 | (reg << 12));
+ }
+ else if ((registers & 0xbf00) == 0)
+ {
+ // Encoding T1 (low registers plus maybe LR)
+ Emit16(0xb400 | (registers & thumbRegLr.Mask() ? 0x0100: 0x0000) | (registers & 0x00ff));
+ }
+ else
+ {
+ // Encoding T2 (two or more registers, high or low)
+ Emit16(0xe92d);
+ Emit16(registers);
+ }
+ }
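+
+    // For example, ThumbEmitPush(0x40f0) ("push {r4-r7,lr}") takes encoding T1
+    // and emits the halfword 0xb5f0, while ThumbEmitPush(ThumbReg(8).Mask())
+    // takes encoding T3 and emits f84d 8d04 ("str r8, [sp, #-4]!").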
+
+ void ThumbEmitLoadStoreMultiple(ThumbReg base, bool load, WORD registers)
+ {
+ _ASSERTE(CountBits(registers) > 1);
+ _ASSERTE((registers & 0xFF00) == 0); // This only supports the small encoding
+ _ASSERTE(base < 8); // This only supports the small encoding
+ _ASSERTE((base.Mask() & registers) == 0); // This only supports the small encoding
+
+ // (LDM|STM) base, {registers}
+ WORD flag = load ? 0x0800 : 0;
+ Emit16(0xc000 | flag | ((base & 7) << 8) | (registers & 0xFF));
+ }
+
+ void ThumbEmitPop(WORD registers)
+ {
+ _ASSERTE(registers != 0);
+ _ASSERTE((registers & 0xc000) != 0xc000); // Popping PC and LR together undefined
+
+ // pop {registers}
+ if (CountBits(registers) == 1)
+ {
+ // Encoding T3 (exactly one register, high or low)
+ WORD reg = 15;
+ while ((registers & (WORD)(1 << reg)) == 0)
+ {
+ reg--;
+ }
+ Emit16(0xf85d);
+ Emit16(0x0b04 | (reg << 12));
+ }
+ else if ((registers & 0x7f00) == 0)
+ {
+ // Encoding T1 (low registers plus maybe PC)
+ Emit16(0xbc00 | (registers & thumbRegPc.Mask() ? 0x0100: 0x0000) | (registers & 0x00ff));
+ }
+ else
+ {
+ // Encoding T2 (two or more registers, high or low)
+ Emit16(0xe8bd);
+ Emit16(registers);
+ }
+ }
+
+ void ThumbEmitLoadVFPSingleRegIndirect(ThumbVFPSingleReg dest, ThumbReg source, int offset)
+ {
+ _ASSERTE((offset >= -1020) && (offset <= 1020));
+ _ASSERTE(offset%4==0);
+
+ Emit16((WORD) (0xed10 | ((offset > 0 ? 0x1: 0x0) << 7) | ((dest & 0x1) << 6) | source));
+ Emit16((WORD) (0x0a00 | ((dest & 0x1e) << 11) | (abs(offset)>>2)));
+ }
+
+ void ThumbEmitLoadVFPDoubleRegIndirect(ThumbVFPDoubleReg dest, ThumbReg source, int offset)
+ {
+ _ASSERTE((offset >= -1020) && (offset <= 1020));
+ _ASSERTE(offset%4==0);
+
+ Emit16((WORD) (0xed10 | ((offset > 0 ? 0x1: 0x0) << 7) | ((dest & 0x10) << 6) | source));
+ Emit16((WORD) (0x0b00 | ((dest & 0xf) << 12) | (abs(offset)>>2)));
+ }
+
+#ifdef FEATURE_INTERPRETER
+ void ThumbEmitStoreMultipleVFPDoubleReg(ThumbVFPDoubleReg source, ThumbReg dest, unsigned numRegs)
+ {
+ _ASSERTE((numRegs + source) <= 16);
+
+ // The third nibble is 0x8; the 0x4 bit (D) is zero because the source reg number must be less
+ // than 16 for double registers.
+ Emit16((WORD) (0xec80 | 0x80 | dest));
+ Emit16((WORD) (((source & 0xf) << 12) | 0xb00 | numRegs));
+ }
+
+ void ThumbEmitLoadMultipleVFPDoubleReg(ThumbVFPDoubleReg dest, ThumbReg source, unsigned numRegs)
+ {
+ _ASSERTE((numRegs + dest) <= 16);
+
+ // The third nibble is 0x8; the 0x4 bit (D) is zero because the source reg number must be less
+ // than 16 for double registers.
+ Emit16((WORD) (0xec90 | 0x80 | source));
+ Emit16((WORD) (((dest & 0xf) << 12) | 0xb00 | numRegs));
+ }
+#endif // FEATURE_INTERPRETER
+
+ void EmitStubLinkFrame(TADDR pFrameVptr, int offsetOfFrame, int offsetOfTransitionBlock);
+ void EmitStubUnlinkFrame();
+
+ void ThumbEmitCondFlagJump(CodeLabel * target,UINT cond);
+
+ void ThumbEmitCondRegJump(CodeLabel *target, BOOL nonzero, ThumbReg reg);
+
+ void ThumbEmitNearJump(CodeLabel *target);
+
+ // Scratches r12.
+ void ThumbEmitCallManagedMethod(MethodDesc *pMD, bool fTailcall);
+
+ void EmitUnboxMethodStub(MethodDesc* pRealMD);
+ static UINT_PTR HashMulticastInvoke(MetaSig* pSig);
+
+ void EmitMulticastInvoke(UINT_PTR hash);
+ void EmitSecureDelegateInvoke(UINT_PTR hash);
+ void EmitShuffleThunk(struct ShuffleEntry *pShuffleEntryArray);
+#if defined(FEATURE_SHARE_GENERIC_CODE)
+ void EmitInstantiatingMethodStub(MethodDesc* pSharedMD, void* extra);
+#endif // FEATURE_SHARE_GENERIC_CODE
+
+ static Stub * CreateTailCallCopyArgsThunk(CORINFO_SIG_INFO * pSig,
+ CorInfoHelperTailCallSpecialHandling flags);
+
+private:
+ void ThumbCopyOneTailCallArg(UINT * pnSrcAlign, const ArgLocDesc * pArgLoc, UINT * pcbStackSpace);
+ void ThumbEmitCallWithGenericInstantiationParameter(MethodDesc *pMD, void *pHiddenArg);
+};
+
+extern "C" void SinglecastDelegateInvokeStub();
+
+// SEH info forward declarations
+
+inline BOOL IsUnmanagedValueTypeReturnedByRef(UINT sizeofvaluetype)
+{
+ LIMITED_METHOD_CONTRACT;
+
+    // Structures that don't fit in a machine word (4 bytes) are returned
+    // by reference.
+ return (sizeofvaluetype > 4);
+}
+
+DECLSPEC_ALIGN(4) struct UMEntryThunkCode
+{
+ WORD m_code[4];
+
+ TADDR m_pTargetCode;
+ TADDR m_pvSecretParam;
+
+ void Encode(BYTE* pTargetCode, void* pvSecretParam);
+
+ LPCBYTE GetEntryPoint() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (LPCBYTE)((TADDR)this | THUMB_CODE);
+ }
+
+ static int GetEntryPointOffset()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return 0;
+ }
+};
+
+struct HijackArgs
+{
+ union
+ {
+ DWORD R0;
+        size_t ReturnValue; // This may not be the return value when the return is >32 bits or lives in a VFP reg,
+                             // but it works for us as this is only used by OnHijackObjectWorker()
+                             // and OnHijackInteriorPointerWorker() (where the return is an address)
+ };
+
+ //
+ // Non-volatile Integer registers
+ //
+ DWORD R4;
+ DWORD R5;
+ DWORD R6;
+ DWORD R7;
+ DWORD R8;
+ DWORD R9;
+ DWORD R10;
+ DWORD R11;
+
+ union
+ {
+ DWORD Lr;
+ size_t ReturnAddress;
+ };
+};
+
+// ClrFlushInstructionCache is used when we want to call FlushInstructionCache
+// for a specific architecture in the common code, but not for other architectures.
+// On IA64 ClrFlushInstructionCache calls the Kernel FlushInstructionCache function
+// to flush the instruction cache.
+// We call ClrFlushInstructionCache whenever we create or modify code in the heap.
+// Currently ClrFlushInstructionCache has no effect on X86
+//
+
+inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode)
+{
+#ifdef CROSSGEN_COMPILE
+ // The code won't be executed when we are cross-compiling so flush instruction cache is unnecessary
+ return TRUE;
+#else
+ return FlushInstructionCache(GetCurrentProcess(), pCodeAddr, sizeOfCode);
+#endif
+}
+
+//
+// JIT HELPER ALIASING FOR PORTABILITY.
+//
+// Create alias for optimized implementations of helpers provided on this platform
+//
+// optimized static helpers
+#define JIT_GetSharedGCStaticBase JIT_GetSharedGCStaticBase_InlineGetAppDomain
+#define JIT_GetSharedNonGCStaticBase JIT_GetSharedNonGCStaticBase_InlineGetAppDomain
+#define JIT_GetSharedGCStaticBaseNoCtor JIT_GetSharedGCStaticBaseNoCtor_InlineGetAppDomain
+#define JIT_GetSharedNonGCStaticBaseNoCtor JIT_GetSharedNonGCStaticBaseNoCtor_InlineGetAppDomain
+
+#define JIT_Stelem_Ref JIT_Stelem_Ref
+
+//------------------------------------------------------------------------
+//
+// Precode definitions
+//
+//------------------------------------------------------------------------
+//
+// Note: If you introduce a new precode implementation below, please
+// update PrecodeStubManager::CheckIsStub_Internal to account for it.
+
+EXTERN_C VOID STDCALL PrecodeFixupThunk();
+
+#define PRECODE_ALIGNMENT CODE_SIZE_ALIGN
+#define SIZEOF_PRECODE_BASE CODE_SIZE_ALIGN
+#define OFFSETOF_PRECODE_TYPE 0
+
+// Invalid precode type
+struct InvalidPrecode {
+ static const int Type = 0;
+};
+
+struct StubPrecode {
+
+ static const int Type = 0xdf;
+
+ // ldr r12, [pc, #8] ; =m_pMethodDesc
+ // ldr pc, [pc, #0] ; =m_pTarget
+ // dcd pTarget
+ // dcd pMethodDesc
+ WORD m_rgCode[4];
+ TADDR m_pTarget;
+ TADDR m_pMethodDesc;
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+
+ TADDR GetMethodDesc()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pMethodDesc;
+ }
+
+ PCODE GetTarget()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pTarget;
+ }
+
+ BOOL SetTargetInterlocked(TADDR target, TADDR expected)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ EnsureWritableExecutablePages(&m_pTarget);
+ return (TADDR)InterlockedCompareExchange(
+ (LONG*)&m_pTarget, (LONG)target, (LONG)expected) == expected;
+ }
+
+#ifdef FEATURE_PREJIT
+ void Fixup(DataImage *image);
+#endif
+};
+typedef DPTR(StubPrecode) PTR_StubPrecode;
+
+
+struct NDirectImportPrecode {
+
+ static const int Type = 0xe0;
+
+ // ldr r12, [pc, #4] ; =m_pMethodDesc
+ // ldr pc, [pc, #4] ; =m_pTarget
+ // dcd pMethodDesc
+ // dcd pTarget
+ WORD m_rgCode[4];
+ TADDR m_pMethodDesc; // Notice that the fields are reversed compared to StubPrecode. Precode::GetType
+ // takes advantage of this to detect NDirectImportPrecode.
+ TADDR m_pTarget;
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+
+ TADDR GetMethodDesc()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pMethodDesc;
+ }
+
+ PCODE GetTarget()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pTarget;
+ }
+
+ LPVOID GetEntrypoint()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (LPVOID)(dac_cast<TADDR>(this) + THUMB_CODE);
+ }
+
+#ifdef FEATURE_PREJIT
+ void Fixup(DataImage *image);
+#endif
+};
+typedef DPTR(NDirectImportPrecode) PTR_NDirectImportPrecode;
+
+
+struct FixupPrecode {
+
+ static const int Type = 0xfc;
+
+ // mov r12, pc
+ // ldr pc, [pc, #4] ; =m_pTarget
+ // dcb m_MethodDescChunkIndex
+ // dcb m_PrecodeChunkIndex
+ // dcd m_pTarget
+ WORD m_rgCode[3];
+ BYTE m_MethodDescChunkIndex;
+ BYTE m_PrecodeChunkIndex;
+ TADDR m_pTarget;
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex = 0, int iPrecodeChunkIndex = 0);
+
+ TADDR GetBase()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return dac_cast<TADDR>(this) + (m_PrecodeChunkIndex + 1) * sizeof(FixupPrecode);
+ }
+
+ TADDR GetMethodDesc();
+
+ PCODE GetTarget()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pTarget;
+ }
+
+ BOOL SetTargetInterlocked(TADDR target, TADDR expected)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ EnsureWritableExecutablePages(&m_pTarget);
+ return (TADDR)InterlockedCompareExchange(
+ (LONG*)&m_pTarget, (LONG)target, (LONG)expected) == expected;
+ }
+
+ static BOOL IsFixupPrecodeByASM(PCODE addr)
+ {
+ PTR_WORD pInstr = dac_cast<PTR_WORD>(PCODEToPINSTR(addr));
+
+ return
+ (pInstr[0] == 0x46fc) &&
+ (pInstr[1] == 0xf8df) &&
+ (pInstr[2] == 0xf004);
+ }
+
+#ifdef FEATURE_PREJIT
+ // Partial initialization. Used to save regrouped chunks.
+ void InitForSave(int iPrecodeChunkIndex);
+
+ void Fixup(DataImage *image, MethodDesc * pMD);
+#endif
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+};
+typedef DPTR(FixupPrecode) PTR_FixupPrecode;
+
+
+// Precode to shuffle this and retbuf for closed delegates over static methods with a return buffer
+struct ThisPtrRetBufPrecode {
+
+ static const int Type = 0x84;
+
+ // mov r12, r0
+ // mov r0, r1
+ // mov r1, r12
+ // ldr pc, [pc, #0] ; =m_pTarget
+ // dcd pTarget
+ // dcd pMethodDesc
+ WORD m_rgCode[6];
+ TADDR m_pTarget;
+ TADDR m_pMethodDesc;
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+
+ TADDR GetMethodDesc()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_pMethodDesc;
+ }
+
+ PCODE GetTarget()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pTarget;
+ }
+
+ BOOL SetTargetInterlocked(TADDR target, TADDR expected)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ EnsureWritableExecutablePages(&m_pTarget);
+ return FastInterlockCompareExchange((LONG*)&m_pTarget, (LONG)target, (LONG)expected) == (LONG)expected;
+ }
+};
+typedef DPTR(ThisPtrRetBufPrecode) PTR_ThisPtrRetBufPrecode;
+
+
+#ifdef HAS_REMOTING_PRECODE
+
+// Precode with embedded remoting interceptor
+struct RemotingPrecode {
+
+ static const int Type = 0x02;
+
+ // push {r1,lr}
+ // ldr r1, [pc, #16] ; =m_pPrecodeRemotingThunk
+ // blx r1
+ // pop {r1,lr}
+ // ldr pc, [pc, #12] ; =m_pLocalTarget
+ // nop ; padding for alignment
+ // dcd m_pMethodDesc
+ // dcd m_pPrecodeRemotingThunk
+ // dcd m_pLocalTarget
+ WORD m_rgCode[8];
+ TADDR m_pMethodDesc;
+ TADDR m_pPrecodeRemotingThunk;
+ TADDR m_pLocalTarget;
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator = NULL);
+
+ TADDR GetMethodDesc()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pMethodDesc;
+ }
+
+ PCODE GetTarget()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pLocalTarget;
+ }
+
+ BOOL SetTargetInterlocked(TADDR target, TADDR expected)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ EnsureWritableExecutablePages(&m_pLocalTarget);
+ return FastInterlockCompareExchange((LONG*)&m_pLocalTarget, (LONG)target, (LONG)expected) == (LONG)expected;
+ }
+
+#ifdef FEATURE_PREJIT
+ void Fixup(DataImage *image, ZapNode *pCodeNode);
+#endif
+};
+typedef DPTR(RemotingPrecode) PTR_RemotingPrecode;
+
+EXTERN_C void PrecodeRemotingThunk();
+
+#endif // HAS_REMOTING_PRECODE
+
+//**********************************************************************
+// Miscellaneous
+//**********************************************************************
+
+// Given the first halfword value of an ARM (Thumb) instruction (which is either an entire
+// 16-bit instruction, or the high-order halfword of a 32-bit instruction), determine how many bytes
+// the instruction is (2 or 4) and return that.
+inline size_t GetARMInstructionLength(WORD instr)
+{
+ // From the ARM Architecture Reference Manual, A6.1 "Thumb instruction set encoding":
+ // If bits [15:11] of the halfword being decoded take any of the following values, the halfword is the first
+ // halfword of a 32-bit instruction:
+ // 0b11101
+ // 0b11110
+ // 0b11111
+ // Otherwise, the halfword is a 16-bit instruction.
+ if ((instr & 0xf800) > 0xe000)
+ {
+ return 4;
+ }
+ else
+ {
+ return 2;
+ }
+}
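+
+// For example, 0xbf00 (nop) has bits [15:11] == 0b10111 and so is a complete
+// 2-byte instruction, while 0xf8df (the first halfword of a 32-bit "ldr.w")
+// satisfies the test above and yields 4.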
+
+// Given a pointer to an ARM (Thumb) instruction address, determine how many bytes
+// the instruction is (2 or 4) and return that.
+inline size_t GetARMInstructionLength(PBYTE pInstr)
+{
+ return GetARMInstructionLength(*(WORD*)pInstr);
+}
+
+EXTERN_C void FCallMemcpy(byte* dest, byte* src, int len);
+
+#endif // __cgencpu_h__
diff --git a/src/vm/arm/ehhelpers.asm b/src/vm/arm/ehhelpers.asm
new file mode 100644
index 0000000000..fc7c55e801
--- /dev/null
+++ b/src/vm/arm/ehhelpers.asm
@@ -0,0 +1,183 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+#include "ksarm.h"
+
+#include "asmconstants.h"
+
+#include "asmmacros.h"
+
+ IMPORT FixContextHandler
+ IMPORT LinkFrameAndThrow
+ IMPORT HijackHandler
+ IMPORT ThrowControlForThread
+
+;
+; WARNING!! These functions immediately ruin thread unwindability. This is
+; WARNING!! OK as long as there is a mechanism for saving the thread context
+; WARNING!! prior to running these functions as well as a mechanism for
+; WARNING!! restoring the context prior to any stackwalk. This means that
+; WARNING!! we need to ensure that no GC can occur while the stack is
+; WARNING!! unwalkable. This further means that we cannot allow any exception
+; WARNING!! to occur when the stack is unwalkable.
+;
+
+ TEXTAREA
+
+ ; GSCookie, scratch area
+ GBLA OFFSET_OF_FRAME
+
+ ; GSCookie + alignment padding
+OFFSET_OF_FRAME SETA 4 + SIZEOF__GSCookie
+
+ MACRO
+ GenerateRedirectedStubWithFrame $STUB, $TARGET
+
+ ;
+ ; This is the primary function to which execution will be redirected to.
+ ;
+ NESTED_ENTRY $STUB
+
+ ;
+ ; IN: lr: original IP before redirect
+ ;
+
+ PROLOG_PUSH {r4,r7,lr}
+ PROLOG_STACK_ALLOC OFFSET_OF_FRAME + SIZEOF__FaultingExceptionFrame
+
+        ; At this point, the stack may be misaligned if the thread abort was asynchronously
+ ; triggered in the prolog or epilog of the managed method. For such a case, we must
+ ; align the stack before calling into the VM.
+ ;
+ ; Runtime check for 8-byte alignment.
+ PROLOG_STACK_SAVE r7
+ and r0, r7, #4
+ sub sp, sp, r0
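+
+        ; (r7 holds the incoming SP: if SP was already 8-byte aligned, r0 is
+        ; zero and nothing changes; if it was only 4-byte aligned, r0 is 4 and
+        ; the extra word restores 8-byte alignment.)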
+
+ ; Save pointer to FEF for GetFrameFromRedirectedStubStackFrame
+ add r4, sp, #OFFSET_OF_FRAME
+
+ ; Prepare to initialize to NULL
+ mov r1,#0
+ str r1, [r4] ; Initialize vtbl (it is not strictly necessary)
+ str r1, [r4, #FaultingExceptionFrame__m_fFilterExecuted] ; Initialize BOOL for personality routine
+
+ mov r0, r4 ; move the ptr to FEF in R0
+
+ ; stack must be 8 byte aligned
+ CHECK_STACK_ALIGNMENT
+
+ bl $TARGET
+
+ ; Target should not return.
+ EMIT_BREAKPOINT
+
+ NESTED_END $STUB
+
+ MEND
+
+; ------------------------------------------------------------------
+;
+; Helpers for async (NullRef, AccessViolation) exceptions
+;
+
+ NESTED_ENTRY NakedThrowHelper2,,FixContextHandler
+ PROLOG_PUSH {r0, lr}
+
+ ; On entry:
+ ;
+ ; R0 = Address of FaultingExceptionFrame
+ bl LinkFrameAndThrow
+
+ ; Target should not return.
+ EMIT_BREAKPOINT
+
+ NESTED_END NakedThrowHelper2
+
+
+ GenerateRedirectedStubWithFrame NakedThrowHelper, NakedThrowHelper2
+
+; ------------------------------------------------------------------
+;
+; Helpers for ThreadAbort exceptions
+;
+
+ NESTED_ENTRY RedirectForThreadAbort2,,HijackHandler
+ PROLOG_PUSH {r0, lr}
+
+ ; stack must be 8 byte aligned
+ CHECK_STACK_ALIGNMENT
+
+ ; On entry:
+ ;
+ ; R0 = Address of FaultingExceptionFrame.
+ ;
+ ; Invoke the helper to setup the FaultingExceptionFrame and raise the exception
+ bl ThrowControlForThread
+
+ ; ThrowControlForThread doesn't return.
+ EMIT_BREAKPOINT
+
+ NESTED_END RedirectForThreadAbort2
+
+ GenerateRedirectedStubWithFrame RedirectForThreadAbort, RedirectForThreadAbort2
+
+; ------------------------------------------------------------------
+
+ ; This helper enables us to call into a funclet after applying the non-volatiles
+ NESTED_ENTRY CallEHFunclet
+
+ PROLOG_PUSH {r4-r11, lr}
+ PROLOG_STACK_ALLOC 4
+
+ ; On entry:
+ ;
+ ; R0 = throwable
+ ; R1 = PC to invoke
+ ; R2 = address of R4 register in CONTEXT record; used to restore the non-volatile registers of CrawlFrame
+ ; R3 = address of the location where the SP of funclet's caller (i.e. this helper) should be saved.
+ ;
+ ; Save the SP of this function
+ str sp, [r3]
+ ; apply the non-volatiles corresponding to the CrawlFrame
+ ldm r2, {r4-r11}
+ ; Invoke the funclet
+ blx r1
+
+ EPILOG_STACK_FREE 4
+ EPILOG_POP {r4-r11, pc}
+
+ NESTED_END CallEHFunclet
+
+ ; This helper enables us to call into a filter funclet by passing it the CallerSP to lookup the
+ ; frame pointer for accessing the locals in the parent method.
+ NESTED_ENTRY CallEHFilterFunclet
+
+ PROLOG_PUSH {lr}
+ PROLOG_STACK_ALLOC 4
+
+ ; On entry:
+ ;
+ ; R0 = throwable
+ ; R1 = SP of the caller of the method/funclet containing the filter
+ ; R2 = PC to invoke
+ ; R3 = address of the location where the SP of funclet's caller (i.e. this helper) should be saved.
+ ;
+ ; Save the SP of this function
+ str sp, [r3]
+ ; Invoke the filter funclet
+ blx r2
+
+ EPILOG_STACK_FREE 4
+ EPILOG_POP {pc}
+
+ NESTED_END CallEHFilterFunclet
+ END
+
diff --git a/src/vm/arm/exceparm.cpp b/src/vm/arm/exceparm.cpp
new file mode 100644
index 0000000000..4904229f5f
--- /dev/null
+++ b/src/vm/arm/exceparm.cpp
@@ -0,0 +1,113 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: ExcepArm.cpp
+
+#include "common.h"
+#include "asmconstants.h"
+#include "virtualcallstub.h"
+
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_DISPATCHER_CONTEXT * pDispatcherContext)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ UINT_PTR stackSlot = pDispatcherContext->EstablisherFrame + REDIRECTSTUB_SP_OFFSET_CONTEXT;
+ PTR_PTR_CONTEXT ppContext = dac_cast<PTR_PTR_CONTEXT>((TADDR)stackSlot);
+ return *ppContext;
+}
+
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_CONTEXT * pContext)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ UINT_PTR stackSlot = pContext->Sp + REDIRECTSTUB_SP_OFFSET_CONTEXT;
+ PTR_PTR_CONTEXT ppContext = dac_cast<PTR_PTR_CONTEXT>((TADDR)stackSlot);
+ return *ppContext;
+}
+
+#if !defined(DACCESS_COMPILE)
+
+// The next two functions help retrieve data kept relative to the FaultingExceptionFrame that is set up
+// for handling async exceptions (e.g. AV, NullRef, ThreadAbort, etc).
+//
+// FEF (and related data) is available relative to R4 - the thing to be kept in mind is that the
+// DispatcherContext->ContextRecord:
+//
+// 1) represents the caller context in the first pass.
+// 2) represents the current context in the second pass.
+//
+// Since R4 is a non-volatile register, this works for us since we setup the value of R4
+// in the redirection helpers (e.g. NakedThrowHelper or RedirectForThreadAbort) but do not
+// change it in their respective callee functions (e.g. NakedThrowHelper2 or RedirectForThreadAbort2)
+// that have the personality routines associated with them (which perform the collided unwind and also
+// invoke the two functions below).
+//
+// Thus, when our personality routine gets called in either pass, DC->ContextRecord->R4 will
+// have the same value.
+
+// Returns the pointer to the FEF
+FaultingExceptionFrame *GetFrameFromRedirectedStubStackFrame (T_DISPATCHER_CONTEXT *pDispatcherContext)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (FaultingExceptionFrame*)((TADDR)pDispatcherContext->ContextRecord->R4);
+}
+
+// Returns TRUE if caller should resume execution.
+BOOL
+AdjustContextForVirtualStub(
+ EXCEPTION_RECORD *pExceptionRecord,
+ CONTEXT *pContext)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ Thread * pThread = GetThread();
+
+ // We may not have a managed thread object. Example is an AV on the helper thread.
+ // (perhaps during StubManager::IsStub)
+ if (pThread == NULL)
+ {
+ return FALSE;
+ }
+
+ PCODE f_IP = GetIP(pContext);
+ TADDR pInstr = PCODEToPINSTR(f_IP);
+
+ VirtualCallStubManager::StubKind sk;
+ VirtualCallStubManager::FindStubManager(f_IP, &sk);
+
+ if (sk == VirtualCallStubManager::SK_DISPATCH)
+ {
+ if (*PTR_WORD(pInstr) != DISPATCH_STUB_FIRST_WORD)
+ {
+ _ASSERTE(!"AV in DispatchStub at unknown instruction");
+ return FALSE;
+ }
+ }
+ else
+ if (sk == VirtualCallStubManager::SK_RESOLVE)
+ {
+ if (*PTR_WORD(pInstr) != RESOLVE_STUB_FIRST_WORD)
+ {
+ _ASSERTE(!"AV in ResolveStub at unknown instruction");
+ return FALSE;
+ }
+ }
+ else
+ {
+ return FALSE;
+ }
+
+ PCODE callsite = GetAdjustedCallAddress(GetLR(pContext));
+
+    // LR was already saved before the call, so there is no need to restore it here.
+
+ pExceptionRecord->ExceptionAddress = (PVOID)callsite;
+ SetIP(pContext, callsite);
+
+ return TRUE;
+}
+#endif // !DACCESS_COMPILE
+
diff --git a/src/vm/arm/excepcpu.h b/src/vm/arm/excepcpu.h
new file mode 100644
index 0000000000..6b8e16a972
--- /dev/null
+++ b/src/vm/arm/excepcpu.h
@@ -0,0 +1,51 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+//
+
+
+#ifndef __excepcpu_h__
+#define __excepcpu_h__
+
+#define THROW_CONTROL_FOR_THREAD_FUNCTION RedirectForThreadAbort
+EXTERN_C void RedirectForThreadAbort();
+
+#define STATUS_CLR_GCCOVER_CODE STATUS_ILLEGAL_INSTRUCTION
+
+class Thread;
+class FaultingExceptionFrame;
+
+#define INSTALL_EXCEPTION_HANDLING_RECORD(record)
+#define UNINSTALL_EXCEPTION_HANDLING_RECORD(record)
+//
+// On ARM, the COMPlusFrameHandler's work is done by our personality routine.
+//
+#define DECLARE_CPFH_EH_RECORD(pCurThread)
+
+//
+// Retrieves the redirected CONTEXT* from the stack frame of one of the
+// RedirectedHandledJITCaseForXXX_Stub's.
+//
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_DISPATCHER_CONTEXT * pDispatcherContext);
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_CONTEXT * pContext);
+
+//
+// Retrieves the FaultingExceptionFrame* from the stack frame of
+// RedirectForThrowControl or NakedThrowHelper.
+//
+FaultingExceptionFrame *GetFrameFromRedirectedStubStackFrame (T_DISPATCHER_CONTEXT *pDispatcherContext);
+
+inline
+PCODE GetAdjustedCallAddress(PCODE returnAddress)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // blx <reg> instruction size is 2 bytes
+ return returnAddress - 2;
+}
+
+BOOL AdjustContextForVirtualStub(EXCEPTION_RECORD *pExceptionRecord, T_CONTEXT *pContext);
+
+#endif // __excepcpu_h__
diff --git a/src/vm/arm/gmscpu.h b/src/vm/arm/gmscpu.h
new file mode 100644
index 0000000000..583b00acf0
--- /dev/null
+++ b/src/vm/arm/gmscpu.h
@@ -0,0 +1,175 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/**************************************************************/
+/* gmscpu.h */
+/**************************************************************/
+/* HelperFrame defines the 'GET_STATE(machState)' macro, which
+ figures out what the state of the machine will be when the
+ current method returns. It then stores the state in the
+ JIT_machState structure. */
+
+/**************************************************************/
+
+#ifndef __gmscpu_h__
+#define __gmscpu_h__
+
+#ifdef _DEBUG
+class HelperMethodFrame;
+struct MachState;
+EXTERN_C MachState* __stdcall HelperMethodFrameConfirmState(HelperMethodFrame* frame, void* esiVal, void* ediVal, void* ebxVal, void* ebpVal);
+#endif
+
+ // A MachState indicates the register state of the processor at some point in time (usually
+ // just before or after a call is made). It can be made one of two ways. Either explicitly
+ // (when you for some reason know the values of all the registers), or implicitly using the
+ // GET_STATE macros.
+
+typedef DPTR(struct MachState) PTR_MachState;
+struct MachState {
+
+ BOOL isValid() { LIMITED_METHOD_DAC_CONTRACT; return _isValid; }
+ TADDR GetRetAddr() { LIMITED_METHOD_DAC_CONTRACT; return _pc; }
+
+ friend class HelperMethodFrame;
+ friend class CheckAsmOffsets;
+ friend struct LazyMachState;
+
+
+protected:
+    // The simplest way to understand the relationship between captureR4_R11 (registers
+ // representing the captured state) and _R4_R11 (pointers to registers representing
+ // preserved state) is as follows:
+ //
+ // 1) LazyMachState::unwindLazyState is invoked by HelperMethodFrame to initialize the captured
+ // state. It then performs an unwind and copies the register pointers to _R4_R11.
+ //
+ // 2) HelperMethodFrame::UpdateRegdisplay is invoked by our StackWalker that initializes
+ // the regdisplay with the updated register state.
+ //
+ // 3) HelperMethodFrameRestoreState is invoked when the HMF state machine exits and it
+ // restores the values of unmodified registers.
+
+ TADDR captureR4_R11[8]; // Registers R4..R11 at the time of capture
+
+ PTR_DWORD _R4_R11[8]; // Preserved registers
+
+ TADDR _pc;
+ TADDR _sp; // stack pointer after the function returns
+
+ BOOL _isValid;
+};
+
+/********************************************************************/
+/* This allows you to defer the computation of the Machine state
+ until later. Note that we don't reuse slots, because we want
+ this to be threadsafe without locks */
+
+typedef DPTR(LazyMachState) PTR_LazyMachState;
+struct LazyMachState : public MachState {
+    // Compute the machine state of the processor as it will exist just
+    // after the return from at most 'funCallDepth' functions.
+    // If 'testFtn' is non-NULL, the return address is tested at each
+    // return instruction encountered. If this test returns non-NULL,
+    // then stack walking stops (thus you can walk up to the point that the
+    // return address matches some criteria).
+
+ // Normally this is called with funCallDepth=1 and testFtn = 0 so that
+ // it returns the state of the processor after the function that called 'captureState()'
+ void setLazyStateFromUnwind(MachState* copy);
+ static void unwindLazyState(LazyMachState* baseState,
+ MachState* lazyState,
+ int funCallDepth = 1,
+ HostCallPreference hostCallPreference = AllowHostCalls);
+
+ friend class HelperMethodFrame;
+ friend class CheckAsmOffsets;
+private:
+ TADDR captureSp; // Stack pointer at the time of capture
+ TADDR captureIp; // Instruction pointer at the time of capture
+};
+
+// R4 - R11
+#define NUM_NONVOLATILE_CONTEXT_POINTERS 8
+
+inline void LazyMachState::setLazyStateFromUnwind(MachState* copy)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#if defined(DACCESS_COMPILE)
+ // This function cannot be called in DAC because DAC cannot update target memory.
+ DacError(E_FAIL);
+ return;
+
+#else // !DACCESS_COMPILE
+ this->_pc = copy->_pc;
+ this->_sp = copy->_sp;
+
+ // Capture* has already been set, so there is no need to touch it.
+ // This was setup in LazyMachState::unwindLazyState just before we
+ // called into the OS for unwind.
+
+    // Prepare to loop over the nonvolatile context pointers and
+    // make sure to properly copy interior pointers into the new struct.
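+    //
+    // For example, if copy->_R4_R11[0] points at copy->captureR4_R11[0]
+    // (i.e. the unwinder left R4's home inside the captured state itself),
+    // the loop below rebases it to point at this->captureR4_R11[0], keeping
+    // the interior pointer valid in the destination MachState.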
+
+ PDWORD* pSrc = &copy->_R4_R11[0];
+ PDWORD* pDst = &this->_R4_R11[0];
+
+ const PDWORD LowerBoundDst = (PDWORD) this;
+ const PDWORD LowerBoundSrc = (PDWORD) copy;
+
+ // Calculate the upperbound till which we need to loop (i.e. the highest address till
+ // which we have saved non-volatile pointers).
+ const PDWORD UpperBoundSrc = (PDWORD) (((BYTE*)LowerBoundSrc) + offsetof(LazyMachState, _pc));
+
+#ifdef _DEBUG
+ int count = 0;
+#endif // _DEBUG
+
+ while (((PDWORD)pSrc) < UpperBoundSrc)
+ {
+#ifdef _DEBUG
+ count++;
+#endif // _DEBUG
+
+ PDWORD valueSrc = *pSrc++;
+
+ // If any non-volatile register pointer is pointing to the corresponding register field
+ // in the MachState, then make the corresponding pointer in "this" MachState point
+ // to the corresponding field.
+ if ((LowerBoundSrc <= valueSrc) && (valueSrc < UpperBoundSrc))
+ {
+ valueSrc = (PDWORD)((BYTE*)valueSrc - (BYTE*)LowerBoundSrc + (BYTE*)LowerBoundDst);
+ }
+
+ *pDst++ = valueSrc;
+ }
+
+ CONSISTENCY_CHECK_MSGF(count == NUM_NONVOLATILE_CONTEXT_POINTERS, ("count != NUM_NONVOLATILE_CONTEXT_POINTERS, actually = %d", count));
+
+ // this has to be last because we depend on write ordering to
+ // synchronize the race implicit in updating this struct
+ VolatileStore(&_isValid, TRUE);
+
+#endif // !DACCESS_COMPILE
+
+}
+
+// Do the initial capture of the machine state. This is meant to be
+// as light weight as possible, as we may never need the state that
+// we capture. Thus to complete the process you need to call
+// 'getMachState()', which finishes the process.
+EXTERN_C void LazyMachStateCaptureState(struct LazyMachState *pState);
+
+// CAPTURE_STATE captures just enough register state so that the state of the
+// processor can be determined just after the routine that has CAPTURE_STATE in
+// it returns.
+
+#define CAPTURE_STATE(machState, ret) \
+ LazyMachStateCaptureState(machState)
+
+#endif
diff --git a/src/vm/arm/jithelpersarm.cpp b/src/vm/arm/jithelpersarm.cpp
new file mode 100644
index 0000000000..de276b637e
--- /dev/null
+++ b/src/vm/arm/jithelpersarm.cpp
@@ -0,0 +1,55 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// ===========================================================================
+// File: JITHelpersARM.CPP
+//
+// ===========================================================================
+
+// This contains JITinterface routines that are specific to the
+// ARM platform. They are modeled after the AMD64 specific routines
+// found in JIThelpersAMD64.cpp
+
+
+#include "common.h"
+#include "jitinterface.h"
+#include "eeconfig.h"
+#include "excep.h"
+#include "ecall.h"
+#include "asmconstants.h"
+
+EXTERN_C void JIT_TailCallHelperStub_ReturnAddress();
+
+TailCallFrame * TailCallFrame::GetFrameFromContext(CONTEXT * pContext)
+{
+ _ASSERTE((void*)::GetIP(pContext) == JIT_TailCallHelperStub_ReturnAddress);
+ return (TailCallFrame*)(pContext->R7 - offsetof(TailCallFrame, m_calleeSavedRegisters));
+}
+
+// Assuming pContext is a plain generic call-site, adjust it to look like
+// it called into TailCallHelperStub, and is at the point of the call.
+TailCallFrame * TailCallFrame::AdjustContextForTailCallHelperStub(CONTEXT * pContext, size_t cbNewArgArea, Thread * pThread)
+{
+ TailCallFrame * pNewFrame = (TailCallFrame *) (GetSP(pContext) - sizeof(TailCallFrame));
+
+    // The return address for the pseudo-call
+ pContext->Lr = (DWORD_PTR)JIT_TailCallHelperStub_ReturnAddress;
+ // The R11/ETW chain 'frame' pointer
+ pContext->R11 = GetSP(pContext) - (2 * sizeof(DWORD)); // LR & R11
+ // The unwind data frame pointer
+    pContext->R7 = pContext->R11 - (7 * sizeof(DWORD)); // R4-R10 non-volatile registers
+ // for the args and the remainder of the FrameWithCookie<TailCallFrame>
+ SetSP(pContext, (size_t) pNewFrame - (cbNewArgArea + sizeof(GSCookie)));
+
+ // For popping the Frame, store the Thread
+ pContext->R6 = (DWORD_PTR)pThread;
+ // And the current head/top
+ pContext->R5 = (DWORD_PTR)pThread->GetFrame();
+
+ return pNewFrame;
+}
+
diff --git a/src/vm/arm/memcpy.asm b/src/vm/arm/memcpy.asm
new file mode 100644
index 0000000000..8d4e60adec
--- /dev/null
+++ b/src/vm/arm/memcpy.asm
@@ -0,0 +1,285 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+;
+
+;
+
+; This is the fast memcpy implementation for ARM stolen from the CRT (original location
+; vctools\crt\crtw32\string\arm\memcpy.asm) and modified to be compatible with CLR.
+;
+; For reference, the unmodified crt version of memcpy is preserved as memcpy_crt.asm
+
+#include "ksarm.h"
+#include "asmmacros.h"
+
+ IMPORT FCallMemCpy_GCPoll
+ IMPORT g_TrapReturningThreads
+
+ AREA |.text|,ALIGN=5,CODE,READONLY
+
+;
+; void *memcpy(void *dst, const void *src, size_t length)
+;
+; Copy a block of memory in a forward direction.
+;
+
+ ALIGN 32
+ LEAF_ENTRY FCallMemcpy
+
+ pld [r1] ; preload the first cache line
+ cmp r2, #16 ; less than 16 bytes?
+ mov r3, r0 ; use r3 as our destination
+ bhs __FCallMemcpy_large ; go to the large copy case directly
+
+CpySmal tbb [pc, r2] ; branch to specialized bits for small copies
+__SwitchTable1_Copy
+CTable dcb (Copy0 - CTable) / 2 ; 0B
+ dcb (Copy1 - CTable) / 2 ; 1B
+ dcb (Copy2 - CTable) / 2 ; 2B
+ dcb (Copy3 - CTable) / 2 ; 3B
+ dcb (Copy4 - CTable) / 2 ; 4B
+ dcb (Copy5 - CTable) / 2 ; 5B
+ dcb (Copy6 - CTable) / 2 ; 6B
+ dcb (Copy7 - CTable) / 2 ; 7B
+ dcb (Copy8 - CTable) / 2 ; 8B
+ dcb (Copy9 - CTable) / 2 ; 9B
+ dcb (Copy10 - CTable) / 2 ; 10B
+ dcb (Copy11 - CTable) / 2 ; 11B
+ dcb (Copy12 - CTable) / 2 ; 12B
+ dcb (Copy13 - CTable) / 2 ; 13B
+ dcb (Copy14 - CTable) / 2 ; 14B
+ dcb (Copy15 - CTable) / 2 ; 15B
+__SwitchTableEnd_Copy
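+; (tbb reads the byte at [pc + r2], where pc here equals the address of
+; CTable, and branches forward by twice that value; hence each entry above
+; is encoded as (CopyN - CTable) / 2. r2 holds the remaining count, 0-15.)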
+
+Copy1 ldrb r2, [r1]
+ strb r2, [r3]
+Copy0 b GC_POLL
+
+Copy2 ldrh r2, [r1]
+ strh r2, [r3]
+ b GC_POLL
+
+Copy3 ldrh r2, [r1]
+ ldrb r1, [r1, #2]
+ strh r2, [r3]
+ strb r1, [r3, #2]
+ b GC_POLL
+
+Copy4 ldr r2, [r1]
+ str r2, [r3]
+ b GC_POLL
+
+Copy5 ldr r2, [r1]
+ ldrb r1, [r1, #4]
+ str r2, [r3]
+ strb r1, [r3, #4]
+ b GC_POLL
+
+Copy6 ldr r2, [r1]
+ ldrh r1, [r1, #4]
+ str r2, [r3]
+ strh r1, [r3, #4]
+ b GC_POLL
+
+Copy7 ldr r12, [r1]
+ ldrh r2, [r1, #4]
+ ldrb r1, [r1, #6]
+ str r12, [r3]
+ strh r2, [r3, #4]
+ strb r1, [r3, #6]
+ b GC_POLL
+
+Copy8 ldr r2, [r1]
+ ldr r1, [r1, #4]
+ str r2, [r3]
+ str r1, [r3, #4]
+ b GC_POLL
+
+Copy9 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ ldrb r1, [r1, #8]
+ str r12, [r3]
+ str r2, [r3, #4]
+ strb r1, [r3, #8]
+ b GC_POLL
+
+Copy10 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ ldrh r1, [r1, #8]
+ str r12, [r3]
+ str r2, [r3, #4]
+ strh r1, [r3, #8]
+ b GC_POLL
+
+Copy11 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ str r12, [r3]
+ str r2, [r3, #4]
+ ldrh r2, [r1, #8]
+ ldrb r1, [r1, #10]
+ strh r2, [r3, #8]
+ strb r1, [r3, #10]
+ b GC_POLL
+
+Copy12 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ ldr r1, [r1, #8]
+ str r12, [r3]
+ str r2, [r3, #4]
+ str r1, [r3, #8]
+ b GC_POLL
+
+Copy13 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ str r12, [r3]
+ str r2, [r3, #4]
+ ldr r2, [r1, #8]
+ ldrb r1, [r1, #12]
+ str r2, [r3, #8]
+ strb r1, [r3, #12]
+ b GC_POLL
+
+Copy14 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ str r12, [r3]
+ str r2, [r3, #4]
+ ldr r2, [r1, #8]
+ ldrh r1, [r1, #12]
+ str r2, [r3, #8]
+ strh r1, [r3, #12]
+ b GC_POLL
+
+Copy15 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ str r12, [r3]
+ str r2, [r3, #4]
+ ldr r12, [r1, #8]
+ ldrh r2, [r1, #12]
+ ldrb r1, [r1, #14]
+ str r12, [r3, #8]
+ strh r2, [r3, #12]
+ strb r1, [r3, #14]
+GC_POLL
+ ldr r0, =g_TrapReturningThreads
+ ldr r0, [r0]
+ cmp r0, #0
+ bne FCallMemCpy_GCPoll
+
+ bx lr
+
+ LEAF_END FCallMemcpy
+
+
+;
+; __FCallMemcpy_large (internal calling convention)
+;
+; Copy large (>= 16 bytes) blocks of memory in a forward direction,
+; using integer registers only.
+;
+
+ ALIGN 32
+ NESTED_ENTRY __FCallMemcpy_large
+
+ PROLOG_NOP lsls r12, r3, #31 ; C = bit 1, N = bit 0
+ PROLOG_PUSH {r4-r9, r11, lr}
+
+;
+; Align destination to a word boundary
+;
+
+ bpl %F1
+ ldrb r4, [r1], #1 ; fetch byte
+ subs r2, r2, #1 ; decrement count
+ strb r4, [r3], #1 ; store byte
+ lsls r12, r3, #31 ; compute updated status
+1
+ bcc %F2 ; if already aligned, just skip ahead
+ ldrh r4, [r1], #2 ; fetch halfword
+ subs r2, r2, #2 ; decrement count
+ strh r4, [r3], #2 ; store halfword
+2
+ tst r1, #3 ; is the source now word-aligned?
+ bne %F20 ; if not, we have to use the slow path
+
+;
+; Source is word-aligned; fast case
+;
+
+10
+ subs r2, r2, #32 ; take 32 off the top
+ blo %F13 ; if not enough, recover and do small copies
+ subs r2, r2, #32 ; take off another 32
+ pld [r1, #32] ; pre-load one block ahead
+ blo %F12 ; skip the loop if that's all we have
+11
+ pld [r1, #64] ; prefetch ahead
+ subs r2, r2, #32 ; count the bytes for this block
+ ldm r1!, {r4-r9, r12, lr} ; load 32 bytes
+ stm r3!, {r4-r9, r12, lr} ; store 32 bytes
+ bhs %B11 ; keep going until we're done
+12
+ ldm r1!, {r4-r9, r12, lr} ; load 32 bytes
+ stm r3!, {r4-r9, r12, lr} ; store 32 bytes
+13
+ adds r2, r2, #(32 - 8) ; recover original count, and pre-decrement
+ blo %F15 ; if not enough remaining, skip this loop
+14
+ subs r2, r2, #8 ; decrement count
+ ldrd r4, r5, [r1], #8 ; fetch pair of words
+ strd r4, r5, [r3], #8 ; store pair of words
+ bhs %B14 ; loop while we still have data remaining
+15
+ adds r2, r2, #8 ; recover final count
+
+ EPILOG_POP {r4-r9, r11, lr}
+ EPILOG_NOP bne CpySmal ; if some left, continue with small
+ EPILOG_BRANCH GC_POLL
+
+;
+; Source is not word-aligned; slow case
+;
+
+20
+ subs r2, r2, #64 ; pre-decrement to simplify the loop
+ blo %23 ; skip over the loop if we don't have enough
+ pld [r1, #32] ; pre-load one block ahead
+21
+ pld [r1, #64] ; prefetch ahead
+ ldr r4, [r1, #0] ; load 32 bytes
+ ldr r5, [r1, #4] ;
+ ldr r6, [r1, #8] ;
+ ldr r7, [r1, #12] ;
+ ldr r8, [r1, #16] ;
+ ldr r9, [r1, #20] ;
+ ldr r12, [r1, #24] ;
+ ldr lr, [r1, #28] ;
+ adds r1, r1, #32 ; update pointer
+ subs r2, r2, #32 ; count the bytes for this block
+ stm r3!, {r4-r9, r12, lr} ; store 32 bytes
+ bhs %B21 ; keep going until we're done
+23
+ adds r2, r2, #(64 - 8) ; recover original count, and pre-decrement
+ blo %F25 ; if not enough remaining, skip this loop
+24
+ ldr r4, [r1] ; fetch pair of words
+ ldr r5, [r1, #4] ;
+ adds r1, r1, #8 ; update pointer
+ subs r2, r2, #8 ; decrement count
+ strd r4, r5, [r3], #8 ; store pair of words
+ bhs %B24 ; loop while we still have data remaining
+25
+ adds r2, r2, #8 ; recover final count
+
+ EPILOG_POP {r4-r9, r11, lr}
+ EPILOG_NOP bne CpySmal ; if some left, continue with small
+ EPILOG_BRANCH GC_POLL
+
+ EXPORT FCallMemcpy_End ; this is used to place the entire
+FCallMemcpy_End ; implementation in av-exclusion list
+
+ NESTED_END __FCallMemcpy_large
+
+ END
diff --git a/src/vm/arm/memcpy_crt.asm b/src/vm/arm/memcpy_crt.asm
new file mode 100644
index 0000000000..34445217a3
--- /dev/null
+++ b/src/vm/arm/memcpy_crt.asm
@@ -0,0 +1,1002 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+;
+
+;
+
+#include "ksarm.h"
+
+#if !defined PF_ARM_EXTERNAL_CACHE_AVAILABLE
+#define PF_ARM_EXTERNAL_CACHE_AVAILABLE 0x1a
+#endif
+
+#if !defined(_BOOTCRT_)
+
+ DATAAREA
+
+__memcpy_forward_large_func dcd __memcpy_decide
+ EXPORT __memcpy_forward_large_func
+__memcpy_reverse_large_func dcd __memcpy_decide
+ EXPORT __memcpy_reverse_large_func
+
+#endif
+
+ AREA |.text|,ALIGN=5,CODE,READONLY
+
+;
+; void *memcpy(void *dst, const void *src, size_t length)
+;
+; Copy a block of memory in a forward direction.
+;
+
+ ALIGN 32
+ LEAF_ENTRY memcpy
+
+ ALTERNATE_ENTRY __memcpy_forward_new
+
+ pld [r1] ; preload the first cache line
+ cmp r2, #16 ; less than 16 bytes?
+ mov r3, r0 ; use r3 as our destination
+ bhs CpyLrge ; go to the large copy case directly
+
+CpySmal tbb [pc, r2] ; branch to specialized bits for small copies
+__SwitchTable1_Copy
+CTable dcb (Copy0 - CTable) / 2 ; 0B
+ dcb (Copy1 - CTable) / 2 ; 1B
+ dcb (Copy2 - CTable) / 2 ; 2B
+ dcb (Copy3 - CTable) / 2 ; 3B
+ dcb (Copy4 - CTable) / 2 ; 4B
+ dcb (Copy5 - CTable) / 2 ; 5B
+ dcb (Copy6 - CTable) / 2 ; 6B
+ dcb (Copy7 - CTable) / 2 ; 7B
+ dcb (Copy8 - CTable) / 2 ; 8B
+ dcb (Copy9 - CTable) / 2 ; 9B
+ dcb (Copy10 - CTable) / 2 ; 10B
+ dcb (Copy11 - CTable) / 2 ; 11B
+ dcb (Copy12 - CTable) / 2 ; 12B
+ dcb (Copy13 - CTable) / 2 ; 13B
+ dcb (Copy14 - CTable) / 2 ; 14B
+ dcb (Copy15 - CTable) / 2 ; 15B
+__SwitchTableEnd_Copy
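+
+; tbb loads the unsigned byte at [pc + r2] and branches forward by twice its
+; value; pc here equals CTable, which is why each entry stores (CopyN - CTable) / 2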
+
+Copy1 ldrb r2, [r1]
+ strb r2, [r3]
+Copy0 bx lr
+
+Copy2 ldrh r2, [r1]
+ strh r2, [r3]
+ bx lr
+
+Copy3 ldrh r2, [r1]
+ ldrb r1, [r1, #2]
+ strh r2, [r3]
+ strb r1, [r3, #2]
+ bx lr
+
+Copy4 ldr r2, [r1]
+ str r2, [r3]
+ bx lr
+
+Copy5 ldr r2, [r1]
+ ldrb r1, [r1, #4]
+ str r2, [r3]
+ strb r1, [r3, #4]
+ bx lr
+
+Copy6 ldr r2, [r1]
+ ldrh r1, [r1, #4]
+ str r2, [r3]
+ strh r1, [r3, #4]
+ bx lr
+
+Copy7 ldr r12, [r1]
+ ldrh r2, [r1, #4]
+ ldrb r1, [r1, #6]
+ str r12, [r3]
+ strh r2, [r3, #4]
+ strb r1, [r3, #6]
+ bx lr
+
+Copy8 ldr r2, [r1]
+ ldr r1, [r1, #4]
+ str r2, [r3]
+ str r1, [r3, #4]
+ bx lr
+
+Copy9 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ ldrb r1, [r1, #8]
+ str r12, [r3]
+ str r2, [r3, #4]
+ strb r1, [r3, #8]
+ bx lr
+
+Copy10 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ ldrh r1, [r1, #8]
+ str r12, [r3]
+ str r2, [r3, #4]
+ strh r1, [r3, #8]
+ bx lr
+
+Copy11 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ str r12, [r3]
+ str r2, [r3, #4]
+ ldrh r2, [r1, #8]
+ ldrb r1, [r1, #10]
+ strh r2, [r3, #8]
+ strb r1, [r3, #10]
+ bx lr
+
+Copy12 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ ldr r1, [r1, #8]
+ str r12, [r3]
+ str r2, [r3, #4]
+ str r1, [r3, #8]
+ bx lr
+
+Copy13 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ str r12, [r3]
+ str r2, [r3, #4]
+ ldr r2, [r1, #8]
+ ldrb r1, [r1, #12]
+ str r2, [r3, #8]
+ strb r1, [r3, #12]
+ bx lr
+
+Copy14 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ str r12, [r3]
+ str r2, [r3, #4]
+ ldr r2, [r1, #8]
+ ldrh r1, [r1, #12]
+ str r2, [r3, #8]
+ strh r1, [r3, #12]
+ bx lr
+
+Copy15 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ str r12, [r3]
+ str r2, [r3, #4]
+ ldr r12, [r1, #8]
+ ldrh r2, [r1, #12]
+ ldrb r1, [r1, #14]
+ str r12, [r3, #8]
+ strh r2, [r3, #12]
+ strb r1, [r3, #14]
+ bx lr
+
+CpyLrge
+
+#if defined(_BOOTCRT_)
+
+ b __memcpy_forward_large_integer ; always use integer in boot code
+
+#else
+
+ eor r12, r0, r1 ; see if src/dst are equally aligned
+ tst r12, #3 ; at least to a 4 byte boundary
+ bne __memcpy_forward_large_neon ; if not, always use NEON
+ mov32 r12, __memcpy_forward_large_func ; otherwise, load the large function pointer
+ ldr pc, [r12] ; and call it
+
+#endif
+
+ LEAF_END memcpy
+
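+;
+; Large copies are routed through a writable function pointer so the first
+; call can pick the best implementation for this CPU (see __memcpy_decide
+; below). In C terms, roughly (a sketch; the typedef is hypothetical):
+;
+;   typedef void (*large_copy_fn)(void);
+;   large_copy_fn __memcpy_forward_large_func = __memcpy_decide;
+;
+;   if (((uintptr_t)dst ^ (uintptr_t)src) & 3)   /* mutually misaligned?        */
+;       __memcpy_forward_large_neon();           /* NEON handles that case      */
+;   else
+;       (*__memcpy_forward_large_func)();        /* integer or NEON, chosen once */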
+
+;
+; __memcpy_forward_large_integer (internal calling convention)
+;
+; Copy large (>= 16 bytes) blocks of memory in a forward direction,
+; using integer registers only.
+;
+
+ ALIGN 32
+ NESTED_ENTRY __memcpy_forward_large_integer_wrapper
+
+__memcpy_forward_large_integer
+
+ PROLOG_NOP lsls r12, r3, #31 ; C = bit 1, N = bit 0
+ PROLOG_PUSH {r4-r9, r11, lr}
+
+;
+; Align destination to a word boundary
+;
+
+ bpl %F1
+ ldrb r4, [r1], #1 ; fetch byte
+ subs r2, r2, #1 ; decrement count
+ strb r4, [r3], #1 ; store byte
+ lsls r12, r3, #31 ; compute updated status
+1
+ bcc %F2 ; if already aligned, just skip ahead
+ ldrh r4, [r1], #2 ; fetch halfword
+ subs r2, r2, #2 ; decrement count
+ strh r4, [r3], #2 ; store halfword
+2
+ tst r1, #3 ; is the source now word-aligned?
+ bne %F20 ; if not, we have to use the slow path
+
+;
+; Source is word-aligned; fast case
+;
+
+10
+ subs r2, r2, #32 ; take 32 off the top
+ blo %F13 ; if not enough, recover and do small copies
+ subs r2, r2, #32 ; take off another 32
+ pld [r1, #32] ; pre-load one block ahead
+ blo %F12 ; skip the loop if that's all we have
+11
+ pld [r1, #64] ; prefetch ahead
+ subs r2, r2, #32 ; count the bytes for this block
+ ldm r1!, {r4-r9, r12, lr} ; load 32 bytes
+ stm r3!, {r4-r9, r12, lr} ; store 32 bytes
+ bhs %B11 ; keep going until we're done
+12
+ ldm r1!, {r4-r9, r12, lr} ; load 32 bytes
+ stm r3!, {r4-r9, r12, lr} ; store 32 bytes
+13
+ adds r2, r2, #(32 - 8) ; recover original count, and pre-decrement
+ blo %F15 ; if not enough remaining, skip this loop
+14
+ subs r2, r2, #8 ; decrement count
+ ldrd r4, r5, [r1], #8 ; fetch pair of words
+ strd r4, r5, [r3], #8 ; store pair of words
+ bhs %B14 ; loop while we still have data remaining
+15
+ adds r2, r2, #8 ; recover final count
+
+ EPILOG_POP {r4-r9, r11, lr}
+ EPILOG_NOP bne CpySmal ; if some left, continue with small
+ EPILOG_RETURN ; else just return
+
+;
+; Source is not word-aligned; slow case
+;
+
+20
+ subs r2, r2, #64 ; pre-decrement to simplify the loop
+ blo %23 ; skip over the loop if we don't have enough
+ pld [r1, #32] ; pre-load one block ahead
+21
+ pld [r1, #64] ; prefetch ahead
+ ldr r4, [r1, #0] ; load 32 bytes
+ ldr r5, [r1, #4] ;
+ ldr r6, [r1, #8] ;
+ ldr r7, [r1, #12] ;
+ ldr r8, [r1, #16] ;
+ ldr r9, [r1, #20] ;
+ ldr r12, [r1, #24] ;
+ ldr lr, [r1, #28] ;
+ adds r1, r1, #32 ; update pointer
+ subs r2, r2, #32 ; count the bytes for this block
+ stm r3!, {r4-r9, r12, lr} ; store 32 bytes
+ bhs %B21 ; keep going until we're done
+23
+ adds r2, r2, #(64 - 8) ; recover original count, and pre-decrement
+ blo %F25 ; if not enough remaining, skip this loop
+24
+ ldr r4, [r1] ; fetch pair of words
+ ldr r5, [r1, #4] ;
+ adds r1, r1, #8 ; update pointer
+ subs r2, r2, #8 ; decrement count
+ strd r4, r5, [r3], #8 ; store pair of words
+ bhs %B24 ; loop while we still have data remaining
+25
+ adds r2, r2, #8 ; recover final count
+
+ EPILOG_POP {r4-r9, r11, lr}
+ EPILOG_NOP bne CpySmal ; if some left, continue with small
+ EPILOG_RETURN ; else just return
+
+ NESTED_END __memcpy_forward_large_integer
+
+
+;
+; __memcpy_forward_large_neon (internal calling convention)
+;
+; Copy large (>= 16 bytes) blocks of memory in a forward direction,
+; using NEON registers.
+;
+
+#if !defined(_BOOTCRT_)
+
+ ALIGN 32
+ NESTED_ENTRY __memcpy_forward_large_neon_wrapper
+
+__memcpy_forward_large_neon
+
+ PROLOG_PUSH {r4-r5, r11, lr}
+
+ subs r2, r2, #32 ; pre-decrement to simplify the loop
+ blo %F13 ; skip over the loop if we don't have enough
+ subs r2, r2, #32 ; pre-decrement to simplify the loop
+ pld [r1, #32] ; pre-load one block ahead
+ blo %F12 ; skip over the loop if we don't have enough
+11
+ pld [r1, #64] ; prefetch ahead
+ subs r2, r2, #32 ; count the bytes for this block
+ vld1.8 {d0-d3}, [r1]! ; load 32 bytes
+ vst1.8 {d0-d3}, [r3]! ; store 32 bytes
+ bhs %B11 ; keep going until we're done
+12
+ vld1.8 {d0-d3}, [r1]! ; load 32 bytes
+ vst1.8 {d0-d3}, [r3]! ; store 32 bytes
+13
+ adds r2, r2, #(32 - 8) ; recover original count, and pre-decrement
+ blo %F15 ; if not enough remaining, skip this loop
+14
+ ldr r4, [r1] ; fetch pair of words
+ ldr r5, [r1, #4] ;
+ adds r1, r1, #8 ; update pointer
+ str r4, [r3] ; store pair of words
+ str r5, [r3, #4] ;
+ adds r3, r3, #8
+ subs r2, r2, #8 ; decrement count
+ bhs %B14 ; loop while we still have data remaining
+15
+ adds r2, r2, #8 ; recover final count
+
+ EPILOG_POP {r4-r5, r11, lr}
+ EPILOG_NOP bne CpySmal ; if some left, continue with small
+ EPILOG_RETURN ; else just return
+
+ NESTED_END __memcpy_forward_large_neon
+
+#endif
+
+
+;
+; void *memmove(void *dst, const void *src, size_t length)
+;
+; Copy a block of memory in a forward or reverse direction, ensuring that
+; overlapping source/destination regions are copied correctly.
+;
+
+ ALIGN 32
+ LEAF_ENTRY memmove
+
+ subs r3, r0, r1 ; compute dest - source
+ cmp r3, r2 ; compare against size
+ bhs memcpy ; if no overlap, we can just do memcpy
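+ ; (the subtraction is unsigned: when dst is below src the difference wraps
+ ; to a huge value, so that case also takes the forward memcpy path, which
+ ; is safe for that direction of overlap)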
+
+ ALTERNATE_ENTRY __memcpy_reverse_new
+
+ cmp r2, #16 ; less than 16 bytes?
+ pld [r1] ; preload the first cache line
+ bhs MovLrge ; go to the large copy case directly
+
+MovSmal tbb [pc, r2] ; branch to specialized bits for small copies
+__SwitchTable1_Move
+MTable dcb (Move0 - MTable) / 2 ; 0B
+ dcb (Move1 - MTable) / 2 ; 1B
+ dcb (Move2 - MTable) / 2 ; 2B
+ dcb (Move3 - MTable) / 2 ; 3B
+ dcb (Move4 - MTable) / 2 ; 4B
+ dcb (Move5 - MTable) / 2 ; 5B
+ dcb (Move6 - MTable) / 2 ; 6B
+ dcb (Move7 - MTable) / 2 ; 7B
+ dcb (Move8 - MTable) / 2 ; 8B
+ dcb (Move9 - MTable) / 2 ; 9B
+ dcb (Move10 - MTable) / 2 ; 10B
+ dcb (Move11 - MTable) / 2 ; 11B
+ dcb (Move12 - MTable) / 2 ; 12B
+ dcb (Move13 - MTable) / 2 ; 13B
+ dcb (Move14 - MTable) / 2 ; 14B
+ dcb (Move15 - MTable) / 2 ; 15B
+__SwitchTableEnd_Move
+
+Move1 ldrb r2, [r1]
+ strb r2, [r0]
+Move0 bx lr
+
+Move2 ldrh r2, [r1]
+ strh r2, [r0]
+ bx lr
+
+Move3 ldrh r2, [r1]
+ ldrb r1, [r1, #2]
+ strh r2, [r0]
+ strb r1, [r0, #2]
+ bx lr
+
+Move4 ldr r2, [r1]
+ str r2, [r0]
+ bx lr
+
+Move5 ldr r2, [r1]
+ ldrb r1, [r1, #4]
+ str r2, [r0]
+ strb r1, [r0, #4]
+ bx lr
+
+Move6 ldr r2, [r1]
+ ldrh r1, [r1, #4]
+ str r2, [r0]
+ strh r1, [r0, #4]
+ bx lr
+
+Move7 ldr r3, [r1]
+ ldrh r2, [r1, #4]
+ ldrb r1, [r1, #6]
+ str r3, [r0]
+ strh r2, [r0, #4]
+ strb r1, [r0, #6]
+ bx lr
+
+Move8 ldr r2, [r1]
+ ldr r1, [r1, #4]
+ str r2, [r0]
+ str r1, [r0, #4]
+ bx lr
+
+Move9 ldr r3, [r1]
+ ldr r2, [r1, #4]
+ ldrb r1, [r1, #8]
+ str r3, [r0]
+ str r2, [r0, #4]
+ strb r1, [r0, #8]
+ bx lr
+
+Move10 ldr r3, [r1]
+ ldr r2, [r1, #4]
+ ldrh r1, [r1, #8]
+ str r3, [r0]
+ str r2, [r0, #4]
+ strh r1, [r0, #8]
+ bx lr
+
+Move11 ldr r12, [r1]
+ ldr r3, [r1, #4]
+ ldrh r2, [r1, #8]
+ ldrb r1, [r1, #10]
+ str r12, [r0]
+ str r3, [r0, #4]
+ strh r2, [r0, #8]
+ strb r1, [r0, #10]
+ bx lr
+
+Move12 ldr r12, [r1]
+ ldr r2, [r1, #4]
+ ldr r1, [r1, #8]
+ str r12, [r0]
+ str r2, [r0, #4]
+ str r1, [r0, #8]
+ bx lr
+
+Move13 ldr r12, [r1]
+ ldr r3, [r1, #4]
+ ldr r2, [r1, #8]
+ ldrb r1, [r1, #12]
+ str r12, [r0]
+ str r3, [r0, #4]
+ str r2, [r0, #8]
+ strb r1, [r0, #12]
+ bx lr
+
+Move14 ldr r12, [r1]
+ ldr r3, [r1, #4]
+ ldr r2, [r1, #8]
+ ldrh r1, [r1, #12]
+ str r12, [r0]
+ str r3, [r0, #4]
+ str r2, [r0, #8]
+ strh r1, [r0, #12]
+ bx lr
+
+Move15 ldrh r3, [r1, #12]
+ ldrb r2, [r1, #14]
+ strh r3, [r0, #12]
+ strb r2, [r0, #14]
+ ldr r3, [r1]
+ ldr r2, [r1, #4]
+ ldr r1, [r1, #8]
+ str r3, [r0]
+ str r2, [r0, #4]
+ str r1, [r0, #8]
+ bx lr
+
+MovLrge
+
+#if defined(_BOOTCRT_)
+
+ b __memcpy_reverse_large_integer ; always use integer in boot code
+
+#else
+
+ eor r12, r0, r1 ; see if src/dst are equally aligned
+ tst r12, #3 ; at least to a 4 byte boundary
+ bne __memcpy_reverse_large_neon ; if not, always use NEON
+ mov32 r12, __memcpy_reverse_large_func
+ ldr pc, [r12]
+
+#endif
+
+ LEAF_END memmove
+
+
+;
+; __memcpy_reverse_large_integer (internal calling convention)
+;
+; Copy large (>= 16 bytes) block of memory in a reverse direction,
+; using integer registers only.
+;
+
+ ALIGN 32
+ NESTED_ENTRY __memcpy_reverse_large_integer_wrapper
+
+__memcpy_reverse_large_integer
+
+ PROLOG_NOP adds r3, r0, r2 ; advance destination to end
+ PROLOG_NOP adds r1, r1, r2 ; advance source to end
+ PROLOG_NOP lsls r12, r3, #31 ; C = bit 1, N = bit 0
+ PROLOG_NOP pld [r1, #-32] ; pre-load one block ahead
+ PROLOG_PUSH {r4-r9, r11, lr}
+
+;
+; Align destination to a word boundary
+;
+
+ bpl %F1
+ ldrb r4, [r1, #-1]! ; fetch byte
+ subs r2, r2, #1 ; decrement count
+ strb r4, [r3, #-1]! ; store byte
+ lsls r12, r3, #31 ; compute updated status
+1
+ bcc %F2 ; if already aligned, just skip ahead
+ ldrh r4, [r1, #-2]! ; fetch halfword
+ subs r2, r2, #2 ; decrement count
+ strh r4, [r3, #-2]! ; store halfword
+2
+ tst r1, #3 ; is the source now word-aligned?
+ bne %F20 ; if not, we have to use the slow path
+
+;
+; Source is word-aligned; fast case
+;
+
+10
+ subs r2, r2, #32 ; pre-decrement to simplify the loop
+ blo %F13 ; skip over the loop if we don't have enough
+ subs r2, r2, #32 ; pre-decrement to simplify the loop
+ pld [r1, #-64] ; pre-load one block ahead
+ blo %F12 ; skip over the loop if we don't have enough
+11
+ pld [r1, #-96] ; prefetch ahead
+ subs r2, r2, #32 ; count the bytes for this block
+ ldmdb r1!, {r4-r9, r12, lr} ; load 32 bytes
+ stmdb r3!, {r4-r9, r12, lr} ; store 32 bytes
+ bhs %B11 ; keep going until we're done
+12
+ ldmdb r1!, {r4-r9, r12, lr} ; load 32 bytes
+ stmdb r3!, {r4-r9, r12, lr} ; store 32 bytes
+13
+ adds r2, r2, #(32 - 8) ; recover original count, and pre-decrement
+ blo %F15 ; if not enough remaining, skip this loop
+14
+ subs r2, r2, #8 ; decrement count
+ ldrd r4, r5, [r1, #-8]! ; fetch pair of words
+ strd r4, r5, [r3, #-8]! ; store pair of words
+ bhs %B14 ; loop while we still have data remaining
+15
+ adds r2, r2, #8 ; determine final count
+ subs r1, r1, r2 ; recover original source
+
+ EPILOG_POP {r4-r9, r11, lr}
+ EPILOG_NOP bne MovSmal ; if some left, continue with small
+ EPILOG_RETURN ; else just return
+
+
+;
+; Source is not word-aligned; slow case
+;
+
+20
+ subs r2, r2, #64 ; pre-decrement to simplify the loop
+ blo %F23 ; skip over the loop if we don't have enough
+ pld [r1, #-64] ; pre-load one block ahead
+21
+ pld [r1, #-96] ; prefetch ahead
+ subs r2, r2, #32 ; count the bytes for this block
+ ldr r4, [r1, #-32]! ; load 32 bytes
+ ldr r5, [r1, #4] ;
+ ldr r6, [r1, #8] ;
+ ldr r7, [r1, #12] ;
+ ldr r8, [r1, #16] ;
+ ldr r9, [r1, #20] ;
+ ldr r12, [r1, #24] ;
+ ldr lr, [r1, #28] ;
+ stmdb r3!, {r4-r9, r12, lr} ; store 32 bytes
+ bhs %B21 ; keep going until we're done
+23
+ adds r2, r2, #(64 - 8) ; recover original count, and pre-decrement
+ blo %F25 ; if not enough remaining, skip this loop
+24
+ subs r2, r2, #8 ; decrement count
+ ldr r4, [r1, #-8]! ; fetch pair of words
+ ldr r5, [r1, #4] ;
+ strd r4, r5, [r3, #-8]! ; store pair of words
+ bhs %B24 ; loop while we still have data remaining
+25
+ adds r2, r2, #8 ; determine final count
+ subs r1, r1, r2 ; recover original source
+
+ EPILOG_POP {r4-r9, r11, lr}
+ EPILOG_NOP bne MovSmal ; if some left, continue with small
+ EPILOG_RETURN ; else just return
+
+ NESTED_END __memcpy_reverse_large_integer
+
+
+;
+; __memcpy_reverse_large_neon (internal calling convention)
+;
+; Copy large (>= 16 bytes) block of memory in a reverse direction,
+; using NEON registers.
+;
+
+#if !defined(_BOOTCRT_)
+
+ ALIGN 32
+ NESTED_ENTRY __memcpy_reverse_large_neon_wrapper
+
+__memcpy_reverse_large_neon
+
+ PROLOG_NOP adds r3, r0, r2 ; advance destination to end
+ PROLOG_NOP adds r1, r1, r2 ; advance source to end
+ PROLOG_NOP lsls r12, r3, #31 ; C = bit 1, N = bit 0
+ PROLOG_NOP pld [r1, #-32] ; pre-load one block ahead
+ PROLOG_PUSH {r4-r5, r11, lr}
+
+;
+; Align destination to a word boundary
+;
+
+ bpl %F1
+ ldrb r4, [r1, #-1]! ; fetch byte
+ subs r2, r2, #1 ; decrement count
+ strb r4, [r3, #-1]! ; store byte
+ lsls r12, r3, #31 ; compute updated status
+1
+ bcc %F2 ; if already aligned, just skip ahead
+ ldrh r4, [r1, #-2]! ; fetch halfword
+ subs r2, r2, #2 ; decrement count
+ strh r4, [r3, #-2]! ; store halfword
+2
+
+;
+; Perform main copy
+;
+
+ subs r2, r2, #32 ; pre-decrement to simplify the loop
+ blo %F13 ; skip over the loop if we don't have enough
+ subs r2, r2, #32 ; pre-decrement to simplify the loop
+ pld [r1, #-64] ; pre-load one block ahead
+ blo %F12 ; skip over the loop if we don't have enough
+11
+ pld [r1, #-96] ; prefetch ahead
+ subs r1, r1, #32
+ subs r3, r3, #32
+ subs r2, r2, #32 ; count the bytes for this block
+ vld1.8 {d0-d3}, [r1] ; load 32 bytes
+ vst1.8 {d0-d3}, [r3] ; store 32 bytes
+ bhs %B11 ; keep going until we're done
+12
+ subs r1, r1, #32
+ subs r3, r3, #32
+ vld1.8 {d0-d3}, [r1] ; load 32 bytes
+ vst1.8 {d0-d3}, [r3] ; store 32 bytes
+13
+ adds r2, r2, #(32 - 8) ; recover original count, and pre-decrement
+ blo %F15 ; if not enough remaining, skip this loop
+14
+ ldr r4, [r1, #-8]! ; fetch pair of words
+ ldr r5, [r1, #4] ; fetch pair of words
+ subs r2, r2, #8 ; decrement count
+ str r4, [r3, #-8]! ; store pair of words
+ str r5, [r3, #4]
+ bhs %B14 ; loop while we still have data remaining
+15
+ adds r2, r2, #8 ; determine final count
+ subs r1, r1, r2 ; recover original source
+
+ EPILOG_POP {r4-r5, r11, lr}
+ EPILOG_NOP bne MovSmal ; if some left, continue with small
+ EPILOG_RETURN ; else just return
+
+ NESTED_END __memcpy_reverse_large_neon
+
+#endif
+
+
+;
+; __memcpy_decide (internal calling convention)
+;
+; Determine whether to use integer or NEON for future memcpy's.
+;
+
+#if !defined(_BOOTCRT_)
+
+ ALIGN 32
+ NESTED_ENTRY __memcpy_decide_wrapper
+
+__memcpy_decide
+
+ PROLOG_PUSH {r4-r5, r11, lr}
+
+ ;
+ ; We want to use integer memcpy's on the A9, which has an external cache.
+ ;
+ ; First determine if we're in user or kernel mode. Reading CPSR
+ ; from user mode will either return the proper 5 mode bits, or all 0s.
+ ; Conveniently, user mode is 0x10, and there is no mode 0x00, so if
+ ; we read CPSR and the low 4 bits are 0, that's good enough.
+ ;
+
+ mrs r4, cpsr ; get CPSR
+ ands r4, r4, #0xf ; isolate the low 4 bits of the mode
+ beq %F1 ; if 0, we're in user mode
+
+ ;
+ ; If we are in kernel mode, read the MIDR directly.
+ ;
+
+ CP_READ r4, CP15_MIDR ; read main ID register
+ ubfx r5, r4, #24, #8 ; get implementer
+ lsrs r4, r4, #4 ; shift off revision field
+ cmp r5, #0x41 ; is implementer == ARM?
+ bne %F3 ; if not, use NEON
+ bfc r4, #12, #20 ; clear upper bits
+ ldr r5, =0xc09 ; A9 signature
+ cmp r4, r5 ; is this an A9?
+ bne %F3 ; if not, use NEON
+ b %F2 ; otherwise, use integer
+
+ ;
+ ; If we are in user mode, check the "external cache available" flag
+ ;
+1
+ ldr r4, =MM_SHARED_USER_DATA_VA + UsProcessorFeatures + PF_ARM_EXTERNAL_CACHE_AVAILABLE
+ ldrb r4, [r4] ; get external cache bit
+ cbz r4, %F3 ; if no external cache, do NEON
+
+ ;
+ ; Register for integer functions
+ ;
+2
+ ldr r4, =__memcpy_forward_large_integer ; select integer functions
+ ldr r5, =__memcpy_forward_large_func ;
+ str r4, [r5] ;
+ ldr r4, =__memcpy_reverse_large_integer ; select integer functions
+ ldr r5, =__memcpy_reverse_large_func ;
+ str r4, [r5] ;
+ b %F4
+
+ ;
+ ; Register for NEON functions
+ ;
+3
+ ldr r4, =__memcpy_forward_large_neon ; select NEON functions
+ ldr r5, =__memcpy_forward_large_func ;
+ str r4, [r5] ;
+ ldr r4, =__memcpy_reverse_large_neon ; select NEON functions
+ ldr r5, =__memcpy_reverse_large_func ;
+ str r4, [r5] ;
+4
+ EPILOG_POP {r4-r5, r11, lr} ; restore saved registers
+ EPILOG_NOP ldr pc, [r12] ; jump to the appropriate target
+
+ NESTED_END __memcpy_decide
+
+#endif
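+
+; In C terms, the selection above runs once and caches its answer in the two
+; function pointers (a sketch; the helper names are hypothetical):
+;
+;   bool integer = in_kernel_mode() ? midr_is_cortex_a9()
+;                                   : user_shared_data_external_cache_flag();
+;   __memcpy_forward_large_func = integer ? __memcpy_forward_large_integer
+;                                         : __memcpy_forward_large_neon;
+;   __memcpy_reverse_large_func = integer ? __memcpy_reverse_large_integer
+;                                         : __memcpy_reverse_large_neon;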
+
+
+;
+; void _memcpy_strict_align(void *dst, const void *src, size_t length)
+;
+; Copy a block of memory in a forward direction, only performing naturally-aligned
+; accesses.
+;
+
+ ALIGN 32
+ LEAF_ENTRY _memcpy_strict_align
+
+;
+; Verify alignment between source and destination
+;
+
+ sub r3, r0, r1 ; get relative alignment of source and destination
+ cbz r2, CopyExit ; exit if 0 count
+ ands r3, r3, #3 ; check DWORD alignment
+ bne CopyMisalignedHalf ; misaligned
+
+;
+; Source and destination are equally aligned: just align the
+; destination and the source will end up aligned as well
+;
+
+ tst r0, #3 ; dword aligned at the dest?
+ beq WordAligned_0 ; if so, skip ahead
+ tst r0, #1 ; halfword aligned at the dest?
+ beq HalfAligned_0 ; if so, skip ahead
+
+ subs r2, r2, #1 ; decrement count
+ ldrb r3, [r1], #1 ; fetch byte
+ strb r3, [r0], #1 ; store it
+ beq CopyExit ; stop if done
+ tst r0, #3 ; word aligned now?
+ beq WordAligned_0 ; if so, skip ahead
+
+HalfAligned_0
+ cmp r2, #2 ; do we have at least 2 bytes left?
+ blo CopyFinalBytes ; if not, copy bytes
+ subs r2, r2, #2 ; decrement count
+ ldrh r3, [r1], #2 ; fetch halfword
+ strh r3, [r0], #2 ; store it
+ beq CopyExit ; stop if done
+
+WordAligned_0
+ subs r2, r2, #4 ; at least 4 bytes remaining?
+ blt WordLoopEnd_0 ; if not, skip the main loop
+WordLoop_0
+ subs r2, r2, #4 ; decrement count
+ ldr r3, [r1], #4 ; fetch word
+ str r3, [r0], #4 ; store it
+ bge WordLoop_0 ; loop until done
+WordLoopEnd_0
+ adds r2, r2, #4 ; recover the extra 4 we subtracted
+ beq CopyExit ; stop if that's everything
+
+CopyFinalHalfwords
+ subs r2, r2, #2 ; at least 2 bytes remaining?
+ blt CopyFinalHalfwordsEnd ; if not, skip this
+CopyFinalHalfwordsLoop
+ subs r2, r2, #2 ; decrement count
+ ldrh r3, [r1], #2 ; fetch halfword
+ strh r3, [r0], #2 ; store it
+ bge CopyFinalHalfwordsLoop ; loop until done
+CopyFinalHalfwordsEnd
+ adds r2, r2, #2 ; recover the extra 2 we subtracted
+ beq CopyExit ; stop if that's everything
+
+CopyFinalBytes
+ subs r2, r2, #1 ; decrement count
+ ldrb r3, [r1], #1 ; fetch byte
+ strb r3, [r0], #1 ; store it
+ bne CopyFinalBytes ; loop until done
+CopyExit
+ bx lr ; return
+
+
+;
+; Source and destination are misaligned by 2 bytes
+;
+
+CopyMisalignedHalf
+ cmp r3, #2 ; misaligned by a halfword?
+ bne CopyMisalignedByte ; if not, skip
+
+ tst r0, #3 ; dword aligned at the dest?
+ beq WordAligned_2 ; if so, skip ahead
+ tst r0, #1 ; halfword aligned at the dest?
+ beq HalfAligned_2 ; if so, skip ahead
+
+ subs r2, r2, #1 ; decrement count
+ ldrb r3, [r1], #1 ; fetch byte
+ strb r3, [r0], #1 ; store it
+ beq CopyExit ; stop if done
+ tst r0, #3 ; word aligned now?
+ beq WordAligned_2 ; if so, skip ahead
+
+HalfAligned_2
+ cmp r2, #2 ; do we have at least 2 bytes left?
+ blo CopyFinalBytes ; if not, copy bytes
+ subs r2, r2, #2 ; decrement count
+ ldrh r3, [r1], #2 ; fetch halfword
+ strh r3, [r0], #2 ; store it
+ beq CopyExit ; stop if done
+
+WordAligned_2
+ subs r2, r2, #6 ; at least 6 bytes remaining?
+ blt WordLoopEnd_2 ; if not, skip the main loop
+ ldrh r12, [r1], #2 ; preload a halfword of source
+ subs r2, r2, #2 ; count these 2 bytes
+WordLoop_2
+ subs r2, r2, #4 ; decrement count
+ ldr r3, [r1], #4 ; fetch word
+ orr r12, r12, r3, lsl #16 ; copy low 16 bits to upper 16 of r12
+ str r12, [r0], #4 ; store it
+ lsr r12, r3, #16 ; copy upper 16 bits to lower 16 of r12
+ bge WordLoop_2 ; loop until done
+ strh r12, [r0], #2 ; store the extra halfword to the dest
+WordLoopEnd_2
+ adds r2, r2, #6 ; recover the extra 6 we subtracted
+ beq CopyExit ; stop if that's everything
+ b CopyFinalHalfwords ; otherwise, copy remainder
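+
+; In C, this stitching loop is roughly (a sketch, ignoring strict aliasing):
+;
+;   uint32_t carry = *(uint16_t *)src; src += 2;   /* preloaded halfword      */
+;   while ((count -= 4) >= 0) {
+;       uint32_t w = *(uint32_t *)src; src += 4;   /* aligned word fetch      */
+;       *(uint32_t *)dst = carry | (w << 16); dst += 4;
+;       carry = w >> 16;                           /* carry high half forward */
+;   }
+;   *(uint16_t *)dst = (uint16_t)carry; dst += 2;  /* flush carried halfword  */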
+
+
+;
+; Source and destination are misaligned by 1 byte
+;
+
+CopyMisalignedByte
+ cmp r3, #1 ; misaligned by a byte?
+ bne CopyMisalignedByte3 ; if not, skip
+
+ tst r0, #3 ; dword aligned at the dest?
+ beq WordAligned_1 ; if so, skip ahead
+ByteAlign_1
+ subs r2, r2, #1 ; decrement count
+ ldrb r3, [r1], #1 ; fetch byte
+ strb r3, [r0], #1 ; store it
+ beq CopyExit ; stop if done
+ tst r0, #3 ; word aligned now?
+ bne ByteAlign_1 ; if not, keep copying bytes
+
+WordAligned_1
+ subs r2, r2, #5 ; at least 5 bytes remaining?
+ blt WordLoopEnd_1 ; if not, skip the main loop
+ ldrb r12, [r1], #1 ; preload a byte of source
+ subs r2, r2, #1 ; count this byte
+WordLoop_1
+ subs r2, r2, #4 ; decrement count
+ ldr r3, [r1], #4 ; fetch word
+ orr r12, r12, r3, lsl #8 ; copy low 24 bits to upper 24 of r12
+ str r12, [r0], #4 ; store it
+ lsr r12, r3, #24 ; copy upper 8 bits to lower 8 of r12
+ bge WordLoop_1 ; loop until done
+ strb r12, [r0], #1 ; store the extra byte to the dest
+WordLoopEnd_1
+ adds r2, r2, #5 ; recover the extra 5 we subtracted
+ beq CopyExit ; stop if that's everything
+ b CopyFinalBytes ; otherwise, copy remainder
+
+
+;
+; Source and destination are misaligned by 3 bytes
+;
+
+CopyMisalignedByte3
+ tst r0, #3 ; dword aligned at the dest?
+ beq WordAligned_3 ; if so, skip ahead
+ByteAlign_3
+ subs r2, r2, #1 ; decrement count
+ ldrb r3, [r1], #1 ; fetch byte
+ strb r3, [r0], #1 ; store it
+ beq CopyExit ; stop if done
+ tst r0, #3 ; word aligned now?
+ bne ByteAlign_3 ; if not, keep copying bytes
+
+WordAligned_3
+ subs r2, r2, #7 ; at least 7 bytes remaining?
+ blt WordLoopEnd_3 ; if not, skip the main loop
+ ldrb r12, [r1], #1 ; preload a byte of source
+ ldrh r3, [r1], #2 ; preload a halfword of source
+ orr r12, r12, r3, lsl #8 ; OR in the halfword
+ subs r2, r2, #3 ; count these 3 bytes
+WordLoop_3
+ subs r2, r2, #4 ; decrement count
+ ldr r3, [r1], #4 ; fetch word
+ orr r12, r12, r3, lsl #24 ; copy low 8 bits to upper 8 of r12
+ str r12, [r0], #4 ; store it
+ lsr r12, r3, #8 ; copy upper 24 bits to lower 24 of r12
+ bge WordLoop_3 ; loop until done
+ strh r12, [r0], #2 ; store the extra halfword to the dest
+ lsr r12, r12, #16 ; down to the final byte
+ strb r12, [r0], #1 ; store the extra byte to the dest
+WordLoopEnd_3
+ adds r2, r2, #7 ; recover the extra 7 we subtracted
+ beq CopyExit ; stop if that's everything
+ b CopyFinalBytes ; otherwise, copy remainder
+
+ LEAF_END _memcpy_strict_align
+
+ END
diff --git a/src/vm/arm/patchedcode.asm b/src/vm/arm/patchedcode.asm
new file mode 100644
index 0000000000..9c3191e23b
--- /dev/null
+++ b/src/vm/arm/patchedcode.asm
@@ -0,0 +1,603 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+;; ==++==
+;;
+
+;;
+;; ==--==
+#include "ksarm.h"
+
+#include "asmconstants.h"
+
+#include "asmmacros.h"
+
+ SETALIAS JIT_New, ?JIT_New@@YAPAVObject@@PAUCORINFO_CLASS_STRUCT_@@@Z
+ SETALIAS JIT_Box, ?JIT_Box@@YAPAVObject@@PAUCORINFO_CLASS_STRUCT_@@PAX@Z
+ SETALIAS FramedAllocateString, ?FramedAllocateString@@YAPAVStringObject@@K@Z
+ SETALIAS g_pStringClass, ?g_pStringClass@@3PAVMethodTable@@A
+ SETALIAS JIT_NewArr1, ?JIT_NewArr1@@YAPAVObject@@PAUCORINFO_CLASS_STRUCT_@@H@Z
+ SETALIAS CopyValueClassUnchecked, ?CopyValueClassUnchecked@@YAXPAX0PAVMethodTable@@@Z
+
+ IMPORT $JIT_New
+ IMPORT $JIT_Box
+ IMPORT $FramedAllocateString
+ IMPORT $g_pStringClass
+ IMPORT $JIT_NewArr1
+ IMPORT $CopyValueClassUnchecked
+ IMPORT SetAppDomainInObject
+
+
+ IMPORT JIT_GetSharedNonGCStaticBase_Helper
+ IMPORT JIT_GetSharedGCStaticBase_Helper
+
+
+ EXPORT JIT_TrialAllocSFastMP_InlineGetThread__PatchTLSOffset
+ EXPORT JIT_BoxFastMP_InlineGetThread__PatchTLSOffset
+ EXPORT AllocateStringFastMP_InlineGetThread__PatchTLSOffset
+ EXPORT JIT_NewArr1VC_MP_InlineGetThread__PatchTLSOffset
+ EXPORT JIT_NewArr1OBJ_MP_InlineGetThread__PatchTLSOffset
+
+ EXPORT JIT_GetSharedNonGCStaticBase__PatchTLSLabel
+ EXPORT JIT_GetSharedNonGCStaticBaseNoCtor__PatchTLSLabel
+ EXPORT JIT_GetSharedGCStaticBase__PatchTLSLabel
+ EXPORT JIT_GetSharedGCStaticBaseNoCtor__PatchTLSLabel
+
+ MACRO
+ PATCHABLE_INLINE_GETTHREAD $reg, $label
+$label
+ mrc p15, 0, $reg, c13, c0, 2
+ ldr $reg, [$reg, #0xe10]
+ MEND
+
+
+ MACRO
+ PATCHABLE_INLINE_GETAPPDOMAIN $reg, $label
+$label
+ mrc p15, 0, $reg, c13, c0, 2
+ ldr $reg, [$reg, #0xe10]
+ MEND
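+
+; Both macros read the TEB (CP15 c13/c0/2 holds it in user mode on Windows on
+; ARM) and then load one TLS slot; 0xe10 is the offset of the TlsSlots array
+; in the 32-bit TEB. The load displacement is only a placeholder: the runtime
+; patches it through the exported $label once the real TLS slot is known.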
+
+ TEXTAREA
+
+
+ MACRO
+ FIX_INDIRECTION $Reg, $label
+#ifdef FEATURE_PREJIT
+ tst $Reg, #1
+ beq $label
+ ldr $Reg, [$Reg, #-1]
+$label
+#endif
+ MEND
+
+
+; ------------------------------------------------------------------
+; Start of the writeable code region
+ LEAF_ENTRY JIT_PatchedCodeStart
+ bx lr
+ LEAF_END
+
+; ------------------------------------------------------------------
+; Optimized TLS getters
+
+ ALIGN 4
+ LEAF_ENTRY GetThread
+ ; This will be overwritten at runtime with optimized GetThread implementation
+ b GetTLSDummy
+ ; Just allocate space that will be filled in at runtime
+ SPACE (TLS_GETTER_MAX_SIZE_ASM - 2)
+ LEAF_END
+
+ ALIGN 4
+ LEAF_ENTRY GetAppDomain
+ ; This will be overwritten at runtime with optimized GetAppDomain implementation
+ b GetTLSDummy
+ ; Just allocate space that will be filled in at runtime
+ SPACE (TLS_GETTER_MAX_SIZE_ASM - 2)
+ LEAF_END
+
+ LEAF_ENTRY GetTLSDummy
+ mov r0, #0
+ bx lr
+ LEAF_END
+
+ ALIGN 4
+ LEAF_ENTRY ClrFlsGetBlock
+ ; This will be overwritten at runtime with optimized ClrFlsGetBlock implementation
+ b GetTLSDummy
+ ; Just allocate space that will be filled in at runtime
+ SPACE (TLS_GETTER_MAX_SIZE_ASM - 2)
+ LEAF_END
+
+; ------------------------------------------------------------------
+; GC write barrier support.
+;
+; GC Write barriers are defined in asmhelpers.asm. The following functions are used to define
+; patchable location where the write-barriers are copied over at runtime
+
+ LEAF_ENTRY JIT_PatchedWriteBarrierStart
+ LEAF_END
+
+ ; These write barriers are overwritten on the fly
+ ; See ValidateWriteBarriers on how the sizes of these should be calculated
+ ALIGN 4
+ LEAF_ENTRY JIT_WriteBarrier
+ SPACE (0x84)
+ LEAF_END_MARKED JIT_WriteBarrier
+
+ ALIGN 4
+ LEAF_ENTRY JIT_CheckedWriteBarrier
+ SPACE (0x9C)
+ LEAF_END_MARKED JIT_CheckedWriteBarrier
+
+ ALIGN 4
+ LEAF_ENTRY JIT_ByRefWriteBarrier
+ SPACE (0xA0)
+ LEAF_END_MARKED JIT_ByRefWriteBarrier
+
+ LEAF_ENTRY JIT_PatchedWriteBarrierLast
+ LEAF_END
+
+; JIT Allocation helpers when TLS Index for Thread is low enough for fast helpers
+
+;---------------------------------------------------------------------------
+; IN: r0: MethodTable*
+;; OUT: r0: new object
+
+ LEAF_ENTRY JIT_TrialAllocSFastMP_InlineGetThread
+
+ ;get object size
+ ldr r1, [r0, #MethodTable__m_BaseSize]
+
+ ; m_BaseSize is guaranteed to be a multiple of 4.
+
+ ;getThread
+ PATCHABLE_INLINE_GETTHREAD r12, JIT_TrialAllocSFastMP_InlineGetThread__PatchTLSOffset
+
+ ;load current allocation pointers
+ ldr r2, [r12, #Thread__m_alloc_context__alloc_limit]
+ ldr r3, [r12, #Thread__m_alloc_context__alloc_ptr]
+
+ ;add object size to current pointer
+ add r1, r3
+
+ ;if beyond the limit call c++ method
+ cmp r1, r2
+ bhi AllocFailed
+
+ ;r1 is the new alloc_ptr and r3 has object address
+ ;update the alloc_ptr in Thread
+ str r1, [r12, #Thread__m_alloc_context__alloc_ptr]
+
+ ;write methodTable in object
+ str r0, [r3]
+
+ ;return object in r0
+ mov r0, r3
+
+#ifdef _DEBUG
+ ; Tail call to a helper that will set the current AppDomain index into the object header and then
+ ; return the object pointer back to our original caller.
+ b SetAppDomainInObject
+#else
+ ;return
+ bx lr
+#endif
+
+AllocFailed
+ b $JIT_New
+ LEAF_END
+
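+;---------------------------------------------------------------------------
+; The fast path above is a thread-local bump allocation. In C it is roughly
+; (a sketch with simplified field names):
+;
+;   Object* TrialAllocSFast(MethodTable* pMT)
+;   {
+;       Thread* t = GetThread();                 /* patched inline TLS read     */
+;       BYTE* obj = t->alloc_ptr;
+;       BYTE* end = obj + pMT->m_BaseSize;       /* BaseSize is a multiple of 4 */
+;       if (end > t->alloc_limit)
+;           return JIT_New(pMT);                 /* slow path, may trigger a GC */
+;       t->alloc_ptr = end;
+;       *(MethodTable**)obj = pMT;               /* install the method table    */
+;       return (Object*)obj;
+;   }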
+
+;---------------------------------------------------------------------------
+; HCIMPL2(Object*, JIT_Box, CORINFO_CLASS_HANDLE type, void* unboxedData)
+; IN: r0: MethodTable*
+; IN: r1: data pointer
+;; OUT: r0: new object
+
+ LEAF_ENTRY JIT_BoxFastMP_InlineGetThread
+
+ ldr r2, [r0, #MethodTable__m_pWriteableData]
+
+ ;Check whether the class has been initialized
+ ldr r2, [r2, #MethodTableWriteableData__m_dwFlags]
+ tst r2, #MethodTableWriteableData__enum_flag_Unrestored
+ bne ClassNotInited
+
+ ; Check whether the object contains pointers
+ ldr r3, [r0, #MethodTable__m_dwFlags]
+ tst r3, #MethodTable__enum_flag_ContainsPointers
+ bne ContainsPointers
+
+ ldr r2, [r0, #MethodTable__m_BaseSize]
+
+ ;m_BaseSize is guaranteed to be a multiple of 4
+
+ ;GetThread
+ PATCHABLE_INLINE_GETTHREAD r12, JIT_BoxFastMP_InlineGetThread__PatchTLSOffset
+
+ ldr r3, [r12, #Thread__m_alloc_context__alloc_ptr]
+ add r3, r2
+
+ ldr r2, [r12, #Thread__m_alloc_context__alloc_limit]
+
+ cmp r3, r2
+ bhi AllocFailed2
+
+ ldr r2, [r12, #Thread__m_alloc_context__alloc_ptr]
+
+ ;advance alloc_ptr in Thread
+ str r3, [r12, #Thread__m_alloc_context__alloc_ptr]
+
+ ;write methodtable* in the object
+ str r0, [r2]
+
+ ;copy the contents of value type in the object
+
+ ldr r3, [r0, #MethodTable__m_BaseSize]
+ sub r3, #0xc
+
+ ;r3 = no of bytes to copy
+
+ ;move address of object to return register
+ mov r0, r2
+
+ ;advance r2 to skip methodtable location
+ add r2, #4
+
+CopyLoop
+ ldr r12, [r1, r3]
+ str r12, [r2, r3]
+ subs r3, #4 ; decrement count, setting flags for the bne below
+ bne CopyLoop
+
+#ifdef _DEBUG
+ ; Tail call to a helper that will set the current AppDomain index into the object header and then
+ ; return the object pointer back to our original caller.
+ b SetAppDomainInObject
+#else
+ ;return
+ bx lr
+#endif
+
+ContainsPointers
+ClassNotInited
+AllocFailed2
+ b $JIT_Box
+ LEAF_END
+
+
+;---------------------------------------------------------------------------
+; IN: r0: number of characters to allocate
+;; OUT: r0: address of newly allocated string
+
+ LEAF_ENTRY AllocateStringFastMP_InlineGetThread
+
+ ; Instead of doing elaborate overflow checks, we just limit the number of elements to
+ ; MAX_FAST_ALLOCATE_STRING_SIZE. This is picked (in asmconstants.h) to avoid any possibility of
+ ; overflow and to ensure we never try to allocate anything here that really should go on the large
+ ; object heap instead. Additionally the size has been selected so that it will encode into an
+ ; immediate in a single cmp instruction.
+
+ cmp r0, #MAX_FAST_ALLOCATE_STRING_SIZE
+ bhs OversizedString
+
+ ; Calculate total string size: Align(base size + (characters * 2), 4).
+ mov r1, #(SIZEOF__BaseStringObject + 3) ; r1 == string base size + 3 for alignment round up
+ add r1, r1, r0, lsl #1 ; r1 += characters * 2
+ bic r1, r1, #3 ; r1 &= ~3; round size to multiple of 4
+
+ ;GetThread
+ PATCHABLE_INLINE_GETTHREAD r12, AllocateStringFastMP_InlineGetThread__PatchTLSOffset
+ ldr r2, [r12, #Thread__m_alloc_context__alloc_limit]
+ ldr r3, [r12, #Thread__m_alloc_context__alloc_ptr]
+
+ add r1, r3
+ cmp r1, r2
+ bhi AllocFailed3
+
+ ;can allocate
+
+ ;advance alloc_ptr
+ str r1, [r12, #Thread__m_alloc_context__alloc_ptr]
+
+ ; Write MethodTable pointer into new object.
+ ldr r1, =$g_pStringClass
+ ldr r1, [r1]
+ str r1, [r3]
+
+ ; Write string length into new object.
+ str r0, [r3, #StringObject__m_StringLength]
+
+ ;prepare to return new object address
+ mov r0, r3
+
+#ifdef _DEBUG
+ ; Tail call to a helper that will set the current AppDomain index into the object header and then
+ ; return the object pointer back to our original caller.
+ b SetAppDomainInObject
+#else
+ ;return
+ bx lr
+#endif
+
+
+OversizedString
+AllocFailed3
+ b $FramedAllocateString
+
+ LEAF_END
+
+
+; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
+;---------------------------------------------------------------------------
+; IN: r0: type descriptor which contains the (shared) array method table and the element type.
+; IN: r1: number of array elements
+;; OUT: r0: address of newly allocated array
+
+ LEAF_ENTRY JIT_NewArr1VC_MP_InlineGetThread
+
+ ; Do a conservative check here for number of elements.
+ ; This is to avoid overflow while doing the calculations. We don't
+ ; have to worry about "large" objects, since the allocation quantum is never big enough for
+ ; LARGE_OBJECT_SIZE.
+
+ ; For Value Classes, this needs to be < (max_value_in_4byte - size_of_base_array)/(max_size_of_each_element)
+ ; This evaluates to (2^32-1 - 0xc)/2^16
+
+ ; Additionally the constant has been chosen such that it can be encoded in a
+ ; single Thumb2 CMP instruction.
+
+ cmp r1, #MAX_FAST_ALLOCATE_ARRAY_VC_SIZE
+ bhs OverSizedArray3
+
+ ;load MethodTable from ArrayTypeDesc
+ ldr r3, [r0, #ArrayTypeDesc__m_TemplateMT - 2]
+
+ FIX_INDIRECTION r3, label1
+
+ ;get element size - stored in low 16bits of m_dwFlags
+ ldrh r12, [r3, #MethodTable__m_dwFlags]
+
+ ; getting size of object to allocate
+
+ ; multiply number of elements with size of each element
+ mul r2, r12, r1
+
+ ; add the base array size and 3 to align total bytes at 4 byte boundary
+ add r2, r2, #SIZEOF__ArrayOfValueType + 3
+ bic r2, #3
+
+ ;GetThread
+ PATCHABLE_INLINE_GETTHREAD r12, JIT_NewArr1VC_MP_InlineGetThread__PatchTLSOffset
+ ldr r3, [r12, #Thread__m_alloc_context__alloc_ptr]
+
+ add r3, r2
+
+ ldr r2, [r12, #Thread__m_alloc_context__alloc_limit]
+
+ cmp r3, r2
+ bhi AllocFailed6
+
+ ; can allocate
+
+ ;r2 = address of new object
+ ldr r2, [r12, #Thread__m_alloc_context__alloc_ptr]
+
+ ;update pointer in allocation context
+ str r3, [r12, #Thread__m_alloc_context__alloc_ptr]
+
+ ;store number of elements
+ str r1, [r2, #ArrayBase__m_NumComponents]
+
+ ;store methodtable
+ ldr r3, [r0, #ArrayTypeDesc__m_TemplateMT - 2]
+
+ FIX_INDIRECTION r3, label2
+
+ str r3, [r2]
+
+ ;copy return value
+ mov r0, r2
+
+#ifdef _DEBUG
+ ; Tail call to a helper that will set the current AppDomain index into the object header and then
+ ; return the object pointer back to our original caller.
+ b SetAppDomainInObject
+#else
+ ;return
+ bx lr
+#endif
+
+
+
+AllocFailed6
+OverSizedArray3
+ b $JIT_NewArr1
+
+ LEAF_END
+
+
+
+; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
+;---------------------------------------------------------------------------
+; IN: r0: type descriptor which contains the (shared) array method table and the element type.
+; IN: r1: number of array elements
+;; OUT: r0: address of newly allocated array
+
+ LEAF_ENTRY JIT_NewArr1OBJ_MP_InlineGetThread
+
+ cmp r1, #MAX_FAST_ALLOCATE_ARRAY_OBJECTREF_SIZE
+ bhs OverSizedArray
+
+ mov r2, #SIZEOF__ArrayOfObjectRef
+ add r2, r2, r1, lsl #2
+
+ ;r2 will be a multiple of 4
+
+
+ ;GetThread
+ PATCHABLE_INLINE_GETTHREAD r12, JIT_NewArr1OBJ_MP_InlineGetThread__PatchTLSOffset
+ ldr r3, [r12, #Thread__m_alloc_context__alloc_ptr]
+
+ add r3, r2
+
+ ldr r2, [r12, #Thread__m_alloc_context__alloc_limit]
+
+ cmp r3, r2
+ bhi AllocFailed4
+
+ ;can allocate
+
+ ;r2 = address of new object
+ ldr r2, [r12, #Thread__m_alloc_context__alloc_ptr]
+
+ ;update pointer in allocation context
+ str r3, [r12, #Thread__m_alloc_context__alloc_ptr]
+
+ ;store number of elements
+ str r1, [r2, #ArrayBase__m_NumComponents]
+
+ ;store methodtable
+ ldr r3, [r0, #ArrayTypeDesc__m_TemplateMT - 2]
+
+ FIX_INDIRECTION r3, label3
+
+ str r3, [r2]
+
+ ;copy return value
+ mov r0, r2
+
+#ifdef _DEBUG
+ ; Tail call to a helper that will set the current AppDomain index into the object header and then
+ ; return the object pointer back to our original caller.
+ b SetAppDomainInObject
+#else
+ ;return
+ bx lr
+#endif
+
+OverSizedArray
+AllocFailed4
+ b $JIT_NewArr1
+ LEAF_END
+
+;
+; JIT Static access helpers when TLS Index for AppDomain is low enough for fast helpers
+;
+
+; ------------------------------------------------------------------
+; void* JIT_GetSharedNonGCStaticBase(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedNonGCStaticBase_InlineGetAppDomain
+ ; Check if r0 (moduleDomainID) is not a moduleID
+ tst r0, #1
+ beq HaveLocalModule1
+
+ PATCHABLE_INLINE_GETAPPDOMAIN r2, JIT_GetSharedNonGCStaticBase__PatchTLSLabel
+
+ ; Get the LocalModule, r0 will always be odd, so: r0 * 2 - 2 <=> (r0 >> 1) * 4
+ ldr r2, [r2 , #AppDomain__m_sDomainLocalBlock + DomainLocalBlock__m_pModuleSlots]
+ add r2, r2, r0, LSL #1
+ ldr r0, [r2, #-2]
+
+HaveLocalModule1
+ ; If class is not initialized, bail to C++ helper
+ add r2, r0, #DomainLocalModule__m_pDataBlob
+ ldrb r2, [r2, r1]
+ tst r2, #1
+ beq CallHelper1
+
+ bx lr
+
+CallHelper1
+ ; Tail call JIT_GetSharedNonGCStaticBase_Helper
+ b JIT_GetSharedNonGCStaticBase_Helper
+ LEAF_END
+
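+; A moduleDomainID with the low bit set encodes (index << 1) | 1, so the
+; LocalModule lives at m_pModuleSlots[index]; the "r0 * 2 - 2" addressing
+; above computes the same slot address ((id >> 1) * 4) without an extra
+; shift. The three helpers below repeat the same pattern.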
+
+; ------------------------------------------------------------------
+; void* JIT_GetSharedNonGCStaticBaseNoCtor(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedNonGCStaticBaseNoCtor_InlineGetAppDomain
+ ; Check if r0 (moduleDomainID) is not a moduleID
+ tst r0, #1
+ beq HaveLocalModule2
+
+ PATCHABLE_INLINE_GETAPPDOMAIN r2, JIT_GetSharedNonGCStaticBaseNoCtor__PatchTLSLabel
+
+ ; Get the LocalModule, r0 will always be odd, so: r0 * 2 - 2 <=> (r0 >> 1) * 4
+ ldr r2, [r2 , #AppDomain__m_sDomainLocalBlock + DomainLocalBlock__m_pModuleSlots]
+ add r2, r2, r0, LSL #1
+ ldr r0, [r2, #-2]
+
+
+HaveLocalModule2
+ bx lr
+ LEAF_END
+
+
+; ------------------------------------------------------------------
+; void* JIT_GetSharedGCStaticBase(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedGCStaticBase_InlineGetAppDomain
+ ; Check if r0 (moduleDomainID) is not a moduleID
+ tst r0, #1
+ beq HaveLocalModule3
+
+ PATCHABLE_INLINE_GETAPPDOMAIN r2, JIT_GetSharedGCStaticBase__PatchTLSLabel
+
+ ; Get the LocalModule, r0 will always be odd, so: r0 * 2 - 2 <=> (r0 >> 1) * 4
+ ldr r2, [r2 , #AppDomain__m_sDomainLocalBlock + DomainLocalBlock__m_pModuleSlots]
+ add r2, r2, r0, LSL #1
+ ldr r0, [r2, #-2]
+
+HaveLocalModule3
+ ; If class is not initialized, bail to C++ helper
+ add r2, r0, #DomainLocalModule__m_pDataBlob
+ ldrb r2, [r2, r1]
+ tst r2, #1
+ beq CallHelper3
+
+ ldr r0, [r0, #DomainLocalModule__m_pGCStatics]
+ bx lr
+
+CallHelper3
+ ; Tail call Jit_GetSharedGCStaticBase_Helper
+ b JIT_GetSharedGCStaticBase_Helper
+ LEAF_END
+
+
+; ------------------------------------------------------------------
+; void* JIT_GetSharedGCStaticBaseNoCtor(SIZE_T moduleDomainID, DWORD dwClassDomainID)
+
+ LEAF_ENTRY JIT_GetSharedGCStaticBaseNoCtor_InlineGetAppDomain
+ ; Check if r0 (moduleDomainID) is not a moduleID
+ tst r0, #1
+ beq HaveLocalModule4
+
+ PATCHABLE_INLINE_GETAPPDOMAIN r2, JIT_GetSharedGCStaticBaseNoCtor__PatchTLSLabel
+
+ ; Get the LocalModule, r0 will always be odd, so: r0 * 2 - 2 <=> (r0 >> 1) * 4
+ ldr r2, [r2 , #AppDomain__m_sDomainLocalBlock + DomainLocalBlock__m_pModuleSlots]
+ add r2, r2, r0, LSL #1
+ ldr r0, [r2, #-2]
+
+HaveLocalModule4
+ ldr r0, [r0, #DomainLocalModule__m_pGCStatics]
+ bx lr
+ LEAF_END
+
+; ------------------------------------------------------------------
+; End of the writeable code region
+ LEAF_ENTRY JIT_PatchedCodeLast
+ bx lr
+ LEAF_END
+
+
+; Must be at very end of file
+ END
diff --git a/src/vm/arm/profiler.cpp b/src/vm/arm/profiler.cpp
new file mode 100644
index 0000000000..4ae5e41eea
--- /dev/null
+++ b/src/vm/arm/profiler.cpp
@@ -0,0 +1,359 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// FILE: profiler.cpp
+//
+
+//
+
+//
+// ======================================================================================
+
+#include "common.h"
+
+#ifdef PROFILING_SUPPORTED
+#include "proftoeeinterfaceimpl.h"
+
+MethodDesc *FunctionIdToMethodDesc(FunctionID functionID);
+
+// TODO: move these to some common.h file
+// FLAGS
+#define PROFILE_ENTER 0x1
+#define PROFILE_LEAVE 0x2
+#define PROFILE_TAILCALL 0x4
+
+typedef struct _PROFILE_PLATFORM_SPECIFIC_DATA
+{
+ UINT32 r0; // Keep r0 & r1 contiguous to make returning 64-bit results easier
+ UINT32 r1;
+ void *R11;
+ void *Pc;
+ union // Float arg registers as 32-bit (s0-s15) and 64-bit (d0-d7)
+ {
+ UINT32 s[16];
+ UINT64 d[8];
+ };
+ FunctionID functionId;
+ void *probeSp; // stack pointer of managed function
+ void *profiledSp; // location of arguments on stack
+ LPVOID hiddenArg;
+ UINT32 flags;
+} PROFILE_PLATFORM_SPECIFIC_DATA, *PPROFILE_PLATFORM_SPECIFIC_DATA;
+
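+// The ProfileEnter/Leave/Tailcall assembly probes build one of these records
+// on the stack and pass its address in as the platformSpecificHandle, so the
+// accessors below simply read fields back out of that snapshot.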
+
+/*
+ * ProfileGetIPFromPlatformSpecificHandle
+ *
+ * This routine takes the platformSpecificHandle and retrieves from it the
+ * IP value.
+ *
+ * Parameters:
+ * handle - the platformSpecificHandle passed to ProfileEnter/Leave/Tailcall
+ *
+ * Returns:
+ * The IP value stored in the handle.
+ */
+UINT_PTR ProfileGetIPFromPlatformSpecificHandle(void *handle)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)handle;
+ return (UINT_PTR)pData->Pc;
+}
+
+
+/*
+ * ProfileSetFunctionIDInPlatformSpecificHandle
+ *
+ * This routine takes the platformSpecificHandle and functionID, and assigns
+ * the functionID to the functionId field of the platformSpecificHandle.
+ *
+ * Parameters:
+ * pPlatformSpecificHandle - the platformSpecificHandle passed to ProfileEnter/Leave/Tailcall
+ * functionID - the FunctionID to be assigned
+ *
+ * Returns:
+ * None
+ */
+void ProfileSetFunctionIDInPlatformSpecificHandle(void * pPlatformSpecificHandle, FunctionID functionID)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pPlatformSpecificHandle != NULL);
+ _ASSERTE(functionID != NULL);
+
+ PROFILE_PLATFORM_SPECIFIC_DATA * pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA *>(pPlatformSpecificHandle);
+ pData->functionId = functionID;
+}
+
+/*
+ * ProfileArgIterator::ProfileArgIterator
+ *
+ * Constructor. Does almost nothing. Init must be called after construction.
+ *
+ */
+ProfileArgIterator::ProfileArgIterator(MetaSig * pSig, void * platformSpecificHandle)
+ : m_argIterator(pSig)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(pSig != NULL);
+ _ASSERTE(platformSpecificHandle != NULL);
+
+ m_handle = platformSpecificHandle;
+ PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)m_handle;
+
+ // unwind a frame and get the SP for the profiled method to make sure it matches
+ // what the JIT gave us
+#ifdef _DEBUG
+ {
+/*
+ Foo() {
+ Bar();
+ }
+
+Stack for the above call will look as follows (stack growing downwards):
+
+ |
+ | Stack Args for Foo |
+ | pre spill r0-r3 |
+ | LR |
+ | R11 |
+ | Locals of Foo |
+ | Stack Args for Bar |
+ | pre spill r0-r3 | __________this Sp value is saved in profiledSP
+ | LR |
+ | R11 |
+ | Stack saved in prolog of Bar | _______ call to profiler hook is made here_____this Sp value is saved in probeSP
+ | |
+
+
+*/
+
+ // setup the context to represent the frame that called ProfileEnterNaked
+ CONTEXT ctx;
+ memset(&ctx, 0, sizeof(CONTEXT));
+ ctx.Sp = (UINT)pData->probeSp;
+ ctx.R11 = (UINT)pData->R11;
+ ctx.Pc = (UINT)pData->Pc;
+ // Functions that use localloc save sp in r9, so r9 must be set in the context for unwinding to work for such functions.
+ // r9 is stored at offset (sizeof(PROFILE_PLATFORM_SPECIFIC_DATA) (this also includes the padding done for 8-byte stack alignment) + size required for (r0-r3)) bytes from pData
+ ctx.R9 = *((UINT*)pData + (sizeof(PROFILE_PLATFORM_SPECIFIC_DATA) + 8)/4);
+
+ // walk up a frame to the caller frame (called the managed method which
+ // called ProfileEnterNaked)
+ Thread::VirtualUnwindCallFrame(&ctx);
+
+ // add the prespill register(r0-r3) size to get the stack pointer of previous function
+ _ASSERTE(pData->profiledSp == (void*)(ctx.Sp - 4*4));
+ }
+#endif // _DEBUG
+
+ // Get the hidden arg if there is one
+ MethodDesc * pMD = FunctionIdToMethodDesc(pData->functionId);
+
+ if ( (pData->hiddenArg == NULL) &&
+ (pMD->RequiresInstArg() || pMD->AcquiresInstMethodTableFromThis()) )
+ {
+ // In the enter probe, the JIT may not have pushed the generics token onto the stack yet.
+ // Luckily, we can inspect the registers reliably at this point.
+ if (pData->flags & PROFILE_ENTER)
+ {
+ _ASSERTE(!((pData->flags & PROFILE_LEAVE) || (pData->flags & PROFILE_TAILCALL)));
+
+ if (pMD->AcquiresInstMethodTableFromThis())
+ {
+ pData->hiddenArg = GetThis();
+ }
+ else
+ {
+ // The param type arg comes after the return buffer argument and the "this" pointer.
+ int index = 0;
+
+ if (m_argIterator.HasThis())
+ {
+ index++;
+ }
+
+ if (m_argIterator.HasRetBuffArg())
+ {
+ index++;
+ }
+
+ pData->hiddenArg = *(LPVOID*)((LPBYTE)pData->profiledSp + (index * sizeof(SIZE_T)));
+ }
+ }
+ else
+ {
+ EECodeInfo codeInfo((PCODE)pData->Pc);
+
+ // We want to pass the caller SP here.
+ pData->hiddenArg = EECodeManager::GetExactGenericsToken((SIZE_T)(pData->profiledSp), &codeInfo);
+ }
+ }
+}
+
+
+/*
+ * ProfileArgIterator::~ProfileArgIterator
+ *
+ * Destructor, releases all resources.
+ *
+ */
+ProfileArgIterator::~ProfileArgIterator()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_handle = NULL;
+}
+
+
+/*
+ * ProfileArgIterator::GetNextArgAddr
+ *
+ * After initialization, this method is called repeatedly until it
+ * returns NULL to get the address of each arg. Note: this address
+ * could be anywhere on the stack.
+ *
+ * Returns:
+ * Address of the argument, or NULL if iteration is complete.
+ */
+LPVOID ProfileArgIterator::GetNextArgAddr()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(m_handle != NULL);
+
+ PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)m_handle;
+
+ if ((pData->flags & PROFILE_LEAVE) || (pData->flags & PROFILE_TAILCALL))
+ {
+ _ASSERTE(!"GetNextArgAddr() - arguments are not available in leave and tailcall probes");
+ return NULL;
+ }
+
+ int argOffset = m_argIterator.GetNextOffset();
+
+ // argOffset of TransitionBlock::InvalidOffset indicates that we're done
+ if (argOffset == TransitionBlock::InvalidOffset)
+ {
+ return NULL;
+ }
+
+ if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset))
+ {
+ // Arguments which end up in floating point registers are contained entirely within those
+ // registers (they're never split onto the stack).
+ return ((BYTE *)&pData->d) + (argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters());
+ }
+
+ // Argument lives in one or more general registers (and possibly overflows onto the stack).
+ return (LPBYTE)pData->profiledSp + (argOffset - TransitionBlock::GetOffsetOfArgumentRegisters());
+}
+
+/*
+ * ProfileArgIterator::GetHiddenArgValue
+ *
+ * Called after initialization, any number of times, to retrieve any
+ * hidden argument, so that resolution for Generics can be done.
+ *
+ * Parameters:
+ * None.
+ *
+ * Returns:
+ * Value of the hidden parameter, or NULL if none exists.
+ */
+LPVOID ProfileArgIterator::GetHiddenArgValue(void)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)m_handle;
+
+ return pData->hiddenArg;
+}
+
+/*
+ * ProfileArgIterator::GetThis
+ *
+ * Called after initialization, any number of times, to retrieve any
+ * 'this' pointer.
+ *
+ * Parameters:
+ * None.
+ *
+ * Returns:
+ * Address of the 'this', or NULL if none exists.
+ */
+LPVOID ProfileArgIterator::GetThis(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)m_handle;
+ MethodDesc * pMD = FunctionIdToMethodDesc(pData->functionId);
+
+ // We guarantee to return the correct "this" pointer in the enter probe.
+ // For the leave and tailcall probes, we only return a valid "this" pointer if it is the generics token.
+ if (pData->hiddenArg != NULL)
+ {
+ if (pMD->AcquiresInstMethodTableFromThis())
+ {
+ return pData->hiddenArg;
+ }
+ }
+
+ if (pData->flags & PROFILE_ENTER)
+ {
+ if (m_argIterator.HasThis())
+ {
+ return *(LPVOID*)((LPBYTE)pData->profiledSp);
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * ProfileArgIterator::GetReturnBufferAddr
+ *
+ * Called after initialization, any number of times, to retrieve the
+ * address of the return buffer. NULL indicates no return value.
+ *
+ * Parameters:
+ * None.
+ *
+ * Returns:
+ * Address of the return buffer, or NULL if none exists.
+ */
+LPVOID ProfileArgIterator::GetReturnBufferAddr(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)m_handle;
+ MethodDesc * pMD = FunctionIdToMethodDesc(pData->functionId);
+
+ if (m_argIterator.HasRetBuffArg())
+ {
+ return (LPVOID)pData->r0;
+ }
+
+ if (m_argIterator.GetFPReturnSize() != 0)
+ return &pData->d[0];
+
+ if (m_argIterator.GetSig()->GetReturnType() != ELEMENT_TYPE_VOID)
+ return &pData->r0;
+ else
+ return NULL;
+}
+
+#endif // PROFILING_SUPPORTED
diff --git a/src/vm/arm/stubs.cpp b/src/vm/arm/stubs.cpp
new file mode 100644
index 0000000000..3896e40fe6
--- /dev/null
+++ b/src/vm/arm/stubs.cpp
@@ -0,0 +1,3903 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: stubs.cpp
+//
+// This file contains stub functions for unimplemented features needed to
+// run on the ARM platform.
+
+#include "common.h"
+#include "jitinterface.h"
+#include "comdelegate.h"
+#include "invokeutil.h"
+#include "excep.h"
+#include "class.h"
+#include "field.h"
+#include "dllimportcallback.h"
+#include "dllimport.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "eeconfig.h"
+#include "cgensys.h"
+#include "asmconstants.h"
+#include "security.h"
+#include "securitydescriptor.h"
+#include "virtualcallstub.h"
+#include "gcdump.h"
+#include "rtlfunctions.h"
+#include "codeman.h"
+#include "tls.h"
+#include "ecall.h"
+#include "threadsuspend.h"
+
+// target write barriers
+EXTERN_C void JIT_WriteBarrier(Object **dst, Object *ref);
+EXTERN_C void JIT_WriteBarrier_End();
+EXTERN_C void JIT_CheckedWriteBarrier(Object **dst, Object *ref);
+EXTERN_C void JIT_CheckedWriteBarrier_End();
+EXTERN_C void JIT_ByRefWriteBarrier_End();
+EXTERN_C void JIT_ByRefWriteBarrier_SP(Object **dst, Object *ref);
+
+// source write barriers
+EXTERN_C void JIT_WriteBarrier_SP_Pre(Object **dst, Object *ref);
+EXTERN_C void JIT_WriteBarrier_SP_Pre_End();
+EXTERN_C void JIT_WriteBarrier_SP_Post(Object **dst, Object *ref);
+EXTERN_C void JIT_WriteBarrier_SP_Post_End();
+EXTERN_C void JIT_WriteBarrier_MP_Pre(Object **dst, Object *ref);
+EXTERN_C void JIT_WriteBarrier_MP_Pre_End();
+EXTERN_C void JIT_WriteBarrier_MP_Post(Object **dst, Object *ref);
+EXTERN_C void JIT_WriteBarrier_MP_Post_End();
+
+EXTERN_C void JIT_CheckedWriteBarrier_SP_Pre(Object **dst, Object *ref);
+EXTERN_C void JIT_CheckedWriteBarrier_SP_Pre_End();
+EXTERN_C void JIT_CheckedWriteBarrier_SP_Post(Object **dst, Object *ref);
+EXTERN_C void JIT_CheckedWriteBarrier_SP_Post_End();
+EXTERN_C void JIT_CheckedWriteBarrier_MP_Pre(Object **dst, Object *ref);
+EXTERN_C void JIT_CheckedWriteBarrier_MP_Pre_End();
+EXTERN_C void JIT_CheckedWriteBarrier_MP_Post(Object **dst, Object *ref);
+EXTERN_C void JIT_CheckedWriteBarrier_MP_Post_End();
+
+EXTERN_C void JIT_ByRefWriteBarrier_SP_Pre();
+EXTERN_C void JIT_ByRefWriteBarrier_SP_Pre_End();
+EXTERN_C void JIT_ByRefWriteBarrier_SP_Post();
+EXTERN_C void JIT_ByRefWriteBarrier_SP_Post_End();
+EXTERN_C void JIT_ByRefWriteBarrier_MP_Pre();
+EXTERN_C void JIT_ByRefWriteBarrier_MP_Pre_End();
+EXTERN_C void JIT_ByRefWriteBarrier_MP_Post(Object **dst, Object *ref);
+EXTERN_C void JIT_ByRefWriteBarrier_MP_Post_End();
+
+EXTERN_C void JIT_PatchedWriteBarrierStart();
+EXTERN_C void JIT_PatchedWriteBarrierLast();
+
+#ifndef DACCESS_COMPILE
+//-----------------------------------------------------------------------
+// InstructionFormat for conditional jump.
+//-----------------------------------------------------------------------
+class ThumbCondJump : public InstructionFormat
+{
+ public:
+ ThumbCondJump() : InstructionFormat(InstructionFormat::k16)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
+ {
+ LIMITED_METHOD_CONTRACT
+
+ _ASSERTE(refsize == InstructionFormat::k16);
+
+ return 2;
+ }
+
+ virtual UINT GetHotSpotOffset(UINT refsize, UINT variationCode)
+ {
+ LIMITED_METHOD_CONTRACT
+
+ _ASSERTE(refsize == InstructionFormat::k16);
+
+ return 4;
+ }
+
+ //CB{N}Z Rn, <Label>
+ //Encoding 1|0|1|1|op|0|i|1|imm5|Rn
+ //op = Bit3(variation)
+ //Rn = Bits2-0(variation)
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ {
+ LIMITED_METHOD_CONTRACT
+
+ _ASSERTE(refsize == InstructionFormat::k16);
+
+        if(fixedUpReference < 0 || fixedUpReference > 126)
+ COMPlusThrow(kNotSupportedException);
+
+ _ASSERTE((fixedUpReference & 0x1) == 0);
+
+ pOutBuffer[0] = static_cast<BYTE>(((0x3e & fixedUpReference) << 2) | (0x7 & variationCode));
+ pOutBuffer[1] = static_cast<BYTE>(0xb1 | (0x8 & variationCode)| ((0x40 & fixedUpReference)>>5));
+ }
+};
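+
+// Worked example (illustrative only): encoding "cbz r0, <label>" with
+// fixedUpReference == 8 and variationCode == 0 (op = 0, Rn = r0):
+//
+//   pOutBuffer[0] = ((0x3e & 8) << 2) | (0x7 & 0)        = 0x20
+//   pOutBuffer[1] = 0xb1 | (0x8 & 0) | ((0x40 & 8) >> 5) = 0xb1
+//
+// The resulting little-endian halfword 0xb120 is 1|0|1|1|0|0|0|1|00100|000,
+// i.e. CBZ r0 with an offset of 8 bytes from the hot spot (instruction
+// address + 4).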
+
+//-----------------------------------------------------------------------
+// InstructionFormat for near Jump and short Jump
+//-----------------------------------------------------------------------
+class ThumbNearJump : public InstructionFormat
+{
+ public:
+ ThumbNearJump() : InstructionFormat(InstructionFormat::k16|InstructionFormat::k32)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
+ {
+ LIMITED_METHOD_CONTRACT
+
+ if(refsize == InstructionFormat::k16)
+ return 2;
+ else if(refsize == InstructionFormat::k32)
+ return 4;
+ else
+ _ASSERTE(!"Unknown refsize");
+ return 0;
+ }
+
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT cond, BYTE *pDataBuffer)
+ {
+ LIMITED_METHOD_CONTRACT
+
+        _ASSERTE(cond < 15);
+
+ //offsets must be in multiples of 2
+ _ASSERTE((fixedUpReference & 0x1) == 0);
+
+ if(cond == 0xe) //Always execute
+ {
+ if(fixedUpReference >= -2048 && fixedUpReference <= 2046)
+ {
+ if(refsize != InstructionFormat::k16)
+ _ASSERTE(!"Expected refSize to be 2");
+
+ //Emit T2 encoding of B<c> <label> instruction
+ pOutBuffer[0] = static_cast<BYTE>((fixedUpReference & 0x1fe)>>1);
+ pOutBuffer[1] = static_cast<BYTE>(0xe0 | ((fixedUpReference & 0xe00)>>9));
+ }
+ else if(fixedUpReference >= -16777216 && fixedUpReference <= 16777214)
+ {
+ if(refsize != InstructionFormat::k32)
+ _ASSERTE(!"Expected refSize to be 4");
+
+ //Emit T4 encoding of B<c> <label> instruction
+ int s = (fixedUpReference & 0x1000000) >> 24;
+ int i1 = (fixedUpReference & 0x800000) >> 23;
+ int i2 = (fixedUpReference & 0x400000) >> 22;
+ pOutBuffer[0] = static_cast<BYTE>((fixedUpReference & 0xff000) >> 12);
+ pOutBuffer[1] = static_cast<BYTE>(0xf0 | (s << 2) |( (fixedUpReference & 0x300000) >>20));
+ pOutBuffer[2] = static_cast<BYTE>((fixedUpReference & 0x1fe) >> 1);
+            // Mask J1/J2 down to single bits: J1 = ~(I1 ^ S) & 1, J2 = ~(I2 ^ S) & 1.
+            pOutBuffer[3] = static_cast<BYTE>(0x90 | ((~(i1^s)) & 0x1) << 5 | ((~(i2^s)) & 0x1) << 3 | (fixedUpReference & 0xe00) >> 9);
+ }
+ else
+ {
+ COMPlusThrow(kNotSupportedException);
+ }
+ }
+ else // conditional branch based on flags
+ {
+ if(fixedUpReference >= -256 && fixedUpReference <= 254)
+ {
+ if(refsize != InstructionFormat::k16)
+ _ASSERTE(!"Expected refSize to be 2");
+
+ //Emit T1 encoding of B<c> <label> instruction
+ pOutBuffer[0] = static_cast<BYTE>((fixedUpReference & 0x1fe)>>1);
+ pOutBuffer[1] = static_cast<BYTE>(0xd0 | (cond & 0xf));
+ }
+ else if(fixedUpReference >= -1048576 && fixedUpReference <= 1048574)
+ {
+ if(refsize != InstructionFormat::k32)
+ _ASSERTE(!"Expected refSize to be 4");
+
+ //Emit T3 encoding of B<c> <label> instruction
+ pOutBuffer[0] = static_cast<BYTE>(((cond & 0x3) << 6) | ((fixedUpReference & 0x3f000) >>12));
+ pOutBuffer[1] = static_cast<BYTE>(0xf0 | ((fixedUpReference & 0x100000) >>18) | ((cond & 0xc) >> 2));
+ pOutBuffer[2] = static_cast<BYTE>((fixedUpReference & 0x1fe) >> 1);
+ pOutBuffer[3] = static_cast<BYTE>(0x80 | ((fixedUpReference & 0x40000) >> 13) | ((fixedUpReference & 0x80000) >> 16) | ((fixedUpReference & 0xe00) >> 9));
+ }
+ else
+ {
+ COMPlusThrow(kNotSupportedException);
+ }
+ }
+ }
+
+ virtual BOOL CanReach(UINT refsize, UINT variationCode, BOOL fExternal, INT_PTR offset)
+ {
+ LIMITED_METHOD_CONTRACT
+
+ if (fExternal)
+ {
+ _ASSERTE(0);
+ return FALSE;
+ }
+ else
+ {
+ switch (refsize)
+ {
+ case InstructionFormat::k16:
+ if(variationCode == 0xe)
+ return (offset >= -2048 && offset <= 2046 && (offset & 0x1) == 0);
+ else
+ return (offset >= -256 && offset <= 254 && (offset & 0x1) == 0);
+ case InstructionFormat::k32:
+ if(variationCode == 0xe)
+ return ((offset >= -16777216) && (offset <= 16777214) && ((offset & 0x1) == 0));
+ else
+ return ((offset >= -1048576) && (offset <= 1048574) && ((offset & 0x1) == 0));
+ default:
+ _ASSERTE(!"Unknown refsize");
+ return FALSE;
+ }
+ }
+ }
+
+ virtual UINT GetHotSpotOffset(UINT refsize, UINT variationCode)
+ {
+ LIMITED_METHOD_CONTRACT
+
+ _ASSERTE(refsize == InstructionFormat::k16 || refsize == InstructionFormat::k32);
+
+ return 4;
+ }
+};
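+
+// Worked example (illustrative only): an unconditional branch (cond == 0xe)
+// with fixedUpReference == 100 fits the 16-bit T2 encoding:
+//
+//   pOutBuffer[0] = (100 & 0x1fe) >> 1          = 0x32
+//   pOutBuffer[1] = 0xe0 | ((100 & 0xe00) >> 9) = 0xe0
+//
+// giving the halfword 0xe032 (11100|imm11 with imm11 == 50), i.e. a branch
+// of +100 bytes from the hot spot (instruction address + 4).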
+
+
+//static conditional jump instruction format object
+static BYTE gThumbCondJump[sizeof(ThumbCondJump)];
+
+//static near jump instruction format object
+static BYTE gThumbNearJump[sizeof(ThumbNearJump)];
+
+void StubLinkerCPU::Init(void)
+{
+ //Initialize the object
+ new (gThumbCondJump) ThumbCondJump();
+ new (gThumbNearJump) ThumbNearJump();
+}
+
+#ifndef CROSSGEN_COMPILE
+
+// GC write barrier support.
+//
+// To optimize our write barriers we code the values of several GC globals (e.g. g_lowest_address) directly
+// into the barrier function itself, thus avoiding a double memory indirection. Every time the GC modifies one
+// of these globals we need to update all of the write barriers accordingly.
+//
+// In order to keep this process non-brittle we don't hard code the offsets of the instructions that need to
+// be changed. Instead the code used to create these barriers is implemented using special macros that record
+// the necessary offsets in a descriptor table. Search for "GC write barrier support" in vm\arm\asmhelpers.asm
+// for more details.
+
+// Structure describing the layout of a single write barrier descriptor. This must be kept in sync with the
+// code in vm\arm\asmhelpers.asm in the WRITE_BARRIER_END macro. Each offset recorded is for one of the
+// supported GC globals (an offset of 0xffff is encoded if that global is not used by the particular barrier
+// function). We currently only support one usage of each global by any single barrier function. The offset is
+// the byte offset from the start of the function at which a movw,movt instruction pair is used to load the
+// value of the global into a register.
+struct WriteBarrierDescriptor
+{
+ BYTE * m_pFuncStart; // Pointer to the start of the barrier function
+ BYTE * m_pFuncEnd; // Pointer to the end of the barrier function
+ DWORD m_dw_g_lowest_address_offset; // Offset of the instruction reading g_lowest_address
+ DWORD m_dw_g_highest_address_offset; // Offset of the instruction reading g_highest_address
+ DWORD m_dw_g_ephemeral_low_offset; // Offset of the instruction reading g_ephemeral_low
+ DWORD m_dw_g_ephemeral_high_offset; // Offset of the instruction reading g_ephemeral_high
+ DWORD m_dw_g_card_table_offset; // Offset of the instruction reading g_card_table
+};
+
+// Infrastructure used to map the source and destination of the current write barrier patching
+struct WriteBarrierMapping
+{
+ PBYTE to; // Pointer to the write-barrier where it was copied over
+ PBYTE from; // Pointer to write-barrier from which it was copied
+};
+
+const int WriteBarrierIndex = 0;
+const int CheckedWriteBarrierIndex = 1;
+const int ByRefWriteBarrierIndex = 2;
+const int MaxWriteBarrierIndex = 3;
+
+WriteBarrierMapping wbMapping[MaxWriteBarrierIndex] =
+ {
+ {(PBYTE)JIT_WriteBarrier, NULL},
+ {(PBYTE)JIT_CheckedWriteBarrier, NULL},
+ {(PBYTE)JIT_ByRefWriteBarrier, NULL}
+ };
+
+PBYTE FindWBMapping(PBYTE from)
+{
+ for(int i = 0; i < MaxWriteBarrierIndex; ++i)
+ {
+ if(wbMapping[i].from == from)
+ return wbMapping[i].to;
+ }
+ return NULL;
+}
+
+// Pointer to the start of the descriptor table. The end of the table is marked by a sentinel entry
+// (m_pFuncStart is NULL).
+EXTERN_C WriteBarrierDescriptor g_rgWriteBarrierDescriptors;
+
+// Determine the range of memory containing all the write barrier implementations (these are clustered
+// together and should fit in a page or maybe two).
+void ComputeWriteBarrierRange(BYTE ** ppbStart, DWORD * pcbLength)
+{
+ DWORD size = (PBYTE)JIT_PatchedWriteBarrierLast - (PBYTE)JIT_PatchedWriteBarrierStart;
+ *ppbStart = (PBYTE)JIT_PatchedWriteBarrierStart;
+ *pcbLength = size;
+}
+
+void CopyWriteBarrier(PCODE dstCode, PCODE srcCode, PCODE endCode)
+{
+ TADDR dst = PCODEToPINSTR(dstCode);
+ TADDR src = PCODEToPINSTR(srcCode);
+ TADDR end = PCODEToPINSTR(endCode);
+
+ size_t size = (PBYTE)end - (PBYTE)src;
+ memcpy((PVOID)dst, (PVOID)src, size);
+}
+
+#if _DEBUG
+void ValidateWriteBarriers()
+{
+    // Post-grow write barriers are bigger than the pre-grow ones, so validate that the target write barrier has enough space to accommodate them.
+ _ASSERTE( ((PBYTE)JIT_WriteBarrier_End - (PBYTE)JIT_WriteBarrier) >= ((PBYTE)JIT_WriteBarrier_MP_Post_End - (PBYTE)JIT_WriteBarrier_MP_Post));
+ _ASSERTE( ((PBYTE)JIT_WriteBarrier_End - (PBYTE)JIT_WriteBarrier) >= ((PBYTE)JIT_WriteBarrier_SP_Post_End - (PBYTE)JIT_WriteBarrier_SP_Post));
+
+ _ASSERTE( ((PBYTE)JIT_CheckedWriteBarrier_End - (PBYTE)JIT_CheckedWriteBarrier) >= ((PBYTE)JIT_CheckedWriteBarrier_MP_Post_End - (PBYTE)JIT_CheckedWriteBarrier_MP_Post));
+ _ASSERTE( ((PBYTE)JIT_CheckedWriteBarrier_End - (PBYTE)JIT_CheckedWriteBarrier) >= ((PBYTE)JIT_CheckedWriteBarrier_SP_Post_End - (PBYTE)JIT_CheckedWriteBarrier_SP_Post));
+
+ _ASSERTE( ((PBYTE)JIT_ByRefWriteBarrier_End - (PBYTE)JIT_ByRefWriteBarrier) >= ((PBYTE)JIT_ByRefWriteBarrier_MP_Post_End - (PBYTE)JIT_ByRefWriteBarrier_MP_Post));
+ _ASSERTE( ((PBYTE)JIT_ByRefWriteBarrier_End - (PBYTE)JIT_ByRefWriteBarrier) >= ((PBYTE)JIT_ByRefWriteBarrier_SP_Post_End - (PBYTE)JIT_ByRefWriteBarrier_SP_Post));
+
+}
+#endif // _DEBUG
+
+#define UPDATE_WB(_proc,_grow) \
+ CopyWriteBarrier((PCODE)JIT_WriteBarrier, (PCODE)JIT_WriteBarrier_##_proc##_##_grow##, (PCODE)JIT_WriteBarrier_##_proc##_##_grow##_End); \
+ wbMapping[WriteBarrierIndex].from = (PBYTE)JIT_WriteBarrier_##_proc##_##_grow##; \
+ \
+ CopyWriteBarrier((PCODE)JIT_CheckedWriteBarrier, (PCODE)JIT_CheckedWriteBarrier_##_proc##_##_grow##, (PCODE)JIT_CheckedWriteBarrier_##_proc##_##_grow##_End); \
+ wbMapping[CheckedWriteBarrierIndex].from = (PBYTE)JIT_CheckedWriteBarrier_##_proc##_##_grow##; \
+ \
+ CopyWriteBarrier((PCODE)JIT_ByRefWriteBarrier, (PCODE)JIT_ByRefWriteBarrier_##_proc##_##_grow##, (PCODE)JIT_ByRefWriteBarrier_##_proc##_##_grow##_End); \
+ wbMapping[ByRefWriteBarrierIndex].from = (PBYTE)JIT_ByRefWriteBarrier_##_proc##_##_grow##; \
+
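+// For illustration, UPDATE_WB(MP,Post) expands (in part) to:
+//
+//     CopyWriteBarrier((PCODE)JIT_WriteBarrier,
+//                      (PCODE)JIT_WriteBarrier_MP_Post,
+//                      (PCODE)JIT_WriteBarrier_MP_Post_End);
+//     wbMapping[WriteBarrierIndex].from = (PBYTE)JIT_WriteBarrier_MP_Post;
+//
+// and similarly for the checked and by-ref barriers.
+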
+// Update the instructions in our various write barrier implementations that refer directly to the values
+// of GC globals such as g_lowest_address and g_card_table. We don't particularly care which values have
+// changed on each of these callbacks; it's cheap enough to refresh them all.
+void UpdateGCWriteBarriers(BOOL postGrow = false)
+{
+    // A helper macro (GWB_PATCH_OFFSET, defined below) abstracts the minutiae of patching the
+    // instructions that access the value of a particular GC global.
+
+#if _DEBUG
+ ValidateWriteBarriers();
+#endif // _DEBUG
+
+ static bool wbCopyRequired = true; // We begin with a wb copy
+ static bool wbIsPostGrow = false; // We begin with pre-Grow write barrier
+
+ if(postGrow && !wbIsPostGrow)
+ {
+ wbIsPostGrow = true;
+ wbCopyRequired = true;
+ }
+
+ if(wbCopyRequired)
+ {
+ BOOL mp = g_SystemInfo.dwNumberOfProcessors > 1;
+ if(mp)
+ {
+ if(wbIsPostGrow)
+ {
+ UPDATE_WB(MP,Post);
+ }
+ else
+ {
+ UPDATE_WB(MP,Pre);
+ }
+ }
+ else
+ {
+ if(wbIsPostGrow)
+ {
+ UPDATE_WB(SP,Post);
+ }
+ else
+ {
+ UPDATE_WB(SP,Pre);
+ }
+ }
+
+ wbCopyRequired = false;
+ }
+#define GWB_PATCH_OFFSET(_global) \
+ if (pDesc->m_dw_##_global##_offset != 0xffff) \
+ PutThumb2Mov32((UINT16*)(to + pDesc->m_dw_##_global##_offset - 1), (UINT32)(dac_cast<TADDR>(_global)));
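+
+    // For illustration, GWB_PATCH_OFFSET(g_card_table) expands to:
+    //
+    //   if (pDesc->m_dw_g_card_table_offset != 0xffff)
+    //       PutThumb2Mov32((UINT16*)(to + pDesc->m_dw_g_card_table_offset - 1),
+    //                      (UINT32)(dac_cast<TADDR>(g_card_table)));
+    //
+    // i.e. the movw/movt pair at the recorded offset within the live copy of
+    // the barrier is rewritten to load the current value of g_card_table.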
+
+ // Iterate through the write barrier patch table created in the .clrwb section
+ // (see write barrier asm code)
+ WriteBarrierDescriptor * pDesc = &g_rgWriteBarrierDescriptors;
+ while (pDesc->m_pFuncStart)
+ {
+        // If the write barrier is currently in use (i.e. it has been copied over to the patchable site)
+        // then read the patch location from the table and use the offset to patch the target asm code.
+ PBYTE to = FindWBMapping(pDesc->m_pFuncStart);
+ if(to)
+ {
+ GWB_PATCH_OFFSET(g_lowest_address);
+ GWB_PATCH_OFFSET(g_highest_address);
+ GWB_PATCH_OFFSET(g_ephemeral_low);
+ GWB_PATCH_OFFSET(g_ephemeral_high);
+ GWB_PATCH_OFFSET(g_card_table);
+ }
+
+ pDesc++;
+ }
+
+ // We've changed code so we must flush the instruction cache.
+ BYTE *pbAlteredRange;
+ DWORD cbAlteredRange;
+ ComputeWriteBarrierRange(&pbAlteredRange, &cbAlteredRange);
+ FlushInstructionCache(GetCurrentProcess(), pbAlteredRange, cbAlteredRange);
+}
+
+void StompWriteBarrierResize(BOOL bReqUpperBoundsCheck)
+{
+ // The runtime is not always suspended when this is called (unlike StompWriteBarrierEphemeral) but we have
+ // no way to update the barrier code atomically on ARM since each 32-bit value we change is loaded over
+ // two instructions. So we have to suspend the EE (which forces code out of the barrier functions) before
+ // proceeding. Luckily the case where the runtime is not already suspended is relatively rare (allocation
+ // of a new large object heap segment). Skip the suspend for the case where we're called during runtime
+ // startup.
+
+    // Suspending/resuming the EE under GC stress will trigger a GC, and if we're holding the
+    // GC lock due to allocating a LOH segment that would deadlock, so disable GC stress here.
+ GCStressPolicy::InhibitHolder iholder;
+
+ bool fSuspended = false;
+ if (!g_fEEInit && !GCHeap::IsGCInProgress())
+ {
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER);
+ fSuspended = true;
+ }
+
+ UpdateGCWriteBarriers(bReqUpperBoundsCheck);
+
+ if (fSuspended)
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+}
+
+void StompWriteBarrierEphemeral(void)
+{
+ _ASSERTE(GCHeap::IsGCInProgress() || g_fEEInit);
+ UpdateGCWriteBarriers();
+}
+#endif // CROSSGEN_COMPILE
+
+#endif // !DACCESS_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+void LazyMachState::unwindLazyState(LazyMachState* baseState,
+ MachState* unwoundstate,
+ int funCallDepth,
+ HostCallPreference hostCallPreference)
+{
+ T_CONTEXT ctx;
+ T_KNONVOLATILE_CONTEXT_POINTERS nonVolRegPtrs;
+
+ ctx.Pc = baseState->captureIp;
+ ctx.Sp = baseState->captureSp;
+
+ ctx.R4 = unwoundstate->captureR4_R11[0] = baseState->captureR4_R11[0];
+ ctx.R5 = unwoundstate->captureR4_R11[1] = baseState->captureR4_R11[1];
+ ctx.R6 = unwoundstate->captureR4_R11[2] = baseState->captureR4_R11[2];
+ ctx.R7 = unwoundstate->captureR4_R11[3] = baseState->captureR4_R11[3];
+ ctx.R8 = unwoundstate->captureR4_R11[4] = baseState->captureR4_R11[4];
+ ctx.R9 = unwoundstate->captureR4_R11[5] = baseState->captureR4_R11[5];
+ ctx.R10 = unwoundstate->captureR4_R11[6] = baseState->captureR4_R11[6];
+ ctx.R11 = unwoundstate->captureR4_R11[7] = baseState->captureR4_R11[7];
+
+#if !defined(DACCESS_COMPILE)
+    // Context pointers are only set up for non-DAC builds. Under DAC the LazyMachState may be
+    // uninitialized and has to be unwound here, but the API used to unwind in DAC, StackWalk64(),
+    // does not support context pointers.
+    //
+    // Point the KNONVOLATILE_CONTEXT_POINTERS at the captured integer registers for use during unwinding.
+ nonVolRegPtrs.R4 = &unwoundstate->captureR4_R11[0];
+ nonVolRegPtrs.R5 = &unwoundstate->captureR4_R11[1];
+ nonVolRegPtrs.R6 = &unwoundstate->captureR4_R11[2];
+ nonVolRegPtrs.R7 = &unwoundstate->captureR4_R11[3];
+ nonVolRegPtrs.R8 = &unwoundstate->captureR4_R11[4];
+ nonVolRegPtrs.R9 = &unwoundstate->captureR4_R11[5];
+ nonVolRegPtrs.R10 = &unwoundstate->captureR4_R11[6];
+ nonVolRegPtrs.R11 = &unwoundstate->captureR4_R11[7];
+#endif // DACCESS_COMPILE
+
+ LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK LazyMachState::unwindLazyState(ip:%p,sp:%p)\n", baseState->captureIp, baseState->captureSp));
+
+ PCODE pvControlPc;
+
+ do
+ {
+ pvControlPc = Thread::VirtualUnwindCallFrame(&ctx, &nonVolRegPtrs);
+
+ if (funCallDepth > 0)
+ {
+ --funCallDepth;
+ if (funCallDepth == 0)
+ break;
+ }
+ else
+ {
+            // Determine whether the given IP resides in JITted code (IsManagedCode returns nonzero
+            // in that case), i.e. whether we've unwound back to managed code yet.
+ BOOL fFailedReaderLock = FALSE;
+ BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock);
+ if (fFailedReaderLock)
+ {
+ // We don't know if we would have been able to find a JIT
+ // manager, because we couldn't enter the reader lock without
+ // yielding (and our caller doesn't want us to yield). So abort
+ // now.
+
+ // Invalidate the lazyState we're returning, so the caller knows
+ // we aborted before we could fully unwind
+ unwoundstate->_isValid = false;
+ return;
+ }
+
+ if (fIsManagedCode)
+ break;
+ }
+ }
+ while(TRUE);
+
+ //
+ // Update unwoundState so that HelperMethodFrameRestoreState knows which
+ // registers have been potentially modified.
+ //
+
+ unwoundstate->_pc = ctx.Pc;
+ unwoundstate->_sp = ctx.Sp;
+
+#ifdef DACCESS_COMPILE
+    // For DAC builds, we update the registers directly since we don't have context pointers.
+ unwoundstate->captureR4_R11[0] = ctx.R4;
+ unwoundstate->captureR4_R11[1] = ctx.R5;
+ unwoundstate->captureR4_R11[2] = ctx.R6;
+ unwoundstate->captureR4_R11[3] = ctx.R7;
+ unwoundstate->captureR4_R11[4] = ctx.R8;
+ unwoundstate->captureR4_R11[5] = ctx.R9;
+ unwoundstate->captureR4_R11[6] = ctx.R10;
+ unwoundstate->captureR4_R11[7] = ctx.R11;
+#else // !DACCESS_COMPILE
+ // For non-DAC builds, update the register state from context pointers
+ unwoundstate->_R4_R11[0] = (PDWORD)nonVolRegPtrs.R4;
+ unwoundstate->_R4_R11[1] = (PDWORD)nonVolRegPtrs.R5;
+ unwoundstate->_R4_R11[2] = (PDWORD)nonVolRegPtrs.R6;
+ unwoundstate->_R4_R11[3] = (PDWORD)nonVolRegPtrs.R7;
+ unwoundstate->_R4_R11[4] = (PDWORD)nonVolRegPtrs.R8;
+ unwoundstate->_R4_R11[5] = (PDWORD)nonVolRegPtrs.R9;
+ unwoundstate->_R4_R11[6] = (PDWORD)nonVolRegPtrs.R10;
+ unwoundstate->_R4_R11[7] = (PDWORD)nonVolRegPtrs.R11;
+#endif // DACCESS_COMPILE
+
+ unwoundstate->_isValid = true;
+}
+
+void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+
+ //
+ // Copy the saved state from the frame to the current context.
+ //
+
+ LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK HelperMethodFrame::UpdateRegDisplay cached ip:%p, sp:%p\n", m_MachState._pc, m_MachState._sp));
+
+#if defined(DACCESS_COMPILE)
+ // For DAC, we may get here when the HMF is still uninitialized.
+ // So we may need to unwind here.
+ if (!m_MachState.isValid())
+ {
+ // This allocation throws on OOM.
+ MachState* pUnwoundState = (MachState*)DacAllocHostOnlyInstance(sizeof(*pUnwoundState), true);
+
+ InsureInit(false, pUnwoundState);
+
+ pRD->pCurrentContext->Pc = pRD->ControlPC = pUnwoundState->_pc;
+ pRD->pCurrentContext->Sp = pRD->SP = pUnwoundState->_sp;
+
+ pRD->pCurrentContext->R4 = (DWORD)(pUnwoundState->captureR4_R11[0]);
+ pRD->pCurrentContext->R5 = (DWORD)(pUnwoundState->captureR4_R11[1]);
+ pRD->pCurrentContext->R6 = (DWORD)(pUnwoundState->captureR4_R11[2]);
+ pRD->pCurrentContext->R7 = (DWORD)(pUnwoundState->captureR4_R11[3]);
+ pRD->pCurrentContext->R8 = (DWORD)(pUnwoundState->captureR4_R11[4]);
+ pRD->pCurrentContext->R9 = (DWORD)(pUnwoundState->captureR4_R11[5]);
+ pRD->pCurrentContext->R10 = (DWORD)(pUnwoundState->captureR4_R11[6]);
+ pRD->pCurrentContext->R11 = (DWORD)(pUnwoundState->captureR4_R11[7]);
+
+ return;
+ }
+#endif // DACCESS_COMPILE
+
+ // reset pContext; it's only valid for active (top-most) frame
+ pRD->pContext = NULL;
+ pRD->ControlPC = GetReturnAddress();
+ pRD->SP = (DWORD)(size_t)m_MachState._sp;
+
+ pRD->pCurrentContext->Pc = pRD->ControlPC;
+ pRD->pCurrentContext->Sp = pRD->SP;
+
+ pRD->pCurrentContext->R4 = *m_MachState._R4_R11[0];
+ pRD->pCurrentContext->R5 = *m_MachState._R4_R11[1];
+ pRD->pCurrentContext->R6 = *m_MachState._R4_R11[2];
+ pRD->pCurrentContext->R7 = *m_MachState._R4_R11[3];
+ pRD->pCurrentContext->R8 = *m_MachState._R4_R11[4];
+ pRD->pCurrentContext->R9 = *m_MachState._R4_R11[5];
+ pRD->pCurrentContext->R10 = *m_MachState._R4_R11[6];
+ pRD->pCurrentContext->R11 = *m_MachState._R4_R11[7];
+
+ pRD->pCurrentContextPointers->R4 = m_MachState._R4_R11[0];
+ pRD->pCurrentContextPointers->R5 = m_MachState._R4_R11[1];
+ pRD->pCurrentContextPointers->R6 = m_MachState._R4_R11[2];
+ pRD->pCurrentContextPointers->R7 = m_MachState._R4_R11[3];
+ pRD->pCurrentContextPointers->R8 = m_MachState._R4_R11[4];
+ pRD->pCurrentContextPointers->R9 = m_MachState._R4_R11[5];
+ pRD->pCurrentContextPointers->R10 = m_MachState._R4_R11[6];
+ pRD->pCurrentContextPointers->R11 = m_MachState._R4_R11[7];
+ pRD->pCurrentContextPointers->Lr = NULL;
+}
+#endif // !CROSSGEN_COMPILE
+
+TADDR FixupPrecode::GetMethodDesc()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // This lookup is also manually inlined in PrecodeFixupThunk assembly code
+ TADDR base = *PTR_TADDR(GetBase());
+ if (base == NULL)
+ return NULL;
+ return base + (m_MethodDescChunkIndex * MethodDesc::ALIGNMENT);
+}
+
+#ifdef DACCESS_COMPILE
+void FixupPrecode::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ DacEnumMemoryRegion(dac_cast<TADDR>(this), sizeof(FixupPrecode));
+
+ DacEnumMemoryRegion(GetBase(), sizeof(TADDR));
+}
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+
+void StubPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+{
+ WRAPPER_NO_CONTRACT;
+
+ int n = 0;
+
+ m_rgCode[n++] = 0xf8df; // ldr r12, [pc, #8]
+ m_rgCode[n++] = 0xc008;
+ m_rgCode[n++] = 0xf8df; // ldr pc, [pc, #0]
+ m_rgCode[n++] = 0xf000;
+
+ _ASSERTE(n == _countof(m_rgCode));
+
+ m_pTarget = GetPreStubEntryPoint();
+ m_pMethodDesc = (TADDR)pMD;
+}
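+
+// Illustrative layout note (assuming m_pTarget and m_pMethodDesc immediately
+// follow m_rgCode, as the PC-relative offsets imply): the first ldr executes
+// with an aligned PC of &m_rgCode[0] + 4, so "ldr r12, [pc, #8]" reads offset
+// 12 (m_pMethodDesc) into r12, while "ldr pc, [pc, #0]" executes with an
+// aligned PC of offset 8 and so jumps through m_pTarget at offset 8.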
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+void StubPrecode::Fixup(DataImage *image)
+{
+ WRAPPER_NO_CONTRACT;
+
+ image->FixupFieldToNode(this, offsetof(StubPrecode, m_pTarget),
+ image->GetHelperThunk(CORINFO_HELP_EE_PRESTUB),
+ 0,
+ IMAGE_REL_BASED_PTR);
+
+ image->FixupField(this, offsetof(StubPrecode, m_pMethodDesc),
+ (void*)GetMethodDesc(),
+ 0,
+ IMAGE_REL_BASED_PTR);
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+void NDirectImportPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+{
+ WRAPPER_NO_CONTRACT;
+
+ int n = 0;
+
+ m_rgCode[n++] = 0xf8df; // ldr r12, [pc, #4]
+ m_rgCode[n++] = 0xc004;
+ m_rgCode[n++] = 0xf8df; // ldr pc, [pc, #4]
+ m_rgCode[n++] = 0xf004;
+
+ _ASSERTE(n == _countof(m_rgCode));
+
+ m_pMethodDesc = (TADDR)pMD;
+ m_pTarget = GetEEFuncEntryPoint(NDirectImportThunk);
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+void NDirectImportPrecode::Fixup(DataImage *image)
+{
+ WRAPPER_NO_CONTRACT;
+
+ image->FixupField(this, offsetof(NDirectImportPrecode, m_pMethodDesc),
+ (void*)GetMethodDesc(),
+ 0,
+ IMAGE_REL_BASED_PTR);
+
+ image->FixupFieldToNode(this, offsetof(NDirectImportPrecode, m_pTarget),
+ image->GetHelperThunk(CORINFO_HELP_EE_PINVOKE_FIXUP),
+ 0,
+ IMAGE_REL_BASED_PTR);
+}
+#endif
+
+void FixupPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex /*=0*/, int iPrecodeChunkIndex /*=0*/)
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_rgCode[0] = 0x46fc; // mov r12, pc
+ m_rgCode[1] = 0xf8df; // ldr pc, [pc, #4]
+ m_rgCode[2] = 0xf004;
+
+ // Initialize chunk indices only if they are not initialized yet. This is necessary to make MethodDesc::Reset work.
+ if (m_PrecodeChunkIndex == 0)
+ {
+ _ASSERTE(FitsInU1(iPrecodeChunkIndex));
+ m_PrecodeChunkIndex = static_cast<BYTE>(iPrecodeChunkIndex);
+ }
+
+ if (iMethodDescChunkIndex != -1)
+ {
+ if (m_MethodDescChunkIndex == 0)
+ {
+ _ASSERTE(FitsInU1(iMethodDescChunkIndex));
+ m_MethodDescChunkIndex = static_cast<BYTE>(iMethodDescChunkIndex);
+ }
+
+ if (*(void**)GetBase() == NULL)
+ *(void**)GetBase() = (BYTE*)pMD - (iMethodDescChunkIndex * MethodDesc::ALIGNMENT);
+ }
+
+ _ASSERTE(GetMethodDesc() == (TADDR)pMD);
+
+ if (pLoaderAllocator != NULL)
+ {
+ m_pTarget = GetEEFuncEntryPoint(PrecodeFixupThunk);
+ }
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+// Partial initialization. Used to save regrouped chunks.
+void FixupPrecode::InitForSave(int iPrecodeChunkIndex)
+{
+ STANDARD_VM_CONTRACT;
+
+ m_rgCode[0] = 0x46fc; // mov r12, pc
+ m_rgCode[1] = 0xf8df; // ldr pc, [pc, #4]
+ m_rgCode[2] = 0xf004;
+
+ _ASSERTE(FitsInU1(iPrecodeChunkIndex));
+ m_PrecodeChunkIndex = static_cast<BYTE>(iPrecodeChunkIndex);
+
+ // The rest is initialized in code:FixupPrecode::Fixup
+}
+
+void FixupPrecode::Fixup(DataImage *image, MethodDesc * pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Note that GetMethodDesc() does not return the correct value because of
+ // regrouping of MethodDescs into hot and cold blocks. That's why the caller
+    // has to supply the actual MethodDesc.
+
+ SSIZE_T mdChunkOffset;
+ ZapNode * pMDChunkNode = image->GetNodeForStructure(pMD, &mdChunkOffset);
+ ZapNode * pHelperThunk = image->GetHelperThunk(CORINFO_HELP_EE_PRECODE_FIXUP);
+
+ image->FixupFieldToNode(this, offsetof(FixupPrecode, m_pTarget), pHelperThunk);
+
+ // Set the actual chunk index
+ FixupPrecode * pNewPrecode = (FixupPrecode *)image->GetImagePointer(this);
+
+ size_t mdOffset = mdChunkOffset - sizeof(MethodDescChunk);
+ size_t chunkIndex = mdOffset / MethodDesc::ALIGNMENT;
+ _ASSERTE(FitsInU1(chunkIndex));
+ pNewPrecode->m_MethodDescChunkIndex = (BYTE) chunkIndex;
+
+ // Fixup the base of MethodDescChunk
+ if (m_PrecodeChunkIndex == 0)
+ {
+ image->FixupFieldToNode(this, (BYTE *)GetBase() - (BYTE *)this,
+ pMDChunkNode, sizeof(MethodDescChunk));
+ }
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+void ThisPtrRetBufPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+{
+ WRAPPER_NO_CONTRACT;
+
+ int n = 0;
+
+ m_rgCode[n++] = 0x4684; // mov r12, r0
+ m_rgCode[n++] = 0x4608; // mov r0, r1
+ m_rgCode[n++] = 0xea4f; // mov r1, r12
+ m_rgCode[n++] = 0x010c;
+ m_rgCode[n++] = 0xf8df; // ldr pc, [pc, #0]
+ m_rgCode[n++] = 0xf000;
+
+ _ASSERTE(n == _countof(m_rgCode));
+
+ m_pTarget = GetPreStubEntryPoint();
+ m_pMethodDesc = (TADDR)pMD;
+}
+
+
+#ifdef HAS_REMOTING_PRECODE
+
+void RemotingPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+{
+ WRAPPER_NO_CONTRACT;
+
+ int n = 0;
+
+ m_rgCode[n++] = 0xb502; // push {r1,lr}
+ m_rgCode[n++] = 0x4904; // ldr r1, [pc, #16] ; =m_pPrecodeRemotingThunk
+ m_rgCode[n++] = 0x4788; // blx r1
+ m_rgCode[n++] = 0xe8bd; // pop {r1,lr}
+ m_rgCode[n++] = 0x4002;
+ m_rgCode[n++] = 0xf8df; // ldr pc, [pc, #12] ; =m_pLocalTarget
+ m_rgCode[n++] = 0xf00c;
+ m_rgCode[n++] = 0xbf00; // nop ; padding for alignment
+
+ _ASSERTE(n == _countof(m_rgCode));
+
+ m_pMethodDesc = (TADDR)pMD;
+ m_pPrecodeRemotingThunk = GetEEFuncEntryPoint(PrecodeRemotingThunk);
+ m_pLocalTarget = GetPreStubEntryPoint();
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+void RemotingPrecode::Fixup(DataImage *image, ZapNode *pCodeNode)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (pCodeNode)
+ image->FixupFieldToNode(this, offsetof(RemotingPrecode, m_pLocalTarget),
+ pCodeNode,
+ THUMB_CODE,
+ IMAGE_REL_BASED_PTR);
+ else
+ image->FixupFieldToNode(this, offsetof(RemotingPrecode, m_pLocalTarget),
+ image->GetHelperThunk(CORINFO_HELP_EE_PRESTUB),
+ 0,
+ IMAGE_REL_BASED_PTR);
+
+ image->FixupFieldToNode(this, offsetof(RemotingPrecode, m_pPrecodeRemotingThunk),
+ image->GetHelperThunk(CORINFO_HELP_EE_REMOTING_THUNK),
+ 0,
+ IMAGE_REL_BASED_PTR);
+
+ image->FixupField(this, offsetof(RemotingPrecode, m_pMethodDesc),
+ (void*)GetMethodDesc(),
+ 0,
+ IMAGE_REL_BASED_PTR);
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+void CTPMethodTable::ActivatePrecodeRemotingThunk()
+{
+ // Nothing to do for ARM version of remoting precode (we don't burn the TP MethodTable pointer into
+ // PrecodeRemotingThunk directly).
+}
+
+#endif // HAS_REMOTING_PRECODE
+
+
+#ifndef CROSSGEN_COMPILE
+/*
+Rough pseudo-code of interface dispatching:
+
+ // jitted code sets r0, r4:
+ r0 = object;
+ r4 = indirectionCell;
+ // jitted code calls *indirectionCell
+ switch (*indirectionCell)
+ {
+ case LookupHolder._stub:
+ // ResolveWorkerAsmStub:
+ *indirectionCell = DispatchHolder._stub;
+ call ResolveWorkerStatic, jump to target method;
+ case DispatchHolder._stub:
+ if (r0.methodTable == expectedMethodTable) jump to target method;
+ // ResolveHolder._stub._failEntryPoint:
+ jump to case ResolveHolder._stub._resolveEntryPoint;
+ case ResolveHolder._stub._resolveEntryPoint:
+ if (r0.methodTable in hashTable) jump to target method;
+ // ResolveHolder._stub._slowEntryPoint:
+ // ResolveWorkerChainLookupAsmStub:
+ // ResolveWorkerAsmStub:
+ if (_failEntryPoint called too many times) *indirectionCell = ResolveHolder._stub._resolveEntryPoint;
+ call ResolveWorkerStatic, jump to target method;
+ }
+
+Note that ResolveWorkerChainLookupAsmStub currently points directly
+to ResolveWorkerAsmStub; in the future, this could be separate.
+*/
+
+void LookupHolder::InitializeStatic()
+{
+ // Nothing to initialize
+}
+
+void LookupHolder::Initialize(PCODE resolveWorkerTarget, size_t dispatchToken)
+{
+ // Called directly by JITTED code
+ // See ResolveWorkerAsmStub
+
+ // ldr r12, [pc + 8] ; #_token
+ _stub._entryPoint[0] = 0xf8df;
+ _stub._entryPoint[1] = 0xc008;
+ // ldr pc, [pc] ; #_resolveWorkerTarget
+ _stub._entryPoint[2] = 0xf8df;
+ _stub._entryPoint[3] = 0xf000;
+
+ _stub._resolveWorkerTarget = resolveWorkerTarget;
+ _stub._token = dispatchToken;
+ _ASSERTE(4 == LookupStub::entryPointLen);
+}
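+
+// Illustrative layout note (assuming _resolveWorkerTarget and _token sit at
+// offsets 8 and 12 respectively, as the PC-relative offsets imply):
+// "ldr r12, [pc + 8]" executes with an aligned PC of offset 4 and so loads
+// _token from offset 12, while "ldr pc, [pc]" executes with an aligned PC of
+// offset 8 and jumps through _resolveWorkerTarget.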
+
+void DispatchHolder::InitializeStatic()
+{
+ // Nothing to initialize
+}
+
+void DispatchHolder::Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT)
+{
+ // Called directly by JITTED code
+ // DispatchHolder._stub._entryPoint(r0:object, r1, r2, r3, r4:IndirectionCell)
+ // {
+ // if (r0.methodTable == this._expectedMT) (this._implTarget)(r0, r1, r2, r3);
+ // else (this._failTarget)(r0, r1, r2, r3, r4);
+ // }
+
+ int n = 0;
+ WORD offset;
+
+ // We rely on the stub entry-point being DWORD aligned (so we can tell whether any subsequent WORD is
+ // DWORD-aligned or not, which matters in the calculation of PC-relative offsets).
+ _ASSERTE(((UINT_PTR)_stub._entryPoint & 0x3) == 0);
+
+// Compute a PC-relative offset for use in an instruction encoding. Must call this prior to emitting the
+// instruction halfword to which it applies. For thumb-2 encodings the offset must be computed before emitting
+// the first of the halfwords.
+#undef PC_REL_OFFSET
+#define PC_REL_OFFSET(_field) (WORD)(offsetof(DispatchStub, _field) - (offsetof(DispatchStub, _entryPoint[n + 2]) & 0xfffffffc))
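+
+    // For example (illustrative): when about to emit _entryPoint[n], a Thumb
+    // literal load issued there reads its base address as Align(PC, 4) where
+    // PC is the instruction's address + 4, i.e. &_entryPoint[n + 2]. Given the
+    // DWORD-aligned stub start asserted above, that base is exactly
+    // (offsetof(DispatchStub, _entryPoint[n + 2]) & 0xfffffffc) bytes into the
+    // stub, so the macro yields the byte distance from it to _field.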
+
+    // r0: the object. It can be null as well.
+    // When it is null, the code below causes an AV. This AV is seen by the VM's personality routine,
+    // which converts it into a NullReferenceException. We want the AV to happen before modifying the
+    // stack so that we can get the call stack in windbg at the point of the AV, so "ldr r12, [r0]"
+    // must be the first instruction.
+
+ // ldr r12, [r0 + #Object.m_pMethTab]
+ _stub._entryPoint[n++] = DISPATCH_STUB_FIRST_WORD;
+ _stub._entryPoint[n++] = 0xc000;
+
+ // push {r5}
+ _stub._entryPoint[n++] = 0xb420;
+
+ // ldr r5, [pc + #_expectedMT]
+ offset = PC_REL_OFFSET(_expectedMT);
+ _ASSERTE((offset & 0x3) == 0);
+ _stub._entryPoint[n++] = 0x4d00 | (offset >> 2);
+
+ // cmp r5, r12
+ _stub._entryPoint[n++] = 0x4565;
+
+ // pop {r5}
+ _stub._entryPoint[n++] = 0xbc20;
+
+ // bne failTarget
+ _stub._entryPoint[n++] = 0xd101;
+
+ // ldr pc, [pc + #_implTarget]
+ offset = PC_REL_OFFSET(_implTarget);
+ _stub._entryPoint[n++] = 0xf8df;
+ _stub._entryPoint[n++] = 0xf000 | offset;
+
+ // failTarget:
+ // ldr pc, [pc + #_failTarget]
+ offset = PC_REL_OFFSET(_failTarget);
+ _stub._entryPoint[n++] = 0xf8df;
+ _stub._entryPoint[n++] = 0xf000 | offset;
+
+ // nop - insert padding
+ _stub._entryPoint[n++] = 0xbf00;
+
+ _ASSERTE(n == DispatchStub::entryPointLen);
+
+ // Make sure that the data members below are aligned
+ _ASSERTE((n & 1) == 0);
+
+ _stub._expectedMT = DWORD(expectedMT);
+ _stub._failTarget = failTarget;
+ _stub._implTarget = implTarget;
+}
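+
+// Encoding note (illustrative): "ldr r5, [pc, #imm]" above uses the 16-bit
+// literal-load encoding 01001|Rt|imm8, i.e. 0x4800 | (Rt << 8) | (imm / 4);
+// with Rt == r5 and, say, imm == 16 the emitted halfword would be 0x4d04.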
+
+void ResolveHolder::InitializeStatic()
+{
+}
+
+void ResolveHolder::Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+ size_t dispatchToken, UINT32 hashedToken,
+ void * cacheAddr, INT32 * counterAddr)
+{
+ // Called directly by JITTED code
+ // ResolveStub._resolveEntryPoint(r0:Object*, r1, r2, r3, r4:IndirectionCellAndFlags)
+ // {
+ // MethodTable mt = r0.m_pMethTab;
+    //          int i = ((mt + (mt >> 12)) ^ this._hashedToken) & this._cacheMask
+ // ResolveCacheElem e = this._cacheAddress + i
+ // do
+ // {
+ // if (mt == e.pMT && this._token == e.token) (e.target)(r0, r1, r2, r3);
+ // e = e.pNext;
+ // } while (e != null)
+ // (this._slowEntryPoint)(r0, r1, r2, r3, r4);
+ // }
+ //
+
+ int n = 0;
+ WORD offset;
+
+ // We rely on the stub entry-point being DWORD aligned (so we can tell whether any subsequent WORD is
+ // DWORD-aligned or not, which matters in the calculation of PC-relative offsets).
+ _ASSERTE(((UINT_PTR)_stub._resolveEntryPoint & 0x3) == 0);
+
+// Compute a PC-relative offset for use in an instruction encoding. Must call this prior to emitting the
+// instruction halfword to which it applies. For thumb-2 encodings the offset must be computed before emitting
+// the first of the halfwords.
+#undef PC_REL_OFFSET
+#define PC_REL_OFFSET(_field) (WORD)(offsetof(ResolveStub, _field) - (offsetof(ResolveStub, _resolveEntryPoint[n + 2]) & 0xfffffffc))
+
+ // ldr r12, [r0 + #Object.m_pMethTab]
+ _stub._resolveEntryPoint[n++] = RESOLVE_STUB_FIRST_WORD;
+ _stub._resolveEntryPoint[n++] = 0xc000;
+
+ // ;; We need two scratch registers, r5 and r6
+ // push {r5,r6}
+ _stub._resolveEntryPoint[n++] = 0xb460;
+
+    // ;; Compute i = ((mt + (mt >> 12)) ^ this._hashedToken) & this._cacheMask
+
+    // add r6, r12, r12, lsr #12
+ _stub._resolveEntryPoint[n++] = 0xeb0c;
+ _stub._resolveEntryPoint[n++] = 0x361c;
+
+ // ldr r5, [pc + #_hashedToken]
+ offset = PC_REL_OFFSET(_hashedToken);
+ _ASSERTE((offset & 0x3) == 0);
+ _stub._resolveEntryPoint[n++] = 0x4d00 | (offset >> 2);
+
+ // eor r6, r6, r5
+ _stub._resolveEntryPoint[n++] = 0xea86;
+ _stub._resolveEntryPoint[n++] = 0x0605;
+
+ // ldr r5, [pc + #_cacheMask]
+ offset = PC_REL_OFFSET(_cacheMask);
+ _ASSERTE((offset & 0x3) == 0);
+ _stub._resolveEntryPoint[n++] = 0x4d00 | (offset >> 2);
+
+ // and r6, r6, r5
+ _stub._resolveEntryPoint[n++] = 0xea06;
+ _stub._resolveEntryPoint[n++] = 0x0605;
+
+ // ;; ResolveCacheElem e = this._cacheAddress + i
+ // ldr r5, [pc + #_cacheAddress]
+ offset = PC_REL_OFFSET(_cacheAddress);
+ _ASSERTE((offset & 0x3) == 0);
+ _stub._resolveEntryPoint[n++] = 0x4d00 | (offset >> 2);
+
+ // ldr r6, [r5 + r6] ;; r6 = e = this._cacheAddress + i
+ _stub._resolveEntryPoint[n++] = 0x59ae;
+
+ // ;; do {
+ int loop = n;
+
+ // ;; Check mt == e.pMT
+ // ldr r5, [r6 + #ResolveCacheElem.pMT]
+ offset = offsetof(ResolveCacheElem, pMT);
+ _ASSERTE(offset <= 124 && (offset & 0x3) == 0);
+    _stub._resolveEntryPoint[n++] = 0x6835 | (offset << 4);
+
+ // cmp r12, r5
+ _stub._resolveEntryPoint[n++] = 0x45ac;
+
+ // bne nextEntry
+ _stub._resolveEntryPoint[n++] = 0xd108;
+
+ // ;; Check this._token == e.token
+ // ldr r5, [pc + #_token]
+ offset = PC_REL_OFFSET(_token);
+ _ASSERTE((offset & 0x3) == 0);
+    _stub._resolveEntryPoint[n++] = 0x4d00 | (offset >> 2);
+
+ // ldr r12, [r6 + #ResolveCacheElem.token]
+ offset = offsetof(ResolveCacheElem, token);
+ _stub._resolveEntryPoint[n++] = 0xf8d6;
+ _stub._resolveEntryPoint[n++] = 0xc000 | offset;
+
+ // cmp r12, r5
+ _stub._resolveEntryPoint[n++] = 0x45ac;
+
+ // bne nextEntry
+ _stub._resolveEntryPoint[n++] = 0xd103;
+
+ // ldr r12, [r6 + #ResolveCacheElem.target] ;; r12 : e.target
+ offset = offsetof(ResolveCacheElem, target);
+ _stub._resolveEntryPoint[n++] = 0xf8d6;
+ _stub._resolveEntryPoint[n++] = 0xc000 | offset;
+
+ // ;; Restore r5 and r6
+ // pop {r5,r6}
+ _stub._resolveEntryPoint[n++] = 0xbc60;
+
+ // ;; Branch to e.target
+ // bx r12 ;; (e.target)(r0,r1,r2,r3)
+ _stub._resolveEntryPoint[n++] = 0x4760;
+
+ // nextEntry:
+ // ;; e = e.pNext;
+ // ldr r6, [r6 + #ResolveCacheElem.pNext]
+ offset = offsetof(ResolveCacheElem, pNext);
+    _ASSERTE(offset <= 124 && (offset & 0x3) == 0);
+ _stub._resolveEntryPoint[n++] = 0x6836 | (offset << 4);
+
+ // ;; } while(e != null);
+ // cbz r6, slowEntryPoint
+ _stub._resolveEntryPoint[n++] = 0xb116;
+
+ // ldr r12, [r0 + #Object.m_pMethTab]
+ _stub._resolveEntryPoint[n++] = 0xf8d0;
+ _stub._resolveEntryPoint[n++] = 0xc000;
+
+ // b loop
+ offset = (WORD)((loop - (n + 2)) * sizeof(WORD));
+ offset = (offset >> 1) & 0x07ff;
+ _stub._resolveEntryPoint[n++] = 0xe000 | offset;
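+
+    // Worked example (illustrative, based on the instruction stream emitted
+    // above): loop == 13 and this branch halfword sits at index 29, so
+    // offset = (13 - 31) * 2 = -36 bytes; ((WORD)-36 >> 1) & 0x07ff == 0x7ee,
+    // and the emitted halfword is 0xe7ee, a T2 "b" back to the loop head.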
+
+ // slowEntryPoint:
+ // pop {r5,r6}
+ _stub._resolveEntryPoint[n++] = 0xbc60;
+
+ // nop for alignment
+ _stub._resolveEntryPoint[n++] = 0xbf00;
+
+    // The slow entry point must be DWORD-aligned (see the _ASSERTE below); insert nops if necessary.
+
+ // ARMSTUB TODO: promotion
+
+ // fall through to slow case
+ _ASSERTE(_stub._resolveEntryPoint + n == _stub._slowEntryPoint);
+ _ASSERTE(n == ResolveStub::resolveEntryPointLen);
+
+ // ResolveStub._slowEntryPoint(r0:MethodToken, r1, r2, r3, r4:IndirectionCellAndFlags)
+ // {
+ // r12 = this._tokenSlow;
+ // this._resolveWorkerTarget(r0, r1, r2, r3, r4, r12);
+ // }
+
+    // The following macro relies on this entry point being DWORD-aligned. We've already asserted that the
+    // overall stub is aligned above; we just need to check that the preceding stubs occupy an even number
+    // of WORD slots.
+ _ASSERTE((n & 1) == 0);
+
+#undef PC_REL_OFFSET
+#define PC_REL_OFFSET(_field) (WORD)(offsetof(ResolveStub, _field) - (offsetof(ResolveStub, _slowEntryPoint[n + 2]) & 0xfffffffc))
+
+ n = 0;
+
+ // ldr r12, [pc + #_tokenSlow]
+ offset = PC_REL_OFFSET(_tokenSlow);
+ _stub._slowEntryPoint[n++] = 0xf8df;
+ _stub._slowEntryPoint[n++] = 0xc000 | offset;
+
+ // ldr pc, [pc + #_resolveWorkerTarget]
+ offset = PC_REL_OFFSET(_resolveWorkerTarget);
+ _stub._slowEntryPoint[n++] = 0xf8df;
+ _stub._slowEntryPoint[n++] = 0xf000 | offset;
+
+ _ASSERTE(n == ResolveStub::slowEntryPointLen);
+
+ // ResolveStub._failEntryPoint(r0:MethodToken, r1, r2, r3, r4:IndirectionCellAndFlags)
+ // {
+ // if(--*(this._pCounter) < 0) r4 = r4 | SDF_ResolveBackPatch;
+ // this._resolveEntryPoint(r0, r1, r2, r3, r4);
+ // }
+
+    // The following macro relies on this entry point being DWORD-aligned. We've already asserted that the
+    // overall stub is aligned above; we just need to check that the preceding stubs occupy an even number
+    // of WORD slots.
+ _ASSERTE((n & 1) == 0);
+
+#undef PC_REL_OFFSET
+#define PC_REL_OFFSET(_field) (WORD)(offsetof(ResolveStub, _field) - (offsetof(ResolveStub, _failEntryPoint[n + 2]) & 0xfffffffc))
+
+ n = 0;
+
+ // push {r5}
+ _stub._failEntryPoint[n++] = 0xb420;
+
+ // ldr r5, [pc + #_pCounter]
+ offset = PC_REL_OFFSET(_pCounter);
+ _ASSERTE((offset & 0x3) == 0);
+    _stub._failEntryPoint[n++] = 0x4d00 | (offset >> 2);
+
+ // ldr r12, [r5]
+ _stub._failEntryPoint[n++] = 0xf8d5;
+ _stub._failEntryPoint[n++] = 0xc000;
+
+ // subs r12, r12, #1
+ _stub._failEntryPoint[n++] = 0xf1bc;
+ _stub._failEntryPoint[n++] = 0x0c01;
+
+ // str r12, [r5]
+ _stub._failEntryPoint[n++] = 0xf8c5;
+ _stub._failEntryPoint[n++] = 0xc000;
+
+ // pop {r5}
+ _stub._failEntryPoint[n++] = 0xbc20;
+
+ // bge resolveEntryPoint
+ _stub._failEntryPoint[n++] = 0xda01;
+
+    // orr r4, r4, #SDF_ResolveBackPatch
+ _ASSERTE(SDF_ResolveBackPatch < 256);
+ _stub._failEntryPoint[n++] = 0xf044;
+ _stub._failEntryPoint[n++] = 0x0400 | SDF_ResolveBackPatch;
+
+ // resolveEntryPoint:
+ // b _resolveEntryPoint
+ offset = (WORD)(offsetof(ResolveStub, _resolveEntryPoint) - offsetof(ResolveStub, _failEntryPoint[n + 2]));
+ _ASSERTE((offset & 1) == 0);
+ offset = (offset >> 1) & 0x07ff;
+ _stub._failEntryPoint[n++] = 0xe000 | offset;
+
+ // nop for alignment
+ _stub._failEntryPoint[n++] = 0xbf00;
+
+ _ASSERTE(n == ResolveStub::failEntryPointLen);
+
+ _stub._pCounter = counterAddr;
+ _stub._hashedToken = hashedToken << LOG2_PTRSIZE;
+ _stub._cacheAddress = (size_t) cacheAddr;
+ _stub._token = dispatchToken;
+ _stub._tokenSlow = dispatchToken;
+ _stub._resolveWorkerTarget = resolveWorkerTarget;
+ _stub._cacheMask = CALL_STUB_CACHE_MASK * sizeof(void*);
+
+ _ASSERTE(resolveWorkerTarget == (PCODE)ResolveWorkerChainLookupAsmStub);
+ _ASSERTE(patcherTarget == NULL);
+}
+
+BOOL DoesSlotCallPrestub(PCODE pCode)
+{
+ PTR_WORD pInstr = dac_cast<PTR_WORD>(PCODEToPINSTR(pCode));
+
+ // FixupPrecode
+    if (pInstr[0] == 0x46fc && // mov r12, pc
+ pInstr[1] == 0xf8df &&
+ pInstr[2] == 0xf004)
+ {
+ PCODE pTarget = dac_cast<PTR_FixupPrecode>(pInstr)->m_pTarget;
+
+ // Check for jump stub (NGen case)
+ if (isJump(pTarget))
+ {
+ pTarget = decodeJump(pTarget);
+ }
+
+ return pTarget == (TADDR)PrecodeFixupThunk;
+ }
+
+ // StubPrecode
+ if (pInstr[0] == 0xf8df && // ldr r12, [pc + 8]
+ pInstr[1] == 0xc008 &&
+ pInstr[2] == 0xf8df && // ldr pc, [pc]
+ pInstr[3] == 0xf000)
+ {
+ PCODE pTarget = dac_cast<PTR_StubPrecode>(pInstr)->m_pTarget;
+
+ // Check for jump stub (NGen case)
+ if (isJump(pTarget))
+ {
+ pTarget = decodeJump(pTarget);
+ }
+
+ return pTarget == GetPreStubEntryPoint();
+ }
+
+ return FALSE;
+}
+
+Stub *GenerateInitPInvokeFrameHelper()
+{
+ CONTRACT(Stub*)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ CPUSTUBLINKER sl;
+ CPUSTUBLINKER *psl = &sl;
+
+ CORINFO_EE_INFO::InlinedCallFrameInfo FrameInfo;
+ InlinedCallFrame::GetEEInfo(&FrameInfo);
+
+ // R4 contains address of the frame on stack (the frame ptr, not its neg space)
+ unsigned negSpace = FrameInfo.offsetOfFrameVptr;
+
+ ThumbReg regFrame = ThumbReg(4);
+ ThumbReg regThread = ThumbReg(5);
+ ThumbReg regScratch = ThumbReg(6);
+
+ TLSACCESSMODE mode = GetTLSAccessMode(GetThreadTLSIndex());
+
+ if (mode == TLSACCESS_GENERIC)
+ {
+ // Erect frame to perform call to GetThread
+ psl->ThumbEmitProlog(1, sizeof(ArgumentRegisters), FALSE); // Save r4 for aligned stack
+
+        // Save argument registers around the GetThread call. Don't bother using ldm/stm since this is an inefficient path anyway.
+ for (int reg = 0; reg < 4; reg++)
+ psl->ThumbEmitStoreRegIndirect(ThumbReg(reg), thumbRegSp, offsetof(ArgumentRegisters, r[reg]));
+ }
+
+ psl->ThumbEmitGetThread(mode, regThread);
+
+ if (mode == TLSACCESS_GENERIC)
+ {
+ for (int reg = 0; reg < 4; reg++)
+ psl->ThumbEmitLoadRegIndirect(ThumbReg(reg), thumbRegSp, offsetof(ArgumentRegisters, r[reg]));
+ }
+
+ // mov [regFrame + FrameInfo.offsetOfGSCookie], GetProcessGSCookie()
+ psl->ThumbEmitMovConstant(regScratch, GetProcessGSCookie());
+ psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfGSCookie - negSpace);
+
+ // mov [regFrame + FrameInfo.offsetOfFrameVptr], InlinedCallFrame::GetMethodFrameVPtr()
+ psl->ThumbEmitMovConstant(regScratch, InlinedCallFrame::GetMethodFrameVPtr());
+ psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfFrameVptr - negSpace);
+
+ // ldr regScratch, [regThread + offsetof(Thread, m_pFrame)]
+ // str regScratch, [regFrame + FrameInfo.offsetOfFrameLink]
+ psl->ThumbEmitLoadRegIndirect(regScratch, regThread, offsetof(Thread, m_pFrame));
+ psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfFrameLink - negSpace);
+
+    // str FP, [regFrame + FrameInfo.offsetOfCalleeSavedFP]
+ psl->ThumbEmitStoreRegIndirect(thumbRegFp, regFrame, FrameInfo.offsetOfCalleeSavedFP - negSpace);
+
+ // mov [regFrame + FrameInfo.offsetOfReturnAddress], 0
+ psl->ThumbEmitMovConstant(regScratch, 0);
+ psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfReturnAddress - negSpace);
+
+ if (mode == TLSACCESS_GENERIC)
+ {
+ DWORD cbSavedRegs = sizeof(ArgumentRegisters) + 2 * 4; // r0-r3, r4, lr
+ psl->ThumbEmitAdd(regScratch, thumbRegSp, cbSavedRegs);
+ psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfCallSiteSP - negSpace);
+ }
+ else
+ {
+ // str SP, [regFrame + FrameInfo.offsetOfCallSiteSP]
+ psl->ThumbEmitStoreRegIndirect(thumbRegSp, regFrame, FrameInfo.offsetOfCallSiteSP - negSpace);
+ }
+
+ // mov [regThread + offsetof(Thread, m_pFrame)], regFrame
+ psl->ThumbEmitStoreRegIndirect(regFrame, regThread, offsetof(Thread, m_pFrame));
+
+ // leave current Thread in R4
+
+ if (mode == TLSACCESS_GENERIC)
+ {
+ psl->ThumbEmitEpilog();
+ }
+ else
+ {
+ // Return. The return address has been restored into LR at this point.
+ // bx lr
+ psl->ThumbEmitJumpRegister(thumbRegLr);
+ }
+
+ // A single process-wide stub that will never unload
+ RETURN psl->Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap());
+}
+
+void StubLinkerCPU::ThumbEmitGetThread(TLSACCESSMODE mode, ThumbReg dest)
+{
+ DWORD idxThread = GetThreadTLSIndex();
+
+ if (mode != TLSACCESS_GENERIC)
+ {
+ // mrc p15, 0, dest, c13, c0, 2
+ Emit16(0xee1d);
+ Emit16((WORD)(0x0f50 | (dest << 12)));
+
+ if (mode == TLSACCESS_WNT)
+ {
+ // ldr dest, [dest, #(WINNT_TLS_OFFSET + (idxThread * sizeof(void*)))]
+ ThumbEmitLoadRegIndirect(dest, dest, offsetof(TEB, TlsSlots) + (idxThread * sizeof(void*)));
+ }
+ else
+ {
+ _ASSERTE(mode == TLSACCESS_WNT_HIGH);
+
+ // ldr dest, [dest, #WINNT5_TLSEXPANSIONPTR_OFFSET]
+ ThumbEmitLoadRegIndirect(dest, dest, offsetof(TEB, TlsExpansionSlots));
+
+ // ldr dest, [dest + #(idxThread * 4)]
+ ThumbEmitLoadRegIndirect(dest, dest, (idxThread - TLS_MINIMUM_AVAILABLE) * sizeof(void*));
+ }
+ }
+ else
+ {
+ ThumbEmitMovConstant(ThumbReg(0), idxThread);
+
+#pragma push_macro("TlsGetValue")
+#undef TlsGetValue
+ ThumbEmitMovConstant(ThumbReg(1), (TADDR)TlsGetValue);
+#pragma pop_macro("TlsGetValue")
+
+ ThumbEmitCallRegister(ThumbReg(1));
+
+ if (dest != ThumbReg(0))
+ {
+ ThumbEmitMovRegReg(dest, ThumbReg(0));
+ }
+ }
+}
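+
+// Encoding note (illustrative): for dest == r5 the two halfwords emitted for
+// "mrc p15, 0, r5, c13, c0, 2" are 0xee1d and 0x0f50 | (5 << 12) == 0x5f50;
+// this coprocessor read yields the TEB pointer on Windows on ARM.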
+#endif // CROSSGEN_COMPILE
+
+
+// Emits code to adjust for a static delegate target.
+VOID StubLinkerCPU::EmitShuffleThunk(ShuffleEntry *pShuffleEntryArray)
+{
+    // Scan the shuffle entries to see if there are any stack-to-stack operations. If there aren't we can
+    // emit a much simpler thunk (simply because we generate code that doesn't require more than one scratch
+    // register).
+ bool fSimpleCase = true;
+ ShuffleEntry *pEntry = pShuffleEntryArray;
+ while (pEntry->srcofs != ShuffleEntry::SENTINEL)
+ {
+        // It's enough to check whether we have a destination stack location (there are no register-to-stack
+        // scenarios).
+ if (!(pEntry->dstofs & ShuffleEntry::REGMASK))
+ {
+ fSimpleCase = false;
+ break;
+ }
+ pEntry++;
+ }
+
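+    // For instance (illustrative): for a static delegate whose target takes
+    // two integer arguments the shuffle array is just { r1 -> r0, r2 -> r1,
+    // SENTINEL }; every destination is a register, so the simple thunk
+    // emitted below suffices.
+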
+ if (fSimpleCase)
+ {
+ // No real prolog for the simple case, we're a tail call so we shouldn't be on the stack for any walk
+ // or unwind.
+
+ // On entry r0 holds the delegate instance. Look up the real target address stored in the MethodPtrAux
+ // field and stash it in r12.
+ // ldr r12, [r0, #offsetof(DelegateObject, _methodPtrAux)]
+ ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(0), DelegateObject::GetOffsetOfMethodPtrAux());
+
+ // Emit the instructions to rewrite the argument registers. Most will be register-to-register (e.g.
+ // move r1 to r0) but one or two of them might move values from the top of the incoming stack
+ // arguments into registers r2 and r3. Note that the entries are ordered so that we don't need to
+ // worry about a move overwriting a register we'll need to use as input for the next move (i.e. we get
+ // move r1 to r0, move r2 to r1 etc.).
+ pEntry = pShuffleEntryArray;
+ while (pEntry->srcofs != ShuffleEntry::SENTINEL)
+ {
+ _ASSERTE(pEntry->dstofs & ShuffleEntry::REGMASK);
+
+ if (pEntry->srcofs & ShuffleEntry::REGMASK)
+ {
+ // Move from register case.
+ ThumbEmitMovRegReg(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK),
+ ThumbReg(pEntry->srcofs & ShuffleEntry::OFSMASK));
+ }
+ else
+ {
+ // Move from the stack case.
+ // ldr <dest>, [sp + #source_offset]
+ ThumbEmitLoadRegIndirect(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK),
+ thumbRegSp,
+ (pEntry->srcofs & ShuffleEntry::OFSMASK) * 4);
+ }
+
+ pEntry++;
+ }
+
+ // Tail call to real target.
+ // bx r12
+ ThumbEmitJumpRegister(ThumbReg(12));
+
+ return;
+ }
+
+ // In the more complex case we need to re-write at least some of the arguments on the stack as well as
+ // argument registers. We need some temporary registers to perform stack-to-stack copies and we've
+ // reserved our one remaining volatile register, r12, to store the eventual target method address. So
+ // we're going to generate a hybrid-tail call. Using a tail call has the advantage that we don't need to
+ // erect and link an explicit CLR frame to enable crawling of this thunk. Additionally re-writing the
+    // stack can be more performant in some scenarios than copying the stack (in the presence of floating point
+    // or arguments requiring 64-bit alignment we might not have to move some or even most of the values).
+ // The hybrid nature is that we'll erect a standard native frame (with a proper prolog and epilog) so we
+ // can save some non-volatile registers to act as temporaries. Once we've performed the stack re-write
+ // we'll poke the saved LR value (which will become a PC value on the pop in the epilog) to return to the
+ // target method instead of us, thus atomically removing our frame from the stack and tail-calling the
+ // real target.
+
+ // Prolog:
+ ThumbEmitProlog(3, // Save r4-r6,lr (count doesn't include lr)
+ 0, // No additional space in the stack frame required
+ FALSE); // Don't push argument registers
+
+ // On entry r0 holds the delegate instance. Look up the real target address stored in the MethodPtrAux
+ // field and stash it in r12.
+ // ldr r12, [r0, #offsetof(DelegateObject, _methodPtrAux)]
+ ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(0), DelegateObject::GetOffsetOfMethodPtrAux());
+
+ // As we copy slots from lower in the argument stack to higher we need to keep track of source and
+ // destination pointers into those arguments (if we just use offsets from SP we get into trouble with
+ // argument frames larger than 4K). We'll use r4 to track the source (original location of an argument
+ // from the caller's perspective) and r5 to track the destination (new location of the argument from the
+ // callee's perspective). Both start at the current value of SP plus the offset created by pushing our
+ // stack frame in the prolog.
+ // add r4, sp, #cbSavedRegs
+ // add r5, sp, #cbSavedRegs
+ DWORD cbSavedRegs = 4 * 4; // r4, r5, r6, lr
+ ThumbEmitAdd(ThumbReg(4), thumbRegSp, cbSavedRegs);
+ ThumbEmitAdd(ThumbReg(5), thumbRegSp, cbSavedRegs);
+
+ // Follow the shuffle array instructions to re-write some subset of r0-r3 and the stacked arguments to
+ // remove the unwanted delegate instance in r0. Arguments only ever move from higher registers to lower
+ // registers or higher stack addresses to lower stack addresses and are ordered from lowest register to
+ // highest stack address. As a result we can do all updates in order and in place and we'll never
+ // overwrite a register or stack location needed as a source value in a later iteration.
+ DWORD dwLastSrcIndex = (DWORD)-1;
+ DWORD dwLastDstIndex = (DWORD)-1;
+ pEntry = pShuffleEntryArray;
+ while (pEntry->srcofs != ShuffleEntry::SENTINEL)
+ {
+ // If this is a register-to-register move we can do it in one instruction.
+ if ((pEntry->srcofs & ShuffleEntry::REGMASK) && (pEntry->dstofs & ShuffleEntry::REGMASK))
+ {
+ ThumbEmitMovRegReg(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK),
+ ThumbReg(pEntry->srcofs & ShuffleEntry::OFSMASK));
+ }
+ else
+ {
+ // There is no case where a source argument register is moved into a destination stack slot.
+ _ASSERTE((pEntry->srcofs & ShuffleEntry::REGMASK) == 0);
+
+ // Source or destination stack offsets might not be contiguous (though they often will be).
+ // Floating point arguments and 64-bit aligned values can cause discontinuities. While we copy
+ // values we'll use post increment addressing modes to move both source and destination stack
+ // pointers forward 4 bytes at a time, the common case. But we'll insert additional add
+ // instructions for any holes we find (we detect these by remembering the last source and
+ // destination stack offset we used).
+
+ // Add any additional offset to the source pointer (r4) to account for holes in the copy.
+ DWORD dwSrcIndex = pEntry->srcofs & ShuffleEntry::OFSMASK;
+ if (dwSrcIndex != (dwLastSrcIndex + 1))
+ {
+ _ASSERTE(dwSrcIndex > dwLastSrcIndex);
+
+ // add r4, #gap_size
+ ThumbEmitIncrement(ThumbReg(4), (dwSrcIndex - dwLastSrcIndex - 1) * 4);
+ }
+ dwLastSrcIndex = dwSrcIndex;
+
+ // Load the source value from the stack and increment our source pointer (r4) in one instruction.
+ // If the target is a register we can move the value directly there. Otherwise we move it to the
+ // r6 temporary register.
+ if (pEntry->dstofs & ShuffleEntry::REGMASK)
+ {
+ // ldr <regnum>, [r4], #4
+ ThumbEmitLoadIndirectPostIncrement(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK), ThumbReg(4), 4);
+ }
+ else
+ {
+ // ldr r6, [r4], #4
+ ThumbEmitLoadIndirectPostIncrement(ThumbReg(6), ThumbReg(4), 4);
+
+ // Add any additional offset to the destination pointer (r5) to account for holes in the copy.
+ DWORD dwDstIndex = pEntry->dstofs & ShuffleEntry::OFSMASK;
+ if (dwDstIndex != (dwLastDstIndex + 1))
+ {
+ _ASSERTE(dwDstIndex > dwLastDstIndex);
+
+ // add r5, #gap_size
+ ThumbEmitIncrement(ThumbReg(5), (dwDstIndex - dwLastDstIndex - 1) * 4);
+ }
+ dwLastDstIndex = dwDstIndex;
+
+                // Write the value in r6 to its final home on the stack and increment our destination pointer
+ // (r5).
+ // str r6, [r5], #4
+ ThumbEmitStoreIndirectPostIncrement(ThumbReg(6), ThumbReg(5), 4);
+ }
+ }
+
+ pEntry++;
+ }
+
+ // Arguments are copied. Now we modify the saved value of LR we created in our prolog (which will be
+ // popped back off into PC in our epilog) so that it points to the real target address in r12 rather than
+ // our return address. We haven't modified LR ourselves, so the net result is that executing our epilog
+ // will pop our frame and tail call to the real method.
+ // str r12, [sp + #(cbSavedRegs-4)]
+ ThumbEmitStoreRegIndirect(ThumbReg(12), thumbRegSp, cbSavedRegs - 4);
+
+ // Epilog:
+ ThumbEmitEpilog();
+}
+
+void StubLinkerCPU::ThumbEmitCallManagedMethod(MethodDesc *pMD, bool fTailcall)
+{
+ // Use direct call if possible.
+ if (pMD->HasStableEntryPoint())
+ {
+ // mov r12, #entry_point
+ ThumbEmitMovConstant(ThumbReg(12), (TADDR)pMD->GetStableEntryPoint());
+ }
+ else
+ {
+ // mov r12, #slotaddress
+ ThumbEmitMovConstant(ThumbReg(12), (TADDR)pMD->GetAddrOfSlot());
+
+ // ldr r12, [r12]
+ ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(12), 0);
+ }
+
+ if (fTailcall)
+ {
+ // bx r12
+ ThumbEmitJumpRegister(ThumbReg(12));
+ }
+ else
+ {
+ // blx r12
+ ThumbEmitCallRegister(ThumbReg(12));
+ }
+}
+
+#ifndef CROSSGEN_COMPILE
+// Common code used to generate either an instantiating method stub or an unboxing stub (in the case where the
+// unboxing stub also needs to provide a generic instantiation parameter). The stub needs to add the
+// instantiation parameter provided in pHiddenArg and re-arrange the rest of the incoming arguments as a
+// result (since on ARM this hidden parameter is inserted before explicit user arguments we need a type of
+// shuffle thunk in the reverse direction of the type used for static delegates). If pHiddenArg == NULL it
+// indicates that we're in the unboxing case and should add sizeof(MethodTable*) to the incoming this pointer
+// before dispatching to the target. In this case the instantiating parameter is always the non-shared
+// MethodTable pointer we can deduce directly from the incoming 'this' reference.
+void StubLinkerCPU::ThumbEmitCallWithGenericInstantiationParameter(MethodDesc *pMD, void *pHiddenArg)
+{
+ // There is a simple case and a complex case.
+ // 1) In the simple case the addition of the hidden arg doesn't push any user args onto the stack. In
+ // this case we only have to re-arrange/initialize some argument registers and tail call to the
+ // target.
+ // 2) In the complex case we have to modify the stack by pushing some of the register based user
+ // arguments. We can't tail call in this case because we've altered the size of the stack and our
+ // caller doesn't expect this and can't compensate. Instead we'll need to create a stack frame
+ // (including an explicit Frame to make it crawlable to the runtime) and copy the incoming arguments
+ // over.
+ //
+ // First we need to analyze the signature of the target method both with and without the extra
+ // instantiation argument. We use ArgIterator to determine the difference in location
+// (register or stack offset) for each argument between the two cases. This forms a set of instructions that
+ // tell us how to copy incoming arguments into outgoing arguments (and if those instructions don't include
+ // any writes to stack locations in the outgoing case then we know we can generate a simple thunk).
+
+ SigTypeContext sTypeContext(pMD, TypeHandle());
+
+ // Incoming, source, method signature.
+ MetaSig sSrcSig(pMD->GetSignature(),
+ pMD->GetModule(),
+ &sTypeContext,
+ MetaSig::sigMember);
+
+ // Outgoing, destination, method signature.
+ MetaSig sDstSig(pMD->GetSignature(),
+ pMD->GetModule(),
+ &sTypeContext,
+ MetaSig::sigMember);
+
+ sDstSig.SetHasParamTypeArg();
+
+ // Wrap calling convention parsers round the source and destination signatures. These will be responsible
+ // for determining where each argument lives in registers or on the stack.
+ ArgIterator sSrcArgLocations(&sSrcSig);
+ ArgIterator sDstArgLocations(&sDstSig);
+
+ // Define an argument descriptor type that describes how a single 4 byte portion of an argument is mapped
+ // in the source and destination signature. We only have to worry about general registers and stack
+ // locations here; floating point argument registers are left unmodified by this thunk.
+ struct ArgDesc
+ {
+ int m_idxSrc; // Source register or stack offset
+ int m_idxDst; // Destination register or stack offset
+ bool m_fSrcIsReg; // Source index is a register number
+ bool m_fDstIsReg; // Destination index is a register number
+ };
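+
+    // For example, { m_idxSrc = 2, m_fSrcIsReg = true, m_idxDst = 3, m_fDstIsReg = true }
+    // describes moving r2 to r3, while m_fDstIsReg = false with m_idxDst = 0 describes a spill to
+    // the first outgoing stack slot.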
+
+ // The number of argument move descriptors we'll need is a function of the number of 4-byte registers or
+ // stack slots the arguments occupy. The following calculation will over-estimate in a few side cases, but
+ // not by much (it assumes all four argument registers are used plus the number of stack slots that
+ // MetaSig calculates are needed for the rest of the arguments).
+ DWORD cArgDescriptors = 4 + (sSrcArgLocations.SizeOfArgStack() / 4);
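+    // (For example, a source signature using all four argument registers plus eight bytes of
+    // stack yields cArgDescriptors = 4 + 2 = 6.)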
+
+ // Allocate the array of argument descriptors.
+ CQuickArray<ArgDesc> rgArgDescs;
+ rgArgDescs.AllocThrows(cArgDescriptors);
+
+ // We only need to map translations for arguments that could come after the instantiation parameter we're
+ // inserting. On the ARM the only implicit argument that could follow is a vararg signature cookie, but
+ // it's disallowed in this case. So we simply walk the user arguments.
+ _ASSERTE(!sSrcSig.IsVarArg());
+
+ INT srcOffset;
+ INT dstOffset;
+
+ DWORD idxCurrentDesc = 0;
+ while ((srcOffset = sSrcArgLocations.GetNextOffset()) != TransitionBlock::InvalidOffset)
+ {
+ dstOffset = sDstArgLocations.GetNextOffset();
+
+ // Get the placement for a single argument in the source and destination signatures (may include
+ // multiple registers and/or stack locations if the argument is larger than 4 bytes).
+ ArgLocDesc sSrcArgLoc;
+ sSrcArgLocations.GetArgLoc(srcOffset, &sSrcArgLoc);
+ ArgLocDesc sDstArgLoc;
+ sDstArgLocations.GetArgLoc(dstOffset, &sDstArgLoc);
+
+ // Fill in as many single-slot descriptors as the argument needs. Note that we ignore any floating
+ // point register cases (m_cFloatReg > 0) since these will never change due to the hidden arg
+ // insertion.
+ while (sSrcArgLoc.m_cGenReg || sSrcArgLoc.m_cStack)
+ {
+ _ASSERTE(idxCurrentDesc < cArgDescriptors);
+
+ if (sSrcArgLoc.m_cGenReg)
+ {
+ sSrcArgLoc.m_cGenReg--;
+ rgArgDescs[idxCurrentDesc].m_idxSrc = sSrcArgLoc.m_idxGenReg++;
+ rgArgDescs[idxCurrentDesc].m_fSrcIsReg = true;
+ }
+ else
+ {
+ _ASSERTE(sSrcArgLoc.m_cStack > 0);
+ sSrcArgLoc.m_cStack--;
+ rgArgDescs[idxCurrentDesc].m_idxSrc = sSrcArgLoc.m_idxStack++;
+ rgArgDescs[idxCurrentDesc].m_fSrcIsReg = false;
+ }
+
+ if (sDstArgLoc.m_cGenReg)
+ {
+ sDstArgLoc.m_cGenReg--;
+ rgArgDescs[idxCurrentDesc].m_idxDst = sDstArgLoc.m_idxGenReg++;
+ rgArgDescs[idxCurrentDesc].m_fDstIsReg = true;
+ }
+ else
+ {
+ _ASSERTE(sDstArgLoc.m_cStack > 0);
+ sDstArgLoc.m_cStack--;
+ rgArgDescs[idxCurrentDesc].m_idxDst = sDstArgLoc.m_idxStack++;
+ rgArgDescs[idxCurrentDesc].m_fDstIsReg = false;
+ }
+
+ idxCurrentDesc++;
+ }
+ }
+
+ // Update descriptor count to the actual number used.
+ cArgDescriptors = idxCurrentDesc;
+
+    // Note the position at which we have the first move to a stack location.
+    DWORD idxFirstMoveToStack = (DWORD)-1;
+
+ // We have a problem where register to register moves are concerned. Since we're adding an argument the
+ // moves will be from a lower numbered register to a higher numbered one (e.g. r0 -> r1). But the argument
+ // descriptors we just produced will order them starting from the lowest registers. If we emit move
+ // instructions in this order we'll end up copying the value of the lowest register into all of the rest
+ // (e.g. r0 -> r1, r1 -> r2 etc.). We don't have this problem with stack based arguments since the
+ // argument stacks don't overlap in the same fashion. To solve this we'll reverse the order of the
+ // descriptors with register destinations (there will be at most four of these so it's fairly cheap).
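+    //
+    // For instance, with pending moves r0->r1, r1->r2 and r2->r3, ascending order would leave
+    // r1, r2 and r3 all holding r0's original value; emitting r2->r3, r1->r2, r0->r1 instead
+    // reads every source before it is overwritten.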
+ if (cArgDescriptors > 1)
+ {
+ // Start by assuming we have all four register destination descriptors.
+ DWORD idxLastRegDesc = min(3, cArgDescriptors - 1);
+
+ // Adjust that count to match reality.
+ while (!rgArgDescs[idxLastRegDesc].m_fDstIsReg)
+ {
+ _ASSERTE(idxLastRegDesc > 0);
+ idxLastRegDesc--;
+ }
+
+ // First move to stack location happens after the last move to register location
+ idxFirstMoveToStack = idxLastRegDesc+1;
+
+ // Calculate how many descriptors we'll need to swap.
+ DWORD cSwaps = (idxLastRegDesc + 1) / 2;
+
+ // Finally we can swap the descriptors.
+ DWORD idxFirstRegDesc = 0;
+ while (cSwaps)
+ {
+ ArgDesc sTempDesc = rgArgDescs[idxLastRegDesc];
+ rgArgDescs[idxLastRegDesc] = rgArgDescs[idxFirstRegDesc];
+ rgArgDescs[idxFirstRegDesc] = sTempDesc;
+
+ _ASSERTE(idxFirstRegDesc < idxLastRegDesc);
+ idxFirstRegDesc++;
+ idxLastRegDesc--;
+ cSwaps--;
+ }
+ }
+
+ // If we're ever required to write to the destination stack then we can't implement this case with a
+ // simple tail call stub. (That's not technically true: there are edge cases caused by 64-bit alignment
+ // requirements that might allow us to use a simple stub since the extra argument fits in a "hole" in the
+ // arguments, but these are infrequent enough that it's likely not worth the effort of detecting them).
+ ArgDesc *pLastArg = cArgDescriptors ? &rgArgDescs[cArgDescriptors - 1] : NULL;
+ if ((pLastArg == NULL) || pLastArg->m_fDstIsReg)
+ {
+ // Simple case where we can just rearrange a few argument registers and tail call.
+
+ for (idxCurrentDesc = 0; idxCurrentDesc < cArgDescriptors; idxCurrentDesc++)
+ {
+ // Because we're in the simple case we know we'll never be asked to move a value onto the stack
+ // and since we're adding a parameter we should never be required to move a value from the stack
+ // to a register either. So all of the descriptors should be register to register moves.
+ _ASSERTE(rgArgDescs[idxCurrentDesc].m_fSrcIsReg && rgArgDescs[idxCurrentDesc].m_fDstIsReg);
+ ThumbEmitMovRegReg(ThumbReg(rgArgDescs[idxCurrentDesc].m_idxDst),
+ ThumbReg(rgArgDescs[idxCurrentDesc].m_idxSrc));
+ }
+
+ // Place instantiation parameter into the correct register.
+ ArgLocDesc sInstArgLoc;
+ sDstArgLocations.GetParamTypeLoc(&sInstArgLoc);
+ int regHidden = sInstArgLoc.m_idxGenReg;
+ _ASSERTE(regHidden != -1);
+ if (pHiddenArg)
+ {
+ // mov regHidden, #pHiddenArg
+ ThumbEmitMovConstant(ThumbReg(regHidden), (TADDR)pHiddenArg);
+ }
+ else
+ {
+ // Extract MethodTable pointer (the hidden arg) from the object instance.
+ // ldr regHidden, [r0]
+ ThumbEmitLoadRegIndirect(ThumbReg(regHidden), ThumbReg(0), 0);
+ }
+
+ if (pHiddenArg == NULL)
+ {
+ // Unboxing stub case.
+
+ // Skip over the MethodTable* to find the address of the unboxed value type.
+ // add r0, #sizeof(MethodTable*)
+ ThumbEmitIncrement(ThumbReg(0), sizeof(MethodTable*));
+ }
+
+ // Emit a tail call to the target method.
+ ThumbEmitCallManagedMethod(pMD, true);
+ }
+ else
+ {
+ // Complex case where we need to emit a new stack frame and copy the arguments.
+
+ // Calculate the size of the new stack frame:
+ //
+ // +------------+
+ // SP -> | | <-+
+ // : : | Outgoing arguments
+ // | | <-+
+ // +------------+
+        //            | Padding    | <-- Optional; may be required so that SP is 64-bit aligned
+ // +------------+
+ // | GS Cookie |
+ // +------------+
+ // +-> | vtable ptr |
+ // | +------------+
+ // | | m_Next |
+ // | +------------+
+ // | | R4 | <-+
+ // Stub | +------------+ |
+ // Helper | : : |
+ // Frame | +------------+ | Callee saved registers
+ // | | R11 | |
+ // | +------------+ |
+ // | | LR/RetAddr | <-+
+ // | +------------+
+ // | | R0 | <-+
+ // | +------------+ |
+ // | : : | Argument registers
+ // | +------------+ |
+ // +-> | R3 | <-+
+ // +------------+
+ // Old SP -> | |
+ //
+ DWORD cbStackArgs = (pLastArg->m_idxDst + 1) * 4;
+ DWORD cbStackFrame = cbStackArgs + sizeof(GSCookie) + sizeof(StubHelperFrame);
+ cbStackFrame = ALIGN_UP(cbStackFrame, 8);
+ DWORD cbStackFrameWithoutSavedRegs = cbStackFrame - (13 * 4); // r0-r11,lr
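+        // (For example, if the last argument lands in outgoing stack slot 1 then cbStackArgs is 8;
+        // the GS cookie and StubHelperFrame sizes are added and the total rounded up to a multiple
+        // of 8 to keep SP 64-bit aligned.)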
+
+ // Prolog:
+ ThumbEmitProlog(8, // Save r4-r11,lr (count doesn't include lr)
+ cbStackFrameWithoutSavedRegs, // Additional space in the stack frame required
+ TRUE); // Push argument registers
+
+ DWORD offsetOfFrame = cbStackFrame - sizeof(StubHelperFrame);
+
+ // Initialize and link the StubHelperFrame and associated GS cookie.
+ EmitStubLinkFrame(StubHelperFrame::GetMethodFrameVPtr(), offsetOfFrame, StubHelperFrame::GetOffsetOfTransitionBlock());
+
+ // Initialize temporary registers used when copying arguments:
+ // r6 == pointer to first incoming stack-based argument
+ // r7 == pointer to first outgoing stack-based argument
+
+ // add r6, sp, #cbStackFrame
+ ThumbEmitAdd(ThumbReg(6), thumbRegSp, cbStackFrame);
+
+ // mov r7, sp
+ ThumbEmitMovRegReg(ThumbReg(7), thumbRegSp);
+
+        // Copy incoming to outgoing arguments. Stack arguments are generally written consecutively
+        // and as such we use post-increment forms of register indirect addressing to keep our input
+        // (r6) and output (r7) pointers up to date. But sometimes we'll skip four bytes due to
+        // 64-bit alignment requirements and need to bump one or both of the pointers to compensate.
+        // We detect these gaps by comparing each argument's stack index against that of the
+        // argument before it.
+        //
+        // At this point, the ArgumentDescriptor array is divided into two parts:
+        //
+        //  1) Reverse sorted register to register moves (see the comment earlier in the method for
+        //     details)
+        //  2) Register or stack to stack moves (if any) in the original order.
+        //
+        // It's possible that the register to register moves may move to a target register that
+        // happens to be a source for a register -> stack move. If this happens, and we emit the
+        // argument moves in the current order, then we can lose the contents of the register
+        // involved in the register -> stack move (stack -> stack moves are not a problem as the
+        // locations don't overlap).
+        //
+        // To address this, we will emit the argument moves in two loops:
+        //
+        //  1) The first loop emits the moves that have a stack location as the target.
+        //  2) The second loop emits the moves that have a register as the target.
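+        //
+        // Concretely: given a register move r2 -> r3 and a spill r3 -> stack slot 0, emitting the
+        // register move first would overwrite r3 before its original value reached the stack.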
+ DWORD idxCurrentLoopBegin = 0, idxCurrentLoopEnd = cArgDescriptors;
+        if (idxFirstMoveToStack != (DWORD)-1)
+ {
+ _ASSERTE(idxFirstMoveToStack < cArgDescriptors);
+ idxCurrentLoopBegin = idxFirstMoveToStack;
+
+ for (idxCurrentDesc = idxCurrentLoopBegin; idxCurrentDesc < idxCurrentLoopEnd; idxCurrentDesc++)
+ {
+ ArgDesc *pArgDesc = &rgArgDescs[idxCurrentDesc];
+
+ if (pArgDesc->m_fSrcIsReg)
+ {
+ // Source value is in a register.
+
+ _ASSERTE(!pArgDesc->m_fDstIsReg);
+ // Register to stack. Calculate delta from last stack write; normally it will be 4 bytes
+ // and our pointer has already been set up correctly by the post increment of the last
+ // write. But in some cases we need to skip four bytes due to a 64-bit alignment
+ // requirement. In those cases we need to emit an extra add to keep the pointer correct.
+ // Note that the first stack argument is guaranteed to be 64-bit aligned by the ABI and as
+ // such the first stack slot is never skipped.
+ if ((pArgDesc->m_idxDst > 0) &&
+ (pArgDesc->m_idxDst != (rgArgDescs[idxCurrentDesc - 1].m_idxDst + 1)))
+ {
+ _ASSERTE(pArgDesc->m_idxDst == (rgArgDescs[idxCurrentDesc - 1].m_idxDst + 2));
+ ThumbEmitIncrement(ThumbReg(7), 4);
+ }
+
+ // str srcReg, [r7], #4
+ ThumbEmitStoreIndirectPostIncrement(pArgDesc->m_idxSrc, ThumbReg(7), 4);
+ }
+ else
+ {
+ // Source value is on the stack. We should have no cases where a stack argument moves back to
+ // a register (because we're adding an argument).
+ _ASSERTE(!pArgDesc->m_fDstIsReg);
+
+ // Stack to stack move. We need to use register (r6) to store the value temporarily between
+ // the read and the write. See the comments above for why we need to check stack deltas and
+ // possibly insert extra add instructions in some cases.
+ if ((pArgDesc->m_idxSrc > 0) &&
+ (pArgDesc->m_idxSrc != (rgArgDescs[idxCurrentDesc - 1].m_idxSrc + 1)))
+ {
+ _ASSERTE(pArgDesc->m_idxSrc == (rgArgDescs[idxCurrentDesc - 1].m_idxSrc + 2));
+ ThumbEmitIncrement(ThumbReg(6), 4);
+ }
+ if ((pArgDesc->m_idxDst > 0) &&
+ (pArgDesc->m_idxDst != (rgArgDescs[idxCurrentDesc - 1].m_idxDst + 1)))
+ {
+ _ASSERTE(pArgDesc->m_idxDst == (rgArgDescs[idxCurrentDesc - 1].m_idxDst + 2));
+ ThumbEmitIncrement(ThumbReg(7), 4);
+ }
+
+ // ldr r8, [r6], #4
+ ThumbEmitLoadIndirectPostIncrement(ThumbReg(8), ThumbReg(6), 4);
+
+ // str r8, [r7], #4
+ ThumbEmitStoreIndirectPostIncrement(ThumbReg(8), ThumbReg(7), 4);
+ }
+ }
+
+ // Update the indexes to be used for the second loop
+ idxCurrentLoopEnd = idxCurrentLoopBegin;
+ idxCurrentLoopBegin = 0;
+ }
+
+ // Now, perform the register to register moves
+ for (idxCurrentDesc = idxCurrentLoopBegin; idxCurrentDesc < idxCurrentLoopEnd; idxCurrentDesc++)
+ {
+ ArgDesc *pArgDesc = &rgArgDescs[idxCurrentDesc];
+
+ // All moves to stack locations have been done (if applicable).
+ // Since we are moving to a register destination, the source
+ // will also be a register and cannot be a stack location (refer to the previous loop).
+ _ASSERTE(pArgDesc->m_fSrcIsReg && pArgDesc->m_fDstIsReg);
+
+ // Register to register case.
+ ThumbEmitMovRegReg(pArgDesc->m_idxDst, pArgDesc->m_idxSrc);
+ }
+
+
+ // Place instantiation parameter into the correct register.
+ ArgLocDesc sInstArgLoc;
+ sDstArgLocations.GetParamTypeLoc(&sInstArgLoc);
+ int regHidden = sInstArgLoc.m_idxGenReg;
+ _ASSERTE(regHidden != -1);
+ if (pHiddenArg)
+ {
+ // mov regHidden, #pHiddenArg
+ ThumbEmitMovConstant(ThumbReg(regHidden), (TADDR)pHiddenArg);
+ }
+ else
+ {
+ // Extract MethodTable pointer (the hidden arg) from the object instance.
+ // ldr regHidden, [r0]
+ ThumbEmitLoadRegIndirect(ThumbReg(regHidden), ThumbReg(0), 0);
+ }
+
+ if (pHiddenArg == NULL)
+ {
+ // Unboxing stub case.
+
+ // Skip over the MethodTable* to find the address of the unboxed value type.
+ // add r0, #sizeof(MethodTable*)
+ ThumbEmitIncrement(ThumbReg(0), sizeof(MethodTable*));
+ }
+
+ // Emit a regular (non-tail) call to the target method.
+ ThumbEmitCallManagedMethod(pMD, false);
+
+ // Unlink the StubHelperFrame.
+ EmitStubUnlinkFrame();
+
+ // Epilog
+ ThumbEmitEpilog();
+ }
+}
+
+#if defined(FEATURE_SHARE_GENERIC_CODE)
+// The stub generated by this method passes an extra dictionary argument before jumping to
+// shared-instantiation generic code.
+//
+// pSharedMD is either
+// * An InstantiatedMethodDesc for a generic method whose code is shared across instantiations.
+// In this case, the extra argument is the InstantiatedMethodDesc for the instantiation-specific stub itself.
+// or * A MethodDesc for a static method in a generic class whose code is shared across instantiations.
+// In this case, the extra argument is the MethodTable pointer of the instantiated type.
+VOID StubLinkerCPU::EmitInstantiatingMethodStub(MethodDesc* pSharedMD, void* extra)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(pSharedMD->RequiresInstMethodTableArg() || pSharedMD->RequiresInstMethodDescArg());
+ }
+ CONTRACTL_END;
+
+ // Share code with the instantiating version of the unboxing stub (see below).
+ ThumbEmitCallWithGenericInstantiationParameter(pSharedMD, extra);
+}
+#endif // FEATURE_SHARE_GENERIC_CODE
+
+void StubLinkerCPU::EmitUnboxMethodStub(MethodDesc *pMD)
+{
+ if (pMD->RequiresInstMethodTableArg())
+ {
+ // In this case we also have to add an instantiating parameter (which is always the MethodTable* from
+ // the instance we're called on). Most of this code is shared with the instantiating method stub
+ // above, the NULL parameter informs the emitter that we're both an unboxing stub and that the extra
+ // parameter can be deduced from the 'this' reference.
+ ThumbEmitCallWithGenericInstantiationParameter(pMD, NULL);
+ }
+ else
+ {
+ // We assume that we'll never see a case where a boxed value type method will require an instantiated
+ // method desc as a parameter. The stubs on other platforms make this assumption (and indeed this
+ // method isn't even passed an additional instantiation parameter). This is trivially true for the
+ // non-interface call case: the only methods callable directly on the boxed instance are the methods
+ // of Object, none of which are generic. For the interface dispatch case we're relying on the fact
+ // that the jit always provides the instantiating argument explicitly.
+ _ASSERTE(!pMD->RequiresInstMethodDescArg());
+
+ // Address of the value type is address of the boxed instance plus four.
+ // add r0, #4
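+        // (The constant 4 here is sizeof(MethodTable*) on 32-bit ARM.)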
+ ThumbEmitIncrement(ThumbReg(0), 4);
+
+ // Tail call the real target.
+ ThumbEmitCallManagedMethod(pMD, true /* tail call */);
+ }
+}
+
+#endif // CROSSGEN_COMPILE
+
+#endif // !DACCESS_COMPILE
+
+LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv)
+{
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+bool IsInstrModifyFault(PEXCEPTION_POINTERS pExceptionInfo)
+{
+ return false;
+}
+
+void UpdateRegDisplayFromCalleeSavedRegisters(REGDISPLAY * pRD, CalleeSavedRegisters * pRegs)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ T_CONTEXT * pContext = pRD->pCurrentContext;
+ pContext->R4 = pRegs->r4;
+ pContext->R5 = pRegs->r5;
+ pContext->R6 = pRegs->r6;
+ pContext->R7 = pRegs->r7;
+ pContext->R8 = pRegs->r8;
+ pContext->R9 = pRegs->r9;
+ pContext->R10 = pRegs->r10;
+ pContext->R11 = pRegs->r11;
+ pContext->Lr = pRegs->r14;
+
+    T_KNONVOLATILE_CONTEXT_POINTERS * pContextPointers = pRD->pCurrentContextPointers;
+    pContextPointers->R4 = (PDWORD)&pRegs->r4;
+    pContextPointers->R5 = (PDWORD)&pRegs->r5;
+    pContextPointers->R6 = (PDWORD)&pRegs->r6;
+    pContextPointers->R7 = (PDWORD)&pRegs->r7;
+    pContextPointers->R8 = (PDWORD)&pRegs->r8;
+    pContextPointers->R9 = (PDWORD)&pRegs->r9;
+    pContextPointers->R10 = (PDWORD)&pRegs->r10;
+    pContextPointers->R11 = (PDWORD)&pRegs->r11;
+    pContextPointers->Lr = NULL;
+}
+
+
+void TransitionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+
+ // Copy the saved argument registers into the current context
+ ArgumentRegisters * pArgRegs = GetArgumentRegisters();
+ pRD->pCurrentContext->R0 = pArgRegs->r[0];
+ pRD->pCurrentContext->R1 = pArgRegs->r[1];
+ pRD->pCurrentContext->R2 = pArgRegs->r[2];
+ pRD->pCurrentContext->R3 = pArgRegs->r[3];
+
+ // Next, copy all the callee saved registers
+ UpdateRegDisplayFromCalleeSavedRegisters(pRD, GetCalleeSavedRegisters());
+
+ // Set ControlPC to be the same as the saved "return address"
+ // value, which is actually a ControlPC in the frameless method (e.g.
+    // faulting address in case of AV or TAE).
+ pRD->pCurrentContext->Pc = GetReturnAddress();
+
+ // Set the caller SP
+ pRD->pCurrentContext->Sp = this->GetSP();
+
+ // Finally, syncup the regdisplay with the context
+ SyncRegDisplayToCurrentContext(pRD);
+
+    LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK    TransitionFrame::UpdateRegDisplay(pc:%p, sp:%p)\n", pRD->ControlPC, pRD->SP));
+}
+
+void TailCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+
+ // Next, copy all the callee saved registers
+ UpdateRegDisplayFromCalleeSavedRegisters(pRD, &m_calleeSavedRegisters);
+
+ // Set ControlPC to be the same as the saved "return address"
+ // value, which is actually a ControlPC in the frameless method (e.g.
+    // faulting address in case of AV or TAE).
+ pRD->pCurrentContext->Pc = m_ReturnAddress;
+
+ // Set the caller SP
+ pRD->pCurrentContext->Sp = dac_cast<TADDR>(this) + sizeof(*this);
+
+ // Finally, syncup the regdisplay with the context
+ SyncRegDisplayToCurrentContext(pRD);
+
+    LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK    TailCallFrame::UpdateRegDisplay(pc:%p, sp:%p)\n", pRD->ControlPC, pRD->SP));
+}
+
+#ifndef DACCESS_COMPILE
+
+void TailCallFrame::InitFromContext(T_CONTEXT * pContext)
+{
+ WRAPPER_NO_CONTRACT;
+
+ r4 = pContext->R4;
+ r5 = pContext->R5;
+ r6 = pContext->R6;
+ r7 = pContext->R7;
+ r8 = pContext->R8;
+ r9 = pContext->R9;
+ r10 = pContext->R10;
+ r11 = pContext->R11;
+ m_ReturnAddress = pContext->Lr;
+}
+
+#endif // !DACCESS_COMPILE
+
+void FaultingExceptionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Copy the context to regdisplay
+ memcpy(pRD->pCurrentContext, &m_ctx, sizeof(T_CONTEXT));
+
+ pRD->ControlPC = ::GetIP(&m_ctx);
+ pRD->SP = ::GetSP(&m_ctx);
+
+ // Update the integer registers in KNONVOLATILE_CONTEXT_POINTERS from
+ // the exception context we have.
+ pRD->pCurrentContextPointers->R4 = (PDWORD)&m_ctx.R4;
+ pRD->pCurrentContextPointers->R5 = (PDWORD)&m_ctx.R5;
+ pRD->pCurrentContextPointers->R6 = (PDWORD)&m_ctx.R6;
+ pRD->pCurrentContextPointers->R7 = (PDWORD)&m_ctx.R7;
+ pRD->pCurrentContextPointers->R8 = (PDWORD)&m_ctx.R8;
+ pRD->pCurrentContextPointers->R9 = (PDWORD)&m_ctx.R9;
+ pRD->pCurrentContextPointers->R10 = (PDWORD)&m_ctx.R10;
+ pRD->pCurrentContextPointers->R11 = (PDWORD)&m_ctx.R11;
+ pRD->pCurrentContextPointers->Lr = NULL;
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+}
+
+void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // We should skip over InlinedCallFrame if it is not active.
+ // It will be part of a JITed method's frame, and the stack-walker
+ // can handle such a case.
+#ifdef PROFILING_SUPPORTED
+ PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this));
+#endif
+ HOST_NOCALLS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ // @TODO: Remove this after the debugger is fixed to avoid stack-walks from bad places
+    // @TODO: This may still be needed for sampling profilers
+ if (!InlinedCallFrame::FrameHasActiveCall(this))
+ {
+ LOG((LF_CORDB, LL_ERROR, "WARNING: InlinedCallFrame::UpdateRegDisplay called on inactive frame %p\n", this));
+ return;
+ }
+
+ // reset pContext; it's only valid for active (top-most) frame
+ pRD->pContext = NULL;
+
+ *(pRD->pPC) = m_pCallerReturnAddress;
+ pRD->SP = (DWORD) dac_cast<TADDR>(m_pCallSiteSP);
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+
+ pRD->pCurrentContext->Pc = *(pRD->pPC);
+ pRD->pCurrentContext->Sp = pRD->SP;
+
+ // Update the frame pointer in the current context.
+ pRD->pCurrentContext->R11 = m_pCalleeSavedFP;
+ pRD->pCurrentContextPointers->R11 = &m_pCalleeSavedFP;
+
+ // This is necessary to unwind methods with alloca. This needs to stay
+ // in sync with definition of REG_SAVED_LOCALLOC_SP in the JIT.
+ pRD->pCurrentContext->R9 = (DWORD) dac_cast<TADDR>(m_pCallSiteSP);
+ pRD->pCurrentContextPointers->R9 = (DWORD *)&m_pCallSiteSP;
+
+ RETURN;
+}
+
+#ifdef FEATURE_HIJACK
+TADDR ResumableFrame::GetReturnAddressPtr(void)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<TADDR>(m_Regs) + offsetof(T_CONTEXT, Pc);
+}
+
+void ResumableFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ CopyMemory(pRD->pCurrentContext, m_Regs, sizeof(T_CONTEXT));
+
+ pRD->ControlPC = m_Regs->Pc;
+ pRD->SP = m_Regs->Sp;
+
+ pRD->pCurrentContextPointers->R4 = &m_Regs->R4;
+ pRD->pCurrentContextPointers->R5 = &m_Regs->R5;
+ pRD->pCurrentContextPointers->R6 = &m_Regs->R6;
+ pRD->pCurrentContextPointers->R7 = &m_Regs->R7;
+ pRD->pCurrentContextPointers->R8 = &m_Regs->R8;
+ pRD->pCurrentContextPointers->R9 = &m_Regs->R9;
+ pRD->pCurrentContextPointers->R10 = &m_Regs->R10;
+ pRD->pCurrentContextPointers->R11 = &m_Regs->R11;
+ pRD->pCurrentContextPointers->Lr = &m_Regs->Lr;
+
+ pRD->volatileCurrContextPointers.R0 = &m_Regs->R0;
+ pRD->volatileCurrContextPointers.R1 = &m_Regs->R1;
+ pRD->volatileCurrContextPointers.R2 = &m_Regs->R2;
+ pRD->volatileCurrContextPointers.R3 = &m_Regs->R3;
+ pRD->volatileCurrContextPointers.R12 = &m_Regs->R12;
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+}
+
+void HijackFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE;
+
+ pRD->pCurrentContext->Pc = m_ReturnAddress;
+ pRD->pCurrentContext->Sp = PTR_TO_TADDR(m_Args) + sizeof(struct HijackArgs);
+
+ pRD->pCurrentContext->R0 = m_Args->R0;
+
+ pRD->pCurrentContext->R4 = m_Args->R4;
+ pRD->pCurrentContext->R5 = m_Args->R5;
+ pRD->pCurrentContext->R6 = m_Args->R6;
+ pRD->pCurrentContext->R7 = m_Args->R7;
+ pRD->pCurrentContext->R8 = m_Args->R8;
+ pRD->pCurrentContext->R9 = m_Args->R9;
+ pRD->pCurrentContext->R10 = m_Args->R10;
+ pRD->pCurrentContext->R11 = m_Args->R11;
+
+ pRD->pCurrentContextPointers->R4 = &m_Args->R4;
+ pRD->pCurrentContextPointers->R5 = &m_Args->R5;
+ pRD->pCurrentContextPointers->R6 = &m_Args->R6;
+ pRD->pCurrentContextPointers->R7 = &m_Args->R7;
+ pRD->pCurrentContextPointers->R8 = &m_Args->R8;
+ pRD->pCurrentContextPointers->R9 = &m_Args->R9;
+ pRD->pCurrentContextPointers->R10 = &m_Args->R10;
+ pRD->pCurrentContextPointers->R11 = &m_Args->R11;
+ pRD->pCurrentContextPointers->Lr = NULL;
+
+ SyncRegDisplayToCurrentContext(pRD);
+}
+#endif
+
+void PInvokeStubForHost(void)
+{
+ // Hosted P/Invoke is not implemented on ARM. See ARMTODO in code:CorHost2::SetHostControl.
+ UNREACHABLE();
+}
+
+class UMEntryThunk * UMEntryThunk::Decode(void *pCallback)
+{
+ _ASSERTE(offsetof(UMEntryThunkCode, m_code) == 0);
+ UMEntryThunkCode * pCode = (UMEntryThunkCode*)((ULONG_PTR)pCallback & ~THUMB_CODE);
+
+ // We may be called with an unmanaged external code pointer instead. So if it doesn't look like one of our
+ // stubs (see UMEntryThunkCode::Encode below) then we'll return NULL. Luckily in these scenarios our
+ // caller will perform a hash lookup on successful return to verify our result in case random unmanaged
+ // code happens to look like ours.
+ if ((pCode->m_code[0] == 0xf8df) &&
+ (pCode->m_code[1] == 0xc008) &&
+ (pCode->m_code[2] == 0xf8df) &&
+ (pCode->m_code[3] == 0xf000))
+ {
+ return (UMEntryThunk*)pCode->m_pvSecretParam;
+ }
+
+ return NULL;
+}
+
+void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
+{
+ // ldr r12, [pc + 8]
+ m_code[0] = 0xf8df;
+ m_code[1] = 0xc008;
+ // ldr pc, [pc]
+ m_code[2] = 0xf8df;
+ m_code[3] = 0xf000;
+
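+    // With Thumb PC-relative addressing (PC reads as the instruction's address plus 4), the first
+    // ldr above picks up m_pvSecretParam into r12 and the second branches to m_pTargetCode,
+    // relying on both fields immediately following m_code in this structure (as Decode above
+    // also assumes).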
+ m_pTargetCode = (TADDR)pTargetCode;
+ m_pvSecretParam = (TADDR)pvSecretParam;
+
+ FlushInstructionCache(GetCurrentProcess(),&m_code,sizeof(m_code));
+}
+
+///////////////////////////// UNIMPLEMENTED //////////////////////////////////
+
+#ifndef DACCESS_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+
+
+EXTERN_C DWORD gThreadTLSIndex;
+EXTERN_C DWORD gAppDomainTLSIndex;
+
+
+EXTERN_C Object* JIT_TrialAllocSFastMP_InlineGetThread(CORINFO_CLASS_HANDLE typeHnd_);
+EXTERN_C Object* JIT_BoxFastMP_InlineGetThread (CORINFO_CLASS_HANDLE type, void* unboxedData);
+EXTERN_C Object* AllocateStringFastMP_InlineGetThread (CLR_I4 cch);
+EXTERN_C Object* JIT_NewArr1OBJ_MP_InlineGetThread (CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+EXTERN_C Object* JIT_NewArr1VC_MP_InlineGetThread (CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+
+EXTERN_C void JIT_TrialAllocSFastMP_InlineGetThread__PatchTLSOffset();
+EXTERN_C void JIT_BoxFastMP_InlineGetThread__PatchTLSOffset();
+EXTERN_C void AllocateStringFastMP_InlineGetThread__PatchTLSOffset();
+EXTERN_C void JIT_NewArr1VC_MP_InlineGetThread__PatchTLSOffset();
+EXTERN_C void JIT_NewArr1OBJ_MP_InlineGetThread__PatchTLSOffset();
+
+extern "C" void STDCALL JIT_PatchedCodeStart();
+extern "C" void STDCALL JIT_PatchedCodeLast();
+
+static const LPVOID InlineGetThreadLocations[] = {
+ (PVOID)JIT_TrialAllocSFastMP_InlineGetThread__PatchTLSOffset,
+ (PVOID)JIT_BoxFastMP_InlineGetThread__PatchTLSOffset,
+ (PVOID)AllocateStringFastMP_InlineGetThread__PatchTLSOffset,
+ (PVOID)JIT_NewArr1VC_MP_InlineGetThread__PatchTLSOffset,
+ (PVOID)JIT_NewArr1OBJ_MP_InlineGetThread__PatchTLSOffset,
+};
+
+//EXTERN_C Object* JIT_TrialAllocSFastMP(CORINFO_CLASS_HANDLE typeHnd_);
+Object* JIT_TrialAllocSFastMP(CORINFO_CLASS_HANDLE typeHnd_);
+EXTERN_C Object* JIT_NewArr1OBJ_MP(CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+EXTERN_C Object* AllocateStringFastMP(CLR_I4 cch);
+EXTERN_C Object* JIT_NewArr1VC_MP(CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+EXTERN_C Object* JIT_BoxFastMP(CORINFO_CLASS_HANDLE type, void* unboxedData);
+
+
+EXTERN_C void JIT_GetSharedNonGCStaticBase__PatchTLSLabel();
+EXTERN_C void JIT_GetSharedNonGCStaticBaseNoCtor__PatchTLSLabel();
+EXTERN_C void JIT_GetSharedGCStaticBase__PatchTLSLabel();
+EXTERN_C void JIT_GetSharedGCStaticBaseNoCtor__PatchTLSLabel();
+
+EXTERN_C void JIT_GetSharedNonGCStaticBase_SingleAppDomain();
+EXTERN_C void JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain();
+EXTERN_C void JIT_GetSharedGCStaticBase_SingleAppDomain();
+EXTERN_C void JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain();
+
+
+static const LPVOID InlineGetAppDomainLocations[] = {
+ (PVOID)JIT_GetSharedNonGCStaticBase__PatchTLSLabel,
+ (PVOID)JIT_GetSharedNonGCStaticBaseNoCtor__PatchTLSLabel,
+ (PVOID)JIT_GetSharedGCStaticBase__PatchTLSLabel,
+ (PVOID)JIT_GetSharedGCStaticBaseNoCtor__PatchTLSLabel
+};
+
+
+void FixupInlineGetters(DWORD tlsSlot, const LPVOID * pLocations, int nLocations)
+{
+ STANDARD_VM_CONTRACT;
+
+ for (int i=0; i<nLocations; i++)
+ {
+ BYTE * pInlineGetter = (BYTE *)PCODEToPINSTR(GetEEFuncEntryPoint(pLocations[i]));
+
+ DWORD offset = (tlsSlot * sizeof(LPVOID) + offsetof(TEB, TlsSlots));
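+        // This is the byte offset of the slot within the TEB; it is patched into the low 12 bits
+        // of the LDR immediate below, which is why it must not exceed 4095.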
+
+ // ldr r??, [r??, #offset]
+ _ASSERTE_ALL_BUILDS("clr/src/VM/arm/stubs.cpp",
+ pInlineGetter[0] == 0x1d &&
+ pInlineGetter[1] == 0xee &&
+ pInlineGetter[2] == 0x50 &&
+ pInlineGetter[5] == 0xf8 &&
+ "Initialization failure while stomping instructions for the TLS slot offset: "
+ "the instruction at the given offset did not match what we expect");
+
+ *((WORD*)(pInlineGetter + 6)) &= 0xf000;
+
+        _ASSERTE(offset <= 4095);
+ *((WORD*)(pInlineGetter + 6)) |= (WORD)offset;
+ }
+}
+
+
+
+void InitJITHelpers1()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (gThreadTLSIndex < TLS_MINIMUM_AVAILABLE)
+ {
+ FixupInlineGetters(gThreadTLSIndex, InlineGetThreadLocations, COUNTOF(InlineGetThreadLocations));
+ }
+
+ if (gAppDomainTLSIndex < TLS_MINIMUM_AVAILABLE)
+ {
+ FixupInlineGetters(gAppDomainTLSIndex, InlineGetAppDomainLocations, COUNTOF(InlineGetAppDomainLocations));
+ }
+
+ if(gThreadTLSIndex < TLS_MINIMUM_AVAILABLE || gAppDomainTLSIndex < TLS_MINIMUM_AVAILABLE)
+ {
+ FlushInstructionCache(GetCurrentProcess(), JIT_PatchedCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart);
+ }
+
+#if CHECK_APP_DOMAIN_LEAKS
+ if(g_pConfig->AppDomainLeaks())
+ SetJitHelperFunction(CORINFO_HELP_ARRADDR_ST, JIT_Stelem_Ref_Portable);
+#endif
+
+ // Allocation helpers, faster but non-logging.
+ if (!(TrackAllocationsEnabled()
+ || LoggingOn(LF_GCALLOC, LL_INFO10)
+#ifdef _DEBUG
+ || (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP) != 0)
+#endif // _DEBUG
+ ))
+ {
+
+ _ASSERTE(GCHeap::UseAllocationContexts());
+ // If the TLS for Thread is low enough use the super-fast helpers
+ if (gThreadTLSIndex < TLS_MINIMUM_AVAILABLE)
+ {
+ SetJitHelperFunction(CORINFO_HELP_NEWSFAST, JIT_TrialAllocSFastMP_InlineGetThread);
+ SetJitHelperFunction(CORINFO_HELP_BOX, JIT_BoxFastMP_InlineGetThread);
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_VC, JIT_NewArr1VC_MP_InlineGetThread);
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_OBJ, JIT_NewArr1OBJ_MP_InlineGetThread);
+
+ ECall::DynamicallyAssignFCallImpl(GetEEFuncEntryPoint(AllocateStringFastMP_InlineGetThread), ECall::FastAllocateString);
+ }
+ else
+ {
+/*
+ SetJitHelperFunction(CORINFO_HELP_NEWSFAST, JIT_TrialAllocSFastMP);
+ SetJitHelperFunction(CORINFO_HELP_BOX, JIT_BoxFastMP);
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_VC, JIT_NewArr1VC_MP);
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_OBJ, JIT_NewArr1OBJ_MP);
+
+ ECall::DynamicallyAssignFCallImpl(GetEEFuncEntryPoint(AllocateStringFastMP), ECall::FastAllocateString);
+*/
+ }
+ }
+
+
+#ifdef FEATURE_CORECLR
+ if(IsSingleAppDomain())
+ {
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE, JIT_GetSharedGCStaticBase_SingleAppDomain);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE, JIT_GetSharedNonGCStaticBase_SingleAppDomain);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR, JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR,JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain);
+ }
+ else
+#endif
+ if (gAppDomainTLSIndex >= TLS_MINIMUM_AVAILABLE)
+ {
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE, JIT_GetSharedGCStaticBase_Portable);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE, JIT_GetSharedNonGCStaticBase_Portable);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR, JIT_GetSharedGCStaticBaseNoCtor_Portable);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR,JIT_GetSharedNonGCStaticBaseNoCtor_Portable);
+ }
+}
+
+extern "C" Object *SetAppDomainInObject(Object *pObject)
+{
+ pObject->SetAppDomain();
+ return pObject;
+}
+
+// +64 stack-based arguments here
+// -- MulticastFrame end
+// +48 r0-r3 argument registers
+// +44 lr return address
+// +40 fp frame pointer
+// +12 r4-r10 callee saved registers
+// +8 datum (typically a MethodDesc*)
+// +4 m_Next
+// +0 the frame vptr
+// -- MulticastFrame start
+// -4 gs cookie
+// -... floating point argument registers
+void StubLinkerCPU::EmitMulticastInvoke(UINT_PTR hash)
+{
+ //Decode Multicast Delegate hash
+ unsigned int numStackBytes = hash >> 8;
+ _ASSERTE(numStackBytes <= 0x7fff);
+
+ unsigned int numFPRegs = (hash & 0xf8) >> 3;
+ _ASSERTE(numFPRegs <= 16);
+
+ unsigned int numGenRegs = hash & 0x7;
+ _ASSERTE(numGenRegs <= 4);
+
+ DWORD offsetOfFPRegs = 0;
+
+ DWORD cbStackFrame = numStackBytes;
+ if (numFPRegs)
+ {
+ cbStackFrame = ALIGN_UP(cbStackFrame, 8);
+ offsetOfFPRegs = cbStackFrame;
+ cbStackFrame += 4 * numFPRegs;
+ }
+ cbStackFrame += sizeof(GSCookie) + sizeof(MulticastFrame);
+ cbStackFrame = ALIGN_UP(cbStackFrame, 8);
+ DWORD cbStackFrameWithoutSavedRegs = cbStackFrame - (13 * 4); // r0-r11,lr
+
+ // Prolog:
+ ThumbEmitProlog(8, // Save r4-r11,lr (count doesn't include lr)
+ cbStackFrameWithoutSavedRegs, // Additional space in the stack frame required
+ TRUE); // Push argument registers
+
+ DWORD offsetOfFrame = cbStackFrame - sizeof(MulticastFrame);
+
+ // Move the MethodDesc* we're calling to r12.
+ // ldr r12, [r0, #offsetof(DelegateObject, _methodPtrAux)]
+ ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(0), DelegateObject::GetOffsetOfMethodPtrAux());
+
+ // Initialize MulticastFrame::m_pMD to the MethodDesc* we're calling
+ // str r12, [sp + #(offsetOfFrame + offsetof(MulticastFrame, m_pMD))]
+ ThumbEmitStoreRegIndirect(ThumbReg(12), thumbRegSp, offsetOfFrame + MulticastFrame::GetOffsetOfDatum());
+
+ if (numFPRegs)
+ {
+ ThumbEmitAdd(ThumbReg(4), thumbRegSp, offsetOfFPRegs);
+
+        // Save the floating point argument registers at offsetOfFPRegs.
+        // vstmia r4, {s0 - s(numFPRegs - 1)}
+ Emit16(0xec84);
+ Emit16(0x0a00 | (WORD)numFPRegs);
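+        // (In this T2 VSTM encoding, the low byte of the second halfword holds the count of
+        // single-precision registers to store, i.e. s0 through s(numFPRegs - 1).)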
+ }
+
+ // Initialize and link the MulticastFrame and associated GS cookie.
+ EmitStubLinkFrame(MulticastFrame::GetMethodFrameVPtr(), offsetOfFrame, MulticastFrame::GetOffsetOfTransitionBlock());
+
+ //r7 as counter. Initialize it to 0.
+ // mov r7, 0
+ ThumbEmitMovConstant(ThumbReg(7), 0);
+
+ //initialize r9 to _invocationCount
+ ThumbEmitLoadRegIndirect(ThumbReg(9), ThumbReg(0), DelegateObject::GetOffsetOfInvocationCount());
+
+ CodeLabel *pLoopLabel = NewCodeLabel();
+ CodeLabel *pEndLoopLabel = NewCodeLabel();
+
+ //loop:
+ EmitLabel(pLoopLabel);
+
+ // cmp r7, r9
+ ThumbEmitCmpReg(ThumbReg(7), ThumbReg(9));
+
+ // if equal goto endloop
+ // beq endloop
+ ThumbEmitCondFlagJump(pEndLoopLabel, 0);
+
+ UINT32 count = 0;
+ if(numStackBytes)
+ {
+ //r1 = pos for stack args in Frame
+ ThumbEmitAdd(ThumbReg(1), ThumbReg(4), MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs());
+
+ //r2 = stack pos for args of calling func
+ ThumbEmitMovRegReg(ThumbReg(2), thumbRegSp);
+
+ // ..move stack args..
+ _ASSERTE(numStackBytes%4 == 0);
+ while (count != numStackBytes)
+ {
+ ThumbEmitLoadIndirectPostIncrement(ThumbReg(0), ThumbReg(1), 4);
+ ThumbEmitStoreIndirectPostIncrement(ThumbReg(0), ThumbReg(2), 4);
+ count += 4;
+ }
+ }
+
+ count = 1;
+ while(count < numGenRegs)
+ {
+ ThumbEmitLoadRegIndirect(ThumbReg(count), ThumbReg(4), MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters() + count*4);
+ count++;
+ }
+
+ if(numFPRegs)
+ {
+ ThumbEmitAdd(ThumbReg(0), thumbRegSp, offsetOfFPRegs);
+        // vldmia r0, {s0 - s(numFPRegs - 1)}
+ Emit16(0xec90);
+ Emit16(0x0a00 | (WORD)numFPRegs);
+ }
+
+ //ldr r0, [r4+0x30] // get the first argument
+ ThumbEmitLoadRegIndirect(ThumbReg(0),ThumbReg(4), MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters());
+
+ // ldr r6, [r0+0x14] //invocationList
+ ThumbEmitLoadRegIndirect(ThumbReg(6), ThumbReg(0), DelegateObject::GetOffsetOfInvocationList());
+
+ // r6 - address of first delegate in invocation list
+ // add r6,r6,0xC
+ ThumbEmitAdd(ThumbReg(6), ThumbReg(6), PtrArray::GetDataOffset());
+
+ //ldr r8,[r6+r7*4] //get delegate object
+ ThumbEmitLoadOffsetScaledReg(ThumbReg(8), ThumbReg(6), ThumbReg(7), 2);
+
+ // ldr r0, [r8+0x04] //_target from the delegate
+ ThumbEmitLoadRegIndirect(ThumbReg(0), ThumbReg(8), DelegateObject::GetOffsetOfTarget());
+
+ // ldr r8, [r8+0xC] // methodPtr from the delegate
+ ThumbEmitLoadRegIndirect(ThumbReg(8), ThumbReg(8), DelegateObject::GetOffsetOfMethodPtr());
+
+ //call delegate
+ ThumbEmitCallRegister(ThumbReg(8));
+
+ //increment counter
+ ThumbEmitAdd(ThumbReg(7), ThumbReg(7), 1);
+
+ // The debugger may need to stop here, so grab the offset of this code.
+ EmitPatchLabel();
+
+ //goto loop
+ ThumbEmitNearJump(pLoopLabel);
+
+ //endloop:
+ EmitLabel(pEndLoopLabel);
+
+
+ //At this point of the stub:
+ //r4 must point to Frame
+ //and r5 must be current Thread*
+
+ EmitStubUnlinkFrame();
+
+ // Epilog
+ ThumbEmitEpilog();
+}
+
+void StubLinkerCPU::EmitSecureDelegateInvoke(UINT_PTR hash)
+{
+ //Decode Multicast Delegate hash
+ unsigned int numStackBytes = hash >> 8;
+ _ASSERTE(numStackBytes <= 0x7fff);
+
+ DWORD cbStackFrame = numStackBytes + sizeof(GSCookie) + sizeof(SecureDelegateFrame);
+ cbStackFrame = ALIGN_UP(cbStackFrame, 8);
+ DWORD cbStackFrameWithoutSavedRegs = cbStackFrame - (13 * 4); // r0-r11,lr
+
+ // Prolog:
+ ThumbEmitProlog(8, // Save r4-r11,lr (count doesn't include lr)
+ cbStackFrameWithoutSavedRegs, // Additional space in the stack frame required
+ TRUE); // Push argument registers
+
+ DWORD offsetOfFrame = cbStackFrame - sizeof(SecureDelegateFrame);
+
+ // Move the MethodDesc* we're calling to r12.
+ // ldr r12, [r0, #offsetof(DelegateObject, _invocationCount)]
+ ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(0), DelegateObject::GetOffsetOfInvocationCount());
+
+ // Initialize SecureDelegateFrame::m_pMD to the MethodDesc* we're calling
+ // str r12, [sp + #(offsetOfFrame + offsetof(SecureDelegateFrame, m_pMD))]
+ ThumbEmitStoreRegIndirect(ThumbReg(12), thumbRegSp, offsetOfFrame + SecureDelegateFrame::GetOffsetOfDatum());
+
+ // Initialize and link the SecureDelegateFrame and associated GS cookie.
+ EmitStubLinkFrame(SecureDelegateFrame::GetMethodFrameVPtr(), offsetOfFrame, SecureDelegateFrame::GetOffsetOfTransitionBlock());
+
+ // At this point:
+ // r0 : secure delegate
+ // r4 : SecureDelegateFrame *
+ // r5 : Thread *
+
+ if (numStackBytes)
+ {
+ // Copy stack based arguments from the calling frame into this one. Use the following registers:
+ // r6 : pointer to source arguments
+ // r7 : pointer to destination arguments
+ // r8 : temporary storage during copy operation
+
+        // add r6, r4, #(MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs())
+ ThumbEmitAdd(ThumbReg(6), ThumbReg(4), MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs());
+
+ // mov r7, sp
+ ThumbEmitMovRegReg(ThumbReg(7), thumbRegSp);
+
+ // Unrolled loop to copy the stack based arguments. Might want to consider a second path with a loop
+ // for large argument lists if anyone complains about this.
+ _ASSERTE((numStackBytes % 4) == 0);
+ for (unsigned int i = 0; i < numStackBytes; i += 4)
+ {
+ // Read one 4-byte value from the source stack and copy it to the new stack, post-incrementing
+ // both source and destination as we go.
+ // ldr r8, [r6], #4
+ // str r8, [r7], #4
+ ThumbEmitLoadIndirectPostIncrement(ThumbReg(8), ThumbReg(6), 4);
+ ThumbEmitStoreIndirectPostIncrement(ThumbReg(8), ThumbReg(7), 4);
+ }
+ }
+
+ // Stack-based arguments are copied. Floating point argument registers and r1-r3 are all still correct.
+ // All we need to do now is calculate the real value for r0 and the target address. Secure delegates wrap
+ // an inner delegate (kept in _invocationList). We retrieve this inner delegate and then perform the usual
+ // delegate invocation pattern on that.
+
+ // Get "real" delegate.
+ // ldr r0, [r0, #offsetof(DelegateObject, _invocationList)]
+ ThumbEmitLoadRegIndirect(ThumbReg(0), ThumbReg(0), DelegateObject::GetOffsetOfInvocationList());
+
+ // Load the destination address from the inner delegate.
+ // ldr r12, [r0, #offsetof(DelegateObject, _methodPtr)]
+ ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(0), DelegateObject::GetOffsetOfMethodPtr());
+
+    // This is only required for unbound delegates that use VSD stubs, but it does no harm when done unconditionally.
+ // add r4, r0+#offsetof(DelegateObject, _methodPtrAux) ; // r4 now contains indirection cell
+ ThumbEmitAdd(ThumbReg(4), ThumbReg(0), DelegateObject::GetOffsetOfMethodPtrAux());
+
+ // Replace the delegate reference with the object cached as the delegate's target.
+ // ldr r0, [r0, #offsetof(DelegateObject, _target)]
+ ThumbEmitLoadRegIndirect(ThumbReg(0), ThumbReg(0), DelegateObject::GetOffsetOfTarget());
+
+ // Perform the call.
+ // blx r12
+ ThumbEmitCallRegister(ThumbReg(12));
+
+ // restore frame pointer in r4
+ ThumbEmitAdd(ThumbReg(4), thumbRegSp, offsetOfFrame);
+
+ // Unlink SecureDelegateFrame. This requires the frame pointer in r4 and the thread pointer in r5.
+ EmitStubUnlinkFrame();
+
+ // Epilog
+ ThumbEmitEpilog();
+}
+
+//The function expects r4 to point to frame
+//and r5 must be current Thread*
+void StubLinkerCPU::EmitStubUnlinkFrame()
+{
+#ifdef _DEBUG
+ // EmitStubUnlinkFrame is emitted just before the epilog.
+ // Thus, at this point, all other callee-saved registers
+ // could be used since we are anyways going to restore them
+ // via epilog execution.
+
+ // Ensure that GSCookie is valid
+ //
+    // sub r6, r4, #4
+    // ldr r6, [r6] ; load the value of the GSCookie
+ ThumbEmitSub(ThumbReg(6), ThumbReg(4), 4);
+ ThumbEmitLoadRegIndirect(ThumbReg(6), ThumbReg(6), 0);
+
+ // mov r7, s_gsCookie
+ ThumbEmitMovConstant(ThumbReg(7), GetProcessGSCookie());
+
+ // cmp r6, r7 ; Are the GSCookie values in sync?
+ ThumbEmitCmpReg(ThumbReg(6), ThumbReg(7));
+
+ CodeLabel *pAllDoneLabel = NewCodeLabel();
+
+ // beq AllDone; yes, GSCookie is good.
+ ThumbEmitCondFlagJump(pAllDoneLabel, 0);
+
+ // If we are here, then GSCookie was bad.
+ // Call into DoJITFailFast.
+ //
+ // mov r12, DoJITFailFast
+ ThumbEmitMovConstant(ThumbReg(12), (int)DoJITFailFast);
+    // blx r12
+ ThumbEmitCallRegister(ThumbReg(12));
+ // Emit a breakpoint - we are not expected to come here at all
+ // if we performed a FailFast.
+ ThumbEmitBreakpoint();
+
+ //AllDone:
+ EmitLabel(pAllDoneLabel);
+#endif // _DEBUG
+
+ // Unlink the MulticastFrame.
+ // ldr r6, [r4 + #offsetof(MulticastFrame, m_Next)]
+ // str r6, [r5 + #offsetof(Thread, m_pFrame)]
+ ThumbEmitLoadRegIndirect(ThumbReg(6), ThumbReg(4), Frame::GetOffsetOfNextLink());
+ ThumbEmitStoreRegIndirect(ThumbReg(6), ThumbReg(5), offsetof(Thread, m_pFrame));
+
+}
+
+//pFrameVptr = vtable ptr of Frame
+//offsetOfFrame = Frame offset in bytes from sp
+//After this method: r4 points to the Frame on stack
+// and r5 has current Thread*
+void StubLinkerCPU::EmitStubLinkFrame(TADDR pFrameVptr, int offsetOfFrame, int offsetOfTransitionBlock)
+{
+ // Initialize r4 to point to where we start filling the frame.
+ ThumbEmitAdd(ThumbReg(4), thumbRegSp, offsetOfFrame - sizeof(GSCookie));
+
+ // Write the initial GS cookie value
+ // mov r5, s_gsCookie
+ // str r5, [r4]
+ ThumbEmitMovConstant(ThumbReg(5), s_gsCookie);
+ ThumbEmitStoreIndirectPostIncrement(ThumbReg(5), ThumbReg(4), 4);
+
+ // Initialize the vtable pointer.
+ // mov r5, #vfptr
+ // str r5, [r4 + #offsetof(Frame, _vfptr)]
+ ThumbEmitMovConstant(ThumbReg(5), pFrameVptr);
+ ThumbEmitStoreRegIndirect(ThumbReg(5), ThumbReg(4), 0);
+
+ // Link the frame to the thread's frame chain.
+ // r5 <- current Thread*
+ // ldr r6, [r5 + #offsetof(Thread, m_pFrame)]
+ // str r6, [r4 + #offsetof(MulticastFrame, m_Next)]
+ // str r4, [r5 + #offsetof(Thread, m_pFrame)]
+
+ TLSACCESSMODE mode = GetTLSAccessMode(GetThreadTLSIndex());
+ ThumbEmitGetThread(mode, ThumbReg(5));
+ if (mode == TLSACCESS_GENERIC)
+ {
+ // reload argument registers that could have been corrupted by the call
+ for (int reg = 0; reg < 4; reg++)
+ ThumbEmitLoadRegIndirect(ThumbReg(reg), ThumbReg(4),
+ offsetOfTransitionBlock + TransitionBlock::GetOffsetOfArgumentRegisters() + offsetof(ArgumentRegisters, r[reg]));
+ }
+
+ ThumbEmitLoadRegIndirect(ThumbReg(6), ThumbReg(5), Thread::GetOffsetOfCurrentFrame());
+ ThumbEmitStoreRegIndirect(ThumbReg(6), ThumbReg(4), Frame::GetOffsetOfNextLink());
+ ThumbEmitStoreRegIndirect(ThumbReg(4), ThumbReg(5), Thread::GetOffsetOfCurrentFrame());
+}
+
+#endif // CROSSGEN_COMPILE
+
+void StubLinkerCPU::ThumbEmitNearJump(CodeLabel *target)
+{
+ WRAPPER_NO_CONTRACT;
+ EmitLabelRef(target, reinterpret_cast<ThumbNearJump&>(gThumbNearJump), 0xe);
+}
+
+void StubLinkerCPU::ThumbEmitCondFlagJump(CodeLabel *target, UINT cond)
+{
+ WRAPPER_NO_CONTRACT;
+ EmitLabelRef(target, reinterpret_cast<ThumbNearJump&>(gThumbNearJump), cond);
+}
+
+void StubLinkerCPU::ThumbEmitCondRegJump(CodeLabel *target, BOOL nonzero, ThumbReg reg)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(reg <= 7);
+ UINT variation = reg;
+ if(nonzero)
+ variation = variation | 0x8;
+ EmitLabelRef(target, reinterpret_cast<ThumbCondJump&>(gThumbCondJump), variation);
+}
+
+unsigned int StubLinkerCPU::HashMulticastInvoke(MetaSig *pSig)
+{
+ // Generate a hash key as follows:
+ // Bit0-2 : num of general purpose registers used
+ // Bit3-7 : num of FP regs used (counting in terms of s0,s1...)
+ // Bit8-22 : num of stack bytes used
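+    //
+    // For example, for a delegate signature taking (int, double): r0 holds the target, the int
+    // goes in r1 and the double in s0/s1, so cGenReg = 2, cFPReg = 2, numStackBytes = 0 and the
+    // hash is (0 << 8) | (2 << 3) | 2 = 0x12.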
+
+ ArgIterator delegateCallConv(pSig);
+
+ UINT numStackBytes = delegateCallConv.SizeOfArgStack();
+
+ if (numStackBytes > 0x7FFF)
+ COMPlusThrow(kNotSupportedException, W("NotSupported_TooManyArgs"));
+
+ int cGenReg = 1; // r0 is always used for this pointer
+ int cFPReg = 0;
+
+ // if it has a return buffer argument r1 is also used
+ if(delegateCallConv.HasRetBuffArg())
+ cGenReg = 2;
+
+ int argOffset;
+ while ((argOffset = delegateCallConv.GetNextOffset()) != TransitionBlock::InvalidOffset)
+ {
+ ArgLocDesc currArgLoc;
+ delegateCallConv.GetArgLoc(argOffset, &currArgLoc);
+
+ if(currArgLoc.m_idxGenReg != -1)
+ cGenReg = currArgLoc.m_idxGenReg + currArgLoc.m_cGenReg;
+
+ if(currArgLoc.m_idxFloatReg != -1)
+ cFPReg = currArgLoc.m_idxFloatReg + currArgLoc.m_cFloatReg;
+ }
+
+ // only r0-r3 can be used for arguments
+ _ASSERTE(cGenReg <= 4);
+
+ // only s0-s15 can be used for arguments
+ _ASSERTE(cFPReg <= 16);
+
+ return (numStackBytes << 8 | cFPReg << 3 | cGenReg);
+}
+
+void StubLinkerCPU::ThumbCopyOneTailCallArg(UINT * pnSrcAlign, const ArgLocDesc * pArgLoc, UINT * pcbStackSpace)
+{
+ if (pArgLoc->m_fRequires64BitAlignment && (*pnSrcAlign & 1)) {
+ // ADD R0, #4
+ ThumbEmitIncrement(ThumbReg(0), 4);
+ *pnSrcAlign = 0;
+ }
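+    // (*pnSrcAlign tracks the parity of 4-byte slots consumed from the va_list; when an argument
+    // that requires 64-bit alignment follows an odd number of slots, the 4-byte bump above skips
+    // the padding slot the caller inserted.)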
+
+ // Integer register arguments
+ if (pArgLoc->m_cGenReg > 0) {
+ int iReg = pArgLoc->m_idxGenReg;
+ int maxReg = iReg + pArgLoc->m_cGenReg;
+ while (iReg + 2 <= maxReg) {
+ // LDM r0!, {r4,r5} ; Post incremented loads (2 bytes)
+ ThumbEmitLoadStoreMultiple(ThumbReg(0), true, ThumbReg(4).Mask() | ThumbReg(5).Mask());
+ // STR r4, [R1, #offset of arg reg] ; (2 bytes)
+ ThumbEmitStoreRegIndirect(ThumbReg(4), ThumbReg(1), offsetof(T_CONTEXT, R0) + (iReg * sizeof(DWORD)));
+ iReg++;
+ // STR r5, [R1, #offset of arg reg] ; (2 bytes)
+ ThumbEmitStoreRegIndirect(ThumbReg(5), ThumbReg(1), offsetof(T_CONTEXT, R0) + (iReg * sizeof(DWORD)));
+ iReg++;
+ }
+ if (iReg < maxReg) {
+ // LDR r3, [R0], #+4 ; Post incremented load (4 bytes)
+ ThumbEmitLoadIndirectPostIncrement(ThumbReg(3), ThumbReg(0), 4);
+ (*pnSrcAlign)++;
+
+ // STR r3, [R1, #offset of arg reg] ; (2 bytes)
+ ThumbEmitStoreRegIndirect(ThumbReg(3), ThumbReg(1), offsetof(T_CONTEXT, R0) + (iReg * sizeof(DWORD)));
+ }
+ }
+ if (pArgLoc->m_cFloatReg > 0) {
+ int iReg = pArgLoc->m_idxFloatReg;
+ int maxReg = iReg + pArgLoc->m_cFloatReg;
+ while (iReg + 2 <= maxReg) {
+ // LDM r0!, {r4,r5} ; Post incremented loads (2 bytes)
+ ThumbEmitLoadStoreMultiple(ThumbReg(0), true, ThumbReg(4).Mask() | ThumbReg(5).Mask());
+ // STR r4, [R1, #offset of arg reg] ; (2 bytes)
+ ThumbEmitStoreRegIndirect(ThumbReg(4), ThumbReg(1), offsetof(T_CONTEXT, S) + (iReg * sizeof(DWORD)));
+ iReg++;
+ // STR r5, [R1, #offset of arg reg] ; (2 bytes)
+ ThumbEmitStoreRegIndirect(ThumbReg(5), ThumbReg(1), offsetof(T_CONTEXT, S) + (iReg * sizeof(DWORD)));
+ iReg++;
+ }
+ if (iReg < maxReg) {
+ // LDR r3, [R0], #+4 ; Post incremented load (4 bytes)
+ ThumbEmitLoadIndirectPostIncrement(ThumbReg(3), ThumbReg(0), 4);
+ (*pnSrcAlign)++;
+
+ // STR r3, [R1, #offset of arg reg] ; (2 bytes)
+ ThumbEmitStoreRegIndirect(ThumbReg(3), ThumbReg(1), offsetof(T_CONTEXT, S) + (iReg * sizeof(DWORD)));
+ }
+ }
+
+ if (pArgLoc->m_cStack > 0) {
+ // Copy to the stack
+ // Be careful because this can get big and ugly.
+ _ASSERTE(*pcbStackSpace <= (pArgLoc->m_idxStack * sizeof(DWORD)));
+
+ // Pad the output
+ if (*pcbStackSpace < (pArgLoc->m_idxStack * sizeof(DWORD)))
+ {
+ const UINT cbPad = ((pArgLoc->m_idxStack * sizeof(DWORD)) - *pcbStackSpace);
+ _ASSERTE(cbPad == 4);
+ // ADD R2, #4
+ ThumbEmitIncrement(ThumbReg(2), cbPad);
+ *pcbStackSpace += cbPad;
+ }
+ int cStack = pArgLoc->m_cStack;
+ *pcbStackSpace += (cStack * sizeof(DWORD));
+
+ // Now start the copying
+ if (cStack > 8) {
+ // Loop to copy in 16-byte chunks per loop.
+ // Sacrifice r3 for the loop counter
+ ThumbEmitMovConstant(ThumbReg(3), pArgLoc->m_cStack & ~3);
+ // LoopLabel:
+ CodeLabel *pLoopLabel = NewCodeLabel();
+ EmitLabel(pLoopLabel);
+ const WORD mask = ThumbReg(4).Mask() | ThumbReg(5).Mask() | ThumbReg(6).Mask() | ThumbReg(7).Mask();
+ // LDM r0!, {r4,r5,r6,r7} ; Post incremented loads (2 bytes)
+ ThumbEmitLoadStoreMultiple(ThumbReg(0), true, mask);
+ // STM r2!, {r4,r5,r6,r7} ; Post incremented stores (2 bytes)
+ ThumbEmitLoadStoreMultiple(ThumbReg(2), false, mask);
+ // SUBS r3, #4
+ Emit16((WORD)(0x3800 | (ThumbReg(3) << 8) | 4));
+ // BNZ LoopLabel
+ ThumbEmitCondFlagJump(pLoopLabel, thumbCondNe.cond);
+
+ cStack = cStack % 4;
+ // Now deal with the tail if any
+ }
+ _ASSERTE(cStack <= 8);
+
+ while (cStack > 1) {
+ _ASSERTE(cStack >= 2);
+ WORD mask = ThumbReg(4).Mask() | ThumbReg(5).Mask();
+ cStack -= 2;
+ if (cStack > 0) {
+ mask |= ThumbReg(6).Mask();
+ cStack--;
+                // Instead of copying 4 slots and leaving a single-slot remainder (which would
+                // require the bigger opcodes for the tail), copy only 3 slots this iteration,
+                // saving 2 for next time. :)
+ if (cStack == 1 || cStack > 2) {
+ mask |= ThumbReg(7).Mask();
+ cStack--;
+ }
+ else {
+ // We're reading an odd amount from the stack
+ (*pnSrcAlign)++;
+ }
+ }
+
+ // LDM r0!, {r4,r5,r6,r7} ; Post incremented loads (2 bytes)
+ ThumbEmitLoadStoreMultiple(ThumbReg(0), true, mask);
+ // STM r2!, {r4,r5,r6,r7} ; Post incremented stores (2 bytes)
+ ThumbEmitLoadStoreMultiple(ThumbReg(2), false, mask);
+ _ASSERTE((cStack == 0) || (cStack >= 2));
+ }
+ if (cStack > 0) {
+ _ASSERTE(cStack == 1);
+ // We're reading an odd amount from the stack
+ (*pnSrcAlign)++;
+ // LDR r12, [R0], #+4 ; Post incremented load (4 bytes)
+ ThumbEmitLoadIndirectPostIncrement(ThumbReg(12), ThumbReg(0), 4);
+ // STR r12, [R2], #+4 ; Post incremented store (4 bytes)
+ ThumbEmitStoreIndirectPostIncrement(ThumbReg(12), ThumbReg(2), 4);
+ }
+ }
+}
+
+
+Stub * StubLinkerCPU::CreateTailCallCopyArgsThunk(CORINFO_SIG_INFO * pSig,
+ CorInfoHelperTailCallSpecialHandling flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ CPUSTUBLINKER sl;
+ CPUSTUBLINKER* pSl = &sl;
+
+ // Generates a function that looks like this:
+ // size_t CopyArguments(va_list args, (R0)
+ // CONTEXT *pCtx, (R1)
+ // DWORD *pvStack, (R2)
+ // size_t cbStack) (R3)
+ // {
+ // if (pCtx != NULL) {
+ // foreach (arg in args) {
+ // copy into pCtx or pvStack
+ // }
+ // }
+ // return <size of stack needed>;
+ // }
+ //
+
+ Module * module = GetModule(pSig->scope);
+ Instantiation classInst((TypeHandle*)pSig->sigInst.classInst, pSig->sigInst.classInstCount);
+ Instantiation methodInst((TypeHandle*)pSig->sigInst.methInst, pSig->sigInst.methInstCount);
+ SigTypeContext typeCtxt(classInst, methodInst);
+
+ // The -8 is because R11 points at the pushed {R11, LR} pair, and it is aligned.
+ // This is the magic distance, between the frame pointer and the Frame.
+ const UINT cbFrameOffset = (sizeof(FrameWithCookie<TailCallFrame>) - 8);
+
+ bool fNeedExtraRegs = false;
+ UINT copyEstimate = 0;
+ {
+ // Do a quick scan of the arguments looking for ones that will probably need extra registers
+        // and estimating the size of the method.
+ if (flags & CORINFO_TAILCALL_STUB_DISPATCH_ARG)
+ copyEstimate += 6;
+
+ if (pSig->hasThis())
+ copyEstimate += 6;
+
+ MetaSig msig(pSig->pSig, pSig->cbSig, module, &typeCtxt);
+ if (pSig->hasTypeArg())
+ msig.SetHasParamTypeArg();
+ ArgIterator argPlacer(&msig);
+
+ if (argPlacer.HasRetBuffArg()) {
+ copyEstimate += 24;
+ }
+
+ if (pSig->hasTypeArg() || pSig->isVarArg())
+ copyEstimate += 6;
+
+ int argOffset;
+ while ((argOffset = argPlacer.GetNextOffset()) != TransitionBlock::InvalidOffset)
+ {
+ ArgLocDesc argLoc;
+ argPlacer.GetArgLoc(argOffset, &argLoc);
+
+ if (argLoc.m_cStack > 1 || argLoc.m_cGenReg > 1 || argLoc.m_cFloatReg > 1) {
+ fNeedExtraRegs = true;
+ }
+ else {
+ copyEstimate += 8;
+ }
+ }
+ }
+
+ if (fNeedExtraRegs) {
+ // Inject a proper prolog
+ // push {r4-r7,lr}
+ pSl->ThumbEmitProlog(4, 0, false);
+ }
+
+ CodeLabel *pNullLabel = pSl->NewCodeLabel();
+
+ if (!fNeedExtraRegs && copyEstimate < 100) {
+        // The real range of CBZ is 0-126 bytes, but estimating the code size that precisely is
+        // more work than it's worth just to save a few bytes.
+
+        // CBZ R1, NullLabel
+ pSl->ThumbEmitCondRegJump(pNullLabel, false, ThumbReg(1));
+ }
+ else {
+ // CMP R1, 0 ; T1 encoding
+ pSl->Emit16((WORD)(0x2900));
+
+ // BEQ NullLabel
+ pSl->ThumbEmitCondFlagJump(pNullLabel, thumbCondEq.cond);
+ }
+
+ UINT cbStackSpace = 0;
+ UINT cbReturnBufferSpace = 0;
+ UINT nSrcAlign = 0;
+
+ if (flags & CORINFO_TAILCALL_STUB_DISPATCH_ARG) {
+ // This is set for stub dispatch or 'thisInSecretRegister'
+ // The JIT placed an extra argument in the list that needs to
+ // get shoved into R4, and not counted.
+ // pCtx->R4 = va_arg(args, DWORD);
+
+ // LDR r3, [R0], #+4 ; Post incremented load (4 bytes)
+ pSl->ThumbEmitLoadIndirectPostIncrement(ThumbReg(3), ThumbReg(0), 4);
+ // STR r3, [R1, #offset of R4] ; (2 bytes)
+ pSl->ThumbEmitStoreRegIndirect(ThumbReg(3), ThumbReg(1), offsetof(T_CONTEXT, R4));
+ nSrcAlign++;
+ }
+
+
+ MetaSig msig(pSig->pSig, pSig->cbSig, module, &typeCtxt);
+ if (pSig->hasTypeArg())
+ msig.SetHasParamTypeArg();
+ ArgIterator argPlacer(&msig);
+ ArgLocDesc argLoc;
+
+ // First comes the 'this' pointer
+ if (argPlacer.HasThis()) {
+ argPlacer.GetThisLoc(&argLoc);
+ pSl->ThumbCopyOneTailCallArg(&nSrcAlign, &argLoc, &cbStackSpace);
+ }
+
+ // Next comes the return buffer
+ if (argPlacer.HasRetBuffArg()) {
+ // We always reserve space for the return buffer, but we never zero it out,
+ // and we never report it. Thus the callee shouldn't do RVO and expect
+ // to be able to read GC pointers from it.
+ // If the passed in return buffer is already pointing above the frame,
+ // then we need to pass it along (so it will get passed out).
+ // Otherwise we assume the caller is returning void, so we just pass in
+ // dummy space to be overwritten.
+
+ argPlacer.GetRetBuffArgLoc(&argLoc);
+ _ASSERTE(argLoc.m_cStack == 0);
+ _ASSERTE(argLoc.m_cFloatReg == 0);
+ _ASSERTE(argLoc.m_cGenReg == 1);
+
+ // Grab some space from the top of the frame and pass that in as a dummy
+        // buffer if needed. Align to 8-byte boundary (after taking into account the Frame).
+ // Do this by adding the Frame size, align, then remove the Frame size...
+ _ASSERTE((pSig->retType == CORINFO_TYPE_REFANY) || (pSig->retType == CORINFO_TYPE_VALUECLASS));
+ TypeHandle th(pSig->retTypeClass);
+ UINT cbUsed = ((th.GetSize() + cbFrameOffset + 0x7) & ~0x7) - cbFrameOffset;
+ _ASSERTE(cbUsed >= th.GetSize());
+ cbReturnBufferSpace += cbUsed;
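+        // Illustrative arithmetic (hypothetical numbers): if th.GetSize() == 20
+        // and cbFrameOffset == 40, then cbUsed = ((20 + 40 + 0x7) & ~0x7) - 40 = 24,
+        // so the carved-out buffer is 8-byte aligned once the Frame is added back
+        // and still covers the 20 bytes of the return value.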
+
+ // LDR r3, [R0], #+4 ; Post incremented load (4 bytes)
+ pSl->ThumbEmitLoadIndirectPostIncrement(ThumbReg(3), ThumbReg(0), 4);
+
+ // LDR r12, [R1, #offset of R11] ; (2 bytes)
+ pSl->ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(1), offsetof(T_CONTEXT, R11));
+
+ // CMP r3, r12 ; (2 bytes)
+ pSl->ThumbEmitCmpReg(ThumbReg(3), ThumbReg(12));
+
+ CodeLabel *pSkipLabel = pSl->NewCodeLabel();
+        // BHI SkipLabel    ; skip if R3 > R12 unsigned (2 bytes)
+ pSl->ThumbEmitCondFlagJump(pSkipLabel, thumbCondHi.cond);
+
+ // Also check the lower bound of the stack in case the return buffer is on the GC heap
+ // and the GC heap is below the stack
+ // CMP r3, sp ; (2 bytes)
+ pSl->ThumbEmitCmpReg(ThumbReg(3), thumbRegSp);
+        // BLO SkipLabel ; skip if r3 < sp unsigned (2 bytes)
+ pSl->ThumbEmitCondFlagJump(pSkipLabel, thumbCondCc.cond);
+
+ // If the caller is expecting us to simulate a return buffer for the callee
+ // pass that pointer in now, by subtracting from R11 space for the Frame
+ // and space for the return buffer.
+ UINT offset = cbUsed + cbFrameOffset;
+ if (offset < 4096) {
+ // SUB r3, r12, #offset ; (4 bytes)
+ pSl->ThumbEmitSub(ThumbReg(3), ThumbReg(12), offset);
+ }
+ else {
+ offset = UINT(-int(offset)); // Silence the @#$%^ warning
+ // MOVW/MOVT (4-8 bytes)
+ // ADD r3, r12; (2 bytes)
+ pSl->ThumbEmitAdd(ThumbReg(3), ThumbReg(12), offset);
+ }
+ // SkipLabel:
+ pSl->EmitLabel(pSkipLabel);
+ // STR r3, [R1, #offset of arg reg] ; (2 bytes)
+ pSl->ThumbEmitStoreRegIndirect(ThumbReg(3), ThumbReg(1), offsetof(T_CONTEXT, R0) + (argLoc.m_idxGenReg * sizeof(DWORD)));
+
+ nSrcAlign++;
+ }
+
+ // Generics Instantiation Parameter
+ if (pSig->hasTypeArg()) {
+ argPlacer.GetParamTypeLoc(&argLoc);
+ pSl->ThumbCopyOneTailCallArg(&nSrcAlign, &argLoc, &cbStackSpace);
+ }
+
+ // VarArgs Cookie Parameter
+ if (pSig->isVarArg()) {
+ argPlacer.GetVASigCookieLoc(&argLoc);
+ pSl->ThumbCopyOneTailCallArg(&nSrcAlign, &argLoc, &cbStackSpace);
+ }
+
+ // Now for *all* the 'real' arguments
+ int argOffset;
+ while ((argOffset = argPlacer.GetNextOffset()) != TransitionBlock::InvalidOffset)
+ {
+ argPlacer.GetArgLoc(argOffset, &argLoc);
+
+ pSl->ThumbCopyOneTailCallArg(&nSrcAlign, &argLoc, &cbStackSpace);
+ }
+
+ // Now that we are done moving arguments, add back in the stack space we reserved
+ // for the return buffer.
+ cbStackSpace += cbReturnBufferSpace;
+
+ // Keep the stack space 8-byte aligned
+ if ((cbStackSpace + cbFrameOffset) & 7) {
+ cbStackSpace += 4;
+ }
+ _ASSERTE(((cbStackSpace + cbFrameOffset) & 7) == 0);
+
+ CodeLabel *pReturnLabel = pSl->NewCodeLabel();
+ // B ReturnLabel:
+ pSl->ThumbEmitNearJump(pReturnLabel);
+
+ // NullLabel:
+ pSl->EmitLabel(pNullLabel);
+ // MOVW/MOVT r0, 0 ; No GCLayout info
+ pSl->ThumbEmitMovConstant(ThumbReg(0), 0);
+ // STR r0, [r3]
+ pSl->ThumbEmitStoreRegIndirect(ThumbReg(0), ThumbReg(3), 0);
+
+ // ReturnLabel:
+ pSl->EmitLabel(pReturnLabel);
+
+ // MOVW/MOVT r0, #cbStackSpace
+ pSl->ThumbEmitMovConstant(ThumbReg(0), cbStackSpace);
+
+ if (fNeedExtraRegs) {
+        // Inject a proper epilog
+ // pop {r4-r7,pc}
+ pSl->ThumbEmitEpilog();
+ }
+ else {
+ // bx lr
+ pSl->ThumbEmitJumpRegister(thumbRegLr);
+ }
+
+
+ return pSl->Link();
+}
+
+
+VOID ResetCurrentContext()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+#endif // !DACCESS_COMPILE
+
+#if defined(FEATURE_REMOTING) && !defined(CROSSGEN_COMPILE)
+
+#ifndef DACCESS_COMPILE
+PCODE CTPMethodTable::CreateThunkForVirtualMethod(DWORD dwSlot, BYTE *startaddr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(startaddr));
+ }
+ CONTRACTL_END;
+
+ WORD *pCode = (WORD*)((ULONG_PTR)startaddr);
+
+ // Slot literal is split into four pieces in the mov instruction:
+ // imm4:i:imm3:imm8
+ _ASSERTE(FitsInU2(dwSlot));
+ WORD imm4 = ((WORD)dwSlot & 0xf000) >> 12;
+ WORD i = ((WORD)dwSlot & 0x0800) >> 11;
+ WORD imm3 = ((WORD)dwSlot & 0x0700) >> 8;
+ WORD imm8 = (WORD)dwSlot & 0x00ff;
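+    // Illustrative example: dwSlot == 0x1234 splits as imm4 == 0x1, i == 0,
+    // imm3 == 0x2, imm8 == 0x34; GetMethodDescByASM below reassembles the
+    // pieces as (imm4 << 12) | (i << 11) | (imm3 << 8) | imm8 == 0x1234.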
+
+ // f240 0c00 mov r12, #dwSlot
+ // f8df f000 ldr pc, [pc, #0]
+ // ???? ???? dcd TransparentProxyStub
+
+ *pCode++ = 0xf240 | (i << 10) | imm4;
+ *pCode++ = 0x0c00 | (imm3 << 12) | imm8;
+ *pCode++ = 0xf8df;
+ *pCode++ = 0xf000;
+ *((PCODE*)pCode) = GetTPStubEntryPoint();
+
+ _ASSERTE(CVirtualThunkMgr::IsThunkByASM((PCODE)startaddr));
+
+ return (PCODE)(startaddr + THUMB_CODE);
+}
+#endif // !DACCESS_COMPILE
+
+BOOL CVirtualThunkMgr::IsThunkByASM(PCODE startaddr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(startaddr != NULL);
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ PTR_WORD pInstr = dac_cast<PTR_WORD>(PCODEToPINSTR(startaddr));
+
+ return (((pInstr[0] & 0xf240) == 0xf240) &&
+ ((pInstr[1] & 0x0c00) == 0x0c00) &&
+ (pInstr[2] == 0xf8df) &&
+ (pInstr[3] == 0xf000) &&
+ (*(PCODE*)&pInstr[4] == CTPMethodTable::GetTPStubEntryPoint()));
+#else
+ DacNotImpl();
+ return FALSE;
+#endif
+}
+
+MethodDesc *CVirtualThunkMgr::GetMethodDescByASM(PCODE startaddr, MethodTable *pMT)
+{
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(startaddr != NULL);
+ PRECONDITION(CheckPointer(pMT));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ _ASSERTE(IsThunkByASM(startaddr));
+
+ PTR_WORD pInstr = dac_cast<PTR_WORD>(PCODEToPINSTR(startaddr));
+
+ WORD i = (pInstr[0] & 0x0400) >> 10;
+ WORD imm4 = pInstr[0] & 0x000f;
+ WORD imm3 = (pInstr[1] & 0x7000) >> 12;
+ WORD imm8 = pInstr[1] & 0x00ff;
+
+ WORD wSlot = (imm4 << 12) | (i << 11) | (imm3 << 8) | imm8;
+
+ RETURN (pMT->GetMethodDescForSlot(wSlot));
+}
+
+#ifndef DACCESS_COMPILE
+
+BOOL CVirtualThunkMgr::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(stubStartAddress != NULL);
+ PRECONDITION(CheckPointer(trace));
+ }
+ CONTRACTL_END;
+
+ TADDR pInstr = PCODEToPINSTR(stubStartAddress);
+
+ BOOL bIsStub = FALSE;
+
+    // Find a thunk whose code address matches the starting address
+ LPBYTE pThunk = FindThunk((LPBYTE)pInstr);
+ if (pThunk)
+ {
+ LONG destAddress = 0;
+
+        // The stub target address is stored as an absolute pointer 8 bytes into the thunk.
+ destAddress = *(LONG*)(pThunk + 8);
+
+ // We cannot tell where the stub will end up until OnCall is reached.
+ // So we tell the debugger to run till OnCall is reached and then
+ // come back and ask us again for the actual destination address of
+ // the call
+
+ Stub *stub = Stub::RecoverStub((TADDR)destAddress);
+
+ trace->InitForFramePush(stub->GetPatchAddress());
+ bIsStub = TRUE;
+ }
+
+ return bIsStub;
+}
+
+extern "C" UINT_PTR __stdcall CRemotingServices__CheckForContextMatch(Object* pStubData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE; // due to the Object parameter
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pStubData));
+ }
+ CONTRACTL_END;
+
+ UINT_PTR contextID = *(UINT_PTR*)pStubData->UnBox();
+ UINT_PTR contextCur = (UINT_PTR)GetThread()->m_Context;
+ return (contextCur != contextID); // chosen to match x86 convention
+}
+
+// Return true if the current context matches that of the transparent proxy given.
+BOOL CTPMethodTable__GenericCheckForContextMatch(Object* orTP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE; // due to the Object parameter
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Object *StubData = OBJECTREFToObject(((TransparentProxyObject*)orTP)->GetStubData());
+ CTPMethodTable::CheckContextCrossingProc *pfnCheckContextCrossing =
+ (CTPMethodTable::CheckContextCrossingProc*)(((TransparentProxyObject*)orTP)->GetStub());
+ return pfnCheckContextCrossing(StubData) == 0;
+}
+
+#endif // !DACCESS_COMPILE
+
+#endif // FEATURE_REMOTING && !CROSSGEN_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // mov r12, pc
+    // ldr pc, [pc, #4]
+    // (2 bytes of padding)
+    // dcd target
+ WORD rgCode[] = {
+ 0x46fc,
+ 0xf8df, 0xf004
+ };
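+    // Resulting layout (illustrative): 6 bytes of code, 2 bytes of padding,
+    // then the 4-byte target at offset 8. The Thumb2 literal load computes
+    // Align(instruction address + 4, 4) + 4, which for the ldr at offset 2
+    // lands on offset 8, where the target is stored below.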
+
+ BYTE *pBuffer = (BYTE*)pCOMMethod - COMMETHOD_CALL_PRESTUB_SIZE;
+
+ memcpy(pBuffer, rgCode, sizeof(rgCode));
+ *((PCODE*)(pBuffer + sizeof(rgCode) + 2)) = target;
+
+ // Ensure that the updated instructions get actually written
+ ClrFlushInstructionCache(pBuffer, COMMETHOD_CALL_PRESTUB_SIZE);
+
+ _ASSERTE(IS_ALIGNED(pBuffer + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET, sizeof(void*)) &&
+ *((PCODE*)(pBuffer + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET)) == target);
+}
+#endif // FEATURE_COMINTEROP
+
+#ifndef DACCESS_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+
+DWORD GetLogicalCpuCount()
+{
+ // Just use the OS to return this information (the APIs used exist on all versions of Windows which
+ // support ARM).
+ return GetLogicalCpuCountFromOS();
+}
+
+#ifdef FEATURE_READYTORUN
+
+//
+// Allocation of dynamic helpers
+//
+
+#define DYNAMIC_HELPER_ALIGNMENT sizeof(TADDR)
+
+#define BEGIN_DYNAMIC_HELPER_EMIT(size) \
+ SIZE_T cb = size; \
+ SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \
+ BYTE * pStart = (BYTE *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
+ BYTE * p = pStart;
+
+#define END_DYNAMIC_HELPER_EMIT() \
+ _ASSERTE(pStart + cb == p); \
+ while (p < pStart + cbAligned) { *(WORD *)p = 0xdefe; p += 2; } \
+ ClrFlushInstructionCache(pStart, cbAligned); \
+ return (PCODE)((TADDR)pStart | THUMB_CODE)
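+// Note: the 0xdefe padding written above is in the Thumb permanently-undefined
+// (UDF) encoding space, so any stray execution of the alignment padding faults
+// immediately rather than running off into adjacent memory.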
+
+static void MovRegImm(BYTE* p, int reg, TADDR imm)
+{
+ LIMITED_METHOD_CONTRACT;
+ *(WORD *)(p + 0) = 0xF240;
+ *(WORD *)(p + 2) = (UINT16)(reg << 8);
+ *(WORD *)(p + 4) = 0xF2C0;
+ *(WORD *)(p + 6) = (UINT16)(reg << 8);
+ PutThumb2Mov32((UINT16 *)p, imm);
+}
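+// Illustrative example: MovRegImm(p, 12, 0x12345678) emits the Thumb2 pair
+// "movw r12, #0x5678" / "movt r12, #0x1234"; PutThumb2Mov32 splits each
+// 16-bit half of the immediate across the instructions' imm4:i:imm3:imm8 fields.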
+
+PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+{
+ STANDARD_VM_CONTRACT;
+
+ BEGIN_DYNAMIC_HELPER_EMIT(18);
+
+ // mov r0, arg
+ MovRegImm(p, 0, arg);
+ p += 8;
+
+ // mov r12, target
+ MovRegImm(p, 12, target);
+ p += 8;
+
+ // bx r12
+ *(WORD *)p = 0x4760;
+ p += 2;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(18);
+
+ // mov r1, arg
+ MovRegImm(p, 1, arg);
+ p += 8;
+
+ // mov r12, target
+ MovRegImm(p, 12, target);
+ p += 8;
+
+ // bx r12
+ *(WORD *)p = 0x4760;
+ p += 2;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(26);
+
+ // mov r0, arg
+ MovRegImm(p, 0, arg);
+ p += 8;
+
+ // mov r1, arg2
+ MovRegImm(p, 1, arg2);
+ p += 8;
+
+ // mov r12, target
+ MovRegImm(p, 12, target);
+ p += 8;
+
+ // bx r12
+ *(WORD *)p = 0x4760;
+ p += 2;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelperArgMove(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(20);
+
+ // mov r1, r0
+ *(WORD *)p = 0x4601;
+ p += 2;
+
+ // mov r0, arg
+ MovRegImm(p, 0, arg);
+ p += 8;
+
+ // mov r12, target
+ MovRegImm(p, 12, target);
+ p += 8;
+
+ // bx r12
+ *(WORD *)p = 0x4760;
+ p += 2;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateReturn(LoaderAllocator * pAllocator)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(2);
+
+ *(WORD *)p = 0x4770; // bx lr
+ p += 2;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateReturnConst(LoaderAllocator * pAllocator, TADDR arg)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(10);
+
+ // mov r0, arg
+ MovRegImm(p, 0, arg);
+ p += 8;
+
+ // bx lr
+ *(WORD *)p = 0x4770;
+ p += 2;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateReturnIndirConst(LoaderAllocator * pAllocator, TADDR arg, INT8 offset)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT((offset != 0) ? 16 : 12);
+
+ // mov r0, arg
+ MovRegImm(p, 0, arg);
+ p += 8;
+
+ // ldr r0, [r0]
+ *(WORD *)p = 0x6800;
+ p += 2;
+
+ if (offset != 0)
+ {
+ // add r0, r0, <offset>
+ *(WORD *)(p + 0) = 0xF100;
+ *(WORD *)(p + 2) = offset;
+ p += 4;
+ }
+
+ // bx lr
+ *(WORD *)p = 0x4770;
+ p += 2;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+{
+    BEGIN_DYNAMIC_HELPER_EMIT(18);
+
+ // mov r2, arg
+ MovRegImm(p, 2, arg);
+ p += 8;
+
+ // mov r12, target
+ MovRegImm(p, 12, target);
+ p += 8;
+
+ // bx r12
+ *(WORD *)p = 0x4760;
+ p += 2;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(26);
+
+ // mov r2, arg
+ MovRegImm(p, 2, arg);
+ p += 8;
+
+    // mov r3, arg2
+ MovRegImm(p, 3, arg2);
+ p += 8;
+
+ // mov r12, target
+ MovRegImm(p, 12, target);
+ p += 8;
+
+ // bx r12
+ *(WORD *)p = 0x4760;
+ p += 2;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+#endif // FEATURE_READYTORUN
+
+#endif // CROSSGEN_COMPILE
+
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/arm/virtualcallstubcpu.hpp b/src/vm/arm/virtualcallstubcpu.hpp
new file mode 100644
index 0000000000..5a359d87fc
--- /dev/null
+++ b/src/vm/arm/virtualcallstubcpu.hpp
@@ -0,0 +1,385 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// VirtualCallStubCpu.hpp
+//
+#ifndef _VIRTUAL_CALL_STUB_ARM_H
+#define _VIRTUAL_CALL_STUB_ARM_H
+
+#ifdef DECLARE_DATA
+#include "asmconstants.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif
+#endif
+
+//#define STUB_LOGGING
+
+#include <pshpack1.h> // Since we are placing code, we want byte packing of the structs
+
+#define USES_LOOKUP_STUBS 1
+
+/*********************************************************************************************
+Stubs that contain code are all part of larger structs called Holders. There is a
+Holder for each kind of stub, i.e. XXXStub is contained within XXXHolder. Holders are
+essentially an implementation trick that allowed rearranging the code sequences more
+easily while trying out different alternatives, and for dealing with any alignment
+issues in a way that was mostly immune to the actual code sequences. These Holders
+should be revisited when the stub code sequences are fixed, since in many cases they
+add extra space to a stub that is not really needed.
+
+Stubs are placed in cache and hash tables. Since unaligned access of data in memory
+is very slow, the keys used in those tables should be aligned. The things used as keys
+typically also occur in the generated code, e.g. a token as an immediate part of an instruction.
+For now, to avoid alignment computations as different code strategies are tried out, the key
+fields are all in the Holders. Eventually, many of these fields should be dropped, and the instruction
+streams aligned so that the immediate fields fall on aligned boundaries.
+*/
+
+#if USES_LOOKUP_STUBS
+
+struct LookupStub;
+struct LookupHolder;
+
+/*LookupStub**************************************************************************************
+Virtual and interface call sites are initially setup to point at LookupStubs.
+This is because the runtime type of the <this> pointer is not yet known,
+so the target cannot be resolved. Note: if the jit is able to determine the runtime type
+of the <this> pointer, it should be generating a direct call not a virtual or interface call.
+This stub pushes a lookup token onto the stack to identify the sought after method, and then
+jumps into the EE (VirtualCallStubManager::ResolveWorkerStub) to effectuate the lookup and
+transfer of control to the appropriate target method implementation, perhaps patching of the call site
+along the way to point to a more appropriate stub. Hence callsites that point to LookupStubs
+get quickly changed to point to another kind of stub.
+*/
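+// In effect the stub is just (illustrative pseudocode only, not the real
+// Thumb sequence):
+//     push _token; jump _resolveWorkerTarget;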
+struct LookupStub
+{
+ inline PCODE entryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_entryPoint[0] + THUMB_CODE; }
+ inline size_t token() { LIMITED_METHOD_CONTRACT; return _token; }
+ inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(LookupStub); }
+
+private:
+ friend struct LookupHolder;
+ const static int entryPointLen = 4;
+
+ WORD _entryPoint[entryPointLen];
+ PCODE _resolveWorkerTarget; // xx xx xx xx target address
+ size_t _token; // xx xx xx xx 32-bit constant
+};
+
+/* LookupHolders are the containers for LookupStubs, they provide for any alignment of
+stubs as necessary. In the case of LookupStubs, alignment is necessary since
+LookupStubs are placed in a hash table keyed by token. */
+struct LookupHolder
+{
+ static void InitializeStatic();
+
+ void Initialize(PCODE resolveWorkerTarget, size_t dispatchToken);
+
+ LookupStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
+
+ static LookupHolder* FromLookupEntry(PCODE lookupEntry);
+
+private:
+ friend struct LookupStub;
+
+ LookupStub _stub;
+};
+
+
+#endif // USES_LOOKUP_STUBS
+
+struct DispatchStub;
+struct DispatchHolder;
+
+/*DispatchStub**************************************************************************************
+Monomorphic and mostly monomorphic call sites eventually point to DispatchStubs.
+A dispatch stub has an expected type (expectedMT), target address (target) and fail address (failure).
+If the <this> object in the calling frame is in fact of the expected type, then
+control is transferred to the target address, the method implementation. If not,
+then control is transferred to the fail address, a fail stub (see below) where a polymorphic
+lookup is done to find the correct address to go to.
+
+implementation note: Order, choice of instructions, and branch directions
+should be carefully tuned since it can have an inordinate effect on performance. Particular
+attention needs to be paid to the effects on the BTB and branch prediction, both in the small
+and in the large, i.e. it needs to run well in the face of BTB overflow--using static predictions.
+Note that since this stub is only used for mostly monomorphic callsites (ones that are not mostly
+monomorphic get patched to something else), the conditional jump "jne failure" is mostly not taken, and
+hence it is important that branch prediction statically predict this, which means it must be a forward jump.
+The alternative is to reverse the order of the jumps and make sure that the resulting conditional jump
+"je implTarget" is statically predicted as taken, i.e. a backward jump. The current choice was taken since it was easier
+to control the placement of the stubs than control the placement of the jitted code and the stubs. */
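+// A minimal sketch of the control flow a DispatchStub implements
+// (illustrative pseudocode only; the real code is hand-tuned Thumb):
+//     if (((Object*)this)->GetMethodTable() == _expectedMT)
+//         goto _implTarget;    // fast monomorphic path
+//     goto _failTarget;        // typically a ResolveStub's failEntryPoint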
+struct DispatchStub
+{
+ inline PCODE entryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)(&_entryPoint[0]) + THUMB_CODE; }
+
+ inline size_t expectedMT() { LIMITED_METHOD_CONTRACT; return _expectedMT; }
+ inline PCODE implTarget() { LIMITED_METHOD_CONTRACT; return _implTarget; }
+ inline PCODE failTarget() { LIMITED_METHOD_CONTRACT; return _failTarget; }
+ inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(DispatchStub); }
+
+private:
+ friend struct DispatchHolder;
+ const static int entryPointLen = 12;
+
+ WORD _entryPoint[entryPointLen];
+ size_t _expectedMT;
+ PCODE _failTarget;
+ PCODE _implTarget;
+};
+
+/* DispatchHolders are the containers for DispatchStubs, they provide for any alignment of
+stubs as necessary. DispatchStubs are placed in a hashtable and in a cache. The keys for both
+are the pair expectedMT and token. Efficiency of the hash table is not a big issue,
+since lookups in it are fairly rare. Efficiency of the cache is paramount since it is accessed frequently
+(see ResolveStub below). Currently we are storing both of these fields in the DispatchHolder to simplify
+alignment issues. If inlineMT in the stub itself was aligned, then it could be the expectedMT field.
+While the token field can be logically gotten by following the failure target to the failEntryPoint
+of the ResolveStub and then to the token over there, for perf reasons of cache access, it is duplicated here.
+This allows us to use DispatchStubs in the cache. The alternative is to provide some other immutable struct
+for the cache composed of the triplet (expectedMT, token, target) and some sort of reclamation scheme when
+they are thrown out of the cache via overwrites (since concurrency will make the obvious approaches invalid).
+*/
+
+/* @workaround for ee resolution - Since the EE does not currently have a resolver function that
+does what we want, see notes in implementation of VirtualCallStubManager::Resolver, we are
+using dispatch stubs to simulate what we want. That means that inlineTarget, which should be immutable
+is in fact written. Hence we have moved target out into the holder and aligned it so we can
+atomically update it. When we get a resolver function that does what we want, we can drop this field,
+and live with just the inlineTarget field in the stub itself, since immutability will hold.*/
+struct DispatchHolder
+{
+ static void InitializeStatic();
+
+ void Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT);
+
+ DispatchStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
+
+ static DispatchHolder* FromDispatchEntry(PCODE dispatchEntry);
+
+private:
+ //force expectedMT to be aligned since used as key in hash tables.
+ DispatchStub _stub;
+};
+
+struct ResolveStub;
+struct ResolveHolder;
+
+/*ResolveStub**************************************************************************************
+Polymorphic call sites and monomorphic calls that fail end up in a ResolveStub. There is only
+one resolver stub built for any given token, even though there may be many call sites that
+use that token and many distinct <this> types that are used in the calling call frames. A resolver stub
+actually has two entry points, one for polymorphic call sites and one for dispatch stubs that fail on their
+expectedMT test. There is a third part of the resolver stub that enters the ee when a decision should
+be made about changing the callsite. Therefore, we have defined the resolver stub as three distinct pieces,
+even though they are actually allocated as a single contiguous block of memory. These pieces are:
+
+A ResolveStub has two entry points:
+
+FailEntry - where the dispatch stub goes if the expected MT test fails. This piece of the stub does
+a check to see how often we are actually failing. If failures are frequent, control transfers to the
+patch piece to cause the call site to be changed from a mostly monomorphic callsite
+(calls dispatch stub) to a polymorphic callsite (calls resolve stub). If failures are rare, control
+transfers to the resolve piece (see ResolveStub). The failEntryPoint decrements a counter
+every time it is entered. The ee at various times will add a large chunk to the counter.
+
+ResolveEntry - does a lookup in a cache by hashing the actual type of the calling frame's
+<this> and the token identifying the (contract,method) pair desired. If found, control is transferred
+to the method implementation. If not found in the cache, the token is pushed and the ee is entered via
+the ResolveWorkerStub to do a full lookup and eventual transfer to the correct method implementation. Since
+there is a different resolve stub for every token, the token can be inlined and the token can be pre-hashed.
+The effectiveness of this approach is highly sensitive to the effectiveness of the hashing algorithm used,
+as well as its speed. It turns out it is very important to make the hash function sensitive to all
+of the bits of the method table, as method tables are laid out in memory in a very non-random way. Before
+making any changes to the code sequences here, it is very important to measure and tune them as perf
+can vary greatly, in unexpected ways, with seeming minor changes.
+
+Implementation note - Order, choice of instructions, and branch directions
+should be carefully tuned since it can have an inordinate effect on performance. Particular
+attention needs to be paid to the effects on the BTB and branch prediction, both in the small
+and in the large, i.e. it needs to run well in the face of BTB overflow--using static predictions.
+Note that this stub is called in highly polymorphic cases, but the cache should have been sized
+and the hash function chosen to maximize the cache hit case. Hence the cmp/jcc instructions should
+mostly be going down the cache hit route, and it is important that this be statically predicted as so.
+Hence the 3 jcc instrs need to be forward jumps. As structured, there is only one jmp/jcc that typically
+gets put in the BTB since all the others typically fall straight thru. Minimizing potential BTB entries
+is important. */
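+// Illustrative pseudocode for the pieces described above (names refer to the
+// fields of the struct below; the real code is hand-tuned Thumb):
+//     failEntryPoint:    if (--(*_pCounter) < 0) backpatch the call site to resolveEntryPoint;
+//     resolveEntryPoint: i = (hash(this->GetMethodTable()) ^ _hashedToken) & _cacheMask;
+//                        if (cache[i] matches (MT, _token)) goto cache[i]->target;  // cache hit
+//     slowEntryPoint:    push _tokenSlow; goto _resolveWorkerTarget;  // full lookup in the EE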
+
+struct ResolveStub
+{
+ inline PCODE failEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)(&_failEntryPoint[0]) + THUMB_CODE; }
+ inline PCODE resolveEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)(&_resolveEntryPoint[0]) + THUMB_CODE; }
+ inline PCODE slowEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)(&_slowEntryPoint[0]) + THUMB_CODE; }
+
+ inline INT32* pCounter() { LIMITED_METHOD_CONTRACT; return _pCounter; }
+ inline UINT32 hashedToken() { LIMITED_METHOD_CONTRACT; return _hashedToken >> LOG2_PTRSIZE; }
+ inline size_t cacheAddress() { LIMITED_METHOD_CONTRACT; return _cacheAddress; }
+ inline size_t token() { LIMITED_METHOD_CONTRACT; return _token; }
+ inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(ResolveStub); }
+
+private:
+ friend struct ResolveHolder;
+ const static int resolveEntryPointLen = 32;
+ const static int slowEntryPointLen = 4;
+ const static int failEntryPointLen = 14;
+
+ WORD _resolveEntryPoint[resolveEntryPointLen];
+ WORD _slowEntryPoint[slowEntryPointLen];
+ WORD _failEntryPoint[failEntryPointLen];
+ INT32* _pCounter;
+ UINT32 _hashedToken;
+ size_t _cacheAddress; // lookupCache
+ size_t _token;
+ size_t _tokenSlow;
+ PCODE _resolveWorkerTarget;
+ UINT32 _cacheMask;
+};
+
+/* ResolveHolders are the containers for ResolveStubs. They provide
+for any alignment of the stubs as necessary. The stubs are placed in a hash table keyed by
+the token for which they are built. Efficiency of access requires that this token be aligned.
+For now, we have copied that field into the ResolveHolder itself; if the resolve stub is arranged such that
+any of its inlined tokens (non-prehashed) is aligned, then the token field in the ResolveHolder
+is not needed. */
+struct ResolveHolder
+{
+ static void InitializeStatic();
+
+ void Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+ size_t dispatchToken, UINT32 hashedToken,
+ void * cacheAddr, INT32 * counterAddr);
+
+ ResolveStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
+
+ static ResolveHolder* FromFailEntry(PCODE failEntry);
+ static ResolveHolder* FromResolveEntry(PCODE resolveEntry);
+
+private:
+ ResolveStub _stub;
+};
+#include <poppack.h>
+
+
+#ifdef DECLARE_DATA
+
+#ifndef DACCESS_COMPILE
+
+#ifdef STUB_LOGGING
+extern size_t g_lookup_inline_counter;
+extern size_t g_mono_call_counter;
+extern size_t g_mono_miss_counter;
+extern size_t g_poly_call_counter;
+extern size_t g_poly_miss_counter;
+#endif
+
+TADDR StubDispatchFrame_MethodFrameVPtr;
+
+LookupHolder* LookupHolder::FromLookupEntry(PCODE lookupEntry)
+{
+ lookupEntry = lookupEntry & ~THUMB_CODE;
+ return (LookupHolder*) ( lookupEntry - offsetof(LookupHolder, _stub) - offsetof(LookupStub, _entryPoint) );
+}
+
+
+/* Template used to generate the stub. We generate a stub by allocating a block of
+   memory, copying the template over it, and updating just the specific fields that need
+ to be changed.
+*/
+DispatchStub dispatchInit;
+
+DispatchHolder* DispatchHolder::FromDispatchEntry(PCODE dispatchEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+ dispatchEntry = dispatchEntry & ~THUMB_CODE;
+ DispatchHolder* dispatchHolder = (DispatchHolder*) ( dispatchEntry - offsetof(DispatchHolder, _stub) - offsetof(DispatchStub, _entryPoint) );
+ // _ASSERTE(dispatchHolder->_stub._entryPoint[0] == dispatchInit._entryPoint[0]);
+ return dispatchHolder;
+}
+
+
+/* Template used to generate the stub. We generate a stub by allocating a block of
+   memory, copying the template over it, and updating just the specific fields that need
+ to be changed.
+*/
+
+ResolveStub resolveInit;
+
+ResolveHolder* ResolveHolder::FromFailEntry(PCODE failEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+ failEntry = failEntry & ~THUMB_CODE;
+ ResolveHolder* resolveHolder = (ResolveHolder*) ( failEntry - offsetof(ResolveHolder, _stub) - offsetof(ResolveStub, _failEntryPoint) );
+ // _ASSERTE(resolveHolder->_stub._resolveEntryPoint[0] == resolveInit._resolveEntryPoint[0]);
+ return resolveHolder;
+}
+
+ResolveHolder* ResolveHolder::FromResolveEntry(PCODE resolveEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+ resolveEntry = resolveEntry & ~THUMB_CODE;
+ ResolveHolder* resolveHolder = (ResolveHolder*) ( resolveEntry - offsetof(ResolveHolder, _stub) - offsetof(ResolveStub, _resolveEntryPoint) );
+ // _ASSERTE(resolveHolder->_stub._resolveEntryPoint[0] == resolveInit._resolveEntryPoint[0]);
+ return resolveHolder;
+}
+
+
+#endif // DACCESS_COMPILE
+
+VirtualCallStubManager::StubKind VirtualCallStubManager::predictStubKind(PCODE stubStartAddress)
+{
+ SUPPORTS_DAC;
+#ifdef DACCESS_COMPILE
+
+ return SK_BREAKPOINT; // Dac always uses the slower lookup
+
+#else
+
+ StubKind stubKind = SK_UNKNOWN;
+ TADDR pInstr = PCODEToPINSTR(stubStartAddress);
+
+ EX_TRY
+ {
+ // If stubStartAddress is completely bogus, then this might AV,
+ // so we protect it with SEH. An AV here is OK.
+ AVInRuntimeImplOkayHolder AVOkay;
+
+ WORD firstWord = *((WORD*) pInstr);
+
+        // Assuming that RESOLVE_STUB_FIRST_WORD & DISPATCH_STUB_FIRST_WORD have the same value
+ if (firstWord == DISPATCH_STUB_FIRST_WORD)
+ {
+ WORD thirdWord = ((WORD*)pInstr)[2];
+ if(thirdWord == 0xf84d)
+ {
+ stubKind = SK_DISPATCH;
+ }
+ else if(thirdWord == 0xb460)
+ {
+ stubKind = SK_RESOLVE;
+ }
+ }
+ else if (firstWord == 0xf8df)
+ {
+ stubKind = SK_LOOKUP;
+ }
+ }
+ EX_CATCH
+ {
+ stubKind = SK_UNKNOWN;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return stubKind;
+
+#endif // DACCESS_COMPILE
+}
+
+#endif //DECLARE_DATA
+
+#endif // _VIRTUAL_CALL_STUB_ARM_H
diff --git a/src/vm/arm64/.gitmirror b/src/vm/arm64/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/vm/arm64/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror. \ No newline at end of file
diff --git a/src/vm/arm64/CallDescrWorkerARM64.asm b/src/vm/arm64/CallDescrWorkerARM64.asm
new file mode 100644
index 0000000000..ae98ea7364
--- /dev/null
+++ b/src/vm/arm64/CallDescrWorkerARM64.asm
@@ -0,0 +1,139 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+;; ==++==
+;;
+
+;;
+;; ==--==
+#include "ksarm64.h"
+
+#include "asmconstants.h"
+
+
+
+ IMPORT CallDescrWorkerUnwindFrameChainHandler
+;;-----------------------------------------------------------------------------
+;; This helper routine enregisters the appropriate arguments and makes the
+;; actual call.
+;;-----------------------------------------------------------------------------
+;;void CallDescrWorkerInternal(CallDescrData * pCallDescrData);
+ NESTED_ENTRY CallDescrWorkerInternal,,CallDescrWorkerUnwindFrameChainHandler
+ PROLOG_SAVE_REG_PAIR fp, lr, #-32!
+        PROLOG_SAVE_REG x19, #16             ;the stack slot at sp+24 is empty for 16-byte alignment
+
+ mov x19, x0 ; save pCallDescrData in x19
+
+ ldr w1, [x19,#CallDescrData__numStackSlots]
+ cbz w1, Ldonestack
+
+ ;; Add frame padding to ensure frame size is a multiple of 16 (a requirement of the OS ABI).
+ ;; We push two registers (above) and numStackSlots arguments (below). If this comes to an odd number
+ ;; of slots we must pad with another. This simplifies to "if the low bit of numStackSlots is set,
+ ;; extend the stack another eight bytes".
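+    ;; Illustrative example: numStackSlots == 3 stores one slot with 8 bytes of
+    ;; padding (16 bytes total), then one ldp/stp pair, so sp stays 16-byte
+    ;; aligned throughout.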
+ ldr x0, [x19,#CallDescrData__pSrc]
+ add x0, x0, x1 lsl #3 ; pSrcEnd=pSrc+8*numStackSlots
+ ands x2, x1, #1
+ beq Lstackloop
+
+ ;; This loop copies numStackSlots words
+ ;; from [pSrcEnd-8,pSrcEnd-16,...] to [sp-8,sp-16,...]
+
+ ;; pad and store one stack slot as number of slots are odd
+ ldr x4, [x0,#-8]!
+ str x4, [sp,#-16]!
+ subs x1, x1, #1
+ beq Ldonestack
+Lstackloop
+ ldp x2, x4, [x0,#-16]!
+ stp x2, x4, [sp,#-16]!
+ subs x1, x1, #2
+ bne Lstackloop
+Ldonestack
+
+ ;; If FP arguments are supplied in registers (x8 != NULL) then initialize all of them from the pointer
+ ;; given in x8.
+ ldr x8, [x19,#CallDescrData__pFloatArgumentRegisters]
+ cbz x8, LNoFloatingPoint
+ ldp d0, d1, [x8]
+ ldp d2, d3, [x8, #16]
+ ldp d4, d5, [x8, #32]
+ ldp d6, d7, [x8, #48]
+LNoFloatingPoint
+
+ ;; Copy [pArgumentRegisters, ..., pArgumentRegisters + 56]
+ ;; into x0, ..., x7
+
+ ldr x8, [x19,#CallDescrData__pArgumentRegisters]
+ ldp x0, x1, [x8]
+ ldp x2, x3, [x8, #16]
+ ldp x4, x5, [x8, #32]
+ ldp x6, x7, [x8, #48]
+
+ ;; ARM64TODO: => see if anything special needs to be done for remoting
+ ;; call pTarget
+ ldr x8, [x19,#CallDescrData__pTarget]
+ blr x8
+
+ ldr w3, [x19,#CallDescrData__fpReturnSize]
+
+ ;; Int return case
+ cbz w3, LIntReturn
+
+ ;; Float return case
+ cmp w3, #4
+ beq LFloatReturn
+
+ ;; Double return case
+ cmp w3, #8
+ bne LNoDoubleReturn
+
+LFloatReturn
+ str d0, [x19, #(CallDescrData__returnValue + 0)]
+ b LReturnDone
+
+LNoDoubleReturn
+
+ ;;FloatHFAReturn return case
+ cmp w3, #16
+ bne LNoFloatHFAReturn
+
+ stp s0, s1, [x19, #(CallDescrData__returnValue + 0)]
+ stp s2, s3, [x19, #(CallDescrData__returnValue + 0x08)]
+ b LReturnDone
+LNoFloatHFAReturn
+
+ ;;DoubleHFAReturn return case
+ cmp w3, #32
+ bne LNoDoubleHFAReturn
+
+ stp d0, d1, [x19, #(CallDescrData__returnValue + 0)]
+ stp d2, d3, [x19, #(CallDescrData__returnValue + 0x10)]
+ b LReturnDone
+
+LNoDoubleHFAReturn
+
+ EMIT_BREAKPOINT ; Unreachable
+
+LIntReturn
+ ;; Save return value into retbuf for int
+ str x0, [x19, #(CallDescrData__returnValue + 0)]
+
+LReturnDone
+
+#ifdef _DEBUG
+ ;; trash the floating point registers to ensure that the HFA return values
+ ;; won't survive by accident
+ ldp d0, d1, [sp]
+ ldp d2, d3, [sp, #16]
+#endif
+
+ EPILOG_STACK_RESTORE
+        EPILOG_RESTORE_REG x19, #16    ;the stack slot at sp+24 is empty for 16-byte alignment
+ EPILOG_RESTORE_REG_PAIR fp, lr, #32!
+ EPILOG_RETURN
+ NESTED_END
+
+ END
diff --git a/src/vm/arm64/PInvokeStubs.asm b/src/vm/arm64/PInvokeStubs.asm
new file mode 100644
index 0000000000..a766164817
--- /dev/null
+++ b/src/vm/arm64/PInvokeStubs.asm
@@ -0,0 +1,138 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;;
+
+;;
+;; ==--==
+#include "ksarm64.h"
+
+#include "asmconstants.h"
+
+#include "asmmacros.h"
+
+
+ IMPORT VarargPInvokeStubWorker
+ IMPORT GenericPInvokeCalliStubWorker
+
+
+; ------------------------------------------------------------------
+; Macro to generate PInvoke Stubs.
+; $__PInvokeStubFuncName : function which calls the actual stub obtained from VASigCookie
+; $__PInvokeGenStubFuncName : function which generates the IL stubs for PInvoke
+;
+; Params :-
+; $FuncPrefix : prefix of the function name for the stub
+; Eg. VarargPinvoke, GenericPInvokeCalli
+; $VASigCookieReg : register which contains the VASigCookie
+; $SaveFPArgs : {true} or {false}. For variadic functions FP args are not passed in FP regs,
+; so there is no need to save the FP argument registers for vararg PInvoke
+ MACRO
+
+ PINVOKE_STUB $FuncPrefix,$VASigCookieReg,$HiddenArg,$SaveFPArgs
+
+ GBLS __PInvokeStubFuncName
+ GBLS __PInvokeGenStubFuncName
+ GBLS __PInvokeStubWorkerName
+
+ IF "$FuncPrefix" == "GenericPInvokeCalli"
+__PInvokeStubFuncName SETS "$FuncPrefix":CC:"Helper"
+ ELSE
+__PInvokeStubFuncName SETS "$FuncPrefix":CC:"Stub"
+ ENDIF
+__PInvokeGenStubFuncName SETS "$FuncPrefix":CC:"GenILStub"
+__PInvokeStubWorkerName SETS "$FuncPrefix":CC:"StubWorker"
+
+ IF "$VASigCookieReg" == "x1"
+__PInvokeStubFuncName SETS "$__PInvokeStubFuncName":CC:"_RetBuffArg"
+__PInvokeGenStubFuncName SETS "$__PInvokeGenStubFuncName":CC:"_RetBuffArg"
+ ENDIF
+
+ NESTED_ENTRY $__PInvokeStubFuncName
+
+ ; get the stub
+ ldr x9, [$VASigCookieReg, #VASigCookie__pNDirectILStub]
+
+ ; if null goto stub generation
+ cbz x9, %0
+
+
+ EPILOG_BRANCH_REG x9
+
+0
+ EPILOG_BRANCH $__PInvokeGenStubFuncName
+
+ NESTED_END
+
+
+ NESTED_ENTRY $__PInvokeGenStubFuncName
+
+ PROLOG_WITH_TRANSITION_BLOCK 0, $SaveFPArgs
+
+        ; x2 = Unmanaged Target/MethodDesc
+ mov x2, $HiddenArg
+
+ ; x1 = VaSigCookie
+ IF "$VASigCookieReg" != "x1"
+ mov x1, $VASigCookieReg
+ ENDIF
+
+ ; x0 = pTransitionBlock
+ add x0, sp, #__PWTB_TransitionBlock
+
+ ; save hidden arg
+ mov x19, $HiddenArg
+
+ bl $__PInvokeStubWorkerName
+
+ ; restore hidden arg (method desc or unmanaged target)
+ mov $HiddenArg , x19
+
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+
+ EPILOG_BRANCH $__PInvokeStubFuncName
+
+ NESTED_END
+
+ MEND
+
+ TEXTAREA
+
+; ------------------------------------------------------------------
+; VarargPInvokeStub & VarargPInvokeGenILStub
+; There is a separate stub when the method has a hidden return buffer arg.
+;
+; in:
+; x0 = VASigCookie*
+; x12 = MethodDesc *
+;
+ PINVOKE_STUB VarargPInvoke, x0, x12, {false}
+
+
+; ------------------------------------------------------------------
+; GenericPInvokeCalliHelper & GenericPInvokeCalliGenILStub
+; Helper for generic pinvoke calli instruction
+;
+; in:
+; x15 = VASigCookie*
+; x14 = Unmanaged target
+;
+ PINVOKE_STUB GenericPInvokeCalli, x15, x14, {true}
+
+; ------------------------------------------------------------------
+; VarargPInvokeStub_RetBuffArg & VarargPInvokeGenILStub_RetBuffArg
+; Vararg PInvoke Stub when the method has a hidden return buffer arg
+;
+; in:
+; x1 = VASigCookie*
+; x12 = MethodDesc*
+;
+ PINVOKE_STUB VarargPInvoke, x1, x12, {false}
+
+
+; Must be at very end of file
+ END
diff --git a/src/vm/arm64/asmconstants.h b/src/vm/arm64/asmconstants.h
new file mode 100644
index 0000000000..04018bdb9f
--- /dev/null
+++ b/src/vm/arm64/asmconstants.h
@@ -0,0 +1,151 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// asmconstants.h -
+//
+// This header defines field offsets and constants used by assembly code
+// Be sure to rebuild clr/src/vm/ceemain.cpp after changing this file, to
+// ensure that the constants match the expected C/C++ values
+
+// #ifndef _ARM64_
+// #error this file should only be used on an ARM64 platform
+// #endif // _ARM64_
+
+#include "..\..\inc\switches.h"
+
+//-----------------------------------------------------------------------------
+
+#ifndef ASMCONSTANTS_C_ASSERT
+#define ASMCONSTANTS_C_ASSERT(cond)
+#endif
+
+#ifndef ASMCONSTANTS_RUNTIME_ASSERT
+#define ASMCONSTANTS_RUNTIME_ASSERT(cond)
+#endif
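+// When this header is included from C++ (see the rebuild note above), the
+// build is expected to define ASMCONSTANTS_C_ASSERT as a compile-time assert
+// (a static_assert/C_ASSERT equivalent), so any drift between these #defines
+// and the real struct layouts breaks the build; from assembly both macros
+// expand to nothing.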
+
+#define Thread__m_fPreemptiveGCDisabled 0x0C
+#define Thread__m_pFrame 0x10
+
+#ifndef CROSSGEN_COMPILE
+ASMCONSTANTS_C_ASSERT(Thread__m_fPreemptiveGCDisabled == offsetof(Thread, m_fPreemptiveGCDisabled));
+ASMCONSTANTS_C_ASSERT(Thread__m_pFrame == offsetof(Thread, m_pFrame));
+#endif // CROSSGEN_COMPILE
+
+#define Thread_m_pFrame Thread__m_pFrame
+#define Thread_m_fPreemptiveGCDisabled Thread__m_fPreemptiveGCDisabled
+
+#ifndef CROSSGEN_COMPILE
+#define Thread__m_pDomain 0x20
+ASMCONSTANTS_C_ASSERT(Thread__m_pDomain == offsetof(Thread, m_pDomain));
+
+#define AppDomain__m_dwId 0x08
+ASMCONSTANTS_C_ASSERT(AppDomain__m_dwId == offsetof(AppDomain, m_dwId));
+#endif
+
+#define METHODDESC_REGISTER x12
+
+#define SIZEOF__ArgumentRegisters 0x40
+ASMCONSTANTS_C_ASSERT(SIZEOF__ArgumentRegisters == sizeof(ArgumentRegisters))
+
+#define SIZEOF__FloatArgumentRegisters 0x40
+ASMCONSTANTS_C_ASSERT(SIZEOF__FloatArgumentRegisters == sizeof(FloatArgumentRegisters))
+
+#define CallDescrData__pSrc 0x00
+#define CallDescrData__numStackSlots 0x08
+#define CallDescrData__pArgumentRegisters 0x10
+#define CallDescrData__pFloatArgumentRegisters 0x18
+#define CallDescrData__fpReturnSize 0x20
+#define CallDescrData__pTarget 0x28
+#define CallDescrData__returnValue 0x30
+
+ASMCONSTANTS_C_ASSERT(CallDescrData__pSrc == offsetof(CallDescrData, pSrc))
+ASMCONSTANTS_C_ASSERT(CallDescrData__numStackSlots == offsetof(CallDescrData, numStackSlots))
+ASMCONSTANTS_C_ASSERT(CallDescrData__pArgumentRegisters == offsetof(CallDescrData, pArgumentRegisters))
+ASMCONSTANTS_C_ASSERT(CallDescrData__pFloatArgumentRegisters == offsetof(CallDescrData, pFloatArgumentRegisters))
+ASMCONSTANTS_C_ASSERT(CallDescrData__fpReturnSize == offsetof(CallDescrData, fpReturnSize))
+ASMCONSTANTS_C_ASSERT(CallDescrData__pTarget == offsetof(CallDescrData, pTarget))
+ASMCONSTANTS_C_ASSERT(CallDescrData__returnValue == offsetof(CallDescrData, returnValue))
+
+#define CORINFO_NullReferenceException_ASM 0
+ASMCONSTANTS_C_ASSERT( CORINFO_NullReferenceException_ASM
+ == CORINFO_NullReferenceException);
+
+
+// Offset of the array containing the address of captured registers in MachState
+#define MachState__captureX19_X28 0x0
+ASMCONSTANTS_C_ASSERT(MachState__captureX19_X28 == offsetof(MachState, captureX19_X28))
+
+// Offset of the array containing the address of preserved registers in MachState
+#define MachState__ptrX19_X28 0x50
+ASMCONSTANTS_C_ASSERT(MachState__ptrX19_X28 == offsetof(MachState, ptrX19_X28))
+
+#define MachState__isValid 0xb8
+ASMCONSTANTS_C_ASSERT(MachState__isValid == offsetof(MachState, _isValid))
+
+#define LazyMachState_captureX19_X28 MachState__captureX19_X28
+ASMCONSTANTS_C_ASSERT(LazyMachState_captureX19_X28 == offsetof(LazyMachState, captureX19_X28))
+
+#define LazyMachState_captureSp (MachState__isValid+8) // padding for alignment
+ASMCONSTANTS_C_ASSERT(LazyMachState_captureSp == offsetof(LazyMachState, captureSp))
+
+#define LazyMachState_captureIp (LazyMachState_captureSp+8)
+ASMCONSTANTS_C_ASSERT(LazyMachState_captureIp == offsetof(LazyMachState, captureIp))
+
+#define LazyMachState_captureFp (LazyMachState_captureSp+16)
+ASMCONSTANTS_C_ASSERT(LazyMachState_captureFp == offsetof(LazyMachState, captureFp))
+
+#define VASigCookie__pNDirectILStub 0x8
+ASMCONSTANTS_C_ASSERT(VASigCookie__pNDirectILStub == offsetof(VASigCookie, pNDirectILStub))
+
+#define DelegateObject___methodPtr 0x18
+ASMCONSTANTS_C_ASSERT(DelegateObject___methodPtr == offsetof(DelegateObject, _methodPtr));
+
+#define DelegateObject___target 0x08
+ASMCONSTANTS_C_ASSERT(DelegateObject___target == offsetof(DelegateObject, _target));
+
+#define SIZEOF__GSCookie 0x8
+ASMCONSTANTS_C_ASSERT(SIZEOF__GSCookie == sizeof(GSCookie));
+
+#define SIZEOF__Frame 0x10
+ASMCONSTANTS_C_ASSERT(SIZEOF__Frame == sizeof(Frame));
+
+#define SIZEOF__CONTEXT 0x390
+ASMCONSTANTS_C_ASSERT(SIZEOF__CONTEXT == sizeof(T_CONTEXT));
+
+
+#ifdef FEATURE_COMINTEROP
+
+#define SIZEOF__ComMethodFrame 0x68
+ASMCONSTANTS_C_ASSERT(SIZEOF__ComMethodFrame == sizeof(ComMethodFrame));
+
+#define UnmanagedToManagedFrame__m_pvDatum 0x10
+ASMCONSTANTS_C_ASSERT(UnmanagedToManagedFrame__m_pvDatum == offsetof(UnmanagedToManagedFrame, m_pvDatum));
+
+#endif // FEATURE_COMINTEROP
+
+
+#define UMEntryThunk__m_pUMThunkMarshInfo 0x18
+ASMCONSTANTS_C_ASSERT(UMEntryThunk__m_pUMThunkMarshInfo == offsetof(UMEntryThunk, m_pUMThunkMarshInfo))
+
+#define UMEntryThunk__m_dwDomainId 0x20
+ASMCONSTANTS_C_ASSERT(UMEntryThunk__m_dwDomainId == offsetof(UMEntryThunk, m_dwDomainId))
+
+#define UMThunkMarshInfo__m_pILStub 0x00
+ASMCONSTANTS_C_ASSERT(UMThunkMarshInfo__m_pILStub == offsetof(UMThunkMarshInfo, m_pILStub))
+
+#define UMThunkMarshInfo__m_cbActualArgSize 0x08
+ASMCONSTANTS_C_ASSERT(UMThunkMarshInfo__m_cbActualArgSize == offsetof(UMThunkMarshInfo, m_cbActualArgSize))
+
+#define REDIRECTSTUB_SP_OFFSET_CONTEXT 0
+
+#define CONTEXT_Pc 0x108
+ASMCONSTANTS_C_ASSERT(CONTEXT_Pc == offsetof(T_CONTEXT,Pc))
+
+#define SIZEOF__FaultingExceptionFrame (SIZEOF__Frame + 0x10 + SIZEOF__CONTEXT)
+#define FaultingExceptionFrame__m_fFilterExecuted SIZEOF__Frame
+ASMCONSTANTS_C_ASSERT(SIZEOF__FaultingExceptionFrame == sizeof(FaultingExceptionFrame));
+ASMCONSTANTS_C_ASSERT(FaultingExceptionFrame__m_fFilterExecuted == offsetof(FaultingExceptionFrame, m_fFilterExecuted));
+
+#undef ASMCONSTANTS_RUNTIME_ASSERT
+#undef ASMCONSTANTS_C_ASSERT
diff --git a/src/vm/arm64/asmhelpers.asm b/src/vm/arm64/asmhelpers.asm
new file mode 100644
index 0000000000..e8bea04ae1
--- /dev/null
+++ b/src/vm/arm64/asmhelpers.asm
@@ -0,0 +1,1024 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+;; ==++==
+;;
+
+;;
+;; ==--==
+#include "ksarm64.h"
+#include "asmconstants.h"
+#include "asmmacros.h"
+
+ IMPORT VirtualMethodFixupWorker
+ IMPORT ExternalMethodFixupWorker
+ IMPORT PreStubWorker
+ IMPORT NDirectImportWorker
+ IMPORT VSD_ResolveWorker
+ IMPORT JIT_InternalThrow
+ IMPORT ComPreStubWorker
+ IMPORT COMToCLRWorker
+ IMPORT CallDescrWorkerUnwindFrameChainHandler
+ IMPORT UMEntryPrestubUnwindFrameChainHandler
+ IMPORT UMThunkStubUnwindFrameChainHandler
+ IMPORT TheUMEntryPrestubWorker
+ IMPORT GetThread
+ IMPORT CreateThreadBlockThrow
+ IMPORT UMThunkStubRareDisableWorker
+ IMPORT UM2MDoADCallBack
+ IMPORT GetCurrentSavedRedirectContext
+ IMPORT LinkFrameAndThrow
+ IMPORT FixContextHandler
+
+ IMPORT g_ephemeral_low
+ IMPORT g_ephemeral_high
+ IMPORT g_lowest_address
+ IMPORT g_highest_address
+ IMPORT g_card_table
+ IMPORT g_TrapReturningThreads
+
+ TEXTAREA
+
+;; LPVOID __stdcall GetCurrentIP(void);
+ LEAF_ENTRY GetCurrentIP
+ mov x0, lr
+ ret lr
+ LEAF_END
+
+;; LPVOID __stdcall GetCurrentSP(void);
+ LEAF_ENTRY GetCurrentSP
+ mov x0, sp
+ ret lr
+ LEAF_END
+
+;;-----------------------------------------------------------------------------
+;; This routine captures the machine state. It is used by helper method frame
+;;-----------------------------------------------------------------------------
+;;void LazyMachStateCaptureState(struct LazyMachState *pState);
+ LEAF_ENTRY LazyMachStateCaptureState
+ ;; marks that this is not yet valid
+ mov w1, #0
+ str w1, [x0, #MachState__isValid]
+
+ str lr, [x0, #LazyMachState_captureIp]
+
+ str fp, [x0, #LazyMachState_captureFp]
+
+ ;; str instruction does not save sp register directly so move to temp register
+ mov x1, sp
+ str x1, [x0, #LazyMachState_captureSp]
+
+ ;; save non-volatile registers that can contain object references
+ add x1, x0, #LazyMachState_captureX19_X28
+ stp x19, x20, [x1, #(16*0)]
+ stp x21, x22, [x1, #(16*1)]
+ stp x23, x24, [x1, #(16*2)]
+ stp x25, x26, [x1, #(16*3)]
+ stp x27, x28, [x1, #(16*4)]
+
+ ret lr
+ LEAF_END
+
+ ;
+ ; If a preserved register were pushed onto the stack between
+ ; the managed caller and the H_M_F, ptrX19_X28 will point to its
+ ; location on the stack and it would have been updated on the
+ ; stack by the GC already and it will be popped back into the
+ ; appropriate register when the appropriate epilog is run.
+ ;
+ ; Otherwise, the register is preserved across all the code
+ ; in this HCALL or FCALL, so we need to update those registers
+ ; here because the GC will have updated our copies in the
+ ; frame.
+ ;
+ ; So, if ptrX19_X28 points into the MachState, we need to update
+ ; the register here. That's what this macro does.
+ ;
+
+ MACRO
+ RestoreRegMS $regIndex, $reg
+
+ ; Incoming:
+ ;
+ ; x0 = address of MachState
+ ;
+ ; $regIndex: Index of the register (x19-x28). For x19, index is 19.
+ ; For x20, index is 20, and so on.
+ ;
+ ; $reg: Register name (e.g. x19, x20, etc)
+ ;
+ ; Get the address of the specified captured register from machine state
+ add x2, x0, #(MachState__captureX19_X28 + (($regIndex-19)*8))
+
+ ; Get the content of specified preserved register pointer from machine state
+ ldr x3, [x0, #(MachState__ptrX19_X28 + (($regIndex-19)*8))]
+
+ cmp x2, x3
+ bne %FT0
+ ldr $reg, [x2]
+0
+
+ MEND
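+        ; Illustrative walkthrough: "RestoreRegMS 19, X19" compares the address
+        ; of MachState.captureX19_X28[0] (x2) against the pointer stored in
+        ; MachState.ptrX19_X28[0] (x3). When they are equal, the GC's updated
+        ; value lives in the capture array, so x19 is reloaded from it;
+        ; otherwise the register was already restored through a stack location.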
+
+; EXTERN_C int __fastcall HelperMethodFrameRestoreState(
+; INDEBUG_COMMA(HelperMethodFrame *pFrame)
+; MachState *pState
+; )
+ LEAF_ENTRY HelperMethodFrameRestoreState
+
+#ifdef _DEBUG
+ mov x0, x1
+#endif
+
+ ; If machine state is invalid, then simply exit
+ ldr x1, [x0, #MachState__isValid]
+ cmp x1, #0
+ beq Done
+
+ RestoreRegMS 19, X19
+ RestoreRegMS 20, X20
+ RestoreRegMS 21, X21
+ RestoreRegMS 22, X22
+ RestoreRegMS 23, X23
+ RestoreRegMS 24, X24
+ RestoreRegMS 25, X25
+ RestoreRegMS 26, X26
+ RestoreRegMS 27, X27
+ RestoreRegMS 28, X28
+Done
+    ; It's imperative that the return value of HelperMethodFrameRestoreState is zero
+ ; as it is used in the state machine to loop until it becomes zero.
+ ; Refer to HELPER_METHOD_FRAME_END macro for details.
+ mov x0,#0
+ ret lr
+
+ LEAF_END
+
+; ------------------------------------------------------------------
+; The call in ndirect import precode points to this function.
+ NESTED_ENTRY NDirectImportThunk
+
+ PROLOG_SAVE_REG_PAIR fp, lr, #-144!
+ SAVE_ARGUMENT_REGISTERS sp, 16
+ SAVE_FLOAT_ARGUMENT_REGISTERS sp, 80
+
+ mov x0, x12
+ bl NDirectImportWorker
+ mov x12, x0
+
+ ; pop the stack and restore original register state
+ RESTORE_FLOAT_ARGUMENT_REGISTERS sp, 80
+ RESTORE_ARGUMENT_REGISTERS sp, 16
+ EPILOG_RESTORE_REG_PAIR fp, lr, #144!
+
+ ; If we got back from NDirectImportWorker, the MD has been successfully
+ ; linked. Proceed to execute the original DLL call.
+ EPILOG_BRANCH_REG x12
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; ARM64TODO: Implement PrecodeFixupThunk when PreCode is Enabled
+ NESTED_ENTRY PrecodeFixupThunk
+ brk #0
+ NESTED_END
+; ------------------------------------------------------------------
+
+ NESTED_ENTRY ThePreStub
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ add x0, sp, #__PWTB_TransitionBlock ; pTransitionBlock
+ mov x1, METHODDESC_REGISTER ; pMethodDesc
+
+ bl PreStubWorker
+
+ mov x9, x0
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ EPILOG_BRANCH_REG x9
+
+ NESTED_END
+
+;; ------------------------------------------------------------------
+;; ThePreStubPatch()
+
+ LEAF_ENTRY ThePreStubPatch
+ nop
+ThePreStubPatchLabel
+ EXPORT ThePreStubPatchLabel
+ ret lr
+ LEAF_END
+
+
+;; ------------------------------------------------------------------
+;; void ResolveWorkerAsmStub(args in regs x0-x7 & stack, x11:IndirectionCellAndFlags, x12:DispatchToken)
+;;
+;; The stub dispatch thunk which transfers control to VSD_ResolveWorker.
+ NESTED_ENTRY ResolveWorkerAsmStub
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ add x0, sp, #__PWTB_TransitionBlock ; pTransitionBlock
+ and x1, x11, #-4 ; Indirection cell
+ mov x2, x12 ; DispatchToken
+ and x3, x11, #3 ; flag
+ bl VSD_ResolveWorker
+ mov x9, x0
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+
+ EPILOG_BRANCH_REG x9
+
+ NESTED_END
+
+;-----------------------------------------------------------------------------
+; The following macros help in WRITE_BARRIER implementations
+ ; WRITE_BARRIER_ENTRY
+ ;
+ ; Declare the start of a write barrier function. Use similarly to NESTED_ENTRY. This is the only legal way
+ ; to declare a write barrier function.
+ ;
+ MACRO
+ WRITE_BARRIER_ENTRY $name
+
+ LEAF_ENTRY $name
+ MEND
+
+ ; WRITE_BARRIER_END
+ ;
+ ; The partner to WRITE_BARRIER_ENTRY, used like NESTED_END.
+ ;
+ MACRO
+ WRITE_BARRIER_END $__write_barrier_name
+
+ LEAF_END_MARKED $__write_barrier_name
+
+ MEND
+
+; void JIT_ByRefWriteBarrier
+; On entry:
+; x13 : the source address (points to object reference to write)
+; x14 : the destination address (object reference written here)
+;
+; On exit:
+; x12 : trashed
+; x13 : incremented by 8
+; x14 : incremented by 8
+; x15 : trashed
+;
+ WRITE_BARRIER_ENTRY JIT_ByRefWriteBarrier
+
+ ldr x15, [x13], 8
+ b JIT_CheckedWriteBarrier
+
+ WRITE_BARRIER_END JIT_ByRefWriteBarrier
+
+;-----------------------------------------------------------------------------
+; Simple WriteBarriers
+; void JIT_CheckedWriteBarrier(Object** dst, Object* src)
+; On entry:
+; x14 : the destination address (LHS of the assignment)
+; x15 : the object reference (RHS of the assignment)
+;
+; On exit:
+; x12 : trashed
+; x14 : incremented by 8
+; x15 : trashed
+;
+ WRITE_BARRIER_ENTRY JIT_CheckedWriteBarrier
+;; ARM64TODO: Temporary indirect access till support for :lo12:symbol is added
+ ldr x12, =g_lowest_address
+ ldr x12, [x12]
+ cmp x14, x12
+ blt NotInHeap
+
+;; ARM64TODO: Temporary indirect access till support for :lo12:symbol is added
+ ldr x12, =g_highest_address
+ ldr x12, [x12]
+ cmp x14, x12
+ blt JIT_WriteBarrier
+
+NotInHeap
+ str x15, [x14], 8
+ ret lr
+ WRITE_BARRIER_END JIT_CheckedWriteBarrier
+
+; void JIT_WriteBarrier(Object** dst, Object* src)
+; On entry:
+; x14 : the destination address (LHS of the assignment)
+; x15 : the object reference (RHS of the assignment)
+;
+; On exit:
+; x12 : trashed
+; x14 : incremented by 8
+; x15 : trashed
+;
+ WRITE_BARRIER_ENTRY JIT_WriteBarrier
+ dmb ST
+ str x15, [x14], 8
+
+ ; Branch to Exit if the reference is not in the Gen0 heap
+ ;
+;; ARM64TODO: Temporary indirect access till support for :lo12:symbol is added
+ ldr x12, =g_ephemeral_low
+ ldr x12, [x12]
+ cmp x15, x12
+ blt Exit
+
+;; ARM64TODO: Temporary indirect access till support for :lo12:symbol is added
+ ldr x12, =g_ephemeral_high
+ ldr x12, [x12]
+ cmp x15, x12
+ bgt Exit
+
+ ; Check if we need to update the card table
+;; ARM64TODO: Temporary indirect access till support for :lo12:symbol is added
+ ldr x12, =g_card_table
+ ldr x12, [x12]
+ add x15, x12, x14 lsr #11
+ ldrb w12, [x15]
+ cmp x12, 0xFF
+ beq Exit
+
+UpdateCardTable
+ mov x12, 0xFF
+ strb w12, [x15]
+Exit
+ ret lr
+ WRITE_BARRIER_END JIT_WriteBarrier
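+; Illustrative arithmetic for the card table update above: with 2KB cards the
+; card byte for a destination address dst lives at g_card_table + (dst >> 11);
+; the strb is skipped when that byte already reads 0xFF (card already dirty).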
+
+; ------------------------------------------------------------------
+; Start of the writeable code region
+ LEAF_ENTRY JIT_PatchedCodeStart
+ ret lr
+ LEAF_END
+
+; ------------------------------------------------------------------
+; End of the writeable code region
+ LEAF_ENTRY JIT_PatchedCodeLast
+ ret lr
+ LEAF_END
+
+;------------------------------------------------
+; VirtualMethodFixupStub
+;
+; In NGEN images, virtual slots inherited from cross-module dependencies
+; point to a jump thunk that calls into the following function that will
+; call into a VM helper. The VM helper is responsible for patching up
+; the thunk, upon executing the precode, so that all subsequent calls go directly
+; to the actual method body.
+;
+; This is done lazily for performance reasons.
+;
+; On entry:
+;
+; x0 = "this" pointer
+; x12 = Address of thunk
+
+ NESTED_ENTRY VirtualMethodFixupStub
+
+ ; Save arguments and return address
+ PROLOG_SAVE_REG_PAIR fp, lr, #-144!
+ SAVE_ARGUMENT_REGISTERS sp, 16
+ SAVE_FLOAT_ARGUMENT_REGISTERS sp, 80
+
+ ; Refer to ZapImportVirtualThunk::Save
+ ; for details on this.
+ ;
+ ; Move the thunk start address in x1
+ mov x1, x12
+
+ ; Call the helper in the VM to perform the actual fixup
+ ; and tell us where to tail call. x0 already contains
+ ; the this pointer.
+ bl VirtualMethodFixupWorker
+ ; On return, x0 contains the target to tailcall to
+ mov x12, x0
+
+ ; pop the stack and restore original register state
+ RESTORE_ARGUMENT_REGISTERS sp, 16
+ RESTORE_FLOAT_ARGUMENT_REGISTERS sp, 80
+ EPILOG_RESTORE_REG_PAIR fp, lr, #144!
+
+ PATCH_LABEL VirtualMethodFixupPatchLabel
+
+ ; and tailcall to the actual method
+ EPILOG_BRANCH_REG x12
+
+ NESTED_END
+;------------------------------------------------
+; ExternalMethodFixupStub
+;
+; In NGEN images, calls to cross-module external methods initially
+; point to a jump thunk that calls into the following function that will
+; call into a VM helper. The VM helper is responsible for patching up the
+; thunk, upon executing the precode, so that all subsequent calls go directly
+; to the actual method body.
+;
+; This is done lazily for performance reasons.
+;
+; On entry:
+;
+; x12 = Address of thunk
+
+ NESTED_ENTRY ExternalMethodFixupStub
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ add x0, sp, #__PWTB_TransitionBlock ; pTransitionBlock
+ mov x1, x12 ; pThunk
+
+ bl ExternalMethodFixupWorker
+
+ ; move the patched target address into x12 so that we can tail call to it
+ mov x12, x0
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ PATCH_LABEL ExternalMethodFixupPatchLabel
+ EPILOG_BRANCH_REG x12
+
+ NESTED_END
+
+; void SinglecastDelegateInvokeStub(Delegate *pThis)
+ LEAF_ENTRY SinglecastDelegateInvokeStub
+ cmp x0, #0
+ beq LNullThis
+
+ ldr x16, [x0, #DelegateObject___methodPtr]
+ ldr x0, [x0, #DelegateObject___target]
+
+ br x16
+
+LNullThis
+ mov x0, #CORINFO_NullReferenceException_ASM
+ b JIT_InternalThrow
+
+ LEAF_END
+
+#ifdef FEATURE_COMINTEROP
+
+; ------------------------------------------------------------------
+; COM to CLR stub called the first time a particular method is invoked.
+;
+; On entry:
+; x12 : ComCallMethodDesc* provided by prepad thunk
+; plus user arguments in registers and on the stack
+;
+; On exit:
+; tail calls to real method
+;
+ NESTED_ENTRY ComCallPreStub
+
+ GBLA ComCallPreStub_FrameSize
+ GBLA ComCallPreStub_StackAlloc
+ GBLA ComCallPreStub_FrameOffset
+ GBLA ComCallPreStub_ErrorReturnOffset
+
+ComCallPreStub_FrameSize SETA (SIZEOF__GSCookie + SIZEOF__ComMethodFrame)
+ComCallPreStub_StackAlloc SETA ComCallPreStub_FrameSize - SIZEOF__ArgumentRegisters - 2 * 8 ; reg args , fp & lr already pushed
+ComCallPreStub_StackAlloc SETA ComCallPreStub_StackAlloc + SIZEOF__FloatArgumentRegisters + 8; 8 for ErrorReturn
+
+ IF ComCallPreStub_StackAlloc:MOD:16 != 0
+ComCallPreStub_StackAlloc SETA ComCallPreStub_StackAlloc + 8
+ ENDIF
+
+ComCallPreStub_FrameOffset SETA (ComCallPreStub_StackAlloc - (SIZEOF__ComMethodFrame - SIZEOF__ArgumentRegisters - 2 * 8))
+ComCallPreStub_ErrorReturnOffset SETA SIZEOF__FloatArgumentRegisters
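+
+; Resulting stack layout after the prolog below (a sketch derived from the offset
+; arithmetic above; offsets are from the final sp):
+;
+; sp + 0 : saved d0-d7
+; sp + ComCallPreStub_ErrorReturnOffset : 8-byte ErrorReturn slot (plus any alignment pad)
+; sp + ComCallPreStub_FrameOffset : ComMethodFrame (its GSCookie sits just below); the
+; frame's trailing fp/lr pair and ArgumentRegisters coincide with the registers
+; saved by the prolog at sp + ComCallPreStub_StackAlloc and above.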
+
+ ; Save arguments and return address
+ PROLOG_SAVE_REG_PAIR fp, lr, #-80!
+ PROLOG_STACK_ALLOC ComCallPreStub_StackAlloc
+
+ SAVE_ARGUMENT_REGISTERS sp, (16+ComCallPreStub_StackAlloc)
+
+ SAVE_FLOAT_ARGUMENT_REGISTERS sp, 0
+
+ str x12, [sp, #(ComCallPreStub_FrameOffset + UnmanagedToManagedFrame__m_pvDatum)]
+ add x0, sp, #(ComCallPreStub_FrameOffset)
+ add x1, sp, #(ComCallPreStub_ErrorReturnOffset)
+ bl ComPreStubWorker
+
+ cbz x0, ComCallPreStub_ErrorExit
+
+ mov x12, x0
+
+ ; pop the stack and restore original register state
+ RESTORE_FLOAT_ARGUMENT_REGISTERS sp, 0
+ RESTORE_ARGUMENT_REGISTERS sp, (16+ComCallPreStub_StackAlloc)
+
+ EPILOG_STACK_FREE ComCallPreStub_StackAlloc
+ EPILOG_RESTORE_REG_PAIR fp, lr, #80!
+
+ ; and tailcall to the actual method
+ EPILOG_BRANCH_REG x12
+
+ComCallPreStub_ErrorExit
+ ldr x0, [sp, #(ComCallPreStub_ErrorReturnOffset)] ; ErrorReturn
+
+ ; pop the stack
+ EPILOG_STACK_FREE ComCallPreStub_StackAlloc
+ EPILOG_RESTORE_REG_PAIR fp, lr, #80!
+
+ EPILOG_RETURN
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; COM to CLR stub which sets up a ComMethodFrame and calls COMToCLRWorker.
+;
+; On entry:
+; x12 : ComCallMethodDesc* provided by prepad thunk
+; plus user arguments in registers and on the stack
+;
+; On exit:
+; Result in x0/d0 as per the real method being called
+;
+ NESTED_ENTRY GenericComCallStub
+
+ GBLA GenericComCallStub_FrameSize
+ GBLA GenericComCallStub_StackAlloc
+ GBLA GenericComCallStub_FrameOffset
+
+GenericComCallStub_FrameSize SETA (SIZEOF__GSCookie + SIZEOF__ComMethodFrame)
+GenericComCallStub_StackAlloc SETA GenericComCallStub_FrameSize - SIZEOF__ArgumentRegisters - 2 * 8
+GenericComCallStub_StackAlloc SETA GenericComCallStub_StackAlloc + SIZEOF__FloatArgumentRegisters
+
+ IF GenericComCallStub_StackAlloc:MOD:16 != 0
+GenericComCallStub_StackAlloc SETA GenericComCallStub_StackAlloc + 8
+ ENDIF
+
+GenericComCallStub_FrameOffset SETA (GenericComCallStub_StackAlloc - (SIZEOF__ComMethodFrame - SIZEOF__ArgumentRegisters - 2 * 8))
+
+ ; Save arguments and return address
+ PROLOG_SAVE_REG_PAIR fp, lr, #-80!
+ PROLOG_STACK_ALLOC GenericComCallStub_StackAlloc
+
+ SAVE_ARGUMENT_REGISTERS sp, (16+GenericComCallStub_StackAlloc)
+ SAVE_FLOAT_ARGUMENT_REGISTERS sp, 0
+
+ str x12, [sp, #(GenericComCallStub_FrameOffset + UnmanagedToManagedFrame__m_pvDatum)]
+ add x1, sp, #GenericComCallStub_FrameOffset
+ bl COMToCLRWorker
+
+ ; pop the stack
+ EPILOG_STACK_FREE GenericComCallStub_StackAlloc
+ EPILOG_RESTORE_REG_PAIR fp, lr, #80!
+
+ EPILOG_RETURN
+
+ NESTED_END
+
+; ------------------------------------------------------------------
+; COM to CLR stub called from COMToCLRWorker that actually dispatches to the real managed method.
+;
+; On entry:
+; x0 : dwStackSlots, count of argument stack slots to copy
+; x1 : pFrame, ComMethodFrame pushed by GenericComCallStub above
+; x2 : pTarget, address of code to call
+; x3 : pSecretArg, hidden argument passed to target above in x12
+; x4 : pDangerousThis, managed 'this' reference
+;
+; On exit:
+; Result in x0/d0 as per the real method being called
+;
+ NESTED_ENTRY COMToCLRDispatchHelper,,CallDescrWorkerUnwindFrameChainHandler
+
+ PROLOG_SAVE_REG_PAIR fp, lr, #-16!
+
+ cbz x0, COMToCLRDispatchHelper_RegSetup
+
+ add x9, x1, #SIZEOF__ComMethodFrame
+ add x9, x9, x0, LSL #3
+COMToCLRDispatchHelper_StackLoop
+ ldr x8, [x9, #-8]!
+ str x8, [sp, #-8]!
+ sub x0, x0, #1
+ cbnz x0, COMToCLRDispatchHelper_StackLoop
+
+COMToCLRDispatchHelper_RegSetup
+
+ RESTORE_FLOAT_ARGUMENT_REGISTERS x1, -1 * GenericComCallStub_FrameOffset
+
+ mov lr, x2
+ mov x12, x3
+
+ mov x0, x4
+
+ ldp x2, x3, [x1, #(SIZEOF__ComMethodFrame - SIZEOF__ArgumentRegisters + 16)]
+ ldp x4, x5, [x1, #(SIZEOF__ComMethodFrame - SIZEOF__ArgumentRegisters + 32)]
+ ldp x6, x7, [x1, #(SIZEOF__ComMethodFrame - SIZEOF__ArgumentRegisters + 48)]
+
+ ldr x1, [x1, #(SIZEOF__ComMethodFrame - SIZEOF__ArgumentRegisters + 8)]
+
+ blr lr
+
+ EPILOG_STACK_RESTORE
+ EPILOG_RESTORE_REG_PAIR fp, lr, #16!
+ EPILOG_RETURN
+
+ NESTED_END
+
+#endif ; FEATURE_COMINTEROP
+
+;
+; x12 = UMEntryThunk*
+;
+ NESTED_ENTRY TheUMEntryPrestub,,UMEntryPrestubUnwindFrameChainHandler
+
+ ; Save arguments and return address
+ PROLOG_SAVE_REG_PAIR fp, lr, #-144!
+ SAVE_ARGUMENT_REGISTERS sp, 16
+ SAVE_FLOAT_ARGUMENT_REGISTERS sp, 80
+
+ mov x0, x12
+ bl TheUMEntryPrestubWorker
+
+ ; save real target address in x12.
+ mov x12, x0
+
+ ; pop the stack and restore original register state
+ RESTORE_ARGUMENT_REGISTERS sp, 16
+ RESTORE_FLOAT_ARGUMENT_REGISTERS sp, 80
+ EPILOG_RESTORE_REG_PAIR fp, lr, #144!
+
+ ; and tailcall to the actual method
+ EPILOG_BRANCH_REG x12
+
+ NESTED_END
+
+;
+; x12 = UMEntryThunk*
+;
+ NESTED_ENTRY UMThunkStub,,UMThunkStubUnwindFrameChainHandler
+
+ ; Save arguments and return address
+ PROLOG_SAVE_REG_PAIR fp, lr, #-96! ; 16 for fp & lr, 64 for regArgs, 8 for x19 and 8 for x12
+ ; save callee saved reg x19. x19 is used in the method to store thread*
+ PROLOG_SAVE_REG x19, #88
+
+ SAVE_ARGUMENT_REGISTERS sp, 16
+
+ GBLA UMThunkStub_HiddenArg ; offset of saved UMEntryThunk *
+ GBLA UMThunkStub_StackArgs ; offset of original stack args (total size of UMThunkStub frame)
+UMThunkStub_HiddenArg SETA 80
+UMThunkStub_StackArgs SETA 96
+
+ ; save UMEntryThunk*
+ str x12, [sp, #UMThunkStub_HiddenArg]
+
+ ; assuming GetThread does not clobber FP Args
+ bl GetThread
+ cbz x0, UMThunkStub_DoThreadSetup
+
+UMThunkStub_HaveThread
+ mov x19, x0 ; x19 = Thread *
+
+ mov x9, 1
+ ; m_fPreemptiveGCDisabled is a 4-byte field, so use the 32-bit variant
+ str w9, [x19, #Thread__m_fPreemptiveGCDisabled]
+
+ ldr x2, =g_TrapReturningThreads
+ ldr x3, [x2]
+ ; assuming x0 contains Thread* before jumping to UMThunkStub_DoTrapReturningThreads
+ cbnz x3, UMThunkStub_DoTrapReturningThreads
+
+UMThunkStub_InCooperativeMode
+ ldr x12, [fp, #UMThunkStub_HiddenArg] ; x12 = UMEntryThunk*
+
+ ldr x0, [x19, #Thread__m_pDomain]
+
+ ; m_dwDomainId is 4 bytes, so use the 32-bit variant
+ ldr w1, [x12, #UMEntryThunk__m_dwDomainId]
+ ldr w0, [x0, #AppDomain__m_dwId]
+ cmp w0, w1
+ bne UMThunkStub_WrongAppDomain
+
+ ldr x3, [x12, #UMEntryThunk__m_pUMThunkMarshInfo] ; x3 = m_pUMThunkMarshInfo
+
+ ; m_cbActualArgSize is UINT32 and hence occupies 4 bytes
+ ldr w2, [x3, #UMThunkMarshInfo__m_cbActualArgSize] ; w2 = Stack arg bytes
+ cbz w2, UMThunkStub_RegArgumentsSetup
+
+ ; extend to 64-bits
+ uxtw x2, w2
+
+ ; Source pointer
+ add x0, fp, #UMThunkStub_StackArgs
+
+ ; move source pointer to end of Stack Args
+ add x0, x0, x2
+
+ ; Count of stack slot pairs to copy (divide by 16)
+ lsr x1, x2, #4
+
+ ; Is there an extra stack slot (can happen when stack arg bytes not multiple of 16)
+ and x2, x2, #8
+
+ ; If yes then start source pointer from 16 byte aligned stack slot
+ add x0, x0, x2
+
+ ; increment stack slot pair count by 1 if x2 is not zero
+ add x1, x1, x2, LSR #3
+
+UMThunkStub_StackLoop
+ ldp x4, x5, [x0, #-16]! ; pre-Index
+ stp x4, x5, [sp, #-16]! ; pre-Index
+ subs x1, x1, #1
+ bne UMThunkStub_StackLoop
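+
+ ; In C terms, the copy above amounts to (a sketch using the same names):
+ ; pairs = cbStackArgs / 16 + ((cbStackArgs & 8) ? 1 : 0); // an odd slot rounds up to a pair
+ ; src = fp + UMThunkStub_StackArgs + cbStackArgs + (cbStackArgs & 8);
+ ; while (pairs--) { src -= 16; sp -= 16; copy 16 bytes from src to sp; }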
+
+UMThunkStub_RegArgumentsSetup
+ ldr x16, [x3, #UMThunkMarshInfo__m_pILStub]
+
+ RESTORE_ARGUMENT_REGISTERS fp, 16
+
+ blr x16
+
+UMThunkStub_PostCall
+ mov x4, 0
+ ; m_fPreemptiveGCDisabled is a 4-byte field, so use the 32-bit variant
+ str w4, [x19, #Thread__m_fPreemptiveGCDisabled]
+
+ EPILOG_STACK_RESTORE
+ EPILOG_RESTORE_REG x19, #88
+ EPILOG_RESTORE_REG_PAIR fp, lr, #96!
+
+ EPILOG_RETURN
+
+UMThunkStub_DoThreadSetup
+ sub sp, sp, #SIZEOF__FloatArgumentRegisters
+ SAVE_FLOAT_ARGUMENT_REGISTERS sp, 0
+ bl CreateThreadBlockThrow
+ RESTORE_FLOAT_ARGUMENT_REGISTERS sp, 0
+ add sp, sp, #SIZEOF__FloatArgumentRegisters
+ b UMThunkStub_HaveThread
+
+UMThunkStub_DoTrapReturningThreads
+ sub sp, sp, #SIZEOF__FloatArgumentRegisters
+ SAVE_FLOAT_ARGUMENT_REGISTERS sp, 0
+ ; x0 already contains Thread* pThread
+ ; UMEntryThunk* pUMEntry
+ ldr x1, [fp, #UMThunkStub_HiddenArg]
+ bl UMThunkStubRareDisableWorker
+ RESTORE_FLOAT_ARGUMENT_REGISTERS sp, 0
+ add sp, sp, #SIZEOF__FloatArgumentRegisters
+ b UMThunkStub_InCooperativeMode
+
+UMThunkStub_WrongAppDomain
+ ; Save the FP args, as they are read by UM2MThunk_WrapperHelper
+ sub sp, sp, #SIZEOF__FloatArgumentRegisters
+ SAVE_FLOAT_ARGUMENT_REGISTERS sp, 0
+
+ ; UMEntryThunk* pUMEntry
+ ldr x0, [fp, #UMThunkStub_HiddenArg]
+
+ ; void * pArgs
+ add x2, fp, #16
+
+ ; remaining arguments are unused
+ bl UM2MDoADCallBack
+
+ ; restore integral return value
+ ldr x0, [fp, #16]
+
+ ; restore FP or HFA return value
+ RESTORE_FLOAT_ARGUMENT_REGISTERS sp, 0
+
+ b UMThunkStub_PostCall
+
+ NESTED_END
+
+
+; UM2MThunk_WrapperHelper(void *pThunkArgs, // x0
+; int cbStackArgs, // x1 (unused)
+; void *pAddr, // x2 (unused)
+; UMEntryThunk *pEntryThunk, // x3
+; Thread *pThread) // x4
+
+; pThunkArgs points to the argument registers pushed on the stack by UMThunkStub
+
+ NESTED_ENTRY UM2MThunk_WrapperHelper
+
+ PROLOG_SAVE_REG_PAIR fp, lr, #-32!
+ PROLOG_SAVE_REG x19, #16
+
+
+ ; save pThunkArgs in non-volatile reg. It is required after return from call to ILStub
+ mov x19, x0
+
+ ; ARM64TODO - Is this required by ILStub
+ mov x12, x3 ; // x12 = UMEntryThunk *
+
+ ;
+ ; Note that layout of the arguments is given by UMThunkStub frame
+ ;
+ ldr x3, [x3, #UMEntryThunk__m_pUMThunkMarshInfo]
+
+ ; m_cbActualArgSize is 4-byte field
+ ldr w2, [x3, #UMThunkMarshInfo__m_cbActualArgSize]
+ cbz w2, UM2MThunk_WrapperHelper_RegArgumentsSetup
+
+ ; extend to 64 bits
+ uxtw x2, w2
+
+ ; Source pointer. Subtracting 16 bytes due to fp & lr
+ add x6, x0, #(UMThunkStub_StackArgs-16)
+
+ ; move source ptr to end of Stack Args
+ add x6, x6, x2
+
+ ; Count of stack slot pairs to copy (divide by 16)
+ lsr x1, x2, #4
+
+ ; Is there an extra stack slot? (can happen when stack arg bytes not multiple of 16)
+ and x2, x2, #8
+
+ ; If yes then start source pointer from 16 byte aligned stack slot
+ add x6, x6, x2
+
+ ; increment stack slot pair count by 1 if x2 is not zero
+ add x1, x1, x2, LSR #3
+
+UM2MThunk_WrapperHelper_StackLoop
+ ldp x4, x5, [x6, #-16]!
+ stp x4, x5, [sp, #-16]!
+ subs x1, x1, #1
+ bne UM2MThunk_WrapperHelper_StackLoop
+
+UM2MThunk_WrapperHelper_RegArgumentsSetup
+ ldr x16, [x3, #(UMThunkMarshInfo__m_pILStub)]
+
+ ; reload floating point registers
+ RESTORE_FLOAT_ARGUMENT_REGISTERS x0, -1 * (SIZEOF__FloatArgumentRegisters + 16)
+
+ ; reload argument registers
+ RESTORE_ARGUMENT_REGISTERS x0, 0
+
+ blr x16
+
+ ; save integral return value
+ str x0, [x19]
+ ; save FP/HFA return values
+ SAVE_FLOAT_ARGUMENT_REGISTERS x19, -1 * (SIZEOF__FloatArgumentRegisters + 16)
+
+ EPILOG_STACK_RESTORE
+ EPILOG_RESTORE_REG x19, #16
+ EPILOG_RESTORE_REG_PAIR fp, lr, #32!
+ EPILOG_RETURN
+
+ NESTED_END
+
+
+
+;; ------------------------------------------------------------------
+;; Redirection Stub for GC in fully interruptible method
+; GenerateRedirectedHandledJITCaseStub GCThreadControl
+;; ------------------------------------------------------------------
+; GenerateRedirectedHandledJITCaseStub DbgThreadControl
+;; ------------------------------------------------------------------
+; GenerateRedirectedHandledJITCaseStub UserSuspend
+;; ------------------------------------------------------------------
+; GenerateRedirectedHandledJITCaseStub YieldTask
+
+#ifdef _DEBUG
+; ------------------------------------------------------------------
+; Redirection Stub for GC Stress
+ GenerateRedirectedHandledJITCaseStub GCStress
+#endif
+
+
+; ------------------------------------------------------------------
+
+ ; This helper enables us to call into a funclet after restoring the Fp register
+ NESTED_ENTRY CallEHFunclet
+
+ ; Using the prolog below instead of PROLOG_SAVE_REG_PAIR fp, lr, #-16!
+ ; is intentional. That macro would also emit an instruction to save sp in fp.
+ ; If sp were saved in fp in the prolog, fp would not be expected to change in the
+ ; body of the method. However, this method needs to be able to change fp before
+ ; calling the funclet, so that locals can be accessed within the funclet.
+ PROLOG_SAVE_REG_PAIR x19,x20, #-16!
+ PROLOG_SAVE_REG fp, #0
+ PROLOG_SAVE_REG lr, #8
+
+ ; On entry:
+ ;
+ ; X0 = throwable
+ ; X1 = PC to invoke
+ ; X2 = address of X19 register in CONTEXT record; used to restore the non-volatile registers of CrawlFrame
+ ; X3 = address of the location where the SP of funclet's caller (i.e. this helper) should be saved.
+ ;
+ ; Save the SP of this function
+ str fp, [x3]
+
+ ldr fp, [x2, #80] ; offset of fp in CONTEXT relative to X19
+
+ ; Invoke the funclet
+ blr x1
+ nop
+
+ EPILOG_RESTORE_REG_PAIR fp, lr, #16!
+ EPILOG_RETURN
+
+ NESTED_END CallEHFunclet
+
+ ; This helper enables us to call into a filter funclet by passing it the CallerSP to look up the
+ ; frame pointer for accessing the locals in the parent method.
+ NESTED_ENTRY CallEHFilterFunclet
+
+ PROLOG_SAVE_REG_PAIR fp, lr, #-16!
+
+ ; On entry:
+ ;
+ ; X0 = throwable
+ ; X1 = SP of the caller of the method/funclet containing the filter
+ ; X2 = PC to invoke
+ ; X3 = address of the location where the SP of funclet's caller (i.e. this helper) should be saved.
+ ;
+ ; Save the SP of this function
+ str fp, [x3]
+ ; Invoke the filter funclet
+ blr x2
+
+ EPILOG_RESTORE_REG_PAIR fp, lr, #16!
+ EPILOG_RETURN
+
+ NESTED_END CallEHFilterFunclet
+
+
+ GBLA FaultingExceptionFrame_StackAlloc
+ GBLA FaultingExceptionFrame_FrameOffset
+
+FaultingExceptionFrame_StackAlloc SETA (SIZEOF__GSCookie + SIZEOF__FaultingExceptionFrame)
+FaultingExceptionFrame_FrameOffset SETA SIZEOF__GSCookie
+
+ MACRO
+ GenerateRedirectedStubWithFrame $STUB, $TARGET
+
+ ;
+ ; This is the primary function to which execution will be redirected to.
+ ;
+ NESTED_ENTRY $STUB
+
+ ;
+ ; IN: lr: original IP before redirect
+ ;
+
+ PROLOG_SAVE_REG_PAIR fp, lr, #-16!
+ PROLOG_STACK_ALLOC FaultingExceptionFrame_StackAlloc
+
+ ; At this point, the stack may be misaligned if the thread abort was asynchronously
+ ; triggered in the prolog or epilog of the managed method. For such a case, we must
+ ; align the stack before calling into the VM.
+ ;
+ ; Runtime check for 16-byte alignment.
+ mov x0, sp
+ and x0, x0, #15
+ sub sp, sp, x0
+
+ ; Save pointer to FEF for GetFrameFromRedirectedStubStackFrame
+ add x19, sp, #FaultingExceptionFrame_FrameOffset
+
+ ; Prepare to initialize to NULL
+ mov x1,#0
+ str x1, [x19] ; Initialize vtbl (it is not strictly necessary)
+ str x1, [x19, #FaultingExceptionFrame__m_fFilterExecuted] ; Initialize BOOL for personality routine
+
+ mov x0, x19 ; move the ptr to FEF in X0
+
+ bl $TARGET
+
+ ; Target should not return.
+ EMIT_BREAKPOINT
+
+ NESTED_END $STUB
+
+ MEND
+
+
+; ------------------------------------------------------------------
+;
+; Helpers for async (NullRef, AccessViolation) exceptions
+;
+
+ NESTED_ENTRY NakedThrowHelper2,,FixContextHandler
+ PROLOG_SAVE_REG_PAIR fp,lr, #-16!
+
+ ; On entry:
+ ;
+ ; X0 = Address of FaultingExceptionFrame
+ bl LinkFrameAndThrow
+
+ ; Target should not return.
+ EMIT_BREAKPOINT
+
+ NESTED_END NakedThrowHelper2
+
+
+ GenerateRedirectedStubWithFrame NakedThrowHelper, NakedThrowHelper2
+
+
+; Must be at very end of file
+ END
diff --git a/src/vm/arm64/asmmacros.h b/src/vm/arm64/asmmacros.h
new file mode 100644
index 0000000000..19c2f118cd
--- /dev/null
+++ b/src/vm/arm64/asmmacros.h
@@ -0,0 +1,269 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+;; ==++==
+;;
+
+;;
+;; ==--==
+
+;-----------------------------------------------------------------------------
+; Basic extensions of assembler macros, for consistency
+
+ MACRO
+ EPILOG_BRANCH_REG $reg
+
+ EPILOG_NOP br $reg
+ MEND
+;-----------------------------------------------------------------------------
+
+ MACRO
+ EPILOG_BRANCH $_target
+
+ EPILOG_NOP b $_target
+ MEND
+;-----------------------------------------------------------------------------
+; The following group of macros assist in implementing prologs and epilogs for methods that set up some
+; subclass of TransitionFrame. They ensure that the SP is 16-byte aligned at the conclusion of the prolog
+
+;-----------------------------------------------------------------------------
+; Define the prolog for a TransitionFrame-based method. This macro should be called first in the method and
+; comprises the entire prolog (i.e. don't modify SP after calling this). The locals must be 8-byte aligned
+;
+ MACRO
+ PROLOG_WITH_TRANSITION_BLOCK $extraLocals, $SaveFPArgs
+
+ GBLA __PWTB_FloatArgumentRegisters
+ GBLA __PWTB_ArgumentRegisters
+ GBLA __PWTB_StackAlloc
+ GBLA __PWTB_TransitionBlock
+ GBLL __PWTB_SaveFPArgs
+
+ IF "$SaveFPArgs" != ""
+__PWTB_SaveFPArgs SETL $SaveFPArgs
+ ELSE
+__PWTB_SaveFPArgs SETL {true}
+ ENDIF
+
+ IF "$extraLocals" != ""
+__PWTB_FloatArgumentRegisters SETA $extraLocals
+ ELSE
+__PWTB_FloatArgumentRegisters SETA 0
+ ENDIF
+
+ IF __PWTB_FloatArgumentRegisters:MOD:16 != 0
+__PWTB_FloatArgumentRegisters SETA __PWTB_FloatArgumentRegisters + 8
+ ENDIF
+
+ IF __PWTB_SaveFPArgs
+__PWTB_TransitionBlock SETA __PWTB_FloatArgumentRegisters + SIZEOF__FloatArgumentRegisters
+ ELSE
+__PWTB_TransitionBlock SETA __PWTB_FloatArgumentRegisters
+ ENDIF
+
+__PWTB_StackAlloc SETA __PWTB_TransitionBlock
+__PWTB_ArgumentRegisters SETA __PWTB_StackAlloc + 96
+
+ PROLOG_SAVE_REG_PAIR fp, lr, #-160!
+ ; Spill callee saved registers
+ PROLOG_SAVE_REG_PAIR x19, x20, #16
+ PROLOG_SAVE_REG_PAIR x21, x22, #32
+ PROLOG_SAVE_REG_PAIR x23, x24, #48
+ PROLOG_SAVE_REG_PAIR x25, x26, #64
+ PROLOG_SAVE_REG_PAIR x27, x28, #80
+
+ ; Allocate space for the rest of the frame
+ PROLOG_STACK_ALLOC __PWTB_StackAlloc
+
+ ; Spill argument registers.
+ SAVE_ARGUMENT_REGISTERS sp, __PWTB_ArgumentRegisters
+
+ IF __PWTB_SaveFPArgs
+ SAVE_FLOAT_ARGUMENT_REGISTERS sp, __PWTB_FloatArgumentRegisters
+ ENDIF
+
+ MEND
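+
+;-----------------------------------------------------------------------------
+; For illustration, the frame built by PROLOG_WITH_TRANSITION_BLOCK looks like this
+; (a sketch derived from the offsets above; offsets are from sp at the end of the prolog):
+;
+; sp + 0 : $extraLocals bytes of locals, if any
+; sp + __PWTB_FloatArgumentRegisters : saved d0-d7, if $SaveFPArgs
+; sp + __PWTB_TransitionBlock : the 160-byte register block: fp and lr at +0,
+; x19-x28 pairs at +16 through +95, and x0-x7 at +96 (sp + __PWTB_ArgumentRegisters)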
+
+;-----------------------------------------------------------------------------
+; Provides a matching epilog to PROLOG_WITH_TRANSITION_BLOCK and ends by preparing for a tail call.
+; Since this is a tail call, the argument registers are restored.
+;
+ MACRO
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+
+ IF __PWTB_SaveFPArgs
+ RESTORE_FLOAT_ARGUMENT_REGISTERS sp, __PWTB_FloatArgumentRegisters
+ ENDIF
+
+ RESTORE_ARGUMENT_REGISTERS sp, __PWTB_ArgumentRegisters
+ EPILOG_STACK_FREE __PWTB_StackAlloc
+
+ EPILOG_RESTORE_REG_PAIR x19, x20, #16
+ EPILOG_RESTORE_REG_PAIR x21, x22, #32
+ EPILOG_RESTORE_REG_PAIR x23, x24, #48
+ EPILOG_RESTORE_REG_PAIR x25, x26, #64
+ EPILOG_RESTORE_REG_PAIR x27, x28, #80
+ EPILOG_RESTORE_REG_PAIR fp, lr, #160!
+ MEND
+
+;-----------------------------------------------------------------------------
+; Macro used to end a function with explicit _End label
+ MACRO
+ LEAF_END_MARKED $FuncName
+
+ LCLS __EndLabelName
+__EndLabelName SETS "$FuncName":CC:"_End"
+ EXPORT $__EndLabelName
+$__EndLabelName
+
+ LEAF_END $FuncName
+
+ MEND
+;-----------------------------------------------------------------------------
+; Macro used to enable C++ code to locate labels that are patched at runtime.
+ MACRO
+ PATCH_LABEL $FuncName
+$FuncName
+ EXPORT $FuncName
+
+ MEND
+
+;-----------------------------------------------------------------------------
+; The Following sets of SAVE_*_REGISTERS expect the memory to be reserved and
+; base address to be passed in $reg
+;
+
+; Reserve 64 bytes of memory before calling SAVE_ARGUMENT_REGISTERS
+ MACRO
+ SAVE_ARGUMENT_REGISTERS $reg, $offset
+
+ GBLA __PWTB_SAVE_ARGUMENT_REGISTERS_OFFSET
+
+ IF "$offset" != ""
+__PWTB_SAVE_ARGUMENT_REGISTERS_OFFSET SETA $offset
+ ELSE
+__PWTB_SAVE_ARGUMENT_REGISTERS_OFFSET SETA 0
+ ENDIF
+
+ stp x0, x1, [$reg, #(__PWTB_SAVE_ARGUMENT_REGISTERS_OFFSET)]
+ stp x2, x3, [$reg, #(__PWTB_SAVE_ARGUMENT_REGISTERS_OFFSET + 16)]
+ stp x4, x5, [$reg, #(__PWTB_SAVE_ARGUMENT_REGISTERS_OFFSET + 32)]
+ stp x6, x7, [$reg, #(__PWTB_SAVE_ARGUMENT_REGISTERS_OFFSET + 48)]
+ MEND
+
+; Reserve 64 bytes of memory before calling SAVE_FLOAT_ARGUMENT_REGISTERS
+ MACRO
+ SAVE_FLOAT_ARGUMENT_REGISTERS $reg, $offset
+
+ GBLA __PWTB_SAVE_FLOAT_ARGUMENT_REGISTERS_OFFSET
+
+ IF "$offset" != ""
+__PWTB_SAVE_FLOAT_ARGUMENT_REGISTERS_OFFSET SETA $offset
+ ELSE
+__PWTB_SAVE_FLOAT_ARGUMENT_REGISTERS_OFFSET SETA 0
+ ENDIF
+
+ stp d0, d1, [$reg, #(__PWTB_SAVE_FLOAT_ARGUMENT_REGISTERS_OFFSET)]
+ stp d2, d3, [$reg, #(__PWTB_SAVE_FLOAT_ARGUMENT_REGISTERS_OFFSET + 16)]
+ stp d4, d5, [$reg, #(__PWTB_SAVE_FLOAT_ARGUMENT_REGISTERS_OFFSET + 32)]
+ stp d6, d7, [$reg, #(__PWTB_SAVE_FLOAT_ARGUMENT_REGISTERS_OFFSET + 48)]
+ MEND
+
+ MACRO
+ RESTORE_ARGUMENT_REGISTERS $reg, $offset
+
+ GBLA __PWTB_RESTORE_ARGUMENT_REGISTERS_OFFSET
+
+ IF "$offset" != ""
+__PWTB_RESTORE_ARGUMENT_REGISTERS_OFFSET SETA $offset
+ ELSE
+__PWTB_RESTORE_ARGUMENT_REGISTERS_OFFSET SETA 0
+ ENDIF
+
+ ldp x0, x1, [$reg, #(__PWTB_RESTORE_ARGUMENT_REGISTERS_OFFSET)]
+ ldp x2, x3, [$reg, #(__PWTB_RESTORE_ARGUMENT_REGISTERS_OFFSET + 16)]
+ ldp x4, x5, [$reg, #(__PWTB_RESTORE_ARGUMENT_REGISTERS_OFFSET + 32)]
+ ldp x6, x7, [$reg, #(__PWTB_RESTORE_ARGUMENT_REGISTERS_OFFSET + 48)]
+ MEND
+
+ MACRO
+ RESTORE_FLOAT_ARGUMENT_REGISTERS $reg, $offset
+
+ GBLA __PWTB_RESTORE_FLOAT_ARGUMENT_REGISTERS_OFFSET
+
+ IF "$offset" != ""
+__PWTB_RESTORE_FLOAT_ARGUMENT_REGISTERS_OFFSET SETA $offset
+ ELSE
+__PWTB_RESTORE_FLOAT_ARGUMENT_REGISTERS_OFFSET SETA 0
+ ENDIF
+
+ ldp d0, d1, [$reg, #(__PWTB_RESTORE_FLOAT_ARGUMENT_REGISTERS_OFFSET)]
+ ldp d2, d3, [$reg, #(__PWTB_RESTORE_FLOAT_ARGUMENT_REGISTERS_OFFSET + 16)]
+ ldp d4, d5, [$reg, #(__PWTB_RESTORE_FLOAT_ARGUMENT_REGISTERS_OFFSET + 32)]
+ ldp d6, d7, [$reg, #(__PWTB_RESTORE_FLOAT_ARGUMENT_REGISTERS_OFFSET + 48)]
+ MEND
+
+; ------------------------------------------------------------------
+; Macro to generate Redirection Stubs
+;
+; $reason : reason for redirection
+; Eg. GCThreadControl
+; NOTE: If you edit this macro, make sure you update GetCONTEXTFromRedirectedStubStackFrame.
+; This function is used by both the personality routine and the debugger to retrieve the original CONTEXT.
+ MACRO
+ GenerateRedirectedHandledJITCaseStub $reason
+
+ GBLS __RedirectionStubFuncName
+ GBLS __RedirectionStubEndFuncName
+ GBLS __RedirectionFuncName
+__RedirectionStubFuncName SETS "RedirectedHandledJITCaseFor":CC:"$reason":CC:"_Stub"
+__RedirectionStubEndFuncName SETS "RedirectedHandledJITCaseFor":CC:"$reason":CC:"_StubEnd"
+__RedirectionFuncName SETS "|?RedirectedHandledJITCaseFor":CC:"$reason":CC:"@Thread@@CAXXZ|"
+
+ IMPORT $__RedirectionFuncName
+
+ NESTED_ENTRY $__RedirectionStubFuncName
+ PROLOG_SAVE_REG_PAIR fp, lr, #-16!
+ sub sp, sp, #16 ; stack slot for CONTEXT * and padding
+
+ ; REDIRECTSTUB_SP_OFFSET_CONTEXT is defined in asmconstants.h and is used in GetCONTEXTFromRedirectedStubStackFrame.
+ ; If the CONTEXT is not saved at offset 0 from SP, it must be changed as well.
+ ASSERT REDIRECTSTUB_SP_OFFSET_CONTEXT == 0
+
+ ; Stack alignment. This check is necessary as this function can be
+ ; entered before complete execution of the prolog of another function.
+ and x8, fp, #15
+ sub sp, sp, x8
+
+
+ ;
+ ; Save a copy of the redirect CONTEXT*.
+ ; This is needed for the debugger to unwind the stack.
+ ;
+ bl GetCurrentSavedRedirectContext
+ str x0, [sp]
+
+ ;
+ ; Fetch the interrupted pc and save it as our return address.
+ ;
+ ldr x1, [x0, #CONTEXT_Pc]
+ str x1, [fp, #8]
+
+ ;
+ ; Call target, which will do whatever we needed to do in the context
+ ; of the target thread, and will RtlRestoreContext when it is done.
+ ;
+ bl $__RedirectionFuncName
+
+ EMIT_BREAKPOINT ; Unreachable
+
+; Put a label here to tell the debugger where the end of this function is.
+$__RedirectionStubEndFuncName
+ EXPORT $__RedirectionStubEndFuncName
+
+ NESTED_END
+
+ MEND
+
diff --git a/src/vm/arm64/cgenarm64.cpp b/src/vm/arm64/cgenarm64.cpp
new file mode 100644
index 0000000000..f371e966a8
--- /dev/null
+++ b/src/vm/arm64/cgenarm64.cpp
@@ -0,0 +1,39 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// Various helper routines for generating ARM64 assembly code.
+//
+
+// Precompiled Header
+
+#include "common.h"
+
+#include "stublink.h"
+#include "cgensys.h"
+#include "siginfo.hpp"
+#include "excep.h"
+#include "ecall.h"
+#include "dllimport.h"
+#include "dllimportcallback.h"
+#include "dbginterface.h"
+#include "fcall.h"
+#include "array.h"
+#include "virtualcallstub.h"
+
+#ifndef DACCESS_COMPILE
+
+// Note: This is only used on server GC on Windows.
+
+DWORD GetLogicalCpuCount()
+{
+ LIMITED_METHOD_CONTRACT;
+
+// The contract with any callers of this function is that if we're unable to determine
+ // the processor count, or the number of processors is not distributed evenly, then
+ // we should return 1.
+ return 1;
+}
+
+#endif // DACCESS_COMPILE
diff --git a/src/vm/arm64/cgencpu.h b/src/vm/arm64/cgencpu.h
new file mode 100644
index 0000000000..28d4474fa4
--- /dev/null
+++ b/src/vm/arm64/cgencpu.h
@@ -0,0 +1,691 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+#ifndef _TARGET_ARM64_
+#error Should only include "cgencpu.h" for ARM64 builds
+#endif
+
+#ifndef __cgencpu_h__
+#define __cgencpu_h__
+
+#define INSTRFMT_K64
+#include <stublink.h>
+
+#define USE_REDIRECT_FOR_GCSTRESS
+
+EXTERN_C void getFPReturn(int fpSize, INT64 *pRetVal);
+EXTERN_C void setFPReturn(int fpSize, INT64 retVal);
+
+
+class ComCallMethodDesc;
+
+
+#define COMMETHOD_PREPAD 24 // # extra bytes to allocate in addition to sizeof(ComCallMethodDesc)
+#ifdef FEATURE_COMINTEROP
+#define COMMETHOD_CALL_PRESTUB_SIZE 24
+#define COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET 16 // the offset of the call target address inside the prestub
+#endif // FEATURE_COMINTEROP
+
+#define STACK_ALIGN_SIZE 16
+
+#define JUMP_ALLOCATE_SIZE 16 // # bytes to allocate for a jump instruction
+#define BACK_TO_BACK_JUMP_ALLOCATE_SIZE 16 // # bytes to allocate for a back to back jump instruction
+
+#define HAS_NDIRECT_IMPORT_PRECODE 1
+
+#define USE_INDIRECT_CODEHEADER
+
+#ifdef FEATURE_REMOTING
+#define HAS_REMOTING_PRECODE 1
+#endif
+
+//ARM64TODO: Enable it once we complete work on precode
+//#define HAS_FIXUP_PRECODE 1
+//#define HAS_FIXUP_PRECODE_CHUNKS 1
+
+// A ThisPtrRetBufPrecode is necessary for closed delegates over static methods with a return buffer
+#define HAS_THISPTR_RETBUF_PRECODE 1
+
+//ARM64TODO: verify this
+#define CODE_SIZE_ALIGN 8
+#define CACHE_LINE_SIZE 32 // ARM64TODO: verify; value carried over from x86, where the Intel Optimization Manual gives 32 bytes
+#define LOG2SLOT LOG2_PTRSIZE
+
+#define ENREGISTERED_RETURNTYPE_MAXSIZE 64 // bytes (maximum HFA size is 8 doubles)
+#define ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE 8 // bytes
+#define ENREGISTERED_PARAMTYPE_MAXSIZE 16 // bytes (max value type size that can be passed by value)
+
+#define CALLDESCR_ARGREGS 1 // CallDescrWorker has ArgumentRegister parameter
+#define CALLDESCR_FPARGREGS 1 // CallDescrWorker has FloatArgumentRegisters parameter
+
+// Given a return address retrieved during stackwalk,
+// this is the offset by which it should be decremented to arrive at the callsite.
+#define STACKWALK_CONTROLPC_ADJUST_OFFSET 4
+
+//=======================================================================
+// IMPORTANT: This value is used to figure out how much to allocate
+// for a fixed array of FieldMarshaler's. That means it must be at least
+// as large as the largest FieldMarshaler subclass. This requirement
+// is guarded by an assert.
+//=======================================================================
+//ARM64TODO: verify this
+#define MAXFIELDMARSHALERSIZE 40
+
+//**********************************************************************
+// Parameter size
+//**********************************************************************
+
+typedef INT64 StackElemType;
+#define STACK_ELEM_SIZE sizeof(StackElemType)
+
+// !! This expression assumes STACK_ELEM_SIZE is a power of 2.
+#define StackElemSize(parmSize) (((parmSize) + STACK_ELEM_SIZE - 1) & ~((ULONG)(STACK_ELEM_SIZE - 1)))
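+// For example, with STACK_ELEM_SIZE == 8: StackElemSize(1) == 8, StackElemSize(8) == 8,
+// and StackElemSize(13) == 16.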
+
+//**********************************************************************
+// Frames
+//**********************************************************************
+
+//--------------------------------------------------------------------
+// This represents the callee saved (non-volatile) registers saved as
+// of a FramedMethodFrame.
+//--------------------------------------------------------------------
+typedef DPTR(struct CalleeSavedRegisters) PTR_CalleeSavedRegisters;
+struct CalleeSavedRegisters {
+ INT64 x29; // frame pointer
+ INT64 x30; // link register
+ INT64 x19, x20, x21, x22, x23, x24, x25, x26, x27, x28;
+};
+
+//--------------------------------------------------------------------
+// This represents the arguments that are stored in volatile registers.
+// This should not overlap the CalleeSavedRegisters since those are already
+// saved separately and it would be wasteful to save the same register twice.
+// If we do use a non-volatile register as an argument, then the ArgIterator
+// will probably have to communicate this back to the PromoteCallerStack
+// routine to avoid a double promotion.
+//--------------------------------------------------------------------
+typedef DPTR(struct ArgumentRegisters) PTR_ArgumentRegisters;
+struct ArgumentRegisters {
+ INT64 x[8]; // x0 ....x7
+};
+#define NUM_ARGUMENT_REGISTERS 8
+
+#define ARGUMENTREGISTERS_SIZE sizeof(ArgumentRegisters)
+
+
+//--------------------------------------------------------------------
+// This represents the floating point argument registers which are saved
+// as part of the NegInfo for a FramedMethodFrame. Note that these
+// might not be saved by all stubs: typically only those that call into
+// C++ helpers will need to preserve the values in these volatile
+// registers.
+//--------------------------------------------------------------------
+typedef DPTR(struct FloatArgumentRegisters) PTR_FloatArgumentRegisters;
+struct FloatArgumentRegisters {
+ // ARMv8 supports 32 floating point registers, each 128 bits wide.
+ // A register can be accessed as a 128-bit value, as a 64-bit value (d0-d31), as a 32-bit
+ // value (s0-s31), or as 16-bit and 8-bit values. C# has only two built-in floating point
+ // datatypes, float (32-bit) and double (64-bit); it has no quad-precision type. It therefore
+ // does not make sense to store full 128-bit values in the Frame when the upper 64 bits will not contain any values.
+ double d[8]; // d0-d7
+};
+
+
+//**********************************************************************
+// Exception handling
+//**********************************************************************
+
+inline PCODE GetIP(const T_CONTEXT * context) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return context->Pc;
+}
+
+inline void SetIP(T_CONTEXT *context, PCODE eip) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ context->Pc = eip;
+}
+
+inline TADDR GetSP(const T_CONTEXT * context) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TADDR(context->Sp);
+}
+
+inline PCODE GetLR(const T_CONTEXT * context) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return PCODE(context->Lr);
+}
+
+extern "C" LPVOID __stdcall GetCurrentSP();
+
+inline void SetSP(T_CONTEXT *context, TADDR esp) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ context->Sp = (DWORD64)esp; // Sp is 64 bits wide; a DWORD cast would truncate it
+}
+
+inline void SetFP(T_CONTEXT *context, TADDR ebp) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ context->Fp = (DWORD64)ebp; // Fp is 64 bits wide; a DWORD cast would truncate it
+}
+
+inline TADDR GetFP(const T_CONTEXT * context)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (TADDR)(context->Fp);
+}
+
+#ifdef FEATURE_COMINTEROP
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target);
+#endif // FEATURE_COMINTEROP
+
+//------------------------------------------------------------------------
+inline void emitJump(UINT32* pCode, LPVOID target)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // We require 8-byte alignment so the LDR instruction is aligned properly
+ _ASSERTE(((UINT_PTR)pCode & 7) == 0);
+
+ // +0: ldr x16, [pc, #8]
+ // +4: br x16
+ // +8: [target address]
+
+ pCode[0] = 0x58000050UL; // ldr x16, [pc, #8]
+ pCode[1] = 0xD61F0200UL; // br x16
+
+ *((LPVOID *)(pCode + 2)) = target; // 64-bit target address
+}
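+
+// For example, with a purely illustrative target of 0x0000004000001000, emitJump lays down
+// the 16 bytes 50 00 00 58 00 02 1F D6 00 10 00 00 40 00 00 00: the two instructions above
+// followed by the little-endian target address.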
+
+//------------------------------------------------------------------------
+// Given the same pCode that was passed to emitJump, this method
+// decodes the instructions and returns the jump target
+inline PCODE decodeJump(PCODE pCode)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ TADDR pInstr = PCODEToPINSTR(pCode);
+
+ return *dac_cast<PTR_PCODE>(pInstr + 2*sizeof(DWORD));
+}
+
+//------------------------------------------------------------------------
+inline BOOL isJump(PCODE pCode)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ TADDR pInstr = PCODEToPINSTR(pCode);
+
+ return *dac_cast<PTR_DWORD>(pInstr) == 0x58000050;
+}
+
+//------------------------------------------------------------------------
+inline BOOL isBackToBackJump(PCODE pBuffer)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return isJump(pBuffer);
+}
+
+//------------------------------------------------------------------------
+inline void emitBackToBackJump(LPBYTE pBuffer, LPVOID target)
+{
+ WRAPPER_NO_CONTRACT;
+ emitJump((UINT32*)pBuffer, target);
+}
+
+//------------------------------------------------------------------------
+inline PCODE decodeBackToBackJump(PCODE pBuffer)
+{
+ WRAPPER_NO_CONTRACT;
+ return decodeJump(pBuffer);
+}
+
+// SEH info forward declarations
+
+inline BOOL IsUnmanagedValueTypeReturnedByRef(UINT sizeofvaluetype)
+{
+// ARM64TODO : Check if we need to consider HFA
+ return (sizeofvaluetype > 8);
+}
+
+
+//----------------------------------------------------------------------
+
+struct IntReg
+{
+ int reg;
+ IntReg(int reg):reg(reg)
+ {
+ _ASSERTE(0 <= reg && reg < 32);
+ }
+
+ operator int () { return reg; }
+ operator int () const { return reg; }
+ int operator == (IntReg other) { return reg == other.reg; }
+ int operator != (IntReg other) { return reg != other.reg; }
+ WORD Mask() const { return 1 << reg; }
+};
+
+struct VecReg
+{
+ int reg;
+ VecReg(int reg):reg(reg)
+ {
+ _ASSERTE(0 <= reg && reg < 32);
+ }
+
+ operator int() { return reg; }
+ int operator == (VecReg other) { return reg == other.reg; }
+ int operator != (VecReg other) { return reg != other.reg; }
+ WORD Mask() const { return 1 << reg; }
+};
+
+struct CondCode
+{
+ int cond;
+ CondCode(int cond):cond(cond)
+ {
+ _ASSERTE(0 <= cond && cond < 16);
+ }
+};
+
+const IntReg RegTeb = IntReg(18);
+const IntReg RegFp = IntReg(29);
+const IntReg RegLr = IntReg(30);
+// Note that stack pointer and zero register share the same encoding, 31
+const IntReg RegSp = IntReg(31);
+
+const CondCode CondEq = CondCode(0);
+const CondCode CondNe = CondCode(1);
+const CondCode CondCs = CondCode(2);
+const CondCode CondCc = CondCode(3);
+const CondCode CondMi = CondCode(4);
+const CondCode CondPl = CondCode(5);
+const CondCode CondVs = CondCode(6);
+const CondCode CondVc = CondCode(7);
+const CondCode CondHi = CondCode(8);
+const CondCode CondLs = CondCode(9);
+const CondCode CondGe = CondCode(10);
+const CondCode CondLt = CondCode(11);
+const CondCode CondGt = CondCode(12);
+const CondCode CondLe = CondCode(13);
+const CondCode CondAl = CondCode(14);
+const CondCode CondNv = CondCode(15);
+
+
+#define PRECODE_ALIGNMENT CODE_SIZE_ALIGN
+#define SIZEOF_PRECODE_BASE CODE_SIZE_ALIGN
+#define OFFSETOF_PRECODE_TYPE 0
+
+#ifdef CROSSGEN_COMPILE
+#define GetEEFuncEntryPoint(pfn) 0x1001
+#else
+#define GetEEFuncEntryPoint(pfn) GFN_TADDR(pfn)
+#endif
+
+class StubLinkerCPU : public StubLinker
+{
+
+private:
+ void EmitLoadStoreRegPairImm(DWORD flags, int regNum1, int regNum2, IntReg Xn, int offset, BOOL isVec);
+ void EmitLoadStoreRegImm(DWORD flags, int regNum, IntReg Xn, int offset, BOOL isVec);
+public:
+
+ // BitFlags for EmitLoadStoreReg(Pair)Imm methods
+ enum {
+ eSTORE = 0x0,
+ eLOAD = 0x1,
+ eWRITEBACK = 0x2,
+ ePOSTINDEX = 0x4,
+ eFLAGMASK = 0x7
+ };
+
+ // BitFlags for register-offsetted loads/stores
+ // Bits 1-3 hold the <extend> encoding, while bit 0 indicates the shift
+ enum {
+ eSHIFT = 0x1, // 0y0001
+ eUXTW = 0x4, // 0y0100
+ eSXTW = 0xC, // 0y1100
+ eLSL = 0x7, // 0y0111
+ eSXTX = 0xE, // 0y1110
+ };
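+
+ // For example, (eUXTW | eSHIFT) == 0x5 (0y0101) selects the UXTW extend with the
+ // scaling shift applied, as in an addressing mode of the form [Xn, Wm, UXTW #3].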
+
+
+ static void Init();
+
+ void EmitUnboxMethodStub(MethodDesc* pRealMD);
+ void EmitCallManagedMethod(MethodDesc *pMD, BOOL fTailCall);
+ void EmitCallLabel(CodeLabel *target, BOOL fTailCall, BOOL fIndirect);
+ void EmitSecureDelegateInvoke(UINT_PTR hash);
+ static UINT_PTR HashMulticastInvoke(MetaSig* pSig);
+ void EmitShuffleThunk(struct ShuffleEntry *pShuffleEntryArray);
+ void EmitGetThreadInlined(IntReg Xt);
+
+#ifdef _DEBUG
+ void EmitNop() { Emit32(0xD503201F); }
+#endif
+ void EmitBreakPoint() { Emit32(0xD43E0000); }
+ void EmitMovConstant(IntReg target, UINT64 constant);
+ void EmitCmpImm(IntReg reg, int imm);
+ void EmitCmpReg(IntReg Xn, IntReg Xm);
+ void EmitCondFlagJump(CodeLabel * target, UINT cond);
+ void EmitJumpRegister(IntReg regTarget);
+ void EmitMovReg(IntReg dest, IntReg source);
+
+ void EmitSubImm(IntReg Xd, IntReg Xn, unsigned int value);
+ void EmitAddImm(IntReg Xd, IntReg Xn, unsigned int value);
+
+ void EmitLoadStoreRegPairImm(DWORD flags, IntReg Xt1, IntReg Xt2, IntReg Xn, int offset=0);
+ void EmitLoadStoreRegPairImm(DWORD flags, VecReg Vt1, VecReg Vt2, IntReg Xn, int offset=0);
+
+ void EmitLoadStoreRegImm(DWORD flags, IntReg Xt, IntReg Xn, int offset=0);
+ void EmitLoadStoreRegImm(DWORD flags, VecReg Vt, IntReg Xn, int offset=0);
+
+ void EmitLoadRegReg(IntReg Xt, IntReg Xn, IntReg Xm, DWORD option);
+
+ void EmitCallRegister(IntReg reg);
+ void EmitProlog(unsigned short cIntRegArgs,
+ unsigned short cVecRegArgs,
+ unsigned short cCalleeSavedRegs,
+ unsigned short cbStackSpace = 0);
+
+ void EmitEpilog();
+
+ void EmitRet(IntReg reg);
+
+
+};
+
+extern "C" void SinglecastDelegateInvokeStub();
+
+
+// preferred alignment for data
+//ARM64TODO: double check
+#define DATA_ALIGNMENT 8
+
+
+DECLSPEC_ALIGN(16) struct UMEntryThunkCode
+{
+ DWORD m_code[4];
+
+ TADDR m_pTargetCode;
+ TADDR m_pvSecretParam;
+
+ void Encode(BYTE* pTargetCode, void* pvSecretParam);
+
+ LPCBYTE GetEntryPoint() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (LPCBYTE)this;
+ }
+
+ static int GetEntryPointOffset()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return 0;
+ }
+};
+
+struct HijackArgs
+{
+ // ARM64:NYI
+};
+
+inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode)
+{
+#ifdef CROSSGEN_COMPILE
+ // The code won't be executed when we are cross-compiling, so flushing the instruction cache is unnecessary
+ return TRUE;
+#else
+ return FlushInstructionCache(GetCurrentProcess(), pCodeAddr, sizeOfCode);
+#endif
+}
+EXTERN_C VOID STDCALL PrecodeFixupThunk();
+
+// Invalid precode type
+struct InvalidPrecode {
+ static const int Type = 0;
+};
+
+struct StubPrecode {
+
+ static const int Type = 0x89;
+
+ // adr x9, #16
+ // ldp x10,x12,[x9] ; =m_pTarget,m_pMethodDesc
+ // br x10
+ // 4 bytes of padding for 8-byte alignment
+ // dcq pTarget
+ // dcq pMethodDesc
+ DWORD m_rgCode[4];
+ TADDR m_pTarget;
+ TADDR m_pMethodDesc;
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+
+ TADDR GetMethodDesc()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pMethodDesc;
+ }
+
+ PCODE GetTarget()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pTarget;
+ }
+
+ BOOL SetTargetInterlocked(TADDR target, TADDR expected)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ EnsureWritableExecutablePages(&m_pTarget);
+ return (TADDR)InterlockedCompareExchange(
+ (TADDR*)&m_pTarget, (TADDR)target, (TADDR)expected) == expected;
+ }
+
+#ifdef FEATURE_PREJIT
+ void Fixup(DataImage *image);
+#endif
+};
+typedef DPTR(StubPrecode) PTR_StubPrecode;
+
+
+struct NDirectImportPrecode {
+
+ static const int Type = 0x88;
+
+ // adr x8, #16 ; Notice that x8 register is used to differentiate the stub from StubPrecode which uses x9
+ // ldp x10,x12,[x8] ; =m_pTarget,m_pMethodDesc
+ // br x10
+ // 4 bytes of padding for 8-byte alignment
+ // dcq pTarget
+ // dcq pMethodDesc
+ DWORD m_rgCode[4];
+ TADDR m_pTarget;
+ TADDR m_pMethodDesc;
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+
+ TADDR GetMethodDesc()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pMethodDesc;
+ }
+
+ PCODE GetTarget()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pTarget;
+ }
+
+ LPVOID GetEntrypoint()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return this;
+ }
+
+#ifdef FEATURE_PREJIT
+ void Fixup(DataImage *image);
+#endif
+};
+typedef DPTR(NDirectImportPrecode) PTR_NDirectImportPrecode;
+
+
+struct FixupPrecode {
+
+ static const int Type = 0xfc;
+
+ // mov r12, pc
+ // ldr pc, [pc, #4] ; =m_pTarget
+ // dcb m_MethodDescChunkIndex
+ // dcb m_PrecodeChunkIndex
+ // dcd m_pTarget
+ WORD m_rgCode[3];
+ BYTE m_MethodDescChunkIndex;
+ BYTE m_PrecodeChunkIndex;
+ TADDR m_pTarget;
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex = 0, int iPrecodeChunkIndex = 0);
+
+ TADDR GetBase()
+ {
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+ }
+
+ TADDR GetMethodDesc();
+
+ PCODE GetTarget()
+ {
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+ }
+
+ BOOL SetTargetInterlocked(TADDR target, TADDR expected)
+ {
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+ }
+
+ static BOOL IsFixupPrecodeByASM(PCODE addr)
+ {
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+ }
+
+#ifdef FEATURE_PREJIT
+ // Partial initialization. Used to save regrouped chunks.
+ void InitForSave(int iPrecodeChunkIndex);
+
+ void Fixup(DataImage *image, MethodDesc * pMD);
+#endif
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+};
+typedef DPTR(FixupPrecode) PTR_FixupPrecode;
+
+
+// Precode to shuffle 'this' and retbuf for closed delegates over static methods with a return buffer
+struct ThisPtrRetBufPrecode {
+
+ static const int Type = 0x84;
+
+ // mov r12, r0
+ // mov r0, r1
+ // mov r1, r12
+ // ldr pc, [pc, #0] ; =m_pTarget
+ // dcd pTarget
+ // dcd pMethodDesc
+ WORD m_rgCode[6];
+ TADDR m_pTarget;
+ TADDR m_pMethodDesc;
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+
+ TADDR GetMethodDesc()
+ {
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+ }
+
+ PCODE GetTarget()
+ {
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+ }
+
+ BOOL SetTargetInterlocked(TADDR target, TADDR expected)
+ {
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+ }
+};
+typedef DPTR(ThisPtrRetBufPrecode) PTR_ThisPtrRetBufPrecode;
+
+
+#ifdef HAS_REMOTING_PRECODE
+
+// Precode with embedded remoting interceptor
+struct RemotingPrecode {
+
+ static const int Type = 0x02;
+
+ // push {r1,lr}
+ // ldr r1, [pc, #16] ; =m_pPrecodeRemotingThunk
+ // blx r1
+ // pop {r1,lr}
+ // ldr pc, [pc, #12] ; =m_pLocalTarget
+ // nop ; padding for alignment
+ // dcd m_pMethodDesc
+ // dcd m_pPrecodeRemotingThunk
+ // dcd m_pLocalTarget
+ WORD m_rgCode[8];
+ TADDR m_pMethodDesc;
+ TADDR m_pPrecodeRemotingThunk;
+ TADDR m_pLocalTarget;
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator = NULL);
+
+ TADDR GetMethodDesc()
+ {
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+ }
+
+ PCODE GetTarget()
+ {
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+ }
+
+ BOOL SetTargetInterlocked(TADDR target, TADDR expected)
+ {
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+ }
+
+#ifdef FEATURE_PREJIT
+ void Fixup(DataImage *image, ZapNode *pCodeNode);
+#endif
+};
+typedef DPTR(RemotingPrecode) PTR_RemotingPrecode;
+
+EXTERN_C void PrecodeRemotingThunk();
+
+#endif // HAS_REMOTING_PRECODE
+
+
+#endif // __cgencpu_h__
diff --git a/src/vm/arm64/crthelpers.asm b/src/vm/arm64/crthelpers.asm
new file mode 100644
index 0000000000..d1ee84abd7
--- /dev/null
+++ b/src/vm/arm64/crthelpers.asm
@@ -0,0 +1,304 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+;; ==++==
+;;
+
+;;
+;; ==--==
+
+#include "ksarm64.h"
+
+ TEXTAREA
+
+; Calls to JIT_MemSet are emitted by the JIT for initialization of large structs.
+; We need to provide our own implementation of memset instead of using the one in the CRT, because the CRT implementation does not guarantee
+; that aligned 8/4/2-byte memory will be written atomically. This is required because members in a struct can be read atomically
+; and their values should be written atomically.
+;
+;
+;void JIT_MemSet(void *dst, int val, SIZE_T count)
+;{
+; uintptr_t valEx = (unsigned char)val; // unsigned, so the fill pattern replicates correctly for values >= 0x80
+; valEx = valEx | valEx << 8;
+; valEx = valEx | valEx << 16;
+; valEx = valEx | valEx << 32;
+;
+; // If not aligned then make it 8-byte aligned
+; if(((uintptr_t)dst&0x7) != 0)
+; {
+; if(((uintptr_t)dst&0x3) == 0)
+; {
+; *(UINT*)dst = (UINT)valEx;
+; dst = (UINT*)dst + 1;
+; count-=4;
+; }
+; else if(((uintptr_t)dst&0x1) == 0)
+; {
+; while(count > 0 && ((uintptr_t)dst&0x7) != 0)
+; {
+; *(short*)dst = (short)valEx;
+; dst = (short*)dst + 1;
+; count-=2;
+; }
+; }
+; else
+; {
+; while(count > 0 && ((uintptr_t)dst&0x7) != 0)
+; {
+; *(char*)dst = (char)valEx;
+; dst = (char*)dst + 1;
+; count--;
+; }
+; }
+; }
+;
+; while(count > 8)
+; {
+; *(uintptr_t*)dst = valEx;
+; dst = (uintptr_t*)dst + 1;
+; count-=8;
+; }
+;
+; if(count & 8)
+; {
+; *(uintptr_t*)dst = valEx;
+; dst = (uintptr_t*)dst + 1;
+; }
+;
+; if(count & 4)
+; {
+; *(UINT*)dst = (UINT)valEx;
+; dst = (UINT*)dst + 1;
+; }
+;
+; if(count & 2)
+; {
+; *(short*)dst = (short)valEx;
+; dst = (short*)dst + 1;
+; }
+;
+; if(count & 1)
+; {
+; *(char*)dst = (char)valEx;
+; }
+;}
+;
+
+; Assembly code corresponding to the above C++ method. JIT_MemSet can AV, and the CLR exception personality routine needs to
+; determine whether the exception occurred inside JIT_MemSet in order to throw the corresponding managed exception.
+; Determining this would be slow if the method were implemented in C++ (it would require unwind info). By adding the JIT_MemSet_End
+; marker in this .asm file, it can easily be determined whether an exception happened inside JIT_MemSet. Therefore, JIT_MemSet
+; is written in assembly rather than as a C++ method.
+
+ LEAF_ENTRY JIT_MemSet
+ and x8,x1,#0xff ; use the low byte only, as memset does; sign extension would corrupt the fill pattern
+ orr x8,x8,x8 lsl #8
+ orr x8,x8,x8 lsl #0x10
+ orr x9,x8,x8 lsl #0x20
+ and x8,x0,#7
+ cbz x8,JIT_MemSet_0x7c
+ and x8,x0,#3
+ cbnz x8,JIT_MemSet_0x38
+ str w9,[x0]
+ add x0,x0,#4
+ mov x8,#-4
+ add x2,x2,x8
+ b JIT_MemSet_0x7c
+JIT_MemSet_0x38
+ cbz x2,JIT_MemSet_0x7c
+ tbnz x0,#0,JIT_MemSet_0x60
+JIT_MemSet_0x40
+ and x8,x0,#7
+ cbz x8,JIT_MemSet_0x7c
+ strh w9,[x0]
+ add x0,x0,#2
+ mov x8,#-2
+ add x2,x2,x8
+ cbnz x2,JIT_MemSet_0x40
+ b JIT_MemSet_0x7c
+JIT_MemSet_0x60
+ and x8,x0,#7
+ cbz x8,JIT_MemSet_0x7c
+ strb w9,[x0]
+ add x0,x0,#1
+ mov x8,#-1
+ add x2,x2,x8
+ cbnz x2,JIT_MemSet_0x60
+JIT_MemSet_0x7c
+ cmp x2,#8
+ bls JIT_MemSet_0xb8
+ mov x8,#-9
+ add x8,x2,x8
+ lsr x8,x8,#3
+ add x11,x8,#1
+ mov x10,x0
+ add x8,x10,x11 lsl #3
+JIT_MemSet_0x9c
+ cmp x10,x8
+ beq JIT_MemSet_0xac
+ str x9,[x10],#8
+ b JIT_MemSet_0x9c
+JIT_MemSet_0xac
+ mov x8,#-8
+ madd x2,x11,x8,x2
+ add x0,x0,x11 lsl #3
+JIT_MemSet_0xb8
+ tbz x2,#3,JIT_MemSet_0xbc
+ str x9,[x0],#8 ; write the 8-byte remainder that the main loop leaves behind
+JIT_MemSet_0xbc
+ tbz x2,#2,JIT_MemSet_0xc4
+ str w9,[x0]
+ add x0,x0,#4
+JIT_MemSet_0xc4
+ tbz x2,#1,JIT_MemSet_0xd0
+ strh w9,[x0]
+ add x0,x0,#2
+JIT_MemSet_0xd0
+ tbz x2,#0,JIT_MemSet_0xd8
+ strb w9,[x0]
+JIT_MemSet_0xd8
+ ret lr
+ LEAF_END
+
+ LEAF_ENTRY JIT_MemSet_End
+ LEAF_END
+
+
+; See comments above for JIT_MemSet
+
+;void JIT_MemCpy(void *dst, const void *src, SIZE_T count)
+;{
+; // If not aligned then make it 8-byte aligned
+; if(((uintptr_t)dst&0x7) != 0)
+; {
+; if(((uintptr_t)dst&0x3) == 0)
+; {
+; *(UINT*)dst = *(UINT*)src;
+; dst = (UINT*)dst + 1;
+; src = (UINT*)src + 1;
+; count-=4;
+; }
+; else if(((uintptr_t)dst&0x1) == 0)
+; {
+; while(count > 0 && ((uintptr_t)dst&0x7) != 0)
+; {
+; *(short*)dst = *(short*)src;
+; dst = (short*)dst + 1;
+; src = (short*)src + 1;
+; count-=2;
+; }
+; }
+; else
+; {
+; while(count > 0 && ((uintptr_t)dst&0x7) != 0)
+; {
+; *(char*)dst = *(char*)src;
+; dst = (char*)dst + 1;
+; src = (char*)src + 1;
+; count--;
+; }
+; }
+; }
+;
+; while(count > 8)
+; {
+; *(uintptr_t*)dst = *(uintptr_t*)src;
+; dst = (uintptr_t*)dst + 1;
+; src = (uintptr_t*)src + 1;
+; count-=8;
+; }
+;
+; if(count & 8)
+; {
+; *(uintptr_t*)dst = *(uintptr_t*)src;
+; dst = (uintptr_t*)dst + 1;
+; src = (uintptr_t*)src + 1;
+; }
+;
+; if(count & 4)
+; {
+; *(UINT*)dst = *(UINT*)src;
+; dst = (UINT*)dst + 1;
+; src = (UINT*)src + 1;
+; }
+;
+; if(count & 2)
+; {
+; *(short*)dst = *(short*)src;
+; dst = (short*)dst + 1;
+; src = (short*)src + 1;
+; }
+;
+; if(count & 1)
+; {
+; *(char*)dst = *(char*)src;
+; }
+;}
+;
+
+; Assembly code corresponding to the above C++ method.
+; See the comments above for JIT_MemSet.
+ LEAF_ENTRY JIT_MemCpy
+ and x8,x0,#7
+ cbz x8,JIT_MemCpy_0x80
+ and x8,x0,#3
+ cbnz x8,JIT_MemCpy_0x2c
+ ldr w8,[x1]
+ str w8,[x0]
+ add x0,x0,#4
+ add x1,x1,#4
+ mov x8,#-4
+ add x2,x2,x8
+ b JIT_MemCpy_0x80
+JIT_MemCpy_0x2c
+ cbz x2,JIT_MemCpy_0x80
+ tbnz x0,#0,JIT_MemCpy_0x5c
+JIT_MemCpy_0x34
+ and x8,x0,#7
+ cbz x8,JIT_MemCpy_0x80
+ ldrsh w8,[x1]
+ strh w8,[x0]
+ add x0,x0,#2
+ add x1,x1,#2
+ mov x8,#-2
+ add x2,x2,x8
+ cbnz x2,JIT_MemCpy_0x34
+ b JIT_MemCpy_0x80
+JIT_MemCpy_0x5c
+ and x8,x0,#7
+ cbz x8,JIT_MemCpy_0x80
+ ldrsb w8,[x1]
+ strb w8,[x0]
+ add x0,x0,#1
+ add x1,x1,#1
+ mov x8,#-1
+ add x2,x2,x8
+ cbnz x2,JIT_MemCpy_0x5c
+JIT_MemCpy_0x80
+ cmp x2,#8
+ bls JIT_MemCpy_0xb4
+ mov x8,#-9
+ add x8,x2,x8
+ lsr x8,x8,#3
+ add x9,x8,#1
+ mov x8,#-8
+ madd x2,x9,x8,x2
+JIT_MemCpy_0xa0
+ ldr x8,[x1],#8
+ str x8,[x0],#8
+ mov x8,#-1
+ add x9,x9,x8
+ cbnz x9,JIT_MemCpy_0xa0
+JIT_MemCpy_0xb4
+ tbz x2,#3,JIT_MemCpy_0xb8
+ ldr x8,[x1],#8 ; copy the 8-byte remainder that the main loop leaves behind
+ str x8,[x0],#8
+JIT_MemCpy_0xb8
+ tbz x2,#2,JIT_MemCpy_0xc8
+ ldr w8,[x1]
+ str w8,[x0]
+ add x0,x0,#4
+ add x1,x1,#4
+JIT_MemCpy_0xc8
+ tbz x2,#1,JIT_MemCpy_0xdc
+ ldrsh w8,[x1]
+ strh w8,[x0]
+ add x0,x0,#2
+ add x1,x1,#2
+JIT_MemCpy_0xdc
+ tbz x2,#0,JIT_MemCpy_0xe8
+ ldrsb w8,[x1]
+ strb w8,[x0]
+JIT_MemCpy_0xe8
+ ret lr
+ LEAF_END
+
+ LEAF_ENTRY JIT_MemCpy_End
+ LEAF_END
+
+; Must be at very end of file
+ END
diff --git a/src/vm/arm64/excepcpu.h b/src/vm/arm64/excepcpu.h
new file mode 100644
index 0000000000..f992640cf1
--- /dev/null
+++ b/src/vm/arm64/excepcpu.h
@@ -0,0 +1,52 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+//
+
+
+#ifndef __excepcpu_h__
+#define __excepcpu_h__
+
+
+#define THROW_CONTROL_FOR_THREAD_FUNCTION RedirectForThreadAbort
+EXTERN_C void RedirectForThreadAbort();
+
+
+#define STATUS_CLR_GCCOVER_CODE STATUS_ILLEGAL_INSTRUCTION
+
+class Thread;
+class FaultingExceptionFrame;
+
+#define INSTALL_EXCEPTION_HANDLING_RECORD(record)
+#define UNINSTALL_EXCEPTION_HANDLING_RECORD(record)
+//
+// On ARM64, the COMPlusFrameHandler's work is done by our personality routine.
+//
+#define DECLARE_CPFH_EH_RECORD(pCurThread)
+
+//
+// Retrieves the redirected CONTEXT* from the stack frame of one of the
+// RedirectedHandledJITCaseForXXX_Stub's.
+//
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_DISPATCHER_CONTEXT * pDispatcherContext);
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_CONTEXT * pContext);
+
+//
+// Retrieves the FaultingExceptionFrame* from the stack frame of
+// RedirectForThrowControl or NakedThrowHelper.
+//
+FaultingExceptionFrame *GetFrameFromRedirectedStubStackFrame (T_DISPATCHER_CONTEXT *pDispatcherContext);
+
+inline
+PCODE GetAdjustedCallAddress(PCODE returnAddress)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return returnAddress - 4;
+}
+
+BOOL AdjustContextForVirtualStub(EXCEPTION_RECORD *pExceptionRecord, T_CONTEXT *pContext);
+
+#endif // __excepcpu_h__
diff --git a/src/vm/arm64/gmscpu.h b/src/vm/arm64/gmscpu.h
new file mode 100644
index 0000000000..9105103395
--- /dev/null
+++ b/src/vm/arm64/gmscpu.h
@@ -0,0 +1,96 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/**************************************************************/
+/* gmscpu.h */
+/**************************************************************/
+/* HelperFrame defines the 'GET_STATE(machState)' macro, which
+ figures out what the state of the machine will be when the
+ current method returns. It then stores the state in the
+ MachState structure. */
+
+/**************************************************************/
+
+#ifndef __gmscpu_h__
+#define __gmscpu_h__
+
+
+// X19 - X28
+#define NUM_NONVOLATILE_CONTEXT_POINTERS 10
+
+struct MachState {
+ ULONG64 captureX19_X28[NUM_NONVOLATILE_CONTEXT_POINTERS]; // preserved registers
+ PTR_ULONG64 ptrX19_X28[NUM_NONVOLATILE_CONTEXT_POINTERS]; // pointers to preserved registers
+ TADDR _pc;
+ TADDR _sp;
+ TADDR _fp;
+ BOOL _isValid;
+
+ BOOL isValid() { LIMITED_METHOD_DAC_CONTRACT; return _isValid; }
+ TADDR GetRetAddr() { LIMITED_METHOD_DAC_CONTRACT; return _pc; }
+};
+
+struct LazyMachState : public MachState{
+
+ TADDR captureSp; // Stack pointer at the time of capture
+ TADDR captureIp; // Instruction pointer at the time of capture
+ TADDR captureFp; // Frame pointer at the time of capture
+
+ void setLazyStateFromUnwind(MachState* copy);
+ static void unwindLazyState(LazyMachState* baseState,
+ MachState* lazyState,
+ int funCallDepth = 1,
+ HostCallPreference hostCallPreference = AllowHostCalls);
+};
+
+inline void LazyMachState::setLazyStateFromUnwind(MachState* copy)
+{
+#if defined(DACCESS_COMPILE)
+ // This function cannot be called in DAC because DAC cannot update target memory.
+ DacError(E_FAIL);
+ return;
+
+#else // !DACCESS_COMPILE
+
+ _sp = copy->_sp;
+ _pc = copy->_pc;
+ _fp = copy->_fp;
+
+ // Now copy the preserved register pointers. Note that some of the pointers could be
+ // pointing to copy->captureX19_X28[]. If that is the case then, while copying to the destination,
+ // ensure that they point to the corresponding element in the destination's captureX19_X28[].
+ ULONG64* srcLowerBound = &copy->captureX19_X28[0];
+ ULONG64* srcUpperBound = (ULONG64*)((BYTE*)copy + offsetof(MachState, ptrX19_X28));
+
+
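+ // For example, if copy->ptrX19_X28[2] (the x21 entry) points into copy->captureX19_X28[],
+ // the destination's ptrX19_X28[2] is rebased to the same index in this object's own
+ // captureX19_X28[]; otherwise the pointer is copied unchanged.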
+ for (int i = 0; i<NUM_NONVOLATILE_CONTEXT_POINTERS; i++)
+ {
+ if (copy->ptrX19_X28[i] >= srcLowerBound && copy->ptrX19_X28[i] < srcUpperBound)
+ {
+ ptrX19_X28[i] = (PTR_ULONG64)((BYTE*)copy->ptrX19_X28[i] - (BYTE*)srcLowerBound + (BYTE*)captureX19_X28);
+ }
+ else
+ {
+ ptrX19_X28[i] = copy->ptrX19_X28[i];
+ }
+ }
+
+ // this has to be last because we depend on write ordering to
+ // synchronize the race implicit in updating this struct
+ VolatileStore(&_isValid, TRUE);
+#endif // DACCESS_COMPILE
+}
+
+// Do the initial capture of the machine state. This is meant to be
+// as lightweight as possible, as we may never need the state that
+// we capture.
+EXTERN_C void LazyMachStateCaptureState(struct LazyMachState *pState);
+
+#define CAPTURE_STATE(machState, ret) \
+ LazyMachStateCaptureState(machState)
+
+
+#endif
diff --git a/src/vm/arm64/stubs.cpp b/src/vm/arm64/stubs.cpp
new file mode 100644
index 0000000000..d9355421b8
--- /dev/null
+++ b/src/vm/arm64/stubs.cpp
@@ -0,0 +1,1744 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: stubs.cpp
+//
+// This file contains stub functions for unimplemented features needed to
+// run on the ARM64 platform.
+
+#include "common.h"
+#include "dllimportcallback.h"
+#include "comdelegate.h"
+#include "tls.h"
+#include "asmconstants.h"
+#include "virtualcallstub.h"
+
+#ifndef DACCESS_COMPILE
+//-----------------------------------------------------------------------
+// InstructionFormat for B.cond
+//-----------------------------------------------------------------------
+class ConditionalBranchInstructionFormat : public InstructionFormat
+{
+
+ public:
+ ConditionalBranchInstructionFormat() : InstructionFormat(InstructionFormat::k32)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(refsize == InstructionFormat::k32);
+
+ return 4;
+ }
+
+ virtual UINT GetHotSpotOffset(UINT refsize, UINT variationCode)
+ {
+ WRAPPER_NO_CONTRACT;
+ return 0;
+ }
+
+
+ virtual BOOL CanReach(UINT refSize, UINT variationCode, BOOL fExternal, INT_PTR offset)
+ {
+        _ASSERTE(!fExternal || !"ARM64:NYI - ConditionalBranchInstructionFormat::CanReach external");
+ if (fExternal)
+ return false;
+
+ if (offset < -1048576 || offset > 1048572)
+ return false;
+ return true;
+ }
+ // B.<cond> <label>
+ // Encoding 0|1|0|1|0|1|0|0|imm19|0|cond
+ // cond = Bits3-0(variation)
+    // imm19 = Bits18-0(fixedUpReference/4), will be sign-extended
+ virtual VOID EmitInstruction(UINT refSize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(refSize == InstructionFormat::k32);
+
+ if (fixedUpReference < -1048576 || fixedUpReference > 1048572)
+ COMPlusThrow(kNotSupportedException);
+
+ _ASSERTE((fixedUpReference & 0x3) == 0);
+ DWORD imm19 = (DWORD)(0x7FFFF & (fixedUpReference >> 2));
+
+ pOutBuffer[0] = static_cast<BYTE>((0x7 & imm19 /* Bits2-0(imm19) */) << 5 | (0xF & variationCode /* cond */));
+ pOutBuffer[1] = static_cast<BYTE>((0x7F8 & imm19 /* Bits10-3(imm19) */) >> 3);
+        pOutBuffer[2] = static_cast<BYTE>((0x7F800 & imm19 /* Bits18-11(imm19) */) >> 11);
+ pOutBuffer[3] = static_cast<BYTE>(0x54);
+ }
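+
+    // Worked example (illustrative, not in the original): B.EQ to a target 8 bytes
+    // ahead, i.e. fixedUpReference = 8 and variationCode = 0 (EQ): imm19 = 8 >> 2 = 2,
+    // so the bytes emitted are 40 00 00 54 (little-endian 0x54000040, "b.eq #+8").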
+};
+
+//-----------------------------------------------------------------------
+// InstructionFormat for B(L)(R) (unconditional branch)
+//-----------------------------------------------------------------------
+class BranchInstructionFormat : public InstructionFormat
+{
+ // Encoding of the VariationCode:
+ // bit(0) indicates whether this is a direct or an indirect jump.
+ // bit(1) indicates whether this is a branch with link -a.k.a call- (BL(R)) or not (B(R))
+
+ public:
+ enum VariationCodes
+ {
+ BIF_VAR_INDIRECT = 0x00000001,
+ BIF_VAR_CALL = 0x00000002,
+
+ BIF_VAR_JUMP = 0x00000000,
+ BIF_VAR_INDIRECT_CALL = 0x00000003
+ };
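+
+    // For example, EmitCallLabel() later in this file composes these flags: a non-tail
+    // indirect call selects BIF_VAR_INDIRECT_CALL (the ldr/ldr/blr sequence below),
+    // while a direct tail call selects BIF_VAR_JUMP (ldr/br).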
+ private:
+ BOOL IsIndirect(UINT variationCode)
+ {
+ return (variationCode & BIF_VAR_INDIRECT) != 0;
+ }
+ BOOL IsCall(UINT variationCode)
+ {
+ return (variationCode & BIF_VAR_CALL) != 0;
+ }
+
+
+ public:
+ BranchInstructionFormat() : InstructionFormat(InstructionFormat::k64)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual UINT GetSizeOfInstruction(UINT refSize, UINT variationCode)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(refSize == InstructionFormat::k64);
+
+ if (IsIndirect(variationCode))
+ return 12;
+ else
+ return 8;
+ }
+
+ virtual UINT GetSizeOfData(UINT refSize, UINT variationCode)
+ {
+ WRAPPER_NO_CONTRACT;
+ return 8;
+ }
+
+ virtual UINT GetHotSpotOffset(UINT refsize, UINT variationCode)
+ {
+ WRAPPER_NO_CONTRACT;
+ return 0;
+ }
+
+ virtual BOOL CanReach(UINT refSize, UINT variationCode, BOOL fExternal, INT_PTR offset)
+ {
+ if (fExternal)
+ {
+ // Note that the parameter 'offset' is not an offset but the target address itself (when fExternal is true)
+ return (refSize == InstructionFormat::k64);
+ }
+ else
+ {
+ return ((offset >= -134217728 && offset <= 134217724) || (refSize == InstructionFormat::k64));
+ }
+ }
+
+ virtual VOID EmitInstruction(UINT refSize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (IsIndirect(variationCode))
+ {
+ _ASSERTE(((UINT_PTR)pDataBuffer & 7) == 0);
+ __int64 dataOffset = pDataBuffer - pOutBuffer;
+
+ if (dataOffset < -1048576 || dataOffset > 1048572)
+ COMPlusThrow(kNotSupportedException);
+
+ DWORD imm19 = (DWORD)(0x7FFFF & (dataOffset >> 2));
+
+ // +0: ldr x16, [pc, #dataOffset]
+ // +4: ldr x16, [x16]
+ // +8: b(l)r x16
+ *((DWORD*)pOutBuffer) = (0x58000010 | (imm19 << 5));
+ *((DWORD*)(pOutBuffer+4)) = 0xF9400210;
+ if (IsCall(variationCode))
+ {
+ *((DWORD*)(pOutBuffer+8)) = 0xD63F0200; // blr x16
+ }
+ else
+ {
+ *((DWORD*)(pOutBuffer+8)) = 0xD61F0200; // br x16
+ }
+
+
+ *((__int64*)pDataBuffer) = fixedUpReference + (__int64)pOutBuffer;
+ }
+ else
+ {
+
+ _ASSERTE(((UINT_PTR)pDataBuffer & 7) == 0);
+ __int64 dataOffset = pDataBuffer - pOutBuffer;
+
+ if (dataOffset < -1048576 || dataOffset > 1048572)
+ COMPlusThrow(kNotSupportedException);
+
+ DWORD imm19 = (DWORD)(0x7FFFF & (dataOffset >> 2));
+
+ // +0: ldr x16, [pc, #dataOffset]
+ // +4: b(l)r x16
+ *((DWORD*)pOutBuffer) = (0x58000010 | (imm19 << 5));
+ if (IsCall(variationCode))
+ {
+ *((DWORD*)(pOutBuffer+4)) = 0xD63F0200; // blr x16
+ }
+ else
+ {
+ *((DWORD*)(pOutBuffer+4)) = 0xD61F0200; // br x16
+ }
+
+ if (!ClrSafeInt<__int64>::addition(fixedUpReference, (__int64)pOutBuffer, fixedUpReference))
+ COMPlusThrowArithmetic();
+ *((__int64*)pDataBuffer) = fixedUpReference;
+ }
+ }
+
+};
+
+//-----------------------------------------------------------------------
+// InstructionFormat for loading a label to the register (ADRP/ADR)
+//-----------------------------------------------------------------------
+class LoadFromLabelInstructionFormat : public InstructionFormat
+{
+ public:
+ LoadFromLabelInstructionFormat() : InstructionFormat( InstructionFormat::k32)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual UINT GetSizeOfInstruction(UINT refSize, UINT variationCode)
+ {
+ WRAPPER_NO_CONTRACT;
+ return 8;
+
+ }
+
+ virtual UINT GetHotSpotOffset(UINT refsize, UINT variationCode)
+ {
+ WRAPPER_NO_CONTRACT;
+ return 0;
+ }
+
+ virtual BOOL CanReach(UINT refSize, UINT variationCode, BOOL fExternal, INT_PTR offset)
+ {
+ return fExternal;
+ }
+
+ virtual VOID EmitInstruction(UINT refSize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ {
+ LIMITED_METHOD_CONTRACT;
+        // VariationCode indicates the register into which the label address is loaded
+
+ DWORD imm =(DWORD)(fixedUpReference>>12);
+ if (imm>>21)
+ COMPlusThrow(kNotSupportedException);
+
+ // Can't use SP or XZR
+ _ASSERTE((variationCode & 0x1F) != 31);
+
+ // adrp Xt, #Page_of_fixedUpReference
+ *((DWORD*)pOutBuffer) = ((9<<28) | ((imm & 3)<<29) | (imm>>2)<<5 | (variationCode&0x1F));
+
+ // ldr Xt, [Xt, #offset_of_fixedUpReference_to_its_page]
+ UINT64 target = (UINT64)(fixedUpReference + pOutBuffer)>>3;
+ *((DWORD*)(pOutBuffer+4)) = ( 0xF9400000 | ((target & 0x1FF)<<10) | (variationCode & 0x1F)<<5 | (variationCode & 0x1F));
+ }
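+
+    // Worked example (illustrative): with fixedUpReference = 0x5000 and variationCode = 12,
+    // imm = 0x5000 >> 12 = 5, so immlo = 1 and immhi = 1, and the first word emitted is
+    // 0x90000000 | (1<<29) | (1<<5) | 12 = 0xB000002C, i.e. "adrp x12, #<page>".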
+};
+
+
+
+static BYTE gConditionalBranchIF[sizeof(ConditionalBranchInstructionFormat)];
+static BYTE gBranchIF[sizeof(BranchInstructionFormat)];
+static BYTE gLoadFromLabelIF[sizeof(LoadFromLabelInstructionFormat)];
+
+#endif
+
+#ifndef CROSSGEN_COMPILE
+void LazyMachState::unwindLazyState(LazyMachState* baseState,
+ MachState* unwoundstate,
+ int funCallDepth,
+ HostCallPreference hostCallPreference)
+{
+ T_CONTEXT context;
+ T_KNONVOLATILE_CONTEXT_POINTERS nonVolContextPtrs;
+
+ context.X19 = unwoundstate->captureX19_X28[0] = baseState->captureX19_X28[0];
+ context.X20 = unwoundstate->captureX19_X28[1] = baseState->captureX19_X28[1];
+ context.X21 = unwoundstate->captureX19_X28[2] = baseState->captureX19_X28[2];
+ context.X22 = unwoundstate->captureX19_X28[3] = baseState->captureX19_X28[3];
+ context.X23 = unwoundstate->captureX19_X28[4] = baseState->captureX19_X28[4];
+ context.X24 = unwoundstate->captureX19_X28[5] = baseState->captureX19_X28[5];
+ context.X25 = unwoundstate->captureX19_X28[6] = baseState->captureX19_X28[6];
+ context.X26 = unwoundstate->captureX19_X28[7] = baseState->captureX19_X28[7];
+ context.X27 = unwoundstate->captureX19_X28[8] = baseState->captureX19_X28[8];
+ context.X28 = unwoundstate->captureX19_X28[9] = baseState->captureX19_X28[9];
+
+ context.Sp = baseState->captureSp;
+ context.Pc = baseState->captureIp;
+ context.Fp = baseState->captureFp;
+
+#if !defined(DACCESS_COMPILE)
+    // In DAC builds we unwind via StackWalk64(), which does not support context pointers,
+    // so this setup is skipped there and the register values are read back from the
+    // unwound context instead (see below).
+    //
+    // Point the KNONVOLATILE_CONTEXT_POINTERS at the captured integer registers so the
+    // unwinder updates them in place.
+ nonVolContextPtrs.X19 = &unwoundstate->captureX19_X28[0];
+ nonVolContextPtrs.X20 = &unwoundstate->captureX19_X28[1];
+ nonVolContextPtrs.X21 = &unwoundstate->captureX19_X28[2];
+ nonVolContextPtrs.X22 = &unwoundstate->captureX19_X28[3];
+ nonVolContextPtrs.X23 = &unwoundstate->captureX19_X28[4];
+ nonVolContextPtrs.X24 = &unwoundstate->captureX19_X28[5];
+ nonVolContextPtrs.X25 = &unwoundstate->captureX19_X28[6];
+ nonVolContextPtrs.X26 = &unwoundstate->captureX19_X28[7];
+ nonVolContextPtrs.X27 = &unwoundstate->captureX19_X28[8];
+ nonVolContextPtrs.X28 = &unwoundstate->captureX19_X28[9];
+#endif // DACCESS_COMPILE
+
+ LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK LazyMachState::unwindLazyState(ip:%p,sp:%p,fp:%p)\n", baseState->captureIp, baseState->captureSp, baseState->captureFp));
+
+ PCODE pvControlPc;
+
+ do {
+
+ pvControlPc = Thread::VirtualUnwindCallFrame(&context, &nonVolContextPtrs);
+
+ if (funCallDepth > 0)
+ {
+ funCallDepth--;
+ if (funCallDepth == 0)
+ break;
+ }
+ else
+ {
+            // Determine whether the given IP resides in JITted code (IsManagedCode returns
+            // nonzero in that case). Use it now to see if we've unwound to managed code yet.
+ BOOL fFailedReaderLock = FALSE;
+ BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock);
+ if (fFailedReaderLock)
+ {
+ // We don't know if we would have been able to find a JIT
+ // manager, because we couldn't enter the reader lock without
+ // yielding (and our caller doesn't want us to yield). So abort
+ // now.
+
+ // Invalidate the lazyState we're returning, so the caller knows
+ // we aborted before we could fully unwind
+ unwoundstate->_isValid = false;
+ return;
+ }
+
+ if (fIsManagedCode)
+ break;
+
+ }
+ } while (true);
+
+#ifdef DACCESS_COMPILE
+    // For DAC builds, we update the registers directly since we don't have context pointers
+ unwoundstate->captureX19_X28[0] = context.X19;
+ unwoundstate->captureX19_X28[1] = context.X20;
+ unwoundstate->captureX19_X28[2] = context.X21;
+ unwoundstate->captureX19_X28[3] = context.X22;
+ unwoundstate->captureX19_X28[4] = context.X23;
+ unwoundstate->captureX19_X28[5] = context.X24;
+ unwoundstate->captureX19_X28[6] = context.X25;
+ unwoundstate->captureX19_X28[7] = context.X26;
+ unwoundstate->captureX19_X28[8] = context.X27;
+ unwoundstate->captureX19_X28[9] = context.X28;
+#else // !DACCESS_COMPILE
+ // For non-DAC builds, update the register state from context pointers
+ unwoundstate->ptrX19_X28[0] = nonVolContextPtrs.X19;
+ unwoundstate->ptrX19_X28[1] = nonVolContextPtrs.X20;
+ unwoundstate->ptrX19_X28[2] = nonVolContextPtrs.X21;
+ unwoundstate->ptrX19_X28[3] = nonVolContextPtrs.X22;
+ unwoundstate->ptrX19_X28[4] = nonVolContextPtrs.X23;
+ unwoundstate->ptrX19_X28[5] = nonVolContextPtrs.X24;
+ unwoundstate->ptrX19_X28[6] = nonVolContextPtrs.X25;
+ unwoundstate->ptrX19_X28[7] = nonVolContextPtrs.X26;
+ unwoundstate->ptrX19_X28[8] = nonVolContextPtrs.X27;
+ unwoundstate->ptrX19_X28[9] = nonVolContextPtrs.X28;
+#endif // DACCESS_COMPILE
+
+ unwoundstate->_pc = context.Pc;
+ unwoundstate->_sp = context.Sp;
+ unwoundstate->_fp = context.Fp;
+
+ unwoundstate->_isValid = TRUE;
+}
+
+void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+
+ //
+ // Copy the saved state from the frame to the current context.
+ //
+
+ LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK HelperMethodFrame::UpdateRegDisplay cached ip:%p, sp:%p\n", m_MachState._pc, m_MachState._sp));
+
+#if defined(DACCESS_COMPILE)
+ // For DAC, we may get here when the HMF is still uninitialized.
+ // So we may need to unwind here.
+ if (!m_MachState.isValid())
+ {
+ // This allocation throws on OOM.
+ MachState* pUnwoundState = (MachState*)DacAllocHostOnlyInstance(sizeof(*pUnwoundState), true);
+
+ InsureInit(false, pUnwoundState);
+
+ pRD->pCurrentContext->Pc = pRD->ControlPC = pUnwoundState->_pc;
+ pRD->pCurrentContext->Sp = pRD->SP = pUnwoundState->_sp;
+ pRD->pCurrentContext->Fp = pUnwoundState->_fp;
+
+ pRD->pCurrentContext->X19 = (DWORD64)(pUnwoundState->captureX19_X28[0]);
+ pRD->pCurrentContext->X20 = (DWORD64)(pUnwoundState->captureX19_X28[1]);
+ pRD->pCurrentContext->X21 = (DWORD64)(pUnwoundState->captureX19_X28[2]);
+ pRD->pCurrentContext->X22 = (DWORD64)(pUnwoundState->captureX19_X28[3]);
+ pRD->pCurrentContext->X23 = (DWORD64)(pUnwoundState->captureX19_X28[4]);
+ pRD->pCurrentContext->X24 = (DWORD64)(pUnwoundState->captureX19_X28[5]);
+ pRD->pCurrentContext->X25 = (DWORD64)(pUnwoundState->captureX19_X28[6]);
+ pRD->pCurrentContext->X26 = (DWORD64)(pUnwoundState->captureX19_X28[7]);
+ pRD->pCurrentContext->X27 = (DWORD64)(pUnwoundState->captureX19_X28[8]);
+ pRD->pCurrentContext->X28 = (DWORD64)(pUnwoundState->captureX19_X28[9]);
+
+ return;
+ }
+#endif // DACCESS_COMPILE
+
+ // reset pContext; it's only valid for active (top-most) frame
+ pRD->pContext = NULL;
+ pRD->ControlPC = GetReturnAddress();
+ pRD->SP = (DWORD64)(size_t)m_MachState._sp;
+
+ pRD->pCurrentContext->Pc = pRD->ControlPC;
+ pRD->pCurrentContext->Sp = pRD->SP;
+ pRD->pCurrentContext->Fp = (DWORD64)(size_t)m_MachState._fp;
+
+ pRD->pCurrentContext->X19 = *m_MachState.ptrX19_X28[0];
+ pRD->pCurrentContext->X20 = *m_MachState.ptrX19_X28[1];
+ pRD->pCurrentContext->X21 = *m_MachState.ptrX19_X28[2];
+ pRD->pCurrentContext->X22 = *m_MachState.ptrX19_X28[3];
+ pRD->pCurrentContext->X23 = *m_MachState.ptrX19_X28[4];
+ pRD->pCurrentContext->X24 = *m_MachState.ptrX19_X28[5];
+ pRD->pCurrentContext->X25 = *m_MachState.ptrX19_X28[6];
+ pRD->pCurrentContext->X26 = *m_MachState.ptrX19_X28[7];
+ pRD->pCurrentContext->X27 = *m_MachState.ptrX19_X28[8];
+ pRD->pCurrentContext->X28 = *m_MachState.ptrX19_X28[9];
+
+#if !defined(DACCESS_COMPILE)
+ pRD->pCurrentContextPointers->X19 = m_MachState.ptrX19_X28[0];
+ pRD->pCurrentContextPointers->X20 = m_MachState.ptrX19_X28[1];
+ pRD->pCurrentContextPointers->X21 = m_MachState.ptrX19_X28[2];
+ pRD->pCurrentContextPointers->X22 = m_MachState.ptrX19_X28[3];
+ pRD->pCurrentContextPointers->X23 = m_MachState.ptrX19_X28[4];
+ pRD->pCurrentContextPointers->X24 = m_MachState.ptrX19_X28[5];
+ pRD->pCurrentContextPointers->X25 = m_MachState.ptrX19_X28[6];
+ pRD->pCurrentContextPointers->X26 = m_MachState.ptrX19_X28[7];
+ pRD->pCurrentContextPointers->X27 = m_MachState.ptrX19_X28[8];
+ pRD->pCurrentContextPointers->X28 = m_MachState.ptrX19_X28[9];
+ pRD->pCurrentContextPointers->Lr = NULL;
+#endif
+}
+#endif // CROSSGEN_COMPILE
+
+TADDR FixupPrecode::GetMethodDesc()
+{
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+}
+
+#ifdef DACCESS_COMPILE
+void FixupPrecode::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+void StubPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+{
+ WRAPPER_NO_CONTRACT;
+
+ int n = 0;
+
+ m_rgCode[n++] = 0x10000089; // adr x9, #16
+ m_rgCode[n++] = 0xA940312A; // ldp x10,x12,[x9]
+ m_rgCode[n++] = 0xD61F0140; // br x10
+
+ _ASSERTE(n+1 == _countof(m_rgCode));
+
+ m_pTarget = GetPreStubEntryPoint();
+ m_pMethodDesc = (TADDR)pMD;
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+void StubPrecode::Fixup(DataImage *image)
+{
+ WRAPPER_NO_CONTRACT;
+
+ image->FixupFieldToNode(this, offsetof(StubPrecode, m_pTarget),
+ image->GetHelperThunk(CORINFO_HELP_EE_PRESTUB),
+ 0,
+ IMAGE_REL_BASED_PTR);
+
+ image->FixupField(this, offsetof(StubPrecode, m_pMethodDesc),
+ (void*)GetMethodDesc(),
+ 0,
+ IMAGE_REL_BASED_PTR);
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+void NDirectImportPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+{
+ WRAPPER_NO_CONTRACT;
+
+ int n = 0;
+
+ m_rgCode[n++] = 0x10000088; // adr x8, #16
+ m_rgCode[n++] = 0xA940310A; // ldp x10,x12,[x8]
+ m_rgCode[n++] = 0xD61F0140; // br x10
+
+ _ASSERTE(n+1 == _countof(m_rgCode));
+
+ m_pTarget = GetEEFuncEntryPoint(NDirectImportThunk);
+ m_pMethodDesc = (TADDR)pMD;
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+void NDirectImportPrecode::Fixup(DataImage *image)
+{
+ WRAPPER_NO_CONTRACT;
+
+ image->FixupField(this, offsetof(NDirectImportPrecode, m_pMethodDesc),
+ (void*)GetMethodDesc(),
+ 0,
+ IMAGE_REL_BASED_PTR);
+
+ image->FixupFieldToNode(this, offsetof(NDirectImportPrecode, m_pTarget),
+ image->GetHelperThunk(CORINFO_HELP_EE_PINVOKE_FIXUP),
+ 0,
+ IMAGE_REL_BASED_PTR);
+}
+#endif
+
+void FixupPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex /*=0*/, int iPrecodeChunkIndex /*=0*/)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+// Partial initialization. Used to save regrouped chunks.
+void FixupPrecode::InitForSave(int iPrecodeChunkIndex)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+void FixupPrecode::Fixup(DataImage *image, MethodDesc * pMD)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+void ThisPtrRetBufPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+
+#ifdef HAS_REMOTING_PRECODE
+
+void RemotingPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+void RemotingPrecode::Fixup(DataImage *image, ZapNode *pCodeNode)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+void CTPMethodTable::ActivatePrecodeRemotingThunk()
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+#endif // HAS_REMOTING_PRECODE
+
+
+#ifndef CROSSGEN_COMPILE
+BOOL DoesSlotCallPrestub(PCODE pCode)
+{
+ PTR_DWORD pInstr = dac_cast<PTR_DWORD>(PCODEToPINSTR(pCode));
+
+ // ARM64TODO: Check for FixupPrecode
+
+ // StubPrecode
+ if (pInstr[0] == 0x10000089 && // adr x9, #16
+ pInstr[1] == 0xA940312A && // ldp x10,x12,[x9]
+ pInstr[2] == 0xD61F0140) // br x10
+ {
+ PCODE pTarget = dac_cast<PTR_StubPrecode>(pInstr)->m_pTarget;
+
+ // ARM64TODO: implement for NGen case
+
+ return pTarget == GetPreStubEntryPoint();
+ }
+
+ return FALSE;
+
+}
+
+#endif // CROSSGEN_COMPILE
+
+#endif // !DACCESS_COMPILE
+
+void UpdateRegDisplayFromCalleeSavedRegisters(REGDISPLAY * pRD, CalleeSavedRegisters * pCalleeSaved)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ pRD->pCurrentContext->X19 = pCalleeSaved->x19;
+ pRD->pCurrentContext->X20 = pCalleeSaved->x20;
+ pRD->pCurrentContext->X21 = pCalleeSaved->x21;
+ pRD->pCurrentContext->X22 = pCalleeSaved->x22;
+ pRD->pCurrentContext->X23 = pCalleeSaved->x23;
+ pRD->pCurrentContext->X24 = pCalleeSaved->x24;
+ pRD->pCurrentContext->X25 = pCalleeSaved->x25;
+ pRD->pCurrentContext->X26 = pCalleeSaved->x26;
+ pRD->pCurrentContext->X27 = pCalleeSaved->x27;
+ pRD->pCurrentContext->X28 = pCalleeSaved->x28;
+
+ T_KNONVOLATILE_CONTEXT_POINTERS * pContextPointers = pRD->pCurrentContextPointers;
+ pContextPointers->X19 = (PDWORD64)&pCalleeSaved->x19;
+ pContextPointers->X20 = (PDWORD64)&pCalleeSaved->x20;
+ pContextPointers->X21 = (PDWORD64)&pCalleeSaved->x21;
+ pContextPointers->X22 = (PDWORD64)&pCalleeSaved->x22;
+ pContextPointers->X23 = (PDWORD64)&pCalleeSaved->x23;
+ pContextPointers->X24 = (PDWORD64)&pCalleeSaved->x24;
+ pContextPointers->X25 = (PDWORD64)&pCalleeSaved->x25;
+ pContextPointers->X26 = (PDWORD64)&pCalleeSaved->x26;
+ pContextPointers->X27 = (PDWORD64)&pCalleeSaved->x27;
+ pContextPointers->X28 = (PDWORD64)&pCalleeSaved->x28;
+}
+
+void TransitionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+
+    // copy the argument registers
+ ArgumentRegisters *pArgRegs = GetArgumentRegisters();
+ for (int i = 0; i < ARGUMENTREGISTERS_SIZE; i++)
+ pRD->pCurrentContext->X[i] = pArgRegs->x[i];
+
+ // copy the callee saved regs
+ CalleeSavedRegisters *pCalleeSaved = GetCalleeSavedRegisters();
+ UpdateRegDisplayFromCalleeSavedRegisters(pRD, pCalleeSaved);
+
+ // copy the control registers
+ pRD->pCurrentContext->Fp = pCalleeSaved->x29;
+ pRD->pCurrentContext->Lr = pCalleeSaved->x30;
+ pRD->pCurrentContext->Pc = GetReturnAddress();
+ pRD->pCurrentContext->Sp = this->GetSP();
+
+    // Finally, sync up the regdisplay with the context
+ SyncRegDisplayToCurrentContext(pRD);
+
+ LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK TransitionFrame::UpdateRegDisplay(pc:%p, sp:%p)\n", pRD->ControlPC, pRD->SP));
+
+
+}
+
+void TailCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+#ifndef DACCESS_COMPILE
+
+void TailCallFrame::InitFromContext(T_CONTEXT * pContext)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+#endif // !DACCESS_COMPILE
+
+void FaultingExceptionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Copy the context to regdisplay
+ memcpy(pRD->pCurrentContext, &m_ctx, sizeof(T_CONTEXT));
+
+ pRD->ControlPC = ::GetIP(&m_ctx);
+ pRD->SP = ::GetSP(&m_ctx);
+
+ // Update the integer registers in KNONVOLATILE_CONTEXT_POINTERS from
+ // the exception context we have.
+ pRD->pCurrentContextPointers->X19 = (PDWORD64)&m_ctx.X19;
+ pRD->pCurrentContextPointers->X20 = (PDWORD64)&m_ctx.X20;
+ pRD->pCurrentContextPointers->X21 = (PDWORD64)&m_ctx.X21;
+ pRD->pCurrentContextPointers->X22 = (PDWORD64)&m_ctx.X22;
+ pRD->pCurrentContextPointers->X23 = (PDWORD64)&m_ctx.X23;
+ pRD->pCurrentContextPointers->X24 = (PDWORD64)&m_ctx.X24;
+ pRD->pCurrentContextPointers->X25 = (PDWORD64)&m_ctx.X25;
+ pRD->pCurrentContextPointers->X26 = (PDWORD64)&m_ctx.X26;
+ pRD->pCurrentContextPointers->X27 = (PDWORD64)&m_ctx.X27;
+ pRD->pCurrentContextPointers->X28 = (PDWORD64)&m_ctx.X28;
+ pRD->pCurrentContextPointers->Fp = NULL;
+ pRD->pCurrentContextPointers->Lr = NULL;
+
+
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+}
+
+void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+#ifdef PROFILING_SUPPORTED
+ PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this));
+#endif
+ HOST_NOCALLS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ if (!InlinedCallFrame::FrameHasActiveCall(this))
+ {
+ LOG((LF_CORDB, LL_ERROR, "WARNING: InlinedCallFrame::UpdateRegDisplay called on inactive frame %p\n", this));
+ return;
+ }
+
+ // reset pContext; it's only valid for active (top-most) frame
+ pRD->pContext = NULL;
+
+ pRD->ControlPC = m_pCallerReturnAddress;
+    pRD->SP = (DWORD64) dac_cast<TADDR>(m_pCallSiteSP);
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE;
+
+ pRD->pCurrentContext->Pc = m_pCallerReturnAddress;
+ pRD->pCurrentContext->Sp = pRD->SP;
+
+ // Update the frame pointer in the current context.
+ pRD->pCurrentContext->Fp = m_pCalleeSavedFP;
+ pRD->pCurrentContextPointers->Fp = &m_pCalleeSavedFP;
+
+ RETURN;
+}
+
+#ifdef FEATURE_HIJACK
+TADDR ResumableFrame::GetReturnAddressPtr(void)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<TADDR>(m_Regs) + offsetof(T_CONTEXT, Pc);
+}
+
+void ResumableFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ CopyMemory(pRD->pCurrentContext, m_Regs, sizeof(T_CONTEXT));
+
+ pRD->ControlPC = m_Regs->Pc;
+ pRD->SP = m_Regs->Sp;
+
+ pRD->pCurrentContextPointers->X19 = &m_Regs->X19;
+ pRD->pCurrentContextPointers->X20 = &m_Regs->X20;
+ pRD->pCurrentContextPointers->X21 = &m_Regs->X21;
+ pRD->pCurrentContextPointers->X22 = &m_Regs->X22;
+ pRD->pCurrentContextPointers->X23 = &m_Regs->X23;
+ pRD->pCurrentContextPointers->X24 = &m_Regs->X24;
+ pRD->pCurrentContextPointers->X25 = &m_Regs->X25;
+ pRD->pCurrentContextPointers->X26 = &m_Regs->X26;
+ pRD->pCurrentContextPointers->X27 = &m_Regs->X27;
+ pRD->pCurrentContextPointers->X28 = &m_Regs->X28;
+ pRD->pCurrentContextPointers->Fp = &m_Regs->Fp;
+ pRD->pCurrentContextPointers->Lr = &m_Regs->Lr;
+
+ for (int i=0; i < 18; i++)
+ pRD->volatileCurrContextPointers.X[i] = &m_Regs->X[i];
+
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+
+ RETURN;
+}
+
+void HijackFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"ARM64:NYI");
+}
+#endif // FEATURE_HIJACK
+
+#if defined(FEATURE_REMOTING) && !defined(CROSSGEN_COMPILE)
+
+#ifndef DACCESS_COMPILE
+PCODE CTPMethodTable::CreateThunkForVirtualMethod(DWORD dwSlot, BYTE *startaddr)
+{
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+}
+#endif // DACCESS_COMPILE
+
+BOOL CVirtualThunkMgr::IsThunkByASM(PCODE startaddr)
+{
+ _ASSERTE(!"ARM64:NYI");
+ return FALSE;
+}
+
+MethodDesc *CVirtualThunkMgr::GetMethodDescByASM(PCODE startaddr, MethodTable *pMT)
+{
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+}
+
+#ifndef DACCESS_COMPILE
+
+BOOL CVirtualThunkMgr::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
+{
+ _ASSERTE(!"ARM64:NYI");
+ return FALSE;
+}
+#endif // !DACCESS_COMPILE
+
+#endif // FEATURE_REMOTING && !CROSSGEN_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+
+void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // adr x12, label_comCallMethodDesc
+ // ldr x10, label_target
+ // br x10
+ // 4 byte padding for alignment
+ // label_target:
+ // target address (8 bytes)
+ // label_comCallMethodDesc:
+ DWORD rgCode[] = {
+ 0x100000cc,
+ 0x5800006a,
+ 0xd61f0140
+ };
+
+ BYTE *pBuffer = (BYTE*)pCOMMethod - COMMETHOD_CALL_PRESTUB_SIZE;
+
+ memcpy(pBuffer, rgCode, sizeof(rgCode));
+ *((PCODE*)(pBuffer + sizeof(rgCode) + 4)) = target;
+
+ // Ensure that the updated instructions get actually written
+ ClrFlushInstructionCache(pBuffer, COMMETHOD_CALL_PRESTUB_SIZE);
+
+ _ASSERTE(IS_ALIGNED(pBuffer + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET, sizeof(void*)) &&
+ *((PCODE*)(pBuffer + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET)) == target);
+}
+#endif // FEATURE_COMINTEROP
+
+
+void JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+void JIT_TailCall()
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+extern "C" void * ClrFlsGetBlock()
+{
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+}
+
+void InitJITHelpers1()
+{
+ return;
+}
+
+EXTERN_C void __stdcall ProfileEnterNaked(UINT_PTR clientData)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+EXTERN_C void __stdcall ProfileLeaveNaked(UINT_PTR clientData)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+EXTERN_C void __stdcall ProfileTailcallNaked(UINT_PTR clientData)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_DISPATCHER_CONTEXT * pDispatcherContext)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ DWORD64 stackSlot = pDispatcherContext->EstablisherFrame + REDIRECTSTUB_SP_OFFSET_CONTEXT;
+ PTR_PTR_CONTEXT ppContext = dac_cast<PTR_PTR_CONTEXT>((TADDR)stackSlot);
+ return *ppContext;
+}
+
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_CONTEXT * pContext)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ DWORD64 stackSlot = pContext->Sp + REDIRECTSTUB_SP_OFFSET_CONTEXT;
+ PTR_PTR_CONTEXT ppContext = dac_cast<PTR_PTR_CONTEXT>((TADDR)stackSlot);
+ return *ppContext;
+}
+
+void RedirectForThreadAbort()
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+#if !defined(DACCESS_COMPILE) && !defined (CROSSGEN_COMPILE)
+FaultingExceptionFrame *GetFrameFromRedirectedStubStackFrame (DISPATCHER_CONTEXT *pDispatcherContext)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (FaultingExceptionFrame*)((TADDR)pDispatcherContext->ContextRecord->X19);
+}
+
+
+BOOL
+AdjustContextForVirtualStub(
+ EXCEPTION_RECORD *pExceptionRecord,
+ CONTEXT *pContext)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ Thread * pThread = GetThread();
+
+ // We may not have a managed thread object. Example is an AV on the helper thread.
+ // (perhaps during StubManager::IsStub)
+ if (pThread == NULL)
+ {
+ return FALSE;
+ }
+
+ PCODE f_IP = GetIP(pContext);
+
+ VirtualCallStubManager::StubKind sk;
+ VirtualCallStubManager::FindStubManager(f_IP, &sk);
+
+ if (sk == VirtualCallStubManager::SK_DISPATCH)
+ {
+ if (*PTR_DWORD(f_IP) != DISPATCH_STUB_FIRST_DWORD)
+ {
+ _ASSERTE(!"AV in DispatchStub at unknown instruction");
+ return FALSE;
+ }
+ }
+    else if (sk == VirtualCallStubManager::SK_RESOLVE)
+ {
+ if (*PTR_DWORD(f_IP) != RESOLVE_STUB_FIRST_DWORD)
+ {
+ _ASSERTE(!"AV in ResolveStub at unknown instruction");
+ return FALSE;
+ }
+ }
+ else
+ {
+ return FALSE;
+ }
+
+ PCODE callsite = GetAdjustedCallAddress(GetLR(pContext));
+
+ // Lr must already have been saved before calling so it should not be necessary to restore Lr
+
+ pExceptionRecord->ExceptionAddress = (PVOID)callsite;
+ SetIP(pContext, callsite);
+
+ return TRUE;
+}
+#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
+
+bool IsInstrModifyFault(PEXCEPTION_POINTERS pExceptionInfo)
+{
+ return false;
+}
+
+extern "C" {
+
+void FuncEvalHijack(void)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+void ExceptionHijack(void)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+void ExceptionHijackEnd(void)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+void RedirectedHandledJITCaseForGCThreadControl_Stub(void)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+void RedirectedHandledJITCaseForDbgThreadControl_Stub(void)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+void RedirectedHandledJITCaseForUserSuspend_Stub(void)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+void RedirectedHandledJITCaseForYieldTask_Stub(void)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+void RedirectedHandledJITCaseForGCThreadControl_StubEnd()
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+void RedirectedHandledJITCaseForDbgThreadControl_StubEnd()
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+void RedirectedHandledJITCaseForUserSuspend_StubEnd()
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+void RedirectedHandledJITCaseForYieldTask_StubEnd()
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+
+VOID OnHijackObjectTripThread()
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+VOID OnHijackScalarTripThread()
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+VOID OnHijackInteriorPointerTripThread()
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+};
+
+#ifdef FEATURE_COMINTEROP
+extern "C" void GenericComPlusCallStub(void)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_PREJIT
+extern "C" void StubDispatchFixupStub()
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+#endif
+
+//ARM64TODO: check if this should be amd64 and win64
+#ifdef _WIN64
+extern "C" void PInvokeStubForHostInner(DWORD dwStackSize, LPVOID pStackFrame, LPVOID pTarget)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+#endif
+
+void PInvokeStubForHost(void)
+{
+ // Hosted P/Invoke is not implemented on ARM64
+ UNREACHABLE();
+}
+
+UMEntryThunk * UMEntryThunk::Decode(void *pCallback)
+{
+ _ASSERTE(offsetof(UMEntryThunkCode, m_code) == 0);
+ UMEntryThunkCode * pCode = (UMEntryThunkCode*)pCallback;
+
+ // We may be called with an unmanaged external code pointer instead. So if it doesn't look like one of our
+ // stubs (see UMEntryThunkCode::Encode below) then we'll return NULL. Luckily in these scenarios our
+ // caller will perform a hash lookup on successful return to verify our result in case random unmanaged
+ // code happens to look like ours.
+ if ((pCode->m_code[0] == 0x1000008c) &&
+ (pCode->m_code[1] == 0xa9403190) &&
+ (pCode->m_code[2] == 0xd61f0200))
+ {
+ return (UMEntryThunk*)pCode->m_pvSecretParam;
+ }
+
+ return NULL;
+}
+
+void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
+{
+ // adr x12, _label
+ // ldp x16, x12, [x12]
+ // br x16
+    // 4 bytes of padding for alignment
+ // _label
+ // m_pTargetCode data
+ // m_pvSecretParam data
+
+ m_code[0] = 0x1000008c;
+ m_code[1] = 0xa9403190;
+ m_code[2] = 0xd61f0200;
+
+
+ m_pTargetCode = (TADDR)pTargetCode;
+ m_pvSecretParam = (TADDR)pvSecretParam;
+
+ FlushInstructionCache(GetCurrentProcess(),&m_code,sizeof(m_code));
+}
+
+
+#ifdef PROFILING_SUPPORTED
+#include "proftoeeinterfaceimpl.h"
+
+extern UINT_PTR ProfileGetIPFromPlatformSpecificHandle(void * handle)
+{
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+}
+
+extern void ProfileSetFunctionIDInPlatformSpecificHandle(void * pPlatformSpecificHandle, FunctionID functionID)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+ProfileArgIterator::ProfileArgIterator(MetaSig * pMetaSig, void* platformSpecificHandle)
+ : m_argIterator(pMetaSig)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+ProfileArgIterator::~ProfileArgIterator()
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+LPVOID ProfileArgIterator::GetNextArgAddr()
+{
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+}
+
+LPVOID ProfileArgIterator::GetHiddenArgValue(void)
+{
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+}
+
+LPVOID ProfileArgIterator::GetThis(void)
+{
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+}
+
+LPVOID ProfileArgIterator::GetReturnBufferAddr(void)
+{
+ _ASSERTE(!"ARM64:NYI");
+ return NULL;
+}
+#endif
+
+#if !defined(DACCESS_COMPILE)
+VOID ResetCurrentContext()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+#endif
+
+extern "C" void ResolveWorkerChainLookupAsmStub()
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+extern "C" void StubDispatchFixupPatchLabel()
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv)
+{
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+extern "C" void setFPReturn(int fpSize, INT64 retVal)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+extern "C" void getFPReturn(int fpSize, INT64 *retval)
+{
+ _ASSERTE(!"ARM64:NYI");
+}
+
+void StompWriteBarrierEphemeral()
+{
+ //ARM64TODO: implement this
+ return;
+}
+
+void StompWriteBarrierResize(BOOL bReqUpperBoundsCheck)
+{
+ //ARM64TODO: implement this
+ return;
+}
+
+#ifdef DACCESS_COMPILE
+BOOL GetAnyThunkTarget (T_CONTEXT *pctx, TADDR *pTarget, TADDR *pTargetMethodDesc)
+{
+ _ASSERTE(!"ARM64:NYI");
+ return FALSE;
+}
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+// ----------------------------------------------------------------
+// StubLinkerCPU methods
+// ----------------------------------------------------------------
+
+void StubLinkerCPU::EmitMovConstant(IntReg target, UINT64 constant)
+{
+#define WORD_MASK 0xFFFF
+
+    // Move the 64bit constant in 4 chunks (of 16 bits).
+    // MOVZ Rd, <1st word>, LSL #0
+    // MOVK Rd, <2nd word>, LSL #16
+    // MOVK Rd, <3rd word>, LSL #32
+    // MOVK Rd, <4th word>, LSL #48
+    WORD word = (WORD) (constant & WORD_MASK);
+    Emit32((DWORD)(0xD2<<24 | (4)<<21 | word<<5 | target));
+    if (!(constant >> 16)) return;
+
+    word = (WORD) ((constant>>16) & WORD_MASK);
+    if (word != 0)
+        Emit32((DWORD)(0xF2<<24 | (5)<<21 | word<<5 | target));
+    if (!(constant >> 32)) return;
+
+    word = (WORD) ((constant>>32) & WORD_MASK);
+    if (word != 0)
+        Emit32((DWORD)(0xF2<<24 | (6)<<21 | word<<5 | target));
+    if (!(constant >> 48)) return;
+
+    word = (WORD) ((constant>>48) & WORD_MASK);
+    if (word != 0)
+        Emit32((DWORD)(0xF2<<24 | (7)<<21 | word<<5 | target));
+#undef WORD_MASK
+}
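+
+// Worked example (illustrative): EmitMovConstant(IntReg(9), 0x12345678) emits
+//   movz x9, #0x5678              ; 0xD28ACF09
+//   movk x9, #0x1234, lsl #16     ; 0xF2A24689
+// and returns early because the upper 32 bits of the constant are zero.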
+
+void StubLinkerCPU::EmitCmpImm(IntReg reg, int imm)
+{
+
+ if (0 <= imm && imm < 4096)
+ {
+        // CMP <Xn|SP>, #<imm>{, <shift>}
+        // Encoding: 1|1|1|1|0|0|0|1|shift(2)|imm(12)|Rn|Rd
+        // where shift is encoded as 0 and Rd is hardwired to 0x1F (XZR)
+ Emit32((DWORD) ((0xF1<<24) | ((0xFFF & imm)<<10) | (reg<<5) | (0x1F)) );
+
+ }
+ else
+ _ASSERTE(!"ARM64: NYI");
+}
+
+void StubLinkerCPU::EmitCmpReg(IntReg Xn, IntReg Xm)
+{
+
+ // Encoding for CMP (shifted register)
+ // sf|1|1|0|1|0|1|1|shift(2)|0|Xm(5)|imm(6)|Xn(5)|XZR(5)
+ // where
+ // sf = 1 for 64-bit variant,
+ // shift will be set to 00 (LSL)
+ // imm(6), which is the shift amount, will be set to 0
+
+ Emit32((DWORD) (0xEB<<24) | (Xm<<16) | (Xn<<5) | 0x1F);
+}
+
+void StubLinkerCPU::EmitCondFlagJump(CodeLabel * target, UINT cond)
+{
+ WRAPPER_NO_CONTRACT;
+ EmitLabelRef(target, reinterpret_cast<ConditionalBranchInstructionFormat&>(gConditionalBranchIF), cond);
+}
+
+void StubLinkerCPU::EmitJumpRegister(IntReg regTarget)
+{
+ // br regTarget
+ Emit32((DWORD) (0x3587C0<<10 | regTarget<<5));
+}
+
+void StubLinkerCPU::EmitProlog(unsigned short cIntRegArgs, unsigned short cVecRegArgs, unsigned short cCalleeSavedRegs, unsigned short cbStackSpace)
+{
+
+ _ASSERTE(!m_fProlog);
+
+ unsigned short numberOfEntriesOnStack = 2 + cIntRegArgs + cVecRegArgs + cCalleeSavedRegs; // 2 for fp, lr
+
+ // Stack needs to be 16 byte (2 qword) aligned. Compute the required padding before saving it
+ unsigned short totalPaddedFrameSize = static_cast<unsigned short>(ALIGN_UP(cbStackSpace + numberOfEntriesOnStack *sizeof(void*), 2*sizeof(void*)));
+ // The padding is going to be applied to the local stack
+ cbStackSpace = totalPaddedFrameSize - numberOfEntriesOnStack *sizeof(void*);
+
+ // Record the parameters of this prolog so that we can generate a matching epilog and unwind info.
+ DescribeProlog(cIntRegArgs, cVecRegArgs, cCalleeSavedRegs, cbStackSpace);
+
+
+
+    // N.B. Although a "sub sp, sp, #imm" can reach 4KB, we limit the frame to 504 bytes so that
+    // the prolog can be expressed efficiently in unwind codes: the largest offset in the typical
+    // unwind-info encodings we use is 504. Larger allocations would require adjusting SP in
+    // multiple strides, which would complicate prolog and epilog generation as well as
+    // unwind-info generation.
+ _ASSERTE((totalPaddedFrameSize <= 504) && "NYI:ARM64 Implement StubLinker prologs with larger than 504 bytes of frame size");
+ if (totalPaddedFrameSize > 504)
+ COMPlusThrow(kNotSupportedException);
+
+    // Here is what the stack looks like (lowest address at the top; the stack grows downward)
+ // [Low Address]
+ // +------------+
+ // SP -> | | <-+
+ // : : | Stack Frame, (i.e outgoing arguments) including padding
+ // | | <-+
+ // +------------+
+ // | FP |
+ // +------------+
+ // | LR |
+ // +------------+
+ // | X19 | <-+
+ // +------------+ |
+ // : : | Callee-saved registers
+ // +------------+ |
+ // | X28 | <-+
+ // +------------+
+ // | V0 | <-+
+ // +------------+ |
+ // : : | Vec Args
+ // +------------+ |
+ // | V7 | <-+
+ // +------------+
+ // | X0 | <-+
+ // +------------+ |
+ // : : | Int Args
+ // +------------+ |
+ // | X7 | <-+
+ // +------------+
+ // Old SP -> |[Stack Args]|
+ // [High Address]
+
+
+
+    // Regarding the order of operations in the prolog and epilog:
+    // If the prolog and the epilog match each other, we can simplify emitting the unwind codes
+    // and save a few bytes of unwind data by making the prolog and epilog share the same codes.
+    // In order to do that we need to make the epilog the exact reverse of the prolog.
+    // But we wouldn't want to restore the argument registers, as that's completely unnecessary.
+    // Besides, saving argument registers cannot be expressed by the unwind code encodings.
+    // So we'll make saving the argument registers the very last step of the prolog, skip
+    // restoring them in the epilog, and also skip reporting them to the OS.
+ //
+ // Another bit that we can save is resetting the frame pointer.
+ // This is not necessary when the SP doesn't get modified beyond prolog and epilog. (i.e no alloca/localloc)
+ // And in that case we don't need to report setting up the FP either.
+
+
+
+ // 1. Relocate SP
+ EmitSubImm(RegSp, RegSp, totalPaddedFrameSize);
+
+ unsigned cbOffset = 2*sizeof(void*) + cbStackSpace; // 2 is for fp,lr
+
+ // 2. Store callee-saved registers
+ _ASSERTE(cCalleeSavedRegs <= 10);
+ for (unsigned short i=0; i<(cCalleeSavedRegs/2)*2; i+=2)
+ EmitLoadStoreRegPairImm(eSTORE, IntReg(19+i), IntReg(19+i+1), RegSp, cbOffset + i*sizeof(void*));
+    if ((cCalleeSavedRegs % 2) == 1)
+        EmitLoadStoreRegImm(eSTORE, IntReg(19+cCalleeSavedRegs-1), RegSp, cbOffset + (cCalleeSavedRegs-1)*sizeof(void*));
+
+ // 3. Store FP/LR
+ EmitLoadStoreRegPairImm(eSTORE, RegFp, RegLr, RegSp, cbStackSpace);
+
+ // 4. Set the frame pointer
+ EmitMovReg(RegFp, RegSp);
+
+ // 5. Store floating point argument registers
+ cbOffset += cCalleeSavedRegs*sizeof(void*);
+ _ASSERTE(cVecRegArgs <= 8);
+ for (unsigned short i=0; i<(cVecRegArgs/2)*2; i+=2)
+ EmitLoadStoreRegPairImm(eSTORE, VecReg(i), VecReg(i+1), RegSp, cbOffset + i*sizeof(void*));
+ if ((cVecRegArgs % 2) == 1)
+ EmitLoadStoreRegImm(eSTORE, VecReg(cVecRegArgs-1), RegSp, cbOffset + (cVecRegArgs-1)*sizeof(void*));
+
+ // 6. Store int argument registers
+ cbOffset += cVecRegArgs*sizeof(void*);
+ _ASSERTE(cIntRegArgs <= 8);
+ for (unsigned short i=0 ; i<(cIntRegArgs/2)*2; i+=2)
+ EmitLoadStoreRegPairImm(eSTORE, IntReg(i), IntReg(i+1), RegSp, cbOffset + i*sizeof(void*));
+ if ((cIntRegArgs % 2) == 1)
+ EmitLoadStoreRegImm(eSTORE,IntReg(cIntRegArgs-1), RegSp, cbOffset + (cIntRegArgs-1)*sizeof(void*));
+}
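+
+// Illustrative frame for EmitProlog(2 /*int args*/, 0 /*vec args*/, 2 /*callee-saved*/, 8):
+// entries = 2+2+0+2 = 6, so the frame rounds up to 64 bytes (16 of them local space/padding),
+// and the emitted prolog is:
+//   sub  sp, sp, #64
+//   stp  x19, x20, [sp, #32]
+//   stp  fp, lr, [sp, #16]
+//   mov  fp, sp
+//   stp  x0, x1, [sp, #48]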
+
+void StubLinkerCPU::EmitEpilog()
+{
+ _ASSERTE(m_fProlog);
+
+ // 6. Restore int argument registers
+ // nop: We don't need to. They are scratch registers
+
+ // 5. Restore floating point argument registers
+ // nop: We don't need to. They are scratch registers
+
+ // 4. Restore the SP from FP
+ // N.B. We're assuming that the stublinker stubs doesn't do alloca, hence nop
+
+ // 3. Restore FP/LR
+ EmitLoadStoreRegPairImm(eLOAD, RegFp, RegLr, RegSp, m_cbStackSpace);
+
+ // 2. restore the calleeSavedRegisters
+ unsigned cbOffset = 2*sizeof(void*) + m_cbStackSpace; // 2 is for fp,lr
+    if ((m_cCalleeSavedRegs % 2) == 1)
+        EmitLoadStoreRegImm(eLOAD, IntReg(19+m_cCalleeSavedRegs-1), RegSp, cbOffset + (m_cCalleeSavedRegs-1)*sizeof(void*));
+ for (int i=(m_cCalleeSavedRegs/2)*2-2; i>=0; i-=2)
+ EmitLoadStoreRegPairImm(eLOAD, IntReg(19+i), IntReg(19+i+1), RegSp, cbOffset + i*sizeof(void*));
+
+ // 1. Restore SP
+ EmitAddImm(RegSp, RegSp, GetStackFrameSize());
+ EmitRet(RegLr);
+}
+
+void StubLinkerCPU::EmitRet(IntReg Xn)
+{
+ // Encoding: 1101011001011111000000| Rn |00000
+ Emit32((DWORD)(0xD65F0000 | (Xn << 5)));
+}
+
+void StubLinkerCPU::EmitLoadStoreRegPairImm(DWORD flags, IntReg Xt1, IntReg Xt2, IntReg Xn, int offset)
+{
+ EmitLoadStoreRegPairImm(flags, (int)Xt1, (int)Xt2, Xn, offset, FALSE);
+}
+
+void StubLinkerCPU::EmitLoadStoreRegPairImm(DWORD flags, VecReg Vt1, VecReg Vt2, IntReg Xn, int offset)
+{
+ EmitLoadStoreRegPairImm(flags, (int)Vt1, (int)Vt2, Xn, offset, TRUE);
+}
+
+void StubLinkerCPU::EmitLoadStoreRegPairImm(DWORD flags, int regNum1, int regNum2, IntReg Xn, int offset, BOOL isVec)
+{
+ // Encoding:
+ // [opc(2)] | 1 | 0 | 1 | [IsVec(1)] | 0 | [!postIndex(1)] | [writeBack(1)] | [isLoad(1)] | [imm(7)] | [Xt2(5)] | [Xn(5)] | [Xt1(5)]
+    // where opc=01 if isVec==1, and opc=10 otherwise
+
+ BOOL isLoad = flags & 1;
+ BOOL writeBack = flags & 2;
+ BOOL postIndex = flags & 4;
+ _ASSERTE((-512 <= offset) && (offset <= 504));
+ _ASSERTE((offset & 7) == 0);
+ int opc = isVec ? 1 : 2;
+ Emit32((DWORD) ( (opc<<30) | // opc
+ (0x5<<27) |
+ (!!isVec<<26) |
+ (!postIndex<<24) |
+ (!!writeBack<<23) |
+ (!!isLoad<<22) |
+ ((0x7F & (offset >> 3)) << 15) |
+ (regNum2 << 10) |
+ (Xn << 5) |
+ (regNum1)
+ ));
+
+}
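+
+// Worked example (illustrative): an eSTORE of the pair x19, x20 at [sp, #32] yields
+// opc=10, isLoad=0, imm7 = 32/8 = 4, i.e. 0xA90253F3 = "stp x19, x20, [sp, #32]".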
+
+
+void StubLinkerCPU::EmitLoadStoreRegImm(DWORD flags, IntReg Xt, IntReg Xn, int offset)
+{
+ EmitLoadStoreRegImm(flags, (int)Xt, Xn, offset, FALSE);
+}
+void StubLinkerCPU::EmitLoadStoreRegImm(DWORD flags, VecReg Vt, IntReg Xn, int offset)
+{
+ EmitLoadStoreRegImm(flags, (int)Vt, Xn, offset, TRUE);
+}
+
+void StubLinkerCPU::EmitLoadStoreRegImm(DWORD flags, int regNum, IntReg Xn, int offset, BOOL isVec)
+{
+ // Encoding:
+    // wb=1 : [size(2)=11] | 1 | 1 | 1 | [IsVec(1)] | 0 | [!writeBack(1)] | 0 | [isLoad(1)] | 0 | [imm(9)] | [!postIndex(1)] | 1 | [Xn(5)] | [Xt(5)]
+    // wb=0 : [size(2)=11] | 1 | 1 | 1 | [IsVec(1)] | 0 | [!writeBack(1)] | 0 | [isLoad(1)] | [ imm(12) ] | [Xn(5)] | [Xt(5)]
+ // where IsVec=0 for IntReg, 1 for VecReg
+
+ BOOL isLoad = flags & 1;
+ BOOL writeBack = flags & 2;
+ BOOL postIndex = flags & 4;
+ if (writeBack)
+ {
+ _ASSERTE(-256 <= offset && offset <= 255);
+ Emit32((DWORD) ( (0x1F<<27) |
+ (!!isVec<<26) |
+ (!writeBack<<24) |
+ (!!isLoad<<22) |
+ ((0x1FF & offset) << 12) |
+ (!postIndex<<11) |
+ (0x1<<10) |
+ (Xn<<5) |
+ (regNum))
+ );
+ }
+ else
+ {
+ _ASSERTE((0 <= offset) && (offset <= 32760));
+ _ASSERTE((offset & 7) == 0);
+ Emit32((DWORD) ( (0x1F<<27) |
+ (!!isVec<<26) |
+ (!writeBack<<24) |
+ (!!isLoad<<22) |
+ ((0xFFF & (offset >> 3)) << 10) |
+ (Xn<<5) |
+ (regNum))
+ );
+ }
+
+
+
+}
+
+// Load Register (Register Offset)
+void StubLinkerCPU::EmitLoadRegReg(IntReg Xt, IntReg Xn, IntReg Xm, DWORD option)
+{
+ Emit32((DWORD) ( (0xF8600800) |
+ (option << 12) |
+ (Xm << 16) |
+ (Xn << 5) |
+ (Xt)
+ ));
+
+}
+
+void StubLinkerCPU::EmitMovReg(IntReg Xd, IntReg Xm)
+{
+ if (Xd == RegSp || Xm == RegSp)
+ {
+ // This is a different encoding than the regular MOV (register) below.
+ // Note that RegSp and RegZero share the same encoding.
+ // TODO: check that the intention is not mov Xd, XZR
+ // MOV <Xd|SP>, <Xn|SP>
+ // which is equivalent to
+ // ADD <Xd|SP>, <Xn|SP>, #0
+ // Encoding: sf|0|0|1|0|0|0|1|shift(2)|imm(12)|Xn|Xd
+ // where
+ // sf = 1 -> 64-bit variant
+ // shift and imm12 are both 0
+ Emit32((DWORD) (0x91000000 | (Xm << 5) | Xd));
+ }
+ else
+ {
+ // MOV <Xd>, <Xm>
+        // which is equivalent to
+        // ORR <Xd>, XZR, <Xm>
+ // Encoding: sf|0|1|0|1|0|1|0|shift(2)|0|Xm|imm(6)|Xn|Xd
+ // where
+ // sf = 1 -> 64-bit variant
+ // shift and imm6 are both 0
+ // Xn = XZR
+ Emit32((DWORD) ( (0xAA << 24) | (Xm << 16) | (0x1F << 5) | Xd));
+ }
+}
+
+void StubLinkerCPU::EmitSubImm(IntReg Xd, IntReg Xn, unsigned int value)
+{
+ // sub <Xd|SP>, <Xn|SP>, #imm{, <shift>}
+ // Encoding: sf|1|0|1|0|0|0|1|shift(2)|imm(12)|Rn|Rd
+ // where <shift> is encoded as LSL #0 (no shift) when shift=00 and LSL #12 when shift=01. (No shift in this impl)
+ // imm(12) is an unsigned immediate in the range of 0 to 4095
+    // SP is encoded as register number 31 in Rn/Rd
+    // sf = 1 for 64-bit variant
+    _ASSERTE(value <= 4095);
+    Emit32((DWORD) ((0xD1 << 24) | (value << 10) | (Xn << 5) | Xd));
+
+}
+
+void StubLinkerCPU::EmitAddImm(IntReg Xd, IntReg Xn, unsigned int value)
+{
+    // add <Xd|SP>, <Xn|SP>, #imm{, <shift>}
+ // Encoding: sf|0|0|1|0|0|0|1|shift(2)|imm(12)|Rn|Rd
+ // where <shift> is encoded as LSL #0 (no shift) when shift=00 and LSL #12 when shift=01. (No shift in this impl)
+ // imm(12) is an unsigned immediate in the range of 0 to 4095
+    // SP is encoded as register number 31 in Rn/Rd
+    // sf = 1 for 64-bit variant
+    _ASSERTE(value <= 4095);
+ Emit32((DWORD) ((0x91 << 24) | (value << 10) | (Xn << 5) | Xd));
+}
+
+void StubLinkerCPU::EmitCallRegister(IntReg reg)
+{
+ // blr Xn
+ // Encoding: 1|1|0|1|0|1|1|0|0|0|1|1|1|1|1|1|0|0|0|0|0|Rn|0|0|0|0|0
+ Emit32((DWORD) (0xD63F0000 | (reg << 5)));
+}
+
+void StubLinkerCPU::Init()
+{
+ new (gConditionalBranchIF) ConditionalBranchInstructionFormat();
+ new (gBranchIF) BranchInstructionFormat();
+ new (gLoadFromLabelIF) LoadFromLabelInstructionFormat();
+}
+
+// Emits code to adjust arguments for static delegate target.
+VOID StubLinkerCPU::EmitShuffleThunk(ShuffleEntry *pShuffleEntryArray)
+{
+ // On entry x0 holds the delegate instance. Look up the real target address stored in the MethodPtrAux
+ // field and save it in x9. Tailcall to the target method after re-arranging the arguments
+ // ldr x9, [x0, #offsetof(DelegateObject, _methodPtrAux)]
+ EmitLoadStoreRegImm(eLOAD, IntReg(9), IntReg(0), DelegateObject::GetOffsetOfMethodPtrAux());
+
+ for (ShuffleEntry* pEntry = pShuffleEntryArray; pEntry->srcofs != ShuffleEntry::SENTINEL; pEntry++)
+ {
+ if (pEntry->srcofs & ShuffleEntry::REGMASK)
+ {
+ // If source is present in register then destination must also be a register
+ _ASSERTE(pEntry->dstofs & ShuffleEntry::REGMASK);
+
+ EmitMovReg(IntReg(pEntry->dstofs & ShuffleEntry::OFSMASK), IntReg(pEntry->srcofs & ShuffleEntry::OFSMASK));
+ }
+ else if (pEntry->dstofs & ShuffleEntry::REGMASK)
+ {
+ // source must be on the stack
+ _ASSERTE(!(pEntry->srcofs & ShuffleEntry::REGMASK));
+
+ EmitLoadStoreRegImm(eLOAD, IntReg(pEntry->dstofs & ShuffleEntry::OFSMASK), RegSp, pEntry->srcofs * sizeof(void*));
+ }
+ else
+ {
+ // source must be on the stack
+ _ASSERTE(!(pEntry->srcofs & ShuffleEntry::REGMASK));
+
+ // dest must be on the stack
+ _ASSERTE(!(pEntry->dstofs & ShuffleEntry::REGMASK));
+
+ EmitLoadStoreRegImm(eLOAD, IntReg(8), RegSp, pEntry->srcofs * sizeof(void*));
+ EmitLoadStoreRegImm(eSTORE, IntReg(8), RegSp, pEntry->dstofs * sizeof(void*));
+ }
+ }
+
+ // Tailcall to target
+ // br x9
+ EmitJumpRegister(IntReg(9));
+}
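+
+// For instance (illustrative): for an open static delegate whose target takes a single
+// pointer-sized argument, the shuffle array would hold one register-to-register entry
+// moving x1 -> x0 (the delegate 'this' in x0 is dropped), followed by the SENTINEL
+// terminator, so the thunk is just "ldr x9, [x0, #MethodPtrAux]; mov x0, x1; br x9".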
+
+void StubLinkerCPU::EmitCallLabel(CodeLabel *target, BOOL fTailCall, BOOL fIndirect)
+{
+ BranchInstructionFormat::VariationCodes variationCode = BranchInstructionFormat::VariationCodes::BIF_VAR_JUMP;
+ if (!fTailCall)
+ variationCode = static_cast<BranchInstructionFormat::VariationCodes>(variationCode | BranchInstructionFormat::VariationCodes::BIF_VAR_CALL);
+ if (fIndirect)
+ variationCode = static_cast<BranchInstructionFormat::VariationCodes>(variationCode | BranchInstructionFormat::VariationCodes::BIF_VAR_INDIRECT);
+
+ EmitLabelRef(target, reinterpret_cast<BranchInstructionFormat&>(gBranchIF), (UINT)variationCode);
+
+}
+
+void StubLinkerCPU::EmitCallManagedMethod(MethodDesc *pMD, BOOL fTailCall)
+{
+ // Use direct call if possible.
+ if (pMD->HasStableEntryPoint())
+ {
+ EmitCallLabel(NewExternalCodeLabel((LPVOID)pMD->GetStableEntryPoint()), fTailCall, FALSE);
+ }
+ else
+ {
+ EmitCallLabel(NewExternalCodeLabel((LPVOID)pMD->GetAddrOfSlot()), fTailCall, TRUE);
+ }
+}
+
+EXTERN_C UINT32 _tls_index;
+void StubLinkerCPU::EmitGetThreadInlined(IntReg Xt)
+{
+#ifdef FEATURE_IMPLICIT_TLS
+ // Trashes x8.
+ IntReg X8 = IntReg(8);
+ _ASSERTE(Xt != X8);
+
+ // Load the _tls_index
+ EmitLabelRef(NewExternalCodeLabel((LPVOID)&_tls_index), reinterpret_cast<LoadFromLabelInstructionFormat&>(gLoadFromLabelIF), X8);
+
+    // Load Teb->ThreadLocalStoragePointer into Xt (on Windows ARM64, x18 holds the TEB)
+    EmitLoadStoreRegImm(eLOAD, Xt, IntReg(18), offsetof(_TEB, ThreadLocalStoragePointer));
+
+ // index it with _tls_index, i.e Teb->ThreadLocalStoragePointer[_tls_index].
+ // This will give us the TLS section for the module on this thread's context
+ EmitLoadRegReg(Xt, Xt, X8, eLSL);
+
+ // read the Thread* from TLS section
+ EmitAddImm(Xt, Xt, OFFSETOF__TLS__tls_CurrentThread);
+ EmitLoadStoreRegImm(eLOAD, Xt, Xt, 0);
+#else
+ _ASSERTE(!"NYI:StubLinkerCPU::EmitGetThreadInlined");
+#endif
+
+}
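+
+// Illustrative C-level equivalent of the sequence above (a sketch, assuming Windows
+// ARM64 where x18 holds the TEB; OFFSETOF__TLS__tls_CurrentThread is the runtime's
+// TLS slot offset for the current Thread*):
+//   void** tlsArray = NtCurrentTeb()->ThreadLocalStoragePointer;
+//   BYTE*  tlsBase  = (BYTE*)tlsArray[_tls_index];
+//   Thread* pThread = *(Thread**)(tlsBase + OFFSETOF__TLS__tls_CurrentThread);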
+
+#ifndef CROSSGEN_COMPILE
+
+void StubLinkerCPU::EmitUnboxMethodStub(MethodDesc *pMD)
+{
+ _ASSERTE(!pMD->RequiresInstMethodDescArg());
+
+ // Address of the value type is address of the boxed instance plus sizeof(MethodDesc*).
+ // add x0, #sizeof(MethodDesc*)
+ EmitAddImm(IntReg(0), IntReg(0), sizeof(MethodDesc*));
+
+ // Tail call the real target.
+ EmitCallManagedMethod(pMD, TRUE /* tail call */);
+}
+
+#endif // CROSSGEN_COMPILE
+
+#endif // #ifndef DACCESS_COMPILE
diff --git a/src/vm/arm64/virtualcallstubcpu.hpp b/src/vm/arm64/virtualcallstubcpu.hpp
new file mode 100644
index 0000000000..9210a0b6fc
--- /dev/null
+++ b/src/vm/arm64/virtualcallstubcpu.hpp
@@ -0,0 +1,473 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// VirtualCallStubCpu.hpp
+//
+#ifndef _VIRTUAL_CALL_STUB_ARM_H
+#define _VIRTUAL_CALL_STUB_ARM_H
+
+#define DISPATCH_STUB_FIRST_DWORD 0xf9400008
+#define RESOLVE_STUB_FIRST_DWORD 0xF940000C
+
+struct ARM64EncodeHelpers
+{
+ inline static DWORD ADR_PATCH(DWORD offset)
+ {
+        // ADR encoding: op|immlo(2)|1 0 0 0 0|immhi(19)|Rd, with immlo at bits 30-29
+        // and immhi at bits 23-5. Split the offset accordingly.
+        DWORD immLO = offset & 0x03;
+
+        if (immLO == 0)
+            return (offset << 3);                            // (offset >> 2) << 5
+        else
+            return (immLO << 29) | ((offset - immLO) << 3);  // immlo | ((offset >> 2) << 5)
+ }
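+
+    // Worked example (illustrative): offset 0x10 is 4-byte aligned, so immLO == 0 and
+    // the patch value is 0x10 << 3 = 0x80, which places immhi = 4 at bit 5 of the ADR.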
+
+};
+
+#define USES_LOOKUP_STUBS 1
+
+struct LookupStub
+{
+ inline PCODE entryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_entryPoint[0]; }
+ inline size_t token() { LIMITED_METHOD_CONTRACT; return _token; }
+ inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(LookupStub); }
+private :
+ friend struct LookupHolder;
+
+ DWORD _entryPoint[4];
+ PCODE _resolveWorkerTarget;
+ size_t _token;
+};
+
+struct LookupHolder
+{
+private:
+ LookupStub _stub;
+public:
+ static void InitializeStatic() { }
+
+ void Initialize(PCODE resolveWorkerTarget, size_t dispatchToken)
+ {
+ // adr x9, _resolveWorkerTarget
+ // ldp x10, x12, [x9]
+ // br x10
+ // _resolveWorkerTarget
+ // _token
+ _stub._entryPoint[0] = 0x10000089;
+ _stub._entryPoint[1] = 0xa940312a;
+ _stub._entryPoint[2] = 0xd61f0140;
+        // the 4th element of the _entryPoint array is padding for 8-byte alignment
+ _stub._resolveWorkerTarget = resolveWorkerTarget;
+ _stub._token = dispatchToken;
+ }
+
+ LookupStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
+ static LookupHolder* FromLookupEntry(PCODE lookupEntry)
+ {
+ return (LookupHolder*) ( lookupEntry - offsetof(LookupHolder, _stub) - offsetof(LookupStub, _entryPoint) );
+ }
+};
+
+struct DispatchStub
+{
+ inline PCODE entryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_entryPoint[0]; }
+
+ inline size_t expectedMT() { LIMITED_METHOD_CONTRACT; return _expectedMT; }
+ inline PCODE implTarget() { LIMITED_METHOD_CONTRACT; return _implTarget; }
+ inline PCODE failTarget() { LIMITED_METHOD_CONTRACT; return _failTarget; }
+ inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(DispatchStub); }
+
+private:
+ friend struct DispatchHolder;
+
+ DWORD _entryPoint[8];
+ size_t _expectedMT;
+ PCODE _implTarget;
+ PCODE _failTarget;
+};
+
+struct DispatchHolder
+{
+ static void InitializeStatic() { }
+
+ void Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT)
+ {
+ // ldr x8, [x0] ; methodTable from object in x0
+ // adr x9, _expectedMT ; _expectedMT is at offset 28 from pc
+ // ldp x10, x12, [x9] ; x10 = _expectedMT & x12 = _implTarget
+ // cmp x8, x10
+ // bne failLabel
+ // br x12
+ // failLabel
+ // ldr x9, _failTarget ; _failTarget is at offset 24 from pc
+ // br x9
+ // _expectedMT
+ // _implTarget
+ // _failTarget
+
+ _stub._entryPoint[0] = DISPATCH_STUB_FIRST_DWORD; // 0xf9400008
+ _stub._entryPoint[1] = 0x100000e9;
+ _stub._entryPoint[2] = 0xa940312a;
+ _stub._entryPoint[3] = 0xeb0a011f;
+ _stub._entryPoint[4] = 0x54000041;
+ _stub._entryPoint[5] = 0xd61f0180;
+ _stub._entryPoint[6] = 0x580000c9;
+ _stub._entryPoint[7] = 0xd61f0120;
+
+ _stub._expectedMT = expectedMT;
+ _stub._implTarget = implTarget;
+ _stub._failTarget = failTarget;
+ }
+
+ DispatchStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
+
+ static DispatchHolder* FromDispatchEntry(PCODE dispatchEntry)
+ {
+ LIMITED_METHOD_CONTRACT;
+ DispatchHolder* dispatchHolder = (DispatchHolder*) ( dispatchEntry - offsetof(DispatchHolder, _stub) - offsetof(DispatchStub, _entryPoint) );
+ return dispatchHolder;
+ }
+
+private:
+ DispatchStub _stub;
+};
+
+struct ResolveStub
+{
+ inline PCODE failEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_failEntryPoint[0]; }
+ inline PCODE resolveEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_resolveEntryPoint[0]; }
+ inline PCODE slowEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_slowEntryPoint[0]; }
+ inline size_t token() { LIMITED_METHOD_CONTRACT; return _token; }
+ inline INT32* pCounter() { LIMITED_METHOD_CONTRACT; return _pCounter; }
+
+ inline UINT32 hashedToken() { LIMITED_METHOD_CONTRACT; return _hashedToken >> LOG2_PTRSIZE; }
+ inline size_t cacheAddress() { LIMITED_METHOD_CONTRACT; return _cacheAddress; }
+ inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(ResolveStub); }
+
+private:
+ friend struct ResolveHolder;
+ const static int resolveEntryPointLen = 19;
+ const static int slowEntryPointLen = 4;
+ const static int failEntryPointLen = 8;
+
+ DWORD _resolveEntryPoint[resolveEntryPointLen];
+ DWORD _slowEntryPoint[slowEntryPointLen];
+ DWORD _failEntryPoint[failEntryPointLen];
+ INT32* _pCounter; //Base of the Data Region
+ size_t _cacheAddress; // lookupCache
+ size_t _token;
+ PCODE _resolveWorkerTarget;
+ UINT32 _hashedToken;
+};
+
+struct ResolveHolder
+{
+ static void InitializeStatic() { }
+
+ void Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+ size_t dispatchToken, UINT32 hashedToken,
+ void * cacheAddr, INT32 * counterAddr)
+ {
+ int n=0;
+ DWORD offset;
+ int br_nextEntry[2];
+/******** Rough register convention used in this routine
+ ;;x9 current variable
+ ;;x10 base address of the data region
+ ;;x11 indirection cell
+ ;;x12 passed MT
+ ;;X13 data read from data region
+ ;;X14 computed address into data region
+ ;;X15 this._token
+ ;;cachemask => [CALL_STUB_CACHE_MASK * sizeof(void*)]
+*********/
+ // Called directly by JITTED code
+ // ResolveStub._resolveEntryPoint(x0:Object*, x1 ...,r7, x11:IndirectionCellAndFlags)
+ // {
+ // MethodTable mt = x0.m_pMethTab;
+ // int i = ((mt + (mt >> 12)) ^ this._hashedToken) & _cacheMask
+ // ResolveCacheElem e = this._cacheAddress + i
+ // do
+ // {
+ // if (mt == e.pMT && this._token == e.token) (e.target)(x0, x1,...,x7);
+ // e = e.pNext;
+ // } while (e != null)
+ // (this._slowEntryPoint)(x0, x1,.., x7, x11);
+ // }
+ //
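+ // A C++ sketch of the cache lookup the code below encodes (illustrative
+ // only; note the stored _hashedToken is pre-shifted by LOG2_PTRSIZE, so
+ // the masked result is already a byte offset into the cache):
+ //
+ //   size_t hash = mt + (mt >> 12);
+ //   size_t i    = (hash ^ _hashedToken) & (CALL_STUB_CACHE_MASK * sizeof(void*));
+ //   ResolveCacheElem * e = *(ResolveCacheElem **)(_cacheAddress + i);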
+
+#define Dataregionbase _pCounter
+#define DATA_OFFSET(_fieldHigh) (DWORD)((offsetof(ResolveStub, _fieldHigh ) - offsetof(ResolveStub, Dataregionbase)) & 0xffffffff)
+#define PC_REL_OFFSET(_field) (DWORD)((offsetof(ResolveStub, _field) - (offsetof(ResolveStub, _resolveEntryPoint[n]))) & 0xffffffff)
+
+ //ldr x12, [x0,#Object.m_pMethTab ] ; methodTable from object in x0
+ _stub._resolveEntryPoint[n++] = RESOLVE_STUB_FIRST_DWORD; //0xF940000C
+
+ // ;; Compute i = ((mt + mt >> 12) ^ this._hashedToken) & _cacheMask
+
+ //add x9, x12, x12 lsr #12
+ _stub._resolveEntryPoint[n++] = 0x8B4C3189;
+
+ //;;adr x10, #Dataregionbase of ResolveStub
+ _stub._resolveEntryPoint[n++] = 0x1000000A | ARM64EncodeHelpers::ADR_PATCH(PC_REL_OFFSET(Dataregionbase));
+
+ //w13- this._hashedToken
+ //ldr w13, [x10 + DATA_OFFSET(_hashedToken)]
+ offset = DATA_OFFSET(_hashedToken);
+ _ASSERTE(offset >=0 && offset%8 == 0);
+ _stub._resolveEntryPoint[n++] = 0xB940014D | offset<<8;
+
+ //eor x9,x9,x13
+ _stub._resolveEntryPoint[n++] = 0xCA0D0129;
+
+ _ASSERTE(CALL_STUB_CACHE_MASK * sizeof(void*) == 0x7FF8);
+ //x9-i
+ //and x9,x9,#cachemask
+ _stub._resolveEntryPoint[n++] = 0x927D2D29;
+
+ //;; ResolveCacheElem e = this._cacheAddress + i
+ //
+ //ldr x13, [x10 + DATA_OFFSET(_cacheAddress)]
+ offset=DATA_OFFSET(_cacheAddress);
+ _ASSERTE(offset >=0 && offset%8 == 0);
+ _stub._resolveEntryPoint[n++] = 0xF940014D | offset<<7;
+
+ //ldr x9, [x13, x9] ;; x9 = e = this._cacheAddress + i
+ _stub._resolveEntryPoint[n++] = 0xF86969A9 ;
+
+ //hoist the loop-invariant this._token into x15
+ //
+ //ldr x15, [x10 + DATA_OFFSET(_token)]
+ offset = DATA_OFFSET(_token);
+ _ASSERTE(offset >=0 && offset%8 == 0);
+ _stub._resolveEntryPoint[n++] = 0xF940014F | offset<<7;
+
+ int loop=n;
+ //do {
+
+ //;; Check mt == e.pMT
+ //
+ //
+ //ldr x13, [x9, #offsetof(ResolveCacheElem, pMT) ]
+ offset = offsetof(ResolveCacheElem, pMT) & 0x000001ff;
+ _ASSERTE(offset >=0 && offset%8 == 0);
+ _stub._resolveEntryPoint[n++] = 0xF940012D | offset<<7;
+
+ //cmp x12, x13
+ _stub._resolveEntryPoint[n++] = 0xEB0D019F;
+
+ //;; bne nextEntry
+ //placeholder for the above instruction
+ br_nextEntry[0]=n++;
+
+ //;; Check this._token == e.token
+ //x15: this._token
+ //
+ //ldr x13, [x9, #offsetof(ResolveCacheElem, token) ]
+ offset = offsetof(ResolveCacheElem, token) & 0xffffffff;
+ _ASSERTE(offset >=0 && offset%8 == 0);
+ _stub._resolveEntryPoint[n++] = 0xF940012D | offset<<7;
+
+ //cmp x15, x13
+ _stub._resolveEntryPoint[n++] = 0xEB0D01FF;
+
+ //;; bne nextEntry
+ //placeholder for the above instruction
+ br_nextEntry[1]=n++;
+
+ //ldr x12, [x9, #offsetof(ResolveCacheElem, target) ]
+ offset = offsetof(ResolveCacheElem, target) & 0xffffffff;
+ _ASSERTE(offset >=0 && offset%8 == 0);
+ _stub._resolveEntryPoint[n++] = 0xF940012C | offset<<7;
+
+ // ;; Branch to e.target
+ // br x12
+ _stub._resolveEntryPoint[n++] = 0xD61F0180;
+
+ //;;nextEntry:
+ //back-patch the branch sites now that we know the offset to nextEntry
+ //bne #offset
+ for(auto i: br_nextEntry)
+ {
+ _stub._resolveEntryPoint[i] = 0x54000001 | ((((n-i)*sizeof(DWORD))<<3) & 0x3FFFFFF);
+ }
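+
+ // Encoding note (assuming the standard A64 B.cond layout): the signed
+ // word offset imm19 sits at bits [23:5], so a 4-byte-aligned byte offset
+ // is encoded as (bytes >> 2) << 5 == bytes << 3. For instance, a branch
+ // over 6 instructions (24 bytes) gives 0x54000001 | (24 << 3) == 0x540000C1.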
+
+ //;; e = e.pNext;
+ //ldr x9, [x9, #offsetof(ResolveCacheElem,pNext ) ]
+ offset = offsetof(ResolveCacheElem, pNext) & 0xffffffff;
+ _ASSERTE(offset >=0 && offset%8 == 0);
+ _stub._resolveEntryPoint[n++] = 0xF9400129 | offset<<7;
+
+ //;; } while(e != null);
+ //;; cbnz x9, loop
+ offset = (DWORD)(loop - n)*sizeof(DWORD);
+ _stub._resolveEntryPoint[n++] = 0xB5000009 | ((offset<<3) & 0xFFFFF0);
+
+
+ _ASSERTE(n == ResolveStub::resolveEntryPointLen);
+ _ASSERTE(_stub._resolveEntryPoint + n == _stub._slowEntryPoint);
+
+ // ResolveStub._slowEntryPoint(x0:MethodToken, x1..x7, x11:IndirectionCellAndFlags)
+ // {
+ // x12 = this._token;
+ // this._resolveWorkerTarget(x0,.., x7, x12);
+ // }
+
+#undef PC_REL_OFFSET
+#define PC_REL_OFFSET(_field) (DWORD)((offsetof(ResolveStub, _field) - (offsetof(ResolveStub, _slowEntryPoint[n]))) & 0xffffffff )
+ n = 0;
+ // ;;slowEntryPoint:
+ // ;;fall through to the slow case
+
+ //;;adr x10, #Dataregionbase
+ _stub._slowEntryPoint[n++] = 0x1000000A | ARM64EncodeHelpers::ADR_PATCH(PC_REL_OFFSET(Dataregionbase));
+
+ //ldr x12, [x10 , DATA_OFFSET(_token)]
+ offset=DATA_OFFSET(_token);
+ _ASSERTE(offset >=0 && offset%8 == 0);
+ _stub._slowEntryPoint[n++] = 0xF940014C | (offset<<7); // scaled imm12 = offset/8, at bit 10
+
+ //
+ //ldr x9, [x10 , DATA_OFFSET(_resolveWorkerTarget)]
+ offset=DATA_OFFSET(_resolveWorkerTarget);
+ _ASSERTE(offset >=0 && offset%8 == 0);
+ _stub._slowEntryPoint[n++] = 0xF9400149 | (offset<<7); // scaled imm12 = offset/8, at bit 10
+
+ // br x9
+ _stub._slowEntryPoint[n++] = 0xD61F0120;
+
+ _ASSERTE(n == ResolveStub::slowEntryPointLen);
+ // ResolveStub._failEntryPoint(x0:MethodToken, x1,.., x7, x11:IndirectionCellAndFlags)
+ // {
+ // if(--*(this._pCounter) < 0) x11 = x11 | SDF_ResolveBackPatch;
+ // this._resolveEntryPoint(x0,..,x7);
+ // }
+
+#undef PC_REL_OFFSET //NOTE Offset can be negative
+#define PC_REL_OFFSET(_field) (DWORD)((offsetof(ResolveStub, _field) - (offsetof(ResolveStub, _failEntryPoint[n]))) & 0xffffffff)
+ n = 0;
+
+
+ //;;failEntryPoint
+ //;;adr x10, #Dataregionbase
+ _stub._failEntryPoint[n++] = 0x1000000A | ARM64EncodeHelpers::ADR_PATCH(PC_REL_OFFSET(Dataregionbase));
+
+ //
+ //ldr x13, [x10]
+ offset=DATA_OFFSET(_pCounter);
+ _ASSERTE(offset >=0 && offset%8 == 0);
+ _stub._failEntryPoint[n++] = 0xF940014D | offset<<7;
+
+ //ldr x9, [x13]
+ _stub._failEntryPoint[n++] = 0xF94001A9;
+ //subs x9,x9,#1
+ _stub._failEntryPoint[n++] = 0xF1000529;
+ //str x9, [x13]
+ _stub._failEntryPoint[n++] = 0xF90001A9;
+
+ //;;bge resolveEntryPoint
+ offset = PC_REL_OFFSET(_resolveEntryPoint);
+ _stub._failEntryPoint[n++] = 0x5400000A | ((offset <<3)& 0x00FFFFF0) ;
+
+ // ;; orr x11, x11, SDF_ResolveBackPatch
+ // orr x11, x11, #1
+ _ASSERTE(SDF_ResolveBackPatch == 0x1);
+ _stub._failEntryPoint[n++] = 0xB240016B;
+
+ //;;b resolveEntryPoint:
+ offset = PC_REL_OFFSET(_resolveEntryPoint);
+ _stub._failEntryPoint[n++] = 0x14000000 | ((offset>>2) & 0x3FFFFFF);
+
+ _ASSERTE(n == ResolveStub::failEntryPointLen);
+ _stub._pCounter = counterAddr;
+ _stub._hashedToken = hashedToken << LOG2_PTRSIZE;
+ _stub._cacheAddress = (size_t) cacheAddr;
+ _stub._token = dispatchToken;
+ _stub._resolveWorkerTarget = resolveWorkerTarget;
+
+ _ASSERTE(resolveWorkerTarget == (PCODE)ResolveWorkerChainLookupAsmStub);
+ _ASSERTE(patcherTarget == NULL);
+
+#undef DATA_OFFSET
+#undef PC_REL_OFFSET
+#undef Dataregionbase
+ }
+
+ ResolveStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
+
+ static ResolveHolder* FromFailEntry(PCODE failEntry);
+ static ResolveHolder* FromResolveEntry(PCODE resolveEntry);
+private:
+ ResolveStub _stub;
+};
+
+#ifdef DECLARE_DATA
+
+#ifndef DACCESS_COMPILE
+ResolveHolder* ResolveHolder::FromFailEntry(PCODE failEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+ ResolveHolder* resolveHolder = (ResolveHolder*) ( failEntry - offsetof(ResolveHolder, _stub) - offsetof(ResolveStub, _failEntryPoint) );
+ return resolveHolder;
+}
+
+ResolveHolder* ResolveHolder::FromResolveEntry(PCODE resolveEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+ ResolveHolder* resolveHolder = (ResolveHolder*) ( resolveEntry - offsetof(ResolveHolder, _stub) - offsetof(ResolveStub, _resolveEntryPoint) );
+ return resolveHolder;
+}
+
+
+#endif // DACCESS_COMPILE
+
+VirtualCallStubManager::StubKind VirtualCallStubManager::predictStubKind(PCODE stubStartAddress)
+{
+
+ SUPPORTS_DAC;
+#ifdef DACCESS_COMPILE
+
+ _ASSERTE(!"ARM64:NYI");
+ return SK_BREAKPOINT; // Dac always uses the slower lookup
+
+#else
+
+ StubKind stubKind = SK_UNKNOWN;
+ TADDR pInstr = PCODEToPINSTR(stubStartAddress);
+
+ EX_TRY
+ {
+ // If stubStartAddress is completely bogus, then this might AV,
+ // so we protect it with SEH. An AV here is OK.
+ AVInRuntimeImplOkayHolder AVOkay;
+
+ DWORD firstDword = *((DWORD*) pInstr);
+
+ if (firstDword == DISPATCH_STUB_FIRST_DWORD) // assembly of first instruction of DispatchStub : ldr x8, [x0]
+ {
+ stubKind = SK_DISPATCH;
+ }
+ else if (firstDword == RESOLVE_STUB_FIRST_DWORD) // assembly of first instruction of ResolveStub : ldr x12, [x0,#Object.m_pMethTab ]
+ {
+ stubKind = SK_RESOLVE;
+ }
+ else if (firstDword == 0x10000089) // assembly of first instruction of LookupStub : adr x9, _resolveWorkerTarget
+ {
+ stubKind = SK_LOOKUP;
+ }
+ }
+ EX_CATCH
+ {
+ stubKind = SK_UNKNOWN;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return stubKind;
+
+#endif // DACCESS_COMPILE
+}
+
+#endif //DECLARE_DATA
+
+#endif // _VIRTUAL_CALL_STUB_ARM_H
diff --git a/src/vm/armsinglestepper.h b/src/vm/armsinglestepper.h
new file mode 100644
index 0000000000..2edd6b9026
--- /dev/null
+++ b/src/vm/armsinglestepper.h
@@ -0,0 +1,153 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// Emulate hardware single-step on ARM.
+//
+
+#ifndef __ARM_SINGLE_STEPPER_INCLUDED
+#define __ARM_SINGLE_STEPPER_INCLUDED
+
+// Class abstracting state kept for the IT instruction within the CPSR.
+class ITState
+{
+public:
+ ITState();
+
+ // Must call Get() (or Init()) to initialize this instance from a specific context before calling any
+ // other (non-static) method.
+ void Get(T_CONTEXT *pCtx);
+
+ // Must call Init() (or Get()) to initialize this instance from a raw byte value before calling any other
+ // (non-static) method.
+ void Init(BYTE bState);
+
+ // Does the current IT state indicate we're executing within an IT block?
+ bool InITBlock();
+
+ // Only valid within an IT block. Returns the condition code which will be evaluated for the current
+ // instruction.
+ DWORD CurrentCondition();
+
+ // Transition the IT state to that for the next instruction.
+ void Advance();
+
+ // Write the current IT state back into the given context.
+ void Set(T_CONTEXT *pCtx);
+
+ // Clear IT state (i.e. force execution to be outside of an IT block) in the given context.
+ static void Clear(T_CONTEXT *pCtx);
+
+private:
+ BYTE m_bITState;
+#ifdef _DEBUG
+ bool m_fValid;
+#endif
+};
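+
+// A minimal sketch of the ITSTATE advance rule that Advance() implements,
+// assuming the architectural T32 layout (base condition in bits [7:5],
+// continuation mask in bits [4:0]); illustrative only, not used here:
+//
+//   BYTE AdvanceITState(BYTE bState)
+//   {
+//       // Last instruction of the block: the low mask bits are exhausted,
+//       // so the whole state collapses to zero (outside any IT block).
+//       if ((bState & 0x07) == 0)
+//           return 0;
+//       // Otherwise shift the mask up one position, keeping the condition bits.
+//       return (BYTE)((bState & 0xE0) | ((bState << 1) & 0x1F));
+//   }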
+
+// Class that encapsulates the context needed to single step one thread.
+class ArmSingleStepper
+{
+public:
+ ArmSingleStepper();
+ ~ArmSingleStepper();
+
+ // Given the context with which a thread will be resumed, modify that context such that resuming the
+ // thread will execute a single instruction before raising an EXCEPTION_BREAKPOINT. The thread context
+ // must be cleaned up via the Fixup method below before any further exception processing can occur (at
+ // which point the caller can behave as though EXCEPTION_SINGLE_STEP was raised).
+ void Enable();
+
+ void Bypass(DWORD ip, WORD opcode1, WORD opcode2);
+
+ void Apply(T_CONTEXT *pCtx);
+
+ // Disables the single stepper.
+ void Disable();
+
+ // Returns whether or not the stepper is enabled.
+ inline bool IsEnabled() const
+ {
+ return m_state == Enabled || m_state == Applied;
+ }
+
+ // When called in response to an exception (preferably in a first chance vectored handler before anyone
+ // else has looked at the thread context) this method will (a) determine whether this exception was raised
+ // by a call to Enable() above, in which case true will be returned and (b) perform final fixup of the
+ // thread context passed in to complete the emulation of a hardware single step. Note that this routine
+ // must be called even if the exception code is not EXCEPTION_BREAKPOINT since the instruction stepped
+ // might have raised its own exception (e.g. A/V) and we still need to fix the thread context in this
+ // case.
+ bool Fixup(T_CONTEXT *pCtx, DWORD dwExceptionCode);
+
+private:
+ enum
+ {
+ kMaxCodeBuffer = 2 + 3 + 1, // WORD slots in our redirect buffer (2 for current instruction, 3 for
+ // breakpoint instructions used to pad out slots in an IT block and one
+ // for the final breakpoint)
+ kBreakpointOp = 0xdefe, // Opcode for the breakpoint instruction used on CoreARM
+ };
+
+ // Bit numbers of the condition flags in the CPSR.
+ enum APSRBits
+ {
+ APSR_N = 31,
+ APSR_Z = 30,
+ APSR_C = 29,
+ APSR_V = 28,
+ };
+
+ enum StepperState
+ {
+ Disabled,
+ Enabled,
+ Applied
+ };
+
+ DWORD m_originalPc; // PC value before stepping
+ ITState m_originalITState; // IT state before stepping
+ DWORD m_targetPc; // Final PC value after stepping if no exception is raised
+ WORD *m_rgCode; // Buffer execution is redirected to during the step
+ StepperState m_state; // Tracks whether the stepper is Enabled, Disabled, or enabled and applied to a context
+ WORD m_opcodes[2]; // Set if we are emulating a non-IT instruction
+ bool m_fEmulatedITInstruction; // Set to true if Enable() emulated an IT instruction
+ bool m_fRedirectedPc; // Used during TryEmulate() to track where PC was written
+ bool m_fEmulate;
+ bool m_fBypass;
+ bool m_fSkipIT; // We are skipping an instruction due to an IT condition.
+
+ // Initializes m_rgCode. Not thread safe.
+ void Init();
+
+ // Count the number of bits set in a DWORD.
+ static DWORD BitCount(DWORD dwValue);
+
+ // Returns true if the current context indicates the ARM condition specified holds.
+ bool ConditionHolds(T_CONTEXT *pCtx, DWORD cond);
+
+ // Get the current value of a register. PC (register 15) is always reported as the current instruction PC
+ // + 4 as per the ARM architecture.
+ DWORD GetReg(T_CONTEXT *pCtx, DWORD reg);
+
+ // Set the current value of a register. If the PC (register 15) is set then m_fRedirectedPc is set to
+ // true.
+ void SetReg(T_CONTEXT *pCtx, DWORD reg, DWORD value);
+
+ // Attempt to read a 1, 2 or 4 byte value from memory, zero or sign extend it to a 4-byte value and place
+ // that value into the buffer pointed at by pdwResult. Returns false if attempting to read the location
+ // caused a fault.
+ bool GetMem(DWORD *pdwResult, DWORD_PTR pAddress, DWORD cbSize, bool fSignExtend);
+
+ // Parse the instruction whose first word is given in opcode1 (if the instruction is 32-bit TryEmulate
+ // will fetch the second word using the value of the PC stored in the current context). If the instruction
+ // reads or writes the PC or is the IT instruction then it will be emulated by updating the thread context
+ // appropriately and true will be returned. If the instruction is not one of those cases (or it is but we
+ // faulted trying to read memory during the emulation) no state is updated and false is returned instead.
+ bool TryEmulate(T_CONTEXT *pCtx, WORD opcode1, WORD opcode2, bool execute);
+};
+
+#endif // !__ARM_SINGLE_STEPPER_INCLUDED
diff --git a/src/vm/array.cpp b/src/vm/array.cpp
new file mode 100644
index 0000000000..f4eb329faa
--- /dev/null
+++ b/src/vm/array.cpp
@@ -0,0 +1,1440 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// File: ARRAY.CPP
+//
+
+//
+// File which contains a bunch of array-related things.
+//
+
+#include "common.h"
+
+#include "clsload.hpp"
+#include "method.hpp"
+#include "class.h"
+#include "object.h"
+#include "field.h"
+#include "util.hpp"
+#include "excep.h"
+#include "siginfo.hpp"
+#include "threads.h"
+#include "stublink.h"
+#include "stubcache.h"
+#include "dllimport.h"
+#include "gcdesc.h"
+#include "jitinterface.h"
+#include "eeconfig.h"
+#include "log.h"
+#include "fieldmarshaler.h"
+#include "cgensys.h"
+#include "array.h"
+#include "typestring.h"
+#include "sigbuilder.h"
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4244)
+#endif // _MSC_VER
+
+#define MAX_SIZE_FOR_VALUECLASS_IN_ARRAY 0xffff
+#define MAX_PTRS_FOR_VALUECLASSS_IN_ARRAY 0xffff
+
+
+/*****************************************************************************************/
+LPCUTF8 ArrayMethodDesc::GetMethodName()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ switch (GetArrayFuncIndex())
+ {
+ case ARRAY_FUNC_GET:
+ return "Get";
+ case ARRAY_FUNC_SET:
+ return "Set";
+ case ARRAY_FUNC_ADDRESS:
+ return "Address";
+ default:
+ return COR_CTOR_METHOD_NAME; // ".ctor"
+ }
+}
+
+/*****************************************************************************************/
+DWORD ArrayMethodDesc::GetAttrs()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (GetArrayFuncIndex() >= ARRAY_FUNC_CTOR) ? (mdPublic | mdRTSpecialName) : mdPublic;
+}
+
+/*****************************************************************************************/
+CorInfoIntrinsics ArrayMethodDesc::GetIntrinsicID()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch (GetArrayFuncIndex())
+ {
+ case ARRAY_FUNC_GET:
+ return CORINFO_INTRINSIC_Array_Get;
+ case ARRAY_FUNC_SET:
+ return CORINFO_INTRINSIC_Array_Set;
+ case ARRAY_FUNC_ADDRESS:
+ return CORINFO_INTRINSIC_Array_Address;
+ default:
+ return CORINFO_INTRINSIC_Illegal;
+ }
+}
+
+#ifndef DACCESS_COMPILE
+
+/*****************************************************************************************/
+
+//
+// Generate a short sig (descr) for the array accessors
+//
+
+VOID ArrayClass::GenerateArrayAccessorCallSig(
+ DWORD dwRank,
+ DWORD dwFuncType, // Load, store, or <init>
+ PCCOR_SIGNATURE *ppSig,// Generated signature
+ DWORD * pcSig, // Generated signature size
+ LoaderAllocator *pLoaderAllocator,
+ AllocMemTracker *pamTracker
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+ ,BOOL fForStubAsIL
+#endif
+ )
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(dwRank >= 1 && dwRank < 0x3ffff);
+ } CONTRACTL_END;
+
+ PCOR_SIGNATURE pSig;
+ PCOR_SIGNATURE pSigMemory;
+ DWORD dwCallSigSize = dwRank;
+ DWORD dwArgCount = (dwFuncType == ArrayMethodDesc::ARRAY_FUNC_SET) ? dwRank+1 : dwRank;
+ DWORD i;
+
+ switch (dwFuncType)
+ {
+ // <callconv> <argcount> VAR 0 I4 , ... , I4
+ case ArrayMethodDesc::ARRAY_FUNC_GET:
+ dwCallSigSize += 4;
+ break;
+
+ // <callconv> <argcount> VOID I4 , ... , I4
+ case ArrayMethodDesc::ARRAY_FUNC_CTOR:
+ dwCallSigSize += 3;
+ break;
+
+ // <callconv> <argcount> VOID I4 , ... , I4 VAR 0
+ case ArrayMethodDesc::ARRAY_FUNC_SET:
+ dwCallSigSize += 5;
+ break;
+
+ // <callconv> <argcount> BYREF VAR 0 I4 , ... , I4
+ case ArrayMethodDesc::ARRAY_FUNC_ADDRESS:
+ dwCallSigSize += 5;
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+ if(fForStubAsIL) {dwArgCount++; dwCallSigSize++;}
+#endif
+ break;
+ }
+
+ // If the argument count is larger than 127 then it will require 2 bytes for the encoding
+ if (dwArgCount > 0x7f)
+ dwCallSigSize++;
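+
+ // Worked example (illustrative): a rank-2 Get accessor needs
+ // dwRank + 4 == 6 bytes: callconv(1) + argcount(1) + ELEMENT_TYPE_VAR(1)
+ // + generic var index 0 (1) + two ELEMENT_TYPE_I4 (2).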
+
+ pSigMemory = (PCOR_SIGNATURE)pamTracker->Track(pLoaderAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(dwCallSigSize)));
+
+ pSig = pSigMemory;
+ BYTE callConv = IMAGE_CEE_CS_CALLCONV_DEFAULT + IMAGE_CEE_CS_CALLCONV_HASTHIS;
+
+ if (dwFuncType == ArrayMethodDesc::ARRAY_FUNC_ADDRESS
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+ && !fForStubAsIL
+#endif
+ )
+ {
+ callConv |= CORINFO_CALLCONV_PARAMTYPE; // Address routine needs special hidden arg
+ }
+
+ *pSig++ = callConv;
+ pSig += CorSigCompressData(dwArgCount, pSig); // Argument count
+ switch (dwFuncType)
+ {
+ case ArrayMethodDesc::ARRAY_FUNC_GET:
+ *pSig++ = ELEMENT_TYPE_VAR;
+ *pSig++ = 0; // variable 0
+ break;
+ case ArrayMethodDesc::ARRAY_FUNC_CTOR:
+ *pSig++ = (BYTE) ELEMENT_TYPE_VOID; // Return type
+ break;
+ case ArrayMethodDesc::ARRAY_FUNC_SET:
+ *pSig++ = (BYTE) ELEMENT_TYPE_VOID; // Return type
+ break;
+ case ArrayMethodDesc::ARRAY_FUNC_ADDRESS:
+ *pSig++ = (BYTE) ELEMENT_TYPE_BYREF; // Return type
+ *pSig++ = ELEMENT_TYPE_VAR;
+ *pSig++ = 0; // variable 0
+ break;
+ }
+
+#if defined(FEATURE_ARRAYSTUB_AS_IL ) && !defined(_TARGET_X86_)
+ if(dwFuncType == ArrayMethodDesc::ARRAY_FUNC_ADDRESS && fForStubAsIL)
+ {
+ *pSig++ = ELEMENT_TYPE_I;
+ }
+#endif
+
+ for (i = 0; i < dwRank; i++)
+ *pSig++ = ELEMENT_TYPE_I4;
+
+ if (dwFuncType == ArrayMethodDesc::ARRAY_FUNC_SET)
+ {
+ *pSig++ = ELEMENT_TYPE_VAR;
+ *pSig++ = 0; // variable 0
+ }
+#if defined(FEATURE_ARRAYSTUB_AS_IL ) && defined(_TARGET_X86_)
+ else if(dwFuncType == ArrayMethodDesc::ARRAY_FUNC_ADDRESS && fForStubAsIL)
+ {
+ *pSig++ = ELEMENT_TYPE_I;
+ }
+#endif
+
+ // Make sure the sig came out exactly as large as we expected
+ _ASSERTE(pSig == pSigMemory + dwCallSigSize);
+
+ *ppSig = pSigMemory;
+ *pcSig = (DWORD)(pSig-pSigMemory);
+}
+
+//
+// Allocate a new MethodDesc for a fake array method.
+//
+// Based on code in class.cpp.
+//
+void ArrayClass::InitArrayMethodDesc(
+ ArrayMethodDesc *pNewMD,
+ PCCOR_SIGNATURE pShortSig,
+ DWORD cShortSig,
+ DWORD dwVtableSlot,
+ LoaderAllocator *pLoaderAllocator,
+ AllocMemTracker *pamTracker)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Note: The method desc memory is zero initialized
+
+ pNewMD->SetMemberDef(0);
+
+ pNewMD->SetSlot((WORD) dwVtableSlot);
+ pNewMD->SetStoredMethodSig(pShortSig, cShortSig);
+
+ _ASSERTE(!pNewMD->MayHaveNativeCode());
+ pNewMD->SetTemporaryEntryPoint(pLoaderAllocator, pamTracker);
+
+#ifdef _DEBUG
+ _ASSERTE(pNewMD->GetMethodName() && GetDebugClassName());
+ pNewMD->m_pszDebugMethodName = pNewMD->GetMethodName();
+ pNewMD->m_pszDebugClassName = GetDebugClassName();
+ pNewMD->m_pDebugMethodTable.SetValue(pNewMD->GetMethodTable());
+#endif // _DEBUG
+}
+
+/*****************************************************************************************/
+MethodTable* Module::CreateArrayMethodTable(TypeHandle elemTypeHnd, CorElementType arrayKind, unsigned Rank, AllocMemTracker *pamTracker)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(Rank > 0);
+ } CONTRACTL_END;
+
+ MethodTable * pElemMT = elemTypeHnd.GetMethodTable();
+
+ CorElementType elemType = elemTypeHnd.GetSignatureCorElementType();
+
+ // Shared EEClass if there is one
+ MethodTable * pCanonMT = NULL;
+
+ // Strictly speaking no method table should be needed for
+ // arrays of the faked up TypeDescs for variable types that are
+ // used when verifying generic code.
+ // However, verification is tied in with some codegen in the JITs, so give these
+ // the shared MT just in case.
+ // This check matches precisely the one in ParamTypeDesc::OwnsMethodTable
+ if (CorTypeInfo::IsGenericVariable(elemType)) {
+ // This is loading the canonical version of the array so we can override
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+ return(ClassLoader::LoadArrayTypeThrowing(TypeHandle(g_pObjectClass), arrayKind, Rank).GetMethodTable());
+ }
+
+ // Arrays of reference types all share the same EEClass.
+ //
+ // We can't share nested SZARRAYs because they have different
+ // numbers of constructors.
+ //
+ // Unfortunately, we cannot share more because it would affect the user-visible System.RuntimeMethodHandle behavior
+ if (CorTypeInfo::IsObjRef(elemType) && elemType != ELEMENT_TYPE_SZARRAY && pElemMT != g_pObjectClass)
+ {
+ // This is loading the canonical version of the array so we can override
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+ pCanonMT = ClassLoader::LoadArrayTypeThrowing(TypeHandle(g_pObjectClass), arrayKind, Rank).GetMethodTable();
+ }
+
+ BOOL containsPointers = CorTypeInfo::IsObjRef(elemType);
+ if (elemType == ELEMENT_TYPE_VALUETYPE && pElemMT->ContainsPointers())
+ containsPointers = TRUE;
+
+ // this is the base for every array type
+ MethodTable *pParentClass = g_pArrayClass;
+ _ASSERTE(pParentClass); // Must have already loaded the System.Array class
+ _ASSERTE(pParentClass->IsFullyLoaded());
+
+ DWORD numCtors = 2; // ELEMENT_TYPE_ARRAY has two ctor functions, one with and one without lower bounds
+ if (arrayKind == ELEMENT_TYPE_SZARRAY)
+ {
+ numCtors = 1;
+ TypeHandle ptr = elemTypeHnd;
+ while (ptr.IsTypeDesc() && ptr.AsTypeDesc()->GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY) {
+ numCtors++;
+ ptr = ptr.AsTypeDesc()->GetTypeParam();
+ }
+ }
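+
+ // For example (illustrative): for int[][][] the loop walks the element
+ // chain int[][] and int[], giving numCtors == 3: constructors taking
+ // one, two or three length arguments for the jagged levels.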
+
+ /****************************************************************************************/
+
+ // Parent class is the top level array
+ // The vtable will have all of top level class's methods, plus any methods we have for array classes
+ DWORD numVirtuals = pParentClass->GetNumVirtuals();
+ DWORD numNonVirtualSlots = numCtors + 3; // 3 for the proper rank Get, Set, Address
+
+ size_t cbMT = sizeof(MethodTable);
+ cbMT += MethodTable::GetNumVtableIndirections(numVirtuals) * sizeof(PTR_PCODE);
+
+ // GC info
+ size_t cbCGCDescData = 0;
+ if (containsPointers)
+ {
+ cbCGCDescData += CGCDesc::ComputeSize(1);
+ if (elemType == ELEMENT_TYPE_VALUETYPE)
+ {
+ size_t nSeries = CGCDesc::GetCGCDescFromMT(pElemMT)->GetNumSeries();
+ cbCGCDescData += (nSeries - 1)*sizeof (val_serie_item);
+ _ASSERTE(cbCGCDescData == CGCDesc::ComputeSizeRepeating(nSeries));
+ }
+ }
+#ifdef FEATURE_COLLECTIBLE_TYPES
+ else if (this->IsCollectible())
+ {
+ cbCGCDescData = (DWORD)CGCDesc::ComputeSize(1);
+ }
+#endif
+
+ DWORD dwMultipurposeSlotsMask = 0;
+ dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasPerInstInfo;
+ dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasInterfaceMap;
+ if (pCanonMT == NULL)
+ dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasNonVirtualSlots;
+ if (this != elemTypeHnd.GetModule())
+ dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasModuleOverride;
+
+ // Allocate space for optional members
+ // We always have a non-virtual slot array, see assert at end
+ cbMT += MethodTable::GetOptionalMembersAllocationSize(dwMultipurposeSlotsMask,
+ FALSE, // RemotableMethodInfo
+ FALSE, // GenericsStaticsInfo
+ FALSE, // GuidInfo
+ FALSE, // CCWTemplate
+ FALSE, // RCWPerTypeData
+ FALSE, // RemotingVtsInfo
+ FALSE, // ContextStatic
+ FALSE); // TokenOverflow
+
+ // This is the offset of the beginning of the interface map
+ size_t imapOffset = cbMT;
+
+ // This is added after we determine the offset of the interface maps
+ // because the memory appears before the pointer to the method table
+ cbMT += cbCGCDescData;
+
+ // Inherit top level class's interface map
+ cbMT += pParentClass->GetNumInterfaces() * sizeof(InterfaceInfo_t);
+
+#ifdef FEATURE_PREJIT
+ Module* pComputedPZM = Module::ComputePreferredZapModule(NULL, Instantiation(&elemTypeHnd, 1));
+ BOOL canShareVtableChunks = MethodTable::CanShareVtableChunksFrom(pParentClass, this, pComputedPZM);
+#else
+ BOOL canShareVtableChunks = MethodTable::CanShareVtableChunksFrom(pParentClass, this);
+#endif // FEATURE_PREJIT
+
+ size_t offsetOfUnsharedVtableChunks = cbMT;
+
+ // We either share all of the parent's virtual slots or none of them
+ // If none, we need to allocate space for the slots
+ if (!canShareVtableChunks)
+ {
+ cbMT += numVirtuals * sizeof(PCODE);
+ }
+
+ // Canonical methodtable has an array of non virtual slots pointed to by the optional member
+ size_t offsetOfNonVirtualSlots = 0;
+ size_t cbArrayClass = 0;
+
+ if (pCanonMT == NULL)
+ {
+ offsetOfNonVirtualSlots = cbMT;
+ cbMT += numNonVirtualSlots * sizeof(PCODE);
+
+ // Allocate ArrayClass (including space for packed fields), MethodTable, and class name in one alloc.
+ // Remember to pad allocation size for ArrayClass portion to ensure MethodTable is pointer aligned.
+ cbArrayClass = ALIGN_UP(sizeof(ArrayClass) + sizeof(EEClassPackedFields), sizeof(void*));
+ }
+
+ // ArrayClass already includes one void*
+ LoaderAllocator* pAllocator= this->GetLoaderAllocator();
+ BYTE* pMemory = (BYTE *)pamTracker->Track(pAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(cbArrayClass) +
+ S_SIZE_T(cbMT)));
+
+ // Note: Memory allocated on loader heap is zero filled
+ // memset(pMemory, 0, sizeof(ArrayClass) + cbMT);
+
+ ArrayClass* pClass = NULL;
+
+ if (pCanonMT == NULL)
+ {
+ pClass = ::new (pMemory) ArrayClass();
+ }
+
+ // Head of MethodTable memory (starts after ArrayClass), this points at the GCDesc stuff in front
+ // of a method table (if needed)
+ BYTE* pMTHead = pMemory + cbArrayClass + cbCGCDescData;
+
+ MethodTable* pMT = (MethodTable *) pMTHead;
+
+ pMT->SetMultipurposeSlotsMask(dwMultipurposeSlotsMask);
+
+ // Allocate the private data block ("private" during runtime in the ngen'ed case).
+ MethodTableWriteableData * pMTWriteableData = (MethodTableWriteableData *) (BYTE *)
+ pamTracker->Track(pAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(MethodTableWriteableData))));
+ pMT->SetWriteableData(pMTWriteableData);
+
+ // This also disables IBC logging until the type is sufficiently initialized so
+ // it needs to be done early
+ pMTWriteableData->SetIsNotFullyLoadedForBuildMethodTable();
+
+ // Fill in pClass
+ if (pClass != NULL)
+ {
+ pClass->SetInternalCorElementType(arrayKind);
+ pClass->SetAttrClass (tdPublic | tdSerializable | tdSealed); // This class is public, serializable, sealed
+ pClass->SetRank (Rank);
+ pClass->SetArrayElementType (elemType);
+ if (pElemMT->GetClass()->ContainsStackPtr())
+ pClass->SetContainsStackPtr();
+ pClass->SetMethodTable (pMT);
+
+#if defined(CHECK_APP_DOMAIN_LEAKS) || defined(_DEBUG)
+ // Non-covariant arrays of agile types are agile
+ if (elemType != ELEMENT_TYPE_CLASS && elemTypeHnd.IsAppDomainAgile())
+ pClass->SetAppDomainAgile();
+ pClass->SetAppDomainAgilityDone();
+#endif
+
+ // Fill In the method table
+ pClass->SetNumMethods(numVirtuals + numNonVirtualSlots);
+
+ pClass->SetNumNonVirtualSlots(numNonVirtualSlots);
+ }
+
+ pMT->SetNumVirtuals(numVirtuals);
+
+ pMT->SetParentMethodTable(pParentClass);
+
+ DWORD dwComponentSize = elemTypeHnd.GetSize();
+
+ if (elemType == ELEMENT_TYPE_VALUETYPE || elemType == ELEMENT_TYPE_VOID)
+ {
+ // The only way for dwComponentSize to be large is to be part of a value class. If this changes,
+ // then the check will need to be moved outside the value class check.
+ if(dwComponentSize > MAX_SIZE_FOR_VALUECLASS_IN_ARRAY) {
+ StackSString ssElemName;
+ elemTypeHnd.GetName(ssElemName);
+
+ StackScratchBuffer scratch;
+ elemTypeHnd.GetAssembly()->ThrowTypeLoadException(ssElemName.GetUTF8(scratch), IDS_CLASSLOAD_VALUECLASSTOOLARGE);
+ }
+ }
+
+ if (pClass != NULL)
+ {
+ pMT->SetClass(pClass);
+ }
+ else
+ {
+ pMT->SetCanonicalMethodTable(pCanonMT);
+ }
+
+ pMT->SetIsArray(arrayKind, elemType);
+
+ pMT->SetApproxArrayElementTypeHandle(elemTypeHnd);
+
+ _ASSERTE(FitsIn<WORD>(dwComponentSize));
+ pMT->SetComponentSize(static_cast<WORD>(dwComponentSize));
+
+ pMT->SetLoaderModule(this);
+ pMT->SetLoaderAllocator(pAllocator);
+
+ pMT->SetModule(elemTypeHnd.GetModule());
+
+ if (elemTypeHnd.ContainsGenericVariables())
+ pMT->SetContainsGenericVariables();
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+ if (elemTypeHnd.HasTypeEquivalence())
+ {
+ // propagate the type equivalence flag
+ pMT->SetHasTypeEquivalence();
+ }
+#endif // FEATURE_TYPEEQUIVALENCE
+
+ _ASSERTE(pMT->IsClassPreInited());
+
+ // Set BaseSize to be size of non-data portion of the array
+ DWORD baseSize = ObjSizeOf(ArrayBase);
+ if (arrayKind == ELEMENT_TYPE_ARRAY)
+ baseSize += Rank*sizeof(DWORD)*2;
+
+#if !defined(_WIN64) && (DATA_ALIGNMENT > 4)
+ if (dwComponentSize >= DATA_ALIGNMENT)
+ baseSize = (DWORD)ALIGN_UP(baseSize, DATA_ALIGNMENT);
+#endif // !defined(_WIN64) && (DATA_ALIGNMENT > 4)
+ pMT->SetBaseSize(baseSize);
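+ // (For example, on a 64-bit target a rank-2 ELEMENT_TYPE_ARRAY adds
+ // 2 * sizeof(DWORD) * 2 == 16 bytes for the per-dimension length and
+ // lower-bound arrays on top of the fixed ArrayBase portion.)
+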
+ // Because array method tables can be persisted, we need our own copy of the interface map
+ memcpy(pMTHead + imapOffset, pParentClass->GetInterfaceMap(),
+ pParentClass->GetNumInterfaces() * sizeof(InterfaceInfo_t));
+ pMT->SetInterfaceMap(pParentClass->GetNumInterfaces(), (InterfaceInfo_t *)(pMTHead + imapOffset));
+
+ // Copy down flags for these interfaces as well. This is simplified a bit since we know that System.Array
+ // only has a few interfaces and the flags will fit inline into the MethodTable's optional members.
+ _ASSERTE(MethodTable::GetExtraInterfaceInfoSize(pParentClass->GetNumInterfaces()) == 0);
+ pMT->InitializeExtraInterfaceInfo(NULL);
+
+ for (UINT32 i = 0; i < pParentClass->GetNumInterfaces(); i++)
+ {
+ if (pParentClass->IsInterfaceDeclaredOnClass(i))
+ pMT->SetInterfaceDeclaredOnClass(i);
+ }
+
+ // The type is sufficiently initialized for most general purpose accessor methods to work.
+ // Mark the type as restored to avoid asserts. Note that this also enables IBC logging.
+ pMTWriteableData->SetIsFullyLoadedForBuildMethodTable();
+
+ {
+ // Fill out the vtable indirection slots
+ MethodTable::VtableIndirectionSlotIterator it = pMT->IterateVtableIndirectionSlots();
+ while (it.Next())
+ {
+ if (canShareVtableChunks)
+ {
+ // Share the parent chunk
+ it.SetIndirectionSlot(pParentClass->GetVtableIndirections()[it.GetIndex()]);
+ }
+ else
+ {
+ // Use the locally allocated chunk
+ it.SetIndirectionSlot((PTR_PCODE)(pMemory+cbArrayClass+offsetOfUnsharedVtableChunks));
+ offsetOfUnsharedVtableChunks += it.GetSize();
+ }
+ }
+
+ // If we are not sharing parent chunks, copy down the slot contents
+ if (!canShareVtableChunks)
+ {
+ // Copy top level class's vtable - note, vtable is contained within the MethodTable
+ for (UINT32 i = 0; i < numVirtuals; i++)
+ pMT->SetSlot(i, pParentClass->GetSlot(i));
+ }
+
+ if (pClass != NULL)
+ pMT->SetNonVirtualSlotsArray((PTR_PCODE)(pMemory+cbArrayClass+offsetOfNonVirtualSlots));
+ }
+
+#ifdef _DEBUG
+ StackSString debugName;
+ TypeString::AppendType(debugName, TypeHandle(pMT));
+ StackScratchBuffer buff;
+ const char* pDebugNameUTF8 = debugName.GetUTF8(buff);
+ S_SIZE_T safeLen = S_SIZE_T(strlen(pDebugNameUTF8))+S_SIZE_T(1);
+ if(safeLen.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW);
+ size_t len = safeLen.Value();
+ char * name = (char*) pamTracker->Track(pAllocator->
+ GetHighFrequencyHeap()->
+ AllocMem(safeLen));
+ strcpy_s(name, len, pDebugNameUTF8);
+
+ if (pClass != NULL)
+ pClass->SetDebugClassName(name);
+ pMT->SetDebugClassName(name);
+#endif // _DEBUG
+
+ if (pClass != NULL)
+ {
+ // Count the number of method descs we need so we can allocate chunks.
+ DWORD dwMethodDescs = numCtors
+ + 3; // for rank specific Get, Set, Address
+
+ MethodDescChunk * pChunks = MethodDescChunk::CreateChunk(pAllocator->GetHighFrequencyHeap(),
+ dwMethodDescs, mcArray, FALSE /* fNonVtableSlot*/, FALSE /* fNativeCodeSlot */, FALSE /* fComPlusCallInfo */,
+ pMT, pamTracker);
+ pClass->SetChunks(pChunks);
+
+ MethodTable::IntroducedMethodIterator it(pMT);
+
+ DWORD dwMethodIndex = 0;
+ for (; it.IsValid(); it.Next())
+ {
+ ArrayMethodDesc* pNewMD = (ArrayMethodDesc *) it.GetMethodDesc();
+ _ASSERTE(pNewMD->GetClassification() == mcArray);
+
+ DWORD dwFuncRank;
+ DWORD dwFuncType;
+
+ if (dwMethodIndex < ArrayMethodDesc::ARRAY_FUNC_CTOR)
+ {
+ // Generate a new stand-alone, Rank Specific Get, Set and Address method.
+ dwFuncRank = Rank;
+ dwFuncType = dwMethodIndex;
+ }
+ else
+ {
+ if (arrayKind == ELEMENT_TYPE_SZARRAY)
+ {
+ // For SZARRAY arrays, set up multiple constructors.
+ dwFuncRank = 1 + (dwMethodIndex - ArrayMethodDesc::ARRAY_FUNC_CTOR);
+ }
+ else
+ {
+ // ELEMENT_TYPE_ARRAY has two constructors, one without lower bounds and one with lower bounds
+ _ASSERTE((dwMethodIndex == ArrayMethodDesc::ARRAY_FUNC_CTOR) || (dwMethodIndex == ArrayMethodDesc::ARRAY_FUNC_CTOR+1));
+ dwFuncRank = (dwMethodIndex == ArrayMethodDesc::ARRAY_FUNC_CTOR) ? Rank : 2 * Rank;
+ }
+ dwFuncType = ArrayMethodDesc::ARRAY_FUNC_CTOR;
+ }
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+
+ pClass->GenerateArrayAccessorCallSig(dwFuncRank, dwFuncType, &pSig, &cSig, pAllocator, pamTracker
+ #ifdef FEATURE_ARRAYSTUB_AS_IL
+ ,0
+ #endif
+ );
+
+ pClass->InitArrayMethodDesc(pNewMD, pSig, cSig, numVirtuals + dwMethodIndex, pAllocator, pamTracker);
+
+ dwMethodIndex++;
+ }
+ _ASSERTE(dwMethodIndex == dwMethodDescs);
+ }
+
+ // Set up GC information
+ if (elemType == ELEMENT_TYPE_VALUETYPE || elemType == ELEMENT_TYPE_VOID)
+ {
+ // If it's an array of value classes, there is a different format for the GCDesc if it contains pointers
+ if (pElemMT->ContainsPointers())
+ {
+ CGCDescSeries *pSeries;
+
+ // There must be only one series for value classes
+ CGCDescSeries *pByValueSeries = CGCDesc::GetCGCDescFromMT(pElemMT)->GetHighestSeries();
+
+ pMT->SetContainsPointers();
+
+ // negative series has a special meaning, indicating a different form of GCDesc
+ SSIZE_T nSeries = (SSIZE_T) CGCDesc::GetCGCDescFromMT(pElemMT)->GetNumSeries();
+ CGCDesc::GetCGCDescFromMT(pMT)->InitValueClassSeries(pMT, nSeries);
+
+ pSeries = CGCDesc::GetCGCDescFromMT(pMT)->GetHighestSeries();
+
+ // sort by offset
+ SSIZE_T AllocSizeSeries;
+ if (!ClrSafeInt<SSIZE_T>::multiply(sizeof(CGCDescSeries*), nSeries, AllocSizeSeries))
+ COMPlusThrowOM();
+ CGCDescSeries** sortedSeries = (CGCDescSeries**) _alloca(AllocSizeSeries);
+ int index;
+ for (index = 0; index < nSeries; index++)
+ sortedSeries[index] = &pByValueSeries[-index];
+
+ // selection sort by series offset
+ for (int i = 0; i < nSeries; i++) {
+ for (int j = i+1; j < nSeries; j++)
+ if (sortedSeries[j]->GetSeriesOffset() < sortedSeries[i]->GetSeriesOffset())
+ {
+ CGCDescSeries* temp = sortedSeries[i];
+ sortedSeries[i] = sortedSeries[j];
+ sortedSeries[j] = temp;
+ }
+ }
+
+ // Offset of the first pointer in the array
+ // This equals the offset of the first pointer if this were an array of entirely pointers, plus the offset of the
+ // first pointer in the value class
+ pSeries->SetSeriesOffset(ArrayBase::GetDataPtrOffset(pMT)
+ + (sortedSeries[0]->GetSeriesOffset()) - sizeof (Object) );
+ for (index = 0; index < nSeries; index ++)
+ {
+ size_t numPtrsInBytes = sortedSeries[index]->GetSeriesSize()
+ + pElemMT->GetBaseSize();
+ size_t currentOffset;
+ size_t skip;
+ currentOffset = sortedSeries[index]->GetSeriesOffset()+numPtrsInBytes;
+ if (index != nSeries-1)
+ {
+ skip = sortedSeries[index+1]->GetSeriesOffset()-currentOffset;
+ }
+ else if (index == 0)
+ {
+ skip = pElemMT->GetAlignedNumInstanceFieldBytes() - numPtrsInBytes;
+ }
+ else
+ {
+ skip = sortedSeries[0]->GetSeriesOffset() + pElemMT->GetBaseSize()
+ - ObjSizeOf(Object) - currentOffset;
+ }
+
+ _ASSERTE(!"Module::CreateArrayMethodTable() - unaligned GC info" || IS_ALIGNED(skip, sizeof(size_t)));
+
+ unsigned short NumPtrs = (unsigned short) (numPtrsInBytes / sizeof(void*));
+ if(skip > MAX_SIZE_FOR_VALUECLASS_IN_ARRAY || numPtrsInBytes > MAX_PTRS_FOR_VALUECLASSS_IN_ARRAY) {
+ StackSString ssElemName;
+ elemTypeHnd.GetName(ssElemName);
+
+ StackScratchBuffer scratch;
+ elemTypeHnd.GetAssembly()->ThrowTypeLoadException(ssElemName.GetUTF8(scratch),
+ IDS_CLASSLOAD_VALUECLASSTOOLARGE);
+ }
+
+ val_serie_item *val_item = &(pSeries->val_serie[-index]);
+
+ val_item->set_val_serie_item (NumPtrs, (unsigned short)skip);
+ }
+ }
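+
+ // Worked example (illustrative, 64-bit): for elements of type
+ // struct S { Object a; Object b; int c; } there is one series of two
+ // pointers (16 bytes); with an aligned element size of 24 bytes the
+ // val_serie item comes out as (NumPtrs == 2, skip == 24 - 16 == 8).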
+ }
+ else if (CorTypeInfo::IsObjRef(elemType))
+ {
+ CGCDescSeries *pSeries;
+
+ pMT->SetContainsPointers();
+
+ // This array is all GC Pointers
+ CGCDesc::GetCGCDescFromMT(pMT)->Init( pMT, 1 );
+
+ pSeries = CGCDesc::GetCGCDescFromMT(pMT)->GetHighestSeries();
+
+ pSeries->SetSeriesOffset(ArrayBase::GetDataPtrOffset(pMT));
+ // For arrays, the size is the negative of the BaseSize (the GC always adds the total
+ // size of the object, so what you end up with is the size of the data portion of the array)
+ pSeries->SetSeriesSize(-(SSIZE_T)(pMT->GetBaseSize()));
+ }
+
+#ifdef FEATURE_COLLECTIBLE_TYPES
+ if (!pMT->ContainsPointers() && this->IsCollectible())
+ {
+ CGCDescSeries *pSeries;
+
+ // For collectible types, insert empty gc series
+ CGCDesc::GetCGCDescFromMT(pMT)->InitValueClassSeries(pMT, 1);
+ pSeries = CGCDesc::GetCGCDescFromMT(pMT)->GetHighestSeries();
+ pSeries->SetSeriesOffset(ArrayBase::GetDataPtrOffset(pMT));
+ pSeries->val_serie[0].set_val_serie_item (0, pMT->GetComponentSize());
+ }
+#endif
+
+ // If we get here, we are assuming that there was no truncation. If this is not the case then
+ // an array whose base type is not a value class was created and was larger than 0xffff (a word)
+ _ASSERTE(dwComponentSize == pMT->GetComponentSize());
+
+#ifdef FEATURE_PREJIT
+ _ASSERTE(pComputedPZM == Module::GetPreferredZapModuleForMethodTable(pMT));
+#endif
+
+ return(pMT);
+} // Module::CreateArrayMethodTable
+
+#ifndef CROSSGEN_COMPILE
+
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+
+class ArrayOpLinker : public ILStubLinker
+{
+ ILCodeStream * m_pCode;
+ ArrayMethodDesc * m_pMD;
+
+ SigTypeContext m_emptyContext;
+
+public:
+ ArrayOpLinker(ArrayMethodDesc * pMD)
+ : ILStubLinker(pMD->GetModule(), pMD->GetSignature(), &m_emptyContext, pMD, TRUE, TRUE, FALSE)
+ {
+ m_pCode = NewCodeStream(kDispatch);
+ m_pMD = pMD;
+ }
+
+ void EmitStub()
+ {
+ MethodTable *pMT = m_pMD->GetMethodTable();
+ BOOL fHasLowerBounds = pMT->GetInternalCorElementType() == ELEMENT_TYPE_ARRAY;
+
+ DWORD dwTotalLocalNum = NewLocal(ELEMENT_TYPE_I4);
+ DWORD dwFactorLocalNum = NewLocal(ELEMENT_TYPE_I4);
+ DWORD dwLengthLocalNum = NewLocal(ELEMENT_TYPE_I4);
+
+ mdToken tokPinningHelper = GetToken(MscorlibBinder::GetField(FIELD__PINNING_HELPER__M_DATA));
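+
+ // The stub reads an object's MethodTable pointer by taking a byref to the
+ // pinning helper's m_data (aliasing the object's first field) and stepping
+ // back past the object header. Roughly, in C++ terms (a sketch only, not
+ // code used here):
+ //
+ //   BYTE* pFirstField = (BYTE*)&((PinningHelper*)obj)->m_data;
+ //   MethodTable* pMT  = *(MethodTable**)(pFirstField - Object::GetOffsetOfFirstField());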
+
+ ILCodeLabel * pRangeExceptionLabel = NewCodeLabel();
+ ILCodeLabel * pRangeExceptionLabel1 = NewCodeLabel();
+ ILCodeLabel * pCheckDone = NewCodeLabel();
+ ILCodeLabel * pNotSZArray = NewCodeLabel();
+ ILCodeLabel * pTypeMismatchExceptionLabel = NULL;
+
+ UINT rank = pMT->GetRank();
+ UINT idx = rank;
+ UINT firstIdx = 0;
+ UINT hiddenArgIdx = rank;
+ _ASSERTE(rank>0);
+
+
+#ifndef _TARGET_X86_
+ if(m_pMD->GetArrayFuncIndex() == ArrayMethodDesc::ARRAY_FUNC_ADDRESS)
+ {
+ idx++;
+ firstIdx = 1;
+ hiddenArgIdx = 0;
+ }
+#endif
+
+ ArrayClass *pcls = (ArrayClass*)(pMT->GetClass());
+ if(pcls->GetArrayElementType() == ELEMENT_TYPE_CLASS)
+ {
+ // Type Check
+ if(m_pMD->GetArrayFuncIndex() == ArrayMethodDesc::ARRAY_FUNC_SET)
+ {
+ ILCodeLabel * pTypeCheckOK = NewCodeLabel();
+
+ m_pCode->EmitLDARG(rank); // load value to store
+ m_pCode->EmitBRFALSE(pTypeCheckOK); //Storing NULL is OK
+
+ m_pCode->EmitLDARG(rank); // return param
+ m_pCode->EmitLDFLDA(tokPinningHelper);
+ m_pCode->EmitLDC(Object::GetOffsetOfFirstField());
+ m_pCode->EmitSUB();
+ m_pCode->EmitLDIND_I(); // TypeHandle
+
+ m_pCode->EmitLoadThis();
+ m_pCode->EmitLDFLDA(tokPinningHelper);
+ m_pCode->EmitLDC(Object::GetOffsetOfFirstField());
+ m_pCode->EmitSUB();
+ m_pCode->EmitLDIND_I(); // Array MT
+ m_pCode->EmitLDC(MethodTable::GetOffsetOfArrayElementTypeHandle());
+ m_pCode->EmitADD();
+ m_pCode->EmitLDIND_I();
+
+ m_pCode->EmitCEQ();
+ m_pCode->EmitBRTRUE(pTypeCheckOK); // Same type is OK
+
+ // Call type check helper
+ m_pCode->EmitLDARG(rank);
+ m_pCode->EmitLoadThis();
+ m_pCode->EmitCALL(METHOD__STUBHELPERS__ARRAY_TYPE_CHECK,2,0);
+
+ m_pCode->EmitLabel(pTypeCheckOK);
+
+ }
+ else if(m_pMD->GetArrayFuncIndex() == ArrayMethodDesc::ARRAY_FUNC_ADDRESS)
+ {
+ // Check that the hidden param is same type
+ ILCodeLabel *pTypeCheckPassed = NewCodeLabel();
+ pTypeMismatchExceptionLabel = NewCodeLabel();
+
+ m_pCode->EmitLDARG(hiddenArgIdx); // hidden param
+ m_pCode->EmitBRFALSE(pTypeCheckPassed);
+ m_pCode->EmitLDARG(hiddenArgIdx);
+ m_pCode->EmitLDFLDA(tokPinningHelper);
+ m_pCode->EmitLDC(offsetof(ParamTypeDesc, m_Arg) - (Object::GetOffsetOfFirstField()+2));
+ m_pCode->EmitADD();
+ m_pCode->EmitLDIND_I();
+
+ m_pCode->EmitLoadThis();
+ m_pCode->EmitLDFLDA(tokPinningHelper);
+ m_pCode->EmitLDC(Object::GetOffsetOfFirstField());
+ m_pCode->EmitSUB();
+ m_pCode->EmitLDIND_I(); // Array MT
+ m_pCode->EmitLDC(MethodTable::GetOffsetOfArrayElementTypeHandle());
+ m_pCode->EmitADD();
+ m_pCode->EmitLDIND_I();
+
+ m_pCode->EmitCEQ();
+ m_pCode->EmitBRFALSE(pTypeMismatchExceptionLabel); // throw exception if not same
+ m_pCode->EmitLabel(pTypeCheckPassed);
+ }
+ }
+
+ if(rank == 1 && fHasLowerBounds)
+ {
+ // check if the array is SZArray.
+ m_pCode->EmitLoadThis();
+ m_pCode->EmitLDFLDA(tokPinningHelper);
+ m_pCode->EmitLDC(Object::GetOffsetOfFirstField());
+ m_pCode->EmitSUB();
+ m_pCode->EmitLDIND_I();
+ m_pCode->EmitLDC(MethodTable::GetOffsetOfFlags());
+ m_pCode->EmitADD();
+ m_pCode->EmitLDIND_I4();
+ m_pCode->EmitLDC(MethodTable::GetIfArrayThenSzArrayFlag());
+ m_pCode->EmitAND();
+ m_pCode->EmitBRFALSE(pNotSZArray); // go to the multi-dim array code if not an SZARRAY
+
+ // it is SZArray
+ // bounds check
+ m_pCode->EmitLoadThis();
+ m_pCode->EmitLDFLDA(tokPinningHelper);
+ m_pCode->EmitLDC(ArrayBase::GetOffsetOfNumComponents() - Object::GetOffsetOfFirstField());
+ m_pCode->EmitADD();
+ m_pCode->EmitLDIND_I4();
+ m_pCode->EmitLDARG(firstIdx);
+ m_pCode->EmitBLE_UN(pRangeExceptionLabel);
+
+ m_pCode->EmitLoadThis();
+ m_pCode->EmitLDFLDA(tokPinningHelper);
+ m_pCode->EmitLDC(ArrayBase::GetBoundsOffset(pMT) - Object::GetOffsetOfFirstField());
+ m_pCode->EmitADD();
+ m_pCode->EmitLDARG(firstIdx);
+ m_pCode->EmitBR(pCheckDone);
+ m_pCode->EmitLabel(pNotSZArray);
+ }
+
+ while(idx-- > firstIdx)
+ {
+ // Cache length
+ m_pCode->EmitLoadThis();
+ m_pCode->EmitLDFLDA(tokPinningHelper);
+ m_pCode->EmitLDC((ArrayBase::GetBoundsOffset(pMT) - Object::GetOffsetOfFirstField()) + (idx-firstIdx)*sizeof(DWORD));
+ m_pCode->EmitADD();
+ m_pCode->EmitLDIND_I4();
+ m_pCode->EmitSTLOC(dwLengthLocalNum);
+
+ // Fetch index
+ m_pCode->EmitLDARG(idx);
+
+ if (fHasLowerBounds)
+ {
+ // Load lower bound
+ m_pCode->EmitLoadThis();
+ m_pCode->EmitLDFLDA(tokPinningHelper);
+ m_pCode->EmitLDC((ArrayBase::GetLowerBoundsOffset(pMT) - Object::GetOffsetOfFirstField()) + (idx-firstIdx)*sizeof(DWORD));
+ m_pCode->EmitADD();
+ m_pCode->EmitLDIND_I4();
+
+ // Subtract lower bound
+ m_pCode->EmitSUB();
+ }
+
+ // Compare with length
+ m_pCode->EmitDUP();
+ m_pCode->EmitLDLOC(dwLengthLocalNum);
+ m_pCode->EmitBGE_UN(pRangeExceptionLabel1);
+
+ // Add to the running total if we have one already
+ if ((idx-firstIdx) != (rank - 1))
+ {
+ m_pCode->EmitLDLOC(dwFactorLocalNum);
+ m_pCode->EmitMUL();
+ m_pCode->EmitLDLOC(dwTotalLocalNum);
+ m_pCode->EmitADD();
+ }
+ m_pCode->EmitSTLOC(dwTotalLocalNum);
+
+ // Update factor if this is not the last iteration
+ if ((idx-firstIdx) != 0)
+ {
+ m_pCode->EmitLDLOC(dwLengthLocalNum);
+ if ((idx-firstIdx) != (rank - 1))
+ {
+ m_pCode->EmitLDLOC(dwFactorLocalNum);
+ m_pCode->EmitMUL();
+ }
+ m_pCode->EmitSTLOC(dwFactorLocalNum);
+ }
+ }
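+
+ // Net effect (illustrative): dwTotalLocalNum now holds the row-major
+ // element index; for rank 3 that is (i0 * len1 + i1) * len2 + i2, with
+ // each index first normalized by its lower bound when present.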
+
+ // Compute element address
+ m_pCode->EmitLoadThis();
+ m_pCode->EmitLDFLDA(tokPinningHelper);
+ m_pCode->EmitLDC(ArrayBase::GetDataPtrOffset(pMT) - Object::GetOffsetOfFirstField());
+ m_pCode->EmitADD();
+ m_pCode->EmitLDLOC(dwTotalLocalNum);
+
+ m_pCode->EmitLabel(pCheckDone);
+
+ SIZE_T elemSize = pMT->GetComponentSize();
+ if (elemSize != 1)
+ {
+ m_pCode->EmitLDC(elemSize);
+ m_pCode->EmitMUL();
+ }
+ m_pCode->EmitADD();
+
+ LocalDesc elemType(pMT->GetApproxArrayElementTypeHandle().GetInternalCorElementType());
+
+ switch (m_pMD->GetArrayFuncIndex())
+ {
+
+ case ArrayMethodDesc::ARRAY_FUNC_GET:
+ if(elemType.ElementType[0]==ELEMENT_TYPE_VALUETYPE)
+ {
+ m_pCode->EmitLDOBJ(GetToken(pMT->GetApproxArrayElementTypeHandle()));
+ }
+ else
+ m_pCode->EmitLDIND_T(&elemType);
+ break;
+
+ case ArrayMethodDesc::ARRAY_FUNC_SET:
+ // Value to store into the array
+ m_pCode->EmitLDARG(rank);
+
+ if(elemType.ElementType[0]==ELEMENT_TYPE_VALUETYPE)
+ {
+ m_pCode->EmitSTOBJ(GetToken(pMT->GetApproxArrayElementTypeHandle()));
+ }
+ else
+ m_pCode->EmitSTIND_T(&elemType);
+ break;
+
+ case ArrayMethodDesc::ARRAY_FUNC_ADDRESS:
+ break;
+
+ default:
+ _ASSERTE(!"Unknown ArrayFuncIndex");
+ }
+
+ m_pCode->EmitRET();
+
+ m_pCode->EmitLDC(0);
+ m_pCode->EmitLabel(pRangeExceptionLabel1); // Assumes that there is one "int" pushed on the stack
+ m_pCode->EmitPOP();
+
+ mdToken tokIndexOutOfRangeCtorExcep = GetToken((MscorlibBinder::GetException(kIndexOutOfRangeException))->GetDefaultConstructor());
+ m_pCode->EmitLabel(pRangeExceptionLabel);
+ m_pCode->EmitNEWOBJ(tokIndexOutOfRangeCtorExcep, 0);
+ m_pCode->EmitTHROW();
+
+ if(pTypeMismatchExceptionLabel != NULL)
+ {
+ mdToken tokTypeMismatchExcepCtor = GetToken((MscorlibBinder::GetException(kArrayTypeMismatchException))->GetDefaultConstructor());
+
+ m_pCode->EmitLabel(pTypeMismatchExceptionLabel);
+ m_pCode->EmitNEWOBJ(tokTypeMismatchExcepCtor, 0);
+ m_pCode->EmitTHROW();
+ }
+ }
+};
+
+Stub *GenerateArrayOpStub(ArrayMethodDesc* pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ ArrayOpLinker sl(pMD);
+
+ sl.EmitStub();
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cbSig;
+ AllocMemTracker amTracker;
+
+ if (pMD->GetArrayFuncIndex() == ArrayMethodDesc::ARRAY_FUNC_ADDRESS)
+ {
+ // The stub has to have signature with explicit hidden argument instead of CORINFO_CALLCONV_PARAMTYPE.
+ // Generate a new signature for the stub here.
+ ((ArrayClass*)(pMD->GetMethodTable()->GetClass()))->GenerateArrayAccessorCallSig(pMD->GetMethodTable()->GetRank(),
+ ArrayMethodDesc::ARRAY_FUNC_ADDRESS,
+ &pSig,
+ &cbSig,
+ pMD->GetLoaderAllocator(),
+ &amTracker,
+ 1);
+ }
+ else
+ {
+ pMD->GetSig(&pSig,&cbSig);
+ }
+
+ amTracker.SuppressRelease();
+
+ static const ILStubTypes stubTypes[3] = { ILSTUB_ARRAYOP_GET, ILSTUB_ARRAYOP_SET, ILSTUB_ARRAYOP_ADDRESS };
+
+ _ASSERTE(pMD->GetArrayFuncIndex() < COUNTOF(stubTypes));
+ NDirectStubFlags arrayOpStubFlag = (NDirectStubFlags)stubTypes[pMD->GetArrayFuncIndex()];
+
+ MethodDesc * pStubMD = ILStubCache::CreateAndLinkNewILStubMethodDesc(pMD->GetLoaderAllocator(),
+ pMD->GetMethodTable(),
+ arrayOpStubFlag,
+ pMD->GetModule(),
+ pSig, cbSig,
+ NULL,
+ &sl);
+
+ return Stub::NewStub(JitILStub(pStubMD));
+}
+
+#else // FEATURE_ARRAYSTUB_AS_IL
+//========================================================================
+// Generates the platform-independent arrayop stub.
+//========================================================================
+void GenerateArrayOpScript(ArrayMethodDesc *pMD, ArrayOpScript *paos)
+{
+ STANDARD_VM_CONTRACT;
+
+ ArrayOpIndexSpec *pai = NULL;
+ MethodTable *pMT = pMD->GetMethodTable();
+ ArrayClass *pcls = (ArrayClass*)(pMT->GetClass());
+
+ // The ArrayOpScript and ArrayOpIndexSpec structs double as hash keys
+ // for the ArrayStubCache. Thus, it's imperative that there be no
+ // unused "pad" fields that contain unstable values.
+ // pMT->GetRank() is bounded so the arithmetic here is safe.
+ memset(paos, 0, sizeof(ArrayOpScript) + sizeof(ArrayOpIndexSpec) * pMT->GetRank());
+
+ paos->m_rank = (BYTE)(pMT->GetRank());
+ paos->m_fHasLowerBounds = (pMT->GetInternalCorElementType() == ELEMENT_TYPE_ARRAY);
+
+ paos->m_ofsoffirst = ArrayBase::GetDataPtrOffset(pMT);
+
+ switch (pMD->GetArrayFuncIndex())
+ {
+ case ArrayMethodDesc::ARRAY_FUNC_GET:
+ paos->m_op = ArrayOpScript::LOAD;
+ break;
+ case ArrayMethodDesc::ARRAY_FUNC_SET:
+ paos->m_op = ArrayOpScript::STORE;
+ break;
+ case ArrayMethodDesc::ARRAY_FUNC_ADDRESS:
+ paos->m_op = ArrayOpScript::LOADADDR;
+ break;
+ default:
+ _ASSERTE(!"Unknown array func!");
+ }
+
+ MetaSig msig(pMD);
+ _ASSERTE(!msig.IsVarArg()); // No array signature is varargs, code below does not expect it.
+
+ switch (pcls->GetArrayElementType())
+ {
+ // These are all different because of sign extension
+
+ case ELEMENT_TYPE_I1:
+ paos->m_elemsize = 1;
+ paos->m_signed = TRUE;
+ break;
+
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_U1:
+ paos->m_elemsize = 1;
+ break;
+
+ case ELEMENT_TYPE_I2:
+ paos->m_elemsize = 2;
+ paos->m_signed = TRUE;
+ break;
+
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_U2:
+ paos->m_elemsize = 2;
+ break;
+
+ case ELEMENT_TYPE_I4:
+ IN_WIN32(case ELEMENT_TYPE_I:)
+ paos->m_elemsize = 4;
+ paos->m_signed = TRUE;
+ break;
+
+ case ELEMENT_TYPE_U4:
+ IN_WIN32(case ELEMENT_TYPE_U:)
+ IN_WIN32(case ELEMENT_TYPE_PTR:)
+ paos->m_elemsize = 4;
+ break;
+
+ case ELEMENT_TYPE_I8:
+ IN_WIN64(case ELEMENT_TYPE_I:)
+ paos->m_elemsize = 8;
+ paos->m_signed = TRUE;
+ break;
+
+ case ELEMENT_TYPE_U8:
+ IN_WIN64(case ELEMENT_TYPE_U:)
+ IN_WIN64(case ELEMENT_TYPE_PTR:)
+ paos->m_elemsize = 8;
+ break;
+
+ case ELEMENT_TYPE_R4:
+ paos->m_elemsize = 4;
+ paos->m_flags |= paos->ISFPUTYPE;
+ break;
+
+ case ELEMENT_TYPE_R8:
+ paos->m_elemsize = 8;
+ paos->m_flags |= paos->ISFPUTYPE;
+ break;
+
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_OBJECT:
+ paos->m_elemsize = sizeof(LPVOID);
+ paos->m_flags |= paos->NEEDSWRITEBARRIER;
+ if (paos->m_op != ArrayOpScript::LOAD)
+ {
+ paos->m_flags |= paos->NEEDSTYPECHECK;
+ }
+
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ paos->m_elemsize = pMT->GetComponentSize();
+ if (pMT->ContainsPointers())
+ {
+ paos->m_gcDesc = CGCDesc::GetCGCDescFromMT(pMT);
+ paos->m_flags |= paos->NEEDSWRITEBARRIER;
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Unsupported Array Type!");
+ }
+
+ ArgIterator argit(&msig);
+
+#ifdef _TARGET_X86_
+ paos->m_cbretpop = argit.CbStackPop();
+#endif
+
+ if (argit.HasRetBuffArg())
+ {
+ paos->m_flags |= ArrayOpScript::HASRETVALBUFFER;
+ paos->m_fRetBufLoc = argit.GetRetBuffArgOffset();
+ }
+
+ if (paos->m_op == ArrayOpScript::LOADADDR)
+ {
+ paos->m_typeParamOffs = argit.GetParamTypeArgOffset();
+ }
+
+ for (UINT idx = 0; idx < paos->m_rank; idx++)
+ {
+ pai = (ArrayOpIndexSpec*)(paos->GetArrayOpIndexSpecs() + idx);
+
+ pai->m_idxloc = argit.GetNextOffset();
+ pai->m_lboundofs = paos->m_fHasLowerBounds ? (UINT32) (ArrayBase::GetLowerBoundsOffset(pMT) + idx*sizeof(DWORD)) : 0;
+ pai->m_lengthofs = ArrayBase::GetBoundsOffset(pMT) + idx*sizeof(DWORD);
+ }
+
+ if (paos->m_op == paos->STORE)
+ {
+ paos->m_fValLoc = argit.GetNextOffset();
+ }
+}
+
+//---------------------------------------------------------
+// Cache for array stubs
+//---------------------------------------------------------
+class ArrayStubCache : public StubCacheBase
+{
+ virtual void CompileStub(const BYTE *pRawStub,
+ StubLinker *psl);
+ virtual UINT Length(const BYTE *pRawStub);
+
+public:
+ static ArrayStubCache * GetArrayStubCache()
+ {
+ STANDARD_VM_CONTRACT;
+
+ static ArrayStubCache * s_pArrayStubCache = NULL;
+
+ if (s_pArrayStubCache == NULL)
+ {
+ ArrayStubCache * pArrayStubCache = new ArrayStubCache();
+ if (FastInterlockCompareExchangePointer(&s_pArrayStubCache, pArrayStubCache, NULL) != NULL)
+ delete pArrayStubCache;
+ }
+
+ return s_pArrayStubCache;
+ }
+};
+
+Stub *GenerateArrayOpStub(ArrayMethodDesc* pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodTable *pMT = pMD->GetMethodTable();
+
+ ArrayOpScript *paos = (ArrayOpScript*)_alloca(sizeof(ArrayOpScript) + sizeof(ArrayOpIndexSpec) * pMT->GetRank());
+
+ GenerateArrayOpScript(pMD, paos);
+
+ Stub *pArrayOpStub;
+ pArrayOpStub = ArrayStubCache::GetArrayStubCache()->Canonicalize((const BYTE *)paos);
+ if (pArrayOpStub == NULL)
+ COMPlusThrowOM();
+
+ return pArrayOpStub;
+}
+
+void ArrayStubCache::CompileStub(const BYTE *pRawStub,
+ StubLinker *psl)
+{
+ STANDARD_VM_CONTRACT;
+
+ ((CPUSTUBLINKER*)psl)->EmitArrayOpStub((ArrayOpScript*)pRawStub);
+}
+
+UINT ArrayStubCache::Length(const BYTE *pRawStub)
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return ((ArrayOpScript*)pRawStub)->Length();
+}
+
+#endif // FEATURE_ARRAYSTUB_AS_IL
+
+#endif // CROSSGEN_COMPILE
+
+//---------------------------------------------------------------------
+// This method returns TRUE if pInterfaceMT could be one of the interfaces
+// that are implicitly implemented by SZArrays
+
+BOOL IsImplicitInterfaceOfSZArray(MethodTable *pInterfaceMT)
+{
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(pInterfaceMT->IsInterface());
+
+ // Is target interface Anything<T> in mscorlib?
+ if (!pInterfaceMT->HasInstantiation() || !pInterfaceMT->GetModule()->IsSystem())
+ return FALSE;
+
+ unsigned rid = pInterfaceMT->GetTypeDefRid();
+
+ // Is target interface IList<T> or one of its ancestors, or IReadOnlyList<T>?
+ return (rid == MscorlibBinder::GetExistingClass(CLASS__ILISTGENERIC)->GetTypeDefRid() ||
+ rid == MscorlibBinder::GetExistingClass(CLASS__ICOLLECTIONGENERIC)->GetTypeDefRid() ||
+ rid == MscorlibBinder::GetExistingClass(CLASS__IENUMERABLEGENERIC)->GetTypeDefRid()
+#if !defined(FEATURE_CORECLR) || defined(FEATURE_COMINTEROP)
+ || rid == MscorlibBinder::GetExistingClass(CLASS__IREADONLYCOLLECTIONGENERIC)->GetTypeDefRid()
+ || rid == MscorlibBinder::GetExistingClass(CLASS__IREADONLYLISTGENERIC)->GetTypeDefRid()
+#endif
+ );
+}
+
+//---------------------------------------------------------------------
+// Check if arrays support certain interfaces that don't appear in the base interface
+// list. It does not check the base interfaces themselves - you must do that
+// separately.
+//---------------------------------------------------------------------
+BOOL ArraySupportsBizarreInterface(ArrayTypeDesc *pArrayTypeDesc, MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+
+ PRECONDITION(pInterfaceMT->IsInterface());
+ PRECONDITION(pArrayTypeDesc->IsArray());
+ }
+ CONTRACTL_END
+
+#ifdef _DEBUG
+ MethodTable *pArrayMT = pArrayTypeDesc->GetMethodTable();
+ _ASSERTE(pArrayMT->IsArray());
+ _ASSERTE(pArrayMT->IsRestored());
+#endif
+
+ // IList<T> & IReadOnlyList<T> only supported for SZ_ARRAYS
+ if (pArrayTypeDesc->GetInternalCorElementType() != ELEMENT_TYPE_SZARRAY)
+ return FALSE;
+
+ ClassLoader::EnsureLoaded(pInterfaceMT, CLASS_DEPENDENCIES_LOADED);
+
+ if (!IsImplicitInterfaceOfSZArray(pInterfaceMT))
+ return FALSE;
+
+ return TypeDesc::CanCastParam(pArrayTypeDesc->GetTypeParam(), pInterfaceMT->GetInstantiation()[0], NULL);
+}
+
+//----------------------------------------------------------------------------------
+// Calls to (IList<T>)(array).Meth are actually implemented by SZArrayHelper.Meth<T>
+// This workaround exists for two reasons:
+//
+// - For working set reasons, we don't want to insert these methods in the array hierarchy
+// in the normal way.
+// - For platform and devtime reasons, we still want to use the C# compiler to generate
+// the method bodies.
+//
+// (Though it's questionable whether any devtime was saved.)
+//
+// This method takes care of the mapping between the two. Give it a method
+// IList<T>.Meth, and it will return SZArrayHelper.Meth<T>.
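+//
+// (Illustrative note: e.g. a call to ((IList<int>)intArray).IndexOf(x) ends up in
+// SZArrayHelper.IndexOf<int>; the slot of the interface method is what selects the
+// matching SZArrayHelper method below.)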
+//----------------------------------------------------------------------------------
+MethodDesc* GetActualImplementationForArrayGenericIListOrIReadOnlyListMethod(MethodDesc *pItfcMeth, TypeHandle theT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ int slot = pItfcMeth->GetSlot();
+
+ // We need to pick the right starting method depending on the depth of the inheritance chain
+ static const BinderMethodID startingMethod[] = {
+ METHOD__SZARRAYHELPER__GETENUMERATOR, // First method of IEnumerable`1
+ METHOD__SZARRAYHELPER__GET_COUNT, // First method of ICollection`1/IReadOnlyCollection`1
+ METHOD__SZARRAYHELPER__GET_ITEM // First method of IList`1/IReadOnlyList`1
+ };
+
+ // Subtract one for the non-generic IEnumerable that the generic enumerable inherits from
+ unsigned int inheritanceDepth = pItfcMeth->GetMethodTable()->GetNumInterfaces() - 1;
+ PREFIX_ASSUME(0 <= inheritanceDepth && inheritanceDepth < NumItems(startingMethod));
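+ // (Worked example, per the table above: IEnumerable<T> implements only the non-generic
+ // IEnumerable, so its depth is 0; ICollection<T> adds IEnumerable<T> for depth 1; and
+ // IList<T> adds ICollection<T> on top of that, giving depth 2.)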
+
+ MethodDesc *pGenericImplementor = MscorlibBinder::GetMethod((BinderMethodID)(startingMethod[inheritanceDepth] + slot));
+
+ // The most common reason for this assert is that the order of the SZArrayHelper methods in
+ // mscorlib.h does not match the order they are implemented on the generic interfaces.
+ _ASSERTE(pGenericImplementor == MemberLoader::FindMethodByName(g_pSZArrayHelperClass, pItfcMeth->GetName()));
+
+ // OPTIMIZATION: For any method other than GetEnumerator(), we can safely substitute
+ // "Object" for reference-type theT's. This causes fewer methods to be instantiated.
+ if (startingMethod[inheritanceDepth] != METHOD__SZARRAYHELPER__GETENUMERATOR &&
+ !theT.IsValueType())
+ {
+ theT = TypeHandle(g_pObjectClass);
+ }
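+ // (Illustrative note: with this substitution, IList<string>.Contains and
+ // IList<Exception>.Contains both resolve to SZArrayHelper.Contains<Object>, so a single
+ // instantiation serves every reference element type.)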
+
+ MethodDesc *pActualImplementor = MethodDesc::FindOrCreateAssociatedMethodDesc(pGenericImplementor,
+ g_pSZArrayHelperClass,
+ FALSE,
+ Instantiation(&theT, 1),
+ FALSE // allowInstParam
+ );
+ _ASSERTE(pActualImplementor);
+ return pActualImplementor;
+}
+#endif // DACCESS_COMPILE
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#pragma warning(disable:4244)
+#endif // _MSC_VER: warning C4244
diff --git a/src/vm/array.h b/src/vm/array.h
new file mode 100644
index 0000000000..72fe9af442
--- /dev/null
+++ b/src/vm/array.h
@@ -0,0 +1,114 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _ARRAY_H_
+#define _ARRAY_H_
+
+#define MAX_RANK 32 // If you make this bigger, you need to make MAX_CLASSNAME_LENGTH bigger too.
+ // A 32-dim array with at least 2 elements in each dim holds 2^32 elements, which
+ // takes up at least 4GB even at one byte per element. Thus this is a reasonable maximum.
+// (Note: at the time of the above comment, the rank was 32, and
+// MAX_CLASSNAME_LENGTH was 256. I'm now changing MAX_CLASSNAME_LENGTH
+// to 1024, but not changing MAX_RANK.)
+
+class MethodTable;
+
+
+#ifndef FEATURE_ARRAYSTUB_AS_IL
+
+//======================================================================
+// The following structures double as hash keys for the ArrayStubCache.
+// Thus, it's imperative that there be no
+// unused "pad" fields that contain unstable values.
+#include <pshpack1.h>
+
+
+// Specifies one index spec. This is used mostly to get the argument
+// location done early when we still have a signature to work with.
+struct ArrayOpIndexSpec
+{
+ UINT32 m_idxloc; //if (m_fref) offset in ArgumentReg else base-frame offset into stack.
+ UINT32 m_lboundofs; //offset within array of lowerbound
+ UINT32 m_lengthofs; //offset within array of lengths
+};
+
+
+struct ArrayOpScript
+{
+ enum
+ {
+ LOAD = 0,
+ STORE = 1,
+ LOADADDR = 2,
+ };
+
+
+ // FLAGS
+ enum
+ {
+ ISFPUTYPE = 0x01,
+ NEEDSWRITEBARRIER = 0x02,
+ HASRETVALBUFFER = 0x04,
+ NEEDSTYPECHECK = 0x10,
+ };
+
+ //
+ // these args have been reordered for better packing..
+ //
+
+ BYTE m_rank; // # of ArrayOpIndexSpec's
+ BYTE m_fHasLowerBounds; // if FALSE, all lowerbounds are 0
+ BYTE m_flags;
+ BYTE m_signed; // whether to sign-extend or zero-extend (for short types)
+
+ BYTE m_op; // STORE/LOAD/LOADADDR
+ BYTE m_pad1;
+
+ UINT16 m_fRetBufLoc; // if HASRETVALBUFFER, stack offset or argreg offset of retbuf ptr
+ UINT16 m_fValLoc; // for STORES, stack offset or argreg offset of value
+
+ UINT16 m_cbretpop; // how much to pop
+
+ UINT32 m_elemsize; // size in bytes of element.
+ UINT m_ofsoffirst; // offset of first element
+ INT m_typeParamOffs; // offset of type param
+ CGCDesc* m_gcDesc; // layout of GC stuff (0 if not needed)
+
+ // Array of ArrayOpIndexSpec's follow (one for each dimension).
+
+ const ArrayOpIndexSpec *GetArrayOpIndexSpecs() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (const ArrayOpIndexSpec *)(1+ this);
+ }
+
+ UINT Length() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return sizeof(*this) + m_rank * sizeof(ArrayOpIndexSpec);
+ }
+
+};
+
+#include <poppack.h>
+//======================================================================
+
+#endif // FEATURE_ARRAYSTUB_AS_IL
+
+
+Stub *GenerateArrayOpStub(ArrayMethodDesc* pMD);
+
+
+
+BOOL IsImplicitInterfaceOfSZArray(MethodTable *pIntfMT);
+BOOL ArraySupportsBizarreInterface(ArrayTypeDesc *pArrayTypeDesc, MethodTable *pInterfaceMT);
+
+MethodDesc* GetActualImplementationForArrayGenericIListOrIReadOnlyListMethod(MethodDesc *pItfcMeth, TypeHandle theT);
+
+#endif // _ARRAY_H_
+
diff --git a/src/vm/assembly.cpp b/src/vm/assembly.cpp
new file mode 100644
index 0000000000..e7b7c5b4c4
--- /dev/null
+++ b/src/vm/assembly.cpp
@@ -0,0 +1,5131 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: Assembly.cpp
+**
+**
+** Purpose: Implements assembly (loader domain) architecture
+**
+**
+===========================================================*/
+
+#include "common.h"
+
+#include <stdlib.h>
+
+#include "assembly.hpp"
+#include "appdomain.hpp"
+#include "security.h"
+#include "perfcounters.h"
+#include "assemblyname.hpp"
+
+#ifdef FEATURE_FUSION
+#include "fusion.h"
+#include "assemblysink.h"
+#include "ngenoptout.h"
+#endif
+
+#if !defined(FEATURE_CORECLR) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+#include "assemblyusagelogmanager.h"
+#include "policy.h"
+#endif
+
+#include "eeprofinterfaces.h"
+#include "reflectclasswriter.h"
+#include "comdynamic.h"
+
+#include <wincrypt.h>
+#include "urlmon.h"
+#include "sha1.h"
+
+#include "eeconfig.h"
+#include "strongname.h"
+
+#include "ceefilegenwriter.h"
+#include "assemblynative.hpp"
+#include "threadsuspend.h"
+
+#ifdef FEATURE_PREJIT
+#include "corcompile.h"
+#endif
+
+#include "appdomainnative.hpp"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#include "appdomainhelper.h"
+#endif
+#include "customattribute.h"
+#include "winnls.h"
+
+#include "constrainedexecutionregion.h"
+#include "caparser.h"
+#include "../md/compiler/custattr.h"
+#include "mdaassistants.h"
+
+#include "peimagelayout.inl"
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+#include <shlobj.h>
+#include "eventmsg.h"
+#endif
+
+
+// Define these macros to do strict validation for jit lock and class init entry leaks.
+// These defines determine whether the asserts that verify for these leaks are enabled or not.
+// These asserts can sometimes go off even if no entries have been leaked, so these defines
+// should be used with caution.
+//
+// If we are inside a .cctor when the application shuts down, then the class init lock's
+// head will be set and this will cause the assert to go off.
+//
+// If we are jitting a method when the application shuts down, then the jit lock's head
+// will be set, causing the assert to go off.
+
+//#define STRICT_JITLOCK_ENTRY_LEAK_DETECTION
+//#define STRICT_CLSINITLOCK_ENTRY_LEAK_DETECTION
+
+
+#ifndef DACCESS_COMPILE
+
+// This value is to make it easier to diagnose Assembly Loader "grant set" crashes.
+// See Dev11 bug 358184 for more details.
+
+// This value is not thread safe and is not intended to be. It is just a best
+// effort to collect more data on the problem. It is possible, though unlikely,
+// that thread A would record a reason for an upcoming crash,
+// thread B would then record a different reason, and we would then
+// crash on thread A, thus ending up with the recorded reason not matching
+// the thread we crash in. Be aware of this when using this value
+// to help your debugging.
+DWORD g_dwLoaderReasonForNotSharing = 0; // See code:DomainFile::m_dwReasonForRejectingNativeImage for a similar variable.
+
+// These will sometimes result in a crash with error code 0x80131401 SECURITY_E_INCOMPATIBLE_SHARE
+// "Loading this assembly would produce a different grant set from other instances."
+enum ReasonForNotSharing
+{
+ ReasonForNotSharing_NoInfoRecorded = 0x1,
+ ReasonForNotSharing_NullDomainassembly = 0x2,
+ ReasonForNotSharing_DebuggerFlagMismatch = 0x3,
+ ReasonForNotSharing_NullPeassembly = 0x4,
+ ReasonForNotSharing_MissingAssemblyClosure1 = 0x5,
+ ReasonForNotSharing_MissingAssemblyClosure2 = 0x6,
+ ReasonForNotSharing_MissingDependenciesResolved = 0x7,
+ ReasonForNotSharing_ClosureComparisonFailed = 0x8,
+};
+
+#define NO_FRIEND_ASSEMBLIES_MARKER ((FriendAssemblyDescriptor *)S_FALSE)
+
+//----------------------------------------------------------------------------------------------
+// The ctor's job is to initialize the Assembly enough so that the dtor can safely run.
+// It cannot do any allocations or operations that might fail. Those operations should be done
+// in Assembly::Init()
+//----------------------------------------------------------------------------------------------
+Assembly::Assembly(BaseDomain *pDomain, PEAssembly* pFile, DebuggerAssemblyControlFlags debuggerFlags, BOOL fIsCollectible) :
+ m_FreeFlag(0),
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ m_pAllowedFiles(NULL),
+ m_crstAllowedFiles(CrstAllowedFiles),
+#endif
+ m_pDomain(pDomain),
+ m_pClassLoader(NULL),
+ m_pEntryPoint(NULL),
+ m_pManifest(NULL),
+ m_pManifestFile(clr::SafeAddRef(pFile)),
+ m_pOnDiskManifest(NULL),
+ m_pFriendAssemblyDescriptor(NULL),
+ m_pbStrongNameKeyPair(NULL),
+ m_pwStrongNameKeyContainer(NULL),
+ m_isDynamic(false),
+ m_isDisabledPrivateReflection(0),
+#ifdef FEATURE_COLLECTIBLE_TYPES
+ m_isCollectible(fIsCollectible),
+#endif
+ m_needsToHideManifestForEmit(FALSE),
+ m_dwDynamicAssemblyAccess(ASSEMBLY_ACCESS_RUN),
+ m_nextAvailableModuleIndex(1),
+ m_pLoaderAllocator(NULL),
+#ifdef FEATURE_COMINTEROP
+ m_pITypeLib(NULL),
+ m_winMDStatus(WinMDStatus_Unknown),
+ m_pManifestWinMDImport(NULL),
+#endif // FEATURE_COMINTEROP
+ m_pSharedSecurityDesc(NULL),
+ m_pTransparencyBehavior(NULL),
+ m_fIsDomainNeutral(pDomain == SharedDomain::GetDomain()),
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ m_bMissingDependenciesCheckDone(FALSE),
+#ifdef FEATURE_FUSION
+ m_pBindingClosure(NULL),
+#endif
+#endif // FEATURE_LOADER_OPTIMIZATION
+ m_debuggerFlags(debuggerFlags),
+ m_fTerminated(FALSE),
+ m_HostAssemblyId(0)
+#ifdef FEATURE_COMINTEROP
+ , m_InteropAttributeStatus(INTEROP_ATTRIBUTE_UNSET)
+#endif
+#ifndef FEATURE_CORECLR
+ , m_fSupportsAutoNGen(FALSE)
+#endif
+{
+ STANDARD_VM_CONTRACT;
+}
+
+// This name needs to stay in sync with AssemblyBuilder.MANIFEST_MODULE_NAME
+// which is used in AssemblyBuilder.InitManifestModule
+#define REFEMIT_MANIFEST_MODULE_NAME W("RefEmit_InMemoryManifestModule")
+
+//----------------------------------------------------------------------------------------------
+// Does most Assembly initialization tasks. It can assume the ctor has already run
+// and the assembly is safely destructable. Whether this function throws or succeeds,
+// it must leave the Assembly in a safely destructable state.
+//----------------------------------------------------------------------------------------------
+void Assembly::Init(AllocMemTracker *pamTracker, LoaderAllocator *pLoaderAllocator)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsSystem())
+ {
+ _ASSERTE(pLoaderAllocator == NULL); // pLoaderAllocator may only be non-null for collectible types
+ m_pLoaderAllocator = SystemDomain::GetGlobalLoaderAllocator();
+ }
+ else
+ {
+ if (!IsDomainNeutral())
+ {
+ if (!IsCollectible())
+ {
+ // pLoaderAllocator will only be non-null for reflection emit assemblies
+ _ASSERTE((pLoaderAllocator == NULL) || (pLoaderAllocator == GetDomain()->AsAppDomain()->GetLoaderAllocator()));
+ m_pLoaderAllocator = GetDomain()->AsAppDomain()->GetLoaderAllocator();
+ }
+ else
+ {
+ _ASSERTE(pLoaderAllocator != NULL); // ppLoaderAllocator must be non-null for collectible assemblies
+
+ m_pLoaderAllocator = pLoaderAllocator;
+ }
+ }
+ else
+ {
+ _ASSERTE(pLoaderAllocator == NULL); // pLoaderAllocator may only be non-null for collectible types
+ // use global loader heaps
+ m_pLoaderAllocator = SystemDomain::GetGlobalLoaderAllocator();
+ }
+ }
+ _ASSERTE(m_pLoaderAllocator != NULL);
+
+ m_pClassLoader = new ClassLoader(this);
+ m_pClassLoader->Init(pamTracker);
+
+ m_pSharedSecurityDesc = Security::CreateSharedSecurityDescriptor(this);
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ m_pAllowedFiles = new EEUtf8StringHashTable();
+#endif
+
+ COUNTER_ONLY(GetPerfCounters().m_Loading.cAssemblies++);
+
+#ifndef CROSSGEN_COMPILE
+ if (GetManifestFile()->IsDynamic())
+ // manifest modules of dynamic assemblies are always transient
+ m_pManifest = ReflectionModule::Create(this, GetManifestFile(), pamTracker, REFEMIT_MANIFEST_MODULE_NAME, TRUE);
+ else
+#endif
+ m_pManifest = Module::Create(this, mdFileNil, GetManifestFile(), pamTracker);
+
+ PrepareModuleForAssembly(m_pManifest, pamTracker);
+
+ CacheManifestFiles();
+
+ CacheManifestExportedTypes(pamTracker);
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+ GenerateBreadcrumbForServicing();
+
+ m_fSupportsAutoNGen = SupportsAutoNGenWorker();
+
+ ReportAssemblyUse();
+#endif
+
+ // Check for the special System.Numerics.Vectors assembly.
+ // If we encounter a non-trusted assembly by this name, we will simply not recognize any of its
+ // methods as intrinsics.
+ if (!strcmp(GetSimpleName(), "System.Numerics.Vectors"))
+ {
+ m_fIsSIMDVectorAssembly = true;
+ }
+ else
+ {
+ m_fIsSIMDVectorAssembly = false;
+ }
+
+ // We'll load the friend assembly information lazily. For the ngen case we should avoid
+ // loading it entirely.
+ //CacheFriendAssemblyInfo();
+
+ {
+ CANNOTTHROWCOMPLUSEXCEPTION();
+ FAULT_FORBID();
+ //Cannot fail after this point.
+
+ PublishModuleIntoAssembly(m_pManifest);
+
+ return; // Explicit return to let you know you are NOT welcome to add code after the CANNOTTHROW/FAULT_FORBID expires
+ }
+}
+
+BOOL Assembly::IsDisabledPrivateReflection()
+{
+ CONTRACTL
+ {
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ enum { UNINITIALIZED, ENABLED, DISABLED};
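+ // UNINITIALIZED must be 0: the ctor initializes m_isDisabledPrivateReflection to 0, so
+ // the first call performs the attribute lookup below and caches ENABLED or DISABLED.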
+
+ if (m_isDisabledPrivateReflection == UNINITIALIZED)
+ {
+ IMDInternalImport *pImport = GetManifestImport();
+ HRESULT hr = pImport->GetCustomAttributeByName(GetManifestToken(), DISABLED_PRIVATE_REFLECTION_TYPE, NULL, 0);
+ IfFailThrow(hr);
+
+ if (hr == S_OK)
+ {
+ m_isDisabledPrivateReflection = DISABLED;
+ }
+ else
+ {
+ m_isDisabledPrivateReflection = ENABLED;
+ }
+ }
+
+ return m_isDisabledPrivateReflection == DISABLED;
+}
+
+#ifndef CROSSGEN_COMPILE
+Assembly::~Assembly()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ DISABLED(FORBID_FAULT); //Must clean up some profiler stuff
+ }
+ CONTRACTL_END
+
+ Terminate();
+
+ if (m_pFriendAssemblyDescriptor != NULL && m_pFriendAssemblyDescriptor != NO_FRIEND_ASSEMBLIES_MARKER)
+ delete m_pFriendAssemblyDescriptor;
+
+ if (m_pbStrongNameKeyPair && (m_FreeFlag & FREE_KEY_PAIR))
+ delete[] m_pbStrongNameKeyPair;
+ if (m_pwStrongNameKeyContainer && (m_FreeFlag & FREE_KEY_CONTAINER))
+ delete[] m_pwStrongNameKeyContainer;
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ if (m_pAllowedFiles)
+ delete(m_pAllowedFiles);
+#endif
+#ifdef FEATURE_FUSION
+ if (m_pBindingClosure)
+ {
+ m_pBindingClosure->Release();
+ }
+#endif
+ if (IsDynamic()) {
+ if (m_pOnDiskManifest)
+ // clear the on disk manifest if it is not cleared yet.
+ m_pOnDiskManifest = NULL;
+ }
+
+ if (m_pManifestFile)
+ {
+ m_pManifestFile->Release();
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (m_pManifestWinMDImport)
+ {
+ m_pManifestWinMDImport->Release();
+ }
+#endif // FEATURE_COMINTEROP
+}
+
+#ifdef FEATURE_PREJIT
+void Assembly::DeleteNativeCodeRanges()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ ModuleIterator i = IterateModules();
+ while (i.Next())
+ i.GetModule()->DeleteNativeCodeRanges();
+}
+#endif
+
+#ifdef PROFILING_SUPPORTED
+void ProfilerCallAssemblyUnloadStarted(Assembly* assemblyUnloaded)
+{
+ WRAPPER_NO_CONTRACT;
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->AssemblyUnloadStarted((AssemblyID)assemblyUnloaded);
+ END_PIN_PROFILER();
+ }
+}
+
+void ProfilerCallAssemblyUnloadFinished(Assembly* assemblyUnloaded)
+{
+ WRAPPER_NO_CONTRACT;
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->AssemblyUnloadFinished((AssemblyID) assemblyUnloaded, S_OK);
+ END_PIN_PROFILER();
+ }
+}
+#endif
+
+void Assembly::StartUnload()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FORBID_FAULT;
+#ifdef PROFILING_SUPPORTED
+ if (CORProfilerTrackAssemblyLoads())
+ {
+ ProfilerCallAssemblyUnloadStarted(this);
+ }
+#endif
+
+ // we need to release tlb files eagerly
+#ifdef FEATURE_COMINTEROP
+ if(g_fProcessDetach == FALSE)
+ {
+ DefaultCatchFilterParam param; param.pv = COMPLUS_EXCEPTION_EXECUTE_HANDLER;
+ PAL_TRY(Assembly *, pThis, this)
+ {
+ if (pThis->m_pITypeLib && pThis->m_pITypeLib != (ITypeLib*)-1) {
+ pThis->m_pITypeLib->Release();
+ pThis->m_pITypeLib = NULL;
+ }
+ }
+ PAL_EXCEPT_FILTER(DefaultCatchFilter)
+ {
+ }
+ PAL_ENDTRY
+ }
+#endif // FEATURE_COMINTEROP
+
+}
+
+void Assembly::Terminate( BOOL signalProfiler )
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ STRESS_LOG1(LF_LOADER, LL_INFO100, "Assembly::Terminate (this = 0x%p)\n", reinterpret_cast<void *>(this));
+
+ if (this->m_fTerminated)
+ return;
+
+ delete m_pSharedSecurityDesc;
+ m_pSharedSecurityDesc = NULL;
+
+ if (m_pClassLoader != NULL)
+ {
+ GCX_PREEMP();
+ delete m_pClassLoader;
+ m_pClassLoader = NULL;
+ }
+
+ if (m_pLoaderAllocator != NULL)
+ {
+ if (IsCollectible())
+ {
+ // This cleanup code starts resembling parts of AppDomain::Terminate too much.
+ // It would be useful to reduce duplication and also establish clear responsibilities
+ // for LoaderAllocator::Destroy, Assembly::Terminate, LoaderAllocator::Terminate
+ // and LoaderAllocator::~LoaderAllocator. We need to establish how these
+ // cleanup paths interact with app-domain unload and process tear-down, too.
+
+ if (!IsAtProcessExit())
+ {
+ // Suspend the EE to do some clean up that can only occur
+ // while no threads are running.
+ GCX_COOP (); // SuspendEE may require current thread to be in Coop mode
+ // SuspendEE cares about the reason flag only when invoked for a GC
+ // Other values are typically ignored. If using SUSPEND_FOR_APPDOMAIN_SHUTDOWN
+ // is inappropriate, we can introduce a new flag or hijack an unused one.
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_APPDOMAIN_SHUTDOWN);
+ }
+
+ ExecutionManager::Unload(m_pLoaderAllocator);
+
+ m_pLoaderAllocator->UninitVirtualCallStubManager();
+ MethodTable::ClearMethodDataCache();
+ _ASSERTE(m_pDomain->IsAppDomain());
+ AppDomain *pAppDomain = m_pDomain->AsAppDomain();
+ ClearJitGenericHandleCache(pAppDomain);
+
+ if (!IsAtProcessExit())
+ {
+ // Resume the EE.
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+ }
+
+ // Once the manifest file is tenured, the managed LoaderAllocatorScout is responsible for cleanup.
+ if (m_pManifest != NULL && m_pManifest->IsTenured())
+ {
+ pAppDomain->RegisterLoaderAllocatorForDeletion(m_pLoaderAllocator);
+ }
+ }
+ m_pLoaderAllocator = NULL;
+ }
+
+ COUNTER_ONLY(GetPerfCounters().m_Loading.cAssemblies--);
+
+
+#ifdef PROFILING_SUPPORTED
+ if (CORProfilerTrackAssemblyLoads())
+ {
+ ProfilerCallAssemblyUnloadFinished(this);
+ }
+#endif // PROFILING_SUPPORTED
+
+ this->m_fTerminated = TRUE;
+}
+#endif // CROSSGEN_COMPILE
+
+Assembly * Assembly::Create(
+ BaseDomain * pDomain,
+ PEAssembly * pFile,
+ DebuggerAssemblyControlFlags debuggerFlags,
+ BOOL fIsCollectible,
+ AllocMemTracker * pamTracker,
+ LoaderAllocator * pLoaderAllocator)
+{
+ STANDARD_VM_CONTRACT;
+
+ NewHolder<Assembly> pAssembly (new Assembly(pDomain, pFile, debuggerFlags, fIsCollectible));
+
+ // If there are problems that arise from this call stack, we'll chew up a lot of stack
+ // with the various EX_TRY/EX_HOOKs that we will encounter.
+ INTERIOR_STACK_PROBE_FOR(GetThread(), DEFAULT_ENTRY_PROBE_SIZE);
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAssemblyLoads());
+ GCX_COOP();
+ g_profControlBlock.pProfInterface->AssemblyLoadStarted((AssemblyID)(Assembly *) pAssembly);
+ END_PIN_PROFILER();
+ }
+
+ // Need TRY/HOOK instead of holder so we can get HR of exception thrown for profiler callback
+ EX_TRY
+#endif
+ {
+ pAssembly->Init(pamTracker, pLoaderAllocator);
+ }
+#ifdef PROFILING_SUPPORTED
+ EX_HOOK
+ {
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAssemblyLoads());
+ GCX_COOP();
+ g_profControlBlock.pProfInterface->AssemblyLoadFinished((AssemblyID)(Assembly *) pAssembly,
+ GET_EXCEPTION()->GetHR());
+ END_PIN_PROFILER();
+ }
+ }
+ EX_END_HOOK;
+#endif
+ pAssembly.SuppressRelease();
+ END_INTERIOR_STACK_PROBE;
+
+ return pAssembly;
+} // Assembly::Create
+
+
+#ifndef CROSSGEN_COMPILE
+Assembly *Assembly::CreateDynamic(AppDomain *pDomain, CreateDynamicAssemblyArgs *args)
+{
+ // WARNING: not backout clean
+ CONTRACT(Assembly *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(args));
+ }
+ CONTRACT_END;
+
+ // This must be before creation of the AllocMemTracker so that the destructor for the AllocMemTracker happens before the destructor for pLoaderAllocator.
+ // That is necessary as the allocation of Assembly objects and other related details is done on top of heaps located in
+ // the loader allocator objects.
+ NewHolder<LoaderAllocator> pLoaderAllocator;
+
+ AllocMemTracker amTracker;
+ AllocMemTracker *pamTracker = &amTracker;
+
+ Assembly *pRetVal = NULL;
+
+ AppDomain *pCallersDomain;
+ MethodDesc *pmdEmitter = SystemDomain::GetCallersMethod(args->stackMark, &pCallersDomain);
+
+ // Called either from interop or async delegate invocation. Rejecting because we don't
+ // know how to set the correct permission on the new dynamic assembly.
+ if (!pmdEmitter)
+ COMPlusThrow(kInvalidOperationException);
+
+ Assembly *pCallerAssembly = pmdEmitter->GetAssembly();
+
+ // First, we set up a pseudo-manifest file for the assembly.
+
+ // Set up the assembly name
+
+ STRINGREF strRefName = (STRINGREF) args->assemblyName->GetSimpleName();
+
+ if (strRefName == NULL)
+ COMPlusThrow(kArgumentException, W("ArgumentNull_AssemblyNameName"));
+
+ StackSString name;
+ strRefName->GetSString(name);
+
+ if (name.GetCount() == 0)
+ COMPlusThrow(kArgumentException, W("ArgumentNull_AssemblyNameName"));
+
+ SString::Iterator i = name.Begin();
+ if (COMCharacter::nativeIsWhiteSpace(*i)
+ || name.Find(i, '\\')
+ || name.Find(i, ':')
+ || name.Find(i, '/'))
+ {
+ COMPlusThrow(kArgumentException, W("Argument_InvalidAssemblyName"));
+ }
+
+ // Set up the assembly manifest metadata
+ // When we create dynamic assembly, we always use a working copy of IMetaDataAssemblyEmit
+ // to store temporary runtime assembly information. This is to preserve the invariant that
+ // an assembly must have a PEFile with proper metadata.
+ // This working copy of IMetaDataAssemblyEmit will store every AssemblyRef as a simple name
+ // reference, as we must have an instance of Assembly (which can be a dynamic assembly) before
+ // we can add such a reference. Also, if the referenced assembly is a dynamic strong-named
+ // assembly, it may not be ready to be hashed yet!
+
+ SafeComHolder<IMetaDataAssemblyEmit> pAssemblyEmit;
+ PEFile::DefineEmitScope(
+ IID_IMetaDataAssemblyEmit,
+ &pAssemblyEmit);
+
+ // remember the hash algorithm
+ ULONG ulHashAlgId = args->assemblyName->GetAssemblyHashAlgorithm();
+ if (ulHashAlgId == 0)
+ ulHashAlgId = CALG_SHA1;
+
+ ASSEMBLYMETADATA assemData;
+ memset(&assemData, 0, sizeof(assemData));
+
+ // get the version info (default to 0.0.0.0 if none)
+ VERSIONREF versionRef = (VERSIONREF) args->assemblyName->GetVersion();
+ if (versionRef != NULL)
+ {
+ assemData.usMajorVersion = (USHORT)versionRef->GetMajor();
+ assemData.usMinorVersion = (USHORT)versionRef->GetMinor();
+ assemData.usBuildNumber = (USHORT)versionRef->GetBuild();
+ assemData.usRevisionNumber = (USHORT)versionRef->GetRevision();
+ }
+
+ struct _gc
+ {
+ OBJECTREF granted;
+ OBJECTREF denied;
+ OBJECTREF cultureinfo;
+ STRINGREF pString;
+ OBJECTREF orArrayOrContainer;
+ OBJECTREF throwable;
+ OBJECTREF strongNameKeyPair;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ StackSString culture;
+
+ gc.cultureinfo = args->assemblyName->GetCultureInfo();
+ if (gc.cultureinfo != NULL)
+ {
+ MethodDescCallSite getName(METHOD__CULTURE_INFO__GET_NAME, &gc.cultureinfo);
+
+ ARG_SLOT args2[] =
+ {
+ ObjToArgSlot(gc.cultureinfo)
+ };
+
+ // convert culture info into a managed string form
+ gc.pString = getName.Call_RetSTRINGREF(args2);
+ gc.pString->GetSString(culture);
+
+ assemData.szLocale = (LPWSTR) (LPCWSTR) culture;
+ }
+
+ SBuffer publicKey;
+ if (args->assemblyName->GetPublicKey() != NULL)
+ {
+ publicKey.Set(args->assemblyName->GetPublicKey()->GetDataPtr(),
+ args->assemblyName->GetPublicKey()->GetNumComponents());
+ }
+
+
+ // get flags
+ DWORD dwFlags = args->assemblyName->GetFlags();
+
+ // Now create a dynamic PE file out of the name & metadata
+ PEAssemblyHolder pFile;
+
+ {
+ GCX_PREEMP();
+
+ mdAssembly ma;
+ IfFailThrow(pAssemblyEmit->DefineAssembly(publicKey, publicKey.GetSize(), ulHashAlgId,
+ name, &assemData, dwFlags,
+ &ma));
+ pFile = PEAssembly::Create(pCallerAssembly->GetManifestFile(), pAssemblyEmit, args->access & ASSEMBLY_ACCESS_REFLECTION_ONLY);
+ }
+
+ AssemblyLoadSecurity loadSecurity;
+#ifndef FEATURE_CORECLR
+ DWORD dwSpecialFlags = 0xFFFFFFFF;
+
+ // Don't bother with setting up permissions if this isn't allowed to run
+ // This doesn't apply in CoreCLR because you cannot specify evidence when creating a dynamic assembly
+ if ((args->identity != NULL) &&
+ (args->access & ASSEMBLY_ACCESS_RUN))
+ {
+ loadSecurity.m_pAdditionalEvidence = &args->identity;
+ }
+ else
+ {
+ if (pCallerAssembly != NULL) // can be null if caller is interop
+ {
+ if (args->securityContextSource == kCurrentAssembly)
+ {
+ IAssemblySecurityDescriptor *pCallerSecDesc = pCallerAssembly->GetSecurityDescriptor(pCallersDomain);
+ gc.granted = pCallerSecDesc->GetGrantedPermissionSet(&(gc.denied));
+ dwSpecialFlags = pCallerSecDesc->GetSpecialFlags();
+ }
+ else
+ {
+ IApplicationSecurityDescriptor *pCallersDomainSecDesc = pCallersDomain->GetSecurityDescriptor();
+
+#ifdef FEATURE_CAS_POLICY
+ // We only want to propagate the identity of homogeneous domains, since heterogeneous domains tend
+ // to be fully trusted even if they are housing partially trusted code - which could lead to an
+ // elevation of privilege if we allow the grant set to be pushed to assemblies partially trusted
+ // code is loading.
+ if (!pCallersDomainSecDesc->IsHomogeneous())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_SecurityContextSourceAppDomainInHeterogenous"));
+ }
+#endif // FEATURE_CAS_POLICY
+
+ gc.granted = pCallersDomainSecDesc->GetGrantedPermissionSet();
+ dwSpecialFlags = pCallersDomainSecDesc->GetSpecialFlags();
+ }
+
+ // Caller may be in another appdomain context, in which case we'll
+ // need to marshal/unmarshal the grant and deny sets across.
+#ifdef FEATURE_REMOTING // should not happen without remoting
+ if (pCallersDomain != ::GetAppDomain())
+ {
+ gc.granted = AppDomainHelper::CrossContextCopyFrom(pCallersDomain->GetId(), &(gc.granted));
+ if (gc.denied != NULL)
+ {
+ gc.denied = AppDomainHelper::CrossContextCopyFrom(pCallersDomain->GetId(), &(gc.denied));
+ }
+ }
+#else // !FEATURE_REMOTING
+ _ASSERTE(pCallersDomain == ::GetAppDomain());
+#endif // FEATURE_REMOTING
+ }
+ }
+#else // FEATURE_CORECLR
+ // In Silverlight all dynamic assemblies should be transparent and partially trusted, even if they are
+ // created by platform assemblies. Thus they should inherit the grant sets from the appdomain not the
+ // parent assembly.
+ IApplicationSecurityDescriptor *pCurrentDomainSecDesc = ::GetAppDomain()->GetSecurityDescriptor();
+ gc.granted = pCurrentDomainSecDesc->GetGrantedPermissionSet();
+ DWORD dwSpecialFlags = pCurrentDomainSecDesc->GetSpecialFlags();
+#endif // !FEATURE_CORECLR
+
+ // If the dynamic assembly creator did not specify evidence for the newly created assembly, then it
+ // should inherit the grant set of the creating assembly.
+ if (loadSecurity.m_pAdditionalEvidence == NULL)
+ {
+#ifdef FEATURE_CAS_POLICY
+ // If we're going to inherit the grant set of an anonymously hosted dynamic method, it will be
+ // full trust/transparent. In that case, we should demand full trust.
+ if(args->securityContextSource == kCurrentAssembly &&
+ pCallerAssembly != NULL &&
+ pCallersDomain != NULL &&
+ pCallerAssembly->GetDomainAssembly(pCallersDomain) == pCallersDomain->GetAnonymouslyHostedDynamicMethodsAssembly())
+ {
+ loadSecurity.m_fPropagatingAnonymouslyHostedDynamicMethodGrant = true;
+ }
+#endif // FEATURE_CAS_POLICY
+
+ loadSecurity.m_pGrantSet = &gc.granted;
+ loadSecurity.m_pRefusedSet = &gc.denied;
+ loadSecurity.m_dwSpecialFlags = dwSpecialFlags;
+ }
+
+ NewHolder<DomainAssembly> pDomainAssembly;
+
+ {
+ GCX_PREEMP();
+
+ // Create a new LoaderAllocator if appropriate
+ if ((args->access & ASSEMBLY_ACCESS_COLLECT) != 0)
+ {
+ AssemblyLoaderAllocator *pAssemblyLoaderAllocator = new AssemblyLoaderAllocator();
+ pLoaderAllocator = pAssemblyLoaderAllocator;
+
+ // Some of the initialization functions are not virtual. Call through the derived class
+ // to prevent calling the base class version.
+ pAssemblyLoaderAllocator->Init(pDomain);
+
+ // Setup the managed proxy now, but do not actually transfer ownership to it.
+ // Once everything is setup and nothing can fail anymore, the ownership will be
+ // atomically transferred by a call to LoaderAllocator::ActivateManagedTracking().
+ pAssemblyLoaderAllocator->SetupManagedTracking(&args->loaderAllocator);
+ }
+ else
+ {
+ pLoaderAllocator = pDomain->GetLoaderAllocator();
+ pLoaderAllocator.SuppressRelease();
+ }
+
+ // Create a domain assembly
+ pDomainAssembly = new DomainAssembly(pDomain, pFile, &loadSecurity, pLoaderAllocator);
+ }
+
+ // Start loading process
+
+#ifdef FEATURE_CAS_POLICY
+ // Get the security descriptor for the assembly.
+ IAssemblySecurityDescriptor *pSecDesc = pDomainAssembly->GetSecurityDescriptor();
+
+ // Propagate identity and permission request information into the assembly's
+ // security descriptor. Then when policy is resolved we'll end up with the
+ // correct grant set.
+ // If identity has not been provided then the caller's assembly will be
+ // calculated instead and we'll just copy the granted permissions from the
+ // caller to the new assembly and mark policy as resolved (done
+ // automatically by SetGrantedPermissionSet).
+ pSecDesc->SetRequestedPermissionSet(args->requiredPset,
+ args->optionalPset,
+ args->refusedPset);
+#endif // FEATURE_CAS_POLICY
+
+ {
+ // Create a concrete assembly
+ // (!Do not remove scoping brace: order is important here: the Assembly holder must destruct before the AllocMemTracker!)
+ NewHolder<Assembly> pAssem;
+
+ {
+ GCX_PREEMP();
+ // Assembly::Create will call SuppressRelease on the NewHolder that holds the LoaderAllocator when it transfers ownership
+ pAssem = Assembly::Create(pDomain, pFile, pDomainAssembly->GetDebuggerInfoBits(), args->access & ASSEMBLY_ACCESS_COLLECT ? TRUE : FALSE, pamTracker, pLoaderAllocator);
+
+ ReflectionModule* pModule = (ReflectionModule*) pAssem->GetManifestModule();
+ pModule->SetCreatingAssembly( pCallerAssembly );
+
+
+ if ((args->access & ASSEMBLY_ACCESS_COLLECT) != 0)
+ {
+ // Initializing the virtual call stub manager is delayed to remove the need for the LoaderAllocator destructor to properly handle
+ // uninitializing the VSD system. (There is a need to suspend the runtime, and that's tricky)
+ pLoaderAllocator->InitVirtualCallStubManager(pDomain, TRUE);
+ }
+ }
+
+ pAssem->m_isDynamic = true;
+
+ pAssem->m_dwDynamicAssemblyAccess = args->access;
+
+#ifdef FEATURE_CAS_POLICY
+ // If a legacy assembly is emitting an assembly, then we implicitly add the legacy attribute. If the legacy
+ // assembly is also in partial trust, we implicitly make the emitted assembly transparent.
+ ModuleSecurityDescriptor *pEmittingMSD = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pCallerAssembly);
+ if (pEmittingMSD->GetSecurityRuleSet() == SecurityRuleSet_Level1)
+ {
+ IAssemblySecurityDescriptor *pCallerSecDesc = pCallerAssembly->GetSecurityDescriptor(pCallersDomain);
+ if (!pCallerSecDesc->IsFullyTrusted())
+ {
+ args->flags = kTransparentAssembly;
+ }
+ }
+
+ // If the code emitting the dynamic assembly is transparent and it is attempting to emit a non-transparent
+ // assembly, then we need to do a demand for the grant set of the emitting assembly (which should also be
+ // the grant set of the dynamic assembly).
+ if (Security::IsMethodTransparent(pmdEmitter) && !(args->flags & kTransparentAssembly))
+ {
+ Security::DemandGrantSet(pCallerAssembly->GetSecurityDescriptor(pCallersDomain));
+ }
+#else // FEATURE_CORECLR
+ // Making the dynamic assembly opportunistically critical in full trust CoreCLR and transparent otherwise.
+ if (!GetAppDomain()->GetSecurityDescriptor()->IsFullyTrusted())
+ {
+ args->flags = kTransparentAssembly;
+ }
+#endif //!FEATURE_CORECLR
+
+ // Fake up a module security descriptor for the assembly.
+ TokenSecurityDescriptorFlags tokenFlags = TokenSecurityDescriptorFlags_None;
+ if (args->flags & kAllCriticalAssembly)
+ tokenFlags |= TokenSecurityDescriptorFlags_AllCritical;
+ if (args->flags & kAptcaAssembly)
+ tokenFlags |= TokenSecurityDescriptorFlags_APTCA;
+ if (args->flags & kCriticalAssembly)
+ tokenFlags |= TokenSecurityDescriptorFlags_Critical;
+ if (args->flags & kTransparentAssembly)
+ tokenFlags |= TokenSecurityDescriptorFlags_Transparent;
+ if (args->flags & kTreatAsSafeAssembly)
+ tokenFlags |= TokenSecurityDescriptorFlags_TreatAsSafe;
+
+#ifdef FEATURE_APTCA
+ if (args->aptcaBlob != NULL)
+ {
+ tokenFlags |= ParseAptcaAttribute(args->aptcaBlob->GetDirectPointerToNonObjectElements(),
+ args->aptcaBlob->GetNumComponents());
+ }
+
+#endif // FEATURE_APTCA
+
+#ifndef FEATURE_CORECLR
+ // Use the security rules given to us if the emitting code has selected a specific one. Otherwise,
+ // inherit the security rules of the emitting assembly.
+ if (args->securityRulesBlob != NULL)
+ {
+ tokenFlags |= ParseSecurityRulesAttribute(args->securityRulesBlob->GetDirectPointerToNonObjectElements(),
+ args->securityRulesBlob->GetNumComponents());
+ }
+ else
+ {
+ // Ensure that dynamic assemblies created by mscorlib always specify a rule set, since we want to
+ // make sure that creating a level 2 assembly was an explicit decision by the emitting code,
+ // rather than an implicit decision because mscorlib is level 2 itself.
+ //
+ // If you're seeing this assert, it means that you've created a dynamic assembly from mscorlib,
+ // but did not pass a CustomAttributeBuilder for the SecurityRulesAttribute to the
+ // DefineDynamicAssembly call.
+ _ASSERTE(!pCallerAssembly->IsSystem());
+
+ // Use the creating assembly's security rule set for the emitted assembly
+ SecurityRuleSet callerRuleSet =
+ ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pCallerAssembly)->GetSecurityRuleSet();
+ tokenFlags |= EncodeSecurityRuleSet(callerRuleSet);
+
+ tokenFlags |= TokenSecurityDescriptorFlags_SecurityRules;
+ }
+#endif // !FEATURE_CORECLR
+
+ _ASSERTE(pAssem->GetManifestModule()->m_pModuleSecurityDescriptor != NULL);
+ pAssem->GetManifestModule()->m_pModuleSecurityDescriptor->OverrideTokenFlags(tokenFlags);
+
+ // Set the additional strong name information
+
+ pAssem->SetStrongNameLevel(Assembly::SN_NONE);
+
+ if (publicKey.GetSize() > 0)
+ {
+ pAssem->SetStrongNameLevel(Assembly::SN_PUBLIC_KEY);
+#ifndef FEATURE_CORECLR
+ gc.strongNameKeyPair = args->assemblyName->GetStrongNameKeyPair();
+ // If there's a public key, there might be a strong name key pair.
+ if (gc.strongNameKeyPair != NULL)
+ {
+ MethodDescCallSite getKeyPair(METHOD__STRONG_NAME_KEY_PAIR__GET_KEY_PAIR, &gc.strongNameKeyPair);
+
+ ARG_SLOT arglist[] =
+ {
+ ObjToArgSlot(gc.strongNameKeyPair),
+ PtrToArgSlot(&gc.orArrayOrContainer)
+ };
+
+ BOOL bKeyInArray;
+ bKeyInArray = (BOOL)getKeyPair.Call_RetBool(arglist);
+
+ if (bKeyInArray)
+ {
+ U1ARRAYREF orArray = (U1ARRAYREF)gc.orArrayOrContainer;
+ pAssem->m_cbStrongNameKeyPair = orArray->GetNumComponents();
+ pAssem->m_pbStrongNameKeyPair = new BYTE[pAssem->m_cbStrongNameKeyPair];
+
+ pAssem->m_FreeFlag |= pAssem->FREE_KEY_PAIR;
+ memcpy(pAssem->m_pbStrongNameKeyPair, orArray->GetDataPtr(), pAssem->m_cbStrongNameKeyPair);
+ pAssem->SetStrongNameLevel(Assembly::SN_FULL_KEYPAIR_IN_ARRAY);
+ }
+ else
+ {
+ STRINGREF orContainer = (STRINGREF)gc.orArrayOrContainer;
+ DWORD cchContainer = orContainer->GetStringLength();
+ pAssem->m_pwStrongNameKeyContainer = new WCHAR[cchContainer + 1];
+
+ pAssem->m_FreeFlag |= pAssem->FREE_KEY_CONTAINER;
+ memcpy(pAssem->m_pwStrongNameKeyContainer, orContainer->GetBuffer(), cchContainer * sizeof(WCHAR));
+ pAssem->m_pwStrongNameKeyContainer[cchContainer] = W('\0');
+
+ pAssem->SetStrongNameLevel(Assembly::SN_FULL_KEYPAIR_IN_CONTAINER);
+ }
+ }
+ else
+#endif // FEATURE_CORECLR
+ {
+ // Since we have no way to validate the public key of a dynamic assembly we don't allow
+ // partial trust code to emit a dynamic assembly with an arbitrary public key.
+ // Ideally we shouldn't allow anyone to emit a dynamic assembly with only a public key,
+ // but we allow a couple of exceptions to reduce the compat risk: full trust, caller's own key.
+ // As usual we treat anonymously hosted dynamic methods as partial trust code.
+ DomainAssembly* pCallerDomainAssembly = pCallerAssembly->GetDomainAssembly(pCallersDomain);
+ if (!pCallerDomainAssembly->GetSecurityDescriptor()->IsFullyTrusted() ||
+ pCallerDomainAssembly == pCallersDomain->GetAnonymouslyHostedDynamicMethodsAssembly())
+ {
+ DWORD cbKey = 0;
+ const void* pKey = pCallerAssembly->GetPublicKey(&cbKey);
+
+ if (!publicKey.Equals((const BYTE *)pKey, cbKey))
+ COMPlusThrow(kInvalidOperationException, W("InvalidOperation_StrongNameKeyPairRequired"));
+ }
+ }
+ }
+
+ // We need to suppress release for pAssem to avoid a double release.
+ pAssem.SuppressRelease ();
+
+ {
+ GCX_PREEMP();
+
+ // Finish loading process
+ // <TODO> would be REALLY nice to unify this with main loading loop </TODO>
+ pDomainAssembly->Begin();
+ pDomainAssembly->SetAssembly(pAssem);
+ pDomainAssembly->m_level = FILE_LOAD_ALLOCATE;
+ pDomainAssembly->DeliverSyncEvents();
+ pDomainAssembly->DeliverAsyncEvents();
+ pDomainAssembly->FinishLoad();
+ pDomainAssembly->ClearLoading();
+ pDomainAssembly->m_level = FILE_ACTIVE;
+ }
+
+ // Force the transparency of the module to be computed now, so that we can catch any errors due to
+ // inconsistent assembly level attributes during the assembly creation call, rather than at some
+ // later point.
+ pAssem->GetManifestModule()->m_pModuleSecurityDescriptor->VerifyDataComputed();
+
+ {
+ CANNOTTHROWCOMPLUSEXCEPTION();
+ FAULT_FORBID();
+
+ //Cannot fail after this point
+
+ pDomainAssembly.SuppressRelease(); // This also effectively suppresses the release of the pAssem
+ pamTracker->SuppressRelease();
+
+ // Once we reach this point, the loader allocator lifetime is controlled by the Assembly object.
+ if ((args->access & ASSEMBLY_ACCESS_COLLECT) != 0)
+ {
+ // Atomically transfer ownership to the managed heap
+ pLoaderAllocator->ActivateManagedTracking();
+ pLoaderAllocator.SuppressRelease();
+ }
+
+ pAssem->SetIsTenured();
+ pRetVal = pAssem;
+ }
+ }
+ GCPROTECT_END();
+
+ RETURN pRetVal;
+} // Assembly::CreateDynamic
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ReflectionModule *Assembly::CreateDynamicModule(LPCWSTR wszModuleName, LPCWSTR wszFileName, BOOL fIsTransient, INT32* ptkFile)
+{
+ CONTRACT(ReflectionModule *)
+ {
+ STANDARD_VM_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ AllocMemTracker amTracker;
+
+ // Add a manifest entry for the module
+ mdFile token;
+ IMetaDataAssemblyEmit *pAssemblyEmit = GetManifestFile()->GetAssemblyEmitter();
+ IfFailThrow(pAssemblyEmit->DefineFile(wszFileName, NULL, 0, 0, &token));
+
+ if (ptkFile)
+ *ptkFile = (INT32)token;
+
+ GetManifestModule()->UpdateDynamicMetadataIfNeeded();
+
+ // Define initial metadata for the module
+ SafeComHolder<IMetaDataEmit> pEmit;
+ PEFile::DefineEmitScope(IID_IMetaDataEmit, (void **)&pEmit);
+
+ // the module name will be set later when we create the ReflectionModule
+
+ // Create the PEFile for the module
+ PEModuleHolder pFile(PEModule::Create(GetManifestFile(), token, pEmit));
+
+ // Create the DomainModule
+ NewHolder<DomainModule> pDomainModule(new DomainModule(::GetAppDomain(), GetDomainAssembly(), pFile));
+
+ // Create the module itself
+ ReflectionModuleHolder pWrite(ReflectionModule::Create(this, pFile, &amTracker, wszModuleName, fIsTransient));
+
+ amTracker.SuppressRelease(); //@todo: OOM: is this the right place to commit the tracker?
+ pWrite->SetIsTenured();
+
+ // Modules take the DebuggerAssemblyControlFlags down from its parent Assembly initially.
+ // By default, this turns on JIT optimization.
+
+ pWrite->SetDebuggerInfoBits(GetDebuggerInfoBits());
+
+ // Associate the two
+ pDomainModule->SetModule(pWrite);
+ m_pManifest->StoreFileThrowing(token, pWrite);
+
+ // Simulate loading process
+ pDomainModule->Begin();
+ pDomainModule->DeliverSyncEvents();
+ pDomainModule->DeliverAsyncEvents();
+ pDomainModule->FinishLoad();
+ pDomainModule->ClearLoading();
+ pDomainModule->m_level = FILE_ACTIVE;
+
+ pDomainModule.SuppressRelease();
+ ReflectionModule *pModule = pWrite.Extract();
+
+ LPCSTR szUTF8FileName;
+ CQuickBytes qbLC;
+
+ // Get the UTF8 file name
+ IfFailThrow(m_pManifest->GetMDImport()->GetFileProps(token, &szUTF8FileName, NULL, NULL, NULL));
+ UTF8_TO_LOWER_CASE(szUTF8FileName, qbLC);
+ LPCSTR szUTF8FileNameLower = (LPUTF8) qbLC.Ptr();
+
+ CrstHolder lock(&m_crstAllowedFiles);
+
+ // Insert the value into the manifest's lookup table.
+ // Need to perform case-insensitive hashing as well.
+ m_pAllowedFiles->InsertValue(szUTF8FileName, (HashDatum)(size_t)token, TRUE);
+ m_pAllowedFiles->InsertValue(szUTF8FileNameLower, (HashDatum)(size_t)token, TRUE);
+
+ // Now make file token associate with the loaded module
+ m_pManifest->StoreFileThrowing(token, pModule);
+
+ RETURN pModule;
+} // Assembly::CreateDynamicModule
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+#endif // CROSSGEN_COMPILE
+
+void Assembly::SetDomainAssembly(DomainAssembly *pDomainAssembly)
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckPointer(pDomainAssembly));
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ GetManifestModule()->SetDomainFile(pDomainAssembly);
+
+ IAssemblySecurityDescriptor *pSec = pDomainAssembly->GetSecurityDescriptor();
+
+ GCX_COOP();
+ pSec->ResolvePolicy(GetSharedSecurityDescriptor(), pDomainAssembly->ShouldSkipPolicyResolution());
+
+} // Assembly::SetDomainAssembly
+
+#endif // #ifndef DACCESS_COMPILE
+
+DomainAssembly *Assembly::GetDomainAssembly(AppDomain *pDomain)
+{
+ CONTRACT(DomainAssembly *)
+ {
+ PRECONDITION(CheckPointer(pDomain, NULL_NOT_OK));
+ POSTCONDITION(CheckPointer(RETVAL));
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACT_END;
+
+ RETURN GetManifestModule()->GetDomainAssembly(pDomain);
+}
+
+DomainAssembly *Assembly::FindDomainAssembly(AppDomain *pDomain)
+{
+ CONTRACT(DomainAssembly *)
+ {
+ PRECONDITION(CheckPointer(pDomain));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ PREFIX_ASSUME (GetManifestModule() !=NULL);
+ RETURN GetManifestModule()->FindDomainAssembly(pDomain);
+}
+
+BOOL Assembly::IsIntrospectionOnly()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pManifestFile->IsIntrospectionOnly();
+}
+
+PTR_LoaderHeap Assembly::GetLowFrequencyHeap()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetLoaderAllocator()->GetLowFrequencyHeap();
+}
+
+PTR_LoaderHeap Assembly::GetHighFrequencyHeap()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetLoaderAllocator()->GetHighFrequencyHeap();
+}
+
+
+PTR_LoaderHeap Assembly::GetStubHeap()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetLoaderAllocator()->GetStubHeap();
+}
+
+
+PTR_BaseDomain Assembly::GetDomain()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(m_pDomain);
+ return (m_pDomain);
+}
+IAssemblySecurityDescriptor *Assembly::GetSecurityDescriptor(AppDomain *pDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ IAssemblySecurityDescriptor* pSecDesc;
+
+ if (pDomain == NULL)
+ {
+#ifndef DACCESS_COMPILE
+ pDomain = ::GetAppDomain();
+#else //DACCESS_COMPILE
+ DacNotImpl();
+#endif //DACCESS_COMPILE
+ }
+
+ PREFIX_ASSUME(FindDomainAssembly(pDomain) != NULL);
+ pSecDesc = FindDomainAssembly(pDomain)->GetSecurityDescriptor();
+
+ CONSISTENCY_CHECK(pSecDesc != NULL);
+
+ return pSecDesc;
+}
+
+#ifndef DACCESS_COMPILE
+
+const SecurityTransparencyBehavior *Assembly::GetSecurityTransparencyBehavior()
+{
+ CONTRACT(const SecurityTransparencyBehavior *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ if (m_pTransparencyBehavior == NULL)
+ {
+ ModuleSecurityDescriptor *pModuleSecurityDescriptor = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(this);
+ SetSecurityTransparencyBehavior(SecurityTransparencyBehavior::GetTransparencyBehavior(pModuleSecurityDescriptor->GetSecurityRuleSet()));
+ }
+
+ RETURN(m_pTransparencyBehavior);
+}
+
+// This method is like GetTransparencyBehavior, but will not attempt to get the transparency behavior if we
+// don't already know it, and therefore may return NULL
+const SecurityTransparencyBehavior *Assembly::TryGetSecurityTransparencyBehavior()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pTransparencyBehavior;
+}
+
+
+// The transparency behavior object passed to this method must have a lifetime of at least as long
+// as the assembly itself.
+void Assembly::SetSecurityTransparencyBehavior(const SecurityTransparencyBehavior *pTransparencyBehavior)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pTransparencyBehavior));
+ PRECONDITION(m_pTransparencyBehavior == NULL || m_pTransparencyBehavior == pTransparencyBehavior);
+ }
+ CONTRACTL_END;
+
+ m_pTransparencyBehavior = pTransparencyBehavior;
+}
+
+void Assembly::SetParent(BaseDomain* pParent)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pDomain = pParent;
+}
+
+#endif // !DACCESS_COMPILE
+
+mdFile Assembly::GetManifestFileToken(LPCSTR name)
+{
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ HashDatum datum;
+ // Note: We're doing a case sensitive lookup
+ // This is OK because the lookup string and the string we insert into the hashtable
+ // are obtained from the same place.
+
+ // m_pAllowedFiles only grows - entries are never deleted from it. So we do not take
+ // a lock around GetValue. If the code is modified such that we delete entries from m_pAllowedFiles,
+ // reconsider whether the callers that consume the mdFile should take the m_crstAllowedFiles lock.
+ if (m_pAllowedFiles->GetValue(name, &datum)) {
+
+ if (datum != NULL) // internal module
+ return (mdFile)(size_t)PTR_TO_TADDR(datum);
+ else // manifest file
+ return mdFileNil;
+ }
+ else
+ return mdTokenNil; // not found
+#else
+ return mdFileNil;
+#endif
+}
+
+mdFile Assembly::GetManifestFileToken(IMDInternalImport *pImport, mdFile kFile)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ LPCSTR name;
+ if ((TypeFromToken(kFile) != mdtFile) ||
+ !pImport->IsValidToken(kFile))
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"Invalid File token");
+ return mdTokenNil;
+ }
+
+ if (FAILED(pImport->GetFileProps(kFile, &name, NULL, NULL, NULL)))
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"Invalid File token");
+ return mdTokenNil;
+ }
+
+ return GetManifestFileToken(name);
+}
+
+Module *Assembly::FindModuleByExportedType(mdExportedType mdType,
+ Loader::LoadFlag loadFlag,
+ mdTypeDef mdNested,
+ mdTypeDef* pCL)
+{
+ CONTRACT(Module *)
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, loadFlag==Loader::Load ? NULL_NOT_OK : NULL_OK));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END
+
+ mdToken mdLinkRef;
+ mdToken mdBinding;
+
+ IMDInternalImport *pManifestImport = GetManifestImport();
+
+ IfFailThrow(pManifestImport->GetExportedTypeProps(
+ mdType,
+ NULL,
+ NULL,
+ &mdLinkRef, // Impl
+ &mdBinding, // Hint
+ NULL)); // dwflags
+
+ // Don't trust the returned tokens.
+ if (!pManifestImport->IsValidToken(mdLinkRef))
+ {
+ if (loadFlag != Loader::Load)
+ {
+ RETURN NULL;
+ }
+ else
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_TOKEN);
+ }
+ }
+
+ switch(TypeFromToken(mdLinkRef)) {
+ case mdtAssemblyRef:
+ {
+ *pCL = mdTypeDefNil; // We don't trust the mdBinding token
+
+ Assembly *pAssembly = NULL;
+ switch(loadFlag)
+ {
+ case Loader::Load:
+ {
+#ifndef DACCESS_COMPILE
+ // LoadAssembly never returns NULL
+ DomainAssembly * pDomainAssembly =
+ GetManifestModule()->LoadAssembly(::GetAppDomain(), mdLinkRef);
+ PREFIX_ASSUME(pDomainAssembly != NULL);
+
+ RETURN pDomainAssembly->GetCurrentModule();
+#else
+ _ASSERTE(!"DAC shouldn't attempt to trigger loading");
+ return NULL;
+#endif // !DACCESS_COMPILE
+ };
+ case Loader::DontLoad:
+ pAssembly = GetManifestModule()->GetAssemblyIfLoaded(mdLinkRef);
+ break;
+ case Loader::SafeLookup:
+ pAssembly = GetManifestModule()->LookupAssemblyRef(mdLinkRef);
+ break;
+ default:
+ _ASSERTE(FALSE);
+ }
+
+ if (pAssembly)
+ RETURN pAssembly->GetManifestModule();
+ else
+ RETURN NULL;
+
+ }
+
+ case mdtFile:
+ {
+ // We may not want to trust this TypeDef token, since it
+ // was saved in a scope other than the one it was defined in
+ if (mdNested == mdTypeDefNil)
+ *pCL = mdBinding;
+ else
+ *pCL = mdNested;
+
+ // Note that we don't want to attempt a LoadModule if a GetModuleIfLoaded will
+ // succeed, because it has a stronger contract.
+ Module *pModule = GetManifestModule()->GetModuleIfLoaded(mdLinkRef, TRUE, FALSE);
+#ifdef DACCESS_COMPILE
+ return pModule;
+#else
+ if (pModule != NULL)
+ RETURN pModule;
+
+ if(loadFlag==Loader::SafeLookup)
+ return NULL;
+
+ // We should never get here in the GC case - the above should have succeeded.
+ CONSISTENCY_CHECK(!FORBIDGC_LOADER_USE_ENABLED());
+
+ DomainFile * pDomainModule = GetManifestModule()->LoadModule(::GetAppDomain(), mdLinkRef, FALSE, loadFlag!=Loader::Load);
+
+ if (pDomainModule == NULL)
+ RETURN NULL;
+ else
+ {
+ pModule = pDomainModule->GetCurrentModule();
+ if (pModule == NULL)
+ {
+ _ASSERTE(loadFlag!=Loader::Load);
+ }
+
+ RETURN pModule;
+ }
+#endif // DACCESS_COMPILE
+ }
+
+ case mdtExportedType:
+ // Only override the nested type token if it hasn't been set yet.
+ if (mdNested != mdTypeDefNil)
+ mdBinding = mdNested;
+
+ RETURN FindModuleByExportedType(mdLinkRef, loadFlag, mdBinding, pCL);
+
+ default:
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_TOKEN_TYPE);
+ }
+} // Assembly::FindModuleByExportedType
+
+
+// The returned Module is non-NULL unless you prevented the load by setting loadFlag=Loader::DontLoad.
+/* static */
+Module * Assembly::FindModuleByTypeRef(
+ Module * pModule,
+ mdTypeRef tkType,
+ Loader::LoadFlag loadFlag,
+ BOOL * pfNoResolutionScope)
+{
+ CONTRACT(Module *)
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM();); }
+
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(TypeFromToken(tkType) == mdtTypeRef);
+ PRECONDITION(CheckPointer(pfNoResolutionScope));
+ POSTCONDITION( CheckPointer(RETVAL, loadFlag==Loader::Load ? NULL_NOT_OK : NULL_OK) );
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END
+
+ // WARNING! Correctness of the type forwarder detection algorithm in code:ClassLoader::ResolveTokenToTypeDefThrowing
+ // relies on this function not performing any form of type forwarding itself.
+
+ IMDInternalImport * pImport;
+ mdTypeRef tkTopLevelEncloserTypeRef;
+
+ pImport = pModule->GetMDImport();
+ if (TypeFromToken(tkType) != mdtTypeRef)
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_TOKEN_TYPE);
+ }
+
+ {
+ // Find the top level encloser
+ GCX_NOTRIGGER();
+
+ // If nested, get top level encloser's impl
+ int iter = 0;
+ int maxIter = 1000;
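+ // A malformed image could contain a circular ResolutionScope chain; bounding the walk
+ // at maxIter iterations keeps this encloser search from spinning forever.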
+ do
+ {
+ _ASSERTE(TypeFromToken(tkType) == mdtTypeRef);
+ tkTopLevelEncloserTypeRef = tkType;
+
+ if (!pImport->IsValidToken(tkType) || iter >= maxIter)
+ {
+ break;
+ }
+
+ IfFailThrow(pImport->GetResolutionScopeOfTypeRef(tkType, &tkType));
+
+ // nil-scope TR okay if there's an ExportedType
+ // Return manifest file
+ if (IsNilToken(tkType))
+ {
+ *pfNoResolutionScope = TRUE;
+ RETURN(pModule);
+ }
+ iter++;
+ }
+ while (TypeFromToken(tkType) == mdtTypeRef);
+ }
+
+ *pfNoResolutionScope = FALSE;
+
+#ifndef DACCESS_COMPILE
+ if (!pImport->IsValidToken(tkType)) // redundant check only when invalid token already found.
+ {
+ THROW_BAD_FORMAT(BFA_BAD_TYPEREF_TOKEN, pModule);
+ }
+#endif //!DACCESS_COMPILE
+
+ switch (TypeFromToken(tkType))
+ {
+ case mdtModule:
+ {
+ // Type is in the referencing module.
+ GCX_NOTRIGGER();
+ CANNOTTHROWCOMPLUSEXCEPTION();
+ RETURN( pModule );
+ }
+
+ case mdtModuleRef:
+ {
+ if ((loadFlag != Loader::Load) || IsGCThread() || IsStackWalkerThread())
+ {
+ // Either we're not supposed to load, or we're doing a GC or stackwalk
+ // in which case we shouldn't need to load. So just look up the module
+ // and return what we find.
+ RETURN(pModule->LookupModule(tkType,FALSE));
+ }
+
+#ifndef DACCESS_COMPILE
+ DomainFile * pActualDomainFile = pModule->LoadModule(::GetAppDomain(), tkType, FALSE, loadFlag!=Loader::Load);
+ if (pActualDomainFile == NULL)
+ {
+ RETURN NULL;
+ }
+ else
+ {
+ RETURN(pActualDomainFile->GetModule());
+ }
+
+#else //DACCESS_COMPILE
+ _ASSERTE(loadFlag!=Loader::Load);
+ DacNotImpl();
+ RETURN NULL;
+#endif //DACCESS_COMPILE
+ }
+ break;
+
+ case mdtAssemblyRef:
+ {
+ // Do this first because it has a strong contract
+ Assembly * pAssembly = NULL;
+
+#if defined(FEATURE_COMINTEROP) || !defined(DACCESS_COMPILE)
+ LPCUTF8 szNamespace = NULL;
+ LPCUTF8 szClassName = NULL;
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ if (pModule->HasBindableIdentity(tkType))
+#endif// FEATURE_COMINTEROP
+ {
+ _ASSERTE(!IsAfContentType_WindowsRuntime(pModule->GetAssemblyRefFlags(tkType)));
+ if (loadFlag == Loader::SafeLookup)
+ {
+ pAssembly = pModule->LookupAssemblyRef(tkType);
+ }
+ else
+ {
+ pAssembly = pModule->GetAssemblyIfLoaded(tkType);
+ }
+ }
+#ifdef FEATURE_COMINTEROP
+ else
+ {
+ _ASSERTE(IsAfContentType_WindowsRuntime(pModule->GetAssemblyRefFlags(tkType)));
+
+ if (FAILED(pImport->GetNameOfTypeRef(
+ tkTopLevelEncloserTypeRef,
+ &szNamespace,
+ &szClassName)))
+ {
+ THROW_BAD_FORMAT(BFA_BAD_TYPEREF_TOKEN, pModule);
+ }
+
+ pAssembly = pModule->GetAssemblyIfLoaded(
+ tkType,
+ szNamespace,
+ szClassName,
+ NULL); // pMDImportOverride
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (pAssembly != NULL)
+ {
+ RETURN pAssembly->m_pManifest;
+ }
+
+#ifdef DACCESS_COMPILE
+ RETURN NULL;
+#else
+ if (loadFlag != Loader::Load)
+ {
+ RETURN NULL;
+ }
+
+#ifndef FEATURE_CORECLR
+ // Event Tracing for Windows is used to log data for performance and functional testing purposes.
+ // The events below are used to help measure the performance of assembly loading of a static reference.
+ FireEtwLoaderPhaseStart((::GetAppDomain() ? ::GetAppDomain()->GetId().m_dwId : ETWAppDomainIdNotAvailable), ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderStaticLoad, NULL, NULL, GetClrInstanceId());
+#endif //!FEATURE_CORECLR
+
+ DomainAssembly * pDomainAssembly = pModule->LoadAssembly(
+ ::GetAppDomain(),
+ tkType,
+ szNamespace,
+ szClassName);
+
+#ifndef FEATURE_CORECLR
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_PRIVATEBINDING_KEYWORD))
+ {
+ StackSString assemblySimpleName;
+ EX_TRY
+ {
+ if ((pDomainAssembly != NULL) && (pDomainAssembly->GetCurrentAssembly() != NULL))
+ {
+ assemblySimpleName.AppendUTF8(pDomainAssembly->GetCurrentAssembly()->GetSimpleName());
+ assemblySimpleName.Normalize(); // Ensures that the later cast to LPCWSTR does not throw.
+ }
+ }
+ EX_CATCH
+ {
+ assemblySimpleName.Clear();
+ }
+ EX_END_CATCH(RethrowTransientExceptions)
+
+ FireEtwLoaderPhaseEnd(::GetAppDomain() ? ::GetAppDomain()->GetId().m_dwId : ETWAppDomainIdNotAvailable, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderStaticLoad, NULL, assemblySimpleName.IsEmpty() ? NULL : (LPCWSTR)assemblySimpleName, GetClrInstanceId());
+ }
+#endif //!FEATURE_CORECLR
+
+ if (pDomainAssembly == NULL)
+ RETURN NULL;
+
+ pAssembly = pDomainAssembly->GetCurrentAssembly();
+ if (pAssembly == NULL)
+ {
+ RETURN NULL;
+ }
+ else
+ {
+ RETURN pAssembly->m_pManifest;
+ }
+#endif //!DACCESS_COMPILE
+ }
+
+ default:
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_TOKEN_TYPE);
+ }
+} // Assembly::FindModuleByTypeRef
+
+#ifndef DACCESS_COMPILE
+
+Module *Assembly::FindModuleByName(LPCSTR pszModuleName)
+{
+ CONTRACT(Module *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ CQuickBytes qbLC;
+
+ // Need to perform case insensitive hashing.
+ UTF8_TO_LOWER_CASE(pszModuleName, qbLC);
+ pszModuleName = (LPUTF8) qbLC.Ptr();
+
+ mdFile kFile = GetManifestFileToken(pszModuleName);
+ if (kFile == mdTokenNil)
+ ThrowHR(COR_E_UNAUTHORIZEDACCESS);
+
+ if (this == SystemDomain::SystemAssembly())
+ RETURN m_pManifest->GetModuleIfLoaded(kFile, TRUE, TRUE);
+ else
+ RETURN m_pManifest->LoadModule(::GetAppDomain(), kFile)->GetModule();
+}
+
+void Assembly::CacheManifestExportedTypes(AllocMemTracker *pamTracker)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+    // Prejitted assemblies are expected to have their table prebuilt.
+    // If not, we build it here at load time (just as we would if jitting the assembly).
+
+ if (m_pManifest->IsPersistedObject(m_pManifest->m_pAvailableClasses))
+ RETURN;
+
+ mdToken mdExportedType;
+
+ HENUMInternalHolder phEnum(GetManifestImport());
+ phEnum.EnumInit(mdtExportedType,
+ mdTokenNil);
+
+ ClassLoader::AvailableClasses_LockHolder lh(m_pClassLoader);
+
+ for(int i = 0; GetManifestImport()->EnumNext(&phEnum, &mdExportedType); i++)
+ m_pClassLoader->AddExportedTypeHaveLock(GetManifestModule(),
+ mdExportedType,
+ pamTracker);
+
+ RETURN;
+}
+void Assembly::CacheManifestFiles()
+{
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ mdToken tkFile;
+ LPCSTR pszFileName;
+ CQuickBytes qbLC;
+
+ HENUMInternalHolder phEnum(GetManifestImport());
+ phEnum.EnumInit(mdtFile,
+ mdTokenNil);
+
+
+ DWORD dwCount = GetManifestImport()->EnumGetCount(&phEnum);
+ LockOwner lockOwner = { &m_crstAllowedFiles, IsOwnerOfCrst };
+ if (!m_pAllowedFiles->Init(dwCount+1, &lockOwner))
+ ThrowOutOfMemory();
+
+ CrstHolder lock(&m_crstAllowedFiles);
+
+ m_nextAvailableModuleIndex = dwCount+1;
+
+ while (GetManifestImport()->EnumNext(&phEnum, &tkFile))
+ {
+ if (TypeFromToken(tkFile) == mdtFile)
+ {
+ IfFailThrow(GetManifestImport()->GetFileProps(
+ tkFile,
+ &pszFileName,
+ NULL, // hash
+ NULL, // hash len
+ NULL)); // flags
+
+            // Add the file name to the hash table as-is.
+            m_pAllowedFiles->InsertValue(pszFileName, (HashDatum)(size_t)tkFile, TRUE);
+
+            // Also insert a lower-cased copy so lookups can be case-insensitive.
+            {
+                UTF8_TO_LOWER_CASE(pszFileName, qbLC);
+                pszFileName = (LPUTF8) qbLC.Ptr();
+            }
+
+            m_pAllowedFiles->InsertValue(pszFileName, (HashDatum)(size_t)tkFile, TRUE);
+ }
+ }
+
+ HENUMInternalHolder phEnumModules(GetManifestImport());
+ phEnumModules.EnumInit(mdtModuleRef, mdTokenNil);
+ mdToken tkModuleRef;
+
+ while (GetManifestImport()->EnumNext(&phEnumModules, &tkModuleRef))
+ {
+ LPCSTR pszModuleRefName, pszModuleRefNameLower;
+
+ if (TypeFromToken(tkModuleRef) == mdtModuleRef)
+ {
+ IfFailThrow(GetManifestImport()->GetModuleRefProps(tkModuleRef, &pszModuleRefName));
+
+ // Convert to lower case and lookup
+ {
+ UTF8_TO_LOWER_CASE(pszModuleRefName, qbLC);
+ pszModuleRefNameLower = (LPUTF8) qbLC.Ptr();
+ }
+
+ HashDatum datum;
+ if (m_pAllowedFiles->GetValue(pszModuleRefNameLower, &datum))
+ {
+ mdFile tkFileForModuleRef = (mdFile)(size_t)datum;
+ m_pAllowedFiles->InsertValue(pszModuleRefName, (HashDatum)(size_t)tkFileForModuleRef);
+ }
+ }
+ }
+
+ // Add the manifest file
+ if (!GetManifestImport()->IsValidToken(GetManifestImport()->GetModuleFromScope()))
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ IfFailThrow(GetManifestImport()->GetScopeProps(&pszFileName, NULL));
+
+ // Add to hash table
+ m_pAllowedFiles->InsertValue(pszFileName, NULL, TRUE);
+
+ // Need to perform case insensitive hashing as well.
+ {
+ UTF8_TO_LOWER_CASE(pszFileName, qbLC);
+ pszFileName = (LPUTF8) qbLC.Ptr();
+ }
+
+ m_pAllowedFiles->InsertValue(pszFileName, NULL, TRUE);
+
+ RETURN;
+#endif
+}
+
+
+//<TODO>@TODO: if module is not signed it needs to acquire the
+//permissions from the assembly.</TODO>
+void Assembly::PrepareModuleForAssembly(Module* module, AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(module));
+ }
+ CONTRACTL_END;
+
+ if (!module->IsPersistedObject(module->m_pAvailableClasses)) {
+ if (!(module->IsResource()))
+            // ! We intentionally do not take the AvailableClass lock here. It creates problems at
+            // startup, and we haven't published the module yet, so nobody should be searching it.
+ m_pClassLoader->PopulateAvailableClassHashTable(module,
+ pamTracker);
+ }
+
+
+#ifdef DEBUGGING_SUPPORTED
+    // A module initially inherits its DebuggerAssemblyControlFlags
+    // from its parent Assembly.
+ module->SetDebuggerInfoBits(GetDebuggerInfoBits());
+
+ LOG((LF_CORDB, LL_INFO10, "Module %s: bits=0x%x\n",
+ module->GetFile()->GetSimpleName(),
+ module->GetDebuggerInfoBits()));
+#endif // DEBUGGING_SUPPORTED
+
+ m_pManifest->EnsureFileCanBeStored(module->GetModuleRef());
+}
+
+// This is the final step of publishing a Module into an Assembly. This step cannot fail.
+void Assembly::PublishModuleIntoAssembly(Module *module)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ GetManifestModule()->EnsuredStoreFile(module->GetModuleRef(), module);
+ FastInterlockIncrement((LONG*)&m_pClassLoader->m_cUnhashedModules);
+}
+
+
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+Module* Assembly::FindModule(PEFile *pFile, BOOL includeLoading)
+{
+ CONTRACT(Module *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ DomainFile *pModule = GetDomainAssembly()->FindModule(pFile, includeLoading);
+
+ if (pModule == NULL)
+ RETURN NULL;
+ else
+ RETURN pModule->GetModule();
+}
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+#ifdef FEATURE_MIXEDMODE
+DomainFile* Assembly::FindIJWDomainFile(HMODULE hMod, const SString &path)
+{
+ CONTRACT (DomainFile*)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(GetManifestModule()));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ ModuleIterator i = IterateModules();
+ while (i.Next())
+ {
+ PEFile *pFile = i.GetModule()->GetFile();
+
+ if ( !pFile->IsResource()
+ && !pFile->IsDynamic()
+ && !pFile->IsILOnly())
+ {
+ if ( (pFile->GetLoadedIL()!= NULL && pFile->GetIJWBase() == hMod)
+ || PEImage::PathEquals(pFile->GetPath(), path))
+ RETURN i.GetModule()->GetDomainFile();
+ }
+ }
+ RETURN NULL;
+}
+#endif // FEATURE_MIXEDMODE
+
+//*****************************************************************************
+// Set up the list of names of any friend assemblies
+void Assembly::CacheFriendAssemblyInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ if (m_pFriendAssemblyDescriptor == NULL)
+ {
+ FriendAssemblyDescriptor *pFriendAssemblies = FriendAssemblyDescriptor::CreateFriendAssemblyDescriptor(this->GetManifestFile());
+ if (pFriendAssemblies == NULL)
+ {
+ pFriendAssemblies = NO_FRIEND_ASSEMBLIES_MARKER;
+ }
+
+ void *pvPreviousDescriptor = InterlockedCompareExchangeT(&m_pFriendAssemblyDescriptor,
+ pFriendAssemblies,
+ NULL);
+
+        // Another thread raced us and published its descriptor first; discard ours.
+        if (pvPreviousDescriptor != NULL && pFriendAssemblies != NO_FRIEND_ASSEMBLIES_MARKER)
+        {
+            delete pFriendAssemblies;
+        }
+ }
+} // void Assembly::CacheFriendAssemblyInfo()
+
+//*****************************************************************************
+// Is the given assembly a friend of this assembly?
+bool Assembly::GrantsFriendAccessTo(Assembly *pAccessingAssembly, FieldDesc *pFD)
+{
+ WRAPPER_NO_CONTRACT;
+
+ CacheFriendAssemblyInfo();
+
+ if (m_pFriendAssemblyDescriptor == NO_FRIEND_ASSEMBLIES_MARKER)
+ {
+ return false;
+ }
+
+ return m_pFriendAssemblyDescriptor->GrantsFriendAccessTo(pAccessingAssembly, pFD);
+}
+
+bool Assembly::GrantsFriendAccessTo(Assembly *pAccessingAssembly, MethodDesc *pMD)
+{
+ WRAPPER_NO_CONTRACT;
+
+ CacheFriendAssemblyInfo();
+
+ if (m_pFriendAssemblyDescriptor == NO_FRIEND_ASSEMBLIES_MARKER)
+ {
+ return false;
+ }
+
+ return m_pFriendAssemblyDescriptor->GrantsFriendAccessTo(pAccessingAssembly, pMD);
+}
+
+bool Assembly::GrantsFriendAccessTo(Assembly *pAccessingAssembly, MethodTable *pMT)
+{
+ WRAPPER_NO_CONTRACT;
+
+ CacheFriendAssemblyInfo();
+
+ if (m_pFriendAssemblyDescriptor == NO_FRIEND_ASSEMBLIES_MARKER)
+ {
+ return false;
+ }
+
+ return m_pFriendAssemblyDescriptor->GrantsFriendAccessTo(pAccessingAssembly, pMT);
+}
+
+bool Assembly::IgnoresAccessChecksTo(Assembly *pAccessedAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pAccessedAssembly));
+ }
+ CONTRACTL_END;
+
+ CacheFriendAssemblyInfo();
+
+ if (m_pFriendAssemblyDescriptor == NO_FRIEND_ASSEMBLIES_MARKER)
+ {
+ return false;
+ }
+
+ if (pAccessedAssembly->IsDisabledPrivateReflection())
+ {
+ return false;
+ }
+
+ if (!m_fIsDomainNeutral && !GetSecurityDescriptor(GetDomain()->AsAppDomain())->IsFullyTrusted())
+ {
+ return false;
+ }
+
+ return m_pFriendAssemblyDescriptor->IgnoresAccessChecksTo(pAccessedAssembly);
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+enum CorEntryPointType
+{
+ EntryManagedMain, // void main(String[])
+ EntryCrtMain // unsigned main(void)
+};
+
+#ifdef STRESS_THREAD
+
+struct Stress_Thread_Param
+{
+ MethodDesc *pFD;
+ GlobalStrongHandleHolder argHandle;
+ short numSkipArgs;
+ CorEntryPointType EntryType;
+ Thread* pThread;
+
+public:
+ Stress_Thread_Param()
+ : pFD(NULL),
+ argHandle(),
+ numSkipArgs(0),
+ EntryType(EntryManagedMain),
+ pThread(NULL)
+ { LIMITED_METHOD_CONTRACT; }
+
+ Stress_Thread_Param* Clone ()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ NewHolder<Stress_Thread_Param> retVal= new Stress_Thread_Param;
+
+ retVal->pFD = pFD;
+ if (argHandle.GetValue()!=NULL)
+ {
+ GCX_COOP();
+ retVal->argHandle.Assign(CreateDuplicateHandle(argHandle.GetValue()));
+ }
+ retVal->numSkipArgs = numSkipArgs;
+ retVal->EntryType = EntryType;
+ retVal->pThread = pThread;
+ return retVal.Extract();
+ }
+};
+
+struct Stress_Thread_Worker_Param
+{
+ Stress_Thread_Param *lpParameter;
+ ULONG retVal;
+};
+
+static void Stress_Thread_Proc_Worker_Impl(Stress_Thread_Worker_Param * args)
+{
+ STATIC_CONTRACT_THROWS;
+
+ args->retVal = E_FAIL;
+
+ Stress_Thread_Param* lpParam = (Stress_Thread_Param *)args->lpParameter;
+
+ ARG_SLOT stackVar = 0;
+
+ MethodDescCallSite threadStart(lpParam->pFD);
+
+ // Build the parameter array and invoke the method.
+ if (lpParam->EntryType == EntryManagedMain)
+ {
+ PTRARRAYREF StrArgArray = (PTRARRAYREF)ObjectFromHandle(lpParam->argHandle.GetValue());
+ stackVar = ObjToArgSlot(StrArgArray);
+ }
+
+ if (lpParam->pFD->IsVoid())
+ {
+ threadStart.Call(&stackVar);
+ args->retVal = GetLatchedExitCode();
+ }
+ else
+ {
+ // We are doing the same cast as in RunMain. Main is required to return INT32 if it returns.
+ ARG_SLOT retVal = (INT32)threadStart.Call_RetArgSlot(&stackVar);
+ args->retVal = static_cast<ULONG>(retVal);
+ }
+}
+
+// wrap into EX_TRY_NOCATCH and call the real thing
+static void Stress_Thread_Proc_Worker (LPVOID ptr)
+{
+ STATIC_CONTRACT_THROWS;
+
+ EX_TRY_NOCATCH(Stress_Thread_Worker_Param *, args, (Stress_Thread_Worker_Param *) ptr)
+ {
+ Stress_Thread_Proc_Worker_Impl(args);
+ //<TODO>
+ // When we get mainCRTStartup from the C++ then this should be able to go away.</TODO>
+ fflush(stdout);
+ fflush(stderr);
+ }
+ EX_END_NOCATCH
+}
+
+static DWORD WINAPI __stdcall Stress_Thread_Proc (LPVOID lpParameter)
+{
+ STATIC_CONTRACT_THROWS;
+
+ Stress_Thread_Worker_Param args = {(Stress_Thread_Param*)lpParameter,0};
+ Stress_Thread_Param *lpParam = (Stress_Thread_Param *)lpParameter;
+ Thread *pThread = lpParam->pThread;
+ if (!pThread->HasStarted())
+ return 0;
+
+ _ASSERTE(::GetAppDomain() != NULL);
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return E_FAIL);
+ EX_TRY
+ {
+
+ ADID KickOffDomain = pThread->GetKickOffDomainId();
+
+        // A thread should always have a kickoff domain: a thread can never start in an unloaded
+        // domain, because nobody can hold a reference to a thread object in an unloaded domain, so
+        // the thread would already have been collected. It is possible that the unload has merely
+        // started, in which case this thread either wouldn't be allowed in or would be punted anyway.
+ if (KickOffDomain != lpParam->pThread->GetDomain()->GetId())
+ pThread->DoADCallBack(KickOffDomain, Stress_Thread_Proc_Worker, &args);
+ else
+ Stress_Thread_Proc_Worker(&args);
+
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ delete (Stress_Thread_Param *) lpParameter;
+ // Enable preemptive GC so a GC thread can suspend me.
+ GCX_PREEMP_NO_DTOR();
+ DestroyThread(pThread);
+
+ END_SO_INTOLERANT_CODE;
+ return args.retVal;
+}
+
+static void Stress_Thread_Start (LPVOID lpParameter)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACT_END;
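+
+    // Under STRESS_THREAD this function fans out: it spawns m_stressThreadCount - 1 extra
+    // threads, each re-invoking the entry point via Stress_Thread_Proc. Each child gets half
+    // the parent's count (dwThreads/2), so the fan-out terminates once the count drops to 1.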
+
+ Thread *pCurThread = GetThread();
+ if (pCurThread->m_stressThreadCount == -1) {
+ pCurThread->m_stressThreadCount = g_pConfig->GetStressThreadCount();
+ }
+ DWORD dwThreads = pCurThread->m_stressThreadCount;
+ if (dwThreads <= 1)
+ RETURN;
+
+ Thread ** threads = new Thread* [dwThreads-1];
+
+ DWORD n;
+ for (n = 0; n < dwThreads-1; n ++)
+ {
+ threads[n] = SetupUnstartedThread();
+ if (threads[n] == NULL)
+ COMPlusThrowOM();
+
+ threads[n]->m_stressThreadCount = dwThreads/2;
+ Stress_Thread_Param *param = ((Stress_Thread_Param*)lpParameter)->Clone();
+ param->pThread = threads[n];
+ if (!threads[n]->CreateNewThread(0, Stress_Thread_Proc, param))
+ {
+ delete param;
+ threads[n]->DecExternalCount(FALSE);
+ ThrowOutOfMemory();
+ }
+ threads[n]->SetThreadPriority (THREAD_PRIORITY_NORMAL);
+ }
+
+ for (n = 0; n < dwThreads-1; n ++)
+ {
+ threads[n]->StartThread();
+ }
+ __SwitchToThread (0, CALLER_LIMITS_SPINNING);
+
+ RETURN;
+}
+
+void Stress_Thread_RunMain(MethodDesc* pFD, CorEntryPointType EntryType, short numSkipArgs, OBJECTHANDLE argHandle)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Stress_Thread_Param Param;
+ Param.pFD = pFD;
+ Param.argHandle.Assign(argHandle);
+ Param.numSkipArgs = numSkipArgs;
+ Param.EntryType = EntryType;
+ Param.pThread = NULL;
+ Stress_Thread_Start (&Param);
+}
+
+
+#endif // STRESS_THREAD
+
+void DECLSPEC_NORETURN ThrowMainMethodException(MethodDesc* pMD, UINT resID)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ DefineFullyQualifiedNameForClassW();
+ LPCWSTR szClassName = GetFullyQualifiedNameForClassW(pMD->GetMethodTable());
+ LPCUTF8 szUTFMethodName;
+ if (FAILED(pMD->GetMDImport()->GetNameOfMethodDef(pMD->GetMemberDef(), &szUTFMethodName)))
+ {
+ szUTFMethodName = "Invalid MethodDef record";
+ }
+ PREFIX_ASSUME(szUTFMethodName!=NULL);
+ MAKE_WIDEPTR_FROMUTF8(szMethodName, szUTFMethodName);
+ COMPlusThrowHR(COR_E_METHODACCESS, resID, szClassName, szMethodName);
+}
+
+// Validates that the given method is a legal entry point; throws via ThrowMainMethodException if not.
+void ValidateMainMethod(MethodDesc * pFD, CorEntryPointType *pType)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+
+ PRECONDITION(CheckPointer(pType));
+ }
+ CONTRACTL_END;
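+
+    // The only signatures accepted are the classic entry point shapes (the method's name
+    // itself is not checked here):
+    //     static void EntryPoint()          static void EntryPoint(string[] args)
+    //     static int  EntryPoint()          static int  EntryPoint(string[] args)
+    // A uint return is also allowed; see the ELEMENT_TYPE_U4 check below.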
+
+ // Must be static, but we don't care about accessibility
+ if ((pFD->GetAttrs() & mdStatic) == 0)
+ ThrowMainMethodException(pFD, IDS_EE_MAIN_METHOD_MUST_BE_STATIC);
+
+ if (pFD->GetNumGenericClassArgs() != 0 || pFD->GetNumGenericMethodArgs() != 0)
+ ThrowMainMethodException(pFD, IDS_EE_LOAD_BAD_MAIN_SIG);
+
+ // Check for types
+ SigPointer sig(pFD->GetSigPointer());
+
+ ULONG nCallConv;
+ if (FAILED(sig.GetData(&nCallConv)))
+ ThrowMainMethodException(pFD, BFA_BAD_SIGNATURE);
+
+ if (nCallConv != IMAGE_CEE_CS_CALLCONV_DEFAULT)
+ ThrowMainMethodException(pFD, IDS_EE_LOAD_BAD_MAIN_SIG);
+
+ ULONG nParamCount;
+ if (FAILED(sig.GetData(&nParamCount)))
+ ThrowMainMethodException(pFD, BFA_BAD_SIGNATURE);
+
+
+ CorElementType nReturnType;
+ if (FAILED(sig.GetElemType(&nReturnType)))
+ ThrowMainMethodException(pFD, BFA_BAD_SIGNATURE);
+
+ if ((nReturnType != ELEMENT_TYPE_VOID) && (nReturnType != ELEMENT_TYPE_I4) && (nReturnType != ELEMENT_TYPE_U4))
+ ThrowMainMethodException(pFD, IDS_EE_MAIN_METHOD_HAS_INVALID_RTN);
+
+ if (nParamCount == 0)
+ *pType = EntryCrtMain;
+ else {
+ *pType = EntryManagedMain;
+
+ if (nParamCount != 1)
+ ThrowMainMethodException(pFD, IDS_EE_TO_MANY_ARGUMENTS_IN_MAIN);
+
+ CorElementType argType;
+ CorElementType argType2 = ELEMENT_TYPE_END;
+
+ if (FAILED(sig.GetElemType(&argType)))
+ ThrowMainMethodException(pFD, BFA_BAD_SIGNATURE);
+
+ if (argType == ELEMENT_TYPE_SZARRAY)
+ if (FAILED(sig.GetElemType(&argType2)))
+ ThrowMainMethodException(pFD, BFA_BAD_SIGNATURE);
+
+ if (argType != ELEMENT_TYPE_SZARRAY || argType2 != ELEMENT_TYPE_STRING)
+ ThrowMainMethodException(pFD, IDS_EE_LOAD_BAD_MAIN_SIG);
+ }
+}
+
+/* static */
+HRESULT RunMain(MethodDesc *pFD ,
+ short numSkipArgs,
+ INT32 *piRetVal,
+ PTRARRAYREF *stringArgs /*=NULL*/)
+{
+ STATIC_CONTRACT_THROWS;
+ _ASSERTE(piRetVal);
+
+ DWORD cCommandArgs = 0; // count of args on command line
+ LPWSTR *wzArgs = NULL; // command line args
+ HRESULT hr = S_OK;
+
+ *piRetVal = -1;
+
+ // The exit code for the process is communicated in one of two ways. If the
+ // entrypoint returns an 'int' we take that. Otherwise we take a latched
+ // process exit code. This can be modified by the app via setting
+ // Environment's ExitCode property.
+ //
+ // When we're executing the default exe main in the default domain, set the latched exit code to
+ // zero as a default. If it gets set to something else by user code then that value will be returned.
+ //
+ // StringArgs appears to be non-null only when the main method is explicitly invoked via the hosting api
+ // or through creating a subsequent domain and running an exe within it. In those cases we don't
+ // want to reset the (global) latched exit code.
+ if (stringArgs == NULL)
+ SetLatchedExitCode(0);
+
+ if (!pFD) {
+ _ASSERTE(!"Must have a function to call!");
+ return E_FAIL;
+ }
+
+ CorEntryPointType EntryType = EntryManagedMain;
+ ValidateMainMethod(pFD, &EntryType);
+
+ if ((EntryType == EntryManagedMain) &&
+ (stringArgs == NULL)) {
+#ifndef FEATURE_CORECLR
+        // Note: unlike earlier versions of this code, we no longer accept all the different
+        // types of data arguments to main. We now only accept an array of strings.
+
+ wzArgs = CorCommandLine::GetArgvW(&cCommandArgs);
+ // In the WindowsCE case where the app has additional args the count will come back zero.
+ if (cCommandArgs > 0) {
+ if (!wzArgs)
+ return E_INVALIDARG;
+ }
+#else // !FEATURE_CORECLR
+ return E_INVALIDARG;
+#endif // !FEATURE_CORECLR
+ }
+
+ ETWFireEvent(Main_V1);
+
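+    // State needed by the protected body below is packaged into a single struct and passed by
+    // pointer: the EX_TRY_NOCATCH macro runs its body in a nested scope, so surrounding locals
+    // must be threaded through its one parameter rather than referenced directly.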
+ struct Param
+ {
+ MethodDesc *pFD;
+ short numSkipArgs;
+ INT32 *piRetVal;
+ PTRARRAYREF *stringArgs;
+ CorEntryPointType EntryType;
+ DWORD cCommandArgs;
+ LPWSTR *wzArgs;
+ } param;
+ param.pFD = pFD;
+ param.numSkipArgs = numSkipArgs;
+ param.piRetVal = piRetVal;
+ param.stringArgs = stringArgs;
+ param.EntryType = EntryType;
+ param.cCommandArgs = cCommandArgs;
+ param.wzArgs = wzArgs;
+
+ EX_TRY_NOCATCH(Param *, pParam, &param)
+ {
+ MethodDescCallSite threadStart(pParam->pFD);
+
+ PTRARRAYREF StrArgArray = NULL;
+ GCPROTECT_BEGIN(StrArgArray);
+
+ // Build the parameter array and invoke the method.
+ if (pParam->EntryType == EntryManagedMain) {
+ if (pParam->stringArgs == NULL) {
+                // Allocate a string array with one slot for each command-line arg that is not skipped.
+ StrArgArray = (PTRARRAYREF) AllocateObjectArray((pParam->cCommandArgs - pParam->numSkipArgs), g_pStringClass);
+
+ // Create Stringrefs for each of the args
+ for (DWORD arg = pParam->numSkipArgs; arg < pParam->cCommandArgs; arg++) {
+ STRINGREF sref = StringObject::NewString(pParam->wzArgs[arg]);
+ StrArgArray->SetAt(arg - pParam->numSkipArgs, (OBJECTREF) sref);
+ }
+ }
+ else
+ StrArgArray = *pParam->stringArgs;
+ }
+
+#ifdef STRESS_THREAD
+ OBJECTHANDLE argHandle = (StrArgArray != NULL) ? CreateGlobalStrongHandle (StrArgArray) : NULL;
+ Stress_Thread_RunMain(pParam->pFD, pParam->EntryType, pParam->numSkipArgs, argHandle);
+#endif
+
+ ARG_SLOT stackVar = ObjToArgSlot(StrArgArray);
+
+ if (pParam->pFD->IsVoid())
+ {
+ // Set the return value to 0 instead of returning random junk
+ *pParam->piRetVal = 0;
+ threadStart.Call(&stackVar);
+ }
+ else
+ {
+ *pParam->piRetVal = (INT32)threadStart.Call_RetArgSlot(&stackVar);
+ if (pParam->stringArgs == NULL)
+ {
+ SetLatchedExitCode(*pParam->piRetVal);
+ }
+ }
+
+ GCPROTECT_END();
+
+ //<TODO>
+ // When we get mainCRTStartup from the C++ then this should be able to go away.</TODO>
+ fflush(stdout);
+ fflush(stderr);
+ }
+ EX_END_NOCATCH
+
+ ETWFireEvent(MainEnd_V1);
+
+ return hr;
+}
+
+static void RunMainPre()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(GetThread() != 0);
+ g_fWeControlLifetime = TRUE;
+}
+
+static void RunMainPost()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(GetThread()));
+ }
+ CONTRACTL_END
+
+ GCX_PREEMP();
+ ThreadStore::s_pThreadStore->WaitForOtherThreads();
+
+ DWORD dwSecondsToSleep = g_pConfig->GetSleepOnExit();
+
+    // If dwSecondsToSleep is non-zero, sleep for that many seconds before exiting.
+    // This allows vaDumpCmd to detect that our process has gone idle, so we can get
+    // a vadump of the process at this point in its execution.
+    //
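+    // For example, setting the SleepOnExit config value to 5 (an illustrative value) would
+    // keep the process alive for five seconds after the last foreground thread finishes.
+    //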
+ if (dwSecondsToSleep != 0)
+ {
+ ClrSleepEx(dwSecondsToSleep * 1000, FALSE);
+ }
+}
+
+INT32 Assembly::ExecuteMainMethod(PTRARRAYREF *stringArgs)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ // reset the error code for std C
+ errno=0;
+
+ HRESULT hr = S_OK;
+ INT32 iRetVal = 0;
+
+ BEGIN_ENTRYPOINT_THROWS;
+
+ Thread *pThread = GetThread();
+ MethodDesc *pMeth;
+ {
+ // This thread looks like it wandered in -- but actually we rely on it to keep the process alive.
+ pThread->SetBackground(FALSE);
+
+ GCX_COOP();
+
+ pMeth = GetEntryPoint();
+ if (pMeth) {
+ RunMainPre();
+
+#if defined(FEATURE_APPX_BINDER) && defined(FEATURE_MULTICOREJIT)
+ if (AppX::IsAppXProcess())
+ {
+ GCX_PREEMP();
+
+ // we call this to obtain and cache the PRAID value which is used
+ // by multicore JIT manager and watson bucket params generation.
+
+ // NOTE: this makes a COM call into WinRT so we must do this after we've
+ // set the thread's apartment state which will do CoInitializeEx().
+ LPCWSTR praid;
+ hr = AppX::GetApplicationId(praid);
+ _ASSERTE(SUCCEEDED(hr));
+
+ if (!pMeth->GetModule()->HasNativeImage())
+ {
+ // For Appx, multicore JIT is only needed when root assembly does not have NI image
+ // When it has NI image, we can't generate profile, and do not need to playback profile
+ AppDomain * pDomain = pThread->GetDomain();
+ pDomain->GetMulticoreJitManager().AutoStartProfileAppx(pDomain);
+ }
+ }
+#endif // FEATURE_APPX_BINDER && FEATURE_MULTICOREJIT
+
+#ifdef FEATURE_CORECLR
+            // Set the root assembly to the assembly containing the main method.
+            // The root assembly backs the GetEntryAssembly method, which on CoreCLR
+            // is used to get the TargetFrameworkMoniker for the app.
+ AppDomain * pDomain = pThread->GetDomain();
+ pDomain->SetRootAssembly(pMeth->GetAssembly());
+#endif
+
+ hr = RunMain(pMeth, 1, &iRetVal, stringArgs);
+ }
+ }
+
+    // RunMainPost is supposed to be called on the main thread of an EXE,
+    // after that thread has finished doing useful work. It contains logic
+    // to decide when the process should get torn down. So, don't call it from
+    // AppDomain.ExecuteAssembly().
+ if (pMeth) {
+ if (stringArgs == NULL)
+ RunMainPost();
+ }
+ else {
+ StackSString displayName;
+ GetDisplayName(displayName);
+ COMPlusThrowHR(COR_E_MISSINGMETHOD, IDS_EE_FAILED_TO_FIND_MAIN, displayName);
+ }
+
+ IfFailThrow(hr);
+
+ END_ENTRYPOINT_THROWS;
+ return iRetVal;
+}
+#endif // CROSSGEN_COMPILE
+
+MethodDesc* Assembly::GetEntryPoint()
+{
+ CONTRACT(MethodDesc*)
+ {
+ THROWS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+
+ // Can return NULL if no entry point.
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ if (m_pEntryPoint)
+ RETURN m_pEntryPoint;
+
+ mdToken mdEntry = m_pManifestFile->GetEntryPointToken();
+ if (IsNilToken(mdEntry))
+ RETURN NULL;
+
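+    // The entry point token is either an mdtFile token (the entry point lives in another
+    // module of a multi-module assembly, whose header then names the real mdtMethodDef) or
+    // an mdtMethodDef token directly in the manifest module.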
+ Module *pModule = NULL;
+ switch(TypeFromToken(mdEntry)) {
+ case mdtFile:
+ pModule = m_pManifest->LoadModule(::GetAppDomain(), mdEntry, FALSE)->GetModule();
+
+ mdEntry = pModule->GetEntryPointToken();
+ if ( (TypeFromToken(mdEntry) != mdtMethodDef) ||
+ (!pModule->GetMDImport()->IsValidToken(mdEntry)) )
+ pModule = NULL;
+ break;
+
+ case mdtMethodDef:
+ if (m_pManifestFile->GetPersistentMDImport()->IsValidToken(mdEntry))
+ pModule = m_pManifest;
+ break;
+ }
+
+ // May be unmanaged entrypoint
+ if (!pModule)
+ RETURN NULL;
+
+ // We need to get its properties and the class token for this MethodDef token.
+ mdToken mdParent;
+ if (FAILED(pModule->GetMDImport()->GetParentToken(mdEntry, &mdParent))) {
+ StackSString displayName;
+ GetDisplayName(displayName);
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, IDS_EE_ILLEGAL_TOKEN_FOR_MAIN, displayName);
+ }
+
+    // For the entry point, also validate the parameter list. We do this by asking
+    // MDInternalRO::FindParamOfMethod for the return-value (sequence 0) parameter.
+    // If the parameter list is invalid, MDInternalRO::FindParamOfMethod returns
+    // CLDB_E_FILE_CORRUPT and we bail out.
+    //
+    // If the parameter does not exist (CLDB_E_RECORD_NOTFOUND) or is found (S_OK),
+    // we do not bother: either result could only have been produced while walking
+    // a valid parameter record list.
+ mdParamDef pdParam;
+ HRESULT hrValidParamList = pModule->GetMDImport()->FindParamOfMethod(mdEntry, 0, &pdParam);
+ if (hrValidParamList == CLDB_E_FILE_CORRUPT)
+ {
+ // Throw an exception for bad_image_format (because of corrupt metadata)
+ StackSString displayName;
+ GetDisplayName(displayName);
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, IDS_EE_ILLEGAL_TOKEN_FOR_MAIN, displayName);
+ }
+
+ if (mdParent != COR_GLOBAL_PARENT_TOKEN) {
+ GCX_COOP();
+ // This code needs a class init frame, because without it, the
+ // debugger will assume any code that results from searching for a
+        // type handle (i.e., loading an assembly) is the first line of a program.
+ FrameWithCookie<DebuggerClassInitMarkFrame> __dcimf;
+
+ MethodTable * pInitialMT = ClassLoader::LoadTypeDefOrRefThrowing(pModule, mdParent,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef).GetMethodTable();
+
+ m_pEntryPoint = MemberLoader::FindMethod(pInitialMT, mdEntry);
+
+ __dcimf.Pop();
+ }
+ else
+ {
+ m_pEntryPoint = pModule->FindMethod(mdEntry);
+ }
+
+ RETURN m_pEntryPoint;
+}
+
+#ifndef CROSSGEN_COMPILE
+OBJECTREF Assembly::GetExposedObject()
+{
+ CONTRACT(OBJECTREF)
+ {
+ GC_TRIGGERS;
+ THROWS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_COOPERATIVE;
+ }
+ CONTRACT_END;
+
+ RETURN GetDomainAssembly()->GetExposedAssemblyObject();
+}
+#endif // CROSSGEN_COMPILE
+
+/* static */
+BOOL Assembly::FileNotFound(HRESULT hr)
+{
+ LIMITED_METHOD_CONTRACT;
+ return IsHRESULTForExceptionKind(hr, kFileNotFoundException) ||
+#ifdef FEATURE_COMINTEROP
+ (hr == RO_E_METADATA_NAME_NOT_FOUND) ||
+#endif //FEATURE_COMINTEROP
+ (hr == CLR_E_BIND_TYPE_NOT_FOUND);
+}
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+PEModule * Assembly::LoadModule_AddRef(mdFile kFile, BOOL fLoadResource)
+{
+ CONTRACT(PEModule *)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, fLoadResource ? NULL_NOT_OK : NULL_OK));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END
+
+ if (! ((TypeFromToken(kFile) == mdtFile) &&
+ GetManifestImport()->IsValidToken(kFile)) )
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_FILE_TOKEN);
+ }
+
+ LPCSTR psModuleName;
+ DWORD dwFlags;
+ IfFailThrow(GetManifestImport()->GetFileProps(
+ kFile,
+ &psModuleName,
+ NULL,
+ NULL,
+ &dwFlags));
+
+ if (! (IsFfContainsMetaData(dwFlags) || fLoadResource) )
+ RETURN NULL;
+
+ SString name(SString::Utf8, psModuleName);
+ PEModule * pModule = NULL;
+
+ if (AssemblySpec::VerifyBindingString((LPCWSTR)name))
+ {
+ EX_TRY
+ {
+ GCX_PREEMP();
+
+#ifdef FEATURE_FUSION // specific to remote modules
+ if (GetFusionAssembly()) {
+ StackSString path;
+ ::GetAppDomain()->GetFileFromFusion(GetFusionAssembly(),
+ (LPCWSTR)name, path);
+ pModule = PEModule::Open(m_pManifestFile, kFile, path);
+ goto lDone;
+ }
+
+ if (GetIHostAssembly()) {
+ pModule = PEModule::Open(m_pManifestFile, kFile, name);
+ goto lDone;
+ }
+#endif
+ if (!m_pManifestFile->GetPath().IsEmpty()) {
+ StackSString path = m_pManifestFile->GetPath();
+
+ SString::Iterator i = path.End()-1;
+
+ if (PEAssembly::FindLastPathSeparator(path, i)) {
+ path.Truncate(++i);
+ path.Insert(i, name);
+ }
+ pModule = PEModule::Open(m_pManifestFile, kFile, path);
+ }
+#ifdef FEATURE_FUSION
+ lDone: ;
+#endif
+ }
+ EX_CATCH
+ {
+ Exception *ex = GET_EXCEPTION();
+ if (FileNotFound(ex->GetHR()) ||
+ (ex->GetHR() == FUSION_E_INVALID_NAME))
+ pModule = RaiseModuleResolveEvent_AddRef(psModuleName, kFile);
+
+ if (pModule == NULL)
+ {
+ EEFileLoadException::Throw(name, ex->GetHR(), ex);
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+
+ if (pModule == NULL)
+ {
+ pModule = RaiseModuleResolveEvent_AddRef(psModuleName, kFile);
+ if (pModule == NULL)
+ {
+ EEFileLoadException::Throw(name, HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND));
+ }
+ }
+
+ RETURN pModule;
+}
+
+PEModule * Assembly::RaiseModuleResolveEvent_AddRef(LPCSTR szName, mdFile kFile)
+{
+ CONTRACT(PEModule *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
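+    // Give managed code a chance to supply the module: invoke the assembly's OnModuleResolve
+    // handler and, if the Module it returns corresponds to the file token we are resolving,
+    // return its PEModule with an extra reference (hence the _AddRef suffix).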
+ Module* pModule = NULL;
+
+#ifndef CROSSGEN_COMPILE
+ GCX_COOP();
+
+ struct _gc {
+ OBJECTREF AssemblyRef;
+ STRINGREF str;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ if ((gc.AssemblyRef = GetExposedObject()) != NULL)
+ {
+ MethodDescCallSite onModuleResolve(METHOD__ASSEMBLY__ON_MODULE_RESOLVE, &gc.AssemblyRef);
+ gc.str = StringObject::NewString(szName);
+ ARG_SLOT args[2] = {
+ ObjToArgSlot(gc.AssemblyRef),
+ ObjToArgSlot(gc.str)
+ };
+
+ REFLECTMODULEBASEREF ResultingModuleRef =
+ (REFLECTMODULEBASEREF) onModuleResolve.Call_RetOBJECTREF(args);
+
+ if (ResultingModuleRef != NULL)
+ {
+ pModule = ResultingModuleRef->GetModule();
+ }
+ }
+ GCPROTECT_END();
+
+ if (pModule && ( (!(pModule->IsIntrospectionOnly())) != !(IsIntrospectionOnly()) ))
+ {
+ COMPlusThrow(kFileLoadException, IDS_CLASSLOAD_MODULE_RESOLVE_INTROSPECTION_MISMATCH);
+ }
+
+ if ((pModule != NULL) &&
+ (pModule == m_pManifest->LookupFile(kFile)))
+ {
+ RETURN clr::SafeAddRef((PEModule *)pModule->GetFile());
+ }
+#endif // CROSSGEN_COMPILE
+
+ RETURN NULL;
+}
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+BOOL Assembly::GetResource(LPCSTR szName, DWORD *cbResource,
+ PBYTE *pbInMemoryResource, Assembly** pAssemblyRef,
+ LPCSTR *szFileName, DWORD *dwLocation,
+ StackCrawlMark *pStackMark, BOOL fSkipSecurityCheck,
+ BOOL fSkipRaiseResolveEvent)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ DomainAssembly *pAssembly = NULL;
+ BOOL result = GetDomainAssembly()->GetResource(szName, cbResource,
+ pbInMemoryResource, &pAssembly,
+ szFileName, dwLocation, pStackMark, fSkipSecurityCheck,
+ fSkipRaiseResolveEvent);
+ if (result && pAssemblyRef != NULL && pAssembly!=NULL)
+ *pAssemblyRef = pAssembly->GetAssembly();
+
+ return result;
+}
+
+#ifdef FEATURE_PREJIT
+BOOL Assembly::IsInstrumented()
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+
+ BOOL isInstrumented = false;
+
+ EX_TRY
+ {
+ FAULT_NOT_FATAL();
+
+ isInstrumented = IsInstrumentedHelper();
+ }
+ EX_CATCH
+ {
+ isInstrumented = false;
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ return isInstrumented;
+}
+
+BOOL Assembly::IsInstrumentedHelper()
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+
+ // Dynamic Assemblies cannot be instrumented
+ if (IsDynamic())
+ return false;
+
+ // We must have a native image in order to perform IBC instrumentation
+ if (!GetManifestFile()->HasNativeImage())
+ return false;
+
+ // @Consider using the full name instead of the short form
+ // (see GetFusionAssemblyName()->IsEqual).
+
+ LPCUTF8 szZapBBInstr = g_pConfig->GetZapBBInstr();
+ LPCUTF8 szAssemblyName = GetSimpleName();
+
+ if (!szZapBBInstr || !szAssemblyName ||
+ (*szZapBBInstr == '\0') || (*szAssemblyName == '\0'))
+ return false;
+
+ // Convert to unicode so that we can do a case insensitive comparison
+
+ SString instrumentedAssemblyNamesList(SString::Utf8, szZapBBInstr);
+ SString assemblyName(SString::Utf8, szAssemblyName);
+
+ const WCHAR *wszInstrumentedAssemblyNamesList = instrumentedAssemblyNamesList.GetUnicode();
+ const WCHAR *wszAssemblyName = assemblyName.GetUnicode();
+
+ // wszInstrumentedAssemblyNamesList is a space separated list of assembly names.
+ // We need to determine if wszAssemblyName is in this list.
+ // If there is a "*" in the list, then all assemblies match.
+
+ const WCHAR * pCur = wszInstrumentedAssemblyNamesList;
+
+ do
+ {
+ _ASSERTE(pCur[0] != W('\0'));
+ const WCHAR * pNextSpace = wcschr(pCur, W(' '));
+ _ASSERTE(pNextSpace == NULL || pNextSpace[0] == W(' '));
+
+ if (pCur != pNextSpace)
+ {
+ // pCur is not pointing to a space
+ _ASSERTE(pCur[0] != W(' '));
+
+ if (pCur[0] == W('*') && (pCur[1] == W(' ') || pCur[1] == W('\0')))
+ return true;
+
+ if (pNextSpace == NULL)
+ {
+ // We have reached the last name in the list. There are no more spaces.
+ return (SString::_wcsicmp(wszAssemblyName, pCur) == 0);
+ }
+ else
+ {
+ if (SString::_wcsnicmp(wszAssemblyName, pCur, static_cast<COUNT_T>(pNextSpace - pCur)) == 0)
+ return true;
+ }
+ }
+
+ pCur = pNextSpace + 1;
+ }
+ while (pCur[0] != W('\0'));
+
+ return false;
+}
+#endif // FEATURE_PREJIT
+
+//***********************************************************
+// Add an assembly to the assemblyref list. pAssemEmitter specifies where
+// the AssemblyRef is emitted to.
+//***********************************************************
+mdAssemblyRef Assembly::AddAssemblyRef(Assembly *refedAssembly, IMetaDataAssemblyEmit *pAssemEmitter, BOOL fUsePublicKeyToken)
+{
+ CONTRACT(mdAssemblyRef)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(refedAssembly));
+#ifdef FEATURE_CORECLR
+ PRECONDITION(CheckPointer(pAssemEmitter, NULL_NOT_OK));
+#else
+ PRECONDITION(CheckPointer(pAssemEmitter, NULL_OK));
+#endif //FEATURE_CORECLR
+ POSTCONDITION(!IsNilToken(RETVAL));
+ POSTCONDITION(TypeFromToken(RETVAL) == mdtAssemblyRef);
+ }
+ CONTRACT_END;
+
+ SafeComHolder<IMetaDataAssemblyEmit> emitHolder;
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+ if (pAssemEmitter == NULL)
+ {
+ pAssemEmitter = GetOnDiskMDAssemblyEmitter();
+ emitHolder.Assign(pAssemEmitter);
+ }
+#endif // FEATURE_CORECLR && !CROSSGEN_COMPILE
+
+ AssemblySpec spec;
+ spec.InitializeSpec(refedAssembly->GetManifestFile());
+
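+    // A collectible assembly may only be referenced from another collectible assembly;
+    // EnsureReference keeps the referenced LoaderAllocator alive at least as long as ours.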
+ if (refedAssembly->IsCollectible())
+ {
+ if (this->IsCollectible())
+ this->GetLoaderAllocator()->EnsureReference(refedAssembly->GetLoaderAllocator());
+ else
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleBoundNonCollectible"));
+ }
+
+ mdAssemblyRef ar;
+ IfFailThrow(spec.EmitToken(pAssemEmitter, &ar, fUsePublicKeyToken));
+
+ RETURN ar;
+} // Assembly::AddAssemblyRef
+
+//***********************************************************
+// Add a typedef to the runtime TypeDef table of this assembly
+//***********************************************************
+void Assembly::AddType(
+ Module *pModule,
+ mdTypeDef cl)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ AllocMemTracker amTracker;
+
+ if (pModule->GetAssembly() != this)
+ {
+ // you cannot add a typedef outside of the assembly to the typedef table
+ _ASSERTE(!"Bad usage!");
+ }
+ m_pClassLoader->AddAvailableClassDontHaveLock(pModule,
+ cl,
+ &amTracker);
+ amTracker.SuppressRelease();
+}
+
+//***********************************************************
+// Add an ExportedType to the runtime TypeDef table of this assembly
+//***********************************************************
+void Assembly::AddExportedType(mdExportedType cl)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ AllocMemTracker amTracker;
+ m_pClassLoader->AddExportedTypeDontHaveLock(GetManifestModule(),
+ cl,
+ &amTracker);
+ amTracker.SuppressRelease();
+}
+
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+
+//***********************************************************
+//
+// get the IMetaDataAssemblyEmit for the on disk manifest.
+// Note that the pointer returned is AddRefed. It is the caller's
+// responsibility to release the reference.
+//
+//***********************************************************
+IMetaDataAssemblyEmit *Assembly::GetOnDiskMDAssemblyEmitter()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ IMetaDataAssemblyEmit *pAssemEmitter = NULL;
+ IMetaDataEmit *pEmitter;
+ RefClassWriter *pRCW;
+
+ _ASSERTE(m_pOnDiskManifest);
+
+ pRCW = m_pOnDiskManifest->GetClassWriter();
+ _ASSERTE(pRCW);
+
+    // If the RefClassWriter has an on-disk emitter, use it rather than the in-memory emitter.
+ pEmitter = pRCW->GetOnDiskEmitter();
+
+ if (pEmitter == NULL)
+ pEmitter = m_pOnDiskManifest->GetEmitter();
+
+ _ASSERTE(pEmitter != NULL);
+
+ IfFailThrow(pEmitter->QueryInterface(IID_IMetaDataAssemblyEmit, (void**) &pAssemEmitter));
+
+ if (pAssemEmitter == NULL)
+ {
+ // the manifest is not writable
+ _ASSERTE(!"Bad usage!");
+ }
+ return pAssemEmitter;
+}
+
+//***********************************************************
+//
+// prepare saving manifest to disk.
+//
+//***********************************************************
+void Assembly::PrepareSavingManifest(ReflectionModule *pAssemblyModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pAssemblyModule)
+ {
+ // embedded assembly
+ m_pOnDiskManifest = pAssemblyModule;
+ m_fEmbeddedManifest = true;
+ }
+ else
+ {
+ m_fEmbeddedManifest = false;
+
+ StackSString name(SString::Utf8, GetSimpleName());
+
+ // Create the module
+ m_pOnDiskManifest = CreateDynamicModule(name, name, FALSE /*fIsTransient*/);
+ // store the fact this on disk manifest is temporary and can be hidden from the user
+ m_needsToHideManifestForEmit = TRUE;
+ }
+
+ NonVMComHolder<IMetaDataAssemblyEmit> pAssemblyEmit(GetOnDiskMDAssemblyEmitter());
+
+ // Copy assembly metadata to emit scope
+ //<TODO>@todo: add Title, Description, Alias as CA</TODO>
+ // <TODO>@todo: propagate all of the information</TODO>
+ // <TODO>@todo: introduce a helper in metadata to take the ansi version of string.</TODO>
+
+ IMetaDataAssemblyImport *pAssemblyImport = GetManifestFile()->GetAssemblyImporter();
+
+ const void *pbPublicKey;
+ ULONG cbPublicKey;
+ ULONG ulHashAlgId;
+ LPWSTR szName;
+ ULONG chName;
+ ASSEMBLYMETADATA MetaData;
+ DWORD dwAssemblyFlags;
+
+ MetaData.cbLocale = 0;
+ MetaData.ulProcessor = 0;
+ MetaData.ulOS = 0;
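+
+    // Two-phase pattern: the first GetAssemblyProps call only retrieves the required buffer
+    // sizes (name length and the locale/processor/OS counts); the buffers are then opened at
+    // those sizes and the second call fills them in.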
+ IfFailThrow(pAssemblyImport->GetAssemblyProps(TokenFromRid(1, mdtAssembly),
+ NULL, NULL, NULL,
+ NULL, 0, &chName,
+ &MetaData, NULL));
+ StackSString name;
+ szName = name.OpenUnicodeBuffer(chName);
+
+ SString locale;
+ MetaData.szLocale = locale.OpenUnicodeBuffer(MetaData.cbLocale);
+
+ SBuffer proc;
+ MetaData.rProcessor = (DWORD *) proc.OpenRawBuffer(MetaData.ulProcessor*sizeof(*MetaData.rProcessor));
+
+ SBuffer os;
+ MetaData.rOS = (OSINFO *) os.OpenRawBuffer(MetaData.ulOS*sizeof(*MetaData.rOS));
+
+ IfFailThrow(pAssemblyImport->GetAssemblyProps(TokenFromRid(1, mdtAssembly),
+ &pbPublicKey, &cbPublicKey, &ulHashAlgId,
+ szName, chName, &chName,
+ &MetaData, &dwAssemblyFlags));
+
+ mdAssembly ad;
+ IfFailThrow(pAssemblyEmit->DefineAssembly(pbPublicKey, cbPublicKey, ulHashAlgId,
+ szName, &MetaData, dwAssemblyFlags, &ad));
+
+ SafeComHolder<IMetaDataImport> pImport;
+ IfFailThrow(pAssemblyEmit->QueryInterface(IID_IMetaDataImport, (void**)&pImport));
+ ULONG cExistingName = 0;
+ if (FAILED(pImport->GetScopeProps(NULL, 0, &cExistingName, NULL)) || cExistingName == 0)
+ {
+ SafeComHolder<IMetaDataEmit> pEmit;
+ IfFailThrow(pAssemblyEmit->QueryInterface(IID_IMetaDataEmit, (void**)&pEmit));
+ IfFailThrow(pEmit->SetModuleProps(szName));
+ }
+
+ name.CloseBuffer();
+ locale.CloseBuffer();
+ proc.CloseRawBuffer();
+ os.CloseRawBuffer();
+} // Assembly::PrepareSavingManifest
+
+
+//***********************************************************
+//
+// add a file name to the file list of this assembly. On disk only.
+//
+//***********************************************************
+mdFile Assembly::AddFile(LPCWSTR wszFileName)
+{
+ STANDARD_VM_CONTRACT;
+
+ SafeComHolder<IMetaDataAssemblyEmit> pAssemEmitter(GetOnDiskMDAssemblyEmitter());
+ mdFile fl;
+
+ // Define File.
+ IfFailThrow( pAssemEmitter->DefineFile(
+ wszFileName, // [IN] Name of the file.
+ 0, // [IN] Hash Blob.
+ 0, // [IN] Count of bytes in the Hash Blob.
+ 0, // [IN] Flags.
+ &fl) ); // [OUT] Returned File token.
+
+ return fl;
+} // Assembly::AddFile
+
+
+//***********************************************************
+//
+// Set the hash value on a file table entry.
+//
+//***********************************************************
+void Assembly::SetFileHashValue(mdFile tkFile, LPCWSTR wszFullFileName)
+{
+ STANDARD_VM_CONTRACT;
+
+ SafeComHolder<IMetaDataAssemblyEmit> pAssemEmitter(GetOnDiskMDAssemblyEmitter());
+
+ // Get the hash value.
+ SBuffer buffer;
+ PEImageHolder map(PEImage::OpenImage(StackSString(wszFullFileName)));
+ map->ComputeHash(GetHashAlgId(), buffer);
+
+ // Set the hash blob.
+ IfFailThrow( pAssemEmitter->SetFileProps(
+ tkFile, // [IN] File Token.
+ buffer, // [IN] Hash Blob.
+ buffer.GetSize(), // [IN] Count of bytes in the Hash Blob.
+ (DWORD) -1)); // [IN] Flags.
+
+} // Assembly::SetFileHashValue
+
+//*****************************************************************************
+// Add a Type name to the ExportedType table in the on-disk assembly manifest.
+//*****************************************************************************
+mdExportedType Assembly::AddExportedTypeOnDisk(LPCWSTR wszExportedType, mdToken tkImpl, mdToken tkTypeDef, CorTypeAttr flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(TypeFromToken(tkTypeDef) == mdtTypeDef);
+
+ // The on-disk assembly manifest
+ SafeComHolder<IMetaDataAssemblyEmit> pAssemEmitter(GetOnDiskMDAssemblyEmitter());
+
+ mdExportedType ct;
+
+ IfFailThrow( pAssemEmitter->DefineExportedType(
+ wszExportedType, // [IN] Name of the COMType.
+ tkImpl, // [IN] mdFile or mdAssemblyRef that provides the ExportedType.
+ tkTypeDef, // [IN] TypeDef token within the file.
+ flags, // [IN] Flags.
+ &ct) ); // [OUT] Returned ExportedType token.
+
+ return ct;
+} // Assembly::AddExportedTypeOnDisk
+
+//*******************************************************************************
+// Add a Type name to the ExportedType table in the in-memory assembly manifest.
+//*******************************************************************************
+mdExportedType Assembly::AddExportedTypeInMemory(LPCWSTR wszExportedType, mdToken tkImpl, mdToken tkTypeDef, CorTypeAttr flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(TypeFromToken(tkTypeDef) == mdtTypeDef);
+
+ // The in-memory assembly manifest
+ IMetaDataAssemblyEmit* pAssemEmitter = GetManifestFile()->GetAssemblyEmitter();
+
+ mdExportedType ct;
+
+ IfFailThrow( pAssemEmitter->DefineExportedType(
+ wszExportedType, // [IN] Name of the COMType.
+ tkImpl, // [IN] mdFile or mdAssemblyRef that provides the ExportedType.
+ tkTypeDef, // [IN] TypeDef token within the file.
+ flags, // [IN] Flags.
+ &ct) ); // [OUT] Returned ExportedType token.
+
+ return ct;
+} // Assembly::AddExportedTypeInMemory
+
+
+//***********************************************************
+// add an entry to ManifestResource table for a stand alone managed resource. On disk only.
+//***********************************************************
+void Assembly::AddStandAloneResource(LPCWSTR wszName, LPCWSTR wszDescription, LPCWSTR wszMimeType, LPCWSTR wszFileName, LPCWSTR wszFullFileName, int iAttribute)
+{
+ STANDARD_VM_CONTRACT;
+
+ SafeComHolder<IMetaDataAssemblyEmit> pAssemEmitter(GetOnDiskMDAssemblyEmitter());
+ mdFile tkFile;
+ mdManifestResource mr;
+ SBuffer hash;
+
+ // Get the hash value;
+ if (GetHashAlgId())
+ {
+ PEImageHolder pImage(PEImage::OpenImage(StackSString(wszFullFileName)));
+ pImage->ComputeHash(GetHashAlgId(), hash);
+ }
+
+ IfFailThrow( pAssemEmitter->DefineFile(
+ wszFileName, // [IN] Name of the file.
+ hash, // [IN] Hash Blob.
+ hash.GetSize(), // [IN] Count of bytes in the Hash Blob.
+ ffContainsNoMetaData, // [IN] Flags.
+ &tkFile) ); // [OUT] Returned File token.
+
+
+ IfFailThrow( pAssemEmitter->DefineManifestResource(
+ wszName, // [IN] Name of the resource.
+ tkFile, // [IN] mdFile or mdAssemblyRef that provides the resource.
+ 0, // [IN] Offset to the beginning of the resource within the file.
+ iAttribute, // [IN] Flags.
+ &mr) ); // [OUT] Returned ManifestResource token.
+
+} // Assembly::AddStandAloneResource
+
+
+//***********************************************************
+// Save security permission requests.
+//***********************************************************
+void Assembly::AddDeclarativeSecurity(DWORD dwAction, void const *pValue, DWORD cbValue)
+{
+ STANDARD_VM_CONTRACT;
+
+    mdAssembly tkAssembly = 0x20000001;     // == TokenFromRid(1, mdtAssembly), the assembly's own token
+
+ SafeComHolder<IMetaDataAssemblyEmit> pAssemEmitter(GetOnDiskMDAssemblyEmitter());
+ _ASSERTE( pAssemEmitter );
+
+ SafeComHolder<IMetaDataEmitHelper> pEmitHelper;
+ IfFailThrow( pAssemEmitter->QueryInterface(IID_IMetaDataEmitHelper, (void**)&pEmitHelper) );
+
+ IfFailThrow(pEmitHelper->AddDeclarativeSecurityHelper(tkAssembly,
+ dwAction,
+ pValue,
+ cbValue,
+ NULL));
+}
+
+
+//***********************************************************
+// Allocate space for a strong name signature in the manifest
+//***********************************************************
+HRESULT Assembly::AllocateStrongNameSignature(ICeeFileGen *pCeeFileGen,
+ HCEEFILE ceeFile)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END
+
+ HRESULT hr;
+ HCEESECTION TData;
+ DWORD dwDataOffset;
+ DWORD dwDataLength;
+ DWORD dwDataRVA;
+ VOID *pvBuffer;
+ const void *pbPublicKey;
+ ULONG cbPublicKey;
+
+ // Determine size of signature blob.
+
+ IfFailRet(GetManifestImport()->GetAssemblyProps(TokenFromRid(1, mdtAssembly),
+ &pbPublicKey, &cbPublicKey, NULL,
+ NULL, NULL, NULL));
+
+ if (!StrongNameSignatureSize((BYTE *) pbPublicKey, cbPublicKey, &dwDataLength)) {
+ hr = StrongNameErrorInfo();
+ return hr;
+ }
+
+ // Allocate space for the signature in the text section and update the COM+
+ // header to point to the space.
+ IfFailRet(pCeeFileGen->GetIlSection(ceeFile, &TData));
+ IfFailRet(pCeeFileGen->GetSectionDataLen(TData, &dwDataOffset));
+ IfFailRet(pCeeFileGen->GetSectionBlock(TData, dwDataLength, 4, &pvBuffer));
+ IfFailRet(pCeeFileGen->GetMethodRVA(ceeFile, dwDataOffset, &dwDataRVA));
+ IfFailRet(pCeeFileGen->SetStrongNameEntry(ceeFile, dwDataLength, dwDataRVA));
+
+ return S_OK;
+}
+
+
+//***********************************************************
+// Strong name sign a manifest already persisted to disk
+//***********************************************************
+HRESULT Assembly::SignWithStrongName(LPCWSTR wszFileName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END
+
+ HRESULT hr = S_OK;
+
+ // If we're going to do a full signing we have a key pair either
+ // in a key container or provided directly in a byte array.
+
+ switch (m_eStrongNameLevel) {
+ case SN_FULL_KEYPAIR_IN_ARRAY:
+ if (!StrongNameSignatureGeneration(wszFileName, NULL, m_pbStrongNameKeyPair, m_cbStrongNameKeyPair, NULL, NULL))
+ hr = StrongNameErrorInfo();
+ break;
+
+ case SN_FULL_KEYPAIR_IN_CONTAINER:
+ if (!StrongNameSignatureGeneration(wszFileName, m_pwStrongNameKeyContainer, NULL, 0, NULL, NULL))
+ hr = StrongNameErrorInfo();
+ break;
+
+ default:
+ break;
+ }
+
+ return hr;
+}
+
+
+//***********************************************************
+// save the manifest to disk!
+//***********************************************************
+void Assembly::SaveManifestToDisk(LPCWSTR wszFileName, int entrypoint, int fileKind, DWORD corhFlags, DWORD peFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = NOERROR;
+ HCEEFILE ceeFile = NULL;
+ ICeeFileGen *pCeeFileGen = NULL;
+ RefClassWriter *pRCW;
+ IMetaDataEmit *pEmitter;
+
+ _ASSERTE( m_fEmbeddedManifest == false );
+
+ pRCW = m_pOnDiskManifest->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ IfFailGo( pRCW->EnsureCeeFileGenCreated(corhFlags, peFlags) );
+
+ pCeeFileGen = pRCW->GetCeeFileGen();
+ ceeFile = pRCW->GetHCEEFILE();
+ _ASSERTE(ceeFile && pCeeFileGen);
+
+ //Emit the MetaData
+ pEmitter = m_pOnDiskManifest->GetClassWriter()->GetEmitter();
+ IfFailGo( pCeeFileGen->EmitMetaDataEx(ceeFile, pEmitter) );
+
+ // Allocate space for a strong name signature if a public key was supplied
+ // (this doesn't strong name the assembly, but it makes it possible to do so
+ // as a post processing step).
+ if (IsStrongNamed())
+ IfFailGo(AllocateStrongNameSignature(pCeeFileGen, ceeFile));
+
+ IfFailGo( pCeeFileGen->SetOutputFileName(ceeFile, (LPWSTR)wszFileName) );
+
+    // The entry point for an assembly may be a file token; set it if one exists.
+ if (RidFromToken(entrypoint) != mdTokenNil)
+ IfFailGo( pCeeFileGen->SetEntryPoint(ceeFile, entrypoint) );
+ if (fileKind == Dll)
+ {
+ pCeeFileGen->SetDllSwitch(ceeFile, true);
+ }
+ else
+ {
+ // should have a valid entry point for applications
+ if (fileKind == WindowApplication)
+ {
+ IfFailGo( pCeeFileGen->SetSubsystem(ceeFile, IMAGE_SUBSYSTEM_WINDOWS_GUI, CEE_IMAGE_SUBSYSTEM_MAJOR_VERSION, CEE_IMAGE_SUBSYSTEM_MINOR_VERSION) );
+ }
+ else
+ {
+ _ASSERTE(fileKind == ConsoleApplication);
+ IfFailGo( pCeeFileGen->SetSubsystem(ceeFile, IMAGE_SUBSYSTEM_WINDOWS_CUI, CEE_IMAGE_SUBSYSTEM_MAJOR_VERSION, CEE_IMAGE_SUBSYSTEM_MINOR_VERSION) );
+ }
+
+ }
+
+ //Generate the CeeFile
+ IfFailGo(pCeeFileGen->GenerateCeeFile(ceeFile) );
+
+ // Strong name sign the resulting assembly if required.
+ if (IsStrongNamed())
+ IfFailGo(SignWithStrongName(wszFileName));
+
+ // now release the m_pOnDiskManifest
+ErrExit:
+ pRCW->DestroyCeeFileGen();
+
+    // We keep the on-disk manifest so that the GetModules code can skip over this ad-hoc module when modules are enumerated.
+    // Need to see if we can avoid creating this module altogether.
+ //m_pOnDiskManifest = NULL;
+
+ if (FAILED(hr))
+ {
+ if (HRESULT_FACILITY(hr) == FACILITY_WIN32)
+ {
+ if (IsWin32IOError(HRESULT_CODE(hr)))
+ {
+ COMPlusThrowHR(COR_E_IO);
+ }
+ else
+ {
+ COMPlusThrowHR(hr);
+ }
+ }
+ if (hr == CEE_E_CVTRES_NOT_FOUND)
+ COMPlusThrow(kIOException, W("Argument_cvtres_NotFound"));
+ COMPlusThrowHR(hr);
+ }
+} // Assembly::SaveManifestToDisk
+
+#endif // FEATURE_CORECLR && !CROSSGEN_COMPILE
+
+
+HRESULT STDMETHODCALLTYPE
+GetAssembliesByName(LPCWSTR szAppBase,
+ LPCWSTR szPrivateBin,
+ LPCWSTR szAssemblyName,
+ IUnknown *ppIUnk[],
+ ULONG cMax,
+ ULONG *pcAssemblies)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ GC_TRIGGERS;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END
+
+ HRESULT hr = S_OK;
+
+ if (g_fEEInit) {
+ // Cannot call this during EE startup
+ return MSEE_E_ASSEMBLYLOADINPROGRESS;
+ }
+
+ if (!(szAssemblyName && ppIUnk && pcAssemblies))
+ return E_POINTER;
+
+#if defined(FEATURE_CORECLR) || defined(CROSSGEN_COMPILE)
+ hr = COR_E_NOTSUPPORTED;
+#else
+ AppDomain *pDomain = NULL;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ if(szAppBase || szPrivateBin)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+ MethodDescCallSite createDomainEx(METHOD__APP_DOMAIN__CREATE_DOMAINEX);
+ struct _gc {
+ STRINGREF pFriendlyName;
+ STRINGREF pAppBase;
+ STRINGREF pPrivateBin;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ gc.pFriendlyName = StringObject::NewString(W("GetAssembliesByName"));
+
+ if(szAppBase)
+ {
+ gc.pAppBase = StringObject::NewString(szAppBase);
+ }
+
+ if(szPrivateBin)
+ {
+ gc.pPrivateBin = StringObject::NewString(szPrivateBin);
+ }
+
+ ARG_SLOT args[5] =
+ {
+ ObjToArgSlot(gc.pFriendlyName),
+ NULL,
+ ObjToArgSlot(gc.pAppBase),
+ ObjToArgSlot(gc.pPrivateBin),
+ BoolToArgSlot(false)
+ };
+ APPDOMAINREF pDom = (APPDOMAINREF) createDomainEx.Call_RetOBJECTREF(args);
+ if (pDom == NULL)
+ {
+ hr = E_FAIL;
+ }
+ else
+ {
+ Context *pContext = CRemotingServices::GetServerContextForProxy((OBJECTREF) pDom);
+ _ASSERTE(pContext);
+ pDomain = pContext->GetDomain();
+ }
+
+ GCPROTECT_END();
+ }
+ else
+ pDomain = SystemDomain::System()->DefaultDomain();
+
+ Assembly *pFoundAssembly;
+ if (SUCCEEDED(hr)) {
+ pFoundAssembly = pDomain->LoadAssemblyHelper(szAssemblyName,
+ NULL);
+ if (SUCCEEDED(hr)) {
+ if (cMax < 1)
+ hr = HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ else {
+ ppIUnk[0] = (IUnknown *)pFoundAssembly->GetManifestAssemblyImporter();
+ ppIUnk[0]->AddRef();
+ }
+ *pcAssemblies = 1;
+ }
+ }
+
+ END_EXTERNAL_ENTRYPOINT;
+#endif // FEATURE_CORECLR
+
+ return hr;
+} // Used by the IMetaData APIs to access an assembly's metadata.
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+
+void Assembly::SetMissingDependenciesCheckDone()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_bMissingDependenciesCheckDone=TRUE;
+};
+
+BOOL Assembly::MissingDependenciesCheckDone()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_bMissingDependenciesCheckDone;
+};
+
+
+#ifdef FEATURE_FUSION
+void Assembly::SetBindingClosure(IAssemblyBindingClosure* pClosure) // Addrefs. It is assumed the caller did not addref pClosure for us.
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pBindingClosure == NULL);
+ _ASSERTE(pClosure != NULL);
+
+ m_pBindingClosure = pClosure;
+ pClosure->AddRef(); // It is assumed the caller did not addref pBindingClosure for us.
+}
+
+IAssemblyBindingClosure * Assembly::GetBindingClosure()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pBindingClosure;
+}
+
+
+// The shared module list is effectively an extension of the shared domain assembly hash table.
+// It is the canonical list and arbiter of modules loaded from this assembly by any app domain.
+// Modules are stored here immediately upon creation (to prevent duplicate creation), as opposed to
+// in the rid map, where they are only placed upon load completion.
+
+BOOL Assembly::CanBeShared(DomainAssembly *pDomainAssembly)
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckPointer(pDomainAssembly));
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CODESHARING,
+ LL_INFO100,
+ "Checking if we can share: \"%S\" in domain 0x%x.\n",
+ GetDebugName(), pDomainAssembly->GetAppDomain()));
+
+ STRESS_LOG2(LF_CODESHARING, LL_INFO1000,"Checking whether DomainAssembly %p is compatible with Assembly %p",
+ pDomainAssembly,this);
+
+ // We must always share the same system assemblies
+ if (IsSystem())
+ {
+ STRESS_LOG0(LF_CODESHARING, LL_INFO1000,"System assembly - sharing");
+ return TRUE;
+ }
+
+ if ((pDomainAssembly->GetDebuggerInfoBits()&~(DACF_PDBS_COPIED|DACF_IGNORE_PDBS|DACF_OBSOLETE_TRACK_JIT_INFO))
+ != (m_debuggerFlags&~(DACF_PDBS_COPIED|DACF_IGNORE_PDBS|DACF_OBSOLETE_TRACK_JIT_INFO)))
+ {
+        LOG((LF_CODESHARING,
+             LL_INFO100,
+             "We can't share it, desired debugging flags %x are different from %x\n",
+             pDomainAssembly->GetDebuggerInfoBits(), (m_debuggerFlags&~(DACF_PDBS_COPIED|DACF_IGNORE_PDBS|DACF_OBSOLETE_TRACK_JIT_INFO))));
+        STRESS_LOG2(LF_CODESHARING, LL_INFO100,"Flags diff= %08x / %08x",pDomainAssembly->GetDebuggerInfoBits(),
+                    m_debuggerFlags);
+ g_dwLoaderReasonForNotSharing = ReasonForNotSharing_DebuggerFlagMismatch;
+ return FALSE;
+ }
+
+ PEAssembly * pDomainAssemblyFile = pDomainAssembly->GetFile();
+ if (pDomainAssemblyFile == NULL)
+ {
+ g_dwLoaderReasonForNotSharing = ReasonForNotSharing_NullPeassembly;
+ return FALSE;
+ }
+
+ IAssemblyBindingClosure * pContext = GetBindingClosure();
+ if (pContext == NULL)
+ {
+ STRESS_LOG1(LF_CODESHARING, LL_INFO1000,"No context 1 - status=%d",pDomainAssemblyFile->IsSystem());
+ if (pDomainAssemblyFile->IsSystem())
+ return TRUE;
+ else
+ {
+ g_dwLoaderReasonForNotSharing = ReasonForNotSharing_MissingAssemblyClosure1;
+ return FALSE;
+ }
+ }
+
+ IAssemblyBindingClosure * pCurrentContext = pDomainAssembly->GetAssemblyBindingClosure(LEVEL_STARTING);
+ if (pCurrentContext == NULL)
+ {
+ STRESS_LOG1(LF_CODESHARING, LL_INFO1000,"No context 2 - status=%d",pDomainAssemblyFile->IsSystem());
+ if (pDomainAssemblyFile->IsSystem())
+ return TRUE;
+ else
+ {
+ g_dwLoaderReasonForNotSharing = ReasonForNotSharing_MissingAssemblyClosure2;
+ return FALSE;
+ }
+ }
+
+    // Ensure the closures are walked.
+ {
+ ReleaseHolder<IBindResult> pWinRTBindResult;
+
+ IUnknown * pUnk;
+ if (pDomainAssembly->GetFile()->IsWindowsRuntime())
+    { // It is a .winmd file (WinRT assembly)
+ IfFailThrow(CLRPrivAssemblyWinRT::GetIBindResult(pDomainAssembly->GetFile()->GetHostAssembly(), &pWinRTBindResult));
+ pUnk = pWinRTBindResult;
+ }
+ else
+ {
+ pUnk = pDomainAssembly->GetFile()->GetFusionAssembly();
+ }
+
+ GCX_PREEMP();
+ IfFailThrow(pCurrentContext->EnsureWalked(pUnk, ::GetAppDomain()->GetFusionContext(), LEVEL_COMPLETE));
+ }
+
+ if ((pContext->HasBeenWalked(LEVEL_COMPLETE) != S_OK) || !MissingDependenciesCheckDone())
+ {
+ GCX_COOP();
+
+ BOOL fMissingDependenciesResolved = FALSE;
+
+ ENTER_DOMAIN_PTR(SystemDomain::System()->DefaultDomain(), ADV_DEFAULTAD);
+ {
+ {
+ ReleaseHolder<IBindResult> pWinRTBindResult;
+
+ IUnknown * pUnk;
+ if (GetManifestFile()->IsWindowsRuntime())
+            { // It is a .winmd file (WinRT assembly)
+ IfFailThrow(CLRPrivAssemblyWinRT::GetIBindResult(GetManifestFile()->GetHostAssembly(), &pWinRTBindResult));
+ pUnk = pWinRTBindResult;
+ }
+ else
+ {
+ pUnk = GetManifestFile()->GetFusionAssembly();
+ }
+
+ GCX_PREEMP();
+ IfFailThrow(pContext->EnsureWalked(pUnk, ::GetAppDomain()->GetFusionContext(), LEVEL_COMPLETE));
+ }
+ DomainAssembly * domainAssembly = ::GetAppDomain()->FindDomainAssembly(this);
+ if (domainAssembly != NULL)
+ {
+ if (domainAssembly->CheckMissingDependencies() == CMD_Resolved)
+ {
+                    // Dependencies were resolved in this domain; the assembly cannot be shared.
+ fMissingDependenciesResolved = TRUE;
+ }
+ }
+ }
+ END_DOMAIN_TRANSITION;
+
+ if (fMissingDependenciesResolved)
+ {
+ STRESS_LOG0(LF_CODESHARING, LL_INFO1000,"Missing dependencies resolved - not sharing");
+ g_dwLoaderReasonForNotSharing = ReasonForNotSharing_MissingDependenciesResolved;
+ return FALSE;
+ }
+ }
+
+ HRESULT hr = pContext->IsEqual(pCurrentContext);
+ IfFailThrow(hr);
+ if (hr != S_OK)
+ {
+ STRESS_LOG1(LF_CODESHARING, LL_INFO1000,"Closure comparison returned %08x - not sharing",hr);
+ g_dwLoaderReasonForNotSharing = ReasonForNotSharing_ClosureComparisonFailed;
+ return FALSE;
+ }
+
+ LOG((LF_CODESHARING, LL_INFO100, "We can share it : \"%S\"\n", GetDebugName()));
+ STRESS_LOG0(LF_CODESHARING, LL_INFO1000,"Everything is fine - sharing");
+ return TRUE;
+}
+#endif // FEATURE_FUSION
+
+#ifdef FEATURE_VERSIONING
+
+BOOL Assembly::CanBeShared(DomainAssembly *pDomainAssembly)
+{
+ PTR_PEAssembly pFile=pDomainAssembly->GetFile();
+
+ if(pFile == NULL)
+ return FALSE;
+
+ if(pFile->IsDynamic())
+ return FALSE;
+
+ if(IsSystem() && pFile->IsSystem())
+ return TRUE;
+
+ if ((pDomainAssembly->GetDebuggerInfoBits()&~(DACF_PDBS_COPIED|DACF_IGNORE_PDBS|DACF_OBSOLETE_TRACK_JIT_INFO))
+ != (m_debuggerFlags&~(DACF_PDBS_COPIED|DACF_IGNORE_PDBS|DACF_OBSOLETE_TRACK_JIT_INFO)))
+ {
+        LOG((LF_CODESHARING,
+             LL_INFO100,
+             "We can't share it, desired debugging flags %x are different from %x\n",
+             pDomainAssembly->GetDebuggerInfoBits(), (m_debuggerFlags&~(DACF_PDBS_COPIED|DACF_IGNORE_PDBS|DACF_OBSOLETE_TRACK_JIT_INFO))));
+        STRESS_LOG2(LF_CODESHARING, LL_INFO100,"Flags diff= %08x / %08x",pDomainAssembly->GetDebuggerInfoBits(),
+                    m_debuggerFlags);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+#endif // FEATURE_VERSIONING
+
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+#if defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+BOOL Assembly::AllowUntrustedCaller()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ return ModuleSecurityDescriptor::GetModuleSecurityDescriptor(this)->IsAPTCA();
+}
+#endif // defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+
+void DECLSPEC_NORETURN Assembly::ThrowTypeLoadException(LPCUTF8 pszFullName, UINT resIDWhy)
+{
+ WRAPPER_NO_CONTRACT;
+ ThrowTypeLoadException(NULL, pszFullName, NULL,
+ resIDWhy);
+}
+
+void DECLSPEC_NORETURN Assembly::ThrowTypeLoadException(LPCUTF8 pszNameSpace, LPCUTF8 pszTypeName,
+ UINT resIDWhy)
+{
+ WRAPPER_NO_CONTRACT;
+ ThrowTypeLoadException(pszNameSpace, pszTypeName, NULL,
+ resIDWhy);
+
+}
+
+void DECLSPEC_NORETURN Assembly::ThrowTypeLoadException(NameHandle *pName, UINT resIDWhy)
+{
+ STATIC_CONTRACT_THROWS;
+
+ if (pName->GetName()) {
+ ThrowTypeLoadException(pName->GetNameSpace(),
+ pName->GetName(),
+ NULL,
+ resIDWhy);
+ }
+ else
+ ThrowTypeLoadException(pName->GetTypeModule()->GetMDImport(),
+ pName->GetTypeToken(),
+ resIDWhy);
+
+}
+
+void DECLSPEC_NORETURN Assembly::ThrowTypeLoadException(IMDInternalImport *pInternalImport,
+ mdToken token,
+ UINT resIDWhy)
+{
+ WRAPPER_NO_CONTRACT;
+ ThrowTypeLoadException(pInternalImport, token, NULL, resIDWhy);
+}
+
+void DECLSPEC_NORETURN Assembly::ThrowTypeLoadException(IMDInternalImport *pInternalImport,
+ mdToken token,
+ LPCUTF8 pszFieldOrMethodName,
+ UINT resIDWhy)
+{
+ STATIC_CONTRACT_THROWS;
+    char pszBuff[32] = { 0 }; // Zero-init so the buffer reads as an empty string if no case below fills it (e.g. mdtTypeSpec).
+ LPCUTF8 pszClassName = (LPCUTF8)pszBuff;
+ LPCUTF8 pszNameSpace = "Invalid_Token";
+
+ if(pInternalImport->IsValidToken(token))
+ {
+ switch (TypeFromToken(token)) {
+ case mdtTypeRef:
+ if (FAILED(pInternalImport->GetNameOfTypeRef(token, &pszNameSpace, &pszClassName)))
+ {
+ pszNameSpace = pszClassName = "Invalid TypeRef record";
+ }
+ break;
+ case mdtTypeDef:
+ if (FAILED(pInternalImport->GetNameOfTypeDef(token, &pszClassName, &pszNameSpace)))
+ {
+ pszNameSpace = pszClassName = "Invalid TypeDef record";
+ }
+ break;
+ case mdtTypeSpec:
+
+ // If you see this assert, you need to make sure the message for
+ // this resID is appropriate for TypeSpecs
+ _ASSERTE((resIDWhy == IDS_CLASSLOAD_GENERAL) ||
+ (resIDWhy == IDS_CLASSLOAD_BADFORMAT) ||
+ (resIDWhy == IDS_CLASSLOAD_TYPESPEC));
+
+ resIDWhy = IDS_CLASSLOAD_TYPESPEC;
+ }
+ }
+ else
+ sprintf_s(pszBuff, sizeof(pszBuff), "0x%8.8X", token);
+
+ ThrowTypeLoadException(pszNameSpace, pszClassName,
+ pszFieldOrMethodName, resIDWhy);
+}
+
+
+
+void DECLSPEC_NORETURN Assembly::ThrowTypeLoadException(LPCUTF8 pszNameSpace,
+ LPCUTF8 pszTypeName,
+ LPCUTF8 pszMethodName,
+ UINT resIDWhy)
+{
+ STATIC_CONTRACT_THROWS;
+
+ StackSString displayName;
+ GetDisplayName(displayName);
+
+ ::ThrowTypeLoadException(pszNameSpace, pszTypeName, displayName,
+ pszMethodName, resIDWhy);
+}
+
+void DECLSPEC_NORETURN Assembly::ThrowBadImageException(LPCUTF8 pszNameSpace,
+ LPCUTF8 pszTypeName,
+ UINT resIDWhy)
+{
+ STATIC_CONTRACT_THROWS;
+
+ StackSString displayName;
+ GetDisplayName(displayName);
+
+ StackSString fullName;
+ SString sNameSpace(SString::Utf8, pszNameSpace);
+ SString sTypeName(SString::Utf8, pszTypeName);
+ fullName.MakeFullNamespacePath(sNameSpace, sTypeName);
+
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, resIDWhy, fullName, displayName);
+}
+
+
+#ifdef FEATURE_COMINTEROP
+//
+// Manage an ITypeLib pointer for this Assembly.
+//
+ITypeLib* Assembly::GetTypeLib()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ // Get the value we are going to return.
+ ITypeLib *pResult = m_pITypeLib;
+ // If there is a value, AddRef() it.
+ if (pResult && pResult != (ITypeLib*)-1)
+ pResult->AddRef();
+ return pResult;
+} // ITypeLib* Assembly::GetTypeLib()
+
+void Assembly::SetTypeLib(ITypeLib *pNew)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ ITypeLib *pOld;
+ pOld = InterlockedExchangeT(&m_pITypeLib, pNew);
+ // TypeLibs are refcounted pointers.
+ if (pNew != pOld)
+ {
+ if (pNew && pNew != (ITypeLib*)-1)
+ pNew->AddRef();
+ if (pOld && pOld != (ITypeLib*)-1)
+ pOld->Release();
+ }
+} // void Assembly::SetTypeLib()
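+
+// Illustrative caller sketch (variable names are hypothetical): GetTypeLib()
+// returns an AddRef'ed pointer, NULL, or the (ITypeLib*)-1 sentinel checked
+// above, so callers must test for both non-usable values and pair each
+// successful get with a Release():
+//
+//   ITypeLib *pTLB = pAssembly->GetTypeLib();
+//   if (pTLB != NULL && pTLB != (ITypeLib*)-1)
+//   {
+//       // ... use pTLB ...
+//       pTLB->Release();
+//   }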
+
+Assembly::WinMDStatus Assembly::GetWinMDStatus()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_winMDStatus == WinMDStatus_Unknown)
+ {
+ IWinMDImport *pWinMDImport = GetManifestWinMDImport();
+ if (pWinMDImport != NULL)
+ {
+ BOOL bIsWinMDExp;
+ VERIFY(SUCCEEDED(pWinMDImport->IsScenarioWinMDExp(&bIsWinMDExp)));
+
+ if (bIsWinMDExp)
+ {
+ // this is a managed backed WinMD
+ m_winMDStatus = WinMDStatus_IsManagedWinMD;
+ }
+ else
+ {
+ // this is a pure WinMD
+ m_winMDStatus = WinMDStatus_IsPureWinMD;
+ }
+ }
+ else
+ {
+ // this is not a WinMD at all
+ m_winMDStatus = WinMDStatus_IsNotWinMD;
+ }
+ }
+
+ return m_winMDStatus;
+}
+
+bool Assembly::IsWinMD()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetWinMDStatus() != WinMDStatus_IsNotWinMD;
+}
+
+bool Assembly::IsManagedWinMD()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetWinMDStatus() == WinMDStatus_IsManagedWinMD;
+}
+
+IWinMDImport *Assembly::GetManifestWinMDImport()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pManifestWinMDImport == NULL)
+ {
+ ReleaseHolder<IWinMDImport> pWinMDImport;
+ if (SUCCEEDED(m_pManifest->GetMDImport()->QueryInterface(IID_IWinMDImport, (void **)&pWinMDImport)))
+ {
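+            // Publish the interface pointer atomically. Only the thread that
+            // wins the race keeps its reference (via SuppressRelease below);
+            // a losing thread lets its ReleaseHolder drop the redundant QI reference.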
+ if (InterlockedCompareExchangeT<IWinMDImport *>(&m_pManifestWinMDImport, pWinMDImport, NULL) == NULL)
+ {
+ pWinMDImport.SuppressRelease();
+ }
+ }
+ }
+
+ return m_pManifestWinMDImport;
+}
+
+#endif // FEATURE_COMINTEROP
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+void Assembly::GenerateBreadcrumbForServicing()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (AppX::IsAppXProcess() || IsIntrospectionOnly() || GetManifestFile()->IsDynamic())
+ {
+ return;
+ }
+
+ if (HasServiceableAttribute() || IsExistingOobAssembly())
+ {
+ StackSString ssDisplayName;
+ GetDisplayName(ssDisplayName);
+
+ WriteBreadcrumb(ssDisplayName);
+ CheckDenyList(ssDisplayName);
+ }
+}
+
+void Assembly::WriteBreadcrumb(const SString &ssDisplayName)
+{
+ STANDARD_VM_CONTRACT;
+
+ WCHAR path[MAX_PATH];
+ HRESULT hr = WszSHGetFolderPath(NULL, CSIDL_COMMON_APPDATA, NULL, SHGFP_TYPE_CURRENT, ARRAYSIZE(path), path);
+ if (hr != S_OK)
+ {
+ return;
+ }
+
+ if (wcscat_s(path, W("\\Microsoft\\NetFramework\\BreadcrumbStore\\")) != 0)
+ {
+ return;
+ }
+
+ size_t dirPathLen = wcslen(path);
+
+ // Validate the display name. E.g., we don't want the display name to start with "..\\".
+ bool inSimpleName = true;
+ for (SString::CIterator it = ssDisplayName.Begin(); it != ssDisplayName.End(); ++it)
+ {
+ WCHAR c = *it;
+
+ // The following characters are always allowed: a-zA-Z0-9_
+        if ((c >= W('a') && c <= W('z')) || (c >= W('A') && c <= W('Z')) || (c >= W('0') && c <= W('9')) || c == W('_')) continue;
+
+ // The period is allowed except as the first char.
+ if (c == W('.') && it != ssDisplayName.Begin()) continue;
+
+        // A comma terminates the assembly simple name; after it we are in the key=value portion of the display name.
+ if (c == W(','))
+ {
+ inSimpleName = false;
+ continue;
+ }
+
+        // In the key=value portion, space and the equal sign are also allowed.
+ if (!inSimpleName && (c == W(' ') || c == W('='))) continue;
+
+ // If we reach here, we have an invalid assembly display name. Return without writing breadcrumb.
+ return;
+ }
+
+    // Log a breadcrumb using the full display name.
+ if (wcscat_s(path, ssDisplayName.GetUnicode()) == 0)
+ {
+ HandleHolder hFile = WszCreateFile(path, 0, 0, NULL, CREATE_NEW, FILE_ATTRIBUTE_NORMAL, NULL);
+ }
+
+    // Log another breadcrumb using the display name without the version.
+    // First make a copy of the display name, then look for its version part.
+ StackSString ssNoVersion(ssDisplayName);
+ SString::Iterator itVersion = ssNoVersion.Begin();
+ if (!ssNoVersion.Find(itVersion, W(", Version=")))
+ {
+ return;
+ }
+
+ // Start from the comma before Version=, advance past the comma, then look for the next comma.
+ SString::Iterator itVersionEnd = itVersion;
+ ++itVersionEnd;
+ if (!ssNoVersion.Find(itVersionEnd, W(',')))
+ {
+ // Version is the last key=value pair.
+ itVersionEnd = ssNoVersion.End();
+ }
+
+ // Erase the version.
+ ssNoVersion.Delete(itVersion, itVersionEnd - itVersion);
+
+ // Generate the full path string and create the file.
+ path[dirPathLen] = W('\0');
+ if (wcscat_s(path, ssNoVersion.GetUnicode()) == 0)
+ {
+ HandleHolder hFile = WszCreateFile(path, 0, 0, NULL, CREATE_NEW, FILE_ATTRIBUTE_NORMAL, NULL);
+ }
+
+}
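+
+// Worked example (with a hypothetical display name): given
+//   "Contoso.Widgets, Version=1.0.0.0, Culture=neutral, PublicKeyToken=0123456789abcdef"
+// this writes two breadcrumb files under the BreadcrumbStore directory: one named
+// with the full display name above, and one with the Version pair erased:
+//   "Contoso.Widgets, Culture=neutral, PublicKeyToken=0123456789abcdef"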
+
+bool Assembly::HasServiceableAttribute()
+{
+ STANDARD_VM_CONTRACT;
+
+ IMDInternalImport *pImport = GetManifestImport();
+ MDEnumHolder hEnum(pImport);
+ HRESULT hr = pImport->EnumCustomAttributeByNameInit(GetManifestToken(), ASSEMBLY_METADATA_TYPE, &hEnum);
+ if (hr != S_OK)
+ {
+ return false;
+ }
+
+ mdCustomAttribute tkAttribute;
+ while (pImport->EnumNext(&hEnum, &tkAttribute))
+ {
+ // Get raw custom attribute.
+ const BYTE *pbAttr = NULL; // Custom attribute data as a BYTE*.
+ ULONG cbAttr = 0; // Size of custom attribute data.
+ if (FAILED(pImport->GetCustomAttributeAsBlob(tkAttribute, reinterpret_cast<const void **>(&pbAttr), &cbAttr)))
+ {
+ THROW_BAD_FORMAT(BFA_INVALID_TOKEN, GetManifestModule());
+ }
+
+ CustomAttributeParser cap(pbAttr, cbAttr);
+ if (FAILED(cap.ValidateProlog()))
+ {
+ THROW_BAD_FORMAT(BFA_BAD_CA_HEADER, GetManifestModule());
+ }
+
+ // Get the metadata key. It is not null terminated.
+ LPCUTF8 key;
+ ULONG cbKey;
+ if (FAILED(cap.GetString(&key, &cbKey)))
+ {
+ THROW_BAD_FORMAT(BFA_BAD_CA_HEADER, GetManifestModule());
+ }
+
+ const LPCUTF8 szServiceable = "Serviceable";
+ const ULONG cbServiceable = 11;
+ if (cbKey != cbServiceable || strncmp(key, szServiceable, cbKey) != 0)
+ {
+ continue;
+ }
+
+ // Get the metadata value. It is not null terminated.
+ if (FAILED(cap.GetString(&key, &cbKey)))
+ {
+ THROW_BAD_FORMAT(BFA_BAD_CA_HEADER, GetManifestModule());
+ }
+
+ const LPCUTF8 szTrue = "True";
+ const ULONG cbTrue = 4;
+ if (cbKey == cbTrue && strncmp(key, szTrue, cbKey) == 0)
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
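+
+// For reference: the loop above matches an assembly-level metadata attribute
+// whose first string argument (the key) is exactly "Serviceable" and whose
+// second string argument (the value) is exactly "True"; any other key or value
+// is skipped.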
+
+bool Assembly::IsExistingOobAssembly()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return ExistingOobAssemblyList::Instance()->IsOnlist(this);
+}
+
+void Assembly::CheckDenyList(const SString &ssDisplayName)
+{
+ STANDARD_VM_CONTRACT;
+
+ StackSString ssKeyName(W("SOFTWARE\\Microsoft\\.NETFramework\\Policy\\DenyList\\"));
+
+ ssKeyName.Append(ssDisplayName);
+
+ RegKeyHolder hKey;
+ LONG status = RegOpenKeyEx(HKEY_LOCAL_MACHINE, ssKeyName.GetUnicode(), 0, KEY_WOW64_64KEY | GENERIC_READ, &hKey);
+
+ if (status != ERROR_SUCCESS)
+ {
+ return;
+ }
+
+ StackSString ssFwlink;
+ HRESULT hr = Clr::Util::Reg::ReadStringValue(hKey, NULL, NULL, ssFwlink);
+ if (FAILED(hr) || ssFwlink.GetCount() == 0)
+ {
+ ssFwlink.Set(W("http://go.microsoft.com/fwlink/?LinkID=286319"));
+ }
+
+ StackSString ssMessageTemplate;
+ if(!ssMessageTemplate.LoadResource(CCompRC::Optional, IDS_EE_ASSEMBLY_ON_DENY_LIST))
+ {
+ ssMessageTemplate.Set(W("The assembly %1 that the application tried to load has a known vulnerability. Please go to %2 to find a fix for this issue."));
+ }
+
+ StackSString ssMessage;
+ ssMessage.FormatMessage(FORMAT_MESSAGE_FROM_STRING, ssMessageTemplate.GetUnicode(), 0, 0, ssDisplayName, ssFwlink);
+
+ ClrReportEvent(
+ W(".NET Runtime"), // Event source
+ EVENTLOG_ERROR_TYPE, // Type
+ 0, // Category
+ SecurityConfig, // Event ID
+ NULL, // User SID
+ ssMessage.GetUnicode()); // Message
+
+ NewHolder<EEMessageException> pEx(new EEMessageException(kSecurityException, IDS_EE_ASSEMBLY_ON_DENY_LIST, ssDisplayName.GetUnicode(), ssFwlink.GetUnicode()));
+ EEFileLoadException::Throw(m_pManifestFile, pEx->GetHR(), pEx);
+}
+
+BOOL IsReportableAssembly(PEAssembly *pPEAssembly)
+{
+ STANDARD_VM_CONTRACT;
+
+    // An assembly is reportable if it could have used a native image, whether or not it actually did.
+ BOOL fCanUseNativeImage = (pPEAssembly->HasHostAssembly() || pPEAssembly->IsContextLoad()) &&
+ pPEAssembly->CanUseNativeImage() &&
+ !IsNativeImageOptedOut(pPEAssembly->GetFusionAssemblyName());
+
+ return fCanUseNativeImage;
+}
+
+BOOL Assembly::SupportsAutoNGenWorker()
+{
+ STANDARD_VM_CONTRACT;
+
+ PEAssembly *pPEAssembly = GetManifestFile();
+
+ if (pPEAssembly->IsSourceGAC() && Fusion::Util::IsUnifiedAssembly(pPEAssembly->GetFusionAssemblyName()) == S_OK)
+ {
+        // Assemblies in the .NET Framework support Auto NGen.
+ return TRUE;
+ }
+
+ if (IsAfContentType_WindowsRuntime(GetFlags()))
+ {
+ // WinMD files support Auto NGen.
+ return TRUE;
+ }
+
+ if (pPEAssembly->HasHostAssembly())
+ {
+ // Auto NGen is enabled on all Metro app assemblies.
+ return TRUE;
+ }
+
+ if (pPEAssembly->IsSourceGAC())
+ {
+        // For non-framework assemblies in the GAC, look for the TargetFrameworkAttribute.
+ const BYTE *pbAttr; // Custom attribute data as a BYTE*.
+ ULONG cbAttr; // Size of custom attribute data.
+ HRESULT hr = GetManifestImport()->GetCustomAttributeByName(GetManifestToken(), TARGET_FRAMEWORK_TYPE, (const void**)&pbAttr, &cbAttr);
+ if (hr != S_OK)
+ {
+ return FALSE;
+ }
+
+ CustomAttributeParser cap(pbAttr, cbAttr);
+ if (FAILED(cap.ValidateProlog()))
+ {
+ THROW_BAD_FORMAT(BFA_BAD_CA_HEADER, GetManifestModule());
+ }
+ LPCUTF8 lpTargetFramework;
+ ULONG cbTargetFramework;
+ if (FAILED(cap.GetString(&lpTargetFramework, &cbTargetFramework)))
+ {
+ THROW_BAD_FORMAT(BFA_BAD_CA_HEADER, GetManifestModule());
+ }
+
+ if (lpTargetFramework == NULL || cbTargetFramework == 0)
+ {
+ return FALSE;
+ }
+
+ SString ssTargetFramework(SString::Utf8, lpTargetFramework, cbTargetFramework);
+
+        // Look for two special TargetFramework values that disable Auto NGen. To guard against future
+ // variations of the string values, we do prefix matches.
+ SString ssFramework40(SString::Literal, W(".NETFramework,Version=v4.0"));
+ SString ssPortableLib(SString::Literal, W(".NETPortable,"));
+ if (ssTargetFramework.BeginsWithCaseInsensitive(ssFramework40) || ssTargetFramework.BeginsWithCaseInsensitive(ssPortableLib))
+ {
+ return FALSE;
+ }
+
+ // If TargetFramework doesn't match one of the two special values, we enable Auto NGen.
+ return TRUE;
+ }
+
+ return FALSE;
+}
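+
+// Worked example of the prefix matching above (illustrative values only):
+// a TargetFramework string of ".NETFramework,Version=v4.0" or ".NETPortable,..."
+// matches a disabling prefix, whereas ".NETFramework,Version=v4.5" matches
+// neither prefix and therefore leaves Auto NGen enabled.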
+
+void Assembly::ReportAssemblyUse()
+{
+ STANDARD_VM_CONTRACT;
+
+ // Do not log if we don't have a global gac logger object
+ if (g_pIAssemblyUsageLogGac != NULL)
+ {
+ // Only consider reporting for loads that could possibly use native images.
+ PEAssembly *pPEAssembly = this->GetManifestFile();
+ if (IsReportableAssembly(pPEAssembly) && !pPEAssembly->IsReportedToUsageLog())
+ {
+ // Do not log repeatedly
+ pPEAssembly->SetReportedToUsageLog();
+
+ ReleaseHolder<IAssemblyUsageLog> pRefCountedUsageLog;
+ IAssemblyUsageLog *pUsageLog = NULL;
+ if (SupportsAutoNGen())
+ {
+ if (pPEAssembly->IsSourceGAC())
+ {
+ pUsageLog = g_pIAssemblyUsageLogGac;
+ }
+ else if (pPEAssembly->HasHostAssembly())
+ {
+ UINT_PTR binderId;
+ IfFailThrow(pPEAssembly->GetHostAssembly()->GetBinderID(&binderId));
+ pRefCountedUsageLog = AssemblyUsageLogManager::GetUsageLogForBinder(binderId);
+ pUsageLog = pRefCountedUsageLog;
+ }
+ }
+
+ if (pUsageLog)
+ {
+ StackSString name;
+ // GAC Assemblies are reported by assembly name
+ if (pUsageLog == g_pIAssemblyUsageLogGac)
+ {
+ this->GetDisplayName(name);
+ }
+ // Other assemblies (AppX...) are reported by file path
+ else
+ {
+ name.Set(pPEAssembly->GetILimage()->GetPath().GetUnicode());
+ }
+
+ if (pPEAssembly->HasNativeImage())
+ {
+ if(!IsSystem())
+ {
+ // If the assembly used a native image, report it
+ ReleaseHolder<PEImage> pNativeImage = pPEAssembly->GetNativeImageWithRef();
+ pUsageLog->LogFile(name.GetUnicode(), pNativeImage->GetPath().GetUnicode(), ASSEMBLY_USAGE_LOG_FLAGS_NI);
+ }
+ }
+ else
+ {
+ // If the assembly could have used a native image, but did not, report the IL image
+ pUsageLog->LogFile(name.GetUnicode(), NULL, ASSEMBLY_USAGE_LOG_FLAGS_IL);
+ }
+ }
+ }
+ }
+}
+#endif // !FEATURE_CORECLR && !CROSSGEN_COMPILE
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+void Assembly::EnsureActive()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ GetDomainAssembly()->EnsureActive();
+}
+#endif //!DACCESS_COMPILE
+
+CHECK Assembly::CheckActivated()
+{
+#ifndef DACCESS_COMPILE
+ WRAPPER_NO_CONTRACT;
+
+ CHECK(GetDomainAssembly()->CheckActivated());
+#endif
+ CHECK_OK;
+}
+
+
+
+#ifdef DACCESS_COMPILE
+
+void
+Assembly::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ // We don't need Assembly info in triage dumps.
+ if (flags == CLRDATA_ENUM_MEM_TRIAGE)
+ {
+ return;
+ }
+
+ DAC_ENUM_DTHIS();
+ EMEM_OUT(("MEM: %p Assembly\n", dac_cast<TADDR>(this)));
+
+ if (m_pDomain.IsValid())
+ {
+ m_pDomain->EnumMemoryRegions(flags, true);
+ }
+ if (m_pClassLoader.IsValid())
+ {
+ m_pClassLoader->EnumMemoryRegions(flags);
+ }
+ if (m_pManifest.IsValid())
+ {
+ m_pManifest->EnumMemoryRegions(flags, true);
+ }
+ if (m_pManifestFile.IsValid())
+ {
+ m_pManifestFile->EnumMemoryRegions(flags);
+ }
+}
+
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+
+// static
+const LPCSTR FriendAssemblyDescriptor::AllInternalsVisibleProperty = "AllInternalsVisible";
+
+#endif // FEATURE_FRAMEWORK_INTERNAL
+
+FriendAssemblyDescriptor::FriendAssemblyDescriptor()
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+ : m_crstFriendMembersCache(CrstFriendAccessCache)
+#endif // FEATURE_FRAMEWORK_INTERNAL
+{
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+ LockOwner lockOwner = { &m_crstFriendMembersCache, IsOwnerOfCrst };
+ m_htFriendMembers.Init(FriendMemberHashSize, &lockOwner);
+#endif // FEATURE_FRAMEWORK_INTERNAL
+}
+
+FriendAssemblyDescriptor::~FriendAssemblyDescriptor()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ }
+ CONTRACTL_END;
+
+ ArrayList::Iterator itFullAccessAssemblies = m_alFullAccessFriendAssemblies.Iterate();
+ while (itFullAccessAssemblies.Next())
+ {
+ FriendAssemblyName_t *pFriendAssemblyName = static_cast<FriendAssemblyName_t *>(itFullAccessAssemblies.GetElement());
+#ifdef FEATURE_FUSION
+ pFriendAssemblyName->Release();
+#else // FEATURE_FUSION
+ delete pFriendAssemblyName;
+#endif // FEATURE_FUSION
+ }
+
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+ ArrayList::Iterator itPartialAccessAssemblies = m_alPartialAccessFriendAssemblies.Iterate();
+ while (itPartialAccessAssemblies.Next())
+ {
+ FriendAssemblyName_t *pFriendAssemblyName = static_cast<FriendAssemblyName_t *>(itPartialAccessAssemblies.GetElement());
+#ifdef FEATURE_FUSION
+ pFriendAssemblyName->Release();
+#else // FEATURE_FUSION
+ delete pFriendAssemblyName;
+#endif // FEATURE_FUSION
+ }
+#endif // FEATURE_FRAMEWORK_INTERNAL
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Builds a FriendAssemblyDescriptor for a given assembly
+//
+// Arguments:
+// pAssembly - assembly to get friend assembly information for
+//
+// Return Value:
+// A friend assembly descriptor if the assembly declares any friend assemblies, otherwise NULL
+//
+
+// static
+FriendAssemblyDescriptor *FriendAssemblyDescriptor::CreateFriendAssemblyDescriptor(PEAssembly *pAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pAssembly));
+ }
+ CONTRACTL_END
+
+ NewHolder<FriendAssemblyDescriptor> pFriendAssemblies = new FriendAssemblyDescriptor;
+
+ // We're going to do this twice, once for InternalsVisibleTo and once for IgnoresAccessChecks
+ ReleaseHolder<IMDInternalImport> pImport(pAssembly->GetMDImportWithRef());
+ for(int count = 0 ; count < 2 ; ++count)
+ {
+ _ASSERTE(pImport != NULL);
+ MDEnumHolder hEnum(pImport);
+ HRESULT hr = S_OK;
+
+ if (count == 0)
+ {
+ hr = pImport->EnumCustomAttributeByNameInit(TokenFromRid(1, mdtAssembly), FRIEND_ASSEMBLY_TYPE, &hEnum);
+ }
+ else
+ {
+ hr = pImport->EnumCustomAttributeByNameInit(TokenFromRid(1, mdtAssembly), SUBJECT_ASSEMBLY_TYPE, &hEnum);
+ }
+
+ IfFailThrow(hr);
+
+ // Nothing to do if there are no attributes
+ if (hr == S_FALSE)
+ {
+ continue;
+ }
+
+ // Enumerate over the declared friends
+ mdCustomAttribute tkAttribute;
+ while (pImport->EnumNext(&hEnum, &tkAttribute))
+ {
+ // Get raw custom attribute.
+ const BYTE *pbAttr = NULL; // Custom attribute data as a BYTE*.
+ ULONG cbAttr = 0; // Size of custom attribute data.
+ if (FAILED(pImport->GetCustomAttributeAsBlob(tkAttribute, reinterpret_cast<const void **>(&pbAttr), &cbAttr)))
+ {
+ THROW_BAD_FORMAT(BFA_INVALID_TOKEN, pAssembly);
+ }
+
+ CustomAttributeParser cap(pbAttr, cbAttr);
+ if (FAILED(cap.ValidateProlog()))
+ {
+ THROW_BAD_FORMAT(BFA_BAD_CA_HEADER, pAssembly);
+ }
+
+ // Get the name of the friend assembly.
+ LPCUTF8 szString;
+ ULONG cbString;
+ if (FAILED(cap.GetNonNullString(&szString, &cbString)))
+ {
+ THROW_BAD_FORMAT(BFA_BAD_CA_HEADER, pAssembly);
+ }
+
+ // Convert the string to Unicode.
+ StackSString displayName(SString::Utf8, szString, cbString);
+
+ // Create an AssemblyNameObject from the string.
+ FriendAssemblyNameHolder pFriendAssemblyName;
+#ifdef FEATURE_FUSION
+ hr = CreateAssemblyNameObject(&pFriendAssemblyName, displayName.GetUnicode(), CANOF_PARSE_FRIEND_DISPLAY_NAME, NULL);
+#else // FEATURE_FUSION
+ StackScratchBuffer buffer;
+ pFriendAssemblyName = new FriendAssemblyName_t;
+ hr = pFriendAssemblyName->Init(displayName.GetUTF8(buffer));
+
+ if (SUCCEEDED(hr))
+ {
+ hr = pFriendAssemblyName->CheckFriendAssemblyName();
+ }
+#endif // FEATURE_FUSION
+
+ if (FAILED(hr))
+ {
+ THROW_HR_ERROR_WITH_INFO(hr, pAssembly);
+ }
+
+ if (count == 1)
+ {
+ pFriendAssemblies->AddSubjectAssembly(pFriendAssemblyName);
+ pFriendAssemblyName.SuppressRelease();
+            // The checks below are unnecessary for IgnoresAccessChecksTo.
+ continue;
+ }
+
+ // CoreCLR does not have a valid scenario for strong-named assemblies requiring their dependencies
+ // to be strong-named as well.
+#if !defined(FEATURE_CORECLR)
+ // If this assembly has a strong name, then its friends declarations need to have strong names too
+ if (pAssembly->IsStrongNamed())
+ {
+#ifdef FEATURE_FUSION
+ DWORD dwSize = 0;
+ if (SUCCEEDED(hr = pFriendAssemblyName->GetProperty(ASM_NAME_PUBLIC_KEY, NULL, &dwSize)))
+ {
+ // If this call succeeds with an empty buffer, then the supplied name doesn't have a public key.
+ THROW_HR_ERROR_WITH_INFO(META_E_CA_FRIENDS_SN_REQUIRED, pAssembly);
+ }
+ else if (hr != HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ {
+ IfFailThrow(hr);
+ }
+#else // FEATURE_FUSION
+            // Desktop crossgen takes this path.
+ if (!pFriendAssemblyName->IsStrongNamed())
+ {
+                // The supplied friend assembly name does not include a public key.
+ THROW_HR_ERROR_WITH_INFO(META_E_CA_FRIENDS_SN_REQUIRED, pAssembly);
+ }
+#endif // FEATURE_FUSION
+ }
+#endif // !defined(FEATURE_CORECLR)
+
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+ bool fAllInternalsVisible = true;
+
+ // Framework internal is only available for framework assemblies
+ if (pAssembly->IsProfileAssembly())
+ {
+ //
+ // Find out if the friend assembly is allowed access to all internals, or only selected internals.
+ // We default to true for compatibility with behavior of previous runtimes.
+ //
+
+ CaNamedArg allInternalsArg;
+ allInternalsArg.InitBoolField(const_cast<LPCSTR>(AllInternalsVisibleProperty), true);
+ hr = ParseKnownCaNamedArgs(cap, &allInternalsArg, 1);
+ if (FAILED(hr) && hr != META_E_CA_UNKNOWN_ARGUMENT)
+ {
+ IfFailThrow(hr);
+ }
+
+ fAllInternalsVisible = !!allInternalsArg.val.u1;
+ }
+
+ pFriendAssemblies->AddFriendAssembly(pFriendAssemblyName, fAllInternalsVisible);
+
+#else // FEATURE_FRAMEWORK_INTERNAL
+ pFriendAssemblies->AddFriendAssembly(pFriendAssemblyName);
+#endif // FEATURE_FRAMEWORK_INTERNAL
+
+ pFriendAssemblyName.SuppressRelease();
+ }
+ }
+
+ pFriendAssemblies.SuppressRelease();
+ return pFriendAssemblies.Extract();
+}
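+
+// Illustrative caller sketch (hypothetical variable names):
+//
+//   FriendAssemblyDescriptor *pDescriptor =
+//       FriendAssemblyDescriptor::CreateFriendAssemblyDescriptor(pPEAssembly);
+//
+// The caller takes ownership of the returned descriptor (the holder is
+// extracted above) and is responsible for deleting it.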
+
+//---------------------------------------------------------------------------------------
+//
+// Adds an assembly to the list of friend assemblies for this descriptor
+//
+// Arguments:
+// pFriendAssembly - friend assembly to add to the list
+// fAllInternalsVisible - true if all internals are visible to the friend, false if only specifically
+// marked internals are visible
+//
+// Notes:
+// This method takes ownership of the friend assembly name. It is not thread safe and does not check to
+// see if an assembly has already been added to the friend assembly list.
+//
+
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+void FriendAssemblyDescriptor::AddFriendAssembly(FriendAssemblyName_t *pFriendAssembly, bool fAllInternalsVisible)
+#else // FEATURE_FRAMEWORK_INTERNAL
+void FriendAssemblyDescriptor::AddFriendAssembly(FriendAssemblyName_t *pFriendAssembly)
+#endif // FEATURE_FRAMEWORK_INTERNAL
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pFriendAssembly));
+ }
+ CONTRACTL_END
+
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+ if (fAllInternalsVisible)
+#endif // FEATURE_FRAMEWORK_INTERNAL
+ {
+ m_alFullAccessFriendAssemblies.Append(pFriendAssembly);
+ }
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+ else
+ {
+ m_alPartialAccessFriendAssemblies.Append(pFriendAssembly);
+ }
+#endif // FEATURE_FRAMEWORK_INTERNAL
+}
+
+void FriendAssemblyDescriptor::AddSubjectAssembly(FriendAssemblyName_t *pFriendAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pFriendAssembly));
+ }
+ CONTRACTL_END
+
+ m_subjectAssemblies.Append(pFriendAssembly);
+}
+
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+
+//
+// Helpers to see if a member is internal, and therefore could be considered for friend access.
+//
+
+// static
+bool FriendAssemblyDescriptor::FriendAccessAppliesTo(FieldDesc *pFD)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD dwFieldProtection = pFD->GetFieldProtection();
+ return IsFdAssembly(dwFieldProtection) ||
+ IsFdFamANDAssem(dwFieldProtection) ||
+ IsFdFamORAssem(dwFieldProtection);
+}
+
+// static
+bool FriendAssemblyDescriptor::FriendAccessAppliesTo(MethodDesc *pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD dwMethodProtection = pMD->GetAttrs();
+ return IsMdAssem(dwMethodProtection) ||
+ IsMdFamANDAssem(dwMethodProtection) ||
+ IsMdFamORAssem(dwMethodProtection);
+}
+
+// static
+bool FriendAssemblyDescriptor::FriendAccessAppliesTo(MethodTable *pMT)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD dwTypeProtection = pMT->GetClass()->GetProtection();
+ return IsTdNotPublic(dwTypeProtection) ||
+ IsTdNestedAssembly(dwTypeProtection) ||
+ IsTdNestedFamANDAssem(dwTypeProtection) ||
+ IsTdNestedFamORAssem(dwTypeProtection);
+}
+
+//
+// Helper methods to get the metadata token for items that may have the FriendAccessAllowed attribute.
+//
+
+// static
+mdToken FriendAssemblyDescriptor::GetMetadataToken(FieldDesc *pFD)
+{
+ WRAPPER_NO_CONTRACT;
+ return pFD->GetMemberDef();
+}
+
+// static
+mdToken FriendAssemblyDescriptor::GetMetadataToken(MethodDesc *pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ return pMD->GetMemberDef();
+}
+
+// static
+mdToken FriendAssemblyDescriptor::GetMetadataToken(MethodTable *pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ return pMT->GetCl();
+}
+
+// static
+bool FriendAssemblyDescriptor::HasFriendAccessAttribute(IMDInternalImport *pMDImport, mdToken tkMember)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pMDImport));
+ }
+ CONTRACTL_END;
+
+ const BYTE *pbAttribute = NULL;
+ ULONG cbAttribute = 0;
+ HRESULT hr = pMDImport->GetCustomAttributeByName(tkMember,
+ FRIEND_ACCESS_ALLOWED_ATTRIBUTE_TYPE,
+ reinterpret_cast<const void **>(&pbAttribute),
+ &cbAttribute);
+ IfFailThrow(hr);
+
+ return hr == S_OK;
+}
+
+#endif // FEATURE_FRAMEWORK_INTERNAL
+
+// static
+bool FriendAssemblyDescriptor::IsAssemblyOnList(PEAssembly *pAssembly, const ArrayList &alAssemblyNames)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pAssembly));
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_FUSION
+ AssemblySpec asmDef;
+ asmDef.InitializeSpec(pAssembly);
+#endif
+
+ ArrayList::ConstIterator itAssemblyNames = alAssemblyNames.Iterate();
+ while (itAssemblyNames.Next())
+ {
+ const FriendAssemblyName_t *pFriendAssemblyName = static_cast<const FriendAssemblyName_t *>(itAssemblyNames.GetElement());
+#ifdef FEATURE_FUSION
+ // This is a const operation on the pointer, but Fusion is not const-correct.
+        // @TODO - propagate const correctness through Fusion and remove this cast
+ HRESULT hr = const_cast<FriendAssemblyName_t *>(pFriendAssemblyName)->IsEqual(pAssembly->GetFusionAssemblyName(), ASM_CMPF_DEFAULT);
+ IfFailThrow(hr);
+#else
+ HRESULT hr = AssemblySpec::RefMatchesDef(pFriendAssemblyName, &asmDef) ? S_OK : S_FALSE;
+#endif
+
+ if (hr == S_OK)
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
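+
+// Illustrative sketch of how this helper is consulted (assumed usage; the
+// actual callers are the friend-access checks):
+//
+//   if (IsAssemblyOnList(pAccessingAssembly->GetManifestFile(), m_alFullAccessFriendAssemblies))
+//   {
+//       // pAccessingAssembly was declared a friend with full access.
+//   }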
+
+#endif // !DACCESS_COMPILE
+
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE) && !defined(DACCESS_COMPILE)
+
+ExistingOobAssemblyList::ExistingOobAssemblyList()
+{
+ STANDARD_VM_CONTRACT;
+
+ RegKeyHolder hKey;
+ LONG status = RegOpenKeyExW(HKEY_LOCAL_MACHINE, W("SOFTWARE\\Microsoft\\.NETFramework\\Policy\\Servicing"), 0, KEY_WOW64_64KEY | GENERIC_READ, &hKey);
+ if (status != ERROR_SUCCESS)
+ {
+ return;
+ }
+
+ for (DWORD i = 0; ; i++)
+ {
+ WCHAR name[MAX_PATH + 1];
+ DWORD cchName = ARRAYSIZE(name);
+ status = RegEnumKeyExW(hKey, i, name, &cchName, NULL, NULL, NULL, NULL);
+
+ if (status == ERROR_NO_MORE_ITEMS)
+ {
+ break;
+ }
+
+ if (status == ERROR_SUCCESS)
+ {
+ NonVMComHolder<IAssemblyName> pAssemblyName;
+ HRESULT hr = CreateAssemblyNameObject(&pAssemblyName, name, CANOF_PARSE_DISPLAY_NAME, NULL);
+ if (SUCCEEDED(hr))
+ {
+ hr = m_alExistingOobAssemblies.Append(pAssemblyName.GetValue());
+ if (SUCCEEDED(hr))
+ {
+ pAssemblyName.SuppressRelease();
+ }
+ }
+ }
+ }
+}
+
+bool ExistingOobAssemblyList::IsOnlist(Assembly *pAssembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ ArrayList::Iterator itAssemblyNames = m_alExistingOobAssemblies.Iterate();
+ while (itAssemblyNames.Next())
+ {
+ IAssemblyName *pAssemblyName = static_cast<IAssemblyName *>(itAssemblyNames.GetElement());
+ HRESULT hr = pAssemblyName->IsEqual(pAssembly->GetFusionAssemblyName(), ASM_CMPF_DEFAULT);
+ if (hr == S_OK)
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void ExistingOobAssemblyList::Init()
+{
+ STANDARD_VM_CONTRACT;
+
+ s_pInstance = new ExistingOobAssemblyList();
+}
+
+ExistingOobAssemblyList *ExistingOobAssemblyList::s_pInstance;
+#endif // !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE) && !defined(DACCESS_COMPILE)
diff --git a/src/vm/assembly.hpp b/src/vm/assembly.hpp
new file mode 100644
index 0000000000..6493b0b1d1
--- /dev/null
+++ b/src/vm/assembly.hpp
@@ -0,0 +1,1111 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: Assembly.hpp
+**
+
+**
+** Purpose: Implements assembly (loader domain) architecture
+**
+**
+===========================================================*/
+#ifndef _ASSEMBLY_H
+#define _ASSEMBLY_H
+
+#include "ceeload.h"
+#include "exceptmacros.h"
+#include "clsload.hpp"
+#ifdef FEATURE_FUSION
+#include "fusion.h"
+#include "fusionbind.h"
+#endif
+#include "eehash.h"
+#include "listlock.h"
+#include "iceefilegen.h"
+#include "cordbpriv.h"
+#include "assemblyspec.hpp"
+
+// A helper macro for the assembly's module hash (m_pAllowedFiles).
+#define UTF8_TO_LOWER_CASE(str, qb) \
+{ \
+ WRAPPER_NO_CONTRACT; \
+ INT32 allocBytes = InternalCasingHelper::InvariantToLower(NULL, 0, str); \
+ qb.AllocThrows(allocBytes); \
+ InternalCasingHelper::InvariantToLower((LPUTF8) qb.Ptr(), allocBytes, str); \
+}
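+
+// Illustrative usage sketch (assumes qb is a CQuickBytes-style buffer that
+// provides AllocThrows/Ptr, and a hypothetical UTF-8 input):
+//
+//   CQuickBytes qbLower;
+//   UTF8_TO_LOWER_CASE("MyModule.dll", qbLower);
+//   LPUTF8 szLower = (LPUTF8) qbLower.Ptr();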
+
+
+class BaseDomain;
+class AppDomain;
+class DomainAssembly;
+class DomainModule;
+class SystemDomain;
+class ClassLoader;
+class ComDynamicWrite;
+class AssemblySink;
+class AssemblyNative;
+class AssemblySpec;
+class ISharedSecurityDescriptor;
+class SecurityTransparencyBehavior;
+class Pending;
+class AllocMemTracker;
+class FriendAssemblyDescriptor;
+
+// Bits in m_dwDynamicAssemblyAccess (see System.Reflection.Emit.AssemblyBuilderAccess.cs)
+#define ASSEMBLY_ACCESS_RUN 0x01
+#define ASSEMBLY_ACCESS_SAVE 0x02
+#define ASSEMBLY_ACCESS_REFLECTION_ONLY 0x04
+#define ASSEMBLY_ACCESS_COLLECT 0x8
+
+// This must match System.Reflection.Emit.DynamicAssemblyFlags in AssemblyBuilder.cs
+enum DynamicAssemblyFlags
+{
+ kAllCriticalAssembly = 0x00000001,
+ kAptcaAssembly = 0x00000002,
+ kCriticalAssembly = 0x00000004,
+ kTransparentAssembly = 0x00000008,
+ kTreatAsSafeAssembly = 0x00000010
+};
+
+struct CreateDynamicAssemblyArgsGC
+{
+ APPDOMAINREF refThis;
+ OBJECTREF refusedPset;
+ OBJECTREF optionalPset;
+ OBJECTREF requiredPset;
+ OBJECTREF identity;
+ ASSEMBLYNAMEREF assemblyName;
+ U1ARRAYREF securityRulesBlob;
+ U1ARRAYREF aptcaBlob;
+ LOADERALLOCATORREF loaderAllocator;
+};
+
+// This enumeration must be kept in sync with the managed enum System.Security.SecurityContextSource
+typedef enum
+{
+ kCurrentAppDomain = 0,
+ kCurrentAssembly
+}
+SecurityContextSource;
+
+struct CreateDynamicAssemblyArgs : CreateDynamicAssemblyArgsGC
+{
+ INT32 access;
+ DynamicAssemblyFlags flags;
+ StackCrawlMark* stackMark;
+ SecurityContextSource securityContextSource;
+};
+
+// An assembly is the unit of deployment for managed code. Typically Assemblies are one-to-one with files
+// (Modules); however this is not required, as an assembly can contain several files (typically you only
+// do this to have resource-only modules that are national-language specific).
+//
+// Conceptually Assemblies are loaded into a code:AppDomain
+//
+// So in general an assembly is a list of code:Module, where a code:Module is 1-1 with a DLL or EXE file.
+//
+// One of the modules, code:Assembly.m_pManifest, is special in that it knows about all the other
+// modules in an assembly (often it is the only one).
+//
+class Assembly
+{
+ friend class BaseDomain;
+ friend class SystemDomain;
+ friend class ClassLoader;
+ friend class AssemblyNative;
+ friend class AssemblySpec;
+ friend class NDirect;
+ friend class AssemblyNameNative;
+ friend class ClrDataAccess;
+
+public:
+ Assembly(BaseDomain *pDomain, PEAssembly *pFile, DebuggerAssemblyControlFlags debuggerFlags, BOOL fIsCollectible);
+ void Init(AllocMemTracker *pamTracker, LoaderAllocator *pLoaderAllocator);
+
+ void StartUnload();
+ void Terminate( BOOL signalProfiler = TRUE );
+
+ static Assembly *Create(BaseDomain *pDomain, PEAssembly *pFile, DebuggerAssemblyControlFlags debuggerFlags, BOOL fIsCollectible, AllocMemTracker *pamTracker, LoaderAllocator *pLoaderAllocator);
+
+ BOOL IsSystem() { WRAPPER_NO_CONTRACT; return m_pManifestFile->IsSystem(); }
+
+ static Assembly *CreateDynamic(AppDomain *pDomain, CreateDynamicAssemblyArgs *args);
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ ReflectionModule *CreateDynamicModule(LPCWSTR szModuleName, LPCWSTR szFileName, BOOL fIsTransient, INT32* ptkFile = NULL);
+#endif
+
+ MethodDesc *GetEntryPoint();
+
+ //****************************************************************************************
+ //
+ // Additional init tasks for Modules. This should probably be part of Module::Initialize()
+ // but there's at least one call to ReflectionModule::Create that is *not* followed by a
+ // PrepareModule call.
+ void PrepareModuleForAssembly(Module* module, AllocMemTracker *pamTracker);
+
+ // This is the final step of publishing a Module into an Assembly. This step cannot fail.
+ void PublishModuleIntoAssembly(Module *module);
+
+#ifndef DACCESS_COMPILE
+ void SetIsTenured()
+ {
+ WRAPPER_NO_CONTRACT;
+ m_pManifest->SetIsTenured();
+ }
+
+ // CAUTION: This should only be used as backout code if an assembly is unsuccessfully
+ // added to the shared domain assembly map.
+ void UnsetIsTenured()
+ {
+ WRAPPER_NO_CONTRACT;
+ m_pManifest->UnsetIsTenured();
+ }
+#endif // DACCESS_COMPILE
+
+ //****************************************************************************************
+ //
+ // Returns the class loader associated with the assembly.
+ ClassLoader* GetLoader()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_pClassLoader;
+ }
+
+ // ------------------------------------------------------------
+ // Modules
+ // ------------------------------------------------------------
+
+ class ModuleIterator
+ {
+ Module* m_pManifest;
+ DWORD m_i;
+
+ public:
+        // The preferred constructor. If you use this, you don't have to
+        // call Start() yourself.
+ ModuleIterator(Assembly *pAssembly)
+ {
+ WRAPPER_NO_CONTRACT;
+ Start(pAssembly);
+ }
+
+        // When you don't have the Assembly at construction time, use this
+ // constructor, and explicitly call Start() to begin the iteration.
+ ModuleIterator()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ m_pManifest = NULL;
+ m_i = (DWORD) -1;
+ }
+
+ void Start(Assembly * pAssembly)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ m_pManifest = pAssembly->GetManifestModule();
+ m_i = (DWORD) -1;
+ }
+
+ BOOL Next()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ while (++m_i <= m_pManifest->GetFileMax())
+ {
+ if (GetModule() != NULL)
+ return TRUE;
+ }
+ return FALSE;
+ }
+
+ Module *GetModule()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return m_pManifest->LookupFile(TokenFromRid(m_i, mdtFile));
+ }
+ };
+
+ ModuleIterator IterateModules()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return ModuleIterator(this);
+ }
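+
+    // Illustrative usage sketch (hypothetical caller):
+    //
+    //   Assembly::ModuleIterator i = pAssembly->IterateModules();
+    //   while (i.Next())
+    //   {
+    //       Module *pModule = i.GetModule();
+    //       // ... inspect pModule ...
+    //   }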
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ //****************************************************************************************
+ //
+ // Find the module
+ Module* FindModule(PEFile *pFile, BOOL includeLoading = FALSE);
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+#ifdef FEATURE_MIXEDMODE
+ // Finds loading modules as well
+ DomainFile* FindIJWDomainFile(HMODULE hMod, const SString &path);
+#endif
+ //****************************************************************************************
+ //
+ // Get the domain the assembly lives in.
+ PTR_BaseDomain Parent()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pDomain;
+ }
+
+    // Sets the assembly's domain.
+ void SetParent(BaseDomain* pParent);
+
+ //-----------------------------------------------------------------------------------------
+    // If true, this assembly is loaded only for introspection. We can load modules, types, etc.,
+ // but no code execution or object instantiation is permitted.
+ //-----------------------------------------------------------------------------------------
+ BOOL IsIntrospectionOnly();
+
+ //-----------------------------------------------------------------------------------------
+ // EnsureActive ensures that the assembly is properly prepped in the current app domain
+ // for active uses like code execution, static field access, and instance allocation
+ //-----------------------------------------------------------------------------------------
+#ifndef DACCESS_COMPILE
+ VOID EnsureActive();
+#endif
+
+ //-----------------------------------------------------------------------------------------
+ // CheckActivated is a check predicate which should be used in active use paths like code
+ // execution, static field access, and instance allocation
+ //-----------------------------------------------------------------------------------------
+ CHECK CheckActivated();
+
+    // Returns the parent domain if it is not the system area. Returns NULL if it is the
+    // system domain.
+ PTR_BaseDomain GetDomain();
+ PTR_LoaderAllocator GetLoaderAllocator() { LIMITED_METHOD_DAC_CONTRACT; return m_pLoaderAllocator; }
+
+ BOOL GetModuleZapFile(LPCWSTR name, SString &path);
+
+#if defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+ BOOL AllowUntrustedCaller();
+#endif // defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+
+#ifdef LOGGING
+ LPCWSTR GetDebugName()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetManifestFile()->GetDebugName();
+ }
+#endif
+
+ LPCUTF8 GetSimpleName()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetManifestFile()->GetSimpleName();
+ }
+
+ BOOL IsStrongNamed()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetManifestFile()->IsStrongNamed();
+ }
+
+ const void *GetPublicKey(DWORD *pcbPK)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetManifestFile()->GetPublicKey(pcbPK);
+ }
+
+ ULONG GetHashAlgId()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetManifestFile()->GetHashAlgId();
+ }
+
+ HRESULT GetVersion(USHORT *pMajor, USHORT *pMinor, USHORT *pBuild, USHORT *pRevision)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetManifestFile()->GetVersion(pMajor, pMinor, pBuild, pRevision);
+ }
+
+ LPCUTF8 GetLocale()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetManifestFile()->GetLocale();
+ }
+
+ DWORD GetFlags()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetManifestFile()->GetFlags();
+ }
+
+
+ // Level of strong name support (dynamic assemblies only).
+ enum StrongNameLevel {
+ SN_NONE = 0,
+ SN_PUBLIC_KEY = 1,
+ SN_FULL_KEYPAIR_IN_ARRAY = 2,
+ SN_FULL_KEYPAIR_IN_CONTAINER = 3
+ };
+
+ StrongNameLevel GetStrongNameLevel()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_eStrongNameLevel;
+ }
+
+ void SetStrongNameLevel(StrongNameLevel eLevel)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_eStrongNameLevel = eLevel;
+ }
+
+    // Returns whether CAS policy needs to be resolved for this assembly
+ // or whether it's safe to skip that step.
+ BOOL CanSkipPolicyResolution()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsSystem() || IsIntrospectionOnly() || (m_isDynamic && !(m_dwDynamicAssemblyAccess & ASSEMBLY_ACCESS_RUN));
+ }
+
+ PTR_LoaderHeap GetLowFrequencyHeap();
+ PTR_LoaderHeap GetHighFrequencyHeap();
+ PTR_LoaderHeap GetStubHeap();
+
+ PTR_Module GetManifestModule()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_pManifest;
+ }
+
+ ReflectionModule* GetOnDiskManifestModule()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pOnDiskManifest;
+ }
+
+ BOOL NeedsToHideManifestForEmit()
+ {
+ return m_needsToHideManifestForEmit;
+ }
+
+ PTR_PEAssembly GetManifestFile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_pManifestFile;
+ }
+
+ IMDInternalImport* GetManifestImport()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return m_pManifestFile->GetPersistentMDImport();
+ }
+
+#ifndef DACCESS_COMPILE
+ IMetaDataAssemblyImport* GetManifestAssemblyImporter()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_pManifestFile->GetAssemblyImporter();
+ }
+#endif // DACCESS_COMPILE
+
+ mdAssembly GetManifestToken()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return TokenFromRid(1, mdtAssembly);
+ }
+
+#ifndef DACCESS_COMPILE
+ void GetDisplayName(SString &result, DWORD flags = 0)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return m_pManifestFile->GetDisplayName(result, flags);
+ }
+#endif // DACCESS_COMPILE
+
+ void GetCodeBase(SString &result)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return m_pManifestFile->GetCodeBase(result);
+ }
+
+ OBJECTREF GetExposedObject();
+
+ DebuggerAssemblyControlFlags GetDebuggerInfoBits(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_debuggerFlags;
+ }
+
+ void SetDebuggerInfoBits(DebuggerAssemblyControlFlags flags)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_debuggerFlags = flags;
+ }
+
+ void SetCopiedPDBs()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_debuggerFlags = (DebuggerAssemblyControlFlags) (m_debuggerFlags | DACF_PDBS_COPIED);
+ }
+
+ ULONG HashIdentity()
+ {
+ return GetManifestFile()->HashIdentity();
+ }
+
+ BOOL IsDisabledPrivateReflection();
+
+ //****************************************************************************************
+ //
+ // Uses the given token to load a module or another assembly. Returns the module in
+ // which the implementation resides.
+
+ mdFile GetManifestFileToken(IMDInternalImport *pImport, mdFile kFile);
+ mdFile GetManifestFileToken(LPCSTR name);
+
+ // On failure:
+ // if loadFlag == Loader::Load => throw
+ // if loadFlag != Loader::Load => return NULL
+ Module *FindModuleByExportedType(mdExportedType mdType,
+ Loader::LoadFlag loadFlag,
+ mdTypeDef mdNested,
+ mdTypeDef *pCL);
+
+ static Module * FindModuleByTypeRef(Module * pModule,
+ mdTypeRef typeRef,
+ Loader::LoadFlag loadFlag,
+ BOOL * pfNoResolutionScope);
+
+ Module *FindModuleByName(LPCSTR moduleName);
+
+ //****************************************************************************************
+ //
+ INT32 ExecuteMainMethod(PTRARRAYREF *stringArgs);
+
+ //****************************************************************************************
+
+ Assembly();
+ ~Assembly();
+#ifdef FEATURE_PREJIT
+ void DeleteNativeCodeRanges();
+#endif
+
+ BOOL GetResource(LPCSTR szName, DWORD *cbResource,
+ PBYTE *pbInMemoryResource, Assembly **pAssemblyRef,
+ LPCSTR *szFileName, DWORD *dwLocation,
+ StackCrawlMark *pStackMark = NULL, BOOL fSkipSecurityCheck = FALSE,
+ BOOL fSkipRaiseResolveEvent = FALSE);
+
+ //****************************************************************************************
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ FORCEINLINE BOOL IsDynamic() { LIMITED_METHOD_CONTRACT; return m_isDynamic; }
+ FORCEINLINE BOOL IsCollectible() { LIMITED_METHOD_DAC_CONTRACT; return m_isCollectible; }
+ FORCEINLINE BOOL HasRunAccess() {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_dwDynamicAssemblyAccess & ASSEMBLY_ACCESS_RUN;
+ }
+ FORCEINLINE BOOL HasSaveAccess() {LIMITED_METHOD_CONTRACT; return m_dwDynamicAssemblyAccess & ASSEMBLY_ACCESS_SAVE; }
+
+ DWORD GetNextModuleIndex() { LIMITED_METHOD_CONTRACT; return m_nextAvailableModuleIndex++; }
+
+ void AddType(Module* pModule,
+ mdTypeDef cl);
+ void AddExportedType(mdExportedType cl);
+#ifndef FEATURE_CORECLR
+ void PrepareSavingManifest(ReflectionModule *pAssemblyModule);
+ mdFile AddFile(LPCWSTR wszFileName);
+ void SetFileHashValue(mdFile tkFile, LPCWSTR wszFullFileName);
+#endif
+ mdAssemblyRef AddAssemblyRef(Assembly *refedAssembly, IMetaDataAssemblyEmit *pAssemEmitter = NULL, BOOL fUsePublicKeyToken = TRUE);
+#ifndef FEATURE_CORECLR
+ mdExportedType AddExportedTypeOnDisk(LPCWSTR wszExportedType, mdToken tkImpl, mdToken tkTypeDef, CorTypeAttr flags);
+ mdExportedType AddExportedTypeInMemory(LPCWSTR wszExportedType, mdToken tkImpl, mdToken tkTypeDef, CorTypeAttr flags);
+ void AddStandAloneResource(LPCWSTR wszName, LPCWSTR wszDescription, LPCWSTR wszMimeType, LPCWSTR wszFileName, LPCWSTR wszFullFileName, int iAttribute);
+ void SaveManifestToDisk(LPCWSTR wszFileName, int entrypoint, int fileKind, DWORD corhFlags, DWORD peFlags);
+#endif // FEATURE_CORECLR
+#ifndef FEATURE_CORECLR
+ void AddDeclarativeSecurity(DWORD dwAction, void const *pValue, DWORD cbValue);
+
+ IMetaDataAssemblyEmit *GetOnDiskMDAssemblyEmitter();
+#endif // FEATURE_CORECLR
+
+ //****************************************************************************************
+
+ DomainAssembly *GetDomainAssembly(AppDomain *pDomain);
+ void SetDomainAssembly(DomainAssembly *pAssembly);
+
+    // Version of GetDomainAssembly that uses the current AppDomain (N/A in DAC builds)
+#ifndef DACCESS_COMPILE
+ DomainAssembly *GetDomainAssembly() { WRAPPER_NO_CONTRACT; return GetDomainAssembly(GetAppDomain()); }
+#endif
+
+ // FindDomainAssembly will return NULL if the assembly is not in the given domain
+ DomainAssembly *FindDomainAssembly(AppDomain *pDomain);
+
+#if defined(FEATURE_COLLECTIBLE_TYPES) && !defined(DACCESS_COMPILE)
+ OBJECTHANDLE GetLoaderAllocatorObjectHandle() { WRAPPER_NO_CONTRACT; return GetLoaderAllocator()->GetLoaderAllocatorObjectHandle(); }
+#endif // FEATURE_COLLECTIBLE_TYPES
+
+ IAssemblySecurityDescriptor *GetSecurityDescriptor(AppDomain *pDomain = NULL);
+ ISharedSecurityDescriptor *GetSharedSecurityDescriptor() { LIMITED_METHOD_CONTRACT; return m_pSharedSecurityDesc; }
+
+#ifndef DACCESS_COMPILE
+ const SecurityTransparencyBehavior *GetSecurityTransparencyBehavior();
+ const SecurityTransparencyBehavior *TryGetSecurityTransparencyBehavior();
+ void SetSecurityTransparencyBehavior(const SecurityTransparencyBehavior *pTransparencyBehavior);
+#endif // !DACCESS_COMPILE
+
+
+ BOOL CanBeShared(DomainAssembly *pAsAssembly);
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ BOOL MissingDependenciesCheckDone();
+ void SetMissingDependenciesCheckDone();
+#ifdef FEATURE_FUSION
+ void SetBindingClosure(IAssemblyBindingClosure* pClosure); // Addrefs. It is assumed the caller did not addref pClosure for us.
+ IAssemblyBindingClosure* GetBindingClosure();
+#endif
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+ void SetDomainNeutral() { LIMITED_METHOD_CONTRACT; m_fIsDomainNeutral = TRUE; }
+ BOOL IsDomainNeutral() { LIMITED_METHOD_DAC_CONTRACT; return m_fIsDomainNeutral; }
+
+ BOOL IsSIMDVectorAssembly() { LIMITED_METHOD_DAC_CONTRACT; return m_fIsSIMDVectorAssembly; }
+
+#ifdef FEATURE_PREJIT
+ BOOL IsInstrumented();
+ BOOL IsInstrumentedHelper();
+#endif // FEATURE_PREJIT
+
+ HRESULT AllocateStrongNameSignature(ICeeFileGen *pCeeFileGen,
+ HCEEFILE ceeFile);
+ HRESULT SignWithStrongName(LPCWSTR wszFileName);
+
+#ifdef FEATURE_FUSION
+ IAssembly* GetFusionAssembly()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_pManifestFile->GetFusionAssembly();
+ }
+
+ IAssemblyName* GetFusionAssemblyName()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_pManifestFile->GetFusionAssemblyName();
+ }
+
+ IAssemblyName* GetFusionAssemblyNameNoCreate()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_pManifestFile->GetFusionAssemblyNameNoCreate();
+ }
+
+ IHostAssembly* GetIHostAssembly()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_pManifestFile->GetIHostAssembly();
+ }
+#endif// FEATURE_FUSION
+
+#ifdef FEATURE_COMINTEROP
+ // Get any cached ITypeLib* for the assembly.
+ ITypeLib *GetTypeLib();
+ // Cache the ITypeLib*, if one is not already cached.
+ void SetTypeLib(ITypeLib *pITLB);
+#endif // FEATURE_COMINTEROP
+
+#ifndef DACCESS_COMPILE
+
+ void DECLSPEC_NORETURN ThrowTypeLoadException(LPCUTF8 pszFullName, UINT resIDWhy);
+
+ void DECLSPEC_NORETURN ThrowTypeLoadException(LPCUTF8 pszNameSpace, LPCUTF8 pTypeName,
+ UINT resIDWhy);
+
+ void DECLSPEC_NORETURN ThrowTypeLoadException(NameHandle *pName, UINT resIDWhy);
+
+ void DECLSPEC_NORETURN ThrowTypeLoadException(IMDInternalImport *pInternalImport,
+ mdToken token,
+ UINT resIDWhy);
+
+ void DECLSPEC_NORETURN ThrowTypeLoadException(IMDInternalImport *pInternalImport,
+ mdToken token,
+ LPCUTF8 pszFieldOrMethodName,
+ UINT resIDWhy);
+
+ void DECLSPEC_NORETURN ThrowTypeLoadException(LPCUTF8 pszNameSpace,
+ LPCUTF8 pszTypeName,
+ LPCUTF8 pszMethodName,
+ UINT resIDWhy);
+
+ void DECLSPEC_NORETURN ThrowBadImageException(LPCUTF8 pszNameSpace,
+ LPCUTF8 pszTypeName,
+ UINT resIDWhy);
+
+ UINT64 GetHostAssemblyId() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_HostAssemblyId;
+ }
+
+#endif // #ifndef DACCESS_COMPILE
+
+ //****************************************************************************************
+ //
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ PEModule * LoadModule_AddRef(mdFile kFile, BOOL fLoadResource);
+ PEModule * RaiseModuleResolveEvent_AddRef(LPCSTR szName, mdFile kFile);
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+ static BOOL FileNotFound(HRESULT hr);
+
+ //****************************************************************************************
+ // Is the given assembly a friend of this assembly?
+ bool GrantsFriendAccessTo(Assembly *pAccessingAssembly, FieldDesc *pFD);
+ bool GrantsFriendAccessTo(Assembly *pAccessingAssembly, MethodDesc *pMD);
+ bool GrantsFriendAccessTo(Assembly *pAccessingAssembly, MethodTable *pMT);
+ bool IgnoresAccessChecksTo(Assembly *pAccessedAssembly);
+
+#ifdef FEATURE_COMINTEROP
+ bool IsImportedFromTypeLib()
+ {
+ WRAPPER_NO_CONTRACT;
+ return ((GetInteropAttributeMask() & INTEROP_ATTRIBUTE_IMPORTED_FROM_TYPELIB) != 0);
+ }
+
+ bool IsPIAOrImportedFromTypeLib()
+ {
+ WRAPPER_NO_CONTRACT;
+ return ((GetInteropAttributeMask() & (INTEROP_ATTRIBUTE_IMPORTED_FROM_TYPELIB | INTEROP_ATTRIBUTE_PRIMARY_INTEROP_ASSEMBLY)) != 0);
+ }
+
+ bool IsPIA()
+ {
+ WRAPPER_NO_CONTRACT;
+ return ((GetInteropAttributeMask() & INTEROP_ATTRIBUTE_PRIMARY_INTEROP_ASSEMBLY) != 0);
+ }
+
+ // Does this assembly contain Windows Metadata?
+ bool IsWinMD();
+
+ // Does this assembly contain Windows Metadata with a managed implementation?
+ bool IsManagedWinMD();
+
+ // Returns the IWinMDImport interface of the manifest module metadata or NULL if this assembly is not a .winmd
+ IWinMDImport *GetManifestWinMDImport();
+#endif
+
+#ifndef FEATURE_CORECLR
+ BOOL SupportsAutoNGen()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_fSupportsAutoNGen;
+ }
+#endif
+
+protected:
+
+ enum {
+ FREE_KEY_PAIR = 4,
+ FREE_KEY_CONTAINER = 8,
+ };
+
+ void ReportAssemblyUse();
+
+#ifdef FEATURE_COMINTEROP
+ enum WinMDStatus
+ {
+ WinMDStatus_Unknown,
+ WinMDStatus_IsPureWinMD,
+ WinMDStatus_IsManagedWinMD,
+ WinMDStatus_IsNotWinMD
+ };
+
+ // Determine whether the assembly is a pure Windows Metadata file, contains a managed
+ // implementation, or is not Windows Metadata at all.
+ WinMDStatus GetWinMDStatus();
+
+ enum InteropAttributeStatus {
+ INTEROP_ATTRIBUTE_UNSET = 0,
+ INTEROP_ATTRIBUTE_CACHED = 1,
+ INTEROP_ATTRIBUTE_IMPORTED_FROM_TYPELIB = 2,
+ INTEROP_ATTRIBUTE_PRIMARY_INTEROP_ASSEMBLY = 4,
+ };
+
+ InteropAttributeStatus GetInteropAttributeMask()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_InteropAttributeStatus & INTEROP_ATTRIBUTE_CACHED)
+ return m_InteropAttributeStatus;
+
+ int mask = INTEROP_ATTRIBUTE_UNSET;
+
+ if (!IsWinMD()) // ignore classic COM interop CAs in .winmd
+ {
+ if (this->GetManifestImport()->GetCustomAttributeByName(TokenFromRid(1, mdtAssembly), INTEROP_IMPORTEDFROMTYPELIB_TYPE, 0, 0) == S_OK)
+ mask |= INTEROP_ATTRIBUTE_IMPORTED_FROM_TYPELIB;
+ if (this->GetManifestImport()->GetCustomAttributeByName(TokenFromRid(1, mdtAssembly), INTEROP_PRIMARYINTEROPASSEMBLY_TYPE, 0, 0) == S_OK)
+ mask |= INTEROP_ATTRIBUTE_PRIMARY_INTEROP_ASSEMBLY;
+ }
+
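+ // Cache the computed mask only for non-dynamic assemblies (a dynamic assembly's attributes may still change).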
+ if (!IsDynamic())
+ {
+ mask |= INTEROP_ATTRIBUTE_CACHED;
+ m_InteropAttributeStatus = static_cast<InteropAttributeStatus>(mask);
+ }
+
+ return static_cast<InteropAttributeStatus>(mask);
+ }
+#endif // FEATURE_COMINTEROP
+
+ // Keep track of the vars that need to be freed.
+ short int m_FreeFlag;
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ // Hash of files in manifest by name to File token
+ PTR_EEUtf8StringHashTable m_pAllowedFiles;
+ // Critical section guarding m_pAllowedFiles
+ Crst m_crstAllowedFiles;
+#endif
+
+private:
+
+ //****************************************************************************************
+
+ void CacheManifestExportedTypes(AllocMemTracker *pamTracker);
+ void CacheManifestFiles();
+
+ void CacheFriendAssemblyInfo();
+
+#ifndef FEATURE_CORECLR
+ void GenerateBreadcrumbForServicing();
+ void WriteBreadcrumb(const SString &ssDisplayName);
+ bool HasServiceableAttribute();
+ bool IsExistingOobAssembly();
+ void CheckDenyList(const SString &ssDisplayName);
+
+ BOOL SupportsAutoNGenWorker();
+#endif
+
+ PTR_BaseDomain m_pDomain; // Parent Domain
+ PTR_ClassLoader m_pClassLoader; // Single Loader
+
+
+
+ PTR_MethodDesc m_pEntryPoint; // Method containing the entry point
+ PTR_Module m_pManifest;
+ PTR_PEAssembly m_pManifestFile;
+ ReflectionModule* m_pOnDiskManifest; // This is the module containing the on disk manifest.
+ BOOL m_fEmbeddedManifest;
+
+ FriendAssemblyDescriptor *m_pFriendAssemblyDescriptor;
+
+ // Strong name key info for reflection emit
+ PBYTE m_pbStrongNameKeyPair;
+ DWORD m_cbStrongNameKeyPair;
+ LPWSTR m_pwStrongNameKeyContainer;
+ StrongNameLevel m_eStrongNameLevel;
+
+ BOOL m_isDynamic;
+#ifdef FEATURE_COLLECTIBLE_TYPES
+ BOOL m_isCollectible;
+#endif // FEATURE_COLLECTIBLE_TYPES
+ // This boolean is used by Reflection.Emit to determine when to hide m_pOnDiskManifest.
+ // Via Reflection.Emit, m_pOnDiskManifest may be defined explicitly by the user (and is then available),
+ // or created implicitly via Save, in which case it needs to be hidden from the user for
+ // backward compatibility reasons.
+ // This is a bit of a workaround, however, and the whole story should be understood better...
+ BOOL m_needsToHideManifestForEmit;
+ DWORD m_dwDynamicAssemblyAccess;
+ DWORD m_nextAvailableModuleIndex;
+ PTR_LoaderAllocator m_pLoaderAllocator;
+ DWORD m_isDisabledPrivateReflection;
+
+#ifdef FEATURE_COMINTEROP
+ // If a TypeLib is ever required for this module, cache the pointer here.
+ ITypeLib *m_pITypeLib;
+ InteropAttributeStatus m_InteropAttributeStatus;
+
+ WinMDStatus m_winMDStatus;
+ IWinMDImport *m_pManifestWinMDImport;
+#endif // FEATURE_COMINTEROP
+
+ ISharedSecurityDescriptor* m_pSharedSecurityDesc; // Security descriptor (permission requests, signature etc)
+ const SecurityTransparencyBehavior *m_pTransparencyBehavior; // Transparency implementation the assembly uses
+
+ BOOL m_fIsDomainNeutral;
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ BOOL m_bMissingDependenciesCheckDone;
+#ifdef FEATURE_FUSION
+ IAssemblyBindingClosure * m_pBindingClosure;
+#endif
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+ DebuggerAssemblyControlFlags m_debuggerFlags;
+
+ BOOL m_fTerminated;
+
+ BOOL m_fIsSIMDVectorAssembly;
+ UINT64 m_HostAssemblyId;
+
+ DWORD m_dwReliabilityContract;
+
+#ifndef FEATURE_CORECLR
+ BOOL m_fSupportsAutoNGen;
+#endif
+};
+
+typedef Assembly::ModuleIterator ModuleIterator;
+
+#ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// FriendAssemblyDescriptor contains information on which assemblies are friends of an assembly, as well as
+// which individual internals are visible to those friend assemblies.
+//
+
+class FriendAssemblyDescriptor
+{
+public:
+ ~FriendAssemblyDescriptor();
+
+ static
+ FriendAssemblyDescriptor *CreateFriendAssemblyDescriptor(PEAssembly *pAssembly);
+
+ //---------------------------------------------------------------------------------------
+ //
+ // Checks to see if an assembly has friend access to a particular member.
+ //
+ // Arguments:
+ // pAccessingAssembly - the assembly requesting friend access
+ // pMember - the member that is attempting to be accessed
+ //
+ // Return Value:
+ // true if friend access is allowed, false otherwise
+ //
+ // Notes:
+ // Template type T should be either FieldDesc, MethodDesc, or MethodTable.
+ //
+
+ template <class T>
+ bool GrantsFriendAccessTo(Assembly *pAccessingAssembly, T *pMember)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pAccessingAssembly));
+ PRECONDITION(CheckPointer(pMember));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+ if (IsAssemblyOnList(pAccessingAssembly, m_alPartialAccessFriendAssemblies))
+ {
+ return FriendAccessAppliesTo(pMember) && IsMemberVisibleToFriends(pMember);
+ }
+ else
+#endif // FEATURE_FRAMEWORK_INTERNAL
+ if (IsAssemblyOnList(pAccessingAssembly, m_alFullAccessFriendAssemblies))
+ {
+ return true;
+ }
+#if defined(FEATURE_STRONGNAME_TESTKEY_ALLOWED) && defined(FEATURE_FRAMEWORK_INTERNAL)
+ else if (pMember->GetModule()->GetFile()->GetAssembly()->IsProfileAssembly() &&
+ pAccessingAssembly->GetManifestFile() != NULL &&
+ pAccessingAssembly->GetManifestFile()->IsProfileTestAssembly())
+ {
+ // Test hook - all platform assemblies consider any test assembly that is part of the profile to
+ // implicitly be on the friends list. This allows test access to the framework-internal attributes
+ // without having to add test assemblies to the explicit friend assembly list.
+ return FriendAccessAppliesTo(pMember) && IsMemberVisibleToFriends(pMember);
+ }
+#endif // FEATURE_STRONGNAME_TESTKEY_ALLOWED && FEATURE_FRAMEWORK_INTERNAL
+ else
+ {
+ return false;
+ }
+ }
+
+#ifndef FEATURE_CORECLR
+ //------------------------------------------------------------------------------
+ // It is undesirable to reintroduce the concept of inquiring about friendship without specifying a
+ // member or type, but it is necessary for TP. In case of doubt, it is safer to return "true", as this
+ // won't affect correctness (but might cause unnecessary NGens when updating assemblies).
+ //------------------------------------------------------------------------------
+ bool MightGrantFriendAccessTo(PEAssembly *pAccessingAssembly)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pAccessingAssembly));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+ if (IsAssemblyOnList(pAccessingAssembly, m_alPartialAccessFriendAssemblies))
+ {
+ return true;
+ }
+ else
+#endif // FEATURE_FRAMEWORK_INTERNAL
+ if (IsAssemblyOnList(pAccessingAssembly, m_alFullAccessFriendAssemblies))
+ {
+ return true;
+ }
+ return false;
+ }
+#endif // !FEATURE_CORECLR
+
+ bool IgnoresAccessChecksTo(Assembly *pAccessedAssembly)
+ {
+ return IsAssemblyOnList(pAccessedAssembly, m_subjectAssemblies);
+ }
+
+private:
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+ static const LPCSTR AllInternalsVisibleProperty;
+ static const DWORD FriendMemberHashSize = 31; // Number of buckets in the friend member hash table
+#endif // FEATURE_FRAMEWORK_INTERNAL
+
+#ifdef FEATURE_FUSION
+ typedef IAssemblyName FriendAssemblyName_t;
+ typedef NonVMComHolder<IAssemblyName> FriendAssemblyNameHolder;
+#else // FEATURE_FUSION
+ typedef AssemblySpec FriendAssemblyName_t;
+ typedef NewHolder<AssemblySpec> FriendAssemblyNameHolder;
+#endif // FEATURE_FUSION
+
+ ArrayList m_alFullAccessFriendAssemblies; // Friend assemblies which have access to all internals
+ ArrayList m_subjectAssemblies; // Subject assemblies which we will not perform access checks against
+
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+ ArrayList m_alPartialAccessFriendAssemblies; // Friend assemblies which have access to only specific internals
+ EEPtrHashTable m_htFriendMembers; // Cache of internal members checked for visibility to friend assemblies
+ Crst m_crstFriendMembersCache; // Critical section guarding m_htFriendMembers
+#endif // FEATURE_FRAMEWORK_INTERNAL
+
+ FriendAssemblyDescriptor();
+
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+ void AddFriendAssembly(FriendAssemblyName_t *pFriendAssembly, bool fAllInternalsVisible);
+#else // FEATURE_FRAMEWORK_INTERNAL
+ void AddFriendAssembly(FriendAssemblyName_t *pFriendAssembly);
+#endif // FEATURE_FRAMEWORK_INTERNAL
+ void AddSubjectAssembly(FriendAssemblyName_t *pSubjectAssembly);
+
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+ static
+ bool FriendAccessAppliesTo(FieldDesc *pFD);
+
+ static
+ bool FriendAccessAppliesTo(MethodDesc *pMD);
+
+ static
+ bool FriendAccessAppliesTo(MethodTable *pMT);
+
+ static
+ mdToken GetMetadataToken(FieldDesc *pFD);
+
+ static
+ mdToken GetMetadataToken(MethodDesc *pMD);
+
+ static
+ mdToken GetMetadataToken(MethodTable *pMT);
+
+ static
+ bool HasFriendAccessAttribute(IMDInternalImport *pMDImport);
+#endif // FEATURE_FRAMEWORK_INTERNAL
+
+ static
+ bool IsAssemblyOnList(Assembly *pAssembly, const ArrayList &alAssemblyNames)
+ {
+ return IsAssemblyOnList(pAssembly->GetManifestFile(), alAssemblyNames);
+ }
+
+ static
+ bool IsAssemblyOnList(PEAssembly *pAssembly, const ArrayList &alAssemblyNames);
+
+#ifdef FEATURE_FRAMEWORK_INTERNAL
+ bool HasFriendAccessAttribute(IMDInternalImport *pMDImport, mdToken tkMember);
+
+ //---------------------------------------------------------------------------------------
+ //
+ // Checks to see if a specific member has the FriendAccessAllowed attribute
+ //
+ //
+
+ template<class T>
+ bool IsMemberVisibleToFriends(T *pMember)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pMember));
+ PRECONDITION(FriendAccessAppliesTo(pMember));
+ }
+ CONTRACTL_END;
+
+ CrstHolder lock(&m_crstFriendMembersCache);
+
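+ // Consult the cache first; the boolean result is stored directly in the HashDatum value rather than behind a pointer.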
+ HashDatum hd;
+ if (!m_htFriendMembers.GetValue(pMember, &hd))
+ {
+ bool fAllowsAccess = HasFriendAccessAttribute(pMember->GetMDImport(), GetMetadataToken(pMember));
+ hd = reinterpret_cast<HashDatum>(fAllowsAccess);
+
+ m_htFriendMembers.InsertValue(pMember, hd);
+ }
+
+ return static_cast<bool>(!!hd);
+ }
+#endif // FEATURE_FRAMEWORK_INTERNAL
+};
+
+#endif // !DACCESS_COMPILE
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+class ExistingOobAssemblyList
+{
+public:
+#ifndef DACCESS_COMPILE
+ ExistingOobAssemblyList();
+
+ bool IsOnlist(Assembly *pAssembly);
+
+ static void Init();
+ static ExistingOobAssemblyList *Instance() { return s_pInstance; }
+#endif
+
+private:
+ ArrayList m_alExistingOobAssemblies;
+
+ // The single instance of this class:
+ static ExistingOobAssemblyList *s_pInstance;
+};
+#endif // !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+
+#endif
diff --git a/src/vm/assemblyname.cpp b/src/vm/assemblyname.cpp
new file mode 100644
index 0000000000..a0a1426529
--- /dev/null
+++ b/src/vm/assemblyname.cpp
@@ -0,0 +1,304 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: AssemblyName.cpp
+**
+** Purpose: Implements AssemblyName (loader domain) architecture
+**
+**
+
+
+**
+===========================================================*/
+
+#include "common.h"
+
+#include <stdlib.h>
+#include <shlwapi.h>
+
+#include "assemblyname.hpp"
+#include "security.h"
+#include "field.h"
+#ifdef FEATURE_FUSION
+#include "fusion.h"
+#endif
+#include "strongname.h"
+#include "eeconfig.h"
+
+#ifndef URL_ESCAPE_AS_UTF8
+#define URL_ESCAPE_AS_UTF8 0x00040000 // Percent-encode all non-ASCII characters as their UTF-8 equivalents.
+#endif
+
+FCIMPL1(Object*, AssemblyNameNative::GetFileInformation, StringObject* filenameUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ ASSEMBLYNAMEREF result;
+ STRINGREF filename;
+ } gc;
+
+ gc.result = NULL;
+ gc.filename = (STRINGREF) filenameUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ if (gc.filename == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_FileName"));
+
+ if (gc.filename->GetStringLength() == 0)
+ COMPlusThrow(kArgumentException, W("Argument_EmptyFileName"));
+
+ gc.result = (ASSEMBLYNAMEREF) AllocateObject(MscorlibBinder::GetClass(CLASS__ASSEMBLY_NAME));
+
+
+ ///////////////////////////////////////////////
+ SString sFileName(gc.filename->GetBuffer());
+ PEImageHolder pImage = PEImage::OpenImage(sFileName, MDInternalImport_NoCache);
+
+ EX_TRY
+ {
+ pImage->VerifyIsAssembly();
+ }
+ EX_CATCH
+ {
+ Exception *ex = GET_EXCEPTION();
+ EEFileLoadException::Throw(sFileName,ex->GetHR(),ex);
+ }
+ EX_END_CATCH_UNREACHABLE;
+
+ SString sUrl = sFileName;
+ PEAssembly::PathToUrl(sUrl);
+
+ AssemblySpec spec;
+ spec.InitializeSpec(TokenFromRid(mdtAssembly,1),pImage->GetMDImport(),NULL,TRUE);
+ spec.SetCodeBase(sUrl);
+ spec.AssemblyNameInit(&gc.result, pImage);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(gc.result);
+}
+FCIMPLEND
+
+FCIMPL1(Object*, AssemblyNameNative::ToString, Object* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF pObj = NULL;
+ ASSEMBLYNAMEREF pThis = (ASSEMBLYNAMEREF) (OBJECTREF) refThisUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(pThis);
+
+ if (pThis == NULL)
+ COMPlusThrow(kNullReferenceException, W("NullReference_This"));
+
+ Thread *pThread = GetThread();
+
+ CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+
+ AssemblySpec spec;
+ spec.InitializeSpec(&(pThread->m_MarshalAlloc), (ASSEMBLYNAMEREF*) &pThis, FALSE, FALSE);
+
+ StackSString name;
+#ifndef FEATURE_FUSION
+ spec.GetFileOrDisplayName(ASM_DISPLAYF_VERSION |
+ ASM_DISPLAYF_CULTURE |
+ ASM_DISPLAYF_PUBLIC_KEY_TOKEN,
+ name);
+#else
+ spec.GetFileOrDisplayName(0, name);
+#endif // FEATURE_FUSION
+
+ pObj = (OBJECTREF) StringObject::NewString(name);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(pObj);
+}
+FCIMPLEND
+
+
+FCIMPL1(Object*, AssemblyNameNative::GetPublicKeyToken, Object* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF orOutputArray = NULL;
+ OBJECTREF refThis = (OBJECTREF) refThisUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+
+ if (refThis == NULL)
+ COMPlusThrow(kNullReferenceException, W("NullReference_This"));
+
+ ASSEMBLYNAMEREF orThis = (ASSEMBLYNAMEREF)refThis;
+ U1ARRAYREF orPublicKey = orThis->GetPublicKey();
+
+ if (orPublicKey != NULL) {
+ DWORD cb = orPublicKey->GetNumComponents();
+ StrongNameBufferHolder<BYTE> pbToken;
+
+ if (cb) {
+ CQuickBytes qb;
+ BYTE *pbKey = (BYTE*) qb.AllocThrows(cb);
+ memcpy(pbKey, orPublicKey->GetDataPtr(), cb);
+
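+ // Call out to the strong name API in preemptive GC mode.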
+ {
+ GCX_PREEMP();
+ if (!StrongNameTokenFromPublicKey(pbKey, cb, &pbToken, &cb))
+ COMPlusThrowHR(StrongNameErrorInfo());
+ }
+ }
+
+ Security::CopyEncodingToByteArray(pbToken, cb, &orOutputArray);
+ }
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(orOutputArray);
+}
+FCIMPLEND
+
+#ifndef FEATURE_CORECLR
+FCIMPL1(Object*, AssemblyNameNative::EscapeCodeBase, StringObject* filenameUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ STRINGREF rv = NULL;
+ STRINGREF filename = (STRINGREF) filenameUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(filename);
+
+ LPWSTR pCodeBase = NULL;
+ DWORD dwCodeBase = 0;
+ CQuickBytes qb;
+
+ if (filename != NULL) {
+ WCHAR* pString;
+ int iString;
+ filename->RefInterpretGetStringValuesDangerousForGC(&pString, &iString);
+ dwCodeBase = (DWORD) iString;
+ pCodeBase = (LPWSTR) qb.AllocThrows((++dwCodeBase) * sizeof(WCHAR));
+ memcpy(pCodeBase, pString, dwCodeBase*sizeof(WCHAR));
+ }
+
+ if(pCodeBase) {
+ CQuickBytes qb2;
+ DWORD dwEscaped = 1;
+
+ DWORD flags = 0;
+ if (RunningOnWin7())
+ flags |= URL_ESCAPE_AS_UTF8;
+
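+ // The first call is expected to fail with a too-small buffer and report the required length in dwEscaped.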
+ UrlEscape(pCodeBase, (LPWSTR) qb2.Ptr(), &dwEscaped, flags);
+
+ LPWSTR result = (LPWSTR)qb2.AllocThrows((++dwEscaped) * sizeof(WCHAR));
+ HRESULT hr = UrlEscape(pCodeBase, result, &dwEscaped, flags);
+
+ if (SUCCEEDED(hr))
+ rv = StringObject::NewString(result);
+ else
+ COMPlusThrowHR(hr);
+ }
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(rv);
+}
+FCIMPLEND
+#endif // !FEATURE_CORECLR
+
+FCIMPL4(void, AssemblyNameNative::Init, Object * refThisUNSAFE, OBJECTREF * pAssemblyRef, CLR_BOOL fForIntrospection, CLR_BOOL fRaiseResolveEvent)
+{
+ FCALL_CONTRACT;
+
+ ASSEMBLYNAMEREF pThis = (ASSEMBLYNAMEREF) (OBJECTREF) refThisUNSAFE;
+ HRESULT hr = S_OK;
+
+ HELPER_METHOD_FRAME_BEGIN_1(pThis);
+
+ *pAssemblyRef = NULL;
+
+ if (pThis == NULL)
+ COMPlusThrow(kNullReferenceException, W("NullReference_This"));
+
+ Thread * pThread = GetThread();
+
+ CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+
+ AssemblySpec spec;
+ hr = spec.InitializeSpec(&(pThread->m_MarshalAlloc), (ASSEMBLYNAMEREF *) &pThis, TRUE, FALSE);
+
+ if (SUCCEEDED(hr))
+ {
+ spec.AssemblyNameInit(&pThis,NULL);
+ }
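+ // A name that Fusion cannot parse may still be satisfied by the AssemblyResolve event.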
+ else if ((hr == FUSION_E_INVALID_NAME) && fRaiseResolveEvent)
+ {
+ Assembly * pAssembly = GetAppDomain()->RaiseAssemblyResolveEvent(&spec, fForIntrospection, FALSE);
+
+ if (pAssembly == NULL)
+ {
+ EEFileLoadException::Throw(&spec, hr);
+ }
+ else
+ {
+ *((OBJECTREF *) (&(*pAssemblyRef))) = pAssembly->GetExposedObject();
+ }
+ }
+ else
+ {
+ ThrowHR(hr);
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+/// "parse" tells us to parse the simple name of the assembly as if it was the full name
+/// almost never the right thing to do, but needed for compat
+/* static */
+FCIMPL3(FC_BOOL_RET, AssemblyNameNative::ReferenceMatchesDefinition, AssemblyNameBaseObject* refUNSAFE, AssemblyNameBaseObject* defUNSAFE, CLR_BOOL fParse)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ ASSEMBLYNAMEREF pRef;
+ ASSEMBLYNAMEREF pDef;
+ } gc;
+ gc.pRef = (ASSEMBLYNAMEREF)ObjectToOBJECTREF (refUNSAFE);
+ gc.pDef = (ASSEMBLYNAMEREF)ObjectToOBJECTREF (defUNSAFE);
+
+ BOOL result = FALSE;
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ Thread *pThread = GetThread();
+
+ CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+
+ if (gc.pRef == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_AssemblyName"));
+ if (gc.pDef == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_AssemblyName"));
+
+ AssemblySpec refSpec;
+ refSpec.InitializeSpec(&(pThread->m_MarshalAlloc), (ASSEMBLYNAMEREF*) &gc.pRef, fParse, FALSE);
+
+ AssemblySpec defSpec;
+ defSpec.InitializeSpec(&(pThread->m_MarshalAlloc), (ASSEMBLYNAMEREF*) &gc.pDef, fParse, FALSE);
+
+#ifdef FEATURE_FUSION
+ SafeComHolder<IAssemblyName> pRefName (NULL);
+ IfFailThrow(refSpec.CreateFusionName(&pRefName, FALSE));
+
+ SafeComHolder <IAssemblyName> pDefName (NULL);
+ IfFailThrow(defSpec.CreateFusionName(&pDefName, FALSE));
+
+ // Order matters: Ref->IsEqual(Def)
+ result = (S_OK == pRefName->IsEqual(pDefName, ASM_CMPF_IL_ALL));
+#else
+ result=AssemblySpec::RefMatchesDef(&refSpec,&defSpec);
+#endif
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(result);
+}
+FCIMPLEND
diff --git a/src/vm/assemblyname.hpp b/src/vm/assemblyname.hpp
new file mode 100644
index 0000000000..bfe982caa3
--- /dev/null
+++ b/src/vm/assemblyname.hpp
@@ -0,0 +1,32 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: AssemblyName.hpp
+**
+** Purpose: Implements AssemblyName (loader domain) architecture
+**
+**
+
+
+**
+===========================================================*/
+#ifndef _AssemblyName_H
+#define _AssemblyName_H
+
+class AssemblyNameNative
+{
+public:
+ static FCDECL1(Object*, GetFileInformation, StringObject* filenameUNSAFE);
+ static FCDECL1(Object*, ToString, Object* refThisUNSAFE);
+ static FCDECL1(Object*, GetPublicKeyToken, Object* refThisUNSAFE);
+ static FCDECL1(Object*, EscapeCodeBase, StringObject* filenameUNSAFE);
+ static FCDECL4(void, Init, Object * refThisUNSAFE, OBJECTREF * pAssemblyRef, CLR_BOOL fForIntrospection, CLR_BOOL fRaiseResolveEvent);
+ static FCDECL3(FC_BOOL_RET, ReferenceMatchesDefinition, AssemblyNameBaseObject* refUNSAFE, AssemblyNameBaseObject* defUNSAFE, CLR_BOOL fParse);
+};
+
+#endif // _AssemblyName_H
+
diff --git a/src/vm/assemblynamelist.h b/src/vm/assemblynamelist.h
new file mode 100644
index 0000000000..476163fdc9
--- /dev/null
+++ b/src/vm/assemblynamelist.h
@@ -0,0 +1,111 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// assemblynamelist.h
+//
+
+//
+//
+/// Provides a class to implement lookups by assembly name.
+/// The comparison always checks the simple name and never checks culture;
+/// version, public key token, and processor architecture are checked only if
+/// they are present in the stored name.
+///
+/// ALSO: it leaks the stored assembly names, so it can currently be used only for globals.
+
+
+#ifndef ASSEMBLYNAMELISTHASHTRAITS_H
+#define ASSEMBLYNAMELISTHASHTRAITS_H
+
+#include "naming.h"
+
+class AssemblyNameListHashTraits : public NoRemoveSHashTraits<DefaultSHashTraits<IAssemblyName*> >
+{
+public:
+ typedef IAssemblyName* key_t;
+
+ static key_t GetKey(element_t pName)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return pName;
+ }
+ static const element_t Null()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return NULL;
+ }
+ static bool IsNull(const element_t &name)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (name == NULL);
+ }
+ static BOOL Equals(key_t pIAssemblyNameInMap, key_t pIAssemblyNameToCheck)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pIAssemblyNameInMap));
+ PRECONDITION(CheckPointer(pIAssemblyNameToCheck));
+ }
+ CONTRACTL_END;
+
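+ // Always compare simple names; add further comparison flags only for properties the stored name actually carries.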
+ DWORD dwMask = ASM_CMPF_NAME;
+ if (CAssemblyName::IsStronglyNamed(pIAssemblyNameInMap))
+ dwMask |= ASM_CMPF_PUBLIC_KEY_TOKEN;
+
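+ // Probe with a null buffer: ERROR_INSUFFICIENT_BUFFER means the property is set on the stored name.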
+ DWORD cbSize = 0;
+ HRESULT hr = pIAssemblyNameInMap->GetProperty(ASM_NAME_MAJOR_VERSION, static_cast<PBYTE>(nullptr), &cbSize);
+ if (hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ dwMask |= ASM_CMPF_VERSION;
+
+ cbSize = 0;
+ hr = pIAssemblyNameInMap->GetProperty(ASM_NAME_ARCHITECTURE, static_cast<PBYTE>(nullptr), &cbSize);
+ if (hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ dwMask |= ASM_CMPF_ARCHITECTURE;
+
+
+ hr = pIAssemblyNameToCheck->IsEqual(pIAssemblyNameInMap,
+ dwMask);
+ return (hr == S_OK);
+ }
+
+ static count_t Hash(key_t pIAssemblyName)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pIAssemblyName));
+ }
+ CONTRACTL_END;
+
+ DWORD dwHash = 0;
+
+ // use only simple name for hashing
+ if (FAILED(CAssemblyName::GetHash(pIAssemblyName,0,
+ 0xffffffff,
+ &dwHash)))
+ {
+ // Returning bogus hash is safe; it will cause Equals to be called more often
+ dwHash = 0;
+ }
+
+ return static_cast<count_t>(dwHash);
+ }
+};
+
+
+typedef SHash<AssemblyNameListHashTraits> AssemblyNameList;
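+
+// Illustrative use (a sketch only; pName and pNameToFind are IAssemblyName* obtained elsewhere):
+//
+//   AssemblyNameList list;
+//   list.Add(pName);                        // the stored name is leaked by design, see above
+//   if (list.Lookup(pNameToFind) != NULL)
+//   {
+//       // a matching entry exists
+//   }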
+
+#endif // ASSEMBLYNAMELISTHASHTRAITS_H
diff --git a/src/vm/assemblynamesconfigfactory.cpp b/src/vm/assemblynamesconfigfactory.cpp
new file mode 100644
index 0000000000..68fefc8029
--- /dev/null
+++ b/src/vm/assemblynamesconfigfactory.cpp
@@ -0,0 +1,265 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// AssemblyNamesConfigFactory.cpp
+//
+
+//
+//
+// Parses XML files and adds runtime entries to the assembly list.
+// Abstract; derived classes need to override AddAssemblyName.
+#include "common.h"
+#include <xmlparser.h>
+#include <objbase.h>
+#include "parse.h"
+#include "assemblynamesconfigfactory.h"
+
+
+#define ISWHITE(ch) (((ch) >= 0x09 && (ch) <= 0x0D) || (ch) == 0x20)
+
+#define CONST_STRING_AND_LEN(str) str, NumItems(str)-1
+
+extern int EEXMLStringCompare(const WCHAR *pStr1,
+ DWORD cchStr1,
+ const WCHAR *pStr2,
+ DWORD cchStr2);
+extern HRESULT VersionFromString(LPCWSTR wzVersion, WORD *pwVerMajor, WORD *pwVerMinor,
+ WORD *pwVerBld, WORD *pwVerRev);
+extern HRESULT MapProcessorArchitectureToPEKIND(LPCWSTR pwzProcArch, PEKIND *pe);
+
+AssemblyNamesConfigFactory::AssemblyNamesConfigFactory()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pAssemblyName = NULL;
+ m_bCurrentEntryInvalid = TRUE;
+ m_dwCurrentElementDepth = 0;
+ m_dwProperty = ASM_NAME_MAX_PARAMS;
+}
+
+AssemblyNamesConfigFactory::~AssemblyNamesConfigFactory()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+
+HRESULT STDMETHODCALLTYPE AssemblyNamesConfigFactory::NotifyEvent(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ XML_NODEFACTORY_EVENT iEvt)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return S_OK;
+}
+
+HRESULT STDMETHODCALLTYPE AssemblyNamesConfigFactory::BeginChildren(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ XML_NODE_INFO __RPC_FAR *pNodeInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_dwCurrentElementDepth++;
+
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------
+HRESULT STDMETHODCALLTYPE AssemblyNamesConfigFactory::EndChildren(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ BOOL fEmptyNode,
+ /* [in] */ XML_NODE_INFO __RPC_FAR *pNodeInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
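+ // Closing depth 1 means an assemblyIdentity element ended: publish the accumulated name if it parsed cleanly.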
+ if (m_dwCurrentElementDepth == 1 && m_pAssemblyName != NULL)
+ {
+ if (!m_bCurrentEntryInvalid)
+ {
+ // publish
+ AddAssemblyName(m_pAssemblyName);
+ }
+ m_pAssemblyName->Release();
+ m_pAssemblyName = NULL;
+ }
+
+ if (!fEmptyNode)
+ m_dwCurrentElementDepth--;
+ }
+ EX_CATCH_HRESULT(hr);
+ return hr;
+}
+
+
+
+HRESULT STDMETHODCALLTYPE AssemblyNamesConfigFactory::CreateNode(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ PVOID pNode,
+ /* [in] */ USHORT cNumRecs,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR * __RPC_FAR apNodeInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ if(m_dwCurrentElementDepth > 1)
+ return S_OK;
+
+ HRESULT hr = S_OK;
+
+ for(DWORD i = 0; i < cNumRecs; i++) {
+ CONTRACT_VIOLATION(ThrowsViolation); // Lots of stuff in here throws!
+
+ if(apNodeInfo[i]->dwType == XML_ELEMENT ||
+ apNodeInfo[i]->dwType == XML_ATTRIBUTE ||
+ apNodeInfo[i]->dwType == XML_PCDATA)
+ {
+
+ DWORD dwStringSize = apNodeInfo[i]->ulLen;
+ LPWSTR pszString = (WCHAR*) apNodeInfo[i]->pwcText;
+ // Trim the value
+
+ // Note: never decrement dwStringSize once it reaches 0 - it is unsigned.
+
+ for(;*pszString && ISWHITE(*pszString) && dwStringSize>0; pszString++, dwStringSize--);
+ while( dwStringSize > 0 && ISWHITE(pszString[dwStringSize-1]))
+ dwStringSize--;
+ switch(apNodeInfo[i]->dwType)
+ {
+ case XML_ELEMENT :
+ if(EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("assemblyIdentity"))) == 0)
+ {
+ // new entry
+ _ASSERTE(m_pAssemblyName == NULL);
+ IfFailRet(CreateAssemblyNameObject(&m_pAssemblyName, NULL,0,NULL));
+ m_bCurrentEntryInvalid = FALSE;
+ }
+ else
+ {
+ m_bCurrentEntryInvalid = TRUE;
+ }
+
+ break;
+
+
+ case XML_ATTRIBUTE :
+ if(m_bCurrentEntryInvalid)
+ break;
+
+ if(EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("name"))) == 0)
+ {
+ m_dwProperty = ASM_NAME_NAME;
+ }
+ else
+ if(EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("version"))) == 0)
+ {
+ m_dwProperty = ASM_NAME_MAJOR_VERSION;
+ }
+ else
+ if(EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("publicKeyToken"))) == 0)
+ {
+ m_dwProperty = ASM_NAME_PUBLIC_KEY_TOKEN;
+ }
+ else
+ if(EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("processorArchitecture"))) == 0)
+ {
+ m_dwProperty = ASM_NAME_ARCHITECTURE;
+ }
+ else
+ {
+ m_bCurrentEntryInvalid = TRUE;
+ }
+ break;
+
+
+ case XML_PCDATA :
+ if(m_bCurrentEntryInvalid)
+ break;
+
+ _ASSERTE(m_pAssemblyName!= NULL); // can only be null if m_bCurrentEntryInvalid
+ switch(m_dwProperty)
+ {
+ case ASM_NAME_NAME:
+ {
+ StackSString s(pszString,dwStringSize);
+ // SetProperty takes a byte count, hence the multiplication by sizeof(WCHAR)
+ IfFailRet(m_pAssemblyName->SetProperty(ASM_NAME_NAME, LPCWSTR(s), (dwStringSize+1)*sizeof(WCHAR)));
+ }
+ break;
+ case ASM_NAME_MAJOR_VERSION:
+ {
+ StackSString s(pszString,dwStringSize);
+ WORD wVerMajor = 0;
+ WORD wVerMinor = 0;
+ WORD wVerBld = 0;
+ WORD wVerRev = 0;
+ if (SUCCEEDED(VersionFromString(s, &wVerMajor, &wVerMinor, &wVerBld, &wVerRev)))
+ {
+ IfFailRet(m_pAssemblyName->SetProperty(ASM_NAME_MAJOR_VERSION, &wVerMajor, sizeof(WORD)));
+ IfFailRet(m_pAssemblyName->SetProperty(ASM_NAME_MINOR_VERSION, &wVerMinor, sizeof(WORD)));
+ IfFailRet(m_pAssemblyName->SetProperty(ASM_NAME_BUILD_NUMBER, &wVerBld, sizeof(WORD)));
+ IfFailRet(m_pAssemblyName->SetProperty(ASM_NAME_REVISION_NUMBER, &wVerRev, sizeof(WORD)));
+ }
+ else
+ m_bCurrentEntryInvalid = TRUE;
+
+ }
+ break;
+ case ASM_NAME_ARCHITECTURE:
+ {
+ StackSString s(pszString,dwStringSize);
+ PEKIND PeKind = peNone;
+ if(SUCCEEDED(MapProcessorArchitectureToPEKIND(s, &PeKind)))
+ {
+ IfFailRet(m_pAssemblyName->SetProperty(ASM_NAME_ARCHITECTURE, (LPBYTE) &PeKind, sizeof(PeKind)));
+ }
+ else
+ {
+ m_bCurrentEntryInvalid = TRUE;
+ }
+
+ }
+ break;
+ case ASM_NAME_PUBLIC_KEY_TOKEN:
+ {
+ if(EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("null"))) == 0)
+ {
+ IfFailRet(m_pAssemblyName->SetProperty(ASM_NAME_NULL_PUBLIC_KEY_TOKEN, NULL, 0));
+ }
+ else
+ {
+ if (dwStringSize % 2 != 0)
+ return FUSION_E_INVALID_NAME;
+
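+ // Two hex characters encode one byte of the token.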
+ DWORD cbProp = dwStringSize / 2;
+ NewHolder<BYTE> pbProp = new BYTE[cbProp];
+ CParseUtils::UnicodeHexToBin(pszString, dwStringSize, pbProp); // decode the hex string into bytes (the result is not checked)
+ IfFailRet(m_pAssemblyName->SetProperty(ASM_NAME_PUBLIC_KEY_TOKEN, pbProp, cbProp));
+ }
+ break;
+ }
+
+ default:
+ _ASSERTE(!"Invalid format");
+ m_bCurrentEntryInvalid = TRUE;
+ break;
+ }
+ break;
+ }
+
+ }
+ }
+ return S_OK;
+}
diff --git a/src/vm/assemblynamesconfigfactory.h b/src/vm/assemblynamesconfigfactory.h
new file mode 100644
index 0000000000..8a38f7ff42
--- /dev/null
+++ b/src/vm/assemblynamesconfigfactory.h
@@ -0,0 +1,73 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// AssemblyNamesConfigFactory.h
+//
+
+//
+//
+// Parses XML files and adds runtime entries to the assembly list.
+// Abstract; derived classes need to override AddAssemblyName.
+
+
+#ifndef ASSEMBLYNAMESCONFIGFACTORY_H
+#define ASSEMBLYNAMESCONFIGFACTORY_H
+
+#include "unknwn.h"
+#include "../xmlparser/_reference.h"
+#include "../xmlparser/_unknown.h"
+
+
+class AssemblyNamesConfigFactory : public _unknown<IXMLNodeFactory, &IID_IXMLNodeFactory>
+{
+
+public:
+ AssemblyNamesConfigFactory ();
+ ~AssemblyNamesConfigFactory ();
+ HRESULT STDMETHODCALLTYPE NotifyEvent(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ XML_NODEFACTORY_EVENT iEvt);
+
+ HRESULT STDMETHODCALLTYPE BeginChildren(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR pNodeInfo);
+
+ HRESULT STDMETHODCALLTYPE EndChildren(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ BOOL fEmptyNode,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR pNodeInfo);
+
+ HRESULT STDMETHODCALLTYPE Error(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ HRESULT hrErrorCode,
+ /* [in] */ USHORT cNumRecs,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR * __RPC_FAR apNodeInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+ /*
+ UNUSED(pSource);
+ UNUSED(hrErrorCode);
+ UNUSED(cNumRecs);
+ UNUSED(apNodeInfo);
+ */
+ return hrErrorCode;
+ }
+
+ HRESULT STDMETHODCALLTYPE CreateNode(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ PVOID pNodeParent,
+ /* [in] */ USHORT cNumRecs,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR * __RPC_FAR apNodeInfo);
+
+ virtual void AddAssemblyName(IAssemblyName*) = 0;
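+
+ // A minimal derived class might look like this (sketch; CollectingFactory is hypothetical):
+ //
+ //   class CollectingFactory : public AssemblyNamesConfigFactory
+ //   {
+ //       virtual void AddAssemblyName(IAssemblyName* pName)
+ //       {
+ //           // AddRef and store pName for later use
+ //       }
+ //   };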
+protected:
+ IAssemblyName* m_pAssemblyName;
+ BOOL m_bCurrentEntryInvalid;
+ DWORD m_dwCurrentElementDepth;
+ DWORD m_dwProperty;
+
+};
+
+
+#endif
diff --git a/src/vm/assemblynative.cpp b/src/vm/assemblynative.cpp
new file mode 100644
index 0000000000..3b542cddd8
--- /dev/null
+++ b/src/vm/assemblynative.cpp
@@ -0,0 +1,2616 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: AssemblyNative.cpp
+**
+** Purpose: Implements AssemblyNative (loader domain) architecture
+**
+**
+
+
+**
+===========================================================*/
+
+#include "common.h"
+
+#include <shlwapi.h>
+#include <stdlib.h>
+#ifdef FEATURE_FUSION
+#include "actasm.h"
+#include "appctx.h"
+#include "asm.h"
+#endif
+#include "assemblynative.hpp"
+#include "field.h"
+#include "assemblyname.hpp"
+#include "eeconfig.h"
+#include "security.h"
+#include "strongname.h"
+#include "interoputil.h"
+#include "frames.h"
+#include "typeparse.h"
+#ifdef FEATURE_REMOTING
+#include "appdomainhelper.h"
+#endif
+#include "stackprobe.h"
+#ifdef FEATURE_FUSION
+#include "dbglog.h"
+#include "bindinglog.hpp"
+#include "policy.h"
+#endif
+
+#ifdef FEATURE_CORECLR
+#include "appdomainnative.hpp"
+#include "../binder/inc/clrprivbindercoreclr.h"
+#endif // FEATURE_CORECLR
+
+#ifndef FEATURE_CORECLR
+#include "assemblynativeresource.h"
+#endif // !FEATURE_CORECLR
+
+#if defined(FEATURE_HOSTED_BINDER) && !defined(FEATURE_CORECLR)
+#include "clrprivbinderloadfile.h"
+#endif
+
+
+#ifdef FEATURE_FUSION
+//----------------------------------------------------------------------------------------------------
+// Allows managed code in mscorlib to find out whether an assembly name corresponds to mscorlib,
+// a .NET Framework assembly found in the unification list (see fxretarget.h), or a portable assembly (see portabilityPolicy.cpp)
+// See Fusion::Util::IsAnyFrameworkAssembly for more details.
+// The NGEN task uses this function (via System.Reflection.RuntimeAssembly.IsFrameworkAssembly)
+FCIMPL1(FC_BOOL_RET, AssemblyNative::IsFrameworkAssembly, AssemblyNameBaseObject* refAssemblyNameUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ ASSEMBLYNAMEREF assemblyName;
+ } gc;
+ gc.assemblyName = (ASSEMBLYNAMEREF) refAssemblyNameUNSAFE;
+
+ BOOL bIsFxAssembly = FALSE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+ AssemblySpec spec;
+
+ Thread *pThread = GetThread();
+ CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+
+ spec.InitializeSpec(&(pThread->m_MarshalAlloc),
+ &gc.assemblyName,
+ FALSE, /*fIsStringized*/
+ FALSE /*fForIntrospection*/
+ );
+ ReleaseHolder<IAssemblyName> pIAssemblyName;
+ IfFailThrow(spec.CreateFusionName(&pIAssemblyName,FALSE));
+
+ bIsFxAssembly = (IfFailThrow(Fusion::Util::IsAnyFrameworkAssembly(pIAssemblyName)) == S_OK);
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(bIsFxAssembly);
+}
+FCIMPLEND
+#endif // FEATURE_FUSION
+
+#ifdef FEATURE_FUSION
+//----------------------------------------------------------------------------------------------------
+FCIMPL1(FC_BOOL_RET, AssemblyNative::IsNewPortableAssembly, AssemblyNameBaseObject* refAssemblyNameUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ ASSEMBLYNAMEREF assemblyName;
+ } gc;
+ gc.assemblyName = (ASSEMBLYNAMEREF) refAssemblyNameUNSAFE;
+
+ BOOL fIsPortable = FALSE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ AssemblySpec spec;
+
+ Thread *pThread = GetThread();
+ CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+
+ {
+ GCX_COOP();
+ spec.InitializeSpec(&(pThread->m_MarshalAlloc),
+ &gc.assemblyName,
+ FALSE, /*fIsStringized*/
+ FALSE /*fForIntrospection*/);
+ }
+
+ ReleaseHolder<IAssemblyName> pIAssemblyName;
+ IfFailThrow(spec.CreateFusionName(&pIAssemblyName,FALSE));
+
+ fIsPortable = (IfFailThrow(Fusion::Util::IsNewPortableAssembly(pIAssemblyName)) == S_OK);
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(fIsPortable);
+}
+FCIMPLEND
+#endif // FEATURE_FUSION
+
+#ifdef FEATURE_HOSTED_BINDER
+FCIMPL9(Object*, AssemblyNative::Load, AssemblyNameBaseObject* assemblyNameUNSAFE,
+ StringObject* codeBaseUNSAFE,
+ Object* securityUNSAFE,
+ AssemblyBaseObject* requestingAssemblyUNSAFE,
+ StackCrawlMark* stackMark,
+ ICLRPrivBinder * pPrivHostBinder,
+ CLR_BOOL fThrowOnFileNotFound,
+ CLR_BOOL fForIntrospection,
+ CLR_BOOL fSuppressSecurityChecks)
+#else // !FEATURE_HOSTED_BINDER
+FCIMPL8(Object*, AssemblyNative::Load, AssemblyNameBaseObject* assemblyNameUNSAFE,
+ StringObject* codeBaseUNSAFE,
+ Object* securityUNSAFE,
+ AssemblyBaseObject* requestingAssemblyUNSAFE,
+ StackCrawlMark* stackMark,
+ CLR_BOOL fThrowOnFileNotFound,
+ CLR_BOOL fForIntrospection,
+ CLR_BOOL fSuppressSecurityChecks)
+#endif // FEATURE_HOSTED_BINDER
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ ASSEMBLYNAMEREF assemblyName;
+ STRINGREF codeBase;
+ ASSEMBLYREF requestingAssembly;
+ OBJECTREF security;
+ ASSEMBLYREF rv;
+ } gc;
+
+ gc.assemblyName = (ASSEMBLYNAMEREF) assemblyNameUNSAFE;
+ gc.codeBase = (STRINGREF) codeBaseUNSAFE;
+ gc.requestingAssembly = (ASSEMBLYREF) requestingAssemblyUNSAFE;
+ gc.security = (OBJECTREF) securityUNSAFE;
+ gc.rv = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ if (gc.assemblyName == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_AssemblyName"));
+
+ if (fForIntrospection)
+ {
+ if (!GetThread()->GetDomain()->IsVerificationDomain())
+ GetThread()->GetDomain()->SetIllegalVerificationDomain();
+ }
+
+ Thread * pThread = GetThread();
+ CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+
+ DomainAssembly * pParentAssembly = NULL;
+
+ if(gc.assemblyName->GetSimpleName() == NULL)
+ {
+ if (gc.codeBase == NULL)
+ COMPlusThrow(kArgumentException, W("Format_StringZeroLength"));
+ if ((!fForIntrospection) && CorHost2::IsLoadFromBlocked())
+ COMPlusThrow(kFileLoadException, FUSION_E_LOADFROM_BLOCKED);
+ }
+ else if (!fForIntrospection)
+ {
+#ifdef FEATURE_HOSTED_BINDER
+ // name specified, if immersive ignore the codebase
+ if (GetThread()->GetDomain()->HasLoadContextHostBinder())
+ gc.codeBase = NULL;
+#endif //FEATURE_HOSTED_BINDER
+
+ // Compute parent assembly
+ Assembly * pRefAssembly;
+ if (gc.requestingAssembly == NULL)
+ {
+ pRefAssembly = SystemDomain::GetCallersAssembly(stackMark);
+
+ // Cross-appdomain callers aren't allowed as the parent
+ if (pRefAssembly &&
+ (pRefAssembly->GetDomain() != pThread->GetDomain()))
+ {
+ pRefAssembly = NULL;
+ }
+ }
+ else
+ pRefAssembly = gc.requestingAssembly->GetAssembly();
+
+ // Shared or collectible assemblies should not be used for the parent in the
+ // late-bound case.
+ if (pRefAssembly && (!pRefAssembly->IsDomainNeutral()) && (!pRefAssembly->IsCollectible()))
+ {
+ pParentAssembly= pRefAssembly->GetDomainAssembly();
+ }
+
+ }
+
+ // Initialize spec
+ AssemblySpec spec;
+ spec.InitializeSpec(&(pThread->m_MarshalAlloc),
+ &gc.assemblyName,
+ FALSE,
+ fForIntrospection);
+
+ if (!spec.HasUniqueIdentity())
+ { // Insufficient assembly name for binding (e.g. ContentType=WindowsRuntime cannot bind by assembly name)
+ EEFileLoadException::Throw(&spec, COR_E_NOTSUPPORTED);
+ }
+
+#ifdef FEATURE_HOSTED_BINDER
+ if (pPrivHostBinder != NULL)
+ {
+ pParentAssembly = NULL;
+ spec.SetHostBinder(pPrivHostBinder);
+ }
+#endif // FEATURE_HOSTED_BINDER
+
+ if (gc.codeBase != NULL)
+ spec.SetCodeBase(&(pThread->m_MarshalAlloc), &gc.codeBase);
+
+ if (pParentAssembly != NULL)
+ spec.SetParentAssembly(pParentAssembly);
+
+ AssemblyLoadSecurity loadSecurity;
+ loadSecurity.m_pAdditionalEvidence = &gc.security;
+ loadSecurity.m_fCheckLoadFromRemoteSource = !!(gc.codeBase != NULL);
+ loadSecurity.m_fSuppressSecurityChecks = !!fSuppressSecurityChecks;
+
+ // If we're in an APPX domain, then all loads from the application will find themselves within the APPX package
+ // graph or from a trusted location. However, assemblies within the package may have been marked by Windows as
+ // not being from the MyComputer zone, which can trip the LoadFromRemoteSources check. Since we do not need to
+ // defend against accidental loads from HTTP for APPX applications, we simply suppress the remote load check.
+ if (AppX::IsAppXProcess())
+ {
+ loadSecurity.m_fCheckLoadFromRemoteSource = false;
+ }
+
+ Assembly *pAssembly;
+
+ {
+ GCX_PREEMP();
+ pAssembly = spec.LoadAssembly(FILE_LOADED, &loadSecurity, fThrowOnFileNotFound, FALSE /*fRaisePrebindEvents*/, stackMark);
+ }
+
+ if (pAssembly != NULL)
+ gc.rv = (ASSEMBLYREF) pAssembly->GetExposedObject();
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(gc.rv);
+}
+FCIMPLEND
+
+Assembly* AssemblyNative::LoadFromBuffer(BOOL fForIntrospection, const BYTE* pAssemblyData, UINT64 uAssemblyLength, const BYTE* pPDBData, UINT64 uPDBLength, StackCrawlMark* stackMark, Object * securityUNSAFE, SecurityContextSource securityContextSource)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Assembly *pAssembly;
+
+ struct _gc {
+ OBJECTREF orefSecurity;
+ OBJECTREF granted;
+ OBJECTREF denied;
+ } gc;
+
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.orefSecurity = (OBJECTREF) securityUNSAFE;
+
+ if((!fForIntrospection) && CorHost2::IsLoadFromBlocked())
+ COMPlusThrow(kFileLoadException, FUSION_E_LOADFROM_BLOCKED);
+
+ if (pAssemblyData == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Array"));
+
+#ifndef FEATURE_CORECLR
+ // Event Tracing for Windows is used to log data for performance and functional testing purposes.
+ // The events in this function are used to help measure the performance of assembly loading as a whole when loading from a buffer.
+ FireEtwLoaderPhaseStart(GetAppDomain() ? GetAppDomain()->GetId().m_dwId : ETWAppDomainIdNotAvailable, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderDynamicLoad, NULL, NULL, GetClrInstanceId());
+#endif // FEATURE_CORECLR
+
+ if (fForIntrospection) {
+ if (!GetThread()->GetDomain()->IsVerificationDomain())
+ GetThread()->GetDomain()->SetIllegalVerificationDomain();
+ }
+
+ // Get caller's assembly so we can extract their codebase and propagate it
+ // into the new assembly (which obviously doesn't have one of its own).
+
+ AppDomain *pCallersDomain = NULL;
+ MethodDesc* pCallerMD = SystemDomain::GetCallersMethod (stackMark, &pCallersDomain);
+ Assembly *pCallersAssembly = (pCallerMD ? pCallerMD->GetAssembly() : NULL);
+ BOOL fPropagateIdentity = ((!fForIntrospection) && (gc.orefSecurity == NULL));
+
+ // The caller's assembly can be null if the caller is interop.
+ // @todo: we really don't want to report this assembly as "mscorlib" to anyone who asks
+ // for its code base. But the required effect here is that it receives full trust
+ // as far as its code base goes, so this should be OK. We really need to allow a
+ // "no code base" condition to avoid confusion.
+ if (pCallersAssembly == NULL) {
+ pCallersAssembly = SystemDomain::System()->SystemAssembly();
+ } else {
+#ifdef FEATURE_CAS_POLICY
+ // If no evidence was provided to the Assembly.Load(byte[]) call,
+ // we want to inherit the evidence from the security context source
+ if (fPropagateIdentity) {
+ ISecurityDescriptor *pSecDesc = NULL;
+ if (securityContextSource == kCurrentAppDomain) {
+ pSecDesc = pCallersDomain->GetSecurityDescriptor();
+ }
+ else {
+ _ASSERTE(securityContextSource == kCurrentAssembly);
+ pSecDesc = pCallersAssembly->GetSecurityDescriptor(pCallersDomain);
+ }
+
+ ENTER_DOMAIN_PTR(pSecDesc->GetDomain(),ADV_RUNNINGIN)
+ {
+ gc.orefSecurity = pSecDesc->GetEvidence();
+ }
+ END_DOMAIN_TRANSITION;
+
+ // Caller may be in another appdomain context, in which case we'll
+ // need to marshal/unmarshal the evidence across.
+#ifdef FEATURE_REMOTING // should not happen without remoting
+ if (pCallersDomain != GetAppDomain())
+ gc.orefSecurity = AppDomainHelper::CrossContextCopyFrom(pCallersDomain->GetId(), &gc.orefSecurity);
+#else
+ _ASSERTE(pCallersDomain == GetAppDomain());
+#endif
+ }
+#endif // FEATURE_CAS_POLICY
+ }
+
+ if ((COUNT_T)uAssemblyLength != uAssemblyLength) // overflow
+ ThrowOutOfMemory();
+
+ PEAssemblyHolder pFile;
+
+ {
+ GCX_PREEMP();
+
+ CLRPrivBinderLoadFile* pBinderToUse = NULL;
+
+#if defined(FEATURE_HOSTED_BINDER) && !defined(FEATURE_CORECLR)
+ if (GetAppDomain()->HasLoadContextHostBinder())
+ {
+ pBinderToUse = CLRPrivBinderLoadFile::GetOrCreateBinder();
+ }
+#endif // FEATURE_HOSTED_BINDER && !FEATURE_CORECLR
+
+ pFile = PEAssembly::OpenMemory(pCallersAssembly->GetManifestFile(),
+ pAssemblyData, (COUNT_T)uAssemblyLength,
+ fForIntrospection,
+ pBinderToUse);
+ }
+
+ fPropagateIdentity = (fPropagateIdentity && pCallersDomain && pCallersAssembly);
+
+ AssemblyLoadSecurity loadSecurity;
+ loadSecurity.m_pEvidence = &gc.orefSecurity;
+ if (fPropagateIdentity)
+ {
+ DWORD dwSpecialFlags = 0;
+
+#ifdef FEATURE_CAS_POLICY
+ if (securityContextSource == kCurrentAssembly)
+ {
+ IAssemblySecurityDescriptor *pCallersSecDesc = pCallersAssembly->GetSecurityDescriptor(pCallersDomain);
+ gc.granted = pCallersSecDesc->GetGrantedPermissionSet( &(gc.denied));
+ dwSpecialFlags = pCallersSecDesc->GetSpecialFlags();
+
+ // If we're going to inherit the grant set of an anonymously hosted dynamic method, it will be
+ // full trust/transparent. In that case, we should demand full trust.
+ if(pCallersAssembly != NULL && pCallersDomain != NULL && pCallersAssembly->GetDomainAssembly(pCallersDomain) == pCallersDomain->GetAnonymouslyHostedDynamicMethodsAssembly())
+ {
+ loadSecurity.m_fPropagatingAnonymouslyHostedDynamicMethodGrant = true;
+ }
+ }
+ else
+#endif // FEATURE_CAS_POLICY
+ {
+ IApplicationSecurityDescriptor *pDomainSecDesc = pCallersDomain->GetSecurityDescriptor();
+
+#ifdef FEATURE_CAS_POLICY
+ // We only want to propagate the identity of homogeneous domains, since heterogeneous domains tend
+ // to be fully trusted even if they are housing partially trusted code - which could lead to an
+ // elevation of privilege if we allowed the grant set to be pushed to assemblies that partially
+ // trusted code is loading.
+ if (!pDomainSecDesc->IsHomogeneous())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_SecurityContextSourceAppDomainInHeterogenous"));
+ }
+#endif // FEATURE_CAS_POLICY
+
+
+ gc.granted = pDomainSecDesc->GetGrantedPermissionSet();
+ dwSpecialFlags = pDomainSecDesc->GetSpecialFlags();
+ }
+
+#ifdef FEATURE_REMOTING
+ // Caller may be in another appdomain context, in which case we'll need to marshal/unmarshal the grant
+ // and deny sets across.
+ if (pCallersDomain != GetAppDomain())
+ {
+ gc.granted = AppDomainHelper::CrossContextCopyFrom(pCallersDomain->GetId(), &(gc.granted));
+ if (gc.denied != NULL)
+ {
+ gc.denied = AppDomainHelper::CrossContextCopyFrom(pCallersDomain->GetId(), &(gc.denied));
+ }
+ }
+#endif // FEATURE_REMOTING
+
+ // Instead of resolving policy, the loader should use an inherited grant set
+ loadSecurity.m_pGrantSet = &gc.granted;
+ loadSecurity.m_pRefusedSet = &gc.denied;
+ loadSecurity.m_dwSpecialFlags = dwSpecialFlags;
+
+ // If the caller is from another appdomain, we will not be able to get the assembly's security
+ // descriptor, but that is OK, since getting a pointer to our AppDomain required full trust.
+ if (!pCallersDomain->GetSecurityDescriptor()->IsFullyTrusted() ||
+ ( pCallersAssembly->FindDomainAssembly(::GetAppDomain()) != NULL && !pCallersAssembly->GetSecurityDescriptor()->IsFullyTrusted()) )
+ pFile->VerifyStrongName();
+ }
+ pAssembly = GetPostPolicyAssembly(pFile, fForIntrospection, &loadSecurity, TRUE);
+
+ // perform necessary Transparency checks for this Load(byte[]) call (based on the calling method).
+ if (pCallerMD)
+ {
+ Security::PerformTransparencyChecksForLoadByteArray(pCallerMD, pAssembly->GetSecurityDescriptor());
+ }
+
+ // In order to assign the PDB image (if present),
+ // the resulting assembly's image needs to be exactly the one
+ // we created above. We need pointer comparison instead of pe image equivalence
+ // to avoid mixed binaries/PDB pairs of other images.
+ // This applies to both Desktop CLR and CoreCLR, with or without fusion.
+ BOOL fIsSameAssembly = (pAssembly->GetManifestFile()->GetILimage() == pFile->GetILimage());
+
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+ if (fForIntrospection)
+ {
+ IAssemblyName * pIAssemblyName = pAssembly->GetFusionAssemblyName();
+
+ AppDomain::AssemblyIterator i = GetAppDomain()->IterateAssembliesEx(
+ (AssemblyIterationFlags)(kIncludeLoaded | kIncludeIntrospection));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (i.Next(pDomainAssembly.This()))
+ {
+ Assembly * pCachedAssembly = pDomainAssembly->GetAssembly();
+ IAssemblyName * pCachedAssemblyName = pCachedAssembly->GetFusionAssemblyName();
+ if ((pAssembly != pCachedAssembly) && (S_OK == pCachedAssemblyName->IsEqual(pIAssemblyName, ASM_CMPF_IL_ALL)))
+ {
+ COMPlusThrow(kFileLoadException, IDS_EE_REFLECTIONONLY_LOADFROM, W("")); //@todo: need to fill in assemblyname
+ }
+ }
+ }
+#endif // FEATURE_REFLECTION_ONLY_LOAD
+
+#ifndef FEATURE_CORECLR
+ FireEtwLoaderPhaseEnd(GetAppDomain() ? GetAppDomain()->GetId().m_dwId : ETWAppDomainIdNotAvailable, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderDynamicLoad, NULL, NULL, GetClrInstanceId());
+#endif // FEATURE_CORECLR
+ LOG((LF_CLASSLOADER,
+ LL_INFO100,
+ "\tLoaded in-memory module\n"));
+
+ // Setting the PDB info is only applicable for our original assembly.
+ // This applies to both Desktop CLR and CoreCLR, with or without fusion.
+ if (fIsSameAssembly)
+ {
+#ifdef DEBUGGING_SUPPORTED
+ // If we were given symbols, save a copy of them (and, if requested by
+ // the debugger, load them now).
+ if (pPDBData != NULL)
+ {
+ GCX_PREEMP();
+ if ((DWORD)uPDBLength != uPDBLength) // overflow
+ ThrowOutOfMemory();
+ pAssembly->GetManifestModule()->SetSymbolBytes(pPDBData, (DWORD)uPDBLength);
+ }
+#endif // DEBUGGING_SUPPORTED
+ }
+
+ GCPROTECT_END();
+
+ return pAssembly;
+}
+
+#ifdef FEATURE_CORECLR
+// static
+void QCALLTYPE AssemblyNative::LoadFromUnmanagedArray(CLR_BOOL fForIntrospection, BYTE* pAssemblyData, UINT64 uAssemblyLength, BYTE* pPDBData, UINT64 uPDBLength, QCall::StackCrawlMarkHandle stackMark, QCall::ObjectHandleOnStack retAssembly)
+{
+ QCALL_CONTRACT;
+
+ DomainAssembly * pDomainAssembly = NULL;
+
+ BEGIN_QCALL;
+ Assembly* pAssembly = NULL;
+ GCX_COOP();
+ pAssembly=LoadFromBuffer(fForIntrospection, pAssemblyData, uAssemblyLength, pPDBData, uPDBLength, stackMark, NULL, kCurrentAppDomain);
+ pDomainAssembly = pAssembly->GetDomainAssembly();
+ retAssembly.Set(pDomainAssembly->GetExposedAssemblyObject());
+ END_QCALL;
+}
+#endif // FEATURE_CORECLR
+
+
+
+
+
+FCIMPL6(Object*, AssemblyNative::LoadImage, U1Array* PEByteArrayUNSAFE,
+ U1Array* SymByteArrayUNSAFE, Object* securityUNSAFE,
+ StackCrawlMark* stackMark, CLR_BOOL fForIntrospection, SecurityContextSource securityContextSource)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ U1ARRAYREF PEByteArray;
+ U1ARRAYREF SymByteArray;
+ OBJECTREF security;
+ OBJECTREF Throwable;
+ OBJECTREF refRetVal;
+ } gc;
+
+ gc.PEByteArray = (U1ARRAYREF) PEByteArrayUNSAFE;
+ gc.SymByteArray = (U1ARRAYREF) SymByteArrayUNSAFE;
+ gc.security = (OBJECTREF) securityUNSAFE;
+ gc.Throwable = NULL;
+ gc.refRetVal = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+
+ if (gc.PEByteArray == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Array"));
+
+ NewArrayHolder<BYTE> pbSyms;
+ DWORD cbSyms = 0;
+
+#ifdef DEBUGGING_SUPPORTED
+ // If we were given symbols, save a copy of them (and, if requested by
+ // the debugger, load them now).
+ if (gc.SymByteArray != NULL)
+ {
+ Security::CopyByteArrayToEncoding(&gc.SymByteArray,
+ &pbSyms, &cbSyms);
+
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ Assembly* pAssembly = NULL;
+ // Pin byte array for loading
+ {
+ Wrapper<OBJECTHANDLE, DoNothing, DestroyPinningHandle> handle(
+ GetAppDomain()->CreatePinningHandle(gc.PEByteArray));
+
+ const BYTE *pbImage = gc.PEByteArray->GetDirectConstPointerToNonObjectElements();
+ DWORD cbImage = gc.PEByteArray->GetNumComponents();
+ pAssembly = LoadFromBuffer(fForIntrospection, pbImage, cbImage, pbSyms, cbSyms, stackMark, OBJECTREFToObject(gc.security), securityContextSource);
+ }
+
+#ifdef FEATURE_FUSION
+ if (!fForIntrospection && IsLoggingNeeded())
+ {
+ BinderLogging::BindingLog::LogLoadByteArray(GetAppDomain()->GetFusionContext(), pAssembly);
+ }
+#endif
+
+ if (pAssembly != NULL)
+ gc.refRetVal = pAssembly->GetExposedObject();
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(gc.refRetVal);
+}
+FCIMPLEND
+
+FCIMPL2(Object*, AssemblyNative::LoadFile, StringObject* pathUNSAFE, Object* securityUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ struct _gc {
+ OBJECTREF refRetVal;
+ OBJECTREF refSecurity;
+ STRINGREF strPath;
+ } gc;
+
+ gc.refRetVal = NULL;
+ gc.refSecurity = ObjectToOBJECTREF(securityUNSAFE);
+ gc.strPath = ObjectToSTRINGREF(pathUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+#ifdef FEATURE_CORECLR
+ if(!GetAppDomain()->IsLoadFileAllowed())
+ COMPlusThrow(kNotSupportedException);
+#endif
+
+ if(CorHost2::IsLoadFromBlocked())
+ COMPlusThrow(kFileLoadException, FUSION_E_LOADFROM_BLOCKED);
+
+ if (pathUNSAFE == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Path"));
+
+ StackSString path;
+ gc.strPath->GetSString(path);
+
+#ifdef FEATURE_FUSION // use BindResult for abstraction
+ // Event Tracing for Windows is used to log data for performance and functional testing purposes.
+ // The events in this function are used to help measure the performance of assembly loading as a whole when loading directly from a file,
+ // of binding to an assembly, as well as of lookup scenarios such as from a host store.
+ FireEtwLoaderPhaseStart(ETWAppDomainIdNotAvailable, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderDynamicLoad, path, NULL, GetClrInstanceId());
+ SafeComHolder<IAssembly> pFusionAssembly;
+ SafeComHolder<IBindResult> pNativeFusionAssembly;
+ SafeComHolder<IFusionBindLog> pFusionLog;
+
+ PEAssemblyHolder pFile;
+ FireEtwBindingPhaseStart(GetAppDomain() ? GetAppDomain()->GetId().m_dwId : ETWAppDomainIdNotAvailable, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable, path, NULL, GetClrInstanceId());
+
+ if(GetAppDomain()->HasLoadContextHostBinder())
+ {
+ GCX_PREEMP();
+ CLRPrivBinderLoadFile* pLFBinder = CLRPrivBinderLoadFile::GetOrCreateBinder();
+ ReleaseHolder<PEImage> pImage(PEImage::OpenImage(path));
+ ReleaseHolder<ICLRPrivAssembly> pAsm;
+ ReleaseHolder<IAssemblyName> pAssemblyName;
+ IfFailThrow(pLFBinder->BindAssemblyExplicit(pImage, &pAssemblyName, &pAsm));
+ IfFailThrow(GetAppDomain()->BindHostedPrivAssembly(nullptr, pAsm, pAssemblyName, &pFile));
+ _ASSERTE(pFile);
+ }
+ else
+ {
+ GCX_PREEMP();
+ IfFailThrow(ExplicitBind(path, GetAppDomain()->GetFusionContext(),
+ EXPLICITBIND_FLAGS_NON_BINDABLE,
+ NULL, &pFusionAssembly, &pNativeFusionAssembly, &pFusionLog));
+ pFile.Assign(PEAssembly::Open(pFusionAssembly, pNativeFusionAssembly, pFusionLog, FALSE, FALSE));
+ }
+
+ FireEtwBindingLookupAndProbingPhaseEnd(GetAppDomain() ? GetAppDomain()->GetId().m_dwId : ETWAppDomainIdNotAvailable, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable, path, NULL, GetClrInstanceId());
+
+ FireEtwBindingPhaseEnd(GetAppDomain() ? GetAppDomain()->GetId().m_dwId : ETWAppDomainIdNotAvailable, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable, path, NULL, GetClrInstanceId());
+
+ AssemblyLoadSecurity loadSecurity;
+ loadSecurity.m_pAdditionalEvidence = &gc.refSecurity;
+ loadSecurity.m_fCheckLoadFromRemoteSource = true;
+
+ // If we're in an APPX domain, then all loads from the application will find themselves within the APPX package
+ // graph or from a trusted location. However, assemblies within the package may have been marked by Windows as
+ // not being from the MyComputer zone, which can trip the LoadFromRemoteSources check. Since we do not need to
+ // defend against accidental loads from HTTP for APPX applications, we simply suppress the remote load check.
+ if (AppX::IsAppXProcess())
+ {
+ loadSecurity.m_fCheckLoadFromRemoteSource = false;
+ }
+
+ Assembly *pAssembly = GetPostPolicyAssembly(pFile, FALSE, &loadSecurity);
+
+ FireEtwLoaderPhaseEnd(ETWAppDomainIdNotAvailable, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderDynamicLoad, path, NULL, GetClrInstanceId());
+
+ if (IsLoggingNeeded())
+ {
+ BinderLogging::BindingLog::LogLoadFile(GetAppDomain()->GetFusionContext(), path, pAssembly);
+ }
+
+#else // FEATURE_FUSION
+ Assembly *pAssembly = AssemblySpec::LoadAssembly(path);
+#endif // FEATURE_FUSION
+
+ LOG((LF_CLASSLOADER,
+ LL_INFO100,
+ "\tLoaded assembly from a file\n"));
+
+
+ if (pAssembly != NULL)
+ gc.refRetVal = (ASSEMBLYREF) pAssembly->GetExposedObject();
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(gc.refRetVal);
+}
+FCIMPLEND
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+/* static */
+Assembly* AssemblyNative::LoadFromPEImage(CLRPrivBinderAssemblyLoadContext* pBinderContext, PEImage *pILImage, PEImage *pNIImage)
+{
+ CONTRACT(Assembly*)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pBinderContext));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ Assembly *pLoadedAssembly = NULL;
+
+ ReleaseHolder<ICLRPrivAssembly> pAssembly;
+
+ // Get the correct PEImage to work with.
+ BOOL fIsNativeImage = TRUE;
+ PEImage *pImage = pNIImage;
+ if (pNIImage == NULL)
+ {
+ // Since we do not have a NI image, we are working with the IL assembly
+ pImage = pILImage;
+ fIsNativeImage = FALSE;
+ }
+ _ASSERTE(pImage != NULL);
+
+ // Force the image to be loaded and mapped so that subsequent loads do not
+ // map a duplicate copy.
+ if (pImage->IsFile())
+ {
+ pImage->Load();
+ }
+ else
+ {
+ pImage->LoadNoFile();
+ }
+
+ DWORD dwMessageID = IDS_EE_FILELOAD_ERROR_GENERIC;
+
+ // Set the caller's assembly to be mscorlib
+ DomainAssembly *pCallersAssembly = SystemDomain::System()->SystemAssembly()->GetDomainAssembly();
+ PEAssembly *pParentAssembly = pCallersAssembly->GetFile();
+
+ // Initialize the AssemblySpec
+ AssemblySpec spec;
+ spec.InitializeSpec(TokenFromRid(1, mdtAssembly), pImage->GetMDImport(), pCallersAssembly);
+ spec.SetBindingContext(pBinderContext);
+
+ HRESULT hr = pBinderContext->BindUsingPEImage(pImage, fIsNativeImage, &pAssembly);
+ if (hr != S_OK)
+ {
+ // Give a more specific message for the case when we found the assembly with the same name already loaded.
+ if (hr == COR_E_FILELOAD)
+ {
+ dwMessageID = IDS_HOST_ASSEMBLY_RESOLVER_ASSEMBLY_ALREADY_LOADED_IN_CONTEXT;
+ }
+
+ StackSString name;
+ spec.GetFileOrDisplayName(0, name);
+ COMPlusThrowHR(COR_E_FILELOAD, dwMessageID, name);
+ }
+
+ PEAssemblyHolder pPEAssembly(PEAssembly::Open(pParentAssembly, pILImage, pNIImage, pAssembly, FALSE));
+
+ GCX_COOP();
+
+ PTR_AppDomain pCurDomain = GetAppDomain();
+ IApplicationSecurityDescriptor *pDomainSecDesc = pCurDomain->GetSecurityDescriptor();
+
+ OBJECTREF refGrantedPermissionSet = NULL;
+ AssemblyLoadSecurity loadSecurity;
+ DomainAssembly *pDomainAssembly = NULL;
+
+ // Setup the AssemblyLoadSecurity to perform the assembly load
+ GCPROTECT_BEGIN(refGrantedPermissionSet);
+
+ loadSecurity.m_dwSpecialFlags = pDomainSecDesc->GetSpecialFlags();
+ refGrantedPermissionSet = pDomainSecDesc->GetGrantedPermissionSet();
+ loadSecurity.m_pGrantSet = &refGrantedPermissionSet;
+
+ pDomainAssembly = pCurDomain->LoadDomainAssembly(&spec, pPEAssembly, FILE_LOADED, &loadSecurity);
+ pLoadedAssembly = pDomainAssembly->GetAssembly();
+
+ GCPROTECT_END();
+
+ RETURN pLoadedAssembly;
+}
+
+
+/* static */
+void QCALLTYPE AssemblyNative::LoadFromPath(INT_PTR ptrNativeAssemblyLoadContext, LPCWSTR pwzILPath, LPCWSTR pwzNIPath, QCall::ObjectHandleOnStack retLoadedAssembly)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ PTR_AppDomain pCurDomain = GetAppDomain();
+
+ // Get the binder context in which the assembly will be loaded.
+ CLRPrivBinderAssemblyLoadContext *pBinderContext = reinterpret_cast<CLRPrivBinderAssemblyLoadContext*>(ptrNativeAssemblyLoadContext);
+ _ASSERTE(pBinderContext != NULL);
+
+ // Form the PEImage for the IL assembly. In case of an exception, the holders ensure
+ // the release of the images.
+ PEImageHolder pILImage, pNIImage;
+
+ if (pwzILPath != NULL)
+ {
+ pILImage = PEImage::OpenImage(pwzILPath);
+
+ // Need to verify that this is a valid CLR assembly.
+ if (!pILImage->CheckILFormat())
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
+ }
+
+ // Form the PEImage for the NI assembly, if specified
+ if (pwzNIPath != NULL)
+ {
+ pNIImage = PEImage::OpenImage(pwzNIPath, MDInternalImport_TrustedNativeImage);
+
+ if (pNIImage->HasReadyToRunHeader())
+ {
+ // ReadyToRun images are treated as IL images by the rest of the system
+ if (!pNIImage->CheckILFormat())
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+
+ pILImage = pNIImage.Extract();
+ pNIImage = NULL;
+ }
+ else
+ {
+ if (!pNIImage->CheckNativeFormat())
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ }
+
+ Assembly *pLoadedAssembly = AssemblyNative::LoadFromPEImage(pBinderContext, pILImage, pNIImage);
+
+ {
+ GCX_COOP();
+ retLoadedAssembly.Set(pLoadedAssembly->GetExposedObject());
+ }
+
+ LOG((LF_CLASSLOADER,
+ LL_INFO100,
+ "\tLoaded assembly from a file\n"));
+
+ END_QCALL;
+}
+
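+// A hedged sketch of the holder discipline LoadFromPath relies on above
+// (ConsumePEImage is a hypothetical consumer): PEImageHolder releases its
+// PEImage reference on destruction, so a throw between OpenImage and the
+// hand-off into LoadFromPEImage cannot leak the image.
+//
+//   PEImageHolder pImage(PEImage::OpenImage(pwzSomePath)); // ref owned by holder
+//   if (!pImage->CheckILFormat())
+//       ThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);         // holder releases here
+//   ConsumePEImage(pImage);                                // holder still owns the ref
+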
+/*static */
+void QCALLTYPE AssemblyNative::LoadFromStream(INT_PTR ptrNativeAssemblyLoadContext, INT_PTR ptrAssemblyArray,
+ INT32 cbAssemblyArrayLength, INT_PTR ptrSymbolArray, INT32 cbSymbolArrayLength,
+ QCall::ObjectHandleOnStack retLoadedAssembly)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ // Ensure that the invariants are in place
+ _ASSERTE(ptrNativeAssemblyLoadContext != NULL);
+ _ASSERTE((ptrAssemblyArray != NULL) && (cbAssemblyArrayLength > 0));
+ _ASSERTE((ptrSymbolArray == NULL) || (cbSymbolArrayLength > 0));
+
+ // We must have a flat image stashed away since we need a private
+ // copy of the data which we can verify before doing the mapping.
+ PVOID pAssemblyArray = reinterpret_cast<PVOID>(ptrAssemblyArray);
+
+ PEImageHolder pILImage(PEImage::LoadFlat(pAssemblyArray, (COUNT_T)cbAssemblyArrayLength));
+
+ // Need to verify that this is a valid CLR assembly.
+ if (!pILImage->CheckILFormat())
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
+
+ // Get the binder context in which the assembly will be loaded
+ CLRPrivBinderAssemblyLoadContext *pBinderContext = reinterpret_cast<CLRPrivBinderAssemblyLoadContext*>(ptrNativeAssemblyLoadContext);
+
+ // Bind and load the stream-based assembly as an IL image (no native image is supplied)
+ Assembly* pLoadedAssembly = AssemblyNative::LoadFromPEImage(pBinderContext, pILImage, NULL);
+ {
+ GCX_COOP();
+ retLoadedAssembly.Set(pLoadedAssembly->GetExposedObject());
+ }
+
+ LOG((LF_CLASSLOADER,
+ LL_INFO100,
+ "\tLoaded assembly from a stream\n"));
+
+ // In order to assign the PDB image (if present),
+ // the resulting assembly's image needs to be exactly the one
+ // we created above. We need pointer comparison instead of pe image equivalence
+ // to avoid mixed binaries/PDB pairs of other images.
+ // This applies to both Desktop CLR and CoreCLR, with or without fusion.
+ BOOL fIsSameAssembly = (pLoadedAssembly->GetManifestFile()->GetILimage() == pILImage);
+
+ // Setting the PDB info is only applicable for our original assembly.
+ // This applies to both Desktop CLR and CoreCLR, with or without fusion.
+ if (fIsSameAssembly)
+ {
+#ifdef DEBUGGING_SUPPORTED
+ // If we were given symbols, save a copy of them.
+ if (ptrSymbolArray != NULL)
+ {
+ PBYTE pSymbolArray = reinterpret_cast<PBYTE>(ptrSymbolArray);
+ pLoadedAssembly->GetManifestModule()->SetSymbolBytes(pSymbolArray, (DWORD)cbSymbolArrayLength);
+ }
+#endif // DEBUGGING_SUPPORTED
+ }
+
+ END_QCALL;
+}
+
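+// A short restatement of the identity test LoadFromStream performs before
+// attaching symbols: pointer equality on the PEImage, not content equivalence,
+// proves the loaded assembly is backed by exactly the image created from this
+// stream, so the caller's PDB bytes are known to match it.
+//
+//   if (pLoadedAssembly->GetManifestFile()->GetILimage() == pILImage)
+//   {
+//       // same physical PEImage instance => safe to attach the PDB bytes
+//   }
+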
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+/* static */
+Assembly* AssemblyNative::GetPostPolicyAssembly(PEAssembly *pFile,
+ BOOL fForIntrospection,
+ AssemblyLoadSecurity *pLoadSecurity,
+ BOOL fIsLoadByteArray /* = FALSE */)
+{
+ CONTRACT(Assembly*)
+ {
+ MODE_ANY;
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pFile));
+ PRECONDITION(CheckPointer(pLoadSecurity));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ GCX_PREEMP();
+
+#ifdef FEATURE_FUSION
+ if (!fForIntrospection && !GetAppDomain()->HasLoadContextHostBinder()) {
+ DWORD dwSize = 0;
+ // if strongly named and not an exempt
+ BOOL bOptionallyRetargetable;
+
+ IfFailThrow(IsOptionallyRetargetableAssembly(pFile->GetFusionAssemblyName(), &bOptionallyRetargetable));
+ if ( !bOptionallyRetargetable && pFile->GetFusionAssemblyName()->GetProperty(ASM_NAME_PUBLIC_KEY_TOKEN, NULL, &dwSize) == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)) {
+
+ SafeComHolder<IAssemblyName> pPostPolicyName(NULL);
+ HRESULT hr = PreBindAssembly(GetAppDomain()->GetFusionContext(),
+ pFile->GetFusionAssemblyName(),
+ NULL, // pAsmParent
+ &pPostPolicyName,
+ NULL); // pvReserved
+ if (FAILED(hr)) {
+ if (hr == FUSION_E_REF_DEF_MISMATCH) {
+ // Policy redirects to another version
+ AssemblySpec spec;
+ spec.InitializeSpec(pPostPolicyName, FALSE);
+ RETURN spec.LoadAssembly(FILE_LOADED, pLoadSecurity);
+ }
+ else
+ ThrowHR(hr);
+ }
+ else {
+ ReleaseHolder<IAssembly> pAsm;
+
+ SafeComHolder<IAssemblyCache> pIAsmCache (NULL);
+ IfFailThrow(CreateAssemblyCache(&pIAsmCache, 0));
+
+ DWORD dwFlags = ASM_DISPLAYF_FULL;
+
+ // NoPlatform implies that the assembly is not tied to a specific machine
+ // architecture, which means we need to do full GAC probing.
+ if (pFile->IsMarkedAsNoPlatform()) {
+ hr = CreateAssemblyFromCacheLookup(GetAppDomain()->GetFusionContext(), pFile->GetFusionAssemblyName(), TRUE, &pAsm, NULL);
+ }
+ else {
+ SString sourceDisplay;
+ FusionBind::GetAssemblyNameDisplayName(pFile->GetFusionAssemblyName(), sourceDisplay, dwFlags);
+ hr = pIAsmCache->QueryAssemblyInfo(0, sourceDisplay, NULL);
+ }
+
+ if (SUCCEEDED(hr)) {
+ // It's in the GAC
+ AssemblySpec spec;
+ spec.InitializeSpec(pFile->GetFusionAssemblyName(), FALSE);
+ RETURN spec.LoadAssembly(FILE_LOADED, pLoadSecurity);
+ }
+ else if (hr != HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND))
+ ThrowHR(hr);
+ }
+ }
+ }
+#else // FEATURE_FUSION
+ if (fIsLoadByteArray)
+ {
+ PEImage *pPEImage = pFile->GetILimage();
+ HRESULT hr = S_OK;
+ PTR_AppDomain pCurDomain = GetAppDomain();
+ CLRPrivBinderCoreCLR *pTPABinder = pCurDomain->GetTPABinderContext();
+
+ _ASSERTE(pCurDomain->GetFusionContext() == pTPABinder);
+ hr = pTPABinder->PreBindByteArray(pPEImage, fForIntrospection);
+ if (hr == S_OK)
+ {
+ AssemblySpec spec;
+ spec.InitializeSpec(pFile);
+
+ // Set the binder associated with the AssemblySpec
+ spec.SetBindingContext(pTPABinder);
+ RETURN spec.LoadAssembly(FILE_LOADED, pLoadSecurity);
+ }
+ else
+ {
+ _ASSERTE(hr != S_FALSE);
+ ThrowHR(hr);
+ }
+ }
+#endif // FEATURE_FUSION
+
+ RETURN GetAppDomain()->LoadAssembly(NULL, pFile, FILE_LOADED, pLoadSecurity);
+}
+
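+// A hedged summary of GetPostPolicyAssembly's fusion path above (no new
+// behavior; the branches are sketched with placeholder comments):
+//
+//   HRESULT hr = PreBindAssembly(pCtx, pName, NULL, &pPostPolicyName, NULL);
+//   if (hr == FUSION_E_REF_DEF_MISMATCH)
+//       /* policy redirected: load the post-policy AssemblySpec */ ;
+//   else if (/* GAC probe via QueryAssemblyInfo or cache lookup succeeds */)
+//       /* load by fusion name so the GAC copy wins */ ;
+//   else
+//       /* fall through to AppDomain::LoadAssembly on the given PEAssembly */ ;
+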
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+void QCALLTYPE AssemblyNative::LoadModule(QCall::AssemblyHandle pAssembly,
+ LPCWSTR wszModuleName,
+ LPCBYTE pRawModule, INT32 cbModule,
+ LPCBYTE pRawSymbolStore, INT32 cbSymbolStore,
+ QCall::ObjectHandleOnStack retModule)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ Module * pModule = NULL;
+
+ if(CorHost2::IsLoadFromBlocked())
+ COMPlusThrow(kFileLoadException, FUSION_E_LOADFROM_BLOCKED);
+
+ if (wszModuleName == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_FileName"));
+
+ if (pRawModule == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Array"));
+
+ if (*wszModuleName == '\0')
+ COMPlusThrow(kArgumentException, W("Argument_EmptyFileName"));
+
+ CQuickBytes qbLC;
+
+ MAKE_UTF8PTR_FROMWIDE(pName, wszModuleName);
+ LPCSTR psModuleName = pName;
+
+ // Need to perform case insensitive lookup.
+ {
+ UTF8_TO_LOWER_CASE(psModuleName, qbLC);
+ psModuleName = (LPUTF8) qbLC.Ptr();
+ }
+
+ HashDatum datum;
+ mdFile kFile = NULL;
+ // m_pAllowedFiles only grows - entries are never deleted from it. So we do not take
+ // a lock around GetValue. If the code is modified such that we delete entries from m_pAllowedFiles,
+ // reconsider whether we should take the m_crstAllowedFiles lock here (see the uses of kFile below).
+ if (pAssembly->GetAssembly()->m_pAllowedFiles->GetValue(psModuleName, &datum))
+ kFile = (mdFile)(size_t)datum;
+
+ // If the name doesn't match one of the File def names, don't load this module.
+ // If this name matches the manifest file (datum was NULL), don't load either.
+ if (!kFile)
+ COMPlusThrow(kArgumentException, W("Arg_InvalidFileName"));
+
+
+ PEModuleHolder pFile(PEModule::OpenMemory(pAssembly->GetFile(), kFile,
+ pRawModule, cbModule));
+
+ DomainModule *pDomainModule = GetAppDomain()->LoadDomainModule(pAssembly->GetDomainAssembly(),
+ pFile, FILE_LOADED);
+ pModule = pDomainModule->GetModule();
+
+ if (!pFile->Equals(pModule->GetFile()))
+ COMPlusThrow(kArgumentException, W("Argument_ModuleAlreadyLoaded"));
+
+ LOG((LF_CLASSLOADER,
+ LL_INFO100,
+ "\tLoaded in-memory module\n"));
+
+#ifdef DEBUGGING_SUPPORTED
+ if (!pModule->IsResource())
+ {
+ // If we were given symbols, hold onto a copy
+ if (pRawSymbolStore != NULL)
+ {
+ pModule->SetSymbolBytes(pRawSymbolStore, cbSymbolStore);
+ }
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ if (pModule != NULL)
+ {
+ GCX_COOP();
+ retModule.Set(pModule->GetExposedObject());
+ }
+
+ END_QCALL;
+
+ return;
+}
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+void QCALLTYPE AssemblyNative::GetLocation(QCall::AssemblyHandle pAssembly, QCall::StringHandleOnStack retString)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+#ifndef FEATURE_CORECLR
+ // workaround - lie about where mscorlib is. Mscorlib is now loaded out of the GAC,
+ // but some apps query its location to find the system directory. (Notably system.web)
+ if (pAssembly->IsSystem())
+ {
+ retString.Set(SystemDomain::System()->BaseLibrary());
+ }
+ else
+#endif // !FEATURE_CORECLR
+ {
+ retString.Set(pAssembly->GetFile()->GetPath());
+ }
+
+ END_QCALL;
+}
+
+FCIMPL1(FC_BOOL_RET, AssemblyNative::IsReflectionOnly, AssemblyBaseObject *pAssemblyUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ ASSEMBLYREF refAssembly = (ASSEMBLYREF)ObjectToOBJECTREF(pAssemblyUNSAFE);
+
+ if (refAssembly == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ FC_RETURN_BOOL(refAssembly->GetDomainAssembly()->IsIntrospectionOnly());
+}
+FCIMPLEND
+
+void QCALLTYPE AssemblyNative::GetType(QCall::AssemblyHandle pAssembly, LPCWSTR wszName, BOOL bThrowOnError, BOOL bIgnoreCase, QCall::ObjectHandleOnStack retType)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(wszName));
+ }
+ CONTRACTL_END;
+
+ TypeHandle retTypeHandle;
+
+ BEGIN_QCALL;
+
+ if (!wszName)
+ COMPlusThrowArgumentNull(W("name"), W("ArgumentNull_String"));
+
+ GCX_COOP();
+
+ OBJECTREF keepAlive = NULL;
+ GCPROTECT_BEGIN(keepAlive);
+
+ {
+ GCX_PREEMP();
+
+ BOOL prohibitAsmQualifiedName = TRUE;
+
+#ifdef FEATURE_LEGACYNETCF
+ // The NetCF type name parser allowed the assembly name to be overridden here
+ if (GetAppDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ prohibitAsmQualifiedName = FALSE;
+#endif
+
+ // Load the class from this assembly (fail if it is in a different one).
+ retTypeHandle = TypeName::GetTypeManaged(wszName, pAssembly, bThrowOnError, bIgnoreCase, pAssembly->IsIntrospectionOnly(), prohibitAsmQualifiedName, NULL, FALSE, &keepAlive);
+ }
+
+ if (!retTypeHandle.IsNull())
+ {
+ retType.Set(retTypeHandle.GetManagedClassObject());
+ }
+
+ GCPROTECT_END();
+
+ END_QCALL;
+
+ return;
+}
+
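+// A restatement of the keepAlive pattern in GetType above: the OBJECTREF is
+// registered with the GC in cooperative mode before switching to preemptive
+// mode, so anything the type-name parser returns through &keepAlive stays
+// reachable until the managed Type object has been materialized.
+//
+//   OBJECTREF keepAlive = NULL;
+//   GCPROTECT_BEGIN(keepAlive);   // report the ref to the GC
+//   {
+//       GCX_PREEMP();             // parsing may trigger assembly loads
+//       th = TypeName::GetTypeManaged(..., &keepAlive);
+//   }
+//   // keepAlive is still protected while the managed object is created
+//   GCPROTECT_END();
+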
+FCIMPL1(FC_BOOL_RET, AssemblyNative::IsDynamic, AssemblyBaseObject* pAssemblyUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ ASSEMBLYREF refAssembly = (ASSEMBLYREF)ObjectToOBJECTREF(pAssemblyUNSAFE);
+
+ if (refAssembly == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ FC_RETURN_BOOL(refAssembly->GetDomainAssembly()->GetFile()->IsDynamic());
+}
+FCIMPLEND
+
+void QCALLTYPE AssemblyNative::GetVersion(QCall::AssemblyHandle pAssembly, INT32* pMajorVersion, INT32* pMinorVersion, INT32*pBuildNumber, INT32* pRevisionNumber)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ UINT16 major=0xffff, minor=0xffff, build=0xffff, revision=0xffff;
+
+ pAssembly->GetFile()->GetVersion(&major, &minor, &build, &revision);
+
+ *pMajorVersion = major;
+ *pMinorVersion = minor;
+ *pBuildNumber = build;
+ *pRevisionNumber = revision;
+
+ END_QCALL;
+}
+
+void QCALLTYPE AssemblyNative::GetPublicKey(QCall::AssemblyHandle pAssembly, QCall::ObjectHandleOnStack retPublicKey)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ DWORD cbPublicKey = 0;
+ const void *pbPublicKey = pAssembly->GetFile()->GetPublicKey(&cbPublicKey);
+ retPublicKey.SetByteArray((BYTE *)pbPublicKey, cbPublicKey);
+
+ END_QCALL;
+}
+
+#ifndef FEATURE_CORECLR
+
+BYTE QCALLTYPE AssemblyNative::GetSecurityRuleSet(QCall::AssemblyHandle pAssembly)
+{
+ QCALL_CONTRACT;
+
+ SecurityRuleSet ruleSet = SecurityRuleSet_Default;
+
+ BEGIN_QCALL;
+
+ ModuleSecurityDescriptor *pMSD = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pAssembly->GetAssembly());
+ ruleSet = pMSD->GetSecurityRuleSet();
+
+ END_QCALL;
+
+ return static_cast<BYTE>(ruleSet);
+}
+
+#endif // !FEATURE_CORECLR
+
+void QCALLTYPE AssemblyNative::GetSimpleName(QCall::AssemblyHandle pAssembly, QCall::StringHandleOnStack retSimpleName)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ retSimpleName.Set(pAssembly->GetSimpleName());
+ END_QCALL;
+}
+
+void QCALLTYPE AssemblyNative::GetLocale(QCall::AssemblyHandle pAssembly, QCall::StringHandleOnStack retString)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ LPCUTF8 pLocale = pAssembly->GetFile()->GetLocale();
+ if(pLocale)
+ {
+ retString.Set(pLocale);
+ }
+
+ END_QCALL;
+}
+
+void QCALLTYPE AssemblyNative::GetCodeBase(QCall::AssemblyHandle pAssembly, BOOL fCopiedName, QCall::StringHandleOnStack retString)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ StackSString codebase;
+
+#ifndef FEATURE_CORECLR
+ if (pAssembly->IsSystem()) {
+ // workaround: lie about the location of mscorlib. Some callers assume it is in the install dir.
+ codebase.Set(SystemDomain::System()->BaseLibrary());
+ PEAssembly::PathToUrl(codebase);
+ }
+ else
+#endif // !FEATURE_CORECLR
+ {
+ pAssembly->GetFile()->GetCodeBase(codebase);
+ }
+
+ retString.Set(codebase);
+
+ END_QCALL;
+}
+
+INT32 QCALLTYPE AssemblyNative::GetHashAlgorithm(QCall::AssemblyHandle pAssembly)
+{
+ QCALL_CONTRACT;
+
+ INT32 retVal=0;
+ BEGIN_QCALL;
+ retVal = pAssembly->GetFile()->GetHashAlgId();
+ END_QCALL;
+ return retVal;
+}
+
+INT32 QCALLTYPE AssemblyNative::GetFlags(QCall::AssemblyHandle pAssembly)
+{
+ QCALL_CONTRACT;
+
+ INT32 retVal=0;
+ BEGIN_QCALL;
+ retVal = pAssembly->GetFile()->GetFlags();
+ END_QCALL;
+ return retVal;
+}
+
+BYTE * QCALLTYPE AssemblyNative::GetResource(QCall::AssemblyHandle pAssembly, LPCWSTR wszName, UINT64 * length, QCall::StackCrawlMarkHandle stackMark, BOOL skipSecurityCheck)
+{
+ QCALL_CONTRACT;
+
+ PBYTE pbInMemoryResource = NULL;
+
+ BEGIN_QCALL;
+
+ if (wszName == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_String"));
+
+ // Get the name in UTF8
+ SString name(SString::Literal, wszName);
+
+ StackScratchBuffer scratch;
+ LPCUTF8 pNameUTF8 = name.GetUTF8(scratch);
+
+ if (*pNameUTF8 == '\0')
+ COMPlusThrow(kArgumentException, W("Format_StringZeroLength"));
+
+ DWORD cbResource;
+ if (pAssembly->GetResource(pNameUTF8, &cbResource,
+ &pbInMemoryResource, NULL, NULL,
+ NULL, stackMark, skipSecurityCheck, FALSE))
+ {
+ *length = cbResource;
+ }
+
+ END_QCALL;
+
+ // Can return NULL if the resource file is zero-length
+ return pbInMemoryResource;
+}
+
+#ifndef FEATURE_CORECLR
+
+BOOL QCALLTYPE AssemblyNative::UseRelativeBindForSatellites()
+{
+ QCALL_CONTRACT;
+
+ BOOL retVal = TRUE;
+
+ BEGIN_QCALL;
+ retVal = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_RelativeBindForResources);
+ END_QCALL;
+
+ return retVal;
+
+}
+#endif // !FEATURE_CORECLR
+
+INT32 QCALLTYPE AssemblyNative::GetManifestResourceInfo(QCall::AssemblyHandle pAssembly, LPCWSTR wszName, QCall::ObjectHandleOnStack retAssembly, QCall::StringHandleOnStack retFileName, QCall::StackCrawlMarkHandle stackMark)
+{
+ QCALL_CONTRACT;
+
+ INT32 rv = -1;
+
+ BEGIN_QCALL;
+
+ if (wszName == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_String"));
+
+ // Get the name in UTF8
+ SString name(SString::Literal, wszName);
+
+ StackScratchBuffer scratch;
+ LPCUTF8 pNameUTF8 = name.GetUTF8(scratch);
+
+ if (*pNameUTF8 == '\0')
+ COMPlusThrow(kArgumentException, W("Format_StringZeroLength"));
+
+ DomainAssembly * pReferencedAssembly = NULL;
+ LPCSTR pFileName = NULL;
+ DWORD dwLocation = 0;
+
+ if (pAssembly->GetResource(pNameUTF8, NULL, NULL, &pReferencedAssembly, &pFileName,
+ &dwLocation, stackMark, FALSE, FALSE))
+ {
+ if (pFileName)
+ retFileName.Set(pFileName);
+
+ GCX_COOP();
+
+ if (pReferencedAssembly)
+ retAssembly.Set(pReferencedAssembly->GetExposedAssemblyObject());
+
+ rv = dwLocation;
+ }
+
+ END_QCALL;
+
+ return rv;
+}
+
+void QCALLTYPE AssemblyNative::GetModules(QCall::AssemblyHandle pAssembly, BOOL fLoadIfNotFound, BOOL fGetResourceModules, QCall::ObjectHandleOnStack retModules)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ HENUMInternalHolder phEnum(pAssembly->GetMDImport());
+ phEnum.EnumInit(mdtFile, mdTokenNil);
+
+ InlineSArray<DomainFile *, 8> modules;
+
+ modules.Append(pAssembly);
+
+ ReflectionModule *pOnDiskManifest = NULL;
+ if (pAssembly->GetAssembly()->NeedsToHideManifestForEmit())
+ pOnDiskManifest = pAssembly->GetAssembly()->GetOnDiskManifestModule();
+
+ mdFile mdFile;
+ while (pAssembly->GetMDImport()->EnumNext(&phEnum, &mdFile))
+ {
+ DomainFile *pModule = pAssembly->GetModule()->LoadModule(GetAppDomain(), mdFile, fGetResourceModules, !fLoadIfNotFound);
+
+ if (pModule && pModule->GetModule() != pOnDiskManifest) {
+ modules.Append(pModule);
+ }
+ }
+
+ {
+ GCX_COOP();
+
+ PTRARRAYREF orModules = NULL;
+
+ GCPROTECT_BEGIN(orModules);
+
+ // Return the modules
+ orModules = (PTRARRAYREF)AllocateObjectArray(modules.GetCount(), MscorlibBinder::GetClass(CLASS__MODULE));
+
+ for(COUNT_T i = 0; i < modules.GetCount(); i++)
+ {
+ DomainFile * pModule = modules[i];
+
+ OBJECTREF o = pModule->GetExposedModuleObject();
+ orModules->SetAt(i, o);
+ }
+
+ retModules.Set(orModules);
+
+ GCPROTECT_END();
+ }
+
+ END_QCALL;
+}
+
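+// The metadata enumeration idiom used by GetModules above and by several
+// functions below (sketch; the holder closes the enumerator on scope exit):
+//
+//   HENUMInternalHolder hEnum(pImport);    // owns the metadata enumerator
+//   hEnum.EnumInit(mdtFile, mdTokenNil);   // enumerate all mdtFile tokens
+//   mdFile tk;
+//   while (pImport->EnumNext(&hEnum, &tk))
+//   {
+//       // ... process tk ...
+//   }                                      // holder releases the enumerator
+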
+BOOL QCALLTYPE AssemblyNative::GetNeutralResourcesLanguageAttribute(QCall::AssemblyHandle pAssembly, QCall::StringHandleOnStack cultureName, INT16& outFallbackLocation)
+{
+ CONTRACTL {
+ QCALL_CHECK;
+ } CONTRACTL_END;
+
+ BOOL retVal = FALSE;
+ BEGIN_QCALL;
+
+ _ASSERTE(pAssembly);
+ Assembly * pAsm = pAssembly->GetAssembly();
+ _ASSERTE(pAsm);
+ Module * pModule = pAsm->GetManifestModule();
+ _ASSERTE(pModule);
+
+ LPCUTF8 pszCultureName = NULL;
+ ULONG cultureNameLength = 0;
+ INT16 fallbackLocation = 0;
+
+ // find the attribute if it exists
+ if (pModule->GetNeutralResourcesLanguage(&pszCultureName, &cultureNameLength, &fallbackLocation, FALSE)) {
+ StackSString culture(SString::Utf8, pszCultureName, cultureNameLength);
+ cultureName.Set(culture);
+ outFallbackLocation = fallbackLocation;
+ retVal = TRUE;
+ }
+
+ END_QCALL;
+
+ return retVal;
+}
+
+void QCALLTYPE AssemblyNative::GetModule(QCall::AssemblyHandle pAssembly, LPCWSTR wszFileName, QCall::ObjectHandleOnStack retModule)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ Module * pModule = NULL;
+
+ CQuickBytes qbLC;
+
+ if (wszFileName == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_FileName"));
+ if (wszFileName[0] == W('\0'))
+ COMPlusThrow(kArgumentException, W("Argument_EmptyFileName"));
+
+
+ MAKE_UTF8PTR_FROMWIDE(szModuleName, wszFileName);
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ // Need to perform case insensitive lookup.
+ {
+ UTF8_TO_LOWER_CASE(szModuleName, qbLC);
+ szModuleName = (LPUTF8) qbLC.Ptr();
+ }
+
+ HashDatum datum = NULL;
+
+ // m_pAllowedFiles only grows - entries are never deleted from it. So we do not take
+ // a lock around GetValue. If the code is modified such that we delete entries from m_pAllowedFiles,
+ // reconsider whether we should take the m_crstAllowedFiles lock here (see the uses of datum below).
+ if (pAssembly->GetAssembly()->m_pAllowedFiles->GetValue(szModuleName, &datum))
+ {
+ if (datum)
+ {
+ // internal module
+ mdFile tokFile = (mdFile)(UINT_PTR)datum;
+
+ pModule = pAssembly->GetModule()->LoadModule(GetAppDomain(), tokFile)->GetModule();
+ }
+ else
+ { // manifest module
+ pModule = pAssembly->GetDomainAssembly()->GetModule();
+ }
+ }
+#else
+
+ LPCUTF8 pModuleName = NULL;
+
+ if (SUCCEEDED(pAssembly->GetDomainAssembly()->GetModule()->GetScopeName(&pModuleName)))
+ {
+ if (::SString::_stricmp(pModuleName, szModuleName) == 0)
+ pModule = pAssembly->GetDomainAssembly()->GetModule();
+ }
+
+#endif
+
+ if (pModule != NULL)
+ {
+ GCX_COOP();
+ retModule.Set(pModule->GetExposedObject());
+ }
+
+ END_QCALL;
+
+ return;
+}
+
+void QCALLTYPE AssemblyNative::GetExportedTypes(QCall::AssemblyHandle pAssembly, QCall::ObjectHandleOnStack retTypes)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ InlineSArray<TypeHandle, 20> types;
+
+ Assembly * pAsm = pAssembly->GetAssembly();
+
+ IMDInternalImport *pImport = pAsm->GetManifestImport();
+
+ {
+ HENUMTypeDefInternalHolder phTDEnum(pImport);
+ phTDEnum.EnumTypeDefInit();
+
+ mdTypeDef mdTD;
+ while(pImport->EnumNext(&phTDEnum, &mdTD))
+ {
+ DWORD dwFlags;
+ IfFailThrow(pImport->GetTypeDefProps(
+ mdTD,
+ &dwFlags,
+ NULL));
+
+ // nested type
+ mdTypeDef mdEncloser = mdTD;
+ while (SUCCEEDED(pImport->GetNestedClassProps(mdEncloser, &mdEncloser)) &&
+ IsTdNestedPublic(dwFlags))
+ {
+ IfFailThrow(pImport->GetTypeDefProps(
+ mdEncloser,
+ &dwFlags,
+ NULL));
+ }
+
+ if (IsTdPublic(dwFlags))
+ {
+ TypeHandle typeHnd = ClassLoader::LoadTypeDefThrowing(pAsm->GetManifestModule(), mdTD,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ types.Append(typeHnd);
+ }
+ }
+ }
+
+ {
+ HENUMInternalHolder phCTEnum(pImport);
+ phCTEnum.EnumInit(mdtExportedType, mdTokenNil);
+
+ // Now get the ExportedTypes that don't have TD's in the manifest file
+ mdExportedType mdCT;
+ while(pImport->EnumNext(&phCTEnum, &mdCT))
+ {
+ mdToken mdImpl;
+ LPCSTR pszNameSpace;
+ LPCSTR pszClassName;
+ DWORD dwFlags;
+
+ IfFailThrow(pImport->GetExportedTypeProps(
+ mdCT,
+ &pszNameSpace,
+ &pszClassName,
+ &mdImpl,
+ NULL, //binding
+ &dwFlags));
+
+ // nested type
+ while ((TypeFromToken(mdImpl) == mdtExportedType) &&
+ (mdImpl != mdExportedTypeNil) &&
+ IsTdNestedPublic(dwFlags))
+ {
+ IfFailThrow(pImport->GetExportedTypeProps(
+ mdImpl,
+ NULL, //namespace
+ NULL, //name
+ &mdImpl,
+ NULL, //binding
+ &dwFlags));
+ }
+
+ if ((TypeFromToken(mdImpl) == mdtFile) &&
+ (mdImpl != mdFileNil) &&
+ IsTdPublic(dwFlags))
+ {
+ NameHandle typeName(pszNameSpace, pszClassName);
+ typeName.SetTypeToken(pAsm->GetManifestModule(), mdCT);
+ TypeHandle typeHnd = pAsm->GetLoader()->LoadTypeHandleThrowIfFailed(&typeName);
+
+ types.Append(typeHnd);
+ }
+ }
+ }
+
+ {
+ GCX_COOP();
+
+ PTRARRAYREF orTypes = NULL;
+
+ GCPROTECT_BEGIN(orTypes);
+
+ // Return the types
+ orTypes = (PTRARRAYREF)AllocateObjectArray(types.GetCount(), MscorlibBinder::GetClass(CLASS__TYPE));
+
+ for(COUNT_T i = 0; i < types.GetCount(); i++)
+ {
+ TypeHandle typeHnd = types[i];
+
+ OBJECTREF o = typeHnd.GetManagedClassObject();
+ orTypes->SetAt(i, o);
+ }
+
+ retTypes.Set(orTypes);
+
+ GCPROTECT_END();
+ }
+
+ END_QCALL;
+}
+
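+// A restatement of the visibility walk in GetExportedTypes above: a nested
+// type is exported only if it is nested-public at every level and its
+// outermost encloser is public, so the loop rewrites dwFlags while climbing
+// the encloser chain and the final IsTdPublic test applies to the top-level
+// type.
+//
+//   mdTypeDef mdEncloser = mdTD;
+//   while (SUCCEEDED(pImport->GetNestedClassProps(mdEncloser, &mdEncloser)) &&
+//          IsTdNestedPublic(dwFlags))
+//   {
+//       IfFailThrow(pImport->GetTypeDefProps(mdEncloser, &dwFlags, NULL));
+//   }
+//   if (IsTdPublic(dwFlags)) { /* load and report the type */ }
+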
+void QCALLTYPE AssemblyNative::GetForwardedTypes(QCall::AssemblyHandle pAssembly, QCall::ObjectHandleOnStack retTypes)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ InlineSArray<TypeHandle, 8> types;
+
+ Assembly * pAsm = pAssembly->GetAssembly();
+
+ IMDInternalImport *pImport = pAsm->GetManifestImport();
+
+ // enumerate the ExportedTypes table
+ {
+ HENUMInternalHolder phCTEnum(pImport);
+ phCTEnum.EnumInit(mdtExportedType, mdTokenNil);
+
+ // Pick out the forwarded types: ExportedType rows whose implementation is an AssemblyRef
+ mdExportedType mdCT;
+ while(pImport->EnumNext(&phCTEnum, &mdCT))
+ {
+ mdToken mdImpl;
+ LPCSTR pszNameSpace;
+ LPCSTR pszClassName;
+ DWORD dwFlags;
+
+ IfFailThrow(pImport->GetExportedTypeProps(mdCT,
+ &pszNameSpace,
+ &pszClassName,
+ &mdImpl,
+ NULL, //binding
+ &dwFlags));
+
+ if ((TypeFromToken(mdImpl) == mdtAssemblyRef) && (mdImpl != mdAssemblyRefNil))
+ {
+ NameHandle typeName(pszNameSpace, pszClassName);
+ typeName.SetTypeToken(pAsm->GetManifestModule(), mdCT);
+ TypeHandle typeHnd = pAsm->GetLoader()->LoadTypeHandleThrowIfFailed(&typeName);
+
+ types.Append(typeHnd);
+ }
+ }
+ }
+
+ // Populate retTypes
+ {
+ GCX_COOP();
+
+ PTRARRAYREF orTypes = NULL;
+
+ GCPROTECT_BEGIN(orTypes);
+
+ // Return the types
+ orTypes = (PTRARRAYREF)AllocateObjectArray(types.GetCount(), MscorlibBinder::GetClass(CLASS__TYPE));
+
+ for(COUNT_T i = 0; i < types.GetCount(); i++)
+ {
+ TypeHandle typeHnd = types[i];
+
+ OBJECTREF o = typeHnd.GetManagedClassObject();
+ orTypes->SetAt(i, o);
+ }
+
+ retTypes.Set(orTypes);
+
+ GCPROTECT_END();
+ }
+
+ END_QCALL;
+}
+
+FCIMPL1(Object*, AssemblyNative::GetManifestResourceNames, AssemblyBaseObject * pAssemblyUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ ASSEMBLYREF refAssembly = (ASSEMBLYREF)ObjectToOBJECTREF(pAssemblyUNSAFE);
+
+ if (refAssembly == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ DomainAssembly *pAssembly = refAssembly->GetDomainAssembly();
+ PTRARRAYREF rv = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(rv, refAssembly);
+
+ IMDInternalImport *pImport = pAssembly->GetMDImport();
+
+ HENUMInternalHolder phEnum(pImport);
+ DWORD dwCount;
+
+ phEnum.EnumInit(mdtManifestResource, mdTokenNil);
+ dwCount = pImport->EnumGetCount(&phEnum);
+
+ PTRARRAYREF ItemArray = (PTRARRAYREF) AllocateObjectArray(dwCount, g_pStringClass);
+
+ mdManifestResource mdResource;
+
+ GCPROTECT_BEGIN(ItemArray);
+ for(DWORD i = 0; i < dwCount; i++) {
+ pImport->EnumNext(&phEnum, &mdResource);
+ LPCSTR pszName = NULL;
+
+ IfFailThrow(pImport->GetManifestResourceProps(
+ mdResource,
+ &pszName, // name
+ NULL, // linkref
+ NULL, // offset
+ NULL)); //flags
+
+ OBJECTREF o = (OBJECTREF) StringObject::NewString(pszName);
+ ItemArray->SetAt(i, o);
+ }
+
+ rv = ItemArray;
+ GCPROTECT_END();
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(rv);
+}
+FCIMPLEND
+
+FCIMPL1(Object*, AssemblyNative::GetReferencedAssemblies, AssemblyBaseObject * pAssemblyUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ struct _gc {
+ PTRARRAYREF ItemArray;
+ ASSEMBLYNAMEREF pObj;
+ ASSEMBLYREF refAssembly;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ gc.refAssembly = (ASSEMBLYREF)ObjectToOBJECTREF(pAssemblyUNSAFE);
+
+ if (gc.refAssembly == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ DomainAssembly *pAssembly = gc.refAssembly->GetDomainAssembly();
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ IMDInternalImport *pImport = pAssembly->GetAssembly()->GetManifestImport();
+
+ MethodTable* pAsmNameClass = MscorlibBinder::GetClass(CLASS__ASSEMBLY_NAME);
+
+ HENUMInternalHolder phEnum(pImport);
+ DWORD dwCount = 0;
+
+ phEnum.EnumInit(mdtAssemblyRef, mdTokenNil);
+
+ dwCount = pImport->EnumGetCount(&phEnum);
+
+ mdAssemblyRef mdAssemblyRef;
+
+ gc.ItemArray = (PTRARRAYREF) AllocateObjectArray(dwCount, pAsmNameClass);
+
+ for(DWORD i = 0; i < dwCount; i++)
+ {
+ pImport->EnumNext(&phEnum, &mdAssemblyRef);
+
+ AssemblySpec spec;
+ spec.InitializeSpec(mdAssemblyRef, pImport);
+
+ gc.pObj = (ASSEMBLYNAMEREF) AllocateObject(pAsmNameClass);
+ spec.AssemblyNameInit(&gc.pObj, NULL);
+
+ gc.ItemArray->SetAt(i, (OBJECTREF) gc.pObj);
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(gc.ItemArray);
+}
+FCIMPLEND
+
+void QCALLTYPE AssemblyNative::GetEntryPoint(QCall::AssemblyHandle pAssembly, QCall::ObjectHandleOnStack retMethod)
+{
+ QCALL_CONTRACT;
+
+ MethodDesc* pMeth = NULL;
+
+ BEGIN_QCALL;
+
+ pMeth = pAssembly->GetAssembly()->GetEntryPoint();
+ if (pMeth != NULL)
+ {
+ GCX_COOP();
+ retMethod.Set(pMeth->GetStubMethodInfo());
+ }
+
+ END_QCALL;
+
+ return;
+}
+
+#ifndef FEATURE_CORECLR
+// prepare saving manifest to disk
+void QCALLTYPE AssemblyNative::PrepareForSavingManifestToDisk(QCall::AssemblyHandle pAssembly, QCall::ModuleHandle pAssemblyModule)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ pAssembly->GetAssembly()->PrepareSavingManifest((ReflectionModule *)(Module *)pAssemblyModule);
+
+ END_QCALL;
+}
+
+#endif
+
+#ifndef FEATURE_CORECLR
+// add a file name to the file list of this assembly. On disk only.
+mdFile QCALLTYPE AssemblyNative::AddFile(QCall::AssemblyHandle pAssembly, LPCWSTR wszFileName)
+{
+ QCALL_CONTRACT;
+
+ mdFile retVal = 0;
+
+ BEGIN_QCALL;
+
+ retVal = pAssembly->GetAssembly()->AddFile(wszFileName);
+
+ END_QCALL;
+
+ return retVal;
+}
+#endif //FEATURE_CORECLR
+
+#ifndef FEATURE_CORECLR
+// set the hash value on a file.
+void QCALLTYPE AssemblyNative::SetFileHashValue(QCall::AssemblyHandle pAssembly, INT32 tkFile, LPCWSTR wszFullFileName)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ pAssembly->GetAssembly()->SetFileHashValue(tkFile, wszFullFileName);
+
+ END_QCALL;
+}
+#endif //FEATURE_CORECLR
+
+#ifndef FEATURE_CORECLR
+// Add a Type name to the ExportedType table in the on-disk assembly manifest.
+mdExportedType QCALLTYPE AssemblyNative::AddExportedTypeOnDisk(QCall::AssemblyHandle pAssembly, LPCWSTR wszCOMTypeName, INT32 tkImpl, INT32 tkTypeDef, INT32 flags)
+{
+ QCALL_CONTRACT;
+
+ mdExportedType retVal = 0;
+
+ BEGIN_QCALL;
+
+ retVal = pAssembly->GetAssembly()->AddExportedTypeOnDisk(wszCOMTypeName, tkImpl, tkTypeDef, (CorTypeAttr)flags);
+
+ END_QCALL;
+
+ return retVal;
+}
+
+// Add a Type name to the ExportedType table in the in-memory assembly manifest.
+mdExportedType QCALLTYPE AssemblyNative::AddExportedTypeInMemory(QCall::AssemblyHandle pAssembly, LPCWSTR wszCOMTypeName, INT32 tkImpl, INT32 tkTypeDef, INT32 flags)
+{
+ QCALL_CONTRACT;
+
+ mdExportedType retVal = 0;
+
+ BEGIN_QCALL;
+
+ retVal = pAssembly->GetAssembly()->AddExportedTypeInMemory(wszCOMTypeName, tkImpl, tkTypeDef, (CorTypeAttr)flags);
+
+ END_QCALL;
+
+ return retVal;
+}
+#endif //FEATURE_CORECLR
+
+#ifndef FEATURE_CORECLR
+// add a Stand alone resource to ManifestResource table
+void QCALLTYPE AssemblyNative::AddStandAloneResource(QCall::AssemblyHandle pAssembly, LPCWSTR wszName, LPCWSTR wszFileName, LPCWSTR wszFullFileName, INT32 iAttribute)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ pAssembly->GetAssembly()->AddStandAloneResource(
+ wszName,
+ NULL,
+ NULL,
+ wszFileName,
+ wszFullFileName,
+ iAttribute);
+
+ END_QCALL;
+}
+#endif //FEATURE_CORECLR
+
+#ifndef FEATURE_CORECLR
+// Save security permission requests.
+void QCALLTYPE AssemblyNative::AddDeclarativeSecurity(QCall::AssemblyHandle pAssembly, INT32 action, PVOID blob, INT32 length)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ pAssembly->GetAssembly()->AddDeclarativeSecurity(action, blob, length);
+
+ END_QCALL;
+}
+#endif //FEATURE_CORECLR
+
+//---------------------------------------------------------------------------------------
+//
+// Get the raw bytes making up this assembly
+//
+// Arguments:
+// pAssembly - Assembly to get the data of
+// retRawBytes - [out] raw bytes of the assembly
+//
+
+// static
+void QCALLTYPE AssemblyNative::GetRawBytes(QCall::AssemblyHandle pAssembly,
+ QCall::ObjectHandleOnStack retRawBytes)
+{
+ QCALL_CONTRACT;
+ BEGIN_QCALL;
+
+ PEFile *pPEFile = pAssembly->GetFile();
+ if (pPEFile != NULL)
+ {
+ PEImage *pPEImage = pPEFile->GetILimage();
+
+ if (pPEImage != NULL)
+ {
+ SBuffer dataBuffer;
+ pPEImage->GetImageBits(PEImageLayout::LAYOUT_FLAT, dataBuffer);
+
+ if (dataBuffer.GetSize() > 0)
+ {
+ retRawBytes.SetByteArray(dataBuffer, dataBuffer.GetSize());
+ }
+ }
+ }
+
+ END_QCALL;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Release QCALL for System.SafePEFileHandle
+//
+//
+
+// static
+void QCALLTYPE AssemblyNative::ReleaseSafePEFileHandle(PEFile *pPEFile)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pPEFile));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ pPEFile->Release();
+
+ END_QCALL;
+}
+
+// Save the manifest to disk.
+extern void ManagedBitnessFlagsToUnmanagedBitnessFlags(
+ INT32 portableExecutableKind, INT32 imageFileMachine,
+ DWORD* pPeFlags, DWORD* pCorhFlags);
+
+#ifndef FEATURE_CORECLR
+void QCALLTYPE AssemblyNative::SaveManifestToDisk(QCall::AssemblyHandle pAssembly,
+ LPCWSTR wszManifestFileName,
+ INT32 entrypoint,
+ INT32 fileKind,
+ INT32 portableExecutableKind,
+ INT32 imageFileMachine)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ DWORD peFlags = 0, corhFlags = 0;
+ ManagedBitnessFlagsToUnmanagedBitnessFlags(portableExecutableKind, imageFileMachine, &peFlags, &corhFlags);
+
+ pAssembly->GetAssembly()->SaveManifestToDisk(wszManifestFileName, entrypoint, fileKind, corhFlags, peFlags);
+
+ END_QCALL;
+}
+#endif // !FEATURE_CORECLR
+
+void QCALLTYPE AssemblyNative::GetFullName(QCall::AssemblyHandle pAssembly, QCall::StringHandleOnStack retString)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ StackSString name;
+ pAssembly->GetFile()->GetDisplayName(name);
+ retString.Set(name);
+
+ END_QCALL;
+}
+
+void QCALLTYPE AssemblyNative::GetExecutingAssembly(QCall::StackCrawlMarkHandle stackMark, QCall::ObjectHandleOnStack retAssembly)
+{
+ QCALL_CONTRACT;
+
+ DomainAssembly * pExecutingAssembly = NULL;
+
+ BEGIN_QCALL;
+
+ Assembly* pAssembly = SystemDomain::GetCallersAssembly(stackMark);
+ if(pAssembly)
+ {
+ pExecutingAssembly = pAssembly->GetDomainAssembly();
+ GCX_COOP();
+ retAssembly.Set(pExecutingAssembly->GetExposedAssemblyObject());
+ }
+
+ END_QCALL;
+ return;
+}
+
+void QCALLTYPE AssemblyNative::GetEntryAssembly(QCall::ObjectHandleOnStack retAssembly)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ DomainAssembly * pRootAssembly = NULL;
+ Assembly * pAssembly = GetAppDomain()->m_pRootAssembly;
+
+ if (pAssembly)
+ {
+ pRootAssembly = pAssembly->GetDomainAssembly();
+ GCX_COOP();
+ retAssembly.Set(pRootAssembly->GetExposedAssemblyObject());
+ }
+
+ END_QCALL;
+
+ return;
+}
+
+
+void QCALLTYPE AssemblyNative::GetGrantSet(QCall::AssemblyHandle pAssembly, QCall::ObjectHandleOnStack retGranted, QCall::ObjectHandleOnStack retDenied)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ IAssemblySecurityDescriptor *pSecDesc = pAssembly->GetSecurityDescriptor();
+
+ {
+ GCX_COOP();
+
+ pSecDesc->Resolve();
+
+ OBJECTREF granted, denied;
+
+ granted = pSecDesc->GetGrantedPermissionSet(&denied);
+
+ retGranted.Set(granted);
+ retDenied.Set(denied);
+ }
+
+ END_QCALL;
+}
+
+#ifdef FEATURE_LEGACYNETCF
+BOOL QCALLTYPE AssemblyNative::GetIsProfileAssembly(QCall::AssemblyHandle pAssembly)
+{
+ QCALL_CONTRACT;
+
+ BOOL fIsProfile = FALSE;
+
+ BEGIN_QCALL;
+
+ fIsProfile = pAssembly->GetFile()->IsProfileAssembly();
+
+ END_QCALL;
+
+ return fIsProfile;
+}
+#endif // FEATURE_LEGACYNETCF
+
+//
+// QCalls to determine if everything introduced by the assembly is either security critical or safe critical
+//
+
+// static
+BOOL QCALLTYPE AssemblyNative::IsAllSecurityCritical(QCall::AssemblyHandle pAssembly)
+{
+ QCALL_CONTRACT;
+
+ BOOL fIsCritical = FALSE;
+
+ BEGIN_QCALL;
+
+ fIsCritical = pAssembly->GetSecurityDescriptor()->IsAllCritical();
+
+ END_QCALL;
+
+ return fIsCritical;
+}
+
+// static
+BOOL QCALLTYPE AssemblyNative::IsAllSecuritySafeCritical(QCall::AssemblyHandle pAssembly)
+{
+ QCALL_CONTRACT;
+
+ BOOL fIsSafeCritical = FALSE;
+
+ BEGIN_QCALL;
+
+ fIsSafeCritical = pAssembly->GetSecurityDescriptor()->IsAllSafeCritical();
+
+ END_QCALL;
+
+ return fIsSafeCritical;
+}
+
+// static
+BOOL QCALLTYPE AssemblyNative::IsAllPublicAreaSecuritySafeCritical(QCall::AssemblyHandle pAssembly)
+{
+ QCALL_CONTRACT;
+
+ BOOL fIsAllPublicAreaSafeCritical = FALSE;
+
+ BEGIN_QCALL;
+
+ fIsAllPublicAreaSafeCritical = pAssembly->GetSecurityDescriptor()->IsAllPublicAreaSafeCritical();
+
+ END_QCALL;
+
+ return fIsAllPublicAreaSafeCritical;
+}
+
+// static
+BOOL QCALLTYPE AssemblyNative::IsAllSecurityTransparent(QCall::AssemblyHandle pAssembly)
+{
+ QCALL_CONTRACT;
+
+ BOOL fIsTransparent = FALSE;
+
+ BEGIN_QCALL;
+
+ fIsTransparent = pAssembly->GetSecurityDescriptor()->IsAllTransparent();
+
+ END_QCALL;
+
+ return fIsTransparent;
+}
+
+// Return the on-disk assembly module for Reflection.Emit. This only works for dynamic assemblies.
+FCIMPL1(ReflectModuleBaseObject *, AssemblyNative::GetOnDiskAssemblyModule, AssemblyBaseObject* pAssemblyUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ ASSEMBLYREF refAssembly = (ASSEMBLYREF)ObjectToOBJECTREF(pAssemblyUNSAFE);
+
+ if (refAssembly == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ DomainAssembly *pAssembly = refAssembly->GetDomainAssembly();
+
+ FC_RETURN_MODULE_OBJECT(pAssembly->GetCurrentAssembly()->GetOnDiskManifestModule(), refAssembly);
+}
+FCIMPLEND
+
+// Return the in-memory assembly module for Reflection.Emit. This only works for dynamic assemblies.
+FCIMPL1(ReflectModuleBaseObject *, AssemblyNative::GetInMemoryAssemblyModule, AssemblyBaseObject* pAssemblyUNSAFE)
+{
+ FCALL_CONTRACT;
+
+
+ ASSEMBLYREF refAssembly = (ASSEMBLYREF)ObjectToOBJECTREF(pAssemblyUNSAFE);
+
+ if (refAssembly == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ DomainAssembly *pAssembly = refAssembly->GetDomainAssembly();
+
+ FC_RETURN_MODULE_OBJECT(pAssembly->GetCurrentModule(), refAssembly);
+}
+FCIMPLEND
+
+
+#ifndef FEATURE_CORECLR
+// Create a stand-alone resource file for version resource.
+void QCALLTYPE AssemblyNative::CreateVersionInfoResource(LPCWSTR pwzFilename,
+ LPCWSTR pwzTitle,
+ LPCWSTR pwzIconFilename,
+ LPCWSTR pwzDescription,
+ LPCWSTR pwzCopyright,
+ LPCWSTR pwzTrademark,
+ LPCWSTR pwzCompany,
+ LPCWSTR pwzProduct,
+ LPCWSTR pwzProductVersion,
+ LPCWSTR pwzFileVersion,
+ INT32 lcid,
+ BOOL fIsDll,
+ QCall::StringHandleOnStack retFileName)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ Win32Res res; // Resource helper object.
+ const void *pvData=0; // Pointer to the resource.
+ ULONG cbData; // Size of the resource data.
+ ULONG cbWritten;
+ WCHAR szFile[MAX_PATH+1]; // File name for resource file.
+ WCHAR szPath[MAX_PATH+1]; // Path name for resource file.
+ HandleHolder hFile;
+
+ res.SetInfo(pwzFilename,
+ pwzTitle,
+ pwzIconFilename,
+ pwzDescription,
+ pwzCopyright,
+ pwzTrademark,
+ pwzCompany,
+ pwzProduct,
+ pwzProductVersion,
+ pwzFileVersion,
+ lcid,
+ fIsDll);
+
+ res.MakeResFile(&pvData, &cbData);
+
+ //<TODO>Change the COMPlusThrowWin32's to exceptions with
+ // messages including the path/file name</TODO>
+
+ // Persist to a file.
+ if (!WszGetTempPath(MAX_PATH, szPath))
+ COMPlusThrowWin32();
+ if (!WszGetTempFileName(szPath, W("RES"), 0, szFile))
+ COMPlusThrowWin32();
+
+ hFile = WszCreateFile(szFile, GENERIC_READ|GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, NULL);
+ if (hFile == INVALID_HANDLE_VALUE)
+ COMPlusThrowWin32();
+
+ if (!WriteFile(hFile, pvData, cbData, &cbWritten, NULL))
+ COMPlusThrowWin32();
+
+ retFileName.Set(szFile);
+
+ END_QCALL;
+}
+#endif // !FEATURE_CORECLR
+
+#ifndef FEATURE_CORECLR
+FCIMPL1(FC_BOOL_RET, AssemblyNative::IsGlobalAssemblyCache, AssemblyBaseObject* pAssemblyUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ ASSEMBLYREF refAssembly = (ASSEMBLYREF)ObjectToOBJECTREF(pAssemblyUNSAFE);
+
+ if (refAssembly == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ DomainAssembly *pAssembly = refAssembly->GetDomainAssembly();
+
+ FC_RETURN_BOOL(pAssembly->GetFile()->IsSourceGAC());
+}
+FCIMPLEND
+#endif // !FEATURE_CORECLR
+
+void QCALLTYPE AssemblyNative::GetImageRuntimeVersion(QCall::AssemblyHandle pAssembly, QCall::StringHandleOnStack retString)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ // Retrieve the PEFile from the assembly.
+ PEFile* pPEFile = pAssembly->GetFile();
+ PREFIX_ASSUME(pPEFile!=NULL);
+
+ LPCSTR pszVersion = NULL;
+ IfFailThrow(pPEFile->GetMDImport()->GetVersionString(&pszVersion));
+
+ SString version(SString::Utf8, pszVersion);
+#ifndef FEATURE_CORECLR
+ AdjustImageRuntimeVersion(&version);
+#endif // !FEATURE_CORECLR
+
+ // Allocate a managed string that contains the version and return it.
+ retString.Set(version);
+
+ END_QCALL;
+}
+
+#ifdef FEATURE_FUSION
+INT64 QCALLTYPE AssemblyNative::GetHostContext(QCall::AssemblyHandle pAssembly)
+{
+ QCALL_CONTRACT;
+
+ UINT64 Context = 0;
+
+ BEGIN_QCALL;
+
+ IHostAssembly *pIHostAssembly = pAssembly->GetFile()->GetIHostAssembly();
+ if (pIHostAssembly != NULL)
+ {
+ IfFailThrow(pIHostAssembly->GetAssemblyContext(&Context));
+ }
+
+ END_QCALL;
+
+ return Context;
+}
+#endif // FEATURE_FUSION
+
+#ifdef FEATURE_CAS_POLICY
+BOOL QCALLTYPE AssemblyNative::IsStrongNameVerified(QCall::AssemblyHandle pAssembly)
+{
+ QCALL_CONTRACT;
+
+ BOOL fStrongNameVerified = FALSE;
+
+ BEGIN_QCALL;
+
+ PEFile *pPEFile = pAssembly->GetFile();
+ fStrongNameVerified = pPEFile->IsStrongNameVerified();
+
+ END_QCALL;
+
+ return fStrongNameVerified;
+}
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_APPX
+/*static*/
+BOOL QCALLTYPE AssemblyNative::IsDesignerBindingContext(QCall::AssemblyHandle pAssembly)
+{
+ QCALL_CONTRACT;
+
+ BOOL fRet = FALSE;
+
+ BEGIN_QCALL;
+
+ PEFile *pPEFile = pAssembly->GetFile();
+ fRet = pPEFile->IsDesignerBindingContext();
+
+ END_QCALL;
+
+ return fRet;
+}
+#endif // FEATURE_APPX
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+/*static*/
+INT_PTR QCALLTYPE AssemblyNative::InitializeAssemblyLoadContext(INT_PTR ptrManagedAssemblyLoadContext)
+{
+ QCALL_CONTRACT;
+
+ INT_PTR ptrNativeAssemblyLoadContext = NULL;
+
+ BEGIN_QCALL;
+
+ // We do not need to take a lock since this method is invoked from the ctor of the managed
+ // AssemblyLoadContext type and only one thread ever executes the ctor for a given instance.
+ //
+ // Initialize a binder
+ CLRPrivBinderAssemblyLoadContext *pBindContext = NULL;
+
+ // Initialize the assembly binder instance in the VM
+ PTR_AppDomain pCurDomain = AppDomain::GetCurrentDomain();
+ CLRPrivBinderCoreCLR *pTPABinderContext = pCurDomain->GetTPABinderContext();
+ IfFailThrow(CLRPrivBinderAssemblyLoadContext::SetupContext(pCurDomain->GetId().m_dwId, pTPABinderContext, ptrManagedAssemblyLoadContext, &pBindContext));
+ _ASSERTE(pBindContext != NULL);
+
+ ptrNativeAssemblyLoadContext = reinterpret_cast<INT_PTR>(pBindContext);
+
+ END_QCALL;
+
+ return ptrNativeAssemblyLoadContext;
+}
+
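+// A hedged sketch of the managed/native handshake set up above (domainId is a
+// placeholder for the current AppDomain id): the managed AssemblyLoadContext
+// ctor passes a handle to itself as an INT_PTR, the VM creates a
+// CLRPrivBinderAssemblyLoadContext that remembers it, and
+// GetLoadContextForAssembly later returns that same INT_PTR so managed code
+// can map an assembly back to its AssemblyLoadContext instance.
+//
+//   CLRPrivBinderAssemblyLoadContext *pBinder = NULL;
+//   IfFailThrow(CLRPrivBinderAssemblyLoadContext::SetupContext(
+//       domainId, pTPABinderContext, ptrManagedAssemblyLoadContext, &pBinder));
+//   // later: pBinder->GetManagedAssemblyLoadContext() yields the managed handle
+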
+/*static*/
+BOOL QCALLTYPE AssemblyNative::OverrideDefaultAssemblyLoadContextForCurrentDomain(INT_PTR ptrNativeAssemblyLoadContext)
+{
+ QCALL_CONTRACT;
+
+ BOOL fOverrodeDefaultLoadContext = FALSE;
+
+ BEGIN_QCALL;
+
+ AppDomain *pCurDomain = AppDomain::GetCurrentDomain();
+
+ if (pCurDomain->LockBindingModel())
+ {
+ // Only one thread will ever enter here - it will be the one that actually locked the binding model
+ //
+ // AssemblyLoadContext should have a binder associated with it
+ IUnknown *pOverrideBinder = reinterpret_cast<IUnknown *>(ptrNativeAssemblyLoadContext);
+ _ASSERTE(pOverrideBinder != NULL);
+
+ // Get reference to the current default context binder
+
+ IUnknown * pCurrentDefaultContextBinder = pCurDomain->GetFusionContext();
+
+ // The default context binder can never be null since the runtime always sets one up
+ _ASSERTE(pCurrentDefaultContextBinder != NULL);
+
+ // The default context should also be the same as TPABinder context
+ _ASSERTE(pCurrentDefaultContextBinder == pCurDomain->GetTPABinderContext());
+
+ // Override the default context binder in the VM
+ pCurDomain->OverrideDefaultContextBinder(pOverrideBinder);
+
+ fOverrodeDefaultLoadContext = TRUE;
+ }
+
+ END_QCALL;
+
+ return fOverrodeDefaultLoadContext;
+}
+
+BOOL QCALLTYPE AssemblyNative::CanUseAppPathAssemblyLoadContextInCurrentDomain()
+{
+ QCALL_CONTRACT;
+
+ BOOL fCanUseAppPathAssemblyLoadContext = FALSE;
+
+ BEGIN_QCALL;
+
+ AppDomain *pCurDomain = AppDomain::GetCurrentDomain();
+
+ pCurDomain->LockBindingModel();
+
+ fCanUseAppPathAssemblyLoadContext = !pCurDomain->IsHostAssemblyResolverInUse();
+
+ END_QCALL;
+
+ return fCanUseAppPathAssemblyLoadContext;
+}
+
+/*static*/
+INT_PTR QCALLTYPE AssemblyNative::GetLoadContextForAssembly(QCall::AssemblyHandle pAssembly)
+{
+ QCALL_CONTRACT;
+
+ INT_PTR ptrManagedAssemblyLoadContext = NULL;
+
+ BEGIN_QCALL;
+
+ // Get the PEAssembly for the RuntimeAssembly
+ PEFile *pPEFile = pAssembly->GetFile();
+ PTR_PEAssembly pPEAssembly = pPEFile->AsAssembly();
+ _ASSERTE(pPEAssembly != NULL);
+
+ // Platform assemblies are semantically bound against the "Default" binder which could be the TPA Binder or
+ // the overridden binder. In either case, the reference to the same will be returned when this QCall returns.
+ if (!pPEAssembly->IsProfileAssembly())
+ {
+ // Get the binding context for the assembly.
+ //
+ // GetBindingContext returns a ICLRPrivAssembly which can be used to get access to the
+ // actual ICLRPrivBinder instance in which the assembly was loaded.
+ PTR_ICLRPrivBinder pBindingContext = pPEAssembly->GetBindingContext();
+ UINT_PTR assemblyBinderID = 0;
+ IfFailThrow(pBindingContext->GetBinderID(&assemblyBinderID));
+
+ AppDomain *pCurDomain = AppDomain::GetCurrentDomain();
+ CLRPrivBinderCoreCLR *pTPABinder = pCurDomain->GetTPABinderContext();
+
+ // If the assembly was bound using the TPA binder,
+ // then we will return the reference to "Default" binder from the managed implementation when this QCall returns.
+ //
+ // See earlier comment about "Default" binder for additional context.
+ ICLRPrivBinder *pOpaqueBinder = reinterpret_cast<ICLRPrivBinder *>(assemblyBinderID);
+ if (!AreSameBinderInstance(pTPABinder, pOpaqueBinder))
+ {
+ // Only CLRPrivBinderAssemblyLoadContext instance contains the reference to its
+ // corresponding managed instance.
+ CLRPrivBinderAssemblyLoadContext *pBinder = (CLRPrivBinderAssemblyLoadContext *)(pOpaqueBinder);
+
+ // Fetch the managed binder reference from the native binder instance
+ ptrManagedAssemblyLoadContext = pBinder->GetManagedAssemblyLoadContext();
+ _ASSERTE(ptrManagedAssemblyLoadContext != NULL);
+ }
+ }
+
+ END_QCALL;
+
+ return ptrManagedAssemblyLoadContext;
+}
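+
+// A restatement of the binder-identity test above: GetBinderID yields an
+// address-like cookie for the binder, so comparing cookies distinguishes the
+// TPA binder from a custom AssemblyLoadContext binder without any RTTI.
+//
+//   UINT_PTR id = 0;
+//   IfFailThrow(pBindingContext->GetBinderID(&id));
+//   ICLRPrivBinder *pOpaque = reinterpret_cast<ICLRPrivBinder *>(id);
+//   BOOL fIsTPA = AreSameBinderInstance(pTPABinder, pOpaque);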
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
diff --git a/src/vm/assemblynative.hpp b/src/vm/assemblynative.hpp
new file mode 100644
index 0000000000..a1b1ca2a3a
--- /dev/null
+++ b/src/vm/assemblynative.hpp
@@ -0,0 +1,288 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: AssemblyNative.hpp
+**
+** Purpose: Implements FCalls for managed Assembly class
+**
+===========================================================*/
+#ifndef _ASSEMBLYNATIVE_H
+#define _ASSEMBLYNATIVE_H
+
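+// Convention note (inferred from the declarations below; sketch only):
+// FCDECLn entries are FCalls that run on the managed thread and take raw
+// object pointers such as AssemblyBaseObject*, while QCALLTYPE entries are
+// QCalls that marshal through handle wrappers and bracket their bodies with
+// BEGIN_QCALL/END_QCALL in assemblynative.cpp, e.g.:
+//
+//   void QCALLTYPE AssemblyNative::GetSimpleName(
+//       QCall::AssemblyHandle pAssembly, QCall::StringHandleOnStack retSimpleName)
+//   {
+//       QCALL_CONTRACT;
+//       BEGIN_QCALL;
+//       retSimpleName.Set(pAssembly->GetSimpleName());
+//       END_QCALL;
+//   }
+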
+class CLRPrivBinderAssemblyLoadContext;
+
+class AssemblyNative
+{
+ friend class Assembly;
+ friend class BaseDomain;
+ friend class DomainAssembly;
+
+private:
+ static Assembly* GetPostPolicyAssembly(PEAssembly *pFile,
+ BOOL fForIntrospection,
+ AssemblyLoadSecurity *pLoadSecurity,
+ BOOL fIsLoadByteArray = FALSE);
+
+ static Assembly* LoadFromBuffer(BOOL fForIntrospection,
+ const BYTE* pAssemblyData,
+ UINT64 uAssemblyLength,
+ const BYTE* pPDBData,
+ UINT64 uPDBLength,
+ StackCrawlMark* stackMark,
+ Object * securityUNSAFE,
+ SecurityContextSource securityContextSource);
+public:
+ // static FCALLs
+ static
+ void QCALLTYPE GetEntryAssembly(QCall::ObjectHandleOnStack retAssembly);
+
+ static
+ void QCALLTYPE GetExecutingAssembly(QCall::StackCrawlMarkHandle stackMark, QCall::ObjectHandleOnStack retAssembly);
+
+ static FCDECL2(Object*, LoadFile, StringObject* pathUNSAFE,
+ Object* securityUNSAFE);
+ static FCDECL6(Object*, LoadImage, U1Array* PEByteArrayUNSAFE, U1Array* SymByteArrayUNSAFE, Object* securityUNSAFE, StackCrawlMark* stackMark, CLR_BOOL fForIntrospection, SecurityContextSource securityContextSource);
+#ifdef FEATURE_CORECLR
+ static
+ void QCALLTYPE LoadFromUnmanagedArray(CLR_BOOL fForIntrospection,
+ BYTE* pAssembly,
+ UINT64 uAssemblyLength,
+ BYTE* pPDB,
+ UINT64 uPDBLength,
+ QCall::StackCrawlMarkHandle stackMark,
+ QCall::ObjectHandleOnStack retAssembly);
+#endif
+
+#ifdef FEATURE_HOSTED_BINDER
+ static FCDECL9(Object*, Load, AssemblyNameBaseObject* assemblyNameUNSAFE,
+ StringObject* codeBaseUNSAFE,
+ Object* securityUNSAFE,
+ AssemblyBaseObject* requestingAssemblyUNSAFE,
+ StackCrawlMark* stackMark,
+ ICLRPrivBinder * pPrivHostBinder,
+ CLR_BOOL fThrowOnFileNotFound,
+ CLR_BOOL fForIntrospection,
+ CLR_BOOL fSuppressSecurityChecks);
+
+#else //!FEATURE_HOSTED_BINDER
+ static FCDECL8(Object*, Load, AssemblyNameBaseObject* assemblyNameUNSAFE,
+ StringObject* codeBaseUNSAFE,
+ Object* securityUNSAFE,
+ AssemblyBaseObject* requestingAssemblyUNSAFE,
+ StackCrawlMark* stackMark,
+ CLR_BOOL fThrowOnFileNotFound,
+ CLR_BOOL fForIntrospection,
+ CLR_BOOL fSuppressSecurityChecks);
+#endif // FEATURE_HOSTED_BINDER
+
+ static FCDECL1(FC_BOOL_RET, IsFrameworkAssembly, AssemblyNameBaseObject* refAssemblyNameUNSAFE);
+ static FCDECL1(FC_BOOL_RET, IsNewPortableAssembly, AssemblyNameBaseObject* refAssemblyNameUNSAFE);
+
+ //
+ // instance FCALLs
+ //
+
+ static
+ void QCALLTYPE GetLocale(QCall::AssemblyHandle pAssembly, QCall::StringHandleOnStack retString);
+
+ static
+ INT32 QCALLTYPE GetHashAlgorithm(QCall::AssemblyHandle pAssembly);
+
+#ifndef FEATURE_CORECLR
+ static
+ BYTE QCALLTYPE GetSecurityRuleSet(QCall::AssemblyHandle pAssembly);
+#endif // !FEATURE_CORECLR
+
+ static
+ void QCALLTYPE GetSimpleName(QCall::AssemblyHandle pAssembly, QCall::StringHandleOnStack retSimpleName);
+
+ static
+ void QCALLTYPE GetPublicKey(QCall::AssemblyHandle pAssembly, QCall::ObjectHandleOnStack retPublicKey);
+
+ static
+ INT32 QCALLTYPE GetFlags(QCall::AssemblyHandle pAssembly);
+
+ static
+ void QCALLTYPE GetFullName(QCall::AssemblyHandle pAssembly, QCall::StringHandleOnStack retString);
+
+ static
+ void QCALLTYPE GetLocation(QCall::AssemblyHandle pAssembly, QCall::StringHandleOnStack retString);
+
+ static
+ FCDECL1(FC_BOOL_RET, IsReflectionOnly, AssemblyBaseObject * pAssemblyUNSAFE);
+
+ static
+ void QCALLTYPE GetCodeBase(QCall::AssemblyHandle pAssembly, BOOL fCopiedName, QCall::StringHandleOnStack retString);
+
+ static
+ BYTE * QCALLTYPE GetResource(QCall::AssemblyHandle pAssembly, LPCWSTR wszName, UINT64 * length, QCall::StackCrawlMarkHandle stackMark, BOOL skipSecurityCheck);
+
+ static
+ BOOL QCALLTYPE GetNeutralResourcesLanguageAttribute(QCall::AssemblyHandle pAssembly, QCall::StringHandleOnStack cultureName, INT16& outFallbackLocation);
+
+ static
+ FCDECL1(FC_BOOL_RET, IsDynamic, AssemblyBaseObject * pAssemblyUNSAFE);
+
+ static
+ void QCALLTYPE GetVersion(QCall::AssemblyHandle pAssembly, INT32* pMajorVersion, INT32* pMinorVersion, INT32*pBuildNumber, INT32* pRevisionNumber);
+
+ static
+ void QCALLTYPE LoadModule(QCall::AssemblyHandle pAssembly,
+ LPCWSTR wszModuleName,
+ LPCBYTE pRawModule, INT32 cbModule,
+ LPCBYTE pRawSymbolStore, INT32 cbSymbolStore,
+ QCall::ObjectHandleOnStack retModule);
+
+ static
+ void QCALLTYPE GetType(QCall::AssemblyHandle pAssembly, LPCWSTR wszName, BOOL bThrowOnError, BOOL bIgnoreCase, QCall::ObjectHandleOnStack retType);
+
+ static
+ INT32 QCALLTYPE GetManifestResourceInfo(QCall::AssemblyHandle pAssembly, LPCWSTR wszName, QCall::ObjectHandleOnStack retAssembly, QCall::StringHandleOnStack retFileName, QCall::StackCrawlMarkHandle stackMark);
+
+ static
+ BOOL QCALLTYPE UseRelativeBindForSatellites();
+
+ static
+ void QCALLTYPE GetModules(QCall::AssemblyHandle pAssembly, BOOL fLoadIfNotFound, BOOL fGetResourceModules, QCall::ObjectHandleOnStack retModules);
+
+ static
+ void QCALLTYPE GetModule(QCall::AssemblyHandle pAssembly, LPCWSTR wszFileName, QCall::ObjectHandleOnStack retModule);
+
+ static
+ void QCALLTYPE GetExportedTypes(QCall::AssemblyHandle pAssembly, QCall::ObjectHandleOnStack retTypes);
+
+ static
+ void QCALLTYPE GetForwardedTypes(QCall::AssemblyHandle pAssembly, QCall::ObjectHandleOnStack retTypes);
+
+ static FCDECL1(Object*, GetManifestResourceNames, AssemblyBaseObject * pAssemblyUNSAFE);
+ static FCDECL1(Object*, GetReferencedAssemblies, AssemblyBaseObject * pAssemblyUNSAFE);
+
+ static
+ void QCALLTYPE GetEntryPoint(QCall::AssemblyHandle pAssembly, QCall::ObjectHandleOnStack retMethod);
+
+ static FCDECL1(ReflectModuleBaseObject *, GetOnDiskAssemblyModule, AssemblyBaseObject * pAssemblyUNSAFE);
+ static FCDECL1(ReflectModuleBaseObject *, GetInMemoryAssemblyModule, AssemblyBaseObject * pAssemblyUNSAFE);
+
+#ifndef FEATURE_CORECLR
+ static
+ FCDECL1(FC_BOOL_RET, IsGlobalAssemblyCache, AssemblyBaseObject* pAssemblyUNSAFE);
+#endif // !FEATURE_CORECLR
+
+ static
+ void QCALLTYPE GetGrantSet(QCall::AssemblyHandle pAssembly, QCall::ObjectHandleOnStack retGranted, QCall::ObjectHandleOnStack retDenied);
+
+ static
+ BOOL QCALLTYPE IsAllSecurityCritical(QCall::AssemblyHandle pAssembly);
+
+ static
+ BOOL QCALLTYPE IsAllSecuritySafeCritical(QCall::AssemblyHandle pAssembly);
+
+ static
+ BOOL QCALLTYPE IsAllPublicAreaSecuritySafeCritical(QCall::AssemblyHandle pAssembly);
+
+ static
+ BOOL QCALLTYPE IsAllSecurityTransparent(QCall::AssemblyHandle pAssembly);
+
+ static
+ void QCALLTYPE GetImageRuntimeVersion(QCall::AssemblyHandle pAssembly, QCall::StringHandleOnStack retString);
+
+#ifdef FEATURE_LEGACYNETCF
+ static
+ BOOL QCALLTYPE GetIsProfileAssembly(QCall::AssemblyHandle pAssembly);
+#endif // FEATURE_LEGACYNETCF
+
+ static
+ INT64 QCALLTYPE GetHostContext(QCall::AssemblyHandle pAssembly);
+
+#ifdef FEATURE_CAS_POLICY
+ static
+ BOOL QCALLTYPE IsStrongNameVerified(QCall::AssemblyHandle pAssembly);
+#endif // FEATURE_CAS_POLICY
+
+ //
+ // AssemblyBuilder FCALLs
+ //
+
+ static
+ void QCALLTYPE PrepareForSavingManifestToDisk(QCall::AssemblyHandle pAssembly, QCall::ModuleHandle pAssemblyModule);
+
+#ifndef FEATURE_CORECLR
+ static
+ void QCALLTYPE SaveManifestToDisk(QCall::AssemblyHandle pAssembly,
+ LPCWSTR wszManifestFileName,
+ INT32 entrypoint,
+ INT32 fileKind,
+ INT32 portableExecutableKind,
+ INT32 imageFileMachine);
+
+ static
+ mdExportedType QCALLTYPE AddExportedTypeOnDisk(QCall::AssemblyHandle pAssembly, LPCWSTR wzzCOMTypeName, INT32 tkImpl, INT32 tkTypeDef, INT32 flags);
+
+ static
+ mdExportedType QCALLTYPE AddExportedTypeInMemory(QCall::AssemblyHandle pAssembly, LPCWSTR wzzCOMTypeName, INT32 tkImpl, INT32 tkTypeDef, INT32 flags);
+
+#endif // FEATURE_CORECLR
+
+ static
+ mdFile QCALLTYPE AddFile(QCall::AssemblyHandle pAssembly, LPCWSTR wszFileName);
+
+ static
+ void QCALLTYPE SetFileHashValue(QCall::AssemblyHandle pAssembly, INT32 tkFile, LPCWSTR wszFullFileName);
+
+ static
+ void QCALLTYPE AddStandAloneResource(QCall::AssemblyHandle pAssembly, LPCWSTR wszName, LPCWSTR wszFileName, LPCWSTR wszFullFileName, INT32 iAttribute);
+
+ static
+ void QCALLTYPE AddDeclarativeSecurity(QCall::AssemblyHandle pAssembly, INT32 action, PVOID blob, INT32 length);
+
+#ifndef FEATURE_CORECLR
+ static
+ void QCALLTYPE CreateVersionInfoResource(LPCWSTR pwzFilename,
+ LPCWSTR pwzTitle,
+ LPCWSTR pwzIconFilename,
+ LPCWSTR pwzDescription,
+ LPCWSTR pwzCopyright,
+ LPCWSTR pwzTrademark,
+ LPCWSTR pwzCompany,
+ LPCWSTR pwzProduct,
+ LPCWSTR pwzProductVersion,
+ LPCWSTR pwzFileVersion,
+ INT32 lcid,
+ BOOL fIsDll,
+ QCall::StringHandleOnStack retFileName);
+#endif // !FEATURE_CORECLR
+
+ static
+ void QCALLTYPE GetRawBytes(QCall::AssemblyHandle pAssembly, QCall::ObjectHandleOnStack retRawBytes);
+
+ //
+ // PEFile QCalls
+ //
+
+ static
+ void QCALLTYPE ReleaseSafePEFileHandle(PEFile *pPEFile);
+
+#ifdef FEATURE_APPX
+ static
+ BOOL QCALLTYPE IsDesignerBindingContext(QCall::AssemblyHandle pAssembly);
+#endif
+
+ static INT_PTR QCALLTYPE InitializeAssemblyLoadContext(INT_PTR ptrManagedAssemblyLoadContext);
+ static BOOL QCALLTYPE OverrideDefaultAssemblyLoadContextForCurrentDomain(INT_PTR ptrNativeAssemblyLoadContext);
+ static BOOL QCALLTYPE CanUseAppPathAssemblyLoadContextInCurrentDomain();
+ static void QCALLTYPE LoadFromPath(INT_PTR ptrNativeAssemblyLoadContext, LPCWSTR pwzILPath, LPCWSTR pwzNIPath, QCall::ObjectHandleOnStack retLoadedAssembly);
+ static void QCALLTYPE LoadFromStream(INT_PTR ptrNativeAssemblyLoadContext, INT_PTR ptrAssemblyArray, INT32 cbAssemblyArrayLength, INT_PTR ptrSymbolArray, INT32 cbSymbolArrayLength, QCall::ObjectHandleOnStack retLoadedAssembly);
+ static Assembly* LoadFromPEImage(CLRPrivBinderAssemblyLoadContext* pBinderContext, PEImage *pILImage, PEImage *pNIImage);
+    static INT_PTR QCALLTYPE GetLoadContextForAssembly(QCall::AssemblyHandle pAssembly);
+};
+
+#endif
+
diff --git a/src/vm/assemblynativeresource.cpp b/src/vm/assemblynativeresource.cpp
new file mode 100644
index 0000000000..2f13e2b78d
--- /dev/null
+++ b/src/vm/assemblynativeresource.cpp
@@ -0,0 +1,586 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+////////////////////////////////////////////////////////////////////////////////
+// ResFile.CPP
+
+
+
+#include "common.h"
+
+#include "assemblynativeresource.h"
+#include <limits.h>
+
+#ifndef CP_WINUNICODE
+ #define CP_WINUNICODE 1200
+#endif
+
+#ifndef MAKEINTRESOURCE
+ #define MAKEINTRESOURCE MAKEINTRESOURCEW
+#endif
+
+Win32Res::Win32Res()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ m_szFile = NULL;
+ m_Icon = NULL;
+    for (int i = 0; i < NUM_VALUES; i++)
+        m_Values[i] = NULL;
+ m_fDll = false;
+ m_pData = NULL;
+ m_pCur = NULL;
+ m_pEnd = NULL;
+}
+
+Win32Res::~Win32Res()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ m_szFile = NULL;
+ m_Icon = NULL;
+    for (int i = 0; i < NUM_VALUES; i++)
+        m_Values[i] = NULL;
+ m_fDll = false;
+ if (m_pData)
+ delete [] m_pData;
+ m_pData = NULL;
+    m_pCur = NULL;
+    m_pEnd = NULL;
+}
+
+//*****************************************************************************
+// Initializes the structures with version information.
+//*****************************************************************************
+VOID Win32Res::SetInfo(
+ LPCWSTR szFile,
+ LPCWSTR szTitle,
+ LPCWSTR szIconName,
+ LPCWSTR szDescription,
+ LPCWSTR szCopyright,
+ LPCWSTR szTrademark,
+ LPCWSTR szCompany,
+ LPCWSTR szProduct,
+ LPCWSTR szProductVersion,
+ LPCWSTR szFileVersion,
+ LCID lcid,
+ BOOL fDLL)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(szFile != NULL);
+
+ m_szFile = szFile;
+ if (szIconName && szIconName[0] != 0)
+        m_Icon = szIconName;    // a non-empty string
+
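+    // Map NULL or empty strings to a single space so every stored value is
+    // non-empty.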
+#define NonNull(sz) (sz == NULL || *sz == W('\0') ? W(" ") : sz)
+ m_Values[v_Description] = NonNull(szDescription);
+ m_Values[v_Title] = NonNull(szTitle);
+ m_Values[v_Copyright] = NonNull(szCopyright);
+ m_Values[v_Trademark] = NonNull(szTrademark);
+ m_Values[v_Product] = NonNull(szProduct);
+ m_Values[v_ProductVersion] = NonNull(szProductVersion);
+ m_Values[v_Company] = NonNull(szCompany);
+ m_Values[v_FileVersion] = NonNull(szFileVersion);
+#undef NonNull
+
+ m_fDll = fDLL;
+ m_lcid = lcid;
+}
+
+VOID Win32Res::MakeResFile(const void **pData, DWORD *pcbData)
+{
+ STANDARD_VM_CONTRACT;
+
+ static const RESOURCEHEADER magic = { 0x00000000, 0x00000020, 0xFFFF, 0x0000, 0xFFFF, 0x0000,
+ 0x00000000, 0x0000, 0x0000, 0x00000000, 0x00000000 };
+ _ASSERTE(pData != NULL && pcbData != NULL);
+
+ *pData = NULL;
+ *pcbData = 0;
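+    // Initial allocation: three resource headers plus the fixed-size part of
+    // the version resource; Write() grows the buffer as string blocks and
+    // icon data are appended.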
+ m_pData = new BYTE[(sizeof(RESOURCEHEADER) * 3 + sizeof(EXEVERRESOURCE))];
+
+ m_pCur = m_pData;
+ m_pEnd = m_pData + sizeof(RESOURCEHEADER) * 3 + sizeof(EXEVERRESOURCE);
+
+ // inject the magic empty entry
+ Write( &magic, sizeof(magic) );
+
+ WriteVerResource();
+
+ if (m_Icon)
+ {
+ WriteIconResource();
+ }
+
+ *pData = m_pData;
+ *pcbData = (DWORD)(m_pCur - m_pData);
+ return;
+}
+
+
+/*
+ * WriteIconResource
+ * Writes the Icon resource into the RES file.
+ *
+ * RETURNS: nothing; throws on failure
+ */
+VOID Win32Res::WriteIconResource()
+{
+ STANDARD_VM_CONTRACT;
+
+ HandleHolder hIconFile = INVALID_HANDLE_VALUE;
+ WORD wTemp, wCount, resID = 2; // Skip 1 for the version ID
+ DWORD dwRead = 0, dwWritten = 0;
+
+ RESOURCEHEADER grpHeader = { 0x00000000, 0x00000020, 0xFFFF, (WORD)(size_t)RT_GROUP_ICON, 0xFFFF, 0x7F00, // 0x7F00 == IDI_APPLICATION
+ 0x00000000, 0x1030, 0x0000, 0x00000000, 0x00000000 };
+
+ // Read the icon
+ hIconFile = WszCreateFile( m_Icon, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING,
+ FILE_FLAG_SEQUENTIAL_SCAN, NULL);
+ if (hIconFile == INVALID_HANDLE_VALUE) {
+ COMPlusThrowWin32();
+ }
+
+ // Read the magic reserved WORD
+ if (ReadFile( hIconFile, &wTemp, sizeof(WORD), &dwRead, NULL) == FALSE) {
+ COMPlusThrowWin32();
+ } else if (wTemp != 0 || dwRead != sizeof(WORD)) {
+ COMPlusThrowHR(HRESULT_FROM_WIN32(ERROR_INVALID_DATA));
+ }
+
+ // Verify the Type WORD
+ if (ReadFile( hIconFile, &wCount, sizeof(WORD), &dwRead, NULL) == FALSE) {
+ COMPlusThrowWin32();
+ } else if (wCount != 1 || dwRead != sizeof(WORD)) {
+ COMPlusThrowHR(HRESULT_FROM_WIN32(ERROR_INVALID_DATA));
+ }
+
+ // Read the Count WORD
+ if (ReadFile( hIconFile, &wCount, sizeof(WORD), &dwRead, NULL) == FALSE) {
+ COMPlusThrowWin32();
+ } else if (wCount == 0 || dwRead != sizeof(WORD)) {
+ COMPlusThrowHR(HRESULT_FROM_WIN32(ERROR_INVALID_DATA));
+ }
+
+ NewArrayHolder<ICONRESDIR> grp = new ICONRESDIR[wCount];
+ grpHeader.DataSize = 3 * sizeof(WORD) + wCount * sizeof(ICONRESDIR);
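+    // The group's data is a GRPICONDIR: three WORDs (reserved, type, count)
+    // followed by one ICONRESDIR per image, which is what DataSize reflects.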
+
+ // For each Icon
+ for (WORD i = 0; i < wCount; i++) {
+ ICONDIRENTRY ico;
+ DWORD icoPos, newPos;
+ RESOURCEHEADER icoHeader = { 0x00000000, 0x00000020, 0xFFFF, (WORD)(size_t)RT_ICON, 0xFFFF, 0x0000,
+ 0x00000000, 0x1010, 0x0000, 0x00000000, 0x00000000 };
+ icoHeader.Name = resID++;
+
+ // Read the Icon header
+ if (ReadFile( hIconFile, &ico, sizeof(ICONDIRENTRY), &dwRead, NULL) == FALSE) {
+ COMPlusThrowWin32();
+ }
+ else if (dwRead != sizeof(ICONDIRENTRY)) {
+ COMPlusThrowHR(HRESULT_FROM_WIN32(ERROR_INVALID_DATA));
+ }
+
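+        // ICONRESDIR matches ICONDIRENTRY except that the trailing DWORD
+        // dwImageOffset is replaced by the WORD IconId, hence the size
+        // relationship asserted below.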
+ _ASSERTE(sizeof(ICONRESDIR) + sizeof(WORD) == sizeof(ICONDIRENTRY));
+ memcpy(grp + i, &ico, sizeof(ICONRESDIR));
+ grp[i].IconId = icoHeader.Name;
+ icoHeader.DataSize = ico.dwBytesInRes;
+
+ NewArrayHolder<BYTE> icoBuffer = new BYTE[icoHeader.DataSize];
+
+ // Write the header to the RES file
+ Write( &icoHeader, sizeof(RESOURCEHEADER) );
+
+ // Position to read the Icon data
+ icoPos = SetFilePointer( hIconFile, 0, NULL, FILE_CURRENT);
+ if (icoPos == INVALID_SET_FILE_POINTER) {
+ COMPlusThrowWin32();
+ }
+ newPos = SetFilePointer( hIconFile, ico.dwImageOffset, NULL, FILE_BEGIN);
+ if (newPos == INVALID_SET_FILE_POINTER) {
+ COMPlusThrowWin32();
+ }
+
+ // Actually read the data
+ if (ReadFile( hIconFile, icoBuffer, icoHeader.DataSize, &dwRead, NULL) == FALSE) {
+ COMPlusThrowWin32();
+ }
+ else if (dwRead != icoHeader.DataSize) {
+ COMPlusThrowHR(HRESULT_FROM_WIN32(ERROR_INVALID_DATA));
+ }
+
+ // Because Icon files don't seem to record the actual Planes and BitCount in
+ // the ICONDIRENTRY, get the info from the BITMAPINFOHEADER at the beginning
+ // of the data here:
+ grp[i].Planes = ((BITMAPINFOHEADER*)(BYTE*)icoBuffer)->biPlanes;
+ grp[i].BitCount = ((BITMAPINFOHEADER*)(BYTE*)icoBuffer)->biBitCount;
+
+ // Now write the data to the RES file
+ Write( (BYTE*)icoBuffer, icoHeader.DataSize );
+
+ // Reposition to read the next Icon header
+ newPos = SetFilePointer( hIconFile, icoPos, NULL, FILE_BEGIN);
+ if (newPos != icoPos) {
+ COMPlusThrowWin32();
+ }
+ }
+
+    // inject the icon group's resource header
+    Write( &grpHeader, sizeof(RESOURCEHEADER) );
+
+    // Write the GRPICONDIR fields that make up the group's data
+ wTemp = 0; // the reserved WORD
+ Write( &wTemp, sizeof(WORD) );
+
+ wTemp = RES_ICON; // the GROUP type
+ Write( &wTemp, sizeof(WORD) );
+
+ Write( &wCount, sizeof(WORD) );
+
+ // now write the entries
+ Write( grp, sizeof(ICONRESDIR) * wCount );
+
+ return;
+}
+
+/*
+ * WriteVerResource
+ * Writes the version resource into the RES file.
+ *
+ * RETURNS: nothing; throws on failure
+ */
+VOID Win32Res::WriteVerResource()
+{
+ STANDARD_VM_CONTRACT;
+
+ WCHAR szLangCp[9]; // language/codepage string.
+ EXEVERRESOURCE VerResource;
+ WORD cbStringBlocks;
+ int i;
+ bool bUseFileVer = false;
+ WCHAR rcFile[_MAX_PATH]; // Name of file without path
+ WCHAR rcFileExtension[_MAX_PATH]; // file extension
+ WCHAR rcFileName[_MAX_PATH]; // Name of file with extension but without path
+ DWORD cbTmp;
+
+ SplitPath(m_szFile, 0, 0, 0, 0, rcFile, _MAX_PATH, rcFileExtension, _MAX_PATH);
+
+ wcscpy_s(rcFileName, COUNTOF(rcFileName), rcFile);
+ wcscat_s(rcFileName, COUNTOF(rcFileName), rcFileExtension);
+
+ static const EXEVERRESOURCE VerResourceTemplate = {
+ sizeof(EXEVERRESOURCE), sizeof(VS_FIXEDFILEINFO), 0, W("VS_VERSION_INFO"),
+ {
+ VS_FFI_SIGNATURE, // Signature
+ VS_FFI_STRUCVERSION, // structure version
+ 0, 0, // file version number
+ 0, 0, // product version number
+ VS_FFI_FILEFLAGSMASK, // file flags mask
+ 0, // file flags
+ VOS__WINDOWS32,
+ VFT_APP, // file type
+ 0, // subtype
+ 0, 0 // file date/time
+ },
+ sizeof(WORD) * 2 + 2 * HDRSIZE + KEYBYTES("VarFileInfo") + KEYBYTES("Translation"),
+ 0,
+ 1,
+ W("VarFileInfo"),
+ sizeof(WORD) * 2 + HDRSIZE + KEYBYTES("Translation"),
+ sizeof(WORD) * 2,
+ 0,
+ W("Translation"),
+ 0,
+ 0,
+ 2 * HDRSIZE + KEYBYTES("StringFileInfo") + KEYBYTES("12345678"),
+ 0,
+ 1,
+ W("StringFileInfo"),
+ HDRSIZE + KEYBYTES("12345678"),
+ 0,
+ 1,
+ W("12345678")
+ };
+ static const WCHAR szComments[] = W("Comments");
+ static const WCHAR szCompanyName[] = W("CompanyName");
+ static const WCHAR szFileDescription[] = W("FileDescription");
+ static const WCHAR szCopyright[] = W("LegalCopyright");
+ static const WCHAR szTrademark[] = W("LegalTrademarks");
+ static const WCHAR szProdName[] = W("ProductName");
+ static const WCHAR szFileVerResName[] = W("FileVersion");
+ static const WCHAR szProdVerResName[] = W("ProductVersion");
+ static const WCHAR szInternalNameResName[] = W("InternalName");
+ static const WCHAR szOriginalNameResName[] = W("OriginalFilename");
+
+ // If there's no product version, use the file version
+ if (m_Values[v_ProductVersion][0] == 0) {
+ m_Values[v_ProductVersion] = m_Values[v_FileVersion];
+ bUseFileVer = true;
+ }
+
+ // Keep the two following arrays in the same order
+#define MAX_KEY 10
+ static const LPCWSTR szKeys [MAX_KEY] = {
+ szComments,
+ szCompanyName,
+ szFileDescription,
+ szFileVerResName,
+ szInternalNameResName,
+ szCopyright,
+ szTrademark,
+ szOriginalNameResName,
+ szProdName,
+ szProdVerResName,
+ };
+ LPCWSTR szValues [MAX_KEY] = { // values for keys
+ m_Values[v_Description], //compiler->assemblyDescription == NULL ? W("") : compiler->assemblyDescription,
+ m_Values[v_Company], // Company Name
+ m_Values[v_Title], // FileDescription //compiler->assemblyTitle == NULL ? W("") : compiler->assemblyTitle,
+ m_Values[v_FileVersion], // FileVersion
+ rcFileName, // InternalName
+ m_Values[v_Copyright], // Copyright
+ m_Values[v_Trademark], // Trademark
+ rcFileName, // OriginalName
+ m_Values[v_Product], // Product Name //compiler->assemblyTitle == NULL ? W("") : compiler->assemblyTitle,
+ m_Values[v_ProductVersion] // Product Version
+ };
+
+ memcpy(&VerResource, &VerResourceTemplate, sizeof(VerResource));
+
+ if (m_fDll)
+ VerResource.vsFixed.dwFileType = VFT_DLL;
+ else
+ VerResource.vsFixed.dwFileType = VFT_APP;
+
+ // Extract the numeric version from the string.
+ m_Version[0] = m_Version[1] = m_Version[2] = m_Version[3] = 0;
+ int nNumStrings = swscanf_s(m_Values[v_FileVersion], W("%hu.%hu.%hu.%hu"), m_Version, m_Version + 1, m_Version + 2, m_Version + 3);
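+    // swscanf_s reports how many components matched; anything it could not
+    // parse keeps the zero default set above.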
+
+ // Fill in the FIXEDFILEINFO
+ VerResource.vsFixed.dwFileVersionMS =
+ ((DWORD)m_Version[0] << 16) + m_Version[1];
+
+ VerResource.vsFixed.dwFileVersionLS =
+ ((DWORD)m_Version[2] << 16) + m_Version[3];
+
+ if (bUseFileVer) {
+ VerResource.vsFixed.dwProductVersionLS = VerResource.vsFixed.dwFileVersionLS;
+ VerResource.vsFixed.dwProductVersionMS = VerResource.vsFixed.dwFileVersionMS;
+ }
+ else {
+ WORD v[4];
+ v[0] = v[1] = v[2] = v[3] = 0;
+ // Try to get the version numbers, but don't waste time or give any errors
+ // just default to zeros
+ nNumStrings = swscanf_s(m_Values[v_ProductVersion], W("%hu.%hu.%hu.%hu"), v, v + 1, v + 2, v + 3);
+
+ VerResource.vsFixed.dwProductVersionMS =
+ ((DWORD)v[0] << 16) + v[1];
+
+ VerResource.vsFixed.dwProductVersionLS =
+ ((DWORD)v[2] << 16) + v[3];
+ }
+
+ // There is no documentation on what units to use for the date! So we use zero.
+ // The Windows resource compiler does too.
+ VerResource.vsFixed.dwFileDateMS = VerResource.vsFixed.dwFileDateLS = 0;
+
+ // Fill in codepage/language -- we'll assume the IDE language/codepage
+ // is the right one.
+ if (m_lcid != -1)
+ VerResource.langid = static_cast<WORD>(m_lcid);
+ else
+ VerResource.langid = MAKELANGID(LANG_NEUTRAL, SUBLANG_NEUTRAL);
+ VerResource.codepage = CP_WINUNICODE; // Unicode codepage.
+
+ swprintf_s(szLangCp, NumItems(szLangCp), W("%04x%04x"), VerResource.langid, VerResource.codepage);
+ wcscpy_s(VerResource.szLangCpKey, COUNTOF(VerResource.szLangCpKey), szLangCp);
+
+ // Determine the size of all the string blocks.
+ cbStringBlocks = 0;
+ for (i = 0; i < MAX_KEY; i++) {
+ if (szValues[i] == NULL || wcslen(szValues[i]) == 0)
+ continue;
+ cbTmp = SizeofVerString( szKeys[i], szValues[i]);
+ if ((cbStringBlocks + cbTmp) > USHRT_MAX / 2)
+ COMPlusThrow(kArgumentException, W("Argument_VerStringTooLong"));
+ cbStringBlocks += (WORD) cbTmp;
+ }
+
+ if ((cbStringBlocks + VerResource.cbLangCpBlock) > USHRT_MAX / 2)
+ COMPlusThrow(kArgumentException, W("Argument_VerStringTooLong"));
+ VerResource.cbLangCpBlock += cbStringBlocks;
+
+ if ((cbStringBlocks + VerResource.cbStringBlock) > USHRT_MAX / 2)
+ COMPlusThrow(kArgumentException, W("Argument_VerStringTooLong"));
+ VerResource.cbStringBlock += cbStringBlocks;
+
+ if ((cbStringBlocks + VerResource.cbRootBlock) > USHRT_MAX / 2)
+ COMPlusThrow(kArgumentException, W("Argument_VerStringTooLong"));
+ VerResource.cbRootBlock += cbStringBlocks;
+
+ // Call this VS_VERSION_INFO
+ RESOURCEHEADER verHeader = { 0x00000000, 0x0000003C, 0xFFFF, (WORD)(size_t)RT_VERSION, 0xFFFF, 0x0001,
+ 0x00000000, 0x0030, 0x0000, 0x00000000, 0x00000000 };
+ verHeader.DataSize = VerResource.cbRootBlock;
+
+ // Write the header
+ Write( &verHeader, sizeof(RESOURCEHEADER) );
+
+ // Write the version resource
+ Write( &VerResource, sizeof(VerResource) );
+
+
+ // Write each string block.
+ for (i = 0; i < MAX_KEY; i++) {
+ if (szValues[i] == NULL || wcslen(szValues[i]) == 0)
+ continue;
+ WriteVerString( szKeys[i], szValues[i] );
+ }
+#undef MAX_KEY
+
+ return;
+}
+
+/*
+ * SizeofVerString
+ *  Determines the size of the version block for the given key/value string pair.
+ * RETURNS: size of block in bytes.
+ */
+WORD Win32Res::SizeofVerString(LPCWSTR lpszKey, LPCWSTR lpszValue)
+{
+ STANDARD_VM_CONTRACT;
+
+ size_t cbKey, cbValue;
+
+ cbKey = (wcslen(lpszKey) + 1) * 2; // Make room for the NULL
+ cbValue = (wcslen(lpszValue) + 1) * 2;
+ if (cbValue == 2)
+ cbValue = 4; // Empty strings need a space and NULL terminator (for Win9x)
+ if (cbKey + cbValue >= 0xFFF0)
+ COMPlusThrow(kArgumentException, W("Argument_VerStringTooLong"));
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:6305) // "Potential mismatch between sizeof and countof quantities"
+#endif
+
+ return (WORD)(PadKeyLen(cbKey) + // key, 0 padded to DWORD boundary
+ PadValLen(cbValue) + // value, 0 padded to dword boundary
+ HDRSIZE); // block header.
+
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+}
+
+/*----------------------------------------------------------------------------
+ * WriteVerString
+ *  Writes a key/value version string block into the in-memory RES image.
+ */
+VOID Win32Res::WriteVerString( LPCWSTR lpszKey, LPCWSTR lpszValue)
+{
+ STANDARD_VM_CONTRACT;
+
+ size_t cbKey, cbValue, cbBlock;
+ bool bNeedsSpace = false;
+
+ cbKey = (wcslen(lpszKey) + 1) * 2; // includes terminating NUL
+ cbValue = wcslen(lpszValue);
+ if (cbValue > 0)
+ cbValue++; // make room for NULL
+ else {
+ bNeedsSpace = true;
+ cbValue = 2; // Make room for space and NULL (for Win9x)
+ }
+ cbBlock = SizeofVerString(lpszKey, lpszValue);
+
+ NewArrayHolder<BYTE> pbBlock = new BYTE[(DWORD)cbBlock + HDRSIZE];
+ ZeroMemory(pbBlock, (DWORD)cbBlock + HDRSIZE);
+
+ _ASSERTE(cbValue < USHRT_MAX && cbKey < USHRT_MAX && cbBlock < USHRT_MAX);
+
+ // Copy header, key and value to block.
+ *(WORD *)((BYTE *)pbBlock) = (WORD)cbBlock;
+ *(WORD *)(pbBlock + sizeof(WORD)) = (WORD)cbValue;
+ *(WORD *)(pbBlock + 2 * sizeof(WORD)) = 1; // 1 = text value
+ // size = (cbBlock + HDRSIZE - HDRSIZE) / sizeof(WCHAR)
+ wcscpy_s((WCHAR*)(pbBlock + HDRSIZE), (cbBlock / sizeof(WCHAR)), lpszKey);
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:6305) // "Potential mismatch between sizeof and countof quantities"
+#endif
+
+ if (bNeedsSpace)
+ *((WCHAR*)(pbBlock + (HDRSIZE + PadKeyLen(cbKey)))) = W(' ');
+ else
+ {
+ wcscpy_s((WCHAR*)(pbBlock + (HDRSIZE + PadKeyLen(cbKey))),
+ //size = ((cbBlock + HDRSIZE) - (HDRSIZE + PadKeyLen(cbKey))) / sizeof(WCHAR)
+ (cbBlock - PadKeyLen(cbKey))/sizeof(WCHAR),
+ lpszValue);
+ }
+
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+ // Write block
+ Write( pbBlock, cbBlock);
+
+ return;
+}
+
+VOID Win32Res::Write(LPCVOID pData, size_t len)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (m_pCur + len > m_pEnd) {
+ // Grow
+ size_t newSize = (m_pEnd - m_pData);
+
+ // double the size unless we need more than that
+ if (len > newSize)
+ newSize += len;
+ else
+ newSize *= 2;
+
+ LPBYTE pNew = new BYTE[newSize];
+ memcpy(pNew, m_pData, m_pCur - m_pData);
+ delete [] m_pData;
+ // Relocate the pointers
+ m_pCur = pNew + (m_pCur - m_pData);
+ m_pData = pNew;
+ m_pEnd = pNew + newSize;
+ }
+
+ // Copy it in
+ memcpy(m_pCur, pData, len);
+ m_pCur += len;
+ return;
+}
+
diff --git a/src/vm/assemblynativeresource.h b/src/vm/assemblynativeresource.h
new file mode 100644
index 0000000000..9c7057384d
--- /dev/null
+++ b/src/vm/assemblynativeresource.h
@@ -0,0 +1,135 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+////////////////////////////////////////////////////////////////////////////////
+// ResFile.H
+// This handles Win32 resources (.res data generation)
+//
+
+
+
+#pragma once
+
+class CFile;
+
+class Win32Res {
+public:
+ Win32Res();
+ ~Win32Res();
+
+ VOID SetInfo(LPCWSTR szFile,
+ LPCWSTR szTitle,
+ LPCWSTR szIconName,
+ LPCWSTR szDescription,
+ LPCWSTR szCopyright,
+ LPCWSTR szTrademark,
+ LPCWSTR szCompany,
+ LPCWSTR szProduct,
+ LPCWSTR szProductVersion,
+ LPCWSTR szFileVersion,
+ LCID lcid,
+ BOOL fDLL);
+ VOID MakeResFile(const void **pData, DWORD *pcbData);
+
+private:
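+// Version-resource blocks are a 3-WORD header (HDRSIZE) followed by a WCHAR
+// key and a DWORD-aligned value. PadKeyLen pads a key (which starts 2 bytes
+// past DWORD alignment, right after the header) so that the value following
+// it is DWORD-aligned; PadValLen rounds a value length up to a DWORD boundary.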
+#define PadKeyLen(cb) ((((cb) + 5) & ~3) - 2)
+#define PadValLen(cb) ((cb + 3) & ~3)
+#define KEYSIZE(sz) (PadKeyLen(sizeof(sz)*sizeof(WCHAR))/sizeof(WCHAR))
+#define KEYBYTES(sz) (KEYSIZE(sz)*sizeof(WCHAR))
+#define HDRSIZE (3 * sizeof(WORD))
+
+ static WORD SizeofVerString(LPCWSTR lpszKey, LPCWSTR lpszValue);
+ VOID WriteVerString(LPCWSTR lpszKey, LPCWSTR lpszValue);
+ VOID WriteVerResource();
+ VOID WriteIconResource();
+
+ VOID Write(LPCVOID pData, size_t len);
+ LPCWSTR m_szFile;
+ LPCWSTR m_Icon;
+ enum {
+ v_Description,
+ v_Title,
+ v_Copyright,
+ v_Trademark,
+ v_Product,
+ v_ProductVersion,
+ v_Company,
+ v_FileVersion,
+ NUM_VALUES
+ };
+ LPCWSTR m_Values[NUM_VALUES];
+ ULONG m_Version[4];
+ int m_lcid;
+ BOOL m_fDll;
+ PBYTE m_pData;
+ PBYTE m_pCur;
+ PBYTE m_pEnd;
+
+
+ // RES file structs (borrowed from MSDN)
+#pragma pack( push)
+#pragma pack(1)
+ struct RESOURCEHEADER {
+ DWORD DataSize;
+ DWORD HeaderSize;
+ WORD Magic1;
+ WORD Type;
+ WORD Magic2;
+ WORD Name;
+ DWORD DataVersion;
+ WORD MemoryFlags;
+ WORD LanguageId;
+ DWORD Version;
+ DWORD Characteristics;
+ };
+
+ struct ICONDIRENTRY {
+ BYTE bWidth;
+ BYTE bHeight;
+ BYTE bColorCount;
+ BYTE bReserved;
+ WORD wPlanes;
+ WORD wBitCount;
+ DWORD dwBytesInRes;
+ DWORD dwImageOffset;
+ };
+
+ struct ICONRESDIR {
+ BYTE Width; // = ICONDIRENTRY.bWidth;
+ BYTE Height; // = ICONDIRENTRY.bHeight;
+ BYTE ColorCount; // = ICONDIRENTRY.bColorCount;
+ BYTE reserved; // = ICONDIRENTRY.bReserved;
+ WORD Planes; // = ICONDIRENTRY.wPlanes;
+ WORD BitCount; // = ICONDIRENTRY.wBitCount;
+ DWORD BytesInRes; // = ICONDIRENTRY.dwBytesInRes;
+ WORD IconId; // = RESOURCEHEADER.Name
+ };
+ struct EXEVERRESOURCE {
+ WORD cbRootBlock; // size of whole resource
+ WORD cbRootValue; // size of VS_FIXEDFILEINFO structure
+ WORD fRootText; // root is text?
+ WCHAR szRootKey[KEYSIZE("VS_VERSION_INFO")]; // Holds "VS_VERSION_INFO"
+ VS_FIXEDFILEINFO vsFixed; // fixed information.
+ WORD cbVarBlock; // size of VarFileInfo block
+ WORD cbVarValue; // always 0
+ WORD fVarText; // VarFileInfo is text?
+ WCHAR szVarKey[KEYSIZE("VarFileInfo")]; // Holds "VarFileInfo"
+ WORD cbTransBlock; // size of Translation block
+ WORD cbTransValue; // size of Translation value
+ WORD fTransText; // Translation is text?
+ WCHAR szTransKey[KEYSIZE("Translation")]; // Holds "Translation"
+ WORD langid; // language id
+ WORD codepage; // codepage id
+ WORD cbStringBlock; // size of StringFileInfo block
+ WORD cbStringValue; // always 0
+ WORD fStringText; // StringFileInfo is text?
+ WCHAR szStringKey[KEYSIZE("StringFileInfo")]; // Holds "StringFileInfo"
+ WORD cbLangCpBlock; // size of language/codepage block
+ WORD cbLangCpValue; // always 0
+ WORD fLangCpText; // LangCp is text?
+ WCHAR szLangCpKey[KEYSIZE("12345678")]; // Holds hex version of language/codepage
+ // followed by strings
+ };
+#pragma pack( pop)
+};
diff --git a/src/vm/assemblysink.cpp b/src/vm/assemblysink.cpp
new file mode 100644
index 0000000000..82d02c8572
--- /dev/null
+++ b/src/vm/assemblysink.cpp
@@ -0,0 +1,154 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: AssemblySink.cpp
+**
+** Purpose: Implements AssemblySink, an event object that blocks
+**          the current thread while it waits for an asynchronous
+**          assembly load to complete.
+**
+**
+
+
+**
+===========================================================*/
+
+#include "common.h"
+#ifdef FEATURE_FUSION
+#include <stdlib.h>
+#include "assemblysink.h"
+#include "assemblyspec.hpp"
+#include "corpriv.h"
+#include "appdomain.inl"
+
+AssemblySink::AssemblySink(AppDomain* pDomain)
+{
+ WRAPPER_NO_CONTRACT;
+ m_Domain=pDomain->GetId();
+ m_pSpec=NULL;
+ m_CheckCodebase = FALSE;
+}
+
+void AssemblySink::Reset()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_CheckCodebase = FALSE;
+ FusionSink::Reset();
+}
+
+ULONG AssemblySink::Release()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_TRIGGERS);}
+ MODE_ANY;
+ PRECONDITION(CheckPointer(this));
+ } CONTRACTL_END;
+
+
+ ULONG cRef = InterlockedDecrement(&m_cRef);
+ if (!cRef) {
+ Reset();
+ AssemblySink* ret = this;
+        // If we have a domain, we keep a pool of one sink around. If the pool
+        // hands an entry back, this sink was not added to the pool and needs to
+        // be deleted. If we have no pool at all, we delete it as well.
+
+
+
+
+ // TODO: SetupThread may throw. What do we do with Release?
+ HRESULT hr = S_OK;
+ SetupThreadNoThrow(&hr);
+ {
+ GCX_COOP();
+
+ if(m_Domain.m_dwId) {
+ AppDomainFromIDHolder AD(m_Domain, TRUE);
+ if (!AD.IsUnloaded())
+ ret = FastInterlockCompareExchangePointer(&(AD->m_pAsyncPool),
+ this,
+ NULL);
+
+ }
+ }
+
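+        // A non-NULL result here means the one-entry pool already held a sink
+        // (or we never reached the exchange), so this one was not cached and
+        // must be freed.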
+ if(ret != NULL)
+ delete this;
+ }
+ return (cRef);
+}
+
+
+
+STDMETHODIMP AssemblySink::OnProgress(DWORD dwNotification,
+ HRESULT hrNotification,
+ LPCWSTR szNotification,
+ DWORD dwProgress,
+ DWORD dwProgressMax,
+ LPVOID pvBindInfo,
+ IUnknown* punk)
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ HRESULT hr = S_OK;
+
+ switch(dwNotification) {
+
+ case ASM_NOTIFICATION_BIND_INFO:
+ FusionBindInfo *pBindInfo;
+
+ pBindInfo = (FusionBindInfo *)pvBindInfo;
+
+ if (pBindInfo && pBindInfo->pNamePolicy && m_pSpec) {
+ pBindInfo->pNamePolicy->AddRef();
+ m_pSpec->SetNameAfterPolicy(pBindInfo->pNamePolicy);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (SUCCEEDED(hr))
+ hr = FusionSink::OnProgress(dwNotification, hrNotification, szNotification,
+ dwProgress, dwProgressMax, pvBindInfo, punk);
+
+ return hr;
+}
+
+
+HRESULT AssemblySink::Wait()
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ HRESULT hr = FusionSink::Wait();
+
+ if (FAILED(hr)) {
+ // If we get an exception then we will just release this sink. It may be the
+ // case that the appdomain was terminated. Other exceptions will cause the
+ // sink to be scavenged but this is ok. A new one will be generated for the
+ // next bind.
+ m_Domain.m_dwId = 0;
+ // The AssemblySpec passed is stack allocated in some cases.
+ // Remove reference to it to prevent AV in delayed fusion bind notifications.
+ m_pSpec = NULL;
+ }
+
+ return hr;
+}
+#endif
diff --git a/src/vm/assemblysink.h b/src/vm/assemblysink.h
new file mode 100644
index 0000000000..5bdd7d28c3
--- /dev/null
+++ b/src/vm/assemblysink.h
@@ -0,0 +1,60 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: AssemblySink.h
+**
+** Purpose: Asynchronous callback for assembly loads
+**
+**
+
+
+**
+===========================================================*/
+#ifndef _ASSEMBLYSINK_H
+#define _ASSEMBLYSINK_H
+
+#ifndef FEATURE_FUSION
+#error FEATURE_FUSION is not enabled, please do not include assemblysink.h
+#endif
+
+class AppDomain;
+
+class AssemblySink : public FusionSink
+{
+public:
+ AssemblySink(AppDomain* pDomain);
+ ~AssemblySink() { WRAPPER_NO_CONTRACT; };
+
+ void Reset();
+
+ ULONG STDMETHODCALLTYPE Release(void);
+
+ STDMETHODIMP OnProgress(DWORD dwNotification,
+ HRESULT hrNotification,
+ LPCWSTR szNotification,
+ DWORD dwProgress,
+ DWORD dwProgressMax,
+ LPVOID pvBindInfo,
+ IUnknown* punk);
+
+ virtual HRESULT Wait();
+
+ void RequireCodebaseSecurityCheck() {LIMITED_METHOD_CONTRACT; m_CheckCodebase = TRUE;}
+ BOOL DoCodebaseSecurityCheck() {LIMITED_METHOD_CONTRACT; return m_CheckCodebase;}
+ void SetAssemblySpec(AssemblySpec* pSpec)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pSpec=pSpec;
+ }
+
+private:
+ ADID m_Domain; // Which domain (index) do I belong to
+ AssemblySpec* m_pSpec;
+ BOOL m_CheckCodebase;
+};
+
+#endif
diff --git a/src/vm/assemblyspec.cpp b/src/vm/assemblyspec.cpp
new file mode 100644
index 0000000000..2e2102d068
--- /dev/null
+++ b/src/vm/assemblyspec.cpp
@@ -0,0 +1,2484 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: AssemblySpec.cpp
+**
+** Purpose: Implements the assembly binding class (AssemblySpec)
+**
+**
+
+
+**
+===========================================================*/
+
+#include "common.h"
+
+#include <stdlib.h>
+
+#ifdef FEATURE_FUSION
+#include "actasm.h"
+#include "appctx.h"
+#endif
+#include "assemblyspec.hpp"
+#include "security.h"
+#include "eeconfig.h"
+#include "strongname.h"
+#include "strongnameholders.h"
+#ifdef FEATURE_FUSION
+#include "assemblysink.h"
+#include "dbglog.h"
+#include "bindinglog.hpp"
+#include "assemblyfilehash.h"
+#endif
+#include "mdaassistants.h"
+#include "eventtrace.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "clrprivbinderutil.h"
+#include "winrthelpers.h"
+#endif
+
+#ifdef _DEBUG
+// This debug-only wrapper for LookupAssembly is solely for the use of postconditions and
+// assertions. The problem is that the real LookupAssembly can throw an OOM
+// simply because it can't allocate scratch space. For the sake of asserting,
+// we can treat those as successful lookups.
+BOOL UnsafeVerifyLookupAssembly(AssemblySpecBindingCache *pCache, AssemblySpec *pSpec, DomainAssembly *pComparator)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ BOOL result = FALSE;
+
+ EX_TRY
+ {
+ SCAN_IGNORE_FAULT; // Won't go away: This wrapper exists precisely to turn an OOM here into something our postconditions can deal with.
+ result = (pComparator == pCache->LookupAssembly(pSpec));
+ }
+ EX_CATCH
+ {
+ Exception *ex = GET_EXCEPTION();
+
+ result = ex->IsTransient();
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return result;
+
+}
+#endif
+
+#ifdef _DEBUG
+// This debug-only wrapper for LookupFile is solely for the use of postconditions and
+// assertions. The problem is that the real LookupFile can throw an OOM
+// simply because it can't allocate scratch space. For the sake of asserting,
+// we can treat those as successful lookups.
+BOOL UnsafeVerifyLookupFile(AssemblySpecBindingCache *pCache, AssemblySpec *pSpec, PEAssembly *pComparator)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ BOOL result = FALSE;
+
+ EX_TRY
+ {
+ SCAN_IGNORE_FAULT; // Won't go away: This wrapper exists precisely to turn an OOM here into something our postconditions can deal with.
+ result = pCache->LookupFile(pSpec)->Equals(pComparator);
+ }
+ EX_CATCH
+ {
+ Exception *ex = GET_EXCEPTION();
+
+ result = ex->IsTransient();
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return result;
+
+}
+
+#endif
+
+#ifdef _DEBUG
+
+// This debug-only wrapper for Contains is solely for the use of postconditions and
+// assertions. The problem is that the real Contains can throw an OOM
+// simply because it can't allocate scratch space. For the sake of asserting,
+// we can treat those as successful lookups.
+BOOL UnsafeContains(AssemblySpecBindingCache *pCache, AssemblySpec *pSpec)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ BOOL result = FALSE;
+
+ EX_TRY
+ {
+ SCAN_IGNORE_FAULT; // Won't go away: This wrapper exists precisely to turn an OOM here into something our postconditions can deal with.
+ result = pCache->Contains(pSpec);
+ }
+ EX_CATCH
+ {
+ Exception *ex = GET_EXCEPTION();
+
+ result = ex->IsTransient();
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return result;
+
+}
+#endif
+
+
+
+AssemblySpecHash::~AssemblySpecHash()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PtrHashMap::PtrIterator i = m_map.begin();
+ while (!i.end())
+ {
+ AssemblySpec *s = (AssemblySpec*) i.GetValue();
+ if (m_pHeap != NULL)
+ s->~AssemblySpec();
+ else
+ delete s;
+
+ ++i;
+ }
+}
+
+// Check assembly name for invalid characters
+// Return value:
+// TRUE: If no invalid characters were found, or if the assembly name isn't set
+// FALSE: If invalid characters were found
+// This is needed to prevent security loopholes with ':', '/' and '\' in the assembly name
+BOOL AssemblySpec::IsValidAssemblyName()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if (GetName())
+ {
+ SString ssAssemblyName(SString::Utf8, GetName());
+ for (SString::Iterator i = ssAssemblyName.Begin(); i[0] != W('\0'); i++) {
+ switch (i[0]) {
+ case W(':'):
+ case W('\\'):
+ case W('/'):
+ return FALSE;
+
+ default:
+ break;
+ }
+ }
+ }
+ return TRUE;
+}
+
+HRESULT AssemblySpec::InitializeSpecInternal(mdToken kAssemblyToken,
+ IMDInternalImport *pImport,
+ DomainAssembly *pStaticParent,
+ BOOL fIntrospectionOnly,
+ BOOL fAllowAllocation)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ if (fAllowAllocation) {GC_TRIGGERS;} else {GC_NOTRIGGER;};
+ if (fAllowAllocation) {INJECT_FAULT(COMPlusThrowOM());} else {FORBID_FAULT;};
+ NOTHROW;
+ MODE_ANY;
+ PRECONDITION(pImport->IsValidToken(kAssemblyToken));
+ PRECONDITION(TypeFromToken(kAssemblyToken) == mdtAssembly
+ || TypeFromToken(kAssemblyToken) == mdtAssemblyRef);
+ PRECONDITION(pStaticParent == NULL || !(pStaticParent->IsIntrospectionOnly() && !fIntrospectionOnly)); //Something's wrong if an introspection assembly loads an assembly for execution.
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+        // We also did this check as a precondition, since we should have prevented it structurally;
+        // but just in case, make sure retail builds stop us from proceeding further.
+ if (pStaticParent != NULL && pStaticParent->IsIntrospectionOnly() && !fIntrospectionOnly)
+ {
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+
+ // Normalize this boolean as it tends to be used for comparisons
+ m_fIntrospectionOnly = !!fIntrospectionOnly;
+
+ IfFailThrow(BaseAssemblySpec::Init(kAssemblyToken,pImport));
+
+ if (IsContentType_WindowsRuntime())
+ {
+ if (!fAllowAllocation)
+ { // We don't support this because we must be able to allocate in order to
+ // extract embedded type names for the native image scenario. Currently,
+ // the only caller of this method with fAllowAllocation == FALSE is
+ // Module::GetAssemblyIfLoaded, and since this method will only check the
+ // assembly spec cache, and since we can't cache WinRT assemblies, this
+ // limitation should have no negative impact.
+ IfFailThrow(E_FAIL);
+ }
+
+ // Extract embedded content, if present (currently used for embedded WinRT type names).
+ ParseEncodedName();
+ }
+
+        // For static binds, we cannot reference a weakly named assembly from a strongly named one.
+ // (Note that this constraint doesn't apply to dynamic binds which is why this check is
+ // not farther down the stack.)
+ if (pStaticParent != NULL)
+ {
+            // We don't validate this for CoreCLR as there is no good use case for this scenario.
+#if !defined(FEATURE_CORECLR)
+ // It is OK for signed assemblies to reference WinRT assemblies (.winmd files) that are not signed
+ if (!IsContentType_WindowsRuntime() && pStaticParent->GetFile()->IsStrongNamed() && !IsStrongNamed())
+ {
+ ThrowHR(FUSION_E_PRIVATE_ASM_DISALLOWED);
+ }
+#endif // !defined(FEATURE_CORECLR)
+
+ SetParentAssembly(pStaticParent);
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+} // AssemblySpec::InitializeSpecInternal
+
+#ifdef FEATURE_FUSION
+void AssemblySpec::InitializeSpec(IAssemblyName *pName,
+ DomainAssembly *pStaticParent /*=NULL*/ ,
+ BOOL fIntrospectionOnly /*=FALSE*/ )
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Normalize this boolean as it tends to be used for comparisons
+ m_fIntrospectionOnly = !!fIntrospectionOnly;
+ IfFailThrow(Init(pName));
+
+    // For static binds, we cannot reference a weakly named assembly from a strongly named one.
+ // (Note that this constraint doesn't apply to dynamic binds which is why this check is
+ // not farther down the stack.)
+
+ if (pStaticParent != NULL) {
+ if (pStaticParent->GetFile()->IsStrongNamed() && !IsStrongNamed())
+ {
+ EEFileLoadException::Throw(this, FUSION_E_PRIVATE_ASM_DISALLOWED);
+ }
+ SetParentAssembly(pStaticParent);
+ }
+
+ // Extract embedded WinRT name, if present.
+ ParseEncodedName();
+}
+#endif //FEATURE_FUSION
+
+#ifdef FEATURE_MIXEDMODE
+void AssemblySpec::InitializeSpec(HMODULE hMod,
+ BOOL fIntrospectionOnly /*=FALSE*/)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Normalize this boolean as it tends to be used for comparisons
+ m_fIntrospectionOnly = !!fIntrospectionOnly;
+
+ PEDecoder pe(hMod);
+
+ if (!pe.CheckILFormat())
+ {
+ StackSString path;
+ PEImage::GetPathFromDll(hMod, path);
+ EEFileLoadException::Throw(path, COR_E_BADIMAGEFORMAT);
+ }
+
+ COUNT_T size;
+ const void *data = pe.GetMetadata(&size);
+ SafeComHolder<IMDInternalImport> pImport;
+ IfFailThrow(GetMetaDataInternalInterface((void *) data, size, ofRead,
+ IID_IMDInternalImport,
+ (void **) &pImport));
+
+ mdAssembly a;
+ if (FAILED(pImport->GetAssemblyFromScope(&a)))
+ ThrowHR(COR_E_ASSEMBLYEXPECTED);
+
+ InitializeSpec(a, pImport, NULL, fIntrospectionOnly);
+}
+#endif //FEATURE_MIXEDMODE
+
+void AssemblySpec::InitializeSpec(PEAssembly * pFile)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pFile));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+ ReleaseHolder<IMDInternalImport> pImport(pFile->GetMDImportWithRef());
+ mdAssembly a;
+ IfFailThrow(pImport->GetAssemblyFromScope(&a));
+
+ InitializeSpec(a, pImport, NULL, pFile->IsIntrospectionOnly());
+
+#ifdef FEATURE_COMINTEROP
+ if (IsContentType_WindowsRuntime())
+ {
+ LPCSTR szNamespace;
+ LPCSTR szTypeName;
+ SString ssFakeNameSpaceAllocationBuffer;
+ IfFailThrow(::GetFirstWinRTTypeDef(pImport, &szNamespace, &szTypeName, pFile->GetPath(), &ssFakeNameSpaceAllocationBuffer));
+
+ SetWindowsRuntimeType(szNamespace, szTypeName);
+
+ // pFile is not guaranteed to stay around (it might be unloaded with the AppDomain), we have to copy the type name
+ CloneFields(WINRT_TYPE_NAME_OWNED);
+ }
+#endif //FEATURE_COMINTEROP
+
+#if defined(FEATURE_CORECLR)
+ // Set the binding context for the AssemblySpec
+ ICLRPrivBinder* pCurrentBinder = GetBindingContext();
+ ICLRPrivBinder* pExpectedBinder = pFile->GetBindingContext();
+ if (pCurrentBinder == NULL)
+ {
+        // We should always have the binding context in the PEAssembly. The only exceptions are the following:
+        //
+        // 1) We are here during EEStartup, loading mscorlib.dll.
+        // 2) We are dealing with dynamic assemblies.
+ _ASSERTE((pExpectedBinder != NULL) || pFile->IsSystem() || pFile->IsDynamic());
+ SetBindingContext(pExpectedBinder);
+ }
+#endif // defined(FEATURE_CORECLR)
+}
+
+#ifndef CROSSGEN_COMPILE
+
+// This allocates scratch space with the thread's stacking allocator. Take a Checkpoint first and release it when done.
+#ifdef FEATURE_FUSION
+HRESULT AssemblySpec::InitializeSpec(StackingAllocator* alloc, ASSEMBLYNAMEREF* pName,
+ BOOL fParse /*=TRUE*/, BOOL fIntrospectionOnly /*=FALSE*/)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(alloc));
+ PRECONDITION(CheckPointer(pName));
+ PRECONDITION(IsProtectedByGCFrame(pName));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Simple name
+ if ((*pName)->GetSimpleName() != NULL) {
+ WCHAR* pString;
+ int iString;
+ ((STRINGREF) (*pName)->GetSimpleName())->RefInterpretGetStringValuesDangerousForGC(&pString, &iString);
+ DWORD lgth = WszWideCharToMultiByte(CP_UTF8, 0, pString, iString, NULL, 0, NULL, NULL);
+ if (lgth + 1 < lgth)
+ ThrowHR(E_INVALIDARG);
+ LPSTR lpName = (LPSTR) alloc->Alloc(S_UINT32(lgth) + S_UINT32(1));
+ WszWideCharToMultiByte(CP_UTF8, 0, pString, iString,
+ lpName, lgth+1, NULL, NULL);
+ lpName[lgth] = '\0';
+ m_pAssemblyName = lpName;
+ }
+
+ if (fParse) {
+ HRESULT hr = ParseName();
+ // Sometimes Fusion flags invalid characters in the name, sometimes it doesn't
+ // depending on where the invalid characters are
+ // We want to Raise the assembly resolve event on all invalid characters
+ // but calling ParseName before checking for invalid characters gives Fusion a chance to
+ // parse the rest of the name (to get a public key token, etc.)
+ if ((hr == FUSION_E_INVALID_NAME) || (!IsValidAssemblyName())) {
+ // This is the only case where we do not throw on an error
+ // We don't want to throw so as to give the caller a chance to call RaiseAssemblyResolveEvent
+ // The only caller that cares is System.Reflection.Assembly.InternalLoad which calls us through
+ // AssemblyNameNative::Init
+ return FUSION_E_INVALID_NAME;
+ }
+ else
+ IfFailThrow(hr);
+ }
+ else {
+ // Flags
+ m_dwFlags = (*pName)->GetFlags();
+
+ // Version
+ VERSIONREF version = (VERSIONREF) (*pName)->GetVersion();
+ if(version == NULL) {
+ m_context.usMajorVersion = (USHORT)-1;
+ m_context.usMinorVersion = (USHORT)-1;
+ m_context.usBuildNumber = (USHORT)-1;
+ m_context.usRevisionNumber = (USHORT)-1;
+ }
+ else {
+ m_context.usMajorVersion = (USHORT)version->GetMajor();
+ m_context.usMinorVersion = (USHORT)version->GetMinor();
+ m_context.usBuildNumber = (USHORT)version->GetBuild();
+ m_context.usRevisionNumber = (USHORT)version->GetRevision();
+ }
+
+ m_context.szLocale = 0;
+
+ if ((*pName)->GetCultureInfo() != NULL)
+ {
+ struct _gc {
+ OBJECTREF cultureinfo;
+ STRINGREF pString;
+ } gc;
+
+ gc.cultureinfo = (*pName)->GetCultureInfo();
+ gc.pString = NULL;
+
+ GCPROTECT_BEGIN(gc);
+
+ MethodDescCallSite getName(METHOD__CULTURE_INFO__GET_NAME, &gc.cultureinfo);
+
+ ARG_SLOT args[] = {
+ ObjToArgSlot(gc.cultureinfo)
+ };
+ gc.pString = getName.Call_RetSTRINGREF(args);
+ if (gc.pString != NULL) {
+ WCHAR* pString;
+ int iString;
+ gc.pString->RefInterpretGetStringValuesDangerousForGC(&pString, &iString);
+ DWORD lgth = WszWideCharToMultiByte(CP_UTF8, 0, pString, iString, NULL, 0, NULL, NULL);
+ LPSTR lpLocale = (LPSTR) alloc->Alloc(S_UINT32(lgth) + S_UINT32(1));
+ WszWideCharToMultiByte(CP_UTF8, 0, pString, iString,
+ lpLocale, lgth+1, NULL, NULL);
+ lpLocale[lgth] = '\0';
+ m_context.szLocale = lpLocale;
+ }
+ GCPROTECT_END();
+ }
+
+ // Strong name
+ // Note that we prefer to take a public key token if present,
+ // even if flags indicate a full public key
+ if ((*pName)->GetPublicKeyToken() != NULL) {
+ m_dwFlags &= ~afPublicKey;
+ PBYTE pArray = NULL;
+ pArray = (*pName)->GetPublicKeyToken()->GetDirectPointerToNonObjectElements();
+ m_cbPublicKeyOrToken = (*pName)->GetPublicKeyToken()->GetNumComponents();
+ m_pbPublicKeyOrToken = new (alloc) BYTE[m_cbPublicKeyOrToken];
+ memcpy(m_pbPublicKeyOrToken, pArray, m_cbPublicKeyOrToken);
+ }
+ else if ((*pName)->GetPublicKey() != NULL) {
+ m_dwFlags |= afPublicKey;
+ PBYTE pArray = NULL;
+ pArray = (*pName)->GetPublicKey()->GetDirectPointerToNonObjectElements();
+ m_cbPublicKeyOrToken = (*pName)->GetPublicKey()->GetNumComponents();
+ m_pbPublicKeyOrToken = new (alloc) BYTE[m_cbPublicKeyOrToken];
+ memcpy(m_pbPublicKeyOrToken, pArray, m_cbPublicKeyOrToken);
+ }
+ }
+
+ // Hash for control
+ // <TODO>@TODO cts, can we use unsafe in this case!!!</TODO>
+ if ((*pName)->GetHashForControl() != NULL)
+ SetHashForControl((*pName)->GetHashForControl()->GetDataPtr(),
+ (*pName)->GetHashForControl()->GetNumComponents(),
+ (*pName)->GetHashAlgorithmForControl());
+
+ // Normalize this boolean as it tends to be used for comparisons
+ m_fIntrospectionOnly = !!fIntrospectionOnly;
+
+ // Extract embedded WinRT name, if present.
+ ParseEncodedName();
+
+ return S_OK;
+}
+
+#else // FEATURE_FUSION
+HRESULT AssemblySpec::InitializeSpec(StackingAllocator* alloc, ASSEMBLYNAMEREF* pName,
+ BOOL fParse /*=TRUE*/, BOOL fIntrospectionOnly /*=FALSE*/)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(alloc));
+ PRECONDITION(CheckPointer(pName));
+ PRECONDITION(IsProtectedByGCFrame(pName));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Simple name
+ if ((*pName)->GetSimpleName() != NULL) {
+ WCHAR* pString;
+ int iString;
+ ((STRINGREF) (*pName)->GetSimpleName())->RefInterpretGetStringValuesDangerousForGC(&pString, &iString);
+ DWORD lgth = WszWideCharToMultiByte(CP_UTF8, 0, pString, iString, NULL, 0, NULL, NULL);
+ if (lgth + 1 < lgth)
+ ThrowHR(E_INVALIDARG);
+ LPSTR lpName = (LPSTR) alloc->Alloc(S_UINT32(lgth) + S_UINT32(1));
+ WszWideCharToMultiByte(CP_UTF8, 0, pString, iString,
+ lpName, lgth+1, NULL, NULL);
+ lpName[lgth] = '\0';
+        // Calling Init here will trash the cached lpName in AssemblySpec, but lpName is still needed by the
+        // ParseName call below.
+ SetName(lpName);
+ }
+ else
+ {
+ // Ensure we always have an assembly simple name.
+ LPSTR lpName = (LPSTR) alloc->Alloc(S_UINT32(1));
+ lpName[0] = '\0';
+ SetName(lpName);
+ }
+
+ if (fParse) {
+ HRESULT hr = ParseName();
+ // Sometimes Fusion flags invalid characters in the name, sometimes it doesn't
+ // depending on where the invalid characters are
+ // We want to Raise the assembly resolve event on all invalid characters
+ // but calling ParseName before checking for invalid characters gives Fusion a chance to
+ // parse the rest of the name (to get a public key token, etc.)
+ if ((hr == FUSION_E_INVALID_NAME) || (!IsValidAssemblyName())) {
+ // This is the only case where we do not throw on an error
+ // We don't want to throw so as to give the caller a chance to call RaiseAssemblyResolveEvent
+ // The only caller that cares is System.Reflection.Assembly.InternalLoad which calls us through
+ // AssemblyNameNative::Init
+ return FUSION_E_INVALID_NAME;
+ }
+ else
+ IfFailThrow(hr);
+ }
+ else {
+ AssemblyMetaDataInternal asmInfo;
+ // Flags
+ DWORD dwFlags = (*pName)->GetFlags();
+
+ // Version
+ VERSIONREF version = (VERSIONREF) (*pName)->GetVersion();
+ if(version == NULL) {
+ asmInfo.usMajorVersion = (USHORT)-1;
+ asmInfo.usMinorVersion = (USHORT)-1;
+ asmInfo.usBuildNumber = (USHORT)-1;
+ asmInfo.usRevisionNumber = (USHORT)-1;
+ }
+ else {
+ asmInfo.usMajorVersion = (USHORT)version->GetMajor();
+ asmInfo.usMinorVersion = (USHORT)version->GetMinor();
+ asmInfo.usBuildNumber = (USHORT)version->GetBuild();
+ asmInfo.usRevisionNumber = (USHORT)version->GetRevision();
+ }
+
+ asmInfo.szLocale = 0;
+ asmInfo.ulOS = 0;
+ asmInfo.rOS = 0;
+ asmInfo.ulProcessor = 0;
+ asmInfo.rProcessor = 0;
+
+ if ((*pName)->GetCultureInfo() != NULL)
+ {
+ struct _gc {
+ OBJECTREF cultureinfo;
+ STRINGREF pString;
+ } gc;
+
+ gc.cultureinfo = (*pName)->GetCultureInfo();
+ gc.pString = NULL;
+
+ GCPROTECT_BEGIN(gc);
+
+ MethodDescCallSite getName(METHOD__CULTURE_INFO__GET_NAME, &gc.cultureinfo);
+
+ ARG_SLOT args[] = {
+ ObjToArgSlot(gc.cultureinfo)
+ };
+ gc.pString = getName.Call_RetSTRINGREF(args);
+ if (gc.pString != NULL) {
+ WCHAR* pString;
+ int iString;
+ gc.pString->RefInterpretGetStringValuesDangerousForGC(&pString, &iString);
+ DWORD lgth = WszWideCharToMultiByte(CP_UTF8, 0, pString, iString, NULL, 0, NULL, NULL);
+                S_UINT32 lengthWithNull = S_UINT32(lgth) + S_UINT32(1);
+                if (lengthWithNull.IsOverflow())
+                {
+                    COMPlusThrowHR(COR_E_OVERFLOW);
+                }
+                LPSTR lpLocale = (LPSTR) alloc->Alloc(lengthWithNull);
+                WszWideCharToMultiByte(CP_UTF8, 0, pString, iString,
+                                       lpLocale, lengthWithNull.Value(), NULL, NULL);
+ lpLocale[lgth] = '\0';
+ asmInfo.szLocale = lpLocale;
+ }
+ GCPROTECT_END();
+ }
+
+ // Strong name
+ DWORD cbPublicKeyOrToken=0;
+ BYTE* pbPublicKeyOrToken=NULL;
+ // Note that we prefer to take a public key token if present,
+ // even if flags indicate a full public key
+ if ((*pName)->GetPublicKeyToken() != NULL) {
+ dwFlags &= ~afPublicKey;
+ PBYTE pArray = NULL;
+ pArray = (*pName)->GetPublicKeyToken()->GetDirectPointerToNonObjectElements();
+ cbPublicKeyOrToken = (*pName)->GetPublicKeyToken()->GetNumComponents();
+ pbPublicKeyOrToken = pArray;
+ }
+ else if ((*pName)->GetPublicKey() != NULL) {
+ dwFlags |= afPublicKey;
+ PBYTE pArray = NULL;
+ pArray = (*pName)->GetPublicKey()->GetDirectPointerToNonObjectElements();
+ cbPublicKeyOrToken = (*pName)->GetPublicKey()->GetNumComponents();
+ pbPublicKeyOrToken = pArray;
+ }
+ BaseAssemblySpec::Init(GetName(),&asmInfo,pbPublicKeyOrToken,cbPublicKeyOrToken,dwFlags);
+ }
+
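+    // The fields set above may point into GC arrays or scratch memory; clone
+    // them into the caller's stacking allocator so the spec remains valid
+    // after those objects move or go away.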
+ CloneFieldsToStackingAllocator(alloc);
+
+ // Hash for control
+ // <TODO>@TODO cts, can we use unsafe in this case!!!</TODO>
+ if ((*pName)->GetHashForControl() != NULL)
+ SetHashForControl((*pName)->GetHashForControl()->GetDataPtr(),
+ (*pName)->GetHashForControl()->GetNumComponents(),
+ (*pName)->GetHashAlgorithmForControl());
+
+ // Normalize this boolean as it tends to be used for comparisons
+ m_fIntrospectionOnly = !!fIntrospectionOnly;
+
+ // Extract embedded WinRT name, if present.
+ ParseEncodedName();
+
+ return S_OK;
+}
+#endif // FEATURE_FUSION
+
+void AssemblySpec::AssemblyNameInit(ASSEMBLYNAMEREF* pAsmName, PEImage* pImageInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(IsProtectedByGCFrame (pAsmName));
+ }
+ CONTRACTL_END;
+
+ struct _gc {
+ OBJECTREF CultureInfo;
+ STRINGREF Locale;
+ OBJECTREF Version;
+ U1ARRAYREF PublicKeyOrToken;
+ STRINGREF Name;
+ STRINGREF CodeBase;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ if ((m_context.usMajorVersion != (USHORT) -1) &&
+ (m_context.usMinorVersion != (USHORT) -1)) {
+
+ MethodTable* pVersion = MscorlibBinder::GetClass(CLASS__VERSION);
+
+ // version
+ gc.Version = AllocateObject(pVersion);
+
+
+ MethodDescCallSite ctorMethod(METHOD__VERSION__CTOR);
+
+ ARG_SLOT VersionArgs[5] =
+ {
+ ObjToArgSlot(gc.Version),
+ (ARG_SLOT) m_context.usMajorVersion,
+ (ARG_SLOT) m_context.usMinorVersion,
+ (ARG_SLOT) m_context.usBuildNumber,
+ (ARG_SLOT) m_context.usRevisionNumber,
+ };
+ ctorMethod.Call(VersionArgs);
+ }
+
+ // cultureinfo
+ if (m_context.szLocale) {
+
+ MethodTable* pCI = MscorlibBinder::GetClass(CLASS__CULTURE_INFO);
+ gc.CultureInfo = AllocateObject(pCI);
+
+ gc.Locale = StringObject::NewString(m_context.szLocale);
+
+ MethodDescCallSite strCtor(METHOD__CULTURE_INFO__STR_CTOR);
+
+ ARG_SLOT args[2] =
+ {
+ ObjToArgSlot(gc.CultureInfo),
+ ObjToArgSlot(gc.Locale)
+ };
+
+ strCtor.Call(args);
+ }
+
+
+ // public key or token byte array
+ if (m_pbPublicKeyOrToken)
+ Security::CopyEncodingToByteArray((BYTE*) m_pbPublicKeyOrToken,
+ m_cbPublicKeyOrToken,
+ (OBJECTREF*) &gc.PublicKeyOrToken);
+
+ // simple name
+ if(GetName())
+ gc.Name = StringObject::NewString(GetName());
+
+ if (GetCodeBase())
+ gc.CodeBase = StringObject::NewString(GetCodeBase());
+
+ BOOL fPublicKey = m_dwFlags & afPublicKey;
+
+ ULONG hashAlgId=0;
+ if (pImageInfo != NULL)
+ {
+ if(!pImageInfo->GetMDImport()->IsValidToken(TokenFromRid(1, mdtAssembly)))
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ IfFailThrow(pImageInfo->GetMDImport()->GetAssemblyProps(TokenFromRid(1, mdtAssembly), NULL, NULL, &hashAlgId, NULL, NULL, NULL));
+ }
+
+ MethodDescCallSite init(METHOD__ASSEMBLY_NAME__INIT);
+
+ ARG_SLOT MethodArgs[] =
+ {
+ ObjToArgSlot(*pAsmName),
+ ObjToArgSlot(gc.Name),
+ fPublicKey ? ObjToArgSlot(gc.PublicKeyOrToken) :
+ (ARG_SLOT) NULL, // public key
+ fPublicKey ? (ARG_SLOT) NULL :
+ ObjToArgSlot(gc.PublicKeyOrToken), // public key token
+ ObjToArgSlot(gc.Version),
+ ObjToArgSlot(gc.CultureInfo),
+ (ARG_SLOT) hashAlgId,
+ (ARG_SLOT) 1, // AssemblyVersionCompatibility.SameMachine
+ ObjToArgSlot(gc.CodeBase),
+ (ARG_SLOT) m_dwFlags,
+ (ARG_SLOT) NULL // key pair
+ };
+
+ init.Call(MethodArgs);
+
+ // Only set the processor architecture if we're looking at a newer binary that has
+ // that information in the PE, and we're not looking at a reference assembly.
+ if(pImageInfo && !pImageInfo->HasV1Metadata() && !pImageInfo->IsReferenceAssembly())
+ {
+ DWORD dwMachine, dwKind;
+
+ pImageInfo->GetPEKindAndMachine(&dwMachine,&dwKind);
+
+ MethodDescCallSite setPA(METHOD__ASSEMBLY_NAME__SET_PROC_ARCH_INDEX);
+
+ ARG_SLOT PAMethodArgs[] = {
+ ObjToArgSlot(*pAsmName),
+ (ARG_SLOT)dwMachine,
+ (ARG_SLOT)dwKind
+ };
+
+ setPA.Call(PAMethodArgs);
+ }
+
+ GCPROTECT_END();
+}
+
+// This allocates scratch space with the thread's stacking allocator. Take a Checkpoint first and release it when done.
+void AssemblySpec::SetCodeBase(StackingAllocator* alloc, STRINGREF *pCodeBase)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pCodeBase));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Codebase
+ if (pCodeBase != NULL && *pCodeBase != NULL) {
+ WCHAR* pString;
+ int iString;
+ (*pCodeBase)->RefInterpretGetStringValuesDangerousForGC(&pString, &iString);
+
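+        // iString excludes the terminator, so copy iString + 1 WCHARs to keep
+        // the stored codebase NUL-terminated.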
+ DWORD dwCodeBase = (DWORD) iString+1;
+ m_wszCodeBase = new (alloc) WCHAR[dwCodeBase];
+ memcpy((void*)m_wszCodeBase, pString, dwCodeBase * sizeof(WCHAR));
+ }
+}
+
+#endif // CROSSGEN_COMPILE
+
+#ifdef FEATURE_FUSION
+
+/* static */
+void AssemblySpec::DemandFileIOPermission(PEAssembly *pFile)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pFile));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // should have already checked permission if the codebase is set
+ if (!GetCodeBase()) {
+
+ if (pFile->IsBindingCodeBase()) {
+ if (pFile->IsSourceDownloadCache()) {
+ StackSString check;
+ pFile->GetCodeBase(check);
+
+ DemandFileIOPermission(check, FALSE, FILE_WEBPERM);
+ }
+ else
+ DemandFileIOPermission(pFile->GetPath(), TRUE, FILE_READANDPATHDISC);
+ }
+ }
+}
+
+STDAPI RuntimeCheckLocationAccess(LPCWSTR wszLocation)
+{
+
+ if (GetThread()==NULL)
+ return S_FALSE;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(wszLocation));
+ }
+ CONTRACTL_END;
+ OVERRIDE_LOAD_LEVEL_LIMIT(FILE_ACTIVE);
+ HRESULT hr=S_OK;
+ DWORD dwDemand = 0;
+
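+    // _wcsnicmp is non-zero when the location does not start with "file", i.e.
+    // it is a web location, so demand the web permission rather than
+    // read/path-discovery.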
+ if (SString::_wcsnicmp(wszLocation, W("file"), 4))
+ dwDemand = AssemblySpec::FILE_WEBPERM;
+ else
+ dwDemand = AssemblySpec::FILE_READANDPATHDISC;
+
+ EX_TRY
+ {
+ AssemblySpec::DemandFileIOPermission(wszLocation,
+ FALSE,
+ dwDemand);
+ }
+ EX_CATCH_HRESULT(hr);
+ return hr;
+
+}
+
+/* static */
+void AssemblySpec::DemandFileIOPermission(LPCWSTR wszCodeBase,
+ BOOL fHavePath,
+ DWORD dwDemandFlag)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(wszCodeBase));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ MethodDescCallSite demandPermission(METHOD__ASSEMBLY__DEMAND_PERMISSION);
+
+ STRINGREF codeBase = NULL;
+ GCPROTECT_BEGIN(codeBase);
+
+ codeBase = StringObject::NewString(wszCodeBase);
+ ARG_SLOT args[3] =
+ {
+ ObjToArgSlot(codeBase),
+ BoolToArgSlot(fHavePath),
+ dwDemandFlag
+ };
+ demandPermission.Call(args);
+ GCPROTECT_END();
+}
+
+BOOL AssemblySpec::FindAssemblyFile(AppDomain* pAppDomain, BOOL fThrowOnFileNotFound,
+ IAssembly** ppIAssembly, IHostAssembly **ppIHostAssembly, IBindResult** ppNativeFusionAssembly,
+ IFusionBindLog** ppFusionLog, HRESULT *pHRBindResult, StackCrawlMark *pCallerStackMark /* = NULL */,
+ AssemblyLoadSecurity *pLoadSecurity /* = NULL */)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pAppDomain));
+ PRECONDITION(CheckPointer(pHRBindResult));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ IApplicationContext *pFusionContext = pAppDomain->GetFusionContext();
+
+ AssemblySink* pSink = pAppDomain->AllocateAssemblySink(this);
+ SafeComHolderPreemp<IAssemblyBindSink> sinkholder(pSink);
+
+ BOOL fSuppressSecurityChecks = pLoadSecurity != NULL && pLoadSecurity->m_fSuppressSecurityChecks;
+
+ if (!GetCodeBase() && !fSuppressSecurityChecks)
+ pSink->RequireCodebaseSecurityCheck();
+
+ BOOL fIsWellKnown = FALSE;
+ HRESULT hr = S_OK;
+
+ IfFailGo(AssemblySpec::LoadAssembly(pFusionContext,
+ pSink,
+ ppIAssembly,
+ ppIHostAssembly,
+ ppNativeFusionAssembly,
+ IsIntrospectionOnly(),
+ fSuppressSecurityChecks));
+
+ // Host should have already done appropriate permission demand
+ if (!(*ppIHostAssembly)) {
+ DWORD dwLocation;
+ IfFailGo((*ppIAssembly)->GetAssemblyLocation(&dwLocation));
+
+ fIsWellKnown = (dwLocation == ASMLOC_UNKNOWN);
+
+        // Check whether it was cached in a case where a codebase had originally loaded it.
+ if (pSink->DoCodebaseSecurityCheck() &&
+ !fSuppressSecurityChecks &&
+ (dwLocation & ASMLOC_CODEBASE_HINT)) {
+ if ((dwLocation & ASMLOC_LOCATION_MASK) == ASMLOC_DOWNLOAD_CACHE) {
+ StackSString codeBase;
+ SafeComHolderPreemp<IAssemblyName> pNameDef;
+
+ // <TODO>We could be caching the IAssemblyName and codebase</TODO>
+ IfFailGo((*ppIAssembly)->GetAssemblyNameDef(&pNameDef));
+
+ FusionBind::GetAssemblyNameStringProperty(pNameDef, ASM_NAME_CODEBASE_URL, codeBase);
+
+ DemandFileIOPermission(codeBase, FALSE, FILE_WEBPERM);
+ }
+ else if ((dwLocation & ASMLOC_LOCATION_MASK) != ASMLOC_GAC) {
+ StackSString path;
+ FusionBind::GetAssemblyManifestModulePath((*ppIAssembly), path);
+
+ DemandFileIOPermission(path, TRUE, FILE_READANDPATHDISC);
+ }
+ }
+
+ // Verify control hash
+ if (m_HashForControl.GetSize() > 0) {
+ StackSString path;
+
+ FusionBind::GetAssemblyManifestModulePath((*ppIAssembly), path);
+
+ AssemblyFileHash fileHash;
+ IfFailGo(fileHash.SetFileName(path));
+ IfFailGo(fileHash.CalculateHash(m_dwHashAlg));
+
+ if (!m_HashForControl.Equals(fileHash.GetHash(), fileHash.GetHashSize()))
+ IfFailGo(FUSION_E_REF_DEF_MISMATCH);
+ }
+ }
+
+#ifdef MDA_SUPPORTED
+ MdaLoadFromContext* pProbe = MDA_GET_ASSISTANT(LoadFromContext);
+ if (pProbe) {
+ pProbe->NowLoading(ppIAssembly, pCallerStackMark);
+ }
+#endif
+
+ *ppFusionLog = pSink->m_pFusionLog;
+ if (*ppFusionLog)
+ (*ppFusionLog)->AddRef();
+ return fIsWellKnown;
+
+ ErrExit:
+ {
+
+ *pHRBindResult = hr;
+
+ if (fThrowOnFileNotFound || (!Assembly::FileNotFound(hr)))
+ EEFileLoadException::Throw(this, pSink->m_pFusionLog, hr);
+ }
+
+ return FALSE;
+}
+#endif // FEATURE_FUSION
+
+void AssemblySpec::MatchRetargetedPublicKeys(Assembly *pAssembly)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pAssembly));
+ }
+ CONTRACTL_END;
+#ifdef FEATURE_FUSION
+ GCX_PREEMP();
+
+ // Manually apply fusion policy to obtain retargeted public key
+ SafeComHolderPreemp<IAssemblyName> pRequestedAssemblyName(NULL);
+ SafeComHolderPreemp<IAssemblyName> pPostPolicyAssemblyName(NULL);
+ IfFailThrow(CreateFusionName(&pRequestedAssemblyName));
+ HRESULT hr = PreBindAssembly(GetAppDomain()->GetFusionContext(),
+ pRequestedAssemblyName,
+ NULL, // pAsmParent
+ &pPostPolicyAssemblyName,
+ NULL // pvReserved
+ );
+ if (SUCCEEDED(hr)
+ || (FAILED(hr) && (hr == FUSION_E_REF_DEF_MISMATCH))) {
+ IAssemblyName *pResultAssemblyName = pAssembly->GetFusionAssemblyName();
+ if (pResultAssemblyName
+ && pPostPolicyAssemblyName
+ && pResultAssemblyName->IsEqual(pPostPolicyAssemblyName, ASM_CMPF_PUBLIC_KEY_TOKEN) == S_OK)
+ return;
+ }
+#endif // FEATURE_FUSION
+ ThrowHR(FUSION_E_REF_DEF_MISMATCH);
+}
+
+
+// Check if the supplied assembly's public key matches up with the one in the Spec, if any
+// Throws an appropriate exception in case of a mismatch
+void AssemblySpec::MatchPublicKeys(Assembly *pAssembly)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ // Check that the public keys are the same as in the AR.
+ if (IsStrongNamed()) {
+
+ const void *pbPublicKey;
+ DWORD cbPublicKey;
+ pbPublicKey = pAssembly->GetPublicKey(&cbPublicKey);
+ if (cbPublicKey == 0)
+ ThrowHR(FUSION_E_PRIVATE_ASM_DISALLOWED);
+
+ if (m_dwFlags & afPublicKey) {
+ if ((m_cbPublicKeyOrToken != cbPublicKey) ||
+ memcmp(m_pbPublicKeyOrToken, pbPublicKey, m_cbPublicKeyOrToken))
+ return MatchRetargetedPublicKeys(pAssembly);
+ }
+
+ // Ref has a token
+ else {
+ StrongNameBufferHolder<BYTE> pbStrongNameToken;
+ DWORD cbStrongNameToken;
+
+ if (!StrongNameTokenFromPublicKey((BYTE*) pbPublicKey,
+ cbPublicKey,
+ &pbStrongNameToken,
+ &cbStrongNameToken))
+ ThrowHR(StrongNameErrorInfo());
+ if ((m_cbPublicKeyOrToken != cbStrongNameToken) ||
+ memcmp(m_pbPublicKeyOrToken,
+ pbStrongNameToken,
+ cbStrongNameToken)) {
+ return MatchRetargetedPublicKeys(pAssembly);
+ }
+ }
+ }
+}
+
+
+PEAssembly *AssemblySpec::ResolveAssemblyFile(AppDomain *pDomain, BOOL fPreBind)
+{
+ CONTRACT(PEAssembly *)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ // No assembly resolve on codebase binds
+ if (GetName() == NULL)
+ RETURN NULL;
+
+ Assembly *pAssembly = pDomain->RaiseAssemblyResolveEvent(this, IsIntrospectionOnly(), fPreBind);
+
+ if (pAssembly != NULL) {
+#ifdef FEATURE_FUSION
+ if (!IsIntrospectionOnly() && IsLoggingNeeded()) {
+ BinderLogging::BindingLog::CacheResultOfAssemblyResolveEvent(pDomain->GetFusionContext(), GetParentLoadContext(), pAssembly);
+ }
+#endif
+ PEAssembly *pFile = pAssembly->GetManifestFile();
+ pFile->AddRef();
+
+ RETURN pFile;
+ }
+
+ RETURN NULL;
+}
+
+
+Assembly *AssemblySpec::LoadAssembly(FileLoadLevel targetLevel, AssemblyLoadSecurity *pLoadSecurity, BOOL fThrowOnFileNotFound, BOOL fRaisePrebindEvents, StackCrawlMark *pCallerStackMark)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DomainAssembly * pDomainAssembly = LoadDomainAssembly(targetLevel, pLoadSecurity, fThrowOnFileNotFound, fRaisePrebindEvents, pCallerStackMark);
+ if (pDomainAssembly == NULL) {
+ _ASSERTE(!fThrowOnFileNotFound);
+ return NULL;
+ }
+ return pDomainAssembly->GetAssembly();
+}
+
+#if defined(FEATURE_CORECLR)
+// Returns a BOOL indicating if the two Binder references point to the same
+// binder instance.
+BOOL AreSameBinderInstance(ICLRPrivBinder *pBinderA, ICLRPrivBinder *pBinderB)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ BOOL fIsSameInstance = FALSE;
+
+ if ((pBinderA != NULL) && (pBinderB != NULL))
+ {
+ // Get the ID for the first binder
+ UINT_PTR binderIDA = 0, binderIDB = 0;
+ HRESULT hr = pBinderA->GetBinderID(&binderIDA);
+ if (SUCCEEDED(hr))
+ {
+ // Get the ID for the second binder
+ hr = pBinderB->GetBinderID(&binderIDB);
+ if (SUCCEEDED(hr))
+ {
+ fIsSameInstance = (binderIDA == binderIDB);
+ }
+ }
+ }
+
+ return fIsSameInstance;
+}
+#endif // defined(FEATURE_CORECLR)
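+
+// Illustrative use (hypothetical binder pointers): the callers below use this
+// to detect whether a parent assembly was bound by the TPA binder.
+//
+//     if (AreSameBinderInstance(pDomain->GetTPABinderContext(), pParentBinder))
+//     {
+//         // The parent is a platform assembly; bind via the default context.
+//     }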
+
+ICLRPrivBinder* AssemblySpec::GetBindingContextFromParentAssembly(AppDomain *pDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pDomain != NULL);
+ }
+ CONTRACTL_END;
+
+ ICLRPrivBinder *pParentAssemblyBinder = NULL;
+ DomainAssembly *pParentDomainAssembly = GetParentAssembly();
+
+ if(pParentDomainAssembly != NULL)
+ {
+ // Get the PEAssembly associated with the parent's domain assembly
+ PEAssembly *pParentPEAssembly = pParentDomainAssembly->GetFile();
+
+ // ICLRPrivAssembly implements ICLRPrivBinder and thus, "is a" binder in a manner of semantics.
+ pParentAssemblyBinder = pParentPEAssembly->GetBindingContext();
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+ if (pParentAssemblyBinder != NULL)
+ {
+ CLRPrivBinderCoreCLR *pTPABinder = pDomain->GetTPABinderContext();
+ if (AreSameBinderInstance(pTPABinder, pParentAssemblyBinder))
+ {
+                // If the parent assembly is a platform (TPA) assembly, then its binding context will always be the
+                // TPABinder context. In that case, we return the default context for binding so that the bind can go
+                // via the custom binder context if one was set; if none was set, we get the expected TPABinder
+                // context anyway.
+ //
+ // Get the reference to the default binding context (this could be the TPABinder context or custom AssemblyLoadContext)
+ pParentAssemblyBinder = static_cast<ICLRPrivBinder*>(pDomain->GetFusionContext());
+ }
+ }
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+ }
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+#if defined(FEATURE_COMINTEROP)
+ if (!IsContentType_WindowsRuntime() && (pParentAssemblyBinder != NULL))
+ {
+ CLRPrivBinderWinRT *pWinRTBinder = pDomain->GetWinRtBinder();
+ if (AreSameBinderInstance(pWinRTBinder, pParentAssemblyBinder))
+ {
+            // We could be here when a non-WinRT assembly load is triggered by a winmd (e.g. System.Runtime being loaded due to
+            // types being referenced from Windows.Foundation.winmd).
+ //
+ // If the AssemblySpec does not correspond to WinRT type but our parent assembly binder is a WinRT binder,
+ // then such an assembly will not be found by the binder. In such a case, we reset our binder reference.
+ pParentAssemblyBinder = NULL;
+ }
+ }
+#endif // defined(FEATURE_COMINTEROP)
+
+ if (!pParentAssemblyBinder)
+ {
+ // We can be here when loading assemblies via the host (e.g. ICLRRuntimeHost2::ExecuteAssembly) or when attempting
+ // to load assemblies via custom AssemblyLoadContext implementation.
+ //
+ // In such a case, the parent assembly (semantically) is mscorlib and thus, the default binding context should be
+ // used as the parent assembly binder.
+ pParentAssemblyBinder = static_cast<ICLRPrivBinder*>(pDomain->GetFusionContext());
+ }
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+ return pParentAssemblyBinder;
+}
+
+DomainAssembly *AssemblySpec::LoadDomainAssembly(FileLoadLevel targetLevel,
+ AssemblyLoadSecurity *pLoadSecurity,
+ BOOL fThrowOnFileNotFound,
+ BOOL fRaisePrebindEvents,
+ StackCrawlMark *pCallerStackMark)
+{
+ CONTRACT(DomainAssembly *)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION((!fThrowOnFileNotFound && CheckPointer(RETVAL, NULL_OK))
+ || CheckPointer(RETVAL));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ ETWOnStartup (LoaderCatchCall_V1, LoaderCatchCallEnd_V1);
+ AppDomain* pDomain = GetAppDomain();
+
+#ifndef FEATURE_CORECLR
+ // Event Tracing for Windows is used to log data for performance and functional testing purposes.
+ // The events in this function are used to help measure the performance of assembly loading as a whole for dynamic loads.
+
+ // Special-purpose holder structure to ensure the LoaderPhaseEnd ETW event is fired when returning from function.
+ struct ETWLoaderPhaseHolder
+ {
+ StackSString ETWCodeBase, ETWAssemblyName;
+
+ DWORD _dwAppDomainId;
+ BOOL initialized;
+
+ ETWLoaderPhaseHolder()
+ : _dwAppDomainId(ETWAppDomainIdNotAvailable)
+ , initialized(FALSE)
+ { }
+
+ void Init(DWORD dwAppDomainId, LPCWSTR wszCodeBase, LPCSTR szAssemblyName)
+ {
+ _dwAppDomainId = dwAppDomainId;
+
+ EX_TRY
+ {
+ if (wszCodeBase != NULL)
+ {
+ ETWCodeBase.Append(wszCodeBase);
+ ETWCodeBase.Normalize(); // Ensures that the later cast to LPCWSTR does not throw.
+ }
+ }
+ EX_CATCH
+ {
+ ETWCodeBase.Clear();
+ }
+ EX_END_CATCH(RethrowTransientExceptions)
+
+ EX_TRY
+ {
+ if (szAssemblyName != NULL)
+ {
+ ETWAssemblyName.AppendUTF8(szAssemblyName);
+ ETWAssemblyName.Normalize(); // Ensures that the later cast to LPCWSTR does not throw.
+ }
+ }
+ EX_CATCH
+ {
+ ETWAssemblyName.Clear();
+ }
+ EX_END_CATCH(RethrowTransientExceptions)
+
+ FireEtwLoaderPhaseStart(_dwAppDomainId, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderDynamicLoad, ETWCodeBase.IsEmpty() ? NULL : (LPCWSTR)ETWCodeBase, ETWAssemblyName.IsEmpty() ? NULL : (LPCWSTR)ETWAssemblyName, GetClrInstanceId());
+
+ initialized = TRUE;
+ }
+
+ ~ETWLoaderPhaseHolder()
+ {
+ if (initialized)
+ {
+ FireEtwLoaderPhaseEnd(_dwAppDomainId, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderDynamicLoad, ETWCodeBase.IsEmpty() ? NULL : (LPCWSTR)ETWCodeBase, ETWAssemblyName.IsEmpty() ? NULL : (LPCWSTR)ETWAssemblyName, GetClrInstanceId());
+ }
+ }
+ };
+
+ ETWLoaderPhaseHolder loaderPhaseHolder;
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_PRIVATEBINDING_KEYWORD)) {
+#ifdef FEATURE_FUSION
+ loaderPhaseHolder.Init(pDomain->GetId().m_dwId, m_wszCodeBase, m_pAssemblyName);
+#else
+ loaderPhaseHolder.Init(pDomain->GetId().m_dwId, NULL, NULL);
+#endif
+ }
+#endif // FEATURE_CORECLR
+
+ DomainAssembly *pAssembly = nullptr;
+
+#ifdef FEATURE_HOSTED_BINDER
+ ICLRPrivBinder * pBinder = GetHostBinder();
+
+ // If no binder was explicitly set, check if parent assembly has a binder.
+ if (pBinder == nullptr)
+ {
+ pBinder = GetBindingContextFromParentAssembly(pDomain);
+ }
+
+#ifdef FEATURE_APPX_BINDER
+ // If no explicit or parent binder, check domain.
+ if (pBinder == nullptr && AppX::IsAppXProcess())
+ {
+ pBinder = pDomain->GetCurrentLoadContextHostBinder();
+ }
+#endif
+
+ if (pBinder != nullptr)
+ {
+ ReleaseHolder<ICLRPrivAssembly> pPrivAssembly;
+ HRESULT hrCachedResult;
+ if (SUCCEEDED(pBinder->FindAssemblyBySpec(GetAppDomain(), this, &hrCachedResult, &pPrivAssembly)) &&
+ SUCCEEDED(hrCachedResult))
+ {
+ pAssembly = pDomain->FindAssembly(pPrivAssembly);
+ }
+ }
+#endif
+ if ((pAssembly == nullptr) && CanUseWithBindingCache())
+ {
+ pAssembly = pDomain->FindCachedAssembly(this);
+ }
+
+ if (pAssembly)
+ {
+ pDomain->LoadDomainFile(pAssembly, targetLevel);
+ RETURN pAssembly;
+ }
+
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+ if (IsIntrospectionOnly() && (GetCodeBase() == NULL))
+ {
+ SafeComHolder<IAssemblyName> pIAssemblyName;
+ IfFailThrow(CreateFusionName(&pIAssemblyName));
+
+ // Note: We do not support introspection-only collectible assemblies (yet)
+ AppDomain::AssemblyIterator i = pDomain->IterateAssembliesEx(
+ (AssemblyIterationFlags)(kIncludeLoaded | kIncludeIntrospection | kExcludeCollectible));
+ CollectibleAssemblyHolder<DomainAssembly *> pCachedDomainAssembly;
+
+ while (i.Next(pCachedDomainAssembly.This()))
+ {
+ _ASSERTE(!pCachedDomainAssembly->IsCollectible());
+ IAssemblyName * pCachedAssemblyName = pCachedDomainAssembly->GetAssembly()->GetFusionAssemblyName();
+ if (S_OK == pCachedAssemblyName->IsEqual(pIAssemblyName, ASM_CMPF_IL_ALL))
+ {
+ RETURN pCachedDomainAssembly;
+ }
+ }
+ }
+#endif // FEATURE_REFLECTION_ONLY_LOAD
+
+ PEAssemblyHolder pFile(pDomain->BindAssemblySpec(this, fThrowOnFileNotFound, fRaisePrebindEvents, pCallerStackMark, pLoadSecurity));
+ if (pFile == NULL)
+ RETURN NULL;
+
+ pAssembly = pDomain->LoadDomainAssembly(this, pFile, targetLevel, pLoadSecurity);
+
+ RETURN pAssembly;
+}
+
+/* static */
+Assembly *AssemblySpec::LoadAssembly(LPCSTR pSimpleName,
+ AssemblyMetaDataInternal* pContext,
+ const BYTE * pbPublicKeyOrToken,
+ DWORD cbPublicKeyOrToken,
+ DWORD dwFlags)
+{
+ CONTRACT(Assembly *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pSimpleName));
+ POSTCONDITION(CheckPointer(RETVAL));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ AssemblySpec spec;
+ IfFailThrow(spec.Init(pSimpleName, pContext,
+ pbPublicKeyOrToken, cbPublicKeyOrToken, dwFlags));
+
+ RETURN spec.LoadAssembly(FILE_LOADED);
+}
+
+/* static */
+Assembly *AssemblySpec::LoadAssembly(LPCWSTR pFilePath)
+{
+ CONTRACT(Assembly *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pFilePath));
+ POSTCONDITION(CheckPointer(RETVAL));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ AssemblySpec spec;
+ spec.SetCodeBase(pFilePath);
+ RETURN spec.LoadAssembly(FILE_LOADED);
+}
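+
+// Illustrative use (hypothetical path):
+//
+//     Assembly *pAssembly = AssemblySpec::LoadAssembly(W("C:\\plugins\\MyLib.dll"));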
+
+#ifndef FEATURE_FUSION
+HRESULT AssemblySpec::CheckFriendAssemblyName()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Version, Culture, Architecture, and publickeytoken are not permitted
+ if ((m_context.usMajorVersion != (USHORT) -1) ||
+ (m_context.szLocale != NULL) ||
+ (IsAfPA_Specified(m_dwFlags)) ||
+ (IsStrongNamed() && !HasPublicKey()))
+ {
+ return META_E_CA_BAD_FRIENDS_ARGS;
+ }
+ else
+ {
+ return S_OK;
+ }
+}
+#endif //FEATURE_FUSION
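+
+// For example, a friend declaration such as
+//     [InternalsVisibleTo("Friend, PublicKey=00240000...")]
+// passes the check above, while one naming a Version, Culture, processor
+// architecture, or PublicKeyToken fails with META_E_CA_BAD_FRIENDS_ARGS.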
+
+HRESULT AssemblySpec::EmitToken(
+ IMetaDataAssemblyEmit *pEmit,
+ mdAssemblyRef *pToken,
+ BOOL fUsePublicKeyToken, /*=TRUE*/
+ BOOL fMustBeBindable /*=FALSE*/)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ MODE_ANY;
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ PRECONDITION(HasUniqueIdentity() || AppDomain::GetCurrentDomain()->IsCompilationDomain());
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ SmallStackSString ssName;
+ fMustBeBindable ? GetEncodedName(ssName) : GetName(ssName);
+
+ ASSEMBLYMETADATA AMD;
+
+ AMD.usMajorVersion = m_context.usMajorVersion;
+ AMD.usMinorVersion = m_context.usMinorVersion;
+ AMD.usBuildNumber = m_context.usBuildNumber;
+ AMD.usRevisionNumber = m_context.usRevisionNumber;
+
+ if (m_context.szLocale) {
+ AMD.cbLocale = MultiByteToWideChar(CP_UTF8, 0, m_context.szLocale, -1, NULL, 0);
+ if(AMD.cbLocale==0)
+ IfFailGo(HRESULT_FROM_GetLastError());
+ AMD.szLocale = (LPWSTR) alloca(AMD.cbLocale * sizeof(WCHAR) );
+ if(MultiByteToWideChar(CP_UTF8, 0, m_context.szLocale, -1, AMD.szLocale, AMD.cbLocale)==0)
+ IfFailGo(HRESULT_FROM_GetLastError());
+ }
+ else {
+ AMD.cbLocale = 0;
+ AMD.szLocale = NULL;
+ }
+
+ // If we've been asked to emit a public key token in the reference but we've
+ // been given a public key then we need to generate the token now.
+ if (m_cbPublicKeyOrToken && fUsePublicKeyToken && IsAfPublicKey(m_dwFlags)) {
+ StrongNameBufferHolder<BYTE> pbPublicKeyToken;
+ DWORD cbPublicKeyToken;
+ if (!StrongNameTokenFromPublicKey(m_pbPublicKeyOrToken,
+ m_cbPublicKeyOrToken,
+ &pbPublicKeyToken,
+ &cbPublicKeyToken)) {
+ IfFailGo(StrongNameErrorInfo());
+ }
+
+ hr = pEmit->DefineAssemblyRef(pbPublicKeyToken,
+ cbPublicKeyToken,
+ ssName.GetUnicode(),
+ &AMD,
+ NULL,
+ 0,
+ m_dwFlags & ~afPublicKey,
+ pToken);
+ }
+ else {
+ hr = pEmit->DefineAssemblyRef(m_pbPublicKeyOrToken,
+ m_cbPublicKeyOrToken,
+ ssName.GetUnicode(),
+ &AMD,
+ NULL,
+ 0,
+ m_dwFlags,
+ pToken);
+ }
+
+ hr = S_OK;
+ ErrExit:
+ ;
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
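+
+// Illustrative call (hypothetical emitter and spec): emit an assembly ref,
+// letting EmitToken convert a full public key into its token form.
+//
+//     mdAssemblyRef tkRef;
+//     IfFailThrow(spec.EmitToken(pEmit, &tkRef));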
+
+//===========================================================================================
+// Recognizes assembly names that encode a WinRT type name, extracts the encoded type name,
+// and sets the type namespace and class name properties appropriately.
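+//
+// For example (hypothetical name), "Windows.Foundation!Windows.Foundation.Uri" is split
+// at the '!' into the assembly name "Windows.Foundation" and the WinRT type name
+// "Windows.Foundation.Uri".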
+
+void AssemblySpec::ParseEncodedName()
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END
+
+#ifdef FEATURE_COMINTEROP
+ if (IsContentType_WindowsRuntime())
+ {
+ StackSString ssEncodedName(SString::Utf8, m_pAssemblyName);
+ ssEncodedName.Normalize();
+
+ SString::Iterator itBang = ssEncodedName.Begin();
+ if (ssEncodedName.Find(itBang, SL(W("!"))))
+ {
+ StackSString ssAssemblyName(ssEncodedName, ssEncodedName.Begin(), itBang - ssEncodedName.Begin());
+ StackSString ssTypeName(ssEncodedName, ++itBang, ssEncodedName.End() - itBang);
+ SetName(ssAssemblyName);
+ SetWindowsRuntimeType(ssTypeName);
+ }
+ }
+#endif
+}
+
+void AssemblySpec::SetWindowsRuntimeType(
+ LPCUTF8 szNamespace,
+ LPCUTF8 szClassName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+#ifdef FEATURE_COMINTEROP
+ // Release already allocated string
+ if (m_ownedFlags & WINRT_TYPE_NAME_OWNED)
+ {
+ if (m_szWinRtTypeNamespace != nullptr)
+ delete [] m_szWinRtTypeNamespace;
+ if (m_szWinRtTypeClassName != nullptr)
+ delete [] m_szWinRtTypeClassName;
+ }
+ m_szWinRtTypeNamespace = szNamespace;
+ m_szWinRtTypeClassName = szClassName;
+
+ m_ownedFlags &= ~WINRT_TYPE_NAME_OWNED;
+#else
+ // Classic (non-phone) CoreCLR does not support WinRT interop; this should never be called with a non-empty type name
+ _ASSERTE((szNamespace == NULL) && (szClassName == NULL));
+#endif
+}
+
+void AssemblySpec::SetWindowsRuntimeType(
+ SString const & _ssTypeName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Release already allocated string
+ if (m_ownedFlags & WINRT_TYPE_NAME_OWNED)
+ {
+ if (m_szWinRtTypeNamespace != nullptr)
+ delete[] m_szWinRtTypeNamespace;
+ if (m_szWinRtTypeClassName != nullptr)
+ delete[] m_szWinRtTypeClassName;
+ m_ownedFlags &= ~WINRT_TYPE_NAME_OWNED;
+ }
+
+ SString ssTypeName;
+ _ssTypeName.ConvertToUTF8(ssTypeName);
+
+ LPUTF8 szTypeName = (LPUTF8)ssTypeName.GetUTF8NoConvert();
+ ns::SplitInline(szTypeName, m_szWinRtTypeNamespace, m_szWinRtTypeClassName);
+ m_ownedFlags &= ~WINRT_TYPE_NAME_OWNED;
+ // Make a copy of the type name strings
+ CloneFields(WINRT_TYPE_NAME_OWNED);
+}
+
+
+AssemblySpecBindingCache::AssemblySpecBindingCache()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+AssemblySpecBindingCache::~AssemblySpecBindingCache()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Clear();
+}
+
+void AssemblySpecBindingCache::Clear()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PtrHashMap::PtrIterator i = m_map.begin();
+ while (!i.end())
+ {
+ AssemblyBinding *b = (AssemblyBinding*) i.GetValue();
+ if (m_pHeap == NULL)
+ delete b;
+ else
+ b->~AssemblyBinding();
+
+ ++i;
+ }
+
+ m_map.Clear();
+}
+
+void AssemblySpecBindingCache::OnAppDomainUnload()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PtrHashMap::PtrIterator i = m_map.begin();
+ while (!i.end())
+ {
+ AssemblyBinding *b = (AssemblyBinding*) i.GetValue();
+ b->OnAppDomainUnload();
+
+ ++i;
+ }
+}
+
+void AssemblySpecBindingCache::Init(CrstBase *pCrst, LoaderHeap *pHeap)
+{
+ WRAPPER_NO_CONTRACT;
+
+ LockOwner lock = {pCrst, IsOwnerOfCrst};
+ m_map.Init(INITIAL_ASM_SPEC_HASH_SIZE, CompareSpecs, TRUE, &lock);
+ m_pHeap = pHeap;
+}
+
+#if defined(FEATURE_CORECLR)
+AssemblySpecBindingCache::AssemblyBinding* AssemblySpecBindingCache::GetAssemblyBindingEntryForAssemblySpec(AssemblySpec* pSpec, BOOL fThrow)
+{
+ CONTRACTL
+ {
+ if (fThrow)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ else
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ FORBID_FAULT;
+ }
+ MODE_ANY;
+ PRECONDITION(pSpec != NULL);
+ }
+ CONTRACTL_END;
+
+ AssemblyBinding* pEntry = (AssemblyBinding *) INVALIDENTRY;
+ UPTR key = (UPTR)pSpec->Hash();
+
+ // On CoreCLR, we will use the BinderID as the key
+ ICLRPrivBinder *pBinderContextForLookup = NULL;
+ AppDomain *pSpecDomain = pSpec->GetAppDomain();
+ bool fGetBindingContextFromParent = true;
+
+    // Check if the AssemblySpec has already specified its binding context. This will be set for assemblies
+    // that are explicitly bound using the AssemblyLoadContext LoadFrom* methods.
+ pBinderContextForLookup = pSpec->GetBindingContext();
+ if (pBinderContextForLookup != NULL)
+ {
+        // We are working with the actual binding context in which the assembly was expected to be loaded.
+        // Thus, we don't need to get it from the parent assembly.
+ fGetBindingContextFromParent = false;
+ }
+
+ if (fGetBindingContextFromParent)
+ {
+        // Mscorlib does not have a binding context associated with it, so its lookup is done
+        // using only its AssemblySpec hash.
+ if (!pSpec->IsAssemblySpecForMscorlib())
+ {
+ pBinderContextForLookup = pSpec->GetBindingContextFromParentAssembly(pSpecDomain);
+ pSpec->SetBindingContext(pBinderContextForLookup);
+ }
+ }
+
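+    // The cache key combines the spec's hash with the binder's unique ID so that
+    // the same assembly identity bound in different contexts yields distinct entries.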
+ UPTR lookupKey = key;
+ if (pBinderContextForLookup)
+ {
+ UINT_PTR binderID = 0;
+ HRESULT hr = pBinderContextForLookup->GetBinderID(&binderID);
+ _ASSERTE(SUCCEEDED(hr));
+ lookupKey = key^binderID;
+ }
+
+ pEntry = (AssemblyBinding *) m_map.LookupValue(lookupKey, pSpec);
+ if (pEntry == (AssemblyBinding *) INVALIDENTRY)
+ {
+        // We didn't find the AssemblyBinding entry against the binder of the parent assembly.
+        // It is possible that the AssemblySpec corresponds to a TPA assembly, so try the lookup
+        // against the TPABinder context.
+ ICLRPrivBinder* pTPABinderContext = pSpecDomain->GetTPABinderContext();
+ if (!AreSameBinderInstance(pTPABinderContext, pBinderContextForLookup))
+ {
+ UINT_PTR tpaBinderID = 0;
+ HRESULT hr = pTPABinderContext->GetBinderID(&tpaBinderID);
+ _ASSERTE(SUCCEEDED(hr));
+ lookupKey = key^tpaBinderID;
+
+ // Set the binding context in AssemblySpec to be TPABinder
+ // as that will be used in the Lookup operation below.
+ if (fGetBindingContextFromParent)
+ {
+ pSpec->SetBindingContext(pTPABinderContext);
+ }
+
+ pEntry = (AssemblyBinding *) m_map.LookupValue(lookupKey, pSpec);
+ }
+ }
+
+    // Reset the binding context if none was originally present in the AssemblySpec and we didn't
+    // find any entry in the cache.
+ if (fGetBindingContextFromParent)
+ {
+ if (pEntry == (AssemblyBinding *) INVALIDENTRY)
+ {
+ pSpec->SetBindingContext(NULL);
+ }
+ }
+
+ return pEntry;
+}
+#endif // defined(FEATURE_CORECLR)
+
+BOOL AssemblySpecBindingCache::Contains(AssemblySpec *pSpec)
+{
+ WRAPPER_NO_CONTRACT;
+
+#if !defined(FEATURE_CORECLR)
+ DWORD key = pSpec->Hash();
+ AssemblyBinding *entry = (AssemblyBinding *) m_map.LookupValue(key, pSpec);
+ return (entry != (AssemblyBinding *) INVALIDENTRY);
+#else // defined(FEATURE_CORECLR)
+ return (GetAssemblyBindingEntryForAssemblySpec(pSpec, TRUE) != (AssemblyBinding *) INVALIDENTRY);
+#endif // !defined(FEATURE_CORECLR)
+}
+
+DomainAssembly *AssemblySpecBindingCache::LookupAssembly(AssemblySpec *pSpec,
+ BOOL fThrow /*=TRUE*/)
+{
+ CONTRACT(DomainAssembly *)
+ {
+ INSTANCE_CHECK;
+ if (fThrow) {
+ GC_TRIGGERS;
+ THROWS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ else {
+ GC_NOTRIGGER;
+ NOTHROW;
+ FORBID_FAULT;
+ }
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ AssemblyBinding *entry = (AssemblyBinding *) INVALIDENTRY;
+
+#if !defined(FEATURE_CORECLR)
+ DWORD key = pSpec->Hash();
+ entry = (AssemblyBinding *) m_map.LookupValue(key, pSpec);
+#else // defined(FEATURE_CORECLR)
+ entry = GetAssemblyBindingEntryForAssemblySpec(pSpec, fThrow);
+#endif // !defined(FEATURE_CORECLR)
+
+ if (entry == (AssemblyBinding *) INVALIDENTRY)
+ RETURN NULL;
+ else
+ {
+ if ((entry->GetAssembly() == NULL) && fThrow)
+ {
+ // May be either unloaded, or an exception occurred.
+ entry->ThrowIfError();
+ }
+
+ RETURN entry->GetAssembly();
+ }
+}
+
+PEAssembly *AssemblySpecBindingCache::LookupFile(AssemblySpec *pSpec, BOOL fThrow /*=TRUE*/)
+{
+ CONTRACT(PEAssembly *)
+ {
+ INSTANCE_CHECK;
+ if (fThrow) {
+ GC_TRIGGERS;
+ THROWS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ else {
+ GC_NOTRIGGER;
+ NOTHROW;
+ FORBID_FAULT;
+ }
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ AssemblyBinding *entry = (AssemblyBinding *) INVALIDENTRY;
+
+#if !defined(FEATURE_CORECLR)
+ DWORD key = pSpec->Hash();
+ entry = (AssemblyBinding *) m_map.LookupValue(key, pSpec);
+#else // defined(FEATURE_CORECLR)
+ entry = GetAssemblyBindingEntryForAssemblySpec(pSpec, fThrow);
+#endif // !defined(FEATURE_CORECLR)
+
+ if (entry == (AssemblyBinding *) INVALIDENTRY)
+ RETURN NULL;
+ else
+ {
+ if (fThrow && (entry->GetFile() == NULL))
+ {
+ CONSISTENCY_CHECK(entry->IsError());
+ entry->ThrowIfError();
+ }
+
+ RETURN entry->GetFile();
+ }
+}
+
+
+class AssemblyBindingHolder
+{
+public:
+ AssemblyBindingHolder()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_entry = NULL;
+ m_pHeap = NULL;
+ }
+
+ AssemblySpecBindingCache::AssemblyBinding *CreateAssemblyBinding(LoaderHeap *pHeap)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ m_pHeap = pHeap;
+ if (pHeap)
+ {
+ m_entry = new (m_amTracker.Track(pHeap->AllocMem(S_SIZE_T(sizeof(AssemblySpecBindingCache::AssemblyBinding))))) AssemblySpecBindingCache::AssemblyBinding;
+ }
+ else
+ {
+ m_entry = new AssemblySpecBindingCache::AssemblyBinding;
+ }
+ return m_entry;
+ }
+
+ ~AssemblyBindingHolder()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ if (m_entry)
+ {
+ if (m_pHeap)
+ {
+ // just call destructor - m_amTracker will delete the memory for m_entry itself.
+ m_entry->~AssemblyBinding();
+ }
+ else
+ {
+ delete m_entry;
+ }
+ }
+ }
+
+ void SuppressRelease()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_entry = NULL;
+ m_pHeap = NULL;
+ m_amTracker.SuppressRelease();
+ }
+
+ AllocMemTracker *GetPamTracker()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_amTracker;
+ }
+
+
+
+private:
+ AssemblySpecBindingCache::AssemblyBinding *m_entry;
+ LoaderHeap *m_pHeap;
+ AllocMemTracker m_amTracker;
+};
+
+// NOTE ABOUT STATE OF CACHE ENTRIES:
+//
+// A cache entry can be in one of 4 states:
+// 1. Empty (no entry)
+// 2. File (a PEAssembly has been bound, but not yet an Assembly)
+// 3. Assembly (Both a PEAssembly & Assembly are available.)
+// 4. Error (an error has occurred)
+//
+// The legal state transitions are:
+// 1 -> any
+// 2 -> 3
+// 2 -> 4
+
+
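+// A typical caller sequence (hypothetical cache and arguments) walks an entry
+// through states 1 -> 2 -> 3; a bind failure before the Assembly is available
+// moves it to state 4 via StoreException:
+//
+//     AssemblySpecBindingCache cache;
+//     cache.Init(pCrst);                      // 1: empty
+//     cache.StoreFile(pSpec, pFile);          // 2: PEAssembly bound
+//     cache.StoreAssembly(pSpec, pAssembly);  // 3: Assembly available
+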
+BOOL AssemblySpecBindingCache::StoreAssembly(AssemblySpec *pSpec, DomainAssembly *pAssembly)
+{
+ CONTRACT(BOOL)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+#ifdef FEATURE_HOSTED_BINDER
+ // Host binder based assembly spec's cannot currently be safely inserted into caches.
+ PRECONDITION(pSpec->GetHostBinder() == nullptr);
+#endif // FEATURE_HOSTED_BINDER
+ POSTCONDITION(UnsafeContains(this, pSpec));
+ POSTCONDITION(UnsafeVerifyLookupAssembly(this, pSpec, pAssembly));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ UPTR key = (UPTR)pSpec->Hash();
+
+#if defined(FEATURE_CORECLR)
+ // On CoreCLR, we will use the BinderID as the key
+ ICLRPrivBinder* pBinderContextForLookup = pAssembly->GetFile()->GetBindingContext();
+ _ASSERTE(pBinderContextForLookup || pAssembly->GetFile()->IsSystem());
+ if (pBinderContextForLookup)
+ {
+ UINT_PTR binderID = 0;
+ HRESULT hr = pBinderContextForLookup->GetBinderID(&binderID);
+ _ASSERTE(SUCCEEDED(hr));
+ key = key^binderID;
+
+ if (!pSpec->GetBindingContext())
+ {
+ pSpec->SetBindingContext(pBinderContextForLookup);
+ }
+ }
+#endif // defined(FEATURE_CORECLR)
+
+ AssemblyBinding *entry = (AssemblyBinding *) m_map.LookupValue(key, pSpec);
+
+ if (entry == (AssemblyBinding *) INVALIDENTRY)
+ {
+ AssemblyBindingHolder abHolder;
+ entry = abHolder.CreateAssemblyBinding(m_pHeap);
+
+ entry->Init(pSpec,pAssembly->GetFile(),pAssembly,NULL,m_pHeap, abHolder.GetPamTracker());
+
+ m_map.InsertValue(key, entry);
+
+ abHolder.SuppressRelease();
+
+ STRESS_LOG2(LF_CLASSLOADER,LL_INFO10,"StoreFile (StoreAssembly): Add cached entry (%p) with PEFile %p",entry,pAssembly->GetFile());
+ RETURN TRUE;
+ }
+ else
+ {
+ if (!entry->IsError())
+ {
+ if (entry->GetAssembly() != NULL)
+ {
+ // OK if this is a duplicate
+ if (entry->GetAssembly() == pAssembly)
+ RETURN TRUE;
+ }
+ else
+ {
+                // OK if we have a matching PEAssembly
+ if (entry->GetFile() != NULL
+ && pAssembly->GetFile()->Equals(entry->GetFile()))
+ {
+ entry->SetAssembly(pAssembly);
+ RETURN TRUE;
+ }
+ }
+ }
+
+ // Invalid cache transition (see above note about state transitions)
+ RETURN FALSE;
+ }
+}
+
+// Note that this routine may be called outside a lock, so it may be racing with another thread.
+// Returns TRUE if the add was successful - if FALSE is returned, the caller should honor the
+// current cached value to ensure consistency.
+
+BOOL AssemblySpecBindingCache::StoreFile(AssemblySpec *pSpec, PEAssembly *pFile)
+{
+ CONTRACT(BOOL)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+#ifdef FEATURE_HOSTED_BINDER
+ // Host binder based assembly spec's cannot currently be safely inserted into caches.
+ PRECONDITION(pSpec->GetHostBinder() == nullptr);
+#endif // FEATURE_HOSTED_BINDER
+ POSTCONDITION((!RETVAL) || (UnsafeContains(this, pSpec) && UnsafeVerifyLookupFile(this, pSpec, pFile)));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ UPTR key = (UPTR)pSpec->Hash();
+
+#if defined(FEATURE_CORECLR)
+ // On CoreCLR, we will use the BinderID as the key
+ ICLRPrivBinder* pBinderContextForLookup = pFile->GetBindingContext();
+ _ASSERTE(pBinderContextForLookup || pFile->IsSystem());
+ if (pBinderContextForLookup)
+ {
+ UINT_PTR binderID = 0;
+ HRESULT hr = pBinderContextForLookup->GetBinderID(&binderID);
+ _ASSERTE(SUCCEEDED(hr));
+ key = key^binderID;
+
+ if (!pSpec->GetBindingContext())
+ {
+ pSpec->SetBindingContext(pBinderContextForLookup);
+ }
+ }
+#endif // defined(FEATURE_CORECLR)
+
+ AssemblyBinding *entry = (AssemblyBinding *) m_map.LookupValue(key, pSpec);
+
+ if (entry == (AssemblyBinding *) INVALIDENTRY)
+ {
+ AssemblyBindingHolder abHolder;
+ entry = abHolder.CreateAssemblyBinding(m_pHeap);
+
+ entry->Init(pSpec,pFile,NULL,NULL,m_pHeap, abHolder.GetPamTracker());
+
+ m_map.InsertValue(key, entry);
+ abHolder.SuppressRelease();
+
+ STRESS_LOG2(LF_CLASSLOADER,LL_INFO10,"StoreFile: Add cached entry (%p) with PEFile %p\n", entry, pFile);
+
+ RETURN TRUE;
+ }
+ else
+ {
+ if (!entry->IsError())
+ {
+ // OK if this is a duplicate
+ if (entry->GetFile() != NULL
+ && pFile->Equals(entry->GetFile()))
+ RETURN TRUE;
+ }
+ else
+ if (entry->IsPostBindError())
+ {
+            // Another thread has already recorded the failure that will occur later; rethrow it now.
+ entry->ThrowIfError();
+
+ }
+ STRESS_LOG2(LF_CLASSLOADER,LL_INFO10,"Incompatible cached entry found (%p) when adding PEFile %p\n", entry, pFile);
+ // Invalid cache transition (see above note about state transitions)
+ RETURN FALSE;
+ }
+}
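+
+// Illustrative caller pattern (hypothetical cache and spec): when the racing
+// add fails, fall back to the value already in the cache.
+//
+//     if (!cache.StoreFile(pSpec, pFile))
+//     {
+//         PEAssembly *pWinner = cache.LookupFile(pSpec);  // honor the cached value
+//         ...
+//     }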
+
+BOOL AssemblySpecBindingCache::StoreException(AssemblySpec *pSpec, Exception* pEx)
+{
+ CONTRACT(BOOL)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+#ifdef FEATURE_HOSTED_BINDER
+ // Host binder based assembly spec's cannot currently be safely inserted into caches.
+ PRECONDITION(pSpec->GetHostBinder() == nullptr);
+#endif // FEATURE_HOSTED_BINDER
+ DISABLED(POSTCONDITION(UnsafeContains(this, pSpec))); //<TODO>@todo: Getting violations here - StoreExceptions could happen anywhere so this is possibly too aggressive.</TODO>
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ UPTR key = (UPTR)pSpec->Hash();
+
+#if !defined(FEATURE_CORECLR)
+ AssemblyBinding *entry = (AssemblyBinding *) m_map.LookupValue(key, pSpec);
+#else // defined(FEATURE_CORECLR)
+ AssemblyBinding *entry = GetAssemblyBindingEntryForAssemblySpec(pSpec, TRUE);
+ if (entry == (AssemblyBinding *) INVALIDENTRY)
+ {
+ // TODO: Merge this with the failure lookup in the binder
+ //
+ // Since no entry was found for this assembly in any binding context, save the failure
+ // in the TPABinder context
+ ICLRPrivBinder* pBinderToSaveException = NULL;
+ pBinderToSaveException = pSpec->GetBindingContext();
+ if (pBinderToSaveException == NULL)
+ {
+ if (!pSpec->IsAssemblySpecForMscorlib())
+ {
+ pBinderToSaveException = pSpec->GetBindingContextFromParentAssembly(pSpec->GetAppDomain());
+ UINT_PTR binderID = 0;
+ HRESULT hr = pBinderToSaveException->GetBinderID(&binderID);
+ _ASSERTE(SUCCEEDED(hr));
+ key = key^binderID;
+ }
+ }
+ }
+#endif // defined(FEATURE_CORECLR)
+
+ if (entry == (AssemblyBinding *) INVALIDENTRY) {
+ AssemblyBindingHolder abHolder;
+ entry = abHolder.CreateAssemblyBinding(m_pHeap);
+
+ entry->Init(pSpec,NULL,NULL,pEx,m_pHeap, abHolder.GetPamTracker());
+
+ m_map.InsertValue(key, entry);
+ abHolder.SuppressRelease();
+
+ STRESS_LOG2(LF_CLASSLOADER,LL_INFO10,"StoreFile (StoreException): Add cached entry (%p) with exception %p",entry,pEx);
+ RETURN TRUE;
+ }
+ else
+ {
+ // OK if this is a duplicate
+ if (entry->IsError())
+ {
+ if (entry->GetHR() == pEx->GetHR())
+ RETURN TRUE;
+ }
+ else
+ {
+ // OK to transition to error if we don't have an Assembly yet
+ if (entry->GetAssembly() == NULL)
+ {
+ entry->InitException(pEx);
+ RETURN TRUE;
+ }
+ }
+
+ // Invalid cache transition (see above note about state transitions)
+ RETURN FALSE;
+ }
+}
+
+/* static */
+BOOL AssemblySpecHash::CompareSpecs(UPTR u1, UPTR u2)
+{
+    // The comparison semantics are the same as the binding cache's.
+ WRAPPER_NO_CONTRACT;
+ return AssemblySpecBindingCache::CompareSpecs(u1,u2);
+}
+
+
+
+
+/* static */
+BOOL AssemblySpecBindingCache::CompareSpecs(UPTR u1, UPTR u2)
+{
+ WRAPPER_NO_CONTRACT;
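+    // PtrHashMap reserves the low bit of stored pointers, so the stored spec
+    // arrives shifted right by one and must be shifted back before use.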
+ AssemblySpec *a1 = (AssemblySpec *) (u1 << 1);
+ AssemblySpec *a2 = (AssemblySpec *) u2;
+
+#if defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_APPX_BINDER)
+ _ASSERTE(a1->GetAppDomain() == a2->GetAppDomain());
+ if (a1->GetAppDomain()->HasLoadContextHostBinder())
+ return (CLRPrivBinderUtil::CompareHostBinderSpecs(a1,a2));
+#endif
+
+ if ((!a1->CompareEx(a2)) ||
+ (a1->IsIntrospectionOnly() != a2->IsIntrospectionOnly()))
+ return FALSE;
+ return TRUE;
+}
+
+
+
+/* static */
+BOOL DomainAssemblyCache::CompareBindingSpec(UPTR spec1, UPTR spec2)
+{
+ WRAPPER_NO_CONTRACT;
+
+ AssemblySpec* pSpec1 = (AssemblySpec*) (spec1 << 1);
+ AssemblyEntry* pEntry2 = (AssemblyEntry*) spec2;
+
+#if defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_FUSION)
+ AssemblySpec* pSpec2 = &pEntry2->spec;
+ _ASSERTE(pSpec1->GetAppDomain() == pSpec2->GetAppDomain());
+ if (pSpec1->GetAppDomain()->HasLoadContextHostBinder())
+ return (CLRPrivBinderUtil::CompareHostBinderSpecs(pSpec1,pSpec2));
+#endif
+
+
+ if ((!pSpec1->CompareEx(&pEntry2->spec)) ||
+ (pSpec1->IsIntrospectionOnly() != pEntry2->spec.IsIntrospectionOnly()))
+ return FALSE;
+
+ return TRUE;
+}
+
+
+DomainAssemblyCache::AssemblyEntry* DomainAssemblyCache::LookupEntry(AssemblySpec* pSpec)
+{
+ CONTRACT (DomainAssemblyCache::AssemblyEntry*)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END
+
+ DWORD hashValue = pSpec->Hash();
+
+ LPVOID pResult = m_Table.LookupValue(hashValue, pSpec);
+ if(pResult == (LPVOID) INVALIDENTRY)
+ RETURN NULL;
+ else
+ RETURN (AssemblyEntry*) pResult;
+}
+
+VOID DomainAssemblyCache::InsertEntry(AssemblySpec* pSpec, LPVOID pData1, LPVOID pData2/*=NULL*/)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
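+    // Double-checked insert: probe without the lock first, then re-probe under
+    // the domain cache lock before allocating and publishing a new entry.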
+ LPVOID ptr = LookupEntry(pSpec);
+ if(ptr == NULL) {
+
+ BaseDomain::CacheLockHolder lh(m_pDomain);
+
+ ptr = LookupEntry(pSpec);
+ if(ptr == NULL) {
+ AllocMemTracker amTracker;
+ AllocMemTracker *pamTracker = &amTracker;
+
+ AssemblyEntry* pEntry = (AssemblyEntry*) pamTracker->Track( m_pDomain->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(AssemblyEntry))) );
+ new (&pEntry->spec) AssemblySpec ();
+
+ pEntry->spec.CopyFrom(pSpec);
+ pEntry->spec.CloneFieldsToLoaderHeap(AssemblySpec::ALL_OWNED, m_pDomain->GetLowFrequencyHeap(), pamTracker);
+ pEntry->pData[0] = pData1;
+ pEntry->pData[1] = pData2;
+ DWORD hashValue = pEntry->Hash();
+ m_Table.InsertValue(hashValue, pEntry);
+
+ pamTracker->SuppressRelease();
+ }
+ // lh goes out of scope here
+ }
+#ifdef _DEBUG
+ else {
+ _ASSERTE(pData1 == ((AssemblyEntry*) ptr)->pData[0]);
+ _ASSERTE(pData2 == ((AssemblyEntry*) ptr)->pData[1]);
+ }
+#endif
+
+}
+
+#ifdef FEATURE_FUSION
+
+IAssembly * AssemblySpec::GetParentIAssembly()
+{
+ LIMITED_METHOD_CONTRACT;
+ if(m_pParentAssembly)
+ return m_pParentAssembly->GetFile()->GetFusionAssembly();
+
+ return NULL;
+}
+
+LPCVOID AssemblySpec::GetParentAssemblyPtr()
+{
+ LIMITED_METHOD_CONTRACT;
+ if(m_pParentAssembly)
+ {
+#ifdef FEATURE_HOSTED_BINDER
+ if (m_pParentAssembly->GetFile()->HasHostAssembly())
+ return m_pParentAssembly->GetFile()->GetHostAssembly();
+ else
+#endif
+ return m_pParentAssembly->GetFile()->GetFusionAssembly();
+ }
+ return NULL;
+}
+
+#endif //FEATURE_FUSION
+
+
+
+DomainAssembly * AssemblySpec::GetParentAssembly()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pParentAssembly;
+}
diff --git a/src/vm/assemblyspec.hpp b/src/vm/assemblyspec.hpp
new file mode 100644
index 0000000000..9dc46fc655
--- /dev/null
+++ b/src/vm/assemblyspec.hpp
@@ -0,0 +1,687 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: AssemblySpec.hpp
+**
+** Purpose: Implements classes used to bind to assemblies
+**
+===========================================================*/
+#ifndef _ASSEMBLYSPEC_H
+#define _ASSEMBLYSPEC_H
+#include "hash.h"
+#include "memorypool.h"
+#ifdef FEATURE_FUSION
+#include "fusionbind.h"
+#endif
+#include "assemblyspecbase.h"
+#include "domainfile.h"
+#include "genericstackprobe.h"
+#include "holder.h"
+
+class AppDomain;
+class Assembly;
+class DomainAssembly;
+enum FileLoadLevel;
+
+class AssemblySpec : public BaseAssemblySpec
+{
+ private:
+
+ friend class AppDomain;
+ friend class AssemblyNameNative;
+
+ AppDomain *m_pAppDomain;
+ SBuffer m_HashForControl;
+ DWORD m_dwHashAlg;
+ DomainAssembly *m_pParentAssembly;
+
+ BOOL IsValidAssemblyName();
+
+ HRESULT InitializeSpecInternal(mdToken kAssemblyRefOrDef,
+ IMDInternalImport *pImport,
+ DomainAssembly *pStaticParent,
+ BOOL fIntrospectionOnly,
+ BOOL fAllowAllocation);
+
+    // InitializeSpecInternal must be used very carefully, so it is private.
+    // Functions that take the required care (and are thus allowed to call it) are declared as friends below.
+ friend Assembly * Module::GetAssemblyIfLoaded(
+ mdAssemblyRef kAssemblyRef,
+ LPCSTR szWinRtNamespace,
+ LPCSTR szWinRtClassName,
+ IMDInternalImport * pMDImportOverride,
+ BOOL fDoNotUtilizeExtraChecks,
+ ICLRPrivBinder *pBindingContextForLoadedAssembly);
+
+ public:
+
+#ifndef DACCESS_COMPILE
+ AssemblySpec() : m_pAppDomain(::GetAppDomain())
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pParentAssembly = NULL;
+ }
+#endif //!DACCESS_COMPILE
+
+ AssemblySpec(AppDomain *pAppDomain) : m_pAppDomain(pAppDomain)
+ {
+ LIMITED_METHOD_CONTRACT
+ m_pParentAssembly = NULL;
+ }
+
+#ifdef FEATURE_FUSION
+ virtual IAssembly* GetParentIAssembly();
+
+ virtual LPCVOID GetParentAssemblyPtr();
+#endif
+
+ DomainAssembly* GetParentAssembly();
+
+ ICLRPrivBinder* GetBindingContextFromParentAssembly(AppDomain *pDomain);
+
+ bool HasParentAssembly()
+ { WRAPPER_NO_CONTRACT; return GetParentAssembly() != NULL; }
+
+ void InitializeSpec(mdToken kAssemblyRefOrDef,
+ IMDInternalImport *pImport,
+ DomainAssembly *pStaticParent = NULL,
+ BOOL fIntrospectionOnly = FALSE)
+ {
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ HRESULT hr=InitializeSpecInternal(kAssemblyRefOrDef, pImport,pStaticParent,fIntrospectionOnly,TRUE);
+ if(FAILED(hr))
+ EEFileLoadException::Throw(this,hr);
+#ifndef FEATURE_CORECLR
+ CloneFields();
+#endif
+ };
+
+#ifdef FEATURE_FUSION
+ void InitializeSpec(IAssemblyName *pName,
+ DomainAssembly *pStaticParent = NULL,
+ BOOL fIntrospectionOnly = FALSE);
+#endif // FEATURE_FUSION
+
+ void InitializeSpec(PEAssembly *pFile);
+ HRESULT InitializeSpec(StackingAllocator* alloc,
+ ASSEMBLYNAMEREF* pName,
+ BOOL fParse = TRUE,
+ BOOL fIntrospectionOnly = FALSE);
+
+ void AssemblyNameInit(ASSEMBLYNAMEREF* pName, PEImage* pImageInfo); //[in,out], [in]
+
+#ifdef FEATURE_MIXEDMODE
+ void InitializeSpec(HINSTANCE hMod, BOOL fIntrospectionOnly = FALSE);
+#endif // FEATURE_MIXEDMODE
+
+ void SetCodeBase(LPCWSTR szCodeBase)
+ {
+ WRAPPER_NO_CONTRACT;
+ BaseAssemblySpec::SetCodeBase(szCodeBase);
+ }
+ void SetCodeBase(StackingAllocator* alloc, STRINGREF *pCodeBase);
+
+ void SetParentAssembly(DomainAssembly *pAssembly)
+ {
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pParentAssembly = pAssembly;
+#ifdef FEATURE_FUSION
+ if (pAssembly)
+ {
+#ifdef FEATURE_HOSTED_BINDER
+ _ASSERTE(GetHostBinder() == nullptr);
+#endif // FEATURE_HOSTED_BINDER
+ m_fParentLoadContext=pAssembly->GetFile()->GetLoadContext();
+ }
+ else
+ m_fParentLoadContext = LOADCTX_TYPE_DEFAULT;
+#endif
+ }
+
+ // Note that this method does not clone the fields!
+ void CopyFrom(AssemblySpec* pSource)
+ {
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BaseAssemblySpec::CopyFrom(pSource);
+
+ SetIntrospectionOnly(pSource->IsIntrospectionOnly());
+ SetParentAssembly(pSource->GetParentAssembly());
+ m_HashForControl = pSource->m_HashForControl;
+ m_dwHashAlg = pSource->m_dwHashAlg;
+ }
+
+
+#ifndef FEATURE_FUSION
+ HRESULT CheckFriendAssemblyName();
+#endif // FEATURE_FUSION
+
+
+ HRESULT EmitToken(IMetaDataAssemblyEmit *pEmit,
+ mdAssemblyRef *pToken,
+ BOOL fUsePublicKeyToken = TRUE,
+ BOOL fMustBeBindable = FALSE /*(used only by FusionBind's implementation)*/);
+
+    // Make sure this matches the managed Assembly.DemandPermission()
+ enum FilePermFlag {
+ FILE_PATHDISCOVERY = 0x0,
+ FILE_READ = 0x1,
+ FILE_READANDPATHDISC = 0x2,
+ FILE_WEBPERM = 0x3
+ };
+
+#ifdef FEATURE_FUSION
+ static void DemandFileIOPermission(LPCWSTR wszCodeBase,
+ BOOL fHavePath,
+ DWORD dwDemandFlag);
+ void DemandFileIOPermission(PEAssembly *pFile);
+#endif
+
+
+#ifndef FEATURE_FUSION
+ VOID Bind(
+ AppDomain* pAppDomain,
+ BOOL fThrowOnFileNotFound,
+ CoreBindResult* pBindResult,
+ BOOL fNgenExplicitBind = FALSE,
+ BOOL fExplicitBindToNativeImage = FALSE,
+ StackCrawlMark *pCallerStackMark = NULL );
+#ifndef FEATURE_CORECLR
+ static VOID BindToSystem(BINDER_SPACE::Assembly** ppAssembly);
+#endif
+#endif
+
+ Assembly *LoadAssembly(FileLoadLevel targetLevel,
+ AssemblyLoadSecurity *pLoadSecurity = NULL,
+ BOOL fThrowOnFileNotFound = TRUE,
+ BOOL fRaisePrebindEvents = TRUE,
+ StackCrawlMark *pCallerStackMark = NULL);
+ DomainAssembly *LoadDomainAssembly(FileLoadLevel targetLevel,
+ AssemblyLoadSecurity *pLoadSecurity = NULL,
+ BOOL fThrowOnFileNotFound = TRUE,
+ BOOL fRaisePrebindEvents = TRUE,
+ StackCrawlMark *pCallerStackMark = NULL);
+
+ //****************************************************************************************
+ //
+ // Creates and loads an assembly based on the name and context.
+ static Assembly *LoadAssembly(LPCSTR pSimpleName,
+ AssemblyMetaDataInternal* pContext,
+ const BYTE * pbPublicKeyOrToken,
+ DWORD cbPublicKeyOrToken,
+ DWORD dwFlags);
+
+#ifdef FEATURE_FUSION
+ //****************************************************************************************
+ //
+ HRESULT LoadAssembly(IApplicationContext *pFusionContext,
+ FusionSink *pSink,
+ IAssembly** ppIAssembly,
+ IHostAssembly** ppIHostAssembly,
+ IBindResult **ppNativeFusionAssembly,
+ BOOL fForIntrospectionOnly,
+ BOOL fSuppressSecurityChecks);
+#endif
+
+ // Load an assembly based on an explicit path
+ static Assembly *LoadAssembly(LPCWSTR pFilePath);
+
+#ifdef FEATURE_FUSION
+ BOOL FindAssemblyFile(AppDomain *pAppDomain, BOOL fThrowOnFileNotFound,
+ IAssembly** ppIAssembly, IHostAssembly **ppIHostAssembly, IBindResult** pNativeFusionAssembly,
+ IFusionBindLog **ppFusionLog, HRESULT *pHRBindResult, StackCrawlMark *pCallerStackMark = NULL,
+ AssemblyLoadSecurity *pLoadSecurity = NULL);
+#endif // FEATURE_FUSION
+
+ private:
+ void MatchRetargetedPublicKeys(Assembly *pAssembly);
+ public:
+ void MatchPublicKeys(Assembly *pAssembly);
+ PEAssembly *ResolveAssemblyFile(AppDomain *pAppDomain, BOOL fPreBind);
+
+ AppDomain *GetAppDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pAppDomain;
+ }
+
+ HRESULT SetHashForControl(PBYTE pHashForControl, DWORD dwHashForControl, DWORD dwHashAlg)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pHashForControl));
+ }
+ CONTRACTL_END;
+
+ m_HashForControl.Set(pHashForControl, dwHashForControl);
+ m_dwHashAlg=dwHashAlg;
+ return S_OK;
+ }
+
+ void ParseEncodedName();
+
+ void SetWindowsRuntimeType(LPCUTF8 szNamespace, LPCUTF8 szClassName);
+ void SetWindowsRuntimeType(SString const & _ssTypeName);
+
+ inline HRESULT SetContentType(AssemblyContentType type)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (type == AssemblyContentType_Default)
+ {
+ m_dwFlags = (m_dwFlags & ~afContentType_Mask) | afContentType_Default;
+ return S_OK;
+ }
+ else if (type == AssemblyContentType_WindowsRuntime)
+ {
+ m_dwFlags = (m_dwFlags & ~afContentType_Mask) | afContentType_WindowsRuntime;
+ return S_OK;
+ }
+ else
+ {
+ _ASSERTE(!"Unexpected content type.");
+ return E_UNEXPECTED;
+ }
+ }
+
+ // Returns true if the object can be used to bind to the target assembly.
+ // One case in which this is not true is when the content type is WinRT
+ // but no type name has been set.
+ inline bool HasBindableIdentity() const
+ {
+ STATIC_CONTRACT_LIMITED_METHOD;
+#ifdef FEATURE_COMINTEROP
+ return (HasUniqueIdentity() ||
+ (IsContentType_WindowsRuntime() && (GetWinRtTypeClassName() != NULL)));
+#else
+ return TRUE;
+#endif
+ }
+
+ inline BOOL CanUseWithBindingCache() const
+ {
+ STATIC_CONTRACT_LIMITED_METHOD;
+#if defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_APPX_BINDER)
+ return (GetHostBinder() == nullptr) && HasUniqueIdentity();
+#else
+ return HasUniqueIdentity();
+#endif
+ }
+
+#ifdef FEATURE_HOSTED_BINDER
+ inline ICLRPrivBinder *GetHostBinder() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pHostBinder;
+ }
+
+ inline void SetHostBinder(ICLRPrivBinder *pHostBinder)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pHostBinder = pHostBinder;
+ }
+#endif
+
+};
+
+#define INITIAL_ASM_SPEC_HASH_SIZE 7
+class AssemblySpecHash
+{
+ LoaderHeap *m_pHeap;
+ PtrHashMap m_map;
+
+ public:
+
+#ifndef DACCESS_COMPILE
+ AssemblySpecHash(LoaderHeap *pHeap = NULL)
+ : m_pHeap(pHeap)
+ {
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_map.Init(INITIAL_ASM_SPEC_HASH_SIZE, CompareSpecs, FALSE, NULL);
+ }
+
+ ~AssemblySpecHash();
+#endif
+
+#ifndef DACCESS_COMPILE
+ //
+ // Returns TRUE if the spec was already in the table
+ //
+
+ BOOL Store(AssemblySpec *pSpec, AssemblySpec **ppStoredSpec = NULL)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ DWORD key = pSpec->Hash();
+
+ AssemblySpec *entry = (AssemblySpec *) m_map.LookupValue(key, pSpec);
+
+ if (entry == (AssemblySpec*) INVALIDENTRY)
+ {
+ if (m_pHeap != NULL)
+ entry = new (m_pHeap->AllocMem(S_SIZE_T(sizeof(AssemblySpec)))) AssemblySpec;
+ else
+ entry = new AssemblySpec;
+
+ GCX_PREEMP();
+ entry->CopyFrom(pSpec);
+ entry->CloneFields(AssemblySpec::ALL_OWNED);
+
+ m_map.InsertValue(key, entry);
+
+ if (ppStoredSpec != NULL)
+ *ppStoredSpec = entry;
+
+ return FALSE;
+ }
+ else
+ {
+ if (ppStoredSpec != NULL)
+ *ppStoredSpec = entry;
+ return TRUE;
+ }
+ }
+#endif // DACCESS_COMPILE
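+
+    // Illustrative use (hypothetical spec): intern a spec and reuse the stored,
+    // fully-owned copy.
+    //
+    //     AssemblySpec *pStored;
+    //     BOOL fExisted = specHash.Store(pSpec, &pStored);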
+
+ DWORD Hash(AssemblySpec *pSpec)
+ {
+ WRAPPER_NO_CONTRACT;
+ return pSpec->Hash();
+ }
+
+ static BOOL CompareSpecs(UPTR u1, UPTR u2);
+};
+
+
+class AssemblySpecBindingCache
+{
+ friend class AssemblyBindingHolder;
+ struct AssemblyBinding
+ {
+ public:
+ ~AssemblyBinding()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pFile != NULL)
+ m_pFile->Release();
+
+ if (m_exceptionType==EXTYPE_EE)
+ delete m_pException;
+ };
+
+ void OnAppDomainUnload()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (m_exceptionType == EXTYPE_EE)
+ {
+ m_exceptionType = EXTYPE_NONE;
+ delete m_pException;
+ m_pException = NULL;
+ }
+ };
+
+ inline DomainAssembly* GetAssembly(){ LIMITED_METHOD_CONTRACT; return m_pAssembly;};
+ inline void SetAssembly(DomainAssembly* pAssembly){ LIMITED_METHOD_CONTRACT; m_pAssembly=pAssembly;};
+ inline PEAssembly* GetFile(){ LIMITED_METHOD_CONTRACT; return m_pFile;};
+ inline BOOL IsError(){ LIMITED_METHOD_CONTRACT; return (m_exceptionType!=EXTYPE_NONE);};
+
+ // bound to the file, but failed later
+ inline BOOL IsPostBindError(){ LIMITED_METHOD_CONTRACT; return IsError() && GetFile()!=NULL;};
+
+ inline void ThrowIfError()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ switch(m_exceptionType)
+ {
+ case EXTYPE_NONE: return;
+ case EXTYPE_HR: ThrowHR(m_hr);
+ case EXTYPE_EE: PAL_CPP_THROW(Exception *, m_pException->DomainBoundClone());
+ default: _ASSERTE(!"Unexpected exception type");
+ }
+ };
+ inline void Init(AssemblySpec* pSpec, PEAssembly* pFile, DomainAssembly* pAssembly, Exception* pEx, LoaderHeap *pHeap, AllocMemTracker *pamTracker)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ InitInternal(pSpec,pFile,pAssembly);
+ if (pHeap != NULL)
+ {
+ m_spec.CloneFieldsToLoaderHeap(AssemblySpec::ALL_OWNED,pHeap, pamTracker);
+ }
+ else
+ {
+ m_spec.CloneFields(m_spec.ALL_OWNED);
+ }
+ InitException(pEx);
+
+ }
+
+ inline HRESULT GetHR()
+ {
+ LIMITED_METHOD_CONTRACT;
+ switch(m_exceptionType)
+ {
+ case EXTYPE_NONE: return S_OK;
+ case EXTYPE_HR: return m_hr;
+ case EXTYPE_EE: return m_pException->GetHR();
+ default: _ASSERTE(!"Unexpected exception type");
+ }
+ return E_UNEXPECTED;
+ };
+
+ inline void InitException(Exception* pEx)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_exceptionType==EXTYPE_NONE);
+
+ if (pEx==NULL)
+ return;
+
+ _ASSERTE(!pEx->IsTransient());
+
+ EX_TRY
+ {
+ m_pException = pEx->DomainBoundClone();
+ _ASSERTE(m_pException);
+ m_exceptionType=EXTYPE_EE;
+ }
+ EX_CATCH
+ {
+ InitException(pEx->GetHR());
+ }
+ EX_END_CATCH(RethrowTransientExceptions);
+
+ };
+
+ inline void InitException(HRESULT hr)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_exceptionType==EXTYPE_NONE);
+ if (FAILED(hr))
+ {
+ m_exceptionType=EXTYPE_HR;
+ m_hr=hr;
+ }
+ };
+ protected:
+
+ inline void InitInternal(AssemblySpec* pSpec, PEAssembly* pFile, DomainAssembly* pAssembly )
+ {
+ WRAPPER_NO_CONTRACT;
+ m_spec.CopyFrom(pSpec);
+ m_pFile = pFile;
+ if (m_pFile)
+ m_pFile->AddRef();
+ m_pAssembly = pAssembly;
+ m_exceptionType=EXTYPE_NONE;
+ }
+
+ AssemblySpec m_spec;
+ PEAssembly *m_pFile;
+ DomainAssembly *m_pAssembly;
+ enum{
+ EXTYPE_NONE = 0x00000000,
+ EXTYPE_HR = 0x00000001,
+ EXTYPE_EE = 0x00000002,
+ };
+ INT m_exceptionType;
+ union
+ {
+ HRESULT m_hr;
+ Exception* m_pException;
+ };
+ };
+
+ PtrHashMap m_map;
+ LoaderHeap *m_pHeap;
+
+#if defined(FEATURE_CORECLR)
+ AssemblySpecBindingCache::AssemblyBinding* GetAssemblyBindingEntryForAssemblySpec(AssemblySpec* pSpec, BOOL fThrow);
+#endif // defined(FEATURE_CORECLR)
+
+ public:
+
+ AssemblySpecBindingCache() DAC_EMPTY();
+ ~AssemblySpecBindingCache() DAC_EMPTY();
+
+ void Init(CrstBase *pCrst, LoaderHeap *pHeap = NULL);
+ void Clear();
+
+ void OnAppDomainUnload();
+
+ BOOL Contains(AssemblySpec *pSpec);
+
+ DomainAssembly *LookupAssembly(AssemblySpec *pSpec, BOOL fThrow=TRUE);
+ PEAssembly *LookupFile(AssemblySpec *pSpec, BOOL fThrow = TRUE);
+
+ BOOL StoreAssembly(AssemblySpec *pSpec, DomainAssembly *pAssembly);
+ BOOL StoreFile(AssemblySpec *pSpec, PEAssembly *pFile);
+
+ BOOL StoreException(AssemblySpec *pSpec, Exception* pEx);
+
+ DWORD Hash(AssemblySpec *pSpec)
+ {
+ WRAPPER_NO_CONTRACT;
+ return pSpec->Hash();
+ }
+
+ static BOOL CompareSpecs(UPTR u1, UPTR u2);
+};
+
+#define INITIAL_DOMAIN_ASSEMBLY_CACHE_SIZE 17
+class DomainAssemblyCache
+{
+ struct AssemblyEntry {
+ AssemblySpec spec;
+ LPVOID pData[2]; // Can be an Assembly, PEAssembly, or an Unmanaged DLL
+
+ DWORD Hash()
+ {
+ WRAPPER_NO_CONTRACT;
+ return spec.Hash();
+ }
+ };
+
+ PtrHashMap m_Table;
+ AppDomain* m_pDomain;
+
+public:
+
+ static BOOL CompareBindingSpec(UPTR spec1, UPTR spec2);
+
+ void InitializeTable(AppDomain* pDomain, CrstBase *pCrst)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(pDomain);
+ m_pDomain = pDomain;
+
+ LockOwner lock = {pCrst, IsOwnerOfCrst};
+ m_Table.Init(INITIAL_DOMAIN_ASSEMBLY_CACHE_SIZE, &CompareBindingSpec, true, &lock);
+ }
+
+ AssemblyEntry* LookupEntry(AssemblySpec* pSpec);
+
+ LPVOID LookupEntry(AssemblySpec* pSpec, UINT index)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(index < 2);
+ AssemblyEntry* ptr = LookupEntry(pSpec);
+ if(ptr == NULL)
+ return NULL;
+ else
+ return ptr->pData[index];
+ }
+
+ VOID InsertEntry(AssemblySpec* pSpec, LPVOID pData1, LPVOID pData2 = NULL);
+
+private:
+
+};
+
+#endif
diff --git a/src/vm/assemblyspecbase.h b/src/vm/assemblyspecbase.h
new file mode 100644
index 0000000000..8accfa1144
--- /dev/null
+++ b/src/vm/assemblyspecbase.h
@@ -0,0 +1,29 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ============================================================
+//
+// AssemblySpecBase.h
+//
+
+
+//
+// Chooses the appropriate implementation to base AssemblySpec on
+//
+// ============================================================
+
+
+#ifndef __ASSEMBLY_SPEC_BASE_H__
+#define __ASSEMBLY_SPEC_BASE_H__
+
+#ifndef FEATURE_FUSION
+#include "coreclr/corebindresult.h"
+#include "coreclr/corebindresult.inl"
+#include "../binder/inc/assembly.hpp"
+#endif // FEATURE_FUSION
+
+#include "baseassemblyspec.h"
+#include "baseassemblyspec.inl"
+
+#endif // __ASSEMBLY_SPEC_BASE_H__
diff --git a/src/vm/baseassemblyspec.cpp b/src/vm/baseassemblyspec.cpp
new file mode 100644
index 0000000000..f0e8da548d
--- /dev/null
+++ b/src/vm/baseassemblyspec.cpp
@@ -0,0 +1,750 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ============================================================
+//
+// BaseAssemblySpec.cpp
+//
+// Implements the BaseAssemblySpec class
+//
+
+
+// ============================================================
+
+#include "common.h"
+#include "thekey.h"
+
+VOID BaseAssemblySpec::CloneFieldsToStackingAllocator( StackingAllocator* alloc)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(ThrowOutOfMemory(););
+ }
+ CONTRACTL_END
+
+#if _DEBUG
+ DWORD hash = Hash();
+#endif
+
+ if ((~m_ownedFlags & NAME_OWNED) &&
+ m_pAssemblyName) {
+ S_UINT32 len = S_UINT32((DWORD) strlen(m_pAssemblyName)) + S_UINT32(1);
+ if(len.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW);
+ LPSTR temp = (LPSTR)alloc->Alloc(len);
+ strcpy_s(temp, len.Value(), m_pAssemblyName);
+ m_pAssemblyName = temp;
+ }
+
+ if ((~m_ownedFlags & PUBLIC_KEY_OR_TOKEN_OWNED) &&
+ m_pbPublicKeyOrToken && m_cbPublicKeyOrToken > 0) {
+ BYTE *temp = (BYTE *)alloc->Alloc(S_UINT32(m_cbPublicKeyOrToken)) ;
+ memcpy(temp, m_pbPublicKeyOrToken, m_cbPublicKeyOrToken);
+ m_pbPublicKeyOrToken = temp;
+ }
+
+ if ((~m_ownedFlags & LOCALE_OWNED) &&
+ m_context.szLocale) {
+ S_UINT32 len = S_UINT32((DWORD) strlen(m_context.szLocale)) + S_UINT32(1);
+ if(len.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW);
+ LPSTR temp = (char *)alloc->Alloc(len) ;
+ strcpy_s(temp, len.Value(), m_context.szLocale);
+ m_context.szLocale = temp;
+ }
+
+ if ((~m_ownedFlags & CODEBASE_OWNED) &&
+ m_wszCodeBase) {
+ S_UINT32 len = S_UINT32((DWORD) wcslen(m_wszCodeBase)) + S_UINT32(1);
+ if(len.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW);
+ LPWSTR temp = (LPWSTR)alloc->Alloc(len*S_UINT32(sizeof(WCHAR)));
+ wcscpy_s(temp, len.Value(), m_wszCodeBase);
+ m_wszCodeBase = temp;
+ }
+
+ if ((~m_ownedFlags & WINRT_TYPE_NAME_OWNED)) {
+ if (m_szWinRtTypeNamespace)
+ {
+ S_UINT32 len = S_UINT32((DWORD) strlen(m_szWinRtTypeNamespace)) + S_UINT32(1);
+ if(len.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW);
+ LPSTR temp = (LPSTR)alloc->Alloc(len*S_UINT32(sizeof(CHAR)));
+ strcpy_s(temp, len.Value(), m_szWinRtTypeNamespace);
+ m_szWinRtTypeNamespace = temp;
+ }
+
+ if (m_szWinRtTypeClassName)
+ {
+ S_UINT32 len = S_UINT32((DWORD) strlen(m_szWinRtTypeClassName)) + S_UINT32(1);
+ if(len.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW);
+ LPSTR temp = (LPSTR)alloc->Alloc(len*S_UINT32(sizeof(CHAR)));
+ strcpy_s(temp, len.Value(), m_szWinRtTypeClassName);
+ m_szWinRtTypeClassName = temp;
+ }
+ }
+
+ _ASSERTE(hash == Hash());
+
+}
+
+#ifndef DACCESS_COMPILE
+BOOL BaseAssemblySpec::IsMscorlib()
+{
+ CONTRACTL
+ {
+ THROWS;
+ INSTANCE_CHECK;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+ if (m_pAssemblyName == NULL)
+ {
+ LPCWSTR file = GetCodeBase();
+ if (file)
+ {
+ StackSString path(file);
+ PEAssembly::UrlToPath(path);
+ return SystemDomain::System()->IsBaseLibrary(path);
+ }
+ return FALSE;
+ }
+
+ _ASSERTE(strlen(g_psBaseLibraryName) == 8);
+
+ // <TODO>More of bug 213471</TODO>
+ size_t iNameLen = strlen(m_pAssemblyName);
+ return ( (iNameLen >= 8) &&
+ ( (!stricmpUTF8(m_pAssemblyName, g_psBaseLibrary)) ||
+ ( (!SString::_strnicmp(m_pAssemblyName, g_psBaseLibraryName, 8)) &&
+ ( (iNameLen == 8) || (m_pAssemblyName[8] == ',') ) ) ) );
+}
+
+#ifdef FEATURE_CORECLR
+BOOL BaseAssemblySpec::IsAssemblySpecForMscorlib()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ INSTANCE_CHECK;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(strlen(g_psBaseLibraryName) == 8);
+ }
+ CONTRACTL_END;
+
+ BOOL fIsAssemblySpecForMscorlib = FALSE;
+
+ if (m_pAssemblyName)
+ {
+ size_t iNameLen = strlen(m_pAssemblyName);
+ fIsAssemblySpecForMscorlib = ( (iNameLen >= 8) &&
+ ( (!_stricmp(m_pAssemblyName, g_psBaseLibrary)) ||
+ ( (!_strnicmp(m_pAssemblyName, g_psBaseLibraryName, 8)) &&
+ ( (iNameLen == 8) || (m_pAssemblyName[8] == ',') ) ) ) );
+ }
+
+ return fIsAssemblySpecForMscorlib;
+}
+
+#define MSCORLIB_PUBLICKEY g_rbTheSilverlightPlatformKey
+#else
+#define MSCORLIB_PUBLICKEY g_rbNeutralPublicKey
+#endif
+
+
+// A satellite assembly for mscorlib is named "mscorlib.resources" or
+// "mscorlib.debug.resources" and uses the same public key as mscorlib.
+// It does not necessarily have the same version, and its Culture will
+// always be set to a concrete culture such as "ja-JP".
+BOOL BaseAssemblySpec::IsMscorlibSatellite()
+{
+ CONTRACTL
+ {
+ THROWS;
+ INSTANCE_CHECK;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (m_pAssemblyName == NULL)
+ {
+ LPCWSTR file = GetCodeBase();
+ if (file)
+ {
+ StackSString path(file);
+ PEAssembly::UrlToPath(path);
+ return SystemDomain::System()->IsBaseLibrarySatellite(path);
+ }
+ return FALSE;
+ }
+
+ _ASSERTE(strlen(g_psBaseLibrarySatelliteAssemblyName) == 18);
+
+ // <TODO>More of bug 213471</TODO>
+ size_t iNameLen = strlen(m_pAssemblyName);
+
+ // We only allow the simple name "mscorlib.resources" (optionally followed by ',' and the rest of the display name)
+ BOOL r = ( (m_cbPublicKeyOrToken == sizeof(MSCORLIB_PUBLICKEY)) &&
+ (iNameLen >= 18) &&
+ (!SString::_strnicmp(m_pAssemblyName, g_psBaseLibrarySatelliteAssemblyName, 18)) &&
+ ( (iNameLen == 18) || (m_pAssemblyName[18] == ',') ) );
+
+ r = r && ( memcmp(m_pbPublicKeyOrToken,MSCORLIB_PUBLICKEY,sizeof(MSCORLIB_PUBLICKEY)) == 0);
+
+ return r;
+}
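+
+// Illustrative examples for the check above (hypothetical display names):
+//
+// "mscorlib.resources, Version=4.0.0.0, Culture=ja-JP, PublicKeyToken=..."
+// -> satellite: the simple name is exactly "mscorlib.resources" and the
+// stored key matches MSCORLIB_PUBLICKEY.
+// "mscorlib.resourcesX, ..."
+// -> not a satellite: the name continues past "mscorlib.resources" with
+// something other than ','.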
+
+VOID BaseAssemblySpec::ConvertPublicKeyToToken()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(HasPublicKey());
+ }
+ CONTRACTL_END;
+
+ StrongNameBufferHolder<BYTE> pbPublicKeyToken;
+ DWORD cbPublicKeyToken;
+ if (!StrongNameTokenFromPublicKey(m_pbPublicKeyOrToken,
+ m_cbPublicKeyOrToken,
+ &pbPublicKeyToken,
+ &cbPublicKeyToken))
+ ThrowHR(StrongNameErrorInfo());
+
+ BYTE *temp = new BYTE [cbPublicKeyToken];
+ memcpy(temp, pbPublicKeyToken, cbPublicKeyToken);
+
+ if (m_ownedFlags & PUBLIC_KEY_OR_TOKEN_OWNED)
+ delete [] m_pbPublicKeyOrToken;
+ else
+ m_ownedFlags |= PUBLIC_KEY_OR_TOKEN_OWNED;
+
+ m_pbPublicKeyOrToken = temp;
+ m_cbPublicKeyOrToken = cbPublicKeyToken;
+ m_dwFlags &= ~afPublicKey;
+}
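+
+// For reference: a public key token is the last 8 bytes of the SHA-1 hash of
+// the full public key, stored in reverse byte order, so the conversion above
+// is one-way. Callers such as RefMatchesDef (below) therefore convert a def's
+// full key down to a token before comparing it against a token-only ref.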
+
+#ifndef FEATURE_FUSION
+// Similar to BaseAssemblySpec::CompareEx, but allows the ref to be partially specified
+// Returns TRUE if ref matches def, FALSE otherwise.
+//
+// static
+BOOL BaseAssemblySpec::CompareRefToDef(const BaseAssemblySpec *pRef, const BaseAssemblySpec *pDef)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if(pRef->m_wszCodeBase || pDef->m_wszCodeBase)
+ {
+ if(!pRef->m_wszCodeBase || !pDef->m_wszCodeBase)
+ return FALSE;
+
+ return wcscmp(pRef->m_wszCodeBase, pDef->m_wszCodeBase) == 0;
+ }
+
+ // Compare fields
+
+ //
+ // name is non-optional
+ //
+ if (pRef->m_pAssemblyName != pDef->m_pAssemblyName
+ && (pRef->m_pAssemblyName == NULL || pDef->m_pAssemblyName == NULL
+ || CompareStrings(pRef->m_pAssemblyName, pDef->m_pAssemblyName)))
+ {
+ return FALSE;
+ }
+
+ //
+ // public key [token] is non-optional
+ //
+ if (pRef->m_cbPublicKeyOrToken != pDef->m_cbPublicKeyOrToken
+ || memcmp(pRef->m_pbPublicKeyOrToken, pDef->m_pbPublicKeyOrToken, pRef->m_cbPublicKeyOrToken))
+ {
+ return FALSE;
+ }
+
+ //
+ // flags are non-optional, except processor architecture and content type
+ //
+ DWORD dwFlagsMask = ~(afPA_FullMask | afContentType_Mask);
+ if ((pRef->m_dwFlags & dwFlagsMask) != (pDef->m_dwFlags & dwFlagsMask))
+ return FALSE;
+
+ // To match Fusion behavior, we ignore processor architecture (GetAssemblyNameRefFromMDImport
+ // does not look at the architecture part of the flags, and having a processor architecture in
+ // an InternalsVisibleTo attribute causes a META_E_CA_BAD_FRIENDS_ARGS exception).
+ // Content type is optional in pRef.
+ if (!IsAfContentType_Default(pRef->m_dwFlags) && (pRef->m_dwFlags & afContentType_Mask) != (pDef->m_dwFlags & afContentType_Mask))
+ return FALSE;
+
+
+ //
+ // version info is optional in the ref
+ //
+ if (pRef->m_context.usMajorVersion != (USHORT) -1)
+ {
+ if (pRef->m_context.usMajorVersion != pDef->m_context.usMajorVersion)
+ return FALSE;
+
+ if (pRef->m_context.usMinorVersion != (USHORT) -1)
+ {
+ if (pRef->m_context.usMinorVersion != pDef->m_context.usMinorVersion)
+ return FALSE;
+
+ if (pRef->m_context.usBuildNumber != (USHORT) -1)
+ {
+ if (pRef->m_context.usBuildNumber != pDef->m_context.usBuildNumber)
+ return FALSE;
+
+ if (pRef->m_context.usRevisionNumber != (USHORT) -1)
+ {
+ if (pRef->m_context.usRevisionNumber != pDef->m_context.usRevisionNumber)
+ return FALSE;
+ }
+ }
+ }
+ }
+
+ //
+ // locale info is optional in the ref
+ //
+ if ((pRef->m_context.szLocale != NULL)
+ && (pRef->m_context.szLocale != pDef->m_context.szLocale)
+ && strcmp(pRef->m_context.szLocale, pDef->m_context.szLocale))
+ {
+ return FALSE;
+ }
+
+ return TRUE;
+}
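+
+// Illustrative behavior of the partial-version matching above (hypothetical
+// names and versions; unspecified fields are (USHORT)-1 in the ref):
+//
+// ref "Foo, Version=1.2" vs def "Foo, Version=1.2.3.4" -> TRUE
+// (build/revision are unspecified in the ref, so they are skipped)
+// ref "Foo, Version=1.3" vs def "Foo, Version=1.2.3.4" -> FALSE
+// ref "Foo" vs def "Foo, Version=1.2.3.4" -> TRUE (no version compared)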
+
+// static
+BOOL BaseAssemblySpec::RefMatchesDef(const BaseAssemblySpec* pRef, const BaseAssemblySpec* pDef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pRef->GetName()!=NULL && pDef->GetName()!=NULL);
+ }
+ CONTRACTL_END;
+
+ if (pRef->IsStrongNamed())
+ {
+ if (!pDef->IsStrongNamed())
+ return FALSE;
+
+ if(pRef->HasPublicKey())
+ {
+ // cannot use pRef->CompareEx(pDef) here because it does a full comparison
+ // and the ref may be partial.
+ return CompareRefToDef(pRef, pDef);
+ }
+ else
+ {
+ BaseAssemblySpec defCopy;
+ defCopy.CopyFrom(pDef);
+ defCopy.ConvertPublicKeyToToken();
+
+ return CompareRefToDef(pRef, &defCopy);
+ }
+ }
+ else
+ {
+ return (CompareStrings(pRef->GetName(), pDef->GetName())==0);
+ }
+}
+#endif // FEATURE_FUSION
+
+//===========================================================================================
+// Produces the encoded assembly name, embedding additional information if required.
+//
+// For WinRT (ContentType=WindowsRuntime) assembly specs, the representative type name
+// is embedded into the returned string (which later feeds the IAssemblyName's
+// ASM_NAME_NAME property); otherwise the plain assembly name is returned.
+
+void BaseAssemblySpec::GetEncodedName(SString & ssEncodedName) const
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END
+
+#ifdef FEATURE_COMINTEROP
+ if (IsContentType_WindowsRuntime() && GetWinRtTypeClassName() != NULL)
+ {
+ ssEncodedName.SetUTF8(GetName());
+ ssEncodedName.Append(SL(W("!")));
+ if (GetWinRtTypeNamespace() != NULL)
+ {
+ ssEncodedName.AppendUTF8(GetWinRtTypeNamespace());
+ ssEncodedName.Append(SL(W(".")));
+ }
+ ssEncodedName.AppendUTF8(GetWinRtTypeClassName());
+ }
+ else
+#endif
+ {
+ ssEncodedName.SetUTF8(m_pAssemblyName);
+ }
+}
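+
+// Illustrative example (hypothetical identity): a WindowsRuntime spec with
+// assembly name "Contoso.Winmd", type namespace "Contoso.Controls", and type
+// class name "Widget" encodes as
+//
+// Contoso.Winmd!Contoso.Controls.Widget
+//
+// Non-WinRT specs encode as the plain assembly name.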
+
+VOID BaseAssemblySpec::SetName(SString const & ssName)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ GC_NOTRIGGER;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ if (m_ownedFlags & NAME_OWNED)
+ {
+ delete [] m_pAssemblyName;
+ m_ownedFlags &= ~NAME_OWNED;
+ }
+
+ m_pAssemblyName = NULL;
+
+ IfFailThrow(FString::ConvertUnicode_Utf8(ssName.GetUnicode(), & ((LPSTR &) m_pAssemblyName)));
+
+ m_ownedFlags |= NAME_OWNED;
+}
+
+HRESULT BaseAssemblySpec::Init(IAssemblyName *pName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pName);
+
+ HRESULT hr;
+
+ // Fill out info from name, if we have it.
+
+ DWORD cbSize = 0;
+ hr=pName->GetProperty(ASM_NAME_NAME, NULL, &cbSize);
+ if (hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)) {
+ hr=S_OK;
+ CQuickBytes qb;
+ LPWSTR pwName = (LPWSTR) qb.AllocNoThrow(cbSize);
+ if (!pwName)
+ return E_OUTOFMEMORY;
+
+ IfFailRet(pName->GetProperty(ASM_NAME_NAME, pwName, &cbSize));
+
+ m_pAssemblyName = NULL;
+
+ hr = FString::ConvertUnicode_Utf8(pwName, & ((LPSTR &) m_pAssemblyName));
+
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ m_ownedFlags |= NAME_OWNED;
+ }
+ IfFailRet(hr);
+
+ // Note: the checks cascade so we don't set lower-priority version numbers if higher ones are missing
+ cbSize = sizeof(m_context.usMajorVersion);
+ hr=pName->GetProperty(ASM_NAME_MAJOR_VERSION, &m_context.usMajorVersion, &cbSize);
+
+ if (hr!=S_OK || !cbSize)
+ m_context.usMajorVersion = (USHORT) -1;
+ else {
+ cbSize = sizeof(m_context.usMinorVersion);
+ hr=pName->GetProperty(ASM_NAME_MINOR_VERSION, &m_context.usMinorVersion, &cbSize);
+ }
+
+ if (hr!=S_OK || !cbSize)
+ m_context.usMinorVersion = (USHORT) -1;
+ else {
+ cbSize = sizeof(m_context.usBuildNumber);
+ pName->GetProperty(ASM_NAME_BUILD_NUMBER, &m_context.usBuildNumber, &cbSize);
+ }
+
+ if (hr!=S_OK || !cbSize)
+ m_context.usBuildNumber = (USHORT) -1;
+ else {
+ cbSize = sizeof(m_context.usRevisionNumber);
+ pName->GetProperty(ASM_NAME_REVISION_NUMBER, &m_context.usRevisionNumber, &cbSize);
+ }
+
+ if (hr!=S_OK || !cbSize)
+ m_context.usRevisionNumber = (USHORT) -1;
+
+ if (hr==E_INVALIDARG)
+ hr=S_FALSE;
+
+ IfFailRet(hr);
+
+ cbSize = 0;
+ hr = pName->GetProperty(ASM_NAME_CULTURE, NULL, &cbSize);
+
+ if (hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)) {
+ LPWSTR pwName = (LPWSTR) alloca(cbSize);
+ IfFailRet(pName->GetProperty(ASM_NAME_CULTURE, pwName, &cbSize));
+
+ hr = FString::ConvertUnicode_Utf8(pwName, & ((LPSTR &) m_context.szLocale));
+
+ m_ownedFlags |= LOCALE_OWNED;
+ }
+
+ IfFailRet(hr);
+
+ m_dwFlags = 0;
+
+ cbSize = 0;
+ hr=pName->GetProperty(ASM_NAME_PUBLIC_KEY_TOKEN, NULL, &cbSize);
+ if (hr== HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)) {
+ m_pbPublicKeyOrToken = new (nothrow) BYTE[cbSize];
+ if (m_pbPublicKeyOrToken == NULL)
+ return E_OUTOFMEMORY;
+ m_cbPublicKeyOrToken = cbSize;
+ m_ownedFlags |= PUBLIC_KEY_OR_TOKEN_OWNED;
+ IfFailRet(pName->GetProperty(ASM_NAME_PUBLIC_KEY_TOKEN, m_pbPublicKeyOrToken, &cbSize));
+ }
+ else {
+ if (hr!=E_INVALIDARG)
+ IfFailRet(hr);
+ hr=pName->GetProperty(ASM_NAME_PUBLIC_KEY, NULL, &cbSize);
+ if (hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)) {
+ hr=S_OK;
+ // <TODO>@todo: we need to normalize this into a public key token so
+ // comparisons work correctly. But this involves binding to mscorsn.</TODO>
+ m_pbPublicKeyOrToken = new (nothrow) BYTE[cbSize];
+ if (m_pbPublicKeyOrToken == NULL)
+ return E_OUTOFMEMORY;
+ m_cbPublicKeyOrToken = cbSize;
+ m_dwFlags |= afPublicKey;
+ m_ownedFlags |= PUBLIC_KEY_OR_TOKEN_OWNED;
+ IfFailRet(pName->GetProperty(ASM_NAME_PUBLIC_KEY, m_pbPublicKeyOrToken, &cbSize));
+ }
+ else {
+ IfFailRet(hr);
+ hr= pName->GetProperty(ASM_NAME_NULL_PUBLIC_KEY, NULL, &cbSize);
+ if (hr!=S_OK)
+ hr=pName->GetProperty(ASM_NAME_NULL_PUBLIC_KEY_TOKEN, NULL, &cbSize);
+ if ( hr == S_OK ) {
+ m_pbPublicKeyOrToken = new (nothrow) BYTE[0];
+ if (m_pbPublicKeyOrToken == NULL)
+ return E_OUTOFMEMORY;
+ m_cbPublicKeyOrToken = 0;
+ m_ownedFlags |= PUBLIC_KEY_OR_TOKEN_OWNED;
+ }
+ if (hr==E_INVALIDARG)
+ hr=S_FALSE;
+ IfFailRet(hr);
+
+ }
+ }
+
+ // Recover the afRetargetable flag
+ BOOL bRetarget;
+ cbSize = sizeof(bRetarget);
+ hr = pName->GetProperty(ASM_NAME_RETARGET, &bRetarget, &cbSize);
+ if (hr == S_OK && cbSize != 0 && bRetarget)
+ m_dwFlags |= afRetargetable;
+
+ // Recover the Processor Architecture flags
+ PEKIND peKind;
+ cbSize = sizeof(PEKIND);
+ hr = pName->GetProperty(ASM_NAME_ARCHITECTURE, &peKind, &cbSize);
+ if ((hr == S_OK) && (cbSize != 0) && (peKind < (afPA_NoPlatform >> afPA_Shift)) && (peKind >= (afPA_MSIL >> afPA_Shift)))
+ m_dwFlags |= (((DWORD)peKind) << afPA_Shift);
+
+ cbSize = 0;
+ hr=pName->GetProperty(ASM_NAME_CODEBASE_URL, NULL, &cbSize);
+ if (hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)) {
+ m_wszCodeBase = new (nothrow) WCHAR [ cbSize/sizeof(WCHAR) ];
+ if (m_wszCodeBase == NULL)
+ return E_OUTOFMEMORY;
+ m_ownedFlags |= CODE_BASE_OWNED;
+ IfFailRet(pName->GetProperty(ASM_NAME_CODEBASE_URL,
+ (void*)m_wszCodeBase, &cbSize));
+ }
+ else
+ IfFailRet(hr);
+
+ // Recover the Content Type enum
+ DWORD dwContentType;
+ cbSize = sizeof(dwContentType);
+ hr = pName->GetProperty(ASM_NAME_CONTENT_TYPE, &dwContentType, &cbSize);
+ if ((hr == S_OK) && (cbSize == sizeof(dwContentType)))
+ {
+ _ASSERTE((dwContentType == AssemblyContentType_Default) || (dwContentType == AssemblyContentType_WindowsRuntime));
+ if (dwContentType == AssemblyContentType_WindowsRuntime)
+ {
+ m_dwFlags |= afContentType_WindowsRuntime;
+ }
+ }
+
+ return S_OK;
+}
+
+HRESULT BaseAssemblySpec::CreateFusionName(
+ IAssemblyName **ppName,
+ BOOL fIncludeCodeBase/*=TRUE*/,
+ BOOL fMustBeBindable /*=FALSE*/) const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ IAssemblyName *pFusionAssemblyName = NULL;
+ LPWSTR pwLocale = NULL;
+ CQuickBytes qb;
+
+ NonVMComHolder< IAssemblyName > holder(NULL);
+
+ SmallStackSString ssAssemblyName;
+ fMustBeBindable ? GetEncodedName(ssAssemblyName) : GetName(ssAssemblyName);
+
+ IfFailGo(CreateAssemblyNameObject(&pFusionAssemblyName, ssAssemblyName.GetUnicode(), 0, NULL));
+
+ holder = pFusionAssemblyName;
+
+ if (m_context.usMajorVersion != (USHORT) -1) {
+ IfFailGo(pFusionAssemblyName->SetProperty(ASM_NAME_MAJOR_VERSION,
+ &m_context.usMajorVersion,
+ sizeof(USHORT)));
+
+ if (m_context.usMinorVersion != (USHORT) -1) {
+ IfFailGo(pFusionAssemblyName->SetProperty(ASM_NAME_MINOR_VERSION,
+ &m_context.usMinorVersion,
+ sizeof(USHORT)));
+
+ if (m_context.usBuildNumber != (USHORT) -1) {
+ IfFailGo(pFusionAssemblyName->SetProperty(ASM_NAME_BUILD_NUMBER,
+ &m_context.usBuildNumber,
+ sizeof(USHORT)));
+
+ if (m_context.usRevisionNumber != (USHORT) -1)
+ IfFailGo(pFusionAssemblyName->SetProperty(ASM_NAME_REVISION_NUMBER,
+ &m_context.usRevisionNumber,
+ sizeof(USHORT)));
+ }
+ }
+ }
+
+ if (m_context.szLocale) {
+ int pwLocaleLen = WszMultiByteToWideChar(CP_UTF8, 0, m_context.szLocale, -1, 0, 0);
+ if(pwLocaleLen == 0) {
+ IfFailGo(HRESULT_FROM_GetLastError());
+ } else if (pwLocaleLen > MAKE_MAX_LENGTH) {
+ IfFailGo(COR_E_OVERFLOW);
+ }
+ pwLocale = (LPWSTR) qb.AllocNoThrow((pwLocaleLen + 1) *sizeof(WCHAR));
+ if (!pwLocale) // AllocNoThrow returns NULL on OOM
+ IfFailGo(E_OUTOFMEMORY);
+ if (!WszMultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS,
+ m_context.szLocale, -1, pwLocale, pwLocaleLen))
+ IfFailGo(HRESULT_FROM_GetLastError());
+ pwLocale[pwLocaleLen] = 0;
+
+ IfFailGo(pFusionAssemblyName->SetProperty(ASM_NAME_CULTURE,
+ pwLocale,
+ (DWORD)(wcslen(pwLocale) + 1) * sizeof (WCHAR)));
+ }
+
+ if (m_pbPublicKeyOrToken) {
+ if (m_cbPublicKeyOrToken) {
+ if(m_dwFlags & afPublicKey) {
+ IfFailGo(pFusionAssemblyName->SetProperty(ASM_NAME_PUBLIC_KEY,
+ m_pbPublicKeyOrToken, m_cbPublicKeyOrToken));
+ }
+ else {
+ IfFailGo(pFusionAssemblyName->SetProperty(ASM_NAME_PUBLIC_KEY_TOKEN,
+ m_pbPublicKeyOrToken, m_cbPublicKeyOrToken));
+ }
+ }
+ else {
+#ifdef FEATURE_FUSION
+ IfFailGo(pFusionAssemblyName->SetProperty(ASM_NAME_NULL_PUBLIC_KEY_TOKEN,
+ NULL, 0));
+#endif
+ }
+ }
+
+#ifdef FEATURE_FUSION
+ // See if the assembly[ref] is retargetable (ie, for a generic assembly).
+ if (IsAfRetargetable(m_dwFlags)) {
+ BOOL bTrue = TRUE;
+ IfFailGo(pFusionAssemblyName->SetProperty(ASM_NAME_RETARGET,
+ &bTrue, sizeof(bTrue)));
+ }
+#endif
+
+ // Set the Processor Architecture (if any)
+ {
+ DWORD dwPEkind = (DWORD)PAIndex(m_dwFlags);
+ // Note: Value 0x07 = code:afPA_NoPlatform falls through
+ if ((dwPEkind >= peMSIL) && (dwPEkind <= peARM))
+ {
+ PEKIND peKind = (PEKIND)dwPEkind;
+ IfFailGo(pFusionAssemblyName->SetProperty(ASM_NAME_ARCHITECTURE,
+ &peKind, sizeof(peKind)));
+ }
+ }
+
+ // Set the Content Type (if any)
+ {
+ if (IsAfContentType_WindowsRuntime(m_dwFlags))
+ {
+ DWORD dwContentType = AssemblyContentType_WindowsRuntime;
+ IfFailGo(pFusionAssemblyName->SetProperty(
+ ASM_NAME_CONTENT_TYPE,
+ &dwContentType,
+ sizeof(dwContentType)));
+ }
+ }
+
+#ifdef FEATURE_FUSION
+ if (fIncludeCodeBase && m_wszCodeBase) {
+ IfFailGo(pFusionAssemblyName->SetProperty(ASM_NAME_CODEBASE_URL,
+ (void*)m_wszCodeBase,
+ (DWORD)(wcslen(m_wszCodeBase)+1) * sizeof(WCHAR)));
+ }
+#else
+ _ASSERTE(m_wszCodeBase == NULL);
+#endif
+
+ *ppName = pFusionAssemblyName;
+
+ holder.SuppressRelease();
+ hr = S_OK;
+
+ ErrExit:
+ ;
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
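+
+// Illustrative caller-side usage (sketch; error handling elided):
+//
+// IAssemblyName *pName = NULL;
+// if (SUCCEEDED(pSpec->CreateFusionName(&pName)))
+// {
+// // ... use pName ...
+// pName->Release(); // the returned object is ref-counted COM
+// }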
+
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/baseassemblyspec.h b/src/vm/baseassemblyspec.h
new file mode 100644
index 0000000000..0c232c6069
--- /dev/null
+++ b/src/vm/baseassemblyspec.h
@@ -0,0 +1,306 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ============================================================
+//
+// BaseAssemblySpec.h
+//
+
+
+//
+// Declares the BaseAssemblySpec class
+//
+// ============================================================
+
+#ifndef __BASE_ASSEMBLY_SPEC_H__
+#define __BASE_ASSEMBLY_SPEC_H__
+
+class StackingAllocator;
+
+// A class representing an assembly name in the Loader.
+class BaseAssemblySpec
+{
+protected:
+ AssemblyMetaDataInternal m_context;
+ LPCSTR m_pAssemblyName;
+ PBYTE m_pbPublicKeyOrToken;
+ DWORD m_cbPublicKeyOrToken;
+ DWORD m_dwFlags; // CorAssemblyFlags
+ LPCWSTR m_wszCodeBase; // URL to the code
+#ifdef FEATURE_FUSION
+ LOADCTX_TYPE m_fParentLoadContext; // m_pParentAssembly->GetFusionLoadContext()
+ ReleaseHolder<IAssemblyName> m_pNameAfterPolicy;
+#endif
+ LPCSTR m_szWinRtTypeNamespace;
+ LPCSTR m_szWinRtTypeClassName;
+#ifdef FEATURE_HOSTED_BINDER
+ ICLRPrivBinder *m_pHostBinder;
+#endif
+ int m_ownedFlags;
+ BOOL m_fIntrospectionOnly;
+#if defined(FEATURE_CORECLR)
+ ICLRPrivBinder *m_pBindingContext;
+#endif // defined(FEATURE_CORECLR)
+
+public:
+ enum
+ {
+ NAME_OWNED = 0x01,
+ PUBLIC_KEY_OR_TOKEN_OWNED = 0x02,
+ CODE_BASE_OWNED = 0x04,
+ LOCALE_OWNED = 0x08,
+ CODEBASE_OWNED = 0x10,
+ WINRT_TYPE_NAME_OWNED = 0x20,
+ // Set if ParseName() returned illegal textual identity.
+ // Cannot process the string any further.
+ BAD_NAME_OWNED = 0x40,
+ ALL_OWNED = 0xFF,
+ };
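+
+ // The *_OWNED bits record which pointer fields the destructor must free.
+ // A spec initialized from metadata (see the Init(mdToken, ...) overloads)
+ // starts with no bits set because its pointers alias the metadata;
+ // CloneFields copies the requested fields and sets the matching bits,
+ // while CloneFieldsToLoaderHeap copies into a loader heap without taking
+ // ownership.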
+
+ BaseAssemblySpec();
+ ~BaseAssemblySpec();
+
+ HRESULT Init(LPCSTR pAssemblyName,
+ const AssemblyMetaDataInternal* pContext,
+ const BYTE * pbPublicKeyOrToken, DWORD cbPublicKeyOrToken,
+ DWORD dwFlags);
+
+ HRESULT Init(mdToken tkAssemblyRef, IMDInternalImport *pImport);
+ HRESULT Init(mdAssembly tkAssemblyRef, IMetaDataAssemblyImport* pImport);
+ HRESULT Init(LPCSTR pAssemblyDisplayName);
+
+ HRESULT Init(IAssemblyName *pName);
+
+ // Note that this method does not clone the fields!
+ VOID CopyFrom(const BaseAssemblySpec *pSpec);
+
+ VOID CloneFields(int flags=ALL_OWNED);
+ VOID CloneFieldsToLoaderHeap(int flags, LoaderHeap *pHeap, AllocMemTracker *pamTracker);
+ VOID CloneFieldsToStackingAllocator(StackingAllocator* alloc);
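+
+ // Illustrative usage of CopyFrom + CloneFields (sketch):
+ //
+ // BaseAssemblySpec copy;
+ // copy.CopyFrom(pOther); // shallow: pointers still owned by pOther
+ // copy.CloneFields(); // deep: 'copy' now owns heap copies (ALL_OWNED)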
+
+#if defined(FEATURE_CORECLR)
+ inline void SetBindingContext(ICLRPrivBinder *pBindingContext)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pBindingContext = pBindingContext;
+ }
+
+ inline ICLRPrivBinder* GetBindingContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pBindingContext;
+ }
+
+ BOOL IsAssemblySpecForMscorlib();
+#endif // defined(FEATURE_CORECLR)
+
+ HRESULT ParseName();
+ DWORD Hash();
+
+ LPCSTR GetName() const;
+ inline void GetName(SString & ssName) const { WRAPPER_NO_CONTRACT; ssName.SetUTF8(GetName()); }
+
+ void SetName(LPCSTR szName);
+ void SetName(SString const & ssName);
+
+ LPCWSTR GetCodeBase();
+ void SetCodeBase(LPCWSTR szCodeBase);
+
+ VOID SetCulture(LPCSTR szCulture);
+
+ VOID ConvertPublicKeyToToken();
+
+ void SetContext(ASSEMBLYMETADATA* assemblyData);
+
+ inline AssemblyMetaDataInternal *GetContext() { LIMITED_METHOD_CONTRACT; return &m_context; }
+ inline AssemblyMetaDataInternal const *GetContext() const { LIMITED_METHOD_CONTRACT; return &m_context; }
+
+ BOOL IsStrongNamed() const;
+ BOOL HasPublicKey() const;
+ BOOL HasPublicKeyToken() const;
+ BOOL IsMscorlibSatellite();
+ BOOL IsMscorlibDebugSatellite();
+ BOOL IsMscorlib();
+
+ //
+ // Windows Runtime functions that could not be refactored out to AssemblySpec
+ //
+ inline LPCSTR GetWinRtTypeNamespace() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_szWinRtTypeNamespace;
+ }
+ inline LPCSTR GetWinRtTypeClassName() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_szWinRtTypeClassName;
+ }
+
+ //****************************************************************************************
+ //
+ // Creates an IAssemblyName object representing this AssemblySpec.
+ //
+ // fMustBeBindable - if set to TRUE, the resulting IAssemblyName may contain internal
+ // encodings needed to make an identity bindable (this is the case
+ // for WinRT assemblies: a representative type name is encoded as
+ // part of the assembly simple name). Be careful to ensure that
+ // encoded identities are not exposed to customers.
+ HRESULT CreateFusionName(
+ IAssemblyName **ppName,
+ BOOL fIncludeCodeBase = TRUE, /* Used by fusion only */
+ BOOL fMustBeBindable = FALSE) const;
+
+#ifdef FEATURE_FUSION
+ // for fusion binding
+ virtual IAssembly* GetParentIAssembly() =0;
+
+ // for identity comparison
+ virtual LPCVOID GetParentAssemblyPtr() =0;
+
+ inline LOADCTX_TYPE GetParentLoadContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fParentLoadContext;
+ }
+#endif
+
+ BOOL IsIntrospectionOnly()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Important to ensure we return a normalized boolean (so the introspection fields
+ // of different AssemblySpecs can be compared).
+ return !!m_fIntrospectionOnly;
+ }
+
+ VOID SetIntrospectionOnly(BOOL fIntrospectionOnly)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_fIntrospectionOnly = !!fIntrospectionOnly;
+ }
+
+ inline BOOL IsContentType_WindowsRuntime() const
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_COMINTEROP
+ return IsAfContentType_WindowsRuntime(m_dwFlags);
+#else
+ return FALSE;
+#endif
+ }
+
+ void GetEncodedName(SString & ssEncodedName) const;
+
+ // Returns true if this object uniquely identifies a single assembly;
+ // false otherwise. This will return false for Windows Runtime assemblies,
+ // as WinRT assembly names do not represent an identity. This method
+ // does not take into account additional attributes such as type namespace
+ // and name.
+ inline BOOL HasUniqueIdentity() const
+ {
+ STATIC_CONTRACT_LIMITED_METHOD;
+ return !IsContentType_WindowsRuntime();
+ }
+
+ enum CompareExFlags
+ {
+ ASC_Default = 0x00, // Default comparison policy.
+ ASC_DefinitionEquality = 0x01, // Will not treat non-bindable content types as equivalent.
+ };
+
+ BOOL CompareEx(BaseAssemblySpec *pSpec, DWORD dwCompareFlags = ASC_Default);
+ static int CompareStrings(LPCUTF8 string1, LPCUTF8 string2);
+ static BOOL RefMatchesDef(const BaseAssemblySpec* pRef, const BaseAssemblySpec* pDef);
+ static BOOL VerifyBindingString(LPCWSTR pwStr);
+
+ void GetFileOrDisplayName(DWORD flags, SString &result) const;
+
+ inline void GetPublicKey(
+ PBYTE * ppbPublicKey,
+ DWORD * pcbPublicKey) const
+ {
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(HasPublicKey());
+ if (ppbPublicKey != nullptr)
+ {
+ *ppbPublicKey = m_pbPublicKeyOrToken;
+ }
+ if (pcbPublicKey != nullptr)
+ {
+ *pcbPublicKey = m_cbPublicKeyOrToken;
+ }
+ }
+
+ inline void GetPublicKeyToken(
+ PBYTE * ppbPublicKeyToken,
+ DWORD * pcbPublicKeyToken) const
+ {
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(HasPublicKeyToken());
+ if (ppbPublicKeyToken != nullptr)
+ {
+ *ppbPublicKeyToken = m_pbPublicKeyOrToken;
+ }
+ if (pcbPublicKeyToken != nullptr)
+ {
+ *pcbPublicKeyToken = m_cbPublicKeyOrToken;
+ }
+ }
+
+ inline BOOL IsRetargetable() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return IsAfRetargetable(m_dwFlags);
+ }
+
+#ifdef FEATURE_FUSION
+ inline IAssemblyName* GetNameAfterPolicy() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pNameAfterPolicy;
+ }
+
+ inline void ReleaseNameAfterPolicy()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pNameAfterPolicy=NULL;
+ }
+
+ inline void SetNameAfterPolicy(IAssemblyName* pName)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pNameAfterPolicy=pName;
+ }
+
+
+ void SetPEKIND(PEKIND peKind)
+ {
+ LIMITED_METHOD_CONTRACT;
+ C_ASSERT(afPA_None == PAFlag(peNone));
+ C_ASSERT(afPA_MSIL == PAFlag(peMSIL));
+ C_ASSERT(afPA_x86 == PAFlag(peI386));
+ C_ASSERT(afPA_IA64 == PAFlag(peIA64));
+ C_ASSERT(afPA_AMD64 == PAFlag(peAMD64));
+ C_ASSERT(afPA_ARM == PAFlag(peARM));
+
+ _ASSERTE((peKind <= peARM) || (peKind == peInvalid));
+
+ m_dwFlags &= ~afPA_FullMask;
+ m_dwFlags |= PAFlag(peKind);
+ }
+
+ PEKIND GetPEKIND() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<PEKIND>(PAIndex(m_dwFlags));
+ }
+#endif //FEATURE_FUSION
+
+protected:
+ static BOOL CompareRefToDef(const BaseAssemblySpec *pRef, const BaseAssemblySpec *pDef);
+};
+
+#endif // __BASE_ASSEMBLY_SPEC_H__
diff --git a/src/vm/baseassemblyspec.inl b/src/vm/baseassemblyspec.inl
new file mode 100644
index 0000000000..7789d600bc
--- /dev/null
+++ b/src/vm/baseassemblyspec.inl
@@ -0,0 +1,720 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ============================================================
+//
+// BaseAssemblySpec.inl
+//
+
+
+//
+// Implements the BaseAssemblySpec class
+//
+// ============================================================
+
+#ifndef __BASE_ASSEMBLY_SPEC_INL__
+#define __BASE_ASSEMBLY_SPEC_INL__
+
+extern LocaleID g_lcid;
+
+#if defined(FEATURE_CORECLR)
+BOOL AreSameBinderInstance(ICLRPrivBinder *pBinderA, ICLRPrivBinder *pBinderB);
+#endif // defined(FEATURE_CORECLR)
+
+inline int BaseAssemblySpec::CompareStrings(LPCUTF8 string1, LPCUTF8 string2)
+{
+ WRAPPER_NO_CONTRACT;
+ SString s1;
+ SString s2;
+ s1.SetUTF8(string1);
+ s2.SetUTF8(string2);
+ return s1.CompareCaseInsensitive(s2,g_lcid);
+}
+
+
+inline BaseAssemblySpec::BaseAssemblySpec()
+{
+ LIMITED_METHOD_CONTRACT;
+ ZeroMemory(this, sizeof(*this));
+ m_context.usMajorVersion = (USHORT) -1;
+ m_context.usMinorVersion = (USHORT) -1;
+ m_context.usBuildNumber = (USHORT) -1;
+ m_context.usRevisionNumber = (USHORT) -1;
+};
+
+inline BaseAssemblySpec::~BaseAssemblySpec()
+{
+ WRAPPER_NO_CONTRACT;
+ if (m_ownedFlags & NAME_OWNED)
+ delete [] m_pAssemblyName;
+ if (m_ownedFlags & PUBLIC_KEY_OR_TOKEN_OWNED)
+ delete [] m_pbPublicKeyOrToken;
+ if (m_wszCodeBase && (m_ownedFlags & CODE_BASE_OWNED))
+ delete [] m_wszCodeBase;
+ if (m_ownedFlags & LOCALE_OWNED)
+ delete [] m_context.szLocale;
+ if (m_szWinRtTypeClassName && (m_ownedFlags & WINRT_TYPE_NAME_OWNED))
+ delete [] m_szWinRtTypeClassName;
+ if (m_szWinRtTypeNamespace && (m_ownedFlags & WINRT_TYPE_NAME_OWNED))
+ delete [] m_szWinRtTypeNamespace;
+}
+
+inline HRESULT BaseAssemblySpec::Init(LPCSTR pAssemblyName,
+ const AssemblyMetaDataInternal* pContext,
+ const BYTE * pbPublicKeyOrToken, DWORD cbPublicKeyOrToken,
+ DWORD dwFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pContext);
+
+ m_pAssemblyName = pAssemblyName;
+ m_pbPublicKeyOrToken = const_cast<BYTE *>(pbPublicKeyOrToken);
+ m_cbPublicKeyOrToken = cbPublicKeyOrToken;
+ m_dwFlags = dwFlags;
+ m_ownedFlags = 0;
+
+ m_context = *pContext;
+
+ return S_OK;
+}
+
+inline HRESULT BaseAssemblySpec::Init(LPCSTR pAssemblyDisplayName)
+{
+ WRAPPER_NO_CONTRACT;
+ m_pAssemblyName = pAssemblyDisplayName;
+ // We eagerly parse the name to allow FusionBind::Hash to avoid throwing.
+ return ParseName();
+}
+
+inline VOID BaseAssemblySpec::CloneFields(int ownedFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(ThrowOutOfMemory(););
+ }
+ CONTRACTL_END
+
+#if _DEBUG
+ DWORD hash = Hash();
+#endif
+
+ if ((~m_ownedFlags & NAME_OWNED) && (ownedFlags & NAME_OWNED) &&
+ m_pAssemblyName) {
+ size_t len = strlen(m_pAssemblyName) + 1;
+ LPSTR temp = new char [len];
+ strcpy_s(temp, len, m_pAssemblyName);
+ m_pAssemblyName = temp;
+ m_ownedFlags |= NAME_OWNED;
+ }
+
+ if ((~m_ownedFlags & PUBLIC_KEY_OR_TOKEN_OWNED) &&
+ (ownedFlags & PUBLIC_KEY_OR_TOKEN_OWNED) && m_pbPublicKeyOrToken) {
+ BYTE *temp = new BYTE [m_cbPublicKeyOrToken];
+ memcpy(temp, m_pbPublicKeyOrToken, m_cbPublicKeyOrToken);
+ m_pbPublicKeyOrToken = temp;
+ m_ownedFlags |= PUBLIC_KEY_OR_TOKEN_OWNED;
+ }
+
+ if ((~m_ownedFlags & LOCALE_OWNED) && (ownedFlags & LOCALE_OWNED) &&
+ m_context.szLocale) {
+ size_t len = strlen(m_context.szLocale) + 1;
+ LPSTR temp = new char [len];
+ strcpy_s(temp, len, m_context.szLocale);
+ m_context.szLocale = temp;
+ m_ownedFlags |= LOCALE_OWNED;
+ }
+
+ if ((~m_ownedFlags & CODEBASE_OWNED) && (ownedFlags & CODEBASE_OWNED) &&
+ m_wszCodeBase) {
+ size_t len = wcslen(m_wszCodeBase) + 1;
+ LPWSTR temp = new WCHAR [len];
+ wcscpy_s(temp, len, m_wszCodeBase);
+ m_wszCodeBase = temp;
+ m_ownedFlags |= CODEBASE_OWNED;
+ }
+
+ if ((~m_ownedFlags & WINRT_TYPE_NAME_OWNED) && (ownedFlags & WINRT_TYPE_NAME_OWNED)) {
+
+ NewArrayHolder<CHAR> nameTemp, namespaceTemp;
+
+ if (m_szWinRtTypeClassName) {
+
+ size_t nameLen = strlen(m_szWinRtTypeClassName) + 1;
+ nameTemp = new CHAR [nameLen];
+ strcpy_s(nameTemp, nameLen, m_szWinRtTypeClassName);
+ }
+
+ if (m_szWinRtTypeNamespace){
+
+ size_t namespaceLen = strlen(m_szWinRtTypeNamespace) + 1;
+ namespaceTemp = new CHAR [namespaceLen];
+ strcpy_s(namespaceTemp, namespaceLen, m_szWinRtTypeNamespace);
+ }
+
+ m_szWinRtTypeClassName = nameTemp.Extract();
+ m_szWinRtTypeNamespace = namespaceTemp.Extract();
+ if (m_szWinRtTypeClassName != NULL || m_szWinRtTypeNamespace != NULL)
+ {
+ m_ownedFlags |= WINRT_TYPE_NAME_OWNED;
+ }
+ }
+
+ _ASSERTE(hash == Hash());
+}
+
+inline VOID BaseAssemblySpec::CloneFieldsToLoaderHeap(int flags, LoaderHeap *pHeap, AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(ThrowOutOfMemory(););
+ }
+ CONTRACTL_END
+
+#if _DEBUG
+ DWORD hash = Hash();
+#endif
+
+ if ((~m_ownedFlags & NAME_OWNED) && (flags &NAME_OWNED) &&
+ m_pAssemblyName) {
+ size_t len = strlen(m_pAssemblyName) + 1;
+ LPSTR temp = (LPSTR)pamTracker->Track( pHeap->AllocMem(S_SIZE_T (len)) );
+ strcpy_s(temp, len, m_pAssemblyName);
+ m_pAssemblyName = temp;
+ }
+
+ if ((~m_ownedFlags & PUBLIC_KEY_OR_TOKEN_OWNED) && (flags &PUBLIC_KEY_OR_TOKEN_OWNED) &&
+ m_pbPublicKeyOrToken && m_cbPublicKeyOrToken > 0) {
+ BYTE *temp = (BYTE *)pamTracker->Track( pHeap->AllocMem(S_SIZE_T (m_cbPublicKeyOrToken)) );
+ memcpy(temp, m_pbPublicKeyOrToken, m_cbPublicKeyOrToken);
+ m_pbPublicKeyOrToken = temp;
+ }
+
+ if ((~m_ownedFlags & LOCALE_OWNED) && (flags &LOCALE_OWNED) &&
+ m_context.szLocale) {
+ size_t len = strlen(m_context.szLocale) + 1;
+ LPSTR temp = (char *)pamTracker->Track( pHeap->AllocMem(S_SIZE_T (len)) );
+ strcpy_s(temp, len, m_context.szLocale);
+ m_context.szLocale = temp;
+ }
+
+ if ((~m_ownedFlags & CODEBASE_OWNED) && (flags &CODEBASE_OWNED) &&
+ m_wszCodeBase) {
+ size_t len = wcslen(m_wszCodeBase) + 1;
+ LPWSTR temp = (LPWSTR)pamTracker->Track( pHeap->AllocMem(S_SIZE_T(len*sizeof(WCHAR))) );
+ wcscpy_s(temp, len, m_wszCodeBase);
+ m_wszCodeBase = temp;
+ }
+
+ if ((~m_ownedFlags & WINRT_TYPE_NAME_OWNED) && (flags & WINRT_TYPE_NAME_OWNED)) {
+ if (m_szWinRtTypeNamespace)
+ {
+ size_t len = strlen(m_szWinRtTypeNamespace) + 1;
+ LPSTR temp = (LPSTR)pamTracker->Track( pHeap->AllocMem(S_SIZE_T(len*sizeof(CHAR))) );
+ strcpy_s(temp, len, m_szWinRtTypeNamespace);
+ m_szWinRtTypeNamespace = temp;
+ }
+
+ if (m_szWinRtTypeClassName)
+ {
+ size_t len = strlen(m_szWinRtTypeClassName) + 1;
+ LPSTR temp = (LPSTR)pamTracker->Track( pHeap->AllocMem(S_SIZE_T(len*sizeof(CHAR))) );
+ strcpy_s(temp, len, m_szWinRtTypeClassName);
+ m_szWinRtTypeClassName = temp;
+ }
+ }
+
+ _ASSERTE(hash == Hash());
+
+}
+
+
+inline void BaseAssemblySpec::CopyFrom(const BaseAssemblySpec *pSpec)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(ThrowOutOfMemory(););
+ }
+ CONTRACTL_END
+
+ m_pAssemblyName = pSpec->m_pAssemblyName;
+
+ m_pbPublicKeyOrToken = pSpec->m_pbPublicKeyOrToken;
+ m_cbPublicKeyOrToken = pSpec->m_cbPublicKeyOrToken;
+ m_dwFlags = pSpec->m_dwFlags;
+ m_ownedFlags = 0;
+
+ m_wszCodeBase=pSpec->m_wszCodeBase;
+ m_szWinRtTypeNamespace = pSpec->m_szWinRtTypeNamespace;
+ m_szWinRtTypeClassName = pSpec->m_szWinRtTypeClassName;
+
+ m_context = pSpec->m_context;
+
+#ifdef FEATURE_HOSTED_BINDER
+ m_pHostBinder = pSpec->m_pHostBinder;
+#endif
+
+#ifdef FEATURE_CORECLR
+ if ((pSpec->m_ownedFlags & BAD_NAME_OWNED) != 0)
+ {
+ m_ownedFlags |= BAD_NAME_OWNED;
+ }
+#endif
+
+#ifdef FEATURE_FUSION
+ IAssemblyName* pNameAfterPolicy=pSpec->GetNameAfterPolicy();
+ if (pNameAfterPolicy)
+ {
+ pNameAfterPolicy->AddRef();
+ SetNameAfterPolicy(pNameAfterPolicy);
+ }
+#endif
+
+#if defined(FEATURE_CORECLR)
+ m_pBindingContext = pSpec->m_pBindingContext;
+#endif // defined(FEATURE_CORECLR)
+
+}
+
+
+inline DWORD BaseAssemblySpec::Hash()
+{
+ CONTRACTL {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+#ifdef FEATURE_CORECLR
+ if(m_wszCodeBase)
+ return HashString(m_wszCodeBase);
+#endif
+
+ // Hash fields.
+ DWORD hash = 0;
+
+ if (m_pAssemblyName)
+ hash ^= HashStringA(m_pAssemblyName);
+ hash = _rotl(hash, 4);
+
+ hash ^= HashBytes(m_pbPublicKeyOrToken, m_cbPublicKeyOrToken);
+ hash = _rotl(hash, 4);
+
+ hash ^= m_dwFlags;
+ hash = _rotl(hash, 4);
+
+#ifndef FEATURE_CORECLR
+ if (m_wszCodeBase)
+ hash ^= HashString(m_wszCodeBase);
+ hash = _rotl(hash, 4);
+#endif
+
+ hash ^= m_context.usMajorVersion;
+ hash = _rotl(hash, 8);
+
+ if (m_context.usMajorVersion != (USHORT) -1) {
+ hash ^= m_context.usMinorVersion;
+ hash = _rotl(hash, 8);
+
+ if (m_context.usMinorVersion != (USHORT) -1) {
+ hash ^= m_context.usBuildNumber;
+ hash = _rotl(hash, 8);
+
+ if (m_context.usBuildNumber != (USHORT) -1) {
+ hash ^= m_context.usRevisionNumber;
+ hash = _rotl(hash, 8);
+ }
+ }
+ }
+
+ if (m_context.szLocale)
+ hash ^= HashStringA(m_context.szLocale);
+ hash = _rotl(hash, 4);
+
+ if (m_szWinRtTypeNamespace)
+ {
+ hash ^= HashStringA(m_szWinRtTypeNamespace);
+ hash = _rotl(hash, 4);
+ }
+
+ if (m_szWinRtTypeClassName)
+ {
+ hash ^= HashStringA(m_szWinRtTypeClassName);
+ hash = _rotl(hash, 4);
+ }
+
+#ifdef FEATURE_FUSION
+ hash ^= (m_fParentLoadContext == LOADCTX_TYPE_LOADFROM);
+#endif
+
+ return hash;
+}
+
+
+inline BOOL BaseAssemblySpec::CompareEx(BaseAssemblySpec *pSpec, DWORD dwCompareFlags)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifndef FEATURE_CORECLR
+ _ASSERTE(pSpec != NULL);
+
+ if ((m_dwFlags & afContentType_Mask) == (pSpec->m_dwFlags & afContentType_Mask))
+ {
+ if (IsContentType_WindowsRuntime() && pSpec->IsContentType_WindowsRuntime())
+ {
+ // If comparing assembly definitions, can not use bindability attributes as
+ // a shortcut for equivalence, as this type of shortcut is only applicable
+ // when comparing assembly references (not definitions).
+ //
+ // Example of why this is needed: native images still need to compare
+ // assembly identities even if they are not bindable, because it needs to
+ // ensure that the exact same assembly file (definition) is used at runtime
+ // as was used during compilation.
+ if ((dwCompareFlags & ASC_DefinitionEquality) != ASC_DefinitionEquality)
+ {
+ // WinRT assembly references are meaningless, they are all equal to each other
+ return TRUE;
+ }
+ }
+ }
+ else
+ {
+ return FALSE;
+ }
+#endif
+
+#ifdef FEATURE_CORECLR
+ if(m_wszCodeBase || pSpec->m_wszCodeBase)
+ {
+ if(!m_wszCodeBase || !pSpec->m_wszCodeBase)
+ return FALSE;
+ return wcscmp(m_wszCodeBase, pSpec->m_wszCodeBase) == 0;
+ }
+#endif
+
+ // Compare fields
+#ifdef FEATURE_FUSION
+ BOOL fIsInLoadFromContext = (m_fParentLoadContext == LOADCTX_TYPE_LOADFROM);
+ BOOL fSpecIsInLoadFromContext = (pSpec->m_fParentLoadContext == LOADCTX_TYPE_LOADFROM);
+ if (fIsInLoadFromContext != fSpecIsInLoadFromContext)
+ return FALSE;
+#endif
+
+ if (m_pAssemblyName != pSpec->m_pAssemblyName
+ && (m_pAssemblyName == NULL || pSpec->m_pAssemblyName == NULL
+ || strcmp(m_pAssemblyName, pSpec->m_pAssemblyName)))
+ return FALSE;
+
+ if (m_cbPublicKeyOrToken != pSpec->m_cbPublicKeyOrToken
+ || memcmp(m_pbPublicKeyOrToken, pSpec->m_pbPublicKeyOrToken, m_cbPublicKeyOrToken))
+ return FALSE;
+
+#ifndef FEATURE_CORECLR
+ if (m_wszCodeBase != pSpec->m_wszCodeBase
+ && (m_wszCodeBase == NULL || pSpec->m_wszCodeBase == NULL
+ || wcscmp(m_wszCodeBase, pSpec->m_wszCodeBase)))
+ return FALSE;
+#endif
+
+ if (m_dwFlags != pSpec->m_dwFlags)
+ return FALSE;
+
+ if (m_context.usMajorVersion != pSpec->m_context.usMajorVersion)
+ return FALSE;
+
+ if (m_context.usMajorVersion != (USHORT) -1) {
+ if (m_context.usMinorVersion != pSpec->m_context.usMinorVersion)
+ return FALSE;
+
+ if (m_context.usMinorVersion != (USHORT) -1) {
+ if (m_context.usBuildNumber != pSpec->m_context.usBuildNumber)
+ return FALSE;
+
+ if (m_context.usBuildNumber != (USHORT) -1) {
+ if (m_context.usRevisionNumber != pSpec->m_context.usRevisionNumber)
+ return FALSE;
+ }
+ }
+ }
+
+ if (m_context.szLocale != pSpec->m_context.szLocale
+ && (m_context.szLocale == NULL || pSpec->m_context.szLocale == NULL
+ || strcmp(m_context.szLocale, pSpec->m_context.szLocale)))
+ return FALSE;
+
+#ifdef FEATURE_FUSION
+ if (!IsIntrospectionOnly() && !pSpec->IsIntrospectionOnly()) {
+ // Post-policy load-neither binds can be picked up by nobody
+ // except their own parent assembly. This only applies to executable assemblies.
+ BOOL bParentsMustMatch;
+
+ // doesn't need the check if one is in load context
+ bParentsMustMatch = (m_fParentLoadContext == LOADCTX_TYPE_UNKNOWN && pSpec->m_fParentLoadContext == LOADCTX_TYPE_UNKNOWN);
+
+ if ( bParentsMustMatch && GetParentAssemblyPtr() != pSpec->GetParentAssemblyPtr())
+ return FALSE;
+ }
+#endif
+
+#if defined(FEATURE_CORECLR)
+ // If the assemblySpec contains the binding context, then check if they match.
+ if (!(pSpec->IsAssemblySpecForMscorlib() && IsAssemblySpecForMscorlib()))
+ {
+ if (!AreSameBinderInstance(pSpec->m_pBindingContext, m_pBindingContext))
+ {
+ return FALSE;
+ }
+ }
+#endif // defined(FEATURE_CORECLR)
+
+
+ return TRUE;
+}
+
+
+inline HRESULT BaseAssemblySpec::Init(mdToken kAssemblyToken,
+ IMDInternalImport *pImport)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ if (TypeFromToken(kAssemblyToken) == mdtAssembly) {
+
+ IfFailRet(pImport->GetAssemblyProps(kAssemblyToken,
+ (const void **) &m_pbPublicKeyOrToken,
+ &m_cbPublicKeyOrToken,
+ NULL,
+ &m_pAssemblyName,
+ &m_context,
+ &m_dwFlags));
+
+ if (m_cbPublicKeyOrToken != 0)
+ m_dwFlags |= afPublicKey;
+ }
+ else
+ IfFailRet(pImport->GetAssemblyRefProps(kAssemblyToken,
+ (const void**) &m_pbPublicKeyOrToken,
+ &m_cbPublicKeyOrToken,
+ &m_pAssemblyName,
+ &m_context,
+ NULL,
+ NULL,
+ &m_dwFlags));
+
+ // When m_cbPublicKeyOrToken is 0, a NULL m_pbPublicKeyOrToken indicates that the public key or token
+ // is not specified, while a non-NULL m_pbPublicKeyOrToken indicates an empty public key (i.e.,
+ // a non-strongnamed assembly). However, the MetaData API puts a random value in m_pbPublicKeyOrToken
+ // when m_cbPublicKeyOrToken is 0. Since an AssemblyDef or AssemblyRef can't use a partial name, we
+ // always ensure that m_pbPublicKeyOrToken is not NULL.
+ if (m_cbPublicKeyOrToken == 0)
+ m_pbPublicKeyOrToken = (PBYTE)1;
+
+ return S_OK;
+}
+
+inline HRESULT BaseAssemblySpec::Init(mdToken tkAssemblyRef,
+ IMetaDataAssemblyImport *pImport)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Retrieve size of assembly name
+ ASSEMBLYMETADATA sContext;
+ LPWSTR wszAssemblyName=NULL;
+ ZeroMemory(&sContext, sizeof(ASSEMBLYMETADATA));
+ HRESULT hr = S_OK;
+ if(TypeFromToken(tkAssemblyRef) == mdtAssembly)
+ {
+ DWORD cchName;
+ IfFailRet(pImport->GetAssemblyProps(tkAssemblyRef, // [IN] The Assembly for which to get the properties.
+ NULL, // [OUT] Pointer to the public key or token.
+ NULL, // [OUT] Count of bytes in the public key or token.
+ NULL, // [OUT] Hash Algorithm
+ NULL, // [OUT] Buffer to fill with name.
+ NULL, // [IN] Size of buffer in wide chars.
+ &cchName, // [OUT] Actual # of wide chars in name.
+ &sContext, // [OUT] Assembly MetaData.
+ NULL)); // [OUT] Flags.
+
+ // Get the assembly name and other naming properties
+ wszAssemblyName = (LPWSTR)_alloca(cchName * sizeof(WCHAR));
+ IfFailRet(pImport->GetAssemblyProps(tkAssemblyRef,
+ (const void **)&m_pbPublicKeyOrToken,
+ &m_cbPublicKeyOrToken,
+ NULL,
+ wszAssemblyName,
+ cchName,
+ &cchName,
+ &sContext,
+ &m_dwFlags));
+ }
+ else if(TypeFromToken(tkAssemblyRef) == mdtAssemblyRef)
+ {
+ DWORD cchName;
+ IfFailRet(pImport->GetAssemblyRefProps(tkAssemblyRef, // [IN] The AssemblyRef for which to get the properties.
+ NULL, // [OUT] Pointer to the public key or token.
+ NULL, // [OUT] Count of bytes in the public key or token.
+ NULL, // [OUT] Buffer to fill with name.
+ NULL, // [IN] Size of buffer in wide chars.
+ &cchName, // [OUT] Actual # of wide chars in name.
+ &sContext, // [OUT] Assembly MetaData.
+ NULL, // [OUT] Hash blob.
+ NULL, // [OUT] Count of bytes in the hash blob.
+ NULL)); // [OUT] Flags.
+
+ // Get the assembly name and other naming properties
+ wszAssemblyName = (LPWSTR)_alloca(cchName * sizeof(WCHAR));
+ IfFailRet(pImport->GetAssemblyRefProps(tkAssemblyRef,
+ (const void **)&m_pbPublicKeyOrToken,
+ &m_cbPublicKeyOrToken,
+ wszAssemblyName,
+ cchName,
+ &cchName,
+ &sContext,
+ NULL,
+ NULL,
+ &m_dwFlags));
+ }
+ else
+ {
+ _ASSERTE(false && "unexpected token");
+ }
+ MAKE_UTF8PTR_FROMWIDE_NOTHROW(szAssemblyName,wszAssemblyName);
+ IfNullRet(szAssemblyName);
+ size_t len=strlen(szAssemblyName)+1;
+ NewArrayHolder<char> assemblyName(new(nothrow) char[len]);
+ IfNullRet(assemblyName);
+ strcpy_s(assemblyName,len,szAssemblyName);
+
+ m_pAssemblyName=assemblyName.Extract();
+ m_ownedFlags |= NAME_OWNED; // the name buffer was allocated above and must be freed by the destructor
+ SetContext(&sContext);
+ return S_OK;
+}
+
+inline void BaseAssemblySpec::SetCodeBase(LPCWSTR szCodeBase)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(m_wszCodeBase && (m_ownedFlags & CODEBASE_OWNED))
+ delete m_wszCodeBase;
+ m_ownedFlags &= ~CODEBASE_OWNED;
+ m_wszCodeBase=szCodeBase;
+}
+
+inline LPCWSTR BaseAssemblySpec::GetCodeBase()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_wszCodeBase;
+}
+
+inline void BaseAssemblySpec::SetName(LPCSTR szName)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ if (m_pAssemblyName && (m_ownedFlags & NAME_OWNED))
+ delete [] m_pAssemblyName;
+ m_ownedFlags &= ~NAME_OWNED;
+ m_pAssemblyName = szName;
+}
+
+inline void BaseAssemblySpec::SetCulture(LPCSTR szCulture)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_context.szLocale && (m_ownedFlags & LOCALE_OWNED))
+ delete [] m_context.szLocale;
+ m_ownedFlags &= ~LOCALE_OWNED;
+ if (strcmp(szCulture,"neutral")==0)
+ m_context.szLocale="";
+ else
+ m_context.szLocale=szCulture;
+}
+
+inline void BaseAssemblySpec::SetContext(ASSEMBLYMETADATA* assemblyData)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_context.usMajorVersion=assemblyData->usMajorVersion;
+ m_context.usMinorVersion=assemblyData->usMinorVersion;
+ m_context.usBuildNumber=assemblyData->usBuildNumber;
+ m_context.usRevisionNumber=assemblyData->usRevisionNumber;
+ m_context.rProcessor=assemblyData->rProcessor;
+ m_context.ulProcessor=assemblyData->ulProcessor;
+ m_context.rOS=assemblyData->rOS;
+ m_context.ulOS=assemblyData->ulOS;
+ m_context.szLocale="";
+};
+
+inline BOOL BaseAssemblySpec::IsStrongNamed() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_cbPublicKeyOrToken;
+}
+
+inline BOOL BaseAssemblySpec::HasPublicKey() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return IsAfPublicKey(m_dwFlags) && m_cbPublicKeyOrToken != 0;
+}
+
+inline BOOL BaseAssemblySpec::HasPublicKeyToken() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return IsAfPublicKeyToken(m_dwFlags) && m_cbPublicKeyOrToken != 0;
+}
+
+inline LPCSTR BaseAssemblySpec::GetName() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pAssemblyName;
+}
+
+
+
+inline BOOL BaseAssemblySpec::VerifyBindingString(LPCWSTR pwStr)
+{
+ WRAPPER_NO_CONTRACT;
+ if (wcschr(pwStr, '\\') ||
+ wcschr(pwStr, '/') ||
+ wcschr(pwStr, ':'))
+ return FALSE;
+
+ return TRUE;
+}
+
+
+#endif // __BASE_ASSEMBLY_SPEC_INL__
diff --git a/src/vm/binder.cpp b/src/vm/binder.cpp
new file mode 100644
index 0000000000..86e579771c
--- /dev/null
+++ b/src/vm/binder.cpp
@@ -0,0 +1,1336 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+
+#include "common.h"
+
+#include "binder.h"
+#include "ecall.h"
+
+#include "field.h"
+#include "excep.h"
+#ifdef FEATURE_REMOTING
+#include "message.h"
+#endif // FEATURE_REMOTING
+#include "eeconfig.h"
+#include "rwlock.h"
+#include "runtimehandles.h"
+#include "customattribute.h"
+#include "debugdebugger.h"
+#include "dllimport.h"
+#include "nativeoverlapped.h"
+#include "clrvarargs.h"
+#include "sigbuilder.h"
+
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif
+
+//
+// Retrieve structures from ID.
+//
+NOINLINE PTR_MethodTable MscorlibBinder::LookupClass(BinderClassID id)
+{
+ WRAPPER_NO_CONTRACT;
+ return (&g_Mscorlib)->LookupClassLocal(id);
+}
+
+PTR_MethodTable MscorlibBinder::GetClassLocal(BinderClassID id)
+{
+ WRAPPER_NO_CONTRACT;
+
+ PTR_MethodTable pMT = VolatileLoad(&(m_pClasses[id]));
+ if (pMT == NULL)
+ return LookupClassLocal(id);
+ return pMT;
+}
+
+PTR_MethodTable MscorlibBinder::LookupClassLocal(BinderClassID id)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(ThrowOutOfMemory());
+
+ PRECONDITION(id != CLASS__NIL);
+ PRECONDITION(id <= m_cClasses);
+ }
+ CONTRACTL_END;
+
+ PTR_MethodTable pMT = NULL;
+
+ // Binder methods are used for loading "known" types from mscorlib.dll. Thus they are unlikely to be part
+ // of a recursive cycle. This is used too broadly to force manual overrides at every callsite.
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ const MscorlibClassDescription *d = m_classDescriptions + (int)id;
+
+ pMT = ClassLoader::LoadTypeByNameThrowing(GetModule()->GetAssembly(), d->nameSpace, d->name).AsMethodTable();
+
+ _ASSERTE(pMT->GetModule() == GetModule());
+
+#ifndef DACCESS_COMPILE
+ VolatileStore(&m_pClasses[id], pMT);
+#endif
+
+ return pMT;
+}
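+
+// GetClassLocal/LookupClassLocal above (and the method/field variants below)
+// form a simple lock-free, lazily populated cache: readers VolatileLoad the
+// slot and fall back to the loader on a miss, and the loader publishes the
+// result with a VolatileStore. Racing initializers may both perform the
+// lookup, but they store the same pointer, so the duplicate store is
+// harmless.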
+
+NOINLINE MethodDesc * MscorlibBinder::LookupMethod(BinderMethodID id)
+{
+ WRAPPER_NO_CONTRACT;
+ return (&g_Mscorlib)->LookupMethodLocal(id);
+}
+
+MethodDesc * MscorlibBinder::GetMethodLocal(BinderMethodID id)
+{
+ WRAPPER_NO_CONTRACT;
+
+ MethodDesc * pMD = VolatileLoad(&(m_pMethods[id]));
+ if (pMD == NULL)
+ return LookupMethodLocal(id);
+ return pMD;
+}
+
+MethodDesc * MscorlibBinder::LookupMethodLocal(BinderMethodID id)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(ThrowOutOfMemory());
+
+ PRECONDITION(id != METHOD__NIL);
+ PRECONDITION(id <= m_cMethods);
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ MethodDesc * pMD = NULL;
+
+ const MscorlibMethodDescription *d = m_methodDescriptions + (id - 1);
+
+ MethodTable * pMT = GetClassLocal(d->classID);
+ _ASSERTE(pMT != NULL && "Couldn't find a type in mscorlib!");
+
+ if (d->sig != NULL)
+ {
+ Signature sig = GetSignatureLocal(d->sig);
+
+ pMD = MemberLoader::FindMethod(pMT, d->name, sig.GetRawSig(), sig.GetRawSigLen(), GetModule());
+ }
+ else
+ {
+ pMD = MemberLoader::FindMethodByName(pMT, d->name);
+ }
+
+
+ PREFIX_ASSUME_MSGF(pMD != NULL, ("EE expects method to exist: %s:%s Sig pointer: %p\n", pMT->GetDebugClassName(), d->name, d->sig));
+
+ VolatileStore(&m_pMethods[id], pMD);
+
+ return pMD;
+#else
+ DacNotImpl();
+ return NULL;
+#endif
+}
+
+NOINLINE FieldDesc * MscorlibBinder::LookupField(BinderFieldID id)
+{
+ WRAPPER_NO_CONTRACT;
+ return (&g_Mscorlib)->LookupFieldLocal(id);
+}
+
+FieldDesc * MscorlibBinder::GetFieldLocal(BinderFieldID id)
+{
+ WRAPPER_NO_CONTRACT;
+
+ FieldDesc * pFD = VolatileLoad(&(m_pFields[id]));
+ if (pFD == NULL)
+ return LookupFieldLocal(id);
+ return pFD;
+}
+
+FieldDesc * MscorlibBinder::LookupFieldLocal(BinderFieldID id)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(ThrowOutOfMemory());
+
+ PRECONDITION(id != FIELD__NIL);
+ PRECONDITION(id <= m_cFields);
+ }
+ CONTRACTL_END;
+
+ FieldDesc * pFD = NULL;
+
+ const MscorlibFieldDescription *d = m_fieldDescriptions + (id - 1);
+
+ MethodTable * pMT = GetClassLocal(d->classID);
+
+ pFD = MemberLoader::FindField(pMT, d->name, NULL, 0, NULL);
+
+#ifndef DACCESS_COMPILE
+ PREFIX_ASSUME_MSGF(pFD != NULL, ("EE expects field to exist: %s:%s\n", pMT->GetDebugClassName(), d->name));
+
+ VolatileStore(&(m_pFields[id]), pFD);
+#endif
+
+ return pFD;
+}
+
+NOINLINE PTR_MethodTable MscorlibBinder::LookupClassIfExist(BinderClassID id)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ FORBID_FAULT;
+ MODE_ANY;
+
+ PRECONDITION(id != CLASS__NIL);
+ PRECONDITION(id <= (&g_Mscorlib)->m_cClasses);
+ }
+ CONTRACTL_END;
+
+ // Run the class loader in non-load mode.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ // Binder methods are used for loading "known" types from mscorlib.dll. Thus they are unlikely to be part
+ // of a recursive cycle. This is used too broadly to force manual overrides at every callsite.
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ const MscorlibClassDescription *d = (&g_Mscorlib)->m_classDescriptions + (int)id;
+
+ PTR_MethodTable pMT = ClassLoader::LoadTypeByNameThrowing(GetModule()->GetAssembly(), d->nameSpace, d->name,
+ ClassLoader::ReturnNullIfNotFound, ClassLoader::DontLoadTypes, CLASS_LOAD_UNRESTOREDTYPEKEY).AsMethodTable();
+
+ _ASSERTE((pMT == NULL) || (pMT->GetModule() == GetModule()));
+
+#ifndef DACCESS_COMPILE
+ if ((pMT != NULL) && pMT->IsFullyLoaded())
+ VolatileStore(&(g_Mscorlib.m_pClasses[id]), pMT);
+#endif
+
+ return pMT;
+}
+
+Signature MscorlibBinder::GetSignature(LPHARDCODEDMETASIG pHardcodedSig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+// Make sure all HardCodedMetaSig's are global. Because there is no individual
+// cleanup of converted binary sigs, using allocated HardCodedMetaSig's
+// can lead to a quiet memory leak.
+#ifdef _DEBUG_IMPL
+
+// This #include workaround generates a monster boolean expression that compares
+// "this" against the address of every global defined in metasig.h
+ if (! (0
+#define METASIG_BODY(varname, types) || pHardcodedSig==&gsig_ ## varname
+#include "metasig.h"
+ ))
+ {
+ _ASSERTE(!"The HardCodedMetaSig struct can only be declared as a global in metasig.h.");
+ }
+#endif
+
+ return (&g_Mscorlib)->GetSignatureLocal(pHardcodedSig);
+}
+
+Signature MscorlibBinder::GetTargetSignature(LPHARDCODEDMETASIG pHardcodedSig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+#ifdef CROSSGEN_COMPILE
+ return GetModule()->m_pBinder->GetSignatureLocal(pHardcodedSig);
+#else
+ return (&g_Mscorlib)->GetSignatureLocal(pHardcodedSig);
+#endif
+}
+
+// Get the metasig, do a one-time conversion if necessary
+Signature MscorlibBinder::GetSignatureLocal(LPHARDCODEDMETASIG pHardcodedSig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ PTR_CBYTE pMetaSig = PTR_CBYTE((TADDR)VolatileLoad(&pHardcodedSig->m_pMetaSig));
+
+ // To minimize code and data size, the hardcoded metasigs are baked as much as possible
+ // at compile time. Only the signatures with type references require one-time conversion at runtime.
+
+ // the negative size means signature with unresolved type references
+ if ((INT8)*pMetaSig < 0)
+ {
+#ifndef DACCESS_COMPILE
+ pMetaSig = ConvertSignature(pHardcodedSig, pMetaSig);
+#else
+ DacNotImpl();
+#endif
+ }
+
+ // The metasig has to be resolved at this point
+ INT8 cbSig = (INT8)*pMetaSig;
+ _ASSERTE(cbSig > 0);
+
+#ifdef DACCESS_COMPILE
+ PCCOR_SIGNATURE pSig = (PCCOR_SIGNATURE)
+ DacInstantiateTypeByAddress(dac_cast<TADDR>(pMetaSig + 1),
+ cbSig,
+ true);
+#else
+ PCCOR_SIGNATURE pSig = pMetaSig+1;
+#endif
+
+ return Signature(pSig, cbSig);
+}
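+
+// For reference, the hardcoded metasig layout relied on above: byte 0 is a
+// signed length (negative while the blob still contains unresolved binder
+// class IDs and must go through ConvertSignature once), followed by the COR
+// signature bytes. An illustrative resolved blob for "void (int32)":
+//
+// { 4, IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, ELEMENT_TYPE_VOID, ELEMENT_TYPE_I4 }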
+
+#ifndef DACCESS_COMPILE
+
+//------------------------------------------------------------------
+// Resolve type references in the hardcoded metasig.
+// Returns a new signature with type references resolved.
+//------------------------------------------------------------------
+void MscorlibBinder::BuildConvertedSignature(const BYTE* pSig, SigBuilder * pSigBuilder)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pSig));
+ PRECONDITION(CheckPointer(pSigBuilder));
+ }
+ CONTRACTL_END
+
+ unsigned argCount;
+ unsigned callConv;
+ INDEBUG(bool bSomethingResolved = false;)
+
+ // calling convention
+ callConv = *pSig++;
+ pSigBuilder->AppendData(callConv);
+
+ if ((callConv & IMAGE_CEE_CS_CALLCONV_MASK) == IMAGE_CEE_CS_CALLCONV_DEFAULT) {
+ // arg count
+ argCount = *pSig++;
+ pSigBuilder->AppendData(argCount);
+ }
+ else {
+ if ((callConv & IMAGE_CEE_CS_CALLCONV_MASK) != IMAGE_CEE_CS_CALLCONV_FIELD)
+ THROW_BAD_FORMAT(BFA_BAD_SIGNATURE, (Module*)NULL);
+ argCount = 0;
+ }
+
+ // <= because we want to include the return value or the field
+ for (unsigned i = 0; i <= argCount; i++) {
+
+ for (;;) {
+ BinderClassID id = CLASS__NIL;
+ bool again = false;
+
+ CorElementType type = (CorElementType)*pSig++;
+
+ switch (type)
+ {
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_SZARRAY:
+ again = true;
+ break;
+
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_VALUETYPE:
+ // The binder class id may overflow 1 byte. Use 2 bytes to encode it.
+ id = (BinderClassID) (*pSig + 0x100 * *(pSig + 1));
+ pSig += 2;
+ break;
+
+ case ELEMENT_TYPE_VOID:
+ if (i != 0) {
+ if (pSig[-2] != ELEMENT_TYPE_PTR)
+ THROW_BAD_FORMAT(BFA_ONLY_VOID_PTR_IN_ARGS, (Module*)NULL); // only pointer to void allowed in arguments
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ pSigBuilder->AppendElementType(type);
+
+ if (id != CLASS__NIL)
+ {
+ pSigBuilder->AppendToken(GetClassLocal(id)->GetCl());
+
+ INDEBUG(bSomethingResolved = true;)
+ }
+
+ if (!again)
+ break;
+ }
+ }
+
+ _ASSERTE(bSomethingResolved);
+}
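+
+// Rough before/after sketch of the conversion above (values hypothetical, not
+// from the original source): an unresolved metasig encodes a type reference as
+// ELEMENT_TYPE_CLASS or ELEMENT_TYPE_VALUETYPE followed by a 2-byte
+// little-endian BinderClassID, e.g.
+//
+// in: { callconv, argc, ELEMENT_TYPE_CLASS, idLo, idHi }
+// out: { callconv, argc, ELEMENT_TYPE_CLASS, token of GetClassLocal(idLo + 0x100*idHi)->GetCl() }
+//
+// BYREF/PTR/SZARRAY set "again" so that the element type following them is
+// converted in the same pass.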
+
+const BYTE* MscorlibBinder::ConvertSignature(LPHARDCODEDMETASIG pHardcodedSig, const BYTE* pSig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ GCX_PREEMP();
+
+ SigBuilder sigBuilder;
+
+ BuildConvertedSignature(pSig+1, &sigBuilder);
+
+ DWORD cbCount;
+ PVOID pSignature = sigBuilder.GetSignature(&cbCount);
+
+ {
+ CrstHolder ch(&s_SigConvertCrst);
+
+ if (*(INT8*)pHardcodedSig->m_pMetaSig < 0) {
+
+ BYTE* pResolved = (BYTE*)(void*)(SystemDomain::GetGlobalLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(1) + S_SIZE_T(cbCount)));
+
+ _ASSERTE(FitsIn<INT8>(cbCount));
+ *(INT8*)pResolved = static_cast<INT8>(cbCount);
+ CopyMemory(pResolved+1, pSignature, cbCount);
+
+ // This has to happen last: overwrite the pointer to the metasig with the resolved one
+ VolatileStore<const BYTE *>(&const_cast<HardCodedMetaSig *>(pHardcodedSig)->m_pMetaSig, pResolved);
+ }
+ }
+
+ return pHardcodedSig->m_pMetaSig;
+}
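+
+// The conversion above is a double-checked publication: readers in
+// GetSignatureLocal do a lock-free VolatileLoad of m_pMetaSig, and the writer
+// re-checks the sign of the length byte under s_SigConvertCrst before
+// publishing the resolved blob with VolatileStore, so two racing converters
+// cannot both publish. The resolved copy lives on the global loader
+// allocator's high-frequency heap and is never freed, which is why the debug
+// check in GetSignature insists that all HardCodedMetaSigs are globals.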
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef _DEBUG
+void MscorlibBinder::TriggerGCUnderStress()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ INJECT_FAULT(ThrowOutOfMemory());
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ _ASSERTE (GetThread ());
+ TRIGGERSGC ();
+ // Force a GC here because GetClass could trigger GC nondeterministically
+ if (g_pConfig->GetGCStressLevel() != 0)
+ {
+ DEBUG_ONLY_REGION();
+ Thread * pThread = GetThread ();
+ BOOL bInCoopMode = pThread->PreemptiveGCDisabled ();
+ GCX_COOP ();
+ if (bInCoopMode)
+ {
+ pThread->PulseGCMode ();
+ }
+ }
+#endif //DACCESS_COMPILE
+}
+#endif // _DEBUG
+
+DWORD MscorlibBinder::GetFieldOffset(BinderFieldID id)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetField(id)->GetOffset();
+}
+
+#ifndef DACCESS_COMPILE
+
+CrstStatic MscorlibBinder::s_SigConvertCrst;
+
+/*static*/
+void MscorlibBinder::Startup()
+{
+ WRAPPER_NO_CONTRACT
+ s_SigConvertCrst.Init(CrstSigConvert);
+}
+
+#if defined(_DEBUG) && !defined(CROSSGEN_COMPILE)
+
+// NoClass is used to suppress the check that the unmanaged and managed sizes match
+#define NoClass char[USHRT_MAX]
+
+const MscorlibBinder::OffsetAndSizeCheck MscorlibBinder::OffsetsAndSizes[] =
+{
+ #define DEFINE_CLASS_U(nameSpace, stringName, unmanagedType) \
+ { PTR_CSTR((TADDR) g_ ## nameSpace ## NS ), PTR_CUTF8((TADDR) # stringName), sizeof(unmanagedType), 0, 0, 0 },
+
+ #define DEFINE_FIELD_U(stringName, unmanagedContainingType, unmanagedOffset) \
+ { 0, 0, 0, PTR_CUTF8((TADDR) # stringName), offsetof(unmanagedContainingType, unmanagedOffset), sizeof(((unmanagedContainingType*)1)->unmanagedOffset) },
+ #include "mscorlib.h"
+};
+
+//
+// check the basic consistency between mscorlib and mscorwks
+//
+void MscorlibBinder::Check()
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodTable * pMT = NULL;
+
+ for (unsigned i = 0; i < NumItems(OffsetsAndSizes); i++)
+ {
+ const OffsetAndSizeCheck * p = OffsetsAndSizes + i;
+
+ if (p->className != NULL)
+ {
+ pMT = ClassLoader::LoadTypeByNameThrowing(GetModule()->GetAssembly(), p->classNameSpace, p->className).AsMethodTable();
+
+ if (p->expectedClassSize == sizeof(NoClass))
+ continue;
+
+ // hidden size of the type that participates in the alignment calculation
+ DWORD hiddenSize = pMT->IsValueType() ? sizeof(MethodTable*) : 0;
+
+ DWORD size = pMT->GetBaseSize() - (sizeof(ObjHeader)+hiddenSize);
+
+ DWORD expectedsize = (DWORD)ALIGN_UP(p->expectedClassSize + (sizeof(ObjHeader) + hiddenSize),
+ DATA_ALIGNMENT) - (sizeof(ObjHeader) + hiddenSize);
+
+ CONSISTENCY_CHECK_MSGF(size == expectedsize,
+ ("Managed object size does not match unmanaged object size\n"
+ "man: 0x%x, unman: 0x%x, Name: %s\n", size, expectedsize, pMT->GetDebugClassName()));
+ }
+ else
+ if (p->fieldName != NULL)
+ {
+ // This assert will fire if there is a DEFINE_FIELD_U macro without a preceding DEFINE_CLASS_U macro in mscorlib.h
+ _ASSERTE(pMT != NULL);
+
+ FieldDesc * pFD = MemberLoader::FindField(pMT, p->fieldName, NULL, 0, NULL);
+ _ASSERTE(pFD != NULL);
+
+ DWORD offset = pFD->GetOffset();
+
+ if (!pFD->IsFieldOfValueType())
+ {
+ offset += Object::GetOffsetOfFirstField();
+ }
+
+ CONSISTENCY_CHECK_MSGF(offset == p->expectedFieldOffset,
+ ("Managed class field offset does not match unmanaged class field offset\n"
+ "man: 0x%x, unman: 0x%x, Class: %s, Name: %s\n", offset, p->expectedFieldOffset, pFD->GetApproxEnclosingMethodTable()->GetDebugClassName(), pFD->GetName()));
+
+ DWORD size = pFD->LoadSize();
+
+ CONSISTENCY_CHECK_MSGF(size == p->expectedFieldSize,
+ ("Managed class field size does not match unmanaged class field size\n"
+ "man: 0x%x, unman: 0x%x, Class: %s, Name: %s\n", size, p->expectedFieldSize, pFD->GetApproxEnclosingMethodTable()->GetDebugClassName(), pFD->GetName()));
+ }
+ }
+}
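+
+// Worked example of the size check above, under assumed values (64-bit,
+// DATA_ALIGNMENT == 8, sizeof(ObjHeader) == 8): for a value type whose
+// unmanaged mirror has sizeof == 12, hiddenSize == sizeof(MethodTable*) == 8,
+// so expectedsize = ALIGN_UP(12 + 16, 8) - 16 = 32 - 16 = 16, and the managed
+// side must report GetBaseSize() - 16 == 16 as well.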
+
+//
+// check consistency of the unmanaged and managed fcall signatures
+//
+/* static */ FCSigCheck* FCSigCheck::g_pFCSigCheck;
+const char * aType[] =
+{
+ "void",
+ "FC_BOOL_RET",
+ "CLR_BOOL",
+ "FC_CHAR_RET",
+ "CLR_CHAR",
+ "FC_INT8_RET",
+ "INT8",
+ "FC_UINT8_RET",
+ "UINT8",
+ "FC_INT16_RET",
+ "INT16",
+ "FC_UINT16_RET",
+ "UINT16",
+ "INT64",
+ "VINT64",
+ "UINT64",
+ "VUINT64",
+ "float",
+ "Vfloat",
+ "double",
+ "Vdouble"
+};
+
+const char * aInt32Type[] =
+{
+ "INT32",
+ "UINT32", // we might remove it to have a better check
+ "int",
+ "unsigned int", // we might remove it to have a better check
+ "DWORD", // we might remove it to have a better check
+ "HRESULT", // we might remove it to have a better check
+ "mdToken", // we might remove it to have a better check
+ "ULONG", // we might remove it to have a better check
+ "mdMemberRef", // we might remove it to have a better check
+ "mdCustomAttribute", // we might remove it to have a better check
+ "mdTypeDef", // we might remove it to have a better check
+ "mdFieldDef", // we might remove it to have a better check
+ "LONG",
+ "CLR_I4",
+ "LCID" // we might remove it to have a better check
+};
+
+const char * aUInt32Type[] =
+{
+ "UINT32",
+ "unsigned int",
+ "DWORD",
+ "INT32", // we might remove it to have a better check
+ "ULONG"
+};
+
+static BOOL IsStrInArray(const char* sStr, size_t len, const char* aStrArray[], int nSize)
+{
+ STANDARD_VM_CONTRACT;
+ for (int i = 0; i < nSize; i++)
+ {
+ if (SString::_strnicmp(aStrArray[i], sStr, (COUNT_T)len) == 0)
+ {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+static void FCallCheckSignature(MethodDesc* pMD, PCODE pImpl)
+{
+ STANDARD_VM_CONTRACT;
+
+ char* pUnmanagedSig = NULL;
+
+ FCSigCheck* pSigCheck = FCSigCheck::g_pFCSigCheck;
+ while (pSigCheck != NULL)
+ {
+ if (pImpl == (PCODE)pSigCheck->func) {
+ pUnmanagedSig = pSigCheck->signature;
+ break;
+ }
+ pSigCheck = pSigCheck->next;
+ }
+
+ MetaSig msig(pMD);
+ int argIndex = -2; // start with return value
+ int enregisteredArguments = 0;
+ char* pUnmanagedArg = pUnmanagedSig;
+ for (;;)
+ {
+ CorElementType argType = ELEMENT_TYPE_END;
+ TypeHandle argTypeHandle;
+
+ if (argIndex == -2)
+ {
+ // return value
+ argType = msig.GetReturnType();
+ if (argType == ELEMENT_TYPE_VALUETYPE)
+ argTypeHandle = msig.GetRetTypeHandleThrowing();
+ }
+
+ if (argIndex == -1)
+ {
+ // this ptr
+ if (msig.HasThis())
+ argType = ELEMENT_TYPE_CLASS;
+ else
+ argIndex++; // move on to the first argument
+ }
+
+ if (argIndex >= 0)
+ {
+ argType = msig.NextArg();
+ if (argType == ELEMENT_TYPE_END)
+ break;
+ if (argType == ELEMENT_TYPE_VALUETYPE)
+ argTypeHandle = msig.GetLastTypeHandleThrowing();
+ }
+
+ const char* expectedType = NULL;
+
+ switch (argType)
+ {
+ case ELEMENT_TYPE_VOID:
+ expectedType = pMD->IsCtor() ? NULL : "void";
+ break;
+ case ELEMENT_TYPE_BOOLEAN:
+ expectedType = (argIndex == -2) ? "FC_BOOL_RET" : "CLR_BOOL";
+ break;
+ case ELEMENT_TYPE_CHAR:
+ expectedType = (argIndex == -2) ? "FC_CHAR_RET" : "CLR_CHAR";
+ break;
+ case ELEMENT_TYPE_I1:
+ expectedType = (argIndex == -2) ? "FC_INT8_RET" : "INT8";
+ break;
+ case ELEMENT_TYPE_U1:
+ expectedType = (argIndex == -2) ? "FC_UINT8_RET" : "UINT8";
+ break;
+ case ELEMENT_TYPE_I2:
+ expectedType = (argIndex == -2) ? "FC_INT16_RET" : "INT16";
+ break;
+ case ELEMENT_TYPE_U2:
+ expectedType = (argIndex == -2) ? "FC_UINT16_RET" : "UINT16";
+ break;
+ //case ELEMENT_TYPE_I4:
+ // expectedType = "INT32";
+ // break;
+ // case ELEMENT_TYPE_U4:
+ // expectedType = "UINT32";
+ // break;
+
+ // See the comments in fcall.h on what the "V" prefix means.
+ case ELEMENT_TYPE_I8:
+ expectedType = ((argIndex == -2) || (enregisteredArguments >= 2)) ? "INT64" : "VINT64";
+ break;
+ case ELEMENT_TYPE_U8:
+ expectedType = ((argIndex == -2) || (enregisteredArguments >= 2)) ? "UINT64" : "VUINT64";
+ break;
+ case ELEMENT_TYPE_R4:
+ expectedType = ((argIndex == -2) || (enregisteredArguments >= 2)) ? "float" : "Vfloat";
+ break;
+ case ELEMENT_TYPE_R8:
+ expectedType = ((argIndex == -2) || (enregisteredArguments >= 2)) ? "double" : "Vdouble";
+ break;
+ default:
+ // no checks for other types
+ break;
+ }
+
+ // Count number of enregistered arguments for x86
+ if ((argIndex != -2) && !((expectedType != NULL) && (*expectedType == 'V')))
+ {
+ enregisteredArguments++;
+ }
+
+ if (pUnmanagedSig != NULL)
+ {
+ CONSISTENCY_CHECK_MSGF(pUnmanagedArg != NULL,
+ ("Unexpected end of managed fcall signature\n"
+ "Method: %s:%s\n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+
+ char* pUnmanagedArgEnd = strchr(pUnmanagedArg, ',');
+
+ char* pUnmanagedTypeEnd = (pUnmanagedArgEnd != NULL) ?
+ pUnmanagedArgEnd : (pUnmanagedArg + strlen(pUnmanagedArg));
+
+ if (argIndex != -2)
+ {
+ // skip argument name
+ while(pUnmanagedTypeEnd > pUnmanagedArg)
+ {
+ char c = *(pUnmanagedTypeEnd-1);
+ if ((c != '_')
+ && ((c < '0') || ('9' < c))
+ && ((c < 'a') || ('z' < c))
+ && ((c < 'A') || ('Z' < c)))
+ break;
+ pUnmanagedTypeEnd--;
+ }
+ }
+
+ // skip whitespace
+ while(pUnmanagedTypeEnd > pUnmanagedArg)
+ {
+ char c = *(pUnmanagedTypeEnd-1);
+ if ((c != 0x20) && (c != '\t') && (c != '\n') && (c != '\r'))
+ break;
+ pUnmanagedTypeEnd--;
+ }
+
+ size_t len = pUnmanagedTypeEnd - pUnmanagedArg;
+ // generate the unmanaged argument signature so it can be shown in the error message if possible
+ StackSString ssUnmanagedType(SString::Ascii, pUnmanagedArg, (COUNT_T)len);
+ StackScratchBuffer buffer;
+ const char * pUnManagedType = ssUnmanagedType.GetANSI(buffer);
+
+ if (expectedType != NULL)
+ {
+ // when the managed type is well known
+ if (!(strlen(expectedType) == len && SString::_strnicmp(expectedType, pUnmanagedArg, (COUNT_T)len) == 0))
+ {
+ printf("CheckExtended: The managed and unmanaged fcall signatures do not match, Method: %s:%s. Argument: %d Expecting: %s Actual: %s\n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, argIndex, expectedType, pUnManagedType);
+ }
+ }
+ else
+ {
+ // when the managed type is not well known, we can still detect a signature mismatch if the native type is a well-known type
+ BOOL bSigError = false;
+ if (argType == ELEMENT_TYPE_VOID && pMD->IsCtor())
+ {
+ bSigError = false;
+ }
+ else if (argType == ELEMENT_TYPE_I4)
+ {
+ bSigError = !IsStrInArray(pUnmanagedArg, len, aInt32Type, NumItems(aInt32Type));
+ }
+ else if (argType == ELEMENT_TYPE_U4)
+ {
+ bSigError = !IsStrInArray(pUnmanagedArg, len, aUInt32Type, NumItems(aUInt32Type));
+ }
+ else if (argType == ELEMENT_TYPE_VALUETYPE)
+ {
+ // we already did a special check for value types
+ bSigError = false;
+ }
+ else
+ {
+ bSigError = IsStrInArray(pUnmanagedArg, len, aType, NumItems(aType));
+ }
+ if (bSigError)
+ {
+ printf("CheckExtended: The managed and unmanaged fcall signatures do not match, Method: %s:%s. Argument: %d Expecting: (CorElementType)%d actual: %s\n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, argIndex, argType, pUnManagedType);
+ }
+ }
+ pUnmanagedArg = (pUnmanagedArgEnd != NULL) ? (pUnmanagedArgEnd+1) : NULL;
+ }
+
+ argIndex++;
+ }
+
+ if (pUnmanagedSig != NULL)
+ {
+ if (msig.IsVarArg())
+ {
+ if (!((pUnmanagedArg != NULL) && strcmp(pUnmanagedArg, "...") == 0))
+ {
+ printf("CheckExtended: Expecting varargs in unmanaged fcall signature, Method: %s:%s\n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName);
+ }
+ }
+ else
+ {
+ if (!(pUnmanagedArg == NULL))
+ {
+ printf("CheckExtended: Unexpected end of unmanaged fcall signature, Method: %s:%s\n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName);
+ }
+ }
+ }
+}
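+
+// Hypothetical example of the pairing this routine validates (not from the
+// original source): a managed fcall declared as "bool M(int key)" should carry
+// an unmanaged signature string like "FC_BOOL_RET, INT32 key" -- booleans map
+// to FC_BOOL_RET in return position and CLR_BOOL in argument position, and any
+// name in aInt32Type above is accepted for an ELEMENT_TYPE_I4 argument.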
+
+//
+// extended check of consistency between mscorlib and mscorwks:
+// - verifies that all references from mscorlib to mscorwks are present
+// - verifies that all references from mscorwks to mscorlib are present
+// - limited detection of mismatches between managed and unmanaged fcall signatures
+//
+void MscorlibBinder::CheckExtended()
+{
+ STANDARD_VM_CONTRACT;
+
+ // check the consistency of BCL and VM
+ // note: it is not enabled by default because it is time consuming and
+ // changes the bootstrap sequence of the EE
+ if (!CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ConsistencyCheck))
+ return;
+
+ //
+ // VM referencing BCL (mscorlib.h)
+ //
+ for (BinderClassID cID = (BinderClassID) 1; (int)cID < m_cClasses; cID = (BinderClassID) (cID + 1))
+ {
+ bool fError = false;
+ EX_TRY
+ {
+ if (MscorlibBinder::GetClassName(cID) != NULL) // Allow for CorSigElement entries with no classes
+ {
+ if (NULL == MscorlibBinder::GetClass(cID))
+ {
+ fError = true;
+ }
+ }
+ }
+ EX_CATCH
+ {
+ fError = true;
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ if (fError)
+ {
+ printf("CheckExtended: VM expects type to exist: %s.%s\n", MscorlibBinder::GetClassNameSpace(cID), MscorlibBinder::GetClassName(cID));
+ }
+ }
+
+ for (BinderMethodID mID = (BinderMethodID) 1; mID < (BinderMethodID) MscorlibBinder::m_cMethods; mID = (BinderMethodID) (mID + 1))
+ {
+ bool fError = false;
+ BinderClassID cID = m_methodDescriptions[mID-1].classID;
+ EX_TRY
+ {
+ if (NULL == MscorlibBinder::GetMethod(mID))
+ {
+ fError = true;
+ }
+ }
+ EX_CATCH
+ {
+ fError = true;
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ if (fError)
+ {
+ printf("CheckExtended: VM expects method to exist: %s.%s::%s\n", MscorlibBinder::GetClassNameSpace(cID), MscorlibBinder::GetClassName(cID), MscorlibBinder::GetMethodName(mID));
+ }
+ }
+
+ for (BinderFieldID fID = (BinderFieldID) 1; fID < (BinderFieldID) MscorlibBinder::m_cFields; fID = (BinderFieldID) (fID + 1))
+ {
+ bool fError = false;
+ BinderClassID cID = m_fieldDescriptions[fID-1].classID;
+ EX_TRY
+ {
+ if (NULL == MscorlibBinder::GetField(fID))
+ {
+ fError = true;
+ }
+ }
+ EX_CATCH
+ {
+ fError = true;
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ if (fError)
+ {
+ printf("CheckExtended: VM expects field to exist: %s.%s::%s\n", MscorlibBinder::GetClassNameSpace(cID), MscorlibBinder::GetClassName(cID), MscorlibBinder::GetFieldName(fID));
+ }
+ }
+
+ //
+ // BCL referencing VM (ecall.cpp)
+ //
+ SetSHash<DWORD> usedECallIds;
+
+ HRESULT hr = S_OK;
+ Module *pModule = MscorlibBinder::m_pModule;
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+
+ HENUMInternal hEnum;
+
+ // for all methods...
+ IfFailGo(pInternalImport->EnumAllInit(mdtMethodDef, &hEnum));
+
+ for (;;) {
+ mdTypeDef td;
+ mdTypeDef tdClass;
+ DWORD dwImplFlags;
+ DWORD dwMemberAttrs;
+
+ if (!pInternalImport->EnumNext(&hEnum, &td))
+ break;
+
+ pInternalImport->GetMethodImplProps(td, NULL, &dwImplFlags);
+
+ IfFailGo(pInternalImport->GetMethodDefProps(td, &dwMemberAttrs));
+
+ // ... that are internal calls ...
+ if (!IsMiInternalCall(dwImplFlags) && !IsMdPinvokeImpl(dwMemberAttrs))
+ continue;
+
+ IfFailGo(pInternalImport->GetParentToken(td, &tdClass));
+
+ TypeHandle type;
+
+ EX_TRY
+ {
+ type = ClassLoader::LoadTypeDefOrRefThrowing(pModule, tdClass,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ }
+ EX_CATCH
+ {
+ LPCUTF8 pszClassName;
+ LPCUTF8 pszNameSpace;
+ if (FAILED(pInternalImport->GetNameOfTypeDef(tdClass, &pszClassName, &pszNameSpace)))
+ {
+ pszClassName = pszNameSpace = "Invalid TypeDef record";
+ }
+ printf("CheckExtended: Unable to load class from mscorlib: %s.%s\n", pszNameSpace, pszClassName);
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ MethodDesc *pMD = MemberLoader::FindMethod(type.AsMethodTable(), td);
+ _ASSERTE(pMD);
+
+ // Required to support generic FCalls (only instance methods on generic types constrained to "class" are allowed)
+ if (type.IsGenericTypeDefinition()) {
+ pMD = pMD->FindOrCreateTypicalSharedInstantiation();
+ }
+
+ DWORD id = 0;
+
+ if (pMD->IsFCall())
+ {
+ id = ((FCallMethodDesc *)pMD)->GetECallID();
+ if (id == 0) {
+ id = ECall::GetIDForMethod(pMD);
+ }
+ }
+ else
+ if (pMD->IsNDirect())
+ {
+ PInvokeStaticSigInfo sigInfo;
+ NDirect::PopulateNDirectMethodDesc((NDirectMethodDesc *)pMD, &sigInfo);
+
+ if (pMD->IsQCall())
+ {
+ id = ((NDirectMethodDesc *)pMD)->GetECallID();
+ if (id == 0) {
+ id = ECall::GetIDForMethod(pMD);
+ }
+ }
+ else
+ {
+ continue;
+ }
+ }
+ else
+ {
+ continue;
+ }
+
+ // ... check that the method is in the fcall table.
+ if (id == 0) {
+ LPCUTF8 pszClassName;
+ LPCUTF8 pszNameSpace;
+ if (FAILED(pInternalImport->GetNameOfTypeDef(tdClass, &pszClassName, &pszNameSpace)))
+ {
+ pszClassName = pszNameSpace = "Invalid TypeDef record";
+ }
+ LPCUTF8 pszName;
+ if (FAILED(pInternalImport->GetNameOfMethodDef(td, &pszName)))
+ {
+ pszName = "Invalid method name";
+ }
+ printf("CheckExtended: Unable to find internalcall implementation: %s.%s::%s\n", pszNameSpace, pszClassName, pszName);
+ }
+
+ if (id != 0)
+ {
+ usedECallIds.Add(id);
+ }
+
+ if (pMD->IsFCall())
+ {
+ FCallCheckSignature(pMD, ECall::GetFCallImpl(pMD));
+ }
+ }
+
+ pInternalImport->EnumClose(&hEnum);
+
+ // Verify that there are no unused entries in the ecall table
+ ECall::CheckUnusedECalls(usedECallIds);
+
+ //
+ // Stub constants
+ //
+#define ASMCONSTANTS_C_ASSERT(cond)
+#define ASMCONSTANTS_RUNTIME_ASSERT(cond) _ASSERTE(cond)
+#include "asmconstants.h"
+
+ _ASSERTE(sizeof(VARIANT) == MscorlibBinder::GetClass(CLASS__NATIVEVARIANT)->GetNativeSize());
+
+ printf("CheckExtended: completed without exception.\n");
+
+ErrExit:
+ _ASSERTE(SUCCEEDED(hr));
+}
+
+#endif // _DEBUG && !CROSSGEN_COMPILE
+
+extern const MscorlibClassDescription c_rgMscorlibClassDescriptions[];
+extern const USHORT c_nMscorlibClassDescriptions;
+
+extern const MscorlibMethodDescription c_rgMscorlibMethodDescriptions[];
+extern const USHORT c_nMscorlibMethodDescriptions;
+
+extern const MscorlibFieldDescription c_rgMscorlibFieldDescriptions[];
+extern const USHORT c_nMscorlibFieldDescriptions;
+
+#ifdef CROSSGEN_COMPILE
+namespace CrossGenMscorlib
+{
+ extern const MscorlibClassDescription c_rgMscorlibClassDescriptions[];
+ extern const USHORT c_nMscorlibClassDescriptions;
+
+ extern const MscorlibMethodDescription c_rgMscorlibMethodDescriptions[];
+ extern const USHORT c_nMscorlibMethodDescriptions;
+
+ extern const MscorlibFieldDescription c_rgMscorlibFieldDescriptions[];
+ extern const USHORT c_nMscorlibFieldDescriptions;
+};
+#endif
+
+void MscorlibBinder::AttachModule(Module * pModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ MscorlibBinder * pGlobalBinder = &g_Mscorlib;
+
+ pGlobalBinder->SetDescriptions(pModule,
+ c_rgMscorlibClassDescriptions, c_nMscorlibClassDescriptions,
+ c_rgMscorlibMethodDescriptions, c_nMscorlibMethodDescriptions,
+ c_rgMscorlibFieldDescriptions, c_nMscorlibFieldDescriptions);
+
+#if defined(FEATURE_PREJIT) && !defined(CROSSGEN_COMPILE)
+ MscorlibBinder * pPersistedBinder = pModule->m_pBinder;
+
+ if (pPersistedBinder != NULL
+ // Do not use persisted binder for profiling native images. See comment in code:MscorlibBinder::Fixup.
+ && !(pModule->GetNativeImage()->GetNativeVersionInfo()->wConfigFlags & CORCOMPILE_CONFIG_PROFILING))
+ {
+ pGlobalBinder->m_pClasses = pPersistedBinder->m_pClasses;
+ pGlobalBinder->m_pMethods = pPersistedBinder->m_pMethods;
+ pGlobalBinder->m_pFields = pPersistedBinder->m_pFields;
+
+ pModule->m_pBinder = pGlobalBinder;
+ return;
+ }
+#endif // FEATURE_PREJIT && !CROSSGEN_COMPILE
+
+ pGlobalBinder->AllocateTables();
+
+#ifdef CROSSGEN_COMPILE
+ MscorlibBinder * pTargetBinder = (MscorlibBinder *)(void *)
+ pModule->GetAssembly()->GetLowFrequencyHeap()
+ ->AllocMem(S_SIZE_T(sizeof(MscorlibBinder)));
+
+ pTargetBinder->SetDescriptions(pModule,
+ CrossGenMscorlib::c_rgMscorlibClassDescriptions, CrossGenMscorlib::c_nMscorlibClassDescriptions,
+ CrossGenMscorlib::c_rgMscorlibMethodDescriptions, CrossGenMscorlib::c_nMscorlibMethodDescriptions,
+ CrossGenMscorlib::c_rgMscorlibFieldDescriptions, CrossGenMscorlib::c_nMscorlibFieldDescriptions);
+
+ pTargetBinder->AllocateTables();
+
+ pModule->m_pBinder = pTargetBinder;
+#else
+ pModule->m_pBinder = pGlobalBinder;
+#endif
+}
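+
+// Note on the crossgen path above: two binder instances coexist. g_Mscorlib
+// describes mscorlib as the compiler process itself sees it, while the
+// heap-allocated target binder hung off pModule->m_pBinder uses the
+// CrossGenMscorlib descriptions for the image being compiled; the *Local
+// methods exist to pick one instance explicitly (see GetTargetSignature).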
+
+void MscorlibBinder::SetDescriptions(Module * pModule,
+ const MscorlibClassDescription * pClassDescriptions, USHORT nClasses,
+ const MscorlibMethodDescription * pMethodDescriptions, USHORT nMethods,
+ const MscorlibFieldDescription * pFieldDescriptions, USHORT nFields)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pModule = pModule;
+
+ m_classDescriptions = pClassDescriptions;
+ m_cClasses = nClasses;
+
+ m_methodDescriptions = pMethodDescriptions;
+ m_cMethods = nMethods;
+
+ m_fieldDescriptions = pFieldDescriptions;
+ m_cFields = nFields;
+}
+
+void MscorlibBinder::AllocateTables()
+{
+ STANDARD_VM_CONTRACT;
+
+ LoaderHeap * pHeap = m_pModule->GetAssembly()->GetLowFrequencyHeap();
+
+ m_pClasses = (MethodTable **)(void *)
+ pHeap->AllocMem(S_SIZE_T(m_cClasses) * S_SIZE_T(sizeof(*m_pClasses)));
+ // Note: Memory allocated on loader heap is zero filled
+ // ZeroMemory(m_pClasses, m_cClasses * sizeof(*m_pClasses));
+
+ m_pMethods = (MethodDesc **)(void *)
+ pHeap->AllocMem(S_SIZE_T(m_cMethods) * S_SIZE_T(sizeof(*m_pMethods)));
+ // Note: Memory allocated on loader heap is zero filled
+ // ZeroMemory(m_pMethods, m_cMethodMDs * sizeof(*m_pMethods));
+
+ m_pFields = (FieldDesc **)(void *)
+ pHeap->AllocMem(S_SIZE_T(m_cFields) * S_SIZE_T(sizeof(*m_pFields)));
+ // Note: Memory allocated on loader heap is zero filled
+ // ZeroMemory(m_pFields, m_cFieldRIDs * sizeof(*m_pFields));
+}
+
+PTR_MethodTable MscorlibBinder::LoadPrimitiveType(CorElementType et)
+{
+ STANDARD_VM_CONTRACT;
+
+ PTR_MethodTable pMT = g_Mscorlib.m_pClasses[et];
+
+ // Primitive types hit a cyclic reference on the binder during type loading, so we have to load them in two steps
+ if (pMT == NULL)
+ {
+ const MscorlibClassDescription *d = (&g_Mscorlib)->m_classDescriptions + (int)et;
+
+ pMT = ClassLoader::LoadTypeByNameThrowing(GetModule()->GetAssembly(), d->nameSpace, d->name,
+ ClassLoader::ThrowIfNotFound, ClassLoader::LoadTypes, CLASS_LOAD_APPROXPARENTS).AsMethodTable();
+ g_Mscorlib.m_pClasses[et] = pMT;
+
+ ClassLoader::EnsureLoaded(pMT);
+ }
+
+ return pMT;
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+void MscorlibBinder::BindAll()
+{
+ STANDARD_VM_CONTRACT;
+
+ for (BinderClassID cID = (BinderClassID) 1; cID < m_cClasses; cID = (BinderClassID) (cID + 1))
+ {
+ if (m_classDescriptions[cID].name != NULL) // Allow for CorSigElement entries with no classes
+ GetClassLocal(cID);
+ }
+
+ for (BinderMethodID mID = (BinderMethodID) 1; mID < m_cMethods; mID = (BinderMethodID) (mID + 1))
+ GetMethodLocal(mID);
+
+ for (BinderFieldID fID = (BinderFieldID) 1; fID < m_cFields; fID = (BinderFieldID) (fID + 1))
+ GetFieldLocal(fID);
+}
+
+void MscorlibBinder::Save(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ image->StoreStructure(this, sizeof(MscorlibBinder),
+ DataImage::ITEM_BINDER);
+
+ image->StoreStructure(m_pClasses, m_cClasses * sizeof(*m_pClasses),
+ DataImage::ITEM_BINDER_ITEMS);
+
+ image->StoreStructure(m_pMethods, m_cMethods * sizeof(*m_pMethods),
+ DataImage::ITEM_BINDER_ITEMS);
+
+ image->StoreStructure(m_pFields, m_cFields * sizeof(*m_pFields),
+ DataImage::ITEM_BINDER_ITEMS);
+}
+
+void MscorlibBinder::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ image->FixupPointerField(this, offsetof(MscorlibBinder, m_pModule));
+
+ int i;
+
+ image->FixupPointerField(this, offsetof(MscorlibBinder, m_pClasses));
+ for (i = 1; i < m_cClasses; i++)
+ {
+#ifdef _DEBUG
+ //
+ // We do not want to check for restore at runtime for performance reasons.
+ // If there is ever a case that requires restore, it should be special
+ // cased here and restored explicitly by GetClass/GetField/GetMethod caller.
+ //
+ // Profiling NGen images force restore for all types. We are still going to save
+ // the binder for nidump, but we are not going to use it at runtime.
+ //
+ if (m_pClasses[i] != NULL && !GetAppDomain()->ToCompilationDomain()->m_fForceProfiling)
+ {
+ _ASSERTE(!m_pClasses[i]->NeedsRestore(image));
+ }
+#endif
+ image->FixupPointerField(m_pClasses, i * sizeof(m_pClasses[0]));
+ }
+
+ image->FixupPointerField(this, offsetof(MscorlibBinder, m_pMethods));
+ for (i = 1; i < m_cMethods; i++)
+ {
+#ifdef _DEBUG
+ // See comment above.
+ if (m_pMethods[i] != NULL && !GetAppDomain()->ToCompilationDomain()->m_fForceProfiling)
+ {
+ _ASSERTE(!m_pMethods[i]->NeedsRestore(image));
+ }
+#endif
+
+ image->FixupPointerField(m_pMethods, i * sizeof(m_pMethods[0]));
+ }
+
+ image->FixupPointerField(this, offsetof(MscorlibBinder, m_pFields));
+ for (i = 1; i < m_cFields; i++)
+ {
+ image->FixupPointerField(m_pFields, i * sizeof(m_pFields[0]));
+ }
+
+ image->ZeroPointerField(this, offsetof(MscorlibBinder, m_classDescriptions));
+ image->ZeroPointerField(this, offsetof(MscorlibBinder, m_methodDescriptions));
+ image->ZeroPointerField(this, offsetof(MscorlibBinder, m_fieldDescriptions));
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void
+MscorlibBinder::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ DAC_ENUM_DTHIS();
+
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_classDescriptions),
+ m_cClasses * sizeof(MscorlibClassDescription));
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_methodDescriptions),
+ (m_cMethods - 1) * sizeof(MscorlibMethodDescription));
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_fieldDescriptions),
+ (m_cFields - 1) * sizeof(MscorlibFieldDescription));
+
+ if (m_pModule.IsValid())
+ {
+ m_pModule->EnumMemoryRegions(flags, true);
+ }
+
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_pClasses),
+ m_cClasses * sizeof(PTR_MethodTable));
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_pMethods),
+ m_cMethods * sizeof(PTR_MethodDesc));
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_pFields),
+ m_cFields * sizeof(PTR_FieldDesc));
+}
+
+#endif // #ifdef DACCESS_COMPILE
+
+GVAL_IMPL(MscorlibBinder, g_Mscorlib);
diff --git a/src/vm/binder.h b/src/vm/binder.h
new file mode 100644
index 0000000000..208a9a99f9
--- /dev/null
+++ b/src/vm/binder.h
@@ -0,0 +1,501 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#if !defined(_BINDERMODULE_H_) && !defined(CLR_STANDALONE_BINDER)
+#define _BINDERMODULE_H_
+
+class DataImage;
+class Module;
+class MethodTable;
+class MethodDesc;
+class FieldDesc;
+
+typedef const struct HardCodedMetaSig *LPHARDCODEDMETASIG;
+
+// As hard-coded metasigs are constant data, ordinarily it
+// wouldn't be necessary to use PTR access. However, access
+// through the Binder class requires it.
+typedef DPTR(const struct HardCodedMetaSig) PTR_HARDCODEDMETASIG;
+
+struct HardCodedMetaSig
+{
+ const BYTE* m_pMetaSig; // metasig prefixed with INT8 length:
+ // length > 0 - resolved, length < 0 - has unresolved type references
+};
+
+#define DEFINE_METASIG(body) extern const body
+#define DEFINE_METASIG_T(body) extern body
+#define METASIG_BODY(varname, types) HardCodedMetaSig gsig_ ## varname;
+#include "metasig.h"
+
+//
+// Use the Binder objects to avoid doing unnecessary name lookup
+// (esp. in the prejit case)
+//
+// E.g. MscorlibBinder::GetClass(CLASS__APP_DOMAIN);
+//
+
+// BinderClassIDs are of the form CLASS__XXX
+
+enum BinderClassID
+{
+#define TYPEINFO(e,ns,c,s,g,ia,ip,if,im,gv) CLASS__ ## e,
+#include "cortypeinfo.h"
+#undef TYPEINFO
+
+#define DEFINE_CLASS(i,n,s) CLASS__ ## i,
+#include "mscorlib.h"
+
+ CLASS__MSCORLIB_COUNT,
+
+ // Aliases for element type classids
+ CLASS__NIL = CLASS__ELEMENT_TYPE_END,
+ CLASS__VOID = CLASS__ELEMENT_TYPE_VOID,
+ CLASS__BOOLEAN = CLASS__ELEMENT_TYPE_BOOLEAN,
+ CLASS__CHAR = CLASS__ELEMENT_TYPE_CHAR,
+ CLASS__BYTE = CLASS__ELEMENT_TYPE_U1,
+ CLASS__SBYTE = CLASS__ELEMENT_TYPE_I1,
+ CLASS__INT16 = CLASS__ELEMENT_TYPE_I2,
+ CLASS__UINT16 = CLASS__ELEMENT_TYPE_U2,
+ CLASS__INT32 = CLASS__ELEMENT_TYPE_I4,
+ CLASS__UINT32 = CLASS__ELEMENT_TYPE_U4,
+ CLASS__INT64 = CLASS__ELEMENT_TYPE_I8,
+ CLASS__UINT64 = CLASS__ELEMENT_TYPE_U8,
+ CLASS__SINGLE = CLASS__ELEMENT_TYPE_R4,
+ CLASS__DOUBLE = CLASS__ELEMENT_TYPE_R8,
+ CLASS__STRING = CLASS__ELEMENT_TYPE_STRING,
+ CLASS__TYPED_REFERENCE = CLASS__ELEMENT_TYPE_TYPEDBYREF,
+ CLASS__INTPTR = CLASS__ELEMENT_TYPE_I,
+ CLASS__UINTPTR = CLASS__ELEMENT_TYPE_U,
+ CLASS__OBJECT = CLASS__ELEMENT_TYPE_OBJECT
+};
+
+
+// BinderMethodIDs are of the form METHOD__XXX__YYY,
+// where X is the class and Y is the method
+
+enum BinderMethodID : int
+{
+ METHOD__NIL = 0,
+
+#define DEFINE_METHOD(c,i,s,g) METHOD__ ## c ## __ ## i,
+#include "mscorlib.h"
+
+ METHOD__MSCORLIB_COUNT,
+};
+
+// BinderFieldIDs are of the form FIELD__XXX__YYY,
+// where X is the class and Y is the field
+
+enum BinderFieldID
+{
+ FIELD__NIL = 0,
+
+#define DEFINE_FIELD(c,i,s) FIELD__ ## c ## __ ## i,
+#include "mscorlib.h"
+
+ FIELD__MSCORLIB_COUNT,
+};
+
+struct MscorlibClassDescription
+{
+ PTR_CSTR nameSpace;
+ PTR_CSTR name;
+};
+
+struct MscorlibMethodDescription
+{
+ BinderClassID classID;
+ PTR_CSTR name;
+ PTR_HARDCODEDMETASIG sig;
+};
+
+struct MscorlibFieldDescription
+{
+ BinderClassID classID;
+ PTR_CSTR name;
+};
+
+class MscorlibBinder
+{
+ public:
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+ //
+ // Note that the frequently called methods are intentionally static to reduce code bloat.
+ // Instance methods would push the address of the global object at every callsite.
+ //
+
+ static PTR_Module GetModule();
+
+ //
+ // Retrieve structures from ID.
+ //
+ // Note that none of the MscorlibBinder methods trigger static
+ // constructors. The JITed code takes care of triggering them.
+ //
+ static PTR_MethodTable GetClass(BinderClassID id);
+ static MethodDesc * GetMethod(BinderMethodID id);
+ static FieldDesc * GetField(BinderFieldID id);
+
+ //
+ // A slightly faster version that assumes the class was fetched
+ // by the binder earlier.
+ //
+ static PTR_MethodTable GetExistingClass(BinderClassID id);
+ static MethodDesc * GetExistingMethod(BinderMethodID id);
+ static FieldDesc * GetExistingField(BinderFieldID id);
+
+ //
+ // Utilities for classes
+ //
+ static FORCEINLINE BOOL IsClass(MethodTable *pMT, BinderClassID id)
+ {
+ return dac_cast<TADDR>(GetClass(id)) == dac_cast<TADDR>(pMT);
+ }
+
+ // Get the class only if it has been loaded already
+ static PTR_MethodTable GetClassIfExist(BinderClassID id);
+
+ static LPCUTF8 GetClassNameSpace(BinderClassID id);
+ static LPCUTF8 GetClassName(BinderClassID id);
+
+ //
+ // Utilities for methods
+ //
+ static LPCUTF8 GetMethodName(BinderMethodID id);
+ static LPHARDCODEDMETASIG GetMethodSig(BinderMethodID id);
+
+ static Signature GetMethodSignature(BinderMethodID id)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetSignature(GetMethodSig(id));
+ }
+
+ //
+ // Utilities for fields
+ //
+ static LPCUTF8 GetFieldName(BinderFieldID id);
+
+ static DWORD GetFieldOffset(BinderFieldID id);
+
+ //
+ // Utilities for exceptions
+ //
+
+ static MethodTable *GetException(RuntimeExceptionKind kind)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(kind <= kLastExceptionInMscorlib); // Not supported for exceptions defined outside mscorlib.
+ BinderClassID id = (BinderClassID) (kind + CLASS__MSCORLIB_COUNT);
+ return GetClass(id);
+ }
+
+ static BOOL IsException(MethodTable *pMT, RuntimeExceptionKind kind)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(kind <= kLastExceptionInMscorlib); // Not supported for exceptions defined outside mscorlib.
+ BinderClassID id = (BinderClassID) (kind + CLASS__MSCORLIB_COUNT);
+ return dac_cast<TADDR>(GetClassIfExist(id)) == dac_cast<TADDR>(pMT);
+ }
+
+ static LPCUTF8 GetExceptionName(RuntimeExceptionKind kind)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(kind <= kLastExceptionInMscorlib); // Not supported for exceptions defined outside mscorlib.
+ BinderClassID id = (BinderClassID) (kind + CLASS__MSCORLIB_COUNT);
+ return GetClassName(id);
+ }
+
+ //
+ // Utilities for signature element types
+ //
+
+ static PTR_MethodTable GetElementType(CorElementType type)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetExistingClass((BinderClassID) (type));
+ }
+
+ // This should be called during CLR initialization only
+ static PTR_MethodTable LoadPrimitiveType(CorElementType et);
+
+ // Get the metasig, do a one-time conversion if necessary
+ static Signature GetSignature(LPHARDCODEDMETASIG pHardcodedSig);
+ static Signature GetTargetSignature(LPHARDCODEDMETASIG pHardcodedSig);
+
+ //
+ // Static initialization
+ //
+ static void Startup();
+
+ //
+ // These are called by initialization code:
+ //
+ static void AttachModule(Module *pModule);
+
+#ifdef FEATURE_PREJIT
+ //
+ // Store the binding arrays to a prejit image
+ // so we don't have to do name lookup at runtime
+ //
+ void BindAll();
+ void Save(DataImage *image);
+ void Fixup(DataImage *image);
+#endif
+
+#ifdef _DEBUG
+ void Check();
+ void CheckExtended();
+#endif
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+private:
+
+ // We have two different instances of the binder in crossgen. The instance local methods
+ // are used when it is necessary to differentiate between them.
+ PTR_MethodTable LookupClassLocal(BinderClassID id);
+ MethodDesc * LookupMethodLocal(BinderMethodID id);
+ FieldDesc * LookupFieldLocal(BinderFieldID id);
+
+ PTR_MethodTable GetClassLocal(BinderClassID id);
+ MethodDesc * GetMethodLocal(BinderMethodID id);
+ FieldDesc * GetFieldLocal(BinderFieldID id);
+
+ static PTR_MethodTable LookupClass(BinderClassID id);
+ static MethodDesc * LookupMethod(BinderMethodID id);
+ static FieldDesc * LookupField(BinderFieldID id);
+
+ static PTR_MethodTable LookupClassIfExist(BinderClassID id);
+
+ Signature GetSignatureLocal(LPHARDCODEDMETASIG pHardcodedSig);
+
+ void BuildConvertedSignature(const BYTE* pSig, SigBuilder * pSigBuilder);
+ const BYTE* ConvertSignature(LPHARDCODEDMETASIG pHardcodedSig, const BYTE* pSig);
+
+ void SetDescriptions(Module * pModule,
+ const MscorlibClassDescription * pClassDescriptions, USHORT nClasses,
+ const MscorlibMethodDescription * pMethodDescriptions, USHORT nMethods,
+ const MscorlibFieldDescription * pFieldDescriptions, USHORT nFields);
+
+ void AllocateTables();
+
+#ifdef _DEBUG
+ static void TriggerGCUnderStress();
+#endif
+
+ PTR_Module m_pModule;
+
+ DPTR(PTR_MethodTable) m_pClasses;
+ DPTR(PTR_MethodDesc) m_pMethods;
+ DPTR(PTR_FieldDesc) m_pFields;
+
+ // This is necessary to avoid embedding a copy of the descriptions into mscordacwks
+ DPTR(const MscorlibClassDescription) m_classDescriptions;
+ DPTR(const MscorlibMethodDescription) m_methodDescriptions;
+ DPTR(const MscorlibFieldDescription) m_fieldDescriptions;
+
+ USHORT m_cClasses;
+ USHORT m_cMethods;
+ USHORT m_cFields;
+
+ static CrstStatic s_SigConvertCrst;
+
+#ifdef _DEBUG
+
+ struct OffsetAndSizeCheck
+ {
+ PTR_CSTR classNameSpace;
+ PTR_CSTR className;
+ SIZE_T expectedClassSize;
+
+ PTR_CSTR fieldName;
+ SIZE_T expectedFieldOffset;
+ SIZE_T expectedFieldSize;
+ };
+
+ static const OffsetAndSizeCheck OffsetsAndSizes[];
+
+#endif
+};
+
+//
+// Global bound modules:
+//
+
+GVAL_DECL(MscorlibBinder, g_Mscorlib);
+
+FORCEINLINE PTR_MethodTable MscorlibBinder::GetClass(BinderClassID id)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(ThrowOutOfMemory());
+
+ PRECONDITION(id != CLASS__NIL);
+ PRECONDITION((&g_Mscorlib)->m_cClasses > 0); // Make sure mscorlib has been loaded.
+ PRECONDITION(id <= (&g_Mscorlib)->m_cClasses);
+ }
+ CONTRACTL_END;
+
+ // Force a GC here under stress because type loading could trigger GC nondeterministically
+ INDEBUG(TriggerGCUnderStress());
+
+ PTR_MethodTable pMT = VolatileLoad(&((&g_Mscorlib)->m_pClasses[id]));
+ if (pMT == NULL)
+ return LookupClass(id);
+ return pMT;
+}
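+
+// Typical call site (illustrative only; CLASS__STRING is one of the element
+// type aliases defined above):
+//
+// MethodTable * pStringMT = MscorlibBinder::GetClass(CLASS__STRING);
+//
+// The first call for a given ID takes the LookupClass slow path and caches the
+// result; later calls reduce to a single VolatileLoad.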
+
+FORCEINLINE MethodDesc * MscorlibBinder::GetMethod(BinderMethodID id)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(ThrowOutOfMemory());
+
+ PRECONDITION(id != METHOD__NIL);
+ PRECONDITION(id <= (&g_Mscorlib)->m_cMethods);
+ }
+ CONTRACTL_END;
+
+ // Force a GC here under stress because type loading could trigger GC nondeterministically
+ INDEBUG(TriggerGCUnderStress());
+
+ MethodDesc * pMD = VolatileLoad(&((&g_Mscorlib)->m_pMethods[id]));
+ if (pMD == NULL)
+ return LookupMethod(id);
+ return pMD;
+}
+
+FORCEINLINE FieldDesc * MscorlibBinder::GetField(BinderFieldID id)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(ThrowOutOfMemory());
+
+ PRECONDITION(id != FIELD__NIL);
+ PRECONDITION(id <= (&g_Mscorlib)->m_cFields);
+ }
+ CONTRACTL_END;
+
+ // Force a GC here under stress because type loading could trigger GC nondeterministically
+ INDEBUG(TriggerGCUnderStress());
+
+ FieldDesc * pFD = VolatileLoad(&((&g_Mscorlib)->m_pFields[id]));
+ if (pFD == NULL)
+ return LookupField(id);
+ return pFD;
+}
+
+FORCEINLINE PTR_MethodTable MscorlibBinder::GetExistingClass(BinderClassID id)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ PTR_MethodTable pMT = (&g_Mscorlib)->m_pClasses[id];
+ _ASSERTE(pMT != NULL);
+ return pMT;
+}
+
+FORCEINLINE MethodDesc * MscorlibBinder::GetExistingMethod(BinderMethodID id)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ MethodDesc * pMD = (&g_Mscorlib)->m_pMethods[id];
+ _ASSERTE(pMD != NULL);
+ return pMD;
+}
+
+FORCEINLINE FieldDesc * MscorlibBinder::GetExistingField(BinderFieldID id)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ FieldDesc * pFD = (&g_Mscorlib)->m_pFields[id];
+ _ASSERTE(pFD != NULL);
+ return pFD;
+}
+
+FORCEINLINE PTR_MethodTable MscorlibBinder::GetClassIfExist(BinderClassID id)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ FORBID_FAULT;
+ MODE_ANY;
+
+ PRECONDITION(id != CLASS__NIL);
+ PRECONDITION(id <= (&g_Mscorlib)->m_cClasses);
+ }
+ CONTRACTL_END;
+
+ PTR_MethodTable pMT = VolatileLoad(&((&g_Mscorlib)->m_pClasses[id]));
+ if (pMT == NULL)
+ return LookupClassIfExist(id);
+ return pMT;
+}
+
+
+FORCEINLINE PTR_Module MscorlibBinder::GetModule()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ PTR_Module pModule = (&g_Mscorlib)->m_pModule;
+ _ASSERTE(pModule != NULL);
+ return pModule;
+}
+
+FORCEINLINE LPCUTF8 MscorlibBinder::GetClassNameSpace(BinderClassID id)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(id != CLASS__NIL);
+ _ASSERTE(id <= (&g_Mscorlib)->m_cClasses);
+ return (&g_Mscorlib)->m_classDescriptions[id].nameSpace;
+}
+
+FORCEINLINE LPCUTF8 MscorlibBinder::GetClassName(BinderClassID id)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(id != CLASS__NIL);
+ _ASSERTE(id <= (&g_Mscorlib)->m_cClasses);
+ return (&g_Mscorlib)->m_classDescriptions[id].name;
+}
+
+FORCEINLINE LPCUTF8 MscorlibBinder::GetMethodName(BinderMethodID id)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(id != METHOD__NIL);
+ _ASSERTE(id <= (&g_Mscorlib)->m_cMethods);
+ return (&g_Mscorlib)->m_methodDescriptions[id-1].name;
+}
+
+FORCEINLINE LPHARDCODEDMETASIG MscorlibBinder::GetMethodSig(BinderMethodID id)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(id != METHOD__NIL);
+ _ASSERTE(id <= (&g_Mscorlib)->m_cMethods);
+ return (&g_Mscorlib)->m_methodDescriptions[id-1].sig;
+}
+
+FORCEINLINE LPCUTF8 MscorlibBinder::GetFieldName(BinderFieldID id)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(id != FIELD__NIL);
+ _ASSERTE(id <= (&g_Mscorlib)->m_cFields);
+ return (&g_Mscorlib)->m_fieldDescriptions[id-1].name;
+}
+
+#endif // _BINDERMODULE_H_
diff --git a/src/vm/cachelinealloc.cpp b/src/vm/cachelinealloc.cpp
new file mode 100644
index 0000000000..0b6d2b4bcb
--- /dev/null
+++ b/src/vm/cachelinealloc.cpp
@@ -0,0 +1,296 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//---------------------------------------------------------------------------
+// CCacheLineAllocator
+//
+
+//
+// This file implements the CCacheLineAllocator class.
+//
+// @comm
+//
+// Notes:
+// The CacheLineAllocator maintains a pool of free CacheLines
+//
+// The CacheLine allocator provides the member functions
+// GetCacheLine32/GetCacheLine64 and FreeCacheLine32/FreeCacheLine64.
+//---------------------------------------------------------------------------
+
+
+
+#include "common.h"
+#include <stddef.h>
+#include "cachelinealloc.h"
+
+#include "threads.h"
+#include "excep.h"
+
+///////////////////////////////////////////////////////
+// CCacheLineAllocator::CCacheLineAllocator()
+//
+//////////////////////////////////////////////////////
+
+CCacheLineAllocator::CCacheLineAllocator()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_freeList32.Init();
+ m_freeList64.Init();
+ m_registryList.Init();
+}
+
+///////////////////////////////////////////////////////
+// void CCacheLineAllocator::~CCacheLineAllocator()
+//
+//////////////////////////////////////////////////////
+
+CCacheLineAllocator::~CCacheLineAllocator()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LPCacheLine tempPtr = NULL;
+ while((tempPtr = m_registryList.RemoveHead()) != NULL)
+ {
+ for (int i =0; i < CacheLine::numEntries; i++)
+ {
+ if(tempPtr->m_pAddr[i] != NULL)
+ {
+ if (!g_fProcessDetach)
+ VFree(tempPtr->m_pAddr[i]);
+ }
+ }
+ delete tempPtr;
+ }
+}
+
+
+
+///////////////////////////////////////////////////////
+// static void *CCacheLineAllocator::VAlloc(ULONG cbSize)
+//
+//////////////////////////////////////////////////////
+
+
+void *CCacheLineAllocator::VAlloc(ULONG cbSize)
+{
+ CONTRACT(void*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(CONTRACT_RETURN NULL);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // reserve and commit a page, and register it so it can be released later via VFree
+
+ int i =0;
+ void* pv = ClrVirtualAlloc (NULL, cbSize, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (pv != NULL)
+ {
+ LPCacheLine tempPtr = m_registryList.GetHead();
+ if (tempPtr == NULL)
+ {
+ goto LNew;
+ }
+
+ for (i =0; i < CacheLine::numEntries; i++)
+ {
+ if(tempPtr->m_pAddr[i] == NULL)
+ {
+ tempPtr->m_pAddr[i] = pv;
+ RETURN pv;
+ }
+ }
+
+LNew:
+ // initialize the bucket before returning
+ tempPtr = new (nothrow) CacheLine();
+ if (tempPtr != NULL)
+ {
+ tempPtr->Init64();
+ tempPtr->m_pAddr[0] = pv;
+ m_registryList.InsertHead(tempPtr);
+ }
+ else
+ {
+ // couldn't find space to register this page
+ ClrVirtualFree(pv, 0, MEM_RELEASE);
+ RETURN NULL;
+ }
+ }
+ RETURN pv;
+}
+
+///////////////////////////////////////////////////////
+// void CCacheLineAllocator::VFree(void* pv)
+//
+//////////////////////////////////////////////////////
+
+
+void CCacheLineAllocator::VFree(void* pv)
+{
+ BOOL bRes = FALSE;
+
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pv));
+ POSTCONDITION(bRes);
+ }
+ CONTRACT_END;
+
+ // helper to call virtual free to release memory
+
+ bRes = ClrVirtualFree (pv, 0, MEM_RELEASE);
+
+ RETURN_VOID;
+}
+
+///////////////////////////////////////////////////////
+// void *CCacheLineAllocator::GetCacheLine()
+//
+//////////////////////////////////////////////////////
+
+//WARNING: must have a lock when calling this function
+void *CCacheLineAllocator::GetCacheLine64()
+{
+ CONTRACT(void*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(CONTRACT_RETURN NULL);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ LPCacheLine tempPtr = m_freeList64.RemoveHead();
+ if (tempPtr != NULL)
+ {
+ // initialize the bucket before returning
+ tempPtr->Init64();
+ RETURN tempPtr;
+ }
+
+#define AllocSize (4096*16)
+
+ ////////////////////////////////
+ // Virtual allocation of some more cache lines
+
+ BYTE* ptr = (BYTE*)VAlloc(AllocSize);
+
+ if(!ptr)
+ RETURN NULL;
+
+
+ tempPtr = (LPCacheLine)ptr;
+ // Link all the buckets
+ tempPtr = tempPtr+1;
+ LPCacheLine maxPtr = (LPCacheLine)(ptr + AllocSize);
+
+ while(tempPtr < maxPtr)
+ {
+ m_freeList64.InsertHead(tempPtr);
+ tempPtr++;
+ }
+
+ // return the first block
+ tempPtr = (LPCacheLine)ptr;
+ tempPtr->Init64();
+ RETURN tempPtr;
+}
+
+
+///////////////////////////////////////////////////////
+// void *CCacheLineAllocator::GetCacheLine32()
+//
+//////////////////////////////////////////////////////
+
+//WARNING: must have a lock when calling this function
+void *CCacheLineAllocator::GetCacheLine32()
+{
+ CONTRACT(void*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(CONTRACT_RETURN NULL);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ LPCacheLine tempPtr = m_freeList32.RemoveHead();
+ if (tempPtr != NULL)
+ {
+ // initialize the bucket before returning
+ tempPtr->Init32();
+ RETURN tempPtr;
+ }
+ tempPtr = (LPCacheLine)GetCacheLine64();
+ if (tempPtr != NULL)
+ {
+ m_freeList32.InsertHead(tempPtr);
+ tempPtr = (LPCacheLine)((BYTE *)tempPtr+32);
+ }
+ RETURN tempPtr;
+}
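+
+// Note: when the 32-byte free list is empty, the code above carves a 64-byte
+// line in two -- the low half is pushed onto m_freeList32 and the high half
+// (tempPtr + 32) is returned, so one 64-byte line services two 32-byte
+// requests.
+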
+///////////////////////////////////////////////////////
+// void CCacheLineAllocator::FreeCacheLine64(void * tempPtr)
+//
+//////////////////////////////////////////////////////
+//WARNING: must have a lock when calling this function
+void CCacheLineAllocator::FreeCacheLine64(void * tempPtr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(tempPtr));
+ }
+ CONTRACTL_END;
+
+ LPCacheLine pCLine = (LPCacheLine )tempPtr;
+ m_freeList64.InsertHead(pCLine);
+}
+
+
+///////////////////////////////////////////////////////
+// void CCacheLineAllocator::FreeCacheLine32(void * tempPtr)
+//
+//////////////////////////////////////////////////////
+//WARNING: must have a lock when calling this function
+void CCacheLineAllocator::FreeCacheLine32(void * tempPtr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(tempPtr));
+ }
+ CONTRACTL_END;
+
+ LPCacheLine pCLine = (LPCacheLine )tempPtr;
+ m_freeList32.InsertHead(pCLine);
+}
diff --git a/src/vm/cachelinealloc.h b/src/vm/cachelinealloc.h
new file mode 100644
index 0000000000..d2184f7174
--- /dev/null
+++ b/src/vm/cachelinealloc.h
@@ -0,0 +1,147 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//---------------------------------------------------------------------------
+// CCacheLineAllocator
+//
+
+//
+// @doc
+// @module cachelineAlloc.h
+//
+// This file defines the CacheLine Allocator class.
+//
+// @comm
+//
+//
+// <nl> Definitions.:
+// <nl> Class Name Header file
+// <nl> --------------------------- ---------------
+// <nl> <c CCacheLineAllocator> cachelinealloc.h
+//
+// <nl><nl>
+// Notes:
+// The CacheLineAllocator maintains a pool of free CacheLines
+//
+// The CacheLine allocator provides the member functions
+// GetCacheLine32/GetCacheLine64 and FreeCacheLine32/FreeCacheLine64.
+//
+// <nl><nl>
+//
+//---------------------------------------------------------------------------
+
+#ifndef _H_CACHELINE_ALLOCATOR_
+#define _H_CACHELINE_ALLOCATOR_
+
+#include "slist.h"
+
+#include <pshpack1.h>
+
+class CacheLine
+{
+public:
+ enum
+ {
+ numEntries = 15,
+ numValidBytes = numEntries * sizeof(void *)
+ };
+
+ // store next pointer and the entries
+ SLink m_Link;
+ union
+ {
+ void* m_pAddr[numEntries];
+ BYTE m_xxx[numValidBytes];
+ };
+
+ // init
+ void Init32()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // initialize cacheline
+ memset(&m_Link,0,32);
+ }
+
+ void Init64()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // initialize cacheline
+ memset(&m_Link,0,64);
+ }
+
+ CacheLine()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // initialize cacheline
+ memset(&m_Link,0,sizeof(CacheLine));
+ }
+};
+#include <poppack.h>
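+
+// Layout note (assuming SLink is a single pointer): with 1-byte packing the
+// link plus the 15-entry payload occupy 16 pointer-sized slots, i.e. exactly
+// 64 bytes on a 32-bit build, which is what Init64's memset of 64 bytes
+// assumes.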
+
+typedef CacheLine* LPCacheLine;
+
+/////////////////////////////////////////////////////////
+// class CCacheLineAllocator
+// Handles Allocation/DeAllocation of cache lines
+// used for hash table overflow buckets
+///////////////////////////////////////////////////////
+class CCacheLineAllocator
+{
+ typedef SList<CacheLine, true> REGISTRYLIST;
+ typedef SList<CacheLine, true> FREELIST32;
+ typedef SList<CacheLine, true> FREELIST64;
+
+public:
+
+ //constructor
+ CCacheLineAllocator ();
+ //destructor
+ ~CCacheLineAllocator ();
+
+ // free cacheline blocks
+ FREELIST32 m_freeList32; //32 byte
+ FREELIST64 m_freeList64; //64 byte
+
+ // registry for virtual free
+ REGISTRYLIST m_registryList;
+
+ void *VAlloc(ULONG cbSize);
+
+ void VFree(void* pv);
+
+ // GetCacheLine,
+ void * GetCacheLine32();
+
+ // GetCacheLine,
+ void * GetCacheLine64();
+
+ // FreeCacheLine,
+ void FreeCacheLine32(void *pCacheLine);
+
+ // FreeCacheLine,
+ void FreeCacheLine64(void *pCacheLine);
+
+};
+#endif
diff --git a/src/vm/callhelpers.cpp b/src/vm/callhelpers.cpp
new file mode 100644
index 0000000000..a910c0ea30
--- /dev/null
+++ b/src/vm/callhelpers.cpp
@@ -0,0 +1,684 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+/*
+ * CallHelpers.CPP: helpers to call managed code
+ *
+
+ */
+
+#include "common.h"
+#include "dbginterface.h"
+
+// To include declaration of "AppDomainTransitionExceptionFilter"
+#include "excep.h"
+
+// To include declaration of "SignatureNative"
+#include "runtimehandles.h"
+
+
+#if defined(FEATURE_MULTICOREJIT) && defined(_DEBUG)
+
+// Allow the system module, and first-party WinMD files for Appx
+
+void AssertMulticoreJitAllowedModule(PCODE pTarget)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ MethodDesc* pMethod = Entry2MethodDesc(pTarget, NULL);
+
+ Module * pModule = pMethod->GetModule_NoLogging();
+
+#if defined(FEATURE_APPX_BINDER)
+
+ // For Appx process, allow certain modules to load on background thread
+ if (AppX::IsAppXProcess())
+ {
+ if (MulticoreJitManager::IsLoadOkay(pModule))
+ {
+ return;
+ }
+ }
+#endif
+
+ _ASSERTE(pModule->IsSystem());
+}
+
+#endif
+
+// For X86, INSTALL_COMPLUS_EXCEPTION_HANDLER grants us sufficient protection to call into
+// managed code.
+//
+// But on 64-bit, the personality routine will not pop frames or trackers as exceptions unwind
+// out of managed code. Instead, we rely on explicit cleanup like CLRException::HandlerState::CleanupTry
+// or UMThunkUnwindFrameChainHandler.
+//
+// So most callers should call through CallDescrWorkerWithHandler (or a wrapper like MethodDesc::Call)
+// and get the platform-appropriate exception handling. A few places try to optimize by calling direct
+// to managed methods (see ArrayInitializeWorker or FastCallFinalize). This sort of thing is
+// dangerous. You have to worry about marking yourself as a legal managed caller and you have to
+// worry about how exceptions will be handled on a WIN64EXCEPTIONS plan. It is generally only suitable
+// for X86.
+
+//*******************************************************************************
+void CallDescrWorkerWithHandler(
+ CallDescrData * pCallDescrData,
+ BOOL fCriticalCall)
+{
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+#if defined(FEATURE_MULTICOREJIT) && defined(_DEBUG)
+
+ // For multicore JITting, the background thread should not call managed code, except when calling system code (e.g. throwing a managed exception)
+ if (GetThread()->HasThreadStateNC(Thread::TSNC_CallingManagedCodeDisabled))
+ {
+ AssertMulticoreJitAllowedModule(pCallDescrData->pTarget);
+ }
+
+#endif
+
+
+ BEGIN_CALL_TO_MANAGEDEX(fCriticalCall ? EEToManagedCriticalCall : EEToManagedDefault);
+
+ CallDescrWorker(pCallDescrData);
+
+ END_CALL_TO_MANAGED();
+}
+
+
+#if !defined(_WIN64) && defined(_DEBUG)
+
+//*******************************************************************************
+// assembly code, in i386/asmhelpers.asm
+void CallDescrWorker(CallDescrData * pCallDescrData)
+{
+ //
+ // This function must not have a contract ... its caller has pushed an FS:0 frame (COMPlusFrameHandler) that must
+ // be the first handler on the stack. The contract causes, at a minimum, a C++ exception handler to be pushed to
+ // handle the destruction of the contract object. If there is an exception in the managed code called from here,
+ // and that exception is handled in that same block of managed code, then the COMPlusFrameHandler will actually
+ // unwind the C++ handler before branching to the catch clause in managed code. That essentially causes an
+ // out-of-order destruction of the contract object, resulting in very odd crashes later.
+ //
+#if 0
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+#endif // 0
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ _ASSERTE(!NingenEnabled() && "You cannot invoke managed code inside the ngen compilation process.");
+
+ TRIGGERSGC_NOSTOMP(); // Can't stomp object refs because they are args to the function
+
+ // Save a copy of dangerousObjRefs in table.
+ Thread* curThread;
+ DWORD_PTR ObjRefTable[OBJREF_TABSIZE];
+
+ curThread = GetThread();
+ _ASSERTE(curThread != NULL);
+
+ static_assert_no_msg(sizeof(curThread->dangerousObjRefs) == sizeof(ObjRefTable));
+ memcpy(ObjRefTable, curThread->dangerousObjRefs, sizeof(ObjRefTable));
+
+#ifndef FEATURE_INTERPRETER
+ // When the interpreter is used, this may be called from preemptive code.
+ _ASSERTE(curThread->PreemptiveGCDisabled()); // Jitted code expects to be in cooperative mode
+#endif
+
+ // If the current thread owns spinlock or unbreakable lock, it cannot call managed code.
+ _ASSERTE(!curThread->HasUnbreakableLock() &&
+ (curThread->m_StateNC & Thread::TSNC_OwnsSpinLock) == 0);
+
+#ifdef _TARGET_ARM_
+ _ASSERTE(IsThumbCode(pCallDescrData->pTarget));
+#endif
+
+ CallDescrWorkerInternal(pCallDescrData);
+
+ // Restore dangerousObjRefs when we return back to EE after call
+ memcpy(curThread->dangerousObjRefs, ObjRefTable, sizeof(ObjRefTable));
+
+ TRIGGERSGC();
+
+ ENABLESTRESSHEAP();
+}
+#endif // !defined(_WIN64) && defined(_DEBUG)
+
+void DispatchCallDebuggerWrapper(
+ CallDescrData * pCallDescrData,
+ ContextTransitionFrame* pFrame,
+ BOOL fCriticalCall
+)
+{
+ // Use static contracts b/c we have SEH.
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ struct Param : NotifyOfCHFFilterWrapperParam
+ {
+ CallDescrData * pCallDescrData;
+ BOOL fCriticalCall;
+ } param;
+
+ param.pFrame = pFrame;
+ param.pCallDescrData = pCallDescrData;
+ param.fCriticalCall = fCriticalCall;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ CallDescrWorkerWithHandler(
+ pParam->pCallDescrData,
+ pParam->fCriticalCall);
+ }
+ PAL_EXCEPT_FILTER(AppDomainTransitionExceptionFilter)
+ {
+ // Should never reach here b/c handler should always continue search.
+ _ASSERTE(!"Unreachable");
+ }
+ PAL_ENDTRY
+}
+
+// Helper for VM->managed calls with simple signatures.
+void * DispatchCallSimple(
+ SIZE_T *pSrc,
+ DWORD numStackSlotsToCopy,
+ PCODE pTargetAddress,
+ DWORD dwDispatchCallSimpleFlags)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+#ifdef DEBUGGING_SUPPORTED
+ if (CORDebuggerTraceCall())
+ g_pDebugInterface->TraceCall((const BYTE *)pTargetAddress);
+#endif // DEBUGGING_SUPPORTED
+
+ CallDescrData callDescrData;
+
+#ifdef CALLDESCR_ARGREGS
+ callDescrData.pSrc = pSrc + NUM_ARGUMENT_REGISTERS;
+ callDescrData.numStackSlots = numStackSlotsToCopy;
+ callDescrData.pArgumentRegisters = (ArgumentRegisters *)pSrc;
+#else
+ callDescrData.pSrc = pSrc;
+ callDescrData.numStackSlots = numStackSlotsToCopy;
+#endif
+#ifdef CALLDESCR_FPARGREGS
+ callDescrData.pFloatArgumentRegisters = NULL;
+#endif
+#ifdef CALLDESCR_REGTYPEMAP
+ callDescrData.dwRegTypeMap = 0;
+#endif
+ callDescrData.fpReturnSize = 0;
+ callDescrData.pTarget = pTargetAddress;
+
+ if ((dwDispatchCallSimpleFlags & DispatchCallSimple_CatchHandlerFoundNotification) != 0)
+ {
+ DispatchCallDebuggerWrapper(
+ &callDescrData,
+ NULL,
+ dwDispatchCallSimpleFlags & DispatchCallSimple_CriticalCall);
+ }
+ else
+ {
+ CallDescrWorkerWithHandler(&callDescrData, dwDispatchCallSimpleFlags & DispatchCallSimple_CriticalCall);
+ }
+
+ return *(void **)(&callDescrData.returnValue);
+}
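+
+// Illustrative sketch (not part of the original change): a caller with a single
+// object argument could invoke DispatchCallSimple directly, where pManagedTarget
+// is a hypothetical PCODE obtained elsewhere:
+//
+//     SIZE_T args[NUM_ARGUMENT_REGISTERS] = { (SIZE_T)OBJECTREFToObject(orArg) };
+//     void * pResult = DispatchCallSimple(args, 0, pManagedTarget, DispatchCallSimple_CriticalCall);
+//
+// In practice the CALL_MANAGED_METHOD macros in callhelpers.h wrap this packing.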
+
+// This method performs the proper profiler and debugger callbacks before dispatching the
+// call. The caller has the responsibility of furnishing the target address, register and stack arguments.
+// Stack arguments should be in reverse order, and pSrc should point past the last argument.
+// The return value is left in pCallDescrData->returnValue; if an exception was thrown, it is
+// returned via *pRefException instead.
+void DispatchCall(
+ CallDescrData * pCallDescrData,
+ OBJECTREF *pRefException,
+ ContextTransitionFrame* pFrame /* = NULL */
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity *pSeverity /*= NULL*/
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ )
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+#ifdef DEBUGGING_SUPPORTED
+ if (CORDebuggerTraceCall())
+ g_pDebugInterface->TraceCall((const BYTE *)pCallDescrData->pTarget);
+#endif // DEBUGGING_SUPPORTED
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ if (pSeverity != NULL)
+ {
+ // By default, assume any exception that comes out is NotCorrupting
+ *pSeverity = NotCorrupting;
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ EX_TRY
+ {
+ DispatchCallDebuggerWrapper(pCallDescrData,
+ pFrame,
+ FALSE);
+ }
+ EX_CATCH
+ {
+ *pRefException = GET_THROWABLE();
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ if (pSeverity != NULL)
+ {
+ // Record the severity of the exception that was actually thrown
+ *pSeverity = GetThread()->GetExceptionState()->GetLastActiveExceptionCorruptionSeverity();
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ }
+ EX_END_CATCH(RethrowTransientExceptions);
+}
+
+#ifdef CALLDESCR_REGTYPEMAP
+//*******************************************************************************
+void FillInRegTypeMap(int argOffset, CorElementType typ, BYTE * pMap)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMap, NULL_NOT_OK));
+ }
+ CONTRACTL_END;
+
+ int regArgNum = TransitionBlock::GetArgumentIndexFromOffset(argOffset);
+
+ // Create a map of the first 8 argument types. This is used in
+ // CallDescrWorkerInternal to load args into general registers or
+ // floating point registers.
+ //
+ // We put these in order from the LSB to the MSB so that we can keep
+ // the map in a register and just examine the low byte and then shift
+ // right for each arg.
+
+ if (regArgNum < NUM_ARGUMENT_REGISTERS)
+ {
+ pMap[regArgNum] = typ;
+ }
+}
+#endif // CALLDESCR_REGTYPEMAP
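+
+// Illustrative sketch (an assumption, not original code): a consumer such as the
+// assembly helper CallDescrWorkerInternal can keep the completed map in a single
+// register and peel off one type per register argument:
+//
+//     BYTE typ = (BYTE)dwRegTypeMap; // type of the current register argument
+//     dwRegTypeMap >>= 8;            // shift right to expose the next argument's type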
+
+#if defined(_DEBUG) && defined(FEATURE_COMINTEROP)
+extern int g_fMainThreadApartmentStateSet;
+extern int g_fInitializingInitialAD;
+extern Volatile<LONG> g_fInExecuteMainMethod;
+#endif
+
+//*******************************************************************************
+#ifdef FEATURE_INTERPRETER
+ARG_SLOT MethodDescCallSite::CallTargetWorker(const ARG_SLOT *pArguments, bool transitionToPreemptive)
+#else
+ARG_SLOT MethodDescCallSite::CallTargetWorker(const ARG_SLOT *pArguments)
+#endif
+{
+ //
+ // WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
+ //
+ // This method needs to have a GC_TRIGGERS contract because it
+ // calls managed code. However, IT MAY NOT TRIGGER A GC ITSELF
+ // because the argument array is not protected and may contain gc
+ // refs.
+ //
+ // WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
+ //
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_COOPERATIVE;
+ PRECONDITION(GetAppDomain()->CheckCanExecuteManagedCode(m_pMD));
+ PRECONDITION(m_pMD->CheckActivated()); // EnsureActive will trigger, so we must already be activated
+
+#ifdef FEATURE_COMINTEROP
+ // If we're an exe, then we must either be initializing the first AD, or have already setup the main thread's
+ // COM apartment state.
+ // If you hit this assert, then you likely introduced code during startup that could inadvertently
+ // initialize the COM apartment state of the main thread before we set it based on the user attribute.
+ PRECONDITION(g_fInExecuteMainMethod ? (g_fMainThreadApartmentStateSet || g_fInitializingInitialAD) : TRUE);
+#endif // FEATURE_COMINTEROP
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!NingenEnabled() && "You cannot invoke managed code inside the ngen compilation process.");
+
+ // If we're invoking an mscorlib method, lift the restriction on type load limits. Calls into mscorlib are
+ // typically calls into specific and controlled helper methods for security checks and other linktime tasks.
+ //
+ // @todo: In an ideal world, we would require each of those sites to do the override rather than disabling
+ // the assert broadly here. However, by limiting the override to mscorlib methods, we should still be able
+ // to effectively enforce the more general rule about loader recursion.
+ MAYBE_OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED, m_pMD->GetModule()->IsSystem());
+
+ LPBYTE pTransitionBlock;
+ UINT nStackBytes;
+ UINT fpReturnSize;
+#ifdef CALLDESCR_REGTYPEMAP
+ UINT64 dwRegTypeMap;
+#endif
+#ifdef CALLDESCR_FPARGREGS
+ FloatArgumentRegisters *pFloatArgumentRegisters = NULL;
+#endif
+ void* pvRetBuff = NULL;
+
+ {
+ //
+ // the incoming argument array is not gc-protected, so we
+ // may not trigger a GC before we actually call managed code
+ //
+ GCX_FORBID();
+
+ // Record this call if required
+ g_IBCLogger.LogMethodDescAccess(m_pMD);
+
+ //
+ // All types must already be loaded. This macro also sets up a FAULT_FORBID region which is
+ // also required for critical calls since we cannot inject any failure points between the
+ // caller of MethodDesc::CallDescr and the actual transition to managed code.
+ //
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ _ASSERTE(GetAppDomain()->ShouldHaveCode());
+
+#ifdef FEATURE_INTERPRETER
+ _ASSERTE(isCallConv(m_methodSig.GetCallingConvention(), IMAGE_CEE_CS_CALLCONV_DEFAULT)
+ || isCallConv(m_methodSig.GetCallingConvention(), CorCallingConvention(IMAGE_CEE_CS_CALLCONV_C))
+ || isCallConv(m_methodSig.GetCallingConvention(), CorCallingConvention(IMAGE_CEE_CS_CALLCONV_VARARG))
+ || isCallConv(m_methodSig.GetCallingConvention(), CorCallingConvention(IMAGE_CEE_CS_CALLCONV_NATIVEVARARG))
+ || isCallConv(m_methodSig.GetCallingConvention(), CorCallingConvention(IMAGE_CEE_CS_CALLCONV_STDCALL)));
+#else
+ _ASSERTE(isCallConv(m_methodSig.GetCallingConvention(), IMAGE_CEE_CS_CALLCONV_DEFAULT));
+ _ASSERTE(!(m_methodSig.GetCallingConventionInfo() & CORINFO_CALLCONV_PARAMTYPE));
+#endif
+
+#ifdef DEBUGGING_SUPPORTED
+ if (CORDebuggerTraceCall())
+ {
+ g_pDebugInterface->TraceCall((const BYTE *)m_pCallTarget);
+ }
+#endif // DEBUGGING_SUPPORTED
+
+#if CHECK_APP_DOMAIN_LEAKS
+ if (g_pConfig->AppDomainLeaks())
+ {
+ // See if we are in the correct domain to call on the object
+ if (m_methodSig.HasThis() && !m_pMD->GetMethodTable()->IsValueType())
+ {
+ CONTRACT_VIOLATION(ThrowsViolation|GCViolation|FaultViolation);
+ OBJECTREF pThis = ArgSlotToObj(pArguments[0]);
+ if (!pThis->AssignAppDomain(GetAppDomain()))
+ _ASSERTE(!"Attempt to call method on object in wrong domain");
+ }
+ }
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+#ifdef _DEBUG
+ {
+ // The metasig should be reset
+ _ASSERTE(m_methodSig.GetArgNum() == 0);
+
+ // Check to see that any value type args have been loaded and restored.
+ // This is because we may be calling a FramedMethodFrame which will use the sig
+ // to trace the args, but if any are unloaded we will be stuck if a GC occurs.
+ _ASSERTE(m_pMD->IsRestored_NoLogging());
+ CorElementType argType;
+ while ((argType = m_methodSig.NextArg()) != ELEMENT_TYPE_END)
+ {
+ if (argType == ELEMENT_TYPE_VALUETYPE)
+ {
+ TypeHandle th = m_methodSig.GetLastTypeHandleThrowing(ClassLoader::DontLoadTypes);
+ CONSISTENCY_CHECK(th.CheckFullyLoaded());
+ CONSISTENCY_CHECK(th.IsRestored_NoLogging());
+ }
+ }
+ m_methodSig.Reset();
+ }
+#endif // _DEBUG
+
+ DWORD arg = 0;
+
+ nStackBytes = m_argIt.SizeOfFrameArgumentArray();
+
+ // Create a fake FramedMethodFrame on the stack.
+
+ // Note that SizeOfFrameArgumentArray does overflow checks with sufficient margin to prevent overflows here
+ DWORD dwAllocaSize = TransitionBlock::GetNegSpaceSize() + sizeof(TransitionBlock) + nStackBytes;
+
+ LPBYTE pAlloc = (LPBYTE)_alloca(dwAllocaSize);
+
+ pTransitionBlock = pAlloc + TransitionBlock::GetNegSpaceSize();
+
+#ifdef CALLDESCR_REGTYPEMAP
+ dwRegTypeMap = 0;
+ BYTE* pMap = (BYTE*)&dwRegTypeMap;
+#endif // CALLDESCR_REGTYPEMAP
+
+ if (m_argIt.HasThis())
+ {
+ *((LPVOID*)(pTransitionBlock + m_argIt.GetThisOffset())) = ArgSlotToPtr(pArguments[arg++]);
+ }
+
+ if (m_argIt.HasRetBuffArg())
+ {
+ *((LPVOID*)(pTransitionBlock + m_argIt.GetRetBuffArgOffset())) = ArgSlotToPtr(pArguments[arg++]);
+ }
+#ifdef FEATURE_HFA
+#ifdef FEATURE_INTERPRETER
+ // Something is necessary for HFAs, but the test below (in the FEATURE_INTERPRETER ifdef)
+ // doesn't seem to be the proper one. It fires, incorrectly, for a one-word struct that
+ // *doesn't* have a ret buff. So we'll try this instead: we're here because the method
+ // doesn't have a ret buff. If it would have had one, except that the struct being
+ // returned is an HFA, then assume the invoker made this slot a ret buff pointer.
+ // It's an HFA if the return type is a struct but has a non-zero FP return size.
+ // (If it were an HFA but had a ret buff because it was varargs, then we wouldn't be
+ // here. Also, this test won't work for float enums.)
+ else if (m_methodSig.GetReturnType() == ELEMENT_TYPE_VALUETYPE
+ && m_argIt.GetFPReturnSize() > 0)
+#else // FEATURE_INTERPRETER
+ else if (ELEMENT_TYPE_VALUETYPE == m_methodSig.GetReturnTypeNormalized())
+#endif // FEATURE_INTERPRETER
+ {
+ pvRetBuff = ArgSlotToPtr(pArguments[arg++]);
+ }
+#endif // FEATURE_HFA
+
+
+#ifdef FEATURE_INTERPRETER
+ if (m_argIt.IsVarArg())
+ {
+ *((LPVOID*)(pTransitionBlock + m_argIt.GetVASigCookieOffset())) = ArgSlotToPtr(pArguments[arg++]);
+ }
+
+ if (m_argIt.HasParamType())
+ {
+ *((LPVOID*)(pTransitionBlock + m_argIt.GetParamTypeArgOffset())) = ArgSlotToPtr(pArguments[arg++]);
+ }
+#endif
+
+ int ofs;
+ for (; TransitionBlock::InvalidOffset != (ofs = m_argIt.GetNextOffset()); arg++)
+ {
+#ifdef CALLDESCR_REGTYPEMAP
+ FillInRegTypeMap(ofs, m_argIt.GetArgType(), pMap);
+#endif
+
+#ifdef CALLDESCR_FPARGREGS
+ // Under CALLDESCR_FPARGREGS, negative offsets indicate arguments in floating point
+ // registers. If we have at least one such argument we point the call worker at the
+ // floating point area of the frame (we leave it null otherwise since the worker can
+ // perform a useful optimization if it knows no floating point registers need to be set up).
+ if ((ofs < 0) && (pFloatArgumentRegisters == NULL))
+ pFloatArgumentRegisters = (FloatArgumentRegisters*)(pTransitionBlock +
+ TransitionBlock::GetOffsetOfFloatArgumentRegisters());
+#endif
+
+#if CHECK_APP_DOMAIN_LEAKS
+ // Make sure the arg is in the right app domain
+ if (g_pConfig->AppDomainLeaks() && m_argIt.GetArgType() == ELEMENT_TYPE_CLASS)
+ {
+ CONTRACT_VIOLATION(ThrowsViolation|GCViolation|FaultViolation);
+ OBJECTREF objRef = ArgSlotToObj(pArguments[arg]);
+ if (!objRef->AssignAppDomain(GetAppDomain()))
+ _ASSERTE(!"Attempt to pass object in wrong app domain to method");
+ }
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+ PVOID pDest = pTransitionBlock + ofs;
+
+ UINT32 stackSize = m_argIt.GetArgSize();
+ switch (stackSize)
+ {
+ case 1:
+ case 2:
+ case 4:
+ *((INT32*)pDest) = (INT32)pArguments[arg];
+ break;
+
+ case 8:
+ *((INT64*)pDest) = pArguments[arg];
+ break;
+
+ default:
+ // The ARG_SLOT contains a pointer to the value-type
+#ifdef ENREGISTERED_PARAMTYPE_MAXSIZE
+ if (m_argIt.IsArgPassedByRef())
+ {
+ // We need to pass in a pointer, but be careful of the ARG_SLOT calling convention.
+ // We might already have a pointer in the ARG_SLOT
+ *(PVOID*)pDest = stackSize>sizeof(ARG_SLOT) ?
+ (LPVOID)ArgSlotToPtr(pArguments[arg]) :
+ (LPVOID)ArgSlotEndianessFixup((ARG_SLOT*)&pArguments[arg], stackSize);
+ }
+ else
+#endif // ENREGISTERED_PARAMTYPE_MAXSIZE
+ if (stackSize>sizeof(ARG_SLOT))
+ {
+ CopyMemory(pDest, ArgSlotToPtr(pArguments[arg]), stackSize);
+ }
+ else
+ {
+ CopyMemory(pDest, (LPVOID) (&pArguments[arg]), stackSize);
+ }
+ break;
+ }
+ }
+
+ fpReturnSize = m_argIt.GetFPReturnSize();
+
+ } // END GCX_FORBID & ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE
+
+ CallDescrData callDescrData;
+
+ callDescrData.pSrc = pTransitionBlock + sizeof(TransitionBlock);
+ callDescrData.numStackSlots = nStackBytes / STACK_ELEM_SIZE;
+#ifdef CALLDESCR_ARGREGS
+ callDescrData.pArgumentRegisters = (ArgumentRegisters*)(pTransitionBlock + TransitionBlock::GetOffsetOfArgumentRegisters());
+#endif
+#ifdef CALLDESCR_FPARGREGS
+ callDescrData.pFloatArgumentRegisters = pFloatArgumentRegisters;
+#endif
+#ifdef CALLDESCR_REGTYPEMAP
+ callDescrData.dwRegTypeMap = dwRegTypeMap;
+#endif
+ callDescrData.fpReturnSize = fpReturnSize;
+ callDescrData.pTarget = m_pCallTarget;
+
+#ifdef FEATURE_INTERPRETER
+ if (transitionToPreemptive)
+ {
+ GCPreemp transitionIfILStub(transitionToPreemptive);
+ DWORD* pLastError = &GetThread()->m_dwLastErrorInterp;
+ CallDescrWorkerInternal(&callDescrData);
+ *pLastError = GetLastError();
+ }
+ else
+#endif // FEATURE_INTERPRETER
+ {
+ CallDescrWorkerWithHandler(&callDescrData);
+ }
+
+ if (pvRetBuff != NULL)
+ {
+ memcpyNoGCRefs(pvRetBuff, &callDescrData.returnValue, sizeof(callDescrData.returnValue));
+ }
+
+ ARG_SLOT retval = *(ARG_SLOT *)(&callDescrData.returnValue);
+
+#if !defined(_WIN64) && BIGENDIAN
+ {
+ GCX_FORBID();
+
+ if (!m_methodSig.Is64BitReturn())
+ {
+ retval >>= 32;
+ }
+ }
+#endif // !defined(_WIN64) && BIGENDIAN
+
+ return retval;
+}
+
+void CallDefaultConstructor(OBJECTREF ref)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = ref->GetTrueMethodTable();
+
+ PREFIX_ASSUME(pMT != NULL);
+
+ if (!pMT->HasDefaultConstructor())
+ {
+ SString ctorMethodName(SString::Utf8, COR_CTOR_METHOD_NAME);
+ COMPlusThrowNonLocalized(kMissingMethodException, ctorMethodName.GetUnicode());
+ }
+
+ GCPROTECT_BEGIN (ref);
+
+ MethodDesc *pMD = pMT->GetDefaultConstructor();
+
+ PREPARE_NONVIRTUAL_CALLSITE_USING_METHODDESC(pMD);
+ DECLARE_ARGHOLDER_ARRAY(CtorArgs, 1);
+ CtorArgs[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ref);
+
+ // Call the ctor...
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD_NORET(CtorArgs);
+
+ GCPROTECT_END ();
+}
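+
+// Illustrative usage of CallDefaultConstructor (hypothetical caller; cooperative
+// GC mode and a loaded MethodTable pMT are assumed):
+//
+//     OBJECTREF orNew = AllocateObject(pMT);
+//     GCPROTECT_BEGIN(orNew);
+//     CallDefaultConstructor(orNew); // throws MissingMethodException if no default ctor
+//     GCPROTECT_END();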
diff --git a/src/vm/callhelpers.h b/src/vm/callhelpers.h
new file mode 100644
index 0000000000..07dbf3505f
--- /dev/null
+++ b/src/vm/callhelpers.h
@@ -0,0 +1,654 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+/*============================================================
+**
+** File: callhelpers.h
+** Purpose: Provides helpers for making managed calls
+**
+
+===========================================================*/
+#ifndef __CALLHELPERS_H__
+#define __CALLHELPERS_H__
+
+struct CallDescrData
+{
+ //
+ // Input arguments
+ //
+ LPVOID pSrc;
+ UINT32 numStackSlots;
+#ifdef CALLDESCR_ARGREGS
+ const ArgumentRegisters * pArgumentRegisters;
+#endif
+#ifdef CALLDESCR_FPARGREGS
+ const FloatArgumentRegisters * pFloatArgumentRegisters;
+#endif
+#ifdef CALLDESCR_REGTYPEMAP
+ UINT64 dwRegTypeMap;
+#endif
+ UINT32 fpReturnSize;
+ PCODE pTarget;
+
+ //
+ // Return value
+ //
+#ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
+ // Use UINT64 to ensure proper alignment
+ UINT64 returnValue[ENREGISTERED_RETURNTYPE_MAXSIZE / sizeof(UINT64)];
+#else
+ UINT64 returnValue;
+#endif
+};
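+
+// Illustrative sketch (not part of the original header): a minimal caller fills in
+// the input fields and reads returnValue after the call. pArgs and pCode are
+// hypothetical; platform-specific fields (pArgumentRegisters and friends) must also
+// be initialized where they are defined:
+//
+//     CallDescrData cdd;
+//     cdd.pSrc = pArgs;    // argument area, stack arguments in reverse order
+//     cdd.numStackSlots = 0;
+//     cdd.fpReturnSize = 0;
+//     cdd.pTarget = pCode; // managed entry point
+//     CallDescrWorkerWithHandler(&cdd);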
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+
+extern "C" void STDCALL CallDescrWorkerInternal(CallDescrData * pCallDescrData);
+
+#if !defined(_WIN64) && defined(_DEBUG)
+void CallDescrWorker(CallDescrData * pCallDescrData);
+#else
+#define CallDescrWorker(pCallDescrData) CallDescrWorkerInternal(pCallDescrData)
+#endif
+
+void CallDescrWorkerWithHandler(
+ CallDescrData * pCallDescrData,
+ BOOL fCriticalCall = FALSE);
+
+void DispatchCall(
+ CallDescrData * pCallDescrData,
+ OBJECTREF * pRefException,
+ ContextTransitionFrame* pFrame = NULL
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity * pSeverity = NULL
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+
+// Helper for VM->managed calls with simple signatures.
+void * DispatchCallSimple(
+ SIZE_T *pSrc,
+ DWORD numStackSlotsToCopy,
+ PCODE pTargetAddress,
+ DWORD dwDispatchCallSimpleFlags);
+
+bool IsCerRootMethod(MethodDesc *pMD);
+
+class MethodDescCallSite
+{
+private:
+ MethodDesc* m_pMD;
+ PCODE m_pCallTarget;
+ MetaSig m_methodSig;
+ ArgIterator m_argIt;
+
+#ifdef _DEBUG
+ __declspec(noinline) void LogWeakAssert()
+ {
+ LIMITED_METHOD_CONTRACT;
+ LOG((LF_ASSERT, LL_WARNING, "%s::%s\n", m_pMD->m_pszDebugClassName, m_pMD->m_pszDebugMethodName));
+ }
+#endif // _DEBUG
+
+ void DefaultInit(OBJECTREF* porProtectedThis)
+ {
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ //
+ // Make sure we are passing in a 'this' if and only if it is required
+ //
+ if (m_pMD->IsVtableMethod())
+ {
+ CONSISTENCY_CHECK_MSG(NULL != porProtectedThis, "You did not pass in the 'this' object for a vtable method");
+ }
+ else
+ {
+ if (NULL != porProtectedThis)
+ {
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_AssertOnUnneededThis))
+ {
+ CONSISTENCY_CHECK_MSG(NULL == porProtectedThis, "You passed in a 'this' object to a non-vtable method.");
+ }
+ else
+ {
+ LogWeakAssert();
+ }
+
+ }
+ }
+#endif // _DEBUG
+
+ m_pCallTarget = m_pMD->GetCallTarget(porProtectedThis);
+
+ m_argIt.ForceSigWalk();
+ }
+
+#ifdef FEATURE_INTERPRETER
+public:
+ ARG_SLOT CallTargetWorker(const ARG_SLOT *pArguments, bool transitionToPreemptive = false);
+#else
+ ARG_SLOT CallTargetWorker(const ARG_SLOT *pArguments);
+#endif
+
+public:
+ // Used to avoid touching metadata for mscorlib methods.
+ // instance methods must pass in the 'this' object
+ // static methods must pass null
+ MethodDescCallSite(BinderMethodID id, OBJECTREF* porProtectedThis = NULL) :
+ m_pMD(
+ MscorlibBinder::GetMethod(id)
+ ),
+ m_methodSig(id),
+ m_argIt(&m_methodSig)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ DefaultInit(porProtectedThis);
+ }
+
+ // Used to avoid touching metadata for mscorlib methods.
+ // instance methods must pass in the 'this' object
+ // static methods must pass null
+ MethodDescCallSite(BinderMethodID id, OBJECTHANDLE hThis) :
+ m_pMD(
+ MscorlibBinder::GetMethod(id)
+ ),
+ m_methodSig(id),
+ m_argIt(&m_methodSig)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ DefaultInit((OBJECTREF*)hThis);
+ }
+
+ // instance methods must pass in the 'this' object
+ // static methods must pass null
+ MethodDescCallSite(MethodDesc* pMD, OBJECTREF* porProtectedThis = NULL) :
+ m_pMD(pMD),
+ m_methodSig(pMD),
+ m_argIt(&m_methodSig)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (porProtectedThis == NULL)
+ {
+ // We don't have a "this" pointer - ensure that we have activated the containing module
+ m_pMD->EnsureActive();
+ }
+
+ DefaultInit(porProtectedThis);
+ }
+
+ // instance methods must pass in the 'this' object
+ // static methods must pass null
+ MethodDescCallSite(MethodDesc* pMD, OBJECTHANDLE hThis) :
+ m_pMD(pMD),
+ m_methodSig(pMD),
+ m_argIt(&m_methodSig)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (hThis == NULL)
+ {
+ // We don't have a "this" pointer - ensure that we have activated the containing module
+ m_pMD->EnsureActive();
+ }
+
+ DefaultInit((OBJECTREF*)hThis);
+ }
+
+ // instance methods must pass in the 'this' object
+ // static methods must pass null
+ MethodDescCallSite(MethodDesc* pMD, LPHARDCODEDMETASIG pwzSignature, OBJECTREF* porProtectedThis = NULL) :
+ m_pMD(pMD),
+ m_methodSig(pwzSignature),
+ m_argIt(&m_methodSig)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (porProtectedThis == NULL)
+ {
+ // We don't have a "this" pointer - ensure that we have activated the containing module
+ m_pMD->EnsureActive();
+ }
+
+ DefaultInit(porProtectedThis);
+ }
+
+ //
+ // Only use this constructor if you're certain you know where
+ // you're going and it cannot be affected by generics/virtual
+ // dispatch/etc..
+ //
+ MethodDescCallSite(MethodDesc* pMD, PCODE pCallTarget) :
+ m_pMD(pMD),
+ m_pCallTarget(pCallTarget),
+ m_methodSig(pMD),
+ m_argIt(&m_methodSig)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pMD->EnsureActive();
+
+ m_argIt.ForceSigWalk();
+ }
+
+#ifdef FEATURE_INTERPRETER
+ MethodDescCallSite(MethodDesc* pMD, MetaSig* pSig, PCODE pCallTarget) :
+ m_pMD(pMD),
+ m_pCallTarget(pCallTarget),
+ m_methodSig(*pSig),
+ m_argIt(pSig)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pMD->EnsureActive();
+
+ m_argIt.ForceSigWalk();
+ }
+#endif // FEATURE_INTERPRETER
+
+ MetaSig* GetMetaSig()
+ {
+ return &m_methodSig;
+ }
+
+ //
+ // Call_RetXXX definition macros:
+ //
+ // These macros provide type protection for the return value from calls to managed
+ // code. This should help to prevent errors like what we're seeing on 64bit where
+ // the JIT64 is returning the BOOL as 1 byte with the rest of the ARG_SLOT still
+ // polluted by the remnants of its last value. Previously we would cast to a (BOOL)
+ // and end up with if((BOOL)pMD->Call(...)) statements always being true.
+ //
+
+ // Use OTHER_ELEMENT_TYPE when defining CallXXX_RetXXX variations where the return type
+ // is not in CorElementType (like LPVOID) or the return type can be one of a number of
+ // CorElementTypes, like XXX_RetObjPtr which is used for all kinds of Object* return
+ // types, or XXX_RetArgSlot which is unspecified.
+#define OTHER_ELEMENT_TYPE -1
+
+// Note "permitvaluetypes" is not really used for anything
+#define MDCALLDEF(wrappedmethod, permitvaluetypes, ext, rettype, eltype) \
+ FORCEINLINE rettype wrappedmethod##ext (const ARG_SLOT* pArguments) \
+ { \
+ WRAPPER_NO_CONTRACT; \
+ { \
+ GCX_FORBID(); /* arg array is not protected */ \
+ CONSISTENCY_CHECK(eltype == OTHER_ELEMENT_TYPE || \
+ eltype == m_methodSig.GetReturnType()); \
+ } \
+ ARG_SLOT retval; \
+ retval = CallTargetWorker(pArguments); \
+ return *(rettype *)ArgSlotEndianessFixup(&retval, sizeof(rettype)); \
+ }
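+
+// For illustration: MDCALLDEF(Call, FALSE, _RetI4, CLR_I4, ELEMENT_TYPE_I4) expands
+// (roughly) to the strongly typed wrapper:
+//
+//     FORCEINLINE CLR_I4 Call_RetI4(const ARG_SLOT* pArguments)
+//     {
+//         ARG_SLOT retval = CallTargetWorker(pArguments);
+//         return *(CLR_I4 *)ArgSlotEndianessFixup(&retval, sizeof(CLR_I4));
+//     }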
+
+#define MDCALLDEF_REFTYPE(wrappedmethod, permitvaluetypes, ext, ptrtype, reftype) \
+ FORCEINLINE reftype wrappedmethod##ext (const ARG_SLOT* pArguments) \
+ { \
+ WRAPPER_NO_CONTRACT; \
+ { \
+ GCX_FORBID(); /* arg array is not protected */ \
+ CONSISTENCY_CHECK(MetaSig::RETOBJ == m_pMD->ReturnsObject(true)); \
+ } \
+ ARG_SLOT retval; \
+ retval = CallTargetWorker(pArguments); \
+ return ObjectTo##reftype(*(ptrtype *) \
+ ArgSlotEndianessFixup(&retval, sizeof(ptrtype))); \
+ }
+
+
+ // The MDCALLDEF_XXX_VOID macros call the worker without returning a value; this is the
+ // form that _should_ be used to define the CallXXX variations (without the _RetXXX
+ // extension) so that misuse will be caught at compile time.
+
+#define MDCALLDEF_VOID(wrappedmethod, permitvaluetypes) \
+ FORCEINLINE void wrappedmethod (const ARG_SLOT* pArguments) \
+ { \
+ WRAPPER_NO_CONTRACT; \
+ CallTargetWorker(pArguments); \
+ }
+
+#define MDCALLDEFF_STD_RETTYPES(wrappedmethod,permitvaluetypes) \
+ MDCALLDEF_VOID(wrappedmethod,permitvaluetypes) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetBool, CLR_BOOL, ELEMENT_TYPE_BOOLEAN) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetChar, CLR_CHAR, ELEMENT_TYPE_CHAR) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI1, CLR_I1, ELEMENT_TYPE_I1) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU1, CLR_U1, ELEMENT_TYPE_U1) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI2, CLR_I2, ELEMENT_TYPE_I2) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU2, CLR_U2, ELEMENT_TYPE_U2) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI4, CLR_I4, ELEMENT_TYPE_I4) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU4, CLR_U4, ELEMENT_TYPE_U4) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI8, CLR_I8, ELEMENT_TYPE_I8) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU8, CLR_U8, ELEMENT_TYPE_U8) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetR4, CLR_R4, ELEMENT_TYPE_R4) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetR8, CLR_R8, ELEMENT_TYPE_R8) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetI, CLR_I, ELEMENT_TYPE_I) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetU, CLR_U, ELEMENT_TYPE_U) \
+ MDCALLDEF(wrappedmethod,permitvaluetypes, _RetArgSlot,ARG_SLOT, OTHER_ELEMENT_TYPE)
+
+
+ public:
+ //--------------------------------------------------------------------
+ // Invoke a method. Arguments are packaged up in right->left order
+ // with each array element corresponding to one argument.
+ //
+ // Can throw a COM+ exception.
+ //
+ // All the appropriate "virtual" semantics (including thunking like context
+ // proxies) occur inside Call.
+ //
+ // Call should never be called on interface MethodDescs. The exception
+ // to this rule is when calling on a COM object. In that case the call
+ // needs to go through an interface MD and CallOnInterface is there
+ // for that.
+ //--------------------------------------------------------------------
+
+ //
+ // NOTE on Call methods
+ // MethodDesc::Call uses a virtual portable calling convention
+ // Arguments are put left-to-right in the ARG_SLOT array, in the following order:
+ // - this pointer (if any)
+ // - return buffer address (if signature.HasRetBuffArg())
+ // - all other fixed arguments (left-to-right)
+ // Vararg is not supported yet.
+ //
+ // The args that fit in an ARG_SLOT are inline. The ones that don't fit in an ARG_SLOT are allocated somewhere else
+ // (usually on the stack) and a pointer to that area is put in the corresponding ARG_SLOT.
+ // ARG_SLOT is guaranteed to be big enough to fit all basic types and pointer types. Basically, one has
+ // to check only for aggregate value-types and 80-bit floating point values or greater.
+ //
+ // Calls with value type parameters must use the CallXXXWithValueTypes
+ // variants. Using the WithValueTypes variant indicates that the caller
+ // has gc-protected the contents of value types of size greater than
+ // ENREGISTERED_PARAMTYPE_MAXSIZE (when it is defined, which is currently
+ // only on AMD64). ProtectValueClassFrame can be used to accomplish this,
+ // see CallDescrWithObjectArray in stackbuildersink.cpp.
+ //
+ // Not all usages of MethodDesc::CallXXX have been ported to the new convention. The end goal is to port them all and get
+ // rid of the non-portable BYTE* version.
+ //
+ // We have converted all usage of CallXXX in the runtime to some more specific CallXXX_RetXXX type (CallXXX usages
+ // where the return value is unused remain CallXXX). In most cases we were able to use something more specific than
+ // CallXXX_RetArgSlot (which is the equivalent of the old behavior). It is recommended that as you add usages of
+ // CallXXX in the future you try to avoid CallXXX_RetArgSlot whenever possible.
+ //
+ // If the return value is unused you can use the CallXXX syntax which has a void return and is not protected
+ // by any assertions around the return value type. This should protect against people trying to use the old
+ // semantics of ->Call as if they try to assign the return value to something they'll get a compile time error.
+ //
+ // If you are unable to be sure of the return type at runtime and are just blindly casting then continue to use
+ // CallXXX_RetArgSlot, Do not for instance use CallXXX_RetI4 as a mechanism to cast the result to an I4 as it will
+ // also try to assert the fact that the callee managed method actually does return an I4.
+ //
+
+ // All forms of CallXXX should have at least the CallXXX_RetArgSlot definition which maps to the old behavior
+ // - MDCALL_ARG_____STD_RETTYPES includes CallXXX_RetArgSlot
+ // - MDCALL_ARG_SIG_STD_RETTYPES includes CallXXX_RetArgSlot
+
+ // XXX Call_RetXXX(const ARG_SLOT* pArguments);
+ MDCALLDEFF_STD_RETTYPES(Call, FALSE)
+ MDCALLDEF( Call, FALSE, _RetHR, HRESULT, OTHER_ELEMENT_TYPE)
+ MDCALLDEF( Call, FALSE, _RetObjPtr, Object*, OTHER_ELEMENT_TYPE)
+ MDCALLDEF_REFTYPE( Call, FALSE, _RetOBJECTREF, Object*, OBJECTREF)
+ MDCALLDEF_REFTYPE( Call, FALSE, _RetSTRINGREF, StringObject*, STRINGREF)
+ MDCALLDEF( Call, FALSE, _RetLPVOID, LPVOID, OTHER_ELEMENT_TYPE)
+
+ // XXX CallWithValueTypes_RetXXX(const ARG_SLOT* pArguments);
+ MDCALLDEF_VOID( CallWithValueTypes, TRUE)
+ MDCALLDEF( CallWithValueTypes, TRUE, _RetArgSlot, ARG_SLOT, OTHER_ELEMENT_TYPE)
+ MDCALLDEF_REFTYPE( CallWithValueTypes, TRUE, _RetOBJECTREF, Object*, OBJECTREF)
+ MDCALLDEF( CallWithValueTypes, TRUE, _RetOleColor, OLE_COLOR, OTHER_ELEMENT_TYPE)
+#undef OTHER_ELEMENT_TYPE
+#undef MDCALL_ARG_SIG_STD_RETTYPES
+#undef MDCALLDEF
+#undef MDCALLDEF_REFTYPE
+#undef MDCALLDEF_VOID
+}; // MethodDescCallSite
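+
+// Illustrative usage (a sketch; pSomeMD and the argument are hypothetical, and the
+// thread must be in cooperative mode):
+//
+//     MethodDescCallSite callSite(pSomeMD, &orProtectedThis);
+//     ARG_SLOT args[] = { ObjToArgSlot(orProtectedThis) };
+//     CLR_BOOL fResult = callSite.Call_RetBool(args);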
+
+
+#ifdef CALLDESCR_REGTYPEMAP
+void FillInRegTypeMap(int argOffset, CorElementType typ, BYTE * pMap);
+#endif // CALLDESCR_REGTYPEMAP
+
+
+/***********************************************************************/
+/* Macros used to indicate a call to managed code is starting/ending */
+/***********************************************************************/
+
+enum EEToManagedCallFlags
+{
+ EEToManagedDefault = 0x0000,
+ EEToManagedCriticalCall = 0x0001,
+};
+
+#define BEGIN_CALL_TO_MANAGED() \
+ BEGIN_CALL_TO_MANAGEDEX(EEToManagedDefault)
+
+#define BEGIN_CALL_TO_MANAGEDEX(flags) \
+{ \
+ MAKE_CURRENT_THREAD_AVAILABLE(); \
+ DECLARE_CPFH_EH_RECORD(CURRENT_THREAD); \
+ _ASSERTE(CURRENT_THREAD); \
+ _ASSERTE(!CURRENT_THREAD->IsAbortPrevented() || \
+ CURRENT_THREAD->IsAbortCheckDisabled()); \
+ _ASSERTE((CURRENT_THREAD->m_StateNC & Thread::TSNC_OwnsSpinLock) == 0); \
+ /* This bit should never be set when we call into managed code. The */ \
+ /* stack walking code explicitly clears this around any potential calls */ \
+ /* into managed code. */ \
+ _ASSERTE(!IsStackWalkerThread()); \
+ /* If this isn't a critical transition, we need to check to see if a */ \
+ /* thread abort has been requested */ \
+ if (!(flags & EEToManagedCriticalCall)) \
+ { \
+ TESTHOOKCALL(AppDomainCanBeUnloaded(CURRENT_THREAD->GetDomain()->GetId().m_dwId,FALSE)); \
+ if (CURRENT_THREAD->IsAbortRequested()) { \
+ CURRENT_THREAD->HandleThreadAbort(); \
+ } \
+ } \
+ BEGIN_SO_TOLERANT_CODE(CURRENT_THREAD); \
+ INSTALL_COMPLUS_EXCEPTION_HANDLER_NO_DECLARE();
+
+#define END_CALL_TO_MANAGED() \
+ UNINSTALL_COMPLUS_EXCEPTION_HANDLER(); \
+ END_SO_TOLERANT_CODE; \
+}
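+
+// Illustrative sketch (an assumption about typical use, not original code): the
+// macros above bracket a raw transition into managed code:
+//
+//     BEGIN_CALL_TO_MANAGEDEX(EEToManagedCriticalCall);
+//     CallDescrWorker(&callDescrData);
+//     END_CALL_TO_MANAGED();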
+
+/***********************************************************************/
+/* Macros that provide abstraction to the usage of DispatchCallSimple */
+/***********************************************************************/
+
+enum DispatchCallSimpleFlags
+{
+ DispatchCallSimple_CriticalCall = 0x0001,
+ DispatchCallSimple_CatchHandlerFoundNotification = 0x0002,
+};
+
+#define ARGHOLDER_TYPE LPVOID
+#define OBJECTREF_TO_ARGHOLDER(x) (LPVOID)OBJECTREFToObject(x)
+#define STRINGREF_TO_ARGHOLDER(x) (LPVOID)STRINGREFToObject(x)
+#define PTR_TO_ARGHOLDER(x) (LPVOID)x
+#define DWORD_TO_ARGHOLDER(x) (LPVOID)(SIZE_T)x
+
+#define INIT_VARIABLES(count) \
+ DWORD __numArgs = count; \
+ DWORD __dwDispatchCallSimpleFlags = 0; \
+
+#define PREPARE_NONVIRTUAL_CALLSITE(id) \
+ static PCODE s_pAddr##id = NULL; \
+ PCODE __pSlot = VolatileLoad(&s_pAddr##id); \
+ if ( __pSlot == NULL ) \
+ { \
+ MethodDesc *pMeth = MscorlibBinder::GetMethod(id); \
+ _ASSERTE(pMeth); \
+ __pSlot = pMeth->GetMultiCallableAddrOfCode(); \
+ VolatileStore(&s_pAddr##id, __pSlot); \
+ }
+
+#define PREPARE_VIRTUAL_CALLSITE(id, objref) \
+ MethodDesc *__pMeth = MscorlibBinder::GetMethod(id); \
+ PCODE __pSlot = __pMeth->GetCallTarget(&objref);
+
+#define PREPARE_VIRTUAL_CALLSITE_USING_METHODDESC(pMD, objref) \
+ PCODE __pSlot = pMD->GetCallTarget(&objref);
+
+#ifdef _DEBUG
+#define SIMPLE_VIRTUAL_METHOD_CHECK(slotNumber, methodTable) \
+ { \
+ MethodDesc* __pMeth = methodTable->GetMethodDescForSlot(slotNumber); \
+ _ASSERTE(__pMeth); \
+ _ASSERTE(!__pMeth->HasMethodInstantiation() && \
+ !__pMeth->GetMethodTable()->IsInterface()); \
+ }
+#else
+#define SIMPLE_VIRTUAL_METHOD_CHECK(slotNumber, objref)
+#endif
+
+// a simple virtual method is a non-interface/non-generic method
+// Note: objref has to be protected!
+#define PREPARE_SIMPLE_VIRTUAL_CALLSITE(id, objref) \
+ static WORD s_slot##id = MethodTable::NO_SLOT; \
+ WORD __slot = VolatileLoad(&s_slot##id); \
+ if (__slot == MethodTable::NO_SLOT) \
+ { \
+ MethodDesc *pMeth = MscorlibBinder::GetMethod(id); \
+ _ASSERTE(pMeth); \
+ __slot = pMeth->GetSlot(); \
+ VolatileStore(&s_slot##id, __slot); \
+ } \
+ PREPARE_SIMPLE_VIRTUAL_CALLSITE_USING_SLOT(__slot, objref) \
+
+// a simple virtual method is a non-interface/non-generic method
+#define PREPARE_SIMPLE_VIRTUAL_CALLSITE_USING_SLOT(slotNumber, objref) \
+ MethodTable* __pObjMT = (objref)->GetMethodTable(); \
+ SIMPLE_VIRTUAL_METHOD_CHECK(slotNumber, __pObjMT); \
+ PCODE __pSlot = (PCODE) __pObjMT->GetRestoredSlot(slotNumber);
+
+#define PREPARE_NONVIRTUAL_CALLSITE_USING_METHODDESC(pMD) \
+ PCODE __pSlot = (pMD)->GetSingleCallableAddrOfCode();
+
+#define PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(pCode) \
+ PCODE __pSlot = pCode;
+
+#define CRITICAL_CALLSITE \
+ __dwDispatchCallSimpleFlags |= DispatchCallSimple_CriticalCall;
+
+// This flag should be used for callsites that catch exceptions further up the stack inside the
+// VM, most commonly via END_DOMAIN_TRANSITION or EX_CATCH. Catching exceptions in managed code
+// is properly instrumented and does not need this notification.
+//
+// The notification is what enables both the managed 'unhandled exception' dialog and the 'user
+// unhandled' dialog when JMC is turned on. Many things that VS puts up the unhandled exception
+// dialog for are actually cases where the native exception was caught, for example catching
+// exceptions at the thread base. JMC requires further accuracy - in that case VS is checking to
+// see if an exception escaped particular ranges of managed code frames.
+#define CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE \
+ __dwDispatchCallSimpleFlags |= DispatchCallSimple_CatchHandlerFoundNotification;
+
+#define PERFORM_CALL \
+ void * __retval = NULL; \
+ __retval = DispatchCallSimple(__pArgs, \
+ __numStackSlotsToCopy, \
+ __pSlot, \
+ __dwDispatchCallSimpleFlags);\
+
+#ifdef CALLDESCR_ARGREGS
+
+#if defined(_TARGET_X86_)
+
+// Arguments on x86 are passed backward
+#define ARGNUM_0 1
+#define ARGNUM_1 0
+#define ARGNUM_N(n) __numArgs - n + 1
+
+#else
+
+#define ARGNUM_0 0
+#define ARGNUM_1 1
+#define ARGNUM_N(n) n
+
+#endif
+
+#define PRECALL_PREP(args) \
+ DWORD __numStackSlotsToCopy = (__numArgs > NUM_ARGUMENT_REGISTERS) ? (__numArgs - NUM_ARGUMENT_REGISTERS) : 0; \
+ SIZE_T * __pArgs = (SIZE_T *)args;
+
+#define DECLARE_ARGHOLDER_ARRAY(arg, count) \
+ INIT_VARIABLES(count) \
+ ARGHOLDER_TYPE arg[(count <= NUM_ARGUMENT_REGISTERS ? NUM_ARGUMENT_REGISTERS : count)];
+
+#else // CALLDESCR_ARGREGS
+
+#define ARGNUM_0 0
+#define ARGNUM_1 1
+#define ARGNUM_N(n) n
+
+#define PRECALL_PREP(args) \
+ DWORD __numStackSlotsToCopy = (__numArgs > NUM_ARGUMENT_REGISTERS) ? __numArgs : NUM_ARGUMENT_REGISTERS; \
+ SIZE_T * __pArgs = (SIZE_T *)args;
+
+#define DECLARE_ARGHOLDER_ARRAY(arg, count) \
+ INIT_VARIABLES(count) \
+ ARGHOLDER_TYPE arg[(count <= NUM_ARGUMENT_REGISTERS ? NUM_ARGUMENT_REGISTERS : count)];
+
+#endif // CALLDESCR_ARGREGS
+
+
+#define CALL_MANAGED_METHOD(ret, rettype, args) \
+ PRECALL_PREP(args) \
+ PERFORM_CALL \
+ ret = *(rettype *)(&__retval);
+
+#define CALL_MANAGED_METHOD_NORET(args) \
+ PRECALL_PREP(args) \
+ PERFORM_CALL
+
+#define CALL_MANAGED_METHOD_RETREF(ret, reftype, args) \
+ PRECALL_PREP(args) \
+ PERFORM_CALL \
+ ret = (reftype)ObjectToOBJECTREF((Object *)__retval);
+
+#define ARGNUM_2 ARGNUM_N(2)
+#define ARGNUM_3 ARGNUM_N(3)
+#define ARGNUM_4 ARGNUM_N(4)
+#define ARGNUM_5 ARGNUM_N(5)
+#define ARGNUM_6 ARGNUM_N(6)
+#define ARGNUM_7 ARGNUM_N(7)
+#define ARGNUM_8 ARGNUM_N(8)
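+
+// Illustrative usage of the callsite macros above (the binder id METHOD__FOO__BAR
+// and the arguments are hypothetical; objref arguments must be GC-protected):
+//
+//     PREPARE_NONVIRTUAL_CALLSITE(METHOD__FOO__BAR);
+//     DECLARE_ARGHOLDER_ARRAY(args, 2);
+//     args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(orTarget);
+//     args[ARGNUM_1] = DWORD_TO_ARGHOLDER(dwSomeFlag);
+//     CLR_BOOL fResult;
+//     CALL_MANAGED_METHOD(fResult, CLR_BOOL, args);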
+
+
+void CallDefaultConstructor(OBJECTREF ref);
+
+#endif //!DACCESS_COMPILE && !CROSSGEN_COMPILE
+
+#endif // __CALLHELPERS_H__
diff --git a/src/vm/callingconvention.h b/src/vm/callingconvention.h
new file mode 100644
index 0000000000..2d3e0a7526
--- /dev/null
+++ b/src/vm/callingconvention.h
@@ -0,0 +1,1508 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+// Provides an abstraction over platform specific calling conventions (specifically, the calling convention
+// utilized by the JIT on that platform). The caller enumerates each argument of a signature in turn, and is
+// provided with information mapping that argument into registers and/or stack locations.
+//
+
+#ifndef __CALLING_CONVENTION_INCLUDED
+#define __CALLING_CONVENTION_INCLUDED
+
+// Describes how a single argument is laid out in registers and/or stack locations when given as an input to a
+// managed method as part of a larger signature.
+//
+// Locations are split into floating point registers, general registers and stack offsets. Registers are
+// obviously architecture dependent but are represented as a zero-based index into the usual sequence in which
+// such registers are allocated for input on the platform in question. For instance:
+// X86: 0 == ecx, 1 == edx
+// ARM: 0 == r0, 1 == r1, 2 == r2 etc.
+//
+// Stack locations are represented as offsets from the stack pointer (at the point of the call). The offset is
+// given as an index of a pointer sized slot. Similarly the size of data on the stack is given in slot-sized
+// units. For instance, given an index of 2 and a size of 3:
+// X86: argument starts at [ESP + 8] and is 12 bytes long
+// AMD64: argument starts at [RSP + 16] and is 24 bytes long
+//
+// The structure is flexible enough to describe an argument that is split over several (consecutive) registers
+// and possibly on to the stack as well.
+struct ArgLocDesc
+{
+ int m_idxFloatReg; // First floating point register used (or -1)
+ int m_cFloatReg; // Count of floating point registers used (or 0)
+
+ int m_idxGenReg; // First general register used (or -1)
+ int m_cGenReg; // Count of general registers used (or 0)
+
+ int m_idxStack; // First stack slot used (or -1)
+ int m_cStack; // Count of stack slots used (or 0)
+
+#if defined(_TARGET_ARM_)
+ BOOL m_fRequires64BitAlignment; // True if the argument should always be aligned (in registers or on the stack)
+#endif
+
+ ArgLocDesc()
+ {
+ Init();
+ }
+
+ // Initialize to represent a non-placed argument (no register or stack slots referenced).
+ void Init()
+ {
+ m_idxFloatReg = -1;
+ m_cFloatReg = 0;
+ m_idxGenReg = -1;
+ m_cGenReg = 0;
+ m_idxStack = -1;
+ m_cStack = 0;
+#if defined(_TARGET_ARM_)
+ m_fRequires64BitAlignment = FALSE;
+#endif
+ }
+};
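+
+// For illustration: on ARM, a 12-byte argument split across r2, r3 and one stack
+// slot would be described as m_idxGenReg = 2, m_cGenReg = 2, m_idxStack = 0,
+// m_cStack = 1, with the remaining fields keeping their Init() values.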
+
+//
+// TransitionBlock is the layout of the stack frame of a method call: the saved argument registers and the saved
+// callee-saved registers. Even though not all fields are used all the time, we use a uniform layout for simplicity.
+//
+struct TransitionBlock
+{
+#if defined(_TARGET_X86_)
+ ArgumentRegisters m_argumentRegisters;
+ CalleeSavedRegisters m_calleeSavedRegisters;
+ TADDR m_ReturnAddress;
+#elif defined(_TARGET_AMD64_)
+#ifdef UNIX_AMD64_ABI
+ ArgumentRegisters m_argumentRegisters;
+#endif
+ CalleeSavedRegisters m_calleeSavedRegisters;
+ TADDR m_ReturnAddress;
+#elif defined(_TARGET_ARM_)
+ union {
+ CalleeSavedRegisters m_calleeSavedRegisters;
+ // alias saved link register as m_ReturnAddress
+ struct {
+ INT32 r4, r5, r6, r7, r8, r9, r10;
+ INT32 r11;
+ TADDR m_ReturnAddress;
+ };
+ };
+ ArgumentRegisters m_argumentRegisters;
+#elif defined(_TARGET_ARM64_)
+ union {
+ CalleeSavedRegisters m_calleeSavedRegisters;
+ struct {
+ INT64 x29; // frame pointer
+ TADDR m_ReturnAddress;
+ INT64 x19, x20, x21, x22, x23, x24, x25, x26, x27, x28;
+ };
+ };
+ ArgumentRegisters m_argumentRegisters;
+#else
+ PORTABILITY_ASSERT("TransitionBlock");
+#endif
+
+ // The transition block should define everything pushed by the callee. The code assumes in a number of places
+ // that the end of the transition block is the caller's stack pointer.
+
+ static int GetOffsetOfReturnAddress()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(TransitionBlock, m_ReturnAddress);
+ }
+
+ static BYTE GetOffsetOfArgs()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return sizeof(TransitionBlock);
+ }
+
+ static int GetOffsetOfArgumentRegisters()
+ {
+ LIMITED_METHOD_CONTRACT;
+ int offs;
+#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
+ offs = sizeof(TransitionBlock);
+#else
+ offs = offsetof(TransitionBlock, m_argumentRegisters);
+#endif
+ return offs;
+ }
+
+ static BOOL IsStackArgumentOffset(int offset)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ int ofsArgRegs = GetOffsetOfArgumentRegisters();
+
+ return offset >= (int) (ofsArgRegs + ARGUMENTREGISTERS_SIZE);
+ }
+
+ static BOOL IsArgumentRegisterOffset(int offset)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ int ofsArgRegs = GetOffsetOfArgumentRegisters();
+
+ return offset >= ofsArgRegs && offset < (int) (ofsArgRegs + ARGUMENTREGISTERS_SIZE);
+ }
+
+#ifndef _TARGET_X86_
+ static UINT GetArgumentIndexFromOffset(int offset)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (offset - GetOffsetOfArgumentRegisters()) / sizeof(TADDR);
+ }
+#endif
+
+#ifdef CALLDESCR_FPARGREGS
+ static BOOL IsFloatArgumentRegisterOffset(int offset)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offset < 0;
+ }
+
+ static int GetOffsetOfFloatArgumentRegisters()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return -GetNegSpaceSize();
+ }
+#endif
+
+ static int GetOffsetOfCalleeSavedRegisters()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(TransitionBlock, m_calleeSavedRegisters);
+ }
+
+ static int GetNegSpaceSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ int negSpaceSize = 0;
+#ifdef CALLDESCR_FPARGREGS
+ negSpaceSize += sizeof(FloatArgumentRegisters);
+#endif
+#ifdef _TARGET_ARM_
+ negSpaceSize += sizeof(TADDR); // padding to make FloatArgumentRegisters address 8-byte aligned
+#endif
+ return negSpaceSize;
+ }
+
+ static const int InvalidOffset = -1;
+};
+
+//-----------------------------------------------------------------------
+// ArgIterator is helper for dealing with calling conventions.
+// It is tightly coupled with TransitionBlock. It uses offsets into
+// TransitionBlock to represent argument locations for efficiency
+// reasons. Alternatively, it can also return ArgLocDesc for less
+// performance critical code.
+//
+// The ARGITERATOR_BASE argument of the template is the provider of the parsed
+// method signature. Typically, the arg iterator works on top of MetaSig.
+// Reflection invoke uses an alternative implementation to save signature-parsing
+// time because it already has the parsed signature available.
+//-----------------------------------------------------------------------
+template<class ARGITERATOR_BASE>
+class ArgIteratorTemplate : public ARGITERATOR_BASE
+{
+public:
+ //------------------------------------------------------------
+ // Constructor
+ //------------------------------------------------------------
+ ArgIteratorTemplate()
+ {
+ WRAPPER_NO_CONTRACT;
+ m_dwFlags = 0;
+ }
+
+ UINT SizeOfArgStack()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (!(m_dwFlags & SIZE_OF_ARG_STACK_COMPUTED))
+ ForceSigWalk();
+ _ASSERTE((m_dwFlags & SIZE_OF_ARG_STACK_COMPUTED) != 0);
+ return m_nSizeOfArgStack;
+ }
+
+ // For use with ArgIterator. This function computes the amount of additional
+ // memory required above the TransitionBlock. The parameter offsets
+ // returned by ArgIteratorTemplate::GetNextOffset are relative to a
+ // FramedMethodFrame, and may be in either of these regions.
+ UINT SizeOfFrameArgumentArray()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ UINT size = SizeOfArgStack();
+
+#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
+ // The argument registers are not included in the stack size on AMD64
+ size += ARGUMENTREGISTERS_SIZE;
+#endif
+
+ return size;
+ }
+
+ //------------------------------------------------------------------------
+
+#ifdef _TARGET_X86_
+ UINT CbStackPop()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (this->IsVarArg())
+ return 0;
+ else
+ return SizeOfArgStack();
+ }
+#endif
+
+ // Is there a hidden parameter for the return value?
+ //
+ BOOL HasRetBuffArg()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (!(m_dwFlags & RETURN_FLAGS_COMPUTED))
+ ComputeReturnFlags();
+ return (m_dwFlags & RETURN_HAS_RET_BUFFER);
+ }
+
+ UINT GetFPReturnSize()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (!(m_dwFlags & RETURN_FLAGS_COMPUTED))
+ ComputeReturnFlags();
+ return m_dwFlags >> RETURN_FP_SIZE_SHIFT;
+ }
+
+#ifdef _TARGET_X86_
+ //=========================================================================
+ // Indicates whether an argument is to be put in a register using the
+ // default IL calling convention. This should be called on each parameter
+ // in the order it appears in the call signature. For a non-static method,
+ // this function should also be called once for the "this" argument, prior
+ // to calling it for the "real" arguments. Pass in a typ of ELEMENT_TYPE_CLASS.
+ //
+ // *pNumRegistersUsed: [in,out]: keeps track of the number of argument
+ // registers assigned previously. The caller should
+ // initialize this variable to 0 - then each call
+ // will update it.
+ //
+ // typ: the signature type
+ //=========================================================================
+ static BOOL IsArgumentInRegister(int * pNumRegistersUsed, CorElementType typ)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if ( (*pNumRegistersUsed) < NUM_ARGUMENT_REGISTERS) {
+ if (gElementTypeInfo[typ].m_enregister) {
+ (*pNumRegistersUsed)++;
+ return(TRUE);
+ }
+ }
+
+ return(FALSE);
+ }
+#endif // _TARGET_X86_
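+
+ // Illustrative usage of IsArgumentInRegister (a sketch): deciding x86 register
+ // assignment for a signature. Call once per parameter in signature order, with
+ // the "this" argument first for instance methods (sig is a hypothetical MetaSig):
+ //
+ //     int numRegistersUsed = 0;
+ //     if (sig.HasThis())
+ //         IsArgumentInRegister(&numRegistersUsed, ELEMENT_TYPE_CLASS);
+ //     BOOL fInReg = IsArgumentInRegister(&numRegistersUsed, ELEMENT_TYPE_I4);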
+
+#if defined(ENREGISTERED_PARAMTYPE_MAXSIZE)
+
+ // Note that this overload does not handle varargs
+ static BOOL IsArgPassedByRef(TypeHandle th)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!th.IsNull());
+
+ // This method only works for valuetypes. It includes true value types,
+ // primitives, enums and TypedReference.
+ _ASSERTE(th.IsValueType());
+
+ size_t size = th.GetSize();
+#ifdef _TARGET_AMD64_
+ return IsArgPassedByRef(size);
+#elif defined(_TARGET_ARM64_)
+ // Composites greater than 16 bytes are passed by reference
+ return ((size > ENREGISTERED_PARAMTYPE_MAXSIZE) && !th.IsHFA());
+#else
+ PORTABILITY_ASSERT("ArgIteratorTemplate::IsArgPassedByRef");
+ return FALSE;
+#endif
+ }
+
+#ifdef _TARGET_AMD64_
+ // This overload should only be used in AMD64-specific code only.
+ static BOOL IsArgPassedByRef(size_t size)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // If the size is bigger than ENREGISTERED_PARAMTYPE_MAXSIZE, or if the size is NOT a power of 2, then
+ // the argument is passed by reference.
+ return (size > ENREGISTERED_PARAMTYPE_MAXSIZE) || ((size & (size-1)) != 0);
+ }
+#endif
+
+ // This overload should be used for varargs only.
+ static BOOL IsVarArgPassedByRef(size_t size)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _TARGET_AMD64_
+ return IsArgPassedByRef(size);
+#else
+ return (size > ENREGISTERED_PARAMTYPE_MAXSIZE);
+#endif
+ }
+
+ BOOL IsArgPassedByRef()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _TARGET_AMD64_
+ return IsArgPassedByRef(m_argSize);
+#elif defined(_TARGET_ARM64_)
+ if (m_argType == ELEMENT_TYPE_VALUETYPE)
+ {
+ _ASSERTE(!m_argTypeHandle.IsNull());
+ return ((m_argSize > ENREGISTERED_PARAMTYPE_MAXSIZE) && (!m_argTypeHandle.IsHFA() || IsVarArg()));
+ }
+ return FALSE;
+#else
+ PORTABILITY_ASSERT("ArgIteratorTemplate::IsArgPassedByRef");
+ return FALSE;
+#endif
+ }
+
+#endif // ENREGISTERED_PARAMTYPE_MAXSIZE
+
+ //------------------------------------------------------------
+ // Return the offsets of the special arguments
+ //------------------------------------------------------------
+
+ static int GetThisOffset();
+
+ int GetRetBuffArgOffset();
+ int GetVASigCookieOffset();
+ int GetParamTypeArgOffset();
+
+ //------------------------------------------------------------
+ // Each time this is called, this returns a byte offset of the next
+ // argument from the TransitionBlock* pointer.
+ //
+ // Returns TransitionBlock::InvalidOffset once you've hit the end
+ // of the list.
+ //------------------------------------------------------------
+ int GetNextOffset();
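+
+ // Illustrative sketch (PlaceArg is a hypothetical helper): enumerating every
+ // argument offset of a signature:
+ //
+ //     int ofs;
+ //     while ((ofs = argit.GetNextOffset()) != TransitionBlock::InvalidOffset)
+ //         PlaceArg(pTransitionBlock + ofs, argit.GetArgSize());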
+
+ CorElementType GetArgType(TypeHandle *pTypeHandle = NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (pTypeHandle != NULL)
+ {
+ *pTypeHandle = m_argTypeHandle;
+ }
+ return m_argType;
+ }
+
+ int GetArgSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_argSize;
+ }
+
+ void ForceSigWalk();
+
+#ifndef _TARGET_X86_
+ // Accessors for built in argument descriptions of the special implicit parameters not mentioned directly
+ // in signatures (this pointer and the like). Whether or not these can be used successfully before all the
+ // explicit arguments have been scanned is platform dependent.
+ void GetThisLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetThisOffset(), pLoc); }
+ void GetRetBuffArgLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetRetBuffArgOffset(), pLoc); }
+ void GetParamTypeLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetParamTypeArgOffset(), pLoc); }
+ void GetVASigCookieLoc(ArgLocDesc * pLoc) { WRAPPER_NO_CONTRACT; GetSimpleLoc(GetVASigCookieOffset(), pLoc); }
+#endif // !_TARGET_X86_
+
+#ifdef _TARGET_ARM_
+ // Get layout information for the argument that the ArgIterator is currently visiting.
+ void GetArgLoc(int argOffset, ArgLocDesc *pLoc)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ pLoc->Init();
+
+ pLoc->m_fRequires64BitAlignment = m_fRequires64BitAlignment;
+
+ int cSlots = (GetArgSize() + 3) / 4;
+
+ if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset))
+ {
+ pLoc->m_idxFloatReg = (argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters()) / 4;
+ pLoc->m_cFloatReg = cSlots;
+ return;
+ }
+
+ if (!TransitionBlock::IsStackArgumentOffset(argOffset))
+ {
+ pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset);
+
+ if (cSlots <= (4 - pLoc->m_idxGenReg))
+ {
+ pLoc->m_cGenReg = cSlots;
+ }
+ else
+ {
+ pLoc->m_cGenReg = 4 - pLoc->m_idxGenReg;
+
+ pLoc->m_idxStack = 0;
+ pLoc->m_cStack = cSlots - pLoc->m_cGenReg;
+ }
+ }
+ else
+ {
+ pLoc->m_idxStack = TransitionBlock::GetArgumentIndexFromOffset(argOffset) - 4;
+ pLoc->m_cStack = cSlots;
+ }
+ }
+#endif // _TARGET_ARM_
+
+#ifdef _TARGET_ARM64_
+ // Get layout information for the argument that the ArgIterator is currently visiting.
+ void GetArgLoc(int argOffset, ArgLocDesc *pLoc)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ pLoc->Init();
+
+ if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset))
+ {
+ // Dividing by 8 since each register in FloatArgumentRegisters is 8 bytes.
+ pLoc->m_idxFloatReg = (argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters()) / 8;
+
+ if (!m_argTypeHandle.IsNull() && m_argTypeHandle.IsHFA())
+ {
+ CorElementType type = m_argTypeHandle.GetHFAType();
+ pLoc->m_cFloatReg = (type == ELEMENT_TYPE_R4)? GetArgSize()/sizeof(float): GetArgSize()/sizeof(double);
+ }
+ else
+ {
+ pLoc->m_cFloatReg = 1;
+ }
+ return;
+ }
+
+ int cSlots = (GetArgSize() + 7)/ 8;
+
+ // Composites greater than 16 bytes are passed by reference
+ if (GetArgType() == ELEMENT_TYPE_VALUETYPE && GetArgSize() > ENREGISTERED_PARAMTYPE_MAXSIZE)
+ {
+ cSlots = 1;
+ }
+
+ if (!TransitionBlock::IsStackArgumentOffset(argOffset))
+ {
+ pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset);
+ pLoc->m_cGenReg = cSlots;
+ }
+ else
+ {
+ pLoc->m_idxStack = TransitionBlock::GetArgumentIndexFromOffset(argOffset) - 8;
+ pLoc->m_cStack = cSlots;
+ }
+ }
+#endif // _TARGET_ARM64_
+
+#if defined(_TARGET_AMD64_) && defined(UNIX_AMD64_ABI)
+ // Get layout information for the argument that the ArgIterator is currently visiting.
+ void GetArgLoc(int argOffset, ArgLocDesc *pLoc)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ pLoc->Init();
+
+ if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset))
+ {
+ // Dividing by 8 since each register in FloatArgumentRegisters is 8 bytes.
+ pLoc->m_idxFloatReg = (argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters()) / 8;
+
+ // UNIXTODO: Passing of structs, HFAs. For now, use the Windows convention.
+ pLoc->m_cFloatReg = 1;
+ return;
+ }
+
+ // UNIXTODO: Passing of structs, HFAs. For now, use the Windows convention.
+ int cSlots = 1;
+
+ if (!TransitionBlock::IsStackArgumentOffset(argOffset))
+ {
+ pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(argOffset);
+ pLoc->m_cGenReg = cSlots;
+ }
+ else
+ {
+ pLoc->m_idxStack = (argOffset - TransitionBlock::GetOffsetOfArgs()) / 8;
+ pLoc->m_cStack = cSlots;
+ }
+ }
+#endif // _TARGET_AMD64_ && UNIX_AMD64_ABI
+
+protected:
+ DWORD m_dwFlags; // Cached flags
+ int m_nSizeOfArgStack; // Cached value of SizeOfArgStack
+
+ DWORD m_argNum;
+
+ // Cached information about last argument
+ CorElementType m_argType;
+ int m_argSize;
+ TypeHandle m_argTypeHandle;
+
+#ifdef _TARGET_X86_
+ int m_curOfs; // Current position of the stack iterator
+ int m_numRegistersUsed;
+#endif
+
+#ifdef _TARGET_AMD64_
+#ifdef UNIX_AMD64_ABI
+ int m_idxGenReg;
+ int m_idxStack;
+ int m_idxFPReg;
+#else
+ int m_curOfs; // Current position of the stack iterator
+#endif
+#endif
+
+#ifdef _TARGET_ARM_
+ int m_idxGenReg; // Next general register to be assigned a value
+ int m_idxStack; // Next stack slot to be assigned a value
+
+ WORD m_wFPRegs; // Bitmask of available floating point argument registers (s0-s15/d0-d7)
+ bool m_fRequires64BitAlignment; // Cached info about the current arg
+#endif
+
+#ifdef _TARGET_ARM64_
+ int m_idxGenReg; // Next general register to be assigned a value
+ int m_idxStack; // Next stack slot to be assigned a value
+ int m_idxFPReg; // Next FP register to be assigned a value
+#endif
+
+ enum {
+ ITERATION_STARTED = 0x0001, // Started iterating over arguments
+ SIZE_OF_ARG_STACK_COMPUTED = 0x0002,
+ RETURN_FLAGS_COMPUTED = 0x0004,
+ RETURN_HAS_RET_BUFFER = 0x0008, // Cached value of HasRetBuffArg
+
+#ifdef _TARGET_X86_
+ PARAM_TYPE_REGISTER_MASK = 0x0030,
+ PARAM_TYPE_REGISTER_STACK = 0x0010,
+ PARAM_TYPE_REGISTER_ECX = 0x0020,
+ PARAM_TYPE_REGISTER_EDX = 0x0030,
+#endif
+
+ METHOD_INVOKE_NEEDS_ACTIVATION = 0x0040, // Flag used by ArgIteratorForMethodInvoke
+
+ RETURN_FP_SIZE_SHIFT = 8, // The rest of the flags is cached value of GetFPReturnSize
+ };
+
+ void ComputeReturnFlags();
+
+#ifndef _TARGET_X86_
+ void GetSimpleLoc(int offset, ArgLocDesc * pLoc)
+ {
+ WRAPPER_NO_CONTRACT;
+ pLoc->Init();
+ pLoc->m_idxGenReg = TransitionBlock::GetArgumentIndexFromOffset(offset);
+ pLoc->m_cGenReg = 1;
+ }
+#endif
+};
+
+
+template<class ARGITERATOR_BASE>
+int ArgIteratorTemplate<ARGITERATOR_BASE>::GetThisOffset()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // This pointer is in the first argument register by default
+ int ret = TransitionBlock::GetOffsetOfArgumentRegisters();
+
+#ifdef _TARGET_X86_
+ // x86 is special as always
+ ret += offsetof(ArgumentRegisters, ECX);
+#endif
+
+ return ret;
+}
+
+template<class ARGITERATOR_BASE>
+int ArgIteratorTemplate<ARGITERATOR_BASE>::GetRetBuffArgOffset()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(this->HasRetBuffArg());
+
+ // RetBuf arg is in the second argument register by default
+ int ret = TransitionBlock::GetOffsetOfArgumentRegisters();
+
+#if _TARGET_X86_
+ // x86 is special as always
+ ret += this->HasThis() ? offsetof(ArgumentRegisters, EDX) : offsetof(ArgumentRegisters, ECX);
+#else
+ if (this->HasThis())
+ ret += sizeof(void *);
+#endif
+
+ return ret;
+}
+
+template<class ARGITERATOR_BASE>
+int ArgIteratorTemplate<ARGITERATOR_BASE>::GetVASigCookieOffset()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(this->IsVarArg());
+
+#if defined(_TARGET_X86_)
+ // x86 is special as always
+ return sizeof(TransitionBlock);
+#else
+ // VaSig cookie is after this and retbuf arguments by default.
+ int ret = TransitionBlock::GetOffsetOfArgumentRegisters();
+
+ if (this->HasThis())
+ {
+ ret += sizeof(void*);
+ }
+
+ if (this->HasRetBuffArg())
+ {
+ ret += sizeof(void*);
+ }
+
+ return ret;
+#endif
+}
+
+//-----------------------------------------------------------
+// Get the extra param offset for shared generic code
+//-----------------------------------------------------------
+template<class ARGITERATOR_BASE>
+int ArgIteratorTemplate<ARGITERATOR_BASE>::GetParamTypeArgOffset()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(this->HasParamType());
+
+#ifdef _TARGET_X86_
+ // x86 is special as always
+ if (!(m_dwFlags & SIZE_OF_ARG_STACK_COMPUTED))
+ ForceSigWalk();
+
+ switch (m_dwFlags & PARAM_TYPE_REGISTER_MASK)
+ {
+ case PARAM_TYPE_REGISTER_ECX:
+ return TransitionBlock::GetOffsetOfArgumentRegisters() + offsetof(ArgumentRegisters, ECX);
+ case PARAM_TYPE_REGISTER_EDX:
+ return TransitionBlock::GetOffsetOfArgumentRegisters() + offsetof(ArgumentRegisters, EDX);
+ default:
+ break;
+ }
+
+    // Otherwise, the param type arg is the last stack argument
+ return sizeof(TransitionBlock);
+#else
+ // The hidden arg is after this and retbuf arguments by default.
+ int ret = TransitionBlock::GetOffsetOfArgumentRegisters();
+
+ if (this->HasThis())
+ {
+ ret += sizeof(void*);
+ }
+
+ if (this->HasRetBuffArg())
+ {
+ ret += sizeof(void*);
+ }
+
+ return ret;
+#endif
+}
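+
+// Worked example (illustrative): on x86, assuming no other arguments landed in
+// registers, a static method with no ret buffer gets the hidden param type arg
+// in ECX (ForceSigWalk records PARAM_TYPE_REGISTER_ECX), while an instance
+// method consumes ECX for 'this' and the hidden arg moves to EDX; once both
+// registers are taken it goes to the last stack slot instead.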
+
+// To avoid corner case bugs, limit maximum size of the arguments with sufficient margin
+#define MAX_ARG_SIZE 0xFFFFFF
+
+//------------------------------------------------------------
+// Each call returns the byte offset of the next argument from the
+// Frame* pointer. This offset can be positive *or* negative.
+//
+// Returns TransitionBlock::InvalidOffset once you've hit the end of the list.
+//------------------------------------------------------------
+template<class ARGITERATOR_BASE>
+int ArgIteratorTemplate<ARGITERATOR_BASE>::GetNextOffset()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ if (!(m_dwFlags & ITERATION_STARTED))
+ {
+ int numRegistersUsed = 0;
+
+ if (this->HasThis())
+ numRegistersUsed++;
+
+ if (this->HasRetBuffArg())
+ numRegistersUsed++;
+
+ _ASSERTE(!this->IsVarArg() || !this->HasParamType());
+
+#ifndef _TARGET_X86_
+ if (this->IsVarArg() || this->HasParamType())
+ {
+ numRegistersUsed++;
+ }
+#endif
+
+#ifdef _TARGET_X86_
+ if (this->IsVarArg())
+ {
+ numRegistersUsed = NUM_ARGUMENT_REGISTERS; // Nothing else gets passed in registers for varargs
+ }
+
+#ifdef FEATURE_INTERPRETER
+ BYTE callconv = CallConv();
+ switch (callconv)
+ {
+ case IMAGE_CEE_CS_CALLCONV_C:
+ case IMAGE_CEE_CS_CALLCONV_STDCALL:
+ m_numRegistersUsed = NUM_ARGUMENT_REGISTERS;
+ m_curOfs = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *);
+ m_fUnmanagedCallConv = true;
+ break;
+
+ case IMAGE_CEE_CS_CALLCONV_THISCALL:
+ case IMAGE_CEE_CS_CALLCONV_FASTCALL:
+ _ASSERTE_MSG(false, "Unsupported calling convention.");
+
+ default:
+ m_fUnmanagedCallConv = false;
+ m_numRegistersUsed = numRegistersUsed;
+ m_curOfs = TransitionBlock::GetOffsetOfArgs() + SizeOfArgStack();
+ }
+#else
+ m_numRegistersUsed = numRegistersUsed;
+ m_curOfs = TransitionBlock::GetOffsetOfArgs() + SizeOfArgStack();
+#endif
+
+#elif defined(_TARGET_AMD64_)
+#ifdef UNIX_AMD64_ABI
+ m_idxGenReg = numRegistersUsed;
+ m_idxStack = 0;
+ m_idxFPReg = 0;
+#else
+ m_curOfs = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *);
+#endif
+#elif defined(_TARGET_ARM_)
+ m_idxGenReg = numRegistersUsed;
+ m_idxStack = 0;
+
+ m_wFPRegs = 0;
+#elif defined(_TARGET_ARM64_)
+ m_idxGenReg = numRegistersUsed;
+ m_idxStack = 0;
+
+ m_idxFPReg = 0;
+#else
+ PORTABILITY_ASSERT("ArgIteratorTemplate::GetNextOffset");
+#endif
+
+ m_argNum = 0;
+
+ m_dwFlags |= ITERATION_STARTED;
+ }
+
+ if (m_argNum == this->NumFixedArgs())
+ return TransitionBlock::InvalidOffset;
+
+ TypeHandle thValueType;
+ CorElementType argType = this->GetNextArgumentType(m_argNum++, &thValueType);
+
+ int argSize = MetaSig::GetElemSize(argType, thValueType);
+
+ m_argType = argType;
+ m_argSize = argSize;
+ m_argTypeHandle = thValueType;
+
+#ifdef _TARGET_X86_
+#ifdef FEATURE_INTERPRETER
+ if (m_fUnmanagedCallConv)
+ {
+ int argOfs = m_curOfs;
+ m_curOfs += StackElemSize(argSize);
+ return argOfs;
+ }
+#endif
+ if (IsArgumentInRegister(&m_numRegistersUsed, argType))
+ {
+ return TransitionBlock::GetOffsetOfArgumentRegisters() + (NUM_ARGUMENT_REGISTERS - m_numRegistersUsed) * sizeof(void *);
+ }
+
+ m_curOfs -= StackElemSize(argSize);
+ _ASSERTE(m_curOfs >= TransitionBlock::GetOffsetOfArgs());
+ return m_curOfs;
+#elif defined(_TARGET_AMD64_)
+#ifdef UNIX_AMD64_ABI
+ int cFPRegs = 0;
+
+ switch (argType)
+ {
+
+ case ELEMENT_TYPE_R4:
+ // 32-bit floating point argument.
+ cFPRegs = 1;
+ break;
+
+ case ELEMENT_TYPE_R8:
+ // 64-bit floating point argument.
+ cFPRegs = 1;
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ // UNIXTODO: Passing of structs, HFAs. For now, use the Windows convention.
+ argSize = sizeof(TADDR);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ int cbArg = StackElemSize(argSize);
+ int cArgSlots = cbArg / STACK_ELEM_SIZE;
+
+ if (cFPRegs>0)
+ {
+ if (cFPRegs + m_idxFPReg <= 8)
+ {
+ int argOfs = TransitionBlock::GetOffsetOfFloatArgumentRegisters() + m_idxFPReg * 8;
+ m_idxFPReg += cFPRegs;
+ return argOfs;
+ }
+ }
+ else
+ {
+ if (m_idxGenReg + cArgSlots <= 6)
+ {
+ int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 8;
+ m_idxGenReg += cArgSlots;
+ return argOfs;
+ }
+ }
+
+ int argOfs = TransitionBlock::GetOffsetOfArgs() + m_idxStack * 8;
+ m_idxStack += cArgSlots;
+ return argOfs;
+#else
+ // Each argument takes exactly one slot on AMD64
+ int argOfs = m_curOfs;
+ m_curOfs += sizeof(void *);
+ return argOfs;
+#endif
+#elif defined(_TARGET_ARM_)
+ // First look at the underlying type of the argument to determine some basic properties:
+ // 1) The size of the argument in bytes (rounded up to the stack slot size of 4 if necessary).
+ // 2) Whether the argument represents a floating point primitive (ELEMENT_TYPE_R4 or ELEMENT_TYPE_R8).
+ // 3) Whether the argument requires 64-bit alignment (anything that contains a Int64/UInt64).
+
+ bool fFloatingPoint = false;
+ bool fRequiresAlign64Bit = false;
+
+ switch (argType)
+ {
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ // 64-bit integers require 64-bit alignment on ARM.
+ fRequiresAlign64Bit = true;
+ break;
+
+ case ELEMENT_TYPE_R4:
+ // 32-bit floating point argument.
+ fFloatingPoint = true;
+ break;
+
+ case ELEMENT_TYPE_R8:
+ // 64-bit floating point argument.
+ fFloatingPoint = true;
+ fRequiresAlign64Bit = true;
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ // Value type case: extract the alignment requirement, note that this has to handle
+ // the interop "native value types".
+ fRequiresAlign64Bit = thValueType.RequiresAlign8();
+
+ // Handle HFAs: packed structures of 1-4 floats or doubles that are passed in FP argument
+ // registers if possible.
+ if (thValueType.IsHFA())
+ fFloatingPoint = true;
+
+ break;
+ }
+
+ default:
+        // The default case covers 4-byte arguments (or arguments promoted to 4 bytes) that are
+        // non-FP and don't require any 64-bit alignment.
+ break;
+ }
+
+ // Now attempt to place the argument into some combination of floating point or general registers and
+ // the stack.
+
+ // Save the alignment requirement
+ m_fRequires64BitAlignment = fRequiresAlign64Bit;
+
+ int cbArg = StackElemSize(argSize);
+ int cArgSlots = cbArg / 4;
+
+ // Ignore floating point argument placement in registers if we're dealing with a vararg function (the ABI
+ // specifies this so that vararg processing on the callee side is simplified).
+ if (fFloatingPoint && !this->IsVarArg())
+ {
+ // Handle floating point (primitive) arguments.
+
+ // First determine whether we can place the argument in VFP registers. There are 16 32-bit
+ // and 8 64-bit argument registers that share the same register space (e.g. D0 overlaps S0 and
+ // S1). The ABI specifies that VFP values will be passed in the lowest sequence of registers that
+ // haven't been used yet and have the required alignment. So the sequence (float, double, float)
+ // would be mapped to (S0, D1, S1) or (S0, S2/S3, S1).
+ //
+ // We use a 16-bit bitmap to record which registers have been used so far.
+ //
+        // So that we can use the same basic loop for each argument type (float, double or HFA struct),
+        // we set up the following input parameters based on the size and alignment requirements of the argument:
+ // wAllocMask : bitmask of the number of 32-bit registers we need (1 for 1, 3 for 2, 7 for 3 etc.)
+ // cSteps : number of loop iterations it'll take to search the 16 registers
+ // cShift : how many bits to shift the allocation mask on each attempt
+
+ WORD wAllocMask = (1 << (cbArg / 4)) - 1;
+ WORD cSteps = (WORD)(fRequiresAlign64Bit ? 9 - (cbArg / 8) : 17 - (cbArg / 4));
+ WORD cShift = fRequiresAlign64Bit ? 2 : 1;
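+
+        // Worked example (illustrative): for a float (cbArg == 4) this yields
+        // wAllocMask == 0x1, cSteps == 16, cShift == 1, i.e. a scan of S0..S15
+        // one register at a time; for a double (cbArg == 8) it yields
+        // wAllocMask == 0x3, cSteps == 8, cShift == 2, i.e. a scan of the
+        // aligned pairs D0..D7.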
+
+ // Look through the availability bitmask for a free register or register pair.
+ for (WORD i = 0; i < cSteps; i++)
+ {
+ if ((m_wFPRegs & wAllocMask) == 0)
+ {
+ // We found one, mark the register or registers as used.
+ m_wFPRegs |= wAllocMask;
+
+ // Indicate the registers used to the caller and return.
+ return TransitionBlock::GetOffsetOfFloatArgumentRegisters() + (i * cShift * 4);
+ }
+ wAllocMask <<= cShift;
+ }
+
+ // The FP argument is going to live on the stack. Once this happens the ABI demands we mark all FP
+ // registers as unavailable.
+ m_wFPRegs = 0xffff;
+
+ // Doubles or HFAs containing doubles need the stack aligned appropriately.
+ if (fRequiresAlign64Bit)
+ m_idxStack = ALIGN_UP(m_idxStack, 2);
+
+ // Indicate the stack location of the argument to the caller.
+ int argOfs = TransitionBlock::GetOffsetOfArgs() + m_idxStack * 4;
+
+ // Record the stack usage.
+ m_idxStack += cArgSlots;
+
+ return argOfs;
+ }
+
+ //
+ // Handle the non-floating point case.
+ //
+
+ if (m_idxGenReg < 4)
+ {
+ if (fRequiresAlign64Bit)
+ {
+            // The argument requires 64-bit alignment. If any general argument registers remain,
+            // align the next one to an even index. See step C.3 in the algorithm in the ABI spec.
+ m_idxGenReg = ALIGN_UP(m_idxGenReg, 2);
+ }
+
+ int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 4;
+
+ int cRemainingRegs = 4 - m_idxGenReg;
+ if (cArgSlots <= cRemainingRegs)
+ {
+ // Mark the registers just allocated as used.
+ m_idxGenReg += cArgSlots;
+ return argOfs;
+ }
+
+ // The ABI supports splitting a non-FP argument across registers and the stack. But this is
+ // disabled if the FP arguments already overflowed onto the stack (i.e. the stack index is not
+ // zero). The following code marks the general argument registers as exhausted if this condition
+        // holds. See step C.5 in the algorithm in the ABI spec.
+
+ m_idxGenReg = 4;
+
+ if (m_idxStack == 0)
+ {
+ m_idxStack += cArgSlots - cRemainingRegs;
+ return argOfs;
+ }
+ }
+
+ if (fRequiresAlign64Bit)
+ {
+ // The argument requires 64-bit alignment. If it is going to be passed on the stack, align
+ // the next stack slot. See step C.6 in the algorithm in the ABI spec.
+ m_idxStack = ALIGN_UP(m_idxStack, 2);
+ }
+
+ int argOfs = TransitionBlock::GetOffsetOfArgs() + m_idxStack * 4;
+
+ // Advance the stack pointer over the argument just placed.
+ m_idxStack += cArgSlots;
+
+ return argOfs;
+#elif defined(_TARGET_ARM64_)
+
+ int cFPRegs = 0;
+
+ switch (argType)
+ {
+
+ case ELEMENT_TYPE_R4:
+ // 32-bit floating point argument.
+ cFPRegs = 1;
+ break;
+
+ case ELEMENT_TYPE_R8:
+ // 64-bit floating point argument.
+ cFPRegs = 1;
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+            // Handle HFAs: packed structures of 1-4 floats or doubles that are passed in FP argument
+ // registers if possible.
+ if (thValueType.IsHFA())
+ {
+ CorElementType type = thValueType.GetHFAType();
+ cFPRegs = (type == ELEMENT_TYPE_R4)? (argSize/sizeof(float)): (argSize/sizeof(double));
+ }
+ else
+ {
+                // Composites larger than 16 bytes are passed by reference
+ if (argSize > ENREGISTERED_PARAMTYPE_MAXSIZE)
+ {
+ argSize = sizeof(TADDR);
+ }
+ }
+
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ int cbArg = StackElemSize(argSize);
+ int cArgSlots = cbArg / STACK_ELEM_SIZE;
+
+ if (cFPRegs>0 && !this->IsVarArg())
+ {
+ if (cFPRegs + m_idxFPReg <= 8)
+ {
+ int argOfs = TransitionBlock::GetOffsetOfFloatArgumentRegisters() + m_idxFPReg * 8;
+ m_idxFPReg += cFPRegs;
+ return argOfs;
+ }
+ else
+ {
+ m_idxFPReg = 8;
+ }
+ }
+ else
+ {
+ if (m_idxGenReg + cArgSlots <= 8)
+ {
+ int argOfs = TransitionBlock::GetOffsetOfArgumentRegisters() + m_idxGenReg * 8;
+ m_idxGenReg += cArgSlots;
+ return argOfs;
+ }
+ else
+ {
+ m_idxGenReg = 8;
+ }
+ }
+
+ int argOfs = TransitionBlock::GetOffsetOfArgs() + m_idxStack * 8;
+ m_idxStack += cArgSlots;
+ return argOfs;
+#else
+ PORTABILITY_ASSERT("ArgIteratorTemplate::GetNextOffset");
+ return TransitionBlock::InvalidOffset;
+#endif
+}
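+
+// Typical usage sketch (illustrative; 'msig' is a hypothetical MetaSig local):
+//
+//     ArgIterator argit(&msig);
+//     int ofs;
+//     while ((ofs = argit.GetNextOffset()) != TransitionBlock::InvalidOffset)
+//     {
+//         // ofs is a byte offset from the Frame* pointer and can be
+//         // positive or negative, as described above.
+//     }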
+
+template<class ARGITERATOR_BASE>
+void ArgIteratorTemplate<ARGITERATOR_BASE>::ComputeReturnFlags()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ TypeHandle thValueType;
+ CorElementType type = this->GetReturnType(&thValueType);
+
+ DWORD flags = RETURN_FLAGS_COMPUTED;
+ switch (type)
+ {
+ case ELEMENT_TYPE_TYPEDBYREF:
+#ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
+ if (sizeof(TypedByRef) > ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE)
+ flags |= RETURN_HAS_RET_BUFFER;
+#else
+ flags |= RETURN_HAS_RET_BUFFER;
+#endif
+ break;
+
+ case ELEMENT_TYPE_R4:
+ flags |= sizeof(float) << RETURN_FP_SIZE_SHIFT;
+ break;
+
+ case ELEMENT_TYPE_R8:
+ flags |= sizeof(double) << RETURN_FP_SIZE_SHIFT;
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+#ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
+ {
+ _ASSERTE(!thValueType.IsNull());
+
+#ifdef FEATURE_HFA
+ if (thValueType.IsHFA() && !this->IsVarArg())
+ {
+ CorElementType hfaType = thValueType.GetHFAType();
+
+ flags |= (hfaType == ELEMENT_TYPE_R4) ?
+ ((4 * sizeof(float)) << RETURN_FP_SIZE_SHIFT) :
+ ((4 * sizeof(double)) << RETURN_FP_SIZE_SHIFT);
+
+ break;
+ }
+#endif
+
+ size_t size = thValueType.GetSize();
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+        // Return value types whose size is not a power of 2 via a RetBuffArg
+ if ((size & (size-1)) != 0)
+ {
+ flags |= RETURN_HAS_RET_BUFFER;
+ break;
+ }
+#endif
+
+ if (size <= ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE)
+ break;
+ }
+#endif // ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
+
+ // Value types are returned using return buffer by default
+ flags |= RETURN_HAS_RET_BUFFER;
+ break;
+
+ default:
+ break;
+ }
+
+ m_dwFlags |= flags;
+}
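+
+// Worked example (illustrative): with FEATURE_HFA, a value type made of four
+// floats returned from a non-vararg method picks up
+// (4 * sizeof(float)) << RETURN_FP_SIZE_SHIFT == 0x1000, so the cached FP
+// return size is 16 bytes and no ret buffer is required.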
+
+template<class ARGITERATOR_BASE>
+void ArgIteratorTemplate<ARGITERATOR_BASE>::ForceSigWalk()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ // This can be only used before the actual argument iteration started
+ _ASSERTE((m_dwFlags & ITERATION_STARTED) == 0);
+
+#ifdef _TARGET_X86_
+ //
+ // x86 is special as always
+ //
+
+ int numRegistersUsed = 0;
+ int nSizeOfArgStack = 0;
+
+ if (this->HasThis())
+ numRegistersUsed++;
+
+ if (this->HasRetBuffArg())
+ numRegistersUsed++;
+
+ if (this->IsVarArg())
+ {
+ nSizeOfArgStack += sizeof(void *);
+ numRegistersUsed = NUM_ARGUMENT_REGISTERS; // Nothing else gets passed in registers for varargs
+ }
+
+#ifdef FEATURE_INTERPRETER
+ BYTE callconv = CallConv();
+    switch (callconv)
+    {
+    case IMAGE_CEE_CS_CALLCONV_C:
+    case IMAGE_CEE_CS_CALLCONV_STDCALL:
+        numRegistersUsed = NUM_ARGUMENT_REGISTERS;
+        nSizeOfArgStack = TransitionBlock::GetOffsetOfArgs() + numRegistersUsed * sizeof(void *);
+        break;
+
+    case IMAGE_CEE_CS_CALLCONV_THISCALL:
+    case IMAGE_CEE_CS_CALLCONV_FASTCALL:
+        _ASSERTE_MSG(false, "Unsupported calling convention.");
+        break;
+
+    default:
+        break;
+    }
+#endif // FEATURE_INTERPRETER
+
+ DWORD nArgs = this->NumFixedArgs();
+ for (DWORD i = 0; i < nArgs; i++)
+ {
+ TypeHandle thValueType;
+ CorElementType type = this->GetNextArgumentType(i, &thValueType);
+
+ if (!IsArgumentInRegister(&numRegistersUsed, type))
+ {
+ int structSize = MetaSig::GetElemSize(type, thValueType);
+
+ nSizeOfArgStack += StackElemSize(structSize);
+
+#ifndef DACCESS_COMPILE
+ if (nSizeOfArgStack > MAX_ARG_SIZE)
+ {
+#ifdef _DEBUG
+                // We should never throw an exception in "FORBIDGC_LOADER_USE_ENABLED" mode.
+                // The contract violation is required to work around a bug in the static contract analyzer.
+ _ASSERTE(!FORBIDGC_LOADER_USE_ENABLED());
+ CONTRACT_VIOLATION(ThrowsViolation);
+#endif
+#ifdef BINDER
+ IfFailThrow(COR_E_NOTSUPPORTED);
+#else
+ COMPlusThrow(kNotSupportedException);
+#endif
+ }
+#endif
+ }
+ }
+
+ if (this->HasParamType())
+ {
+ DWORD paramTypeFlags = 0;
+ if (numRegistersUsed < NUM_ARGUMENT_REGISTERS)
+ {
+ numRegistersUsed++;
+ paramTypeFlags = (numRegistersUsed == 1) ?
+ PARAM_TYPE_REGISTER_ECX : PARAM_TYPE_REGISTER_EDX;
+ }
+ else
+ {
+ nSizeOfArgStack += sizeof(void *);
+ paramTypeFlags = PARAM_TYPE_REGISTER_STACK;
+ }
+ m_dwFlags |= paramTypeFlags;
+ }
+
+#else // _TARGET_X86_
+
+ int maxOffset = TransitionBlock::GetOffsetOfArgs();
+
+ int ofs;
+ while (TransitionBlock::InvalidOffset != (ofs = GetNextOffset()))
+ {
+ int stackElemSize;
+
+#ifdef _TARGET_AMD64_
+        // All stack arguments take just one stack slot on AMD64 because arguments bigger
+        // than a stack slot are passed by reference.
+ stackElemSize = STACK_ELEM_SIZE;
+#else
+ stackElemSize = StackElemSize(GetArgSize());
+#if defined(ENREGISTERED_PARAMTYPE_MAXSIZE)
+ if (IsArgPassedByRef())
+ stackElemSize = STACK_ELEM_SIZE;
+#endif
+#endif
+
+ int endOfs = ofs + stackElemSize;
+ if (endOfs > maxOffset)
+ {
+#if !defined(DACCESS_COMPILE) && !defined(BINDER)
+ if (endOfs > MAX_ARG_SIZE)
+ {
+#ifdef _DEBUG
+                // We should never throw an exception in "FORBIDGC_LOADER_USE_ENABLED" mode.
+                // The contract violation is required to work around a bug in the static contract analyzer.
+ _ASSERTE(!FORBIDGC_LOADER_USE_ENABLED());
+ CONTRACT_VIOLATION(ThrowsViolation);
+#endif
+ COMPlusThrow(kNotSupportedException);
+ }
+#endif
+ maxOffset = endOfs;
+ }
+ }
+ // Clear the iterator started flag
+ m_dwFlags &= ~ITERATION_STARTED;
+
+ int nSizeOfArgStack = maxOffset - TransitionBlock::GetOffsetOfArgs();
+
+#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
+ nSizeOfArgStack = (nSizeOfArgStack > (int)sizeof(ArgumentRegisters)) ?
+ (nSizeOfArgStack - sizeof(ArgumentRegisters)) : 0;
+#endif
+
+#endif // _TARGET_X86_
+
+ // Cache the result
+ m_nSizeOfArgStack = nSizeOfArgStack;
+ m_dwFlags |= SIZE_OF_ARG_STACK_COMPUTED;
+
+ this->Reset();
+}
+
+class ArgIteratorBase
+{
+protected:
+ MetaSig * m_pSig;
+
+ FORCEINLINE CorElementType GetReturnType(TypeHandle * pthValueType)
+ {
+ WRAPPER_NO_CONTRACT;
+#ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
+ return m_pSig->GetReturnTypeNormalized(pthValueType);
+#else
+ return m_pSig->GetReturnTypeNormalized();
+#endif
+ }
+
+ FORCEINLINE CorElementType GetNextArgumentType(DWORD iArg, TypeHandle * pthValueType)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(iArg == m_pSig->GetArgNum());
+ CorElementType et = m_pSig->PeekArgNormalized(pthValueType);
+ m_pSig->SkipArg();
+ return et;
+ }
+
+ FORCEINLINE void Reset()
+ {
+ WRAPPER_NO_CONTRACT;
+ m_pSig->Reset();
+ }
+
+public:
+ BOOL HasThis()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pSig->HasThis();
+ }
+
+ BOOL HasParamType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pSig->GetCallingConventionInfo() & CORINFO_CALLCONV_PARAMTYPE;
+ }
+
+ BOOL IsVarArg()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pSig->IsVarArg() || m_pSig->IsTreatAsVarArg();
+ }
+
+ DWORD NumFixedArgs()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pSig->NumFixedArgs();
+ }
+
+#ifdef FEATURE_INTERPRETER
+ BYTE CallConv()
+ {
+ return m_pSig->GetCallingConvention();
+ }
+#endif // FEATURE_INTERPRETER
+
+ //
+ // The following is used by the profiler to dig into the iterator for
+ // discovering if the method has a This pointer or a return buffer.
+ // Do not use this to re-initialize the signature, use the exposed Init()
+ // method in this class.
+ //
+ MetaSig *GetSig(void)
+ {
+ return m_pSig;
+ }
+};
+
+class ArgIterator : public ArgIteratorTemplate<ArgIteratorBase>
+{
+public:
+ ArgIterator(MetaSig * pSig)
+ {
+ m_pSig = pSig;
+ }
+
+ // This API returns true if we are returning a structure in registers instead of using a byref return buffer
+ BOOL HasNonStandardByvalReturn()
+ {
+ WRAPPER_NO_CONTRACT;
+
+#ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
+ CorElementType type = m_pSig->GetReturnTypeNormalized();
+ return (type == ELEMENT_TYPE_VALUETYPE || type == ELEMENT_TYPE_TYPEDBYREF) && !HasRetBuffArg();
+#else
+ return FALSE;
+#endif
+ }
+};
+
+// Convenience helper
+inline BOOL HasRetBuffArg(MetaSig * pSig)
+{
+ WRAPPER_NO_CONTRACT;
+ ArgIterator argit(pSig);
+ return argit.HasRetBuffArg();
+}
+
+#endif // __CALLING_CONVENTION_INCLUDED
diff --git a/src/vm/ceeload.cpp b/src/vm/ceeload.cpp
new file mode 100644
index 0000000000..e0fadaf58d
--- /dev/null
+++ b/src/vm/ceeload.cpp
@@ -0,0 +1,16163 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: CEELOAD.CPP
+//
+
+//
+
+// CEELOAD reads in the PE file format using LoadLibrary
+// ===========================================================================
+
+
+#include "common.h"
+
+#include "array.h"
+#include "ceeload.h"
+#include "hash.h"
+#include "vars.hpp"
+#include "reflectclasswriter.h"
+#include "method.hpp"
+#include "stublink.h"
+#include "security.h"
+#include "cgensys.h"
+#include "excep.h"
+#include "dbginterface.h"
+#include "dllimport.h"
+#include "eeprofinterfaces.h"
+#include "perfcounters.h"
+#include "encee.h"
+#include "jitinterface.h"
+#include "eeconfig.h"
+#include "dllimportcallback.h"
+#include "contractimpl.h"
+#include "typehash.h"
+#include "instmethhash.h"
+#include "virtualcallstub.h"
+#include "typestring.h"
+#include "stringliteralmap.h"
+#include "eventtrace.h"
+#include <formattype.h>
+#include "fieldmarshaler.h"
+#include "sigbuilder.h"
+#include "tls.h"
+#include "metadataexports.h"
+#include "inlinetracking.h"
+
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#include "crossdomaincalls.h"
+#include "objectclone.h"
+#endif
+
+#ifdef FEATURE_PREJIT
+#include "exceptionhandling.h"
+#include "corcompile.h"
+#include "compile.h"
+#include "nibblestream.h"
+#include "zapsig.h"
+#endif //FEATURE_PREJIT
+
+#ifdef FEATURE_COMINTEROP
+#include "runtimecallablewrapper.h"
+#include "comcallablewrapper.h"
+#endif //FEATURE_COMINTEROP
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4724)
+#endif // _MSC_VER
+
+#include "ngenhash.inl"
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif // _MSC_VER
+
+
+#include "perflog.h"
+#include "ecall.h"
+#include "../md/compiler/custattr.h"
+#include "constrainedexecutionregion.h"
+#include "typekey.h"
+#include "peimagelayout.inl"
+#include "ildbsymlib.h"
+
+#if defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_APPX_BINDER)
+#include "clrprivbinderappx.h"
+#endif //defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_APPX_BINDER)
+
+#if defined(PROFILING_SUPPORTED)
+#include "profilermetadataemitvalidator.h"
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4244)
+#endif // _MSC_VER
+
+#ifdef _WIN64
+#define COR_VTABLE_PTRSIZED COR_VTABLE_64BIT
+#define COR_VTABLE_NOT_PTRSIZED COR_VTABLE_32BIT
+#else // !_WIN64
+#define COR_VTABLE_PTRSIZED COR_VTABLE_32BIT
+#define COR_VTABLE_NOT_PTRSIZED COR_VTABLE_64BIT
+#endif // !_WIN64
+
+// Hash table parameter of available classes (name -> module/class) hash
+#define AVAILABLE_CLASSES_HASH_BUCKETS 1024
+#define AVAILABLE_CLASSES_HASH_BUCKETS_COLLECTIBLE 128
+#define PARAMTYPES_HASH_BUCKETS 23
+#define PARAMMETHODS_HASH_BUCKETS 11
+#define METHOD_STUBS_HASH_BUCKETS 11
+
+#define GUID_TO_TYPE_HASH_BUCKETS 16
+
+#define CEE_FILE_GEN_GROWTH_COLLECTIBLE 2048
+
+#define NGEN_STATICS_ALLCLASSES_WERE_LOADED -1
+
+
+//---------------------------------------------------------------------------------------
+InstrumentedILOffsetMapping::InstrumentedILOffsetMapping()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_cMap = 0;
+ m_rgMap = NULL;
+ _ASSERTE(IsNull());
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Check whether there is any mapping information stored in this object.
+//
+// Notes:
+// The memory must remain alive until the Module containing
+// the instrumented method is destructed.
+//
+
+BOOL InstrumentedILOffsetMapping::IsNull()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
+ return (m_cMap == 0);
+}
+
+#if !defined(DACCESS_COMPILE)
+//---------------------------------------------------------------------------------------
+//
+// Release the memory used by the array of COR_IL_MAPs.
+//
+// Notes:
+// * The memory must remain alive until the Module containing
+//   the instrumented method is destructed.
+// * This struct should be read-only in DAC builds.
+//
+
+void InstrumentedILOffsetMapping::Clear()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_rgMap != NULL)
+ {
+ delete [] m_rgMap;
+ }
+
+ m_cMap = 0;
+ m_rgMap = NULL;
+}
+#endif // !DACCESS_COMPILE
+
+#if !defined(DACCESS_COMPILE)
+void InstrumentedILOffsetMapping::SetMappingInfo(SIZE_T cMap, COR_IL_MAP * rgMap)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE((cMap == 0) == (rgMap == NULL));
+ m_cMap = cMap;
+ m_rgMap = ARRAY_PTR_COR_IL_MAP(rgMap);
+}
+#endif // !DACCESS_COMPILE
+
+SIZE_T InstrumentedILOffsetMapping::GetCount() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
+ return m_cMap;
+}
+
+ARRAY_PTR_COR_IL_MAP InstrumentedILOffsetMapping::GetOffsets() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
+ return m_rgMap;
+}
+
+PTR_PersistentInlineTrackingMap Module::GetNgenInlineTrackingMap()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_persistentInlineTrackingMap;
+}
+
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_MIXEDMODE
+
+#include <pshpack1.h>
+struct MUThunk
+{
+ VASigCookie *m_pCookie;
+ PCCOR_SIGNATURE m_pSig;
+ LPVOID m_pTarget;
+#ifdef _TARGET_X86_
+ LPVOID GetCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_op1;
+ }
+
+ BYTE m_op1; //0x58 POP eax ;;pop return address
+
+ BYTE m_op2; //0x68 PUSH cookie
+ UINT32 m_opcookie;//
+
+ BYTE m_op3; //0x50 PUSH eax ;;repush return address
+
+ BYTE m_op4; //0xb8 MOV eax,target
+ UINT32 m_optarget;//
+ BYTE m_jmp; //0xe9 JMP PInvokeCalliStub
+ UINT32 m_jmptarg;
+#else // !_TARGET_X86_
+ LPVOID GetCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ PORTABILITY_ASSERT("MUThunk not implemented on this platform");
+ return NULL;
+ }
+#endif // !_TARGET_X86_
+};
+#include <poppack.h>
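+
+// Illustrative disassembly of the thunk bytes above, once GetMUThunkHelper
+// below has filled them in:
+//
+//     pop  eax              ; grab the return address
+//     push cookie           ; push the VASigCookie as a hidden argument
+//     push eax              ; repush the return address
+//     mov  eax, target      ; load the real unmanaged target
+//     jmp  GenericPInvokeCalliHelper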
+
+
+//
+// A hashtable for u->m thunks not represented in the fixup tables.
+//
+class MUThunkHash : public CClosedHashBase {
+ private:
+ //----------------------------------------------------
+ // Hash key for CClosedHashBase
+ //----------------------------------------------------
+ struct UTHKey {
+ LPVOID m_pTarget;
+ PCCOR_SIGNATURE m_pSig;
+ DWORD m_cSig;
+ };
+
+ //----------------------------------------------------
+ // Hash entry for CClosedHashBase
+ //----------------------------------------------------
+ struct UTHEntry {
+ UTHKey m_key;
+ ELEMENTSTATUS m_status;
+ MUThunk *m_pMUThunk;
+ };
+
+ public:
+ MUThunkHash(Module *pModule) :
+ CClosedHashBase(
+#ifdef _DEBUG
+ 3,
+#else // !_DEBUG
+ 17, // CClosedHashTable will grow as necessary
+#endif // !_DEBUG
+
+ sizeof(UTHEntry),
+ FALSE
+ ),
+ m_crst(CrstMUThunkHash)
+
+ {
+ WRAPPER_NO_CONTRACT;
+ m_pModule = pModule;
+ }
+
+ ~MUThunkHash()
+ {
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ DESTRUCTOR_CHECK;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ }
+ CONTRACT_END
+
+ UTHEntry *phe = (UTHEntry*)GetFirst();
+ while (phe) {
+ delete (BYTE*)phe->m_pMUThunk->m_pSig;
+ DeleteExecutable(phe->m_pMUThunk);
+ phe = (UTHEntry*)GetNext((BYTE*)phe);
+ }
+
+ RETURN;
+ }
+
+
+#ifdef FEATURE_MIXEDMODE
+ public:
+ LPVOID GetMUThunk(LPVOID pTarget, PCCOR_SIGNATURE pSig0, DWORD cSig)
+ {
+ STATIC_CONTRACT_THROWS;
+
+ // A persistent copy of the sig
+ NewArrayHolder<COR_SIGNATURE> sigHolder = new COR_SIGNATURE[cSig];
+
+ memcpyNoGCRefs(sigHolder.GetValue(), pSig0, cSig);
+ sigHolder[0] = IMAGE_CEE_CS_CALLCONV_STDCALL;
+
+ // Have to lookup cookie eagerly because once we've added a blank
+ // entry to the hashtable, it's not easy to tolerate failure.
+ VASigCookie *pCookie = m_pModule->GetVASigCookie(Signature(sigHolder, cSig));
+
+ if (pCookie == NULL)
+ {
+ return NULL;
+ }
+ sigHolder.SuppressRelease();
+ return GetMUThunkHelper(pTarget, sigHolder, cSig, pCookie);
+ }
+private:
+ LPVOID GetMUThunkHelper(LPVOID pTarget, PCCOR_SIGNATURE pSig, DWORD cSig, VASigCookie *pCookie)
+ {
+ CONTRACT (LPVOID)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END
+
+ UTHEntry *phe;
+ CrstHolder ch(&m_crst);
+
+ UTHKey key;
+ key.m_pTarget = pTarget;
+ key.m_pSig = pSig;
+ key.m_cSig = cSig;
+
+ bool bNew;
+ phe = (UTHEntry*)FindOrAdd((LPVOID)&key, /*modifies*/bNew);
+
+ if (phe)
+ {
+ if (bNew)
+ {
+ phe->m_pMUThunk = new (executable) MUThunk;
+ phe->m_pMUThunk->m_pCookie = pCookie;
+ phe->m_pMUThunk->m_pSig = pSig;
+ phe->m_pMUThunk->m_pTarget = pTarget;
+#ifdef _TARGET_X86_
+ phe->m_pMUThunk->m_op1 = 0x58; //POP EAX
+ phe->m_pMUThunk->m_op2 = 0x68; //PUSH
+ phe->m_pMUThunk->m_opcookie = (UINT32)(size_t)pCookie;
+                phe->m_pMUThunk->m_op3 = 0x50; //PUSH EAX (repush return address)
+ phe->m_pMUThunk->m_op4 = 0xb8; //mov eax
+ phe->m_pMUThunk->m_optarget = (UINT32)(size_t)pTarget;
+ phe->m_pMUThunk->m_jmp = 0xe9; //jmp
+ phe->m_pMUThunk->m_jmptarg = (UINT32)(GetEEFuncEntryPoint(GenericPInvokeCalliHelper) - ((size_t)( 1 + &(phe->m_pMUThunk->m_jmptarg))));
+#else // !_TARGET_X86_
+ PORTABILITY_ASSERT("MUThunkHash not implemented on this platform");
+#endif // !_TARGET_X86_
+
+ phe->m_key = key;
+ phe->m_status = USED;
+ }
+ else
+ {
+ delete[] (BYTE*)pSig;
+ }
+ }
+ else
+ {
+ delete[] (BYTE*)pSig;
+ }
+
+ if (phe)
+ RETURN (LPVOID)(phe->m_pMUThunk->GetCode());
+ else
+ RETURN NULL;
+ }
+#endif // FEATURE_MIXEDMODE
+
+public:
+
+ // *** OVERRIDES FOR CClosedHashBase ***/
+
+ //*****************************************************************************
+ // Hash is called with a pointer to an element in the table. You must override
+ // this method and provide a hash algorithm for your element type.
+ //*****************************************************************************
+ virtual unsigned int Hash( // The key value.
+ void const *pData) // Raw data to hash.
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ UTHKey *pKey = (UTHKey*)pData;
+ return (ULONG)(size_t)(pKey->m_pTarget);
+ }
+
+
+ //*****************************************************************************
+    // Compare is used in the typical memcmp way: 0 is equality, -1/1 indicate
+ // direction of miscompare. In this system everything is always equal or not.
+ //*****************************************************************************
+ unsigned int Compare( // 0, -1, or 1.
+ void const *pData, // Raw key data on lookup.
+ BYTE *pElement) // The element to compare data against.
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ UTHKey *pkey1 = (UTHKey*)pData;
+ UTHKey *pkey2 = &( ((UTHEntry*)pElement)->m_key );
+
+ if (pkey1->m_pTarget != pkey2->m_pTarget)
+ return 1;
+
+ if (S_OK != MetaSig::CompareMethodSigsNT(pkey1->m_pSig, pkey1->m_cSig, m_pModule, NULL, pkey2->m_pSig, pkey2->m_cSig, m_pModule, NULL))
+ return 1;
+
+ return 0;
+ }
+
+ //*****************************************************************************
+    // Returns the status of the given element.
+ //*****************************************************************************
+ virtual ELEMENTSTATUS Status( // The status of the entry.
+ BYTE *pElement) // The element to check.
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return ((UTHEntry*)pElement)->m_status;
+ }
+
+ //*****************************************************************************
+ // Sets the status of the given element.
+ //*****************************************************************************
+ virtual void SetStatus(
+ BYTE *pElement, // The element to set status for.
+ ELEMENTSTATUS eStatus) // New status.
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ ((UTHEntry*)pElement)->m_status = eStatus;
+ }
+
+ //*****************************************************************************
+ // Returns the internal key value for an element.
+ //*****************************************************************************
+ virtual void *GetKey( // The data to hash on.
+ BYTE *pElement) // The element to return data ptr for.
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (BYTE*) &(((UTHEntry*)pElement)->m_key);
+ }
+
+
+
+ Module *m_pModule;
+ Crst m_crst;
+};
+#endif // FEATURE_MIXEDMODE
+
+
+// ===========================================================================
+// Module
+// ===========================================================================
+
+//---------------------------------------------------------------------------------------------------
+// This wrapper just invokes the real initialization inside a try/hook.
+// szName is not null only for dynamic modules
+//---------------------------------------------------------------------------------------------------
+void Module::DoInit(AllocMemTracker *pamTracker, LPCWSTR szName)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackModuleLoads());
+ GCX_COOP();
+ g_profControlBlock.pProfInterface->ModuleLoadStarted((ModuleID) this);
+ END_PIN_PROFILER();
+ }
+ // Need TRY/HOOK instead of holder so we can get HR of exception thrown for profiler callback
+ EX_TRY
+#endif
+ {
+ Initialize(pamTracker, szName);
+ }
+#ifdef PROFILING_SUPPORTED
+
+
+ EX_HOOK
+ {
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackModuleLoads());
+ g_profControlBlock.pProfInterface->ModuleLoadFinished((ModuleID) this, GET_EXCEPTION()->GetHR());
+ END_PIN_PROFILER();
+ }
+ }
+ EX_END_HOOK;
+
+#endif
+}
+
+// Set the given bit on m_dwTransientFlags. Return true if we won the race to set the bit.
+BOOL Module::SetTransientFlagInterlocked(DWORD dwFlag)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ for (;;)
+ {
+ DWORD dwTransientFlags = m_dwTransientFlags;
+ if ((dwTransientFlags & dwFlag) != 0)
+ return FALSE;
+ if ((DWORD)FastInterlockCompareExchange((LONG*)&m_dwTransientFlags, dwTransientFlags | dwFlag, dwTransientFlags) == dwTransientFlags)
+ return TRUE;
+ }
+}
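+
+// Usage note (illustrative): the compare-exchange loop above lets exactly one
+// caller win the race, so call sites can guard one-shot work, e.g.
+//
+//     if (SetTransientFlagInterlocked(IS_PROFILER_NOTIFIED))
+//     {
+//         // only the first thread to set the bit gets here
+//     }
+//
+// as NotifyProfilerLoadFinished below does.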
+
+#if PROFILING_SUPPORTED
+void Module::NotifyProfilerLoadFinished(HRESULT hr)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+    // Note that in general we will reuse shared modules, so we need to make sure we only notify
+ // the profiler once.
+ if (SetTransientFlagInterlocked(IS_PROFILER_NOTIFIED))
+ {
+ // Record how many types are already present
+ DWORD countTypesOrig = 0;
+ DWORD countExportedTypesOrig = 0;
+ if (!IsResource())
+ {
+ countTypesOrig = GetMDImport()->GetCountWithTokenKind(mdtTypeDef);
+ countExportedTypesOrig = GetMDImport()->GetCountWithTokenKind(mdtExportedType);
+ }
+
+ // Notify the profiler, this may cause metadata to be updated
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackModuleLoads());
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->ModuleLoadFinished((ModuleID) this, hr);
+
+ if (SUCCEEDED(hr))
+ {
+ g_profControlBlock.pProfInterface->ModuleAttachedToAssembly((ModuleID) this,
+ (AssemblyID)m_pAssembly);
+ }
+ }
+ END_PIN_PROFILER();
+ }
+
+ // If there are more types than before, add these new types to the
+ // assembly
+ if (!IsResource())
+ {
+ DWORD countTypesAfterProfilerUpdate = GetMDImport()->GetCountWithTokenKind(mdtTypeDef);
+ DWORD countExportedTypesAfterProfilerUpdate = GetMDImport()->GetCountWithTokenKind(mdtExportedType);
+ // typeDefs rids 0 and 1 aren't included in the count, thus X typeDefs before means rid X+1 was valid and our incremental addition should start at X+2
+ for (DWORD typeDefRid = countTypesOrig + 2; typeDefRid < countTypesAfterProfilerUpdate + 2; typeDefRid++)
+ {
+ GetAssembly()->AddType(this, TokenFromRid(typeDefRid, mdtTypeDef));
+ }
+ // exportedType rid 0 isn't included in the count, thus X exportedTypes before means rid X was valid and our incremental addition should start at X+1
+ for (DWORD exportedTypeDef = countExportedTypesOrig + 1; exportedTypeDef < countExportedTypesAfterProfilerUpdate + 1; exportedTypeDef++)
+ {
+ GetAssembly()->AddExportedType(TokenFromRid(exportedTypeDef, mdtExportedType));
+ }
+ }
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAssemblyLoads());
+ if (IsManifest())
+ {
+ GCX_COOP();
+ g_profControlBlock.pProfInterface->AssemblyLoadFinished((AssemblyID) m_pAssembly, hr);
+ }
+ END_PIN_PROFILER();
+ }
+ }
+}
+
+#ifndef CROSSGEN_COMPILE
+IMetaDataEmit *Module::GetValidatedEmitter()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pValidatedEmitter.Load() == NULL)
+ {
+        // In the past profilers could call any API they wanted on the IMetaDataEmit interface and we didn't
+ // verify anything. To ensure we don't break back-compat the verifications are not enabled by default.
+ // Right now I have only added verifications for NGEN images, but in the future we might want verifications
+ // for all modules.
+ IMetaDataEmit* pEmit = NULL;
+ if (CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_ProfAPI_ValidateNGENInstrumentation) && HasNativeImage())
+ {
+ ProfilerMetadataEmitValidator* pValidator = new ProfilerMetadataEmitValidator(GetEmitter());
+ pValidator->QueryInterface(IID_IMetaDataEmit, (void**)&pEmit);
+ }
+ else
+ {
+ pEmit = GetEmitter();
+ pEmit->AddRef();
+ }
+ // Atomically swap it into the field (release it if we lose the race)
+ if (FastInterlockCompareExchangePointer(&m_pValidatedEmitter, pEmit, NULL) != NULL)
+ {
+ pEmit->Release();
+ }
+ }
+ return m_pValidatedEmitter.Load();
+}
+#endif // CROSSGEN_COMPILE
+#endif // PROFILING_SUPPORTED
+
+void Module::NotifyEtwLoadFinished(HRESULT hr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ // we report only successful loads
+ if (SUCCEEDED(hr) &&
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ KEYWORDZERO))
+ {
+ BOOL fSharedModule = !SetTransientFlagInterlocked(IS_ETW_NOTIFIED);
+ ETW::LoaderLog::ModuleLoad(this, fSharedModule);
+ }
+}
+
+// Module initialization occurs in two phases: the constructor phase and the Initialize phase.
+//
+// The constructor phase initializes just enough so that Destruct() can be safely called.
+// It cannot throw or fail.
+//
+Module::Module(Assembly *pAssembly, mdFile moduleRef, PEFile *file)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ PREFIX_ASSUME(pAssembly != NULL);
+
+ m_pAssembly = pAssembly;
+ m_moduleRef = moduleRef;
+ m_file = file;
+ m_dwTransientFlags = CLASSES_FREED;
+
+ if (!m_file->HasNativeImage())
+ {
+ // Memory allocated on LoaderHeap is zero-filled. Spot-check it here.
+ _ASSERTE(m_pBinder == NULL);
+ _ASSERTE(m_symbolFormat == eSymbolFormatNone);
+ }
+
+ file->AddRef();
+}
+
+
+#ifdef FEATURE_PREJIT
+
+void Module::InitializeNativeImage(AllocMemTracker* pamTracker)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(HasNativeImage());
+ }
+ CONTRACTL_END;
+
+ if(m_pModuleSecurityDescriptor)
+ {
+ _ASSERTE(m_pModuleSecurityDescriptor->GetModule() == this);
+ }
+
+ PEImageLayout * pNativeImage = GetNativeImage();
+
+ ExecutionManager::AddNativeImageRange(dac_cast<TADDR>(pNativeImage->GetBase()), pNativeImage->GetVirtualSize(), this);
+
+ CORCOMPILE_VERSION_INFO * pNativeVersionInfo = pNativeImage->GetNativeVersionInfoMaybeNull();
+ if ((pNativeVersionInfo != NULL) && (pNativeVersionInfo->wConfigFlags & CORCOMPILE_CONFIG_INSTRUMENTATION))
+ {
+ m_nativeImageProfiling = GetAssembly()->IsInstrumented();
+ }
+
+ // Link the module to the profile data list if available.
+ COUNT_T cbProfileList;
+ m_methodProfileList = pNativeImage->GetNativeProfileDataList(&cbProfileList);
+#ifdef FEATURE_LAZY_COW_PAGES
+ if (cbProfileList)
+ EnsureWritablePages(m_methodProfileList, cbProfileList);
+#endif
+
+#ifndef CROSSGEN_COMPILE
+ LoadTokenTables();
+ LoadHelperTable();
+#endif // CROSSGEN_COMPILE
+
+#if defined(HAVE_GCCOVER)
+ if (GCStress<cfg_instr_ngen>::IsEnabled())
+ {
+ // Setting up gc coverage requires the base system classes
+ // to be initialized. So we must defer this for mscorlib.
+ if(!IsSystem())
+ {
+ SetupGcCoverageForNativeImage(this);
+ }
+ }
+#endif // defined(HAVE_GCCOVER)
+}
+
+void Module::SetNativeMetadataAssemblyRefInCache(DWORD rid, PTR_Assembly pAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_NativeMetadataAssemblyRefMap == NULL)
+ {
+ IMDInternalImport* pImport = GetNativeAssemblyImport();
+ DWORD dwMaxRid = pImport->GetCountWithTokenKind(mdtAssemblyRef);
+ _ASSERTE(dwMaxRid > 0);
+
+ S_SIZE_T dwAllocSize = S_SIZE_T(sizeof(PTR_Assembly)) * S_SIZE_T(dwMaxRid);
+
+ AllocMemTracker amTracker;
+ PTR_Assembly * NativeMetadataAssemblyRefMap = (PTR_Assembly *) amTracker.Track( GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(dwAllocSize) );
+
+ // Note: Memory allocated on loader heap is zero filled
+
+ if (InterlockedCompareExchangeT<PTR_Assembly *>(&m_NativeMetadataAssemblyRefMap, NativeMetadataAssemblyRefMap, NULL) == NULL)
+ amTracker.SuppressRelease();
+ }
+ _ASSERTE(m_NativeMetadataAssemblyRefMap != NULL);
+
+ _ASSERTE(rid <= GetNativeAssemblyImport()->GetCountWithTokenKind(mdtAssemblyRef));
+ m_NativeMetadataAssemblyRefMap[rid-1] = pAssembly;
+}
+#else // FEATURE_PREJIT
+BOOL Module::IsPersistedObject(void *address)
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+
+#endif // FEATURE_PREJIT
+
+// Module initialization occurs in two phases: the constructor phase and the Initialize phase.
+//
+// The Initialize() phase completes the initialization after the constructor has run.
+// It can throw exceptions but whether it throws or succeeds, it must leave the Module
+// in a state where Destruct() can be safely called.
+//
+// szName is only used by dynamic modules, see ReflectionModule::Initialize
+//
+//
+void Module::Initialize(AllocMemTracker *pamTracker, LPCWSTR szName)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ PRECONDITION(szName == NULL);
+ }
+ CONTRACTL_END;
+
+ m_pSimpleName = m_file->GetSimpleName();
+
+ m_Crst.Init(CrstModule);
+ m_LookupTableCrst.Init(CrstModuleLookupTable, CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD));
+ m_FixupCrst.Init(CrstModuleFixup, (CrstFlags)(CRST_HOST_BREAKABLE|CRST_REENTRANCY));
+ m_InstMethodHashTableCrst.Init(CrstInstMethodHashTable, CRST_REENTRANCY);
+ m_ISymUnmanagedReaderCrst.Init(CrstISymUnmanagedReader, CRST_DEBUGGER_THREAD);
+
+ if (!m_file->HasNativeImage())
+ {
+ AllocateMaps();
+
+ if (IsSystem() ||
+ (strcmp(m_pSimpleName, "System") == 0) ||
+ (strcmp(m_pSimpleName, "System.Core") == 0) ||
+ (strcmp(m_pSimpleName, "Windows.Foundation") == 0))
+ {
+ FastInterlockOr(&m_dwPersistedFlags, LOW_LEVEL_SYSTEM_ASSEMBLY_BY_NAME);
+ }
+
+ _ASSERT(m_pModuleSecurityDescriptor == NULL);
+ m_pModuleSecurityDescriptor = new ModuleSecurityDescriptor(this);
+ }
+
+ m_dwTransientFlags &= ~((DWORD)CLASSES_FREED); // Set flag indicating LookupMaps are now in a consistent and destructable state
+
+ // Initialize the instance fields that we need for all non-Resource Modules
+ if (!IsResource())
+ {
+ if (m_pAvailableClasses == NULL)
+ {
+ m_pAvailableClasses = EEClassHashTable::Create(this,
+ GetAssembly()->IsCollectible() ? AVAILABLE_CLASSES_HASH_BUCKETS_COLLECTIBLE : AVAILABLE_CLASSES_HASH_BUCKETS,
+ FALSE /* bCaseInsensitive */, pamTracker);
+ }
+
+ if (m_pAvailableParamTypes == NULL)
+ {
+ m_pAvailableParamTypes = EETypeHashTable::Create(GetLoaderAllocator(), this, PARAMTYPES_HASH_BUCKETS, pamTracker);
+ }
+
+ if (m_pInstMethodHashTable == NULL)
+ {
+ m_pInstMethodHashTable = InstMethodHashTable::Create(GetLoaderAllocator(), this, PARAMMETHODS_HASH_BUCKETS, pamTracker);
+ }
+
+ if(m_pMemberRefToDescHashTable == NULL)
+ {
+ if (IsReflection())
+ {
+ m_pMemberRefToDescHashTable = MemberRefToDescHashTable::Create(this, MEMBERREF_MAP_INITIAL_SIZE, pamTracker);
+ }
+ else
+ {
+ IMDInternalImport * pImport = GetMDImport();
+
+ // Get #MemberRefs and create memberrefToDesc hash table
+ m_pMemberRefToDescHashTable = MemberRefToDescHashTable::Create(this, pImport->GetCountWithTokenKind(mdtMemberRef)+1, pamTracker);
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (IsCompilationProcess() && m_pGuidToTypeHash == NULL)
+ {
+ // only allocate this during NGEN-ing
+ m_pGuidToTypeHash = GuidToMethodTableHashTable::Create(this, GUID_TO_TYPE_HASH_BUCKETS, pamTracker);
+ }
+#endif // FEATURE_COMINTEROP
+ }
+
+ if (GetAssembly()->IsDomainNeutral() && !IsSingleAppDomain())
+ {
+ m_ModuleIndex = Module::AllocateModuleIndex();
+ m_ModuleID = (DomainLocalModule*)Module::IndexToID(m_ModuleIndex);
+ }
+ else
+ {
+ // this will be initialized a bit later.
+ m_ModuleID = NULL;
+ m_ModuleIndex.m_dwIndex = (SIZE_T)-1;
+ }
+
+#ifdef FEATURE_COLLECTIBLE_TYPES
+ if (GetAssembly()->IsCollectible())
+ {
+ FastInterlockOr(&m_dwPersistedFlags, COLLECTIBLE_MODULE);
+ }
+#endif // FEATURE_COLLECTIBLE_TYPES
+
+ // Prepare statics that are known at module load time
+ AllocateStatics(pamTracker);
+
+#ifdef FEATURE_PREJIT
+ // Set up native image
+ if (HasNativeImage())
+ InitializeNativeImage(pamTracker);
+#ifdef FEATURE_READYTORUN
+ else
+ if (!IsResource())
+ m_pReadyToRunInfo = ReadyToRunInfo::Initialize(this, pamTracker);
+#endif
+#endif // FEATURE_PREJIT
+
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ if (g_CorCompileVerboseLevel)
+ m_pNgenStats = new NgenStats();
+#endif
+
+ if (!IsResource() && (m_AssemblyRefByNameTable == NULL))
+ {
+ Module::CreateAssemblyRefByNameTable(pamTracker);
+ }
+
+ // If the program has the "ForceEnc" env variable set we ensure every eligible
+ // module has EnC turned on.
+ if (g_pConfig->ForceEnc() && IsEditAndContinueCapable())
+ EnableEditAndContinue();
+
+ LOG((LF_CLASSLOADER, LL_INFO10, "Loaded pModule: \"%ws\".\n", GetDebugName()));
+
+}
+
+#endif // DACCESS_COMPILE
+
+
+#ifdef FEATURE_COMINTEROP
+
+#ifndef DACCESS_COMPILE
+
+// static
+GuidToMethodTableHashTable* GuidToMethodTableHashTable::Create(Module* pModule, DWORD cInitialBuckets,
+ AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
+ }
+ CONTRACTL_END;
+
+ LoaderHeap *pHeap = pModule->GetAssembly()->GetLowFrequencyHeap();
+ GuidToMethodTableHashTable *pThis = (GuidToMethodTableHashTable*)pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(GuidToMethodTableHashTable)));
+
+    // The base class gets initialized through constructor chaining. We allocated the hash instance via the
+    // loader heap instead of new, so use an in-place new to call the constructors now.
+ new (pThis) GuidToMethodTableHashTable(pModule, pHeap, cInitialBuckets);
+
+ return pThis;
+}
+
+GuidToMethodTableEntry *GuidToMethodTableHashTable::InsertValue(PTR_GUID pGuid, PTR_MethodTable pMT,
+ BOOL bReplaceIfFound, AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
+ }
+ CONTRACTL_END;
+
+ GuidToMethodTableEntry *pEntry = NULL;
+ LookupContext ctx;
+
+ if (bReplaceIfFound)
+ {
+ pEntry = FindItem(pGuid, NULL);
+ }
+
+ if (pEntry != NULL)
+ {
+ pEntry->m_pMT = pMT;
+ }
+ else
+ {
+ pEntry = BaseAllocateEntry(pamTracker);
+ pEntry->m_Guid = pGuid;
+ pEntry->m_pMT = pMT;
+
+ DWORD hash = Hash(pGuid);
+ BaseInsertEntry(hash, pEntry);
+ }
+
+ return pEntry;
+}
+
+#endif // !DACCESS_COMPILE
+
+PTR_MethodTable GuidToMethodTableHashTable::GetValue(const GUID * pGuid, LookupContext *pContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pGuid));
+ }
+ CONTRACTL_END;
+
+ GuidToMethodTableEntry * pEntry = FindItem(pGuid, pContext);
+ if (pEntry != NULL)
+ {
+ return pEntry->m_pMT;
+ }
+
+ return NULL;
+}
+
+GuidToMethodTableEntry *GuidToMethodTableHashTable::FindItem(const GUID * pGuid, LookupContext *pContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pGuid));
+ }
+ CONTRACTL_END;
+
+ // It's legal for the caller not to pass us a LookupContext, but we might need to iterate
+ // internally (since we lookup via hash and hashes may collide). So substitute our own
+ // private context if one was not provided.
+ LookupContext sAltContext;
+ if (pContext == NULL)
+ pContext = &sAltContext;
+
+ // The base class provides the ability to enumerate all entries with the same hash code.
+ // We further check which of these entries actually match the full key.
+ PTR_GuidToMethodTableEntry pSearch = BaseFindFirstEntryByHash(Hash(pGuid), pContext);
+ while (pSearch)
+ {
+ if (CompareKeys(pSearch, pGuid))
+ {
+ return pSearch;
+ }
+
+ pSearch = BaseFindNextEntryByHash(pContext);
+ }
+
+ return NULL;
+}
+
+BOOL GuidToMethodTableHashTable::CompareKeys(PTR_GuidToMethodTableEntry pEntry, const GUID * pGuid)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return *pGuid == *(pEntry->m_Guid);
+}
+
+DWORD GuidToMethodTableHashTable::Hash(const GUID * pGuid)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ static_assert_no_msg(sizeof(GUID) % sizeof(DWORD) == 0);
+ static_assert_no_msg(sizeof(GUID) / sizeof(DWORD) == 4);
+ DWORD * pSlice = (DWORD*) pGuid;
+ return pSlice[0] ^ pSlice[1] ^ pSlice[2] ^ pSlice[3];
+}
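+
+// Illustrative note: the hash above XOR-folds the four DWORDs of the GUID, so
+// all 128 bits of the key contribute to the 32-bit hash value and equal GUIDs
+// trivially hash to the same bucket.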
+
+
+BOOL GuidToMethodTableHashTable::FindNext(Iterator *it, GuidToMethodTableEntry **ppEntry)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (!it->m_fIterating)
+ {
+ BaseInitIterator(&it->m_sIterator);
+ it->m_fIterating = true;
+ }
+
+ *ppEntry = it->m_sIterator.Next();
+ return *ppEntry ? TRUE : FALSE;
+}
+
+DWORD GuidToMethodTableHashTable::GetCount()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return BaseGetElementCount();
+}
+
+#if defined(FEATURE_NATIVE_IMAGE_GENERATION) && !defined(DACCESS_COMPILE)
+
+void GuidToMethodTableHashTable::Save(DataImage *pImage, CorProfileData *pProfileData)
+{
+ WRAPPER_NO_CONTRACT;
+ Base_t::BaseSave(pImage, pProfileData);
+}
+
+void GuidToMethodTableHashTable::Fixup(DataImage *pImage)
+{
+ WRAPPER_NO_CONTRACT;
+ Base_t::BaseFixup(pImage);
+}
+
+bool GuidToMethodTableHashTable::SaveEntry(DataImage *pImage, CorProfileData *pProfileData,
+ GuidToMethodTableEntry *pOldEntry, GuidToMethodTableEntry *pNewEntry,
+ EntryMappingTable *pMap)
+{
+ LIMITED_METHOD_CONTRACT;
+ return false;
+}
+
+void GuidToMethodTableHashTable::FixupEntry(DataImage *pImage, GuidToMethodTableEntry *pEntry, void *pFixupBase, DWORD cbFixupOffset)
+{
+ WRAPPER_NO_CONTRACT;
+ pImage->FixupField(pFixupBase, cbFixupOffset + offsetof(GuidToMethodTableEntry, m_pMT), pEntry->m_pMT);
+ pImage->FixupField(pFixupBase, cbFixupOffset + offsetof(GuidToMethodTableEntry, m_Guid), pEntry->m_Guid);
+}
+
+#endif // FEATURE_NATIVE_IMAGE_GENERATION && !DACCESS_COMPILE
+
+
+#ifdef FEATURE_PREJIT
+
+#ifndef DACCESS_COMPILE
+BOOL Module::CanCacheWinRTTypeByGuid(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsCompilationProcess());
+ }
+ CONTRACTL_END;
+
+ // Don't cache mscorlib-internal declarations of WinRT types.
+ if (IsSystem() && pMT->IsProjectedFromWinRT())
+ return FALSE;
+
+ // Don't cache redirected WinRT types.
+ if (WinRTTypeNameConverter::IsRedirectedWinRTSourceType(pMT))
+ return FALSE;
+
+ // Don't cache in a module that's not the NGen target, since the result
+    // won't be saved, and since such a module might be read-only.
+ if (GetAppDomain()->ToCompilationDomain()->GetTargetModule() != this)
+ return FALSE;
+
+ return TRUE;
+}
+
+void Module::CacheWinRTTypeByGuid(PTR_MethodTable pMT, PTR_GuidInfo pgi /*= NULL*/)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(pMT->IsLegalNonArrayWinRTType());
+ PRECONDITION(pgi != NULL || pMT->GetGuidInfo() != NULL);
+ PRECONDITION(IsCompilationProcess());
+ }
+ CONTRACTL_END;
+
+ if (pgi == NULL)
+ {
+ pgi = pMT->GetGuidInfo();
+ }
+
+ AllocMemTracker amt;
+ m_pGuidToTypeHash->InsertValue(&pgi->m_Guid, pMT, TRUE, &amt);
+ amt.SuppressRelease();
+}
+
+#endif // !DACCESS_COMPILE
+
+PTR_MethodTable Module::LookupTypeByGuid(const GUID & guid)
+{
+ WRAPPER_NO_CONTRACT;
+ // Triton ni images do not have this hash.
+ if (m_pGuidToTypeHash != NULL)
+ return m_pGuidToTypeHash->GetValue(&guid, NULL);
+ else
+ return NULL;
+}
+
+void Module::GetCachedWinRTTypes(SArray<PTR_MethodTable> * pTypes, SArray<GUID> * pGuids)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ // Triton ni images do not have this hash.
+ if (m_pGuidToTypeHash != NULL)
+ {
+ GuidToMethodTableHashTable::Iterator it(m_pGuidToTypeHash);
+ GuidToMethodTableEntry *pEntry;
+ while (m_pGuidToTypeHash->FindNext(&it, &pEntry))
+ {
+ pTypes->Append(pEntry->m_pMT);
+ pGuids->Append(*pEntry->m_Guid);
+ }
+ }
+}
+
+#endif // FEATURE_PREJIT
+
+#endif // FEATURE_COMINTEROP
+
+#ifndef DACCESS_COMPILE
+MemberRefToDescHashTable* MemberRefToDescHashTable::Create(Module *pModule, DWORD cInitialBuckets, AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
+ }
+ CONTRACTL_END;
+
+ LoaderHeap *pHeap = pModule->GetAssembly()->GetLowFrequencyHeap();
+ MemberRefToDescHashTable *pThis = (MemberRefToDescHashTable*)pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(MemberRefToDescHashTable)));
+
+    // The base class gets initialized through constructor chaining. We allocated the hash instance via the
+    // loader heap instead of new, so use an in-place new to call the constructors now.
+ new (pThis) MemberRefToDescHashTable(pModule, pHeap, cInitialBuckets);
+
+ return pThis;
+}
+
+//Inserts FieldRef
+MemberRefToDescHashEntry* MemberRefToDescHashTable::Insert(mdMemberRef token , FieldDesc *value)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
+ }
+ CONTRACTL_END;
+
+ LookupContext sAltContext;
+
+ _ASSERTE((dac_cast<TADDR>(value) & IS_FIELD_MEMBER_REF) == 0);
+
+ MemberRefToDescHashEntry *pEntry = (PTR_MemberRefToDescHashEntry) BaseFindFirstEntryByHash(RidFromToken(token), &sAltContext);
+ if (pEntry != NULL)
+ {
+        // If the memberRef is a hot token, its entry was already persisted in the NGen image,
+        // so it will already be present in the hash table. However, its value will be null;
+        // we need to set the actual value.
+ if(pEntry->m_value == dac_cast<TADDR>(NULL))
+ {
+ EnsureWritablePages(&(pEntry->m_value));
+ pEntry->m_value = dac_cast<TADDR>(value)|IS_FIELD_MEMBER_REF;
+ }
+
+ _ASSERTE(pEntry->m_value == (dac_cast<TADDR>(value)|IS_FIELD_MEMBER_REF));
+ return pEntry;
+ }
+
+    // For non-hot tokens, insert a new entry in the hash table
+ pEntry = BaseAllocateEntry(NULL);
+ pEntry->m_value = dac_cast<TADDR>(value)|IS_FIELD_MEMBER_REF;
+ BaseInsertEntry(RidFromToken(token), pEntry);
+
+ return pEntry;
+}
+
+// Insert MethodRef
+MemberRefToDescHashEntry* MemberRefToDescHashTable::Insert(mdMemberRef token , MethodDesc *value)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
+ }
+ CONTRACTL_END;
+
+ LookupContext sAltContext;
+
+ MemberRefToDescHashEntry *pEntry = (PTR_MemberRefToDescHashEntry) BaseFindFirstEntryByHash(RidFromToken(token), &sAltContext);
+ if (pEntry != NULL)
+ {
+        // If the memberRef is a hot token, its entry was already persisted in the NGen image,
+        // so it will already be present in the hash table. However, its value will be null;
+        // we need to set the actual value.
+ if(pEntry->m_value == dac_cast<TADDR>(NULL))
+ {
+ EnsureWritablePages(&(pEntry->m_value));
+ pEntry->m_value = dac_cast<TADDR>(value);
+ }
+
+ _ASSERTE(pEntry->m_value == dac_cast<TADDR>(value));
+ return pEntry;
+ }
+
+ // For non-hot tokens, insert a new entry into the hash table
+ pEntry = BaseAllocateEntry(NULL);
+ pEntry->m_value = dac_cast<TADDR>(value);
+ BaseInsertEntry(RidFromToken(token), pEntry);
+
+ return pEntry;
+}
+
+#if defined(FEATURE_NATIVE_IMAGE_GENERATION)
+void MemberRefToDescHashTable::Save(DataImage *pImage, CorProfileData *pProfileData)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Mark if the tokens are hot
+ if (pProfileData)
+ {
+ DWORD numInTokenList = pProfileData->GetHotTokens(mdtMemberRef>>24, 1<<RidMap, 1<<RidMap, NULL, 0);
+
+ if (numInTokenList > 0)
+ {
+ LookupContext sAltContext;
+
+ mdToken *tokenList = (mdToken*)(void*)pImage->GetModule()->GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(mdToken)) * S_SIZE_T(numInTokenList));
+
+ pProfileData->GetHotTokens(mdtMemberRef>>24, 1<<RidMap, 1<<RidMap, tokenList, numInTokenList);
+ for (DWORD i = 0; i < numInTokenList; i++)
+ {
+ DWORD rid = RidFromToken(tokenList[i]);
+ MemberRefToDescHashEntry *pEntry = (PTR_MemberRefToDescHashEntry) BaseFindFirstEntryByHash(RidFromToken(tokenList[i]), &sAltContext);
+ if (pEntry != NULL)
+ {
+ _ASSERTE((pEntry->m_value & 0x1) == 0);
+ pEntry->m_value |= 0x1;
+ }
+ }
+ }
+ }
+
+ BaseSave(pImage, pProfileData);
+}
+
+void MemberRefToDescHashTable::FixupEntry(DataImage *pImage, MemberRefToDescHashEntry *pEntry, void *pFixupBase, DWORD cbFixupOffset)
+{
+ // As there is no longer any hard binding, initialize the MemberRef* to NULL
+ pImage->ZeroPointerField(pFixupBase, cbFixupOffset + offsetof(MemberRefToDescHashEntry, m_value));
+}
+
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#endif // !DACCESS_COMPILE
+
+PTR_MemberRef MemberRefToDescHashTable::GetValue(mdMemberRef token, BOOL *pfIsMethod)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ LookupContext sAltContext;
+
+ MemberRefToDescHashEntry *pEntry = (PTR_MemberRefToDescHashEntry) BaseFindFirstEntryByHash(RidFromToken(token), &sAltContext);
+ if (pEntry != NULL)
+ {
+ if(pEntry->m_value & IS_FIELD_MEMBER_REF)
+ *pfIsMethod = FALSE;
+ else
+ *pfIsMethod = TRUE;
+ return (PTR_MemberRef)(pEntry->m_value & (~MEMBER_REF_MAP_ALL_FLAGS));
+ }
+
+ return NULL;
+}
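+
+// Illustrative (hypothetical) caller of the lookup above, showing how the
+// IS_FIELD_MEMBER_REF tag bit is consumed; pTable, UseMethod and UseField are
+// placeholder names, not identifiers defined in this file:
+//
+//   BOOL fIsMethod;
+//   PTR_MemberRef pMember = pTable->GetValue(tkMemberRef, &fIsMethod);
+//   if (pMember != NULL)
+//   {
+//       if (fIsMethod)
+//           UseMethod(dac_cast<PTR_MethodDesc>(pMember)); // tag bit was clear
+//       else
+//           UseField(dac_cast<PTR_FieldDesc>(pMember));   // IS_FIELD_MEMBER_REF was set
+//   }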
+
+
+void Module::SetDebuggerInfoBits(DebuggerAssemblyControlFlags newBits)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(((newBits << DEBUGGER_INFO_SHIFT_PRIV) &
+ ~DEBUGGER_INFO_MASK_PRIV) == 0);
+
+ m_dwTransientFlags &= ~DEBUGGER_INFO_MASK_PRIV;
+ m_dwTransientFlags |= (newBits << DEBUGGER_INFO_SHIFT_PRIV);
+
+#ifdef DEBUGGING_SUPPORTED
+ BOOL setEnC = ((newBits & DACF_ENC_ENABLED) != 0) && IsEditAndContinueCapable() && !GetAssembly()->IsDomainNeutral();
+
+ // The only way EnC can change is through a debugger override.
+ if (setEnC)
+ {
+ EnableEditAndContinue();
+ }
+ else
+ {
+ if (!g_pConfig->ForceEnc())
+ DisableEditAndContinue();
+ }
+#endif // DEBUGGING_SUPPORTED
+
+#if defined(DACCESS_COMPILE)
+ // Now that we've changed m_dwTransientFlags, update that in the target too.
+ // This will fail for a read-only target.
+ // If this fails, it will throw an exception.
+ // @dbgtodo dac write: finalize on plans for how DAC writes to the target.
+ HRESULT hrDac;
+ hrDac = DacWriteHostInstance(this, true);
+ _ASSERTE(SUCCEEDED(hrDac)); // would throw if there was an error.
+#endif // DACCESS_COMPILE
+}
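+
+// Minimal sketch of the bit packing above: the debugger bits live in
+// m_dwTransientFlags under DEBUGGER_INFO_MASK_PRIV, so a reader would recover
+// them with something like:
+//
+//   DebuggerAssemblyControlFlags bits = (DebuggerAssemblyControlFlags)
+//       ((m_dwTransientFlags & DEBUGGER_INFO_MASK_PRIV) >> DEBUGGER_INFO_SHIFT_PRIV);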
+
+#ifndef DACCESS_COMPILE
+/* static */
+Module *Module::Create(Assembly *pAssembly, mdFile moduleRef, PEFile *file, AllocMemTracker *pamTracker)
+{
+ CONTRACT(Module *)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(CheckPointer(file));
+ PRECONDITION(!IsNilToken(moduleRef) || file->IsAssembly());
+ POSTCONDITION(CheckPointer(RETVAL));
+ POSTCONDITION(RETVAL->GetFile() == file);
+ }
+ CONTRACT_END;
+
+ // Hoist CONTRACT into separate routine because of EX incompatibility
+
+ Module *pModule = NULL;
+
+ // Create the module
+
+#ifdef FEATURE_PREJIT
+
+ if (file->HasNativeImage())
+ {
+ pModule = file->GetLoadedNative()->GetPersistedModuleImage();
+ PREFIX_ASSUME(pModule != NULL);
+ CONSISTENCY_CHECK_MSG(pModule->m_pAssembly == NULL || !pModule->IsTenured(), // if the module is not tenured it could be our previous attempt
+ "Native image can only be used once per process\n");
+ EnsureWritablePages(pModule);
+ pModule = new ((void*) pModule) Module(pAssembly, moduleRef, file);
+ PREFIX_ASSUME(pModule != NULL);
+ }
+
+#endif // FEATURE_PREJIT
+
+ if (pModule == NULL)
+ {
+#ifdef EnC_SUPPORTED
+ if (IsEditAndContinueCapable(file) && !pAssembly->IsDomainNeutral())
+ {
+ // If the file is EnC-capable, always create an EnC module; EnC won't necessarily be enabled, though.
+ // The debugger enables it by calling SetJITCompilerFlags during the LoadModule callback.
+
+ void* pMemory = pamTracker->Track(pAssembly->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(EditAndContinueModule))));
+ pModule = new (pMemory) EditAndContinueModule(pAssembly, moduleRef, file);
+ }
+ else
+#endif // EnC_SUPPORTED
+ {
+ void* pMemory = pamTracker->Track(pAssembly->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(Module))));
+ pModule = new (pMemory) Module(pAssembly, moduleRef, file);
+ }
+ }
+
+ PREFIX_ASSUME(pModule != NULL);
+ ModuleHolder pModuleSafe(pModule);
+ pModuleSafe->DoInit(pamTracker, NULL);
+
+ RETURN pModuleSafe.Extract();
+}
+
+//
+// Destructor for Module
+//
+
+void Module::Destruct()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_EEMEM, INFO3, "Deleting module %x\n", this));
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackModuleLoads());
+ if (!IsBeingUnloaded())
+ {
+ // The profiler may cause some peripheral class loads. Probably this just needs
+ // to be turned into a Fault_not_fatal and moved to a specific place inside the profiler.
+ EX_TRY
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->ModuleUnloadStarted((ModuleID) this);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+
+ DACNotify::DoModuleUnloadNotification(this);
+
+ // Free classes in the class table
+ FreeClassTables();
+
+
+#if defined(FEATURE_REMOTING) && !defined(HAS_REMOTING_PRECODE)
+ // Destroy thunks for all methods included in the hash table.
+ if (m_pInstMethodHashTable != NULL)
+ {
+ InstMethodHashTable::Iterator it(m_pInstMethodHashTable);
+ InstMethodHashEntry *pEntry;
+
+ while (m_pInstMethodHashTable->FindNext(&it, &pEntry))
+ {
+ MethodDesc *pMD = pEntry->GetMethod();
+ if (!pMD->IsRestored())
+ continue;
+
+ if(pMD->GetMethodTable()->IsMarshaledByRef())
+ CRemotingServices::DestroyThunk(pMD);
+ }
+ }
+#endif // FEATURE_REMOTING && !HAS_REMOTING_PRECODE
+
+#ifdef DEBUGGING_SUPPORTED
+ if (g_pDebugInterface)
+ {
+ GCX_PREEMP();
+ g_pDebugInterface->DestructModule(this);
+ }
+
+#endif // DEBUGGING_SUPPORTED
+
+ ReleaseISymUnmanagedReader();
+
+ // Clean up sig cookies
+ VASigCookieBlock *pVASigCookieBlock = m_pVASigCookieBlock;
+ while (pVASigCookieBlock)
+ {
+ VASigCookieBlock *pNext = pVASigCookieBlock->m_Next;
+ delete pVASigCookieBlock;
+
+ pVASigCookieBlock = pNext;
+ }
+
+ // Clean up the IL stub cache
+ if (m_pILStubCache != NULL)
+ {
+ delete m_pILStubCache;
+ }
+
+#ifdef FEATURE_MIXEDMODE // IJW
+ delete m_pMUThunkHash;
+ delete m_pThunkHeap;
+#endif // FEATURE_MIXEDMODE // IJW
+
+
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackModuleLoads());
+ // The profiler may cause some peripheral class loads. Probably this just needs
+ // to be turned into a Fault_not_fatal and moved to a specific place inside the profiler.
+ EX_TRY
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->ModuleUnloadFinished((ModuleID) this, S_OK);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ END_PIN_PROFILER();
+ }
+
+ if (m_pValidatedEmitter.Load() != NULL)
+ {
+ m_pValidatedEmitter->Release();
+ }
+#endif // PROFILING_SUPPORTED
+
+ //
+ // Warning - deleting the zap file will cause the module to be unmapped
+ //
+ ClearInMemorySymbolStream();
+
+ m_Crst.Destroy();
+ m_FixupCrst.Destroy();
+ m_LookupTableCrst.Destroy();
+ m_InstMethodHashTableCrst.Destroy();
+ m_ISymUnmanagedReaderCrst.Destroy();
+
+ if (m_pCerPrepInfo)
+ {
+ _ASSERTE(m_pCerCrst != NULL);
+ CrstHolder sCrstHolder(m_pCerCrst);
+
+ EEHashTableIteration sIter;
+ m_pCerPrepInfo->IterateStart(&sIter);
+ while (m_pCerPrepInfo->IterateNext(&sIter)) {
+ CerPrepInfo *pPrepInfo = (CerPrepInfo*)m_pCerPrepInfo->IterateGetValue(&sIter);
+ delete pPrepInfo;
+ }
+
+ delete m_pCerPrepInfo;
+ }
+ if (m_pCerCrst)
+ delete m_pCerCrst;
+
+ if (m_debuggerSpecificData.m_pDynamicILCrst)
+ {
+ delete m_debuggerSpecificData.m_pDynamicILCrst;
+ }
+
+ if (m_debuggerSpecificData.m_pDynamicILBlobTable)
+ {
+ delete m_debuggerSpecificData.m_pDynamicILBlobTable;
+ }
+
+ if (m_debuggerSpecificData.m_pTemporaryILBlobTable)
+ {
+ delete m_debuggerSpecificData.m_pTemporaryILBlobTable;
+ }
+
+ if (m_debuggerSpecificData.m_pILOffsetMappingTable)
+ {
+ for (ILOffsetMappingTable::Iterator pCurElem = m_debuggerSpecificData.m_pILOffsetMappingTable->Begin(),
+ pEndElem = m_debuggerSpecificData.m_pILOffsetMappingTable->End();
+ pCurElem != pEndElem;
+ pCurElem++)
+ {
+ ILOffsetMappingEntry entry = *pCurElem;
+ entry.m_mapping.Clear();
+ }
+ delete m_debuggerSpecificData.m_pILOffsetMappingTable;
+ }
+
+#ifdef FEATURE_PREJIT
+ if (m_pCerNgenRootTable && (m_dwTransientFlags & M_CER_ROOT_TABLE_ON_HEAP))
+ delete m_pCerNgenRootTable;
+
+ if (HasNativeImage())
+ {
+ m_file->Release();
+ }
+ else
+#endif // FEATURE_PREJIT
+ {
+ m_file->Release();
+
+ if (m_pModuleSecurityDescriptor)
+ delete m_pModuleSecurityDescriptor;
+ }
+
+ // If this module was loaded as domain-specific, then
+ // we must free its ModuleIndex so that it can be reused
+ FreeModuleIndex();
+}
+
+#ifdef FEATURE_PREJIT
+void Module::DeleteNativeCodeRanges()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ if (HasNativeImage())
+ {
+ PEImageLayout * pNativeImage = GetNativeImage();
+
+ ExecutionManager::DeleteRange(dac_cast<TADDR>(pNativeImage->GetBase()));
+ }
+}
+#endif
+
+bool Module::NeedsGlobalMethodTable()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ IMDInternalImport * pImport = GetMDImport();
+ if (!IsResource() && pImport->IsValidToken(COR_GLOBAL_PARENT_TOKEN))
+ {
+ {
+ HENUMInternalHolder funcEnum(pImport);
+ funcEnum.EnumGlobalFunctionsInit();
+ if (pImport->EnumGetCount(&funcEnum) != 0)
+ return true;
+ }
+
+ {
+ HENUMInternalHolder fieldEnum(pImport);
+ fieldEnum.EnumGlobalFieldsInit();
+ if (pImport->EnumGetCount(&fieldEnum) != 0)
+ return true;
+ }
+ }
+
+ // Resource module, or no global statics and no global functions
+ return false;
+}
+
+
+MethodTable *Module::GetGlobalMethodTable()
+{
+ CONTRACT (MethodTable *)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(CONTRACT_RETURN NULL;);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+
+ if ((m_dwPersistedFlags & COMPUTED_GLOBAL_CLASS) == 0)
+ {
+ MethodTable *pMT = NULL;
+
+ if (NeedsGlobalMethodTable())
+ {
+ pMT = ClassLoader::LoadTypeDefThrowing(this, COR_GLOBAL_PARENT_TOKEN,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef).AsMethodTable();
+ }
+
+ FastInterlockOr(&m_dwPersistedFlags, COMPUTED_GLOBAL_CLASS);
+ RETURN pMT;
+ }
+ else
+ {
+ RETURN LookupTypeDef(COR_GLOBAL_PARENT_TOKEN).AsMethodTable();
+ }
+}
+
+
+#endif // !DACCESS_COMPILE
+
+#ifdef FEATURE_PREJIT
+
+/*static*/
+BOOL Module::IsAlwaysSavedInPreferredZapModule(Instantiation classInst, // the type arguments to the type (if any)
+ Instantiation methodInst) // the type arguments to the method (if any)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return ClassLoader::IsTypicalSharedInstantiation(classInst) &&
+ ClassLoader::IsTypicalSharedInstantiation(methodInst);
+}
+
+// This gets called recursively for generics, so do a stack probe.
+PTR_Module Module::ComputePreferredZapModule(Module * pDefinitionModule,
+ Instantiation classInst,
+ Instantiation methodInst)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ PTR_Module ret = NULL;
+ INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(DontCallDirectlyForceStackOverflow());
+
+ ret = Module::ComputePreferredZapModuleHelper( pDefinitionModule,
+ classInst,
+ methodInst );
+ END_INTERIOR_STACK_PROBE;
+ return ret;
+}
+
+//
+// Is pModule likely a dependency of pOtherModule? Heuristic used by the preferred zap module algorithm.
+// It can return both false positives and false negatives.
+//
+// Keep in sync with tools\mdilbind\mdilmodule.cpp
+//
+static bool IsLikelyDependencyOf(Module * pModule, Module * pOtherModule)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pOtherModule));
+ }
+ CONTRACTL_END
+
+ // Every module has a dependency with itself
+ if (pModule == pOtherModule)
+ return true;
+
+ //
+ // The explicit check for low-level system assemblies works around the Win8P facades introducing an extra layer between the low-level
+ // system assemblies (System.dll or System.Core.dll) and the app assemblies. Because of this extra layer, the check below won't see the
+ // direct reference between these low-level system assemblies and the app assemblies. The preferred zap module for instantiations of
+ // generic collections from these low-level system assemblies (like LinkedList<AppType>) should be the module of AppType; without this
+ // check it would be the module of the generic collection. On desktop (FEATURE_FULL_NGEN defined), that would result in inefficient code,
+ // because the instantiations would be speculative. On CoreCLR (FEATURE_FULL_NGEN not defined), it would result in the instantiations
+ // not getting saved into the native image at all.
+ //
+ // A similar problem exists for Windows.Foundation.winmd. There is a cycle between Windows.Foundation.winmd and Windows.Storage.winmd.
+ // This cycle would cause the preferred zap module for instantiations of foundation types (like IAsyncOperation<StorageFolder>) to be
+ // Windows.Foundation.winmd. That is a bad choice; it should be Windows.Storage.winmd instead. We explicitly push Windows.Foundation to
+ // a lower level by treating it as a low-level system assembly to avoid this problem.
+ //
+ if (pModule->IsLowLevelSystemAssemblyByName())
+ {
+ if (!pOtherModule->IsLowLevelSystemAssemblyByName())
+ return true;
+
+ // Every module depends upon mscorlib
+ if (pModule->IsSystem())
+ return true;
+
+ // mscorlib does not depend upon any other module
+ if (pOtherModule->IsSystem())
+ return false;
+ }
+ else
+ {
+ if (pOtherModule->IsLowLevelSystemAssemblyByName())
+ return false;
+ }
+
+ // At this point neither pModule nor pOtherModule is mscorlib
+
+#ifndef DACCESS_COMPILE
+ //
+ // We will check to see if the pOtherModule has a reference to pModule
+ //
+
+ // If we can match the assembly ref in the ManifestModuleReferencesMap we can early out.
+ // This early out kicks in less than half of the time. It hurts performance on average.
+ // if (!IsNilToken(pOtherModule->FindAssemblyRef(pModule->GetAssembly())))
+ // return true;
+
+ if (pOtherModule->HasReferenceByName(pModule->GetSimpleName()))
+ return true;
+#endif // DACCESS_COMPILE
+
+ return false;
+}
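+
+// Worked examples (illustrative) of the heuristic above:
+//   IsLikelyDependencyOf(mscorlib, App.exe) == true  (every module depends upon mscorlib)
+//   IsLikelyDependencyOf(App.exe, mscorlib) == false (mscorlib depends upon no other module)
+// For two ordinary modules the answer falls through to the name-based
+// HasReferenceByName check, which is where the false positives and negatives come from.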
+
+// Determine the "preferred ngen home" for an instantiated type or method
+// * This is the first ngen module that the loader will look in;
+// * Also, we only hard bind to a type or method that lives in its preferred module
+// The following properties must hold for the preferred module:
+// - it must be one of the component type's declaring modules
+// - if the type or method is open then the preferred module must be that of one of the type parameters
+// (this ensures that we can always hard bind to open types and methods created during ngen)
+// - for always-saved instantiations it must be the declaring module of the generic definition
+// Otherwise, we try to pick a module that is likely to reference the type or method
+//
+/* static */
+PTR_Module Module::ComputePreferredZapModuleHelper(
+ Module * pDefinitionModule, // the module that declares the generic type or method
+ Instantiation classInst, // the type arguments to the type (if any)
+ Instantiation methodInst) // the type arguments to the method (if any)
+{
+ CONTRACT(PTR_Module)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDefinitionModule, NULL_OK));
+ // One of them will be non-null... Note we don't use CheckPointer
+ // because that raises a breakpoint in the debugger
+ PRECONDITION(pDefinitionModule != NULL || !classInst.IsEmpty() || !methodInst.IsEmpty());
+ POSTCONDITION(CheckPointer(RETVAL));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END
+
+ DWORD totalArgs = classInst.GetNumArgs() + methodInst.GetNumArgs();
+
+ // Open type parameters take precedence over closed type parameters, since
+ // we always hard bind to open types.
+ for (DWORD i = 0; i < totalArgs; i++)
+ {
+ TypeHandle thArg = (i < classInst.GetNumArgs()) ? classInst[i] : methodInst[i - classInst.GetNumArgs()];
+
+ // Encoded types are never open
+ _ASSERTE(!thArg.IsEncodedFixup());
+ Module * pOpenModule = thArg.GetDefiningModuleForOpenType();
+ if (pOpenModule != NULL)
+ RETURN dac_cast<PTR_Module>(pOpenModule);
+ }
+
+ // The initial value of pCurrentPZM is the pDefinitionModule or mscorlib
+ Module* pCurrentPZM = (pDefinitionModule != NULL) ? pDefinitionModule : MscorlibBinder::GetModule();
+ bool preferredZapModuleBasedOnValueType = false;
+
+ for (DWORD i = 0; i < totalArgs; i++)
+ {
+ TypeHandle pTypeParam = (i < classInst.GetNumArgs()) ? classInst[i] : methodInst[i - classInst.GetNumArgs()];
+
+ _ASSERTE(pTypeParam != NULL);
+ _ASSERTE(!pTypeParam.IsEncodedFixup());
+
+ Module * pParamPZM = GetPreferredZapModuleForTypeHandle(pTypeParam);
+
+ //
+ // If pCurrentPZM is not a dependency of pParamPZM
+ // then we aren't going to update pCurrentPZM
+ //
+ if (IsLikelyDependencyOf(pCurrentPZM, pParamPZM))
+ {
+ // If we have a type parameter that is a value type
+ // and we don't yet have a value-type-based pCurrentPZM,
+ // then we will select its module as the new pCurrentPZM.
+ //
+ if (pTypeParam.IsValueType() && !preferredZapModuleBasedOnValueType)
+ {
+ pCurrentPZM = pParamPZM;
+ preferredZapModuleBasedOnValueType = true;
+ }
+ else
+ {
+ // The normal rule is to replace the pCurrentPZM only when
+ // both of the following are true:
+ // pCurrentPZM is a dependency of pParamPZM
+ // and pParamPZM is not a dependency of pCurrentPZM
+ //
+ // note that the second condition is always true when pCurrentPZM is mscorlib
+ //
+ if (!IsLikelyDependencyOf(pParamPZM, pCurrentPZM))
+ {
+ pCurrentPZM = pParamPZM;
+ }
+ }
+ }
+ }
+
+ RETURN dac_cast<PTR_Module>(pCurrentPZM);
+}
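+
+// Worked example (illustrative) of the selection above: for LinkedList<AppType>,
+// with LinkedList<T> defined in System.dll (a low-level system assembly) and
+// AppType defined in App.exe, the loop picks App.exe as the preferred zap module,
+// per the facade discussion in IsLikelyDependencyOf. For an open type such as
+// LinkedList<T> itself, the first loop returns the module defining the type
+// variable, so open instantiations can always be hard bound.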
+
+PTR_Module Module::ComputePreferredZapModule(TypeKey *pKey)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (pKey->GetKind() == ELEMENT_TYPE_CLASS)
+ {
+ return Module::ComputePreferredZapModule(pKey->GetModule(),
+ pKey->GetInstantiation());
+ }
+ else if (pKey->GetKind() != ELEMENT_TYPE_FNPTR)
+ return Module::GetPreferredZapModuleForTypeHandle(pKey->GetElementType());
+ else
+ return NULL;
+
+}
+
+/* see code:Module::ComputePreferredZapModuleHelper for more */
+/*static*/
+PTR_Module Module::GetPreferredZapModuleForMethodTable(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ PTR_Module pRet=NULL;
+
+ INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(10, NO_FORBIDGC_LOADER_USE_ThrowSO(););
+
+ if (pMT->IsArray())
+ {
+ TypeHandle elemTH = pMT->GetApproxArrayElementTypeHandle();
+ pRet= ComputePreferredZapModule(NULL, Instantiation(&elemTH, 1));
+ }
+ else if (pMT->HasInstantiation() && !pMT->IsGenericTypeDefinition())
+ {
+ pRet= ComputePreferredZapModule(pMT->GetModule(),
+ pMT->GetInstantiation());
+ }
+ else
+ {
+ // If it is uninstantiated or it is the generic type definition itself
+ // then its loader module is simply the module containing its TypeDef
+ pRet= pMT->GetModule();
+ }
+ END_INTERIOR_STACK_PROBE;
+ return pRet;
+}
+
+
+/*static*/
+PTR_Module Module::GetPreferredZapModuleForTypeDesc(PTR_TypeDesc pTD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ SUPPORTS_DAC;
+ if (pTD->HasTypeParam())
+ return GetPreferredZapModuleForTypeHandle(pTD->GetTypeParam());
+ else if (pTD->IsGenericVariable())
+ return pTD->GetModule();
+
+ _ASSERTE(pTD->GetInternalCorElementType() == ELEMENT_TYPE_FNPTR);
+ PTR_FnPtrTypeDesc pFnPtrTD = dac_cast<PTR_FnPtrTypeDesc>(pTD);
+
+ // The result type of the function type is used for the preferred zap module
+ return GetPreferredZapModuleForTypeHandle(pFnPtrTD->GetRetAndArgTypesPointer()[0]);
+}
+
+/*static*/
+PTR_Module Module::GetPreferredZapModuleForTypeHandle(TypeHandle t)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ SUPPORTS_DAC;
+ if (t.IsTypeDesc())
+ return GetPreferredZapModuleForTypeDesc(t.AsTypeDesc());
+ else
+ return GetPreferredZapModuleForMethodTable(t.AsMethodTable());
+}
+
+/*static*/
+PTR_Module Module::GetPreferredZapModuleForMethodDesc(const MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pMD->IsTypicalMethodDefinition())
+ {
+ return PTR_Module(pMD->GetModule());
+ }
+ else if (pMD->IsGenericMethodDefinition())
+ {
+ return GetPreferredZapModuleForMethodTable(pMD->GetMethodTable());
+ }
+ else
+ {
+ return ComputePreferredZapModule(pMD->GetModule(),
+ pMD->GetClassInstantiation(),
+ pMD->GetMethodInstantiation());
+ }
+}
+
+/* see code:Module::ComputePreferredZapModuleHelper for more */
+/*static*/
+PTR_Module Module::GetPreferredZapModuleForFieldDesc(FieldDesc * pFD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // The approx MT is sufficient: it's always the one that owns the FieldDesc
+ // data structure
+ return GetPreferredZapModuleForMethodTable(pFD->GetApproxEnclosingMethodTable());
+}
+#endif // FEATURE_PREJIT
+
+
+
+BOOL Module::IsManifest()
+{
+ WRAPPER_NO_CONTRACT;
+ return dac_cast<TADDR>(GetAssembly()->GetManifestModule()) ==
+ dac_cast<TADDR>(this);
+}
+
+DomainAssembly* Module::GetDomainAssembly(AppDomain *pDomain)
+{
+ CONTRACT(DomainAssembly *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pDomain, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL));
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ if (IsManifest())
+ RETURN (DomainAssembly *) GetDomainFile(pDomain);
+ else
+ RETURN (DomainAssembly *) m_pAssembly->GetDomainAssembly(pDomain);
+}
+
+DomainFile *Module::GetDomainFile(AppDomain *pDomain)
+{
+ CONTRACT(DomainFile *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pDomain));
+ POSTCONDITION(CheckPointer(RETVAL));
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ if (Module::IsEncodedModuleIndex(GetModuleID()))
+ {
+ DomainLocalBlock *pLocalBlock = pDomain->GetDomainLocalBlock();
+ DomainFile *pDomainFile = pLocalBlock->TryGetDomainFile(GetModuleIndex());
+
+#if !defined(DACCESS_COMPILE) && defined(FEATURE_LOADER_OPTIMIZATION)
+ if (pDomainFile == NULL)
+ pDomainFile = pDomain->LoadDomainNeutralModuleDependency(this, FILE_LOADED);
+#endif // !DACCESS_COMPILE
+
+ RETURN (PTR_DomainFile) pDomainFile;
+ }
+ else
+ {
+
+ CONSISTENCY_CHECK(dac_cast<TADDR>(pDomain) == dac_cast<TADDR>(GetDomain()) || IsSingleAppDomain());
+ RETURN dac_cast<PTR_DomainFile>(m_ModuleID->GetDomainFile());
+ }
+}
+
+DomainAssembly* Module::FindDomainAssembly(AppDomain *pDomain)
+{
+ CONTRACT(DomainAssembly *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pDomain));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ if (IsManifest())
+ RETURN dac_cast<PTR_DomainAssembly>(FindDomainFile(pDomain));
+ else
+ RETURN m_pAssembly->FindDomainAssembly(pDomain);
+}
+
+DomainModule *Module::GetDomainModule(AppDomain *pDomain)
+{
+ CONTRACT(DomainModule *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pDomain));
+ PRECONDITION(!IsManifest());
+ POSTCONDITION(CheckPointer(RETVAL));
+
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ RETURN (DomainModule *) GetDomainFile(pDomain);
+}
+
+DomainFile *Module::FindDomainFile(AppDomain *pDomain)
+{
+ CONTRACT(DomainFile *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pDomain));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ if (Module::IsEncodedModuleIndex(GetModuleID()))
+ {
+ DomainLocalBlock *pLocalBlock = pDomain->GetDomainLocalBlock();
+ RETURN pLocalBlock->TryGetDomainFile(GetModuleIndex());
+ }
+ else
+ {
+ if (dac_cast<TADDR>(pDomain) == dac_cast<TADDR>(GetDomain()) || IsSingleAppDomain())
+ RETURN m_ModuleID->GetDomainFile();
+ else
+ RETURN NULL;
+ }
+}
+
+DomainModule *Module::FindDomainModule(AppDomain *pDomain)
+{
+ CONTRACT(DomainModule *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pDomain));
+ PRECONDITION(!IsManifest());
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ RETURN (DomainModule *) FindDomainFile(pDomain);
+}
+
+#ifndef DACCESS_COMPILE
+#include "staticallocationhelpers.inl"
+
+// Parses metadata and initializes offsets of per-class static blocks.
+void Module::BuildStaticsOffsets(AllocMemTracker *pamTracker)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Trade off here. We want a slot for each type. That way we can get to 2 bits per class and
+ // index directly and not need a mapping from ClassID to MethodTable (we will use the RID
+ // as the mapping)
+ IMDInternalImport *pImport = GetMDImport();
+
+ DWORD * pRegularStaticOffsets = NULL;
+ DWORD * pThreadStaticOffsets = NULL;
+
+ // Get the number of types/classes defined in this module. Add 1 to count the module itself
+ DWORD dwNumTypes = pImport->GetCountWithTokenKind(mdtTypeDef) + 1; // +1 for module type
+
+ // [0] covers regular statics, [1] covers thread statics
+ DWORD dwGCHandles[2] = { 0, 0 };
+
+ // Organization in memory of the static block
+ //
+ //
+ //                                        | GC Statics            |
+ //                                                    |
+ //                                                    |
+ //   | Class Data (one byte per class)   | pointer to gc statics | primitive type statics |
+ //
+ //
+ DWORD dwNonGCBytes[2] = {
+ DomainLocalModule::OffsetOfDataBlob() + sizeof(BYTE)*dwNumTypes,
+ ThreadLocalModule::OffsetOfDataBlob() + sizeof(BYTE)*dwNumTypes
+ };
+
+ HENUMInternalHolder hTypeEnum(pImport);
+ hTypeEnum.EnumAllInit(mdtTypeDef);
+
+ mdTypeDef type;
+ // Parse each type defined in the module
+ while (pImport->EnumNext(&hTypeEnum, &type))
+ {
+ // Set offset for this type
+ DWORD dwIndex = RidFromToken(type) - 1;
+
+ // [0] covers regular statics, [1] covers thread statics
+ DWORD dwAlignment[2] = { 1, 1 };
+ DWORD dwClassNonGCBytes[2] = { 0, 0 };
+ DWORD dwClassGCHandles[2] = { 0, 0 };
+
+ // Need to check whether the type is generic and, if so, exclude it from iteration, as we don't know its size
+ HENUMInternalHolder hGenericEnum(pImport);
+ hGenericEnum.EnumInit(mdtGenericParam, type);
+ ULONG cGenericParams = pImport->EnumGetCount(&hGenericEnum);
+ if (cGenericParams == 0)
+ {
+ HENUMInternalHolder hFieldEnum(pImport);
+ hFieldEnum.EnumInit(mdtFieldDef, type);
+
+ mdFieldDef field;
+ // Parse each field of the type
+ while (pImport->EnumNext(&hFieldEnum, &field))
+ {
+ BOOL fSkip = FALSE;
+
+ CorElementType ElementType = ELEMENT_TYPE_END;
+ mdToken tkValueTypeToken = 0;
+ int kk; // Use one set of variables for regular statics, and the other set for thread statics
+
+ fSkip = GetStaticFieldElementTypeForFieldDef(this, pImport, field, &ElementType, &tkValueTypeToken, &kk);
+ if (fSkip)
+ continue;
+
+ // We account for "regular statics" and "thread statics" separately.
+ // Currently we are lumping RVA and context statics into "regular statics",
+ // but we probably shouldn't.
+ switch (ElementType)
+ {
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_BOOLEAN:
+ dwClassNonGCBytes[kk] += 1;
+ break;
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_CHAR:
+ dwAlignment[kk] = max(2, dwAlignment[kk]);
+ dwClassNonGCBytes[kk] += 2;
+ break;
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_R4:
+ dwAlignment[kk] = max(4, dwAlignment[kk]);
+ dwClassNonGCBytes[kk] += 4;
+ break;
+ case ELEMENT_TYPE_FNPTR:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ dwAlignment[kk] = max((1 << LOG2_PTRSIZE), dwAlignment[kk]);
+ dwClassNonGCBytes[kk] += (1 << LOG2_PTRSIZE);
+ break;
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R8:
+ dwAlignment[kk] = max(8, dwAlignment[kk]);
+ dwClassNonGCBytes[kk] += 8;
+ break;
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_OBJECT:
+ dwClassGCHandles[kk] += 1;
+ break;
+ case ELEMENT_TYPE_VALUETYPE:
+ // Statics for valuetypes where the valuetype is defined in this module are handled here. Other valuetype statics utilize the pessimistic model below.
+ dwClassGCHandles[kk] += 1;
+ break;
+ case ELEMENT_TYPE_END:
+ default:
+ // The actual element type was ELEMENT_TYPE_VALUETYPE, but as we don't want to load additional assemblies
+ // to determine these static offsets, we fall back to a pessimistic model.
+ if (tkValueTypeToken != 0)
+ {
+ // We'll have to be pessimistic here
+ dwClassNonGCBytes[kk] += MAX_PRIMITIVE_FIELD_SIZE;
+ dwAlignment[kk] = max(MAX_PRIMITIVE_FIELD_SIZE, dwAlignment[kk]);
+
+ dwClassGCHandles[kk] += 1;
+ break;
+ }
+ else
+ {
+ // field has an unexpected type
+ ThrowHR(VER_E_FIELD_SIG);
+ break;
+ }
+ }
+ }
+
+ if (pRegularStaticOffsets == NULL && (dwClassGCHandles[0] != 0 || dwClassNonGCBytes[0] != 0))
+ {
+ // Lazily allocate the offset table. We need offsets for both the GC and non-GC areas, plus one extra slot pair used as a sentinel.
+ pRegularStaticOffsets = (PTR_DWORD)pamTracker->Track(
+ GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(
+ (S_SIZE_T(2 * sizeof(DWORD))*(S_SIZE_T(dwNumTypes)+S_SIZE_T(1)))));
+
+ for (DWORD i = 0; i < dwIndex; i++) {
+ pRegularStaticOffsets[i * 2 ] = dwGCHandles[0]*sizeof(OBJECTREF);
+ pRegularStaticOffsets[i * 2 + 1] = dwNonGCBytes[0];
+ }
+ }
+
+ if (pThreadStaticOffsets == NULL && (dwClassGCHandles[1] != 0 || dwClassNonGCBytes[1] != 0))
+ {
+ // Lazily allocate the offset table. We need offsets for both the GC and non-GC areas, plus one extra slot pair used as a sentinel.
+ pThreadStaticOffsets = (PTR_DWORD)pamTracker->Track(
+ GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(
+ (S_SIZE_T(2 * sizeof(DWORD))*(S_SIZE_T(dwNumTypes)+S_SIZE_T(1)))));
+
+ for (DWORD i = 0; i < dwIndex; i++) {
+ pThreadStaticOffsets[i * 2 ] = dwGCHandles[1]*sizeof(OBJECTREF);
+ pThreadStaticOffsets[i * 2 + 1] = dwNonGCBytes[1];
+ }
+ }
+ }
+
+ if (pRegularStaticOffsets != NULL)
+ {
+ // Align the offset of non gc statics
+ dwNonGCBytes[0] = (DWORD) ALIGN_UP(dwNonGCBytes[0], dwAlignment[0]);
+
+ // Save current offsets
+ pRegularStaticOffsets[dwIndex*2] = dwGCHandles[0]*sizeof(OBJECTREF);
+ pRegularStaticOffsets[dwIndex*2 + 1] = dwNonGCBytes[0];
+
+ // Increment for next class
+ dwGCHandles[0] += dwClassGCHandles[0];
+ dwNonGCBytes[0] += dwClassNonGCBytes[0];
+ }
+
+ if (pThreadStaticOffsets != NULL)
+ {
+ // Align the offset of non gc statics
+ dwNonGCBytes[1] = (DWORD) ALIGN_UP(dwNonGCBytes[1], dwAlignment[1]);
+
+ // Save current offsets
+ pThreadStaticOffsets[dwIndex*2] = dwGCHandles[1]*sizeof(OBJECTREF);
+ pThreadStaticOffsets[dwIndex*2 + 1] = dwNonGCBytes[1];
+
+ // Increment for next class
+ dwGCHandles[1] += dwClassGCHandles[1];
+ dwNonGCBytes[1] += dwClassNonGCBytes[1];
+ }
+ }
+
+ m_maxTypeRidStaticsAllocated = dwNumTypes;
+
+ if (pRegularStaticOffsets != NULL)
+ {
+ pRegularStaticOffsets[dwNumTypes*2] = dwGCHandles[0]*sizeof(OBJECTREF);
+ pRegularStaticOffsets[dwNumTypes*2 + 1] = dwNonGCBytes[0];
+ }
+
+ if (pThreadStaticOffsets != NULL)
+ {
+ pThreadStaticOffsets[dwNumTypes*2] = dwGCHandles[1]*sizeof(OBJECTREF);
+ pThreadStaticOffsets[dwNumTypes*2 + 1] = dwNonGCBytes[1];
+ }
+
+ m_pRegularStaticOffsets = pRegularStaticOffsets;
+ m_pThreadStaticOffsets = pThreadStaticOffsets;
+
+ m_dwMaxGCRegularStaticHandles = dwGCHandles[0];
+ m_dwMaxGCThreadStaticHandles = dwGCHandles[1];
+
+ m_dwRegularStaticsBlockSize = dwNonGCBytes[0];
+ m_dwThreadStaticsBlockSize = dwNonGCBytes[1];
+}
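+
+// Layout sketch of the offset tables built above: for the type with rid r
+// (index i = r - 1), each table stores a pair of running offsets
+//
+//   table[i*2]     = GC-handle offset   (dwGCHandles * sizeof(OBJECTREF) so far)
+//   table[i*2 + 1] = non-GC byte offset (dwNonGCBytes so far, aligned)
+//
+// plus one sentinel pair at index dwNumTypes, so a class's space can be bounded
+// as table[(i+1)*2] - table[i*2] (see the range checks in
+// GetOffsetsForRegularStaticData / GetOffsetsForThreadStaticData below).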
+
+void Module::GetOffsetsForRegularStaticData(
+ mdToken cl,
+ BOOL bDynamic, DWORD dwGCStaticHandles,
+ DWORD dwNonGCStaticBytes,
+ DWORD * pOutStaticHandleOffset,
+ DWORD * pOutNonGCStaticOffset)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ *pOutStaticHandleOffset = 0;
+ *pOutNonGCStaticOffset = 0;
+
+ if (!dwGCStaticHandles && !dwNonGCStaticBytes)
+ {
+ return;
+ }
+
+ // Statics for instantiated types are allocated dynamically per-instantiation
+ if (bDynamic)
+ {
+ // Non GC statics are embedded in the Dynamic Entry.
+ *pOutNonGCStaticOffset = DomainLocalModule::DynamicEntry::GetOffsetOfDataBlob();
+ return;
+ }
+
+ if (m_pRegularStaticOffsets == NULL)
+ {
+ THROW_BAD_FORMAT(BFA_METADATA_CORRUPT, this);
+ }
+ _ASSERTE(m_pRegularStaticOffsets != (PTR_DWORD) NGEN_STATICS_ALLCLASSES_WERE_LOADED);
+
+ // We allocate in the big blob.
+ DWORD index = RidFromToken(cl) - 1;
+
+ *pOutStaticHandleOffset = m_pRegularStaticOffsets[index*2];
+
+ *pOutNonGCStaticOffset = m_pRegularStaticOffsets[index*2 + 1];
+
+ // Check we didn't exceed what we predicted we would need for the class
+ if (*pOutStaticHandleOffset + sizeof(OBJECTREF*)*dwGCStaticHandles >
+ m_pRegularStaticOffsets[(index+1)*2] ||
+ *pOutNonGCStaticOffset + dwNonGCStaticBytes >
+ m_pRegularStaticOffsets[(index+1)*2 + 1])
+ { // It's most likely that this is due to bad metadata, thus the exception. However, the
+ // previous comments for this bit of code mentioned that this could be a corner case bug
+ // with static field size estimation, though this is entirely unlikely since the code has
+ // been this way for at least two releases.
+ THROW_BAD_FORMAT(BFA_METADATA_CORRUPT, this);
+ }
+}
+
+
+void Module::GetOffsetsForThreadStaticData(
+ mdToken cl,
+ BOOL bDynamic, DWORD dwGCStaticHandles,
+ DWORD dwNonGCStaticBytes,
+ DWORD * pOutStaticHandleOffset,
+ DWORD * pOutNonGCStaticOffset)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ *pOutStaticHandleOffset = 0;
+ *pOutNonGCStaticOffset = 0;
+
+ if (!dwGCStaticHandles && !dwNonGCStaticBytes)
+ {
+ return;
+ }
+
+ // Statics for instantiated types are allocated dynamically per-instantiation
+ if (bDynamic)
+ {
+ // Non GC thread statics are embedded in the Dynamic Entry.
+ *pOutNonGCStaticOffset = ThreadLocalModule::DynamicEntry::GetOffsetOfDataBlob();
+ return;
+ }
+
+ if (m_pThreadStaticOffsets == NULL)
+ {
+ THROW_BAD_FORMAT(BFA_METADATA_CORRUPT, this);
+ }
+ _ASSERTE(m_pThreadStaticOffsets != (PTR_DWORD) NGEN_STATICS_ALLCLASSES_WERE_LOADED);
+
+ // We allocate in the big blob.
+ DWORD index = RidFromToken(cl) - 1;
+
+ *pOutStaticHandleOffset = m_pThreadStaticOffsets[index*2];
+
+ *pOutNonGCStaticOffset = m_pThreadStaticOffsets[index*2 + 1];
+
+ // Check we didn't exceed what we predicted we would need for the class
+ if (*pOutStaticHandleOffset + sizeof(OBJECTREF*)*dwGCStaticHandles >
+ m_pThreadStaticOffsets[(index+1)*2] ||
+ *pOutNonGCStaticOffset + dwNonGCStaticBytes >
+ m_pThreadStaticOffsets[(index+1)*2 + 1])
+ {
+ // It's most likely that this is due to bad metadata, thus the exception. However, the
+ // previous comments for this bit of code mentioned that this could be a corner case bug
+ // with static field size estimation, though this is entirely unlikely since the code has
+ // been this way for at least two releases.
+ THROW_BAD_FORMAT(BFA_METADATA_CORRUPT, this);
+ }
+}
+
+
+// Initialize the Crst controlling the dynamic IL hash table
+void Module::InitializeDynamicILCrst()
+{
+ Crst * pCrst = new Crst(CrstDynamicIL, CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD));
+ if (InterlockedCompareExchangeT(
+ &m_debuggerSpecificData.m_pDynamicILCrst, pCrst, NULL) != NULL)
+ {
+ delete pCrst;
+ }
+}
+
+// Add a (token, address) pair to the table of IL blobs for reflection/dynamics
+// Arguments:
+// Input:
+// token method token
+// blobAddress address of the start of the IL blob, including the header
+// fTemporaryOverride
+// is this a temporary override (stored in the temporary table) or a
+// permanent one (stored in the DynamicILBlobTable)?
+// Output: not explicit, but if the pair was not already in the table it will be added.
+// Does not add duplicate tokens to the table.
+
+void Module::SetDynamicIL(mdToken token, TADDR blobAddress, BOOL fTemporaryOverride)
+{
+ DynamicILBlobEntry entry = {mdToken(token), TADDR(blobAddress)};
+
+ // Lazily allocate a Crst to serialize update access to the info structure.
+ // Carefully synchronize to ensure we don't leak a Crst in race conditions.
+ if (m_debuggerSpecificData.m_pDynamicILCrst == NULL)
+ {
+ InitializeDynamicILCrst();
+ }
+
+ CrstHolder ch(m_debuggerSpecificData.m_pDynamicILCrst);
+
+ // Figure out which table to fill in
+ PTR_DynamicILBlobTable &table(fTemporaryOverride ? m_debuggerSpecificData.m_pTemporaryILBlobTable
+ : m_debuggerSpecificData.m_pDynamicILBlobTable);
+
+ // Lazily allocate the hash table.
+ if (table == NULL)
+ {
+ table = PTR_DynamicILBlobTable(new DynamicILBlobTable);
+ }
+ table->AddOrReplace(entry);
+}
+
+#endif // !DACCESS_COMPILE
+
+// Get the stored address of the IL blob for reflection/dynamics
+// Arguments:
+// Input:
+// token method token
+// fAllowTemporary also check the temporary overrides
+// Return Value: starting (target) address of the IL blob corresponding to the input token
+
+TADDR Module::GetDynamicIL(mdToken token, BOOL fAllowTemporary)
+{
+ SUPPORTS_DAC;
+
+#ifndef DACCESS_COMPILE
+ // The Crst to serialize update access to the info structure is lazily allocated.
+ // If it hasn't been allocated yet, then we don't have any IL blobs (temporary or otherwise)
+ if (m_debuggerSpecificData.m_pDynamicILCrst == NULL)
+ {
+ return TADDR(NULL);
+ }
+
+ CrstHolder ch(m_debuggerSpecificData.m_pDynamicILCrst);
+#endif
+
+ // Both hash tables are lazily allocated, so if they're NULL
+ // then we have no IL blobs
+
+ if (fAllowTemporary && m_debuggerSpecificData.m_pTemporaryILBlobTable != NULL)
+ {
+ DynamicILBlobEntry entry = m_debuggerSpecificData.m_pTemporaryILBlobTable->Lookup(token);
+
+ // Only return a value if the lookup succeeded
+ if (!DynamicILBlobTraits::IsNull(entry))
+ {
+ return entry.m_il;
+ }
+ }
+
+ if (m_debuggerSpecificData.m_pDynamicILBlobTable == NULL)
+ {
+ return TADDR(NULL);
+ }
+
+ DynamicILBlobEntry entry = m_debuggerSpecificData.m_pDynamicILBlobTable->Lookup(token);
+ // If the lookup fails, it returns the 'NULL' entry
+ // The 'NULL' entry has m_il set to NULL, so either way we're safe
+ return entry.m_il;
+}
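+
+// Illustrative lookup order for the tables above: with fAllowTemporary == TRUE,
+// a temporary override for a token wins over a permanent one in the
+// DynamicILBlobTable; with fAllowTemporary == FALSE, only the permanent table
+// is consulted.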
+
+#if !defined(DACCESS_COMPILE)
+//---------------------------------------------------------------------------------------
+//
+// Add instrumented IL offset mapping for the specified method.
+//
+// Arguments:
+// token - the MethodDef token of the method in question
+// mapping - the mapping information between original IL offsets and instrumented IL offsets
+//
+// Notes:
+// * Once added, the mapping stays valid until the Module containing the method is destructed.
+// * The profiler may potentially update the mapping more than once.
+//
+
+void Module::SetInstrumentedILOffsetMapping(mdMethodDef token, InstrumentedILOffsetMapping mapping)
+{
+ ILOffsetMappingEntry entry(token, mapping);
+
+ // Lazily allocate a Crst to serialize update access to the hash table.
+ // Carefully synchronize to ensure we don't leak a Crst in race conditions.
+ if (m_debuggerSpecificData.m_pDynamicILCrst == NULL)
+ {
+ InitializeDynamicILCrst();
+ }
+
+ CrstHolder ch(m_debuggerSpecificData.m_pDynamicILCrst);
+
+ // Lazily allocate the hash table.
+ if (m_debuggerSpecificData.m_pILOffsetMappingTable == NULL)
+ {
+ m_debuggerSpecificData.m_pILOffsetMappingTable = PTR_ILOffsetMappingTable(new ILOffsetMappingTable);
+ }
+
+ ILOffsetMappingEntry currentEntry = m_debuggerSpecificData.m_pILOffsetMappingTable->Lookup(ILOffsetMappingTraits::GetKey(entry));
+ if (!ILOffsetMappingTraits::IsNull(currentEntry))
+ currentEntry.m_mapping.Clear();
+
+ m_debuggerSpecificData.m_pILOffsetMappingTable->AddOrReplace(entry);
+}
+#endif // DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// Retrieve the instrumented IL offset mapping for the specified method.
+//
+// Arguments:
+// token - the MethodDef token of the method in question
+//
+// Return Value:
+// Return the mapping information between original IL offsets and instrumented IL offsets.
+// Check InstrumentedILOffsetMapping::IsNull() to see if any mapping is available.
+//
+// Notes:
+// * Once added, the mapping stays valid until the Module containing the method is destructed.
+// * The profiler may potentially update the mapping more than once.
+//
+
+InstrumentedILOffsetMapping Module::GetInstrumentedILOffsetMapping(mdMethodDef token)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ // Lazily allocate a Crst to serialize update access to the hash table.
+ // If the Crst is NULL, then we couldn't possibly have added any mapping yet, so just return an empty mapping.
+ if (m_debuggerSpecificData.m_pDynamicILCrst == NULL)
+ {
+ InstrumentedILOffsetMapping emptyMapping;
+ return emptyMapping;
+ }
+
+ CrstHolder ch(m_debuggerSpecificData.m_pDynamicILCrst);
+
+ // If the hash table hasn't been created, then we couldn't possibly have added any mapping yet,
+ // so just return an empty mapping.
+ if (m_debuggerSpecificData.m_pILOffsetMappingTable == NULL)
+ {
+ InstrumentedILOffsetMapping emptyMapping;
+ return emptyMapping;
+ }
+
+ ILOffsetMappingEntry entry = m_debuggerSpecificData.m_pILOffsetMappingTable->Lookup(token);
+ return entry.m_mapping;
+}
+
+#undef DECODE_TYPEID
+#undef ENCODE_TYPEID
+#undef IS_ENCODED_TYPEID
+
+
+
+#ifndef DACCESS_COMPILE
+
+
+BOOL Module::IsNoStringInterning()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ if (!(m_dwPersistedFlags & COMPUTED_STRING_INTERNING))
+ {
+ // The flags should be precomputed in native images
+ _ASSERTE(!HasNativeImage());
+
+ // Default is string interning
+ BOOL fNoStringInterning = FALSE;
+
+#ifdef FEATURE_LEGACYNETCF
+ // NetCF ignored this attribute
+ if (GetAppDomain()->GetAppDomainCompatMode() != BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ {
+#endif
+
+ HRESULT hr;
+
+ // This flag applies to the assembly, but it is stored on the module so it can be cached in the ngen image.
+ // Thus, we should only ever need it for the manifest module.
+ IMDInternalImport *mdImport = GetAssembly()->GetManifestImport();
+ _ASSERTE(mdImport);
+
+ mdToken token;
+ IfFailThrow(mdImport->GetAssemblyFromScope(&token));
+
+ const BYTE *pVal;
+ ULONG cbVal;
+
+ hr = mdImport->GetCustomAttributeByName(token,
+ COMPILATIONRELAXATIONS_TYPE,
+ (const void**)&pVal, &cbVal);
+
+ // Parse the attribute
+ if (hr == S_OK)
+ {
+ CustomAttributeParser cap(pVal, cbVal);
+ IfFailThrow(cap.SkipProlog());
+
+ // Get Flags
+ UINT32 flags;
+ IfFailThrow(cap.GetU4(&flags));
+
+ if (flags & CompilationRelaxations_NoStringInterning)
+ {
+ fNoStringInterning = TRUE;
+ }
+ }
+
+#ifdef FEATURE_LEGACYNETCF
+ }
+#endif
+
+#ifdef _DEBUG
+ static ConfigDWORD g_NoStringInterning;
+ DWORD dwOverride = g_NoStringInterning.val(CLRConfig::INTERNAL_NoStringInterning);
+
+ if (dwOverride == 0)
+ {
+ // Disabled
+ fNoStringInterning = FALSE;
+ }
+ else if (dwOverride == 2)
+ {
+ // Always true (testing)
+ fNoStringInterning = TRUE;
+ }
+#endif // _DEBUG
+
+ FastInterlockOr(&m_dwPersistedFlags, COMPUTED_STRING_INTERNING |
+ (fNoStringInterning ? NO_STRING_INTERNING : 0));
+ }
+
+ return !!(m_dwPersistedFlags & NO_STRING_INTERNING);
+}
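+
+// The attribute blob parsed above is what the compiler emits for assembly-level
+// source such as (C#, illustrative):
+//
+//   [assembly: System.Runtime.CompilerServices.CompilationRelaxations(
+//       System.Runtime.CompilerServices.CompilationRelaxations.NoStringInterning)]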
+
+BOOL Module::GetNeutralResourcesLanguage(LPCUTF8 * cultureName, ULONG * cultureNameLength, INT16 * fallbackLocation, BOOL cacheAttribute)
+{
+ STANDARD_VM_CONTRACT;
+
+ BOOL retVal = FALSE;
+ if (!(m_dwPersistedFlags & NEUTRAL_RESOURCES_LANGUAGE_IS_CACHED))
+ {
+ const BYTE *pVal = NULL;
+ ULONG cbVal = 0;
+
+ // This flag applies to the assembly, but it is stored on the module so it can be cached in the ngen image.
+ // Thus, we should only ever need it for the manifest module.
+ IMDInternalImport *mdImport = GetAssembly()->GetManifestImport();
+ _ASSERTE(mdImport);
+
+ mdToken token;
+ IfFailThrow(mdImport->GetAssemblyFromScope(&token));
+
+ // Check for the existence of the attribute.
+ HRESULT hr = mdImport->GetCustomAttributeByName(token,"System.Resources.NeutralResourcesLanguageAttribute",(const void **)&pVal, &cbVal);
+ if (hr == S_OK) {
+
+ // we should not have a native image (it would have been cached at ngen time)
+ _ASSERTE(!HasNativeImage());
+
+ CustomAttributeParser cap(pVal, cbVal);
+ IfFailThrow(cap.SkipProlog());
+ IfFailThrow(cap.GetString(cultureName, cultureNameLength));
+ IfFailThrow(cap.GetI2(fallbackLocation));
+ // Should only be true on Module.Save(). Update flag to show we have the attribute cached
+ if (cacheAttribute)
+ FastInterlockOr(&m_dwPersistedFlags, NEUTRAL_RESOURCES_LANGUAGE_IS_CACHED);
+
+ retVal = TRUE;
+ }
+ }
+ else
+ {
+ *cultureName = m_pszCultureName;
+ *cultureNameLength = m_CultureNameLength;
+ *fallbackLocation = m_FallbackLocation;
+ retVal = TRUE;
+
+#ifdef _DEBUG
+ // confirm that the NGENed attribute is correct
+ LPCUTF8 pszCultureNameCheck = NULL;
+ ULONG cultureNameLengthCheck = 0;
+ INT16 fallbackLocationCheck = 0;
+ const BYTE *pVal = NULL;
+ ULONG cbVal = 0;
+
+ IMDInternalImport *mdImport = GetAssembly()->GetManifestImport();
+ _ASSERTE(mdImport);
+ mdToken token;
+ IfFailThrow(mdImport->GetAssemblyFromScope(&token));
+
+ // Confirm that the attribute exists and has the same value as when we ngen'd it
+ HRESULT hr = mdImport->GetCustomAttributeByName(token,"System.Resources.NeutralResourcesLanguageAttribute",(const void **)&pVal, &cbVal);
+ _ASSERTE(hr == S_OK);
+ CustomAttributeParser cap(pVal, cbVal);
+ IfFailThrow(cap.SkipProlog());
+ IfFailThrow(cap.GetString(&pszCultureNameCheck, &cultureNameLengthCheck));
+ IfFailThrow(cap.GetI2(&fallbackLocationCheck));
+ _ASSERTE(cultureNameLengthCheck == m_CultureNameLength);
+ _ASSERTE(fallbackLocationCheck == m_FallbackLocation);
+ _ASSERTE(strncmp(pszCultureNameCheck,m_pszCultureName,m_CultureNameLength) == 0);
+#endif // _DEBUG
+ }
+
+ return retVal;
+}
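+
+// The attribute parsed above corresponds to assembly-level source such as
+// (C#, illustrative):
+//
+//   [assembly: System.Resources.NeutralResourcesLanguage("en-US",
+//       System.Resources.UltimateResourceFallbackLocation.MainAssembly)]
+//
+// The string argument becomes cultureName/cultureNameLength and the I2 enum
+// value becomes fallbackLocation.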
+
+
+#ifndef FEATURE_CORECLR
+BOOL Module::HasDefaultDllImportSearchPathsAttribute()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(IsDefaultDllImportSearchPathsAttributeCached())
+ {
+ return (m_dwPersistedFlags & DEFAULT_DLL_IMPORT_SEARCH_PATHS_STATUS) != 0 ;
+ }
+ IMDInternalImport *mdImport = GetAssembly()->GetManifestImport();
+
+ BOOL attributeIsFound = FALSE;
+ attributeIsFound = GetDefaultDllImportSearchPathsAttributeValue(mdImport, TokenFromRid(1, mdtAssembly),&m_DefaultDllImportSearchPathsAttributeValue);
+ if(attributeIsFound)
+ {
+ FastInterlockOr(&m_dwPersistedFlags, DEFAULT_DLL_IMPORT_SEARCH_PATHS_IS_CACHED | DEFAULT_DLL_IMPORT_SEARCH_PATHS_STATUS);
+ }
+ else
+ {
+ FastInterlockOr(&m_dwPersistedFlags, DEFAULT_DLL_IMPORT_SEARCH_PATHS_IS_CACHED);
+ }
+
+ return (m_dwPersistedFlags & DEFAULT_DLL_IMPORT_SEARCH_PATHS_STATUS) != 0 ;
+}
+#endif // !FEATURE_CORECLR
+
+// Returns a BOOL indicating whether we have computed if the compiler has
+// instructed us to wrap non-CLS-compliant exceptions.
+BOOL Module::IsRuntimeWrapExceptionsStatusComputed()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_dwPersistedFlags & COMPUTED_WRAP_EXCEPTIONS);
+}
+
+BOOL Module::IsRuntimeWrapExceptions()
+{
+ CONTRACTL
+ {
+ THROWS;
+ if (IsRuntimeWrapExceptionsStatusComputed()) GC_NOTRIGGER; else GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ if (!(IsRuntimeWrapExceptionsStatusComputed()))
+ {
+ // The flags should be precomputed in native images
+ _ASSERTE(!HasNativeImage());
+
+ HRESULT hr;
+ BOOL fRuntimeWrapExceptions = FALSE;
+
+ // This flag applies to the assembly, but it is stored on the module so it can be cached in the ngen image.
+ // Thus, we should only ever need it for the manifest module.
+ IMDInternalImport *mdImport = GetAssembly()->GetManifestImport();
+
+ mdToken token;
+ IfFailGo(mdImport->GetAssemblyFromScope(&token));
+
+ const BYTE *pVal;
+ ULONG cbVal;
+
+ hr = mdImport->GetCustomAttributeByName(token,
+ RUNTIMECOMPATIBILITY_TYPE,
+ (const void**)&pVal, &cbVal);
+
+ // Parse the attribute
+ if (hr == S_OK)
+ {
+ CustomAttributeParser ca(pVal, cbVal);
+ CaNamedArg namedArgs[1];
+
+ // First, the void constructor:
+ IfFailGo(ParseKnownCaArgs(ca, NULL, 0));
+
+ // Then, find the named argument
+ namedArgs[0].InitBoolField("WrapNonExceptionThrows");
+
+ IfFailGo(ParseKnownCaNamedArgs(ca, namedArgs, lengthof(namedArgs)));
+
+ if (namedArgs[0].val.boolean)
+ fRuntimeWrapExceptions = TRUE;
+ }
+ErrExit:
+ FastInterlockOr(&m_dwPersistedFlags, COMPUTED_WRAP_EXCEPTIONS |
+ (fRuntimeWrapExceptions ? WRAP_EXCEPTIONS : 0));
+ }
+
+ return !!(m_dwPersistedFlags & WRAP_EXCEPTIONS);
+}
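+
+// The named argument parsed above is produced by assembly-level source such as
+// (C#, illustrative; the C# compiler emits this attribute by default):
+//
+//   [assembly: System.Runtime.CompilerServices.RuntimeCompatibility(
+//       WrapNonExceptionThrows = true)]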
+
+BOOL Module::IsPreV4Assembly()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ if (!(m_dwPersistedFlags & COMPUTED_IS_PRE_V4_ASSEMBLY))
+ {
+ // The flags should be precomputed in native images
+ _ASSERTE(!HasNativeImage());
+
+ IMDInternalImport *pImport = GetAssembly()->GetManifestImport();
+ _ASSERTE(pImport);
+
+ BOOL fIsPreV4Assembly = FALSE;
+ LPCSTR szVersion = NULL;
+ if (SUCCEEDED(pImport->GetVersionString(&szVersion)))
+ {
+ if (szVersion != NULL && strlen(szVersion) > 2)
+ {
+ fIsPreV4Assembly = (szVersion[0] == 'v' || szVersion[0] == 'V') &&
+ (szVersion[1] == '1' || szVersion[1] == '2');
+ }
+ }
+
+ FastInterlockOr(&m_dwPersistedFlags, COMPUTED_IS_PRE_V4_ASSEMBLY |
+ (fIsPreV4Assembly ? IS_PRE_V4_ASSEMBLY : 0));
+ }
+
+ return !!(m_dwPersistedFlags & IS_PRE_V4_ASSEMBLY);
+}
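+
+// Illustrative inputs for the version check above: metadata version strings
+// "v1.1.4322" and "v2.0.50727" yield TRUE (first char 'v'/'V', second '1' or
+// '2'); "v4.0.30319" yields FALSE.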
+
+DWORD Module::GetReliabilityContract()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ if (!(m_dwPersistedFlags & COMPUTED_RELIABILITY_CONTRACT))
+ {
+ // The flags should be precomputed in native images
+ _ASSERTE(!HasNativeImage());
+
+ // This flag applies to the assembly, but it is stored on the module so it can be cached in the ngen image.
+ // Thus, we should only ever need it for the manifest module.
+ IMDInternalImport *mdImport = GetAssembly()->GetManifestImport();
+
+ m_dwReliabilityContract = ::GetReliabilityContract(mdImport, TokenFromRid(1, mdtAssembly));
+
+ FastInterlockOr(&m_dwPersistedFlags, COMPUTED_RELIABILITY_CONTRACT);
+ }
+
+ return m_dwReliabilityContract;
+}
+
+ArrayDPTR(FixupPointer<PTR_MethodTable>) ModuleCtorInfo::GetGCStaticMTs(DWORD index)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (index < numHotGCStaticsMTs)
+ {
+ _ASSERTE(ppHotGCStaticsMTs != NULL);
+
+ return ppHotGCStaticsMTs + index;
+ }
+ else
+ {
+ _ASSERTE(ppColdGCStaticsMTs != NULL);
+
+ // shift the start of the cold table because all cold offsets are also shifted
+ return ppColdGCStaticsMTs + (index - numHotGCStaticsMTs);
+ }
+}
+
+DWORD Module::AllocateDynamicEntry(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(pMT->GetModuleForStatics() == this);
+ PRECONDITION(pMT->IsDynamicStatics());
+ PRECONDITION(!pMT->ContainsGenericVariables());
+ }
+ CONTRACTL_END;
+
+ DWORD newId = FastInterlockExchangeAdd((LONG*)&m_cDynamicEntries, 1);
+
+ if (newId >= m_maxDynamicEntries)
+ {
+ CrstHolder ch(&m_Crst);
+
+ if (newId >= m_maxDynamicEntries)
+ {
+ SIZE_T maxDynamicEntries = max(16, m_maxDynamicEntries);
+ while (maxDynamicEntries <= newId)
+ {
+ maxDynamicEntries *= 2;
+ }
+
+ DynamicStaticsInfo* pNewDynamicStaticsInfo = (DynamicStaticsInfo*)
+ (void*)GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(DynamicStaticsInfo)) * S_SIZE_T(maxDynamicEntries));
+
+ if (m_pDynamicStaticsInfo)
+ memcpy(pNewDynamicStaticsInfo, m_pDynamicStaticsInfo, sizeof(DynamicStaticsInfo) * m_maxDynamicEntries);
+
+ m_pDynamicStaticsInfo = pNewDynamicStaticsInfo;
+ m_maxDynamicEntries = maxDynamicEntries;
+ }
+ }
+
+ EnsureWritablePages(&(m_pDynamicStaticsInfo[newId]))->pEnclosingMT = pMT;
+
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Assigned dynamic ID %d to %s\n", newId, pMT->GetDebugClassName()));
+
+ return newId;
+}
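+
+// Growth sketch for the table above: capacity starts at max(16, m_maxDynamicEntries)
+// and doubles until it covers the new id (16, 32, 64, ...). Old entries are
+// memcpy'd across; the superseded block stays on the loader heap, which does
+// not free individual allocations.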
+
+void Module::FreeModuleIndex()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ if (GetAssembly()->IsDomainNeutral())
+ {
+ // We do not recycle ModuleIndexes used by domain neutral Modules.
+ }
+ else
+ {
+ if (m_ModuleID != NULL)
+ {
+ // Module's m_ModuleID should not contain the ID, it should
+ // contain a pointer to the DLM
+ _ASSERTE(!Module::IsEncodedModuleIndex((SIZE_T)m_ModuleID));
+ _ASSERTE(m_ModuleIndex == m_ModuleID->GetModuleIndex());
+
+ // Get the ModuleIndex from the DLM and free it
+ Module::FreeModuleIndex(m_ModuleIndex);
+ }
+ else
+ {
+ // This was an empty, short-lived Module object that
+ // was never assigned a ModuleIndex...
+ }
+ }
+}
+
+
+
+
+ModuleIndex Module::AllocateModuleIndex()
+{
+ DWORD val;
+ g_pModuleIndexDispenser->NewId(NULL, val);
+
+ // For various reasons, the IDs issued by the IdDispenser start at 1.
+ // Domain neutral module IDs have historically started at 0, and we
+ // have always assigned ID 0 to mscorlib. Thus, to make it so that
+ // domain neutral module IDs start at 0, we will subtract 1 from the
+ // ID that we got back from the ID dispenser.
+ ModuleIndex index((SIZE_T)(val-1));
+
+ return index;
+}
+
+void Module::FreeModuleIndex(ModuleIndex index)
+{
+ WRAPPER_NO_CONTRACT;
+ // We subtracted 1 after we allocated this ID, so we need to
+ // add 1 before we free it.
+ DWORD val = index.m_dwIndex + 1;
+
+ g_pModuleIndexDispenser->DisposeId(val);
+}
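+
+// Example of the off-by-one mapping above: the dispenser's first id is 1, which
+// becomes ModuleIndex 0 (historically mscorlib's domain-neutral id); dispenser
+// id N maps to ModuleIndex N-1, and FreeModuleIndex adds the 1 back.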
+
+
+void Module::AllocateRegularStaticHandles(AppDomain* pDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+#ifndef CROSSGEN_COMPILE
+ if (NingenEnabled())
+ return;
+
+ // Allocate the handles we will need. Note that AllocateStaticFieldObjRefPtrs will only
+ // allocate if pModuleData->GetGCStaticsBasePointerAddress(pMT) != 0, avoiding creating
+ // handles more than once for a given MT or module
+
+ DomainLocalModule *pModuleData = GetDomainLocalModule(pDomain);
+
+ _ASSERTE(pModuleData->GetPrecomputedGCStaticsBasePointerAddress() != NULL);
+ if (this->m_dwMaxGCRegularStaticHandles > 0)
+ {
+ // If we're setting up a non-default domain, we want the allocation to look like it's
+ // coming from the created domain.
+
+ // REVISIT_TODO: The comparison "pDomain != GetDomain()" will always be true for domain-neutral
+ // modules, since GetDomain() will return the SharedDomain, which is NOT an AppDomain.
+ // Was this intended? If so, there should be a clarifying comment. If not, then we should
+ // probably do "pDomain != GetAppDomain()" instead.
+
+ if (pDomain != GetDomain() &&
+ pDomain != SystemDomain::System()->DefaultDomain() &&
+ IsSystem())
+ {
+ pDomain->AllocateStaticFieldObjRefPtrsCrossDomain(this->m_dwMaxGCRegularStaticHandles,
+ pModuleData->GetPrecomputedGCStaticsBasePointerAddress());
+ }
+ else
+ {
+ pDomain->AllocateStaticFieldObjRefPtrs(this->m_dwMaxGCRegularStaticHandles,
+ pModuleData->GetPrecomputedGCStaticsBasePointerAddress());
+ }
+
+ // We should throw if we fail to allocate and never hit this assert
+ _ASSERTE(pModuleData->GetPrecomputedGCStaticsBasePointer() != NULL);
+ }
+#endif // CROSSGEN_COMPILE
+}
+
+BOOL Module::IsStaticStoragePrepared(mdTypeDef tkType)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Right now the design is that we do one static allocation pass during NGEN,
+ // and a second pass at module init time for modules that weren't NGENed or whose NGEN
+ // pass was unsuccessful. If we are loading types after that, then we must use dynamic
+ // static storage. These dynamic statics require an additional indirection, so they
+ // don't perform quite as well.
+ //
+ // This check was created for the scenario where a profiler adds additional types
+ // however it seems likely this check would also accurately handle other dynamic
+ // scenarios such as ref.emit and EnC as long as they are adding new types and
+ // not new statics to existing types.
+ _ASSERTE(TypeFromToken(tkType) == mdtTypeDef);
+ return m_maxTypeRidStaticsAllocated >= RidFromToken(tkType);
+}
+
+void Module::AllocateStatics(AllocMemTracker *pamTracker)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsResource())
+ {
+ m_dwRegularStaticsBlockSize = DomainLocalModule::OffsetOfDataBlob();
+ m_dwThreadStaticsBlockSize = ThreadLocalModule::OffsetOfDataBlob();
+
+ // If it has no code, we don't have to allocate anything
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Resource module %s. No statics neeeded\n", GetSimpleName()));
+ _ASSERTE(m_maxTypeRidStaticsAllocated == 0);
+ return;
+ }
+#ifdef FEATURE_PREJIT
+ if (m_pRegularStaticOffsets == (PTR_DWORD) NGEN_STATICS_ALLCLASSES_WERE_LOADED)
+ {
+ _ASSERTE(HasNativeImage());
+
+ // This is an ngen image and all the classes were loaded at ngen time, so we're done.
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: 'Complete' Native image found, no statics parsing needed for module %s.\n", GetSimpleName()));
+ // typeDefs rids 0 and 1 aren't included in the count, thus X typeDefs means rid X+1 is valid
+ _ASSERTE(m_maxTypeRidStaticsAllocated == GetMDImport()->GetCountWithTokenKind(mdtTypeDef) + 1);
+ return;
+ }
+#endif
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Allocating statics for module %s\n", GetSimpleName()));
+
+ // Build the offset table, which will tell us what the offsets for the statics of each class are (one offset for gc handles, one offset
+ // for non gc types)
+ BuildStaticsOffsets(pamTracker);
+}
+
+// This method reports the GC static refs of the module. It doesn't have to be complete (i.e., it's
+// currently used to opportunistically get more concurrency in the marking of statics), so it currently
+// ignores any statics that are not preallocated (i.e., it won't report statics from an IsDynamicStatics() MT).
+// The reason this function is in Module and not in DomainFile (together with DomainLocalModule) is that
+// for shared modules we need a very fast way of getting to the DomainLocalModule. For that we use
+// a table in DomainLocalBlock that's indexed with a module ID.
+//
+// This method is a secondary way for the GC to find statics, and it is only used when we are on
+// a multiproc machine and we are using the ServerHeap. The primary way used by the GC to find
+// statics is through the handle table. Module::AllocateRegularStaticHandles() allocates a GC handle
+// from the handle table, and the GC will trace this handle and find the statics.
+
+void Module::EnumRegularStaticGCRefs(AppDomain* pAppDomain, promote_func* fn, ScanContext* sc)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACT_END;
+
+ _ASSERTE(GCHeap::IsGCInProgress() &&
+ GCHeap::IsServerHeap() &&
+ IsGCSpecialThread());
+
+
+ DomainLocalModule *pModuleData = GetDomainLocalModule(pAppDomain);
+ DWORD dwHandles = m_dwMaxGCRegularStaticHandles;
+
+ if (IsResource())
+ {
+ RETURN;
+ }
+
+ LOG((LF_GC, LL_INFO100, "Scanning statics for module %s\n", GetSimpleName()));
+
+ OBJECTREF* ppObjectRefs = pModuleData->GetPrecomputedGCStaticsBasePointer();
+ for (DWORD i = 0 ; i < dwHandles ; i++)
+ {
+ // Handles are allocated in SetDomainFile (except for bootstrapped mscorlib). In any
+ // case, we shouldn't get called if the module hasn't had its handles allocated (as we
+ // only get here if IsActive() is true, which only happens after SetDomainFile(), which
+ // is where we allocate handles).
+ _ASSERTE(ppObjectRefs);
+ fn((Object **)(ppObjectRefs+i), sc, 0);
+ }
+
+ LOG((LF_GC, LL_INFO100, "Done scanning statics for module %s\n", GetSimpleName()));
+
+ RETURN;
+}
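+
+// The scan above reduces to walking a contiguous block of OBJECTREF slots and
+// handing each one to the promote callback. A standalone sketch of that shape
+// (plain C++; PromoteFn and the slot type are simplified stand-ins for the
+// real promote_func/OBJECTREF):
+//
+//   typedef void PromoteFn(void** ppSlot, void* pScanContext, int flags);
+//
+//   void ScanStaticSlots(void** pSlots, unsigned count, PromoteFn* fn, void* sc)
+//   {
+//       for (unsigned i = 0; i < count; i++)
+//           fn(&pSlots[i], sc, 0);   // report each static ref slot to the GC
+//   }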
+
+void Module::SetDomainFile(DomainFile *pDomainFile)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pDomainFile));
+ PRECONDITION(IsManifest() == pDomainFile->IsAssembly());
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DomainLocalModule* pModuleData = 0;
+
+ // Do we need to allocate memory for the non GC statics?
+ if ((GetAssembly()->IsDomainNeutral() && !IsSingleAppDomain()) || m_ModuleID == NULL)
+ {
+ // Allocate memory for the module statics.
+ LoaderAllocator *pLoaderAllocator = NULL;
+ if (GetAssembly()->IsCollectible())
+ {
+ pLoaderAllocator = GetAssembly()->GetLoaderAllocator();
+ }
+ else
+ {
+ pLoaderAllocator = pDomainFile->GetAppDomain()->GetLoaderAllocator();
+ }
+
+ SIZE_T size = GetDomainLocalModuleSize();
+
+ LOG((LF_CLASSLOADER, LL_INFO10, "STATICS: Allocating %i bytes for precomputed statics in module %S in LoaderAllocator %p\n",
+ size, this->GetDebugName(), pLoaderAllocator));
+
+ // We guarantee alignment for 64-bit regular statics on 32-bit platforms even without FEATURE_64BIT_ALIGNMENT for performance reasons.
+
+ _ASSERTE(size >= DomainLocalModule::OffsetOfDataBlob());
+
+ pModuleData = (DomainLocalModule*)(void*)
+ pLoaderAllocator->GetHighFrequencyHeap()->AllocAlignedMem(
+ size, MAX_PRIMITIVE_FIELD_SIZE);
+
+ // Note: Memory allocated on loader heap is zero filled
+ // memset(pModuleData, 0, size);
+
+ // Verify that the space is really zero initialized
+ _ASSERTE(pModuleData->GetPrecomputedGCStaticsBasePointer() == NULL);
+
+ // Make sure that the newly allocated DomainLocalModule gets
+ // a copy of the domain-neutral module ID.
+ if (GetAssembly()->IsDomainNeutral() && !IsSingleAppDomain())
+ {
+ // If the module was loaded as domain-neutral, we can find the ID by
+ // casting 'm_ModuleID'.
+
+ _ASSERTE(Module::IDToIndex((SIZE_T)m_ModuleID) == this->m_ModuleIndex);
+ pModuleData->m_ModuleIndex = Module::IDToIndex((SIZE_T)m_ModuleID);
+
+ // Eventually I want to just do this instead...
+ //pModuleData->m_ModuleIndex = this->m_ModuleIndex;
+ }
+ else
+ {
+ // If the module was loaded as domain-specific, then we need to assign
+ // this module a domain-neutral module ID.
+ pModuleData->m_ModuleIndex = Module::AllocateModuleIndex();
+ m_ModuleIndex = pModuleData->m_ModuleIndex;
+ }
+ }
+ else
+ {
+ pModuleData = this->m_ModuleID;
+ LOG((LF_CLASSLOADER, LL_INFO10, "STATICS: Allocation not needed for ngened non shared module %s in Appdomain %08x\n"));
+ }
+
+ if (GetAssembly()->IsDomainNeutral() && !IsSingleAppDomain())
+ {
+ DomainLocalBlock *pLocalBlock;
+ {
+ pLocalBlock = pDomainFile->GetAppDomain()->GetDomainLocalBlock();
+ pLocalBlock->SetModuleSlot(GetModuleIndex(), pModuleData);
+ }
+
+ pLocalBlock->SetDomainFile(GetModuleIndex(), pDomainFile);
+ }
+ else
+ {
+ // Non shared case, module points directly to the statics. In ngen case
+ // m_pDomainModule is already set for the non shared case
+ if (m_ModuleID == NULL)
+ {
+ m_ModuleID = pModuleData;
+ }
+
+ m_ModuleID->SetDomainFile(pDomainFile);
+ }
+
+ // Allocate static handles now.
+ // NOTE: Bootstrapping issue with mscorlib - we will manually allocate later
+ if (g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT] != NULL)
+ AllocateRegularStaticHandles(pDomainFile->GetAppDomain());
+}
+
+#ifndef CROSSGEN_COMPILE
+OBJECTREF Module::GetExposedObject()
+{
+ CONTRACT(OBJECTREF)
+ {
+ INSTANCE_CHECK;
+ POSTCONDITION(RETVAL != NULL);
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACT_END;
+
+ RETURN GetDomainFile()->GetExposedModuleObject();
+}
+#endif // CROSSGEN_COMPILE
+
+//
+// AllocateMaps allocates the RID maps based on the size of the current
+// metadata (if any)
+//
+
+void Module::AllocateMaps()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ enum
+ {
+ TYPEDEF_MAP_INITIAL_SIZE = 5,
+ TYPEREF_MAP_INITIAL_SIZE = 5,
+ MEMBERDEF_MAP_INITIAL_SIZE = 10,
+ GENERICPARAM_MAP_INITIAL_SIZE = 5,
+ GENERICTYPEDEF_MAP_INITIAL_SIZE = 5,
+ FILEREFERENCES_MAP_INITIAL_SIZE = 5,
+ ASSEMBLYREFERENCES_MAP_INITIAL_SIZE = 5,
+ };
+
+ PTR_TADDR pTable = NULL;
+
+ if (IsResource())
+ return;
+
+ if (IsReflection())
+ {
+ // For dynamic modules, it is essential that we at least have a TypeDefToMethodTable
+ // map with an initial block. Otherwise, all the iterators will abort on an
+ // initial empty table and we will e.g. corrupt the backpatching chains during
+ // an appdomain unload.
+ m_TypeDefToMethodTableMap.dwCount = TYPEDEF_MAP_INITIAL_SIZE;
+
+ // The above is essential. The following ones are precautionary.
+ m_TypeRefToMethodTableMap.dwCount = TYPEREF_MAP_INITIAL_SIZE;
+ m_MethodDefToDescMap.dwCount = MEMBERDEF_MAP_INITIAL_SIZE;
+ m_FieldDefToDescMap.dwCount = MEMBERDEF_MAP_INITIAL_SIZE;
+ m_GenericParamToDescMap.dwCount = GENERICPARAM_MAP_INITIAL_SIZE;
+ m_GenericTypeDefToCanonMethodTableMap.dwCount = TYPEDEF_MAP_INITIAL_SIZE;
+ m_FileReferencesMap.dwCount = FILEREFERENCES_MAP_INITIAL_SIZE;
+ m_ManifestModuleReferencesMap.dwCount = ASSEMBLYREFERENCES_MAP_INITIAL_SIZE;
+ m_MethodDefToPropertyInfoMap.dwCount = MEMBERDEF_MAP_INITIAL_SIZE;
+ }
+ else
+ {
+ IMDInternalImport * pImport = GetMDImport();
+
+ // Get # TypeDefs (add 1 for COR_GLOBAL_PARENT_TOKEN)
+ m_TypeDefToMethodTableMap.dwCount = pImport->GetCountWithTokenKind(mdtTypeDef)+2;
+
+ // Get # TypeRefs
+ m_TypeRefToMethodTableMap.dwCount = pImport->GetCountWithTokenKind(mdtTypeRef)+1;
+
+ // Get # MethodDefs
+ m_MethodDefToDescMap.dwCount = pImport->GetCountWithTokenKind(mdtMethodDef)+1;
+
+ // Get # FieldDefs
+ m_FieldDefToDescMap.dwCount = pImport->GetCountWithTokenKind(mdtFieldDef)+1;
+
+ // Get # GenericParams
+ m_GenericParamToDescMap.dwCount = pImport->GetCountWithTokenKind(mdtGenericParam)+1;
+
+ // Get the number of FileReferences in the map
+ m_FileReferencesMap.dwCount = pImport->GetCountWithTokenKind(mdtFile)+1;
+
+ // Get the number of AssemblyReferences in the map
+ m_ManifestModuleReferencesMap.dwCount = pImport->GetCountWithTokenKind(mdtAssemblyRef)+1;
+
+ // These maps are only added to during NGen, so for other scenarios leave them empty
+ if (IsCompilationProcess())
+ {
+ m_GenericTypeDefToCanonMethodTableMap.dwCount = m_TypeDefToMethodTableMap.dwCount;
+ m_MethodDefToPropertyInfoMap.dwCount = m_MethodDefToDescMap.dwCount;
+ }
+ else
+ {
+ m_GenericTypeDefToCanonMethodTableMap.dwCount = 0;
+ m_MethodDefToPropertyInfoMap.dwCount = 0;
+ }
+ }
+
+ S_SIZE_T nTotal;
+
+ nTotal += m_TypeDefToMethodTableMap.dwCount;
+ nTotal += m_TypeRefToMethodTableMap.dwCount;
+ nTotal += m_MethodDefToDescMap.dwCount;
+ nTotal += m_FieldDefToDescMap.dwCount;
+ nTotal += m_GenericParamToDescMap.dwCount;
+ nTotal += m_GenericTypeDefToCanonMethodTableMap.dwCount;
+ nTotal += m_FileReferencesMap.dwCount;
+ nTotal += m_ManifestModuleReferencesMap.dwCount;
+ nTotal += m_MethodDefToPropertyInfoMap.dwCount;
+
+ _ASSERTE (m_pAssembly && m_pAssembly->GetLowFrequencyHeap());
+ pTable = (PTR_TADDR)(void*)m_pAssembly->GetLowFrequencyHeap()->AllocMem(nTotal * S_SIZE_T(sizeof(TADDR)));
+
+ // Note: Memory allocated on loader heap is zero filled
+ // memset(pTable, 0, nTotal * sizeof(void*));
+
+ m_TypeDefToMethodTableMap.pNext = NULL;
+ m_TypeDefToMethodTableMap.supportedFlags = TYPE_DEF_MAP_ALL_FLAGS;
+ m_TypeDefToMethodTableMap.pTable = pTable;
+
+ m_TypeRefToMethodTableMap.pNext = NULL;
+ m_TypeRefToMethodTableMap.supportedFlags = TYPE_REF_MAP_ALL_FLAGS;
+ m_TypeRefToMethodTableMap.pTable = &pTable[m_TypeDefToMethodTableMap.dwCount];
+
+ m_MethodDefToDescMap.pNext = NULL;
+ m_MethodDefToDescMap.supportedFlags = METHOD_DEF_MAP_ALL_FLAGS;
+ m_MethodDefToDescMap.pTable = &m_TypeRefToMethodTableMap.pTable[m_TypeRefToMethodTableMap.dwCount];
+
+ m_FieldDefToDescMap.pNext = NULL;
+ m_FieldDefToDescMap.supportedFlags = FIELD_DEF_MAP_ALL_FLAGS;
+ m_FieldDefToDescMap.pTable = &m_MethodDefToDescMap.pTable[m_MethodDefToDescMap.dwCount];
+
+ m_GenericParamToDescMap.pNext = NULL;
+ m_GenericParamToDescMap.supportedFlags = GENERIC_PARAM_MAP_ALL_FLAGS;
+ m_GenericParamToDescMap.pTable = &m_FieldDefToDescMap.pTable[m_FieldDefToDescMap.dwCount];
+
+ m_GenericTypeDefToCanonMethodTableMap.pNext = NULL;
+ m_GenericTypeDefToCanonMethodTableMap.supportedFlags = GENERIC_TYPE_DEF_MAP_ALL_FLAGS;
+ m_GenericTypeDefToCanonMethodTableMap.pTable = &m_GenericParamToDescMap.pTable[m_GenericParamToDescMap.dwCount];
+
+ m_FileReferencesMap.pNext = NULL;
+ m_FileReferencesMap.supportedFlags = FILE_REF_MAP_ALL_FLAGS;
+ m_FileReferencesMap.pTable = &m_GenericTypeDefToCanonMethodTableMap.pTable[m_GenericTypeDefToCanonMethodTableMap.dwCount];
+
+ m_ManifestModuleReferencesMap.pNext = NULL;
+ m_ManifestModuleReferencesMap.supportedFlags = MANIFEST_MODULE_MAP_ALL_FLAGS;
+ m_ManifestModuleReferencesMap.pTable = &m_FileReferencesMap.pTable[m_FileReferencesMap.dwCount];
+
+ m_MethodDefToPropertyInfoMap.pNext = NULL;
+ m_MethodDefToPropertyInfoMap.supportedFlags = PROPERTY_INFO_MAP_ALL_FLAGS;
+ m_MethodDefToPropertyInfoMap.pTable = &m_ManifestModuleReferencesMap.pTable[m_ManifestModuleReferencesMap.dwCount];
+}
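+
+// The maps above share one zero-filled loader-heap allocation: the slot counts
+// are summed with overflow-checked arithmetic, allocated once, and each map's
+// pTable is carved out right after the previous map's slots. A standalone
+// sketch of the same carving technique (hypothetical two-map version using
+// calloc in place of the loader heap):
+//
+//   size_t count0 = 16, count1 = 8;
+//   void** block = (void**)calloc(count0 + count1, sizeof(void*)); // zero-filled
+//   void** table0 = block;              // first map's slots
+//   void** table1 = &table0[count0];    // second map starts where the first ends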
+
+
+//
+// FreeClassTables frees the classes in the module
+//
+
+void Module::FreeClassTables()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_dwTransientFlags & CLASSES_FREED)
+ return;
+
+ FastInterlockOr(&m_dwTransientFlags, CLASSES_FREED);
+
+ // disable ibc here because it can cause errors during the destruction of classes
+ IBCLoggingDisabler disableLogging;
+
+#if _DEBUG
+ DebugLogRidMapOccupancy();
+#endif
+
+ //
+ // Free the types filled out in the TypeDefToEEClass map
+ //
+
+ // Go through each linked block
+ LookupMap<PTR_MethodTable>::Iterator typeDefIter(&m_TypeDefToMethodTableMap);
+ while (typeDefIter.Next())
+ {
+ MethodTable * pMT = typeDefIter.GetElement();
+
+ if (pMT != NULL && pMT->IsRestored())
+ {
+ pMT->GetClass()->Destruct(pMT);
+ }
+ }
+
+ // Now do the same for constructed types (arrays and instantiated generic types)
+ // If we're destructing because of an error during the module's creation, we'll play it
+ // safe and not touch this table as its memory is freed by a separate AllocMemTracker.
+ // Though you're supposed to destruct everything else before destructing the
+ // AllocMemTracker, this is an easy invariant to break, so we'll play extra safe on
+ // this end.
+ if (IsTenured())
+ {
+ if (m_pAvailableParamTypes != NULL)
+ {
+ EETypeHashTable::Iterator it(m_pAvailableParamTypes);
+ EETypeHashEntry *pEntry;
+ while (m_pAvailableParamTypes->FindNext(&it, &pEntry))
+ {
+ TypeHandle th = pEntry->GetTypeHandle();
+
+ if (!th.IsRestored())
+ continue;
+
+#ifdef FEATURE_COMINTEROP
+ // Some MethodTables/TypeDescs have COM interop goo attached to them which must be released
+ if (!th.IsTypeDesc())
+ {
+ MethodTable *pMT = th.AsMethodTable();
+ if (pMT->HasCCWTemplate())
+ {
+ // code:MethodTable::GetComCallWrapperTemplate() may go through canonical methodtable indirection cell.
+ // The module load could be aborted before completing code:FILE_LOAD_EAGER_FIXUPS phase that's responsible
+ // for resolving pre-restored indirection cells, so we have to check for it here explicitly.
+ if (CORCOMPILE_IS_POINTER_TAGGED(pMT->GetCanonicalMethodTableFixup()))
+ continue;
+
+ ComCallWrapperTemplate *pTemplate = pMT->GetComCallWrapperTemplate();
+ if (pTemplate != NULL)
+ {
+ pTemplate->Release();
+ }
+ }
+ }
+ else if (th.IsArray())
+ {
+ ComCallWrapperTemplate *pTemplate = th.AsArray()->GetComCallWrapperTemplate();
+ if (pTemplate != NULL)
+ {
+ pTemplate->Release();
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ // We need to call destruct on instances of EEClass whose "canonical" dependent lives in this table
+ // There is nothing interesting to destruct on array EEClass
+ if (!th.IsTypeDesc())
+ {
+ MethodTable * pMT = th.AsMethodTable();
+ if (pMT->IsCanonicalMethodTable())
+ pMT->GetClass()->Destruct(pMT);
+ }
+ }
+ }
+ }
+}
+
+#endif // !DACCESS_COMPILE
+
+ClassLoader *Module::GetClassLoader()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ _ASSERTE(m_pAssembly != NULL);
+ return m_pAssembly->GetLoader();
+}
+
+PTR_BaseDomain Module::GetDomain()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ _ASSERTE(m_pAssembly != NULL);
+ return m_pAssembly->GetDomain();
+}
+
+#ifndef DACCESS_COMPILE
+
+IAssemblySecurityDescriptor *Module::GetSecurityDescriptor()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(m_pAssembly != NULL);
+ return m_pAssembly->GetSecurityDescriptor();
+}
+
+#ifndef CROSSGEN_COMPILE
+void Module::StartUnload()
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackModuleLoads());
+ if (!IsBeingUnloaded())
+ {
+ // Profiler is causing some peripheral class loads. Probably this just needs
+ // to be turned into a Fault_not_fatal and moved to a specific place inside the profiler.
+ EX_TRY
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->ModuleUnloadStarted((ModuleID) this);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+#ifdef FEATURE_PREJIT
+ // Write out the method profile data
+ /*hr=*/WriteMethodProfileDataLogFile(true);
+#endif // FEATURE_PREJIT
+ SetBeingUnloaded();
+}
+#endif // CROSSGEN_COMPILE
+
+void Module::ReleaseILData(void)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ReleaseISymUnmanagedReader();
+}
+
+
+#ifdef FEATURE_FUSION
+
+//
+// Module::FusionCopyPDBs asks Fusion to copy PDBs for a given
+// assembly if they need to be copied. This is for the case where a PE
+// file is shadow copied to the Fusion cache. Fusion needs to be told
+// to take the time to copy the PDB, too.
+//
+STDAPI CopyPDBs(IAssembly *pAsm); // private fusion API
+void Module::FusionCopyPDBs(LPCWSTR moduleName)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Assembly *pAssembly = GetAssembly();
+
+ // Just return if we've already done this for this Module's
+ // Assembly.
+ if ((pAssembly->GetDebuggerInfoBits() & DACF_PDBS_COPIED) ||
+ (pAssembly->GetFusionAssembly() == NULL))
+ {
+ LOG((LF_CORDB, LL_INFO10,
+ "Don't need to copy PDB's for module %S\n",
+ moduleName));
+
+ return;
+ }
+
+ LOG((LF_CORDB, LL_INFO10,
+ "Attempting to copy PDB's for module %S\n", moduleName));
+
+ HRESULT hr;
+ hr = CopyPDBs(pAssembly->GetFusionAssembly());
+ LOG((LF_CORDB, LL_INFO10,
+ "Fusion.dll!CopyPDBs returned hr=0x%08x for module 0x%08x\n",
+ hr, this));
+
+ // Remember that we've copied the PDBs for this assembly.
+ pAssembly->SetCopiedPDBs();
+}
+
+// This function returns the PDB stream if one exists.
+// It is the caller's responsibility to call Release on *ppStream after a successful
+// result.
+// We first check whether we have a cached PDB stream available. If not,
+// we ask Fusion, which in turn asks the host via HostProvideAssembly. The host may
+// decide to provide one or not.
+//
+HRESULT Module::GetHostPdbStream(IStream **ppStream)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if(GetThread()) {GC_TRIGGERS;} else {GC_NOTRIGGER;}
+ }
+ CONTRACTL_END
+
+ HRESULT hr = NOERROR;
+
+ _ASSERTE(ppStream);
+
+ *ppStream = NULL;
+
+ if (m_file->IsIStream() == false)
+ {
+ // not a host stream
+ return E_FAIL;
+ }
+
+ // Maybe fusion can ask our host. This will give us back a PDB stream if
+ // host decides to provide one.
+ //
+ if (m_file->IsAssembly())
+ {
+ GCX_PREEMP();
+ hr = ((PEAssembly*)m_file)->GetIHostAssembly()->GetAssemblyDebugStream(ppStream);
+ }
+ else
+ {
+ _ASSERTE(m_file->IsModule());
+ IHostAssemblyModuleImport *pIHAMI;
+ MAKE_WIDEPTR_FROMUTF8_NOTHROW(pName, m_file->GetSimpleName());
+ if (pName == NULL)
+ return E_OUTOFMEMORY;
+ IfFailRet(m_file->GetAssembly()->GetIHostAssembly()->GetModuleByName(pName, &pIHAMI));
+ hr = pIHAMI->GetModuleDebugStream(ppStream);
+ }
+ return hr;
+}
+
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+// Simple wrapper around calling IsAfContentType_WindowsRuntime() against the flags
+// returned from the PEAssembly's GetFlagsNoTrigger()
+//
+// Return Value:
+// nonzero iff we successfully determined pModule is a WinMD. FALSE if pModule is not
+// a WinMD, or we fail trying to find out.
+//
+BOOL Module::IsWindowsRuntimeModule()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK; // Accesses metadata directly, which takes locks
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DWORD dwFlags;
+
+ if (FAILED(GetAssembly()->GetManifestFile()->GetFlagsNoTrigger(&dwFlags)))
+ return FALSE;
+
+ return IsAfContentType_WindowsRuntime(dwFlags);
+}
+
+BOOL Module::IsInCurrentVersionBubble()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ if (!IsCompilationProcess())
+ return TRUE;
+
+ // The module being compiled is always part of the current version bubble
+ AppDomain * pAppDomain = GetAppDomain();
+ if (pAppDomain->IsCompilationDomain() && pAppDomain->ToCompilationDomain()->GetTargetModule() == this)
+ return TRUE;
+
+ if (IsReadyToRunCompilation())
+ return FALSE;
+
+#ifdef FEATURE_COMINTEROP
+ if (g_fNGenWinMDResilient)
+ return !GetAssembly()->IsWinMD();
+#endif
+
+ return TRUE;
+#else // FEATURE_NATIVE_IMAGE_GENERATION
+ return TRUE;
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+}
+
+//---------------------------------------------------------------------------------------
+//
+// WinMD-aware helper to grab a readable public metadata interface. Any place that thinks
+// it wants to use Module::GetRWImporter + QI now should use this wrapper instead.
+//
+// Arguments:
+// * dwOpenFlags - Combo from CorOpenFlags. Better not contain ofWrite!
+// * riid - Public IID requested
+// * ppvInterface - [out] Requested interface. On success, *ppvInterface is returned
+// refcounted; caller responsible for Release.
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+HRESULT Module::GetReadablePublicMetaDataInterface(DWORD dwOpenFlags, REFIID riid, LPVOID * ppvInterface)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK; // IsWindowsRuntimeModule accesses metadata directly, which takes locks
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE((dwOpenFlags & ofWrite) == 0);
+
+ // Temporary place to store public, AddRef'd interface pointers
+ ReleaseHolder<IUnknown> pIUnkPublic;
+
+ // Temporary place to store the IUnknown from which we'll do the final QI to get the
+ // requested public interface. Any assignment to pIUnk assumes pIUnk does not need
+ // to do a Release() (either the interface was internal and not AddRef'd, or was
+ // public and will be released by the above holder).
+ IUnknown * pIUnk = NULL;
+
+ HRESULT hr = S_OK;
+
+ // Normally, we just get an RWImporter to do the QI on, and we're on our way.
+ EX_TRY
+ {
+ pIUnk = GetRWImporter();
+ }
+ EX_CATCH_HRESULT_NO_ERRORINFO(hr);
+
+ if (FAILED(hr) && IsWindowsRuntimeModule())
+ {
+ // WinMD modules don't like creating RW importers. They also (currently)
+ // have no plumbing to get to their public metadata interfaces from the
+ // Module. So we actually have to start from scratch at the dispenser.
+
+ // To start with, get a dispenser, and get the metadata memory blob we've
+ // already loaded. If either of these fail, just return the error HRESULT
+ // from the above GetRWImporter() call.
+
+ // We'll get an addref'd IMetaDataDispenser, so use a holder to release it
+ ReleaseHolder<IMetaDataDispenser> pDispenser;
+ if (FAILED(InternalCreateMetaDataDispenser(IID_IMetaDataDispenser, &pDispenser)))
+ {
+ _ASSERTE(FAILED(hr));
+ return hr;
+ }
+
+ COUNT_T cbMetadata = 0;
+ PTR_CVOID pvMetadata = GetAssembly()->GetManifestFile()->GetLoadedMetadata(&cbMetadata);
+ if ((pvMetadata == NULL) || (cbMetadata == 0))
+ {
+ _ASSERTE(FAILED(hr));
+ return hr;
+ }
+
+ // Now that the pieces are ready, we can use the riid specified by the
+ // profiler in this call to the dispenser to get the requested interface. If
+ // this fails, then this is the interesting HRESULT for the caller to see.
+ //
+ // We'll get an AddRef'd public interface, so use a holder to release it
+ hr = pDispenser->OpenScopeOnMemory(
+ pvMetadata,
+ cbMetadata,
+ (dwOpenFlags | ofReadOnly), // Force ofReadOnly on behalf of the profiler
+ riid,
+ &pIUnkPublic);
+ if (FAILED(hr))
+ return hr;
+
+ // Set pIUnk so we can do the final QI from it below as we do in the other
+ // cases.
+ pIUnk = pIUnkPublic;
+ }
+
+ // Get the requested interface
+ if (SUCCEEDED(hr) && (ppvInterface != NULL))
+ {
+ _ASSERTE(pIUnk != NULL);
+ hr = pIUnk->QueryInterface(riid, (void **) ppvInterface);
+ }
+
+ return hr;
+}
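+
+// The control flow above is a two-stage fallback: try the cheap internal
+// importer first, and only if that fails (and the module is a WinMD) rebuild a
+// public interface from the raw metadata bytes via the dispenser. A sketch of
+// the shape, with hypothetical stand-ins for the two stages:
+//
+//   HRESULT GetInterface(REFIID riid, void** ppv)
+//   {
+//       HRESULT hr = TryInternalImporter(riid, ppv);     // GetRWImporter path
+//       if (FAILED(hr) && SupportsMemoryFallback())      // IsWindowsRuntimeModule
+//           hr = OpenScopeOnRawBytes(riid, ppv);         // dispenser on the metadata blob
+//       return hr;                                       // first failure wins otherwise
+//   }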
+
+// a special token that indicates no reader could be created - don't try again
+static ISymUnmanagedReader* const k_pInvalidSymReader = (ISymUnmanagedReader*)0x1;
+
+#if defined(FEATURE_ISYM_READER) && !defined(CROSSGEN_COMPILE)
+ISymUnmanagedReader *Module::GetISymUnmanagedReaderNoThrow(void)
+{
+ CONTRACT(ISymUnmanagedReader *)
+ {
+ INSTANCE_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ ISymUnmanagedReader *ret = NULL;
+
+ EX_TRY
+ {
+ ret = GetISymUnmanagedReader();
+ }
+ EX_CATCH
+ {
+ // We swallow any exception and say that we simply couldn't get a reader by returning NULL.
+ // The only type of error that should be possible here is OOM.
+ /* DISABLED due to Dev10 bug 619495
+ CONSISTENCY_CHECK_MSG(
+ GET_EXCEPTION()->GetHR() == E_OUTOFMEMORY,
+ "Exception from GetISymUnmanagedReader");
+ */
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ RETURN (ret);
+}
+
+ISymUnmanagedReader *Module::GetISymUnmanagedReader(void)
+{
+ CONTRACT(ISymUnmanagedReader *)
+ {
+ INSTANCE_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ PRECONDITION(Security::IsResolved(GetAssembly()));
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ // No symbols for resource modules
+ if (IsResource())
+ RETURN NULL;
+
+ if (g_fEEShutDown)
+ RETURN NULL;
+
+ // Verify that symbol reading is permitted for this module.
+ // If we know we've already created a symbol reader, don't bother checking. There is
+ // no advantage to allowing symbol reading to be turned off if we've already created the reader.
+ // Note that we can't just put this code in the creation block below because we might have to
+ // call managed code to resolve security policy, and we can't do that while holding a lock.
+ // There is no disadvantage other than a minor perf cost to calling this unnecessarily, so the
+ // race on m_pISymUnmanagedReader here is OK. The perf cost is minor because the only real
+ // work is done by the security system which caches the result.
+ if( m_pISymUnmanagedReader == NULL && !IsSymbolReadingEnabled() )
+ RETURN NULL;
+
+ // Take the lock for the m_pISymUnmanagedReader
+ // This ensures that we'll only ever attempt to create one reader at a time, and we won't
+ // create a reader if we're in the middle of destroying one that has become stale.
+ // Actual access to the reader can safely occur outside the lock as long as it has its own
+ // AddRef which we take inside the lock at the bottom of this method.
+ CrstHolder holder(&m_ISymUnmanagedReaderCrst);
+
+ UINT lastErrorMode = 0;
+
+ // If we haven't created a reader yet, do so now
+ if (m_pISymUnmanagedReader == NULL)
+ {
+ // Mark our reader as invalid so that if we fail to create the reader
+ // (including if an exception is thrown), we won't keep trying.
+ m_pISymUnmanagedReader = k_pInvalidSymReader;
+
+ // There are 4 main cases here:
+ // 1. Assembly is on disk and we'll get the symbols from a file next to the assembly
+ // 2. Assembly is provided by the host and we'll get the symbols from the host
+ // 3. Assembly was loaded in-memory (by byte array or ref-emit), and symbols were
+ // provided along with it.
+ // 4. Assembly was loaded in-memory but no symbols were provided.
+
+ // Determine whether we should be looking in memory for the symbols (cases 2 & 3)
+ bool fInMemorySymbols = ( m_file->IsIStream() || GetInMemorySymbolStream() );
+ if( !fInMemorySymbols && m_file->GetPath().IsEmpty() )
+ {
+ // Case 4. We don't have a module path, an IStream or an in memory symbol stream,
+ // so there is no-where to try and get symbols from.
+ RETURN (NULL);
+ }
+
+ // Create a binder to find the reader.
+ //
+ // <REVISIT_TODO>@perf: this is slow, creating and destroying the binder every
+ // time. We should cache this somewhere, but I'm not 100% sure
+ // where right now...</REVISIT_TODO>
+ HRESULT hr = S_OK;
+
+ SafeComHolder<ISymUnmanagedBinder> pBinder;
+
+#if defined(FEATURE_CORECLR)
+ if (g_pDebugInterface == NULL)
+ {
+ // @TODO: this is reachable when debugging!
+ UNREACHABLE_MSG("About to CoCreateInstance! This code should not be "
+ "reachable or needs to be reimplemented for CoreCLR!");
+ }
+#endif // FEATURE_CORECLR
+
+ if (this->GetInMemorySymbolStreamFormat() == eSymbolFormatILDB)
+ {
+ // We've got in-memory ILDB symbols, create the ILDB symbol binder
+ // Note that in this case, we must be very careful not to use diasymreader.dll
+ // at all - we don't trust it, and shouldn't run any code in it
+ IfFailThrow(IldbSymbolsCreateInstance(CLSID_CorSymBinder_SxS,
+ IID_ISymUnmanagedBinder,
+ (void**)&pBinder));
+ }
+ else
+ {
+ // We're going to be working with PDB format symbols
+ // Attempt to coCreate the symbol binder.
+ // CoreCLR supports not having a symbol reader installed, so this is expected there.
+ // On desktop, the framework installer is supposed to install diasymreader.dll as well
+ // and so this shouldn't happen.
+ hr = FakeCoCreateInstanceEx(CLSID_CorSymBinder_SxS,
+ GetInternalSystemDirectory(),
+ IID_ISymUnmanagedBinder,
+ (void**)&pBinder,
+ NULL);
+ if (FAILED(hr))
+ {
+#ifdef FEATURE_CORECLR
+ RETURN (NULL);
+#else
+ ThrowHR(hr);
+#endif
+ }
+
+ }
+
+ LOG((LF_CORDB, LL_INFO10, "M::GISUR: Created binder\n"));
+
+ // Note: we change the error mode here so we don't get any popups as the PDB symbol reader attempts to search the
+ // hard disk for files.
+ lastErrorMode = SetErrorMode(SEM_NOOPENFILEERRORBOX|SEM_FAILCRITICALERRORS);
+
+ SafeComHolder<ISymUnmanagedReader> pReader;
+
+ if (fInMemorySymbols)
+ {
+ SafeComHolder<IStream> pIStream( NULL );
+
+ // If debug stream is already specified, don't bother to go through fusion
+ // This is the common case for case 2 (hosted modules) and case 3 (Ref.Emit).
+ if (GetInMemorySymbolStream() )
+ {
+
+ if( IsReflection() )
+ {
+ // If this is Reflection.Emit, we must clone the stream because another thread may
+ // update it when someone is using the reader we create here leading to AVs.
+ // Note that the symbol stream should be up to date since we flush the writer
+ // after every addition in Module::AddClass.
+ IfFailThrow(GetInMemorySymbolStream()->Clone(&pIStream));
+ }
+ else
+ {
+ // The stream is not changing. Just add-ref to it.
+ pIStream = GetInMemorySymbolStream();
+ pIStream->AddRef();
+ }
+ }
+#ifdef FEATURE_FUSION
+ else
+ {
+ // Verified this above.
+ _ASSERTE(m_file->IsIStream());
+
+ // Case 2: get assembly from host.
+ // This commonly would be cached already as GetInMemorySymbolStream() in code:Module.FetchPdbsFromHost,
+ // but may not be cached if the host didn't provide the PDBs at the time.
+ hr = GetHostPdbStream(&pIStream);
+ }
+#endif
+ if (SUCCEEDED(hr))
+ {
+ hr = pBinder->GetReaderFromStream(GetRWImporter(), pIStream, &pReader);
+ }
+ }
+ else
+ {
+ // The assembly is on disk, so try and load symbols based on the path to the assembly (case 1)
+ const SString &path = m_file->GetPath();
+
+ // Call Fusion to ensure that any PDB's are shadow copied before
+ // trying to get a symbol reader. This has to be done once per
+ // Assembly.
+#ifdef FEATURE_FUSION
+ FusionCopyPDBs(path);
+#endif
+ // for this to work with winmds we cannot simply call GetRWImporter() as winmds are RO
+ // and thus don't implement the RW interface. so we call this wrapper function which knows
+ // how to get a IMetaDataImport interface regardless of the underlying module type.
+ ReleaseHolder<IUnknown> pUnk = NULL;
+ hr = GetReadablePublicMetaDataInterface(ofReadOnly, IID_IMetaDataImport, &pUnk);
+ if (SUCCEEDED(hr))
+ hr = pBinder->GetReaderForFile(pUnk, path, NULL, &pReader);
+ }
+
+ SetErrorMode(lastErrorMode);
+
+ if (SUCCEEDED(hr))
+ {
+ m_pISymUnmanagedReader = pReader.Extract();
+ LOG((LF_CORDB, LL_INFO10, "M::GISUR: Loaded symbols for module %S\n", GetDebugName()));
+ }
+ else
+ {
+ // We failed to create the reader, don't try again next time
+ LOG((LF_CORDB, LL_INFO10, "M::GISUR: Failed to load symbols for module %S\n", GetDebugName()));
+ _ASSERTE( m_pISymUnmanagedReader == k_pInvalidSymReader );
+ }
+
+ } // if( m_pISymUnmanagedReader == NULL )
+
+ // If we previously failed to create the reader, return NULL
+ if (m_pISymUnmanagedReader == k_pInvalidSymReader)
+ {
+ RETURN (NULL);
+ }
+
+ // Success - return an AddRef'd copy of the reader
+ m_pISymUnmanagedReader->AddRef();
+ RETURN (m_pISymUnmanagedReader);
+}
+#endif // FEATURE_ISYM_READER && !CROSSGEN_COMPILE
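+
+// The reader cache above is a lazy-init pattern with a failure sentinel: NULL
+// means "not tried yet" and the reserved k_pInvalidSymReader value means
+// "tried and failed, don't retry". A minimal sketch of the shape (hypothetical
+// Reader/TryCreateReader names; the real code also holds
+// m_ISymUnmanagedReaderCrst around the creation):
+//
+//   static Reader* const kInvalid = (Reader*)0x1;
+//
+//   Reader* GetReader()
+//   {
+//       if (m_pReader == NULL)           // first caller attempts creation
+//       {
+//           m_pReader = kInvalid;        // assume failure so a throw won't retry
+//           if (Reader* r = TryCreateReader())
+//               m_pReader = r;
+//       }
+//       return (m_pReader == kInvalid) ? NULL : m_pReader;
+//   }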
+
+BOOL Module::IsSymbolReadingEnabled()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // The only time we need symbols available is for debugging and taking stack traces,
+ // neither of which can be done if the assembly can't run. The advantage of being strict
+ // is that there is a perf penalty for adding types to a module if you must support reading
+ // symbols at any time. If symbols don't need to be accessible then we can
+ // optimize by only committing symbols when the assembly is saved to disk. See DDB 671107.
+ if(!GetAssembly()->HasRunAccess())
+ {
+ return FALSE;
+ }
+
+ // If the module has symbols in-memory (e.g. Ref.Emit) that are in ILDB
+ // format, then there isn't any reason not to supply them. The reader
+ // code is always available, and we trust its security.
+ if (this->GetInMemorySymbolStreamFormat() == eSymbolFormatILDB)
+ {
+ return TRUE;
+ }
+
+#ifdef DEBUGGING_SUPPORTED
+ if (!g_pDebugInterface)
+ {
+ // If debugging is disabled (no debug pack installed), do not load symbols.
+ // This is done for two reasons. First, we don't completely trust the security of
+ // the diasymreader.dll code, so we don't want to use it in mainline scenarios.
+ // Secondly, there's no reason that diasymreader.dll will even necessarily
+ // be on the machine if the debug pack isn't installed.
+ return FALSE;
+ }
+#endif // DEBUGGING_SUPPORTED
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ // See if there is an explicit policy configuration overriding our default.
+ // This can be set by the SymbolReadingPolicy config switch or by a host via
+ // ICLRDebugManager.AllowFileLineInfo.
+ ESymbolReadingPolicy policy = CCLRDebugManager::GetSymbolReadingPolicy();
+ if( policy == eSymbolReadingAlways )
+ {
+ return TRUE;
+ }
+ else if( policy == eSymbolReadingNever )
+ {
+ return FALSE;
+ }
+ _ASSERTE( policy == eSymbolReadingFullTrustOnly );
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ // Default policy - only read symbols corresponding to full-trust assemblies.
+ // Note that there is no strong (cryptographic) connection between a symbol file and its assembly.
+ // The intent here is just to ensure that the common high-risk scenarios (AppLaunch, etc)
+ // will never be able to load untrusted PDB files.
+ //
+ if (GetSecurityDescriptor()->IsFullyTrusted())
+ {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+// At this point, this is only called when we're creating an appdomain
+// out of an array of bytes, so we'll keep the IStream that we create
+// around in case the debugger attaches later (including detach & re-attach!)
+void Module::SetSymbolBytes(LPCBYTE pbSyms, DWORD cbSyms)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Create a IStream from the memory for the syms.
+ SafeComHolder<CGrowableStream> pStream(new CGrowableStream());
+
+ // Do not need to AddRef the CGrowableStream because the constructor set it to 1
+ // ref count already. The Module will keep a copy for its own use.
+
+ // Make sure to set the symbol stream on the module before
+ // attempting to send UpdateModuleSyms messages up for it.
+ SetInMemorySymbolStream(pStream, eSymbolFormatPDB);
+
+ // This can only be called when the module is being created. No-one should have
+ // tried to use the symbols yet, and so there should not be a reader.
+ // If instead, we wanted to call this when a reader could have been created, we need to
+ // serialize access by taking the reader lock, and flush the old reader by calling
+ // code:Module.ReleaseISymUnmanagedReader
+ _ASSERTE( m_pISymUnmanagedReader == NULL );
+
+#ifdef LOGGING
+ LPCWSTR pName = NULL;
+ pName = GetDebugName();
+#endif // LOGGING
+
+ ULONG cbWritten;
+ DWORD dwError = pStream->Write((const void *)pbSyms,
+ (ULONG)cbSyms,
+ &cbWritten);
+ IfFailThrow(HRESULT_FROM_WIN32(dwError));
+
+ // Don't eager load the diasymreader
+
+ // Tell the debugger that symbols have been loaded for this
+ // module. We iterate through all domains which contain this
+ // module's assembly, and send a debugger notify for each one.
+ // <REVISIT_TODO>@perf: it would scale better if we directly knew which domains
+ // the assembly was loaded in.</REVISIT_TODO>
+ if (CORDebuggerAttached())
+ {
+ AppDomainIterator i(FALSE);
+
+ while (i.Next())
+ {
+ AppDomain *pDomain = i.GetDomain();
+
+ if (pDomain->IsDebuggerAttached() && (GetDomain() == SystemDomain::System() ||
+ pDomain->ContainsAssembly(m_pAssembly)))
+ {
+ g_pDebugInterface->SendUpdateModuleSymsEventAndBlock(this, pDomain);
+ }
+ }
+ }
+}
+
+// Clear any cached symbol reader
+void Module::ReleaseISymUnmanagedReader(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ // Caller is responsible for taking the reader lock if the call could occur when
+ // other threads are using or creating the reader
+ if( m_pISymUnmanagedReader != NULL )
+ {
+ // If we previously failed to create a reader, don't attempt to release it
+ // but do clear it out so that we can try again (eg. symbols may have changed)
+ if( m_pISymUnmanagedReader != k_pInvalidSymReader )
+ {
+ m_pISymUnmanagedReader->Release();
+ }
+ m_pISymUnmanagedReader = NULL;
+ }
+}
+
+// Lazily creates a new IL stub cache for this module.
+ILStubCache* Module::GetILStubCache()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Use per-AD cache for domain specific modules when not NGENing
+ BaseDomain *pDomain = GetDomain();
+ if (!pDomain->IsSharedDomain() && !pDomain->AsAppDomain()->IsCompilationDomain())
+ return pDomain->AsAppDomain()->GetILStubCache();
+
+ if (m_pILStubCache == NULL)
+ {
+ ILStubCache *pILStubCache = new ILStubCache(GetLoaderAllocator()->GetHighFrequencyHeap());
+
+ if (FastInterlockCompareExchangePointer(&m_pILStubCache, pILStubCache, NULL) != NULL)
+ {
+ // some thread swooped in and set the field
+ delete pILStubCache;
+ }
+ }
+ _ASSERTE(m_pILStubCache != NULL);
+ return m_pILStubCache;
+}
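+
+// GetILStubCache above uses the standard lock-free publish pattern: build a
+// candidate, install it with a compare-exchange against NULL, and delete the
+// candidate if another thread won the race. A standalone sketch with
+// std::atomic standing in for FastInterlockCompareExchangePointer:
+//
+//   std::atomic<Cache*> g_pCache{nullptr};
+//
+//   Cache* GetCache()
+//   {
+//       if (g_pCache.load() == nullptr)
+//       {
+//           Cache* pCandidate = new Cache();
+//           Cache* pExpected = nullptr;
+//           if (!g_pCache.compare_exchange_strong(pExpected, pCandidate))
+//               delete pCandidate;   // lost the race; another thread published
+//       }
+//       return g_pCache.load();      // non-null after the first call completes
+//   }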
+
+// Called to finish the process of adding a new class with Reflection.Emit
+void Module::AddClass(mdTypeDef classdef)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(!IsResource());
+ }
+ CONTRACTL_END;
+
+ // The fake class associated with the module (global fields & functions) needs to be initialized here
+ // Normal classes are added to the available class hash when their typedef is first created.
+ if (RidFromToken(classdef) == 0)
+ {
+ BuildClassForModule();
+ }
+
+ // Since the module is being modified, the in-memory symbol stream
+ // (if any) has probably also been modified. If we support reading the symbols
+ // then we need to commit the changes to the writer and flush any old readers
+ // However if we don't support reading then we can skip this which will give
+ // a substantial perf improvement. See DDB 671107.
+ if(IsSymbolReadingEnabled())
+ {
+ CONSISTENCY_CHECK(IsReflection()); // this is only used for dynamic modules
+ ISymUnmanagedWriter * pWriter = GetReflectionModule()->GetISymUnmanagedWriter();
+ if (pWriter != NULL)
+ {
+ // Serialize with any concurrent reader creations
+ // Specifically, if we started creating a reader on one thread, and then updated the
+ // symbols on another thread, we need to wait until the initial reader creation has
+ // completed and release it so we don't get stuck with a stale reader.
+ // Also, if we commit to the stream while we're in the process of creating a reader,
+ // the reader will get corrupted/incomplete data.
+ // Note that we must also be in co-operative mode here to ensure the debugger helper
+ // thread can't be simultaneously reading this stream while the process is synchronized
+ // (code:Debugger::GetSymbolBytes)
+ CrstHolder holder(&m_ISymUnmanagedReaderCrst);
+
+ // Flush writes to the symbol store to the symbol stream
+ // Note that we do this when finishing the addition of the class, instead of
+ // on-demand in GetISymUnmanagedReader because the writer is not thread-safe.
+ // Here, we're inside the lock of TypeBuilder.CreateType, and so it's safe to
+ // manipulate the writer.
+ SafeComHolderPreemp<ISymUnmanagedWriter3> pWriter3;
+ HRESULT thr = pWriter->QueryInterface(IID_ISymUnmanagedWriter3, (void**)&pWriter3);
+ CONSISTENCY_CHECK(SUCCEEDED(thr));
+ if (SUCCEEDED(thr))
+ {
+ thr = pWriter3->Commit();
+ if (SUCCEEDED(thr))
+ {
+ // Flush any cached symbol reader to ensure we pick up any new symbols
+ ReleaseISymUnmanagedReader();
+ }
+ }
+
+ // If either the QI or Commit failed
+ if (FAILED(thr))
+ {
+ // The only way we expect this might fail is out-of-memory. In that
+ // case we silently fail to update the symbol stream with new data, but
+ // we leave the existing reader intact.
+ CONSISTENCY_CHECK(thr==E_OUTOFMEMORY);
+ }
+ }
+ }
+}
+
+//---------------------------------------------------------------------------
+// For the global class this builds the table of MethodDescs and adds the rids
+// to the MethodDef map.
+//---------------------------------------------------------------------------
+void Module::BuildClassForModule()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ IMDInternalImport * pImport = GetMDImport();
+ DWORD cFunctions, cFields;
+
+ {
+ // Obtain count of global functions
+ HENUMInternalHolder hEnum(pImport);
+ hEnum.EnumGlobalFunctionsInit();
+ cFunctions = pImport->EnumGetCount(&hEnum);
+ }
+
+ {
+ // Obtain count of global fields
+ HENUMInternalHolder hEnum(pImport);
+ hEnum.EnumGlobalFieldsInit();
+ cFields = pImport->EnumGetCount(&hEnum);
+ }
+
+ // If we have any work to do...
+ if (cFunctions > 0 || cFields > 0)
+ {
+ COUNTER_ONLY(size_t _HeapSize = 0);
+
+ TypeKey typeKey(this, COR_GLOBAL_PARENT_TOKEN);
+ TypeHandle typeHnd = GetClassLoader()->LoadTypeHandleForTypeKeyNoLock(&typeKey);
+
+#ifdef ENABLE_PERF_COUNTERS
+
+ _HeapSize = GetLoaderAllocator()->GetHighFrequencyHeap()->GetSize();
+
+ GetPerfCounters().m_Loading.cbLoaderHeapSize = _HeapSize;
+#endif // ENABLE_PERF_COUNTERS
+
+ }
+}
+
+#endif // !DACCESS_COMPILE
+
+// Returns true iff the debugger should be notified about this module
+//
+// Notes:
+// Debugger doesn't need to be notified about modules that can't be executed,
+// like inspection and resource only. These are just pure data.
+//
+// This should be immutable for an instance of a module. That ensures that the debugger gets consistent
+// notifications about it. If this value mutates, then the debugger may miss relevant notifications.
+BOOL Module::IsVisibleToDebugger()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ if (IsResource())
+ {
+ return FALSE;
+ }
+
+ if (IsIntrospectionOnly())
+ {
+ return FALSE;
+ }
+
+
+ // If for whatever other reason, we can't run it, then don't notify the debugger about it.
+ Assembly * pAssembly = GetAssembly();
+ if (!pAssembly->HasRunAccess())
+ {
+ return FALSE;
+ }
+ return TRUE;
+}
+
+PEImageLayout * Module::GetNativeOrReadyToRunImage()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_READYTORUN
+ if (IsReadyToRun())
+ return GetReadyToRunInfo()->GetImage();
+#endif
+
+ return GetNativeImage();
+}
+
+PTR_CORCOMPILE_IMPORT_SECTION Module::GetImportSections(COUNT_T *pCount)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_READYTORUN
+ if (IsReadyToRun())
+ return GetReadyToRunInfo()->GetImportSections(pCount);
+#endif
+
+ return GetNativeImage()->GetNativeImportSections(pCount);
+}
+
+PTR_CORCOMPILE_IMPORT_SECTION Module::GetImportSectionFromIndex(COUNT_T index)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_READYTORUN
+ if (IsReadyToRun())
+ return GetReadyToRunInfo()->GetImportSectionFromIndex(index);
+#endif
+
+ return GetNativeImage()->GetNativeImportSectionFromIndex(index);
+}
+
+PTR_CORCOMPILE_IMPORT_SECTION Module::GetImportSectionForRVA(RVA rva)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_READYTORUN
+ if (IsReadyToRun())
+ return GetReadyToRunInfo()->GetImportSectionForRVA(rva);
+#endif
+
+ return GetNativeImage()->GetNativeImportSectionForRVA(rva);
+}
+
+TADDR Module::GetIL(DWORD target)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ if (target == 0)
+ return NULL;
+
+ return m_file->GetIL(target);
+}
+
+PTR_VOID Module::GetRvaField(DWORD rva, BOOL fZapped)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+#ifdef FEATURE_PREJIT
+ if (fZapped && m_file->IsILOnly())
+ {
+ return dac_cast<PTR_VOID>(m_file->GetLoadedNative()->GetRvaData(rva,NULL_OK));
+ }
+#endif // FEATURE_PREJIT
+
+ return m_file->GetRvaField(rva);
+}
+
+#ifndef DACCESS_COMPILE
+
+CHECK Module::CheckRvaField(RVA field)
+{
+ WRAPPER_NO_CONTRACT;
+ if (!IsReflection())
+ CHECK(m_file->CheckRvaField(field));
+ CHECK_OK;
+}
+
+CHECK Module::CheckRvaField(RVA field, COUNT_T size)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ if (!IsReflection())
+ CHECK(m_file->CheckRvaField(field, size));
+ CHECK_OK;
+}
+
+#endif // !DACCESS_COMPILE
+
+BOOL Module::HasTls()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return m_file->HasTls();
+}
+
+BOOL Module::IsRvaFieldTls(DWORD rva)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return m_file->IsRvaFieldTls(rva);
+}
+
+UINT32 Module::GetFieldTlsOffset(DWORD rva)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return m_file->GetFieldTlsOffset(rva);
+}
+
+UINT32 Module::GetTlsIndex()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return m_file->GetTlsIndex();
+}
+
+PCCOR_SIGNATURE Module::GetSignature(RVA signature)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return m_file->GetSignature(signature);
+}
+
+RVA Module::GetSignatureRva(PCCOR_SIGNATURE signature)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return m_file->GetSignatureRva(signature);
+}
+
+
+
+// In DAC builds this function was being called on host addresses which may or may not
+// have been marshalled from the target. Such addresses can't be reliably mapped back to
+// target addresses, which means we can't tell whether they came from the IL or not
+//
+// Security note: Any security which you might wish to gain by verifying the origin of
+// a signature isn't available in DAC. The attacker can provide a dump which spoofs all
+// module ranges. In other words the attacker can make the signature appear to come from
+// anywhere, but still violate all the rules that a signature from that location would
+// otherwise follow. I am removing this function from DAC in order to prevent anyone from
+// getting a false sense of security (in addition to its functional shortcomings)
+
+#ifndef DACCESS_COMPILE
+BOOL Module::IsSigInIL(PCCOR_SIGNATURE signature)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ FORBID_FAULT;
+ MODE_ANY;
+ NOTHROW;
+ SO_TOLERANT;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return m_file->IsPtrInILImage(signature);
+}
+
+#ifdef FEATURE_PREJIT
+StubMethodHashTable *Module::GetStubMethodHashTable()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ if (m_pStubMethodHashTable == NULL && SystemDomain::GetCurrentDomain()->IsCompilationDomain())
+ {
+ // we only need to create the hash table when NGENing, it is read-only at run-time
+ AllocMemTracker amTracker;
+ m_pStubMethodHashTable = StubMethodHashTable::Create(GetLoaderAllocator(), this, METHOD_STUBS_HASH_BUCKETS, &amTracker);
+ amTracker.SuppressRelease();
+ }
+
+ return m_pStubMethodHashTable;
+}
+#endif // FEATURE_PREJIT
+
+CHECK Module::CheckSignatureRva(RVA signature)
+{
+ WRAPPER_NO_CONTRACT;
+ CHECK(m_file->CheckSignatureRva(signature));
+ CHECK_OK;
+}
+
+CHECK Module::CheckSignature(PCCOR_SIGNATURE signature)
+{
+ WRAPPER_NO_CONTRACT;
+ CHECK(m_file->CheckSignature(signature));
+ CHECK_OK;
+}
+
+void Module::InitializeStringData(DWORD token, EEStringData *pstrData, CQuickBytes *pqb)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(TypeFromToken(token) == mdtString);
+ }
+ CONTRACTL_END;
+
+ BOOL fIs80Plus;
+ DWORD dwCharCount;
+ LPCWSTR pString;
+ if (FAILED(GetMDImport()->GetUserString(token, &dwCharCount, &fIs80Plus, &pString)) ||
+ (pString == NULL))
+ {
+ THROW_BAD_FORMAT(BFA_BAD_STRING_TOKEN_RANGE, this);
+ }
+
+#if !BIGENDIAN
+ pstrData->SetStringBuffer(pString);
+#else // !!BIGENDIAN
+ _ASSERTE(pqb != NULL);
+
+ LPWSTR pSwapped;
+
+ pSwapped = (LPWSTR) pqb->AllocThrows(dwCharCount * sizeof(WCHAR));
+ memcpy((void*)pSwapped, (void*)pString, dwCharCount*sizeof(WCHAR));
+ SwapStringLength(pSwapped, dwCharCount);
+
+ pstrData->SetStringBuffer(pSwapped);
+#endif // !!BIGENDIAN
+
+ // MD and String look at this bit in opposite ways. Here's where we'll do the conversion.
+ // MD sets the bit to true if the string contains characters greater than 80.
+ // String sets the bit to true if the string doesn't contain characters greater than 80.
+
+ pstrData->SetCharCount(dwCharCount);
+ pstrData->SetIsOnlyLowChars(!fIs80Plus);
+}
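+
+// On big-endian targets the little-endian UTF-16 buffer from metadata must be
+// byte-swapped into scratch memory before use. A standalone sketch of the
+// per-character swap that SwapStringLength is assumed to perform above:
+//
+//   void SwapUtf16(uint16_t* p, size_t cch)
+//   {
+//       for (size_t i = 0; i < cch; i++)
+//           p[i] = (uint16_t)((p[i] << 8) | (p[i] >> 8));   // swap the two bytes
+//   }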
+
+#ifndef CROSSGEN_COMPILE
+
+#ifdef FEATURE_PREJIT
+OBJECTHANDLE Module::ResolveStringRefHelper(DWORD token, BaseDomain *pDomain, PTR_CORCOMPILE_IMPORT_SECTION pSection, EEStringData *pStrData)
+{
+ PEImageLayout *pNativeImage = GetNativeImage();
+
+ // Get the table
+ COUNT_T tableSize;
+ TADDR tableBase = pNativeImage->GetDirectoryData(&pSection->Section, &tableSize);
+
+ // Walk the handle table.
+ // @TODO: If we ever care about the perf of this function, we could sort the tokens
+ // using the string they point to as a key, so we could do a binary search
+ for (SIZE_T * pEntry = (SIZE_T *)tableBase ; pEntry < (SIZE_T *)(tableBase + tableSize); pEntry++)
+ {
+ // Ensure that the compiler won't fetch the value twice
+ SIZE_T entry = VolatileLoadWithoutBarrier(pEntry);
+
+ if (CORCOMPILE_IS_POINTER_TAGGED(entry))
+ {
+ BYTE * pBlob = (BYTE *) pNativeImage->GetRvaData(CORCOMPILE_UNTAG_TOKEN(entry));
+
+ // Note that we only care about strings from the current module, and so we do not check ENCODE_MODULE_OVERRIDE
+ if (*pBlob++ == ENCODE_STRING_HANDLE &&
+ TokenFromRid(CorSigUncompressData((PCCOR_SIGNATURE&) pBlob), mdtString) == token)
+ {
+ EnsureWritablePages(pEntry);
+
+ // This string hasn't been fixed up. Synchronize the update with the normal
+ // fixup logic
+ {
+ CrstHolder ch(this->GetFixupCrst());
+
+ if (!CORCOMPILE_IS_POINTER_TAGGED(*pEntry))
+ {
+ // We lost the race, just return current entry
+ }
+ else
+ {
+ *pEntry = (SIZE_T) ResolveStringRef(token, pDomain, false);
+ }
+ }
+
+ return (OBJECTHANDLE) *pEntry;
+ }
+ }
+ else
+ {
+ OBJECTREF* pRef = (OBJECTREF*) entry;
+ _ASSERTE((*pRef)->GetMethodTable() == g_pStringClass);
+
+ STRINGREF stringRef = (STRINGREF) *pRef;
+
+ // Is this the string we are trying to resolve?
+ if (pStrData->GetCharCount() == stringRef->GetStringLength() &&
+ memcmp((void*)pStrData->GetStringBuffer(),
+ (void*) stringRef->GetBuffer(),
+ pStrData->GetCharCount()*sizeof(WCHAR)) == 0)
+ {
+ // We found it, so we just have to return this instance
+ return (OBJECTHANDLE) entry;
+ }
+ }
+ }
+ return NULL;
+}
+#endif // FEATURE_PREJIT
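+
+// The table walk above relies on the NGen fixup convention that a tagged entry
+// is still an encoded token while an untagged entry is already a live handle,
+// fixed up at most once under the fixup lock. A minimal sketch of that
+// double-checked fixup (hypothetical low-bit tag and CreateHandle standing in
+// for CORCOMPILE_IS_POINTER_TAGGED and ResolveStringRef):
+//
+//   bool IsTagged(size_t e) { return (e & 1) != 0; }
+//
+//   size_t ResolveEntry(size_t* pEntry, std::mutex& fixupLock)
+//   {
+//       if (IsTagged(*pEntry))
+//       {
+//           std::lock_guard<std::mutex> lock(fixupLock);
+//           if (IsTagged(*pEntry))         // re-check: another thread may have
+//               *pEntry = CreateHandle();  // fixed this entry up already
+//       }
+//       return *pEntry;                    // now an untagged live handle
+//   }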
+
+OBJECTHANDLE Module::ResolveStringRef(DWORD token, BaseDomain *pDomain, bool bNeedToSyncWithFixups)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(TypeFromToken(token) == mdtString);
+ }
+ CONTRACTL_END;
+
+ EEStringData strData;
+ OBJECTHANDLE string = NULL;
+
+#if !BIGENDIAN
+ InitializeStringData(token, &strData, NULL);
+#else // !!BIGENDIAN
+ CQuickBytes qb;
+ InitializeStringData(token, &strData, &qb);
+#endif // !!BIGENDIAN
+
+ GCX_COOP();
+
+ // We can only do this for native images as they guarantee that ResolveStringRef will be
+ // called only once per string from this module. @TODO: We really don't have any way of asserting
+ // this, which would be nice... (and is needed to guarantee correctness)
+#ifdef FEATURE_PREJIT
+ if (HasNativeImage() && IsNoStringInterning())
+ {
+ if (bNeedToSyncWithFixups)
+ {
+ // In an ngen image, it is possible that we get here without coming from a fixup
+ // (FixupNativeEntry case). In that unfortunate case (ngen partial images, dynamic methods,
+ // lazy string inits) we have to troll through the fixup list: if the string is there,
+ // reuse it; if it's there but hasn't been fixed up, fix it up now; and if it isn't
+ // there at all, fall back to our old style string interning. Going through this code path is
+ // guaranteed to be slow. If necessary, we can further optimize it by sorting the token table.
+ // Another way of solving this would be having a token-to-string table (which would require knowing
+ // all our possible strings in the ngen case; this is possible by looking at the IL).
+
+ PEImageLayout * pNativeImage = GetNativeImage();
+
+ COUNT_T nSections;
+ PTR_CORCOMPILE_IMPORT_SECTION pSections = pNativeImage->GetNativeImportSections(&nSections);
+
+ for (COUNT_T iSection = 0; iSection < nSections; iSection++)
+ {
+ PTR_CORCOMPILE_IMPORT_SECTION pSection = pSections + iSection;
+
+ if (pSection->Type != CORCOMPILE_IMPORT_TYPE_STRING_HANDLE)
+ continue;
+
+ OBJECTHANDLE oh = ResolveStringRefHelper(token, pDomain, pSection, &strData);
+ if (oh != NULL)
+ return oh;
+ }
+
+ // The string is not in our fixup list, so just intern it old style (using hashtable)
+ goto INTERN_OLD_STYLE;
+
+ }
+ /* Unfortunately, this assert won't work in some cases of generics, consider the following scenario:
+
+ 1) Generic type in mscorlib.
+ 2) Instantiation of generic (1) (via valuetype) in another module
+ 3) other module now holds a copy of the code of the generic for that particular instantiation
+ however, it is resolving the string literals against mscorlib, which breaks the invariant
+ this assert was based on (no string fixups against other modules). In fact, with NoStringInterning,
+ our behavior is not very intuitive.
+ */
+ /*
+ _ASSERTE(pDomain == GetAssembly()->GetDomain() && "If you are doing ldstr for a string "
+ "in another module, either the JIT is very smart or you have a bug, check INLINE_NO_CALLEE_LDSTR");
+
+ */
+ /*
+ Dev10 804385 bugfix -
+ We should be using appdomain that the string token lives in (GetAssembly->GetDomain())
+ to allocate the System.String object instead of the appdomain that first uses the ldstr <token> (pDomain).
+
+ Otherwise, it is possible to get into the situation that pDomain is unloaded but GetAssembly->GetDomain() is
+ still kicking around. Anything else that is still using that string will now be pointing to an object
+ that will be freed when the next GC happens.
+ */
+ pDomain = GetAssembly()->GetDomain();
+
+ // The caller is going to update an ngen fixup entry. The fixup entry
+ // is used to reference the string and to ensure that the string is
+ // allocated only once. Hence, this operation needs to be done under a lock.
+ _ASSERTE(GetFixupCrst()->OwnedByCurrentThread());
+
+ // Allocate handle
+ OBJECTREF* pRef = pDomain->AllocateObjRefPtrsInLargeTable(1);
+
+ STRINGREF str = AllocateStringObject(&strData);
+ SetObjectReference(pRef, str, NULL);
+
+ #ifdef LOGGING
+ int length = strData.GetCharCount();
+ length = min(length, 100);
+ WCHAR *szString = (WCHAR *)_alloca((length + 1) * sizeof(WCHAR));
+ memcpyNoGCRefs((void*)szString, (void*)strData.GetStringBuffer(), length * sizeof(WCHAR));
+ szString[length] = '\0';
+ LOG((LF_APPDOMAIN, LL_INFO10000, "String literal \"%S\" won't be interned due to NoInterningAttribute\n", szString));
+ #endif // LOGGING
+
+ return (OBJECTHANDLE) pRef;
+ }
+
+
+INTERN_OLD_STYLE:
+#endif
+ // Retrieve the string from the appropriate LoaderAllocator
+ LoaderAllocator *pLoaderAllocator;
+
+ if (this->IsCollectible())
+ pLoaderAllocator = this->GetLoaderAllocator();
+ else
+ pLoaderAllocator = pDomain->GetLoaderAllocator();
+
+ string = (OBJECTHANDLE)pLoaderAllocator->GetStringObjRefPtrFromUnicodeString(&strData);
+
+ return string;
+}
+#endif // CROSSGEN_COMPILE
+
+//
+// Used by the verifier. Returns whether this stringref is valid.
+//
+CHECK Module::CheckStringRef(DWORD token)
+{
+ LIMITED_METHOD_CONTRACT;
+ CHECK(TypeFromToken(token)==mdtString);
+ CHECK(!IsNilToken(token));
+ CHECK(GetMDImport()->IsValidToken(token));
+ CHECK_OK;
+}
+
+mdToken Module::GetEntryPointToken()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return m_file->GetEntryPointToken();
+}
+
+BYTE *Module::GetProfilerBase()
+{
+ CONTRACT(BYTE*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACT_END;
+
+ if (m_file == NULL) // I'd rather assert this is not the case...
+ {
+ RETURN NULL;
+ }
+ else if (HasNativeImage())
+ {
+ RETURN (BYTE*)(GetNativeImage()->GetBase());
+ }
+ else if (m_file->IsLoaded())
+ {
+ RETURN (BYTE*)(m_file->GetLoadedIL()->GetBase());
+ }
+ else
+ {
+ RETURN NULL;
+ }
+}
+
+void Module::AddActiveDependency(Module *pModule, BOOL unconditional)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(pModule != this);
+ PRECONDITION(!IsSystem());
+ PRECONDITION(!GetAssembly()->IsDomainNeutral() || pModule->GetAssembly()->IsDomainNeutral() || GetAppDomain()->IsDefaultDomain());
+ POSTCONDITION(IsSingleAppDomain() || HasActiveDependency(pModule));
+ POSTCONDITION(IsSingleAppDomain() || !unconditional || HasUnconditionalActiveDependency(pModule));
+ // Postcondition about activation
+ }
+ CONTRACT_END;
+
+ // Activation tracking is not required in single domain mode. Activate the target immediately.
+ if (IsSingleAppDomain())
+ {
+ pModule->EnsureActive();
+ RETURN;
+ }
+
+ // In the default AppDomain we delay a closure walk until a sharing attempt has been made.
+ // This might result in a situation where a domain neutral assembly from the default AppDomain
+ // depends on something resolved by the assembly resolve event (even Ref.Emit assemblies).
+ // Since we won't actually share such assemblies, and the default AD itself cannot go away, we
+ // do not need to assert for such assemblies; hence the " || GetAppDomain()->IsDefaultDomain()"
+
+ CONSISTENCY_CHECK_MSG(!GetAssembly()->IsDomainNeutral() || pModule->GetAssembly()->IsDomainNeutral() || GetAppDomain()->IsDefaultDomain(),
+ "Active dependency from domain neutral to domain bound is illegal");
+
+ // We must track this dependency for multiple domains' use
+ STRESS_LOG2(LF_CLASSLOADER, LL_INFO100000," %p -> %p\n",this,pModule);
+
+ _ASSERTE(!unconditional || pModule->HasNativeImage());
+ _ASSERTE(!unconditional || HasNativeImage());
+
+ COUNT_T index;
+
+ // This function can run in parallel with DomainFile::Activate and synchronizes via GetNumberOfActivations().
+ // Because we expose the dependency only at the end, Domain::Activate might miss it, but it will increment a counter,
+ // so we can realize we have to additionally propagate the dependency into that appdomain;
+ // currently we do that just by rescanning all appdomains.
+ // Needless to say, updating the counter and checking the counter + adding the dependency to the list should be atomic.
+
+
+ BOOL propagate = FALSE;
+ ULONG startCounter=0;
+ ULONG endCounter=0;
+ do
+ {
+ // First, add the dependency to the physical dependency list
+ {
+#ifdef _DEBUG
+ CHECK check;
+ if (unconditional)
+ check=DomainFile::CheckUnactivatedInAllDomains(this);
+#endif // _DEBUG
+
+ CrstHolder lock(&m_Crst);
+ startCounter=GetNumberOfActivations();
+
+ index = m_activeDependencies.FindElement(0, pModule);
+ if (index == (COUNT_T) ArrayList::NOT_FOUND)
+ {
+ propagate = TRUE;
+ STRESS_LOG3(LF_CLASSLOADER, LL_INFO100,"Adding new module dependency %p -> %p, unconditional=%i\n",this,pModule,unconditional);
+ }
+
+ if (unconditional)
+ {
+ if (propagate)
+ {
+ CONSISTENCY_CHECK_MSG(check,
+ "Unconditional dependency cannot be added after module has already been activated");
+
+ index = m_activeDependencies.GetCount();
+ m_activeDependencies.Append(pModule);
+ m_unconditionalDependencies.SetBit(index);
+ STRESS_LOG2(LF_CLASSLOADER, LL_INFO100," Unconditional module dependency propagated %p -> %p\n",this,pModule);
+ // Now other threads can skip this dependency without propagating.
+ }
+ RETURN;
+ }
+
+ }
+
+ // Now we have to propagate any module activations in the loader
+
+ if (propagate)
+ {
+
+ _ASSERTE(!unconditional);
+ DomainFile::PropagateNewActivation(this, pModule);
+
+ CrstHolder lock(&m_Crst);
+ STRESS_LOG2(LF_CLASSLOADER, LL_INFO100," Conditional module dependency propagated %p -> %p\n",this,pModule);
+ // Now other threads can skip this dependency without propagating.
+ endCounter=GetNumberOfActivations();
+ if (startCounter == endCounter)
+ m_activeDependencies.Append(pModule);
+ }
+
+ } while (propagate && startCounter != endCounter); // need to retry if someone was activated in parallel
+ RETURN;
+}
+
+BOOL Module::HasActiveDependency(Module *pModule)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ if (pModule == this)
+ return TRUE;
+
+ DependencyIterator i = IterateActiveDependencies();
+ while (i.Next())
+ {
+ if (i.GetDependency() == pModule)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+BOOL Module::HasUnconditionalActiveDependency(Module *pModule)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ CAN_TAKE_LOCK;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ if (pModule == this)
+ return TRUE;
+
+ DependencyIterator i = IterateActiveDependencies();
+ while (i.Next())
+ {
+ if (i.GetDependency() == pModule
+ && i.IsUnconditional())
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+void Module::EnableModuleFailureTriggers(Module *pModuleTo, AppDomain *pDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ // At this point we need to enable failure triggers we have placed in the code for this module. However,
+ // the failure trigger codegen logic is NYI. To keep correctness, we just allow the exception to propagate
+ // here. Note that in general this will enforce the failure invariants, but will also result in some rude
+ // behavior as these failures will be propagated too widely rather than constrained to the appropriate
+ // assemblies/app domains.
+ //
+ // This should throw.
+ STRESS_LOG2(LF_CLASSLOADER, LL_INFO100,"EnableModuleFailureTriggers for module %p in AppDomain %i\n",pModuleTo,pDomain->GetId().m_dwId);
+ DomainFile *pDomainFileTo = pModuleTo->GetDomainFile(pDomain);
+ pDomainFileTo->EnsureActive();
+
+ // @NYI: shouldn't get here yet since we propagate failures
+ UNREACHABLE_MSG("Module failure triggers NYI");
+}
+
+#endif //!DACCESS_COMPILE
+
+//
+// A GetAssemblyIfLoadedAppDomainIterator is used to iterate over all domains that
+// are known to be walkable at the time GetAssemblyIfLoaded is executed.
+//
+// The iteration is guaranteed to include all domains that exist at the
+// start & end of the iteration that are safely accessible. This class is logically part
+// of GetAssemblyIfLoaded and logically has the same set of contracts.
+//
+
+class GetAssemblyIfLoadedAppDomainIterator
+{
+ enum IteratorType
+ {
+ StackwalkingThreadIterator,
+ AllAppDomainWalkingIterator,
+ CurrentAppDomainIterator
+ } m_iterType;
+
+public:
+ GetAssemblyIfLoadedAppDomainIterator() :
+ m_adIteratorAll(TRUE),
+ m_appDomainCurrent(NULL),
+ m_pFrame(NULL),
+ m_fNextCalledForCurrentADIterator(FALSE)
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifndef DACCESS_COMPILE
+ if (IsStackWalkerThread())
+ {
+ Thread * pThread = (Thread *)ClrFlsGetValue(TlsIdx_StackWalkerWalkingThread);
+ m_iterType = StackwalkingThreadIterator;
+ m_pFrame = pThread->GetFrame();
+ m_appDomainCurrent = pThread->GetDomain();
+ }
+ else if (IsGCThread())
+ {
+ m_iterType = AllAppDomainWalkingIterator;
+ m_adIteratorAll.Init();
+ }
+ else
+ {
+ _ASSERTE(::GetAppDomain() != NULL);
+ m_appDomainCurrent = ::GetAppDomain();
+ m_iterType = CurrentAppDomainIterator;
+ }
+#else //!DACCESS_COMPILE
+ // We have to walk all AppDomains in debugger
+ m_iterType = AllAppDomainWalkingIterator;
+ m_adIteratorAll.Init();
+#endif //!DACCESS_COMPILE
+ }
+
+ BOOL Next()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ switch (m_iterType)
+ {
+#ifndef DACCESS_COMPILE
+ case StackwalkingThreadIterator:
+ if (!m_fNextCalledForCurrentADIterator)
+ {
+ m_fNextCalledForCurrentADIterator = TRUE;
+
+ // Try searching frame chain if the current domain is NULL
+ if (m_appDomainCurrent == NULL)
+ return Next();
+
+ return TRUE;
+ }
+ else
+ {
+ while (m_pFrame != FRAME_TOP)
+ {
+ AppDomain * pDomain = m_pFrame->GetReturnDomain();
+ if ((pDomain != NULL) && (pDomain != m_appDomainCurrent))
+ {
+ m_appDomainCurrent = pDomain;
+ return TRUE;
+ }
+ m_pFrame = m_pFrame->PtrNextFrame();
+ }
+
+ return FALSE;
+ }
+#endif //!DACCESS_COMPILE
+
+ case AllAppDomainWalkingIterator:
+ {
+ BOOL fSuccess = m_adIteratorAll.Next();
+ if (fSuccess)
+ m_appDomainCurrent = m_adIteratorAll.GetDomain();
+ return fSuccess;
+ }
+
+#ifndef DACCESS_COMPILE
+ case CurrentAppDomainIterator:
+ {
+ BOOL retVal;
+ retVal = !m_fNextCalledForCurrentADIterator;
+ m_fNextCalledForCurrentADIterator = TRUE;
+ return retVal;
+ }
+#endif //!DACCESS_COMPILE
+
+ default:
+ _ASSERTE(FALSE);
+ return FALSE;
+ }
+ }
+
+ AppDomain * GetDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_appDomainCurrent;
+ }
+
+ BOOL UsingCurrentAD()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_iterType == CurrentAppDomainIterator;
+ }
+
+ private:
+
+ UnsafeAppDomainIterator m_adIteratorAll;
+ AppDomain * m_appDomainCurrent;
+ Frame * m_pFrame;
+ BOOL m_fNextCalledForCurrentADIterator;
+}; // class GetAssemblyIfLoadedAppDomainIterator
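+
+// A minimal usage sketch of this iterator, mirroring how GetAssemblyIfLoaded
+// consumes it below (all names are from this file; nothing new is assumed):
+//
+//     GetAssemblyIfLoadedAppDomainIterator appDomainIter;
+//     while (appDomainIter.Next())
+//     {
+//         AppDomain * pAppDomainExamine = appDomainIter.GetDomain();
+//         // ... examine pAppDomainExamine ...
+//     }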
+
+#if !defined(DACCESS_COMPILE) && defined(FEATURE_PREJIT)
+// This function, given an AssemblyRef into the ngen-generated native metadata section, will find the referenced assembly if
+// 1. the Assembly is defined with a different name than the AssemblyRef provides, and
+// 2. the Assembly has reached the stage of being loaded.
+// This function is used as a helper to assist GetAssemblyIfLoaded with its tasks in the conditions
+// where GetAssemblyIfLoaded must succeed (otherwise we violate various invariants in the system required for
+// correct implementation of GC, Stackwalking, and generic type loading).
+Assembly * Module::GetAssemblyIfLoadedFromNativeAssemblyRefWithRefDefMismatch(mdAssemblyRef kAssemblyRef, BOOL *pfDiscoveredAssemblyRefMatchesTargetDefExactly)
+{
+ CONTRACT(Assembly *)
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ _ASSERTE(HasNativeImage());
+
+ Assembly *pAssembly = NULL;
+ IMDInternalImport *pImportFoundNativeImage = this->GetNativeAssemblyImport(FALSE);
+
+ if (!pImportFoundNativeImage)
+ {
+ RETURN NULL;
+ }
+
+ if (kAssemblyRef != mdAssemblyRefNil)
+ {
+ // Scan CORCOMPILE_DEPENDENCIES tables
+ PEImageLayout* pNativeLayout = this->GetNativeImage();
+ COUNT_T dependencyCount;
+ CORCOMPILE_DEPENDENCY *pDependencies = pNativeLayout->GetNativeDependencies(&dependencyCount);
+
+ // Find the assemblyDef that defines the exact target
+ mdAssemblyRef foundAssemblyDef = mdAssemblyRefNil;
+
+ for (COUNT_T i = 0; i < dependencyCount; ++i)
+ {
+ CORCOMPILE_DEPENDENCY* pDependency = &(pDependencies[i]);
+ if (pDependency->dwAssemblyRef == kAssemblyRef)
+ {
+ foundAssemblyDef = pDependency->dwAssemblyDef;
+ break;
+ }
+ }
+
+ // In this case we know there is no assembly redirection involved. Skip any additional work.
+ if (kAssemblyRef == foundAssemblyDef)
+ {
+ *pfDiscoveredAssemblyRefMatchesTargetDefExactly = true;
+ RETURN NULL;
+ }
+
+ if (foundAssemblyDef != mdAssemblyRefNil)
+ {
+ // Find out if THIS reference is satisfied
+ // Specify fDoNotUtilizeExtraChecks to prevent recursion
+ Assembly *pAssemblyCandidate = this->GetAssemblyIfLoaded(foundAssemblyDef, NULL, NULL, pImportFoundNativeImage, TRUE /*fDoNotUtilizeExtraChecks*/);
+
+ // This extended check is designed only to find assemblies loaded via an AssemblySpecBindingCache based binder. Verify that's what we found.
+ if(pAssemblyCandidate != NULL)
+ {
+#ifdef FEATURE_HOSTED_BINDER
+ if (!pAssemblyCandidate->GetManifestFile()->HasHostAssembly())
+#endif // FEATURE_HOSTED_BINDER
+ {
+ pAssembly = pAssemblyCandidate;
+ }
+#ifdef FEATURE_HOSTED_BINDER
+ else
+ {
+ DWORD binderFlags = 0;
+ ICLRPrivAssembly * pPrivBinder = pAssemblyCandidate->GetManifestFile()->GetHostAssembly();
+ HRESULT hrBinderFlagCheck = pPrivBinder->GetBinderFlags(&binderFlags);
+ if (SUCCEEDED(hrBinderFlagCheck) && (binderFlags & BINDER_FINDASSEMBLYBYSPEC_REQUIRES_EXACT_MATCH))
+ {
+ pAssembly = pAssemblyCandidate;
+ }
+ else
+ {
+ // This should only happen in the generic instantiation case when multiple threads are racing and
+ // the assembly found is one which we will determine is the wrong assembly.
+ //
+ // We can't assert that (as it's possible under stress); however, it shouldn't happen in the stack walk or GC case, so we assert in those cases.
+ _ASSERTE("Non-AssemblySpecBindingCache based assembly found with extended search" && !(IsStackWalkerThread() || IsGCThread()) && IsGenericInstantiationLookupCompareThread());
+ }
+ }
+#endif // FEATURE_HOSTED_BINDER
+ }
+ }
+ }
+
+ RETURN pAssembly;
+}
+#endif // !defined(DACCESS_COMPILE) && defined(FEATURE_PREJIT)
+
+// Fills ppContainingWinRtAppDomain only if WinRT type name is passed and if the assembly is found (return value != NULL).
+Assembly *
+Module::GetAssemblyIfLoaded(
+ mdAssemblyRef kAssemblyRef,
+ LPCSTR szWinRtNamespace, // = NULL
+ LPCSTR szWinRtClassName, // = NULL
+ IMDInternalImport * pMDImportOverride, // = NULL
+ BOOL fDoNotUtilizeExtraChecks, // = FALSE
+ ICLRPrivBinder *pBindingContextForLoadedAssembly // = NULL
+)
+{
+ CONTRACT(Assembly *)
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ Assembly * pAssembly = NULL;
+ BOOL fCanUseRidMap = ((pMDImportOverride == NULL) &&
+ (szWinRtNamespace == NULL));
+
+#ifdef _DEBUG
+ fCanUseRidMap = fCanUseRidMap && (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GetAssemblyIfLoadedIgnoreRidMap) == 0);
+#endif
+
+ // If we're here due to a generic instantiation, then we should only be querying information from the ngen image we're finding the generic instantiation in.
+#if !defined(DACCESS_COMPILE) && defined(FEATURE_PREJIT)
+ _ASSERTE(!IsGenericInstantiationLookupCompareThread() || HasNativeImage());
+#endif
+
+ // Don't do a lookup if an override IMDInternalImport is provided, since the lookup is for the
+ // standard IMDInternalImport and might result in an incorrect result.
+ // WinRT references also do not update RID map, so don't try to look it up
+ if (fCanUseRidMap)
+ {
+ pAssembly = LookupAssemblyRef(kAssemblyRef);
+ }
+
+#ifndef DACCESS_COMPILE
+ // Check if actually loaded, unless a GC is in progress or the current thread is
+ // walking the stack (either its own stack, or another thread's stack) as that works
+ // only with loaded assemblies
+ //
+ // NOTE: The case where the current thread is walking a stack can be problematic for
+ // other reasons, as the remaining code of this function uses "GetAppDomain()", when
+ // in fact the right AppDomain to use is the one corresponding to the frame being
+ // traversed on the walked thread. Dev10 TFS bug# 762348 tracks that issue.
+ if ((pAssembly != NULL) && !IsGCThread() && !IsStackWalkerThread())
+ {
+ _ASSERTE(::GetAppDomain() != NULL);
+ DomainAssembly * pDomainAssembly = pAssembly->FindDomainAssembly(::GetAppDomain());
+ if ((pDomainAssembly == NULL) || !pDomainAssembly->IsLoaded())
+ pAssembly = NULL;
+ }
+#endif //!DACCESS_COMPILE
+
+ if (pAssembly == NULL)
+ {
+ // If in stackwalking or gc mode
+ // For each AppDomain that is on the stack being walked...
+ // For each AppDomain in the process... if gc'ing
+ // For the current AppDomain ... if none of the above
+ GetAssemblyIfLoadedAppDomainIterator appDomainIter;
+
+ while (appDomainIter.Next())
+ {
+ AppDomain * pAppDomainExamine = appDomainIter.GetDomain();
+
+ DomainAssembly * pCurAssemblyInExamineDomain = GetAssembly()->FindDomainAssembly(pAppDomainExamine);
+ if (pCurAssemblyInExamineDomain == NULL)
+ {
+ continue;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (szWinRtNamespace != NULL)
+ {
+ if (IsIntrospectionOnly())
+ { // We do not have to implement this method for ReflectionOnly WinRT type requests
+ // ReflectionOnly WinRT types will never have instances on GC heap to be inspected by stackwalking or by debugger
+ break;
+ }
+
+ _ASSERTE(szWinRtClassName != NULL);
+
+ CLRPrivBinderWinRT * pWinRtBinder = pAppDomainExamine->GetWinRtBinder();
+#ifdef FEATURE_HOSTED_BINDER
+ if (pWinRtBinder == nullptr)
+ { // We are most likely in AppX mode (calling AppX::IsAppXProcess() for verification is painful in DACCESS)
+#ifndef DACCESS_COMPILE
+ // Check whether a designer binding context is present (only in AppXDesignMode)
+ ICLRPrivBinder * pCurrentBinder = pAppDomainExamine->GetLoadContextHostBinder();
+ if (pCurrentBinder != nullptr)
+ { // We have designer binding context, look for the type in it
+ ReleaseHolder<ICLRPrivWinRtTypeBinder> pCurrentWinRtTypeBinder;
+ HRESULT hr = pCurrentBinder->QueryInterface(__uuidof(ICLRPrivWinRtTypeBinder), (void **)&pCurrentWinRtTypeBinder);
+
+ // The binder should be an instance of code:CLRPrivBinderAppX class that implements the interface
+ _ASSERTE(SUCCEEDED(hr) && (pCurrentWinRtTypeBinder != nullptr));
+
+ if (SUCCEEDED(hr))
+ {
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+ pAssembly = (Assembly *)pCurrentWinRtTypeBinder->FindAssemblyForWinRtTypeIfLoaded(
+ (void *)pAppDomainExamine,
+ szWinRtNamespace,
+ szWinRtClassName);
+ }
+ }
+#endif //!DACCESS_COMPILE
+ if (pAssembly == nullptr)
+ {
+#if defined(FEATURE_APPX_BINDER)
+ // Use WinRT binder from "global" AppX binder (there's only 1 AppDomain in non-design mode)
+ CLRPrivBinderAppX * pAppXBinder = CLRPrivBinderAppX::GetBinderOrNull();
+ if (pAppXBinder != nullptr)
+ {
+ pWinRtBinder = pAppXBinder->GetWinRtBinder();
+ }
+#endif // defined(FEATURE_APPX_BINDER)
+ }
+ }
+#endif //FEATURE_HOSTED_BINDER
+
+ if (pWinRtBinder != nullptr)
+ {
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+ pAssembly = pWinRtBinder->FindAssemblyForTypeIfLoaded(
+ dac_cast<PTR_AppDomain>(pAppDomainExamine),
+ szWinRtNamespace,
+ szWinRtClassName);
+ }
+
+ // Never store WinMD AssemblyRefs into the rid map.
+ if (pAssembly != NULL)
+ {
+ break;
+ }
+
+ // Never attempt to search the assembly spec binding cache for this form of WinRT assembly reference.
+ continue;
+ }
+#endif // FEATURE_COMINTEROP
+
+#ifndef DACCESS_COMPILE
+ {
+ IMDInternalImport * pMDImport = (pMDImportOverride == NULL) ? (GetMDImport()) : (pMDImportOverride);
+
+ // We have to be very careful here:
+ // we are using InitializeSpecInternal, so we need to make sure that the data we pass to it
+ // remains valid for the entire lifetime of the assembly spec.
+ AssemblySpec spec;
+ if (FAILED(spec.InitializeSpecInternal(kAssemblyRef,
+ pMDImport,
+ pCurAssemblyInExamineDomain,
+ IsIntrospectionOnly(),
+ FALSE /*fAllowAllocation*/)))
+ {
+ continue;
+ }
+
+#if defined(FEATURE_CORECLR)
+ // If we have been passed the binding context for the loaded assembly that is being looked up in the
+ // cache, then set it up in the AssemblySpec for the cache lookup to use it below.
+ if (pBindingContextForLoadedAssembly != NULL)
+ {
+ _ASSERTE(spec.GetBindingContext() == NULL);
+ spec.SetBindingContext(pBindingContextForLoadedAssembly);
+ }
+#endif // defined(FEATURE_CORECLR)
+ DomainAssembly * pDomainAssembly = nullptr;
+
+#ifdef FEATURE_APPX_BINDER
+ if (AppX::IsAppXProcess_Initialized_NoFault() && GetAssembly()->GetManifestFile()->HasHostAssembly())
+ {
+ ICLRPrivAssembly * pPrivBinder = GetAssembly()->GetManifestFile()->GetHostAssembly();
+ ReleaseHolder<ICLRPrivAssembly> pPrivAssembly;
+ HRESULT hrCachedResult;
+ if (SUCCEEDED(pPrivBinder->FindAssemblyBySpec(pAppDomainExamine, &spec, &hrCachedResult, &pPrivAssembly)) &&
+ SUCCEEDED(hrCachedResult))
+ {
+ pDomainAssembly = pAppDomainExamine->FindAssembly(pPrivAssembly);
+ }
+ }
+ else
+#endif // FEATURE_APPX_BINDER
+ {
+ pDomainAssembly = pAppDomainExamine->FindCachedAssembly(&spec, FALSE /*fThrow*/);
+ }
+
+ if (pDomainAssembly && pDomainAssembly->IsLoaded())
+ pAssembly = pDomainAssembly->GetCurrentAssembly(); // <NOTE> Do not use GetAssembly - that may force the completion of a load
+
+ // Only store in the rid map if working with the current AppDomain.
+ if (fCanUseRidMap && pAssembly && appDomainIter.UsingCurrentAD())
+ StoreAssemblyRef(kAssemblyRef, pAssembly);
+
+ if (pAssembly != NULL)
+ break;
+ }
+#endif //!DACCESS_COMPILE
+ }
+ }
+
+#if !defined(DACCESS_COMPILE) && defined(FEATURE_PREJIT)
+ if (pAssembly == NULL && (IsStackWalkerThread() || IsGCThread() || IsGenericInstantiationLookupCompareThread()) && !fDoNotUtilizeExtraChecks)
+ {
+ // The GetAssemblyIfLoaded function must succeed in finding assemblies which have already been loaded in a series of interesting cases
+ // (GC, Stackwalking, GenericInstantiationLookup). This logic is used to handle cases where the normal lookup done above
+ // may fail, and more extensive (and slow) lookups are necessary. This logic is gated by a long series of checks to ensure it doesn't
+ // run in cases which are not known to be problematic, or would not benefit from the logic here.
+ //
+ // This logic tries extra possibilities to find an assembly. It is believed it can only be hit in cases where an ngen
+ // image depends on an assembly through some sort of binding version/public key token adjustment (due to binding policy, unification, or portability rules)
+ // and the assembly depended on was loaded through a binder that utilizes the AssemblySpecBindingCache for binder caching. (The caches in the other
+ // binders successfully answer the GetAssemblyIfLoaded question in the case of non-exact matches where the match was discovered during
+ // ngen resolution.)
+ // This restricts the logic to a fairly narrow scenario.
+
+ BOOL eligibleForAdditionalChecks = TRUE;
+ if (szWinRtNamespace != NULL)
+ eligibleForAdditionalChecks = FALSE; // WinRT binds do not support this scan
+#ifdef FEATURE_FUSION
+ else if ((this->GetAssembly()->GetManifestFile()->GetLoadContext() != LOADCTX_TYPE_DEFAULT) && (this->GetAssembly()->GetManifestFile()->GetLoadContext() != LOADCTX_TYPE_HOSTED))
+ eligibleForAdditionalChecks = FALSE; // Only load and hosted context binds support this kind of discovery.
+#endif // FEATURE_FUSION
+ else if (this->GetAssembly()->GetManifestFile()->IsDesignerBindingContext())
+ {
+ eligibleForAdditionalChecks = FALSE;
+ // assemblies loaded into leaf designer binding contexts cannot be ngen images, or be depended on by ngen assemblies that bind to different versions of assemblies.
+ // However, in the shared designer binding context assemblies can be loaded with ngen images, and therefore can depend on assemblies in a designer binding context. (the shared context)
+ // A more correct version of this check would probably allow assemblies loaded into the shared designer binding context to be eligibleForAdditionalChecks; however
+ // there are problems. In particular, the logic below which scans through all native images is not strictly correct for scenarios involving a shared assembly context
+ // as the shared assembly context may have different binding rules as compared to the root context. At this time, we prefer to not fix this scenario until
+ // there is customer need for a fix.
+ }
+ else if (IsIntrospectionOnly())
+ eligibleForAdditionalChecks = FALSE;
+
+ AssemblySpec specSearchAssemblyRef;
+
+ // Get the assembly ref information that we are attempting to satisfy.
+ if (eligibleForAdditionalChecks)
+ {
+ IMDInternalImport * pMDImport = (pMDImportOverride == NULL) ? (GetMDImport()) : (pMDImportOverride);
+
+ if (FAILED(specSearchAssemblyRef.InitializeSpecInternal(kAssemblyRef,
+ pMDImport,
+ NULL,
+ FALSE,
+ FALSE /*fAllowAllocation*/)))
+ {
+ eligibleForAdditionalChecks = FALSE; // If an assemblySpec can't be constructed then we're not going to succeed
+ // This should not ever happen, due to the above checks, but this logic
+ // is intended to be defensive against unexpected behavior.
+ }
+ else if (specSearchAssemblyRef.IsContentType_WindowsRuntime())
+ {
+ eligibleForAdditionalChecks = FALSE; // WinRT binds do not support this scan
+ }
+ }
+
+ if (eligibleForAdditionalChecks)
+ {
+ BOOL abortAdditionalChecks = FALSE;
+
+ // When working with an ngen'd assembly, as an optimization we can scan only that module for dependency info.
+ bool onlyScanCurrentModule = HasNativeImage() && GetFile()->IsAssembly();
+ mdAssemblyRef foundAssemblyRef = mdAssemblyRefNil;
+
+ GetAssemblyIfLoadedAppDomainIterator appDomainIter;
+
+ // In each AppDomain that might be interesting, scan for an ngen image that is loaded that has a dependency on the same
+ // assembly that is now being looked up. If that ngen image has the same dependency, then we can use the CORCOMPILE_DEPENDENCIES
+ // table to find the exact AssemblyDef that defines the assembly, and attempt a load based on that information.
+ // As this logic is expected to be used only in exceedingly rare situations, this code has not been tuned for performance
+ // in any way.
+ while (!abortAdditionalChecks && appDomainIter.Next())
+ {
+ AppDomain * pAppDomainExamine = appDomainIter.GetDomain();
+
+ DomainAssembly * pCurAssemblyInExamineDomain = GetAssembly()->FindDomainAssembly(pAppDomainExamine);
+ if (pCurAssemblyInExamineDomain == NULL)
+ {
+ continue;
+ }
+
+ DomainFile *pDomainFileNativeImage;
+
+ if (onlyScanCurrentModule)
+ {
+ pDomainFileNativeImage = pCurAssemblyInExamineDomain;
+ // Do not reset foundAssemblyRef.
+ // This will allow us to avoid scanning for foundAssemblyRef in each domain we iterate through
+ }
+ else
+ {
+ foundAssemblyRef = mdAssemblyRefNil;
+ pDomainFileNativeImage = pAppDomainExamine->GetDomainFilesWithNativeImagesList();
+ }
+
+ while (!abortAdditionalChecks && (pDomainFileNativeImage != NULL) && (pAssembly == NULL))
+ {
+ Module *pNativeImageModule = pDomainFileNativeImage->GetCurrentModule();
+ _ASSERTE(pNativeImageModule->HasNativeImage());
+ IMDInternalImport *pImportFoundNativeImage = pNativeImageModule->GetNativeAssemblyImport(FALSE);
+ if (pImportFoundNativeImage != NULL)
+ {
+ if (IsNilToken(foundAssemblyRef))
+ {
+ // Enumerate assembly refs in the native metadata space, and compare against the held ref.
+ HENUMInternalHolder hAssemblyRefEnum(pImportFoundNativeImage);
+ if (FAILED(hAssemblyRefEnum.EnumInitNoThrow(mdtAssemblyRef, mdAssemblyRefNil)))
+ {
+ continue;
+ }
+
+ mdAssemblyRef assemblyRef = mdAssemblyRefNil;
+
+ // Find if the native image has a matching assembly ref in its compile dependencies.
+ while (pImportFoundNativeImage->EnumNext(&hAssemblyRefEnum, &assemblyRef) && (pAssembly == NULL))
+ {
+ AssemblySpec specFoundAssemblyRef;
+ if (FAILED(specFoundAssemblyRef.InitializeSpecInternal(assemblyRef,
+ pImportFoundNativeImage,
+ NULL,
+ FALSE,
+ FALSE /*fAllowAllocation*/)))
+ {
+ continue; // If the spec cannot be loaded, it isn't the one we're looking for
+ }
+
+ // Check for AssemblyRef equality
+ if (specSearchAssemblyRef.CompareEx(&specFoundAssemblyRef))
+ {
+ foundAssemblyRef = assemblyRef;
+ break;
+ }
+ }
+ }
+
+ pAssembly = pNativeImageModule->GetAssemblyIfLoadedFromNativeAssemblyRefWithRefDefMismatch(foundAssemblyRef, &abortAdditionalChecks);
+
+ if (fCanUseRidMap && pAssembly && appDomainIter.UsingCurrentAD())
+ StoreAssemblyRef(kAssemblyRef, pAssembly);
+ }
+
+ // If we're only scanning one module for accurate dependency information, break the loop here.
+ if (onlyScanCurrentModule)
+ break;
+
+ pDomainFileNativeImage = pDomainFileNativeImage->FindNextDomainFileWithNativeImage();
+ }
+ }
+ }
+ }
+#endif // !defined(DACCESS_COMPILE) && defined(FEATURE_PREJIT)
+
+ // When walking the stack or computing GC information this function should never fail.
+ _ASSERTE((pAssembly != NULL) || !(IsStackWalkerThread() || IsGCThread()));
+
+#ifdef DACCESS_COMPILE
+
+ // Note: In rare cases when the debugger walks the stack, we could actually have pAssembly == NULL here.
+ // To fix that we should DACize the AppDomain-iteration code above (especially AssemblySpec).
+ _ASSERTE(pAssembly != NULL);
+
+#endif //DACCESS_COMPILE
+
+ RETURN pAssembly;
+} // Module::GetAssemblyIfLoaded
+
+DWORD
+Module::GetAssemblyRefFlags(
+ mdAssemblyRef tkAssemblyRef)
+{
+ CONTRACTL
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(TypeFromToken(tkAssemblyRef) == mdtAssemblyRef);
+
+ LPCSTR pszAssemblyName;
+ const void *pbPublicKeyOrToken;
+ DWORD cbPublicKeyOrToken;
+
+ DWORD dwAssemblyRefFlags;
+ IfFailThrow(GetMDImport()->GetAssemblyRefProps(
+ tkAssemblyRef,
+ &pbPublicKeyOrToken,
+ &cbPublicKeyOrToken,
+ &pszAssemblyName,
+ NULL,
+ NULL,
+ NULL,
+ &dwAssemblyRefFlags));
+
+ return dwAssemblyRefFlags;
+} // Module::GetAssemblyRefFlags
+
+#ifndef DACCESS_COMPILE
+
+// Arguments:
+// szWinRtTypeNamespace ... Namespace of WinRT type.
+// szWinRtTypeClassName ... Name of WinRT type, NULL for non-WinRT (classic) types.
+DomainAssembly * Module::LoadAssembly(
+ AppDomain * pDomain,
+ mdAssemblyRef kAssemblyRef,
+ LPCUTF8 szWinRtTypeNamespace,
+ LPCUTF8 szWinRtTypeClassName)
+{
+ CONTRACT(DomainAssembly *)
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM();); }
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDomain));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_NOT_OK));
+ //POSTCONDITION((CheckPointer(GetAssemblyIfLoaded(kAssemblyRef, szWinRtTypeNamespace, szWinRtTypeClassName)), NULL_NOT_OK));
+ }
+ CONTRACT_END;
+
+ ETWOnStartup (LoaderCatchCall_V1, LoaderCatchCallEnd_V1);
+
+ DomainAssembly * pDomainAssembly;
+
+ //
+ // Early out quickly if the result is cached
+ //
+ Assembly * pAssembly = LookupAssemblyRef(kAssemblyRef);
+ if (pAssembly != NULL)
+ {
+ _ASSERTE(HasBindableIdentity(kAssemblyRef));
+
+ pDomainAssembly = pAssembly->FindDomainAssembly(pDomain);
+
+ if (pDomainAssembly == NULL)
+ pDomainAssembly = pAssembly->GetDomainAssembly(pDomain);
+ pDomain->LoadDomainFile(pDomainAssembly, FILE_LOADED);
+
+ RETURN pDomainAssembly;
+ }
+
+ bool fHasBindableIdentity = HasBindableIdentity(kAssemblyRef);
+
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+ if (IsIntrospectionOnly())
+ {
+ // We will not get here on a GC thread
+ GCX_PREEMP();
+
+ AssemblySpec spec;
+ spec.InitializeSpec(kAssemblyRef, GetMDImport(), GetDomainFile(GetAppDomain())->GetDomainAssembly(), IsIntrospectionOnly());
+ if (szWinRtTypeClassName != NULL)
+ {
+ spec.SetWindowsRuntimeType(szWinRtTypeNamespace, szWinRtTypeClassName);
+ }
+ pDomainAssembly = GetAppDomain()->BindAssemblySpecForIntrospectionDependencies(&spec);
+ }
+ else
+#endif //FEATURE_REFLECTION_ONLY_LOAD
+ {
+ PEAssemblyHolder pFile = GetDomainFile(GetAppDomain())->GetFile()->LoadAssembly(
+ kAssemblyRef,
+ NULL,
+ szWinRtTypeNamespace,
+ szWinRtTypeClassName);
+ AssemblySpec spec;
+ spec.InitializeSpec(kAssemblyRef, GetMDImport(), GetDomainFile(GetAppDomain())->GetDomainAssembly(), IsIntrospectionOnly());
+#if defined(FEATURE_CORECLR)
+ // Set the binding context in the AssemblySpec if one is available. This can happen if the LoadAssembly ended up
+ // invoking the custom AssemblyLoadContext implementation that returned a reference to an assembly bound to a different
+ // AssemblyLoadContext implementation.
+ ICLRPrivBinder *pBindingContext = pFile->GetBindingContext();
+ if (pBindingContext != NULL)
+ {
+ spec.SetBindingContext(pBindingContext);
+ }
+#endif // defined(FEATURE_CORECLR)
+ if (szWinRtTypeClassName != NULL)
+ {
+ spec.SetWindowsRuntimeType(szWinRtTypeNamespace, szWinRtTypeClassName);
+ }
+ pDomainAssembly = GetAppDomain()->LoadDomainAssembly(&spec, pFile, FILE_LOADED, NULL);
+ }
+
+ if (pDomainAssembly != NULL)
+ {
+ _ASSERTE(
+ IsIntrospectionOnly() || // GetAssemblyIfLoaded will not find introspection-only assemblies
+ !fHasBindableIdentity || // GetAssemblyIfLoaded will not find non-bindable assemblies
+ pDomainAssembly->IsSystem() || // GetAssemblyIfLoaded will not find mscorlib (see AppDomain::FindCachedFile)
+ !pDomainAssembly->IsLoaded() || // GetAssemblyIfLoaded will not find not-yet-loaded assemblies
+ GetAssemblyIfLoaded(kAssemblyRef, NULL, NULL, NULL, FALSE, pDomainAssembly->GetFile()->GetHostAssembly()) != NULL); // GetAssemblyIfLoaded should find all remaining cases
+
+ // Note: We cannot cache WinRT AssemblyRef, because it is meaningless without the TypeRef context
+ if (pDomainAssembly->GetCurrentAssembly() != NULL)
+ {
+ if (fHasBindableIdentity)
+ {
+ StoreAssemblyRef(kAssemblyRef, pDomainAssembly->GetCurrentAssembly());
+ }
+ }
+ }
+
+ RETURN pDomainAssembly;
+}
+
+#endif // !DACCESS_COMPILE
+
+Module *Module::GetModuleIfLoaded(mdFile kFile, BOOL onlyLoadedInAppDomain, BOOL permitResources)
+{
+ CONTRACT(Module *)
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(TypeFromToken(kFile) == mdtFile
+ || TypeFromToken(kFile) == mdtModuleRef);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ // Handle the module ref case
+ if (TypeFromToken(kFile) == mdtModuleRef)
+ {
+ LPCSTR moduleName;
+ if (FAILED(GetMDImport()->GetModuleRefProps(kFile, &moduleName)))
+ {
+ RETURN NULL;
+ }
+
+ // This is required only because of some lower casing on the name
+ kFile = GetAssembly()->GetManifestFileToken(moduleName);
+ if (kFile == mdTokenNil)
+ RETURN NULL;
+
+ RETURN GetAssembly()->GetManifestModule()->GetModuleIfLoaded(kFile, onlyLoadedInAppDomain, permitResources);
+ }
+
+ Module *pModule = LookupFile(kFile);
+ if (pModule == NULL)
+ {
+ if (IsManifest())
+ {
+ if (kFile == mdFileNil)
+ pModule = GetAssembly()->GetManifestModule();
+ }
+ else
+ {
+ // If we didn't find it there, look at the "master rid map" in the manifest file
+ Assembly *pAssembly = GetAssembly();
+ mdFile kMatch;
+
+ // This is required only because of some lower casing on the name
+ kMatch = pAssembly->GetManifestFileToken(GetMDImport(), kFile);
+ if (IsNilToken(kMatch))
+ {
+ if (kMatch == mdFileNil)
+ {
+ pModule = pAssembly->GetManifestModule();
+ }
+ else
+ {
+ RETURN NULL;
+ }
+ }
+ else
+ pModule = pAssembly->GetManifestModule()->LookupFile(kMatch);
+ }
+
+#ifndef DACCESS_COMPILE
+ if (pModule != NULL)
+ StoreFileNoThrow(kFile, pModule);
+#endif
+ }
+
+ // We may not want to return a resource module
+ if (!permitResources && pModule && pModule->IsResource())
+ pModule = NULL;
+
+#if defined(FEATURE_MULTIMODULE_ASSEMBLIES)
+ // check if actually loaded, unless this happens during GC (GC works only with loaded assemblies)
+ if (!GCHeap::IsGCInProgress() && onlyLoadedInAppDomain && pModule && !pModule->IsManifest())
+ {
+#ifndef DACCESS_COMPILE
+ DomainModule *pDomainModule = pModule->FindDomainModule(GetAppDomain());
+ if (pDomainModule == NULL || !pDomainModule->IsLoaded())
+ pModule = NULL;
+#else
+ // Unfortunately DAC doesn't have a GetAppDomain(); however, multi-module
+ // assemblies aren't very common, so it should be OK to fail here for now.
+ DacNotImpl();
+#endif // !DACCESS_COMPILE
+ }
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+ RETURN pModule;
+}
+
+#ifndef DACCESS_COMPILE
+
+DomainFile *Module::LoadModule(AppDomain *pDomain, mdFile kFile,
+ BOOL permitResources/*=TRUE*/, BOOL bindOnly/*=FALSE*/)
+{
+ CONTRACT(DomainFile *)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(TypeFromToken(kFile) == mdtFile
+ || TypeFromToken(kFile) == mdtModuleRef);
+ POSTCONDITION(CheckPointer(RETVAL, !permitResources || bindOnly ? NULL_OK : NULL_NOT_OK));
+ }
+ CONTRACT_END;
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+
+ // Handle the module ref case
+ if (TypeFromToken(kFile) == mdtModuleRef)
+ {
+ LPCSTR moduleName;
+ IfFailThrow(GetMDImport()->GetModuleRefProps(kFile, &moduleName));
+
+ mdFile kFileLocal = GetAssembly()->GetManifestFileToken(moduleName);
+
+ if (kFileLocal == mdTokenNil)
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ RETURN GetAssembly()->GetManifestModule()->LoadModule(pDomain, kFileLocal, permitResources, bindOnly);
+ }
+
+ // First, make sure the assembly is loaded in our domain
+
+ DomainAssembly *pDomainAssembly = GetAssembly()->FindDomainAssembly(pDomain);
+ if (!bindOnly)
+ {
+ if (pDomainAssembly == NULL)
+ pDomainAssembly = GetAssembly()->GetDomainAssembly(pDomain);
+ pDomain->LoadDomainFile(pDomainAssembly, FILE_LOADED);
+ }
+
+ if (kFile == mdFileNil)
+ RETURN pDomainAssembly;
+
+ if (pDomainAssembly == NULL)
+ RETURN NULL;
+
+ // Now look for the module in the rid maps
+
+ Module *pModule = LookupFile(kFile);
+ if (pModule == NULL && !IsManifest())
+ {
+ // If we didn't find it there, look at the "master rid map" in the manifest file
+ Assembly *pAssembly = GetAssembly();
+ mdFile kMatch = pAssembly->GetManifestFileToken(GetMDImport(), kFile);
+ if (IsNilToken(kMatch)) {
+ if (kMatch == mdFileNil)
+ pModule = pAssembly->GetManifestModule();
+ else
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ else
+ pModule = pAssembly->GetManifestModule()->LookupFile(kMatch);
+ }
+
+ // Get a DomainModule for our domain
+
+ DomainModule *pDomainModule = NULL;
+ if (pModule)
+ {
+ pDomainModule = pModule->FindDomainModule(pDomain);
+
+ if (!bindOnly && (permitResources || !pModule->IsResource()))
+ {
+ if (pDomainModule == NULL)
+ pDomainModule = pDomain->LoadDomainModule(pDomainAssembly, (PEModule*) pModule->GetFile(), FILE_LOADED);
+ else
+ pDomain->LoadDomainFile(pDomainModule, FILE_LOADED);
+ }
+ }
+ else if (!bindOnly)
+ {
+ PEModuleHolder pFile(GetAssembly()->LoadModule_AddRef(kFile, permitResources));
+ if (pFile)
+ pDomainModule = pDomain->LoadDomainModule(pDomainAssembly, pFile, FILE_LOADED);
+ }
+
+ if (pDomainModule != NULL && pDomainModule->GetCurrentModule() != NULL)
+ {
+ // Make sure the module we're loading isn't its own assembly
+ if (pDomainModule->GetCurrentModule()->IsManifest())
+ COMPlusThrowHR(COR_E_ASSEMBLY_NOT_EXPECTED);
+
+ // Cache the result in the rid map
+ StoreFileThrowing(kFile, pDomainModule->GetCurrentModule());
+ }
+
+ // Make sure we didn't load a different module than what was in the rid map
+ CONSISTENCY_CHECK(pDomainModule == NULL || pModule == NULL || pDomainModule->GetModule() == pModule);
+
+ // We may not want to return a resource module
+ if (!permitResources && pDomainModule != NULL && pDomainModule->GetFile()->IsResource())
+ pDomainModule = NULL;
+
+ RETURN pDomainModule;
+#else //!FEATURE_MULTIMODULE_ASSEMBLIES
+ if (bindOnly)
+ {
+ RETURN NULL;
+ }
+ else
+ {
+ LPCSTR psModuleName=NULL;
+ if (TypeFromToken(kFile) == mdtModuleRef)
+ {
+ // This is a moduleRef
+ IfFailThrow(GetMDImport()->GetModuleRefProps(kFile, &psModuleName));
+ }
+ else
+ {
+ // This is mdtFile
+ IfFailThrow(GetAssembly()->GetManifestImport()->GetFileProps(kFile,
+ &psModuleName,
+ NULL,
+ NULL,
+ NULL));
+ }
+ SString name(SString::Utf8, psModuleName);
+ EEFileLoadException::Throw(name, COR_E_MULTIMODULEASSEMBLIESDIALLOWED, NULL);
+ }
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+}
+#endif // !DACCESS_COMPILE
+
+PTR_Module Module::LookupModule(mdToken kFile,BOOL permitResources/*=TRUE*/)
+{
+ CONTRACT(PTR_Module)
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT;
+ else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ PRECONDITION(TypeFromToken(kFile) == mdtFile
+ || TypeFromToken(kFile) == mdtModuleRef);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ if (TypeFromToken(kFile) == mdtModuleRef)
+ {
+ LPCSTR moduleName;
+ IfFailThrow(GetMDImport()->GetModuleRefProps(kFile, &moduleName));
+ mdFile kFileLocal = GetAssembly()->GetManifestFileToken(moduleName);
+
+ if (kFileLocal == mdTokenNil)
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+
+ RETURN GetAssembly()->GetManifestModule()->LookupModule(kFileLocal, permitResources);
+ }
+
+ PTR_Module pModule = LookupFile(kFile);
+ if (pModule == NULL && !IsManifest())
+ {
+ // If we didn't find it there, look at the "master rid map" in the manifest file
+ Assembly *pAssembly = GetAssembly();
+ mdFile kMatch = pAssembly->GetManifestFileToken(GetMDImport(), kFile);
+ if (IsNilToken(kMatch)) {
+ if (kMatch == mdFileNil)
+ pModule = pAssembly->GetManifestModule();
+ else
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ else
+ pModule = pAssembly->GetManifestModule()->LookupFile(kMatch);
+ }
+ RETURN pModule;
+}
+
+
+TypeHandle Module::LookupTypeRef(mdTypeRef token)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(TypeFromToken(token) == mdtTypeRef);
+
+ g_IBCLogger.LogRidMapAccess( MakePair( this, token ) );
+
+ TypeHandle entry = TypeHandle::FromTAddr(dac_cast<TADDR>(m_TypeRefToMethodTableMap.GetElement(RidFromToken(token))));
+
+ if (entry.IsNull())
+ return TypeHandle();
+
+ // Cannot do this in a NOTHROW function.
+ // Note that this could be called while doing GC from the prestub of
+ // a method to resolve typerefs in a signature. We cannot THROW
+ // during GC.
+
+ // @PERF: Enable this so that we do not need to touch metadata
+ // to resolve typerefs
+
+#ifdef FIXUPS_ALL_TYPEREFS
+
+ if (CORCOMPILE_IS_POINTER_TAGGED((SIZE_T) entry.AsPtr()))
+ {
+#ifndef DACCESS_COMPILE
+ Module::RestoreTypeHandlePointer(&entry, TRUE);
+ m_TypeRefToMethodTableMap.SetElement(RidFromToken(token), dac_cast<PTR_TypeRef>(entry.AsTAddr()));
+#else // DACCESS_COMPILE
+ DacNotImpl();
+#endif // DACCESS_COMPILE
+ }
+
+#endif // FIXUPS_ALL_TYPEREFS
+
+ return entry;
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+mdTypeRef Module::LookupTypeRefByMethodTable(MethodTable *pMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ HENUMInternalHolder hEnumTypeRefs(GetMDImport());
+ mdTypeRef token;
+ hEnumTypeRefs.EnumAllInit(mdtTypeRef);
+ while (hEnumTypeRefs.EnumNext(&token))
+ {
+ TypeHandle thRef = LookupTypeRef(token);
+ if (thRef.IsNull() || thRef.IsTypeDesc())
+ {
+ continue;
+ }
+
+ MethodTable *pMTRef = thRef.AsMethodTable();
+ if (pMT->HasSameTypeDefAs(pMTRef))
+ {
+ _ASSERTE(pMTRef->IsTypicalTypeDefinition());
+ return token;
+ }
+ }
+
+#ifdef FEATURE_READYTORUN_COMPILER
+ if (IsReadyToRunCompilation())
+ {
+ // FUTURE: Encoding of new cross-module references for ReadyToRun
+ // This warning is hit for recursive cross-module inlining. It is commented out to avoid noise.
+ // GetSvcLogger()->Log(W("ReadyToRun: Type reference outside of current version bubble cannot be encoded\n"));
+ }
+ else
+#endif // FEATURE_READYTORUN_COMPILER
+ {
+ // FUTURE TODO: Version resilience
+ _ASSERTE(!"Cross module type reference not found");
+ }
+ ThrowHR(E_FAIL);
+}
+
+mdMemberRef Module::LookupMemberRefByMethodDesc(MethodDesc *pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ HENUMInternalHolder hEnumMemberRefs(GetMDImport());
+ mdMemberRef token;
+ hEnumMemberRefs.EnumAllInit(mdtMemberRef);
+ while (hEnumMemberRefs.EnumNext(&token))
+ {
+ BOOL fIsMethod = FALSE;
+ TADDR addr = LookupMemberRef(token, &fIsMethod);
+ if (fIsMethod)
+ {
+ MethodDesc *pCurMD = dac_cast<PTR_MethodDesc>(addr);
+ if (pCurMD == pMD)
+ {
+ return token;
+ }
+ }
+ }
+
+ // FUTURE TODO: Version resilience
+ _ASSERTE(!"Cross module method reference not found");
+ ThrowHR(E_FAIL);
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#ifndef DACCESS_COMPILE
+
+//
+// Increase the size of one of the maps, such that it can handle a RID of at least "rid".
+//
+// This function must also check that another thread didn't already add a LookupMap capable
+// of containing the same RID.
+//
+PTR_TADDR LookupMapBase::GrowMap(Module * pModule, DWORD rid)
+{
+ CONTRACT(PTR_TADDR)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(ThrowOutOfMemory(););
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ LookupMapBase *pMap = this;
+ LookupMapBase *pPrev = NULL;
+ LookupMapBase *pNewMap = NULL;
+
+ // Initial block size
+ DWORD dwIndex = rid;
+ DWORD dwBlockSize = 16;
+
+ {
+ CrstHolder ch(pModule->GetLookupTableCrst());
+ // Check whether we can already handle this RID index
+ do
+ {
+ if (dwIndex < pMap->dwCount)
+ {
+ // Already there - some other thread must have added it
+ RETURN pMap->GetIndexPtr(dwIndex);
+ }
+
+ dwBlockSize *= 2;
+
+ dwIndex -= pMap->dwCount;
+
+ pPrev = pMap;
+ pMap = pMap->pNext;
+ } while (pMap != NULL);
+
+ _ASSERTE(pPrev != NULL); // should never happen, because there's always at least one map
+
+ DWORD dwSizeToAllocate = max(dwIndex + 1, dwBlockSize);
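+
+ // Worked example (assuming a single existing block of 16 entries and rid == 40):
+ // the loop above leaves dwIndex == 24 and dwBlockSize == 32, so we allocate
+ // max(24 + 1, 32) == 32 entries and return the slot at index 24 of the new block.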
+
+ pNewMap = (LookupMapBase *) (void*)pModule->GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(LookupMapBase)) + S_SIZE_T(dwSizeToAllocate)*S_SIZE_T(sizeof(TADDR)));
+
+ // Note: Memory allocated on loader heap is zero filled
+ // memset(pNewMap, 0, sizeof(LookupMap) + dwSizeToAllocate*sizeof(void*));
+
+ pNewMap->pNext = NULL;
+ pNewMap->dwCount = dwSizeToAllocate;
+
+ pNewMap->pTable = dac_cast<ArrayDPTR(TADDR)>(pNewMap + 1);
+
+ // Link ourselves in
+ VolatileStore<LookupMapBase*>(&(pPrev->pNext), pNewMap);
+ }
+
+ RETURN pNewMap->GetIndexPtr(dwIndex);
+}
+
+#endif // DACCESS_COMPILE
+
+PTR_TADDR LookupMapBase::GetElementPtr(DWORD rid)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ LookupMapBase * pMap = this;
+
+#ifdef FEATURE_PREJIT
+ if (pMap->dwNumHotItems > 0)
+ {
+#ifdef _DEBUG_IMPL
+ static DWORD counter = 0;
+ counter++;
+ if (counter >= pMap->dwNumHotItems)
+ {
+ CheckConsistentHotItemList();
+ counter = 0;
+ }
+#endif // _DEBUG_IMPL
+
+ PTR_TADDR pHotItemValue = pMap->FindHotItemValuePtr(rid);
+ if (pHotItemValue)
+ {
+ return pHotItemValue;
+ }
+ }
+#endif // FEATURE_PREJIT
+
+ DWORD dwIndex = rid;
+ do
+ {
+ if (dwIndex < pMap->dwCount)
+ {
+ return pMap->GetIndexPtr(dwIndex);
+ }
+
+ dwIndex -= pMap->dwCount;
+ pMap = pMap->pNext;
+ } while (pMap != NULL);
+
+ return NULL;
+}
+
+
+#ifdef FEATURE_PREJIT
+
+// This method can only be called on a compressed map (MapIsCompressed() == true). Compressed rid maps store
+// the array of values as packed deltas (each value is based on the accumulated sum of all the previous entries).
+// So this method takes the bit stream of compressed data we're navigating and the value of the last entry
+// retrieved, allowing us to calculate the full value of the next entry. Note that the values passed in and out
+// here aren't the final values the top-level caller sees. In order to avoid having to touch the compressed
+// data on image base relocations we actually store a form of RVA (though relative to the map base rather than
+// the module base).
+INT32 LookupMapBase::GetNextCompressedEntry(BitStreamReader *pTableStream, INT32 iLastValue)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ PRECONDITION(MapIsCompressed());
+ }
+ CONTRACTL_END;
+
+ // The next kLookupMapLengthBits bits in the stream are an index into a per-map table that tells us the
+ // length of the encoded delta.
+ DWORD dwValueLength = rgEncodingLengths[pTableStream->Read(kLookupMapLengthBits)];
+
+ // Then follows a single bit that indicates whether the delta should be added (1) or subtracted (0) from
+ // the previous entry value to recover the current entry value.
+ // Once we've read that bit we read the delta (encoded as an unsigned integer using the number of bits
+ // that we read from the encoding lengths table above).
+ if (pTableStream->ReadOneFast())
+ return iLastValue + (INT32)(pTableStream->Read(dwValueLength));
+ else
+ return iLastValue - (INT32)(pTableStream->Read(dwValueLength));
+}
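+
+// Illustrative decode (hypothetical values, not taken from a real image): if the length-table
+// index selects rgEncodingLengths[i] == 6, the sign bit reads 1, and the next 6 bits hold 33,
+// then the entry decodes to iLastValue + 33; a sign bit of 0 would have yielded iLastValue - 33.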
+
+// This method can only be called on a compressed map (MapIsCompressed() == true). Retrieves the final value
+// (e.g. MethodTable*, MethodDesc* etc. based on map type) given the rid of the entry.
+TADDR LookupMapBase::GetValueFromCompressedMap(DWORD rid)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ PRECONDITION(MapIsCompressed());
+ }
+ CONTRACTL_END;
+
+ // Normally to extract the nth entry in the table we have to linearly parse all (n - 1) preceding entries
+ // (since entries are stored as the delta from the previous entry). Obviously this can yield exceptionally
+ // poor performance for the later entries in large tables. So we also build an index of the compressed
+ // stream. This index has an entry for every kLookupMapIndexStride entries in the compressed table. Each
+ // index entry contains the full RVA (relative to the map) of the corresponding table entry plus the bit
+ // offset in the stream from which to start parsing the next entry's data.
+ // In this fashion we can get to within kLookupMapIndexStride entries of our target entry and then decode
+ // our way to the final target.
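+ //
+ // For example (hypothetical stride): with kLookupMapIndexStride == 16, a lookup of rid 40
+ // reads index entry 40 / 16 == 2 to obtain a starting RVA and bit offset, and then decodes
+ // 40 % 16 == 8 compressed deltas to reach the target entry.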
+
+ // Ensure that index does not go beyond end of the saved table
+ if (rid >= dwCount)
+ return 0;
+
+ // Calculate the nearest entry in the index that is lower than our target index in the full table.
+ DWORD dwIndexEntry = rid / kLookupMapIndexStride;
+
+ // Then calculate how many additional entries we'll need to decode from the compressed streams to recover
+ // the target entry.
+ DWORD dwSubIndex = rid % kLookupMapIndexStride;
+
+ // Open a bit stream reader on the index and skip all the entries prior to the one we're interested in.
+ BitStreamReader sIndexStream(pIndex);
+ sIndexStream.Skip(dwIndexEntry * cIndexEntryBits);
+
+ // The first kBitsPerRVA of the index entry contain the RVA of the corresponding entry in the compressed
+ // table. If this is exactly the entry we want (dwSubIndex == 0) then we can use this RVA to recover the
+ // value the caller wants. Our RVAs are based on the map address rather than the module base (simply
+ // because we don't record the module base in LookupMapBase). A delta of zero encodes a null value,
+ // otherwise we simply add the RVA to our map address to recover the full pointer.
+ // Note that most LookupMaps are embedded structures (in Module) so we can't directly dac_cast<TADDR> our
+ // "this" pointer for DAC builds. Instead we have to use the slightly slower (in DAC) but more flexible
+ // PTR_HOST_INT_TO_TADDR() which copes with interior host pointers.
+ INT32 iValue = (INT32)sIndexStream.Read(kBitsPerRVA);
+ if (dwSubIndex == 0)
+ return iValue ? PTR_HOST_INT_TO_TADDR(this) + iValue : 0;
+
+ // Otherwise we must parse one or more entries in the compressed table to accumulate more deltas to the
+ // base RVA we read above. The remaining portion of the index entry has the bit offset into the compressed
+ // table at which to begin parsing.
+ BitStreamReader sTableStream(dac_cast<PTR_CBYTE>(pTable));
+ sTableStream.Skip(sIndexStream.Read(cIndexEntryBits - kBitsPerRVA));
+
+ // Parse all the entries up to our target entry. Each step takes the RVA from the previous cycle (or from
+ // the index entry we read above) and applies the compressed delta of the next table entry to it.
+ for (DWORD i = 0; i < dwSubIndex; i++)
+ iValue = GetNextCompressedEntry(&sTableStream, iValue);
+
+ // We have the final RVA so recover the actual pointer from it (a zero RVA encodes a NULL pointer). Note
+ // the use of PTR_HOST_INT_TO_TADDR() rather than dac_cast<TADDR>, see previous comment on
+ // PTR_HOST_INT_TO_TADDR for an explanation.
+ return iValue ? PTR_HOST_INT_TO_TADDR(this) + iValue : 0;
+}
+
+PTR_TADDR LookupMapBase::FindHotItemValuePtr(DWORD rid)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (dwNumHotItems < 5)
+ {
+ // do simple linear search if there are only a few hot items
+ for (DWORD i = 0; i < dwNumHotItems; i++)
+ {
+ if (hotItemList[i].rid == rid)
+ return dac_cast<PTR_TADDR>(
+ dac_cast<TADDR>(hotItemList) + i * sizeof(HotItem) + offsetof(HotItem, value));
+ }
+ }
+ else
+ {
+ // otherwise do binary search
+ if (hotItemList[0].rid <= rid && rid <= hotItemList[dwNumHotItems-1].rid)
+ {
+ DWORD l = 0;
+ DWORD r = dwNumHotItems;
+ while (l + 1 < r)
+ {
+ // loop invariant:
+ _ASSERTE(hotItemList[l].rid <= rid && (r >= dwNumHotItems || rid < hotItemList[r].rid));
+
+ DWORD m = (l + r)/2;
+ // loop condition implies l < m < r, hence interval shrinks every iteration, hence loop terminates
+ _ASSERTE(l < m && m < r);
+ if (rid < hotItemList[m].rid)
+ r = m;
+ else
+ l = m;
+ }
+ // now we know l + 1 == r && hotItemList[l].rid <= rid < hotItemList[r].rid
+ // loop invariant:
+ _ASSERTE(hotItemList[l].rid <= rid && (r >= dwNumHotItems || rid < hotItemList[r].rid));
+ if (hotItemList[l].rid == rid)
+ return dac_cast<PTR_TADDR>(
+ dac_cast<TADDR>(hotItemList) + l * sizeof(HotItem) + offsetof(HotItem, value));
+ }
+ }
+ return NULL;
+}
+
+#ifdef _DEBUG
+void LookupMapBase::CheckConsistentHotItemList()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ for (DWORD i = 0; i < dwNumHotItems; i++)
+ {
+ DWORD rid = hotItemList[i].rid;
+
+ PTR_TADDR pHotValue = dac_cast<PTR_TADDR>(
+ dac_cast<TADDR>(hotItemList) + i * sizeof(HotItem) + offsetof(HotItem, value));
+ TADDR hotValue = RelativePointer<TADDR>::GetValueMaybeNullAtPtr(dac_cast<TADDR>(pHotValue));
+
+ TADDR value;
+ if (MapIsCompressed())
+ {
+ value = GetValueFromCompressedMap(rid);
+ }
+ else
+ {
+ PTR_TADDR pValue = GetIndexPtr(rid);
+ value = RelativePointer<TADDR>::GetValueMaybeNullAtPtr(dac_cast<TADDR>(pValue));
+ }
+
+ _ASSERTE(hotValue == value || value == NULL);
+ }
+}
+#endif // _DEBUG
+
+#endif // FEATURE_PREJIT
+
+// Get number of RIDs that this table can store
+DWORD LookupMapBase::GetSize()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ LookupMapBase * pMap = this;
+ DWORD dwSize = 0;
+ do
+ {
+ dwSize += pMap->dwCount;
+ pMap = pMap->pNext;
+ } while (pMap != NULL);
+
+ return dwSize;
+}
+
+#ifndef DACCESS_COMPILE
+
+#ifdef _DEBUG
+void LookupMapBase::DebugGetRidMapOccupancy(DWORD *pdwOccupied, DWORD *pdwSize)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ *pdwOccupied = 0;
+ *pdwSize = 0;
+
+ LookupMapBase * pMap = this;
+
+ // Go through each linked block
+ for (; pMap != NULL; pMap = pMap->pNext)
+ {
+ DWORD dwIterCount = pMap->dwCount;
+
+ for (DWORD i = 0; i < dwIterCount; i++)
+ {
+#ifdef FEATURE_PREJIT
+ if (pMap->MapIsCompressed())
+ {
+ if (pMap->GetValueFromCompressedMap(i))
+ (*pdwOccupied)++;
+ }
+ else
+#endif // FEATURE_PREJIT
+ if (pMap->pTable[i] != NULL)
+ (*pdwOccupied)++;
+ }
+
+ (*pdwSize) += dwIterCount;
+ }
+}
+
+void Module::DebugLogRidMapOccupancy()
+{
+ WRAPPER_NO_CONTRACT;
+
+#define COMPUTE_RID_MAP_OCCUPANCY(var_suffix, map) \
+ DWORD dwOccupied##var_suffix, dwSize##var_suffix, dwPercent##var_suffix; \
+ map.DebugGetRidMapOccupancy(&dwOccupied##var_suffix, &dwSize##var_suffix); \
+ dwPercent##var_suffix = dwOccupied##var_suffix ? ((dwOccupied##var_suffix * 100) / dwSize##var_suffix) : 0;
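+
+// For a given suffix N, the macro above declares dwOccupiedN, dwSizeN and dwPercentN,
+// fills the first two from the map, and computes the percentage (0 when nothing is occupied).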
+
+ COMPUTE_RID_MAP_OCCUPANCY(1, m_TypeDefToMethodTableMap);
+ COMPUTE_RID_MAP_OCCUPANCY(2, m_TypeRefToMethodTableMap);
+ COMPUTE_RID_MAP_OCCUPANCY(3, m_MethodDefToDescMap);
+ COMPUTE_RID_MAP_OCCUPANCY(4, m_FieldDefToDescMap);
+ COMPUTE_RID_MAP_OCCUPANCY(5, m_GenericParamToDescMap);
+ COMPUTE_RID_MAP_OCCUPANCY(6, m_GenericTypeDefToCanonMethodTableMap);
+ COMPUTE_RID_MAP_OCCUPANCY(7, m_FileReferencesMap);
+ COMPUTE_RID_MAP_OCCUPANCY(8, m_ManifestModuleReferencesMap);
+ COMPUTE_RID_MAP_OCCUPANCY(9, m_MethodDefToPropertyInfoMap);
+
+ LOG((
+ LF_EEMEM,
+ INFO3,
+ " Map occupancy:\n"
+ " TypeDefToMethodTable map: %4d/%4d (%2d %%)\n"
+ " TypeRefToMethodTable map: %4d/%4d (%2d %%)\n"
+ " MethodDefToDesc map: %4d/%4d (%2d %%)\n"
+ " FieldDefToDesc map: %4d/%4d (%2d %%)\n"
+ " GenericParamToDesc map: %4d/%4d (%2d %%)\n"
+ " GenericTypeDefToCanonMethodTable map: %4d/%4d (%2d %%)\n"
+ " FileReferences map: %4d/%4d (%2d %%)\n"
+ " AssemblyReferences map: %4d/%4d (%2d %%)\n"
+ " MethodDefToPropInfo map: %4d/%4d (%2d %%)\n"
+ ,
+ dwOccupied1, dwSize1, dwPercent1,
+ dwOccupied2, dwSize2, dwPercent2,
+ dwOccupied3, dwSize3, dwPercent3,
+ dwOccupied4, dwSize4, dwPercent4,
+ dwOccupied5, dwSize5, dwPercent5,
+ dwOccupied6, dwSize6, dwPercent6,
+ dwOccupied7, dwSize7, dwPercent7,
+ dwOccupied8, dwSize8, dwPercent8,
+ dwOccupied9, dwSize9, dwPercent9
+ ));
+
+#undef COMPUTE_RID_MAP_OCCUPANCY
+}
+#endif // _DEBUG
+
+BOOL Module::CanExecuteCode()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_PREJIT
+ // In a passive domain, we lock down which assemblies can run code
+ if (!GetAppDomain()->IsPassiveDomain())
+ return TRUE;
+
+ Assembly * pAssembly = GetAssembly();
+ PEAssembly * pPEAssembly = pAssembly->GetManifestFile();
+
+ // Only mscorlib is allowed to execute code in an ngen passive domain
+ if (IsCompilationProcess())
+ return pPEAssembly->IsSystem();
+
+ // ExecuteDLLForAttach does not run the managed entry point in
+ // a passive domain to avoid loader-lock deadlocks.
+ // Hence, it is not safe to execute any code from this assembly.
+ if (pPEAssembly->GetEntryPointToken(INDEBUG(TRUE)) != mdTokenNil)
+ return FALSE;
+
+ // EXEs loaded using LoadAssembly() may not be loaded at their
+ // preferred base address. If they have any relocs, these may
+ // not have been fixed up.
+ if (!pPEAssembly->IsDll() && !pPEAssembly->IsILOnly())
+ return FALSE;
+
+ // If the assembly does not have FullTrust, we should not execute its code.
+ if (!pAssembly->GetSecurityDescriptor()->IsFullyTrusted())
+ return FALSE;
+#endif // FEATURE_PREJIT
+
+ return TRUE;
+}
+
+//
+// FindMethodThrowing finds a MethodDesc for a global function methoddef or ref; it throws on failure.
+//
+
+MethodDesc *Module::FindMethodThrowing(mdToken pMethod)
+{
+ CONTRACT (MethodDesc *)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END
+
+ SigTypeContext typeContext; /* empty type context: methods will not be generic */
+ RETURN MemberLoader::GetMethodDescFromMemberDefOrRefOrSpec(this, pMethod,
+ &typeContext,
+ TRUE, /* strictMetadataChecks */
+ FALSE /* don't get code shared between generic instantiations */);
+}
+
+//
+// FindMethod finds a MethodDesc for a global function methoddef or ref
+//
+
+MethodDesc *Module::FindMethod(mdToken pMethod)
+{
+ CONTRACT (MethodDesc *) {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ } CONTRACT_END;
+
+ MethodDesc *pMDRet = NULL;
+
+ EX_TRY
+ {
+ pMDRet = FindMethodThrowing(pMethod);
+ }
+ EX_CATCH
+ {
+#ifdef _DEBUG
+ CONTRACT_VIOLATION(ThrowsViolation);
+ char szMethodName [MAX_CLASSNAME_LENGTH];
+ CEEInfo::findNameOfToken(this, pMethod, szMethodName, COUNTOF (szMethodName));
+ LOG((LF_IJW, LL_INFO10, "Failed to find Method: %s for Vtable Fixup\n", szMethodName));
+#endif // _DEBUG
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ RETURN pMDRet;
+}
+
+//
+// PopulatePropertyInfoMap precomputes property information during NGen
+// that is expensive to look up from metadata at runtime.
+//
+
+void Module::PopulatePropertyInfoMap()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsCompilationProcess());
+ }
+ CONTRACTL_END;
+
+ IMDInternalImport* mdImport = GetMDImport();
+ HENUMInternalHolder hEnum(mdImport);
+ hEnum.EnumAllInit(mdtMethodDef);
+
+ mdMethodDef md;
+ while (hEnum.EnumNext(&md))
+ {
+ mdProperty prop = 0;
+ ULONG semantic = 0;
+ if (mdImport->GetPropertyInfoForMethodDef(md, &prop, NULL, &semantic) == S_OK)
+ {
+ // Store the Rid in the lower 24 bits and the semantic in the upper 8
+ _ASSERTE((semantic & 0xFFFFFF00) == 0);
+ SIZE_T value = RidFromToken(prop) | (semantic << 24);
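+ // For example, assuming the msSetter semantic (0x01 in CorHdr.h) on property
+ // token 0x17000012: RidFromToken gives 0x12, so value == 0x01000012.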
+
+ // We need to make sure a value of zero indicates an empty LookupMap entry
+ // Fortunately the semantic will prevent value from being zero
+ _ASSERTE(value != 0);
+
+ m_MethodDefToPropertyInfoMap.AddElement(this, RidFromToken(md), value);
+ }
+ }
+ FastInterlockOr(&m_dwPersistedFlags, COMPUTED_METHODDEF_TO_PROPERTYINFO_MAP);
+}
+
+//
+// GetPropertyInfoForMethodDef wraps the metadata function of the same name,
+// first trying to use the information stored in m_MethodDefToPropertyInfoMap.
+//
+
+HRESULT Module::GetPropertyInfoForMethodDef(mdMethodDef md, mdProperty *ppd, LPCSTR *pName, ULONG *pSemantic)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+
+ if ((m_dwPersistedFlags & COMPUTED_METHODDEF_TO_PROPERTYINFO_MAP) != 0)
+ {
+ SIZE_T value = m_MethodDefToPropertyInfoMap.GetElement(RidFromToken(md));
+ if (value == 0)
+ {
+ _ASSERTE(GetMDImport()->GetPropertyInfoForMethodDef(md, ppd, pName, pSemantic) == S_FALSE);
+ return S_FALSE;
+ }
+ else
+ {
+ // Decode the value into semantic and mdProperty as described in PopulatePropertyInfoMap
+ ULONG semantic = (value & 0xFF000000) >> 24;
+ mdProperty prop = TokenFromRid(value & 0x00FFFFFF, mdtProperty);
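+ // (Inverse of the packing done in PopulatePropertyInfoMap: e.g. value 0x01000012
+ // decodes to semantic 0x01 and property token 0x17000012.)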
+
+#ifdef _DEBUG
+ mdProperty dbgPd;
+ LPCSTR dbgName;
+ ULONG dbgSemantic;
+ _ASSERTE(GetMDImport()->GetPropertyInfoForMethodDef(md, &dbgPd, &dbgName, &dbgSemantic) == S_OK);
+#endif
+
+ if (ppd != NULL)
+ {
+ *ppd = prop;
+ _ASSERTE(*ppd == dbgPd);
+ }
+
+ if (pSemantic != NULL)
+ {
+ *pSemantic = semantic;
+ _ASSERTE(*pSemantic == dbgSemantic);
+ }
+
+ if (pName != NULL)
+ {
+ IfFailRet(GetMDImport()->GetPropertyProps(prop, pName, NULL, NULL, NULL));
+
+#ifdef _DEBUG
+ // Use a separate local so we don't shadow the enclosing hr
+ HRESULT hrDbg = GetMDImport()->GetPropertyProps(prop, pName, NULL, NULL, NULL);
+ _ASSERTE(hrDbg == S_OK);
+ _ASSERTE(strcmp(*pName, dbgName) == 0);
+#endif
+ }
+
+ return S_OK;
+ }
+ }
+
+ return GetMDImport()->GetPropertyInfoForMethodDef(md, ppd, pName, pSemantic);
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+// Fill the m_propertyNameSet hash filter with data that represents every
+// property and its name in the module.
+void Module::PrecomputeMatchingProperties(DataImage *image)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCompilationProcess());
+ }
+ CONTRACTL_END;
+
+ IMDInternalImport* mdImport = GetMDImport();
+
+ m_nPropertyNameSet = mdImport->GetCountWithTokenKind(mdtProperty);
+
+ if (m_nPropertyNameSet == 0)
+ {
+ return;
+ }
+
+ m_propertyNameSet = new (image->GetHeap()) BYTE[m_nPropertyNameSet];
+
+ DWORD nEnumeratedProperties = 0;
+
+ HENUMInternalHolder hEnumTypes(mdImport);
+ hEnumTypes.EnumAllInit(mdtTypeDef);
+
+ // Enumerate all properties of all types
+ mdTypeDef tkType;
+ while (hEnumTypes.EnumNext(&tkType))
+ {
+ HENUMInternalHolder hEnumPropertiesForType(mdImport);
+ hEnumPropertiesForType.EnumInit(mdtProperty, tkType);
+
+ mdProperty tkProperty;
+ while (hEnumPropertiesForType.EnumNext(&tkProperty))
+ {
+ LPCSTR name;
+ HRESULT hr = GetMDImport()->GetPropertyProps(tkProperty, &name, NULL, NULL, NULL);
+ IfFailThrow(hr);
+
+ ++nEnumeratedProperties;
+
+ // Use a case-insensitive hash so that we can use this value for
+ // both case-sensitive and case-insensitive name lookups
+ SString ssName(SString::Utf8Literal, name);
+ ULONG nameHashValue = ssName.HashCaseInsensitive();
+
+ // Set one bit in m_propertyNameSet per iteration
+ // This will allow lookup to ensure that the bit from each iteration is set
+ // and if any are not set, know that the (tkProperty,name) pair is not valid
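+ // (In effect m_propertyNameSet is a Bloom filter: NUM_PROPERTY_SET_HASHES bits per
+ // (tkProperty,name) pair set in an array of 8 * m_nPropertyNameSet bits, so lookups
+ // can produce false positives but never false negatives.)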
+ for (DWORD i = 0; i < NUM_PROPERTY_SET_HASHES; ++i)
+ {
+ DWORD currentHashValue = HashThreeToOne(tkProperty, nameHashValue, i);
+ DWORD bitPos = currentHashValue % (m_nPropertyNameSet * 8);
+ m_propertyNameSet[bitPos / 8] |= (1 << bitPos % 8);
+ }
+ }
+ }
+
+ _ASSERTE(nEnumeratedProperties == m_nPropertyNameSet);
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+// Check whether the module might contain a property whose name has the given
+// hash value, without accessing the property's name. This is done by consulting
+// a hash filter populated at NGen time.
+BOOL Module::MightContainMatchingProperty(mdProperty tkProperty, ULONG nameHash)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_propertyNameSet)
+ {
+ _ASSERTE(HasNativeImage());
+
+ // if this property was added after the name set was computed, conservatively
+ // assume we might have it. This is known to occur in scenarios where a profiler
+ // injects additional metadata at module load time for an NGEN'ed module. In the
+ // future other dynamic additions to the module might produce a similar result.
+ if (RidFromToken(tkProperty) > m_nPropertyNameSet)
+ return TRUE;
+
+ // Check one bit per iteration, failing if any are not set
+ // We know that all will have been set for any valid (tkProperty,name) pair
+ for (DWORD i = 0; i < NUM_PROPERTY_SET_HASHES; ++i)
+ {
+ DWORD currentHashValue = HashThreeToOne(tkProperty, nameHash, i);
+ DWORD bitPos = currentHashValue % (m_nPropertyNameSet * 8);
+ if ((m_propertyNameSet[bitPos / 8] & (1 << bitPos % 8)) == 0)
+ {
+ return FALSE;
+ }
+ }
+ }
+
+ return TRUE;
+}
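+// A hypothetical caller pattern (for illustration only; these names are not from
+// this file): compute the case-insensitive hash once, then consult the filter to
+// skip metadata access when a match is impossible:
+//
+//   ULONG nameHash = SString(SString::Utf8Literal, szName).HashCaseInsensitive();
+//   if (!pModule->MightContainMatchingProperty(tkProperty, nameHash))
+//       continue; // definitely not this property; skip reading its name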
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+// Ensure that all elements and flags that we want persisted in the LookupMaps are present
+void Module::FinalizeLookupMapsPreSave(DataImage *image)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCompilationProcess());
+ }
+ CONTRACTL_END;
+
+ // For each typedef, if it does not need a restore, add the ZAPPED_TYPE_NEEDS_NO_RESTORE flag
+ {
+ LookupMap<PTR_MethodTable>::Iterator typeDefIter(&m_TypeDefToMethodTableMap);
+
+ while (typeDefIter.Next())
+ {
+ MethodTable * pMT = typeDefIter.GetElement();
+
+ if (pMT != NULL && !pMT->NeedsRestore(image))
+ {
+ m_TypeDefToMethodTableMap.AddFlag(RidFromToken(pMT->GetCl()), ZAPPED_TYPE_NEEDS_NO_RESTORE);
+ }
+ }
+ }
+
+ // For each canonical instantiation of a generic type def, if it does not need a restore, add the ZAPPED_GENERIC_TYPE_NEEDS_NO_RESTORE flag
+ {
+ LookupMap<PTR_MethodTable>::Iterator genericTypeDefIter(&m_GenericTypeDefToCanonMethodTableMap);
+
+ while (genericTypeDefIter.Next())
+ {
+ MethodTable * pMT = genericTypeDefIter.GetElement();
+
+ if (pMT != NULL && !pMT->NeedsRestore(image))
+ {
+ m_GenericTypeDefToCanonMethodTableMap.AddFlag(RidFromToken(pMT->GetCl()), ZAPPED_GENERIC_TYPE_NEEDS_NO_RESTORE);
+ }
+ }
+ }
+
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+// Return true if this module has any live (jitted) JMC functions.
+// If a module has no jitted JMC functions, then it's as if it's a
+// non-user module.
+bool Module::HasAnyJMCFunctions()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // If we have any live JMC funcs in us, then we're a JMC module.
+ // We count JMC functions when we either explicitly toggle their status
+ // or when we get the code:DebuggerMethodInfo for them (which happens in a jit-complete).
+ // Since we don't get the jit-completes for ngen modules, we also check the module's
+ // "default" status. This means we may err on the side of believing we have
+ // JMC methods.
+ return ((m_debuggerSpecificData.m_cTotalJMCFuncs > 0) || m_debuggerSpecificData.m_fDefaultJMCStatus);
+}
+
+// Alter our module's count of JMC functions.
+// Since these may be called on multiple threads (say 2 threads are jitting
+// methods within a module), make it thread safe.
+void Module::IncJMCFuncCount()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ InterlockedIncrement(&m_debuggerSpecificData.m_cTotalJMCFuncs);
+}
+
+void Module::DecJMCFuncCount()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ InterlockedDecrement(&m_debuggerSpecificData.m_cTotalJMCFuncs);
+}
+
+// code:DebuggerMethodInfo are lazily created. Let them lookup what the default is.
+bool Module::GetJMCStatus()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_debuggerSpecificData.m_fDefaultJMCStatus;
+}
+
+// Set the default JMC status of this module.
+void Module::SetJMCStatus(bool fStatus)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_debuggerSpecificData.m_fDefaultJMCStatus = fStatus;
+}
+
+// Update the dynamic metadata if needed. Nop for non-dynamic modules
+void Module::UpdateDynamicMetadataIfNeeded()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ // Only need to serialize metadata for dynamic modules. For non-dynamic modules, metadata is already available.
+ if (!IsReflection())
+ {
+ return;
+ }
+
+ // Since serializing metadata to an auxiliary buffer is only needed by the debugger,
+ // we should only be doing this for modules that the debugger can see.
+ if (!IsVisibleToDebugger())
+ {
+ return;
+ }
+
+
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ GetReflectionModule()->CaptureModuleMetaDataToMemory();
+ }
+ EX_CATCH_HRESULT(hr);
+
+ // This Metadata buffer is only used for the debugger, so it's a non-fatal exception for regular CLR execution.
+ // Just swallow it and keep going. However, with the exception of out-of-memory, we do expect it to
+ // succeed, so assert on failures.
+ if (hr != E_OUTOFMEMORY)
+ {
+ SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
+ }
+
+}
+
+#ifdef DEBUGGING_SUPPORTED
+
+#ifdef FEATURE_FUSION
+
+// Fetch Pdbs from the host
+//
+// Returns:
+// No explicit return value.
+// Caches the pdb stream on the module instance if available.
+// Does nothing if not hosted or if the host does not provide a stream.
+// Throws an exception if the host does provide a stream but we can't copy it out.
+//
+// Notes:
+// This fetches PDBs from the host and caches them so that they are available for when the debugger attaches.
+// This lets Arrowhead tools run against Whidbey hosts in a compatibility mode.
+// We expect to add a hosting knob that will allow a host to disable this eager fetching and not run in
+// compat mode.
+void Module::FetchPdbsFromHost()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+
+ ReleaseHolder<IStream> pHostStream;
+
+ hr = GetHostPdbStream(&pHostStream); // addrefs, holder will release
+ if (pHostStream == NULL)
+ {
+ // Common failure case, we're either not hosted, or the host doesn't have a stream.
+ return;
+ }
+ // pHostStream is a stream implemented by the host, so be extra cautious about methods failing,
+ // especially with E_NOTIMPL.
+
+ SafeComHolder<CGrowableStream> pStream(new CGrowableStream()); // throws
+
+ //
+ // Copy from pHostStream (owned by host) to CGrowableStream (owned by CLR, and visible to debugger from OOP).
+ //
+
+ // Get number of bytes to copy.
+ STATSTG SizeData = {0};
+ hr = pHostStream->Stat(&SizeData, STATFLAG_NONAME);
+ IfFailThrow(hr);
+ ULARGE_INTEGER streamSize = SizeData.cbSize;
+
+ if (streamSize.u.HighPart > 0)
+ {
+ // Too big. We shouldn't have a PDB larger than 4 GB.
+ ThrowHR(E_OUTOFMEMORY);
+ }
+ ULONG cbRequest = streamSize.u.LowPart;
+
+
+ // Allocate
+ hr = pStream->SetSize(streamSize);
+ IfFailThrow(hr);
+
+ _ASSERTE(pStream->GetRawBuffer().Size() == cbRequest);
+
+ // Do the actual copy
+ ULONG cbActualRead = 0;
+ hr = pHostStream->Read(pStream->GetRawBuffer().StartAddress(), cbRequest, &cbActualRead);
+ IfFailThrow(hr);
+ if (cbRequest != cbActualRead)
+ {
+ ThrowWin32(ERROR_READ_FAULT);
+ }
+
+ // We now have a full copy of the PDB provided from the host.
+ // This addrefs pStream, which lets it survive past the holder's scope.
+ SetInMemorySymbolStream(pStream, eSymbolFormatPDB);
+}
+#endif // FEATURE_FUSION
+
+#endif // DEBUGGING_SUPPORTED
+
+BOOL Module::NotifyDebuggerLoad(AppDomain *pDomain, DomainFile * pDomainFile, int flags, BOOL attaching)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // We don't notify the debugger about modules that don't contain any code.
+ if (!IsVisibleToDebugger())
+ return FALSE;
+
+ // Always capture metadata, even if no debugger is attached. If a debugger later attaches, it will use
+ // this data.
+ {
+ Module * pModule = pDomainFile->GetModule();
+ pModule->UpdateDynamicMetadataIfNeeded();
+ }
+
+#ifdef FEATURE_FUSION
+ // Eagerly fetch pdbs for hosted modules.
+ // This is only needed for debugging, so errors are not fatal in normal cases.
+ HRESULT hrFetchPdbs = S_OK;
+ EX_TRY
+ {
+ FetchPdbsFromHost();
+ }
+ EX_CATCH_HRESULT(hrFetchPdbs);
+#endif // FEATURE_FUSION
+
+ //
+ // Remaining work is only needed if a debugger is attached
+ //
+ if (!attaching && !pDomain->IsDebuggerAttached())
+ return FALSE;
+
+
+ BOOL result = FALSE;
+
+ if (flags & ATTACH_MODULE_LOAD)
+ {
+ g_pDebugInterface->LoadModule(this,
+ m_file->GetPath(),
+ m_file->GetPath().GetCount(),
+ GetAssembly(),
+ pDomain,
+ pDomainFile,
+ attaching);
+
+ result = TRUE;
+ }
+
+ if (flags & ATTACH_CLASS_LOAD)
+ {
+ LookupMap<PTR_MethodTable>::Iterator typeDefIter(&m_TypeDefToMethodTableMap);
+ while (typeDefIter.Next())
+ {
+ MethodTable * pMT = typeDefIter.GetElement();
+
+ if (pMT != NULL && pMT->IsRestored())
+ {
+ result = TypeHandle(pMT).NotifyDebuggerLoad(pDomain, attaching) || result;
+ }
+ }
+ }
+
+ return result;
+}
+
+void Module::NotifyDebuggerUnload(AppDomain *pDomain)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!pDomain->IsDebuggerAttached())
+ return;
+
+ // We don't notify the debugger about modules that don't contain any code.
+ if (!IsVisibleToDebugger())
+ return;
+
+ LookupMap<PTR_MethodTable>::Iterator typeDefIter(&m_TypeDefToMethodTableMap);
+ while (typeDefIter.Next())
+ {
+ MethodTable * pMT = typeDefIter.GetElement();
+
+ if (pMT != NULL && pMT->IsRestored())
+ {
+ TypeHandle(pMT).NotifyDebuggerUnload(pDomain);
+ }
+ }
+
+ g_pDebugInterface->UnloadModule(this, pDomain);
+}
+
+#if defined(FEATURE_MIXEDMODE) && !defined(CROSSGEN_COMPILE)
+
+//======================================================================================
+// These are used to call back to the shim to get the information about
+// thunks, and to set the new targets.
+typedef mdToken STDMETHODCALLTYPE GetTokenForVTableEntry_t(HINSTANCE hInst, BYTE **ppVTEntry);
+typedef void STDMETHODCALLTYPE SetTargetForVTableEntry_t(HINSTANCE hInst, BYTE **ppVTEntry, BYTE *pTarget);
+typedef BYTE * STDMETHODCALLTYPE GetTargetForVTableEntry_t(HINSTANCE hInst, BYTE **ppVTEntry);
+
+GetTokenForVTableEntry_t *g_pGetTokenForVTableEntry = NULL;
+SetTargetForVTableEntry_t *g_pSetTargetForVTableEntry = NULL;
+GetTargetForVTableEntry_t *g_pGetTargetForVTableEntry = NULL;
+
+//======================================================================================
+void InitThunkCallbackFunctions(HINSTANCE hInstShim)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ typedef enum {
+ e_UNINITIALIZED,
+ e_INITIALIZED_SUCCESS,
+ } InitState_t;
+
+ static InitState_t s_state = e_UNINITIALIZED;
+ if (s_state == e_UNINITIALIZED) {
+ g_pGetTokenForVTableEntry = (GetTokenForVTableEntry_t *)GetProcAddress(hInstShim, "GetTokenForVTableEntry");
+ if (g_pGetTokenForVTableEntry == NULL) {
+ COMPlusThrow(kMissingMethodException, IDS_EE_MSCOREE_MISSING_ENTRYPOINT, W("GetTokenForVTableEntry"));
+ }
+ g_pSetTargetForVTableEntry = (SetTargetForVTableEntry_t *)GetProcAddress(hInstShim, "SetTargetForVTableEntry");
+ if (g_pSetTargetForVTableEntry == NULL) {
+ COMPlusThrow(kMissingMethodException, IDS_EE_MSCOREE_MISSING_ENTRYPOINT, W("SetTargetForVTableEntry"));
+ }
+ g_pGetTargetForVTableEntry = (GetTargetForVTableEntry_t *)GetProcAddress(hInstShim, "GetTargetForVTableEntry");
+ if (g_pGetTargetForVTableEntry == NULL) {
+ COMPlusThrow(kMissingMethodException, IDS_EE_MSCOREE_MISSING_ENTRYPOINT, W("GetTargetForVTableEntry"));
+ }
+ s_state = e_INITIALIZED_SUCCESS;
+ }
+ CONSISTENCY_CHECK(s_state != e_UNINITIALIZED);
+}
+
+//======================================================================================
+void InitShimHINSTANCE()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ if (g_hInstShim == NULL) {
+ g_hInstShim = WszLoadLibrary(MSCOREE_SHIM_W);
+ if (g_hInstShim == NULL) {
+ InlineSString<80> ssErrorFormat;
+ if(!ssErrorFormat.LoadResource(CCompRC::Optional, IDS_EE_MSCOREE_MISSING))
+ {
+ // Keep this in sync with the actual message
+ ssErrorFormat.Set(W("MSCOREE is not loaded."));
+ }
+ EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, ssErrorFormat.GetUnicode());
+ }
+ }
+}
+
+//======================================================================================
+HINSTANCE GetShimHINSTANCE() // dead code?
+{
+ WRAPPER_NO_CONTRACT;
+ InitShimHINSTANCE();
+ return g_hInstShim;
+}
+
+//======================================================================================
+// Fixup vtables stored in the header to contain pointers to method desc
+// prestubs rather than metadata method tokens.
+void Module::FixupVTables()
+{
+ CONTRACTL {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ } CONTRACTL_END;
+
+
+ // If we've already fixed up, or this is not an IJW module, just return.
+ // NOTE: This relies on ILOnly files not having fixups. If this changes,
+ // we need to change this conditional.
+ if (IsIJWFixedUp() || m_file->IsILOnly() || IsIntrospectionOnly()) {
+ return;
+ }
+
+ // An EEException will be thrown if either MSCOREE or any of the required
+ // entrypoints cannot be found.
+ InitShimHINSTANCE();
+ InitThunkCallbackFunctions(g_hInstShim);
+
+ HINSTANCE hInstThis = GetFile()->GetIJWBase();
+
+ // <REVISIT_TODO>@todo: workaround!</REVISIT_TODO>
+ // If we are compiling in-process, we don't want to fixup the vtables - as it
+ // will have side effects on the other copy of the module!
+ if (SystemDomain::GetCurrentDomain()->IsPassiveDomain()) {
+ return;
+ }
+
+#ifdef FEATURE_PREJIT
+ // We delayed filling in this value until the LoadLibrary occurred
+ if (HasTls() && HasNativeImage()) {
+ CORCOMPILE_EE_INFO_TABLE *pEEInfo = GetNativeImage()->GetNativeEEInfoTable();
+ pEEInfo->rvaStaticTlsIndex = GetTlsIndex();
+ }
+#endif
+ // Get vtable fixup data
+ COUNT_T cFixupRecords;
+ IMAGE_COR_VTABLEFIXUP *pFixupTable = m_file->GetVTableFixups(&cFixupRecords);
+
+ // No records then return
+ if (cFixupRecords == 0) {
+ return;
+ }
+
+ // Now, we need to take a lock to serialize fixup.
+ PEImage::IJWFixupData *pData = PEImage::GetIJWData(m_file->GetIJWBase());
+
+ // If it's already been fixed (in some other appdomain), record the fact and return
+ if (pData->IsFixedUp()) {
+ SetIsIJWFixedUp();
+ return;
+ }
+
+ //////////////////////////////////////////////////////
+ //
+ // This is done in three stages:
+ // 1. We enumerate the types we'll need to load
+ // 2. We load the types
+ // 3. We create and install the thunks
+ //
+
+ COUNT_T cVtableThunks = 0;
+ struct MethodLoadData
+ {
+ mdToken token;
+ MethodDesc *pMD;
+ };
+ MethodLoadData *rgMethodsToLoad = NULL;
+ COUNT_T cMethodsToLoad = 0;
+
+ //
+ // Stage 1
+ //
+
+ // Each fixup entry describes a vtable, so iterate the vtables and sum their counts
+ {
+ DWORD iFixup;
+ for (iFixup = 0; iFixup < cFixupRecords; iFixup++)
+ cVtableThunks += pFixupTable[iFixup].Count;
+ }
+
+ Thread *pThread = GetThread();
+ StackingAllocator *pAlloc = &pThread->m_MarshalAlloc;
+ CheckPointHolder cph(pAlloc->GetCheckpoint());
+
+ // Allocate the working array of tokens.
+ cMethodsToLoad = cVtableThunks;
+
+ rgMethodsToLoad = new (pAlloc) MethodLoadData[cMethodsToLoad];
+ memset(rgMethodsToLoad, 0, cMethodsToLoad*sizeof(MethodLoadData));
+
+ // Now take the IJW module lock and get all the tokens
+ {
+ // Take the lock
+ CrstHolder lockHolder(pData->GetLock());
+
+ // If someone has beaten us, just return
+ if (pData->IsFixedUp())
+ {
+ SetIsIJWFixedUp();
+ return;
+ }
+
+ COUNT_T iCurMethod = 0;
+
+ if (cFixupRecords != 0)
+ {
+ for (COUNT_T iFixup = 0; iFixup < cFixupRecords; iFixup++)
+ {
+ // Vtables can be 32 or 64 bit.
+ if ((pFixupTable[iFixup].Type == (COR_VTABLE_PTRSIZED)) ||
+ (pFixupTable[iFixup].Type == (COR_VTABLE_PTRSIZED|COR_VTABLE_FROM_UNMANAGED)) ||
+ (pFixupTable[iFixup].Type == (COR_VTABLE_PTRSIZED|COR_VTABLE_FROM_UNMANAGED_RETAIN_APPDOMAIN)))
+ {
+ const BYTE** pPointers = (const BYTE **) m_file->GetVTable(pFixupTable[iFixup].RVA);
+ for (int iMethod = 0; iMethod < pFixupTable[iFixup].Count; iMethod++)
+ {
+ if (pData->IsMethodFixedUp(iFixup,iMethod))
+ continue;
+ mdToken mdTok = (*g_pGetTokenForVTableEntry)(hInstThis, (BYTE **)(pPointers + iMethod));
+ CONSISTENCY_CHECK(mdTok != mdTokenNil);
+ rgMethodsToLoad[iCurMethod++].token = mdTok;
+ }
+ }
+ }
+ }
+
+ }
+
+ //
+ // Stage 2 - Load the types
+ //
+
+ {
+ for (COUNT_T iCurMethod = 0; iCurMethod < cMethodsToLoad; iCurMethod++)
+ {
+ mdToken curTok = rgMethodsToLoad[iCurMethod].token;
+ if(!GetMDImport()->IsValidToken(curTok))
+ {
+ _ASSERTE(!"Invalid token in v-table fix-up table");
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+
+ // Find the method desc
+ MethodDesc *pMD;
+
+ {
+ CONTRACT_VIOLATION(LoadsTypeViolation);
+ pMD = FindMethodThrowing(curTok);
+ }
+
+ CONSISTENCY_CHECK(CheckPointer(pMD));
+
+ rgMethodsToLoad[iCurMethod].pMD = pMD;
+ }
+ }
+
+ //
+ // Stage 3 - Create the thunk data
+ //
+ {
+ // Take the lock
+ CrstHolder lockHolder(pData->GetLock());
+
+ // If someone has beaten us, just return
+ if (pData->IsFixedUp())
+ {
+ SetIsIJWFixedUp();
+ return;
+ }
+
+ // This is the app domain which all of our U->M thunks for this module will have
+ // affinity with. Note that if the module is shared between multiple domains, all thunks will marshal back
+ // to the original domain, so some of the thunks may cause a surprising domain switch to occur.
+ // (And furthermore note that if the original domain is unloaded, all the thunks will simply throw an
+ // exception.)
+ //
+ // (The essential problem is that these thunks are shared via the global process address space
+ // rather than per domain, thus there is no context to figure out our domain from. We could
+ // use the current thread's domain, but that is effectively undefined in unmanaged space.)
+ //
+ // The bottom line is that the IJW model just doesn't fit with multiple app domain design very well, so
+ // better to have well defined limitations than flaky behavior.
+ //
+ //
+
+ AppDomain *pAppDomain = GetAppDomain();
+
+ // Used to index into rgMethodsToLoad
+ COUNT_T iCurMethod = 0;
+
+
+ // Each fixup entry describes a vtable (each slot contains a metadata token
+ // at this stage).
+ DWORD iFixup;
+ for (iFixup = 0; iFixup < cFixupRecords; iFixup++)
+ cVtableThunks += pFixupTable[iFixup].Count;
+
+ DWORD dwIndex=0;
+ DWORD dwThunkIndex = 0;
+
+ // Now to fill in the thunk table.
+ for (iFixup = 0; iFixup < cFixupRecords; iFixup++)
+ {
+ const BYTE** pPointers = (const BYTE **)
+ m_file->GetVTable(pFixupTable[iFixup].RVA);
+
+ // Vtables can be 32 or 64 bit.
+ if (pFixupTable[iFixup].Type == COR_VTABLE_PTRSIZED)
+ {
+ for (int iMethod = 0; iMethod < pFixupTable[iFixup].Count; iMethod++)
+ {
+ if (pData->IsMethodFixedUp(iFixup,iMethod))
+ continue;
+
+ mdToken mdTok = rgMethodsToLoad[iCurMethod].token;
+ MethodDesc *pMD = rgMethodsToLoad[iCurMethod].pMD;
+ iCurMethod++;
+
+#ifdef _DEBUG
+ if (pMD->IsNDirect())
+ {
+ LOG((LF_IJW, LL_INFO10, "[0x%lx] <-- PINV thunk for \"%s\" (target = 0x%lx)\n",
+ (size_t)&(pPointers[iMethod]), pMD->m_pszDebugMethodName,
+ (size_t) (((NDirectMethodDesc*)pMD)->GetNDirectTarget())));
+ }
+#endif // _DEBUG
+
+ CONSISTENCY_CHECK(dwThunkIndex < cVtableThunks);
+
+ // Point the local vtable slot to the thunk we created
+ (*g_pSetTargetForVTableEntry)(hInstThis, (BYTE **)&pPointers[iMethod], (BYTE *)pMD->GetMultiCallableAddrOfCode());
+
+ pData->MarkMethodFixedUp(iFixup,iMethod);
+
+ dwThunkIndex++;
+ }
+
+ }
+ else if (pFixupTable[iFixup].Type == (COR_VTABLE_PTRSIZED|COR_VTABLE_FROM_UNMANAGED))
+ {
+
+ for (int iMethod = 0; iMethod < pFixupTable[iFixup].Count; iMethod++)
+ {
+ if (pData->IsMethodFixedUp(iFixup,iMethod))
+ continue;
+
+ mdToken mdTok = rgMethodsToLoad[iCurMethod].token;
+ MethodDesc *pMD = rgMethodsToLoad[iCurMethod].pMD;
+ iCurMethod++;
+ LOG((LF_IJW, LL_INFO10, "[0x%p] <-- VTable thunk for \"%s\" (pMD = 0x%p)\n",
+ (UINT_PTR)&(pPointers[iMethod]), pMD->m_pszDebugMethodName, pMD));
+
+ UMEntryThunk *pUMEntryThunk = (UMEntryThunk*)(void*)(GetDllThunkHeap()->AllocAlignedMem(sizeof(UMEntryThunk), CODE_SIZE_ALIGN)); // UMEntryThunk contains code
+ FillMemory(pUMEntryThunk, sizeof(*pUMEntryThunk), 0);
+
+ UMThunkMarshInfo *pUMThunkMarshInfo = (UMThunkMarshInfo*)(void*)(GetThunkHeap()->AllocAlignedMem(sizeof(UMThunkMarshInfo), CODE_SIZE_ALIGN));
+ FillMemory(pUMThunkMarshInfo, sizeof(*pUMThunkMarshInfo), 0);
+
+ pUMThunkMarshInfo->LoadTimeInit(pMD);
+ pUMEntryThunk->LoadTimeInit(NULL, NULL, pUMThunkMarshInfo, pMD, pAppDomain->GetId());
+ (*g_pSetTargetForVTableEntry)(hInstThis, (BYTE **)&pPointers[iMethod], (BYTE *)pUMEntryThunk->GetCode());
+
+ pData->MarkMethodFixedUp(iFixup,iMethod);
+ }
+ }
+ else if (pFixupTable[iFixup].Type == (COR_VTABLE_PTRSIZED|COR_VTABLE_FROM_UNMANAGED_RETAIN_APPDOMAIN))
+ {
+
+ for (int iMethod = 0; iMethod < pFixupTable[iFixup].Count; iMethod++)
+ {
+ if (pData->IsMethodFixedUp(iFixup,iMethod))
+ continue;
+
+ mdToken mdTok = rgMethodsToLoad[iCurMethod].token;
+ iCurMethod++;
+
+ IJWNOADThunk* pThunkLocal = new(GetDllThunkHeap()->AllocAlignedMem(sizeof(IJWNOADThunk), CODE_SIZE_ALIGN)) IJWNOADThunk(GetFile()->GetIJWBase(),dwIndex++,mdTok);
+ (*g_pSetTargetForVTableEntry)(hInstThis, (BYTE **)&pPointers[iMethod], (BYTE *)pThunkLocal->GetCode());
+
+ pData->MarkMethodFixedUp(iFixup,iMethod);
+ }
+ }
+ else if ((pFixupTable[iFixup].Type & COR_VTABLE_NOT_PTRSIZED) == COR_VTABLE_NOT_PTRSIZED)
+ {
+ // fixup type doesn't match the platform
+ THROW_BAD_FORMAT(BFA_FIXUP_WRONG_PLATFORM, this);
+ }
+ else
+ {
+ _ASSERTE(!"Unknown vtable fixup type");
+ }
+ }
+
+
+ if(!GetAssembly()->IsDomainNeutral())
+ CreateDomainThunks();
+
+ SetDomainIdOfIJWFixups(pAppDomain->GetId());
+#ifdef FEATURE_PREJIT
+ if (HasNativeImage()) {
+ CORCOMPILE_EE_INFO_TABLE *pEEInfo = GetNativeImage()->GetNativeEEInfoTable();
+
+ if (pEEInfo->nativeEntryPointStart != 0) {
+ PTR_PEImageLayout pIJWLayout = m_file->GetLoadedIL();
+ SIZE_T base = (SIZE_T)pIJWLayout->GetBase();
+
+ _ASSERTE(pIJWLayout->CheckRva((RVA)pEEInfo->nativeEntryPointStart));
+ _ASSERTE(pIJWLayout->CheckRva((RVA)pEEInfo->nativeEntryPointEnd));
+
+ pEEInfo->nativeEntryPointStart += base;
+ pEEInfo->nativeEntryPointEnd += base;
+ }
+ else {
+ _ASSERTE(pEEInfo->nativeEntryPointEnd == 0);
+ }
+ }
+#endif
+ // Indicate that this module has been fixed before releasing the lock
+ pData->SetIsFixedUp(); // On the data
+ SetIsIJWFixedUp(); // On the module
+ } // End of Stage 3
+}
+
+// Self-initializing accessor for m_pThunkHeap
+LoaderHeap *Module::GetDllThunkHeap()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ return PEImage::GetDllThunkHeap(GetFile()->GetIJWBase());
+
+}
+LoaderHeap *Module::GetThunkHeap()
+{
+ CONTRACT (LoaderHeap *)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END
+
+ if (!m_pThunkHeap)
+ {
+ size_t * pPrivatePCLBytes = NULL;
+ size_t * pGlobalPCLBytes = NULL;
+
+#ifdef PROFILING_SUPPORTED
+ pPrivatePCLBytes = &(GetPerfCounters().m_Loading.cbLoaderHeapSize);
+#endif
+
+ LoaderHeap *pNewHeap = new LoaderHeap(VIRTUAL_ALLOC_RESERVE_GRANULARITY, // DWORD dwReserveBlockSize
+ 0, // DWORD dwCommitBlockSize
+ pPrivatePCLBytes,
+ ThunkHeapStubManager::g_pManager->GetRangeList(),
+ TRUE); // BOOL fMakeExecutable
+
+ if (FastInterlockCompareExchangePointer(&m_pThunkHeap, pNewHeap, 0) != 0)
+ {
+ delete pNewHeap;
+ }
+ }
+
+ RETURN m_pThunkHeap;
+}
+
+void Module::SetADThunkTable(UMEntryThunk* pTable)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GetDomainLocalModule()->SetADThunkTable(pTable);
+}
+
+UMEntryThunk* Module::GetADThunkTable()
+{
+ CONTRACT(UMEntryThunk*)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END
+
+ DomainLocalModule* pMod=GetDomainLocalModule();
+ _ASSERTE(pMod);
+ UMEntryThunk * pADThunkTable = pMod->GetADThunkTable();
+ if (pADThunkTable == NULL)
+ {
+ CreateDomainThunks();
+ pADThunkTable = pMod->GetADThunkTable();
+ _ASSERTE(pADThunkTable != NULL);
+ }
+
+ RETURN (UMEntryThunk*)pADThunkTable;
+}
+
+void Module::CreateDomainThunks()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ AppDomain *pAppDomain = GetAppDomain();
+ if(!pAppDomain)
+ {
+ _ASSERTE(!"No appdomain");
+ return;
+ }
+
+ UINT32 cFixupRecords;
+ IMAGE_COR_VTABLEFIXUP *pFixupTable = m_file->GetVTableFixups(&cFixupRecords);
+
+ DWORD iFixup;
+ DWORD cVtableThunks=0;
+ for (iFixup = 0; iFixup < cFixupRecords; iFixup++)
+ {
+ if (pFixupTable[iFixup].Type==(COR_VTABLE_FROM_UNMANAGED_RETAIN_APPDOMAIN|COR_VTABLE_PTRSIZED))
+ {
+ cVtableThunks += pFixupTable[iFixup].Count;
+ }
+ }
+
+ if (cVtableThunks==0)
+ {
+ return;
+ }
+
+ AllocMemTracker amTracker;
+ AllocMemTracker *pamTracker = &amTracker;
+
+ UMEntryThunk* pTable=((UMEntryThunk*)pamTracker->Track(pAppDomain->GetStubHeap()->AllocAlignedMem(sizeof(UMEntryThunk)*cVtableThunks, CODE_SIZE_ALIGN)));
+ DWORD dwCurrIndex=0;
+ for (iFixup = 0; iFixup < cFixupRecords; iFixup++)
+ {
+ if (pFixupTable[iFixup].Type == (COR_VTABLE_FROM_UNMANAGED_RETAIN_APPDOMAIN|COR_VTABLE_PTRSIZED))
+ {
+ const BYTE **pPointers = (const BYTE **) m_file->GetVTable(pFixupTable[iFixup].RVA);
+ for (int iMethod = 0; iMethod < pFixupTable[iFixup].Count; iMethod++)
+ {
+ PCODE pCode = (PCODE)
+ (*g_pGetTargetForVTableEntry)((HINSTANCE)GetFile()->GetIJWBase(), (BYTE **)&pPointers[iMethod]);
+ IJWNOADThunk* pThnk = IJWNOADThunk::FromCode(pCode);
+ mdToken tok=pThnk->GetToken(); //!!
+ if(!GetMDImport()->IsValidToken(tok))
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_TOKEN);
+ return;
+ }
+
+ MethodDesc *pMD = FindMethodThrowing(tok);
+
+ // @TODO: Check for out of memory
+ UMThunkMarshInfo *pUMThunkMarshInfo = (UMThunkMarshInfo*)pamTracker->Track(pAppDomain->GetStubHeap()->AllocAlignedMem(sizeof(UMThunkMarshInfo), CODE_SIZE_ALIGN));
+ _ASSERTE(pUMThunkMarshInfo != NULL);
+
+ pUMThunkMarshInfo->LoadTimeInit(pMD);
+ pTable[dwCurrIndex].LoadTimeInit(NULL, NULL, pUMThunkMarshInfo, pMD, pAppDomain->GetId());
+
+ // If we're setting up a domain that is cached, update the code pointer in the cache
+ if (pThnk->IsCachedAppDomainID(pAppDomain->GetId()))
+ pThnk->SetCachedInfo(pAppDomain->GetId(), (LPVOID)GetEEFuncEntryPoint((LPVOID)pTable[dwCurrIndex].GetCode()));
+
+ dwCurrIndex++;
+ }
+ }
+ }
+
+ pamTracker->SuppressRelease();
+ SetADThunkTable(pTable);
+}
+
+LPVOID Module::GetUMThunk(LPVOID pManagedIp, PCCOR_SIGNATURE pSig, ULONG cSig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return GetDomainFile()->GetUMThunk(pManagedIp, pSig, cSig);
+}
+
+
+void *Module::GetMUThunk(LPVOID pUnmanagedIp, PCCOR_SIGNATURE pSig, ULONG cSig)
+{
+ CONTRACT (void*)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END
+
+ if (m_pMUThunkHash == NULL)
+ {
+ MUThunkHash *pMUThunkHash = new MUThunkHash(this);
+ if (FastInterlockCompareExchangePointer(&m_pMUThunkHash, pMUThunkHash, NULL) != NULL)
+ delete pMUThunkHash;
+ }
+ RETURN m_pMUThunkHash->GetMUThunk(pUnmanagedIp, pSig, cSig);
+}
+
+#endif //FEATURE_MIXEDMODE && !CROSSGEN_COMPILE
+
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+
+// These helpers are used in Module::ExpandAll
+// to avoid EX_TRY/EX_CATCH in a loop (uses _alloca and guzzles stack)
+
+static TypeHandle LoadTypeDefOrRefHelper(DataImage * image, Module * pModule, mdToken tk)
+{
+ STANDARD_VM_CONTRACT;
+
+ TypeHandle th;
+
+ EX_TRY
+ {
+ th = ClassLoader::LoadTypeDefOrRefThrowing(pModule, tk,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ }
+ EX_CATCH
+ {
+ image->GetPreloader()->Error(tk, GET_EXCEPTION());
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return th;
+}
+
+static TypeHandle LoadTypeSpecHelper(DataImage * image, Module * pModule, mdToken tk,
+ PCCOR_SIGNATURE pSig, ULONG cSig)
+{
+ STANDARD_VM_CONTRACT;
+
+ TypeHandle th;
+
+ EX_TRY
+ {
+ SigPointer p(pSig, cSig);
+ SigTypeContext typeContext;
+ th = p.GetTypeHandleThrowing(pModule, &typeContext);
+ }
+ EX_CATCH
+ {
+ image->GetPreloader()->Error(tk, GET_EXCEPTION());
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return th;
+}
+
+static TypeHandle LoadGenericInstantiationHelper(DataImage * image, Module * pModule, mdToken tk, Instantiation inst)
+{
+ STANDARD_VM_CONTRACT;
+
+ TypeHandle th;
+
+ EX_TRY
+ {
+ th = ClassLoader::LoadGenericInstantiationThrowing(pModule, tk, inst);
+ }
+ EX_CATCH
+ {
+ image->GetPreloader()->Error(tk, GET_EXCEPTION());
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return th;
+}
+
+static void GetDescFromMemberRefHelper(DataImage * image, Module * pModule, mdToken tk)
+{
+ STANDARD_VM_CONTRACT;
+
+ EX_TRY
+ {
+ MethodDesc * pMD = NULL;
+ FieldDesc * pFD = NULL;
+ TypeHandle th;
+
+ // Note: using an empty type context is now OK, because even though the token is a MemberRef
+ // neither the token nor its parent will directly refer to type variables.
+ // @TODO GENERICS: want to allow loads of generic methods here but need strict metadata checks on parent
+ SigTypeContext typeContext;
+ MemberLoader::GetDescFromMemberRef(pModule, tk, &pMD, &pFD,
+ &typeContext,
+ FALSE /* strict metadata checks */, &th);
+ }
+ EX_CATCH
+ {
+ image->GetPreloader()->Error(tk, GET_EXCEPTION());
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+void Module::SetProfileData(CorProfileData * profileData)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pProfileData = profileData;
+}
+
+CorProfileData * Module::GetProfileData()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pProfileData;
+}
+
+mdTypeDef Module::LookupIbcTypeToken(Module * pExternalModule, mdToken ibcToken, SString* optionalFullNameOut)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(TypeFromToken(ibcToken) == ibcExternalType);
+
+ CorProfileData * profileData = this->GetProfileData();
+
+ CORBBTPROF_BLOB_TYPE_DEF_ENTRY * blobTypeDefEntry;
+ blobTypeDefEntry = profileData->GetBlobExternalTypeDef(ibcToken);
+
+ if (blobTypeDefEntry == NULL)
+ return mdTypeDefNil;
+
+ IbcNameHandle ibcName;
+ ibcName.szName = &blobTypeDefEntry->name[0];
+ ibcName.tkIbcNameSpace = blobTypeDefEntry->nameSpaceToken;
+ ibcName.tkIbcNestedClass = blobTypeDefEntry->nestedClassToken;
+ ibcName.szNamespace = NULL;
+ ibcName.tkEnclosingClass = mdTypeDefNil;
+
+ if (!IsNilToken(blobTypeDefEntry->nameSpaceToken))
+ {
+ _ASSERTE(IsNilToken(blobTypeDefEntry->nestedClassToken));
+
+ idExternalNamespace nameSpaceToken = blobTypeDefEntry->nameSpaceToken;
+ _ASSERTE(TypeFromToken(nameSpaceToken) == ibcExternalNamespace);
+
+ CORBBTPROF_BLOB_NAMESPACE_DEF_ENTRY * blobNamespaceDefEntry;
+ blobNamespaceDefEntry = profileData->GetBlobExternalNamespaceDef(nameSpaceToken);
+
+ if (blobNamespaceDefEntry == NULL)
+ return mdTypeDefNil;
+
+ ibcName.szNamespace = &blobNamespaceDefEntry->name[0];
+
+ if (optionalFullNameOut != NULL)
+ {
+ optionalFullNameOut->Append(W("["));
+ optionalFullNameOut->AppendUTF8(pExternalModule->GetSimpleName());
+ optionalFullNameOut->Append(W("]"));
+
+ if ((ibcName.szNamespace != NULL) && ((*ibcName.szNamespace) != W('\0')))
+ {
+ optionalFullNameOut->AppendUTF8(ibcName.szNamespace);
+ optionalFullNameOut->Append(W("."));
+ }
+ optionalFullNameOut->AppendUTF8(ibcName.szName);
+ }
+ }
+ else if (!IsNilToken(blobTypeDefEntry->nestedClassToken))
+ {
+ idExternalType nestedClassToken = blobTypeDefEntry->nestedClassToken;
+ _ASSERTE(TypeFromToken(nestedClassToken) == ibcExternalType);
+
+ ibcName.tkEnclosingClass = LookupIbcTypeToken(pExternalModule, nestedClassToken, optionalFullNameOut);
+
+ if (optionalFullNameOut != NULL)
+ {
+ optionalFullNameOut->Append(W("+"));
+ optionalFullNameOut->AppendUTF8(ibcName.szName);
+ }
+
+ if (IsNilToken(ibcName.tkEnclosingClass))
+ return mdTypeDefNil;
+ }
+
+ //*****************************************
+ // look up function for TypeDef
+ //*****************************************
+ // STDMETHOD(FindTypeDef)(
+ // LPCSTR szNamespace, // [IN] Namespace for the TypeDef.
+ // LPCSTR szName, // [IN] Name of the TypeDef.
+ // mdToken tkEnclosingClass, // [IN] TypeRef/TypeDef Token for the enclosing class.
+ // mdTypeDef *ptypedef) PURE; // [OUT] return typedef
+
+ IMDInternalImport *pInternalImport = pExternalModule->GetMDImport();
+
+ mdTypeDef mdResult = mdTypeDefNil;
+
+ HRESULT hr = pInternalImport->FindTypeDef(ibcName.szNamespace, ibcName.szName, ibcName.tkEnclosingClass, &mdResult);
+
+ if(FAILED(hr))
+ mdResult = mdTypeDefNil;
+
+ return mdResult;
+}
+
+struct IbcCompareContext
+{
+ Module * pModule;
+ TypeHandle enclosingType;
+ DWORD cMatch; // count of methods that had a matching method name
+ bool useBestSig; // if true we should use the BestSig when we don't find an exact match
+ PCCOR_SIGNATURE pvBestSig; // Current Best matching signature
+ DWORD cbBestSig; //
+};
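+// How this context is used: FindMethodDefUsingCompare invokes CompareIbcMethodSigs once
+// per methoddef whose name matches. The context accumulates the closest signature seen
+// so far; if no exact match is found, LookupIbcMethodToken runs a second search against
+// pvBestSig to recover that method's methoddef token.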
+
+//---------------------------------------------------------------------------------------
+//
+// Compare two signatures from the same scope.
+//
+BOOL
+CompareIbcMethodSigs(
+ PCCOR_SIGNATURE pvCandidateSig, // Candidate signature
+ DWORD cbCandidateSig, //
+ PCCOR_SIGNATURE pvIbcSignature, // The Ibc signature that we want to match
+ DWORD cbIbcSignature, //
+ void * pvContext) // void pointer to IbcCompareContext
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ //
+ // If the pointers are identical, return TRUE
+ //
+ if (pvCandidateSig == pvIbcSignature)
+ {
+ _ASSERTE(cbCandidateSig == cbIbcSignature);
+ return TRUE;
+ }
+
+ //
+ // Check for exact match
+ //
+ if (cbCandidateSig == cbIbcSignature)
+ {
+ if (memcmp(pvCandidateSig, pvIbcSignature, cbIbcSignature) == 0)
+ {
+ return TRUE;
+ }
+ }
+
+ IbcCompareContext * context = (IbcCompareContext *) pvContext;
+
+ //
+ // No exact match, we will return FALSE and keep looking at other matching method names
+ //
+ // However, since the method name was an exact match, we will remember this signature
+ // so that if it is the best match we can look it up again and return its methodDef token
+ //
+ if (context->cMatch == 0)
+ {
+ context->pvBestSig = pvCandidateSig;
+ context->cbBestSig = cbCandidateSig;
+ context->cMatch = 1;
+ context->useBestSig = true;
+ }
+ else
+ {
+ context->cMatch++;
+
+ SigTypeContext emptyTypeContext;
+ SigTypeContext ibcTypeContext = SigTypeContext(context->enclosingType);
+ MetaSig ibcSignature (pvIbcSignature, cbIbcSignature, context->pModule, &ibcTypeContext);
+
+ MetaSig candidateSig (pvCandidateSig, cbCandidateSig, context->pModule, &emptyTypeContext);
+ MetaSig bestSignature(context->pvBestSig, context->cbBestSig, context->pModule, &emptyTypeContext);
+ //
+ // Is candidateSig a better match than bestSignature?
+ //
+ // First check the calling convention
+ //
+ if (candidateSig.GetCallingConventionInfo() != bestSignature.GetCallingConventionInfo())
+ {
+ if (bestSignature.GetCallingConventionInfo() == ibcSignature.GetCallingConventionInfo())
+ goto LEAVE_BEST;
+ if (candidateSig.GetCallingConventionInfo() == ibcSignature.GetCallingConventionInfo())
+ goto SELECT_CANDIDATE;
+ //
+ // Neither one is a match
+ //
+ goto USE_NEITHER;
+ }
+
+ //
+ // Next check the number of arguments
+ //
+ if (candidateSig.NumFixedArgs() != bestSignature.NumFixedArgs())
+ {
+ //
+ // Does one of the two have the same number of args?
+ //
+ if (bestSignature.NumFixedArgs() == ibcSignature.NumFixedArgs())
+ goto LEAVE_BEST;
+ if (candidateSig.NumFixedArgs() == ibcSignature.NumFixedArgs())
+ goto SELECT_CANDIDATE;
+ //
+ // Neither one is a match
+ //
+ goto USE_NEITHER;
+ }
+ else if (candidateSig.NumFixedArgs() != ibcSignature.NumFixedArgs())
+ {
+ //
+ // Neither one is a match
+ //
+ goto USE_NEITHER;
+ }
+
+ CorElementType etIbc;
+ CorElementType etCandidate;
+ CorElementType etBest;
+ //
+ // Next get the return element type
+ //
+ // etIbc = ibcSignature.GetReturnProps().PeekElemTypeClosed(ibcSignature.GetSigTypeContext());
+ IfFailThrow(ibcSignature.GetReturnProps().PeekElemType(&etIbc));
+ IfFailThrow(candidateSig.GetReturnProps().PeekElemType(&etCandidate));
+ IfFailThrow(bestSignature.GetReturnProps().PeekElemType(&etBest));
+ //
+ // Do they have different return types?
+ //
+ if (etCandidate != etBest)
+ {
+ if (etBest == etIbc)
+ goto LEAVE_BEST;
+
+ if (etCandidate == etIbc)
+ goto SELECT_CANDIDATE;
+ }
+
+ //
+ // Now iterate over the method argument types to see which signature
+ // is the better match
+ //
+ for (DWORD i = 0; (i < ibcSignature.NumFixedArgs()); i++)
+ {
+ ibcSignature.SkipArg();
+ IfFailThrow(ibcSignature.GetArgProps().PeekElemType(&etIbc));
+
+ candidateSig.SkipArg();
+ IfFailThrow(candidateSig.GetArgProps().PeekElemType(&etCandidate));
+
+ bestSignature.SkipArg();
+ IfFailThrow(bestSignature.GetArgProps().PeekElemType(&etBest));
+
+ //
+ // Do they have different argument types?
+ //
+ if (etCandidate != etBest)
+ {
+ if (etBest == etIbc)
+ goto LEAVE_BEST;
+
+ if (etCandidate == etIbc)
+ goto SELECT_CANDIDATE;
+ }
+ }
+ // When we fall through to here we did not find any differences
+ // that we could base a choice on
+ //
+ context->useBestSig = true;
+
+SELECT_CANDIDATE:
+ context->pvBestSig = pvCandidateSig;
+ context->cbBestSig = cbCandidateSig;
+ context->useBestSig = true;
+ return FALSE;
+
+USE_NEITHER:
+ context->useBestSig = false;
+ return FALSE;
+ }
+
+LEAVE_BEST:
+ return FALSE;
+} // CompareIbcMethodSigs
+
+mdMethodDef Module::LookupIbcMethodToken(TypeHandle enclosingType, mdToken ibcToken, SString* optionalFullNameOut)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(TypeFromToken(ibcToken) == ibcExternalMethod);
+
+ CorProfileData * profileData = this->GetProfileData();
+
+ CORBBTPROF_BLOB_METHOD_DEF_ENTRY * blobMethodDefEntry;
+ blobMethodDefEntry = profileData->GetBlobExternalMethodDef(ibcToken);
+
+ if (blobMethodDefEntry == NULL)
+ return mdMethodDefNil;
+
+ idExternalType signatureToken = blobMethodDefEntry->signatureToken;
+ _ASSERTE(!IsNilToken(signatureToken));
+ _ASSERTE(TypeFromToken(signatureToken) == ibcExternalSignature);
+
+ CORBBTPROF_BLOB_SIGNATURE_DEF_ENTRY * blobSignatureDefEntry;
+ blobSignatureDefEntry = profileData->GetBlobExternalSignatureDef(signatureToken);
+
+ if (blobSignatureDefEntry == NULL)
+ return mdMethodDefNil;
+
+ IbcNameHandle ibcName;
+ ibcName.szName = &blobMethodDefEntry->name[0];
+ ibcName.tkIbcNestedClass = blobMethodDefEntry->nestedClassToken;
+ ibcName.tkIbcNameSpace = idExternalNamespaceNil;
+ ibcName.szNamespace = NULL;
+ ibcName.tkEnclosingClass = mdTypeDefNil;
+
+ Module * pExternalModule = enclosingType.GetModule();
+ PCCOR_SIGNATURE pvSig = NULL;
+ ULONG cbSig = 0;
+
+ _ASSERTE(!IsNilToken(ibcName.tkIbcNestedClass));
+ _ASSERTE(TypeFromToken(ibcName.tkIbcNestedClass) == ibcExternalType);
+
+ ibcName.tkEnclosingClass = LookupIbcTypeToken(pExternalModule, ibcName.tkIbcNestedClass, optionalFullNameOut);
+
+ if (IsNilToken(ibcName.tkEnclosingClass))
+ THROW_BAD_FORMAT(BFA_MISSING_IBC_EXTERNAL_TYPE, this);
+
+ if (optionalFullNameOut != NULL)
+ {
+ optionalFullNameOut->Append(W("."));
+ optionalFullNameOut->AppendUTF8(ibcName.szName); // MethodName
+ optionalFullNameOut->Append(W("()"));
+ }
+
+ pvSig = blobSignatureDefEntry->sig;
+ cbSig = blobSignatureDefEntry->cSig;
+
+ //*****************************************
+ // look up functions for TypeDef
+ //*****************************************
+ // STDMETHOD(FindMethodDefUsingCompare)(
+ // mdTypeDef classdef, // [IN] given typedef
+ // LPCSTR szName, // [IN] member name
+ // PCCOR_SIGNATURE pvSigBlob, // [IN] point to a blob value of CLR signature
+ // ULONG cbSigBlob, // [IN] count of bytes in the signature blob
+ // PSIGCOMPARE pSignatureCompare, // [IN] Routine to compare signatures
+ // void* pSignatureArgs, // [IN] Additional info to supply the compare function
+ // mdMethodDef *pmd) PURE; // [OUT] matching memberdef
+ //
+
+ IMDInternalImport * pInternalImport = pExternalModule->GetMDImport();
+
+ IbcCompareContext context;
+ memset(&context, 0, sizeof(IbcCompareContext));
+ context.pModule = this;
+ context.enclosingType = enclosingType;
+ context.cMatch = 0;
+ context.useBestSig = false;
+
+ mdMethodDef mdResult = mdMethodDefNil;
+ HRESULT hr = pInternalImport->FindMethodDefUsingCompare(ibcName.tkEnclosingClass, ibcName.szName,
+ pvSig, cbSig,
+ CompareIbcMethodSigs, (void *) &context,
+ &mdResult);
+ if (SUCCEEDED(hr))
+ {
+ _ASSERTE(mdResult != mdMethodDefNil);
+ }
+ else if (context.useBestSig)
+ {
+ hr = pInternalImport->FindMethodDefUsingCompare(ibcName.tkEnclosingClass, ibcName.szName,
+ context.pvBestSig, context.cbBestSig,
+ CompareIbcMethodSigs, (void *) &context,
+ &mdResult);
+ _ASSERTE(SUCCEEDED(hr));
+ _ASSERTE(mdResult != mdMethodDefNil);
+ }
+ else
+ {
+ mdResult = mdMethodDefNil;
+ }
+
+ return mdResult;
+}
+
+SString * Module::IBCErrorNameString()
+{
+ CONTRACT(SString *)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ if (m_pIBCErrorNameString == NULL)
+ {
+ m_pIBCErrorNameString = new SString();
+ }
+
+ RETURN m_pIBCErrorNameString;
+}
+
+void Module::IBCTypeLoadFailed(CORBBTPROF_BLOB_PARAM_SIG_ENTRY *pBlobSigEntry,
+ SString& exceptionMessage, SString* typeNameError)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pBlobSigEntry));
+ }
+ CONTRACTL_END
+
+ //
+ // Print an error message for the type load failure
+ //
+ StackSString msg(W("Failed to load type token "));
+ SString typeName;
+
+ char buff[16];
+ sprintf_s(buff, COUNTOF(buff), "%08x", pBlobSigEntry->blob.token);
+ StackSString szToken(SString::Ascii, &buff[0]);
+ msg += szToken;
+
+ if (!exceptionMessage.IsEmpty())
+ {
+ if ((typeNameError != NULL) && !typeNameError->IsEmpty())
+ {
+ msg += W(" for the profile data in ");
+ msg.Append(exceptionMessage);
+ msg += W(".");
+
+ msg += W(" The type was ");
+ msg.Append(*typeNameError);
+ msg += W(".");
+ }
+ else
+ {
+ msg += W(" from profile data. The error is ");
+ msg.Append(exceptionMessage);
+ }
+ }
+ msg += W("\n");
+
+ GetSvcLogger()->Log(msg, LogLevel_Info);
+}
+
+void Module::IBCMethodLoadFailed(CORBBTPROF_BLOB_PARAM_SIG_ENTRY *pBlobSigEntry,
+ SString& exceptionMessage, SString* methodNameError)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pBlobSigEntry));
+ }
+ CONTRACTL_END
+
+ //
+ // Print an error message for the method load failure
+ //
+ StackSString msg(W("Failed to load method token "));
+
+ char buff[16];
+ sprintf_s(buff, COUNTOF(buff), "%08x", pBlobSigEntry->blob.token);
+ StackSString szToken(SString::Ascii, &buff[0]);
+ msg += szToken;
+
+ if (!exceptionMessage.IsEmpty())
+ {
+ if ((methodNameError != NULL) && !methodNameError->IsEmpty())
+ {
+ msg += W(" for the profile data in ");
+ msg.Append(exceptionMessage);
+ msg += W(".");
+
+ msg += W(" The method was ");
+ msg.Append(*methodNameError);
+ msg += W(".\n");
+ }
+ else
+ {
+ msg += W(" from profile data. The error is ");
+ msg.Append(exceptionMessage);
+ }
+ }
+ msg += W("\n");
+
+ GetSvcLogger()->Log(msg, LogLevel_Info);
+}
+
+TypeHandle Module::LoadIBCTypeHelper(CORBBTPROF_BLOB_PARAM_SIG_ENTRY *pBlobSigEntry)
+{
+ CONTRACT(TypeHandle)
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pBlobSigEntry));
+ }
+ CONTRACT_END
+
+ TypeHandle loadedType;
+
+ PCCOR_SIGNATURE pSig = pBlobSigEntry->sig;
+ ULONG cSig = pBlobSigEntry->cSig;
+
+ SigPointer p(pSig, cSig);
+
+ ZapSig::Context zapSigContext(this, (void *)this, ZapSig::IbcTokens);
+ ZapSig::Context * pZapSigContext = &zapSigContext;
+
+ EX_TRY
+ {
+ IBCErrorNameString()->Clear();
+
+ // This is what ZapSig::FindTypeHandleFromSignature does...
+ //
+ SigTypeContext typeContext; // empty type context
+
+ loadedType = p.GetTypeHandleThrowing( this,
+ &typeContext,
+ ClassLoader::LoadTypes,
+ CLASS_LOADED,
+ FALSE,
+ NULL,
+ pZapSigContext);
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ g_pConfig->DebugCheckAndForceIBCFailure(EEConfig::CallSite_1);
+#endif
+ }
+ EX_CATCH
+ {
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ StackSString exceptionMessage;
+ GET_EXCEPTION()->GetMessage(exceptionMessage);
+ IBCTypeLoadFailed(pBlobSigEntry, exceptionMessage, IBCErrorNameString());
+ loadedType = TypeHandle();
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ RETURN loadedType;
+}
+
+//---------------------------------------------------------------------------------------
+//
+MethodDesc* Module::LoadIBCMethodHelper(CORBBTPROF_BLOB_PARAM_SIG_ENTRY * pBlobSigEntry)
+{
+ CONTRACT(MethodDesc*)
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pBlobSigEntry));
+ }
+ CONTRACT_END
+
+ MethodDesc* pMethod = NULL;
+
+ PCCOR_SIGNATURE pSig = pBlobSigEntry->sig;
+ ULONG cSig = pBlobSigEntry->cSig;
+
+ SigPointer p(pSig, cSig);
+
+ ZapSig::Context zapSigContext(this, (void *)this, ZapSig::IbcTokens);
+ ZapSig::Context * pZapSigContext = &zapSigContext;
+
+ TypeHandle enclosingType;
+
+ //
+ // First Decode and Load the enclosing type for this method
+ //
+ EX_TRY
+ {
+ IBCErrorNameString()->Clear();
+
+ // This is what ZapSig::FindTypeHandleFromSignature does...
+ //
+ SigTypeContext typeContext; // empty type context
+
+ enclosingType = p.GetTypeHandleThrowing( this,
+ &typeContext,
+ ClassLoader::LoadTypes,
+ CLASS_LOADED,
+ FALSE,
+ NULL,
+ pZapSigContext);
+ IfFailThrow(p.SkipExactlyOne());
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ g_pConfig->DebugCheckAndForceIBCFailure(EEConfig::CallSite_2);
+#endif
+ }
+ EX_CATCH
+ {
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ StackSString exceptionMessage;
+ GET_EXCEPTION()->GetMessage(exceptionMessage);
+ IBCTypeLoadFailed(pBlobSigEntry, exceptionMessage, IBCErrorNameString());
+ enclosingType = TypeHandle();
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ if (enclosingType.IsNull())
+ return NULL;
+
+ //
+ // Now Decode and Load the method
+ //
+ EX_TRY
+ {
+ MethodTable *pOwnerMT = enclosingType.GetMethodTable();
+ _ASSERTE(pOwnerMT != NULL);
+
+ // decode flags
+ DWORD methodFlags;
+ IfFailThrow(p.GetData(&methodFlags));
+ BOOL isInstantiatingStub = ((methodFlags & ENCODE_METHOD_SIG_InstantiatingStub) == ENCODE_METHOD_SIG_InstantiatingStub);
+ BOOL isUnboxingStub = ((methodFlags & ENCODE_METHOD_SIG_UnboxingStub) == ENCODE_METHOD_SIG_UnboxingStub);
+ BOOL fMethodNeedsInstantiation = ((methodFlags & ENCODE_METHOD_SIG_MethodInstantiation) == ENCODE_METHOD_SIG_MethodInstantiation);
+ BOOL fMethodUsesSlotEncoding = ((methodFlags & ENCODE_METHOD_SIG_SlotInsteadOfToken) == ENCODE_METHOD_SIG_SlotInsteadOfToken);
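+ // What follows in the signature stream depends on these flags: either a slot
+ // number (SlotInsteadOfToken) or a method RID, optionally followed by the
+ // method's generic instantiation (MethodInstantiation).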
+
+ if ( fMethodUsesSlotEncoding )
+ {
+ // get the method desc using slot number
+ DWORD slot;
+ IfFailThrow(p.GetData(&slot));
+
+ pMethod = pOwnerMT->GetMethodDescForSlot(slot);
+ }
+ else // otherwise we use the normal metadata MethodDef token encoding and we handle ibc tokens.
+ {
+ //
+ // decode method token
+ //
+ RID methodRid;
+ IfFailThrow(p.GetData(&methodRid));
+
+ mdMethodDef methodToken;
+
+ //
+ // Is our enclosingType from another module?
+ //
+ if (this == enclosingType.GetModule())
+ {
+ //
+ // The enclosing type is from our module
+ // The method token is a normal MethodDef token
+ //
+ methodToken = TokenFromRid(methodRid, mdtMethodDef);
+ }
+ else
+ {
+ //
+ // The enclosing type is from an external module
+ // The method token is an ibcExternalMethod token
+ //
+ idExternalType ibcToken = RidToToken(methodRid, ibcExternalMethod);
+ methodToken = this->LookupIbcMethodToken(enclosingType, ibcToken);
+
+ if (IsNilToken(methodToken))
+ {
+ SString * fullTypeName = IBCErrorNameString();
+ fullTypeName->Clear();
+ this->LookupIbcMethodToken(enclosingType, ibcToken, fullTypeName);
+
+ THROW_BAD_FORMAT(BFA_MISSING_IBC_EXTERNAL_METHOD, this);
+ }
+ }
+
+
+ SigTypeContext methodTypeContext( enclosingType );
+ pMethod = MemberLoader::GetMethodDescFromMemberDefOrRefOrSpec(
+ pOwnerMT->GetModule(),
+ methodToken,
+ &methodTypeContext,
+ FALSE,
+ FALSE );
+ }
+
+ Instantiation inst;
+
+ // Instantiate the method if needed, or create a stub to a static method in a generic class.
+ if (fMethodNeedsInstantiation && pMethod->HasMethodInstantiation())
+ {
+ DWORD nargs = pMethod->GetNumGenericMethodArgs();
+ SIZE_T cbMem;
+
+ if (!ClrSafeInt<SIZE_T>::multiply(nargs, sizeof(TypeHandle), cbMem/* passed by ref */))
+ ThrowHR(COR_E_OVERFLOW);
+
+ TypeHandle * pInst = (TypeHandle*) _alloca(cbMem);
+ SigTypeContext typeContext; // empty type context
+
+ for (DWORD i = 0; i < nargs; i++)
+ {
+ pInst[i] = p.GetTypeHandleThrowing( this,
+ &typeContext,
+ ClassLoader::LoadTypes,
+ CLASS_LOADED,
+ FALSE,
+ NULL,
+ pZapSigContext);
+ IfFailThrow(p.SkipExactlyOne());
+ }
+
+ inst = Instantiation(pInst, nargs);
+ }
+ else
+ {
+ inst = pMethod->LoadMethodInstantiation();
+ }
+
+ // This must be called even if nargs == 0, in order to create an instantiating
+ // stub for static methods in generic classes if needed, and for BoxedEntryPointStubs
+ // in non-generic structs.
+ pMethod = MethodDesc::FindOrCreateAssociatedMethodDesc(pMethod, pOwnerMT,
+ isUnboxingStub,
+ inst,
+ !(isInstantiatingStub || isUnboxingStub));
+
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ g_pConfig->DebugCheckAndForceIBCFailure(EEConfig::CallSite_3);
+#endif
+
+ }
+ EX_CATCH
+ {
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ StackSString exceptionMessage;
+ GET_EXCEPTION()->GetMessage(exceptionMessage);
+ IBCMethodLoadFailed(pBlobSigEntry, exceptionMessage, IBCErrorNameString());
+ pMethod = NULL;
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ RETURN pMethod;
+} // Module::LoadIBCMethodHelper
+
+#ifdef FEATURE_COMINTEROP
+//---------------------------------------------------------------------------------------
+//
+// This function is a workaround for missing IBC data in WinRT assemblies and
+// not-yet-implemented sharing of IL_STUB(__Canon arg) IL stubs for all interfaces.
+//
+static void ExpandWindowsRuntimeType(TypeHandle t, DataImage *image)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!t.IsNull());
+ }
+ CONTRACTL_END
+
+ if (t.IsTypeDesc())
+ return;
+
+ // This array contains our poor man's IBC data - instantiations that are known to
+ // be used by other assemblies.
+ static const struct
+ {
+ LPCUTF8 m_szTypeName;
+ BinderClassID m_GenericBinderClassID;
+ }
+ rgForcedInstantiations[] = {
+ { "Windows.UI.Xaml.Data.IGroupInfo", CLASS__IENUMERABLEGENERIC },
+ { "Windows.UI.Xaml.UIElement", CLASS__ILISTGENERIC },
+ { "Windows.UI.Xaml.Visibility", CLASS__CLRIREFERENCEIMPL },
+ { "Windows.UI.Xaml.VerticalAlignment", CLASS__CLRIREFERENCEIMPL },
+ { "Windows.UI.Xaml.HorizontalAlignment", CLASS__CLRIREFERENCEIMPL },
+ // The following instantiations are used by Microsoft.PlayerFramework - http://playerframework.codeplex.com/
+ { "Windows.UI.Xaml.Media.AudioCategory", CLASS__CLRIREFERENCEIMPL },
+ { "Windows.UI.Xaml.Media.AudioDeviceType", CLASS__CLRIREFERENCEIMPL },
+ { "Windows.UI.Xaml.Media.MediaElementState", CLASS__CLRIREFERENCEIMPL },
+ { "Windows.UI.Xaml.Media.Stereo3DVideoRenderMode", CLASS__CLRIREFERENCEIMPL },
+ { "Windows.UI.Xaml.Media.Stereo3DVideoPackingMode", CLASS__CLRIREFERENCEIMPL },
+ };
+
+ DefineFullyQualifiedNameForClass();
+ LPCUTF8 szTypeName = GetFullyQualifiedNameForClass(t.AsMethodTable());
+
+ for (SIZE_T i = 0; i < COUNTOF(rgForcedInstantiations); i++)
+ {
+ if (strcmp(szTypeName, rgForcedInstantiations[i].m_szTypeName) == 0)
+ {
+ EX_TRY
+ {
+ TypeHandle thGenericType = TypeHandle(MscorlibBinder::GetClass(rgForcedInstantiations[i].m_GenericBinderClassID));
+
+ Instantiation inst(&t, 1);
+ thGenericType.Instantiate(inst);
+ }
+ EX_CATCH
+ {
+ image->GetPreloader()->Error(t.GetCl(), GET_EXCEPTION());
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+ }
+
+ if (strcmp(szTypeName, "Windows.Foundation.Collections.IObservableVector`1") == 0)
+ {
+ EX_TRY
+ {
+ TypeHandle thArg = TypeHandle(g_pObjectClass);
+
+ Instantiation inst(&thArg, 1);
+ t.Instantiate(inst);
+ }
+ EX_CATCH
+ {
+ image->GetPreloader()->Error(t.GetCl(), GET_EXCEPTION());
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+}
+#endif // FEATURE_COMINTEROP
+
+//---------------------------------------------------------------------------------------
+//
+void Module::ExpandAll(DataImage *image)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!IsResource());
+ }
+ CONTRACTL_END
+
+ mdToken tk;
+ DWORD assemblyFlags = GetAssembly()->GetFlags();
+
+ // construct a compact layout writer if necessary
+#ifdef MDIL
+ ICompactLayoutWriter *pCompactLayoutWriter = NULL;
+ if (!GetAppDomain()->IsNoMDILCompilationDomain())
+ {
+ pCompactLayoutWriter = ICompactLayoutWriter::MakeCompactLayoutWriter(this, image->m_pZapImage);
+ }
+#endif //MDIL
+ //
+ // Explicitly load the global class.
+ //
+
+ MethodTable *pGlobalMT = GetGlobalMethodTable();
+#ifdef MDIL
+ if (pCompactLayoutWriter != NULL && pGlobalMT != NULL)
+ {
+ EEClass *pGlobalClass = pGlobalMT->GetClass();
+ pGlobalClass->WriteCompactLayout(pCompactLayoutWriter, image->m_pZapImage);
+ }
+#endif //MDIL
+
+ //
+ // Load all classes. This also fills out the
+ // RID maps for the typedefs, method defs,
+ // and field defs.
+ //
+
+ IMDInternalImport *pInternalImport = GetMDImport();
+ {
+ HENUMInternalHolder hEnum(pInternalImport);
+ hEnum.EnumTypeDefInit();
+
+ while (pInternalImport->EnumTypeDefNext(&hEnum, &tk))
+ {
+#ifdef FEATURE_COMINTEROP
+ // Skip the non-managed WinRT types since they're only used by JavaScript and C++
+ //
+ // With WinRT files, we want to exclude certain types that cause us problems:
+ // * Attribute types defined in Windows.Foundation. The constructor's methodimpl flags
+ // specify it is an internal runtime function and gets set as an FCALL when we parse
+ // the type
+ //
+ if (IsAfContentType_WindowsRuntime(assemblyFlags))
+ {
+ mdToken tkExtends;
+ pInternalImport->GetTypeDefProps(tk, NULL, &tkExtends);
+
+ if (TypeFromToken(tkExtends) == mdtTypeRef)
+ {
+ LPCSTR szNameSpace = NULL;
+ LPCSTR szName = NULL;
+ pInternalImport->GetNameOfTypeRef(tkExtends, &szNameSpace, &szName);
+
+ if (!strcmp(szNameSpace, "System") && !_stricmp(szName, "Attribute"))
+ {
+ continue;
+ }
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ TypeHandle t = LoadTypeDefOrRefHelper(image, this, tk);
+
+ if (t.IsNull()) // Skip this type
+ continue;
+
+#ifdef MDIL
+ if (pCompactLayoutWriter != NULL)
+ {
+ MethodTable *pMT = t.AsMethodTable();
+ EEClass *pClass = pMT->GetClass();
+ pClass->WriteCompactLayout(pCompactLayoutWriter, image->m_pZapImage);
+ }
+#endif // MDIL
+
+ if (!t.HasInstantiation())
+ {
+ EEClassHashEntry_t *pBucket = NULL;
+ HashDatum data;
+ StackSString ssFullyQualifiedName;
+ mdToken mdEncloser;
+ EEClassHashTable *pTable = GetAvailableClassHash();
+
+ _ASSERTE(pTable != NULL);
+
+ t.GetName(ssFullyQualifiedName);
+
+ // Convert to UTF8
+ StackScratchBuffer scratch;
+ LPCUTF8 szFullyQualifiedName = ssFullyQualifiedName.GetUTF8(scratch);
+
+ BOOL isNested = ClassLoader::IsNested(this, tk, &mdEncloser);
+ EEClassHashTable::LookupContext sContext;
+ pBucket = pTable->GetValue(szFullyQualifiedName, &data, isNested, &sContext);
+
+ if (isNested)
+ {
+ while (pBucket != NULL)
+ {
+ _ASSERTE (TypeFromToken(tk) == mdtTypeDef);
+ BOOL match = GetClassLoader()->CompareNestedEntryWithTypeDef( pInternalImport,
+ mdEncloser,
+ GetAvailableClassHash(),
+ pBucket->GetEncloser());
+ if (match)
+ break;
+
+ pBucket = pTable->FindNextNestedClass(szFullyQualifiedName, &data, &sContext);
+ }
+ }
+
+ // Save the typehandle instead of the token in the hash entry so that ngen'ed images
+ // don't have to look up based on the token and update this entry
+ if ((pBucket != NULL) && !t.IsNull() && t.IsRestored())
+ pBucket->SetData(t.AsPtr());
+ }
+
+ DWORD nGenericClassParams = t.GetNumGenericArgs();
+ if (nGenericClassParams != 0)
+ {
+ // For generic types, load the instantiation at Object
+ SIZE_T cbMem;
+ if (!ClrSafeInt<SIZE_T>::multiply(sizeof(TypeHandle), nGenericClassParams, cbMem/* passed by ref */))
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+ CQuickBytes qbGenericClassArgs;
+ TypeHandle *genericClassArgs = reinterpret_cast<TypeHandle*>(qbGenericClassArgs.AllocThrows(cbMem));
+ for (DWORD i = 0; i < nGenericClassParams; i++)
+ {
+ genericClassArgs[i] = TypeHandle(g_pCanonMethodTableClass);
+ }
+
+ TypeHandle thCanonInst = LoadGenericInstantiationHelper(image, this, tk, Instantiation(genericClassArgs, nGenericClassParams));
+
+ // If successful, add the instantiation to the Module's map of generic types instantiated at Object
+ if (!thCanonInst.IsNull() && !thCanonInst.IsTypeDesc())
+ {
+ MethodTable * pCanonMT = thCanonInst.AsMethodTable();
+ m_GenericTypeDefToCanonMethodTableMap.AddElement(this, RidFromToken(pCanonMT->GetCl()), pCanonMT);
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (IsAfContentType_WindowsRuntime(assemblyFlags))
+ {
+ ExpandWindowsRuntimeType(t, image);
+ }
+#endif // FEATURE_COMINTEROP
+ }
+ }
+
+ //
+ // Fill out TypeRef RID map
+ //
+
+ {
+ HENUMInternalHolder hEnum(pInternalImport);
+ hEnum.EnumAllInit(mdtTypeRef);
+
+ while (pInternalImport->EnumNext(&hEnum, &tk))
+ {
+ mdToken tkResolutionScope = mdTokenNil;
+ pInternalImport->GetResolutionScopeOfTypeRef(tk, &tkResolutionScope);
+
+#ifdef FEATURE_COMINTEROP
+ // WinRT first party files are authored with TypeRefs pointing to TypeDefs in the same module.
+ // This causes us to load types we do not want to NGen such as custom attributes. We will not
+ // expand any module local TypeRefs for WinMDs to prevent this.
+ if(TypeFromToken(tkResolutionScope)==mdtModule && IsAfContentType_WindowsRuntime(assemblyFlags))
+ continue;
+#endif // FEATURE_COMINTEROP
+ TypeHandle t = LoadTypeDefOrRefHelper(image, this, tk);
+
+ if (t.IsNull()) // Skip this type
+ continue;
+
+#ifdef FEATURE_COMINTEROP
+ if (!g_fNGenWinMDResilient && TypeFromToken(tkResolutionScope) == mdtAssemblyRef)
+ {
+ DWORD dwAssemblyRefFlags;
+ IfFailThrow(pInternalImport->GetAssemblyRefProps(tkResolutionScope, NULL, NULL, NULL, NULL, NULL, NULL, &dwAssemblyRefFlags));
+
+ if (IsAfContentType_WindowsRuntime(dwAssemblyRefFlags))
+ {
+ Assembly *pAssembly = t.GetAssembly();
+ PEAssembly *pPEAssembly = pAssembly->GetManifestFile();
+ AssemblySpec refSpec;
+ refSpec.InitializeSpec(tkResolutionScope, pInternalImport);
+ LPCSTR psznamespace;
+ LPCSTR pszname;
+ pInternalImport->GetNameOfTypeRef(tk, &psznamespace, &pszname);
+ refSpec.SetWindowsRuntimeType(psznamespace, pszname);
+ GetAppDomain()->ToCompilationDomain()->AddDependency(&refSpec,pPEAssembly);
+ }
+ }
+#endif // FEATURE_COMINTEROP
+ }
+ }
+
+ //
+ // Load all type specs
+ //
+
+ {
+ HENUMInternalHolder hEnum(pInternalImport);
+ hEnum.EnumAllInit(mdtTypeSpec);
+
+ while (pInternalImport->EnumNext(&hEnum, &tk))
+ {
+ ULONG cSig;
+ PCCOR_SIGNATURE pSig;
+
+ IfFailThrow(pInternalImport->GetTypeSpecFromToken(tk, &pSig, &cSig));
+
+ // Load all types specs that do not contain variables
+ if (SigPointer(pSig, cSig).IsPolyType(NULL) == hasNoVars)
+ {
+ LoadTypeSpecHelper(image, this, tk, pSig, cSig);
+ }
+ }
+ }
+
+ //
+ // Load all the reported parameterized types and methods
+ //
+ CORBBTPROF_BLOB_ENTRY *pBlobEntry = GetProfileData()->GetBlobStream();
+
+ if (pBlobEntry != NULL)
+ {
+ while (pBlobEntry->TypeIsValid())
+ {
+ if (TypeFromToken(pBlobEntry->token) == ibcTypeSpec)
+ {
+ _ASSERTE(pBlobEntry->type == ParamTypeSpec);
+ CORBBTPROF_BLOB_PARAM_SIG_ENTRY *pBlobSigEntry = (CORBBTPROF_BLOB_PARAM_SIG_ENTRY *) pBlobEntry;
+
+ TypeHandle th = LoadIBCTypeHelper(pBlobSigEntry);
+ if (!th.IsNull())
+ {
+ image->GetPreloader()->TriageTypeForZap(th, TRUE);
+ }
+ }
+ else if (TypeFromToken(pBlobEntry->token) == ibcMethodSpec)
+ {
+ _ASSERTE(pBlobEntry->type == ParamMethodSpec);
+ CORBBTPROF_BLOB_PARAM_SIG_ENTRY *pBlobSigEntry = (CORBBTPROF_BLOB_PARAM_SIG_ENTRY *) pBlobEntry;
+
+ MethodDesc *pMD = LoadIBCMethodHelper(pBlobSigEntry);
+ if (pMD != NULL)
+ {
+ image->GetPreloader()->TriageMethodForZap(pMD, TRUE);
+ }
+ }
+ pBlobEntry = pBlobEntry->GetNextEntry();
+ }
+ _ASSERTE(pBlobEntry->type == EndOfBlobStream);
+ }
+
+ {
+ //
+ // Fill out MemberRef RID map and va sig cookies for
+ // varargs member refs.
+ //
+
+ HENUMInternalHolder hEnum(pInternalImport);
+ hEnum.EnumAllInit(mdtMemberRef);
+
+ while (pInternalImport->EnumNext(&hEnum, &tk))
+ {
+ mdTypeRef parent;
+ IfFailThrow(pInternalImport->GetParentOfMemberRef(tk, &parent));
+
+#ifdef FEATURE_COMINTEROP
+ if (IsAfContentType_WindowsRuntime(assemblyFlags) && TypeFromToken(parent) == mdtTypeRef)
+ {
+ mdToken tkResolutionScope = mdTokenNil;
+ pInternalImport->GetResolutionScopeOfTypeRef(parent, &tkResolutionScope);
+ // WinRT first party files are authored with TypeRefs pointing to TypeDefs in the same module.
+ // This causes us to load types we do not want to NGen such as custom attributes. We will not
+ // expand any module local TypeRefs for WinMDs to prevent this.
+ if(TypeFromToken(tkResolutionScope)==mdtModule)
+ continue;
+
+ LPCSTR szNameSpace = NULL;
+ LPCSTR szName = NULL;
+ if (SUCCEEDED(pInternalImport->GetNameOfTypeRef(parent, &szNameSpace, &szName)))
+ {
+ if (WinMDAdapter::ConvertWellKnownTypeNameFromClrToWinRT(&szNameSpace, &szName))
+ {
+ //
+ // This is a MemberRef from a redirected WinRT type.
+ // We should skip it, as the managed view will never see this MemberRef anyway.
+ // Not skipping it would result in MissingMethodExceptions, since members in redirected
+ // types don't exactly match their redirected CLR type counterparts.
+ //
+ // Typically we only need to do this for interfaces, as we should never see a MemberRef
+ // from a non-interface, but here, to keep things simple, we skip every MemberRef that
+ // belongs to a redirected WinRT type.
+ //
+ continue;
+ }
+ }
+
+ }
+#endif // FEATURE_COMINTEROP
+
+ // If the MemberRef has a TypeSpec as a parent (i.e. refers to a method on an array type
+ // or on a generic class), then it could in turn refer to type variables of
+ // an unknown class/method. So we don't preresolve any MemberRefs which have TypeSpecs as
+ // parents. The RID maps are not filled out for such tokens anyway.
+ if (TypeFromToken(parent) != mdtTypeSpec)
+ {
+ GetDescFromMemberRefHelper(image, this, tk);
+ }
+ }
+ }
+
+ //
+ // Fill out binder
+ //
+
+ if (m_pBinder != NULL)
+ {
+ m_pBinder->BindAll();
+ }
+
+#ifdef MDIL
+ if (pCompactLayoutWriter)
+ {
+ pCompactLayoutWriter->Flush();
+ }
+#endif // MDIL
+} // Module::ExpandAll
+
+/* static */
+void Module::SaveMethodTable(DataImage * image,
+ MethodTable * pMT,
+ DWORD profilingFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (image->IsStored(pMT))
+ return;
+
+ pMT->Save(image, profilingFlags);
+}
+
+
+/* static */
+void Module::SaveTypeHandle(DataImage * image,
+ TypeHandle t,
+ DWORD profilingFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ t.CheckRestore();
+ if (t.IsTypeDesc())
+ {
+ TypeDesc *pTD = t.AsTypeDesc();
+ if (!image->IsStored(pTD))
+ {
+ pTD->Save(image);
+ }
+ }
+ else
+ {
+ MethodTable *pMT = t.AsMethodTable();
+ if (pMT != NULL && !image->IsStored(pMT))
+ {
+ SaveMethodTable(image, pMT, profilingFlags);
+ _ASSERTE(image->IsStored(pMT));
+ }
+ }
+#ifdef _DEBUG
+ if (LoggingOn(LF_JIT, LL_INFO100))
+ {
+ Module *pPrefModule = Module::GetPreferredZapModuleForTypeHandle(t);
+ if (image->GetModule() != pPrefModule)
+ {
+ StackSString typeName;
+ t.CheckRestore();
+ TypeString::AppendTypeDebug(typeName, t);
+ LOG((LF_ZAP, LL_INFO100, "The type %S was saved outside its preferred module %S\n", typeName.GetUnicode(), pPrefModule->GetPath().GetUnicode()));
+ }
+ }
+#endif // _DEBUG
+}
+
+void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!numElements)
+ return;
+
+ DWORD i = 0;
+ DWORD totalBoxedStatics = 0;
+
+ // sort the tables so that
+ // - the hot ppMT entries are at the beginning of the ppMT table
+ // - the hot cctor entries are at the beginning of the cctorInfoHot table
+ // - the cold cctor entries are at the end, and we make cctorInfoCold point
+ // to the first cold entry
+ //
+ // the invariant in this loop is:
+ // items 0...numElementsHot-1 are hot
+ // items numElementsHot...i-1 are cold
+ for (i = 0; i < numElements; i++)
+ {
+ MethodTable *ppMTTemp = ppMT[i];
+
+ // Count the number of boxed statics along the way
+ totalBoxedStatics += ppMTTemp->GetNumBoxedRegularStatics();
+
+ bool hot = true; // if there's no profiling data, assume the entries are all hot.
+ if (profileData->GetTokenFlagsData(TypeProfilingData))
+ {
+ if ((profileData->GetTypeProfilingFlagsOfToken(ppMTTemp->GetCl()) & (1 << ReadCCtorInfo)) == 0)
+ hot = false;
+ }
+ if (hot)
+ {
+ // swap ppMT[i] and ppMT[numElementsHot] to maintain the loop invariant
+ ppMT[i] = ppMT[numElementsHot];
+ ppMT[numElementsHot] = ppMTTemp;
+
+ numElementsHot++;
+ }
+ }
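+
+ // Illustrative example (not additional logic): given four entries whose
+ // profile flags mark them {hot, cold, hot, cold}, the swaps above leave
+ // ppMT ordered {hot, hot, cold, cold} with numElementsHot == 2; the
+ // relative order within each half is not guaranteed to be preserved.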
+
+ numHotHashes = numElementsHot ? RoundUpToPower2((numElementsHot * sizeof(PTR_MethodTable)) / CACHE_LINE_SIZE) : 0;
+ numColdHashes = (numElements - numElementsHot) ? RoundUpToPower2(((numElements - numElementsHot) *
+ sizeof(PTR_MethodTable)) / CACHE_LINE_SIZE) : 0;
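+
+ // For example (illustrative numbers only): with 8-byte PTR_MethodTable
+ // entries and a 64-byte cache line, 100 hot entries give
+ // (100 * 8) / 64 = 12, which RoundUpToPower2 rounds to 16 hash buckets.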
+
+ LOG((LF_ZAP, LL_INFO10, "ModuleCtorInfo::numHotHashes: 0x%4x\n", numHotHashes));
+ if (numColdHashes != 0)
+ {
+ LOG((LF_ZAP, LL_INFO10, "ModuleCtorInfo::numColdHashes: 0x%4x\n", numColdHashes));
+ }
+
+ // The "plus one" is so we can store the offset to the end of the array at the end of
+ // the hashoffsets arrays, enabling faster lookups.
+ hotHashOffsets = new DWORD[numHotHashes + 1];
+ coldHashOffsets = new DWORD[numColdHashes + 1];
+
+ DWORD *hashArray = new DWORD[numElements];
+
+ for (i = 0; i < numElementsHot; i++)
+ {
+ hashArray[i] = GenerateHash(ppMT[i], HOT);
+ }
+ for (i = numElementsHot; i < numElements; i++)
+ {
+ hashArray[i] = GenerateHash(ppMT[i], COLD);
+ }
+
+ // Sort the two arrays by hash values to create regions with the same hash values.
+ ClassCtorInfoEntryArraySort cctorInfoHotSort(hashArray, ppMT, numElementsHot);
+ ClassCtorInfoEntryArraySort cctorInfoColdSort(hashArray + numElementsHot, ppMT + numElementsHot,
+ numElements - numElementsHot);
+ cctorInfoHotSort.Sort();
+ cctorInfoColdSort.Sort();
+
+ // Generate the indices that index into the correct "hash region" in the hot part of the ppMT array, and store
+ // them in the hotHashOffsets array.
+ DWORD curHash = 0;
+ i = 0;
+ while (i < numElementsHot)
+ {
+ if (curHash < hashArray[i])
+ {
+ hotHashOffsets[curHash++] = i;
+ }
+ else if (curHash == hashArray[i])
+ {
+ hotHashOffsets[curHash++] = i++;
+ }
+ else
+ {
+ i++;
+ }
+ }
+ while (curHash <= numHotHashes)
+ {
+ hotHashOffsets[curHash++] = numElementsHot;
+ }
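+
+ // Worked example (illustrative): for sorted hashArray {0, 0, 2, 3, 3} and
+ // numHotHashes == 4, the loop above produces hotHashOffsets {0, 2, 2, 3, 5},
+ // so the entries with hash h occupy the half-open range
+ // [hotHashOffsets[h], hotHashOffsets[h+1]) of the hot part of ppMT.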
+
+ // Generate the indices that index into the correct "hash region" in the cold part of the ppMT array, and store
+ // them in the coldHashOffsets array.
+ curHash = 0;
+ i = numElementsHot;
+ while (i < numElements)
+ {
+ if (curHash < hashArray[i])
+ {
+ coldHashOffsets[curHash++] = i;
+ }
+ else if (curHash == hashArray[i])
+ {
+ coldHashOffsets[curHash++] = i++;
+ }
+ else i++;
+ }
+ while (curHash <= numColdHashes)
+ {
+ coldHashOffsets[curHash++] = numElements;
+ }
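+
+ // A minimal sketch of how a consumer can use these offset tables, assuming
+ // the same GenerateHash function (illustrative only, not code in this file):
+ //
+ // DWORD hash = GenerateHash(pMT, HOT);
+ // DWORD begin = hotHashOffsets[hash];
+ // DWORD end = hotHashOffsets[hash + 1]; // the "plus one" sentinel
+ // for (DWORD j = begin; j < end; j++)
+ // {
+ // if (ppMT[j] == pMT)
+ // break; // found the entry for this MethodTable
+ // }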
+
+ delete[] hashArray;
+
+
+ cctorInfoHot = new ClassCtorInfoEntry[numElements];
+
+ // make cctorInfoCold point to the first cold element
+ cctorInfoCold = cctorInfoHot + numElementsHot;
+
+ ppHotGCStaticsMTs = (totalBoxedStatics != 0) ? new FixupPointer<PTR_MethodTable>[totalBoxedStatics] : NULL;
+ numHotGCStaticsMTs = totalBoxedStatics;
+
+ DWORD iGCStaticMT = 0;
+
+ for (i = 0; i < numElements; i++)
+ {
+ if (i == numElementsHot)
+ {
+ numHotGCStaticsMTs = iGCStaticMT;
+ numColdGCStaticsMTs = (totalBoxedStatics - iGCStaticMT);
+
+ // make ppColdGCStaticsMTs point to the first cold element
+ ppColdGCStaticsMTs = ppHotGCStaticsMTs + numHotGCStaticsMTs;
+ }
+
+ MethodTable* pMT = ppMT[i];
+ ClassCtorInfoEntry* pEntry = &cctorInfoHot[i];
+
+ WORD numBoxedStatics = pMT->GetNumBoxedRegularStatics();
+ pEntry->numBoxedStatics = numBoxedStatics;
+ pEntry->hasFixedAddressVTStatics = !!pMT->HasFixedAddressVTStatics();
+
+ FieldDesc *pField = pMT->HasGenericsStaticsInfo() ?
+ pMT->GetGenericsStaticFieldDescs() : (pMT->GetApproxFieldDescListRaw() + pMT->GetNumIntroducedInstanceFields());
+ FieldDesc *pFieldEnd = pField + pMT->GetNumStaticFields();
+
+ pEntry->firstBoxedStaticOffset = (DWORD)-1;
+ pEntry->firstBoxedStaticMTIndex = (DWORD)-1;
+
+ DWORD numFoundBoxedStatics = 0;
+ while (pField < pFieldEnd)
+ {
+ _ASSERTE(pField->IsStatic());
+
+ if (!pField->IsSpecialStatic() && pField->IsByValue())
+ {
+ if (pEntry->firstBoxedStaticOffset == (DWORD)-1)
+ {
+ pEntry->firstBoxedStaticOffset = pField->GetOffset();
+ pEntry->firstBoxedStaticMTIndex = iGCStaticMT;
+ }
+ _ASSERTE(pField->GetOffset() - pEntry->firstBoxedStaticOffset
+ == (iGCStaticMT - pEntry->firstBoxedStaticMTIndex) * sizeof(MethodTable*));
+
+ TypeHandle th = pField->GetFieldTypeHandleThrowing();
+ ppHotGCStaticsMTs[iGCStaticMT++].SetValue(th.GetMethodTable());
+
+ numFoundBoxedStatics++;
+ }
+ pField++;
+ }
+ _ASSERTE(numBoxedStatics == numFoundBoxedStatics);
+ }
+ _ASSERTE(iGCStaticMT == totalBoxedStatics);
+
+ if (numElementsHot > 0)
+ {
+ image->StoreStructure(cctorInfoHot,
+ sizeof(ClassCtorInfoEntry) * numElementsHot,
+ DataImage::ITEM_MODULE_CCTOR_INFO_HOT);
+
+ image->StoreStructure(hotHashOffsets,
+ sizeof(DWORD) * (numHotHashes + 1),
+ DataImage::ITEM_MODULE_CCTOR_INFO_HOT);
+ }
+
+ if (numElements > 0)
+ image->StoreStructure(ppMT,
+ sizeof(MethodTable *) * numElements,
+ DataImage::ITEM_MODULE_CCTOR_INFO_HOT);
+
+ if (numElements > numElementsHot)
+ {
+ image->StoreStructure(cctorInfoCold,
+ sizeof(ClassCtorInfoEntry) * (numElements - numElementsHot),
+ DataImage::ITEM_MODULE_CCTOR_INFO_COLD);
+
+ image->StoreStructure(coldHashOffsets,
+ sizeof(DWORD) * (numColdHashes + 1),
+ DataImage::ITEM_MODULE_CCTOR_INFO_COLD);
+ }
+
+ if ( numHotGCStaticsMTs )
+ {
+ // Save the mt templates
+ image->StoreStructure( ppHotGCStaticsMTs, numHotGCStaticsMTs * sizeof(MethodTable*),
+ DataImage::ITEM_GC_STATIC_HANDLES_HOT);
+ }
+ else
+ {
+ ppHotGCStaticsMTs = NULL;
+ }
+
+ if ( numColdGCStaticsMTs )
+ {
+ // Save the cold mt templates
+ image->StoreStructure( ppColdGCStaticsMTs, numColdGCStaticsMTs * sizeof(MethodTable*),
+ DataImage::ITEM_GC_STATIC_HANDLES_COLD);
+ }
+ else
+ {
+ ppColdGCStaticsMTs = NULL;
+ }
+}
+
+#ifdef FEATURE_REMOTING
+static void IsCrossAppDomainOptimizableWrapper(MethodDesc * pMD,
+ DWORD* pnumDwords)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ EX_TRY
+ {
+ if (pMD->GetNumGenericMethodArgs() == 0 && !pMD->IsStatic())
+ RemotableMethodInfo::IsCrossAppDomainOptimizable(pMD, pnumDwords);
+ }
+ EX_CATCH
+ {
+ // If there is an exception, the info for this method will remain uninitialized.
+ // Just ignore the exception; at runtime we'll try to initialize it again.
+ // An exception is possible during ngen if not all dependencies are available.
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+static void PrepareRemotableMethodInfo(MethodTable * pMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!pMT->HasRemotableMethodInfo())
+ return;
+
+ MethodTable::MethodIterator it(pMT);
+ for (; it.IsValid(); it.Next())
+ {
+ DWORD numDwords = 0;
+ IsCrossAppDomainOptimizableWrapper(it.GetMethodDesc(), &numDwords);
+ }
+}
+#endif // FEATURE_REMOTING
+
+bool Module::AreAllClassesFullyLoaded()
+{
+ STANDARD_VM_CONTRACT;
+
+ // Adjust for unused space
+ IMDInternalImport *pImport = GetMDImport();
+
+ HENUMInternalHolder hEnum(pImport);
+ hEnum.EnumAllInit(mdtTypeDef);
+
+ mdTypeDef token;
+ while (pImport->EnumNext(&hEnum, &token))
+ {
+ _ASSERTE(TypeFromToken(token) == mdtTypeDef);
+
+ // Special care has to be taken with COR_GLOBAL_PARENT_TOKEN, as the class
+ // may not be needed (but we have to distinguish between not needed and threw an error).
+ if (token == COR_GLOBAL_PARENT_TOKEN &&
+ !NeedsGlobalMethodTable())
+ {
+ // No EEClass for this token if there was no need for a global method table
+ continue;
+ }
+
+ TypeHandle th = LookupTypeDef(token);
+ if (th.IsNull())
+ return false;
+
+ if (!th.AsMethodTable()->IsFullyLoaded())
+ return false;
+ }
+
+ return true;
+}
+
+void Module::PrepareTypesForSave(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+ // Prepare typedefs
+ //
+ {
+ LookupMap<PTR_MethodTable>::Iterator typeDefIter(&m_TypeDefToMethodTableMap);
+ while (typeDefIter.Next())
+ {
+ MethodTable * pMT = typeDefIter.GetElement();
+
+ if (pMT == NULL || !pMT->IsFullyLoaded())
+ continue;
+
+#ifdef FEATURE_REMOTING
+ PrepareRemotableMethodInfo(pMT);
+#endif // FEATURE_REMOTING
+
+ // If this module defines any CriticalFinalizerObject derived classes,
+ // then we'll prepare these types for Constrained Execution Regions (CER) now.
+ // (Normally they're prepared at object instantiation time, a little too late for ngen).
+ PrepareCriticalType(pMT);
+ }
+ }
+
+ //
+ // Prepare typespecs
+ //
+ {
+ // Create a local copy in case the new elements are added to the hashtable during population
+ InlineSArray<TypeHandle, 20> pTypes;
+
+ // Make sure the iterator is destroyed before there is a chance of loading new types
+ {
+ EETypeHashTable::Iterator it(m_pAvailableParamTypes);
+ EETypeHashEntry *pEntry;
+ while (m_pAvailableParamTypes->FindNext(&it, &pEntry))
+ {
+ TypeHandle t = pEntry->GetTypeHandle();
+
+ if (t.IsTypeDesc())
+ continue;
+
+ if (!image->GetPreloader()->IsTypeInTransitiveClosureOfInstantiations(CORINFO_CLASS_HANDLE(t.AsPtr())))
+ continue;
+
+ pTypes.Append(t);
+ }
+ }
+
+#ifdef FEATURE_REMOTING
+ for(COUNT_T i = 0; i < pTypes.GetCount(); i ++)
+ {
+ MethodTable * pMT = pTypes[i].AsMethodTable();
+
+ PrepareRemotableMethodInfo(pMT);
+
+ // @todo: prepare critical instantiated types?
+ }
+#endif // FEATURE_REMOTING
+ }
+
+ image->GetPreloader()->TriageForZap(FALSE, FALSE);
+}
+
+static const char* const MethodTableRestoreReasonDescription[TotalMethodTables + 1] =
+{
+ #undef RESTORE_REASON_FUNC
+ #define RESTORE_REASON_FUNC(s) #s,
+
+ METHODTABLE_RESTORE_REASON()
+
+ #undef RESTORE_REASON_FUNC
+
+ "TotalMethodTablesEvaluated"
+};
+
+
+// MethodDescByMethodTableTraits could be a local class in Module::Save(), but g++ doesn't like
+// instantiating templates with private classes.
+class MethodDescByMethodTableTraits : public NoRemoveSHashTraits< DefaultSHashTraits<MethodDesc *> >
+{
+public:
+ typedef MethodTable * key_t;
+ static MethodDesc * Null() { return NULL; }
+ static bool IsNull(MethodDesc * pMD) { return pMD == NULL; }
+ static MethodTable * GetKey(MethodDesc * pMD) { return pMD->GetMethodTable_NoLogging(); }
+ static count_t Hash(MethodTable * pMT) { LIMITED_METHOD_CONTRACT; return (count_t) (UINT_PTR) pMT->GetTypeDefRid_NoLogging(); }
+ static BOOL Equals(MethodTable * pMT1, MethodTable * pMT2)
+ {
+ return pMT1 == pMT2;
+ }
+};
+
+void Module::Save(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Precompute type specific auxiliary information saved into NGen image
+ // Note that this operation can load new types.
+ PrepareTypesForSave(image);
+
+ // Cache values of all persisted flags computed from custom attributes
+ IsNoStringInterning();
+ IsRuntimeWrapExceptions();
+ GetReliabilityContract();
+ IsPreV4Assembly();
+
+#ifndef FEATURE_CORECLR
+ HasDefaultDllImportSearchPathsAttribute();
+#endif
+
+ // Precompute property information to avoid runtime metadata lookup
+ PopulatePropertyInfoMap();
+
+ // Add any elements and compute values of any LookupMap flags that were not available previously
+ FinalizeLookupMapsPreSave(image);
+
+ //
+ // Save the module
+ //
+
+ ZapStoredStructure * pModuleNode = image->StoreStructure(this, sizeof(Module),
+ DataImage::ITEM_MODULE);
+
+ m_pNGenLayoutInfo = (NGenLayoutInfo *)(void *)image->GetModule()->GetLoaderAllocator()->
+ GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(NGenLayoutInfo)));
+ image->StoreStructure(m_pNGenLayoutInfo, sizeof(NGenLayoutInfo), DataImage::ITEM_BINDER_ITEMS);
+
+ //
+ // If we are NGening, we don't need to keep a list of va
+ // sig cookies, as we already have a complete set (of course we do
+ // have to persist the cookies themselves, though).
+ //
+
+ //
+ // Initialize maps of child data structures. Note that each table's blocks are
+ // concatenated into a single block in the process.
+ //
+ CorProfileData * profileData = GetProfileData();
+
+ // ngen the neutral resources culture
+ if(GetNeutralResourcesLanguage(&m_pszCultureName, &m_CultureNameLength, &m_FallbackLocation, TRUE)) {
+ image->StoreStructure((void *) m_pszCultureName,
+ (ULONG)(m_CultureNameLength + 1),
+ DataImage::ITEM_BINDER_ITEMS,
+ 1);
+ }
+
+
+ m_TypeRefToMethodTableMap.Save(image, DataImage::ITEM_TYPEREF_MAP, profileData, mdtTypeRef);
+ image->BindPointer(&m_TypeRefToMethodTableMap, pModuleNode, offsetof(Module, m_TypeRefToMethodTableMap));
+
+ if(m_pMemberRefToDescHashTable)
+ m_pMemberRefToDescHashTable->Save(image, profileData);
+
+ m_TypeDefToMethodTableMap.Save(image, DataImage::ITEM_TYPEDEF_MAP, profileData, mdtTypeDef);
+ image->BindPointer(&m_TypeDefToMethodTableMap, pModuleNode, offsetof(Module, m_TypeDefToMethodTableMap));
+
+ m_MethodDefToDescMap.Save(image, DataImage::ITEM_METHODDEF_MAP, profileData, mdtMethodDef);
+ image->BindPointer(&m_MethodDefToDescMap, pModuleNode, offsetof(Module, m_MethodDefToDescMap));
+
+ m_FieldDefToDescMap.Save(image, DataImage::ITEM_FIELDDEF_MAP, profileData, mdtFieldDef);
+ image->BindPointer(&m_FieldDefToDescMap, pModuleNode, offsetof(Module, m_FieldDefToDescMap));
+
+ m_GenericParamToDescMap.Save(image, DataImage::ITEM_GENERICPARAM_MAP, profileData, mdtGenericParam);
+ image->BindPointer(&m_GenericParamToDescMap, pModuleNode, offsetof(Module, m_GenericParamToDescMap));
+
+ m_GenericTypeDefToCanonMethodTableMap.Save(image, DataImage::ITEM_GENERICTYPEDEF_MAP, profileData, mdtTypeDef);
+ image->BindPointer(&m_GenericTypeDefToCanonMethodTableMap, pModuleNode, offsetof(Module, m_GenericTypeDefToCanonMethodTableMap));
+
+ if (m_pAvailableClasses)
+ m_pAvailableClasses->Save(image, profileData);
+
+ //
+ // Also save the parent maps; the contents will
+ // need to be rewritten, but we can allocate the
+ // space in the image.
+ //
+
+ // these items have no hot list and no attribution
+ m_FileReferencesMap.Save(image, DataImage::ITEM_FILEREF_MAP, profileData, 0);
+ image->BindPointer(&m_FileReferencesMap, pModuleNode, offsetof(Module, m_FileReferencesMap));
+
+ m_ManifestModuleReferencesMap.Save(image, DataImage::ITEM_ASSEMREF_MAP, profileData, 0);
+ image->BindPointer(&m_ManifestModuleReferencesMap, pModuleNode, offsetof(Module, m_ManifestModuleReferencesMap));
+
+ m_MethodDefToPropertyInfoMap.Save(image, DataImage::ITEM_PROPERTYINFO_MAP, profileData, 0, TRUE /*fCopyValues*/);
+ image->BindPointer(&m_MethodDefToPropertyInfoMap, pModuleNode, offsetof(Module, m_MethodDefToPropertyInfoMap));
+
+ if (m_pBinder != NULL)
+ m_pBinder->Save(image);
+
+ if (profileData)
+ {
+ // Store types.
+
+ // Saving hot things first is a very good thing: we place items
+ // in the order they are saved, and things that have hot items are also
+ // more likely to have their other structures touched, so these should
+ // also be placed together, at least absent any further information to go on.
+ // Note we place particularly hot items with more care in the Arrange phase.
+ //
+ CORBBTPROF_TOKEN_INFO * pTypeProfilingData = profileData->GetTokenFlagsData(TypeProfilingData);
+ DWORD cTypeProfilingData = profileData->GetTokenFlagsCount(TypeProfilingData);
+
+ for (unsigned int i = 0; i < cTypeProfilingData; i++)
+ {
+ CORBBTPROF_TOKEN_INFO *entry = &pTypeProfilingData[i];
+ mdToken token = entry->token;
+ DWORD flags = entry->flags;
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ g_pConfig->DebugCheckAndForceIBCFailure(EEConfig::CallSite_4);
+#endif
+
+ if ((flags & (1 << ReadMethodTable)) == 0)
+ continue;
+
+ if (TypeFromToken(token) == mdtTypeDef)
+ {
+ MethodTable *pMT = LookupTypeDef(token).GetMethodTable();
+ if (pMT && pMT->IsFullyLoaded())
+ {
+ SaveMethodTable(image, pMT, flags);
+ }
+ }
+ else if (TypeFromToken(token) == ibcTypeSpec)
+ {
+ CORBBTPROF_BLOB_ENTRY *pBlobEntry = profileData->GetBlobStream();
+ if (pBlobEntry)
+ {
+ while (pBlobEntry->TypeIsValid())
+ {
+ if (TypeFromToken(pBlobEntry->token) == ibcTypeSpec)
+ {
+ _ASSERTE(pBlobEntry->type == ParamTypeSpec);
+
+ if (pBlobEntry->token == token)
+ {
+ CORBBTPROF_BLOB_PARAM_SIG_ENTRY *pBlobSigEntry = (CORBBTPROF_BLOB_PARAM_SIG_ENTRY *) pBlobEntry;
+ TypeHandle th = LoadIBCTypeHelper(pBlobSigEntry);
+
+ if (!th.IsNull())
+ {
+ // When we have stale IBC data the type could have been rejected from this image.
+ if (image->GetPreloader()->IsTypeInTransitiveClosureOfInstantiations(CORINFO_CLASS_HANDLE(th.AsPtr())))
+ {
+ SaveTypeHandle(image, th, flags);
+ }
+ }
+ }
+ }
+ pBlobEntry = pBlobEntry->GetNextEntry();
+ }
+ _ASSERTE(pBlobEntry->type == EndOfBlobStream);
+ }
+ }
+ }
+
+ if (m_pAvailableParamTypes != NULL)
+ {
+ // If we have V1 IBC data then we save the hot
+ // out-of-module generic instantiations here
+
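+ // V1 IBC identifies these instantiations by a 16-bit hash of the type's
+ // full name (see tokenName.Hash() & 0xffff below) rather than by a
+ // signature blob, so we sort the profile tokens and binary-search them
+ // by that hash for each candidate instantiation.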
+ CORBBTPROF_TOKEN_INFO * tokens_begin = profileData->GetTokenFlagsData(GenericTypeProfilingData);
+ CORBBTPROF_TOKEN_INFO * tokens_end = tokens_begin + profileData->GetTokenFlagsCount(GenericTypeProfilingData);
+
+ if (tokens_begin != tokens_end)
+ {
+ SArray<CORBBTPROF_TOKEN_INFO> tokens(tokens_begin, tokens_end);
+ tokens_begin = &tokens[0];
+ tokens_end = tokens_begin + tokens.GetCount();
+
+ util::sort(tokens_begin, tokens_end);
+
+ // enumerate AvailableParamTypes map and find all hot generic instantiations
+ EETypeHashTable::Iterator it(m_pAvailableParamTypes);
+ EETypeHashEntry *pEntry;
+ while (m_pAvailableParamTypes->FindNext(&it, &pEntry))
+ {
+ TypeHandle t = pEntry->GetTypeHandle();
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ g_pConfig->DebugCheckAndForceIBCFailure(EEConfig::CallSite_5);
+#endif
+
+ if (t.HasInstantiation())
+ {
+ SString tokenName;
+ t.GetName(tokenName);
+ unsigned cur_token = tokenName.Hash() & 0xffff;
+
+ CORBBTPROF_TOKEN_INFO * found = util::lower_bound(tokens_begin, tokens_end, CORBBTPROF_TOKEN_INFO(cur_token));
+ if (found != tokens_end && found->token == cur_token && (found->flags & (1 << ReadMethodTable)))
+ {
+ // When we have stale IBC data the type could have been rejected from this image.
+ if (image->GetPreloader()->IsTypeInTransitiveClosureOfInstantiations(CORINFO_CLASS_HANDLE(t.AsPtr())))
+ SaveTypeHandle(image, t, found->flags);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ //
+ // Now save any types in the TypeDefToMethodTableMap map
+
+ {
+ LookupMap<PTR_MethodTable>::Iterator typeDefIter(&m_TypeDefToMethodTableMap);
+
+ while (typeDefIter.Next())
+ {
+ MethodTable * pMT = typeDefIter.GetElement();
+
+ if (pMT != NULL &&
+ !image->IsStored(pMT) && pMT->IsFullyLoaded())
+ {
+ image->BeginAssociatingStoredObjectsWithMethodTable(pMT);
+ SaveMethodTable(image, pMT, 0);
+ image->EndAssociatingStoredObjectsWithMethodTable();
+ }
+ }
+ }
+
+ //
+ // Now save any TypeDescs in m_GenericParamToDescMap map
+
+ {
+ LookupMap<PTR_TypeVarTypeDesc>::Iterator genericParamIter(&m_GenericParamToDescMap);
+
+ while (genericParamIter.Next())
+ {
+ TypeVarTypeDesc *pTD = genericParamIter.GetElement();
+
+ if (pTD != NULL)
+ {
+ pTD->Save(image);
+ }
+ }
+ }
+
+#ifdef _DEBUG
+ SealGenericTypesAndMethods();
+#endif
+
+ //
+ // Now save any types in the AvailableParamTypes map
+ //
+ if (m_pAvailableParamTypes != NULL)
+ {
+ EETypeHashTable::Iterator it(m_pAvailableParamTypes);
+ EETypeHashEntry *pEntry;
+ while (m_pAvailableParamTypes->FindNext(&it, &pEntry))
+ {
+ TypeHandle t = pEntry->GetTypeHandle();
+
+ if (image->GetPreloader()->IsTypeInTransitiveClosureOfInstantiations(CORINFO_CLASS_HANDLE(t.AsPtr())))
+ {
+ if (t.GetCanonicalMethodTable() != NULL)
+ {
+ image->BeginAssociatingStoredObjectsWithMethodTable(t.GetCanonicalMethodTable());
+ SaveTypeHandle(image, t, 0);
+ image->EndAssociatingStoredObjectsWithMethodTable();
+ }
+ else
+ {
+ SaveTypeHandle(image, t, 0);
+ }
+ }
+ }
+ }
+
+ //
+ // Now save any methods in the InstMethodHashTable
+ //
+ if (m_pInstMethodHashTable != NULL)
+ {
+ //
+ // Find all MethodDescs that we are going to save, and hash them with MethodTable as the key
+ //
+
+ typedef SHash<MethodDescByMethodTableTraits> MethodDescByMethodTableHash;
+
+ MethodDescByMethodTableHash methodDescs;
+
+ InstMethodHashTable::Iterator it(m_pInstMethodHashTable);
+ InstMethodHashEntry *pEntry;
+ while (m_pInstMethodHashTable->FindNext(&it, &pEntry))
+ {
+ MethodDesc *pMD = pEntry->GetMethod();
+
+ _ASSERTE(!pMD->IsTightlyBoundToMethodTable());
+
+ if (!image->IsStored(pMD) &&
+ image->GetPreloader()->IsMethodInTransitiveClosureOfInstantiations(CORINFO_METHOD_HANDLE(pMD)))
+ {
+ methodDescs.Add(pMD);
+ }
+ }
+
+ //
+ // Save all MethodDescs on the same MethodTable using one chunk builder
+ //
+
+ for (MethodDescByMethodTableHash::Iterator i1 = methodDescs.Begin(), end1 = methodDescs.End(); i1 != end1; i1++)
+ {
+ MethodDesc * pMD = *(i1);
+ if (image->IsStored(pMD))
+ continue;
+
+ MethodTable * pMT = pMD->GetMethodTable();
+
+ MethodDesc::SaveChunk methodDescSaveChunk(image);
+
+ for (MethodDescByMethodTableHash::KeyIterator i2 = methodDescs.Begin(pMT), end2 = methodDescs.End(pMT); i2 != end2; i2++)
+ {
+ _ASSERTE(!image->IsStored(*i2));
+ methodDescSaveChunk.Append(*i2);
+ }
+
+ methodDescSaveChunk.Save();
+ }
+ }
+
+ // Now save the tables themselves
+ if (m_pAvailableParamTypes != NULL)
+ {
+ m_pAvailableParamTypes->Save(image, this, profileData);
+ }
+
+ if (m_pInstMethodHashTable != NULL)
+ {
+ m_pInstMethodHashTable->Save(image, profileData);
+ }
+
+ {
+ MethodTable * pStubMT = GetILStubCache()->GetStubMethodTable();
+ if (pStubMT != NULL)
+ {
+ SaveMethodTable(image, pStubMT, 0);
+ }
+ }
+
+ if (m_pStubMethodHashTable != NULL)
+ {
+ m_pStubMethodHashTable->Save(image, profileData);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // the type saving operations above had the side effect of populating m_pGuidToTypeHash
+ if (m_pGuidToTypeHash != NULL)
+ {
+ m_pGuidToTypeHash->Save(image, profileData);
+ }
+#endif // FEATURE_COMINTEROP
+
+ // Compute and save the property name set
+ PrecomputeMatchingProperties(image);
+ image->StoreStructure(m_propertyNameSet,
+ m_nPropertyNameSet * sizeof(BYTE),
+ DataImage::ITEM_PROPERTY_NAME_SET);
+
+ // Save Constrained Execution Region (CER) fixup information (used to eagerly fixup trees of methods to avoid any runtime
+ // induced failures when invoking the tree).
+ if (m_pCerNgenRootTable != NULL)
+ m_pCerNgenRootTable->Save(image, profileData);
+
+ // Sort the list of RVA statics in an ascending order wrt the RVA
+ // and save them.
+ image->SaveRvaStructure();
+
+ // Save static data
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Saving module static data\n"));
+
+ // There is a scenario where ngen fails to load some classes but still generates
+ // a valid exe, or chooses not to save some loaded classes due to error
+ // conditions; statics are then committed at runtime for the classes that ngen
+ // wasn't able to load or save. So we can't blindly cut down the static block size if we've
+ // failed to load or save any class. This scenario doesn't deserve complicated code
+ // paths just to get the extra working-set perf (you would be pulling in the JIT if
+ // you needed any of these classes), so we simplify: if we failed to load or save
+ // any class, we won't compress the statics block and will persist the original
+ // estimate.
+
+ // All classes were loaded and saved, cut down the block
+ if (AreAllClassesFullyLoaded())
+ {
+ // Set a mark indicating we had all our classes loaded
+ m_pRegularStaticOffsets = (PTR_DWORD) NGEN_STATICS_ALLCLASSES_WERE_LOADED;
+ m_pThreadStaticOffsets = (PTR_DWORD) NGEN_STATICS_ALLCLASSES_WERE_LOADED;
+ }
+ else
+ {
+ // Since not all of the classes loaded we want to zero the pointers to the offset tables so they'll be
+ // recalculated at runtime. But we can't do that here since we might try to reload some of the failed
+ // types during the arrange phase (as the result of trying to parse profiling data). So we'll defer
+ // zero'ing anything until the fixup phase.
+
+ // Not all classes were stored, revert to uncompressed maps to support run-time changes
+ m_TypeDefToMethodTableMap.ConvertSavedMapToUncompressed(image, DataImage::ITEM_TYPEDEF_MAP);
+ m_MethodDefToDescMap.ConvertSavedMapToUncompressed(image, DataImage::ITEM_METHODDEF_MAP);
+ }
+
+ m_ModuleCtorInfo.Save(image, profileData);
+ image->BindPointer(&m_ModuleCtorInfo, pModuleNode, offsetof(Module, m_ModuleCtorInfo));
+
+ if (m_pDynamicStaticsInfo)
+ {
+ image->StoreStructure(m_pDynamicStaticsInfo, m_maxDynamicEntries*sizeof(DynamicStaticsInfo),
+ DataImage::ITEM_DYNAMIC_STATICS_INFO_TABLE);
+ }
+
+ // save the module security descriptor
+ if (m_pModuleSecurityDescriptor)
+ {
+ m_pModuleSecurityDescriptor->Save(image);
+ }
+
+ InlineTrackingMap *inlineTrackingMap = image->GetInlineTrackingMap();
+ if (inlineTrackingMap)
+ {
+ m_persistentInlineTrackingMap = new (image->GetHeap()) PersistentInlineTrackingMap(this);
+ m_persistentInlineTrackingMap->Save(image, inlineTrackingMap);
+ }
+
+ if (m_pNgenStats && g_CorCompileVerboseLevel >= CORCOMPILE_STATS)
+ {
+ GetSvcLogger()->Printf ("%-35s: %s\n", "MethodTable Restore Reason", "Count");
+ DWORD dwTotal = 0;
+ for (int i=0; i<TotalMethodTables; i++)
+ {
+ GetSvcLogger()->Printf ("%-35s: %d\n", MethodTableRestoreReasonDescription[i], m_pNgenStats->MethodTableRestoreNumReasons[i]);
+ dwTotal += m_pNgenStats->MethodTableRestoreNumReasons[i];
+ }
+ GetSvcLogger()->Printf ("%-35s: %d\n", "TotalMethodTablesNeedRestore", dwTotal);
+ GetSvcLogger()->Printf ("%-35s: %d\n", MethodTableRestoreReasonDescription[TotalMethodTables], m_pNgenStats->MethodTableRestoreNumReasons[TotalMethodTables]);
+ }
+}
+
+
+#ifdef _DEBUG
+//
+// We call these methods to seal the
+// lists: m_pAvailableClasses and m_pAvailableParamTypes
+//
+void Module::SealGenericTypesAndMethods()
+{
+ LIMITED_METHOD_CONTRACT;
+ // Enforce that after this point in ngen that no more types or methods will be loaded.
+ //
+ // We increment the seal count here and only decrement it after we have completed the ngen image
+ //
+ if (m_pAvailableParamTypes != NULL)
+ {
+ m_pAvailableParamTypes->Seal();
+ }
+ if (m_pInstMethodHashTable != NULL)
+ {
+ m_pInstMethodHashTable->Seal();
+ }
+}
+//
+// We call these methods to unseal the
+// lists: m_pAvailableClasses and m_pAvailableParamTypes
+//
+void Module::UnsealGenericTypesAndMethods()
+{
+ LIMITED_METHOD_CONTRACT;
+ // Allow us to create generic types and methods again
+ //
+ // We only decrement it after we have completed the ngen image
+ //
+ if (m_pAvailableParamTypes != NULL)
+ {
+ m_pAvailableParamTypes->Unseal();
+ }
+ if (m_pInstMethodHashTable != NULL)
+ {
+ m_pInstMethodHashTable->Unseal();
+ }
+}
+#endif
+
+
+void Module::PrepopulateDictionaries(DataImage *image, BOOL nonExpansive)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Prepopulating the dictionaries for instantiated types
+ // is in theory an iterative process, i.e. filling in
+ // a dictionary slot may result in a class load of a new type whose
+ // dictionary may itself need to be prepopulated. The type expressions
+ // involved can get larger, so there's no a priori reason to expect this
+ // process to terminate.
+ //
+ // Given a starting set of instantiated types, several strategies are
+ // thus possible - no prepopulation (call this PP0), or
+ // prepopulate only the dictionaries of the types that are in the initial
+ // set (call this PP1), or do two iterations (call this PP2) etc. etc.
+ // Whichever strategy we choose we can always afford to do
+ // one round of prepopulation where we populate slots
+ // whose corresponding resulting method/types are already loaded.
+ // Call this PPn+PP-FINAL.
+ //
+ // Below we implement PP1+PP-FINAL for instantiated types and PP0+PP-FINAL
+ // for instantiations of generic methods. We use PP1 because most collection
+ // classes (List, Dictionary etc.) only require one pass of prepopulation in order
+ // to fully prepopulate the dictionary.
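+ //
+ // Schematically (a sketch of the strategy, not a separate code path):
+ //
+ // PP1: for each instantiated type T saved so far:
+ // populate T's dictionary, allowing new types to load
+ // PP-FINAL: for each instantiated type and generic method now present:
+ // populate only slots whose targets are already loaded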
+
+ // Do PP1 for instantiated types... Do one iteration where we force type loading...
+ // Because this phase may cause new entries to appear in the hash table we
+ // copy the array of types to the stack before we do anything else.
+ if (!nonExpansive && CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_Prepopulate1))
+ {
+ if (m_pAvailableParamTypes != NULL)
+ {
+ // Create a local copy in case the new elements are added to the hashtable during population
+ InlineSArray<TypeHandle, 20> pTypes;
+
+ EETypeHashTable::Iterator it(m_pAvailableParamTypes);
+ EETypeHashEntry *pEntry;
+ while (m_pAvailableParamTypes->FindNext(&it, &pEntry))
+ {
+ TypeHandle th = pEntry->GetTypeHandle();
+ if (th.IsTypeDesc())
+ continue;
+
+ // Don't do prepopulation for open types - they shouldn't really have dictionaries anyway.
+ MethodTable * pMT = th.AsMethodTable();
+ if (pMT->ContainsGenericVariables())
+ continue;
+
+ // Only do PP1 on things that land in their preferred Zap module.
+ // Forcing the load of dictionary entries in the case where we are
+ // speculatively saving a copy of an instantiation outside its preferred
+ // zap module is too expensive for the common collection class cases.
+ //
+ // Invalid generic instantiations will not be fully loaded.
+ // We want to ignore them as touching them will re-raise the TypeLoadException
+ if (pMT->IsFullyLoaded() && image->GetModule() == GetPreferredZapModuleForMethodTable(pMT))
+ {
+ pTypes.Append(th);
+ }
+ }
+ it.Reset();
+
+ for(COUNT_T i = 0; i < pTypes.GetCount(); i ++)
+ {
+ TypeHandle th = pTypes[i];
+ _ASSERTE(image->GetModule() == GetPreferredZapModuleForTypeHandle(th) );
+ _ASSERTE(!th.IsTypeDesc() && !th.ContainsGenericVariables());
+ th.AsMethodTable()->PrepopulateDictionary(image, FALSE /* not nonExpansive, i.e. can load types */);
+ }
+ }
+ }
+
+ // PP-FINAL for instantiated types.
+ // This is the final stage where we hardbind any remaining entries that map
+ // to results that have already been loaded...
+ // Thus we set the "nonExpansive" flag on PrepopulateDictionary
+ // below, which may in turn greatly limit the amount of prepopulating we do
+ // (partly because it's quite difficult to determine if some potential entries
+ // in the dictionary are already loaded)
+
+ if (m_pAvailableParamTypes != NULL)
+ {
+ INDEBUG(DWORD nTypes = m_pAvailableParamTypes->GetCount());
+
+ EETypeHashTable::Iterator it(m_pAvailableParamTypes);
+ EETypeHashEntry *pEntry;
+ while (m_pAvailableParamTypes->FindNext(&it, &pEntry))
+ {
+ TypeHandle th = pEntry->GetTypeHandle();
+ if (th.IsTypeDesc())
+ continue;
+
+ MethodTable * pMT = th.AsMethodTable();
+ if (pMT->ContainsGenericVariables())
+ continue;
+
+ pMT->PrepopulateDictionary(image, TRUE /* nonExpansive */);
+ }
+
+ // No new instantiations should be added by nonExpansive prepopulation
+ _ASSERTE(nTypes == m_pAvailableParamTypes->GetCount());
+ }
+
+ // PP-FINAL for instantiations of generic methods.
+ if (m_pInstMethodHashTable != NULL)
+ {
+ INDEBUG(DWORD nMethods = m_pInstMethodHashTable->GetCount());
+
+ InstMethodHashTable::Iterator it(m_pInstMethodHashTable);
+ InstMethodHashEntry *pEntry;
+ while (m_pInstMethodHashTable->FindNext(&it, &pEntry))
+ {
+ MethodDesc *pMD = pEntry->GetMethod();
+ if (!pMD->ContainsGenericVariables())
+ {
+ pMD->PrepopulateDictionary(image, TRUE /* nonExpansive */);
+ }
+ }
+
+ // No new instantiations should be added by nonExpansive prepopulation
+ _ASSERTE(nMethods == m_pInstMethodHashTable->GetCount());
+ }
+}
+
+void Module::PlaceType(DataImage *image, TypeHandle th, DWORD profilingFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (th.IsNull())
+ return;
+
+ MethodTable *pMT = th.GetMethodTable();
+
+ if (pMT && pMT->GetLoaderModule() == this)
+ {
+ EEClass *pClass = pMT->GetClass();
+
+ if (profilingFlags & (1 << WriteMethodTableWriteableData))
+ {
+ image->PlaceStructureForAddress(pMT->GetWriteableData(),CORCOMPILE_SECTION_WRITE);
+ }
+
+ if (profilingFlags & (1 << ReadMethodTable))
+ {
+ CorCompileSection section = CORCOMPILE_SECTION_READONLY_HOT;
+ if (pMT->IsWriteable())
+ section = CORCOMPILE_SECTION_HOT_WRITEABLE;
+ image->PlaceStructureForAddress(pMT, section);
+
+ if (pMT->HasInterfaceMap())
+ image->PlaceInternedStructureForAddress(pMT->GetInterfaceMap(), CORCOMPILE_SECTION_READONLY_SHARED_HOT, CORCOMPILE_SECTION_READONLY_HOT);
+
+ MethodTable::VtableIndirectionSlotIterator it = pMT->IterateVtableIndirectionSlots();
+ while (it.Next())
+ {
+ image->PlaceInternedStructureForAddress(it.GetIndirectionSlot(), CORCOMPILE_SECTION_READONLY_SHARED_HOT, CORCOMPILE_SECTION_READONLY_HOT);
+ }
+
+ image->PlaceStructureForAddress(pMT->GetWriteableData(), CORCOMPILE_SECTION_HOT);
+ }
+
+ if (profilingFlags & (1 << ReadNonVirtualSlots))
+ {
+ if (pMT->HasNonVirtualSlotsArray())
+ image->PlaceStructureForAddress(pMT->GetNonVirtualSlotsArray(), CORCOMPILE_SECTION_READONLY_HOT);
+ }
+
+ if (profilingFlags & (1 << ReadDispatchMap) && pMT->HasDispatchMapSlot())
+ {
+ image->PlaceInternedStructureForAddress(pMT->GetDispatchMap(), CORCOMPILE_SECTION_READONLY_SHARED_HOT, CORCOMPILE_SECTION_READONLY_HOT);
+ }
+
+ if (profilingFlags & (1 << WriteEEClass))
+ {
+ image->PlaceStructureForAddress(pClass, CORCOMPILE_SECTION_WRITE);
+
+ if (pClass->HasOptionalFields())
+ image->PlaceStructureForAddress(pClass->GetOptionalFields(), CORCOMPILE_SECTION_WRITE);
+ }
+
+ else if (profilingFlags & (1 << ReadEEClass))
+ {
+ image->PlaceStructureForAddress(pClass, CORCOMPILE_SECTION_HOT);
+
+ if (pClass->HasOptionalFields())
+ image->PlaceStructureForAddress(pClass->GetOptionalFields(), CORCOMPILE_SECTION_HOT);
+
+ if (pClass->GetVarianceInfo() != NULL)
+ image->PlaceInternedStructureForAddress(pClass->GetVarianceInfo(), CORCOMPILE_SECTION_READONLY_WARM, CORCOMPILE_SECTION_READONLY_WARM);
+
+#ifdef FEATURE_COMINTEROP
+ if (pClass->GetSparseCOMInteropVTableMap() != NULL)
+ {
+ image->PlaceStructureForAddress(pClass->GetSparseCOMInteropVTableMap(), CORCOMPILE_SECTION_WARM);
+ image->PlaceInternedStructureForAddress(pClass->GetSparseCOMInteropVTableMap()->GetMapList(), CORCOMPILE_SECTION_READONLY_WARM, CORCOMPILE_SECTION_READONLY_WARM);
+ }
+#endif
+ }
+
+ if (profilingFlags & (1 << ReadFieldDescs))
+ {
+ image->PlaceStructureForAddress(pMT->GetApproxFieldDescListRaw(), CORCOMPILE_SECTION_READONLY_HOT);
+ }
+
+ if (profilingFlags != 0)
+ {
+ if (pMT->HasPerInstInfo())
+ {
+ Dictionary ** pPerInstInfo = pMT->GetPerInstInfo();
+
+ BOOL fIsEagerBound = pMT->CanEagerBindToParentDictionaries(image, NULL);
+
+ if (fIsEagerBound)
+ {
+ image->PlaceInternedStructureForAddress(pPerInstInfo, CORCOMPILE_SECTION_READONLY_SHARED_HOT, CORCOMPILE_SECTION_READONLY_HOT);
+ }
+ else
+ {
+ image->PlaceStructureForAddress(pPerInstInfo, CORCOMPILE_SECTION_WRITE);
+ }
+ }
+
+ Dictionary * pDictionary = pMT->GetDictionary();
+ if (pDictionary != NULL)
+ {
+ BOOL fIsWriteable;
+
+ if (!pMT->IsCanonicalMethodTable())
+ {
+ // CanEagerBindToMethodTable would not work for targeted patching here. The dictionary
+ // layout is sensitive to compilation order that can be changed by TP compatible changes.
+ BOOL canSaveSlots = (image->GetModule() == pMT->GetCanonicalMethodTable()->GetLoaderModule());
+
+ fIsWriteable = pDictionary->IsWriteable(image, canSaveSlots,
+ pMT->GetNumGenericArgs(),
+ pMT->GetModule(),
+ pClass->GetDictionaryLayout());
+ }
+ else
+ {
+ fIsWriteable = FALSE;
+ }
+
+ if (fIsWriteable)
+ {
+ image->PlaceStructureForAddress(pDictionary, CORCOMPILE_SECTION_HOT_WRITEABLE);
+ image->PlaceStructureForAddress(pClass->GetDictionaryLayout(), CORCOMPILE_SECTION_WARM);
+ }
+ else
+ {
+ image->PlaceInternedStructureForAddress(pDictionary, CORCOMPILE_SECTION_READONLY_SHARED_HOT, CORCOMPILE_SECTION_READONLY_HOT);
+ }
+ }
+ }
+
+ if (profilingFlags & (1 << ReadFieldMarshalers))
+ {
+ if (pClass->HasLayout() && pClass->GetLayoutInfo()->GetNumCTMFields() > 0)
+ {
+ image->PlaceStructureForAddress((void *)pClass->GetLayoutInfo()->GetFieldMarshalers(), CORCOMPILE_SECTION_HOT);
+ }
+ }
+ }
+ if (th.IsTypeDesc())
+ {
+ if (profilingFlags & (1 << WriteTypeDesc))
+ image->PlaceStructureForAddress(th.AsTypeDesc(), CORCOMPILE_SECTION_WRITE);
+ else if (profilingFlags & (1 << ReadTypeDesc))
+ image->PlaceStructureForAddress(th.AsTypeDesc(), CORCOMPILE_SECTION_HOT);
+ else
+ image->PlaceStructureForAddress(th.AsTypeDesc(), CORCOMPILE_SECTION_WARM);
+ }
+}
+
+void Module::PlaceMethod(DataImage *image, MethodDesc *pMD, DWORD profilingFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pMD == NULL)
+ return;
+
+ if (pMD->GetLoaderModule() != this)
+ return;
+
+ if (profilingFlags & (1 << ReadMethodCode))
+ {
+ if (pMD->IsNDirect())
+ {
+ NDirectMethodDesc *pNMD = (NDirectMethodDesc *)pMD;
+ image->PlaceStructureForAddress((void*) pNMD->GetWriteableData(), CORCOMPILE_SECTION_WRITE);
+
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ // The NDirect import thunk glue is used only if no marshaling is required
+ if (!pNMD->MarshalingRequired())
+ {
+ image->PlaceStructureForAddress((void*) pNMD->GetNDirectImportThunkGlue(), CORCOMPILE_SECTION_METHOD_PRECODE_HOT);
+ }
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+
+ // Late bound NDirect methods require their LibName at startup.
+ if (!pNMD->IsQCall())
+ {
+ image->PlaceStructureForAddress((void*) pNMD->GetLibName(), CORCOMPILE_SECTION_READONLY_HOT);
+ image->PlaceStructureForAddress((void*) pNMD->GetEntrypointName(), CORCOMPILE_SECTION_READONLY_HOT);
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (pMD->IsComPlusCall())
+ {
+ ComPlusCallMethodDesc *pCMD = (ComPlusCallMethodDesc *)pMD;
+
+ // If the ComPlusCallMethodDesc was actually used for interop, its ComPlusCallInfo should be hot.
+ image->PlaceStructureForAddress((void*) pCMD->m_pComPlusCallInfo, CORCOMPILE_SECTION_HOT);
+ }
+#endif // FEATURE_COMINTEROP
+
+ // Stubs-as-IL sometimes have writeable signatures, so we can't place them
+ // into a read-only section. We should not get here for stubs-as-IL anyway,
+ // but we filter them out just to be sure.
+ if (pMD->HasStoredSig() && !pMD->IsILStub())
+ {
+ StoredSigMethodDesc *pSMD = (StoredSigMethodDesc*) pMD;
+
+ if (pSMD->HasStoredMethodSig())
+ {
+ image->PlaceInternedStructureForAddress((void*) pSMD->GetStoredMethodSig(), CORCOMPILE_SECTION_READONLY_SHARED_HOT, CORCOMPILE_SECTION_READONLY_HOT);
+ }
+ }
+ }
+
+ // We store the entire hot chunk in the SECTION_WRITE section
+ if (profilingFlags & (1 << WriteMethodDesc))
+ {
+ image->PlaceStructureForAddress(pMD, CORCOMPILE_SECTION_WRITE);
+ }
+
+ if (profilingFlags & (1 << ReadCerMethodList))
+ {
+ // protect against stale IBC data
+ // Check if the profiling data incorrectly set the ReadCerMethodList bit.
+ // This is more likely to happen with incremental IBC.
+ if ((m_pCerNgenRootTable != NULL) && m_pCerNgenRootTable->IsNgenRootMethod(pMD))
+ {
+ image->PlaceStructureForAddress(m_pCerNgenRootTable->GetList(pMD), CORCOMPILE_SECTION_HOT);
+ }
+ }
+
+ if (profilingFlags & (1 << WriteMethodPrecode))
+ {
+ Precode* pPrecode = pMD->GetSavedPrecodeOrNull(image);
+ // protect against stale IBC data
+ if (pPrecode != NULL)
+ {
+ CorCompileSection section = CORCOMPILE_SECTION_METHOD_PRECODE_WRITE;
+ if (pPrecode->IsPrebound(image))
+ section = CORCOMPILE_SECTION_METHOD_PRECODE_HOT;
+ // Note: This is going to place the entire PRECODE_FIXUP chunk if we have one
+ image->PlaceStructureForAddress(pPrecode, section);
+ }
+ }
+ else if (profilingFlags & (1 << ReadMethodPrecode))
+ {
+ Precode* pPrecode = pMD->GetSavedPrecodeOrNull(image);
+ // protect against stale IBC data
+ if (pPrecode != NULL)
+ {
+ // Note: This is going to place the entire PRECODE_FIXUP chunk if we have one
+ image->PlaceStructureForAddress(pPrecode, CORCOMPILE_SECTION_METHOD_PRECODE_HOT);
+ }
+ }
+}
+
+void Module::Arrange(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ // We collect IBC logging profiling data and use that to guide the layout of the image.
+ image->PlaceStructureForAddress(this, CORCOMPILE_SECTION_MODULE);
+
+ // The stub method table is shared by all IL stubs in the module, so place it into the hot section
+ MethodTable * pStubMT = GetILStubCache()->GetStubMethodTable();
+ if (pStubMT != NULL)
+ PlaceType(image, pStubMT, ReadMethodTable);
+
+ CorProfileData * profileData = GetProfileData();
+ if (profileData)
+ {
+ //
+ // Place hot type structures in the order specified by the TypeProfilingData array
+ //
+ CORBBTPROF_TOKEN_INFO * pTypeProfilingData = profileData->GetTokenFlagsData(TypeProfilingData);
+ DWORD cTypeProfilingData = profileData->GetTokenFlagsCount(TypeProfilingData);
+ for (unsigned int i = 0; (i < cTypeProfilingData); i++)
+ {
+ CORBBTPROF_TOKEN_INFO * entry = &pTypeProfilingData[i];
+ mdToken token = entry->token;
+ DWORD flags = entry->flags;
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ g_pConfig->DebugCheckAndForceIBCFailure(EEConfig::CallSite_6);
+#endif
+
+ if (TypeFromToken(token) == mdtTypeDef)
+ {
+ TypeHandle th = LookupTypeDef(token);
+ //
+ // Place a hot normal type and its data
+ //
+ PlaceType(image, th, flags);
+ }
+ else if (TypeFromToken(token) == ibcTypeSpec)
+ {
+ CORBBTPROF_BLOB_PARAM_SIG_ENTRY *pBlobSigEntry = profileData->GetBlobSigEntry(token);
+
+ if (pBlobSigEntry == NULL)
+ {
+ //
+ // Print an error message for the missing type token in the profile data
+ //
+ StackSString msg(W("Did not find definition for type token "));
+
+ char buff[16];
+ sprintf_s(buff, COUNTOF(buff), "%08x", token);
+ StackSString szToken(SString::Ascii, &buff[0]);
+ msg += szToken;
+ msg += W(" in profile data.\n");
+
+ GetSvcLogger()->Log(msg, LogLevel_Info);
+ }
+ else // (pBlobSigEntry != NULL)
+ {
+ _ASSERTE(pBlobSigEntry->blob.token == token);
+ //
+ // decode generic type signature
+ //
+ TypeHandle th = LoadIBCTypeHelper(pBlobSigEntry);
+
+ //
+ // Place a hot instantiated type and its data
+ //
+ PlaceType(image, th, flags);
+ }
+ }
+ else if (TypeFromToken(token) == mdtFieldDef)
+ {
+ FieldDesc *pFD = LookupFieldDef(token);
+ if (pFD && pFD->IsILOnlyRVAField())
+ {
+ if (entry->flags & (1 << RVAFieldData))
+ {
+ BYTE *pRVAData = (BYTE*) pFD->GetStaticAddressHandle(NULL);
+ //
+ // Place a hot RVA static field
+ //
+ image->PlaceStructureForAddress(pRVAData, CORCOMPILE_SECTION_RVA_STATICS_HOT);
+ }
+ }
+ }
+ }
+
+ //
+ // Place hot methods and method data in the order specified by the MethodProfilingData array
+ //
+ CORBBTPROF_TOKEN_INFO * pMethodProfilingData = profileData->GetTokenFlagsData(MethodProfilingData);
+ DWORD cMethodProfilingData = profileData->GetTokenFlagsCount(MethodProfilingData);
+ for (unsigned int i = 0; (i < cMethodProfilingData); i++)
+ {
+ mdToken token = pMethodProfilingData[i].token;
+ DWORD profilingFlags = pMethodProfilingData[i].flags;
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ g_pConfig->DebugCheckAndForceIBCFailure(EEConfig::CallSite_7);
+#endif
+
+ if (TypeFromToken(token) == mdtMethodDef)
+ {
+ MethodDesc * pMD = LookupMethodDef(token);
+ //
+ // Place a hot normal method and its data
+ //
+ PlaceMethod(image, pMD, profilingFlags);
+ }
+ else if (TypeFromToken(token) == ibcMethodSpec)
+ {
+ CORBBTPROF_BLOB_PARAM_SIG_ENTRY *pBlobSigEntry = profileData->GetBlobSigEntry(token);
+
+ if (pBlobSigEntry == NULL)
+ {
+ //
+ // Print an error message for the missing method token in the profile data
+ //
+ StackSString msg(W("Did not find definition for method token "));
+
+ char buff[16];
+ sprintf_s(buff, COUNTOF(buff), "%08x", token);
+ StackSString szToken(SString::Ascii, &buff[0]);
+ msg += szToken;
+ msg += W(" in profile data.\n");
+
+ GetSvcLogger()->Log(msg, LogLevel_Info);
+ }
+ else // (pBlobSigEntry != NULL)
+ {
+ _ASSERTE(pBlobSigEntry->blob.token == token);
+ MethodDesc * pMD = LoadIBCMethodHelper(pBlobSigEntry);
+
+ if (pMD != NULL)
+ {
+ //
+                        // Place a hot instantiated method and its data
+ //
+ PlaceMethod(image, pMD, profilingFlags);
+ }
+ }
+ }
+ }
+ }
+
+ // Now place all remaining items
+ image->PlaceRemainingStructures();
+}
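+
+// Summary of the placement pass above (a sketch, not additional behavior):
+// profiled tokens drive the hot layout, and PlaceRemainingStructures sweeps
+// up everything the profile did not mention. Token kinds map to lookups as:
+//
+//   mdtTypeDef    (0x02xxxxxx)  -> LookupTypeDef(token)   -> PlaceType
+//   ibcTypeSpec   (blob-backed) -> LoadIBCTypeHelper      -> PlaceType
+//   mdtFieldDef   (0x04xxxxxx)  -> hot RVA static data placement
+//   mdtMethodDef  (0x06xxxxxx)  -> LookupMethodDef(token) -> PlaceMethod
+//   ibcMethodSpec (blob-backed) -> LoadIBCMethodHelper    -> PlaceMethod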
+
+void ModuleCtorInfo::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (numElementsHot > 0)
+ {
+ image->FixupPointerField(this, offsetof(ModuleCtorInfo, cctorInfoHot));
+ image->FixupPointerField(this, offsetof(ModuleCtorInfo, hotHashOffsets));
+ }
+ else
+ {
+ image->ZeroPointerField(this, offsetof(ModuleCtorInfo, cctorInfoHot));
+ image->ZeroPointerField(this, offsetof(ModuleCtorInfo, hotHashOffsets));
+ }
+
+    _ASSERTE(numElements >= numElementsHot);
+ if (numElements > numElementsHot)
+ {
+ image->FixupPointerField(this, offsetof(ModuleCtorInfo, cctorInfoCold));
+ image->FixupPointerField(this, offsetof(ModuleCtorInfo, coldHashOffsets));
+ }
+ else
+ {
+ image->ZeroPointerField(this, offsetof(ModuleCtorInfo, cctorInfoCold));
+ image->ZeroPointerField(this, offsetof(ModuleCtorInfo, coldHashOffsets));
+ }
+
+ if (numElements > 0)
+ {
+ image->FixupPointerField(this, offsetof(ModuleCtorInfo, ppMT));
+
+ for (DWORD i=0; i<numElements; i++)
+ {
+ image->FixupPointerField(ppMT, i * sizeof(ppMT[0]));
+ }
+ }
+ else
+ {
+ image->ZeroPointerField(this, offsetof(ModuleCtorInfo, ppMT));
+ }
+
+ if (numHotGCStaticsMTs > 0)
+ {
+ image->FixupPointerField(this, offsetof(ModuleCtorInfo, ppHotGCStaticsMTs));
+
+ image->BeginRegion(CORINFO_REGION_HOT);
+ for (DWORD i=0; i < numHotGCStaticsMTs; i++)
+ {
+ image->FixupMethodTablePointer(ppHotGCStaticsMTs, &ppHotGCStaticsMTs[i]);
+ }
+ image->EndRegion(CORINFO_REGION_HOT);
+ }
+ else
+ {
+ image->ZeroPointerField(this, offsetof(ModuleCtorInfo, ppHotGCStaticsMTs));
+ }
+
+ if (numColdGCStaticsMTs > 0)
+ {
+ image->FixupPointerField(this, offsetof(ModuleCtorInfo, ppColdGCStaticsMTs));
+
+ image->BeginRegion(CORINFO_REGION_COLD);
+ for (DWORD i=0; i < numColdGCStaticsMTs; i++)
+ {
+ image->FixupMethodTablePointer(ppColdGCStaticsMTs, &ppColdGCStaticsMTs[i]);
+ }
+ image->EndRegion(CORINFO_REGION_COLD);
+ }
+ else
+ {
+ image->ZeroPointerField(this, offsetof(ModuleCtorInfo, ppColdGCStaticsMTs));
+ }
+}
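+
+// Invariant behind the hot/cold split above (a reading aid): the first
+// numElementsHot entries of the cctor table are hot and the remainder are
+// cold, so the cold pointers are kept exactly when some entries are cold:
+//
+//   _ASSERTE(numElementsHot <= numElements);
+//   DWORD numElementsCold = numElements - numElementsHot;  // may be zero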
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+void Module::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Propagate all changes to the image copy
+ memcpy(image->GetImagePointer(this), this, sizeof(Module));
+
+ //
+ // Zero out VTable
+ //
+
+ image->ZeroPointerField(this, 0);
+
+ image->FixupPointerField(this, offsetof(Module, m_pNGenLayoutInfo));
+
+ image->ZeroField(this, offsetof(Module, m_pSimpleName), sizeof(m_pSimpleName));
+
+ image->ZeroField(this, offsetof(Module, m_file), sizeof(m_file));
+
+ image->FixupPointerField(this, offsetof(Module, m_pDllMain));
+
+ image->ZeroField(this, offsetof(Module, m_dwTransientFlags), sizeof(m_dwTransientFlags));
+
+ image->ZeroField(this, offsetof(Module, m_pVASigCookieBlock), sizeof(m_pVASigCookieBlock));
+ image->ZeroField(this, offsetof(Module, m_pAssembly), sizeof(m_pAssembly));
+ image->ZeroField(this, offsetof(Module, m_moduleRef), sizeof(m_moduleRef));
+
+ image->ZeroField(this, offsetof(Module, m_Crst), sizeof(m_Crst));
+ image->ZeroField(this, offsetof(Module, m_FixupCrst), sizeof(m_FixupCrst));
+
+ image->ZeroField(this, offsetof(Module, m_pProfilingBlobTable), sizeof(m_pProfilingBlobTable));
+ image->ZeroField(this, offsetof(Module, m_pProfileData), sizeof(m_pProfileData));
+ image->ZeroPointerField(this, offsetof(Module, m_pIBCErrorNameString));
+
+ image->ZeroPointerField(this, offsetof(Module, m_pNgenStats));
+
+ // fixup the pointer for NeutralResourcesLanguage, if we have it cached
+ if(!!(m_dwPersistedFlags & NEUTRAL_RESOURCES_LANGUAGE_IS_CACHED)) {
+ image->FixupPointerField(this, offsetof(Module, m_pszCultureName));
+ }
+
+ // Fixup the property name set
+ image->FixupPointerField(this, offsetof(Module, m_propertyNameSet));
+
+ //
+ // Fixup the method table
+ //
+
+ image->ZeroField(this, offsetof(Module, m_pISymUnmanagedReader), sizeof(m_pISymUnmanagedReader));
+ image->ZeroField(this, offsetof(Module, m_ISymUnmanagedReaderCrst), sizeof(m_ISymUnmanagedReaderCrst));
+
+ // Clear active dependencies - they will be refilled at load time
+ image->ZeroField(this, offsetof(Module, m_activeDependencies), sizeof(m_activeDependencies));
+ new (image->GetImagePointer(this, offsetof(Module, m_unconditionalDependencies))) SynchronizedBitMask();
+ image->ZeroField(this, offsetof(Module, m_unconditionalDependencies) + offsetof(SynchronizedBitMask, SynchronizedBitMask::m_bitMaskLock) + offsetof(SimpleRWLock,SimpleRWLock::m_spinCount), sizeof(m_unconditionalDependencies.m_bitMaskLock.m_spinCount));
+ image->ZeroField(this, offsetof(Module, m_dwNumberOfActivations), sizeof(m_dwNumberOfActivations));
+
+ image->ZeroField(this, offsetof(Module, m_LookupTableCrst), sizeof(m_LookupTableCrst));
+
+ m_TypeDefToMethodTableMap.Fixup(image);
+ m_TypeRefToMethodTableMap.Fixup(image, FALSE);
+ m_MethodDefToDescMap.Fixup(image);
+ m_FieldDefToDescMap.Fixup(image);
+ if(m_pMemberRefToDescHashTable != NULL)
+ {
+ image->FixupPointerField(this, offsetof(Module, m_pMemberRefToDescHashTable));
+ m_pMemberRefToDescHashTable->Fixup(image);
+ }
+ m_GenericParamToDescMap.Fixup(image);
+ m_GenericTypeDefToCanonMethodTableMap.Fixup(image);
+ m_FileReferencesMap.Fixup(image, FALSE);
+ m_ManifestModuleReferencesMap.Fixup(image, FALSE);
+ m_MethodDefToPropertyInfoMap.Fixup(image, FALSE);
+
+ image->ZeroPointerField(this, offsetof(Module, m_pILStubCache));
+
+ if (m_pAvailableClasses != NULL) {
+ image->FixupPointerField(this, offsetof(Module, m_pAvailableClasses));
+ m_pAvailableClasses->Fixup(image);
+ }
+
+ image->ZeroField(this, offsetof(Module, m_pAvailableClassesCaseIns), sizeof(m_pAvailableClassesCaseIns));
+ image->ZeroField(this, offsetof(Module, m_InstMethodHashTableCrst), sizeof(m_InstMethodHashTableCrst));
+
+ image->BeginRegion(CORINFO_REGION_COLD);
+
+ if (m_pAvailableParamTypes) {
+ image->FixupPointerField(this, offsetof(Module, m_pAvailableParamTypes));
+ m_pAvailableParamTypes->Fixup(image);
+ }
+
+ if (m_pInstMethodHashTable) {
+ image->FixupPointerField(this, offsetof(Module, m_pInstMethodHashTable));
+ m_pInstMethodHashTable->Fixup(image);
+ }
+
+ {
+ MethodTable * pStubMT = GetILStubCache()->GetStubMethodTable();
+ if (pStubMT != NULL)
+ pStubMT->Fixup(image);
+ }
+
+ if (m_pStubMethodHashTable) {
+ image->FixupPointerField(this, offsetof(Module, m_pStubMethodHashTable));
+ m_pStubMethodHashTable->Fixup(image);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (m_pGuidToTypeHash) {
+ image->FixupPointerField(this, offsetof(Module, m_pGuidToTypeHash));
+ m_pGuidToTypeHash->Fixup(image);
+ }
+#endif // FEATURE_COMINTEROP
+
+ image->EndRegion(CORINFO_REGION_COLD);
+
+#ifdef _DEBUG
+ //
+ // Unseal the generic tables:
+ //
+ // - We need to run managed code to serialize the Security attributes of the ngen image
+ // and we are now using generic types in the Security/Reflection code.
+ // - Compilation of other modules of multimodule assemblies may add more types
+ // to the generic tables.
+ //
+ UnsealGenericTypesAndMethods();
+#endif
+
+ m_ModuleCtorInfo.Fixup(image);
+
+ //
+ // Fixup binder
+ //
+
+ if (m_pBinder != NULL)
+ {
+ image->FixupPointerField(this, offsetof(Module, m_pBinder));
+ m_pBinder->Fixup(image);
+ }
+
+
+ //
+ // Fixup classes
+ //
+
+ {
+ LookupMap<PTR_MethodTable>::Iterator typeDefIter(&m_TypeDefToMethodTableMap);
+
+ image->BeginRegion(CORINFO_REGION_COLD);
+ while (typeDefIter.Next())
+ {
+ MethodTable * t = typeDefIter.GetElement();
+ if (image->IsStored(t))
+ t->Fixup(image);
+ }
+ image->EndRegion(CORINFO_REGION_COLD);
+ }
+
+ {
+ LookupMap<PTR_TypeRef>::Iterator typeRefIter(&m_TypeRefToMethodTableMap);
+ DWORD rid = 0;
+
+ image->BeginRegion(CORINFO_REGION_HOT);
+ while (typeRefIter.Next())
+ {
+ TADDR flags;
+ TypeHandle th = TypeHandle::FromTAddr(dac_cast<TADDR>(typeRefIter.GetElementAndFlags(&flags)));
+
+ if (!th.IsNull())
+ {
+ if (th.GetLoaderModule() != this || image->IsStored(th.AsPtr()))
+ {
+ PTR_TADDR hotItemValuePtr = m_TypeRefToMethodTableMap.FindHotItemValuePtr(rid);
+ BOOL fSet = FALSE;
+
+ if (image->CanEagerBindToTypeHandle(th))
+ {
+ if (image->CanHardBindToZapModule(th.GetLoaderModule()))
+ {
+ PVOID pTarget = th.IsTypeDesc() ? th.AsTypeDesc() : th.AsPtr();
+ SSIZE_T offset = th.IsTypeDesc() ? 2 : 0;
+
+ _ASSERTE((flags & offset) == 0);
+
+ image->FixupField(m_TypeRefToMethodTableMap.pTable, rid * sizeof(TADDR),
+ pTarget, flags | offset, IMAGE_REL_BASED_RelativePointer);
+
+ // In case this item is also in the hot item subtable, fix it up there as well
+ if (hotItemValuePtr != NULL)
+ {
+ image->FixupField(m_TypeRefToMethodTableMap.hotItemList,
+ (BYTE *)hotItemValuePtr - (BYTE *)m_TypeRefToMethodTableMap.hotItemList,
+ pTarget, flags | offset, IMAGE_REL_BASED_RelativePointer);
+ }
+ fSet = TRUE;
+ }
+ else
+ // Create the indirection only if the entry is hot or we do have indirection cell already
+ if (hotItemValuePtr != NULL || image->GetExistingTypeHandleImport(th) != NULL)
+ {
+ _ASSERTE((flags & FIXUP_POINTER_INDIRECTION) == 0);
+
+ ZapNode * pImport = image->GetTypeHandleImport(th);
+ image->FixupFieldToNode(m_TypeRefToMethodTableMap.pTable, rid * sizeof(TADDR),
+ pImport, flags | FIXUP_POINTER_INDIRECTION, IMAGE_REL_BASED_RelativePointer);
+ if (hotItemValuePtr != NULL)
+ {
+ image->FixupFieldToNode(m_TypeRefToMethodTableMap.hotItemList,
+ (BYTE *)hotItemValuePtr - (BYTE *)m_TypeRefToMethodTableMap.hotItemList,
+ pImport, flags | FIXUP_POINTER_INDIRECTION, IMAGE_REL_BASED_RelativePointer);
+ }
+ fSet = TRUE;
+ }
+ }
+
+ if (!fSet)
+ {
+ image->ZeroPointerField(m_TypeRefToMethodTableMap.pTable, rid * sizeof(TADDR));
+ // In case this item is also in the hot item subtable, fix it up there as well
+ if (hotItemValuePtr != NULL)
+ {
+ image->ZeroPointerField(m_TypeRefToMethodTableMap.hotItemList,
+ (BYTE *)hotItemValuePtr - (BYTE *)m_TypeRefToMethodTableMap.hotItemList);
+ }
+ }
+ }
+ }
+
+ rid++;
+ }
+ image->EndRegion(CORINFO_REGION_HOT);
+ }
+
+ {
+ LookupMap<PTR_TypeVarTypeDesc>::Iterator genericParamIter(&m_GenericParamToDescMap);
+
+ while (genericParamIter.Next())
+ {
+ TypeVarTypeDesc * pTypeDesc = genericParamIter.GetElement();
+
+ if (pTypeDesc != NULL)
+ {
+ _ASSERTE(image->IsStored(pTypeDesc));
+ pTypeDesc->Fixup(image);
+ }
+ }
+ }
+
+ //
+ // Fixup the assembly reference map table
+ //
+
+ {
+ LookupMap<PTR_Module>::Iterator manifestModuleIter(&m_ManifestModuleReferencesMap);
+ DWORD rid = 0;
+
+ while (manifestModuleIter.Next())
+ {
+ TADDR flags;
+ Module * pModule = manifestModuleIter.GetElementAndFlags(&flags);
+
+ if (pModule != NULL)
+ {
+ if (image->CanEagerBindToModule(pModule))
+ {
+ if (image->CanHardBindToZapModule(pModule))
+ {
+ image->FixupField(m_ManifestModuleReferencesMap.pTable, rid * sizeof(TADDR),
+ pModule, flags, IMAGE_REL_BASED_RelativePointer);
+ }
+ else
+ {
+ image->ZeroPointerField(m_ManifestModuleReferencesMap.pTable, rid * sizeof(TADDR));
+ }
+ }
+ else
+ {
+ image->ZeroPointerField(m_ManifestModuleReferencesMap.pTable, rid * sizeof(TADDR));
+ }
+ }
+
+ rid++;
+ }
+ }
+
+ //
+ // Zero out file references table.
+ //
+ image->ZeroField(m_FileReferencesMap.pTable, 0,
+ m_FileReferencesMap.GetSize() * sizeof(void*));
+
+ //
+ // Fixup Constrained Execution Regions restoration records.
+ //
+ if (m_pCerNgenRootTable != NULL)
+ {
+ image->BeginRegion(CORINFO_REGION_HOT);
+ image->FixupPointerField(this, offsetof(Module, m_pCerNgenRootTable));
+ m_pCerNgenRootTable->Fixup(image);
+ image->EndRegion(CORINFO_REGION_HOT);
+ }
+ else
+ image->ZeroPointerField(this, offsetof(Module, m_pCerNgenRootTable));
+
+ // Zero out fields we always compute at runtime lazily.
+ image->ZeroField(this, offsetof(Module, m_pCerPrepInfo), sizeof(m_pCerPrepInfo));
+ image->ZeroField(this, offsetof(Module, m_pCerCrst), sizeof(m_pCerCrst));
+
+ image->ZeroField(this, offsetof(Module, m_debuggerSpecificData), sizeof(m_debuggerSpecificData));
+
+ image->ZeroField(this, offsetof(Module, m_AssemblyRefByNameCount), sizeof(m_AssemblyRefByNameCount));
+ image->ZeroPointerField(this, offsetof(Module, m_AssemblyRefByNameTable));
+
+ image->ZeroPointerField(this,offsetof(Module, m_NativeMetadataAssemblyRefMap));
+
+ //
+ // Fixup statics
+ //
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: fixing up module static data\n"));
+
+ image->ZeroPointerField(this, offsetof(Module, m_ModuleID));
+ image->ZeroField(this, offsetof(Module, m_ModuleIndex), sizeof(m_ModuleIndex));
+
+ image->FixupPointerField(this, offsetof(Module, m_pDynamicStaticsInfo));
+
+ DynamicStaticsInfo* pDSI = m_pDynamicStaticsInfo;
+ for (DWORD i = 0; i < m_cDynamicEntries; i++, pDSI++)
+ {
+ if (pDSI->pEnclosingMT->GetLoaderModule() == this &&
+ // CEEPreloader::TriageTypeForZap() could have rejected this type
+ image->IsStored(pDSI->pEnclosingMT))
+ {
+ image->FixupPointerField(m_pDynamicStaticsInfo, (BYTE *)&pDSI->pEnclosingMT - (BYTE *)m_pDynamicStaticsInfo);
+ }
+ else
+ {
+ // Some other (mutually-recursive) dependency must have loaded
+            // a generic instantiation whose statics were pumped into the
+ // assembly being ngenned.
+ image->ZeroPointerField(m_pDynamicStaticsInfo, (BYTE *)&pDSI->pEnclosingMT - (BYTE *)m_pDynamicStaticsInfo);
+ }
+ }
+
+ // fix up module security descriptor
+ if (m_pModuleSecurityDescriptor)
+ {
+ image->FixupPointerField(this, offsetof(Module, m_pModuleSecurityDescriptor));
+ m_pModuleSecurityDescriptor->Fixup(image);
+ }
+ else
+ {
+ image->ZeroPointerField(this, offsetof(Module, m_pModuleSecurityDescriptor));
+ }
+
+ // If we failed to load some types we need to reset the pointers to the static offset tables so they'll be
+ // rebuilt at runtime.
+ if (m_pRegularStaticOffsets != (PTR_DWORD)NGEN_STATICS_ALLCLASSES_WERE_LOADED)
+ {
+ _ASSERTE(m_pThreadStaticOffsets != (PTR_DWORD)NGEN_STATICS_ALLCLASSES_WERE_LOADED);
+ image->ZeroPointerField(this, offsetof(Module, m_pRegularStaticOffsets));
+ image->ZeroPointerField(this, offsetof(Module, m_pThreadStaticOffsets));
+ }
+
+ // Fix up inlining data
+ if(m_persistentInlineTrackingMap)
+ {
+ image->FixupPointerField(this, offsetof(Module, m_persistentInlineTrackingMap));
+ m_persistentInlineTrackingMap->Fixup(image);
+ }
+ else
+ {
+ image->ZeroPointerField(this, offsetof(Module, m_persistentInlineTrackingMap));
+ }
+
+ SetIsModuleSaved();
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#ifdef FEATURE_PREJIT
+//
+// Is "address" a data-structure in the native image?
+//
+
+BOOL Module::IsPersistedObject(void *address)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ if (!HasNativeImage())
+ return FALSE;
+
+ PEImageLayout *pLayout = GetNativeImage();
+ _ASSERTE(pLayout->IsMapped());
+
+ return (address >= pLayout->GetBase()
+ && address < (BYTE*)pLayout->GetBase() + pLayout->GetVirtualSize());
+}
+
+Module *Module::GetModuleFromIndex(DWORD ix)
+{
+ CONTRACT(Module*)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ if (HasNativeImage())
+ {
+ PRECONDITION(GetNativeImage()->CheckNativeImportFromIndex(ix));
+ CORCOMPILE_IMPORT_TABLE_ENTRY *p = GetNativeImage()->GetNativeImportFromIndex(ix);
+ RETURN ZapSig::DecodeModuleFromIndexes(this, p->wAssemblyRid, p->wModuleRid);
+ }
+ else
+ {
+ mdAssemblyRef mdAssemblyRefToken = TokenFromRid(ix, mdtAssemblyRef);
+ Assembly *pAssembly = this->LookupAssemblyRef(mdAssemblyRefToken);
+ if (pAssembly)
+ {
+ RETURN pAssembly->GetManifestModule();
+ }
+ else
+ {
+ // GetModuleFromIndex failed
+ RETURN NULL;
+ }
+ }
+}
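+
+// In the IL-only path above, the index is simply the AssemblyRef RID; for
+// example, ix == 2 yields the token 0x23000002 (mdtAssemblyRef | 2), which
+// is then looked up via LookupAssemblyRef.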
+#endif // FEATURE_PREJIT
+
+#endif // !DACCESS_COMPILE
+
+#ifdef FEATURE_PREJIT
+
+Module *Module::GetModuleFromIndexIfLoaded(DWORD ix)
+{
+ CONTRACT(Module*)
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(HasNativeImage());
+ PRECONDITION(GetNativeImage()->CheckNativeImportFromIndex(ix));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+#ifndef DACCESS_COMPILE
+ CORCOMPILE_IMPORT_TABLE_ENTRY *p = GetNativeImage()->GetNativeImportFromIndex(ix);
+
+ RETURN ZapSig::DecodeModuleFromIndexesIfLoaded(this, p->wAssemblyRid, p->wModuleRid);
+#else // DACCESS_COMPILE
+ DacNotImpl();
+ RETURN NULL;
+#endif // DACCESS_COMPILE
+}
+
+#ifndef DACCESS_COMPILE
+
+BYTE *Module::GetNativeFixupBlobData(RVA rva)
+{
+ CONTRACT(BYTE *)
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN (BYTE *) GetNativeOrReadyToRunImage()->GetRvaData(rva);
+}
+
+IMDInternalImport *Module::GetNativeAssemblyImport(BOOL loadAllowed)
+{
+ CONTRACT(IMDInternalImport *)
+ {
+ INSTANCE_CHECK;
+ if (loadAllowed) GC_TRIGGERS; else GC_NOTRIGGER;
+ if (loadAllowed) THROWS; else NOTHROW;
+ if (loadAllowed) INJECT_FAULT(COMPlusThrowOM()); else FORBID_FAULT;
+ MODE_ANY;
+ PRECONDITION(HasNativeImage());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN GetFile()->GetPersistentNativeImage()->GetNativeMDImport(loadAllowed);
+}
+
+
+/*static*/
+void Module::RestoreMethodTablePointerRaw(MethodTable ** ppMT,
+ Module *pContainingModule,
+ ClassLoadLevel level)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Ensure that the compiler won't fetch the value twice
+ TADDR fixup = VolatileLoadWithoutBarrier((TADDR *)ppMT);
+
+#ifdef _DEBUG
+ if (pContainingModule != NULL)
+ {
+ Module * dbg_pZapModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(ppMT));
+ _ASSERTE((dbg_pZapModule == NULL) || (pContainingModule == dbg_pZapModule));
+ }
+#endif //_DEBUG
+
+ if (CORCOMPILE_IS_POINTER_TAGGED(fixup))
+ {
+#ifdef _WIN64
+ CONSISTENCY_CHECK((CORCOMPILE_UNTAG_TOKEN(fixup)>>32) == 0);
+#endif
+
+ RVA fixupRva = (RVA) CORCOMPILE_UNTAG_TOKEN(fixup);
+
+ if (pContainingModule == NULL)
+ pContainingModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(ppMT));
+ PREFIX_ASSUME(pContainingModule != NULL);
+
+ _ASSERTE((*pContainingModule->GetNativeFixupBlobData(fixupRva) & ~ENCODE_MODULE_OVERRIDE) == ENCODE_TYPE_HANDLE);
+
+ Module * pInfoModule;
+ PCCOR_SIGNATURE pBlobData = pContainingModule->GetEncodedSig(fixupRva, &pInfoModule);
+
+ TypeHandle th = ZapSig::DecodeType(pContainingModule,
+ pInfoModule,
+ pBlobData,
+ level);
+ *EnsureWritablePages(ppMT) = th.AsMethodTable();
+ }
+ else if (*ppMT)
+ {
+ ClassLoader::EnsureLoaded(*ppMT, level);
+ }
+}
+
+/*static*/
+void Module::RestoreMethodTablePointer(FixupPointer<PTR_MethodTable> * ppMT,
+ Module *pContainingModule,
+ ClassLoadLevel level)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (ppMT->IsNull())
+ return;
+
+ if (ppMT->IsTagged())
+ {
+ RestoreMethodTablePointerRaw(ppMT->GetValuePtr(), pContainingModule, level);
+ }
+ else
+ {
+ ClassLoader::EnsureLoaded(ppMT->GetValue(), level);
+ }
+}
+
+/*static*/
+void Module::RestoreMethodTablePointer(RelativeFixupPointer<PTR_MethodTable> * ppMT,
+ Module *pContainingModule,
+ ClassLoadLevel level)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (ppMT->IsNull())
+ return;
+
+ if (ppMT->IsTagged((TADDR)ppMT))
+ {
+ RestoreMethodTablePointerRaw(ppMT->GetValuePtr((TADDR)ppMT), pContainingModule, level);
+ }
+ else
+ {
+ ClassLoader::EnsureLoaded(ppMT->GetValue((TADDR)ppMT), level);
+ }
+}
+
+#endif // !DACCESS_COMPILE
+
+BOOL Module::IsZappedCode(PCODE code)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (!HasNativeImage())
+ return FALSE;
+
+ PEImageLayout *pNativeImage = GetNativeImage();
+
+ UINT32 cCode = 0;
+ PCODE pCodeSection;
+
+ pCodeSection = pNativeImage->GetNativeHotCode(&cCode);
+ if ((pCodeSection <= code) && (code < pCodeSection + cCode))
+ {
+ return TRUE;
+ }
+
+ pCodeSection = pNativeImage->GetNativeCode(&cCode);
+ if ((pCodeSection <= code) && (code < pCodeSection + cCode))
+ {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+BOOL Module::IsZappedPrecode(PCODE code)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (m_pNGenLayoutInfo == NULL)
+ return FALSE;
+
+ for (SIZE_T i = 0; i < COUNTOF(m_pNGenLayoutInfo->m_Precodes); i++)
+ {
+ if (m_pNGenLayoutInfo->m_Precodes[i].IsInRange(code))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+PCCOR_SIGNATURE Module::GetEncodedSig(RVA fixupRva, Module **ppDefiningModule)
+{
+ CONTRACT(PCCOR_SIGNATURE)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ POSTCONDITION(CheckPointer(RETVAL));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+#ifndef DACCESS_COMPILE
+ PCCOR_SIGNATURE pBuffer = GetNativeFixupBlobData(fixupRva);
+
+ BYTE kind = *pBuffer++;
+
+ *ppDefiningModule = (kind & ENCODE_MODULE_OVERRIDE) ? GetModuleFromIndex(CorSigUncompressData(pBuffer)) : this;
+
+ RETURN pBuffer;
+#else
+ RETURN NULL;
+#endif // DACCESS_COMPILE
+}
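+
+// Shape of the fixup blob decoded above (a sketch of the layout this code
+// relies on): the first byte is the encoding kind, optionally with
+// ENCODE_MODULE_OVERRIDE or'ed in; when the override bit is set, a
+// compressed module index follows and selects the defining module:
+//
+//   [kind | ENCODE_MODULE_OVERRIDE?] [compressed module index]? [signature bytes ...]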
+
+PCCOR_SIGNATURE Module::GetEncodedSigIfLoaded(RVA fixupRva, Module **ppDefiningModule)
+{
+ CONTRACT(PCCOR_SIGNATURE)
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_INTOLERANT;
+ POSTCONDITION(CheckPointer(RETVAL));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+#ifndef DACCESS_COMPILE
+ PCCOR_SIGNATURE pBuffer = GetNativeFixupBlobData(fixupRva);
+
+ BYTE kind = *pBuffer++;
+
+ *ppDefiningModule = (kind & ENCODE_MODULE_OVERRIDE) ? GetModuleFromIndexIfLoaded(CorSigUncompressData(pBuffer)) : this;
+
+ RETURN pBuffer;
+#else
+ *ppDefiningModule = NULL;
+ RETURN NULL;
+#endif // DACCESS_COMPILE
+}
+
+/*static*/
+PTR_Module Module::RestoreModulePointerIfLoaded(DPTR(RelativeFixupPointer<PTR_Module>) ppModule, Module *pContainingModule)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (!ppModule->IsTagged(dac_cast<TADDR>(ppModule)))
+ return ppModule->GetValue(dac_cast<TADDR>(ppModule));
+
+#ifndef DACCESS_COMPILE
+ PTR_Module * ppValue = ppModule->GetValuePtr(dac_cast<TADDR>(ppModule));
+
+ // Ensure that the compiler won't fetch the value twice
+ TADDR fixup = VolatileLoadWithoutBarrier((TADDR *)ppValue);
+
+ if (CORCOMPILE_IS_POINTER_TAGGED(fixup))
+ {
+
+#ifdef _WIN64
+ CONSISTENCY_CHECK((CORCOMPILE_UNTAG_TOKEN(fixup)>>32) == 0);
+#endif
+
+ RVA fixupRva = (RVA) CORCOMPILE_UNTAG_TOKEN(fixup);
+
+ _ASSERTE((*pContainingModule->GetNativeFixupBlobData(fixupRva) & ~ENCODE_MODULE_OVERRIDE) == ENCODE_MODULE_HANDLE);
+
+ Module * pInfoModule;
+ PCCOR_SIGNATURE pBlobData = pContainingModule->GetEncodedSigIfLoaded(fixupRva, &pInfoModule);
+
+ if (pInfoModule)
+ {
+ if (EnsureWritablePagesNoThrow(ppValue, sizeof(*ppValue)))
+ *ppValue = pInfoModule;
+ }
+ return pInfoModule;
+ }
+ else
+ {
+ return PTR_Module(fixup);
+ }
+#else
+ DacNotImpl();
+ return NULL;
+#endif
+}
+
+#ifndef DACCESS_COMPILE
+
+/*static*/
+void Module::RestoreModulePointer(RelativeFixupPointer<PTR_Module> * ppModule, Module *pContainingModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ if (!ppModule->IsTagged((TADDR)ppModule))
+ return;
+
+ PTR_Module * ppValue = ppModule->GetValuePtr((TADDR)ppModule);
+
+ // Ensure that the compiler won't fetch the value twice
+ TADDR fixup = VolatileLoadWithoutBarrier((TADDR *)ppValue);
+
+ if (CORCOMPILE_IS_POINTER_TAGGED(fixup))
+ {
+#ifdef _WIN64
+ CONSISTENCY_CHECK((CORCOMPILE_UNTAG_TOKEN(fixup)>>32) == 0);
+#endif
+
+ RVA fixupRva = (RVA) CORCOMPILE_UNTAG_TOKEN(fixup);
+
+ _ASSERTE((*pContainingModule->GetNativeFixupBlobData(fixupRva) & ~ENCODE_MODULE_OVERRIDE) == ENCODE_MODULE_HANDLE);
+
+ Module * pInfoModule;
+ PCCOR_SIGNATURE pBlobData = pContainingModule->GetEncodedSig(fixupRva, &pInfoModule);
+
+ *EnsureWritablePages(ppValue) = pInfoModule;
+ }
+}
+
+/*static*/
+void Module::RestoreTypeHandlePointerRaw(TypeHandle *pHandle, Module* pContainingModule, ClassLoadLevel level)
+{
+ CONTRACTL
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else {INJECT_FAULT(COMPlusThrowOM(););}
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ if (pContainingModule != NULL)
+ {
+ Module * dbg_pZapModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(pHandle));
+ _ASSERTE((dbg_pZapModule == NULL) || (pContainingModule == dbg_pZapModule));
+ }
+#endif //_DEBUG
+
+ TADDR fixup;
+
+ if (IS_ALIGNED(pHandle, sizeof(TypeHandle)))
+ {
+ // Ensure that the compiler won't fetch the value twice
+ fixup = VolatileLoadWithoutBarrier((TADDR *)pHandle);
+ }
+ else
+ {
+        // This is necessary to handle in-place fixups (see FixupTypeHandlePointerInplace)
+        // in stubs-as-il signatures.
+
+ //
+ // protect this unaligned read with the Module Crst for the rare case that
+ // the TypeHandle to fixup is in a signature and unaligned.
+ //
+ if (NULL == pContainingModule)
+ {
+ pContainingModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(pHandle));
+ }
+ CrstHolder ch(&pContainingModule->m_Crst);
+ fixup = *(TADDR UNALIGNED *)pHandle;
+ }
+
+ if (CORCOMPILE_IS_POINTER_TAGGED(fixup))
+ {
+#ifdef _WIN64
+ CONSISTENCY_CHECK((CORCOMPILE_UNTAG_TOKEN(fixup)>>32) == 0);
+#endif
+
+ RVA fixupRva = (RVA) CORCOMPILE_UNTAG_TOKEN(fixup);
+
+ if (NULL == pContainingModule)
+ {
+ pContainingModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(pHandle));
+ }
+ PREFIX_ASSUME(pContainingModule != NULL);
+
+ _ASSERTE((*pContainingModule->GetNativeFixupBlobData(fixupRva) & ~ENCODE_MODULE_OVERRIDE) == ENCODE_TYPE_HANDLE);
+
+ Module * pInfoModule;
+ PCCOR_SIGNATURE pBlobData = pContainingModule->GetEncodedSig(fixupRva, &pInfoModule);
+
+ TypeHandle thResolved = ZapSig::DecodeType(pContainingModule,
+ pInfoModule,
+ pBlobData,
+ level);
+ EnsureWritablePages(pHandle);
+ if (IS_ALIGNED(pHandle, sizeof(TypeHandle)))
+ {
+ *pHandle = thResolved;
+ }
+ else
+ {
+ //
+ // protect this unaligned write with the Module Crst for the rare case that
+ // the TypeHandle to fixup is in a signature and unaligned.
+ //
+ CrstHolder ch(&pContainingModule->m_Crst);
+ *(TypeHandle UNALIGNED *)pHandle = thResolved;
+ }
+ }
+ else if (fixup != NULL)
+ {
+ ClassLoader::EnsureLoaded(TypeHandle::FromTAddr(fixup), level);
+ }
+}
+
+/*static*/
+void Module::RestoreTypeHandlePointer(FixupPointer<TypeHandle> * pHandle,
+ Module *pContainingModule,
+ ClassLoadLevel level)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pHandle->IsNull())
+ return;
+
+ if (pHandle->IsTagged())
+ {
+ RestoreTypeHandlePointerRaw(pHandle->GetValuePtr(), pContainingModule, level);
+ }
+ else
+ {
+ ClassLoader::EnsureLoaded(pHandle->GetValue(), level);
+ }
+}
+
+/*static*/
+void Module::RestoreTypeHandlePointer(RelativeFixupPointer<TypeHandle> * pHandle,
+ Module *pContainingModule,
+ ClassLoadLevel level)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pHandle->IsNull())
+ return;
+
+ if (pHandle->IsTagged((TADDR)pHandle))
+ {
+ RestoreTypeHandlePointerRaw(pHandle->GetValuePtr((TADDR)pHandle), pContainingModule, level);
+ }
+ else
+ {
+ ClassLoader::EnsureLoaded(pHandle->GetValue((TADDR)pHandle), level);
+ }
+}
+
+/*static*/
+void Module::RestoreMethodDescPointerRaw(PTR_MethodDesc * ppMD, Module *pContainingModule, ClassLoadLevel level)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Ensure that the compiler won't fetch the value twice
+ TADDR fixup = VolatileLoadWithoutBarrier((TADDR *)ppMD);
+
+#ifdef _DEBUG
+ if (pContainingModule != NULL)
+ {
+ Module * dbg_pZapModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(ppMD));
+ _ASSERTE((dbg_pZapModule == NULL) || (pContainingModule == dbg_pZapModule));
+ }
+#endif //_DEBUG
+
+ if (CORCOMPILE_IS_POINTER_TAGGED(fixup))
+ {
+ GCX_PREEMP();
+
+#ifdef _WIN64
+ CONSISTENCY_CHECK((CORCOMPILE_UNTAG_TOKEN(fixup)>>32) == 0);
+#endif
+
+ RVA fixupRva = (RVA) CORCOMPILE_UNTAG_TOKEN(fixup);
+
+ if (pContainingModule == NULL)
+ pContainingModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(ppMD));
+ PREFIX_ASSUME(pContainingModule != NULL);
+
+ _ASSERTE((*pContainingModule->GetNativeFixupBlobData(fixupRva) & ~ENCODE_MODULE_OVERRIDE) == ENCODE_METHOD_HANDLE);
+
+ Module * pInfoModule;
+ PCCOR_SIGNATURE pBlobData = pContainingModule->GetEncodedSig(fixupRva, &pInfoModule);
+
+ *EnsureWritablePages(ppMD) = ZapSig::DecodeMethod(pContainingModule,
+ pInfoModule,
+ pBlobData);
+ }
+ else if (*ppMD) {
+ (*ppMD)->CheckRestore(level);
+ }
+}
+
+/*static*/
+void Module::RestoreMethodDescPointer(FixupPointer<PTR_MethodDesc> * ppMD,
+ Module *pContainingModule,
+ ClassLoadLevel level)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (ppMD->IsNull())
+ return;
+
+ if (ppMD->IsTagged())
+ {
+ RestoreMethodDescPointerRaw(ppMD->GetValuePtr(), pContainingModule, level);
+ }
+ else
+ {
+ ppMD->GetValue()->CheckRestore(level);
+ }
+}
+
+/*static*/
+void Module::RestoreMethodDescPointer(RelativeFixupPointer<PTR_MethodDesc> * ppMD,
+ Module *pContainingModule,
+ ClassLoadLevel level)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (ppMD->IsNull())
+ return;
+
+ if (ppMD->IsTagged((TADDR)ppMD))
+ {
+ RestoreMethodDescPointerRaw(ppMD->GetValuePtr((TADDR)ppMD), pContainingModule, level);
+ }
+ else
+ {
+ ppMD->GetValue((TADDR)ppMD)->CheckRestore(level);
+ }
+}
+
+/*static*/
+void Module::RestoreFieldDescPointer(FixupPointer<PTR_FieldDesc> * ppFD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PTR_FieldDesc * ppValue = ppFD->GetValuePtr();
+
+ // Ensure that the compiler won't fetch the value twice
+ TADDR fixup = VolatileLoadWithoutBarrier((TADDR *)ppValue);
+
+ if (CORCOMPILE_IS_POINTER_TAGGED(fixup))
+ {
+#ifdef _WIN64
+ CONSISTENCY_CHECK((CORCOMPILE_UNTAG_TOKEN(fixup)>>32) == 0);
+#endif
+
+ Module * pContainingModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(ppValue));
+ PREFIX_ASSUME(pContainingModule != NULL);
+
+ RVA fixupRva = (RVA) CORCOMPILE_UNTAG_TOKEN(fixup);
+
+ _ASSERTE((*pContainingModule->GetNativeFixupBlobData(fixupRva) & ~ENCODE_MODULE_OVERRIDE) == ENCODE_FIELD_HANDLE);
+
+ Module * pInfoModule;
+ PCCOR_SIGNATURE pBlobData = pContainingModule->GetEncodedSig(fixupRva, &pInfoModule);
+
+ *EnsureWritablePages(ppValue) = ZapSig::DecodeField(pContainingModule,
+ pInfoModule,
+ pBlobData);
+ }
+}
+
+
+//-----------------------------------------------------------------------------
+
+#if 0
+
+ This diagram illustrates the layout of fixups in the ngen image.
+ This is the case where function foo2 has a class-restore fixup
+ for class C1 in b.dll.
+
+ zapBase+curTableVA+rva / FixupList (see Fixup Encoding below)
+ m_pFixupBlobs
+ +-------------------+
+ pEntry->VA +--------------------+ | non-NULL | foo1
+ |Handles | +-------------------+
+ZapHeader.ImportTable | | | non-NULL |
+ | | +-------------------+
+ +------------+ +--------------------+ | non-NULL |
+ |a.dll | |Class cctors |<---+ +-------------------+
+ | | | | \ | 0 |
+ | | p->VA/ | |<---+ \ +===================+
+ | | blobs +--------------------+ \ +-------non-NULL | foo2
+ +------------+ |Class restore | \ +-------------------+
+ |b.dll | | | +-------non-NULL |
+ | | | | +-------------------+
+ | token_C1 |<--------------blob(=>fixedUp/0) |<--pBlob--------index |
+ | | \ | | +-------------------+
+ | | \ +--------------------+ | non-NULL |
+ | | \ | | +-------------------+
+ | | \ | . | | 0 |
+ | | \ | . | +===================+
+ +------------+ \ | . | | 0 | foo3
+ \ | | +===================+
+ \ +--------------------+ | non-NULL | foo4
+ \ |Various fixups that | +-------------------+
+ \ |need too happen | | 0 |
+ \| | +===================+
+ |(CorCompileTokenTable)
+ | |
+ pEntryEnd->VA +--------------------+
+
+
+
+#endif // 0
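+
+// A sketch of how a single fixup cell from the diagram is consumed at
+// runtime (illustrative only; Resolve is a hypothetical stand-in for the
+// ZapSig::Decode* helpers, and the real code below also deals with
+// contracts and writable pages):
+//
+//   TADDR fixup = VolatileLoadWithoutBarrier((TADDR *)pCell);
+//   if (CORCOMPILE_IS_POINTER_TAGGED(fixup))
+//   {
+//       RVA rva = (RVA) CORCOMPILE_UNTAG_TOKEN(fixup); // blob describing the target
+//       *pCell = Resolve(rva);                         // patch cell with the real pointer
+//   }
+//   // else: the cell already holds the final, fixed-up pointer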
+
+//-----------------------------------------------------------------------------
+
+BOOL Module::FixupNativeEntry(CORCOMPILE_IMPORT_SECTION * pSection, SIZE_T fixupIndex, SIZE_T *fixupCell)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(fixupCell));
+ }
+ CONTRACTL_END;
+
+ // Ensure that the compiler won't fetch the value twice
+ SIZE_T fixup = VolatileLoadWithoutBarrier(fixupCell);
+
+ if (pSection->Signatures != NULL)
+ {
+ if (fixup == NULL)
+ {
+ PTR_DWORD pSignatures = dac_cast<PTR_DWORD>(GetNativeOrReadyToRunImage()->GetRvaData(pSection->Signatures));
+
+ if (!LoadDynamicInfoEntry(this, pSignatures[fixupIndex], fixupCell))
+ return FALSE;
+
+ _ASSERTE(*fixupCell != NULL);
+ }
+ }
+ else
+ {
+ if (CORCOMPILE_IS_FIXUP_TAGGED(fixup, pSection))
+ {
+ // Fixup has not been fixed up yet
+ if (!LoadDynamicInfoEntry(this, (RVA)CORCOMPILE_UNTAG_TOKEN(fixup), fixupCell))
+ return FALSE;
+
+ _ASSERTE(!CORCOMPILE_IS_FIXUP_TAGGED(*fixupCell, pSection));
+ }
+ else
+ {
+ //
+            // Handle tables are special. We may need to restore a static handle, or a
+            // previous attempt to load the handle may have been only partial.
+ //
+ if (pSection->Type == CORCOMPILE_IMPORT_TYPE_TYPE_HANDLE)
+ {
+ TypeHandle::FromPtr((void *)fixup).CheckRestore();
+ }
+ else
+ if (pSection->Type == CORCOMPILE_IMPORT_TYPE_METHOD_HANDLE)
+ {
+ ((MethodDesc *)(fixup))->CheckRestore();
+ }
+ }
+ }
+
+ return TRUE;
+}
+
+//-----------------------------------------------------------------------------
+
+void Module::RunEagerFixups()
+{
+ STANDARD_VM_CONTRACT;
+
+ COUNT_T nSections;
+ PTR_CORCOMPILE_IMPORT_SECTION pSections = GetImportSections(&nSections);
+
+ if (nSections == 0)
+ return;
+
+#ifdef _DEBUG
+    // Loading types during eager fixup is not a tested scenario. Turn any attempt to do so into an
+    // assertion failure in a debug build. Use a holder to recover properly in case of an exception.
+ class ForbidTypeLoadHolder
+ {
+ public:
+ ForbidTypeLoadHolder()
+ {
+ BEGIN_FORBID_TYPELOAD();
+ }
+
+ ~ForbidTypeLoadHolder()
+ {
+ END_FORBID_TYPELOAD();
+ }
+ }
+ forbidTypeLoad;
+#endif
+
+ // TODO: Verify that eager fixup dependency graphs can contain no cycles
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ PEImageLayout *pNativeImage = GetNativeOrReadyToRunImage();
+
+ for (COUNT_T iSection = 0; iSection < nSections; iSection++)
+ {
+ PTR_CORCOMPILE_IMPORT_SECTION pSection = pSections + iSection;
+
+ if ((pSection->Flags & CORCOMPILE_IMPORT_FLAGS_EAGER) == 0)
+ continue;
+
+ COUNT_T tableSize;
+ TADDR tableBase = pNativeImage->GetDirectoryData(&pSection->Section, &tableSize);
+
+ if (pSection->Signatures != NULL)
+ {
+ PTR_DWORD pSignatures = dac_cast<PTR_DWORD>(pNativeImage->GetRvaData(pSection->Signatures));
+
+ for (SIZE_T * fixupCell = (SIZE_T *)tableBase; fixupCell < (SIZE_T *)(tableBase + tableSize); fixupCell++)
+ {
+ SIZE_T fixupIndex = fixupCell - (SIZE_T *)tableBase;
+ if (!LoadDynamicInfoEntry(this, pSignatures[fixupIndex], fixupCell))
+ {
+ _ASSERTE(!"LoadDynamicInfoEntry failed");
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ _ASSERTE(*fixupCell != NULL);
+ }
+ }
+ else
+ {
+ for (SIZE_T * fixupCell = (SIZE_T *)tableBase; fixupCell < (SIZE_T *)(tableBase + tableSize); fixupCell++)
+ {
+ // Ensure that the compiler won't fetch the value twice
+ SIZE_T fixup = VolatileLoadWithoutBarrier(fixupCell);
+
+ // This method may execute multiple times in multi-domain scenarios. Check that the fixup has not been
+ // fixed up yet.
+ if (CORCOMPILE_IS_FIXUP_TAGGED(fixup, pSection))
+ {
+ if (!LoadDynamicInfoEntry(this, (RVA)CORCOMPILE_UNTAG_TOKEN(fixup), fixupCell))
+ {
+ _ASSERTE(!"LoadDynamicInfoEntry failed");
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ _ASSERTE(!CORCOMPILE_IS_FIXUP_TAGGED(*fixupCell, pSection));
+ }
+ }
+ }
+ }
+}
+
+void Module::LoadTokenTables()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(HasNativeImage());
+ }
+ CONTRACTL_END;
+
+#ifndef CROSSGEN_COMPILE
+ if (NingenEnabled())
+ return;
+
+ CORCOMPILE_EE_INFO_TABLE *pEEInfo = GetNativeImage()->GetNativeEEInfoTable();
+ PREFIX_ASSUME(pEEInfo != NULL);
+
+ pEEInfo->inlinedCallFrameVptr = InlinedCallFrame::GetMethodFrameVPtr();
+ pEEInfo->addrOfCaptureThreadGlobal = (LONG *)&g_TrapReturningThreads;
+
+    // CoreCLR doesn't always have the debugger loaded.
+    // Patch up the NGen image to point to this address so that the JIT bypasses JMC if there is no debugger.
+ static DWORD g_dummyJMCFlag = 0;
+ pEEInfo->addrOfJMCFlag = g_pDebugInterface ? g_pDebugInterface->GetJMCFlagAddr(this) : &g_dummyJMCFlag;
+
+ pEEInfo->gsCookie = GetProcessGSCookie();
+
+ if (!IsSystem())
+ {
+ pEEInfo->emptyString = (CORINFO_Object **)StringObject::GetEmptyStringRefPtr();
+ }
+
+#ifdef FEATURE_IMPLICIT_TLS
+ pEEInfo->threadTlsIndex = TLS_OUT_OF_INDEXES;
+#else
+ pEEInfo->threadTlsIndex = GetThreadTLSIndex();
+#endif
+ pEEInfo->rvaStaticTlsIndex = NULL;
+#endif // CROSSGEN_COMPILE
+}
+
+#endif // !DACCESS_COMPILE
+
+// Returns the RVA to the compressed debug information blob for the given method
+
+CORCOMPILE_DEBUG_ENTRY Module::GetMethodDebugInfoOffset(MethodDesc *pMD)
+{
+ CONTRACT(CORCOMPILE_DEBUG_ENTRY)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(HasNativeImage());
+ PRECONDITION(CheckPointer(pMD) && pMD->IsPreImplemented());
+ POSTCONDITION(GetNativeImage()->CheckRva(RETVAL, NULL_OK));
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ if (!GetNativeImage()->HasNativeDebugMap() || pMD->IsRuntimeSupplied())
+ RETURN 0;
+
+ COUNT_T size;
+ PTR_CORCOMPILE_DEBUG_RID_ENTRY ridTable =
+ dac_cast<PTR_CORCOMPILE_DEBUG_RID_ENTRY>(GetNativeImage()->GetNativeDebugMap(&size));
+
+ COUNT_T count = size / sizeof(CORCOMPILE_DEBUG_RID_ENTRY);
+ // The size should be odd for better hashing
+ _ASSERTE((count & 1) != 0);
+
+ CORCOMPILE_DEBUG_RID_ENTRY ridEntry = ridTable[GetDebugRidEntryHash(pMD->GetMemberDef()) % count];
+
+    // Do we have multiple code blobs corresponding to the same RID?
+ if (!IsMultipleLabelledEntries(ridEntry))
+ {
+ RETURN(ridEntry);
+ }
+
+ PTR_CORCOMPILE_DEBUG_LABELLED_ENTRY pLabelledEntry =
+ PTR_CORCOMPILE_DEBUG_LABELLED_ENTRY
+ (GetNativeImage()->GetRvaData(ridEntry &
+ ~CORCOMPILE_DEBUG_MULTIPLE_ENTRIES));
+
+ DWORD codeRVA = GetNativeImage()->
+ GetDataRva((const TADDR)pMD->GetNativeCode());
+#if defined(_TARGET_ARM_)
+    // Since the Thumb bit is set on ARM, the RVA calculated above will have it set as well
+    // and would fail the checks in the loop below. Hence, mask off the bit before proceeding.
+ codeRVA = ThumbCodeToDataPointer<DWORD, DWORD>(codeRVA);
+#endif // _TARGET_ARM_
+
+ for (;;)
+ {
+ if (pLabelledEntry->nativeCodeRVA == codeRVA)
+ {
+ RETURN (pLabelledEntry->debugInfoOffset & ~CORCOMPILE_DEBUG_MULTIPLE_ENTRIES);
+ }
+
+ if (!IsMultipleLabelledEntries(pLabelledEntry->debugInfoOffset))
+ {
+ break;
+ }
+
+ pLabelledEntry++;
+ }
+
+ _ASSERTE(!"Debug info not found - corrupted ngen image?");
+ RETURN (0);
+}
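+
+// Lookup scheme used above (a sketch): the debug map is a mod-count hash
+// table indexed by method RID; on a collision the table entry has
+// CORCOMPILE_DEBUG_MULTIPLE_ENTRIES set and refers to a run of labelled
+// entries that are disambiguated by native code RVA:
+//
+//   entry = ridTable[GetDebugRidEntryHash(rid) % count];
+//   if (IsMultipleLabelledEntries(entry))
+//       walk the labelled entries until nativeCodeRVA matches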
+
+PTR_BYTE Module::GetNativeDebugInfo(MethodDesc * pMD)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(HasNativeImage());
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(pMD->GetZapModule() == this);
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ CORCOMPILE_DEBUG_ENTRY debugInfoOffset = GetMethodDebugInfoOffset(pMD);
+
+ if (debugInfoOffset == 0)
+ return NULL;
+
+ return dac_cast<PTR_BYTE>(GetNativeImage()->GetRvaData(debugInfoOffset));
+}
+#endif //FEATURE_PREJIT
+
+
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_PREJIT
+//
+// Profile data management
+//
+
+ICorJitInfo::ProfileBuffer * Module::AllocateProfileBuffer(mdToken _token, DWORD _count, DWORD _ILSize)
+{
+ CONTRACT (ICorJitInfo::ProfileBuffer*)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(CONTRACT_RETURN NULL;);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ assert(_ILSize != 0);
+
+ DWORD listSize = sizeof(CORCOMPILE_METHOD_PROFILE_LIST);
+ DWORD headerSize = sizeof(CORBBTPROF_METHOD_HEADER);
+ DWORD blockSize = _count * sizeof(CORBBTPROF_BLOCK_DATA);
+ DWORD totalSize = listSize + headerSize + blockSize;
+
+ BYTE * memory = (BYTE *) (void *) this->m_pAssembly->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(totalSize));
+
+ CORCOMPILE_METHOD_PROFILE_LIST * methodProfileList = (CORCOMPILE_METHOD_PROFILE_LIST *) (memory + 0);
+ CORBBTPROF_METHOD_HEADER * methodProfileData = (CORBBTPROF_METHOD_HEADER *) (memory + listSize);
+
+ // Note: Memory allocated on the LowFrequencyHeap is zero filled
+
+ methodProfileData->size = headerSize + blockSize;
+ methodProfileData->method.token = _token;
+ methodProfileData->method.ILSize = _ILSize;
+ methodProfileData->method.cBlock = _count;
+
+ assert(methodProfileData->size == methodProfileData->Size());
+
+ // Link it to the per module list of profile data buffers
+
+ methodProfileList->next = m_methodProfileList;
+ m_methodProfileList = methodProfileList;
+
+ RETURN ((ICorJitInfo::ProfileBuffer *) &methodProfileData->method.block[0]);
+}
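+
+// Layout of the single allocation made above (a sketch; the ProfileBuffer
+// handed back to the JIT is the trailing block array):
+//
+//   memory + 0                      CORCOMPILE_METHOD_PROFILE_LIST  (per-module list link)
+//   memory + listSize               CORBBTPROF_METHOD_HEADER        (size, token, ILSize, cBlock)
+//   memory + listSize + headerSize  CORBBTPROF_BLOCK_DATA[_count]   (the returned buffer)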
+
+HANDLE Module::OpenMethodProfileDataLogFile(GUID mvid)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HANDLE profileDataFile = INVALID_HANDLE_VALUE;
+
+ SString path;
+ LPCWSTR assemblyPath = m_file->GetPath();
+ LPCWSTR ibcDir = g_pConfig->GetZapBBInstrDir(); // should we put the ibc data into a particular directory?
+ if (ibcDir == 0) {
+ path.Set(assemblyPath); // no, then put it beside the IL dll
+ }
+ else {
+ LPCWSTR assemblyFileName = wcsrchr(assemblyPath, '\\');
+ if (assemblyFileName)
+ assemblyFileName++; // skip past the \ char
+ else
+ assemblyFileName = assemblyPath;
+
+ path.Set(ibcDir); // yes, put it in the directory, named with the assembly name.
+ path.Append('\\');
+ path.Append(assemblyFileName);
+ }
+
+ SString::Iterator ext = path.End(); // remove the extension
+ if (path.FindBack(ext, '.'))
+ path.Truncate(ext);
+ path.Append(W(".ibc")); // replace with .ibc extension
+
+ profileDataFile = WszCreateFile(path, GENERIC_READ | GENERIC_WRITE, 0, NULL,
+ OPEN_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL | FILE_FLAG_SEQUENTIAL_SCAN,
+ NULL);
+
+ if (profileDataFile == INVALID_HANDLE_VALUE) COMPlusThrowWin32();
+
+ DWORD count;
+ CORBBTPROF_FILE_HEADER fileHeader;
+
+ SetFilePointer(profileDataFile, 0, NULL, FILE_BEGIN);
+ BOOL result = ReadFile(profileDataFile, &fileHeader, sizeof(fileHeader), &count, NULL);
+ if (result &&
+ (count == sizeof(fileHeader)) &&
+ (fileHeader.HeaderSize == sizeof(CORBBTPROF_FILE_HEADER)) &&
+ (fileHeader.Magic == CORBBTPROF_MAGIC) &&
+ (fileHeader.Version == CORBBTPROF_CURRENT_VERSION) &&
+ (fileHeader.MVID == mvid))
+ {
+ //
+ // The existing file was from the same assembly version - just append to it.
+ //
+
+ SetFilePointer(profileDataFile, 0, NULL, FILE_END);
+ }
+ else
+ {
+ //
+ // Either this is a new file, or it's from a previous version. Replace the contents.
+ //
+
+ SetFilePointer(profileDataFile, 0, NULL, FILE_BEGIN);
+ }
+
+ return profileDataFile;
+}
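+
+// Example of the path derivation above, using hypothetical paths: for
+// assemblyPath "C:\app\Foo.dll" with ibcDir "C:\ibc" the profile file is
+// "C:\ibc\Foo.ibc"; with no ibcDir configured it is "C:\app\Foo.ibc",
+// beside the IL image.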
+
+// Note that this method cleans up the profile buffers, so it's crucial that
+// no managed code in the module is allowed to run once this method has
+// been called!
+
+class ProfileMap
+{
+public:
+ SIZE_T getCurrentOffset() {WRAPPER_NO_CONTRACT; return buffer.Size();}
+
+ void * getOffsetPtr(SIZE_T offset)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(offset <= buffer.Size());
+ return ((void *) (((char *) buffer.Ptr()) + offset));
+ }
+
+ void *Allocate(SIZE_T size)
+ {
+ CONTRACT(void *)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(CONTRACT_RETURN NULL;);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ SIZE_T oldSize = buffer.Size();
+ buffer.ReSizeThrows(oldSize + size);
+ RETURN getOffsetPtr(oldSize);
+ }
+
+private:
+ CQuickBytes buffer;
+};
+
+class ProfileEmitter
+{
+public:
+
+ ProfileEmitter()
+ {
+ LIMITED_METHOD_CONTRACT;
+ pSectionList = NULL;
+ }
+
+ ~ProfileEmitter()
+ {
+ WRAPPER_NO_CONTRACT;
+ while (pSectionList)
+ {
+ SectionList *temp = pSectionList->next;
+ delete pSectionList;
+ pSectionList = temp;
+ }
+ }
+
+ ProfileMap *EmitNewSection(SectionFormat format)
+ {
+ WRAPPER_NO_CONTRACT;
+ SectionList *s = new SectionList();
+
+ s->format = format;
+ s->next = pSectionList;
+ pSectionList = s;
+
+ return &s->profileMap;
+ }
+
+ //
+ // Serialize the profile sections into pMap
+ //
+
+ void Serialize(ProfileMap *profileMap, GUID mvid)
+ {
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ //
+ // Allocate the file header
+ //
+ {
+ CORBBTPROF_FILE_HEADER *fileHeader;
+ fileHeader = (CORBBTPROF_FILE_HEADER *) profileMap->Allocate(sizeof(CORBBTPROF_FILE_HEADER));
+
+ fileHeader->HeaderSize = sizeof(CORBBTPROF_FILE_HEADER);
+ fileHeader->Magic = CORBBTPROF_MAGIC;
+ fileHeader->Version = CORBBTPROF_CURRENT_VERSION;
+ fileHeader->MVID = mvid;
+ }
+
+ //
+ // Count the number of sections
+ //
+ ULONG32 numSections = 0;
+ for (SectionList *p = pSectionList; p; p = p->next)
+ {
+ numSections++;
+ }
+
+ //
+ // Allocate the section table
+ //
+ SIZE_T tableEntryOffset;
+ {
+ CORBBTPROF_SECTION_TABLE_HEADER *tableHeader;
+ tableHeader = (CORBBTPROF_SECTION_TABLE_HEADER *)
+ profileMap->Allocate(sizeof(CORBBTPROF_SECTION_TABLE_HEADER));
+
+ tableHeader->NumEntries = numSections;
+ tableEntryOffset = profileMap->getCurrentOffset();
+
+ CORBBTPROF_SECTION_TABLE_ENTRY *tableEntry;
+ tableEntry = (CORBBTPROF_SECTION_TABLE_ENTRY *)
+ profileMap->Allocate(sizeof(CORBBTPROF_SECTION_TABLE_ENTRY) * numSections);
+ }
+
+ //
+ // Allocate the data sections
+ //
+ {
+ ULONG secCount = 0;
+ for (SectionList *pSec = pSectionList; pSec; pSec = pSec->next, secCount++)
+ {
+ SIZE_T offset = profileMap->getCurrentOffset();
+ assert((offset & 0x3) == 0);
+
+ SIZE_T actualSize = pSec->profileMap.getCurrentOffset();
+ SIZE_T alignUpSize = AlignUp(actualSize, sizeof(DWORD));
+
+ profileMap->Allocate(alignUpSize);
+
+ memcpy(profileMap->getOffsetPtr(offset), pSec->profileMap.getOffsetPtr(0), actualSize);
+ if (alignUpSize > actualSize)
+ {
+ memset(((BYTE*)profileMap->getOffsetPtr(offset))+actualSize, 0, (alignUpSize - actualSize));
+ }
+
+ CORBBTPROF_SECTION_TABLE_ENTRY *tableEntry;
+ tableEntry = (CORBBTPROF_SECTION_TABLE_ENTRY *) profileMap->getOffsetPtr(tableEntryOffset);
+ tableEntry += secCount;
+ tableEntry->FormatID = pSec->format;
+ tableEntry->Data.Offset = offset;
+ tableEntry->Data.Size = alignUpSize;
+ }
+ }
+
+ //
+ // Allocate the end token marker
+ //
+ {
+ ULONG *endToken;
+ endToken = (ULONG *) profileMap->Allocate(sizeof(ULONG));
+
+ *endToken = CORBBTPROF_END_TOKEN;
+ }
+ }
+
+private:
+ struct SectionList
+ {
+ SectionFormat format;
+ ProfileMap profileMap;
+ SectionList *next;
+ };
+ SectionList * pSectionList;
+};
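+
+// File layout produced by ProfileEmitter::Serialize (a sketch of the blocks
+// allocated above, in order):
+//
+//   CORBBTPROF_FILE_HEADER             HeaderSize, Magic, Version, MVID
+//   CORBBTPROF_SECTION_TABLE_HEADER    NumEntries = number of sections
+//   CORBBTPROF_SECTION_TABLE_ENTRY[N]  FormatID + (Data.Offset, Data.Size)
+//   section data                       one DWORD-aligned, zero-padded run per section
+//   ULONG                              CORBBTPROF_END_TOKEN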
+
+
+/*static*/ idTypeSpec TypeSpecBlobEntry::s_lastTypeSpecToken = idTypeSpecNil;
+/*static*/ idMethodSpec MethodSpecBlobEntry::s_lastMethodSpecToken = idMethodSpecNil;
+/*static*/ idExternalNamespace ExternalNamespaceBlobEntry::s_lastExternalNamespaceToken = idExternalNamespaceNil;
+/*static*/ idExternalType ExternalTypeBlobEntry::s_lastExternalTypeToken = idExternalTypeNil;
+/*static*/ idExternalSignature ExternalSignatureBlobEntry::s_lastExternalSignatureToken = idExternalSignatureNil;
+/*static*/ idExternalMethod ExternalMethodBlobEntry::s_lastExternalMethodToken = idExternalMethodNil;
+
+
+inline static size_t HashCombine(size_t h1, size_t h2)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ size_t result = (h1 * 129) ^ h2;
+ return result;
+}
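+
+// A small worked example of the combine step:
+//   HashCombine(5, 7) = (5 * 129) ^ 7 = 645 ^ 7 = 642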
+
+bool TypeSpecBlobEntry::IsEqual(const ProfilingBlobEntry * other) const
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (this->kind() != other->kind())
+ return false;
+
+ const TypeSpecBlobEntry * other2 = static_cast<const TypeSpecBlobEntry *>(other);
+
+ if (this->cbSig() != other2->cbSig())
+ return false;
+
+ PCCOR_SIGNATURE p1 = this->pSig();
+ PCCOR_SIGNATURE p2 = other2->pSig();
+
+ for (DWORD i=0; (i < this->cbSig()); i++)
+ if (p1[i] != p2[i])
+ return false;
+
+ return true;
+}
+
+size_t TypeSpecBlobEntry::Hash() const
+{
+ WRAPPER_NO_CONTRACT;
+
+ size_t hashValue = HashInit();
+
+ PCCOR_SIGNATURE p1 = pSig();
+ for (DWORD i=0; (i < cbSig()); i++)
+ hashValue = HashCombine(hashValue, p1[i]);
+
+ return hashValue;
+}
+
+TypeSpecBlobEntry::TypeSpecBlobEntry(DWORD _cbSig, PCCOR_SIGNATURE _pSig)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(_cbSig > 0);
+ PRECONDITION(CheckPointer(_pSig));
+ }
+ CONTRACTL_END;
+
+ m_token = idTypeSpecNil;
+ m_flags = 0;
+ m_cbSig = 0;
+
+ COR_SIGNATURE * pNewSig = (COR_SIGNATURE *) new (nothrow) BYTE[_cbSig];
+ if (pNewSig != NULL)
+ {
+ m_flags = 0;
+ m_cbSig = _cbSig;
+ memcpy(pNewSig, _pSig, _cbSig);
+ }
+ m_pSig = const_cast<PCCOR_SIGNATURE>(pNewSig);
+}
+
+/* static */ const TypeSpecBlobEntry * TypeSpecBlobEntry::FindOrAdd(PTR_Module pModule,
+ DWORD _cbSig,
+ PCCOR_SIGNATURE _pSig)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if ((_cbSig == 0) || (_pSig == NULL))
+ return NULL;
+
+ TypeSpecBlobEntry sEntry(_cbSig, _pSig);
+
+ const ProfilingBlobEntry * pEntry = pModule->GetProfilingBlobTable()->Lookup(&sEntry);
+ if (pEntry == NULL)
+ {
+ //
+ // Not Found, add a new type spec profiling blob entry
+ //
+ TypeSpecBlobEntry * newEntry = new (nothrow) TypeSpecBlobEntry(_cbSig, _pSig);
+ if (newEntry == NULL)
+ return NULL;
+
+ newEntry->newToken(); // Assign a new ibc type spec token
+ CONTRACT_VIOLATION(ThrowsViolation);
+ pModule->GetProfilingBlobTable()->Add(newEntry);
+ pEntry = newEntry;
+ }
+
+ //
+ // Return the type spec entry that we found or the new one that we just created
+ //
+ _ASSERTE(pEntry->kind() == ParamTypeSpec);
+ return static_cast<const TypeSpecBlobEntry *>(pEntry);
+}
+
+bool MethodSpecBlobEntry::IsEqual(const ProfilingBlobEntry * other) const
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (this->kind() != other->kind())
+ return false;
+
+ const MethodSpecBlobEntry * other2 = static_cast<const MethodSpecBlobEntry *>(other);
+
+ if (this->cbSig() != other2->cbSig())
+ return false;
+
+ PCCOR_SIGNATURE p1 = this->pSig();
+ PCCOR_SIGNATURE p2 = other2->pSig();
+
+ for (DWORD i=0; (i < this->cbSig()); i++)
+ if (p1[i] != p2[i])
+ return false;
+
+ return true;
+}
+
+size_t MethodSpecBlobEntry::Hash() const
+{
+ WRAPPER_NO_CONTRACT;
+
+ size_t hashValue = HashInit();
+
+ PCCOR_SIGNATURE p1 = pSig();
+ for (DWORD i=0; (i < cbSig()); i++)
+ hashValue = HashCombine(hashValue, p1[i]);
+
+ return hashValue;
+}
+
+MethodSpecBlobEntry::MethodSpecBlobEntry(DWORD _cbSig, PCCOR_SIGNATURE _pSig)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(_cbSig > 0);
+ PRECONDITION(CheckPointer(_pSig));
+ }
+ CONTRACTL_END;
+
+ m_token = idMethodSpecNil;
+ m_flags = 0;
+ m_cbSig = 0;
+
+ COR_SIGNATURE * pNewSig = (COR_SIGNATURE *) new (nothrow) BYTE[_cbSig];
+ if (pNewSig != NULL)
+ {
+ m_flags = 0;
+ m_cbSig = _cbSig;
+ memcpy(pNewSig, _pSig, _cbSig);
+ }
+ m_pSig = const_cast<PCCOR_SIGNATURE>(pNewSig);
+}
+
+/* static */ const MethodSpecBlobEntry * MethodSpecBlobEntry::FindOrAdd(PTR_Module pModule,
+ DWORD _cbSig,
+ PCCOR_SIGNATURE _pSig)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if ((_cbSig == 0) || (_pSig == NULL))
+ return NULL;
+
+ MethodSpecBlobEntry sEntry(_cbSig, _pSig);
+
+ const ProfilingBlobEntry * pEntry = pModule->GetProfilingBlobTable()->Lookup(&sEntry);
+ if (pEntry == NULL)
+ {
+ //
+ // Not Found, add a new method spec profiling blob entry
+ //
+ MethodSpecBlobEntry * newEntry = new (nothrow) MethodSpecBlobEntry(_cbSig, _pSig);
+ if (newEntry == NULL)
+ return NULL;
+
+ newEntry->newToken(); // Assign a new ibc method spec token
+ CONTRACT_VIOLATION(ThrowsViolation);
+ pModule->GetProfilingBlobTable()->Add(newEntry);
+ pEntry = newEntry;
+ }
+
+ //
+ // Return the method spec entry that we found or the new one that we just created
+ //
+ _ASSERTE(pEntry->kind() == ParamMethodSpec);
+ return static_cast<const MethodSpecBlobEntry *>(pEntry);
+}
+
+bool ExternalNamespaceBlobEntry::IsEqual(const ProfilingBlobEntry * other) const
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (this->kind() != other->kind())
+ return false;
+
+ const ExternalNamespaceBlobEntry * other2 = static_cast<const ExternalNamespaceBlobEntry *>(other);
+
+ if (this->cbName() != other2->cbName())
+ return false;
+
+ LPCSTR p1 = this->pName();
+ LPCSTR p2 = other2->pName();
+
+ for (DWORD i=0; (i < this->cbName()); i++)
+ if (p1[i] != p2[i])
+ return false;
+
+ return true;
+}
+
+size_t ExternalNamespaceBlobEntry::Hash() const
+{
+ WRAPPER_NO_CONTRACT;
+
+ size_t hashValue = HashInit();
+
+ LPCSTR p1 = pName();
+ for (DWORD i=0; (i < cbName()); i++)
+ hashValue = HashCombine(hashValue, p1[i]);
+
+ return hashValue;
+}
+
+ExternalNamespaceBlobEntry::ExternalNamespaceBlobEntry(LPCSTR _pName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(_pName));
+ }
+ CONTRACTL_END;
+
+ m_token = idExternalNamespaceNil;
+ m_cbName = 0;
+ m_pName = NULL;
+
+ DWORD _cbName = (DWORD) strlen(_pName) + 1;
+    LPSTR pName = (LPSTR) new (nothrow) CHAR[_cbName];
+    if (pName != NULL)
+    {
+        m_cbName = _cbName;
+        memcpy(pName, _pName, _cbName);
+        m_pName = pName;
+    }
+}
+
+/* static */ const ExternalNamespaceBlobEntry * ExternalNamespaceBlobEntry::FindOrAdd(PTR_Module pModule, LPCSTR _pName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if ((_pName == NULL) || (::strlen(_pName) == 0))
+ return NULL;
+
+ ExternalNamespaceBlobEntry sEntry(_pName);
+
+ const ProfilingBlobEntry * pEntry = pModule->GetProfilingBlobTable()->Lookup(&sEntry);
+ if (pEntry == NULL)
+ {
+ //
+ // Not Found, add a new external namespace blob entry
+ //
+ ExternalNamespaceBlobEntry * newEntry = new (nothrow) ExternalNamespaceBlobEntry(_pName);
+ if (newEntry == NULL)
+ return NULL;
+
+ newEntry->newToken(); // Assign a new ibc external namespace token
+ CONTRACT_VIOLATION(ThrowsViolation);
+ pModule->GetProfilingBlobTable()->Add(newEntry);
+ pEntry = newEntry;
+ }
+
+ //
+ // Return the external namespace entry that we found or the new one that we just created
+ //
+ _ASSERTE(pEntry->kind() == ExternalNamespaceDef);
+ return static_cast<const ExternalNamespaceBlobEntry *>(pEntry);
+}
+
+bool ExternalTypeBlobEntry::IsEqual(const ProfilingBlobEntry * other) const
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (this->kind() != other->kind())
+ return false;
+
+ const ExternalTypeBlobEntry * other2 = static_cast<const ExternalTypeBlobEntry *>(other);
+
+ if (this->assemblyRef() != other2->assemblyRef())
+ return false;
+
+ if (this->nestedClass() != other2->nestedClass())
+ return false;
+
+ if (this->nameSpace() != other2->nameSpace())
+ return false;
+
+ if (this->cbName() != other2->cbName())
+ return false;
+
+ LPCSTR p1 = this->pName();
+ LPCSTR p2 = other2->pName();
+
+ for (DWORD i=0; (i < this->cbName()); i++)
+ if (p1[i] != p2[i])
+ return false;
+
+ return true;
+}
+
+size_t ExternalTypeBlobEntry::Hash() const
+{
+ WRAPPER_NO_CONTRACT;
+
+ size_t hashValue = HashInit();
+
+ hashValue = HashCombine(hashValue, assemblyRef());
+ hashValue = HashCombine(hashValue, nestedClass());
+ hashValue = HashCombine(hashValue, nameSpace());
+
+ LPCSTR p1 = pName();
+
+ for (DWORD i=0; (i < cbName()); i++)
+ hashValue = HashCombine(hashValue, p1[i]);
+
+ return hashValue;
+}
+
+ExternalTypeBlobEntry::ExternalTypeBlobEntry(mdToken _assemblyRef,
+ mdToken _nestedClass,
+ mdToken _nameSpace,
+ LPCSTR _pName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(_pName));
+ }
+ CONTRACTL_END;
+
+ m_token = idExternalTypeNil;
+ m_assemblyRef = mdAssemblyRefNil;
+ m_nestedClass = idExternalTypeNil;
+ m_nameSpace = idExternalNamespaceNil;
+ m_cbName = 0;
+ m_pName = NULL;
+
+ DWORD _cbName = (DWORD) strlen(_pName) + 1;
+    LPSTR pName = (LPSTR) new (nothrow) CHAR[_cbName];
+    if (pName != NULL)
+    {
+        m_assemblyRef = _assemblyRef;
+        m_nestedClass = _nestedClass;
+        m_nameSpace = _nameSpace;
+        m_cbName = _cbName;
+        memcpy(pName, _pName, _cbName);
+        m_pName = pName;
+    }
+}
+
+/* static */ const ExternalTypeBlobEntry * ExternalTypeBlobEntry::FindOrAdd(PTR_Module pModule,
+ mdToken _assemblyRef,
+ mdToken _nestedClass,
+ mdToken _nameSpace,
+ LPCSTR _pName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if ((_pName == NULL) || (::strlen(_pName) == 0))
+ return NULL;
+
+ ExternalTypeBlobEntry sEntry(_assemblyRef, _nestedClass, _nameSpace, _pName);
+
+ const ProfilingBlobEntry * pEntry = pModule->GetProfilingBlobTable()->Lookup(&sEntry);
+ if (pEntry == NULL)
+ {
+ //
+ // Not Found, add a new external type blob entry
+ //
+ ExternalTypeBlobEntry * newEntry = new (nothrow) ExternalTypeBlobEntry(_assemblyRef, _nestedClass, _nameSpace, _pName);
+ if (newEntry == NULL)
+ return NULL;
+
+ newEntry->newToken(); // Assign a new ibc external type token
+ CONTRACT_VIOLATION(ThrowsViolation);
+ pModule->GetProfilingBlobTable()->Add(newEntry);
+ pEntry = newEntry;
+ }
+
+ //
+ // Return the external type entry that we found or the new one that we just created
+ //
+ _ASSERTE(pEntry->kind() == ExternalTypeDef);
+ return static_cast<const ExternalTypeBlobEntry *>(pEntry);
+}
+
+bool ExternalSignatureBlobEntry::IsEqual(const ProfilingBlobEntry * other) const
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (this->kind() != other->kind())
+ return false;
+
+ const ExternalSignatureBlobEntry * other2 = static_cast<const ExternalSignatureBlobEntry *>(other);
+
+ if (this->cbSig() != other2->cbSig())
+ return false;
+
+ PCCOR_SIGNATURE p1 = this->pSig();
+ PCCOR_SIGNATURE p2 = other2->pSig();
+
+ for (DWORD i=0; (i < this->cbSig()); i++)
+ if (p1[i] != p2[i])
+ return false;
+
+ return true;
+}
+
+size_t ExternalSignatureBlobEntry::Hash() const
+{
+ WRAPPER_NO_CONTRACT;
+
+ size_t hashValue = HashInit();
+
+ hashValue = HashCombine(hashValue, cbSig());
+
+ PCCOR_SIGNATURE p1 = pSig();
+
+ for (DWORD i=0; (i < cbSig()); i++)
+ hashValue = HashCombine(hashValue, p1[i]);
+
+ return hashValue;
+}
+
+ExternalSignatureBlobEntry::ExternalSignatureBlobEntry(DWORD _cbSig, PCCOR_SIGNATURE _pSig)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(_cbSig > 0);
+ PRECONDITION(CheckPointer(_pSig));
+ }
+ CONTRACTL_END;
+
+ m_token = idExternalSignatureNil;
+ m_cbSig = 0;
+
+ COR_SIGNATURE * pNewSig = (COR_SIGNATURE *) new (nothrow) BYTE[_cbSig];
+ if (pNewSig != NULL)
+ {
+ m_cbSig = _cbSig;
+ memcpy(pNewSig, _pSig, _cbSig);
+ }
+ m_pSig = const_cast<PCCOR_SIGNATURE>(pNewSig);
+}
+
+/* static */ const ExternalSignatureBlobEntry * ExternalSignatureBlobEntry::FindOrAdd(PTR_Module pModule,
+ DWORD _cbSig,
+ PCCOR_SIGNATURE _pSig)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if ((_cbSig == 0) || (_pSig == NULL))
+ return NULL;
+
+ ExternalSignatureBlobEntry sEntry(_cbSig, _pSig);
+
+ const ProfilingBlobEntry * pEntry = pModule->GetProfilingBlobTable()->Lookup(&sEntry);
+ if (pEntry == NULL)
+ {
+ //
+ // Not Found, add a new external signature blob entry
+ //
+ ExternalSignatureBlobEntry * newEntry = new (nothrow) ExternalSignatureBlobEntry(_cbSig, _pSig);
+ if (newEntry == NULL)
+ return NULL;
+
+ newEntry->newToken(); // Assign a new ibc external signature token
+ CONTRACT_VIOLATION(ThrowsViolation);
+ pModule->GetProfilingBlobTable()->Add(newEntry);
+ pEntry = newEntry;
+ }
+
+ //
+ // Return the external signature entry that we found or the new one that we just created
+ //
+ _ASSERTE(pEntry->kind() == ExternalSignatureDef);
+ return static_cast<const ExternalSignatureBlobEntry *>(pEntry);
+}
+
+bool ExternalMethodBlobEntry::IsEqual(const ProfilingBlobEntry * other) const
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (this->kind() != other->kind())
+ return false;
+
+ const ExternalMethodBlobEntry * other2 = static_cast<const ExternalMethodBlobEntry *>(other);
+
+ if (this->nestedClass() != other2->nestedClass())
+ return false;
+
+ if (this->signature() != other2->signature())
+ return false;
+
+ if (this->cbName() != other2->cbName())
+ return false;
+
+ LPCSTR p1 = this->pName();
+ LPCSTR p2 = other2->pName();
+
+ for (DWORD i=0; (i < this->cbName()); i++)
+ if (p1[i] != p2[i])
+ return false;
+
+ return true;
+}
+
+size_t ExternalMethodBlobEntry::Hash() const
+{
+ WRAPPER_NO_CONTRACT;
+
+ size_t hashValue = HashInit();
+
+ hashValue = HashCombine(hashValue, nestedClass());
+ hashValue = HashCombine(hashValue, signature());
+
+ LPCSTR p1 = pName();
+
+ for (DWORD i=0; (i < cbName()); i++)
+ hashValue = HashCombine(hashValue, p1[i]);
+
+ return hashValue;
+}
+
+ExternalMethodBlobEntry::ExternalMethodBlobEntry(mdToken _nestedClass,
+ mdToken _signature,
+ LPCSTR _pName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(_pName));
+ }
+ CONTRACTL_END;
+
+ m_token = idExternalMethodNil;
+ m_nestedClass = idExternalTypeNil;
+ m_signature = idExternalSignatureNil;
+ m_cbName = 0;
+ m_pName = NULL;
+
+ DWORD _cbName = (DWORD) strlen(_pName) + 1;
+ LPSTR pName = (LPSTR) new (nothrow) CHAR[_cbName];
+ if (pName != NULL)
+ {
+ m_nestedClass = _nestedClass;
+ m_signature = _signature;
+ m_cbName = _cbName;
+ memcpy(pName, _pName, _cbName);
+ m_pName = (LPCSTR) pName;
+ }
+}
+
+/* static */ const ExternalMethodBlobEntry * ExternalMethodBlobEntry::FindOrAdd(
+ PTR_Module pModule,
+ mdToken _nestedClass,
+ mdToken _signature,
+ LPCSTR _pName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(_pName));
+ }
+ CONTRACTL_END;
+
+ if ((_pName == NULL) || (::strlen(_pName) == 0))
+ return NULL;
+
+ ExternalMethodBlobEntry sEntry(_nestedClass, _signature, _pName);
+
+ const ProfilingBlobEntry * pEntry = pModule->GetProfilingBlobTable()->Lookup(&sEntry);
+ if (pEntry == NULL)
+ {
+ //
+ // Not Found, add a new external method blob entry
+ //
+ ExternalMethodBlobEntry * newEntry;
+ newEntry = new (nothrow) ExternalMethodBlobEntry(_nestedClass, _signature, _pName);
+ if (newEntry == NULL)
+ return NULL;
+
+ newEntry->newToken(); // Assign a new ibc external method token
+ CONTRACT_VIOLATION(ThrowsViolation);
+ pModule->GetProfilingBlobTable()->Add(newEntry);
+ pEntry = newEntry;
+ }
+
+ //
+ // Return the external method entry that we found or the new one that we just created
+ //
+ _ASSERTE(pEntry->kind() == ExternalMethodDef);
+ return static_cast<const ExternalMethodBlobEntry *>(pEntry);
+}
+
+
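+//
+// Extracts the executable's base name from a command line into _dst.
+// Quote characters are stripped, the accumulated name is reset at each
+// '\' or ':' so that only the last path component survives, and scanning
+// stops at the first unquoted space (the start of the arguments).
+//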
+static bool GetBasename(LPCWSTR _src, __out_ecount(dstlen) __out_z LPWSTR _dst, int dstlen)
+{
+ LIMITED_METHOD_CONTRACT;
+ LPCWSTR src = _src;
+ LPWSTR dst = _dst;
+
+ if ((src == NULL) || (dstlen <= 0))
+ return false;
+
+ bool inQuotes = false;
+ LPWSTR dstLast = dst + (dstlen - 1);
+ while (dst < dstLast)
+ {
+ WCHAR wch = *src++;
+ if (wch == W('"'))
+ {
+ inQuotes = !inQuotes;
+ continue;
+ }
+
+ if (wch == 0)
+ break;
+
+ *dst++ = wch;
+
+ if (!inQuotes)
+ {
+ if ((wch == W('\\')) || (wch == W(':')))
+ {
+ dst = _dst;
+ }
+ else if (wch == W(' '))
+ {
+ dst--;
+ break;
+ }
+ }
+ }
+ *dst++ = 0;
+ return true;
+}
+
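+//
+// Emits the ScenarioInfo section: a section header with the scenario and
+// run counts, a variable-sized scenario header whose name is the basename
+// of the command line, and a single scenario run record carrying the run
+// time, mvid, command line and system info strings.
+//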
+static void ProfileDataAllocateScenarioInfo(ProfileEmitter * pEmitter, LPCSTR scopeName, GUID* pMvid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ ProfileMap *profileMap = pEmitter->EmitNewSection(ScenarioInfo);
+
+ //
+ // Allocate and initialize the scenario info section
+ //
+ {
+ CORBBTPROF_SCENARIO_INFO_SECTION_HEADER *siHeader;
+ siHeader = (CORBBTPROF_SCENARIO_INFO_SECTION_HEADER *) profileMap->Allocate(sizeof(CORBBTPROF_SCENARIO_INFO_SECTION_HEADER));
+
+ siHeader->NumScenarios = 1;
+ siHeader->TotalNumRuns = 1;
+ }
+
+ //
+ // Allocate and initialize the scenario header section
+ //
+ {
+ LPCWSTR pCmdLine = GetCommandLineW();
+ S_SIZE_T cCmdLine = S_SIZE_T(wcslen(pCmdLine));
+ cCmdLine += 1;
+ if (cCmdLine.IsOverflow())
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+
+ LPCWSTR pSystemInfo = W("<machine,OS>");
+ S_SIZE_T cSystemInfo = S_SIZE_T(wcslen(pSystemInfo));
+ cSystemInfo += 1;
+ if (cSystemInfo.IsOverflow())
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+
+ FILETIME runTime, unused1, unused2, unused3;
+ GetProcessTimes(GetCurrentProcess(), &runTime, &unused1, &unused2, &unused3);
+
+ WCHAR scenarioName[256];
+ GetBasename(pCmdLine, &scenarioName[0], 256);
+
+ LPCWSTR pName = &scenarioName[0];
+ S_SIZE_T cName = S_SIZE_T(wcslen(pName));
+ cName += 1;
+ if (cName.IsOverflow())
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+
+ S_SIZE_T sizeHeader = S_SIZE_T(sizeof(CORBBTPROF_SCENARIO_HEADER));
+ sizeHeader += cName * S_SIZE_T(sizeof(WCHAR));
+ if (sizeHeader.IsOverflow())
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+
+ S_SIZE_T sizeRun = S_SIZE_T(sizeof(CORBBTPROF_SCENARIO_RUN));
+ sizeRun += cCmdLine * S_SIZE_T(sizeof(WCHAR));
+ sizeRun += cSystemInfo * S_SIZE_T(sizeof(WCHAR));
+ if (sizeRun.IsOverflow())
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+
+ //
+ // Allocate the Scenario Header struct
+ //
+ SIZE_T sHeaderOffset;
+ {
+ CORBBTPROF_SCENARIO_HEADER *sHeader;
+ S_SIZE_T sHeaderSize = sizeHeader + sizeRun;
+ if (sHeaderSize.IsOverflow())
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+
+ sHeaderOffset = profileMap->getCurrentOffset();
+ sHeader = (CORBBTPROF_SCENARIO_HEADER *) profileMap->Allocate(sizeHeader.Value());
+
+ sHeader->size = sHeaderSize.Value();
+ sHeader->scenario.ordinal = 1;
+ sHeader->scenario.mask = 1;
+ sHeader->scenario.priority = 0;
+ sHeader->scenario.numRuns = 1;
+ sHeader->scenario.cName = cName.Value();
+ wcscpy_s(sHeader->scenario.name, cName.Value(), pName);
+ }
+
+ //
+ // Allocate the Scenario Run struct
+ //
+ {
+ CORBBTPROF_SCENARIO_RUN *sRun;
+ sRun = (CORBBTPROF_SCENARIO_RUN *) profileMap->Allocate(sizeRun.Value());
+
+ sRun->runTime = runTime;
+ sRun->mvid = *pMvid;
+ sRun->cCmdLine = cCmdLine.Value();
+ sRun->cSystemInfo = cSystemInfo.Value();
+ wcscpy_s(sRun->cmdLine, cCmdLine.Value(), pCmdLine);
+ wcscpy_s(sRun->cmdLine+cCmdLine.Value(), cSystemInfo.Value(), pSystemInfo);
+ }
+#ifdef _DEBUG
+ {
+ CORBBTPROF_SCENARIO_HEADER * sHeader;
+ sHeader = (CORBBTPROF_SCENARIO_HEADER *) profileMap->getOffsetPtr(sHeaderOffset);
+ assert(sHeader->size == sHeader->Size());
+ }
+#endif
+ }
+}
+
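+//
+// Emits the MethodBlockCounts section: walks the method profile list and
+// copies out block-count data only for methods that actually ran (the
+// first block's ExecutionCount is non-zero), then resets every count to
+// zero. The NumMethods field is back-patched once the walk completes.
+//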
+static void ProfileDataAllocateMethodBlockCounts(ProfileEmitter * pEmitter, CORCOMPILE_METHOD_PROFILE_LIST * pMethodProfileListHead)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ ProfileMap *profileMap = pEmitter->EmitNewSection(MethodBlockCounts);
+
+ //
+ // Allocate and initialize the method block count section
+ //
+ SIZE_T mbcHeaderOffset;
+ {
+ CORBBTPROF_METHOD_BLOCK_COUNTS_SECTION_HEADER *mbcHeader;
+ mbcHeaderOffset = profileMap->getCurrentOffset();
+ mbcHeader = (CORBBTPROF_METHOD_BLOCK_COUNTS_SECTION_HEADER *)
+ profileMap->Allocate(sizeof(CORBBTPROF_METHOD_BLOCK_COUNTS_SECTION_HEADER));
+ mbcHeader->NumMethods = 0; // This gets filled in later
+ }
+
+ ULONG numMethods = 0; // We count the number of methods that were executed
+
+ for (CORCOMPILE_METHOD_PROFILE_LIST * methodProfileList = pMethodProfileListHead;
+ methodProfileList;
+ methodProfileList = methodProfileList->next)
+ {
+ CORBBTPROF_METHOD_HEADER * pInfo = methodProfileList->GetInfo();
+
+ assert(pInfo->size == pInfo->Size());
+
+ //
+ // We set methodWasExecuted based upon the ExecutionCount of the very first block
+ //
+ bool methodWasExecuted = (pInfo->method.block[0].ExecutionCount > 0);
+
+ //
+ // If the method was not executed then we don't need to output this method's block counts
+ //
+ SIZE_T methodHeaderOffset;
+ if (methodWasExecuted)
+ {
+ DWORD profileDataSize = pInfo->size;
+ methodHeaderOffset = profileMap->getCurrentOffset();
+ CORBBTPROF_METHOD_HEADER *methodHeader = (CORBBTPROF_METHOD_HEADER *) profileMap->Allocate(profileDataSize);
+ memcpy(methodHeader, pInfo, profileDataSize);
+ numMethods++;
+ }
+
+ // Reset all of the basic block counts to zero
+ for (ULONG i=0; (i < pInfo->method.cBlock); i++ )
+ {
+ //
+ // If methodWasExecuted is false then every block's ExecutionCount should also be zero
+ //
+ _ASSERTE(methodWasExecuted || (pInfo->method.block[i].ExecutionCount == 0));
+
+ pInfo->method.block[i].ExecutionCount = 0;
+ }
+ }
+
+ {
+ CORBBTPROF_METHOD_BLOCK_COUNTS_SECTION_HEADER *mbcHeader;
+ // We have to refetch the mbcHeader as calls to Allocate will resize and thus move the mbcHeader
+ mbcHeader = (CORBBTPROF_METHOD_BLOCK_COUNTS_SECTION_HEADER *) profileMap->getOffsetPtr(mbcHeaderOffset);
+ mbcHeader->NumMethods = numMethods;
+ }
+}
+
+/*static*/ void Module::ProfileDataAllocateTokenLists(ProfileEmitter * pEmitter, Module::TokenProfileData* pTokenProfileData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ //
+ // Allocate and initialize the token list sections
+ //
+ if (pTokenProfileData)
+ {
+ for (int format = 0; format < (int)SectionFormatCount; format++)
+ {
+ CQuickArray<CORBBTPROF_TOKEN_INFO> *pTokenArray = &(pTokenProfileData->m_formats[format].tokenArray);
+
+ if (pTokenArray->Size() != 0)
+ {
+ ProfileMap * profileMap = pEmitter->EmitNewSection((SectionFormat) format);
+
+ CORBBTPROF_TOKEN_LIST_SECTION_HEADER *header;
+ header = (CORBBTPROF_TOKEN_LIST_SECTION_HEADER *)
+ profileMap->Allocate(sizeof(CORBBTPROF_TOKEN_LIST_SECTION_HEADER) +
+ pTokenArray->Size() * sizeof(CORBBTPROF_TOKEN_INFO));
+
+ header->NumTokens = pTokenArray->Size();
+ memcpy( (header + 1), &((*pTokenArray)[0]), pTokenArray->Size() * sizeof(CORBBTPROF_TOKEN_INFO));
+
+ // Reset the collected tokens
+ for (unsigned i = 0; i < CORBBTPROF_TOKEN_MAX_NUM_FLAGS; i++)
+ {
+ pTokenProfileData->m_formats[format].tokenBitmaps[i].Reset();
+ }
+ pTokenProfileData->m_formats[format].tokenArray.ReSizeNoThrow(0);
+ }
+ }
+ }
+}
+
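+//
+// Emits the Blob stream in two passes: the first pass sums the size of
+// every ProfilingBlobTable entry (variable payload plus its fixed
+// CORBBTPROF header), the second pass serializes the entries, and a
+// zero-filled EndOfBlobStream entry terminates the stream.
+//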
+static void ProfileDataAllocateTokenDefinitions(ProfileEmitter * pEmitter, Module * pModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ //
+ // Allocate and initialize the ibc token definition section (aka the Blob stream)
+ //
+ ProfileMap * profileMap = pEmitter->EmitNewSection(BlobStream);
+
+ // Compute the size of the metadata section:
+ // It is the sum of all of the Metadata Profile pool entries
+ // plus the sum of all of the Param signature entries
+ //
+ size_t totalSize = 0;
+
+ for (ProfilingBlobTable::Iterator cur = pModule->GetProfilingBlobTable()->Begin(),
+ end = pModule->GetProfilingBlobTable()->End();
+ (cur != end);
+ cur++)
+ {
+ const ProfilingBlobEntry * pEntry = *cur;
+ size_t blobElementSize = pEntry->varSize();
+ switch (pEntry->kind()) {
+ case ParamTypeSpec:
+ case ParamMethodSpec:
+ blobElementSize += sizeof(CORBBTPROF_BLOB_PARAM_SIG_ENTRY);
+ break;
+
+ case ExternalNamespaceDef:
+ blobElementSize += sizeof(CORBBTPROF_BLOB_NAMESPACE_DEF_ENTRY);
+ break;
+
+ case ExternalTypeDef:
+ blobElementSize += sizeof(CORBBTPROF_BLOB_TYPE_DEF_ENTRY);
+ break;
+
+ case ExternalSignatureDef:
+ blobElementSize += sizeof(CORBBTPROF_BLOB_SIGNATURE_DEF_ENTRY);
+ break;
+
+ case ExternalMethodDef:
+ blobElementSize += sizeof(CORBBTPROF_BLOB_METHOD_DEF_ENTRY);
+ break;
+
+ default:
+ _ASSERTE(!"Unexpected blob type");
+ break;
+ }
+ totalSize += blobElementSize;
+ }
+
+ profileMap->Allocate(totalSize);
+
+ size_t currentPos = 0;
+
+ // Traverse each element and record it
+ size_t blobElementSize = 0;
+ for (ProfilingBlobTable::Iterator cur = pModule->GetProfilingBlobTable()->Begin(),
+ end = pModule->GetProfilingBlobTable()->End();
+ (cur != end);
+ cur++, currentPos += blobElementSize)
+ {
+ const ProfilingBlobEntry * pEntry = *cur;
+ blobElementSize = pEntry->varSize();
+ void *profileData = profileMap->getOffsetPtr(currentPos);
+
+ switch (pEntry->kind()) {
+ case ParamTypeSpec:
+ {
+ CORBBTPROF_BLOB_PARAM_SIG_ENTRY * bProfileData = (CORBBTPROF_BLOB_PARAM_SIG_ENTRY*) profileData;
+ const TypeSpecBlobEntry * typeSpecBlobEntry = static_cast<const TypeSpecBlobEntry *>(pEntry);
+
+ blobElementSize += sizeof(CORBBTPROF_BLOB_PARAM_SIG_ENTRY);
+ bProfileData->blob.size = static_cast<DWORD>(blobElementSize);
+ bProfileData->blob.type = typeSpecBlobEntry->kind();
+ bProfileData->blob.token = typeSpecBlobEntry->token();
+ _ASSERTE(typeSpecBlobEntry->cbSig() > 0);
+ bProfileData->cSig = typeSpecBlobEntry->cbSig();
+ memcpy(&bProfileData->sig[0], typeSpecBlobEntry->pSig(), typeSpecBlobEntry->cbSig());
+ break;
+ }
+
+ case ParamMethodSpec:
+ {
+ CORBBTPROF_BLOB_PARAM_SIG_ENTRY * bProfileData = (CORBBTPROF_BLOB_PARAM_SIG_ENTRY*) profileData;
+ const MethodSpecBlobEntry * methodSpecBlobEntry = static_cast<const MethodSpecBlobEntry *>(pEntry);
+
+ blobElementSize += sizeof(CORBBTPROF_BLOB_PARAM_SIG_ENTRY);
+ bProfileData->blob.size = static_cast<DWORD>(blobElementSize);
+ bProfileData->blob.type = methodSpecBlobEntry->kind();
+ bProfileData->blob.token = methodSpecBlobEntry->token();
+ _ASSERTE(methodSpecBlobEntry->cbSig() > 0);
+ bProfileData->cSig = methodSpecBlobEntry->cbSig();
+ memcpy(&bProfileData->sig[0], methodSpecBlobEntry->pSig(), methodSpecBlobEntry->cbSig());
+ break;
+ }
+
+ case ExternalNamespaceDef:
+ {
+ CORBBTPROF_BLOB_NAMESPACE_DEF_ENTRY * bProfileData = (CORBBTPROF_BLOB_NAMESPACE_DEF_ENTRY*) profileData;
+ const ExternalNamespaceBlobEntry * namespaceBlobEntry = static_cast<const ExternalNamespaceBlobEntry *>(pEntry);
+
+ blobElementSize += sizeof(CORBBTPROF_BLOB_NAMESPACE_DEF_ENTRY);
+ bProfileData->blob.size = static_cast<DWORD>(blobElementSize);
+ bProfileData->blob.type = namespaceBlobEntry->kind();
+ bProfileData->blob.token = namespaceBlobEntry->token();
+ _ASSERTE(namespaceBlobEntry->cbName() > 0);
+ bProfileData->cName = namespaceBlobEntry->cbName();
+ memcpy(&bProfileData->name[0], namespaceBlobEntry->pName(), namespaceBlobEntry->cbName());
+ break;
+ }
+
+ case ExternalTypeDef:
+ {
+ CORBBTPROF_BLOB_TYPE_DEF_ENTRY * bProfileData = (CORBBTPROF_BLOB_TYPE_DEF_ENTRY*) profileData;
+ const ExternalTypeBlobEntry * typeBlobEntry = static_cast<const ExternalTypeBlobEntry *>(pEntry);
+
+ blobElementSize += sizeof(CORBBTPROF_BLOB_TYPE_DEF_ENTRY);
+ bProfileData->blob.size = static_cast<DWORD>(blobElementSize);
+ bProfileData->blob.type = typeBlobEntry->kind();
+ bProfileData->blob.token = typeBlobEntry->token();
+ bProfileData->assemblyRefToken = typeBlobEntry->assemblyRef();
+ bProfileData->nestedClassToken = typeBlobEntry->nestedClass();
+ bProfileData->nameSpaceToken = typeBlobEntry->nameSpace();
+ _ASSERTE(typeBlobEntry->cbName() > 0);
+ bProfileData->cName = typeBlobEntry->cbName();
+ memcpy(&bProfileData->name[0], typeBlobEntry->pName(), typeBlobEntry->cbName());
+ break;
+ }
+
+ case ExternalSignatureDef:
+ {
+ CORBBTPROF_BLOB_SIGNATURE_DEF_ENTRY * bProfileData = (CORBBTPROF_BLOB_SIGNATURE_DEF_ENTRY*) profileData;
+ const ExternalSignatureBlobEntry * signatureBlobEntry = static_cast<const ExternalSignatureBlobEntry *>(pEntry);
+
+ blobElementSize += sizeof(CORBBTPROF_BLOB_SIGNATURE_DEF_ENTRY);
+ bProfileData->blob.size = static_cast<DWORD>(blobElementSize);
+ bProfileData->blob.type = signatureBlobEntry->kind();
+ bProfileData->blob.token = signatureBlobEntry->token();
+ _ASSERTE(signatureBlobEntry->cbSig() > 0);
+ bProfileData->cSig = signatureBlobEntry->cbSig();
+ memcpy(&bProfileData->sig[0], signatureBlobEntry->pSig(), signatureBlobEntry->cbSig());
+ break;
+ }
+
+ case ExternalMethodDef:
+ {
+ CORBBTPROF_BLOB_METHOD_DEF_ENTRY * bProfileData = (CORBBTPROF_BLOB_METHOD_DEF_ENTRY*) profileData;
+ const ExternalMethodBlobEntry * methodBlobEntry = static_cast<const ExternalMethodBlobEntry *>(pEntry);
+
+ blobElementSize += sizeof(CORBBTPROF_BLOB_METHOD_DEF_ENTRY);
+ bProfileData->blob.size = static_cast<DWORD>(blobElementSize);
+ bProfileData->blob.type = methodBlobEntry->kind();
+ bProfileData->blob.token = methodBlobEntry->token();
+ bProfileData->nestedClassToken = methodBlobEntry->nestedClass();
+ bProfileData->signatureToken = methodBlobEntry->signature();
+ _ASSERTE(methodBlobEntry->cbName() > 0);
+ bProfileData->cName = methodBlobEntry->cbName();
+ memcpy(&bProfileData->name[0], methodBlobEntry->pName(), methodBlobEntry->cbName());
+ break;
+ }
+
+ default:
+ _ASSERTE(!"Unexpected blob type");
+ break;
+ }
+ }
+
+ _ASSERTE(currentPos == totalSize);
+
+ // Emit a terminating entry with type EndOfBlobStream to mark the end
+ DWORD mdElementSize = sizeof(CORBBTPROF_BLOB_ENTRY);
+ void *profileData = profileMap->Allocate(mdElementSize);
+ memset(profileData, 0, mdElementSize);
+
+ CORBBTPROF_BLOB_ENTRY* mdProfileData = (CORBBTPROF_BLOB_ENTRY*) profileData;
+ mdProfileData->type = EndOfBlobStream;
+ mdProfileData->size = sizeof(CORBBTPROF_BLOB_ENTRY);
+}
+
+// Responsible for writing out the profile data if the COMPLUS_BBInstr
+// environment variable is set. This is called when the module is unloaded
+// (usually at shutdown).
+HRESULT Module::WriteMethodProfileDataLogFile(bool cleanup)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (IsResource())
+ return S_OK;
+
+ EX_TRY
+ {
+ if (GetAssembly()->IsInstrumented() && (m_pProfilingBlobTable != NULL) && (m_tokenProfileData != NULL))
+ {
+ ProfileEmitter * pEmitter = new ProfileEmitter();
+
+ // Get this ahead of time - metadata access may be logged, which would
+ // take the m_tokenProfileData->crst that we acquire a couple of lines below
+ LPCSTR pszName;
+ GUID mvid;
+ IfFailThrow(GetMDImport()->GetScopeProps(&pszName, &mvid));
+
+ CrstHolder ch(&m_tokenProfileData->crst);
+
+ //
+ // Create the scenario info section
+ //
+ ProfileDataAllocateScenarioInfo(pEmitter, pszName, &mvid);
+
+ //
+ // Create the method block count section
+ //
+ ProfileDataAllocateMethodBlockCounts(pEmitter, m_methodProfileList);
+
+ //
+ // Create the token list sections
+ //
+ ProfileDataAllocateTokenLists(pEmitter, m_tokenProfileData);
+
+ //
+ // Create the ibc token definition section (aka the Blob stream)
+ //
+ ProfileDataAllocateTokenDefinitions(pEmitter, this);
+
+ //
+ // Now store the profile data in the ibc file
+ //
+ ProfileMap profileImage;
+ pEmitter->Serialize(&profileImage, mvid);
+
+ HandleHolder profileDataFile(OpenMethodProfileDataLogFile(mvid));
+
+ ULONG count;
+ BOOL result = WriteFile(profileDataFile, profileImage.getOffsetPtr(0), profileImage.getCurrentOffset(), &count, NULL);
+ if (!result || (count != profileImage.getCurrentOffset()))
+ {
+ DWORD lasterror = GetLastError();
+ _ASSERTE(!"Error writing ibc profile data to file");
+ hr = HRESULT_FROM_WIN32(lasterror);
+ }
+ }
+
+ if (cleanup)
+ {
+ DeleteProfilingData();
+ }
+ }
+ EX_CATCH
+ {
+ hr = E_FAIL;
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return hr;
+}
+
+
+/* static */
+void Module::WriteAllModuleProfileData(bool cleanup)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Iterate over all the app domains; for each one iterate over its
+ // assemblies; for each one iterate over its modules.
+ EX_TRY
+ {
+ AppDomainIterator appDomainIterator(FALSE);
+ while(appDomainIterator.Next())
+ {
+ AppDomain * appDomain = appDomainIterator.GetDomain();
+ AppDomain::AssemblyIterator assemblyIterator = appDomain->IterateAssembliesEx(
+ (AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (assemblyIterator.Next(pDomainAssembly.This()))
+ {
+ DomainModuleIterator i = pDomainAssembly->IterateModules(kModIterIncludeLoaded);
+ while (i.Next())
+ {
+ /*hr=*/i.GetModule()->WriteMethodProfileDataLogFile(cleanup);
+ }
+ }
+ }
+ }
+ EX_CATCH
+ { }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+PTR_ProfilingBlobTable Module::GetProfilingBlobTable()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pProfilingBlobTable;
+}
+
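+// Lazily creates the token profile data and the profiling blob table.
+// Racing threads may each allocate; InterlockedCompareExchangeT publishes
+// exactly one instance and the losing thread deletes its own copy.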
+void Module::CreateProfilingData()
+{
+ TokenProfileData *tpd = TokenProfileData::CreateNoThrow();
+
+ PVOID pv = InterlockedCompareExchangeT(&m_tokenProfileData, tpd, NULL);
+ if (pv != NULL)
+ {
+ delete tpd;
+ }
+
+ PTR_ProfilingBlobTable ppbt = new (nothrow) ProfilingBlobTable();
+
+ if (ppbt != NULL)
+ {
+ pv = InterlockedCompareExchangeT(&m_pProfilingBlobTable, ppbt, NULL);
+ if (pv != NULL)
+ {
+ delete ppbt;
+ }
+ }
+}
+
+void Module::DeleteProfilingData()
+{
+ if (m_pProfilingBlobTable != NULL)
+ {
+ for (ProfilingBlobTable::Iterator cur = m_pProfilingBlobTable->Begin(),
+ end = m_pProfilingBlobTable->End();
+ (cur != end);
+ cur++)
+ {
+ const ProfilingBlobEntry * pCurrentEntry = *cur;
+ delete pCurrentEntry;
+ }
+ delete m_pProfilingBlobTable;
+ m_pProfilingBlobTable = NULL;
+ }
+
+ if (m_tokenProfileData != NULL)
+ {
+ delete m_tokenProfileData;
+ m_tokenProfileData = NULL;
+ }
+
+ // the metadataProfileData is free'ed in destructor of the corresponding MetaDataTracker
+}
+#endif //FEATURE_PREJIT
+
+#ifdef FEATURE_MIXEDMODE
+void Module::SetIsIJWFixedUp()
+{
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockOr(&m_dwTransientFlags, IS_IJW_FIXED_UP);
+}
+#endif
+
+
+#ifdef FEATURE_PREJIT
+/* static */
+Module::TokenProfileData *Module::TokenProfileData::CreateNoThrow(void)
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ TokenProfileData *tpd = NULL;
+
+ EX_TRY
+ {
+ //
+ // This constructor calls crst.Init(), which may throw, so new (nothrow) wouldn't
+ // do what we want here. Thus we wrap it in a TRY/CATCH and revert to NULL
+ // if it fails.
+ //
+ tpd = new TokenProfileData();
+ }
+ EX_CATCH
+ {
+ tpd = NULL;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return tpd;
+}
+
+#endif // FEATURE_PREJIT
+
+#endif // !DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+void Module::SetBeingUnloaded()
+{
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockOr((ULONG*)&m_dwTransientFlags, IS_BEING_UNLOADED);
+}
+#endif
+
+#ifdef FEATURE_PREJIT
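+// Records a touch of a metadata token during ibc instrumentation. Each
+// (token, flag) pair is deduplicated through a per-flag RidBitmap before
+// being recorded in the section's token array.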
+void Module::LogTokenAccess(mdToken token, SectionFormat format, ULONG flagnum)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(g_IBCLogger.InstrEnabled());
+ PRECONDITION(flagnum < CORBBTPROF_TOKEN_MAX_NUM_FLAGS);
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+
+ //
+ // If we are in ngen instrumentation mode, then we should record this token.
+ //
+
+ if (!m_nativeImageProfiling)
+ return;
+
+ mdToken rid = RidFromToken(token);
+ CorTokenType tkType = (CorTokenType) TypeFromToken(token);
+ SectionFormat tkKind = (SectionFormat) (tkType >> 24);
+
+ if ((rid == 0) && (tkKind < (SectionFormat) TBL_COUNT))
+ return;
+
+ FAULT_NOT_FATAL();
+
+ _ASSERTE(TypeProfilingData == FirstTokenFlagSection + TBL_TypeDef);
+ _ASSERTE(MethodProfilingData == FirstTokenFlagSection + TBL_Method);
+ _ASSERTE(SectionFormatCount >= FirstTokenFlagSection + TBL_COUNT + 4);
+
+ if (!m_tokenProfileData)
+ {
+ CreateProfilingData();
+ }
+
+ if (!m_tokenProfileData)
+ {
+ return;
+ }
+
+ if (tkKind == (SectionFormat) (ibcTypeSpec >> 24))
+ tkKind = IbcTypeSpecSection;
+ else if (tkKind == (SectionFormat) (ibcMethodSpec >> 24))
+ tkKind = IbcMethodSpecSection;
+
+ _ASSERTE(tkKind < SectionFormatCount);
+ if (tkKind >= SectionFormatCount)
+ {
+ return;
+ }
+
+ CQuickArray<CORBBTPROF_TOKEN_INFO> * pTokenArray = &m_tokenProfileData->m_formats[format].tokenArray;
+ RidBitmap * pTokenBitmap = &m_tokenProfileData->m_formats[tkKind].tokenBitmaps[flagnum];
+
+ // Have we seen this token with this flag already?
+ if (pTokenBitmap->IsTokenInBitmap(token))
+ {
+ return;
+ }
+
+ // Insert the token to the bitmap
+ if (FAILED(pTokenBitmap->InsertToken(token)))
+ {
+ return;
+ }
+
+ ULONG flag = 1 << flagnum;
+
+ // [ToDo] Fix: this is a sequential search and can be very slow
+ for (unsigned int i = 0; i < pTokenArray->Size(); i++)
+ {
+ if ((*pTokenArray)[i].token == token)
+ {
+ _ASSERTE(! ((*pTokenArray)[i].flags & flag));
+ (*pTokenArray)[i].flags |= flag;
+ return;
+ }
+ }
+
+ if (FAILED(pTokenArray->ReSizeNoThrow(pTokenArray->Size() + 1)))
+ {
+ return;
+ }
+
+ (*pTokenArray)[pTokenArray->Size() - 1].token = token;
+ (*pTokenArray)[pTokenArray->Size() - 1].flags = flag;
+ (*pTokenArray)[pTokenArray->Size() - 1].scenarios = 0;
+
+#endif // !DACCESS_COMPILE
+}
+
+void Module::LogTokenAccess(mdToken token, ULONG flagNum)
+{
+ WRAPPER_NO_CONTRACT;
+ SectionFormat format = (SectionFormat)((TypeFromToken(token)>>24) + FirstTokenFlagSection);
+ if (FirstTokenFlagSection <= format && format < SectionFormatCount)
+ {
+ LogTokenAccess(token, format, flagNum);
+ }
+}
+#endif // FEATURE_PREJIT
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_PREJIT
+
+//
+// Encoding callbacks
+//
+
+/*static*/ DWORD Module::EncodeModuleHelper(void * pModuleContext, Module *pReferencedModule)
+{
+ Module* pReferencingModule = (Module *) pModuleContext;
+ _ASSERTE(pReferencingModule != pReferencedModule);
+
+ Assembly *pReferencingAssembly = pReferencingModule->GetAssembly();
+ Assembly *pReferencedAssembly = pReferencedModule->GetAssembly();
+
+ _ASSERTE(pReferencingAssembly != pReferencedAssembly);
+
+ if (pReferencedAssembly == pReferencingAssembly)
+ {
+ return 0;
+ }
+
+ mdAssemblyRef token = pReferencingModule->FindAssemblyRef(pReferencedAssembly);
+
+ if (IsNilToken(token))
+ {
+ return ENCODE_MODULE_FAILED;
+ }
+
+ return RidFromToken(token);
+}
+
+/*static*/ void Module::TokenDefinitionHelper(void* pModuleContext, Module *pReferencedModule, DWORD index, mdToken* pToken)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr;
+ Module * pReferencingModule = (Module *) pModuleContext;
+ mdAssemblyRef mdAssemblyRef = TokenFromRid(index, mdtAssemblyRef);
+ IMDInternalImport * pImport = pReferencedModule->GetMDImport();
+ LPCUTF8 szName = NULL;
+
+ if (TypeFromToken(*pToken) == mdtTypeDef)
+ {
+ //
+ // Compute nested type (if any)
+ //
+ mdTypeDef mdEnclosingType = idExternalTypeNil;
+ hr = pImport->GetNestedClassProps(*pToken, &mdEnclosingType);
+ // If there's no enclosing type, then hr=CLDB_E_RECORD_NOTFOUND and mdEnclosingType is unchanged
+ _ASSERTE((hr == S_OK) || (hr == CLDB_E_RECORD_NOTFOUND));
+
+ if (!IsNilToken(mdEnclosingType))
+ {
+ _ASSERT(TypeFromToken(mdEnclosingType) == mdtTypeDef);
+ TokenDefinitionHelper(pModuleContext, pReferencedModule, index, &mdEnclosingType);
+ }
+ _ASSERT(TypeFromToken(mdEnclosingType) == ibcExternalType);
+
+ //
+ // Compute type name and namespace.
+ //
+ LPCUTF8 szNamespace = NULL;
+ hr = pImport->GetNameOfTypeDef(*pToken, &szName, &szNamespace);
+ _ASSERTE(hr == S_OK);
+
+ //
+ // Transform namespace string into ibc external namespace token
+ //
+ idExternalNamespace idNamespace = idExternalNamespaceNil;
+ if (szNamespace != NULL)
+ {
+ const ExternalNamespaceBlobEntry * pNamespaceEntry;
+ pNamespaceEntry = ExternalNamespaceBlobEntry::FindOrAdd(pReferencingModule, szNamespace);
+ if (pNamespaceEntry != NULL)
+ {
+ idNamespace = pNamespaceEntry->token();
+ }
+ }
+ _ASSERTE(TypeFromToken(idNamespace) == ibcExternalNamespace);
+
+ //
+ // Transform type name into ibc external type token
+ //
+ idExternalType idType = idExternalTypeNil;
+ _ASSERTE(szName != NULL);
+ const ExternalTypeBlobEntry * pTypeEntry = NULL;
+ pTypeEntry = ExternalTypeBlobEntry::FindOrAdd(pReferencingModule,
+ mdAssemblyRef,
+ mdEnclosingType,
+ idNamespace,
+ szName);
+ if (pTypeEntry != NULL)
+ {
+ idType = pTypeEntry->token();
+ }
+ _ASSERTE(TypeFromToken(idType) == ibcExternalType);
+
+ *pToken = idType; // Remap pToken to our idExternalType token
+ }
+ else if (TypeFromToken(*pToken) == mdtMethodDef)
+ {
+ //
+ // Compute nested type (if any)
+ //
+ mdTypeDef mdEnclosingType = idExternalTypeNil;
+ hr = pImport->GetParentToken(*pToken, &mdEnclosingType);
+ _ASSERTE(!FAILED(hr));
+
+ if (!IsNilToken(mdEnclosingType))
+ {
+ _ASSERT(TypeFromToken(mdEnclosingType) == mdtTypeDef);
+ TokenDefinitionHelper(pModuleContext, pReferencedModule, index, &mdEnclosingType);
+ }
+ _ASSERT(TypeFromToken(mdEnclosingType) == ibcExternalType);
+
+ //
+ // Compute the method name and signature
+ //
+ PCCOR_SIGNATURE pSig = NULL;
+ DWORD cbSig = 0;
+ hr = pImport->GetNameAndSigOfMethodDef(*pToken, &pSig, &cbSig, &szName);
+ _ASSERTE(hr == S_OK);
+
+ //
+ // Transform signature into ibc external signature token
+ //
+ idExternalSignature idSignature = idExternalSignatureNil;
+ if (pSig != NULL)
+ {
+ const ExternalSignatureBlobEntry * pSignatureEntry;
+ pSignatureEntry = ExternalSignatureBlobEntry::FindOrAdd(pReferencingModule, cbSig, pSig);
+ if (pSignatureEntry != NULL)
+ {
+ idSignature = pSignatureEntry->token();
+ }
+ }
+ _ASSERTE(TypeFromToken(idSignature) == ibcExternalSignature);
+
+ //
+ // Transform method name into ibc external method token
+ //
+ idExternalMethod idMethod = idExternalMethodNil;
+ _ASSERTE(szName != NULL);
+ const ExternalMethodBlobEntry * pMethodEntry = NULL;
+ pMethodEntry = ExternalMethodBlobEntry::FindOrAdd(pReferencingModule,
+ mdEnclosingType,
+ idSignature,
+ szName);
+ if (pMethodEntry != NULL)
+ {
+ idMethod = pMethodEntry->token();
+ }
+ _ASSERTE(TypeFromToken(idMethod) == ibcExternalMethod);
+
+ *pToken = idMethod; // Remap pToken to our idMethodSpec token
+ }
+ else
+ {
+ _ASSERTE(!"Unexpected token type");
+ }
+}
+
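+// Records an instantiated type in the ibc data: the type handle is encoded
+// as a ZapSig signature (remapping cross-module tokens via the helpers
+// above) and interned as a TypeSpec blob entry whose flags accumulate the
+// access kinds reported so far.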
+idTypeSpec Module::LogInstantiatedType(TypeHandle typeHnd, ULONG flagNum)
+{
+ CONTRACT(idTypeSpec)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(g_IBCLogger.InstrEnabled());
+ PRECONDITION(!typeHnd.HasUnrestoredTypeKey());
+ // We want to report the type only in its own loader module as a type's
+ // MethodTable can only live in its own loader module.
+ // We can relax this if we allow a (duplicate) MethodTable to live
+ // in any module (which might be needed for ngen of generics)
+#ifdef FEATURE_PREJIT
+ PRECONDITION(this == GetPreferredZapModuleForTypeHandle(typeHnd));
+#endif
+ }
+ CONTRACT_END;
+
+ idTypeSpec result = idTypeSpecNil;
+
+ if (m_nativeImageProfiling)
+ {
+ CONTRACT_VIOLATION(ThrowsViolation|FaultViolation|GCViolation);
+
+ SigBuilder sigBuilder;
+
+ ZapSig zapSig(this, this, ZapSig::IbcTokens,
+ Module::EncodeModuleHelper, Module::TokenDefinitionHelper);
+ BOOL fSuccess = zapSig.GetSignatureForTypeHandle(typeHnd, &sigBuilder);
+
+ // a return value of FALSE indicates a failure to create the signature
+ if (fSuccess)
+ {
+ DWORD cbSig;
+ PCCOR_SIGNATURE pSig = (PCCOR_SIGNATURE)sigBuilder.GetSignature(&cbSig);
+
+ ULONG flag = (1 << flagNum);
+ TypeSpecBlobEntry * pEntry = const_cast<TypeSpecBlobEntry *>(TypeSpecBlobEntry::FindOrAdd(this, cbSig, pSig));
+ if (pEntry != NULL)
+ {
+ // Update the flags with any new bits
+ pEntry->orFlag(flag);
+ result = pEntry->token();
+ }
+ }
+ }
+ _ASSERTE(TypeFromToken(result) == ibcTypeSpec);
+
+ RETURN result;
+}
+
+idMethodSpec Module::LogInstantiatedMethod(const MethodDesc * md, ULONG flagNum)
+{
+ CONTRACT(idMethodSpec)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION( md != NULL );
+ }
+ CONTRACT_END;
+
+ idMethodSpec result = idMethodSpecNil;
+
+ if (m_nativeImageProfiling)
+ {
+ CONTRACT_VIOLATION(ThrowsViolation|FaultViolation|GCViolation);
+
+ // get data
+ SigBuilder sigBuilder;
+
+ BOOL fSuccess;
+ fSuccess = ZapSig::EncodeMethod(const_cast<MethodDesc *>(md), this, &sigBuilder,
+ (LPVOID) this,
+ (ENCODEMODULE_CALLBACK) Module::EncodeModuleHelper,
+ (DEFINETOKEN_CALLBACK) Module::TokenDefinitionHelper);
+
+ if (fSuccess)
+ {
+ DWORD dataSize;
+ BYTE * pBlob = (BYTE *)sigBuilder.GetSignature(&dataSize);
+
+ ULONG flag = (1 << flagNum);
+ MethodSpecBlobEntry * pEntry = const_cast<MethodSpecBlobEntry *>(MethodSpecBlobEntry::FindOrAdd(this, dataSize, pBlob));
+ if (pEntry != NULL)
+ {
+ // Update the flags with any new bits
+ pEntry->orFlag(flag);
+ result = pEntry->token();
+ }
+ }
+ }
+
+ _ASSERTE(TypeFromToken(result) == ibcMethodSpec);
+ RETURN result;
+}
+#endif // DACCESS_COMPILE
+#endif //FEATURE_PREJIT
+
+#ifndef DACCESS_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+// ===========================================================================
+// ReflectionModule
+// ===========================================================================
+
+/* static */
+ReflectionModule *ReflectionModule::Create(Assembly *pAssembly, PEFile *pFile, AllocMemTracker *pamTracker, LPCWSTR szName, BOOL fIsTransient)
+{
+ CONTRACT(ReflectionModule *)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(CheckPointer(pFile));
+ PRECONDITION(pFile->IsDynamic());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // Hoist CONTRACT into separate routine because of EX incompatibility
+
+ mdFile token;
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ if (pFile->IsAssembly())
+ token = mdFileNil;
+ else
+ token = ((PEModule *)pFile)->GetToken();
+#else
+ _ASSERTE(pFile->IsAssembly());
+ token = mdFileNil;
+#endif
+
+ // Initial memory block for Modules must be zero-initialized (to make it harder
+ // to introduce Destruct crashes arising from OOMs during initialization).
+
+ void* pMemory = pamTracker->Track(pAssembly->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(ReflectionModule))));
+ ReflectionModuleHolder pModule(new (pMemory) ReflectionModule(pAssembly, token, pFile));
+
+ pModule->DoInit(pamTracker, szName);
+
+ // Set this at module creation time. The m_fIsTransient field should never change during the lifetime of this ReflectionModule.
+ pModule->SetIsTransient(fIsTransient ? true : false);
+
+ RETURN pModule.Extract();
+}
+
+
+// Module initialization occurs in two phases: the constructor phase and the Initialize phase.
+//
+// The constructor phase initializes just enough so that Destruct() can be safely called.
+// It cannot throw or fail.
+//
+ReflectionModule::ReflectionModule(Assembly *pAssembly, mdFile token, PEFile *pFile)
+ : Module(pAssembly, token, pFile)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ m_pInMemoryWriter = NULL;
+ m_sdataSection = NULL;
+ m_pISymUnmanagedWriter = NULL;
+ m_pCreatingAssembly = NULL;
+ m_pCeeFileGen = NULL;
+ m_pDynamicMetadata = NULL;
+ m_fSuppressMetadataCapture = false;
+ m_fIsTransient = false;
+}
+
+HRESULT STDMETHODCALLTYPE CreateICeeGen(REFIID riid, void **pCeeGen);
+
+// Module initialization occurs in two phases: the constructor phase and the Initialize phase.
+//
+// The Initialize() phase completes the initialization after the constructor has run.
+// It can throw exceptions but whether it throws or succeeds, it must leave the Module
+// in a state where Destruct() can be safely called.
+//
+void ReflectionModule::Initialize(AllocMemTracker *pamTracker, LPCWSTR szName)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ PRECONDITION(szName != NULL);
+ }
+ CONTRACTL_END;
+
+ Module::Initialize(pamTracker);
+
+ IfFailThrow(CreateICeeGen(IID_ICeeGen, (void **)&m_pCeeFileGen));
+
+ // Collectible modules should try to limit the growth of their associated IL section, as common scenarios
+ // for collectible modules include single-type modules
+ if (IsCollectible())
+ {
+ ReleaseHolder<ICeeGenInternal> pCeeGenInternal(NULL);
+ IfFailThrow(m_pCeeFileGen->QueryInterface(IID_ICeeGenInternal, (void **)&pCeeGenInternal));
+ IfFailThrow(pCeeGenInternal->SetInitialGrowth(CEE_FILE_GEN_GROWTH_COLLECTIBLE));
+ }
+
+ m_pInMemoryWriter = new RefClassWriter();
+
+ IfFailThrow(m_pInMemoryWriter->Init(GetCeeGen(), GetEmitter(), szName));
+
+ m_CrstLeafLock.Init(CrstLeafLock);
+}
+
+void ReflectionModule::Destruct()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ delete m_pInMemoryWriter;
+
+ if (m_pISymUnmanagedWriter)
+ {
+ m_pISymUnmanagedWriter->Close();
+ m_pISymUnmanagedWriter->Release();
+ m_pISymUnmanagedWriter = NULL;
+ }
+
+ if (m_pCeeFileGen)
+ m_pCeeFileGen->Release();
+
+ Module::Destruct();
+
+ delete m_pDynamicMetadata;
+ m_pDynamicMetadata = NULL;
+
+ m_CrstLeafLock.Destroy();
+}
+
+// Returns true iff metadata capturing is suppressed.
+//
+// Notes:
+// This is during the window after code:ReflectionModule.SuppressMetadataCapture and before
+// code:ReflectionModule.ResumeMetadataCapture.
+//
+// If metadata updates are suppressed, then class-load notifications should be suppressed too.
+bool ReflectionModule::IsMetadataCaptureSuppressed()
+{
+ return m_fSuppressMetadataCapture;
+}
+//
+// Holder of changed value of MDUpdateMode via IMDInternalEmit::SetMDUpdateMode.
+// Returns back the original value on release.
+//
+class MDUpdateModeHolder
+{
+public:
+ MDUpdateModeHolder()
+ {
+ m_pInternalEmitter = NULL;
+ m_OriginalMDUpdateMode = ULONG_MAX;
+ }
+ ~MDUpdateModeHolder()
+ {
+ WRAPPER_NO_CONTRACT;
+ (void)Release();
+ }
+ HRESULT SetMDUpdateMode(IMetaDataEmit *pEmitter, ULONG updateMode)
+ {
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+
+ _ASSERTE(updateMode != ULONG_MAX);
+
+ IfFailRet(pEmitter->QueryInterface(IID_IMDInternalEmit, (void **)&m_pInternalEmitter));
+ _ASSERTE(m_pInternalEmitter != NULL);
+
+ IfFailRet(m_pInternalEmitter->SetMDUpdateMode(updateMode, &m_OriginalMDUpdateMode));
+ _ASSERTE(m_OriginalMDUpdateMode != ULONG_MAX);
+
+ return hr;
+ }
+ HRESULT Release(ULONG expectedPreviousUpdateMode = ULONG_MAX)
+ {
+ HRESULT hr = S_OK;
+
+ if (m_OriginalMDUpdateMode != ULONG_MAX)
+ {
+ _ASSERTE(m_pInternalEmitter != NULL);
+ ULONG previousUpdateMode;
+ // Ignore the error when releasing
+ hr = m_pInternalEmitter->SetMDUpdateMode(m_OriginalMDUpdateMode, &previousUpdateMode);
+ m_OriginalMDUpdateMode = ULONG_MAX;
+
+ if (expectedPreviousUpdateMode != ULONG_MAX)
+ {
+ if ((hr == S_OK) && (expectedPreviousUpdateMode != previousUpdateMode))
+ {
+ hr = S_FALSE;
+ }
+ }
+ }
+ if (m_pInternalEmitter != NULL)
+ {
+ (void)m_pInternalEmitter->Release();
+ m_pInternalEmitter = NULL;
+ }
+ return hr;
+ }
+ ULONG GetOriginalMDUpdateMode()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(m_OriginalMDUpdateMode != ULONG_MAX);
+ return m_OriginalMDUpdateMode;
+ }
+private:
+ IMDInternalEmit *m_pInternalEmitter;
+ ULONG m_OriginalMDUpdateMode;
+};
+
+// Called in live paths to fetch metadata for dynamic modules. This makes the metadata available to the
+// debugger from out-of-process.
+//
+// Notes:
+// This buffer can be retrieved by the debugger via code:ReflectionModule.GetDynamicMetadataBuffer
+//
+// Threading:
+// - Callers must ensure nobody else is adding to the metadata.
+// - This function still takes its own locks to cooperate with the Debugger's out-of-process access.
+// The debugger can slip this thread outside the locks to ensure the data is consistent.
+//
+// This does not raise a debug notification to invalidate the metadata. Reasoning is that this only
+// happens in two cases:
+// 1) manifest module is updated with the name of a new dynamic module.
+ // 2) on each class load, in which case we already send a class-load notification, so sending a
+ // separate "metadata-refresh" event would make the eventing twice as chatty. Class-load events are
+ // high-volume and events are slow.
+ // Thus we can avoid the chattiness by ensuring the debugger knows that a class load also means
+ // "refresh metadata".
+//
+void ReflectionModule::CaptureModuleMetaDataToMemory()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ // If we've suppressed metadata capture, then skip this. We'll recapture when we enable it. This allows
+ // for batching up capture.
+ // If a debugger is attached, then the CLR will still send ClassLoad notifications for dynamic modules,
+ // which mean we still need to keep the metadata available. This is the same as Whidbey.
+ // An alternative (and better) design would be to suppress ClassLoad notifications too, but then we'd
+ // need some way of sending a "catchup" notification to the debugger after we re-enable notifications.
+ if (IsMetadataCaptureSuppressed() && !CORDebuggerAttached())
+ {
+ return;
+ }
+
+ // Do not release the emitter. This is a weak reference.
+ IMetaDataEmit *pEmitter = this->GetEmitter();
+ _ASSERTE(pEmitter != NULL);
+
+ HRESULT hr;
+
+ MDUpdateModeHolder hMDUpdateMode;
+ IfFailThrow(hMDUpdateMode.SetMDUpdateMode(pEmitter, MDUpdateExtension));
+ _ASSERTE(hMDUpdateMode.GetOriginalMDUpdateMode() == MDUpdateFull);
+
+ DWORD numBytes;
+ hr = pEmitter->GetSaveSize(cssQuick, &numBytes);
+ IfFailThrow(hr);
+
+ // Operate on local data, and then persist it into the module once we know it's valid.
+ NewHolder<SBuffer> pBuffer(new SBuffer());
+ _ASSERTE(pBuffer != NULL); // allocation would throw first
+
+ // ReflectionModule is still in a consistent state, and now we're just operating on local data to
+ // assemble the new metadata buffer. If this fails, then worst case is that metadata does not include
+ // recently generated classes.
+
+ // Caller ensures serialization that guarantees that the metadata doesn't grow underneath us.
+ BYTE * pRawData = pBuffer->OpenRawBuffer(numBytes);
+ hr = pEmitter->SaveToMemory(pRawData, numBytes);
+ pBuffer->CloseRawBuffer();
+
+ IfFailThrow(hr);
+
+ // Now that we're successful, transfer ownership back into the module.
+ {
+ CrstHolder ch(&m_CrstLeafLock);
+
+ delete m_pDynamicMetadata;
+
+ m_pDynamicMetadata = pBuffer.Extract();
+ }
+
+ hr = hMDUpdateMode.Release(MDUpdateExtension);
+ // Will be S_FALSE if someone changed the MDUpdateMode (from MDUpdateExtension) meanwhile
+ _ASSERTE(hr == S_OK);
+}
+
+// Suppress the eager metadata serialization.
+//
+// Notes:
+ // This causes code:ReflectionModule.CaptureModuleMetaDataToMemory to be a nop.
+// This is not nestable.
+// This exists purely for performance reasons.
+//
+// Don't call this directly. Use a SuppressMetadataCaptureHolder holder to ensure it's
+// balanced with code:ReflectionModule.ResumeMetadataCapture
+//
+ // Types generated while eager metadata-capture is suppressed should not actually be executed until
+// after metadata capture is restored.
+void ReflectionModule::SuppressMetadataCapture()
+{
+ LIMITED_METHOD_CONTRACT;
+ // If this fires, then you probably missed a call to ResumeMetadataCapture.
+ CONSISTENCY_CHECK_MSG(!m_fSuppressMetadataCapture, "SuppressMetadataCapture is not nestable");
+ m_fSuppressMetadataCapture = true;
+}
+
+// Resumes eager metadata serialization.
+//
+// Notes:
+ // This causes code:ReflectionModule.CaptureModuleMetaDataToMemory to resume eagerly serializing metadata.
+// This must be called after code:ReflectionModule.SuppressMetadataCapture.
+//
+void ReflectionModule::ResumeMetadataCapture()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(m_fSuppressMetadataCapture);
+ m_fSuppressMetadataCapture = false;
+
+ CaptureModuleMetaDataToMemory();
+}
+
+void ReflectionModule::ReleaseILData()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pISymUnmanagedWriter)
+ {
+ m_pISymUnmanagedWriter->Release();
+ m_pISymUnmanagedWriter = NULL;
+ }
+
+ Module::ReleaseILData();
+}
+#endif // CROSSGEN_COMPILE
+
+#endif // !DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+// Accessor to expose m_pDynamicMetadata to debugger.
+//
+// Returns:
+// Pointer to SBuffer containing metadata buffer. May be null.
+//
+// Notes:
+// Only used by the debugger, so only accessible via DAC.
+// The buffer is updated via code:ReflectionModule.CaptureModuleMetaDataToMemory
+PTR_SBuffer ReflectionModule::GetDynamicMetadataBuffer() const
+{
+ SUPPORTS_DAC;
+
+ // If we ask for metadata, but have been suppressing capture, then we're out of date.
+ // However, the debugger may be debugging already baked types in the module and so may need the metadata
+ // for that. So we return what we do have.
+ //
+ // Debugger will get the next metadata update:
+ // 1) with the next load class
+ // 2) or if this is right after the last class, see code:ReflectionModule.CaptureModuleMetaDataToMemory
+
+ return m_pDynamicMetadata;
+}
+#endif
+
+TADDR ReflectionModule::GetIL(RVA il) // virtual
+{
+#ifndef DACCESS_COMPILE
+ WRAPPER_NO_CONTRACT;
+
+ BYTE* pByte = NULL;
+ m_pCeeFileGen->GetMethodBuffer(il, &pByte);
+ return TADDR(pByte);
+#else // DACCESS_COMPILE
+ SUPPORTS_DAC;
+ DacNotImpl();
+ return NULL;
+#endif // DACCESS_COMPILE
+}
+
+PTR_VOID ReflectionModule::GetRvaField(RVA field, BOOL fZapped) // virtual
+{
+ _ASSERTE(!fZapped);
+#ifndef DACCESS_COMPILE
+ WRAPPER_NO_CONTRACT;
+ // This function should be called only if the target is a field with an RVA.
+ PTR_BYTE pByte = NULL;
+ m_pCeeFileGen->ComputePointer(m_sdataSection, field, &pByte);
+ return dac_cast<PTR_VOID>(pByte);
+#else // DACCESS_COMPILE
+ SUPPORTS_DAC;
+ DacNotImpl();
+ return NULL;
+#endif // DACCESS_COMPILE
+}
+
+#ifndef DACCESS_COMPILE
+
+// ===========================================================================
+// VASigCookies
+// ===========================================================================
+
+//==========================================================================
+// Enregisters a VASig.
+//==========================================================================
+VASigCookie *Module::GetVASigCookie(Signature vaSignature)
+{
+ CONTRACT(VASigCookie*)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACT_END;
+
+ VASigCookieBlock *pBlock;
+ VASigCookie *pCookie;
+
+ pCookie = NULL;
+
+ // First, see if we already enregistered this sig.
+ // Note that we're outside the lock here, so be a bit careful with our logic
+ for (pBlock = m_pVASigCookieBlock; pBlock != NULL; pBlock = pBlock->m_Next)
+ {
+ for (UINT i = 0; i < pBlock->m_numcookies; i++)
+ {
+ if (pBlock->m_cookies[i].signature.GetRawSig() == vaSignature.GetRawSig())
+ {
+ pCookie = &(pBlock->m_cookies[i]);
+ break;
+ }
+ }
+ }
+
+ if (!pCookie)
+ {
+ // If not, time to make a new one.
+
+ // Compute the size of args first, outside of the lock.
+
+ // @TODO GENERICS: We may be calling a varargs method from a
+ // generic type/method. Using an empty context will make such a
+ // case cause an unexpected exception. To make this work,
+ // we need to create a specialized signature for every instantiation
+ SigTypeContext typeContext;
+
+ MetaSig metasig(vaSignature, this, &typeContext);
+ ArgIterator argit(&metasig);
+
+ // Upper estimate of the vararg size
+ DWORD sizeOfArgs = argit.SizeOfArgStack();
+
+ // enable gc before taking lock
+ {
+ CrstHolder ch(&m_Crst);
+
+ // Note that we were possibly racing to create the cookie, and another thread
+ // may have already created it. We could put another check
+ // here, but it's probably not worth the effort, so we'll just take an
+ // occasional duplicate cookie instead.
+
+ // Is the first block in the list full?
+ if (m_pVASigCookieBlock && m_pVASigCookieBlock->m_numcookies
+ < VASigCookieBlock::kVASigCookieBlockSize)
+ {
+ // Nope, reserve a new slot in the existing block.
+ pCookie = &(m_pVASigCookieBlock->m_cookies[m_pVASigCookieBlock->m_numcookies]);
+ }
+ else
+ {
+ // Yes, create a new block.
+ VASigCookieBlock *pNewBlock = new VASigCookieBlock();
+
+ pNewBlock->m_Next = m_pVASigCookieBlock;
+ pNewBlock->m_numcookies = 0;
+ m_pVASigCookieBlock = pNewBlock;
+ pCookie = &(pNewBlock->m_cookies[0]);
+ }
+
+ // Now, fill in the new cookie (assuming we had enough memory to create one.)
+ pCookie->pModule = this;
+ pCookie->pNDirectILStub = NULL;
+ pCookie->sizeOfArgs = sizeOfArgs;
+ pCookie->signature = vaSignature;
+
+ // Finally, now that it's safe for asynchronous readers to see it,
+ // update the count.
+ m_pVASigCookieBlock->m_numcookies++;
+ }
+ }
+
+ RETURN pCookie;
+}
+
+// ===========================================================================
+// LookupMap
+// ===========================================================================
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+
+int __cdecl LookupMapBase::HotItem::Cmp(const void* a_, const void* b_)
+{
+ LIMITED_METHOD_CONTRACT;
+ const HotItem *a = (const HotItem *)a_;
+ const HotItem *b = (const HotItem *)b_;
+
+ if (a->rid < b->rid)
+ return -1;
+ else if (a->rid > b->rid)
+ return 1;
+ else
+ return 0;
+}
+
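+// Builds the hot-item list for this map from the ibc profile data: fetch
+// the hot tokens for the given table, capture each rid's current map value,
+// sort by rid and remove duplicates, then store the list in the native
+// image so that hot lookups avoid touching cold pages of the full map.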
+void LookupMapBase::CreateHotItemList(DataImage *image, CorProfileData *profileData, int table, BOOL fSkipNullEntries /*= FALSE*/)
+{
+ STANDARD_VM_CONTRACT;
+ _ASSERTE(!MapIsCompressed());
+
+ if (profileData)
+ {
+ DWORD numInTokenList = profileData->GetHotTokens(table, 1<<RidMap, 1<<RidMap, NULL, 0);
+
+ if (numInTokenList > 0)
+ {
+ HotItem *itemList = (HotItem*)(void*)image->GetModule()->GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(HotItem)) * S_SIZE_T(numInTokenList));
+ mdToken *tokenList = (mdToken*)(void*)image->GetModule()->GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(mdToken)) * S_SIZE_T(numInTokenList));
+
+ profileData->GetHotTokens(table, 1<<RidMap, 1<<RidMap, tokenList, numInTokenList);
+ DWORD numItems = 0;
+ for (DWORD i = 0; i < numInTokenList; i++)
+ {
+ DWORD rid = RidFromToken(tokenList[i]);
+ TADDR value = RelativePointer<TADDR>::GetValueMaybeNullAtPtr(dac_cast<TADDR>(GetElementPtr(RidFromToken(tokenList[i]))));
+ if (!fSkipNullEntries || value != NULL)
+ {
+ itemList[numItems].rid = rid;
+ itemList[numItems].value = value;
+ ++numItems;
+ }
+ }
+
+ if (numItems > 0)
+ {
+ qsort(itemList, // start of array
+ numItems, // array size in elements
+ sizeof(HotItem), // element size in bytes
+ HotItem::Cmp); // comparer function
+
+ // Eliminate any duplicates in the list. Due to the qsort, they must be adjacent now.
+ // We do this by walking the array and copying entries that are not duplicates of the previous one.
+ // We can start the loop at +1, because 0 is not a duplicate of the previous entry, and does not
+ // need to be copied either.
+ DWORD j = 1;
+ for (DWORD i = 1; i < numItems; i++)
+ {
+ if (itemList[i].rid != itemList[i-1].rid)
+ {
+ itemList[j].rid = itemList[i].rid;
+ itemList[j].value = itemList[i].value;
+ j++;
+ }
+ }
+ _ASSERTE(j <= numItems);
+ numItems = j;
+
+ // We have treated the values as normal TADDRs to let qsort move them around freely.
+ // Fix them up to be the relative pointers now.
+ for (DWORD ii = 0; ii < numItems; ii++)
+ {
+ if (itemList[ii].value != NULL)
+ RelativePointer<TADDR>::SetValueMaybeNullAtPtr(dac_cast<TADDR>(&itemList[ii].value), itemList[ii].value);
+ }
+
+ if (itemList != NULL)
+ image->StoreStructure(itemList, sizeof(HotItem)*numItems,
+ DataImage::ITEM_RID_MAP_HOT);
+
+ hotItemList = itemList;
+ dwNumHotItems = numItems;
+ }
+ }
+ }
+}
+
+void LookupMapBase::Save(DataImage *image, DataImage::ItemKind kind, CorProfileData *profileData, int table, BOOL fCopyValues /*= FALSE*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ // The table index that comes in is a token mask; the upper 8 bits are the table type for the tokens, which is all we want
+ table >>= 24;
+
+ dwNumHotItems = 0;
+ hotItemList = NULL;
+
+ if (table != 0)
+ {
+ // Because we use the same IBC encoding to record a touch to the m_GenericTypeDefToCanonMethodTableMap as
+ // to the m_TypeDefToMethodTableMap, the hot items we get in both will be the union of the touches. This limitation
+ // in the IBC infrastructure does not hurt us much because touching an entry for a generic type in one map often if
+ // not always implies touching the corresponding entry in the other. But when saving the GENERICTYPEDEF_MAP it
+ // does mean that we need to be prepared to see "hot" items whose data is NULL in this map (specifically, the non-
+ // generic types). We don't want the hot list to be unnecessarily big with these entries, so tell CreateHotItemList to
+ // skip them.
+ BOOL fSkipNullEntries = (kind == DataImage::ITEM_GENERICTYPEDEF_MAP);
+ CreateHotItemList(image, profileData, table, fSkipNullEntries);
+ }
+
+ // Determine whether we want to compress this lookup map (to improve density of cold pages in the map on
+ // hot item cache misses). We only enable this optimization for the TypeDefToMethodTable, the
+ // GenericTypeDefToCanonMethodTable, and the MethodDefToDesc maps since (a) they're the largest and
+ // as a result reap the most space savings and (b) these maps are fully populated in an ngen image and immutable
+ // at runtime, something that's important when dealing with a compressed version of the table.
+ if (kind == DataImage::ITEM_TYPEDEF_MAP || kind == DataImage::ITEM_GENERICTYPEDEF_MAP || kind == DataImage::ITEM_METHODDEF_MAP)
+ {
+ // The bulk of the compression work is done in the later stages of ngen image generation (since it
+ // relies on knowing the final RVAs of each value stored in the table). So we create a specialized
+ // ZapNode that knows how to perform the compression for us.
+ image->StoreCompressedLayoutMap(this, DataImage::ITEM_COMPRESSED_MAP);
+
+ // We need to know we decided to compress during the Fixup stage but the table kind is not available
+ // there. So we use the cIndexEntryBits field as a flag (this will be initialized to zero and is only
+ // set to a meaningful value near the end of ngen image generation, during the compression of the
+ // table itself).
+ cIndexEntryBits = 1;
+
+ // The ZapNode we allocated above takes care of all the rest of the processing for this map, so we're
+ // done here.
+ return;
+ }
+
+ SaveUncompressedMap(image, kind, fCopyValues);
+}
+
+void LookupMapBase::SaveUncompressedMap(DataImage *image, DataImage::ItemKind kind, BOOL fCopyValues /*= FALSE*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ // We should only be calling this once per map
+ _ASSERTE(!image->IsStored(pTable));
+
+ //
+ // We will only store one (big) node instead of the full list,
+ // and make the one node large enough to fit all the RIDs
+ //
+
+ ZapStoredStructure * pTableNode = image->StoreStructure(NULL, GetSize() * sizeof(TADDR), kind);
+
+ LookupMapBase *map = this;
+ DWORD offsetIntoCombo = 0;
+ while (map != NULL)
+ {
+ DWORD len = map->dwCount * sizeof(void*);
+
+ if (fCopyValues)
+ image->CopyDataToOffset(pTableNode, offsetIntoCombo, map->pTable, len);
+
+ image->BindPointer(map->pTable,pTableNode,offsetIntoCombo);
+ offsetIntoCombo += len;
+ map = map->pNext;
+ }
+}
+
+void LookupMapBase::ConvertSavedMapToUncompressed(DataImage *image, DataImage::ItemKind kind)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Check whether we decided to compress this map (see Save() above).
+ if (cIndexEntryBits == 0)
+ return;
+
+ cIndexEntryBits = 0;
+ SaveUncompressedMap(image, kind);
+}
+
+void LookupMapBase::Fixup(DataImage *image, BOOL fFixupEntries /*=TRUE*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (hotItemList != NULL)
+ image->FixupPointerField(this, offsetof(LookupMapBase, hotItemList));
+
+ // Find the biggest RID supported by the entire list of LookupMaps.
+ // We will only store one LookupMap node instead of the full list,
+ // and make it big enough to fit all RIDs.
+ *(DWORD *)image->GetImagePointer(this, offsetof(LookupMapBase, dwCount)) = GetSize();
+
+ // Persist the supportedFlags that this particular instance was created with.
+ *(TADDR *)image->GetImagePointer(this, offsetof(LookupMapBase, supportedFlags)) = supportedFlags;
+
+ image->ZeroPointerField(this, offsetof(LookupMapBase, pNext));
+
+ // Check whether we've decided to compress this map (see Save() above).
+ if (cIndexEntryBits == 1)
+ {
+ // In the compressed case most of the Fixup logic is performed by the specialized ZapNode we allocated
+ // during Save(). But we still have to record fixups for any hot items we've cached (these aren't
+ // compressed).
+ for (DWORD i = 0; i < dwNumHotItems; i++)
+ {
+ TADDR *pHotValueLoc = &hotItemList[i].value;
+ TADDR pHotValue = RelativePointer<TADDR>::GetValueMaybeNullAtPtr((TADDR)pHotValueLoc);
+ TADDR flags = pHotValue & supportedFlags;
+ pHotValue -= flags;
+
+ if (image->IsStored((PVOID)pHotValue))
+ {
+ image->FixupField(hotItemList,
+ (BYTE *)pHotValueLoc - (BYTE *)hotItemList,
+ (PVOID)pHotValue, flags, IMAGE_REL_BASED_RelativePointer);
+ }
+ else
+ {
+ image->ZeroPointerField(hotItemList, (BYTE *)pHotValueLoc - (BYTE *)hotItemList);
+ }
+ }
+
+ // The ZapNode will handle everything else so we're done.
+ return;
+ }
+
+ // Note that the caller is responsible for calling FixupPointerField()
+ // or zeroing out the contents of pTable as appropriate
+ image->FixupPointerField(this, offsetof(LookupMapBase, pTable));
+
+ if (fFixupEntries)
+ {
+ LookupMap<PVOID>::Iterator iter((LookupMap<PVOID> *)this);
+ DWORD rid = 0;
+
+ while (iter.Next())
+ {
+ TADDR flags;
+ PVOID p = iter.GetElementAndFlags(&flags);
+ PTR_TADDR hotItemValuePtr = FindHotItemValuePtr(rid);
+
+ if (image->IsStored(p))
+ {
+ image->FixupField(pTable, rid * sizeof(TADDR),
+ p, flags, IMAGE_REL_BASED_RelativePointer);
+
+ // In case this item is also in the hot item subtable, fix it up there as well
+ if (hotItemValuePtr != NULL)
+ image->FixupField(hotItemList,
+ (BYTE *)hotItemValuePtr - (BYTE *)hotItemList,
+ p, flags, IMAGE_REL_BASED_RelativePointer);
+ }
+ else
+ {
+ image->ZeroPointerField(pTable, rid * sizeof(TADDR));
+ // In case this item is also in the hot item subtable, zero it there as well
+ if (hotItemValuePtr != NULL)
+ image->ZeroPointerField(hotItemList,
+ (BYTE *)hotItemValuePtr - (BYTE *)hotItemList);
+ }
+
+ rid++;
+ }
+ }
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#endif // !DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void
+LookupMapBase::EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
+ bool enumThis)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (enumThis)
+ {
+ DacEnumHostDPtrMem(this);
+ }
+ if (pTable.IsValid())
+ {
+#ifdef FEATURE_PREJIT
+ if (MapIsCompressed())
+ {
+ // Compressed maps have tables whose size cannot be calculated cheaply. Plus they have an
+ // additional index blob.
+ DacEnumMemoryRegion(dac_cast<TADDR>(pTable),
+ cbTable);
+ DacEnumMemoryRegion(dac_cast<TADDR>(pIndex),
+ cbIndex);
+ }
+ else
+#endif // FEATURE_PREJIT
+ DacEnumMemoryRegion(dac_cast<TADDR>(pTable),
+ dwCount * sizeof(TADDR));
+ }
+#ifdef FEATURE_PREJIT
+ if (dwNumHotItems && hotItemList.IsValid())
+ {
+ DacEnumMemoryRegion(dac_cast<TADDR>(hotItemList),
+ dwNumHotItems * sizeof(HotItem));
+ }
+#endif // FEATURE_PREJIT
+}
+
+
+/* static */
+void
+LookupMapBase::ListEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ LookupMapBase * headMap = this;
+ bool enumHead = false;
+ while (headMap)
+ {
+ headMap->EnumMemoryRegions(flags, enumHead);
+
+ if (!headMap->pNext.IsValid())
+ {
+ break;
+ }
+
+ headMap = headMap->pNext;
+ enumHead = true;
+ }
+}
+
+#endif // DACCESS_COMPILE
+
+
+// Optimization intended for Module::IsIntrospectionOnly and Module::EnsureActive only
+#include <optsmallperfcritical.h>
+
+BOOL Module::IsIntrospectionOnly()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetAssembly()->IsIntrospectionOnly();
+}
+
+#ifndef DACCESS_COMPILE
+VOID Module::EnsureActive()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ GetDomainFile()->EnsureActive();
+}
+#endif // DACCESS_COMPILE
+
+#include <optdefault.h>
+
+
+#ifndef DACCESS_COMPILE
+
+VOID Module::EnsureAllocated()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GetDomainFile()->EnsureAllocated();
+}
+
+VOID Module::EnsureLibraryLoaded()
+{
+ STANDARD_VM_CONTRACT;
+ GetDomainFile()->EnsureLibraryLoaded();
+}
+#endif // !DACCESS_COMPILE
+
+CHECK Module::CheckActivated()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ DomainFile *pDomainFile = FindDomainFile(GetAppDomain());
+ CHECK(pDomainFile != NULL);
+ PREFIX_ASSUME(pDomainFile != NULL);
+ CHECK(pDomainFile->CheckActivated());
+#endif
+ CHECK_OK;
+}
+
+#ifdef DACCESS_COMPILE
+
+void
+ModuleCtorInfo::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ // This class is contained so do not enumerate 'this'.
+ DacEnumMemoryRegion(dac_cast<TADDR>(ppMT), numElements *
+ sizeof(TADDR));
+ DacEnumMemoryRegion(dac_cast<TADDR>(cctorInfoHot), numElementsHot *
+ sizeof(ClassCtorInfoEntry));
+ DacEnumMemoryRegion(dac_cast<TADDR>(cctorInfoCold),
+ (numElements - numElementsHot) *
+ sizeof(ClassCtorInfoEntry));
+ DacEnumMemoryRegion(dac_cast<TADDR>(hotHashOffsets), numHotHashes *
+ sizeof(DWORD));
+ DacEnumMemoryRegion(dac_cast<TADDR>(coldHashOffsets), numColdHashes *
+ sizeof(DWORD));
+}
+
+void Module::EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
+ bool enumThis)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (enumThis)
+ {
+ DAC_ENUM_VTHIS();
+ EMEM_OUT(("MEM: %p Module\n", dac_cast<TADDR>(this)));
+ }
+
+ // Save module id data only if it is a real pointer, not a tagged suggestion to use ModuleIndex.
+ if (!Module::IsEncodedModuleIndex(GetModuleID()))
+ {
+ if (m_ModuleID.IsValid())
+ {
+ m_ModuleID->EnumMemoryRegions(flags);
+ }
+ }
+
+ // TODO: Enumerate DomainLocalModules? It's not clear if we need all AppDomains
+ // in the multi-domain case (where m_ModuleID has its low-bit set).
+ if (m_file.IsValid())
+ {
+ m_file->EnumMemoryRegions(flags);
+ }
+ if (m_pAssembly.IsValid())
+ {
+ m_pAssembly->EnumMemoryRegions(flags);
+ }
+
+ m_TypeRefToMethodTableMap.ListEnumMemoryRegions(flags);
+ m_TypeDefToMethodTableMap.ListEnumMemoryRegions(flags);
+
+ if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
+ {
+ if (m_pAvailableClasses.IsValid())
+ {
+ m_pAvailableClasses->EnumMemoryRegions(flags);
+ }
+ if (m_pAvailableParamTypes.IsValid())
+ {
+ m_pAvailableParamTypes->EnumMemoryRegions(flags);
+ }
+ if (m_pInstMethodHashTable.IsValid())
+ {
+ m_pInstMethodHashTable->EnumMemoryRegions(flags);
+ }
+ if (m_pAvailableClassesCaseIns.IsValid())
+ {
+ m_pAvailableClassesCaseIns->EnumMemoryRegions(flags);
+ }
+#ifdef FEATURE_PREJIT
+ if (m_pStubMethodHashTable.IsValid())
+ {
+ m_pStubMethodHashTable->EnumMemoryRegions(flags);
+ }
+#endif // FEATURE_PREJIT
+#ifdef FEATURE_MIXEDMODE
+ if (m_pThunkHeap.IsValid())
+ {
+ m_pThunkHeap->EnumMemoryRegions(flags);
+ }
+#endif // FEATURE_MIXEDMODE
+ if (m_pBinder.IsValid())
+ {
+ m_pBinder->EnumMemoryRegions(flags);
+ }
+ m_ModuleCtorInfo.EnumMemoryRegions(flags);
+
+ // Save the LookupMap structures.
+ m_MethodDefToDescMap.ListEnumMemoryRegions(flags);
+ m_FieldDefToDescMap.ListEnumMemoryRegions(flags);
+ m_pMemberRefToDescHashTable->EnumMemoryRegions(flags);
+ m_GenericParamToDescMap.ListEnumMemoryRegions(flags);
+ m_GenericTypeDefToCanonMethodTableMap.ListEnumMemoryRegions(flags);
+ m_FileReferencesMap.ListEnumMemoryRegions(flags);
+ m_ManifestModuleReferencesMap.ListEnumMemoryRegions(flags);
+ m_MethodDefToPropertyInfoMap.ListEnumMemoryRegions(flags);
+
+ LookupMap<PTR_MethodTable>::Iterator typeDefIter(&m_TypeDefToMethodTableMap);
+ while (typeDefIter.Next())
+ {
+ if (typeDefIter.GetElement())
+ {
+ typeDefIter.GetElement()->EnumMemoryRegions(flags);
+ }
+ }
+
+ LookupMap<PTR_TypeRef>::Iterator typeRefIter(&m_TypeRefToMethodTableMap);
+ while (typeRefIter.Next())
+ {
+ if (typeRefIter.GetElement())
+ {
+ TypeHandle th = TypeHandle::FromTAddr(dac_cast<TADDR>(typeRefIter.GetElement()));
+ th.EnumMemoryRegions(flags);
+ }
+ }
+
+ LookupMap<PTR_MethodDesc>::Iterator methodDefIter(&m_MethodDefToDescMap);
+ while (methodDefIter.Next())
+ {
+ if (methodDefIter.GetElement())
+ {
+ methodDefIter.GetElement()->EnumMemoryRegions(flags);
+ }
+ }
+
+ LookupMap<PTR_FieldDesc>::Iterator fieldDefIter(&m_FieldDefToDescMap);
+ while (fieldDefIter.Next())
+ {
+ if (fieldDefIter.GetElement())
+ {
+ fieldDefIter.GetElement()->EnumMemoryRegions(flags);
+ }
+ }
+
+ LookupMap<PTR_TypeVarTypeDesc>::Iterator genericParamIter(&m_GenericParamToDescMap);
+ while (genericParamIter.Next())
+ {
+ if (genericParamIter.GetElement())
+ {
+ genericParamIter.GetElement()->EnumMemoryRegions(flags);
+ }
+ }
+
+ LookupMap<PTR_MethodTable>::Iterator genericTypeDefIter(&m_GenericTypeDefToCanonMethodTableMap);
+ while (genericTypeDefIter.Next())
+ {
+ if (genericTypeDefIter.GetElement())
+ {
+ genericTypeDefIter.GetElement()->EnumMemoryRegions(flags);
+ }
+ }
+
+ } // !CLRDATA_ENUM_MEM_MINI && !CLRDATA_ENUM_MEM_TRIAGE
+
+
+ LookupMap<PTR_Module>::Iterator fileRefIter(&m_FileReferencesMap);
+ while (fileRefIter.Next())
+ {
+ if (fileRefIter.GetElement())
+ {
+ fileRefIter.GetElement()->EnumMemoryRegions(flags, true);
+ }
+ }
+
+ LookupMap<PTR_Module>::Iterator asmRefIter(&m_ManifestModuleReferencesMap);
+ while (asmRefIter.Next())
+ {
+ if (asmRefIter.GetElement())
+ {
+ asmRefIter.GetElement()->GetAssembly()->EnumMemoryRegions(flags);
+ }
+ }
+
+ ECall::EnumFCallMethods();
+}
+
+FieldDesc *Module::LookupFieldDef(mdFieldDef token)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(TypeFromToken(token) == mdtFieldDef);
+ g_IBCLogger.LogRidMapAccess( MakePair( this, token ) );
+ return m_FieldDefToDescMap.GetElement(RidFromToken(token));
+}
+
+#endif // DACCESS_COMPILE
+
+
+#ifndef DACCESS_COMPILE
+
+// Access to CerPrepInfo, the structure used to track CERs prepared at runtime (as opposed to ngen time). GetCerPrepInfo will
+// return the structure associated with the given method desc if it exists or NULL otherwise. CreateCerPrepInfo will get the
+// structure if it exists or allocate and return a new struct otherwise. Creation of CerPrepInfo structures is automatically
+// synchronized by the CerCrst (lazily allocated as needed).
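+//
+// Typical usage (an illustrative sketch only):
+//
+//     CerPrepInfo *pInfo = pModule->GetCerPrepInfo(pMD);   // read-only probe, may return NULL
+//     if (pInfo == NULL)
+//         pInfo = pModule->CreateCerPrepInfo(pMD);         // get-or-create, takes the CerCrst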
+CerPrepInfo *Module::GetCerPrepInfo(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ if (m_pCerPrepInfo == NULL)
+ return NULL;
+
+ // Don't need a crst for read only access to the hash table.
+ HashDatum sDatum;
+ if (m_pCerPrepInfo->GetValue(pMD, &sDatum))
+ return (CerPrepInfo*)sDatum;
+ else
+ return NULL;
+}
+
+CerPrepInfo *Module::CreateCerPrepInfo(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ // Lazily allocate a Crst to serialize update access to the info structure.
+ // Carefully synchronize to ensure we don't leak a Crst in race conditions.
+ if (m_pCerCrst == NULL)
+ {
+ Crst *pCrst = new Crst(CrstCer);
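+ // Publish our Crst only if no other thread has published one first; if we lose the race, free ours.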
+ if (InterlockedCompareExchangeT(&m_pCerCrst, pCrst, NULL) != NULL)
+ delete pCrst;
+ }
+
+ CrstHolder sCrstHolder(m_pCerCrst);
+
+ // Lazily allocate the info structure.
+ if (m_pCerPrepInfo == NULL)
+ {
+ LockOwner sLock = {m_pCerCrst, IsOwnerOfCrst};
+ NewHolder <EEPtrHashTable> tempCerPrepInfo (new EEPtrHashTable());
+ if (!tempCerPrepInfo->Init(CER_DEFAULT_HASH_SIZE, &sLock))
+ COMPlusThrowOM();
+ m_pCerPrepInfo = tempCerPrepInfo.Extract ();
+ }
+ else
+ {
+ // Try getting an existing value first.
+ HashDatum sDatum;
+ if (m_pCerPrepInfo->GetValue(pMD, &sDatum))
+ return (CerPrepInfo*)sDatum;
+ }
+
+ // We get here if there was no info structure or no existing method desc entry. Either way we now have an info structure and
+ // need to create a new method desc entry.
+ NewHolder<CerPrepInfo> pInfo(new CerPrepInfo());
+
+ m_pCerPrepInfo->InsertValue(pMD, (HashDatum)pInfo);
+
+ return pInfo.Extract();
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+// Access to CerNgenRootTable, which holds information for all the CERs rooted at a method in this module (that were
+// discovered during an ngen).
+
+// Add a list of MethodContextElements representing a CER to the root table keyed by the MethodDesc* of the root method. Creates
+// or expands the root table as necessary. This should only be called during ngen (at runtime we only read the table).
+void Module::AddCerListToRootTable(MethodDesc *pRootMD, MethodContextElement *pList)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCompilationProcess());
+ }
+ CONTRACTL_END;
+
+ // Although this is only called during ngen we still get cases where a module comes through here already ngen'd (because of
+ // ngen's habit of letting code execute during compilation). Until that's fixed we'll just back out if the module has already
+ // fixed the root table into unwriteable storage.
+ if (m_pCerNgenRootTable && !(m_dwTransientFlags & M_CER_ROOT_TABLE_ON_HEAP))
+ return;
+
+ // Lazily allocate a Crst to serialize update access to the info structure.
+ // Carefully synchronize to ensure we don't leak a Crst in race conditions.
+ if (m_pCerCrst == NULL)
+ {
+ Crst *pCrst = new Crst(CrstCer);
+ if (InterlockedCompareExchangeT(&m_pCerCrst, pCrst, NULL) != NULL)
+ delete pCrst;
+ }
+
+ CrstHolder sCrstHolder(m_pCerCrst);
+
+ // Lazily allocate the root table structure.
+ if (m_pCerNgenRootTable == NULL)
+ {
+ FastInterlockOr(&m_dwTransientFlags, M_CER_ROOT_TABLE_ON_HEAP);
+ m_pCerNgenRootTable = new CerNgenRootTable();
+ }
+
+ _ASSERTE(m_dwTransientFlags & M_CER_ROOT_TABLE_ON_HEAP);
+
+ // And add the new element.
+ m_pCerNgenRootTable->AddRoot(pRootMD, pList);
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#ifdef FEATURE_PREJIT
+// Returns true if the given method is a CER root detected at ngen time.
+bool Module::IsNgenCerRootMethod(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ _ASSERTE(pMD->GetModule() == this);
+ if (m_pCerNgenRootTable)
+ return m_pCerNgenRootTable->IsNgenRootMethod(pMD);
+ return false;
+}
+
+// Restores the CER rooted at this method (no-op if this method isn't a CER root).
+void Module::RestoreCer(MethodDesc *pMD)
+{
+ STANDARD_VM_CONTRACT;
+ _ASSERTE(pMD->GetModule() == this);
+ if (m_pCerNgenRootTable)
+ m_pCerNgenRootTable->Restore(pMD);
+}
+
+#endif // FEATURE_PREJIT
+
+#endif // !DACCESS_COMPILE
+
+
+
+//-------------------------------------------------------------------------------
+// Make a best-effort attempt to obtain an image name for use in an error message.
+//
+// This routine must expect to be called before this object is fully loaded.
+// It can return an empty string if the name isn't available or the object isn't
+// initialized enough to get a name, but it mustn't crash.
+//-------------------------------------------------------------------------------
+LPCWSTR Module::GetPathForErrorMessages()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ }
+ CONTRACTL_END
+
+ PEFile *pFile = GetFile();
+
+ if (pFile)
+ {
+ return pFile->GetPathForErrorMessages();
+ }
+ else
+ {
+ return W("");
+ }
+}
+
+#ifdef FEATURE_CORECLR
+#ifndef DACCESS_COMPILE
+BOOL IsVerifiableWrapper(MethodDesc* pMD)
+{
+ BOOL ret = FALSE;
+ // EX_TRY contains _alloca, so I can't use this inside of a loop.
+ EX_TRY
+ {
+ ret = pMD->IsVerifiable();
+ }
+ EX_CATCH
+ {
+ // If the method has a security exception, the exception will fly out of IsVerifiable. Shunt
+ // to the unverifiable path below.
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+ return ret;
+}
+#endif //DACCESS_COMPILE
+void Module::VerifyAllMethods()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+#ifndef DACCESS_COMPILE
+ // If the EE isn't started yet, it's not safe to jit. We fail in COM when jitting a p/invoke.
+ if (!g_fEEStarted)
+ return;
+
+ struct Local
+ {
+ static bool VerifyMethodsForTypeDef(Module * pModule, mdTypeDef td)
+ {
+ bool ret = true;
+ TypeHandle th = ClassLoader::LoadTypeDefThrowing(pModule, td, ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+
+ MethodTable * pMT = th.GetMethodTable();
+ MethodTable::MethodIterator it(pMT);
+ for (; it.IsValid(); it.Next())
+ {
+ MethodDesc * pMD = it.GetMethodDesc();
+ if (pMD->HasILHeader() && Security::IsMethodTransparent(pMD)
+ && (g_pObjectCtorMD != pMD))
+ {
+ if (!IsVerifiableWrapper(pMD))
+ {
+#ifdef _DEBUG
+ SString s;
+ if (LoggingOn(LF_VERIFIER, LL_ERROR))
+ TypeString::AppendMethodDebug(s, pMD);
+ LOG((LF_VERIFIER, LL_ERROR, "Transparent Method (0x%p), %S is unverifiable\n",
+ pMD, s.GetUnicode()));
+#endif
+ ret = false;
+ }
+ }
+ }
+ return ret;
+ }
+ };
+ //Verify all methods in a module eagerly, forcing them to get loaded.
+
+ /* XXX Thu 4/26/2007
+ * This code is lifted mostly from Validator.cpp
+ */
+ IMDInternalImport * pMDI = GetMDImport();
+ HENUMTypeDefInternalHolder hEnum(pMDI);
+ mdTypeDef td;
+ hEnum.EnumTypeDefInit();
+
+ bool isAllVerifiable = true;
+ //verify global methods
+ if (GetGlobalMethodTable())
+ {
+ //verify everything in the MT.
+ if (!Local::VerifyMethodsForTypeDef(this, COR_GLOBAL_PARENT_TOKEN))
+ isAllVerifiable = false;
+ }
+ while (pMDI->EnumTypeDefNext(&hEnum, &td))
+ {
+ //verify everything
+ if (!Local::VerifyMethodsForTypeDef(this, td))
+ isAllVerifiable = false;
+ }
+ if (!isAllVerifiable)
+ EEFileLoadException::Throw(GetFile(), COR_E_VERIFICATION);
+#endif //DACCESS_COMPILE
+}
+#endif //FEATURE_CORECLR
+
+
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE) && !defined(CROSS_COMPILE)
+void Module::ExpandAll()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //This is called from inside EEStartupHelper, so it breaks the SO rules. However, this is debug only
+ //(and only supported for limited jit testing), so it's ok here.
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ // If the EE isn't started yet, it's not safe to jit. We fail in COM when jitting a p/invoke.
+ if (!g_fEEStarted)
+ return;
+ struct Local
+ {
+ static void CompileMethodDesc(MethodDesc * pMD)
+ {
+ //Must have a method body
+ if (pMD->HasILHeader()
+ //Can't jit open instantiations
+ && !pMD->IsGenericMethodDefinition()
+ //These are the only methods we can jit
+ && (pMD->IsStatic() || pMD->GetNumGenericMethodArgs() == 0
+ || pMD->HasClassInstantiation())
+ && (pMD->MayHaveNativeCode() && !pMD->IsFCallOrIntrinsic()))
+ {
+ COR_ILMETHOD * ilHeader = pMD->GetILHeader();
+ COR_ILMETHOD_DECODER::DecoderStatus ignored;
+ NewHolder<COR_ILMETHOD_DECODER> pHeader(new COR_ILMETHOD_DECODER(ilHeader,
+ pMD->GetMDImport(),
+ &ignored));
+#ifdef FEATURE_INTERPRETER
+ pMD->MakeJitWorker(pHeader, CORJIT_FLG_MAKEFINALCODE, 0);
+#else
+ pMD->MakeJitWorker(pHeader, 0, 0);
+#endif
+ }
+ }
+ static void CompileMethodsForMethodTable(MethodTable * pMT)
+ {
+ MethodTable::MethodIterator it(pMT);
+ for (; it.IsValid(); it.Next())
+ {
+ MethodDesc * pMD = it.GetMethodDesc();
+ CompileMethodDesc(pMD);
+ }
+ }
+#if 0
+ static void CompileMethodsForTypeDef(Module * pModule, mdTypeDef td)
+ {
+ TypeHandle th = ClassLoader::LoadTypeDefThrowing(pModule, td, ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+
+ MethodTable * pMT = th.GetMethodTable();
+ CompileMethodsForMethodTable(pMT);
+ }
+#endif
+ static void CompileMethodsForTypeDefRefSpec(Module * pModule, mdToken tok)
+ {
+ TypeHandle th;
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ th = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(
+ pModule,
+ tok,
+ NULL /*SigTypeContext*/);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ //Only do this for non-generic types and unshared generic types
+ //(canonical generics and value type generic instantiations).
+ if (SUCCEEDED(hr) && !th.IsTypeDesc()
+ && th.AsMethodTable()->IsCanonicalMethodTable())
+ {
+ CompileMethodsForMethodTable(th.AsMethodTable());
+ }
+ }
+ static void CompileMethodsForMethodDefRefSpec(Module * pModule, mdToken tok)
+ {
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ MethodDesc * pMD =
+ MemberLoader::GetMethodDescFromMemberDefOrRefOrSpec(pModule, tok,
+ /*SigTypeContext*/NULL,
+ TRUE, TRUE);
+ CompileMethodDesc(pMD);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ //@telesto what should we do with this HR? the Silverlight code doesn't seem
+ //to do anything...but that doesn't seem safe...
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+ };
+ //Jit all methods eagerly
+
+ /* XXX Thu 4/26/2007
+ * This code is lifted mostly from code:Module::VerifyAllMethods
+ */
+ IMDInternalImport * pMDI = GetMDImport();
+ HENUMTypeDefInternalHolder hEnum(pMDI);
+ mdTypeDef td;
+ hEnum.EnumTypeDefInit();
+
+ //verify global methods
+ if (GetGlobalMethodTable())
+ {
+ //jit everything in the MT.
+ Local::CompileMethodsForTypeDefRefSpec(this, COR_GLOBAL_PARENT_TOKEN);
+ }
+ while (pMDI->EnumTypeDefNext(&hEnum, &td))
+ {
+ //jit everything
+ Local::CompileMethodsForTypeDefRefSpec(this, td);
+ }
+
+ //Get the type refs. They're always awesome.
+ HENUMInternalHolder hEnumTypeRefs(pMDI);
+ mdToken tr;
+
+ hEnumTypeRefs.EnumAllInit(mdtTypeRef);
+ while (hEnumTypeRefs.EnumNext(&tr))
+ {
+ Local::CompileMethodsForTypeDefRefSpec(this, tr);
+ }
+
+ //make sure to get the type specs
+ HENUMInternalHolder hEnumTypeSpecs(pMDI);
+ mdToken ts;
+
+ hEnumTypeSpecs.EnumAllInit(mdtTypeSpec);
+ while (hEnumTypeSpecs.EnumNext(&ts))
+ {
+ Local::CompileMethodsForTypeDefRefSpec(this, ts);
+ }
+
+
+ //And now for the interesting generic methods
+ HENUMInternalHolder hEnumMethodSpecs(pMDI);
+ mdToken ms;
+
+ hEnumMethodSpecs.EnumAllInit(mdtMethodSpec);
+ while (hEnumMethodSpecs.EnumNext(&ms))
+ {
+ Local::CompileMethodsForMethodDefRefSpec(this, ms);
+ }
+}
+#endif //_DEBUG && !DACCESS_COMPILE && !CROSS_COMPILE
+
+//-------------------------------------------------------------------------------
+
+// Verify consistency of asmconstants.h
+
+// Wrap all C_ASSERT's in asmconstants.h with a class definition. Many of the
+// fields referenced below are private, and this class is a friend of the
+// enclosing type. (A C_ASSERT isn't a compiler intrinsic, just a magic
+// typedef that produces a compiler error when the condition is false.)
+#include "clrvarargs.h" /* for VARARG C_ASSERTs in asmconstants.h */
+class CheckAsmOffsets
+{
+#define ASMCONSTANTS_C_ASSERT(cond) \
+ typedef char UNIQUE_LABEL(__C_ASSERT__)[(cond) ? 1 : -1];
+#include "asmconstants.h"
+};
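+
+// Entries in asmconstants.h take roughly this form (illustrative values; the real constants
+// and field names are platform-specific):
+//
+//     #define OFFSETOF__Thread__m_pFrame 0x10
+//     ASMCONSTANTS_C_ASSERT(OFFSETOF__Thread__m_pFrame == offsetof(Thread, m_pFrame))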
+
+//-------------------------------------------------------------------------------
+
+#ifndef DACCESS_COMPILE
+
+void Module::CreateAssemblyRefByNameTable(AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ LoaderHeap * pHeap = GetLoaderAllocator()->GetLowFrequencyHeap();
+ IMDInternalImport * pImport = GetMDImport();
+
+ DWORD dwMaxRid = pImport->GetCountWithTokenKind(mdtAssemblyRef);
+ if (dwMaxRid == 0)
+ return;
+
+ S_SIZE_T dwAllocSize = S_SIZE_T(sizeof(LPCSTR)) * S_SIZE_T(dwMaxRid); // the table holds LPCSTR entries
+ m_AssemblyRefByNameTable = (LPCSTR *) pamTracker->Track( pHeap->AllocMem(dwAllocSize) );
+
+ DWORD dwCount = 0;
+ for (DWORD rid=1; rid <= dwMaxRid; rid++)
+ {
+ mdAssemblyRef mdToken = TokenFromRid(rid,mdtAssemblyRef);
+ LPCSTR szName;
+ HRESULT hr;
+
+ hr = pImport->GetAssemblyRefProps(mdToken, NULL, NULL, &szName, NULL, NULL, NULL, NULL);
+
+ if (SUCCEEDED(hr))
+ {
+ m_AssemblyRefByNameTable[dwCount++] = szName;
+ }
+ }
+ m_AssemblyRefByNameCount = dwCount;
+}
+
+bool Module::HasReferenceByName(LPCUTF8 pModuleName)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ for (DWORD i=0; i < m_AssemblyRefByNameCount; i++)
+ {
+ if (0 == strcmp(pModuleName, m_AssemblyRefByNameTable[i]))
+ return true;
+ }
+
+ return false;
+}
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif // _MSC_VER: warning C4244
+
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+NOINLINE void NgenForceFailure_AV()
+{
+ LIMITED_METHOD_CONTRACT;
+ static int* alwaysNull = 0;
+ *alwaysNull = 0;
+}
+
+NOINLINE void NgenForceFailure_TypeLoadException()
+{
+ WRAPPER_NO_CONTRACT;
+ ::ThrowTypeLoadException("ForceIBC", "Failure", W("Assembly"), NULL, IDS_CLASSLOAD_BADFORMAT);
+}
+
+void EEConfig::DebugCheckAndForceIBCFailure(BitForMask bitForMask)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ static DWORD s_ibcCheckCount = 0;
+
+ // Both of these must be set to non-zero values for us to force a failure
+ //
+ if ((NgenForceFailureCount() == 0) || (NgenForceFailureKind() == 0))
+ return;
+
+ // The bitForMask value must also be set in the FailureMask
+ //
+ if ((((DWORD) bitForMask) & NgenForceFailureMask()) == 0)
+ return;
+
+ s_ibcCheckCount++;
+ if (s_ibcCheckCount < NgenForceFailureCount())
+ return;
+
+ // We force one failure every NgenForceFailureCount()
+ //
+ s_ibcCheckCount = 0;
+ switch (NgenForceFailureKind())
+ {
+ case 1:
+ NgenForceFailure_TypeLoadException();
+ break;
+ case 2:
+ NgenForceFailure_AV();
+ break;
+ }
+}
+#endif // defined(_DEBUG) && !defined(DACCESS_COMPILE)
+
diff --git a/src/vm/ceeload.h b/src/vm/ceeload.h
new file mode 100644
index 0000000000..1f762b3751
--- /dev/null
+++ b/src/vm/ceeload.h
@@ -0,0 +1,3866 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: CEELOAD.H
+//
+
+//
+// CEELOAD.H defines the class used to represent the PE file
+// ===========================================================================
+
+#ifndef CEELOAD_H_
+#define CEELOAD_H_
+
+#ifndef CLR_STANDALONE_BINDER
+
+#include "common.h"
+#ifdef FEATURE_FUSION
+#include <fusion.h>
+#endif
+#include "vars.hpp" // for LPCUTF8
+#include "hash.h"
+#include "clsload.hpp"
+#include "cgensys.h"
+#include "corsym.h"
+#include "typehandle.h"
+#include "arraylist.h"
+#include "pefile.h"
+#include "typehash.h"
+#include "contractimpl.h"
+#include "bitmask.h"
+#include "instmethhash.h"
+#include "eetwain.h" // For EnumGCRefs (we should probably move that somewhere else, but can't
+ // find anything better (modulo common or vars.hpp))
+#include "classloadlevel.h"
+#include "precode.h"
+#include "corbbtprof.h"
+#include "ilstubcache.h"
+#include "classhash.h"
+
+#ifdef FEATURE_PREJIT
+#include "corcompile.h"
+#include "dataimage.h"
+#include <gcinfodecoder.h>
+#endif // FEATURE_PREJIT
+
+#ifdef FEATURE_COMINTEROP
+#include "winrttypenameconverter.h"
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_READYTORUN
+#include "readytoruninfo.h"
+#endif
+
+#else // CLR_STANDALONE_BINDER
+
+#include "volatile.h"
+#include "crst.h"
+#include "bitmask.h"
+#include "arraylist.h"
+
+#define VPTR(type) type*
+typedef DPTR(class Assembly) PTR_Assembly;
+typedef DPTR(class Binder) PTR_Binder;
+typedef DPTR(class CGrowableStream) PTR_CGrowableStream;
+typedef DPTR(struct DomainLocalModule) PTR_DomainLocalModule;
+typedef DPTR(class EEClassHashTable) PTR_EEClassHashTable;
+typedef DPTR(class EETypeHashTable) PTR_EETypeHashTable;
+typedef DPTR(class FieldDesc) PTR_FieldDesc;
+typedef DPTR(class InstMethodHashTable) PTR_InstMethodHashTable;
+typedef DPTR(class LoaderHeap) PTR_LoaderHeap;
+typedef DPTR(class MethodDesc) PTR_MethodDesc;
+typedef DPTR(class MethodTable) PTR_MethodTable;
+typedef VPTR(class PEFile) PTR_PEFile;
+typedef DPTR(class ProfilingBlobTable) PTR_ProfilingBlobTable;
+typedef DPTR(class TypeVarTypeDesc) PTR_TypeVarTypeDesc;
+template<typename PTR_TYPE> class FixupPointer;
+
+class EEPtrHashTable;
+class ISymUnmanagedReader;
+class NgenStats;
+
+#endif // CLR_STANDALONE_BINDER
+
+
+class PELoader;
+class Stub;
+class MethodDesc;
+class FieldDesc;
+class Crst;
+class IAssemblySecurityDescriptor;
+class ClassConverter;
+class RefClassWriter;
+class ReflectionModule;
+class EEStringData;
+class MethodDescChunk;
+class SigTypeContext;
+class Assembly;
+class BaseDomain;
+class AppDomain;
+class CompilationDomain;
+class DomainModule;
+struct DomainLocalModule;
+class SystemDomain;
+class Module;
+class SString;
+class Pending;
+class MethodTable;
+class AppDomain;
+class DynamicMethodTable;
+struct CerPrepInfo;
+class ModuleSecurityDescriptor;
+#ifdef FEATURE_PREJIT
+class CerNgenRootTable;
+struct MethodContextElement;
+class TypeHandleList;
+class ProfileEmitter;
+class ReJitManager;
+class TrackingMap;
+class PersistentInlineTrackingMap;
+
+
+typedef DPTR(PersistentInlineTrackingMap) PTR_PersistentInlineTrackingMap;
+
+extern VerboseLevel g_CorCompileVerboseLevel;
+#endif // FEATURE_PREJIT
+
+//
+// LookupMaps are used to implement RID maps.
+// A LookupMap is a linked list of nodes, each handling a successive (and consecutive)
+// range of RIDs.
+//
+// LookupMapBase is the non-type-safe implementation of the worker methods; LookupMap is a
+// type-safe wrapper around it.
+//
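+// A minimal usage sketch (illustrative only; the map below stands in for one of the real
+// per-Module maps such as m_MethodDefToDescMap):
+//
+//     LookupMap<PTR_MethodDesc> map;                 // one map per metadata token table
+//     map.AddElement(pModule, rid, pMD);             // store; grows the node list as needed
+//     PTR_MethodDesc pFound = map.GetElement(rid);   // retrieve; returns NULL for absent rids
+//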
+
+typedef DPTR(struct LookupMapBase) PTR_LookupMapBase;
+
+#ifdef FEATURE_PREJIT
+
+//
+// LookupMap cold entry compression support
+//
+// A lookup map (the cold section) is notionally an array of pointer values indexed by rid. The pointers are
+// generally to data structures such as MethodTables or MethodDescs. When we compress such a table (at ngen
+// time) we wish to avoid direct pointers, since these would need to be fixed up due to image base
+// relocations. Instead we store RVAs (Relative Virtual Addresses). Unlike regular RVAs our base address is
+// the map address itself (as opposed to the module base). We do this purely out of convenience since
+// LookupMaps don't store the module base address.
+//
+// It turns out that very often the value pointers (and hence the value RVAs) are related to each other:
+// adjacent map entries often point to data structures that were allocated next to or close to each other. The
+// compression algorithm takes advantage of this fact: instead of storing value RVAs we store the deltas
+// between RVAs. So the nth value in the table is reconstructed by summing the deltas of the preceding (n
+// - 1) entries. Since the deltas are often small (especially when we take structure alignment into account
+// and realize that we can discard the lower 2 or 3 bits of the delta) we can store them in a compressed
+// manner by discarding the insignificant leading zero bits in each value.
+//
+// So now we imagine our compressed table to be a sequence of entries, each entry being a variably sized delta
+// from the previous entry. As a result we need some means to encode how large each delta in the table is. We
+// could use a fixed size field (a 5-bit length field would be able to encode any length between 1 and 32
+// bits, say). This is troublesome since although most entry values are close in value there are a few
+// (usually a minority) that require much larger deltas (hot/cold data splitting based on profiling can cause
+// this for instance). For most tables this would force us to use a large fixed-size length field for every
+// entry, just to deal with the relatively uncommon worst case (5 bits would be enough, but many entry deltas
+// can be encoded in 2 or 3 bits).
+//
+// Instead we utilize a compromise: we store all delta lengths with a small number of bits
+// (kLookupMapLengthBits below). Instead of encoding the length directly this value indexes a per-map table of
+// possible delta encoding lengths. During ngen we calculate the optimal value for each entry in this encoding
+// length table. The advantage here is that it lets us encode both best case and worst case delta lengths with
+// a fixed size but small field. The disadvantage is that some deltas will be encoded with more bits than they
+// strictly need.
+//
+// This still leaves the problem of runtime lookup performance. Touches to the cold section of a LookupMap
+// aren't all that critical (after all the data is meant to be cold), but looking up the last entry of a map
+// with 22 thousand entries (roughly the size of the MethodDefToDesc map in mscorlib at the time of
+// writing) is still likely to be so inefficient as to be noticeable. Remember that the issue is that we have to
+// decode all predecessor entries in order to compute the value of a given entry in the table.
+//
+// To address this we introduce an index to each compressed map. The index contains an entry for each
+// kLookupMapIndexStride'th entry in the compressed map. The index entry consists of the RVA of the
+// corresponding table value and the bit offset into the compressed map at which the data for the next entry
+// commences. Thus we can use the index to find a value within kLookupMapIndexStride entries of our target and
+// then proceed to decode only the last few compressed entries to finish the job. This reduces the lookup to a
+// constant time operation once more (given a reasonable value for kLookupMapIndexStride).
+//
+// The main areas in which this algorithm can be tuned are the number of bits used as an index into the
+// encoding lengths table (kLookupMapLengthBits) and the frequency with which entries are bookmarked in the
+// index (kLookupMapIndexStride). The current values have been set based on looking at models of mscorlib,
+// PresentationCore and PresentationFramework built from the actual ridmap data in their ngen images and
+// methodically trying different values in order to maximize compression or balance size versus likely runtime
+// performance. An alternative strategy was considered using direct (non-length prefix) encoding of the
+// deltas with a couple of variations on probability-based variable-length encoding (completely unbalanced
+// tree and completely balanced tree with pessimally encoded worst case escapes). But these were found to
+// yield best case results similar to the above but with more complex processing required at ngen (optimal
+// results for these algorithms are achieved when you have enough resources to build a probability map of your
+// entire data).
+//
+// Note that not all lookup tables are suitable for compression. In fact we compress only TypeDefToMethodTable
+// and MethodDefToDesc tables. For one thing this optimization only brings benefits to larger tables. But more
+// importantly we cannot mutate compressed entries (for obvious reasons). Many of the lookup maps are only
+// partially populated at ngen time or otherwise might be updated at runtime and thus are not candidates.
+//
+// In the Threshold timeframe (predicted to be .Net 4.5.3 at the time of writing), we added profiler support
+// for adding new types to NGEN images. Historically we could always do this for jitted images, but one of the
+// blockers for NGEN were the compressed RID maps. We worked around that by supporting multi-node maps in which
+// the first node is compressed, but all future nodes are uncompressed. The NGENed portion will all land in the
+// compressed node, while the new profiler added data will land in the uncompressed portion. Note this could
+// probably be leveraged for other dynamic scenarios such as a limited form of EnC, but nothing further has
+// been implemented at this time.
+//
+
+// Some useful constants used when compressing tables.
+enum {
+ kLookupMapLengthBits = 2, // Bits used to encode an index into a table of possible value lengths
+ kLookupMapLengthEntries = 1 << kLookupMapLengthBits, // Number of entries in the encoding table above
+ kLookupMapIndexStride = 0x10, // The range of table entries covered by one index entry (power of two for faster hash lookup)
+ kBitsPerRVA = sizeof(DWORD) * 8, // Bits in an (uncompressed) table value RVA (RVAs
+ // currently still 32-bit even on 64-bit platforms)
+#ifdef _WIN64
+ kFlagBits = 3, // Number of bits at the bottom of a value
+ // pointer that may be used for flags
+#else // _WIN64
+ kFlagBits = 2,
+#endif // _WIN64
+
+};
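+
+// (kFlagBits relies on the pointed-to data structures being at least 8-byte aligned on 64-bit
+// platforms and 4-byte aligned on 32-bit platforms, so the low 3 (or 2) bits of every value
+// pointer are always zero and are free to carry flags.)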
+
+#endif // FEATURE_PREJIT
+
+struct LookupMapBase
+{
+ DPTR(LookupMapBase) pNext;
+
+ ArrayDPTR(TADDR) pTable;
+
+ // Number of elements in this node (only RIDs less than this value can be present in this node)
+ DWORD dwCount;
+
+ // Set of flags that the map supports writing on top of the data value
+ TADDR supportedFlags;
+
+#ifdef FEATURE_PREJIT
+ struct HotItem
+ {
+ DWORD rid;
+ TADDR value;
+ static int __cdecl Cmp(const void* a_, const void* b_);
+ };
+ DWORD dwNumHotItems;
+ ArrayDPTR(HotItem) hotItemList;
+ PTR_TADDR FindHotItemValuePtr(DWORD rid);
+
+ //
+ // Compressed map support
+ //
+ PTR_CBYTE pIndex; // Bookmark for every kLookupMapIndexStride'th entry in the table
+ DWORD cIndexEntryBits; // Number of bits in every index entry
+ DWORD cbTable; // Number of bytes of compressed table data at pTable
+ DWORD cbIndex; // Number of bytes of index data at pIndex
+ BYTE rgEncodingLengths[kLookupMapLengthEntries]; // Table of delta encoding lengths for
+ // compressed values
+
+ // Returns true if this map instance is compressed (this can only happen at runtime when running against
+ // an ngen image). Currently and for the foreseeable future only TypeDefToMethodTable and MethodDefToDesc
+ // tables can be compressed.
+ bool MapIsCompressed()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return pIndex != NULL;
+ }
+
+protected:
+ // Internal routine used to iterate through one entry in the compressed table.
+ INT32 GetNextCompressedEntry(BitStreamReader *pTableStream, INT32 iLastValue);
+
+public:
+ // Public method used to retrieve the full value (non-RVA) of a compressed table entry.
+ TADDR GetValueFromCompressedMap(DWORD rid);
+
+#ifndef DACCESS_COMPILE
+#ifndef CLR_STANDALONE_BINDER
+ void CreateHotItemList(DataImage *image, CorProfileData *profileData, int table, BOOL fSkipNullEntries = FALSE);
+ void Save(DataImage *image, DataImage::ItemKind kind, CorProfileData *profileData, int table, BOOL fCopyValues = FALSE);
+ void SaveUncompressedMap(DataImage *image, DataImage::ItemKind kind, BOOL fCopyValues = FALSE);
+ void ConvertSavedMapToUncompressed(DataImage *image, DataImage::ItemKind kind);
+ void Fixup(DataImage *image, BOOL fFixupEntries = TRUE);
+#endif // !CLR_STANDALONE_BINDER
+#endif // !DACCESS_COMPILE
+
+#ifdef _DEBUG
+ void CheckConsistentHotItemList();
+#endif
+
+#endif // FEATURE_PREJIT
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
+ bool enumThis);
+ void ListEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif // DACCESS_COMPILE
+
+ PTR_TADDR GetIndexPtr(DWORD index)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifdef FEATURE_PREJIT
+ _ASSERTE(!MapIsCompressed());
+#endif // FEATURE_PREJIT
+ _ASSERTE(index < dwCount);
+ return dac_cast<PTR_TADDR>(pTable) + index;
+ }
+
+ PTR_TADDR GetElementPtr(DWORD rid);
+ PTR_TADDR GrowMap(Module * pModule, DWORD rid);
+
+ // Get number of RIDs that this table can store
+ DWORD GetSize();
+
+#ifdef _DEBUG
+ void DebugGetRidMapOccupancy(DWORD *pdwOccupied, DWORD *pdwSize);
+#endif
+};
+
+#define NO_MAP_FLAGS ((TADDR)0)
+
+template <typename TYPE>
+struct LookupMap : LookupMapBase
+{
+ static TYPE GetValueAt(PTR_TADDR pValue, TADDR* pFlags, TADDR supportedFlags);
+ static void SetValueAt(PTR_TADDR pValue, TYPE value, TADDR flags);
+
+ TYPE GetElement(DWORD rid, TADDR* pFlags);
+ void SetElement(DWORD rid, TYPE value, TADDR flags);
+ BOOL TrySetElement(DWORD rid, TYPE value, TADDR flags);
+ void AddElement(Module * pModule, DWORD rid, TYPE value, TADDR flags);
+ void EnsureElementCanBeStored(Module * pModule, DWORD rid);
+ DWORD Find(TYPE value, TADDR* flags);
+
+
+public:
+
+ //
+ // Retrieve the value associated with a rid
+ //
+ TYPE GetElement(DWORD rid)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return GetElement(rid, NULL);
+ }
+
+ TYPE GetElementAndFlags(DWORD rid, TADDR* pFlags)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(pFlags != NULL);
+
+ return GetElement(rid, pFlags);
+ }
+
+ //
+ // Stores an association in a map that has been previously grown to
+ // the required size. Will never throw or fail.
+ //
+ void SetElement(DWORD rid, TYPE value)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ SetElement(rid, value, 0);
+ }
+
+ void SetElementWithFlags(DWORD rid, TYPE value, TADDR flags)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ // Validate flags: that they are in the predefined range and that the range does not collide with value
+ _ASSERTE((flags & supportedFlags) == flags);
+ _ASSERTE((dac_cast<TADDR>(value) & supportedFlags) == 0);
+
+ SetElement(rid, value, flags);
+ }
+
+ void AddFlag(DWORD rid, TADDR flag)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE((flag & supportedFlags) == flag);
+ _ASSERTE(!MapIsCompressed());
+ _ASSERTE(dwNumHotItems == 0);
+
+ PTR_TADDR pElement = GetElementPtr(rid);
+ _ASSERTE(pElement);
+
+ if (!pElement)
+ {
+ return;
+ }
+
+ TADDR existingFlags;
+ TYPE existingValue = GetValueAt(pElement, &existingFlags, supportedFlags);
+ SetValueAt(pElement, existingValue, existingFlags | flag);
+ }
+
+ //
+ // Try to store an association in a map. Will never throw or fail.
+ //
+ BOOL TrySetElement(DWORD rid, TYPE value)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return TrySetElement(rid, value, 0);
+ }
+
+ BOOL TrySetElementWithFlags(DWORD rid, TYPE value, TADDR flags)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // Validate flags: that they are in the predefined range and that the range does not collide with value
+ _ASSERTE((flags & supportedFlags) == flags);
+ _ASSERTE((dac_cast<TADDR>(value) & supportedFlags) == 0);
+
+ return TrySetElement(rid, value, flags);
+ }
+
+ //
+ // Stores an association in a map. Grows the map as necessary.
+ //
+ void AddElement(Module * pModule, DWORD rid, TYPE value)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ AddElement(pModule, rid, value, 0);
+ }
+
+ void AddElementWithFlags(Module * pModule, DWORD rid, TYPE value, TADDR flags)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // Validate flags: that they are in the predefined range and that the range does not collide with value
+ _ASSERTE((flags & supportedFlags) == flags);
+ _ASSERTE((dac_cast<TADDR>(value) & supportedFlags) == 0);
+
+ AddElement(pModule, rid, value, flags);
+ }
+
+ //
+ // Find the given value in the table and return its RID
+ //
+ DWORD Find(TYPE value)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return Find(value, NULL);
+ }
+
+ DWORD FindWithFlags(TYPE value, TADDR flags)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // Validate flags: that they are in the predefined range and that the range does not collide with value
+ _ASSERTE((flags & supportedFlags) == flags);
+ _ASSERTE((dac_cast<TADDR>(value) & supportedFlags) == 0);
+
+ return Find(value, &flags);
+ }
+
+ class Iterator
+ {
+ public:
+ Iterator(LookupMap* map);
+
+ BOOL Next();
+
+ TYPE GetElement()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return GetElement(NULL);
+ }
+
+ TYPE GetElementAndFlags(TADDR* pFlags)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return GetElement(pFlags);
+ }
+
+ private:
+ TYPE GetElement(TADDR* pFlags);
+
+ LookupMap* m_map;
+ DWORD m_index;
+#ifdef FEATURE_PREJIT
+ // Support for iterating compressed maps.
+ INT32 m_currentEntry; // RVA of current entry value
+ BitStreamReader m_tableStream; // Our current context in the compressed bit stream
+#endif // FEATURE_PREJIT
+ };
+};
+
+// Placeholder types for RID maps that store cross-module references
+
+#ifndef CLR_STANDALONE_BINDER
+
+class TypeRef { };
+typedef DPTR(class TypeRef) PTR_TypeRef;
+
+class MemberRef { };
+typedef DPTR(class MemberRef) PTR_MemberRef;
+
+#else // CLR_STANDALONE_BINDER
+
+struct TypeRef;
+typedef DPTR(struct TypeRef) PTR_TypeRef;
+
+struct MemberRef;
+typedef DPTR(struct MemberRef) PTR_MemberRef;
+
+#endif // CLR_STANDALONE_BINDER
+
+// flag used to mark member ref pointers to field descriptors in the member ref cache
+#define IS_FIELD_MEMBER_REF ((TADDR)0x00000002)
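+
+// Illustrative (hypothetical) reader-side check: an entry tagged with this bit holds a
+// FieldDesc rather than a MethodDesc, e.g.
+//
+//     if (taddr & IS_FIELD_MEMBER_REF)
+//         pFD = (FieldDesc *)(taddr & ~IS_FIELD_MEMBER_REF);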
+
+
+//
+// NGen image layout information that we need to quickly access at runtime
+//
+typedef DPTR(struct NGenLayoutInfo) PTR_NGenLayoutInfo;
+struct NGenLayoutInfo
+{
+ // One range for each of the hot, unprofiled, and cold code sections
+ MemoryRange m_CodeSections[3];
+
+ // Pointers to the RUNTIME_FUNCTION tables for the hot, unprofiled, and cold code sections.
+ PTR_RUNTIME_FUNCTION m_pRuntimeFunctions[3];
+
+ // Number of RUNTIME_FUNCTIONs for hot, unprofiled, and cold code sections.
+ DWORD m_nRuntimeFunctions[3];
+
+ // Parallel arrays of MethodDesc RVAs for hot and unprofiled methods; each parallels the corresponding m_pRuntimeFunctions array.
+ // The first array is for hot methods. The second array is for unprofiled methods.
+ PTR_DWORD m_MethodDescs[2];
+
+ // Lookup table to speed up RUNTIME_FUNCTION lookup.
+ // The first array is for hot methods. The second array is for unprofiled methods.
+ // Number of elements is m_UnwindInfoLookupTableEntryCount + 1.
+ // The last element of the lookup table is a sentinel entry that covers the rest of the code section.
+ // Values are indices into m_pRuntimeFunctions array.
+ PTR_DWORD m_UnwindInfoLookupTable[2];
+
+ // Count of lookup entries in m_UnwindInfoLookupTable
+ DWORD m_UnwindInfoLookupTableEntryCount[2];
+
+ // Map for matching the cold code with hot code. Index is relative position of RUNTIME_FUNCTION within the section.
+ PTR_CORCOMPILE_COLD_METHOD_ENTRY m_ColdCodeMap;
+
+ // One range for each precode section (hot, cold, and writeable variants)
+ MemoryRange m_Precodes[4];
+
+ MemoryRange m_JumpStubs;
+ MemoryRange m_StubLinkStubs;
+ MemoryRange m_VirtualMethodThunks;
+ MemoryRange m_ExternalMethodThunks;
+ MemoryRange m_ExceptionInfoLookupTable;
+
+ PCODE m_pPrestubJumpStub;
+#ifdef HAS_FIXUP_PRECODE
+ PCODE m_pPrecodeFixupJumpStub;
+#endif
+ PCODE m_pVirtualImportFixupJumpStub;
+ PCODE m_pExternalMethodFixupJumpStub;
+ DWORD m_rvaFilterPersonalityRoutine;
+};
+
+#ifndef CLR_STANDALONE_BINDER
+
+//
+// VASigCookies are allocated to encapsulate a varargs call signature.
+// A reference to the cookie is embedded in the code stream. Cookies
+// are shared amongst call sites with identical signatures in the same
+// module.
+//
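+// (For example, two call sites in the same module that invoke vararg methods with identical
+// signatures share a single cookie, while a call with a different argument list gets its own.)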
+
+typedef DPTR(struct VASigCookie) PTR_VASigCookie;
+typedef DPTR(PTR_VASigCookie) PTR_PTR_VASigCookie;
+struct VASigCookie
+{
+ // The JIT wants to know that the size of the arguments comes first,
+ // so please keep this field first
+ unsigned sizeOfArgs; // size of argument list
+ Volatile<PCODE> pNDirectILStub; // will be used if the target is NDirect (tag == 0)
+ PTR_Module pModule;
+ Signature signature;
+};
+
+//
+// VASigCookies are allocated in VASigCookieBlocks to amortize
+// allocation cost and allow proper bookkeeping.
+//
+
+struct VASigCookieBlock
+{
+ enum {
+#ifdef _DEBUG
+ kVASigCookieBlockSize = 2
+#else // !_DEBUG
+ kVASigCookieBlockSize = 20
+#endif // !_DEBUG
+ };
+
+ VASigCookieBlock *m_Next;
+ UINT m_numcookies;
+ VASigCookie m_cookies[kVASigCookieBlockSize];
+};
+
+#else // CLR_STANDALONE_BINDER
+
+struct VASigCookieBlock;
+
+#endif // CLR_STANDALONE_BINDER
+
+// This lookup table persists the information about boxed statics into the ngen'ed image,
+// which allows one to perform type static initialization without touching expensive EEClasses. Note
+// that since the persisted info is stored at ngen time as opposed to class layout time,
+// in jitted scenarios we would still touch EEClasses. This implies that the variables which store
+// this info in the EEClasses are still present.
+
+// We used this table to store more data required to run cctors in the past (which explains the name),
+// but we are only using it for boxed statics now. Boxed statics are rare. The complexity may not
+// be worth the gains. We should consider removing this cache and avoiding the complexity.
+
+typedef DPTR(struct ClassCtorInfoEntry) PTR_ClassCtorInfoEntry;
+struct ClassCtorInfoEntry
+{
+ DWORD firstBoxedStaticOffset;
+ DWORD firstBoxedStaticMTIndex;
+ WORD numBoxedStatics;
+ WORD hasFixedAddressVTStatics; // This is a WORD to avoid padding in the data structure. It is really a bool.
+};
+
+#define MODULE_CTOR_ELEMENTS 256
+struct ModuleCtorInfo
+{
+ DWORD numElements;
+ DWORD numLastAllocated;
+ DWORD numElementsHot;
+ DPTR(PTR_MethodTable) ppMT; // size is numElements
+ PTR_ClassCtorInfoEntry cctorInfoHot; // size is numElementsHot
+ PTR_ClassCtorInfoEntry cctorInfoCold; // size is numElements-numElementsHot
+
+ PTR_DWORD hotHashOffsets; // Indices to the start of each "hash region" in the hot part of the ppMT array.
+ PTR_DWORD coldHashOffsets; // Indices to the start of each "hash region" in the cold part of the ppMT array.
+ DWORD numHotHashes;
+ DWORD numColdHashes;
+
+ ArrayDPTR(FixupPointer<PTR_MethodTable>) ppHotGCStaticsMTs; // hot table
+ ArrayDPTR(FixupPointer<PTR_MethodTable>) ppColdGCStaticsMTs; // cold table
+
+ DWORD numHotGCStaticsMTs;
+ DWORD numColdGCStaticsMTs;
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ typedef enum {HOT, COLD} REGION;
+ FORCEINLINE DWORD GenerateHash(PTR_MethodTable pMT, REGION region)
+ {
+ SUPPORTS_DAC;
+
+ DWORD tmp1 = pMT->GetTypeDefRid();
+ DWORD tmp2 = pMT->GetNumVirtuals();
+ DWORD tmp3 = pMT->GetNumInterfaces();
+
+ tmp1 = (tmp1 << 7) + (tmp1 << 0); // 10000001
+ tmp2 = (tmp2 << 6) + (tmp2 << 1); // 01000010
+ tmp3 = (tmp3 << 4) + (tmp3 << 3); // 00011000
+
+ tmp1 ^= (tmp1 >> 4); // 10001001 0001
+ tmp2 ^= (tmp2 >> 4); // 01000110 0010
+ tmp3 ^= (tmp3 >> 4); // 00011001 1000
+
+ DWORD hashVal = tmp1 + tmp2 + tmp3;
+
+ if (region == HOT)
+ hashVal &= (numHotHashes - 1); // numHotHashes is required to be a power of two
+ else
+ hashVal &= (numColdHashes - 1); // numColdHashes is required to be a power of two
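+ // (masking with n - 1 computes hashVal % n, which works only because n is a power of two)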
+
+ return hashVal;
+ };
+
+#ifndef CLR_STANDALONE_BINDER
+ ArrayDPTR(FixupPointer<PTR_MethodTable>) GetGCStaticMTs(DWORD index);
+
+#ifdef FEATURE_PREJIT
+
+ void AddElement(MethodTable *pMethodTable);
+ void Save(DataImage *image, CorProfileData *profileData);
+ void Fixup(DataImage *image);
+
+ class ClassCtorInfoEntryArraySort : public CQuickSort<DWORD>
+ {
+ private:
+ PTR_MethodTable *m_pBase1;
+
+ public:
+ //Constructor
+ ClassCtorInfoEntryArraySort(DWORD *base, PTR_MethodTable *base1, int count)
+ : CQuickSort<DWORD>(base, count)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ m_pBase1 = base1;
+ }
+
+ // Returns -1, 0, or 1 if first's value is less than, equal to, or greater than second's
+ FORCEINLINE int Compare(DWORD *first, DWORD *second)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (*first < *second)
+ return -1;
+ else if (*first == *second)
+ return 0;
+ else
+ return 1;
+ }
+
+ // Swap is overridden so that we can sort both the MethodTable pointer
+ // array and the ClassCtorInfoEntry array in parallel.
+ FORCEINLINE void Swap(SSIZE_T iFirst, SSIZE_T iSecond)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD sTemp;
+ PTR_MethodTable sTemp1;
+
+ if (iFirst == iSecond) return;
+
+ sTemp = m_pBase[iFirst];
+ m_pBase[iFirst] = m_pBase[iSecond];
+ m_pBase[iSecond] = sTemp;
+
+ sTemp1 = m_pBase1[iFirst];
+ m_pBase1[iFirst] = m_pBase1[iSecond];
+ m_pBase1[iSecond] = sTemp1;
+ }
+ };
+#endif // FEATURE_PREJIT
+#endif // CLR_STANDALONE_BINDER
+};
+
+
+
+#if defined(FEATURE_PREJIT) && !defined(CLR_STANDALONE_BINDER)
+
+// For IBC Profiling we collect signature blobs for instantiated types.
+// For such instantiated types and methods we create our own ibc token
+//
+// For instantiated types and methods, there may be no corresponding type
+// or method token in our module.
+// For these cases we create our own ibc token definition that is used
+// to refer to these external types and methods. We have to handle
+// external nested types and namespaces and method signatures.
+//
+// ParamTypeSpec = 4, // Instantiated Type Signature
+// ParamMethodSpec = 5, // Instantiated Method Signature
+// ExternalNamespaceDef = 6, // External Namespace Token Definition
+// ExternalTypeDef = 7, // External Type Token Definition
+// ExternalSignatureDef = 8, // External Signature Definition
+// ExternalMethodDef = 9, // External Method Token Definition
+//
+// typedef DPTR(class ProfilingBlobEntry) PTR_ProfilingBlobEntry;
+class ProfilingBlobEntry
+{
+public:
+ virtual ~ProfilingBlobEntry() { LIMITED_METHOD_CONTRACT; };
+ virtual bool IsEqual(const ProfilingBlobEntry * other) const = 0; // Pure Virtual
+ virtual size_t Hash() const = 0;
+ virtual BlobType kind() const = 0;
+ virtual size_t varSize() const = 0;
+ virtual void newToken() = 0;
+ mdToken token() const { LIMITED_METHOD_CONTRACT; return m_token; }
+
+protected:
+ mdToken m_token;
+};
+
+class TypeSpecBlobEntry : public ProfilingBlobEntry
+{
+public:
+ TypeSpecBlobEntry(DWORD _cbSig, PCCOR_SIGNATURE _pSig);
+
+ virtual ~TypeSpecBlobEntry() { LIMITED_METHOD_CONTRACT; delete [] m_pSig; }
+ virtual BlobType kind() const { LIMITED_METHOD_CONTRACT; return ParamTypeSpec; }
+ virtual size_t varSize() const { LIMITED_METHOD_CONTRACT; return sizeof(COR_SIGNATURE) * m_cbSig; }
+ virtual void newToken() { LIMITED_METHOD_CONTRACT; m_token = ++s_lastTypeSpecToken; }
+ DWORD flags() const { LIMITED_METHOD_CONTRACT; return m_flags; }
+ DWORD cbSig() const { LIMITED_METHOD_CONTRACT; return m_cbSig; }
+ PCCOR_SIGNATURE pSig() const { LIMITED_METHOD_CONTRACT; return m_pSig; }
+ void orFlag(DWORD flag) { LIMITED_METHOD_CONTRACT; m_flags |= flag; }
+ static size_t HashInit() { LIMITED_METHOD_CONTRACT; return 156437; }
+
+ virtual bool IsEqual(const ProfilingBlobEntry * other) const;
+ virtual size_t Hash() const;
+
+ static const TypeSpecBlobEntry * FindOrAdd(PTR_Module pModule,
+ DWORD _cbSig,
+ PCCOR_SIGNATURE _pSig);
+
+private:
+ DWORD m_flags;
+ DWORD m_cbSig;
+ PCCOR_SIGNATURE m_pSig;
+
+ static idTypeSpec s_lastTypeSpecToken;
+};
+
+class MethodSpecBlobEntry : public ProfilingBlobEntry
+{
+public:
+ MethodSpecBlobEntry(DWORD _cbSig, PCCOR_SIGNATURE _pSig);
+
+ virtual ~MethodSpecBlobEntry() { LIMITED_METHOD_CONTRACT; delete [] m_pSig; }
+ virtual BlobType kind() const { LIMITED_METHOD_CONTRACT; return ParamMethodSpec; }
+ virtual size_t varSize() const { LIMITED_METHOD_CONTRACT; return sizeof(COR_SIGNATURE) * m_cbSig; }
+ virtual void newToken() { LIMITED_METHOD_CONTRACT; m_token = ++s_lastMethodSpecToken; }
+ DWORD flags() const { LIMITED_METHOD_CONTRACT; return m_flags; }
+ DWORD cbSig() const { LIMITED_METHOD_CONTRACT; return m_cbSig; }
+ PCCOR_SIGNATURE pSig() const { LIMITED_METHOD_CONTRACT; return m_pSig; }
+ void orFlag(DWORD flag) { LIMITED_METHOD_CONTRACT; m_flags |= flag; }
+ static size_t HashInit() { LIMITED_METHOD_CONTRACT; return 187751; }
+
+ virtual bool IsEqual(const ProfilingBlobEntry * other) const;
+ virtual size_t Hash() const;
+
+ static const MethodSpecBlobEntry * FindOrAdd(PTR_Module pModule,
+ DWORD _cbSig,
+ PCCOR_SIGNATURE _pSig);
+
+private:
+ DWORD m_flags;
+ DWORD m_cbSig;
+ PCCOR_SIGNATURE m_pSig;
+
+ static idTypeSpec s_lastMethodSpecToken;
+};
+
+class ExternalNamespaceBlobEntry : public ProfilingBlobEntry
+{
+public:
+ ExternalNamespaceBlobEntry(LPCSTR _pName);
+
+ virtual ~ExternalNamespaceBlobEntry() { LIMITED_METHOD_CONTRACT; delete [] m_pName; }
+ virtual BlobType kind() const { LIMITED_METHOD_CONTRACT; return ExternalNamespaceDef; }
+ virtual size_t varSize() const { LIMITED_METHOD_CONTRACT; return sizeof(CHAR) * m_cbName; }
+ virtual void newToken() { LIMITED_METHOD_CONTRACT; m_token = ++s_lastExternalNamespaceToken; }
+ DWORD cbName() const { LIMITED_METHOD_CONTRACT; return m_cbName; }
+ LPCSTR pName() const { LIMITED_METHOD_CONTRACT; return m_pName; }
+ static size_t HashInit() { LIMITED_METHOD_CONTRACT; return 225307; }
+
+ virtual bool IsEqual(const ProfilingBlobEntry * other) const;
+ virtual size_t Hash() const;
+
+ static const ExternalNamespaceBlobEntry * FindOrAdd(PTR_Module pModule, LPCSTR _pName);
+
+private:
+ DWORD m_cbName;
+ LPCSTR m_pName;
+
+ static idExternalNamespace s_lastExternalNamespaceToken;
+};
+
+class ExternalTypeBlobEntry : public ProfilingBlobEntry
+{
+public:
+ ExternalTypeBlobEntry(mdToken _assemblyRef, mdToken _nestedClass,
+ mdToken _nameSpace, LPCSTR _pName);
+
+ virtual ~ExternalTypeBlobEntry() { LIMITED_METHOD_CONTRACT; delete [] m_pName; }
+ virtual BlobType kind() const { LIMITED_METHOD_CONTRACT; return ExternalTypeDef; }
+ virtual size_t varSize() const { LIMITED_METHOD_CONTRACT; return sizeof(CHAR) * m_cbName; }
+ virtual void newToken() { LIMITED_METHOD_CONTRACT; m_token = ++s_lastExternalTypeToken; }
+ mdToken assemblyRef() const { LIMITED_METHOD_CONTRACT; return m_assemblyRef; }
+ mdToken nestedClass() const { LIMITED_METHOD_CONTRACT; return m_nestedClass; }
+ mdToken nameSpace() const { LIMITED_METHOD_CONTRACT; return m_nameSpace; }
+ DWORD cbName() const { LIMITED_METHOD_CONTRACT; return m_cbName; }
+ LPCSTR pName() const { LIMITED_METHOD_CONTRACT; return m_pName; }
+ static size_t HashInit() { LIMITED_METHOD_CONTRACT; return 270371; }
+
+ virtual bool IsEqual(const ProfilingBlobEntry * other) const;
+ virtual size_t Hash() const;
+
+ static const ExternalTypeBlobEntry * FindOrAdd(PTR_Module pModule,
+ mdToken _assemblyRef,
+ mdToken _nestedClass,
+ mdToken _nameSpace,
+ LPCSTR _pName);
+
+private:
+ mdToken m_assemblyRef;
+ mdToken m_nestedClass;
+ mdToken m_nameSpace;
+ DWORD m_cbName;
+ LPCSTR m_pName;
+
+ static idExternalType s_lastExternalTypeToken;
+};
+
+class ExternalSignatureBlobEntry : public ProfilingBlobEntry
+{
+public:
+ ExternalSignatureBlobEntry(DWORD _cbSig, PCCOR_SIGNATURE _pSig);
+
+ virtual ~ExternalSignatureBlobEntry() { LIMITED_METHOD_CONTRACT; delete [] m_pSig; }
+ virtual BlobType kind() const { LIMITED_METHOD_CONTRACT; return ExternalSignatureDef; }
+ virtual size_t varSize() const { LIMITED_METHOD_CONTRACT; return sizeof(COR_SIGNATURE) * m_cbSig; }
+ virtual void newToken() { LIMITED_METHOD_CONTRACT; m_token = ++s_lastExternalSignatureToken; }
+ DWORD cbSig() const { LIMITED_METHOD_CONTRACT; return m_cbSig; }
+ PCCOR_SIGNATURE pSig() const { LIMITED_METHOD_CONTRACT; return m_pSig; }
+ static size_t HashInit() { LIMITED_METHOD_CONTRACT; return 324449; }
+
+ virtual bool IsEqual(const ProfilingBlobEntry * other) const;
+ virtual size_t Hash() const;
+
+ static const ExternalSignatureBlobEntry * FindOrAdd(PTR_Module pModule,
+ DWORD _cbSig,
+ PCCOR_SIGNATURE _pSig);
+
+private:
+ DWORD m_cbSig;
+ PCCOR_SIGNATURE m_pSig;
+
+ static idExternalSignature s_lastExternalSignatureToken;
+};
+
+class ExternalMethodBlobEntry : public ProfilingBlobEntry
+{
+public:
+ ExternalMethodBlobEntry(mdToken _nestedClass, mdToken _signature, LPCSTR _pName);
+
+ virtual ~ExternalMethodBlobEntry() { LIMITED_METHOD_CONTRACT; delete [] m_pName; }
+ virtual BlobType kind() const { LIMITED_METHOD_CONTRACT; return ExternalMethodDef; }
+ virtual size_t varSize() const { LIMITED_METHOD_CONTRACT; return sizeof(CHAR) * m_cbName; }
+ virtual void newToken() { LIMITED_METHOD_CONTRACT; m_token = ++s_lastExternalMethodToken; }
+ mdToken nestedClass() const { LIMITED_METHOD_CONTRACT; return m_nestedClass; }
+ mdToken signature() const { LIMITED_METHOD_CONTRACT; return m_signature; }
+ DWORD cbName() const { LIMITED_METHOD_CONTRACT; return m_cbName; }
+ LPCSTR pName() const { LIMITED_METHOD_CONTRACT; return m_pName; }
+ static size_t HashInit() { LIMITED_METHOD_CONTRACT; return 389357; }
+
+ virtual bool IsEqual(const ProfilingBlobEntry * other) const;
+ virtual size_t Hash() const;
+
+ static const ExternalMethodBlobEntry * FindOrAdd(PTR_Module pModule,
+ mdToken _nestedClass,
+ mdToken _signature,
+ LPCSTR _pName);
+
+private:
+ mdToken m_nestedClass;
+ mdToken m_signature;
+ DWORD m_cbName;
+ LPCSTR m_pName;
+
+ static idExternalMethod s_lastExternalMethodToken;
+};
+
+struct IbcNameHandle
+{
+ mdToken tkIbcNameSpace;
+ mdToken tkIbcNestedClass;
+
+ LPCSTR szName;
+ LPCSTR szNamespace;
+ mdToken tkEnclosingClass;
+};
+
+//
+// Hashtable of ProfilingBlobEntry *
+//
+class ProfilingBlobTraits : public NoRemoveSHashTraits<DefaultSHashTraits<ProfilingBlobEntry *> >
+{
+public:
+ typedef ProfilingBlobEntry * key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e;
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return k1->IsEqual(k2);
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t) k->Hash();
+ }
+ static const element_t Null()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return NULL;
+ }
+
+ static bool IsNull(const element_t &e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (e == NULL);
+ }
+};
+
+typedef SHash<ProfilingBlobTraits> ProfilingBlobTable;
+typedef DPTR(ProfilingBlobTable) PTR_ProfilingBlobTable;
+
+
+#define METHODTABLE_RESTORE_REASON() \
+ RESTORE_REASON_FUNC(CanNotPreRestoreHardBindToParentMethodTable) \
+ RESTORE_REASON_FUNC(CanNotPreRestoreHardBindToCanonicalMethodTable) \
+ RESTORE_REASON_FUNC(CrossModuleNonCanonicalMethodTable) \
+ RESTORE_REASON_FUNC(CanNotHardBindToInstanceMethodTableChain) \
+ RESTORE_REASON_FUNC(GenericsDictionaryNeedsRestore) \
+ RESTORE_REASON_FUNC(InterfaceIsGeneric) \
+ RESTORE_REASON_FUNC(CrossModuleGenericsStatics) \
+ RESTORE_REASON_FUNC(ComImportStructDependenciesNeedRestore) \
+ RESTORE_REASON_FUNC(CrossAssembly) \
+ RESTORE_REASON_FUNC(ArrayElement) \
+ RESTORE_REASON_FUNC(ProfilingEnabled)
+
+#undef RESTORE_REASON_FUNC
+#define RESTORE_REASON_FUNC(s) s ,
+typedef enum
+{
+
+ METHODTABLE_RESTORE_REASON()
+
+ TotalMethodTables
+} MethodTableRestoreReason;
+#undef RESTORE_REASON_FUNC
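+
+// For reference, with RESTORE_REASON_FUNC(s) defined as "s ," the X-macro above
+// expands the enum to one enumerator per reason:
+//
+//   typedef enum
+//   {
+//       CanNotPreRestoreHardBindToParentMethodTable,
+//       CanNotPreRestoreHardBindToCanonicalMethodTable,
+//       /* ... remaining reasons ... */
+//       ProfilingEnabled,
+//       TotalMethodTables
+//   } MethodTableRestoreReason;
+//
+// Redefining RESTORE_REASON_FUNC before another use of the list (e.g. to emit
+// strings) lets one list drive both the enum and any name table.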
+
+class NgenStats
+{
+public:
+ NgenStats()
+ {
+ LIMITED_METHOD_CONTRACT;
+ memset (MethodTableRestoreNumReasons, 0, sizeof(DWORD)*(TotalMethodTables+1));
+ }
+
+ DWORD MethodTableRestoreNumReasons[TotalMethodTables + 1];
+};
+#endif // FEATURE_PREJIT && !CLR_STANDALONE_BINDER
+
+//
+// A Module is the primary unit of code packaging in the runtime. It
+// corresponds mostly to an OS executable image, although other kinds
+// of modules exist.
+//
+class UMEntryThunk;
+
+// Hashtable of absolute addresses of IL blobs for dynamic IL, keyed by method token
+
+struct DynamicILBlobEntry
+{
+ mdToken m_methodToken;
+ TADDR m_il;
+};
+
+class DynamicILBlobTraits : public NoRemoveSHashTraits<DefaultSHashTraits<DynamicILBlobEntry> >
+{
+public:
+ typedef mdToken key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return e.m_methodToken;
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return k1 == k2;
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return (count_t)(size_t)k;
+ }
+ static const element_t Null()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ DynamicILBlobEntry e;
+ e.m_il = TADDR(0);
+ e.m_methodToken = 0;
+ return e;
+ }
+ static bool IsNull(const element_t &e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return e.m_methodToken == 0;
+ }
+};
+
+typedef SHash<DynamicILBlobTraits> DynamicILBlobTable;
+typedef DPTR(DynamicILBlobTable) PTR_DynamicILBlobTable;
+
+
+#ifndef CLR_STANDALONE_BINDER
+
+// declare an array type of COR_IL_MAP entries
+typedef ArrayDPTR(COR_IL_MAP) ARRAY_PTR_COR_IL_MAP;
+
+//---------------------------------------------------------------------------------------
+//
+// A profiler may instrument a method by changing the IL. This is typically done when the profiler receives
+// a JITCompilationStarted notification. The profiler also has the option to provide the runtime with
+// a mapping between original IL offsets and instrumented IL offsets. This struct is a simple container
+// for storing the mapping information. We store the mapping information on the Module class, where it can
+// be accessed by the debugger from out-of-process.
+//
+
+class InstrumentedILOffsetMapping
+{
+public:
+ InstrumentedILOffsetMapping();
+
+ // Check whether there is any mapping information stored in this object.
+ BOOL IsNull();
+
+#if !defined(DACCESS_COMPILE)
+ // Release the memory used by the array of COR_IL_MAPs.
+ void Clear();
+
+ void SetMappingInfo(SIZE_T cMap, COR_IL_MAP * rgMap);
+#endif // !DACCESS_COMPILE
+
+ SIZE_T GetCount() const;
+ ARRAY_PTR_COR_IL_MAP GetOffsets() const;
+
+private:
+ SIZE_T m_cMap; // the number of elements in m_rgMap
+ ARRAY_PTR_COR_IL_MAP m_rgMap; // an array of COR_IL_MAPs
+};
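+
+// Illustrative use (a sketch, not runtime code): recording a map handed to us by
+// a profiler, typically via ICorProfilerInfo::SetILInstrumentedCodeMap. That the
+// mapping takes ownership of the array is an assumption here; see Clear(), which
+// releases it. cMap and rgMap are hypothetical locals:
+//
+//   InstrumentedILOffsetMapping mapping;             // IsNull() is TRUE here
+//   mapping.SetMappingInfo(cMap, rgMap);             // cMap COR_IL_MAP entries
+//   _ASSERTE(!mapping.IsNull());
+//   SIZE_T count = mapping.GetCount();               // == cMap
+//   ARRAY_PTR_COR_IL_MAP pOffsets = mapping.GetOffsets();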
+
+//---------------------------------------------------------------------------------------
+//
+// Hash table entry for storing InstrumentedILOffsetMapping. This is keyed by the MethodDef token.
+//
+
+struct ILOffsetMappingEntry
+{
+ ILOffsetMappingEntry()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_methodToken = mdMethodDefNil;
+ // No need to initialize m_mapping. The default ctor of InstrumentedILOffsetMapping does the job.
+ }
+
+ ILOffsetMappingEntry(mdMethodDef token, InstrumentedILOffsetMapping mapping)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_methodToken = token;
+ m_mapping = mapping;
+ }
+
+ mdMethodDef m_methodToken;
+ InstrumentedILOffsetMapping m_mapping;
+};
+
+//---------------------------------------------------------------------------------------
+//
+// This class is used to create the hash table for the instrumented IL offset mapping.
+// It encapsulates the desired behaviour of the templated hash table and implements
+// the various functions needed by the hash table.
+//
+
+class ILOffsetMappingTraits : public NoRemoveSHashTraits<DefaultSHashTraits<ILOffsetMappingEntry> >
+{
+public:
+ typedef mdMethodDef key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return e.m_methodToken;
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (k1 == k2);
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (count_t)(size_t)k;
+ }
+ static const element_t Null()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ ILOffsetMappingEntry e;
+ return e;
+ }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_DAC_CONTRACT; return e.m_methodToken == mdMethodDefNil; }
+};
+
+#endif // CLR_STANDALONE_BINDER
+
+// ESymbolFormat specifies the format used by a symbol stream
+typedef enum
+{
+ eSymbolFormatNone, /* symbol format to use not yet determined */
+ eSymbolFormatPDB, /* PDB format from diasymreader.dll - only safe for trusted scenarios */
+ eSymbolFormatILDB /* ILDB format from ildbsymbols.dll */
+} ESymbolFormat;
+
+#ifdef CLR_STANDALONE_BINDER
+class ILOffsetMappingTraits;
+#endif
+
+// Hash table of profiler-provided instrumented IL offset mapping, keyed by the MethodDef token
+typedef SHash<ILOffsetMappingTraits> ILOffsetMappingTable;
+typedef DPTR(ILOffsetMappingTable) PTR_ILOffsetMappingTable;
+
+
+#ifdef FEATURE_COMINTEROP
+
+//---------------------------------------------------------------------------------------
+//
+// The type of each entry in the Guid to MT hash
+//
+typedef DPTR(GUID) PTR_GUID;
+typedef DPTR(struct GuidToMethodTableEntry) PTR_GuidToMethodTableEntry;
+struct GuidToMethodTableEntry
+{
+ PTR_GUID m_Guid;
+ PTR_MethodTable m_pMT;
+};
+
+//---------------------------------------------------------------------------------------
+//
+// The hash type itself
+//
+typedef DPTR(class GuidToMethodTableHashTable) PTR_GuidToMethodTableHashTable;
+class GuidToMethodTableHashTable : public NgenHashTable<GuidToMethodTableHashTable, GuidToMethodTableEntry, 4>
+{
+public:
+ typedef NgenHashTable<GuidToMethodTableHashTable, GuidToMethodTableEntry, 4> Base_t;
+ friend class Base_t;
+
+#ifndef DACCESS_COMPILE
+
+private:
+ GuidToMethodTableHashTable(Module *pModule, LoaderHeap *pHeap, DWORD cInitialBuckets)
+ : Base_t(pModule, pHeap, cInitialBuckets)
+ { LIMITED_METHOD_CONTRACT; }
+
+public:
+ static GuidToMethodTableHashTable* Create(Module* pModule, DWORD cInitialBuckets, AllocMemTracker *pamTracker);
+
+ GuidToMethodTableEntry * InsertValue(PTR_GUID pGuid, PTR_MethodTable pMT, BOOL bReplaceIfFound, AllocMemTracker *pamTracker);
+
+#endif // !DACCESS_COMPILE
+
+public:
+ typedef Base_t::LookupContext LookupContext;
+
+ PTR_MethodTable GetValue(const GUID * pGuid, LookupContext *pContext);
+ GuidToMethodTableEntry * FindItem(const GUID * pGuid, LookupContext *pContext);
+
+private:
+ BOOL CompareKeys(PTR_GuidToMethodTableEntry pEntry, const GUID * pGuid);
+ static DWORD Hash(const GUID * pGuid);
+
+public:
+ // An iterator for the table
+ struct Iterator
+ {
+ public:
+ Iterator() : m_pTable(NULL), m_fIterating(false)
+ { LIMITED_METHOD_DAC_CONTRACT; }
+ Iterator(GuidToMethodTableHashTable * pTable) : m_pTable(pTable), m_fIterating(false)
+ { LIMITED_METHOD_DAC_CONTRACT; }
+
+ private:
+ friend class GuidToMethodTableHashTable;
+
+ GuidToMethodTableHashTable * m_pTable;
+ BaseIterator m_sIterator;
+ bool m_fIterating;
+ };
+
+ BOOL FindNext(Iterator *it, GuidToMethodTableEntry **ppEntry);
+ DWORD GetCount();
+
+#ifdef DACCESS_COMPILE
+ // do not save this in mini-/heap-dumps
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ { SUPPORTS_DAC; }
+ void EnumMemoryRegionsForEntry(GuidToMethodTableEntry *pEntry, CLRDataEnumMemoryFlags flags)
+ { SUPPORTS_DAC; }
+#endif // DACCESS_COMPILE
+
+#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
+
+public:
+ void Save(DataImage *pImage, CorProfileData *pProfileData);
+ void Fixup(DataImage *pImage);
+
+private:
+ // We save all entries
+ bool ShouldSave(DataImage *pImage, GuidToMethodTableEntry *pEntry)
+ { LIMITED_METHOD_CONTRACT; return true; }
+
+ bool IsHotEntry(GuidToMethodTableEntry *pEntry, CorProfileData *pProfileData)
+ { LIMITED_METHOD_CONTRACT; return false; }
+
+ bool SaveEntry(DataImage *pImage, CorProfileData *pProfileData,
+ GuidToMethodTableEntry *pOldEntry, GuidToMethodTableEntry *pNewEntry,
+ EntryMappingTable *pMap);
+
+ void FixupEntry(DataImage *pImage, GuidToMethodTableEntry *pEntry, void *pFixupBase, DWORD cbFixupOffset);
+
+#endif // FEATURE_PREJIT && !DACCESS_COMPILE
+
+};
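+
+// Illustrative sketch (not part of the runtime): walking the whole table with
+// the iterator declared above (pTable is a hypothetical table pointer):
+//
+//   GuidToMethodTableHashTable::Iterator it(pTable);
+//   GuidToMethodTableEntry *pEntry;
+//   while (pTable->FindNext(&it, &pEntry))
+//   {
+//       // *pEntry->m_Guid maps to pEntry->m_pMT
+//   }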
+
+#endif // FEATURE_COMINTEROP
+
+
+// Hash for MemberRef-to-Desc tables (FieldDesc or MethodDesc)
+typedef DPTR(struct MemberRefToDescHashEntry) PTR_MemberRefToDescHashEntry;
+
+struct MemberRefToDescHashEntry
+{
+ TADDR m_value;
+};
+
+typedef DPTR(class MemberRefToDescHashTable) PTR_MemberRefToDescHashTable;
+
+#define MEMBERREF_MAP_INITIAL_SIZE 10
+
+class MemberRefToDescHashTable: public NgenHashTable<MemberRefToDescHashTable, MemberRefToDescHashEntry, 2>
+{
+ friend class NgenHashTable<MemberRefToDescHashTable, MemberRefToDescHashEntry, 2>;
+#ifndef DACCESS_COMPILE
+
+private:
+ MemberRefToDescHashTable(Module *pModule, LoaderHeap *pHeap, DWORD cInitialBuckets):
+ NgenHashTable<MemberRefToDescHashTable, MemberRefToDescHashEntry, 2>(pModule, pHeap, cInitialBuckets)
+ { LIMITED_METHOD_CONTRACT; }
+
+public:
+
+ static MemberRefToDescHashTable* Create(Module *pModule, DWORD cInitialBuckets, AllocMemTracker *pamTracker);
+
+ MemberRefToDescHashEntry* Insert(mdMemberRef token, MethodDesc *value);
+ MemberRefToDescHashEntry* Insert(mdMemberRef token, FieldDesc *value);
+#endif //!DACCESS_COMPILE
+
+public:
+ typedef NgenHashTable<MemberRefToDescHashTable, MemberRefToDescHashEntry, 2>::LookupContext LookupContext;
+
+ PTR_MemberRef GetValue(mdMemberRef token, BOOL *pfIsMethod);
+
+#ifdef DACCESS_COMPILE
+
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ {
+ WRAPPER_NO_CONTRACT;
+ BaseEnumMemoryRegions(flags);
+ }
+
+ void EnumMemoryRegionsForEntry(MemberRefToDescHashEntry *pEntry, CLRDataEnumMemoryFlags flags)
+ { SUPPORTS_DAC; }
+
+#endif
+
+#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
+
+ void Fixup(DataImage *pImage)
+ {
+ WRAPPER_NO_CONTRACT;
+ BaseFixup(pImage);
+ }
+
+ void Save(DataImage *pImage, CorProfileData *pProfileData);
+
+
+private:
+ bool ShouldSave(DataImage *pImage, MemberRefToDescHashEntry *pEntry)
+ {
+ return IsHotEntry(pEntry, NULL);
+ }
+
+ bool IsHotEntry(MemberRefToDescHashEntry *pEntry, CorProfileData *pProfileData) // hot according to IBC data
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pEntry != NULL);
+ // Low order bit of data field indicates a hot entry.
+ return (pEntry->m_value & 0x1) != 0;
+
+ }
+
+
+ bool SaveEntry(DataImage *pImage, CorProfileData *pProfileData,
+ MemberRefToDescHashEntry *pOldEntry, MemberRefToDescHashEntry *pNewEntry,
+ EntryMappingTable *pMap)
+ {
+ // The entries are mutable
+ return FALSE;
+ }
+
+ void FixupEntry(DataImage *pImage, MemberRefToDescHashEntry *pEntry, void *pFixupBase, DWORD cbFixupOffset);
+
+#endif
+};
+
+#ifdef FEATURE_READYTORUN
+typedef DPTR(class ReadyToRunInfo) PTR_ReadyToRunInfo;
+#endif
+
+struct ThreadLocalModule;
+
+// A code:Module represents a DLL or EXE file loaded from disk. It can be either an IL module or a
+// native code (NGEN) module. A module lives in a code:Assembly
+//
+// Some important fields are
+// * code:Module.m_file - this points at a code:PEFile that understands the layout of a PE file. The most
+// important part is getting at the code:Module (see file:..\inc\corhdr.h#ManagedHeader); from there
+// you can get at the metadata and IL.
+// * code:Module.m_pAvailableClasses - this is a table that lets you look up the types (the code:EEClass)
+// for all the types in the module
+//
+// See file:..\inc\corhdr.h#ManagedHeader for more on the layout of managed executable files.
+
+class Module
+{
+#ifdef CLR_STANDALONE_BINDER
+ friend class MdilModule;
+
+ // CLR's Module has some virtual methods, and therefore has a vtable. The binder's version doesn't have
+ // any virtual methods, but for compatibility with CLR it must have a vtable. So a dummy virtual method
+ // is defined here. The vtable address is zeroed out in Module::Fixup before being saved in the
+ // native image, and reset to the correct value in the Module ctor when the native image is loaded.
+ virtual void DummyVirtualMethod() {}
+#endif
+
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+ friend class NativeImageDumper;
+#endif
+
+ friend class DataImage;
+
+ VPTR_BASE_CONCRETE_VTABLE_CLASS(Module)
+
+private:
+ PTR_CUTF8 m_pSimpleName; // Cached simple name for better performance and easier diagnostics
+
+ PTR_PEFile m_file;
+
+ MethodDesc *m_pDllMain;
+
+ enum {
+ // These are the values set in m_dwTransientFlags.
+ // Note that none of these flags survive a prejit save/restore.
+
+ MODULE_IS_TENURED = 0x00000001, // Set once we know for sure the Module will not be freed until the appdomain itself exits
+ M_CER_ROOT_TABLE_ON_HEAP = 0x00000002, // Set when m_pCerNgenRootTable is allocated from heap (at ngen time)
+ CLASSES_FREED = 0x00000004,
+ IS_EDIT_AND_CONTINUE = 0x00000008, // is EnC Enabled for this module
+
+ IS_PROFILER_NOTIFIED = 0x00000010,
+ IS_ETW_NOTIFIED = 0x00000020,
+
+ //
+ // Note: the order of these must match the order defined in
+ // cordbpriv.h for DebuggerAssemblyControlFlags. The three
+ // values below should match the values defined in
+ // DebuggerAssemblyControlFlags when shifted right
+ // DEBUGGER_INFO_SHIFT bits.
+ //
+ DEBUGGER_USER_OVERRIDE_PRIV = 0x00000400,
+ DEBUGGER_ALLOW_JIT_OPTS_PRIV= 0x00000800,
+ DEBUGGER_TRACK_JIT_INFO_PRIV= 0x00001000,
+ DEBUGGER_ENC_ENABLED_PRIV = 0x00002000, // what was requested to be set; IS_EDIT_AND_CONTINUE is the actual result.
+ DEBUGGER_PDBS_COPIED = 0x00004000,
+ DEBUGGER_IGNORE_PDBS = 0x00008000,
+ DEBUGGER_INFO_MASK_PRIV = 0x0000Fc00,
+ DEBUGGER_INFO_SHIFT_PRIV = 10,
+
+ // Used to indicate that this module has had its IJW fixups properly installed.
+ IS_IJW_FIXED_UP = 0x00080000,
+ IS_BEING_UNLOADED = 0x00100000,
+
+ // Used to indicate that the module is loaded sufficiently for generic candidate instantiations to work
+ MODULE_READY_FOR_TYPELOAD = 0x00200000,
+
+ // Used during NGen only
+ TYPESPECS_TRIAGED = 0x40000000,
+ MODULE_SAVED = 0x80000000,
+ };
+
+ enum {
+ // These are the values set in m_dwPersistedFlags. These will survive
+ // a prejit save/restore
+ // unused = 0x00000001,
+ COMPUTED_GLOBAL_CLASS = 0x00000002,
+
+ // This flag applies to assembly, but it is stored so it can be cached in ngen image
+ COMPUTED_STRING_INTERNING = 0x00000004,
+ NO_STRING_INTERNING = 0x00000008,
+
+ // This flag applies to assembly, but it is stored so it can be cached in ngen image
+ COMPUTED_WRAP_EXCEPTIONS = 0x00000010,
+ WRAP_EXCEPTIONS = 0x00000020,
+
+ // This flag applies to assembly, but it is stored so it can be cached in ngen image
+ COMPUTED_RELIABILITY_CONTRACT=0x00000040,
+
+ // This flag applies to assembly, but is also stored here so that it can be cached in ngen image
+ COLLECTIBLE_MODULE = 0x00000080,
+
+ // Caches metadata version
+ COMPUTED_IS_PRE_V4_ASSEMBLY = 0x00000100,
+ IS_PRE_V4_ASSEMBLY = 0x00000200,
+
+ // If the DefaultDllImportSearchPaths attribute value has been cached
+ DEFAULT_DLL_IMPORT_SEARCH_PATHS_IS_CACHED = 0x00000400,
+
+ // If the module has the DefaultDllImportSearchPaths attribute
+ DEFAULT_DLL_IMPORT_SEARCH_PATHS_STATUS = 0x00000800,
+
+ // If the NeutralResourcesLanguage attribute value has been cached
+ NEUTRAL_RESOURCES_LANGUAGE_IS_CACHED = 0x00001000,
+
+ // If m_MethodDefToPropertyInfoMap has been generated
+ COMPUTED_METHODDEF_TO_PROPERTYINFO_MAP = 0x00002000,
+
+ // Low level system assembly. Used by preferred zap module computation.
+ LOW_LEVEL_SYSTEM_ASSEMBLY_BY_NAME = 0x00004000,
+ };
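+
+ // Several persisted flags come in pairs: a COMPUTED_* bit recording that the
+ // answer is known, plus a second bit caching the answer itself, so the warm
+ // path is a single flags test. An illustrative (hypothetical, not a real
+ // member) reader for one such pair:
+ //
+ //   BOOL IsStringInterningDisabled()
+ //   {
+ //       if (m_dwPersistedFlags & COMPUTED_STRING_INTERNING)
+ //           return (m_dwPersistedFlags & NO_STRING_INTERNING) != 0;
+ //       // cold path: inspect metadata, then set COMPUTED_STRING_INTERNING
+ //       // (and NO_STRING_INTERNING if applicable) before returning
+ //   }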
+
+ Volatile<DWORD> m_dwTransientFlags;
+ Volatile<DWORD> m_dwPersistedFlags;
+
+ // Linked list of VASig cookie blocks: protected by m_pStubListCrst
+ VASigCookieBlock *m_pVASigCookieBlock;
+
+ PTR_Assembly m_pAssembly;
+ mdFile m_moduleRef;
+
+ CrstExplicitInit m_Crst;
+ CrstExplicitInit m_FixupCrst;
+
+ // Debugging symbols reader interface. This will only be
+ // initialized if needed, either by the debugging subsystem or for
+ // an exception.
+ ISymUnmanagedReader * m_pISymUnmanagedReader;
+
+ // The reader lock is used to serialize all creation of symbol readers.
+ // It does NOT serialize all access to the readers since we freely give
+ // out references to the reader outside this class. Instead, once a
+ // reader object is created, it is entirely read-only and so thread-safe.
+ CrstExplicitInit m_ISymUnmanagedReaderCrst;
+
+ // Storage for the in-memory symbol stream if any
+ // Debugger may retrieve this from out-of-process.
+ PTR_CGrowableStream m_pIStreamSym;
+
+ // Format the above stream is in (if any)
+ ESymbolFormat m_symbolFormat;
+
+ // Active dependencies
+#ifndef CLR_STANDALONE_BINDER
+ ArrayList m_activeDependencies;
+#else
+ // Avoid calling ctor/dtor, since the binder only needs a placeholder.
+ ArrayListStatic m_activeDependencies;
+#endif
+ SynchronizedBitMask m_unconditionalDependencies;
+ ULONG m_dwNumberOfActivations;
+
+ // For protecting additions to the heap
+ CrstExplicitInit m_LookupTableCrst;
+
+ #define TYPE_DEF_MAP_ALL_FLAGS ((TADDR)0x00000001)
+ #define ZAPPED_TYPE_NEEDS_NO_RESTORE ((TADDR)0x00000001)
+
+ #define TYPE_REF_MAP_ALL_FLAGS NO_MAP_FLAGS
+ // For type ref map, 0x1 cannot be used as a flag: reserved for FIXUP_POINTER_INDIRECTION bit
+ // For type ref map, 0x2 cannot be used as a flag: reserved for TypeHandle to signify TypeDesc
+
+ #define METHOD_DEF_MAP_ALL_FLAGS NO_MAP_FLAGS
+
+ #define FIELD_DEF_MAP_ALL_FLAGS NO_MAP_FLAGS
+
+ #define MEMBER_REF_MAP_ALL_FLAGS ((TADDR)0x00000003)
+ // For member ref hash table, 0x1 is reserved for IsHot bit
+ #define IS_FIELD_MEMBER_REF ((TADDR)0x00000002) // denotes that target is a FieldDesc
+
+ #define GENERIC_PARAM_MAP_ALL_FLAGS NO_MAP_FLAGS
+
+ #define GENERIC_TYPE_DEF_MAP_ALL_FLAGS ((TADDR)0x00000001)
+ #define ZAPPED_GENERIC_TYPE_NEEDS_NO_RESTORE ((TADDR)0x00000001)
+
+ #define FILE_REF_MAP_ALL_FLAGS NO_MAP_FLAGS
+ // For file ref map, 0x1 cannot be used as a flag: reserved for FIXUP_POINTER_INDIRECTION bit
+
+ #define MANIFEST_MODULE_MAP_ALL_FLAGS NO_MAP_FLAGS
+ // For manifest module map, 0x1 cannot be used as a flag: reserved for FIXUP_POINTER_INDIRECTION bit
+
+ #define PROPERTY_INFO_MAP_ALL_FLAGS NO_MAP_FLAGS
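+
+ // The *_ALL_FLAGS masks above reserve low-order bits of each stored value for
+ // per-entry flags (stored pointers are aligned, so those bits are free).
+ // Illustrative decode via LookupMap's GetElementAndFlags, as used by
+ // LookupTypeDef below (rid is a hypothetical local):
+ //
+ //   TADDR flags;
+ //   PTR_MethodTable pMT = m_TypeDefToMethodTableMap.GetElementAndFlags(rid, &flags);
+ //   if (flags & ZAPPED_TYPE_NEEDS_NO_RESTORE) { /* pre-restored NGen type */ }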
+
+ // Linear mapping from TypeDef token to MethodTable *
+ // For generic types, IsGenericTypeDefinition() is true i.e. instantiation at formals
+ LookupMap<PTR_MethodTable> m_TypeDefToMethodTableMap;
+
+ // Linear mapping from TypeRef token to TypeHandle *
+ LookupMap<PTR_TypeRef> m_TypeRefToMethodTableMap;
+
+ // Linear mapping from MethodDef token to MethodDesc *
+ // For generic methods, IsGenericMethodDefinition() is true i.e. instantiation at formals
+ LookupMap<PTR_MethodDesc> m_MethodDefToDescMap;
+
+ // Linear mapping from FieldDef token to FieldDesc*
+ LookupMap<PTR_FieldDesc> m_FieldDefToDescMap;
+
+ // mapping from MemberRef token to MethodDesc*, FieldDesc*
+ PTR_MemberRefToDescHashTable m_pMemberRefToDescHashTable;
+
+ // Linear mapping from GenericParam token to TypeVarTypeDesc*
+ LookupMap<PTR_TypeVarTypeDesc> m_GenericParamToDescMap;
+
+ // Linear mapping from TypeDef token to the MethodTable * for its canonical generic instantiation
+ // If the type is not generic, the entry is guaranteed to be NULL. This means we are paying extra
+ // space in order to use the LookupMap infrastructure, but what it buys us is IBC support and
+ // a compressed format for NGen that makes up for it.
+ LookupMap<PTR_MethodTable> m_GenericTypeDefToCanonMethodTableMap;
+
+ // Mapping from File token to Module *
+ LookupMap<PTR_Module> m_FileReferencesMap;
+
+ // Mapping of AssemblyRef token to Module *
+ LookupMap<PTR_Module> m_ManifestModuleReferencesMap;
+
+ // Mapping from MethodDef token to pointer-sized value encoding property information
+ LookupMap<SIZE_T> m_MethodDefToPropertyInfoMap;
+
+ // IL stub cache with fabricated MethodTable parented by this module.
+ ILStubCache *m_pILStubCache;
+
+#ifndef FEATURE_CORECLR
+ ULONG m_DefaultDllImportSearchPathsAttributeValue;
+#endif
+
+ LPCUTF8 m_pszCultureName;
+ ULONG m_CultureNameLength;
+ INT16 m_FallbackLocation;
+
+#ifdef PROFILING_SUPPORTED_DATA
+ // a wrapper for the underlying PEFile metadata emitter which validates that the metadata edits being
+ // made are supported modifications to the type system
+ VolatilePtr<IMetaDataEmit> m_pValidatedEmitter;
+#endif
+
+public:
+
+#ifndef CLR_STANDALONE_BINDER
+ LookupMap<PTR_MethodTable>::Iterator EnumerateTypeDefs()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return LookupMap<PTR_MethodTable>::Iterator(&m_TypeDefToMethodTableMap);
+ }
+#endif
+
+ // Hash of available types by name
+ PTR_EEClassHashTable m_pAvailableClasses;
+
+ // Hashtable of generic type instances
+ PTR_EETypeHashTable m_pAvailableParamTypes;
+
+ // For protecting additions to m_pInstMethodHashTable
+ CrstExplicitInit m_InstMethodHashTableCrst;
+
+ // Hashtable of instantiated methods and per-instantiation static methods
+ PTR_InstMethodHashTable m_pInstMethodHashTable;
+
+#ifdef FEATURE_PREJIT
+ // Mapping from tokens to IL marshaling stubs (NGEN only).
+ PTR_StubMethodHashTable m_pStubMethodHashTable;
+#endif // FEATURE_PREJIT
+
+ // This is used by the Debugger. We need to store a dword
+ // for a count of JMC functions. This is a count, not a pointer.
+ // We'll pass the address of this field
+ // off to the jit, which will include it in probes injected for
+ // debuggable code.
+ // This means we need the dword at the time a function is jitted.
+ // The Debugger has its own module structure, but those aren't created
+ // if a debugger isn't attached.
+ // We put it here instead of in the debugger's module because:
+ // 1) we need a module structure that's around even when the debugger
+ // isn't attached... so we use the EE's module.
+ // 2) Needs to be here for ngen
+ DWORD m_dwDebuggerJMCProbeCount;
+
+ // We can skip the JMC probes if we know that a module has no JMC stuff
+ // inside. So keep a strict count of all functions inside us.
+ bool HasAnyJMCFunctions();
+ void IncJMCFuncCount();
+ void DecJMCFuncCount();
+
+ // Get and set the default JMC status of this module.
+ bool GetJMCStatus();
+ void SetJMCStatus(bool fStatus);
+
+ // If this is a dynamic module, eagerly serialize the metadata so that it is available for DAC.
+ // This is a nop for non-dynamic modules.
+ void UpdateDynamicMetadataIfNeeded();
+
+#ifdef _DEBUG
+ //
+ // We call these methods to seal/unseal the
+ // lists: m_pAvailableClasses and m_pAvailableParamTypes
+ //
+ // When they are sealed ClassLoader::PublishType cannot
+ // add new generic types or methods
+ //
+ void SealGenericTypesAndMethods();
+ void UnsealGenericTypesAndMethods();
+#endif
+
+private:
+ // Set the given bit on m_dwTransientFlags. Return true if we won the race to set the bit.
+ BOOL SetTransientFlagInterlocked(DWORD dwFlag);
+
+ // Invoke fusion hooks into host to fetch PDBs
+ void FetchPdbsFromHost();
+
+ // Canonically-cased hashtable of the available class names for
+ // case-insensitive lookup. Contains pointers into
+ // m_pAvailableClasses.
+ PTR_EEClassHashTable m_pAvailableClassesCaseIns;
+
+ // Pointer to binder, if we have one
+ friend class MscorlibBinder;
+ PTR_MscorlibBinder m_pBinder;
+
+public:
+ BOOL IsCollectible()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (m_dwPersistedFlags & COLLECTIBLE_MODULE) != 0;
+ }
+
+#ifdef FEATURE_PREJIT
+
+private:
+ PTR_NGenLayoutInfo m_pNGenLayoutInfo;
+#ifdef FEATURE_READYTORUN
+ PTR_ReadyToRunInfo m_pReadyToRunInfo;
+#endif
+
+ PTR_ProfilingBlobTable m_pProfilingBlobTable; // While performing IBC instrumenting this hashtable is populated with the External defs
+ CorProfileData * m_pProfileData; // While ngen-ing with IBC optimizations this contains a link to the IBC data for the assembly
+ SString * m_pIBCErrorNameString; // Used when reporting IBC type loading errors
+
+ // Profile information
+ BOOL m_nativeImageProfiling;
+ CORCOMPILE_METHOD_PROFILE_LIST *m_methodProfileList;
+
+#if defined(FEATURE_COMINTEROP)
+
+ #if defined(CLR_STANDALONE_BINDER)
+
+ private: PTR_GuidToMethodTableHashTable m_AlwaysNull_pGuidToTypeHash;
+
+ #else // !defined(CLR_STANDALONE_BINDER)
+
+ public:
+
+ #ifndef DACCESS_COMPILE
+ BOOL CanCacheWinRTTypeByGuid(MethodTable *pMT);
+ void CacheWinRTTypeByGuid(PTR_MethodTable pMT, PTR_GuidInfo pgi = NULL);
+ #endif // !DACCESS_COMPILE
+
+ PTR_MethodTable LookupTypeByGuid(const GUID & guid);
+ void GetCachedWinRTTypes(SArray<PTR_MethodTable> * pTypes, SArray<GUID> * pGuids);
+
+ private:
+ PTR_GuidToMethodTableHashTable m_pGuidToTypeHash; // A map from GUID to Type, for the "WinRT-interesting" types
+
+ #endif // !defined(CLR_STANDALONE_BINDER)
+
+#endif // defined(FEATURE_COMINTEROP)
+
+#endif // FEATURE_PREJIT
+
+ // Module wide static fields information
+ ModuleCtorInfo m_ModuleCtorInfo;
+
+#ifdef FEATURE_PREJIT
+ struct TokenProfileData
+ {
+#ifndef CLR_STANDALONE_BINDER
+
+ static TokenProfileData *CreateNoThrow(void);
+
+ TokenProfileData()
+ // We need a critical section that can be entered in both preemptive and cooperative modes.
+ // Hopefully this restriction can be removed in the future.
+ : crst(CrstSaveModuleProfileData, CRST_UNSAFE_ANYMODE)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ ~TokenProfileData()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ Crst crst;
+
+ struct Formats
+ {
+ CQuickArray<CORBBTPROF_TOKEN_INFO> tokenArray;
+ RidBitmap tokenBitmaps[CORBBTPROF_TOKEN_MAX_NUM_FLAGS];
+ } m_formats[SectionFormatCount];
+
+#endif // CLR_STANDALONE_BINDER
+ } *m_tokenProfileData;
+
+ // Stats for prejit log
+ NgenStats *m_pNgenStats;
+#endif // FEATURE_PREJIT
+
+#ifdef FEATURE_MIXEDMODE
+ // LoaderHeap for storing thunks
+ PTR_LoaderHeap m_pThunkHeap;
+
+ // Self-initializing accessor for thunk heap
+ LoaderHeap *GetThunkHeap();
+ // Self-initializing accessor for domain-independent thunk heap
+ LoaderHeap *GetDllThunkHeap();
+
+
+public:
+ UMEntryThunk* GetADThunkTable();
+ void SetADThunkTable(UMEntryThunk* pTable);
+
+protected:
+ // Domain that the IJW fixups were applied in
+ ADID m_DomainIdOfIJWFixups;
+
+#ifndef CLR_STANDALONE_BINDER
+
+public:
+ ADID GetDomainIdOfIJWFixups()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERT(m_DomainIdOfIJWFixups != ADID());
+ return m_DomainIdOfIJWFixups;
+ }
+
+ void SetDomainIdOfIJWFixups(ADID id)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERT(id != ADID());
+ m_DomainIdOfIJWFixups = id;
+ }
+
+#endif // CLR_STANDALONE_BINDER
+#endif // FEATURE_MIXEDMODE
+
+#ifndef CLR_STANDALONE_BINDER
+
+protected:
+
+ void CreateDomainThunks();
+
+protected:
+ void DoInit(AllocMemTracker *pamTracker, LPCWSTR szName);
+
+protected:
+#ifndef DACCESS_COMPILE
+ virtual void Initialize(AllocMemTracker *pamTracker, LPCWSTR szName = NULL);
+#ifdef FEATURE_PREJIT
+ void InitializeNativeImage(AllocMemTracker* pamTracker);
+#endif
+#endif
+
+ void AllocateMaps();
+
+#ifdef _DEBUG
+ void DebugLogRidMapOccupancy();
+#endif // _DEBUG
+
+ static HRESULT VerifyFile(PEFile *file, BOOL fZap);
+
+ public:
+ static Module *Create(Assembly *pAssembly, mdFile kFile, PEFile *pFile, AllocMemTracker *pamTracker);
+
+ protected:
+ Module(Assembly *pAssembly, mdFile moduleRef, PEFile *file);
+
+
+ public:
+#ifndef DACCESS_COMPILE
+ virtual void Destruct();
+#ifdef FEATURE_PREJIT
+ void DeleteNativeCodeRanges();
+#endif
+#endif
+
+ PTR_LoaderAllocator GetLoaderAllocator();
+
+ PTR_PEFile GetFile() const { LIMITED_METHOD_DAC_CONTRACT; return m_file; }
+
+ static size_t GetFileOffset() { LIMITED_METHOD_CONTRACT; return offsetof(Module, m_file); }
+
+ BOOL IsManifest();
+
+#ifdef FEATURE_MIXEDMODE
+ void FixupVTables();
+#endif
+
+ void FreeClassTables();
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
+ bool enumThis);
+#endif // DACCESS_COMPILE
+
+ ReflectionModule *GetReflectionModule() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(IsReflection());
+ return dac_cast<PTR_ReflectionModule>(this);
+ }
+
+ PTR_Assembly GetAssembly() const;
+
+ int GetClassLoaderIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return RidFromToken(m_moduleRef);
+ }
+
+ MethodTable *GetGlobalMethodTable();
+ bool NeedsGlobalMethodTable();
+
+ // Only for non-manifest modules
+ DomainModule *GetDomainModule(AppDomain *pDomain);
+ DomainModule *FindDomainModule(AppDomain *pDomain);
+
+ // This works for manifest modules too
+ DomainFile *GetDomainFile(AppDomain *pDomain);
+ DomainFile *FindDomainFile(AppDomain *pDomain);
+
+ // Operates on assembly of module
+ DomainAssembly *GetDomainAssembly(AppDomain *pDomain);
+ DomainAssembly *FindDomainAssembly(AppDomain *pDomain);
+
+ // Versions which rely on the current AppDomain (N/A for DAC builds)
+#ifndef DACCESS_COMPILE
+ DomainModule * GetDomainModule() { WRAPPER_NO_CONTRACT; return GetDomainModule(GetAppDomain()); }
+ DomainFile * GetDomainFile() { WRAPPER_NO_CONTRACT; return GetDomainFile(GetAppDomain()); }
+ DomainAssembly * GetDomainAssembly() { WRAPPER_NO_CONTRACT; return GetDomainAssembly(GetAppDomain()); }
+#endif
+
+ void SetDomainFile(DomainFile *pDomainFile);
+
+ OBJECTREF GetExposedObject();
+
+ ClassLoader *GetClassLoader();
+ PTR_BaseDomain GetDomain();
+ ReJitManager * GetReJitManager();
+ IAssemblySecurityDescriptor* GetSecurityDescriptor();
+
+ mdFile GetModuleRef()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_moduleRef;
+ }
+
+
+ BOOL IsResource() const { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetFile()->IsResource(); }
+ BOOL IsPEFile() const { WRAPPER_NO_CONTRACT; return !GetFile()->IsDynamic(); }
+ BOOL IsReflection() const { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GetFile()->IsDynamic(); }
+ BOOL IsIbcOptimized() const { WRAPPER_NO_CONTRACT; return GetFile()->IsIbcOptimized(); }
+ // Returns true iff the debugger can see this module.
+ BOOL IsVisibleToDebugger();
+
+
+ BOOL IsEditAndContinueEnabled()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ // We are seeing cases where this flag is set for a module that is not an EditAndContinueModule. This should
+ // never happen unless the module is EditAndContinueCapable, in which case we would have created an EditAndContinueModule
+ // not a Module.
+ //_ASSERTE((m_dwTransientFlags & IS_EDIT_AND_CONTINUE) == 0 || IsEditAndContinueCapable());
+ return (IsEditAndContinueCapable()) && ((m_dwTransientFlags & IS_EDIT_AND_CONTINUE) != 0);
+ }
+
+ BOOL IsEditAndContinueCapable();
+
+ BOOL IsIStream() { LIMITED_METHOD_CONTRACT; return GetFile()->IsIStream(); }
+
+ BOOL IsSystem() { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return m_file->IsSystem(); }
+
+ static BOOL IsEditAndContinueCapable(PEFile *file)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+
+ // Some modules are never EnC-capable
+ return ! (file->IsSystem() || file->IsResource() || file->HasNativeImage() || file->IsDynamic());
+ }
+
+ void EnableEditAndContinue()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ // _ASSERTE(IsEditAndContinueCapable());
+ LOG((LF_ENC, LL_INFO100, "EnableEditAndContinue: this:0x%x, %s\n", this, GetDebugName()));
+ m_dwTransientFlags |= IS_EDIT_AND_CONTINUE;
+ }
+
+ void DisableEditAndContinue()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ // don't _ASSERTE(IsEditAndContinueCapable());
+ LOG((LF_ENC, LL_INFO100, "DisableEditAndContinue: this:0x%x, %s\n", this, GetDebugName()));
+ m_dwTransientFlags = m_dwTransientFlags.Load() & (~IS_EDIT_AND_CONTINUE);
+ }
+
+ BOOL IsTenured()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwTransientFlags & MODULE_IS_TENURED;
+ }
+
+#ifndef DACCESS_COMPILE
+ VOID SetIsTenured()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockOr(&m_dwTransientFlags, MODULE_IS_TENURED);
+ }
+
+ // CAUTION: This should only be used as backout code if an assembly is unsuccessfully
+ // added to the shared domain assembly map.
+ VOID UnsetIsTenured()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockAnd(&m_dwTransientFlags, ~MODULE_IS_TENURED);
+ }
+#endif // !DACCESS_COMPILE
+
+
+ // This means the module has been sufficiently fixed up/security checked
+ // that type loads can occur in domains. This is not sufficient to indicate
+ // that domain-specific types can be loaded when applied to domain-neutral modules
+ BOOL IsReadyForTypeLoad()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwTransientFlags & MODULE_READY_FOR_TYPELOAD;
+ }
+
+#ifndef DACCESS_COMPILE
+ VOID SetIsReadyForTypeLoad()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockOr(&m_dwTransientFlags, MODULE_READY_FOR_TYPELOAD);
+ }
+#endif
+
+ BOOL IsLowLevelSystemAssemblyByName()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // The flag is set during initialization, so we can skip the memory barrier
+ return m_dwPersistedFlags.LoadWithoutBarrier() & LOW_LEVEL_SYSTEM_ASSEMBLY_BY_NAME;
+ }
+
+ BOOL IsIntrospectionOnly();
+
+#ifndef DACCESS_COMPILE
+ VOID EnsureActive();
+ VOID EnsureAllocated();
+ VOID EnsureLibraryLoaded();
+#endif
+
+ CHECK CheckActivated();
+ ULONG GetNumberOfActivations();
+ ULONG IncrementNumberOfActivations();
+
+ IMDInternalImport *GetMDImport() const
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+#ifdef DACCESS_COMPILE
+ if (IsReflection())
+ {
+ return DacGetMDImport(GetReflectionModule(), true);
+ }
+#endif // DACCESS_COMPILE
+ return m_file->GetPersistentMDImport();
+ }
+
+#ifndef DACCESS_COMPILE
+ IMetaDataEmit *GetEmitter()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return m_file->GetEmitter();
+ }
+
+#if defined(PROFILING_SUPPORTED) && !defined(CROSSGEN_COMPILE)
+ IMetaDataEmit *GetValidatedEmitter();
+#endif
+
+ IMetaDataImport2 *GetRWImporter()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return m_file->GetRWImporter();
+ }
+
+ IMetaDataAssemblyImport *GetAssemblyImporter()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return m_file->GetAssemblyImporter();
+ }
+
+ HRESULT GetReadablePublicMetaDataInterface(DWORD dwOpenFlags, REFIID riid, LPVOID * ppvInterface);
+#endif // !DACCESS_COMPILE
+
+ BOOL IsWindowsRuntimeModule();
+
+ BOOL IsInCurrentVersionBubble();
+
+ LPCWSTR GetPathForErrorMessages();
+
+
+#ifdef FEATURE_ISYM_READER
+ // Gets an up-to-date symbol reader for this module, lazily creating it if necessary
+ // The caller must call Release
+ ISymUnmanagedReader *GetISymUnmanagedReader(void);
+ ISymUnmanagedReader *GetISymUnmanagedReaderNoThrow(void);
+#endif // FEATURE_ISYM_READER
+
+ // Save a copy of the provided debugging symbols in the InMemorySymbolStream.
+ // These are used by code:Module::GetInMemorySymbolStream and code:Module.GetISymUnmanagedReader
+ // This can only be called during module creation, before anyone may have tried to create a reader.
+ void SetSymbolBytes(LPCBYTE pSyms, DWORD cbSyms);
+
+ // Does the current configuration permit reading of symbols for this module?
+ // Note that this may require calling into managed code (to resolve security policy).
+ BOOL IsSymbolReadingEnabled(void);
+
+ BOOL IsPersistedObject(void *address);
+
+
+ // Get the in-memory symbol stream for this module, if any.
+ // If none, this will return null. This is used by modules loaded in-memory (e.g. from a byte array)
+ // and by dynamic modules. Callers that actually do anything with the return value will almost
+ // certainly want to check GetInMemorySymbolStreamFormat to know how to interpret the bytes
+ // in the stream.
+ PTR_CGrowableStream GetInMemorySymbolStream()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ // Symbol format should be "none" if-and-only-if our stream is null
+ // If this fails, it may mean somebody is trying to examine this module after
+ // code:Module::Destruct has been called.
+ _ASSERTE( (m_symbolFormat == eSymbolFormatNone) == (m_pIStreamSym == NULL) );
+
+ return m_pIStreamSym;
+ }
+
+ // Get the format of the in-memory symbol stream for this module, or
+ // eSymbolFormatNone if no in-memory symbols.
+ ESymbolFormat GetInMemorySymbolStreamFormat()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ // Symbol format should be "none" if-and-only-if our stream is null
+ // If this fails, it may mean somebody is trying to examine this module after
+ // code:Module::Destruct has been called.
+ _ASSERTE( (m_symbolFormat == eSymbolFormatNone) == (m_pIStreamSym == NULL) );
+
+ return m_symbolFormat;
+ }
+
+#ifndef DACCESS_COMPILE
+ // Set the in-memory stream for debug symbols
+ // This must only be called when there is no existing stream.
+ // This takes an AddRef on the supplied stream.
+ void SetInMemorySymbolStream(CGrowableStream *pStream, ESymbolFormat symbolFormat)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Must have provided valid stream data
+ CONSISTENCY_CHECK(pStream != NULL);
+ CONSISTENCY_CHECK(symbolFormat != eSymbolFormatNone);
+
+ // we expect set to only be called once
+ CONSISTENCY_CHECK(m_pIStreamSym == NULL);
+ CONSISTENCY_CHECK(m_symbolFormat == eSymbolFormatNone);
+
+ m_symbolFormat = symbolFormat;
+ m_pIStreamSym = pStream;
+ m_pIStreamSym->AddRef();
+ }
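+
+ // Illustrative pairing (a sketch, not runtime code): symbols are attached once
+ // during module creation and read back later, e.g. by the debugger. pModule and
+ // pStream are hypothetical locals:
+ //
+ //   pModule->SetInMemorySymbolStream(pStream, eSymbolFormatPDB); // creation time
+ //   ...
+ //   if (pModule->GetInMemorySymbolStreamFormat() != eSymbolFormatNone)
+ //   {
+ //       PTR_CGrowableStream pSyms = pModule->GetInMemorySymbolStream();
+ //       // interpret the stream bytes according to the returned format
+ //   }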
+
+ // Release and clear the in-memory symbol stream if any
+ void ClearInMemorySymbolStream()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if( m_pIStreamSym != NULL )
+ {
+ m_pIStreamSym->Release();
+ m_pIStreamSym = NULL;
+ // We could set m_symbolFormat to eSymbolFormatNone to be consistent with not having
+ // a stream, but no-one should be trying to look at it after destruct time, so it's
+ // better to leave it inconsistent and get an ASSERT if someone tries to examine the
+ // module's symbol stream after the module was destructed.
+ }
+ }
+
+ // Release the symbol reader if any
+ // Caller is responsible for acquiring the reader lock if this could occur
+ // concurrently with other uses of the reader (i.e. not shutdown/unload time)
+ void ReleaseISymUnmanagedReader(void);
+
+ virtual void ReleaseILData();
+
+#ifdef FEATURE_FUSION
+ void FusionCopyPDBs(LPCWSTR moduleName);
+ // This function will return the PDB stream if one exists.
+ HRESULT GetHostPdbStream(IStream **ppStream);
+#endif // FEATURE_FUSION
+
+#endif // DACCESS_COMPILE
+
+ // IL stub cache
+ ILStubCache* GetILStubCache();
+
+ // Classes
+ void AddClass(mdTypeDef classdef);
+ void BuildClassForModule();
+ PTR_EEClassHashTable GetAvailableClassHash()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ {
+ // IsResource() may lock when accessing metadata, but this is only in debug,
+ // for the assert below
+ CONTRACT_VIOLATION(TakesLockViolation);
+
+ _ASSERTE(!IsResource());
+ }
+
+ return m_pAvailableClasses;
+ }
+#ifndef DACCESS_COMPILE
+ void SetAvailableClassHash(EEClassHashTable *pAvailableClasses)
+ {
+ LIMITED_METHOD_CONTRACT;
+ {
+ // IsResource() may lock when accessing metadata, but this is only in debug,
+ // for the assert below
+ CONTRACT_VIOLATION(TakesLockViolation);
+
+ _ASSERTE(!IsResource());
+ }
+ m_pAvailableClasses = pAvailableClasses;
+ }
+#endif // !DACCESS_COMPILE
+ PTR_EEClassHashTable GetAvailableClassCaseInsHash()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ {
+ // IsResource() may lock when accessing metadata, but this is only in debug,
+ // for the assert below
+ CONTRACT_VIOLATION(TakesLockViolation);
+
+ _ASSERTE(!IsResource());
+ }
+ return m_pAvailableClassesCaseIns;
+ }
+#ifndef DACCESS_COMPILE
+ void SetAvailableClassCaseInsHash(EEClassHashTable *pAvailableClassesCaseIns)
+ {
+ LIMITED_METHOD_CONTRACT;
+ {
+ // IsResource() may lock when accessing metadata, but this is only in debug,
+ // for the assert below
+ CONTRACT_VIOLATION(TakesLockViolation);
+
+ _ASSERTE(!IsResource());
+ }
+ m_pAvailableClassesCaseIns = pAvailableClassesCaseIns;
+ }
+#endif // !DACCESS_COMPILE
+
+ // Constructed types tables
+ EETypeHashTable *GetAvailableParamTypes()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ {
+ // IsResource() may lock when accessing metadata, but this is only in debug,
+ // for the assert below
+ CONTRACT_VIOLATION(TakesLockViolation);
+
+ _ASSERTE(!IsResource());
+ }
+ return m_pAvailableParamTypes;
+ }
+
+ InstMethodHashTable *GetInstMethodHashTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ {
+ // IsResource() may lock when accessing metadata, but this is only in debug,
+ // for the assert below
+ CONTRACT_VIOLATION(TakesLockViolation);
+
+ _ASSERTE(!IsResource());
+ }
+ return m_pInstMethodHashTable;
+ }
+
+#ifdef FEATURE_PREJIT
+ // Gets or creates the token -> IL stub MethodDesc hash.
+ StubMethodHashTable *GetStubMethodHashTable();
+#endif // FEATURE_PREJIT
+
+ // Creates a new Method table for an array. Used to make type handles
+ // Note that if kind == SZARRAY or ARRAY, we get passed the GENERIC_ARRAY
+ // needed to create the array. That way we don't need to load classes during
+ // the class load, which avoids the need for a 'being loaded' list.
+ MethodTable* CreateArrayMethodTable(TypeHandle elemType, CorElementType kind, unsigned rank, class AllocMemTracker *pamTracker);
+
+ // This is called from CreateArrayMethodTable
+ MethodTable* CreateGenericArrayMethodTable(TypeHandle elemType);
+
+ // string helper
+ void InitializeStringData(DWORD token, EEStringData *pstrData, CQuickBytes *pqb);
+
+ // Resolving
+ OBJECTHANDLE ResolveStringRef(DWORD Token, BaseDomain *pDomain, bool bNeedToSyncWithFixups);
+#ifdef FEATURE_PREJIT
+ OBJECTHANDLE ResolveStringRefHelper(DWORD token, BaseDomain *pDomain, PTR_CORCOMPILE_IMPORT_SECTION pSection, EEStringData *strData);
+#endif
+
+ CHECK CheckStringRef(RVA rva);
+
+ // Module/Assembly traversal
+ Assembly * GetAssemblyIfLoaded(
+ mdAssemblyRef kAssemblyRef,
+ LPCSTR szWinRtNamespace = NULL,
+ LPCSTR szWinRtClassName = NULL,
+ IMDInternalImport * pMDImportOverride = NULL,
+ BOOL fDoNotUtilizeExtraChecks = FALSE,
+ ICLRPrivBinder *pBindingContextForLoadedAssembly = NULL
+ );
+
+private:
+ // Helper function used by GetAssemblyIfLoaded. Do not call directly.
+ Assembly *GetAssemblyIfLoadedFromNativeAssemblyRefWithRefDefMismatch(mdAssemblyRef kAssemblyRef, BOOL *pfDiscoveredAssemblyRefMatchesTargetDefExactly);
+public:
+
+ DomainAssembly * LoadAssembly(
+ AppDomain * pDomain,
+ mdAssemblyRef kAssemblyRef,
+ LPCUTF8 szWinRtTypeNamespace = NULL,
+ LPCUTF8 szWinRtTypeClassName = NULL);
+ Module *GetModuleIfLoaded(mdFile kFile, BOOL onlyLoadedInAppDomain, BOOL loadAllowed);
+ DomainFile *LoadModule(AppDomain *pDomain, mdFile kFile, BOOL loadResources = TRUE, BOOL bindOnly = FALSE);
+ PTR_Module LookupModule(mdToken kFile, BOOL loadResources = TRUE); // wrapper over GetModuleIfLoaded; also accepts ModuleRef tokens
+ DWORD GetAssemblyRefFlags(mdAssemblyRef tkAssemblyRef);
+
+ bool HasBindableIdentity(mdAssemblyRef tkAssemblyRef)
+ {
+ WRAPPER_NO_CONTRACT;
+ return !IsAfContentType_WindowsRuntime(GetAssemblyRefFlags(tkAssemblyRef));
+ }
+
+ // RID maps
+ TypeHandle LookupTypeDef(mdTypeDef token, ClassLoadLevel *pLoadLevel = NULL)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ BAD_FORMAT_NOTHROW_ASSERT(TypeFromToken(token) == mdtTypeDef);
+
+ g_IBCLogger.LogRidMapAccess( MakePair( this, token ) );
+
+ TADDR flags;
+ TypeHandle th = TypeHandle(m_TypeDefToMethodTableMap.GetElementAndFlags(RidFromToken(token), &flags));
+
+ if (pLoadLevel && !th.IsNull())
+ {
+ if (!IsCompilationProcess() && (flags & ZAPPED_TYPE_NEEDS_NO_RESTORE))
+ {
+ // Make sure the flag is consistent with the target data and implies the load level we think it does
+ _ASSERTE(th.AsMethodTable()->IsPreRestored());
+ _ASSERTE(th.GetLoadLevel() == CLASS_LOADED);
+
+ *pLoadLevel = CLASS_LOADED;
+ }
+ else
+ {
+ *pLoadLevel = th.GetLoadLevel();
+ }
+ }
+
+ return th;
+ }
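+
+ // Illustrative call (a sketch): looking up a TypeDef and its load level in one
+ // shot, per the contract above (pModule and tkTypeDef are hypothetical locals):
+ //
+ //   ClassLoadLevel level;
+ //   TypeHandle th = pModule->LookupTypeDef(tkTypeDef, &level);
+ //   if (!th.IsNull() && level == CLASS_LOADED)
+ //   {
+ //       // the type is fully loaded and safe to use
+ //   }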
+
+ TypeHandle LookupFullyCanonicalInstantiation(mdTypeDef token, ClassLoadLevel *pLoadLevel = NULL)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ BAD_FORMAT_NOTHROW_ASSERT(TypeFromToken(token) == mdtTypeDef);
+
+ g_IBCLogger.LogRidMapAccess( MakePair( this, token ) );
+
+ TADDR flags;
+ TypeHandle th = TypeHandle(m_GenericTypeDefToCanonMethodTableMap.GetElementAndFlags(RidFromToken(token), &flags));
+
+ if (pLoadLevel && !th.IsNull())
+ {
+ if (!IsCompilationProcess() && (flags & ZAPPED_GENERIC_TYPE_NEEDS_NO_RESTORE))
+ {
+ // Make sure the flag is consistent with the target data and implies the load level we think it does
+ _ASSERTE(th.AsMethodTable()->IsPreRestored());
+ _ASSERTE(th.GetLoadLevel() == CLASS_LOADED);
+
+ *pLoadLevel = CLASS_LOADED;
+ }
+ else
+ {
+ *pLoadLevel = th.GetLoadLevel();
+ }
+ }
+
+ return th;
+ }
+
+#ifndef DACCESS_COMPILE
+ VOID EnsureTypeDefCanBeStored(mdTypeDef token)
+ {
+ WRAPPER_NO_CONTRACT; // THROWS/GC_NOTRIGGER/INJECT_FAULT()/MODE_ANY
+ m_TypeDefToMethodTableMap.EnsureElementCanBeStored(this, RidFromToken(token));
+ }
+
+ void EnsuredStoreTypeDef(mdTypeDef token, TypeHandle value)
+ {
+ WRAPPER_NO_CONTRACT; // NOTHROW/GC_NOTRIGGER/FORBID_FAULT/MODE_ANY
+
+ _ASSERTE(TypeFromToken(token) == mdtTypeDef);
+ m_TypeDefToMethodTableMap.SetElement(RidFromToken(token), value.AsMethodTable());
+ }
+
+#endif // !DACCESS_COMPILE
+
+ TypeHandle LookupTypeRef(mdTypeRef token);
+
+ mdTypeRef LookupTypeRefByMethodTable(MethodTable *pMT);
+
+ mdMemberRef LookupMemberRefByMethodDesc(MethodDesc *pMD);
+
+#ifndef DACCESS_COMPILE
+ //
+ // Increase the size of the TypeRef-to-MethodTable LookupMap to make sure the specified token
+ // can be stored. Note that nothing is actually added to the LookupMap at this point.
+ //
+ // Arguments:
+ // token - the TypeRef metadata token we need to accommodate
+ //
+
+ void EnsureTypeRefCanBeStored(mdTypeRef token)
+ {
+ WRAPPER_NO_CONTRACT; // THROWS/GC_NOTRIGGER/INJECT_FAULT()/MODE_ANY
+
+ _ASSERTE(TypeFromToken(token) == mdtTypeRef);
+ m_TypeRefToMethodTableMap.EnsureElementCanBeStored(this, RidFromToken(token));
+ }
+
+ void StoreTypeRef(mdTypeRef token, TypeHandle value)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(TypeFromToken(token) == mdtTypeRef);
+
+ g_IBCLogger.LogRidMapAccess( MakePair( this, token ) );
+
+ // The TypeRef cache is strictly a lookaside cache. If we get an OOM trying to grow the table,
+ // we cannot abort the load. (This will cause fatal errors during gc promotion.)
+ m_TypeRefToMethodTableMap.TrySetElement(RidFromToken(token),
+ dac_cast<PTR_TypeRef>(value.AsTAddr()));
+ }
+#endif // !DACCESS_COMPILE
+
+ MethodDesc *LookupMethodDef(mdMethodDef token);
+
+#ifndef DACCESS_COMPILE
+ void EnsureMethodDefCanBeStored(mdMethodDef token)
+ {
+ WRAPPER_NO_CONTRACT; // THROWS/GC_NOTRIGGER/INJECT_FAULT()/MODE_ANY
+ m_MethodDefToDescMap.EnsureElementCanBeStored(this, RidFromToken(token));
+ }
+
+ void EnsuredStoreMethodDef(mdMethodDef token, MethodDesc *value)
+ {
+ WRAPPER_NO_CONTRACT; // NOTHROW/GC_NOTRIGGER/FORBID_FAULT/MODE_ANY
+
+ _ASSERTE(TypeFromToken(token) == mdtMethodDef);
+ m_MethodDefToDescMap.SetElement(RidFromToken(token), value);
+ }
+#endif // !DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+ FieldDesc *LookupFieldDef(mdFieldDef token)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(TypeFromToken(token) == mdtFieldDef);
+ return m_FieldDefToDescMap.GetElement(RidFromToken(token));
+ }
+#else // DACCESS_COMPILE
+ // FieldDesc isn't defined at this point so PTR_FieldDesc can't work.
+ FieldDesc *LookupFieldDef(mdFieldDef token);
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+ void EnsureFieldDefCanBeStored(mdFieldDef token)
+ {
+ WRAPPER_NO_CONTRACT; // THROWS/GC_NOTRIGGER/INJECT_FAULT()/MODE_ANY
+ m_FieldDefToDescMap.EnsureElementCanBeStored(this, RidFromToken(token));
+ }
+
+ void EnsuredStoreFieldDef(mdFieldDef token, FieldDesc *value)
+ {
+ WRAPPER_NO_CONTRACT; // NOTHROW/GC_NOTRIGGER/FORBID_FAULT/MODE_ANY
+
+ _ASSERTE(TypeFromToken(token) == mdtFieldDef);
+ m_FieldDefToDescMap.SetElement(RidFromToken(token), value);
+ }
+#endif // !DACCESS_COMPILE
+
+ FORCEINLINE TADDR LookupMemberRef(mdMemberRef token, BOOL *pfIsMethod)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(TypeFromToken(token) == mdtMemberRef);
+
+ TADDR pResult = dac_cast<TADDR>(m_pMemberRefToDescHashTable->GetValue(token, pfIsMethod));
+ g_IBCLogger.LogRidMapAccess( MakePair( this, token ) );
+ return pResult;
+ }
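+
+ // Illustrative decode (a sketch, not runtime code): callers recover the
+ // concrete desc type from the returned TADDR via the pfIsMethod out parameter
+ // (pModule and tkMemberRef are hypothetical locals):
+ //
+ //   BOOL fIsMethod;
+ //   TADDR pDesc = pModule->LookupMemberRef(tkMemberRef, &fIsMethod);
+ //   if (pDesc != 0)
+ //   {
+ //       if (fIsMethod)
+ //       {
+ //           MethodDesc *pMD = dac_cast<PTR_MethodDesc>(pDesc);
+ //           // use pMD
+ //       }
+ //       else
+ //       {
+ //           FieldDesc *pFD = dac_cast<PTR_FieldDesc>(pDesc);
+ //           // use pFD
+ //       }
+ //   }
+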
+ MethodDesc *LookupMemberRefAsMethod(mdMemberRef token);
+#ifndef DACCESS_COMPILE
+ void StoreMemberRef(mdMemberRef token, FieldDesc *value)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(TypeFromToken(token) == mdtMemberRef);
+ CrstHolder ch(this->GetLookupTableCrst());
+ m_pMemberRefToDescHashTable->Insert(token, value);
+ }
+ void StoreMemberRef(mdMemberRef token, MethodDesc *value)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(TypeFromToken(token) == mdtMemberRef);
+ CrstHolder ch(this->GetLookupTableCrst());
+ m_pMemberRefToDescHashTable->Insert(token, value);
+ }
+#endif // !DACCESS_COMPILE
+
+ PTR_TypeVarTypeDesc LookupGenericParam(mdGenericParam token)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(TypeFromToken(token) == mdtGenericParam);
+ return m_GenericParamToDescMap.GetElement(RidFromToken(token));
+ }
+#ifndef DACCESS_COMPILE
+ void StoreGenericParamThrowing(mdGenericParam token, TypeVarTypeDesc *value)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(TypeFromToken(token) == mdtGenericParam);
+ m_GenericParamToDescMap.AddElement(this, RidFromToken(token), value);
+ }
+#endif // !DACCESS_COMPILE
+
+ PTR_Module LookupFile(mdFile token)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(TypeFromToken(token) == mdtFile);
+ return m_FileReferencesMap.GetElement(RidFromToken(token));
+ }
+
+
+#ifndef DACCESS_COMPILE
+ void EnsureFileCanBeStored(mdFile token)
+ {
+ WRAPPER_NO_CONTRACT; // THROWS/GC_NOTRIGGER/INJECT_FAULT()/MODE_ANY
+
+ _ASSERTE(TypeFromToken(token) == mdtFile);
+ m_FileReferencesMap.EnsureElementCanBeStored(this, RidFromToken(token));
+ }
+
+ void EnsuredStoreFile(mdFile token, Module *value)
+ {
+ WRAPPER_NO_CONTRACT; // NOTHROW/GC_NOTRIGGER/FORBID_FAULT
+
+
+ _ASSERTE(TypeFromToken(token) == mdtFile);
+ m_FileReferencesMap.SetElement(RidFromToken(token), value);
+ }
+
+
+ void StoreFileThrowing(mdFile token, Module *value)
+ {
+ WRAPPER_NO_CONTRACT;
+
+
+ _ASSERTE(TypeFromToken(token) == mdtFile);
+ m_FileReferencesMap.AddElement(this, RidFromToken(token), value);
+ }
+
+ BOOL StoreFileNoThrow(mdFile token, Module *value)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(TypeFromToken(token) == mdtFile);
+ return m_FileReferencesMap.TrySetElement(RidFromToken(token), value);
+ }
+
+ mdAssemblyRef FindManifestModule(Module *value)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return m_ManifestModuleReferencesMap.Find(value) | mdtAssembly;
+ }
+#endif // !DACCESS_COMPILE
+
+ DWORD GetFileMax() { LIMITED_METHOD_DAC_CONTRACT; return m_FileReferencesMap.GetSize(); }
+
+ Assembly *LookupAssemblyRef(mdAssemblyRef token);
+
+#ifndef DACCESS_COMPILE
+ //
+ // Increase the size of the AssemblyRef-to-Module LookupMap to make sure the specified token
+ // can be stored. Note that nothing is actually added to the LookupMap at this point.
+ //
+ // Arguments:
+ // token - the AssemblyRef metadata token we need to accommodate
+ //
+
+ void EnsureAssemblyRefCanBeStored(mdAssemblyRef token)
+ {
+ WRAPPER_NO_CONTRACT; // THROWS/GC_NOTRIGGER/INJECT_FAULT()/MODE_ANY
+
+ _ASSERTE(TypeFromToken(token) == mdtAssemblyRef);
+ m_ManifestModuleReferencesMap.EnsureElementCanBeStored(this, RidFromToken(token));
+ }
+
+ void ForceStoreAssemblyRef(mdAssemblyRef token, Assembly *value);
+ void StoreAssemblyRef(mdAssemblyRef token, Assembly *value);
+
+ mdAssemblyRef FindAssemblyRef(Assembly *targetAssembly);
+
+ void CreateAssemblyRefByNameTable(AllocMemTracker *pamTracker);
+ bool HasReferenceByName(LPCUTF8 pModuleName);
+
+#endif // !DACCESS_COMPILE
+
+#ifdef FEATURE_PREJIT
+ void FinalizeLookupMapsPreSave(DataImage *image);
+#endif
+
+ DWORD GetAssemblyRefMax() {LIMITED_METHOD_CONTRACT; return m_ManifestModuleReferencesMap.GetSize(); }
+
+ MethodDesc *FindMethodThrowing(mdToken pMethod);
+ MethodDesc *FindMethod(mdToken pMethod);
+
+ void PopulatePropertyInfoMap();
+ HRESULT GetPropertyInfoForMethodDef(mdMethodDef md, mdProperty *ppd, LPCSTR *pName, ULONG *pSemantic);
+
+ #define NUM_PROPERTY_SET_HASHES 4
+#ifdef FEATURE_PREJIT
+ void PrecomputeMatchingProperties(DataImage *image);
+#endif
+ BOOL MightContainMatchingProperty(mdProperty tkProperty, ULONG nameHash);
+
+#endif //CLR_STANDALONE_BINDER
+
+private:
+ ArrayDPTR(BYTE) m_propertyNameSet;
+ DWORD m_nPropertyNameSet;
+
+#ifndef CLR_STANDALONE_BINDER
+
+public:
+
+ // Debugger stuff
+ BOOL NotifyDebuggerLoad(AppDomain *pDomain, DomainFile * pDomainFile, int level, BOOL attaching);
+ void NotifyDebuggerUnload(AppDomain *pDomain);
+
+ void SetDebuggerInfoBits(DebuggerAssemblyControlFlags newBits);
+
+ DebuggerAssemblyControlFlags GetDebuggerInfoBits(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return (DebuggerAssemblyControlFlags)((m_dwTransientFlags &
+ DEBUGGER_INFO_MASK_PRIV) >>
+ DEBUGGER_INFO_SHIFT_PRIV);
+ }
+
+#ifdef PROFILING_SUPPORTED
+ BOOL IsProfilerNotified() {LIMITED_METHOD_CONTRACT; return (m_dwTransientFlags & IS_PROFILER_NOTIFIED) != 0; }
+ void NotifyProfilerLoadFinished(HRESULT hr);
+#endif // PROFILING_SUPPORTED
+
+ PTR_PersistentInlineTrackingMap GetNgenInlineTrackingMap();
+
+public:
+ void NotifyEtwLoadFinished(HRESULT hr);
+
+ // Get any cached ITypeLib* for the module.
+ ITypeLib *GetTypeLib();
+ // Cache the ITypeLib*, if one is not already cached.
+ void SetTypeLib(ITypeLib *pITLB);
+ ITypeLib *GetTypeLibTCE();
+ void SetTypeLibTCE(ITypeLib *pITLB);
+
+ // Enregisters a VASig.
+ VASigCookie *GetVASigCookie(Signature vaSignature);
+
+ // DLL entry point
+ MethodDesc *GetDllEntryPoint()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pDllMain;
+ }
+ void SetDllEntryPoint(MethodDesc *pMD)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pDllMain = pMD;
+ }
+
+ BOOL CanExecuteCode();
+
+#ifdef FEATURE_MIXEDMODE
+ LPVOID GetUMThunk(LPVOID pManagedIp, PCCOR_SIGNATURE pSig, ULONG cSig);
+ LPVOID GetMUThunk(LPVOID pUnmanagedIp, PCCOR_SIGNATURE pSig, ULONG cSig);
+#endif // FEATURE_MIXEDMODE
+
+ // This data is only valid for NGEN'd modules, and for modules we're creating at NGEN time.
+ ModuleCtorInfo* GetZapModuleCtorInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return &m_ModuleCtorInfo;
+ }
+
+#endif // CLR_STANDALONE_BINDER
+
+ private:
+
+#ifdef FEATURE_MIXEDMODE
+ class MUThunkHash *m_pMUThunkHash;
+#endif // FEATURE_MIXEDMODE
+
+#ifndef CLR_STANDALONE_BINDER
+
+ public:
+#ifndef DACCESS_COMPILE
+ BOOL Equals(Module *pModule) { WRAPPER_NO_CONTRACT; return m_file->Equals(pModule->m_file); }
+ BOOL Equals(PEFile *pFile) { WRAPPER_NO_CONTRACT; return m_file->Equals(pFile); }
+#endif // !DACCESS_COMPILE
+
+ LPCUTF8 GetSimpleName()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(m_pSimpleName != NULL);
+ return m_pSimpleName;
+ }
+
+ HRESULT GetScopeName(LPCUTF8 * pszName) { WRAPPER_NO_CONTRACT; return m_file->GetScopeName(pszName); }
+ const SString &GetPath() { WRAPPER_NO_CONTRACT; return m_file->GetPath(); }
+
+#ifdef LOGGING
+ LPCWSTR GetDebugName() { WRAPPER_NO_CONTRACT; return m_file->GetDebugName(); }
+#endif
+
+ BOOL IsILOnly() { WRAPPER_NO_CONTRACT; return m_file->IsILOnly(); }
+
+#ifdef FEATURE_PREJIT
+ BOOL HasNativeImage()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return m_file->HasNativeImage();
+ }
+
+ PEImageLayout *GetNativeImage()
+ {
+ CONTRACT(PEImageLayout *)
+ {
+ PRECONDITION(m_file->HasNativeImage());
+ POSTCONDITION(CheckPointer(RETVAL));
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ CANNOT_TAKE_LOCK;
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ RETURN m_file->GetLoadedNative();
+ }
+#else
+ BOOL HasNativeImage()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+ }
+
+ PEImageLayout * GetNativeImage()
+ {
+ // Should never get here
+ PRECONDITION(HasNativeImage());
+ return NULL;
+ }
+#endif // FEATURE_PREJIT
+
+ PEImageLayout * GetNativeOrReadyToRunImage();
+ PTR_CORCOMPILE_IMPORT_SECTION GetImportSections(COUNT_T *pCount);
+ PTR_CORCOMPILE_IMPORT_SECTION GetImportSectionFromIndex(COUNT_T index);
+ PTR_CORCOMPILE_IMPORT_SECTION GetImportSectionForRVA(RVA rva);
+
+ // These are overridden by reflection modules
+ virtual TADDR GetIL(RVA il);
+
+ virtual PTR_VOID GetRvaField(RVA field, BOOL fZapped);
+ CHECK CheckRvaField(RVA field);
+ CHECK CheckRvaField(RVA field, COUNT_T size);
+
+ const void *GetInternalPInvokeTarget(RVA target)
+ { WRAPPER_NO_CONTRACT; return m_file->GetInternalPInvokeTarget(target); }
+
+ BOOL HasTls();
+ BOOL IsRvaFieldTls(DWORD field);
+ UINT32 GetFieldTlsOffset(DWORD field);
+ UINT32 GetTlsIndex();
+
+ PCCOR_SIGNATURE GetSignature(RVA signature);
+ RVA GetSignatureRva(PCCOR_SIGNATURE signature);
+ CHECK CheckSignatureRva(RVA signature);
+ CHECK CheckSignature(PCCOR_SIGNATURE signature);
+ BOOL IsSigInIL(PCCOR_SIGNATURE signature);
+
+ mdToken GetEntryPointToken();
+
+ BYTE *GetProfilerBase();
+
+
+ // Active transition path management
+ //
+ // This list keeps track of modules to which we have active transition
+ // paths. An active transition path is one where we move from
+ // active execution in one module to another module without
+ // triggering the file loader to ensure that the
+ // destination module is active. We must explicitly list these
+ // relationships so that the loader can ensure that the activation
+ // constraints are satisfied a priori.
+ //
+ // Conditional vs. Unconditional describes how we deal with
+ // activation failure of a dependency. In the unconditional case,
+ // we propagate the activation failure to the depending module.
+ // In the conditional case, we activate a "trigger" in the active
+ // transition path which will cause the path to fail in particular
+ // app domains where the destination module failed to activate.
+ // (This trigger in the path typically has a perf cost even in the
+ // nonfailing case.)
+ //
+ // In either case we must try to perform the activation eagerly -
+ // even in the conditional case we have to know whether to turn on
+ // the trigger or not before we let the active transition path
+ // execute.
+
+ void AddActiveDependency(Module *pModule, BOOL unconditional);
+
+ // Active dependency iterator
+ class DependencyIterator
+ {
+ protected:
+ ArrayList::Iterator m_i;
+ COUNT_T m_index;
+ SynchronizedBitMask* m_unconditionalFlags;
+
+ friend class Module;
+
+ DependencyIterator(ArrayList *list, SynchronizedBitMask *unconditionalFlags)
+ : m_index((COUNT_T)-1),
+ m_unconditionalFlags(unconditionalFlags)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_i = list->Iterate();
+ }
+
+ public:
+ Module *GetDependency()
+ {
+ return ((FixupPointer<PTR_Module> *)m_i.GetElementPtr())->GetValue();
+ }
+
+ BOOL Next()
+ {
+ LIMITED_METHOD_CONTRACT;
+ while (m_i.Next())
+ {
+ ++m_index;
+
+#ifdef FEATURE_PREJIT
+ // When iterating all dependencies, we do not restore any tokens
+ // as we want to be lazy.
+ PTR_Module pModule = ((FixupPointer<PTR_Module> *)m_i.GetElementPtr())->GetValue();
+ if (!CORCOMPILE_IS_POINTER_TAGGED(dac_cast<TADDR>(pModule)))
+ return TRUE;
+
+#else
+ return TRUE;
+#endif
+
+ }
+ return FALSE;
+ }
+ BOOL IsUnconditional()
+ {
+ if (m_unconditionalFlags == NULL)
+ return TRUE;
+ else
+ return m_unconditionalFlags->TestBit(m_index);
+ }
+ };
+
+ DependencyIterator IterateActiveDependencies()
+ {
+ WRAPPER_NO_CONTRACT;
+ return DependencyIterator(&m_activeDependencies, &m_unconditionalDependencies);
+ }
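+
+ // Illustrative usage sketch (hypothetical caller) of the iterator above:
+ //
+ // Module::DependencyIterator i = pModule->IterateActiveDependencies();
+ // while (i.Next())
+ // {
+ //     Module *pDependency = i.GetDependency();
+ //     BOOL fUnconditional = i.IsUnconditional();
+ //     // ... inspect the active transition path to pDependency ...
+ // }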
+
+ BOOL HasActiveDependency(Module *pModule);
+ BOOL HasUnconditionalActiveDependency(Module *pModule);
+
+ // Turn triggers from this module into runtime checks
+ void EnableModuleFailureTriggers(Module *pModule, AppDomain *pDomain);
+
+#endif // !CLR_STANDALONE_BINDER
+
+#ifdef FEATURE_PREJIT
+#ifndef CLR_STANDALONE_BINDER
+
+ BOOL IsZappedCode(PCODE code);
+ BOOL IsZappedPrecode(PCODE code);
+
+ CORCOMPILE_DEBUG_ENTRY GetMethodDebugInfoOffset(MethodDesc *pMD);
+ PTR_BYTE GetNativeDebugInfo(MethodDesc * pMD);
+
+ // The methods below must be called when loading back an ngen'ed image for any fields that
+ // might be an encoded token (rather than a hard pointer) and/or need a restore operation
+ //
+ static void RestoreMethodTablePointerRaw(PTR_MethodTable * ppMT,
+ Module *pContainingModule = NULL,
+ ClassLoadLevel level = CLASS_LOADED);
+ static void RestoreTypeHandlePointerRaw(TypeHandle *pHandle,
+ Module *pContainingModule = NULL,
+ ClassLoadLevel level = CLASS_LOADED);
+ static void RestoreMethodDescPointerRaw(PTR_MethodDesc * ppMD,
+ Module *pContainingModule = NULL,
+ ClassLoadLevel level = CLASS_LOADED);
+
+ static void RestoreMethodTablePointer(FixupPointer<PTR_MethodTable> * ppMT,
+ Module *pContainingModule = NULL,
+ ClassLoadLevel level = CLASS_LOADED);
+ static void RestoreTypeHandlePointer(FixupPointer<TypeHandle> *pHandle,
+ Module *pContainingModule = NULL,
+ ClassLoadLevel level = CLASS_LOADED);
+ static void RestoreMethodDescPointer(FixupPointer<PTR_MethodDesc> * ppMD,
+ Module *pContainingModule = NULL,
+ ClassLoadLevel level = CLASS_LOADED);
+
+ static void RestoreMethodTablePointer(RelativeFixupPointer<PTR_MethodTable> * ppMT,
+ Module *pContainingModule = NULL,
+ ClassLoadLevel level = CLASS_LOADED);
+ static void RestoreTypeHandlePointer(RelativeFixupPointer<TypeHandle> *pHandle,
+ Module *pContainingModule = NULL,
+ ClassLoadLevel level = CLASS_LOADED);
+ static void RestoreMethodDescPointer(RelativeFixupPointer<PTR_MethodDesc> * ppMD,
+ Module *pContainingModule = NULL,
+ ClassLoadLevel level = CLASS_LOADED);
+
+ static void RestoreFieldDescPointer(FixupPointer<PTR_FieldDesc> * ppFD);
+
+ static void RestoreModulePointer(RelativeFixupPointer<PTR_Module> * ppModule, Module *pContainingModule);
+
+ static PTR_Module RestoreModulePointerIfLoaded(DPTR(RelativeFixupPointer<PTR_Module>) ppModule, Module *pContainingModule);
+
+ PCCOR_SIGNATURE GetEncodedSig(RVA fixupRva, Module **ppDefiningModule);
+ PCCOR_SIGNATURE GetEncodedSigIfLoaded(RVA fixupRva, Module **ppDefiningModule);
+
+ BYTE *GetNativeFixupBlobData(RVA fixup);
+
+ IMDInternalImport *GetNativeAssemblyImport(BOOL loadAllowed = TRUE);
+
+ BOOL FixupNativeEntry(CORCOMPILE_IMPORT_SECTION * pSection, SIZE_T fixupIndex, SIZE_T *fixup);
+
+ // This split exists to support the new CLR Dump functionality in DAC. The
+ // template removes any indirections.
+ BOOL FixupDelayList(TADDR pFixupList);
+
+ template<typename Ptr, typename FixupNativeEntryCallback>
+ BOOL FixupDelayListAux(TADDR pFixupList,
+ Ptr pThis, FixupNativeEntryCallback pfnCB,
+ PTR_CORCOMPILE_IMPORT_SECTION pImportSections, COUNT_T nImportSections,
+ PEDecoder * pNativeImage);
+ void RunEagerFixups();
+
+ IMDInternalImport *GetNativeFixupImport();
+ Module *GetModuleFromIndex(DWORD ix);
+ Module *GetModuleFromIndexIfLoaded(DWORD ix);
+
+ // This is to rebuild stub dispatch maps to module-local values.
+ void UpdateStubDispatchTypeTable(DataImage *image);
+
+ void SetProfileData(CorProfileData * profileData);
+ CorProfileData *GetProfileData();
+
+
+ mdTypeDef LookupIbcTypeToken( Module * pExternalModule, mdToken ibcToken, SString* optionalFullNameOut = NULL);
+ mdMethodDef LookupIbcMethodToken(TypeHandle enclosingType, mdToken ibcToken, SString* optionalFullNameOut = NULL);
+
+ SString * IBCErrorNameString();
+
+ void IBCTypeLoadFailed( CORBBTPROF_BLOB_PARAM_SIG_ENTRY *pBlobSigEntry,
+ SString& exceptionMessage, SString* typeNameError);
+ void IBCMethodLoadFailed(CORBBTPROF_BLOB_PARAM_SIG_ENTRY *pBlobSigEntry,
+ SString& exceptionMessage, SString* typeNameError);
+
+ TypeHandle LoadIBCTypeHelper( CORBBTPROF_BLOB_PARAM_SIG_ENTRY *pBlobSigEntry);
+ MethodDesc * LoadIBCMethodHelper(CORBBTPROF_BLOB_PARAM_SIG_ENTRY *pBlobSigEntry);
+
+
+ void ExpandAll(DataImage *image);
+ // profileData may be different than the profileData passed in to
+ // ExpandAll() depending on more information that may now be available
+ // (after all the methods have been compiled)
+
+#else // CLR_STANDALONE_BINDER
+public:
+#endif // CLR_STANDALONE_BINDER
+
+ void Save(DataImage *image);
+ void Arrange(DataImage *image);
+ void PlaceType(DataImage *image, TypeHandle th, DWORD profilingFlags);
+ void PlaceMethod(DataImage *image, MethodDesc *pMD, DWORD profilingFlags);
+ void Fixup(DataImage *image);
+
+#ifndef CLR_STANDALONE_BINDER
+
+ bool AreAllClassesFullyLoaded();
+
+ // Precompute type-specific auxiliary information saved into NGen image
+ void PrepareTypesForSave(DataImage *image);
+
+ static void SaveMethodTable(DataImage *image,
+ MethodTable *pMT,
+ DWORD profilingFlags);
+
+ static void SaveTypeHandle(DataImage *image,
+ TypeHandle t,
+ DWORD profilingFlags);
+
+private:
+ static BOOL CanEagerBindTo(Module *targetModule, Module *pPreferredZapModule, void *address);
+public:
+
+ static PTR_Module ComputePreferredZapModule(Module * pDefinitionModule, // the module that declares the generic type or method
+ Instantiation classInst, // the type arguments to the type (if any)
+ Instantiation methodInst = Instantiation()); // the type arguments to the method (if any)
+
+ static PTR_Module ComputePreferredZapModuleHelper(Module * pDefinitionModule,
+ Instantiation classInst,
+ Instantiation methodInst);
+
+ static PTR_Module ComputePreferredZapModule(TypeKey * pKey);
+
+ // Return true if types or methods of this instantiation are *always* precompiled and saved
+ // in the preferred zap module
+ // At present, only true for <__Canon,...,__Canon> instantiation
+ static BOOL IsAlwaysSavedInPreferredZapModule(Instantiation classInst,
+ Instantiation methodInst = Instantiation());
+
+ static PTR_Module GetPreferredZapModuleForTypeHandle(TypeHandle t);
+ static PTR_Module GetPreferredZapModuleForMethodTable(MethodTable * pMT);
+ static PTR_Module GetPreferredZapModuleForMethodDesc(const MethodDesc * pMD);
+ static PTR_Module GetPreferredZapModuleForFieldDesc(FieldDesc * pFD);
+ static PTR_Module GetPreferredZapModuleForTypeDesc(PTR_TypeDesc pTD);
+
+ void PrepopulateDictionaries(DataImage *image, BOOL nonExpansive);
+
+
+ void LoadTokenTables();
+ void LoadHelperTable();
+
+ PTR_NGenLayoutInfo GetNGenLayoutInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pNGenLayoutInfo;
+ }
+
+ PCODE GetPrestubJumpStub()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (!m_pNGenLayoutInfo)
+ return NULL;
+
+ return m_pNGenLayoutInfo->m_pPrestubJumpStub;
+ }
+
+#ifdef HAS_FIXUP_PRECODE
+ PCODE GetPrecodeFixupJumpStub()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (!m_pNGenLayoutInfo)
+ return NULL;
+
+ return m_pNGenLayoutInfo->m_pPrecodeFixupJumpStub;
+ }
+#endif
+
+ BOOL IsVirtualImportThunk(PCODE code)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (!m_pNGenLayoutInfo)
+ return FALSE;
+
+ return m_pNGenLayoutInfo->m_VirtualMethodThunks.IsInRange(code);
+ }
+
+ BOOL IsReadyToRun()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef FEATURE_READYTORUN
+ return m_pReadyToRunInfo != NULL;
+#else
+ return FALSE;
+#endif
+ }
+
+#ifdef FEATURE_READYTORUN
+ PTR_ReadyToRunInfo GetReadyToRunInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pReadyToRunInfo;
+ }
+#endif
+
+ ICorJitInfo::ProfileBuffer * AllocateProfileBuffer(mdToken _token, DWORD _size, DWORD _ILSize);
+ HANDLE OpenMethodProfileDataLogFile(GUID mvid);
+ static void ProfileDataAllocateTokenLists(ProfileEmitter * pEmitter, TokenProfileData* pTokenProfileData);
+ HRESULT WriteMethodProfileDataLogFile(bool cleanup);
+ static void WriteAllModuleProfileData(bool cleanup);
+
+ void CreateProfilingData();
+ void DeleteProfilingData();
+
+ PTR_ProfilingBlobTable GetProfilingBlobTable();
+
+ void LogTokenAccess(mdToken token, SectionFormat format, ULONG flagNum);
+ void LogTokenAccess(mdToken token, ULONG flagNum);
+
+ BOOL AreTypeSpecsTriaged()
+ {
+ return m_dwTransientFlags & TYPESPECS_TRIAGED;
+ }
+
+ void SetTypeSpecsTriaged()
+ {
+ FastInterlockOr(&m_dwTransientFlags, TYPESPECS_TRIAGED);
+ }
+
+ BOOL IsModuleSaved()
+ {
+ return m_dwTransientFlags & MODULE_SAVED;
+ }
+
+ void SetIsModuleSaved()
+ {
+ FastInterlockOr(&m_dwTransientFlags, MODULE_SAVED);
+ }
+
+#endif // !CLR_STANDALONE_BINDER
+#endif // FEATURE_PREJIT
+
+#ifndef CLR_STANDALONE_BINDER
+#ifdef _DEBUG
+ // Similar to the ExpandAll we use for NGen, this forces jitting of all methods in a module. It is
+ // used for debugging purposes only.
+ void ExpandAll();
+#endif
+
+ BOOL IsIJWFixedUp() { return m_dwTransientFlags & IS_IJW_FIXED_UP; }
+ void SetIsIJWFixedUp();
+
+ BOOL IsBeingUnloaded() { return m_dwTransientFlags & IS_BEING_UNLOADED; }
+ void SetBeingUnloaded();
+ void StartUnload();
+
+
+public:
+ idTypeSpec LogInstantiatedType(TypeHandle typeHnd, ULONG flagNum);
+ idMethodSpec LogInstantiatedMethod(const MethodDesc * md, ULONG flagNum);
+
+ static DWORD EncodeModuleHelper(void* pModuleContext, Module *pReferencedModule);
+ static void TokenDefinitionHelper(void* pModuleContext, Module *pReferencedModule, DWORD index, mdToken* token);
+
+#endif // CLR_STANDALONE_BINDER
+
+public:
+#ifndef CLR_STANDALONE_BINDER
+ MethodTable* MapZapType(UINT32 typeID);
+
+ void SetDynamicIL(mdToken token, TADDR blobAddress, BOOL fTemporaryOverride);
+ TADDR GetDynamicIL(mdToken token, BOOL fAllowTemporary);
+
+ // store and retrieve the instrumented IL offset mapping for a particular method
+#if !defined(DACCESS_COMPILE)
+ void SetInstrumentedILOffsetMapping(mdMethodDef token, InstrumentedILOffsetMapping mapping);
+#endif // !DACCESS_COMPILE
+ InstrumentedILOffsetMapping GetInstrumentedILOffsetMapping(mdMethodDef token);
+
+public:
+ // These helpers return the offsets for the slots/bytes/handles. They return the offset in bytes from the beginning
+ // of the 1st GC pointer in the statics block for the module.
+ void GetOffsetsForRegularStaticData(
+ mdTypeDef cl,
+ BOOL bDynamic,
+ DWORD dwGCStaticHandles,
+ DWORD dwNonGCStaticBytes,
+ DWORD * pOutStaticHandleOffset,
+ DWORD * pOutNonGCStaticOffset);
+
+ void GetOffsetsForThreadStaticData(
+ mdTypeDef cl,
+ BOOL bDynamic,
+ DWORD dwGCStaticHandles,
+ DWORD dwNonGCStaticBytes,
+ DWORD * pOutStaticHandleOffset,
+ DWORD * pOutNonGCStaticOffset);
+
+
+ BOOL IsStaticStoragePrepared(mdTypeDef tkType);
+
+ DWORD GetNumGCThreadStaticHandles()
+ {
+ return m_dwMaxGCThreadStaticHandles;
+ }
+
+ CrstBase* GetFixupCrst()
+ {
+ return &m_FixupCrst;
+ }
+
+ void AllocateRegularStaticHandles(AppDomain* pDomainMT);
+
+ void FreeModuleIndex();
+
+ DWORD GetDomainLocalModuleSize()
+ {
+ return m_dwRegularStaticsBlockSize;
+ }
+
+ DWORD GetThreadLocalModuleSize()
+ {
+ return m_dwThreadStaticsBlockSize;
+ }
+
+ DWORD AllocateDynamicEntry(MethodTable *pMT);
+
+ // We need this for the jitted shared case.
+ inline MethodTable* GetDynamicClassMT(DWORD dynamicClassID);
+
+ static BOOL IsEncodedModuleIndex(SIZE_T ModuleID)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (ModuleID&1)==1;
+ }
+
+ static SIZE_T IndexToID(ModuleIndex index)
+ {
+ LIMITED_METHOD_CONTRACT
+
+ return (index.m_dwIndex << 1) | 1;
+ }
+
+ static ModuleIndex IDToIndex(SIZE_T ModuleID)
+ {
+ LIMITED_METHOD_CONTRACT
+ SUPPORTS_DAC;
+
+ _ASSERTE(IsEncodedModuleIndex(ModuleID));
+ ModuleIndex index(ModuleID >> 1);
+
+ return index;
+ }
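+
+ // Worked example (illustrative): ModuleIndex 5 round-trips as
+ // IndexToID -> (5 << 1) | 1 == 11; IsEncodedModuleIndex(11) sees the low bit set;
+ // IDToIndex(11) -> 11 >> 1 == 5. An even ModuleID is instead a DomainLocalModule
+ // pointer (pointers are aligned, so their low bit is never 1).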
+
+ static ModuleIndex AllocateModuleIndex();
+ static void FreeModuleIndex(ModuleIndex index);
+
+ ModuleIndex GetModuleIndex()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_ModuleIndex;
+ }
+
+ SIZE_T GetModuleID()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<TADDR>(m_ModuleID);
+ }
+
+ SIZE_T * GetAddrModuleID()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (SIZE_T*) &m_ModuleID;
+ }
+#endif // !CLR_STANDALONE_BINDER
+
+ static SIZE_T GetOffsetOfModuleID()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(Module, m_ModuleID);
+ }
+
+#ifndef CLR_STANDALONE_BINDER
+ PTR_DomainLocalModule GetDomainLocalModule(AppDomain *pDomain);
+
+#ifndef DACCESS_COMPILE
+ PTR_DomainLocalModule GetDomainLocalModule() { WRAPPER_NO_CONTRACT; return GetDomainLocalModule(NULL); };
+#endif
+
+#ifdef FEATURE_PREJIT
+ NgenStats *GetNgenStats()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pNgenStats;
+ }
+#endif // FEATURE_PREJIT
+
+ void EnumRegularStaticGCRefs (AppDomain* pAppDomain, promote_func* fn, ScanContext* sc);
+
+#endif // CLR_STANDALONE_BINDER
+
+protected:
+
+ void BuildStaticsOffsets (AllocMemTracker *pamTracker);
+ void AllocateStatics (AllocMemTracker *pamTracker);
+
+ // ModuleID is quite ugly. We should try to switch to using ModuleIndex instead
+ // where appropriate, and we should clean up code that uses ModuleID
+ PTR_DomainLocalModule m_ModuleID; // MultiDomain case: tagged (low bit 1) ModuleIndex
+ // SingleDomain case: pointer to domain local module
+
+ ModuleIndex m_ModuleIndex;
+
+ // We reuse the statics area of the method table to store
+ // these for the non domain neutral case, but they are now unified
+ // so that we don't have different code paths for this.
+ PTR_DWORD m_pRegularStaticOffsets; // Offset of statics in each class
+ PTR_DWORD m_pThreadStaticOffsets; // Offset of ThreadStatics in each class
+
+ // All types with RID <= this value have static storage allocated within the module by AllocateStatics
+ // If AllocateStatics hasn't run yet, the value is 0
+ RID m_maxTypeRidStaticsAllocated;
+
+ // @NICE: see if we can remove these fields
+ DWORD m_dwMaxGCRegularStaticHandles; // Max number of handles we can have.
+ DWORD m_dwMaxGCThreadStaticHandles;
+
+ // Size of the precomputed statics block. This includes class init bytes, gc handles and non gc statics
+ DWORD m_dwRegularStaticsBlockSize;
+ DWORD m_dwThreadStaticsBlockSize;
+
+ // For 'dynamic' statics (Reflection and generics)
+ SIZE_T m_cDynamicEntries; // Number of used entries in DynamicStaticsInfo table
+ SIZE_T m_maxDynamicEntries; // Size of table itself, including unused entries
+
+ // Info we need for dynamic statics that we can store per-module (ie, no need for it to be duplicated
+ // per appdomain)
+ struct DynamicStaticsInfo
+ {
+ MethodTable* pEnclosingMT; // Enclosing type; necessarily in this loader module
+ };
+ DynamicStaticsInfo* m_pDynamicStaticsInfo; // Table with entry for each dynamic ID
+
+
+public:
+ //-----------------------------------------------------------------------------------------
+ // If true, strings only need to be interned on a per-module basis, instead of on a
+ // per-appdomain basis, which is the default. Use the module accessor so you don't need
+ // to touch the metadata in the ngen case.
+ //-----------------------------------------------------------------------------------------
+ BOOL IsNoStringInterning();
+
+ //-----------------------------------------------------------------------------------------
+ // Returns a BOOL to indicate whether we have computed if the compiler has instructed us to
+ // wrap non-CLS compliant exceptions or not.
+ //-----------------------------------------------------------------------------------------
+ BOOL IsRuntimeWrapExceptionsStatusComputed();
+
+ //-----------------------------------------------------------------------------------------
+ // If true, any non-CLSCompliant exceptions (i.e. ones which derive from something other
+ // than System.Exception) are wrapped in a RuntimeWrappedException instance. In other
+ // words, they become compliant.
+ //-----------------------------------------------------------------------------------------
+ BOOL IsRuntimeWrapExceptions();
+
+#ifndef FEATURE_CORECLR
+ BOOL HasDefaultDllImportSearchPathsAttribute();
+
+ BOOL IsDefaultDllImportSearchPathsAttributeCached()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_dwPersistedFlags & DEFAULT_DLL_IMPORT_SEARCH_PATHS_IS_CACHED) != 0;
+ }
+
+ ULONG DefaultDllImportSearchPathsAttributeCachedValue()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_DefaultDllImportSearchPathsAttributeValue & 0xFFFFFFFD;
+ }
+
+ BOOL DllImportSearchAssemblyDirectory()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_DefaultDllImportSearchPathsAttributeValue & 0x2) != 0;
+ }
+#endif // !FEATURE_CORECLR
+
+ //-----------------------------------------------------------------------------------------
+ // True iff metadata version string is 1.* or 2.*.
+ // @TODO (post-Dev10): All places that need this information should call this function
+ // instead of parsing the version themselves.
+ //-----------------------------------------------------------------------------------------
+ BOOL IsPreV4Assembly();
+
+ //-----------------------------------------------------------------------------------------
+ // Get reliability contract info, see ConstrainedExecutionRegion.cpp for details.
+ //-----------------------------------------------------------------------------------------
+ DWORD GetReliabilityContract();
+
+ //-----------------------------------------------------------------------------------------
+ // Parse/Return NeutralResourcesLanguageAttribute if it exists (updates Module member variables at ngen time)
+ //-----------------------------------------------------------------------------------------
+ BOOL GetNeutralResourcesLanguage(LPCUTF8 * cultureName, ULONG * cultureNameLength, INT16 * fallbackLocation, BOOL cacheAttribute);
+
+protected:
+
+ Volatile<DWORD> m_dwReliabilityContract;
+
+ // initialize Crst controlling the Dynamic IL hashtables
+ void InitializeDynamicILCrst();
+
+#ifndef DACCESS_COMPILE
+public:
+
+ // Support for getting and creating information about Constrained Execution Regions rooted in this module.
+
+ // Access to CerPrepInfo, the structure used to track CERs prepared at runtime (as opposed to ngen time). GetCerPrepInfo will
+ // return the structure associated with the given method desc if it exists or NULL otherwise. CreateCerPrepInfo will get the
+ // structure if it exists or allocate and return a new struct otherwise. Creation of CerPrepInfo structures is automatically
+ // synchronized by the CerCrst (lazily allocated as needed).
+ CerPrepInfo *GetCerPrepInfo(MethodDesc *pMD);
+ CerPrepInfo *CreateCerPrepInfo(MethodDesc *pMD);
+
+#ifdef FEATURE_PREJIT
+ // Access to CerNgenRootTable, which holds information for all the CERs rooted at a method in this module (that were
+ // discovered during an ngen).
+
+ // Add a list of MethodContextElements representing a CER to the root table keyed by the MethodDesc* of the root method. Creates
+ // or expands the root table as necessary.
+ void AddCerListToRootTable(MethodDesc *pRootMD, MethodContextElement *pList);
+
+ // Returns true if the given method is a CER root detected at ngen time.
+ bool IsNgenCerRootMethod(MethodDesc *pMD);
+
+ // Restores the CER rooted at this method (no-op if this method isn't a CER root).
+ void RestoreCer(MethodDesc *pMD);
+#endif // FEATURE_PREJIT
+
+ Crst *GetCerCrst()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCerCrst;
+ }
+#endif // !DACCESS_COMPILE
+
+#ifdef FEATURE_CORECLR
+ void VerifyAllMethods();
+#endif //FEATURE_CORECLR
+
+ CrstBase *GetLookupTableCrst()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_LookupTableCrst;
+ }
+
+private:
+ EEPtrHashTable *m_pCerPrepInfo; // Root methods prepared for Constrained Execution Regions
+ Crst *m_pCerCrst; // Mutex protecting update access to both of the above hashes
+#ifdef FEATURE_PREJIT
+ CerNgenRootTable *m_pCerNgenRootTable; // Root methods of CERs found during ngen and requiring runtime restoration
+#endif
+
+ // This struct stores the data used by the managed debugging infrastructure. If it turns out that
+ // the debugger is increasing the size of the Module class by too much, we can consider allocating
+ // this struct lazily on demand.
+ struct DebuggerSpecificData
+ {
+ // Mutex protecting update access to the DynamicILBlobTable and TemporaryILBlobTable
+ PTR_Crst m_pDynamicILCrst;
+
+ // maps tokens for EnC/dynamics/reflection emit to their corresponding IL blobs
+ // this map *always* overrides the Metadata RVA
+ PTR_DynamicILBlobTable m_pDynamicILBlobTable;
+
+ // maps tokens to their corresponding overridden IL blobs
+ // this map conditionally overrides the Metadata RVA and the DynamicILBlobTable
+ PTR_DynamicILBlobTable m_pTemporaryILBlobTable;
+
+ // hash table storing any profiler-provided instrumented IL offset mapping
+ PTR_ILOffsetMappingTable m_pILOffsetMappingTable;
+
+ // Strict count of # of methods in this module that are JMC-enabled.
+ LONG m_cTotalJMCFuncs;
+
+ // The default JMC status for methods in this module.
+ // Individual methods can be overridden.
+ bool m_fDefaultJMCStatus;
+ };
+
+ DebuggerSpecificData m_debuggerSpecificData;
+
+ // This is a compressed read only copy of m_inlineTrackingMap, which is being saved to NGEN image.
+ PTR_PersistentInlineTrackingMap m_persistentInlineTrackingMap;
+
+
+ LPCSTR *m_AssemblyRefByNameTable; // array that maps mdAssemblyRef tokens into their simple name
+ DWORD m_AssemblyRefByNameCount; // array size
+
+#if defined(FEATURE_PREJIT)
+ // a.dll calls a method in b.dll and that method calls a method in c.dll. When ngening
+ // a.dll it is possible that the method from b.dll gets inlined. When that happens, a.ni.dll
+ // stores added native metadata which has information about the assemblyRef to c.dll.
+ // Now, due to facades, this scenario is very common. This led to lots of calls to the
+ // binder to get the module corresponding to an assemblyRef in native metadata.
+ // We add a lookup map to cache the assembly pointer so that AssemblySpec::LoadAssembly()
+ // is not called for each fixup.
+
+ PTR_Assembly *m_NativeMetadataAssemblyRefMap;
+#endif // defined(FEATURE_PREJIT)
+
+public:
+ ModuleSecurityDescriptor* m_pModuleSecurityDescriptor;
+
+#if !defined(CLR_STANDALONE_BINDER) && !defined(DACCESS_COMPILE) && defined(FEATURE_PREJIT)
+ PTR_Assembly GetNativeMetadataAssemblyRefFromCache(DWORD rid)
+ {
+ PTR_Assembly * NativeMetadataAssemblyRefMap = VolatileLoadWithoutBarrier(&m_NativeMetadataAssemblyRefMap);
+
+ if (NativeMetadataAssemblyRefMap == NULL)
+ return NULL;
+
+ _ASSERTE(rid <= GetNativeAssemblyImport()->GetCountWithTokenKind(mdtAssemblyRef));
+ return NativeMetadataAssemblyRefMap[rid - 1];
+ }
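+
+ // Note: metadata rids are 1-based, hence the [rid - 1] indexing above; slot 0
+ // of the cache corresponds to assemblyRef rid 1.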
+
+ void SetNativeMetadataAssemblyRefInCache(DWORD rid, PTR_Assembly pAssembly);
+#endif // !defined(CLR_STANDALONE_BINDER) && !defined(DACCESS_COMPILE) && defined(FEATURE_PREJIT)
+};
+
+
+#ifndef CLR_STANDALONE_BINDER
+
+//
+// A ReflectionModule is a module created by reflection
+//
+
+class ReflectionModule : public Module
+{
+ VPTR_VTABLE_CLASS(ReflectionModule, Module)
+
+ public:
+ HCEESECTION m_sdataSection;
+
+ protected:
+ ICeeGen * m_pCeeFileGen;
+private:
+ Assembly *m_pCreatingAssembly;
+ ISymUnmanagedWriter *m_pISymUnmanagedWriter;
+ RefClassWriter *m_pInMemoryWriter;
+
+
+ // Simple Critical Section used for basic leaf-lock operations.
+ CrstExplicitInit m_CrstLeafLock;
+
+ // Buffer of Metadata storage for dynamic modules. May be NULL. This provides a reasonable way for
+ // the debugger to get metadata of dynamic modules from out of process.
+ // A dynamic module will eagerly serialize its metadata to this buffer.
+ PTR_SBuffer m_pDynamicMetadata;
+
+ // If true, does not eagerly serialize metadata in code:ReflectionModule.CaptureModuleMetaDataToMemory.
+ // This is used to allow bulk emitting types without re-emitting the metadata between each type.
+ bool m_fSuppressMetadataCapture;
+
+ // If true, then only other transient modules can depend on this module.
+ bool m_fIsTransient;
+
+ // Returns true iff metadata capturing is suppressed
+ bool IsMetadataCaptureSuppressed();
+
+ // Toggle whether CaptureModuleMetaDataToMemory should do anything. This can be an important perf win to
+ // allow batching up metadata capture. Use SuppressMetadataCaptureHolder to ensure they're balanced.
+ // These are not nestable.
+ void SuppressMetadataCapture();
+ void ResumeMetadataCapture();
+
+ // Glue functions for holders.
+ static void SuppressCaptureWrapper(ReflectionModule * pModule)
+ {
+ pModule->SuppressMetadataCapture();
+ }
+ static void ResumeCaptureWrapper(ReflectionModule * pModule)
+ {
+ pModule->ResumeMetadataCapture();
+ }
+
+
+ ReflectionModule(Assembly *pAssembly, mdFile token, PEFile *pFile);
+
+public:
+
+#ifdef DACCESS_COMPILE
+ // Accessor to expose m_pDynamicMetadata to debugger.
+ PTR_SBuffer GetDynamicMetadataBuffer() const;
+#endif
+
+ static ReflectionModule *Create(Assembly *pAssembly, PEFile *pFile, AllocMemTracker *pamTracker, LPCWSTR szName, BOOL fIsTransient);
+
+ void Initialize(AllocMemTracker *pamTracker, LPCWSTR szName);
+
+ void Destruct();
+#ifndef DACCESS_COMPILE
+ void ReleaseILData();
+#endif
+
+ // Overrides functions to access sections
+ virtual TADDR GetIL(RVA target);
+ virtual PTR_VOID GetRvaField(RVA rva, BOOL fZapped);
+
+ Assembly* GetCreatingAssembly( void )
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pCreatingAssembly;
+ }
+
+ void SetCreatingAssembly( Assembly* assembly )
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pCreatingAssembly = assembly;
+ }
+
+ ICeeGen *GetCeeGen() {LIMITED_METHOD_CONTRACT; return m_pCeeFileGen; }
+
+ RefClassWriter *GetClassWriter()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pInMemoryWriter;
+ }
+
+ ISymUnmanagedWriter *GetISymUnmanagedWriter()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pISymUnmanagedWriter;
+ }
+
+ // Note: we now use the same writer instance for the life of a module,
+ // so there should no longer be any need for the extra indirection.
+ ISymUnmanagedWriter **GetISymUnmanagedWriterAddr()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // We must have setup the writer before trying to get
+ // the address for it. Any calls to this before a
+ // SetISymUnmanagedWriter are very incorrect.
+ _ASSERTE(m_pISymUnmanagedWriter != NULL);
+
+ return &m_pISymUnmanagedWriter;
+ }
+
+ bool IsTransient()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_fIsTransient;
+ }
+
+ void SetIsTransient(bool fIsTransient)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_fIsTransient = fIsTransient;
+ }
+
+#ifndef DACCESS_COMPILE
+
+ typedef Wrapper<
+ ReflectionModule*,
+ ReflectionModule::SuppressCaptureWrapper,
+ ReflectionModule::ResumeCaptureWrapper> SuppressMetadataCaptureHolder;
+
+
+
+ // Eagerly serialize the metadata to a buffer that the debugger can retrieve.
+ void CaptureModuleMetaDataToMemory();
+
+ HRESULT SetISymUnmanagedWriter(ISymUnmanagedWriter *pWriter)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END
+
+
+ // Setting to NULL when we've never set a writer before should
+ // do nothing.
+ if ((pWriter == NULL) && (m_pISymUnmanagedWriter == NULL))
+ return S_OK;
+
+ if (m_pISymUnmanagedWriter != NULL)
+ {
+ // We shouldn't be trying to replace an existing writer anymore
+ _ASSERTE( pWriter == NULL );
+
+ m_pISymUnmanagedWriter->Release();
+ }
+
+ m_pISymUnmanagedWriter = pWriter;
+ return S_OK;
+ }
+#endif // !DACCESS_COMPILE
+};
+
+// Module holders
+FORCEINLINE void VoidModuleDestruct(Module *pModule)
+{
+#ifndef DACCESS_COMPILE
+ if (g_fEEStarted)
+ pModule->Destruct();
+#endif
+}
+
+typedef Wrapper<Module*, DoNothing, VoidModuleDestruct, 0> ModuleHolder;
+
+
+
+FORCEINLINE void VoidReflectionModuleDestruct(ReflectionModule *pModule)
+{
+#ifndef DACCESS_COMPILE
+ pModule->Destruct();
+#endif
+}
+
+typedef Wrapper<ReflectionModule*, DoNothing, VoidReflectionModuleDestruct, 0> ReflectionModuleHolder;
+
+
+
+//----------------------------------------------------------------------
+// VASigCookieEx (used to create a fake VASigCookie for unmanaged->managed
+// calls to vararg functions. These fakes are distinguished from the
+// real thing by having a null mdVASig.)
+//----------------------------------------------------------------------
+struct VASigCookieEx : public VASigCookie
+{
+ const BYTE *m_pArgs; // pointer to first unfixed unmanaged arg
+};
+
+bool IsSingleAppDomain();
+
+#endif // CLR_STANDALONE_BINDER
+
+#endif // !CEELOAD_H_
diff --git a/src/vm/ceeload.inl b/src/vm/ceeload.inl
new file mode 100644
index 0000000000..0f81dd5af5
--- /dev/null
+++ b/src/vm/ceeload.inl
@@ -0,0 +1,658 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: CEELOAD.INL
+//
+
+//
+// CEELOAD.INL has inline methods from CEELOAD.H.
+// ===========================================================================
+
+#ifndef CEELOAD_INL_
+#define CEELOAD_INL_
+
+template<typename TYPE>
+inline
+TYPE LookupMap<TYPE>::GetValueAt(PTR_TADDR pValue, TADDR* pFlags, TADDR supportedFlags)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ TYPE value = RelativePointer<TYPE>::GetValueMaybeNullAtPtr(dac_cast<TADDR>(pValue));
+
+ if (pFlags)
+ *pFlags = dac_cast<TADDR>(value) & supportedFlags;
+
+ return (TYPE)(dac_cast<TADDR>(value) & ~supportedFlags);
+}
+
+template<typename TYPE>
+inline
+void LookupMap<TYPE>::SetValueAt(PTR_TADDR pValue, TYPE value, TADDR flags)
+{
+ WRAPPER_NO_CONTRACT;
+
+ value = (TYPE)(dac_cast<TADDR>(value) | flags);
+
+ RelativePointer<TYPE>::SetValueAtPtr(dac_cast<TADDR>(pValue), value);
+}
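+
+// Worked example (illustrative): values stored in a LookupMap are at least
+// pointer-aligned, so their low bits are free to carry flags. With
+// supportedFlags == 0x3, SetValueAt(p, (TYPE)0x1000, 0x1) conceptually records the
+// tagged value 0x1001 (via a RelativePointer in the generic case), and
+// GetValueAt(p, &flags, 0x3) then reports flags == 0x1 and returns (TYPE)0x1000.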
+
+#ifndef DACCESS_COMPILE
+//
+// Specialization of Get/SetValueAt methods to support maps of pointer-sized value types
+//
+template<>
+inline
+SIZE_T LookupMap<SIZE_T>::GetValueAt(PTR_TADDR pValue, TADDR* pFlags, TADDR supportedFlags)
+{
+ WRAPPER_NO_CONTRACT;
+
+ TADDR value = *pValue;
+
+ if (pFlags)
+ *pFlags = value & supportedFlags;
+
+ return (value & ~supportedFlags);
+}
+
+template<>
+inline
+void LookupMap<SIZE_T>::SetValueAt(PTR_TADDR pValue, SIZE_T value, TADDR flags)
+{
+ WRAPPER_NO_CONTRACT;
+ *pValue = value | flags;
+}
+#endif // DACCESS_COMPILE
+
+//
+// Specialization of GetValueAt methods for tables with cross-module references
+//
+template<>
+inline
+PTR_TypeRef LookupMap<PTR_TypeRef>::GetValueAt(PTR_TADDR pValue, TADDR* pFlags, TADDR supportedFlags)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ // Strip flags before RelativeFixupPointer dereference
+ TADDR value = *pValue;
+
+ TADDR flags = (value & supportedFlags);
+ value -= flags;
+ value = ((RelativeFixupPointer<TADDR>&)(value)).GetValueMaybeNull(dac_cast<TADDR>(pValue));
+
+ if (pFlags)
+ *pFlags = flags;
+
+ return dac_cast<PTR_TypeRef>(value);
+}
+
+template<>
+inline
+PTR_Module LookupMap<PTR_Module>::GetValueAt(PTR_TADDR pValue, TADDR* pFlags, TADDR supportedFlags)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ // Strip flags before RelativeFixupPointer dereference
+ TADDR value = *pValue;
+
+ TADDR flags = (value & supportedFlags);
+ value -= flags;
+ value = ((RelativeFixupPointer<TADDR>&)(value)).GetValueMaybeNull(dac_cast<TADDR>(pValue));
+
+ if (pFlags)
+ *pFlags = flags;
+
+ return dac_cast<PTR_Module>(value);
+}
+
+template<>
+inline
+PTR_MemberRef LookupMap<PTR_MemberRef>::GetValueAt(PTR_TADDR pValue, TADDR* pFlags, TADDR supportedFlags)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ // Strip flags before RelativeFixupPointer dereference
+ TADDR value = *pValue;
+
+ TADDR flags = (value & supportedFlags);
+ value -= flags;
+ value = ((RelativeFixupPointer<TADDR>&)(value)).GetValueMaybeNull(dac_cast<TADDR>(pValue));
+
+ if (pFlags)
+ *pFlags = flags;
+
+ return dac_cast<PTR_MemberRef>(value);
+
+}
+
+// Retrieve the value associated with a rid
+template<typename TYPE>
+TYPE LookupMap<TYPE>::GetElement(DWORD rid, TADDR* pFlags)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+#ifdef FEATURE_PREJIT
+ if (MapIsCompressed())
+ {
+ // Can't access compressed entries directly: we need to go through the special helper. However we
+ // must still check the hot cache first (this would normally be done by GetElementPtr() below, but
+ // we can't integrate compressed support there since compressed entries don't have addresses, at
+ // least not byte-aligned ones).
+ PTR_TADDR pHotItemValue = FindHotItemValuePtr(rid);
+ if (pHotItemValue)
+ return GetValueAt(pHotItemValue, pFlags, supportedFlags);
+
+ TADDR value = GetValueFromCompressedMap(rid);
+
+ if (value == NULL)
+ {
+ if ((pNext == NULL) || (rid < dwCount))
+ {
+ if (pFlags)
+ *pFlags = NULL;
+ return NULL;
+ }
+
+ return dac_cast<DPTR(LookupMap)>(pNext)->GetElement(rid - dwCount, pFlags);
+ }
+
+ if (pFlags)
+ *pFlags = (value & supportedFlags);
+
+ return (TYPE)(value & ~supportedFlags);
+ }
+#endif // FEATURE_PREJIT
+
+ PTR_TADDR pElement = GetElementPtr(rid);
+ return (pElement != NULL) ? GetValueAt(pElement, pFlags, supportedFlags) : NULL;
+}
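+
+// Lookup order note (explanatory): for a compressed map the element is resolved by
+// checking the hot-item cache first, then the compressed table, and finally (for a
+// rid past this node's dwCount) the next node in the chain with the rid rebased.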
+
+// Stores an association in a map that has been previously grown to
+// the required size. Will never throw or fail.
+template<typename TYPE>
+void LookupMap<TYPE>::SetElement(DWORD rid, TYPE value, TADDR flags)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ BOOL fSuccess;
+ fSuccess = TrySetElement(rid, value, flags);
+ _ASSERTE(fSuccess);
+}
+
+
+#ifndef DACCESS_COMPILE
+
+// Try to store an association in a map. Will never throw; instead returns FALSE if
+// the map has no slot for this rid or the slot's page cannot be made writable.
+template<typename TYPE>
+BOOL LookupMap<TYPE>::TrySetElement(DWORD rid, TYPE value, TADDR flags)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PTR_TADDR pElement = GetElementPtr(rid);
+ if (pElement == NULL)
+ return FALSE;
+#ifdef _DEBUG
+ // Once set, the values in LookupMap should be immutable.
+ TADDR oldFlags;
+ TYPE oldValue = GetValueAt(pElement, &oldFlags, supportedFlags);
+ _ASSERTE(oldValue == NULL || (oldValue == value && oldFlags == flags));
+#endif
+ // Avoid unnecessary writes - do not overwrite existing value
+ if (*pElement == NULL)
+ {
+ if (!EnsureWritablePagesNoThrow(pElement, sizeof (TYPE)))
+ return FALSE;
+ SetValueAt(pElement, value, flags);
+ }
+ return TRUE;
+}
+
+// Stores an association in a map. Grows the map as necessary.
+template<typename TYPE>
+void LookupMap<TYPE>::AddElement(Module * pModule, DWORD rid, TYPE value, TADDR flags)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pModule));
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(ThrowOutOfMemory(););
+ }
+ CONTRACTL_END;
+
+ PTR_TADDR pElement = GetElementPtr(rid);
+ if (pElement == NULL)
+ pElement = GrowMap(pModule, rid);
+#ifdef _DEBUG
+ // Once set, the values in LookupMap should be immutable.
+ TADDR oldFlags;
+ TYPE oldValue = GetValueAt(pElement, &oldFlags, supportedFlags);
+ _ASSERTE(oldValue == NULL || (oldValue == value && oldFlags == flags));
+#endif
+ // Avoid unnecessary writes - do not overwrite existing value
+ if (*pElement == NULL)
+ {
+ EnsureWritablePages(pElement, sizeof (TYPE));
+ SetValueAt(pElement, value, flags);
+ }
+}
+
+
+// Ensures that the map has space for this element
+template<typename TYPE>
+inline
+void LookupMap<TYPE>::EnsureElementCanBeStored(Module * pModule, DWORD rid)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pModule));
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(ThrowOutOfMemory(););
+ }
+ CONTRACTL_END;
+
+ // don't attempt to call GetElementPtr for rids inside the compressed portion of
+ // a multi-node map
+ if (MapIsCompressed() && rid < dwCount)
+ return;
+ PTR_TADDR pElement = GetElementPtr(rid);
+ if (pElement == NULL)
+ pElement = GrowMap(pModule, rid);
+
+ EnsureWritablePages(pElement, sizeof (TYPE));
+}
+
+#endif // DACCESS_COMPILE
+
+// Find the given value in the table and return its RID
+template<typename TYPE>
+DWORD LookupMap<TYPE>::Find(TYPE value, TADDR* pFlags)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Iterator it(this);
+
+ DWORD rid = 0;
+ while (it.Next())
+ {
+ TADDR flags;
+ if (it.GetElementAndFlags(&flags) == value && (!pFlags || *pFlags == flags))
+ return rid;
+ rid++;
+ }
+
+ return 0;
+}
+
+template<typename TYPE>
+inline
+LookupMap<TYPE>::Iterator::Iterator(LookupMap* map)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_map = map;
+ m_index = (DWORD) -1;
+#ifdef FEATURE_PREJIT
+ // Compressed map support
+ m_currentEntry = 0;
+ if (map->pTable != NULL)
+ m_tableStream = BitStreamReader(dac_cast<PTR_CBYTE>(map->pTable));
+#endif // FEATURE_PREJIT
+}
+
+template<typename TYPE>
+inline BOOL
+LookupMap<TYPE>::Iterator::Next()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (!m_map || !m_map->pTable)
+ {
+ return FALSE;
+ }
+
+ m_index++;
+ if (m_index == m_map->dwCount)
+ {
+ m_map = dac_cast<DPTR(LookupMap)>(m_map->pNext);
+ if (!m_map || !m_map->pTable)
+ {
+ return FALSE;
+ }
+ m_index = 0;
+ }
+
+#ifdef FEATURE_PREJIT
+ // For a compressed map we need to read the encoded delta for the next entry and apply it to our previous
+ // value to obtain the new current value.
+ if (m_map->MapIsCompressed())
+ m_currentEntry = m_map->GetNextCompressedEntry(&m_tableStream, m_currentEntry);
+#endif // FEATURE_PREJIT
+
+ return TRUE;
+}
+
+template<typename TYPE>
+inline TYPE
+LookupMap<TYPE>::Iterator::GetElement(TADDR* pFlags)
+{
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_PREJIT
+ // The current value for a compressed map is actually a map-based RVA. A zero RVA indicates a NULL pointer
+ // but otherwise we can recover the full pointer by adding the address of the map we're iterating.
+ // Note that most LookupMaps are embedded structures (in Module) so we can't directly dac_cast<TADDR> our
+ // "this" pointer for DAC builds. Instead we have to use the slightly slower (in DAC) but more flexible
+ // PTR_HOST_INT_TO_TADDR() which copes with interior host pointers.
+ if (m_map->MapIsCompressed())
+ {
+ TADDR value = m_currentEntry ? PTR_HOST_INT_TO_TADDR(m_map) + m_currentEntry : 0;
+
+ if (pFlags)
+ *pFlags = (value & m_map->supportedFlags);
+
+ return (TYPE)(value & ~m_map->supportedFlags);
+ }
+ else
+#endif // FEATURE_PREJIT
+ return GetValueAt(m_map->GetIndexPtr(m_index), pFlags, m_map->supportedFlags);
+}
+
+inline PTR_Assembly Module::GetAssembly() const
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return m_pAssembly;
+}
+
+inline MethodDesc *Module::LookupMethodDef(mdMethodDef token)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(TypeFromToken(token) == mdtMethodDef);
+ g_IBCLogger.LogRidMapAccess( MakePair( this, token ) );
+
+ return m_MethodDefToDescMap.GetElement(RidFromToken(token));
+}
+
+inline MethodDesc *Module::LookupMemberRefAsMethod(mdMemberRef token)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(TypeFromToken(token) == mdtMemberRef);
+ g_IBCLogger.LogRidMapAccess( MakePair( this, token ) );
+ BOOL flags = FALSE;
+ PTR_MemberRef pMemberRef = m_pMemberRefToDescHashTable->GetValue(token, &flags);
+ return flags ? dac_cast<PTR_MethodDesc>(pMemberRef) : NULL;
+}
+
+inline Assembly *Module::LookupAssemblyRef(mdAssemblyRef token)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(TypeFromToken(token) == mdtAssemblyRef);
+ PTR_Module module= m_ManifestModuleReferencesMap.GetElement(RidFromToken(token));
+ return module?module->GetAssembly():NULL;
+}
+
+#ifndef DACCESS_COMPILE
+inline void Module::ForceStoreAssemblyRef(mdAssemblyRef token, Assembly *value)
+{
+ WRAPPER_NO_CONTRACT; // THROWS/GC_NOTRIGGER/INJECT_FAULT()/MODE_ANY
+ _ASSERTE(value->GetManifestModule());
+ _ASSERTE(TypeFromToken(token) == mdtAssemblyRef);
+
+ m_ManifestModuleReferencesMap.AddElement(this, RidFromToken(token), value->GetManifestModule());
+}
+
+inline void Module::StoreAssemblyRef(mdAssemblyRef token, Assembly *value)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(value->GetManifestModule());
+ _ASSERTE(TypeFromToken(token) == mdtAssemblyRef);
+ m_ManifestModuleReferencesMap.TrySetElement(RidFromToken(token), value->GetManifestModule());
+}
+
+inline mdAssemblyRef Module::FindAssemblyRef(Assembly *targetAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return m_ManifestModuleReferencesMap.Find(targetAssembly->GetManifestModule()) | mdtAssemblyRef;
+}
+
+#endif //DACCESS_COMPILE
+
+inline BOOL Module::IsEditAndContinueCapable()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return IsEditAndContinueCapable(m_file) && !GetAssembly()->IsDomainNeutral() && !this->IsReflection();
+}
+
+FORCEINLINE PTR_DomainLocalModule Module::GetDomainLocalModule(AppDomain *pDomain)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ if (!Module::IsEncodedModuleIndex(GetModuleID()))
+ {
+ return m_ModuleID;
+ }
+
+#if !defined(DACCESS_COMPILE)
+ if (pDomain == NULL)
+ {
+ pDomain = GetAppDomain();
+ }
+#endif // DACCESS_COMPILE
+
+ // If the module is domain neutral, then you must supply an AppDomain argument.
+ // Use GetDomainLocalModule() if you want to rely on the current AppDomain
+ _ASSERTE(pDomain != NULL);
+
+ return pDomain->GetDomainLocalBlock()->GetModuleSlot(GetModuleIndex());
+}
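+
+// Note (explanatory): for a non-domain-neutral module, m_ModuleID is a direct
+// DomainLocalModule pointer (even, so not an encoded index) and is returned as-is;
+// for a domain-neutral module, the tagged ModuleIndex is used to look up the slot
+// in the supplied domain's DomainLocalBlock.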
+
+FORCEINLINE ULONG Module::GetNumberOfActivations()
+{
+ _ASSERTE(m_Crst.OwnedByCurrentThread());
+ return m_dwNumberOfActivations;
+}
+
+FORCEINLINE ULONG Module::IncrementNumberOfActivations()
+{
+ CrstHolder lock(&m_Crst);
+ return ++m_dwNumberOfActivations;
+}
+
+
+#ifdef FEATURE_PREJIT
+
+#include "nibblestream.h"
+
+FORCEINLINE BOOL Module::FixupDelayList(TADDR pFixupList)
+{
+ WRAPPER_NO_CONTRACT;
+
+ COUNT_T nImportSections;
+ PTR_CORCOMPILE_IMPORT_SECTION pImportSections = GetImportSections(&nImportSections);
+
+ return FixupDelayListAux(pFixupList, this, &Module::FixupNativeEntry, pImportSections, nImportSections, GetNativeOrReadyToRunImage());
+}
+
+template<typename Ptr, typename FixupNativeEntryCallback>
+BOOL Module::FixupDelayListAux(TADDR pFixupList,
+ Ptr pThis, FixupNativeEntryCallback pfnCB,
+ PTR_CORCOMPILE_IMPORT_SECTION pImportSections, COUNT_T nImportSections,
+ PEDecoder * pNativeImage)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(pFixupList != NULL);
+ }
+ CONTRACTL_END;
+
+ // Fixup Encoding:
+ // ==============
+ //
+ // The fixup list is grouped into tables. Within each table, the fixups are a
+ // sorted list of INDEXes. The first INDEX in each table is encoded entirely,
+ // but the remaining INDEXes store only the delta increment from the previous INDEX.
+ // The encoding/compression is done by ZapperModule::CompressFixupList().
+ //
+ //-------------------------------------------------------------------------
+ // Here is the detailed description :
+ //
+ // The first entry stores the m_pFixupBlob table index.
+ //
+ // The next entry stores the INDEX into the particular table.
+ // An "entry" can be one or more nibbles. 3 bits of a nibble are used
+ // to store the value, and the top bit indicates if the following nibble
+ // contains rest of the value. If the top bit is not set, then this
+ // nibble is the last part of the value.
+ //
+ // If the next entry is non-0, it is another (delta-encoded relative to the
+ // previous INDEX) INDEX belonging to the same table. If the next entry is 0,
+ // it indicates that all INDEXes in this table are done.
+ //
+ // When the fixups for the previous table are done, there is an entry to
+ // indicate the next table (delta-encoded relative to the previous table).
+ // If the entry is 0, then it is the end of the entire fixup list.
+ //
+ //-------------------------------------------------------------------------
+ // This is what the fixup list looks like:
+ //
+ // CorCompileTokenTable index
+ // absolute INDEX
+ // INDEX delta
+ // ...
+ // INDEX delta
+ // 0
+ // CorCompileTokenTable index delta
+ // absolute INDEX
+ // INDEX delta
+ // ...
+ // INDEX delta
+ // 0
+ // CorCompileTokenTable index delta
+ // absolute INDEX
+ // INDEX delta
+ // ...
+ // INDEX delta
+ // 0
+ // 0
+ //
+ //
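+ // Worked nibble example (illustrative, assuming the writer emits high-order
+ // 3-bit groups first): the value 9 (binary 001 001) is encoded as nibble 0x9
+ // (continuation bit 0x8 | 001) followed by nibble 0x1 (final group); any value
+ // <= 7 fits in a single nibble with the top bit clear.
+ //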
+
+ NibbleReader reader(PTR_BYTE(pFixupList), (SIZE_T)-1);
+
+ //
+ // The fixups are sorted by the sections they point to.
+ // Walk the set of fixups in every section
+ //
+
+ DWORD curTableIndex = reader.ReadEncodedU32();
+
+ while (TRUE)
+ {
+ // Get the correct section to work with. This is stored in the first two nibbles (first byte)
+
+ _ASSERTE(curTableIndex < nImportSections);
+ PTR_CORCOMPILE_IMPORT_SECTION pImportSection = pImportSections + curTableIndex;
+
+ COUNT_T cbData;
+ TADDR pData = pNativeImage->GetDirectoryData(&pImportSection->Section, &cbData);
+
+ // Now iterate thru the fixup entries
+ SIZE_T fixupIndex = reader.ReadEncodedU32(); // Accumulate the real index from the delta encoded indices
+
+ while (TRUE)
+ {
+ CONSISTENCY_CHECK(fixupIndex * sizeof(TADDR) < cbData);
+
+ if (!(pThis->*pfnCB)(pImportSection, fixupIndex, dac_cast<PTR_SIZE_T>(pData + fixupIndex * sizeof(TADDR))))
+ return FALSE;
+
+ int delta = reader.ReadEncodedU32();
+
+ // Delta of 0 means end of entries in this table
+ if (delta == 0)
+ break;
+
+ fixupIndex += delta;
+ }
+
+ unsigned tableIndex = reader.ReadEncodedU32();
+
+ if (tableIndex == 0)
+ break;
+
+ curTableIndex = curTableIndex + tableIndex;
+
+ } // Done with all entries in this table
+
+ return TRUE;
+}
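+
+// Worked trace (illustrative): for the nibble stream 1, 2, 3, 0, 1, 4, 0, 0 the loop
+// above fixes up entries 2 and 5 (2+3) of import section 1, then entry 4 of import
+// section 2 (1+1), and the final 0 terminates the whole list.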
+
+#endif //FEATURE_PREJIT
+
+inline PTR_LoaderAllocator Module::GetLoaderAllocator()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetAssembly()->GetLoaderAllocator();
+}
+
+inline MethodTable* Module::GetDynamicClassMT(DWORD dynamicClassID)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_cDynamicEntries > dynamicClassID);
+ return m_pDynamicStaticsInfo[dynamicClassID].pEnclosingMT;
+}
+
+inline ReJitManager * Module::GetReJitManager()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetDomain()->GetReJitManager();
+}
+
+#endif // CEELOAD_INL_
diff --git a/src/vm/ceemain.cpp b/src/vm/ceemain.cpp
new file mode 100644
index 0000000000..cf4bc29840
--- /dev/null
+++ b/src/vm/ceemain.cpp
@@ -0,0 +1,5012 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: CEEMAIN.CPP
+// ===========================================================================
+//
+
+//
+//
+// The CLR code base uses a hyperlink feature of the HyperAddin plugin for Visual Studio. If you don't see
+// 'HyperAddin' in your Visual Studio menu bar you don't have this support. To get it type
+//
+// \\clrmain\tools\installCLRAddins
+//
+// After installing HyperAddin, your first run of VS should be as an administrator so HyperAddin can update
+// some registry information.
+//
+// At this point the code: prefixes become hyperlinks in Visual Studio and life is good. See
+// http://mswikis/clr/dev/Pages/CLR%20Team%20Commenting.aspx for more information
+//
+// There is a bug associated with Visual Studio where it does not recognise the hyperlink if there is a ::
+// preceding it on the same line. Since C++ uses :: as a namespace separator, this can often mean that the
+// second hyperlink on a line does not work. To work around this it is better to use '.' instead of :: as
+// the namespace separators in code: hyperlinks.
+//
+// #StartHere
+// #TableOfContents The .NET Runtime Table of contents
+//
+// This comment is meant to be a nexus that allows you to jump quickly to various interesting parts of the
+// runtime.
+//
+// You can refer to product studio bugs using urls like the following
+// * http://bugcheck/bugs/DevDivBugs/2320.asp
+// * http://bugcheck/bugs/VSWhidbey/601210.asp
+//
+// Dev10 Bugs can be added with URLs like the following (for Dev10 bug 671409)
+// * http://tkbgitvstfat01:8090/wi.aspx?id=671409
+//
+//*************************************************************************************************
+//
+// * Introduction to the runtime file:../../doc/BookOfTheRuntime/Introduction/BOTR%20Introduction.docx
+//
+// #MajorDataStructures. The major data structures associated with the runtime are
+// * code:Thread (see file:threads.h#ThreadClass) - the additional thread state the runtime needs.
+// * code:AppDomain - The managed version of a process
+// * code:Assembly - The unit of deployment and versioning (may be several DLLs but often is only one).
+// * code:Module - represents a Module (DLL or EXE).
+// * code:MethodTable - represents the 'hot' part of a type (needed during normal execution)
+// * code:EEClass - represents the 'cold' part of a type (used during compilation, interop, ...)
+// * code:MethodDesc - represents a Method
+// * code:FieldDesc - represents a Field.
+// * code:Object - represents an object on the GC heap allocated with code:Alloc
+//
+// * ECMA specifications
+// * Partition I Concepts
+// http://download.microsoft.com/download/D/C/1/DC1B219F-3B11-4A05-9DA3-2D0F98B20917/Partition%20I%20Architecture.doc
+// * Partition II Meta Data
+// http://download.microsoft.com/download/D/C/1/DC1B219F-3B11-4A05-9DA3-2D0F98B20917/Partition%20II%20Metadata.doc
+// * Partition III IL
+// http://download.microsoft.com/download/D/C/1/DC1B219F-3B11-4A05-9DA3-2D0F98B20917/Partition%20III%20CIL.doc
+//
+// * Serge Lidin (who worked on the CLR and owned ILASM / ILDASM for a long time) wrote a good book on IL
+// * Expert .NET 2.0 IL Assembler http://www.amazon.com/Expert-NET-2-0-IL-Assembler/dp/1590596463
+//
+// * This is also a pretty nice overview of what the CLR is at
+// http://msdn2.microsoft.com/en-us/netframework/aa497266.aspx
+//
+// * code:EEStartup - This routine must be called before any interesting runtime services are used. It is
+// invoked as part of mscorwks's DllMain logic.
+// * code:#EEShutDown - Code called before we shut down the EE.
+//
+// * file:..\inc\corhdr.h#ManagedHeader - From a data structure point of view, this is the entry point into
+// the runtime. This is how all other data in the EXE are found.
+//
+// * code:ICorJitCompiler#EEToJitInterface - This is the interface from the EE to the Just In Time (JIT)
+// compiler. The interface to the JIT is relatively simple (compileMethod), however the EE provides a
+// rich set of callbacks so the JIT can get all the information it needs. See also
+// file:../../doc/BookOfTheRuntime/JIT/JIT%20Design.doc for general information on the JIT.
+//
+// * code:VirtualCallStubManager - This is the main class that implements interface dispatch
+//
+// * Precode - Every method needs an entry point for other code to call, even if that native code does not
+// actually exist yet. To support this, methods can have a code:Precode, which is an entry point that
+// always exists and will call the JIT compiler if the code does not yet exist.
+//
+// * NGEN - NGen stands for Native code GENeration and it is the runtime's way of precompiling IL and IL
+// Meta-data into native code and runtime data structures. See
+// file:../../doc/BookOfTheRuntime/NGEN/NGENDesign.doc for an overview. At compilation time the most
+// fundamental data structure is the code:ZapNode, which represents something that needs to go into the
+// NGEN image.
+//
+// * What is cooperative / preemptive mode? file:threads.h#CooperativeMode and
+// file:threads.h#SuspendingTheRuntime
+// * Garbage collection - file:gc.cpp#Overview
+// * code:AppDomain - The managed version of a process.
+// * Calling Into the runtime (FCALLs QCalls) file:../../doc/BookOfTheRuntime/mscorlib/mscorlibDesign.doc
+// * Exceptions - file:../../doc/BookOfTheRuntime/ManagedEH/Design.doc. The most important routine to start
+// with is code:COMPlusFrameHandler, which is the routine that we hook up to get called when an unmanaged
+// exception happens.
+// * Constrained Execution Regions (reliability) file:../../doc/BookOfTheRuntime/CER/CERDesign.doc
+// * Assembly Loading file:../../doc/BookOfTheRuntime/AssemblyLoader/AssemblyLoader.doc
+// * Fusion and loading files file:../../doc/BookOfTheRuntime/AssemblyLoader/FusionDesign.doc
+// * Strings file:../../doc/BookOfTheRuntime/BCL/SystemStringDesign.doc
+// * Profiling file:../../doc/BookOfTheRuntime/DiagnosticServices/ProfilingAPIDesign.doc
+// * Remoting file:../../doc/BookOfTheRuntime/EERemotingSupport/RemotingDesign.doc
+// * Managed Debug Assistants file:../../doc/BookOfTheRuntime/MDA/MDADesign.doc
+// * FCALLS QCALLS (calling into the runtime from managed code)
+// file:../../doc/BookOfTheRuntime/Mscorlib/MscorlibDesign.doc
+// * Reflection file:../../doc/BookOfTheRuntime/Reflection/ReflectionDesign.doc
+// * Security
+// * file:../../doc/BookOfTheRuntime/RuntimeSecurity/RuntimeSecurityDesign.doc
+// * file:../../doc/BookOfTheRuntime/LoadtimeSecurity/DeclarativeSecurity-Design.doc
+// * file:../../doc/BookOfTheRuntime/LoadtimeSecurity/StrongName.doc
+// * file:../../doc/BookOfTheRuntime/RuntimeSecurity/ClickOnce Activation.doc
+// * file:../../doc/BookOfTheRuntime/RuntimeSecurity/Cryptography.doc
+// * file:../../doc/BookOfTheRuntime/RuntimeSecurity/DemandEvalDesign.doc
+// * Event Tracing for Windows
+// * file:../inc/eventtrace.h#EventTracing -
+// * This is the main file dealing with event tracing in the CLR
+// * The implementation is available in file:eventtrace.cpp
+// * file:../inc/eventtrace.h#CEtwTracer - This is the main class dealing with event tracing in the CLR.
+// Follow the link for more information on how this feature has been implemented
+// * http://mswikis/clr/dev/Pages/CLR%20ETW%20Events%20Wiki.aspx - Follow the link for more information on how to
+// use this instrumentation feature.
+
+// ----------------------------------------------------------------------------------------------------
+// Features in the runtime that have been given hyperlinks
+//
+// * code:Nullable#NullableFeature - the Nullable<T> type has special runtime semantics associated with
+// boxing; this describes that feature.
+
+#include "common.h"
+
+#include "vars.hpp"
+#include "log.h"
+#include "ceemain.h"
+#include "clsload.hpp"
+#include "object.h"
+#include "hash.h"
+#include "ecall.h"
+#include "ceemain.h"
+#include "dllimport.h"
+#include "syncblk.h"
+#include "eeconfig.h"
+#include "stublink.h"
+#include "handletable.h"
+#include "method.hpp"
+#include "codeman.h"
+#include "frames.h"
+#include "threads.h"
+#include "stackwalk.h"
+#include "gc.h"
+#include "interoputil.h"
+#include "security.h"
+#include "fieldmarshaler.h"
+#include "dbginterface.h"
+#include "eedbginterfaceimpl.h"
+#include "debugdebugger.h"
+#include "cordbpriv.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "comdelegate.h"
+#include "appdomain.hpp"
+#include "perfcounters.h"
+#include "rwlock.h"
+#ifdef FEATURE_IPCMAN
+#include "ipcmanagerinterface.h"
+#endif // FEATURE_IPCMAN
+#include "eventtrace.h"
+#include "corhost.h"
+#include "binder.h"
+#include "olevariant.h"
+#include "comcallablewrapper.h"
+#include "apithreadstress.h"
+#include "ipcfunccall.h"
+#include "perflog.h"
+#include "../dlls/mscorrc/resource.h"
+#ifdef FEATURE_LEGACYSURFACE
+#include "nlsinfo.h"
+#endif
+#include "util.hpp"
+#include "shimload.h"
+#include "comthreadpool.h"
+#include "stackprobe.h"
+#include "posterror.h"
+#include "virtualcallstub.h"
+#include "strongnameinternal.h"
+#include "syncclean.hpp"
+#include "typeparse.h"
+#include "debuginfostore.h"
+#include "mdaassistants.h"
+#include "eemessagebox.h"
+#include "finalizerthread.h"
+#include "threadsuspend.h"
+
+#ifndef FEATURE_PAL
+#include "dwreport.h"
+#endif // !FEATURE_PAL
+
+#include "stringarraylist.h"
+#include "stubhelpers.h"
+#include "perfdefaults.h"
+
+#ifdef FEATURE_STACK_SAMPLING
+#include "stacksampler.h"
+#endif
+
+#include <shlwapi.h>
+
+#include "bbsweep.h"
+
+#ifndef FEATURE_CORECLR
+#include <metahost.h>
+#include "assemblyusagelogmanager.h"
+#endif
+
+#ifdef FEATURE_COMINTEROP
+#include "runtimecallablewrapper.h"
+#include "notifyexternals.h"
+#include "mngstdinterfaces.h"
+#include "rcwwalker.h"
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+#include "olecontexthelpers.h"
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+#ifdef PROFILING_SUPPORTED
+#include "proftoeeinterfaceimpl.h"
+#include "profilinghelper.h"
+#endif // PROFILING_SUPPORTED
+
+#include "newapis.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "synchronizationcontextnative.h" // For SynchronizationContextNative::Cleanup
+#endif
+
+#ifdef FEATURE_INTERPRETER
+#include "interpreter.h"
+#endif // FEATURE_INTERPRETER
+
+#ifndef FEATURE_FUSION
+#include "../binder/inc/coreclrbindercommon.h"
+#endif // !FEATURE_FUSION
+
+#ifdef FEATURE_UEF_CHAINMANAGER
+// This is required to register our UEF callback with the UEF chain manager
+#include <mscoruefwrapper.h>
+#endif // FEATURE_UEF_CHAINMANAGER
+
+#ifdef FEATURE_IPCMAN
+static HRESULT InitializeIPCManager(void);
+static void PublishIPCManager(void);
+static void TerminateIPCManager(void);
+#endif // FEATURE_IPCMAN
+
+static int GetThreadUICultureId(__out LocaleIDValue* pLocale); // TODO: This shouldn't use the LCID. We should rely on name instead
+
+static HRESULT GetThreadUICultureNames(__inout StringArrayList* pCultureNames);
+
+HRESULT EEStartup(COINITIEE fFlags);
+#ifdef FEATURE_FUSION
+extern "C" HRESULT STDMETHODCALLTYPE InitializeFusion();
+#endif
+
+#ifdef FEATURE_MIXEDMODE
+HRESULT PrepareExecuteDLLForThunk(HINSTANCE hInst,
+ DWORD dwReason,
+ LPVOID lpReserved);
+#endif // FEATURE_MIXEDMODE
+#ifndef FEATURE_CORECLR
+BOOL STDMETHODCALLTYPE ExecuteDLL(HINSTANCE hInst,
+ DWORD dwReason,
+ LPVOID lpReserved,
+ BOOL fFromThunk);
+#endif // !FEATURE_CORECLR
+
+BOOL STDMETHODCALLTYPE ExecuteEXE(HMODULE hMod);
+BOOL STDMETHODCALLTYPE ExecuteEXE(__in LPWSTR pImageNameIn);
+
+static void InitializeGarbageCollector();
+
+#ifdef DEBUGGING_SUPPORTED
+static void InitializeDebugger(void);
+static void TerminateDebugger(void);
+extern "C" HRESULT __cdecl CorDBGetInterface(DebugInterface** rcInterface);
+#endif // DEBUGGING_SUPPORTED
+
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+void* __stdcall GetCLRFunction(LPCSTR FunctionName);
+
+// Pointer to the activated CLR interface provided by the shim.
+ICLRRuntimeInfo *g_pCLRRuntime = NULL;
+
+#endif // !FEATURE_CORECLR && !CROSSGEN_COMPILE
+
+extern "C" IExecutionEngine* __stdcall IEE();
+
+// Remember how the last startup of EE went.
+HRESULT g_EEStartupStatus = S_OK;
+
+// Flag indicating if the EE has been started. This is set prior to initializing the default AppDomain, and so does not indicate that
+// the EE is fully able to execute arbitrary managed code. To ensure the EE is fully started, call EnsureEEStarted rather than just
+// checking this flag.
+Volatile<BOOL> g_fEEStarted = FALSE;
+
+// Flag indicating if the EE should be suspended on shutdown.
+BOOL g_fSuspendOnShutdown = FALSE;
+
+// Flag indicating if the finalizer thread should be suspended on shutdown.
+BOOL g_fSuspendFinalizerOnShutdown = FALSE;
+
+// Flag indicating if the EE was started up by a managed exe.
+BOOL g_fEEManagedEXEStartup = FALSE;
+
+// Flag indicating if the EE was started up by an IJW dll.
+BOOL g_fEEIJWStartup = FALSE;
+
+// Flag indicating if the EE was started up by COM.
+extern BOOL g_fEEComActivatedStartup;
+
+// Flag indicating that the EE was not started up by IJW, hosting, COM, or a managed exe.
+extern BOOL g_fEEOtherStartup;
+
+// The OS thread ID of the thread currently performing EE startup, or 0 if there is no such thread.
+DWORD g_dwStartupThreadId = 0;
+
+// Event to synchronize EE shutdown.
+static CLREvent * g_pEEShutDownEvent;
+
+static DangerousNonHostedSpinLock g_EEStartupLock;
+
+HRESULT InitializeEE(COINITIEE flags)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_EVENT_TRACE
+ if(!(g_fEEComActivatedStartup || g_fEEManagedEXEStartup || g_fEEIJWStartup))
+ g_fEEOtherStartup = TRUE;
+#endif // FEATURE_EVENT_TRACE
+ return EnsureEEStarted(flags);
+}
+
+// ---------------------------------------------------------------------------
+// %%Function: EnsureEEStarted()
+//
+// Description: Ensure the CLR is started.
+// ---------------------------------------------------------------------------
+HRESULT EnsureEEStarted(COINITIEE flags)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ if (g_fEEShutDown)
+ return E_FAIL;
+
+ HRESULT hr = E_FAIL;
+
+ // On non x86 platforms, when we load mscorlib.dll during EEStartup, we will
+ // re-enter _CorDllMain with a DLL_PROCESS_ATTACH for mscorlib.dll. We are
+ // far enough in startup that this is allowed, however we don't want to
+ // re-start the startup code so we need to check to see if startup has
+ // been initiated or completed before we call EEStartup.
+ //
+ // We do however want to make sure other threads block until the EE is started,
+ // which we will do further down.
+ if (!g_fEEStarted)
+ {
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+#if defined(FEATURE_CORECLR) && defined(FEATURE_APPX) && !defined(CROSSGEN_COMPILE)
+ STARTUP_FLAGS startupFlags = CorHost2::GetStartupFlags();
+ // On CoreCLR, the host is in charge of determining whether the process is AppX or not.
+ AppX::SetIsAppXProcess(!!(startupFlags & STARTUP_APPX_APP_MODEL));
+#endif
+
+#ifndef FEATURE_PAL
+ // The sooner we do this, the sooner we avoid probing registry entries.
+ // (Perf Optimization for VSWhidbey:113373.)
+ REGUTIL::InitOptionalConfigCache();
+#endif
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTaskManager *pHostTaskManager = CorHost2::GetHostTaskManager();
+ if (pHostTaskManager)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ pHostTaskManager->BeginThreadAffinity();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ BOOL bStarted=FALSE;
+
+ {
+ DangerousNonHostedSpinLockHolder lockHolder(&g_EEStartupLock);
+
+ // Now that we've acquired the lock, check again to make sure we aren't in
+ // the process of starting the CLR and that it hasn't already been fully started.
+ // At this point, if startup has already been initiated we don't have anything more to do.
+ // And if EEStartup already failed before, we don't do it again.
+ if (!g_fEEStarted && !g_fEEInit && SUCCEEDED (g_EEStartupStatus))
+ {
+ g_dwStartupThreadId = GetCurrentThreadId();
+
+ EEStartup(flags);
+ bStarted=g_fEEStarted;
+ hr = g_EEStartupStatus;
+
+ g_dwStartupThreadId = 0;
+ }
+ else
+ {
+ hr = g_EEStartupStatus;
+ if (SUCCEEDED(g_EEStartupStatus))
+ {
+ hr = S_FALSE;
+ }
+ }
+ }
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (pHostTaskManager)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ pHostTaskManager->EndThreadAffinity();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+#ifdef FEATURE_TESTHOOKS
+ if(bStarted)
+ TESTHOOKCALL(RuntimeStarted(RTS_INITIALIZED));
+#endif
+ END_ENTRYPOINT_NOTHROW;
+ }
+ else
+ {
+ //
+ // g_fEEStarted is TRUE, but startup may not be complete since we initialize the default AppDomain
+ // *after* setting that flag. g_fEEStarted is set inside of g_EEStartupLock, and that lock is
+ // not released until the EE is really started - so we can quickly check whether the EE is definitely
+ // started by checking if that lock is currently held. If it is not, then we know the other thread
+ // (that is actually doing the startup) has finished startup. If it is currently held, then we
+ // need to wait for the other thread to release it, which we do by simply acquiring the lock ourselves.
+ //
+ // We do not want to do this blocking if we are the thread currently performing EE startup. So we check
+ // that first.
+ //
+ // Note that the call to IsHeld here is an "acquire" barrier, as is acquiring the lock. And the release of
+ // the lock by the other thread is a "release" barrier, due to the volatile semantics in the lock's
+ // implementation. This assures us that once we observe the lock having been released, we are guaranteed
+ // to observe a fully-initialized EE.
+ //
+ // A note about thread affinity here: we're using the OS thread ID of the current thread without
+ // asking the host to pin us to this thread, as we did above. We can get away with this, because we are
+ // only interested in a particular thread ID (that of the "startup" thread) and *that* particular thread
+ // is already affinitized by the code above. So if we get that particular OS thread ID, we know for sure
+ // we are really the startup thread.
+ //
+ if (g_EEStartupLock.IsHeld() && g_dwStartupThreadId != GetCurrentThreadId())
+ {
+ DangerousNonHostedSpinLockHolder lockHolder(&g_EEStartupLock);
+ }
+
+ hr = g_EEStartupStatus;
+ if (SUCCEEDED(g_EEStartupStatus))
+ {
+ hr = S_FALSE;
+ }
+ }
+
+ return hr;
+}
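+
+// For reference, the logic above is a double-checked startup pattern. A minimal sketch of its shape
+// (illustrative only; it elides the hosting, test-hook, and AppX details handled above):
+//
+//     if (!g_fEEStarted)                 // first, lock-free check
+//     {
+//         DangerousNonHostedSpinLockHolder lockHolder(&g_EEStartupLock);
+//         if (!g_fEEStarted && !g_fEEInit && SUCCEEDED(g_EEStartupStatus))
+//             EEStartup(flags);          // we won the race; do the one-time work
+//     }
+//     else if (g_EEStartupLock.IsHeld() && g_dwStartupThreadId != GetCurrentThreadId())
+//     {
+//         // the starter is still running; acquiring the lock waits for it to finish
+//         DangerousNonHostedSpinLockHolder lockHolder(&g_EEStartupLock);
+//     }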
+
+
+#ifndef CROSSGEN_COMPILE
+
+// This is our Ctrl-C, Ctrl-Break, etc. handler.
+static BOOL WINAPI DbgCtrlCHandler(DWORD dwCtrlType)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#if defined(DEBUGGING_SUPPORTED)
+ // Note that if a managed-debugger is attached, it's actually attached with the native
+ // debugging pipeline and it will get a control-c notifications via native debug events.
+ // However, if we let the native debugging pipeline handle the event and send the notification
+ // to the debugger, then we break pre-V4 behaviour because we intercept handlers registered
+ // in-process. See Dev10 Bug 846455 for more information.
+ if (CORDebuggerAttached() &&
+ (dwCtrlType == CTRL_C_EVENT || dwCtrlType == CTRL_BREAK_EVENT))
+ {
+ return g_pDebugInterface->SendCtrlCToDebugger(dwCtrlType);
+ }
+ else
+#endif // DEBUGGING_SUPPORTED
+ {
+ g_fInControlC = true; // only for weakening assertions in checked build.
+ return FALSE; // keep looking for a real handler.
+ }
+}
+
+// A host can specify that it only wants one version of hosting interface to be used.
+BOOL g_singleVersionHosting;
+
+#ifndef FEATURE_CORECLR
+HRESULT STDMETHODCALLTYPE
+SetRuntimeInfo(
+ IUnknown * pUnk,
+ STARTUP_FLAGS dwStartupFlags,
+ LPCWSTR pwzHostConfig,
+ const CoreClrCallbacks ** ppClrCallbacks)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ ENTRY_POINT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(pwzHostConfig, NULL_OK));
+ } CONTRACTL_END;
+
+ ICLRRuntimeInfo *pRuntime;
+ HRESULT hr;
+
+ IfFailGo(pUnk->QueryInterface(IID_ICLRRuntimeInfo, (LPVOID *)&pRuntime));
+
+ IfFailGo(CorHost2::SetFlagsAndHostConfig(dwStartupFlags, pwzHostConfig, FALSE));
+
+ if (InterlockedCompareExchangeT(&g_pCLRRuntime, pRuntime, NULL) != NULL)
+ {
+ // already set, release this one
+ pRuntime->Release();
+ }
+ *ppClrCallbacks = &GetClrCallbacks();
+
+ErrExit:
+ return hr;
+}
+#endif // !FEATURE_CORECLR
+
+#ifndef FEATURE_CORECLR
+HRESULT InitializeHostConfigFile()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(return E_OUTOFMEMORY);
+ } CONTRACTL_END;
+
+ g_pszHostConfigFile = CorHost2::GetHostConfigFile();
+ g_dwHostConfigFile = (g_pszHostConfigFile == NULL ? 0 : wcslen(g_pszHostConfigFile));
+
+ return S_OK;
+}
+#endif // !FEATURE_CORECLR
+
+void InitializeStartupFlags()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ STARTUP_FLAGS flags = CorHost2::GetStartupFlags();
+
+#ifndef FEATURE_CORECLR
+ // If we are running under a requested performance default mode, honor any changes to startup flags
+ // In the future, we could make this conditional on the host telling us which subset of flags is
+ // valid to override. See file:PerfDefaults.h
+ flags = PerformanceDefaults::GetModifiedStartupFlags(flags);
+#endif // !FEATURE_CORECLR
+
+ if (flags & STARTUP_CONCURRENT_GC)
+ g_IGCconcurrent = 1;
+ else
+ g_IGCconcurrent = 0;
+
+#ifndef FEATURE_CORECLR // TODO: We can remove this. Retaining it now just to be safe
+ if (flags & STARTUP_SINGLE_VERSION_HOSTING_INTERFACE)
+ {
+ g_singleVersionHosting = TRUE;
+ }
+
+#ifndef FEATURE_CORECLR
+ g_pConfig->SetDisableCommitThreadStack(!CLRHosted() || (flags & STARTUP_DISABLE_COMMITTHREADSTACK));
+#else
+ g_pConfig->SetDisableCommitThreadStack(true);
+#endif
+
+ if(flags & STARTUP_LEGACY_IMPERSONATION)
+ g_pConfig->SetLegacyImpersonationPolicy();
+
+ if(flags & STARTUP_ALWAYSFLOW_IMPERSONATION)
+ g_pConfig->SetAlwaysFlowImpersonationPolicy();
+
+ if(flags & STARTUP_HOARD_GC_VM)
+ g_IGCHoardVM = 1;
+ else
+ g_IGCHoardVM = 0;
+
+#ifdef GCTRIMCOMMIT
+ if (flags & STARTUP_TRIM_GC_COMMIT)
+ g_IGCTrimCommit = 1;
+ else
+ g_IGCTrimCommit = 0;
+#endif
+
+ if(flags & STARTUP_ETW)
+ g_fEnableETW = TRUE;
+
+ if(flags & STARTUP_ARM)
+ g_fEnableARM = TRUE;
+#endif // !FEATURE_CORECLR
+
+ GCHeap::InitializeHeapType((flags & STARTUP_SERVER_GC) != 0);
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ g_dwGlobalSharePolicy = (flags&STARTUP_LOADER_OPTIMIZATION_MASK)>>1;
+#endif
+}
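+
+// For illustration: the flags read above typically originate from the host before the EE starts.
+// A sketch of host-side code (hypothetical snippet, not part of this file; the exact hosting
+// interface used varies):
+//
+//     ICLRRuntimeHost2 *pHost = ...;     // obtained from the shim / runtime entry points
+//     pHost->SetStartupFlags((STARTUP_FLAGS)(STARTUP_SERVER_GC | STARTUP_CONCURRENT_GC));
+//     pHost->Start();                    // EEStartup runs and InitializeStartupFlags() sees the flags
+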
+#endif // CROSSGEN_COMPILE
+
+
+#ifdef FEATURE_PREJIT
+// BBSweepStartFunction is the first function to execute in the BBT sweeper thread.
+// It calls WatchForSweepEvents, where we wait until a sweep occurs.
+DWORD __stdcall BBSweepStartFunction(LPVOID lpArgs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ class CLRBBSweepCallback : public ICLRBBSweepCallback
+ {
+ virtual HRESULT WriteProfileData()
+ {
+ BEGIN_ENTRYPOINT_NOTHROW
+ WRAPPER_NO_CONTRACT;
+ Module::WriteAllModuleProfileData(false);
+ END_ENTRYPOINT_NOTHROW;
+ return S_OK;
+ }
+ } clrCallback;
+
+ EX_TRY
+ {
+ g_BBSweep.WatchForSweepEvents(&clrCallback);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+
+ return 0;
+}
+#endif // FEATURE_PREJIT
+
+
+//-----------------------------------------------------------------------------
+
+#ifndef FEATURE_PAL
+// Defined by CRT
+extern "C"
+{
+ extern DWORD_PTR __security_cookie;
+ extern void __fastcall __security_check_cookie(DWORD_PTR cookie);
+}
+#endif // !FEATURE_PAL
+
+void InitGSCookie()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GSCookie * pGSCookiePtr = GetProcessGSCookiePtr();
+
+ DWORD oldProtection;
+ if(!ClrVirtualProtect((LPVOID)pGSCookiePtr, sizeof(GSCookie), PAGE_EXECUTE_READWRITE, &oldProtection))
+ {
+ ThrowLastError();
+ }
+
+#ifndef FEATURE_PAL
+ // The GSCookie cannot be in a writeable page
+ assert(((oldProtection & (PAGE_READWRITE|PAGE_WRITECOPY|PAGE_EXECUTE_READWRITE|
+ PAGE_EXECUTE_WRITECOPY|PAGE_WRITECOMBINE)) == 0));
+
+ // Forces VC cookie to be initialized.
+ void (__fastcall *pf)(DWORD_PTR cookie) = &__security_check_cookie;
+ pf = NULL;
+
+ GSCookie val = (GSCookie)(__security_cookie ^ GetTickCount());
+#else // !FEATURE_PAL
+ // REVIEW: Need something better for PAL...
+ GSCookie val = (GSCookie)GetTickCount();
+#endif // !FEATURE_PAL
+
+#ifdef _DEBUG
+ // In _DEBUG, always use the same value to make it easier to search for the cookie
+ val = (GSCookie) WIN64_ONLY(0x9ABCDEF012345678) NOT_WIN64(0x12345678);
+#endif
+
+ // Ensure the value is non-zero so we can test whether it is initialized. Also needed for ICorMethodInfo::getGSCookie()
+ if (val == 0)
+ val ++;
+ *pGSCookiePtr = val;
+
+ if(!ClrVirtualProtect((LPVOID)pGSCookiePtr, sizeof(GSCookie), oldProtection, &oldProtection))
+ {
+ ThrowLastError();
+ }
+}
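+
+// A minimal sketch of how the cookie set above is typically consumed (illustrative only; the real
+// consumers live in frame and stub code, and GetFrameGSCookiePtr below is a hypothetical accessor):
+//
+//     GSCookie *pFrameCookie = GetFrameGSCookiePtr();      // hypothetical per-frame cookie slot
+//     *pFrameCookie = *GetProcessGSCookiePtr();            // stamp the cookie on entry
+//     ...
+//     // on exit, a mismatch means a buffer overrun clobbered the frame
+//     _ASSERTE(*pFrameCookie == *GetProcessGSCookiePtr());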
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+void InitAssemblyUsageLogManager()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ g_pIAssemblyUsageLogGac = NULL;
+
+ AssemblyUsageLogManager::Config config;
+
+ config.wszLogDir = NULL;
+ config.cLogBufferSize = 32768;
+#ifdef FEATURE_APPX
+ config.uiLogRefreshInterval = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NGenAssemblyUsageLogRefreshInterval);
+#endif
+
+ NewArrayHolder<WCHAR> szCustomLogDir(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NGenAssemblyUsageLog));
+ config.wszLogDir = szCustomLogDir;
+
+ AssemblyUsageLogManager::Init(&config);
+
+ // Once the logger is initialized, create a log object for logging GAC loads.
+ AssemblyUsageLogManager::GetUsageLogForContext(W("fusion"), W("GAC"), &g_pIAssemblyUsageLogGac);
+}
+#endif
+
+// ---------------------------------------------------------------------------
+// %%Function: EEStartupHelper
+//
+// Parameters:
+// fFlags - Initialization flags for the engine. See the
+// EEStartupFlags enumerator for valid values.
+//
+// Returns:
+// S_OK - On success
+//
+// Description:
+// Reserved to initialize the EE runtime engine explicitly.
+// ---------------------------------------------------------------------------
+
+#ifndef IfFailGotoLog
+#define IfFailGotoLog(EXPR, LABEL) \
+do { \
+ hr = (EXPR);\
+ if(FAILED(hr)) { \
+ STRESS_LOG2(LF_STARTUP, LL_ALWAYS, "%s failed with code %x", #EXPR, hr);\
+ goto LABEL; \
+ } \
+ else \
+ STRESS_LOG1(LF_STARTUP, LL_ALWAYS, "%s completed", #EXPR);\
+} while (0)
+#endif
+
+#ifndef IfFailGoLog
+#define IfFailGoLog(EXPR) IfFailGotoLog(EXPR, ErrExit)
+#endif
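+
+// Usage sketch for the macros above (InitSomeSubsystem is a hypothetical HRESULT-returning
+// initializer, shown only to illustrate the expansion):
+//
+//     IfFailGoLog(InitSomeSubsystem());
+//
+// expands to roughly
+//
+//     hr = InitSomeSubsystem();
+//     if (FAILED(hr)) {
+//         STRESS_LOG2(LF_STARTUP, LL_ALWAYS, "%s failed with code %x", "InitSomeSubsystem()", hr);
+//         goto ErrExit;
+//     }
+//     else
+//         STRESS_LOG1(LF_STARTUP, LL_ALWAYS, "%s completed", "InitSomeSubsystem()");
+//
+// so every guarded startup step leaves a stress-log record whether it succeeds or fails.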
+
+void jitOnDllProcessAttach();
+
+
+void EEStartupHelper(COINITIEE fFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+#ifdef ENABLE_CONTRACTS_IMPL
+ {
+ extern void ContractRegressionCheck();
+ ContractRegressionCheck();
+ }
+#endif
+
+ HRESULT hr = S_OK;
+ static ConfigDWORD breakOnEELoad;
+ EX_TRY
+ {
+ g_fEEInit = true;
+
+#ifndef CROSSGEN_COMPILE
+
+#ifdef _DEBUG
+ DisableGlobalAllocStore();
+#endif //_DEBUG
+
+ ::SetConsoleCtrlHandler(DbgCtrlCHandler, TRUE/*add*/);
+
+#endif // CROSSGEN_COMPILE
+
+ // SString initialization
+ // This needs to be done before config because config uses SString::Empty()
+ SString::Startup();
+
+ // Initialize EEConfig
+ if (!g_pConfig)
+ {
+ IfFailGo(EEConfig::Setup());
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+ IfFailGo(InitializeHostConfigFile());
+ IfFailGo(g_pConfig->SetupConfiguration());
+#endif // !FEATURE_CORECLR && !CROSSGEN_COMPILE
+ }
+
+#ifdef CROSSGEN_COMPILE
+//ARM64TODO: Enable when jit is brought in
+ #if defined(_TARGET_ARM64_)
+ //_ASSERTE(!"ARM64:NYI");
+
+ #else
+ jitOnDllProcessAttach();
+ #endif // defined(_TARGET_ARM64_)
+#endif
+
+#ifndef CROSSGEN_COMPILE
+ // Initialize Numa and CPU group information
+ // Need to do this as early as possible. Used when creating the object handle
+ // table inside Ref_Initialization() before the GC is initialized.
+ NumaNodeInfo::InitNumaNodeInfo();
+ CPUGroupInfo::EnsureInitialized();
+
+#ifndef FEATURE_CORECLR
+ // Check in EEConfig whether a workload-specific set of performance defaults have been requested
+ // This needs to be done before InitializeStartupFlags in case one is to be overridden
+ PerformanceDefaults::InitializeForScenario(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_PerformanceScenario));
+#endif
+
+ // Initialize global configuration settings based on startup flags
+ // This needs to be done before the EE has started
+ InitializeStartupFlags();
+
+ InitThreadManager();
+ STRESS_LOG0(LF_STARTUP, LL_ALWAYS, "Returned successfully from InitThreadManager");
+
+#ifdef FEATURE_EVENT_TRACE
+ // Initialize event tracing early so we can trace CLR startup time events.
+ InitializeEventTracing();
+
+ // Fire the EE startup ETW event
+ ETWFireEvent(EEStartupStart_V1);
+
+ // Fire the runtime information ETW event
+ ETW::InfoLog::RuntimeInformation(ETW::InfoLog::InfoStructs::Normal);
+#endif // FEATURE_EVENT_TRACE
+
+#ifdef FEATURE_IPCMAN
+ // Give PerfMon a chance to hook up to us
+ // Do this both *before* and *after* ipcman init so corperfmonext.dll
+ // has a chance to release stale private blocks that IPCMan could collide with.
+ // Do this early to maximize the window between the PerfMon refresh and IPC block creation.
+ IPCFuncCallSource::DoThreadSafeCall();
+#endif // FEATURE_IPCMAN
+
+ InitGSCookie();
+
+ Frame::Init();
+
+#ifdef FEATURE_TESTHOOKS
+ IfFailGo(CLRTestHookManager::CheckConfig());
+#endif
+
+#endif // CROSSGEN_COMPILE
+
+#ifndef FEATURE_CORECLR
+ // Ensure initialization of Apphacks environment variables
+ GetGlobalCompatibilityFlags();
+#endif // !FEATURE_CORECLR
+
+#ifdef STRESS_LOG
+ if (REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_StressLog, g_pConfig->StressLog ()) != 0) {
+ unsigned facilities = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_LogFacility, LF_ALL);
+ unsigned level = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_LogLevel, LL_INFO1000);
+ unsigned bytesPerThread = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_StressLogSize, STRESSLOG_CHUNK_SIZE * 4);
+ unsigned totalBytes = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_TotalStressLogSize, STRESSLOG_CHUNK_SIZE * 1024);
+ StressLog::Initialize(facilities, level, bytesPerThread, totalBytes, GetModuleInst());
+ g_pStressLog = &StressLog::theLog;
+ }
+#endif
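+
+ // For illustration: the stress-log knobs above come from the standard CLR configuration
+ // mechanism, so (assuming the usual COMPlus_ environment-variable prefix) a debugging session
+ // might set, for example:
+ //
+ //     set COMPlus_StressLog=1
+ //     set COMPlus_StressLogSize=65536
+ //     set COMPlus_TotalStressLogSize=16777216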
+
+#ifdef LOGGING
+ InitializeLogging();
+#endif
+
+#ifdef ENABLE_PERF_LOG
+ PerfLog::PerfLogInitialize();
+#endif //ENABLE_PERF_LOG
+
+ STRESS_LOG0(LF_STARTUP, LL_ALWAYS, "===================EEStartup Starting===================");
+
+#ifndef CROSSGEN_COMPILE
+#ifndef FEATURE_PAL
+ IfFailGoLog(EnsureRtlFunctions());
+#endif // !FEATURE_PAL
+ InitEventStore();
+#endif
+
+ // Fusion
+#ifdef FEATURE_FUSION
+ {
+ ETWOnStartup (FusionInit_V1, FusionInitEnd_V1);
+ IfFailGoLog(InitializeFusion());
+ }
+#else // FEATURE_FUSION
+ // Initialize the general Assembly Binder infrastructure
+ IfFailGoLog(CCoreCLRBinderHelper::Init());
+#endif // FEATURE_FUSION
+
+ if (g_pConfig != NULL)
+ {
+ IfFailGoLog(g_pConfig->sync());
+ }
+
+ if (breakOnEELoad.val(CLRConfig::UNSUPPORTED_BreakOnEELoad) == 1)
+ {
+#ifdef _DEBUG
+ _ASSERTE(!"Start loading EE!");
+#else
+ DebugBreak();
+#endif
+ }
+
+#ifdef ENABLE_STARTUP_DELAY
+ PREFIX_ASSUME(NULL != g_pConfig);
+ if (g_pConfig->StartupDelayMS())
+ {
+ ClrSleepEx(g_pConfig->StartupDelayMS(), FALSE);
+ }
+#endif
+
+ // Monitors, Crsts, and SimpleRWLocks all use the same spin heuristics
+ // Cache the (potentially user-overridden) values now so they are accessible from asm routines
+ InitializeSpinConstants();
+
+#ifndef CROSSGEN_COMPILE
+
+#if defined(STRESS_HEAP) && defined(_DEBUG) && !defined(FEATURE_CORECLR)
+ // TODO: is this still an issue?
+ // There is a race that causes random AVs on dual proc boxes
+ // that we suspect is due to memory coherency problems (see Whidbey bug 2360)
+ // Avoid the issue by making the box effectively single proc.
+ if (GCStress<cfg_instr>::IsEnabled() &&
+ g_SystemInfo.dwNumberOfProcessors > 1)
+ SetProcessAffinityMask(GetCurrentProcess(),
+ 1 << (DbgGetEXETimeStamp() % g_SystemInfo.dwNumberOfProcessors));
+#endif // STRESS_HEAP && _DEBUG && !FEATURE_CORECLR
+
+#ifdef FEATURE_PREJIT
+ // Initialize the sweeper thread. This violates our hosting rules, so
+ // we only do it in the non-hosted case.
+ if (g_pConfig->GetZapBBInstr() != NULL && !CLRTaskHosted())
+ {
+ DWORD threadID;
+ HANDLE hBBSweepThread = ::CreateThread(NULL,
+ 0,
+ (LPTHREAD_START_ROUTINE) BBSweepStartFunction,
+ NULL,
+ 0,
+ &threadID);
+ _ASSERTE(hBBSweepThread);
+ g_BBSweep.SetBBSweepThreadHandle(hBBSweepThread);
+ }
+#endif // FEATURE_PREJIT
+
+#ifdef FEATURE_IPCMAN
+ // Initialize all our InterProcess Communications with COM+
+ IfFailGoLog(InitializeIPCManager());
+#endif // FEATURE_IPCMAN
+
+#ifdef ENABLE_PERF_COUNTERS
+ hr = PerfCounters::Init();
+ _ASSERTE(SUCCEEDED(hr));
+ IfFailGo(hr);
+#endif
+
+#ifdef FEATURE_IPCMAN
+ // Marks the data in the IPC blocks as initialized so that readers know
+ // that it is safe to read data from the blocks
+ PublishIPCManager();
+#endif //FEATURE_IPCMAN
+
+#ifdef FEATURE_INTERPRETER
+ Interpreter::Initialize();
+#endif // FEATURE_INTERPRETER
+
+ StubManager::InitializeStubManagers();
+
+#ifndef FEATURE_PAL
+ {
+ // Record mscorwks geometry
+ PEDecoder pe(g_pMSCorEE);
+
+ g_runtimeLoadedBaseAddress = (SIZE_T)pe.GetBase();
+ g_runtimeVirtualSize = (SIZE_T)pe.GetVirtualSize();
+ InitCodeAllocHint(g_runtimeLoadedBaseAddress, g_runtimeVirtualSize, GetRandomInt(64));
+ }
+#endif // !FEATURE_PAL
+
+#endif // CROSSGEN_COMPILE
+
+ // Set up the cor handle map. This map is used to load assemblies in
+ // memory instead of using the normal system load path.
+ PEImage::Startup();
+
+ AccessCheckOptions::Startup();
+
+ MscorlibBinder::Startup();
+
+ Stub::Init();
+ StubLinkerCPU::Init();
+
+#ifndef CROSSGEN_COMPILE
+
+ // Initialize remoting
+#ifdef FEATURE_REMOTING
+ CRemotingServices::Initialize();
+#endif // FEATURE_REMOTING
+
+ // weak_short, weak_long, strong; no pin
+ if (!Ref_Initialize())
+ IfFailGo(E_OUTOFMEMORY);
+
+ // Initialize contexts
+ Context::Initialize();
+
+ g_pEEShutDownEvent = new CLREvent();
+ g_pEEShutDownEvent->CreateManualEvent(FALSE);
+
+#ifdef FEATURE_RWLOCK
+ // Initialize RWLocks
+ CRWLock::ProcessInit();
+#endif // FEATURE_RWLOCK
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ // Initialize debugger manager
+ CCLRDebugManager::ProcessInit();
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#ifdef FEATURE_IPCMAN
+ // Initialize CCLRSecurityAttributeManager
+ CCLRSecurityAttributeManager::ProcessInit();
+#endif // FEATURE_IPCMAN
+
+ VirtualCallStubManager::InitStatic();
+
+ GCInterface::m_MemoryPressureLock.Init(CrstGCMemoryPressure);
+
+#ifndef FEATURE_CORECLR
+ // Initialize Assembly Usage Logger
+ InitAssemblyUsageLogManager();
+#endif
+
+#endif // CROSSGEN_COMPILE
+
+ // Set up the domains. Threads are started in a default domain.
+
+ // Static initialization
+ PEAssembly::Attach();
+ BaseDomain::Attach();
+ SystemDomain::Attach();
+
+ // Start up the EE, initializing all the global variables
+ ECall::Init();
+
+ COMDelegate::Init();
+
+ ExecutionManager::Init();
+
+#ifndef CROSSGEN_COMPILE
+
+#ifndef FEATURE_PAL
+ // Watson initialization must precede InitializeDebugger() and InstallUnhandledExceptionFilter()
+ // because on CoreCLR, when Watson is enabled, the debugging service needs to be enabled and the UEF will be used.
+ if (!InitializeWatson(fFlags))
+ {
+ IfFailGo(E_FAIL);
+ }
+
+ // Note: In Windows 7, the OS will take over the job of error reporting, and so most
+ // of our watson code should not be used. In such cases, we will however still need
+ // to provide some services to windows error reporting, such as computing bucket
+ // parameters for a managed unhandled exception.
+ if (RunningOnWin7() && IsWatsonEnabled() && !RegisterOutOfProcessWatsonCallbacks())
+ {
+ IfFailGo(E_FAIL);
+ }
+#endif // !FEATURE_PAL
+
+#ifdef DEBUGGING_SUPPORTED
+ if(!NingenEnabled())
+ {
+ // Initialize the debugging services. This must be done before any
+ // EE thread objects are created, and before any classes or
+ // modules are loaded.
+ InitializeDebugger(); // throws on error
+ }
+#endif // DEBUGGING_SUPPORTED
+
+#ifdef MDA_SUPPORTED
+ ManagedDebuggingAssistants::EEStartupActivation();
+#endif
+
+#ifdef PROFILING_SUPPORTED
+ // Initialize the profiling services.
+ hr = ProfilingAPIUtility::InitializeProfiling();
+
+ _ASSERTE(SUCCEEDED(hr));
+ IfFailGo(hr);
+#endif // PROFILING_SUPPORTED
+
+ InitializeExceptionHandling();
+
+ //
+ // Install our global exception filter
+ //
+ if (!InstallUnhandledExceptionFilter())
+ {
+ IfFailGo(E_FAIL);
+ }
+
+ // throws on error
+ SetupThread();
+
+#ifdef DEBUGGING_SUPPORTED
+ // Notify debugger once the first thread is created to finish initialization.
+ if (g_pDebugInterface != NULL)
+ {
+ g_pDebugInterface->StartupPhase2(GetThread());
+ }
+#endif
+
+#ifdef FEATURE_IPCMAN
+ // Give PerfMon a chance to hook up to us
+ // Do this both *before* and *after* ipcman init so corperfmonext.dll
+ // has a chance to release stale private blocks that IPCMan could collide with.
+ IPCFuncCallSource::DoThreadSafeCall();
+ STRESS_LOG0(LF_STARTUP, LL_ALWAYS, "Returned successfully from second call to IPCFuncCallSource::DoThreadSafeCall");
+#endif // FEATURE_IPCMAN
+
+ InitPreStubManager();
+
+#ifdef FEATURE_COMINTEROP
+ InitializeComInterop();
+#endif // FEATURE_COMINTEROP
+
+ StubHelpers::Init();
+ NDirect::Init();
+
+ // Before setting up the execution manager, initialize the first part
+ // of the JIT helpers.
+ InitJITHelpers1();
+ InitJITHelpers2();
+
+ SyncBlockCache::Attach();
+
+ // Set up the sync block
+ SyncBlockCache::Start();
+
+ StackwalkCache::Init();
+
+ // Start up security
+ Security::Start();
+
+ AppDomain::CreateADUnloadStartEvent();
+
+ // In CoreCLR, clrjit is compiled into the runtime, but the stack overflow (SO) work in clrjit has not been done.
+#ifdef FEATURE_STACK_PROBE
+ if (CLRHosted() && GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
+ {
+ InitStackProbes();
+ }
+#endif
+
+ InitializeGarbageCollector();
+
+ InitializePinHandleTable();
+
+#ifdef DEBUGGING_SUPPORTED
+ // Make a call to publish the DefaultDomain for the debugger
+ // This should be done before assemblies/modules are loaded into it (i.e. SystemDomain::Init)
+ // and after it's OK to switch GC modes and synchronize for sending events to the debugger.
+ // @dbgtodo synchronization: this can probably be simplified in V3
+ LOG((LF_CORDB | LF_SYNC | LF_STARTUP, LL_INFO1000, "EEStartup: adding default domain 0x%x\n",
+ SystemDomain::System()->DefaultDomain()));
+ SystemDomain::System()->PublishAppDomainAndInformDebugger(SystemDomain::System()->DefaultDomain());
+#endif
+
+#ifndef FEATURE_CORECLR
+ ExistingOobAssemblyList::Init();
+#endif
+
+#endif // CROSSGEN_COMPILE
+
+ SystemDomain::System()->Init();
+
+#ifdef PROFILING_SUPPORTED
+ // <TODO>This is to compensate for the DefaultDomain workaround contained in
+ // SystemDomain::Attach in which the first user domain is created before profiling
+ // services can be initialized. Profiling services cannot be moved to before the
+ // workaround because it needs SetupThread to be called.</TODO>
+
+ SystemDomain::NotifyProfilerStartup();
+#endif // PROFILING_SUPPORTED
+
+#ifndef CROSSGEN_COMPILE
+ if (CLRHosted()
+#ifdef _DEBUG
+ || ((fFlags & COINITEE_DLL) == 0 &&
+ g_pConfig->GetHostTestADUnload())
+#endif
+ ) {
+ // If we are hosted, a host may specify unloading the AD when a managed allocation in a
+ // critical region fails. We need to precreate a thread to unload the AD.
+ }
+#endif // CROSSGEN_COMPILE
+
+ g_fEEInit = false;
+
+ SystemDomain::System()->DefaultDomain()->LoadSystemAssemblies();
+
+ SystemDomain::System()->DefaultDomain()->SetupSharedStatics();
+
+#ifdef _DEBUG
+ APIThreadStress::SetThreadStressCount(g_pConfig->GetAPIThreadStressCount());
+#endif
+#ifdef FEATURE_STACK_SAMPLING
+ StackSampler::Init();
+#endif
+
+#ifndef CROSSGEN_COMPILE
+ if (!NingenEnabled())
+ {
+ // Perform any once-only SafeHandle initialization.
+ SafeHandle::Init();
+ }
+
+#ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
+ // retrieve configured max size for the mini-metadata buffer (defaults to 64KB)
+ g_MiniMetaDataBuffMaxSize = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MiniMdBufferCapacity);
+ // align up to OS_PAGE_SIZE, with a maximum of 1 MB
+ g_MiniMetaDataBuffMaxSize = (DWORD) min(ALIGN_UP(g_MiniMetaDataBuffMaxSize, OS_PAGE_SIZE), 1024 * 1024);
+ // allocate the buffer. this is never touched while the process is running, so it doesn't
+ // contribute to the process' working set. it is needed only as a "shadow" for a mini-metadata
+ // buffer that will be set up and reported / updated in the Watson process (the
+ // DacStreamsManager class coordinates this)
+ g_MiniMetaDataBuffAddress = (TADDR) ClrVirtualAlloc(NULL,
+ g_MiniMetaDataBuffMaxSize, MEM_COMMIT, PAGE_READWRITE);
+#endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
+
+ // Load mscorsn.dll if the app requested the legacy mode in its configuration file.
+ if (g_pConfig->LegacyLoadMscorsnOnStartup())
+ IfFailGo(LoadMscorsn());
+
+#endif // CROSSGEN_COMPILE
+
+ g_fEEStarted = TRUE;
+ g_EEStartupStatus = S_OK;
+ hr = S_OK;
+ STRESS_LOG0(LF_STARTUP, LL_ALWAYS, "===================EEStartup Completed===================");
+
+#if defined(_DEBUG) && !defined(CROSSGEN_COMPILE)
+
+ // If g_fEEStarted was false when we loaded the System Module, we did not run ExpandAll on it. In
+ // this case, make sure we run ExpandAll here. The rationale is that if we JIT before g_fEEStarted
+ // is true, we can't initialize COM, so we can't JIT anything that uses COM types. Also, it's
+ // probably not safe to JIT while g_fEEStarted is false.
+ //
+ // Also, if you run this it's possible we'll call CoInitialize, which defaults to MTA. This might
+ // mess up an application that uses STA. However, this mode is only supported for certain limited
+ // JIT testing scenarios, so it can live with the limitation.
+ if (g_pConfig->ExpandModulesOnLoad())
+ {
+ SystemDomain::SystemModule()->ExpandAll();
+ }
+
+ //For a similar reason, let's not run VerifyAllOnLoad either.
+#ifdef FEATURE_CORECLR
+ if (g_pConfig->VerifyModulesOnLoad())
+ {
+ SystemDomain::SystemModule()->VerifyAllMethods();
+ }
+#endif //FEATURE_CORECLR
+
+ // Perform mscorlib consistency check if requested
+ g_Mscorlib.CheckExtended();
+
+#endif // _DEBUG && !CROSSGEN_COMPILE
+
+ErrExit: ;
+ }
+ EX_CATCH
+ {
+#ifdef CROSSGEN_COMPILE
+ // for minimal impact we won't update hr for regular builds
+ hr = GET_EXCEPTION()->GetHR();
+ _ASSERTE(FAILED(hr));
+#endif // CROSSGEN_COMPILE
+ }
+ EX_END_CATCH(RethrowTerminalExceptionsWithInitCheck)
+
+ if (!g_fEEStarted) {
+ if (g_fEEInit)
+ g_fEEInit = false;
+
+ if (!FAILED(hr))
+ hr = E_FAIL;
+
+ g_EEStartupStatus = hr;
+ }
+
+ if (breakOnEELoad.val(CLRConfig::UNSUPPORTED_BreakOnEELoad) == 2)
+ {
+#ifdef _DEBUG
+ _ASSERTE(!"Done loading EE!");
+#else
+ DebugBreak();
+#endif
+ }
+
+}
+
+LONG FilterStartupException(PEXCEPTION_POINTERS p, PVOID pv)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(p));
+ PRECONDITION(CheckPointer(pv));
+ } CONTRACTL_END;
+
+ g_EEStartupStatus = (HRESULT)p->ExceptionRecord->ExceptionInformation[0];
+
+ // Make sure we got a failure code in this case
+ if (!FAILED(g_EEStartupStatus))
+ g_EEStartupStatus = E_FAIL;
+
+ // Initialization has failed, so reset the g_fEEInit flag.
+ g_fEEInit = false;
+
+ if (p->ExceptionRecord->ExceptionCode == BOOTUP_EXCEPTION_COMPLUS)
+ {
+ // Don't ever handle the exception in a checked build
+#ifndef _DEBUG
+ return EXCEPTION_EXECUTE_HANDLER;
+#endif
+ }
+
+ return EXCEPTION_CONTINUE_SEARCH;
+}
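+
+// The filter above assumes the startup failure was raised with the failing HRESULT as the first
+// exception argument, i.e. something shaped like this sketch (illustrative; the actual raise sites
+// are in the startup error-handling paths):
+//
+//     ULONG_PTR args[1] = { (ULONG_PTR)hr };
+//     RaiseException(BOOTUP_EXCEPTION_COMPLUS, 0, 1, args);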
+
+// EEStartup is responsible for all the one-time initialization of the runtime. Some of the highlights of
+// what it does include:
+// * Creates the default and shared appdomains.
+// * Loads mscorlib.dll and loads up the fundamental types (System.Object ...)
+//
+// see code:EEStartup#TableOfContents for more on the runtime in general.
+// see code:#EEShutDown for an analogous routine run during shutdown.
+//
+HRESULT EEStartup(COINITIEE fFlags)
+{
+ // Cannot use normal contracts here because of the PAL_TRY.
+ STATIC_CONTRACT_NOTHROW;
+
+ _ASSERTE(!g_fEEStarted && !g_fEEInit && SUCCEEDED (g_EEStartupStatus));
+
+ PAL_TRY(COINITIEE *, pfFlags, &fFlags)
+ {
+ EEStartupHelper(*pfFlags);
+ }
+ PAL_EXCEPT_FILTER (FilterStartupException)
+ {
+ // The filter should have set g_EEStartupStatus to a failure HRESULT.
+ _ASSERTE(FAILED(g_EEStartupStatus));
+ }
+ PAL_ENDTRY
+
+#ifndef CROSSGEN_COMPILE
+ if(SUCCEEDED(g_EEStartupStatus) && (fFlags & COINITEE_MAIN) == 0)
+ g_EEStartupStatus = SystemDomain::SetupDefaultDomainNoThrow();
+#endif
+
+ return g_EEStartupStatus;
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+
+void InnerCoEEShutDownCOM()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ static LONG AlreadyDone = -1;
+
+ if (g_fEEStarted != TRUE)
+ return;
+
+ if (FastInterlockIncrement(&AlreadyDone) != 0)
+ return;
+
+ g_fShutDownCOM = true;
+
+ // Release IJupiterGCMgr *
+ RCWWalker::OnEEShutdown();
+
+ // Release all of the RCWs in all contexts in all caches.
+ ReleaseRCWsInCaches(NULL);
+
+ // Release all marshaling data in all AppDomains
+ AppDomainIterator i(TRUE);
+ while (i.Next())
+ i.GetDomain()->DeleteMarshalingData();
+
+ // Release marshaling data in shared domain as well
+ SharedDomain::GetDomain()->DeleteMarshalingData();
+
+#ifdef FEATURE_APPX
+ // Cleanup cached factory pointer in SynchronizationContextNative
+ SynchronizationContextNative::Cleanup();
+#endif
+
+ // remove any tear-down notification we have setup
+ RemoveTearDownNotifications();
+}
+
+// ---------------------------------------------------------------------------
+// %%Function: CoEEShutdownCOM()
+//
+// Parameters:
+// none
+//
+// Returns:
+// Nothing
+//
+// Description:
+// COM object shutdown work should be done here
+// ---------------------------------------------------------------------------
+void STDMETHODCALLTYPE CoEEShutDownCOM()
+{
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT;
+ } CONTRACTL_END;
+
+ if (g_fEEStarted != TRUE)
+ return;
+
+ HRESULT hr;
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+
+ InnerCoEEShutDownCOM();
+
+ END_EXTERNAL_ENTRYPOINT;
+
+ // API doesn't allow us to communicate a failure HRESULT. MDAs can
+ // be enabled to catch failure inside CanRunManagedCode.
+ // _ASSERTE(SUCCEEDED(hr));
+}
+
+#endif // FEATURE_COMINTEROP
+
+// ---------------------------------------------------------------------------
+// %%Function: ForceEEShutdown()
+//
+// Description: Force the EE to shutdown now.
+//
+// Note: returns when sca is SCA_ReturnWhenShutdownComplete.
+// ---------------------------------------------------------------------------
+void ForceEEShutdown(ShutdownCompleteAction sca)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Don't bother to take the lock for this case.
+
+ STRESS_LOG0(LF_STARTUP, INFO3, "EEShutdown invoked from ForceEEShutdown");
+ EEPolicy::HandleExitProcess(sca);
+}
+
+//---------------------------------------------------------------------------
+// %%Function: ExternalShutdownHelper
+//
+// Parameters:
+// int exitCode :: process exit code
+// ShutdownCompleteAction sca :: indicates whether ::ExitProcess() is
+// called or if the function returns.
+//
+// Returns:
+// Nothing
+//
+// Description:
+// This is a helper shared by CorExitProcess and ShutdownRuntimeWithoutExiting,
+// which causes the runtime to shut down after the appropriate checks.
+// ---------------------------------------------------------------------------
+static void ExternalShutdownHelper(int exitCode, ShutdownCompleteAction sca)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ } CONTRACTL_END;
+
+ CONTRACT_VIOLATION(GCViolation | ModeViolation | SOToleranceViolation);
+
+ if (g_fEEShutDown || !g_fEEStarted)
+ return;
+
+ if (HasIllegalReentrancy())
+ {
+ return;
+ }
+ else
+ if (!CanRunManagedCode())
+ {
+ return;
+ }
+
+ // The exit code for the process is communicated in one of two ways. If the
+ // entrypoint returns an 'int' we take that. Otherwise we take a latched
+ // process exit code. This can be modified by the app via System.SetExitCode().
+ SetLatchedExitCode(exitCode);
+
+#ifndef FEATURE_CORECLR // no shim
+ // Bump up the ref-count on the module
+ for (int i =0; i<6; i++)
+ CLRLoadLibrary(MSCOREE_SHIM_W);
+#endif // FEATURE_CORECLR
+
+ ForceEEShutdown(sca);
+
+ // @TODO: If we cannot run ManagedCode, BEGIN_EXTERNAL_ENTRYPOINT will skip
+ // the shutdown. We could call ::ExitProcess in that failure case, but that
+ // would violate our hosting agreement. We are supposed to go through EEPolicy::
+ // HandleExitProcess(). Is this legal if !CanRunManagedCode()?
+
+}
+
+//---------------------------------------------------------------------------
+// %%Function: void STDMETHODCALLTYPE CorExitProcess(int exitCode)
+//
+// Parameters:
+// int exitCode :: process exit code
+//
+// Returns:
+// Nothing
+//
+// Description:
+// Shuts down the runtime (after the appropriate checks) and exits the process with the given exit code.
+// ---------------------------------------------------------------------------
+extern "C" void STDMETHODCALLTYPE CorExitProcess(int exitCode)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ExternalShutdownHelper(exitCode, SCA_ExitProcessWhenShutdownComplete);
+}
+
+//---------------------------------------------------------------------------
+// %%Function: ShutdownRuntimeWithoutExiting
+//
+// Parameters:
+// int exitCode :: process exit code
+//
+// Returns:
+// Nothing
+//
+// Description:
+// This is a helper used only by the v4+ Shim to shut down this runtime and
+// return when the work has completed. It is exposed to the Shim via
+// GetCLRFunction.
+// ---------------------------------------------------------------------------
+void ShutdownRuntimeWithoutExiting(int exitCode)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ExternalShutdownHelper(exitCode, SCA_ReturnWhenShutdownComplete);
+}
+
+//---------------------------------------------------------------------------
+// %%Function: IsRuntimeStarted
+//
+// Parameters:
+// pdwStartupFlags: out parameter that is set to the startup flags if the
+// runtime is started.
+//
+// Returns:
+// TRUE if the runtime has been started, FALSE otherwise.
+//
+// Description:
+// This is a helper used only by the v4+ Shim to determine if this runtime
+// has ever been started. It is exposed to the Shim via GetCLRFunction.
+// ---------------------------------------------------------------------------
+BOOL IsRuntimeStarted(DWORD *pdwStartupFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (pdwStartupFlags != NULL) // this parameter is optional
+ {
+ *pdwStartupFlags = 0;
+#ifndef FEATURE_CORECLR
+ if (g_fEEStarted)
+ {
+ *pdwStartupFlags = CorHost2::GetStartupFlags();
+ }
+#endif
+ }
+ return g_fEEStarted;
+}
+
+static bool WaitForEndOfShutdown_OneIteration()
+{
+ CONTRACTL{
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ // We are shutting down. GC triggers do not have any effect now.
+ CONTRACT_VIOLATION(GCViolation);
+
+ // If someone calls EEShutDown while holding the OS loader lock, the thread we created for shutdown
+ // won't start running. This is a deadlock we cannot fix. Instead, we time out and continue on the
+ // current thread.
+ DWORD timeout = GetEEPolicy()->GetTimeout(OPR_ProcessExit);
+ timeout *= 2;
+ ULONGLONG endTime = CLRGetTickCount64() + timeout;
+ bool done = false;
+
+ EX_TRY
+ {
+ ULONGLONG curTime = CLRGetTickCount64();
+ if (curTime > endTime)
+ {
+ done = true;
+ }
+ else
+ {
+#ifdef PROFILING_SUPPORTED
+ if (CORProfilerPresent())
+ {
+ // A profiler is loaded, so just wait without timeout. This allows
+ // profilers to complete potentially lengthy post processing, without the
+ // CLR killing them off first. The Office team's server memory profiler,
+ // for example, does a lot of post-processing that can exceed the 80
+ // second imit we normally impose here. The risk of waiting without
+ // second limit we normally impose here. The risk of waiting without
+ // timeout is that, if there really is a deadlock, shutdown will hang.
+ // Since that will only happen if a profiler is loaded, that is a
+ // reasonable compromise.
+ }
+ else
+#endif //PROFILING_SUPPORTED
+ {
+ timeout = static_cast<DWORD>(endTime - curTime);
+ }
+ DWORD status = g_pEEShutDownEvent->Wait(timeout,TRUE);
+ if (status == WAIT_OBJECT_0 || status == WAIT_TIMEOUT)
+ {
+ done = true;
+ }
+ else
+ {
+ done = false;
+ }
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ return done;
+}
+
+void WaitForEndOfShutdown()
+{
+ CONTRACTL{
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ // We are shutting down. GC triggers do not have any effect now.
+ CONTRACT_VIOLATION(GCViolation);
+
+ Thread *pThread = GetThread();
+ // After a thread is blocked in WaitForEndOfShutdown, the thread should not enter the runtime again
+ // and block at WaitForEndOfShutdown again.
+ if (pThread)
+ {
+ _ASSERTE(!pThread->HasThreadStateNC(Thread::TSNC_BlockedForShutdown));
+ pThread->SetThreadStateNC(Thread::TSNC_BlockedForShutdown);
+ }
+
+ while (!WaitForEndOfShutdown_OneIteration());
+}
+
+// ---------------------------------------------------------------------------
+// Function: EEShutDownHelper(BOOL fIsDllUnloading)
+//
+// The real meat of shut down happens here. See code:#EEShutDown for details, including
+// what fIsDllUnloading means.
+//
+void STDMETHODCALLTYPE EEShutDownHelper(BOOL fIsDllUnloading)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ // Used later for a callback.
+ CEEInfo ceeInf;
+
+ if(fIsDllUnloading)
+ {
+ ETW::EnumerationLog::ProcessShutdown();
+ }
+
+#if defined(FEATURE_CAS_POLICY) || defined(FEATURE_COMINTEROP)
+ // Get the current thread.
+ Thread * pThisThread = GetThread();
+#endif
+
+ // If the process is detaching then set the global state.
+ // This is used to get around FreeLibrary problems.
+ if(fIsDllUnloading)
+ g_fProcessDetach = true;
+
+ if (IsDbgHelperSpecialThread())
+ {
+ // Our debugger helper thread does not allow a Thread object to be set up.
+ // We should not run shutdown code on the debugger helper thread.
+ _ASSERTE(fIsDllUnloading);
+ return;
+ }
+
+#ifdef _DEBUG
+ // stop API thread stress
+ APIThreadStress::SetThreadStressCount(0);
+#endif
+
+ STRESS_LOG1(LF_STARTUP, LL_INFO10, "EEShutDown entered unloading = %d", fIsDllUnloading);
+
+#ifdef _DEBUG
+ if (_DbgBreakCount)
+ _ASSERTE(!"An assert was hit before EE Shutting down");
+
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnEEShutdown))
+ _ASSERTE(!"Shutting down EE!");
+#endif
+
+#ifdef DEBUGGING_SUPPORTED
+ // This is a nasty, terrible, horrible thing. If we're being
+ // called from our DLL main, then the odds are good that our DLL
+ // main has been called as the result of some person calling
+ // ExitProcess. That rips the debugger helper thread away very
+ // ungracefully. This check is an attempt to recognize that case
+ // and avoid the impending hang when attempting to get the helper
+ // thread to do things for us.
+ if ((g_pDebugInterface != NULL) && g_fProcessDetach)
+ g_pDebugInterface->EarlyHelperThreadDeath();
+#endif // DEBUGGING_SUPPORTED
+
+ BOOL fFinalizeOK = FALSE;
+
+ EX_TRY
+ {
+ ClrFlsSetThreadType(ThreadType_Shutdown);
+
+ if (!fIsDllUnloading)
+ {
+ ProcessEventForHost(Event_ClrDisabled, NULL);
+ }
+ else if (g_fEEShutDown)
+ {
+ // I'm in the final shutdown and the first part has already been run.
+ goto part2;
+ }
+
+ // Indicate the EE is in the shutdown phase.
+ g_fEEShutDown |= ShutDown_Start;
+
+ fFinalizeOK = TRUE;
+
+ // Terminate the BBSweep thread
+ g_BBSweep.ShutdownBBSweepThread();
+
+ // We perform the final GC only if the user has requested it through the GC class.
+ // We should never do the final GC for a process detach
+ if (!g_fProcessDetach && !g_fFastExitProcess)
+ {
+ g_fEEShutDown |= ShutDown_Finalize1;
+ FinalizerThread::EnableFinalization();
+ fFinalizeOK = FinalizerThread::FinalizerThreadWatchDog();
+ }
+
+#ifndef FEATURE_CORECLR
+ if (!g_fFastExitProcess)
+ {
+ // Log usage data to disk. (Only do this in normal shutdown scenarios, and not involving ngen)
+ if (!IsCompilationProcess())
+ AssemblyUsageLogManager::GenerateLog(AssemblyUsageLogManager::GENERATE_LOG_FLAGS_NONE);
+ }
+#endif
+
+ // Ok. Let's stop the EE.
+ if (!g_fProcessDetach)
+ {
+ // Convert key locks into "shutdown" mode. A lock in shutdown mode means:
+ // - Only the finalizer/helper/shutdown threads will be able to take the lock.
+ // - Any other thread that tries to take it will just get redirected to an endless WaitForEndOfShutdown().
+ //
+ // The only managed code that should run after this point is the finalizers for shutdown.
+ // We convert locks needed for running + debugging such finalizers. Since such locks may need to be
+ // juggled between multiple threads (finalizer/helper/shutdown), no single thread can take the
+ // lock and not give it up.
+ //
+ // Each lock needs its own shutdown flag (they can't all be converted at once).
+ // To avoid deadlocks, we need to convert locks in order of crst level (biggest first).
+
+ // Notify the debugger that we're going into shutdown to convert debugger-lock to shutdown.
+ if (g_pDebugInterface != NULL)
+ {
+ g_pDebugInterface->LockDebuggerForShutdown();
+ }
+
+ // This call will convert the ThreadStoreLock into "shutdown" mode, just like the debugger lock above.
+ g_fEEShutDown |= ShutDown_Finalize2;
+ if (fFinalizeOK)
+ {
+ fFinalizeOK = FinalizerThread::FinalizerThreadWatchDog();
+ }
+
+ if (!fFinalizeOK)
+ {
+ // One of the calls to FinalizerThreadWatchDog failed due to timeout, so we need to prevent
+ // any thread from running managed code, including the finalizer.
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_SHUTDOWN);
+ g_fSuspendOnShutdown = TRUE;
+ g_fSuspendFinalizerOnShutdown = TRUE;
+ ThreadStore::TrapReturningThreads(TRUE);
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+ }
+ }
+
+#ifdef FEATURE_PREJIT
+ // If we're doing basic block profiling, we need to write the log files to disk.
+
+ static BOOL fIBCLoggingDone = FALSE;
+ if (!fIBCLoggingDone)
+ {
+ if (g_IBCLogger.InstrEnabled())
+ Module::WriteAllModuleProfileData(true);
+
+ fIBCLoggingDone = TRUE;
+ }
+
+#endif // FEATURE_PREJIT
+
+ ceeInf.JitProcessShutdownWork(); // Do anything JIT-related that needs to happen at shutdown.
+
+#ifdef FEATURE_INTERPRETER
+ // This will check a flag and do nothing if not enabled.
+ Interpreter::PrintPostMortemData();
+#endif // FEATURE_INTERPRETER
+
+ FastInterlockExchange((LONG*)&g_fForbidEnterEE, TRUE);
+
+ if (g_fProcessDetach)
+ {
+ ThreadStore::TrapReturningThreads(TRUE);
+ }
+
+ if (!g_fProcessDetach && !fFinalizeOK)
+ {
+ goto lDone;
+ }
+
+#ifdef PROFILING_SUPPORTED
+ // If profiling is enabled, then notify of shutdown first so that the
+ // profiler can make any last calls it needs to. Do this only if we
+ // are not detaching
+
+ if (CORProfilerPresent())
+ {
+ // If EEShutdown is not being called due to a ProcessDetach event,
+ // the profiler should still be present
+ if (!g_fProcessDetach)
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->Shutdown();
+ END_PIN_PROFILER();
+ }
+
+ g_fEEShutDown |= ShutDown_Profiler;
+
+ // Free the interface objects.
+ ProfilingAPIUtility::TerminateProfiling();
+ }
+#endif // PROFILING_SUPPORTED
+
+#ifndef FEATURE_CORECLR
+ // CoEEShutDownCOM moved to
+ // the Finalizer thread. See bug 87809
+ if (!g_fProcessDetach && !g_fFastExitProcess)
+ {
+ g_fEEShutDown |= ShutDown_COM;
+ if (fFinalizeOK)
+ {
+ FinalizerThread::FinalizerThreadWatchDog();
+ }
+ }
+#ifdef _DEBUG
+ else
+ g_fEEShutDown |= ShutDown_COM;
+#endif
+#endif //FEATURE_CORECLR
+
+#ifdef _DEBUG
+ g_fEEShutDown |= ShutDown_SyncBlock;
+#endif
+ {
+ // From here on out we might call stuff that violates mode requirements, but we ignore these
+ // because we are shutting down.
+ CONTRACT_VIOLATION(ModeViolation);
+
+#ifdef FEATURE_CAS_POLICY
+ // Save the security policy cache as necessary.
+ if (!g_fProcessDetach || pThisThread != NULL)
+ {
+ // If process shutdown has started, it is not safe to create a Thread object, which is needed
+ // by the following call.
+ Security::SaveCache();
+ }
+#endif
+#ifdef FEATURE_COMINTEROP
+ // We need to call CoUninitialize in part one to ensure orderly shutdown of COM dlls.
+ if (!g_fFastExitProcess)
+ {
+ if (pThisThread != NULL)
+ {
+ pThisThread->CoUninitialize();
+ }
+ }
+#endif // FEATURE_COMINTEROP
+ }
+
+ // This is the end of Part 1.
+
+part2:
+ // If process shutdown is in progress and Crst locks to be used in shutdown phase 2
+ // are already in use, then skip phase 2. This will happen only when those locks
+ // are orphaned. In Vista, the penalty for attempting to enter such locks is
+ // instant process termination.
+ if (g_fProcessDetach)
+ {
+ // The assert below is a bit too aggressive and has generally flagged cases that turned out
+ // to be race conditions, not easily reproduced, rather than real bugs. A typical race scenario
+ // is when there are two threads, T1 and T2, with T2 having taken a lock (e.g. the SystemDomain
+ // lock), and the OS terminates T2 for some reason. Later, when we enter the shutdown thread,
+ // we would assert on such a lock leak, but there is not much we can do since the OS won't
+ // notify us prior to thread termination. And this is not even a user bug.
+ //
+ // Converting it to a STRESS LOG to reduce noise, yet keep these cases on the radar should
+ // they need to be investigated.
+ //_ASSERTE_MSG(g_ShutdownCrstUsageCount == 0, "Some locks to be taken during shutdown may already be orphaned!");
+ if (g_ShutdownCrstUsageCount > 0)
+ {
+ STRESS_LOG0(LF_STARTUP, LL_INFO10, "Some locks to be taken during shutdown may already be orphaned!");
+ goto lDone;
+ }
+ }
+
+ {
+ CONTRACT_VIOLATION(ModeViolation);
+
+ // On the new plan, we only do the tear-down under the protection of the loader
+ // lock -- after the OS has stopped all other threads.
+ if (fIsDllUnloading && (g_fEEShutDown & ShutDown_Phase2) == 0)
+ {
+ g_fEEShutDown |= ShutDown_Phase2;
+
+ // Shut down the finalizer before we suspend all background threads. Otherwise we
+ // never get to finalize anything. Obviously.
+
+#ifdef _DEBUG
+ if (_DbgBreakCount)
+ _ASSERTE(!"An assert was hit After Finalizer run");
+#endif
+
+ // No longer process exceptions
+ g_fNoExceptions = true;
+
+ //
+ // Remove our global exception filter. If it was NULL before, we want it to be null now.
+ //
+ UninstallUnhandledExceptionFilter();
+
+ // <TODO>@TODO: This does things which shouldn't occur in part 2. Namely,
+ // calling managed dll main callbacks (AppDomain::SignalProcessDetach), and
+ // RemoveAppDomainFromIPC.
+ //
+ // (If we move those things to earlier, this can be called only if fShouldWeCleanup.)</TODO>
+ if (!g_fFastExitProcess)
+ {
+ SystemDomain::DetachBegin();
+ }
+
+
+#ifdef DEBUGGING_SUPPORTED
+ // Terminate the debugging services.
+ TerminateDebugger();
+#endif // DEBUGGING_SUPPORTED
+
+ StubManager::TerminateStubManagers();
+
+#ifdef FEATURE_INTERPRETER
+ Interpreter::Terminate();
+#endif // FEATURE_INTERPRETER
+
+#ifdef SHOULD_WE_CLEANUP
+ if (!g_fFastExitProcess)
+ {
+ Ref_Shutdown(); // shut down the handle table
+ }
+#endif /* SHOULD_WE_CLEANUP */
+
+#ifdef ENABLE_PERF_COUNTERS
+ // Terminate Perf Counters as late as we can (to get the most data)
+ PerfCounters::Terminate();
+#endif // ENABLE_PERF_COUNTERS
+
+ //@TODO: find the right place for this
+ VirtualCallStubManager::UninitStatic();
+
+#ifdef FEATURE_IPCMAN
+ // Terminate the InterProcess Communications with COM+
+ TerminateIPCManager();
+#endif // FEATURE_IPCMAN
+
+#ifdef ENABLE_PERF_LOG
+ PerfLog::PerfLogDone();
+#endif //ENABLE_PERF_LOG
+
+#ifdef FEATURE_IPCMAN
+ // Give PerfMon a chance to hook up to us.
+ // Have PerfMon resync its list *after* we close IPC so that it will remove
+ // this process.
+ IPCFuncCallSource::DoThreadSafeCall();
+#endif // FEATURE_IPCMAN
+
+ Frame::Term();
+
+ if (!g_fFastExitProcess)
+ {
+ SystemDomain::DetachEnd();
+ }
+
+ TerminateStackProbes();
+
+ // Unregister our vectored exception and continue handlers from the OS.
+ // This will ensure that if any other DLL unload (after ours) has an exception,
+ // we won't attempt to process that exception (which could lead to various
+ // issues including AV in the runtime).
+ //
+ // This should be done:
+ //
+ // 1) As the last action during the shutdown so that any unexpected AVs
+ // in the runtime during shutdown do result in FailFast in VEH.
+ //
+ // 2) Only when the runtime is processing DLL_PROCESS_DETACH.
+ CLRRemoveVectoredHandlers();
+
+#ifdef _DEBUG
+ if (_DbgBreakCount)
+ _ASSERTE(!"EE Shutting down after an assert");
+#endif
+
+
+#ifdef LOGGING
+ extern unsigned FcallTimeHist[11];
+#endif
+ LOG((LF_STUBS, LL_INFO10, "FcallHist %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d %3d\n",
+ FcallTimeHist[0], FcallTimeHist[1], FcallTimeHist[2], FcallTimeHist[3],
+ FcallTimeHist[4], FcallTimeHist[5], FcallTimeHist[6], FcallTimeHist[7],
+ FcallTimeHist[8], FcallTimeHist[9], FcallTimeHist[10]));
+
+ WriteJitHelperCountToSTRESSLOG();
+
+ STRESS_LOG0(LF_STARTUP, LL_INFO10, "EEShutdown shutting down logging");
+
+#if 0 // Don't clean up the stress log, so that even at process exit we have a log (after all, the process is going away)
+ if (!g_fFastExitProcess)
+ StressLog::Terminate(TRUE);
+#endif
+
+ if (g_pConfig != NULL)
+ g_pConfig->Cleanup();
+
+#ifdef LOGGING
+ ShutdownLogging();
+#endif
+ }
+ }
+
+ lDone: ;
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ ClrFlsClearThreadType(ThreadType_Shutdown);
+ if (!g_fProcessDetach)
+ {
+ g_pEEShutDownEvent->Set();
+ }
+}
+
+
+#ifdef FEATURE_COMINTEROP
+
+BOOL IsThreadInSTA()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // If ole32.dll is not loaded
+ if (WszGetModuleHandle(W("ole32.dll")) == NULL)
+ {
+ return FALSE;
+ }
+
+ BOOL fInSTA = TRUE;
+ // To be conservative, check if finalizer thread is around
+ EX_TRY
+ {
+ Thread *pFinalizerThread = FinalizerThread::GetFinalizerThread();
+ if (!pFinalizerThread || pFinalizerThread->Join(0, FALSE) != WAIT_TIMEOUT)
+ {
+ fInSTA = FALSE;
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (!fInSTA)
+ {
+ return FALSE;
+ }
+
+ THDTYPE type;
+ HRESULT hr = S_OK;
+
+ hr = GetCurrentThreadTypeNT5(&type);
+ if (hr == S_OK)
+ {
+ fInSTA = (type == THDTYPE_PROCESSMESSAGES) ? TRUE : FALSE;
+
+ // If we get back THDTYPE_PROCESSMESSAGES, we are guaranteed to
+ // be an STA thread. If not, we are an MTA thread; however,
+ // we can't know if the thread has been explicitly set to MTA
+ // (via a call to CoInitializeEx) or if it has been implicitly
+ // made MTA (if it hasn't been CoInitializeEx'd but CoInitialize
+ // has already been called on some other thread in the process).
+ }
+ else
+ {
+ // CoInitialize hasn't been called in the process yet so assume the current thread
+ // is MTA.
+ fInSTA = FALSE;
+ }
+
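+ // Net effect of the logic above (orientation only):
+ //     hr == S_OK && type == THDTYPE_PROCESSMESSAGES  -> STA
+ //     hr == S_OK && any other type                   -> MTA (not STA)
+ //     hr != S_OK (CoInitialize never called)         -> assume MTA
+ //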
+ return fInSTA;
+}
+#endif
+
+BOOL g_fWeOwnProcess = FALSE;
+
+static LONG s_ActiveShutdownThreadCount = 0;
+
+// ---------------------------------------------------------------------------
+// Function: EEShutDownProcForSTAThread(LPVOID lpParameter)
+//
+// Parameters:
+// LPVOID lpParameter: unused
+//
+// Description:
+// When EEShutDown decides that the shut down logic must occur on another thread,
+// EEShutDown creates a new thread, and this function acts as the thread proc. See
+// code:#STAShutDown for details.
+//
+DWORD WINAPI EEShutDownProcForSTAThread(LPVOID lpParameter)
+{
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+
+ ClrFlsSetThreadType(ThreadType_ShutdownHelper);
+
+ EEShutDownHelper(FALSE);
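+ // Give the original shutdown thread up to ~200ms (10 x 20ms below) to observe
+ // the end of shutdown; if it never does, escalate to a rude process exit below.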
+ for (int i = 0; i < 10; i ++)
+ {
+ if (s_ActiveShutdownThreadCount)
+ {
+ return 0;
+ }
+ __SwitchToThread(20, CALLER_LIMITS_SPINNING);
+ }
+
+ EPolicyAction action = GetEEPolicy()->GetDefaultAction(OPR_ProcessExit, NULL);
+ if (action < eRudeExitProcess)
+ {
+ action = eRudeExitProcess;
+ }
+ UINT exitCode;
+ if (g_fWeOwnProcess)
+ {
+ exitCode = GetLatchedExitCode();
+ }
+ else
+ {
+ exitCode = HOST_E_EXITPROCESS_TIMEOUT;
+ }
+ EEPolicy::HandleExitProcessFromEscalation(action, exitCode);
+
+ return 0;
+}
+
+// ---------------------------------------------------------------------------
+// #EEShutDown
+//
+// Function: EEShutDown(BOOL fIsDllUnloading)
+//
+// Parameters:
+// BOOL fIsDllUnloading:
+// * TRUE => Called from CLR's DllMain (DLL_PROCESS_DETACH). Not safe point for
+// full cleanup
+// * FALSE => Called some other way (e.g., end of the CLR's main). Safe to do
+// full cleanup.
+//
+// Description:
+//
+// All ee shutdown stuff should be done here. EEShutDown is generally called in one
+// of two ways:
+// * 1. From code:EEPolicy::HandleExitProcess (via HandleExitProcessHelper), with
+// fIsDllUnloading == FALSE. This code path is typically invoked by the CLR's
+// main just falling through to the end. Full cleanup can be performed when
+// EEShutDown is called this way.
+// * 2. From CLR's DllMain (DLL_PROCESS_DETACH), with fIsDllUnloading == TRUE. When
+// called this way, much cleanup code is unsafe to run, and is thus skipped.
+//
+// Actual shut down logic is factored out to EEShutDownHelper which may be called
+// directly by EEShutDown, or indirectly on another thread (see code:#STAShutDown).
+//
+// In order that callees may also know the value of fIsDllUnloading, EEShutDownHelper
+// sets g_fProcessDetach = fIsDllUnloading, and g_fProcessDetach may then be retrieved
+// via code:IsAtProcessExit.
+//
+// NOTE 1: Actually, g_fProcessDetach is set to TRUE if fIsDllUnloading is TRUE. But
+// g_fProcessDetach doesn't appear to be explicitly set to FALSE. (Apparently
+// g_fProcessDetach is implicitly initialized to FALSE as clr.dll is loaded.)
+//
+// NOTE 2: EEDllMain(DLL_PROCESS_DETACH) already sets g_fProcessDetach to TRUE, so it
+// appears EEShutDownHelper doesn't have to.
+//
+void STDMETHODCALLTYPE EEShutDown(BOOL fIsDllUnloading)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT; // we don't need to clean up because we're shutting down
+ PRECONDITION(g_fEEStarted);
+ } CONTRACTL_END;
+
+ // If we have not started runtime successfully, it is not safe to call EEShutDown.
+ if (!g_fEEStarted || g_fFastExitProcess == 2)
+ {
+ return;
+ }
+
+ // Stop stack probing and asserts right away. Once we're shutting down, we can do no more.
+ // And we don't want to SO-protect anything at this point anyway. This really only has impact
+ // on a debug build.
+ TerminateStackProbes();
+
+ // The process is shutting down. No need to check SO contract.
+ SO_NOT_MAINLINE_FUNCTION;
+
+ // We only do the first part of the shutdown once.
+ static LONG OnlyOne = -1;
+
+ if (!fIsDllUnloading)
+ {
+ if (FastInterlockIncrement(&OnlyOne) != 0)
+ {
+ // I'm in a regular shutdown -- but another thread got here first.
+ // It's a race if I return from here -- I'll call ExitProcess next, and
+ // rip things down while the first thread is half-way through a
+ // nice cleanup. Rather than do that, I should just wait until the
+ // first thread calls ExitProcess(). I'll die a nice death when that
+ // happens.
+ GCX_PREEMP_NO_DTOR();
+ WaitForEndOfShutdown();
+ return;
+ }
+
+#ifdef FEATURE_MULTICOREJIT
+ if (!AppX::IsAppXProcess()) // When running as AppX, make the delayed timer-driven writing the only option
+ {
+ MulticoreJitManager::StopProfileAll();
+ }
+#endif
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (!fIsDllUnloading && IsThreadInSTA())
+ {
+ // #STAShutDown
+ //
+ // During shutdown, we may need to release STA interface on the shutdown thread.
+ // It is possible that the shutdown thread may deadlock. During shutdown, all
+ // threads are blocked, except the shutdown thread and finalizer thread. If a
+ // lock is held by one of these suspended threads, it can deadlock the process if
+ // the shutdown thread tries to enter the lock. To mitigate this risk, create
+ // another thread (B) to do shutdown activities (i.e., EEShutDownHelper), while
+ // this thread (A) waits. If B deadlocks, A will time out and immediately return
+ // from EEShutDown. A will then eventually call the OS's ExitProcess, which will
+ // kill the deadlocked thread (and all other threads).
+ //
+ // Many Windows Forms-based apps will also execute the code below to shift
+ // shutdown logic to a separate thread, even if they don't use COM objects. The
+ // reason is that they will typically use a main UI thread to pump all Windows
+ // messages (including messages that facilitate cross-thread COM calls to STA COM
+ // objects), and will set that thread up as an STA thread just in case there are
+ // such cross-thread COM calls to contend with. In fact, when you use VS's
+ // File.New.Project to make a new Windows Forms project, VS will mark Main() with
+ // [STAThread]
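+ //
+ // Illustrative shape of such an entry point (C#, assumed names, orientation only):
+ //
+ //     [STAThread]
+ //     static void Main() { Application.Run(new Form1()); }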
+ DWORD thread_id = 0;
+ if (CreateThread(NULL, 0, EEShutDownProcForSTAThread, NULL, 0, &thread_id))
+ {
+ GCX_PREEMP_NO_DTOR();
+
+ ClrFlsSetThreadType(ThreadType_Shutdown);
+ WaitForEndOfShutdown();
+ FastInterlockIncrement(&s_ActiveShutdownThreadCount);
+ ClrFlsClearThreadType(ThreadType_Shutdown);
+ }
+ }
+ else
+ // Otherwise, this thread calls EEShutDownHelper directly. First switch to
+ // cooperative mode if this is a managed thread
+#endif
+ if (GetThread())
+ {
+ GCX_COOP();
+ EEShutDownHelper(fIsDllUnloading);
+ if (!fIsDllUnloading)
+ {
+ FastInterlockIncrement(&s_ActiveShutdownThreadCount);
+ }
+ }
+ else
+ {
+ EEShutDownHelper(fIsDllUnloading);
+ if (!fIsDllUnloading)
+ {
+ FastInterlockIncrement(&s_ActiveShutdownThreadCount);
+ }
+ }
+}
+
+// ---------------------------------------------------------------------------
+// %%Function: IsRuntimeActive()
+//
+// Parameters:
+// none
+//
+// Returns:
+// TRUE or FALSE
+//
+ // Description: Indicates whether the runtime is active. "Active" implies
+ // that the runtime has started and is in a position to run
+ // managed code. If either of these conditions is false, the
+ // function returns FALSE.
+//
+ // Why couldn't we add a !g_fEEStarted check in CanRunManagedCode?
+ //
+ //
+ // ExecuteDLL in ceemain.cpp could start the runtime
+ // (due to DLL_PROCESS_ATTACH) after invoking CanRunManagedCode.
+ // If that function were modified, this scenario could fail.
+ // Hence, IsRuntimeActive is built on top of CanRunManagedCode.
+
+// ---------------------------------------------------------------------------
+BOOL IsRuntimeActive()
+{
+ // If the runtime has started AND we can run managed code,
+ // then runtime is considered "active".
+ BOOL fCanRunManagedCode = CanRunManagedCode();
+ return (g_fEEStarted && fCanRunManagedCode);
+}
+
+// ---------------------------------------------------------------------------
+// %%Function: CanRunManagedCode()
+//
+// Parameters:
+// none
+//
+// Returns:
+// true or false
+//
+// Description: Indicates if one is currently allowed to run managed code.
+// ---------------------------------------------------------------------------
+NOINLINE BOOL CanRunManagedCodeRare(LoaderLockCheck::kind checkKind, HINSTANCE hInst /*= 0*/)
+{
+ CONTRACTL {
+ NOTHROW;
+ if (checkKind == LoaderLockCheck::ForMDA) { GC_TRIGGERS; } else { GC_NOTRIGGER; }; // because of the CustomerDebugProbe
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ // If we are shutting down the runtime, then we cannot run code.
+ if (g_fForbidEnterEE)
+ return FALSE;
+
+ // If the preallocated objects are not present, we cannot run managed code.
+ if (g_pPreallocatedOutOfMemoryException == NULL)
+ return FALSE;
+
+ // If we are finalizing live objects or processing the ExitProcess event,
+ // we cannot allow managed methods to run unless the current thread
+ // is the finalizer thread
+ if ((g_fEEShutDown & ShutDown_Finalize2) && !FinalizerThread::IsCurrentThreadFinalizer())
+ return FALSE;
+
+#if defined(FEATURE_COMINTEROP) && defined(MDA_SUPPORTED)
+ if ((checkKind == LoaderLockCheck::ForMDA) && (NULL == MDA_GET_ASSISTANT(LoaderLock)))
+ return TRUE;
+
+ if (checkKind == LoaderLockCheck::None)
+ return TRUE;
+
+ // If we are checking whether the OS loader lock is held by the current thread, then
+ // it better not be. Note that ShouldCheckLoaderLock is a cached test for whether
+ // we are checking this probe. So we can call AuxUlibIsDLLSynchronizationHeld before
+ // verifying that the probe is still enabled.
+ //
+ // What's the difference between ignoreLoaderLock & ShouldCheckLoaderLock?
+ // ShouldCheckLoaderLock is a process-wide flag. In a few places where we
+ // *know* we are in the loader lock but haven't quite reached the dangerous
+ // point, we call CanRunManagedCode suppressing/deferring this check.
+ BOOL IsHeld;
+
+ if (ShouldCheckLoaderLock(FALSE) &&
+ AuxUlibIsDLLSynchronizationHeld(&IsHeld) &&
+ IsHeld)
+ {
+ if (checkKind == LoaderLockCheck::ForMDA)
+ {
+ MDA_TRIGGER_ASSISTANT(LoaderLock, ReportViolation(hInst));
+ }
+ else
+ {
+ return FALSE;
+ }
+ }
+#endif // defined(FEATURE_COMINTEROP) && defined(MDA_SUPPORTED)
+
+ return TRUE;
+}
+
+#include <optsmallperfcritical.h>
+BOOL CanRunManagedCode(LoaderLockCheck::kind checkKind, HINSTANCE hInst /*= 0*/)
+{
+ CONTRACTL {
+ NOTHROW;
+ if (checkKind == LoaderLockCheck::ForMDA) { GC_TRIGGERS; } else { GC_NOTRIGGER; }; // because of the CustomerDebugProbe
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ // Special-case the common success cases
+ // (Try not to make any calls here so that we don't have to spill our incoming arg regs)
+ if (!g_fForbidEnterEE
+ && (g_pPreallocatedOutOfMemoryException != NULL)
+ && !(g_fEEShutDown & ShutDown_Finalize2)
+ && (((checkKind == LoaderLockCheck::ForMDA)
+#ifdef MDA_SUPPORTED
+ && (NULL == MDA_GET_ASSISTANT(LoaderLock))
+#endif // MDA_SUPPORTED
+ ) || (checkKind == LoaderLockCheck::None)))
+ {
+ return TRUE;
+ }
+
+ // Then call a helper for everything else.
+ return CanRunManagedCodeRare(checkKind, hInst);
+}
+#include <optdefault.h>
+
+
+// ---------------------------------------------------------------------------
+// %%Function: CoInitializeEE(DWORD fFlags)
+//
+// Parameters:
+// fFlags - Initialization flags for the engine. See the
+// COINITIEE enumerator for valid values.
+//
+// Returns:
+// Nothing
+//
+// Description:
+// Initializes the EE if it hasn't already been initialized. This function
+// no longer maintains a ref count since the EE doesn't support being
+// unloaded and re-loaded. It simply ensures the EE has been started.
+// ---------------------------------------------------------------------------
+HRESULT STDMETHODCALLTYPE CoInitializeEE(DWORD fFlags)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+ hr = InitializeEE((COINITIEE)fFlags);
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+// ---------------------------------------------------------------------------
+// %%Function: CoUninitializeEE
+//
+// Parameters:
+ // BOOL fIsDllUnloading: is it a safe point for full cleanup
+//
+// Returns:
+// Nothing
+//
+// Description:
+// Must be called by client on shut down in order to free up the system.
+// ---------------------------------------------------------------------------
+void STDMETHODCALLTYPE CoUninitializeEE(BOOL fIsDllUnloading)
+{
+ LIMITED_METHOD_CONTRACT;
+ //BEGIN_ENTRYPOINT_VOIDRET;
+
+ // This API is unfortunately publicly exported so we cannot get rid
+ // of it. However since the EE doesn't currently support being unloaded
+ // and re-loaded, it is useless to do any ref counting here or to pretend
+ // to unload it. The proper way to shutdown the EE is to call CorExitProcess.
+ //END_ENTRYPOINT_VOIDRET;
+
+}
+
+#ifndef FEATURE_CORECLR
+//*****************************************************************************
+// This entry point is called from the native DllMain of the loaded image.
+// This gives the COM+ loader the chance to dispatch the loader event. The
+// first call will cause the loader to look for the entry point in the user
+// image. Subsequent calls will dispatch to either the user's DllMain or
+// their Module derived class.
+//*****************************************************************************
+BOOL STDMETHODCALLTYPE _CorDllMain( // TRUE on success, FALSE on error.
+ HINSTANCE hInst, // Instance handle of the loaded module.
+ DWORD dwReason, // Reason for loading.
+ LPVOID lpReserved // Unused.
+ )
+{
+ STATIC_CONTRACT_NOTHROW;
+ //STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ //BEGIN_ENTRYPOINT_NOTHROW;
+
+ struct Param
+ {
+ HINSTANCE hInst;
+ DWORD dwReason;
+ LPVOID lpReserved;
+ BOOL retval;
+ } param;
+ param.hInst = hInst;
+ param.dwReason = dwReason;
+ param.lpReserved = lpReserved;
+ param.retval = FALSE;
+
+ // Can't use PAL_TRY/EX_TRY here as they access the ClrDebugState which gets blown away as part of the
+ // PROCESS_DETACH path. Must use the special PAL_TRY_FOR_DLLMAIN, passing the reason we're in the DllMain.
+ PAL_TRY_FOR_DLLMAIN(Param *, pParam, &param, pParam->dwReason)
+ {
+#ifdef _DEBUG
+ if (CLRTaskHosted() &&
+ ((pParam->dwReason == DLL_PROCESS_ATTACH && pParam->lpReserved == NULL) || // LoadLibrary of a managed dll
+ (pParam->dwReason == DLL_PROCESS_DETACH && pParam->lpReserved == NULL) // FreeLibrary of a managed dll
+ )) {
+ // The OS loader lock is being held by the current thread. We cannot allow the fiber
+ // to be rescheduled here while processing DllMain for a managed dll.
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTask *pTask = GetCurrentHostTask();
+ if (pTask) {
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+ _ASSERTE (pThread->HasThreadAffinity());
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ }
+#endif
+ // Since we're in _CorDllMain, we know that we were not called because of a
+ // bootstrap thunk, since they will call CorDllMainForThunk. Because of this,
+ // we can pass FALSE for the fFromThunk parameter.
+ pParam->retval = ExecuteDLL(pParam->hInst,pParam->dwReason,pParam->lpReserved, FALSE);
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ }
+ PAL_ENDTRY;
+
+ //END_ENTRYPOINT_NOTHROW;
+
+ return param.retval;
+}
+
+#endif // !FEATURE_CORECLR
+
+#ifdef FEATURE_MIXEDMODE
+//*****************************************************************************
+void STDMETHODCALLTYPE CorDllMainForThunk(HINSTANCE hInst, HINSTANCE hInstShim)
+{
+
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+
+ g_fEEIJWStartup = TRUE;
+
+ {
+
+ // If no managed thread exists, then we need to call the prepare method
+ // to try and startup the runtime and/or create a managed thread object
+ // so that installing an unwind and continue handler below is possible.
+ // If we fail to startup or create a thread, we'll raise the basic
+ // EXCEPTION_COMPLUS exception.
+ if (GetThread() == NULL)
+ {
+ HRESULT hr;
+ // Since this method is only called if a bootstrap thunk is invoked, we
+ // know that passing TRUE for fFromThunk is the correct value.
+ if (FAILED(hr = PrepareExecuteDLLForThunk(hInst, 0, NULL)))
+ {
+ RaiseComPlusException();
+ }
+ }
+
+ }
+
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ // We're actually going to run some managed code and we're inside the loader lock.
+ // There may be a customer debug probe enabled that prevents this.
+ CanRunManagedCode(hInst);
+
+ // Since this method is only called if a bootstrap thunk is invoked, we
+ // know that passing TRUE for fFromThunk is the correct value.
+ ExecuteDLL(hInst, 0, NULL, TRUE);
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+}
+#endif // FEATURE_MIXEDMODE
+
+
+#ifndef FEATURE_CORECLR
+
+// This function will do some additional PE Checks to make sure everything looks good.
+// We must do these before we run any managed code (that's why we can't do them in PEVerifier, as
+// managed code is used to determine the policy settings)
+HRESULT DoAdditionalPEChecks(HINSTANCE hInst)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ struct Param
+ {
+ HINSTANCE hInst;
+ HRESULT hr;
+ } param;
+ param.hInst = hInst;
+ param.hr = S_OK;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ PEDecoder pe(pParam->hInst);
+
+ if (!pe.CheckWillCreateGuardPage())
+ pParam->hr = COR_E_BADIMAGEFORMAT;
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ }
+ PAL_ENDTRY
+
+ return param.hr;
+}
+
+//*****************************************************************************
+// This entry point is called from the native entry point of the loaded
+// executable image. This simply calls into _CorExeMainInternal, the real
+// entry point inside a filter to trigger unhandled exception processing in the
+// event an exception goes unhandled, independent of the OS UEF mechanism.
+//*****************************************************************************
+__int32 STDMETHODCALLTYPE _CorExeMain( // Executable exit code.
+ )
+{
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_THROWS;
+
+ // We really have nothing to share with our filter at this point.
+ struct Param
+ {
+ PVOID pData;
+ } param;
+ param.pData = NULL;
+
+ PAL_TRY(Param*, _pParam, &param)
+ {
+ // Call the real function that will invoke the managed entry point
+ _CorExeMainInternal();
+ }
+ PAL_EXCEPT_FILTER(EntryPointFilter)
+ {
+ LOG((LF_STARTUP, LL_INFO10, "EntryPointFilter returned EXCEPTION_EXECUTE_HANDLER!"));
+ }
+ PAL_ENDTRY;
+
+ return 0;
+}
+
+//*****************************************************************************
+// This entry point is called from _CorExeMain. If an exception goes unhandled
+// from here, we will trigger unhandled exception processing in _CorExeMain.
+//
+// The command line arguments and other entry point data
+// will be gathered here. The entry point for the user image will be found
+// and handled accordingly.
+//*****************************************************************************
+__int32 STDMETHODCALLTYPE _CorExeMainInternal( // Executable exit code.
+ )
+{
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ // Yes, CorExeMain needs throws. If an exception passes through here, it will cause the
+ // "The application has generated an unhandled exception" dialog and offer to debug.
+
+ BEGIN_ENTRYPOINT_THROWS;
+
+ // Make sure PE file looks ok
+ HRESULT hr;
+ {
+ // We are early in the process; if we get an SO here, we will just rip
+ CONTRACT_VIOLATION(SOToleranceViolation);
+ if (FAILED(hr = DoAdditionalPEChecks(WszGetModuleHandle(NULL))))
+ {
+ GCX_PREEMP();
+ VMDumpCOMErrors(hr);
+ SetLatchedExitCode (-1);
+ goto exit;
+ }
+ }
+
+ g_fEEManagedEXEStartup = TRUE;
+ // Before we initialize the EE, make sure we've snooped for all EE-specific
+ // command line arguments that might guide our startup.
+ WCHAR *pCmdLine = WszGetCommandLine();
+ HRESULT result = CorCommandLine::SetArgvW(pCmdLine);
+
+ if (SUCCEEDED(result))
+ {
+ g_fWeOwnProcess = TRUE;
+ result = EnsureEEStarted(COINITEE_MAIN);
+ }
+
+ if (FAILED(result))
+ {
+ g_fWeOwnProcess = FALSE;
+ GCX_PREEMP();
+ VMDumpCOMErrors(result);
+ SetLatchedExitCode (-1);
+ goto exit;
+ }
+
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ // This will be called from an EXE, so this is a self-referential file; I am going to call
+ // ExecuteEXE, which will do the work to make an EXE load.
+
+ BOOL bretval = 0;
+
+ bretval = ExecuteEXE(WszGetModuleHandle(NULL));
+ if (!bretval) {
+ // The only reason I've seen this type of error in the wild is bad
+ // metadata file format versions and inadequate error handling for
+ // partially signed assemblies. While this may happen during
+ // development, our customers should not get here. This is a back-stop
+ // to catch CLR bugs. If you see this, please try to find a better way
+ // to handle your error, like throwing an unhandled exception.
+ EEMessageBoxCatastrophic(IDS_EE_COREXEMAIN_FAILED_TEXT, IDS_EE_COREXEMAIN_FAILED_TITLE);
+ SetLatchedExitCode (-1);
+ }
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+exit:
+ STRESS_LOG1(LF_STARTUP, LL_ALWAYS, "Program exiting: return code = %d", GetLatchedExitCode());
+
+ STRESS_LOG0(LF_STARTUP, LL_INFO10, "EEShutDown invoked from _CorExeMainInternal");
+
+ EEPolicy::HandleExitProcess();
+
+ END_ENTRYPOINT_THROWS;
+
+ return 0;
+}
+
+
+static BOOL CacheCommandLine(__in LPWSTR pCmdLine, __in_opt LPWSTR* ArgvW)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCmdLine));
+ PRECONDITION(CheckPointer(ArgvW));
+ } CONTRACTL_END;
+
+ if (pCmdLine) {
+ size_t len = wcslen(pCmdLine);
+
+ _ASSERT(g_pCachedCommandLine == NULL);
+ g_pCachedCommandLine = new WCHAR[len+1];
+ wcscpy_s(g_pCachedCommandLine, len+1, pCmdLine);
+ }
+
+ if (ArgvW != NULL && ArgvW[0] != NULL) {
+ WCHAR wszModuleName[MAX_PATH];
+ WCHAR wszCurDir[MAX_PATH];
+ if (!WszGetCurrentDirectory(MAX_PATH, wszCurDir))
+ return FALSE;
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:25025)
+#endif
+
+ // usage of PathCombine is safe if we ensure that the buffer specified by
+ // parameter1 can accommodate the buffers specified by parameter2, parameter3
+ // and one path separator
+ if (lstrlenW(wszCurDir) + lstrlenW(ArgvW[0]) + 1 >= COUNTOF(wszModuleName))
+ return FALSE;
+
+ if (PathCombine(wszModuleName, wszCurDir, ArgvW[0]) == NULL)
+ return FALSE;
+
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+ size_t len = wcslen(wszModuleName);
+ _ASSERT(g_pCachedModuleFileName == NULL);
+ g_pCachedModuleFileName = new WCHAR[len+1];
+ wcscpy_s(g_pCachedModuleFileName, len+1, wszModuleName);
+ }
+
+ return TRUE;
+}
+
+//*****************************************************************************
+ // This entry point is called from the native entry point of the loaded
+// executable image. The command line arguments and other entry point data
+// will be gathered here. The entry point for the user image will be found
+// and handled accordingly.
+//*****************************************************************************
+__int32 STDMETHODCALLTYPE _CorExeMain2( // Executable exit code.
+ PBYTE pUnmappedPE, // -> memory mapped code
+ DWORD cUnmappedPE, // Size of memory mapped code
+ __in LPWSTR pImageNameIn, // -> Executable Name
+ __in LPWSTR pLoadersFileName, // -> Loaders Name
+ __in LPWSTR pCmdLine) // -> Command Line
+{
+
+ // This entry point is used by clix
+ BOOL bRetVal = 0;
+
+ BEGIN_ENTRYPOINT_VOIDRET;
+ {
+ // Before we initialize the EE, make sure we've snooped for all EE-specific
+ // command line arguments that might guide our startup.
+ HRESULT result = CorCommandLine::SetArgvW(pCmdLine);
+
+ if (!CacheCommandLine(pCmdLine, CorCommandLine::GetArgvW(NULL))) {
+ LOG((LF_STARTUP, LL_INFO10, "Program exiting - CacheCommandLine failed\n"));
+ bRetVal = -1;
+ goto exit;
+ }
+
+ if (SUCCEEDED(result))
+ result = InitializeEE(COINITEE_MAIN);
+
+ if (FAILED(result)) {
+ VMDumpCOMErrors(result);
+ SetLatchedExitCode (-1);
+ goto exit;
+ }
+
+ // Load the executable
+ bRetVal = ExecuteEXE(pImageNameIn);
+
+ if (!bRetVal) {
+ // The only reason I've seen this type of error in the wild is bad
+ // metadata file format versions and inadequate error handling for
+ // partially signed assemblies. While this may happen during
+ // development, our customers should not get here. This is a back-stop
+ // to catch CLR bugs. If you see this, please try to find a better way
+ // to handle your error, like throwing an unhandled exception.
+ EEMessageBoxCatastrophic(IDS_EE_COREXEMAIN2_FAILED_TEXT, IDS_EE_COREXEMAIN2_FAILED_TITLE);
+ SetLatchedExitCode (-1);
+ }
+
+exit:
+ STRESS_LOG1(LF_STARTUP, LL_ALWAYS, "Program exiting: return code = %d", GetLatchedExitCode());
+
+ STRESS_LOG0(LF_STARTUP, LL_INFO10, "EEShutDown invoked from _CorExeMain2");
+
+ EEPolicy::HandleExitProcess();
+ }
+ END_ENTRYPOINT_VOIDRET;
+
+ return bRetVal;
+}
+
+//*****************************************************************************
+ // This is the call point to wire up an EXE. In this case we have the HMODULE
+ // and just need to make sure we do the correct self-referential things.
+//*****************************************************************************
+
+
+BOOL STDMETHODCALLTYPE ExecuteEXE(HMODULE hMod)
+{
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ _ASSERTE(hMod);
+ if (!hMod)
+ return FALSE;
+
+ ETWFireEvent(ExecExe_V1);
+
+ struct Param
+ {
+ HMODULE hMod;
+ } param;
+ param.hMod = hMod;
+
+ EX_TRY_NOCATCH(Param *, pParam, &param)
+ {
+ // Executables are part of the system domain
+ SystemDomain::ExecuteMainMethod(pParam->hMod);
+ }
+ EX_END_NOCATCH;
+
+ ETWFireEvent(ExecExeEnd_V1);
+
+ return TRUE;
+}
+
+BOOL STDMETHODCALLTYPE ExecuteEXE(__in LPWSTR pImageNameIn)
+{
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ EX_TRY_NOCATCH(LPWSTR, pImageNameInner, pImageNameIn)
+ {
+ WCHAR wzPath[MAX_PATH];
+ DWORD dwPathLength = 0;
+
+ // get the path of the executable
+ dwPathLength = WszGetFullPathName(pImageNameInner, MAX_PATH, wzPath, NULL);
+
+ if (!dwPathLength || dwPathLength > MAX_PATH)
+ {
+ ThrowWin32( !dwPathLength ? GetLastError() : ERROR_FILENAME_EXCED_RANGE);
+ }
+
+ SystemDomain::ExecuteMainMethod( NULL, (WCHAR *)wzPath );
+ }
+ EX_END_NOCATCH;
+
+ return TRUE;
+}
+#endif // FEATURE_CORECLR
+
+#ifdef FEATURE_MIXEDMODE
+
+LONG RunDllMainFilter(EXCEPTION_POINTERS* ep, LPVOID pv)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BOOL useLastThrownObject = UpdateCurrentThrowable(ep->ExceptionRecord);
+ DefaultCatchHandler(ep, NULL, useLastThrownObject, FALSE);
+
+ DefaultCatchFilterParam param(COMPLUS_EXCEPTION_EXECUTE_HANDLER);
+ return DefaultCatchFilter(ep, &param);
+}
+
+// <TODO>@Todo: For M10, this only runs unmanaged native classic entry points for
+// the IJW mc++ case.</TODO>
+HRESULT RunDllMain(MethodDesc *pMD, HINSTANCE hInst, DWORD dwReason, LPVOID lpReserved)
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ _ASSERTE(!GetAppDomain()->IsPassiveDomain());
+
+ if (!pMD) {
+ _ASSERTE(!"Must have a valid function to call!");
+ return E_INVALIDARG;
+ }
+
+ if (pMD->IsIntrospectionOnly())
+ return S_OK;
+
+ struct Param
+ {
+ MethodDesc *pMD;
+ HINSTANCE hInst;
+ DWORD dwReason;
+ LPVOID lpReserved;
+ HRESULT hr;
+ }; Param param;
+ param.pMD = pMD;
+ param.hInst = hInst;
+ param.dwReason = dwReason;
+ param.lpReserved = lpReserved;
+ param.hr = S_OK;
+
+ PAL_TRY(Param *, pParamOuter, &param)
+ {
+ EX_TRY_NOCATCH(Param *, pParam, pParamOuter)
+ {
+ HRESULT hr;
+
+ // This call is an inherently unverifiable entry point.
+ if (!Security::CanSkipVerification(pParam->pMD)) {
+ hr = SECURITY_E_UNVERIFIABLE;
+ goto Done;
+ }
+
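+ // The checks below validate that the managed entry point has the classic
+ // native DllMain shape: default calling convention, three arguments,
+ //
+ //     int32 DllMain(void*, uint32, void*)
+ //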
+ {
+ SigPointer sig(pParam->pMD->GetSigPointer());
+
+ ULONG data = 0;
+ CorElementType eType = ELEMENT_TYPE_END;
+ CorElementType eType2 = ELEMENT_TYPE_END;
+
+ IfFailGoto(sig.GetData(&data), Done);
+ if (data != IMAGE_CEE_CS_CALLCONV_DEFAULT) {
+ hr = COR_E_METHODACCESS;
+ goto Done;
+ }
+
+ IfFailGoto(sig.GetData(&data), Done);
+ if (data != 3) {
+ hr = COR_E_METHODACCESS;
+ goto Done;
+ }
+
+ IfFailGoto(sig.GetElemType(&eType), Done);
+ if (eType != ELEMENT_TYPE_I4) { // return type = int32
+ hr = COR_E_METHODACCESS;
+ goto Done;
+ }
+
+ IfFailGoto(sig.GetElemType(&eType), Done);
+ if (eType == ELEMENT_TYPE_PTR)
+ IfFailGoto(sig.GetElemType(&eType2), Done);
+
+ if (eType != ELEMENT_TYPE_PTR || eType2 != ELEMENT_TYPE_VOID) { // arg1 = void*
+ hr = COR_E_METHODACCESS;
+ goto Done;
+ }
+
+ IfFailGoto(sig.GetElemType(&eType), Done);
+ if (eType != ELEMENT_TYPE_U4) { // arg2 = uint32
+ hr = COR_E_METHODACCESS;
+ goto Done;
+ }
+
+ IfFailGoto(sig.GetElemType(&eType), Done);
+ if (eType == ELEMENT_TYPE_PTR)
+ IfFailGoto(sig.GetElemType(&eType2), Done);
+
+ if (eType != ELEMENT_TYPE_PTR || eType2 != ELEMENT_TYPE_VOID) { // arg3 = void*
+ hr = COR_E_METHODACCESS;
+ goto Done;
+ }
+ }
+
+ {
+ MethodDescCallSite dllMain(pParam->pMD);
+
+ // Set up a callstack with the values from the OS in the argument array
+ ARG_SLOT stackVar[3];
+ stackVar[0] = PtrToArgSlot(pParam->hInst);
+ stackVar[1] = (ARG_SLOT) pParam->dwReason;
+ stackVar[2] = PtrToArgSlot(pParam->lpReserved);
+
+ // Call the method in question with the arguments.
+ if((dllMain.Call_RetI4(&stackVar[0]) == 0)
+ &&(pParam->dwReason==DLL_PROCESS_ATTACH)
+ && (CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_IgnoreDllMainReturn) != 1))
+ {
+ hr = COR_E_INVALIDPROGRAM;
+#ifdef MDA_SUPPORTED
+ MdaDllMainReturnsFalse* pProbe = MDA_GET_ASSISTANT(DllMainReturnsFalse);
+ if(pProbe != NULL) pProbe->ReportError();
+#endif
+ }
+ }
+Done:
+ pParam->hr = hr;
+ }
+ EX_END_NOCATCH
+ }
+ //@TODO: Revisit why this is here, and if it's still necessary.
+ PAL_EXCEPT_FILTER(RunDllMainFilter)
+ {
+ // switch to COOPERATIVE
+ GCX_COOP_NO_DTOR();
+ // don't do anything - just want to catch it
+ }
+ PAL_ENDTRY
+
+ return param.hr;
+}
+
+//*****************************************************************************
+// fFromThunk indicates that a dependency is calling through the Import Export table,
+// and calling indirect through the IJW vtfixup slot.
+//
+// fFromThunk=FALSE means that we are running DllMain during LoadLibrary while
+// holding the loader lock.
+//
+HRESULT ExecuteDLLForAttach(HINSTANCE hInst,
+ DWORD dwReason,
+ LPVOID lpReserved,
+ BOOL fFromThunk)
+{
+ CONTRACTL{
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(lpReserved, NULL_OK));
+ } CONTRACTL_END;
+
+ PEDecoder pe(hInst);
+
+ // Note that ILOnly DLLs can have a managed entry point. This will
+ // be called when the assembly is activated by the CLR loader,
+ // and it need not be run here.
+
+ if (pe.IsILOnly())
+ {
+ _ASSERTE(!fFromThunk);
+ return S_OK;
+ }
+
+ if (!pe.HasManagedEntryPoint() && !fFromThunk)
+ return S_OK;
+
+ // We need to prep the managed assembly for execution.
+
+ AppDomain *pDomain = GetAppDomain();
+
+ // First we must find the DomainFile associated with this HMODULE. There are basically 3
+ // interesting cases:
+ //
+ // 1. The file is being loaded. In this case we have a DomainFile in existence but
+ // (very inconveniently) it does not have its HMODULE set yet. Most likely if we
+ // were to look at the state of this thread up the stack, we'd see that file is
+ // currently being loaded right above us. However, we cannot rely on this because
+ // A. We may be in case 2 (e.g. a static DLL dependency is being loaded first)
+ // B. _CorDllMain may have been called on a different thread.
+ //
+ // 2. The file has never been seen before. In this case we are basically in the dark; we
+ // simply attempt to load the file as an assembly. (If it is not an assembly we will
+ // fail.)
+ //
+ // 3. The file has been loaded but we are getting called anyway in a race. (This should not
+ // happen in the loader lock case, only when we are getting called from thunks).
+ //
+ // So, we:
+ // A. Use the current thread's LoadingFile as a hint. We will rely on this only if it has
+ // the same path as the HMOD.
+ // B. Search the app domain for a DomainFile with a matching base address, or failing that, path.
+ // C. We have no information, so assume it is a new assembly being loaded.
+
+ // A: check the loading file
+
+ StackSString path;
+ PEImage::GetPathFromDll(hInst, path);
+
+ DomainFile *pLoadingFile = GetThread()->GetLoadingFile();
+ GetThread()->ClearLoadingFile();
+
+ if (pLoadingFile != NULL)
+ {
+ if (!PEImage::PathEquals(pLoadingFile->GetFile()->GetPath(), path))
+ {
+ pLoadingFile = NULL;
+ }
+ else
+ {
+ pLoadingFile->GetFile()->SetLoadedHMODULE(hInst);
+ }
+ }
+
+ // B: look for a loading IJW module
+
+ if (pLoadingFile == NULL)
+ {
+ pLoadingFile = pDomain->FindIJWDomainFile(hInst, path);
+ }
+
+ // C: nothing else worked; in this situation we require that it be an assembly with a manifest
+ if (pLoadingFile == NULL)
+ {
+ pLoadingFile = pDomain->LoadExplicitAssembly(hInst, FALSE)->GetDomainAssembly();
+ }
+
+ // There are two cases here, loading from thunks emitted from the shim, and being called
+ // inside the loader lock for the legacy IJW dll case.
+
+ if (fFromThunk)
+ {
+ pLoadingFile->EnsureActive();
+ return S_OK;
+ }
+
+ _ASSERTE(!pe.IsILOnly() && pe.HasManagedEntryPoint());
+ // Get the entry point for the IJW module
+ Module *pModule = pLoadingFile->GetCurrentModule();
+ mdMethodDef tkEntry = pModule->GetEntryPointToken();
+
+ BOOL hasEntryPoint = (TypeFromToken(tkEntry) == mdtMethodDef &&
+ !IsNilToken(tkEntry));
+
+ if (!hasEntryPoint)
+ {
+ return S_OK;
+ }
+
+ if (pDomain->IsPassiveDomain())
+ {
+ // Running managed code while holding the loader lock can cause deadlocks.
+ // These deadlocks might happen when this assembly gets executed. However,
+ // we should avoid those deadlocks if we are in a passive AppDomain.
+ // Also, the managed entry point is now legacy, and should be replaced
+ // with a Module .cctor.
+ //
+ // We also rely on Module::CanExecuteCode() to prevent
+ // any further code from being executed from this assembly.
+ _ASSERTE(pLoadingFile && pLoadingFile->GetFile() && pLoadingFile->GetFile()->GetILimage());
+ pLoadingFile->GetFile()->GetILimage()->SetPassiveDomainOnly();
+ return S_OK;
+ }
+
+ // We're actually going to run some managed code and we're inside the loader lock.
+ // There may be a customer debug probe enabled that prevents this.
+ CanRunManagedCode(hInst);
+
+ // If we are not being called from thunks, we are inside the loader lock
+ // & have this single opportunity to run our dll main.
+ // Since we are in deadlock danger anyway (note this is the problematic legacy
+ // case only!) we disable our file loading and type loading reentrancy protection & allow
+ // loads to fully proceed.
+
+ // class level override is needed for the entire operation, not just EnsureActive
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ {
+ OVERRIDE_LOAD_LEVEL_LIMIT(FILE_ACTIVE);
+
+ // Complete the load as necessary
+ pLoadingFile->EnsureActive();
+ }
+
+ MethodDesc *pMD = pModule->FindMethodThrowing(tkEntry);
+ CONSISTENCY_CHECK(CheckPointer(pMD));
+
+ pModule->SetDllEntryPoint(pMD);
+
+ GCX_COOP();
+
+ // pModule may be for a different domain.
+ PEFile *pPEFile = pMD->GetModule()->GetFile();
+ if (!pPEFile->CheckLoaded())
+ {
+ pPEFile->SetLoadedHMODULE(hInst);
+ }
+
+ // Call the managed entry point
+ HRESULT hr = RunDllMain(pMD, hInst, dwReason, lpReserved);
+
+ return hr;
+}
+
+#endif // FEATURE_MIXEDMODE
+
+//*****************************************************************************
+BOOL ExecuteDLL_ReturnOrThrow(HRESULT hr, BOOL fFromThunk)
+{
+ CONTRACTL {
+ if (fFromThunk) THROWS; else NOTHROW;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ // If we have a failure result, and we're called from a thunk,
+ // then we need to throw an exception to communicate the error.
+ if (FAILED(hr) && fFromThunk)
+ {
+ COMPlusThrowHR(hr);
+ }
+ return SUCCEEDED(hr);
+}
+
+#if !defined(FEATURE_CORECLR) && defined(_DEBUG)
+//*****************************************************************************
+// Factor some common debug code.
+//*****************************************************************************
+static void EnsureManagedThreadExistsForHostedThread()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (CLRTaskHosted()) {
+ // If CLR is hosted, and this is on a thread that a host controls,
+ // we must have created a Thread object.
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTask *pHostTask = GetCurrentHostTask();
+ if (pHostTask)
+ {
+ CONSISTENCY_CHECK(CheckPointer(GetThread()));
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ }
+}
+#endif // !FEATURE_CORECLR && _DEBUG
+
+#ifdef FEATURE_MIXEDMODE
+//*****************************************************************************
+ // This ensures that the runtime is started and an EEThread object is created
+// for the current thread. This functionality is duplicated in ExecuteDLL,
+// except that this code will not throw.
+//*****************************************************************************
+HRESULT PrepareExecuteDLLForThunk(HINSTANCE hInst,
+ DWORD dwReason,
+ LPVOID lpReserved)
+{
+ CONTRACTL {
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(CheckPointer(lpReserved, NULL_OK));
+ PRECONDITION(CheckPointer(hInst));
+ } CONTRACTL_END;
+
+
+ HRESULT hr = S_OK;
+ Thread *pThread = GetThread();
+
+ INDEBUG(EnsureManagedThreadExistsForHostedThread();)
+
+ if (pThread == NULL)
+ {
+ // If necessary, start the runtime and create a managed thread object.
+ hr = EnsureEEStarted(COINITEE_DLL);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ if ((pThread = SetupThreadNoThrow(&hr)) == NULL)
+ {
+ return hr;
+ }
+ }
+
+ CONSISTENCY_CHECK(CheckPointer(pThread));
+
+ return S_OK;
+}
+
+#endif // FEATURE_MIXEDMODE
+
+#ifndef FEATURE_CORECLR
+//*****************************************************************************
+// This is the call point to make a DLL that is already loaded into our address
+// space run. There will be other code to actually have us load a DLL due to a
+ // class reference.
+//*****************************************************************************
+BOOL STDMETHODCALLTYPE ExecuteDLL(HINSTANCE hInst,
+ DWORD dwReason,
+ LPVOID lpReserved,
+ BOOL fFromThunk)
+{
+
+ CONTRACTL{
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ ENTRY_POINT;
+ PRECONDITION(CheckPointer(lpReserved, NULL_OK));
+ PRECONDITION(CheckPointer(hInst));
+ PRECONDITION(GetThread() != NULL || !fFromThunk);
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BOOL fRetValue = FALSE;
+
+ // This needs to be before the BEGIN_ENTRYPOINT_THROWS since
+ // we can't call ReportStackOverflow if we're almost done with
+ // shutdown and can't run managed code.
+ if (!CanRunManagedCode(LoaderLockCheck::None))
+ {
+ return fRetValue;
+ }
+
+ BEGIN_ENTRYPOINT_THROWS;
+
+ Thread *pThread = GetThread();
+
+ if (!hInst)
+ {
+ fRetValue = ExecuteDLL_ReturnOrThrow(E_FAIL, fFromThunk);
+ goto Exit;
+ }
+
+ // Note that we always check fFromThunk before checking the dwReason value.
+ // This is because the dwReason value is undefined in the case that we're
+ // being invoked due to a bootstrap (because that is by definition outside
+ // of the loader lock and there is no appropriate dwReason value).
+ if (fFromThunk ||
+ dwReason == DLL_PROCESS_ATTACH ||
+ dwReason == DLL_THREAD_ATTACH)
+ {
+ INDEBUG(EnsureManagedThreadExistsForHostedThread();)
+
+
+ // If necessary, start the runtime and create a managed thread object.
+ if (fFromThunk || dwReason == DLL_PROCESS_ATTACH)
+ {
+ hr = EnsureEEStarted(COINITEE_DLL);
+
+ if (SUCCEEDED(hr) && pThread == NULL)
+ {
+ pThread = SetupThreadNoThrow(&hr);
+ }
+
+ if(FAILED(hr))
+ {
+ fRetValue = ExecuteDLL_ReturnOrThrow(hr, fFromThunk);
+ goto Exit;
+ }
+ }
+
+ // IJW assemblies cause the thread doing the process attach to
+ // re-enter ExecuteDLL and do a thread attach. This happens when
+ // the CoInitializeEE() above executes.
+ else if (!(pThread &&
+ pThread->GetDomain() &&
+ CanRunManagedCode(LoaderLockCheck::None)))
+ {
+ fRetValue = ExecuteDLL_ReturnOrThrow(S_OK, fFromThunk);
+ goto Exit;
+ }
+
+ // We now have a thread set up - either the first 'if' created it, or
+ // the 'else if' confirmed that one already existed.
+
+#ifdef FEATURE_MIXEDMODE
+
+ EX_TRY
+ {
+ hr = ExecuteDLLForAttach(hInst, dwReason, lpReserved, fFromThunk);
+ }
+ EX_CATCH
+ {
+ // We rethrow directly here instead of using ExecuteDLL_ReturnOrThrow() to
+ // preserve the full exception information, rather than just the HRESULT
+ if (fFromThunk)
+ {
+ EX_RETHROW;
+ }
+ else
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (FAILED(hr))
+ {
+ fRetValue = ExecuteDLL_ReturnOrThrow(hr, fFromThunk);
+ goto Exit;
+ }
+#endif // FEATURE_MIXEDMODE
+ }
+ else
+ {
+ PEDecoder pe(hInst);
+ if (pe.HasManagedEntryPoint())
+ {
+ // If the EE is still intact, then run user entry points. Otherwise
+ // detach was handled when the app domain was stopped.
+ //
+ // Checks for the loader lock will occur within RunDllMain, if that's what we end up calling.
+ FAULT_NOT_FATAL();
+ if (CanRunManagedCode(LoaderLockCheck::None))
+ {
+ hr = SystemDomain::RunDllMain(hInst, dwReason, lpReserved);
+ }
+ }
+ // This does need to match the attach. We will only unload dlls
+ // at the end, and CoUninitialize will just bounce at 0. WHEN and IF we
+ // get around to unloading IL DLLs during execution prior to
+ // shutdown, we will need to bump the reference count by one to compensate
+ // for this call.
+ if (dwReason == DLL_PROCESS_DETACH && !g_fForbidEnterEE)
+ {
+#ifdef FEATURE_MIXEDMODE
+ // If we're in a decent state, we need to free the memory associated
+ // with the IJW thunk fixups.
+ // we are not in a decent state if the process is terminating (lpReserved != NULL)
+ if (g_fEEStarted && !g_fEEShutDown && !lpReserved)
+ {
+ PEImage::UnloadIJWModule(hInst);
+ }
+#endif // FEATURE_MIXEDMODE
+ }
+ }
+
+ fRetValue = ExecuteDLL_ReturnOrThrow(hr, fFromThunk);
+
+Exit:
+
+ END_ENTRYPOINT_THROWS;
+ return fRetValue;
+}
+#endif // !FEATURE_CORECLR
+
+
+Volatile<BOOL> g_bIsGarbageCollectorFullyInitialized = FALSE;
+
+void SetGarbageCollectorFullyInitialized()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ g_bIsGarbageCollectorFullyInitialized = TRUE;
+}
+
+// Tells whether the garbage collector is fully initialized
+// Stronger than IsGCHeapInitialized
+BOOL IsGarbageCollectorFullyInitialized()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return g_bIsGarbageCollectorFullyInitialized;
+}
+
+//
+// Initialize the Garbage Collector
+//
+
+void InitializeGarbageCollector()
+{
+ CONTRACTL{
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ HRESULT hr;
+
+ // Build the special Free Object used by the Generational GC
+ _ASSERT(g_pFreeObjectMethodTable == NULL);
+ g_pFreeObjectMethodTable = (MethodTable *) new BYTE[sizeof(MethodTable)];
+ ZeroMemory(g_pFreeObjectMethodTable, sizeof(MethodTable));
+
+ // As the flags in the method table indicate there are no pointers
+ // in the object, there is no gc descriptor, and thus no need to adjust
+ // the pointer to skip the gc descriptor.
+
+ g_pFreeObjectMethodTable->SetBaseSize(ObjSizeOf (ArrayBase));
+ g_pFreeObjectMethodTable->SetComponentSize(1);
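+ // (A component size of 1 lets the GC treat a free object like a byte array,
+ // so a free region of any size can be described by the array length field.)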
+
+ GCHeap *pGCHeap = GCHeap::CreateGCHeap();
+ if (!pGCHeap)
+ ThrowOutOfMemory();
+
+ hr = pGCHeap->Initialize();
+ IfFailThrow(hr);
+
+ // Thread for running finalizers...
+ if (FinalizerThread::FinalizerThreadCreate() != 1)
+ ThrowOutOfMemory();
+
+ // Now we really have fully initialized the garbage collector
+ SetGarbageCollectorFullyInitialized();
+}
+
+/*****************************************************************************/
+/* This is here only so that if we get an exception we stop before we catch it */
+LONG DllMainFilter(PEXCEPTION_POINTERS p, PVOID pv)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Exception happened in mscorwks!DllMain!");
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+//*****************************************************************************
+// This is the part of the old-style DllMain that initializes the
+// stuff that the EE team works on. It's called from the real DllMain
+// up in MSCOREE land. Separating the DllMain tasks is simply for
+// convenience due to the dual build trees.
+//*****************************************************************************
+BOOL STDMETHODCALLTYPE EEDllMain( // TRUE on success, FALSE on error.
+ HINSTANCE hInst, // Instance handle of the loaded module.
+ DWORD dwReason, // Reason for loading.
+ LPVOID lpReserved) // Unused.
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ // this runs at the top of a thread, SO is not a concern here...
+ STATIC_CONTRACT_SO_NOT_MAINLINE;
+
+
+ // HRESULT hr;
+ // BEGIN_EXTERNAL_ENTRYPOINT(&hr);
+ // EE isn't spun up enough to use this macro
+
+ struct Param
+ {
+ HINSTANCE hInst;
+ DWORD dwReason;
+ LPVOID lpReserved;
+ void **pTlsData;
+ } param;
+ param.hInst = hInst;
+ param.dwReason = dwReason;
+ param.lpReserved = lpReserved;
+ param.pTlsData = NULL;
+
+ // Can't use PAL_TRY/EX_TRY here as they access the ClrDebugState which gets blown away as part of the
+ // PROCESS_DETACH path. Must use the special PAL_TRY_FOR_DLLMAIN, passing the reason we're in the DllMain.
+ PAL_TRY_FOR_DLLMAIN(Param *, pParam, &param, pParam->dwReason)
+ {
+
+ switch (pParam->dwReason)
+ {
+ case DLL_PROCESS_ATTACH:
+ {
+ // We cache the SystemInfo for anyone to use throughout the
+ // life of the DLL.
+ GetSystemInfo(&g_SystemInfo);
+
+ // Remember module instance
+ g_pMSCorEE = pParam->hInst;
+
+#ifndef FEATURE_CORECLR
+ CoreClrCallbacks cccallbacks;
+ cccallbacks.m_hmodCoreCLR = (HINSTANCE)g_pMSCorEE;
+ cccallbacks.m_pfnIEE = IEE;
+ cccallbacks.m_pfnGetCORSystemDirectory = GetCORSystemDirectoryInternal;
+ cccallbacks.m_pfnGetCLRFunction = GetCLRFunction;
+
+ InitUtilcode(cccallbacks);
+#endif // !FEATURE_CORECLR
+
+ // Set callbacks so that LoadStringRC knows which language our
+ // threads are in so that it can return the proper localized string.
+ // TODO: This shouldn't rely on the LCID (id), but only the name
+ SetResourceCultureCallbacks(GetThreadUICultureNames,
+ GetThreadUICultureId);
+
+ InitEEPolicy();
+ InitHostProtectionManager();
+
+ break;
+ }
+
+ case DLL_PROCESS_DETACH:
+ {
+ // lpReserved is NULL if we're here because someone called FreeLibrary
+ // and non-null if we're here because the process is exiting.
+ // Since nobody should ever be calling FreeLibrary on mscorwks.dll, lpReserved
+ // should always be non-NULL.
+ _ASSERTE(pParam->lpReserved || !g_fEEStarted);
+ g_fProcessDetach = TRUE;
+
+#if defined(ENABLE_CONTRACTS_IMPL) && defined(FEATURE_STACK_PROBE)
+ // We are shutting down process. No need to check SO contract.
+ // And it is impossible to enforce SO contract in global dtor, like ModIntPairList.
+ g_EnableDefaultRWValidation = FALSE;
+#endif
+
+ if (g_fEEStarted)
+ {
+ // GetThread() may be set to NULL for Win9x during shutdown.
+ Thread *pThread = GetThread();
+ if (GCHeap::IsGCInProgress() &&
+ ( (pThread && (pThread != ThreadSuspend::GetSuspensionThread() ))
+ || !g_fSuspendOnShutdown))
+ {
+ g_fEEShutDown |= ShutDown_Phase2;
+ break;
+ }
+
+ LOG((LF_STARTUP, INFO3, "EEShutDown invoked from EEDllMain"));
+ EEShutDown(TRUE); // shut down EE if it was started up
+ }
+ else
+ {
+ CLRRemoveVectoredHandlers();
+ }
+ break;
+ }
+
+ case DLL_THREAD_DETACH:
+ {
+ // Don't destroy threads here if we're in shutdown (shutdown will
+ // clean up for us instead).
+
+ // Store the TLS data; we'll need it later and we might NULL the slot in DetachThread.
+ // This would be problematic because we can't depend on the FLS still existing.
+ pParam->pTlsData = CExecutionEngine::CheckThreadStateNoCreate(0
+#ifdef _DEBUG
+ // When we get here, OS has destroyed FLS, so FlsGetValue returns NULL now.
+ // We have validation code in CExecutionEngine::CheckThreadStateNoCreate to ensure that
+ // our TLS and FLS data are consistent, but since FLS has been destroyed, we need
+ // to silence the check there. The extra arg for checked builds is for this purpose.
+ , TRUE
+#endif
+ );
+#ifdef FEATURE_IMPLICIT_TLS
+ Thread* thread = GetThread();
+#else
+ // Don't use GetThread because perhaps we didn't initialize yet, or we
+ // have already shut down the EE. Note that there is a race here. We
+ // might ask for TLS from a slot we just released. We are assuming that
+ // nobody re-allocates that same slot while we are doing this. It just
+ // isn't worth locking for such an obscure case.
+ DWORD tlsVal = GetThreadTLSIndex();
+ Thread *thread = (tlsVal != (DWORD)-1)?(Thread *) UnsafeTlsGetValue(tlsVal):NULL;
+#endif
+ if (thread)
+ {
+#ifdef FEATURE_COMINTEROP
+ // reset the CoInitialize state
+ // so we don't call CoUninitialize during thread detach
+ thread->ResetCoInitialized();
+#endif // FEATURE_COMINTEROP
+ // For case where thread calls ExitThread directly, we need to reset the
+ // frame pointer. Otherwise stackwalk would AV. We need to do it in cooperative mode.
+ // We need to set m_GCOnTransitionsOK so this thread won't trigger GC when toggle GC mode
+ if (thread->m_pFrame != FRAME_TOP)
+ {
+#ifdef _DEBUG
+ thread->m_GCOnTransitionsOK = FALSE;
+#endif
+ GCX_COOP_NO_DTOR();
+ thread->m_pFrame = FRAME_TOP;
+ GCX_COOP_NO_DTOR_END();
+ }
+ thread->DetachThread(TRUE);
+ }
+ }
+ }
+
+ }
+ PAL_EXCEPT_FILTER(DllMainFilter)
+ {
+ }
+ PAL_ENDTRY;
+
+ if (dwReason == DLL_THREAD_DETACH || dwReason == DLL_PROCESS_DETACH)
+ {
+ if (CLRMemoryHosted())
+ {
+ // A host may not support memory operations inside the OS loader lock.
+ // We will free this memory on the finalizer thread.
+ CExecutionEngine::DetachTlsInfo(param.pTlsData);
+ }
+ else
+ {
+ CExecutionEngine::ThreadDetaching(param.pTlsData);
+ }
+ }
+ return TRUE;
+}
+
+#ifdef FEATURE_COMINTEROP_REGISTRATION
+//*****************************************************************************
+// Helper function to call the managed registration services.
+//*****************************************************************************
+enum EnumRegServicesMethods
+{
+ RegServicesMethods_RegisterAssembly = 0,
+ RegServicesMethods_UnregisterAssembly,
+ RegServicesMethods_LastMember
+};
+
+void InvokeRegServicesMethod(EnumRegServicesMethods Method, HMODULE hMod)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(Method == RegServicesMethods_RegisterAssembly ||
+ Method == RegServicesMethods_UnregisterAssembly);
+ }
+ CONTRACT_END;
+
+ GCX_PREEMP();
+ Assembly *pAssembly = GetAppDomain()->LoadExplicitAssembly(hMod, TRUE);
+
+ {
+ GCX_COOP();
+
+ // The names of the RegistrationServices methods.
+ static const BinderMethodID aMethods[] =
+ {
+ METHOD__REGISTRATION_SERVICES__REGISTER_ASSEMBLY,
+ METHOD__REGISTRATION_SERVICES__UNREGISTER_ASSEMBLY
+ };
+
+ // Allocate the RegistrationServices object.
+ OBJECTREF RegServicesObj = AllocateObject(MscorlibBinder::GetClass(CLASS__REGISTRATION_SERVICES));
+ GCPROTECT_BEGIN(RegServicesObj)
+ {
+ MethodDescCallSite registrationMethod(aMethods[Method], &RegServicesObj);
+
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(RegServicesObj),
+ ObjToArgSlot(pAssembly->GetExposedObject()),
+ 0 // unused by UnregisterAssembly
+ };
+
+ registrationMethod.Call(Args);
+ }
+ GCPROTECT_END();
+ }
+ RETURN;
+}
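+// Note: the method above is an instance of the standard pattern for calling into
+// mscorlib from the VM: bind to the method via a BinderMethodID, allocate and
+// GC-protect the managed receiver, marshal the arguments into ARG_SLOTs, and
+// invoke through a MethodDescCallSite.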
+
+//*****************************************************************************
+// This entry point is called to register the classes contained inside a
+// COM+ assembly.
+//*****************************************************************************
+STDAPI EEDllRegisterServer(HMODULE hMod)
+{
+
+ CONTRACTL{
+ NOTHROW;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Start up the runtime since we are going to use managed code to actually
+ // do the registration.
+ IfFailGo(EnsureEEStarted(COINITEE_DEFAULT));
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ InvokeRegServicesMethod(RegServicesMethods_RegisterAssembly, hMod);
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ErrExit:
+
+
+ return hr;
+}
+
+//*****************************************************************************
+// This entry point is called to unregister the classes contained inside a
+// COM+ assembly.
+//*****************************************************************************
+STDAPI EEDllUnregisterServer(HMODULE hMod)
+{
+
+ CONTRACTL{
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT;
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Start up the runtime since we are going to use managed code to actually
+ // do the registration.
+ IfFailGo(EnsureEEStarted(COINITEE_DEFAULT));
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ InvokeRegServicesMethod(RegServicesMethods_UnregisterAssembly, hMod);
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ErrExit:
+
+
+ return hr;
+}
+#endif // FEATURE_COMINTEROP_REGISTRATION
+
+#ifdef FEATURE_IPCMAN
+extern CCLRSecurityAttributeManager s_CLRSecurityAttributeManager;
+#endif // FEATURE_IPCMAN
+
+
+#ifdef DEBUGGING_SUPPORTED
+//
+// InitializeDebugger initializes the Runtime-side COM+ Debugging Services
+//
+static void InitializeDebugger(void)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Ensure that if we throw, we'll call TerminateDebugger to cleanup.
+ // This makes our Init more atomic by avoiding partially-init states.
+ class EnsureCleanup {
+ BOOL fNeedCleanup;
+ public:
+ EnsureCleanup()
+ {
+ fNeedCleanup = TRUE;
+ }
+
+ void SuppressCleanup()
+ {
+ fNeedCleanup = FALSE;
+ }
+
+ ~EnsureCleanup()
+ {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+
+ if (fNeedCleanup)
+ {
+ TerminateDebugger();
+ }
+ }
+ } hCleanup;
+
+ HRESULT hr = S_OK;
+
+ LOG((LF_CORDB, LL_INFO10, "Initializing left-side debugging services.\n"));
+
+ FARPROC gi = (FARPROC) &CorDBGetInterface;
+
+ // Init the interface the EE provides to the debugger,
+ // ask the debugger for its interface, and if all goes
+ // well call Startup on the debugger.
+ EEDbgInterfaceImpl::Init();
+ _ASSERTE(g_pEEDbgInterfaceImpl != NULL); // throws on OOM
+
+ // This allocates the Debugger object.
+ typedef HRESULT __cdecl CORDBGETINTERFACE(DebugInterface**);
+ hr = ((CORDBGETINTERFACE*)gi)(&g_pDebugInterface);
+ IfFailThrow(hr);
+
+ g_pDebugInterface->SetEEInterface(g_pEEDbgInterfaceImpl);
+
+ {
+ hr = g_pDebugInterface->Startup(); // throw on error
+ _ASSERTE(SUCCEEDED(hr));
+
+#ifdef FEATURE_CORECLR
+ //
+ // If the debug pack is not installed, Startup will return S_FALSE
+ // and we should cleanup and proceed without debugging support.
+ //
+ if (hr != S_OK)
+ {
+ return;
+ }
+#endif // FEATURE_CORECLR
+ }
+
+#if !defined(FEATURE_CORECLR) // simple hosting
+ // If there's a DebuggerThreadControl interface, then we
+ // need to update the DebuggerSpecialThread list.
+ if (CorHost::GetDebuggerThreadControl())
+ {
+ hr = CorHost::RefreshDebuggerSpecialThreadList();
+ _ASSERTE((SUCCEEDED(hr)) && (hr != S_FALSE));
+
+        // We don't think this will ever fail, but just in case...
+ IfFailThrow(hr);
+ }
+
+ // If there is a DebuggerThreadControl interface, then it was set before the debugger
+ // was initialized and we need to provide this interface now. If debugging is already
+ // initialized then the IDTC pointer is passed in when it is set through CorHost
+ IDebuggerThreadControl *pDTC = CorHost::GetDebuggerThreadControl();
+
+ if (pDTC != NULL)
+ {
+ g_pDebugInterface->SetIDbgThreadControl(pDTC);
+ }
+#endif // !defined(FEATURE_CORECLR)
+
+ LOG((LF_CORDB, LL_INFO10, "Left-side debugging services setup.\n"));
+
+ hCleanup.SuppressCleanup();
+
+ return;
+}
+
+
+//
+// TerminateDebugger shuts down the Runtime-side COM+ Debugging Services
+// InitializeDebugger will call this if it fails.
+// This may be called even if the debugger is partially initialized.
+// This can be called multiple times.
+//
+static void TerminateDebugger(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10, "Shutting down left-side debugger services.\n"));
+
+    // If initialization failed really early, then we never even created the Debugger object.
+ if (g_pDebugInterface != NULL)
+ {
+ // Notify the out-of-process debugger that shutdown of the in-process debugging support has begun. This is only
+ // really used in interop debugging scenarios.
+ g_pDebugInterface->ShutdownBegun();
+
+ // This will kill the helper thread, delete the Debugger object, and free all resources.
+ g_pDebugInterface->StopDebugger();
+ g_pDebugInterface = NULL;
+ }
+
+ // Delete this after Debugger, since Debugger may use this.
+ EEDbgInterfaceImpl::Terminate();
+ _ASSERTE(g_pEEDbgInterfaceImpl == NULL); // Terminate nulls this out for us.
+
+ g_CORDebuggerControlFlags = DBCF_NORMAL_OPERATION;
+
+#if !defined(FEATURE_CORECLR) // simple hosting
+ CorHost::CleanupDebuggerThreadControl();
+#endif // !defined(FEATURE_CORECLR)
+}
+
+
+#ifdef FEATURE_IPCMAN
+// ---------------------------------------------------------------------------
+// Initialize InterProcess Communications for COM+
+// 1. Allocate an IPCManager Implementation and hook it up to our interface
+// 2. Call proper init functions to activate relevant portions of IPC block
+// ---------------------------------------------------------------------------
+static HRESULT InitializeIPCManager(void)
+{
+ CONTRACTL{
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ HINSTANCE hInstIPCBlockOwner = 0;
+
+ DWORD pid = 0;
+ // Allocate the Implementation. Everyone else will work through the interface
+ g_pIPCManagerInterface = new (nothrow) IPCWriterInterface();
+
+ if (g_pIPCManagerInterface == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ goto errExit;
+ }
+
+ pid = GetCurrentProcessId();
+
+
+ // Do general init
+ hr = g_pIPCManagerInterface->Init();
+
+ if (!SUCCEEDED(hr))
+ {
+ goto errExit;
+ }
+
+    // Generate the private IPCBlock for our PID. Note that the other side of the
+    // debugger hooks up to the debuggee's PID (not its own), so we still have to
+    // pass the PID in.
+ EX_TRY
+ {
+ // <TODO>This should go away in the future.</TODO>
+ hr = g_pIPCManagerInterface->CreateLegacyPrivateBlockTempV4OnPid(pid, FALSE, &hInstIPCBlockOwner);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (hr == HRESULT_FROM_WIN32(ERROR_ALREADY_EXISTS))
+ {
+        // We failed to create the IPC block because it has already been created. This means
+        // that two copies of mscoree have been loaded into the process.
+ WCHAR strFirstModule[256];
+ WCHAR strSecondModule[256];
+
+ // Get the name and path of the first loaded MSCOREE.DLL.
+ if (!hInstIPCBlockOwner || !WszGetModuleFileName(hInstIPCBlockOwner, strFirstModule, 256))
+ wcscpy_s(strFirstModule, COUNTOF(strFirstModule), W("<Unknown>"));
+
+ // Get the name and path of the second loaded MSCOREE.DLL.
+ if (!WszGetModuleFileName(g_pMSCorEE, strSecondModule, 256))
+ wcscpy_s(strSecondModule, COUNTOF(strSecondModule), W("<Unknown>"));
+
+ // Load the format strings for the title and the message body.
+ EEMessageBoxCatastrophic(IDS_EE_TWO_LOADED_MSCOREE_MSG, IDS_EE_TWO_LOADED_MSCOREE_TITLE, strFirstModule, strSecondModule);
+ goto errExit;
+ }
+ else
+ {
+        if (!WszGetModuleFileName(GetModuleInst(),
+                                  (PWSTR)g_pIPCManagerInterface->GetInstancePath(),
+                                  MAX_PATH))
+ {
+ hr = HRESULT_FROM_GetLastErrorNA();
+ }
+ }
+
+ // Generate public IPCBlock for our PID.
+ EX_TRY
+ {
+ hr = g_pIPCManagerInterface->CreateSxSPublicBlockOnPid(pid);
+ }
+ EX_CATCH_HRESULT(hr);
+
+
+errExit:
+ // If any failure, shut everything down.
+ if (!SUCCEEDED(hr))
+ TerminateIPCManager();
+
+ return hr;
+}
+#endif // FEATURE_IPCMAN
+
+#endif // DEBUGGING_SUPPORTED
+
+
+// ---------------------------------------------------------------------------
+// Marks the IPC block as initialized so that other processes know that the
+// block is safe to read
+// ---------------------------------------------------------------------------
+#ifdef FEATURE_IPCMAN
+static void PublishIPCManager(void)
+{
+ CONTRACTL{
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (g_pIPCManagerInterface != NULL)
+ g_pIPCManagerInterface->Publish();
+}
+#endif // FEATURE_IPCMAN
+
+
+
+#ifdef FEATURE_IPCMAN
+// ---------------------------------------------------------------------------
+// Terminate all InterProcess operations
+// ---------------------------------------------------------------------------
+static void TerminateIPCManager(void)
+{
+ CONTRACTL{
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (g_pIPCManagerInterface != NULL)
+ {
+ g_pIPCManagerInterface->Terminate();
+ delete g_pIPCManagerInterface;
+ g_pIPCManagerInterface = NULL;
+ }
+
+}
+#endif // FEATURE_IPCMAN
+
+#ifndef LOCALE_SPARENT
+#define LOCALE_SPARENT 0x0000006d
+#endif
+
+// ---------------------------------------------------------------------------
+// Impl for the UtilLoadStringRC callback: in the VM, we let the thread decide the
+// culture. Appends the thread's UI culture name and its parent culture's name to
+// pCultureNames.
+// ---------------------------------------------------------------------------
+static HRESULT GetThreadUICultureNames(__inout StringArrayList* pCultureNames)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCultureNames));
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ InlineSString<LOCALE_NAME_MAX_LENGTH> sCulture;
+ InlineSString<LOCALE_NAME_MAX_LENGTH> sParentCulture;
+
+
+ Thread * pThread = GetThread();
+
+ if (pThread != NULL) {
+
+ // Switch to cooperative mode, since we'll be looking at managed objects
+ // and we don't want them moving on us.
+ GCX_COOP();
+
+ THREADBASEREF pThreadBase = (THREADBASEREF)pThread->GetExposedObjectRaw();
+
+ if (pThreadBase != NULL)
+ {
+ CULTUREINFOBASEREF pCurrentCulture = pThreadBase->GetCurrentUICulture();
+
+ if (pCurrentCulture != NULL)
+ {
+ STRINGREF cultureName = pCurrentCulture->GetName();
+
+ if (cultureName != NULL)
+ {
+ sCulture.Set(cultureName->GetBuffer(),cultureName->GetStringLength());
+ }
+
+ CULTUREINFOBASEREF pParentCulture = pCurrentCulture->GetParent();
+
+ if (pParentCulture != NULL)
+ {
+ STRINGREF parentCultureName = pParentCulture->GetName();
+
+ if (parentCultureName != NULL)
+ {
+ sParentCulture.Set(parentCultureName->GetBuffer(),parentCultureName->GetStringLength());
+ }
+
+ }
+ }
+ }
+ }
+ // If the lazily-initialized cultureinfo structures aren't initialized yet, we'll
+ // need to do the lookup the hard way.
+ if (sCulture.IsEmpty() || sParentCulture.IsEmpty())
+ {
+            LocaleIDValue id;
+            int tmp = GetThreadUICultureId(&id); // TODO: We should use the name instead
+            _ASSERTE(tmp != 0 && id != UICULTUREID_DONTCARE);
+            SIZE_T cchParentCultureName = LOCALE_NAME_MAX_LENGTH;
+#ifdef FEATURE_USE_LCID
+ SIZE_T cchCultureName=LOCALE_NAME_MAX_LENGTH;
+ if (!NewApis::LCIDToLocaleName(id, sCulture.OpenUnicodeBuffer(static_cast<COUNT_T>(cchCultureName)), static_cast<int>(cchCultureName), 0))
+ {
+ hr = HRESULT_FROM_GetLastError();
+ }
+ sCulture.CloseBuffer();
+#else
+ sCulture.Set(id);
+#endif
+
+#ifndef FEATURE_PAL
+ if (!NewApis::GetLocaleInfoEx((LPCWSTR)sCulture, LOCALE_SPARENT, sParentCulture.OpenUnicodeBuffer(static_cast<COUNT_T>(cchParentCultureName)),static_cast<int>(cchParentCultureName)))
+ {
+ hr = HRESULT_FROM_GetLastError();
+ }
+ sParentCulture.CloseBuffer();
+#else // !FEATURE_PAL
+ sParentCulture = sCulture;
+#endif // !FEATURE_PAL
+ }
+        // The (LPCWSTR) casts restrict the strings to their null-terminated size.
+ pCultureNames->AppendIfNotThere((LPCWSTR)sCulture);
+ // Disabling for Dev10 for consistency with managed resource lookup (see AppCompat bug notes in ResourceFallbackManager.cs)
+ // Also, this is in the wrong order - put after the parent culture chain.
+ //AddThreadPreferredUILanguages(pCultureNames);
+ pCultureNames->AppendIfNotThere((LPCWSTR)sParentCulture);
+ pCultureNames->Append(SString::Empty());
+ }
+ EX_CATCH
+ {
+ hr=E_OUTOFMEMORY;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return hr;
+}
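+// Illustrative example (not part of the original source): for a thread whose
+// current UI culture is "fr-CA", the list built above would typically be
+// { "fr-CA", "fr", "" } - the culture itself, its parent, and the empty string
+// terminating the fallback chain.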
+
+// The exit code for the process is communicated in one of two ways. If the
+// entrypoint returns an 'int' we take that. Otherwise we take a latched
+// process exit code. This can be modified by the app via System.SetExitCode().
+static INT32 LatchedExitCode;
+
+void SetLatchedExitCode (INT32 code)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ STRESS_LOG1(LF_SYNC, LL_INFO10, "SetLatchedExitCode = %d\n", code);
+ LatchedExitCode = code;
+}
+
+INT32 GetLatchedExitCode (void)
+{
+ LIMITED_METHOD_CONTRACT;
+ return LatchedExitCode;
+}
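+// Usage sketch (illustrative): an entrypoint that does not return an int can
+// still communicate an exit code through the latch, e.g.:
+//
+//     SetLatchedExitCode(1);
+//     ...
+//     ::ExitProcess(GetLatchedExitCode());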
+
+
+// ---------------------------------------------------------------------------
+// Impl for the UtilLoadStringRC callback: in the VM, we let the thread decide the
+// culture. Returns an int uniquely describing which language this thread is using
+// for its UI.
+// ---------------------------------------------------------------------------
+// TODO: Callers should use names, not LCIDs
+#ifdef FEATURE_USE_LCID
+static int GetThreadUICultureId(__out LocaleIDValue* pLocale)
+{
+ CONTRACTL{
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+        SO_INTOLERANT;
+ } CONTRACTL_END;
+
+
+
+ int Result = UICULTUREID_DONTCARE;
+
+ Thread * pThread = GetThread();
+
+ if (pThread != NULL) {
+
+ // Switch to cooperative mode, since we'll be looking at managed objects
+ // and we don't want them moving on us.
+ GCX_COOP();
+
+ THREADBASEREF pThreadBase = (THREADBASEREF)pThread->GetExposedObjectRaw();
+ if (pThreadBase != NULL)
+ {
+ CULTUREINFOBASEREF pCurrentCulture = pThreadBase->GetCurrentUICulture();
+
+ if (pCurrentCulture != NULL)
+ {
+ STRINGREF cultureName = pCurrentCulture->GetName();
+ _ASSERT(cultureName != NULL);
+
+ if ((Result = NewApis::LocaleNameToLCID(cultureName->GetBuffer(), 0)) == 0)
+ Result = (int)UICULTUREID_DONTCARE;
+ }
+ }
+ }
+
+ if (Result == (int)UICULTUREID_DONTCARE)
+ {
+ // This thread isn't set up to use a non-default culture. Let's grab the default
+ // one and return that.
+
+ Result = COMNlsInfo::CallGetUserDefaultUILanguage();
+
+ if (Result == 0 || Result == (int)UICULTUREID_DONTCARE)
+ Result = GetUserDefaultLangID();
+
+ _ASSERTE(Result != 0);
+ if (Result == 0)
+ {
+ Result = (int)UICULTUREID_DONTCARE;
+ }
+
+ }
+ *pLocale=Result;
+ return Result;
+}
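+// For reference, the fallback order implemented above: the managed thread's
+// CurrentUICulture name mapped to an LCID, then the user-default UI language,
+// then the user-default LANGID; UICULTUREID_DONTCARE is returned only if every
+// lookup fails.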
+#else
+// TODO: Callers should use names, not LCIDs
+static int GetThreadUICultureId(__out LocaleIDValue* pLocale)
+{
+ CONTRACTL{
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+        SO_INTOLERANT;
+ } CONTRACTL_END;
+
+ _ASSERTE(sizeof(LocaleIDValue)/sizeof(WCHAR) >= LOCALE_NAME_MAX_LENGTH);
+
+ int Result = 0;
+
+ Thread * pThread = GetThread();
+
+ if (pThread != NULL) {
+
+ // Switch to cooperative mode, since we'll be looking at managed objects
+ // and we don't want them moving on us.
+ GCX_COOP();
+
+ THREADBASEREF pThreadBase = (THREADBASEREF)pThread->GetExposedObjectRaw();
+ if (pThreadBase != NULL)
+ {
+ CULTUREINFOBASEREF pCurrentCulture = pThreadBase->GetCurrentUICulture();
+
+ if (pCurrentCulture != NULL)
+ {
+ STRINGREF currentCultureName = pCurrentCulture->GetName();
+
+ if (currentCultureName != NULL)
+ {
+ int cchCurrentCultureNameResult = currentCultureName->GetStringLength();
+ if (cchCurrentCultureNameResult < LOCALE_NAME_MAX_LENGTH)
+ {
+ memcpy(*pLocale, currentCultureName->GetBuffer(), cchCurrentCultureNameResult*sizeof(WCHAR));
+ (*pLocale)[cchCurrentCultureNameResult]='\0';
+ Result=cchCurrentCultureNameResult;
+ }
+ }
+
+ }
+ }
+ }
+ if (Result == 0)
+ {
+#ifndef FEATURE_PAL
+ // This thread isn't set up to use a non-default culture. Let's grab the default
+ // one and return that.
+
+ Result = NewApis::GetUserDefaultLocaleName(*pLocale, LOCALE_NAME_MAX_LENGTH);
+
+ _ASSERTE(Result != 0);
+#else // !FEATURE_PAL
+        static const WCHAR enUS[] = W("en-US");
+        memcpy(*pLocale, enUS, sizeof(enUS));
+        Result = COUNTOF(enUS); // length in characters (including the terminator), not bytes
+#endif // !FEATURE_PAL
+ }
+ return Result;
+}
+
+#endif // FEATURE_USE_LCID
+// ---------------------------------------------------------------------------
+// Export shared logging code for the JIT, et al.
+// ---------------------------------------------------------------------------
+#ifdef _DEBUG
+
+extern VOID LogAssert( LPCSTR szFile, int iLine, LPCSTR expr);
+extern "C"
+//__declspec(dllexport)
+VOID STDMETHODCALLTYPE LogHelp_LogAssert( LPCSTR szFile, int iLine, LPCSTR expr)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ ENTRY_POINT;
+ PRECONDITION(CheckPointer(szFile));
+ PRECONDITION(CheckPointer(expr));
+ } CONTRACTL_END;
+
+ BEGIN_ENTRYPOINT_VOIDRET;
+ LogAssert(szFile, iLine, expr);
+ END_ENTRYPOINT_VOIDRET;
+
+}
+
+extern BOOL NoGuiOnAssert();
+extern "C"
+//__declspec(dllexport)
+BOOL STDMETHODCALLTYPE LogHelp_NoGuiOnAssert()
+{
+ LIMITED_METHOD_CONTRACT;
+ BOOL fRet = FALSE;
+ BEGIN_ENTRYPOINT_VOIDRET;
+ fRet = NoGuiOnAssert();
+ END_ENTRYPOINT_VOIDRET;
+ return fRet;
+}
+
+extern VOID TerminateOnAssert();
+extern "C"
+//__declspec(dllexport)
+VOID STDMETHODCALLTYPE LogHelp_TerminateOnAssert()
+{
+ LIMITED_METHOD_CONTRACT;
+ BEGIN_ENTRYPOINT_VOIDRET;
+// __asm int 3;
+ TerminateOnAssert();
+ END_ENTRYPOINT_VOIDRET;
+
+}
+
+#else // !_DEBUG
+
+extern "C"
+//__declspec(dllexport)
+VOID STDMETHODCALLTYPE LogHelp_LogAssert( LPCSTR szFile, int iLine, LPCSTR expr) {
+ LIMITED_METHOD_CONTRACT;
+
+ //BEGIN_ENTRYPOINT_VOIDRET;
+ //END_ENTRYPOINT_VOIDRET;
+}
+
+extern "C"
+//__declspec(dllexport)
+BOOL STDMETHODCALLTYPE LogHelp_NoGuiOnAssert() {
+ LIMITED_METHOD_CONTRACT;
+
+ //BEGIN_ENTRYPOINT_VOIDRET;
+ //END_ENTRYPOINT_VOIDRET;
+
+ return FALSE;
+}
+
+extern "C"
+//__declspec(dllexport)
+VOID STDMETHODCALLTYPE LogHelp_TerminateOnAssert() {
+ LIMITED_METHOD_CONTRACT;
+
+ //BEGIN_ENTRYPOINT_VOIDRET;
+ //END_ENTRYPOINT_VOIDRET;
+
+}
+
+#endif // _DEBUG
+
+
+#ifndef ENABLE_PERF_COUNTERS
+//
+// perf counter stubs for builds which don't have perf counter support
+// These are needed because we export these functions in our DLL
+
+
+Perf_Contexts* STDMETHODCALLTYPE GetPrivateContextsPerfCounters()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ //BEGIN_ENTRYPOINT_VOIDRET;
+ //END_ENTRYPOINT_VOIDRET;
+
+ return NULL;
+}
+
+#endif
+
+
+#ifdef ENABLE_CONTRACTS_IMPL
+
+// Returns TRUE if any contract violation suppressions are in effect.
+BOOL AreAnyViolationBitsOn()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ UINT_PTR violationMask = GetClrDebugState()->ViolationMask();
+ violationMask &= ~((UINT_PTR)CanFreeMe); //CanFreeMe is a borrowed bit and has nothing to do with violations
+ if (violationMask & ((UINT_PTR)BadDebugState))
+ {
+ return FALSE;
+ }
+
+ return violationMask != 0;
+}
+
+
+// This function is intentionally invoked inside a big CONTRACT_VIOLATION that turns on every violation
+// bit on the map. The dynamic contract at the beginning *should* turn off those violation bits.
+// The body of this function tests to see that it did exactly that. This is to prevent the VSWhidbey B#564831 fiasco
+// from ever recurring.
+void ContractRegressionCheckInner()
+{
+ // DO NOT TURN THIS CONTRACT INTO A STATIC CONTRACT!!! The very purpose of this function
+ // is to ensure that dynamic contracts disable outstanding contract violation bits.
+    // This code only runs once at process startup, so it's not going to pooch the checked build perf.
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ LOADS_TYPE(CLASS_LOAD_BEGIN);
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END
+
+ if (AreAnyViolationBitsOn())
+ {
+ // If we got here, the contract above FAILED to turn off one or more violation bits. This is a
+ // huge diagnostics hole and must be fixed immediately.
+ _ASSERTE(!("WARNING: mscorwks has detected an internal error that may indicate contracts are"
+ " being silently disabled across the runtime. Do not ignore this assert!"));
+ }
+}
+
+// This function executes once per process to ensure our CONTRACT_VIOLATION() mechanism
+// is properly scope-limited by nested contracts.
+void ContractRegressionCheck()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ {
+ // DO NOT "FIX" THIS CONTRACT_VIOLATION!!!
+ // The existence of this CONTRACT_VIOLATION is not a bug. This is debug-only code specifically written
+ // to test the CONTRACT_VIOLATION mechanism itself. This is needed to prevent a regression of
+ // B#564831 (which left a huge swath of contracts silently disabled for over six months)
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation
+ | GCViolation
+ | FaultViolation
+ | LoadsTypeViolation
+ | TakesLockViolation
+ , ReasonContractInfrastructure
+ );
+ {
+ FAULT_NOT_FATAL();
+ ContractRegressionCheckInner();
+ }
+ }
+
+ if (AreAnyViolationBitsOn())
+ {
+ // If we got here, the CONTRACT_VIOLATION() holder left one or more violation bits turned ON
+ // after we left its scope. This is a huge diagnostic hole and must be fixed immediately.
+ _ASSERTE(!("WARNING: mscorwks has detected an internal error that may indicate contracts are"
+ " being silently disabled across the runtime. Do not ignore this assert!"));
+ }
+
+}
+
+#endif // ENABLE_CONTRACTS_IMPL
+
+#ifndef FEATURE_CORECLR
+//-------------------------------------------------------------------------
+// CorCommandLine state and methods
+//-------------------------------------------------------------------------
+// Class to encapsulate Cor Command line processing
+
+// Statics for the CorCommandLine class
+DWORD CorCommandLine::m_NumArgs = 0;
+LPWSTR *CorCommandLine::m_ArgvW = 0;
+
+LPWSTR CorCommandLine::m_pwszAppFullName = NULL;
+DWORD CorCommandLine::m_dwManifestPaths = 0;
+LPWSTR *CorCommandLine::m_ppwszManifestPaths = NULL;
+DWORD CorCommandLine::m_dwActivationData = 0;
+LPWSTR *CorCommandLine::m_ppwszActivationData = NULL;
+
+#ifdef _DEBUG
+LPCWSTR g_CommandLine;
+#endif
+
+// Set argvw from command line
+/* static */
+HRESULT CorCommandLine::SetArgvW(LPCWSTR lpCommandLine)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+
+ PRECONDITION(CheckPointer(lpCommandLine));
+ }
+ CONTRACTL_END
+
+ HRESULT hr = S_OK;
+ if(!m_ArgvW) {
+ INDEBUG(g_CommandLine = lpCommandLine);
+
+ InitializeLogging(); // This is so early, we may not be initialized
+ LOG((LF_ALWAYS, LL_INFO10, "Executing program with command line '%S'\n", lpCommandLine));
+
+ m_ArgvW = SegmentCommandLine(lpCommandLine, &m_NumArgs);
+
+ if (!m_ArgvW)
+ return E_OUTOFMEMORY;
+
+ // Click once specific parsing
+ hr = ReadClickOnceEnvVariables();
+ }
+
+ return hr;
+}
+
+// Retrieve the command line
+/* static */
+LPWSTR* CorCommandLine::GetArgvW(DWORD *pNumArgs)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (pNumArgs != 0)
+ *pNumArgs = m_NumArgs;
+
+ return m_ArgvW;
+}
+
+HRESULT CorCommandLine::ReadClickOnceEnvVariables()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+
+ EX_TRY
+ {
+ // Find out if this is a ClickOnce application being activated.
+ DWORD cAppFullName = WszGetEnvironmentVariable(g_pwzClickOnceEnv_FullName, NULL, 0);
+ if (cAppFullName > 0) {
+ // get the application full name.
+ m_pwszAppFullName = new WCHAR[cAppFullName];
+ WszGetEnvironmentVariable(g_pwzClickOnceEnv_FullName, m_pwszAppFullName, cAppFullName);
+ // reset the variable now that we read it so child processes
+ // do not think they are a clickonce app.
+ WszSetEnvironmentVariable(g_pwzClickOnceEnv_FullName, NULL);
+
+ // see if we have application manifest files.
+ DWORD dwManifestPaths = 0;
+ while (1) {
+ StackSString manifestFile(g_pwzClickOnceEnv_Manifest);
+ StackSString buf;
+ COUNT_T size = buf.GetUnicodeAllocation();
+ _itow_s(dwManifestPaths, buf.OpenUnicodeBuffer(size), size, 10);
+ buf.CloseBuffer();
+ manifestFile.Append(buf);
+ if (WszGetEnvironmentVariable(manifestFile.GetUnicode(), NULL, 0) > 0)
+ dwManifestPaths++;
+ else
+ break;
+ }
+ m_ppwszManifestPaths = new LPWSTR[dwManifestPaths];
+ for (DWORD i=0; i<dwManifestPaths; i++) {
+ StackSString manifestFile(g_pwzClickOnceEnv_Manifest);
+ StackSString buf;
+ COUNT_T size = buf.GetUnicodeAllocation();
+ _itow_s(i, buf.OpenUnicodeBuffer(size), size, 10);
+ buf.CloseBuffer();
+ manifestFile.Append(buf);
+ DWORD cManifestPath = WszGetEnvironmentVariable(manifestFile.GetUnicode(), NULL, 0);
+ if (cManifestPath > 0) {
+ m_ppwszManifestPaths[i] = new WCHAR[cManifestPath];
+ WszGetEnvironmentVariable(manifestFile.GetUnicode(), m_ppwszManifestPaths[i], cManifestPath);
+ WszSetEnvironmentVariable(manifestFile.GetUnicode(), NULL); // reset the env. variable.
+ }
+ }
+ m_dwManifestPaths = dwManifestPaths;
+
+ // see if we have activation data arguments.
+ DWORD dwActivationData = 0;
+ while (1) {
+ StackSString activationData(g_pwzClickOnceEnv_Parameter);
+ StackSString buf;
+ COUNT_T size = buf.GetUnicodeAllocation();
+ _itow_s(dwActivationData, buf.OpenUnicodeBuffer(size), size, 10);
+ buf.CloseBuffer();
+ activationData.Append(buf);
+ if (WszGetEnvironmentVariable(activationData.GetUnicode(), NULL, 0) > 0)
+ dwActivationData++;
+ else
+ break;
+ }
+ m_ppwszActivationData = new LPWSTR[dwActivationData];
+ for (DWORD i=0; i<dwActivationData; i++) {
+ StackSString activationData(g_pwzClickOnceEnv_Parameter);
+ StackSString buf;
+ COUNT_T size = buf.GetUnicodeAllocation();
+ _itow_s(i, buf.OpenUnicodeBuffer(size), size, 10);
+ buf.CloseBuffer();
+ activationData.Append(buf);
+ DWORD cActivationData = WszGetEnvironmentVariable(activationData.GetUnicode(), NULL, 0);
+ if (cActivationData > 0) {
+ m_ppwszActivationData[i] = new WCHAR[cActivationData];
+ WszGetEnvironmentVariable(activationData.GetUnicode(), m_ppwszActivationData[i], cActivationData);
+ WszSetEnvironmentVariable(activationData.GetUnicode(), NULL); // reset the env. variable.
+ }
+ }
+ m_dwActivationData = dwActivationData;
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ END_SO_INTOLERANT_CODE;
+
+ return hr;
+}
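+// Illustrative layout of the environment this parser consumes: the manifest and
+// activation-data variables are numbered by appending a decimal index to the
+// base names (g_pwzClickOnceEnv_Manifest and g_pwzClickOnceEnv_Parameter), so
+// the loops above probe <base>0, <base>1, ... until a lookup fails, then read
+// and clear each variable.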
+
+#endif // !FEATURE_CORECLR
+
+#endif // CROSSGEN_COMPILE
+
+
+//
+// GetOSVersion - Gets the real OS version, bypassing the OS compatibility shim.
+// Mscoree.dll resides in the System32 dir and is always excluded from the compat shim.
+// This function calls the shim in mscoree via the mscoreei ICLRRuntimeHostInternal
+// interface to get the OS version. On PAL and CoreCLR builds we do not do this; we
+// call the OS directly instead.
+//
+BOOL GetOSVersion(LPOSVERSIONINFO lposVer)
+{
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+
+ //declared static to cache the version info
+ static OSVERSIONINFOEX osvi = {0};
+ BOOL ret = TRUE;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return FALSE);
+
+ //If not yet cached get the OS version info
+ if(osvi.dwMajorVersion == 0)
+ {
+ osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
+
+ ReleaseHolder<ICLRRuntimeHostInternal> pRuntimeHostInternal;
+ //Get the interface
+ HRESULT hr = g_pCLRRuntime->GetInterface(CLSID_CLRRuntimeHostInternal,
+ IID_ICLRRuntimeHostInternal,
+ &pRuntimeHostInternal);
+
+ _ASSERT(SUCCEEDED(hr));
+
+ //Call mscoree!GetVersionExWrapper() through mscoreei interface method
+ hr = pRuntimeHostInternal->GetTrueOSVersion((LPOSVERSIONINFO)&osvi);
+ if(!SUCCEEDED(hr))
+ {
+ osvi.dwMajorVersion = 0;
+ ret = FALSE;
+ goto FUNCEND;
+ }
+ }
+
+    if (lposVer->dwOSVersionInfoSize == sizeof(OSVERSIONINFOEX) || lposVer->dwOSVersionInfoSize == sizeof(OSVERSIONINFO))
+ {
+ //Copy the cached version info to the return memory location
+ memcpy(lposVer,&osvi, lposVer->dwOSVersionInfoSize);
+ }
+ else
+ {
+ //return failure if dwOSVersionInfoSize not set properly
+ ret = FALSE;
+ }
+
+FUNCEND:
+ END_SO_INTOLERANT_CODE;
+
+ return ret;
+#else
+// Fix for warnings when building against WinBlue build 9444.0.130614-1739
+// warning C4996: 'GetVersionExW': was declared deprecated
+// externalapis\windows\winblue\sdk\inc\sysinfoapi.h(442)
+// Deprecated. Use VerifyVersionInfo* or IsWindows* macros from VersionHelpers.
+#pragma warning( disable : 4996 )
+ return WszGetVersionEx(lposVer);
+#pragma warning( default : 4996 )
+#endif
+}
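+// Usage sketch (illustrative): as with the Win32 API, callers must set
+// dwOSVersionInfoSize before calling:
+//
+//     OSVERSIONINFOEX osVer = {0};
+//     osVer.dwOSVersionInfoSize = sizeof(osVer);
+//     if (GetOSVersion((LPOSVERSIONINFO)&osVer))
+//     {
+//         // osVer.dwMajorVersion etc. are now valid
+//     }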
diff --git a/src/vm/ceemain.h b/src/vm/ceemain.h
new file mode 100644
index 0000000000..e4f70057b9
--- /dev/null
+++ b/src/vm/ceemain.h
@@ -0,0 +1,252 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: CEEMAIN.H
+//
+
+//
+//
+
+// CEEMAIN.H defines the entrypoints into the Virtual Execution Engine and
+// gets the load/run process going.
+// ===========================================================================
+
+#ifndef CEEMain_H
+#define CEEMain_H
+
+#include <windef.h> // for HFILE, HANDLE, HMODULE
+
+class EEDbgInterfaceImpl;
+
+// Ensure the EE is started up.
+HRESULT EnsureEEStarted(COINITIEE flags);
+
+// Wrapper around EnsureEEStarted which also sets startup mode.
+HRESULT InitializeEE(COINITIEE flags);
+
+// Has the EE been started up?
+BOOL IsRuntimeStarted(DWORD *pdwStartupFlags);
+
+// Enum to control what happens at the end of EE shutdown. There are two options:
+// 1. Call ::ExitProcess to cause the process to terminate gracefully. This is how
+// shutdown normally ends. "Shutdown" methods that take this action as an argument
+// do not return when SCA_ExitProcessWhenShutdownComplete is passed.
+//
+// 2. Return after performing all shutdown processing. This is a special case used
+// by a shutdown initiated via the Shim, and is used to ensure that all runtimes
+// loaded SxS are shutdown gracefully. "Shutdown" methods that take this action
+// as an argument return when SCA_ReturnWhenShutdownComplete is passed.
+enum ShutdownCompleteAction
+{
+ SCA_ExitProcessWhenShutdownComplete,
+ SCA_ReturnWhenShutdownComplete
+};
+
+// Force shutdown of the EE
+void ForceEEShutdown(ShutdownCompleteAction sca = SCA_ExitProcessWhenShutdownComplete);
+void InnerCoEEShutDownCOM();
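+// Illustrative use of the enum above (not part of the original source): a host
+// that needs control back after shutdown would call
+//
+//     ForceEEShutdown(SCA_ReturnWhenShutdownComplete);
+//
+// whereas the default ForceEEShutdown() exits the process and does not return.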
+
+// We have an internal class that can be used to expose EE functionality to other CLR
+// DLLs, via the deliberately obscure IEE DLL exports from the shim and the EE
+// NOTE: This class must not ever contain any instance variables. The reason for
+// this is that the IEE function (corhost.cpp) relies on the fact that you
+// may initialize the object more than once without ill effects. If you
+// change this class so that this condition is violated, you must rewrite
+// how the g_pCEE and related variables are initialized.
+class CExecutionEngine : public IExecutionEngine, public IEEMemoryManager
+{
+ //***************************************************************************
+ // public API:
+ //***************************************************************************
+public:
+
+ // Notification of a DLL_THREAD_DETACH or a Thread Terminate.
+ static void ThreadDetaching(void **pTlsData);
+
+    // Delete the given TLS block
+ static void DeleteTLS(void **pTlsData);
+
+ // Fiber switch notifications
+ static void SwitchIn();
+ static void SwitchOut();
+
+ static void **CheckThreadState(DWORD slot, BOOL force = TRUE);
+ static void **CheckThreadStateNoCreate(DWORD slot
+#ifdef _DEBUG
+ , BOOL fForDestruction = FALSE
+#endif // _DEBUG
+ );
+
+ // Setup FLS simulation block, including ClrDebugState and StressLog.
+ static void SetupTLSForThread(Thread *pThread);
+
+ static DWORD GetTlsIndex () {return TlsIndex;}
+
+ static LPVOID* GetTlsData();
+ static BOOL SetTlsData (void** ppTlsInfo);
+
+ static BOOL HasDetachedTlsInfo();
+
+ static void CleanupDetachedTlsInfo();
+
+ static void DetachTlsInfo(void **pTlsData);
+
+ //***************************************************************************
+ // private implementation:
+ //***************************************************************************
+private:
+
+ // The debugger needs access to the TlsIndex so that we can read it from OOP.
+ friend class EEDbgInterfaceImpl;
+
+ SVAL_DECL (DWORD, TlsIndex);
+
+ static PTLS_CALLBACK_FUNCTION Callbacks[MAX_PREDEFINED_TLS_SLOT];
+
+ //***************************************************************************
+ // IUnknown methods
+ //***************************************************************************
+
+ HRESULT STDMETHODCALLTYPE QueryInterface(
+ REFIID id,
+ void **pInterface);
+
+ ULONG STDMETHODCALLTYPE AddRef();
+
+ ULONG STDMETHODCALLTYPE Release();
+
+ //***************************************************************************
+ // IExecutionEngine methods for TLS
+ //***************************************************************************
+
+ // Associate a callback for cleanup with a TLS slot
+ VOID STDMETHODCALLTYPE TLS_AssociateCallback(
+ DWORD slot,
+ PTLS_CALLBACK_FUNCTION callback);
+
+ // Get the TLS block for fast Get/Set operations
+ LPVOID* STDMETHODCALLTYPE TLS_GetDataBlock();
+
+ // Get the value at a slot
+ LPVOID STDMETHODCALLTYPE TLS_GetValue(DWORD slot);
+
+ // Get the value at a slot, return FALSE if TLS info block doesn't exist
+ BOOL STDMETHODCALLTYPE TLS_CheckValue(DWORD slot, LPVOID * pValue);
+
+ // Set the value at a slot
+ VOID STDMETHODCALLTYPE TLS_SetValue(DWORD slot, LPVOID pData);
+
+ // Free TLS memory block and make callback
+ VOID STDMETHODCALLTYPE TLS_ThreadDetaching();
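+    // Usage sketch for the TLS methods above (illustrative, not part of the
+    // original header): a client typically associates a cleanup callback with a
+    // slot once, then reads and writes the slot:
+    //
+    //     pEE->TLS_AssociateCallback(slot, &MyCleanupFunction); // MyCleanupFunction is hypothetical
+    //     pEE->TLS_SetValue(slot, pData);
+    //     LPVOID pData2 = pEE->TLS_GetValue(slot);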
+
+ //***************************************************************************
+ // IExecutionEngine methods for locking
+ //***************************************************************************
+
+ CRITSEC_COOKIE STDMETHODCALLTYPE CreateLock(LPCSTR szTag, LPCSTR level, CrstFlags flags);
+
+ void STDMETHODCALLTYPE DestroyLock(CRITSEC_COOKIE lock);
+
+ void STDMETHODCALLTYPE AcquireLock(CRITSEC_COOKIE lock);
+
+ void STDMETHODCALLTYPE ReleaseLock(CRITSEC_COOKIE lock);
+
+ EVENT_COOKIE STDMETHODCALLTYPE CreateAutoEvent(BOOL bInitialState);
+ EVENT_COOKIE STDMETHODCALLTYPE CreateManualEvent(BOOL bInitialState);
+ void STDMETHODCALLTYPE CloseEvent(EVENT_COOKIE event);
+ BOOL STDMETHODCALLTYPE ClrSetEvent(EVENT_COOKIE event);
+ BOOL STDMETHODCALLTYPE ClrResetEvent(EVENT_COOKIE event);
+ DWORD STDMETHODCALLTYPE WaitForEvent(EVENT_COOKIE event, DWORD dwMilliseconds, BOOL bAlertable);
+ DWORD STDMETHODCALLTYPE WaitForSingleObject(HANDLE handle, DWORD dwMilliseconds);
+
+ SEMAPHORE_COOKIE STDMETHODCALLTYPE ClrCreateSemaphore(DWORD dwInitial, DWORD dwMax);
+ void STDMETHODCALLTYPE ClrCloseSemaphore(SEMAPHORE_COOKIE semaphore);
+ DWORD STDMETHODCALLTYPE ClrWaitForSemaphore(SEMAPHORE_COOKIE semaphore, DWORD dwMilliseconds, BOOL bAlertable);
+ BOOL STDMETHODCALLTYPE ClrReleaseSemaphore(SEMAPHORE_COOKIE semaphore, LONG lReleaseCount, LONG *lpPreviousCount);
+
+ MUTEX_COOKIE STDMETHODCALLTYPE ClrCreateMutex(LPSECURITY_ATTRIBUTES lpMutexAttributes,
+ BOOL bInitialOwner,
+ LPCTSTR lpName);
+ void STDMETHODCALLTYPE ClrCloseMutex(MUTEX_COOKIE mutex);
+ BOOL STDMETHODCALLTYPE ClrReleaseMutex(MUTEX_COOKIE mutex);
+ DWORD STDMETHODCALLTYPE ClrWaitForMutex(MUTEX_COOKIE mutex,
+ DWORD dwMilliseconds,
+ BOOL bAlertable);
+
+ DWORD STDMETHODCALLTYPE ClrSleepEx(DWORD dwMilliseconds, BOOL bAlertable);
+
+ BOOL STDMETHODCALLTYPE ClrAllocationDisallowed();
+
+ void STDMETHODCALLTYPE GetLastThrownObjectExceptionFromThread(void **ppvException);
+
+ //***************************************************************************
+ // IEEMemoryManager methods for locking
+ //***************************************************************************
+ LPVOID STDMETHODCALLTYPE ClrVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect);
+ BOOL STDMETHODCALLTYPE ClrVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType);
+ SIZE_T STDMETHODCALLTYPE ClrVirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength);
+ BOOL STDMETHODCALLTYPE ClrVirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect);
+ HANDLE STDMETHODCALLTYPE ClrGetProcessHeap();
+ HANDLE STDMETHODCALLTYPE ClrHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize);
+ BOOL STDMETHODCALLTYPE ClrHeapDestroy(HANDLE hHeap);
+ LPVOID STDMETHODCALLTYPE ClrHeapAlloc(HANDLE hHeap, DWORD dwFlags, SIZE_T dwBytes);
+ BOOL STDMETHODCALLTYPE ClrHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem);
+ BOOL STDMETHODCALLTYPE ClrHeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem);
+ HANDLE STDMETHODCALLTYPE ClrGetProcessExecutableHeap();
+
+};
+
+#ifdef _DEBUG
+extern void DisableGlobalAllocStore ();
+#endif //_DEBUG
+
+void SetLatchedExitCode (INT32 code);
+INT32 GetLatchedExitCode (void);
+
+// Tells whether the garbage collector is fully initialized
+// Stronger than IsGCHeapInitialized
+BOOL IsGarbageCollectorFullyInitialized();
+
+#ifndef FEATURE_CORECLR
+//---------------------------------------------------------------------------------------
+//
+// Class to encapsulate Cor Command line processing
+//
+class CorCommandLine
+{
+public:
+
+//********** TYPES
+
+ // Note: We don't bother with interlocked operations as we manipulate these bits,
+ // because we don't anticipate free-threaded access. (Most of this is used only
+ // during startup / shutdown).
+
+//********** DATA
+
+ // Hold the current (possibly parsed) command line here
+ static DWORD m_NumArgs;
+ static LPWSTR *m_ArgvW;
+
+ static LPWSTR m_pwszAppFullName;
+ static DWORD m_dwManifestPaths;
+ static LPWSTR *m_ppwszManifestPaths;
+ static DWORD m_dwActivationData;
+ static LPWSTR *m_ppwszActivationData;
+
+//********** METHODS
+
+ // parse the command line
+ static HRESULT SetArgvW(LPCWSTR lpCommandLine);
+
+ // Retrieve the parsed command line
+ static LPWSTR *GetArgvW(DWORD *pNumArgs);
+
+private:
+ static HRESULT ReadClickOnceEnvVariables();
+};
+#endif // !FEATURE_CORECLR
+
+#endif
diff --git a/src/vm/certificatecache.cpp b/src/vm/certificatecache.cpp
new file mode 100644
index 0000000000..609c4cdb58
--- /dev/null
+++ b/src/vm/certificatecache.cpp
@@ -0,0 +1,86 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#include "common.h"
+
+#ifdef FEATURE_CAS_POLICY
+#include "certificatecache.h"
+
+CertificateCache::CertificateCache () {
+ WRAPPER_NO_CONTRACT;
+ m_dwNumEntries = 0;
+ for (DWORD i=0; i < MAX_CACHED_CERTIFICATES; i++) {
+ m_Entry[i] = NULL;
+ }
+ m_CertificateCacheCrst.Init(CrstPublisherCertificate);
+}
+
+CertificateCache::~CertificateCache () {
+ // Let the OS collect the memory allocated for the cached certificates.
+}
+
+COR_TRUST* CertificateCache::GetEntry (DWORD index) {
+ LIMITED_METHOD_CONTRACT;
+    if (index >= MAX_CACHED_CERTIFICATES) // index is unsigned, so no lower-bound check is needed
+ return NULL;
+ return m_Entry[index];
+}
+
+EnumCertificateAdditionFlags CertificateCache::AddEntry (COR_TRUST* pCertificate, DWORD* pIndex) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(pIndex != NULL);
+ } CONTRACTL_END;
+
+ *pIndex = FindEntry(pCertificate);
+ if (*pIndex < MAX_CACHED_CERTIFICATES)
+ return AlreadyExists; // the certificate is already cached.
+ if (m_dwNumEntries >= MAX_CACHED_CERTIFICATES)
+ return CacheSaturated; // the cache is full
+
+ CrstHolder csh(&m_CertificateCacheCrst);
+ if (m_dwNumEntries >= MAX_CACHED_CERTIFICATES)
+ return CacheSaturated;
+
+ // check again now that we have the lock.
+ *pIndex = FindEntry(pCertificate);
+ if (*pIndex < MAX_CACHED_CERTIFICATES)
+ return AlreadyExists;
+
+ *pIndex = m_dwNumEntries;
+ m_Entry[m_dwNumEntries++] = pCertificate;
+ return Success;
+}
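+// Note: AddEntry above uses a double-checked locking pattern - FindEntry runs
+// once without the lock for the common "already cached" case, and both the
+// saturation and duplicate checks are repeated under m_CertificateCacheCrst
+// before the entry is published, so concurrent adders can neither insert
+// duplicates nor overflow m_Entry.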
+
+BOOL CertificateCache::Contains (COR_TRUST* pCertificate) {
+ WRAPPER_NO_CONTRACT;
+ DWORD index = FindEntry(pCertificate);
+    return (index < MAX_CACHED_CERTIFICATES); // index is unsigned, so no lower-bound check is needed
+}
+
+DWORD CertificateCache::FindEntry (COR_TRUST* pCertificate) {
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }CONTRACTL_END;
+
+ for (DWORD i=0; i < MAX_CACHED_CERTIFICATES; i++) {
+ if (m_Entry[i] != NULL) {
+ if ((pCertificate->cbSigner == m_Entry[i]->cbSigner) &&
+ (memcmp(pCertificate->pbSigner, m_Entry[i]->pbSigner, m_Entry[i]->cbSigner) == 0))
+ return i;
+ }
+ }
+ return 0xFFFFFFFF;
+}
+#endif // FEATURE_CAS_POLICY
diff --git a/src/vm/certificatecache.h b/src/vm/certificatecache.h
new file mode 100644
index 0000000000..f2182abddc
--- /dev/null
+++ b/src/vm/certificatecache.h
@@ -0,0 +1,41 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef _CERTIFICATECACHE_H_
+#define _CERTIFICATECACHE_H_
+
+#include "corpermp.h"
+#include "crst.h"
+
+#define MAX_CACHED_CERTIFICATES 10
+
+enum EnumCertificateAdditionFlags {
+ Success = 0,
+ CacheSaturated = 1,
+ AlreadyExists = 2
+};
+
+class CertificateCache {
+public:
+ EnumCertificateAdditionFlags AddEntry (COR_TRUST* pCertificate, DWORD* pIndex);
+ COR_TRUST* GetEntry (DWORD index);
+ BOOL Contains (COR_TRUST* pCertificate);
+
+ CertificateCache ();
+ ~CertificateCache ();
+
+private:
+ DWORD m_dwNumEntries;
+ COR_TRUST* m_Entry [MAX_CACHED_CERTIFICATES];
+ CrstStatic m_CertificateCacheCrst;
+
+ DWORD FindEntry (COR_TRUST* pCertificate);
+};
+
+#endif //_CERTIFICATECACHE_H_
diff --git a/src/vm/cgensys.h b/src/vm/cgensys.h
new file mode 100644
index 0000000000..d4dc554429
--- /dev/null
+++ b/src/vm/cgensys.h
@@ -0,0 +1,181 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// CGENSYS.H -
+//
+// Generic header for choosing system-dependent helpers
+//
+
+
+
+#ifndef __cgensys_h__
+#define __cgensys_h__
+
+class MethodDesc;
+class Stub;
+class Thread;
+class CrawlFrame;
+struct EE_ILEXCEPTION_CLAUSE;
+struct TransitionBlock;
+struct VASigCookie;
+struct CORCOMPILE_EXTERNAL_METHOD_THUNK;
+class ComPlusCallMethodDesc;
+
+#include <cgencpu.h>
+
+
+#ifdef EnC_SUPPORTED
+void ResumeAtJit(PT_CONTEXT pContext, LPVOID oldFP);
+#endif
+
+#if defined(_TARGET_X86_)
+void ResumeAtJitEH (CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel, Thread *pThread, BOOL unwindStack);
+int CallJitEHFilter (CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel, OBJECTREF thrownObj);
+void CallJitEHFinally(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel);
+#endif // _TARGET_X86_
+
+
+// Get the number of logical processors per physical processor. Returns 1 on failure or on non-Intel x86 processors.
+DWORD GetLogicalCpuCount();
+
+//These are in util.cpp
+extern size_t GetLogicalProcessorCacheSizeFromOS();
+extern size_t GetIntelDeterministicCacheEnum();
+extern size_t GetIntelDescriptorValuesCache();
+extern DWORD GetLogicalCpuCountFromOS();
+extern DWORD GetLogicalCpuCountFallback();
+
+
+// Try to determine the largest last-level cache size of the machine - return 0 if unknown or no L2/L3 cache
+size_t GetLargestOnDieCacheSize(BOOL bTrueSize = TRUE);
+
+
+#ifdef FEATURE_COMINTEROP
+extern "C" UINT32 STDCALL CLRToCOMWorker(TransitionBlock * pTransitionBlock, ComPlusCallMethodDesc * pMD);
+extern "C" void GenericComPlusCallStub(void);
+
+extern "C" void GenericComCallStub(void);
+#endif // FEATURE_COMINTEROP
+
+// Non-CPU-specific helper functions called by the CPU-dependent code
+extern "C" PCODE STDCALL PreStubWorker(TransitionBlock * pTransitionBlock, MethodDesc * pMD);
+
+extern "C" void STDCALL VarargPInvokeStubWorker(TransitionBlock * pTransitionBlock, VASigCookie * pVASigCookie, MethodDesc * pMD);
+extern "C" void STDCALL VarargPInvokeStub(void);
+extern "C" void STDCALL VarargPInvokeStub_RetBuffArg(void);
+
+extern "C" void STDCALL GenericPInvokeCalliStubWorker(TransitionBlock * pTransitionBlock, VASigCookie * pVASigCookie, PCODE pUnmanagedTarget);
+extern "C" void STDCALL GenericPInvokeCalliHelper(void);
+
+extern "C" PCODE STDCALL ExternalMethodFixupWorker(TransitionBlock * pTransitionBlock, TADDR pIndirection, DWORD sectionIndex, Module * pModule);
+extern "C" void STDCALL ExternalMethodFixupStub(void);
+extern "C" void STDCALL ExternalMethodFixupPatchLabel(void);
+
+extern "C" void STDCALL VirtualMethodFixupStub(void);
+extern "C" void STDCALL VirtualMethodFixupPatchLabel(void);
+
+extern "C" void STDCALL TransparentProxyStub(void);
+extern "C" void STDCALL TransparentProxyStub_CrossContext();
+extern "C" void STDCALL TransparentProxyStubPatchLabel(void);
+
+#ifdef FEATURE_READYTORUN
+extern "C" void STDCALL DelayLoad_MethodCall();
+
+extern "C" void STDCALL DelayLoad_Helper();
+extern "C" void STDCALL DelayLoad_Helper_Obj();
+extern "C" void STDCALL DelayLoad_Helper_ObjObj();
+#endif
+
+// Returns information about the CPU.
+// Note that this information may be the least common denominator in the
+// case of a multi-proc machine.
+
+#ifdef _TARGET_X86_
+void GetSpecificCpuInfo(CORINFO_CPU * cpuInfo);
+#else
+inline void GetSpecificCpuInfo(CORINFO_CPU * cpuInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+ cpuInfo->dwCPUType = 0;
+ cpuInfo->dwFeatures = 0;
+ cpuInfo->dwExtendedFeatures = 0;
+}
+
+#endif // !_TARGET_X86_
+
+#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
+extern "C" DWORD __stdcall getcpuid(DWORD arg, unsigned char result[16]);
+#endif // defined(_TARGET_AMD64_)
+
+inline bool TargetHasAVXSupport()
+{
+#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
+ unsigned char buffer[16];
+ // All AMD64 targets support cpuid.
+ (void) getcpuid(1, buffer);
+ // getcpuid executes cpuid with eax set to its first argument, and ecx cleared.
+ // It returns the resulting eax, ebx, ecx and edx (in that order) in buffer[].
+ // The AVX feature is ECX bit 28.
+ return ((buffer[11] & 0x10) != 0);
+#endif // defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
+ return false;
+}
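+// Illustrative check of the bit math above: ECX occupies buffer[8..11], so ECX
+// bit 28 is bit 4 of buffer[11], i.e. mask 0x10. For example, ECX = 0x10000000
+// produces buffer[11] = 0x10 and the function reports AVX support.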
+
+#ifdef FEATURE_PREJIT
+// Can code compiled for "minReqdCpuType" be used on "actualCpuType"
+inline BOOL IsCompatibleCpuInfo(const CORINFO_CPU * actualCpuInfo,
+ const CORINFO_CPU * minReqdCpuInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+ return ((minReqdCpuInfo->dwFeatures & actualCpuInfo->dwFeatures) ==
+ minReqdCpuInfo->dwFeatures);
+}
+#endif // FEATURE_PREJIT
+
+
+#ifndef DACCESS_COMPILE
+// Given an address in a slot, figure out if the prestub will be called
+BOOL DoesSlotCallPrestub(PCODE pCode);
+#endif
+
+#ifdef DACCESS_COMPILE
+
+// Used by dac/strike to make sense of non-jit/non-jit-helper call targets
+// generated by the runtime.
+BOOL GetAnyThunkTarget (T_CONTEXT *pctx, TADDR *pTarget, TADDR *pTargetMethodDesc);
+
+#endif // DACCESS_COMPILE
+
+
+
+//
+// ResetProcessorStateHolder saves/restores processor state around calls to
+// mscorlib during exception handling.
+//
+class ResetProcessorStateHolder
+{
+#if defined(_TARGET_AMD64_)
+ ULONG m_mxcsr;
+#endif
+
+public:
+
+ ResetProcessorStateHolder ()
+ {
+#if defined(_TARGET_AMD64_)
+ m_mxcsr = _mm_getcsr();
+ _mm_setcsr(0x1f80);
+#endif // _TARGET_AMD64_
+ }
+
+ ~ResetProcessorStateHolder ()
+ {
+#if defined(_TARGET_AMD64_)
+ _mm_setcsr(m_mxcsr);
+#endif // _TARGET_AMD64_
+ }
+};
+
+
+#endif // !__cgensys_h__
diff --git a/src/vm/class.cpp b/src/vm/class.cpp
new file mode 100644
index 0000000000..108016537e
--- /dev/null
+++ b/src/vm/class.cpp
@@ -0,0 +1,4746 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: CLASS.CPP
+//
+
+
+//
+
+//
+// ============================================================================
+
+#include "common.h"
+
+#include "dllimport.h"
+#include "dllimportcallback.h"
+#include "fieldmarshaler.h"
+#include "constrainedexecutionregion.h"
+#include "customattribute.h"
+#include "encee.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "comcallablewrapper.h"
+#include "clrtocomcall.h"
+#include "runtimecallablewrapper.h"
+#endif // FEATURE_COMINTEROP
+
+#ifdef MDIL
+#include "security.h"
+#endif
+
+//#define DEBUG_LAYOUT
+#define SORT_BY_RID
+
+#ifndef DACCESS_COMPILE
+#include "methodtablebuilder.h"
+#endif
+#include "nsenumhandleallcases.h"
+
+#ifndef DACCESS_COMPILE
+
+
+//*******************************************************************************
+EEClass::EEClass(DWORD cbFixedEEClassFields)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Cache size of fixed fields (this instance also contains a set of packed fields whose final size isn't
+ // determined until the end of class loading). We store the size into a spare byte made available by
+ // compiler field alignment, so we need to ensure we never allocate a flavor of EEClass more than 255
+ // bytes long.
+ _ASSERTE(cbFixedEEClassFields <= 0xff);
+ m_cbFixedEEClassFields = (BYTE)cbFixedEEClassFields;
+
+ // All other members are initialized to zero
+}
+
+//*******************************************************************************
+void *EEClass::operator new(
+ size_t size,
+ LoaderHeap *pHeap,
+ AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ // EEClass (or sub-type) is always followed immediately by an EEClassPackedFields structure. This is
+ // maximally sized at runtime but in the ngen scenario will be optimized into a smaller structure (which
+ // is why it must go after all the fixed sized fields).
+ S_SIZE_T safeSize = S_SIZE_T(size) + S_SIZE_T(sizeof(EEClassPackedFields));
+
+ void *p = pamTracker->Track(pHeap->AllocMem(safeSize));
+
+ // No need to memset since this memory came from VirtualAlloc'ed memory
+ // memset (p, 0, size);
+
+ return p;
+}
+
+//*******************************************************************************
+void EEClass::Destruct(MethodTable * pOwningMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ PRECONDITION(pOwningMT != NULL);
+ }
+ CONTRACTL_END
+
+#ifndef CROSSGEN_COMPILE
+
+ // Not expected to be called for array EEClass
+ _ASSERTE(!pOwningMT->IsArray());
+
+#ifdef _DEBUG
+ _ASSERTE(!IsDestroyed());
+ SetDestroyed();
+#endif
+
+#ifdef PROFILING_SUPPORTED
+ // If profiling, then notify the class is getting unloaded.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackClasses());
+ {
+ // Calls to the profiler callback may throw, or otherwise fail, if
+ // the profiler AVs/throws an unhandled exception/etc. We don't want
+ // those failures to affect the runtime, so we'll ignore them.
+ //
+ // Note that the profiler callback may turn around and make calls into
+ // the profiling runtime that may throw. This try/catch block doesn't
+ // protect the profiler against such failures. To protect the profiler
+ // against that, we will need try/catch blocks around all calls into the
+ // profiling API.
+ //
+ // (Bug #26467)
+ //
+
+ FAULT_NOT_FATAL();
+
+ EX_TRY
+ {
+ GCX_PREEMP();
+
+ g_profControlBlock.pProfInterface->ClassUnloadStarted((ClassID) pOwningMT);
+ }
+ EX_CATCH
+ {
+ // The exception here came from the profiler itself. We'll just
+ // swallow the exception, since we don't want the profiler to bring
+ // down the runtime.
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+#ifdef FEATURE_COMINTEROP
+ // clean up any COM Data
+ if (m_pccwTemplate)
+ {
+ m_pccwTemplate->Release();
+ m_pccwTemplate = NULL;
+ }
+
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ if (GetComClassFactory())
+ {
+ GetComClassFactory()->Cleanup();
+ }
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+#endif // FEATURE_COMINTEROP
+
+
+ if (IsDelegate())
+ {
+ DelegateEEClass* pDelegateEEClass = (DelegateEEClass*)this;
+
+ if (pDelegateEEClass->m_pStaticCallStub)
+ {
+ BOOL fStubDeleted = pDelegateEEClass->m_pStaticCallStub->DecRef();
+ if (fStubDeleted)
+ {
+ DelegateInvokeStubManager::g_pManager->RemoveStub(pDelegateEEClass->m_pStaticCallStub);
+ }
+ }
+ if (pDelegateEEClass->m_pInstRetBuffCallStub)
+ {
+ pDelegateEEClass->m_pInstRetBuffCallStub->DecRef();
+ }
+ // While m_pMultiCastInvokeStub is also a member,
+ // it is owned by the m_pMulticastStubCache, not by the class
+ // - it is shared across classes. So we don't decrement
+ // its ref count here
+ delete pDelegateEEClass->m_pUMThunkMarshInfo;
+ }
+
+    // We should never get here for a thunking proxy because we do not destroy the
+    // default appdomain or the mscorlib.dll module during shutdown
+ _ASSERTE(!pOwningMT->IsTransparentProxy());
+
+#if defined(FEATURE_REMOTING) && !defined(HAS_REMOTING_PRECODE)
+ // Destruct the method descs by walking the chunks.
+ MethodTable::IntroducedMethodIterator it(pOwningMT);
+ for (; it.IsValid(); it.Next())
+ {
+ MethodDesc * pMD = it.GetMethodDesc();
+ pMD->Destruct();
+ }
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ if (GetSparseCOMInteropVTableMap() != NULL && !pOwningMT->IsZapped())
+ delete GetSparseCOMInteropVTableMap();
+#endif // FEATURE_COMINTEROP
+
+#ifdef PROFILING_SUPPORTED
+ // If profiling, then notify the class is getting unloaded.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackClasses());
+ {
+ // See comments in the call to ClassUnloadStarted for details on this
+ // FAULT_NOT_FATAL marker and exception swallowing.
+ FAULT_NOT_FATAL();
+ EX_TRY
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->ClassUnloadFinished((ClassID) pOwningMT, S_OK);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+#endif // CROSSGEN_COMPILE
+}
+
+//*******************************************************************************
+/*static*/ EEClass *
+EEClass::CreateMinimalClass(LoaderHeap *pHeap, AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return new (pHeap, pamTracker) EEClass(sizeof(EEClass));
+}
+
+
+//*******************************************************************************
+
+//-----------------------------------------------------------------------------------
+// Note: this only loads the type to CLASS_DEPENDENCIES_LOADED as this can be called
+// indirectly from DoFullyLoad() as part of accessibility checking.
+//-----------------------------------------------------------------------------------
+MethodTable *MethodTable::LoadEnclosingMethodTable(ClassLoadLevel targetLevel)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ mdTypeDef tdEnclosing = GetEnclosingCl();
+
+ if (tdEnclosing == mdTypeDefNil)
+ {
+ return NULL;
+ }
+
+ return ClassLoader::LoadTypeDefThrowing(GetModule(),
+ tdEnclosing,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef,
+ tdNoTypes,
+ targetLevel
+ ).GetMethodTable();
+
+}
+
+#ifdef EnC_SUPPORTED
+
+//*******************************************************************************
+VOID EEClass::FixupFieldDescForEnC(MethodTable * pMT, EnCFieldDesc *pFD, mdFieldDef fieldDef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ WRAPPER(GC_TRIGGERS);
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ Module * pModule = pMT->GetModule();
+ IMDInternalImport *pImport = pModule->GetMDImport();
+
+#ifdef LOGGING
+ if (LoggingEnabled())
+ {
+ LPCSTR szFieldName;
+ if (FAILED(pImport->GetNameOfFieldDef(fieldDef, &szFieldName)))
+ {
+ szFieldName = "Invalid FieldDef record";
+ }
+        LOG((LF_ENC, LL_INFO100, "EEClass::FixupFieldDescForEnC %s\n", szFieldName));
+ }
+#endif //LOGGING
+
+
+#ifdef _DEBUG
+ BOOL shouldBreak = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EncFixupFieldBreak);
+ if (shouldBreak > 0) {
+ _ASSERTE(!"EncFixupFieldBreak");
+ }
+#endif // _DEBUG
+
+    // MethodTableBuilder uses the stacking allocator for most of its
+    // working memory requirements, so this makes sure the memory is freed
+    // once this function goes out of scope.
+ CheckPointHolder cph(GetThread()->m_MarshalAlloc.GetCheckpoint());
+
+ MethodTableBuilder::bmtMetaDataInfo bmtMetaData;
+ bmtMetaData.cFields = 1;
+ bmtMetaData.pFields = (mdToken*)_alloca(sizeof(mdToken));
+ bmtMetaData.pFields[0] = fieldDef;
+ bmtMetaData.pFieldAttrs = (DWORD*)_alloca(sizeof(DWORD));
+ IfFailThrow(pImport->GetFieldDefProps(fieldDef, &bmtMetaData.pFieldAttrs[0]));
+
+ MethodTableBuilder::bmtMethAndFieldDescs bmtMFDescs;
+    // We need to allocate the memory but don't have to fill it in; InitializeFieldDescs
+    // will copy pFD (the 1st argument) into it.
+ bmtMFDescs.ppFieldDescList = (FieldDesc**)_alloca(sizeof(FieldDesc*));
+
+ MethodTableBuilder::bmtFieldPlacement bmtFP;
+
+    // This simulates just enough of the environment that BuildMethodTableThrowing
+    // creates to run InitializeFieldDescs
+ MethodTableBuilder::bmtErrorInfo bmtError;
+ bmtError.pModule = pModule;
+ bmtError.cl = pMT->GetCl();
+ bmtError.dMethodDefInError = mdTokenNil;
+ bmtError.szMethodNameForError = NULL;
+
+ MethodTableBuilder::bmtInternalInfo bmtInternal;
+ bmtInternal.pModule = pModule;
+ bmtInternal.pInternalImport = pImport;
+ bmtInternal.pParentMT = pMT->GetParentMethodTable();
+
+ MethodTableBuilder::bmtProperties bmtProp;
+ bmtProp.fIsValueClass = !!pMT->IsValueType();
+
+ MethodTableBuilder::bmtEnumFieldInfo bmtEnumFields(bmtInternal.pInternalImport);
+
+ if (pFD->IsStatic())
+ {
+ bmtEnumFields.dwNumStaticFields = 1;
+ }
+ else
+ {
+ bmtEnumFields.dwNumInstanceFields = 1;
+ }
+
+ // We shouldn't have to fill this in b/c we're not allowed to EnC value classes, or
+ // anything else with layout info associated with it.
+ LayoutRawFieldInfo *pLayoutRawFieldInfos = (LayoutRawFieldInfo*)_alloca((2) * sizeof(LayoutRawFieldInfo));
+
+ // If not NULL, it means there are some by-value fields, and this contains an entry for each instance or static field,
+ // which is NULL if not a by value field, and points to the EEClass of the field if a by value field. Instance fields
+ // come first, statics come second.
+ MethodTable **pByValueClassCache = NULL;
+
+ EEClass * pClass = pMT->GetClass();
+
+    // InitializeFieldDescs is going to change these numbers to something wrong,
+    // even though we already have the right ones. Save them here & restore them after.
+ WORD wNumInstanceFields = pMT->GetNumInstanceFields();
+ WORD wNumStaticFields = pMT->GetNumStaticFields();
+ unsigned totalDeclaredFieldSize = 0;
+
+ AllocMemTracker dummyAmTracker;
+
+ BaseDomain * pDomain = pMT->GetDomain();
+ MethodTableBuilder builder(pMT, pClass,
+ &GetThread()->m_MarshalAlloc,
+ &dummyAmTracker);
+
+ MethodTableBuilder::bmtGenericsInfo genericsInfo;
+
+ OBJECTREF pThrowable = NULL;
+ GCPROTECT_BEGIN(pThrowable);
+
+ builder.SetBMTData(pMT->GetLoaderAllocator(),
+ &bmtError,
+ &bmtProp,
+ NULL,
+ NULL,
+ NULL,
+ &bmtMetaData,
+ NULL,
+ &bmtMFDescs,
+ &bmtFP,
+ &bmtInternal,
+ NULL,
+ NULL,
+ &genericsInfo,
+ &bmtEnumFields,
+ NULL);
+
+ EX_TRY
+ {
+ GCX_PREEMP();
+ builder.InitializeFieldDescs(pFD,
+ pLayoutRawFieldInfos,
+ &bmtInternal,
+ &genericsInfo,
+ &bmtMetaData,
+ &bmtEnumFields,
+ &bmtError,
+ &pByValueClassCache,
+ &bmtMFDescs,
+ &bmtFP,
+ NULL, // not needed as thread or context static are not allowed in EnC
+ &totalDeclaredFieldSize);
+ }
+ EX_CATCH_THROWABLE(&pThrowable);
+
+ dummyAmTracker.SuppressRelease();
+
+ // Restore now
+ pClass->SetNumInstanceFields(wNumInstanceFields);
+ pClass->SetNumStaticFields(wNumStaticFields);
+
+    // PERF: For now, we turn off the fast equality check for valuetypes when a
+    // field is modified by EnC. Consider doing a check and setting the bit only when
+    // necessary.
+ if (pMT->IsValueType())
+ {
+ pClass->SetIsNotTightlyPacked();
+ }
+
+ if (pThrowable != NULL)
+ {
+ COMPlusThrow(pThrowable);
+ }
+
+ GCPROTECT_END();
+
+ pFD->SetMethodTable(pMT);
+
+ // We set this when we first created the FieldDesc, but initializing the FieldDesc
+ // may have overwritten it so we need to set it again.
+ pFD->SetEnCNew();
+
+ return;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// AddField - called when a new field is added by EnC
+//
+// Since instances of this class may already exist on the heap, we can't change the
+// runtime layout of the object to accommodate the new field. Instead we hang the field
+// off the syncblock (for instance fields) or in the FieldDesc for static fields.
+//
+// Here we just create the FieldDesc and link it to the class. The actual storage will
+// be created lazily on demand.
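+//
+// Illustrative sketch (assumed C# edit, not part of the original comment): after an
+// EnC edit adds "int m_newField;" to class C, access to m_newField on a pre-existing
+// instance cannot use a fixed object offset; instead the runtime locates (or lazily
+// creates) the value in EnC-specific storage reached through the object's syncblock,
+// while a static field's storage hangs off its EnCFieldDesc.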
+//
+HRESULT EEClass::AddField(MethodTable * pMT, mdFieldDef fieldDef, EnCFieldDesc **ppNewFD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ Module * pModule = pMT->GetModule();
+ IMDInternalImport *pImport = pModule->GetMDImport();
+
+#ifdef LOGGING
+ if (LoggingEnabled())
+ {
+ LPCSTR szFieldName;
+ if (FAILED(pImport->GetNameOfFieldDef(fieldDef, &szFieldName)))
+ {
+ szFieldName = "Invalid FieldDef record";
+ }
+ LOG((LF_ENC, LL_INFO100, "EEClass::AddField %s\n", szFieldName));
+ }
+#endif //LOGGING
+
+ // We can only add fields to normal classes
+ if (pMT->HasLayout() || pMT->IsValueType())
+ {
+ return CORDBG_E_ENC_CANT_ADD_FIELD_TO_VALUE_OR_LAYOUT_CLASS;
+ }
+
+ // We only add private fields.
+    // This may not be strictly necessary, but it helps avoid semantic confusion with
+    // existing code.
+ DWORD dwFieldAttrs;
+ IfFailThrow(pImport->GetFieldDefProps(fieldDef, &dwFieldAttrs));
+
+ LoaderAllocator* pAllocator = pMT->GetLoaderAllocator();
+
+ // Here we allocate a FieldDesc and set just enough info to be able to fix it up later
+ // when we're running in managed code.
+ EnCAddedFieldElement *pAddedField = (EnCAddedFieldElement *)
+ (void*)pAllocator->GetHighFrequencyHeap()->AllocMem_NoThrow(S_SIZE_T(sizeof(EnCAddedFieldElement)));
+ if (!pAddedField)
+ {
+ return E_OUTOFMEMORY;
+ }
+ pAddedField->Init( fieldDef, IsFdStatic(dwFieldAttrs) );
+
+ EnCFieldDesc *pNewFD = &pAddedField->m_fieldDesc;
+
+ // Get the EnCEEClassData for this class
+ // Don't adjust EEClass stats b/c EnC fields shouldn't touch EE data structures.
+ // We'll just update our private EnC structures instead.
+ EnCEEClassData *pEnCClass = ((EditAndContinueModule*)pModule)->GetEnCEEClassData(pMT);
+ if (! pEnCClass)
+ return E_FAIL;
+
+ // Add the field element to the list of added fields for this class
+ pEnCClass->AddField(pAddedField);
+
+ // Store the FieldDesc into the module's field list
+ {
+ CONTRACT_VIOLATION(ThrowsViolation); // B#25680 (Fix Enc violations): Must handle OOM's from Ensure
+ pModule->EnsureFieldDefCanBeStored(fieldDef);
+ }
+ pModule->EnsuredStoreFieldDef(fieldDef, pNewFD);
+ pNewFD->SetMethodTable(pMT);
+
+ // Success, return the new FieldDesc
+ if (ppNewFD)
+ {
+ *ppNewFD = pNewFD;
+ }
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// AddMethod - called when a new method is added by EnC
+//
+// The method has already been added to the metadata with token methodDef.
+// Create a new MethodDesc for the method.
+//
+HRESULT EEClass::AddMethod(MethodTable * pMT, mdMethodDef methodDef, RVA newRVA, MethodDesc **ppMethod)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ Module * pModule = pMT->GetModule();
+ IMDInternalImport *pImport = pModule->GetMDImport();
+
+#ifdef LOGGING
+ if (LoggingEnabled())
+ {
+ LPCSTR szMethodName;
+ if (FAILED(pImport->GetNameOfMethodDef(methodDef, &szMethodName)))
+ {
+ szMethodName = "Invalid MethodDef record";
+ }
+ LOG((LF_ENC, LL_INFO100, "EEClass::AddMethod %s\n", szMethodName));
+ }
+#endif //LOGGING
+
+ DWORD dwDescrOffset;
+ DWORD dwImplFlags;
+ HRESULT hr = S_OK;
+
+ if (FAILED(pImport->GetMethodImplProps(methodDef, &dwDescrOffset, &dwImplFlags)))
+ {
+ return COR_E_BADIMAGEFORMAT;
+ }
+
+ DWORD dwMemberAttrs;
+ IfFailThrow(pImport->GetMethodDefProps(methodDef, &dwMemberAttrs));
+
+    // Refuse to add special-case methods: P/Invokes, instance methods on interfaces,
+    // and runtime-implemented methods are not supported
+ if (IsReallyMdPinvokeImpl(dwMemberAttrs) ||
+ (pMT->IsInterface() && !IsMdStatic(dwMemberAttrs)) ||
+ IsMiRuntime(dwImplFlags))
+ {
+ _ASSERTE(! "**Error** EEClass::AddMethod only IL private non-virtual methods are supported");
+ LOG((LF_ENC, LL_INFO100, "**Error** EEClass::AddMethod only IL private non-virtual methods are supported\n"));
+ return CORDBG_E_ENC_EDIT_NOT_SUPPORTED;
+ }
+
+#ifdef _DEBUG
+ // Validate that this methodDef correctly has a parent typeDef
+ mdTypeDef parentTypeDef;
+ if (FAILED(hr = pImport->GetParentToken(methodDef, &parentTypeDef)))
+ {
+ _ASSERTE(! "**Error** EEClass::AddMethod parent token not found");
+ LOG((LF_ENC, LL_INFO100, "**Error** EEClass::AddMethod parent token not found\n"));
+ return E_FAIL;
+ }
+#endif // _DEBUG
+
+ EEClass * pClass = pMT->GetClass();
+
+ // @todo: OOM: InitMethodDesc will allocate loaderheap memory but leak it
+ // on failure. This AllocMemTracker should be replaced with a real one.
+ AllocMemTracker dummyAmTracker;
+
+ LoaderAllocator* pAllocator = pMT->GetLoaderAllocator();
+
+ // Create a new MethodDescChunk to hold the new MethodDesc
+    // Create the chunk somewhere we know will be within range of the VTable
+ MethodDescChunk *pChunk = MethodDescChunk::CreateChunk(pAllocator->GetHighFrequencyHeap(),
+ 1, // methodDescCount
+ mcInstantiated,
+ TRUE /* fNonVtableSlot */,
+ TRUE /* fNativeCodeSlot */,
+ FALSE /* fComPlusCallInfo */,
+ pMT,
+ &dummyAmTracker);
+
+ // Get the new MethodDesc (Note: The method desc memory is zero initialized)
+ MethodDesc *pNewMD = pChunk->GetFirstMethodDesc();
+
+ // Initialize the new MethodDesc
+ MethodTableBuilder builder(pMT,
+ pClass,
+ &GetThread()->m_MarshalAlloc,
+ &dummyAmTracker);
+ EX_TRY
+ {
+ INDEBUG(LPCSTR debug_szFieldName);
+ INDEBUG(if (FAILED(pImport->GetNameOfMethodDef(methodDef, &debug_szFieldName))) { debug_szFieldName = "Invalid MethodDef record"; });
+ builder.InitMethodDesc(pNewMD,
+ mcInstantiated, // Use instantiated methoddesc for EnC added methods to get space for slot
+ methodDef,
+ dwImplFlags,
+ dwMemberAttrs,
+ TRUE, // fEnC
+ newRVA,
+ pImport,
+ NULL
+ COMMA_INDEBUG(debug_szFieldName)
+ COMMA_INDEBUG(pMT->GetDebugClassName())
+ COMMA_INDEBUG(NULL)
+ );
+
+ pNewMD->SetTemporaryEntryPoint(pAllocator, &dummyAmTracker);
+ }
+ EX_CATCH_HRESULT(hr);
+ if (S_OK != hr)
+ return hr;
+
+ dummyAmTracker.SuppressRelease();
+
+ _ASSERTE(pNewMD->IsEnCAddedMethod());
+
+ pNewMD->SetSlot(MethodTable::NO_SLOT); // we can't ever use the slot for EnC methods
+
+ pClass->AddChunk(pChunk);
+
+ // Store the new MethodDesc into the collection for this class
+ pModule->EnsureMethodDefCanBeStored(methodDef);
+ pModule->EnsuredStoreMethodDef(methodDef, pNewMD);
+
+    LOG((LF_ENC, LL_INFO100, "EEClass::AddMethod new methoddesc %p for token 0x%08x\n", pNewMD, methodDef));
+
+ // Success - return the new MethodDesc
+ _ASSERTE( SUCCEEDED(hr) );
+ if (ppMethod)
+ {
+ *ppMethod = pNewMD;
+ }
+ return S_OK;
+}
+
+#endif // EnC_SUPPORTED
+
+//---------------------------------------------------------------------------------------
+//
+// Check that the class type parameters are used consistently in this signature blob,
+// in accordance with their variance annotations.
+// The signature is assumed to be well-formed, but indices and arities might not be correct.
+//
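+// Illustrative example (assumed C# surface syntax): for "interface I<out T>",
+// pVarianceInfo[0] == gpCovariant. A signature using T as a method return type is
+// checked with position == gpCovariant and passes; the same T appearing under a
+// by-ref (ELEMENT_TYPE_BYREF) is checked with gpNonVariant and fails.
+//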
+BOOL
+EEClass::CheckVarianceInSig(
+ DWORD numGenericArgs,
+ BYTE * pVarianceInfo,
+ Module * pModule,
+ SigPointer psig,
+ CorGenericParamAttr position)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pVarianceInfo == NULL)
+ return TRUE;
+
+ CorElementType typ;
+ IfFailThrow(psig.GetElemType(&typ));
+
+ switch (typ)
+ {
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ case ELEMENT_TYPE_VOID:
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_TYPEDBYREF:
+ case ELEMENT_TYPE_MVAR:
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_VALUETYPE:
+ return TRUE;
+
+ case ELEMENT_TYPE_VAR:
+ {
+ DWORD index;
+ IfFailThrow(psig.GetData(&index));
+
+        // This will be checked later anyway, so give up and don't indicate a variance failure.
+        // (index is a DWORD, hence unsigned, so only the upper bound needs checking.)
+        if (index >= numGenericArgs)
+ return TRUE;
+
+ // Non-variant parameters are allowed to appear anywhere
+ if (pVarianceInfo[index] == gpNonVariant)
+ return TRUE;
+
+ // Covariant and contravariant parameters can *only* appear in resp. covariant and contravariant positions
+ return ((CorGenericParamAttr) (pVarianceInfo[index]) == position);
+ }
+
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ IfFailThrow(psig.GetElemType(&typ));
+ mdTypeRef typeref;
+ IfFailThrow(psig.GetToken(&typeref));
+
+ // The number of type parameters follows
+ DWORD ntypars;
+ IfFailThrow(psig.GetData(&ntypars));
+
+        // If this is a value type, or position == gpNonVariant, then
+        // covariance and contravariance are disallowed completely
+ if (typ == ELEMENT_TYPE_VALUETYPE || position == gpNonVariant)
+ {
+ for (unsigned i = 0; i < ntypars; i++)
+ {
+ if (!CheckVarianceInSig(numGenericArgs, pVarianceInfo, pModule, psig, gpNonVariant))
+ return FALSE;
+
+ IfFailThrow(psig.SkipExactlyOne());
+ }
+ }
+        // Otherwise we need to take account of the variance annotation on each type parameter of the generic type
+ else
+ {
+ mdTypeDef typeDef;
+ Module * pDefModule;
+ // This will also be resolved later; so, give up and don't indicate a variance failure
+ if (!ClassLoader::ResolveTokenToTypeDefThrowing(pModule, typeref, &pDefModule, &typeDef))
+ return TRUE;
+
+ HENUMInternal hEnumGenericPars;
+ if (FAILED(pDefModule->GetMDImport()->EnumInit(mdtGenericParam, typeDef, &hEnumGenericPars)))
+ {
+ pDefModule->GetAssembly()->ThrowTypeLoadException(pDefModule->GetMDImport(), typeDef, IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ for (unsigned i = 0; i < ntypars; i++)
+ {
+ mdGenericParam tkTyPar;
+ pDefModule->GetMDImport()->EnumNext(&hEnumGenericPars, &tkTyPar);
+ DWORD flags;
+ if (FAILED(pDefModule->GetMDImport()->GetGenericParamProps(tkTyPar, NULL, &flags, NULL, NULL, NULL)))
+ {
+ pDefModule->GetAssembly()->ThrowTypeLoadException(pDefModule->GetMDImport(), typeDef, IDS_CLASSLOAD_BADFORMAT);
+ }
+ CorGenericParamAttr genPosition = (CorGenericParamAttr) (flags & gpVarianceMask);
+ // If the surrounding context is contravariant then we need to flip the variance of this parameter
+ if (position == gpContravariant)
+ {
+ genPosition = genPosition == gpCovariant ? gpContravariant
+ : genPosition == gpContravariant ? gpCovariant
+ : gpNonVariant;
+ }
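+                // Illustrative example (assumed C# syntax): when checking
+                // "interface I<out T> { void M(IComparer<T> c); }", the parameter
+                // sits in a contravariant position and IComparer<in U> declares U
+                // contravariant, so T's effective position flips back to covariant
+                // and the covariant T is accepted; T used directly as a parameter
+                // type would have been rejected.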
+ if (!CheckVarianceInSig(numGenericArgs, pVarianceInfo, pModule, psig, genPosition))
+ return FALSE;
+
+ IfFailThrow(psig.SkipExactlyOne());
+ }
+ pDefModule->GetMDImport()->EnumClose(&hEnumGenericPars);
+ }
+
+ return TRUE;
+ }
+
+ // Arrays behave covariantly
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ return CheckVarianceInSig(numGenericArgs, pVarianceInfo, pModule, psig, position);
+
+ // Pointers behave non-variantly
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_PTR:
+ return CheckVarianceInSig(numGenericArgs, pVarianceInfo, pModule, psig, gpNonVariant);
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+ // Calling convention
+ IfFailThrow(psig.GetData(NULL));
+
+        // Get the argument count
+ ULONG cArgs;
+ IfFailThrow(psig.GetData(&cArgs));
+
+ // Conservatively, assume non-variance of function pointer types
+ if (!CheckVarianceInSig(numGenericArgs, pVarianceInfo, pModule, psig, gpNonVariant))
+ return FALSE;
+
+ IfFailThrow(psig.SkipExactlyOne());
+
+ for (unsigned i = 0; i < cArgs; i++)
+ {
+ if (!CheckVarianceInSig(numGenericArgs, pVarianceInfo, pModule, psig, gpNonVariant))
+ return FALSE;
+
+ IfFailThrow(psig.SkipExactlyOne());
+ }
+
+ return TRUE;
+ }
+
+ default:
+ THROW_BAD_FORMAT(IDS_CLASSLOAD_BAD_VARIANCE_SIG, pModule);
+ }
+
+ return FALSE;
+} // EEClass::CheckVarianceInSig
+
+void
+ClassLoader::LoadExactParentAndInterfacesTransitively(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+
+ TypeHandle thisTH(pMT);
+ SigTypeContext typeContext(thisTH);
+ IMDInternalImport* pInternalImport = pMT->GetMDImport();
+ MethodTable *pParentMT = pMT->GetParentMethodTable();
+
+ if (pParentMT != NULL && pParentMT->HasInstantiation())
+ {
+ // Fill in exact parent if it's instantiated
+ mdToken crExtends;
+ IfFailThrow(pInternalImport->GetTypeDefProps(
+ pMT->GetCl(),
+ NULL,
+ &crExtends));
+
+ _ASSERTE(!IsNilToken(crExtends));
+ _ASSERTE(TypeFromToken(crExtends) == mdtTypeSpec);
+
+ TypeHandle newParent = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pMT->GetModule(), crExtends, &typeContext,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef,
+ ClassLoader::LoadTypes,
+ CLASS_LOAD_EXACTPARENTS,
+ TRUE);
+
+ MethodTable* pNewParentMT = newParent.AsMethodTable();
+ if (pNewParentMT != pParentMT)
+ {
+ LOG((LF_CLASSLOADER, LL_INFO1000, "GENERICS: Replaced approximate parent %s with exact parent %s from token %x\n", pParentMT->GetDebugClassName(), pNewParentMT->GetDebugClassName(), crExtends));
+
+ // SetParentMethodTable is not used here since we want to update the indirection cell in the NGen case
+ *EnsureWritablePages(pMT->GetParentMethodTablePtr()) = pNewParentMT;
+
+ pParentMT = pNewParentMT;
+ }
+ }
+
+ if (pParentMT != NULL)
+ {
+ EnsureLoaded(pParentMT, CLASS_LOAD_EXACTPARENTS);
+ }
+
+
+ if (pParentMT != NULL && pParentMT->HasPerInstInfo())
+ {
+ // Copy down all inherited dictionary pointers which we
+ // could not embed.
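+        // Illustrative example: for D<T> : B<T>, the first pParentMT->GetNumDicts()
+        // entries of D's per-instantiation info correspond to B's dictionaries; any
+        // entry that NGen could not embed is patched here to point at the parent's
+        // exact dictionary.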
+ DWORD nDicts = pParentMT->GetNumDicts();
+ for (DWORD iDict = 0; iDict < nDicts; iDict++)
+ {
+ if (pMT->GetPerInstInfo()[iDict] != pParentMT->GetPerInstInfo()[iDict])
+ *EnsureWritablePages(&pMT->GetPerInstInfo()[iDict]) = pParentMT->GetPerInstInfo()[iDict];
+ }
+ }
+
+#ifdef FEATURE_PREJIT
+ // Restore action, not in MethodTable::Restore because we may have had approx parents at that point
+ if (pMT->IsZapped())
+ {
+ MethodTable::InterfaceMapIterator it = pMT->IterateInterfaceMap();
+ while (it.Next())
+ {
+ Module::RestoreMethodTablePointer(&it.GetInterfaceInfo()->m_pMethodTable, pMT->GetLoaderModule(), CLASS_LOAD_EXACTPARENTS);
+ }
+ }
+ else
+#endif
+ {
+ MethodTableBuilder::LoadExactInterfaceMap(pMT);
+ }
+
+#ifdef _DEBUG
+ if (g_pConfig->ShouldDumpOnClassLoad(pMT->GetDebugClassName()))
+ {
+ pMT->Debug_DumpInterfaceMap("Exact");
+ }
+#endif //_DEBUG
+} // ClassLoader::LoadExactParentAndInterfacesTransitively
+
+// CLASS_LOAD_EXACTPARENTS phase of loading:
+// * Load the base class at exact instantiation
+// * Recurse LoadExactParents up parent hierarchy
+// * Load explicitly declared interfaces on this class at exact instantiation
+// * Fixup vtable
+//
+/*static*/
+void ClassLoader::LoadExactParents(MethodTable *pMT)
+{
+ CONTRACT_VOID
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ POSTCONDITION(pMT->CheckLoadLevel(CLASS_LOAD_EXACTPARENTS));
+ }
+ CONTRACT_END;
+
+ MethodTable *pApproxParentMT = pMT->GetParentMethodTable();
+
+ if (!pMT->IsCanonicalMethodTable())
+ {
+ EnsureLoaded(TypeHandle(pMT->GetCanonicalMethodTable()), CLASS_LOAD_EXACTPARENTS);
+ }
+
+ LoadExactParentAndInterfacesTransitively(pMT);
+
+ MethodTableBuilder::CopyExactParentSlots(pMT, pApproxParentMT);
+
+ // We can now mark this type as having exact parents
+ pMT->SetHasExactParent();
+
+ RETURN;
+}
+
+//*******************************************************************************
+// This is the routine that computes the internal type of a given type. It normalizes
+// structs that have only one field (of int/ptr size) to that field's underlying type.
+//
+// * see code:MethodTable#KindsOfElementTypes for more
+// * It gets used by code:TypeHandle::GetInternalCorElementType
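+//
+// Illustrative example (assumed C# syntax): "struct Handle { IntPtr m_value; }" is
+// normalized to ELEMENT_TYPE_I so it can be treated like a raw native int; a struct
+// with more than one field, or with a non-trivial layout, stays ELEMENT_TYPE_VALUETYPE.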
+CorElementType EEClass::ComputeInternalCorElementTypeForValueType(MethodTable * pMT)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ if (pMT->GetNumInstanceFields() == 1 && (!pMT->HasLayout()
+ || pMT->GetNumInstanceFieldBytes() == 4
+#ifdef _WIN64
+ || pMT->GetNumInstanceFieldBytes() == 8
+#endif // _WIN64
+        )) // Don't do the optimization if anything but the trivial layout was specified.
+ {
+ FieldDesc * pFD = pMT->GetApproxFieldDescListRaw();
+ CorElementType type = pFD->GetFieldType();
+
+ if (type == ELEMENT_TYPE_VALUETYPE)
+ {
+ //@todo: Is it more apropos to call LookupApproxFieldTypeHandle() here?
+ TypeHandle fldHnd = pFD->GetApproxFieldTypeHandleThrowing();
+ CONSISTENCY_CHECK(!fldHnd.IsNull());
+
+ type = fldHnd.GetInternalCorElementType();
+ }
+
+ switch (type)
+ {
+ // "DDB 20951: vc8 unmanaged pointer bug."
+ // If ELEMENT_TYPE_PTR were returned, Compiler::verMakeTypeInfo would have problem
+ // creating a TI_STRUCT out of CORINFO_TYPE_PTR.
+ // As a result, the importer would not be able to realize that the thing on the stack
+ // is an instance of a valuetype (that contains one single "void*" field), rather than
+ // a pointer to a valuetype.
+ // Returning ELEMENT_TYPE_U allows verMakeTypeInfo to go down the normal code path
+ // for creating a TI_STRUCT.
+ case ELEMENT_TYPE_PTR:
+ type = ELEMENT_TYPE_U;
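+            // Intentional fall-through to the cases below, which return the normalized type.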
+
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+#ifdef _WIN64
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+#endif // _WIN64
+
+ {
+ return type;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ return ELEMENT_TYPE_VALUETYPE;
+}
+
+#if defined(CHECK_APP_DOMAIN_LEAKS) || defined(_DEBUG)
+//*******************************************************************************
+void EEClass::GetPredefinedAgility(Module *pModule, mdTypeDef td,
+ BOOL *pfIsAgile, BOOL *pfCheckAgile)
+{
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ //
+ // There are 4 settings possible:
+ // IsAgile CheckAgile
+ // F F (default) Use normal type logic to determine agility
+ // T F "Proxy" Treated as agile even though may not be.
+ // F T "Maybe" Not agile, but specific instances can be made agile.
+ // T T "Force" All instances are forced agile, even though not typesafe.
+ //
+ // Also, note that object arrays of agile or maybe agile types are made maybe agile.
+ //
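+    // For example, the { g_ThreadClassName, TRUE, FALSE } entry below is a "Proxy"
+    // setting: Thread instances are treated as agile even though they are not
+    // provably so.
+    //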
+
+ static const struct PredefinedAgility
+ {
+ const char *name;
+ BOOL isAgile;
+ BOOL checkAgile;
+ }
+
+ // Matches based on name with the first records having higher precedence than subsequent ones
+ // so that when there is an ambiguity, the first one will be used:
+ // System.Globalization.CultureNotFoundException
+ // comes before
+ // System.Globalization.*
+ //
+ // although System.Globalization.CultureNotFoundException matches both records, the first
+ // is the one that will be used
+ agility[] =
+ {
+        // Thread objects leak across context boundaries.
+        // We manage those leaks manually
+ { g_ThreadClassName, TRUE, FALSE },
+
+ // The SharedStatics class is a container for process-wide data
+ { g_SharedStaticsClassName, FALSE, TRUE },
+
+        // The extra dot at the start is to accommodate the string comparison logic below
+        // when there is no namespace for a type
+ {".StringMaker", FALSE, TRUE },
+
+ {g_StringBufferClassName, FALSE, TRUE },
+
+ { "System.ActivationArguments", FALSE, TRUE },
+ { "System.AppDomainSetup" , FALSE, TRUE },
+ { "System.AppDomainInitializerInfo", FALSE, TRUE },
+
+ // Make all containers maybe agile
+ { "System.Collections.*", FALSE, TRUE },
+ { "System.Collections.Generic.*", FALSE, TRUE },
+
+        // Make all globalization objects agile except for System.Globalization.CultureNotFoundException.
+        // The exception inherits from ArgumentException, so it needs the same agility.
+        // This must come before the more general declaration below so that it will match first.
+ { "System.Globalization.CultureNotFoundException", FALSE, FALSE },
+ // We have CultureInfo objects on thread. Because threads leak across
+ // app domains, we have to be prepared for CultureInfo to leak across.
+ // CultureInfo exposes all of the other globalization objects, so we
+ // just make the entire namespace app domain agile.
+ { "System.Globalization.*", FALSE, TRUE },
+
+ // Remoting structures for legally smuggling messages across app domains
+ { "System.Runtime.Remoting.Messaging.SmuggledMethodCallMessage", FALSE, TRUE },
+ { "System.Runtime.Remoting.Messaging.SmuggledMethodReturnMessage", FALSE, TRUE },
+ { "System.Runtime.Remoting.Messaging.SmuggledObjRef", FALSE, TRUE},
+ { "System.Runtime.Remoting.ObjRef", FALSE, TRUE },
+ { "System.Runtime.Remoting.ChannelInfo", FALSE, TRUE },
+ { "System.Runtime.Remoting.Channels.CrossAppDomainData", FALSE, TRUE },
+
+ // Remoting cached data structures are all in mscorlib
+ { "System.Runtime.Remoting.Metadata.RemotingCachedData", FALSE, TRUE },
+ { "System.Runtime.Remoting.Metadata.RemotingFieldCachedData", FALSE, TRUE },
+ { "System.Runtime.Remoting.Metadata.RemotingParameterCachedData", FALSE, TRUE },
+ { "System.Runtime.Remoting.Metadata.RemotingMethodCachedData", FALSE, TRUE },
+ { "System.Runtime.Remoting.Metadata.RemotingTypeCachedData", FALSE, TRUE },
+ { "System.Runtime.Remoting.Metadata.SoapAttribute", FALSE, TRUE },
+ { "System.Runtime.Remoting.Metadata.SoapFieldAttribute", FALSE, TRUE },
+ { "System.Runtime.Remoting.Metadata.SoapMethodAttribute",FALSE, TRUE },
+ { "System.Runtime.Remoting.Metadata.SoapParameterAttribute", FALSE, TRUE },
+ { "System.Runtime.Remoting.Metadata.SoapTypeAttribute", FALSE, TRUE },
+
+ // Reflection types
+ { g_ReflectionMemberInfoName, FALSE, TRUE },
+ { g_TypeClassName, FALSE, TRUE },
+ { g_ReflectionClassName, FALSE, TRUE },
+ { g_ReflectionConstructorInfoName, FALSE, TRUE },
+ { g_ReflectionConstructorName, FALSE, TRUE },
+ { g_ReflectionEventInfoName, FALSE, TRUE },
+ { g_ReflectionEventName, FALSE, TRUE },
+ { g_ReflectionFieldInfoName, FALSE, TRUE },
+ { g_ReflectionFieldName, FALSE, TRUE },
+ { g_MethodBaseName, FALSE, TRUE },
+ { g_ReflectionMethodInfoName, FALSE, TRUE },
+ { g_ReflectionMethodName, FALSE, TRUE },
+ { g_ReflectionPropertyInfoName, FALSE, TRUE },
+ { g_ReflectionPropInfoName, FALSE, TRUE },
+ { g_ReflectionParamInfoName, FALSE, TRUE },
+ { g_ReflectionParamName, FALSE, TRUE },
+
+ { "System.RuntimeType+RuntimeTypeCache", FALSE, TRUE },
+ { "System.RuntimeType+RuntimeTypeCache+MemberInfoCache`1", FALSE, TRUE },
+ { "System.RuntimeType+RuntimeTypeCache+MemberInfoCache`1+Filter", FALSE, TRUE },
+ { "System.Reflection.CerHashtable`2", FALSE, TRUE },
+ { "System.Reflection.CerHashtable`2+Table", FALSE, TRUE },
+ { "System.Reflection.RtFieldInfo", FALSE, TRUE },
+ { "System.Reflection.MdFieldInfo", FALSE, TRUE },
+ { "System.Signature", FALSE, TRUE },
+ { "System.Reflection.MetadataImport", FALSE, TRUE },
+
+ // LogSwitches are agile even though we can't prove it
+ // <TODO>@todo: do they need really to be?</TODO>
+ { "System.Diagnostics.LogSwitch", FALSE, TRUE },
+
+ // There is a process global PermissionTokenFactory
+ { "System.Security.PermissionToken", FALSE, TRUE },
+ { g_PermissionTokenFactoryName, FALSE, TRUE },
+
+ // Mark all the exceptions we throw agile. This makes
+ // most BVTs pass even though exceptions leak
+ //
+ // Note that making exception checked automatically
+ // makes a bunch of subclasses checked as well.
+ //
+ // Pre-allocated exceptions
+ { g_ExceptionClassName, FALSE, TRUE },
+ { g_OutOfMemoryExceptionClassName, FALSE, TRUE },
+ { g_StackOverflowExceptionClassName, FALSE, TRUE },
+ { g_ExecutionEngineExceptionClassName, FALSE, TRUE },
+
+ // SecurityDocument contains pointers and other agile types
+ { "System.Security.SecurityDocument", TRUE, TRUE },
+
+ // BinaryFormatter smuggles these across appdomains.
+ { "System.Runtime.Serialization.Formatters.Binary.BinaryObjectWithMap", TRUE, FALSE},
+ { "System.Runtime.Serialization.Formatters.Binary.BinaryObjectWithMapTyped", TRUE, FALSE},
+
+ { NULL }
+ };
+
+ if (pModule == SystemDomain::SystemModule())
+ {
+ while (TRUE)
+ {
+ LPCUTF8 pszName;
+ LPCUTF8 pszNamespace;
+ HRESULT hr;
+ mdTypeDef tdEnclosing;
+
+ if (FAILED(pModule->GetMDImport()->GetNameOfTypeDef(td, &pszName, &pszNamespace)))
+ {
+ break;
+ }
+
+            // We rely on the match algorithm matching the first items in the list before subsequent ones
+ // so that when there is an ambiguity, the first one will be used:
+ // System.Globalization.CultureNotFoundException
+ // comes before
+ // System.Globalization.*
+ //
+ // although System.Globalization.CultureNotFoundException matches both records, the first
+ // is the one that will be used
+ const PredefinedAgility *p = agility;
+ while (p->name != NULL)
+ {
+ SIZE_T length = strlen(pszNamespace);
+ if (strncmp(pszNamespace, p->name, length) == 0
+ && (strcmp(pszName, p->name + length + 1) == 0
+ || strcmp("*", p->name + length + 1) == 0))
+ {
+ *pfIsAgile = p->isAgile;
+ *pfCheckAgile = p->checkAgile;
+ return;
+ }
+
+ p++;
+ }
+
+ // Perhaps we have a nested type like 'bucket' that is supposed to be
+ // agile or checked agile by virtue of being enclosed in a type like
+ // hashtable, which is itself inside "System.Collections".
+ tdEnclosing = mdTypeDefNil;
+ hr = pModule->GetMDImport()->GetNestedClassProps(td, &tdEnclosing);
+ if (SUCCEEDED(hr))
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(tdEnclosing != td && TypeFromToken(tdEnclosing) == mdtTypeDef);
+ td = tdEnclosing;
+ }
+ else
+ break;
+ }
+ }
+
+ *pfIsAgile = FALSE;
+ *pfCheckAgile = FALSE;
+}
+
+//*******************************************************************************
+void EEClass::SetAppDomainAgileAttribute(MethodTable * pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ // PRECONDITION(!IsAppDomainAgilityDone());
+ }
+ CONTRACTL_END
+
+ EEClass * pClass = pMT->GetClass();
+
+ //
+    // The most general case for a provably agile class is:
+    // (1) No instance fields of non-sealed or non-agile types
+    // (2) The class is in the system domain (so its type cannot be unloaded
+    //     and is loaded in all app domains)
+    // (3) The class can't have a finalizer
+    // (4) The class can't be a COMClass
+ //
+
+ _ASSERTE(!pClass->IsAppDomainAgilityDone());
+
+ BOOL fCheckAgile = FALSE;
+ BOOL fAgile = FALSE;
+ BOOL fFieldsAgile = TRUE;
+ WORD nFields = 0;
+
+ if (!pMT->GetModule()->IsSystem())
+ {
+ //
+ // No types outside of the system domain can even think about
+ // being agile
+ //
+
+ goto exit;
+ }
+
+ if (pMT->IsComObjectType())
+ {
+ //
+ // No COM type is agile, as there is domain specific stuff in the sync block
+ //
+
+ goto exit;
+ }
+
+ if (pMT->IsInterface())
+ {
+ //
+ // Don't mark interfaces agile
+ //
+
+ goto exit;
+ }
+
+ if (pMT->ContainsGenericVariables())
+ {
+ // Types containing formal type parameters aren't agile
+ goto exit;
+ }
+
+ //
+ // See if we need agile checking in the class
+ //
+
+ GetPredefinedAgility(pMT->GetModule(), pMT->GetCl(),
+ &fAgile, &fCheckAgile);
+
+ if (pMT->HasFinalizer())
+ {
+ if (!fAgile && !fCheckAgile)
+ {
+ //
+ // If we're finalizable, we need domain affinity. Otherwise, we may appear
+ // to a particular app domain not to call the finalizer (since it may run
+ // in a different domain.)
+ //
+            // Note: do not change this assumption. The eager finalization code for
+            // appdomain unloading assumes that no objects other than those in mscorlib
+            // can be agile and finalizable
+ //
+ goto exit;
+ }
+ else
+ {
+
+ // Note that a finalizable object will be considered potentially agile if it has one of the two
+ // predefined agility bits set. This will cause an assert in the eager finalization code if you add
+ // a finalizer to such a class - we don't want to have them as we can't run them eagerly and running
+            // them after we've cleared the roots/handles means it can't do much safely. Right now Thread is the
+            // only one we allow.
+ _ASSERTE(g_pThreadClass == NULL || pMT->IsAgileAndFinalizable());
+ }
+ }
+
+ //
+ // Now see if the type is "naturally agile" - that is, it's type structure
+ // guarantees agility.
+ //
+
+ if (pMT->GetParentMethodTable() != NULL)
+ {
+ EEClass * pParentClass = pMT->GetParentMethodTable()->GetClass();
+
+ //
+ // Make sure our parent was computed. This should only happen
+ // when we are prejitting - otherwise it is computed for each
+        // class as it's loaded.
+ //
+
+ _ASSERTE(pParentClass->IsAppDomainAgilityDone());
+
+ if (!pParentClass->IsAppDomainAgile())
+ {
+ fFieldsAgile = FALSE;
+ if (fCheckAgile)
+ _ASSERTE(pParentClass->IsCheckAppDomainAgile());
+ }
+
+ //
+ // To save having to list a lot of trivial (layout-wise) subclasses,
+ // automatically check a subclass if its parent is checked and
+ // it introduces no new fields.
+ //
+
+ if (!fCheckAgile
+ && pParentClass->IsCheckAppDomainAgile()
+ && pClass->GetNumInstanceFields() == pParentClass->GetNumInstanceFields())
+ fCheckAgile = TRUE;
+ }
+
+ nFields = pMT->GetNumInstanceFields()
+ - (pMT->GetParentMethodTable() == NULL ? 0 : pMT->GetParentMethodTable()->GetNumInstanceFields());
+
+ if (fFieldsAgile || fCheckAgile)
+ {
+ FieldDesc *pFD = pClass->GetFieldDescList();
+ FieldDesc *pFDEnd = pFD + nFields;
+ while (pFD < pFDEnd)
+ {
+ switch (pFD->GetFieldType())
+ {
+ case ELEMENT_TYPE_CLASS:
+ {
+ //
+ // There is a bit of a problem in computing the classes which are naturally agile -
+ // we don't want to load types of non-value type fields. So for now we'll
+ // err on the side of conservatism and not allow any non-value type fields other than
+ // the forced agile types listed above.
+ //
+
+ MetaSig sig(pFD);
+ CorElementType type = sig.NextArg();
+ SigPointer sigPtr = sig.GetArgProps();
+
+ //
+ // Don't worry about strings
+ //
+
+ if (type == ELEMENT_TYPE_STRING)
+ break;
+
+ // Find our field's token so we can proceed cautiously
+ mdToken token = mdTokenNil;
+
+ if (type == ELEMENT_TYPE_CLASS)
+ IfFailThrow(sigPtr.GetToken(&token));
+
+ //
+ // First, a special check to see if the field is of our own type.
+ //
+
+ if (token == pMT->GetCl() && pMT->IsSealed())
+ break;
+
+ //
+ // Now, look for the field's TypeHandle.
+ //
+                // <TODO>@todo: there is some ifdef'd code here to load the type if it's
+ // not already loading. This code has synchronization problems, as well
+ // as triggering more aggressive loading than normal. So it's disabled
+ // for now.
+ // </TODO>
+
+ TypeHandle th;
+#if 0
+ if (TypeFromToken(token) == mdTypeDef
+                    && GetClassLoader()->FindUnresolvedClass(GetModule(), token) == NULL)
+ th = pFD->GetFieldTypeHandleThrowing();
+ else
+#endif // 0
+ th = pFD->LookupFieldTypeHandle();
+
+ //
+ // See if the referenced type is agile. Note that there is a reasonable
+ // chance that the type hasn't been loaded yet. If this is the case,
+ // we just have to assume that it's not agile, since we can't trigger
+ // extra loads here (for fear of circular recursion.)
+ //
+ // If you have an agile class which runs into this problem, you can solve it by
+ // setting the type manually to be agile.
+ //
+
+ if (th.IsNull()
+ || !th.IsAppDomainAgile()
+ || (!th.IsTypeDesc()
+ && !th.AsMethodTable()->IsSealed()))
+ {
+ //
+ // Treat the field as non-agile.
+ //
+
+ fFieldsAgile = FALSE;
+ if (fCheckAgile)
+ pFD->SetDangerousAppDomainAgileField();
+ }
+ }
+
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ TypeHandle th;
+
+ {
+ // Loading a non-self-ref valuetype field.
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ th = pFD->GetApproxFieldTypeHandleThrowing();
+ }
+
+ _ASSERTE(!th.IsNull());
+
+ if (!th.IsAppDomainAgile())
+ {
+ fFieldsAgile = FALSE;
+ if (fCheckAgile)
+ pFD->SetDangerousAppDomainAgileField();
+ }
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ pFD++;
+ }
+ }
+
+ if (fFieldsAgile || fAgile)
+ pClass->SetAppDomainAgile();
+
+ if (fCheckAgile && !fFieldsAgile)
+ pClass->SetCheckAppDomainAgile();
+
+exit:
+ LOG((LF_CLASSLOADER, LL_INFO1000, "CLASSLOADER: AppDomainAgileAttribute for %s is %d\n", pClass->GetDebugClassName(), pClass->IsAppDomainAgile()));
+ pClass->SetAppDomainAgilityDone();
+}
+#endif // defined(CHECK_APP_DOMAIN_LEAKS) || defined(_DEBUG)
+
+//*******************************************************************************
+//
+// Debugger notification
+//
+BOOL TypeHandle::NotifyDebuggerLoad(AppDomain *pDomain, BOOL attaching) const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!CORDebuggerAttached())
+ {
+ return FALSE;
+ }
+
+ if (!GetModule()->IsVisibleToDebugger())
+ {
+ return FALSE;
+ }
+
+ return g_pDebugInterface->LoadClass(
+ *this, GetCl(), GetModule(), pDomain);
+}
+
+//*******************************************************************************
+void TypeHandle::NotifyDebuggerUnload(AppDomain *pDomain) const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!GetModule()->IsVisibleToDebugger())
+ return;
+
+ if (!pDomain->IsDebuggerAttached())
+ return;
+
+ g_pDebugInterface->UnloadClass(GetCl(), GetModule(), pDomain);
+}
+
+//*******************************************************************************
+// Given the (generics-shared or generics-exact) value class method, find the
+// (generics-shared) unboxing stub for the given method. We search the vtable.
+//
+// This is needed when creating a delegate to an instance method in a value type
+MethodDesc* MethodTable::GetBoxedEntryPointMD(MethodDesc *pMD)
+{
+ CONTRACT (MethodDesc *) {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(IsValueType());
+ PRECONDITION(!pMD->ContainsGenericVariables());
+ PRECONDITION(!pMD->IsUnboxingStub());
+ POSTCONDITION(RETVAL->IsUnboxingStub());
+ } CONTRACT_END;
+
+ RETURN MethodDesc::FindOrCreateAssociatedMethodDesc(pMD,
+ pMD->GetMethodTable(),
+ TRUE /* get unboxing entry point */,
+ pMD->GetMethodInstantiation(),
+ FALSE /* no allowInstParam */ );
+
+}
+
+//*******************************************************************************
+// Given the unboxing value class method, find the non-unboxing method
+// This is used when generating the code for a BoxedEntryPointStub.
+MethodDesc* MethodTable::GetUnboxedEntryPointMD(MethodDesc *pMD)
+{
+ CONTRACT (MethodDesc *) {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(IsValueType());
+    // reflection needs to call this for methods in non-instantiated classes,
+ // so move the assert to the caller when needed
+ //PRECONDITION(!pMD->ContainsGenericVariables());
+ PRECONDITION(pMD->IsUnboxingStub());
+ POSTCONDITION(!RETVAL->IsUnboxingStub());
+ } CONTRACT_END;
+
+ BOOL allowInstParam = (pMD->GetNumGenericMethodArgs() == 0);
+ RETURN MethodDesc::FindOrCreateAssociatedMethodDesc(pMD,
+ this,
+ FALSE /* don't get unboxing entry point */,
+ pMD->GetMethodInstantiation(),
+ allowInstParam);
+}
+
+
+//*******************************************************************************
+// Given the unboxing value class method, find the non-unboxing method
+// This is used when generating the code for a BoxedEntryPointStub.
+MethodDesc* MethodTable::GetExistingUnboxedEntryPointMD(MethodDesc *pMD)
+{
+ CONTRACT (MethodDesc *) {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(IsValueType());
+    // reflection needs to call this for methods in non-instantiated classes,
+ // so move the assert to the caller when needed
+ //PRECONDITION(!pMD->ContainsGenericVariables());
+ PRECONDITION(pMD->IsUnboxingStub());
+ POSTCONDITION(!RETVAL->IsUnboxingStub());
+ } CONTRACT_END;
+
+ BOOL allowInstParam = (pMD->GetNumGenericMethodArgs() == 0);
+ RETURN MethodDesc::FindOrCreateAssociatedMethodDesc(pMD,
+ this,
+ FALSE /* don't get unboxing entry point */,
+ pMD->GetMethodInstantiation(),
+ allowInstParam,
+ FALSE, /* forceRemotableMethod */
+ FALSE /* allowCreate */
+ );
+}
+
+#endif // !DACCESS_COMPILE
+
+#ifdef FEATURE_HFA
+//*******************************************************************************
+CorElementType MethodTable::GetHFAType()
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS); // we end up in the class loader which has the conditional contracts
+ WRAPPER(GC_TRIGGERS);
+ }
+ CONTRACTL_END;
+
+ if (!IsHFA())
+ return ELEMENT_TYPE_END;
+
+ MethodTable * pMT = this;
+ for (;;)
+ {
+ _ASSERTE(pMT->IsValueType());
+ _ASSERTE(pMT->GetNumInstanceFields() > 0);
+
+ PTR_FieldDesc pFirstField = pMT->GetApproxFieldDescListRaw();
+
+ CorElementType fieldType = pFirstField->GetFieldType();
+
+ // All HFA fields have to be of the same type, so we can just return the type of the first field
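+        // Illustrative example (assumed C# syntax): "struct Vector3 { float x, y, z; }"
+        // is an HFA of ELEMENT_TYPE_R4; nested struct fields are walked until a
+        // floating-point field is reached.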
+ switch (fieldType)
+ {
+ case ELEMENT_TYPE_VALUETYPE:
+ pMT = pFirstField->LookupApproxFieldTypeHandle().GetMethodTable();
+ break;
+
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ return fieldType;
+
+ default:
+ // This should never happen. MethodTable::IsHFA() should be set only on types
+ // that have a valid HFA type
+ _ASSERTE(false);
+ return ELEMENT_TYPE_END;
+ }
+ }
+}
+
+bool MethodTable::IsNativeHFA()
+{
+ LIMITED_METHOD_CONTRACT;
+ return HasLayout() ? GetLayoutInfo()->IsNativeHFA() : IsHFA();
+}
+
+CorElementType MethodTable::GetNativeHFAType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return HasLayout() ? GetLayoutInfo()->GetNativeHFAType() : GetHFAType();
+}
+#endif // FEATURE_HFA
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+// Returns true iff the native view of this type requires 64-bit alignment.
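+//
+// Illustrative example: on ARM, a struct whose native layout contains a 64-bit member
+// (e.g. an __int64 field) reports a largest-member alignment of 8 here and must be
+// placed on an 8-byte boundary when marshaled.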
+bool MethodTable::NativeRequiresAlign8()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (HasLayout())
+ {
+ return (GetLayoutInfo()->GetLargestAlignmentRequirementOfAllMembers() >= 8);
+ }
+ return RequiresAlign8();
+}
+#endif // FEATURE_64BIT_ALIGNMENT
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+//==========================================================================================
+TypeHandle MethodTable::GetCoClassForInterface()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ EEClass * pClass = GetClass();
+
+ if (!pClass->IsComClassInterface())
+ return TypeHandle();
+
+ _ASSERTE(IsInterface());
+
+ TypeHandle th = pClass->GetCoClassForInterface();
+ if (!th.IsNull())
+ return th;
+
+ return SetupCoClassForInterface();
+}
+
+//*******************************************************************************
+TypeHandle MethodTable::SetupCoClassForInterface()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(IsComClassInterface());
+
+ }
+ CONTRACTL_END
+
+ TypeHandle CoClassType;
+ const BYTE *pVal = NULL;
+ ULONG cbVal = 0;
+
+ if (!IsProjectedFromWinRT()) // ignore classic COM interop CA on WinRT types
+ {
+ HRESULT hr = GetMDImport()->GetCustomAttributeByName(GetCl(), INTEROP_COCLASS_TYPE , (const void **)&pVal, &cbVal);
+ if (hr == S_OK)
+ {
+ CustomAttributeParser cap(pVal, cbVal);
+
+ IfFailThrow(cap.SkipProlog());
+
+ // Retrieve the COM source interface class name.
+ ULONG cbName;
+ LPCUTF8 szName;
+ IfFailThrow(cap.GetNonNullString(&szName, &cbName));
+
+ // Copy the name to a temporary buffer and NULL terminate it.
+ StackSString ss(SString::Utf8, szName, cbName);
+
+ // Try to load the class using its name as a fully qualified name. If that fails,
+ // then we try to load it in the assembly of the current class.
+ CoClassType = TypeName::GetTypeUsingCASearchRules(ss.GetUnicode(), GetAssembly());
+
+ // Cache the coclass type
+ g_IBCLogger.LogEEClassCOWTableAccess(this);
+ GetClass_NoLogging()->SetCoClassForInterface(CoClassType);
+ }
+ }
+ return CoClassType;
+}
+
+//*******************************************************************************
+void MethodTable::GetEventInterfaceInfo(MethodTable **ppSrcItfClass, MethodTable **ppEvProvClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+
+ TypeHandle EventProvType;
+ TypeHandle SrcItfType;
+ const BYTE *pVal = NULL;
+ ULONG cbVal = 0;
+
+ // Retrieve the ComEventProviderAttribute CA.
+ HRESULT hr = GetMDImport()->GetCustomAttributeByName(GetCl(), INTEROP_COMEVENTINTERFACE_TYPE, (const void**)&pVal, &cbVal);
+ if (FAILED(hr))
+ {
+ COMPlusThrowHR(hr);
+ }
+
+ CustomAttributeParser cap(pVal, cbVal);
+
+ // Skip the CA type prefix.
+ IfFailThrow(cap.SkipProlog());
+
+ // Retrieve the COM source interface class name.
+ LPCUTF8 szName;
+ ULONG cbName;
+ IfFailThrow(cap.GetNonNullString(&szName, &cbName));
+
+ // Copy the name to a temporary buffer and NULL terminate it.
+ StackSString ss(SString::Utf8, szName, cbName);
+
+ // Try to load the class using its name as a fully qualified name. If that fails,
+ // then we try to load it in the assembly of the current class.
+ SrcItfType = TypeName::GetTypeUsingCASearchRules(ss.GetUnicode(), GetAssembly());
+
+ // Retrieve the COM event provider class name.
+ IfFailThrow(cap.GetNonNullString(&szName, &cbName));
+
+ // Copy the name to a temporary buffer and NULL terminate it.
+ ss.SetUTF8(szName, cbName);
+
+ // Try to load the class using its name as a fully qualified name. If that fails,
+ // then we try to load it in the assembly of the current class.
+ EventProvType = TypeName::GetTypeUsingCASearchRules(ss.GetUnicode(), GetAssembly());
+
+ // Set the source interface and event provider classes.
+ *ppSrcItfClass = SrcItfType.GetMethodTable();
+ *ppEvProvClass = EventProvType.GetMethodTable();
+}
+
+//*******************************************************************************
+TypeHandle MethodTable::GetDefItfForComClassItf()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ BAD_FORMAT_NOTHROW_ASSERT(GetClass()->IsComClassInterface());
+
+    // The COM class interface uses the normal scheme, which is to have no
+    // methods and to implement the default interface and optionally the
+    // default source interface. In this scheme, the first implemented
+    // interface is the default interface, which we return.
+ InterfaceMapIterator it = IterateInterfaceMap();
+ if (it.Next())
+ {
+ return TypeHandle(it.GetInterface());
+ }
+ else
+ {
+        // The COM class interface has the methods directly on itself.
+        // Because of this we need to consider it to be the default interface.
+ return TypeHandle(this);
+ }
+}
+
+#endif // FEATURE_COMINTEROP
+
+
+#endif // !DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// Get the metadata token of the outer type for a nested type
+//
+// Return Value:
+// The token of the outer class if this EEClass is nested, or mdTypeDefNil if the
+// EEClass is not a nested type
+//
+
+mdTypeDef MethodTable::GetEnclosingCl()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ mdTypeDef tdEnclosing = mdTypeDefNil;
+
+ if (GetClass()->IsNested())
+ {
+ HRESULT hr = GetMDImport()->GetNestedClassProps(GetCl(), &tdEnclosing);
+ if (FAILED(hr))
+ {
+ ThrowHR(hr, BFA_UNABLE_TO_GET_NESTED_PROPS);
+ }
+ }
+
+ return tdEnclosing;
+}
+
+//*******************************************************************************
+//
+// Helper routines for the macros defined at the top of this class.
+// You probably should not use these functions directly.
+//
+template<typename RedirectFunctor>
+SString &MethodTable::_GetFullyQualifiedNameForClassNestedAwareInternal(SString &ssBuf)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ ssBuf.Clear();
+
+ LPCUTF8 pszNamespace;
+ LPCUTF8 pszName;
+ pszName = GetFullyQualifiedNameInfo(&pszNamespace);
+ if (pszName == NULL)
+ {
+ return ssBuf;
+ }
+
+ StackSString ssName(SString::Utf8, pszName);
+
+ mdTypeDef mdEncl = GetCl();
+ IMDInternalImport *pImport = GetMDImport();
+
+ // Check if the type is nested
+ DWORD dwAttr;
+ IfFailThrow(pImport->GetTypeDefProps(GetCl(), &dwAttr, NULL));
+
+ RedirectFunctor redirectFunctor;
+ if (IsTdNested(dwAttr))
+ {
+ StackSString ssFullyQualifiedName;
+ StackSString ssPath;
+
+ // Build the nesting chain.
+ while (SUCCEEDED(pImport->GetNestedClassProps(mdEncl, &mdEncl)))
+ {
+ LPCUTF8 szEnclName;
+ LPCUTF8 szEnclNameSpace;
+ IfFailThrow(pImport->GetNameOfTypeDef(
+ mdEncl,
+ &szEnclName,
+ &szEnclNameSpace));
+
+ ns::MakePath(ssPath,
+ StackSString(SString::Utf8, redirectFunctor(szEnclNameSpace)),
+ StackSString(SString::Utf8, szEnclName));
+ ns::MakeNestedTypeName(ssFullyQualifiedName, ssPath, ssName);
+
+ ssName = ssFullyQualifiedName;
+ }
+ }
+
+ ns::MakePath(
+ ssBuf,
+ StackSString(SString::Utf8, redirectFunctor(pszNamespace)), ssName);
+
+ return ssBuf;
+}
+
+class PassThrough
+{
+public :
+ LPCUTF8 operator() (LPCUTF8 szEnclNamespace)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return szEnclNamespace;
+ }
+};
+
+SString &MethodTable::_GetFullyQualifiedNameForClassNestedAware(SString &ssBuf)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return _GetFullyQualifiedNameForClassNestedAwareInternal<PassThrough>(ssBuf);
+}
+
+//*******************************************************************************
+SString &MethodTable::_GetFullyQualifiedNameForClass(SString &ssBuf)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ ssBuf.Clear();
+
+ if (IsArray())
+ {
+ TypeDesc::ConstructName(GetInternalCorElementType(),
+ GetApproxArrayElementTypeHandle(),
+ GetRank(),
+ ssBuf);
+ }
+ else if (!IsNilToken(GetCl()))
+ {
+ LPCUTF8 szNamespace;
+ LPCUTF8 szName;
+ IfFailThrow(GetMDImport()->GetNameOfTypeDef(GetCl(), &szName, &szNamespace));
+
+ ns::MakePath(ssBuf,
+ StackSString(SString::Utf8, szNamespace),
+ StackSString(SString::Utf8, szName));
+ }
+
+ return ssBuf;
+}
+
+//*******************************************************************************
+//
+// Gets the namespace and class name for the class. The namespace
+// can legitimately come back NULL; however, a NULL return value (the class
+// name) indicates an error.
+//
+// NOTE: this used to return array class names, which were sometimes squirreled away by the
+// class loader hash table. It's been removed because it wasted space and was basically broken
+// in general (sometimes wasn't set, sometimes set wrong). If you need array class names,
+// use GetFullyQualifiedNameForClass instead.
+//
+LPCUTF8 MethodTable::GetFullyQualifiedNameInfo(LPCUTF8 *ppszNamespace)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ if (IsArray())
+ {
+ *ppszNamespace = NULL;
+ return NULL;
+ }
+ else
+ {
+ LPCUTF8 szName;
+ if (FAILED(GetMDImport()->GetNameOfTypeDef(GetCl(), &szName, ppszNamespace)))
+ {
+ *ppszNamespace = NULL;
+ return NULL;
+ }
+ return szName;
+ }
+}
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+
+//*******************************************************************************
+CorIfaceAttr MethodTable::GetComInterfaceType()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ // This should only be called on interfaces.
+ BAD_FORMAT_NOTHROW_ASSERT(IsInterface());
+
+ // Check to see if we have already determined the COM interface type
+ // of this interface.
+ CorIfaceAttr ItfType = GetClass()->GetComInterfaceType();
+
+ if (ItfType != (CorIfaceAttr)-1)
+ return ItfType;
+
+ if (IsProjectedFromWinRT())
+ {
+ // WinRT interfaces are always IInspectable-based
+ ItfType = ifInspectable;
+ }
+ else
+ {
+ // Retrieve the interface type from the metadata.
+ HRESULT hr = GetMDImport()->GetIfaceTypeOfTypeDef(GetCl(), (ULONG*)&ItfType);
+ IfFailThrow(hr);
+
+ if (hr != S_OK)
+ {
+ // if not found in metadata, use the default
+ ItfType = ifDual;
+ }
+ }
+
+ // Cache the interface type
+ g_IBCLogger.LogEEClassCOWTableAccess(this);
+ GetClass_NoLogging()->SetComInterfaceType(ItfType);
+
+ return ItfType;
+}
+
+#endif // FEATURE_COMINTEROP
+
+//*******************************************************************************
+void EEClass::GetBestFitMapping(MethodTable * pMT, BOOL *pfBestFitMapping, BOOL *pfThrowOnUnmappableChar)
+{
+ CONTRACTL
+ {
+ THROWS; // OOM only
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EEClass * pClass = pMT->GetClass();
+
+ // lazy init
+ if (!(pClass->m_VMFlags & VMFLAG_BESTFITMAPPING_INITED))
+ {
+ *pfBestFitMapping = FALSE;
+ *pfThrowOnUnmappableChar = FALSE;
+
+ ReadBestFitCustomAttribute(pMT->GetMDImport(), pMT->GetCl(), pfBestFitMapping, pfThrowOnUnmappableChar);
+
+ DWORD flags = VMFLAG_BESTFITMAPPING_INITED;
+ if (*pfBestFitMapping) flags |= VMFLAG_BESTFITMAPPING;
+ if (*pfThrowOnUnmappableChar) flags |= VMFLAG_THROWONUNMAPPABLECHAR;
+
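+        // The INITED bit and the value bits are published in a single atomic OR, so
+        // concurrent callers either observe fully-initialized flags or take this
+        // init path themselves (recomputing the same values is benign).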
+ FastInterlockOr(EnsureWritablePages(&pClass->m_VMFlags), flags);
+ }
+ else
+ {
+ *pfBestFitMapping = (pClass->m_VMFlags & VMFLAG_BESTFITMAPPING);
+ *pfThrowOnUnmappableChar = (pClass->m_VMFlags & VMFLAG_THROWONUNMAPPABLECHAR);
+ }
+}
+
+#ifdef _DEBUG
+
+//*******************************************************************************
+void MethodTable::DebugRecursivelyDumpInstanceFields(LPCUTF8 pszClassName, BOOL debug)
+{
+ WRAPPER_NO_CONTRACT; // It's a dev helper, who cares about contracts
+
+ EX_TRY
+ {
+ StackSString ssBuff;
+
+ DWORD cParentInstanceFields;
+ DWORD i;
+
+ CONSISTENCY_CHECK(CheckLoadLevel(CLASS_LOAD_APPROXPARENTS));
+
+ MethodTable *pParentMT = GetParentMethodTable();
+ if (pParentMT != NULL)
+ {
+ cParentInstanceFields = pParentMT->GetClass()->GetNumInstanceFields();
+ DefineFullyQualifiedNameForClass();
+ LPCUTF8 name = GetFullyQualifiedNameForClass(pParentMT);
+ pParentMT->DebugRecursivelyDumpInstanceFields(name, debug);
+ }
+ else
+ {
+ cParentInstanceFields = 0;
+ }
+
+ // Are there any new instance fields declared by this class?
+ if (GetNumInstanceFields() > cParentInstanceFields)
+ {
+ // Display them
+ if(debug) {
+ ssBuff.Printf(W("%S:\n"), pszClassName);
+ WszOutputDebugString(ssBuff.GetUnicode());
+ }
+ else {
+ LOG((LF_CLASSLOADER, LL_ALWAYS, "%s:\n", pszClassName));
+ }
+
+ for (i = 0; i < (GetNumInstanceFields()-cParentInstanceFields); i++)
+ {
+ FieldDesc *pFD = &GetClass()->GetFieldDescList()[i];
+#ifdef DEBUG_LAYOUT
+ printf("offset %s%3d %s\n", pFD->IsByValue() ? "byvalue " : "", pFD->GetOffset_NoLogging(), pFD->GetName());
+#endif
+ if(debug) {
+ ssBuff.Printf(W("offset %3d %S\n"), pFD->GetOffset_NoLogging(), pFD->GetName());
+ WszOutputDebugString(ssBuff.GetUnicode());
+ }
+ else {
+ LOG((LF_CLASSLOADER, LL_ALWAYS, "offset %3d %s\n", pFD->GetOffset_NoLogging(), pFD->GetName()));
+ }
+ }
+ }
+ }
+ EX_CATCH
+ {
+ if(debug)
+ {
+ WszOutputDebugString(W("<Exception Thrown>\n"));
+ }
+ else
+ {
+ LOG((LF_CLASSLOADER, LL_ALWAYS, "<Exception Thrown>\n"));
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+//*******************************************************************************
+void MethodTable::DebugDumpFieldLayout(LPCUTF8 pszClassName, BOOL debug)
+{
+ WRAPPER_NO_CONTRACT; // It's a dev helper, who cares about contracts
+
+ if (GetNumStaticFields() == 0 && GetNumInstanceFields() == 0)
+ return;
+
+ EX_TRY
+ {
+ StackSString ssBuff;
+
+ DWORD i;
+ DWORD cParentInstanceFields;
+
+ CONSISTENCY_CHECK(CheckLoadLevel(CLASS_LOAD_APPROXPARENTS));
+
+ if (GetParentMethodTable() != NULL)
+ cParentInstanceFields = GetParentMethodTable()->GetNumInstanceFields();
+ else
+ {
+ cParentInstanceFields = 0;
+ }
+
+ if (debug)
+ {
+ ssBuff.Printf(W("Field layout for '%S':\n\n"), pszClassName);
+ WszOutputDebugString(ssBuff.GetUnicode());
+ }
+ else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "Field layout for '%s':\n\n", pszClassName));
+ }
+
+ if (GetNumStaticFields() > 0)
+ {
+ if (debug)
+ {
+ WszOutputDebugString(W("Static fields (stored at vtable offsets)\n"));
+ WszOutputDebugString(W("----------------------------------------\n"));
+ }
+ else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "Static fields (stored at vtable offsets)\n"));
+ LOG((LF_ALWAYS, LL_ALWAYS, "----------------------------------------\n"));
+ }
+
+ for (i = 0; i < GetNumStaticFields(); i++)
+ {
+ FieldDesc *pFD = GetClass()->GetFieldDescList() + ((GetNumInstanceFields()-cParentInstanceFields) + i);
+ if(debug) {
+ ssBuff.Printf(W("offset %3d %S\n"), pFD->GetOffset_NoLogging(), pFD->GetName());
+ WszOutputDebugString(ssBuff.GetUnicode());
+ }
+ else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "offset %3d %s\n", pFD->GetOffset_NoLogging(), pFD->GetName()));
+ }
+ }
+ }
+
+ if (GetNumInstanceFields() > 0)
+ {
+ if (GetNumStaticFields()) {
+ if(debug) {
+ WszOutputDebugString(W("\n"));
+ }
+ else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "\n"));
+ }
+ }
+
+ if (debug)
+ {
+ WszOutputDebugString(W("Instance fields\n"));
+ WszOutputDebugString(W("---------------\n"));
+ }
+ else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "Instance fields\n"));
+ LOG((LF_ALWAYS, LL_ALWAYS, "---------------\n"));
+ }
+
+ DebugRecursivelyDumpInstanceFields(pszClassName, debug);
+ }
+
+ if (debug)
+ {
+ WszOutputDebugString(W("\n"));
+ }
+ else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "\n"));
+ }
+ }
+ EX_CATCH
+ {
+ if (debug)
+ {
+ WszOutputDebugString(W("<Exception Thrown>\n"));
+ }
+ else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "<Exception Thrown>\n"));
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+} // MethodTable::DebugDumpFieldLayout
+
+//*******************************************************************************
+void
+MethodTable::DebugDumpGCDesc(
+ LPCUTF8 pszClassName,
+ BOOL fDebug)
+{
+ WRAPPER_NO_CONTRACT; // It's a dev helper, who cares about contracts
+
+ EX_TRY
+ {
+ StackSString ssBuff;
+
+ if (fDebug)
+ {
+ ssBuff.Printf(W("GC description for '%S':\n\n"), pszClassName);
+ WszOutputDebugString(ssBuff.GetUnicode());
+ }
+ else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "GC description for '%s':\n\n", pszClassName));
+ }
+
+ if (ContainsPointersOrCollectible())
+ {
+ CGCDescSeries *pSeries;
+ CGCDescSeries *pHighest;
+
+ if (fDebug)
+ {
+ WszOutputDebugString(W("GCDesc:\n"));
+ } else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "GCDesc:\n"));
+ }
+
+ pSeries = CGCDesc::GetCGCDescFromMT(this)->GetLowestSeries();
+ pHighest = CGCDesc::GetCGCDescFromMT(this)->GetHighestSeries();
+
+ while (pSeries <= pHighest)
+ {
+ if (fDebug)
+ {
+ ssBuff.Printf(W(" offset %5d (%d w/o Object), size %5d (%5d w/o BaseSize subtr)\n"),
+ pSeries->GetSeriesOffset(),
+ pSeries->GetSeriesOffset() - sizeof(Object),
+ pSeries->GetSeriesSize(),
+ pSeries->GetSeriesSize() + GetBaseSize() );
+ WszOutputDebugString(ssBuff.GetUnicode());
+ }
+ else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, " offset %5d (%d w/o Object), size %5d (%5d w/o BaseSize subtr)\n",
+ pSeries->GetSeriesOffset(),
+ pSeries->GetSeriesOffset() - sizeof(Object),
+ pSeries->GetSeriesSize(),
+ pSeries->GetSeriesSize() + GetBaseSize()
+ ));
+ }
+ pSeries++;
+ }
+
+ if (fDebug)
+ {
+ WszOutputDebugString(W("\n"));
+ } else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "\n"));
+ }
+ }
+ }
+ EX_CATCH
+ {
+ if (fDebug)
+ {
+ WszOutputDebugString(W("<Exception Thrown>\n"));
+ }
+ else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "<Exception Thrown>\n"));
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+} // MethodTable::DebugDumpGCDesc
+
+#endif // _DEBUG
+
+#ifdef FEATURE_COMINTEROP
+//*******************************************************************************
+CorClassIfaceAttr MethodTable::GetComClassInterfaceType()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(!IsInterface());
+ }
+ CONTRACTL_END
+
+ // If the type is an open generic type, then it is considered ClassInterfaceType.None.
+ if (ContainsGenericVariables())
+ return clsIfNone;
+
+ // Classes that either have generic instantiations (G<int>) or derive from classes
+ // with generic instantiations (D : B<int>) are always considered ClassInterfaceType.None.
+ if (HasGenericClassInstantiationInHierarchy())
+ return clsIfNone;
+
+ // If the class does not support IClassX because it derives from or implements WinRT types,
+    // then it is considered ClassInterfaceType.None unless explicitly overridden by the CA
+ if (!ClassSupportsIClassX(this))
+ return clsIfNone;
+
+ return ReadClassInterfaceTypeCustomAttribute(TypeHandle(this));
+}
+#endif // FEATURE_COMINTEROP
+
+//---------------------------------------------------------------------------------------
+//
+Substitution
+MethodTable::GetSubstitutionForParent(
+ const Substitution * pSubst)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ mdToken crExtends;
+ DWORD dwAttrClass;
+
+ if (IsArray())
+ {
+ return Substitution(GetModule(), SigPointer(), pSubst);
+ }
+
+ IfFailThrow(GetMDImport()->GetTypeDefProps(
+ GetCl(),
+ &dwAttrClass,
+ &crExtends));
+
+ return Substitution(crExtends, GetModule(), pSubst);
+} // MethodTable::GetSubstitutionForParent
+
+#endif //!DACCESS_COMPILE
+
+//*******************************************************************************
+DWORD EEClass::GetReliabilityContract()
+{
+ LIMITED_METHOD_CONTRACT;
+ return HasOptionalFields() ? GetOptionalFields()->m_dwReliabilityContract : RC_NULL;
+}
+
+//*******************************************************************************
+#ifdef FEATURE_PREJIT
+DWORD EEClass::GetSize()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ // Total instance size consists of the fixed ("normal") fields, cached at construction time and dependent
+ // on whether we're a vanilla EEClass or DelegateEEClass etc., and a portion for the packed fields tacked on
+ // the end. The size of the packed fields can be retrieved from the fields themselves or, if we were
+ // unsuccessful in our attempts to compress the data, the full size of the EEClassPackedFields structure
+ // (which is essentially just a DWORD array of all the field values).
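+    // For example, a class whose packed fields compressed down to 12 bytes
+    // reports m_cbFixedEEClassFields + 12, while one whose fields could not
+    // be packed reports m_cbFixedEEClassFields + sizeof(EEClassPackedFields).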
+ return m_cbFixedEEClassFields +
+ (m_fFieldsArePacked ? GetPackedFields()->GetPackedSize() : sizeof(EEClassPackedFields));
+}
+#endif // FEATURE_PREJIT
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_COMINTEROP
+
+//
+// Implementations of SparseVTableMap methods.
+//
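+// A SparseVTableMap translates MethodTable slot numbers into COM vtable slot
+// numbers for interop vtables that contain gaps. The map is a sorted list of
+// entries, each mapping a contiguous run of non-gap MT slots (m_Start, m_Span)
+// to the VT slot at which the run starts (m_MapTo).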
+
+//*******************************************************************************
+SparseVTableMap::SparseVTableMap()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Note that this will also zero out all gaps. It is important for NGen determinism.
+ ZeroMemory(this, sizeof(*this));
+}
+
+//*******************************************************************************
+SparseVTableMap::~SparseVTableMap()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_MapList != NULL)
+ {
+ delete [] m_MapList;
+ m_MapList = NULL;
+ }
+}
+
+//*******************************************************************************
+// Allocate or expand the mapping list for a new entry.
+void SparseVTableMap::AllocOrExpand()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (m_MapEntries == m_Allocated) {
+
+ Entry *maplist = new Entry[m_Allocated + MapGrow];
+
+ if (m_MapList != NULL)
+ memcpy(maplist, m_MapList, m_MapEntries * sizeof(Entry));
+
+ m_Allocated += MapGrow;
+ delete [] m_MapList;
+ m_MapList = maplist;
+ }
+}
+
+//*******************************************************************************
+// While building mapping list, record a gap in VTable slot numbers.
+void SparseVTableMap::RecordGap(WORD StartMTSlot, WORD NumSkipSlots)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE((StartMTSlot == 0) || (StartMTSlot > m_MTSlot));
+ _ASSERTE(NumSkipSlots > 0);
+
+ // We use the information about the current gap to complete a map entry for
+ // the last non-gap. There is a special case where the vtable begins with a
+ // gap, so we don't have a non-gap to record.
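+    // For example, if MT slots 0-2 map directly to VT slots 0-2 and two VT
+    // slots are then skipped, RecordGap(3, 2) records the entry
+    // { m_Start = 0, m_Span = 3, m_MapTo = 0 } and advances the cursors to
+    // m_MTSlot = 3 and m_VTSlot = 5, so MT slot 3 maps to VT slot 5.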
+ if (StartMTSlot == 0) {
+ _ASSERTE((m_MTSlot == 0) && (m_VTSlot == 0));
+ m_VTSlot = NumSkipSlots;
+ return;
+ }
+
+ // We need an entry, allocate or expand the list as necessary.
+ AllocOrExpand();
+
+ // Update the list with an entry describing the last non-gap in vtable
+ // entries.
+ m_MapList[m_MapEntries].m_Start = m_MTSlot;
+ m_MapList[m_MapEntries].m_Span = StartMTSlot - m_MTSlot;
+ m_MapList[m_MapEntries].m_MapTo = m_VTSlot;
+
+ m_VTSlot += (StartMTSlot - m_MTSlot) + NumSkipSlots;
+ m_MTSlot = StartMTSlot;
+
+ m_MapEntries++;
+}
+
+//*******************************************************************************
+// Finish creation of mapping list.
+void SparseVTableMap::FinalizeMapping(WORD TotalMTSlots)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(TotalMTSlots >= m_MTSlot);
+
+ // If mapping ended with a gap, we have nothing else to record.
+ if (TotalMTSlots == m_MTSlot)
+ return;
+
+ // Allocate or expand the list as necessary.
+ AllocOrExpand();
+
+ // Update the list with an entry describing the last non-gap in vtable
+ // entries.
+ m_MapList[m_MapEntries].m_Start = m_MTSlot;
+ m_MapList[m_MapEntries].m_Span = TotalMTSlots - m_MTSlot;
+ m_MapList[m_MapEntries].m_MapTo = m_VTSlot;
+
+    // Update the VT slot cursor, because we use it to determine the total
+    // number of vtable slots (see GetNumVTableSlots).
+ m_VTSlot += TotalMTSlots - m_MTSlot;
+
+ m_MapEntries++;
+}
+
+//*******************************************************************************
+// Lookup a VTable slot number from a method table slot number.
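+// Continuing the RecordGap example: with entries { 0, 3, ->0 } and
+// { 3, 4, ->5 } (the latter added by FinalizeMapping(7)), LookupVTSlot(5)
+// falls in the second entry and returns (5 - 3) + 5 = 7.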
+WORD SparseVTableMap::LookupVTSlot(WORD MTSlot)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ // As an optimization, check the last entry which yielded a correct result.
+ if ((MTSlot >= m_MapList[m_LastUsed].m_Start) &&
+ (MTSlot < (m_MapList[m_LastUsed].m_Start + m_MapList[m_LastUsed].m_Span)))
+ return (MTSlot - m_MapList[m_LastUsed].m_Start) + m_MapList[m_LastUsed].m_MapTo;
+
+ // Check all MT slots spans to see which one our input slot lies in.
+ for (WORD i = 0; i < m_MapEntries; i++) {
+ if ((MTSlot >= m_MapList[i].m_Start) &&
+ (MTSlot < (m_MapList[i].m_Start + m_MapList[i].m_Span))) {
+ m_LastUsed = i;
+ return (MTSlot - m_MapList[i].m_Start) + m_MapList[i].m_MapTo;
+ }
+ }
+
+ _ASSERTE(!"Invalid MethodTable slot");
+ return ~0;
+}
+
+//*******************************************************************************
+// Retrieve the number of slots in the vtable (both empty and full).
+WORD SparseVTableMap::GetNumVTableSlots()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_VTSlot;
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+//*******************************************************************************
+void SparseVTableMap::Save(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ image->StoreStructure(this, sizeof(SparseVTableMap),
+ DataImage::ITEM_SPARSE_VTABLE_MAP_TABLE);
+
+ // Trim unused portion of the table
+ m_Allocated = m_MapEntries;
+
+ image->StoreInternedStructure(m_MapList, m_Allocated * sizeof(Entry),
+ DataImage::ITEM_SPARSE_VTABLE_MAP_ENTRIES);
+}
+
+//*******************************************************************************
+void SparseVTableMap::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ image->FixupPointerField(this, offsetof(SparseVTableMap, m_MapList));
+}
+#endif //FEATURE_NATIVE_IMAGE_GENERATION
+#endif //FEATURE_COMINTEROP
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+
+//*******************************************************************************
+void EEClass::Save(DataImage *image, MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(this == pMT->GetClass());
+ PRECONDITION(pMT->IsCanonicalMethodTable());
+ PRECONDITION(pMT->IsFullyLoaded());
+ PRECONDITION(!image->IsStored(this));
+ PRECONDITION(image->GetModule()->GetAssembly() ==
+ GetAppDomain()->ToCompilationDomain()->GetTargetAssembly());
+ }
+ CONTRACTL_END;
+
+ LOG((LF_ZAP, LL_INFO10000, "EEClass::Save %s (%p)\n", m_szDebugClassName, this));
+
+ // Optimize packable fields before saving into ngen image (the packable fields are located at the end of
+ // the EEClass or sub-type instance and packing will transform them into a space-efficient format which
+ // should reduce the result returned by the GetSize() call below). Packing will fail if the compression
+ // algorithm would result in an increase in size. We track this in the m_fFieldsArePacked data member
+ // which we use to determine whether to access the fields in their packed or unpacked format.
+ // Special case: we don't attempt to pack fields for the System.Threading.OverlappedData class since a
+ // host can change the size of this at runtime. This requires modifying one of the packable fields and we
+ // don't support updates to such fields if they were successfully packed.
+ if (g_pOverlappedDataClass == NULL)
+ {
+ g_pOverlappedDataClass = MscorlibBinder::GetClass(CLASS__OVERLAPPEDDATA);
+ _ASSERTE(g_pOverlappedDataClass);
+ }
+ if (this != g_pOverlappedDataClass->GetClass())
+ m_fFieldsArePacked = GetPackedFields()->PackFields();
+
+ DWORD cbSize = GetSize();
+
+ // ***************************************************************
+ // Only put new actions in this function if they really relate to EEClass
+ // rather than MethodTable. For example, if you need to allocate
+ // a per-type entry in some table in the NGEN image, then you will probably
+ // need to allocate one such entry per MethodTable, e.g. per generic
+ // instantiation. You probably don't want to allocate one that is common
+ // to a group of shared instantiations.
+ // ***************************************************************
+
+ DataImage::ItemKind item =
+ (!pMT->IsGenericTypeDefinition() && pMT->ContainsGenericVariables())
+ ? DataImage::ITEM_EECLASS_COLD
+ // Until we get all the access paths for generics tidied up, many paths touch the EEClass, e.g. GetInstantiation()
+ : pMT->HasInstantiation()
+ ? DataImage::ITEM_EECLASS_WARM
+ : DataImage::ITEM_EECLASS;
+
+ // Save optional fields if we have any.
+ if (HasOptionalFields())
+ image->StoreStructure(GetOptionalFields(),
+ sizeof(EEClassOptionalFields),
+ item);
+
+#ifdef _DEBUG
+ if (!image->IsStored(m_szDebugClassName))
+ image->StoreStructure(m_szDebugClassName, (ULONG)(strlen(m_szDebugClassName)+1),
+ DataImage::ITEM_DEBUG,
+ 1);
+#endif // _DEBUG
+
+#ifdef FEATURE_COMINTEROP
+ if (GetSparseCOMInteropVTableMap() != NULL)
+ GetSparseCOMInteropVTableMap()->Save(image);
+#endif // FEATURE_COMINTEROP
+
+ //
+ // Save MethodDescs
+ //
+
+ MethodDescChunk *chunk = GetChunks();
+ if (chunk != NULL)
+ {
+ MethodDesc::SaveChunk methodDescSaveChunk(image);
+
+ MethodTable::IntroducedMethodIterator it(pMT, TRUE);
+ for (; it.IsValid(); it.Next())
+ {
+ MethodDesc * pMD = it.GetMethodDesc();
+
+ // Do not save IL stubs that we have failed to generate code for
+ if (pMD->IsILStub() && image->GetCodeAddress(pMD) == NULL)
+ continue;
+
+ methodDescSaveChunk.Append(pMD);
+ }
+
+ ZapStoredStructure * pChunksNode = methodDescSaveChunk.Save();
+ if (pChunksNode != NULL)
+ image->BindPointer(chunk, pChunksNode, 0);
+
+ }
+
+ //
+ // Save FieldDescs
+ //
+
+ SIZE_T fieldCount = FieldDescListSize(pMT);
+
+ if (fieldCount != 0)
+ {
+ FieldDesc *pFDStart = GetFieldDescList();
+ FieldDesc *pFDEnd = pFDStart + fieldCount;
+
+ FieldDesc *pFD = pFDStart;
+ while (pFD < pFDEnd)
+ {
+ pFD->PrecomputeNameHash();
+ pFD++;
+ }
+
+ ZapStoredStructure * pFDNode = image->StoreStructure(pFDStart, (ULONG)(fieldCount * sizeof(FieldDesc)),
+ DataImage::ITEM_FIELD_DESC_LIST);
+
+ pFD = pFDStart;
+ while (pFD < pFDEnd)
+ {
+ pFD->SaveContents(image);
+ if (pFD != pFDStart)
+ image->BindPointer(pFD, pFDNode, (BYTE *)pFD - (BYTE *)pFDStart);
+ pFD++;
+ }
+ }
+
+ //
+    // Save FieldMarshalers
+ //
+
+ if (HasLayout())
+ {
+ EEClassLayoutInfo *pInfo = &((LayoutEEClass*)this)->m_LayoutInfo;
+
+ if (pInfo->m_numCTMFields > 0)
+ {
+ ZapStoredStructure * pNode = image->StoreStructure(pInfo->m_pFieldMarshalers,
+ pInfo->m_numCTMFields * MAXFIELDMARSHALERSIZE,
+ DataImage::ITEM_FIELD_MARSHALERS);
+
+ for (UINT iField = 0; iField < pInfo->m_numCTMFields; iField++)
+ {
+ FieldMarshaler *pFM = (FieldMarshaler*)((BYTE *)pInfo->m_pFieldMarshalers + iField * MAXFIELDMARSHALERSIZE);
+ pFM->Save(image);
+
+ if (iField > 0)
+ image->BindPointer(pFM, pNode, iField * MAXFIELDMARSHALERSIZE);
+ }
+ }
+ }
+
+ // Save dictionary layout information
+ DictionaryLayout *pDictLayout = GetDictionaryLayout();
+ if (pMT->IsSharedByGenericInstantiations() && pDictLayout != NULL)
+ {
+ pDictLayout->Save(image);
+ LOG((LF_ZAP, LL_INFO10000, "ZAP: dictionary for %s has %d slots used out of possible %d\n", m_szDebugClassName,
+ pDictLayout->GetNumUsedSlots(), pDictLayout->GetMaxSlots()));
+ }
+
+ if (GetVarianceInfo() != NULL)
+ image->StoreInternedStructure(GetVarianceInfo(),
+ pMT->GetNumGenericArgs(),
+ DataImage::ITEM_CLASS_VARIANCE_INFO);
+
+ image->StoreStructure(this, cbSize, item);
+
+ if (pMT->IsInterface())
+ {
+ // Make sure our guid is computed
+
+#ifdef FEATURE_COMINTEROP
+ // Generic WinRT types can have their GUID computed only if the instantiation is WinRT-legal
+ if (!pMT->IsProjectedFromWinRT() ||
+ !pMT->SupportsGenericInterop(TypeHandle::Interop_NativeToManaged) ||
+ pMT->IsLegalNonArrayWinRTType())
+#endif // FEATURE_COMINTEROP
+ {
+ GUID dummy;
+ if (SUCCEEDED(pMT->GetGuidNoThrow(&dummy, TRUE, FALSE)))
+ {
+ GuidInfo* pGuidInfo = pMT->GetGuidInfo();
+ _ASSERTE(pGuidInfo != NULL);
+
+ image->StoreStructure(pGuidInfo, sizeof(GuidInfo),
+ DataImage::ITEM_GUID_INFO);
+
+#ifdef FEATURE_COMINTEROP
+ if (pMT->IsLegalNonArrayWinRTType())
+ {
+ Module *pModule = pMT->GetModule();
+ if (pModule->CanCacheWinRTTypeByGuid(pMT))
+ {
+ pModule->CacheWinRTTypeByGuid(pMT, pGuidInfo);
+ }
+ }
+#endif // FEATURE_COMINTEROP
+ }
+ else
+ {
+ // make sure we don't store a GUID_NULL guid in the NGEN image
+ // instead we'll compute the GUID at runtime, and throw, if appropriate
+ m_pGuidInfo = NULL;
+ }
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (IsDelegate())
+ {
+ DelegateEEClass *pDelegateClass = (DelegateEEClass *)this;
+ ComPlusCallInfo *pComInfo = pDelegateClass->m_pComPlusCallInfo;
+
+ if (pComInfo != NULL && pComInfo->ShouldSave(image))
+ {
+ image->StoreStructure(pDelegateClass->m_pComPlusCallInfo,
+ sizeof(ComPlusCallInfo),
+ item);
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ LOG((LF_ZAP, LL_INFO10000, "EEClass::Save %s (%p) complete.\n", m_szDebugClassName, this));
+}
+
+//*******************************************************************************
+DWORD EEClass::FieldDescListSize(MethodTable * pMT)
+{
+ LIMITED_METHOD_CONTRACT;
+
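+    // The FieldDesc list covers only the fields this class introduces: its
+    // instance fields minus those inherited from the parent, followed by its
+    // own static fields.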
+ EEClass * pClass = pMT->GetClass();
+ DWORD fieldCount = pClass->GetNumInstanceFields() + pClass->GetNumStaticFields();
+
+ MethodTable * pParentMT = pMT->GetParentMethodTable();
+ if (pParentMT != NULL)
+ fieldCount -= pParentMT->GetNumInstanceFields();
+ return fieldCount;
+}
+
+//*******************************************************************************
+void EEClass::Fixup(DataImage *image, MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(this == pMT->GetClass());
+ PRECONDITION(pMT->IsCanonicalMethodTable());
+ PRECONDITION(pMT->IsFullyLoaded());
+ PRECONDITION(image->IsStored(this));
+ }
+ CONTRACTL_END;
+
+ LOG((LF_ZAP, LL_INFO10000, "EEClass::Fixup %s (%p)\n", GetDebugClassName(), this));
+
+ // Fixup pointer to optional fields if this class has any. This pointer is a relative pointer (to avoid
+ // the need for base relocation fixups) and thus needs to use the IMAGE_REL_BASED_RELPTR fixup type.
+ if (HasOptionalFields())
+ image->FixupRelativePointerField(this, offsetof(EEClass, m_rpOptionalFields));
+
+#ifdef _DEBUG
+ image->FixupPointerField(this, offsetof(EEClass, m_szDebugClassName));
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ if (GetSparseCOMInteropVTableMap() != NULL)
+ {
+ image->FixupPointerField(GetOptionalFields(), offsetof(EEClassOptionalFields, m_pSparseVTableMap));
+ GetSparseCOMInteropVTableMap()->Fixup(image);
+ }
+#endif // FEATURE_COMINTEROP
+
+ DictionaryLayout *pDictLayout = GetDictionaryLayout();
+ if (pDictLayout != NULL)
+ {
+ pDictLayout->Fixup(image, FALSE);
+ image->FixupPointerField(GetOptionalFields(), offsetof(EEClassOptionalFields, m_pDictLayout));
+ }
+
+ if (HasOptionalFields())
+ image->FixupPointerField(GetOptionalFields(), offsetof(EEClassOptionalFields, m_pVarianceInfo));
+
+ //
+    // We pass in the method table because some classes (e.g. the remoting
+    // proxy) have fake method tables set up in them and we want to restore
+    // the regular one.
+ //
+ image->FixupField(this, offsetof(EEClass, m_pMethodTable), pMT);
+
+ //
+ // Fixup MethodDescChunk and MethodDescs
+ //
+ MethodDescChunk* pChunks = GetChunks();
+
+ if (pChunks!= NULL && image->IsStored(pChunks))
+ {
+ image->FixupRelativePointerField(this, offsetof(EEClass, m_pChunks));
+
+ MethodTable::IntroducedMethodIterator it(pMT, TRUE);
+ for (; it.IsValid(); it.Next())
+ {
+ MethodDesc * pMD = it.GetMethodDesc();
+
+ // Skip IL stubs that were not saved into the image
+ if (pMD->IsILStub() && !image->IsStored(pMD))
+ continue;
+
+ it.GetMethodDesc()->Fixup(image);
+ }
+
+ }
+ else
+ {
+ image->ZeroPointerField(this, offsetof(EEClass, m_pChunks));
+ }
+
+ //
+ // Fixup FieldDescs
+ //
+
+ SIZE_T fieldCount = FieldDescListSize(pMT);
+
+ if (fieldCount != 0)
+ {
+ image->FixupRelativePointerField(this, offsetof(EEClass, m_pFieldDescList));
+
+ FieldDesc *pField = GetFieldDescList();
+ FieldDesc *pFieldEnd = pField + fieldCount;
+ while (pField < pFieldEnd)
+ {
+ pField->Fixup(image);
+ pField++;
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // These fields will be lazy inited if we zero them
+ if (HasOptionalFields())
+ image->ZeroPointerField(GetOptionalFields(), offsetof(EEClassOptionalFields, m_pCoClassForIntf));
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ if (HasOptionalFields())
+ image->ZeroPointerField(GetOptionalFields(), offsetof(EEClassOptionalFields, m_pClassFactory));
+#endif
+ image->ZeroPointerField(this, offsetof(EEClass, m_pccwTemplate));
+#endif // FEATURE_COMINTEROP
+
+ if (HasLayout())
+ {
+ image->FixupPointerField(this, offsetof(LayoutEEClass, m_LayoutInfo.m_pFieldMarshalers));
+
+ EEClassLayoutInfo *pInfo = &((LayoutEEClass*)this)->m_LayoutInfo;
+
+ FieldMarshaler *pFM = pInfo->m_pFieldMarshalers;
+ FieldMarshaler *pFMEnd = (FieldMarshaler*) ((BYTE *)pFM + pInfo->m_numCTMFields*MAXFIELDMARSHALERSIZE);
+ while (pFM < pFMEnd)
+ {
+ pFM->Fixup(image);
+ ((BYTE*&)pFM) += MAXFIELDMARSHALERSIZE;
+ }
+ }
+ else if (IsDelegate())
+ {
+ image->FixupPointerField(this, offsetof(DelegateEEClass, m_pInvokeMethod));
+ image->FixupPointerField(this, offsetof(DelegateEEClass, m_pBeginInvokeMethod));
+ image->FixupPointerField(this, offsetof(DelegateEEClass, m_pEndInvokeMethod));
+
+ image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pUMThunkMarshInfo));
+ image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pStaticCallStub));
+ image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pMultiCastInvokeStub));
+ image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pMarshalStub));
+
+#ifdef FEATURE_COMINTEROP
+ DelegateEEClass *pDelegateClass = (DelegateEEClass *)this;
+ ComPlusCallInfo *pComInfo = pDelegateClass->m_pComPlusCallInfo;
+
+ if (image->IsStored(pComInfo))
+ {
+ image->FixupPointerField(this, offsetof(DelegateEEClass, m_pComPlusCallInfo));
+ pComInfo->Fixup(image);
+ }
+ else
+ {
+ image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pComPlusCallInfo));
+ }
+#endif // FEATURE_COMINTEROP
+
+ image->FixupPointerField(this, offsetof(DelegateEEClass, m_pForwardStubMD));
+ image->FixupPointerField(this, offsetof(DelegateEEClass, m_pReverseStubMD));
+ }
+
+ //
+    // This field must be initialized at load time.
+ //
+
+ if (IsInterface() && GetGuidInfo() != NULL)
+ image->FixupPointerField(this, offsetof(EEClass, m_pGuidInfo));
+ else
+ image->ZeroPointerField(this, offsetof(EEClass, m_pGuidInfo));
+
+ LOG((LF_ZAP, LL_INFO10000, "EEClass::Fixup %s (%p) complete.\n", GetDebugClassName(), this));
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+
+//*******************************************************************************
+void EEClass::AddChunk (MethodDescChunk* pNewChunk)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ _ASSERTE(pNewChunk->GetNextChunk() == NULL);
+ pNewChunk->SetNextChunk(GetChunks());
+ SetChunks(pNewChunk);
+}
+
+//*******************************************************************************
+void EEClass::AddChunkIfItHasNotBeenAdded (MethodDescChunk* pNewChunk)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ // return if the chunk has been added
+ if (pNewChunk->GetNextChunk() != NULL)
+ return;
+
+ // even if pNewChunk->GetNextChunk() is NULL, this may still be the first chunk we added
+ // (last in the list) so find the end of the list and verify that
+ MethodDescChunk *chunk = GetChunks();
+ if (chunk != NULL)
+ {
+ while (chunk->GetNextChunk() != NULL)
+ chunk = chunk->GetNextChunk();
+
+ if (chunk == pNewChunk)
+ return;
+ }
+
+ pNewChunk->SetNextChunk(GetChunks());
+ SetChunks(pNewChunk);
+}
+
+#endif // !DACCESS_COMPILE
+
+//*******************************************************************************
+// ApproxFieldDescIterator is used to iterate over fields in a given class.
+// It does not include EnC fields or inherited fields.
+// <NICE> ApproxFieldDescIterator is only used to iterate over static fields in
+// one place, and this will probably change anyway. After we clean this up we
+// should make ApproxFieldDescIterator work over instance fields only. </NICE>
+ApproxFieldDescIterator::ApproxFieldDescIterator()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ m_iteratorType = 0;
+ m_pFieldDescList = NULL;
+ m_currField = -1;
+ m_totalFields = 0;
+}
+
+//*******************************************************************************
+void ApproxFieldDescIterator::Init(MethodTable *pMT, int iteratorType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ m_iteratorType = iteratorType;
+ m_pFieldDescList = pMT->GetApproxFieldDescListRaw();
+ m_currField = -1;
+
+ // This gets non-EnC fields.
+ m_totalFields = pMT->GetNumIntroducedInstanceFields();
+
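+    // The FieldDesc list is laid out with the introduced instance fields
+    // first, followed by the static fields, so an iterator that excludes
+    // instance fields can skip them by starting the cursor at the last one.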
+ if (!(iteratorType & (int)INSTANCE_FIELDS))
+ {
+ // if not handling instances then skip them by setting curr to last one
+ m_currField = m_totalFields - 1;
+ }
+
+ if (iteratorType & (int)STATIC_FIELDS)
+ {
+ m_totalFields += pMT->GetNumStaticFields();
+ }
+}
+
+//*******************************************************************************
+PTR_FieldDesc ApproxFieldDescIterator::Next()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ // This will iterate through all non-inherited and non-EnC fields.
+ ++m_currField;
+ if (m_currField >= m_totalFields)
+ {
+ return NULL;
+ }
+
+ return m_pFieldDescList + m_currField;
+}
+
+//*******************************************************************************
+bool
+DeepFieldDescIterator::NextClass()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_curClass <= 0)
+ {
+ return false;
+ }
+
+ if (m_numClasses <= 0) {
+ _ASSERTE(m_numClasses > 0);
+ return false;
+ }
+
+ MethodTable * pMT;
+
+ //
+ // If we're in the cache just grab the cache entry.
+ //
+ // If we're deeper in the hierarchy than the
+ // portion we cached we need to take the
+ // deepest cache entry and search down manually.
+ //
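+    // For example, with m_numClasses == 4 cached entries and m_curClass == 5
+    // after the decrement below, depthDiff == 5 - 4 + 1 == 2, so we start
+    // from m_classes[3] and walk up GetParentMethodTable() twice.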
+
+ if (--m_curClass < m_numClasses)
+ {
+ pMT = m_classes[m_curClass];
+ }
+ else
+ {
+ pMT = m_classes[m_numClasses - 1];
+ int depthDiff = m_curClass - m_numClasses + 1;
+ while (depthDiff--)
+ {
+ pMT = pMT->GetParentMethodTable();
+ }
+ }
+
+ m_fieldIter.Init(pMT, m_fieldIter.GetIteratorType());
+ return true;
+}
+
+//*******************************************************************************
+void
+DeepFieldDescIterator::Init(MethodTable* pMT, int iteratorType,
+ bool includeParents)
+{
+ WRAPPER_NO_CONTRACT;
+
+ MethodTable * lastClass = NULL;
+ int numClasses;
+
+ //
+ // Walk up the parent chain, collecting
+ // parent pointers and counting fields.
+ //
+
+ numClasses = 0;
+ m_numClasses = 0;
+ m_deepTotalFields = 0;
+ m_lastNextFromParentClass = false;
+
+ while (pMT)
+ {
+ if (m_numClasses < (int)NumItems(m_classes))
+ {
+ m_classes[m_numClasses++] = pMT;
+ }
+
+ if ((iteratorType & ApproxFieldDescIterator::INSTANCE_FIELDS) != 0)
+ {
+ m_deepTotalFields += pMT->GetNumIntroducedInstanceFields();
+ }
+ if ((iteratorType & ApproxFieldDescIterator::STATIC_FIELDS) != 0)
+ {
+ m_deepTotalFields += pMT->GetNumStaticFields();
+ }
+
+ numClasses++;
+ lastClass = pMT;
+
+ if (includeParents)
+ {
+ pMT = pMT->GetParentMethodTable();
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ // Start the per-class field iterator on the base-most parent.
+ if (numClasses)
+ {
+ m_curClass = numClasses - 1;
+ m_fieldIter.Init(lastClass, iteratorType);
+ }
+ else
+ {
+ m_curClass = 0;
+ }
+}
+
+//*******************************************************************************
+FieldDesc*
+DeepFieldDescIterator::Next()
+{
+ WRAPPER_NO_CONTRACT;
+
+ FieldDesc* field;
+
+ do
+ {
+ m_lastNextFromParentClass = m_curClass > 0;
+
+ field = m_fieldIter.Next();
+
+ if (!field && !NextClass())
+ {
+ return NULL;
+ }
+ }
+ while (!field);
+
+ return field;
+}
+
+//*******************************************************************************
+bool
+DeepFieldDescIterator::Skip(int numSkip)
+{
+ WRAPPER_NO_CONTRACT;
+
+ while (numSkip >= m_fieldIter.CountRemaining())
+ {
+ numSkip -= m_fieldIter.CountRemaining();
+
+ if (!NextClass())
+ {
+ return false;
+ }
+ }
+
+ while (numSkip--)
+ {
+ m_fieldIter.Next();
+ }
+
+ return true;
+}
+
+#ifdef DACCESS_COMPILE
+
+//*******************************************************************************
+void
+EEClass::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, MethodTable * pMT)
+{
+ SUPPORTS_DAC;
+ DAC_ENUM_DTHIS();
+ EMEM_OUT(("MEM: %p EEClass\n", dac_cast<TADDR>(this)));
+
+ // The DAC_ENUM_DTHIS above won't have reported the packed fields tacked on the end of this instance (they
+ // aren't part of the static class definition because the fields are variably sized and thus have to come
+ // right at the end of the structure, even for sub-types such as LayoutEEClass or DelegateEEClass).
+ DacEnumMemoryRegion(dac_cast<TADDR>(GetPackedFields()), sizeof(EEClassPackedFields));
+
+ if (HasOptionalFields())
+ DacEnumMemoryRegion(dac_cast<TADDR>(GetOptionalFields()), sizeof(EEClassOptionalFields));
+
+ if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
+ {
+ PTR_Module pModule = pMT->GetModule();
+ if (pModule.IsValid())
+ {
+ pModule->EnumMemoryRegions(flags, true);
+ }
+ PTR_MethodDescChunk chunk = GetChunks();
+ while (chunk.IsValid())
+ {
+ chunk->EnumMemoryRegions(flags);
+ chunk = chunk->GetNextChunk();
+ }
+ }
+
+ PTR_FieldDesc pFieldDescList = GetFieldDescList();
+ if (pFieldDescList.IsValid())
+ {
+        // Add one to make SOS's code happy.
+ DacEnumMemoryRegion(dac_cast<TADDR>(pFieldDescList),
+ (pMT->GetNumIntroducedInstanceFields() +
+ GetNumStaticFields() + 1) *
+ sizeof(FieldDesc));
+ }
+
+}
+
+#endif // DACCESS_COMPILE
+
+// Get pointer to the packed fields structure attached to this instance.
+PTR_EEClassPackedFields EEClass::GetPackedFields()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return dac_cast<PTR_EEClassPackedFields>(PTR_HOST_TO_TADDR(this) + m_cbFixedEEClassFields);
+}
+
+// Get the value of the given field. Works regardless of whether the field is currently in its packed or
+// unpacked state.
+DWORD EEClass::GetPackableField(EEClassFieldId eField)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ return m_fFieldsArePacked ?
+ GetPackedFields()->GetPackedField(eField) :
+ GetPackedFields()->GetUnpackedField(eField);
+}
+
+// Set the value of the given field. The field *must* be in the unpacked state for this to be legal (in
+// practice all packable fields must be initialized during class construction and from then on remain
+// immutable).
+void EEClass::SetPackableField(EEClassFieldId eField, DWORD dwValue)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!m_fFieldsArePacked);
+ GetPackedFields()->SetUnpackedField(eField, dwValue);
+}
+
+#ifndef DACCESS_COMPILE
+#ifdef MDIL
+//-------------------------------------------------------------------------------
+void EEClass::WriteCompactLayout(ICompactLayoutWriter *pICLW, ZapImage *pZapImage)
+{
+ STANDARD_VM_CONTRACT;
+
+ EX_TRY
+ {
+ IfFailThrow(WriteCompactLayoutHelper(pICLW));
+ }
+ EX_CATCH
+ {
+        // This catch prevents type load/assembly load failures that occur
+        // during CTL generation from bringing down the MDIL generation phase.
+ SString message;
+ GET_EXCEPTION()->GetMessage(message);
+ GetSvcLogger()->Printf(LogLevel_Warning, W("%s while generating CTL for typedef 0x%x\n"), message.GetUnicode(), GetMethodTable()->GetCl());
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions)
+}
+
+//-------------------------------------------------------------------------------
+HRESULT EEClass::WriteCompactLayoutHelper(ICompactLayoutWriter *pICLW)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+ MethodTable * pMT = GetMethodTable();
+ Module *pModule = pMT->GetModule();
+ IMDInternalImport *pMDImport = pModule->GetMDImport();
+
+ // Prepare the CTL writer for writing a type
+ pICLW->Reset();
+
+ //
+    // Gather high level information about the type: flags, and tokens for
+    // the type, its base, and its enclosing type (if any).
+ //
+
+ DWORD flags = 0;
+ mdToken tkType = pMT->GetCl();
+ mdToken tkBaseType = mdTokenNil;
+ pMDImport->GetTypeDefProps(tkType, &flags, &tkBaseType);
+
+ mdTypeDef tkEnclosingType = mdTokenNil;
+ pMDImport->GetNestedClassProps(tkType, &tkEnclosingType);
+
+ //
+ // Get the count for the number of interfaces from metadata
+ //
+
+ HENUMInternalHolder hEnumInterfaceImpl(pMDImport);
+ hEnumInterfaceImpl.EnumInit(mdtInterfaceImpl, tkType);
+ DWORD interfaceCount = hEnumInterfaceImpl.EnumGetCount();
+
+ //
+ // Get the count of fields introduced by this type.
+ //
+
+ DWORD fieldCount = pMT->GetNumIntroducedInstanceFields() + GetNumStaticFields();
+
+ //
+ // Count the total number of declared methods for this class
+ //
+
+ DWORD declaredMethodCount = 0;
+ DWORD unboxingStubCount = 0;
+ DWORD declaredVirtualMethodCount = 0;
+    { // If this in any way proves to be a speed issue, it could be done more
+      // efficiently by iterating the MethodDescChunks and adding the counts of
+      // each chunk together. For now this is the preferred abstraction to use.
+ MethodTable::IntroducedMethodIterator it(GetMethodTable());
+ for (; it.IsValid(); it.Next())
+ {
+ MethodDesc *pMD = it.GetMethodDesc();
+
+            // Unboxing stubs need to be handled specially: we don't want to
+            // report them - the fact that they are in the method table and
+            // have method descs is a CLR implementation detail.
+            // However, we need to know their number so we can correct
+            // internal counts that include them.
+ if (pMD->IsUnboxingStub())
+ ++unboxingStubCount;
+ else
+ {
+ if (pMD->IsVirtual())
+ declaredVirtualMethodCount++;
+ ++declaredMethodCount;
+ }
+ }
+ }
+
+ //
+ // Calculate how many virtual methods contribute to overrides and how
+ // many contribute to new slots
+ //
+
+ DWORD nonVirtualMethodCount = pMT->GetNumMethods() - unboxingStubCount - pMT->GetNumVirtuals();
+ DWORD newVirtualMethodCount = pMT->GetNumVirtuals() - pMT->GetNumParentVirtuals();
+ if (newVirtualMethodCount > declaredVirtualMethodCount)
+ {
+ // this should only happen for transparent proxy, which has special rules
+ _ASSERTE(pMT->IsTransparentProxy());
+ newVirtualMethodCount = declaredVirtualMethodCount;
+ }
+ DWORD overrideVirtualMethodCount = declaredMethodCount - nonVirtualMethodCount - newVirtualMethodCount;
+ if (overrideVirtualMethodCount > declaredVirtualMethodCount)
+ {
+ // this should only happen for transparent proxy, which has special rules
+ _ASSERTE(pMT->IsTransparentProxy());
+ overrideVirtualMethodCount = declaredVirtualMethodCount;
+ }
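+    // For example, a type whose method table holds 10 methods including one
+    // unboxing stub, with 6 virtual slots of which 4 are inherited, has
+    // 10 - 1 - 6 = 3 non-virtual methods and 6 - 4 = 2 new virtuals; with
+    // 9 declared methods that leaves 9 - 3 - 2 = 4 virtual overrides.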
+
+ //
+ // Generic types are prefixed by their number of type arguments
+ if (pMT->HasInstantiation())
+ {
+ pICLW->GenericType(pMT->GetNumGenericArgs());
+ Instantiation inst = GetMethodTable()->GetInstantiation();
+ BYTE *varianceInfo = GetVarianceInfo();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ CorGenericParamAttr flags = GetVarianceOfTypeParameter(varianceInfo, i);
+ pICLW->GenericParameter(inst[i].AsGenericVariable()->GetToken(), flags);
+ }
+ }
+
+ _ASSERTE((pMT == GetMethodTable()));
+ if (GetMethodTable()->IsComObjectType())
+ {
+// printf("Com object type: %08x\n", tkType);
+ flags |= ICompactLayoutWriter::CF_COMOBJECTTYPE;
+ }
+
+ if (IsEquivalentType())
+ {
+ flags |= ICompactLayoutWriter::CF_TYPE_EQUIVALENT;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (IsComClassInterface())
+ {
+// printf("Com class interface type: %08x\n", tkType);
+ flags |= ICompactLayoutWriter::CF_COMCLASSINTERFACE;
+ }
+
+ if (IsComEventItfType())
+ {
+// printf("Com event interface type: %08x\n", tkType);
+ flags |= ICompactLayoutWriter::CF_COMEVENTINTERFACE;
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (GetMethodTable()->HasFixedAddressVTStatics())
+ {
+ flags |= ICompactLayoutWriter::CF_FIXED_ADDRESS_VT_STATICS;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (IsInterface())
+ {
+ switch (GetMethodTable()->GetComInterfaceType())
+ {
+ case ifDual: flags |= ICompactLayoutWriter::CF_DUAL; break;
+ case ifVtable: flags |= ICompactLayoutWriter::CF_VTABLE; break;
+ case ifDispatch: flags |= ICompactLayoutWriter::CF_DISPATCH; break;
+ case ifInspectable: flags |= ICompactLayoutWriter::CF_INSPECTABLE; break;
+        default: _ASSERTE(!"unexpected com interface type"); break;
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (GetMethodTable()->DependsOnEquivalentOrForwardedStructs())
+ {
+ flags |= ICompactLayoutWriter::CF_DEPENDS_ON_COM_IMPORT_STRUCTS;
+ }
+
+ if (GetMethodTable()->HasFinalizer())
+ {
+ _ASSERTE(!IsInterface());
+ flags |= ICompactLayoutWriter::CF_FINALIZER;
+ if (GetMethodTable()->HasCriticalFinalizer())
+ flags |= ICompactLayoutWriter::CF_CRITICALFINALIZER;
+ }
+
+
+ // Force computation of transparency bits into EEClass->m_VMFlags
+ Security::IsTypeTransparent(GetMethodTable());
+
+ if ((m_VMFlags & VMFLAG_TRANSPARENCY_MASK) == VMFLAG_TRANSPARENCY_UNKNOWN)
+        printf("Transparency of type %08x is unknown\n", tkType);
+
+
+ if (m_VMFlags & VMFLAG_CONTAINS_STACK_PTR)
+ flags |= ICompactLayoutWriter::CF_CONTAINS_STACK_PTR;
+
+ // If the class is marked as unsafe value class we need to filter out those classes
+ // that get marked only "by inheritance" (they contain a field of a type that is marked).
+    // In CTL we will mark only the classes that are marked explicitly via a custom attribute.
+ // The binder will propagate this state again during field layout - thereby avoiding
+ // potentially stale bits.
+
+ // Check that this bit is not already used by somebody else
+ _ASSERTE((flags & ICompactLayoutWriter::CF_UNSAFEVALUETYPE) == 0);
+
+ if (IsUnsafeValueClass())
+ {
+ // If the class is marked as unsafe value class we need to filter out those classes
+ // that get the mark only "by inheritance". In CTL we will mark only the classes
+        // that are marked explicitly in metadata.
+
+ //printf("%s ", IsMdPublic(flags) ? "Public" : "Intern");
+ //printf("Type 0x%08X is unsafe valuetype", tkType);
+
+ HRESULT hr = pMT->GetMDImport()->GetCustomAttributeByName(tkType,
+ g_CompilerServicesUnsafeValueTypeAttribute,
+ NULL, NULL);
+ IfFailThrow(hr);
+ if (hr == S_OK)
+ {
+ //printf(" (directly marked)", tkType);
+ flags |= ICompactLayoutWriter::CF_UNSAFEVALUETYPE;
+ }
+ //printf("\n");
+ }
+
+ //
+ // Now have enough information to start serializing the type.
+ //
+
+ pICLW->StartType(flags, // CorTypeAttr plus perhaps other flags
+ tkType, // typedef token for this type
+ tkBaseType, // type this type is derived from, if any
+ tkEnclosingType, // type this type is nested in, if any
+ interfaceCount, // how many times ImplementInterface() will be called
+ fieldCount, // how many times Field() will be called
+ declaredMethodCount, // how many times Method() will be called
+ newVirtualMethodCount, // how many new virtuals this type defines
+ overrideVirtualMethodCount );
+
+ DWORD dwPackSize;
+ hr = pMDImport->GetClassPackSize(GetMethodTable()->GetCl(), &dwPackSize);
+    if (SUCCEEDED(hr) && dwPackSize != 0)
+ {
+ _ASSERTE(dwPackSize == 1 || dwPackSize == 2 || dwPackSize == 4 || dwPackSize == 8 || dwPackSize == 16 || dwPackSize == 32 || dwPackSize == 64 || dwPackSize == 128);
+ pICLW->PackType(dwPackSize);
+ }
+
+ IfFailRet(WriteCompactLayoutTypeFlags(pICLW));
+ IfFailRet(WriteCompactLayoutSpecialType(pICLW));
+
+ if (IsInterface() && !HasNoGuid())
+ {
+ GUID guid;
+ GetMethodTable()->GetGuid(&guid, TRUE);
+ GuidInfo *guidInfo = GetGuidInfo();
+ if (guidInfo != NULL)
+ pICLW->GuidInformation(guidInfo);
+ }
+
+ IfFailRet(WriteCompactLayoutFields(pICLW));
+
+ IfFailRet(WriteCompactLayoutMethods(pICLW));
+ IfFailRet(WriteCompactLayoutMethodImpls(pICLW));
+
+ IfFailRet(WriteCompactLayoutInterfaces(pICLW));
+ IfFailRet(WriteCompactLayoutInterfaceImpls(pICLW));
+
+
+ pICLW->EndType();
+
+ return hr;
+}
+
+//-------------------------------------------------------------------------------
+HRESULT EEClass::WriteCompactLayoutTypeFlags(ICompactLayoutWriter *pICLW)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ DWORD flags = m_VMFlags & VMFLAG_TRANSPARENCY_MASK;
+ DWORD extendedTypeFlags = 0;
+ bool needsExtendedTypeFlagsOutput = false;
+
+ if (flags != VMFLAG_TRANSPARENCY_TRANSPARENT)
+ {
+ _ASSERTE((VMFLAG_TRANSPARENCY_MASK == 0x1C));
+ flags = (flags >> 2);
+ extendedTypeFlags |= flags;
+ needsExtendedTypeFlagsOutput = true;
+ }
+ else
+ {
+ extendedTypeFlags |= EXTENDED_TYPE_FLAG_SF_TRANSPARENT;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // Handle EXTENDED_TYPE_FLAG_PLATFORM_NEEDS_PER_TYPE_RCW_DATA
+ // This flag should only be set for platform types (In Windows.winmd, and in mscorlib/system.dll)
+ bool fBinderHandledNeedsPerTypeRCWDataCase = IsInterface() && GetMethodTable()->GetModule()->GetAssembly()->IsWinMD() && (GetVarianceInfo() != NULL);
+
+ if (!fBinderHandledNeedsPerTypeRCWDataCase && GetMethodTable()->HasRCWPerTypeData())
+ {
+        // This should only happen for runtime components that ship in-box.
+        // Assert that this is the case. The flag is not a versionable flag.
+
+        // This checks that the assembly is either part of the TPA list or a WinMD file.
+#ifdef FEATURE_CORECLR
+ _ASSERTE("MDIL Compiler has determined that a winrt type needs per-type-RCW data, but is not a platform type." &&
+ (GetMethodTable()->GetModule()->GetAssembly()->GetManifestFile()->IsProfileAssembly() ||
+ GetMethodTable()->GetModule()->GetAssembly()->IsWinMD() ||
+ GetAppDomain()->IsSystemDll(GetMethodTable()->GetModule()->GetAssembly())));
+#endif
+#ifdef _DEBUG
+ if (GetMethodTable()->GetModule()->GetAssembly()->IsWinMD())
+ {
+            // If this is a WinMD file, verify the namespace is "Windows.<something>".
+ DefineFullyQualifiedNameForClass();
+ const char * pszFullyQualifiedName = GetFullyQualifiedNameForClass(this->GetMethodTable());
+
+ if (strncmp(pszFullyQualifiedName, "Windows.", 8) != 0)
+ {
+ _ASSERTE(!"MDIL Compiler has determined that a winrt type needs per-type-RCW data, but that the binder will not generate it, and the flag to generate it is not part of versionable MDIL.");
+ }
+ }
+#endif
+ extendedTypeFlags |= EXTENDED_TYPE_FLAG_PLATFORM_NEEDS_PER_TYPE_RCW_DATA;
+ needsExtendedTypeFlagsOutput = true;
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (needsExtendedTypeFlagsOutput)
+ pICLW->ExtendedTypeFlags(extendedTypeFlags);
+
+ return hr;
+}
+
+#ifdef FEATURE_COMINTEROP
+struct RedirectedTypeToSpecialTypeConversion
+{
+ SPECIAL_TYPE type;
+};
+
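+// Expand the projected-type list in winrtprojectedtypes.h into a lookup table
+// indexed by WinMDAdapter::RedirectedTypeIndex; each DEFINE_PROJECTED_TYPE
+// entry contributes one row naming the corresponding SPECIAL_TYPE.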
+#define DEFINE_PROJECTED_TYPE(szWinRTNS, szWinRTName, szClrNS, szClrName, nClrAsmIdx, nContractAsmIdx, nWinRTIndex, nClrIndex, nWinMDTypeKind) \
+{ SPECIAL_TYPE_ ## nClrIndex },
+
+static const RedirectedTypeToSpecialTypeConversion g_redirectedSpecialTypeInfo[] =
+{
+#include "winrtprojectedtypes.h"
+};
+#undef DEFINE_PROJECTED_TYPE
+#endif
+
+//-------------------------------------------------------------------------------
+HRESULT EEClass::WriteCompactLayoutSpecialType(ICompactLayoutWriter *pICLW)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+ SPECIAL_TYPE type = SPECIAL_TYPE_INVALID;
+#ifdef FEATURE_COMINTEROP
+ // All types with winrt redirection indices are special types
+ WinMDAdapter::RedirectedTypeIndex typeIndex = GetWinRTRedirectedTypeIndex();
+ if (typeIndex != WinMDAdapter::RedirectedTypeIndex_Invalid)
+ {
+ type = g_redirectedSpecialTypeInfo[typeIndex].type;
+ }
+
+ // Additionally System.Collections.ICollection and System.Collections.Generics.ICollection<T> are special types
+ if (this->GetMethodTable()->GetModule()->IsSystem())
+ {
+ DefineFullyQualifiedNameForClass();
+ const char * pszFullyQualifiedName = GetFullyQualifiedNameForClass(this->GetMethodTable());
+
+ if (strcmp(pszFullyQualifiedName, g_CollectionsGenericCollectionItfName) == 0)
+ {
+ type = SPECIAL_TYPE_System_Collections_Generic_ICollection;
+ }
+ else if (::strcmp(pszFullyQualifiedName, g_CollectionsCollectionItfName) == 0)
+ {
+ type = SPECIAL_TYPE_System_Collections_ICollection;
+ }
+ }
+#endif
+
+ if (type != SPECIAL_TYPE_INVALID)
+ {
+ pICLW->SpecialType(type);
+ }
+
+ return hr;
+}
+
+//-------------------------------------------------------------------------------
+HRESULT EEClass::WriteCompactLayoutInterfaces(ICompactLayoutWriter *pICLW)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ MethodTable *pMT = GetMethodTable();
+ IMDInternalImport *pMDImport = pMT->GetModule()->GetMDImport();
+ HENUMInternalHolder hEnumInterfaceImpl(pMDImport);
+ hEnumInterfaceImpl.EnumInit(mdtInterfaceImpl, pMT->GetCl());
+ DWORD interfaceCount = hEnumInterfaceImpl.EnumGetCount();
+
+ for (DWORD i = 0; i < interfaceCount; ++i)
+ {
+ mdInterfaceImpl ii;
+
+ if (!hEnumInterfaceImpl.EnumNext(&ii))
+        { // Fewer interfaces than the count reports is an error
+ return E_FAIL;
+ }
+
+ mdToken tkInterface;
+ IfFailThrow(pMDImport->GetTypeOfInterfaceImpl(ii, &tkInterface));
+
+ pICLW->ImplementInterface(tkInterface);
+ }
+
+ return hr;
+}
+
+//-------------------------------------------------------------------------------
+HRESULT EEClass::WriteCompactLayoutInterfaceImpls(ICompactLayoutWriter *pICLW)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ MethodTable *pMT = GetMethodTable();
+
+ if (pMT->HasDispatchMap())
+ {
+ DispatchMap::Iterator it(pMT);
+ for (; it.IsValid(); it.Next())
+ {
+ DispatchMapEntry *pEntry = it.Entry();
+ CONSISTENCY_CHECK(pEntry->GetTypeID().IsImplementedInterface());
+
+ CONSISTENCY_CHECK(pEntry->GetTypeID().GetInterfaceNum() < pMT->GetNumInterfaces());
+ MethodTable * pMTItf =
+ pMT->GetInterfaceMap()[pEntry->GetTypeID().GetInterfaceNum()].GetMethodTable();
+
+ //
+ // Determine the interface method token
+ //
+
+ MethodDesc *pMDItf = pMTItf->GetMethodDescForSlot(pEntry->GetSlotNumber());
+ mdToken tkItf = pICLW->GetTokenForMethodDesc(pMDItf, pMTItf);
+
+ //
+ // Determine the implementation method token
+ //
+
+// CONSISTENCY_CHECK(!pEntry->IsVirtuallyMapped());
+ MethodDesc *pMDImpl = pMT->GetMethodDescForSlot(pEntry->GetTargetSlotNumber());
+ mdToken tkImpl = pICLW->GetTokenForMethodDesc(pMDImpl);
+
+ //
+ // Serialize
+ //
+
+ pICLW->ImplementInterfaceMethod(tkItf, tkImpl);
+ }
+ }
+
+ return hr;
+}
+
+//-------------------------------------------------------------------------------
+struct SortField
+{
+ int origIndex;
+ ULONG offset;
+};
+
+//-------------------------------------------------------------------------------
+int _cdecl FieldCmpOffsets(const void *a, const void *b)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ const SortField *fa = (const SortField *)a;
+ const SortField *fb = (const SortField *)b;
+ if (fa->offset < fb->offset)
+ return -1;
+ if (fa->offset > fb->offset)
+ return 1;
+ return 0;
+}
+
+#ifdef SORT_BY_RID
+//-------------------------------------------------------------------------------
+struct SortFieldRid
+{
+ int origIndex;
+ ULONG rid;
+};
+
+//-------------------------------------------------------------------------------
+int _cdecl FieldCmpRids(const void *a, const void *b)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ const SortFieldRid *fa = (const SortFieldRid *)a;
+ const SortFieldRid *fb = (const SortFieldRid *)b;
+ if (fa->rid < fb->rid)
+ return -1;
+ if (fa->rid > fb->rid)
+ return 1;
+ return 0;
+}
+
+#endif //SORT_BY_RID
+//-------------------------------------------------------------------------------
+inline PTR_FieldDesc EEClass::GetFieldDescByIndex(DWORD fieldIndex)
+{
+    WRAPPER_NO_CONTRACT;
+
+ MethodTable * pMT = GetMethodTable();
+ CONSISTENCY_CHECK(fieldIndex < (DWORD)(pMT->GetNumIntroducedInstanceFields()) + (DWORD)GetNumStaticFields());
+
+ // MDIL_NEEDS_REVIEW
+ // was previously: return GetApproxFieldDescListPtr() + fieldIndex;
+
+ return pMT->GetApproxFieldDescListRaw() + fieldIndex;
+}
+
+HRESULT EEClass::WriteCompactLayoutFields(ICompactLayoutWriter *pICLW)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ DWORD dwFieldCount = GetMethodTable()->GetNumIntroducedInstanceFields() + GetNumStaticFields();
+
+
+#ifdef SORT_BY_RID
+ typedef CQuickArray<SortFieldRid> SortFieldArray;
+ SortFieldArray fields;
+ fields.AllocThrows(dwFieldCount);
+ for (DWORD i = 0; i < dwFieldCount; i++)
+ {
+ FieldDesc *fieldDesc = GetFieldDescByIndex(i);
+ fields[i].origIndex = i;
+ fields[i].rid = fieldDesc->GetMemberDef();
+ }
+
+ qsort(fields.Ptr(), dwFieldCount, sizeof(SortFieldRid), FieldCmpRids);
+#else
+ //
+ // Build an index for the fields sorted by offset so that they are serialized
+ // in the same order as they should be deserialized.
+ //
+
+ typedef CQuickArray<SortField> SortFieldArray;
+ SortFieldArray fields;
+ fields.AllocThrows(dwFieldCount);
+ for (DWORD i = 0; i < dwFieldCount; i++)
+ {
+ FieldDesc *fieldDesc = GetFieldDescByIndex(i);
+ fields[i].origIndex = i;
+ fields[i].offset = fieldDesc->GetOffset();
+ }
+
+ qsort(fields.Ptr(), dwFieldCount, sizeof(SortField), FieldCmpOffsets);
+#endif
+
+ //
+ // For each field, gather information and then serialize
+ //
+#ifdef DEBUG_LAYOUT
+ printf("%s %08x (baseSize = %x instance field bytes = %x number of virtuals = %x):\n", GetMethodTable()->IsValueType() ? "Struct" : "Class", GetMethodTable()->GetCl(), GetMethodTable()->GetBaseSize(), GetMethodTable()->GetNumInstanceFieldBytes(), GetMethodTable()->GetNumVirtuals());
+#endif
+
+ for (DWORD i = 0; i < dwFieldCount; i++)
+ {
+ FieldDesc *pFD = GetFieldDescByIndex(fields[i].origIndex);
+
+ mdFieldDef tkField = pFD->GetMemberDef();
+
+ //
+ // Determine storage type of the field
+ //
+
+ ICompactLayoutWriter::FieldStorage fieldStorage = ICompactLayoutWriter::FS_INSTANCE;
+ if (pFD->IsStatic())
+ {
+ if (pFD->IsThreadStatic())
+ {
+ fieldStorage = ICompactLayoutWriter::FS_THREADLOCAL;
+ }
+ else if (pFD->IsContextStatic())
+ {
+ fieldStorage = ICompactLayoutWriter::FS_CONTEXTLOCAL;
+ }
+ else if (pFD->IsRVA())
+ {
+ fieldStorage = ICompactLayoutWriter::FS_RVA;
+ }
+ else
+ {
+ fieldStorage = ICompactLayoutWriter::FS_STATIC;
+ }
+ }
+
+ //
+ // Determine protection of the field
+ //
+
+ ICompactLayoutWriter::FieldProtection fieldProtection;
+ switch (pFD->GetFieldProtection())
+ {
+ case fdPrivateScope:
+ fieldProtection = ICompactLayoutWriter::FP_PRIVATE_SCOPE;
+ break;
+ case fdPrivate:
+ fieldProtection = ICompactLayoutWriter::FP_PRIVATE;
+ break;
+ case fdFamANDAssem:
+ fieldProtection = ICompactLayoutWriter::FP_FAM_AND_ASSEM;
+ break;
+ case fdAssembly:
+ fieldProtection = ICompactLayoutWriter::FP_ASSEMBLY;
+ break;
+ case fdFamily:
+ fieldProtection = ICompactLayoutWriter::FP_FAMILY;
+ break;
+ case fdFamORAssem:
+ fieldProtection = ICompactLayoutWriter::FP_FAM_OR_ASSEM;
+ break;
+ case fdPublic:
+ fieldProtection = ICompactLayoutWriter::FP_PUBLIC;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ //
+ // If the field is a ValueType, retrieve the token for it.
+ //
+ // NOTE: can't just grab the TypeHandle for the field and return
+ // that token because the type could reside in another
+ // metadata scope.
+ //
+
+ mdToken tkValueType = mdTokenNil;
+ CorElementType fieldType = pFD->GetFieldType();
+ PCCOR_SIGNATURE pSig;
+ DWORD cbSig;
+ pFD->GetSig(&pSig, &cbSig);
+
+ SigPointer sigPointer(pSig, cbSig);
+ sigPointer.GetCallingConv(NULL);
+ CorElementType elType;
+ sigPointer.GetElemType(&elType);
+ switch (elType)
+ {
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_FNPTR:
+ _ASSERTE(fieldType == elType);
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ sigPointer.GetToken(&tkValueType);
+ if (TypeFromToken(tkValueType) != mdtTypeDef)
+ fieldType = ELEMENT_TYPE_VALUETYPE;
+ break;
+
+ case ELEMENT_TYPE_VAR:
+ fieldType = ELEMENT_TYPE_VALUETYPE;
+ // fall thru
+ case ELEMENT_TYPE_GENERICINST:
+ if (fieldType != ELEMENT_TYPE_VALUETYPE)
+ {
+                // Force valuetypes not defined in this module to be described
+                // via tokens instead of taking advantage of the knowledge that
+                // this is an enum type.
+ CorElementType elemTypeGeneric;
+ IfFailThrow(sigPointer.GetElemType(&elemTypeGeneric));
+ if (elemTypeGeneric == ELEMENT_TYPE_VALUETYPE)
+ {
+ mdToken tkValueTypeSig;
+ IfFailThrow(sigPointer.GetToken(&tkValueTypeSig));
+ if (TypeFromToken(tkValueTypeSig) != mdtTypeDef)
+ {
+ fieldType = ELEMENT_TYPE_VALUETYPE;
+ }
+ }
+ }
+ tkValueType = pICLW->GetTypeSpecToken(pSig+1, cbSig-1);
+ break;
+
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_ARRAY:
+ _ASSERTE(fieldType == ELEMENT_TYPE_CLASS);
+ break;
+
+ case ELEMENT_TYPE_MVAR:
+ printf("elType = %d\n", elType);
+ _ASSERTE(!"unexpected field type");
+ break;
+
+ default:
+ printf("elType = %d\n", elType);
+ break;
+ }
+
+ //
+ // Record this field
+ //
+
+ pICLW->Field(tkField,
+ fieldStorage,
+ fieldProtection,
+ fieldType,
+ (HasExplicitFieldOffsetLayout() || pFD->IsRVA()) ? pFD->GetOffset() : ~0,
+ tkValueType);
+ }
+
+#ifdef DEBUG_LAYOUT
+ // dump field offsets in token order
+ mdFieldDef lowestFieldToken = ~0;
+ for (DWORD i = 0; i < dwFieldCount; i++)
+ {
+ FieldDesc *pFD = GetFieldDescByIndex(i);
+ mdFieldDef tkField = pFD->GetMemberDef();
+ if (lowestFieldToken >= tkField)
+ lowestFieldToken = tkField;
+ }
+ // print fields in token order - this is quadratic in the number of fields,
+ // but it's just debug output after all
+
+ for (DWORD i = 0; i < dwFieldCount; i++)
+ {
+ mdFieldDef tkField = lowestFieldToken + i;
+ bool fFound = false;
+        for (DWORD j = 0; j < dwFieldCount; j++)
+        {
+            FieldDesc *pFD = GetFieldDescByIndex(j);
+ if (tkField == pFD->GetMemberDef())
+ {
+ printf(" Field %08x of type %x has offset %x\n", tkField, pFD->GetFieldType(), pFD->GetOffset());
+ fFound = true;
+ }
+ }
+ if (!fFound)
+ {
+ printf(" >>>> Gap for field token %08x\n", tkField);
+ }
+ }
+#endif
+
+ if (HasLayout())
+ {
+ // see if we have a field marshaler for this field
+ EEClassLayoutInfo *eeClassLayoutInfo = GetLayoutInfo();
+
+ FieldMarshaler *pFM = eeClassLayoutInfo->m_pFieldMarshalers;
+ FieldMarshaler *pFMEnd = (FieldMarshaler*) ((BYTE *)pFM + eeClassLayoutInfo->m_numCTMFields*MAXFIELDMARSHALERSIZE);
+ while (pFM < pFMEnd)
+ {
+// printf("Field %08x native type = %x external offset = %x\n", tkField, pFM->GetNStructFieldType(), pFM->GetExternalOffset());
+
+ NStructFieldType type = pFM->GetNStructFieldType();
+ DWORD count = 0;
+ DWORD flags = 0;
+ DWORD typeToken1 = 0;
+ DWORD typeToken2 = 0;
+
+#define NFT_CASE_VERIFICATION_TYPE_NAME(type) nftMissingFromEEClass_WriteCompactLayoutFields_ ## type
+
+ switch (type)
+ {
+ NFT_CASE(NFT_NONE)
+ NFT_CASE(NFT_STRINGUNI)
+ NFT_CASE(NFT_COPY1)
+ NFT_CASE(NFT_COPY2)
+ NFT_CASE(NFT_COPY4)
+ NFT_CASE(NFT_COPY8)
+ NFT_CASE(NFT_CBOOL)
+ NFT_CASE(NFT_DATE)
+ NFT_CASE(NFT_DECIMAL)
+ NFT_CASE(NFT_WINBOOL)
+ NFT_CASE(NFT_SAFEHANDLE)
+ NFT_CASE(NFT_CRITICALHANDLE)
+#ifdef FEATURE_COMINTEROP
+ NFT_CASE(NFT_BSTR)
+ NFT_CASE(NFT_VARIANT)
+ NFT_CASE(NFT_VARIANTBOOL)
+ NFT_CASE(NFT_CURRENCY)
+ NFT_CASE(NFT_DATETIMEOFFSET)
+ NFT_CASE(NFT_HSTRING)
+ NFT_CASE(NFT_WINDOWSFOUNDATIONHRESULT)
+ NFT_CASE(NFT_SYSTEMTYPE)
+#endif // FEATURE_COMINTEROP
+ // no additional info for these
+ break;
+
+ NFT_CASE(NFT_STRINGANSI)
+ {
+ FieldMarshaler_StringAnsi *pFM_StringAnsi = (FieldMarshaler_StringAnsi*)pFM;
+ if (pFM_StringAnsi->GetBestFit())
+ flags |= ICompactLayoutWriter::NF_BESTFITMAP;
+ if (pFM_StringAnsi->GetThrowOnUnmappableChar())
+ flags |= ICompactLayoutWriter::NF_THROWONUNMAPPABLECHAR;
+ }
+ break;
+
+ NFT_CASE(NFT_FIXEDSTRINGUNI)
+ {
+ count = pFM->NativeSize()/sizeof(WCHAR);
+ }
+ break;
+
+ NFT_CASE(NFT_FIXEDSTRINGANSI)
+ {
+ FieldMarshaler_FixedStringAnsi *pFM_FixedStringAnsi = (FieldMarshaler_FixedStringAnsi*)pFM;
+ if (pFM_FixedStringAnsi->GetBestFit())
+ flags |= ICompactLayoutWriter::NF_BESTFITMAP;
+ if (pFM_FixedStringAnsi->GetThrowOnUnmappableChar())
+ flags |= ICompactLayoutWriter::NF_THROWONUNMAPPABLECHAR;
+ count = pFM->NativeSize()/sizeof(CHAR);
+ }
+ break;
+
+ NFT_CASE(NFT_FIXEDCHARARRAYANSI)
+ {
+ FieldMarshaler_FixedCharArrayAnsi *pFM_FixedCharArrayAnsi = (FieldMarshaler_FixedCharArrayAnsi*)pFM;
+ if (pFM_FixedCharArrayAnsi->GetBestFit())
+ flags |= ICompactLayoutWriter::NF_BESTFITMAP;
+ if (pFM_FixedCharArrayAnsi->GetThrowOnUnmappableChar())
+ flags |= ICompactLayoutWriter::NF_THROWONUNMAPPABLECHAR;
+ count = pFM->NativeSize()/sizeof(CHAR);
+ }
+ break;
+
+ NFT_CASE(NFT_FIXEDARRAY)
+ {
+ FieldMarshaler_FixedArray *pFM_FixedArray = (FieldMarshaler_FixedArray*)pFM;
+ MethodTable *pMT = pFM_FixedArray->GetElementTypeHandle().AsMethodTable();
+ typeToken1 = pICLW->GetTokenForType(pMT);
+
+ /* do we need this information? there are no accessors...
+ if (pFM_FixedArray->GetBestFit())
+ flags |= ICompactLayoutWriter::NF_BESTFITMAP;
+ if (pFM_FixedArray->GetThrowOnUnmappableChar())
+ flags |= ICompactLayoutWriter::NF_THROWONUNMAPPABLECHAR;
+ */
+ flags |= pFM_FixedArray->GetElementVT() << ICompactLayoutWriter::NF_VARTYPE_SHIFT;
+ count = pFM->NativeSize()/OleVariant::GetElementSizeForVarType(pFM_FixedArray->GetElementVT(), pMT);
+ }
+ break;
+
+ NFT_CASE(NFT_DELEGATE)
+ {
+ MethodTable *pMT = ((FieldMarshaler_Delegate*)pFM)->GetMethodTable();
+ typeToken1 = pICLW->GetTokenForType(pMT);
+ }
+ break;
+
+ NFT_CASE(NFT_ANSICHAR)
+ {
+ FieldMarshaler_Ansi *pFM_Ansi = (FieldMarshaler_Ansi*)pFM;
+ if (pFM_Ansi->GetBestFit())
+ flags |= ICompactLayoutWriter::NF_BESTFITMAP;
+ if (pFM_Ansi->GetThrowOnUnmappableChar())
+ flags |= ICompactLayoutWriter::NF_THROWONUNMAPPABLECHAR;
+ }
+ break;
+
+ NFT_CASE(NFT_NESTEDLAYOUTCLASS)
+ {
+ MethodTable *pMT = ((FieldMarshaler_NestedLayoutClass*)pFM)->GetMethodTable();
+ typeToken1 = pICLW->GetTokenForType(pMT);
+ }
+ break;
+
+ NFT_CASE(NFT_NESTEDVALUECLASS)
+ {
+ MethodTable *pMT = ((FieldMarshaler_NestedValueClass*)pFM)->GetMethodTable();
+ typeToken1 = pICLW->GetTokenForType(pMT);
+ }
+ break;
+
+#ifdef FEATURE_COMINTEROP
+ NFT_CASE(NFT_INTERFACE)
+ {
+ FieldMarshaler_Interface *pFM_Interface = (FieldMarshaler_Interface*)pFM;
+ MethodTable *pMT = pFM_Interface->GetMethodTable();
+ typeToken1 = pICLW->GetTokenForType(pMT);
+                MethodTable *pItfMT = NULL;
+                pFM_Interface->GetInterfaceInfo(&pItfMT, &flags);
+                typeToken2 = pICLW->GetTokenForType(pItfMT);
+ }
+ break;
+
+ NFT_CASE(NFT_WINDOWSFOUNDATIONIREFERENCE)
+ {
+ FieldMarshaler_Nullable *pFM_Nullable = (FieldMarshaler_Nullable*)pFM;
+ MethodTable *pMT = pFM_Nullable->GetMethodTable();
+ typeToken1 = pICLW->GetTokenForType(pMT);
+ }
+ break;
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+ NFT_CASE(NFT_SAFEARRAY)
+ {
+ FieldMarshaler_SafeArray *pFM_SafeArray = (FieldMarshaler_SafeArray*)pFM;
+ MethodTable *pMT = pFM_SafeArray->GetElementTypeHandle().AsMethodTable();
+ typeToken1 = pICLW->GetTokenForType(pMT);
+ flags = pFM_SafeArray->GetElementVT() << ICompactLayoutWriter::NF_VARTYPE_SHIFT;
+ }
+ break;
+#endif //FEATURE_CLASSIC_COMINTEROP
+
+#endif // FEATURE_COMINTEROP
+ NFT_CASE(NFT_ILLEGAL)
+                // Open question: do we even need this case, and does it need additional info?
+ break;
+#ifndef FEATURE_COMINTEROP
+ NFT_CASE(NFT_INTERFACE)
+#endif
+ default:
+#define NFT_VERIFY_ALL_CASES
+#include "nsenumhandleallcases.h"
+ _ASSERTE(!"unexpected native type");
+ break;
+
+ }
+
+ pICLW->NativeField(pFM->GetFieldDesc()->GetMemberDef(),
+ type,
+ pFM->GetExternalOffset(),
+ count,
+ flags,
+ typeToken1,
+ typeToken2);
+
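+            // Field marshalers are stored as a contiguous array of fixed-size slots,
+            // so advance to the next one by bumping the raw byte pointer.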
+ ((BYTE*&)pFM) += MAXFIELDMARSHALERSIZE;
+ }
+ }
+
+    if (HasExplicitFieldOffsetLayout() || (HasLayout() && GetLayoutInfo()->HasExplicitSize()))
+ {
+ pICLW->SizeType(GetMethodTable()->GetNumInstanceFieldBytes());
+ }
+
+ return hr;
+}
+
+//-------------------------------------------------------------------------------
+HRESULT EEClass::WriteCompactLayoutMethods(ICompactLayoutWriter *pICLW)
+{
+ // we need this iterator because we want the method descs in declaration order,
+ // but the chunks are in reverse order
+    class ReversedChunkMethodIterator
+ {
+ private:
+ MethodDesc *m_methodDesc;
+ public:
+        ReversedChunkMethodIterator(MethodTable *pMT)
+ {
+ m_methodDesc = NULL;
+ MethodDescChunk *pChunk = pMT->GetClass()->GetChunks();
+ if (pChunk == NULL)
+ return;
+ while (pChunk->GetNextChunk() != NULL)
+ pChunk = pChunk->GetNextChunk();
+ m_methodDesc = pChunk->GetFirstMethodDesc();
+ }
+
+ bool IsValid()
+ {
+ return m_methodDesc != NULL;
+ }
+
+ MethodDesc *GetMethodDesc()
+ {
+ return m_methodDesc;
+ }
+
+ void Next()
+ {
+ MethodDescChunk * pChunk = m_methodDesc->GetMethodDescChunk();
+
+ // Check whether the next MethodDesc is still within the bounds of the current chunk
+ TADDR pNext = dac_cast<TADDR>(m_methodDesc) + m_methodDesc->SizeOf();
+ TADDR pEnd = dac_cast<TADDR>(pChunk) + pChunk->SizeOf();
+
+ if (pNext < pEnd)
+ {
+ // Just skip to the next method in the same chunk
+ m_methodDesc = PTR_MethodDesc(pNext);
+ }
+ else
+ {
+ _ASSERTE(pNext == pEnd);
+
+ // We have walked all the methods in the current chunk. Move on
+ // to the previous chunk.
+ MethodDescChunk *pPrevChunk = m_methodDesc->GetClass()->GetChunks();
+ if (pPrevChunk == pChunk)
+ m_methodDesc = NULL;
+ else
+ {
+ while (pPrevChunk->GetNextChunk() != pChunk)
+ pPrevChunk = pPrevChunk->GetNextChunk();
+ m_methodDesc = pPrevChunk->GetFirstMethodDesc();
+ }
+ }
+ }
+ };
+
+ HRESULT hr = S_OK;
+
+// printf("New virtuals of class %08x\n", GetCl());
+
+ MethodTable *pMT = GetMethodTable();
+ DWORD dwNumParentVirtuals = pMT->GetNumParentVirtuals();
+ IMDInternalImport *pMDImport = pMT->GetModule()->GetMDImport();
+
+    ReversedChunkMethodIterator it(GetMethodTable());
+ WORD lastNewSlotIndex = 0;
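+    // Token of the most recent unboxing stub that carried a MethodImpl; its target
+    // method is expected to follow immediately (see the assert further down).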
+ mdMethodDef tkUnboxingStubNeedsImpl = 0;
+ for (; it.IsValid(); it.Next())
+ {
+ MethodDesc *pMD = it.GetMethodDesc();
+
+ // skip unboxing stubs
+ if (pMD->IsUnboxingStub())
+ {
+ if (pMD->IsMethodImpl())
+ tkUnboxingStubNeedsImpl = pMD->GetMemberDef();
+ continue;
+ }
+
+ mdMethodDef tkMethod = pMD->GetMemberDef();
+
+ //
+ // Gather method information
+ //
+
+ DWORD dwDeclFlags = pMD->GetAttrs();
+ ULONG ulCodeRVA;
+ DWORD dwImplFlags;
+ pMDImport->GetMethodImplProps(tkMethod, &ulCodeRVA, &dwImplFlags);
+
+ //
+ // Figure out if this method overrides a parent method, and
+ // if so find or generate the corresponding token.
+ //
+
+ mdToken tkOverrideMethod = mdTokenNil;
+ WORD slotIndex = pMD->GetSlot();
+ if (pMT->IsValueType() && pMD->IsVirtual())
+ {
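+            // For a virtual method on a value type, the vtable slot belongs to the
+            // unboxing stub rather than the unboxed method, so report the slot of
+            // the boxed entry point when one exists.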
+ MethodDesc *pBoxedMD = MethodDesc::FindOrCreateAssociatedMethodDesc(pMD,
+ pMD->GetMethodTable(),
+ TRUE /* get unboxing entry point */,
+ pMD->GetMethodInstantiation(),
+ FALSE /* no allowInstParam */ );
+ if (pBoxedMD != NULL)
+ slotIndex = pBoxedMD->GetSlot();
+ }
+
+#ifdef DEBUG_LAYOUT
+ if (pMD->IsVirtual())
+ printf(" virtual method %08x has slot %x\n", tkMethod, slotIndex);
+#endif
+ if (slotIndex < dwNumParentVirtuals)
+ {
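+            // The slot falls within the parent's vtable, so this method overrides
+            // whatever declaration occupies that slot in the parent.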
+ MethodTable *pParentMT = pMT->GetParentMethodTable();
+ MethodDesc *pParentMD = pParentMT->GetMethodDescForSlot(slotIndex)->GetDeclMethodDesc(slotIndex);
+ tkOverrideMethod = pICLW->GetTokenForMethodDesc(pParentMD);
+ }
+
+ //
+ // Figure out the implHints - they consist of the classification
+ // and various flags for special methods
+ //
+
+ DWORD dwImplHints = pMD->GetClassification();
+ if (pMT->HasDefaultConstructor() && slotIndex == pMT->GetDefaultConstructorSlot())
+ dwImplHints |= ICompactLayoutWriter::IH_DEFAULT_CTOR;
+ else if (pMT->HasClassConstructor() && slotIndex == pMT->GetClassConstructorSlot())
+ dwImplHints |= ICompactLayoutWriter::IH_CCTOR;
+
+ if (pMD->IsCtor())
+ dwImplHints |= ICompactLayoutWriter::IH_CTOR;
+
+ if (IsDelegate())
+ {
+ DelegateEEClass *delegateEEClass = (DelegateEEClass *)this;
+ if (pMD == delegateEEClass->m_pInvokeMethod)
+ dwImplHints |= ICompactLayoutWriter::IH_DELEGATE_INVOKE;
+ else if (pMD == delegateEEClass->m_pBeginInvokeMethod)
+ dwImplHints |= ICompactLayoutWriter::IH_DELEGATE_BEGIN_INVOKE;
+ else if (pMD == delegateEEClass->m_pEndInvokeMethod)
+ dwImplHints |= ICompactLayoutWriter::IH_DELEGATE_END_INVOKE;
+ }
+
+
+ _ASSERTE((tkUnboxingStubNeedsImpl == 0) || (tkUnboxingStubNeedsImpl == tkMethod)
+ || !"This depends on unboxing stubs always being processed directly before the method they invoke");
+ // cannot use pMD->HasMethodImplSlot() because it has some false positives
+ // (virtual methods on valuetypes implementing interfaces have the flag bit set
+ // but an empty list of replaced methods)
+ if (pMD->IsMethodImpl() || (tkUnboxingStubNeedsImpl == tkMethod))
+ {
+ dwImplHints |= ICompactLayoutWriter::IH_HASMETHODIMPL;
+ }
+
+ tkUnboxingStubNeedsImpl = 0;
+
+        // Make sure that the transparency is cached in the NGen image
+        // (code copied from MethodDesc::Save, ngen code path).
+        // A side effect of this call is caching the transparency bits
+        // in the MethodDesc.
+
+ Security::IsMethodTransparent(pMD);
+
+ if (pMD->HasCriticalTransparentInfo())
+ {
+ if (pMD->IsTreatAsSafe())
+ {
+ dwImplHints |= ICompactLayoutWriter::IH_TRANSPARENCY_TREAT_AS_SAFE;
+// printf(" method %08x is treat as safe\n", tkMethod);
+ }
+ else if (pMD->IsTransparent())
+ {
+ dwImplHints |= ICompactLayoutWriter::IH_TRANSPARENCY_TRANSPARENT;
+// printf(" method %08x is transparent\n", tkMethod);
+ }
+ else if (pMD->IsCritical())
+ {
+ dwImplHints |= ICompactLayoutWriter::IH_TRANSPARENCY_CRITICAL;
+// printf(" method %08x is critical\n", tkMethod);
+ }
+ else
+ _ASSERTE(!"one of the above must be true, no?");
+ }
+ else
+ {
+ _ASSERTE((dwImplHints & ICompactLayoutWriter::IH_TRANSPARENCY_MASK) == ICompactLayoutWriter::IH_TRANSPARENCY_NO_INFO);
+// printf(" method %08x has no critical transparent info\n", tkMethod);
+ }
+
+ if (!pMD->IsAbstract() && !pMD->IsILStub() && !pMD->IsUnboxingStub()
+ && pMD->IsIL())
+ {
+ EX_TRY
+ {
+ if (pMD->IsVerifiable())
+ {
+ dwImplHints |= ICompactLayoutWriter::IH_IS_VERIFIABLE;
+ }
+ }
+ EX_CATCH
+ {
+                // If the method fails a security check, the exception will propagate out of IsVerifiable.
+                // We only expect to see internal CLR exceptions here, so use RethrowCorruptingExceptions.
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions)
+ }
+
+ if (pMD->IsVerified())
+ {
+ dwImplHints |= ICompactLayoutWriter::IH_IS_VERIFIED;
+ }
+
+ //
+ // Serialize the method
+ //
+
+ if (IsMdPinvokeImpl(dwDeclFlags))
+ {
+ _ASSERTE(tkOverrideMethod == mdTokenNil);
+ NDirectMethodDesc *pNMD = (NDirectMethodDesc *)pMD;
+ PInvokeStaticSigInfo pInvokeStaticSigInfo;
+ NDirect::PopulateNDirectMethodDesc(pNMD, &pInvokeStaticSigInfo);
+
+ pICLW->PInvokeMethod(dwDeclFlags,
+ dwImplFlags,
+ dwImplHints,
+ tkMethod,
+ pNMD->GetLibName(),
+ pNMD->GetEntrypointName(),
+ pNMD->ndirect.m_wFlags);
+ }
+ else
+ {
+            if (pMD->IsVirtual() && IsNilToken(tkOverrideMethod))
+ {
+// printf(" Method %08x has new virtual slot %u\n", tkMethod, slotIndex);
+ // make sure new virtual slot indices are ascending - otherwise, the virtual slot indices
+ // created by the binder won't be consistent with ngen, which makes mix and match impossible
+ _ASSERTE(lastNewSlotIndex <= slotIndex);
+ lastNewSlotIndex = slotIndex;
+ }
+
+ pICLW->Method(dwDeclFlags,
+ dwImplFlags,
+ dwImplHints,
+ tkMethod,
+ tkOverrideMethod);
+
+ if (pMD->IsGenericMethodDefinition())
+ {
+ InstantiatedMethodDesc *pIMD = pMD->AsInstantiatedMethodDesc();
+ Instantiation inst = pIMD->GetMethodInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ pICLW->GenericParameter(inst[i].AsGenericVariable()->GetToken(), 0);
+ }
+ }
+ }
+ }
+
+ return hr;
+}
+
+//-------------------------------------------------------------------------------
+HRESULT EEClass::WriteCompactLayoutMethodImpls(ICompactLayoutWriter *pICLW)
+{
+ HRESULT hr = S_OK;
+
+ MethodTable::IntroducedMethodIterator it(GetMethodTable());
+ for (; it.IsValid(); it.Next())
+ {
+ MethodDesc *pMDImpl = it.GetMethodDesc();
+ if (pMDImpl->IsMethodImpl())
+ { // If this is a methodImpl, then iterate all implemented slots
+ // and serialize the (decl,impl) pair.
+ // This guarantees that all methodImpls for a particular method
+ // are "clustered" (if there is more than one)
+ MethodImpl::Iterator implIt(pMDImpl);
+ for (; implIt.IsValid(); implIt.Next())
+ {
+ MethodDesc *pMDDecl = implIt.GetMethodDesc();
+ // MethodImpls should no longer cover interface methodImpls, as that
+ // should be captured in the interface dispatch map.
+ CONSISTENCY_CHECK(!pMDDecl->IsInterface());
+
+ mdToken tkDecl = pICLW->GetTokenForMethodDesc(pMDDecl);
+ pICLW->MethodImpl(tkDecl, pMDImpl->GetMemberDef());
+ }
+ }
+ }
+
+ return hr;
+}
+#endif //MDIL
+#endif
diff --git a/src/vm/class.h b/src/vm/class.h
new file mode 100644
index 0000000000..758a0dbaee
--- /dev/null
+++ b/src/vm/class.h
@@ -0,0 +1,2690 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// ==++==
+//
+//
+
+//
+// ==--==
+//
+// File: CLASS.H
+//
+
+
+//
+
+//
+// NOTE: Even though EEClass is considered to contain cold data (relative to MethodTable), these data
+// structures *are* touched (especially during startup as part of soft-binding). As a result, and given the
+// number of EEClasses allocated for large assemblies, the size of this structure can have a direct impact on
+// performance, especially startup performance.
+//
+// Given that the data itself is touched infrequently, we can trade off space reduction against cpu-usage to
+// good effect here. A fair amount of work has gone into reducing the size of each EEClass instance (see
+// EEClassOptionalFields and EEClassPackedFields) at the expense of somewhat more convoluted runtime access.
+//
+// Please consider this (and measure the impact of your changes against startup scenarios) before adding
+// fields to EEClass or otherwise increasing its size.
+//
+// ============================================================================
+
+#ifndef CLASS_H
+#define CLASS_H
+
+/*
+ * Include Files
+ */
+#ifndef BINDER
+#include "eecontract.h"
+#include "argslot.h"
+#include "vars.hpp"
+#include "cor.h"
+#include "clrex.h"
+#include "hash.h"
+#include "crst.h"
+#include "objecthandle.h"
+#include "cgensys.h"
+#include "declsec.h"
+#ifdef FEATURE_COMINTEROP
+#include "stdinterfaces.h"
+#endif
+#include "slist.h"
+#include "spinlock.h"
+#include "typehandle.h"
+#include "perfcounters.h"
+#include "methodtable.h"
+#include "eeconfig.h"
+#include "typectxt.h"
+#include "iterator_util.h"
+#endif // BINDER
+
+#ifdef FEATURE_COMINTEROP
+#include "..\md\winmd\inc\adapter.h"
+#endif
+#include "packedfields.inl"
+#include "array.h"
+#ifndef BINDER
+#define IBCLOG(x) g_IBCLogger.x
+#else
+#include "gcdesc.h"
+#define IBCLOG(x)
+#define COUNTER_ONLY(x)
+class PrestubMethodFrame;
+#endif
+
+#ifdef MDIL
+#include "compactlayoutwriter.h"
+#endif
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowHR(HRESULT hr);
+
+/*
+ * Macro definitions
+ */
+#define MAX_LOG2_PRIMITIVE_FIELD_SIZE 3
+
+#define MAX_PRIMITIVE_FIELD_SIZE (1 << MAX_LOG2_PRIMITIVE_FIELD_SIZE)
+
+/*
+ * Forward declarations
+ */
+class AppDomain;
+class ArrayClass;
+class ArrayMethodDesc;
+class Assembly;
+class ClassLoader;
+class DictionaryLayout;
+class DomainLocalBlock;
+class FCallMethodDesc;
+class EEClass;
+class EnCFieldDesc;
+class FieldDesc;
+class FieldMarshaler;
+struct LayoutRawFieldInfo;
+class MetaSig;
+class MethodDesc;
+class MethodDescChunk;
+class MethodTable;
+class Module;
+struct ModuleCtorInfo;
+class Object;
+class Stub;
+class Substitution;
+class SystemDomain;
+class TypeHandle;
+class StackingAllocator;
+class AllocMemTracker;
+class InteropMethodTableSlotDataMap;
+class LoadingEntry_LockHolder;
+class DispatchMapBuilder;
+class LoaderAllocator;
+class ComCallWrapperTemplate;
+
+typedef DPTR(DictionaryLayout) PTR_DictionaryLayout;
+
+
+//---------------------------------------------------------------------------------
+// Fields in an explicit-layout class present varying degrees of risk depending
+// on how they overlap.
+//
+// Each level is a superset of the lower (in numerical value) level - i.e.
+// all kVerifiable fields are also kLegal, but not vice-versa.
+//---------------------------------------------------------------------------------
+class ExplicitFieldTrust
+{
+ public:
+ enum TrustLevel
+ {
+ // Note: order is important here - each guarantee also implicitly guarantees all promises
+ // made by values lower in number.
+
+ // What's guaranteed. What the loader does.
+ //----- ----------------------- -------------------------------
+ kNone = 0, // no guarantees at all - Type refuses to load at all.
+ kLegal = 1, // guarantees no objref <-> scalar overlap and no unaligned objref - Type loads but field access won't verify
+ kVerifiable = 2, // guarantees no objref <-> objref overlap and all guarantees above - Type loads and field access will verify
+ kNonOverLayed = 3, // guarantees no overlap at all and all guarantees above - Type loads, field access verifies and Equals() may be optimized if structure is tightly packed
+
+ kMaxTrust = kNonOverLayed,
+ };
+
+};
+
+//----------------------------------------------------------------------------------------------
+// This class is a helper for HandleExplicitLayout. To make it harder to introduce security holes
+// into this function, we will manage all updates to the class's trust level through the ExplicitClassTrust
+// class. This abstraction enforces the rule that the overall class is only as trustworthy as
+// the least trustworthy field.
+//----------------------------------------------------------------------------------------------
+class ExplicitClassTrust : private ExplicitFieldTrust
+{
+ public:
+ ExplicitClassTrust()
+ {
+ LIMITED_METHOD_CONTRACT;
+        m_trust = kMaxTrust;   // Yes, we start out with maximal trust. This reflects that explicit layout structures with no fields pose no risk.
+ }
+
+ VOID AddField(TrustLevel fieldTrust)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_trust = min(m_trust, fieldTrust);
+ }
+
+ BOOL IsLegal()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_trust >= kLegal;
+ }
+
+ BOOL IsVerifiable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_trust >= kVerifiable;
+ }
+
+ BOOL IsNonOverLayed()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_trust >= kNonOverLayed;
+ }
+
+ TrustLevel GetTrustLevel()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_trust;
+ }
+
+ private:
+ TrustLevel m_trust;
+};
+
+//----------------------------------------------------------------------------------------------
+// This class is a helper for HandleExplicitLayout. To make it harder to introduce security holes
+// into this function, this class will collect trust information about individual fields to be later
+// aggregated into the overall class level.
+//
+// This abstraction enforces the rule that all fields are presumed guilty until explicitly declared
+// safe by calling SetTrust(). If you fail to call SetTrust before leaving the block, the destructor
+// will automatically cause the entire class to be declared illegal (and you will get an assert
+// telling you to fix this bug.)
+//----------------------------------------------------------------------------------------------
+class ExplicitFieldTrustHolder : private ExplicitFieldTrust
+{
+ public:
+ ExplicitFieldTrustHolder(ExplicitClassTrust *pExplicitClassTrust)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pExplicitClassTrust = pExplicitClassTrust;
+#ifdef _DEBUG
+ m_trustDeclared = FALSE;
+#endif
+ m_fieldTrust = kNone;
+ }
+
+ VOID SetTrust(TrustLevel fieldTrust)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(fieldTrust >= kNone && fieldTrust <= kMaxTrust);
+ _ASSERTE(!m_trustDeclared && "You should not set the trust value more than once.");
+
+#ifdef _DEBUG
+ m_trustDeclared = TRUE;
+#endif
+ m_fieldTrust = fieldTrust;
+ }
+
+ ~ExplicitFieldTrustHolder()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // If no SetTrust() was ever called, we will default to kNone (i.e. declare the entire type
+ // illegal.) It'd be nice to assert here but since this case can be legitimately reached
+ // on exception unwind, we cannot.
+ m_pExplicitClassTrust->AddField(m_fieldTrust);
+ }
+
+
+ private:
+ ExplicitClassTrust* m_pExplicitClassTrust;
+ TrustLevel m_fieldTrust;
+#ifdef _DEBUG
+ BOOL m_trustDeclared; // Debug flag to detect multiple Sets. (Which we treat as a bug as this shouldn't be necessary.)
+#endif
+};
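+
+// Illustrative usage of the holder (a sketch; names are hypothetical):
+//
+//     ExplicitClassTrust classTrust;
+//     {
+//         ExplicitFieldTrustHolder fieldTrust(&classTrust);
+//         // ... analyze one field ...
+//         fieldTrust.SetTrust(ExplicitFieldTrust::kVerifiable);
+//     } // the destructor folds this field's trust into the class-level trust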
+
+#ifdef BINDER
+class InterfaceImplEnum;
+#else
+//*******************************************************************************
+// Enumerator to traverse the interface declarations of a type, automatically building
+// a substitution chain on the stack.
+class InterfaceImplEnum
+{
+ Module* m_pModule;
+ HENUMInternalHolder hEnumInterfaceImpl;
+ const Substitution *m_pSubstChain;
+ Substitution m_CurrSubst;
+ mdTypeDef m_CurrTok;
+public:
+ InterfaceImplEnum(Module *pModule, mdTypeDef cl, const Substitution *pSubstChain)
+ : hEnumInterfaceImpl(pModule->GetMDImport())
+ {
+ WRAPPER_NO_CONTRACT;
+ m_pModule = pModule;
+ hEnumInterfaceImpl.EnumInit(mdtInterfaceImpl, cl);
+ m_pSubstChain = pSubstChain;
+ }
+
+ // Returns:
+ // S_OK ... if has next (TRUE)
+ // S_FALSE ... if does not have next (FALSE)
+ // error code.
+ HRESULT Next()
+ {
+ WRAPPER_NO_CONTRACT;
+ HRESULT hr;
+ mdInterfaceImpl ii;
+ if (!m_pModule->GetMDImport()->EnumNext(&hEnumInterfaceImpl, &ii))
+ {
+ return S_FALSE;
+ }
+
+ IfFailRet(m_pModule->GetMDImport()->GetTypeOfInterfaceImpl(ii, &m_CurrTok));
+ m_CurrSubst = Substitution(m_CurrTok, m_pModule, m_pSubstChain);
+ return S_OK;
+ }
+ const Substitution *CurrentSubst() const { LIMITED_METHOD_CONTRACT; return &m_CurrSubst; }
+ mdTypeDef CurrentToken() const { LIMITED_METHOD_CONTRACT; return m_CurrTok; }
+};
+#endif // BINDER
+
+#ifdef FEATURE_COMINTEROP
+//
+// Class used to map MethodTable slot numbers to COM vtable slots numbers
+// (either for calling a classic COM component or for constructing a classic COM
+// vtable via which COM components can call managed classes). This structure is
+// embedded in the EEClass but the mapping list itself is only allocated if the
+// COM vtable is sparse.
+//
+
+class SparseVTableMap
+{
+public:
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+ SparseVTableMap();
+ ~SparseVTableMap();
+
+ // First run through MT slots calling RecordGap wherever a gap in VT slots
+ // occurs.
+ void RecordGap(WORD StartMTSlot, WORD NumSkipSlots);
+
+ // Then call FinalizeMapping to create the actual mapping list.
+ void FinalizeMapping(WORD TotalMTSlots);
+
+ // Map MT to VT slot.
+ WORD LookupVTSlot(WORD MTSlot);
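+
+    // Illustrative build/lookup sequence (hypothetical slot numbers):
+    //     map.RecordGap(3, 2);        // a gap of two VT slots at MT slot 3
+    //     map.FinalizeMapping(10);    // 10 MT slots in total
+    //     WORD vtSlot = map.LookupVTSlot(5);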
+
+ // Retrieve the number of slots in the vtable (both empty and full).
+ WORD GetNumVTableSlots();
+
+ const void* GetMapList()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (void*)m_MapList;
+ }
+
+#ifdef FEATURE_PREJIT
+ // Methods to persist structure
+ void Save(DataImage *image);
+ void Fixup(DataImage *image);
+#endif // FEATURE_PREJIT
+
+private:
+
+ enum { MapGrow = 4 };
+
+ struct Entry
+ {
+ WORD m_Start; // Starting MT slot number
+ WORD m_Span; // # of consecutive slots that map linearly
+ WORD m_MapTo; // Starting VT slot number
+ };
+
+ Entry *m_MapList; // Pointer to array of Entry structures
+ WORD m_MapEntries; // Number of entries in above
+ WORD m_Allocated; // Number of entries allocated
+
+ WORD m_LastUsed; // Index of last entry used in successful lookup
+
+ WORD m_VTSlot; // Current VT slot number, used during list build
+ WORD m_MTSlot; // Current MT slot number, used during list build
+
+ void AllocOrExpand(); // Allocate or expand the mapping list for a new entry
+};
+#endif // FEATURE_COMINTEROP
+
+//=======================================================================
+// Adjunct to the EEClass structure for classes w/ layout
+//=======================================================================
+class EEClassLayoutInfo
+{
+ static VOID CollectLayoutFieldMetadataThrowing(
+ mdTypeDef cl, // cl of the NStruct being loaded
+ BYTE packingSize, // packing size (from @dll.struct)
+ BYTE nlType, // nltype (from @dll.struct)
+#ifdef FEATURE_COMINTEROP
+ BOOL isWinRT, // Is the type a WinRT type
+#endif // FEATURE_COMINTEROP
+ BOOL fExplicitOffsets, // explicit offsets?
+ MethodTable *pParentMT, // the loaded superclass
+ ULONG cMembers, // total number of members (methods + fields)
+ HENUMInternal *phEnumField, // enumerator for field
+ Module* pModule, // Module that defines the scope, loader and heap (for allocate FieldMarshalers)
+ const SigTypeContext *pTypeContext, // Type parameters for NStruct being loaded
+ EEClassLayoutInfo *pEEClassLayoutInfoOut, // caller-allocated structure to fill in.
+ LayoutRawFieldInfo *pInfoArrayOut, // caller-allocated array to fill in. Needs room for cMember+1 elements
+ LoaderAllocator * pAllocator,
+ AllocMemTracker *pamTracker
+ );
+
+
+ friend class ClassLoader;
+ friend class EEClass;
+ friend class MethodTableBuilder;
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+
+ private:
+        // Size (in bytes) of the fixed portion of the NStruct, in its native and
+        // managed layouts respectively.
+        UINT32 m_cbNativeSize;
+        UINT32 m_cbManagedSize;
+
+ public:
+ // 1,2,4 or 8: this is equal to the largest of the alignment requirements
+ // of each of the EEClass's members. If the NStruct extends another NStruct,
+ // the base NStruct is treated as the first member for the purpose of
+ // this calculation.
+ BYTE m_LargestAlignmentRequirementOfAllMembers;
+
+    // Post V1.0 addition: This is the equivalent of m_LargestAlignmentRequirementOfAllMembers
+    // for the managed layout.
+ BYTE m_ManagedLargestAlignmentRequirementOfAllMembers;
+
+ private:
+ enum {
+ // TRUE if the GC layout of the class is bit-for-bit identical
+ // to its unmanaged counterpart (i.e. no internal reference fields,
+ // no ansi-unicode char conversions required, etc.) Used to
+ // optimize marshaling.
+ e_BLITTABLE = 0x01,
+ // Post V1.0 addition: Is this type also sequential in managed memory?
+ e_MANAGED_SEQUENTIAL = 0x02,
+ // When a sequential/explicit type has no fields, it is conceptually
+ // zero-sized, but actually is 1 byte in length. This holds onto this
+ // fact and allows us to revert the 1 byte of padding when another
+ // explicit type inherits from this type.
+ e_ZERO_SIZED = 0x04,
+ // The size of the struct is explicitly specified in the meta-data.
+ e_HAS_EXPLICIT_SIZE = 0x08,
+
+#ifdef FEATURE_HFA
+ // HFA type of the unmanaged layout
+ e_R4_HFA = 0x10,
+ e_R8_HFA = 0x20,
+#endif
+ };
+
+ BYTE m_bFlags;
+
+ // Packing size in bytes (1, 2, 4, 8 etc.)
+ BYTE m_cbPackingSize;
+
+ // # of fields that are of the calltime-marshal variety.
+ UINT m_numCTMFields;
+
+ // An array of FieldMarshaler data blocks, used to drive call-time
+ // marshaling of NStruct reference parameters. The number of elements
+ // equals m_numCTMFields.
+ FieldMarshaler *m_pFieldMarshalers;
+
+
+ public:
+    UINT32 GetNativeSize() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_cbNativeSize;
+ }
+
+ UINT32 GetManagedSize() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_cbManagedSize;
+ }
+
+
+ BYTE GetLargestAlignmentRequirementOfAllMembers() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_LargestAlignmentRequirementOfAllMembers;
+ }
+
+ UINT GetNumCTMFields() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_numCTMFields;
+ }
+
+ FieldMarshaler *GetFieldMarshalers() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pFieldMarshalers;
+ }
+
+ BOOL IsBlittable() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_bFlags & e_BLITTABLE) == e_BLITTABLE;
+ }
+
+ BOOL IsManagedSequential() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_bFlags & e_MANAGED_SEQUENTIAL) == e_MANAGED_SEQUENTIAL;
+ }
+
+    // If true, this says that the type was originally zero-sized
+    // and the native size was bumped up to one for similar behaviour
+    // to C++ structs. It is necessary to keep track of this so that
+    // we can ignore the one byte of padding if other types derive
+    // from this type.
+ BOOL IsZeroSized() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_bFlags & e_ZERO_SIZED) == e_ZERO_SIZED;
+ }
+
+ BOOL HasExplicitSize() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_bFlags & e_HAS_EXPLICIT_SIZE) == e_HAS_EXPLICIT_SIZE;
+ }
+
+ DWORD GetPackingSize() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_cbPackingSize;
+ }
+
+#ifdef FEATURE_HFA
+ bool IsNativeHFA()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_bFlags & (e_R4_HFA | e_R8_HFA)) != 0;
+ }
+
+ CorElementType GetNativeHFAType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (IsNativeHFA())
+ return (m_bFlags & e_R4_HFA) ? ELEMENT_TYPE_R4 : ELEMENT_TYPE_R8;
+ return ELEMENT_TYPE_END;
+ }
+#endif
+
+ private:
+ void SetIsBlittable(BOOL isBlittable)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_bFlags = isBlittable ? (m_bFlags | e_BLITTABLE)
+ : (m_bFlags & ~e_BLITTABLE);
+ }
+
+ void SetIsManagedSequential(BOOL isManagedSequential)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_bFlags = isManagedSequential ? (m_bFlags | e_MANAGED_SEQUENTIAL)
+ : (m_bFlags & ~e_MANAGED_SEQUENTIAL);
+ }
+
+ void SetIsZeroSized(BOOL isZeroSized)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_bFlags = isZeroSized ? (m_bFlags | e_ZERO_SIZED)
+ : (m_bFlags & ~e_ZERO_SIZED);
+ }
+
+ void SetHasExplicitSize(BOOL hasExplicitSize)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_bFlags = hasExplicitSize ? (m_bFlags | e_HAS_EXPLICIT_SIZE)
+ : (m_bFlags & ~e_HAS_EXPLICIT_SIZE);
+ }
+
+#ifdef FEATURE_HFA
+ void SetNativeHFAType(CorElementType hfaType)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_bFlags |= (hfaType == ELEMENT_TYPE_R4) ? e_R4_HFA : e_R8_HFA;
+ }
+#endif
+};
+
+
+
+//
+// This structure is used only when the classloader is building the interface map. Before the class
+// is resolved, the EEClass contains an array of these, which are all interfaces *directly* declared
+// for this class/interface by the metadata - inherited interfaces will not be present if they are
+// not specifically declared.
+//
+// This structure is destroyed after resolving has completed.
+//
+typedef struct
+{
+ // The interface method table; for instantiated interfaces, this is the generic interface
+ MethodTable *m_pMethodTable;
+} BuildingInterfaceInfo_t;
+
+
+//
+// We should not need to touch anything in here once the classes are all loaded, unless we
+// are doing reflection. Try to avoid paging this data structure in.
+//
+
+// Size of hash bitmap for method names
+#define METHOD_HASH_BYTES 8
+
+// Hash table size - prime number
+#define METHOD_HASH_BITS 61
+
+
+// These are some macros for forming fully qualified class names for a class.
+// These are abstracted so that we can decide later if a max length for a
+// class name is acceptable.
+
+// It makes sense to have a small but usually sufficient stack buffer to
+// build class names into. Most class names fit in 128 characters, and that
+// is a small amount of stack to use in exchange for not having to new and
+// delete the memory.
+#define DEFAULT_NONSTACK_CLASSNAME_SIZE (MAX_CLASSNAME_LENGTH/4)
+
+#define DefineFullyQualifiedNameForClass() \
+ ScratchBuffer<DEFAULT_NONSTACK_CLASSNAME_SIZE> _scratchbuffer_; \
+ InlineSString<DEFAULT_NONSTACK_CLASSNAME_SIZE> _ssclsname_;
+
+#define DefineFullyQualifiedNameForClassOnStack() \
+ ScratchBuffer<MAX_CLASSNAME_LENGTH> _scratchbuffer_; \
+ InlineSString<MAX_CLASSNAME_LENGTH> _ssclsname_;
+
+#define DefineFullyQualifiedNameForClassW() \
+ InlineSString<DEFAULT_NONSTACK_CLASSNAME_SIZE> _ssclsname_w_;
+
+#define DefineFullyQualifiedNameForClassWOnStack() \
+ InlineSString<MAX_CLASSNAME_LENGTH> _ssclsname_w_;
+
+#define GetFullyQualifiedNameForClassNestedAware(pClass) \
+ pClass->_GetFullyQualifiedNameForClassNestedAware(_ssclsname_).GetUTF8(_scratchbuffer_)
+
+#define GetFullyQualifiedNameForClassNestedAwareW(pClass) \
+ pClass->_GetFullyQualifiedNameForClassNestedAware(_ssclsname_w_).GetUnicode()
+
+#define GetFullyQualifiedNameForClass(pClass) \
+ pClass->_GetFullyQualifiedNameForClass(_ssclsname_).GetUTF8(_scratchbuffer_)
+
+#define GetFullyQualifiedNameForClassW(pClass) \
+ pClass->_GetFullyQualifiedNameForClass(_ssclsname_w_).GetUnicode()
+
+#define GetFullyQualifiedNameForClassW_WinRT(pClass) \
+ pClass->_GetFullyQualifiedNameForClass(_ssclsname_w_).GetUnicode()
+
+#define GetFullyQualifiedNameForClass_WinRT(pClass) \
+ pClass->_GetFullyQualifiedNameForClass(_ssclsname_).GetUTF8(_scratchbuffer_)
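+
+// Illustrative usage of the macros above (assumes pClass points at an EEClass):
+//     DefineFullyQualifiedNameForClass();
+//     LPCUTF8 szName = GetFullyQualifiedNameForClass(pClass);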
+
+// Structure containing EEClass fields used by a minority of EEClass instances. This separation allows us to
+// save memory and improve the density of accessed fields in the EEClasses themselves. This class is reached
+// via the m_rpOptionalFields field of EEClass (use the GetOptionalFields() accessor rather than the field
+// itself).
+class EEClassOptionalFields
+{
+ // All fields here are intentionally private. Use the corresponding accessor on EEClass instead (this
+ // makes it easier to add and remove fields from the optional section in the future). We make exceptions
+ // for MethodTableBuilder and NativeImageDumper, which need raw field-level access.
+ friend class EEClass;
+ friend class MethodTableBuilder;
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+#ifdef BINDER
+ friend class MdilModule;
+ friend class CompactTypeBuilder;
+#endif
+
+ //
+ // GENERICS RELATED FIELDS.
+ //
+
+ // If IsSharedByGenericInstantiations(), layout of handle dictionary for generic type
+ // (the last dictionary pointed to from PerInstInfo). Otherwise NULL.
+ PTR_DictionaryLayout m_pDictLayout;
+
+ // Variance info for each type parameter (gpNonVariant, gpCovariant, or gpContravariant)
+ // If NULL, this type has no type parameters that are co/contravariant
+ BYTE* m_pVarianceInfo;
+
+ //
+ // COM RELATED FIELDS.
+ //
+
+#ifdef FEATURE_COMINTEROP
+ SparseVTableMap *m_pSparseVTableMap;
+
+ TypeHandle m_pCoClassForIntf; // @TODO: Coclass for an interface
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ // Points to activation information if the type is an activatable COM/WinRT class.
+ ClassFactoryBase *m_pClassFactory;
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+ WinMDAdapter::RedirectedTypeIndex m_WinRTRedirectedTypeIndex;
+
+#endif // FEATURE_COMINTEROP
+
+ //
+ // MISC FIELDS
+ //
+
+ #define MODULE_NON_DYNAMIC_STATICS ((DWORD)-1)
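+    // Presumably the ID used to locate this class's dynamic statics within its
+    // module; MODULE_NON_DYNAMIC_STATICS means the class has none (see
+    // GetModuleDynamicID/SetModuleDynamicID below).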
+ DWORD m_cbModuleDynamicID;
+
+ DWORD m_dwReliabilityContract;
+
+ SecurityProperties m_SecProps;
+
+ // Set default values for optional fields.
+ inline void Init();
+};
+typedef DPTR(EEClassOptionalFields) PTR_EEClassOptionalFields;
+
+//
+// Another mechanism used to reduce the size of the average EEClass instance is the notion of packed fields.
+// This is based on the observation that EEClass has a large number of integer fields that typically contain
+// small values and that are fixed once class layout has completed. We can compact these fields by discarding
+// the leading zero bits (and for small values there'll be a lot of these) and packing the significant data
+// into compact bitfields. This is a dynamic operation (the exact packing used depends on the exact data
+// stored in the fields).
+//
+// The PackedDWORDFields<> class (defined in PackedFields.inl) encapsulates this. It takes one template
+// parameter, the number of fields to pack, and provides operations to get and set those fields until we're
+// happy with the values, at which point it will compact them for us.
+//
+// The packed fields themselves are stored at the end of the EEClass instance (or the LayoutEEClass or the
+// DelegateEEClass etc.) so we can take advantage of the variable sized nature of the fields. We gain nothing for
+// runtime allocated EEClasses (we have to allocate a maximally sized structure for the packed fields because
+// we can't tell at the beginning of EEClass layout what the field values will be). But in the ngen scenario
+// we can compact the fields just prior to saving and only store the portion of the EEClass that is relevant,
+// helping us with our goal of packing all the EEClass instances together as tightly as possible.
+//
+// Since each packed field is now accessed via an array-like index, we give each of those indices a name with
+// the enum below to make the code more readable.
+//
+
+enum EEClassFieldId
+{
+ EEClass_Field_NumInstanceFields = 0,
+ EEClass_Field_NumMethods,
+ EEClass_Field_NumStaticFields,
+ EEClass_Field_NumHandleStatics,
+ EEClass_Field_NumBoxedStatics,
+ EEClass_Field_NonGCStaticFieldBytes,
+ EEClass_Field_NumThreadStaticFields,
+ EEClass_Field_NumHandleThreadStatics,
+ EEClass_Field_NumBoxedThreadStatics,
+ EEClass_Field_NonGCThreadStaticFieldBytes,
+ EEClass_Field_NumNonVirtualSlots,
+ EEClass_Field_COUNT
+};
+
+typedef PackedDWORDFields<EEClass_Field_COUNT> EEClassPackedFields;
+typedef DPTR(EEClassPackedFields) PTR_EEClassPackedFields;
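+
+// Illustrative access pattern (see GetNumMethods/SetNumMethods below, which wrap
+// the GetPackableField/SetPackableField accessors):
+//     DWORD cMethods = GetPackableField(EEClass_Field_NumMethods);
+//     SetPackableField(EEClass_Field_NumMethods, cMethods + 1);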
+
+//@GENERICS:
+// For most types there is a one-to-one mapping between MethodTable* and EEClass*
+// However this is not the case for instantiated types where code and representation
+// are shared between compatible instantiations (e.g. List<string> and List<object>)
+// Then a single EEClass structure is shared between multiple MethodTable structures
+// Uninstantiated generic types (e.g. List) have their own EEClass and MethodTable,
+// used (a) as a representative for the generic type itself, (b) for static fields and
+// methods, which aren't present in the instantiations, and (c) to hold some information
+// (e.g. formal instantiations of superclass and implemented interfaces) that is common
+// to all instantiations and isn't stored in the EEClass structures for instantiated types
+//
+//
+// ** NOTE ** NOTE ** NOTE ** NOTE ** NOTE ** NOTE ** NOTE ** NOTE
+//
+// A word about EEClass vs. MethodTable
+// ------------------------------------
+//
+// At compile-time, we are happy to touch both MethodTable and EEClass. However,
+// at runtime we want to restrict ourselves to the MethodTable. This is critical
+// for common code paths, where we want to keep the EEClass out of our working
+// set. For uncommon code paths, like throwing exceptions or strange Contexts
+// issues, it's okay to access the EEClass.
+//
+// To this end, the TypeHandle (CLASS_HANDLE) abstraction is now based on the
+// MethodTable pointer instead of the EEClass pointer. If you are writing a
+// runtime helper that calls GetClass() to access the associated EEClass, please
+// stop to wonder if you are making a mistake.
+//
+// ** NOTE ** NOTE ** NOTE ** NOTE ** NOTE ** NOTE ** NOTE ** NOTE
+
+
+// A code:EEClass is a representation of the part of a managed type that is not used very frequently (it is
+// cold), and thus is segregated from the hot portion (which lives in code:MethodTable). As noted above,
+// it is also the case that EEClass is SHARED among all instantiations of a generic type, so anything that
+// is specific to a particular instantiation cannot hang off the EEClass.
+//
+// From here you can get to
+// code:MethodTable - The representation of the hot portion of a type.
+// code:MethodDesc - The representation of a method
+// code:FieldDesc - The representation of a field.
+//
+// EEClasses hold the following important fields
+// * code:EEClass.m_pMethodTable - Points to the MethodTable associated with this EEClass.
+// * code:EEClass.m_pChunks - a list of code:MethodDescChunk which is simply a list of code:MethodDesc
+// which represent the methods.
+// * code:EEClass.m_pFieldDescList - a list of fields in the type.
+//
+class EEClass // DO NOT CREATE A NEW EEClass USING NEW!
+{
+ /************************************
+ * FRIEND FUNCTIONS
+ ************************************/
+ // DO NOT ADD FRIENDS UNLESS ABSOLUTELY NECESSARY
+ // USE ACCESSORS TO READ/WRITE private field members
+
+ // To access bmt stuff
+ friend class MethodTable;
+ friend class MethodTableBuilder;
+ friend class FieldDesc;
+ friend class CheckAsmOffsets;
+ friend class ClrDataAccess;
+#ifdef BINDER
+ friend class MdilModule;
+ friend class CompactTypeBuilder;
+#endif
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+ /************************************
+ * PUBLIC INSTANCE METHODS
+ ************************************/
+public:
+
+ DWORD IsSealed()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return IsTdSealed(m_dwAttrClass);
+ }
+
+ inline DWORD IsInterface()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsTdInterface(m_dwAttrClass);
+ }
+
+ inline DWORD IsAbstract()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsTdAbstract(m_dwAttrClass);
+ }
+
+ BOOL HasExplicitFieldOffsetLayout()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsTdExplicitLayout(GetAttrClass()) && HasLayout();
+ }
+
+ BOOL HasSequentialLayout()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsTdSequentialLayout(GetAttrClass());
+ }
+ BOOL IsSerializable()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsTdSerializable(GetAttrClass());
+ }
+ BOOL IsBeforeFieldInit()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsTdBeforeFieldInit(GetAttrClass());
+ }
+
+ DWORD GetProtection()
+ {
+ WRAPPER_NO_CONTRACT;
+ return (m_dwAttrClass & tdVisibilityMask);
+ }
+
+ // class is blittable
+ BOOL IsBlittable();
+
+ //
+ // Security properties accessor methods
+ //
+
+ inline BOOL RequiresLinktimeCheck()
+ {
+ WRAPPER_NO_CONTRACT;
+ PSecurityProperties psp = GetSecurityProperties();
+ return psp && psp->RequiresLinktimeCheck();
+ }
+
+ inline BOOL RequiresLinkTimeCheckHostProtectionOnly()
+ {
+ WRAPPER_NO_CONTRACT;
+ PSecurityProperties psp = GetSecurityProperties();
+ return psp && psp->RequiresLinkTimeCheckHostProtectionOnly();
+ }
+
+ inline BOOL RequiresInheritanceCheck()
+ {
+ WRAPPER_NO_CONTRACT;
+ PSecurityProperties psp = GetSecurityProperties();
+ return psp && psp->RequiresInheritanceCheck();
+ }
+
+ inline BOOL RequiresCasInheritanceCheck()
+ {
+ WRAPPER_NO_CONTRACT;
+ PSecurityProperties psp = GetSecurityProperties();
+ return psp && psp->RequiresCasInheritanceCheck();
+ }
+
+ inline BOOL RequiresNonCasInheritanceCheck()
+ {
+ WRAPPER_NO_CONTRACT;
+ PSecurityProperties psp = GetSecurityProperties();
+ return psp && psp->RequiresNonCasInheritanceCheck();
+ }
+
+
+#ifndef DACCESS_COMPILE
+ void *operator new(size_t size, LoaderHeap* pHeap, AllocMemTracker *pamTracker);
+ void Destruct(MethodTable * pMT);
+
+ static EEClass * CreateMinimalClass(LoaderHeap *pHeap, AllocMemTracker *pamTracker);
+#endif // !DACCESS_COMPILE
+
+#ifdef EnC_SUPPORTED
+ // Add a new method to an already loaded type for EnC
+ static HRESULT AddMethod(MethodTable * pMT, mdMethodDef methodDef, RVA newRVA, MethodDesc **ppMethod);
+
+ // Add a new field to an already loaded type for EnC
+ static HRESULT AddField(MethodTable * pMT, mdFieldDef fieldDesc, EnCFieldDesc **pAddedField);
+ static VOID FixupFieldDescForEnC(MethodTable * pMT, EnCFieldDesc *pFD, mdFieldDef fieldDef);
+#endif // EnC_SUPPORTED
+
+ inline DWORD IsComImport()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsTdImport(m_dwAttrClass);
+ }
+
+#ifdef FEATURE_PREJIT
+ DWORD GetSize();
+
+ void Save(DataImage *image, MethodTable *pMT);
+ void Fixup(DataImage *image, MethodTable *pMT);
+#endif // FEATURE_PREJIT
+
+ EEClassLayoutInfo *GetLayoutInfo();
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, MethodTable *pMT);
+#endif
+
+ static CorElementType ComputeInternalCorElementTypeForValueType(MethodTable * pMT);
+
+ /************************************
+ * INSTANCE MEMBER VARIABLES
+ ************************************/
+#ifdef _DEBUG
+public:
+ inline LPCUTF8 GetDebugClassName ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_szDebugClassName;
+ }
+ inline void SetDebugClassName (LPCUTF8 szDebugClassName)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_szDebugClassName = szDebugClassName;
+ }
+
+ /*
+     * Controls debugging breaks and output if this class
+     * is mentioned in the registry ("BreakOnClassBuild").
+ * Method layout within this class can cause a debug
+ * break by setting "BreakOnMethodName". Not accessible
+ * outside the class.
+ */
+
+#endif // _DEBUG
+
+#ifdef FEATURE_COMINTEROP
+ /*
+ * Used to map MethodTable slots to VTable slots
+ */
+ inline SparseVTableMap* GetSparseCOMInteropVTableMap ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return HasOptionalFields() ? GetOptionalFields()->m_pSparseVTableMap : NULL;
+ }
+ inline void SetSparseCOMInteropVTableMap (SparseVTableMap *map)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasOptionalFields());
+ GetOptionalFields()->m_pSparseVTableMap = map;
+ }
+#endif // FEATURE_COMINTEROP
+
+public:
+#ifndef DACCESS_COMPILE
+#ifdef MDIL
+ void WriteCompactLayout(ICompactLayoutWriter *, ZapImage *);
+ HRESULT WriteCompactLayoutHelper(ICompactLayoutWriter *);
+ HRESULT WriteCompactLayoutInterfaces(ICompactLayoutWriter *);
+ HRESULT WriteCompactLayoutInterfaceImpls(ICompactLayoutWriter *);
+ HRESULT WriteCompactLayoutFields(ICompactLayoutWriter *);
+ HRESULT WriteCompactLayoutMethods(ICompactLayoutWriter *);
+ HRESULT WriteCompactLayoutMethodImpls(ICompactLayoutWriter *);
+ HRESULT WriteCompactLayoutTypeFlags(ICompactLayoutWriter *pICLW);
+ HRESULT WriteCompactLayoutSpecialType(ICompactLayoutWriter *pICLW);
+#endif // MDIL
+#endif
+ /*
+     * Maintain back pointer to the statically hot portion of the EEClass.
+ * For an EEClass representing multiple instantiations of a generic type, this is the method table
+ * for the first instantiation requested and is the only one containing entries for non-virtual instance methods
+ * (i.e. non-vtable entries).
+ */
+
+ // Note that EEClass structures may be shared between generic instantiations
+ // (see IsSharedByGenericInstantiations). In these cases EEClass::GetMethodTable
+ // will return the method table pointer corresponding to the "canonical"
+ // instantiation, as defined in typehandle.h.
+ //
+ inline MethodTable* GetMethodTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return m_pMethodTable;
+ }
+
+ // DO NOT ADD ANY ASSERTS TO THIS METHOD.
+ // DO NOT USE THIS METHOD.
+ // Yes folks, for better or worse the debugger pokes supposed object addresses
+ // to try to see if objects are valid, possibly firing an AccessViolation or worse,
+ // and then catches the AV and reports a failure to the debug client. This makes
+ // the debugger slightly more robust should any corrupted object references appear
+ // in a session. Thus it is "correct" behaviour for this to AV when used with
+ // an invalid object pointer, and incorrect behaviour for it to
+ // assert.
+ inline PTR_MethodTable GetMethodTableWithPossibleAV()
+ {
+ CANNOT_HAVE_CONTRACT;
+ SUPPORTS_DAC;
+
+ return m_pMethodTable;
+ }
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_REMOTING
+ inline void SetMethodTableForTransparentProxy(MethodTable* pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ // Transparent proxy class' true method table
+ // is replaced by a global thunk table
+
+ _ASSERTE(pMT->IsTransparentProxy() &&
+ m_pMethodTable->IsTransparentProxy());
+
+ IBCLOG(LogEEClassCOWTableAccess(GetMethodTable()));
+
+ m_pMethodTable = pMT;
+ }
+#endif
+
+ inline void SetMethodTable(MethodTable* pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pMethodTable = pMT;
+ }
+#endif // !DACCESS_COMPILE
+
+ /*
+ * Number of fields in the class, including inherited fields.
+ * Does not include fields added from EnC.
+ */
+ inline WORD GetNumInstanceFields()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return (WORD)GetPackableField(EEClass_Field_NumInstanceFields);
+ }
+
+ inline void SetNumInstanceFields (WORD wNumInstanceFields)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetPackableField(EEClass_Field_NumInstanceFields, wNumInstanceFields);
+ }
+
+ /*
+ * Number of static fields declared in this class.
+ * Implementation Note: Static values are laid out at the end of the MethodTable vtable.
+ */
+ inline WORD GetNumStaticFields()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return (WORD)GetPackableField(EEClass_Field_NumStaticFields);
+ }
+ inline void SetNumStaticFields (WORD wNumStaticFields)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetPackableField(EEClass_Field_NumStaticFields, wNumStaticFields);
+ }
+
+ inline WORD GetNumThreadStaticFields()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return (WORD)GetPackableField(EEClass_Field_NumThreadStaticFields);
+ }
+
+ inline void SetNumThreadStaticFields (WORD wNumThreadStaticFields)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetPackableField(EEClass_Field_NumThreadStaticFields, wNumThreadStaticFields);
+ }
+
+ // Statics are stored in a big chunk inside the module
+
+ inline DWORD GetModuleDynamicID()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return HasOptionalFields() ? GetOptionalFields()->m_cbModuleDynamicID : MODULE_NON_DYNAMIC_STATICS;
+ }
+
+ inline void SetModuleDynamicID(DWORD cbModuleDynamicID)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasOptionalFields());
+ GetOptionalFields()->m_cbModuleDynamicID = cbModuleDynamicID;
+ }
+
+    /*
+     * The difference between the InterfaceMap pointer and the vtable in the
+     * MethodTable used to indicate the number of static bytes.
+     * Now the interfaceMap pointer can be optional, hence we store the count here.
+     */
+ inline DWORD GetNonGCRegularStaticFieldBytes()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetPackableField(EEClass_Field_NonGCStaticFieldBytes);
+ }
+ inline void SetNonGCRegularStaticFieldBytes (DWORD cbNonGCStaticFieldBytes)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetPackableField(EEClass_Field_NonGCStaticFieldBytes, cbNonGCStaticFieldBytes);
+ }
+
+ inline DWORD GetNonGCThreadStaticFieldBytes()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetPackableField(EEClass_Field_NonGCThreadStaticFieldBytes);
+ }
+ inline void SetNonGCThreadStaticFieldBytes (DWORD cbNonGCStaticFieldBytes)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetPackableField(EEClass_Field_NonGCThreadStaticFieldBytes, cbNonGCStaticFieldBytes);
+ }
+
+ inline WORD GetNumNonVirtualSlots()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (WORD)GetPackableField(EEClass_Field_NumNonVirtualSlots);
+ }
+ inline void SetNumNonVirtualSlots(WORD wNumNonVirtualSlots)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetPackableField(EEClass_Field_NumNonVirtualSlots, wNumNonVirtualSlots);
+ }
+
+ inline BOOL IsEquivalentType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_VMFlags & VMFLAG_IS_EQUIVALENT_TYPE;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ inline void SetIsEquivalentType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= VMFLAG_IS_EQUIVALENT_TYPE;
+ }
+#endif
+
+ /*
+ * Number of static handles allocated
+ */
+ inline WORD GetNumHandleRegularStatics ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (WORD)GetPackableField(EEClass_Field_NumHandleStatics);
+ }
+ inline void SetNumHandleRegularStatics (WORD wNumHandleRegularStatics)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetPackableField(EEClass_Field_NumHandleStatics, wNumHandleRegularStatics);
+ }
+
+ /*
+ * Number of static handles allocated for ThreadStatics
+ */
+ inline WORD GetNumHandleThreadStatics ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (WORD)GetPackableField(EEClass_Field_NumHandleThreadStatics);
+ }
+ inline void SetNumHandleThreadStatics (WORD wNumHandleThreadStatics)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetPackableField(EEClass_Field_NumHandleThreadStatics, wNumHandleThreadStatics);
+ }
+
+ /*
+ * Number of boxed statics allocated
+ */
+ inline WORD GetNumBoxedRegularStatics ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (WORD)GetPackableField(EEClass_Field_NumBoxedStatics);
+ }
+ inline void SetNumBoxedRegularStatics (WORD wNumBoxedRegularStatics)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetPackableField(EEClass_Field_NumBoxedStatics, wNumBoxedRegularStatics);
+ }
+
+ /*
+ * Number of boxed statics allocated for ThreadStatics
+ */
+ inline WORD GetNumBoxedThreadStatics ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (WORD)GetPackableField(EEClass_Field_NumBoxedThreadStatics);
+ }
+ inline void SetNumBoxedThreadStatics (WORD wNumBoxedThreadStatics)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetPackableField(EEClass_Field_NumBoxedThreadStatics, wNumBoxedThreadStatics);
+ }
+
+ /*
+     * Number of bytes to subtract from code:MethodTable::GetBaseSize() to get the actual number of bytes
+ * of instance fields stored in the object on the GC heap.
+ */
+ inline DWORD GetBaseSizePadding()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_cbBaseSizePadding;
+ }
+ inline void SetBaseSizePadding(DWORD dwPadding)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(FitsIn<BYTE>(dwPadding));
+ m_cbBaseSizePadding = static_cast<BYTE>(dwPadding);
+ }
+
+ inline DWORD GetUnboxedNumInstanceFieldBytes()
+ {
+        DWORD cbFieldBytes = GetMethodTable()->GetNumInstanceFieldBytes();
+
+        _ASSERTE(GetMethodTable()->IsValueType() || GetMethodTable()->IsEnum());
+        return cbFieldBytes;
+ }
+
+
+ /*
+ * Pointer to a list of FieldDescs declared in this class
+ * There are (m_wNumInstanceFields - GetParentClass()->m_wNumInstanceFields + m_wNumStaticFields) entries
+ * in this array
+ */
+#ifdef FEATURE_PREJIT
+ static DWORD FieldDescListSize(MethodTable * pMT);
+#endif
+
+ inline PTR_FieldDesc GetFieldDescList()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+        // Be careful about using this method. If it's possible that fields may have been added via EnC,
+        // you must use the FieldDescIterator instead, as any fields added via EnC won't be in this raw list.
+ return m_pFieldDescList.GetValueMaybeNull(PTR_HOST_MEMBER_TADDR(EEClass, this, m_pFieldDescList));
+ }
+
+ PTR_FieldDesc GetFieldDescByIndex(DWORD fieldIndex);
+
+#ifndef DACCESS_COMPILE
+ inline void SetFieldDescList (FieldDesc* pFieldDescList)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pFieldDescList.SetValue(PTR_HOST_MEMBER_TADDR(EEClass, this, m_pFieldDescList), pFieldDescList);
+ }
+#endif // !DACCESS_COMPILE
+
+ inline WORD GetNumMethods()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (WORD)GetPackableField(EEClass_Field_NumMethods);
+ }
+ inline void SetNumMethods (WORD wNumMethods)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetPackableField(EEClass_Field_NumMethods, wNumMethods);
+ }
+
+ /*
+ * Cached metadata for this class (GetTypeDefProps)
+ */
+ inline DWORD GetAttrClass()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwAttrClass;
+ }
+ inline void SetAttrClass (DWORD dwAttrClass)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_dwAttrClass = dwAttrClass;
+ }
+
+
+#ifdef FEATURE_COMINTEROP
+ inline DWORD IsComClassInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_VMFlags & VMFLAG_HASCOCLASSATTRIB);
+ }
+ inline VOID SetIsComClassInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= VMFLAG_HASCOCLASSATTRIB;
+ }
+ inline void SetComEventItfType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(IsInterface());
+ m_VMFlags |= VMFLAG_COMEVENTITFMASK;
+ }
+ // class is a special COM event interface
+ inline BOOL IsComEventItfType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_VMFlags & VMFLAG_COMEVENTITFMASK);
+ }
+#endif // FEATURE_COMINTEROP
+
+#ifdef _DEBUG
+ inline DWORD IsDestroyed()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_wAuxFlags & AUXFLAG_DESTROYED);
+ }
+#endif
+
+ inline BOOL IsCritical()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasCriticalTransparentInfo());
+ return (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) != VMFLAG_TRANSPARENCY_TRANSPARENT
+ && !IsAllTransparent();
+ }
+
+ inline BOOL IsTreatAsSafe()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasCriticalTransparentInfo());
+ return (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) == VMFLAG_TRANSPARENCY_ALLCRITICAL_TAS ||
+ (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) == VMFLAG_TRANSPARENCY_TAS_NOTCRITICAL
+#ifndef FEATURE_CORECLR
+ || (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) == VMFLAG_TRANSPARENCY_CRITICAL_TAS
+#endif // !FEATURE_CORECLR
+ ;
+ }
+
+ inline BOOL IsAllTransparent()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasCriticalTransparentInfo());
+ return (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) == VMFLAG_TRANSPARENCY_ALL_TRANSPARENT;
+ }
+
+ inline BOOL IsAllCritical()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasCriticalTransparentInfo());
+ return (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) == VMFLAG_TRANSPARENCY_ALLCRITICAL
+ || (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) == VMFLAG_TRANSPARENCY_ALLCRITICAL_TAS;
+ }
+
+ inline BOOL HasCriticalTransparentInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) != VMFLAG_TRANSPARENCY_UNKNOWN;
+ }
+
+ void SetCriticalTransparentInfo(
+#ifndef FEATURE_CORECLR
+ BOOL fIsCritical,
+#endif // !FEATURE_CORECLR
+ BOOL fIsTreatAsSafe,
+ BOOL fIsAllTransparent,
+ BOOL fIsAllCritical)
+ {
+ WRAPPER_NO_CONTRACT;
+
+        // TAS without critical doesn't make sense. It was allowed in the v2 desktop model,
+        // however, so we need to allow it for compatibility reasons on the desktop.
+#ifdef FEATURE_CORECLR
+ _ASSERTE(!fIsTreatAsSafe || fIsAllCritical);
+#endif // FEATURE_CORECLR
+
+        // If nothing is set, then we're transparent.
+ unsigned flags = VMFLAG_TRANSPARENCY_TRANSPARENT;
+
+ if (fIsAllTransparent)
+ {
+ flags = VMFLAG_TRANSPARENCY_ALL_TRANSPARENT;
+ }
+ else if (fIsAllCritical)
+ {
+ flags = fIsTreatAsSafe ? VMFLAG_TRANSPARENCY_ALLCRITICAL_TAS :
+ VMFLAG_TRANSPARENCY_ALLCRITICAL;
+ }
+#ifndef FEATURE_CORECLR
+ else if (fIsCritical)
+ {
+ flags = fIsTreatAsSafe ? VMFLAG_TRANSPARENCY_CRITICAL_TAS :
+ VMFLAG_TRANSPARENCY_CRITICAL;
+ }
+#endif // !FEATURE_CORECLR
+ else
+ {
+ flags = fIsTreatAsSafe ? VMFLAG_TRANSPARENCY_TAS_NOTCRITICAL :
+ VMFLAG_TRANSPARENCY_TRANSPARENT;
+ }
+
+ FastInterlockOr(EnsureWritablePages(&m_VMFlags), flags);
+
+ _ASSERTE(HasCriticalTransparentInfo());
+ }
+ inline DWORD IsUnsafeValueClass()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_VMFlags & VMFLAG_UNSAFEVALUETYPE);
+ }
+
+
+private:
+ inline void SetUnsafeValueClass()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= VMFLAG_UNSAFEVALUETYPE;
+ }
+
+public:
+ inline BOOL HasNoGuid()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_VMFlags & VMFLAG_NO_GUID);
+ }
+ inline void SetHasNoGuid()
+ {
+ WRAPPER_NO_CONTRACT;
+ FastInterlockOr(EnsureWritablePages(&m_VMFlags), VMFLAG_NO_GUID);
+ }
+
+public:
+
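+ // Note that the flag below records the *absence* of the attribute: a
+ // zero-initialized EEClass reports HasSuppressUnmanagedCodeAccessAttr()
+ // as TRUE until SetDoesNotHaveSuppressUnmanagedCodeAccessAttr() is called.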
+ inline void SetDoesNotHaveSuppressUnmanagedCodeAccessAttr()
+ {
+ WRAPPER_NO_CONTRACT;
+ FastInterlockOr(EnsureWritablePages(&m_VMFlags),VMFLAG_NOSUPPRESSUNMGDCODEACCESS);
+ }
+
+ inline BOOL HasSuppressUnmanagedCodeAccessAttr()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return !(m_VMFlags & VMFLAG_NOSUPPRESSUNMGDCODEACCESS);
+ }
+
+ inline BOOL HasRemotingProxyAttribute()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_VMFlags & VMFLAG_REMOTING_PROXY_ATTRIBUTE;
+ }
+ inline void SetHasRemotingProxyAttribute()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= (DWORD)VMFLAG_REMOTING_PROXY_ATTRIBUTE;
+ }
+ inline BOOL IsAlign8Candidate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_VMFlags & VMFLAG_PREFER_ALIGN8);
+ }
+ inline void SetAlign8Candidate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= VMFLAG_PREFER_ALIGN8;
+ }
+#ifdef _DEBUG
+ inline void SetDestroyed()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wAuxFlags |= AUXFLAG_DESTROYED;
+ }
+#endif
+ inline void SetHasFixedAddressVTStatics()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= (DWORD) VMFLAG_FIXED_ADDRESS_VT_STATICS;
+ }
+#ifdef FEATURE_COMINTEROP
+ void SetSparseForCOMInterop()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= (DWORD) VMFLAG_SPARSE_FOR_COMINTEROP;
+ }
+ inline void SetProjectedFromWinRT()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= (DWORD) VMFLAG_PROJECTED_FROM_WINRT;
+ }
+ inline void SetExportedToWinRT()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= (DWORD) VMFLAG_EXPORTED_TO_WINRT;
+ }
+ inline void SetMarshalingType(UINT32 mType)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(mType != 0);
+ _ASSERTE((m_VMFlags & VMFLAG_MARSHALINGTYPE_MASK) == 0);
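+ // mType is the raw value of the WinRT MarshalingBehaviorAttribute; only the
+ // values 1 (inhibit), 2 (free-threaded) and 3 (standard) are expected here.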
+ switch(mType)
+ {
+ case 1: m_VMFlags |= VMFLAG_MARSHALINGTYPE_INHIBIT;
+ break;
+ case 2: m_VMFlags |= VMFLAG_MARSHALINGTYPE_FREETHREADED;
+ break;
+ case 3: m_VMFlags |= VMFLAG_MARSHALINGTYPE_STANDARD;
+ break;
+ default:
+ _ASSERTE(!"Invalid MarshalingBehaviorAttribute value");
+ }
+ }
+#endif // FEATURE_COMINTEROP
+ inline void SetHasLayout()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= (DWORD) VMFLAG_HASLAYOUT; //modified before the class is published
+ }
+ inline void SetHasOverLayedFields()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= VMFLAG_HASOVERLAYEDFIELDS;
+ }
+ inline void SetIsNested()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= VMFLAG_ISNESTED;
+ }
+
+#ifdef FEATURE_READYTORUN
+ inline BOOL HasLayoutDependsOnOtherModules()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_VMFlags & VMFLAG_LAYOUT_DEPENDS_ON_OTHER_MODULES;
+ }
+
+ inline void SetHasLayoutDependsOnOtherModules()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= VMFLAG_LAYOUT_DEPENDS_ON_OTHER_MODULES;
+ }
+#endif
+
+ // Is this a delegate? Returns FALSE for System.Delegate and System.MulticastDelegate.
+ inline BOOL IsDelegate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_VMFlags & VMFLAG_DELEGATE;
+ }
+ inline void SetIsDelegate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= VMFLAG_DELEGATE;
+ }
+
+ // This is only applicable to interfaces. This method does not
+ // provide correct information for non-interface types.
+ DWORD SomeMethodsRequireInheritanceCheck();
+ void SetSomeMethodsRequireInheritanceCheck();
+
+ BOOL ContainsStackPtr()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_VMFlags & VMFLAG_CONTAINS_STACK_PTR;
+ }
+ void SetContainsStackPtr()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= (DWORD)VMFLAG_CONTAINS_STACK_PTR;
+ }
+ BOOL HasFixedAddressVTStatics()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_VMFlags & VMFLAG_FIXED_ADDRESS_VT_STATICS;
+ }
+#ifdef FEATURE_COMINTEROP
+ BOOL IsSparseForCOMInterop()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_VMFlags & VMFLAG_SPARSE_FOR_COMINTEROP;
+ }
+ BOOL IsProjectedFromWinRT()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_VMFlags & VMFLAG_PROJECTED_FROM_WINRT;
+ }
+ BOOL IsExportedToWinRT()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_VMFlags & VMFLAG_EXPORTED_TO_WINRT;
+ }
+ BOOL IsMarshalingTypeSet()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_VMFlags & VMFLAG_MARSHALINGTYPE_MASK);
+ }
+ BOOL IsMarshalingTypeFreeThreaded()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((m_VMFlags & VMFLAG_MARSHALINGTYPE_MASK) == VMFLAG_MARSHALINGTYPE_FREETHREADED);
+ }
+ BOOL IsMarshalingTypeInhibit()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((m_VMFlags & VMFLAG_MARSHALINGTYPE_MASK) == VMFLAG_MARSHALINGTYPE_INHIBIT);
+ }
+ BOOL IsMarshalingTypeStandard()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((m_VMFlags & VMFLAG_MARSHALINGTYPE_MASK) == VMFLAG_MARSHALINGTYPE_STANDARD);
+ }
+#endif // FEATURE_COMINTEROP
+ BOOL HasLayout()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_VMFlags & VMFLAG_HASLAYOUT;
+ }
+ BOOL HasOverLayedField()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_VMFlags & VMFLAG_HASOVERLAYEDFIELDS;
+ }
+ BOOL IsNested()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_VMFlags & VMFLAG_ISNESTED;
+ }
+ BOOL HasFieldsWhichMustBeInited()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_VMFlags & VMFLAG_HAS_FIELDS_WHICH_MUST_BE_INITED);
+ }
+ void SetHasFieldsWhichMustBeInited()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= (DWORD)VMFLAG_HAS_FIELDS_WHICH_MUST_BE_INITED;
+ }
+#ifdef FEATURE_REMOTING
+ DWORD CannotBeBlittedByObjectCloner()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_VMFlags & VMFLAG_CANNOT_BE_BLITTED_BY_OBJECT_CLONER);
+ }
+ void SetCannotBeBlittedByObjectCloner()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= (DWORD)VMFLAG_CANNOT_BE_BLITTED_BY_OBJECT_CLONER;
+ }
+#else
+ void SetCannotBeBlittedByObjectCloner()
+ {
+ /* no op */
+ }
+#endif
+#ifdef FEATURE_LEGACYNETCF
+ DWORD IsTypeValidOnNetCF()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_VMFlags & VMFLAG_TYPE_VALID_ON_NETCF);
+ }
+ void SetTypeValidOnNetCF()
+ {
+ WRAPPER_NO_CONTRACT;
+ FastInterlockOr(EnsureWritablePages(&m_VMFlags), VMFLAG_TYPE_VALID_ON_NETCF);
+ }
+#endif
+ DWORD HasNonPublicFields()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_VMFlags & VMFLAG_HASNONPUBLICFIELDS);
+ }
+ void SetHasNonPublicFields()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= (DWORD)VMFLAG_HASNONPUBLICFIELDS;
+ }
+ DWORD IsNotTightlyPacked()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_VMFlags & VMFLAG_NOT_TIGHTLY_PACKED);
+ }
+ void SetIsNotTightlyPacked()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= (DWORD)VMFLAG_NOT_TIGHTLY_PACKED;
+ }
+ DWORD ContainsMethodImpls()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_VMFlags & VMFLAG_CONTAINS_METHODIMPLS);
+ }
+ void SetContainsMethodImpls()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_VMFlags |= (DWORD)VMFLAG_CONTAINS_METHODIMPLS;
+ }
+
+
+ BOOL IsManagedSequential();
+
+ BOOL HasExplicitSize();
+
+ static void GetBestFitMapping(MethodTable * pMT, BOOL *pfBestFitMapping, BOOL *pfThrowOnUnmappableChar);
+
+ /*
+ * Security attributes for the class are stored here. Do not update this field after the
+ * class is constructed without also updating the enum_flag_NoSecurityProperties on the
+ * methodtable.
+ */
+ inline SecurityProperties* GetSecurityProperties()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return HasOptionalFields() ? &GetOptionalFields()->m_SecProps : NULL;
+ }
+
+
+ /*
+ * The CorElementType for this class (most classes = ELEMENT_TYPE_CLASS)
+ */
+public:
+ // This is what would be used in the calling convention for this type.
+ CorElementType GetInternalCorElementType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return CorElementType(m_NormType);
+ }
+ void SetInternalCorElementType (CorElementType _NormType)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_NormType = static_cast<BYTE>(_NormType);
+ }
+
+ /*
+ * Chain of MethodDesc chunks for the MethodTable
+ */
+public:
+ inline PTR_MethodDescChunk GetChunks();
+
+#ifndef DACCESS_COMPILE
+ inline void SetChunks (MethodDescChunk* pChunks)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pChunks.SetValueMaybeNull(PTR_HOST_MEMBER_TADDR(EEClass, this, m_pChunks), pChunks);
+ }
+#endif // !DACCESS_COMPILE
+ void AddChunk (MethodDescChunk* pNewChunk);
+
+ void AddChunkIfItHasNotBeenAdded (MethodDescChunk* pNewChunk);
+
+ inline PTR_GuidInfo GetGuidInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pGuidInfo;
+ }
+
+ inline void SetGuidInfo(GuidInfo* pGuidInfo)
+ {
+ WRAPPER_NO_CONTRACT;
+ #ifndef DACCESS_COMPILE
+ *EnsureWritablePages(&m_pGuidInfo) = pGuidInfo;
+ #endif // !DACCESS_COMPILE
+ }
+
+ // Cached class level reliability contract info, see ConstrainedExecutionRegion.cpp for details.
+ DWORD GetReliabilityContract();
+
+ inline void SetReliabilityContract(DWORD dwValue)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasOptionalFields());
+ GetOptionalFields()->m_dwReliabilityContract = dwValue;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ inline TypeHandle GetCoClassForInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasOptionalFields());
+ return GetOptionalFields()->m_pCoClassForIntf;
+ }
+
+ inline void SetCoClassForInterface(TypeHandle th)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(HasOptionalFields());
+ *EnsureWritablePages(&GetOptionalFields()->m_pCoClassForIntf) = th;
+ }
+
+ inline WinMDAdapter::RedirectedTypeIndex GetWinRTRedirectedTypeIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return HasOptionalFields() ? GetOptionalFields()->m_WinRTRedirectedTypeIndex
+ : WinMDAdapter::RedirectedTypeIndex_Invalid;
+ }
+
+ inline void SetWinRTRedirectedTypeIndex(WinMDAdapter::RedirectedTypeIndex index)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasOptionalFields());
+ _ASSERTE(index != WinMDAdapter::RedirectedTypeIndex_Invalid);
+ GetOptionalFields()->m_WinRTRedirectedTypeIndex = index;
+ }
+#endif // FEATURE_COMINTEROP
+
+ inline UINT32 GetNativeSize()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_cbNativeSize;
+ }
+ static UINT32 GetOffsetOfNativeSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (UINT32)(offsetof(EEClass, m_cbNativeSize));
+ }
+ void SetNativeSize(UINT32 nativeSize)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_cbNativeSize = nativeSize;
+ }
+#ifdef FEATURE_COMINTEROP
+ OBJECTHANDLE GetOHDelegate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ohDelegate;
+ }
+ void SetOHDelegate (OBJECTHANDLE _ohDelegate)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_ohDelegate = _ohDelegate;
+ }
+ // Set the COM interface type.
+ CorIfaceAttr GetComInterfaceType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ComInterfaceType;
+ }
+
+ void SetComInterfaceType(CorIfaceAttr ItfType)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsInterface());
+ EnsureWritablePages(this);
+ m_ComInterfaceType = ItfType;
+ }
+
+ inline ComCallWrapperTemplate *GetComCallWrapperTemplate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pccwTemplate;
+ }
+ inline BOOL SetComCallWrapperTemplate(ComCallWrapperTemplate *pTemplate)
+ {
+ WRAPPER_NO_CONTRACT;
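+ // Atomically publish the template; only the thread that transitions the
+ // field from NULL wins the race and gets TRUE back.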
+ return (InterlockedCompareExchangeT(EnsureWritablePages(&m_pccwTemplate), pTemplate, NULL) == NULL);
+ }
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ inline ClassFactoryBase *GetComClassFactory()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return HasOptionalFields() ? GetOptionalFields()->m_pClassFactory : NULL;
+ }
+ inline BOOL SetComClassFactory(ClassFactoryBase *pFactory)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(HasOptionalFields());
+ return (InterlockedCompareExchangeT(EnsureWritablePages(&GetOptionalFields()->m_pClassFactory), pFactory, NULL) == NULL);
+ }
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+#endif // FEATURE_COMINTEROP
+
+
+public:
+ PTR_DictionaryLayout GetDictionaryLayout()
+ {
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+ return HasOptionalFields() ? GetOptionalFields()->m_pDictLayout : NULL;
+ }
+
+ void SetDictionaryLayout(PTR_DictionaryLayout pLayout)
+ {
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(HasOptionalFields());
+ GetOptionalFields()->m_pDictLayout = pLayout;
+ }
+
+ static CorGenericParamAttr GetVarianceOfTypeParameter(BYTE * pbVarianceInfo, DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (pbVarianceInfo == NULL)
+ return gpNonVariant;
+ else
+ return (CorGenericParamAttr) (pbVarianceInfo[i]);
+ }
+
+ CorGenericParamAttr GetVarianceOfTypeParameter(DWORD i)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetVarianceOfTypeParameter(GetVarianceInfo(), i);
+ }
+
+ BYTE* GetVarianceInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return HasOptionalFields() ? GetOptionalFields()->m_pVarianceInfo : NULL;
+ }
+
+ void SetVarianceInfo(BYTE *pVarianceInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasOptionalFields());
+ GetOptionalFields()->m_pVarianceInfo = pVarianceInfo;
+ }
+
+#ifndef BINDER
+ // Check that a signature blob uses type parameters correctly
+ // in accordance with the variance annotations specified by this class.
+ // The position parameter indicates the variance of the context we're in:
+ // a result type is gpCovariant, argument types are gpContravariant, and deeper
+ // in a signature we might be gpNonVariant, e.g. in a pointer type or a
+ // non-variant generic type.
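+ // For example, for an interface I<out T> (T covariant), using T as a method
+ // result type is legal, while using T as a method argument type is not.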
+ static BOOL
+ CheckVarianceInSig(
+ DWORD numGenericArgs,
+ BYTE *pVarianceInfo,
+ Module * pModule,
+ SigPointer sp,
+ CorGenericParamAttr position);
+#endif
+
+
+#if defined(CHECK_APP_DOMAIN_LEAKS) || defined(_DEBUG)
+public:
+ enum{
+ AUXFLAG_APP_DOMAIN_AGILE = 0x00000001,
+ AUXFLAG_CHECK_APP_DOMAIN_AGILE = 0x00000002,
+ AUXFLAG_APP_DOMAIN_AGILITY_DONE = 0x00000004,
+ AUXFLAG_DESTROYED = 0x00000008, // The Destruct() method has already been called on this class
+ };
+
+ inline DWORD GetAuxFlagsRaw()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_wAuxFlags;
+ }
+ inline DWORD* GetAuxFlagsPtr()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (DWORD*)(&m_wAuxFlags);
+ }
+ inline void SetAuxFlags(DWORD flag)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wAuxFlags |= (WORD)flag;
+ }
+
+ // This flag is set (in a checked build only?) for classes whose
+ // instances are always app domain agile. This can
+ // be either because of type system guarantees or because
+ // the class is explicitly marked.
+ inline BOOL IsAppDomainAgile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_wAuxFlags & AUXFLAG_APP_DOMAIN_AGILE);
+ }
+ inline void SetAppDomainAgile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wAuxFlags |= AUXFLAG_APP_DOMAIN_AGILE;
+ }
+ // This flag is set in a checked build for classes whose
+ // instances may be marked app domain agile, but agility
+ // isn't guaranteed by type safety. The JIT will compile
+ // in extra checks to field assignment on some fields
+ // in such a class.
+ inline BOOL IsCheckAppDomainAgile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_wAuxFlags & AUXFLAG_CHECK_APP_DOMAIN_AGILE);
+ }
+
+ inline void SetCheckAppDomainAgile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wAuxFlags |= AUXFLAG_CHECK_APP_DOMAIN_AGILE;
+ }
+
+ // This flag is set in a checked build to indicate that the
+ // appdomain agility for a class has been set. This is used
+ // for debugging purposes to make sure that we don't allocate
+ // an object before the agility is set.
+ inline BOOL IsAppDomainAgilityDone()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_wAuxFlags & AUXFLAG_APP_DOMAIN_AGILITY_DONE);
+ }
+ inline void SetAppDomainAgilityDone()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wAuxFlags |= AUXFLAG_APP_DOMAIN_AGILITY_DONE;
+ }
+ //
+ // This predicate checks whether or not the class is "naturally"
+ // app domain agile - that is:
+ // (1) it is in the system domain
+ // (2) all the fields are app domain agile
+ // (3) it has no finalizer
+ //
+ // Or, this also returns true for a proxy type which is allowed
+ // to have cross app domain refs.
+ //
+ inline BOOL IsTypesafeAppDomainAgile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return IsAppDomainAgile() && !IsCheckAppDomainAgile();
+ }
+ //
+ // This predicate tests whether any instances are allowed
+ // to be app domain agile.
+ //
+ inline BOOL IsNeverAppDomainAgile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return !IsAppDomainAgile() && !IsCheckAppDomainAgile();
+ }
+ static void SetAppDomainAgileAttribute(MethodTable * pMT);
+
+ static void GetPredefinedAgility(Module *pModule, mdTypeDef td, BOOL *pfIsAgile, BOOL *pfIsCheckAgile);
+#endif // defined(CHECK_APP_DOMAIN_LEAKS) || defined(_DEBUG)
+
+ //-------------------------------------------------------------
+ // CONCRETE DATA LAYOUT
+ //
+ // Although accessed far less frequently than MethodTables, EEClasses are still
+ // pulled into working set, especially at startup. This has motivated several space
+ // optimizations in field layout where each is balanced against the need to access
+ // a particular field efficiently.
+ //
+ // Currently, the following strategy is used:
+ //
+ // - Any field that has a default value for the vast majority of EEClass instances
+ // should be stored in the EEClassOptionalFields (see header comment)
+ //
+ // - Any field that is nearly always a small positive integer and is infrequently
+ // accessed should be in the EEClassPackedFields (see header comment)
+ //
+ // If none of these categories apply - such as for always-meaningful pointer members or
+ // sets of flags - a full field is used. Please avoid adding such members if possible.
+ //-------------------------------------------------------------
+
+ // @TODO: needed for asm code in cgenx86.cpp. Can this enum be private?
+ //
+ // Flags for m_VMFlags
+ //
+public:
+ enum
+ {
+#ifdef FEATURE_READYTORUN
+ VMFLAG_LAYOUT_DEPENDS_ON_OTHER_MODULES = 0x00000001,
+#endif
+ VMFLAG_DELEGATE = 0x00000002,
+
+ //Desktop
+ // --------------
+ //Flag | All Transparent | Critical | All Critical | TreatAsSafe
+ //TRANSPARENT | 0 | 0 | 0 | 0
+ //ALL_TRANSPARENT | 1 | 0 | 0 | 0
+ //CRITICAL | 0 | 1 | 0 | 0
+ //TAS_CRITICAL | 0 | 1 | 0 | 1
+ //ALLCRITICAL | 0 | 0 | 1 | 0
+ //ALLCRITICAL_TAS | 0 | 0 | 1 | 1
+ //TAS_NOTCRITICAL | 0 | 0 | 0 | 1
+ //
+ //
+ //On CoreCLR, TAS implies Critical, and "All Critical" and "Critical" are the same thing.
+ //CoreCLR
+ // --------------
+ //Flag | All Transparent | Critical | TreatAsSafe
+ //TRANSPARENT | 0 | 0 | 0
+ //ALL_TRANSPARENT | 1 | 0 | 0
+ //CRITICAL | 0 | 1 | 0
+ //TAS_CRITICAL | 0 | 1 | 1
+ VMFLAG_TRANSPARENCY_MASK = 0x0000001c,
+ VMFLAG_TRANSPARENCY_UNKNOWN = 0x00000000,
+ VMFLAG_TRANSPARENCY_TRANSPARENT = 0x00000004,
+ VMFLAG_TRANSPARENCY_ALL_TRANSPARENT = 0x00000008,
+ VMFLAG_TRANSPARENCY_CRITICAL = 0x0000000c,
+ VMFLAG_TRANSPARENCY_CRITICAL_TAS = 0x00000010,
+ VMFLAG_TRANSPARENCY_ALLCRITICAL = 0x00000014,
+ VMFLAG_TRANSPARENCY_ALLCRITICAL_TAS = 0x00000018,
+ VMFLAG_TRANSPARENCY_TAS_NOTCRITICAL = 0x0000001c,
+
+ VMFLAG_FIXED_ADDRESS_VT_STATICS = 0x00000020, // Value type Statics in this class will be pinned
+ VMFLAG_HASLAYOUT = 0x00000040,
+ VMFLAG_ISNESTED = 0x00000080,
+#ifdef FEATURE_REMOTING
+ VMFLAG_CANNOT_BE_BLITTED_BY_OBJECT_CLONER = 0x00000100, // This class has GC type fields, or implements ISerializable or has non-Serializable fields
+#endif
+#ifdef FEATURE_LEGACYNETCF
+ VMFLAG_TYPE_VALID_ON_NETCF = 0x00000100, // This type would successfully load on NetCF
+#endif
+
+ VMFLAG_IS_EQUIVALENT_TYPE = 0x00000200,
+
+ // OVERLAYED is used to detect whether Equals can safely optimize to a bit-compare across the structure.
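+ // (An example would be an explicit-layout struct in which two fields are given
+ // the same FieldOffset.)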
+ VMFLAG_HASOVERLAYEDFIELDS = 0x00000400,
+
+ // Set this if this class or its parent has instance fields which
+ // must be explicitly inited in a constructor (e.g. pointers of any
+ // kind, gc or native).
+ //
+ // Currently this is used by the verifier when verifying value classes
+ // - it's ok to use uninitialized value classes if there are no
+ // pointer fields in them.
+ VMFLAG_HAS_FIELDS_WHICH_MUST_BE_INITED = 0x00000800,
+
+ VMFLAG_UNSAFEVALUETYPE = 0x00001000,
+
+ VMFLAG_BESTFITMAPPING_INITED = 0x00002000, // VMFLAG_BESTFITMAPPING and VMFLAG_THROWONUNMAPPABLECHAR are valid only if this is set
+ VMFLAG_BESTFITMAPPING = 0x00004000, // BestFitMappingAttribute.Value
+ VMFLAG_THROWONUNMAPPABLECHAR = 0x00008000, // BestFitMappingAttribute.ThrowOnUnmappableChar
+
+ VMFLAG_NOSUPPRESSUNMGDCODEACCESS = 0x00010000,
+ VMFLAG_NO_GUID = 0x00020000,
+ VMFLAG_HASNONPUBLICFIELDS = 0x00040000,
+ VMFLAG_REMOTING_PROXY_ATTRIBUTE = 0x00080000,
+ VMFLAG_CONTAINS_STACK_PTR = 0x00100000,
+ VMFLAG_PREFER_ALIGN8 = 0x00200000, // Would like to have 8-byte alignment
+ VMFLAG_METHODS_REQUIRE_INHERITANCE_CHECKS = 0x00400000,
+
+#ifdef FEATURE_COMINTEROP
+ VMFLAG_SPARSE_FOR_COMINTEROP = 0x00800000,
+ // interfaces may have a coclass attribute
+ VMFLAG_HASCOCLASSATTRIB = 0x01000000,
+ VMFLAG_COMEVENTITFMASK = 0x02000000, // class is a special COM event interface
+ VMFLAG_PROJECTED_FROM_WINRT = 0x04000000,
+ VMFLAG_EXPORTED_TO_WINRT = 0x08000000,
+#endif // FEATURE_COMINTEROP
+
+ // This one indicates that the fields of the value type are
+ // not tightly packed and is used to check whether we can
+ // do bit-equality on value types to implement ValueType::Equals.
+ // It is not valid for classes, and only matters if ContainsPointers()
+ // is false.
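+ // For example, a value type declaring a BYTE followed by an INT32 carries
+ // three bytes of padding between the two fields, so a raw memory compare
+ // could observe garbage in the padding.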
+ VMFLAG_NOT_TIGHTLY_PACKED = 0x10000000,
+
+ // True if methoddesc on this class have any real (non-interface) methodimpls
+ VMFLAG_CONTAINS_METHODIMPLS = 0x20000000,
+
+#ifdef FEATURE_COMINTEROP
+ VMFLAG_MARSHALINGTYPE_MASK = 0xc0000000,
+
+ VMFLAG_MARSHALINGTYPE_INHIBIT = 0x40000000,
+ VMFLAG_MARSHALINGTYPE_FREETHREADED = 0x80000000,
+ VMFLAG_MARSHALINGTYPE_STANDARD = 0xc0000000,
+#endif
+ };
+
+public:
+ // C_ASSERTs in jitinterface.cpp need this to be public to check the offset.
+ // Put it first so the offset rarely changes, which reduces the number of times we have to fiddle with it.
+ PTR_GuidInfo m_pGuidInfo; // The cached guid information for interfaces.
+
+#ifdef _DEBUG
+public:
+ LPCUTF8 m_szDebugClassName;
+ BOOL m_fDebuggingClass;
+#endif
+
+private:
+ // Lay out the rest of the fields below from largest to smallest to lessen the chance of wasting bytes
+ // on compiler-injected padding (especially with the difference between pointers and DWORDs on 64-bit).
+ RelativePointer<PTR_EEClassOptionalFields> m_rpOptionalFields;
+
+ // TODO: Remove this field. It is only used by SOS and object validation for stress.
+ PTR_MethodTable m_pMethodTable;
+
+ RelativePointer<PTR_FieldDesc> m_pFieldDescList;
+ RelativePointer<PTR_MethodDescChunk> m_pChunks;
+
+ union
+ {
+ // valid only if EEClass::IsBlittable() or EEClass::HasLayout() is true
+ UINT32 m_cbNativeSize; // size of fixed portion in bytes
+
+#ifdef FEATURE_COMINTEROP
+ // For COM+ wrapper objects that extend an unmanaged class, this field
+ // may contain a delegate to be called to allocate the aggregated
+ // unmanaged class (instead of using CoCreateInstance).
+ OBJECTHANDLE m_ohDelegate;
+
+ // For interfaces this contains the COM interface type.
+ CorIfaceAttr m_ComInterfaceType;
+#endif // FEATURE_COMINTEROP
+ };
+
+#ifdef FEATURE_COMINTEROP
+ ComCallWrapperTemplate *m_pccwTemplate; // points to interop data structures used when this type is exposed to COM
+#endif // FEATURE_COMINTEROP
+
+ DWORD m_dwAttrClass;
+ DWORD m_VMFlags;
+
+ /*
+ * We maintain some auxiliary flags in DEBUG or CHECK_APP_DOMAIN_LEAKS builds;
+ * this frees up some bits in m_VMFlags.
+ */
+#if defined(CHECK_APP_DOMAIN_LEAKS) || defined(_DEBUG)
+ WORD m_wAuxFlags;
+#endif
+
+ // NOTE: The following BYTE fields are laid out together so they'll fit within the same DWORD for efficient
+ // structure packing.
+ BYTE m_NormType;
+ BYTE m_fFieldsArePacked; // TRUE iff fields pointed to by GetPackedFields() are in packed state
+ BYTE m_cbFixedEEClassFields; // Count of bytes of normal fields of this instance (EEClass,
+ // LayoutEEClass etc.). Doesn't count bytes of "packed" fields
+ BYTE m_cbBaseSizePadding; // How many bytes of padding are included in BaseSize
+
+public:
+ // EEClass optional field support. Whether a particular EEClass instance has optional fields is determined
+ // at class load time. The entire EEClassOptionalFields structure is allocated if the EEClass has need of
+ // one or more optional fields.
+
+#ifndef DACCESS_COMPILE
+ void AttachOptionalFields(EEClassOptionalFields *pFields)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_rpOptionalFields.IsNull());
+
+ m_rpOptionalFields.SetValue(pFields);
+ }
+#endif // !DACCESS_COMPILE
+
+ bool HasOptionalFields()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return !m_rpOptionalFields.IsNull();
+ }
+
+ PTR_EEClassOptionalFields GetOptionalFields()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_rpOptionalFields.GetValueMaybeNull(PTR_HOST_MEMBER_TADDR(EEClass, this, m_rpOptionalFields));
+ }
+
+private:
+ //
+ // Support for packed fields.
+ //
+
+ // Get pointer to the packed fields structure attached to this instance.
+ PTR_EEClassPackedFields GetPackedFields();
+
+ // Get the value of the given field. Works regardless of whether the field is currently in its packed or
+ // unpacked state.
+ DWORD GetPackableField(EEClassFieldId eField);
+
+ // Set the value of the given field. The field *must* be in the unpacked state for this to be legal (in
+ // practice all packable fields must be initialized during class construction and from then on remain
+ // immutable).
+ void SetPackableField(EEClassFieldId eField, DWORD dwValue);
+
+ //-------------------------------------------------------------
+ // END CONCRETE DATA LAYOUT
+ //-------------------------------------------------------------
+
+
+
+ /************************************
+ * PROTECTED METHODS
+ ************************************/
+protected:
+#ifndef DACCESS_COMPILE
+ /*
+ * Constructor: prevent any other class from doing a new()
+ */
+ EEClass(DWORD cbFixedEEClassFields);
+
+ /*
+ * Destructor: prevent any other class from deleting
+ */
+ ~EEClass()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+#endif // !DACCESS_COMPILE
+
+};
+
+// --------------------------------------------------------------------------------------------
+template <typename Data>
+class FixedCapacityStackingAllocatedUTF8StringHash
+{
+public:
+ // Entry
+ struct HashEntry
+ {
+ HashEntry * m_pNext; // Next item with same bucketed hash value
+ DWORD m_dwHashValue; // Hash value
+ LPCUTF8 m_pKey; // String key
+ Data m_data; // Data
+ };
+
+ HashEntry ** m_pBuckets; // Pointer to first entry for each bucket
+ DWORD m_dwNumBuckets;
+ BYTE * m_pMemory; // Current pointer into pre-allocated memory for entries
+ BYTE * m_pMemoryStart; // Start pointer of pre-allocated memory for entries
+
+ INDEBUG(BYTE * m_pDebugEndMemory;)
+
+ FixedCapacityStackingAllocatedUTF8StringHash()
+ : m_pMemoryStart(NULL)
+ { LIMITED_METHOD_CONTRACT; }
+
+ static DWORD
+ GetHashCode(
+ LPCUTF8 szString)
+ { WRAPPER_NO_CONTRACT; return HashStringA(szString); }
+
+ // Throws on error
+ void
+ Init(
+ DWORD dwMaxEntries,
+ StackingAllocator * pAllocator);
+
+ // Insert new entry at head of list
+ void
+ Insert(
+ LPCUTF8 pszName,
+ const Data & data);
+
+ // Return the first matching entry in the list, or NULL if there is no such entry
+ HashEntry *
+ Lookup(
+ LPCUTF8 pszName);
+
+ // Return the next matching entry in the list, or NULL if there is no such entry.
+ HashEntry *
+ FindNext(
+ HashEntry * pEntry);
+};
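+
+// Typical usage (an illustrative sketch; dwMaxEntries, pStackingAllocator and
+// the DWORD payload are assumptions supplied by the caller):
+//
+//     FixedCapacityStackingAllocatedUTF8StringHash<DWORD> hash;
+//     hash.Init(dwMaxEntries, pStackingAllocator); // throws on error
+//     hash.Insert("MyMethod", dwData);
+//     typedef FixedCapacityStackingAllocatedUTF8StringHash<DWORD>::HashEntry Entry;
+//     for (Entry * pEntry = hash.Lookup("MyMethod"); pEntry != NULL; pEntry = hash.FindNext(pEntry))
+//     {
+//         // consume pEntry->m_data; Lookup/FindNext walk all entries whose key matches
+//     }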
+
+
+//---------------------------------------------------------------------------------------
+//
+class LayoutEEClass : public EEClass
+{
+public:
+ EEClassLayoutInfo m_LayoutInfo;
+
+#ifndef DACCESS_COMPILE
+ LayoutEEClass() : EEClass(sizeof(LayoutEEClass))
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef _DEBUG
+ FillMemory(&m_LayoutInfo, sizeof(m_LayoutInfo), 0xcc);
+#endif
+ }
+#endif // !DACCESS_COMPILE
+};
+
+class UMThunkMarshInfo;
+
+#ifdef FEATURE_COMINTEROP
+struct ComPlusCallInfo;
+#endif // FEATURE_COMINTEROP
+
+class DelegateEEClass : public EEClass
+{
+public:
+ PTR_Stub m_pStaticCallStub;
+ PTR_Stub m_pInstRetBuffCallStub;
+ PTR_MethodDesc m_pInvokeMethod;
+ PTR_Stub m_pMultiCastInvokeStub;
+ UMThunkMarshInfo* m_pUMThunkMarshInfo;
+ PTR_MethodDesc m_pBeginInvokeMethod;
+ PTR_MethodDesc m_pEndInvokeMethod;
+ Volatile<PCODE> m_pMarshalStub;
+
+#ifdef FEATURE_COMINTEROP
+ ComPlusCallInfo *m_pComPlusCallInfo;
+#endif // FEATURE_COMINTEROP
+
+ //
+ // NGen'ed IL stub MethodDescs. Fixed up, wrapped with code:Stub, and installed to
+ // m_pMarshalStub (forward) or m_pUMThunkMarshInfo (reverse) when first needed.
+ //
+ MethodDesc* m_pForwardStubMD; // marshaling stub for calls to unmanaged code
+ MethodDesc* m_pReverseStubMD; // marshaling stub for calls from unmanaged code
+
+#ifndef DACCESS_COMPILE
+ DelegateEEClass() : EEClass(sizeof(DelegateEEClass))
+ {
+ LIMITED_METHOD_CONTRACT;
+ // Note: Memory allocated on loader heap is zero filled
+ }
+
+ // We need a LoaderHeap that lives at least as long as the DelegateEEClass, but ideally no longer
+ LoaderHeap *GetStubHeap();
+#endif // !DACCESS_COMPILE
+
+};
+
+
+typedef DPTR(ArrayClass) PTR_ArrayClass;
+
+#ifdef BINDER
+class MdilModule;
+#endif
+
+// Dynamically generated array class structure
+class ArrayClass : public EEClass
+{
+#ifdef FEATURE_PREJIT
+ friend void EEClass::Fixup(DataImage *image, MethodTable *pMethodTable);
+#endif
+
+#ifndef BINDER
+ friend MethodTable* Module::CreateArrayMethodTable(TypeHandle elemTypeHnd, CorElementType arrayKind, unsigned Rank, AllocMemTracker *pamTracker);
+#else
+ friend MdilModule;
+#endif
+#ifndef DACCESS_COMPILE
+ ArrayClass() : EEClass(sizeof(ArrayClass)) { LIMITED_METHOD_CONTRACT; }
+#else
+ friend class NativeImageDumper;
+#endif
+
+private:
+
+ unsigned char m_rank;
+ CorElementType m_ElementType; // Cache of the element type in m_ElementTypeHnd
+
+public:
+ DWORD GetRank() {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_rank;
+ }
+ void SetRank (unsigned Rank) {
+ LIMITED_METHOD_CONTRACT;
+ // The only code path calling this function is code:ClassLoader::CreateTypeHandleForTypeKey, which has
+ // already checked the rank. Assert that the rank is at most MAX_RANK and that it fits in one byte.
+ _ASSERTE((Rank <= MAX_RANK) && (Rank <= (unsigned char)(-1)));
+ m_rank = (unsigned char)Rank;
+ }
+
+ CorElementType GetArrayElementType() {
+ LIMITED_METHOD_CONTRACT;
+ return m_ElementType;
+ }
+ void SetArrayElementType(CorElementType ElementType) {
+ LIMITED_METHOD_CONTRACT;
+ m_ElementType = ElementType;
+ }
+
+
+ // Allocate a new MethodDesc for the methods we add to this class
+ void InitArrayMethodDesc(
+ ArrayMethodDesc* pNewMD,
+ PCCOR_SIGNATURE pShortSig,
+ DWORD cShortSig,
+ DWORD dwVtableSlot,
+ LoaderAllocator *pLoaderAllocator,
+ AllocMemTracker *pamTracker);
+
+ // Generate a short sig for an array accessor
+ VOID GenerateArrayAccessorCallSig(DWORD dwRank,
+ DWORD dwFuncType, // Load, store, or <init>
+ PCCOR_SIGNATURE *ppSig, // Generated signature
+ DWORD * pcSig, // Generated signature size
+ LoaderAllocator *pLoaderAllocator,
+ AllocMemTracker *pamTracker
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+ ,BOOL fForStubAsIL
+#endif
+ );
+
+
+};
+
+inline EEClassLayoutInfo *EEClass::GetLayoutInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasLayout());
+ return &((LayoutEEClass *) this)->m_LayoutInfo;
+}
+
+inline BOOL EEClass::IsBlittable()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Either we have an opaque bunch of bytes, or we have some fields that are
+ // all isomorphic and explicitly laid out.
+ return (HasLayout() && GetLayoutInfo()->IsBlittable());
+}
+
+inline BOOL EEClass::IsManagedSequential()
+{
+ LIMITED_METHOD_CONTRACT;
+ return HasLayout() && GetLayoutInfo()->IsManagedSequential();
+}
+
+inline BOOL EEClass::HasExplicitSize()
+{
+ LIMITED_METHOD_CONTRACT;
+ return HasLayout() && GetLayoutInfo()->HasExplicitSize();
+}
+
+//==========================================================================
+// These routines manage the prestub (a bootstrapping stub that all
+// MethodDescs are initialized with).
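+//
+// A freshly allocated MethodDesc gets ThePreStub as its temporary entry point;
+// the first call through it prepares the method (typically by JIT-compiling it)
+// and backpatches the real entry point so that subsequent calls go straight through.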
+//==========================================================================
+VOID InitPreStubManager();
+
+EXTERN_C void STDCALL ThePreStub();
+
+inline PCODE GetPreStubEntryPoint()
+{
+ return GetEEFuncEntryPoint(ThePreStub);
+}
+
+PCODE TheUMThunkPreStub();
+
+PCODE TheVarargNDirectStub(BOOL hasRetBuffArg);
+
+
+
+// Workaround: these classification bits badly need cleanup. For now, this gets
+// around IJW setting both mdUnmanagedExport & mdPinvokeImpl on exported methods.
+#define IsReallyMdPinvokeImpl(x) ( ((x) & mdPinvokeImpl) && !((x) & mdUnmanagedExport) )
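+
+// Illustrative use (dwMemberAttrs being a hypothetical DWORD of mdMethodDef flags):
+//
+//     if (IsReallyMdPinvokeImpl(dwMemberAttrs))
+//     {
+//         // treat the method as a genuine P/Invoke, not an IJW unmanaged export
+//     }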
+
+//
+// The MethodNameHash is a temporary loader structure which may be allocated if there are a large number of
+// methods in a class, to quickly get from a method name to a MethodDesc (potentially a chain of MethodDescs).
+//
+
+#define METH_NAME_CACHE_SIZE 5
+#define MAX_MISSES 3
+
+#ifdef EnC_SUPPORTED
+
+struct EnCAddedFieldElement;
+
+#endif // EnC_SUPPORTED
+
+
+// --------------------------------------------------------------------------------------------
+// For generic instantiations, the FieldDescs stored for instance
+// fields are approximate, not exact, i.e. they are representatives owned by
+// the canonical instantiation and they do not carry exact type information.
+// This will not include EnC-related fields. (See EncApproxFieldDescIterator for that.)
+class ApproxFieldDescIterator
+{
+private:
+ int m_iteratorType;
+ PTR_FieldDesc m_pFieldDescList;
+ int m_currField;
+ int m_totalFields;
+
+ public:
+ enum IteratorType {
+ INSTANCE_FIELDS = 0x1,
+ STATIC_FIELDS = 0x2,
+ ALL_FIELDS = (INSTANCE_FIELDS | STATIC_FIELDS)
+ };
+ ApproxFieldDescIterator();
+ ApproxFieldDescIterator(MethodTable *pMT, int iteratorType)
+ {
+ SUPPORTS_DAC;
+ Init(pMT, iteratorType);
+ }
+ void Init(MethodTable *pMT, int iteratorType);
+ PTR_FieldDesc Next();
+
+ int GetIteratorType() {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_iteratorType;
+ }
+
+ int Count() {
+ LIMITED_METHOD_CONTRACT;
+ return m_totalFields;
+ }
+ int CountRemaining() {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_totalFields - m_currField - 1;
+ }
+};
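+
+// Typical usage (an illustrative sketch; pMT is assumed to be a fully loaded
+// MethodTable):
+//
+//     ApproxFieldDescIterator fieldIter(pMT, ApproxFieldDescIterator::INSTANCE_FIELDS);
+//     while (PTR_FieldDesc pFD = fieldIter.Next())
+//     {
+//         // pFD is the representative FieldDesc; for generic instantiations it
+//         // belongs to the canonical instantiation and is only approximate
+//     }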
+
+//
+// DeepFieldDescIterator iterates over the entire
+// set of fields available to a class, inherited or
+// introduced.
+//
+
+class DeepFieldDescIterator
+{
+private:
+ ApproxFieldDescIterator m_fieldIter;
+ int m_numClasses;
+ int m_curClass;
+ MethodTable* m_classes[16];
+ int m_deepTotalFields;
+ bool m_lastNextFromParentClass;
+
+ bool NextClass();
+
+public:
+ DeepFieldDescIterator()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_numClasses = 0;
+ m_curClass = 0;
+ m_deepTotalFields = 0;
+ m_lastNextFromParentClass = false;
+ }
+ DeepFieldDescIterator(MethodTable* pMT, int iteratorType,
+ bool includeParents = true)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ Init(pMT, iteratorType, includeParents);
+ }
+ void Init(MethodTable* pMT, int iteratorType,
+ bool includeParents = true);
+
+ FieldDesc* Next();
+
+ bool Skip(int numSkip);
+
+ int Count()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_deepTotalFields;
+ }
+ bool IsFieldFromParentClass()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_lastNextFromParentClass;
+ }
+};
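+
+// Typical usage (an illustrative sketch; pMT again being an assumed loaded
+// MethodTable). Unlike ApproxFieldDescIterator, this also walks fields
+// introduced by parent classes:
+//
+//     DeepFieldDescIterator fieldIter(pMT, ApproxFieldDescIterator::ALL_FIELDS);
+//     while (FieldDesc * pFD = fieldIter.Next())
+//     {
+//         if (fieldIter.IsFieldFromParentClass())
+//         {
+//             // pFD was introduced by an ancestor of pMT
+//         }
+//     }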
+
+#endif // !CLASS_H
diff --git a/src/vm/class.inl b/src/vm/class.inl
new file mode 100644
index 0000000000..12c5230fd2
--- /dev/null
+++ b/src/vm/class.inl
@@ -0,0 +1,60 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: CLASS.INL
+//
+// ============================================================================
+
+#ifndef _CLASS_INL_
+#define _CLASS_INL_
+#include "constrainedexecutionregion.h"
+//***************************************************************************************
+inline PTR_MethodDescChunk EEClass::GetChunks()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pChunks.GetValueMaybeNull(PTR_HOST_MEMBER_TADDR(EEClass, this, m_pChunks));
+}
+
+//***************************************************************************************
+inline DWORD EEClass::SomeMethodsRequireInheritanceCheck()
+{
+ return (m_VMFlags & VMFLAG_METHODS_REQUIRE_INHERITANCE_CHECKS);
+}
+
+//***************************************************************************************
+inline void EEClass::SetSomeMethodsRequireInheritanceCheck()
+{
+ m_VMFlags = m_VMFlags | VMFLAG_METHODS_REQUIRE_INHERITANCE_CHECKS;
+}
+
+//*******************************************************************************
+#ifndef DACCESS_COMPILE
+// Set default values for optional fields.
+inline void EEClassOptionalFields::Init()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pDictLayout = NULL;
+ m_pVarianceInfo = NULL;
+#ifdef FEATURE_COMINTEROP
+ m_pSparseVTableMap = NULL;
+ m_pCoClassForIntf = TypeHandle();
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ m_pClassFactory = NULL;
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ m_WinRTRedirectedTypeIndex = WinMDAdapter::RedirectedTypeIndex_Invalid;
+#endif // FEATURE_COMINTEROP
+ m_cbModuleDynamicID = MODULE_NON_DYNAMIC_STATICS;
+ m_dwReliabilityContract = RC_NULL;
+ m_SecProps = 0;
+}
+#endif // !DACCESS_COMPILE
+
+#endif // _CLASS_INL_
+
diff --git a/src/vm/classcompat.cpp b/src/vm/classcompat.cpp
new file mode 100644
index 0000000000..830568b822
--- /dev/null
+++ b/src/vm/classcompat.cpp
@@ -0,0 +1,3711 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: CLASSCOMPAT.CPP
+
+// ===========================================================================
+// This file contains backward compatibility functionality for COM Interop.
+// ===========================================================================
+//
+
+
+#include "common.h"
+
+#ifndef DACCESS_COMPILE
+
+#include "clsload.hpp"
+#include "method.hpp"
+#include "class.h"
+#include "classcompat.h"
+#include "object.h"
+#include "field.h"
+#include "util.hpp"
+#include "excep.h"
+#include "threads.h"
+#include "stublink.h"
+#include "dllimport.h"
+#include "verifier.hpp"
+#include "jitinterface.h"
+#include "eeconfig.h"
+#include "log.h"
+#include "fieldmarshaler.h"
+#include "cgensys.h"
+#include "gc.h"
+#include "security.h"
+#include "dbginterface.h"
+#include "comdelegate.h"
+#include "sigformat.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "eeprofinterfaces.h"
+#include "dllimportcallback.h"
+#include "listlock.h"
+#include "methodimpl.h"
+#include "guidfromname.h"
+#include "stackprobe.h"
+#include "encee.h"
+#include "encee.h"
+#include "comsynchronizable.h"
+#include "customattribute.h"
+#include "virtualcallstub.h"
+#include "eeconfig.h"
+#include "contractimpl.h"
+#include "prettyprintsig.h"
+
+#include "comcallablewrapper.h"
+#include "clrtocomcall.h"
+#include "runtimecallablewrapper.h"
+
+#include "listlock.inl"
+#include "generics.h"
+#include "contractimpl.h"
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+ClassCompat::InterfaceInfo_t* InteropMethodTableData::FindInterface(MethodTable *pInterface)
+{
+ WRAPPER_NO_CONTRACT;
+
+ for (DWORD i = 0; i < cInterfaceMap; i++)
+ {
+ ClassCompat::InterfaceInfo_t *iMap = &pInterfaceMap[i];
+ if (iMap->m_pMethodTable->IsEquivalentTo(pInterface))
+ {
+ // Extensible RCW's need to be handled specially because they can have interfaces
+ // in their map that are added at runtime. These interfaces will have a start offset
+ // of -1 to indicate this. We cannot take for granted that every instance of this
+ // COM object has this interface so FindInterface on these interfaces is made to fail.
+ //
+ // However, we are only considering the statically available slots here
+ // (m_wNumInterface doesn't contain the dynamic slots), so we can safely
+ // ignore this detail.
+ return iMap;
+ }
+ }
+
+ return NULL;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+// get start slot for interface
+// returns MethodTable::NO_SLOT if the interface is not found
+WORD InteropMethodTableData::GetStartSlotForInterface(MethodTable* pInterface)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ClassCompat::InterfaceInfo_t* pInfo = FindInterface(pInterface);
+
+ if (pInfo != NULL)
+ {
+ WORD startSlot = pInfo->GetInteropStartSlot();
+ _ASSERTE(startSlot != MethodTable::NO_SLOT);
+ return startSlot;
+ }
+
+ return MethodTable::NO_SLOT;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+// This will return the interop slot for pMD in pMT. It will traverse the inheritance tree
+// to find a match.
+/*static*/ WORD InteropMethodTableData::GetSlotForMethodDesc(MethodTable *pMT, MethodDesc *pMD)
+{
+ while (pMT)
+ {
+ InteropMethodTableData *pData = pMT->LookupComInteropData();
+ _ASSERTE(pData);
+ for (DWORD i = 0; i < pData->cVTable; i++)
+ {
+ if (pData->pVTable[i].pMD == pMD)
+ return (WORD) i;
+ }
+ pMT = pMT->GetParentMethodTable();
+ }
+
+ return MethodTable::NO_SLOT;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+InteropMethodTableSlotDataMap::InteropMethodTableSlotDataMap(InteropMethodTableSlotData *pSlotData, DWORD cSlotData)
+{
+ m_pSlotData = pSlotData;
+ m_cSlotData = cSlotData;
+ m_iCurSlot = 0;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+InteropMethodTableSlotData *InteropMethodTableSlotDataMap::Exists_Helper(MethodDesc *pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+ for (DWORD i = 0; i < m_cSlotData; i++)
+ {
+ if (m_pSlotData[i].pDeclMD == pMD)
+ {
+ return (&m_pSlotData[i]);
+ }
+ }
+
+ return (NULL);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+BOOL InteropMethodTableSlotDataMap::Exists(MethodDesc *pMD)
+{
+ return (Exists_Helper(pMD) != NULL);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+InteropMethodTableSlotData *InteropMethodTableSlotDataMap::GetData(MethodDesc *pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+ InteropMethodTableSlotData *pEntry = Exists_Helper(pMD);
+
+ if (pEntry)
+ return pEntry;
+
+ pEntry = GetNewEntry();
+ pEntry->pMD = pMD;
+ pEntry->pDeclMD = pMD;
+ return (pEntry);
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+InteropMethodTableSlotData *InteropMethodTableSlotDataMap::GetNewEntry()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(m_iCurSlot < m_cSlotData);
+ InteropMethodTableSlotData *pEntry = &m_pSlotData[m_iCurSlot++];
+ pEntry->pMD = NULL;
+ pEntry->wFlags = 0;
+ pEntry->wSlot = MethodTable::NO_SLOT;
+ pEntry->pDeclMD = NULL;
+ return (pEntry);
+}
+
+namespace ClassCompat
+{
+
+//////////////////////////////////////////////////////////////////////////////////////////////
+InteropMethodTableData *MethodTableBuilder::BuildInteropVTable(AllocMemTracker *pamTracker)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ INSTANCE_CHECK;
+ } CONTRACTL_END;
+
+ MethodTable * pThisMT = GetHalfBakedMethodTable();
+
+ // This should never be called for interfaces or for generic types.
+ _ASSERTE(!pThisMT->IsInterface());
+ _ASSERTE(!pThisMT->ContainsGenericVariables());
+ _ASSERTE(!pThisMT->HasGenericClassInstantiationInHierarchy());
+
+ // Array method tables are created quite differently
+ if (pThisMT->IsArray())
+ return BuildInteropVTableForArray(pamTracker);
+
+#ifdef _DEBUG
+ BOOL fDump = FALSE;
+ LPCUTF8 fullName = pThisMT->GetDebugClassName();
+ if (fullName) {
+ LPWSTR wszRegName = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnInteropVTableBuild);
+ if (wszRegName) {
+ { // Poor man's narrow
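+ // Truncate each WCHAR to a single byte in place; lossy in general, but
+ // adequate for the ASCII config value being compared below.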
+ LPWSTR fromPtr = wszRegName;
+ LPUTF8 toPtr = (LPUTF8) wszRegName;
+ LPUTF8 result = toPtr;
+ while(*fromPtr != 0)
+ *toPtr++ = (char) *fromPtr++;
+ *toPtr = 0;
+ }
+ LPCUTF8 regName = (LPCUTF8) wszRegName;
+ LPCUTF8 bracket = (LPCUTF8) strchr(fullName, '[');
+ size_t len = strlen(fullName);
+ if (bracket != NULL)
+ len = bracket - fullName;
+ if (strncmp(fullName, regName, len) == 0) {
+ _ASSERTE(!"BreakOnInteropVTableBuild");
+ fDump = TRUE;
+ }
+ delete [] wszRegName;
+ }
+ }
+#endif // _DEBUG
+
+ // Get a checkpoint for the thread-based allocator
+ Thread *pThread = GetThread();
+ CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+
+ HRESULT hr = S_OK;
+ BaseDomain *bmtDomain = pThisMT->GetDomain();
+ Module *pModule = pThisMT->GetModule();
+ mdToken cl = pThisMT->GetCl();
+ MethodTable *pParentMethodTable = pThisMT->GetParentMethodTable();
+
+ // The following structs, defined as private members of MethodTableBuilder, contain the necessary local
+ // parameters needed for MethodTableBuilder
+
+ // Look at the struct definitions for a detailed list of all parameters available
+ // to MethodTableBuilder.
+
+ bmtErrorInfo bmtError;
+ bmtProperties bmtProp;
+ bmtVtable bmtVT;
+ bmtParentInfo bmtParent;
+ bmtInterfaceInfo bmtInterface;
+ bmtMethodInfo bmtMethod(pModule->GetMDImport());
+ bmtTypeInfo bmtType;
+ bmtMethodImplInfo bmtMethodImpl(pModule->GetMDImport());
+
+ //Initialize structs
+
+ bmtError.resIDWhy = IDS_CLASSLOAD_GENERAL; // Set the reason and the offending method def
+ bmtError.pThrowable = NULL;
+ bmtError.pModule = pModule;
+ bmtError.cl = cl;
+
+ bmtType.pMDImport = pModule->GetMDImport();
+ bmtType.pModule = pModule;
+ bmtType.cl = cl;
+
+ bmtParent.parentSubst = GetHalfBakedMethodTable()->GetSubstitutionForParent(NULL);
+ if (FAILED(bmtType.pMDImport->GetTypeDefProps(
+ bmtType.cl,
+ &(bmtType.dwAttr),
+ &(bmtParent.token))))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ SetBMTData(
+ bmtDomain,
+ &bmtError,
+ &bmtProp,
+ &bmtVT,
+ &bmtParent,
+ &bmtInterface,
+ &bmtMethod,
+ &bmtType,
+ &bmtMethodImpl);
+
+ // Populate the BMT data structures from the attributes of the incoming MT
+ if (pThisMT->IsValueType()) SetIsValueClass();
+ if (pThisMT->IsEnum()) SetEnum();
+ if (pThisMT->HasLayout()) SetHasLayout();
+ if (pThisMT->IsDelegate()) SetIsDelegate();
+ if (pThisMT->IsContextful()) SetContextful();
+#ifdef FEATURE_COMINTEROP
+ if(pThisMT->GetClass()->IsComClassInterface()) SetIsComClassInterface();
+#endif
+
+ // Populate the interface list - these are allocated on the thread's stacking allocator
+ //@TODO: This doesn't work for generics - fix if generics are ever exposed to COM
+ BuildingInterfaceInfo_t *pBuildingInterfaceList;
+ WORD wNumInterfaces;
+ BuildInteropVTable_InterfaceList(&pBuildingInterfaceList, &wNumInterfaces);
+
+ bmtInterface.wInterfaceMapSize = wNumInterfaces;
+
+ WORD i;
+
+ // Interfaces have a parent class of Object, but we don't really want to inherit all of
+ // Object's virtual methods, so pretend we don't have a parent class - at the bottom of this
+ // function we reset the parent method table
+ if (IsInterface())
+ {
+ pParentMethodTable = NULL;
+ }
+
+ bmtParent.pParentMethodTable = pParentMethodTable;
+
+ // Com Import classes are special
+ if (IsComImport() && !IsEnum() && !IsInterface() && !IsValueClass() && !IsDelegate())
+ {
+ _ASSERTE(pParentMethodTable == g_pBaseCOMObject || pThisMT->IsWinRTObjectType());
+ _ASSERTE(!(HasLayout()));
+
+ // if the current class is imported
+ bmtProp.fIsComObjectType = TRUE;
+ }
+
+ bmtParent.pParentMethodTable = pParentMethodTable;
+
+ if (pParentMethodTable != NULL)
+ {
+ if (pParentMethodTable->IsComObjectType())
+ {
+ // if the parent class is of ComObjectType
+ // so is the child
+ bmtProp.fIsComObjectType = TRUE;
+ }
+ }
+
+ // resolve unresolved interfaces, determine an upper bound on the size of the interface map,
+ // and determine the size of the largest interface (in # slots)
+ BuildInteropVTable_ResolveInterfaces(bmtDomain, pBuildingInterfaceList, &bmtType, &bmtInterface, &bmtVT, &bmtParent, bmtError);
+
+ // Enumerate this class's method impls
+ EnumerateMethodImpls();
+
+ // Enumerate this class's members
+ EnumerateClassMethods();
+
+ AllocateMethodWorkingMemory();
+
+ // Allocate the working memory for the interop data
+ {
+ ////////////////
+ // The interop data for the VTable for COM Interop backward compatibility
+
+ // Allocate space to hold on to the MethodDesc for each entry
+ bmtVT.ppSDVtable = new (&pThread->m_MarshalAlloc) InteropMethodTableSlotData*[bmtVT.dwMaxVtableSize];
+ ZeroMemory(bmtVT.ppSDVtable, bmtVT.dwMaxVtableSize * sizeof(InteropMethodTableSlotData*));
+
+ // Allocate space to hold on to the MethodDesc for each entry
+ bmtVT.ppSDNonVtable = new (&pThread->m_MarshalAlloc) InteropMethodTableSlotData*[NumDeclaredMethods()];
+ ZeroMemory(bmtVT.ppSDNonVtable , sizeof(InteropMethodTableSlotData*)*NumDeclaredMethods());
+
+
+ DWORD cMaxEntries = (bmtVT.dwMaxVtableSize * 2) + (NumDeclaredMethods() * 2);
+ InteropMethodTableSlotData *pInteropData = new (&pThread->m_MarshalAlloc) InteropMethodTableSlotData[cMaxEntries];
+ memset(pInteropData, 0, cMaxEntries * sizeof(InteropMethodTableSlotData));
+
+ bmtVT.pInteropData = new (&pThread->m_MarshalAlloc) InteropMethodTableSlotDataMap(pInteropData, cMaxEntries);
+
+ // Initialize the map with parent information
+ if (bmtParent.pParentMethodTable != NULL)
+ {
+ InteropMethodTableData *pParentInteropData = bmtParent.pParentMethodTable->LookupComInteropData();
+ _ASSERTE(pParentInteropData);
+
+ for ( i = 0; i < pParentInteropData->cVTable; i++)
+ {
+ InteropMethodTableSlotData *pParentSlot = &pParentInteropData->pVTable[i];
+ InteropMethodTableSlotData *pNewEntry = bmtVT.pInteropData->GetData(pParentSlot->pDeclMD);
+ pNewEntry->pMD = pParentSlot->pMD;
+ pNewEntry->pDeclMD = pParentSlot->pDeclMD;
+ pNewEntry->wFlags = pParentSlot->wFlags;
+ pNewEntry->wSlot = pParentSlot->wSlot;
+
+ bmtVT.ppSDVtable[i] = pNewEntry;
+ }
+ }
+ }
+
+ // Determine vtable placement for each member in this class
+ BuildInteropVTable_PlaceMembers(bmtDomain,&bmtType, wNumInterfaces, pBuildingInterfaceList, &bmtMethod,
+ &bmtError, &bmtProp, &bmtParent, &bmtInterface, &bmtMethodImpl, &bmtVT);
+
+ // First copy what we can leverage from the parent's interface map.
+ // The parent's interface map will be identical to the beginning of this class's interface map (i.e.
+ // the interfaces will be listed in the identical order).
+ if (bmtParent.wNumParentInterfaces > 0)
+ {
+ PREFIX_ASSUME(pParentMethodTable != NULL); // We have to have parent to have parent interfaces
+
+ _ASSERTE(pParentMethodTable->LookupComInteropData());
+ _ASSERTE(bmtParent.wNumParentInterfaces == pParentMethodTable->LookupComInteropData()->cInterfaceMap);
+ InterfaceInfo_t *pParentInterfaceList = pParentMethodTable->LookupComInteropData()->pInterfaceMap;
+
+
+ for (i = 0; i < bmtParent.wNumParentInterfaces; i++)
+ {
+#ifdef _DEBUG
+ _ASSERTE(pParentInterfaceList[i].m_pMethodTable == bmtInterface.pInterfaceMap[i].m_pMethodTable);
+
+ MethodTable *pMT = pParentInterfaceList[i].m_pMethodTable;
+
+ // If the interface resides entirely inside the parent's class methods (i.e. no duplicate
+ // slots), then we can place this interface in the same spot as in the parent.
+ //
+ // Note carefully: the vtable for this interface could start within the first GetNumVirtuals()
+ // entries, but could actually extend beyond it, if we were particularly efficient at placing
+ // this interface, so check that the end of the interface vtable is before
+ // pParentMethodTable->GetNumVirtuals().
+
+ _ASSERTE(pParentInterfaceList[i].GetInteropStartSlot() + pMT->GetNumVirtuals() <=
+ pParentMethodTable->LookupComInteropData()->cVTable);
+#endif // _DEBUG
+ // Interface lies inside parent's methods, so we can place it
+ bmtInterface.pInterfaceMap[i].SetInteropStartSlot(pParentInterfaceList[i].GetInteropStartSlot());
+ }
+ }
+
+ //
+ // If we are a class, then there may be some unplaced vtable methods (which are by definition
+ // interface methods, otherwise they'd already have been placed). Place as many unplaced methods
+ // as possible, in the order preferred by interfaces. However, do not allow any duplicates - once
+ // a method has been placed, it cannot be placed again - if we are unable to neatly place an interface,
+ // create duplicate slots for it starting at dwCurrentDuplicateVtableSlot. Fill out the interface
+ // map for all interfaces as they are placed.
+ //
+ // If we are an interface, then all methods are already placed. Fill out the interface map for
+ // interfaces as they are placed.
+ //
+ if (!IsInterface())
+ {
+ BuildInteropVTable_PlaceVtableMethods(
+ &bmtInterface,
+ wNumInterfaces,
+ pBuildingInterfaceList,
+ &bmtVT,
+ &bmtMethod,
+ &bmtType,
+ &bmtError,
+ &bmtProp,
+ &bmtParent);
+
+ BuildInteropVTable_PlaceMethodImpls(
+ bmtDomain,
+ &bmtType,
+ &bmtMethodImpl,
+ &bmtError,
+ &bmtInterface,
+ &bmtVT,
+ &bmtParent);
+ }
+
+#ifdef _DEBUG
+ if (IsInterface() == FALSE)
+ {
+ for (i = 0; i < bmtInterface.wInterfaceMapSize; i++)
+ {
+ _ASSERTE(bmtInterface.pInterfaceMap[i].GetInteropStartSlot() != MethodTable::NO_SLOT);
+ }
+ }
+#endif // _DEBUG
+
+ // Place all non vtable methods
+ for (i = 0; i < bmtVT.wCurrentNonVtableSlot; i++)
+ {
+ bmtVT.SetMethodDescForSlot(bmtVT.wCurrentVtableSlot + i, bmtVT.ppSDNonVtable[i]->pMD);
+ CONSISTENCY_CHECK(bmtVT.ppSDNonVtable[i]->wSlot != MethodTable::NO_SLOT);
+ bmtVT.ppSDVtable[bmtVT.wCurrentVtableSlot + i] = bmtVT.ppSDNonVtable[i];
+ }
+
+ // Must copy overridden slots to duplicate entries in the vtable
+ BuildInteropVTable_PropagateInheritance(&bmtVT);
+
+ // ensure we didn't overflow the temporary vtable
+ _ASSERTE(bmtVT.wCurrentNonVtableSlot <= bmtVT.dwMaxVtableSize);
+
+ // Finalize.
+ InteropMethodTableData *pInteropMT = NULL;
+
+ FinalizeInteropVTable(
+ pamTracker,
+ pThisMT->GetLoaderAllocator(),
+ &bmtVT,
+ &bmtInterface,
+ &bmtType,
+ &bmtProp,
+ &bmtMethod,
+ &bmtError,
+ &bmtParent,
+ &pInteropMT);
+ _ASSERTE(pInteropMT);
+
+#ifdef _DEBUG
+ if (fDump)
+ {
+ CQuickBytes qb;
+ DWORD cb = 0;
+ PCCOR_SIGNATURE pSig;
+ ULONG cbSig;
+
+ printf("InteropMethodTable\n--------------\n");
+ printf("VTable\n------\n");
+
+ for (DWORD i = 0; i < pInteropMT->cVTable; i++)
+ {
+ // Print the method name
+ InteropMethodTableSlotData *pInteropMD = &pInteropMT->pVTable[i];
+ printf(pInteropMD->pMD->GetName());
+ printf(" ");
+
+ // Print the sig
+ if (FAILED(pInteropMD->pMD->GetMDImport()->GetSigOfMethodDef(pInteropMD->pMD->GetMemberDef(), &cbSig, &pSig)))
+ {
+ pSig = NULL;
+ cbSig = 0;
+ }
+ PrettyPrintSigInternalLegacy(pSig, cbSig, "", &qb, pInteropMD->pMD->GetMDImport());
+ printf((LPCUTF8) qb.Ptr());
+ printf("\n");
+ }
+ }
+#endif // _DEBUG
+
+ NullBMTData();
+
+ return pInteropMT;
+}
+
+//---------------------------------------------------------------------------------------
+InteropMethodTableData *MethodTableBuilder::BuildInteropVTableForArray(AllocMemTracker *pamTracker)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ INSTANCE_CHECK;
+ PRECONDITION(GetHalfBakedMethodTable()->IsArray());
+ PRECONDITION(GetHalfBakedMethodTable()->GetNumVirtuals() == GetHalfBakedMethodTable()->GetParentMethodTable()->GetNumVirtuals());
+ } CONTRACTL_END;
+
+ MethodTable * pThisMT = GetHalfBakedMethodTable();
+
+ // Get the interop data for the parent
+ MethodTable *pParentMT = pThisMT->GetParentMethodTable();
+ InteropMethodTableData *pParentMTData = pParentMT->GetComInteropData();
+ CONSISTENCY_CHECK(pParentMTData != NULL);
+
+ // Allocate in the same heap as the array itself
+ LoaderHeap *pHeap = pThisMT->GetLoaderAllocator()->GetLowFrequencyHeap();
+
+ // Allocate the overall structure
+ InteropMethodTableData *pMTData = (InteropMethodTableData *)(void *) pamTracker->Track(pHeap->AllocMem(S_SIZE_T(sizeof(InteropMethodTableData))));
+ memset(pMTData, 0, sizeof(InteropMethodTableData));
+
+ // Allocate the vtable - this is just a copy from System.Array
+ pMTData->cVTable = pParentMTData->cVTable;
+ if (pMTData->cVTable != 0)
+ {
+ pMTData->pVTable = (InteropMethodTableSlotData *)(void *)
+ pamTracker->Track(pHeap->AllocMem(S_SIZE_T(sizeof(InteropMethodTableSlotData)) * S_SIZE_T(pMTData->cVTable)));
+
+ // Copy the vtable
+ for (DWORD i = 0; i < pMTData->cVTable; i++)
+ pMTData->pVTable[i] = pParentMTData->pVTable[i];
+ }
+
+ // Allocate the non-vtable
+ pMTData->cNonVTable = pThisMT->GetNumMethods() - pThisMT->GetNumVirtuals();
+ if (pMTData->cNonVTable != 0)
+ {
+ pMTData->pNonVTable = (InteropMethodTableSlotData *)(void *)
+ pamTracker->Track(pHeap->AllocMem(S_SIZE_T(sizeof(InteropMethodTableSlotData)) * S_SIZE_T(pMTData->cNonVTable)));
+
+ // Copy the non-vtable
+ UINT32 iCurRealSlot = pThisMT->GetNumVirtuals();
+ WORD iCurInteropSlot = pMTData->cVTable;
+ for (DWORD i = 0; i < pMTData->cNonVTable; i++, iCurRealSlot++, iCurInteropSlot++)
+ {
+ pMTData->pNonVTable[i].wSlot = iCurInteropSlot;
+ pMTData->pNonVTable[i].pMD = pThisMT->GetMethodDescForSlot(iCurRealSlot);
+ }
+ }
+
+ // Allocate the interface map
+ pMTData->cInterfaceMap = pParentMTData->cInterfaceMap;
+ if (pMTData->cInterfaceMap != 0)
+ {
+ pMTData->pInterfaceMap = (InterfaceInfo_t *)(void *)
+ pamTracker->Track(pHeap->AllocMem(S_SIZE_T(sizeof(InterfaceInfo_t)) * S_SIZE_T(pMTData->cInterfaceMap)));
+
+ // Copy the interface map
+ for (DWORD i = 0; i < pMTData->cInterfaceMap; i++)
+ pMTData->pInterfaceMap[i] = pParentMTData->pInterfaceMap[i];
+ }
+
+ return pMTData;
+}
+
+//---------------------------------------------------------------------------------------
+VOID MethodTableBuilder::BuildInteropVTable_InterfaceList(
+ BuildingInterfaceInfo_t **ppBuildingInterfaceList,
+ WORD *pcBuildingInterfaceList)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Initialize arguments
+ *pcBuildingInterfaceList = 0;
+ *ppBuildingInterfaceList = NULL;
+
+ // Get the thread for stacking allocator
+ Thread *pThread = GetThread();
+
+ // Get the metadata for enumerating the interfaces of the class
+ IMDInternalImport *pMDImport = GetModule()->GetMDImport();
+
+ // Now load all the interfaces
+ HENUMInternalHolder hEnumInterfaceImpl(pMDImport);
+ hEnumInterfaceImpl.EnumInit(mdtInterfaceImpl, GetCl());
+
+ // Get the count for the number of interfaces from metadata
+ DWORD cAllInterfaces = pMDImport->EnumGetCount(&hEnumInterfaceImpl);
+ WORD cNonGenericItfs = 0;
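+
+    // Only non-generic interfaces are collected: classic COM interop has no
+    // representation for an instantiated generic interface, so generic interfaces
+    // are skipped when building the interop interface list below.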
+
+ // Iterate through each interface token and get the type for the interface and put
+ // it into the BuildingInterfaceInfo_t struct.
+ if (cAllInterfaces != 0)
+ {
+ mdInterfaceImpl ii;
+ Module *pModule = GetModule();
+
+ // Allocate the BuildingInterfaceList table
+ *ppBuildingInterfaceList = new(&pThread->m_MarshalAlloc) BuildingInterfaceInfo_t[cAllInterfaces];
+ BuildingInterfaceInfo_t *pInterfaceBuildInfo = *ppBuildingInterfaceList;
+
+ while (pMDImport->EnumNext(&hEnumInterfaceImpl, &ii))
+ {
+ mdTypeRef crInterface;
+ TypeHandle intType;
+
+ // Get properties on this interface
+ if (FAILED(pMDImport->GetTypeOfInterfaceImpl(ii, &crInterface)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ SigTypeContext typeContext = SigTypeContext(TypeHandle(GetHalfBakedMethodTable()));
+ intType = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, crInterface, &typeContext,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef);
+
+            // At this point, the interface should never have any uninstantiated generic parameters.
+ _ASSERTE(!intType.IsGenericTypeDefinition());
+
+ // Skip any generic interfaces.
+ if (intType.GetNumGenericArgs() != 0)
+ continue;
+
+ pInterfaceBuildInfo[cNonGenericItfs].m_pMethodTable = intType.AsMethodTable();
+ _ASSERTE(pInterfaceBuildInfo[cNonGenericItfs].m_pMethodTable != NULL);
+ _ASSERTE(pInterfaceBuildInfo[cNonGenericItfs].m_pMethodTable->IsInterface());
+ cNonGenericItfs++;
+ }
+ _ASSERTE(cNonGenericItfs <= cAllInterfaces);
+ }
+
+ *pcBuildingInterfaceList = cNonGenericItfs;
+}
+
+//---------------------------------------------------------------------------------------
+// Used by BuildInteropVTable
+//
+// Determine vtable placement for each member in this class
+//
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+VOID MethodTableBuilder::BuildInteropVTable_PlaceMembers(
+ BaseDomain *bmtDomain,
+ bmtTypeInfo* bmtType,
+ DWORD numDeclaredInterfaces,
+ BuildingInterfaceInfo_t *pBuildingInterfaceList,
+ bmtMethodInfo* bmtMethod,
+ bmtErrorInfo* bmtError,
+ bmtProperties* bmtProp,
+ bmtParentInfo* bmtParent,
+ bmtInterfaceInfo* bmtInterface,
+ bmtMethodImplInfo* bmtMethodImpl,
+ bmtVtable* bmtVT)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(bmtType));
+ PRECONDITION(CheckPointer(bmtMethod));
+ PRECONDITION(CheckPointer(bmtError));
+ PRECONDITION(CheckPointer(bmtProp));
+ PRECONDITION(CheckPointer(bmtInterface));
+ PRECONDITION(CheckPointer(bmtParent));
+ PRECONDITION(CheckPointer(bmtMethodImpl));
+ PRECONDITION(CheckPointer(bmtVT));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!IsInterface());
+
+ Module * pModule = GetModule();
+
+#ifdef _DEBUG
+ LPCUTF8 pszDebugName,pszDebugNamespace;
+ if (FAILED(bmtType->pModule->GetMDImport()->GetNameOfTypeDef(GetCl(), &pszDebugName, &pszDebugNamespace)))
+ {
+ pszDebugName = pszDebugNamespace = "Invalid TypeDef record";
+ }
+#endif // _DEBUG
+
+ HRESULT hr = S_OK;
+ DWORD i, j;
+ DWORD dwClassDeclFlags = 0xffffffff;
+ DWORD dwClassNullDeclFlags = 0xffffffff;
+
+ for (i = 0; i < NumDeclaredMethods(); i++)
+ {
+ LPCUTF8 szMemberName = NULL;
+ PCCOR_SIGNATURE pMemberSignature = NULL;
+ DWORD cMemberSignature = 0;
+ mdToken tokMember;
+ DWORD dwMemberAttrs;
+ DWORD dwDescrOffset;
+ DWORD dwImplFlags;
+ BOOL fMethodImplementsInterface = FALSE;
+ DWORD dwMDImplementsInterfaceNum = 0;
+ DWORD dwMDImplementsSlotNum = 0;
+ DWORD dwParentAttrs;
+
+ tokMember = bmtMethod->rgMethodTokens[i];
+ dwMemberAttrs = bmtMethod->rgMethodAttrs[i];
+ dwDescrOffset = bmtMethod->rgMethodRVA[i];
+ dwImplFlags = bmtMethod->rgMethodImplFlags[i];
+
+ DWORD Classification = bmtMethod->rgMethodClassifications[i];
+
+ // If this member is a method which overrides a parent method, it will be set to non-NULL
+ MethodDesc *pParentMethodDesc = NULL;
+
+ szMemberName = bmtMethod->rgszMethodName[i];
+
+ // constructors and class initialisers are special
+ if (!IsMdRTSpecialName(dwMemberAttrs))
+ {
+ // The method does not have the special marking
+ if (IsMdVirtual(dwMemberAttrs))
+ {
+ // Hash that a method with this name exists in this class
+ // Note that ctors and static ctors are not added to the table
+ DWORD dwHashName = HashStringA(szMemberName);
+
+ // If the member is marked with a new slot we do not need to find it
+ // in the parent
+ if (!IsMdNewSlot(dwMemberAttrs))
+ {
+ // If we're not doing sanity checks, then assume that any method declared static
+ // does not attempt to override some virtual parent.
+ if (!IsMdStatic(dwMemberAttrs) && bmtParent->pParentMethodTable != NULL)
+ {
+ // Attempt to find the method with this name and signature in the parent class.
+ // This method may or may not create pParentMethodHash (if it does not already exist).
+ // It also may or may not fill in pMemberSignature/cMemberSignature.
+ // An error is only returned when we can not create the hash.
+ // NOTE: This operation touches metadata
+ {
+ BOOL fMethodConstraintsMatch = FALSE;
+ VERIFY(SUCCEEDED(LoaderFindMethodInClass(
+ szMemberName,
+ bmtType->pModule,
+ tokMember,
+ &pParentMethodDesc,
+ &pMemberSignature, &cMemberSignature,
+ dwHashName,
+ &fMethodConstraintsMatch)));
+                        // This assert should hold because interop methods cannot be generic.
+ _ASSERTE(pParentMethodDesc == NULL || fMethodConstraintsMatch);
+ }
+
+ if (pParentMethodDesc != NULL)
+ {
+ dwParentAttrs = pParentMethodDesc->GetAttrs();
+
+ _ASSERTE(IsMdVirtual(dwParentAttrs) && "Non virtual methods should not be searched");
+ _ASSERTE(!(IsMdFinal(dwParentAttrs)));
+ }
+ }
+ }
+ }
+ }
+
+ if(pParentMethodDesc == NULL) {
+ // This method does not exist in the parent. If we are a class, check whether this
+ // method implements any interface. If true, we can't place this method now.
+ if ((!IsInterface()) &&
+ ( IsMdPublic(dwMemberAttrs) &&
+ IsMdVirtual(dwMemberAttrs) &&
+ !IsMdStatic(dwMemberAttrs) &&
+ !IsMdRTSpecialName(dwMemberAttrs))) {
+
+ // Don't check parent class interfaces - if the parent class had to implement an interface,
+ // then it is already guaranteed that we inherited that method.
+ _ASSERTE(!bmtParent->pParentMethodTable || bmtParent->pParentMethodTable->LookupComInteropData());
+ DWORD numInheritedInts = (bmtParent->pParentMethodTable ?
+ (DWORD) bmtParent->pParentMethodTable->LookupComInteropData()->cInterfaceMap: 0);
+
+ for (j = numInheritedInts; j < bmtInterface->wInterfaceMapSize; j++)
+ {
+ MethodTable *pInterface = bmtInterface->pInterfaceMap[j].m_pMethodTable;
+ if (pMemberSignature == NULL)
+ { // We've been trying to avoid asking for the signature - now we need it
+ if (FAILED(bmtType->pMDImport->GetSigOfMethodDef(tokMember, &cMemberSignature, &pMemberSignature)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ }
+
+ WORD slotNum = (WORD) (-1);
+ MethodDesc *pItfMD = MemberLoader::FindMethod(pInterface,
+ szMemberName, pMemberSignature, cMemberSignature, bmtType->pModule);
+
+ if (pItfMD != NULL)
+ {
+ // This method implements an interface - don't place it
+ fMethodImplementsInterface = TRUE;
+
+ // Keep track of this fact and use it while placing the interface
+ slotNum = (WORD) pItfMD->GetSlot();
+ if (bmtInterface->pppInterfaceImplementingMD[j] == NULL)
+ {
+ Thread *pThread = GetThread();
+ StackingAllocator * pAlloc = &pThread->m_MarshalAlloc;
+
+ bmtInterface->pppInterfaceImplementingMD[j] = new (pAlloc) MethodDesc * [pInterface->GetNumVirtuals()];
+ memset(bmtInterface->pppInterfaceImplementingMD[j], 0, sizeof(MethodDesc *) * pInterface->GetNumVirtuals());
+
+ bmtInterface->pppInterfaceDeclaringMD[j] = new (pAlloc) MethodDesc * [pInterface->GetNumVirtuals()];
+ memset(bmtInterface->pppInterfaceDeclaringMD[j], 0, sizeof(MethodDesc *) * pInterface->GetNumVirtuals());
+ }
+
+ bmtInterface->pppInterfaceDeclaringMD[j][slotNum] = pItfMD;
+
+ dwMDImplementsInterfaceNum = j;
+ dwMDImplementsSlotNum = slotNum;
+ break;
+ }
+ }
+ }
+ }
+
+ // Now find the MethodDesc associated with this method
+        MethodDesc *pNewMD = MemberLoader::FindMethod(GetHalfBakedMethodTable(), tokMember);
+        _ASSERTE(pNewMD != NULL);
+        _ASSERTE(dwMemberAttrs == pNewMD->GetAttrs());
+
+        _ASSERTE(!bmtVT->pInteropData->Exists(pNewMD));
+        InteropMethodTableSlotData *pNewMDData = bmtVT->pInteropData->GetData(pNewMD);
+
+ _ASSERTE(bmtParent->ppParentMethodDescBufPtr != NULL);
+        // Pointer subtraction already yields an element count, and two entries are
+        // stored per declared method, so divide by 2 rather than by sizeof.
+        _ASSERTE(((bmtParent->ppParentMethodDescBufPtr - bmtParent->ppParentMethodDescBuf) / 2)
+            < NumDeclaredMethods());
+ *(bmtParent->ppParentMethodDescBufPtr++) = pParentMethodDesc;
+ *(bmtParent->ppParentMethodDescBufPtr++) = pNewMD;
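+        // Note the buffer layout: consecutive (parent MD, new MD) pairs, one pair per
+        // declared method, recording both sides of each potential override together.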
+
+ if (fMethodImplementsInterface && IsMdVirtual(dwMemberAttrs))
+ {
+ bmtInterface->pppInterfaceImplementingMD[dwMDImplementsInterfaceNum][dwMDImplementsSlotNum] = pNewMD;
+ }
+
+ // Set the MethodDesc value
+ bmtMethod->ppMethodDescList[i] = pNewMD;
+
+ // Make sure that fcalls have a 0 rva. This is assumed by the prejit fixup logic
+ _ASSERTE(((Classification & ~mdcMethodImpl) != mcFCall) || dwDescrOffset == 0);
+
+ // Non-virtual method
+ if (IsMdStatic(dwMemberAttrs) ||
+ !IsMdVirtual(dwMemberAttrs) ||
+ IsMdRTSpecialName(dwMemberAttrs))
+ {
+ // Non-virtual method (doesn't go into the vtable)
+ _ASSERTE(bmtVT->pNonVtableMD[bmtVT->wCurrentNonVtableSlot] == NULL);
+
+ // Set the data for the method
+ pNewMDData->wSlot = bmtVT->wCurrentNonVtableSlot;
+
+ // Add the slot into the non-virtual method table
+ bmtVT->pNonVtableMD[bmtVT->wCurrentNonVtableSlot] = pNewMD;
+ bmtVT->ppSDNonVtable[bmtVT->wCurrentNonVtableSlot] = pNewMDData;
+
+ // Increment the current non-virtual method table slot
+ bmtVT->wCurrentNonVtableSlot++;
+ }
+
+ // Virtual method
+ else
+ {
+ if (IsInterface())
+ { // (shouldn't happen for this codepath)
+ UNREACHABLE();
+ }
+
+ else if (pParentMethodDesc != NULL)
+ { // We are overriding a parent's vtable slot
+ CONSISTENCY_CHECK(bmtVT->pInteropData->Exists(pParentMethodDesc));
+ WORD slotNumber = bmtVT->pInteropData->GetData(pParentMethodDesc)->wSlot;
+
+ // If the MethodDesc was inherited by an interface but not implemented,
+ // then the interface's MethodDesc is sitting in the slot and will not reflect
+ // the true slot number. Need to find the starting slot of the interface in
+ // the parent class to figure out the true slot (starting slot + itf slot)
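+                // For example (illustrative numbers): if the interface's methods start at
+                // interop slot 20 in the parent and the inherited MD occupies interface
+                // slot 3, the true slot for this override is 20 + 3 = 23.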
+ if (pParentMethodDesc->IsInterface())
+ {
+ MethodTable *pItfMT = pParentMethodDesc->GetMethodTable();
+ WORD startSlot = bmtParent->pParentMethodTable->LookupComInteropData()->GetStartSlotForInterface(pItfMT);
+ _ASSERTE(startSlot != (WORD) -1);
+ slotNumber += startSlot;
+ }
+
+ // we are overriding a parent method, so place this method now
+ bmtVT->SetMethodDescForSlot(slotNumber, pNewMD);
+ bmtVT->ppSDVtable[slotNumber] = pNewMDData;
+
+ pNewMDData->wSlot = slotNumber;
+ }
+
+ else if (!fMethodImplementsInterface)
+ { // Place it unless we will do it when laying out an interface or it is a body to
+ // a method impl. If it is an impl then we will use the slots used by the definition.
+
+ // Store the slot for this method
+ pNewMDData->wSlot = bmtVT->wCurrentVtableSlot;
+
+ // Now copy the method into the vtable, and interop data
+ bmtVT->SetMethodDescForSlot(bmtVT->wCurrentVtableSlot, pNewMD);
+ bmtVT->ppSDVtable[bmtVT->wCurrentVtableSlot] = pNewMDData;
+
+ // Increment current vtable slot, since we're not overriding a parent slot
+ bmtVT->wCurrentVtableSlot++;
+ }
+ }
+
+ if(Classification & mdcMethodImpl)
+ { // If this method serves as the BODY of a MethodImpl specification, then
+ // we should iterate all the MethodImpl's for this class and see just how many
+ // of them this method participates in as the BODY.
+ for(DWORD m = 0; m < bmtMethodImpl->dwNumberMethodImpls; m++)
+ {
+ if(tokMember == bmtMethodImpl->rgMethodImplTokens[m].methodBody)
+ {
+ MethodDesc* desc = NULL;
+ BOOL fIsMethod;
+ mdToken mdDecl = bmtMethodImpl->rgMethodImplTokens[m].methodDecl;
+ Substitution *pDeclSubst = &bmtMethodImpl->pMethodDeclSubsts[m];
+
+ // Get the parent
+ mdToken tkParent = mdTypeDefNil;
+ if (TypeFromToken(mdDecl) == mdtMethodDef || TypeFromToken(mdDecl) == mdtMemberRef)
+ {
+ hr = bmtType->pMDImport->GetParentToken(mdDecl,&tkParent);
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+ }
+
+ if (GetCl() == tkParent)
+ { // The DECL has been declared
+ // within the class that we're currently building.
+ hr = S_OK;
+
+ if(bmtError->pThrowable != NULL)
+ *(bmtError->pThrowable) = NULL;
+
+ // <TODO>Verify that the substitution doesn't change for this case </TODO>
+ if(TypeFromToken(mdDecl) != mdtMethodDef) {
+ hr = FindMethodDeclarationForMethodImpl(
+ bmtType->pMDImport,
+ GetCl(),
+ mdDecl,
+ &mdDecl);
+ _ASSERTE(SUCCEEDED(hr));
+
+ // Make sure the virtual states are the same
+ DWORD dwDescAttrs;
+ if (FAILED(bmtType->pMDImport->GetMethodDefProps(mdDecl, &dwDescAttrs)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ _ASSERTE(IsMdVirtual(dwMemberAttrs) == IsMdVirtual(dwDescAttrs));
+ }
+ }
+ else
+ {
+ SigTypeContext typeContext;
+
+ desc = MemberLoader::GetMethodDescFromMemberDefOrRefOrSpec(bmtType->pModule,
+ mdDecl,
+ &typeContext,
+ FALSE, FALSE); // don't demand generic method args
+ mdDecl = mdTokenNil;
+                        // Make sure the body is virtual
+ _ASSERTE(IsMdVirtual(dwMemberAttrs));
+ }
+
+                    // Only add the method impl if the interface it is declared on is non-generic.
+                    // A NULL desc represents a method impl to a method on the current class, which
+                    // we know isn't generic.
+ if ((desc == NULL) || (desc->GetMethodTable()->GetNumGenericArgs() == 0))
+ {
+ bmtMethodImpl->AddMethod(pNewMD,
+ desc,
+ mdDecl,
+ pDeclSubst);
+ }
+ }
+ }
+ }
+ } /* end ... for each member */
+}
+
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+//---------------------------------------------------------------------------------------
+// Resolve unresolved interfaces, determine an upper bound on the size of the interface map,
+// and determine the size of the largest interface (in # slots)
+VOID MethodTableBuilder::BuildInteropVTable_ResolveInterfaces(
+ BaseDomain *bmtDomain,
+ BuildingInterfaceInfo_t *pBuildingInterfaceList,
+ bmtTypeInfo* bmtType,
+ bmtInterfaceInfo* bmtInterface,
+ bmtVtable* bmtVT,
+ bmtParentInfo* bmtParent,
+ const bmtErrorInfo & bmtError)
+{
+
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(bmtDomain));
+ PRECONDITION(CheckPointer(bmtInterface));
+ PRECONDITION(CheckPointer(bmtVT));
+ PRECONDITION(CheckPointer(bmtParent));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DWORD i;
+ Thread *pThread = GetThread();
+
+ // resolve unresolved interfaces, determine an upper bound on the size of the interface map,
+ // and determine the size of the largest interface (in # slots)
+ bmtInterface->dwMaxExpandedInterfaces = 0; // upper bound on max # interfaces implemented by this class
+
+ // First look through the interfaces explicitly declared by this class
+ for (i = 0; i < bmtInterface->wInterfaceMapSize; i++)
+ {
+ MethodTable *pInterface = pBuildingInterfaceList[i].m_pMethodTable;
+
+ bmtInterface->dwMaxExpandedInterfaces += (1+ pInterface->GetNumInterfaces());
+ }
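+    // For example, a declared interface I3 where I3 : I1, I2 contributes 1 + 2 = 3 to
+    // this upper bound, even if I1 or I2 later turn out to be duplicates that the
+    // expansion step folds away.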
+
+ // Now look at interfaces inherited from the parent
+ if (bmtParent->pParentMethodTable != NULL)
+ {
+ _ASSERTE(bmtParent->pParentMethodTable->LookupComInteropData());
+ InteropMethodTableData *pInteropData = bmtParent->pParentMethodTable->LookupComInteropData();
+ InterfaceInfo_t *pParentInterfaceMap = pInteropData->pInterfaceMap;
+
+ for (i = 0; i < pInteropData->cInterfaceMap; i++)
+ {
+ MethodTable *pInterface = pParentInterfaceMap[i].m_pMethodTable;
+
+ bmtInterface->dwMaxExpandedInterfaces += (1+pInterface->GetNumInterfaces());
+ }
+ }
+
+ // Create a fully expanded map of all interfaces we implement
+ bmtInterface->pInterfaceMap = new (&pThread->m_MarshalAlloc) InterfaceInfo_t[bmtInterface->dwMaxExpandedInterfaces];
+
+ // # slots of largest interface
+ bmtInterface->dwLargestInterfaceSize = 0;
+
+ DWORD dwNumDeclaredInterfaces = bmtInterface->wInterfaceMapSize;
+
+ BuildInteropVTable_CreateInterfaceMap(pBuildingInterfaceList, bmtInterface, &bmtInterface->wInterfaceMapSize, &bmtInterface->dwLargestInterfaceSize, bmtParent->pParentMethodTable);
+
+ _ASSERTE(bmtInterface->wInterfaceMapSize <= bmtInterface->dwMaxExpandedInterfaces);
+
+ if (bmtInterface->dwLargestInterfaceSize > 0)
+ {
+ // This is needed later - for each interface, we get the MethodDesc pointer for each
+ // method. We need to be able to persist at most one interface at a time, so we
+ // need enough memory for the largest interface.
+ bmtInterface->ppInterfaceMethodDescList = new (&pThread->m_MarshalAlloc) MethodDesc*[bmtInterface->dwLargestInterfaceSize];
+
+ bmtInterface->ppInterfaceDeclMethodDescList = new (&pThread->m_MarshalAlloc) MethodDesc*[bmtInterface->dwLargestInterfaceSize];
+ }
+
+ EEClass *pParentClass = (IsInterface() || bmtParent->pParentMethodTable == NULL) ? NULL : bmtParent->pParentMethodTable->GetClass();
+
+ // For all the new interfaces we bring in, sum the methods
+ bmtInterface->dwTotalNewInterfaceMethods = 0;
+ if (pParentClass != NULL)
+ {
+ for (i = bmtParent->pParentMethodTable->GetNumInterfaces(); i < (bmtInterface->wInterfaceMapSize); i++)
+ bmtInterface->dwTotalNewInterfaceMethods +=
+ bmtInterface->pInterfaceMap[i].m_pMethodTable->GetNumVirtuals();
+ }
+
+ // The interface map is probably smaller than dwMaxExpandedInterfaces, so we'll copy the
+ // appropriate number of bytes when we allocate the real thing later.
+
+ // Inherit parental slot counts
+ if (pParentClass != NULL)
+ {
+ InteropMethodTableData *pParentInteropMT = bmtParent->pParentMethodTable->LookupComInteropData();
+ bmtVT->wCurrentVtableSlot = pParentInteropMT->cVTable;
+ bmtParent->wNumParentInterfaces = pParentInteropMT->cInterfaceMap;
+ }
+ else
+ {
+ bmtVT->wCurrentVtableSlot = 0;
+ bmtParent->wNumParentInterfaces = 0;
+ }
+
+ bmtVT->wCurrentNonVtableSlot = 0;
+
+ bmtInterface->pppInterfaceImplementingMD = (MethodDesc ***) pThread->m_MarshalAlloc.Alloc(S_UINT32(sizeof(MethodDesc *)) * S_UINT32(bmtInterface->dwMaxExpandedInterfaces));
+ memset(bmtInterface->pppInterfaceImplementingMD, 0, sizeof(MethodDesc *) * bmtInterface->dwMaxExpandedInterfaces);
+
+ bmtInterface->pppInterfaceDeclaringMD = (MethodDesc ***) pThread->m_MarshalAlloc.Alloc(S_UINT32(sizeof(MethodDesc *)) * S_UINT32(bmtInterface->dwMaxExpandedInterfaces));
+ memset(bmtInterface->pppInterfaceDeclaringMD, 0, sizeof(MethodDesc *) * bmtInterface->dwMaxExpandedInterfaces);
+
+ return;
+
+}
+
+//---------------------------------------------------------------------------------------
+// Fill out a fully expanded interface map, such that if we are declared to implement I3, and I3 extends I1,I2,
+// then I1,I2 are added to our list if they are not already present.
+//
+// Returns FALSE for failure. <TODO>Currently we don't fail, but @TODO perhaps we should fail if we recurse
+// too much.</TODO>
+//
+VOID MethodTableBuilder::BuildInteropVTable_CreateInterfaceMap(BuildingInterfaceInfo_t *pBuildingInterfaceList,
+ bmtInterfaceInfo* bmtInterface,
+ WORD *pwInterfaceListSize,
+ DWORD *pdwMaxInterfaceMethods,
+ MethodTable *pParentMethodTable)
+{
+ STANDARD_VM_CONTRACT;
+
+ WORD i;
+ InterfaceInfo_t *pInterfaceMap = bmtInterface->pInterfaceMap;
+ WORD wNumInterfaces = bmtInterface->wInterfaceMapSize;
+
+    // pwInterfaceListSize points to bmtInterface->wInterfaceMapSize, so we cached its value above
+ *pwInterfaceListSize = 0;
+
+ // First inherit all the parent's interfaces. This is important, because our interface map must
+ // list the interfaces in identical order to our parent.
+ //
+ // <NICE> we should document the reasons why. One reason is that DispatchMapTypeIDs can be indexes
+ // into the list </NICE>
+ if (pParentMethodTable != NULL)
+ {
+ _ASSERTE(pParentMethodTable->LookupComInteropData());
+ InteropMethodTableData *pInteropData = pParentMethodTable->LookupComInteropData();
+ InterfaceInfo_t *pParentInterfaceMap = pInteropData->pInterfaceMap;
+ unsigned cParentInterfaceMap = pInteropData->cInterfaceMap;
+
+ // The parent's interface list is known to be fully expanded
+ for (i = 0; i < cParentInterfaceMap; i++)
+ {
+ // Need to keep track of the interface with the largest number of methods
+ if (pParentInterfaceMap[i].m_pMethodTable->GetNumVirtuals() > *pdwMaxInterfaceMethods)
+ {
+ *pdwMaxInterfaceMethods = pParentInterfaceMap[i].m_pMethodTable->GetNumVirtuals();
+ }
+
+ pInterfaceMap[*pwInterfaceListSize].m_pMethodTable = pParentInterfaceMap[i].m_pMethodTable;
+ pInterfaceMap[*pwInterfaceListSize].SetInteropStartSlot(MethodTable::NO_SLOT);
+ pInterfaceMap[*pwInterfaceListSize].m_wFlags = 0;
+ (*pwInterfaceListSize)++;
+ }
+ }
+
+ // Go through each interface we explicitly implement (if a class), or extend (if an interface)
+ for (i = 0; i < wNumInterfaces; i++)
+ {
+ MethodTable *pDeclaredInterface = pBuildingInterfaceList[i].m_pMethodTable;
+
+ BuildInteropVTable_ExpandInterface(pInterfaceMap, pDeclaredInterface,
+ pwInterfaceListSize, pdwMaxInterfaceMethods,
+ TRUE);
+ }
+}
+
+//---------------------------------------------------------------------------------------
+// Given an interface map to fill out, expand pNewInterface (and its sub-interfaces) into it, increasing
+// pwInterfaceListSize as appropriate, and avoiding duplicates.
+//
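+// For example, if a type declares only I3 and I3 : I1, I2, expansion appends I3, then
+// recursively I1 and I2, skipping any that are already present (e.g. inherited via the
+// parent's fully expanded map).
+//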
+VOID MethodTableBuilder::BuildInteropVTable_ExpandInterface(InterfaceInfo_t *pInterfaceMap,
+ MethodTable *pNewInterface,
+ WORD *pwInterfaceListSize,
+ DWORD *pdwMaxInterfaceMethods,
+ BOOL fDirect)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD i;
+
+    // The interface list already contains the fully expanded set of interfaces from the
+    // parent; now we add the interfaces this type declares. We need to record which
+    // interfaces are declared directly on this type, but we never add a second entry
+    // for them - if a declared interface is already present (typically inherited from
+    // the parent), we simply mark the existing entry as declared on the class.
+
+ // Is it already present in the list?
+ for (i = 0; i < (*pwInterfaceListSize); i++) {
+ if (pInterfaceMap[i].m_pMethodTable->IsEquivalentTo(pNewInterface)) {
+ if(fDirect) {
+ pInterfaceMap[i].m_wFlags |= InterfaceInfo_t::interface_declared_on_class;
+ }
+ return; // found it, don't add it again
+ }
+ }
+
+ if (pNewInterface->GetNumVirtuals() > *pdwMaxInterfaceMethods) {
+ *pdwMaxInterfaceMethods = pNewInterface->GetNumVirtuals();
+ }
+
+ // Add it and each sub-interface
+ pInterfaceMap[*pwInterfaceListSize].m_pMethodTable = pNewInterface;
+ pInterfaceMap[*pwInterfaceListSize].SetInteropStartSlot(MethodTable::NO_SLOT);
+ pInterfaceMap[*pwInterfaceListSize].m_wFlags = 0;
+
+ if(fDirect)
+ pInterfaceMap[*pwInterfaceListSize].m_wFlags |= InterfaceInfo_t::interface_declared_on_class;
+
+ (*pwInterfaceListSize)++;
+
+ if (pNewInterface->GetNumInterfaces() != 0) {
+ MethodTable::InterfaceMapIterator it = pNewInterface->IterateInterfaceMap();
+ while (it.Next()) {
+ BuildInteropVTable_ExpandInterface(pInterfaceMap, it.GetInterface(),
+ pwInterfaceListSize, pdwMaxInterfaceMethods, FALSE);
+ }
+ }
+
+ return;
+}
+
+// If we are a class, then there may be some unplaced vtable methods (which are by definition
+// interface methods, otherwise they'd already have been placed). Place as many unplaced methods
+// as possible, in the order preferred by interfaces. However, do not allow any duplicates - once
+// a method has been placed, it cannot be placed again - if we are unable to neatly place an interface,
+// create duplicate slots for it starting at dwCurrentDuplicateVtableSlot. Fill out the interface
+// map for all interfaces as they are placed.
+//
+// If we are an interface, then all methods are already placed. Fill out the interface map for
+// interfaces as they are placed.
+//
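+// For example, when a class's own virtual M also satisfies I::M, the two conceptually
+// distinct slots may initially share one entry; the duplicate slots created here let a
+// later MethodImpl retarget I::M without disturbing the class's own M (and vice versa).
+//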
+
+//---------------------------------------------------------------------------------------
+VOID MethodTableBuilder::BuildInteropVTable_PlaceVtableMethods(
+ bmtInterfaceInfo* bmtInterface,
+ DWORD numDeclaredInterfaces,
+ BuildingInterfaceInfo_t *pBuildingInterfaceList,
+ bmtVtable* bmtVT,
+ bmtMethodInfo* bmtMethod,
+ bmtTypeInfo* bmtType,
+ bmtErrorInfo* bmtError,
+ bmtProperties* bmtProp,
+ bmtParentInfo* bmtParent)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD i;
+ BOOL fParentInterface;
+
+ for (WORD wCurInterface = 0;
+ wCurInterface < bmtInterface->wInterfaceMapSize;
+ wCurInterface++)
+ {
+ fParentInterface = FALSE;
+ // Keep track of the current interface
+ InterfaceInfo_t *pCurItfInfo = &(bmtInterface->pInterfaceMap[wCurInterface]);
+ // The interface we are attempting to place
+ MethodTable *pInterface = pCurItfInfo->m_pMethodTable;
+
+ _ASSERTE(!(pCurItfInfo->IsDeclaredOnClass() &&
+ !pInterface->IsExternallyVisible() &&
+ pInterface->GetAssembly() != bmtType->pModule->GetAssembly() &&
+ !Security::CanSkipVerification(GetAssembly()->GetDomainAssembly())));
+
+ // Did we place this interface already due to the parent class's interface placement?
+ if (pCurItfInfo->GetInteropStartSlot() != MethodTable::NO_SLOT)
+ {
+ // If we have declared it then we re-lay it out
+ if(pCurItfInfo->IsDeclaredOnClass())
+ {
+ // This should be in the outer IF statement, not this inner one, but we'll keep
+ // it this way to remain consistent for backward compatibility.
+ fParentInterface = TRUE;
+
+ // If the interface is folded into the non-interface portion of the vtable, we need to unfold it.
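+                // ("Folded" means the class's own virtual and the interface's slot share
+                // a single vtable entry; unfolding re-lays the interface out so each of
+                // the two conceptual slots can be overridden independently.)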
+ WORD wStartSlot = pCurItfInfo->GetInteropStartSlot();
+ MethodTable::MethodIterator it(pInterface);
+ for (; it.IsValid(); it.Next())
+ {
+ if (it.IsVirtual())
+ {
+ if(bmtVT->ppSDVtable[wStartSlot+it.GetSlotNumber()]->wSlot == wStartSlot+it.GetSlotNumber())
+ { // If the MD slot is equal to the vtable slot number, then this means the interface
+ // was folded into the non-interface part of the vtable and needs to get unfolded
+ // in case a specific override occurs for one of the conceptually two distinct
+ // slots and not the other (i.e., a MethodImpl overrides an interface method but not
+ // the class' virtual method).
+ pCurItfInfo->SetInteropStartSlot(MethodTable::NO_SLOT);
+ fParentInterface = FALSE;
+ break;
+ }
+ }
+ }
+ }
+ else
+ {
+ continue;
+ }
+ }
+
+ if (pInterface->GetNumVirtuals() == 0)
+ {
+ // no calls can be made to this interface anyway
+ // so initialize the slot number to 0
+ pCurItfInfo->SetInteropStartSlot((WORD) 0);
+ continue;
+ }
+
+ // If this interface has not been given a starting position do that now.
+ if(!fParentInterface)
+ pCurItfInfo->SetInteropStartSlot(bmtVT->wCurrentVtableSlot);
+
+ // For each method declared in this interface
+ {
+ MethodTable::MethodIterator it(pInterface);
+ for (; it.IsValid(); it.Next())
+ {
+ if (it.IsVirtual())
+ {
+ DWORD dwMemberAttrs;
+
+ // See if we have info gathered while placing members
+ if (bmtInterface->pppInterfaceImplementingMD[wCurInterface] && bmtInterface->pppInterfaceImplementingMD[wCurInterface][it.GetSlotNumber()] != NULL)
+ {
+ bmtInterface->ppInterfaceMethodDescList[it.GetSlotNumber()] = bmtInterface->pppInterfaceImplementingMD[wCurInterface][it.GetSlotNumber()];
+ bmtInterface->ppInterfaceDeclMethodDescList[it.GetSlotNumber()] = bmtInterface->pppInterfaceDeclaringMD[wCurInterface][it.GetSlotNumber()];
+ continue;
+ }
+
+ MethodDesc *pInterfaceMD = pInterface->GetMethodDescForSlot(it.GetSlotNumber());
+ _ASSERTE(pInterfaceMD != NULL);
+
+ LPCUTF8 pszInterfaceMethodName = pInterfaceMD->GetNameOnNonArrayClass();
+ PCCOR_SIGNATURE pInterfaceMethodSig;
+ DWORD cInterfaceMethodSig;
+
+ pInterfaceMD->GetSig(&pInterfaceMethodSig, &cInterfaceMethodSig);
+
+ // Try to find the method explicitly declared in our class
+ for (i = 0; i < NumDeclaredMethods(); i++)
+ {
+ // look for interface method candidates only
+ dwMemberAttrs = bmtMethod->rgMethodAttrs[i];
+
+ // Note that non-publics can legally be exposed via an interface.
+ if (IsMdVirtual(dwMemberAttrs) && IsMdPublic(dwMemberAttrs))
+ {
+ LPCUTF8 pszMemberName;
+
+ pszMemberName = bmtMethod->rgszMethodName[i];
+ _ASSERTE(!(pszMemberName == NULL));
+
+#ifdef _DEBUG
+ if(GetHalfBakedClass()->m_fDebuggingClass && g_pConfig->ShouldBreakOnMethod(pszMemberName))
+ CONSISTENCY_CHECK_MSGF(false, ("BreakOnMethodName: '%s' ", pszMemberName));
+#endif // _DEBUG
+
+ if (strcmp(pszMemberName,pszInterfaceMethodName) == 0)
+ {
+ PCCOR_SIGNATURE pMemberSignature;
+ DWORD cMemberSignature;
+
+ _ASSERTE(TypeFromToken(bmtMethod->rgMethodTokens[i]) == mdtMethodDef);
+ if (FAILED(bmtType->pMDImport->GetSigOfMethodDef(
+ bmtMethod->rgMethodTokens[i],
+ &cMemberSignature,
+ &pMemberSignature)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ if (MetaSig::CompareMethodSigs(
+ pMemberSignature,
+ cMemberSignature,
+ bmtType->pModule, NULL,
+ pInterfaceMethodSig,
+ cInterfaceMethodSig,
+ pInterfaceMD->GetModule(), NULL))
+ { // Found match, break from loop
+ break;
+ }
+ }
+ }
+ } // end ... try to find method
+
+ _ASSERTE(it.GetSlotNumber() < bmtInterface->dwLargestInterfaceSize);
+
+ if (i >= NumDeclaredMethods())
+ {
+                        // If this interface has been laid out by our parent, then
+                        // we do not need to define a new method desc for it.
+ if(fParentInterface)
+ {
+ bmtInterface->ppInterfaceMethodDescList[it.GetSlotNumber()] = NULL;
+ bmtInterface->ppInterfaceDeclMethodDescList[it.GetSlotNumber()] = NULL;
+ }
+ else
+ {
+                            // We will use the interface implementation if we do not find one in the
+                            // parent. It will have to be overridden by a method impl unless the
+                            // class is abstract or it is a special COM type class.
+
+ MethodDesc* pParentMD = NULL;
+ if(bmtParent->pParentMethodTable)
+ {
+#ifdef _DEBUG
+ if(GetHalfBakedClass()->m_fDebuggingClass && g_pConfig->ShouldBreakOnMethod(pszInterfaceMethodName))
+ CONSISTENCY_CHECK_MSGF(false, ("BreakOnMethodName: '%s' ", pszInterfaceMethodName));
+#endif // _DEBUG
+ // Check the parent class
+ pParentMD = MemberLoader::FindMethod(bmtParent->pParentMethodTable,
+ pszInterfaceMethodName,
+ pInterfaceMethodSig,
+ cInterfaceMethodSig,
+ pInterfaceMD->GetModule(),
+ MemberLoader::FM_Default,
+ &bmtParent->parentSubst);
+ }
+ // make sure we do a better back patching for these methods
+ if(pParentMD && IsMdVirtual(pParentMD->GetAttrs()))
+ {
+ bmtInterface->ppInterfaceMethodDescList[it.GetSlotNumber()] = pParentMD;
+ bmtInterface->ppInterfaceDeclMethodDescList[it.GetSlotNumber()] = pInterfaceMD;
+ }
+ else
+ {
+ bmtInterface->ppInterfaceMethodDescList[it.GetSlotNumber()] = pInterfaceMD;
+ bmtVT->pInteropData->GetData(pInterfaceMD)->wSlot = pInterfaceMD->GetSlot();
+ bmtInterface->ppInterfaceDeclMethodDescList[it.GetSlotNumber()] = NULL;
+ }
+ }
+ }
+ else
+ {
+                        // Found as a declared method in the class. If the interface was laid out by the parent,
+                        // we will be overriding their slot, so our method counts do not increase. We will fold
+                        // our method into our parent's interface if we have not been placed.
+ if(fParentInterface)
+ {
+ WORD dwSlot = (WORD) (pCurItfInfo->GetInteropStartSlot() + it.GetSlotNumber());
+ _ASSERTE(bmtVT->wCurrentVtableSlot > dwSlot);
+ MethodDesc *pMD = bmtMethod->ppMethodDescList[i];
+ InteropMethodTableSlotData *pMDData = bmtVT->pInteropData->GetData(pMD);
+ _ASSERTE(pMD && "Missing MethodDesc for declared method in class.");
+ if(pMDData->wSlot == MethodTable::NO_SLOT)
+ {
+ pMDData->wSlot = dwSlot;
+ }
+
+ // Set the slot and interop data
+ bmtVT->SetMethodDescForSlot(dwSlot, pMD);
+ bmtVT->ppSDVtable[dwSlot] = pMDData;
+ _ASSERTE( bmtVT->GetMethodDescForSlot(dwSlot) != NULL);
+ bmtInterface->ppInterfaceMethodDescList[it.GetSlotNumber()] = NULL;
+ bmtInterface->ppInterfaceDeclMethodDescList[it.GetSlotNumber()] = NULL;
+ }
+ else
+ {
+ bmtInterface->ppInterfaceMethodDescList[it.GetSlotNumber()] = bmtMethod->ppMethodDescList[i];
+ bmtInterface->ppInterfaceDeclMethodDescList[it.GetSlotNumber()] = pInterfaceMD;
+ }
+ }
+ }
+ }
+ }
+
+ {
+ MethodTable::MethodIterator it(pInterface);
+ for (; it.IsValid(); it.Next())
+ {
+ if (it.IsVirtual())
+ {
+ // The entry can be null if the interface was previously
+ // laid out by a parent and we did not have a method
+ // that subclassed the interface.
+ if(bmtInterface->ppInterfaceMethodDescList[it.GetSlotNumber()] != NULL)
+ {
+ // Get the MethodDesc which was allocated for the method
+ MethodDesc *pMD = bmtInterface->ppInterfaceMethodDescList[it.GetSlotNumber()];
+ InteropMethodTableSlotData *pMDData = bmtVT->pInteropData->GetData(pMD);
+
+ if (pMDData->wSlot == (WORD) MethodTable::NO_SLOT)
+ {
+ pMDData->wSlot = (WORD) bmtVT->wCurrentVtableSlot;
+ }
+
+ // Set the vtable slot
+ _ASSERTE(bmtVT->GetMethodDescForSlot(bmtVT->wCurrentVtableSlot) == NULL);
+ bmtVT->SetMethodDescForSlot(bmtVT->wCurrentVtableSlot, pMD);
+ _ASSERTE(bmtVT->GetMethodDescForSlot(bmtVT->wCurrentVtableSlot) != NULL);
+ bmtVT->ppSDVtable[bmtVT->wCurrentVtableSlot] = pMDData;
+
+ // Increment the current vtable slot
+ bmtVT->wCurrentVtableSlot++;
+ }
+ }
+ }
+ }
+ }
+}
+
+//---------------------------------------------------------------------------------------
+// We should have collected all the method impls. Cycle through them creating the method impl
+// structure that holds the information about which slots are overridden.
+VOID MethodTableBuilder::BuildInteropVTable_PlaceMethodImpls(
+ BaseDomain *bmtDomain,
+ bmtTypeInfo* bmtType,
+ bmtMethodImplInfo* bmtMethodImpl,
+ bmtErrorInfo* bmtError,
+ bmtInterfaceInfo* bmtInterface,
+ bmtVtable* bmtVT,
+ bmtParentInfo* bmtParent)
+
+{
+ STANDARD_VM_CONTRACT;
+
+ if(bmtMethodImpl->pIndex == 0)
+ return;
+
+ DWORD pIndex = 0;
+
+    // Allocate some temporary storage. The number of overrides for a single method impl
+    // cannot be greater than the number of vtable slots.
+ DWORD* slots = (DWORD*) new (&GetThread()->m_MarshalAlloc) DWORD[bmtVT->wCurrentVtableSlot];
+ MethodDesc **replaced = new (&GetThread()->m_MarshalAlloc) MethodDesc*[bmtVT->wCurrentVtableSlot];
+
+ while(pIndex < bmtMethodImpl->pIndex) {
+
+ DWORD slotIndex = 0;
+ DWORD dwItfCount = 0;
+ MethodDesc* next = bmtMethodImpl->GetBodyMethodDesc(pIndex);
+ MethodDesc* body = NULL;
+
+ // The signature for the body of the method impl. We cache the signature until all
+ // the method impl's using the same body are done.
+ PCCOR_SIGNATURE pBodySignature = NULL;
+ DWORD cBodySignature = 0;
+
+ // The impls are sorted according to the method descs for the body of the method impl.
+ // Loop through the impls until the next body is found. When a single body
+ // has been done move the slots implemented and method descs replaced into the storage
+ // found on the body method desc.
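+        // For example (hypothetical tokens): sorted entries (B1,D1) (B1,D2) (B2,D3) are
+        // processed as D1 and D2 against body B1, then the loop starts over with B2.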
+ do { // collect information until we reach the next body
+ body = next;
+
+ // Get the declaration part of the method impl. It will either be a token
+ // (declaration is on this type) or a method desc.
+ MethodDesc* pDecl = bmtMethodImpl->GetDeclarationMethodDesc(pIndex);
+            if(pDecl == NULL) {
+                // The declaration is on this type, so get the token.
+ mdMethodDef mdef = bmtMethodImpl->GetDeclarationToken(pIndex);
+
+ BuildInteropVTable_PlaceLocalDeclaration(mdef,
+ body,
+ bmtType,
+ bmtError,
+ bmtVT,
+ slots, // Adds override to the slot and replaced arrays.
+ replaced,
+ &slotIndex, // Increments count
+ &pBodySignature, // Fills in the signature
+ &cBodySignature);
+ }
+ else {
+ // Method impls to methods on generic interfaces should have already
+ // been filtered out.
+ _ASSERTE(pDecl->GetMethodTable()->GetNumGenericArgs() == 0);
+
+ if(pDecl->GetMethodTable()->IsInterface()) {
+ BuildInteropVTable_PlaceInterfaceDeclaration(pDecl,
+ body,
+ bmtMethodImpl->GetDeclarationSubst(pIndex),
+ bmtType,
+ bmtInterface,
+ bmtError,
+ bmtVT,
+ slots,
+ replaced,
+ &slotIndex, // Increments count
+ &pBodySignature, // Fills in the signature
+ &cBodySignature);
+ }
+ else {
+ BuildInteropVTable_PlaceParentDeclaration(pDecl,
+ body,
+ bmtMethodImpl->GetDeclarationSubst(pIndex),
+ bmtType,
+ bmtError,
+ bmtVT,
+ bmtParent,
+ slots,
+ replaced,
+ &slotIndex, // Increments count
+ &pBodySignature, // Fills in the signature
+ &cBodySignature);
+ }
+ }
+
+ // Move to the next body
+ pIndex++;
+
+ // we hit the end of the list so leave
+ next = pIndex < bmtMethodImpl->pIndex ? bmtMethodImpl->GetBodyMethodDesc(pIndex) : NULL;
+        } while(next == body);
+    } // while(pIndex < bmtMethodImpl->pIndex)
+}
+
+//---------------------------------------------------------------------------------------
+VOID MethodTableBuilder::BuildInteropVTable_PlaceLocalDeclaration(
+ mdMethodDef mdef,
+ MethodDesc* body,
+ bmtTypeInfo* bmtType,
+ bmtErrorInfo* bmtError,
+ bmtVtable* bmtVT,
+ DWORD* slots,
+ MethodDesc** replaced,
+ DWORD* pSlotIndex,
+ PCCOR_SIGNATURE* ppBodySignature,
+ DWORD* pcBodySignature)
+{
+ STANDARD_VM_CONTRACT;
+
+ // we search on the token and m_cl
+ for(USHORT i = 0; i < bmtVT->wCurrentVtableSlot; i++)
+ {
+ // Make sure we haven't already been MethodImpl'd
+ _ASSERTE(bmtVT->ppSDVtable[i]->pMD == bmtVT->ppSDVtable[i]->pDeclMD);
+
+ // We get the current slot. Since we are looking for a method declaration
+ // that is on our class we would never match up with a method obtained from
+ // one of our parents or an Interface.
+ MethodDesc *pMD = bmtVT->ppSDVtable[i]->pMD;
+
+        // If we get a null then we have already replaced this one. We can't check it,
+        // so we will just bypass it.
+ if(pMD->GetMemberDef() == mdef)
+ {
+ InteropMethodTableSlotData *pDeclData = bmtVT->pInteropData->GetData(pMD);
+ InteropMethodTableSlotData *pImplData = bmtVT->pInteropData->GetData(body);
+
+ // If the body has not been placed then place it here. We do not
+ // place bodies for method impl's until we find a spot for them.
+ if (pImplData->wSlot == MethodTable::NO_SLOT)
+ {
+ pImplData->wSlot = (WORD) i;
+ }
+
+ // We implement this slot, record it
+ slots[*pSlotIndex] = i;
+ replaced[*pSlotIndex] = pMD;
+ bmtVT->SetMethodDescForSlot(i, body);
+ pDeclData->pMD = pImplData->pMD;
+ pDeclData->wSlot = pImplData->wSlot;
+ bmtVT->ppSDVtable[i] = pDeclData;
+
+ // increment the counter
+ (*pSlotIndex)++;
+ }
+ }
+}
+
+//---------------------------------------------------------------------------------------
+VOID MethodTableBuilder::BuildInteropVTable_PlaceInterfaceDeclaration(
+ MethodDesc* pItfDecl,
+ MethodDesc* pImplBody,
+ const Substitution *pDeclSubst,
+ bmtTypeInfo* bmtType,
+ bmtInterfaceInfo* bmtInterface,
+ bmtErrorInfo* bmtError,
+ bmtVtable* bmtVT,
+ DWORD* slots,
+ MethodDesc** replaced,
+ DWORD* pSlotIndex,
+ PCCOR_SIGNATURE* ppBodySignature,
+ DWORD* pcBodySignature)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(pItfDecl && pItfDecl->IsInterface() && !(pItfDecl->IsMethodImpl()));
+
+    // When we are looking for a method desc, the declaration is on some class or
+    // interface that this class implements. The declaration will either be to an
+    // interface or to a class. If it is to an interface, then we need to search for
+    // that interface; from the slot number of the method in the interface we can
+    // calculate the offset into our vtable. If it is to a class, that class must be
+    // one of our ancestors. This uses the fact that an interface only shows up once
+    // in the vtable.
+
+ BOOL fInterfaceFound = FALSE;
+    // Check our vtable for entries that we are supposed to override.
+    // Since this is an external method, we must also check the interface map.
+    // We want to replace any interface methods even if they have been replaced
+    // by a base class.
+ for(USHORT i = 0; i < bmtInterface->wInterfaceMapSize; i++)
+ {
+ MethodTable *pInterface = bmtInterface->pInterfaceMap[i].m_pMethodTable;
+
+ if (pInterface->IsEquivalentTo(pItfDecl->GetMethodTable()))
+ {
+ // We found an interface so no error
+ fInterfaceFound = TRUE;
+
+ WORD wSlot = (WORD) -1;
+ MethodDesc *pMD = NULL;
+
+ // Find out where the interface map is set on our vtable
+ WORD wStartingSlot = (USHORT) bmtInterface->pInterfaceMap[i].GetInteropStartSlot();
+
+            // We may need to give the interface its own copy of its slots. Currently,
+            // interfaces do not overlap, so we just need to check whether the range holds
+            // a non-duplicated MD. If it does, the interface shares that slot with the
+            // class itself, which means we need to copy out the whole interface.
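+            // Concretely (illustrative slot numbers): if the interface currently occupies
+            // slots 5-7 and one of them is the primary home of a class method, slots 5-7
+            // are re-copied to the end of the vtable, the interface's start slot is
+            // updated, and the stale range is backpatched once the override body is known.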
+ for(wSlot = wStartingSlot; wSlot < pInterface->GetNumVirtuals() + wStartingSlot; wSlot++)
+ {
+ // This check will tell us if the method in this slot is the first instance (not a duplicate)
+ if(bmtVT->ppSDVtable[wSlot]->wSlot == wSlot)
+ break;
+ }
+
+ if(wSlot < pInterface->GetNumVirtuals() + wStartingSlot)
+ {
+                // Check to see if we have allocated the temporary array of starting values.
+                // This array is used to backpatch entries left at the original location. Those
+                // stale entries are never called, but they would cause problems later when we
+                // finish laying out the method table.
+ if(bmtInterface->pdwOriginalStart == NULL)
+ {
+ Thread *pThread = GetThread();
+ bmtInterface->pdwOriginalStart = new (&pThread->m_MarshalAlloc) DWORD[bmtInterface->dwMaxExpandedInterfaces];
+ memset(bmtInterface->pdwOriginalStart, 0, sizeof(DWORD)*bmtInterface->dwMaxExpandedInterfaces);
+ }
+
+ _ASSERTE(bmtInterface->pInterfaceMap[i].GetInteropStartSlot() != (WORD) 0 && "We assume that an interface does not start at position 0");
+ _ASSERTE(bmtInterface->pdwOriginalStart[i] == 0 && "We should not move an interface twice");
+ bmtInterface->pdwOriginalStart[i] = bmtInterface->pInterfaceMap[i].GetInteropStartSlot();
+
+ // The interface now starts at the end of the map.
+ bmtInterface->pInterfaceMap[i].SetInteropStartSlot(bmtVT->wCurrentVtableSlot);
+ for(WORD d = wStartingSlot; d < pInterface->GetNumVirtuals() + wStartingSlot; d++)
+ {
+ // Copy the MD
+ //@TODO: Maybe need to create new slot data entries for this copy-out based on
+ //@TODO: the MD's of the interface slots.
+ InteropMethodTableSlotData *pDataCopy = bmtVT->ppSDVtable[d];
+ bmtVT->SetMethodDescForSlot(bmtVT->wCurrentVtableSlot, pDataCopy->pMD);
+ bmtVT->ppSDVtable[bmtVT->wCurrentVtableSlot] = pDataCopy;
+ // Increment the various counters
+ bmtVT->wCurrentVtableSlot++;
+ }
+ // Reset the starting slot to the known value
+ wStartingSlot = bmtInterface->pInterfaceMap[i].GetInteropStartSlot();
+ }
+
+ // Make sure we have placed the interface map.
+ _ASSERTE(wStartingSlot != MethodTable::NO_SLOT);
+
+ // Get the Slot location of the method desc (slot of the itf MD + start slot for this class)
+ wSlot = pItfDecl->GetSlot() + wStartingSlot;
+ _ASSERTE(wSlot < bmtVT->wCurrentVtableSlot);
+
+ // Get our current method desc for this slot
+ pMD = bmtVT->ppSDVtable[wSlot]->pMD;
+
+ // If we have not got the method impl signature go get it now. It is cached
+ // in our caller
+ if (*ppBodySignature == NULL)
+ {
+ if (FAILED(bmtType->pMDImport->GetSigOfMethodDef(
+ pImplBody->GetMemberDef(),
+ pcBodySignature,
+ ppBodySignature)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ }
+
+ InteropMethodTableSlotData *pImplSlotData = bmtVT->pInteropData->GetData(pImplBody);
+            // If the body has not been placed then place it now.
+            if (pImplSlotData->wSlot == MethodTable::NO_SLOT)
+            {
+                pImplSlotData->wSlot = wSlot;
+            }
+
+ // Store away the values
+ InteropMethodTableSlotData *pItfSlotData = bmtVT->pInteropData->GetData(pItfDecl);
+ slots[*pSlotIndex] = wSlot;
+ replaced[*pSlotIndex] = pItfDecl;
+ bmtVT->SetMethodDescForSlot(wSlot, pImplBody);
+ pItfSlotData->pMD = pImplBody;
+ pItfSlotData->wSlot = pImplSlotData->wSlot;
+ bmtVT->ppSDVtable[wSlot] = pItfSlotData;
+
+ // increment the counter
+ (*pSlotIndex)++;
+
+            // If we have moved the interface, we need to backpatch the original location
+            // where we left an interface placeholder.
+ if(bmtInterface->pdwOriginalStart && bmtInterface->pdwOriginalStart[i] != 0)
+ {
+ USHORT slot = (USHORT) bmtInterface->pdwOriginalStart[i] + pItfDecl->GetSlot();
+ MethodDesc* pSlotMD = bmtVT->ppSDVtable[slot]->pMD;
+ if(pSlotMD->GetMethodTable() && pSlotMD->IsInterface())
+ {
+ bmtVT->SetMethodDescForSlot(slot, pImplBody);
+ bmtVT->ppSDVtable[slot] = pItfSlotData;
+ }
+ }
+ break;
+ }
+ }
+
+ _ASSERTE(fInterfaceFound);
+}
+
+//---------------------------------------------------------------------------------------
+VOID MethodTableBuilder::BuildInteropVTable_PlaceParentDeclaration(
+ MethodDesc* pDecl,
+ MethodDesc* pImplBody,
+ const Substitution *pDeclSubst,
+ bmtTypeInfo* bmtType,
+ bmtErrorInfo* bmtError,
+ bmtVtable* bmtVT,
+ bmtParentInfo* bmtParent,
+ DWORD* slots,
+ MethodDesc** replaced,
+ DWORD* pSlotIndex,
+ PCCOR_SIGNATURE* ppBodySignature,
+ DWORD* pcBodySignature)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(pDecl && !pDecl->IsInterface());
+
+    // Verify that the class of the declaration is in our hierarchy
+ MethodTable* declType = pDecl->GetMethodTable();
+ MethodTable* pParentMT = bmtParent->pParentMethodTable;
+ while(pParentMT != NULL)
+ {
+
+ if(declType == pParentMT)
+ break;
+ pParentMT = pParentMT->GetParentMethodTable();
+ }
+ _ASSERTE(pParentMT);
+
+ // Compare the signature for the token in the specified scope
+ // If we have not got the method impl signature go get it now
+ if (*ppBodySignature == NULL)
+ {
+ if (FAILED(bmtType->pMDImport->GetSigOfMethodDef(
+ pImplBody->GetMemberDef(),
+ pcBodySignature,
+ ppBodySignature)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ }
+
+    // We get the method from the parent's slot. We will replace the method that is currently
+    // defined in that slot and any duplicates for that method desc.
+ WORD wSlot = InteropMethodTableData::GetSlotForMethodDesc(pParentMT, pDecl);
+ InteropMethodTableSlotData *pDeclData = bmtVT->ppSDVtable[wSlot];
+ InteropMethodTableSlotData *pImplData = bmtVT->pInteropData->GetData(pImplBody);
+
+ // Get the real method desc (a base class may have overridden the method
+ // with a method impl)
+ MethodDesc* pReplaceDesc = pDeclData->pDeclMD;
+
+ // If the body has not been placed then place it here
+ if(pImplData->wSlot == MethodTable::NO_SLOT)
+ {
+ pImplData->wSlot = wSlot;
+ }
+
+ slots[*pSlotIndex] = wSlot;
+ replaced[*pSlotIndex] = pReplaceDesc;
+ bmtVT->SetMethodDescForSlot(wSlot, pImplBody);
+ pDeclData->pMD = pImplData->pMD;
+ pDeclData->wSlot = pImplData->wSlot;
+ bmtVT->ppSDVtable[wSlot] = pDeclData;
+
+ // increment the counter
+ (*pSlotIndex)++;
+
+ // we search for all duplicates
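+    // (A parent method can occupy several interop slots - its primary class slot plus
+    // any interface slots folded onto it - and every slot whose decl MD matches the
+    // replaced declaration must be retargeted to the new body.)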
+ for(USHORT i = wSlot+1; i < bmtVT->wCurrentVtableSlot; i++)
+ {
+ MethodDesc *pMD = bmtVT->ppSDVtable[i]->pMD;
+
+ MethodDesc* pRealDesc = bmtVT->ppSDVtable[i]->pDeclMD;
+
+ if(pRealDesc == pReplaceDesc)
+ {
+ // We do not want to override a body to another method impl
+ _ASSERTE(!pRealDesc->IsMethodImpl());
+
+            // Make sure we are not overriding another method impl
+ _ASSERTE(!(pMD != pImplBody && pMD->IsMethodImpl() && pMD->GetMethodTable() == NULL));
+
+ slots[*pSlotIndex] = i;
+ replaced[*pSlotIndex] = pRealDesc;
+ bmtVT->pVtable[i] = bmtVT->pVtable[wSlot];
+ bmtVT->pVtableMD[i] = bmtVT->pVtableMD[wSlot];
+ bmtVT->ppSDVtable[i] = bmtVT->ppSDVtable[wSlot];
+
+ // increment the counter
+ (*pSlotIndex)++;
+ }
+ }
+}
+
+//---------------------------------------------------------------------------------------
+VOID MethodTableBuilder::BuildInteropVTable_PropagateInheritance(
+ bmtVtable *bmtVT)
+{
+ STANDARD_VM_CONTRACT;
+
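+    // A slot is a duplicate when its shared interop data records a different (primary)
+    // slot index; mirror the primary slot's entries into the duplicate so both always
+    // resolve to the same implementation.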
+ for (DWORD i = 0; i < bmtVT->wCurrentVtableSlot; i++)
+ {
+        // For now, only propagate inheritance for method descs that are not interface MDs.
+        // This is not sufficient, but InterfaceImpls will complete the picture.
+ InteropMethodTableSlotData *pMDData = bmtVT->ppSDVtable[i];
+ MethodDesc* pMD = pMDData->pMD;
+ CONSISTENCY_CHECK_MSG(CheckPointer(pMD), "Could not resolve MethodDesc Slot!");
+
+ if(!pMD->IsInterface() && pMDData->GetSlot() != i)
+ {
+ pMDData->SetDuplicate();
+ bmtVT->pVtable[i] = bmtVT->pVtable[pMDData->GetSlot()];
+ bmtVT->pVtableMD[i] = bmtVT->pVtableMD[pMDData->GetSlot()];
+ bmtVT->ppSDVtable[i]->pMD = bmtVT->ppSDVtable[pMDData->GetSlot()]->pMD;
+ }
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+VOID MethodTableBuilder::FinalizeInteropVTable(
+ AllocMemTracker *pamTracker,
+ LoaderAllocator* pAllocator,
+ bmtVtable* bmtVT,
+ bmtInterfaceInfo* bmtInterface,
+ bmtTypeInfo* bmtType,
+ bmtProperties* bmtProp,
+ bmtMethodInfo* bmtMethod,
+ bmtErrorInfo* bmtError,
+ bmtParentInfo* bmtParent,
+ InteropMethodTableData **ppInteropMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ LoaderHeap *pHeap = pAllocator->GetLowFrequencyHeap();
+
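+    // Everything built so far lives in the thread's stacking allocator and dies with
+    // this builder, so each structure reachable from the returned InteropMethodTableData
+    // is copied below into the loader heap (tracked by pamTracker for backout on failure).
+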
+ // Allocate the overall structure
+ InteropMethodTableData *pMTData = (InteropMethodTableData *) pamTracker->Track(pHeap->AllocMem(S_SIZE_T(sizeof(InteropMethodTableData))));
+#ifdef LOGGING
+ g_sdStats.m_cbComInteropData += sizeof(InteropMethodTableData);
+#endif
+ memset(pMTData, 0, sizeof(InteropMethodTableData));
+
+ // Allocate the vtable
+ pMTData->cVTable = bmtVT->wCurrentVtableSlot;
+ if (pMTData->cVTable != 0)
+ {
+ pMTData->pVTable = (InteropMethodTableSlotData *)
+ pamTracker->Track(pHeap->AllocMem(S_SIZE_T(sizeof(InteropMethodTableSlotData)) * S_SIZE_T(pMTData->cVTable)));
+#ifdef LOGGING
+ g_sdStats.m_cbComInteropData += sizeof(InteropMethodTableSlotData) * pMTData->cVTable;
+#endif
+
+ { // Copy the vtable
+ for (DWORD i = 0; i < pMTData->cVTable; i++)
+ {
+ CONSISTENCY_CHECK(bmtVT->ppSDVtable[i]->wSlot != MethodTable::NO_SLOT);
+ pMTData->pVTable[i] = *bmtVT->ppSDVtable[i];
+ }
+ }
+ }
+
+ // Allocate the non-vtable
+ pMTData->cNonVTable = bmtVT->wCurrentNonVtableSlot;
+ if (pMTData->cNonVTable != 0)
+ {
+ pMTData->pNonVTable = (InteropMethodTableSlotData *)
+ pamTracker->Track(pHeap->AllocMem(S_SIZE_T(sizeof(InteropMethodTableSlotData)) * S_SIZE_T(pMTData->cNonVTable)));
+#ifdef LOGGING
+ g_sdStats.m_cbComInteropData += sizeof(InteropMethodTableSlotData) * pMTData->cNonVTable;
+#endif
+
+ { // Copy the non-vtable
+ for (DWORD i = 0; i < pMTData->cNonVTable; i++)
+ {
+                CONSISTENCY_CHECK(bmtVT->ppSDNonVtable[i]->wSlot != MethodTable::NO_SLOT);
+ pMTData->pNonVTable[i] = *bmtVT->ppSDNonVtable[i];
+ }
+ }
+ }
+
+ // Allocate the interface map
+ pMTData->cInterfaceMap = bmtInterface->wInterfaceMapSize;
+ if (pMTData->cInterfaceMap != 0)
+ {
+ pMTData->pInterfaceMap = (InterfaceInfo_t *)
+ pamTracker->Track(pHeap->AllocMem(S_SIZE_T(sizeof(InterfaceInfo_t)) * S_SIZE_T(pMTData->cInterfaceMap)));
+#ifdef LOGGING
+ g_sdStats.m_cbComInteropData += sizeof(InterfaceInfo_t) * pMTData->cInterfaceMap;
+#endif
+
+ { // Copy the interface map
+ for (DWORD i = 0; i < pMTData->cInterfaceMap; i++)
+ {
+ pMTData->pInterfaceMap[i] = bmtInterface->pInterfaceMap[i];
+ }
+ }
+ }
+
+ *ppInteropMT = pMTData;
+}
+
+//*******************************************************************************
+VOID MethodTableBuilder::EnumerateMethodImpls()
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+ IMDInternalImport *pMDInternalImport = bmtType->pMDImport;
+ DWORD rid, maxRidMD, maxRidMR;
+ hr = bmtMethodImpl->hEnumMethodImpl.EnumMethodImplInitNoThrow(GetCl());
+
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+
+ // This gets the count out of the metadata interface.
+ bmtMethodImpl->dwNumberMethodImpls = bmtMethodImpl->hEnumMethodImpl.EnumMethodImplGetCount();
+
+    // This is the first pass. In it we simply enumerate the token pairs and fill in
+    // the data structures. In addition, we'll sort the list and eliminate duplicates.
+ if (bmtMethodImpl->dwNumberMethodImpls > 0)
+ {
+ //
+ // Allocate the structures to keep track of the token pairs
+ //
+ bmtMethodImpl->rgMethodImplTokens = new (&GetThread()->m_MarshalAlloc)
+ bmtMethodImplInfo::MethodImplTokenPair[bmtMethodImpl->dwNumberMethodImpls];
+
+ // Iterate through each MethodImpl declared on this class
+ for (DWORD i = 0; i < bmtMethodImpl->dwNumberMethodImpls; i++)
+ {
+ // Grab the next set of body/decl tokens
+ hr = bmtMethodImpl->hEnumMethodImpl.EnumMethodImplNext(
+ &bmtMethodImpl->rgMethodImplTokens[i].methodBody,
+ &bmtMethodImpl->rgMethodImplTokens[i].methodDecl);
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ if (hr == S_FALSE)
+ {
+ // In the odd case that the enumerator fails before we've reached the total reported
+ // entries, let's reset the count and just break out. (Should we throw?)
+ bmtMethodImpl->dwNumberMethodImpls = i;
+ break;
+ }
+ }
+
+        // No need to do any sorting or duplicate elimination if there are fewer than two methodImpls
+ if (bmtMethodImpl->dwNumberMethodImpls > 1)
+ {
+ // Now sort
+ qsort(bmtMethodImpl->rgMethodImplTokens,
+ bmtMethodImpl->dwNumberMethodImpls,
+ sizeof(bmtMethodImplInfo::MethodImplTokenPair),
+ &bmtMethodImplInfo::MethodImplTokenPair::Compare);
+
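+            // Sorting by body token makes duplicate pairs adjacent (enabling the
+            // elimination below) and lets the placement code later walk all decls for
+            // one body before moving on to the next.
+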
+ // Now eliminate duplicates
+ for (DWORD i = 0; i < bmtMethodImpl->dwNumberMethodImpls - 1; i++)
+ {
+ CONSISTENCY_CHECK((i + 1) < bmtMethodImpl->dwNumberMethodImpls);
+
+ bmtMethodImplInfo::MethodImplTokenPair *e1 = &bmtMethodImpl->rgMethodImplTokens[i];
+ bmtMethodImplInfo::MethodImplTokenPair *e2 = &bmtMethodImpl->rgMethodImplTokens[i + 1];
+
+                // If the pair are equal, eliminate the first one, and reduce the total count by one.
+                if (bmtMethodImplInfo::MethodImplTokenPair::Equal(e1, e2))
+                {
+                    DWORD dwCopyNum = bmtMethodImpl->dwNumberMethodImpls - (i + 1);
+                    // The source and destination overlap whenever more than one entry is
+                    // shifted, so memmove (not memcpy) is required here.
+                    memmove(e1, e2, dwCopyNum * sizeof(bmtMethodImplInfo::MethodImplTokenPair));
+                    bmtMethodImpl->dwNumberMethodImpls--;
+                    CONSISTENCY_CHECK(bmtMethodImpl->dwNumberMethodImpls > 0);
+                    // Stay on the same index so that runs of three or more identical
+                    // pairs are fully collapsed.
+                    i--;
+                }
+ }
+ }
+ }
+
+ if (bmtMethodImpl->dwNumberMethodImpls != 0)
+ {
+ //
+ // Allocate the structures to keep track of the impl matches
+ //
+ bmtMethodImpl->pMethodDeclSubsts = new (&GetThread()->m_MarshalAlloc) Substitution[bmtMethodImpl->dwNumberMethodImpls];
+ bmtMethodImpl->rgEntries = new (&GetThread()->m_MarshalAlloc) bmtMethodImplInfo::Entry[bmtMethodImpl->dwNumberMethodImpls];
+
+ // These are used for verification
+ maxRidMD = pMDInternalImport->GetCountWithTokenKind(mdtMethodDef);
+ maxRidMR = pMDInternalImport->GetCountWithTokenKind(mdtMemberRef);
+
+ // Iterate through each MethodImpl declared on this class
+ for (DWORD i = 0; i < bmtMethodImpl->dwNumberMethodImpls; i++)
+ {
+ PCCOR_SIGNATURE pSigDecl = NULL;
+ PCCOR_SIGNATURE pSigBody = NULL;
+ ULONG cbSigDecl;
+ ULONG cbSigBody;
+ mdToken tkParent;
+
+ mdToken theBody, theDecl;
+ Substitution theDeclSubst(bmtType->pModule, SigPointer(), NULL); // this can get updated later below.
+
+ theBody = bmtMethodImpl->rgMethodImplTokens[i].methodBody;
+ theDecl = bmtMethodImpl->rgMethodImplTokens[i].methodDecl;
+
+ // IMPLEMENTATION LIMITATION: currently, we require that the body of a methodImpl
+ // belong to the current type. This is because we need to allocate a different
+ // type of MethodDesc for bodies that are part of methodImpls.
+ if (TypeFromToken(theBody) != mdtMethodDef)
+ {
+ mdToken theNewBody;
+ hr = FindMethodDeclarationForMethodImpl(bmtType->pMDImport,
+ GetCl(),
+ theBody,
+ &theNewBody);
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, IDS_CLASSLOAD_MI_ILLEGAL_BODY, mdMethodDefNil);
+ }
+ theBody = theNewBody;
+
+ // Make sure to update the stored token with the resolved token.
+ bmtMethodImpl->rgMethodImplTokens[i].methodBody = theBody;
+ }
+
+ if (TypeFromToken(theBody) != mdtMethodDef)
+ {
+ BuildMethodTableThrowException(BFA_METHODDECL_NOT_A_METHODDEF);
+ }
+ CONSISTENCY_CHECK(theBody == bmtMethodImpl->rgMethodImplTokens[i].methodBody);
+
+ //
+ // Now that the tokens of Decl and Body are obtained, do the MD validation
+ //
+
+ rid = RidFromToken(theDecl);
+
+ // Perform initial rudimentary validation of the token. Full token verification
+ // will be done in TestMethodImpl when placing the methodImpls.
+ if (TypeFromToken(theDecl) == mdtMethodDef)
+ {
+ // Decl must be valid token
+ if ((rid == 0)||(rid > maxRidMD))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_ILLEGAL_TOKEN_DECL);
+ }
+ // Get signature and length
+ if (FAILED(pMDInternalImport->GetSigOfMethodDef(theDecl, &cbSigDecl, &pSigDecl)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ }
+
+ // The token is not a MethodDef (likely a MemberRef)
+ else
+ {
+ // Decl must be valid token
+ if ((TypeFromToken(theDecl) != mdtMemberRef) || (rid == 0) || (rid > maxRidMR))
+ {
+ bmtError->resIDWhy = IDS_CLASSLOAD_MI_ILLEGAL_TOKEN_DECL;
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_ILLEGAL_TOKEN_DECL);
+ }
+
+ // Get signature and length
+ LPCSTR szDeclName;
+ if (FAILED(pMDInternalImport->GetNameAndSigOfMemberRef(theDecl, &pSigDecl, &cbSigDecl, &szDeclName)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ // Get parent
+ hr = pMDInternalImport->GetParentToken(theDecl,&tkParent);
+ if (FAILED(hr))
+ BuildMethodTableThrowException(hr, *bmtError);
+
+ theDeclSubst = Substitution(tkParent, bmtType->pModule, NULL);
+ }
+
+ // Perform initial rudimentary validation of the token. Full token verification
+ // will be done in TestMethodImpl when placing the methodImpls.
+ {
+ // Body must be valid token
+ rid = RidFromToken(theBody);
+ if ((rid == 0)||(rid > maxRidMD))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_ILLEGAL_TOKEN_BODY);
+ }
+ // Body's parent must be this class
+ hr = pMDInternalImport->GetParentToken(theBody,&tkParent);
+ if (FAILED(hr))
+ BuildMethodTableThrowException(hr, *bmtError);
+ if(tkParent != GetCl())
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_ILLEGAL_BODY);
+ }
+ }
+ // Decl's and Body's signatures must match
+ if ((pSigDecl != NULL) && (cbSigDecl != 0))
+ {
+ if (FAILED(pMDInternalImport->GetSigOfMethodDef(theBody,&cbSigBody, &pSigBody)) ||
+ (pSigBody == NULL) ||
+ (cbSigBody == 0))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_MISSING_SIG_BODY);
+ }
+ // Can't use memcmp because there may be two AssemblyRefs
+ // in this scope, pointing to the same assembly, etc.
+ if (!MetaSig::CompareMethodSigs(pSigDecl,
+ cbSigDecl,
+ bmtType->pModule,
+ &theDeclSubst,
+ pSigBody,
+ cbSigBody,
+ bmtType->pModule,
+ NULL))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_BODY_DECL_MISMATCH);
+ }
+ }
+ else
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_MISSING_SIG_DECL);
+ }
+
+ bmtMethodImpl->pMethodDeclSubsts[i] = theDeclSubst;
+
+ }
+ }
+}
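+
+#if 0 // Illustrative sketch (not compiled): the sort-then-dedup idiom used above, in a
+// self-contained form. TokenPair is a stand-in for bmtMethodImplInfo::MethodImplTokenPair;
+// only <stdlib.h> (qsort) is assumed. Unlike the in-place shift above, this variant
+// compacts unique entries forward and returns the new count.
+struct TokenPair { unsigned methodBody; unsigned methodDecl; };
+
+static int __cdecl ComparePairs(const void *elem1, const void *elem2)
+{
+    const TokenPair *e1 = (const TokenPair *)elem1;
+    const TokenPair *e2 = (const TokenPair *)elem2;
+    if (e1->methodBody != e2->methodBody) return (e1->methodBody < e2->methodBody) ? -1 : 1;
+    if (e1->methodDecl != e2->methodDecl) return (e1->methodDecl < e2->methodDecl) ? -1 : 1;
+    return 0;
+}
+
+// Sorts rg[0..count) by (body, decl) so duplicates become adjacent, then removes them.
+static unsigned SortAndDedupPairs(TokenPair *rg, unsigned count)
+{
+    if (count < 2)
+        return count;
+    qsort(rg, count, sizeof(TokenPair), ComparePairs);
+    unsigned out = 1;
+    for (unsigned i = 1; i < count; i++)
+    {
+        if (rg[i].methodBody != rg[out - 1].methodBody ||
+            rg[i].methodDecl != rg[out - 1].methodDecl)
+        {
+            rg[out++] = rg[i];
+        }
+    }
+    return out;
+}
+#endif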
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+// Enumerate this class's members
+//
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+VOID MethodTableBuilder::EnumerateClassMethods()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(bmtType));
+ PRECONDITION(CheckPointer(bmtMethod));
+ PRECONDITION(CheckPointer(bmtProp));
+ PRECONDITION(CheckPointer(bmtVT));
+ PRECONDITION(CheckPointer(bmtError));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DWORD i;
+ Thread *pThread = GetThread();
+ IMDInternalImport *pMDInternalImport = bmtType->pMDImport;
+ mdToken tok;
+ DWORD dwMemberAttrs;
+ BOOL fIsClassEnum = IsEnum();
+ BOOL fIsClassInterface = IsInterface();
+ BOOL fIsClassValueType = IsValueClass();
+#ifdef FEATURE_COMINTEROP
+ BOOL fIsClassComImport = IsComImport();
+#endif
+ BOOL fIsClassNotAbstract = (IsTdAbstract(GetAttrClass()) == 0);
+ PCCOR_SIGNATURE pMemberSignature;
+ ULONG cMemberSignature;
+
+ //
+ // Run through the method list and calculate the following:
+ // # methods.
+ // # "other" methods (i.e. static or private)
+ // # non-other methods
+ //
+
+ bmtVT->dwMaxVtableSize = 0; // we'll fix this later to be the real upper bound on vtable size
+ bmtMethod->cMethods = 0;
+
+ hr = bmtMethod->hEnumMethod.EnumInitNoThrow(mdtMethodDef, GetCl());
+ if (FAILED(hr))
+ {
+ _ASSERTE(!"Cannot count memberdefs");
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+
+ // Allocate an array to contain the method tokens as well as information about the methods.
+ bmtMethod->cMethAndGaps = bmtMethod->hEnumMethod.EnumGetCount();
+
+ bmtMethod->rgMethodTokens = new (&pThread->m_MarshalAlloc) mdToken[bmtMethod->cMethAndGaps];
+ bmtMethod->rgMethodRVA = new (&pThread->m_MarshalAlloc) ULONG[bmtMethod->cMethAndGaps];
+ bmtMethod->rgMethodAttrs = new (&pThread->m_MarshalAlloc) DWORD[bmtMethod->cMethAndGaps];
+ bmtMethod->rgMethodImplFlags = new (&pThread->m_MarshalAlloc) DWORD[bmtMethod->cMethAndGaps];
+ bmtMethod->rgMethodClassifications = new (&pThread->m_MarshalAlloc) DWORD[bmtMethod->cMethAndGaps];
+
+ bmtMethod->rgszMethodName = new (&pThread->m_MarshalAlloc) LPCSTR[bmtMethod->cMethAndGaps];
+
+ bmtMethod->rgMethodImpl = new (&pThread->m_MarshalAlloc) BYTE[bmtMethod->cMethAndGaps];
+ bmtMethod->rgMethodType = new (&pThread->m_MarshalAlloc) BYTE[bmtMethod->cMethAndGaps];
+
+ enum { SeenCtor = 1, SeenInvoke = 2, SeenBeginInvoke = 4, SeenEndInvoke = 8};
+ unsigned delegateMethodsSeen = 0;
+
+ for (i = 0; i < bmtMethod->cMethAndGaps; i++)
+ {
+ ULONG dwMethodRVA;
+ DWORD dwImplFlags;
+ DWORD Classification;
+ LPSTR strMethodName;
+
+ //
+ // Go to the next method and retrieve its attributes.
+ //
+
+ bmtMethod->hEnumMethod.EnumNext(&tok);
+ DWORD rid = RidFromToken(tok);
+ if ((rid == 0)||(rid > pMDInternalImport->GetCountWithTokenKind(mdtMethodDef)))
+ {
+ BuildMethodTableThrowException(BFA_METHOD_TOKEN_OUT_OF_RANGE);
+ }
+
+ if (FAILED(pMDInternalImport->GetMethodDefProps(tok, &dwMemberAttrs)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ if (IsMdRTSpecialName(dwMemberAttrs) || IsMdVirtual(dwMemberAttrs) || IsDelegate())
+ {
+ if (FAILED(pMDInternalImport->GetNameOfMethodDef(tok, (LPCSTR *)&strMethodName)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ if (IsStrLongerThan(strMethodName,MAX_CLASS_NAME))
+ {
+ BuildMethodTableThrowException(BFA_METHOD_NAME_TOO_LONG);
+ }
+ }
+ else
+ strMethodName = NULL;
+
+ HENUMInternalHolder hEnumTyPars(pMDInternalImport);
+ hr = hEnumTyPars.EnumInitNoThrow(mdtGenericParam, tok);
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+
+ WORD numGenericMethodArgs = (WORD) hEnumTyPars.EnumGetCount();
+
+ // We do not want to support context-bound objects with generic methods.
+ if (IsContextful() && numGenericMethodArgs > 0)
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_CONTEXT_BOUND_GENERIC_METHOD);
+ }
+
+ if (numGenericMethodArgs != 0)
+ {
+ for (unsigned methIdx = 0; methIdx < numGenericMethodArgs; methIdx++)
+ {
+ mdGenericParam tkTyPar;
+ pMDInternalImport->EnumNext(&hEnumTyPars, &tkTyPar);
+ DWORD flags;
+ if (FAILED(pMDInternalImport->GetGenericParamProps(tkTyPar, NULL, &flags, NULL, NULL, NULL)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ if (0 != (flags & ~(gpVarianceMask | gpSpecialConstraintMask)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ switch (flags & gpVarianceMask)
+ {
+ case gpNonVariant:
+ break;
+
+ case gpCovariant: // intentional fallthru
+ case gpContravariant:
+ BuildMethodTableThrowException(VLDTR_E_GP_ILLEGAL_VARIANT_MVAR);
+ break;
+
+ default:
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+
+ }
+ }
+ }
+
+ //
+ // We need to check if there are any gaps in the vtable. These are
+ // represented by methods with the mdRTSpecialName flag and a name of the form
+ // _VtblGap_nnn (to represent nnn empty slots) or _VtblGap (to represent a
+ // single empty slot).
+ //
+
+ if (IsMdRTSpecialName(dwMemberAttrs))
+ {
+ PREFIX_ASSUME(strMethodName != NULL); // if we've gotten here we've called GetNameOfMethodDef
+
+ // The slot is special, but it might not be a vtable spacer. To
+ // determine that we must look at the name.
+ if (strncmp(strMethodName, "_VtblGap", 8) == 0)
+ {
+ //
+ // This slot doesn't really exist, don't add it to the method
+ // table. Instead it represents one or more empty slots, encoded
+ // in the method name. Locate the beginning of the count in the
+ // name. There are these points to consider:
+ // There may be no count present at all (in which case the
+ // count is taken as one).
+ // There may be an additional count just after Gap but before
+ // the '_'. We ignore this.
+ //
+
+ LPCSTR pos = strMethodName + 8;
+
+ // Skip optional number.
+ while (IS_DIGIT(*pos))
+ pos++;
+
+ WORD n = 0;
+
+ // Check for presence of count.
+ if (*pos == '\0')
+ n = 1;
+ else
+ {
+ if (*pos != '_')
+ {
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT,
+ IDS_CLASSLOAD_BADSPECIALMETHOD,
+ tok);
+ }
+
+ // Skip '_'.
+ pos++;
+
+ // Read count.
+ bool fReadAtLeastOneDigit = false;
+ while (IS_DIGIT(*pos))
+ {
+ _ASSERTE(n < 6552); // 10 * n + 9 must still fit in a WORD
+ n *= 10;
+ n += DIGIT_TO_INT(*pos);
+ pos++;
+ fReadAtLeastOneDigit = true;
+ }
+
+ // Check for end of name.
+ if (*pos != '\0' || !fReadAtLeastOneDigit)
+ {
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT,
+ IDS_CLASSLOAD_BADSPECIALMETHOD,
+ tok);
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // Record vtable gap in mapping list.
+ if (GetHalfBakedClass()->GetSparseCOMInteropVTableMap() == NULL)
+ GetHalfBakedClass()->SetSparseCOMInteropVTableMap(new SparseVTableMap());
+
+ GetHalfBakedClass()->GetSparseCOMInteropVTableMap()->RecordGap(NumDeclaredMethods(), n);
+
+ bmtProp->fSparse = true;
+#endif // FEATURE_COMINTEROP
+ continue;
+ }
+
+ }
+
+
+ //
+ // This is a real method so add it to the enumeration of methods. We now need to retrieve
+ // information on the method and store it for later use.
+ //
+ if (FAILED(pMDInternalImport->GetMethodImplProps(tok, &dwMethodRVA, &dwImplFlags)))
+ {
+ BuildMethodTableThrowException(BFA_INVALID_TOKEN);
+ }
+ //
+ // But first - minimal flags validity checks
+ //
+ // No methods in Enums!
+ if (fIsClassEnum)
+ {
+ BuildMethodTableThrowException(BFA_METHOD_IN_A_ENUM);
+ }
+ // RVA : 0
+ if (dwMethodRVA != 0)
+ {
+#ifdef FEATURE_COMINTEROP
+ if(fIsClassComImport)
+ {
+ BuildMethodTableThrowException(BFA_METHOD_WITH_NONZERO_RVA);
+ }
+#endif // FEATURE_COMINTEROP
+ if(IsMdAbstract(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_ABSTRACT_METHOD_WITH_RVA);
+ }
+ if(IsMiRuntime(dwImplFlags))
+ {
+ BuildMethodTableThrowException(BFA_RUNTIME_METHOD_WITH_RVA);
+ }
+ if(IsMiInternalCall(dwImplFlags))
+ {
+ BuildMethodTableThrowException(BFA_INTERNAL_METHOD_WITH_RVA);
+ }
+ }
+
+ // Abstract / not abstract
+ if(IsMdAbstract(dwMemberAttrs))
+ {
+ if(fIsClassNotAbstract)
+ {
+ BuildMethodTableThrowException(BFA_AB_METHOD_IN_AB_CLASS);
+ }
+ if(!IsMdVirtual(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_NONVIRT_AB_METHOD);
+ }
+ }
+ else if(fIsClassInterface && strMethodName &&
+ (strcmp(strMethodName, COR_CCTOR_METHOD_NAME)))
+ {
+ BuildMethodTableThrowException(BFA_NONAB_NONCCTOR_METHOD_ON_INT);
+ }
+
+ // Virtual / not virtual
+ if(IsMdVirtual(dwMemberAttrs))
+ {
+ if(IsMdPinvokeImpl(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_VIRTUAL_PINVOKE_METHOD);
+ }
+ if(IsMdStatic(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_VIRTUAL_STATIC_METHOD);
+ }
+ if(strMethodName && (0==strcmp(strMethodName, COR_CTOR_METHOD_NAME)))
+ {
+ BuildMethodTableThrowException(BFA_VIRTUAL_INSTANCE_CTOR);
+ }
+ }
+
+ // Some interface checks.
+ if (IsInterface())
+ {
+ if (IsMdVirtual(dwMemberAttrs))
+ {
+ if (!IsMdAbstract(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_VIRTUAL_NONAB_INT_METHOD);
+ }
+ }
+ else
+ {
+ // Instance field/method
+ if (!IsMdStatic(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_NONVIRT_INST_INT_METHOD);
+ }
+ }
+ }
+
+ // No synchronized methods in ValueTypes
+ if(fIsClassValueType && IsMiSynchronized(dwImplFlags))
+ {
+ BuildMethodTableThrowException(BFA_SYNC_METHOD_IN_VT);
+ }
+
+ // Global methods:
+ if(IsGlobalClass())
+ {
+ if(!IsMdStatic(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_NONSTATIC_GLOBAL_METHOD);
+ }
+ if (strMethodName) //<TODO>@todo: investigate mc++ generating null name</TODO>
+ {
+ if(0==strcmp(strMethodName, COR_CTOR_METHOD_NAME))
+ {
+ BuildMethodTableThrowException(BFA_GLOBAL_INST_CTOR);
+ }
+ }
+ }
+ //@GENERICS:
+ // Generic methods or methods in generic classes
+ // may not be part of a COM Import class, PInvoke, internal call.
+ if ((numGenericMethodArgs != 0) &&
+ (
+#ifdef FEATURE_COMINTEROP
+ fIsClassComImport ||
+ bmtProp->fComEventItfType ||
+#endif // FEATURE_COMINTEROP
+ IsMdPinvokeImpl(dwMemberAttrs) ||
+ IsMiInternalCall(dwImplFlags)))
+ {
+ BuildMethodTableThrowException(BFA_BAD_PLACE_FOR_GENERIC_METHOD);
+ }
+
+ // Generic methods may not be marked "runtime". However note that
+ // methods in generic delegate classes are, hence we don't apply this to
+ // methods in generic classes in general.
+ if (numGenericMethodArgs != 0 && IsMiRuntime(dwImplFlags))
+ {
+ BuildMethodTableThrowException(BFA_GENERIC_METHOD_RUNTIME_IMPL);
+ }
+
+ // Signature validation
+ if (FAILED(pMDInternalImport->GetSigOfMethodDef(tok,&cMemberSignature, &pMemberSignature)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ hr = validateTokenSig(tok,pMemberSignature,cMemberSignature,dwMemberAttrs,pMDInternalImport);
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, BFA_BAD_SIGNATURE, mdMethodDefNil);
+ }
+
+ //
+ // Determine the method's classification.
+ //
+
+ if (IsReallyMdPinvokeImpl(dwMemberAttrs) || IsMiInternalCall(dwImplFlags))
+ {
+ hr = NDirect::HasNAT_LAttribute(pMDInternalImport, tok, dwMemberAttrs);
+
+ // There was a problem querying for the attribute
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, IDS_CLASSLOAD_BADPINVOKE, tok);
+ }
+
+ // The attribute is not present
+ if (hr == S_FALSE)
+ {
+#ifdef FEATURE_COMINTEROP
+ if (fIsClassComImport || bmtProp->fComEventItfType)
+ {
+ // tlbimported component
+ if (IsMdRTSpecialName(dwMemberAttrs))
+ {
+ // constructor is special
+ Classification = mcFCall;
+ }
+ else
+ {
+ // For tlbimported components we have some
+ // method descs in the class which are just used
+ // for handling methodimpls of all interface methods
+ Classification = mcComInterop;
+ }
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ if (dwMethodRVA == 0)
+ Classification = mcFCall;
+ else
+ Classification = mcNDirect;
+ }
+ // The NAT_L attribute is present, marking this method as NDirect
+ else
+ {
+ CONSISTENCY_CHECK(hr == S_OK);
+ Classification = mcNDirect;
+ }
+ }
+ else if (IsMiRuntime(dwImplFlags))
+ {
+ // currently the only runtime implemented functions are delegate instance methods
+ if (!IsDelegate() || IsMdStatic(dwMemberAttrs) || IsMdAbstract(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_BAD_RUNTIME_IMPL);
+ }
+
+ unsigned newDelegateMethodSeen = 0;
+
+ if (IsMdRTSpecialName(dwMemberAttrs)) // .ctor
+ {
+ if (strcmp(strMethodName, COR_CTOR_METHOD_NAME) != 0 || IsMdVirtual(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_BAD_FLAGS_ON_DELEGATE);
+ }
+ newDelegateMethodSeen = SeenCtor;
+ Classification = mcFCall;
+ }
+ else
+ {
+ if (strcmp(strMethodName, "Invoke") == 0)
+ newDelegateMethodSeen = SeenInvoke;
+ else if (strcmp(strMethodName, "BeginInvoke") == 0)
+ newDelegateMethodSeen = SeenBeginInvoke;
+ else if (strcmp(strMethodName, "EndInvoke") == 0)
+ newDelegateMethodSeen = SeenEndInvoke;
+ else
+ {
+ BuildMethodTableThrowException(BFA_UNKNOWN_DELEGATE_METHOD);
+ }
+ Classification = mcEEImpl;
+ }
+
+ // If we get here we have either set newDelegateMethodSeen or we have thrown a BMT exception
+ _ASSERTE(newDelegateMethodSeen != 0);
+
+ if ((delegateMethodsSeen & newDelegateMethodSeen) != 0)
+ {
+ BuildMethodTableThrowException(BFA_DUPLICATE_DELEGATE_METHOD);
+ }
+
+ delegateMethodsSeen |= newDelegateMethodSeen;
+ }
+ else if (numGenericMethodArgs != 0)
+ {
+ //We use an instantiated method desc to represent a generic method
+ Classification = mcInstantiated;
+ }
+ else if (fIsClassInterface)
+ {
+#ifdef FEATURE_COMINTEROP
+ if (IsMdStatic(dwMemberAttrs))
+ {
+ // Static methods in interfaces need nothing special.
+ Classification = mcIL;
+ }
+ else if (bmtProp->fIsMngStandardItf)
+ {
+ // If the interface is a standard managed interface then allocate space for an FCall method desc.
+ Classification = mcFCall;
+ }
+ else
+ {
+ // If COM interop is supported then all other interface MDs may be
+ // accessed via COM interop <TODO> mcComInterop MDs are BIG -
+ // this is very often a waste of space </TODO>
+ Classification = mcComInterop;
+ }
+#else // !FEATURE_COMINTEROP
+ // This codepath is used by remoting
+ Classification = mcIL;
+#endif // !FEATURE_COMINTEROP
+ }
+ else
+ {
+ Classification = mcIL;
+ }
+
+
+#ifdef _DEBUG
+ // We don't allow stack based declarative security on ecalls, fcalls and
+ // other special purpose methods implemented by the EE (the interceptor
+ // we use doesn't play well with non-jitted stubs).
+ if ((Classification == mcFCall || Classification == mcEEImpl) &&
+ (IsMdHasSecurity(dwMemberAttrs) || IsTdHasSecurity(GetAttrClass())))
+ {
+ DWORD dwSecFlags;
+ DWORD dwNullDeclFlags;
+
+ if (IsTdHasSecurity(GetAttrClass()) &&
+ SUCCEEDED(Security::GetDeclarationFlags(pMDInternalImport, GetCl(), &dwSecFlags, &dwNullDeclFlags)))
+ {
+ CONSISTENCY_CHECK_MSG(!(dwSecFlags & ~dwNullDeclFlags & DECLSEC_RUNTIME_ACTIONS),
+ "Cannot add stack based declarative security to a class containing an ecall/fcall/special method.");
+ }
+ if (IsMdHasSecurity(dwMemberAttrs) &&
+ SUCCEEDED(Security::GetDeclarationFlags(pMDInternalImport, tok, &dwSecFlags, &dwNullDeclFlags)))
+ {
+ CONSISTENCY_CHECK_MSG(!(dwSecFlags & ~dwNullDeclFlags & DECLSEC_RUNTIME_ACTIONS),
+ "Cannot add stack based declarative security to an ecall/fcall/special method.");
+ }
+ }
+#endif // _DEBUG
+
+ // Generic methods should always be mcInstantiated
+ if (!((numGenericMethodArgs == 0) || ((Classification & mdcClassification) == mcInstantiated)))
+ {
+ BuildMethodTableThrowException(BFA_GENERIC_METHODS_INST);
+ }
+ // Count how many overrides this method does. All method bodies are defined
+ // on this type, so we can just compare the tok with the body tokens found
+ // in the overrides.
+ for(DWORD impls = 0; impls < bmtMethodImpl->dwNumberMethodImpls; impls++) {
+ if(bmtMethodImpl->rgMethodImplTokens[impls].methodBody == tok) {
+ Classification |= mdcMethodImpl;
+ break;
+ }
+ }
+
+ // For delegates we don't allow any non-runtime implemented bodies
+ // for any of the four special methods
+ if (IsDelegate() && !IsMiRuntime(dwImplFlags))
+ {
+ if ((strcmp(strMethodName, COR_CTOR_METHOD_NAME) == 0) ||
+ (strcmp(strMethodName, "Invoke") == 0) ||
+ (strcmp(strMethodName, "BeginInvoke") == 0) ||
+ (strcmp(strMethodName, "EndInvoke") == 0) )
+ {
+ BuildMethodTableThrowException(BFA_ILLEGAL_DELEGATE_METHOD);
+ }
+ }
+
+ //
+ // Compute the type & other info
+ //
+
+ // Set the index into the storage locations
+ BYTE impl;
+ if (Classification & mdcMethodImpl)
+ {
+ impl = METHOD_IMPL;
+ }
+ else
+ {
+ impl = METHOD_IMPL_NOT;
+ }
+
+ BYTE type;
+ if ((Classification & mdcClassification) == mcNDirect)
+ {
+ type = METHOD_TYPE_NDIRECT;
+ }
+ else if ((Classification & mdcClassification) == mcFCall)
+ {
+ type = METHOD_TYPE_FCALL;
+ }
+ else if ((Classification & mdcClassification) == mcEEImpl)
+ {
+ type = METHOD_TYPE_EEIMPL;
+ }
+#ifdef FEATURE_COMINTEROP
+ else if ((Classification & mdcClassification) == mcComInterop)
+ {
+ type = METHOD_TYPE_INTEROP;
+ }
+#endif // FEATURE_COMINTEROP
+ else if ((Classification & mdcClassification) == mcInstantiated)
+ {
+ type = METHOD_TYPE_INSTANTIATED;
+ }
+ else
+ {
+ type = METHOD_TYPE_NORMAL;
+ }
+
+ //
+ // Store the method and the information we have gathered on it in the metadata info structure.
+ //
+
+ bmtMethod->SetMethodData(NumDeclaredMethods(),
+ tok,
+ dwMemberAttrs,
+ dwMethodRVA,
+ dwImplFlags,
+ Classification,
+ strMethodName,
+ impl,
+ type);
+
+ IncNumDeclaredMethods();
+
+ //
+ // Update the count of the various types of methods.
+ //
+
+ bmtVT->dwMaxVtableSize++;
+ }
+
+ // Check to see that we have all of the required delegate methods (ECMA 13.6 Delegates)
+ if (IsDelegate())
+ {
+ // Do we have all four special delegate methods
+ // or just the two special delegate methods
+ if ((delegateMethodsSeen != (SeenCtor | SeenInvoke | SeenBeginInvoke | SeenEndInvoke)) &&
+ (delegateMethodsSeen != (SeenCtor | SeenInvoke)) )
+ {
+ BuildMethodTableThrowException(BFA_MISSING_DELEGATE_METHOD);
+ }
+ }
+
+ if (i != bmtMethod->cMethAndGaps)
+ {
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_METHOD_COUNT, mdTokenNil);
+ }
+
+ bmtMethod->hEnumMethod.EnumReset();
+
+#ifdef FEATURE_COMINTEROP
+ //
+ // If the interface is sparse, we need to finalize the mapping list by
+ // telling it how many real methods we found.
+ //
+
+ if (bmtProp->fSparse)
+ {
+ GetHalfBakedClass()->GetSparseCOMInteropVTableMap()->FinalizeMapping(NumDeclaredMethods());
+ }
+#endif // FEATURE_COMINTEROP
+}
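+
+#if 0 // Illustrative sketch (not compiled): the "_VtblGap" name parsing above as a
+// self-contained helper. Returns the number of empty slots the name encodes, or -1
+// if the name is not a well-formed vtable-gap name; only <string.h> is assumed.
+// "_VtblGap" alone means one slot; "_VtblGap<m>_<n>" means n slots (the first
+// number, m, is ignored, as in the loop above).
+static int ParseVtblGapCount(const char *name)
+{
+    if (strncmp(name, "_VtblGap", 8) != 0)
+        return -1;                            // not a gap method at all
+    const char *pos = name + 8;
+    while (*pos >= '0' && *pos <= '9')        // skip the optional first number
+        pos++;
+    if (*pos == '\0')
+        return 1;                             // no explicit count: a single slot
+    if (*pos != '_')
+        return -1;                            // malformed
+    pos++;                                    // skip '_'
+    int n = 0;
+    bool fSawDigit = false;
+    while (*pos >= '0' && *pos <= '9')
+    {
+        n = n * 10 + (*pos - '0');
+        pos++;
+        fSawDigit = true;
+    }
+    return (*pos == '\0' && fSawDigit) ? n : -1;
+}
+#endif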
+
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+// Determines the maximum size of the vtable and allocates the temporary storage arrays
+// Also copies the parent's vtable into the working vtable.
+//
+VOID MethodTableBuilder::AllocateMethodWorkingMemory()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(bmtDomain));
+ PRECONDITION(CheckPointer(bmtMethod));
+ PRECONDITION(CheckPointer(bmtVT));
+ PRECONDITION(CheckPointer(bmtInterface));
+ PRECONDITION(CheckPointer(bmtParent));
+
+ }
+ CONTRACTL_END;
+
+ DWORD i;
+ Thread *pThread = GetThread();
+
+ // Allocate a MethodDesc* for each method (needed later when doing interfaces), and a FieldDesc* for each field
+ bmtMethod->ppMethodDescList = new (&pThread->m_MarshalAlloc) MethodDesc*[NumDeclaredMethods()];
+ ZeroMemory(bmtMethod->ppMethodDescList, NumDeclaredMethods() * sizeof(MethodDesc *));
+
+ // Create a temporary function table (we don't know how large the vtable will be until the very end,
+ // since duplicated interfaces are stored at the end of it). Calculate an upper bound.
+ //
+ // Upper bound is: The parent's class vtable size, plus every method declared in
+ // this class, plus the size of every interface we implement
+ //
+ // In the case of value classes, we add # InstanceMethods again, since we have boxed and unboxed versions
+ // of every vtable method.
+ //
+ if (IsValueClass())
+ {
+ bmtVT->dwMaxVtableSize += NumDeclaredMethods();
+ bmtMethod->ppUnboxMethodDescList = new (&pThread->m_MarshalAlloc) MethodDesc*[NumDeclaredMethods()];
+ ZeroMemory(bmtMethod->ppUnboxMethodDescList, NumDeclaredMethods() * sizeof(MethodDesc*));
+ }
+
+ // sanity check
+ _ASSERTE(bmtParent->pParentMethodTable == NULL ||
+ (bmtInterface->wInterfaceMapSize - bmtParent->pParentMethodTable->GetNumInterfaces()) >= 0);
+
+ // add parent vtable size
+ bmtVT->dwMaxVtableSize += bmtVT->wCurrentVtableSlot;
+
+ for (i = 0; i < bmtInterface->wInterfaceMapSize; i++)
+ {
+ // We double the interface size because we may end up duplicating the Interface for MethodImpls
+ bmtVT->dwMaxVtableSize += (bmtInterface->pInterfaceMap[i].m_pMethodTable->GetNumVirtuals() * 2);
+ }
+
+ // Allocate the temporary vtable
+ bmtVT->pVtable = new (&pThread->m_MarshalAlloc)PCODE [bmtVT->dwMaxVtableSize];
+ ZeroMemory(bmtVT->pVtable, bmtVT->dwMaxVtableSize * sizeof(PCODE));
+ bmtVT->pVtableMD = new (&pThread->m_MarshalAlloc) MethodDesc*[bmtVT->dwMaxVtableSize];
+ ZeroMemory(bmtVT->pVtableMD, bmtVT->dwMaxVtableSize * sizeof(MethodDesc*));
+
+ // Allocate the temporary non-vtable
+ bmtVT->pNonVtableMD = new (&pThread->m_MarshalAlloc) MethodDesc*[NumDeclaredMethods()];
+ ZeroMemory(bmtVT->pNonVtableMD, sizeof(MethodDesc*) * NumDeclaredMethods());
+
+ if (bmtParent->pParentMethodTable != NULL)
+ {
+ // Copy parent's vtable into our "temp" vtable
+ {
+ MethodTable::MethodIterator it(bmtParent->pParentMethodTable);
+ for (;it.IsValid() && it.IsVirtual(); it.Next()) {
+ DWORD slot = it.GetSlotNumber();
+ bmtVT->pVtable[slot] = it.GetTarget().GetTarget();
+ bmtVT->pVtableMD[slot] = NULL; // MethodDescs are resolved lazily
+ }
+ bmtVT->pParentMethodTable = bmtParent->pParentMethodTable;
+ }
+
+#if 0
+ // @<TODO>todo: Figure out the right way to override Equals for value
+ // types only.
+ //
+ // This is broken because
+ // (a) g_pObjectClass->FindMethod("Equals", &gsig_IM_Obj_RetBool); will return
+ // the EqualsValue method
+ // (b) When mscorlib has been preloaded (and thus the munge already done
+ // ahead of time), we cannot easily find both methods
+ // to compute EqualsAddr & EqualsSlot
+ //
+ // For now, the Equals method has a runtime check to see if it's
+ // comparing value types.
+ //</TODO>
+
+ // If it is a value type, override a few of the base class methods.
+ if (IsValueClass())
+ {
+ static WORD EqualsSlot;
+
+ // If we haven't been through here yet, get some stuff from the Object class definition.
+ if (EqualsSlot == NULL)
+ {
+ // Get the slot of the Equals method.
+ MethodDesc *pEqualsMD = g_pObjectClass->FindMethod("Equals", &gsig_IM_Obj_RetBool);
+ THROW_BAD_FORMAT_MAYBE(pEqualsMD != NULL, 0, this);
+ EqualsSlot = pEqualsMD->GetSlot();
+
+ // Get the address of the EqualsValue method.
+ MethodDesc *pEqualsValueMD = g_pObjectClass->FindMethod("EqualsValue", &gsig_IM_Obj_RetBool);
+ THROW_BAD_FORMAT_MAYBE(pEqualsValueMD != NULL, 0, this);
+
+ // Patch the EqualsValue method desc in a dangerous way to
+ // look like the Equals method desc.
+ pEqualsValueMD->SetSlot(EqualsSlot);
+ pEqualsValueMD->SetMemberDef(pEqualsMD->GetMemberDef());
+ }
+
+ // Override the valuetype "Equals" with "EqualsValue".
+ bmtVT->SetMethodDescForSlot(EqualsSlot, EqualsSlot);
+ }
+#endif // 0
+ }
+
+ if (NumDeclaredMethods() > 0)
+ {
+ bmtParent->ppParentMethodDescBuf = (MethodDesc **)
+ pThread->m_MarshalAlloc.Alloc(S_UINT32(2) * S_UINT32(NumDeclaredMethods()) *
+ S_UINT32(sizeof(MethodDesc*)));
+
+ bmtParent->ppParentMethodDescBufPtr = bmtParent->ppParentMethodDescBuf;
+ }
+}
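+
+#if 0 // Illustrative sketch (not compiled): the vtable upper bound computed above,
+// written out as plain arithmetic. The parameter names are stand-ins for the
+// builder's fields, not real APIs.
+static DWORD MaxVtableSizeUpperBound(DWORD parentVtableSlots, DWORD declaredMethods,
+                                     BOOL isValueClass, DWORD totalInterfaceVirtuals)
+{
+    DWORD dwMax = parentVtableSlots + declaredMethods;
+    if (isValueClass)
+        dwMax += declaredMethods;             // boxed and unboxed entry points
+    dwMax += 2 * totalInterfaceVirtuals;      // interfaces may be duplicated for MethodImpls
+    return dwMax;
+}
+#endif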
+
+//*******************************************************************************
+//
+// Find a method in this class hierarchy - used ONLY by the loader during layout. Do not use at runtime.
+//
+// *ppMemberSignature must be NULL on entry - it and *pcMemberSignature may or may not be filled out
+//
+// ppMethodDesc will be filled out with NULL if no matching method in the hierarchy is found.
+//
+// Returns a failing HRESULT if there was an error of some kind.
+//
+// pMethodConstraintsMatch receives the result of comparing the method constraints.
+HRESULT MethodTableBuilder::LoaderFindMethodInClass(
+ LPCUTF8 pszMemberName,
+ Module* pModule,
+ mdMethodDef mdToken,
+ MethodDesc ** ppMethodDesc,
+ PCCOR_SIGNATURE * ppMemberSignature,
+ DWORD * pcMemberSignature,
+ DWORD dwHashName,
+ BOOL * pMethodConstraintsMatch)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(bmtParent));
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(ppMethodDesc));
+ PRECONDITION(CheckPointer(ppMemberSignature));
+ PRECONDITION(CheckPointer(pcMemberSignature));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ MethodHashEntry *pEntry;
+ DWORD dwNameHashValue;
+
+ _ASSERTE(pModule);
+ _ASSERTE(*ppMemberSignature == NULL);
+
+ // No method found yet
+ *ppMethodDesc = NULL;
+
+ // Have we created a hash of all the methods in the class chain?
+ if (bmtParent->pParentMethodHash == NULL)
+ {
+ // There may be such a method, so we will now create a hash table to reduce the pain for
+ // further lookups
+
+ // <TODO> Are we really sure that this is worth doing? </TODO>
+ bmtParent->pParentMethodHash = CreateMethodChainHash(bmtParent->pParentMethodTable);
+ }
+
+ // Look to see if the method exists in the parent hash
+ pEntry = bmtParent->pParentMethodHash->Lookup(pszMemberName, dwHashName);
+ if (pEntry == NULL)
+ {
+ return S_OK; // No method by this name exists in the hierarchy
+ }
+
+ // Get signature of the method we're searching for - we will need this to verify an exact name-signature match
+ IfFailRet(pModule->GetMDImport()->GetSigOfMethodDef(
+ mdToken,
+ pcMemberSignature,
+ ppMemberSignature));
+
+ // Hash value we are looking for in the chain
+ dwNameHashValue = pEntry->m_dwHashValue;
+
+ // We've found a method with the same name, but the signature may be different
+ // Traverse the chain of all methods with this name
+ while (1)
+ {
+ PCCOR_SIGNATURE pHashMethodSig = NULL;
+ DWORD cHashMethodSig = 0;
+ Substitution * pSubst = NULL;
+ MethodDesc * entryDesc = pEntry->m_pDesc;
+ MethodTable * entryMT = entryDesc->GetMethodTable();
+ MethodTable * entryCanonMT = entryMT->GetCanonicalMethodTable();
+
+ // If entry is in a parameterized type, its signature may need to be instantiated all the way down the chain
+ // To understand why consider the following example:
+ // class C<T> { void m(T) { ...body... } }
+ // class D<T> : C<T[]> { /* inherits m with signature void m(T[]) */ }
+ // class E<T> : D<List<T>> { void m(List<T>[]) { ... body... } }
+ // Now suppose that we've got the signature of E::m in our hand and are comparing it with the methoddesc for C.m
+ // They're not syntactically the same but are if you instantiate "all the way up"
+ // Possible optimization: don't bother constructing the substitution if the signature of pEntry is closed
+ if (entryCanonMT->GetNumGenericArgs() > 0)
+ {
+ MethodTable *here = GetHalfBakedMethodTable();
+ _ASSERTE(here->GetModule());
+ MethodTable *pParent = bmtParent->pParentMethodTable;
+
+ for (;;)
+ {
+ Substitution *newSubst = new Substitution;
+ *newSubst = here->GetSubstitutionForParent(pSubst);
+ pSubst = newSubst;
+
+ here = pParent->GetCanonicalMethodTable();
+ if (entryCanonMT == here)
+ break;
+ pParent = pParent->GetParentMethodTable();
+ _ASSERT(pParent != NULL);
+ }
+ }
+
+ // Get sig of entry in hash chain
+ entryDesc->GetSig(&pHashMethodSig, &cHashMethodSig);
+
+ // Note instantiation info
+ {
+ hr = MetaSig::CompareMethodSigsNT(*ppMemberSignature, *pcMemberSignature, pModule, NULL,
+ pHashMethodSig, cHashMethodSig, entryDesc->GetModule(), pSubst);
+
+ if (hr == S_OK)
+ { // Found a match
+ *ppMethodDesc = entryDesc;
+ // Check the constraints are consistent,
+ // and return the result to the caller.
+ // We do this here to avoid recalculating pSubst.
+ *pMethodConstraintsMatch =
+ MetaSig::CompareMethodConstraints(NULL, pModule, mdToken, pSubst,
+ entryDesc->GetModule(),
+ entryDesc->GetMemberDef());
+ }
+
+ if (pSubst != NULL)
+ {
+ pSubst->DeleteChain();
+ pSubst = NULL;
+ }
+
+ if (FAILED(hr) || hr == S_OK)
+ {
+ return hr;
+ }
+ }
+
+ do
+ { // Advance to next item in the hash chain which has the same name
+ pEntry = pEntry->m_pNext; // Next entry in the hash chain
+
+ if (pEntry == NULL)
+ {
+ return S_OK; // End of hash chain, no match found
+ }
+ } while ((pEntry->m_dwHashValue != dwNameHashValue) || (strcmp(pEntry->m_pKey, pszMemberName) != 0));
+ }
+
+ return S_OK;
+}
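+
+#if 0 // Illustrative sketch (not compiled): the chain walk above reduced to its core.
+// Entries in the same bucket are chained; a match requires the cached hash value,
+// the exact name, and finally the (expensive) signature comparison. SigsMatch is a
+// stand-in for MetaSig::CompareMethodSigsNT with the appropriate substitutions.
+static MethodDesc *FindByNameAndSig(MethodHashEntry *pEntry, LPCUTF8 pszName,
+                                    DWORD dwHash, BOOL (*SigsMatch)(MethodDesc *))
+{
+    for (; pEntry != NULL; pEntry = pEntry->m_pNext)
+    {
+        // Cheap filters first: cached hash value, then string compare
+        if (pEntry->m_dwHashValue != dwHash || strcmp(pEntry->m_pKey, pszName) != 0)
+            continue;
+        if (SigsMatch(pEntry->m_pDesc))
+            return pEntry->m_pDesc;
+    }
+    return NULL;
+}
+#endif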
+
+//*******************************************************************************
+//
+// Find a method declaration that must reside in the scope passed in. This method cannot be called if
+// the reference travels to another scope.
+//
+// Protect against finding a declaration that lives within
+// us (the type being created)
+//
+
+HRESULT MethodTableBuilder::FindMethodDeclarationForMethodImpl(
+ IMDInternalImport * pMDInternalImport, // Scope in which tkClass and tkMethod are defined.
+ mdTypeDef tkClass, // Type that the method def resides in
+ mdToken tkMethod, // Token that is being located (MemberRef or MethodDef)
+ mdMethodDef * ptkMethodDef) // Method definition for Member
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ PCCOR_SIGNATURE pSig; // Signature of Member
+ DWORD cSig;
+ LPCUTF8 szMember = NULL;
+ // The token should be a member ref or def. If it is a ref then it must
+ // resolve back to us (the type being built).
+ if(TypeFromToken(tkMethod) == mdtMemberRef)
+ {
+ // Get the parent
+ mdToken typeref;
+ if (FAILED(pMDInternalImport->GetParentOfMemberRef(tkMethod, &typeref)))
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"Invalid MemberRef record");
+ IfFailRet(COR_E_TYPELOAD);
+ }
+
+ while (TypeFromToken(typeref) == mdtTypeSpec)
+ {
+ // Added so that method impls can refer to instantiated interfaces or classes
+ if (FAILED(pMDInternalImport->GetSigFromToken(typeref, &cSig, &pSig)))
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"Invalid TypeSpec record");
+ IfFailRet(COR_E_TYPELOAD);
+ }
+ CorElementType elemType = (CorElementType) *pSig++;
+
+ // If this is a generic inst, we expect that the next elem is ELEMENT_TYPE_CLASS,
+ // which is handled in the case below.
+ if (elemType == ELEMENT_TYPE_GENERICINST)
+ {
+ elemType = (CorElementType) *pSig++;
+ BAD_FORMAT_NOTHROW_ASSERT(elemType == ELEMENT_TYPE_CLASS);
+ }
+
+ // This covers E_T_GENERICINST and E_T_CLASS typespec formats. We don't expect
+ // any other kinds to come through here.
+ if (elemType == ELEMENT_TYPE_CLASS)
+ {
+ CorSigUncompressToken(pSig, &typeref);
+ }
+ else
+ {
+ // This is an unrecognized signature format.
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT,
+ IDS_CLASSLOAD_MI_BAD_SIG,
+ mdMethodDefNil);
+ }
+ }
+
+ // If the parent is a method def then this is a varargs method
+ if (TypeFromToken(typeref) == mdtMethodDef)
+ {
+ mdTypeDef typeDef;
+ hr = pMDInternalImport->GetParentToken(typeref, &typeDef);
+
+ // Make sure it is a typedef
+ if (TypeFromToken(typeDef) != mdtTypeDef)
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"MethodDef without TypeDef as Parent");
+ IfFailRet(COR_E_TYPELOAD);
+ }
+ BAD_FORMAT_NOTHROW_ASSERT(typeDef == tkClass);
+ // This is the real method we are overriding
+ // <TODO>@TODO: CTS this may be illegal and we could throw an error</TODO>
+ *ptkMethodDef = typeref;
+ }
+
+ else
+ {
+ // Verify that the ref points back to us
+ mdToken tkDef = mdTokenNil;
+
+ // We only get here when we know the token does not reference a type
+ // in a different scope.
+ if(TypeFromToken(typeref) == mdtTypeRef)
+ {
+ LPCUTF8 pszNameSpace;
+ LPCUTF8 pszClassName;
+
+ if (FAILED(pMDInternalImport->GetNameOfTypeRef(typeref, &pszNameSpace, &pszClassName)))
+ {
+ IfFailRet(COR_E_TYPELOAD);
+ }
+ mdToken tkRes;
+ if (FAILED(pMDInternalImport->GetResolutionScopeOfTypeRef(typeref, &tkRes)))
+ {
+ IfFailRet(COR_E_TYPELOAD);
+ }
+ hr = pMDInternalImport->FindTypeDef(pszNameSpace,
+ pszClassName,
+ (TypeFromToken(tkRes) == mdtTypeRef) ? tkRes : mdTokenNil,
+ &tkDef);
+ if(FAILED(hr))
+ {
+ IfFailRet(COR_E_TYPELOAD);
+ }
+ }
+
+ // We get a typedef when the parent of the token is a typespec to the type.
+ else if (TypeFromToken(typeref) == mdtTypeDef)
+ {
+ tkDef = typeref;
+ }
+
+ else
+ {
+ CONSISTENCY_CHECK_MSGF(FALSE, ("Invalid methodimpl signature in class %s.", GetDebugClassName()));
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT,
+ IDS_CLASSLOAD_MI_BAD_SIG,
+ mdMethodDefNil);
+ }
+
+ // We require that the typedef be the same type as the current class;
+ // if it doesn't match, we need to return a failure result.
+ if (tkDef != tkClass)
+ {
+ IfFailRet(COR_E_TYPELOAD);
+ }
+
+ IfFailRet(pMDInternalImport->GetNameAndSigOfMemberRef(tkMethod, &pSig, &cSig, &szMember));
+
+ if (isCallConv(
+ MetaSig::GetCallingConvention(NULL, Signature(pSig, cSig)),
+ IMAGE_CEE_CS_CALLCONV_FIELD))
+ {
+ return VLDTR_E_MR_BADCALLINGCONV;
+ }
+
+ hr = pMDInternalImport->FindMethodDef(
+ tkDef, szMember, pSig, cSig, ptkMethodDef);
+ IfFailRet(hr);
+ }
+ }
+
+ else if (TypeFromToken(tkMethod) == mdtMethodDef)
+ {
+ mdTypeDef typeDef;
+
+ // Verify that we are the parent
+ hr = pMDInternalImport->GetParentToken(tkMethod, &typeDef);
+ IfFailRet(hr);
+
+ if(typeDef != tkClass)
+ {
+ IfFailRet(COR_E_TYPELOAD);
+ }
+
+ *ptkMethodDef = tkMethod;
+ }
+
+ else
+ {
+ IfFailRet(COR_E_TYPELOAD);
+ }
+
+ return hr;
+}
+
+//*******************************************************************************
+void MethodTableBuilder::bmtMethodImplInfo::AddMethod(MethodDesc* pImplDesc, MethodDesc* pDesc, mdToken mdDecl, Substitution *pDeclSubst)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE((pDesc == NULL || mdDecl == mdTokenNil) && (pDesc != NULL || mdDecl != mdTokenNil));
+ rgEntries[pIndex].pDeclDesc = pDesc;
+ rgEntries[pIndex].declToken = mdDecl;
+ rgEntries[pIndex].declSubst = *pDeclSubst;
+ rgEntries[pIndex].pBodyDesc = pImplDesc;
+ pIndex++;
+}
+
+//*******************************************************************************
+// Returns TRUE if tok acts as a body for any methodImpl entry, FALSE otherwise.
+BOOL MethodTableBuilder::bmtMethodImplInfo::IsBody(mdToken tok)
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(TypeFromToken(tok) == mdtMethodDef);
+ for (DWORD i = 0; i < pIndex; i++) {
+ if (GetBodyMethodDesc(i)->GetMemberDef() == tok) {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+//*******************************************************************************
+// Throws on error
+void MethodNameHash::Init(DWORD dwMaxEntries, StackingAllocator *pAllocator)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+ }
+ CONTRACTL_END;
+
+ // Given dwMaxEntries, determine a good value for the number of hash buckets
+ m_dwNumBuckets = (dwMaxEntries / 10);
+
+ if (m_dwNumBuckets < 5)
+ m_dwNumBuckets = 5;
+
+ S_UINT32 scbMemory = (S_UINT32(m_dwNumBuckets) * S_UINT32(sizeof(MethodHashEntry*))) +
+ (S_UINT32(dwMaxEntries) * S_UINT32(sizeof(MethodHashEntry)));
+
+ if (scbMemory.IsOverflow())
+ {
+ ThrowHR(E_INVALIDARG);
+ }
+
+ if (pAllocator)
+ {
+ m_pMemoryStart = (BYTE*)pAllocator->Alloc(scbMemory);
+ }
+ else
+ { // We're given the number of hash table entries we're going to insert,
+ // so we can allocate the appropriate size
+ m_pMemoryStart = new BYTE[scbMemory.Value()];
+ }
+
+ INDEBUG(m_pDebugEndMemory = m_pMemoryStart + scbMemory.Value();)
+
+ // Current alloc ptr
+ m_pMemory = m_pMemoryStart;
+
+ // Allocate the buckets out of the alloc ptr
+ m_pBuckets = (MethodHashEntry**) m_pMemory;
+ m_pMemory += sizeof(MethodHashEntry*)*m_dwNumBuckets;
+
+ // Zero the whole block, so buckets all point to empty lists to begin with
+ memset(m_pBuckets, 0, scbMemory.Value());
+}
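+
+#if 0 // Illustrative sketch (not compiled): the single-allocation layout used by Init.
+// One block holds the bucket array followed by the entry pool; Insert bump-allocates
+// entries from the pool, so no per-entry allocation is ever needed.
+//
+//   m_pMemoryStart
+//   |-- m_dwNumBuckets * sizeof(MethodHashEntry*)   (m_pBuckets)
+//   |-- dwMaxEntries   * sizeof(MethodHashEntry)    (entry pool, carved off via m_pMemory)
+#endif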
+
+//*******************************************************************************
+// Insert new entry at head of list
+void MethodNameHash::Insert(LPCUTF8 pszName, MethodDesc *pDesc)
+{
+ LIMITED_METHOD_CONTRACT;
+ DWORD dwHash = HashStringA(pszName);
+ DWORD dwBucket = dwHash % m_dwNumBuckets;
+ MethodHashEntry*pNewEntry;
+
+ pNewEntry = (MethodHashEntry *) m_pMemory;
+ m_pMemory += sizeof(MethodHashEntry);
+
+ _ASSERTE(m_pMemory <= m_pDebugEndMemory);
+
+ // Insert at head of bucket chain
+ pNewEntry->m_pNext = m_pBuckets[dwBucket];
+ pNewEntry->m_pDesc = pDesc;
+ pNewEntry->m_dwHashValue = dwHash;
+ pNewEntry->m_pKey = pszName;
+
+ m_pBuckets[dwBucket] = pNewEntry;
+}
+
+//*******************************************************************************
+// Return the first MethodHashEntry with this name, or NULL if there is no such entry
+MethodHashEntry *MethodNameHash::Lookup(LPCUTF8 pszName, DWORD dwHash)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+
+ if (!dwHash)
+ dwHash = HashStringA(pszName);
+ DWORD dwBucket = dwHash % m_dwNumBuckets;
+ MethodHashEntry*pSearch;
+
+ for (pSearch = m_pBuckets[dwBucket]; pSearch; pSearch = pSearch->m_pNext)
+ {
+ if (pSearch->m_dwHashValue == dwHash && !strcmp(pSearch->m_pKey, pszName))
+ return pSearch;
+ }
+
+ return NULL;
+}
+
+//*******************************************************************************
+//
+// Create a hash of all methods in this class. The hash is from method name to MethodDesc.
+//
+MethodNameHash *MethodTableBuilder::CreateMethodChainHash(MethodTable *pMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ Thread *pThread = GetThread();
+ MethodNameHash *pHash = new (&pThread->m_MarshalAlloc) MethodNameHash();
+
+ pHash->Init(pMT->GetNumVirtuals(), &(pThread->m_MarshalAlloc));
+
+ MethodTable::MethodIterator it(pMT);
+ for (;it.IsValid(); it.Next())
+ {
+ if (it.IsVirtual())
+ {
+ MethodDesc *pImplDesc = it.GetMethodDesc();
+ CONSISTENCY_CHECK(CheckPointer(pImplDesc));
+ MethodDesc *pDeclDesc = it.GetDeclMethodDesc();
+ CONSISTENCY_CHECK(CheckPointer(pDeclDesc));
+
+ CONSISTENCY_CHECK(pMT->IsInterface() || !pDeclDesc->IsInterface());
+ pHash->Insert(pDeclDesc->GetNameOnNonArrayClass(), pDeclDesc);
+ }
+ }
+
+ // Success
+ return pHash;
+}
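+
+#if 0 // Illustrative sketch (not compiled): typical use of MethodNameHash, following
+// CreateMethodChainHash above. pszName and pDecl are assumed inputs.
+void ExampleMethodNameHashUsage(MethodTable *pMT, LPCUTF8 pszName, MethodDesc *pDecl)
+{
+    MethodNameHash hash;
+    hash.Init(pMT->GetNumVirtuals());   // no allocator given: hash owns its memory
+    hash.Insert(pDecl->GetNameOnNonArrayClass(), pDecl);
+
+    // Lookup returns the first entry whose name matches; entries further down the
+    // bucket chain must be re-checked against the name, as LoaderFindMethodInClass
+    // does above.
+    MethodHashEntry *pEntry = hash.Lookup(pszName, 0); // 0 => Lookup hashes the name
+    (void)pEntry;
+}
+#endif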
+
+//*******************************************************************************
+void MethodTableBuilder::SetBMTData(
+ BaseDomain *bmtDomain,
+ bmtErrorInfo *bmtError,
+ bmtProperties *bmtProp,
+ bmtVtable *bmtVT,
+ bmtParentInfo *bmtParent,
+ bmtInterfaceInfo *bmtInterface,
+ bmtMethodInfo *bmtMethod,
+ bmtTypeInfo *bmtType,
+ bmtMethodImplInfo *bmtMethodImpl)
+{
+ LIMITED_METHOD_CONTRACT;
+ this->bmtDomain = bmtDomain;
+ this->bmtError = bmtError;
+ this->bmtProp = bmtProp;
+ this->bmtVT = bmtVT;
+ this->bmtParent = bmtParent;
+ this->bmtInterface = bmtInterface;
+ this->bmtMethod = bmtMethod;
+ this->bmtType = bmtType;
+ this->bmtMethodImpl = bmtMethodImpl;
+}
+
+//*******************************************************************************
+void MethodTableBuilder::NullBMTData()
+{
+ LIMITED_METHOD_CONTRACT;
+ this->bmtDomain = NULL;
+ this->bmtError = NULL;
+ this->bmtProp = NULL;
+ this->bmtVT = NULL;
+ this->bmtParent = NULL;
+ this->bmtInterface = NULL;
+ this->bmtMethod = NULL;
+ this->bmtType = NULL;
+ this->bmtMethodImpl = NULL;
+}
+
+//*******************************************************************************
+/*static*/
+VOID DECLSPEC_NORETURN MethodTableBuilder::BuildMethodTableThrowException(
+ HRESULT hr,
+ const bmtErrorInfo & bmtError)
+{
+ STANDARD_VM_CONTRACT;
+
+ LPCUTF8 pszClassName, pszNameSpace;
+ if (FAILED(bmtError.pModule->GetMDImport()->GetNameOfTypeDef(bmtError.cl, &pszClassName, &pszNameSpace)))
+ {
+ pszClassName = pszNameSpace = "Invalid TypeDef record";
+ }
+
+ if (IsNilToken(bmtError.dMethodDefInError) && bmtError.szMethodNameForError == NULL) {
+ if (hr == E_OUTOFMEMORY)
+ COMPlusThrowOM();
+ else
+ bmtError.pModule->GetAssembly()->ThrowTypeLoadException(pszNameSpace, pszClassName,
+ bmtError.resIDWhy);
+ }
+ else {
+ LPCUTF8 szMethodName;
+ if (bmtError.szMethodNameForError == NULL)
+ {
+ if (FAILED((bmtError.pModule->GetMDImport())->GetNameOfMethodDef(bmtError.dMethodDefInError, &szMethodName)))
+ {
+ szMethodName = "Invalid MethodDef record";
+ }
+ }
+ else
+ szMethodName = bmtError.szMethodNameForError;
+
+ bmtError.pModule->GetAssembly()->ThrowTypeLoadException(pszNameSpace, pszClassName,
+ szMethodName, bmtError.resIDWhy);
+ }
+
+}
+
+//*******************************************************************************
+/* static */
+int __cdecl MethodTableBuilder::bmtMethodImplInfo::MethodImplTokenPair::Compare(
+ const void *elem1,
+ const void *elem2)
+{
+ STATIC_CONTRACT_LEAF;
+ MethodImplTokenPair *e1 = (MethodImplTokenPair *)elem1;
+ MethodImplTokenPair *e2 = (MethodImplTokenPair *)elem2;
+ if (e1->methodBody < e2->methodBody) return -1;
+ else if (e1->methodBody > e2->methodBody) return 1;
+ else if (e1->methodDecl < e2->methodDecl) return -1;
+ else if (e1->methodDecl > e2->methodDecl) return 1;
+ else return 0;
+}
+
+//*******************************************************************************
+/* static */
+BOOL MethodTableBuilder::bmtMethodImplInfo::MethodImplTokenPair::Equal(
+ const MethodImplTokenPair *elem1,
+ const MethodImplTokenPair *elem2)
+{
+ STATIC_CONTRACT_LEAF;
+ return ((elem1->methodBody == elem2->methodBody) &&
+ (elem1->methodDecl == elem2->methodDecl));
+}
+
+
+}; // namespace ClassCompat
+
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/classcompat.h b/src/vm/classcompat.h
new file mode 100644
index 0000000000..79815c286b
--- /dev/null
+++ b/src/vm/classcompat.h
@@ -0,0 +1,827 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: CLASSCOMPAT.H
+
+#ifndef CLASSCOMPAT_H
+#define CLASSCOMPAT_H
+
+#ifdef FEATURE_COMINTEROP
+
+/*
+ * Include Files
+ */
+#include "eecontract.h"
+#include "argslot.h"
+#include "vars.hpp"
+#include "cor.h"
+#include "clrex.h"
+#include "hash.h"
+#include "crst.h"
+#include "objecthandle.h"
+#include "cgensys.h"
+#include "declsec.h"
+#include "stdinterfaces.h"
+#include "slist.h"
+#include "spinlock.h"
+#include "typehandle.h"
+#include "perfcounters.h"
+#include "methodtable.h"
+#include "eeconfig.h"
+#include "typectxt.h"
+#include "stackingallocator.h"
+#include "class.h"
+
+/*
+ * Forward declarations
+ */
+class AppDomain;
+class ArrayClass;
+class ArrayMethodDesc;
+class Assembly;
+class ClassLoader;
+class DomainLocalBlock;
+class FCallMethodDesc;
+class EEClass;
+class LayoutEEClass;
+class EnCFieldDesc;
+class FieldDesc;
+class FieldMarshaler;
+struct LayoutRawFieldInfo;
+class MetaSig;
+class MethodDesc;
+class MethodDescChunk;
+class MethodNameHash;
+class MethodTable;
+class Module;
+struct ModuleCtorInfo;
+class Object;
+class Stub;
+class Substitution;
+class SystemDomain;
+class TypeHandle;
+class AllocMemTracker;
+class ZapCodeMap;
+class InteropMethodTableSlotDataMap;
+class LoadingEntry_LockHolder;
+class DispatchMapBuilder;
+
+namespace ClassCompat
+{
+
+//*******************************************************************************
+// workaround: These classification bits need cleanup bad: for now, this gets around
+// IJW setting both mdUnmanagedExport & mdPinvokeImpl on expored methods.
+#define IsReallyMdPinvokeImpl(x) ( ((x) & mdPinvokeImpl) && !((x) & mdUnmanagedExport) )
+
+//*******************************************************************************
+//
+// The MethodNameHash is a temporary loader structure which may be allocated if there are a large number of
+// methods in a class, to quickly get from a method name to a MethodDesc (potentially a chain of MethodDescs).
+//
+
+//*******************************************************************************
+// Entry in the method hash table
+class MethodHashEntry
+{
+public:
+ MethodHashEntry * m_pNext; // Next item with same hash value
+ DWORD m_dwHashValue; // Hash value
+ MethodDesc * m_pDesc;
+ LPCUTF8 m_pKey; // Method name
+};
+
+//*******************************************************************************
+class MethodNameHash
+{
+public:
+
+ MethodHashEntry **m_pBuckets; // Pointer to first entry for each bucket
+ DWORD m_dwNumBuckets;
+ BYTE * m_pMemory; // Current pointer into preallocated memory for entries
+ BYTE * m_pMemoryStart; // Start pointer of pre-allocated memory for entries
+ MethodNameHash *m_pNext; // Chain them for stub dispatch lookup
+ INDEBUG( BYTE * m_pDebugEndMemory; )
+
+ MethodNameHash()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pMemoryStart = NULL;
+ m_pNext = NULL;
+ }
+
+ ~MethodNameHash()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (m_pMemoryStart != NULL)
+ delete [] m_pMemoryStart; // allocated with new BYTE[], so use array delete
+ }
+
+ // Throws on error
+ void Init(DWORD dwMaxEntries, StackingAllocator *pAllocator = NULL);
+
+ // Insert new entry at head of list
+ void Insert(
+ LPCUTF8 pszName,
+ MethodDesc *pDesc);
+
+ // Return the first MethodHashEntry with this name, or NULL if there is no such entry
+ MethodHashEntry *Lookup(
+ LPCUTF8 pszName,
+ DWORD dwHash);
+
+ void SetNext(MethodNameHash *pNext) { m_pNext = pNext; }
+ MethodNameHash *GetNext() { return m_pNext; }
+};
+
+
+//*******************************************************************************
+//
+// This structure is used only when the classloader is building the interface map. Before the class
+// is resolved, the EEClass contains an array of these, which are all interfaces *directly* declared
+// for this class/interface by the metadata - inherited interfaces will not be present if they are
+// not specifically declared.
+//
+// This structure is destroyed after resolving has completed.
+//
+typedef struct
+{
+ // The interface method table; for instantiated interfaces, this is the generic interface
+ MethodTable *m_pMethodTable;
+} BuildingInterfaceInfo_t;
+
+//*******************************************************************************
+struct InterfaceInfo_t
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+ enum {
+ interface_declared_on_class = 0x1,
+ interface_implemented_on_parent = 0x2,
+ };
+
+ MethodTable* m_pMethodTable; // Method table of the interface
+ WORD m_wFlags;
+
+private:
+ WORD m_wStartSlot; // starting slot of interface in vtable
+
+public:
+ WORD GetInteropStartSlot()
+ {
+ return m_wStartSlot;
+ }
+ void SetInteropStartSlot(WORD wStartSlot)
+ {
+ m_wStartSlot = wStartSlot;
+ }
+
+ BOOL IsDeclaredOnClass()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_wFlags & interface_declared_on_class);
+ }
+
+ BOOL IsImplementedByParent()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_wFlags & interface_implemented_on_parent);
+ }
+};
+
+//*******************************************************************************
+// MethodTableBuilder simply acts as a holder for the
+// large algorithm that "compiles" a type into
+// a MethodTable/EEClass/DispatchMap/VTable etc. etc.
+//
+// The user of this class (the ClassLoader) currently builds the EEClass
+// first, and does a couple of other things too, though all
+// that work should probably be folded into BuildMethodTableThrowing.
+//
+class MethodTableBuilder
+{
+public:
+ MethodTableBuilder(MethodTable * pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pHalfBakedMT = pMT;
+ m_pHalfBakedClass = pMT->GetClass();
+ NullBMTData();
+ }
+public:
+
+ // This method is purely for backward compatibility of COM Interop, and its
+ // implementation can be found in ClassCompat.cpp
+ InteropMethodTableData *BuildInteropVTable(AllocMemTracker *pamTracker);
+ InteropMethodTableData *BuildInteropVTableForArray(AllocMemTracker *pamTracker);
+
+ LPCWSTR GetPathForErrorMessages();
+
+private:
+ enum e_METHOD_IMPL
+ {
+ METHOD_IMPL_NOT,
+#ifndef STUB_DISPATCH_ALL
+ METHOD_IMPL,
+#endif
+ METHOD_IMPL_COUNT
+ };
+
+ enum e_METHOD_TYPE
+ {
+ METHOD_TYPE_NORMAL,
+ METHOD_TYPE_FCALL,
+ METHOD_TYPE_EEIMPL,
+ METHOD_TYPE_NDIRECT,
+ METHOD_TYPE_INTEROP,
+ METHOD_TYPE_INSTANTIATED,
+ METHOD_TYPE_COUNT
+ };
+
+private:
+ // <NICE> Get rid of this.</NICE>
+ EEClass *m_pHalfBakedClass;
+ MethodTable * m_pHalfBakedMT;
+
+ // GetHalfBakedClass: The EEClass you get back from this function may not have all its fields filled in yet.
+ // Thus you have to make sure that the relevant item which you are accessing has
+ // been correctly initialized in the EEClass/MethodTable construction sequence
+ // at the point at which you access it.
+ //
+ // Gradually we will move the code to a model where the process of constructing an EEClass/MethodTable
+ // is more obviously correct, e.g. by relying much less on reading information using GetHalfBakedClass
+ // and GetHalfBakedMethodTable.
+ //
+ // <NICE> Get rid of this.</NICE>
+ EEClass *GetHalfBakedClass() { LIMITED_METHOD_CONTRACT; return m_pHalfBakedClass; }
+ MethodTable *GetHalfBakedMethodTable() { WRAPPER_NO_CONTRACT; return m_pHalfBakedMT; }
+
+ mdTypeDef GetCl() { LIMITED_METHOD_CONTRACT; return bmtType->cl; }
+ BOOL IsGlobalClass() { WRAPPER_NO_CONTRACT; return GetCl() == COR_GLOBAL_PARENT_TOKEN; }
+ BOOL IsEnum() { LIMITED_METHOD_CONTRACT; return bmtProp->fIsEnum; }
+ DWORD GetAttrClass() { LIMITED_METHOD_CONTRACT; return bmtType->dwAttr; }
+ BOOL IsInterface() { WRAPPER_NO_CONTRACT; return IsTdInterface(GetAttrClass()); }
+ BOOL IsValueClass() { LIMITED_METHOD_CONTRACT; return bmtProp->fIsValueClass; }
+ BOOL IsAbstract() { LIMITED_METHOD_CONTRACT; return IsTdAbstract(bmtType->dwAttr); }
+ BOOL HasLayout() { LIMITED_METHOD_CONTRACT; return bmtProp->fHasLayout; }
+ BOOL IsDelegate() { LIMITED_METHOD_CONTRACT; return bmtProp->fIsDelegate; }
+ BOOL IsContextful() { LIMITED_METHOD_CONTRACT; return bmtProp->fIsContextful; }
+ Module *GetModule() { LIMITED_METHOD_CONTRACT; return bmtType->pModule; }
+ Assembly *GetAssembly() { WRAPPER_NO_CONTRACT; return GetModule()->GetAssembly(); }
+ BaseDomain *GetDomain() { LIMITED_METHOD_CONTRACT; return bmtDomain; }
+ ClassLoader *GetClassLoader() { WRAPPER_NO_CONTRACT; return GetModule()->GetClassLoader(); }
+ IMDInternalImport* GetMDImport() { WRAPPER_NO_CONTRACT; return GetModule()->GetMDImport(); }
+#ifdef _DEBUG
+ LPCUTF8 GetDebugClassName() { LIMITED_METHOD_CONTRACT; return bmtProp->szDebugClassName; }
+#endif // _DEBUG
+ BOOL IsComImport() { WRAPPER_NO_CONTRACT; return IsTdImport(GetAttrClass()); }
+ BOOL IsComClassInterface() { LIMITED_METHOD_CONTRACT; return bmtProp->fIsComClassInterface; }
+
+ // <NOTE> The following functions are used during MethodTable construction to setup information
+ // about the type being constructed, in particular information stored in the EEClass.
+ // USE WITH CAUTION!! TRY NOT TO ADD MORE OF THESE!! </NOTE>
+ //
+ // <NICE> Get rid of all of these - we should be able to evaluate these conditions BEFORE
+ // we create the EEClass object, and thus set the flags immediately at the point
+ // we create that object.</NICE>
+ void SetIsValueClass() { LIMITED_METHOD_CONTRACT; bmtProp->fIsValueClass = TRUE; }
+ void SetEnum() { LIMITED_METHOD_CONTRACT; bmtProp->fIsEnum = TRUE; }
+ void SetHasLayout() { LIMITED_METHOD_CONTRACT; bmtProp->fHasLayout = TRUE; }
+ void SetIsDelegate() { LIMITED_METHOD_CONTRACT; bmtProp->fIsDelegate = TRUE; }
+ void SetContextful() { LIMITED_METHOD_CONTRACT; bmtProp->fIsContextful = TRUE; }
+#ifdef _DEBUG
+ void SetDebugClassName(LPUTF8 x) { LIMITED_METHOD_CONTRACT; bmtProp->szDebugClassName = x; }
+#endif
+ void SetIsComClassInterface() { LIMITED_METHOD_CONTRACT; bmtProp->fIsComClassInterface = TRUE; }
+
+ /************************************
+ * PRIVATE INTERNAL STRUCTS
+ ************************************/
+private:
+ struct bmtErrorInfo
+ {
+ UINT resIDWhy;
+ LPCUTF8 szMethodNameForError;
+ mdToken dMethodDefInError;
+ Module* pModule;
+ mdTypeDef cl;
+ OBJECTREF *pThrowable;
+
+ // Set the reason and the offending method def. If the method information
+ // is not from this class set the method name and it will override the method def.
+ inline bmtErrorInfo() : resIDWhy(0), szMethodNameForError(NULL), dMethodDefInError(mdMethodDefNil), pThrowable(NULL) {LIMITED_METHOD_CONTRACT; }
+ };
+
+ struct bmtProperties
+ {
+ BOOL fSparse; // Set to true if a sparse interface is being used.
+
+ // Com Interop, ComWrapper classes extend from ComObject
+ BOOL fIsComObjectType; // whether this class is an instance of ComObject class
+
+ BOOL fIsMngStandardItf; // Set to true if the interface is a managed standard interface.
+ BOOL fComEventItfType; // Set to true if the class is a special COM event interface.
+
+ BOOL fIsValueClass;
+ BOOL fIsEnum;
+ BOOL fIsContextful;
+ BOOL fIsComClassInterface;
+ BOOL fHasLayout;
+ BOOL fIsDelegate;
+
+ LPUTF8 szDebugClassName;
+
+ inline bmtProperties()
+ {
+ LIMITED_METHOD_CONTRACT;
+ memset((void *)this, NULL, sizeof(*this));
+ }
+ };
+
+ struct bmtVtable
+ {
+ WORD wCurrentVtableSlot;
+ WORD wCurrentNonVtableSlot;
+
+ // Temporary vtable - use GetMethodDescForSlot/SetMethodDescForSlot for access.
+ // pVtableMD is initialized lazily from pVtable
+ // pVtable is invalidated if the slot is overwritten.
+ PCODE* pVtable;
+ MethodDesc** pVtableMD;
+ MethodTable *pParentMethodTable;
+
+ MethodDesc** pNonVtableMD;
+ InteropMethodTableSlotData **ppSDVtable;
+ InteropMethodTableSlotData **ppSDNonVtable;
+ DWORD dwMaxVtableSize; // Upper bound on size of vtable
+ InteropMethodTableSlotDataMap *pInteropData;
+
+ DispatchMapBuilder *pDispatchMapBuilder;
+
+ MethodDesc* GetMethodDescForSlot(WORD slot)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ if (pVtable[slot] != NULL && pVtableMD[slot] == NULL)
+ pVtableMD[slot] = pParentMethodTable->GetMethodDescForSlot(slot);
+ _ASSERTE((pVtable[slot] == NULL) ||
+ (MethodTable::GetMethodDescForSlotAddress(pVtable[slot]) == pVtableMD[slot]));
+ return pVtableMD[slot];
+ }
+
+ void SetMethodDescForSlot(WORD slot, MethodDesc* pMD)
+ {
+ WRAPPER_NO_CONTRACT;
+ pVtable[slot] = NULL;
+ pVtableMD[slot] = pMD;
+ }
+
+ inline bmtVtable() { LIMITED_METHOD_CONTRACT; memset((void *)this, NULL, sizeof(*this)); }
+ };
+
+ struct bmtParentInfo
+ {
+ WORD wNumParentInterfaces;
+ MethodDesc **ppParentMethodDescBuf; // Cache for declared methods
+ MethodDesc **ppParentMethodDescBufPtr; // Pointer for iterating over the cache
+
+ MethodNameHash *pParentMethodHash;
+ Substitution parentSubst;
+ MethodTable *pParentMethodTable;
+ mdToken token;
+
+ inline bmtParentInfo() { LIMITED_METHOD_CONTRACT; memset((void *)this, NULL, sizeof(*this)); }
+ };
+
+ struct bmtInterfaceInfo
+ {
+ DWORD dwTotalNewInterfaceMethods;
+ InterfaceInfo_t *pInterfaceMap; // Temporary interface map
+
+ // ppInterfaceSubstitutionChains[i][0] holds the primary substitution for each interface
+ // ppInterfaceSubstitutionChains[i][0..depth[i] ] is the chain of substitutions for each interface
+ Substitution **ppInterfaceSubstitutionChains;
+
+ DWORD *pdwOriginalStart; // If an interface is moved this is the original starting location.
+ WORD wInterfaceMapSize; // # members in interface map
+ DWORD dwLargestInterfaceSize; // # members in largest interface we implement
+ DWORD dwMaxExpandedInterfaces; // Upper bound on size of interface map
+ MethodDesc **ppInterfaceMethodDescList; // List of MethodDescs for current interface
+ MethodDesc **ppInterfaceDeclMethodDescList; // List of MethodDescs for the interface itself
+
+ MethodDesc ***pppInterfaceImplementingMD; // List of MethodDescs that implement interface methods
+ MethodDesc ***pppInterfaceDeclaringMD; // List of MethodDescs from the interface itself
+
+ inline bmtInterfaceInfo() { LIMITED_METHOD_CONTRACT; memset((void *)this, NULL, sizeof(*this)); }
+ };
+
+ struct bmtMethodInfo
+ {
+        DWORD cMethAndGaps;              // # metadata methods of this class (including the gaps)
+
+        WORD cMethods;                   // # metadata methods of this class
+ mdToken * rgMethodTokens; // Enumeration of metadata methods
+ DWORD * rgMethodAttrs; // Enumeration of the attributes of the methods
+ DWORD * rgMethodImplFlags; // Enumeration of the method implementation flags
+ ULONG * rgMethodRVA; // Enumeration of the method RVA's
+ DWORD * rgMethodClassifications; // Enumeration of the method classifications
+ LPCSTR * rgszMethodName; // Enumeration of the method names
+ BYTE * rgMethodImpl; // Enumeration of impl value
+ BYTE * rgMethodType; // Enumeration of type value
+
+ HENUMInternalHolder hEnumMethod;
+
+        MethodDesc ** ppUnboxMethodDescList; // Keep track of unboxed entry points (for value classes)
+ MethodDesc ** ppMethodDescList; // MethodDesc pointer for each member
+
+ inline bmtMethodInfo(IMDInternalImport *pMDImport)
+ : cMethAndGaps(0),
+ cMethods(0),
+ rgMethodTokens(NULL),
+ rgMethodAttrs(NULL),
+ rgMethodImplFlags(NULL),
+ rgMethodRVA(NULL),
+ rgMethodClassifications(NULL),
+ rgszMethodName(NULL),
+            rgMethodImpl(NULL),
+            rgMethodType(NULL),
+ hEnumMethod(pMDImport),
+ ppUnboxMethodDescList(NULL),
+ ppMethodDescList(NULL)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ inline void SetMethodData(int idx,
+ mdToken tok,
+ DWORD dwAttrs,
+ DWORD dwRVA,
+ DWORD dwImplFlags,
+ DWORD classification,
+ LPCSTR szMethodName,
+ BYTE impl,
+ BYTE type)
+ {
+ LIMITED_METHOD_CONTRACT;
+ rgMethodTokens[idx] = tok;
+ rgMethodAttrs[idx] = dwAttrs;
+ rgMethodRVA[idx] = dwRVA;
+ rgMethodImplFlags[idx] = dwImplFlags;
+ rgMethodClassifications[idx] = classification;
+ rgszMethodName[idx] = szMethodName;
+ rgMethodImpl[idx] = impl;
+ rgMethodType[idx] = type;
+ }
+ };
+
+ struct bmtTypeInfo
+ {
+ IMDInternalImport * pMDImport;
+ Module * pModule;
+ mdToken cl;
+ DWORD dwAttr;
+
+ inline bmtTypeInfo() { LIMITED_METHOD_CONTRACT; memset((void *)this, NULL, sizeof(*this)); }
+ };
+
+ struct bmtMethodImplInfo
+ {
+ DWORD dwNumberMethodImpls; // Number of method impls defined for this type
+ HENUMInternalMethodImplHolder hEnumMethodImpl;
+
+ struct MethodImplTokenPair
+ {
+ mdToken methodBody; // MethodDef's for the bodies of MethodImpls. Must be defined in this type.
+ mdToken methodDecl; // Method token that body implements. Is a MethodDef or MemberRef
+ static int __cdecl Compare(const void *elem1, const void *elem2);
+ static BOOL Equal(const MethodImplTokenPair *elem1, const MethodImplTokenPair *elem2);
+ };
+
+ MethodImplTokenPair * rgMethodImplTokens;
+ Substitution * pMethodDeclSubsts; // Used to interpret generic variables in the interface of the declaring type
+
+        DWORD pIndex;   // Next open spot in array; we load the BodyDescs up in order of appearance in the
+                        // type's list of methods (a body can appear more than once in the list of MethodImpls)
+ struct Entry
+ {
+ mdToken declToken; // Either the token or the method desc is set for the declaration
+ Substitution declSubst; // Signature instantiations of parent types for Declaration (NULL if not instantiated)
+            MethodDesc* pDeclDesc;  // Method desc for the Declaration. If null, the Declaration is in this type; use the token
+ MethodDesc* pBodyDesc; // Method descs created for Method impl bodies
+ DWORD dwFlags;
+ };
+
+ Entry *rgEntries;
+
+ void AddMethod(MethodDesc* pImplDesc, MethodDesc* pDeclDesc, mdToken mdDecl, Substitution *pDeclSubst);
+
+ MethodDesc* GetDeclarationMethodDesc(DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(i < pIndex);
+ return rgEntries[i].pDeclDesc;
+ }
+
+ mdToken GetDeclarationToken(DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(i < pIndex);
+ return rgEntries[i].declToken;
+ }
+
+ const Substitution *GetDeclarationSubst(DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(i < pIndex);
+ return &rgEntries[i].declSubst;
+ }
+
+ MethodDesc* GetBodyMethodDesc(DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(i < pIndex);
+ return rgEntries[i].pBodyDesc;
+ }
+
+        // Returns TRUE if tok acts as a body for any methodImpl entry, FALSE otherwise.
+ BOOL IsBody(mdToken tok);
+
+ inline bmtMethodImplInfo(IMDInternalImport * pMDImport)
+ : dwNumberMethodImpls(0),
+ hEnumMethodImpl(pMDImport),
+ pIndex(0),
+ rgEntries(NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+ };
+
+    // The following structs, defined as private members of MethodTableBuilder, contain the local
+    // parameters needed for BuildMethodTable.
+
+    // Look at the struct definitions for a detailed list of all parameters available
+    // to BuildMethodTable.
+
+ BaseDomain *bmtDomain;
+ bmtErrorInfo *bmtError;
+ bmtProperties *bmtProp;
+ bmtVtable *bmtVT;
+ bmtParentInfo *bmtParent;
+ bmtInterfaceInfo *bmtInterface;
+ bmtMethodInfo *bmtMethod;
+ bmtTypeInfo *bmtType;
+ bmtMethodImplInfo *bmtMethodImpl;
+
+ void SetBMTData(
+ BaseDomain *bmtDomain,
+ bmtErrorInfo *bmtError,
+ bmtProperties *bmtProp,
+ bmtVtable *bmtVT,
+ bmtParentInfo *bmtParent,
+ bmtInterfaceInfo *bmtInterface,
+ bmtMethodInfo *bmtMethod,
+ bmtTypeInfo *bmtType,
+ bmtMethodImplInfo *bmtMethodImpl);
+
+ void NullBMTData();
+
+ class DeclaredMethodIterator
+ {
+ private:
+ MethodTableBuilder &m_mtb;
+ int m_idx;
+
+ public:
+ inline DeclaredMethodIterator(MethodTableBuilder &mtb);
+ inline int CurrentIndex();
+ inline BOOL Next();
+ inline mdToken Token();
+ inline DWORD Attrs();
+ inline DWORD RVA();
+ inline DWORD ImplFlags();
+ inline DWORD Classification();
+ inline LPCSTR Name();
+ inline PCCOR_SIGNATURE GetSig(DWORD *pcbSig);
+ inline BYTE MethodImpl();
+ inline BOOL IsMethodImpl();
+ inline BYTE MethodType();
+ inline MethodDesc *GetMethodDesc();
+ inline void SetMethodDesc(MethodDesc *pMD);
+ inline MethodDesc *GetParentMethodDesc();
+ inline void SetParentMethodDesc(MethodDesc *pMD);
+ inline MethodDesc *GetUnboxedMethodDesc();
+ };
+ friend class DeclaredMethodIterator;
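+
+    // Illustrative usage sketch (not part of the original source): the iteration
+    // pattern DeclaredMethodIterator is designed for when walking the methods
+    // declared on the type being built; the placement step is hypothetical.
+    //
+    //   DeclaredMethodIterator it(*this);
+    //   while (it.Next())
+    //   {
+    //       LPCSTR      szName = it.Name();
+    //       MethodDesc *pMD    = it.GetMethodDesc();
+    //       // ... classify szName/pMD and place the method in the interop vtable
+    //   }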
+
+ inline WORD NumDeclaredMethods() { LIMITED_METHOD_CONTRACT; return bmtMethod->cMethods; }
+ inline void IncNumDeclaredMethods() { LIMITED_METHOD_CONTRACT; bmtMethod->cMethods++; }
+
+private:
+ static VOID DECLSPEC_NORETURN BuildMethodTableThrowException(HRESULT hr,
+ const bmtErrorInfo & bmtError);
+
+
+ inline VOID DECLSPEC_NORETURN BuildMethodTableThrowException(
+ HRESULT hr,
+ UINT idResWhy,
+ mdMethodDef tokMethodDef)
+ {
+ WRAPPER_NO_CONTRACT;
+ bmtError->resIDWhy = idResWhy;
+ bmtError->dMethodDefInError = tokMethodDef;
+ bmtError->szMethodNameForError = NULL;
+ bmtError->cl = GetCl();
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+
+ inline VOID DECLSPEC_NORETURN BuildMethodTableThrowException(
+ HRESULT hr,
+ UINT idResWhy,
+ LPCUTF8 szMethodName)
+ {
+ WRAPPER_NO_CONTRACT;
+ bmtError->resIDWhy = idResWhy;
+ bmtError->dMethodDefInError = mdMethodDefNil;
+ bmtError->szMethodNameForError = szMethodName;
+ bmtError->cl = GetCl();
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+
+ inline VOID DECLSPEC_NORETURN BuildMethodTableThrowException(
+ UINT idResWhy,
+ mdMethodDef tokMethodDef = mdMethodDefNil)
+ {
+ WRAPPER_NO_CONTRACT;
+ BuildMethodTableThrowException(COR_E_TYPELOAD, idResWhy, tokMethodDef);
+ }
+
+ inline VOID DECLSPEC_NORETURN BuildMethodTableThrowException(
+ UINT idResWhy,
+ LPCUTF8 szMethodName)
+ {
+ WRAPPER_NO_CONTRACT;
+ BuildMethodTableThrowException(COR_E_TYPELOAD, idResWhy, szMethodName);
+ }
+
+private:
+ MethodNameHash *CreateMethodChainHash(
+ MethodTable *pMT);
+
+ HRESULT LoaderFindMethodInClass(
+ LPCUTF8 pszMemberName,
+ Module* pModule,
+ mdMethodDef mdToken,
+ MethodDesc ** ppMethodDesc,
+ PCCOR_SIGNATURE * ppMemberSignature,
+ DWORD * pcMemberSignature,
+ DWORD dwHashName,
+ BOOL * pMethodConstraintsMatch);
+
+    // Finds a method declaration from a MemberRef or Def. It handles the case where
+    // the Ref or Def points back to this class even though it has not been fully
+    // laid out.
+ HRESULT FindMethodDeclarationForMethodImpl(
+ IMDInternalImport *pMDInternalImport, // Scope in which tkClass and tkMethod are defined.
+ mdTypeDef tkClass, // Type that the method def resides in
+ mdToken tkMethod, // Token that is being located (MemberRef or MethodDef)
+ mdToken* ptkMethodDef); // Method definition for Member
+
+ // Enumerates the method impl token pairs and resolves the impl tokens to mdtMethodDef
+ // tokens, since we currently have the limitation that all impls are in the current class.
+ VOID EnumerateMethodImpls();
+
+ VOID EnumerateClassMethods();
+
+ // Allocate temporary memory for tracking all information used in building the MethodTable
+ VOID AllocateMethodWorkingMemory();
+
+ VOID BuildInteropVTable_InterfaceList(
+ BuildingInterfaceInfo_t **ppBuildingInterfaceList,
+ WORD *pcBuildingInterfaceList);
+
+ VOID BuildInteropVTable_PlaceMembers(
+ BaseDomain *bmtDomain,
+ bmtTypeInfo* bmtType,
+ DWORD numDeclaredInterfaces,
+ BuildingInterfaceInfo_t *pBuildingInterfaceList,
+ bmtMethodInfo* bmtMethod,
+ bmtErrorInfo* bmtError,
+ bmtProperties* bmtProp,
+ bmtParentInfo* bmtParent,
+ bmtInterfaceInfo* bmtInterface,
+ bmtMethodImplInfo* bmtMethodImpl,
+ bmtVtable* bmtVT);
+
+ VOID BuildInteropVTable_ResolveInterfaces(
+ BaseDomain *bmtDomain,
+ BuildingInterfaceInfo_t *pBuildingInterfaceList,
+ bmtTypeInfo* bmtType,
+ bmtInterfaceInfo* bmtInterface,
+ bmtVtable* bmtVT,
+ bmtParentInfo* bmtParent,
+ const bmtErrorInfo & bmtError);
+
+ VOID BuildInteropVTable_CreateInterfaceMap(
+ BuildingInterfaceInfo_t *pBuildingInterfaceList,
+ bmtInterfaceInfo* bmtInterface,
+ WORD *pwInterfaceListSize,
+ DWORD *pdwMaxInterfaceMethods,
+ MethodTable *pParentMethodTable);
+
+ VOID BuildInteropVTable_ExpandInterface(
+ InterfaceInfo_t *pInterfaceMap,
+ MethodTable *pNewInterface,
+ WORD *pwInterfaceListSize,
+ DWORD *pdwMaxInterfaceMethods,
+ BOOL fDirect);
+
+ VOID BuildInteropVTable_PlaceVtableMethods(
+ bmtInterfaceInfo* bmtInterface,
+ DWORD numDeclaredInterfaces,
+ BuildingInterfaceInfo_t *pBuildingInterfaceList,
+ bmtVtable* bmtVT,
+ bmtMethodInfo* bmtMethod,
+ bmtTypeInfo* bmtType,
+ bmtErrorInfo* bmtError,
+ bmtProperties* bmtProp,
+ bmtParentInfo* bmtParent);
+
+ VOID BuildInteropVTable_PlaceMethodImpls(
+ BaseDomain *bmtDomain,
+ bmtTypeInfo* bmtType,
+ bmtMethodImplInfo* bmtMethodImpl,
+ bmtErrorInfo* bmtError,
+ bmtInterfaceInfo* bmtInterface,
+ bmtVtable* bmtVT,
+ bmtParentInfo* bmtParent);
+
+ VOID BuildInteropVTable_PlaceLocalDeclaration(
+ mdMethodDef mdef,
+ MethodDesc* body,
+ bmtTypeInfo* bmtType,
+ bmtErrorInfo* bmtError,
+ bmtVtable* bmtVT,
+ DWORD* slots,
+ MethodDesc** replaced,
+ DWORD* pSlotIndex,
+ PCCOR_SIGNATURE* ppBodySignature,
+ DWORD* pcBodySignature);
+
+ VOID BuildInteropVTable_PlaceInterfaceDeclaration(
+ MethodDesc* pDecl,
+ MethodDesc* pImplBody,
+ const Substitution *pDeclSubst,
+ bmtTypeInfo* bmtType,
+ bmtInterfaceInfo* bmtInterface,
+ bmtErrorInfo* bmtError,
+ bmtVtable* bmtVT,
+ DWORD* slots,
+ MethodDesc** replaced,
+ DWORD* pSlotIndex,
+ PCCOR_SIGNATURE* ppBodySignature,
+ DWORD* pcBodySignature);
+
+ VOID BuildInteropVTable_PlaceParentDeclaration(
+ MethodDesc* pDecl,
+ MethodDesc* pImplBody,
+ const Substitution *pDeclSubst,
+ bmtTypeInfo* bmtType,
+ bmtErrorInfo* bmtError,
+ bmtVtable* bmtVT,
+ bmtParentInfo* bmtParent,
+ DWORD* slots,
+ MethodDesc** replaced,
+ DWORD* pSlotIndex,
+ PCCOR_SIGNATURE* ppBodySignature,
+ DWORD* pcBodySignature);
+
+ VOID BuildInteropVTable_PropagateInheritance(
+ bmtVtable *bmtVT);
+
+ VOID FinalizeInteropVTable(
+ AllocMemTracker *pamTracker,
+ LoaderAllocator*,
+ bmtVtable*,
+ bmtInterfaceInfo*,
+ bmtTypeInfo*,
+ bmtProperties*,
+ bmtMethodInfo*,
+ bmtErrorInfo*,
+ bmtParentInfo*,
+ InteropMethodTableData**);
+}; // MethodTableBuilder
+
+}; // Namespace ClassCompat
+
+#endif // FEATURE_COMINTEROP
+
+#endif // !CLASSCOMPAT_H
diff --git a/src/vm/classfactory.cpp b/src/vm/classfactory.cpp
new file mode 100644
index 0000000000..33a95e879a
--- /dev/null
+++ b/src/vm/classfactory.cpp
@@ -0,0 +1,1000 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+
+//#include "ClassFactory3.h"
+#include "winwrap.h"
+#include "comcallablewrapper.h"
+#include "frames.h"
+#include "excep.h"
+#include "registration.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "typeparse.h"
+#include "mdaassistants.h"
+
+
+#ifdef FEATURE_COMINTEROP_MANAGED_ACTIVATION
+
+// Allocate a managed object given the method table pointer.
+HRESULT STDMETHODCALLTYPE EEAllocateInstance(LPUNKNOWN pOuter, MethodTable* pMT, BOOL fHasLicensing, REFIID riid, BOOL fDesignTime, BSTR bstrKey, void** ppv);
+extern BOOL g_fEEComActivatedStartup;
+extern BOOL g_fEEHostedStartup;
+extern GUID g_EEComObjectGuid;
+
+// ---------------------------------------------------------------------------
+// %%Class EEClassFactory
+// IClassFactory implementation for COM+ objects
+// ---------------------------------------------------------------------------
+class EEClassFactory : public IClassFactory2
+{
+public:
+ EEClassFactory(CLSID* pClsId, MethodTable* pTable)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTable));
+ PRECONDITION(CheckPointer(pClsId));
+ }
+ CONTRACTL_END;
+
+ LOG((LF_INTEROP, LL_INFO100, "EEClassFactory::EEClassFactory for class %s\n", pTable->GetDebugClassName()));
+ m_pMethodTable = pTable;
+ m_cbRefCount = 0;
+ memcpy(&m_ClsId, pClsId, sizeof(GUID));
+ m_hasLicensing = FALSE;
+
+ while (pTable != NULL && pTable != g_pObjectClass)
+ {
+ if (pTable->GetMDImport()->GetCustomAttributeByName(pTable->GetCl(), "System.ComponentModel.LicenseProviderAttribute", 0,0) == S_OK)
+ {
+ m_hasLicensing = TRUE;
+ break;
+ }
+ pTable = pTable->GetParentMethodTable();
+ }
+ }
+
+ ~EEClassFactory()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ LOG((LF_INTEROP, LL_INFO100, "EEClassFactory::~ for class %s\n", m_pMethodTable->GetDebugClassName()));
+ }
+
+ STDMETHODIMP QueryInterface( REFIID iid, void **ppv)
+ {
+ SetupForComCallDWORDNoHostNotif();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(ppv, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (ppv == NULL)
+ return E_POINTER;
+
+ *ppv = NULL;
+
+ if (iid == IID_IClassFactory || ((iid == IID_IClassFactory2) && m_hasLicensing ) || iid == IID_IUnknown)
+ {
+ *ppv = (IClassFactory2 *)this;
+ AddRef();
+ }
+
+ return (*ppv != NULL) ? S_OK : E_NOINTERFACE;
+ }
+
+ STDMETHODIMP_(ULONG) AddRef()
+    {
+ SetupForComCallDWORDNoHostNotif();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ ULONG l = FastInterlockIncrement(&m_cbRefCount);
+ return l;
+ }
+
+ STDMETHODIMP_(ULONG) Release()
+ {
+ SetupForComCallDWORD();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(m_cbRefCount > 0);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ ULONG l = -1;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ l = FastInterlockDecrement(&m_cbRefCount);
+ if (l == 0)
+ delete this;
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return l;
+ }
+
+ STDMETHODIMP CreateInstance(LPUNKNOWN punkOuter, REFIID riid, void** ppv)
+ {
+ HRESULT hr = S_OK;
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+        // SetupForComCallHR uses "SO_INTOLERANT_CODE_NOTHROW" to set up the SO-Intolerant transition
+ // for COM Interop. However, "SO_INTOLERANT_CODE_NOTHROW" expects that no exception can escape
+ // through this boundary but all it does is (in addition to checking that no exception has escaped it)
+ // do stack probing.
+ //
+ // However, Corrupting Exceptions [CE] can escape the COM Interop boundary. Thus, to address that scenario,
+ // we use the macro below that uses BEGIN_SO_INTOLERANT_CODE_NOTHROW to do the equivalent of
+ // SO_INTOLERANT_CODE_NOTHROW and yet allow for CEs to escape through. Since there will be a corresponding
+        // END_SO_INTOLERANT_CODE, the call is split into two parts: the Begin and End (see below).
+ BeginSetupForComCallHRWithEscapingCorruptingExceptions();
+#else // !FEATURE_CORRUPTING_EXCEPTIONS
+ SetupForComCallHR();
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ CONTRACTL
+ {
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ THROWS; // CSE can escape out of this function
+#else // !FEATURE_CORRUPTING_EXCEPTIONS
+ NOTHROW;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ hr = UpdateMethodTable();
+
+ // allocate a com+ object
+ // this will allocate the object in the correct context
+ // we might end up with a tear-off on our COM+ context proxy
+ if (SUCCEEDED(hr))
+ {
+ hr = EEAllocateInstance(punkOuter, m_pMethodTable, m_hasLicensing, riid, TRUE, NULL, ppv);
+ }
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ EndSetupForComCallHRWithEscapingCorruptingExceptions();
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ return hr;
+ }
+
+ STDMETHODIMP LockServer(BOOL fLock)
+ {
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ return S_OK;
+ }
+
+    // The implementation of these two functions is provided below. Prefast chokes if their implementation is here.
+ STDMETHODIMP GetLicInfo(LPLICINFO pLicInfo);
+ STDMETHODIMP RequestLicKey(DWORD dwReserved, BSTR * pbstrKey);
+
+ STDMETHODIMP CreateInstanceLic(IUnknown *punkOuter, IUnknown* pUnkReserved, REFIID riid, BSTR bstrKey, void **ppUnk)
+ {
+ HRESULT hr = S_OK;
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+        // See the comment in CreateInstance above for why the setup is split into a
+        // Begin/End pair that lets Corrupting Exceptions escape this interop boundary.
+ BeginSetupForComCallHRWithEscapingCorruptingExceptions();
+#else // !FEATURE_CORRUPTING_EXCEPTIONS
+ SetupForComCallHR();
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ CONTRACTL
+ {
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ THROWS; // CSE can escape out of this function
+#else // !FEATURE_CORRUPTING_EXCEPTIONS
+ NOTHROW;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (!ppUnk)
+ {
+ hr = E_POINTER;
+ goto done;
+ }
+
+ *ppUnk = NULL;
+
+ if (pUnkReserved != NULL)
+ {
+ hr = E_INVALIDARG;
+ goto done;
+ }
+
+ if (bstrKey == NULL)
+ {
+ hr = E_POINTER;
+ goto done;
+ }
+
+ hr = UpdateMethodTable();
+
+ // allocate a com+ object
+ // this will allocate the object in the correct context
+ // we might end up with a tear-off on our COM+ context proxy
+ if (SUCCEEDED(hr))
+ {
+ hr = EEAllocateInstance(punkOuter, m_pMethodTable, m_hasLicensing, riid, /*fDesignTime=*/FALSE, bstrKey, ppUnk);
+ }
+
+done: ;
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ EndSetupForComCallHRWithEscapingCorruptingExceptions();
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ return hr;
+ }
+
+ STDMETHODIMP CreateInstanceWithContext(LPUNKNOWN punkContext, LPUNKNOWN punkOuter, REFIID riid, void** ppv)
+ {
+ HRESULT hr = S_OK;
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+        // See the comment in CreateInstance above for why the setup is split into a
+        // Begin/End pair that lets Corrupting Exceptions escape this interop boundary.
+ BeginSetupForComCallHRWithEscapingCorruptingExceptions();
+#else // !FEATURE_CORRUPTING_EXCEPTIONS
+ SetupForComCallHR();
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ CONTRACTL
+ {
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ THROWS; // CSE can escape out of this function
+#else // !FEATURE_CORRUPTING_EXCEPTIONS
+ NOTHROW;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ hr = UpdateMethodTable();
+
+ if (SUCCEEDED(hr))
+ {
+ hr = EEAllocateInstance(punkOuter, m_pMethodTable, m_hasLicensing, riid, TRUE, NULL, ppv);
+ }
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ EndSetupForComCallHRWithEscapingCorruptingExceptions();
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ return hr;
+ }
+
+private:
+    // If we happen to be called from the same AppDomain as last time, we can use the cached MT.
+    // If not, we need to use the CLSID to find the correct MT for the current domain.
+ CLSID m_ClsId;
+ MethodTable* m_pMethodTable; // most recently used MT
+ ADID m_dwDomainId; // the AD we were in when this ClassFact was last used
+ LONG m_cbRefCount;
+ BOOL m_hasLicensing;
+
+    // Ensure that m_pMethodTable and m_dwDomainId are valid for the current AppDomain. If we're
+    // not in the AppDomain identified by m_dwDomainId, use the CLSID to get the MT.
+ STDMETHODIMP UpdateMethodTable()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ Thread *pThread = GetThread();
+ ADID adid = pThread->GetDomain()->GetId();
+ if (adid != m_dwDomainId)
+ {
+ MethodTable* tempMT = NULL;
+ EX_TRY
+ {
+ BEGIN_SO_INTOLERANT_CODE(pThread);
+ GCX_COOP();
+ tempMT = GetTypeForCLSID(m_ClsId);
+ END_SO_INTOLERANT_CODE;
+ }
+ EX_CATCH
+ {
+ hr = E_FAIL;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (tempMT != NULL && SUCCEEDED(hr))
+ {
+ m_pMethodTable = tempMT;
+ m_dwDomainId = adid;
+ }
+ }
+ return hr;
+ }
+
+};
+
+
+STDMETHODIMP EEClassFactory::GetLicInfo(LPLICINFO pLicInfo)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ if (!pLicInfo)
+ return E_POINTER;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ Thread *pThread = GET_THREAD();
+ hr = UpdateMethodTable();
+ if (SUCCEEDED(hr))
+ {
+ MethodTable *pHelperMT = pThread->GetDomain()->GetLicenseInteropHelperMethodTable();
+ MethodDesc *pMD = MemberLoader::FindMethod(pHelperMT, "GetLicInfo", &gsig_IM_LicenseInteropHelper_GetLicInfo);
+ MethodDescCallSite getLicInfo(pMD);
+
+ struct _gc {
+ OBJECTREF pHelper;
+ OBJECTREF pType;
+ } gc;
+ gc.pHelper = NULL; // LicenseInteropHelper
+ gc.pType = NULL;
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.pHelper = pHelperMT->Allocate();
+ gc.pType = m_pMethodTable->GetManagedClassObject();
+
+ {
+ INT32 fRuntimeKeyAvail = 0;
+ INT32 fLicVerified = 0;
+
+ ARG_SLOT args[4];
+ args[0] = ObjToArgSlot(gc.pHelper);
+ args[1] = ObjToArgSlot(gc.pType);
+ args[2] = (ARG_SLOT)&fRuntimeKeyAvail;
+ args[3] = (ARG_SLOT)&fLicVerified;
+ getLicInfo.Call(args);
+
+ pLicInfo->cbLicInfo = sizeof(LICINFO);
+ pLicInfo->fRuntimeKeyAvail = fRuntimeKeyAvail;
+ pLicInfo->fLicVerified = fLicVerified;
+ }
+ GCPROTECT_END();
+ }
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+STDMETHODIMP EEClassFactory::RequestLicKey(DWORD dwReserved, BSTR * pbstrKey)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (dwReserved != 0)
+ return E_INVALIDARG;
+
+ if (!pbstrKey)
+ return E_POINTER;
+
+ *pbstrKey = NULL;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ Thread *pThread = GET_THREAD();
+ hr = UpdateMethodTable();
+ if (SUCCEEDED(hr))
+ {
+ MethodTable *pHelperMT = pThread->GetDomain()->GetLicenseInteropHelperMethodTable();
+ MethodDesc *pMD = MemberLoader::FindMethod(pHelperMT, "RequestLicKey", &gsig_SM_LicenseInteropHelper_RequestLicKey);
+ MethodDescCallSite requestLicKey(pMD);
+
+ OBJECTREF pType = NULL;
+
+ GCPROTECT_BEGIN(pType);
+
+ pType = m_pMethodTable->GetManagedClassObject();
+ ARG_SLOT args[2];
+ args[0] = ObjToArgSlot(pType);
+ args[1] = (ARG_SLOT)pbstrKey;
+ hr = requestLicKey.Call_RetHR(args);
+ GCPROTECT_END();
+ }
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+void EEAllocateInstanceWorker(LPUNKNOWN pOuter, MethodTable* pMT, BOOL fHasLicensing, REFIID riid, BOOL fDesignTime, BSTR bstrKey, void** ppv)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(!pMT->IsComImport());
+ }
+ CONTRACTL_END;
+
+ *ppv = NULL;
+ OBJECTREF newobj;
+ CCWHolder pWrap = NULL;
+ BOOL fCtorAlreadyCalled = FALSE;
+ Thread* pThread = GetThread();
+
+ // classes that extend COM Imported class are special
+ if (ExtendsComImport(pMT))
+ {
+ pMT->EnsureInstanceActive();
+ newobj = AllocateObject(pMT);
+ }
+ else if (CRemotingServices::RequiresManagedActivation(pMT) != NoManagedActivation)
+ {
+ fCtorAlreadyCalled = TRUE;
+ newobj = CRemotingServices::CreateProxyOrObject(pMT, TRUE);
+ }
+ else
+ {
+ // If the class doesn't have a LicenseProviderAttribute, let's not
+ // pull in the LicenseManager class and friends.
+ if (!fHasLicensing)
+ {
+ pMT->EnsureInstanceActive();
+ newobj = AllocateObject( pMT, false );
+ }
+ else
+ {
+ MethodTable *pHelperMT = pThread->GetDomain()->GetLicenseInteropHelperMethodTable();
+ MethodDesc *pMD = MemberLoader::FindMethod(pHelperMT, "AllocateAndValidateLicense", &gsig_SM_LicenseInteropHelper_AllocateAndValidateLicense);
+ MethodDescCallSite allocateAndValidateLicense(pMD);
+
+ pHelperMT->EnsureInstanceActive();
+
+ OBJECTREF pType = NULL;
+
+ GCPROTECT_BEGIN(pType);
+
+ pType = pMT->GetManagedClassObject();
+
+ ARG_SLOT args[3];
+ args[0] = ObjToArgSlot(pType);
+ args[1] = (ARG_SLOT)bstrKey;
+ args[2] = fDesignTime ? 1 : 0;
+ newobj = allocateAndValidateLicense.Call_RetOBJECTREF(args);
+ fCtorAlreadyCalled = TRUE;
+
+ GCPROTECT_END();
+ }
+ }
+
+ GCPROTECT_BEGIN(newobj);
+ {
+ //get wrapper for the object, this could enable GC
+ pWrap = ComCallWrapper::InlineGetWrapper(&newobj);
+
+ // don't call any constructors if we already have called them
+ if (!fCtorAlreadyCalled && !pMT->IsValueType())
+ CallDefaultConstructor(newobj);
+ }
+ GCPROTECT_END();
+
+ if (pOuter == NULL)
+ {
+ // Return the tear-off
+ *ppv = ComCallWrapper::GetComIPFromCCW(pWrap, riid, NULL, GetComIPFromCCW::CheckVisibility);
+ if (!*ppv)
+ COMPlusThrowHR(E_NOINTERFACE);
+ }
+ else
+ {
+        // Aggregation support.
+ pWrap->InitializeOuter(pOuter);
+ IfFailThrow(pWrap->GetInnerUnknown(ppv));
+ }
+}
+
+// Allocate a managed object given the method table pointer
+HRESULT STDMETHODCALLTYPE EEAllocateInstance(LPUNKNOWN pOuter, MethodTable* pMT, BOOL fHasLicensing, REFIID riid, BOOL fDesignTime, BSTR bstrKey, void** ppv)
+{
+ CONTRACTL
+ {
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ THROWS; // CSE can escape out of this function
+#else // !FEATURE_CORRUPTING_EXCEPTIONS
+ NOTHROW;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(ppv, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (ppv == NULL)
+ return E_POINTER;
+ *ppv = NULL;
+
+ if ((!fDesignTime) && bstrKey == NULL)
+ return E_POINTER;
+
+ // aggregating objects should QI for IUnknown
+ if (pOuter != NULL && !IsEqualIID(riid, IID_IUnknown))
+ return E_INVALIDARG;
+
+ HRESULT hr = S_OK;
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Get the MethodDesc of the type being instantiated. Based upon it,
+ // we will decide whether to rethrow a CSE or not in
+ // END_EXTERNAL_ENTRYPOINT_RETHROW_CORRUPTING_EXCEPTIONS_EX below.
+ PTR_MethodDesc pMDDefConst = NULL;
+ BOOL fHasConstructor = FALSE;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+        // (See the comment on pMDDefConst above.)
+ if (pMT->HasDefaultConstructor())
+ {
+ pMDDefConst = pMT->GetDefaultConstructor();
+ fHasConstructor = (pMDDefConst != NULL);
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ EEAllocateInstanceWorker(pOuter, pMT, fHasLicensing, riid, fDesignTime, bstrKey, ppv);
+ }
+ END_EXTERNAL_ENTRYPOINT_RETHROW_CORRUPTING_EXCEPTIONS_EX((fHasConstructor && (!CEHelper::CanMethodHandleException(UseLast,pMDDefConst))) ||
+ (!fHasConstructor));
+
+ LOG((LF_INTEROP, LL_INFO100, "EEAllocateInstance for class %s object %8.8x\n", pMT->GetDebugClassName(), *ppv));
+
+ return hr;
+}
+
+IUnknown *AllocateEEClassFactoryHelper(CLSID *pClsId, MethodTable *pMT)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(pClsId));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN ((IUnknown*)new EEClassFactory(pClsId, pMT));
+}
+
+void InitializeClass(TypeHandle th)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(!th.IsNull());
+ }
+ CONTRACTL_END;
+
+ // Make sure the type isn't an interface or an abstract class.
+ if (th.IsAbstract() || th.IsInterface())
+ COMPlusThrowHR(COR_E_MEMBERACCESS);
+
+ // Unless we are dealing with a value class, the type must have a public
+ // default constructor.
+ if (!th.GetMethodTable()->HasExplicitOrImplicitPublicDefaultConstructor())
+ {
+ COMPlusThrowHR(COR_E_MEMBERACCESS);
+ }
+
+ // Call class init if necessary
+ th.GetMethodTable()->EnsureInstanceActive();
+ th.GetMethodTable()->CheckRunClassInitThrowing();
+}
+
+// Try to load a managed class and give out an IClassFactory
+void EEDllGetClassObjectHelper(REFCLSID rclsid, MethodTable* pMT, REFIID riid, LPVOID FAR *ppv)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(ppv));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ CLSID clsId;
+ SafeComHolder<IUnknown> pUnk = NULL;
+
+ memcpy(&clsId, &rclsid, sizeof(GUID));
+ pUnk = AllocateEEClassFactoryHelper(&clsId, pMT);
+
+ // Bump up the count to protect the object
+ ULONG cbRef = SafeAddRef(pUnk);
+ LogInteropAddRef(pUnk, cbRef, "EEDllGetClassObjectHelper: Bump up refcount to protect object during call");
+
+ // Query for the requested interface.
+ hr = SafeQueryInterface(pUnk, riid, (IUnknown**)ppv);
+    LogInteropQI(pUnk, riid, hr, "EEDllGetClassObjectHelper: QI for requested interface");
+ IfFailThrow(hr);
+}
+
+HRESULT STDMETHODCALLTYPE EEDllGetClassObject(REFCLSID rclsid, REFIID riid, LPVOID FAR *ppv)
+{
+ HRESULT hr = S_OK;
+ g_fEEComActivatedStartup = TRUE;
+ g_EEComObjectGuid = rclsid;
+
+ // The EE must be started before SetupForComCallHR is called and the contract is set up.
+ if (FAILED(hr = EnsureEEStarted(COINITEE_DEFAULT)))
+ return hr;
+
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(ppv, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (ppv == NULL)
+ return E_POINTER;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr);
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ // We are about to use COM IPs so make sure COM is started up.
+ EnsureComStarted();
+
+ MethodTable *pMT;
+
+ {
+ Thread *pThread = GetThread();
+ BEGIN_SO_INTOLERANT_CODE(pThread);
+ GCX_COOP();
+ pMT = GetTypeForCLSID(rclsid);
+ END_SO_INTOLERANT_CODE;
+ }
+
+        // If we can't find the class based on the CLSID, or if the registered managed
+        // class is a ComImport class, then fail the call. Also fail if the type is a
+        // generic type (either open or closed).
+ if (!pMT || pMT->IsComImport() || (pMT->GetNumGenericArgs() != 0))
+ {
+ COMPlusThrowHR(REGDB_E_CLASSNOTREG);
+ }
+
+        // Verify that the class is indeed creatable and run its .cctor if it
+        // hasn't been run yet.
+ InitializeClass(TypeHandle(pMT));
+
+ // Allocate the IClassFactory for the type.
+ EEDllGetClassObjectHelper(rclsid, pMT, riid, ppv);
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+} //EEDllGetClassObject
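+
+// Illustrative note (not part of the original source): EEDllGetClassObject sits on
+// the standard COM activation path. A native client reaches it roughly as follows
+// (CoCreateInstance loads the server and routes to the exported DllGetClassObject):
+//
+//   IClassFactory *pFactory = NULL;
+//   EEDllGetClassObject(rclsid, IID_IClassFactory, (LPVOID*)&pFactory);
+//   IUnknown *pObj = NULL;
+//   pFactory->CreateInstance(NULL, IID_IUnknown, (void**)&pObj);   // hands back a CCW
+//   pFactory->Release();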
+
+// Helper functions to get an object based on a name
+void ClrCreateManagedInstanceHelper(MethodTable* pMT, REFIID riid, LPVOID FAR *ppv)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(ppv));
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ HRESULT hr = S_OK;
+ SafeComHolderPreemp<IUnknown> pUnk = NULL;
+ SafeComHolderPreemp<IClassFactory> pFactory = NULL;
+
+ GUID guid;
+ pMT->GetGuid(&guid, TRUE);
+ pUnk = AllocateEEClassFactoryHelper(&guid, pMT);
+
+ // Bump up the count to protect the object for the duration of this function
+ ULONG cbRef = SafeAddRef(pUnk);
+ LogInteropAddRef(pUnk, cbRef, "ClrCreateManagedInstanceHelper: Bumping refcount to protect object during call");
+
+ // Query the factory for the IClassFactory interface.
+ hr = SafeQueryInterface(pUnk, IID_IClassFactory, (IUnknown**) &pFactory);
+ LogInteropQI(pUnk, IID_IClassFactory, hr, "ClrCreateManagedInstanceHelper: QI for IID_IClassFactory");
+ IfFailThrow(hr);
+
+ // Create an instance of the type.
+ IfFailThrow(pFactory->CreateInstance(NULL, riid, ppv));
+}
+
+STDAPI ClrCreateManagedInstance(LPCWSTR typeName, REFIID riid, LPVOID FAR *ppv)
+{
+ HRESULT hr = S_OK;
+
+ // The EE must be started before SetupForComCallHR is called and the contract is set up.
+ g_fEEHostedStartup = TRUE;
+ if (FAILED(hr = EnsureEEStarted(COINITEE_DEFAULT)))
+ return hr;
+
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(typeName, NULL_OK));
+ PRECONDITION(CheckPointer(ppv, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (ppv == NULL)
+ return E_POINTER;
+
+ if (typeName == NULL)
+ return E_INVALIDARG;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr);
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ // We are about to use COM IPs so make sure COM is started up.
+ EnsureComStarted();
+
+ MAKE_UTF8PTR_FROMWIDE(pName, typeName);
+
+ AppDomain* pDomain = SystemDomain::GetCurrentDomain();
+ MethodTable *pMT = TypeName::GetTypeUsingCASearchRules(pName, NULL).GetMethodTable();
+ if (!pMT || pMT->IsComImport())
+ COMPlusThrowHR(REGDB_E_CLASSNOTREG);
+
+        // Verify that the class is indeed creatable and run its .cctor if it
+        // hasn't been run yet.
+ InitializeClass(TypeHandle(pMT));
+
+ // Allocate the instance of the type.
+ ClrCreateManagedInstanceHelper(pMT, riid, ppv);
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+DWORD RegisterTypeForComClientsHelper(MethodTable *pMT, GUID *pGuid, CLSCTX clsContext, REGCLS flags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pMT != NULL);
+ PRECONDITION(pGuid != NULL);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DWORD dwCookie = 0;
+ SafeComHolder<IUnknown> pUnk = NULL;
+
+ // We are about to perform COM operations so ensure COM is started up.
+ EnsureComStarted();
+
+ // Allocate an EE class factory for the type.
+ pUnk = AllocateEEClassFactoryHelper(pGuid, pMT);
+
+ // bump up the count to protect the object
+ ULONG cbRef = SafeAddRef(pUnk);
+ LogInteropAddRef(pUnk, cbRef, "RegisterTypeForComClientsNative: Bumping refcount to protect class factory");
+
+ {
+ // Enable GC
+ GCX_PREEMP();
+
+ // Call CoRegisterClassObject.
+ IfFailThrow(CoRegisterClassObject(*(pGuid), pUnk, clsContext, flags, &dwCookie));
+ }
+
+    // CoRegisterClassObject bumps up the ref count, so the extra reference we added
+    // above is released when the SafeComHolder goes out of scope.
+ return dwCookie;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: RegisterTypeForComClientsNative
+//
+// Synopsis: Registers a class factory with COM classic for a given type
+// and CLSID. Later we can receive activations on this factory
+// and we return a CCW.
+//
+//+----------------------------------------------------------------------------
+FCIMPL2(VOID, RegisterTypeForComClientsNative, ReflectClassBaseObject* pTypeUNSAFE, GUID* pGuid)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pTypeUNSAFE != NULL);
+ PRECONDITION(pGuid != NULL);
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF pType = (REFLECTCLASSBASEREF) pTypeUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(pType);
+
+ // Retrieve the method table from the type.
+ MethodTable *pMT = pType->GetType().GetMethodTable();
+
+ // Call the helper to perform the registration.
+ RegisterTypeForComClientsHelper(pMT, pGuid, (CLSCTX)(CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER), REGCLS_MULTIPLEUSE);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+//+----------------------------------------------------------------------------
+//
+// Method: RegisterTypeForComClientsExNative
+//
+// Synopsis: Registers a class factory with COM classic for a given type.
+//
+//+----------------------------------------------------------------------------
+FCIMPL3(DWORD, RegisterTypeForComClientsExNative, ReflectClassBaseObject* pTypeUNSAFE, CLSCTX clsContext, REGCLS flags)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pTypeUNSAFE != NULL);
+ }
+ CONTRACTL_END;
+
+ DWORD dwCookie = 0;
+ GUID clsid;
+
+ REFLECTCLASSBASEREF pType = (REFLECTCLASSBASEREF) pTypeUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(pType);
+
+ // Retrieve the method table from the type.
+ MethodTable *pMT = pType->GetType().GetMethodTable();
+
+ // Retrieve the CLSID from the type.
+ pMT->GetGuid(&clsid, TRUE);
+
+ // Call the helper to perform the registration.
+ dwCookie = RegisterTypeForComClientsHelper(pMT, &clsid, clsContext, flags);
+
+ HELPER_METHOD_FRAME_END();
+
+ return dwCookie;
+}
+FCIMPLEND
+
+#else // FEATURE_COMINTEROP_MANAGED_ACTIVATION
+
+STDAPI ClrCreateManagedInstance(LPCWSTR typeName, REFIID riid, LPVOID FAR *ppv)
+{
+    return E_NOTIMPL; // @TODO: CoreCLR_REMOVED: completely remove this function
+}
+
+#endif // FEATURE_COMINTEROP_MANAGED_ACTIVATION
+
diff --git a/src/vm/classhash.cpp b/src/vm/classhash.cpp
new file mode 100644
index 0000000000..27e109e746
--- /dev/null
+++ b/src/vm/classhash.cpp
@@ -0,0 +1,1104 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+// Hash table associated with each module that records for all types defined in that module the mapping
+// between type name and token (or TypeHandle).
+//
+
+#include "common.h"
+#include "classhash.h"
+#include "ngenhash.inl"
+#include "fstring.h"
+#include "classhash.inl"
+
+PTR_EEClassHashEntry EEClassHashEntry::GetEncloser()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ return m_pEncloser.Get();
+}
+
+PTR_VOID EEClassHashEntry::GetData()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ // TypeHandles are encoded as a relative pointer rather than a regular pointer to avoid the need for image
+ // fixups (any TypeHandles in this hash are defined in the same module).
+ if ((dac_cast<TADDR>(m_Data) & EECLASSHASH_TYPEHANDLE_DISCR) == 0)
+ return RelativePointer<PTR_VOID>::GetValueMaybeNullAtPtr(PTR_HOST_INT_MEMBER_TADDR(EEClassHashEntry, this, m_Data));
+
+ return m_Data;
+}
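+
+// Illustrative sketch (not part of the original source): m_Data is a tagged value.
+// With the low EECLASSHASH_TYPEHANDLE_DISCR bit clear it holds a TypeHandle (stored
+// as a relative pointer); with the bit set it holds a compressed token (see the
+// Uncompress* helpers below). A caller can distinguish the two cases like this:
+//
+//   PTR_VOID data = pEntry->GetData();
+//   if ((dac_cast<TADDR>(data) & EECLASSHASH_TYPEHANDLE_DISCR) == 0)
+//       TypeHandle th = TypeHandle::FromPtr(data);                         // a real type
+//   else
+//       mdToken tok = EEClassHashTable::UncompressModuleAndClassDef(data); // a token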
+
+#ifndef DACCESS_COMPILE
+void EEClassHashEntry::SetData(PTR_VOID data)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // TypeHandles are encoded as a relative pointer rather than a regular pointer to avoid the need for image
+ // fixups (any TypeHandles in this hash are defined in the same module).
+ if (((TADDR)data & EECLASSHASH_TYPEHANDLE_DISCR) == 0)
+ RelativePointer<PTR_VOID>::SetValueMaybeNullAtPtr((TADDR)&m_Data, data);
+ else
+ m_Data = data;
+}
+
+void EEClassHashEntry::SetEncloser(EEClassHashEntry *pEncloser)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pEncloser.Set(pEncloser);
+}
+
+/*static*/
+EEClassHashTable *EEClassHashTable::Create(Module *pModule, DWORD dwNumBuckets, BOOL bCaseInsensitive, AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
+
+ }
+ CONTRACTL_END;
+
+ LoaderHeap *pHeap = pModule->GetAssembly()->GetLowFrequencyHeap();
+ EEClassHashTable *pThis = (EEClassHashTable*)pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(EEClassHashTable)));
+
+    // The base class gets initialized through chaining of constructors. We allocated the hash
+    // instance on the loader heap instead of via new, so use an in-place new to call the constructors now.
+ new (pThis) EEClassHashTable(pModule, pHeap, dwNumBuckets);
+
+ pThis->m_bCaseInsensitive = bCaseInsensitive;
+
+ return pThis;
+}
+
+EEClassHashEntry_t *EEClassHashTable::AllocNewEntry(AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+
+ PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
+ }
+ CONTRACTL_END;
+
+    // Simply defer to the base class for entry allocation (this is required; the base class wraps the entry
+    // it returns to us in its own metadata).
+ return BaseAllocateEntry(pamTracker);
+}
+
+#endif // !DACCESS_COMPILE
+
+VOID EEClassHashTable::UncompressModuleAndNonExportClassDef(HashDatum Data, Module **ppModule, mdTypeDef *pCL)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ DWORD dwData = (DWORD)dac_cast<TADDR>(Data);
+ _ASSERTE((dwData & EECLASSHASH_TYPEHANDLE_DISCR) == EECLASSHASH_TYPEHANDLE_DISCR);
+ _ASSERTE(!(dwData & EECLASSHASH_MDEXPORT_DISCR));
+
+ *pCL = ((dwData >> 1) & 0x00ffffff) | mdtTypeDef;
+ *ppModule = m_pModule;
+}
+
+bool EEClassHashTable::UncompressModuleAndClassDef(HashDatum Data, Loader::LoadFlag loadFlag,
+ Module **ppModule, mdTypeDef *pCL,
+ mdExportedType *pmdFoundExportedType)
+{
+ CONTRACT(bool)
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM();); }
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pCL));
+ PRECONDITION(CheckPointer(ppModule));
+ POSTCONDITION(*ppModule != nullptr || loadFlag != Loader::Load);
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END
+
+ DWORD dwData = (DWORD)dac_cast<TADDR>(Data);
+ _ASSERTE((dwData & EECLASSHASH_TYPEHANDLE_DISCR) == EECLASSHASH_TYPEHANDLE_DISCR);
+ if(dwData & EECLASSHASH_MDEXPORT_DISCR) {
+ *pmdFoundExportedType = ((dwData >> 1) & 0x00ffffff) | mdtExportedType;
+
+ *ppModule = m_pModule->GetAssembly()->
+ FindModuleByExportedType(*pmdFoundExportedType, loadFlag, mdTypeDefNil, pCL);
+ }
+ else {
+ UncompressModuleAndNonExportClassDef(Data, ppModule, pCL);
+ *pmdFoundExportedType = mdTokenNil;
+ _ASSERTE(*ppModule != nullptr); // Should never fail.
+ }
+
+ RETURN (*ppModule != nullptr);
+}
+
+/* static */
+mdToken EEClassHashTable::UncompressModuleAndClassDef(HashDatum Data)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ DWORD dwData = (DWORD)dac_cast<TADDR>(Data); // 64Bit: Pointer truncation is OK here - it's not actually a pointer
+ _ASSERTE((dwData & EECLASSHASH_TYPEHANDLE_DISCR) == EECLASSHASH_TYPEHANDLE_DISCR);
+
+ if(dwData & EECLASSHASH_MDEXPORT_DISCR)
+ return ((dwData >> 1) & 0x00ffffff) | mdtExportedType;
+ else
+ return ((dwData >> 1) & 0x00ffffff) | mdtTypeDef;
+}
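+
+// Illustrative sketch (not part of the original source): the inverse of the
+// Uncompress* helpers above. A hypothetical compression helper would pack the
+// 24-bit RID into bits 1-24, with the discriminator flags in the remaining bits:
+//
+//   HashDatum CompressClassDef(mdToken tok)   // hypothetical helper
+//   {
+//       _ASSERTE(TypeFromToken(tok) == mdtTypeDef || TypeFromToken(tok) == mdtExportedType);
+//       DWORD dwData = (RidFromToken(tok) << 1) | EECLASSHASH_TYPEHANDLE_DISCR;
+//       if (TypeFromToken(tok) == mdtExportedType)
+//           dwData |= EECLASSHASH_MDEXPORT_DISCR;
+//       return (HashDatum)(TADDR)dwData;
+//   }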
+
+VOID EEClassHashTable::ConstructKeyFromData(PTR_EEClassHashEntry pEntry, // IN : Entry to compare
+ ConstructKeyCallback *pCallback) // This class will process the output
+{
+ CONTRACTL
+ {
+ THROWS;
+ WRAPPER(MODE_ANY);
+ WRAPPER(GC_TRIGGERS);
+ if (m_bCaseInsensitive) INJECT_FAULT(COMPlusThrowOM();); else WRAPPER(FORBID_FAULT);
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ LPUTF8 Key[2];
+ Key[0] = Key[1] = NULL;
+
+ {
+#ifdef _DEBUG_IMPL
+ _ASSERTE(!(m_bCaseInsensitive && FORBIDGC_LOADER_USE_ENABLED()));
+#endif
+
+        // cqb - If m_bCaseInsensitive is true for the hash table, the bytes in Key will be allocated
+        // from cqb. This is to prevent wasting bytes in the Loader Heap. Thus, it is important to note
+        // that in this case the lifetime of Key is bounded by the lifetime of cqb, which will free the
+        // memory it allocated on destruction.
+
+ _ASSERTE(m_pModule);
+ LPSTR pszName = NULL;
+ LPSTR pszNameSpace = NULL;
+ IMDInternalImport *pInternalImport = NULL;
+
+ PTR_VOID Data = NULL;
+ if (!m_bCaseInsensitive)
+ Data = pEntry->GetData();
+ else
+ Data = (PTR_EEClassHashEntry(pEntry->GetData()))->GetData();
+
+        // Lower bit is a discriminator. If the lower bit is NOT SET, it means we have
+        // a TypeHandle; otherwise, we have an mdtTypeDef/mdtExportedType.
+ if ((dac_cast<TADDR>(Data) & EECLASSHASH_TYPEHANDLE_DISCR) == 0)
+ {
+ TypeHandle pType = TypeHandle::FromPtr(Data);
+ _ASSERTE (pType.GetMethodTable());
+ MethodTable *pMT = pType.GetMethodTable();
+ _ASSERTE(pMT != NULL);
+ IfFailThrow(pMT->GetMDImport()->GetNameOfTypeDef(pMT->GetCl(), (LPCSTR *)&pszName, (LPCSTR *)&pszNameSpace));
+ }
+ else // We have a mdtoken
+ {
+ // call the lightweight version first
+ mdToken mdtUncompressed = UncompressModuleAndClassDef(Data);
+ if (TypeFromToken(mdtUncompressed) == mdtExportedType)
+ {
+ IfFailThrow(m_pModule->GetClassLoader()->GetAssembly()->GetManifestImport()->GetExportedTypeProps(
+ mdtUncompressed,
+ (LPCSTR *)&pszNameSpace,
+ (LPCSTR *)&pszName,
+ NULL, //mdImpl
+ NULL, // type def
+ NULL)); // flags
+ }
+ else
+ {
+ _ASSERTE(TypeFromToken(mdtUncompressed) == mdtTypeDef);
+
+ Module * pUncompressedModule;
+ mdTypeDef UncompressedCl;
+ UncompressModuleAndNonExportClassDef(Data, &pUncompressedModule, &UncompressedCl);
+ _ASSERTE (pUncompressedModule && "Uncompressed token of unexpected type");
+ pInternalImport = pUncompressedModule->GetMDImport();
+ _ASSERTE(pInternalImport && "Uncompressed token has no MD import");
+ IfFailThrow(pInternalImport->GetNameOfTypeDef(UncompressedCl, (LPCSTR *)&pszName, (LPCSTR *)&pszNameSpace));
+ }
+ }
+
+ if (!m_bCaseInsensitive)
+ {
+ Key[0] = pszNameSpace;
+ Key[1] = pszName;
+ }
+ else
+ {
+ CONTRACT_VIOLATION(ThrowsViolation|FaultViolation);
+
+#ifndef DACCESS_COMPILE
+ // We can call the nothrow version here because we fulfilled the requirement of calling
+ // InitTables() in the "new" method.
+ INT32 iNSLength = InternalCasingHelper::InvariantToLowerNoThrow(NULL, 0, pszNameSpace);
+ if (!iNSLength)
+ {
+ COMPlusThrowOM();
+ }
+
+ INT32 iNameLength = InternalCasingHelper::InvariantToLowerNoThrow(NULL, 0, pszName);
+ if (!iNameLength)
+ {
+ COMPlusThrowOM();
+ }
+
+ // Prefast overflow sanity check before alloc.
+ INT32 iAllocSize;
+ if (!ClrSafeInt<INT32>::addition(iNSLength, iNameLength, iAllocSize))
+ COMPlusThrowOM();
+ LPUTF8 pszOutNameSpace = (LPUTF8) _alloca(iAllocSize);
+ if (iNSLength == 1)
+ {
+ *pszOutNameSpace = '\0';
+ }
+ else
+ {
+ if (!InternalCasingHelper::InvariantToLowerNoThrow(pszOutNameSpace, iNSLength, pszNameSpace))
+ {
+ COMPlusThrowOM();
+ }
+ }
+ LPUTF8 pszOutName = (LPUTF8) pszOutNameSpace + iNSLength;
+
+ if (!InternalCasingHelper::InvariantToLowerNoThrow(pszOutName, iNameLength, pszName))
+ {
+ COMPlusThrowOM();
+ }
+ Key[0] = pszOutNameSpace;
+ Key[1] = pszOutName;
+#else
+ DacNotImpl();
+#endif // #ifndef DACCESS_COMPILE
+ }
+ }
+
+ pCallback->UseKeys(Key);
+}
+
+#ifndef DACCESS_COMPILE
+
+EEClassHashEntry_t *EEClassHashTable::InsertValue(LPCUTF8 pszNamespace, LPCUTF8 pszClassName, PTR_VOID Data, EEClassHashEntry_t *pEncloser, AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pszNamespace != NULL);
+ _ASSERTE(pszClassName != NULL);
+ _ASSERTE(m_pModule);
+
+ EEClassHashEntry *pEntry = BaseAllocateEntry(pamTracker);
+
+ pEntry->SetData(Data);
+ pEntry->SetEncloser(pEncloser);
+#ifdef _DEBUG
+ pEntry->DebugKey[0] = pszNamespace;
+ pEntry->DebugKey[1] = pszClassName;
+#endif
+
+ BaseInsertEntry(Hash(pszNamespace, pszClassName), pEntry);
+
+ return pEntry;
+}
+
+#ifdef _DEBUG
+class ConstructKeyCallbackValidate : public EEClassHashTable::ConstructKeyCallback
+{
+public:
+ virtual void UseKeys(__in_ecount(2) LPUTF8 *Key)
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_DEBUG_ONLY;
+ _ASSERTE (strcmp(pNewEntry->DebugKey[1], Key[1]) == 0);
+ _ASSERTE (strcmp(pNewEntry->DebugKey[0], Key[0]) == 0);
+ SUPPORTS_DAC;
+ }
+
+ EEClassHashEntry_t *pNewEntry;
+
+};
+#endif // _DEBUG
+
+// This entrypoint lets the caller separate the allocation of the entry from the actual insertion into the hashtable. (This lets us
+// do multiple insertions without having to worry about an OOM occurring in between.)
+//
+// The newEntry must have been allocated using AllocEntry. It must not be referenced by any other entity (other than a holder or tracker)
+// If this function throws, the caller is responsible for freeing the entry.
+EEClassHashEntry_t *EEClassHashTable::InsertValueUsingPreallocatedEntry(EEClassHashEntry_t *pNewEntry, LPCUTF8 pszNamespace, LPCUTF8 pszClassName, PTR_VOID Data, EEClassHashEntry_t *pEncloser)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+
+ PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
+ }
+ CONTRACTL_END;
+
+ pNewEntry->SetData(Data);
+ pNewEntry->SetEncloser(pEncloser);
+
+#ifdef _DEBUG
+ pNewEntry->DebugKey[0] = pszNamespace;
+ pNewEntry->DebugKey[1] = pszClassName;
+#endif
+
+ BaseInsertEntry(Hash(pszNamespace, pszClassName), pNewEntry);
+
+ return pNewEntry;
+}
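+
+// Illustrative usage sketch (not part of the original source): the two-phase pattern
+// the comment above describes. All entries are allocated up front so the insertions
+// that follow cannot fail with an OOM part-way through:
+//
+//   EEClassHashEntry_t *pOuter  = pTable->AllocNewEntry(pamTracker);   // may throw OOM
+//   EEClassHashEntry_t *pNested = pTable->AllocNewEntry(pamTracker);   // may throw OOM
+//   // No further allocation occurs from here on:
+//   pTable->InsertValueUsingPreallocatedEntry(pOuter,  "My.Ns", "Outer",  dataOuter,  NULL);
+//   pTable->InsertValueUsingPreallocatedEntry(pNested, "My.Ns", "Nested", dataNested, pOuter);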
+
+EEClassHashEntry_t *EEClassHashTable::InsertValueIfNotFound(LPCUTF8 pszNamespace, LPCUTF8 pszClassName, PTR_VOID *pData, EEClassHashEntry_t *pEncloser, BOOL IsNested, BOOL *pbFound, AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+
+ PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pModule);
+ _ASSERTE(pszNamespace != NULL);
+ _ASSERTE(pszClassName != NULL);
+ _ASSERTE(m_pModule);
+
+ EEClassHashEntry_t * pNewEntry = FindItem(pszNamespace, pszClassName, IsNested, NULL);
+
+ if (pNewEntry)
+ {
+ *pData = pNewEntry->GetData();
+ *pbFound = TRUE;
+ return pNewEntry;
+ }
+
+ // Reached here implies that we didn't find the entry and need to insert it
+ *pbFound = FALSE;
+
+ pNewEntry = BaseAllocateEntry(pamTracker);
+
+ pNewEntry->SetData(*pData);
+ pNewEntry->SetEncloser(pEncloser);
+
+#ifdef _DEBUG
+ pNewEntry->DebugKey[0] = pszNamespace;
+ pNewEntry->DebugKey[1] = pszClassName;
+#endif
+
+ BaseInsertEntry(Hash(pszNamespace, pszClassName), pNewEntry);
+
+ return pNewEntry;
+}
+
+#endif // !DACCESS_COMPILE
+
+EEClassHashEntry_t *EEClassHashTable::FindItem(LPCUTF8 pszNamespace, LPCUTF8 pszClassName, BOOL IsNested, LookupContext *pContext)
+{
+ CONTRACTL
+ {
+ if (m_bCaseInsensitive) THROWS; else NOTHROW;
+ if (m_bCaseInsensitive) GC_TRIGGERS; else GC_NOTRIGGER;
+ if (m_bCaseInsensitive) INJECT_FAULT(COMPlusThrowOM();); else FORBID_FAULT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pModule);
+ _ASSERTE(pszNamespace != NULL);
+ _ASSERTE(pszClassName != NULL);
+
+ // It's legal for the caller not to pass us a LookupContext (when the type being queried is not nested
+ // there will never be any need to iterate over the search results). But we might need to iterate
+    // internally (since we look up via hash and hashes may collide). So substitute our own private context if
+ // one was not provided.
+ LookupContext sAltContext;
+ if (pContext == NULL)
+ pContext = &sAltContext;
+
+ // The base class provides the ability to enumerate all entries with the same hash code. We call this and
+ // further check which of these entries actually match the full key (there can be multiple hits with
+ // nested types in the picture).
+ PTR_EEClassHashEntry pSearch = BaseFindFirstEntryByHash(Hash(pszNamespace, pszClassName), pContext);
+
+ while (pSearch)
+ {
+ LPCUTF8 rgKey[] = { pszNamespace, pszClassName };
+
+ if (CompareKeys(pSearch, rgKey))
+ {
+ // If (IsNested), then we're looking for a nested class
+ // If (pSearch->pEncloser), we've found a nested class
+ if ((IsNested != FALSE) == (pSearch->GetEncloser() != NULL))
+ {
+ if (m_bCaseInsensitive)
+ g_IBCLogger.LogClassHashTableAccess(dac_cast<PTR_EEClassHashEntry>(pSearch->GetData()));
+ else
+ g_IBCLogger.LogClassHashTableAccess(pSearch);
+
+ return pSearch;
+ }
+ }
+
+ pSearch = BaseFindNextEntryByHash(pContext);
+ }
+
+ return NULL;
+}
+
+EEClassHashEntry_t *EEClassHashTable::FindNextNestedClass(NameHandle* pName, PTR_VOID *pData, LookupContext *pContext)
+{
+ CONTRACTL
+ {
+ if (m_bCaseInsensitive) THROWS; else NOTHROW;
+ if (m_bCaseInsensitive) GC_TRIGGERS; else GC_NOTRIGGER;
+ if (m_bCaseInsensitive) INJECT_FAULT(COMPlusThrowOM();); else FORBID_FAULT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pModule);
+ _ASSERTE(pName);
+
+ if (pName->GetNameSpace())
+ {
+ return FindNextNestedClass(pName->GetNameSpace(), pName->GetName(), pData, pContext);
+ }
+ else {
+#ifndef DACCESS_COMPILE
+        return FindNextNestedClass(pName->GetName(), pData, pContext); // this won't support DAC --
+                                                                       // it allocates a new namespace string
+#else
+ DacNotImpl();
+ return NULL;
+#endif
+ }
+}
+
+
+EEClassHashEntry_t *EEClassHashTable::FindNextNestedClass(LPCUTF8 pszNamespace, LPCUTF8 pszClassName, PTR_VOID *pData, LookupContext *pContext)
+{
+ CONTRACTL
+ {
+ if (m_bCaseInsensitive) THROWS; else NOTHROW;
+ if (m_bCaseInsensitive) GC_TRIGGERS; else GC_NOTRIGGER;
+ if (m_bCaseInsensitive) INJECT_FAULT(COMPlusThrowOM();); else FORBID_FAULT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pModule);
+
+ PTR_EEClassHashEntry pSearch = BaseFindNextEntryByHash(pContext);
+
+ while (pSearch)
+ {
+ LPCUTF8 rgKey[] = { pszNamespace, pszClassName };
+
+ if (pSearch->GetEncloser() && CompareKeys(pSearch, rgKey))
+ {
+ *pData = pSearch->GetData();
+ return pSearch;
+ }
+
+ pSearch = BaseFindNextEntryByHash(pContext);
+ }
+
+ return NULL;
+}
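+
+// Illustrative usage sketch (not part of the original source): a caller that must
+// disambiguate nested types whose keys hash to the same bucket starts with GetValue
+// and then walks the remaining candidates with the same LookupContext cursor
+// (IsTheEncloserWeWant is a hypothetical predicate):
+//
+//   LookupContext ctx;
+//   PTR_VOID data;
+//   EEClassHashEntry_t *pEntry = pTable->GetValue("My.Ns", "Nested", &data, TRUE, &ctx);
+//   while (pEntry != NULL && !IsTheEncloserWeWant(pEntry->GetEncloser()))
+//       pEntry = pTable->FindNextNestedClass("My.Ns", "Nested", &data, &ctx);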
+
+const UTF8 Utf8Empty[] = { 0 };
+
+EEClassHashEntry_t *EEClassHashTable::FindNextNestedClass(LPCUTF8 pszFullyQualifiedName, PTR_VOID *pData, LookupContext *pContext)
+{
+ CONTRACTL
+ {
+ if (m_bCaseInsensitive) THROWS; else NOTHROW;
+ if (m_bCaseInsensitive) GC_TRIGGERS; else GC_NOTRIGGER;
+ if (m_bCaseInsensitive) INJECT_FAULT(COMPlusThrowOM();); else FORBID_FAULT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pModule);
+
+ CQuickBytes szNamespace;
+
+ LPCUTF8 pNamespace = Utf8Empty;
+ LPCUTF8 p;
+
+ if ((p = ns::FindSep(pszFullyQualifiedName)) != NULL)
+ {
+ SIZE_T d = p - pszFullyQualifiedName;
+
+ FAULT_NOT_FATAL();
+ pNamespace = szNamespace.SetStringNoThrow(pszFullyQualifiedName, d);
+
+ if (NULL == pNamespace)
+ {
+ return NULL;
+ }
+
+ p++;
+ }
+ else
+ {
+ p = pszFullyQualifiedName;
+ }
+
+ return FindNextNestedClass(pNamespace, p, pData, pContext);
+}
+
+
+EEClassHashEntry_t * EEClassHashTable::GetValue(LPCUTF8 pszFullyQualifiedName, PTR_VOID *pData, BOOL IsNested, LookupContext *pContext)
+{
+ CONTRACTL
+ {
+ if (m_bCaseInsensitive) THROWS; else NOTHROW;
+ if (m_bCaseInsensitive) GC_TRIGGERS; else GC_NOTRIGGER;
+ if (m_bCaseInsensitive) INJECT_FAULT(COMPlusThrowOM();); else FORBID_FAULT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pModule);
+
+ CQuickBytes szNamespace;
+
+ LPCUTF8 pNamespace = Utf8Empty;
+
+ LPCUTF8 p = ns::FindSep(pszFullyQualifiedName);
+
+ if (p != NULL)
+ {
+ SIZE_T d = p - pszFullyQualifiedName;
+
+ FAULT_NOT_FATAL();
+ pNamespace = szNamespace.SetStringNoThrow(pszFullyQualifiedName, d);
+
+ if (NULL == pNamespace)
+ {
+ return NULL;
+ }
+
+ p++;
+ }
+ else
+ {
+ p = pszFullyQualifiedName;
+ }
+
+ EEClassHashEntry_t * ret = GetValue(pNamespace, p, pData, IsNested, pContext);
+
+ return ret;
+}
+
+
+EEClassHashEntry_t * EEClassHashTable::GetValue(LPCUTF8 pszNamespace, LPCUTF8 pszClassName, PTR_VOID *pData, BOOL IsNested, LookupContext *pContext)
+{
+ CONTRACTL
+ {
+ if (m_bCaseInsensitive) THROWS; else NOTHROW;
+ if (m_bCaseInsensitive) GC_TRIGGERS; else GC_NOTRIGGER;
+ if (m_bCaseInsensitive) INJECT_FAULT(COMPlusThrowOM();); else FORBID_FAULT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+
+ _ASSERTE(m_pModule);
+ EEClassHashEntry_t *pItem = FindItem(pszNamespace, pszClassName, IsNested, pContext);
+ if (pItem)
+ *pData = pItem->GetData();
+
+ return pItem;
+}
+
+
+EEClassHashEntry_t * EEClassHashTable::GetValue(NameHandle* pName, PTR_VOID *pData, BOOL IsNested, LookupContext *pContext)
+{
+ CONTRACTL
+ {
+ // for DAC builds m_bCaseInsensitive should be false
+ if (m_bCaseInsensitive) THROWS; else NOTHROW;
+ if (m_bCaseInsensitive) GC_TRIGGERS; else GC_NOTRIGGER;
+ if (m_bCaseInsensitive) INJECT_FAULT(COMPlusThrowOM();); else FORBID_FAULT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+
+ _ASSERTE(pName);
+ _ASSERTE(m_pModule);
+ if(pName->GetNameSpace() == NULL) {
+ return GetValue(pName->GetName(), pData, IsNested, pContext);
+ }
+ else {
+ return GetValue(pName->GetNameSpace(), pName->GetName(), pData, IsNested, pContext);
+ }
+}
+
+class ConstructKeyCallbackCompare : public EEClassHashTable::ConstructKeyCallback
+{
+public:
+ virtual void UseKeys(__in_ecount(2) LPUTF8 *pKey1)
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ SUPPORTS_DAC;
+
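+ // Fast path: if both the namespace and name pointers are identical we can
+ // skip the strcmp calls; otherwise compare both strings by value.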
+ bReturn = (
+ ((pKey1[0] == pKey2[0]) && (pKey1[1] == pKey2[1])) ||
+ ((strcmp (pKey1[0], pKey2[0]) == 0) && (strcmp (pKey1[1], pKey2[1]) == 0))
+ );
+ }
+
+ LPCUTF8 *pKey2;
+ BOOL bReturn;
+};
+
+// Returns TRUE if two keys are the same string.
+//
+// The case-insensitive table can throw OOM out of this function. The case-sensitive table can't.
+BOOL EEClassHashTable::CompareKeys(PTR_EEClassHashEntry pEntry, LPCUTF8 * pKey2)
+{
+ CONTRACTL
+ {
+ if (m_bCaseInsensitive) THROWS; else NOTHROW;
+ if (m_bCaseInsensitive) GC_TRIGGERS; else GC_NOTRIGGER;
+ if (m_bCaseInsensitive) INJECT_FAULT(COMPlusThrowOM();); else FORBID_FAULT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+
+ _ASSERTE(m_pModule);
+ _ASSERTE (pEntry);
+ _ASSERTE (pKey2);
+
+ ConstructKeyCallbackCompare cback;
+
+ cback.pKey2 = pKey2;
+
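+ // ConstructKeyFromData may throw for the case-insensitive table (building
+ // the key can allocate), while this method's contract is conditional; the
+ // scoped ThrowsViolation below reconciles the static contract checker.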
+ {
+ CONTRACT_VIOLATION(ThrowsViolation);
+ ConstructKeyFromData(pEntry, &cback);
+ }
+
+ return cback.bReturn;
+}
+
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+void EEClassHashTable::Save(DataImage *image, CorProfileData *profileData)
+{
+ STANDARD_VM_CONTRACT;
+
+ // See comment on PrepareExportedTypesForSaving for what's going on here.
+ if (m_pModule->IsManifest())
+ PrepareExportedTypesForSaving(image);
+
+ // The base class handles most of the saving logic (it controls the layout of the hash memory). It will
+ // call us back for some per-entry related details (should we save this entry?, is this entry hot? etc.).
+ // See the methods immediately following this one.
+ BaseSave(image, profileData);
+}
+
+// Should a particular entry be persisted into the ngen image?
+bool EEClassHashTable::ShouldSave(DataImage *pImage, EEClassHashEntry_t *pEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // We always save all entries.
+ return true;
+}
+
+// Does profile data indicate that this entry is hot (likely to be read at runtime)?
+bool EEClassHashTable::IsHotEntry(EEClassHashEntry_t *pEntry, CorProfileData *pProfileData)
+{
+ STANDARD_VM_CONTRACT;
+
+ PTR_VOID datum = pEntry->GetData();
+ mdToken token;
+
+ if (m_bCaseInsensitive)
+ datum = PTR_EEClassHashEntry((TADDR)datum)->GetData();
+
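+ // Low bit clear => the entry holds a resolved TypeHandle; low bit set with
+ // bit 31 clear => a compressed mdTypeDef token (see classhash.inl for the encoding).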
+ if ((((ULONG_PTR) datum) & EECLASSHASH_TYPEHANDLE_DISCR) == 0)
+ {
+ TypeHandle t = TypeHandle::FromPtr(datum);
+ _ASSERTE(!t.IsNull());
+ MethodTable *pMT = t.GetMethodTable();
+ if (pMT == NULL)
+ return false;
+
+ token = pMT->GetCl();
+ }
+ else if (((ULONG_PTR)datum & EECLASSHASH_MDEXPORT_DISCR) == 0)
+ {
+ DWORD dwDatum = (DWORD)(DWORD_PTR)(datum);
+ token = ((dwDatum >> 1) & 0x00ffffff) | mdtTypeDef;
+ }
+ else
+ return false;
+
+ if (pProfileData->GetTypeProfilingFlagsOfToken(token) & (1 << ReadClassHashTable))
+ return true;
+
+ return false;
+}
+
+// This is our chance to fixup our hash entries before they're committed to the ngen image. Return true if the
+// entry will remain immutable at runtime (this might allow the entry to be stored in a read-only, shareable
+// portion of the image).
+bool EEClassHashTable::SaveEntry(DataImage *pImage, CorProfileData *pProfileData, EEClassHashEntry_t *pOldEntry, EEClassHashEntry_t *pNewEntry, EntryMappingTable *pMap)
+{
+ STANDARD_VM_CONTRACT;
+
+ // If we're a nested class we have a reference to the entry of our enclosing class. But this reference
+ // will have been broken by the saving process (the base class re-creates and re-orders all entries in
+ // order to optimize them for ngen images). So we read the old encloser address from the old version of
+ // our entry and, if there is one, we use the supplied map to transform that address into the new version.
+ // We can then write the updated address back into our encloser field in the new copy of the entry.
+ EEClassHashEntry_t *pOldEncloser = pOldEntry->GetEncloser();
+ if (pOldEncloser)
+ pNewEntry->SetEncloser(pMap->GetNewEntryAddress(pOldEncloser));
+
+ // We have to do something similar with our TypeHandle references (because they're stored as relative
+ // pointers which became invalid when the entry was moved). The following sequence is a no-op if the data
+ // field contains a token rather than a TypeHandle.
+ PTR_VOID oldData = pOldEntry->GetData();
+ pNewEntry->SetData(oldData);
+
+#ifdef _DEBUG
+ if (!pImage->IsStored(pNewEntry->DebugKey[0]))
+ pImage->StoreStructure(pNewEntry->DebugKey[0], (ULONG)(strlen(pNewEntry->DebugKey[0])+1), DataImage::ITEM_DEBUG);
+ if (!pImage->IsStored(pNewEntry->DebugKey[1]))
+ pImage->StoreStructure(pNewEntry->DebugKey[1], (ULONG)(strlen(pNewEntry->DebugKey[1])+1), DataImage::ITEM_DEBUG);
+#endif // _DEBUG
+
+ // The entry is immutable at runtime if it's encoded as a TypeHandle. If it's a token then it might be
+ // bashed into a TypeHandle later on.
+ return ((TADDR)pNewEntry->GetData() & EECLASSHASH_TYPEHANDLE_DISCR) == 0;
+}
+
+// The manifest module contains exported type entries in the EEClassHashTables. During ngen, these entries are
+// resolved to the corresponding TypeHandle. However, at runtime the module containing the exported type is not
+// guaranteed to be loaded, so we cannot use the TypeHandle. Instead, we bash the TypeHandle back to the
+// export token.
+void EEClassHashTable::PrepareExportedTypesForSaving(DataImage *image)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(GetAppDomain()->IsCompilationDomain());
+ PRECONDITION(m_pModule->IsManifest());
+ }
+ CONTRACTL_END
+
+ IMDInternalImport *pImport = m_pModule->GetMDImport();
+
+ HENUMInternalHolder phEnum(pImport);
+ phEnum.EnumInit(mdtExportedType, mdTokenNil);
+ mdToken mdExportedType;
+
+ for (int i = 0; pImport->EnumNext(&phEnum, &mdExportedType); i++)
+ {
+ mdTypeDef typeDef;
+ LPCSTR pszNameSpace, pszName;
+ mdToken mdImpl;
+ DWORD dwFlags;
+ if (FAILED(pImport->GetExportedTypeProps(
+ mdExportedType,
+ &pszNameSpace,
+ &pszName,
+ &mdImpl,
+ &typeDef,
+ &dwFlags)))
+ {
+ THROW_BAD_FORMAT(BFA_NOFIND_EXPORTED_TYPE, m_pModule);
+ continue;
+ }
+
+ CorTokenType tokenType = (CorTokenType) TypeFromToken(mdImpl);
+ CONSISTENCY_CHECK(tokenType == mdtFile ||
+ tokenType == mdtAssemblyRef ||
+ tokenType == mdtExportedType);
+
+ // If mdImpl is a file or an assembly, then it points to the location
+ // of the type. If mdImpl is another exported type, then it is the enclosing
+ // exported type for the current (nested) type.
+ BOOL isNested = (tokenType == mdtExportedType);
+
+ // ilasm does not consistently set the dwFlags to correctly reflect nesting
+ //CONSISTENCY_CHECK(!isNested || IsTdNested(dwFlags));
+
+ EEClassHashEntry_t * pEntry = NULL;
+
+ if (!isNested)
+ {
+ pEntry = FindItem(pszNameSpace, pszName, FALSE/*nested*/, NULL);
+ }
+ else
+ {
+ PTR_VOID data;
+ LookupContext sContext;
+
+ // The following line finds the first "nested" class EEClassHashEntry_t.
+ if ((pEntry = FindItem(pszNameSpace, pszName, TRUE/*nested*/, &sContext)) != NULL)
+ {
+ // The (immediate) encloser of the EEClassHashEntry_t (i.e. pEntry) is stored in pEntry->pEncloser.
+ // It must match the type referred to by "mdImpl".
+ // "CompareNestedEntryWithExportedType" checks whether "pEntry->pEncloser" matches "mdImpl",
+ // walking up the enclosing chain as needed.
+ _ASSERTE (TypeFromToken(mdImpl) == mdtExportedType);
+ while ((!m_pModule->GetClassLoader()->CompareNestedEntryWithExportedType(pImport,
+ mdImpl,
+ this,
+ pEntry->GetEncloser())) &&
+ (pEntry = FindNextNestedClass(pszNameSpace, pszName, &data, &sContext)) != NULL);
+ }
+ }
+
+ if (!pEntry) {
+ THROW_BAD_FORMAT(BFA_NOFIND_EXPORTED_TYPE, m_pModule);
+ continue;
+ }
+
+ if (((ULONG_PTR)(pEntry->GetData())) & EECLASSHASH_TYPEHANDLE_DISCR)
+ continue;
+
+ TypeHandle th = TypeHandle::FromPtr(pEntry->GetData());
+
+#ifdef _DEBUG
+ MethodTable * pMT = th.GetMethodTable();
+ _ASSERTE(tokenType != mdtFile || pMT->GetModule()->GetModuleRef() == mdImpl);
+ // "typeDef" is just a hint for unsigned assemblies, and ILASM sets it to 0
+ // Hence, we need to relax this assert.
+ _ASSERTE(pMT->GetCl() == typeDef || typeDef == mdTokenNil);
+#endif
+
+ if (image->CanEagerBindToTypeHandle(th) && image->CanHardBindToZapModule(th.GetLoaderModule()))
+ continue;
+
+ // Bash the TypeHandle back to the export token
+ pEntry->SetData(EEClassHashTable::CompressClassDef(mdExportedType));
+ }
+}
+
+void EEClassHashTable::Fixup(DataImage *pImage)
+{
+ STANDARD_VM_CONTRACT;
+
+ // The base class does all the main fixup work. We're called back at FixupEntry below for each entry so we
+ // can fixup additional pointers.
+ BaseFixup(pImage);
+}
+
+void EEClassHashTable::FixupEntry(DataImage *pImage, EEClassHashEntry_t *pEntry, void *pFixupBase, DWORD cbFixupOffset)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Cross-entry references require special fixup. Fortunately they know how to do this themselves.
+ pEntry->m_pEncloser.Fixup(pImage, this);
+
+#ifdef _DEBUG
+ pImage->FixupPointerField(pFixupBase, cbFixupOffset + offsetof(EEClassHashEntry_t, DebugKey[0]));
+ pImage->FixupPointerField(pFixupBase, cbFixupOffset + offsetof(EEClassHashEntry_t, DebugKey[1]));
+#endif // _DEBUG
+
+ // Case insensitive tables and normal hash entries pointing to TypeHandles contain relative pointers.
+ // These don't require runtime fixups (because relative pointers remain constant even in the presence of
+ // base relocations) but we still have to register a fixup of type IMAGE_REL_BASED_RELPTR. This does the
+ // work necessary to update the value of the relative pointer once the ngen image is finally laid out
+ // (the layout process can change the relationship between this field and the target it points to, so we
+ // don't know the final delta until the image is just about to be written out).
+ if (m_bCaseInsensitive || ((TADDR)pEntry->GetData() & EECLASSHASH_TYPEHANDLE_DISCR) == 0)
+ {
+ pImage->FixupField(pFixupBase,
+ cbFixupOffset + offsetof(EEClassHashEntry_t, m_Data),
+ pEntry->GetData(),
+ 0,
+ IMAGE_REL_BASED_RELPTR);
+ }
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+/*===========================MakeCaseInsensitiveTable===========================
+**Action: Creates a case-insensitive lookup table for class names. We create a
+** full path (namespace & class name) in lowercase and then use that as the
+** key in our table. The hash datum is a pointer to the EEClassHashEntry in this
+** table.
+**
+!! You MUST have already acquired the appropriate lock before calling this. !!
+**
+**Returns:The newly allocated and completed hashtable.
+==============================================================================*/
+
+class ConstructKeyCallbackCaseInsensitive : public EEClassHashTable::ConstructKeyCallback
+{
+public:
+ virtual void UseKeys(__in_ecount(2) LPUTF8 *key)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ //Build the canonical name (convert it to lowercase).
+ //key[0] is the namespace, key[1] is the class name.
+
+ pLoader->CreateCanonicallyCasedKey(key[0], key[1], ppszLowerNameSpace, ppszLowerClsName);
+ }
+
+ ClassLoader *pLoader;
+ LPUTF8 *ppszLowerNameSpace;
+ LPUTF8 *ppszLowerClsName;
+
+};
+
+EEClassHashTable *EEClassHashTable::MakeCaseInsensitiveTable(Module *pModule, AllocMemTracker *pamTracker)
+{
+ EEClassHashEntry_t *pTempEntry;
+ LPUTF8 pszLowerClsName = NULL;
+ LPUTF8 pszLowerNameSpace = NULL;
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+
+ PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
+ }
+ CONTRACTL_END;
+
+
+
+ _ASSERTE(m_pModule);
+ _ASSERTE (pModule == m_pModule);
+
+ // Allocate the table and verify that we actually got one.
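+ // Sizing heuristic: start with half as many buckets as the source table has
+ // entries, but never fewer than 11.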
+ EEClassHashTable * pCaseInsTable = EEClassHashTable::Create(pModule,
+ max(BaseGetElementCount() / 2, 11),
+ TRUE /* bCaseInsensitive */,
+ pamTracker);
+
+ // Walk all of the buckets and insert them into our new case insensitive table
+ BaseIterator sIter;
+ BaseInitIterator(&sIter);
+ while ((pTempEntry = sIter.Next()) != NULL)
+ {
+ ConstructKeyCallbackCaseInsensitive cback;
+
+ cback.pLoader = pModule->GetClassLoader();
+ cback.ppszLowerNameSpace = &pszLowerNameSpace;
+ cback.ppszLowerClsName = &pszLowerClsName;
+ ConstructKeyFromData(pTempEntry, &cback);
+
+ //Add the newly created name to our hash table. The hash datum is a pointer
+ //to the entry associated with that name in this hashtable.
+ pCaseInsTable->InsertValue(pszLowerNameSpace, pszLowerClsName, (PTR_VOID)pTempEntry, pTempEntry->GetEncloser(), pamTracker);
+ }
+
+ return pCaseInsTable;
+}
+#endif // !DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+void EEClassHashTable::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ // Defer to the base class to do the bulk of this work. It calls EnumMemoryRegionsForEntry below for each
+ // entry in case we want to enumerate anything extra.
+ BaseEnumMemoryRegions(flags);
+}
+
+void EEClassHashTable::EnumMemoryRegionsForEntry(EEClassHashEntry_t *pEntry, CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ // Nothing more to enumerate (the base class handles enumeration of the memory for the entry itself).
+}
+#endif // DACCESS_COMPILE
diff --git a/src/vm/classhash.h b/src/vm/classhash.h
new file mode 100644
index 0000000000..ae415d9fe9
--- /dev/null
+++ b/src/vm/classhash.h
@@ -0,0 +1,149 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+// Hash table associated with each module that records for all types defined in that module the mapping
+// between type name and token (or TypeHandle).
+//
+
+#ifndef __CLASS_HASH_INCLUDED
+#define __CLASS_HASH_INCLUDED
+
+#include "ngenhash.h"
+
+// The type of each entry in the hash.
+typedef DPTR(struct EEClassHashEntry) PTR_EEClassHashEntry;
+class EEClassHashTable;
+typedef struct EEClassHashEntry
+{
+ friend class EEClassHashTable;
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+#ifdef BINDER
+ friend class MdilModule;
+#endif
+
+#ifdef _DEBUG
+ PTR_CUTF8 DebugKey[2]; // Name of the type
+#endif // _DEBUG
+
+ // Accessors for encloser (pointer to hash entry of enclosing type when this entry describes a nested
+ // type). You need to use the accessors since the reference is not encoded as a simple pointer anymore.
+ PTR_EEClassHashEntry GetEncloser();
+ void SetEncloser(EEClassHashEntry *pEncloser) DAC_EMPTY();
+
+ // Bit masks for flags in the data field. <NICE>Ideally we'd abstract this encoding but that's too much
+ // code churn right now.</NICE>
+ #define EECLASSHASH_TYPEHANDLE_DISCR ((ULONG_PTR)(UINT)0x00000001)
+ #define EECLASSHASH_MDEXPORT_DISCR ((ULONG_PTR)(UINT)0x80000000)
+ #define EECLASSHASH_ALREADYSEEN ((ULONG_PTR)(UINT)0x40000000)
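+ // Worked example (illustrative): typeDef token 0x02000008 compresses to
+ // ((0x000008 << 1) | EECLASSHASH_TYPEHANDLE_DISCR) == 0x11; see
+ // CompressClassDef in classhash.inl for the full encoding.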
+
+ // Accessors for the data field (TypeHandle or a token with EECLASSHASH_TYPEHANDLE_DISCR set and possibly
+ // some of the other flag bits above). The type handle is also encoded as a non-regular pointer, so use
+ // this accessor.
+ PTR_VOID GetData();
+ void SetData(PTR_VOID data) DAC_EMPTY();
+
+private:
+ PTR_VOID m_Data; // Either the token (if EECLASSHASH_TYPEHANDLE_DISCR), or the type handle encoded
+ // as a relative pointer
+
+ NgenHashEntryRef<EEClassHashTable, EEClassHashEntry, 4> m_pEncloser; // If this entry is for a nested
+ // class, this field stores a
+ // reference to the enclosing type
+ // (which must be in this same
+ // hash). The NgenHashEntryRef<>
+ // is required to abstract some
+ // complex logic required while
+ // ngen'ing such references.
+} EEClassHashEntry_t;
+
+// The hash type itself. All common logic is provided by the NgenHashTable templated base class. See
+// NgenHash.h for details.
+typedef DPTR(class EEClassHashTable) PTR_EEClassHashTable;
+class EEClassHashTable : public NgenHashTable<EEClassHashTable, EEClassHashEntry, 4>
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+public:
+ // The LookupContext type we export to track GetValue/FindNextNestedClass enumerations is simply a rename
+ // of the base class's hash value enumerator.
+ typedef NgenHashTable<EEClassHashTable, EEClassHashEntry, 4>::LookupContext LookupContext;
+
+ static EEClassHashTable *Create(Module *pModule, DWORD dwNumBuckets, BOOL bCaseInsensitive, AllocMemTracker *pamTracker);
+
+ //NOTICE: look at InsertValue() in ClassLoader; that may be the function you want to use. Use this only
+ // when you are sure you want to insert the value into 'this' table. This function does not deal
+ // with case sensitivity (as the class loader often has to).
+#ifndef BINDER
+ EEClassHashEntry_t *InsertValue(LPCUTF8 pszNamespace, LPCUTF8 pszClassName, PTR_VOID Data, EEClassHashEntry_t *pEncloser, AllocMemTracker *pamTracker);
+ EEClassHashEntry_t *InsertValueIfNotFound(LPCUTF8 pszNamespace, LPCUTF8 pszClassName, PTR_VOID *pData, EEClassHashEntry_t *pEncloser, BOOL IsNested, BOOL *pbFound, AllocMemTracker *pamTracker);
+ EEClassHashEntry_t *InsertValueUsingPreallocatedEntry(EEClassHashEntry_t *pStorageForNewEntry, LPCUTF8 pszNamespace, LPCUTF8 pszClassName, PTR_VOID Data, EEClassHashEntry_t *pEncloser);
+ EEClassHashEntry_t *GetValue(LPCUTF8 pszNamespace, LPCUTF8 pszClassName, PTR_VOID *pData, BOOL IsNested, LookupContext *pContext);
+ EEClassHashEntry_t *GetValue(LPCUTF8 pszFullyQualifiedName, PTR_VOID *pData, BOOL IsNested, LookupContext *pContext);
+ EEClassHashEntry_t *GetValue(NameHandle* pName, PTR_VOID *pData, BOOL IsNested, LookupContext *pContext);
+ EEClassHashEntry_t *AllocNewEntry(AllocMemTracker *pamTracker);
+ EEClassHashTable *MakeCaseInsensitiveTable(Module *pModule, AllocMemTracker *pamTracker);
+ EEClassHashEntry_t *FindItem(LPCUTF8 pszNamespace, LPCUTF8 pszClassName, BOOL IsNested, LookupContext *pContext);
+ EEClassHashEntry_t *FindNextNestedClass(NameHandle* pName, PTR_VOID *pData, LookupContext *pContext);
+ EEClassHashEntry_t *FindNextNestedClass(LPCUTF8 pszNamespace, LPCUTF8 pszClassName, PTR_VOID *pData, LookupContext *pContext);
+ EEClassHashEntry_t *FindNextNestedClass(LPCUTF8 pszFullyQualifiedName, PTR_VOID *pData, LookupContext *pContext);
+#endif // BINDER
+
+ BOOL CompareKeys(PTR_EEClassHashEntry pEntry, LPCUTF8 * pKey2);
+
+ static DWORD Hash(LPCUTF8 pszNamespace, LPCUTF8 pszClassName);
+
+ class ConstructKeyCallback
+ {
+ public:
+ virtual void UseKeys(__in_ecount(2) LPUTF8 *Key) = 0;
+ };
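+ // (ConstructKeyCallbackCompare and ConstructKeyCallbackCaseInsensitive in
+ // classhash.cpp are concrete implementations of this callback.)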
+
+ static PTR_VOID CompressClassDef(mdToken cl /* either a TypeDef or ExportedType*/);
+#ifndef BINDER
+ bool UncompressModuleAndClassDef(PTR_VOID Data, Loader::LoadFlag loadFlag,
+ Module **ppModule, mdTypeDef *pCL,
+ mdExportedType *pmdFoundExportedType);
+ VOID UncompressModuleAndNonExportClassDef(PTR_VOID Data, Module **ppModule,
+ mdTypeDef *pCL);
+ static mdToken UncompressModuleAndClassDef(PTR_VOID Data);
+#endif // !BINDER
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ void EnumMemoryRegionsForEntry(EEClassHashEntry_t *pEntry, CLRDataEnumMemoryFlags flags);
+#endif
+
+#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
+ void Save(DataImage *pImage, CorProfileData *pProfileData);
+ void Fixup(DataImage *pImage);
+
+private:
+ friend class NgenHashTable<EEClassHashTable, EEClassHashEntry, 4>;
+
+ void PrepareExportedTypesForSaving(DataImage *image);
+
+ bool ShouldSave(DataImage *pImage, EEClassHashEntry_t *pEntry);
+ bool IsHotEntry(EEClassHashEntry_t *pEntry, CorProfileData *pProfileData);
+ bool SaveEntry(DataImage *pImage, CorProfileData *pProfileData, EEClassHashEntry_t *pOldEntry, EEClassHashEntry_t *pNewEntry, EntryMappingTable *pMap);
+ void FixupEntry(DataImage *pImage, EEClassHashEntry_t *pEntry, void *pFixupBase, DWORD cbFixupOffset);
+#endif // FEATURE_PREJIT && !DACCESS_COMPILE
+
+private:
+#ifndef DACCESS_COMPILE
+ EEClassHashTable(Module *pModule, LoaderHeap *pHeap, DWORD cInitialBuckets) :
+ NgenHashTable<EEClassHashTable, EEClassHashEntry, 4>(pModule, pHeap, cInitialBuckets) {}
+#endif
+
+ VOID ConstructKeyFromData(PTR_EEClassHashEntry pEntry, ConstructKeyCallback * pCallback);
+
+ BOOL m_bCaseInsensitive; // Default is FALSE; TRUE only for tables created by MakeCaseInsensitiveTable
+};
+
+#endif // !__CLASS_HASH_INCLUDED
diff --git a/src/vm/classhash.inl b/src/vm/classhash.inl
new file mode 100644
index 0000000000..a5658c72eb
--- /dev/null
+++ b/src/vm/classhash.inl
@@ -0,0 +1,68 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+// Hash table associated with each module that records for all types defined in that module the mapping
+// between type name and token (or TypeHandle).
+//
+
+
+#ifndef CLASSHASH_INL
+#define CLASSHASH_INL
+
+// Low bit is discriminator between unresolved and resolved.
+// Low bit == 0: Resolved: data == TypeHandle
+// Low bit == 1: Unresolved: data encodes either a typeDef or an exportedType token. Bit 31 is the discriminator.
+//
+// If not resolved, bit 31 (64-bit: yes, it's bit31, not the high bit!) is discriminator between regular typeDef and exportedType
+//
+// Bit31 == 0: mdTypeDef: 000t tttt tttt tttt tttt tttt tttt ttt1
+// Bit31 == 1: mdExportedType: 100e eeee eeee eeee eeee eeee eeee eee1
+//
+//
+
+/* static */
+inline PTR_VOID EEClassHashTable::CompressClassDef(mdToken cl)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(TypeFromToken(cl) == mdtTypeDef || TypeFromToken(cl) == mdtExportedType);
+
+ switch (TypeFromToken(cl))
+ {
+ case mdtTypeDef: return (PTR_VOID)( 0 | (((ULONG_PTR)cl & 0x00ffffff) << 1) | EECLASSHASH_TYPEHANDLE_DISCR);
+ case mdtExportedType: return (PTR_VOID)(EECLASSHASH_MDEXPORT_DISCR | (((ULONG_PTR)cl & 0x00ffffff) << 1) | EECLASSHASH_TYPEHANDLE_DISCR);
+ default:
+ _ASSERTE(!"Can't get here.");
+ return 0;
+ }
+}
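+
+// Illustrative sketch only (not the runtime's code): decoding reverses the
+// shifts above. Assuming "Data" was produced by CompressClassDef, a
+// hypothetical decoder would be:
+//
+//   inline mdToken DecodeClassDef(PTR_VOID Data) // hypothetical helper
+//   {
+//       ULONG_PTR d = (ULONG_PTR)Data;
+//       _ASSERTE(d & EECLASSHASH_TYPEHANDLE_DISCR);   // a token, not a TypeHandle
+//       ULONG_PTR rid = (d >> 1) & 0x00ffffff;        // recover the 24-bit RID
+//       return (mdToken)(rid | ((d & EECLASSHASH_MDEXPORT_DISCR) ? mdtExportedType : mdtTypeDef));
+//   }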
+
+inline DWORD EEClassHashTable::Hash(LPCUTF8 pszNamespace, LPCUTF8 pszClassName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+
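+ // This is the classic djb2 hash (seed 5381, hash*33 ^ ch), run over the
+ // namespace and class name back to back, so "Ns.Outer" + "Inner" hashes the
+ // same as the single string "Ns.OuterInner" would.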
+ DWORD dwHash = 5381;
+ DWORD dwChar;
+
+ while ((dwChar = *pszNamespace++) != 0)
+ dwHash = ((dwHash << 5) + dwHash) ^ dwChar;
+
+ while ((dwChar = *pszClassName++) != 0)
+ dwHash = ((dwHash << 5) + dwHash) ^ dwChar;
+
+ return dwHash;
+}
+
+#endif // CLASSHASH_INL
diff --git a/src/vm/classloadlevel.h b/src/vm/classloadlevel.h
new file mode 100644
index 0000000000..c54e192c2c
--- /dev/null
+++ b/src/vm/classloadlevel.h
@@ -0,0 +1,89 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// classloadlevel.h
+
+
+#ifndef _H_CLASSLOADLEVEL
+#define _H_CLASSLOADLEVEL
+
+// Class loading is split into phases in order to handle recursion
+// through field types and generic instantiations in the presence of
+// multiple threads and the possibility of load failures.
+//
+// This enum represents the level to which a class has been loaded.
+// (See GetLoadLevel() on TypeHandle, MethodTable and TypeDesc).
+//
+// CLASS_LOAD_BEGIN
+//
+// Placeholder level used before type has been created or located in ngen image
+//
+//
+// CLASS_LOAD_UNRESTOREDTYPEKEY
+//
+// Type lives in an ngen image and components of its type key need restoring:
+// for methodtables: generic arguments, EEClass pointer, Module pointer in EEClass
+// for typedescs: param type, template methodtable
+//
+//
+// CLASS_LOAD_UNRESTORED
+//
+// Type lives in an ngen image and contains fields that need restoring
+// (e.g. parent, interfaces, param type)
+//
+//
+// CLASS_LOAD_APPROXPARENTS
+//
+// Type has been created, or loaded from an ngen image and fields
+// have been restored, but some fields have been filled in with only
+// "approximate" information for generic type arguments. In
+// particular, the parent class is approximate, and interfaces are
+// generic (instantiation at formal type parameters). Other
+// information (vtable and dictionary) may be based on these
+// approximate type arguments.
+//
+//
+// CLASS_LOAD_EXACTPARENTS
+//
+// The generic arguments to parent class and interfaces are exact
+// types, and the whole hierarchy (parent and interfaces) is loaded
+// to this level. However, other dependent types (such as generic arguments)
+// may still be loaded at a lower level.
+//
+//
+// CLASS_DEPENDENCIES_LOADED
+//
+// The type is fully loaded, as are all dependents (hierarchy, generic args,
+// canonical MT, etc). For generic instantiations, the constraints
+// have not yet been verified.
+//
+//
+// CLASS_LOADED
+//
+// This is a "read-only" verification phase that changes no state other than
+// to flip the IsFullyLoaded() bit. We use this phase to do conformity
+// checks (which can't be done in an earlier phase) on the class in a
+// recursion-proof manner.
+// For example, we check constraints on generic types, and perform access
+// checks for the types of (valuetype) fields.
+//
+
+enum ClassLoadLevel
+{
+ CLASS_LOAD_BEGIN,
+ CLASS_LOAD_UNRESTOREDTYPEKEY,
+ CLASS_LOAD_UNRESTORED,
+ CLASS_LOAD_APPROXPARENTS,
+ CLASS_LOAD_EXACTPARENTS,
+ CLASS_DEPENDENCIES_LOADED,
+ CLASS_LOADED,
+
+ CLASS_LOAD_LEVEL_FINAL = CLASS_LOADED,
+};
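+
+// Illustrative only: code that requires the exact hierarchy typically guards
+// on the load level before walking parents, along the lines of:
+//
+//   if (th.GetLoadLevel() >= CLASS_LOAD_EXACTPARENTS)
+//   {
+//       // safe to inspect the exact parent and interface instantiations
+//   }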
+
+
+extern const char * const classLoadLevelName[];
+
+#endif // _H_CLASSLOADLEVEL
diff --git a/src/vm/classnames.h b/src/vm/classnames.h
new file mode 100644
index 0000000000..1b06ea26e8
--- /dev/null
+++ b/src/vm/classnames.h
@@ -0,0 +1,167 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef __CLASSNAMES_H__
+#define __CLASSNAMES_H__
+
+#include "namespace.h"
+
+// These system class names are not assembly qualified.
+
+#define g_AppDomainClassName "System.AppDomain"
+#define g_ArgIteratorName "ArgIterator"
+#define g_ArrayClassName "System.Array"
+
+#define g_NullableName "Nullable`1"
+
+#define g_CollectionsEnumerableItfName "System.Collections.IEnumerable"
+#define g_CollectionsEnumeratorClassName "System.Collections.IEnumerator"
+#define g_CollectionsCollectionItfName "System.Collections.ICollection"
+#define g_CollectionsGenericCollectionItfName "System.Collections.Generic.ICollection`1"
+#define g_CollectionsGenericReadOnlyCollectionItfName "System.Collections.Generic.IReadOnlyCollection`1"
+
+#ifdef FEATURE_COMINTEROP
+#define g_ECMAKeyToken "B77A5C561934E089" // The ECMA key used by some framework assemblies: mscorlib, system, etc.
+#define g_FXKeyToken "b03f5f7f11d50a3a" // The FX key used by other framework assemblies: System.Web, System.Drawing, etc.
+#define g_CoreClrKeyToken "7CEC85D7BEA7798E" // The silverlight platform key used by CoreClr framework assemblies: mscorlib, system, etc
+#define g_SystemAsmName "System"
+#define g_DrawingAsmName "System.Drawing"
+#define g_ColorClassName "System.Drawing.Color"
+#define g_ColorTranslatorClassName "System.Drawing.ColorTranslator"
+#define g_SystemUriClassName "System.Uri"
+#define g_WinRTUriClassName "Windows.Foundation.Uri"
+#define g_WinRTUriClassNameW W("Windows.Foundation.Uri")
+#define g_WinRTIUriRCFactoryName "Windows.Foundation.IUriRuntimeClassFactory"
+#define g_INotifyCollectionChangedName "System.Collections.Specialized.INotifyCollectionChanged"
+#define g_NotifyCollectionChangedEventHandlerName "System.Collections.Specialized.NotifyCollectionChangedEventHandler"
+#define g_NotifyCollectionChangedEventArgsName "System.Collections.Specialized.NotifyCollectionChangedEventArgs"
+#define g_NotifyCollectionChangedEventArgsMarshalerName "System.Runtime.InteropServices.WindowsRuntime.NotifyCollectionChangedEventArgsMarshaler"
+#define g_WinRTNotifyCollectionChangedEventArgsNameW W("Windows.UI.Xaml.Interop.NotifyCollectionChangedEventArgs")
+#define g_INotifyPropertyChangedName "System.ComponentModel.INotifyPropertyChanged"
+#define g_PropertyChangedEventHandlerName "System.ComponentModel.PropertyChangedEventHandler"
+#define g_PropertyChangedEventArgsName "System.ComponentModel.PropertyChangedEventArgs"
+#define g_PropertyChangedEventArgsMarshalerName "System.Runtime.InteropServices.WindowsRuntime.PropertyChangedEventArgsMarshaler"
+#define g_WinRTPropertyChangedEventArgsNameW W("Windows.UI.Xaml.Data.PropertyChangedEventArgs")
+#define g_WinRTIIteratorClassName "Windows.Foundation.Collections.IIterator`1"
+#define g_WinRTIIteratorClassNameW W("Windows.Foundation.Collections.IIterator`1")
+#define g_ICommandName "System.Windows.Input.ICommand"
+#define g_ComObjectName "__ComObject"
+#define g_RuntimeClassName "RuntimeClass"
+#endif // FEATURE_COMINTEROP
+#ifdef FEATURE_REMOTING
+#define g_ContextBoundObjectClassName "System.ContextBoundObject"
+#endif
+
+#define g_DateClassName "System.DateTime"
+#define g_DateTimeOffsetClassName "System.DateTimeOffset"
+#define g_DecimalClassName "System.Decimal"
+#define g_DecimalName "Decimal"
+
+#ifdef FEATURE_COMINTEROP
+
+#define g_WindowsFoundationActivatableAttributeClassName "Windows.Foundation.Metadata.ActivatableAttribute"
+#define g_WindowsFoundationComposableAttributeClassName "Windows.Foundation.Metadata.ComposableAttribute"
+#define g_WindowsFoundationStaticAttributeClassName "Windows.Foundation.Metadata.StaticAttribute"
+#define g_WindowsFoundationDefaultClassName "Windows.Foundation.Metadata.DefaultAttribute"
+#define g_WindowsFoundationMarshalingBehaviorAttributeClassName "Windows.Foundation.Metadata.MarshalingBehaviorAttribute"
+#define g_WindowsFoundationGCPressureAttributeClassName "Windows.Foundation.Metadata.GCPressureAttribute"
+#endif // FEATURE_COMINTEROP
+
+#define g_EnumeratorToEnumClassName "System.Runtime.InteropServices.CustomMarshalers.EnumeratorToEnumVariantMarshaler"
+#define g_ExceptionClassName "System.Exception"
+#define g_ExecutionEngineExceptionClassName "System.ExecutionEngineException"
+
+#define g_MarshalByRefObjectClassName "System.MarshalByRefObject"
+
+#define g_ThreadStaticAttributeClassName "System.ThreadStaticAttribute"
+#define g_ContextStaticAttributeClassName "System.ContextStaticAttribute"
+#define g_StringFreezingAttributeClassName "System.Runtime.CompilerServices.StringFreezingAttribute"
+#define g_TypeIdentifierAttributeClassName "System.Runtime.InteropServices.TypeIdentifierAttribute"
+
+#define g_ObjectClassName "System.Object"
+#define g_ObjectName "Object"
+#define g_OutOfMemoryExceptionClassName "System.OutOfMemoryException"
+
+#define g_PermissionTokenFactoryName "System.Security.PermissionTokenFactory"
+#define g_PolicyExceptionClassName "System.Security.Policy.PolicyException"
+
+#define g_ReflectionClassName "System.RuntimeType"
+#define g_ReflectionConstructorName "System.Reflection.RuntimeConstructorInfo"
+#define g_ReflectionEventInfoName "System.Reflection.EventInfo"
+#define g_ReflectionEventName "System.Reflection.RuntimeEventInfo"
+#define g_ReflectionExpandoItfName "System.Runtime.InteropServices.Expando.IExpando"
+#define g_CMExpandoToDispatchExMarshaler "System.Runtime.InteropServices.CustomMarshalers.ExpandoToDispatchExMarshaler"
+#define g_CMExpandoViewOfDispatchEx "System.Runtime.InteropServices.CustomMarshalers.ExpandoViewOfDispatchEx"
+#define g_ReflectionFieldName "System.Reflection.RuntimeFieldInfo"
+#define g_ReflectionMemberInfoName "System.Reflection.MemberInfo"
+#define g_MethodBaseName "System.Reflection.MethodBase"
+#define g_ReflectionFieldInfoName "System.Reflection.FieldInfo"
+#define g_ReflectionPropertyInfoName "System.Reflection.PropertyInfo"
+#define g_ReflectionConstructorInfoName "System.Reflection.ConstructorInfo"
+#define g_ReflectionMethodInfoName "System.Reflection.MethodInfo"
+#define g_ReflectionMethodName "System.Reflection.RuntimeMethodInfo"
+#define g_ReflectionMethodInterfaceName "System.IRuntimeMethodInfo"
+#define g_ReflectionAssemblyName "System.Reflection.RuntimeAssembly"
+#define g_ReflectionModuleName "System.Reflection.RuntimeModule"
+#define g_ReflectionParamInfoName "System.Reflection.ParameterInfo"
+#define g_ReflectionParamName "System.Reflection.RuntimeParameterInfo"
+#define g_ReflectionPropInfoName "System.Reflection.RuntimePropertyInfo"
+#define g_ReflectionReflectItfName "System.Reflection.IReflect"
+#define g_RuntimeArgumentHandleName "RuntimeArgumentHandle"
+#define g_RuntimeFieldHandleClassName "System.RuntimeFieldHandle"
+#define g_RuntimeMethodHandleClassName "System.RuntimeMethodHandle"
+#define g_RuntimeMethodHandleInternalName "RuntimeMethodHandleInternal"
+#define g_RuntimeTypeHandleClassName "System.RuntimeTypeHandle"
+
+#define g_SecurityPermissionClassName "System.Security.Permissions.SecurityPermission"
+#define g_StackOverflowExceptionClassName "System.StackOverflowException"
+#define g_StringBufferClassName "System.Text.StringBuilder"
+#define g_StringBufferName "StringBuilder"
+#define g_StringClassName "System.String"
+#define g_StringName "String"
+#define g_SharedStaticsClassName "System.SharedStatics"
+
+#define g_ThreadClassName "System.Threading.Thread"
+#define g_TransparentProxyName "__TransparentProxy"
+#define g_TypeClassName "System.Type"
+
+#define g_VariantClassName "System.Variant"
+#define g_GuidClassName "System.Guid"
+
+#define g_CompilerServicesFixedAddressValueTypeAttribute "System.Runtime.CompilerServices.FixedAddressValueTypeAttribute"
+#define g_CompilerServicesUnsafeValueTypeAttribute "System.Runtime.CompilerServices.UnsafeValueTypeAttribute"
+#define g_UnmanagedFunctionPointerAttribute "System.Runtime.InteropServices.UnmanagedFunctionPointerAttribute"
+#define g_DefaultDllImportSearchPathsAttribute "System.Runtime.InteropServices.DefaultDllImportSearchPathsAttribute"
+
+#define g_CompilerServicesTypeDependencyAttribute "System.Runtime.CompilerServices.TypeDependencyAttribute"
+
+#define g_SecurityCriticalAttribute "System.Security.SecurityCriticalAttribute"
+#define g_SecurityTransparentAttribute "System.Security.SecurityTransparentAttribute"
+#ifndef FEATURE_CORECLR
+#define g_SecurityTreatAsSafeAttribute "System.Security.SecurityTreatAsSafeAttribute"
+#define g_SecurityRulesAttribute "System.Security.SecurityRulesAttribute"
+#endif //FEATURE_CORECLR
+
+#define g_SecuritySafeCriticalAttribute "System.Security.SecuritySafeCriticalAttribute"
+
+#if defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+#define g_SecurityAPTCA "System.Security.AllowPartiallyTrustedCallersAttribute"
+#define g_SecurityPartialTrustVisibilityLevel "System.Security.PartialTrustVisibilityLevel"
+#define g_PartialTrustVisibilityLevel "PartialTrustVisibilityLevel"
+#endif // defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+
+#define g_ReferenceAssemblyAttribute "System.Runtime.CompilerServices.ReferenceAssemblyAttribute"
+
+#define g_CriticalFinalizerObjectName "CriticalFinalizerObject"
+
+#ifdef FEATURE_SERIALIZATION
+#define g_StreamingContextName "StreamingContext"
+#endif
+
+#define g_AssemblySignatureKeyAttribute "System.Reflection.AssemblySignatureKeyAttribute"
+
+#endif //!__CLASSNAMES_H__
diff --git a/src/vm/clrex.cpp b/src/vm/clrex.cpp
new file mode 100644
index 0000000000..6fe52e53b3
--- /dev/null
+++ b/src/vm/clrex.cpp
@@ -0,0 +1,2873 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// ---------------------------------------------------------------------------
+// Clrex.cpp
+// ---------------------------------------------------------------------------
+
+
+#include "common.h"
+#include "clrex.h"
+#include "field.h"
+#include "eetoprofinterfacewrapper.inl"
+#include "typestring.h"
+#include "sigformat.h"
+#include "eeconfig.h"
+#include "frameworkexceptionloader.h"
+
+#ifdef WIN64EXCEPTIONS
+#include "exceptionhandling.h"
+#endif // WIN64EXCEPTIONS
+
+#ifdef FEATURE_COMINTEROP
+#include "interoputil.inl"
+#endif // FEATURE_COMINTEROP
+
+// ---------------------------------------------------------------------------
+// CLRException methods
+// ---------------------------------------------------------------------------
+
+CLRException::~CLRException()
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ if (GetThrowableHandle() == NULL)
+ {
+ CANNOT_TAKE_LOCK;
+ }
+ else
+ {
+ CAN_TAKE_LOCK; // because of DestroyHandle
+ }
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifndef CROSSGEN_COMPILE
+ OBJECTHANDLE throwableHandle = GetThrowableHandle();
+ if (throwableHandle != NULL)
+ {
+ STRESS_LOG1(LF_EH, LL_INFO100, "CLRException::~CLRException destroying throwable: obj = %x\n", GetThrowableHandle());
+ // clear the handle first, so if we SO on destroying it, we don't have a dangling reference
+ SetThrowableHandle(NULL);
+ DestroyHandle(throwableHandle);
+ }
+#endif
+}
+
+OBJECTREF CLRException::GetThrowable()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ MODE_COOPERATIVE;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+#ifdef CROSSGEN_COMPILE
+ _ASSERTE(false);
+ return NULL;
+#else
+ OBJECTREF throwable = NULL;
+
+ if (NingenEnabled())
+ {
+ return NULL;
+ }
+
+ Thread *pThread = GetThread();
+
+ if (pThread->IsRudeAbortInitiated()) {
+ return GetPreallocatedRudeThreadAbortException();
+ }
+
+ if ((IsType(CLRLastThrownObjectException::GetType()) &&
+ pThread->LastThrownObject() == GetPreallocatedStackOverflowException()))
+ {
+ return GetPreallocatedStackOverflowException();
+ }
+
+ OBJECTHANDLE oh = GetThrowableHandle();
+ if (oh != NULL)
+ {
+ return ObjectFromHandle(oh);
+ }
+
+ Exception *pLastException = pThread->m_pCreatingThrowableForException;
+ if (pLastException != NULL)
+ {
+ if (IsSameInstanceType(pLastException))
+ {
+#if defined(_DEBUG)
+ static int BreakOnExceptionInGetThrowable = -1;
+ if (BreakOnExceptionInGetThrowable == -1)
+ {
+ BreakOnExceptionInGetThrowable = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnExceptionInGetThrowable);
+ }
+ if (BreakOnExceptionInGetThrowable)
+ {
+ _ASSERTE(!"BreakOnExceptionInGetThrowable");
+ }
+ LOG((LF_EH, LL_INFO100, "GetThrowable: Exception in GetThrowable, translating to a preallocated exception.\n"));
+#endif // _DEBUG
+ // Look at the type of GET_EXCEPTION() and see if it is OOM or SO.
+ if (IsPreallocatedOOMException())
+ {
+ throwable = GetPreallocatedOutOfMemoryException();
+ }
+ else if (GetInstanceType() == EEException::GetType() && GetHR() == COR_E_THREADABORTED)
+ {
+ // If creating a normal ThreadAbortException fails, due to OOM or StackOverflow,
+ // use a pre-created one.
+ // We do not want to change a ThreadAbortException into OOM or StackOverflow, because
+ // doing so would cause a recursive call when the escalation policy is on:
+ // Creating ThreadAbortException fails, we throw OOM. Escalation leads to ThreadAbort.
+ // The cycle repeats.
+ throwable = GetPreallocatedThreadAbortException();
+ }
+ else
+ {
+ // I am not convinced that this case is actually a fatal error in the runtime.
+ // There have been two bugs in early 2006 (VSW 575647 and 575650) that came in here,
+ // both because of OOM and resulted in the ThreadAbort clause above being added since
+ // we were creating a ThreadAbort throwable that, due to OOM, got us on a path
+ // which came here. Both were valid execution paths and scenarios and not a fatal condition.
+ //
+ // I am tempted to return preallocated OOM from here but my concern is that it *may*
+ // result in fake OOM exceptions being thrown that could break valid scenarios.
+ //
+ // Hence, we return the preallocated System.Exception instance. Lossy information is better
+ // than wrong or no information (or even FailFast).
+ _ASSERTE (!"Recursion in CLRException::GetThrowable");
+
+ // We didn't recognize it, so use the preallocated System.Exception instance.
+ STRESS_LOG0(LF_EH, LL_INFO100, "CLRException::GetThrowable: Recursion! Translating to preallocated System.Exception.\n");
+ throwable = GetPreallocatedBaseException();
+ }
+ }
+ }
+
+ GCPROTECT_BEGIN(throwable);
+
+ if (throwable == NULL)
+ {
+ // We need to disable the backout stack validation at this point since GetThrowable can
+ // take arbitrarily large amounts of stack for different exception types; however we know
+ // for a fact that we will never go through this code path if the exception is a stack
+ // overflow exception since we already handled that case above with the pre-allocated SO exception.
+ DISABLE_BACKOUT_STACK_VALIDATION;
+
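+ // RAII helper: records this exception as the one a throwable is currently
+ // being created for, restoring the previous value on scope exit. This is
+ // what allows the IsSameInstanceType() check above to detect re-entry.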
+ class RestoreLastException
+ {
+ Thread *m_pThread;
+ Exception *m_pLastException;
+ public:
+ RestoreLastException(Thread *pThread, Exception *pException)
+ {
+ m_pThread = pThread;
+ m_pLastException = m_pThread->m_pCreatingThrowableForException;
+ m_pThread->m_pCreatingThrowableForException = pException;
+ }
+ ~RestoreLastException()
+ {
+ m_pThread->m_pCreatingThrowableForException = m_pLastException;
+ }
+ };
+
+ RestoreLastException restore(pThread, this);
+
+ EX_TRY
+ {
+ FAULT_NOT_FATAL();
+ throwable = CreateThrowable();
+ }
+ EX_CATCH
+ {
+ // This code used to be this line:
+ // throwable = GET_THROWABLE();
+ // GET_THROWABLE() expands to CLRException::GetThrowable(GET_EXCEPTION()),
+ // (where GET_EXCEPTION() refers to the exception that was thrown from
+ // CreateThrowable() and is being caught in this EX_TRY/EX_CATCH.)
+ // If that exception is the same as the one for which this GetThrowable()
+ // was called, we're in a recursive situation.
+ // Since the CreateThrowable() call should return a type from mscorlib,
+ // there really shouldn't be much opportunity for error. We could be
+ // out of memory, we could overflow the stack, or the runtime could
+ // be in a weird state (the thread could be aborted as well).
+ // Because we've seen a number of recursive death bugs here, just look
+ // explicitly for OOM and SO, and otherwise use ExecutionEngineException.
+
+ // Check whether the exception from CreateThrowable() is the same as the current
+ // exception. If not, call GetThrowable(), otherwise, settle for a
+ // preallocated exception.
+ Exception *pException = GET_EXCEPTION();
+
+ if (GetHR() == COR_E_THREADABORTED)
+ {
+ // If creating a normal ThreadAbortException fails, due to OOM or StackOverflow,
+ // use a pre-created one.
+ // We do not want to change a ThreadAbortException into OOM or StackOverflow, because
+ // doing so would cause a recursive call when the escalation policy is on:
+ // Creating ThreadAbortException fails, we throw OOM. Escalation leads to ThreadAbort.
+ // The cycle repeats.
+ throwable = GetPreallocatedThreadAbortException();
+ }
+ else
+ {
+ throwable = CLRException::GetThrowableFromException(pException);
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ }
+
+ {
+ DISABLE_BACKOUT_STACK_VALIDATION;
+ if (throwable == NULL)
+ {
+ STRESS_LOG0(LF_EH, LL_INFO100, "CLRException::GetThrowable: We have failed to track exceptions accurately through the system.\n");
+
+ // There's no reason to believe that it is an OOM. A better choice is ExecutionEngineException.
+ // We have failed to track exceptions accurately through the system. However, it's arguably
+ // better to give the wrong exception object than it is to rip the process. So let's leave
+ // it as an Assert for now and convert it to ExecutionEngineException in the next release.
+
+ // SQL Stress is hitting the assert. We want to remove it, so that we can see if there are further errors
+ // masked by the assert.
+ // _ASSERTE(FALSE);
+
+ throwable = GetPreallocatedOutOfMemoryException();
+ }
+
+ EX_TRY
+ {
+ SetThrowableHandle(GetAppDomain()->CreateHandle(throwable));
+ if (m_innerException != NULL && !CLRException::IsPreallocatedExceptionObject(throwable))
+ {
+ // Only set inner exception if the exception is not preallocated.
+ FAULT_NOT_FATAL();
+
+ // If inner exception is not empty, then set the managed exception's
+ // _innerException field properly
+ OBJECTREF throwableValue = CLRException::GetThrowableFromException(m_innerException);
+ ((EXCEPTIONREF)throwable)->SetInnerException(throwableValue);
+ }
+
+ }
+ EX_CATCH
+ {
+ // No matter... we just don't get to cache the throwable.
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+
+ GCPROTECT_END();
+
+ return throwable;
+#endif
+}
+
+HRESULT CLRException::GetHR()
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+
+// Is it legal to switch to GCX_COOP in a SO_TOLERANT region?
+ GCX_COOP();
+ hr = GetExceptionHResult(GetThrowable());
+
+ END_SO_INTOLERANT_CODE;
+
+ return hr;
+}
+
+#ifdef FEATURE_COMINTEROP
+HRESULT CLRException::SetErrorInfo()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ IErrorInfo *pErrorInfo = NULL;
+
+ // Try to get IErrorInfo
+ EX_TRY
+ {
+ pErrorInfo = GetErrorInfo();
+ }
+ EX_CATCH
+ {
+ // Since there was an exception getting IErrorInfo, get the exception's HR so
+ // that we can return it back to the caller as the new exception.
+ hr = GET_EXCEPTION()->GetHR();
+ pErrorInfo = NULL;
+ LOG((LF_EH, LL_INFO100, "CLRException::SetErrorInfo: caught exception (hr = %08X) while trying to get IErrorInfo\n", hr));
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ if (!pErrorInfo)
+ {
+ // Return the HR to the caller if we don't get IErrorInfo - if the HR is E_NOINTERFACE, then
+ // there was no IErrorInfo available. If it's anything else, it implies we failed to get the
+ // interface and have the HR corresponding to the exception we took while trying to get IErrorInfo.
+ return hr;
+ }
+ else
+ {
+ GCX_PREEMP();
+
+ EX_TRY
+ {
+ LeaveRuntimeHolderNoThrow lrh((size_t)::SetErrorInfo);
+ ::SetErrorInfo(0, pErrorInfo);
+ pErrorInfo->Release();
+
+ // Success in setting the ErrorInfo on the thread
+ hr = S_OK;
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ // Log the failure
+ LOG((LF_EH, LL_INFO100, "CLRException::SetErrorInfo: caught exception (hr = %08X) while trying to set IErrorInfo\n", hr));
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+
+ return hr;
+}
+
+IErrorInfo *CLRException::GetErrorInfo()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ IErrorInfo *pErrorInfo = NULL;
+
+#ifndef CROSSGEN_COMPILE
+ // Attempt to get IErrorInfo only if COM is initialized.
+ // Not all codepaths expect to have it initialized (e.g. hosting APIs).
+ if (g_fComStarted)
+ {
+ // We probe here for SO since GetThrowable and GetComIPFromObjectRef are SO intolerant
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+
+ // Get errorinfo only when our SO probe succeeds
+ {
+ // Switch to coop mode since GetComIPFromObjectRef requires that
+ // and we could be here in any mode...
+ GCX_COOP();
+
+ OBJECTREF e = NULL;
+ GCPROTECT_BEGIN(e);
+
+ e = GetThrowable();
+
+ if (e != NULL)
+ {
+ pErrorInfo = (IErrorInfo *)GetComIPFromObjectRef(&e, IID_IErrorInfo);
+ }
+
+ GCPROTECT_END();
+ }
+
+ END_SO_INTOLERANT_CODE;
+ }
+ else
+ {
+ // Write to the log in case COM isn't initialized.
+ LOG((LF_EH, LL_INFO100, "CLRException::GetErrorInfo: exiting since COM is not initialized.\n"));
+ }
+#endif //CROSSGEN_COMPILE
+
+ // return the IErrorInfo we got...
+ return pErrorInfo;
+}
+#else // FEATURE_COMINTEROP
+IErrorInfo *CLRException::GetErrorInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+ return NULL;
+}
+HRESULT CLRException::SetErrorInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return S_OK;
+}
+#endif // FEATURE_COMINTEROP
+
+void CLRException::GetMessage(SString &result)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef CROSSGEN_COMPILE
+ GCX_COOP();
+
+ OBJECTREF e = GetThrowable();
+ if (e != NULL)
+ {
+ _ASSERTE(IsException(e->GetMethodTable()));
+
+ GCPROTECT_BEGIN (e);
+
+ STRINGREF message = ((EXCEPTIONREF)e)->GetMessage();
+
+ if (!message)
+ result.Clear();
+ else
+ message->GetSString(result);
+
+ GCPROTECT_END ();
+ }
+#endif
+}
+
+#ifndef CROSSGEN_COMPILE
+OBJECTREF CLRException::GetPreallocatedBaseException()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(g_pPreallocatedBaseException != NULL);
+ return ObjectFromHandle(g_pPreallocatedBaseException);
+}
+
+OBJECTREF CLRException::GetPreallocatedOutOfMemoryException()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(g_pPreallocatedOutOfMemoryException != NULL);
+ return ObjectFromHandle(g_pPreallocatedOutOfMemoryException);
+}
+
+OBJECTREF CLRException::GetPreallocatedStackOverflowException()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(g_pPreallocatedStackOverflowException != NULL);
+ return ObjectFromHandle(g_pPreallocatedStackOverflowException);
+}
+
+OBJECTREF CLRException::GetPreallocatedExecutionEngineException()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(g_pPreallocatedExecutionEngineException != NULL);
+ return ObjectFromHandle(g_pPreallocatedExecutionEngineException);
+}
+
+OBJECTREF CLRException::GetPreallocatedRudeThreadAbortException()
+{
+ WRAPPER_NO_CONTRACT;
+ // When we are hosted, we pre-create this exception.
+ // This function should be called only if the exception has been created.
+ _ASSERTE(g_pPreallocatedRudeThreadAbortException);
+ return ObjectFromHandle(g_pPreallocatedRudeThreadAbortException);
+}
+
+OBJECTREF CLRException::GetPreallocatedThreadAbortException()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(g_pPreallocatedThreadAbortException);
+ return ObjectFromHandle(g_pPreallocatedThreadAbortException);
+}
+
+OBJECTHANDLE CLRException::GetPreallocatedOutOfMemoryExceptionHandle()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(g_pPreallocatedOutOfMemoryException != NULL);
+ return g_pPreallocatedOutOfMemoryException;
+}
+
+OBJECTHANDLE CLRException::GetPreallocatedThreadAbortExceptionHandle()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(g_pPreallocatedThreadAbortException != NULL);
+ return g_pPreallocatedThreadAbortException;
+}
+
+OBJECTHANDLE CLRException::GetPreallocatedRudeThreadAbortExceptionHandle()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(g_pPreallocatedRudeThreadAbortException != NULL);
+ return g_pPreallocatedRudeThreadAbortException;
+}
+
+OBJECTHANDLE CLRException::GetPreallocatedStackOverflowExceptionHandle()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(g_pPreallocatedStackOverflowException != NULL);
+ return g_pPreallocatedStackOverflowException;
+}
+
+OBJECTHANDLE CLRException::GetPreallocatedExecutionEngineExceptionHandle()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(g_pPreallocatedExecutionEngineException != NULL);
+ return g_pPreallocatedExecutionEngineException;
+}
+
+//
+// Returns TRUE if the given object ref is one of the preallocated exception objects.
+//
+BOOL CLRException::IsPreallocatedExceptionObject(OBJECTREF o)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if ((o == ObjectFromHandle(g_pPreallocatedBaseException)) ||
+ (o == ObjectFromHandle(g_pPreallocatedOutOfMemoryException)) ||
+ (o == ObjectFromHandle(g_pPreallocatedStackOverflowException)) ||
+ (o == ObjectFromHandle(g_pPreallocatedExecutionEngineException)))
+ {
+ return TRUE;
+ }
+
+ // The preallocated rude thread abort exception is not always preallocated.
+ if ((g_pPreallocatedRudeThreadAbortException != NULL) &&
+ (o == ObjectFromHandle(g_pPreallocatedRudeThreadAbortException)))
+ {
+ return TRUE;
+ }
+
+ // The preallocated thread abort exception is not always preallocated.
+ if ((g_pPreallocatedThreadAbortException != NULL) &&
+ (o == ObjectFromHandle(g_pPreallocatedThreadAbortException)))
+ {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+//
+// Returns TRUE if the given object ref is one of the preallocated exception handles
+//
+BOOL CLRException::IsPreallocatedExceptionHandle(OBJECTHANDLE h)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if ((h == g_pPreallocatedBaseException) ||
+ (h == g_pPreallocatedOutOfMemoryException) ||
+ (h == g_pPreallocatedStackOverflowException) ||
+ (h == g_pPreallocatedExecutionEngineException) ||
+ (h == g_pPreallocatedThreadAbortException))
+ {
+ return TRUE;
+ }
+
+ // The preallocated rude thread abort exception is not always preallocated.
+ if ((g_pPreallocatedRudeThreadAbortException != NULL) &&
+ (h == g_pPreallocatedRudeThreadAbortException))
+ {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+//
+// Returns a preallocated handle to match a preallocated exception object, or NULL if the object isn't one of the
+// preallocated exception objects.
+//
+OBJECTHANDLE CLRException::GetPreallocatedHandleForObject(OBJECTREF o)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (o == ObjectFromHandle(g_pPreallocatedBaseException))
+ {
+ return g_pPreallocatedBaseException;
+ }
+ else if (o == ObjectFromHandle(g_pPreallocatedOutOfMemoryException))
+ {
+ return g_pPreallocatedOutOfMemoryException;
+ }
+ else if (o == ObjectFromHandle(g_pPreallocatedStackOverflowException))
+ {
+ return g_pPreallocatedStackOverflowException;
+ }
+ else if (o == ObjectFromHandle(g_pPreallocatedExecutionEngineException))
+ {
+ return g_pPreallocatedExecutionEngineException;
+ }
+ else if (o == ObjectFromHandle(g_pPreallocatedThreadAbortException))
+ {
+ return g_pPreallocatedThreadAbortException;
+ }
+
+ // The preallocated rude thread abort exception is not always preallocated.
+ if ((g_pPreallocatedRudeThreadAbortException != NULL) &&
+ (o == ObjectFromHandle(g_pPreallocatedRudeThreadAbortException)))
+ {
+ return g_pPreallocatedRudeThreadAbortException;
+ }
+
+ return NULL;
+}
+
+// Prefer a new OOM exception if we can make one. If we cannot, then give back the pre-allocated one.
+OBJECTREF CLRException::GetBestOutOfMemoryException()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+
+ EX_TRY
+ {
+ FAULT_NOT_FATAL();
+
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+
+ EXCEPTIONREF pOutOfMemory = (EXCEPTIONREF)AllocateObject(g_pOutOfMemoryExceptionClass);
+ pOutOfMemory->SetHResult(COR_E_OUTOFMEMORY);
+ pOutOfMemory->SetXCode(EXCEPTION_COMPLUS);
+
+ retVal = pOutOfMemory;
+
+ END_SO_INTOLERANT_CODE;
+ }
+ EX_CATCH
+ {
+ retVal = GetPreallocatedOutOfMemoryException();
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ _ASSERTE(retVal != NULL);
+
+ return retVal;
+}
+
+
+// Works on non-CLRExceptions as well
+// static function
+OBJECTREF CLRException::GetThrowableFromException(Exception *pException)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ Thread* pThread = GetThread();
+
+ // Can't have a throwable without a Thread.
+ _ASSERTE(pThread != NULL);
+
+ if (NULL == pException)
+ {
+ return pThread->LastThrownObject();
+ }
+
+ if (pException->IsType(CLRException::GetType()))
+ return ((CLRException*)pException)->GetThrowable();
+
+ if (pException->IsType(EEException::GetType()))
+ return ((EEException*)pException)->GetThrowable();
+
+ // Note: we are creating a throwable on the fly in this case - so
+ // multiple calls will return different objects. If we really need identity,
+ // we could store a throwable handle at the catch site, or store it
+ // on the thread object.
+
+ if (pException->IsType(SEHException::GetType()))
+ {
+ SEHException *pSEHException = (SEHException*)pException;
+
+ switch (pSEHException->m_exception.ExceptionCode)
+ {
+ case EXCEPTION_COMPLUS:
+ // Note: even though the switch compared the exception code,
+ // we have to call the official IsComPlusException() routine
+ // for side-by-side correctness. If that check fails, treat
+ // as an unrelated unmanaged exception.
+ if (IsComPlusException(&(pSEHException->m_exception)))
+ {
+ return pThread->LastThrownObject();
+ }
+ else
+ {
+ break;
+ }
+
+ case STATUS_NO_MEMORY:
+ return GetBestOutOfMemoryException();
+
+ case STATUS_STACK_OVERFLOW:
+ return GetPreallocatedStackOverflowException();
+ }
+
+ DWORD exceptionCode =
+ MapWin32FaultToCOMPlusException(&pSEHException->m_exception);
+
+ EEException e((RuntimeExceptionKind)exceptionCode);
+
+ OBJECTREF throwable = e.GetThrowable();
+ GCPROTECT_BEGIN (throwable);
+ EX_TRY
+ {
+ SCAN_IGNORE_FAULT;
+ if (throwable != NULL && !CLRException::IsPreallocatedExceptionObject(throwable))
+ {
+ _ASSERTE(IsException(throwable->GetMethodTable()));
+
+ // set the exception code
+ ((EXCEPTIONREF)throwable)->SetXCode(pSEHException->m_exception.ExceptionCode);
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ GCPROTECT_END ();
+
+ return throwable;
+ }
+ else
+ {
+ // We can enter here for HRException, COMException, DelegatingException
+ // just to name a few.
+ OBJECTREF oRetVal = NULL;
+ GCPROTECT_BEGIN(oRetVal);
+ {
+ EX_TRY
+ {
+ HRESULT hr = pException->GetHR();
+
+ if (hr == E_OUTOFMEMORY || hr == HRESULT_FROM_WIN32(ERROR_NOT_ENOUGH_MEMORY))
+ {
+ oRetVal = GetBestOutOfMemoryException();
+ }
+ else if (hr == COR_E_STACKOVERFLOW)
+ {
+ oRetVal = GetPreallocatedStackOverflowException();
+ }
+ else
+ {
+ SafeComHolder<IErrorInfo> pErrInfo(pException->GetErrorInfo());
+
+ if (pErrInfo != NULL)
+ {
+ GetExceptionForHR(hr, pErrInfo, &oRetVal);
+ }
+ else
+ {
+ SString message;
+ pException->GetMessage(message);
+
+ EEMessageException e(hr, IDS_EE_GENERIC, message);
+
+ oRetVal = e.CreateThrowable();
+ }
+ }
+ }
+ EX_CATCH
+ {
+ // We have caught an exception trying to get a Throwable for the pException we
+ // were given. It is tempting to want to get the Throwable for the new
+ // exception, but that is dangerous, due to infinitely cascading
+ // exceptions, leading to a stack overflow.
+
+ // If we can see that the exception was OOM, return the preallocated OOM,
+ // if we can see that it is SO, return the preallocated SO,
+ // if we can see that it is some other managed exception, return that
+ // exception, otherwise return the preallocated System.Exception.
+ Exception *pNewException = GET_EXCEPTION();
+
+ if (pNewException->IsPreallocatedOOMException())
+ { // It definitely was an OOM
+ STRESS_LOG0(LF_EH, LL_INFO100, "CLRException::GetThrowableFromException: OOM creating throwable; getting pre-alloc'd OOM.\n");
+ if (oRetVal == NULL)
+ oRetVal = GetPreallocatedOutOfMemoryException();
+ }
+ else
+ if (pNewException->IsType(CLRLastThrownObjectException::GetType()) &&
+ (pThread->LastThrownObject() != NULL))
+ {
+ STRESS_LOG0(LF_EH, LL_INFO100, "CLRException::GetThrowableFromException: LTO Exception creating throwable; getting LastThrownObject.\n");
+ if (oRetVal == NULL)
+ oRetVal = pThread->LastThrownObject();
+ }
+ else
+ {
+ // We *could* come here if one of the calls in the EX_TRY above throws an exception (e.g. MissingMethodException if we attempt
+ // to invoke CreateThrowable for a type that does not have a default constructor) that is neither preallocated OOM nor a
+ // CLRLastThrownObject type.
+ //
+ // Like the comment above says, we cannot afford to get the throwable lest we hit an SO. In such a case the
+ // runtime is not in a bad shape, but we also don't know what to return. A reasonable answer is to return
+ // something generic rather than ripping down the process or returning an incorrect exception (e.g. OOM) that
+ // could break execution paths.
+ //
+ // Hence, we return preallocated System.Exception instance.
+ if (oRetVal == NULL)
+ {
+ oRetVal = GetPreallocatedBaseException();
+ STRESS_LOG0(LF_EH, LL_INFO100, "CLRException::GetThrowableFromException: Unknown Exception creating throwable; getting preallocated System.Exception.\n");
+ }
+ }
+
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+ GCPROTECT_END();
+
+ return oRetVal;
+ }
+} // OBJECTREF CLRException::GetThrowableFromException()
+
+OBJECTREF CLRException::GetThrowableFromExceptionRecord(EXCEPTION_RECORD *pExceptionRecord)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (IsComPlusException(pExceptionRecord))
+ {
+ return GetThread()->LastThrownObject();
+ }
+
+ return NULL;
+}
+
+void CLRException::HandlerState::CleanupTry()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ if (m_pThread != NULL)
+ {
+ BEGIN_GETTHREAD_ALLOWED;
+ // If there is no frame to unwind, the UnwindFrameChain call is just an expensive NOP
+ // due to the setup and teardown of EH records, so we avoid it when we can.
+ if (m_pThread->GetFrame() < m_pFrame)
+ UnwindFrameChain(m_pThread, m_pFrame);
+
+ if (m_fPreemptiveGCDisabled != m_pThread->PreemptiveGCDisabled())
+ {
+ if (m_fPreemptiveGCDisabled)
+ m_pThread->DisablePreemptiveGC();
+ else
+ m_pThread->EnablePreemptiveGC();
+ }
+ END_GETTHREAD_ALLOWED;
+ }
+
+ // Make sure to call the base class's CleanupTry so it can do whatever it wants to do.
+ Exception::HandlerState::CleanupTry();
+}
+
+void CLRException::HandlerState::SetupCatch(INDEBUG_COMMA(__in_z const char * szFile) int lineNum)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ bool fVMInitialized = g_fEEStarted ? true : false;
+ Exception::HandlerState::SetupCatch(INDEBUG_COMMA(szFile) lineNum, fVMInitialized);
+
+ Thread *pThread = NULL;
+ DWORD exceptionCode = 0;
+
+ if (fVMInitialized)
+ {
+ pThread = GetThread();
+ exceptionCode = GetCurrentExceptionCode();
+ }
+
+ if (!DidCatchCxx())
+ {
+ if (IsSOExceptionCode(exceptionCode))
+ {
+ // Handle SO exception
+ //
+ // We should ensure that a valid Thread object exists before trying to set SO as the LTO.
+ if (pThread != NULL)
+ {
+ // We have a nasty issue with our EX_TRY/EX_CATCH. If EX_CATCH catches SEH exception,
+ // GET_THROWABLE uses CLRLastThrownObjectException instead, because we don't know
+ // what exception to use. But for SO, we can use preallocated SO exception.
+ GCX_COOP();
+ pThread->SetSOForLastThrownObject();
+ }
+
+ if (exceptionCode == STATUS_STACK_OVERFLOW)
+ {
+ // We have called HandleStackOverflow for soft SO through our vectored exception handler.
+ EEPolicy::HandleStackOverflow(SOD_UnmanagedFrameHandler, FRAME_TOP);
+ }
+ }
+ }
+
+#ifdef WIN64EXCEPTIONS
+ if (!DidCatchCxx())
+ {
+ // this must be done after the second pass has run, it does not
+ // reference anything on the stack, so it is safe to run in an
+ // SEH __except clause as well as a C++ catch clause.
+ ExceptionTracker::PopTrackers(this);
+ }
+#endif // WIN64EXCEPTIONS
+}
+
+#ifdef LOGGING
+void CLRException::HandlerState::SucceedCatch()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ LOG((LF_EH, LL_INFO100, "EX_CATCH catch succeeded (CLRException::HandlerState)\n"));
+
+ //
+ // At this point, we don't believe we need to do any unwinding of the ExInfo chain after an EX_CATCH. The chain
+ // is unwound by CPFH_UnwindFrames1() when it detects that the exception is being caught by an unmanaged
+ // catcher. EX_CATCH looks just like an unmanaged catcher now, so the unwind is already done by the time we get
+ // into the catch. That's different from before the big switch to the new exception system, and it affects
+ // rethrows. Fixing rethrows is a work item for a little later. For now, we're simply removing the unwind
+ // from here to avoid the extra unwind, which is harmless in many cases, but is very harmful when a managed
+ // filter throws an exception.
+ //
+
+ Exception::HandlerState::SucceedCatch();
+}
+#endif
+
+#endif // CROSSGEN_COMPILE
+
+// ---------------------------------------------------------------------------
+// EEException methods
+// ---------------------------------------------------------------------------
+
+//------------------------------------------------------------------------
+// Array that is used to retrieve the right exception for a given HRESULT.
+//------------------------------------------------------------------------
+
+#ifdef FEATURE_COMINTEROP
+
+struct WinRtHR_to_ExceptionKind_Map
+{
+ RuntimeExceptionKind reKind;
+ int cHRs;
+ const HRESULT *aHRs;
+};
+
+enum WinRtOnly_ExceptionKind {
+#define DEFINE_EXCEPTION_HR_WINRT_ONLY(ns, reKind, ...) kWinRtEx##reKind,
+#define DEFINE_EXCEPTION(ns, reKind, bHRformessage, ...)
+#define DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY(ns, reKind, assemblySimpleName, publicKeyToken, bHRformessage, ...)
+#include "rexcep.h"
+kWinRtExLastException
+};
+
+#define DEFINE_EXCEPTION_HR_WINRT_ONLY(ns, reKind, ...) static const HRESULT s_##reKind##WinRtOnlyHRs[] = { __VA_ARGS__ };
+#define DEFINE_EXCEPTION(ns, reKind, bHRformessage, ...)
+#define DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY(ns, reKind, assemblySimpleName, publicKeyToken, bHRformessage, ...)
+#include "rexcep.h"
+
+static const
+WinRtHR_to_ExceptionKind_Map gWinRtHR_to_ExceptionKind_Maps[] = {
+#define DEFINE_EXCEPTION_HR_WINRT_ONLY(ns, reKind, ...) { k##reKind, sizeof(s_##reKind##WinRtOnlyHRs) / sizeof(HRESULT), s_##reKind##WinRtOnlyHRs },
+#define DEFINE_EXCEPTION(ns, reKind, bHRformessage, ...)
+#define DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY(ns, reKind, assemblySimpleName, publicKeyToken, bHRformessage, ...)
+#include "rexcep.h"
+};
+
+#endif // FEATURE_COMINTEROP
+
+struct ExceptionHRInfo
+{
+ int cHRs;
+ const HRESULT *aHRs;
+};
+
+#define DEFINE_EXCEPTION(ns, reKind, bHRformessage, ...) static const HRESULT s_##reKind##HRs[] = { __VA_ARGS__ };
+#define DEFINE_EXCEPTION_HR_WINRT_ONLY(ns, reKind, ...)
+#define DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY(ns, reKind, assemblySimpleName, publicKeyToken, bHRformessage, ...) DEFINE_EXCEPTION(ns, reKind, bHRformessage, __VA_ARGS__)
+#include "rexcep.h"
+
+static const
+ExceptionHRInfo gExceptionHRInfos[] = {
+#define DEFINE_EXCEPTION(ns, reKind, bHRformessage, ...) {sizeof(s_##reKind##HRs) / sizeof(HRESULT), s_##reKind##HRs},
+#define DEFINE_EXCEPTION_HR_WINRT_ONLY(ns, reKind, ...)
+#define DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY(ns, reKind, assemblySimpleName, publicKeyToken, bHRformessage, ...) DEFINE_EXCEPTION(ns, reKind, bHRformessage, __VA_ARGS__)
+#include "rexcep.h"
+};
+
+
+static const
+bool gShouldDisplayHR[] =
+{
+#define DEFINE_EXCEPTION(ns, reKind, bHRformessage, ...) bHRformessage,
+#define DEFINE_EXCEPTION_HR_WINRT_ONLY(ns, reKind, ...)
+#define DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY(ns, reKind, assemblySimpleName, publicKeyToken, bHRformessage, ...) DEFINE_EXCEPTION(ns, reKind, bHRformessage, __VA_ARGS__)
+#include "rexcep.h"
+};
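+// For illustration, a rexcep.h entry of the (assumed) form
+//
+//     DEFINE_EXCEPTION(g_SystemNS, ArgumentException, false, COR_E_ARGUMENT)
+//
+// expands through the three includes above into roughly:
+//
+//     static const HRESULT s_ArgumentExceptionHRs[] = { COR_E_ARGUMENT };
+//     // gExceptionHRInfos[kArgumentException] == { 1, s_ArgumentExceptionHRs }
+//     // gShouldDisplayHR[kArgumentException]  == false
+//
+// so each RuntimeExceptionKind indexes a parallel list of HRESULTs and a
+// "display the HR" flag. See rexcep.h for the actual entries.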
+
+
+/*static*/
+HRESULT EEException::GetHRFromKind(RuntimeExceptionKind reKind)
+{
+ LIMITED_METHOD_CONTRACT;
+ return gExceptionHRInfos[reKind].aHRs[0];
+}
+
+HRESULT EEException::GetHR()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return EEException::GetHRFromKind(m_kind);
+}
+
+IErrorInfo *EEException::GetErrorInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return NULL;
+}
+
+BOOL EEException::GetThrowableMessage(SString &result)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Return a meaningful HR message, if there is one.
+
+ HRESULT hr = GetHR();
+
+ // If the hr is more interesting than the kind, use that
+ // for a message.
+
+ if (hr != S_OK
+ && hr != E_FAIL
+ && (gShouldDisplayHR[m_kind]
+ || gExceptionHRInfos[m_kind].aHRs[0] != hr))
+ {
+ // If it has only one HR, the original message should be good enough
+ _ASSERTE(gExceptionHRInfos[m_kind].cHRs > 1 ||
+ gExceptionHRInfos[m_kind].aHRs[0] != hr);
+
+ GenerateTopLevelHRExceptionMessage(hr, result);
+ return TRUE;
+ }
+
+ // No interesting hr - just keep the class default message.
+
+ return FALSE;
+}
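+// For illustration: a plain EEException always returns the first HRESULT for
+// its kind from GetHR(), so the check above reduces to "is the HR displayable
+// and neither S_OK nor E_FAIL". Subclasses that override GetHR() (e.g.
+// EEMessageException) can also hit the "hr differs from the kind's default
+// HRESULT" arm.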
+
+void EEException::GetMessage(SString &result)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // First look for a specialized message
+ if (GetThrowableMessage(result))
+ return;
+
+ // Otherwise, report the class's generic message
+ LPCUTF8 pszExceptionName = NULL;
+ if (m_kind <= kLastExceptionInMscorlib)
+ {
+ pszExceptionName = MscorlibBinder::GetExceptionName(m_kind);
+ result.SetUTF8(pszExceptionName);
+ }
+#ifndef CROSSGEN_COMPILE
+ else
+ {
+ FrameworkExceptionLoader::GetExceptionName(m_kind, result);
+ }
+#endif // CROSSGEN_COMPILE
+}
+
+OBJECTREF EEException::CreateThrowable()
+{
+#ifdef CROSSGEN_COMPILE
+ _ASSERTE(false);
+ return NULL;
+#else
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(g_pPreallocatedOutOfMemoryException != NULL);
+ static int allocCount = 0;
+
+ MethodTable *pMT = NULL;
+ if (m_kind <= kLastExceptionInMscorlib)
+ pMT = MscorlibBinder::GetException(m_kind);
+ else
+ {
+ pMT = FrameworkExceptionLoader::GetException(m_kind);
+ }
+
+ ThreadPreventAsyncHolder preventAsyncHolder(m_kind == kThreadAbortException);
+
+ OBJECTREF throwable = AllocateObject(pMT);
+ allocCount++;
+ GCPROTECT_BEGIN(throwable);
+
+ {
+ ThreadPreventAsyncHolder preventAbort(m_kind == kThreadAbortException ||
+ m_kind == kThreadInterruptedException);
+ CallDefaultConstructor(throwable);
+ }
+
+ HRESULT hr = GetHR();
+ ((EXCEPTIONREF)throwable)->SetHResult(hr);
+
+ SString message;
+ if (GetThrowableMessage(message))
+ {
+ // Set the message field. It is not safe to do this through the constructor,
+ // since the string constructor for some exceptions adds a prefix to the
+ // message, which we don't want.
+ //
+ // We only want to replace whatever the default constructor put there, if we
+ // have something meaningful to add.
+
+ STRINGREF s = StringObject::NewString(message);
+ ((EXCEPTIONREF)throwable)->SetMessage(s);
+ }
+
+ GCPROTECT_END();
+
+ return throwable;
+#endif
+}
+
+RuntimeExceptionKind EEException::GetKindFromHR(HRESULT hr, bool fIsWinRtMode /*= false*/)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ #ifdef FEATURE_COMINTEROP
+ // If we are in WinRT mode, try to get a WinRT specific mapping first:
+ if (fIsWinRtMode)
+ {
+ for (int i = 0; i < kWinRtExLastException; i++)
+ {
+ for (int j = 0; j < gWinRtHR_to_ExceptionKind_Maps[i].cHRs; j++)
+ {
+ if (gWinRtHR_to_ExceptionKind_Maps[i].aHRs[j] == hr)
+ {
+ return gWinRtHR_to_ExceptionKind_Maps[i].reKind;
+ }
+ }
+ }
+ }
+ #endif // FEATURE_COMINTEROP
+
+ // Is not in WinRT mode OR did not find a WinRT specific mapping. Check normal mappings:
+
+ for (int i = 0; i < kLastException; i++)
+ {
+ for (int j = 0; j < gExceptionHRInfos[i].cHRs; j++)
+ {
+ if (gExceptionHRInfos[i].aHRs[j] == hr)
+ return (RuntimeExceptionKind) i;
+ }
+ }
+
+ return (fIsWinRtMode ? kException : kCOMException);
+
+} // RuntimeExceptionKind EEException::GetKindFromHR()
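+// For illustration (assuming the usual rexcep.h mappings), a caller might do:
+//
+//     RuntimeExceptionKind kind = EEException::GetKindFromHR(COR_E_ARGUMENT);
+//     // kind == kArgumentException; an unrecognized HRESULT falls back to
+//     // kCOMException (or kException when fIsWinRtMode is true).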
+
+BOOL EEException::GetResourceMessage(UINT iResourceID, SString &result,
+ const SString &arg1, const SString &arg2,
+ const SString &arg3, const SString &arg4,
+ const SString &arg5, const SString &arg6)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BOOL ok;
+
+ StackSString temp;
+ ok = temp.LoadResource(CCompRC::Error, iResourceID);
+
+ if (ok)
+ result.FormatMessage(FORMAT_MESSAGE_FROM_STRING,
+ (LPCWSTR)temp, 0, 0, arg1, arg2, arg3, arg4, arg5, arg6);
+
+ return ok;
+}
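+// For illustration: the loaded resource text is used as a FormatMessage
+// template, so a (hypothetical) string such as L"Could not load type '%1'
+// from '%2'." has its %1..%6 insertion points filled from arg1..arg6 in order.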
+
+// ---------------------------------------------------------------------------
+// EEMessageException methods
+// ---------------------------------------------------------------------------
+
+HRESULT EEMessageException::GetHR()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return m_hr;
+}
+
+BOOL EEMessageException::GetThrowableMessage(SString &result)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_resID != 0 && GetResourceMessage(m_resID, result))
+ return TRUE;
+
+ return EEException::GetThrowableMessage(result);
+}
+
+BOOL EEMessageException::GetResourceMessage(UINT iResourceID, SString &result)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return EEException::GetResourceMessage(
+ iResourceID, result, m_arg1, m_arg2, m_arg3, m_arg4, m_arg5, m_arg6);
+}
+
+// ---------------------------------------------------------------------------
+// EEResourceException methods
+// ---------------------------------------------------------------------------
+
+void EEResourceException::GetMessage(SString &result)
+{
+ WRAPPER_NO_CONTRACT;
+ //
+ // Return a simplified message,
+ // since we don't want to call managed code here.
+ //
+
+ result.Printf("%s (message resource %s)",
+ MscorlibBinder::GetExceptionName(m_kind), m_resourceName.GetUnicode());
+}
+
+BOOL EEResourceException::GetThrowableMessage(SString &result)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+#ifndef CROSSGEN_COMPILE
+ STRINGREF message = NULL;
+ ResMgrGetString(m_resourceName, &message);
+
+ if (message != NULL)
+ {
+ message->GetSString(result);
+ return TRUE;
+ }
+#endif // CROSSGEN_COMPILE
+
+ return EEException::GetThrowableMessage(result);
+}
+
+// ---------------------------------------------------------------------------
+// EEComException methods
+// ---------------------------------------------------------------------------
+
+static HRESULT Undefer(EXCEPINFO *pExcepInfo)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pExcepInfo->pfnDeferredFillIn)
+ {
+ EXCEPINFO FilledInExcepInfo;
+
+ HRESULT hr = pExcepInfo->pfnDeferredFillIn(&FilledInExcepInfo);
+ if (SUCCEEDED(hr))
+ {
+ // Free the strings in the original EXCEPINFO.
+ if (pExcepInfo->bstrDescription)
+ {
+ SysFreeString(pExcepInfo->bstrDescription);
+ pExcepInfo->bstrDescription = NULL;
+ }
+ if (pExcepInfo->bstrSource)
+ {
+ SysFreeString(pExcepInfo->bstrSource);
+ pExcepInfo->bstrSource = NULL;
+ }
+ if (pExcepInfo->bstrHelpFile)
+ {
+ SysFreeString(pExcepInfo->bstrHelpFile);
+ pExcepInfo->bstrHelpFile = NULL;
+ }
+
+ // Fill in the new data
+ *pExcepInfo = FilledInExcepInfo;
+ }
+ }
+
+ if (pExcepInfo->scode != 0)
+ return pExcepInfo->scode;
+ else
+ return (HRESULT)pExcepInfo->wCode;
+}
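+// For illustration: a COM server returning DISP_E_EXCEPTION may defer filling
+// in the EXCEPINFO strings. A caller would undefer before reading it (sketch):
+//
+//     EXCEPINFO ei;
+//     memset(&ei, 0, sizeof(ei));
+//     // ... IDispatch::Invoke(...) fails with DISP_E_EXCEPTION, filling ei ...
+//     HRESULT hrFromServer = Undefer(&ei);  // runs pfnDeferredFillIn if set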
+
+// ---------------------------------------------------------------------------
+// EEFieldException is an EE exception subclass composed of a field
+// ---------------------------------------------------------------------------
+
+
+BOOL EEFieldException::GetThrowableMessage(SString &result)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_messageID == 0)
+ {
+ LPUTF8 szFullName;
+ LPCUTF8 szClassName, szMember;
+ szMember = m_pFD->GetName();
+ DefineFullyQualifiedNameForClass();
+ szClassName = GetFullyQualifiedNameForClass(m_pFD->GetApproxEnclosingMethodTable());
+ MAKE_FULLY_QUALIFIED_MEMBER_NAME(szFullName, NULL, szClassName, szMember, "");
+ result.SetUTF8(szFullName);
+
+ return TRUE;
+ }
+ else
+ {
+ _ASSERTE(m_pAccessingMD != NULL);
+
+ const TypeString::FormatFlags formatFlags = static_cast<TypeString::FormatFlags>(
+ TypeString::FormatNamespace |
+ TypeString::FormatAngleBrackets |
+ TypeString::FormatSignature);
+
+ StackSString caller;
+ TypeString::AppendMethod(caller,
+ m_pAccessingMD,
+ m_pAccessingMD->GetClassInstantiation(),
+ formatFlags);
+
+ StackSString field;
+ TypeString::AppendField(field,
+ m_pFD,
+ m_pFD->GetApproxEnclosingMethodTable()->GetInstantiation(),
+ formatFlags);
+
+ return GetResourceMessage(m_messageID, result, caller, field, m_additionalContext);
+ }
+}
+
+// ---------------------------------------------------------------------------
+// EEMethodException is an EE exception subclass composed of a method
+// ---------------------------------------------------------------------------
+
+BOOL EEMethodException::GetThrowableMessage(SString &result)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_messageID == 0)
+ {
+ LPUTF8 szFullName;
+ LPCUTF8 szClassName, szMember;
+ szMember = m_pMD->GetName();
+ DefineFullyQualifiedNameForClass();
+ szClassName = GetFullyQualifiedNameForClass(m_pMD->GetMethodTable());
+ //@todo GENERICS: exact instantiations?
+ MetaSig tmp(m_pMD);
+ SigFormat sigFormatter(tmp, szMember);
+ const char * sigStr = sigFormatter.GetCStringParmsOnly();
+ MAKE_FULLY_QUALIFIED_MEMBER_NAME(szFullName, NULL, szClassName, szMember, sigStr);
+ result.SetUTF8(szFullName);
+
+ return TRUE;
+ }
+ else
+ {
+ _ASSERTE(m_pAccessingMD != NULL);
+
+ const TypeString::FormatFlags formatFlags = static_cast<TypeString::FormatFlags>(
+ TypeString::FormatNamespace |
+ TypeString::FormatAngleBrackets |
+ TypeString::FormatSignature);
+
+ StackSString caller;
+ TypeString::AppendMethod(caller,
+ m_pAccessingMD,
+ m_pAccessingMD->GetClassInstantiation(),
+ formatFlags);
+
+ StackSString callee;
+ TypeString::AppendMethod(callee,
+ m_pMD,
+ m_pMD->GetClassInstantiation(),
+ formatFlags);
+
+ return GetResourceMessage(m_messageID, result, caller, callee, m_additionalContext);
+ }
+}
+
+BOOL EETypeAccessException::GetThrowableMessage(SString &result)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ const TypeString::FormatFlags formatFlags = static_cast<TypeString::FormatFlags>(
+ TypeString::FormatNamespace |
+ TypeString::FormatAngleBrackets |
+ TypeString::FormatSignature);
+ StackSString type;
+ TypeString::AppendType(type, TypeHandle(m_pMT), formatFlags);
+
+ if (m_messageID == 0)
+ {
+ result.Set(type);
+ return TRUE;
+ }
+ else
+ {
+ _ASSERTE(m_pAccessingMD != NULL);
+
+ StackSString caller;
+ TypeString::AppendMethod(caller,
+ m_pAccessingMD,
+ m_pAccessingMD->GetClassInstantiation(),
+ formatFlags);
+
+ return GetResourceMessage(m_messageID, result, caller, type, m_additionalContext);
+ }
+}
+
+// ---------------------------------------------------------------------------
+// EEArgumentException is an EE exception subclass representing a bad argument
+// ---------------------------------------------------------------------------
+
+typedef struct {
+ OBJECTREF pThrowable;
+ STRINGREF s1;
+ OBJECTREF pTmpThrowable;
+} ProtectArgsStruct;
+
+OBJECTREF EEArgumentException::CreateThrowable()
+{
+#ifdef CROSSGEN_COMPILE
+ _ASSERTE(false);
+ return NULL;
+#else
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(GetThread() != NULL);
+
+ ProtectArgsStruct prot;
+ memset(&prot, 0, sizeof(ProtectArgsStruct));
+ ResMgrGetString(m_resourceName, &prot.s1);
+ GCPROTECT_BEGIN(prot);
+
+ MethodTable *pMT = MscorlibBinder::GetException(m_kind);
+ prot.pThrowable = AllocateObject(pMT);
+
+ MethodDesc* pMD = MemberLoader::FindMethod(prot.pThrowable->GetTrueMethodTable(),
+ COR_CTOR_METHOD_NAME, &gsig_IM_Str_Str_RetVoid);
+
+ if (!pMD)
+ {
+ MAKE_WIDEPTR_FROMUTF8(wzMethodName, COR_CTOR_METHOD_NAME);
+ COMPlusThrowNonLocalized(kMissingMethodException, wzMethodName);
+ }
+
+ MethodDescCallSite exceptionCtor(pMD);
+
+ STRINGREF argName = StringObject::NewString(m_argumentName);
+
+ // Note that ArgumentException takes arguments to its constructor in a different order,
+ // for usability reasons. However, this is inconsistent with our other exceptions.
+ if (m_kind == kArgumentException)
+ {
+ ARG_SLOT args1[] = {
+ ObjToArgSlot(prot.pThrowable),
+ ObjToArgSlot(prot.s1),
+ ObjToArgSlot(argName),
+ };
+ exceptionCtor.Call(args1);
+ }
+ else
+ {
+ ARG_SLOT args1[] = {
+ ObjToArgSlot(prot.pThrowable),
+ ObjToArgSlot(argName),
+ ObjToArgSlot(prot.s1),
+ };
+ exceptionCtor.Call(args1);
+ }
+
+ GCPROTECT_END(); //Prot
+
+ return prot.pThrowable;
+#endif
+}
+
+
+// ---------------------------------------------------------------------------
+// EETypeLoadException is an EE exception subclass representing a type loading
+// error
+// ---------------------------------------------------------------------------
+
+EETypeLoadException::EETypeLoadException(LPCUTF8 pszNameSpace, LPCUTF8 pTypeName,
+ LPCWSTR pAssemblyName, LPCUTF8 pMessageArg, UINT resIDWhy)
+ : EEException(kTypeLoadException),
+ m_pAssemblyName(pAssemblyName),
+ m_pMessageArg(SString::Utf8, pMessageArg),
+ m_resIDWhy(resIDWhy)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pszNameSpace)
+ {
+ SString sNameSpace(SString::Utf8, pszNameSpace);
+ SString sTypeName(SString::Utf8, pTypeName);
+ m_fullName.MakeFullNamespacePath(sNameSpace, sTypeName);
+ }
+ else if (pTypeName)
+ m_fullName.SetUTF8(pTypeName);
+ else {
+ WCHAR wszTemplate[30];
+ if (FAILED(UtilLoadStringRC(IDS_EE_NAME_UNKNOWN,
+ wszTemplate,
+ sizeof(wszTemplate)/sizeof(wszTemplate[0]),
+ FALSE)))
+ wszTemplate[0] = W('\0');
+ MAKE_UTF8PTR_FROMWIDE(name, wszTemplate);
+ m_fullName.SetUTF8(name);
+ }
+}
+
+EETypeLoadException::EETypeLoadException(LPCWSTR pFullName,
+ LPCWSTR pAssemblyName,
+ LPCUTF8 pMessageArg,
+ UINT resIDWhy)
+ : EEException(kTypeLoadException),
+ m_pAssemblyName(pAssemblyName),
+ m_pMessageArg(SString::Utf8, pMessageArg),
+ m_resIDWhy(resIDWhy)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MAKE_UTF8PTR_FROMWIDE(name, pFullName);
+ m_fullName.SetUTF8(name);
+}
+
+void EETypeLoadException::GetMessage(SString &result)
+{
+ WRAPPER_NO_CONTRACT;
+ GetResourceMessage(IDS_CLASSLOAD_GENERAL, result,
+ m_fullName, m_pAssemblyName, m_pMessageArg);
+}
+
+OBJECTREF EETypeLoadException::CreateThrowable()
+{
+#ifdef CROSSGEN_COMPILE
+ _ASSERTE(false);
+ return NULL;
+#else
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ COUNTER_ONLY(GetPerfCounters().m_Loading.cLoadFailures++);
+
+ MethodTable *pMT = MscorlibBinder::GetException(kTypeLoadException);
+
+ struct _gc {
+ OBJECTREF pNewException;
+ STRINGREF pNewAssemblyString;
+ STRINGREF pNewClassString;
+ STRINGREF pNewMessageArgString;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ gc.pNewClassString = StringObject::NewString(m_fullName);
+
+ if (!m_pMessageArg.IsEmpty())
+ gc.pNewMessageArgString = StringObject::NewString(m_pMessageArg);
+
+ if (!m_pAssemblyName.IsEmpty())
+ gc.pNewAssemblyString = StringObject::NewString(m_pAssemblyName);
+
+ gc.pNewException = AllocateObject(pMT);
+
+ MethodDesc* pMD = MemberLoader::FindMethod(gc.pNewException->GetTrueMethodTable(),
+ COR_CTOR_METHOD_NAME, &gsig_IM_Str_Str_Str_Int_RetVoid);
+
+ if (!pMD)
+ {
+ MAKE_WIDEPTR_FROMUTF8(wzMethodName, COR_CTOR_METHOD_NAME);
+ COMPlusThrowNonLocalized(kMissingMethodException, wzMethodName);
+ }
+
+ MethodDescCallSite exceptionCtor(pMD);
+
+ ARG_SLOT args[] = {
+ ObjToArgSlot(gc.pNewException),
+ ObjToArgSlot(gc.pNewClassString),
+ ObjToArgSlot(gc.pNewAssemblyString),
+ ObjToArgSlot(gc.pNewMessageArgString),
+ (ARG_SLOT)m_resIDWhy,
+ };
+
+ exceptionCtor.Call(args);
+
+ GCPROTECT_END();
+
+ return gc.pNewException;
+#endif
+}
+
+// ---------------------------------------------------------------------------
+// EEFileLoadException is an EE exception subclass representing a file loading
+// error
+// ---------------------------------------------------------------------------
+#ifdef FEATURE_FUSION
+EEFileLoadException::EEFileLoadException(const SString &name, HRESULT hr, IFusionBindLog *pFusionLog, Exception *pInnerException/* = NULL*/)
+#else
+EEFileLoadException::EEFileLoadException(const SString &name, HRESULT hr, void *pFusionLog, Exception *pInnerException/* = NULL*/)
+#endif
+ : EEException(GetFileLoadKind(hr)),
+ m_name(name),
+ m_pFusionLog(pFusionLog),
+ m_hr(hr)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // We don't want to wrap IsTransient() exceptions. The caller should really have checked this
+ // before invoking the ctor.
+ _ASSERTE(pInnerException == NULL || !(pInnerException->IsTransient()));
+ m_innerException = pInnerException ? pInnerException->DomainBoundClone() : NULL;
+
+ if (m_name.IsEmpty())
+ {
+ WCHAR wszTemplate[30];
+ if (FAILED(UtilLoadStringRC(IDS_EE_NAME_UNKNOWN,
+ wszTemplate,
+ sizeof(wszTemplate)/sizeof(wszTemplate[0]),
+ FALSE)))
+ {
+ wszTemplate[0] = W('\0');
+ }
+
+ m_name.Set(wszTemplate);
+ }
+#ifdef FEATURE_FUSION
+ if (m_pFusionLog != NULL)
+ m_pFusionLog->AddRef();
+#endif
+}
+
+
+EEFileLoadException::~EEFileLoadException()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+#ifdef FEATURE_FUSION
+ if (m_pFusionLog)
+ m_pFusionLog->Release();
+#endif
+}
+
+
+
+void EEFileLoadException::SetFileName(const SString &fileName, BOOL removePath)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //<TODO>@TODO: security: It would be nice for debugging purposes if the
+ // user could have the full path, if the user has the right permission.</TODO>
+ if (removePath)
+ {
+ SString::CIterator i = fileName.End();
+
+ if (fileName.FindBack(i, W('\\')))
+ i++;
+
+ if (fileName.FindBack(i, W('/')))
+ i++;
+
+ m_name.Set(fileName, i, fileName.End());
+ }
+ else
+ m_name.Set(fileName);
+}
+
+void EEFileLoadException::GetMessage(SString &result)
+{
+ WRAPPER_NO_CONTRACT;
+
+ SString sHR;
+ GetHRMsg(m_hr, sHR);
+ GetResourceMessage(GetResourceIDForFileLoadExceptionHR(m_hr), result, m_name, sHR);
+}
+
+void EEFileLoadException::GetName(SString &result)
+{
+ WRAPPER_NO_CONTRACT;
+
+ result.Set(m_name);
+}
+
+/* static */
+RuntimeExceptionKind EEFileLoadException::GetFileLoadKind(HRESULT hr)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (Assembly::FileNotFound(hr))
+ return kFileNotFoundException;
+ else
+ {
+ // Make sure this matches the list in rexcep.h
+ if ((hr == COR_E_BADIMAGEFORMAT) ||
+ (hr == CLDB_E_FILE_OLDVER) ||
+ (hr == CLDB_E_INDEX_NOTFOUND) ||
+ (hr == CLDB_E_FILE_CORRUPT) ||
+ (hr == COR_E_NEWER_RUNTIME) ||
+ (hr == COR_E_ASSEMBLYEXPECTED) ||
+ (hr == HRESULT_FROM_WIN32(ERROR_BAD_EXE_FORMAT)) ||
+ (hr == HRESULT_FROM_WIN32(ERROR_EXE_MARKED_INVALID)) ||
+ (hr == CORSEC_E_INVALID_IMAGE_FORMAT) ||
+ (hr == HRESULT_FROM_WIN32(ERROR_NOACCESS)) ||
+ (hr == HRESULT_FROM_WIN32(ERROR_INVALID_ORDINAL)) ||
+ (hr == HRESULT_FROM_WIN32(ERROR_INVALID_DLL)) ||
+ (hr == HRESULT_FROM_WIN32(ERROR_FILE_CORRUPT)) ||
+ (hr == (HRESULT) IDS_CLASSLOAD_32BITCLRLOADING64BITASSEMBLY) ||
+ (hr == COR_E_LOADING_REFERENCE_ASSEMBLY) ||
+ (hr == META_E_BAD_SIGNATURE) ||
+ (hr == COR_E_LOADING_WINMD_REFERENCE_ASSEMBLY))
+ return kBadImageFormatException;
+ else
+ {
+ if ((hr == E_OUTOFMEMORY) || (hr == NTE_NO_MEMORY))
+ return kOutOfMemoryException;
+ else
+ return kFileLoadException;
+ }
+ }
+}
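+// For illustration (assuming Assembly::FileNotFound recognizes the usual
+// not-found HRESULTs): HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND) maps to
+// kFileNotFoundException, COR_E_BADIMAGEFORMAT to kBadImageFormatException,
+// and an unrecognized failure HRESULT to kFileLoadException.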
+
+OBJECTREF EEFileLoadException::CreateThrowable()
+{
+#ifdef CROSSGEN_COMPILE
+ _ASSERTE(false);
+ return NULL;
+#else
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ COUNTER_ONLY(GetPerfCounters().m_Loading.cLoadFailures++);
+
+ // Fetch any log info from the fusion log
+ SString logText;
+#ifdef FEATURE_FUSION
+ if (m_pFusionLog != NULL)
+ {
+ DWORD dwSize = 0;
+ HRESULT hr = m_pFusionLog->GetBindLog(0,0,NULL,&dwSize);
+ if (hr==HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ {
+ WCHAR *buffer = logText.OpenUnicodeBuffer(dwSize);
+ hr=m_pFusionLog->GetBindLog(0,0,buffer, &dwSize);
+ logText.CloseBuffer();
+ }
+ }
+#endif
+ struct _gc {
+ OBJECTREF pNewException;
+ STRINGREF pNewFileString;
+ STRINGREF pFusLogString;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ gc.pNewFileString = StringObject::NewString(m_name);
+ gc.pFusLogString = StringObject::NewString(logText);
+ gc.pNewException = AllocateObject(MscorlibBinder::GetException(m_kind));
+
+ MethodDesc* pMD = MemberLoader::FindMethod(gc.pNewException->GetTrueMethodTable(),
+ COR_CTOR_METHOD_NAME, &gsig_IM_Str_Str_Int_RetVoid);
+
+ if (!pMD)
+ {
+ MAKE_WIDEPTR_FROMUTF8(wzMethodName, COR_CTOR_METHOD_NAME);
+ COMPlusThrowNonLocalized(kMissingMethodException, wzMethodName);
+ }
+
+ MethodDescCallSite exceptionCtor(pMD);
+
+ ARG_SLOT args[] = {
+ ObjToArgSlot(gc.pNewException),
+ ObjToArgSlot(gc.pNewFileString),
+ ObjToArgSlot(gc.pFusLogString),
+ (ARG_SLOT) m_hr
+ };
+
+ exceptionCtor.Call(args);
+
+ GCPROTECT_END();
+
+ return gc.pNewException;
+#endif
+}
+
+
+/* static */
+BOOL EEFileLoadException::CheckType(Exception* ex)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // used as typeof(EEFileLoadException)
+ RuntimeExceptionKind kind = kException;
+ if (ex->IsType(EEException::GetType()))
+ kind = ((EEException*)ex)->m_kind;
+
+
+ switch(kind)
+ {
+ case kFileLoadException:
+ case kFileNotFoundException:
+ case kBadImageFormatException:
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}
+
+
+// <TODO>@todo: ideally we would use inner exceptions with these routines</TODO>
+
+/* static */
+#ifdef FEATURE_FUSION
+void DECLSPEC_NORETURN EEFileLoadException::Throw(AssemblySpec *pSpec, IFusionBindLog *pFusionLog, HRESULT hr, Exception *pInnerException/* = NULL*/)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (hr == COR_E_THREADABORTED)
+ COMPlusThrow(kThreadAbortException);
+ if (hr == E_OUTOFMEMORY)
+ COMPlusThrowOM();
+#ifdef FEATURE_COMINTEROP
+ if ((hr == RO_E_METADATA_NAME_NOT_FOUND) || (hr == CLR_E_BIND_TYPE_NOT_FOUND))
+ { // These error codes behave like FileNotFound, but are exposed as TypeLoadException
+ EX_THROW_WITH_INNER(EETypeLoadException, (pSpec->GetWinRtTypeNamespace(), pSpec->GetWinRtTypeClassName(), nullptr, nullptr, IDS_EE_WINRT_LOADFAILURE), pInnerException);
+ }
+#endif //FEATURE_COMINTEROP
+
+ StackSString name;
+ pSpec->GetFileOrDisplayName(0, name);
+ EX_THROW_WITH_INNER(EEFileLoadException, (name, hr, pFusionLog), pInnerException);
+}
+#endif //FEATURE_FUSION
+
+/* static */
+void DECLSPEC_NORETURN EEFileLoadException::Throw(AssemblySpec *pSpec, HRESULT hr, Exception *pInnerException/* = NULL*/)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (hr == COR_E_THREADABORTED)
+ COMPlusThrow(kThreadAbortException);
+ if (hr == E_OUTOFMEMORY)
+ COMPlusThrowOM();
+#ifdef FEATURE_COMINTEROP
+ if ((hr == RO_E_METADATA_NAME_NOT_FOUND) || (hr == CLR_E_BIND_TYPE_NOT_FOUND))
+ { // These error codes behave like FileNotFound, but are exposed as TypeLoadException
+ EX_THROW_WITH_INNER(EETypeLoadException, (pSpec->GetWinRtTypeNamespace(), pSpec->GetWinRtTypeClassName(), nullptr, nullptr, IDS_EE_WINRT_LOADFAILURE), pInnerException);
+ }
+#endif //FEATURE_COMINTEROP
+
+ StackSString name;
+ pSpec->GetFileOrDisplayName(0, name);
+ EX_THROW_WITH_INNER(EEFileLoadException, (name, hr), pInnerException);
+}
+
+/* static */
+void DECLSPEC_NORETURN EEFileLoadException::Throw(PEFile *pFile, HRESULT hr, Exception *pInnerException /* = NULL*/)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (hr == COR_E_THREADABORTED)
+ COMPlusThrow(kThreadAbortException);
+ if (hr == E_OUTOFMEMORY)
+ COMPlusThrowOM();
+
+ StackSString name;
+
+ if (pFile->IsAssembly())
+ ((PEAssembly*)pFile)->GetDisplayName(name);
+ else
+ name = StackSString(SString::Utf8, pFile->GetSimpleName());
+ EX_THROW_WITH_INNER(EEFileLoadException, (name, hr), pInnerException);
+
+}
+
+/* static */
+void DECLSPEC_NORETURN EEFileLoadException::Throw(LPCWSTR path, HRESULT hr, Exception *pInnerException/* = NULL*/)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (hr == COR_E_THREADABORTED)
+ COMPlusThrow(kThreadAbortException);
+ if (hr == E_OUTOFMEMORY)
+ COMPlusThrowOM();
+
+ // Remove path - location must be hidden for security purposes
+
+ LPCWSTR pStart = wcsrchr(path, '\\');
+ if (pStart != NULL)
+ pStart++;
+ else
+ pStart = path;
+ EX_THROW_WITH_INNER(EEFileLoadException, (StackSString(pStart), hr), pInnerException);
+}
+
+/* static */
+#ifdef FEATURE_FUSION
+void DECLSPEC_NORETURN EEFileLoadException::Throw(IAssembly *pIAssembly, IHostAssembly *pIHostAssembly, HRESULT hr, Exception *pInnerException/* = NULL*/)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (hr == COR_E_THREADABORTED)
+ COMPlusThrow(kThreadAbortException);
+ if (hr == E_OUTOFMEMORY || hr == HRESULT_FROM_WIN32(ERROR_NOT_ENOUGH_MEMORY))
+ COMPlusThrowOM();
+
+ StackSString name;
+
+ {
+ SafeComHolder<IAssemblyName> pName;
+
+ HRESULT newHr;
+
+ if (pIAssembly)
+ newHr = pIAssembly->GetAssemblyNameDef(&pName);
+ else
+ newHr = pIHostAssembly->GetAssemblyNameDef(&pName);
+
+ if (SUCCEEDED(newHr))
+ FusionBind::GetAssemblyNameDisplayName(pName, name, 0);
+ }
+
+ EX_THROW_WITH_INNER(EEFileLoadException, (name, hr), pInnerException);
+}
+#endif
+/* static */
+void DECLSPEC_NORETURN EEFileLoadException::Throw(PEAssembly *parent,
+ const void *memory, COUNT_T size, HRESULT hr, Exception *pInnerException/* = NULL*/)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (hr == COR_E_THREADABORTED)
+ COMPlusThrow(kThreadAbortException);
+ if (hr == E_OUTOFMEMORY)
+ COMPlusThrowOM();
+
+ StackSString name;
+ name.Printf("%d bytes loaded from ", size);
+
+ StackSString parentName;
+ parent->GetDisplayName(parentName);
+
+ name.Append(parentName);
+ EX_THROW_WITH_INNER(EEFileLoadException, (name, hr), pInnerException);
+}
+
+#ifndef CROSSGEN_COMPILE
+EECOMException::EECOMException(EXCEPINFO *pExcepInfo)
+ : EEException(GetKindFromHR(Undefer(pExcepInfo)))
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (pExcepInfo->scode != 0)
+ m_ED.hr = pExcepInfo->scode;
+ else
+ m_ED.hr = (HRESULT)pExcepInfo->wCode;
+
+ m_ED.bstrDescription = pExcepInfo->bstrDescription;
+ m_ED.bstrSource = pExcepInfo->bstrSource;
+ m_ED.bstrHelpFile = pExcepInfo->bstrHelpFile;
+ m_ED.dwHelpContext = pExcepInfo->dwHelpContext;
+ m_ED.guid = GUID_NULL;
+
+#ifdef FEATURE_COMINTEROP
+ m_ED.bstrReference = NULL;
+ m_ED.bstrRestrictedError = NULL;
+ m_ED.bstrCapabilitySid = NULL;
+ m_ED.pRestrictedErrorInfo = NULL;
+ m_ED.bHasLanguageRestrictedErrorInfo = FALSE;
+#endif
+
+ // Zero the EXCEPINFO.
+ memset(pExcepInfo, 0, sizeof(EXCEPINFO));
+}
+
+EECOMException::EECOMException(ExceptionData *pData)
+ : EEException(GetKindFromHR(pData->hr))
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_ED = *pData;
+
+ // Zero the data.
+ ZeroMemory(pData, sizeof(ExceptionData));
+}
+
+EECOMException::EECOMException(
+ HRESULT hr,
+ IErrorInfo *pErrInfo,
+ bool fUseCOMException, // use System.Runtime.InteropServices.COMException as the default exception type (roughly equivalent to !IsWinRT)
+ IRestrictedErrorInfo* pRestrictedErrInfo,
+ BOOL bHasLanguageRestrictedErrInfo
+ COMMA_INDEBUG(BOOL bCheckInProcCCWTearOff))
+ : EEException(GetKindFromHR(hr, !fUseCOMException))
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_COMINTEROP
+ // Must use another path for managed IErrorInfos...
+ // note that this doesn't cover out-of-proc managed IErrorInfos.
+ _ASSERTE(!bCheckInProcCCWTearOff || !IsInProcCCWTearOff(pErrInfo));
+ _ASSERTE(pRestrictedErrInfo == NULL || !bCheckInProcCCWTearOff || !IsInProcCCWTearOff(pRestrictedErrInfo));
+#endif // FEATURE_COMINTEROP
+
+ m_ED.hr = hr;
+ m_ED.bstrDescription = NULL;
+ m_ED.bstrSource = NULL;
+ m_ED.bstrHelpFile = NULL;
+ m_ED.dwHelpContext = 0;
+ m_ED.guid = GUID_NULL;
+
+#ifdef FEATURE_COMINTEROP
+ m_ED.bstrReference = NULL;
+ m_ED.bstrRestrictedError = NULL;
+ m_ED.bstrCapabilitySid = NULL;
+ m_ED.pRestrictedErrorInfo = NULL;
+ m_ED.bHasLanguageRestrictedErrorInfo = bHasLanguageRestrictedErrInfo;
+#endif
+
+ FillExceptionData(&m_ED, pErrInfo, pRestrictedErrInfo);
+}
+
+BOOL EECOMException::GetThrowableMessage(SString &result)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_COMINTEROP
+ if (m_ED.bstrDescription != NULL || m_ED.bstrRestrictedError != NULL)
+ {
+ // For cross-language WinRT exceptions, general information will be available in bstrDescription,
+ // which is populated from IErrorInfo::GetDescription, and more specific information will be available
+ // in bstrRestrictedError, which comes from the IRestrictedErrorInfo. If both are available, we
+ // need to concatenate them to produce the final exception message.
+
+ result.Clear();
+
+ // If we have a general description, start our message with that
+ if (m_ED.bstrDescription != NULL)
+ {
+ SString generalInformation(m_ED.bstrDescription, SysStringLen(m_ED.bstrDescription));
+ result.Append(generalInformation);
+
+ // If we're also going to have a specific error message, append a newline to separate the two
+ if (m_ED.bstrRestrictedError != NULL)
+ {
+ result.Append(W("\r\n"));
+ }
+ }
+
+ // If we have additional error information, attach it to the end of the string
+ if (m_ED.bstrRestrictedError != NULL)
+ {
+ SString restrictedDescription(m_ED.bstrRestrictedError, SysStringLen(m_ED.bstrRestrictedError));
+ result.Append(restrictedDescription);
+ }
+ }
+#else // !FEATURE_COMINTEROP
+ if (m_ED.bstrDescription != NULL)
+ {
+ result.Set(m_ED.bstrDescription, SysStringLen(m_ED.bstrDescription));
+ }
+#endif // FEATURE_COMINTEROP
+ else
+ {
+ GenerateTopLevelHRExceptionMessage(GetHR(), result);
+ }
+
+ return TRUE;
+}
+
+EECOMException::~EECOMException()
+{
+ WRAPPER_NO_CONTRACT;
+
+ FreeExceptionData(&m_ED);
+}
+
+HRESULT EECOMException::GetHR()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_ED.hr;
+}
+
+OBJECTREF EECOMException::CreateThrowable()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF throwable = NULL;
+ GCPROTECT_BEGIN(throwable);
+
+ // Note that this will pick up the message from GetThrowableMessage
+ throwable = EEException::CreateThrowable();
+
+ // Set the _helpURL field in the exception.
+ if (m_ED.bstrHelpFile)
+ {
+ // Create the help link from the help file and the help context.
+ STRINGREF helpStr = NULL;
+ if (m_ED.dwHelpContext != 0)
+ {
+ // We have a non-zero help context, so use it to form the help link.
+ SString strMessage;
+ strMessage.Printf(W("%s#%d"), m_ED.bstrHelpFile, m_ED.dwHelpContext);
+ helpStr = StringObject::NewString(strMessage);
+ }
+ else
+ {
+ // The help context is 0, so we simply use the help file to form the help link.
+ helpStr = StringObject::NewString(m_ED.bstrHelpFile, SysStringLen(m_ED.bstrHelpFile));
+ }
+
+ ((EXCEPTIONREF)throwable)->SetHelpURL(helpStr);
+ }
+
+ // Set the Source field in the exception.
+ STRINGREF sourceStr = NULL;
+ if (m_ED.bstrSource)
+ {
+ sourceStr = StringObject::NewString(m_ED.bstrSource, SysStringLen(m_ED.bstrSource));
+ }
+ else
+ {
+ // For now, set an empty string as the source.
+ sourceStr = StringObject::GetEmptyString();
+ }
+ ((EXCEPTIONREF)throwable)->SetSource(sourceStr);
+
+#ifdef FEATURE_COMINTEROP
+ //
+ // Support for WinRT interface IRestrictedErrorInfo
+ //
+ if (m_ED.pRestrictedErrorInfo)
+ {
+
+ struct _gc {
+ STRINGREF RestrictedErrorRef;
+ STRINGREF ReferenceRef;
+ STRINGREF RestrictedCapabilitySidRef;
+ OBJECTREF RestrictedErrorInfoObjRef;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ EX_TRY
+ {
+ gc.RestrictedErrorRef = StringObject::NewString(
+ m_ED.bstrRestrictedError,
+ SysStringLen(m_ED.bstrRestrictedError)
+ );
+ gc.ReferenceRef = StringObject::NewString(
+ m_ED.bstrReference,
+ SysStringLen(m_ED.bstrReference)
+ );
+
+ gc.RestrictedCapabilitySidRef = StringObject::NewString(
+ m_ED.bstrCapabilitySid,
+ SysStringLen(m_ED.bstrCapabilitySid)
+ );
+
+ // Convert IRestrictedErrorInfo into a managed object - don't care whether it is a RCW/CCW
+ GetObjectRefFromComIP(
+ &gc.RestrictedErrorInfoObjRef,
+ m_ED.pRestrictedErrorInfo, // IUnknown *
+ NULL, // ClassMT
+ NULL, // ItfMT
+ ObjFromComIP::CLASS_IS_HINT | ObjFromComIP::IGNORE_WINRT_AND_SKIP_UNBOXING
+ );
+
+ //
+ // Call Exception.AddExceptionDataForRestrictedErrorInfo and put error information
+ // from IRestrictedErrorInfo on Exception.Data
+ //
+ MethodDescCallSite addExceptionDataForRestrictedErrorInfo(
+ METHOD__EXCEPTION__ADD_EXCEPTION_DATA_FOR_RESTRICTED_ERROR_INFO,
+ &throwable
+ );
+
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(throwable),
+ ObjToArgSlot(gc.RestrictedErrorRef),
+ ObjToArgSlot(gc.ReferenceRef),
+ ObjToArgSlot(gc.RestrictedCapabilitySidRef),
+ ObjToArgSlot(gc.RestrictedErrorInfoObjRef),
+ BoolToArgSlot(m_ED.bHasLanguageRestrictedErrorInfo)
+ };
+
+ addExceptionDataForRestrictedErrorInfo.Call(Args);
+
+ }
+ EX_CATCH
+ {
+ // IDictionary.Add may throw. Ignore all non-terminal exceptions.
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+
+ GCPROTECT_END();
+ }
+#endif // FEATURE_COMINTEROP
+
+ GCPROTECT_END();
+
+
+ return throwable;
+}
+
+// ---------------------------------------------------------------------------
+// ObjrefException methods
+// ---------------------------------------------------------------------------
+
+ObjrefException::ObjrefException()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+ObjrefException::ObjrefException(OBJECTREF throwable)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SetThrowableHandle(GetAppDomain()->CreateHandle(throwable));
+}
+
+// --------------------------------------------------------------------------------------------------------------------------------------
+// ObjrefException and CLRLastThrownObjectException are never set as inner exception for an internal CLR exception.
+// As a result, if we invoke DomainBoundClone against an exception, it will never reach these implementations.
+// If someone does set them as inner, it will trigger a contract violation, which is valid and should be fixed by whoever
+// set them as inner, since Exception::DomainBoundClone is implemented in utilcode, which has to work outside the context of the CLR and thus
+// should never trigger a GC. This is also why GC_TRIGGERS is not supported in utilcode (refer to its definition in contracts.h).
+// --------------------------------------------------------------------------------------------------------------------------------------
+Exception *ObjrefException::DomainBoundCloneHelper()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ GCX_COOP();
+ return new ObjrefException(GetThrowable());
+}
+
+// ---------------------------------------------------------------------------
+// CLRLastThrownException methods
+// ---------------------------------------------------------------------------
+
+CLRLastThrownObjectException::CLRLastThrownObjectException()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+Exception *CLRLastThrownObjectException::CloneHelper()
+{
+ WRAPPER_NO_CONTRACT;
+ GCX_COOP();
+ return new ObjrefException(GetThrowable());
+}
+
+// ---------------------------------------------------------------------------
+// See ObjrefException::DomainBoundCloneHelper comments.
+// ---------------------------------------------------------------------------
+Exception *CLRLastThrownObjectException::DomainBoundCloneHelper()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ GCX_COOP();
+ return new ObjrefException(GetThrowable());
+}
+
+OBJECTREF CLRLastThrownObjectException::CreateThrowable()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ DEBUG_STMT(Validate());
+
+ return GetThread()->LastThrownObject();
+} // OBJECTREF CLRLastThrownObjectException::CreateThrowable()
+
+#if defined(_DEBUG)
+CLRLastThrownObjectException* CLRLastThrownObjectException::Validate()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ // Have to be in coop for GCPROTECT_BEGIN.
+ GCX_COOP();
+
+ OBJECTREF throwable = NULL;
+
+ GCPROTECT_BEGIN(throwable);
+
+ Thread * pThread = GetThread();
+ throwable = pThread->LastThrownObject();
+
+ DWORD dwCurrentExceptionCode = GetCurrentExceptionCode();
+
+#if HAS_TRACK_CXX_EXCEPTION_CODE_HACK
+ DWORD dwLastCxxSEHExceptionCode = pThread->m_LastCxxSEHExceptionCode;
+#endif // HAS_TRACK_CXX_EXCEPTION_CODE_HACK
+
+ if (dwCurrentExceptionCode == BOOTUP_EXCEPTION_COMPLUS)
+ {
+ // BOOTUP_EXCEPTION_COMPLUS can be thrown when thread setup fails for reasons such as the
+ // runtime shutting down or managed code no longer being allowed to execute.
+ //
+ // If this exception is caught in EX_CATCH, there may not be any LTO setup since:
+ //
+ // 1) It is set up against a thread that may not exist (due to thread setup failure)
+ // 2) This exception is raised using RaiseException (and not the managed raise implementation in RaiseTheExceptionInternalOnly),
+ // since managed code may not be allowed to execute.
+ //
+ // However, code inside EX_CATCH is abstracted from this EH specificity and thus will attempt to fetch the throwable
+ // using GET_THROWABLE, which will, in turn, use the GET_EXCEPTION macro to fetch the C++ exception type corresponding to the caught exception.
+ // Since BOOTUP_EXCEPTION_COMPLUS is a SEH exception, this (C++ exception) type will be CLRLastThrownObjectException.
+ //
+ // GET_EXCEPTION will call this method to validate the presence of LTO for a SEH exception caught by EX_CATCH. This is based upon the assumption
+ // that by the time a SEH exception is caught in EX_CATCH, the LTO is set up:
+ //
+ // A) For a managed exception thrown, this is done by RaiseTheExceptionInternalOnly.
+ // B) For a SEH exception that enters managed code from a PInvoke call, this is done by calling SafeSetThrowables after the corresponding throwable is created
+ // using CreateCOMPlusExceptionObject.
+
+ // Clearly, BOOTUP_EXCEPTION_COMPLUS can also be caught in EX_CATCH. However:
+ //
+ // (A) above is not applicable since the exception is raised using RaiseException.
+ //
+ // (B) scenario is interesting. On x86, CPFH_FirstPassHandler also invokes CLRVectoredExceptionHandler (for legacy purposes) that, in Phase3, will return EXCEPTION_CONTINUE_SEARCH for
+ // BOOTUP_EXCEPTION_COMPLUS. This will result in CPFH_FirstPassHandler simply returning from the x86 personality routine without invoking CreateCOMPlusExceptionObject even if managed
+ // frames were present on the stack (as happens in PInvoke). Thus, there is no LTO set up on x86.
+ //
+ // On X64, the personality routine does not invoke VEH but simply creates the exception tracker and will also create throwable and setup LTO if managed frames are present on the stack.
+ // But if there are no managed frames on the stack, then the managed personality routine may or may not get invoked (depending upon whether any VM native function is present on the stack whose
+ // personality routine is the managed personality routine). Thus, we may have a case of LTO not being present on X64 as well, for this exception.
+ //
+ // Thus, when we see BOOTUP_EXCEPTION_COMPLUS, we will return successfully (without doing anything) to imply a successful LTO validation. Eventually, a valid
+ // throwable will be returned to the user of GET_THROWABLE (for details, trace the call to CLRException::GetThrowableFromException for CLRLastThrownObjectException type).
+ //
+ // This also ensures that the handling of BOOTUP_EXCEPTION_COMPLUS is now in sync between the chk and fre builds in terms of the throwable returned.
+ }
+ else
+
+#if HAS_TRACK_CXX_EXCEPTION_CODE_HACK // On x86, we grab the exception code.
+
+ // The exception code can legitimately take several values.
+ // The most obvious is EXCEPTION_COMPLUS, as when managed code does 'throw new Exception'.
+ // Another case is EXCEPTION_MSVC, when we EX_RETHROW a CLRLastThrownObjectException, which will
+ // throw an actual CLRLastThrownObjectException C++ exception.
+ // Other values are possible, if we are wrapping an SEH exception (say, AV) in
+ // a managed exception. In these other cases, the exception object should have
+ // an XCode that is the same as the exception code.
+ // So, if the exception code isn't EXCEPTION_COMPLUS, and isn't EXCEPTION_MSVC, then
+ // we shouldn't be getting a CLRLastThrownObjectException. This indicates that
+ // we are missing a "callout filter", which should have transformed the SEH
+ // exception into a COMPLUS exception.
+ // It also turns out that sometimes we see STATUS_UNWIND more recently than the exception
+ // code. In that case, we have lost the original exception code, and so can't check.
+
+ if (dwLastCxxSEHExceptionCode != EXCEPTION_COMPLUS &&
+ dwLastCxxSEHExceptionCode != EXCEPTION_MSVC &&
+ dwLastCxxSEHExceptionCode != STATUS_UNWIND)
+ {
+ // Maybe there is an exception wrapping a Win32 fault. In that case, the
+ // last exception code won't be EXCEPTION_COMPLUS, but the last thrown exception
+ // will have an XCode equal to the last exception code.
+
+ // Get the exception code from the exception object.
+ DWORD dwExceptionXCode = GetExceptionXCode(throwable);
+
+ // If that code is the same as the last exception code, call it good...
+ if (dwLastCxxSEHExceptionCode != dwExceptionXCode)
+ {
+ // For rude thread abort, we may have updated the LastThrownObject without throwing an exception.
+ BOOL fIsRudeThreadAbortException =
+ throwable == CLRException::GetPreallocatedRudeThreadAbortException();
+
+ // For stack overflow, we may have updated the LastThrownObject without throwing an exception.
+ BOOL fIsStackOverflowException =
+ throwable == CLRException::GetPreallocatedStackOverflowException() &&
+ (IsSOExceptionCode(dwLastCxxSEHExceptionCode));
+
+ // ... but if not, raise an error.
+ if (!fIsRudeThreadAbortException && !fIsStackOverflowException)
+ {
+ static int iSuppress = -1;
+ if (iSuppress == -1)
+ iSuppress = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SuppressLostExceptionTypeAssert);
+ if (!iSuppress)
+ {
+ // Raising an assert message can cause a mode violation.
+ CONTRACT_VIOLATION(ModeViolation);
+
+ // Use DbgAssertDialog to get the formatting right.
+ DbgAssertDialog(__FILE__, __LINE__,
+ "The 'current' exception is not EXCEPTION_COMPLUS, yet the runtime is\n"
+ " requesting the 'LastThrownObject'.\n"
+ "The runtime may have lost track of the type of an exception in flight.\n"
+ " Please get a good stack trace of the exception that was thrown first\n"
+ " (by re-running the app & catching first chance exceptions), find\n"
+ " the caller of Validate, and file a bug against the owner.\n\n"
+ "To suppress this assert 'set COMPLUS_SuppressLostExceptionTypeAssert=1'");
+ }
+ }
+ }
+ }
+ else
+#endif // HAS_TRACK_CXX_EXCEPTION_CODE_HACK
+
+ if (throwable == NULL)
+ { // If there isn't a LastThrownObject at all, that's a problem for GetLastThrownObject
+ // We've lost track of the exception's type. Raise an assert. (This is configurable to allow
+ // stress labs to turn off the assert.)
+
+ static int iSuppress = -1;
+ if (iSuppress == -1)
+ iSuppress = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SuppressLostExceptionTypeAssert);
+ if (!iSuppress)
+ {
+ // Raising an assert message can cause a mode violation.
+ CONTRACT_VIOLATION(ModeViolation);
+
+ // Use DbgAssertDialog to get the formatting right.
+ DbgAssertDialog(__FILE__, __LINE__,
+ "The 'LastThrownObject' should not be, but is, NULL.\n"
+ "The runtime may have lost track of the type of an exception in flight.\n"
+ "Please get a good stack trace, find the caller of Validate, and file a bug against the owner.\n\n"
+ "To suppress this assert 'set COMPlus_SuppressLostExceptionTypeAssert=1'");
+ }
+ }
+ else
+ { // If there IS a LastThrownObject, then, for
+ // exceptions other than the pre-allocated ones...
+ if (!CLRException::IsPreallocatedExceptionObject(throwable))
+ { // ...check that the exception is from the current appdomain.
+#if CHECK_APP_DOMAIN_LEAKS
+ if (!throwable->CheckAppDomain(GetAppDomain()))
+ { // We've lost track of the exception's type. Raise an assert. (This is configurable to allow
+ // stress labs to turn off the assert.)
+
+ static int iSuppress = -1;
+ if (iSuppress == -1)
+ iSuppress = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SuppressLostExceptionTypeAssert);
+ if (!iSuppress)
+ {
+ // Raising an assert message can cause a mode violation.
+ CONTRACT_VIOLATION(ModeViolation);
+
+ // Use DbgAssertDialog to get the formatting right.
+ DbgAssertDialog(__FILE__, __LINE__,
+ "The 'LastThrownObject' does not belong to the current appdomain.\n"
+ "The runtime may have lost track of the type of an exception in flight.\n"
+ "Please get a good stack trace, find the caller of Validate, and file a bug against the owner.\n\n"
+ "To suppress this assert 'set COMPlus_SuppressLostExceptionTypeAssert=1'");
+ }
+ }
+#endif
+ }
+ }
+
+ GCPROTECT_END();
+
+ return this;
+} // CLRLastThrownObjectException* CLRLastThrownObjectException::Validate()
+#endif // _DEBUG
+
+// ---------------------------------------------------------------------------
+// Helper function to get an exception object from outside the exception system.
+// Create and return a CLRLastThrownObjectException. Its virtual destructor
+// will clean up properly.
+void GetLastThrownObjectExceptionFromThread_Internal(Exception **ppException)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ SO_TOLERANT; // no risk of an SO after we've allocated the object here
+ }
+ CONTRACTL_END;
+
+ // If the Thread has been set up, then the LastThrownObject may make sense...
+ if (GetThread())
+ {
+ // give back an object that knows about Threads and their exceptions.
+ *ppException = new CLRLastThrownObjectException();
+ }
+ else
+ {
+ // but if no Thread, don't pretend to know about LastThrownObject.
+ *ppException = NULL;
+ }
+
+} // void GetLastThrownObjectExceptionFromThread_Internal()
+
+#endif // CROSSGEN_COMPILE
+
+//@TODO: Make available generally?
+// Wrapper class to encapsulate both array pointer and element count.
+template <typename T>
+class ArrayReference
+{
+public:
+ typedef T value_type;
+ typedef const typename std::remove_const<T>::type const_value_type;
+
+ typedef ArrayDPTR(value_type) array_type;
+ typedef ArrayDPTR(const_value_type) const_array_type;
+
+ // Constructor taking array pointer and size.
+ ArrayReference(array_type array, size_t size)
+ : _array(dac_cast<array_type>(array))
+ , _size(size)
+ { LIMITED_METHOD_CONTRACT; }
+
+ // Constructor taking a statically sized array by reference.
+ template <size_t N>
+ ArrayReference(T (&array)[N])
+ : _array(dac_cast<array_type>(&array[0]))
+ , _size(N)
+ { LIMITED_METHOD_CONTRACT; }
+
+ // Copy constructor.
+ ArrayReference(ArrayReference const & other)
+ : _array(other._array)
+ , _size(other._size)
+ { LIMITED_METHOD_CONTRACT; }
+
+ // Indexer
+ template <typename IdxT>
+ T & operator[](IdxT idx)
+ { LIMITED_METHOD_CONTRACT; _ASSERTE(idx < _size); return _array[idx]; }
+
+ // Implicit conversion operators.
+ operator array_type()
+ { LIMITED_METHOD_CONTRACT; return _array; }
+
+ operator const_array_type() const
+ { LIMITED_METHOD_CONTRACT; return dac_cast<const_array_type>(_array); }
+
+ // Returns the array element count.
+ size_t size() const
+ { LIMITED_METHOD_CONTRACT; return _size; }
+
+ // Iteration methods and types.
+ typedef array_type iterator;
+
+ iterator begin()
+ { LIMITED_METHOD_CONTRACT; return _array; }
+
+ iterator end()
+ { LIMITED_METHOD_CONTRACT; return _array + _size; }
+
+ typedef const_array_type const_iterator;
+
+ const_iterator begin() const
+ { LIMITED_METHOD_CONTRACT; return dac_cast<const_array_type>(_array); }
+
+ const_iterator end() const
+ { LIMITED_METHOD_CONTRACT; return dac_cast<const_array_type>(_array) + _size; }
+
+private:
+ array_type _array;
+ size_t _size;
+};
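+
+// Example usage (illustrative sketch only; 'rgSample' is a hypothetical array).
+// The statically sized array constructor deduces the element count, and the
+// iterator typedefs allow simple traversal:
+//
+//     static const HRESULT rgSample[] = { E_FAIL, E_OUTOFMEMORY };
+//     ArrayReference<const HRESULT> refSample(rgSample);  // size() == 2
+//     for (ArrayReference<const HRESULT>::iterator it = refSample.begin();
+//          it != refSample.end();
+//          ++it)
+//     {
+//         // *it is E_FAIL on the first iteration, E_OUTOFMEMORY on the second.
+//     }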
+
+ArrayReference<const HRESULT> GetHRESULTsForExceptionKind(RuntimeExceptionKind kind)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch (kind)
+ {
+ #define DEFINE_EXCEPTION(ns, reKind, bHRformessage, ...) \
+ case k##reKind: \
+ return ArrayReference<const HRESULT>(s_##reKind##HRs); \
+ break;
+ #define DEFINE_EXCEPTION_HR_WINRT_ONLY(ns, reKind, ...)
+ #define DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY(ns, reKind, assemblySimpleName, publicKeyToken, bHRformessage, ...) DEFINE_EXCEPTION(ns, reKind, bHRformessage, __VA_ARGS__)
+ #include "rexcep.h"
+
+ default:
+ _ASSERTE(!"Unknown exception kind!");
+ break;
+
+ }
+
+ return ArrayReference<const HRESULT>(nullptr, 0);
+}
+
+bool IsHRESULTForExceptionKind(HRESULT hr, RuntimeExceptionKind kind)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ArrayReference<const HRESULT> rgHR = GetHRESULTsForExceptionKind(kind);
+ for (ArrayReference<const HRESULT>::iterator i = rgHR.begin(); i != rgHR.end(); ++i)
+ {
+ if (*i == hr)
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
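+
+// Example (illustrative sketch): a caller holding a failure HRESULT can test
+// whether it belongs to a particular exception kind; e.g., assuming rexcep.h
+// maps COR_E_FILENOTFOUND to kFileNotFoundException:
+//
+//     if (IsHRESULTForExceptionKind(hr, kFileNotFoundException))
+//     {
+//         // hr is one of the HRESULTs associated with FileNotFoundException.
+//     }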
+
+
diff --git a/src/vm/clrex.h b/src/vm/clrex.h
new file mode 100644
index 0000000000..cd91edb634
--- /dev/null
+++ b/src/vm/clrex.h
@@ -0,0 +1,1313 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// ---------------------------------------------------------------------------
+// CLREx.h
+// ---------------------------------------------------------------------------
+
+
+#ifndef _CLREX_H_
+#define _CLREX_H_
+
+#include <ex.h>
+
+#include "objecthandle.h"
+#include "runtimeexceptionkind.h"
+#include "interoputil.h"
+
+class BaseBind;
+class AssemblySpec;
+class PEFile;
+class PEAssembly;
+
+struct StackTraceElement
+{
+ UINT_PTR ip;
+ UINT_PTR sp;
+ PTR_MethodDesc pFunc;
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ // TRUE if this element represents the last frame of the foreign
+ // exception stack trace.
+ BOOL fIsLastFrameFromForeignStackTrace;
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+ bool operator==(StackTraceElement const & rhs) const
+ {
+ return ip == rhs.ip
+ && sp == rhs.sp
+ && pFunc == rhs.pFunc;
+ }
+
+ bool operator!=(StackTraceElement const & rhs) const
+ {
+ return !(*this == rhs);
+ }
+
+ bool PartiallyEqual(StackTraceElement const & rhs) const
+ {
+ return pFunc == rhs.pFunc;
+ }
+
+ void PartialAtomicUpdate(StackTraceElement const & rhs)
+ {
+ ip = rhs.ip;
+ }
+};
+
+class StackTraceInfo
+{
+private:
+ // for building stack trace info
+ StackTraceElement* m_pStackTrace; // pointer to stack trace storage
+ unsigned m_cStackTrace; // size of stack trace storage
+ unsigned m_dFrameCount; // current frame in stack trace
+ unsigned m_cDynamicMethodItems; // number of items in the Dynamic Method array
+ unsigned m_dCurrentDynamicIndex; // index of the next location where the resolver object will be stored
+
+public:
+ void Init();
+ BOOL IsEmpty();
+ void AllocateStackTrace();
+ void ClearStackTrace();
+ void FreeStackTrace();
+ void SaveStackTrace(BOOL bAllowAllocMem, OBJECTHANDLE hThrowable, BOOL bReplaceStack, BOOL bSkipLastElement);
+ BOOL AppendElement(BOOL bAllowAllocMem, UINT_PTR currentIP, UINT_PTR currentSP, MethodDesc* pFunc, CrawlFrame* pCf);
+
+ void GetLeafFrameInfo(StackTraceElement* pStackTraceElement);
+};
+
+
+// ---------------------------------------------------------------------------
+// CLRException represents an exception which has a managed representation.
+// It adds the generic method GetThrowable().
+// ---------------------------------------------------------------------------
+class CLRException : public Exception
+{
+ friend bool DebugIsEECxxExceptionPointer(void* pv);
+ friend class CLRLastThrownObjectException;
+ private:
+ static const int c_type = 0x434c5220; // 'CLR '
+
+ protected:
+ OBJECTHANDLE m_throwableHandle;
+
+ void SetThrowableHandle(OBJECTHANDLE throwable);
+ OBJECTHANDLE GetThrowableHandle() { return m_throwableHandle; }
+
+
+ CLRException();
+public:
+ ~CLRException();
+
+ OBJECTREF GetThrowable();
+
+ // Dynamic type query for catchers
+ static int GetType() {LIMITED_METHOD_CONTRACT; return c_type; }
+ virtual int GetInstanceType() { LIMITED_METHOD_CONTRACT; return c_type; }
+ BOOL IsType(int type) { WRAPPER_NO_CONTRACT; return type == c_type || Exception::IsType(type); }
+
+ BOOL IsSameInstanceType(Exception *pException)
+ {
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_NOTHROW;
+
+ if (pException->GetInstanceType() != GetInstanceType())
+ {
+ return FALSE;
+ }
+ OBJECTREF mine = GetThrowable();
+ OBJECTREF other = ((CLRException*)pException)->GetThrowable();
+ return mine != NULL && other != NULL &&
+ mine->GetMethodTable() == other->GetMethodTable();
+ }
+
+ // Overrides
+ virtual BOOL IsDomainBound()
+ {
+ //@TODO special case for preallocated exceptions?
+ return TRUE;
+ }
+
+ HRESULT GetHR();
+ IErrorInfo *GetErrorInfo();
+ HRESULT SetErrorInfo();
+
+ void GetMessage(SString &result);
+
+ protected:
+
+ virtual OBJECTREF CreateThrowable() { LIMITED_METHOD_CONTRACT; return NULL; }
+
+ public: // These are really private, but are used by the exception macros
+
+
+ // Accessors for all the preallocated exception objects.
+ static OBJECTREF GetPreallocatedBaseException();
+ static OBJECTREF GetPreallocatedOutOfMemoryException();
+ static OBJECTREF GetPreallocatedRudeThreadAbortException();
+ static OBJECTREF GetPreallocatedThreadAbortException();
+ static OBJECTREF GetPreallocatedStackOverflowException();
+ static OBJECTREF GetPreallocatedExecutionEngineException();
+
+ // Accessors for all the preallocated exception handles.
+ static OBJECTHANDLE GetPreallocatedOutOfMemoryExceptionHandle();
+ static OBJECTHANDLE GetPreallocatedRudeThreadAbortExceptionHandle();
+ static OBJECTHANDLE GetPreallocatedThreadAbortExceptionHandle();
+ static OBJECTHANDLE GetPreallocatedStackOverflowExceptionHandle();
+ static OBJECTHANDLE GetPreallocatedExecutionEngineExceptionHandle();
+ static OBJECTHANDLE GetPreallocatedHandleForObject(OBJECTREF o);
+
+ // Use these to determine if a handle or object ref is one of the preallocated handles or object refs.
+ static BOOL IsPreallocatedExceptionObject(OBJECTREF o);
+ static BOOL IsPreallocatedExceptionHandle(OBJECTHANDLE h);
+
+ // Prefer a new OOM exception if we can make one. If we cannot, then give back the pre-allocated
+ // one.
+ static OBJECTREF GetBestOutOfMemoryException();
+
+ static OBJECTREF GetThrowableFromException(Exception *pException);
+ static OBJECTREF GetThrowableFromExceptionRecord(EXCEPTION_RECORD *pExceptionRecord);
+
+ class HandlerState : public Exception::HandlerState
+ {
+ public:
+ Thread* m_pThread;
+ Frame* m_pFrame;
+ BOOL m_fPreemptiveGCDisabled;
+
+ enum NonNullThread
+ {
+ ThreadIsNotNull
+ };
+
+ HandlerState(Thread * pThread);
+ HandlerState(Thread * pThread, NonNullThread dummy);
+
+ void CleanupTry();
+ void SetupCatch(INDEBUG_COMMA(__in_z const char * szFile) int lineNum);
+#ifdef LOGGING // Use parent implementation that inlines into nothing in retail build
+ void SucceedCatch();
+#endif
+ void SetupFinally();
+ };
+};
+
+// prototype for helper function to get exception object from thread's LastThrownObject.
+void GetLastThrownObjectExceptionFromThread_Internal(Exception **ppException);
+
+
+// ---------------------------------------------------------------------------
+// EEException is a CLR exception subclass which has purely unmanaged representation.
+// The standard methods will not do any GC dangerous operations. Thus you
+// can throw and catch such an exception without regard to GC mode.
+// ---------------------------------------------------------------------------
+
+class EEException : public CLRException
+{
+ friend bool DebugIsEECxxExceptionPointer(void* pv);
+
+ private:
+ static const int c_type = 0x45452020; // 'EE '
+
+ public:
+ const RuntimeExceptionKind m_kind;
+
+ EEException(RuntimeExceptionKind kind);
+ EEException(HRESULT hr);
+
+ // Dynamic type query for catchers
+ static int GetType() {LIMITED_METHOD_CONTRACT; return c_type; }
+ virtual int GetInstanceType() { LIMITED_METHOD_CONTRACT; return c_type; }
+ BOOL IsType(int type) { WRAPPER_NO_CONTRACT; return type == c_type || CLRException::IsType(type); }
+
+ BOOL IsSameInstanceType(Exception *pException)
+ {
+ WRAPPER_NO_CONTRACT;
+ return pException->GetInstanceType() == GetType() && ((EEException*)pException)->m_kind == m_kind;
+ }
+
+ // Virtual overrides
+ HRESULT GetHR();
+ IErrorInfo *GetErrorInfo();
+ void GetMessage(SString &result);
+ OBJECTREF CreateThrowable();
+
+ // GetThrowableMessage returns a message to be stored in the throwable.
+ // Returns FALSE if there is no useful value.
+ virtual BOOL GetThrowableMessage(SString &result);
+
+ static BOOL GetResourceMessage(UINT iResourceID, SString &result,
+ const SString &arg1 = SString::Empty(), const SString &arg2 = SString::Empty(),
+ const SString &arg3 = SString::Empty(), const SString &arg4 = SString::Empty(),
+ const SString &arg5 = SString::Empty(), const SString &arg6 = SString::Empty());
+
+ // Note: reKind-->hr is a one-to-many relationship.
+ //
+ // Each reKind is associated with one or more HRESULTs, and
+ // every HRESULT is associated with exactly one reKind (with kCOMException being the catch-all).
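+ // For example (illustrative; rexcep.h is the authoritative table), a kind such as
+ // kFileNotFoundException may carry several HRESULTs, but GetKindFromHR maps each
+ // of those HRESULTs back to that single kind.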
+ static RuntimeExceptionKind GetKindFromHR(HRESULT hr, bool fIsWinRtMode = false);
+ protected:
+ static HRESULT GetHRFromKind(RuntimeExceptionKind reKind);
+
+#ifdef _DEBUG
+ EEException() : m_kind(kException)
+ {
+ // Used only for DebugIsEECxxExceptionPointer to get the vtable pointer.
+ // We need a variant which does not allocate memory.
+ }
+#endif // _DEBUG
+
+ virtual Exception *CloneHelper()
+ {
+ WRAPPER_NO_CONTRACT;
+ return new EEException(m_kind);
+ }
+
+};
+
+// ---------------------------------------------------------------------------
+// EEMessageException is an EE exception subclass composed of a type and
+// an unmanaged message of some sort.
+// ---------------------------------------------------------------------------
+
+class EEMessageException : public EEException
+{
+ friend bool DebugIsEECxxExceptionPointer(void* pv);
+
+ private:
+ HRESULT m_hr;
+ UINT m_resID;
+ InlineSString<32> m_arg1;
+ InlineSString<32> m_arg2;
+ SString m_arg3;
+ SString m_arg4;
+ SString m_arg5;
+ SString m_arg6;
+
+ public:
+ EEMessageException(RuntimeExceptionKind kind, UINT resID = 0, LPCWSTR szArg1 = NULL, LPCWSTR szArg2 = NULL,
+ LPCWSTR szArg3 = NULL, LPCWSTR szArg4 = NULL, LPCWSTR szArg5 = NULL, LPCWSTR szArg6 = NULL);
+
+ EEMessageException(HRESULT hr);
+
+ EEMessageException(HRESULT hr, bool fUseCOMException);
+
+ EEMessageException(HRESULT hr, UINT resID, LPCWSTR szArg1 = NULL, LPCWSTR szArg2 = NULL, LPCWSTR szArg3 = NULL,
+ LPCWSTR szArg4 = NULL, LPCWSTR szArg5 = NULL, LPCWSTR szArg6 = NULL);
+
+ EEMessageException(RuntimeExceptionKind kind, HRESULT hr, UINT resID, LPCWSTR szArg1 = NULL, LPCWSTR szArg2 = NULL,
+ LPCWSTR szArg3 = NULL, LPCWSTR szArg4 = NULL, LPCWSTR szArg5 = NULL, LPCWSTR szArg6 = NULL);
+
+ // Virtual overrides
+ HRESULT GetHR();
+
+ BOOL GetThrowableMessage(SString &result);
+
+ UINT GetResID(void) { LIMITED_METHOD_CONTRACT; return m_resID; }
+
+ static BOOL IsEEMessageException(Exception *pException)
+ {
+ return (*(PVOID*)pException == GetEEMessageExceptionVPtr());
+ }
+
+ protected:
+
+ virtual Exception *CloneHelper()
+ {
+ WRAPPER_NO_CONTRACT;
+ return new EEMessageException(
+ m_kind, m_hr, m_resID, m_arg1, m_arg2, m_arg3, m_arg4, m_arg5, m_arg6);
+ }
+
+
+ private:
+
+ static PVOID GetEEMessageExceptionVPtr()
+ {
+ CONTRACT (PVOID)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ EEMessageException boilerplate(E_FAIL);
+ RETURN (PVOID&)boilerplate;
+ }
+
+ BOOL GetResourceMessage(UINT iResourceID, SString &result);
+
+#ifdef _DEBUG
+ EEMessageException()
+ {
+ // Used only for DebugIsEECxxExceptionPointer to get the vtable pointer.
+ // We need a variant which does not allocate memory.
+ }
+#endif // _DEBUG
+};
+
+// ---------------------------------------------------------------------------
+// EEResourceException is an EE exception subclass composed of a type and
+// a message loaded from a managed exception resource.
+// ---------------------------------------------------------------------------
+
+class EEResourceException : public EEException
+{
+ friend bool DebugIsEECxxExceptionPointer(void* pv);
+
+ private:
+ InlineSString<32> m_resourceName;
+
+ public:
+ EEResourceException(RuntimeExceptionKind kind, const SString &resourceName);
+
+ // Unmanaged message text containing only the resource name (GC safe)
+ void GetMessage(SString &result);
+
+ // Throwable message containing the resource contents (not GC safe)
+ BOOL GetThrowableMessage(SString &result);
+
+ protected:
+
+ virtual Exception *CloneHelper()
+ {
+ WRAPPER_NO_CONTRACT;
+ return new EEResourceException(m_kind, m_resourceName);
+ }
+
+private:
+#ifdef _DEBUG
+ EEResourceException()
+ {
+ // Used only for DebugIsEECxxExceptionPointer to get the vtable pointer.
+ // We need a variant which does not allocate memory.
+ }
+#endif // _DEBUG
+};
+
+// ---------------------------------------------------------------------------
+// EECOMException is an EE exception subclass composed of COM-generated data.
+// Note that you must ensure that the COM data was not derived from a wrapper
+// on a managed Exception object. (If so, you must compose the exception from
+// the managed object itself.)
+// ---------------------------------------------------------------------------
+
+struct ExceptionData
+{
+ HRESULT hr;
+ BSTR bstrDescription;
+ BSTR bstrSource;
+ BSTR bstrHelpFile;
+ DWORD dwHelpContext;
+ GUID guid;
+#ifdef FEATURE_COMINTEROP
+ BSTR bstrRestrictedError; // Returned from IRestrictedErrorInfo::GetErrorDetails
+ BSTR bstrReference; // Returned from IRestrictedErrorInfo::GetReference
+ BSTR bstrCapabilitySid; // Returned from IRestrictedErrorInfo::GetErrorDetails
+ IUnknown *pRestrictedErrorInfo; // AddRef-ed RestrictedErrorInfo pointer
+ // We need to keep this alive as long as the user needs the reference
+ BOOL bHasLanguageRestrictedErrorInfo;
+#endif // FEATURE_COMINTEROP
+};
+
+class EECOMException : public EEException
+{
+ friend bool DebugIsEECxxExceptionPointer(void* pv);
+
+ private:
+ ExceptionData m_ED;
+
+ public:
+
+ EECOMException(EXCEPINFO *pExcepInfo);
+ EECOMException(ExceptionData *pED);
+ EECOMException(
+ HRESULT hr,
+ IErrorInfo *pErrInfo,
+ bool fUseCOMException,
+ IRestrictedErrorInfo *pRestrictedErrInfo,
+ BOOL bHasLanguageRestrictedErrorInfo
+ COMMA_INDEBUG(BOOL bCheckInProcCCWTearOff = TRUE));
+ ~EECOMException();
+
+ // Virtual overrides
+ HRESULT GetHR();
+
+ BOOL GetThrowableMessage(SString &result);
+ OBJECTREF CreateThrowable();
+
+ protected:
+
+ virtual Exception *CloneHelper()
+ {
+ WRAPPER_NO_CONTRACT;
+ return new EECOMException(&m_ED);
+ }
+
+private:
+#ifdef _DEBUG
+ EECOMException()
+ {
+ // Used only for DebugIsEECxxExceptionPointer to get the vtable pointer.
+ // We need a variant which does not allocate memory.
+ ZeroMemory(&m_ED, sizeof(m_ED));
+ }
+#endif // _DEBUG
+};
+
+// ---------------------------------------------------------------------------
+// EEFieldException is an EE exception subclass composed of a field
+// ---------------------------------------------------------------------------
+class EEFieldException : public EEException
+{
+ friend bool DebugIsEECxxExceptionPointer(void* pv);
+
+ private:
+ FieldDesc *m_pFD;
+ MethodDesc *m_pAccessingMD;
+ SString m_additionalContext;
+ UINT m_messageID;
+
+ public:
+ EEFieldException(FieldDesc *pField);
+ EEFieldException(FieldDesc *pField, MethodDesc *pAccessingMD, const SString &additionalContext, UINT messageID);
+
+ BOOL GetThrowableMessage(SString &result);
+ virtual BOOL IsDomainBound() {return TRUE;};
+protected:
+
+ virtual Exception *CloneHelper()
+ {
+ WRAPPER_NO_CONTRACT;
+ return new EEFieldException(m_pFD, m_pAccessingMD, m_additionalContext, m_messageID);
+ }
+
+private:
+#ifdef _DEBUG
+ EEFieldException()
+ {
+ // Used only for DebugIsEECxxExceptionPointer to get the vtable pointer.
+ // We need a variant which does not allocate memory.
+ }
+#endif // _DEBUG
+};
+
+// ---------------------------------------------------------------------------
+// EEMethodException is an EE exception subclass composed of a method
+// ---------------------------------------------------------------------------
+
+class EEMethodException : public EEException
+{
+ friend bool DebugIsEECxxExceptionPointer(void* pv);
+
+ private:
+ MethodDesc *m_pMD;
+ MethodDesc *m_pAccessingMD;
+ SString m_additionalContext;
+ UINT m_messageID;
+
+ public:
+ EEMethodException(MethodDesc *pMethod);
+ EEMethodException(MethodDesc *pMethod, MethodDesc *pAccessingMD, const SString &additionalContext, UINT messageID);
+
+ BOOL GetThrowableMessage(SString &result);
+ virtual BOOL IsDomainBound() {return TRUE;};
+ protected:
+
+ virtual Exception *CloneHelper()
+ {
+ WRAPPER_NO_CONTRACT;
+ return new EEMethodException(m_pMD, m_pAccessingMD, m_additionalContext, m_messageID);
+ }
+
+private:
+#ifdef _DEBUG
+ EEMethodException()
+ {
+ // Used only for DebugIsEECxxExceptionPointer to get the vtable pointer.
+ // We need a variant which does not allocate memory.
+ }
+#endif // _DEBUG
+};
+
+// ---------------------------------------------------------------------------
+// EETypeAccessException is an EE exception subclass composed of a type being
+// illegally accessed and the method doing the access
+// ---------------------------------------------------------------------------
+
+class EETypeAccessException : public EEException
+{
+ friend bool DebugIsEECxxExceptionPointer(void* pv);
+
+ private:
+ MethodTable *m_pMT;
+ MethodDesc *m_pAccessingMD;
+ SString m_additionalContext;
+ UINT m_messageID;
+
+ public:
+ EETypeAccessException(MethodTable *pMT);
+ EETypeAccessException(MethodTable *pMT, MethodDesc *pAccessingMD, const SString &additionalContext, UINT messageID);
+
+ BOOL GetThrowableMessage(SString &result);
+ virtual BOOL IsDomainBound() {return TRUE;};
+ protected:
+
+ virtual Exception *CloneHelper()
+ {
+ WRAPPER_NO_CONTRACT;
+ return new EETypeAccessException(m_pMT, m_pAccessingMD, m_additionalContext, m_messageID);
+ }
+
+private:
+#ifdef _DEBUG
+ EETypeAccessException()
+ {
+ // Used only for DebugIsEECxxExceptionPointer to get the vtable pointer.
+ // We need a variant which does not allocate memory.
+ }
+#endif // _DEBUG
+};
+
+// ---------------------------------------------------------------------------
+// EEArgumentException is an EE exception subclass representing a bad argument
+// exception
+// ---------------------------------------------------------------------------
+
+class EEArgumentException : public EEException
+{
+ friend bool DebugIsEECxxExceptionPointer(void* pv);
+
+ private:
+ InlineSString<32> m_argumentName;
+ InlineSString<32> m_resourceName;
+
+ public:
+ EEArgumentException(RuntimeExceptionKind reKind, LPCWSTR pArgName,
+ LPCWSTR wszResourceName);
+
+ // @todo: GetMessage
+
+ OBJECTREF CreateThrowable();
+
+ protected:
+
+ virtual Exception *CloneHelper()
+ {
+ WRAPPER_NO_CONTRACT;
+ return new EEArgumentException(m_kind, m_argumentName, m_resourceName);
+ }
+
+private:
+#ifdef _DEBUG
+ EEArgumentException()
+ {
+ // Used only for DebugIsEECxxExceptionPointer to get the vtable pointer.
+ // We need a variant which does not allocate memory.
+ }
+#endif // _DEBUG
+};
+
+// ---------------------------------------------------------------------------
+// EETypeLoadException is an EE exception subclass representing a type loading
+// error
+// ---------------------------------------------------------------------------
+
+class EETypeLoadException : public EEException
+{
+ friend bool DebugIsEECxxExceptionPointer(void* pv);
+
+ private:
+ InlineSString<64> m_fullName;
+ SString m_pAssemblyName;
+ SString m_pMessageArg;
+ UINT m_resIDWhy;
+
+ public:
+ EETypeLoadException(LPCUTF8 pszNameSpace, LPCUTF8 pTypeName,
+ LPCWSTR pAssemblyName, LPCUTF8 pMessageArg, UINT resIDWhy);
+
+ EETypeLoadException(LPCWSTR pFullTypeName,
+ LPCWSTR pAssemblyName, LPCUTF8 pMessageArg, UINT resIDWhy);
+
+ // virtual overrides
+ void GetMessage(SString &result);
+ OBJECTREF CreateThrowable();
+
+ protected:
+
+ virtual Exception *CloneHelper()
+ {
+ WRAPPER_NO_CONTRACT;
+ return new EETypeLoadException(m_fullName, m_pAssemblyName, m_pMessageArg, m_resIDWhy);
+ }
+
+ private:
+ EETypeLoadException(const InlineSString<64> &fullName, LPCWSTR pAssemblyName,
+ const SString &pMessageArg, UINT resIDWhy)
+ : EEException(kTypeLoadException),
+ m_fullName(fullName),
+ m_pAssemblyName(pAssemblyName),
+ m_pMessageArg(pMessageArg),
+ m_resIDWhy(resIDWhy)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+
+#ifdef _DEBUG
+ EETypeLoadException()
+ {
+ // Used only for DebugIsEECxxExceptionPointer to get the vtable pointer.
+ // We need a variant which does not allocate memory.
+ }
+#endif // _DEBUG
+};
+
+// ---------------------------------------------------------------------------
+// EEFileLoadException is an EE exception subclass representing a file loading
+// error
+// ---------------------------------------------------------------------------
+
+class EEFileLoadException : public EEException
+{
+ friend bool DebugIsEECxxExceptionPointer(void* pv);
+
+ private:
+ SString m_name;
+#ifdef FEATURE_FUSION
+ IFusionBindLog *m_pFusionLog;
+#else
+ void *m_pFusionLog;
+#endif
+ HRESULT m_hr;
+
+
+ public:
+
+#ifdef FEATURE_FUSION
+ EEFileLoadException(const SString &name, HRESULT hr, IFusionBindLog *pFusionLog = NULL, Exception *pInnerException = NULL);
+#else
+ EEFileLoadException(const SString &name, HRESULT hr, void *pFusionLog = NULL, Exception *pInnerException = NULL);
+#endif
+ ~EEFileLoadException();
+
+ // virtual overrides
+ HRESULT GetHR()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_hr;
+ }
+ void GetMessage(SString &result);
+ void GetName(SString &result);
+ OBJECTREF CreateThrowable();
+
+ static RuntimeExceptionKind GetFileLoadKind(HRESULT hr);
+#ifdef FEATURE_FUSION
+ static void DECLSPEC_NORETURN Throw(AssemblySpec *pSpec, IFusionBindLog *pFusionLog, HRESULT hr, Exception *pInnerException = NULL);
+ static void DECLSPEC_NORETURN Throw(IAssembly *pIAssembly, IHostAssembly *pIHostAssembly, HRESULT hr, Exception *pInnerException = NULL);
+#endif
+ static void DECLSPEC_NORETURN Throw(AssemblySpec *pSpec, HRESULT hr, Exception *pInnerException = NULL);
+ static void DECLSPEC_NORETURN Throw(PEFile *pFile, HRESULT hr, Exception *pInnerException = NULL);
+ static void DECLSPEC_NORETURN Throw(LPCWSTR path, HRESULT hr, Exception *pInnerException = NULL);
+ static void DECLSPEC_NORETURN Throw(PEAssembly *parent, const void *memory, COUNT_T size, HRESULT hr, Exception *pInnerException = NULL);
+ static BOOL CheckType(Exception* ex); // typeof(EEFileLoadException)
+
+ protected:
+ virtual Exception *CloneHelper()
+ {
+ WRAPPER_NO_CONTRACT;
+ return new EEFileLoadException(m_name, m_hr, m_pFusionLog);
+ }
+
+ private:
+#ifdef _DEBUG
+ EEFileLoadException() : m_pFusionLog(NULL)
+ {
+ // Used only for DebugIsEECxxExceptionPointer to get the vtable pointer.
+ // We need a variant which does not allocate memory.
+ }
+#endif // _DEBUG
+
+ void SetFileName(const SString &fileName, BOOL removePath);
+};
+
+// -------------------------------------------------------------------------------------------------------
+// Throw/catch macros. These are derived from the generic EXCEPTION macros,
+// but add extra functionality for cleaning up thread state on catches
+//
+// Usage:
+// EX_TRY
+// {
+// EX_THROW(EEMessageException, (kind, L"Failure message"));
+// }
+// EX_CATCH
+// {
+// EX_RETHROW;
+// }
+// EX_END_CATCH(RethrowTerminalExceptions or RethrowCorruptingExceptions)
+// --------------------------------------------------------------------------------------------------------
+
+// In DAC builds, we don't want to override the normal utilcode exception handling.
+// We're not actually running in the CLR, but we may need access to some CLR-exception
+// related data structures elsewhere in this header file in order to analyze CLR
+// exceptions that occurred in the target.
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+
+#define GET_THROWABLE() CLRException::GetThrowableFromException(GET_EXCEPTION())
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+
+// For the VM folder, we redefine SET_CE_RETHROW_FLAG_FOR_EX_CATCH to also check the
+// corruption severity when deciding whether to rethrow them or not.
+//
+// We also check the global override flag in case it has been set to force pre-V4 behaviour.
+//
+// Doing the checks for "DidCatchSO()" and "DidCatchCxx()" will ensure that we check for
+// corruption severity only if the last exception was a managed exception that could have been rethrown in the VM.
+// When "(!__state.DidCatchSO()) && (!__state.DidCatchCxx())" is true, it implies we are dealing with a managed exception
+// inside the VM that is represented by the CLRLastThrownObjectException instance (see EX_TRY/EX_CATCH implementation in VM
+// folder to see how CLRLastThrownObjectException is used).
+//
+// This macro also supports the following scenarios:
+//
+// Scenario 1
+// ----------
+//
+// [VM1] -> [VM2] -> <managed code>
+//
+// If a managed exception is swallowed by an EX_CATCH in native function VM2, which then returns back
+// to native function VM1 that throws, for example, a VM C++ exception, an EX_CATCH(RethrowCorruptingExceptions)
+// in VM1 that catches the C++ exception will not rethrow since the last exception was not a managed CSE but
+// a C++ exception.
+//
+// A variation of this is for VM2 to return to VM1, which then calls VM3; VM3 throws a VM C++ exception that
+// reaches VM1's EX_CATCH(RethrowCorruptingExceptions). VM1 shouldn't be rethrowing the exception in such a case.
+//
+// Scenario 2
+// ----------
+//
+// [VM1 - RethrowCSE] -> [VM2 - RethrowCSE] -> [VM3 - RethrowCSE] -> <managed code>
+//
+// When managed code throws a CSE (e.g. TargetInvocationException flagged as CSE), [VM3] will rethrow it and we will
+// enter EX_CATCH in VM2 which is supposed to rethrow it as well. But if the implementation of EX_CATCH in VM2 throws
+// another VM C++ exception (e.g. EEFileLoadException) *before* rethrow policy is applied, control will reach EX_CATCH
+// in VM1 that *shouldn't* rethrow (even though it has RethrowCSE as the policy) since the last exception was a VM C++
+// exception.
+//
+// Scenario 3
+// ----------
+//
+// This is about the VM throwing a managed exception that gets handled within the VM, with or without the CLR's
+// managed code exception handler coming into the picture.
+//
+// This is explained in detail (along with relevant changes) in the implementation of RaiseTheException (in excep.cpp).
+
+#undef SET_CE_RETHROW_FLAG_FOR_EX_CATCH
+#define SET_CE_RETHROW_FLAG_FOR_EX_CATCH(expr) ((expr == TRUE) && \
+ (g_pConfig->LegacyCorruptedStateExceptionsPolicy() == false) && \
+ (CEHelper::IsProcessCorruptedStateException(GetCurrentExceptionCode(), FALSE) || \
+ ((!__state.DidCatchSO()) && (!__state.DidCatchCxx()) && \
+ CEHelper::IsLastActiveExceptionCorrupting(TRUE))))
+
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+#undef EX_TRY
+#define EX_TRY \
+ EX_TRY_CUSTOM(CLRException::HandlerState, (::GetThreadNULLOk()), CLRLastThrownObjectException)
+
+// Faster version with thread, skipping GetThread call
+#define EX_TRY_THREAD(pThread) \
+ EX_TRY_CUSTOM(CLRException::HandlerState, (pThread, CLRException::HandlerState::ThreadIsNotNull), CLRLastThrownObjectException)
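+
+// Illustrative sketch (hypothetical caller): when a non-NULL Thread* is already in
+// hand, EX_TRY_THREAD avoids the GetThread() lookup that EX_TRY performs:
+//
+//     Thread *pThread = GetThread();
+//     _ASSERTE(pThread != NULL);
+//     EX_TRY_THREAD(pThread);
+//     {
+//         DoWorkThatMayThrow();   // hypothetical helper
+//     }
+//     EX_CATCH
+//     {
+//     }
+//     EX_END_CATCH(SwallowAllExceptions)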
+
+#if defined(_DEBUG)
+ // Redefine GET_EXCEPTION to validate CLRLastThrownObjectException as much as possible.
+ #undef GET_EXCEPTION
+ #define GET_EXCEPTION() (__pException == NULL ? __defaultException.Validate() : __pException.GetValue())
+#endif // _DEBUG
+
+// When we throw an exception, we need to stay in SO-intolerant state and
+// probe for sufficient stack so that we don't SO during the processing.
+#undef HANDLE_SO_TOLERANCE_FOR_THROW
+#define HANDLE_SO_TOLERANCE_FOR_THROW STACK_PROBE_FOR_THROW(GetThread());
+
+LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv);
+
+// Re-define the macro to add automatic restoration of the guard page to PAL_EXCEPT and PAL_EXCEPT_FILTER and
+// friends. Note: RestoreGuardPage will only do work if the guard page is not present.
+#undef PAL_SEH_RESTORE_GUARD_PAGE
+#define PAL_SEH_RESTORE_GUARD_PAGE \
+ if (__exCode == STATUS_STACK_OVERFLOW) \
+ { \
+ Thread *__pThread = GetThread(); \
+ if (__pThread != NULL) \
+ { \
+ __pThread->RestoreGuardPage(); \
+ } \
+ }
+
+#undef EX_TRY_NOCATCH
+#define EX_TRY_NOCATCH(ParamType, paramDef, paramRef) \
+ PAL_TRY(ParamType, __EXparam, paramRef) \
+ { \
+ CLRException::HandlerState __state(::GetThreadNULLOk()); \
+ PAL_TRY(ParamType, paramDef, __EXparam) \
+ {
+
+#undef EX_END_NOCATCH
+#define EX_END_NOCATCH \
+ ; \
+ } \
+ PAL_FINALLY \
+ { \
+ __state.CleanupTry(); \
+ } \
+ PAL_ENDTRY \
+ } \
+ PAL_EXCEPT_FILTER(CLRNoCatchHandler) \
+ { \
+ } \
+ PAL_ENDTRY
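+
+// Illustrative sketch (hypothetical types and helper): EX_TRY_NOCATCH runs a
+// guarded body with a caller-supplied parameter, cleans up the handler state in
+// the PAL_FINALLY, and lets CLRNoCatchHandler filter rather than catch:
+//
+//     struct WorkerArgs { int value; } args = { 42 };
+//     EX_TRY_NOCATCH(WorkerArgs *, pArgs, &args)
+//     {
+//         DoGuardedWork(pArgs->value);    // hypothetical helper
+//     }
+//     EX_END_NOCATCH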
+
+//
+// We need a way to identify an exception in managed code that is rethrown from a new exception in managed code
+// when we get into our managed EH logic. Currently, we do that by checking the GC mode. If a thread has preemptive
+// GC enabled, but the IP of the exception is in managed code, then it must be a rethrow from unmanaged code
+// (including CLR code.) Therefore, we toggle the GC mode before the rethrow to indicate that. Note: we don't do
+// this if we've caught one of our internal C++ Exception objects: by definition, those don't come from managed
+// code, and this allows us to continue to use EX_RETHROW in no-trigger regions.
+//
+#undef EX_RETHROW
+#define EX_RETHROW \
+ do \
+ { \
+ /* don't embed file names in retail to save space and avoid IP */ \
+ /* a findstr /n will allow you to locate it in a pinch */ \
+ STRESS_LOG1(LF_EH, LL_INFO100, \
+ "EX_RETHROW " INDEBUG(__FILE__) " line %d\n", __LINE__); \
+ __pException.SuppressRelease(); \
+ if ((!__state.DidCatchCxx()) && (GetThread() != NULL)) \
+ { \
+ if (GetThread()->PreemptiveGCDisabled()) \
+ { \
+ LOG((LF_EH, LL_INFO10, "EX_RETHROW: going preemptive\n")); \
+ GetThread()->EnablePreemptiveGC(); \
+ } \
+ } \
+ PAL_CPP_RETHROW; \
+ } while (0)
+
+//
+// Note: we only restore the guard page if we did _not_ catch a C++ exception, since a SO exception is a SEH
+// exception.
+//
+// We also need to restore the SO tolerance state, including restoring the cookie for the current stack guard.
+//
+// For VM code EX_CATCH calls CLRException::HandlerState::SetupCatch().
+// When stack guards are disabled we will tear down the process in
+// CLRException::HandlerState::SetupCatch() if there is a StackOverflow.
+// So we should not reach EX_ENDTRY when there is a StackOverflow.
+// This change cannot be done in ex.h, as for all other code
+// CLRException::HandlerState::SetupCatch() is not called; rather,
+// Exception::HandlerState::SetupCatch() is called, which is a no-op.
+//
+#undef EX_ENDTRY
+#define EX_ENDTRY \
+ PAL_CPP_ENDTRY \
+ SO_INFRASTRUCTURE_CODE(if (__state.DidCatch()) { RESTORE_SO_TOLERANCE_STATE; }) \
+ SO_INFRASTRUCTURE_CODE(if (__state.DidCatchSO()) { HANDLE_STACKOVERFLOW_AFTER_CATCH; }) \
+ NO_SO_INFRASTRUCTURE_CODE_ASSERTE(!__state.DidCatchSO()) \
+
+
+// CLRException::GetErrorInfo below invokes GetComIPFromObjectRef
+// that invokes ObjHeader::GetSyncBlock which has the INJECT_FAULT contract.
+//
+// This EX_CATCH_HRESULT implementation can be used in functions
+// that have FORBID_FAULT contracts.
+//
+// However, failure due to OOM (or any other potential exception) in GetErrorInfo
+// implies that we couldn't get the interface pointer from the objectRef, and NULL
+// would be returned.
+//
+// Thus, the scoped use of FAULT_NOT_FATAL macro.
+#undef EX_CATCH_HRESULT
+#define EX_CATCH_HRESULT(_hr) \
+ EX_CATCH \
+ { \
+ (_hr) = GET_EXCEPTION()->GetHR(); \
+ { \
+ FAULT_NOT_FATAL(); \
+ HRESULT hrErrorInfo = GET_EXCEPTION()->SetErrorInfo(); \
+ if (FAILED(hrErrorInfo)) \
+ { \
+ (_hr) = hrErrorInfo; \
+ } \
+ } \
+ _ASSERTE(FAILED(_hr)); \
+ } \
+ EX_END_CATCH(SwallowAllExceptions)
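+
+// Illustrative sketch (hypothetical helper): the typical pattern converts any
+// escaping exception into a failure HRESULT at an API boundary:
+//
+//     HRESULT hr = S_OK;
+//     EX_TRY
+//     {
+//         DoWorkThatMayThrow();   // hypothetical helper
+//     }
+//     EX_CATCH_HRESULT(hr);
+//     return hr;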
+
+#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
+
+// When collecting dumps, we need to ignore errors unless the user cancels.
+#define EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED \
+ EX_CATCH \
+ { \
+ /* Swallow the exception and keep going unless COR_E_OPERATIONCANCELED */ \
+ /* was thrown. Used when generating dumps, where a rethrow will cancel the dump. */ \
+ } \
+ EX_END_CATCH(RethrowCancelExceptions)
+
+// Only use this version to wrap single source lines, or it makes debugging painful.
+#define CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED(sourceCode) \
+ EX_TRY \
+ { \
+ sourceCode \
+ } \
+ EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED
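+
+// Illustrative sketch (hypothetical helper): wrapping a single dump-writing
+// statement so that only a user cancellation propagates:
+//
+//     CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED
+//         ( WriteDumpMemoryRegion(pAddr, cbSize); )   // hypothetical helper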
+
+
+//==============================================================================
+// High-level macros for common uses of EX_TRY. Try using these rather
+// than the raw EX_TRY constructs.
+//==============================================================================
+
+//===================================================================================
+// Macro for defining external entrypoints such as COM interop boundaries.
+// The boundary will catch all exceptions (including terminals) and convert
+// them into HR/IErrorInfo pairs as appropriate.
+//
+// Usage:
+//
+// HRESULT hr; ;; BEGIN will initialize HR
+// BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+// <do managed stuff> ;; this part will execute in cooperative GC mode
+// END_EXTERNAL_ENTRYPOINT
+// return hr;
+//
+// Comments:
+// The BEGIN macro will set up a Thread if necessary. It should only be called
+// in preemptive mode. If you are calling it from cooperative mode, this implies
+// we are executing "external" code in cooperative mode. The Reentrancy MDA
+// complains about this.
+//
+// Only use this macro for actual boundaries between CLR and
+// outside unmanaged code. If you want to connect internal pieces
+// of CLR code, use EX_TRY instead.
+//===================================================================================
+#ifdef MDA_SUPPORTED
+NOINLINE BOOL HasIllegalReentrancyRare();
+#define HAS_ILLEGAL_REENTRANCY() (NULL != MDA_GET_ASSISTANT(Reentrancy) && HasIllegalReentrancyRare())
+#else
+#define HAS_ILLEGAL_REENTRANCY() false
+#endif
+
+#define BEGIN_EXTERNAL_ENTRYPOINT(phresult) \
+ { \
+ HRESULT *__phr = (phresult); \
+ *__phr = S_OK; \
+ _ASSERTE(GetThread() == NULL || \
+ !GetThread()->PreemptiveGCDisabled()); \
+ if (HAS_ILLEGAL_REENTRANCY()) \
+ { \
+ *__phr = COR_E_ILLEGAL_REENTRANCY; \
+ } \
+ else \
+ if (!CanRunManagedCode()) \
+ { \
+ *__phr = E_PROCESS_SHUTDOWN_REENTRY; \
+ } \
+ else \
+ { \
+ MAKE_CURRENT_THREAD_AVAILABLE_EX(GetThreadNULLOk()); \
+ if (CURRENT_THREAD == NULL) \
+ { \
+ CURRENT_THREAD = SetupThreadNoThrow(__phr); \
+ } \
+ if (CURRENT_THREAD != NULL) \
+ { \
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(CURRENT_THREAD, *__phr = COR_E_STACKOVERFLOW); \
+ EX_TRY_THREAD(CURRENT_THREAD); \
+ { \
+
+#define END_EXTERNAL_ENTRYPOINT \
+ } \
+ EX_CATCH_HRESULT(*__phr); \
+ END_SO_INTOLERANT_CODE; \
+ } \
+ } \
+ } \
+
+// This macro should be used at the entry points (e.g. COM interop boundaries)
+// where CE's are not expected to get swallowed.
+#define END_EXTERNAL_ENTRYPOINT_RETHROW_CORRUPTING_EXCEPTIONS_EX(fCond) \
+ } \
+ EX_CATCH \
+ { \
+ *__phr = GET_EXCEPTION()->GetHR(); \
+ } \
+ EX_END_CATCH(RethrowCorruptingExceptionsEx(fCond)); \
+ END_SO_INTOLERANT_CODE; \
+ } \
+ } \
+ } \
+
+// This macro should be used at the entry points (e.g. COM interop boundaries)
+// where CE's are not expected to get swallowed.
+#define END_EXTERNAL_ENTRYPOINT_RETHROW_CORRUPTING_EXCEPTIONS \
+ END_EXTERNAL_ENTRYPOINT_RETHROW_CORRUPTING_EXCEPTIONS_EX(TRUE)
+
+
+
+//==============================================================================
+
+// ---------------------------------------------------------------------------
+// Inline implementations. Pay no attention to that man behind the curtain.
+// ---------------------------------------------------------------------------
+
+inline CLRException::CLRException()
+ : m_throwableHandle(NULL)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+inline void CLRException::SetThrowableHandle(OBJECTHANDLE throwable)
+{
+ STRESS_LOG1(LF_EH, LL_INFO100, "in CLRException::SetThrowableHandle: obj = %x\n", throwable);
+ m_throwableHandle = throwable;
+}
+
+inline EEException::EEException(RuntimeExceptionKind kind)
+ : m_kind(kind)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+inline EEException::EEException(HRESULT hr)
+ : m_kind(GetKindFromHR(hr))
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+inline EEMessageException::EEMessageException(HRESULT hr)
+ : EEException(GetKindFromHR(hr)),
+ m_hr(hr),
+ m_resID(0)
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_arg1.Printf("%.8x", hr);
+}
+
+inline EEMessageException::EEMessageException(HRESULT hr, bool fUseCOMException)
+ : EEException(GetKindFromHR(hr, !fUseCOMException)),
+ m_hr(hr),
+ m_resID(0)
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_arg1.Printf("%.8x", hr);
+}
+
+//-----------------------------------------------------------------------------
+// Constructor with lots of defaults (to 0 / null)
+// kind -- "clr kind" of the exception
+// resid -- resource id for message
+// strings -- substitution text for message
+inline EEMessageException::EEMessageException(RuntimeExceptionKind kind, UINT resID, LPCWSTR szArg1, LPCWSTR szArg2,
+ LPCWSTR szArg3, LPCWSTR szArg4, LPCWSTR szArg5, LPCWSTR szArg6)
+ : EEException(kind),
+ m_hr(EEException::GetHRFromKind(kind)),
+ m_resID(resID),
+ m_arg1(szArg1),
+ m_arg2(szArg2),
+ m_arg3(szArg3),
+ m_arg4(szArg4),
+ m_arg5(szArg5),
+ m_arg6(szArg6)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+//-----------------------------------------------------------------------------
+// Constructor with lots of defaults (to 0 / null)
+// hr -- hresult that led to this exception
+// resid -- resource id for message
+// strings -- substitution text for message
+inline EEMessageException::EEMessageException(HRESULT hr, UINT resID, LPCWSTR szArg1, LPCWSTR szArg2, LPCWSTR szArg3,
+ LPCWSTR szArg4, LPCWSTR szArg5, LPCWSTR szArg6)
+ : EEException(GetKindFromHR(hr)),
+ m_hr(hr),
+ m_resID(resID),
+ m_arg1(szArg1),
+ m_arg2(szArg2),
+ m_arg3(szArg3),
+ m_arg4(szArg4),
+ m_arg5(szArg5),
+ m_arg6(szArg6)
+{
+}
+
+//-----------------------------------------------------------------------------
+// Constructor with no defaults
+// kind -- "clr kind" of the exception
+// hr -- hresult that led to this exception
+// resid -- resource id for message
+// strings -- substitution text for message
+inline EEMessageException::EEMessageException(RuntimeExceptionKind kind, HRESULT hr, UINT resID, LPCWSTR szArg1,
+ LPCWSTR szArg2, LPCWSTR szArg3, LPCWSTR szArg4, LPCWSTR szArg5,
+ LPCWSTR szArg6)
+ : EEException(kind),
+ m_hr(hr),
+ m_resID(resID),
+ m_arg1(szArg1),
+ m_arg2(szArg2),
+ m_arg3(szArg3),
+ m_arg4(szArg4),
+ m_arg5(szArg5),
+ m_arg6(szArg6)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+
+inline EEResourceException::EEResourceException(RuntimeExceptionKind kind, const SString &resourceName)
+ : EEException(kind),
+ m_resourceName(resourceName)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+
+inline EEFieldException::EEFieldException(FieldDesc *pField)
+ : EEException(kFieldAccessException),
+ m_pFD(pField),
+ m_pAccessingMD(NULL),
+ m_messageID(0)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+inline EEFieldException::EEFieldException(FieldDesc *pField, MethodDesc *pAccessingMD, const SString &additionalContext, UINT messageID)
+ : EEException(kFieldAccessException),
+ m_pFD(pField),
+ m_pAccessingMD(pAccessingMD),
+ m_additionalContext(additionalContext),
+ m_messageID(messageID)
+{
+}
+
+inline EEMethodException::EEMethodException(MethodDesc *pMethod)
+ : EEException(kMethodAccessException),
+ m_pMD(pMethod),
+ m_pAccessingMD(NULL),
+ m_messageID(0)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+inline EEMethodException::EEMethodException(MethodDesc *pMethod, MethodDesc *pAccessingMD, const SString &additionalContext, UINT messageID)
+ : EEException(kMethodAccessException),
+ m_pMD(pMethod),
+ m_pAccessingMD(pAccessingMD),
+ m_additionalContext(additionalContext),
+ m_messageID(messageID)
+{
+}
+
+inline EETypeAccessException::EETypeAccessException(MethodTable *pMT)
+ : EEException(kTypeAccessException),
+ m_pMT(pMT),
+ m_pAccessingMD(NULL),
+ m_messageID(0)
+{
+}
+
+inline EETypeAccessException::EETypeAccessException(MethodTable *pMT, MethodDesc *pAccessingMD, const SString &additionalContext, UINT messageID)
+ : EEException(kTypeAccessException),
+ m_pMT(pMT),
+ m_pAccessingMD(pAccessingMD),
+ m_additionalContext(additionalContext),
+ m_messageID(messageID)
+{
+}
+
+inline EEArgumentException::EEArgumentException(RuntimeExceptionKind reKind, LPCWSTR pArgName,
+ LPCWSTR wszResourceName)
+ : EEException(reKind),
+ m_argumentName(pArgName),
+ m_resourceName(wszResourceName)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+
+class ObjrefException : public CLRException
+{
+ friend bool DebugIsEECxxExceptionPointer(void* pv);
+
+ public:
+
+ ObjrefException();
+ ObjrefException(OBJECTREF throwable);
+
+ private:
+ static const int c_type = 0x4F522020; // 'OR '
+
+ public:
+ // Dynamic type query for catchers
+ static int GetType() {LIMITED_METHOD_CONTRACT; return c_type; }
+ virtual int GetInstanceType() { LIMITED_METHOD_CONTRACT; return c_type; }
+ BOOL IsType(int type) { WRAPPER_NO_CONTRACT; return type == c_type || CLRException::IsType(type); }
+
+protected:
+ virtual Exception *CloneHelper()
+ {
+ WRAPPER_NO_CONTRACT;
+ return new ObjrefException();
+ }
+
+ virtual Exception *DomainBoundCloneHelper();
+};
+
+
+class CLRLastThrownObjectException : public CLRException
+{
+ friend bool DebugIsEECxxExceptionPointer(void* pv);
+
+ public:
+ CLRLastThrownObjectException();
+
+ private:
+ static const int c_type = 0x4C544F20; // 'LTO '
+
+ public:
+ // Dynamic type query for catchers
+ static int GetType() {LIMITED_METHOD_CONTRACT; return c_type; }
+ virtual int GetInstanceType() { LIMITED_METHOD_CONTRACT; return c_type; }
+ BOOL IsType(int type) { WRAPPER_NO_CONTRACT; return type == c_type || CLRException::IsType(type); }
+
+ #if defined(_DEBUG)
+ CLRLastThrownObjectException* Validate();
+ #endif // _DEBUG
+
+ protected:
+ virtual Exception *CloneHelper();
+
+ virtual Exception *DomainBoundCloneHelper();
+
+ virtual OBJECTREF CreateThrowable();
+};
+
+// Returns true if the HRESULT maps to the RuntimeExceptionKind (hr => kind).
+bool IsHRESULTForExceptionKind(HRESULT hr, RuntimeExceptionKind kind);
+
+#endif // _CLREX_H_
+
diff --git a/src/vm/clrex.inl b/src/vm/clrex.inl
new file mode 100644
index 0000000000..f4cfd23197
--- /dev/null
+++ b/src/vm/clrex.inl
@@ -0,0 +1,52 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// ---------------------------------------------------------------------------
+// CLREx.inl
+// ---------------------------------------------------------------------------
+
+
+#ifndef _CLREX_INL_
+#define _CLREX_INL_
+
+inline CLRException::HandlerState::HandlerState(Thread * pThread)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ m_pThread = pThread;
+ if (m_pThread == NULL)
+ {
+ m_pFrame = NULL;
+ m_fPreemptiveGCDisabled = FALSE;
+ }
+ else
+ {
+ m_pFrame = m_pThread->GetFrame();
+ m_fPreemptiveGCDisabled = m_pThread->PreemptiveGCDisabled();
+ }
+}
+
+inline CLRException::HandlerState::HandlerState(Thread * pThread, CLRException::HandlerState::NonNullThread dummy)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+ STATIC_CONTRACT_SO_TOLERANT;
+ _ASSERTE(pThread != NULL);
+
+ m_pThread = pThread;
+ m_pFrame = m_pThread->GetFrame();
+ m_fPreemptiveGCDisabled = m_pThread->PreemptiveGCDisabled();
+}
+
+
+#endif // _CLREX_INL_
diff --git a/src/vm/clrprivbinderappx.cpp b/src/vm/clrprivbinderappx.cpp
new file mode 100644
index 0000000000..6892aa20a7
--- /dev/null
+++ b/src/vm/clrprivbinderappx.cpp
@@ -0,0 +1,1058 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#include "common.h" // precompiled header
+#include "assemblyusagelogmanager.h"
+
+//=====================================================================================================================
+#include "clrprivbinderappx.h"
+//CLRPrivBinderAppX * CLRPrivBinderAppX::s_pSingleton = nullptr;
+SPTR_IMPL_INIT(CLRPrivBinderAppX, CLRPrivBinderAppX, s_pSingleton, nullptr);
+
+#ifndef DACCESS_COMPILE
+//=====================================================================================================================
+#include "appxutil.h"
+#include "clrprivbinderutil.h"
+#include "fusionlogging.h"
+#include "clrprivtypecachewinrt.h"
+#include "fusionp.h"
+
+using namespace CLRPrivBinderUtil;
+
+//=====================================================================================================================
+CLRPrivBinderAppX::CLRPrivBinderAppX(LPCWSTR * rgwzAltPath, UINT cAltPaths)
+ : m_MapReadLock(CrstCLRPrivBinderMaps,
+ static_cast<CrstFlags>(CRST_DEBUG_ONLY_CHECK_FORBID_SUSPEND_THREAD |
+ CRST_GC_NOTRIGGER_WHEN_TAKEN |
+ CRST_DEBUGGER_THREAD |
+ // FindAssemblyBySpec, which needs to take the m_MapReadLock, complicates matters.
+ // But FindAssemblyBySpec cannot switch to preemptive mode, as that would trigger
+ // a GC. Since this is a leaf lock, and since it does not make any calls out of
+ // the runtime, this lock can be taken in cooperative mode if the locked scope is
+ // also marked as ForbidSuspend (since FindAssemblyBySpec can be called by
+ // the debugger and the profiler). TODO: it would be nice to be able to specify
+ // this flag for just the specific places where it is necessary rather than for
+ // the lock as a whole.
+ CRST_UNSAFE_ANYMODE)),
+ m_MapWriteLock(CrstCLRPrivBinderMapsAdd, CRST_DEFAULT),
+ m_cAltPaths(cAltPaths),
+ m_fCanUseNativeImages(TRUE),
+ m_pParentBinder(nullptr),
+ m_pFusionBinder(nullptr),
+ m_pWinRTBinder(nullptr),
+
+ // Note: the first CLRPrivBinderAppX object is created prior to runtime startup, so this code cannot call
+ // AppX::IsAppXDesignMode; however, FindAssemblyBySpec cannot call IsAppXDesignMode either because that would
+ // cause a GC_TRIGGERS violation for the GetAssemblyIfLoaded scenario. However this doesn't matter because
+ // the assembly map will be empty until at least the first call to BindAssemblyByName is made, at which point
+ // a call to IsAppXDesignMode can be made. Thus, we default to the most conservative setting and overwrite this
+ // value in BindAssemblyByName.
+ m_fusionBindingScope(CLRPrivBinderFusion::kBindingScope_FrameworkSubset)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Copy altpaths
+ if (cAltPaths > 0)
+ {
+ m_rgAltPathsHolder = new NewArrayHolder<WCHAR>[cAltPaths];
+ m_rgAltPaths = new WCHAR *[cAltPaths];
+
+ for (UINT iAltPath = 0; iAltPath < cAltPaths; iAltPath++)
+ {
+ size_t cchAltPath = wcslen(rgwzAltPath[iAltPath]);
+ m_rgAltPathsHolder[iAltPath] = m_rgAltPaths[iAltPath] = new WCHAR[cchAltPath + 1];
+ wcscpy_s(m_rgAltPaths[iAltPath], cchAltPath + 1, rgwzAltPath[iAltPath]);
+ }
+ }
+
+#ifdef FEATURE_FUSION
+ IfFailThrow(RuntimeCreateCachingILFingerprintFactory(&m_pFingerprintFactory));
+#endif
+}
+
+//=====================================================================================================================
+CLRPrivBinderFusion::BindingScope CLRPrivBinderAppX::GetFusionBindingScope()
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_fusionBindingScope = (AppX::IsAppXDesignMode() || (m_pParentBinder != nullptr))
+ ? CLRPrivBinderFusion::kBindingScope_FrameworkAll
+ : CLRPrivBinderFusion::kBindingScope_FrameworkSubset;
+
+ return m_fusionBindingScope;
+}
+
+//=====================================================================================================================
+CLRPrivBinderAppX::~CLRPrivBinderAppX()
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_NameToAssemblyMap.RemoveAll();
+ AssemblyUsageLogManager::UnRegisterBinderFromUsageLog((UINT_PTR)this);
+
+ clr::SafeRelease(m_pWinRTBinder);
+}
+
+//=====================================================================================================================
+CLRPrivBinderAppX *
+CLRPrivBinderAppX::GetOrCreateBinder()
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+
+ if (s_pSingleton == nullptr)
+ {
+ ReleaseHolder<IAssemblyUsageLog> pNewUsageLog;
+ IfFailThrow(AssemblyUsageLogManager::GetUsageLogForContext(W("App"), AppX::GetHeadPackageMoniker(), &pNewUsageLog));
+
+ ReleaseHolder<CLRPrivBinderAppX> pBinder;
+ pBinder = clr::SafeAddRef(new CLRPrivBinderAppX(nullptr, 0));
+
+ pBinder->m_pFusionBinder = clr::SafeAddRef(new CLRPrivBinderFusion());
+
+ CLRPrivTypeCacheWinRT * pWinRtTypeCache = CLRPrivTypeCacheWinRT::GetOrCreateTypeCache();
+ pBinder->m_pWinRTBinder = clr::SafeAddRef(new CLRPrivBinderWinRT(
+ pBinder,
+ pWinRtTypeCache,
+ nullptr, // rgwzAltPath
+ 0, // cAltPaths
+ CLRPrivBinderWinRT::NamespaceResolutionKind_WindowsAPI,
+ TRUE // fCanUseNativeImages
+ ));
+
+ if (InterlockedCompareExchangeT<decltype(s_pSingleton)>(&s_pSingleton, pBinder, nullptr) == nullptr)
+ pBinder.SuppressRelease();
+
+ // Register binder with usagelog infrastructure.
+ UINT_PTR binderId;
+ IfFailThrow(pBinder->GetBinderID(&binderId));
+ IfFailThrow(AssemblyUsageLogManager::RegisterBinderWithUsageLog(binderId, pNewUsageLog));
+
+ // Create and register WinRT usage log
+ ReleaseHolder<IAssemblyUsageLog> pNewWinRTUsageLog;
+ IfFailThrow(AssemblyUsageLogManager::GetUsageLogForContext(W("WinRT"), AppX::GetHeadPackageMoniker(), &pNewWinRTUsageLog));
+
+ UINT_PTR winRTBinderId;
+ IfFailThrow(pBinder->m_pWinRTBinder->GetBinderID(&winRTBinderId));
+ IfFailThrow(AssemblyUsageLogManager::RegisterBinderWithUsageLog(winRTBinderId, pNewWinRTUsageLog));
+ }
+
+ return s_pSingleton;
+}
+
+//=====================================================================================================================
+// Used only for designer binding context
+CLRPrivBinderAppX * CLRPrivBinderAppX::CreateParentedBinder(
+ ICLRPrivBinder * pParentBinder,
+ CLRPrivTypeCacheWinRT * pWinRtTypeCache,
+ LPCWSTR * rgwzAltPath,
+ UINT cAltPaths,
+ BOOL fCanUseNativeImages)
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+
+ ReleaseHolder<CLRPrivBinderAppX> pBinder;
+ pBinder = clr::SafeAddRef(new CLRPrivBinderAppX(rgwzAltPath, cAltPaths));
+
+ pBinder->m_pParentBinder = clr::SafeAddRef(pParentBinder);
+ pBinder->m_fCanUseNativeImages = fCanUseNativeImages;
+
+ // We want to share the FusionBinder with pParentBinder (which bubbles up through the chain of binders to the global AppX binder code:s_pSingleton).
+ // Ideally we would get the FusionBinder from pParentBinder (via casting to a new interface), but it is much easier just to fetch it from
+ // the global AppX binder directly.
+ pBinder->m_pFusionBinder = clr::SafeAddRef(s_pSingleton->GetFusionBinder());
+
+ if (cAltPaths > 0)
+ {
+ pBinder->m_pWinRTBinder = clr::SafeAddRef(new CLRPrivBinderWinRT(
+ pBinder,
+ pWinRtTypeCache,
+ rgwzAltPath,
+ cAltPaths,
+ CLRPrivBinderWinRT::NamespaceResolutionKind_WindowsAPI,
+ fCanUseNativeImages));
+ }
+
+ pBinder.SuppressRelease();
+ return pBinder;
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivBinderAppX::BindAppXAssemblyByNameWorker(
+ IAssemblyName * pIAssemblyName,
+ DWORD dwAppXBindFlags,
+ CLRPrivAssemblyAppX ** ppAssembly)
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+
+ fusion::logging::StatusScope logStatus(0, ID_FUSLOG_BINDING_STATUS_IMMERSIVE, &hr);
+
+ VALIDATE_ARG_RET(pIAssemblyName != nullptr);
+ VALIDATE_ARG_RET((dwAppXBindFlags & ABF_BindIL) == ABF_BindIL);
+ VALIDATE_ARG_RET(ppAssembly != nullptr);
+
+ DWORD dwContentType = AssemblyContentType_Default;
+ IfFailRet(hr = fusion::util::GetProperty(pIAssemblyName, ASM_NAME_CONTENT_TYPE, &dwContentType));
+ if ((hr == S_OK) && (dwContentType != AssemblyContentType_Default))
+ {
+ IfFailRet(CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT);
+ }
+
+ ReleaseHolder<CLRPrivAssemblyAppX> pAssembly;
+
+ // Get the simple name.
+ WCHAR wzSimpleName[_MAX_PATH];
+ DWORD cchSimpleName = _MAX_PATH;
+ IfFailRet(pIAssemblyName->GetName(&cchSimpleName, wzSimpleName));
+
+ { // Look for previous successful bind. Host callouts are now forbidden.
+ ForbidSuspendThreadCrstHolder lock(&m_MapReadLock);
+ pAssembly = clr::SafeAddRef(m_NameToAssemblyMap.Lookup(wzSimpleName));
+ }
+
+ if (pAssembly == nullptr)
+ {
+ ReleaseHolder<ICLRPrivResource> pResourceIL;
+ ReleaseHolder<ICLRPrivResource> pResourceNI;
+
+ // Create assembly identity using the simple name. For successful binds this will be updated
+ // with the full assembly identity in the VerifyBind callback.
+ NewHolder<AssemblyIdentity> pIdentity = new AssemblyIdentity();
+ IfFailRet(pIdentity->Initialize(wzSimpleName));
+
+ //
+ // Check the head package first to see if this matches an EXE, then check
+ // all packages to see if this matches a DLL.
+ //
+ WCHAR wzFilePath[_MAX_PATH];
+ {
+ hr = HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
+
+ if (FAILED(hr))
+ {
+ // Create simple name with .EXE extension
+ WCHAR wzSimpleFileName[_MAX_PATH];
+ wcscpy_s(wzSimpleFileName, NumItems(wzSimpleFileName), wzSimpleName);
+ wcscat_s(wzSimpleFileName, NumItems(wzSimpleFileName), W(".EXE"));
+
+ // Search for the file using the AppX::FindFileInCurrentPackage helper.
+ UINT32 cchFilePath = NumItems(wzFilePath);
+ hr = AppX::FindFileInCurrentPackage(
+ wzSimpleFileName,
+ &cchFilePath,
+ wzFilePath,
+ PACKAGE_FILTER_CLR_DEFAULT,
+ (PCWSTR *)(void *)m_rgAltPaths,
+ m_cAltPaths,
+ m_pParentBinder != NULL ? AppX::FindFindInPackageFlags_SkipCurrentPackageGraph : AppX::FindFindInPackageFlags_None);
+ }
+
+ if (FAILED(hr))
+ {
+ // Create simple name with .DLL extension
+ WCHAR wzSimpleFileName[_MAX_PATH];
+ wcscpy_s(wzSimpleFileName, NumItems(wzSimpleFileName), wzSimpleName);
+ wcscat_s(wzSimpleFileName, NumItems(wzSimpleFileName), W(".DLL"));
+
+ // Search for the file using the AppX::FindFileInCurrentPackage helper.
+ UINT32 cchFilePath = NumItems(wzFilePath);
+ hr = AppX::FindFileInCurrentPackage(
+ wzSimpleFileName,
+ &cchFilePath,
+ wzFilePath,
+ PACKAGE_FILTER_CLR_DEFAULT,
+ (PCWSTR *)(void *)m_rgAltPaths,
+ m_cAltPaths,
+ m_pParentBinder != NULL ? AppX::FindFindInPackageFlags_SkipCurrentPackageGraph : AppX::FindFindInPackageFlags_None);
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ fusion::logging::LogMessage(0, ID_FUSLOG_BINDING_STATUS_FOUND, wzFilePath);
+ }
+ else
+ {
+ // Cache the bind failure result before returning. Careful not to overwrite the bind result with the cache insertion result.
+ HRESULT hrResult = hr;
+ IfFailRet(CacheBindResult(pIdentity, hr));
+ if (hr == S_OK)
+ { // Cache now owns identity object lifetime.
+ pIdentity.SuppressRelease();
+ }
+ hr = hrResult;
+ }
+ IfFailRet(hr);
+ }
+
+ NewHolder<CLRPrivResourcePathImpl> pResourcePath = new CLRPrivResourcePathImpl(wzFilePath);
+ IfFailRet(pResourcePath->QueryInterface(__uuidof(ICLRPrivResource), (LPVOID*)&pResourceIL));
+ pResourcePath.SuppressRelease();
+
+ // Create an IBindResult and provide it to the new CLRPrivAssemblyAppX object.
+ ReleaseHolder<IBindResult> pIBindResult = ToInterface<IBindResult>(
+ new CLRPrivAssemblyBindResultWrapper(pIAssemblyName, wzFilePath, m_pFingerprintFactory));
+
+
+ // Create the new CLRPrivAssemblyAppX object.
+ NewHolder<CLRPrivAssemblyAppX> pAssemblyObj =
+ new CLRPrivAssemblyAppX(pIdentity, this, pResourceIL, pIBindResult);
+
+ //
+        // Check the cache. If another thread beat us to it, use that entry instead; otherwise add the new ICLRPrivAssembly.
+ //
+ do
+ {
+ // Because the read lock must be taken within a ForbidSuspend region, use AddInPhases.
+ if (m_NameToAssemblyMap.CheckAddInPhases<ForbidSuspendThreadCrstHolder, CrstHolder>(
+ pAssemblyObj, m_MapReadLock, m_MapWriteLock, pAssemblyObj.GetValue()))
+ {
+ { // Careful not to allow the cache insertion result to overwrite the bind result.
+ HRESULT hrResult = hr;
+ IfFailRet(CacheBindResult(pIdentity, hr));
+ if (hr == S_OK)
+ { // Cache now owns identity object lifetime, but ~CLRPrivBinderAssembly
+ // can also remove the identity from the cache prior to cache deletion.
+ pIdentity.SuppressRelease();
+ }
+ hr = hrResult;
+ }
+
+ pAssembly = pAssemblyObj.Extract();
+ }
+ else
+ {
+ ForbidSuspendThreadCrstHolder lock(&m_MapReadLock);
+ pAssembly = clr::SafeAddRef(m_NameToAssemblyMap.Lookup(wzSimpleName));
+ }
+ }
+ while (pAssembly == nullptr); // Keep looping until we find the existing one, or add a new one
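+        // Losing the CheckAddInPhases race does not guarantee the retry lookup succeeds: the
+        // winning entry can be Released to zero and removed from the map in the meantime (see
+        // code:CLRPrivAssemblyAppX::Release), which is why this loops instead of asserting.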
+ }
+
+ _ASSERTE(pAssembly != nullptr);
+
+ if (((dwAppXBindFlags & ABF_BindNI) == ABF_BindNI) &&
+ m_fCanUseNativeImages)
+ {
+ //
+ // Look to see if there's a native image available.
+ //
+
+ // Fire BindingNgenPhaseStart ETW event if enabled.
+ {
+ InlineSString<128> ssAssemblyName;
+ FireEtwBindingNgenPhaseStart(
+ (AppDomain::GetCurrentDomain()->GetId().m_dwId),
+ LOADCTX_TYPE_HOSTED,
+ ETWFieldUnused,
+ ETWLoaderLoadTypeNotAvailable,
+ NULL,
+ FusionBind::GetAssemblyNameDisplayName(pIAssemblyName, ssAssemblyName, ASM_DISPLAYF_FULL).GetUnicode(),
+ GetClrInstanceId());
+ }
+
+ ReleaseHolder<IBindResult> pIBindResultIL;
+ IfFailRet(pAssembly->GetIBindResult(&pIBindResultIL));
+ _ASSERTE(pIBindResultIL != nullptr);
+
+ NewArrayHolder<WCHAR> wzZapSet = DuplicateStringThrowing(g_pConfig->ZapSet());
+ NativeConfigData cfgData = {
+ wzZapSet,
+ PEFile::GetNativeImageConfigFlags()
+ };
+
+ IfFailRet(BindToNativeAssembly(
+ pIBindResultIL, &cfgData, static_cast<IBindContext*>(this), fusion::logging::GetCurrentFusionBindLog()));
+
+ // Ensure that the native image found above in BindToNativeAssembly is reported as existing in the CLRPrivAssembly object
+ if (hr == S_OK)
+ {
+ ReleaseHolder<ICLRPrivResource> pNIImageResource;
+ // This will make GetAvailableImageTypes return that a native image exists.
+ IfFailRet(pAssembly->GetImageResource(ASSEMBLY_IMAGE_TYPE_NATIVE, NULL, &pNIImageResource));
+#ifdef _DEBUG
+ DWORD dwImageTypes;
+
+ _ASSERTE(SUCCEEDED(pAssembly->GetAvailableImageTypes(&dwImageTypes)));
+ _ASSERTE((dwImageTypes & ASSEMBLY_IMAGE_TYPE_NATIVE) == ASSEMBLY_IMAGE_TYPE_NATIVE);
+#endif
+ }
+
+ // Fire BindingNgenPhaseEnd ETW event if enabled.
+ {
+ InlineSString<128> ssAssemblyName;
+ FireEtwBindingNgenPhaseEnd(
+ (AppDomain::GetCurrentDomain()->GetId().m_dwId),
+ LOADCTX_TYPE_HOSTED,
+ ETWFieldUnused,
+ ETWLoaderLoadTypeNotAvailable,
+ NULL,
+ FusionBind::GetAssemblyNameDisplayName(pIAssemblyName, ssAssemblyName, ASM_DISPLAYF_FULL).GetUnicode(),
+ GetClrInstanceId());
+ }
+
+        // BindToNativeAssembly can return S_FALSE; normalize it to S_OK so callers are not
+        // misled by a non-standard success code.
+ if (hr == S_FALSE)
+ hr = S_OK;
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ *ppAssembly = pAssembly.Extract();
+ }
+
+ return hr;
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivBinderAppX::BindAppXAssemblyByName(
+ IAssemblyName * pIAssemblyName,
+ DWORD dwAppXBindFlags,
+ ICLRPrivAssembly ** ppPrivAssembly)
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+
+ ReleaseHolder<CLRPrivAssemblyAppX> pAppXAssembly;
+ IfFailRet(BindAppXAssemblyByNameWorker(pIAssemblyName, dwAppXBindFlags, &pAppXAssembly));
+ IfFailRet(pAppXAssembly->QueryInterface(__uuidof(ICLRPrivAssembly), (LPVOID*)ppPrivAssembly));
+
+ return hr;
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivBinderAppX::PreBindAppXAssemblyByName(
+ IAssemblyName * pIAssemblyName,
+ DWORD dwAppXBindFlags,
+ IBindResult ** ppIBindResult)
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET(pIAssemblyName != nullptr);
+ VALIDATE_ARG_RET(ppIBindResult != nullptr);
+
+
+ ReleaseHolder<CLRPrivAssemblyAppX> pAppXAssembly;
+ IfFailRet(BindAppXAssemblyByNameWorker(pIAssemblyName, dwAppXBindFlags, &pAppXAssembly));
+ IfFailRet(pAppXAssembly->GetIBindResult(ppIBindResult));
+
+ return hr;
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivBinderAppX::FindAssemblyBySpec(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END
+
+ HRESULT hr = S_OK;
+
+ AppDomain* pAppDomain = reinterpret_cast<AppDomain*>(pvAppDomain);
+ AssemblySpec* pAssemblySpec = reinterpret_cast<AssemblySpec*>(pvAssemblySpec);
+ VALIDATE_PTR_RET(pAppDomain);
+ VALIDATE_PTR_RET(pAssemblySpec);
+ VALIDATE_PTR_RET(pResult);
+ VALIDATE_PTR_RET(ppAssembly);
+
+ //
+ // Follow the same order as a bind.
+ //
+
+ hr = CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT;
+
+ if (FAILED(hr))
+ {
+ _ASSERTE(m_pFusionBinder != nullptr);
+ hr = m_pFusionBinder->FindFusionAssemblyBySpec(pAppDomain, pAssemblySpec, m_fusionBindingScope, pResult, ppAssembly);
+ }
+
+ // See comment in code:CLRPrivBinderAppX::BindAssemblyByName for explanation of this conditional.
+ if (hr == CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT)
+ {
+ if (FAILED(hr) && (m_pWinRTBinder != nullptr))
+ {
+ hr = m_pWinRTBinder->FindWinRTAssemblyBySpec(pAppDomain, pAssemblySpec, pResult, ppAssembly);
+ }
+
+ if (FAILED(hr))
+ {
+ AssemblyIdentity refId;
+ IfFailRet(refId.Initialize(pAssemblySpec));
+ bool fCheckParent = false;
+
+ { // Check for a previously-recorded bind. Host callouts are now forbidden.
+ ForbidSuspendThreadCrstHolder lock(&m_MapReadLock);
+ BindingRecordMap::element_t const * pKeyVal = m_BindingRecordMap.LookupPtr(&refId);
+
+ if (pKeyVal != nullptr)
+ {
+ //
+ // Previous bind occurred. If a failure result is cached then the binder would
+ // have tried the parent binder (if available) before returning.
+ //
+
+ AssemblyIdentity const & defId(*pKeyVal->Key());
+ BindingRecord const & record(pKeyVal->Value());
+
+ *ppAssembly = nullptr;
+ *pResult = record.hr;
+
+ if (SUCCEEDED(*pResult))
+ {
+ //
+ // Previous bind succeeded. Get the corresponding ICLRPrivAssembly.
+ //
+
+ // Check this binder for a match. Host callouts are now forbidden.
+ CLRPrivAssemblyAppX* pPrivAssembly = m_NameToAssemblyMap.Lookup(defId.Name);
+
+ if (pPrivAssembly == nullptr)
+ {
+ _ASSERTE_MSG(false, "Should never see success value and a null CLRPrivAssemblyAppX pointer.");
+ return (*pResult = E_UNEXPECTED);
+ }
+
+ _ASSERTE(pPrivAssembly->m_pIdentity != nullptr);
+ _ASSERTE(pPrivAssembly->m_pIdentity == &defId);
+
+ // Now check that the version and PKT values are compatible.
+ *pResult = CLRPrivBinderUtil::VerifyBind(refId, *pPrivAssembly->m_pIdentity);
+
+ if (SUCCEEDED(*pResult))
+ {
+ VERIFY(SUCCEEDED(pPrivAssembly->QueryInterface(__uuidof(ICLRPrivAssembly), (LPVOID*)ppAssembly)));
+ }
+
+ return S_OK;
+ }
+ else
+ {
+ //
+ // Previous bind failed. Check the parent binder (if available), but do it outside of this binder's lock.
+ //
+
+ fCheckParent = true;
+ }
+ }
+ else
+ {
+ //
+ // No previous bind occurred. Do not check the parent binder since this could result
+ // in an incorrect bind (if this binder would have bound to a different assembly).
+ //
+
+ return E_FAIL;
+ }
+ }
+
+ if (fCheckParent && m_pParentBinder != nullptr)
+ { // Check the parent (shared designer context) for a match.
+ hr = m_pParentBinder->FindAssemblyBySpec(pAppDomain, pAssemblySpec, pResult, ppAssembly);
+ }
+ }
+ }
+
+ // There are three possibilities upon exit:
+ // 1. Cache lookup failed, in which case FAILED(hr) == true
+ // 2. A binding failure was cached, in which case (1) == false && FAILED(*pResult) == true
+ // 3. A binding success was cached, in which case we must find an assembly:
+ // (1) == false && (2) == false && *ppAssembly != nullptr
+ _ASSERTE(FAILED(hr) || FAILED(*pResult) || *ppAssembly != nullptr);
+ return hr;
+}
+
+//=====================================================================================================================
+// Record the binding result to support cache-based lookups (using ICLRPrivCachedBinder::FindAssemblyBySpec).
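+// Returns S_OK when the identity was inserted (the cache takes ownership of pIdentity, so the
+// caller must SuppressRelease its holder) and S_FALSE when an entry already existed (the caller
+// retains ownership).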
+
+HRESULT CLRPrivBinderAppX::CacheBindResult(
+ AssemblyIdentity * pIdentity, // On success, will assume object lifetime ownership.
+ HRESULT hrResult)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ VALIDATE_PTR_RET(pIdentity);
+
+ // Initialize the binding record.
+ BindingRecord rec = { hrResult };
+ BindingRecordMap::element_t newEntry(pIdentity, rec);
+
+    // Because the read lock must be taken within a ForbidSuspend region, use CheckAddInPhases.
+ if (m_BindingRecordMap.CheckAddInPhases<ForbidSuspendThreadCrstHolder, CrstHolder>(
+ newEntry, m_MapReadLock, m_MapWriteLock))
+ {
+ // Indicates that this identity object was cached.
+ // Caller relinquishes object ownership.
+ return S_OK;
+ }
+ else
+ {
+ // Pre-existing entry was found.
+
+#ifdef _DEBUG
+ ForbidSuspendThreadCrstHolder lock(&m_MapReadLock);
+ auto pExistingEntry = m_BindingRecordMap.LookupPtr(pIdentity);
+ if (pExistingEntry != nullptr)
+ {
+ // It's possible for racing threads to try to cache their results;
+ // just make sure that they got the same HRESULT.
+ _ASSERTE(pExistingEntry->Value().hr == rec.hr);
+ }
+#endif
+
+ // Indicates that previous entry existed, and this identity object was not cached.
+ // Caller retains object ownership.
+ hr = S_FALSE;
+ }
+
+ return hr;
+}
+
+//=====================================================================================================================
+// Implements code:ICLRPrivBinder::BindAssemblyByName
+
+HRESULT CLRPrivBinderAppX::BindAssemblyByName(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ STANDARD_BIND_CONTRACT;
+ BinderHRESULT hr = S_OK;
+ ReleaseHolder<ICLRPrivAssembly> pResult;
+
+ VALIDATE_ARG_RET(pAssemblyName != nullptr && ppAssembly != nullptr);
+
+ EX_TRY
+ {
+ hr = CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT;
+
+ if (FAILED(hr))
+ {
+ _ASSERTE(m_pFusionBinder != nullptr);
+ hr = m_pFusionBinder->BindFusionAssemblyByName(pAssemblyName, GetFusionBindingScope(), &pResult);
+ }
+
+ //
+ // The fusion binder returns CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT only if it did not
+ // recognize pAssemblyName as a FX assembly. Only then should other binders be consulted
+ // (otherwise applications would be able to copy arbitrary FX assemblies into their AppX
+ // package and use them directly).
+ //
+
+ if (hr == CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT)
+ {
+ if (FAILED(hr) && (m_pWinRTBinder != nullptr))
+ {
+ hr = m_pWinRTBinder->BindWinRTAssemblyByName(pAssemblyName, &pResult);
+ }
+
+ if (FAILED(hr))
+ {
+ hr = BindAppXAssemblyByName(pAssemblyName, ABF_Default, &pResult);
+ }
+
+ if (FAILED(hr) && (m_pParentBinder != nullptr))
+ {
+ hr = m_pParentBinder->BindAssemblyByName(pAssemblyName, &pResult);
+ }
+
+ _ASSERTE(FAILED(hr) || pResult != nullptr);
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ // Return if either the bind or the bind cache fails.
+ IfFailRet(hr);
+
+ // Success.
+ *ppAssembly = pResult.Extract();
+ return hr;
+}
+
+//=====================================================================================================================
+// Implements code:IBindContext::PreBind
+HRESULT CLRPrivBinderAppX::PreBind(
+ IAssemblyName * pIAssemblyName,
+ DWORD dwPreBindFlags,
+ IBindResult ** ppIBindResult)
+{
+ STANDARD_BIND_CONTRACT;
+ BinderHRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET((dwPreBindFlags & ~(PRE_BIND_APPLY_POLICY)) == 0);
+ VALIDATE_ARG_RET(pIAssemblyName != nullptr && ppIBindResult != nullptr);
+
+    // Assert that we are only working with a binder that supports native image context bits.
+ _ASSERTE(m_fCanUseNativeImages);
+
+ EX_TRY
+ {
+ hr = CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT;
+
+ if (FAILED(hr))
+ {
+ hr = m_pFusionBinder->PreBindFusionAssemblyByName(pIAssemblyName, dwPreBindFlags, ppIBindResult);
+ }
+
+ if (FAILED(hr) && (m_pWinRTBinder != nullptr))
+ {
+ hr = m_pWinRTBinder->BindWinRTAssemblyByName(pIAssemblyName, ppIBindResult, TRUE);
+ }
+
+ if (FAILED(hr))
+ {
+ hr = PreBindAppXAssemblyByName(pIAssemblyName, ABF_BindIL, ppIBindResult);
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (FAILED(hr) && (m_pParentBinder != nullptr))
+ {
+ ReleaseHolder<IBindContext> pParentBindContext;
+ hr = m_pParentBinder->QueryInterface(__uuidof(IBindContext), (LPVOID *)&pParentBindContext);
+ if (SUCCEEDED(hr))
+ {
+ hr = pParentBindContext->PreBind(pIAssemblyName, dwPreBindFlags, ppIBindResult);
+ }
+ }
+
+ return hr;
+}
+
+//=====================================================================================================================
+UINT_PTR CLRPrivBinderAppX::GetBinderID()
+{
+ LIMITED_METHOD_CONTRACT;
+ return reinterpret_cast<UINT_PTR>(this);
+}
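+// (The binder ID is simply this object's address; VerifyBind below compares binder IDs to
+// decide whether to verify locally or forward to the assembly's own binder.)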
+
+//=====================================================================================================================
+// Implements code:ICLRPrivBinder::GetBinderID
+HRESULT CLRPrivBinderAppX::GetBinderID(
+ UINT_PTR *pBinderId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ *pBinderId = GetBinderID();
+ return S_OK;
+}
+
+//=====================================================================================================================
+// Implements code:IBindContext::IsDefaultContext
+HRESULT CLRPrivBinderAppX::IsDefaultContext()
+{
+ LIMITED_METHOD_CONTRACT;
+ return S_OK;
+}
+
+//=====================================================================================================================
+// Implements code:ICLRPrivWinRtTypeBinder::FindAssemblyForWinRtTypeIfLoaded
+// Finds Assembly * for type in AppDomain * if it is loaded.
+// Returns NULL if assembly is not loaded or type is not found.
+//
+void *
+CLRPrivBinderAppX::FindAssemblyForWinRtTypeIfLoaded(
+ void * pAppDomain,
+ LPCUTF8 szNamespace,
+ LPCUTF8 szClassName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ void * pAssembly = nullptr;
+ if (m_pWinRTBinder != nullptr)
+ {
+ pAssembly = (void *)m_pWinRTBinder->FindAssemblyForTypeIfLoaded(
+ dac_cast<PTR_AppDomain>((AppDomain *)pAppDomain),
+ szNamespace,
+ szClassName);
+ }
+
+ if ((pAssembly == nullptr) && (m_pParentBinder != nullptr))
+ {
+ ReleaseHolder<ICLRPrivWinRtTypeBinder> pParentBinder =
+ ToInterface_NoThrow<ICLRPrivWinRtTypeBinder>(m_pParentBinder.GetValue());
+ // Parent binder should be another instance of code:CLRPrivBinderAppX class that implements the interface
+ _ASSERTE(pParentBinder != nullptr);
+
+ pAssembly = pParentBinder->FindAssemblyForWinRtTypeIfLoaded(
+ pAppDomain,
+ szNamespace,
+ szClassName);
+ }
+
+ return pAssembly;
+}
+
+//=====================================================================================================================
+CLRPrivAssemblyAppX::CLRPrivAssemblyAppX(
+ CLRPrivBinderUtil::AssemblyIdentity * pIdentity,
+ CLRPrivBinderAppX *pBinder,
+ ICLRPrivResource *pIResourceIL,
+ IBindResult * pIBindResult)
+ : m_pIdentity(pIdentity),
+ m_pBinder(nullptr),
+ m_pIResourceIL(nullptr),
+ m_pIResourceNI(nullptr),
+ m_pIBindResult(nullptr)
+{
+ STANDARD_VM_CONTRACT;
+
+ VALIDATE_PTR_THROW(pIdentity);
+ VALIDATE_PTR_THROW(pBinder);
+ VALIDATE_PTR_THROW(pIResourceIL);
+ VALIDATE_PTR_THROW(pIBindResult);
+
+ m_pBinder = clr::SafeAddRef(pBinder);
+ m_pIResourceIL = clr::SafeAddRef(pIResourceIL);
+ m_pIBindResult = clr::SafeAddRef(pIBindResult);
+}
+
+//=====================================================================================================================
+CLRPrivAssemblyAppX::~CLRPrivAssemblyAppX()
+{
+ LIMITED_METHOD_CONTRACT;
+ clr::SafeRelease(m_pIResourceNI);
+}
+
+//=====================================================================================================================
+LPCWSTR CLRPrivAssemblyAppX::GetSimpleName() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pIdentity->Name;
+}
+
+//=====================================================================================================================
+// Implements code:IUnknown::Release
+ULONG CLRPrivAssemblyAppX::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ _ASSERTE(m_cRef > 0);
+
+ ULONG cRef;
+
+ {
+        // To achieve proper lifetime semantics, the CLRPrivAssemblyAppX instances held in the
+        // name-to-assembly map are not ref counted. The object must not be discoverable via
+        // m_NameToAssemblyMap once the ref count reaches 0 (otherwise another thread could
+        // AddRef and Release it back to 0 in parallel). All uses of the map are guarded by the
+        // map lock, so the ref count has to be decremented under that lock (to avoid two
+        // threads racing Release down to ref count 0 at once).
+ // Host callouts are now forbidden.
+ ForbidSuspendThreadCrstHolder lock(&m_pBinder->m_MapReadLock);
+
+ cRef = InterlockedDecrement(&m_cRef);
+ if (cRef == 0)
+ {
+ m_pBinder->m_NameToAssemblyMap.Remove(GetSimpleName());
+ m_pBinder->m_BindingRecordMap.Remove(m_pIdentity);
+ }
+ }
+
+ if (cRef == 0)
+ {
+ delete this;
+ }
+
+ return cRef;
+}
+
+//=====================================================================================================================
+// Implements code:ICLRPrivBinder::BindAssemblyByName
+HRESULT CLRPrivAssemblyAppX::BindAssemblyByName(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return m_pBinder->BindAssemblyByName(
+ pAssemblyName,
+ ppAssembly);
+}
+
+//=====================================================================================================================
+// Implements code:ICLRPrivBinder::GetBinderID
+HRESULT CLRPrivAssemblyAppX::GetBinderID(
+ UINT_PTR *pBinderId)
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pBinder->GetBinderID(
+ pBinderId);
+}
+
+//=====================================================================================================================
+// Implements code:ICLRPrivAssembly::IsShareable
+HRESULT CLRPrivAssemblyAppX::IsShareable(
+ BOOL * pbIsShareable)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ VALIDATE_ARG_RET(pbIsShareable != nullptr);
+
+ *pbIsShareable = FALSE;
+ return S_OK;
+}
+
+//=====================================================================================================================
+// Implements code:ICLRPrivAssembly::GetAvailableImageTypes
+HRESULT CLRPrivAssemblyAppX::GetAvailableImageTypes(
+ LPDWORD pdwImageTypes)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ VALIDATE_ARG_RET(pdwImageTypes != nullptr);
+
+ *pdwImageTypes = 0;
+
+ if (m_pIResourceIL != nullptr)
+ *pdwImageTypes |= ASSEMBLY_IMAGE_TYPE_IL;
+
+ if (m_pIResourceNI != nullptr)
+ *pdwImageTypes |= ASSEMBLY_IMAGE_TYPE_NATIVE;
+
+ return S_OK;
+}
+
+//=====================================================================================================================
+static ICLRPrivResource* GetResourceForBindResult(
+ IBindResult * pIBindResult)
+{
+ STANDARD_VM_CONTRACT;
+ VALIDATE_ARG_THROW(pIBindResult != nullptr);
+
+ WCHAR wzPath[_MAX_PATH];
+ DWORD cchPath = NumItems(wzPath);
+ ReleaseHolder<IAssemblyLocation> pIAssemLoc;
+ IfFailThrow(pIBindResult->GetAssemblyLocation(&pIAssemLoc));
+ IfFailThrow(pIAssemLoc->GetPath(wzPath, &cchPath));
+ return ToInterface<ICLRPrivResource>(new CLRPrivResourcePathImpl(wzPath));
+}
+
+//=====================================================================================================================
+// Implements code:ICLRPrivAssembly::GetImageResource
+HRESULT CLRPrivAssemblyAppX::GetImageResource(
+ DWORD dwImageType,
+ DWORD * pdwImageType,
+ ICLRPrivResource ** ppIResource)
+{
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET(ppIResource != nullptr && m_pIBindResult != nullptr);
+
+ EX_TRY
+ {
+ DWORD _dwImageType;
+ if (pdwImageType == nullptr)
+ pdwImageType = &_dwImageType;
+
+ if ((dwImageType & ASSEMBLY_IMAGE_TYPE_NATIVE) == ASSEMBLY_IMAGE_TYPE_NATIVE)
+ {
+ ReleaseHolder<IBindResult> pIBindResultNI;
+ if (m_pIResourceNI == nullptr)
+ {
+ if (SUCCEEDED(hr = m_pIBindResult->GetNativeImage(&pIBindResultNI, nullptr)) && pIBindResultNI != nullptr)
+ {
+ ReleaseHolder<ICLRPrivResource> pResourceNI = GetResourceForBindResult(pIBindResultNI);
+ if (InterlockedCompareExchangeT<ICLRPrivResource *>(&m_pIResourceNI, pResourceNI, nullptr) == nullptr)
+ pResourceNI.SuppressRelease();
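+                    // If the exchange lost the race, another thread already published
+                    // m_pIResourceNI; the holder then releases our redundant instance on scope exit.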
+ }
+ else
+ {
+ IfFailGo(CLR_E_BIND_IMAGE_UNAVAILABLE);
+ }
+ }
+
+ *ppIResource = clr::SafeAddRef(m_pIResourceNI);
+ *pdwImageType = ASSEMBLY_IMAGE_TYPE_NATIVE;
+ }
+ else if ((dwImageType & ASSEMBLY_IMAGE_TYPE_IL) == ASSEMBLY_IMAGE_TYPE_IL)
+ {
+ *ppIResource = clr::SafeAddRef(m_pIResourceIL);
+ *pdwImageType = ASSEMBLY_IMAGE_TYPE_IL;
+ }
+ else
+ {
+ hr = CLR_E_BIND_IMAGE_UNAVAILABLE;
+ }
+
+ ErrExit:
+ ;
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+
+//=====================================================================================================================
+// Implements code:ICLRPrivBinder::VerifyBind
+HRESULT CLRPrivBinderAppX::VerifyBind(
+ IAssemblyName *pAssemblyName,
+ ICLRPrivAssembly *pAssembly,
+ ICLRPrivAssemblyInfo *pAssemblyInfo)
+{
+ STANDARD_BIND_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+    VALIDATE_ARG_RET(pAssemblyName != nullptr && pAssemblyInfo != nullptr);
+
+ UINT_PTR binderID;
+ IfFailRet(pAssembly->GetBinderID(&binderID));
+
+ if (binderID != GetBinderID())
+ {
+ return pAssembly->VerifyBind(pAssemblyName, pAssembly, pAssemblyInfo);
+ }
+
+ return CLRPrivBinderUtil::VerifyBind(pAssemblyName, pAssemblyInfo);
+}
+
+//=====================================================================================================================
+/*static*/
+LPCWSTR CLRPrivBinderAppX::NameToAssemblyMapTraits::GetKey(CLRPrivAssemblyAppX *pAssemblyAppX)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERT(pAssemblyAppX != nullptr);
+ return pAssemblyAppX->GetSimpleName();
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivAssemblyAppX::GetIBindResult(
+ IBindResult ** ppIBindResult)
+{
+ STANDARD_VM_CONTRACT;
+
+ VALIDATE_ARG_RET(ppIBindResult != nullptr);
+ VALIDATE_CONDITION(m_pIBindResult != nullptr, return E_UNEXPECTED);
+
+ *ppIBindResult = clr::SafeAddRef(m_pIBindResult);
+
+ return S_OK;
+}
+
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/clrprivbinderappx.h b/src/vm/clrprivbinderappx.h
new file mode 100644
index 0000000000..1cc29d3ed2
--- /dev/null
+++ b/src/vm/clrprivbinderappx.h
@@ -0,0 +1,365 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#pragma once
+
+#include "holder.h"
+#include "internalunknownimpl.h"
+#include "shash.h"
+#include "fusion.h"
+#include "clrprivbinding.h"
+#include "clrprivruntimebinders.h"
+#include "clrprivbinderfusion.h"
+#include "clrprivbinderwinrt.h"
+
+//=====================================================================================================================
+// Forward declarations
+class CLRPrivBinderAppX;
+class CLRPrivAssemblyAppX;
+
+class DomainAssembly;
+
+// Forward declaration of helper class used in native image binding.
+class CLRPrivAssemblyAppX_NIWrapper;
+
+typedef DPTR(CLRPrivBinderAppX) PTR_CLRPrivBinderAppX;
+
+//=====================================================================================================================
+class CLRPrivBinderAppX :
+ public IUnknownCommon<ICLRPrivBinder, IBindContext, ICLRPrivWinRtTypeBinder>
+{
+ friend class CLRPrivAssemblyAppX;
+
+public:
+ //=============================================================================================
+ // ICLRPrivBinder methods
+
+ // Implements code:ICLRPrivBinder::BindAssemblyByName
+ STDMETHOD(BindAssemblyByName)(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly);
+
+ // Implements code:ICLRPrivBinder::VerifyBind
+ STDMETHOD(VerifyBind)(
+ IAssemblyName *pAssemblyName,
+ ICLRPrivAssembly *pAssembly,
+ ICLRPrivAssemblyInfo *pAssemblyInfo);
+
+ // Implements code:ICLRPrivBinder::GetBinderFlags
+ STDMETHOD(GetBinderFlags)(
+ DWORD *pBinderFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (pBinderFlags == NULL)
+ return E_INVALIDARG;
+
+ *pBinderFlags = m_pParentBinder != NULL ? BINDER_DESIGNER_BINDING_CONTEXT : BINDER_NONE;
+ return S_OK;
+ }
+
+ // Implements code:ICLRPrivBinder::GetBinderID
+ STDMETHOD(GetBinderID)(
+ UINT_PTR *pBinderId);
+
+ STDMETHOD(FindAssemblyBySpec)(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly);
+
+ //=============================================================================================
+ // ICLRPrivWinRtTypeBinder methods
+
+ // Implements code:ICLRPrivWinRtTypeBinder::FindAssemblyForWinRtTypeIfLoaded
+ STDMETHOD_(void *, FindAssemblyForWinRtTypeIfLoaded)(
+ void * pAppDomain,
+ LPCUTF8 szNamespace,
+ LPCUTF8 szClassName);
+
+ //=============================================================================================
+ // IBindContext methods
+
+ // Implements code:IBindContext::PreBind
+ STDMETHOD(PreBind)(
+ IAssemblyName *pIAssemblyName,
+ DWORD dwPreBindFlags,
+ IBindResult **ppIBindResult);
+
+ // Implements code:IBindContext::IsDefaultContext
+ STDMETHOD(IsDefaultContext)();
+
+ //=============================================================================================
+ // Class methods
+
+ //---------------------------------------------------------------------------------------------
+ static
+ CLRPrivBinderAppX * GetOrCreateBinder();
+
+ static
+ PTR_CLRPrivBinderAppX GetBinderOrNull()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return s_pSingleton;
+ }
+
+ static
+ CLRPrivBinderAppX * CreateParentedBinder(
+ ICLRPrivBinder * pParentBinder,
+ CLRPrivTypeCacheWinRT * pWinRtTypeCache,
+ LPCWSTR * rgwzAltPath,
+ UINT cAltPaths,
+ BOOL fCanUseNativeImages);
+
+ //---------------------------------------------------------------------------------------------
+ ~CLRPrivBinderAppX();
+
+ //---------------------------------------------------------------------------------------------
+ enum AppXBindFlags
+ {
+ ABF_BindIL = 1,
+ ABF_BindNI = 2,
+ ABF_Default = ABF_BindIL | ABF_BindNI,
+ };
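+    // For example, PreBind requests ABF_BindIL to locate just the IL image, while
+    // BindAssemblyByName uses ABF_Default so that a native image is also bound when
+    // m_fCanUseNativeImages permits (see BindAppXAssemblyByNameWorker).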
+
+ //---------------------------------------------------------------------------------------------
+ CLRPrivBinderFusion * GetFusionBinder()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pFusionBinder;
+ }
+
+ PTR_CLRPrivBinderWinRT GetWinRtBinder()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pWinRTBinder;
+ }
+
+private:
+ //---------------------------------------------------------------------------------------------
+ // Binds within AppX packages only. BindAssemblyByName takes care of delegating to Fusion
+ // when needed.
+ HRESULT BindAppXAssemblyByNameWorker(
+ IAssemblyName * pIAssemblyName,
+ DWORD dwAppXBindFlags,
+ CLRPrivAssemblyAppX ** ppAssembly);
+
+ //---------------------------------------------------------------------------------------------
+ // Binds within AppX packages only. BindAssemblyByName takes care of delegating to Fusion
+ // when needed.
+ HRESULT BindAppXAssemblyByName(
+ IAssemblyName * pIAssemblyName,
+ DWORD dwAppXBindFlags,
+ ICLRPrivAssembly ** ppPrivAssembly);
+
+ //---------------------------------------------------------------------------------------------
+    // Binds within AppX packages only. PreBind takes care of delegating to Fusion
+ // when needed.
+ HRESULT PreBindAppXAssemblyByName(
+ IAssemblyName * pIAssemblyName,
+ DWORD dwAppXBindFlags,
+ IBindResult ** ppIBindResult);
+
+ //---------------------------------------------------------------------------------------------
+ UINT_PTR GetBinderID();
+
+ //---------------------------------------------------------------------------------------------
+ CLRPrivBinderAppX(LPCWSTR *rgwzAltPath, UINT cAltPaths);
+
+ //---------------------------------------------------------------------------------------------
+ HRESULT CheckGetAppXRT();
+
+ //---------------------------------------------------------------------------------------------
+ HRESULT CacheBindResult(
+ CLRPrivBinderUtil::AssemblyIdentity * pIdentity,
+ HRESULT hrResult);
+
+ //---------------------------------------------------------------------------------------------
+ CLRPrivBinderFusion::BindingScope GetFusionBindingScope();
+
+ //---------------------------------------------------------------------------------------------
+ Crst m_MapReadLock;
+ Crst m_MapWriteLock;
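+    // Readers take m_MapReadLock inside a ForbidSuspendThread region (host callouts are
+    // forbidden there); insertions take both locks in phases via SHash::CheckAddInPhases.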
+
+ //---------------------------------------------------------------------------------------------
+ // Identity to CLRPrivBinderAppX map
+ struct NameToAssemblyMapTraits : public StringSHashTraits<CLRPrivAssemblyAppX, WCHAR, CaseInsensitiveStringCompareHash<WCHAR> >
+ {
+ static LPCWSTR GetKey(CLRPrivAssemblyAppX *pAssemblyAppX);
+ };
+ typedef SHash<NameToAssemblyMapTraits> NameToAssemblyMap;
+
+ NameToAssemblyMap m_NameToAssemblyMap;
+
+ //---------------------------------------------------------------------------------------------
+ // Binding record map, used by cache lookup requests.
+ struct BindingRecord
+ {
+ // This stores the result of the original bind request.
+ HRESULT hr;
+ };
+
+ struct BindingRecordMapTraits : public MapSHashTraits<CLRPrivBinderUtil::AssemblyIdentity*, BindingRecord>
+ {
+ typedef MapSHashTraits<CLRPrivBinderUtil::AssemblyIdentity*, BindingRecord> base_t;
+ typedef base_t::element_t element_t;
+ typedef base_t::count_t count_t;
+        typedef base_t::key_t key_t;
+
+ static count_t Hash(key_t k)
+ {
+ return HashiString(k->Name);
+ }
+
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ return SString::_wcsicmp(k1->Name, k2->Name) == 0;
+ }
+
+ static const bool s_DestructPerEntryCleanupAction = true;
+ static inline void OnDestructPerEntryCleanupAction(element_t const & e)
+ {
+            delete e.Key(); // Keys are scalar AssemblyIdentity allocations, so scalar delete (not delete[]).
+ }
+ };
+ typedef SHash<BindingRecordMapTraits> BindingRecordMap;
+
+ BindingRecordMap m_BindingRecordMap;
+
+ //---------------------------------------------------------------------------------------------
+ NewArrayHolder< NewArrayHolder<WCHAR> > m_rgAltPathsHolder;
+ NewArrayHolder< WCHAR* > m_rgAltPaths;
+ UINT m_cAltPaths;
+
+#ifdef FEATURE_FUSION
+ BOOL m_fCanUseNativeImages;
+ ReleaseHolder<IILFingerprintFactory> m_pFingerprintFactory;
+#endif
+
+ //---------------------------------------------------------------------------------------------
+ // ParentBinder is set only in designer binding context (forms a chain of binders)
+ ReleaseHolder<ICLRPrivBinder> m_pParentBinder;
+
+ ReleaseHolder<CLRPrivBinderFusion> m_pFusionBinder;
+ PTR_CLRPrivBinderWinRT m_pWinRTBinder;
+
+ //---------------------------------------------------------------------------------------------
+ //static CLRPrivBinderAppX * s_pSingleton;
+ SPTR_DECL(CLRPrivBinderAppX, s_pSingleton);
+
+ //---------------------------------------------------------------------------------------------
+ // Cache the binding scope in the constructor so that there is no need to call into a WinRT
+ // API in a GC_NOTRIGGER scope later on.
+ CLRPrivBinderFusion::BindingScope m_fusionBindingScope;
+}; // class CLRPrivBinderAppX
+
+
+//=====================================================================================================================
+class CLRPrivAssemblyAppX :
+ public IUnknownCommon<ICLRPrivAssembly>
+{
+ friend class CLRPrivBinderAppX;
+
+public:
+ //---------------------------------------------------------------------------------------------
+ CLRPrivAssemblyAppX(
+ CLRPrivBinderUtil::AssemblyIdentity * pIdentity,
+ CLRPrivBinderAppX *pBinder,
+ ICLRPrivResource *pIResourceIL,
+ IBindResult * pIBindResult);
+
+ //---------------------------------------------------------------------------------------------
+ ~CLRPrivAssemblyAppX();
+
+ //---------------------------------------------------------------------------------------------
+ // Implements code:IUnknown::Release
+ STDMETHOD_(ULONG, Release)();
+
+ //---------------------------------------------------------------------------------------------
+ LPCWSTR GetSimpleName() const;
+
+ //=============================================================================================
+ // ICLRPrivBinder interface methods
+
+ // Implements code:ICLRPrivBinder::BindAssemblyByName
+ STDMETHOD(BindAssemblyByName)(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly);
+
+ // Implements code:ICLRPrivBinder::VerifyBind
+ STDMETHOD(VerifyBind)(
+ IAssemblyName *pAssemblyName,
+ ICLRPrivAssembly *pAssembly,
+ ICLRPrivAssemblyInfo *pAssemblyInfo)
+ {
+ STANDARD_BIND_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ VALIDATE_PTR_RET(pAssemblyName);
+ VALIDATE_PTR_RET(pAssembly);
+ VALIDATE_PTR_RET(pAssemblyInfo);
+
+ // Re-initialize the assembly identity with full identity contained in metadata.
+ IfFailRet(m_pIdentity->Initialize(pAssemblyInfo));
+
+ return m_pBinder->VerifyBind(pAssemblyName, pAssembly, pAssemblyInfo);
+ }
+
+ // Implements code:ICLRPrivBinder::GetBinderFlags
+ STDMETHOD(GetBinderFlags)(
+ DWORD *pBinderFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pBinder->GetBinderFlags(pBinderFlags);
+ }
+
+ // Implements code:ICLRPrivBinder::GetBinderID
+ STDMETHOD(GetBinderID)(
+ UINT_PTR *pBinderId);
+
+ // Implements code:ICLRPrivBinder::FindAssemblyBySpec
+ STDMETHOD(FindAssemblyBySpec)(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly)
+ { STATIC_CONTRACT_WRAPPER; return m_pBinder->FindAssemblyBySpec(pvAppDomain, pvAssemblySpec, pResult, ppAssembly); }
+
+ //=============================================================================================
+ // ICLRPrivAssembly interface methods
+
+ // Implements code:ICLRPrivAssembly::IsShareable
+ STDMETHOD(IsShareable)(
+ BOOL * pbIsShareable);
+
+ // Implements code:ICLRPrivAssembly::GetAvailableImageTypes
+ STDMETHOD(GetAvailableImageTypes)(
+ LPDWORD pdwImageTypes);
+
+ // Implements code:ICLRPrivAssembly::GetImageResource
+ STDMETHOD(GetImageResource)(
+ DWORD dwImageType,
+ DWORD *pdwImageType,
+ ICLRPrivResource ** ppIResource);
+
+ //---------------------------------------------------------------------------------------------
+ HRESULT GetIBindResult(
+ IBindResult ** ppIBindResult);
+
+private:
+ CLRPrivBinderUtil::AssemblyIdentity * m_pIdentity;
+
+ ReleaseHolder<CLRPrivBinderAppX> m_pBinder;
+
+ ReleaseHolder<ICLRPrivResource> m_pIResourceIL;
+ // This cannot be a holder as there can be a race to assign to it.
+ ICLRPrivResource * m_pIResourceNI;
+
+ ReleaseHolder<IBindResult> m_pIBindResult;
+};
+
diff --git a/src/vm/clrprivbinderfusion.cpp b/src/vm/clrprivbinderfusion.cpp
new file mode 100644
index 0000000000..aedecde9c6
--- /dev/null
+++ b/src/vm/clrprivbinderfusion.cpp
@@ -0,0 +1,820 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#include "common.h" // precompiled header
+
+#ifndef DACCESS_COMPILE
+
+//=====================================================================================================================
+#include "assemblyspec.hpp"
+#include "corhdr.h"
+#include "domainfile.h"
+#include "fusion.h"
+#include "policy.h"
+#include "sstring.h"
+#include "stackingallocator.h"
+#include "threads.h"
+#include "clrprivbinderfusion.h"
+#include "clrprivbinderutil.h"
+#include "fusionlogging.h"
+
+using namespace CLRPrivBinderUtil;
+
+//=================================================================================================
+#define STDMETHOD_NOTIMPL(...) \
+ STDMETHOD(__VA_ARGS__) \
+ { \
+ WRAPPER_NO_CONTRACT; \
+ _ASSERTE_MSG(false, "Method not implemented."); \
+ return E_NOTIMPL; \
+ }
+
+//=================================================================================================
+static HRESULT PropagateOutStringArgument(
+ __in LPCSTR pszValue,
+ __out_ecount_opt(*pcchArg) LPWSTR pwzArg,
+ __in DWORD cchArg,
+ __out DWORD * pcchArg)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ VALIDATE_PTR_RET(pszValue);
+ VALIDATE_CONDITION((pwzArg == nullptr || cchArg > 0), return E_INVALIDARG);
+
+ HRESULT hr = S_OK;
+
+ if (pwzArg != nullptr)
+ {
+ DWORD cchWritten = WszMultiByteToWideChar(
+ CP_UTF8, 0 /*flags*/, pszValue, -1, pwzArg, cchArg);
+
+ if (cchWritten == 0)
+ {
+ hr = HRESULT_FROM_GetLastError();
+ }
+ else if (pcchArg != nullptr)
+ {
+ *pcchArg = cchWritten;
+ }
+ }
+
+ if (pcchArg != nullptr && (pwzArg == nullptr || hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER)))
+ {
+ *pcchArg = WszMultiByteToWideChar(
+ CP_UTF8, 0 /*flags*/, pszValue, -1, nullptr, 0);
+
+ if (*pcchArg == 0)
+ {
+ hr = HRESULT_FROM_GetLastError();
+ }
+ }
+
+ return hr;
+}
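+// Typical use (sketch): pass a null buffer to query the required size (including the
+// terminator), then call again with a buffer of that size:
+//
+//   DWORD cch = 0;
+//   hr = pName->GetName(&cch, nullptr);   // S_OK; cch now holds the required count
+//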
+
+//=================================================================================================
+// This is needed to allow calls to IsAnyFrameworkAssembly in GC_NOTRIGGER/NO_FAULT regions (i.e.,
+// GC stack walking). CAssemblyName (which implements IAssemblyName in most other uses) allocates
+// during construction and so cannot be used in this scenario.
+
+class AssemblySpecAsIAssemblyName
+ : public IAssemblyName
+{
+public:
+ AssemblySpecAsIAssemblyName(
+ AssemblySpec * pSpec)
+ : m_pSpec(pSpec)
+ { LIMITED_METHOD_CONTRACT; }
+
+ //=============================================================================================
+ // IUnknown methods
+
+ // Not used by IsAnyFrameworkAssembly
+ STDMETHOD_(ULONG, AddRef())
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE_MSG(false, "Method not implemented.");
+ return E_NOTIMPL;
+ }
+
+ // Not used by IsAnyFrameworkAssembly
+ STDMETHOD_(ULONG, Release())
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE_MSG(false, "Method not implemented.");
+ return E_NOTIMPL;
+ }
+
+ // Not used by IsAnyFrameworkAssembly
+ STDMETHOD_NOTIMPL(QueryInterface(
+ REFIID riid,
+ void **ppvObject));
+
+ //=============================================================================================
+ // IAssemblyName methods
+
+ STDMETHOD_NOTIMPL(SetProperty(
+ DWORD PropertyId,
+ void const * pvProperty,
+ DWORD cbProperty));
+
+#define ASSURE_SUFFICIENT_BUFFER(SRCSIZE) \
+ do { \
+ if ((pvProperty == nullptr) || (*pcbProperty < SRCSIZE)) { \
+ *pcbProperty = SRCSIZE; \
+ return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER); \
+ } \
+ } while (false)
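+// The macro implements the standard out-buffer protocol: when the caller's buffer is absent or
+// too small, the required size is reported through *pcbProperty and the call fails with
+// ERROR_INSUFFICIENT_BUFFER so the caller can retry.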
+
+ STDMETHOD(GetProperty)(
+ DWORD PropertyId,
+ LPVOID pvProperty,
+ LPDWORD pcbProperty)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ VALIDATE_PTR_RET(pcbProperty);
+ VALIDATE_CONDITION((pvProperty == nullptr) == (*pcbProperty == 0), return E_INVALIDARG);
+
+ HRESULT hr = S_OK;
+
+ switch (PropertyId)
+ {
+ case ASM_NAME_NAME:
+ return PropagateOutStringArgument(m_pSpec->GetName(), (LPWSTR) pvProperty,
+ *pcbProperty / sizeof(WCHAR), pcbProperty);
+
+ case ASM_NAME_MAJOR_VERSION:
+ ASSURE_SUFFICIENT_BUFFER(sizeof(USHORT));
+ *reinterpret_cast<USHORT*>(pvProperty) = m_pSpec->GetContext()->usMajorVersion;
+ *pcbProperty = sizeof(USHORT);
+ return S_OK;
+
+ case ASM_NAME_MINOR_VERSION:
+ ASSURE_SUFFICIENT_BUFFER(sizeof(USHORT));
+ *reinterpret_cast<USHORT*>(pvProperty) = m_pSpec->GetContext()->usMinorVersion;
+ *pcbProperty = sizeof(USHORT);
+ return S_OK;
+
+ case ASM_NAME_BUILD_NUMBER:
+ ASSURE_SUFFICIENT_BUFFER(sizeof(USHORT));
+ *reinterpret_cast<USHORT*>(pvProperty) = m_pSpec->GetContext()->usBuildNumber;
+ *pcbProperty = sizeof(USHORT);
+ return S_OK;
+
+ case ASM_NAME_REVISION_NUMBER:
+ ASSURE_SUFFICIENT_BUFFER(sizeof(USHORT));
+ *reinterpret_cast<USHORT*>(pvProperty) = m_pSpec->GetContext()->usRevisionNumber;
+ *pcbProperty = sizeof(USHORT);
+ return S_OK;
+
+ case ASM_NAME_CULTURE:
+ if (m_pSpec->GetContext()->szLocale == nullptr)
+ {
+ return FUSION_E_INVALID_NAME;
+ }
+ return PropagateOutStringArgument(m_pSpec->GetContext()->szLocale, (LPWSTR) pvProperty,
+ *pcbProperty / sizeof(WCHAR), pcbProperty);
+
+ case ASM_NAME_PUBLIC_KEY_TOKEN:
+ {
+ if (!m_pSpec->HasPublicKeyToken())
+ {
+ return FUSION_E_INVALID_NAME;
+ }
+
+ PBYTE pbSN;
+ DWORD cbSN;
+ m_pSpec->GetPublicKeyToken(&pbSN, &cbSN);
+ ASSURE_SUFFICIENT_BUFFER(cbSN);
+ memcpy_s(pvProperty, *pcbProperty, pbSN, cbSN);
+ *pcbProperty = cbSN;
+ }
+ return S_OK;
+
+ case ASM_NAME_RETARGET:
+ ASSURE_SUFFICIENT_BUFFER(sizeof(BOOL));
+ *reinterpret_cast<BOOL*>(pvProperty) = m_pSpec->IsRetargetable();
+ *pcbProperty = sizeof(BOOL);
+ return S_OK;
+
+ default:
+ _ASSERTE_MSG(false, "Unexpected property requested.");
+ return E_INVALIDARG;
+ }
+ }
+
+#undef ASSURE_SUFFICIENT_BUFFER
+
+ // Not used by IsAnyFrameworkAssembly
+ STDMETHOD_NOTIMPL(Finalize());
+
+ // Not used by IsAnyFrameworkAssembly
+ STDMETHOD_NOTIMPL(GetDisplayName(
+ __out_ecount_opt(*pccDisplayName) LPOLESTR szDisplayName,
+ __inout LPDWORD pccDisplayName,
+ DWORD dwDisplayFlags));
+
+ // Not used by IsAnyFrameworkAssembly
+ STDMETHOD_NOTIMPL(Reserved(
+ REFIID refIID,
+ IUnknown *pUnkReserved1,
+ IUnknown *pUnkReserved2,
+ LPCOLESTR szReserved,
+ LONGLONG llReserved,
+ LPVOID pvReserved,
+ DWORD cbReserved,
+ LPVOID *ppReserved));
+
+
+ STDMETHOD(GetName)(
+ __out LPDWORD lpcwBuffer,
+ __out_ecount_opt(*lpcwBuffer) WCHAR *pwzName)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ VALIDATE_PTR_RET(lpcwBuffer);
+ return PropagateOutStringArgument(
+ m_pSpec->GetName(), pwzName, *lpcwBuffer, lpcwBuffer);
+ }
+
+ STDMETHOD(GetVersion)(
+ LPDWORD pdwVersionHi,
+ LPDWORD pdwVersionLow)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ VALIDATE_PTR_RET(pdwVersionHi);
+ VALIDATE_PTR_RET(pdwVersionLow);
+
+ AssemblyMetaDataInternal * pAMDI = m_pSpec->GetContext();
+
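+        // IAssemblyName packs the four-part version into two DWORDs:
+        //   hi = (major << 16) | minor,  low = (build << 16) | revision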
+ *pdwVersionHi = MAKELONG(pAMDI->usMinorVersion, pAMDI->usMajorVersion);
+ *pdwVersionLow = MAKELONG(pAMDI->usRevisionNumber, pAMDI->usBuildNumber);
+
+ return S_OK;
+ }
+
+
+ // Exists exclusively to support fusion's IsSystem helper, which compares against 'mscorlib'.
+ STDMETHOD(IsEqual)(
+ IAssemblyName *pName,
+ DWORD dwCmpFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ VALIDATE_PTR_RET(pName);
+
+ // This function is here just to support checks against the name 'mscorlib'.
+ if ((dwCmpFlags & ASM_CMPF_NAME) != ASM_CMPF_NAME)
+ {
+ return E_NOTIMPL;
+ }
+
+ DWORD cchName1 = 0;
+ WCHAR wzName1[_MAX_PATH];
+ IfFailRet(pName->GetName(&cchName1, wzName1));
+ _ASSERTE(SString::_wcsicmp(wzName1, W("mscorlib")) == 0);
+
+ WCHAR wzName2[_MAX_PATH];
+ DWORD cchName2 = WszMultiByteToWideChar(
+ CP_UTF8, 0 /*flags*/, m_pSpec->GetName(), -1, wzName2, (int) (sizeof(wzName2) / sizeof(wzName2[0])));
+
+ if (0 == cchName2)
+ {
+ _ASSERTE(HRESULT_FROM_GetLastError() != HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER));
+ return HRESULT_FROM_GetLastError();
+ }
+
+ if (cchName1 != cchName2)
+ {
+ return S_FALSE;
+ }
+
+ return SString::_wcsnicmp(wzName1, wzName2, cchName1) == 0
+ ? S_OK
+ : S_FALSE;
+ }
+
+ STDMETHOD_NOTIMPL(Clone(
+ IAssemblyName **pName));
+
+private:
+ AssemblySpec * m_pSpec;
+};
+
+//=====================================================================================================================
+HRESULT CLRPrivBinderFusion::FindFusionAssemblyBySpec(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ BindingScope kBindingScope,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly)
+{
+    LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+
+ AppDomain* pAppDomain = reinterpret_cast<AppDomain*>(pvAppDomain);
+ AssemblySpec* pAssemblySpec = reinterpret_cast<AssemblySpec*>(pvAssemblySpec);
+ VALIDATE_PTR_RET(pAppDomain);
+ VALIDATE_PTR_RET(pAssemblySpec);
+ VALIDATE_PTR_RET(pResult);
+ VALIDATE_PTR_RET(ppAssembly);
+
+ if (pAssemblySpec->IsContentType_WindowsRuntime())
+ {
+ return CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT;
+ }
+
+ BOOL fIsSupportedInAppX;
+ {
+ AssemblySpecAsIAssemblyName asName(pAssemblySpec);
+
+ if (Fusion::Util::IsAnyFrameworkAssembly(&asName, &fIsSupportedInAppX) != S_OK)
+ { // Not a framework assembly identity.
+ IfFailRet(CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT);
+ }
+ }
+
+ if (kBindingScope == kBindingScope_FrameworkSubset)
+ { // We should allow only some framework assemblies to load
+
+ // DevMode has to allow all FX assemblies, not just a subset - see code:PreBind for more info
+ {
+ // Disabling for now, as it causes too many violations.
+ //CONTRACT_VIOLATION(GCViolation | FaultViolation | ModeViolation);
+ //_ASSERTE(!AppX::IsAppXDesignMode());
+ }
+
+ if (!fIsSupportedInAppX)
+ { // Assembly is blocked for AppX, fail the load
+ *pResult = HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
+ *ppAssembly = nullptr;
+ return S_OK;
+ }
+ }
+
+ return FindAssemblyBySpec(pvAppDomain, pvAssemblySpec, pResult, ppAssembly);
+}
+
+//=====================================================================================================================
+static
+PEAssembly * FindCachedFile(AppDomain * pDomain, AssemblySpec * pSpec)
+{
+ // Look for cached bind result. Prefer a cached DomainAssembly, as it takes priority over a
+ // cached PEAssembly (which can be different from the one associated with the DomainAssembly).
+ DomainAssembly * pDomainAssembly = pDomain->FindCachedAssembly(pSpec, FALSE);
+ return (pDomainAssembly != nullptr)
+ ? (pDomainAssembly->GetFile())
+ : (pDomain->FindCachedFile(pSpec, FALSE));
+}
+
+//=====================================================================================================================
+// There is no need to create a separate binding record, since we can always just look in the AppDomain's
+// AssemblySpecBindingCache for an answer (which is precisely what this function does).
+
+HRESULT CLRPrivBinderFusion::FindAssemblyBySpec(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly)
+{
+    LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+
+ AppDomain* pAppDomain = reinterpret_cast<AppDomain*>(pvAppDomain);
+ AssemblySpec* pAssemblySpec = reinterpret_cast<AssemblySpec*>(pvAssemblySpec);
+ VALIDATE_PTR_RET(pAppDomain);
+ VALIDATE_PTR_RET(pAssemblySpec);
+ VALIDATE_PTR_RET(pResult);
+ VALIDATE_PTR_RET(ppAssembly);
+
+ // For the Architecture property, canonicalize peMSIL to peNone (which are considered equivalent),
+ // to ensure consistent lookups in the AssemblySpecBindingCache for the CLRPrivBinderFusion binder.
+ if (pAssemblySpec->GetPEKIND() == peMSIL)
+ {
+ pAssemblySpec->SetPEKIND(peNone);
+ }
+
+ PEAssembly * pPEAssembly = FindCachedFile(pAppDomain, pAssemblySpec);
+ if (pPEAssembly == nullptr)
+ {
+ return E_FAIL;
+ }
+
+ // Could be racing with another thread that has just added the PEAssembly to the binding cache
+ // but not yet allocated and assigned a host assembly.
+ if (!pPEAssembly->HasHostAssembly())
+ {
+ return E_FAIL;
+ }
+
+ *pResult = S_OK;
+ *ppAssembly = clr::SafeAddRef(pPEAssembly->GetHostAssembly());
+
+ return S_OK;
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivBinderFusion::BindAssemblyByNameWorker(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ STANDARD_VM_CONTRACT;
+ PRECONDITION(CheckPointer(pAssemblyName));
+ PRECONDITION(CheckPointer(ppAssembly));
+
+ HRESULT hr = S_OK;
+
+ AppDomain * pCurDomain = AppDomain::GetCurrentDomain();
+ if (pCurDomain == nullptr)
+ ThrowHR(E_UNEXPECTED);
+
+ AssemblySpec prePolicySpec;
+ AssemblySpec postPolicySpec;
+
+ prePolicySpec.InitializeSpec(pAssemblyName);
+
+ // For the Architecture property, canonicalize peMSIL to peNone (which are considered equivalent),
+ // to ensure consistent lookups in the AssemblySpecBindingCache for the CLRPrivBinderFusion binder.
+ if (prePolicySpec.GetPEKIND() == peMSIL)
+ {
+ prePolicySpec.SetPEKIND(peNone);
+ }
+
+ AssemblySpec * pBindSpec = &prePolicySpec;
+ PEAssemblyHolder pPEAssembly = clr::SafeAddRef(FindCachedFile(pCurDomain, pBindSpec));
+
+ if (pPEAssembly == nullptr)
+ {
+ // Early on in domain setup there may not be a fusion context, so skip ApplyPolicy then.
+ _ASSERTE(pCurDomain->GetFusionContext() != nullptr || prePolicySpec.IsMscorlib());
+ if (pCurDomain->GetFusionContext() != nullptr)
+ {
+ ReleaseHolder<IAssemblyName> pPolicyAssemblyName;
+ DWORD dwPolicyApplied = 0;
+ ApplyPolicy(pAssemblyName, pCurDomain->GetFusionContext(), nullptr, &pPolicyAssemblyName, nullptr, nullptr, &dwPolicyApplied);
+
+ if (dwPolicyApplied != 0)
+ {
+ postPolicySpec.InitializeSpec(pPolicyAssemblyName);
+ pBindSpec = &postPolicySpec;
+ pPEAssembly = clr::SafeAddRef(FindCachedFile(pCurDomain, pBindSpec));
+ }
+ }
+
+ if (pPEAssembly == nullptr)
+ {
+ // Trigger a load.
+ pPEAssembly = pCurDomain->BindAssemblySpec(
+ pBindSpec, // AssemblySpec
+ TRUE, // ThrowOnFileNotFound
+ FALSE, // RaisePrebindEvents
+ nullptr, // CallerStackMark
+ nullptr, // AssemblyLoadSecurity
+ FALSE); // fUseHostBinderIfAvailable - to avoid infinite recursion
+ _ASSERTE(FindCachedFile(pCurDomain, pBindSpec) == pPEAssembly || pBindSpec->IsMscorlib());
+ }
+
+ // If a post-policy spec was used, add the pre-policy spec to the binding cache
+ // so that it can be found by FindAssemblyBySpec.
+ if (&prePolicySpec != pBindSpec)
+ {
+            // Failure to add simply means someone else beat us to it. In that case
+            // subsequent cache lookups will find the previously cached value.
+ INDEBUG(BOOL fRes =) pCurDomain->AddFileToCache(&prePolicySpec, pPEAssembly, TRUE /* fAllowFailure */);
+ _ASSERTE(!fRes || prePolicySpec.IsMscorlib() || FindCachedFile(pCurDomain, &prePolicySpec) == pPEAssembly);
+ }
+
+ // Ensure that the assembly is discoverable through a consistent assembly name (the assembly def name of the assembly)
+ AssemblySpec specAssemblyDef;
+ specAssemblyDef.InitializeSpec(pPEAssembly);
+
+        // It is expected that all assemblies found here will be unified assemblies, and therefore have a public key.
+ _ASSERTE(specAssemblyDef.IsStrongNamed());
+
+        // Convert the public key into the format that matches the guaranteed cache in the AssemblySpecBindingCache; see the extended logic
+ // in Module::GetAssemblyIfLoaded.
+ if (specAssemblyDef.IsStrongNamed() && specAssemblyDef.HasPublicKey())
+ {
+ specAssemblyDef.ConvertPublicKeyToToken();
+ }
+ pCurDomain->AddFileToCache(&specAssemblyDef, pPEAssembly, TRUE);
+ }
+
+ if (!pPEAssembly->HasHostAssembly())
+ {
+ // This can happen if we just loaded the PEAssembly with BindAssemblySpec above, or if the PEAssembly
+        // was not loaded through this binder (the NGEN case).
+
+ // Note: There can be multiple PEAssembly objects for the same file, however we have to create unique
+ // CLRPrivAssemblyFusion object, otherwise code:AppDomain::FindAssembly will not recognize the duplicates which
+ // will lead to creation of multiple code:DomainAssembly objects for the same file in the same AppDomain.
+
+ InlineSString<128> ssPEAssemblyName;
+ FusionBind::GetAssemblyNameDisplayName(pPEAssembly->GetFusionAssemblyName(), ssPEAssemblyName, ASM_DISPLAYF_FULL);
+ NewHolder<CLRPrivAssemblyFusion> pAssemblyObj = new CLRPrivAssemblyFusion(ssPEAssemblyName.GetUnicode(), this);
+
+ {
+ CrstHolder lock(&m_SetHostAssemblyLock);
+ if (!pPEAssembly->HasHostAssembly())
+ {
+ // Add the host assembly to the PEAssembly.
+ pPEAssembly->SetHostAssembly(pAssemblyObj.Extract());
+ }
+ }
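+        // If another thread installed a host assembly first, pAssemblyObj was not extracted and
+        // its holder deletes the redundant CLRPrivAssemblyFusion instance on scope exit.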
+ }
+
+ // Trigger a load so that a DomainAssembly is associated with the ICLRPrivAssembly created above.
+ pPEAssembly = clr::SafeAddRef(pCurDomain->LoadDomainAssembly(pBindSpec, pPEAssembly, FILE_LOADED)->GetFile());
+
+ _ASSERTE(pPEAssembly != nullptr);
+ _ASSERTE(pPEAssembly->HasHostAssembly());
+ _ASSERTE(pCurDomain->FindAssembly(pPEAssembly->GetHostAssembly()) != nullptr);
+
+ fusion::logging::LogMessage(0, ID_FUSLOG_BINDING_STATUS_FOUND, pPEAssembly->GetPath().GetUnicode());
+
+ *ppAssembly = clr::SafeAddRef(pPEAssembly->GetHostAssembly());
+
+ return hr;
+}
+
+//=====================================================================================================================
+void CLRPrivBinderFusion::BindMscorlib(
+ PEAssembly * pPEAssembly)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _DEBUG
+ NewArrayHolder<WCHAR> dbg_wszAssemblySimpleName;
+ _ASSERTE(SUCCEEDED(fusion::util::GetProperty(pPEAssembly->GetFusionAssemblyName(), ASM_NAME_NAME, &dbg_wszAssemblySimpleName)));
+
+ _ASSERTE(wcscmp(dbg_wszAssemblySimpleName, W("mscorlib")) == 0);
+#endif //_DEBUG
+
+ NewHolder<CLRPrivAssemblyFusion> pPrivAssembly = new CLRPrivAssemblyFusion(W("mscorlib"), this);
+
+ pPEAssembly->SetHostAssembly(pPrivAssembly.Extract());
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivBinderFusion::BindFusionAssemblyByName(
+ IAssemblyName * pAssemblyName,
+ BindingScope kBindingScope,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+
+ fusion::logging::StatusScope logStatus(0, ID_FUSLOG_BINDING_STATUS_FRAMEWORK, &hr);
+
+ DWORD dwContentType = AssemblyContentType_Default;
+ IfFailRet(fusion::util::GetProperty(pAssemblyName, ASM_NAME_CONTENT_TYPE, &dwContentType));
+ if ((hr == S_OK) && (dwContentType != AssemblyContentType_Default))
+ { // Not a NetFX content type.
+ IfFailRet(CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT);
+ }
+
+ BOOL fIsSupportedInAppX;
+ if (Fusion::Util::IsAnyFrameworkAssembly(pAssemblyName, &fIsSupportedInAppX) != S_OK)
+ { // Not a framework assembly identity.
+ IfFailRet(CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT);
+ }
+ if (kBindingScope == kBindingScope_FrameworkSubset)
+ { // We should allow only some framework assemblies to load
+
+ // DevMode has to allow all FX assemblies, not just a subset - see code:PreBind for more info
+ _ASSERTE(!AppX::IsAppXDesignMode());
+
+ if (!fIsSupportedInAppX)
+ { // Assembly is blocked for AppX, fail the load
+ fusion::logging::LogMessage(0, ID_FUSLOG_BINDING_STATUS_FX_ASSEMBLY_BLOCKED);
+
+ IfFailRet(HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND));
+ }
+ }
+
+ return (hr = BindAssemblyByNameWorker(pAssemblyName, ppAssembly));
+}
+
+//=====================================================================================================================
+// Implements code:ICLRPrivBinder::BindAssemblyByName
+HRESULT CLRPrivBinderFusion::BindAssemblyByName(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+ return BindAssemblyByNameWorker(
+ pAssemblyName,
+ ppAssembly);
+}
+
+//=====================================================================================================================
+// Implements code:ICLRPrivBinder::GetBinderID
+HRESULT CLRPrivBinderFusion::GetBinderID(
+ UINT_PTR *pBinderId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ *pBinderId = (UINT_PTR)this;
+ return S_OK;
+}
+
+//=====================================================================================================================
+// Implements code:IBindContext::PreBind
+HRESULT CLRPrivBinderFusion::PreBind(
+ IAssemblyName * pIAssemblyName,
+ DWORD dwPreBindFlags,
+ IBindResult ** ppIBindResult)
+{
+ STANDARD_BIND_CONTRACT;
+ PRECONDITION(CheckPointer(pIAssemblyName));
+ PRECONDITION(CheckPointer(ppIBindResult));
+
+ HRESULT hr = S_OK;
+
+ BOOL fIsSupportedInAppX;
+ if (Fusion::Util::IsAnyFrameworkAssembly(pIAssemblyName, &fIsSupportedInAppX) != S_OK)
+ { // Not a framework assembly identity.
+ return HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
+ }
+
+ EX_TRY
+ {
+ // Create new IL binding scope.
+ fusion::logging::BindingScope defaultScope(pIAssemblyName, FUSION_BIND_LOG_CATEGORY_DEFAULT);
+
+        // Ideally the caller would pass a kBindingScope argument as in code:BindFusionAssemblyByName, so we could give the same answer.
+ // That is not easy, so we will make the decision here:
+ // - DevMode will allow all FX assemblies (that covers designer binding context scenario for designers that need to
+ // load WPF with ngen images for perf reasons).
+ // We know that the real bind via code:BindFusionAssemblyByName will succeed for the assemblies (because we are in DevMode).
+ // - Normal mode (non-DevMode) we will allow only subset of FX assemblies.
+ // It implies that designer binding context (used by debuggers) will not use ngen images for blocked FX assemblies
+ // (transitively). That is acceptable performance trade-off.
+ if (!AppX::IsAppXDesignMode())
+ { // We should allow only some framework assemblies to load
+ if (!fIsSupportedInAppX)
+ { // Assembly is blocked for AppX, fail the load
+ fusion::logging::LogMessage(0, ID_FUSLOG_BINDING_STATUS_FX_ASSEMBLY_BLOCKED);
+
+ hr = HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
+ }
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ AppDomain * pDomain = AppDomain::GetCurrentDomain();
+ ReleaseHolder<IBindContext> pIBindContext;
+ if (SUCCEEDED(hr = GetBindContextFromApplicationContext(pDomain->CreateFusionContext(), &pIBindContext)))
+ {
+ hr = pIBindContext->PreBind(pIAssemblyName, dwPreBindFlags, ppIBindResult);
+ }
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivBinderFusion::PreBindFusionAssemblyByName(
+ IAssemblyName *pIAssemblyName,
+ DWORD dwPreBindFlags,
+ IBindResult **ppIBindResult)
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+
+ DWORD dwContentType = AssemblyContentType_Default;
+ IfFailRet(fusion::util::GetProperty(pIAssemblyName, ASM_NAME_CONTENT_TYPE, &dwContentType));
+ if ((hr == S_OK) && (dwContentType != AssemblyContentType_Default))
+ { // Not a NetFX content type.
+ IfFailRet(CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT);
+ }
+
+ IfFailRet(PreBind(pIAssemblyName, dwPreBindFlags, ppIBindResult));
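+ // Defense in depth: assert in debug builds, and still fail gracefully in retail builds
+ // if PreBind unexpectedly reported success without producing a bind result.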
+ _ASSERTE(*ppIBindResult != nullptr);
+
+ if (*ppIBindResult == nullptr)
+ IfFailRet(E_UNEXPECTED);
+
+ return S_OK;
+}
+
+//=====================================================================================================================
+// Implements code:IBindContext::IsDefaultContext
+HRESULT CLRPrivBinderFusion::IsDefaultContext()
+{
+ STANDARD_BIND_CONTRACT;
+ return S_OK;
+}
+
+//=====================================================================================================================
+CLRPrivBinderFusion::~CLRPrivBinderFusion()
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+//=====================================================================================================================
+CLRPrivAssemblyFusion::CLRPrivAssemblyFusion(
+ LPCWSTR wszName,
+ CLRPrivBinderFusion * pBinder)
+ : m_pBinder(clr::SafeAddRef(pBinder)),
+ m_wszName(DuplicateStringThrowing(wszName))
+{
+ STANDARD_VM_CONTRACT;
+}
+
+//=====================================================================================================================
+LPCWSTR CLRPrivAssemblyFusion::GetName() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_wszName;
+}
+
+//=====================================================================================================================
+// Implements code:IUnknown::Release
+ULONG CLRPrivAssemblyFusion::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ _ASSERTE(m_cRef > 0);
+
+ ULONG cRef = InterlockedDecrement(&m_cRef);
+
+ if (cRef == 0)
+ {
+ delete this;
+ }
+
+ return cRef;
+}
+
+//=====================================================================================================================
+// Implements code:ICLRPrivBinder::BindAssemblyByName
+HRESULT CLRPrivAssemblyFusion::BindAssemblyByName(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pBinder->BindAssemblyByName(
+ pAssemblyName,
+ ppAssembly);
+}
+
+//=====================================================================================================================
+// Implements code:ICLRPrivBinder::GetBinderID
+HRESULT CLRPrivAssemblyFusion::GetBinderID(
+ UINT_PTR *pBinderId)
+{
+ LIMITED_METHOD_CONTRACT;
+
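+ // Report the owning binder's identity, so binder-ID comparisons treat all fusion
+ // assemblies as coming from the same binder.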
+ *pBinderId = reinterpret_cast<UINT_PTR>(m_pBinder.GetValue());
+ return S_OK;
+}
+
+//=====================================================================================================================
+// Implements code:ICLRPrivAssembly::IsShareable
+HRESULT CLRPrivAssemblyFusion::IsShareable(
+ BOOL * pbIsShareable)
+{
+ LIMITED_METHOD_CONTRACT;
+ *pbIsShareable = TRUE; // These things are only used in the AppX scenario, where all fusion assemblies are unified, shareable assemblies.
+ return S_OK;
+}
+
+//=====================================================================================================================
+// Implements code:ICLRPrivAssembly::GetAvailableImageTypes
+HRESULT CLRPrivAssemblyFusion::GetAvailableImageTypes(
+ LPDWORD pdwImageTypes)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"CLRPrivAssemblyFusion::GetAvailableImageTypes");
+ return E_NOTIMPL;
+}
+
+//=====================================================================================================================
+// Implements code:ICLRPrivAssembly::GetImageResource
+HRESULT CLRPrivAssemblyFusion::GetImageResource(
+ DWORD dwImageType,
+ DWORD *pdwImageType,
+ ICLRPrivResource ** ppIResource)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"CLRPrivAssemblyFusion::GetImageResource");
+ return E_NOTIMPL;
+}
+
+#endif // !DACCESS_COMPILE
+
diff --git a/src/vm/clrprivbinderfusion.h b/src/vm/clrprivbinderfusion.h
new file mode 100644
index 0000000000..001a4900c1
--- /dev/null
+++ b/src/vm/clrprivbinderfusion.h
@@ -0,0 +1,229 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#pragma once
+
+#include "holder.h"
+#include "internalunknownimpl.h"
+#include "shash.h"
+#include "clrprivbinding.h"
+#include "clrprivruntimebinders.h"
+
+//=====================================================================================================================
+// Forward declarations
+class CLRPrivBinderFusion;
+class CLRPrivAssemblyFusion;
+
+class PEAssembly;
+class DomainAssembly;
+struct IMDInternalImport;
+
+//=====================================================================================================================
+class CLRPrivBinderFusion :
+ public IUnknownCommon<ICLRPrivBinder, IBindContext>
+{
+ friend class CLRPrivAssemblyFusion;
+
+public:
+ // Scope for the bind operation
+ enum BindingScope
+ {
+ // Binds only to the subset of framework assemblies that is not on the black list (bindings to non-FX assemblies are rejected)
+ kBindingScope_FrameworkSubset,
+ // Binds to all framework assemblies, including those on the black list (bindings to non-FX assemblies are rejected)
+ // Used by the designer binding context and in DevMode
+ kBindingScope_FrameworkAll
+ };
+
+public:
+ //=============================================================================================
+ // ICLRPrivBinder methods
+
+ //---------------------------------------------------------------------------------------------
+ // Implements code:ICLRPrivBinder::BindAssemblyByName
+ STDMETHOD(BindAssemblyByName)(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly);
+
+ //---------------------------------------------------------------------------------------------
+ // Implements code:ICLRPrivBinder::VerifyBind
+ STDMETHOD(VerifyBind)(
+ IAssemblyName *pAssemblyName,
+ ICLRPrivAssembly *pAssembly,
+ ICLRPrivAssemblyInfo *pAssemblyInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (pAssemblyName == nullptr || pAssembly == nullptr || pAssemblyInfo == nullptr)
+ return E_INVALIDARG;
+ return S_OK;
+ }
+
+ //---------------------------------------------------------------------------------------------
+ // Implements code:ICLRPrivBinder::GetBinderFlags
+ STDMETHOD(GetBinderFlags)(
+ DWORD *pBinderFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ *pBinderFlags = BINDER_FINDASSEMBLYBYSPEC_REQUIRES_EXACT_MATCH;
+ return S_OK;
+ }
+
+ //---------------------------------------------------------------------------------------------
+ // Implements code:ICLRPrivBinder::GetBinderID
+ STDMETHOD(GetBinderID)(
+ UINT_PTR *pBinderId);
+
+ STDMETHOD(FindAssemblyBySpec)(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly);
+
+ //=============================================================================================
+ // IBindContext methods
+
+ // Implements code:IBindContext::PreBind
+ STDMETHOD(PreBind)(
+ IAssemblyName *pIAssemblyName,
+ DWORD dwPreBindFlags,
+ IBindResult **ppIBindResult);
+
+ // Implements code:IBindContext::IsDefaultContext
+ STDMETHOD(IsDefaultContext)();
+
+ //=============================================================================================
+ // Class methods
+
+ //---------------------------------------------------------------------------------------------
+ CLRPrivBinderFusion()
+ : m_SetHostAssemblyLock(CrstLeafLock)
+ { STANDARD_VM_CONTRACT; }
+
+ //---------------------------------------------------------------------------------------------
+ ~CLRPrivBinderFusion();
+
+ //---------------------------------------------------------------------------------------------
+ HRESULT FindFusionAssemblyBySpec(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ BindingScope kBindingScope,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly);
+
+ //---------------------------------------------------------------------------------------------
+ HRESULT BindFusionAssemblyByName(
+ IAssemblyName * pAssemblyName,
+ BindingScope kBindingScope,
+ ICLRPrivAssembly ** ppAssembly);
+
+ //---------------------------------------------------------------------------------------------
+ HRESULT PreBindFusionAssemblyByName(
+ IAssemblyName * pIAssemblyName,
+ DWORD dwPreBindFlags,
+ IBindResult ** ppIBindResult);
+
+ //---------------------------------------------------------------------------------------------
+ // Binds mscorlib.dll
+ void BindMscorlib(
+ PEAssembly * pPEAssembly);
+
+private:
+ //---------------------------------------------------------------------------------------------
+ HRESULT BindAssemblyByNameWorker(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly);
+
+private:
+ //---------------------------------------------------------------------------------------------
+ // This lock is used to serialize assigning ICLRPrivAssembly instances to PEAssembly objects.
+ Crst m_SetHostAssemblyLock;
+
+}; // class CLRPrivBinderFusion
+
+//=====================================================================================================================
+class CLRPrivAssemblyFusion :
+ public IUnknownCommon<ICLRPrivAssembly>
+{
+public:
+ //---------------------------------------------------------------------------------------------
+ CLRPrivAssemblyFusion(
+ LPCWSTR wszName,
+ CLRPrivBinderFusion * pBinder);
+
+ //---------------------------------------------------------------------------------------------
+ LPCWSTR GetName() const;
+
+ //---------------------------------------------------------------------------------------------
+ // Implements code:IUnknown::Release
+ STDMETHOD_(ULONG, Release)();
+
+ //---------------------------------------------------------------------------------------------
+ // Implements code:ICLRPrivBinder::BindAssemblyByName
+ STDMETHOD(BindAssemblyByName)(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly);
+
+ //---------------------------------------------------------------------------------------------
+ // Implements code:ICLRPrivAssembly::IsShareable
+ STDMETHOD(IsShareable)(
+ BOOL * pbIsShareable);
+
+ //---------------------------------------------------------------------------------------------
+ // Implements code:ICLRPrivAssembly::GetAvailableImageTypes
+ STDMETHOD(GetAvailableImageTypes)(
+ LPDWORD pdwImageTypes);
+
+ //---------------------------------------------------------------------------------------------
+ // Implements code:ICLRPrivAssembly::GetImageResource
+ STDMETHOD(GetImageResource)(
+ DWORD dwImageType,
+ DWORD *pdwImageType,
+ ICLRPrivResource ** ppIResource);
+
+ //---------------------------------------------------------------------------------------------
+ // Implements code:ICLRPrivBinder::VerifyBind
+ STDMETHOD(VerifyBind)(
+ IAssemblyName *pAssemblyName,
+ ICLRPrivAssembly *pAssembly,
+ ICLRPrivAssemblyInfo *pAssemblyInfo)
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_pBinder->VerifyBind(pAssemblyName, pAssembly, pAssemblyInfo);
+ }
+
+ //---------------------------------------------------------------------------------------------
+ // Implements code:ICLRPrivBinder::GetBinderFlags
+ STDMETHOD(GetBinderFlags)(
+ DWORD *pBinderFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pBinder->GetBinderFlags(pBinderFlags);
+ }
+
+ //---------------------------------------------------------------------------------------------
+ // Implements code:ICLRPrivBinder::GetBinderID
+ STDMETHOD(GetBinderID)(
+ UINT_PTR *pBinderId);
+
+ //---------------------------------------------------------------------------------------------
+ // Implements code:ICLRPrivBinder::FindAssemblyBySpec
+ STDMETHOD(FindAssemblyBySpec)(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly)
+ { STATIC_CONTRACT_WRAPPER; return m_pBinder->FindAssemblyBySpec(pvAppDomain, pvAssemblySpec, pResult, ppAssembly); }
+
+protected:
+ //---------------------------------------------------------------------------------------------
+ // The fusion binder. Need to keep it around as long as this object is around.
+ ReleaseHolder<CLRPrivBinderFusion> m_pBinder;
+
+ // Full display name of the assembly - used to avoid duplicate CLRPrivAssemblyFusion objects
+ NewArrayHolder<WCHAR> m_wszName;
+
+}; // class CLRPrivAssemblyFusion
diff --git a/src/vm/clrprivbinderloadfile.cpp b/src/vm/clrprivbinderloadfile.cpp
new file mode 100644
index 0000000000..d59f251755
--- /dev/null
+++ b/src/vm/clrprivbinderloadfile.cpp
@@ -0,0 +1,265 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#include "common.h" // precompiled header
+#include "clrprivbinderutil.h"
+#include "clrprivbinderloadfile.h"
+#include "clrprivbinderfusion.h"
+#include "clrprivbinderappx.h"
+#include "fusionlogging.h"
+
+using namespace CLRPrivBinderUtil;
+
+#ifndef DACCESS_COMPILE
+
+using clr::SafeAddRef;
+using clr::SafeRelease;
+
+//=====================================================================================================================
+CLRPrivBinderLoadFile * CLRPrivBinderLoadFile::s_pSingleton = nullptr;
+
+
+//=====================================================================================================================
+CLRPrivBinderLoadFile * CLRPrivBinderLoadFile::GetOrCreateBinder()
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+
+ if (s_pSingleton == nullptr)
+ {
+ ReleaseHolder<CLRPrivBinderLoadFile> pBinder = SafeAddRef(new CLRPrivBinderLoadFile());
+
+ CLRPrivBinderAppX * pAppXBinder = CLRPrivBinderAppX::GetOrCreateBinder();
+ CLRPrivBinderFusion * pFusionBinder = pAppXBinder->GetFusionBinder();
+
+ pBinder->m_pFrameworkBinder = SafeAddRef(pFusionBinder);
+ _ASSERTE(pBinder->m_pFrameworkBinder != nullptr);
+
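+ // Publish the singleton with a CAS: the winning thread keeps its reference alive via
+ // SuppressRelease, while a losing thread's holder releases the redundant instance and
+ // the already-published binder is returned below.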
+ if (InterlockedCompareExchangeT<decltype(s_pSingleton)>(&s_pSingleton, pBinder, nullptr) == nullptr)
+ pBinder.SuppressRelease();
+ }
+
+ return s_pSingleton;
+}
+
+//=====================================================================================================================
+CLRPrivBinderLoadFile::CLRPrivBinderLoadFile()
+{
+ STANDARD_VM_CONTRACT;
+}
+
+//=====================================================================================================================
+CLRPrivBinderLoadFile::~CLRPrivBinderLoadFile()
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+//=====================================================================================================================
+STDMETHODIMP CLRPrivBinderLoadFile::BindAssemblyExplicit(
+ PEImage* pImage,
+ IAssemblyName **ppAssemblyName,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ STANDARD_BIND_CONTRACT;
+ PRECONDITION(AppDomain::GetCurrentDomain()->IsDefaultDomain());
+ VALIDATE_ARG_RET(pImage != nullptr);
+ VALIDATE_ARG_RET(ppAssemblyName != nullptr);
+ VALIDATE_ARG_RET(ppAssembly != nullptr);
+
+ HRESULT hr = S_OK;
+
+ fusion::logging::StatusScope logStatus(0, ID_FUSLOG_BINDING_STATUS_LOAD_FILE, &hr);
+
+ ReleaseHolder<IAssemblyName> pAssemblyName;
+ ReleaseHolder<ICLRPrivAssembly> pAssembly;
+
+ EX_TRY
+ {
+ // Build a fusion assembly name from the image's metadata so we can probe the framework binder.
+ {
+ AssemblySpec spec;
+ mdAssembly a;
+ IfFailThrow(pImage->GetMDImport()->GetAssemblyFromScope(&a));
+ spec.InitializeSpec(a, pImage->GetMDImport(), NULL, false);
+ IfFailThrow(spec.CreateFusionName(&pAssemblyName));
+ }
+
+ hr = IfTransientFailThrow(m_pFrameworkBinder->BindFusionAssemblyByName(
+ pAssemblyName,
+ CLRPrivBinderFusion::kBindingScope_FrameworkSubset,
+ &pAssembly));
+ if (FAILED(hr)) // not a Framework assembly
+ {
+ ReleaseHolder<CLRPrivResourcePathImpl> pPathResource =
+ clr::SafeAddRef(new CLRPrivResourcePathImpl(pImage->GetPath().GetUnicode()));
+ pAssembly = clr::SafeAddRef(new CLRPrivAssemblyLoadFile(this, m_pFrameworkBinder, pPathResource));
+
+ hr = S_OK;
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (SUCCEEDED(hr))
+ {
+ *ppAssemblyName = pAssemblyName.Extract();
+ *ppAssembly = pAssembly.Extract();
+ }
+
+ return hr;
+}
+
+//=====================================================================================================================
+STDMETHODIMP CLRPrivBinderLoadFile::BindAssemblyByName(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(!"No parentless binds are allowed in NEITHER context");
+ return E_NOTIMPL;
+};
+
+//=====================================================================================================================
+STDMETHODIMP CLRPrivBinderLoadFile::VerifyBind(
+ IAssemblyName *pAssemblyName,
+ ICLRPrivAssembly *pAssembly,
+ ICLRPrivAssemblyInfo *pAssemblyInfo)
+{
+ WRAPPER_NO_CONTRACT;
+ // TODO: Move public key verification here once we are willing to fully convert to the hosted model.
+ _ASSERTE(!"CLRPrivBinderLoadFile::VerifyBind");
+ return E_NOTIMPL; // Nothing else to verify here.
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivBinderLoadFile::GetBinderID(
+ UINT_PTR *pBinderId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ *pBinderId = (UINT_PTR)this;
+ return S_OK;
+}
+
+//=====================================================================================================================
+CLRPrivAssemblyLoadFile::CLRPrivAssemblyLoadFile(
+ CLRPrivBinderLoadFile* pBinder,
+ CLRPrivBinderFusion* pFrameworkBinder,
+ CLRPrivBinderUtil::CLRPrivResourcePathImpl* pPathResource)
+ : m_pBinder(SafeAddRef(pBinder))
+ , m_pFrameworkBinder(SafeAddRef(pFrameworkBinder))
+ , m_pPathResource(SafeAddRef(pPathResource))
+{
+ STANDARD_VM_CONTRACT;
+ VALIDATE_ARG_THROW(pBinder != nullptr);
+ VALIDATE_ARG_THROW(pFrameworkBinder != nullptr);
+ VALIDATE_ARG_THROW(pPathResource != nullptr);
+}
+
+//=====================================================================================================================
+STDMETHODIMP CLRPrivAssemblyLoadFile::BindAssemblyByName(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ hr = m_pFrameworkBinder->BindFusionAssemblyByName(
+ pAssemblyName,
+ CLRPrivBinderFusion::kBindingScope_FrameworkSubset,
+ ppAssembly);
+ if (FAILED(hr) && !Exception::IsTransient(hr)) // not a Framework assembly
+ {
+ *ppAssembly = RaiseAssemblyResolveEvent(pAssemblyName, this);
+ if (*ppAssembly == NULL)
+ {
+ hr = COR_E_FILENOTFOUND;
+ }
+ else
+ {
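+ // The resolve event does not AddRef on behalf of the caller,
+ // so take a reference here for the out parameter.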
+ (*ppAssembly)->AddRef();
+ hr = S_OK;
+ }
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivAssemblyLoadFile::GetBinderID(
+ UINT_PTR *pBinderId)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pBinder->GetBinderID(pBinderId);
+}
+
+//=====================================================================================================================
+STDMETHODIMP CLRPrivAssemblyLoadFile::VerifyBind(
+ IAssemblyName *pAssemblyName,
+ ICLRPrivAssembly *pAssembly,
+ ICLRPrivAssemblyInfo *pAssemblyInfo)
+{
+ STANDARD_BIND_CONTRACT;
+ return S_OK;
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivAssemblyLoadFile::IsShareable(
+ BOOL * pbIsShareable)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ VALIDATE_ARG_RET(pbIsShareable != nullptr);
+
+ *pbIsShareable = FALSE; // no sharing for loadfile
+ return S_OK;
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivAssemblyLoadFile::GetAvailableImageTypes(
+ LPDWORD pdwImageTypes)
+{
+ LIMITED_METHOD_CONTRACT;
+ VALIDATE_ARG_RET(pdwImageTypes != nullptr);
+ PRECONDITION(m_pPathResource != nullptr);
+
+ *pdwImageTypes = ASSEMBLY_IMAGE_TYPE_IL;
+
+ return S_OK;
+}
+
+
+//=====================================================================================================================
+HRESULT CLRPrivAssemblyLoadFile::GetImageResource(
+ DWORD dwImageType,
+ DWORD *pdwImageType,
+ ICLRPrivResource ** ppIResource)
+{
+ LIMITED_METHOD_CONTRACT;
+ VALIDATE_ARG_RET(pdwImageType != nullptr);
+ VALIDATE_ARG_RET(ppIResource != nullptr);
+
+ *ppIResource = nullptr;
+
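+ // Native images are never exposed by this binder; only the IL path resource is returned.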
+ if ((dwImageType & ASSEMBLY_IMAGE_TYPE_NATIVE) == ASSEMBLY_IMAGE_TYPE_NATIVE)
+ {
+ return CLR_E_BIND_IMAGE_UNAVAILABLE;
+ }
+
+ *pdwImageType = ASSEMBLY_IMAGE_TYPE_IL;
+ *ppIResource = clr::SafeAddRef(m_pPathResource);
+
+ return S_OK;
+}
+
+#endif // !DACCESS_COMPILE
+
diff --git a/src/vm/clrprivbinderloadfile.h b/src/vm/clrprivbinderloadfile.h
new file mode 100644
index 0000000000..d93e3e6b6f
--- /dev/null
+++ b/src/vm/clrprivbinderloadfile.h
@@ -0,0 +1,149 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#pragma once
+
+#include "holder.h"
+#include "internalunknownimpl.h"
+#include "clrprivbinding.h"
+#include "clrprivruntimebinders.h"
+#include "clrprivbinderfusion.h"
+#include "clrprivbinderutil.h"
+
+class PEAssembly;
+
+//=====================================================================================================================
+class CLRPrivBinderLoadFile :
+ public IUnknownCommon<ICLRPrivBinder>
+{
+
+public:
+ //=============================================================================================
+ // ICLRPrivBinder methods
+
+ //---------------------------------------------------------------------------------------------
+ STDMETHOD(BindAssemblyByName)(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly);
+
+ //---------------------------------------------------------------------------------------------
+ STDMETHOD(VerifyBind)(
+ IAssemblyName *pAssemblyName,
+ ICLRPrivAssembly *pAssembly,
+ ICLRPrivAssemblyInfo *pAssemblyInfo);
+
+ //---------------------------------------------------------------------------------------------
+ STDMETHOD(GetBinderFlags)(
+ DWORD *pBinderFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ *pBinderFlags = BINDER_NONE;
+ return S_OK;
+ }
+
+ //---------------------------------------------------------------------------------------------
+ STDMETHOD(GetBinderID)(
+ UINT_PTR *pBinderId);
+
+ //---------------------------------------------------------------------------------------------
+ // FindAssemblyBySpec is not supported by this binder.
+ STDMETHOD(FindAssemblyBySpec)(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly)
+ { STATIC_CONTRACT_WRAPPER; return E_FAIL; }
+
+ //=============================================================================================
+ // Class methods
+ //---------------------------------------------------------------------------------------------
+ STDMETHOD(BindAssemblyExplicit)(
+ PEImage* pImage,
+ IAssemblyName **ppAssemblyName,
+ ICLRPrivAssembly ** ppAssembly);
+
+ //---------------------------------------------------------------------------------------------
+ static
+ CLRPrivBinderLoadFile * GetOrCreateBinder();
+
+ //---------------------------------------------------------------------------------------------
+ ~CLRPrivBinderLoadFile();
+
+private:
+ //---------------------------------------------------------------------------------------------
+ CLRPrivBinderLoadFile();
+
+ //---------------------------------------------------------------------------------------------
+ ReleaseHolder<CLRPrivBinderFusion> m_pFrameworkBinder;
+
+ //---------------------------------------------------------------------------------------------
+ static CLRPrivBinderLoadFile * s_pSingleton;
+};
+
+//=====================================================================================================================
+class CLRPrivAssemblyLoadFile :
+ public IUnknownCommon<ICLRPrivAssembly>
+{
+protected:
+ ReleaseHolder<CLRPrivBinderLoadFile> m_pBinder;
+ ReleaseHolder<CLRPrivBinderFusion> m_pFrameworkBinder;
+ ReleaseHolder<CLRPrivBinderUtil::CLRPrivResourcePathImpl> m_pPathResource;
+
+public:
+ //---------------------------------------------------------------------------------------------
+ CLRPrivAssemblyLoadFile(
+ CLRPrivBinderLoadFile* pBinder,
+ CLRPrivBinderFusion* pFrameworkBinder,
+ CLRPrivBinderUtil::CLRPrivResourcePathImpl* pPathResource);
+
+ //=============================================================================================
+ // ICLRPrivAssembly methods
+
+ //---------------------------------------------------------------------------------------------
+ STDMETHOD(BindAssemblyByName)(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly);
+
+ //---------------------------------------------------------------------------------------------
+ STDMETHOD(VerifyBind)(
+ IAssemblyName *pAssemblyName,
+ ICLRPrivAssembly *pAssembly,
+ ICLRPrivAssemblyInfo *pAssemblyInfo);
+
+ //---------------------------------------------------------------------------------------------
+ STDMETHOD(GetBinderFlags)(
+ DWORD *pBinderFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pBinder->GetBinderFlags(pBinderFlags);
+ }
+
+ //---------------------------------------------------------------------------------------------
+ STDMETHOD(GetBinderID)(
+ UINT_PTR *pBinderId);
+
+ //---------------------------------------------------------------------------------------------
+ STDMETHOD(IsShareable)(
+ BOOL * pbIsShareable);
+
+ //---------------------------------------------------------------------------------------------
+ STDMETHOD(GetAvailableImageTypes)(
+ LPDWORD pdwImageTypes);
+
+ //---------------------------------------------------------------------------------------------
+ STDMETHOD(GetImageResource)(
+ DWORD dwImageType,
+ DWORD *pdwImageType,
+ ICLRPrivResource ** ppIResource);
+
+ //---------------------------------------------------------------------------------------------
+ STDMETHOD(FindAssemblyBySpec)(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly)
+ { STATIC_CONTRACT_WRAPPER; return m_pBinder->FindAssemblyBySpec(pvAppDomain, pvAssemblySpec, pResult, ppAssembly); }
+};
diff --git a/src/vm/clrprivbinderreflectiononlywinrt.cpp b/src/vm/clrprivbinderreflectiononlywinrt.cpp
new file mode 100644
index 0000000000..2be2e77407
--- /dev/null
+++ b/src/vm/clrprivbinderreflectiononlywinrt.cpp
@@ -0,0 +1,498 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// Contains the types that implement code:ICLRPrivBinder and code:ICLRPrivAssembly for WinRT ReflectionOnly (aka introspection) binding.
+//
+//=====================================================================================================================
+
+#include "common.h" // precompiled header
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+
+//=====================================================================================================================
+#include "sstring.h"
+#include "policy.h"
+#include "clrprivbinderreflectiononlywinrt.h"
+#include "appxutil.h"
+#include "clrprivbinderutil.h"
+#include "imprthelpers.h" // in fusion/inc
+
+#include <winstring.h>
+#include <typeresolution.h>
+
+using namespace CLRPrivBinderUtil;
+
+//=====================================================================================================================
+
+//=====================================================================================================================
+CLRPrivBinderReflectionOnlyWinRT::CLRPrivBinderReflectionOnlyWinRT(
+ CLRPrivTypeCacheReflectionOnlyWinRT * pTypeCache)
+ : m_MapsLock(CrstLeafLock, CRST_REENTRANCY) // Reentrancy is needed for code:CLRPrivAssemblyReflectionOnlyWinRT::Release
+{
+ STANDARD_VM_CONTRACT;
+
+ // This binder is not supported in AppX scenario.
+ _ASSERTE(!AppX::IsAppXProcess());
+
+ _ASSERTE(pTypeCache != nullptr);
+ m_pTypeCache = clr::SafeAddRef(pTypeCache);
+}
+
+//=====================================================================================================================
+CLRPrivBinderReflectionOnlyWinRT::~CLRPrivBinderReflectionOnlyWinRT()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pTypeCache != nullptr)
+ {
+ m_pTypeCache->Release();
+ }
+}
+
+//=====================================================================================================================
+HRESULT
+CLRPrivBinderReflectionOnlyWinRT::BindWinRtType_Internal(
+ LPCSTR szTypeNamespace,
+ LPCSTR szTypeClassName,
+ DomainAssembly * pParentAssembly,
+ CLRPrivAssemblyReflectionOnlyWinRT ** ppAssembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET(ppAssembly != nullptr);
+
+ CLRPrivBinderUtil::WStringList * pFileNameList = nullptr;
+
+ StackSString ssTypeNamespace(SString::Utf8, szTypeNamespace);
+
+ GetFileNameListForNamespace(ssTypeNamespace.GetUnicode(), pParentAssembly, &pFileNameList);
+
+ if (pFileNameList == nullptr)
+ { // There are no files associated with the namespace
+ return CLR_E_BIND_TYPE_NOT_FOUND;
+ }
+
+ StackSString ssTypeName(ssTypeNamespace);
+ ssTypeName.Append(W('.'));
+ ssTypeName.AppendUTF8(szTypeClassName);
+
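+ // Probe each file registered for this namespace until one of them contains the requested type.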
+ CLRPrivBinderUtil::WStringListElem * pFileNameElem = pFileNameList->GetHead();
+ while (pFileNameElem != nullptr)
+ {
+ const WCHAR * wszFileName = pFileNameElem->GetValue();
+ ReleaseHolder<CLRPrivAssemblyReflectionOnlyWinRT> pAssembly = FindOrCreateAssemblyByFileName(wszFileName);
+ _ASSERTE(pAssembly != NULL);
+
+ IfFailRet(hr = m_pTypeCache->ContainsType(pAssembly, ssTypeName.GetUnicode()));
+ if (hr == S_OK)
+ { // The type we are looking for has been found in this assembly
+ *ppAssembly = pAssembly.Extract();
+ return S_OK;
+ }
+ _ASSERTE(hr == S_FALSE);
+
+ // Try next file name for this namespace
+ pFileNameElem = CLRPrivBinderUtil::WStringList::GetNext(pFileNameElem);
+ }
+
+ // The type has not been found in any of the files from the type's namespace
+ return CLR_E_BIND_TYPE_NOT_FOUND;
+} // CLRPrivBinderReflectionOnlyWinRT::BindWinRtType_Internal
+
+//=====================================================================================================================
+HRESULT
+CLRPrivBinderReflectionOnlyWinRT::BindWinRtType(
+ LPCSTR szTypeNamespace,
+ LPCSTR szTypeClassName,
+ DomainAssembly * pParentAssembly,
+ ICLRPrivAssembly ** ppPrivAssembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ ReleaseHolder<CLRPrivAssemblyReflectionOnlyWinRT> pWinRTAssembly;
+ IfFailRet(BindWinRtType_Internal(szTypeNamespace, szTypeClassName, pParentAssembly, &pWinRTAssembly));
+ IfFailRet(pWinRTAssembly->QueryInterface(__uuidof(ICLRPrivAssembly), (LPVOID *)ppPrivAssembly));
+
+ return hr;
+}
+
+//=====================================================================================================================
+// Implements interface method code:ICLRPrivBinder::BindAssemblyByName.
+//
+HRESULT CLRPrivBinderReflectionOnlyWinRT::BindAssemblyByName(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE_MSG(false, "Unexpected call to CLRPrivBinderReflectionOnlyWinRT::BindAssemblyByName");
+ return E_UNEXPECTED;
+}
+
+//=====================================================================================================================
+ReleaseHolder<CLRPrivAssemblyReflectionOnlyWinRT>
+CLRPrivBinderReflectionOnlyWinRT::FindAssemblyByFileName(
+ LPCWSTR wszFileName)
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+ CrstHolder lock(&m_MapsLock);
+ const FileNameToAssemblyMapEntry * pEntry = m_FileNameToAssemblyMap.LookupPtr(wszFileName);
+ return (pEntry == nullptr) ? nullptr : clr::SafeAddRef(pEntry->m_pAssembly);
+}
+
+//=====================================================================================================================
+// Add FileName -> CLRPrivAssemblyReflectionOnlyWinRT * mapping to the map (multi-thread safe).
+ReleaseHolder<CLRPrivAssemblyReflectionOnlyWinRT>
+CLRPrivBinderReflectionOnlyWinRT::AddFileNameToAssemblyMapping(
+ LPCWSTR wszFileName,
+ CLRPrivAssemblyReflectionOnlyWinRT * pAssembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(pAssembly != nullptr);
+
+ CrstHolder lock(&m_MapsLock);
+
+ const FileNameToAssemblyMapEntry * pEntry = m_FileNameToAssemblyMap.LookupPtr(wszFileName);
+ CLRPrivAssemblyReflectionOnlyWinRT * pResultAssembly = nullptr;
+ if (pEntry != nullptr)
+ {
+ pResultAssembly = pEntry->m_pAssembly;
+ }
+ else
+ {
+ FileNameToAssemblyMapEntry e;
+ e.m_wszFileName = wszFileName;
+ e.m_pAssembly = pAssembly;
+ m_FileNameToAssemblyMap.Add(e);
+
+ pResultAssembly = pAssembly;
+ }
+ return clr::SafeAddRef(pResultAssembly);
+}
+
+//=====================================================================================================================
+void
+CLRPrivBinderReflectionOnlyWinRT::RemoveFileNameToAssemblyMapping(
+ LPCWSTR wszFileName)
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+ CrstHolder lock(&m_MapsLock);
+ m_FileNameToAssemblyMap.Remove(wszFileName);
+}
+
+//=====================================================================================================================
+ReleaseHolder<CLRPrivAssemblyReflectionOnlyWinRT>
+CLRPrivBinderReflectionOnlyWinRT::FindOrCreateAssemblyByFileName(
+ LPCWSTR wszFileName)
+{
+ STANDARD_VM_CONTRACT;
+
+ ReleaseHolder<CLRPrivAssemblyReflectionOnlyWinRT> pAssembly = FindAssemblyByFileName(wszFileName);
+
+ if (pAssembly == nullptr)
+ {
+ NewHolder<CLRPrivResourcePathImpl> pResource(
+ new CLRPrivResourcePathImpl(wszFileName));
+
+ NewHolder<CLRPrivAssemblyReflectionOnlyWinRT> pNewAssembly(
+ new CLRPrivAssemblyReflectionOnlyWinRT(wszFileName, this, pResource));
+
+ // pNewAssembly holds reference to this now
+ pResource.SuppressRelease();
+
+ // Add the assembly into cache (multi-thread aware)
+ pAssembly = AddFileNameToAssemblyMapping(pResource->GetPath(), pNewAssembly);
+
+ if (pAssembly == pNewAssembly)
+ { // We did not find an existing assembly in the cache and are using the newly created pNewAssembly.
+ // Stop it from being deleted when we go out of scope.
+ pNewAssembly.SuppressRelease();
+ }
+ }
+ return pAssembly.Extract();
+}
+
+//=====================================================================================================================
+// Returns list of file names from code:m_NamespaceToFileNameListMap for the namespace.
+//
+void
+CLRPrivBinderReflectionOnlyWinRT::GetFileNameListForNamespace(
+ LPCWSTR wszNamespace,
+ DomainAssembly * pParentAssembly,
+ CLRPrivBinderUtil::WStringList ** ppFileNameList)
+{
+ STANDARD_VM_CONTRACT;
+
+ CLRPrivBinderUtil::WStringList * pFileNameList = nullptr;
+ {
+ CrstHolder lock(&m_MapsLock);
+
+ const NamespaceToFileNameListMapEntry * pEntry = m_NamespaceToFileNameListMap.LookupPtr(wszNamespace);
+ if (pEntry != nullptr)
+ {
+ // Entries from the map are never removed, so we do not have to protect the file name list with a lock
+ pFileNameList = pEntry->m_pFileNameList;
+ }
+ }
+
+ if (pFileNameList != nullptr)
+ {
+ *ppFileNameList = pFileNameList;
+ }
+ else
+ {
+ CLRPrivBinderUtil::WStringListHolder hFileNameList;
+
+ EX_TRY
+ {
+ m_pTypeCache->RaiseNamespaceResolveEvent(wszNamespace, pParentAssembly, &hFileNameList);
+ }
+ EX_CATCH
+ {
+ Exception * ex = GET_EXCEPTION();
+ if (!ex->IsTransient())
+ { // Exception was caused by user code
+ // Cache empty file name list for this namespace
+ (void)AddFileNameListForNamespace(wszNamespace, nullptr, ppFileNameList);
+ }
+ EX_RETHROW;
+ }
+ EX_END_CATCH_UNREACHABLE
+
+ if (AddFileNameListForNamespace(wszNamespace, hFileNameList.GetValue(), ppFileNameList))
+ { // The file name list was added to the cache - do not delete it
+ _ASSERTE(*ppFileNameList == hFileNameList.GetValue());
+ (void)hFileNameList.Extract();
+ }
+ }
+} // CLRPrivBinderReflectionOnlyWinRT::GetFileNameListForNamespace
+
+//=====================================================================================================================
+// Adds (thread-safe) list of file names to code:m_NamespaceToFileNameListMap for the namespace - returns the cached value.
+// Returns TRUE, if pFileNameList was added to the cache and caller should NOT delete it.
+// Returns FALSE, if pFileNameList was not added to the cache and caller should delete it.
+//
+BOOL
+CLRPrivBinderReflectionOnlyWinRT::AddFileNameListForNamespace(
+ LPCWSTR wszNamespace,
+ CLRPrivBinderUtil::WStringList * pFileNameList,
+ CLRPrivBinderUtil::WStringList ** ppFileNameList)
+{
+ STANDARD_VM_CONTRACT;
+
+ NewArrayHolder<WCHAR> wszEntryNamespace = DuplicateStringThrowing(wszNamespace);
+
+ NamespaceToFileNameListMapEntry entry;
+ entry.m_wszNamespace = wszEntryNamespace;
+ entry.m_pFileNameList = pFileNameList;
+
+ {
+ CrstHolder lock(&m_MapsLock);
+
+ const NamespaceToFileNameListMapEntry * pEntry = m_NamespaceToFileNameListMap.LookupPtr(wszEntryNamespace);
+ if (pEntry == nullptr)
+ {
+ m_NamespaceToFileNameListMap.Add(entry);
+
+ // These values are now owned by the hash table element
+ wszEntryNamespace.SuppressRelease();
+ *ppFileNameList = pFileNameList;
+ return TRUE;
+ }
+ else
+ { // Another thread beat us adding this entry to the hash table
+ *ppFileNameList = pEntry->m_pFileNameList;
+ return FALSE;
+ }
+ }
+} // CLRPrivBinderReflectionOnlyWinRT::AddFileNameListForNamespace
+
+//=====================================================================================================================
+HRESULT
+CLRPrivBinderReflectionOnlyWinRT::BindAssemblyExplicit(
+ const WCHAR * wszFileName,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+
+ GCX_PREEMP();
+
+ ReleaseHolder<CLRPrivAssemblyReflectionOnlyWinRT> pAssembly = FindOrCreateAssemblyByFileName(wszFileName);
+ _ASSERTE(pAssembly != NULL);
+
+ IfFailRet(pAssembly->QueryInterface(__uuidof(ICLRPrivAssembly), (LPVOID *)ppAssembly));
+
+ return S_OK;
+}
+
+//=====================================================================================================================
+CLRPrivAssemblyReflectionOnlyWinRT::CLRPrivAssemblyReflectionOnlyWinRT(
+ LPCWSTR wzSimpleName,
+ CLRPrivBinderReflectionOnlyWinRT * pBinder,
+ CLRPrivResourcePathImpl * pResourceIL)
+{
+ STANDARD_VM_CONTRACT;
+ VALIDATE_ARG_THROW((wzSimpleName != nullptr) && (pBinder != nullptr) && (pResourceIL != nullptr));
+
+ m_pBinder = clr::SafeAddRef(pBinder);
+ m_pResourceIL = clr::SafeAddRef(pResourceIL);
+}
+
+//=====================================================================================================================
+ULONG CLRPrivAssemblyReflectionOnlyWinRT::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ _ASSERTE(m_cRef > 0);
+
+ ULONG cRef;
+
+ {
+ // To achieve proper lifetime semantics, the CLRPrivAssemblyReflectionOnlyWinRT instances stored in the
+ // file name to assembly map are not ref counted. We cannot allow discovery of the object via m_FileNameToAssemblyMap
+ // when the ref count is 0 (to prevent another thread from AddRef'ing and Releasing it back to 0 in parallel).
+ // All uses of the map are guarded by the map lock, so we have to decrement the ref count under that
+ // lock (to avoid two threads racing Release down to ref count 0 at the same time).
+ CrstHolder lock(&m_pBinder->m_MapsLock);
+
+ cRef = InterlockedDecrement(&m_cRef);
+ if (cRef == 0)
+ {
+ m_pBinder->RemoveFileNameToAssemblyMapping(m_pResourceIL->GetPath());
+ }
+ }
+
+ if (cRef == 0)
+ {
+ delete this;
+ }
+ return cRef;
+}
+
+//=====================================================================================================================
+// Implements interface method code:ICLRPrivAssembly::IsShareable.
+//
+HRESULT CLRPrivAssemblyReflectionOnlyWinRT::IsShareable(
+ BOOL * pbIsShareable)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ VALIDATE_ARG_RET(pbIsShareable != nullptr);
+
+ *pbIsShareable = FALSE;
+ return S_OK;
+}
+
+//=====================================================================================================================
+// Implements interface method code:ICLRPrivAssembly::GetAvailableImageTypes.
+//
+HRESULT CLRPrivAssemblyReflectionOnlyWinRT::GetAvailableImageTypes(
+ LPDWORD pdwImageTypes)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ VALIDATE_ARG_RET(pdwImageTypes != nullptr);
+
+ *pdwImageTypes = 0;
+
+ if (m_pResourceIL != nullptr)
+ *pdwImageTypes |= ASSEMBLY_IMAGE_TYPE_IL;
+
+ return S_OK;
+}
+
+//=====================================================================================================================
+// Implements interface method code:ICLRPrivAssembly::GetImageResource.
+//
+HRESULT CLRPrivAssemblyReflectionOnlyWinRT::GetImageResource(
+ DWORD dwImageType,
+ DWORD * pdwImageType,
+ ICLRPrivResource ** ppIResource)
+{
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET(ppIResource != nullptr);
+
+ EX_TRY
+ {
+ DWORD _dwImageType;
+ if (pdwImageType == nullptr)
+ {
+ pdwImageType = &_dwImageType;
+ }
+
+ if ((dwImageType & ASSEMBLY_IMAGE_TYPE_IL) == ASSEMBLY_IMAGE_TYPE_IL)
+ {
+ *ppIResource = clr::SafeAddRef(m_pResourceIL);
+ *pdwImageType = ASSEMBLY_IMAGE_TYPE_IL;
+ }
+ else
+ { // Native image is not supported by this binder
+ hr = CLR_E_BIND_IMAGE_UNAVAILABLE;
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+//=====================================================================================================================
+// Implements interface method code:ICLRPrivBinder::VerifyBind.
+//
+HRESULT CLRPrivBinderReflectionOnlyWinRT::VerifyBind(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly * pAssembly,
+ ICLRPrivAssemblyInfo * pAssemblyInfo)
+{
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ UINT_PTR binderID;
+ IfFailRet(pAssembly->GetBinderID(&binderID));
+ if (binderID != reinterpret_cast<UINT_PTR>(this))
+ {
+ return pAssembly->VerifyBind(pAssemblyName, pAssembly, pAssemblyInfo);
+ }
+
+ // Since WinRT types are bound by type name and not assembly name, assembly-level version validation
+ // does not make sense here. Just return S_OK.
+ return S_OK;
+}
+
+//=====================================================================================================================
+// Implements interface method code:ICLRPrivBinder::GetBinderID.
+//
+HRESULT CLRPrivBinderReflectionOnlyWinRT::GetBinderID(
+ UINT_PTR * pBinderId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ *pBinderId = reinterpret_cast<UINT_PTR>(this);
+ return S_OK;
+}
+
+#endif //FEATURE_REFLECTION_ONLY_LOAD
+#endif //!DACCESS_COMPILE
diff --git a/src/vm/clrprivbinderreflectiononlywinrt.h b/src/vm/clrprivbinderreflectiononlywinrt.h
new file mode 100644
index 0000000000..116d63f24d
--- /dev/null
+++ b/src/vm/clrprivbinderreflectiononlywinrt.h
@@ -0,0 +1,298 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// Contains the types that implement code:ICLRPrivBinder and code:ICLRPrivAssembly for WinRT ReflectionOnly (aka introspection) binding.
+//
+//=====================================================================================================================
+
+#ifdef FEATURE_HOSTED_BINDER
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+
+#pragma once
+
+#include "holder.h"
+#include "internalunknownimpl.h"
+#include "clrprivbinding.h"
+#include "clrprivruntimebinders.h"
+#include "clrprivbinderutil.h"
+#include "clr_std/utility"
+
+//=====================================================================================================================
+// Forward declarations
+class CLRPrivBinderReflectionOnlyWinRT;
+class CLRPrivAssemblyReflectionOnlyWinRT;
+class CLRPrivTypeCacheReflectionOnlyWinRT;
+class DomainAssembly;
+
+//=====================================================================================================================
+//=====================================================================================================================
+//=====================================================================================================================
+class CLRPrivBinderReflectionOnlyWinRT :
+ public IUnknownCommon<ICLRPrivBinder>
+{
+ friend class CLRPrivAssemblyReflectionOnlyWinRT;
+
+private:
+ //=============================================================================================
+ // Data structures for Namespace -> FileNameList hash (as returned by RoResolveNamespace API)
+
+ // Entry in SHash table that maps namespace to list of files
+ struct NamespaceToFileNameListMapEntry
+ {
+ PWSTR m_wszNamespace;
+ CLRPrivBinderUtil::WStringList * m_pFileNameList;
+ };
+
+ // SHash traits for Namespace -> FileNameList hash
+ class NamespaceToFileNameListMapTraits : public NoRemoveSHashTraits< DefaultSHashTraits< NamespaceToFileNameListMapEntry > >
+ {
+ public:
+ typedef PCWSTR key_t;
+ static const NamespaceToFileNameListMapEntry Null() { NamespaceToFileNameListMapEntry e; e.m_wszNamespace = nullptr; return e; }
+ static bool IsNull(const NamespaceToFileNameListMapEntry & e) { return e.m_wszNamespace == nullptr; }
+ static PCWSTR GetKey(const NamespaceToFileNameListMapEntry & e) { return e.m_wszNamespace; }
+ static count_t Hash(PCWSTR str) { return HashString(str); }
+ static BOOL Equals(PCWSTR lhs, PCWSTR rhs) { LIMITED_METHOD_CONTRACT; return (wcscmp(lhs, rhs) == 0); }
+
+ void OnDestructPerEntryCleanupAction(const NamespaceToFileNameListMapEntry & e)
+ {
+ delete [] e.m_wszNamespace;
+ CLRPrivBinderUtil::WStringList_Delete(e.m_pFileNameList);
+ }
+ static const bool s_DestructPerEntryCleanupAction = true;
+ };
+
+ typedef SHash<NamespaceToFileNameListMapTraits> NamespaceToFileNameListMap;
+
+ //=============================================================================================
+ // Data structure for FileName -> CLRPrivAssemblyReflectionOnlyWinRT * map
+
+ struct FileNameToAssemblyMapEntry
+ {
+ PCWSTR m_wszFileName; // File name (owned by m_pAssembly)
+ CLRPrivAssemblyReflectionOnlyWinRT * m_pAssembly;
+ };
+
+ class FileNameToAssemblyMapTraits : public DefaultSHashTraits<FileNameToAssemblyMapEntry>
+ {
+ public:
+ typedef PCWSTR key_t;
+ static const FileNameToAssemblyMapEntry Null() { FileNameToAssemblyMapEntry e; e.m_wszFileName = NULL; return e; }
+ static bool IsNull(const FileNameToAssemblyMapEntry &e) { return e.m_wszFileName == NULL; }
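+ // (PCWSTR)-1 serves as the tombstone marker, so deleted slots are distinguishable from empty ones.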
+ static const FileNameToAssemblyMapEntry Deleted() { FileNameToAssemblyMapEntry e; e.m_wszFileName = (PCWSTR)-1; return e; }
+ static bool IsDeleted(const FileNameToAssemblyMapEntry & e) { return e.m_wszFileName == (PCWSTR)-1; }
+ static PCWSTR GetKey(const FileNameToAssemblyMapEntry & e) { return e.m_wszFileName; }
+ static count_t Hash(PCWSTR str) { return HashString(str); }
+ static BOOL Equals(PCWSTR lhs, PCWSTR rhs) { LIMITED_METHOD_CONTRACT; return (wcscmp(lhs, rhs) == 0); }
+ };
+
+ typedef SHash<FileNameToAssemblyMapTraits> FileNameToAssemblyMap;
+
+public:
+ //=============================================================================================
+ // ICLRPrivBinder interface methods
+
+ STDMETHOD(BindAssemblyByName)(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly);
+
+ // Implements interface method code:ICLRPrivBinder::VerifyBind.
+ STDMETHOD(VerifyBind)(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly * pAssembly,
+ ICLRPrivAssemblyInfo * pAssemblyInfo);
+
+ // Implements interface method code:ICLRPrivBinder::GetBinderFlags
+ STDMETHOD(GetBinderFlags)(
+ DWORD *pBinderFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ *pBinderFlags = BINDER_NONE;
+ return S_OK;
+ }
+
+ // Implements interface method code:ICLRPrivBinder::GetBinderID.
+ STDMETHOD(GetBinderID)(
+ UINT_PTR * pBinderId);
+
+ // FindAssemblyBySpec is not supported by this binder.
+ STDMETHOD(FindAssemblyBySpec)(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly)
+ { STATIC_CONTRACT_WRAPPER; return E_FAIL; }
+
+ //=============================================================================================
+ // Class methods
+
+ CLRPrivBinderReflectionOnlyWinRT(
+ CLRPrivTypeCacheReflectionOnlyWinRT * pTypeCache);
+
+ ~CLRPrivBinderReflectionOnlyWinRT();
+
+ HRESULT BindAssemblyExplicit(
+ const WCHAR * wszFileName,
+ ICLRPrivAssembly ** ppAssembly);
+
+ HRESULT BindWinRtType(
+ LPCSTR szTypeNamespace,
+ LPCSTR szTypeClassName,
+ DomainAssembly * pParentAssembly,
+ ICLRPrivAssembly ** ppPrivAssembly);
+
+private:
+ //=============================================================================================
+ // Accessors for FileName -> CLRPrivAssemblyReflectionOnlyWinRT * map
+
+ ReleaseHolder<CLRPrivAssemblyReflectionOnlyWinRT> FindAssemblyByFileName(
+ LPCWSTR wszFileName);
+
+ ReleaseHolder<CLRPrivAssemblyReflectionOnlyWinRT> AddFileNameToAssemblyMapping(
+ LPCWSTR wszFileName,
+ CLRPrivAssemblyReflectionOnlyWinRT * pAssembly);
+
+ void RemoveFileNameToAssemblyMapping(
+ LPCWSTR wszFileName);
+
+ ReleaseHolder<CLRPrivAssemblyReflectionOnlyWinRT> FindOrCreateAssemblyByFileName(
+ LPCWSTR wszFileName);
+
+ //=============================================================================================
+ // Internal methods
+
+ // Returns list of file names from code:m_NamespaceToFileNameListMap for the namespace.
+ void GetFileNameListForNamespace(
+ LPCWSTR wszNamespace,
+ DomainAssembly * pParentAssembly,
+ CLRPrivBinderUtil::WStringList ** ppFileNameList);
+
+ // Adds (thread-safe) list of file names to code:m_NamespaceToFileNameListMap for the namespace - returns the cached value.
+ BOOL AddFileNameListForNamespace(
+ LPCWSTR wszNamespace,
+ CLRPrivBinderUtil::WStringList * pFileNameList,
+ CLRPrivBinderUtil::WStringList ** ppFileNameList);
+
+ HRESULT BindWinRtType_Internal(
+ LPCSTR szTypeNamespace,
+ LPCSTR szTypeClassName,
+ DomainAssembly * pParentAssembly,
+ CLRPrivAssemblyReflectionOnlyWinRT ** ppAssembly);
+
+private:
+ //=============================================================================================
+
+ // Namespace -> FileName list map ... items are never removed
+ NamespaceToFileNameListMap m_NamespaceToFileNameListMap;
+ // FileName -> CLRPrivAssemblyReflectionOnlyWinRT * map ... items can be removed when CLRPrivAssemblyReflectionOnlyWinRT dies
+ FileNameToAssemblyMap m_FileNameToAssemblyMap;
+
+ // Lock for the above maps
+ Crst m_MapsLock;
+
+ //=============================================================================================
+ CLRPrivTypeCacheReflectionOnlyWinRT * m_pTypeCache;
+
+}; // class CLRPrivBinderReflectionOnlyWinRT
+
+
+//=====================================================================================================================
+//=====================================================================================================================
+//=====================================================================================================================
+class CLRPrivAssemblyReflectionOnlyWinRT :
+ public IUnknownCommon<ICLRPrivAssembly>
+{
+ friend class CLRPrivBinderReflectionOnlyWinRT;
+
+public:
+ //=============================================================================================
+ // Class methods
+
+ CLRPrivAssemblyReflectionOnlyWinRT(
+ LPCWSTR wzSimpleName,
+ CLRPrivBinderReflectionOnlyWinRT * pBinder,
+ CLRPrivBinderUtil::CLRPrivResourcePathImpl * pResourceIL);
+
+ //=============================================================================================
+ // IUnknown interface methods
+
+ // Overridden to implement self-removal from assembly map code:CLRPrivBinderReflectionOnlyWinRT::m_FileNameToAssemblyMap.
+ STDMETHOD_(ULONG, Release)();
+
+ //=============================================================================================
+ // ICLRPrivBinder interface methods
+
+ STDMETHOD(BindAssemblyByName)(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ return m_pBinder->BindAssemblyByName(pAssemblyName, ppAssembly);
+ }
+
+ // Implements interface method code:ICLRPrivBinder::VerifyBind.
+ STDMETHOD(VerifyBind)(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly * pAssembly,
+ ICLRPrivAssemblyInfo * pAssemblyInfo)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ return m_pBinder->VerifyBind(pAssemblyName, pAssembly, pAssemblyInfo);
+ }
+
+ //---------------------------------------------------------------------------------------------
+ // Implements interface method code:ICLRPrivBinder::GetBinderFlags
+ STDMETHOD(GetBinderFlags)(
+ DWORD *pBinderFlags)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ return m_pBinder->GetBinderFlags(pBinderFlags);
+ }
+
+ // Implements interface method code:ICLRPrivBinder::GetBinderID.
+ STDMETHOD(GetBinderID)(
+ UINT_PTR * pBinderId)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ return m_pBinder->GetBinderID(pBinderId);
+ }
+
+ //=============================================================================================
+ // ICLRPrivAssembly interface methods
+
+ // Implements interface method code:ICLRPrivAssembly::IsShareable.
+ STDMETHOD(IsShareable)(
+ BOOL * pbIsShareable);
+
+ // Implements interface method code:ICLRPrivAssembly::GetAvailableImageTypes.
+ STDMETHOD(GetAvailableImageTypes)(
+ LPDWORD pdwImageTypes);
+
+ // Implements interface method code:ICLRPrivAssembly::GetImageResource.
+ STDMETHOD(GetImageResource)(
+ DWORD dwImageType,
+ DWORD * pdwImageType,
+ ICLRPrivResource ** ppIResource);
+
+ // Implements code:ICLRPrivBinder::FindAssemblyBySpec
+ STDMETHOD(FindAssemblyBySpec)(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly)
+ { STATIC_CONTRACT_WRAPPER; return m_pBinder->FindAssemblyBySpec(pvAppDomain, pvAssemblySpec, pResult, ppAssembly); }
+
+private:
+ //=============================================================================================
+
+ ReleaseHolder<CLRPrivBinderReflectionOnlyWinRT> m_pBinder;
+ ReleaseHolder<CLRPrivBinderUtil::CLRPrivResourcePathImpl> m_pResourceIL;
+
+}; // class CLRPrivAssemblyReflectionOnlyWinRT
+
+#endif //FEATURE_REFLECTION_ONLY_LOAD
+#endif //FEATURE_HOSTED_BINDER
diff --git a/src/vm/clrprivbinderutil.cpp b/src/vm/clrprivbinderutil.cpp
new file mode 100644
index 0000000000..dc534b3b67
--- /dev/null
+++ b/src/vm/clrprivbinderutil.cpp
@@ -0,0 +1,836 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// Contains helper types for assembly binding host infrastructure.
+
+#include "common.h"
+
+#include "utilcode.h"
+#include "strsafe.h"
+
+#include "clrprivbinderutil.h"
+
+inline
+LPWSTR CopyStringThrowing(
+ LPCWSTR wszString)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ NewArrayHolder<WCHAR> wszDup = NULL;
+ if (wszString != NULL)
+ {
+ size_t wszLen = wcslen(wszString);
+ wszDup = new WCHAR[wszLen + 1];
+ IfFailThrow(StringCchCopy(wszDup, wszLen + 1, wszString));
+ }
+ wszDup.SuppressRelease();
+
+ return wszDup;
+}
+
+
+namespace CLRPrivBinderUtil
+{
+#ifndef CLR_STANDALONE_BINDER
+#ifdef FEATURE_FUSION
+ //-----------------------------------------------------------------------------------------------------------------
+ CLRPrivAssemblyBindResultWrapper::CLRPrivAssemblyBindResultWrapper(
+ IAssemblyName *pIAssemblyName,
+ PCWSTR wzAssemblyPath,
+ IILFingerprintFactory *pILFingerprintFactory
+ ) :
+ m_wzAssemblyPath(DuplicateStringThrowing(wzAssemblyPath)),
+ m_pIAssemblyName(clr::SafeAddRef(pIAssemblyName)),
+ m_bIBindResultNISet(false),
+ m_pIBindResultNI(nullptr),
+ m_pIILFingerprint(nullptr),
+ m_pILFingerprintFactory(clr::SafeAddRef(pILFingerprintFactory)),
+ m_lock(CrstLeafLock)
+ {
+ STANDARD_VM_CONTRACT;
+ VALIDATE_ARG_THROW(pIAssemblyName != nullptr && wzAssemblyPath != nullptr);
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ CLRPrivAssemblyBindResultWrapper::~CLRPrivAssemblyBindResultWrapper()
+ {
+ clr::SafeRelease(m_pIAssemblyName);
+ clr::SafeRelease(m_pIILFingerprint);
+ clr::SafeRelease(m_pIBindResultNI);
+ }
+
+ //=================================================================================================================
+ // IBindResult methods
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetAssemblyNameDef(
+ /*out*/ IAssemblyName **ppIAssemblyNameDef)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ VALIDATE_ARG_RET(ppIAssemblyNameDef != nullptr);
+ *ppIAssemblyNameDef = clr::SafeAddRef(m_pIAssemblyName);
+ return S_OK;
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetNextAssemblyModuleName(
+ /*in*/ DWORD dwNIndex,
+ __inout_ecount(*pdwCCModuleName) LPWSTR pwzModuleName,
+ /*in, out, annotation("__inout")*/ LPDWORD pdwCCModuleName)
+ {
+ STANDARD_BIND_CONTRACT;
+ _ASSERTE(!("E_NOTIMPL: " __FUNCTION__));
+ return E_NOTIMPL;
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetAssemblyLocation(
+ /*out*/ IAssemblyLocation **ppIAssemblyLocation)
+ {
+ STANDARD_BIND_CONTRACT;
+ VALIDATE_ARG_RET(ppIAssemblyLocation != nullptr);
+ return this->QueryInterface(ppIAssemblyLocation);
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetNativeImage(
+ /*out*/ IBindResult **ppIBindResultNI,
+ /*out*/ BOOL *pfIBindResultNIProbed)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // m_bIBindResultNISet must always be read *before* m_pIBindResultNI
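+        // (SetNativeImage stores m_pIBindResultNI before publishing m_bIBindResultNISet under
+        // m_lock, so a reader that observes the flag as set can safely read the pointer.)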
+ bool bIBindResultNISet = m_bIBindResultNISet;
+
+ if (pfIBindResultNIProbed != nullptr)
+ *pfIBindResultNIProbed = bIBindResultNISet;
+
+ if (bIBindResultNISet && ppIBindResultNI != nullptr)
+ *ppIBindResultNI = clr::SafeAddRef(m_pIBindResultNI);
+
+ return S_OK;
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::SetNativeImage(
+ /*in*/ IBindResult *pIBindResultNI,
+ /*out*/ IBindResult **ppIBindResultNIFinal)
+ {
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ // Native Binder needs S_FALSE returned if it loses the race.
+ hr = S_FALSE;
+
+ if (!m_bIBindResultNISet)
+ {
+ CrstHolder lock(&m_lock);
+ if (!m_bIBindResultNISet)
+ {
+ m_pIBindResultNI = clr::SafeAddRef(pIBindResultNI);
+ m_bIBindResultNISet = true;
+
+ // Won the race!
+ hr = S_OK;
+ }
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (ppIBindResultNIFinal != nullptr)
+ *ppIBindResultNIFinal = clr::SafeAddRef(m_pIBindResultNI);
+
+ return hr;
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::IsEqual(
+ /*in*/ IUnknown *pIUnk)
+ {
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET(pIUnk != nullptr);
+
+ ReleaseHolder<IBindResult> pIBindResult;
+
+ hr = pIUnk->QueryInterface(__uuidof(IBindResult), (void **)&pIBindResult);
+ if (SUCCEEDED(hr))
+ {
+ hr = pIBindResult == static_cast<IBindResult*>(this) ? S_OK : S_FALSE;
+ }
+ else if (hr == E_NOINTERFACE)
+ {
+ hr = S_FALSE;
+ }
+
+ return hr;
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetNextAssemblyNameRef(
+ /*in*/ DWORD dwNIndex,
+ /*out*/ IAssemblyName **ppIAssemblyNameRef)
+ {
+ STANDARD_BIND_CONTRACT;
+ _ASSERTE(!("E_UNEXPECTED: " __FUNCTION__));
+ return E_UNEXPECTED;
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetNextDependentAssembly(
+ /*in*/ DWORD dwNIndex,
+ /*out*/ IUnknown **ppIUnknownAssembly)
+ {
+ STANDARD_BIND_CONTRACT;
+ _ASSERTE(!("E_UNEXPECTED: " __FUNCTION__));
+ return E_UNEXPECTED;
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetAssemblyLocationOfILImage(
+ /*out*/ IAssemblyLocation **ppAssemblyLocation)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return this->QueryInterface(ppAssemblyLocation);
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetILFingerprint(
+ /*out*/ IILFingerprint **ppFingerprint)
+ {
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET(ppFingerprint != nullptr);
+
+ EX_TRY
+ {
+ *ppFingerprint = m_pIILFingerprint;
+ if (*ppFingerprint == nullptr)
+ {
+ ReleaseHolder<IILFingerprint> pFingerprint;
+ if (SUCCEEDED(hr = m_pILFingerprintFactory->GetILFingerprintForPath(GetILAssemblyPath(), &pFingerprint)))
+ {
+ if (InterlockedCompareExchangeT<IILFingerprint>(&m_pIILFingerprint, pFingerprint, nullptr) == nullptr)
+ {
+ pFingerprint.SuppressRelease();
+ }
+ }
+ }
+ *ppFingerprint = clr::SafeAddRef(m_pIILFingerprint);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetSourceILTimestamp(
+ /*out*/ FILETIME* pFileTime)
+ {
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET(pFileTime != nullptr);
+
+ EX_TRY
+ {
+ WIN32_FILE_ATTRIBUTE_DATA wfd;
+ if (!WszGetFileAttributesEx(GetILAssemblyPath(), GetFileExInfoStandard, &wfd))
+ ThrowLastError();
+ *pFileTime = wfd.ftLastWriteTime;
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetSourceILSize(
+ /*out*/ DWORD* pSize)
+ {
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET(pSize != nullptr);
+
+ EX_TRY
+ {
+ WIN32_FILE_ATTRIBUTE_DATA wfd;
+ if (!WszGetFileAttributesEx(GetILAssemblyPath(), GetFileExInfoStandard, &wfd))
+ ThrowLastError();
+ if(wfd.nFileSizeHigh != 0)
+ ThrowHR(COR_E_OVERFLOW);
+ *pSize = wfd.nFileSizeLow;
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetNIInfo(
+ /*out*/ INativeImageInstallInfo** pInfo)
+ {
+ STANDARD_BIND_CONTRACT;
+ _ASSERTE(!("E_UNEXPECTED: " __FUNCTION__));
+ return E_UNEXPECTED;
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetFlags(
+ /*out*/ DWORD * pdwFlags)
+ {
+ STANDARD_BIND_CONTRACT;
+ PRECONDITION(CheckPointer(pdwFlags));
+ if (pdwFlags == nullptr)
+ {
+ return E_POINTER;
+ }
+
+        // No effort is currently made to open assemblies and build a full IAssemblyName - it
+        // contains only the simple name. Since AppX packages cannot be in-place updated, we can be confident
+        // that the binding environment will remain unchanged between NGEN and runtime. As such, return the
+        // flag to indicate that the native image binder should skip self assembly definition validation.
+ *pdwFlags = IBindResultFlag_AssemblyNameDefIncomplete;
+
+ return S_OK;
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetLocationType(
+ /*out*/DWORD *pdwLocationType)
+ {
+ LIMITED_METHOD_CONTRACT;
+ VALIDATE_ARG_RET(pdwLocationType != nullptr);
+
+ *pdwLocationType = ASSEMBLY_LOCATION_PATH;
+ return S_OK;
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
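+    // Follows the common two-call size-query pattern: when pwzAssemblyPath is null or the
+    // buffer is too small, the required character count (including the terminator) is returned
+    // in *pdwccAssemblyPath along with HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER).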
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetPath(
+ __inout_ecount(*pdwccAssemblyPath) LPWSTR pwzAssemblyPath,
+ /*in, annotation("__inout")*/ LPDWORD pdwccAssemblyPath)
+ {
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET(pdwccAssemblyPath != nullptr);
+
+ EX_TRY
+ {
+ DWORD cchILAssemblyPath = static_cast<DWORD>(wcslen(GetILAssemblyPath())) + 1;
+ if (pwzAssemblyPath != nullptr && cchILAssemblyPath <= *pdwccAssemblyPath)
+ {
+ IfFailThrow(StringCchCopy(pwzAssemblyPath, *pdwccAssemblyPath, GetILAssemblyPath()));
+ *pdwccAssemblyPath = cchILAssemblyPath;
+ hr = S_OK;
+ }
+ else
+ {
+ *pdwccAssemblyPath = cchILAssemblyPath;
+ hr = HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT CLRPrivAssemblyBindResultWrapper::GetHostID(
+ /*out*/ UINT64 *puiHostID)
+ {
+ STANDARD_BIND_CONTRACT;
+ _ASSERTE(!("E_UNEXPECTED: " __FUNCTION__));
+ return E_UNEXPECTED;
+ }
+#endif //FEATURE_FUSION
+#endif //!CLR_STANDALONE_BINDER
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT VerifyBind(
+ IAssemblyName *pRefAssemblyName,
+ ICLRPrivAssemblyInfo *pDefAssemblyInfo)
+ {
+ STANDARD_BIND_CONTRACT;
+
+ HRESULT hr = S_OK;
+ VALIDATE_PTR_RET(pRefAssemblyName);
+ VALIDATE_PTR_RET(pDefAssemblyInfo);
+
+ AssemblyIdentity refIdentity;
+ IfFailRet(refIdentity.Initialize(pRefAssemblyName));
+
+ AssemblyIdentity defIdentity;
+ IfFailRet(defIdentity.Initialize(pDefAssemblyInfo));
+
+ return VerifyBind(refIdentity, defIdentity);
+ }
+
+ //-----------------------------------------------------------------------------------------------------------------
+ HRESULT VerifyBind(
+ CLRPrivBinderUtil::AssemblyIdentity const & refIdentity,
+ CLRPrivBinderUtil::AssemblyIdentity const & defIdentity)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ //
+ // Compare versions. Success conditions are the same as those in Silverlight:
+ // 1. Reference identity has no version.
+ // 2. Both identities have versions, and ref.version <= def.version.
+ //
+        // Since the default value of AssemblyVersion is 0.0.0.0, a ref with no
+        // version set is compared as 0.0.0.0, which is always less than or equal
+        // to the version contained in the def.
+ //
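+        // For example: a ref version of 1.2.0.0 binds successfully against a def version of
+        // 1.2.0.0 or 1.3.0.0, while a def version of 1.1.0.0 fails with
+        // CLR_E_BIND_ASSEMBLY_VERSION_TOO_LOW.
+        //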
+
+ if (defIdentity.Version < refIdentity.Version)
+ { // Bound assembly has a lower version number than the reference.
+ return CLR_E_BIND_ASSEMBLY_VERSION_TOO_LOW;
+ }
+
+ //
+ // Compare public key tokens. Success conditions are:
+ // 1. Reference identity has no PKT.
+ // 2. Both identities have identical PKT values.
+ //
+
+ if (refIdentity.KeyToken.GetSize() != 0 && // Ref without PKT always passes.
+ refIdentity.KeyToken != defIdentity.KeyToken) // Otherwise Def must have matching PKT.
+ {
+ return CLR_E_BIND_ASSEMBLY_PUBLIC_KEY_MISMATCH;
+ }
+
+ return S_OK;
+ }
+
+ //---------------------------------------------------------------------------------------------
+ CLRPrivResourcePathImpl::CLRPrivResourcePathImpl(LPCWSTR wzPath)
+ : m_wzPath(CopyStringThrowing(wzPath))
+ { STANDARD_VM_CONTRACT; }
+
+ //---------------------------------------------------------------------------------------------
+ HRESULT CLRPrivResourcePathImpl::GetPath(
+ DWORD cchBuffer,
+ LPDWORD pcchBuffer,
+ __inout_ecount_part(cchBuffer, *pcchBuffer) LPWSTR wzBuffer)
+ {
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+
+ if (pcchBuffer == nullptr)
+ IfFailRet(E_INVALIDARG);
+
+ *pcchBuffer = (DWORD)wcslen(m_wzPath);
+
+ if (wzBuffer != nullptr)
+ {
+ if (FAILED(StringCchCopy(wzBuffer, cchBuffer, m_wzPath)))
+ IfFailRet(HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER));
+ }
+
+ return hr;
+ }
+
+ //---------------------------------------------------------------------------------------------
+ CLRPrivResourceStreamImpl::CLRPrivResourceStreamImpl(IStream * pStream)
+ : m_pStream(pStream)
+ {
+ LIMITED_METHOD_CONTRACT;
+ pStream->AddRef();
+ }
+
+ //---------------------------------------------------------------------------------------------
+ HRESULT CLRPrivResourceStreamImpl::GetStream(
+ REFIID riid,
+ LPVOID * ppvStream)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pStream->QueryInterface(riid, ppvStream);
+ }
+
+ //---------------------------------------------------------------------------------------------
+ HRESULT AssemblyVersion::Initialize(
+ IAssemblyName * pAssemblyName)
+ {
+ WRAPPER_NO_CONTRACT;
+ HRESULT hr = pAssemblyName->GetVersion(&dwMajorMinor, &dwBuildRevision);
+ if (hr == FUSION_E_INVALID_NAME)
+ {
+ hr = S_FALSE;
+ }
+ return hr;
+ }
+
+ //---------------------------------------------------------------------------------------------
+ HRESULT AssemblyVersion::Initialize(
+ ICLRPrivAssemblyInfo * pAssemblyInfo)
+ {
+ WRAPPER_NO_CONTRACT;
+ return pAssemblyInfo->GetAssemblyVersion(&wMajor, &wMinor, &wBuild, &wRevision);
+ }
+
+ //---------------------------------------------------------------------------------------------
+ HRESULT PublicKey::Initialize(
+ ICLRPrivAssemblyInfo * pAssemblyInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+
+ VALIDATE_PTR_RET(pAssemblyInfo);
+
+ Uninitialize();
+
+ DWORD cbKeyDef = 0;
+ hr = pAssemblyInfo->GetAssemblyPublicKey(cbKeyDef, &cbKeyDef, nullptr);
+
+ if (hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ {
+ if (cbKeyDef != 0)
+ {
+ NewArrayHolder<BYTE> pbKeyDef = new (nothrow) BYTE[cbKeyDef];
+ IfNullRet(pbKeyDef);
+
+ if (SUCCEEDED(hr = pAssemblyInfo->GetAssemblyPublicKey(cbKeyDef, &cbKeyDef, pbKeyDef)))
+ {
+ m_key = pbKeyDef.Extract();
+ m_key_owned = true;
+ m_size = cbKeyDef;
+ }
+ }
+ }
+
+ return hr;
+ }
+
+ //---------------------------------------------------------------------------------------------
+ HRESULT PublicKeyToken::Initialize(
+ BYTE * pbKeyToken,
+ DWORD cbKeyToken)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ VALIDATE_CONDITION((pbKeyToken == nullptr) == (cbKeyToken == 0), return E_INVALIDARG);
+ VALIDATE_ARG_RET(cbKeyToken == 0 || cbKeyToken == PUBLIC_KEY_TOKEN_LEN1);
+
+ m_cbKeyToken = cbKeyToken;
+
+ if (pbKeyToken != nullptr)
+ {
+ memcpy(m_rbKeyToken, pbKeyToken, PUBLIC_KEY_TOKEN_LEN1);
+ }
+ else
+ {
+ memset(m_rbKeyToken, 0, PUBLIC_KEY_TOKEN_LEN1);
+ }
+
+ return S_OK;
+ }
+
+ //---------------------------------------------------------------------------------------------
+ HRESULT PublicKeyToken::Initialize(
+ PublicKey const & pk)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ StrongNameBufferHolder<BYTE> pbKeyToken;
+ DWORD cbKeyToken;
+
+ if (!StrongNameTokenFromPublicKey(const_cast<BYTE*>(pk.GetKey()), pk.GetSize(), &pbKeyToken, &cbKeyToken))
+ {
+ return static_cast<HRESULT>(StrongNameErrorInfo());
+ }
+
+ return Initialize(pbKeyToken, cbKeyToken);
+ }
+
+ //=====================================================================================================================
+ HRESULT PublicKeyToken::Initialize(
+ IAssemblyName * pName)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ DWORD cbKeyToken = sizeof(m_rbKeyToken);
+ hr = pName->GetProperty(ASM_NAME_PUBLIC_KEY_TOKEN, m_rbKeyToken, &cbKeyToken);
+ if (SUCCEEDED(hr))
+ {
+ m_cbKeyToken = cbKeyToken;
+ }
+
+ if (hr == FUSION_E_INVALID_NAME)
+ {
+ hr = S_FALSE;
+ }
+
+ return hr;
+ }
+
+ //=====================================================================================================================
+ HRESULT PublicKeyToken::Initialize(
+ ICLRPrivAssemblyInfo * pName)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ PublicKey pk;
+ IfFailRet(pk.Initialize(pName));
+
+ if (hr == S_OK) // Can return S_FALSE if no public key/token defined.
+ {
+ hr = Initialize(pk);
+ }
+
+ return hr;
+ }
+
+ //=====================================================================================================================
+ bool operator==(
+ PublicKeyToken const & lhs,
+ PublicKeyToken const & rhs)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Sizes must match
+ if (lhs.GetSize() != rhs.GetSize())
+ {
+ return false;
+ }
+
+ // Empty PKT values are considered to be equal.
+ if (lhs.GetSize() == 0)
+ {
+ return true;
+ }
+
+ // Compare values.
+ return memcmp(lhs.GetToken(), rhs.GetToken(), lhs.GetSize()) == 0;
+ }
+
+ //=====================================================================================================================
+ HRESULT AssemblyIdentity::Initialize(
+ LPCWSTR wzName)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return StringCchCopy(Name, sizeof(Name) / sizeof(Name[0]), wzName);
+ }
+
+ //=====================================================================================================================
+ HRESULT AssemblyIdentity::Initialize(
+ ICLRPrivAssemblyInfo * pAssemblyInfo)
+ {
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ DWORD cchName = sizeof(Name) / sizeof(Name[0]);
+ IfFailRet(pAssemblyInfo->GetAssemblyName(cchName, &cchName, Name));
+ IfFailRet(Version.Initialize(pAssemblyInfo));
+ IfFailRet(KeyToken.Initialize(pAssemblyInfo));
+
+ return hr;
+ }
+
+ //=====================================================================================================================
+ HRESULT AssemblyIdentity::Initialize(
+ IAssemblyName * pAssemblyName)
+ {
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ DWORD cchName = sizeof(Name) / sizeof(Name[0]);
+ IfFailRet(pAssemblyName->GetName(&cchName, Name));
+ IfFailRet(Version.Initialize(pAssemblyName));
+ IfFailRet(KeyToken.Initialize(pAssemblyName));
+
+ return hr;
+ }
+
+#ifdef FEATURE_FUSION
+#ifndef CLR_STANDALONE_BINDER
+ //=====================================================================================================================
+ HRESULT AssemblyIdentity::Initialize(
+ AssemblySpec * pSpec)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END
+
+ HRESULT hr = S_OK;
+
+ if (0 == WszMultiByteToWideChar(
+ CP_UTF8, 0 /*flags*/, pSpec->GetName(), -1, Name, (int) (sizeof(Name) / sizeof(Name[0]))))
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+ AssemblyMetaDataInternal * pAMDI = pSpec->GetContext();
+ if (pAMDI != nullptr)
+ {
+ Version.wMajor = pAMDI->usMajorVersion;
+ Version.wMinor = pAMDI->usMinorVersion;
+ Version.wBuild = pAMDI->usBuildNumber;
+ Version.wRevision = pAMDI->usRevisionNumber;
+ }
+
+ if (pSpec->HasPublicKeyToken())
+ {
+ PBYTE pbKey;
+ DWORD cbKey;
+ pSpec->GetPublicKeyToken(&pbKey, &cbKey);
+ IfFailRet(KeyToken.Initialize(pbKey, cbKey));
+ }
+
+ return hr;
+ }
+#endif //!CLR_STANDALONE_BINDER
+#endif //FEATURE_FUSION
+
+
+ //=====================================================================================================================
+ // Destroys list of strings (code:WStringList).
+ void
+ WStringList_Delete(
+ WStringList * pList)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (pList != nullptr)
+ {
+ for (WStringListElem * pElem = pList->RemoveHead(); pElem != nullptr; pElem = pList->RemoveHead())
+ {
+ // Delete the string
+ delete [] pElem->GetValue();
+ delete pElem;
+ }
+
+ delete pList;
+ }
+ }
+
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+///// ----------------------------- Direct calls to VM -------------------------------------------
+////////////////////////////////////////////////////////////////////////////////////////////////////
+#if !defined(CLR_STANDALONE_BINDER) && defined(FEATURE_APPX_BINDER)
+ ICLRPrivAssembly* RaiseAssemblyResolveEvent(IAssemblyName *pAssemblyName, ICLRPrivAssembly* pRequestingAssembly)
+ {
+ CONTRACT(ICLRPrivAssembly*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(AppX::IsAppXProcess());
+ PRECONDITION(AppDomain::GetCurrentDomain()->IsDefaultDomain());
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+        BinderMethodID methodId = METHOD__APP_DOMAIN__ON_ASSEMBLY_RESOLVE; // post-bind execution event (the classic V1.0 event)
+
+ // Elevate threads allowed loading level. This allows the host to load an assembly even in a restricted
+ // condition. Note, however, that this exposes us to possible recursion failures, if the host tries to
+ // load the assemblies currently being loaded. (Such cases would then throw an exception.)
+
+ OVERRIDE_LOAD_LEVEL_LIMIT(FILE_ACTIVE);
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ DomainAssembly* pDomainAssembly = AppDomain::GetCurrentDomain()->FindAssembly(pRequestingAssembly);
+
+ GCX_COOP();
+
+ Assembly* pAssembly = NULL;
+
+ struct _gc {
+ OBJECTREF AppDomainRef;
+ OBJECTREF AssemblyRef;
+ STRINGREF str;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ SString ssAssemblyName;
+ FusionBind::GetAssemblyNameDisplayName(pAssemblyName, ssAssemblyName);
+
+ GCPROTECT_BEGIN(gc);
+ if ((gc.AppDomainRef = GetAppDomain()->GetRawExposedObject()) != NULL)
+ {
+ gc.AssemblyRef = pDomainAssembly->GetExposedAssemblyObject();
+
+ MethodDescCallSite onAssemblyResolve(methodId, &gc.AppDomainRef);
+
+ gc.str = StringObject::NewString(ssAssemblyName.GetUnicode());
+ ARG_SLOT args[3] = {
+ ObjToArgSlot(gc.AppDomainRef),
+ ObjToArgSlot(gc.AssemblyRef),
+ ObjToArgSlot(gc.str)
+ };
+ ASSEMBLYREF ResultingAssemblyRef = (ASSEMBLYREF) onAssemblyResolve.Call_RetOBJECTREF(args);
+ if (ResultingAssemblyRef != NULL)
+ {
+ pAssembly = ResultingAssemblyRef->GetAssembly();
+ }
+ }
+ GCPROTECT_END();
+
+ if (pAssembly != NULL)
+ {
+ if (pAssembly->IsIntrospectionOnly())
+ {
+ // Cannot return an introspection assembly from an execution callback or vice-versa
+ COMPlusThrow(kFileLoadException, IDS_CLASSLOAD_ASSEMBLY_RESOLVE_RETURNED_INTROSPECTION );
+ }
+ if (pAssembly->IsCollectible())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleAssemblyResolve"));
+ }
+
+ // Check that the public key token matches the one specified in the spec
+ // MatchPublicKeys throws as appropriate.
+
+ StackScratchBuffer ssBuffer;
+ AssemblySpec spec;
+ IfFailThrow(spec.Init(ssAssemblyName.GetUTF8(ssBuffer)));
+ spec.MatchPublicKeys(pAssembly);
+
+ }
+
+ if (pAssembly == nullptr)
+ ThrowHR(COR_E_FILENOTFOUND);
+
+ RETURN pAssembly->GetManifestFile()->GetHostAssembly();
+ }
+
+ BOOL CompareHostBinderSpecs(AssemblySpec * a1, AssemblySpec * a2)
+ {
+ WRAPPER_NO_CONTRACT;
+ return a1->CompareEx(a2, AssemblySpec::ASC_Default);
+ }
+#endif //!CLR_STANDALONE_BINDER && FEATURE_APPX_BINDER
+} // namespace CLRPrivBinderUtil
diff --git a/src/vm/clrprivbinderwinrt.cpp b/src/vm/clrprivbinderwinrt.cpp
new file mode 100644
index 0000000000..dc6556a7cf
--- /dev/null
+++ b/src/vm/clrprivbinderwinrt.cpp
@@ -0,0 +1,1777 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+// Contains the types that implement code:ICLRPrivBinder and code:ICLRPrivAssembly for WinRT binding.
+//
+//=============================================================================================
+
+#include "common.h" // precompiled header
+#ifdef CLR_STANDALONE_BINDER
+#include "utilcode.h"
+#include "sstring.h"
+#include "stringarraylist.h"
+#endif
+
+#ifndef FEATURE_CORECLR
+#include "assemblyusagelogmanager.h"
+#endif
+#include "clr/fs/file.h"
+#include "clrprivbinderwinrt.h"
+#include "clrprivbinderutil.h"
+
+#ifndef DACCESS_COMPILE
+
+//=====================================================================================================================
+#include "sstring.h"
+#ifdef FEATURE_FUSION
+#include "fusionlogging.h"
+#include "policy.h"
+#include "imprthelpers.h" // in fusion/inc
+#include "asmimprt.h"
+#endif
+#ifdef FEATURE_APPX
+#include "appxutil.h"
+#endif
+#include <TypeResolution.h>
+#include "delayloadhelpers.h"
+#ifdef FEATURE_CORECLR
+#include "../binder/inc/applicationcontext.hpp"
+#include "../binder/inc/assemblybinder.hpp"
+#include "../binder/inc/assembly.hpp"
+#include "../binder/inc/debuglog.hpp"
+#include "../binder/inc/utils.hpp"
+#include "../binder/inc/fusionassemblyname.hpp"
+#endif
+
+#ifdef CROSSGEN_COMPILE
+#include "crossgenroresolvenamespace.h"
+#include "../binder/inc/fusionassemblyname.hpp"
+#endif
+
+#ifdef CLR_STANDALONE_BINDER
+#include "fakepeimage.h"
+#include "coreclr/corebindresult.inl"
+#endif
+
+using namespace CLRPrivBinderUtil;
+
+
+//=====================================================================================================================
+#define WINDOWS_NAMESPACE W("Windows")
+#define WINDOWS_NAMESPACE_PREFIX WINDOWS_NAMESPACE W(".")
+
+#define WINDOWS_NAMESPACEA "Windows"
+#define WINDOWS_NAMESPACE_PREFIXA WINDOWS_NAMESPACEA "."
+
+//=====================================================================================================================
+static BOOL
+IsWindowsNamespace(const WCHAR * wszNamespace)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (wcsncmp(wszNamespace, WINDOWS_NAMESPACE_PREFIX, (_countof(WINDOWS_NAMESPACE_PREFIX) - 1)) == 0)
+ {
+ return TRUE;
+ }
+ else if (wcscmp(wszNamespace, WINDOWS_NAMESPACE) == 0)
+ {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+//=====================================================================================================================
+BOOL
+IsWindowsNamespace(const char * wszNamespace)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (strncmp(wszNamespace, WINDOWS_NAMESPACE_PREFIXA, (_countof(WINDOWS_NAMESPACE_PREFIXA) - 1)) == 0)
+ {
+ return TRUE;
+ }
+ else if (strcmp(wszNamespace, WINDOWS_NAMESPACEA) == 0)
+ {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+//=====================================================================================================================
+DELAY_LOADED_FUNCTION(WinTypes, RoResolveNamespace);
+
+//=====================================================================================================================
+HRESULT RoResolveNamespace(
+ _In_opt_ const HSTRING name,
+ _In_opt_ const HSTRING windowsMetaDataDir,
+ _In_ const DWORD packageGraphDirsCount,
+ _In_reads_opt_(packageGraphDirsCount) const HSTRING *packageGraphDirs,
+ _Out_opt_ DWORD *metaDataFilePathsCount,
+ _Outptr_opt_result_buffer_(*metaDataFilePathsCount) HSTRING **metaDataFilePaths,
+ _Out_opt_ DWORD *subNamespacesCount,
+ _Outptr_opt_result_buffer_(*subNamespacesCount) HSTRING **subNamespaces)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+
+ decltype(RoResolveNamespace) * pFunc = nullptr;
+ IfFailRet(DelayLoad::WinTypes::RoResolveNamespace.GetValue(&pFunc));
+
+ return (*pFunc)(
+ name, windowsMetaDataDir, packageGraphDirsCount, packageGraphDirs, metaDataFilePathsCount,
+ metaDataFilePaths, subNamespacesCount, subNamespaces);
+}
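+// (Delay-loading the export avoids a static import of the WinTypes module, so the runtime can
+// still start on platforms where it is unavailable; see the WinRTSupported() check in
+// BindWinRTAssemblyByName.)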
+
+//=====================================================================================================================
+CLRPrivBinderWinRT * CLRPrivBinderWinRT::s_pSingleton = nullptr;
+
+//=====================================================================================================================
+CLRPrivBinderWinRT::CLRPrivBinderWinRT(
+ ICLRPrivBinder * pParentBinder,
+ CLRPrivTypeCacheWinRT * pWinRtTypeCache,
+ LPCWSTR * rgwzAltPath,
+ UINT cAltPaths,
+ NamespaceResolutionKind fNamespaceResolutionKind,
+ BOOL fCanUseNativeImages)
+ : m_pTypeCache(clr::SafeAddRef(pWinRtTypeCache))
+ , m_pParentBinder(pParentBinder) // Do not addref, lifetime directly tied to parent.
+#ifdef FEATURE_FUSION
+ , m_fCanUseNativeImages(fCanUseNativeImages)
+#endif
+ , m_fNamespaceResolutionKind(fNamespaceResolutionKind)
+#ifdef FEATURE_CORECLR
+ , m_pApplicationContext(nullptr)
+ , m_appLocalWinMDPath(nullptr)
+#endif
+#ifdef FEATURE_COMINTEROP_WINRT_DESKTOP_HOST
+ , m_fCanSetLocalWinMDPath(TRUE)
+#endif // FEATURE_COMINTEROP_WINRT_DESKTOP_HOST
+{
+ STANDARD_VM_CONTRACT;
+ PRECONDITION(CheckPointer(pWinRtTypeCache));
+
+#ifndef CROSSGEN_COMPILE
+#ifndef CLR_STANDALONE_BINDER
+ // - To prevent deadlock with GC thread, we cannot trigger GC while holding the lock
+ // - To prevent deadlock with profiler thread, we cannot allow thread suspension
+ m_MapsLock.Init(
+ CrstCLRPrivBinderMaps,
+        (CrstFlags)(CRST_REENTRANCY          // Reentrancy is needed for code:CLRPrivAssemblyWinRT::Release
+ | CRST_GC_NOTRIGGER_WHEN_TAKEN
+ | CRST_DEBUGGER_THREAD
+ INDEBUG(| CRST_DEBUG_ONLY_CHECK_FORBID_SUSPEND_THREAD)));
+ m_MapsAddLock.Init(CrstCLRPrivBinderMapsAdd);
+#endif // CLR_STANDALONE_BINDER
+
+#ifdef FEATURE_COMINTEROP_WINRT_DESKTOP_HOST
+ m_localWinMDPathLock.Init(CrstCrstCLRPrivBinderLocalWinMDPath);
+#endif // FEATURE_COMINTEROP_WINRT_DESKTOP_HOST
+
+ // Copy altpaths
+ if (cAltPaths > 0)
+ {
+ m_rgAltPaths.Allocate(cAltPaths);
+
+ for (UINT iAltPath = 0; iAltPath < cAltPaths; iAltPath++)
+ {
+ IfFailThrow(WindowsCreateString(
+ rgwzAltPath[iAltPath],
+ (UINT32)wcslen(rgwzAltPath[iAltPath]),
+ m_rgAltPaths.GetRawArray() + iAltPath));
+ }
+ }
+#if defined(FEATURE_APPX) && !defined(FEATURE_CORECLR)
+ else if (AppX::IsAppXNGen())
+ {
+ // If this is an NGen worker process for AppX, then the process doesn't actually run in the package,
+ // and RoResolveNamespace won't work without some help. AppX::GetCurrentPackageInfo can give us the
+ // package graph, which we can pass to RoResolveNamespace to make it work properly.
+ UINT32 cbBuffer = 0;
+ UINT32 nCount = 0;
+ HRESULT hr = AppX::GetCurrentPackageInfo(PACKAGE_FILTER_CLR_DEFAULT, &cbBuffer, nullptr, nullptr);
+ if (hr != HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ ThrowHR(hr);
+
+ NewArrayHolder<BYTE> pbBuffer(new (nothrow) BYTE[cbBuffer]);
+ IfNullThrow(pbBuffer);
+ IfFailThrow(AppX::GetCurrentPackageInfo(PACKAGE_FILTER_CLR_DEFAULT, &cbBuffer, pbBuffer, &nCount));
+
+ m_rgAltPaths.Allocate(nCount);
+
+ PCPACKAGE_INFO pPackageInfo = reinterpret_cast<PCPACKAGE_INFO>(static_cast<PBYTE>(pbBuffer));
+ for (UINT32 iAltPath = 0; iAltPath < nCount; iAltPath++)
+ {
+ IfFailThrow(WindowsCreateString(
+ pPackageInfo[iAltPath].path,
+ (UINT32)wcslen(pPackageInfo[iAltPath].path),
+ m_rgAltPaths.GetRawArray() + iAltPath));
+ }
+ }
+#endif //FEATURE_APPX && !FEATURE_CORECLR
+#endif //CROSSGEN_COMPILE
+
+#ifdef FEATURE_FUSION
+ IfFailThrow(RuntimeCreateCachingILFingerprintFactory(&m_pFingerprintFactory));
+#endif
+}
+
+//=====================================================================================================================
+CLRPrivBinderWinRT::~CLRPrivBinderWinRT()
+{
+ WRAPPER_NO_CONTRACT;
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+ AssemblyUsageLogManager::UnRegisterBinderFromUsageLog((UINT_PTR)this);
+#endif
+ if (m_pTypeCache != nullptr)
+ {
+ m_pTypeCache->Release();
+ }
+}
+
+//=====================================================================================================================
+CLRPrivBinderWinRT *
+CLRPrivBinderWinRT::GetOrCreateBinder(
+ CLRPrivTypeCacheWinRT * pWinRtTypeCache,
+ NamespaceResolutionKind fNamespaceResolutionKind)
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+
+ // This should be allocated directly by CLRPrivBinderAppX in the AppX scenario.
+#ifdef FEATURE_APPX_BINDER
+ _ASSERTE(!AppX::IsAppXProcess());
+#endif
+
+ if (s_pSingleton == nullptr)
+ {
+ ReleaseHolder<CLRPrivBinderWinRT> pBinder;
+ pBinder = clr::SafeAddRef(new CLRPrivBinderWinRT(
+ nullptr, // pParentBinder
+ pWinRtTypeCache,
+ nullptr, // rgwzAltPath
+ 0, // cAltPaths
+ fNamespaceResolutionKind,
+ TRUE // fCanUseNativeImages
+ ));
+
+ if (InterlockedCompareExchangeT<decltype(s_pSingleton)>(&s_pSingleton, pBinder, nullptr) == nullptr)
+ {
+ pBinder.SuppressRelease();
+ }
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+ // Create and register WinRT usage log
+ ReleaseHolder<IAssemblyUsageLog> pNewWinRTUsageLog;
+ IfFailThrow(AssemblyUsageLogManager::GetUsageLogForContext(W("WinRT"), W("NotApp"), &pNewWinRTUsageLog));
+
+ UINT_PTR winRTBinderId;
+ IfFailThrow(pBinder->GetBinderID(&winRTBinderId));
+ IfFailThrow(AssemblyUsageLogManager::RegisterBinderWithUsageLog(winRTBinderId, pNewWinRTUsageLog));
+#endif
+ }
+ _ASSERTE(s_pSingleton->m_fNamespaceResolutionKind == fNamespaceResolutionKind);
+
+ return clr::SafeAddRef(s_pSingleton);
+}
+
+//=====================================================================================================================
+STDAPI
+CreateAssemblyNameObjectFromMetaData(
+ LPASSEMBLYNAME *ppAssemblyName,
+ LPCOLESTR szAssemblyName,
+ ASSEMBLYMETADATA *pamd,
+ LPVOID pvReserved);
+
+//=====================================================================================================================
+HRESULT CLRPrivBinderWinRT::BindWinRTAssemblyByName(
+ IAssemblyName * pAssemblyName,
+ CLRPrivAssemblyWinRT ** ppAssembly,
+ BOOL fPreBind)
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+ ReleaseHolder<CLRPrivAssemblyWinRT> pAssembly;
+#ifndef FEATURE_CORECLR
+ NewArrayHolder<WCHAR> wszAssemblySimpleName;
+#endif
+
+#ifndef CROSSGEN_COMPILE
+#ifndef FEATURE_CORECLR
+ fusion::logging::StatusScope logStatus(0, ID_FUSLOG_BINDING_STATUS_WINRT, &hr);
+#else
+ BINDER_SPACE::BINDER_LOG_ENTER(W("CLRPrivBinderWinRT_CoreCLR::BindWinRTAssemblyByName"));
+#endif
+#endif
+ VALIDATE_ARG_RET(pAssemblyName != nullptr);
+ VALIDATE_ARG_RET(ppAssembly != nullptr);
+
+ DWORD dwContentType = AssemblyContentType_Default;
+ IfFailGo(hr = fusion::util::GetProperty(pAssemblyName, ASM_NAME_CONTENT_TYPE, &dwContentType));
+ if ((hr != S_OK) || (dwContentType != AssemblyContentType_WindowsRuntime))
+ {
+ IfFailGo(CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT);
+ }
+
+    // Note: WinRT type resolution is also supported on pre-Win8 via the DesignerResolveEvent
+ if (!WinRTSupported() && (m_fNamespaceResolutionKind != NamespaceResolutionKind_DesignerResolveEvent))
+ {
+ IfFailGo(COR_E_PLATFORMNOTSUPPORTED);
+ }
+
+#ifndef FEATURE_CORECLR
+ IfFailGo(fusion::util::GetProperty(pAssemblyName, ASM_NAME_NAME, &wszAssemblySimpleName));
+#else
+ WCHAR wszAssemblySimpleName[_MAX_PATH];
+ DWORD cchAssemblySimpleName = _MAX_PATH;
+ IfFailGo(pAssemblyName->GetName(&cchAssemblySimpleName, wszAssemblySimpleName));
+#endif
+
+ LPWSTR wszFullTypeName = wcschr(wszAssemblySimpleName, W('!'));
+
+ if (wszFullTypeName != nullptr)
+ {
+ _ASSERTE(wszAssemblySimpleName < wszFullTypeName);
+ if (!(wszAssemblySimpleName < wszFullTypeName))
+ {
+ IfFailGo(E_UNEXPECTED);
+ }
+
+ // Turns wszAssemblySimpleName into simple name, wszFullTypeName into type name.
+ *wszFullTypeName++ = W('\0');
+
+ CLRPrivBinderUtil::WStringList * pFileNameList = nullptr;
+ BOOL fIsWindowsNamespace = FALSE;
+
+ {
+ // don't look past the first generics backtick (if any)
+ WCHAR *pGenericBegin = (WCHAR*)wcschr(wszFullTypeName, W('`'));
+ if (pGenericBegin != nullptr)
+ *pGenericBegin = W('\0');
+
+ LPWSTR wszSimpleTypeName = wcsrchr(wszFullTypeName, W('.'));
+
+ // restore the generics backtick
+ if (pGenericBegin != nullptr)
+ *pGenericBegin = W('`');
+
+ if (wszSimpleTypeName == nullptr)
+ {
+ IfFailGo(CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT);
+ }
+
+ // Turns wszFullTypeName into namespace name (without simple type name)
+ *wszSimpleTypeName = W('\0');
+
+ IfFailGo(GetFileNameListForNamespace(wszFullTypeName, &pFileNameList));
+
+ fIsWindowsNamespace = IsWindowsNamespace(wszFullTypeName);
+
+ // Turns wszFullTypeName back into full type name (was namespace name)
+ *wszSimpleTypeName = W('.');
+ }
+
+ if (pFileNameList == nullptr)
+        { // There are no files associated with the namespace
+ IfFailGo(CLR_E_BIND_TYPE_NOT_FOUND);
+ }
+
+ CLRPrivBinderUtil::WStringListElem * pFileNameElem = pFileNameList->GetHead();
+ for (; pFileNameElem != nullptr; pFileNameElem = CLRPrivBinderUtil::WStringList::GetNext(pFileNameElem))
+ {
+ const WCHAR * wszFileName = pFileNameElem->GetValue();
+ pAssembly = FindAssemblyByFileName(wszFileName);
+
+ WCHAR wszFileNameStripped[_MAX_PATH];
+ SplitPath(wszFileName, NULL, NULL, NULL, NULL, wszFileNameStripped, _MAX_PATH, NULL, NULL);
+
+ if (pAssembly == nullptr)
+ {
+ NewHolder<CLRPrivResourcePathImpl> pResource(
+ new CLRPrivResourcePathImpl(wszFileName));
+
+ ReleaseHolder<IAssemblyName> pAssemblyDefName;
+
+                // Instead of using the metadata of the assembly to get the AssemblyDef name, fake one up from the filename.
+                // This ties in with the PreBind binding behavior and NGEN. This logic was implemented to provide the best
+                // performance, as actually reading the metadata was prohibitively slow due to the cost of opening the
+                // assembly file. We use a zeroed out ASSEMBLYMETADATA structure to create the assembly name object
+                // in order to ensure that every field of the assembly name is filled out as if it was created from a normal
+                // assembly def row.
+                // See comment on CLRPrivBinderWinRT::PreBind for further details about NGEN binding and WinMDs.
+ ASSEMBLYMETADATA asmd = { 0 };
+ IfFailGo(CreateAssemblyNameObjectFromMetaData(&pAssemblyDefName, wszFileNameStripped, &asmd, NULL));
+ DWORD dwAsmContentType = AssemblyContentType_WindowsRuntime;
+ IfFailGo(pAssemblyDefName->SetProperty(ASM_NAME_CONTENT_TYPE, (LPBYTE)&dwAsmContentType, sizeof(dwAsmContentType)));
+
+ //
+ // Creating the BindResult we will pass to the native binder to find native images.
+ // We strip off the type from the assembly name, leaving the simple assembly name.
+ // The native binder stores native images under subdirectories named after their
+ // simple name so we only want to pass the simple name portion of the name to it,
+ // which it uses along with the fingerprint matching in BindResult to find the
+ // native image for this WinRT assembly.
+ // The WinRT team has said that WinMDs will have the same simple name as the filename.
+ //
+ IfFailGo(pAssemblyDefName->SetProperty(ASM_NAME_NAME, wszFileNameStripped, (lstrlenW(wszFileNameStripped) + 1) * sizeof(WCHAR)));
+
+#ifdef FEATURE_FUSION
+ NewHolder<CLRPrivAssemblyBindResultWrapper> pBindResult(
+ new CLRPrivAssemblyBindResultWrapper(pAssemblyDefName, wszFileName, m_pFingerprintFactory));
+#else
+ NewHolder<CoreBindResult> pBindResult(new CoreBindResult());
+ StackSString sAssemblyPath(pResource->GetPath());
+ ReleaseHolder<BINDER_SPACE::Assembly> pBinderAssembly;
+
+ IfFailGo(GetAssemblyAndTryFindNativeImage(sAssemblyPath, wszFileNameStripped, &pBinderAssembly));
+
+            // We set bInGac to TRUE here because the plan is full trust for WinRT. If this changes, we may need to check with
+            // the AppDomain to verify trust based on the WinMD's path.
+ pBindResult->Init(pBinderAssembly, TRUE);
+#endif
+ NewHolder<CLRPrivAssemblyWinRT> pNewAssembly(
+ new CLRPrivAssemblyWinRT(this, pResource, pBindResult, fIsWindowsNamespace));
+
+ // pNewAssembly holds references to these now
+ pResource.SuppressRelease();
+ pBindResult.SuppressRelease();
+
+ // Add the assembly into cache (multi-thread aware)
+ pAssembly = AddFileNameToAssemblyMapping(pResource->GetPath(), pNewAssembly);
+
+ // We did not find an existing assembly in the cache and are using the newly created pNewAssembly.
+ // Stop it from being deleted when we go out of scope.
+ if (pAssembly == pNewAssembly)
+ {
+ pNewAssembly.SuppressRelease();
+ }
+
+#ifndef FEATURE_CORECLR
+ if (fPreBind)
+ {
+ // We are pre-binding to this WinMD and do not want to open it
+ // Compare the filename to the assembly simple name. This is legal to do with WinRT because at NGen time
+ // we embed a WinRT dependency as assembly def name component plus a namespace and type from it.
+ // At bind time, this type should still exist in the same assembly. If it doesn't, and has been moved,
+ // the native image validation will fail anyway and we'll fall back to IL. This is because if the type has
+ // been moved to another WinMD, it must have been removed from the first one because WinRT allows no duplicates.
+ // See comment on CLRPrivBinderWinRT::PreBind for further details.
+ if (!_wcsicmp(wszAssemblySimpleName, wszFileNameStripped))
+ {
+ *ppAssembly = pAssembly.Extract();
+ return (hr = S_OK);
+ }
+ else
+ {
+ continue;
+ }
+ }
+#endif
+ }
+#ifndef FEATURE_CORECLR
+ else if (fPreBind)
+ {
+ // We are pre-binding to this WinMD and do not want to force it to be loaded into the runtime yet.
+ // Compare the filename to the assembly simple name. This is legal to do with WinRT because at NGen time
+ // we embed a WinRT dependency as assembly def name component plus a namespace and type from it.
+ // At bind time, this type should still exist in the same assembly. If it doesn't, and has been moved,
+ // the native image validation will fail anyway and we'll fall back to IL. This is because if the type has
+ // been moved to another WinMD, it must have been removed from the first one because WinRT allows no duplicates.
+ // See comment on CLRPrivBinderWinRT::PreBind for further details.
+ if (!_wcsicmp(wszAssemblySimpleName, wszFileNameStripped))
+ {
+ *ppAssembly = pAssembly.Extract();
+ return (hr = S_OK);
+ }
+ else
+ {
+ continue;
+ }
+ }
+#endif
+
+ //
+ // Look to see if there's a native image available.
+ //
+ hr = pAssembly->EnsureAvailableImageTypes();
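+            // (Best-effort: a failure here is ignored; hr is reassigned by the ContainsType call below.)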
+
+ // Determine if this is the assembly we really want to find.
+ IfFailGo(hr = m_pTypeCache->ContainsType(pAssembly, wszFullTypeName));
+ if (hr == S_OK)
+ { // The type we are looking for has been found in this assembly
+#ifndef CROSSGEN_COMPILE
+#ifndef FEATURE_CORECLR
+ fusion::logging::LogMessage(0, ID_FUSLOG_BINDING_STATUS_FOUND, wszFileName);
+#else
+ BINDER_SPACE::BINDER_LOG_LEAVE_HR(W("CLRPrivBinderWinRT_CoreCLR::BindWinRTAssemblyByName"), hr);
+#endif
+#endif
+ *ppAssembly = pAssembly.Extract();
+ return (hr = S_OK);
+ }
+ _ASSERTE(hr == S_FALSE);
+ }
+ }
+
+ // The type has not been found in any of the files from the type's namespace
+ hr = CLR_E_BIND_TYPE_NOT_FOUND;
+ ErrExit:
+
+#ifdef FEATURE_CORECLR
+ BINDER_SPACE::BINDER_LOG_LEAVE_HR(W("CLRPrivBinderWinRT_CoreCLR::BindWinRTAssemblyByName"), hr);
+#endif
+ return hr;
+} // CLRPrivBinderWinRT::BindWinRTAssemblyByName
+
+#ifdef FEATURE_FUSION
+//=====================================================================================================================
+HRESULT CLRPrivBinderWinRT::BindAssemblyToNativeAssembly(CLRPrivAssemblyWinRT *pAssembly)
+{
+ HRESULT hr = S_OK;
+
+ if (!m_fCanUseNativeImages)
+ return hr;
+
+ ReleaseHolder<IBindResult> pIBindResultIL;
+ IfFailRet(pAssembly->GetIBindResult(&pIBindResultIL));
+ _ASSERTE(pIBindResultIL != nullptr);
+
+ NewArrayHolder<WCHAR> wzZapSet = DuplicateStringThrowing(g_pConfig->ZapSet());
+ NativeConfigData cfgData = {
+ wzZapSet,
+ PEFile::GetNativeImageConfigFlags()
+ };
+
+ ReleaseHolder<IBindContext> pIBindContext;
+ IfFailRet(GetParentIBindContext(&pIBindContext));
+
+ // Fire BindingNgenPhaseStart ETW event if enabled.
+ {
+ InlineSString<_MAX_PATH> ssAssemblyName;
+ FireEtwBindingNgenPhaseStart(
+ (AppDomain::GetCurrentDomain()->GetId().m_dwId),
+ LOADCTX_TYPE_HOSTED,
+ ETWFieldUnused,
+ ETWLoaderLoadTypeNotAvailable,
+ NULL,
+ pAssembly->m_pResourceIL->GetPath(),
+ GetClrInstanceId());
+ }
+
+ IfFailRet(BindToNativeAssembly(pIBindResultIL, &cfgData, pIBindContext, fusion::logging::GetCurrentFusionBindLog()));
+
+ // Fire BindingNgenPhaseEnd ETW event if enabled.
+ {
+ InlineSString<_MAX_PATH> ssAssemblyName;
+ FireEtwBindingNgenPhaseEnd(
+ (AppDomain::GetCurrentDomain()->GetId().m_dwId),
+ LOADCTX_TYPE_HOSTED,
+ ETWFieldUnused,
+ ETWLoaderLoadTypeNotAvailable,
+ NULL,
+ pAssembly->m_pResourceIL->GetPath(),
+ GetClrInstanceId());
+ }
+
+    // BindToNativeAssembly can return S_FALSE, which could mislead callers; normalize it to S_OK.
+ if (hr == S_FALSE)
+ hr = S_OK;
+
+ return hr;
+}
+#endif
+
+#if defined(FEATURE_COMINTEROP_WINRT_DESKTOP_HOST) && !defined(CROSSGEN_COMPILE)
+BOOL CLRPrivBinderWinRT::SetLocalWinMDPath(HSTRING localWinMDPath)
+{
+ STANDARD_VM_CONTRACT;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+ CrstHolder lock(&m_localWinMDPathLock);
+
+    // We use the empty string as a sentinel, so don't allow explicitly setting the binding base to empty.
+ if (localWinMDPath == nullptr)
+ {
+ return FALSE;
+ }
+
+    // If we've already set a binding base, the incoming path must match the existing one exactly.
+ if (!m_localWinMDPath.IsEmpty())
+ {
+ return m_localWinMDPath.CompareOrdinal(clr::winrt::StringReference(localWinMDPath)) == 0;
+ }
+
+ // If we've already done WinRT binding, we can't set the binding base because that could lead to inconsistent results when binding
+ // the same name after the base is set.
+ if (!m_fCanSetLocalWinMDPath)
+ {
+ return FALSE;
+ }
+
+ m_localWinMDPath.Initialize(localWinMDPath);
+
+ return TRUE;
+}
+#endif // FEATURE_COMINTEROP_WINRT_DESKTOP_HOST && !CROSSGEN_COMPILE
+
+//=====================================================================================================================
+HRESULT CLRPrivBinderWinRT::BindWinRTAssemblyByName(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppPrivAssembly,
+ BOOL fPreBind)
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+
+ ReleaseHolder<CLRPrivAssemblyWinRT> pWinRTAssembly;
+ IfFailRet(BindWinRTAssemblyByName(pAssemblyName, &pWinRTAssembly, fPreBind));
+ IfFailRet(pWinRTAssembly->QueryInterface(__uuidof(ICLRPrivAssembly), (LPVOID *)ppPrivAssembly));
+
+ return hr;
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivBinderWinRT::BindWinRTAssemblyByName(
+ IAssemblyName * pAssemblyName,
+ IBindResult ** ppIBindResult,
+ BOOL fPreBind)
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET(pAssemblyName != nullptr);
+ VALIDATE_ARG_RET(ppIBindResult != nullptr);
+
+ ReleaseHolder<CLRPrivAssemblyWinRT> pWinRTAssembly;
+ IfFailRet(BindWinRTAssemblyByName(pAssemblyName, &pWinRTAssembly, fPreBind));
+ IfFailRet(pWinRTAssembly->GetIBindResult(ppIBindResult));
+
+ return hr;
+}
+
+#ifndef FEATURE_FUSION
+//
+// This method opens the assembly using the CoreCLR Binder, which has logic supporting opening either the IL or
+// even just the native image without IL present.
+// RoResolveNamespace has already told us the IL file to open. We try to find a native image to open instead
+// by looking in the TPA list and the App_Ni_Paths.
+//
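+// Probe order on the CoreCLR path below: (1) the native image recorded in the TPA map for the
+// simple name, (2) <appNiPath>\<simpleName>.ni.DLL for each configured app NI path, and
+// finally (3) the IL WinMD itself.
+//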
+HRESULT CLRPrivBinderWinRT::GetAssemblyAndTryFindNativeImage(SString &sWinmdFilename, LPCWSTR pwzSimpleName, BINDER_SPACE::Assembly ** ppAssembly)
+{
+ HRESULT hr = S_OK;
+
+#ifdef FEATURE_CORECLR
+ if (!m_pApplicationContext->IsTpaListProvided())
+ return COR_E_FILENOTFOUND;
+
+ BINDER_SPACE::SimpleNameToFileNameMap * tpaMap = m_pApplicationContext->GetTpaList();
+ const BINDER_SPACE::SimpleNameToFileNameMapEntry *pTpaEntry = tpaMap->LookupPtr(pwzSimpleName);
+ if (pTpaEntry != nullptr)
+ {
+ ReleaseHolder<BINDER_SPACE::Assembly> pAssembly;
+
+ if (pTpaEntry->m_wszNIFileName != nullptr)
+ {
+ SString fileName(pTpaEntry->m_wszNIFileName);
+
+                // TODO: consider a GetAssembly overload, or just another parameter to the existing method.
+ hr = BINDER_SPACE::AssemblyBinder::GetAssembly(fileName,
+ FALSE, /* fInspectionOnly */
+ TRUE, /* fIsInGAC */
+ TRUE /* fExplicitBindToNativeImage */,
+ &pAssembly,
+ sWinmdFilename.GetUnicode()
+ );
+
+ // On file not found, simply fall back to app ni path probing
+ if (hr != HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND))
+ {
+ // Any other error is fatal
+ IfFailRet(hr);
+
+ *ppAssembly = pAssembly.Extract();
+ return (hr = S_OK);
+ }
+ }
+ }
+
+ StringArrayList *pBindingPaths = m_pApplicationContext->GetAppNiPaths();
+
+ // Loop through the binding paths looking for a matching assembly
+ for (DWORD i = 0; i < pBindingPaths->GetCount(); i++)
+ {
+ ReleaseHolder<BINDER_SPACE::Assembly> pAssembly;
+ LPCWSTR wszBindingPath = (*pBindingPaths)[i];
+
+ SString simpleName(pwzSimpleName);
+ SString fileName(wszBindingPath);
+ BINDER_SPACE::CombinePath(fileName, simpleName, fileName);
+ fileName.Append(W(".ni.DLL"));
+
+ hr = BINDER_SPACE::AssemblyBinder::GetAssembly(fileName,
+ FALSE, /* fInspectionOnly */
+ FALSE, /* fIsInGAC */
+ TRUE /* fExplicitBindToNativeImage */,
+ &pAssembly);
+
+ // Since we're probing, file not founds are ok and we should just try another
+ // probing path
+ if (hr == HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND))
+ {
+ continue;
+ }
+
+ IfFailRet(hr);
+
+ *ppAssembly = pAssembly.Extract();
+ return (hr = S_OK);
+ }
+
+ // We did not find a native image for this WinMD; open the WinMD file itself as the assembly to return.
+ hr = BINDER_SPACE::AssemblyBinder::GetAssembly(sWinmdFilename,
+ FALSE, /* fInspectionOnly */
+ FALSE, /* fIsInGAC */
+ FALSE /* fExplicitBindToNativeImage */,
+ ppAssembly);
+#else
+ ReleaseHolder<BINDER_SPACE::Assembly> pAssembly;
+
+ // This codepath is used for desktop crossgen
+ pAssembly = new BINDER_SPACE::Assembly();
+
+ pAssembly->SetPEImage(PEImage::OpenImage(sWinmdFilename, MDInternalImport_Default));
+
+ pAssembly->m_assemblyPath.Set(sWinmdFilename);
+
+ *ppAssembly = pAssembly.Extract();
+#endif
+
+ return hr;
+}
+#endif // !FEATURE_FUSION
+
+#ifdef FEATURE_CORECLR
+//=====================================================================================================================
+HRESULT CLRPrivBinderWinRT::SetApplicationContext(BINDER_SPACE::ApplicationContext *pApplicationContext, SString &appLocalWinMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ _ASSERTE(pApplicationContext != nullptr);
+ m_pApplicationContext = pApplicationContext;
+
+ StringArrayList * pAppPaths = m_pApplicationContext->GetAppPaths();
+
+#ifndef CROSSGEN_COMPILE
+ DWORD cAppPaths = pAppPaths->GetCount();
+ m_rgAltPaths.Allocate(cAppPaths);
+
+ for (DWORD i = 0; i < cAppPaths; i++)
+ {
+ IfFailRet(WindowsCreateString(
+ pAppPaths->Get(i).GetUnicode(),
+ (UINT32)(pAppPaths->Get(i).GetCount()),
+ m_rgAltPaths.GetRawArray() + i));
+ }
+
+ if (!appLocalWinMD.IsEmpty())
+ {
+ m_appLocalWinMDPath = DuplicateStringThrowing(appLocalWinMD.GetUnicode());
+ }
+#else
+ Crossgen::SetAppPaths(pAppPaths);
+#endif
+
+ return hr;
+}
+#endif //FEATURE_CORECLR
+
+//=====================================================================================================================
+// Implements interface method code:ICLRPrivBinder::BindAssemblyByName.
+//
+HRESULT CLRPrivBinderWinRT::BindAssemblyByName(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET((pAssemblyName != nullptr) && (ppAssembly != nullptr));
+
+ EX_TRY
+ {
+ if (m_pParentBinder != nullptr)
+ {
+ // Delegate to parent binder.
+ hr = m_pParentBinder->BindAssemblyByName(pAssemblyName, ppAssembly);
+ }
+ else
+ {
+ hr = BindWinRTAssemblyByName(pAssemblyName, ppAssembly);
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+//=====================================================================================================================
+ReleaseHolder<CLRPrivAssemblyWinRT>
+CLRPrivBinderWinRT::FindAssemblyByFileName(
+ PCWSTR wszFileName)
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
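+    // See the m_MapsLock initialization in the constructor for why thread suspension is
+    // forbidden while the lock is held.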
+#ifndef CLR_STANDALONE_BINDER
+ ForbidSuspendThreadHolder suspend;
+#endif // !CLR_STANDALONE_BINDER
+ {
+#ifndef CLR_STANDALONE_BINDER
+ CrstHolder lock(&m_MapsLock);
+#endif // !CLR_STANDALONE_BINDER
+
+ const FileNameToAssemblyWinRTMapEntry * pEntry = m_FileNameToAssemblyMap.LookupPtr(wszFileName);
+ return (pEntry == nullptr) ? nullptr : clr::SafeAddRef(pEntry->m_pAssembly);
+ }
+}
+
+//=====================================================================================================================
+// Add FileName -> CLRPrivAssemblyWinRT * mapping to the map (multi-thread safe).
+//
+ReleaseHolder<CLRPrivAssemblyWinRT>
+CLRPrivBinderWinRT::AddFileNameToAssemblyMapping(
+ PCWSTR wszFileName,
+ CLRPrivAssemblyWinRT * pAssembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(pAssembly != nullptr);
+
+#ifndef CLR_STANDALONE_BINDER
+ // We have to serialize all Add operations
+ CrstHolder lock(&m_MapsAddLock);
+#endif // !CLR_STANDALONE_BINDER
+
+ // Wrapper for m_FileNameToAssemblyMap.Add that avoids call out into host
+ FileNameToAssemblyWinRTMap::AddPhases addCall;
+
+ // 1. Preallocate one element
+ addCall.PreallocateForAdd(&m_FileNameToAssemblyMap);
+ {
+ // 2. Take the reader lock which can be taken during stack walking
+ // We cannot call out into host from ForbidSuspend region (i.e. no allocations/deallocations)
+#ifndef CLR_STANDALONE_BINDER
+ ForbidSuspendThreadHolder suspend;
+#endif // !CLR_STANDALONE_BINDER
+ {
+#ifndef CLR_STANDALONE_BINDER
+ CrstHolder lock(&m_MapsLock);
+#endif // !CLR_STANDALONE_BINDER
+
+ const FileNameToAssemblyWinRTMapEntry * pEntry = m_FileNameToAssemblyMap.LookupPtr(wszFileName);
+ CLRPrivAssemblyWinRT * pResultAssembly = nullptr;
+ if (pEntry != nullptr)
+ {
+ pResultAssembly = pEntry->m_pAssembly;
+
+ // 3a. Use the newly allocated table (if any) to avoid allocation in the next call (no call out into host)
+ addCall.AddNothing_PublishPreallocatedTable();
+ }
+ else
+ {
+ // 3b. Add the element to the hash table (no call out into host)
+ FileNameToAssemblyWinRTMapEntry e;
+ e.m_wszFileName = wszFileName;
+ e.m_pAssembly = pAssembly;
+ addCall.Add(e);
+
+ pResultAssembly = pAssembly;
+ }
+ return clr::SafeAddRef(pResultAssembly);
+ }
+ }
+    // 4. Cleanup of the old memory (if any) happens in the destructor of addCall
+    //    (equivalent to calling addCall.DeleteOldTable() here).
+}
+
+//=====================================================================================================================
+void
+CLRPrivBinderWinRT::RemoveFileNameToAssemblyMapping(
+ PCWSTR wszFileName)
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+#ifndef CLR_STANDALONE_BINDER
+ ForbidSuspendThreadHolder suspend;
+#endif // !CLR_STANDALONE_BINDER
+ {
+#ifndef CLR_STANDALONE_BINDER
+ CrstHolder lock(&m_MapsLock);
+#endif // !CLR_STANDALONE_BINDER
+
+ m_FileNameToAssemblyMap.Remove(wszFileName);
+ }
+}
+
+//=====================================================================================================================
+// Returns list of file names from code:m_NamespaceToFileNameListMap for the namespace
+//
+HRESULT
+CLRPrivBinderWinRT::GetFileNameListForNamespace(
+ LPCWSTR wszNamespace,
+ CLRPrivBinderUtil::WStringList ** ppFileNameList)
+{
+ STANDARD_VM_CONTRACT;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+ HRESULT hr = S_OK;
+
+ CLRPrivBinderUtil::WStringList * pFileNameList = nullptr;
+ {
+#ifndef CLR_STANDALONE_BINDER
+ ForbidSuspendThreadHolder suspend;
+#endif // !CLR_STANDALONE_BINDER
+ {
+#ifndef CLR_STANDALONE_BINDER
+ CrstHolder lock(&m_MapsLock);
+#endif // !CLR_STANDALONE_BINDER
+
+ const NamespaceToFileNameListMapEntry * pEntry = m_NamespaceToFileNameListMap.LookupPtr(wszNamespace);
+ if (pEntry != nullptr)
+ {
+ // Entries from the map are never removed, so we do not have to protect the file name list with a lock
+ pFileNameList = pEntry->m_pFileNameList;
+ }
+ }
+ }
+
+ if (pFileNameList != nullptr)
+ {
+ *ppFileNameList = pFileNameList;
+ }
+ else
+ {
+ CLRPrivBinderUtil::WStringListHolder hFileNameList;
+ LPCWSTR wszNamespaceRoResolve = wszNamespace;
+
+#ifndef CROSSGEN_COMPILE
+ if (m_fNamespaceResolutionKind == NamespaceResolutionKind_WindowsAPI)
+ {
+ CoTaskMemHSTRINGArrayHolder hFileNames;
+
+ UINT32 cchNamespaceRoResolve;
+ IfFailRet(StringCchLength(wszNamespaceRoResolve, &cchNamespaceRoResolve));
+
+ CLRConfigStringHolder wszWinMDPathConfig;
+ LPWSTR wszWinMDPath = nullptr;
+ UINT32 cchWinMDPath = 0;
+
+#ifdef FEATURE_CORECLR
+ wszWinMDPath = m_appLocalWinMDPath;
+#else
+ if (AppX::IsAdaptiveApp())
+ {
+ IfFailRet(AppX::GetWinMetadataDirForAdaptiveApps(&wszWinMDPath));
+            }
+            else if (AppX::IsAppXDesignMode() || IsNgenOffline())
+ {
+ wszWinMDPathConfig = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_WinMDPath);
+ wszWinMDPath = wszWinMDPathConfig;
+ }
+#endif // FEATURE_CORECLR
+
+ if (wszWinMDPath != nullptr)
+ {
+ IfFailRet(StringCchLength(wszWinMDPath, &cchWinMDPath));
+ }
+
+ DWORD cFileNames = 0;
+ HSTRING * rgFileNames = nullptr;
+ hr = RoResolveNamespace(
+ WinRtStringRef(wszNamespaceRoResolve, cchNamespaceRoResolve),
+ wszWinMDPath != nullptr ? (HSTRING)WinRtStringRef(wszWinMDPath, cchWinMDPath) : nullptr, // hsWindowsSdkPath
+ m_rgAltPaths.GetCount(), // cPackageGraph
+ m_rgAltPaths.GetRawArray(), // rgPackageGraph
+ &cFileNames,
+ &rgFileNames,
+ nullptr, // pcDirectNamespaceChildren
+ nullptr); // rgDirectNamespaceChildren
+#ifdef FEATURE_CORECLR
+            // For CoreCLR, if the process is not AppX, deliver a more appropriate, less confusing
+            // error message when trying to bind to 3rd party WinMDs.
+ if (HRESULT_FROM_WIN32(APPMODEL_ERROR_NO_PACKAGE) == hr)
+ {
+ if (!AppX::IsAppXProcess())
+ {
+ IfFailRet(HRESULT_FROM_WIN32(ERROR_NOT_SUPPORTED));
+ }
+ }
+#endif
+
+#ifdef FEATURE_COMINTEROP_WINRT_DESKTOP_HOST
+ // If we failed to find the requested name, but an application-local probing path is set up, then
+ // we can use that path to try to find the name now.
+ if (hr == RO_E_METADATA_NAME_NOT_FOUND || hr == HRESULT_FROM_WIN32(APPMODEL_ERROR_NO_PACKAGE))
+ {
+ // We only want to probe the application-local path for 3rd party WinMDs, as these are the only ones
+ // which do not have code sharing enabled. Although we currently only allow a single alternate probing
+ // path per process, locking it down now gives us behavior that is easier to extend if we later
+ // need to enable per-domain local paths.
+ if (!IsWindowsNamespace(wszNamespaceRoResolve))
+ {
+ HSTRING localWinMDPath = nullptr;
+ {
+ CrstHolder lock(&m_localWinMDPathLock);
+
+ localWinMDPath = m_localWinMDPath.Get();
+
+ // If the host has not configured the local WinMD path and we have not yet done any WinMD probing,
+ // see if there is config to set up a local WinMD path.
+ if (localWinMDPath == nullptr && m_fCanSetLocalWinMDPath)
+ {
+ NewArrayHolder<WCHAR> configWinMDPath(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_LocalWinMDPath));
+ if (!configWinMDPath.IsNull())
+ {
+ m_localWinMDPath.Initialize(configWinMDPath);
+ localWinMDPath = m_localWinMDPath.Get();
+ }
+ }
+
+ // Do not allow any further setting of the application binding base at this point, since if it
+ // is not currently set, setting it in the future could result in different binding results.
+ m_fCanSetLocalWinMDPath = FALSE;
+ }
+
+ if (localWinMDPath != nullptr)
+ {
+ hr = RoResolveNamespace(
+ WinRtStringRef(wszNamespaceRoResolve, cchNamespaceRoResolve),
+ wszWinMDPath != nullptr ? (HSTRING)WinRtStringRef(wszWinMDPath, cchWinMDPath) : nullptr, // hsWindowsSdkPath
+ 1, // cPackageGraph
+ &localWinMDPath, // rgPackageGraph
+ &cFileNames,
+ &rgFileNames,
+ nullptr, // pcDirectNamespaceChildren
+ nullptr); // rgDirectNamespaceChildren
+ }
+ }
+ }
+#endif // FEATURE_COMINTEROP_WINRT_DESKTOP_HOST
+
+ IfFailRet(hr);
+ if (hr != S_OK)
+ { // Not expecting success codes other than S_OK.
+ IfFailRet(E_UNEXPECTED);
+ }
+
+ hFileNames.Init(rgFileNames, cFileNames);
+
+ for (DWORD i = 0; i < hFileNames.GetCount(); i++)
+ {
+ UINT32 cchFileName = 0;
+ LPCWSTR wszFileName = WindowsGetStringRawBuffer(
+ hFileNames.GetAt(i),
+ &cchFileName);
+
+ BOOL fSkipFilename = FALSE;
+#ifndef FEATURE_CORECLR
+ // If we have a specified path list, be certain to only find filenames in that list.
+ // NGen for AppX is an exception, where the path list contains the package graph, and we can
+ // accept files found elsewhere (e.g., in the Windows WinMD directory).
+ // On CoreCLR, we have no concept of an AppX package, so we want the passed in app
+ // paths to additively contribute to the set of WinMDs the API can find.
+ if (m_rgAltPaths.GetCount() > 0 && !AppX::IsAppXNGen())
+ {
+ fSkipFilename = TRUE;
+ for (DWORD iAltPath = 0; iAltPath < m_rgAltPaths.GetCount(); iAltPath++)
+ {
+ UINT32 cchAltPath = 0;
+ LPCWSTR wszAltPath = WindowsGetStringRawBuffer(
+ m_rgAltPaths.GetAt(iAltPath),
+ &cchAltPath);
+
+ if (cchAltPath >= cchFileName)
+ continue;
+
+ if (wcsncmp(wszAltPath, wszFileName, cchAltPath) == 0)
+ {
+ fSkipFilename = FALSE;
+ break;
+ }
+ }
+ }
+#endif
+ if (!fSkipFilename)
+ hFileNameList.InsertTail(wszFileName);
+ }
+ }
+ else
+ {
+#if !defined(CLR_STANDALONE_BINDER)
+ // This code is desktop specific.
+ _ASSERTE(m_fNamespaceResolutionKind == NamespaceResolutionKind_DesignerResolveEvent);
+
+ EX_TRY
+ {
+ m_pTypeCache->RaiseDesignerNamespaceResolveEvent(wszNamespace, &hFileNameList);
+ }
+ EX_CATCH
+ {
+ Exception * ex = GET_EXCEPTION();
+ if (!ex->IsTransient())
+ { // Exception was caused by user code
+ // Cache empty file name list for this namespace
+ (void)AddFileNameListForNamespace(wszNamespace, nullptr, ppFileNameList);
+ }
+ EX_RETHROW;
+ }
+ EX_END_CATCH_UNREACHABLE
+#endif // !defined(CLR_STANDALONE_BINDER)
+ }
+
+#else //CROSSGEN_COMPILE
+
+ DWORD cFileNames = 0;
+ SString * rgFileNames = nullptr;
+
+ hr = Crossgen::CrossgenRoResolveNamespace(
+ wszNamespaceRoResolve,
+ &cFileNames,
+ &rgFileNames);
+
+ IfFailRet(hr);
+
+ if (cFileNames > 0)
+ {
+ _ASSERTE(cFileNames == 1); // Only support mapping to one file in crossgen
+ hFileNameList.InsertTail(rgFileNames->GetUnicode());
+ delete rgFileNames;
+ }
+
+#endif //CROSSGEN_COMPILE
+
+ // Add the Namespace -> File name list entry into cache (even if the file name list is empty)
+ if (AddFileNameListForNamespace(wszNamespace, hFileNameList.GetValue(), ppFileNameList))
+ { // The file name list was added to the cache - do not delete it
+ _ASSERTE(*ppFileNameList == hFileNameList.GetValue());
+ (void)hFileNameList.Extract();
+ }
+ }
+
+ return hr;
+} // CLRPrivBinderWinRT::GetFileNameListForNamespace
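+
+//=====================================================================================================================
+// A rough outline of the flow above, for orientation (pseudocode only):
+//
+//   GetFileNameListForNamespace(ns):
+//       if ns is cached in m_NamespaceToFileNameListMap: return the cached list
+//       resolve ns to a set of .winmd files
+//           - NamespaceResolutionKind_WindowsAPI:           RoResolveNamespace (+ optional app-local probing)
+//           - NamespaceResolutionKind_DesignerResolveEvent: RaiseDesignerNamespaceResolveEvent (desktop only)
+//           - CROSSGEN_COMPILE:                             Crossgen::CrossgenRoResolveNamespace
+//       cache the resulting list (even if empty) via AddFileNameListForNamespace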
+
+//=====================================================================================================================
+// Adds (thread-safely) a list of file names to code:m_NamespaceToFileNameListMap for the namespace and returns the cached value.
+// Returns TRUE if pFileNameList was added to the cache and the caller should NOT delete it.
+// Returns FALSE if pFileNameList was not added to the cache and the caller should delete it.
+//
+BOOL
+CLRPrivBinderWinRT::AddFileNameListForNamespace(
+ LPCWSTR wszNamespace,
+ CLRPrivBinderUtil::WStringList * pFileNameList,
+ CLRPrivBinderUtil::WStringList ** ppFileNameList)
+{
+ STANDARD_VM_CONTRACT;
+
+ NewArrayHolder<WCHAR> wszEntryNamespace = DuplicateStringThrowing(wszNamespace);
+
+ NamespaceToFileNameListMapEntry entry;
+ entry.m_wszNamespace = wszEntryNamespace.GetValue();
+ entry.m_pFileNameList = pFileNameList;
+
+#ifndef CLR_STANDALONE_BINDER
+ // We have to serialize all Add operations
+ CrstHolder lock(&m_MapsAddLock);
+#endif // !CLR_STANDALONE_BINDER
+
+ // Wrapper for m_NamespaceToFileNameListMap.Add that avoids calling out into the host
+ NamespaceToFileNameListMap::AddPhases addCall;
+
+ // Tracks whether the element was added to the hash table
+ BOOL fAddedToCache = FALSE;
+
+ // 1. Preallocate one element
+ addCall.PreallocateForAdd(&m_NamespaceToFileNameListMap);
+ {
+ // 2. Take the reader lock, which can be taken during stack walking.
+ // We cannot call out into the host from a ForbidSuspend region (i.e. no allocations/deallocations)
+#ifndef CLR_STANDALONE_BINDER
+ ForbidSuspendThreadHolder suspend;
+#endif // !CLR_STANDALONE_BINDER
+ {
+#ifndef CLR_STANDALONE_BINDER
+ CrstHolder lock(&m_MapsLock);
+#endif // !CLR_STANDALONE_BINDER
+
+ const NamespaceToFileNameListMapEntry * pEntry = m_NamespaceToFileNameListMap.LookupPtr(wszNamespace);
+ if (pEntry == nullptr)
+ {
+ // 3a. Add the element to the hash table (no call out into host)
+ addCall.Add(entry);
+
+ // These values are now owned by the hash table element
+ wszEntryNamespace.SuppressRelease();
+ *ppFileNameList = pFileNameList;
+ fAddedToCache = TRUE;
+ }
+ else
+ { // Another thread beat us to adding this entry to the hash table
+ *ppFileNameList = pEntry->m_pFileNameList;
+
+ // 3b. Use the newly allocated table (if any) to avoid allocation in the next call (no call out into host)
+ addCall.AddNothing_PublishPreallocatedTable();
+ _ASSERTE(fAddedToCache == FALSE);
+ }
+ }
+ }
+ // 4. Clean up the old memory (if any); this is also called from the destructor of addCall
+ addCall.DeleteOldTable();
+
+ return fAddedToCache;
+} // CLRPrivBinderWinRT::AddFileNameListForNamespace
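+
+//=====================================================================================================================
+// The AddPhases protocol above in condensed form - a sketch of the general pattern, assuming the SHash-style
+// AddPhases helper used by m_NamespaceToFileNameListMap:
+//
+//   NamespaceToFileNameListMap::AddPhases addCall;
+//   addCall.PreallocateForAdd(&map);                       // 1. may allocate - done outside ForbidSuspend
+//   {
+//       ForbidSuspendThreadHolder suspend;                 // 2. no allocation/host call-out from here on
+//       CrstHolder lock(&m_MapsLock);
+//       if (map.LookupPtr(key) == nullptr)
+//           addCall.Add(entry);                            // 3a. publish the preallocated element
+//       else
+//           addCall.AddNothing_PublishPreallocatedTable(); // 3b. keep the preallocation for a later Add
+//   }
+//   addCall.DeleteOldTable();                              // 4. free any replaced table outside the lock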
+
+#endif //!DACCESS_COMPILE
+
+//=====================================================================================================================
+// Finds assembly with WinRT type if it is already loaded.
+//
+#ifndef CLR_STANDALONE_BINDER
+PTR_Assembly
+CLRPrivBinderWinRT::FindAssemblyForTypeIfLoaded(
+ PTR_AppDomain pAppDomain,
+ LPCUTF8 szNamespace,
+ LPCUTF8 szClassName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ WCHAR wszNamespace[MAX_CLASSNAME_LENGTH];
+ int cchNamespace = WszMultiByteToWideChar(CP_UTF8, 0, szNamespace, -1, wszNamespace, _countof(wszNamespace));
+ if (cchNamespace == 0)
+ {
+ return NULL;
+ }
+
+ CLRPrivBinderUtil::WStringListElem * pFileNameElem = nullptr;
+ const NamespaceToFileNameListMapEntry * pNamespaceEntry;
+ {
+ ForbidSuspendThreadHolder suspend;
+ {
+ CrstHolder lock(&m_MapsLock);
+
+ pNamespaceEntry = m_NamespaceToFileNameListMap.LookupPtr(wszNamespace);
+ if ((pNamespaceEntry == nullptr) || (pNamespaceEntry->m_pFileNameList == nullptr))
+ {
+ return NULL;
+ }
+
+ pFileNameElem = pNamespaceEntry->m_pFileNameList->GetHead();
+ }
+ }
+
+ while (pFileNameElem != nullptr)
+ {
+ const WCHAR * wszFileName = pFileNameElem->GetValue();
+ PTR_CLRPrivAssemblyWinRT pPrivAssembly = NULL;
+ const FileNameToAssemblyWinRTMapEntry * pFileNameEntry;
+ {
+ ForbidSuspendThreadHolder suspend;
+ {
+ CrstHolder lock(&m_MapsLock);
+
+ pFileNameEntry = m_FileNameToAssemblyMap.LookupPtr(wszFileName);
+ if (pFileNameEntry == nullptr || pFileNameEntry->m_pAssembly == nullptr)
+ {
+ return NULL;
+ }
+
+ pPrivAssembly = pFileNameEntry->m_pAssembly;
+ }
+ }
+
+ if (pPrivAssembly == NULL)
+ {
+ return NULL;
+ }
+
+ // Verify that casting CLRPrivAssemblyWinRT * to ICLRPrivAssembly * does not adjust the pointer value,
+ // so the dac_cast below is a plain reinterpretation:
+ _ASSERT(((void *)(CLRPrivAssemblyWinRT *)0x100) ==
+ ((void *)(ICLRPrivAssembly *)(CLRPrivAssemblyWinRT *)0x100));
+
+ PTR_Assembly pAssembly = NULL;
+ HRESULT hr = m_pTypeCache->ContainsTypeIfLoaded(
+ pAppDomain,
+ dac_cast<PTR_ICLRPrivAssembly>(pPrivAssembly),
+ szNamespace,
+ szClassName,
+ &pAssembly);
+ if (hr == S_OK)
+ { // The type we are looking for has been found in this assembly
+ _ASSERTE(pAssembly != nullptr);
+ return pAssembly;
+ }
+ if (FAILED(hr))
+ { // Assembly was not loaded
+ return NULL;
+ }
+ // Type was not found in the assembly
+ _ASSERTE(hr == S_FALSE);
+
+ // Try next file name for this namespace
+ pFileNameElem = CLRPrivBinderUtil::WStringList::GetNext(pFileNameElem);
+ }
+
+ return NULL;
+} // CLRPrivBinderWinRT::FindAssemblyForTypeIfLoaded
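+
+//=====================================================================================================================
+// Sketch of the lookup chain implemented above (all names are the members used in this file):
+//
+//   namespace --(m_NamespaceToFileNameListMap)--> file name list
+//   file name --(m_FileNameToAssemblyMap)-------> CLRPrivAssemblyWinRT *
+//   assembly  --(ContainsTypeIfLoaded)----------> PTR_Assembly that defines the type, or NULL
+//
+// Each map access takes the ForbidSuspend + m_MapsLock pair, and the walk never triggers a load - it only observes
+// assemblies that are already loaded, which is what keeps it within the NOTHROW/GC_NOTRIGGER contract above.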
+#endif // !CLR_STANDALONE_BINDER
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_FUSION
+//=====================================================================================================================
+// Implements interface method code:IBindContext::PreBind.
+//
+// Prebinding to WinMD files follows a special contract. We want to avoid loading the actual target assembly,
+// and we need to validate that all dependencies of the file remain equivalent to what was available at NGen time.
+// We do this by comparing the filename to the assembly simple name. This is legal to do with WinRT because at NGen time
+// we embed a WinRT dependency as an assembly def name component plus a namespace and type from it.
+// At bind time, this type should still exist in the same assembly. If it doesn't, and has been moved,
+// the native image validation will fail anyway and we'll fall back to IL. This is because if the type has
+// been moved to another WinMD, it must have been removed from the first one, since WinRT allows no duplicates.
+// This no-duplicate rule is not actually guaranteed by the WinRT runtime for 3rd party assemblies,
+// but violating the rule is known to cause a number of binding behavior errors that we do not attempt to protect against.
+HRESULT
+CLRPrivBinderWinRT::PreBind(
+ IAssemblyName * pAssemblyName,
+ DWORD dwPreBindFlags,
+ IBindResult ** ppIBindResult)
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+
+ // Assert that we are only working with a binder that supports native images
+ _ASSERTE(m_fCanUseNativeImages);
+
+ ReleaseHolder<IBindContext> pIBindContext;
+ IfFailRet(GetParentIBindContext(&pIBindContext));
+
+ DWORD dwContentType = AssemblyContentType_Default;
+ DWORD cbContentTypeSize = sizeof(dwContentType);
+ IfFailRet(pAssemblyName->GetProperty(ASM_NAME_CONTENT_TYPE, &dwContentType, &cbContentTypeSize));
+
+ if (dwContentType == AssemblyContentType_Default)
+ {
+ hr = pIBindContext->PreBind(pAssemblyName, dwPreBindFlags, ppIBindResult);
+ }
+ else
+ {
+ hr = BindWinRTAssemblyByName(pAssemblyName, ppIBindResult, TRUE);
+ }
+
+ return hr;
+}
+
+//=====================================================================================================================
+// Implements interface method code:IBindContext::IsDefaultContext.
+//
+HRESULT
+CLRPrivBinderWinRT::IsDefaultContext()
+{
+ LIMITED_METHOD_CONTRACT;
+ return S_OK;
+}
+#endif
+
+//=====================================================================================================================
+CLRPrivAssemblyWinRT::CLRPrivAssemblyWinRT(
+ CLRPrivBinderWinRT * pBinder,
+ CLRPrivResourcePathImpl * pResourceIL,
+ IBindResult * pIBindResult,
+ BOOL fShareable)
+ : m_pBinder(nullptr),
+ m_pResourceIL(nullptr),
+ m_pIResourceNI(nullptr),
+ m_pIBindResult(nullptr),
+ m_fShareable(fShareable),
+ m_dwImageTypes(0)
+{
+ STANDARD_VM_CONTRACT;
+ VALIDATE_ARG_THROW((pBinder != nullptr) && (pResourceIL != nullptr) && (pIBindResult != nullptr));
+
+ m_pBinder = clr::SafeAddRef(pBinder);
+ m_pResourceIL = clr::SafeAddRef(pResourceIL);
+ m_pIBindResult = clr::SafeAddRef(pIBindResult);
+}
+
+//=====================================================================================================================
+CLRPrivAssemblyWinRT::~CLRPrivAssemblyWinRT()
+{
+ LIMITED_METHOD_CONTRACT;
+ clr::SafeRelease(m_pIResourceNI);
+}
+
+//=====================================================================================================================
+// Implements interface method code:IUnknown::Release.
+// Overridden to implement self-removal from assembly map code:CLRPrivBinderWinRT::m_FileNameToAssemblyMap.
+//
+ULONG CLRPrivAssemblyWinRT::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ _ASSERTE(m_cRef > 0);
+
+ ULONG cRef;
+
+ {
+ // To achieve proper lifetime semantics, the name-to-assembly map elements' CLRPrivAssemblyWinRT
+ // instances are not ref counted. We cannot allow discovery of the object via m_FileNameToAssemblyMap
+ // when the ref count is 0 (to prevent another thread from AddRef'ing it and Releasing it back to 0 in parallel).
+ // All uses of the map are guarded by the map lock, so we have to decrement the ref count under that
+ // lock (to avoid the chance that 2 threads run Release down to ref count 0 at once).
+#ifndef CLR_STANDALONE_BINDER
+ ForbidSuspendThreadHolder suspend;
+#endif // !CLR_STANDALONE_BINDER
+ {
+#ifndef CLR_STANDALONE_BINDER
+ CrstHolder lock(&m_pBinder->m_MapsLock);
+#endif // !CLR_STANDALONE_BINDER
+ cRef = InterlockedDecrement(&m_cRef);
+ if (cRef == 0)
+ {
+ m_pBinder->RemoveFileNameToAssemblyMapping(m_pResourceIL->GetPath());
+ }
+ }
+ }
+
+ // Note: We cannot deallocate memory in the ForbidSuspendThread region
+ if (cRef == 0)
+ {
+ delete this;
+ }
+
+ return cRef;
+} // CLRPrivAssemblyWinRT::Release
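+
+//=====================================================================================================================
+// A generic sketch of the self-removing release pattern above (hypothetical names): the decrement and the map removal
+// must be atomic with respect to map readers, so both happen under the same lock, while the actual deallocation is
+// deferred until the ForbidSuspend region has been exited.
+//
+//   ULONG cRefRemaining;
+//   {
+//       CrstHolder lock(&mapsLock);                    // same lock that guards every map lookup
+//       cRefRemaining = InterlockedDecrement(&m_cRef);
+//       if (cRefRemaining == 0)
+//           map.Remove(key);                           // object can no longer be discovered via the map
+//   }
+//   if (cRefRemaining == 0)
+//       delete pObject;                                // safe: no lock held, suspension allowed again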
+
+//=====================================================================================================================
+// Implements interface method code:ICLRPrivAssembly::IsShareable.
+//
+HRESULT CLRPrivAssemblyWinRT::IsShareable(
+ BOOL * pbIsShareable)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ VALIDATE_ARG_RET(pbIsShareable != nullptr);
+
+ *pbIsShareable = m_fShareable;
+ return S_OK;
+}
+
+//=====================================================================================================================
+// Implements interface method code:ICLRPrivAssembly::GetAvailableImageTypes.
+//
+HRESULT CLRPrivAssemblyWinRT::GetAvailableImageTypes(
+ LPDWORD pdwImageTypes)
+{
+ STANDARD_BIND_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET(pdwImageTypes != nullptr);
+
+ EX_TRY
+ {
+ IfFailGo(EnsureAvailableImageTypes());
+
+ *pdwImageTypes = m_dwImageTypes;
+ hr = S_OK;
+ ErrExit:
+ ;
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+#ifdef FEATURE_FUSION
+static ICLRPrivResource * GetResourceForBindResult(
+ IBindResult * pIBindResult)
+{
+ STANDARD_VM_CONTRACT;
+ VALIDATE_ARG_THROW(pIBindResult != nullptr);
+
+ WCHAR wzPath[_MAX_PATH];
+ DWORD cchPath = NumItems(wzPath);
+ ReleaseHolder<IAssemblyLocation> pIAssemLoc;
+ IfFailThrow(pIBindResult->GetAssemblyLocation(&pIAssemLoc));
+ IfFailThrow(pIAssemLoc->GetPath(wzPath, &cchPath));
+ return ToInterface<ICLRPrivResource>(new CLRPrivResourcePathImpl(wzPath));
+}
+#endif
+
+//=====================================================================================================================
+// Implements interface method code:ICLRPrivAssembly::GetImageResource.
+//
+HRESULT CLRPrivAssemblyWinRT::GetImageResource(
+ DWORD dwImageType,
+ DWORD * pdwImageType,
+ ICLRPrivResource ** ppIResource)
+{
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET((ppIResource != nullptr) && (m_pIBindResult != nullptr));
+
+ EX_TRY
+ {
+ IfFailGo(EnsureAvailableImageTypes());
+
+ DWORD _dwImageType;
+ if (pdwImageType == nullptr)
+ {
+ pdwImageType = &_dwImageType;
+ }
+
+ if ((dwImageType & ASSEMBLY_IMAGE_TYPE_NATIVE) == ASSEMBLY_IMAGE_TYPE_NATIVE)
+ {
+ if (m_pIResourceNI == nullptr)
+ {
+ IfFailGo(CLR_E_BIND_IMAGE_UNAVAILABLE);
+ }
+
+ *ppIResource = clr::SafeAddRef(m_pIResourceNI);
+ *pdwImageType = ASSEMBLY_IMAGE_TYPE_NATIVE;
+ }
+ else if ((dwImageType & ASSEMBLY_IMAGE_TYPE_IL) == ASSEMBLY_IMAGE_TYPE_IL)
+ {
+ *ppIResource = clr::SafeAddRef(m_pResourceIL);
+ *pdwImageType = ASSEMBLY_IMAGE_TYPE_IL;
+ }
+ else
+ {
+ hr = CLR_E_BIND_IMAGE_UNAVAILABLE;
+ }
+ ErrExit:
+ ;
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
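+
+//=====================================================================================================================
+// A sketch of the intended caller pattern for GetImageResource: prefer the native image and fall back to IL
+// (hypothetical caller, error handling elided):
+//
+//   ReleaseHolder<ICLRPrivResource> pResource;
+//   DWORD dwActualType;
+//   HRESULT hr = pAssembly->GetImageResource(ASSEMBLY_IMAGE_TYPE_NATIVE, &dwActualType, &pResource);
+//   if (hr == CLR_E_BIND_IMAGE_UNAVAILABLE)
+//       hr = pAssembly->GetImageResource(ASSEMBLY_IMAGE_TYPE_IL, &dwActualType, &pResource);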
+
+//=====================================================================================================================
+// Implements interface method code:ICLRPrivBinder::VerifyBind.
+//
+HRESULT CLRPrivBinderWinRT::VerifyBind(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly * pAssembly,
+ ICLRPrivAssemblyInfo * pAssemblyInfo)
+{
+ STANDARD_BIND_CONTRACT;
+ HRESULT hr = S_OK;
+
+ VALIDATE_ARG_RET(pAssemblyInfo != nullptr);
+
+ UINT_PTR binderID;
+ IfFailRet(pAssembly->GetBinderID(&binderID));
+ if (binderID != reinterpret_cast<UINT_PTR>(this))
+ {
+ return pAssembly->VerifyBind(pAssemblyName, pAssembly, pAssemblyInfo);
+ }
+
+ // Since WinRT types are bound by type name and not assembly name, assembly-level version validation
+ // does not make sense here. Just return S_OK.
+ return S_OK;
+}
+
+//=====================================================================================================================
+// Implements interface method code:ICLRPrivBinder::GetBinderID.
+//
+HRESULT CLRPrivBinderWinRT::GetBinderID(
+ UINT_PTR * pBinderId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ *pBinderId = reinterpret_cast<UINT_PTR>(this);
+ return S_OK;
+}
+
+#if defined(FEATURE_CORECLR) || defined(CROSSGEN_COMPILE)
+//=====================================================================================================================
+HRESULT CLRPrivBinderWinRT::FindWinRTAssemblyBySpec(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ STATIC_CONTRACT_WRAPPER;
+ return E_FAIL;
+}
+#endif
+
+#ifdef FEATURE_FUSION
+//=====================================================================================================================
+HRESULT CLRPrivBinderWinRT::FindWinRTAssemblyBySpec(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+
+ AppDomain* pAppDomain = reinterpret_cast<AppDomain*>(pvAppDomain);
+ AssemblySpec* pAssemblySpec = reinterpret_cast<AssemblySpec*>(pvAssemblySpec);
+ VALIDATE_PTR_RET(pAppDomain);
+ VALIDATE_PTR_RET(pAssemblySpec);
+ VALIDATE_PTR_RET(pResult);
+ VALIDATE_PTR_RET(ppAssembly);
+
+ if (pAssemblySpec->IsContentType_WindowsRuntime())
+ {
+ // FindAssemblyBySpec is not supported by this binder.
+ *pResult = CLR_E_BIND_TYPE_NOT_FOUND;
+ *ppAssembly = nullptr;
+ return S_OK;
+ }
+ else
+ {
+ return CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT;
+ }
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivBinderWinRT::GetParentIBindContext(
+ IBindContext **ppIBindContext)
+{
+ STANDARD_BIND_CONTRACT;
+ VALIDATE_ARG_RET(ppIBindContext != nullptr);
+
+ HRESULT hr = S_OK;
+
+ if (m_pParentBinder != nullptr)
+ {
+ _ASSERTE(AppX::IsAppXProcess());
+ IfFailRet(m_pParentBinder->QueryInterface(__uuidof(IBindContext), (void**)ppIBindContext));
+ }
+ else
+ {
+ _ASSERTE(!AppX::IsAppXProcess());
+ EX_TRY
+ {
+ AppDomain* pDomain = AppDomain::GetCurrentDomain();
+ hr = GetBindContextFromApplicationContext(pDomain->CreateFusionContext(), ppIBindContext);
+ }
+ EX_CATCH_HRESULT(hr);
+ }
+
+ _ASSERTE(FAILED(hr) || *ppIBindContext != nullptr);
+ return hr;
+}
+#endif
+
+//=====================================================================================================================
+HRESULT CLRPrivAssemblyWinRT::GetIBindResult(
+ IBindResult ** ppIBindResult)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ VALIDATE_ARG_RET(ppIBindResult != nullptr);
+ VALIDATE_CONDITION((m_pIBindResult != nullptr), return E_UNEXPECTED);
+
+ *ppIBindResult = clr::SafeAddRef(m_pIBindResult);
+
+ return S_OK;
+}
+
+//=====================================================================================================================
+HRESULT CLRPrivAssemblyWinRT::EnsureAvailableImageTypes()
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+
+ DWORD dwImageTypesLocal = m_dwImageTypes;
+
+ // If the image type mask has not yet been computed, attempt to bind to the native assembly
+ if (dwImageTypesLocal == 0)
+ {
+#ifdef FEATURE_FUSION
+ CLRPrivBinderWinRT *pBinder = m_pBinder;
+ IfFailGo(pBinder->BindAssemblyToNativeAssembly(this));
+#endif
+ if (m_pIResourceNI == nullptr)
+ {
+#ifdef FEATURE_FUSION
+ ReleaseHolder<IBindResult> pIBindResultNI;
+
+ if (SUCCEEDED(hr = m_pIBindResult->GetNativeImage(&pIBindResultNI, nullptr)) && pIBindResultNI != nullptr)
+ {
+ ReleaseHolder<ICLRPrivResource> pResourceNI = GetResourceForBindResult(pIBindResultNI);
+ if (InterlockedCompareExchangeT<ICLRPrivResource *>(&m_pIResourceNI, pResourceNI, nullptr) == nullptr)
+ pResourceNI.SuppressRelease();
+ }
+#else
+ if (m_pIBindResult->HasNativeImage())
+ {
+ SString sPath = m_pIBindResult->GetNativeImage()->GetPath();
+ m_pIResourceNI = new CLRPrivResourcePathImpl(sPath.GetUnicode());
+ m_pIResourceNI->AddRef();
+ }
+#endif
+ IfFailGo(hr);
+ }
+
+ DWORD dwImageTypes = 0;
+
+ if (m_pResourceIL != nullptr)
+ dwImageTypes |= ASSEMBLY_IMAGE_TYPE_IL;
+
+ if (m_pIResourceNI != nullptr)
+ dwImageTypes |= ASSEMBLY_IMAGE_TYPE_NATIVE;
+
+ m_dwImageTypes = dwImageTypes;
+ }
+ErrExit:
+
+ return hr;
+}
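+
+//=====================================================================================================================
+// The NI resource above is published with a compare-exchange so concurrent callers race benignly - a condensed sketch
+// of that publish step (FEATURE_FUSION path):
+//
+//   ReleaseHolder<ICLRPrivResource> pResourceNI = GetResourceForBindResult(pIBindResultNI);
+//   if (InterlockedCompareExchangeT<ICLRPrivResource *>(&m_pIResourceNI, pResourceNI, nullptr) == nullptr)
+//       pResourceNI.SuppressRelease();   // we won the race; the member now owns the reference
+//   // else: another thread already published; the holder releases our duplicate on scope exit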
+
+//=====================================================================================================================
+//static
+HRESULT CLRPrivAssemblyWinRT::GetIBindResult(
+ ICLRPrivAssembly * pPrivAssembly,
+ IBindResult ** ppIBindResult)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ HRESULT hr;
+
+ VALIDATE_ARG_RET(pPrivAssembly != nullptr);
+
+ ReleaseHolder<ICLRPrivAssemblyID_WinRT> pAssemblyID;
+ IfFailRet(pPrivAssembly->QueryInterface(__uuidof(ICLRPrivAssemblyID_WinRT), (LPVOID *)&pAssemblyID));
+ // QI succeeded, so the cast to the concrete type is safe:
+ CLRPrivAssemblyWinRT * pPrivAssemblyWinRT = static_cast<CLRPrivAssemblyWinRT *>(pPrivAssembly);
+
+ return pPrivAssemblyWinRT->GetIBindResult(ppIBindResult);
+}
+
+#endif //!DACCESS_COMPILE
diff --git a/src/vm/clrprivbinderwinrt.h b/src/vm/clrprivbinderwinrt.h
new file mode 100644
index 0000000000..98993c15b1
--- /dev/null
+++ b/src/vm/clrprivbinderwinrt.h
@@ -0,0 +1,472 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+// Contains the types that implement code:ICLRPrivBinder and code:ICLRPrivAssembly for WinRT binding.
+//
+//=============================================================================================
+
+#ifdef FEATURE_HOSTED_BINDER
+
+#pragma once
+
+#include "holder.h"
+#include "internalunknownimpl.h"
+#include "clrprivbinding.h"
+#include "clrprivruntimebinders.h"
+#include "clrprivbinderutil.h"
+#include "clrprivtypecachewinrt.h"
+#include "clr_std/utility"
+#include "winrt/windowsstring.h"
+#include "appxutil.h"
+
+#ifndef FEATURE_FUSION
+#include "coreclr/corebindresult.h"
+
+// IBindResult maps directly to its one and only implementation on CoreCLR.
+typedef CoreBindResult IBindResult;
+#endif // FEATURE_FUSION
+
+//=====================================================================================================================
+// Forward declarations
+class CLRPrivBinderWinRT;
+class CLRPrivAssemblyWinRT;
+#ifdef FEATURE_CORECLR
+namespace BINDER_SPACE
+{
+ class ApplicationContext;
+ class Assembly;
+};
+#endif
+
+typedef DPTR(CLRPrivBinderWinRT) PTR_CLRPrivBinderWinRT;
+typedef DPTR(CLRPrivAssemblyWinRT) PTR_CLRPrivAssemblyWinRT;
+
+BOOL
+IsWindowsNamespace(const char * wszNamespace);
+
+//=====================================================================================================================
+//=====================================================================================================================
+//=====================================================================================================================
+class CLRPrivBinderWinRT :
+ public IUnknownCommon<ICLRPrivBinder
+#ifdef FEATURE_FUSION
+ , IBindContext
+#endif //FEATURE_FUSION
+ >
+{
+ friend class CLRPrivAssemblyWinRT;
+
+public:
+ //=============================================================================================
+ // Options of namespace resolution
+ enum NamespaceResolutionKind
+ {
+ NamespaceResolutionKind_WindowsAPI, // Using RoResolveNamespace Win8 API
+ NamespaceResolutionKind_DesignerResolveEvent // Using DesignerNamespaceResolve event
+ };
+
+private:
+ //=============================================================================================
+ // Data structures for Namespace -> FileNameList map (as returned by RoResolveNamespace API)
+
+ // Entry in SHash table that maps namespace to list of files
+ struct NamespaceToFileNameListMapEntry
+ {
+ PTR_WSTR m_wszNamespace;
+ CLRPrivBinderUtil::PTR_WStringList m_pFileNameList;
+ };
+
+ // SHash traits for Namespace -> FileNameList hash
+ class NamespaceToFileNameListMapTraits : public NoRemoveSHashTraits< DefaultSHashTraits< NamespaceToFileNameListMapEntry > >
+ {
+ public:
+ typedef PCWSTR key_t;
+ static const NamespaceToFileNameListMapEntry Null() { NamespaceToFileNameListMapEntry e; e.m_wszNamespace = PTR_WSTR(nullptr); return e; }
+ static bool IsNull(const NamespaceToFileNameListMapEntry & e) { return e.m_wszNamespace == nullptr; }
+ static PCWSTR GetKey(const NamespaceToFileNameListMapEntry & e) { return e.m_wszNamespace; }
+ static count_t Hash(PCWSTR str) { return HashString(str); }
+ static BOOL Equals(PCWSTR lhs, PCWSTR rhs) { LIMITED_METHOD_CONTRACT; return (wcscmp(lhs, rhs) == 0); }
+
+ void OnDestructPerEntryCleanupAction(const NamespaceToFileNameListMapEntry & e)
+ {
+ delete [] e.m_wszNamespace;
+ CLRPrivBinderUtil::WStringList_Delete(e.m_pFileNameList);
+ }
+ static const bool s_DestructPerEntryCleanupAction = true;
+ };
+
+ typedef SHash<NamespaceToFileNameListMapTraits> NamespaceToFileNameListMap;
+
+ //=============================================================================================
+ // Data structure for FileName -> CLRPrivAssemblyWinRT * map
+
+ struct FileNameToAssemblyWinRTMapEntry
+ {
+ PTR_CWSTR m_wszFileName; // File name (owned by m_pAssembly)
+ PTR_CLRPrivAssemblyWinRT m_pAssembly;
+ };
+
+ class FileNameToAssemblyWinRTMapTraits : public DefaultSHashTraits<FileNameToAssemblyWinRTMapEntry>
+ {
+ public:
+ typedef PCWSTR key_t;
+ static const FileNameToAssemblyWinRTMapEntry Null() { FileNameToAssemblyWinRTMapEntry e; e.m_wszFileName = PTR_CWSTR(nullptr); return e; }
+ static bool IsNull(const FileNameToAssemblyWinRTMapEntry &e) { return e.m_wszFileName == PTR_CWSTR(nullptr); }
+ static const FileNameToAssemblyWinRTMapEntry Deleted() { FileNameToAssemblyWinRTMapEntry e; e.m_wszFileName = (PTR_CWSTR)-1; return e; }
+ static bool IsDeleted(const FileNameToAssemblyWinRTMapEntry & e) { return dac_cast<TADDR>(e.m_wszFileName) == (TADDR)-1; }
+ static PCWSTR GetKey(const FileNameToAssemblyWinRTMapEntry & e) { return e.m_wszFileName; }
+ static count_t Hash(PCWSTR str) { return HashString(str); }
+ static BOOL Equals(PCWSTR lhs, PCWSTR rhs) { LIMITED_METHOD_CONTRACT; return (wcscmp(lhs, rhs) == 0); }
+ };
+
+ typedef SHash<FileNameToAssemblyWinRTMapTraits> FileNameToAssemblyWinRTMap;
+
+public:
+ //=============================================================================================
+ // ICLRPrivBinder interface methods
+
+ // Implements interface method code:ICLRPrivBinder::BindAssemblyByName.
+ STDMETHOD(BindAssemblyByName)(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly);
+
+ // Implements interface method code:ICLRPrivBinder::VerifyBind.
+ STDMETHOD(VerifyBind)(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly * pAssembly,
+ ICLRPrivAssemblyInfo * pAssemblyInfo);
+
+ // Implements interface method code:ICLRPrivBinder::GetBinderFlags
+ STDMETHOD(GetBinderFlags)(
+ DWORD *pBinderFlags)
+ {
+ STATIC_CONTRACT_WRAPPER;
+
+ if (pBinderFlags == NULL)
+ return E_INVALIDARG;
+
+ HRESULT hr = S_OK;
+
+ if (m_pParentBinder != NULL)
+ hr = m_pParentBinder->GetBinderFlags(pBinderFlags);
+ else
+ *pBinderFlags = BINDER_NONE;
+
+ return hr;
+ }
+
+ // Implements interface method code:ICLRPrivBinder::GetBinderID.
+ STDMETHOD(GetBinderID)(
+ UINT_PTR * pBinderId);
+
+ STDMETHOD(FindAssemblyBySpec)(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifndef DACCESS_COMPILE
+ // CLRPrivBinderWinRT instances only have parent binders in Metro processes (not in classic).
+ _ASSERTE((AppX::IsAppXProcess()) == (m_pParentBinder != nullptr));
+#endif
+
+ if (m_pParentBinder != NULL)
+ {
+ return m_pParentBinder->FindAssemblyBySpec(pvAppDomain, pvAssemblySpec, pResult, ppAssembly);
+ }
+ else
+ {
+ // Note: we should never get here if the caller is Module::GetAssemblyIfLoaded, but we can
+ // be called from AssemblySpec::LoadDomainAssembly.
+ return FindWinRTAssemblyBySpec(pvAppDomain, pvAssemblySpec, pResult, ppAssembly);
+ }
+ }
+
+ HRESULT FindWinRTAssemblyBySpec(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly);
+
+#ifdef FEATURE_FUSION
+ //=============================================================================================
+ // IBindContext interface methods
+
+ // Implements interface method code:IBindContext::PreBind.
+ STDMETHOD(PreBind)(
+ IAssemblyName * pIAssemblyName,
+ DWORD dwPreBindFlags,
+ IBindResult ** ppIBindResult);
+
+ // Implements interface method code:IBindContext::IsDefaultContext.
+ STDMETHOD(IsDefaultContext)();
+#endif //FEATURE_FUSION
+
+ //=============================================================================================
+ // Class methods
+
+ CLRPrivBinderWinRT(
+ ICLRPrivBinder * pParentBinder,
+ CLRPrivTypeCacheWinRT * pWinRtTypeCache,
+ LPCWSTR * rgwzAltPath,
+ UINT cAltPaths,
+ NamespaceResolutionKind fNamespaceResolutionKind,
+ BOOL fCanUseNativeImages);
+
+ static
+ CLRPrivBinderWinRT * GetOrCreateBinder(
+ CLRPrivTypeCacheWinRT * pWinRtTypeCache,
+ NamespaceResolutionKind fNamespaceResolutionKind);
+
+ ~CLRPrivBinderWinRT();
+
+ // Binds WinRT assemblies only.
+ HRESULT BindWinRTAssemblyByName(
+ IAssemblyName * pIAssemblyName,
+ CLRPrivAssemblyWinRT ** ppAssembly,
+ BOOL fPreBind = FALSE);
+
+ // Binds WinRT assemblies only.
+ HRESULT BindWinRTAssemblyByName(
+ IAssemblyName * pIAssemblyName,
+ ICLRPrivAssembly ** ppPrivAssembly,
+ BOOL fPreBind = FALSE);
+
+ // Binds WinRT assemblies only.
+ HRESULT BindWinRTAssemblyByName(
+ IAssemblyName * pIAssemblyName,
+ IBindResult ** ppIBindResult,
+ BOOL fPreBind = FALSE);
+
+#ifndef FEATURE_FUSION
+ HRESULT GetAssemblyAndTryFindNativeImage(SString &sWinmdFilename, LPCWSTR pwzSimpleName, BINDER_SPACE::Assembly ** ppAssembly);
+#endif
+#ifdef FEATURE_CORECLR
+ // On Phone the application's APP_PATH CoreCLR hosting config property is used as the app
+ // package graph for RoResolveNamespace to find 3rd party WinMDs. This method wires up
+ // the app paths so the WinRT binder will find 3rd party WinMDs.
+ HRESULT SetApplicationContext(BINDER_SPACE::ApplicationContext *pApplicationContext, SString &appLocalWinMD);
+#endif
+#ifndef CLR_STANDALONE_BINDER
+ // Finds assembly with WinRT type if it is already loaded
+ // Note: This method could implement interface code:ICLRPrivWinRtTypeBinder if it is ever needed
+ PTR_Assembly FindAssemblyForTypeIfLoaded(
+ PTR_AppDomain pAppDomain,
+ LPCUTF8 szNamespace,
+ LPCUTF8 szClassName);
+#endif // !CLR_STANDALONE_BINDER
+
+#if defined(FEATURE_COMINTEROP_WINRT_DESKTOP_HOST) && !defined(CROSSGEN_COMPILE)
+ BOOL SetLocalWinMDPath(HSTRING localWinMDPath);
+#endif // FEATURE_COMINTEROP_WINRT_DESKTOP_HOST && !CROSSGEN_COMPILE
+
+private:
+ //=============================================================================================
+ // Accessors for FileName -> CLRPrivAssemblyWinRT * map
+
+ ReleaseHolder<CLRPrivAssemblyWinRT> FindAssemblyByFileName(
+ PCWSTR wzsFileName);
+
+ ReleaseHolder<CLRPrivAssemblyWinRT> AddFileNameToAssemblyMapping(
+ PCWSTR wszFileName,
+ CLRPrivAssemblyWinRT * pAssembly);
+
+ void RemoveFileNameToAssemblyMapping(
+ PCWSTR wszFileName);
+
+ //=============================================================================================
+ // Internal methods
+
+ // Returns list of file names from code:m_NamespaceToFileNameListMap for the namespace
+ HRESULT GetFileNameListForNamespace(LPCWSTR wszNamespace, CLRPrivBinderUtil::WStringList ** ppFileNameList);
+
+ // Adds (thread-safe) list of file names to code:m_NamespaceToFileNameListMap for the namespace.
+ // Returns TRUE if the list was added to the cache.
+ BOOL AddFileNameListForNamespace(
+ LPCWSTR wszNamespace,
+ CLRPrivBinderUtil::WStringList * pFileNameList,
+ CLRPrivBinderUtil::WStringList ** ppFileNameList);
+
+#ifdef FEATURE_FUSION
+ HRESULT BindAssemblyToNativeAssembly(CLRPrivAssemblyWinRT *pAssembly);
+#endif
+
+private:
+ //=============================================================================================
+
+ // Namespace -> FileName list map ... items are never removed
+ NamespaceToFileNameListMap m_NamespaceToFileNameListMap;
+ // FileName -> CLRPrivAssemblyWinRT * map ... items can be removed when CLRPrivAssemblyWinRT dies
+ FileNameToAssemblyWinRTMap m_FileNameToAssemblyMap;
+
+ // Lock for the above maps
+#ifndef CLR_STANDALONE_BINDER
+ CrstExplicitInit m_MapsLock;
+ // Lock for adding into the above maps, in addition to the read-lock above
+ CrstExplicitInit m_MapsAddLock;
+#endif // !CLR_STANDALONE_BINDER
+
+ //=============================================================================================
+
+ PTR_CLRPrivTypeCacheWinRT m_pTypeCache;
+
+ // The kind of namespace resolution (RoResolveNamespace Win8 API or DesignerNamespaceResolve event)
+ NamespaceResolutionKind m_fNamespaceResolutionKind;
+
+ static CLRPrivBinderWinRT * s_pSingleton;
+
+ // Parent binder used to delegate bind requests up the binder hierarchy.
+ ICLRPrivBinder * m_pParentBinder;
+
+#ifndef CROSSGEN_COMPILE
+ // Alternative paths for use with the RoResolveNamespace API
+ CLRPrivBinderUtil::HSTRINGArrayHolder m_rgAltPaths;
+#endif
+
+#ifdef FEATURE_FUSION
+ // Native binder assisting logic
+ BOOL m_fCanUseNativeImages;
+
+ ReleaseHolder<IILFingerprintFactory> m_pFingerprintFactory;
+#endif
+
+#ifdef FEATURE_FUSION
+ HRESULT GetParentIBindContext(IBindContext **ppIBindContext);
+#endif //FEATURE_FUSION
+
+#ifdef FEATURE_CORECLR
+ BINDER_SPACE::ApplicationContext * m_pApplicationContext;
+ NewArrayHolder<WCHAR> m_appLocalWinMDPath;
+#endif
+
+#ifdef FEATURE_COMINTEROP_WINRT_DESKTOP_HOST
+ // App-local location that can be probed for WinMD files
+ BOOL m_fCanSetLocalWinMDPath;
+ CrstExplicitInit m_localWinMDPathLock;
+#ifndef CROSSGEN_COMPILE
+ clr::winrt::String m_localWinMDPath;
+#endif // !CROSSGEN_COMPILE
+#endif // FEATURE_COMINTEROP_WINRT_DESKTOP_HOST
+
+}; // class CLRPrivBinderWinRT
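+
+// A sketch of typical usage of this binder, assuming the VM-side type cache singleton from clrprivtypecachewinrt.h:
+//
+//   CLRPrivTypeCacheWinRT * pTypeCache = CLRPrivTypeCacheWinRT::GetOrCreateTypeCache();
+//   CLRPrivBinderWinRT * pBinder = CLRPrivBinderWinRT::GetOrCreateBinder(
+//       pTypeCache,
+//       CLRPrivBinderWinRT::NamespaceResolutionKind_WindowsAPI);
+//
+//   ReleaseHolder<ICLRPrivAssembly> pAssembly;
+//   HRESULT hr = pBinder->BindWinRTAssemblyByName(pIAssemblyName, &pAssembly);   // WinRT content type only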
+
+
+//=====================================================================================================================
+//=====================================================================================================================
+//=====================================================================================================================
+class CLRPrivAssemblyWinRT :
+ public IUnknownCommon<ICLRPrivAssembly, ICLRPrivAssemblyID_WinRT>
+{
+ friend class CLRPrivBinderWinRT;
+
+public:
+ //=============================================================================================
+ // Class methods
+
+ CLRPrivAssemblyWinRT(
+ CLRPrivBinderWinRT * pBinder,
+ CLRPrivBinderUtil::CLRPrivResourcePathImpl * pResourceIL,
+ IBindResult * pIBindResult,
+ BOOL fShareable);
+
+ ~CLRPrivAssemblyWinRT();
+
+ HRESULT GetIBindResult(
+ IBindResult ** ppIBindResult);
+
+ static HRESULT GetIBindResult(
+ ICLRPrivAssembly * pPrivAssembly,
+ IBindResult ** ppIBindResult);
+
+ //=============================================================================================
+ // IUnknown interface methods
+
+ // Implements interface method code:IUnknown::Release.
+ // Overridden to implement self-removal from assembly map code:CLRPrivBinderWinRT::m_FileNameToAssemblyMap.
+ STDMETHOD_(ULONG, Release)();
+
+ //=============================================================================================
+ // ICLRPrivBinder interface methods
+
+ // Implements interface method code:ICLRPrivBinder::BindAssemblyByName.
+ STDMETHOD(BindAssemblyByName)(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly ** ppAssembly)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ return m_pBinder->BindAssemblyByName(pAssemblyName, ppAssembly);
+ }
+
+ // Implements interface method code:ICLRPrivBinder::VerifyBind.
+ STDMETHOD(VerifyBind)(
+ IAssemblyName * pAssemblyName,
+ ICLRPrivAssembly * pAssembly,
+ ICLRPrivAssemblyInfo * pAssemblyInfo)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ return m_pBinder->VerifyBind(pAssemblyName, pAssembly, pAssemblyInfo);
+ }
+
+ // Implements interface method code:ICLRPrivBinder::GetBinderFlags
+ STDMETHOD(GetBinderFlags)(
+ DWORD *pBinderFlags)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ return m_pBinder->GetBinderFlags(pBinderFlags);
+ }
+
+ // Implements interface method code:ICLRPrivBinder::GetBinderID.
+ STDMETHOD(GetBinderID)(
+ UINT_PTR * pBinderId)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ return m_pBinder->GetBinderID(pBinderId);
+ }
+
+ // Implements code:ICLRPrivBinder::FindAssemblyBySpec
+ STDMETHOD(FindAssemblyBySpec)(
+ LPVOID pvAppDomain,
+ LPVOID pvAssemblySpec,
+ HRESULT * pResult,
+ ICLRPrivAssembly ** ppAssembly)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ return m_pBinder->FindAssemblyBySpec(pvAppDomain, pvAssemblySpec, pResult, ppAssembly);
+ }
+
+ //=============================================================================================
+ // ICLRPrivAssembly interface methods
+
+ // Implements interface method code:ICLRPrivAssembly::IsShareable.
+ STDMETHOD(IsShareable)(
+ BOOL * pbIsShareable);
+
+ // Implements interface method code:ICLRPrivAssembly::GetAvailableImageTypes.
+ STDMETHOD(GetAvailableImageTypes)(
+ LPDWORD pdwImageTypes);
+
+ // Implements interface method code:ICLRPrivAssembly::GetImageResource.
+ STDMETHOD(GetImageResource)(
+ DWORD dwImageType,
+ DWORD * pdwImageType,
+ ICLRPrivResource ** ppIResource);
+
+private:
+ //=============================================================================================
+
+ HRESULT EnsureAvailableImageTypes();
+
+ ReleaseHolder<CLRPrivBinderWinRT> m_pBinder;
+ ReleaseHolder<CLRPrivBinderUtil::CLRPrivResourcePathImpl> m_pResourceIL;
+ // This cannot be a holder as there can be a race to assign to it.
+ ICLRPrivResource * m_pIResourceNI;
+ ReleaseHolder<IBindResult> m_pIBindResult;
+ BOOL m_fShareable;
+ Volatile<DWORD> m_dwImageTypes;
+}; // class CLRPrivAssemblyWinRT
+
+#endif //FEATURE_HOSTED_BINDER
diff --git a/src/vm/clrprivtypecachereflectiononlywinrt.cpp b/src/vm/clrprivtypecachereflectiononlywinrt.cpp
new file mode 100644
index 0000000000..9867094259
--- /dev/null
+++ b/src/vm/clrprivtypecachereflectiononlywinrt.cpp
@@ -0,0 +1,261 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// Contains VM implementation of WinRT type cache for code:CLRPrivBinderReflectionOnlyWinRT binder.
+//
+//=====================================================================================================================
+
+#include "common.h" // precompiled header
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+
+#include "clrprivtypecachereflectiononlywinrt.h"
+#include <typeresolution.h>
+
+//=====================================================================================================================
+// S_OK - pAssembly contains type wszTypeName
+// S_FALSE - pAssembly does not contain type wszTypeName
+//
+HRESULT
+CLRPrivTypeCacheReflectionOnlyWinRT::ContainsType(
+ ICLRPrivAssembly * pPrivAssembly,
+ LPCWSTR wszTypeName)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ AppDomain * pAppDomain = AppDomain::GetCurrentDomain();
+
+ ReleaseHolder<PEAssembly> pPEAssembly;
+ IfFailGo(pAppDomain->BindHostedPrivAssembly(nullptr, pPrivAssembly, nullptr, &pPEAssembly, TRUE));
+ _ASSERTE(pPEAssembly != nullptr);
+
+ {
+ // Find the DomainAssembly * (can be cached if calling this every time proves too slow)
+ DomainAssembly * pDomainAssembly = pAppDomain->LoadDomainAssembly(
+ nullptr, // pIdentity
+ pPEAssembly,
+ FILE_LOADED,
+ nullptr); // pLoadSecurity
+
+ // Convert the type name into namespace and type names in UTF8
+ StackSString ssTypeNameWCHAR(wszTypeName);
+
+ StackSString ssTypeName;
+ ssTypeNameWCHAR.ConvertToUTF8(ssTypeName);
+ LPUTF8 szTypeName = (LPUTF8)ssTypeName.GetUTF8NoConvert();
+
+ LPCUTF8 szNamespace;
+ LPCUTF8 szClassName;
+ ns::SplitInline(szTypeName, szNamespace, szClassName);
+
+ NameHandle typeName(szNamespace, szClassName);
+
+ // Find the type in the assembly (use existing hash of all type names defined in the assembly)
+ TypeHandle thType;
+ mdToken tkType;
+ Module * pTypeModule;
+ mdToken tkExportedType;
+ if (pDomainAssembly->GetAssembly()->GetLoader()->FindClassModuleThrowing(
+ &typeName,
+ &thType,
+ &tkType,
+ &pTypeModule,
+ &tkExportedType,
+ nullptr, // ppClassHashEntry
+ nullptr, // pLookInThisModuleOnly
+ Loader::DontLoad))
+ { // The type is present in the assembly
+ hr = S_OK;
+ }
+ else
+ { // The type is not present in the assembly
+ hr = S_FALSE;
+ }
+ }
+
+ErrExit:
+ return hr;
+} // CLRPrivTypeCacheReflectionOnlyWinRT::ContainsType
+
+//=====================================================================================================================
+// Raises user event NamespaceResolveEvent to get a list of files for this namespace.
+//
+void
+CLRPrivTypeCacheReflectionOnlyWinRT::RaiseNamespaceResolveEvent(
+ LPCWSTR wszNamespace,
+ DomainAssembly * pParentAssembly,
+ CLRPrivBinderUtil::WStringListHolder * pFileNameList)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(pFileNameList != nullptr);
+
+ AppDomain * pAppDomain = AppDomain::GetCurrentDomain();
+
+ GCX_COOP();
+
+ struct _gc {
+ OBJECTREF AppDomainRef;
+ OBJECTREF AssemblyRef;
+ STRINGREF str;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ if ((gc.AppDomainRef = pAppDomain->GetRawExposedObject()) != NULL)
+ {
+ if (pParentAssembly != nullptr)
+ {
+ gc.AssemblyRef = pParentAssembly->GetExposedAssemblyObject();
+ }
+
+ MethodDescCallSite onNamespaceResolve(METHOD__APP_DOMAIN__ON_REFLECTION_ONLY_NAMESPACE_RESOLVE, &gc.AppDomainRef);
+ gc.str = StringObject::NewString(wszNamespace);
+ ARG_SLOT args[3] =
+ {
+ ObjToArgSlot(gc.AppDomainRef),
+ ObjToArgSlot(gc.AssemblyRef),
+ ObjToArgSlot(gc.str)
+ };
+ PTRARRAYREF ResultingAssemblyArrayRef = (PTRARRAYREF) onNamespaceResolve.Call_RetOBJECTREF(args);
+ if (ResultingAssemblyArrayRef != NULL)
+ {
+ for (DWORD i = 0; i < ResultingAssemblyArrayRef->GetNumComponents(); i++)
+ {
+ ASSEMBLYREF ResultingAssemblyRef = (ASSEMBLYREF) ResultingAssemblyArrayRef->GetAt(i);
+ Assembly * pAssembly = ResultingAssemblyRef->GetAssembly();
+
+ if (pAssembly->IsCollectible())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleAssemblyResolve"));
+ }
+
+ PEAssembly * pPEAssembly = pAssembly->GetManifestFile();
+
+ ICLRPrivAssembly * pPrivAssembly = pPEAssembly->GetHostAssembly();
+ if ((pPrivAssembly == NULL) || !IsAfContentType_WindowsRuntime(pPEAssembly->GetFlags()))
+ {
+ COMPlusThrow(kNotSupportedException, IDS_EE_REFLECTIONONLY_WINRT_INVALIDASSEMBLY);
+ }
+
+ pFileNameList->InsertTail(pPEAssembly->GetILimage()->GetPath());
+ }
+ }
+ }
+ GCPROTECT_END();
+} // CLRPrivTypeCacheReflectionOnlyWinRT::RaiseNamespaceResolveEvent
+
+//=====================================================================================================================
+// Implementation of QCall System.Runtime.InteropServices.WindowsRuntime.WindowsRuntimeMetadata.nResolveNamespace
+// It is essentially a P/Invoke wrapper around the Win8 API RoResolveNamespace
+//
+void
+QCALLTYPE
+CLRPrivTypeCacheReflectionOnlyWinRT::ResolveNamespace(
+ LPCWSTR wszNamespace,
+ LPCWSTR wszWindowsSdkPath,
+ LPCWSTR * rgPackageGraphPaths,
+ INT32 cPackageGraphPaths,
+ QCall::ObjectHandleOnStack retFileNames)
+{
+ QCALL_CONTRACT;
+
+ _ASSERTE(wszNamespace != nullptr);
+
+ BEGIN_QCALL;
+
+ CoTaskMemHSTRINGArrayHolder hFileNames;
+
+ if (!WinRTSupported())
+ {
+ IfFailThrow(COR_E_PLATFORMNOTSUPPORTED);
+ }
+
+ {
+ CLRPrivBinderUtil::HSTRINGArrayHolder rgPackageGraph;
+ rgPackageGraph.Allocate(cPackageGraphPaths);
+
+ LPCWSTR wszNamespaceRoResolve = wszNamespace;
+
+ for (INT32 i = 0; i < cPackageGraphPaths; i++)
+ {
+ _ASSERTE(rgPackageGraph.GetRawArray()[i] == nullptr);
+ WinRtString hsPackageGraphPath;
+ IfFailThrow(hsPackageGraphPath.Initialize(rgPackageGraphPaths[i]));
+ hsPackageGraphPath.Detach(&rgPackageGraph.GetRawArray()[i]);
+ }
+
+ UINT32 cchNamespace, cchWindowsSdkPath;
+ IfFailThrow(StringCchLength(wszNamespace, &cchNamespace));
+ IfFailThrow(StringCchLength(wszWindowsSdkPath, &cchWindowsSdkPath));
+
+ DWORD cFileNames = 0;
+ HSTRING * rgFileNames = nullptr;
+ HRESULT hr = RoResolveNamespace(
+ WinRtStringRef(wszNamespace, cchNamespace),
+ WinRtStringRef(wszWindowsSdkPath, cchWindowsSdkPath),
+ rgPackageGraph.GetCount(),
+ rgPackageGraph.GetRawArray(),
+ &cFileNames,
+ &rgFileNames,
+ nullptr, // pcDirectNamespaceChildren
+ nullptr); // rgDirectNamespaceChildren
+ hFileNames.Init(rgFileNames, cFileNames);
+
+ if (hr == HRESULT_FROM_WIN32(APPMODEL_ERROR_NO_PACKAGE))
+ { // User tried to resolve 3rd party namespace without passing package graph - throw InvalidOperationException with custom message
+ _ASSERTE(cPackageGraphPaths == 0);
+ COMPlusThrow(kInvalidOperationException, IDS_EE_REFLECTIONONLY_WINRT_LOADFAILURE_THIRDPARTY);
+ }
+ IfFailThrow(hr);
+ if (hr != S_OK)
+ {
+ IfFailThrow(E_UNEXPECTED);
+ }
+ }
+
+ {
+ GCX_COOP();
+
+ PTRARRAYREF orFileNames = NULL;
+ GCPROTECT_BEGIN(orFileNames);
+
+ orFileNames = (PTRARRAYREF) AllocateObjectArray(hFileNames.GetCount(), g_pStringClass);
+
+ for (DWORD i = 0; i < hFileNames.GetCount(); i++)
+ {
+ UINT32 cchFileName = 0;
+
+ HSTRING hsFileName = hFileNames.GetAt(i);
+ LPCWSTR wszFileName;
+
+ if (hsFileName != nullptr)
+ {
+ wszFileName = WindowsGetStringRawBuffer(
+ hsFileName,
+ &cchFileName);
+
+ STRINGREF str = StringObject::NewString(wszFileName);
+ orFileNames->SetAt(i, str);
+ }
+ }
+
+ retFileNames.Set(orFileNames);
+
+ GCPROTECT_END();
+ }
+
+ END_QCALL;
+} // CLRPrivTypeCacheReflectionOnlyWinRT::ResolveNamespace
+
+//=====================================================================================================================
+
+#endif //FEATURE_REFLECTION_ONLY_LOAD
+#endif //!DACCESS_COMPILE
diff --git a/src/vm/clrprivtypecachereflectiononlywinrt.h b/src/vm/clrprivtypecachereflectiononlywinrt.h
new file mode 100644
index 0000000000..ce6df977ba
--- /dev/null
+++ b/src/vm/clrprivtypecachereflectiononlywinrt.h
@@ -0,0 +1,61 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// Contains VM implementation of code:ICLRPrivTypeCacheReflectionOnlyWinRT for code:CLRPrivBinderReflectionOnlyWinRT binder.
+//
+//=====================================================================================================================
+
+#ifdef FEATURE_HOSTED_BINDER
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+
+#pragma once
+
+#include "internalunknownimpl.h"
+#include "clrprivbinding.h"
+
+//=====================================================================================================================
+// Forward declarations
+class DomainAssembly;
+
+//=====================================================================================================================
+class CLRPrivTypeCacheReflectionOnlyWinRT :
+ public IUnknownCommon<IUnknown>
+{
+public:
+ //=============================================================================================
+ // Class methods
+
+ // S_OK - pAssembly contains type wszTypeName
+ // S_FALSE - pAssembly does not contain type wszTypeName
+ STDMETHOD(ContainsType)(
+ ICLRPrivAssembly * pAssembly,
+ LPCWSTR wszTypeName);
+
+#ifndef DACCESS_COMPILE
+
+ // Raises user event NamespaceResolveEvent to get a list of files for this namespace.
+ void RaiseNamespaceResolveEvent(
+ LPCWSTR wszNamespace,
+ DomainAssembly * pParentAssembly,
+ CLRPrivBinderUtil::WStringListHolder * pFileNameList);
+
+#endif //!DACCESS_COMPILE
+
+ // Implementation of QCall System.Runtime.InteropServices.WindowsRuntime.WindowsRuntimeMetadata.nResolveNamespace
+ // It is essentially a P/Invoke wrapper around the Win8 API RoResolveNamespace
+ static
+ void QCALLTYPE ResolveNamespace(
+ LPCWSTR wszNamespace,
+ LPCWSTR wszWindowsSdkPath,
+ LPCWSTR * rgPackageGraphPaths,
+ INT32 cPackageGraphPaths,
+ QCall::ObjectHandleOnStack retFileNames);
+
+}; // class CLRPrivTypeCacheReflectionOnlyWinRT
+
+#endif //FEATURE_REFLECTION_ONLY_LOAD
+#endif // FEATURE_HOSTED_BINDER
diff --git a/src/vm/clrprivtypecachewinrt.cpp b/src/vm/clrprivtypecachewinrt.cpp
new file mode 100644
index 0000000000..4669b9b875
--- /dev/null
+++ b/src/vm/clrprivtypecachewinrt.cpp
@@ -0,0 +1,247 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// Contains VM implementation of WinRT type cache for code:CLRPrivBinderWinRT binder.
+//
+//=====================================================================================================================
+
+#include "common.h" // precompiled header
+#include "clrprivtypecachewinrt.h"
+
+#ifndef DACCESS_COMPILE
+
+//=====================================================================================================================
+// S_OK - pAssembly contains type wszTypeName
+// S_FALSE - pAssembly does not contain type wszTypeName
+//
+HRESULT
+CLRPrivTypeCacheWinRT::ContainsType(
+ ICLRPrivAssembly * pPrivAssembly,
+ LPCWSTR wszTypeName)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ AppDomain * pAppDomain = AppDomain::GetCurrentDomain();
+
+ ReleaseHolder<PEAssembly> pPEAssembly;
+ IfFailGo(pAppDomain->BindHostedPrivAssembly(nullptr, pPrivAssembly, nullptr, &pPEAssembly));
+ _ASSERTE(pPEAssembly != nullptr);
+
+ {
+ // Find the DomainAssembly * (can be cached if calling this every time proves too slow)
+ DomainAssembly * pDomainAssembly = pAppDomain->LoadDomainAssembly(
+ nullptr, // pIdentity
+ pPEAssembly,
+ FILE_LOAD_DELIVER_EVENTS,
+ nullptr); // pLoadSecurity
+
+ // Convert the type name into namespace and class name in UTF8
+ StackSString ssTypeNameWCHAR(wszTypeName);
+
+ StackSString ssTypeName;
+ ssTypeNameWCHAR.ConvertToUTF8(ssTypeName);
+ LPUTF8 szTypeName = (LPUTF8)ssTypeName.GetUTF8NoConvert();
+
+ LPCUTF8 szNamespace;
+ LPCUTF8 szClassName;
+ ns::SplitInline(szTypeName, szNamespace, szClassName);
+
+ hr = ContainsTypeHelper(pDomainAssembly->GetAssembly(), szNamespace, szClassName);
+ _ASSERTE((hr == S_OK) || (hr == S_FALSE));
+ return hr;
+ }
+
+ErrExit:
+ return hr;
+} // CLRPrivTypeCacheWinRT::ContainsType
+
+#endif //!DACCESS_COMPILE
+
+//=====================================================================================================================
+//
+// Checks if the type (szNamespace/szClassName) is present in the assembly pAssembly.
+//
+// Return value:
+// S_OK - Type is present in the assembly.
+// S_FALSE - Type is not present.
+// No other error codes or success codes
+//
+HRESULT
+CLRPrivTypeCacheWinRT::ContainsTypeHelper(
+ PTR_Assembly pAssembly,
+ LPCUTF8 szNamespace,
+ LPCUTF8 szClassName)
+{
+ CONTRACTL
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ NameHandle typeName(szNamespace, szClassName);
+
+ // Find the type in the assembly (use existing hash of all type names defined in the assembly)
+ TypeHandle thType;
+ mdToken tkType;
+ Module * pTypeModule;
+ mdToken tkExportedType;
+
+ if (pAssembly->GetLoader()->FindClassModuleThrowing(
+ &typeName,
+ &thType,
+ &tkType,
+ &pTypeModule,
+ &tkExportedType,
+ nullptr, // ppClassHashEntry
+ nullptr, // pLookInThisModuleOnly
+ Loader::DontLoad))
+ {
+ return S_OK;
+ }
+ else
+ {
+ return S_FALSE;
+ }
+} // CLRPrivTypeCacheWinRT::ContainsTypeHelper
+
+//=====================================================================================================================
+//
+// Checks if the assembly pPrivAssembly (referenced from assembly in pAppDomain) contains type (szNamespace/szClassName).
+// Fills *ppAssembly if it contains the type.
+//
+// Return value:
+// S_OK - Contains type (fills *ppAssembly).
+// S_FALSE - Does not contain the type (*ppAssembly is not filled).
+// E_FAIL - Assembly is not loaded.
+//
+HRESULT
+CLRPrivTypeCacheWinRT::ContainsTypeIfLoaded(
+ PTR_AppDomain pAppDomain,
+ PTR_ICLRPrivAssembly pPrivAssembly,
+ LPCUTF8 szNamespace,
+ LPCUTF8 szClassName,
+ PTR_Assembly * ppAssembly)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ HRESULT hr;
+
+ PTR_DomainAssembly pDomainAssembly = pAppDomain->FindAssembly(pPrivAssembly);
+ if (pDomainAssembly == nullptr || !pDomainAssembly->IsLoaded())
+ { // The assembly is not loaded into the AppDomain
+ return E_FAIL;
+ }
+ PTR_Assembly pAssembly = dac_cast<PTR_Assembly>(pDomainAssembly->GetLoadedAssembly());
+ if (pAssembly == nullptr)
+ { // The assembly failed to load earlier (exception is cached on pDomainAssembly)
+ return E_FAIL;
+ }
+
+ hr = ContainsTypeHelper(pAssembly, szNamespace, szClassName);
+ _ASSERTE((hr == S_OK) || (hr == S_FALSE));
+ if (hr == S_OK)
+ { // The type is present in the assembly
+ *ppAssembly = pAssembly;
+ }
+ return hr;
+} // CLRPrivTypeCacheWinRT::ContainsTypeIfLoaded
+
+#ifndef DACCESS_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+//=====================================================================================================================
+// Raises user event DesignerNamespaceResolveEvent to get a list of files for this namespace.
+//
+void
+CLRPrivTypeCacheWinRT::RaiseDesignerNamespaceResolveEvent(
+ LPCWSTR wszNamespace,
+ CLRPrivBinderUtil::WStringListHolder * pFileNameList)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(pFileNameList != nullptr);
+
+ AppDomain * pAppDomain = AppDomain::GetCurrentDomain();
+
+ GCX_COOP();
+
+ struct _gc {
+ OBJECTREF AppDomainRef;
+ STRINGREF str;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ if ((gc.AppDomainRef = pAppDomain->GetRawExposedObject()) != NULL)
+ {
+ MethodDescCallSite onNamespaceResolve(METHOD__APP_DOMAIN__ON_DESIGNER_NAMESPACE_RESOLVE, &gc.AppDomainRef);
+ gc.str = StringObject::NewString(wszNamespace);
+ ARG_SLOT args[2] =
+ {
+ ObjToArgSlot(gc.AppDomainRef),
+ ObjToArgSlot(gc.str)
+ };
+ PTRARRAYREF ResultingFileNameArrayRef = (PTRARRAYREF) onNamespaceResolve.Call_RetOBJECTREF(args);
+ if (ResultingFileNameArrayRef != NULL)
+ {
+ for (DWORD i = 0; i < ResultingFileNameArrayRef->GetNumComponents(); i++)
+ {
+ STRINGREF ResultingFileNameRef = (STRINGREF) ResultingFileNameArrayRef->GetAt(i);
+ _ASSERTE(ResultingFileNameRef != NULL); // Verified in the managed code OnDesignerNamespaceResolveEvent
+
+ SString sFileName;
+ ResultingFileNameRef->GetSString(sFileName);
+ _ASSERTE(!sFileName.IsEmpty()); // Verified in the managed code OnDesignerNamespaceResolveEvent
+
+ pFileNameList->InsertTail(sFileName.GetUnicode());
+ }
+ }
+ }
+ GCPROTECT_END();
+} // CLRPrivTypeCacheWinRT::RaiseDesignerNamespaceResolveEvent
+
+//=====================================================================================================================
+#endif // CROSSGEN_COMPILE
+
+CLRPrivTypeCacheWinRT * CLRPrivTypeCacheWinRT::s_pSingleton = nullptr;
+
+//=====================================================================================================================
+CLRPrivTypeCacheWinRT *
+CLRPrivTypeCacheWinRT::GetOrCreateTypeCache()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (s_pSingleton == nullptr)
+ {
+ ReleaseHolder<CLRPrivTypeCacheWinRT> pTypeCache;
+ pTypeCache = clr::SafeAddRef(new CLRPrivTypeCacheWinRT());
+
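+        // Publish the new instance only if no other thread has done so first.
+        // If we win the race, suppress the holder's release so the singleton keeps
+        // the reference; if we lose, the holder releases our temporary instance and
+        // we return the winner's instance below.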
+ if (InterlockedCompareExchangeT<decltype(s_pSingleton)>(&s_pSingleton, pTypeCache, nullptr) == nullptr)
+ {
+ pTypeCache.SuppressRelease();
+ }
+ }
+
+ return s_pSingleton;
+}
+
+//=====================================================================================================================
+
+#endif //!DACCESS_COMPILE
diff --git a/src/vm/clrprivtypecachewinrt.h b/src/vm/clrprivtypecachewinrt.h
new file mode 100644
index 0000000000..2eb4d46789
--- /dev/null
+++ b/src/vm/clrprivtypecachewinrt.h
@@ -0,0 +1,105 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// Contains VM implementation of WinRT type cache for code:CLRPrivBinderWinRT binder.
+//
+//=====================================================================================================================
+
+#ifdef FEATURE_HOSTED_BINDER
+
+#pragma once
+
+#include "internalunknownimpl.h"
+#include "clrprivbinding.h"
+
+#ifdef CLR_STANDALONE_BINDER
+typedef HRESULT (*ContainsTypeFnPtr)(
+ IUnknown * object,
+ ICLRPrivAssembly * pAssembly,
+ LPCWSTR wszTypeName);
+
+// CLRPrivTypeCacheWinRT proxy object for use by the mdilbind assembly binder.
+class CLRPrivTypeCacheWinRT : public IUnknownCommon<IUnknown>
+{
+ ReleaseHolder<IUnknown> m_actualCacheObject;
+ ContainsTypeFnPtr m_containsTypeFunction;
+public:
+ CLRPrivTypeCacheWinRT(IUnknown *object, ContainsTypeFnPtr containsTypeFunction)
+ {
+ m_actualCacheObject = clr::SafeAddRef(object);
+ m_containsTypeFunction = containsTypeFunction;
+ }
+ //=============================================================================================
+ // Class methods
+
+ // S_OK - pAssembly contains type wszTypeName
+ // S_FALSE - pAssembly does not contain type wszTypeName
+ HRESULT ContainsType(
+ ICLRPrivAssembly * pAssembly,
+ LPCWSTR wszTypeName)
+ {
+ return m_containsTypeFunction(m_actualCacheObject, pAssembly, wszTypeName);
+ }
+};
+#else
+//=====================================================================================================================
+class CLRPrivTypeCacheWinRT :
+ public IUnknownCommon<IUnknown>
+{
+public:
+ //=============================================================================================
+ // Class methods
+
+ // S_OK - pAssembly contains type wszTypeName
+ // S_FALSE - pAssembly does not contain type wszTypeName
+ HRESULT ContainsType(
+ ICLRPrivAssembly * pAssembly,
+ LPCWSTR wszTypeName);
+
+ // S_OK - pAssembly contains type wszTypeName
+ // S_FALSE - pAssembly does not contain type wszTypeName
+ // E_FAIL - assembly is not loaded
+ HRESULT ContainsTypeIfLoaded(
+ PTR_AppDomain pAppDomain,
+ PTR_ICLRPrivAssembly pPrivAssembly,
+ LPCUTF8 szNamespace,
+ LPCUTF8 szClassName,
+ PTR_Assembly * ppAssembly);
+
+ static CLRPrivTypeCacheWinRT * GetOrCreateTypeCache();
+
+#ifndef DACCESS_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+ // Raises user event DesignerNamespaceResolveEvent to get a list of files for this namespace.
+ void RaiseDesignerNamespaceResolveEvent(
+ LPCWSTR wszNamespace,
+ CLRPrivBinderUtil::WStringListHolder * pFileNameList);
+#endif // CROSSGEN_COMPILE
+
+#endif //!DACCESS_COMPILE
+
+private:
+ //=============================================================================================
+ // Private methods
+
+ // Checks if the type (szNamespace/szClassName) is present in the assembly pAssembly.
+ HRESULT ContainsTypeHelper(
+ PTR_Assembly pAssembly,
+ LPCUTF8 szNamespace,
+ LPCUTF8 szClassName);
+
+ //=============================================================================================
+ // Class fields
+
+ static CLRPrivTypeCacheWinRT * s_pSingleton;
+
+}; // class CLRPrivTypeCacheWinRT
+#endif
+typedef DPTR(CLRPrivTypeCacheWinRT) PTR_CLRPrivTypeCacheWinRT;
+
+#endif // FEATURE_HOSTED_BINDER
diff --git a/src/vm/clrtocomcall.cpp b/src/vm/clrtocomcall.cpp
new file mode 100644
index 0000000000..3e8c098316
--- /dev/null
+++ b/src/vm/clrtocomcall.cpp
@@ -0,0 +1,1182 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: CLRtoCOMCall.cpp
+//
+
+//
+// CLR to COM call support.
+//
+
+
+#include "common.h"
+
+#include "stublink.h"
+#include "excep.h"
+#include "clrtocomcall.h"
+#include "siginfo.hpp"
+#include "comcallablewrapper.h"
+#include "runtimecallablewrapper.h"
+#include "dllimport.h"
+#include "mlinfo.h"
+#include "eeconfig.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "corhost.h"
+#include "reflectioninvocation.h"
+#include "mdaassistants.h"
+#include "sigbuilder.h"
+
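+// Vtable slot of IDispatch::Invoke: slots 0-2 are the IUnknown methods
+// (QueryInterface, AddRef, Release) and slots 3-6 are the IDispatch methods
+// (GetTypeInfoCount, GetTypeInfo, GetIDsOfNames, Invoke).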
+#define DISPATCH_INVOKE_SLOT 6
+
+#ifndef DACCESS_COMPILE
+
+//
+// Defined in dllimport.cpp:
+void CreateCLRToDispatchCOMStub(
+ MethodDesc * pMD,
+ DWORD dwStubFlags // NDirectStubFlags
+ );
+
+#ifndef CROSSGEN_COMPILE
+
+PCODE TheGenericComplusCallStub()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return GetEEFuncEntryPoint(GenericComPlusCallStub);
+}
+
+#endif //#ifndef CROSSGEN_COMPILE
+
+
+ComPlusCallInfo *ComPlusCall::PopulateComPlusCallMethodDesc(MethodDesc* pMD, DWORD* pdwStubFlags)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(CheckPointer(pdwStubFlags, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = pMD->GetMethodTable();
+ MethodTable *pItfMT = NULL;
+
+ // We are going to use this MethodDesc for a CLR->COM call
+ g_IBCLogger.LogMethodCodeAccess(pMD);
+
+ if (pMD->IsComPlusCall())
+ {
+ ComPlusCallMethodDesc *pCMD = (ComPlusCallMethodDesc *)pMD;
+ if (pCMD->m_pComPlusCallInfo == NULL)
+ {
+ // We are going to write the m_pComPlusCallInfo field of the MethodDesc
+ g_IBCLogger.LogMethodDescWriteAccess(pMD);
+ EnsureWritablePages(pCMD);
+
+ LoaderHeap *pHeap = pMD->GetLoaderAllocator()->GetHighFrequencyHeap();
+ ComPlusCallInfo *pTemp = (ComPlusCallInfo *)(void *)pHeap->AllocMem(S_SIZE_T(sizeof(ComPlusCallInfo)));
+
+ pTemp->InitStackArgumentSize();
+
+ InterlockedCompareExchangeT(&pCMD->m_pComPlusCallInfo, pTemp, NULL);
+ }
+ }
+
+ ComPlusCallInfo *pComInfo = ComPlusCallInfo::FromMethodDesc(pMD);
+ _ASSERTE(pComInfo != NULL);
+ EnsureWritablePages(pComInfo);
+
+ BOOL fWinRTCtor = FALSE;
+ BOOL fWinRTComposition = FALSE;
+ BOOL fWinRTStatic = FALSE;
+ BOOL fWinRTDelegate = FALSE;
+
+ if (pMD->IsInterface())
+ {
+ pComInfo->m_cachedComSlot = pMD->GetComSlot();
+ pItfMT = pMT;
+ pComInfo->m_pInterfaceMT = pItfMT;
+ }
+ else if (pMT->IsWinRTDelegate())
+ {
+ pComInfo->m_cachedComSlot = ComMethodTable::GetNumExtraSlots(ifVtable);
+ pItfMT = pMT;
+ pComInfo->m_pInterfaceMT = pItfMT;
+
+ fWinRTDelegate = TRUE;
+ }
+ else
+ {
+ BOOL fIsWinRTClass = (!pMT->IsInterface() && pMT->IsProjectedFromWinRT());
+ MethodDesc *pItfMD;
+
+ if (fIsWinRTClass && pMD->IsCtor())
+ {
+ // ctors on WinRT classes call factory interface methods
+ pItfMD = GetWinRTFactoryMethodForCtor(pMD, &fWinRTComposition);
+ fWinRTCtor = TRUE;
+ }
+ else if (fIsWinRTClass && pMD->IsStatic())
+ {
+ // static members of WinRT classes call static interface methods
+ pItfMD = GetWinRTFactoryMethodForStatic(pMD);
+ fWinRTStatic = TRUE;
+ }
+ else
+ {
+ pItfMD = pMD->GetInterfaceMD();
+ if (pItfMD == NULL)
+ {
+ // the method does not implement any interface
+ StackSString ssClassName;
+ pMT->_GetFullyQualifiedNameForClass(ssClassName);
+ StackSString ssMethodName(SString::Utf8, pMD->GetName());
+
+ COMPlusThrow(kInvalidOperationException, IDS_EE_COMIMPORT_METHOD_NO_INTERFACE, ssMethodName.GetUnicode(), ssClassName.GetUnicode());
+ }
+ }
+
+ pComInfo->m_cachedComSlot = pItfMD->GetComSlot();
+ pItfMT = pItfMD->GetMethodTable();
+ pComInfo->m_pInterfaceMT = pItfMT;
+ }
+
+ if (pdwStubFlags == NULL)
+ return pComInfo;
+
+ pMD->ComputeSuppressUnmanagedCodeAccessAttr(pMD->GetMDImport());
+
+ //
+ // Compute NDirectStubFlags
+ //
+
+ DWORD dwStubFlags = NDIRECTSTUB_FL_COM;
+
+ // Determine if this is a special COM event call.
+ BOOL fComEventCall = pItfMT->IsComEventItfType();
+
+    // Determine if the call needs to do early-bound to late-bound conversion.
+ BOOL fLateBound = !fComEventCall && pItfMT->IsInterface() && pItfMT->GetComInterfaceType() == ifDispatch;
+
+ if (fLateBound)
+ dwStubFlags |= NDIRECTSTUB_FL_COMLATEBOUND;
+
+ if (fComEventCall)
+ dwStubFlags |= NDIRECTSTUB_FL_COMEVENTCALL;
+
+ bool fIsWinRT = (pItfMT->IsProjectedFromWinRT() || pItfMT->IsWinRTRedirectedDelegate());
+ if (!fIsWinRT && pItfMT->IsWinRTRedirectedInterface(TypeHandle::Interop_ManagedToNative))
+ {
+ if (!pItfMT->HasInstantiation())
+ {
+ // non-generic redirected interface needs to keep its pre-4.5 classic COM interop
+ // behavior so the IL stub will be special - it will conditionally tail-call to
+ // the new WinRT marshaling routines
+ dwStubFlags |= NDIRECTSTUB_FL_WINRTHASREDIRECTION;
+ }
+ else
+ {
+ fIsWinRT = true;
+ }
+ }
+
+ if (fIsWinRT)
+ {
+ dwStubFlags |= NDIRECTSTUB_FL_WINRT;
+
+ if (pMD->IsGenericComPlusCall())
+ dwStubFlags |= NDIRECTSTUB_FL_WINRTSHAREDGENERIC;
+ }
+
+ if (fWinRTCtor)
+ {
+ dwStubFlags |= NDIRECTSTUB_FL_WINRTCTOR;
+
+ if (fWinRTComposition)
+ dwStubFlags |= NDIRECTSTUB_FL_WINRTCOMPOSITION;
+ }
+
+ if (fWinRTStatic)
+ dwStubFlags |= NDIRECTSTUB_FL_WINRTSTATIC;
+
+ if (fWinRTDelegate)
+ dwStubFlags |= NDIRECTSTUB_FL_WINRTDELEGATE | NDIRECTSTUB_FL_WINRT;
+
+ BOOL BestFit = TRUE;
+ BOOL ThrowOnUnmappableChar = FALSE;
+
+    // In WinRT, marshaling is fully described by the parameter type.
+    // BestFit custom attributes are not going to affect the marshaling behavior.
+ if (!fIsWinRT)
+ {
+ ReadBestFitCustomAttribute(pMD, &BestFit, &ThrowOnUnmappableChar);
+ }
+
+ if (BestFit)
+ dwStubFlags |= NDIRECTSTUB_FL_BESTFIT;
+
+ if (ThrowOnUnmappableChar)
+ dwStubFlags |= NDIRECTSTUB_FL_THROWONUNMAPPABLECHAR;
+
+ //
+ // fill in out param
+ //
+ *pdwStubFlags = dwStubFlags;
+
+ return pComInfo;
+}
+
+// static
+MethodDesc *ComPlusCall::GetWinRTFactoryMethodForCtor(MethodDesc *pMDCtor, BOOL *pComposition)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMDCtor));
+ PRECONDITION(pMDCtor->IsCtor());
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = pMDCtor->GetMethodTable();
+ _ASSERTE(pMT->IsProjectedFromWinRT());
+
+ // If someone is trying to access a WinRT attribute, block it since there is no actual implementation type
+ MethodTable *pParentMT = pMT->GetParentMethodTable();
+ if (pParentMT == MscorlibBinder::GetClass(CLASS__ATTRIBUTE))
+ {
+ DefineFullyQualifiedNameForClassW();
+ COMPlusThrow(kInvalidOperationException, IDS_EE_WINRT_ATTRIBUTES_NOT_INVOKABLE, GetFullyQualifiedNameForClassW(pMT));
+ }
+
+ // build the expected factory method signature
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+ pMDCtor->GetSig(&pSig, &cSig);
+ SigParser ctorSig(pSig, cSig);
+
+ ULONG numArgs;
+ CorElementType et;
+
+ IfFailThrow(ctorSig.GetCallingConv(NULL)); // calling convention
+ IfFailThrow(ctorSig.GetData(&numArgs)); // number of args
+ IfFailThrow(ctorSig.SkipExactlyOne()); // skip return type
+
+ // Get the class factory for the type
+ WinRTClassFactory *pFactory = GetComClassFactory(pMT)->AsWinRTClassFactory();
+ BOOL fComposition = pFactory->IsComposition();
+
+ if (numArgs == 0 && !fComposition)
+ {
+ // this is a default ctor - it will use IActivationFactory::ActivateInstance
+ return MscorlibBinder::GetMethod(METHOD__IACTIVATIONFACTORY__ACTIVATE_INSTANCE);
+ }
+
+    // Composition factory methods have two additional arguments.
+    // For now a class has either composition factories or regular factories, but never both.
+    // In future versions we may want to allow a class to become unsealed, in which case
+    // we'll probably need to support both and change how we find factory methods.
+ if (fComposition)
+ {
+ numArgs += 2;
+ }
+
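+    // Build the signature the factory method is expected to have. For a ctor such
+    // as Foo(int32) this is roughly 'instance Foo(int32)'; for composable classes
+    // the two extra parameters appended below are the controlling outer object and
+    // a byref slot that receives the non-delegating inner object.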
+ SigBuilder sigBuilder;
+ sigBuilder.AppendByte(IMAGE_CEE_CS_CALLCONV_HASTHIS);
+ sigBuilder.AppendData(numArgs);
+
+ // the return type is the class that declares the ctor
+ sigBuilder.AppendElementType(ELEMENT_TYPE_INTERNAL);
+ sigBuilder.AppendPointer(pMT);
+
+ // parameter types are identical
+ ctorSig.GetSignature(&pSig, &cSig);
+ sigBuilder.AppendBlob((const PVOID)pSig, cSig);
+
+ if (fComposition)
+ {
+ // in: outer IInspectable to delegate to, or null
+ sigBuilder.AppendElementType(ELEMENT_TYPE_OBJECT);
+
+ // out: non-delegating IInspectable for the created object
+ sigBuilder.AppendElementType(ELEMENT_TYPE_BYREF);
+ sigBuilder.AppendElementType(ELEMENT_TYPE_OBJECT);
+ }
+
+ pSig = (PCCOR_SIGNATURE)sigBuilder.GetSignature(&cSig);
+
+ // ask the factory to find a matching method
+ MethodDesc *pMD = pFactory->FindFactoryMethod(pSig, cSig, pMDCtor->GetModule());
+
+ if (pMD == NULL)
+ {
+ // @TODO: Do we want a richer exception message?
+ SString ctorMethodName(SString::Utf8, COR_CTOR_METHOD_NAME);
+ COMPlusThrowNonLocalized(kMissingMethodException, ctorMethodName.GetUnicode());
+ }
+
+ if (pComposition != NULL)
+ {
+ *pComposition = fComposition;
+ }
+
+ return pMD;
+}
+
+// static
+MethodDesc *ComPlusCall::GetWinRTFactoryMethodForStatic(MethodDesc *pMDStatic)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMDStatic));
+ PRECONDITION(pMDStatic->IsStatic());
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = pMDStatic->GetMethodTable();
+ _ASSERTE(pMT->IsProjectedFromWinRT());
+
+ // build the expected interface method signature
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+ pMDStatic->GetSig(&pSig, &cSig);
+ SigParser ctorSig(pSig, cSig);
+
+ IfFailThrow(ctorSig.GetCallingConv(NULL)); // calling convention
+
+ // use the "has this" calling convention because we're looking for an instance method
+ SigBuilder sigBuilder;
+ sigBuilder.AppendByte(IMAGE_CEE_CS_CALLCONV_HASTHIS);
+
+ // return type and parameter types are identical
+ ctorSig.GetSignature(&pSig, &cSig);
+ sigBuilder.AppendBlob((const PVOID)pSig, cSig);
+
+ pSig = (PCCOR_SIGNATURE)sigBuilder.GetSignature(&cSig);
+
+ // ask the factory to find a matching method
+ WinRTClassFactory *pFactory = GetComClassFactory(pMT)->AsWinRTClassFactory();
+ MethodDesc *pMD = pFactory->FindStaticMethod(pMDStatic->GetName(), pSig, cSig, pMDStatic->GetModule());
+
+ if (pMD == NULL)
+ {
+ // @TODO: Do we want a richer exception message?
+ SString staticMethodName(SString::Utf8, pMDStatic->GetName());
+ COMPlusThrowNonLocalized(kMissingMethodException, staticMethodName.GetUnicode());
+ }
+
+ return pMD;
+}
+
+MethodDesc* ComPlusCall::GetILStubMethodDesc(MethodDesc* pMD, DWORD dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (SF_IsCOMLateBoundStub(dwStubFlags) || SF_IsCOMEventCallStub(dwStubFlags))
+ return NULL;
+
+ // Get the call signature information
+ StubSigDesc sigDesc(pMD);
+
+ return NDirect::CreateCLRToNativeILStub(
+ &sigDesc,
+ (CorNativeLinkType)0,
+ (CorNativeLinkFlags)0,
+ (CorPinvokeMap)0,
+ dwStubFlags);
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+PCODE ComPlusCall::GetStubForILStub(MethodDesc* pMD, MethodDesc** ppStubMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(pMD->IsComPlusCall() || pMD->IsGenericComPlusCall());
+
+ ComPlusCallInfo *pComInfo = NULL;
+
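+    // Two cases: either an NGen-precompiled stub MD was passed in (we fix up the
+    // cached IL stub pointer), or no stub exists yet (we compute the stub flags
+    // and JIT a new IL stub, falling back to the generic stub if none applies).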
+ if (*ppStubMD != NULL)
+ {
+        // *ppStubMD, if provided, must be preimplemented.
+ _ASSERTE((*ppStubMD)->IsPreImplemented());
+
+ pComInfo = ComPlusCallInfo::FromMethodDesc(pMD);
+ _ASSERTE(pComInfo != NULL);
+
+ _ASSERTE((*ppStubMD) == pComInfo->m_pStubMD.GetValue());
+
+ if (pComInfo->m_pInterfaceMT == NULL)
+ {
+ ComPlusCall::PopulateComPlusCallMethodDesc(pMD, NULL);
+ }
+ else
+ {
+ pComInfo->m_pInterfaceMT->CheckRestore();
+ }
+
+ if (pComInfo->m_pILStub == NULL)
+ {
+ PCODE pCode = JitILStub(*ppStubMD);
+ InterlockedCompareExchangeT<PCODE>(EnsureWritablePages(pComInfo->GetAddrOfILStubField()), pCode, NULL);
+ }
+ else
+ {
+ // Pointer to pre-implemented code initialized at NGen-time
+ _ASSERTE((*ppStubMD)->GetNativeCode() == pComInfo->m_pILStub);
+ }
+ }
+ else
+ {
+ DWORD dwStubFlags;
+ pComInfo = ComPlusCall::PopulateComPlusCallMethodDesc(pMD, &dwStubFlags);
+
+ if (!pComInfo->m_pStubMD.IsNull())
+ {
+ // Discard pre-implemented code
+ PCODE pPreImplementedCode = pComInfo->m_pStubMD.GetValue()->GetNativeCode();
+ InterlockedCompareExchangeT<PCODE>(pComInfo->GetAddrOfILStubField(), NULL, pPreImplementedCode);
+ }
+
+ *ppStubMD = ComPlusCall::GetILStubMethodDesc(pMD, dwStubFlags);
+
+ if (*ppStubMD != NULL)
+ {
+ PCODE pCode = JitILStub(*ppStubMD);
+ InterlockedCompareExchangeT<PCODE>(pComInfo->GetAddrOfILStubField(), pCode, NULL);
+ }
+ else
+ {
+ CreateCLRToDispatchCOMStub(pMD, dwStubFlags);
+ }
+ }
+
+ PCODE pStub = NULL;
+
+ if (*ppStubMD)
+ {
+#ifdef FEATURE_REMOTING
+#ifndef HAS_REMOTING_PRECODE
+ if (!pMD->IsStatic())
+ {
+ pStub = TheGenericComplusCallStub();
+ }
+ else
+#endif // !HAS_REMOTING_PRECODE
+#endif
+ {
+ pStub = *pComInfo->GetAddrOfILStubField();
+ }
+ }
+ else
+ {
+ pStub = TheGenericComplusCallStub();
+ }
+
+ return pStub;
+}
+
+#ifdef FEATURE_REMOTING
+extern
+Signature InitMessageData(messageData *msgData,
+ FramedMethodFrame *pFrame,
+ Module **ppModule,
+ SigTypeContext *pTypeContext);
+#endif // FEATURE_REMOTING
+
+I4ARRAYREF SetUpWrapperInfo(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ MetaSig msig(pMD);
+ int numArgs = msig.NumFixedArgs();
+
+ I4ARRAYREF WrapperTypeArr = NULL;
+
+ GCPROTECT_BEGIN(WrapperTypeArr)
+ {
+ //
+ // Allocate the array of wrapper types.
+ //
+
+ WrapperTypeArr = (I4ARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_I4, numArgs);
+
+ GCX_PREEMP();
+
+ // Collects ParamDef information in an indexed array where element 0 represents
+ // the return type.
+ mdParamDef *params = (mdParamDef*)_alloca((numArgs+1) * sizeof(mdParamDef));
+ CollateParamTokens(msig.GetModule()->GetMDImport(), pMD->GetMemberDef(), numArgs, params);
+
+
+ //
+ // Look up the best fit mapping info via Assembly & Interface level attributes
+ //
+
+ BOOL BestFit = TRUE;
+ BOOL ThrowOnUnmappableChar = FALSE;
+ ReadBestFitCustomAttribute(pMD, &BestFit, &ThrowOnUnmappableChar);
+
+ //
+ // Determine the wrapper type of the arguments.
+ //
+
+ int iParam = 1;
+ CorElementType mtype;
+ while (ELEMENT_TYPE_END != (mtype = msig.NextArg()))
+ {
+ //
+ // Set up the marshaling info for the parameter.
+ //
+
+ MarshalInfo Info(msig.GetModule(), msig.GetArgProps(), msig.GetSigTypeContext(), params[iParam],
+ MarshalInfo::MARSHAL_SCENARIO_COMINTEROP, (CorNativeLinkType)0, (CorNativeLinkFlags)0,
+ TRUE, iParam, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, pMD, TRUE
+ #ifdef _DEBUG
+ , pMD->m_pszDebugMethodName, pMD->m_pszDebugClassName, iParam
+ #endif
+ );
+
+ DispatchWrapperType wrapperType = Info.GetDispWrapperType();
+
+ {
+ GCX_COOP();
+
+ //
+ // Based on the MarshalInfo, set the wrapper type.
+ //
+
+ *((DWORD*)WrapperTypeArr->GetDataPtr() + iParam - 1) = wrapperType;
+ }
+
+ //
+ // Increase the argument index.
+ //
+
+ iParam++;
+ }
+ }
+ GCPROTECT_END();
+
+ return WrapperTypeArr;
+}
+
+UINT32 CLRToCOMEventCallWorker(ComPlusMethodFrame* pFrame, ComPlusCallMethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pFrame));
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ struct _gc {
+ OBJECTREF EventProviderTypeObj;
+ OBJECTREF EventProviderObj;
+ OBJECTREF ThisObj;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+
+ LOG((LF_STUBS, LL_INFO1000, "Calling CLRToCOMEventCallWorker %s::%s \n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+
+ // Retrieve the method table and the method desc of the call.
+ MethodDesc *pEvProvMD = pMD->GetEventProviderMD();
+ MethodTable *pEvProvMT = pEvProvMD->GetMethodTable();
+
+ GCPROTECT_BEGIN(gc)
+ {
+ // Retrieve the exposed type object for event provider.
+ gc.EventProviderTypeObj = pEvProvMT->GetManagedClassObject();
+ gc.ThisObj = pFrame->GetThis();
+
+ MethodDescCallSite getEventProvider(METHOD__COM_OBJECT__GET_EVENT_PROVIDER, &gc.ThisObj);
+
+ // Retrieve the event provider for the event interface type.
+ ARG_SLOT GetEventProviderArgs[] =
+ {
+ ObjToArgSlot(gc.ThisObj),
+ ObjToArgSlot(gc.EventProviderTypeObj)
+ };
+
+ gc.EventProviderObj = getEventProvider.Call_RetOBJECTREF(GetEventProviderArgs);
+
+ // Set up an arg iterator to retrieve the arguments from the frame.
+ MetaSig mSig(pMD);
+ ArgIterator ArgItr(&mSig);
+
+ // Make the call on the event provider method desc.
+ MethodDescCallSite eventProvider(pEvProvMD, &gc.EventProviderObj);
+
+ // Retrieve the event handler passed in.
+ OBJECTREF EventHandlerObj = *(OBJECTREF*)(pFrame->GetTransitionBlock() + ArgItr.GetNextOffset());
+
+ ARG_SLOT EventMethArgs[] =
+ {
+ ObjToArgSlot(gc.EventProviderObj),
+ ObjToArgSlot(EventHandlerObj)
+ };
+
+ //
+ // If this can ever return something bigger than an INT64 byval
+ // then this code is broken. Currently, however, it cannot.
+ //
+ *(ARG_SLOT *)(pFrame->GetReturnValuePtr()) = eventProvider.Call_RetArgSlot(EventMethArgs);
+
+ // The COM event call worker does not support value returned in
+ // floating point registers.
+ _ASSERTE(ArgItr.GetFPReturnSize() == 0);
+ }
+ GCPROTECT_END();
+
+ // tell the asm stub that we are not returning an FP type
+ return 0;
+}
+
+#ifdef FEATURE_REMOTING
+UINT32 CLRToCOMLateBoundWorker(ComPlusMethodFrame* pFrame, ComPlusCallMethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pFrame));
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DISPID DispId = DISPID_UNKNOWN;
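+    // A dispinterface vtable starts with the 3 IUnknown methods followed by the
+    // 4 IDispatch methods, so the interface's own methods begin after 7 slots.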
+ const unsigned cbExtraSlots = 7;
+ DWORD BindingFlags = BINDER_AllLookup;
+ UINT32 fpRetSize;
+ mdProperty pd;
+ LPCUTF8 strMemberName;
+ mdToken tkMember;
+ ULONG uSemantic;
+
+ LOG((LF_STUBS, LL_INFO1000, "Calling CLRToCOMLateBoundWorker %s::%s \n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+
+ // Retrieve the method table and the method desc of the call.
+ MethodTable *pItfMT = pMD->GetInterfaceMethodTable();
+ ComPlusCallMethodDesc *pItfMD = pMD;
+ IMDInternalImport *pMDImport = pItfMT->GetMDImport();
+
+    // Make sure this is only called on dispatch-only interfaces.
+ _ASSERTE(pItfMT->GetComInterfaceType() == ifDispatch);
+
+ // If this is a method impl MD then we need to retrieve the actual interface MD that
+ // this is a method impl for.
+ // REVISIT_TODO: Stop using ComSlot to convert method impls to interface MD
+ // _ASSERTE(pMD->m_pComPlusCallInfo->m_cachedComSlot == 7);
+ // GopalK
+ if (!pMD->GetMethodTable()->IsInterface()) {
+ pItfMD = (ComPlusCallMethodDesc*)pItfMT->GetMethodDescForSlot(pMD->m_pComPlusCallInfo->m_cachedComSlot - cbExtraSlots);
+ CONSISTENCY_CHECK(pMD->GetInterfaceMD() == pItfMD);
+ }
+
+ // See if there is property information for this member.
+ hr = pItfMT->GetModule()->GetPropertyInfoForMethodDef(pItfMD->GetMemberDef(), &pd, &strMemberName, &uSemantic);
+ if (hr == S_OK)
+ {
+ // We are dealing with a property accessor.
+ tkMember = pd;
+
+ // Determine which type of accessor we are dealing with.
+ switch (uSemantic)
+ {
+ case msGetter:
+ {
+                // We are dealing with an INVOKE_PROPERTYGET.
+ BindingFlags |= BINDER_GetProperty;
+ break;
+ }
+
+ case msSetter:
+ {
+                // We are dealing with an INVOKE_PROPERTYPUT or an INVOKE_PROPERTYPUTREF.
+ ULONG cAssoc;
+ ASSOCIATE_RECORD* pAssoc;
+ HENUMInternal henum;
+ BOOL bPropHasOther = FALSE;
+
+ // Retrieve all the associates.
+ IfFailThrow(pMDImport->EnumAssociateInit(pd,&henum));
+
+ cAssoc = pMDImport->EnumGetCount(&henum);
+ _ASSERTE(cAssoc > 0);
+
+ ULONG allocSize = cAssoc * sizeof(ASSOCIATE_RECORD);
+ if (allocSize < cAssoc)
+ COMPlusThrow(kTypeLoadException, IDS_EE_TOOMANYASSOCIATES);
+
+ pAssoc = (ASSOCIATE_RECORD*) _alloca((size_t) allocSize);
+ IfFailThrow(pMDImport->GetAllAssociates(&henum, pAssoc, cAssoc));
+
+ pMDImport->EnumClose(&henum);
+
+                // Check to see if there is both a setter and an 'other'. If so, the
+                // setter is an INVOKE_PROPERTYPUTREF; otherwise we will make it an
+                // INVOKE_PROPERTYPUT | INVOKE_PROPERTYPUTREF.
+ for (ULONG i = 0; i < cAssoc; i++)
+ {
+ if (pAssoc[i].m_dwSemantics == msOther)
+ {
+ bPropHasOther = TRUE;
+ break;
+ }
+ }
+
+ if (bPropHasOther)
+ {
+                    // There is both an INVOKE_PROPERTYPUT and an INVOKE_PROPERTYPUTREF for this
+                    // property, so we need to be specific and make this invoke an INVOKE_PROPERTYPUTREF.
+ BindingFlags |= BINDER_PutRefDispProperty;
+ }
+ else
+ {
+ // There is only a setter so we need to make the invoke a Set which will map to
+ // INVOKE_PROPERTYPUT | INVOKE_PROPERTYPUTREF.
+ BindingFlags = BINDER_SetProperty;
+ }
+ break;
+ }
+
+ case msOther:
+ {
+                // We are dealing with an INVOKE_PROPERTYPUT.
+ BindingFlags |= BINDER_PutDispProperty;
+ break;
+ }
+
+ default:
+ {
+ _ASSERTE(!"Invalid method semantic!");
+ }
+ }
+ }
+ else
+ {
+ // We are dealing with a normal method.
+ strMemberName = pItfMD->GetName();
+ tkMember = pItfMD->GetMemberDef();
+ BindingFlags |= BINDER_InvokeMethod;
+ }
+
+ struct _gc {
+ OBJECTREF MemberNameObj;
+ OBJECTREF ItfTypeObj;
+ OBJECTREF WrapperTypeArr;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+ {
+ // Retrieve the exposed type object for the interface.
+ gc.ItfTypeObj = pItfMT->GetManagedClassObject();
+
+ // Retrieve the name of the member we will be invoking on. If the member
+ // has a DISPID then we will use that to optimize the invoke.
+ hr = pItfMD->GetMDImport()->GetDispIdOfMemberDef(tkMember, (ULONG*)&DispId);
+ if (hr == S_OK)
+ {
+ WCHAR strTmp[64];
+
+ _snwprintf_s(strTmp, COUNTOF(strTmp), _TRUNCATE, DISPID_NAME_FORMAT_STRING, DispId);
+ gc.MemberNameObj = StringObject::NewString(strTmp);
+ }
+ else
+ {
+ gc.MemberNameObj = StringObject::NewString(strMemberName);
+ }
+
+ // MessageData struct will be used in creating the message object
+ messageData msgData;
+ Module *pModule = NULL;
+ SigTypeContext typeContext;
+ Signature signature = InitMessageData(&msgData, pFrame, &pModule, &typeContext);
+
+ // If the call requires object wrapping, then set up the array
+ // of wrapper types.
+ if (pMD->RequiresArgumentWrapping())
+ gc.WrapperTypeArr = SetUpWrapperInfo(pItfMD);
+
+ _ASSERTE(!signature.IsEmpty() && pModule);
+
+ // Allocate metasig on the stack
+ MetaSig mSig(signature, pModule, &typeContext);
+ msgData.pSig = &mSig;
+
+ MethodDescCallSite forwardCallToInvoke(METHOD__CLASS__FORWARD_CALL_TO_INVOKE, &gc.ItfTypeObj);
+
+ // Prepare the arguments that will be passed to the method.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(gc.ItfTypeObj),
+ ObjToArgSlot(gc.MemberNameObj),
+ (ARG_SLOT)BindingFlags,
+ ObjToArgSlot(pFrame->GetThis()),
+ ObjToArgSlot(gc.WrapperTypeArr),
+ (ARG_SLOT)&msgData,
+ };
+
+ // Retrieve the array of members from the type object.
+ forwardCallToInvoke.CallWithValueTypes(Args);
+
+ // the return value is written into the Frame's neginfo, so we don't
+ // need to return it directly. We can just have the stub do that work.
+ // However, the stub needs to know what type of FP return this is, if
+ // any, so we return the fpReturnSize info as the return value.
+ {
+ mSig.Reset();
+
+ ArgIterator argit(&mSig);
+ fpRetSize = argit.GetFPReturnSize();
+ }
+ }
+ GCPROTECT_END();
+
+ return fpRetSize;
+}
+#endif // FEATURE_REMOTING
+
+// calls that propagate from CLR to COM
+
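+// "y" controls frame-pointer omission; turning it off keeps a conventional
+// frame for the worker below, which stack walkers and debuggers rely on.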
+#pragma optimize( "y", off )
+/*static*/
+UINT32 STDCALL CLRToCOMWorker(TransitionBlock * pTransitionBlock, ComPlusCallMethodDesc * pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ ENTRY_POINT;
+ PRECONDITION(CheckPointer(pTransitionBlock, NULL_NOT_OK));
+ }
+ CONTRACTL_END;
+
+ UINT32 returnValue = 0;
+
+    // This must happen before the UnC handler is set up. Otherwise, an exception will
+ // cause the UnC handler to pop this frame, leaving a GC hole a mile wide.
+
+ MAKE_CURRENT_THREAD_AVAILABLE();
+
+ FrameWithCookie<ComPlusMethodFrame> frame(pTransitionBlock, pMD);
+ ComPlusMethodFrame * pFrame = &frame;
+
+ //we need to zero out the return value buffer because we will report it during GC
+#ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
+ ZeroMemory (pFrame->GetReturnValuePtr(), ENREGISTERED_RETURNTYPE_MAXSIZE);
+#else
+ *(ARG_SLOT *)pFrame->GetReturnValuePtr() = 0;
+#endif
+
+ // Link frame into the chain.
+ pFrame->Push(CURRENT_THREAD);
+
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER
+
+ _ASSERTE(pMD->IsComPlusCall());
+
+ // Make sure we have been properly loaded here
+ CONSISTENCY_CHECK(GetAppDomain()->CheckCanExecuteManagedCode(pMD));
+
+ // Retrieve the interface method table.
+ MethodTable *pItfMT = pMD->GetInterfaceMethodTable();
+
+ // If the interface is a COM event call, then delegate to the CLRToCOMEventCallWorker.
+ if (pItfMT->IsComEventItfType())
+ {
+ returnValue = CLRToCOMEventCallWorker(pFrame, pMD);
+ }
+#ifdef FEATURE_REMOTING
+ else if (pItfMT->GetComInterfaceType() == ifDispatch)
+ {
+ // If the interface is a Dispatch only interface then convert the early bound
+ // call to a late bound call.
+ returnValue = CLRToCOMLateBoundWorker(pFrame, pMD);
+ }
+#endif // FEATURE_REMOTING
+ else
+ {
+ LOG((LF_STUBS, LL_INFO1000, "Calling CLRToCOMWorker %s::%s \n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+
+ CONSISTENCY_CHECK_MSG(false, "Should not get here when using IL stubs.");
+ }
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ pFrame->Pop(CURRENT_THREAD);
+
+ return returnValue;
+}
+
+#pragma optimize( "", on )
+
+#endif // CROSSGEN_COMPILE
+#endif // #ifndef DACCESS_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+//---------------------------------------------------------
+// Debugger support for ComPlusMethodFrame
+//---------------------------------------------------------
+TADDR ComPlusCall::GetFrameCallIP(FramedMethodFrame *frame)
+{
+ CONTRACT (TADDR)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(CheckPointer(frame));
+ POSTCONDITION(CheckPointer((void*)RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ ComPlusCallMethodDesc *pCMD = dac_cast<PTR_ComPlusCallMethodDesc>(frame->GetFunction());
+ MethodTable *pItfMT = pCMD->GetInterfaceMethodTable();
+ TADDR ip = NULL;
+#ifndef DACCESS_COMPILE
+ SafeComHolder<IUnknown> pUnk = NULL;
+#endif
+
+ _ASSERTE(pCMD->IsComPlusCall());
+
+ // Note: if this is a COM event call, then the call will be delegated to a different object. The logic below will
+ // fail with an invalid cast error. For V1, we just won't step into those.
+ if (pItfMT->IsComEventItfType())
+ RETURN NULL;
+
+ //
+ // This is called from some strange places - from
+ // unmanaged code, from managed code, from the debugger
+ // helper thread. Make sure we can deal with this object
+ // ref.
+ //
+
+#ifndef DACCESS_COMPILE
+
+ Thread* thread = GetThread();
+ if (thread == NULL)
+ {
+ //
+ // This is being called from the debug helper thread.
+ // Unfortunately this doesn't bode well for the COM+ IP
+ // mapping code - it expects to be called from the appropriate
+ // context.
+ //
+ // This context-naive code will work for most cases.
+ //
+        // It toggles the GC mode, tries to set up a thread, etc., right after our
+        // verification that we have no Thread object above. This needs to be fixed
+        // properly in Beta 2. The Beta 1 workaround is simply to #if 0 the code out
+        // and return NULL.
+ //
+ pUnk = NULL;
+ }
+ else
+ {
+ GCX_COOP();
+
+ OBJECTREF *pOref = frame->GetThisPtr();
+ pUnk = ComObject::GetComIPFromRCWThrowing(pOref, pItfMT);
+ }
+
+ if (pUnk != NULL)
+ {
+ if (pItfMT->GetComInterfaceType() == ifDispatch)
+ ip = (TADDR)(*(void ***)(IUnknown*)pUnk)[DISPATCH_INVOKE_SLOT];
+ else
+ ip = (TADDR)(*(void ***)(IUnknown*)pUnk)[pCMD->m_pComPlusCallInfo->m_cachedComSlot];
+ }
+
+#else
+ DacNotImpl();
+#endif // #ifndef DACCESS_COMPILE
+
+ RETURN ip;
+}
+
+void ComPlusMethodFrame::GetUnmanagedCallSite(TADDR* ip,
+ TADDR* returnIP,
+ TADDR* returnSP)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ip, NULL_OK));
+ PRECONDITION(CheckPointer(returnIP, NULL_OK));
+ PRECONDITION(CheckPointer(returnSP, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO100000, "ComPlusMethodFrame::GetUnmanagedCallSite\n"));
+
+ if (ip != NULL)
+ *ip = ComPlusCall::GetFrameCallIP(this);
+
+ TADDR retSP = NULL;
+    // We can't assert retSP here because the debugger may actually call this function even when
+    // the frame is not fully initialized. That is OK because the debugger has code to handle this
+    // case. However, other callers may not be tolerant of this case, so we should push this assert
+    // out to the callers.
+ //_ASSERTE(retSP != NULL);
+
+ if (returnIP != NULL)
+ {
+ *returnIP = retSP ? *(TADDR*)retSP : NULL;
+ }
+
+ if (returnSP != NULL)
+ {
+ *returnSP = retSP;
+ }
+
+}
+
+
+
+BOOL ComPlusMethodFrame::TraceFrame(Thread *thread, BOOL fromPatch,
+ TraceDestination *trace, REGDISPLAY *regs)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(CheckPointer(thread));
+ PRECONDITION(CheckPointer(trace));
+ }
+ CONTRACTL_END;
+
+ //
+ // Get the call site info
+ //
+
+#if defined(_WIN64)
+ // Interop debugging is currently not supported on WIN64, so we always return FALSE.
+    // The result is that you can't step into an unmanaged frame or step out to one. You
+    // also can't set a breakpoint in one.
+ return FALSE;
+#endif // _WIN64
+
+ TADDR ip, returnIP, returnSP;
+ GetUnmanagedCallSite(&ip, &returnIP, &returnSP);
+
+ //
+ // If we've already made the call, we can't trace any more.
+ //
+ // !!! Note that this test isn't exact.
+ //
+
+ if (!fromPatch &&
+ (dac_cast<TADDR>(thread->GetFrame()) != dac_cast<TADDR>(this) ||
+ !thread->m_fPreemptiveGCDisabled ||
+ *PTR_TADDR(returnSP) == returnIP))
+ {
+ LOG((LF_CORDB, LL_INFO10000, "ComPlusMethodFrame::TraceFrame: can't trace...\n"));
+ return FALSE;
+ }
+
+ //
+ // Otherwise, return the unmanaged destination.
+ //
+
+ trace->InitForUnmanaged(ip);
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "ComPlusMethodFrame::TraceFrame: ip=0x%p\n", ip));
+
+ return TRUE;
+}
+#endif //CROSSGEN_COMPILE
+
+#ifdef _TARGET_X86_
+
+#ifndef DACCESS_COMPILE
+
+CrstStatic ComPlusCall::s_RetThunkCacheCrst;
+SHash<ComPlusCall::RetThunkSHashTraits> *ComPlusCall::s_pRetThunkCache = NULL;
+
+// One time init.
+void ComPlusCall::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ s_RetThunkCacheCrst.Init(CrstRetThunkCache);
+}
+
+LPVOID ComPlusCall::GetRetThunk(UINT numStackBytes)
+{
+ STANDARD_VM_CONTRACT;
+
+ LPVOID pRetThunk = NULL;
+ CrstHolder crst(&s_RetThunkCacheCrst);
+
+ // Lazily allocate the ret thunk cache.
+ if (s_pRetThunkCache == NULL)
+ s_pRetThunkCache = new SHash<RetThunkSHashTraits>();
+
+ const RetThunkCacheElement *pElement = s_pRetThunkCache->LookupPtr(numStackBytes);
+ if (pElement != NULL)
+ {
+ pRetThunk = pElement->m_pRetThunk;
+ }
+ else
+ {
+ // cache miss -> create a new thunk
+ AllocMemTracker dummyAmTracker;
+ pRetThunk = (LPVOID)dummyAmTracker.Track(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->AllocMem(S_SIZE_T((numStackBytes == 0) ? 1 : 3)));
+
+ BYTE *pThunk = (BYTE *)pRetThunk;
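+        // Emit the thunk: 0xC3 is the x86 'ret' instruction; 0xC2 imm16 is
+        // 'ret imm16', which additionally pops imm16 bytes of arguments off the
+        // stack on return (callee cleanup, as with __stdcall).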
+ if (numStackBytes == 0)
+ {
+ pThunk[0] = 0xc3;
+ }
+ else
+ {
+ pThunk[0] = 0xc2;
+ *(USHORT *)&pThunk[1] = (USHORT)numStackBytes;
+ }
+
+ // add it to the cache
+ RetThunkCacheElement element;
+ element.m_cbStack = numStackBytes;
+ element.m_pRetThunk = pRetThunk;
+ s_pRetThunkCache->Add(element);
+
+ dummyAmTracker.SuppressRelease();
+ }
+
+ return pRetThunk;
+}
+
+#endif // !DACCESS_COMPILE
+
+#endif // _TARGET_X86_
diff --git a/src/vm/clrtocomcall.h b/src/vm/clrtocomcall.h
new file mode 100644
index 0000000000..331ae56f23
--- /dev/null
+++ b/src/vm/clrtocomcall.h
@@ -0,0 +1,75 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: CLRtoCOMCall.h
+//
+
+//
+// Used to handle stub creation for managed to unmanaged transitions.
+//
+
+
+#ifndef __COMPLUSCALL_H__
+#define __COMPLUSCALL_H__
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+#include "util.hpp"
+
+class ComPlusCall
+{
+ public:
+ //---------------------------------------------------------
+ // Debugger helper function
+ //---------------------------------------------------------
+ static TADDR GetFrameCallIP(FramedMethodFrame *frame);
+
+ static MethodDesc* GetILStubMethodDesc(MethodDesc* pMD, DWORD dwStubFlags);
+ static PCODE GetStubForILStub(MethodDesc* pMD, MethodDesc** ppStubMD);
+
+ static ComPlusCallInfo *PopulateComPlusCallMethodDesc(MethodDesc* pMD, DWORD* pdwStubFlags);
+ static MethodDesc *GetWinRTFactoryMethodForCtor(MethodDesc *pMDCtor, BOOL *pComposition);
+ static MethodDesc *GetWinRTFactoryMethodForStatic(MethodDesc *pMDStatic);
+
+#ifdef _TARGET_X86_
+ static void Init();
+ static LPVOID GetRetThunk(UINT numStackBytes);
+#endif // _TARGET_X86_
+ private:
+ ComPlusCall(); // prevent "new"'s on this class
+
+#ifdef _TARGET_X86_
+ struct RetThunkCacheElement
+ {
+ RetThunkCacheElement()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_cbStack = 0;
+ m_pRetThunk = NULL;
+ }
+
+ UINT m_cbStack;
+ LPVOID m_pRetThunk;
+ };
+
+ class RetThunkSHashTraits : public NoRemoveSHashTraits< DefaultSHashTraits<RetThunkCacheElement> >
+ {
+ public:
+ typedef UINT key_t;
+ static key_t GetKey(element_t e) { LIMITED_METHOD_CONTRACT; return e.m_cbStack; }
+ static BOOL Equals(key_t k1, key_t k2) { LIMITED_METHOD_CONTRACT; return (k1 == k2); }
+ static count_t Hash(key_t k) { LIMITED_METHOD_CONTRACT; return (count_t)(size_t)k; }
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; return RetThunkCacheElement(); }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return (e.m_pRetThunk == NULL); }
+ };
+
+ static SHash<RetThunkSHashTraits> *s_pRetThunkCache;
+ static CrstStatic s_RetThunkCacheCrst;
+#endif // _TARGET_X86_
+};
+
+#endif // __COMPLUSCALL_H__
diff --git a/src/vm/clrvarargs.cpp b/src/vm/clrvarargs.cpp
new file mode 100644
index 0000000000..8411f6be1b
--- /dev/null
+++ b/src/vm/clrvarargs.cpp
@@ -0,0 +1,115 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: CLRVarArgs.cpp
+//
+
+//
+// Variant-specific marshalling.
+
+
+#include "common.h"
+#include "clrvarargs.h"
+
+DWORD VARARGS::CalcVaListSize(VARARGS *data)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Calculate how much space we need for the marshaled stack.
+    // This assumes that the vararg managed and unmanaged calling conventions are similar enough
+    // that we can simply use the size stored in the VASigCookie. This actually overestimates
+ // the value since it counts the fixed args as well as the varargs. But that's harmless.
+
+ DWORD dwVaListSize = data->ArgCookie->sizeOfArgs;
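+    // On non-x86 targets, also reserve room for the spilled argument registers.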
+#ifndef _TARGET_X86_
+ dwVaListSize += ARGUMENTREGISTERS_SIZE;
+#endif
+ return dwVaListSize;
+}
+
+void VARARGS::MarshalToManagedVaList(va_list va, VARARGS *dataout)
+{
+ WRAPPER_NO_CONTRACT
+
+ _ASSERTE(dataout != NULL);
+ dataout->SigPtr = SigPointer(NULL, 0);
+ dataout->ArgCookie = NULL;
+ dataout->ArgPtr = (BYTE*)va;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Marshal a ArgIterator to a pre-allocated va_list
+////////////////////////////////////////////////////////////////////////////////
+void
+VARARGS::MarshalToUnmanagedVaList(
+ va_list va, DWORD cbVaListSize, const VARARGS * data)
+{
+ BYTE * pdstbuffer = (BYTE *)va;
+
+ int remainingArgs = data->RemainingArgs;
+ BYTE * psrc = (BYTE *)(data->ArgPtr);
+ BYTE * pdst = pdstbuffer;
+
+ SigPointer sp = data->SigPtr;
+    SigTypeContext typeContext; // An empty type context is OK here because vararg methods cannot be generic.
+ while (remainingArgs--)
+ {
+ CorElementType elemType = sp.PeekElemTypeClosed(data->ArgCookie->pModule, &typeContext);
+ switch (elemType)
+ {
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_PTR:
+ {
+ DWORD cbSize = StackElemSize(CorTypeInfo::Size(elemType));
+
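+                // Values larger than the enregisterable maximum are passed by reference
+                // on these targets, so only a pointer-sized slot is copied here.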
+ #ifdef ENREGISTERED_PARAMTYPE_MAXSIZE
+ if (cbSize > ENREGISTERED_PARAMTYPE_MAXSIZE)
+ cbSize = sizeof(void*);
+ #endif
+
+#ifdef _TARGET_ARM_
+ if (cbSize == 8)
+ {
+ // 64-bit primitives come from and must be copied to 64-bit aligned locations.
+ psrc = (BYTE*)ALIGN_UP(psrc, 8);
+ pdst = (BYTE*)ALIGN_UP(pdst, 8);
+ }
+#endif // _TARGET_ARM_
+
+ #ifdef STACK_GROWS_DOWN_ON_ARGS_WALK
+ psrc -= cbSize;
+ #endif // STACK_GROWS_DOWN_ON_ARGS_WALK
+
+ if (pdst + cbSize > pdstbuffer + cbVaListSize)
+ COMPlusThrow(kArgumentException);
+
+ CopyMemory(pdst, psrc, cbSize);
+
+ #ifdef STACK_GROWS_UP_ON_ARGS_WALK
+ psrc += cbSize;
+ #endif // STACK_GROWS_UP_ON_ARGS_WALK
+
+ pdst += cbSize;
+ IfFailThrow(sp.SkipExactlyOne());
+ }
+ break;
+
+ default:
+ // non-IJW data type - we don't support marshaling these inside a va_list.
+ COMPlusThrow(kNotSupportedException);
+ }
+ }
+} // VARARGS::MarshalToUnmanagedVaList
diff --git a/src/vm/clrvarargs.h b/src/vm/clrvarargs.h
new file mode 100644
index 0000000000..ca22338c0a
--- /dev/null
+++ b/src/vm/clrvarargs.h
@@ -0,0 +1,28 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+////////////////////////////////////////////////////////////////////////////////
+// This module contains the implementation of the native methods for the
+// varargs class(es).
+//
+
+////////////////////////////////////////////////////////////////////////////////
+
+#ifndef _CLRVARARGS_H_
+#define _CLRVARARGS_H_
+
+
+struct VARARGS
+{
+ VASigCookie *ArgCookie;
+ SigPointer SigPtr;
+ BYTE *ArgPtr;
+ int RemainingArgs;
+
+ static DWORD CalcVaListSize(VARARGS *data);
+ static void MarshalToManagedVaList(va_list va, VARARGS *dataout);
+ static void MarshalToUnmanagedVaList(va_list va, DWORD cbVaListSize, const VARARGS *data);
+};
+
+#endif // _CLRVARARGS_H_
diff --git a/src/vm/clsload.cpp b/src/vm/clsload.cpp
new file mode 100644
index 0000000000..1aa6f23e8c
--- /dev/null
+++ b/src/vm/clsload.cpp
@@ -0,0 +1,6637 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: clsload.cpp
+//
+
+
+
+//
+
+//
+// ============================================================================
+
+#include "common.h"
+#include "winwrap.h"
+#include "ceeload.h"
+#include "siginfo.hpp"
+#include "vars.hpp"
+#include "clsload.hpp"
+#include "classhash.inl"
+#include "class.h"
+#include "method.hpp"
+#include "ecall.h"
+#include "stublink.h"
+#include "object.h"
+#include "excep.h"
+#include "threads.h"
+#include "comsynchronizable.h"
+#include "threads.h"
+#include "dllimport.h"
+#include "security.h"
+#include "dbginterface.h"
+#include "log.h"
+#include "eeconfig.h"
+#include "fieldmarshaler.h"
+#include "jitinterface.h"
+#include "vars.hpp"
+#include "assembly.hpp"
+#include "perfcounters.h"
+#include "eeprofinterfaces.h"
+#include "eehash.h"
+#include "typehash.h"
+#include "comdelegate.h"
+#include "array.h"
+#include "stackprobe.h"
+#include "posterror.h"
+#include "wrappers.h"
+#include "generics.h"
+#include "typestring.h"
+#include "typedesc.h"
+#include "cgencpu.h"
+#include "eventtrace.h"
+#include "typekey.h"
+#include "pendingload.h"
+#include "proftoeeinterfaceimpl.h"
+#include "mdaassistants.h"
+#include "virtualcallstub.h"
+#include "stringarraylist.h"
+
+#if defined(FEATURE_FUSION) && !defined(DACCESS_COMPILE)
+#include "policy.h" // For Fusion::Util::IsAnyFrameworkAssembly
+#endif
+
+// This method determines the "loader module" for an instantiated type
+// or method. The rule must ensure that any types involved in the
+// instantiated type or method do not outlive the loader module itself
+// with respect to app-domain unloading (e.g. MyList<MyType> can't be
+// put in the module of MyList if MyList's assembly is
+// app-domain-neutral but MyType's assembly is app-domain-specific).
+// The rule we use is:
+//
+// * Pick the first type in the class instantiation, followed by
+// method instantiation, whose loader module is non-shared (app-domain-bound)
+// * If no type is app-domain-bound, return the module containing the generic type itself
+//
+// Some useful effects of this rule (for ngen purposes) are:
+//
+// * G<object,...,object> lives in the module defining G
+// * non-mscorlib instantiations of mscorlib-defined generic types live in the module
+//   of the instantiation (when only one module is involved in the instantiation)
+//
+
+/* static */
+PTR_Module ClassLoader::ComputeLoaderModuleWorker(
+ Module * pDefinitionModule, // the module that declares the generic type or method
+ mdToken token, // method or class token for this item
+ Instantiation classInst, // the type arguments to the type (if any)
+ Instantiation methodInst) // the type arguments to the method (if any)
+{
+ CONTRACT(Module*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDefinitionModule, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL));
+ SO_INTOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END
+
+ if (classInst.IsEmpty() && methodInst.IsEmpty())
+ RETURN PTR_Module(pDefinitionModule);
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ // Check we're NGEN'ing
+ if (IsCompilationProcess())
+ {
+ RETURN(ComputeLoaderModuleForCompilation(pDefinitionModule, token, classInst, methodInst));
+ }
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+#endif // #ifndef DACCESS_COMPILE
+
+ Module *pFirstNonSharedLoaderModule = NULL;
+ Module *pFirstNonSystemSharedModule = NULL;
+ Module *pLoaderModule = NULL;
+
+ if (pDefinitionModule)
+ {
+ if (pDefinitionModule->IsCollectible())
+ goto ComputeCollectibleLoaderModule;
+ if (!pDefinitionModule->GetAssembly()->IsDomainNeutral())
+ {
+ pFirstNonSharedLoaderModule = pDefinitionModule;
+ }
+ else
+ if (!pDefinitionModule->IsSystem())
+ {
+ pFirstNonSystemSharedModule = pDefinitionModule;
+ }
+ }
+
+ for (DWORD i = 0; i < classInst.GetNumArgs(); i++)
+ {
+ TypeHandle classArg = classInst[i];
+ _ASSERTE(!classArg.IsEncodedFixup());
+ Module* pModule = classArg.GetLoaderModule();
+ if (pModule->IsCollectible())
+ goto ComputeCollectibleLoaderModule;
+ if (!pModule->GetAssembly()->IsDomainNeutral())
+ {
+ if (pFirstNonSharedLoaderModule == NULL)
+ pFirstNonSharedLoaderModule = pModule;
+ }
+ else
+ if (!pModule->IsSystem())
+ {
+ if (pFirstNonSystemSharedModule == NULL)
+ pFirstNonSystemSharedModule = pModule;
+ }
+ }
+
+ for (DWORD i = 0; i < methodInst.GetNumArgs(); i++)
+ {
+ TypeHandle methodArg = methodInst[i];
+ _ASSERTE(!methodArg.IsEncodedFixup());
+ Module *pModule = methodArg.GetLoaderModule();
+ if (pModule->IsCollectible())
+ goto ComputeCollectibleLoaderModule;
+ if (!pModule->GetAssembly()->IsDomainNeutral())
+ {
+ if (pFirstNonSharedLoaderModule == NULL)
+ pFirstNonSharedLoaderModule = pModule;
+ }
+ else
+ if (!pModule->IsSystem())
+ {
+ if (pFirstNonSystemSharedModule == NULL)
+ pFirstNonSystemSharedModule = pModule;
+ }
+ }
+
+ // RULE: Prefer modules in non-shared assemblies.
+ // This ensures safety of app-domain unloading.
+ if (pFirstNonSharedLoaderModule != NULL)
+ {
+ pLoaderModule = pFirstNonSharedLoaderModule;
+ }
+ else if (pFirstNonSystemSharedModule != NULL)
+ {
+#ifdef FEATURE_FULL_NGEN
+        // pFirstNonSystemSharedModule may be the module of a speculative generic instantiation.
+        // If we are domain neutral, we have to use a constituent of the instantiation to store
+        // statics. We need to ensure that we can create a DomainModule in all domains
+        // that this instantiation may get activated in. The PZM is a good approximation of such a constituent.
+ pLoaderModule = Module::ComputePreferredZapModule(pDefinitionModule, classInst, methodInst);
+#else
+ // Use pFirstNonSystemSharedModule just so C<object> ends up in module C - it
+ // shouldn't actually matter at all though.
+ pLoaderModule = pFirstNonSystemSharedModule;
+#endif
+ }
+ else
+ {
+ CONSISTENCY_CHECK(MscorlibBinder::GetModule() && MscorlibBinder::GetModule()->IsSystem());
+
+ pLoaderModule = MscorlibBinder::GetModule();
+ }
+
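+    // Reached only via the gotos above. For collectible types, pick the loader
+    // module whose collectible LoaderAllocator has the highest creation number
+    // among the instantiation arguments, falling back to the defining module
+    // when no other collectible allocator is involved.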
+ if (FALSE)
+ {
+ComputeCollectibleLoaderModule:
+ LoaderAllocator *pLoaderAllocatorOfDefiningType = NULL;
+ LoaderAllocator *pOldestLoaderAllocator = NULL;
+ Module *pOldestLoaderModule = NULL;
+ UINT64 oldestFoundAge = 0;
+ DWORD classArgsCount = classInst.GetNumArgs();
+ DWORD totalArgsCount = classArgsCount + methodInst.GetNumArgs();
+
+ if (pDefinitionModule != NULL) pLoaderAllocatorOfDefiningType = pDefinitionModule->GetLoaderAllocator();
+
+ for (DWORD i = 0; i < totalArgsCount; i++) {
+
+ TypeHandle arg;
+
+ if (i < classArgsCount)
+ arg = classInst[i];
+ else
+ arg = methodInst[i - classArgsCount];
+
+ Module *pModuleCheck = arg.GetLoaderModule();
+ LoaderAllocator *pLoaderAllocatorCheck = pModuleCheck->GetLoaderAllocator();
+
+ if (pLoaderAllocatorCheck != pLoaderAllocatorOfDefiningType &&
+ pLoaderAllocatorCheck->IsCollectible() &&
+ pLoaderAllocatorCheck->GetCreationNumber() > oldestFoundAge)
+ {
+ pOldestLoaderModule = pModuleCheck;
+ pOldestLoaderAllocator = pLoaderAllocatorCheck;
+ oldestFoundAge = pLoaderAllocatorCheck->GetCreationNumber();
+ }
+ }
+
+ // Only if we didn't find a different loader allocator than the defining loader allocator do we
+ // use the defining loader allocator
+ if (pOldestLoaderModule != NULL)
+ pLoaderModule = pOldestLoaderModule;
+ else
+ pLoaderModule = pDefinitionModule;
+ }
+ RETURN PTR_Module(pLoaderModule);
+}
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+/* static */
+PTR_Module ClassLoader::ComputeLoaderModuleForCompilation(
+ Module * pDefinitionModule, // the module that declares the generic type or method
+ mdToken token, // method or class token for this item
+ Instantiation classInst, // the type arguments to the type (if any)
+ Instantiation methodInst) // the type arguments to the method (if any)
+{
+ CONTRACT(Module*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDefinitionModule, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL));
+ SO_INTOLERANT;
+ }
+ CONTRACT_END
+
+ // The NGEN rule for compiling constructed types and instantiated methods
+ // into modules other than their "natural" LoaderModule. This is at the heart of
+ // "full generics NGEN".
+ //
+    // If this instantiation doesn't have a unique home, then use the NGEN module.
+
+ // OK, we're certainly NGEN'ing. And if we're NGEN'ing then we're not on the debugger thread.
+ CONSISTENCY_CHECK(((GetThread() && GetAppDomain()) || IsGCThread()) &&
+ "unexpected: running a load on debug thread but IsCompilationProcess() returned TRUE");
+
+ // Save it into its PreferredZapModule if it's always going to be saved there.
+ // This is a stable choice - no need to record it in the table (as we do for others below)
+ if (Module::IsAlwaysSavedInPreferredZapModule(classInst, methodInst))
+ {
+ RETURN (Module::ComputePreferredZapModule(pDefinitionModule, classInst, methodInst));
+ }
+
+ // Check if this compilation process has already decided on an adjustment. Once we decide
+ // on the LoaderModule for an item it must be stable for the duration of a
+ // compilation process, no matter how many modules get NGEN'd.
+
+ ZapperLoaderModuleTableKey key(pDefinitionModule,
+ token,
+ classInst,
+ methodInst);
+
+ Module * pZapperLoaderModule = g_pCEECompileInfo->LookupZapperLoaderModule(&key);
+ if (pZapperLoaderModule != NULL)
+ {
+ RETURN (pZapperLoaderModule);
+ }
+
+ // OK, we need to compute a non-standard zapping module.
+
+ Module * pPreferredZapModule = Module::ComputePreferredZapModule(pDefinitionModule, classInst, methodInst);
+
+    // Check if we're NGEN'ing but the compilation domain perhaps isn't set up yet.
+    // This can happen in the following situations:
+    // - Managed code running during startup before the compilation domain is set up.
+    // - Exceptions (e.g. invalid program exceptions) thrown from the compilation domain and caught in the default domain.
+
+ // We're a little stuck - we can't force the item into an NGEN image at this point. So just bail out
+ // and use the loader module we've computed without recording the choice. The loader module should always
+ // be mscorlib in this case.
+ AppDomain * pAppDomain = GetAppDomain();
+ if (!pAppDomain->IsCompilationDomain() ||
+ !pAppDomain->ToCompilationDomain()->GetTargetModule())
+ {
+ _ASSERTE(pPreferredZapModule->IsSystem() || IsNgenPDBCompilationProcess());
+ RETURN (pPreferredZapModule);
+ }
+
+ Module * pTargetModule = pAppDomain->ToCompilationDomain()->GetTargetModule();
+
+    // If it is a multi-module assembly and we have not saved the PZM yet, do not create
+    // a speculative instantiation - just save it in the PZM.
+ if (pTargetModule->GetAssembly() == pPreferredZapModule->GetAssembly() &&
+ !pPreferredZapModule->IsModuleSaved())
+ {
+ pZapperLoaderModule = pPreferredZapModule;
+ }
+ else
+ {
+ // Everything else can be saved into the current module.
+ pZapperLoaderModule = pTargetModule;
+ }
+
+ // If generating WinMD resilient code and we so far choose to use the target module,
+ // we need to check if the definition module or any of the instantiation type can
+ // cause version resilient problems.
+ if (g_fNGenWinMDResilient && pZapperLoaderModule == pTargetModule)
+ {
+ if (pDefinitionModule != NULL && !pDefinitionModule->IsInCurrentVersionBubble())
+ {
+ pZapperLoaderModule = pDefinitionModule;
+ goto ModuleAdjustedForVersionResiliency;
+ }
+
+ for (DWORD i = 0; i < classInst.GetNumArgs(); i++)
+ {
+ Module * pModule = classInst[i].GetLoaderModule();
+ if (!pModule->IsInCurrentVersionBubble())
+ {
+ pZapperLoaderModule = pModule;
+ goto ModuleAdjustedForVersionResiliency;
+ }
+ }
+
+ for (DWORD i = 0; i < methodInst.GetNumArgs(); i++)
+ {
+ Module * pModule = methodInst[i].GetLoaderModule();
+ if (!pModule->IsInCurrentVersionBubble())
+ {
+ pZapperLoaderModule = pModule;
+ goto ModuleAdjustedForVersionResiliency;
+ }
+ }
+ModuleAdjustedForVersionResiliency: ;
+ }
+
+ // Record this choice just in case we're NGEN'ing multiple modules
+ // to make sure we always do the same thing if we're asked to compute
+ // the loader module again.
+
+ // Note this whole code path only happens while NGEN'ing, so this violation
+ // is not so bad. It is needed since we allocate stuff on the heap.
+ CONTRACT_VIOLATION(ThrowsViolation|FaultViolation);
+
+ // Copy the instantiation arrays so they can escape the scope of this method.
+    // Since this is a permanent entry in a table for this compilation process,
+    // we do not need to collect these. If we did, we would do it when we deleted the
+    // ZapperLoaderModuleTable.
+ NewArrayHolder<TypeHandle> pClassArgs = NULL;
+ if (!classInst.IsEmpty())
+ {
+ pClassArgs = new TypeHandle[classInst.GetNumArgs()];
+ for (unsigned int i = 0; i < classInst.GetNumArgs(); i++)
+ pClassArgs[i] = classInst[i];
+ }
+
+ NewArrayHolder<TypeHandle> pMethodArgs = NULL;
+ if (!methodInst.IsEmpty())
+ {
+ pMethodArgs = new TypeHandle[methodInst.GetNumArgs()];
+ for (unsigned int i = 0; i < methodInst.GetNumArgs(); i++)
+ pMethodArgs[i] = methodInst[i];
+ }
+
+ ZapperLoaderModuleTableKey key2(pDefinitionModule,
+ token,
+ Instantiation(pClassArgs, classInst.GetNumArgs()),
+ Instantiation(pMethodArgs, methodInst.GetNumArgs()));
+ g_pCEECompileInfo->RecordZapperLoaderModule(&key2, pZapperLoaderModule);
+
+ pClassArgs.SuppressRelease();
+ pMethodArgs.SuppressRelease();
+
+ RETURN (pZapperLoaderModule);
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+#endif // #ifndef DACCESS_COMPILE
+
+/*static*/
+Module * ClassLoader::ComputeLoaderModule(MethodTable * pMT,
+ mdToken token,
+ Instantiation methodInst)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ return ComputeLoaderModuleWorker(pMT->GetModule(),
+ token,
+ pMT->GetInstantiation(),
+ methodInst);
+}
+/*static*/
+Module *ClassLoader::ComputeLoaderModule(TypeKey *typeKey)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+
+ if (typeKey->GetKind() == ELEMENT_TYPE_CLASS)
+ return ComputeLoaderModuleWorker(typeKey->GetModule(),
+ typeKey->GetTypeToken(),
+ typeKey->GetInstantiation(),
+ Instantiation());
+ else if (typeKey->GetKind() == ELEMENT_TYPE_FNPTR)
+ return ComputeLoaderModuleForFunctionPointer(typeKey->GetRetAndArgTypes(), typeKey->GetNumArgs() + 1);
+ else
+ return ComputeLoaderModuleForParamType(typeKey->GetElementType());
+}
+
+/*static*/
+BOOL ClassLoader::IsTypicalInstantiation(Module *pModule, mdToken token, Instantiation inst)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(TypeFromToken(token) == mdtTypeDef || TypeFromToken(token) == mdtMethodDef);
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle thArg = inst[i];
+
+ if (thArg.IsGenericVariable())
+ {
+ TypeVarTypeDesc* tyvar = thArg.AsGenericVariable();
+
+ PREFIX_ASSUME(tyvar!=NULL);
+ if ((tyvar->GetTypeOrMethodDef() != token) ||
+ (tyvar->GetModule() != dac_cast<PTR_Module>(pModule)) ||
+ (tyvar->GetIndex() != i))
+ return FALSE;
+ }
+ else
+ {
+ return FALSE;
+ }
+ }
+ return TRUE;
+}
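+
+// Illustrative sketch (the token and handles below are hypothetical): for a
+// definition such as List<T>, the typical instantiation is the one whose
+// arguments are List's own type variables, in order:
+//
+//     // tdList is assumed to be the typedef token of List<T> in pModule
+//     Instantiation inst = pMT->GetInstantiation();
+//     if (ClassLoader::IsTypicalInstantiation(pModule, tdList, inst))
+//     {
+//         // inst is <T> itself (the open definition form), not e.g. <int>
+//     }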
+
+// External class loader entry point: load a type by name
+/*static*/
+TypeHandle ClassLoader::LoadTypeByNameThrowing(Assembly *pAssembly,
+ LPCUTF8 nameSpace,
+ LPCUTF8 name,
+ NotFoundAction fNotFound,
+ ClassLoader::LoadTypesFlag fLoadTypes,
+ ClassLoadLevel level)
+{
+ CONTRACT(TypeHandle)
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+
+ if (FORBIDGC_LOADER_USE_ENABLED() || fLoadTypes != LoadTypes) { LOADS_TYPE(CLASS_LOAD_BEGIN); } else { LOADS_TYPE(level); }
+
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(level > CLASS_LOAD_BEGIN && level <= CLASS_LOADED);
+ POSTCONDITION(CheckPointer(RETVAL,
+ (fNotFound == ThrowIfNotFound && fLoadTypes == LoadTypes )? NULL_NOT_OK : NULL_OK));
+ POSTCONDITION(RETVAL.IsNull() || RETVAL.CheckLoadLevel(level));
+ SUPPORTS_DAC;
+#ifdef DACCESS_COMPILE
+ PRECONDITION((fNotFound == ClassLoader::ReturnNullIfNotFound) && (fLoadTypes == DontLoadTypes));
+#endif
+ }
+ CONTRACT_END
+
+ NameHandle nameHandle(nameSpace, name);
+ if (fLoadTypes == DontLoadTypes)
+ nameHandle.SetTokenNotToLoad(tdAllTypes);
+ if (fNotFound == ThrowIfNotFound)
+ RETURN pAssembly->GetLoader()->LoadTypeHandleThrowIfFailed(&nameHandle, level);
+ else
+ RETURN pAssembly->GetLoader()->LoadTypeHandleThrowing(&nameHandle, level);
+}
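+
+// Illustrative usage sketch (pAssembly is a hypothetical, already-loaded
+// assembly): load "System.String" by name, throwing on failure:
+//
+//     TypeHandle th = ClassLoader::LoadTypeByNameThrowing(
+//         pAssembly, "System", "String",
+//         ClassLoader::ThrowIfNotFound, ClassLoader::LoadTypes, CLASS_LOADED);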
+
+#ifndef DACCESS_COMPILE
+
+#define DAC_LOADS_TYPE(level, expression) \
+ if (FORBIDGC_LOADER_USE_ENABLED() || (expression)) \
+ { LOADS_TYPE(CLASS_LOAD_BEGIN); } else { LOADS_TYPE(level); }
+#else
+
+#define DAC_LOADS_TYPE(level, expression) { LOADS_TYPE(CLASS_LOAD_BEGIN); }
+#endif // #ifndef DACCESS_COMPILE
+
+//
+// Find a class given its name, using the class loader's global list of known classes.
+// If the type is found, it will be restored unless pName->GetTokenNotToLoad() prohibits that.
+// Returns NULL if the class is not found AND pName->OKToLoad() returns false.
+TypeHandle ClassLoader::LoadTypeHandleThrowIfFailed(NameHandle* pName, ClassLoadLevel level,
+ Module* pLookInThisModuleOnly/*=NULL*/)
+{
+ CONTRACT(TypeHandle)
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ DAC_LOADS_TYPE(level, !pName->OKToLoad());
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pName));
+ PRECONDITION(level > CLASS_LOAD_BEGIN && level <= CLASS_LOADED);
+ POSTCONDITION(CheckPointer(RETVAL, pName->OKToLoad() ? NULL_NOT_OK : NULL_OK));
+ POSTCONDITION(RETVAL.IsNull() || RETVAL.CheckLoadLevel(level));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ // Lookup in the classes that this class loader knows about
+ TypeHandle typeHnd = LoadTypeHandleThrowing(pName, level, pLookInThisModuleOnly);
+
+ if(typeHnd.IsNull()) {
+
+ if ( pName->OKToLoad() ) {
+#ifdef _DEBUG_IMPL
+ {
+ LPCUTF8 szName = pName->GetName();
+ if (szName == NULL)
+ szName = "<UNKNOWN>";
+
+ StackSString codeBase;
+ GetAssembly()->GetCodeBase(codeBase);
+
+ LOG((LF_CLASSLOADER, LL_INFO10, "Failed to find class \"%s\" in the manifest for assembly \"%ws\"\n", szName, (LPCWSTR)codeBase));
+ }
+#endif
+
+#ifndef DACCESS_COMPILE
+ COUNTER_ONLY(GetPerfCounters().m_Loading.cLoadFailures++);
+
+ m_pAssembly->ThrowTypeLoadException(pName, IDS_CLASSLOAD_GENERAL);
+#else
+ DacNotImpl();
+#endif
+ }
+ }
+
+ RETURN(typeHnd);
+}
+
+#ifndef DACCESS_COMPILE
+
+//<TODO>@TODO: Need to allow exceptions to be thrown when classloader is cleaned up</TODO>
+EEClassHashEntry_t* ClassLoader::InsertValue(EEClassHashTable *pClassHash, EEClassHashTable *pClassCaseInsHash, LPCUTF8 pszNamespace, LPCUTF8 pszClassName, HashDatum Data, EEClassHashEntry_t *pEncloser, AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ LPUTF8 pszLowerCaseNS = NULL;
+ LPUTF8 pszLowerCaseName = NULL;
+ EEClassHashEntry_t *pCaseInsEntry = NULL;
+
+ EEClassHashEntry_t *pEntry = pClassHash->AllocNewEntry(pamTracker);
+
+ if (pClassCaseInsHash) {
+ CreateCanonicallyCasedKey(pszNamespace, pszClassName, &pszLowerCaseNS, &pszLowerCaseName);
+ pCaseInsEntry = pClassCaseInsHash->AllocNewEntry(pamTracker);
+ }
+
+
+ {
+ // ! We cannot fail after this point.
+ CANNOTTHROWCOMPLUSEXCEPTION();
+ FAULT_FORBID();
+
+
+ pClassHash->InsertValueUsingPreallocatedEntry(pEntry, pszNamespace, pszClassName, Data, pEncloser);
+
+ //If we're keeping a table for case-insensitive lookup, keep that up to date
+ if (pClassCaseInsHash)
+ pClassCaseInsHash->InsertValueUsingPreallocatedEntry(pCaseInsEntry, pszLowerCaseNS, pszLowerCaseName, pEntry, pEncloser);
+
+ return pEntry;
+ }
+
+}
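+
+// Design note on InsertValue: both hash entries are allocated up front so that
+// the inserts inside the CANNOTTHROWCOMPLUSEXCEPTION/FAULT_FORBID region cannot
+// fail, keeping the case-sensitive and case-insensitive tables consistent even
+// if an allocation throws.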
+
+#endif // #ifndef DACCESS_COMPILE
+
+BOOL ClassLoader::CompareNestedEntryWithExportedType(IMDInternalImport * pImport,
+ mdExportedType mdCurrent,
+ EEClassHashTable * pClassHash,
+ PTR_EEClassHashEntry pEntry)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ LPCUTF8 Key[2];
+
+ do
+ {
+ if (FAILED(pImport->GetExportedTypeProps(
+ mdCurrent,
+ &Key[0],
+ &Key[1],
+ &mdCurrent,
+ NULL, //binding (type def)
+ NULL))) //flags
+ {
+ return FALSE;
+ }
+
+ if (pClassHash->CompareKeys(pEntry, Key))
+ {
+ // Reached top level class for mdCurrent - return whether
+ // or not pEntry is a top level class
+ // (pEntry is a top level class if its pEncloser is NULL)
+ if ((TypeFromToken(mdCurrent) != mdtExportedType) ||
+ (mdCurrent == mdExportedTypeNil))
+ {
+ return pEntry->GetEncloser() == NULL;
+ }
+ }
+ else // Keys don't match - wrong entry
+ {
+ return FALSE;
+ }
+ }
+ while ((pEntry = pEntry->GetEncloser()) != NULL);
+
+ // Reached the top level class for pEntry, but mdCurrent is nested
+ return FALSE;
+}
+
+
+BOOL ClassLoader::CompareNestedEntryWithTypeDef(IMDInternalImport * pImport,
+ mdTypeDef mdCurrent,
+ EEClassHashTable * pClassHash,
+ PTR_EEClassHashEntry pEntry)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ LPCUTF8 Key[2];
+
+ do {
+ if (FAILED(pImport->GetNameOfTypeDef(mdCurrent, &Key[1], &Key[0])))
+ {
+ return FALSE;
+ }
+
+ if (pClassHash->CompareKeys(pEntry, Key)) {
+ // Reached top level class for mdCurrent - return whether
+ // or not pEntry is a top level class
+ // (pEntry is a top level class if its pEncloser is NULL)
+ if (FAILED(pImport->GetNestedClassProps(mdCurrent, &mdCurrent)))
+ return pEntry->GetEncloser() == NULL;
+ }
+ else // Keys don't match - wrong entry
+ return FALSE;
+ }
+ while ((pEntry = pEntry->GetEncloser()) != NULL);
+
+ // Reached the top level class for pEntry, but mdCurrent is nested
+ return FALSE;
+}
+
+
+BOOL ClassLoader::CompareNestedEntryWithTypeRef(IMDInternalImport * pImport,
+ mdTypeRef mdCurrent,
+ EEClassHashTable * pClassHash,
+ PTR_EEClassHashEntry pEntry)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ LPCUTF8 Key[2];
+
+ do {
+ if (FAILED(pImport->GetNameOfTypeRef(mdCurrent, &Key[0], &Key[1])))
+ {
+ return FALSE;
+ }
+
+ if (pClassHash->CompareKeys(pEntry, Key))
+ {
+ if (FAILED(pImport->GetResolutionScopeOfTypeRef(mdCurrent, &mdCurrent)))
+ {
+ return FALSE;
+ }
+ // Reached top level class for mdCurrent - return whether
+ // or not pEntry is a top level class
+ // (pEntry is a top level class if its pEncloser is NULL)
+ if ((TypeFromToken(mdCurrent) != mdtTypeRef) ||
+ (mdCurrent == mdTypeRefNil))
+ return pEntry->GetEncloser() == NULL;
+ }
+ else // Keys don't match - wrong entry
+ return FALSE;
+ }
+ while ((pEntry = pEntry->GetEncloser())!=NULL);
+
+ // Reached the top level class for pEntry, but mdCurrent is nested
+ return FALSE;
+}
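+
+// Worked example for the three CompareNestedEntryWith* routines above
+// (hypothetical type names): for a nested class Outer+Inner, pEntry's encloser
+// chain is Inner -> Outer -> NULL, while mdCurrent walks the metadata encloser
+// tokens in lockstep. The match succeeds only if both chains run out of
+// enclosers at the same time, with every namespace/name pair comparing equal
+// along the way.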
+
+
+/*static*/
+BOOL ClassLoader::IsNested(Module *pModule, mdToken token, mdToken *mdEncloser)
+{
+ CONTRACTL
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ switch(TypeFromToken(token)) {
+ case mdtTypeDef:
+ return (SUCCEEDED(pModule->GetMDImport()->GetNestedClassProps(token, mdEncloser)));
+
+ case mdtTypeRef:
+ IfFailThrow(pModule->GetMDImport()->GetResolutionScopeOfTypeRef(token, mdEncloser));
+ return ((TypeFromToken(*mdEncloser) == mdtTypeRef) &&
+ (*mdEncloser != mdTypeRefNil));
+
+ case mdtExportedType:
+ IfFailThrow(pModule->GetAssembly()->GetManifestImport()->GetExportedTypeProps(
+ token,
+ NULL, // namespace
+ NULL, // name
+ mdEncloser,
+ NULL, //binding (type def)
+ NULL)); //flags
+ return ((TypeFromToken(*mdEncloser) == mdtExportedType) &&
+ (*mdEncloser != mdExportedTypeNil));
+
+ default:
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_TOKEN_TYPE);
+ }
+}
+
+BOOL ClassLoader::IsNested(NameHandle* pName, mdToken *mdEncloser)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (pName->GetTypeModule()) {
+ if (TypeFromToken(pName->GetTypeToken()) == mdtBaseType)
+ {
+ if (pName->GetBucket())
+ return TRUE;
+ return FALSE;
+ }
+ else
+ return IsNested(pName->GetTypeModule(), pName->GetTypeToken(), mdEncloser);
+ }
+ else
+ return FALSE;
+}
+
+EEClassHashEntry_t *ClassLoader::GetClassValue(NameHandleTable nhTable,
+ NameHandle *pName,
+ HashDatum *pData,
+ EEClassHashTable **ppTable,
+ Module* pLookInThisModuleOnly)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ MODE_ANY;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ PRECONDITION(CheckPointer(pName));
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+
+ mdToken mdEncloser;
+ EEClassHashEntry_t *pBucket = NULL;
+
+#if _DEBUG
+ if (pName->GetName()) {
+ if (pName->GetNameSpace() == NULL)
+ LOG((LF_CLASSLOADER, LL_INFO1000, "Looking up %s by name.\n",
+ pName->GetName()));
+ else
+ LOG((LF_CLASSLOADER, LL_INFO1000, "Looking up %s.%s by name.\n",
+ pName->GetNameSpace(), pName->GetName()));
+ }
+#endif
+
+ if (IsNested(pName, &mdEncloser))
+ {
+ Module *pModule = pName->GetTypeModule();
+ PREFIX_ASSUME(pModule != NULL);
+ PTR_Assembly assembly=GetAssembly();
+ PREFIX_ASSUME(assembly!=NULL);
+ ModuleIterator i = assembly->IterateModules();
+ Module *pClsModule = NULL;
+
+ while (i.Next()) {
+ pClsModule = i.GetModule();
+ if (pClsModule->IsResource())
+ continue;
+ if (pLookInThisModuleOnly && (pClsModule != pLookInThisModuleOnly))
+ continue;
+
+ EEClassHashTable* pTable = NULL;
+ if (nhTable == nhCaseSensitive)
+ {
+ *ppTable = pTable = pClsModule->GetAvailableClassHash();
+
+ }
+ else {
+ // currently we expect only these two kinds--for DAC builds, nhTable will be nhCaseSensitive
+ _ASSERTE(nhTable == nhCaseInsensitive);
+ *ppTable = pTable = pClsModule->GetAvailableClassCaseInsHash();
+
+ if (pTable == NULL) {
+ // We have not built the table yet - the caller will handle this case.
+ return NULL;
+ }
+ }
+
+ _ASSERTE(pTable);
+
+ EEClassHashTable::LookupContext sContext;
+ if ((pBucket = pTable->GetValue(pName, pData, TRUE, &sContext)) != NULL) {
+ switch (TypeFromToken(pName->GetTypeToken())) {
+ case mdtTypeDef:
+ while ((!CompareNestedEntryWithTypeDef(pModule->GetMDImport(),
+ mdEncloser,
+ pClsModule->GetAvailableClassHash(),
+ pBucket->GetEncloser())) &&
+ (pBucket = pTable->FindNextNestedClass(pName, pData, &sContext)) != NULL);
+ break;
+ case mdtTypeRef:
+ while ((!CompareNestedEntryWithTypeRef(pModule->GetMDImport(),
+ mdEncloser,
+ pClsModule->GetAvailableClassHash(),
+ pBucket->GetEncloser())) &&
+ (pBucket = pTable->FindNextNestedClass(pName, pData, &sContext)) != NULL);
+ break;
+ case mdtExportedType:
+ while ((!CompareNestedEntryWithExportedType(pModule->GetAssembly()->GetManifestImport(),
+ mdEncloser,
+ pClsModule->GetAvailableClassHash(),
+ pBucket->GetEncloser())) &&
+ (pBucket = pTable->FindNextNestedClass(pName, pData, &sContext)) != NULL);
+ break;
+ default:
+ while ((pBucket->GetEncloser() != pName->GetBucket()) &&
+ (pBucket = pTable->FindNextNestedClass(pName, pData, &sContext)) != NULL);
+ }
+ }
+ if (pBucket) // break on the first success
+ break;
+ }
+ }
+ else {
+ // Check if this non-nested class is in the table of available classes.
+ ModuleIterator i = GetAssembly()->IterateModules();
+ Module *pModule = NULL;
+
+ while (i.Next()) {
+ pModule = i.GetModule();
+ // i.Next() will not return TRUE unless i.GetModule() returns non-NULL.
+ PREFIX_ASSUME(pModule != NULL);
+ if (pModule->IsResource())
+ continue;
+ if (pLookInThisModuleOnly && (pModule != pLookInThisModuleOnly))
+ continue;
+
+ EEClassHashTable* pTable = NULL;
+ if (nhTable == nhCaseSensitive)
+ *ppTable = pTable = pModule->GetAvailableClassHash();
+ else {
+ // currently we support only these two types
+ _ASSERTE(nhTable == nhCaseInsensitive);
+ *ppTable = pTable = pModule->GetAvailableClassCaseInsHash();
+
+ // We have not built the table yet - the caller will handle this case.
+ if (pTable == NULL)
+ return NULL;
+ }
+
+ _ASSERTE(pTable);
+ pBucket = pTable->GetValue(pName, pData, FALSE, NULL);
+ if (pBucket) // break on the first success
+ break;
+ }
+ }
+
+ return pBucket;
+}
+
+#ifndef DACCESS_COMPILE
+
+VOID ClassLoader::PopulateAvailableClassHashTable(Module* pModule,
+ AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ mdTypeDef td;
+ HENUMInternal hTypeDefEnum;
+ IMDInternalImport * pImport = pModule->GetMDImport();
+
+ LPCSTR szWinRtNamespacePrefix = NULL;
+ DWORD cchWinRtNamespacePrefix = 0;
+
+#ifdef FEATURE_COMINTEROP
+ SString ssFileName;
+ StackScratchBuffer ssFileNameBuffer;
+
+ if (pModule->GetAssembly()->IsWinMD() &&
+ !pModule->IsIntrospectionOnly())
+ { // WinMD file in execution context (not ReflectionOnly context) - use its file name as WinRT namespace prefix
+ // (Windows requirement)
+ // Note: Reflection can work on 'unfinished' WinMD files where the types are in the 'wrong' WinMD file
+ // (i.e. the type namespace does not start with the file name).
+
+ _ASSERTE(pModule->GetFile()->IsAssembly()); // No multi-module WinMD file support
+ _ASSERTE(!pModule->GetFile()->GetPath().IsEmpty());
+
+ SplitPath(
+ pModule->GetFile()->GetPath(),
+ NULL, // Drive
+ NULL, // Directory
+ &ssFileName,
+ NULL); // Extension
+
+ szWinRtNamespacePrefix = ssFileName.GetUTF8(ssFileNameBuffer);
+ cchWinRtNamespacePrefix = (DWORD)strlen(szWinRtNamespacePrefix);
+ }
+#endif //FEATURE_COMINTEROP
+
+ IfFailThrow(pImport->EnumTypeDefInit(&hTypeDefEnum));
+
+ // Now loop through all the classdefs adding the CVID and scope to the hash
+ while(pImport->EnumTypeDefNext(&hTypeDefEnum, &td)) {
+
+ AddAvailableClassHaveLock(pModule,
+ td,
+ pamTracker,
+ szWinRtNamespacePrefix,
+ cchWinRtNamespacePrefix);
+ }
+ pImport->EnumTypeDefClose(&hTypeDefEnum);
+}
+
+
+void ClassLoader::LazyPopulateCaseInsensitiveHashTables()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+
+ // Add any unhashed modules into our hash tables, and try again.
+
+ ModuleIterator i = GetAssembly()->IterateModules();
+
+ while (i.Next()) {
+ Module *pModule = i.GetModule();
+ PREFIX_ASSUME(pModule!=NULL);
+ if (pModule->IsResource())
+ continue;
+
+ if (pModule->GetAvailableClassCaseInsHash() == NULL) {
+ AllocMemTracker amTracker;
+ EEClassHashTable *pNewClassCaseInsHash = pModule->GetAvailableClassHash()->MakeCaseInsensitiveTable(pModule, &amTracker);
+
+ LOG((LF_CLASSLOADER, LL_INFO10, "%s's classes being added to case insensitive hash table\n",
+ pModule->GetSimpleName()));
+
+ {
+ CANNOTTHROWCOMPLUSEXCEPTION();
+ FAULT_FORBID();
+
+ amTracker.SuppressRelease();
+ pModule->SetAvailableClassCaseInsHash(pNewClassCaseInsHash);
+ FastInterlockDecrement((LONG*)&m_cUnhashedModules);
+ }
+ }
+ }
+}
+
+/*static*/
+void DECLSPEC_NORETURN ClassLoader::ThrowTypeLoadException(TypeKey *pKey,
+ UINT resIDWhy)
+{
+ STATIC_CONTRACT_THROWS;
+
+ StackSString fullName;
+ StackSString assemblyName;
+ TypeString::AppendTypeKey(fullName, pKey);
+ pKey->GetModule()->GetAssembly()->GetDisplayName(assemblyName);
+ ::ThrowTypeLoadException(fullName, assemblyName, NULL, resIDWhy);
+}
+
+#endif
+
+
+TypeHandle ClassLoader::LoadConstructedTypeThrowing(TypeKey *pKey,
+ LoadTypesFlag fLoadTypes /*= LoadTypes*/,
+ ClassLoadLevel level /*=CLASS_LOADED*/,
+ const InstantiationContext *pInstContext /*=NULL*/)
+{
+ CONTRACT(TypeHandle)
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ if (FORBIDGC_LOADER_USE_ENABLED() || fLoadTypes != LoadTypes) { LOADS_TYPE(CLASS_LOAD_BEGIN); } else { LOADS_TYPE(level); }
+ if (fLoadTypes == DontLoadTypes) SO_TOLERANT; else SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pKey));
+ PRECONDITION(level > CLASS_LOAD_BEGIN && level <= CLASS_LOADED);
+ PRECONDITION(CheckPointer(pInstContext, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL, fLoadTypes==DontLoadTypes ? NULL_OK : NULL_NOT_OK));
+ POSTCONDITION(RETVAL.IsNull() || RETVAL.GetLoadLevel() >= level);
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END
+
+ TypeHandle typeHnd;
+ ClassLoadLevel existingLoadLevel = CLASS_LOAD_BEGIN;
+
+ // Lookup in the classes that this class loader knows about
+
+ if (pKey->HasInstantiation() && ClassLoader::IsTypicalSharedInstantiation(pKey->GetInstantiation()))
+ {
+ _ASSERTE(pKey->GetModule() == ComputeLoaderModule(pKey));
+ typeHnd = pKey->GetModule()->LookupFullyCanonicalInstantiation(pKey->GetTypeToken(), &existingLoadLevel);
+ }
+
+ if (typeHnd.IsNull())
+ {
+ typeHnd = LookupTypeHandleForTypeKey(pKey);
+ if (!typeHnd.IsNull())
+ {
+ existingLoadLevel = typeHnd.GetLoadLevel();
+ if (existingLoadLevel >= level)
+ g_IBCLogger.LogTypeHashTableAccess(&typeHnd);
+ }
+ }
+
+ // If something has been published in the tables, and it's at the right level, just return it
+ if (!typeHnd.IsNull() && existingLoadLevel >= level)
+ {
+ RETURN typeHnd;
+ }
+
+#ifndef DACCESS_COMPILE
+ if (typeHnd.IsNull() && pKey->HasInstantiation())
+ {
+ if (!Generics::CheckInstantiation(pKey->GetInstantiation()))
+ pKey->GetModule()->GetAssembly()->ThrowTypeLoadException(pKey->GetModule()->GetMDImport(), pKey->GetTypeToken(), IDS_CLASSLOAD_INVALIDINSTANTIATION);
+ }
+#endif
+
+ // If we're not loading any types at all, then we're not creating
+ // instantiations either because we're in FORBIDGC_LOADER_USE mode, so
+ // we should bail out here.
+ if (fLoadTypes == DontLoadTypes)
+ RETURN TypeHandle();
+
+#ifndef DACCESS_COMPILE
+ // If we got here, we now have to allocate a new parameterized type.
+ // By definition, forbidgc-users aren't allowed to reach this point.
+ CONSISTENCY_CHECK(!FORBIDGC_LOADER_USE_ENABLED());
+
+ Module *pLoaderModule = ComputeLoaderModule(pKey);
+ RETURN(pLoaderModule->GetClassLoader()->LoadTypeHandleForTypeKey(pKey, typeHnd, level, pInstContext));
+#else
+ DacNotImpl();
+ RETURN(typeHnd);
+#endif
+}
+
+
+/*static*/
+void ClassLoader::EnsureLoaded(TypeHandle typeHnd, ClassLoadLevel level)
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckPointer(typeHnd));
+ PRECONDITION(level > CLASS_LOAD_BEGIN && level <= CLASS_LOADED);
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ if (FORBIDGC_LOADER_USE_ENABLED()) { LOADS_TYPE(CLASS_LOAD_BEGIN); } else { LOADS_TYPE(level); }
+ SUPPORTS_DAC;
+
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+#ifndef DACCESS_COMPILE // Nothing to do for the DAC case
+
+ if (typeHnd.GetLoadLevel() < level)
+ {
+ INTERIOR_STACK_PROBE_CHECK_THREAD;
+
+#ifdef FEATURE_PREJIT
+ if (typeHnd.GetLoadLevel() == CLASS_LOAD_UNRESTOREDTYPEKEY)
+ {
+ typeHnd.DoRestoreTypeKey();
+ }
+#endif
+ if (level > CLASS_LOAD_UNRESTORED)
+ {
+ TypeKey typeKey = typeHnd.GetTypeKey();
+
+ Module *pLoaderModule = ComputeLoaderModule(&typeKey);
+ pLoaderModule->GetClassLoader()->LoadTypeHandleForTypeKey(&typeKey, typeHnd, level);
+ }
+
+ END_INTERIOR_STACK_PROBE;
+ }
+
+#endif // DACCESS_COMPILE
+}
+
+/*static*/
+void ClassLoader::TryEnsureLoaded(TypeHandle typeHnd, ClassLoadLevel level)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifndef DACCESS_COMPILE // Nothing to do for the DAC case
+
+ EX_TRY
+ {
+ ClassLoader::EnsureLoaded(typeHnd, level);
+ }
+ EX_CATCH
+ {
+ // Some type may not load successfully. For eg. generic instantiations
+ // that do not satisfy the constraints of the type arguments.
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+#endif // DACCESS_COMPILE
+}
+
+// This is separated out to avoid the overhead of C++ exception handling in the non-locking case.
+/* static */
+TypeHandle ClassLoader::LookupTypeKeyUnderLock(TypeKey *pKey,
+ EETypeHashTable *pTable,
+ CrstBase *pLock)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ CrstHolder ch(pLock);
+ return pTable->GetValue(pKey);
+}
+
+/* static */
+TypeHandle ClassLoader::LookupTypeKey(TypeKey *pKey,
+ EETypeHashTable *pTable,
+ CrstBase *pLock,
+ BOOL fCheckUnderLock)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(pKey));
+ PRECONDITION(pKey->IsConstructed());
+ PRECONDITION(CheckPointer(pTable));
+ PRECONDITION(!fCheckUnderLock || CheckPointer(pLock));
+ MODE_ANY;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ TypeHandle th;
+
+ // If this is the GC thread, and we're hosted, we're in a sticky situation with
+ // SQL where we may have suspended another thread while doing Thread::SuspendRuntime.
+ // In this case, we have the issue that a thread holding this lock could be
+ // suspended, perhaps implicitly because the active thread on the SQL scheduler
+ // has been suspended by the GC thread. In such a case, we need to skip taking
+ // the lock. We can be sure that there will be no races in such a condition because
+ // we will only be looking for types that are already loaded, or for a type that
+ // is not loaded, but we will never cause the type to get loaded, and so the result
+ // of the lookup will not change.
+#ifndef DACCESS_COMPILE
+ if (fCheckUnderLock && !(IsGCThread() && CLRTaskHosted()))
+#else
+ if (fCheckUnderLock)
+#endif // DACCESS_COMPILE
+ {
+ th = LookupTypeKeyUnderLock(pKey, pTable, pLock);
+ }
+ else
+ {
+ th = pTable->GetValue(pKey);
+ }
+ return th;
+}
+
+
+#ifdef FEATURE_PREJIT
+/* static */
+TypeHandle ClassLoader::LookupInPreferredZapModule(TypeKey *pKey, BOOL fCheckUnderLock)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(pKey));
+ PRECONDITION(pKey->IsConstructed());
+ MODE_ANY;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ // First look for an NGEN'd type in the preferred ngen module
+ TypeHandle th;
+ PTR_Module pPreferredZapModule = Module::ComputePreferredZapModule(pKey);
+
+ if (pPreferredZapModule != NULL && pPreferredZapModule->HasNativeImage())
+ {
+ th = LookupTypeKey(pKey,
+ pPreferredZapModule->GetAvailableParamTypes(),
+ &pPreferredZapModule->GetClassLoader()->m_AvailableTypesLock,
+ fCheckUnderLock);
+ }
+
+ return th;
+}
+#endif // FEATURE_PREJIT
+
+
+/* static */
+TypeHandle ClassLoader::LookupInLoaderModule(TypeKey *pKey, BOOL fCheckUnderLock)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(pKey));
+ PRECONDITION(pKey->IsConstructed());
+ MODE_ANY;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ Module *pLoaderModule = ComputeLoaderModule(pKey);
+ PREFIX_ASSUME(pLoaderModule!=NULL);
+
+ return LookupTypeKey(pKey,
+ pLoaderModule->GetAvailableParamTypes(),
+ &pLoaderModule->GetClassLoader()->m_AvailableTypesLock,
+ fCheckUnderLock);
+}
+
+
+/* static */
+TypeHandle ClassLoader::LookupTypeHandleForTypeKey(TypeKey *pKey)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ // Make an initial lookup without taking any locks.
+ TypeHandle th = LookupTypeHandleForTypeKeyInner(pKey, FALSE);
+
+ // A non-null TypeHandle for the above lookup indicates success
+ // A null TypeHandle only indicates "well, it might have been there,
+ // try again with a lock". This kind of negative result will
+ // only happen while accessing the underlying EETypeHashTable
+ // during a resize, i.e. very rarely. In such a case, we just
+ // perform the lookup again, but indicate that appropriate locks
+ // should be taken.
+
+ if (th.IsNull())
+ {
+ th = LookupTypeHandleForTypeKeyInner(pKey, TRUE);
+ }
+
+ return th;
+}
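+
+// Design note: the unlocked probe above is safe because a concurrent hash
+// table resize can only produce a false miss, never a false hit; the locked
+// retry turns that rare false miss back into an authoritative answer.
+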
+/* static */
+TypeHandle ClassLoader::LookupTypeHandleForTypeKeyInner(TypeKey *pKey, BOOL fCheckUnderLock)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(pKey));
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ // Check if it's the typical instantiation. In this case it's not stored in the same
+ // way as other constructed types.
+ if (!pKey->IsConstructed() ||
+ (pKey->GetKind() == ELEMENT_TYPE_CLASS && ClassLoader::IsTypicalInstantiation(pKey->GetModule(),
+ pKey->GetTypeToken(),
+ pKey->GetInstantiation())))
+ {
+ return TypeHandle(pKey->GetModule()->LookupTypeDef(pKey->GetTypeToken()));
+ }
+
+#ifdef FEATURE_PREJIT
+ // The following ways of finding a constructed type should be mutually exclusive!
+ // 1. Look for a zapped item in the PreferredZapModule
+ // 2. Look for an unzapped (JIT-loaded) item in the LoaderModule
+
+ TypeHandle thPZM = LookupInPreferredZapModule(pKey, fCheckUnderLock);
+ if (!thPZM.IsNull())
+ {
+ return thPZM;
+ }
+#endif // FEATURE_PREJIT
+
+ // Next look in the loader module. This is where the item is guaranteed to live if
+ // it is not latched from an NGEN image, i.e. if it is JIT loaded.
+ // If the item is not NGEN'd then the loader module may
+ // be different from pPreferredZapModule. If they are the same then
+ // we can reuse the results of the lookup above.
+ TypeHandle thLM = LookupInLoaderModule(pKey, fCheckUnderLock);
+ if (!thLM.IsNull())
+ {
+ return thLM;
+ }
+
+ return TypeHandle();
+}
+
+
+//---------------------------------------------------------------------------
+// ClassLoader::TryFindDynLinkZapType
+//
+// This is a major routine in the process of finding and using
+// zapped generic instantiations (excluding those which were zapped into
+// their PreferredZapModule).
+//
+// DynLinkZapItems are generic instantiations that may have been NGEN'd
+// into more than one NGEN image (e.g. the code and TypeHandle for
+// List<int> may in principle be zapped into several client images - it is theoretically
+// an NGEN policy decision how often this is done, though for now we
+// have hard-baked a strategy).
+//
+// There are lots of potential problems with this kind of duplication,
+// and the way we get around nearly all of them is to make sure that
+// we use at most one "unique" copy of each item
+// at runtime. Thus we keep tables in the SharedDomain and the AppDomain indicating
+// which unique items have been chosen. If an item is "loaded" by this technique
+// then it will not be loaded by any other technique.
+//
+// Note generic instantiations may have the good fortune to be zapped
+// into the "PreferredZapModule". If so we can eager bind to them and
+// they will not be considered to be DynLinkZapItems. We always
+// look in the PreferredZapModule first, and we do not add an entry to the
+// DynLinkZapItems table for this case.
+//
+// Zap references to DynLinkZapItems are always via encoded fixups, except
+// for a few intra-module references when one DynLinkZapItem is "TightlyBound"
+// to another, e.g. a canonical DynLinkZap MethodTable may directly refer to
+// its EEClass - this is because we know that if one is used at runtime then the
+// other will also be. These items should be thought of as together constituting
+// one DynLinkedZapItem.
+//
+// This function searches for a copy of the instantiation in various NGEN images.
+// This is effectively like doing a load, since we are choosing which copy of the instantiation
+// to use from among a number of potential candidates. We must hold the loading lock
+// for this item before we can do this, to make sure no other thread chooses a
+// different copy of the instantiation, and that no other thread is JIT-loading
+// the instantiation.
+
+
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_FULL_NGEN
+/* static */
+TypeHandle ClassLoader::TryFindDynLinkZapType(TypeKey *pKey)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pKey));
+ PRECONDITION(pKey->IsConstructed());
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // For the introspection-only case, we can skip this step as introspection assemblies
+ // do not use NGEN images.
+ if (pKey->IsIntrospectionOnly())
+ return TypeHandle();
+
+ // Never use dyn link zap items during ngen time. We will independently decide later
+ // whether we want to store the item into the ngen image or not.
+ // Note that it is not a good idea to make decisions based on the list of dependencies here,
+ // since that list may not be fully populated yet.
+ if (IsCompilationProcess())
+ return TypeHandle();
+
+ TypeHandle th = TypeHandle();
+
+#ifndef CROSSGEN_COMPILE
+ // We need to know which domain the item must live in (DomainNeutral or AppDomain)
+ // Note we can't use the domain from GetLoaderModule()->GetDomain() because at NGEN
+ // time this may not be accurate (we may be deliberately duplicating a domain-neutral
+ // instantiation into a domain-specific image, in the sense that the LoaderModule
+ // returned by ComputeLoaderModule may be the current module being
+ // NGEN'd)....
+
+ BaseDomain * pRequiredDomain = BaseDomain::ComputeBaseDomain(pKey);
+
+ // Next look in each ngen'ed image in turn
+
+ // Searching the shared domain and the app domain are slightly different.
+ if (pRequiredDomain->IsSharedDomain())
+ {
+ // This switch to cooperative mode makes the iteration below thread safe. It ensures that the underlying
+ // async HashMap storage is not going to disappear while we are iterating it. Other uses of SharedAssemblyIterator
+ // have the same problem, but I have fixed just this one as a targeted ask-mode fix.
+ GCX_COOP();
+
+ // Searching for a SharedDomain instantiation involves searching all shared assemblies....
+ // Note we may choose to use an instantiation from an assembly that is from an NGEN
+ // image that is not, logically speaking, part of the currently running AppDomain. This
+ // takes advantage of the fact that at the moment SharedDomain NGEN images are never unloaded.
+ // Thus SharedDomain NGEN images effectively contribute all their instantiations to all
+ // AppDomains.
+ //
+ // <NOTE> This will have to change if we ever start unloading NGEN images from the SharedDomain </NOTE>
+ SharedDomain::SharedAssemblyIterator assem;
+ while (th.IsNull() && assem.Next())
+ {
+ ModuleIterator i = assem.GetAssembly()->IterateModules();
+
+ while (i.Next())
+ {
+ Module *pModule = i.GetModule();
+ if (!pModule->HasNativeImage())
+ continue;
+
+ // If the module hasn't reached FILE_LOADED in some domain, it cannot provide candidate instantiations
+ if (!pModule->IsReadyForTypeLoad())
+ continue;
+
+ TypeHandle thFromZapModule = pModule->GetAvailableParamTypes()->GetValue(pKey);
+
+ // Check that the item really is a zapped item, i.e. that it has not been JIT-loaded to the module
+ if (thFromZapModule.IsNull() || !thFromZapModule.IsZapped())
+ continue;
+
+ th = thFromZapModule;
+ }
+ }
+ }
+ else
+ {
+ // Searching for a domain-specific instantiation involves searching all
+ // domain-specific assemblies in the relevant AppDomain....
+
+ AppDomain * pDomain = pRequiredDomain->AsAppDomain();
+
+ _ASSERTE(!(pKey->IsIntrospectionOnly()));
+ AppDomain::AssemblyIterator assemblyIterator = pDomain->IterateAssembliesEx(
+ (AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (th.IsNull() && assemblyIterator.Next(pDomainAssembly.This()))
+ {
+ CollectibleAssemblyHolder<Assembly *> pAssembly = pDomainAssembly->GetLoadedAssembly();
+ // Make sure the domain of the NGEN'd images associated with the assembly matches...
+ if (pAssembly->GetDomain() == pRequiredDomain)
+ {
+ DomainAssembly::ModuleIterator i = pDomainAssembly->IterateModules(kModIterIncludeLoaded);
+ while (th.IsNull() && i.Next())
+ {
+ Module * pModule = i.GetLoadedModule();
+ if (!pModule->HasNativeImage())
+ continue;
+
+ // If the module hasn't reached FILE_LOADED in some domain, it cannot provide candidate instantiations
+ if (!pModule->IsReadyForTypeLoad())
+ continue;
+
+ TypeHandle thFromZapModule = pModule->GetAvailableParamTypes()->GetValue(pKey);
+
+ // Check that the item really is a zapped item
+ if (thFromZapModule.IsNull() || !thFromZapModule.IsZapped())
+ continue;
+
+ th = thFromZapModule;
+ }
+ }
+ }
+ }
+#endif // CROSSGEN_COMPILE
+
+ return th;
+}
+#endif // FEATURE_FULL_NGEN
+#endif // !DACCESS_COMPILE
+
+// FindClassModuleThrowing discovers which module the type you're looking for is in and loads the Module if necessary.
+// Basically, it iterates through all of the assembly's modules until a name match is found in a module's
+// AvailableClassHashTable.
+//
+// The possible outcomes are:
+//
+// - Function returns TRUE - class exists and we successfully found/created the containing Module. See below
+// for how to deconstruct the results.
+// - Function returns FALSE - class affirmatively NOT found (that means it doesn't exist as a regular type although
+// it could also be a parameterized type)
+// - Function throws - OOM or some other reason we couldn't do the job (if it's a case-sensitive search
+// and you're looking for an already-loaded type, or you've set the TokenNotToLoad,
+// we are guaranteed not to find a reason to throw.)
+//
+//
+// If it succeeds (returns TRUE), one of the following will occur. Check pType->IsNull() to discriminate.
+//
+// 1. *pType: set to the null TypeHandle()
+// *ppModule: set to the owning Module
+// *pmdClassToken: set to the typedef
+// *pmdFoundExportedType: if this name bound to an ExportedType, this contains the mdtExportedType token (otherwise,
+// it's set to mdTokenNil.) You need this because in this case, *pmdClassToken is just
+// a best guess and you need to verify it. (The division of labor between this
+// and LoadTypeHandle could definitely be better!)
+//
+// 2. *pType: set to non-null TypeHandle()
+// This means someone else had already done this same lookup before you and caused the actual
+// TypeHandle to be cached. Since we know that's what you *really* wanted, we'll just forget the
+// Module/typedef stuff and give you the actual TypeHandle.
+//
+//
+BOOL
+ClassLoader::FindClassModuleThrowing(
+ const NameHandle * pOriginalName,
+ TypeHandle * pType,
+ mdToken * pmdClassToken,
+ Module ** ppModule,
+ mdToken * pmdFoundExportedType,
+ EEClassHashEntry_t ** ppEntry,
+ Module * pLookInThisModuleOnly,
+ Loader::LoadFlag loadFlag)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ PRECONDITION(CheckPointer(pOriginalName));
+ PRECONDITION(CheckPointer(ppModule));
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ NameHandleTable nhTable = nhCaseSensitive; // just to initialize this ...
+
+ // Make a copy of the original name which we can modify (to lowercase)
+ NameHandle localName = *pOriginalName;
+ NameHandle * pName = &localName;
+
+ switch (pName->GetTable())
+ {
+ case nhCaseInsensitive:
+ {
+#ifndef DACCESS_COMPILE
+ // GC-type users should only be loading types through tokens.
+#ifdef _DEBUG_IMPL
+ _ASSERTE(!FORBIDGC_LOADER_USE_ENABLED());
+#endif
+
+ // Use the case insensitive table
+ nhTable = nhCaseInsensitive;
+
+ // Create a lowercase version of the namespace and name
+ LPUTF8 pszLowerNameSpace = NULL;
+ LPUTF8 pszLowerClassName = NULL;
+ int allocLen;
+
+ if (pName->GetNameSpace())
+ {
+ allocLen = InternalCasingHelper::InvariantToLower(
+ NULL,
+ 0,
+ pName->GetNameSpace());
+ if (allocLen == 0)
+ {
+ return FALSE;
+ }
+
+ pszLowerNameSpace = (LPUTF8)_alloca(allocLen);
+ if (allocLen == 1)
+ {
+ *pszLowerNameSpace = '\0';
+ }
+ else if (!InternalCasingHelper::InvariantToLower(
+ pszLowerNameSpace,
+ allocLen,
+ pName->GetNameSpace()))
+ {
+ return FALSE;
+ }
+ }
+
+ _ASSERTE(pName->GetName() != NULL);
+ allocLen = InternalCasingHelper::InvariantToLower(NULL, 0, pName->GetName());
+ if (allocLen == 0)
+ {
+ return FALSE;
+ }
+
+ pszLowerClassName = (LPUTF8)_alloca(allocLen);
+ if (!InternalCasingHelper::InvariantToLower(
+ pszLowerClassName,
+ allocLen,
+ pName->GetName()))
+ {
+ return FALSE;
+ }
+
+ // Substitute the lowercase version of the name.
+ // The fields will be released when we leave this scope.
+ pName->SetName(pszLowerNameSpace, pszLowerClassName);
+ break;
+#else
+ DacNotImpl();
+ break;
+#endif // #ifndef DACCESS_COMPILE
+ }
+ case nhCaseSensitive:
+ nhTable = nhCaseSensitive;
+ break;
+ }
+
+ // Remember if there are any unhashed modules. We must do this before
+ // the actual lookup to avoid a race condition with other threads doing lookups.
+#ifdef LOGGING
+ BOOL incomplete = (m_cUnhashedModules > 0);
+#endif
+
+ HashDatum Data;
+ EEClassHashTable * pTable = NULL;
+ EEClassHashEntry_t * pBucket = GetClassValue(
+ nhTable,
+ pName,
+ &Data,
+ &pTable,
+ pLookInThisModuleOnly);
+
+ if (pBucket == NULL)
+ {
+ if (nhTable == nhCaseInsensitive)
+ {
+ AvailableClasses_LockHolder lh(this);
+
+ // Try again with the lock. This will protect against another thread reallocating
+ // the hash table underneath us
+ pBucket = GetClassValue(
+ nhTable,
+ pName,
+ &Data,
+ &pTable,
+ pLookInThisModuleOnly);
+
+#ifndef DACCESS_COMPILE
+ if ((pBucket == NULL) && (m_cUnhashedModules > 0))
+ {
+ LazyPopulateCaseInsensitiveHashTables();
+
+ // Try yet again with the new classes added
+ pBucket = GetClassValue(
+ nhTable,
+ pName,
+ &Data,
+ &pTable,
+ pLookInThisModuleOnly);
+ }
+#endif
+ }
+ }
+
+ if (pBucket == NULL)
+ {
+#if defined(_DEBUG_IMPL) && !defined(DACCESS_COMPILE)
+ LPCUTF8 szName = pName->GetName();
+ if (szName == NULL)
+ szName = "<UNKNOWN>";
+ LOG((LF_CLASSLOADER, LL_INFO10, "Failed to find type \"%s\", assembly \"%ws\" in hash table. Incomplete = %d\n",
+ szName, GetAssembly()->GetDebugName(), incomplete));
+#endif
+ return FALSE;
+ }
+
+ if (pName->GetTable() == nhCaseInsensitive)
+ {
+ _ASSERTE(Data);
+ pBucket = PTR_EEClassHashEntry(Data);
+ Data = pBucket->GetData();
+ }
+
+ // Lower bit is a discriminator. If the lower bit is NOT SET, it means we have
+ // a TypeHandle. Otherwise, we have a Module/CL.
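+ // (For example, a datum whose low bit is set, such as 0x0000A501, encodes a
+ // compressed Module/typedef pair; TypeHandle pointers are always at least
+ // 4-byte aligned, which is what leaves the low bit free as a tag.)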
+ if ((dac_cast<TADDR>(Data) & EECLASSHASH_TYPEHANDLE_DISCR) == 0)
+ {
+ TypeHandle t = TypeHandle::FromPtr(Data);
+ _ASSERTE(!t.IsNull());
+
+ *pType = t;
+ if (ppEntry != NULL)
+ {
+ *ppEntry = pBucket;
+ }
+ return TRUE;
+ }
+
+ // We have a Module/CL
+ if (!pTable->UncompressModuleAndClassDef(Data,
+ loadFlag,
+ ppModule,
+ pmdClassToken,
+ pmdFoundExportedType))
+ {
+ _ASSERTE(loadFlag != Loader::Load);
+ return FALSE;
+ }
+
+ *pType = TypeHandle();
+ if (ppEntry != NULL)
+ {
+ *ppEntry = pBucket;
+ }
+ return TRUE;
+} // ClassLoader::FindClassModuleThrowing
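+
+// Illustrative sketch of consuming the two success shapes documented above
+// (all variable names hypothetical):
+//
+//     TypeHandle th;
+//     mdToken cl, foundET;
+//     Module *pMod;
+//     EEClassHashEntry_t *pEntry;
+//     if (pLoader->FindClassModuleThrowing(pName, &th, &cl, &pMod, &foundET,
+//                                          &pEntry, NULL, Loader::Load))
+//     {
+//         if (!th.IsNull())
+//         {   // Case 2: a cached TypeHandle - use it directly.
+//         }
+//         else
+//         {   // Case 1: use pMod + cl; if foundET != mdTokenNil, cl is only a
+//             // hint and must be verified against the ExportedType.
+//         }
+//     }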
+
+#ifndef DACCESS_COMPILE
+// Returns true if the full name (namespace+name) of pName matches that
+// of typeHnd; otherwise false. Because this is nothrow, it will default
+// to false for all exceptions (such as OOM).
+bool CompareNameHandleWithTypeHandleNoThrow(
+ const NameHandle * pName,
+ TypeHandle typeHnd)
+{
+ bool fRet = false;
+
+ EX_TRY
+ {
+ // This block is specifically designed to handle transient faults such
+ // as OOM exceptions.
+ CONTRACT_VIOLATION(FaultViolation | ThrowsViolation);
+ StackSString ssBuiltName;
+ ns::MakePath(ssBuiltName,
+ StackSString(SString::Utf8, pName->GetNameSpace()),
+ StackSString(SString::Utf8, pName->GetName()));
+ StackSString ssName;
+ typeHnd.GetName(ssName);
+ fRet = ssName.Equals(ssBuiltName) == TRUE;
+ }
+ EX_CATCH
+ {
+ // Technically, the above operations should never result in a non-OOM
+ // exception, but we'll put the rethrow line in there just in case.
+ CONSISTENCY_CHECK(!GET_EXCEPTION()->IsTerminal());
+ RethrowTerminalExceptions;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return fRet;
+}
+#endif // #ifndef DACCESS_COMPILE
+
+// 1024 seems like a good bound for detecting a loop in the type-forwarding chain.
+static const UINT32 const_cMaxTypeForwardingChainSize = 1024;
+
+// Does not throw an exception if the type was not found. Use LoadTypeHandleThrowIfFailed()
+// instead if you need that.
+//
+// Returns:
+// pName->m_pBucket
+// Will be set to the 'final' TypeDef bucket if pName->GetTokenType() is mdtBaseType.
+//
+TypeHandle
+ClassLoader::LoadTypeHandleThrowing(
+ NameHandle * pName,
+ ClassLoadLevel level,
+ Module * pLookInThisModuleOnly /*=NULL*/)
+{
+ CONTRACT(TypeHandle) {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ DAC_LOADS_TYPE(level, !pName->OKToLoad());
+ PRECONDITION(level > CLASS_LOAD_BEGIN && level <= CLASS_LOADED);
+ PRECONDITION(CheckPointer(pName));
+ POSTCONDITION(RETVAL.IsNull() || RETVAL.GetLoadLevel() >= level);
+ MODE_ANY;
+ SUPPORTS_DAC;
+ } CONTRACT_END
+
+ TypeHandle typeHnd;
+ INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(RETURN_FROM_INTERIOR_PROBE(TypeHandle()));
+
+ Module * pFoundModule = NULL;
+ mdToken FoundCl;
+ EEClassHashEntry_t * pEntry = NULL;
+ mdExportedType FoundExportedType = mdTokenNil;
+
+ UINT32 cLoopIterations = 0;
+
+ ClassLoader * pClsLdr = this;
+
+ while (true)
+ {
+ if (cLoopIterations++ >= const_cMaxTypeForwardingChainSize)
+ { // If we've looped too many times due to type forwarding, return null TypeHandle
+ // We would prefer to return a format exception, but the original behaviour
+ // was to detect a stack-overflow possibility and return a null, so
+ // we need to maintain this.
+ typeHnd = TypeHandle();
+ break;
+ }
+
+ // Look outside the lock (though we're actually still a long way from the
+ // lock at this point...). This may discover that the type is actually
+ // defined in another module...
+
+ if (!pClsLdr->FindClassModuleThrowing(
+ pName,
+ &typeHnd,
+ &FoundCl,
+ &pFoundModule,
+ &FoundExportedType,
+ &pEntry,
+ pLookInThisModuleOnly,
+ pName->OKToLoad() ? Loader::Load
+ : Loader::DontLoad))
+ { // Didn't find anything, no point looping indefinitely
+ break;
+ }
+ _ASSERTE(pEntry != NULL);
+
+ if (pName->GetTypeToken() == mdtBaseType)
+ { // We should return the found bucket in pName
+ pName->SetBucket(pEntry);
+ }
+
+ if (!typeHnd.IsNull())
+ { // Found the cached value, or a constructed type
+ if (typeHnd.GetLoadLevel() < level)
+ {
+ typeHnd = pClsLdr->LoadTypeDefThrowing(
+ typeHnd.GetModule(),
+ typeHnd.GetCl(),
+ ClassLoader::ReturnNullIfNotFound,
+ ClassLoader::PermitUninstDefOrRef, // When loading by name we always permit naked type defs/refs
+ pName->GetTokenNotToLoad(),
+ level);
+ }
+ break;
+ }
+
+ // Found a cl, pModule pair
+
+ // If the found module's class loader is not the same as the current class loader,
+ // then this is a forwarded type and we want to do something else (see
+ // code:#LoadTypeHandle_TypeForwarded).
+ if (pFoundModule->GetClassLoader() == pClsLdr)
+ {
+ BOOL fTrustTD = TRUE;
+#ifndef DACCESS_COMPILE
+ CONTRACT_VIOLATION(ThrowsViolation);
+ BOOL fVerifyTD = (FoundExportedType != mdTokenNil) &&
+ !pClsLdr->GetAssembly()->GetSecurityDescriptor()->IsFullyTrusted();
+
+ // If this is an exported type with a mdTokenNil class token, then the
+ // exported type did not give a typedefID hint. We won't be able to trust the typedef
+ // here.
+ if ((FoundExportedType != mdTokenNil) && (FoundCl == mdTokenNil))
+ {
+ fVerifyTD = TRUE;
+ fTrustTD = FALSE;
+ }
+ // verify that FoundCl is a valid token for pFoundModule, because
+ // it may be just the hint saved in an ExportedType in another scope
+ else if (fVerifyTD)
+ {
+ fTrustTD = pFoundModule->GetMDImport()->IsValidToken(FoundCl);
+ }
+#endif // #ifndef DACCESS_COMPILE
+
+ if (fTrustTD)
+ {
+ typeHnd = pClsLdr->LoadTypeDefThrowing(
+ pFoundModule,
+ FoundCl,
+ ClassLoader::ReturnNullIfNotFound,
+ ClassLoader::PermitUninstDefOrRef, // when loading by name we always permit naked type defs/refs
+ pName->GetTokenNotToLoad(),
+ level);
+ }
+#ifndef DACCESS_COMPILE
+ // If we used a TypeDef saved in an ExportedType and we didn't verify
+ // the hash for this internal module, don't trust the TD value.
+ if (fVerifyTD)
+ {
+ if (typeHnd.IsNull() || !CompareNameHandleWithTypeHandleNoThrow(pName, typeHnd))
+ {
+ if (SUCCEEDED(pClsLdr->FindTypeDefByExportedType(
+ pClsLdr->GetAssembly()->GetManifestImport(),
+ FoundExportedType,
+ pFoundModule->GetMDImport(),
+ &FoundCl)))
+ {
+ typeHnd = pClsLdr->LoadTypeDefThrowing(
+ pFoundModule,
+ FoundCl,
+ ClassLoader::ReturnNullIfNotFound,
+ ClassLoader::PermitUninstDefOrRef,
+ pName->GetTokenNotToLoad(),
+ level);
+ }
+ else
+ {
+ typeHnd = TypeHandle();
+ }
+ }
+ }
+#endif // #ifndef DACCESS_COMPILE
+ break;
+ }
+ else
+ { //#LoadTypeHandle_TypeForwarded
+ // pName is a host instance so it's okay to set fields in it in a DAC build
+ EEClassHashEntry_t * pBucket = pName->GetBucket();
+
+ if (pBucket != NULL)
+ { // Reset pName's bucket entry
+
+ // We will be searching for the type name again, so set the nesting/context type to the
+ // encloser of just found type
+ pName->SetBucket(pBucket->GetEncloser());
+ }
+
+ // Update the class loader for the new module/token pair.
+ pClsLdr = pFoundModule->GetClassLoader();
+ pLookInThisModuleOnly = NULL;
+ }
+
+#ifndef DACCESS_COMPILE
+ // Replace AvailableClasses Module entry with found TypeHandle
+ if (!typeHnd.IsNull() &&
+ typeHnd.IsRestored() &&
+ (pEntry != NULL) &&
+ (pEntry->GetData() != typeHnd.AsPtr()))
+ {
+ pEntry->SetData(typeHnd.AsPtr());
+ }
+#endif // !DACCESS_COMPILE
+ }
+
+ END_INTERIOR_STACK_PROBE;
+ RETURN typeHnd;
+} // ClassLoader::LoadTypeHandleThrowing
+
+/* static */
+TypeHandle ClassLoader::LoadPointerOrByrefTypeThrowing(CorElementType typ,
+ TypeHandle baseType,
+ LoadTypesFlag fLoadTypes/*=LoadTypes*/,
+ ClassLoadLevel level/*=CLASS_LOADED*/)
+{
+ CONTRACT(TypeHandle)
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ if (FORBIDGC_LOADER_USE_ENABLED() || fLoadTypes != LoadTypes) { LOADS_TYPE(CLASS_LOAD_BEGIN); } else { LOADS_TYPE(level); }
+ MODE_ANY;
+ PRECONDITION(CheckPointer(baseType));
+ PRECONDITION(typ == ELEMENT_TYPE_BYREF || typ == ELEMENT_TYPE_PTR);
+ PRECONDITION(level > CLASS_LOAD_BEGIN && level <= CLASS_LOADED);
+ POSTCONDITION(CheckPointer(RETVAL, ((fLoadTypes == LoadTypes) ? NULL_NOT_OK : NULL_OK)));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END
+
+ TypeKey key(typ, baseType);
+ RETURN(LoadConstructedTypeThrowing(&key, fLoadTypes, level));
+}
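+
+// Illustrative usage sketch (thInt is a hypothetical TypeHandle for
+// System.Int32): construct the pointer type "int*":
+//
+//     TypeHandle thPtr = ClassLoader::LoadPointerOrByrefTypeThrowing(
+//         ELEMENT_TYPE_PTR, thInt);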
+
+/* static */
+TypeHandle ClassLoader::LoadNativeValueTypeThrowing(TypeHandle baseType,
+ LoadTypesFlag fLoadTypes/*=LoadTypes*/,
+ ClassLoadLevel level/*=CLASS_LOADED*/)
+{
+ CONTRACT(TypeHandle)
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ PRECONDITION(CheckPointer(baseType));
+ PRECONDITION(baseType.AsMethodTable()->IsValueType());
+ PRECONDITION(level > CLASS_LOAD_BEGIN && level <= CLASS_LOADED);
+ POSTCONDITION(CheckPointer(RETVAL, ((fLoadTypes == LoadTypes) ? NULL_NOT_OK : NULL_OK)));
+ }
+ CONTRACT_END
+
+ TypeKey key(ELEMENT_TYPE_VALUETYPE, baseType);
+ RETURN(LoadConstructedTypeThrowing(&key, fLoadTypes, level));
+}
+
+/* static */
+TypeHandle ClassLoader::LoadFnptrTypeThrowing(BYTE callConv,
+ DWORD ntypars,
+ TypeHandle* inst,
+ LoadTypesFlag fLoadTypes/*=LoadTypes*/,
+ ClassLoadLevel level/*=CLASS_LOADED*/)
+{
+ CONTRACT(TypeHandle)
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ if (FORBIDGC_LOADER_USE_ENABLED() || fLoadTypes != LoadTypes) { LOADS_TYPE(CLASS_LOAD_BEGIN); } else { LOADS_TYPE(level); }
+ PRECONDITION(level > CLASS_LOAD_BEGIN && level <= CLASS_LOADED);
+ POSTCONDITION(CheckPointer(RETVAL, ((fLoadTypes == LoadTypes) ? NULL_NOT_OK : NULL_OK)));
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END
+
+ TypeKey key(callConv, ntypars, inst);
+ RETURN(LoadConstructedTypeThrowing(&key, fLoadTypes, level));
+}
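+
+// Illustrative sketch (hypothetical handles): the inst array holds the return
+// type at index 0 followed by the argument types. Assuming ntypars is the
+// argument count (the array then has ntypars + 1 entries, matching the
+// GetNumArgs() + 1 usage earlier in this file), "int (*)(Object)" would be:
+//
+//     TypeHandle retAndArgs[2] = { thInt, thObject }; // [0] is the return type
+//     TypeHandle thFnPtr = ClassLoader::LoadFnptrTypeThrowing(
+//         IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, retAndArgs);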
+
+// Find an instantiation of a generic type if it has already been created.
+// If typeDef is not a generic type or is already instantiated then throw an exception.
+// If its arity does not match ntypars then throw an exception.
+// Value will be non-null if we're loading types.
+/* static */
+TypeHandle ClassLoader::LoadGenericInstantiationThrowing(Module *pModule,
+ mdTypeDef typeDef,
+ Instantiation inst,
+ LoadTypesFlag fLoadTypes/*=LoadTypes*/,
+ ClassLoadLevel level/*=CLASS_LOADED*/,
+ const InstantiationContext *pInstContext/*=NULL*/,
+ BOOL fFromNativeImage /*=FALSE*/)
+{
+ // This can be called in FORBIDGC_LOADER_USE mode by the debugger to find
+ // a particular generic type instance that is already loaded.
+ CONTRACT(TypeHandle)
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED() || fLoadTypes != LoadTypes) { LOADS_TYPE(CLASS_LOAD_BEGIN); } else { LOADS_TYPE(level); }
+ PRECONDITION(CheckPointer(pModule));
+ MODE_ANY;
+ PRECONDITION(level > CLASS_LOAD_BEGIN && level <= CLASS_LOADED);
+ PRECONDITION(CheckPointer(pInstContext, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL, ((fLoadTypes == LoadTypes) ? NULL_NOT_OK : NULL_OK)));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END
+
+ // Essentially all checks to determine if a generic instantiation of a type
+ // is well-formed go in this method, i.e. this is the
+ // "choke" point through which all attempts
+ // to create an instantiation flow. There is a similar choke point for generic
+ // methods in genmeth.cpp.
+
+ if (inst.IsEmpty() || ClassLoader::IsTypicalInstantiation(pModule, typeDef, inst))
+ {
+ TypeHandle th = LoadTypeDefThrowing(pModule, typeDef,
+ ThrowIfNotFound,
+ PermitUninstDefOrRef,
+ fLoadTypes == DontLoadTypes ? tdAllTypes : tdNoTypes,
+ level,
+ fFromNativeImage ? NULL : &inst);
+ _ASSERTE(th.GetNumGenericArgs() == inst.GetNumArgs());
+ RETURN th;
+ }
+
+ if (!fFromNativeImage)
+ {
+ TypeHandle th = ClassLoader::LoadTypeDefThrowing(pModule, typeDef,
+ ThrowIfNotFound,
+ PermitUninstDefOrRef,
+ fLoadTypes == DontLoadTypes ? tdAllTypes : tdNoTypes,
+ level,
+ fFromNativeImage ? NULL : &inst);
+ _ASSERTE(th.GetNumGenericArgs() == inst.GetNumArgs());
+ }
+
+ TypeKey key(pModule, typeDef, inst);
+
+#ifndef DACCESS_COMPILE
+ // To avoid loading useless shared instantiations, normalize shared instantiations to the canonical form
+ // (e.g. Dictionary<String,_Canon> -> Dictionary<_Canon,_Canon>)
+ // The denormalized shared instantiations should be needed only during JITing, so it is fine to skip this
+ // for DACCESS_COMPILE.
+ if (TypeHandle::IsCanonicalSubtypeInstantiation(inst) && !IsCanonicalGenericInstantiation(inst))
+ {
+ RETURN(ClassLoader::LoadCanonicalGenericInstantiation(&key, fLoadTypes, level));
+ }
+#endif
+
+ RETURN(LoadConstructedTypeThrowing(&key, fLoadTypes, level, pInstContext));
+}
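+
+// Illustrative usage sketch (tdMyType and thInt are hypothetical): instantiate
+// MyType<int> given the generic definition's typedef token:
+//
+//     TypeHandle args[] = { thInt };
+//     TypeHandle th = ClassLoader::LoadGenericInstantiationThrowing(
+//         pModule, tdMyType, Instantiation(args, 1));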
+
+// For non-nested classes, gets the ExportedType name and finds the corresponding
+// TypeDef.
+// For nested classes, gets the name of the ExportedType and its encloser.
+// Recursively gets and keeps the name for each encloser until we have the top
+// level one. Gets the TypeDef token for that. Then, returns from the
+// recursion, using the last found TypeDef token in order to find the
+// next nested level down TypeDef token. Finally, returns the TypeDef
+// token for the type we care about.
+/*static*/
+HRESULT ClassLoader::FindTypeDefByExportedType(IMDInternalImport *pCTImport, mdExportedType mdCurrent,
+ IMDInternalImport *pTDImport, mdTypeDef *mtd)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ mdToken mdImpl;
+ LPCSTR szcNameSpace;
+ LPCSTR szcName;
+ HRESULT hr;
+
+ IfFailRet(pCTImport->GetExportedTypeProps(
+ mdCurrent,
+ &szcNameSpace,
+ &szcName,
+ &mdImpl,
+ NULL, //binding
+ NULL)); //flags
+
+ if ((TypeFromToken(mdImpl) == mdtExportedType) &&
+ (mdImpl != mdExportedTypeNil)) {
+ // mdCurrent is a nested ExportedType
+ IfFailRet(FindTypeDefByExportedType(pCTImport, mdImpl, pTDImport, mtd));
+
+ // Get TypeDef token for this nested type
+ return pTDImport->FindTypeDef(szcNameSpace, szcName, *mtd, mtd);
+ }
+
+ // Get TypeDef token for this top-level type
+ return pTDImport->FindTypeDef(szcNameSpace, szcName, mdTokenNil, mtd);
+}
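+
+// Worked example (hypothetical types): for an ExportedType describing
+// Outer+Inner, the recursion above first resolves Outer at the top level
+// (enclosing token mdTokenNil), then, unwinding, passes Outer's TypeDef as the
+// enclosing-class argument to FindTypeDef for Inner.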
+
+#ifndef DACCESS_COMPILE
+
+VOID ClassLoader::CreateCanonicallyCasedKey(LPCUTF8 pszNameSpace, LPCUTF8 pszName, __out LPUTF8 *ppszOutNameSpace, __out LPUTF8 *ppszOutName)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ // We can use the NoThrow versions here because we only call this routine if we're maintaining
+ // a case-insensitive hash table, and the creation of that table initialized the
+ // CasingHelper system.
+ INT32 iNSLength = InternalCasingHelper::InvariantToLowerNoThrow(NULL, 0, pszNameSpace);
+ if (!iNSLength)
+ {
+ COMPlusThrowOM();
+ }
+
+ INT32 iNameLength = InternalCasingHelper::InvariantToLowerNoThrow(NULL, 0, pszName);
+ if (!iNameLength)
+ {
+ COMPlusThrowOM();
+ }
+
+ {
+ //Calc & allocate path length
+ //Includes terminating null
+ S_SIZE_T allocSize = S_SIZE_T(iNSLength) + S_SIZE_T(iNameLength);
+ if (allocSize.IsOverflow())
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+
+ AllocMemHolder<char> pszOutNameSpace (GetAssembly()->GetHighFrequencyHeap()->AllocMem(allocSize));
+ *ppszOutNameSpace = pszOutNameSpace;
+
+ if (iNSLength == 1)
+ {
+ **ppszOutNameSpace = '\0';
+ }
+ else
+ {
+ if (!InternalCasingHelper::InvariantToLowerNoThrow(*ppszOutNameSpace, iNSLength, pszNameSpace))
+ {
+ COMPlusThrowOM();
+ }
+ }
+
+ *ppszOutName = *ppszOutNameSpace + iNSLength;
+
+ if (!InternalCasingHelper::InvariantToLowerNoThrow(*ppszOutName, iNameLength, pszName))
+ {
+ COMPlusThrowOM();
+ }
+
+ pszOutNameSpace.SuppressRelease();
+ }
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+
+//
+// Return a class that is already loaded
+// Only for type refs and type defs (not type specs)
+//
+/*static*/
+TypeHandle ClassLoader::LookupTypeDefOrRefInModule(Module *pModule, mdToken cl, ClassLoadLevel *pLoadLevel)
+{
+ CONTRACT(TypeHandle)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pModule));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END
+
+ BAD_FORMAT_NOTHROW_ASSERT((TypeFromToken(cl) == mdtTypeRef ||
+ TypeFromToken(cl) == mdtTypeDef ||
+ TypeFromToken(cl) == mdtTypeSpec));
+
+ TypeHandle typeHandle;
+
+ if (TypeFromToken(cl) == mdtTypeDef)
+ typeHandle = pModule->LookupTypeDef(cl, pLoadLevel);
+ else if (TypeFromToken(cl) == mdtTypeRef)
+ {
+ typeHandle = pModule->LookupTypeRef(cl);
+
+ if (pLoadLevel && !typeHandle.IsNull())
+ {
+ *pLoadLevel = typeHandle.GetLoadLevel();
+ }
+ }
+
+ RETURN(typeHandle);
+}
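+
+// Illustrative usage sketch (pModule and tkType are hypothetical): a pure
+// lookup that never triggers loading, which makes it usable from
+// FORBIDGC_LOADER_USE-style callers.
+//
+//   ClassLoadLevel level = CLASS_LOAD_BEGIN;
+//   TypeHandle th = ClassLoader::LookupTypeDefOrRefInModule(pModule, tkType, &level);
+//   if (!th.IsNull() && level >= CLASS_LOAD_EXACTPARENTS)
+//   {
+//       // The type is present and its parents are exact, so they can be inspected.
+//   }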
+
+DomainAssembly *ClassLoader::GetDomainAssembly(AppDomain *pDomain/*=NULL*/)
+{
+ WRAPPER_NO_CONTRACT;
+ return GetAssembly()->GetDomainAssembly(pDomain);
+}
+
+#ifndef DACCESS_COMPILE
+
+//
+// Free all modules associated with this loader
+//
+void ClassLoader::FreeModules()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ DISABLED(FORBID_FAULT); //Lots of crud to clean up to make this work
+ }
+ CONTRACTL_END;
+
+ Module *pManifest = NULL;
+ if (GetAssembly() && (NULL != (pManifest = GetAssembly()->GetManifestModule()))) {
+ // Unload the manifest last, since it contains the module list in its rid map
+ ModuleIterator i = GetAssembly()->IterateModules();
+ while (i.Next()) {
+ // Have the module free its various tables and some of the EEClass links
+ if (i.GetModule() != pManifest)
+ i.GetModule()->Destruct();
+ }
+
+ // Now do the manifest module.
+ pManifest->Destruct();
+ }
+
+}
+
+ClassLoader::~ClassLoader()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ DESTRUCTOR_CHECK;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ DISABLED(FORBID_FAULT); //Lots of crud to clean up to make this work
+ }
+ CONTRACTL_END
+
+#ifdef _DEBUG
+ // Do not walk m_pUnresolvedClassHash at destruct time as it is loaderheap allocated memory
+ // and may already have been deallocated via an AllocMemTracker.
+ m_pUnresolvedClassHash = (PendingTypeLoadTable*)(UINT_PTR)0xcccccccc;
+#endif
+
+#ifdef _DEBUG
+// LOG((
+// LF_CLASSLOADER,
+// INFO3,
+// "Deleting classloader %x\n"
+// " >EEClass data: %10d bytes\n"
+// " >Classname hash: %10d bytes\n"
+// " >FieldDesc data: %10d bytes\n"
+// " >MethodDesc data: %10d bytes\n"
+// " >GCInfo: %10d bytes\n"
+// " >Interface maps: %10d bytes\n"
+// " >MethodTables: %10d bytes\n"
+// " >Vtables: %10d bytes\n"
+// " >Static fields: %10d bytes\n"
+// "# methods: %10d\n"
+// "# field descs: %10d\n"
+// "# classes: %10d\n"
+// "# dup intf slots: %10d\n"
+// "# array classrefs: %10d\n"
+// "Array class overhead:%10d bytes\n",
+// this,
+// m_dwEEClassData,
+// m_pAvailableClasses->m_dwDebugMemory,
+// m_dwFieldDescData,
+// m_dwMethodDescData,
+// m_dwGCSize,
+// m_dwInterfaceMapSize,
+// m_dwMethodTableSize,
+// m_dwVtableData,
+// m_dwStaticFieldData,
+// m_dwDebugMethods,
+// m_dwDebugFieldDescs,
+// m_dwDebugClasses,
+// m_dwDebugDuplicateInterfaceSlots,
+// ));
+#endif
+
+ FreeModules();
+
+ m_UnresolvedClassLock.Destroy();
+ m_AvailableClassLock.Destroy();
+ m_AvailableTypesLock.Destroy();
+}
+
+
+//----------------------------------------------------------------------------
+// The constructor should only initialize enough to ensure that the destructor doesn't
+// crash. It cannot allocate or do anything that might fail, as that would leave
+// the ClassLoader in a state where it cannot be safely destructed. Any such
+// tasks should be done in ClassLoader::Init().
+//----------------------------------------------------------------------------
+ClassLoader::ClassLoader(Assembly *pAssembly)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ m_pAssembly = pAssembly;
+
+ m_pUnresolvedClassHash = NULL;
+ m_cUnhashedModules = 0;
+
+#ifdef _DEBUG
+ m_dwDebugMethods = 0;
+ m_dwDebugFieldDescs = 0;
+ m_dwDebugClasses = 0;
+ m_dwDebugDuplicateInterfaceSlots = 0;
+ m_dwGCSize = 0;
+ m_dwInterfaceMapSize = 0;
+ m_dwMethodTableSize = 0;
+ m_dwVtableData = 0;
+ m_dwStaticFieldData = 0;
+ m_dwFieldDescData = 0;
+ m_dwMethodDescData = 0;
+ m_dwEEClassData = 0;
+#endif
+}
+
+
+//----------------------------------------------------------------------------
+// This function completes the initialization of the ClassLoader. It can
+// assume that the constructor has run and that the function is entered with the
+// ClassLoader in a safely destructible state. This function can throw, but
+// whether it throws or succeeds, it must leave the ClassLoader in a safely
+// destructible state.
+//----------------------------------------------------------------------------
+VOID ClassLoader::Init(AllocMemTracker *pamTracker)
+{
+ STANDARD_VM_CONTRACT;
+
+ m_pUnresolvedClassHash = PendingTypeLoadTable::Create(GetAssembly()->GetLowFrequencyHeap(),
+ UNRESOLVED_CLASS_HASH_BUCKETS,
+ pamTracker);
+
+ m_UnresolvedClassLock.Init(CrstUnresolvedClassLock);
+
+ // This lock is taken within the classloader whenever we have to enter a
+ // type in one of the modules governed by the loader.
+ // The process of creating these types may be reentrant. The ordering has
+ // not yet been sorted out, and when we sort it out we should also modify the
+ // ordering for m_AvailableTypesLock in BaseDomain.
+ m_AvailableClassLock.Init(
+ CrstAvailableClass,
+ CRST_REENTRANCY);
+
+ // This lock is taken within the classloader whenever we have to insert a new parameterized type into the table.
+ // This lock also needs to be taken for a read operation in a GC_NOTRIGGER scope, thus the ANYMODE flag.
+ m_AvailableTypesLock.Init(
+ CrstAvailableParamTypes,
+ (CrstFlags)(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD));
+
+#ifdef _DEBUG
+ CorTypeInfo::CheckConsistency();
+#endif
+
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+/*static*/
+TypeHandle ClassLoader::LoadTypeDefOrRefOrSpecThrowing(Module *pModule,
+ mdToken typeDefOrRefOrSpec,
+ const SigTypeContext *pTypeContext,
+ NotFoundAction fNotFoundAction /* = ThrowIfNotFound */ ,
+ PermitUninstantiatedFlag fUninstantiated /* = FailIfUninstDefOrRef */,
+ LoadTypesFlag fLoadTypes/*=LoadTypes*/ ,
+ ClassLoadLevel level /* = CLASS_LOADED */,
+ BOOL dropGenericArgumentLevel /* = FALSE */,
+ const Substitution *pSubst)
+{
+ CONTRACT(TypeHandle)
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ MODE_ANY;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ if (FORBIDGC_LOADER_USE_ENABLED() || fLoadTypes != LoadTypes) { LOADS_TYPE(CLASS_LOAD_BEGIN); } else { LOADS_TYPE(level); }
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(level > CLASS_LOAD_BEGIN && level <= CLASS_LOADED);
+ PRECONDITION(FORBIDGC_LOADER_USE_ENABLED() || GetAppDomain()->CheckCanLoadTypes(pModule->GetAssembly()));
+ POSTCONDITION(CheckPointer(RETVAL, (fNotFoundAction == ThrowIfNotFound)? NULL_NOT_OK : NULL_OK));
+ }
+ CONTRACT_END
+
+ if (TypeFromToken(typeDefOrRefOrSpec) == mdtTypeSpec)
+ {
+ ULONG cSig;
+ PCCOR_SIGNATURE pSig;
+
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+ if (FAILED(pInternalImport->GetTypeSpecFromToken(typeDefOrRefOrSpec, &pSig, &cSig)))
+ {
+#ifndef DACCESS_COMPILE
+ if (fNotFoundAction == ThrowIfNotFound)
+ {
+ pModule->GetAssembly()->ThrowTypeLoadException(pInternalImport, typeDefOrRefOrSpec, IDS_CLASSLOAD_BADFORMAT);
+ }
+#endif //!DACCESS_COMPILE
+ RETURN (TypeHandle());
+ }
+ SigPointer sigptr(pSig, cSig);
+ TypeHandle typeHnd = sigptr.GetTypeHandleThrowing(pModule, pTypeContext, fLoadTypes,
+ level, dropGenericArgumentLevel, pSubst);
+#ifndef DACCESS_COMPILE
+ if ((fNotFoundAction == ThrowIfNotFound) && typeHnd.IsNull())
+ pModule->GetAssembly()->ThrowTypeLoadException(pInternalImport, typeDefOrRefOrSpec,
+ IDS_CLASSLOAD_GENERAL);
+#endif
+ RETURN (typeHnd);
+ }
+ else
+ {
+ RETURN (LoadTypeDefOrRefThrowing(pModule, typeDefOrRefOrSpec,
+ fNotFoundAction,
+ fUninstantiated,
+ ((fLoadTypes == LoadTypes) ? tdNoTypes : tdAllTypes),
+ level));
+ }
+} // ClassLoader::LoadTypeDefOrRefOrSpecThrowing
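+
+// Illustrative usage sketch (pModule, tkSpec and pCtx are hypothetical): a
+// TypeSpec token takes the signature-parsing path above, while TypeDef and
+// TypeRef tokens fall through to LoadTypeDefOrRefThrowing.
+//
+//   // pCtx supplies the generic context used to interpret ELEMENT_TYPE_VAR /
+//   // ELEMENT_TYPE_MVAR inside the TypeSpec signature.
+//   TypeHandle th = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, tkSpec, pCtx);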
+
+// Given a token specifying a typeDef, and a module in which to
+// interpret that token, find or load the corresponding type handle.
+//
+//
+/*static*/
+TypeHandle ClassLoader::LoadTypeDefThrowing(Module *pModule,
+ mdToken typeDef,
+ NotFoundAction fNotFoundAction /* = ThrowIfNotFound */ ,
+ PermitUninstantiatedFlag fUninstantiated /* = FailIfUninstDefOrRef */,
+ mdToken tokenNotToLoad,
+ ClassLoadLevel level,
+ Instantiation * pTargetInstantiation)
+{
+
+ CONTRACT(TypeHandle)
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ MODE_ANY;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ DAC_LOADS_TYPE(level, !NameHandle::OKToLoad(typeDef, tokenNotToLoad));
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(level > CLASS_LOAD_BEGIN && level <= CLASS_LOADED);
+ PRECONDITION(FORBIDGC_LOADER_USE_ENABLED()
+ || GetAppDomain()->CheckCanLoadTypes(pModule->GetAssembly()));
+
+ POSTCONDITION(CheckPointer(RETVAL, NameHandle::OKToLoad(typeDef, tokenNotToLoad) && (fNotFoundAction == ThrowIfNotFound) ? NULL_NOT_OK : NULL_OK));
+ POSTCONDITION(RETVAL.IsNull() || RETVAL.GetCl() == typeDef);
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ TypeHandle typeHnd;
+
+ // First, attempt to find the class if it is already loaded
+ ClassLoadLevel existingLoadLevel = CLASS_LOAD_BEGIN;
+ typeHnd = pModule->LookupTypeDef(typeDef, &existingLoadLevel);
+ if (!typeHnd.IsNull())
+ {
+#ifndef DACCESS_COMPILE
+ // If the type is loaded, we can do cheap arity verification
+ if (pTargetInstantiation != NULL && pTargetInstantiation->GetNumArgs() != typeHnd.AsMethodTable()->GetNumGenericArgs())
+ pModule->GetAssembly()->ThrowTypeLoadException(pModule->GetMDImport(), typeDef, IDS_CLASSLOAD_TYPEWRONGNUMGENERICARGS);
+#endif
+
+ if (existingLoadLevel >= level)
+ RETURN(typeHnd);
+ }
+
+ // We don't want to probe on any thread except those that have a managed Thread object. This function
+ // can be called from the GC thread etc., so we need to control how we probe.
+ INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(goto Exit;);
+
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+
+#ifndef DACCESS_COMPILE
+ if (typeHnd.IsNull() && pTargetInstantiation != NULL)
+ {
+ // If the type is not loaded yet, we have to do heavy weight arity verification based on metadata
+ HENUMInternal hEnumGenericPars;
+ HRESULT hr = pInternalImport->EnumInit(mdtGenericParam, typeDef, &hEnumGenericPars);
+ if (FAILED(hr))
+ pModule->GetAssembly()->ThrowTypeLoadException(pInternalImport, typeDef, IDS_CLASSLOAD_BADFORMAT);
+ DWORD nGenericClassParams = pInternalImport->EnumGetCount(&hEnumGenericPars);
+ pInternalImport->EnumClose(&hEnumGenericPars);
+
+ if (pTargetInstantiation->GetNumArgs() != nGenericClassParams)
+ pModule->GetAssembly()->ThrowTypeLoadException(pInternalImport, typeDef, IDS_CLASSLOAD_TYPEWRONGNUMGENERICARGS);
+ }
+#endif
+
+ if (IsNilToken(typeDef) || TypeFromToken(typeDef) != mdtTypeDef || !pInternalImport->IsValidToken(typeDef) )
+ {
+ LOG((LF_CLASSLOADER, LL_INFO10, "Bogus class token to load: 0x%08x\n", typeDef));
+ typeHnd = TypeHandle();
+ }
+ else
+ {
+ // *****************************************************************************
+ //
+ // Important invariant:
+ //
+ // The rule here is that we never go to LoadTypeHandleForTypeKey if a Find should succeed.
+ // This is vital, because otherwise a stack crawl will open up opportunities for
+ // GC. Since operations like setting up a GCFrame will trigger a crawl in stress
+ // mode, a GC at that point would be disastrous. We can't assert this, because
+ // of race conditions. (In other words, the type could suddenly become find-able
+ // because another thread loaded it while we were in this method.)
+
+ // Not found - try to load it unless we are told not to
+
+#ifndef DACCESS_COMPILE
+ if ( !NameHandle::OKToLoad(typeDef, tokenNotToLoad) )
+ {
+ typeHnd = TypeHandle();
+ }
+ else
+ {
+ // Anybody who puts himself in a FORBIDGC_LOADER state has promised
+ // to use us only for resolving, not loading. We are now transitioning into
+ // loading.
+#ifdef _DEBUG_IMPL
+ _ASSERTE(!FORBIDGC_LOADER_USE_ENABLED());
+#endif
+ TRIGGERSGC();
+
+ if (pModule->IsReflection())
+ {
+ //if (!(pModule->IsIntrospectionOnly()))
+ {
+ // Don't try to load types that are not in available table, when this
+ // is an in-memory module. Raise the type-resolve event instead.
+ typeHnd = TypeHandle();
+
+ // Avoid infinite recursion
+ if (tokenNotToLoad != tdAllAssemblies)
+ {
+ AppDomain* pDomain = SystemDomain::GetCurrentDomain();
+
+ LPUTF8 pszFullName;
+ LPCUTF8 className;
+ LPCUTF8 nameSpace;
+ if (FAILED(pInternalImport->GetNameOfTypeDef(typeDef, &className, &nameSpace)))
+ {
+ LOG((LF_CLASSLOADER, LL_INFO10, "Bogus TypeDef record while loading: 0x%08x\n", typeDef));
+ typeHnd = TypeHandle();
+ }
+ else
+ {
+ MAKE_FULL_PATH_ON_STACK_UTF8(pszFullName,
+ nameSpace,
+ className);
+ GCX_COOP();
+ ASSEMBLYREF asmRef = NULL;
+ DomainAssembly *pDomainAssembly = NULL;
+ GCPROTECT_BEGIN(asmRef);
+
+ pDomainAssembly = pDomain->RaiseTypeResolveEventThrowing(
+ pModule->GetAssembly()->GetDomainAssembly(),
+ pszFullName, &asmRef);
+
+ if (asmRef != NULL)
+ {
+ _ASSERTE(pDomainAssembly != NULL);
+ if (pDomainAssembly->GetAssembly()->GetLoaderAllocator()->IsCollectible())
+ {
+ if (!pModule->GetLoaderAllocator()->IsCollectible())
+ {
+ LOG((LF_CLASSLOADER, LL_INFO10, "Bad result from TypeResolveEvent while loader TypeDef record: 0x%08x\n", typeDef));
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleBoundNonCollectible"));
+ }
+
+ pModule->GetLoaderAllocator()->EnsureReference(pDomainAssembly->GetAssembly()->GetLoaderAllocator());
+ }
+ }
+ GCPROTECT_END();
+ if (pDomainAssembly != NULL)
+ {
+ Assembly *pAssembly = pDomainAssembly->GetAssembly();
+
+ NameHandle name(nameSpace, className);
+ name.SetTypeToken(pModule, typeDef);
+ name.SetTokenNotToLoad(tdAllAssemblies);
+ typeHnd = pAssembly->GetLoader()->LoadTypeHandleThrowing(&name, level);
+ }
+ }
+ }
+ }
+ }
+ else
+ {
+ TypeKey typeKey(pModule, typeDef);
+ typeHnd = pModule->GetClassLoader()->LoadTypeHandleForTypeKey(&typeKey,
+ typeHnd,
+ level);
+ }
+ }
+#endif // !DACCESS_COMPILE
+ }
+
+// If stack guards are disabled, then this label is unreferenced and produces a compile error.
+#if defined(FEATURE_STACK_PROBE) && !defined(DACCESS_COMPILE)
+Exit:
+#endif
+
+#ifndef DACCESS_COMPILE
+ if ((fUninstantiated == FailIfUninstDefOrRef) && !typeHnd.IsNull() && typeHnd.IsGenericTypeDefinition())
+ {
+ typeHnd = TypeHandle();
+ }
+
+ if ((fNotFoundAction == ThrowIfNotFound) && typeHnd.IsNull() && (tokenNotToLoad != tdAllTypes))
+ {
+ pModule->GetAssembly()->ThrowTypeLoadException(pModule->GetMDImport(),
+ typeDef,
+ IDS_CLASSLOAD_GENERAL);
+ }
+#endif
+ ;
+ END_INTERIOR_STACK_PROBE;
+
+ RETURN(typeHnd);
+}
+
+// Given a token specifying a typeDef or typeRef, and a module in
+// which to interpret that token, find or load the corresponding type
+// handle.
+//
+/*static*/
+TypeHandle ClassLoader::LoadTypeDefOrRefThrowing(Module *pModule,
+ mdToken typeDefOrRef,
+ NotFoundAction fNotFoundAction /* = ThrowIfNotFound */ ,
+ PermitUninstantiatedFlag fUninstantiated /* = FailIfUninstDefOrRef */,
+ mdToken tokenNotToLoad,
+ ClassLoadLevel level)
+{
+
+ CONTRACT(TypeHandle)
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ MODE_ANY;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(level > CLASS_LOAD_BEGIN && level <= CLASS_LOADED);
+ PRECONDITION(FORBIDGC_LOADER_USE_ENABLED()
+ || GetAppDomain()->CheckCanLoadTypes(pModule->GetAssembly()));
+
+ POSTCONDITION(CheckPointer(RETVAL, NameHandle::OKToLoad(typeDefOrRef, tokenNotToLoad) && (fNotFoundAction == ThrowIfNotFound) ? NULL_NOT_OK : NULL_OK));
+ POSTCONDITION(level <= CLASS_LOAD_UNRESTORED || RETVAL.IsNull() || RETVAL.IsRestored());
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ // NotFoundAction could be the bizarre 'ThrowButNullV11McppWorkaround',
+ // which means ThrowIfNotFound EXCEPT if this might be the Everett MCPP
+ // Nil-token ResolutionScope for a value type. In that case, it means
+ // ReturnNullIfNotFound.
+ // If we have ThrowButNullV11McppWorkaround, remember that NULL *might*
+ // be OK if there is no resolution scope, but change the value to
+ // ThrowIfNotFound.
+ BOOLEAN bReturnNullOkWhenNoResolutionScope = false;
+ if (fNotFoundAction == ThrowButNullV11McppWorkaround)
+ {
+ bReturnNullOkWhenNoResolutionScope = true;
+ fNotFoundAction = ThrowIfNotFound;
+ }
+
+ // First, attempt to find the class if it is already loaded
+ ClassLoadLevel existingLoadLevel = CLASS_LOAD_BEGIN;
+ TypeHandle typeHnd = LookupTypeDefOrRefInModule(pModule, typeDefOrRef, &existingLoadLevel);
+ if (!typeHnd.IsNull())
+ {
+ if (existingLoadLevel < level)
+ {
+ pModule = typeHnd.GetModule();
+ typeDefOrRef = typeHnd.GetCl();
+ }
+ }
+
+ if (!typeHnd.IsNull() && existingLoadLevel >= level)
+ {
+ // perform the check that it's not an uninstantiated TypeDef/TypeRef
+ // being used inappropriately.
+ if (!((fUninstantiated == FailIfUninstDefOrRef) && !typeHnd.IsNull() && typeHnd.IsGenericTypeDefinition()))
+ {
+ RETURN(typeHnd);
+ }
+ }
+ else
+ {
+ // otherwise try to resolve the TypeRef and/or load the corresponding TypeDef
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+ mdToken tokType = TypeFromToken(typeDefOrRef);
+
+ if (IsNilToken(typeDefOrRef) || ((tokType != mdtTypeDef)&&(tokType != mdtTypeRef))
+ || !pInternalImport->IsValidToken(typeDefOrRef) )
+ {
+#ifdef _DEBUG
+ LOG((LF_CLASSLOADER, LL_INFO10, "Bogus class token to load: 0x%08x\n", typeDefOrRef));
+#endif
+
+ typeHnd = TypeHandle();
+ }
+
+ else if (tokType == mdtTypeRef)
+ {
+ BOOL fNoResolutionScope;
+ Module *pFoundModule = Assembly::FindModuleByTypeRef(pModule, typeDefOrRef,
+ tokenNotToLoad==tdAllTypes ?
+ Loader::DontLoad :
+ Loader::Load,
+ &fNoResolutionScope);
+
+ if (pFoundModule != NULL)
+ {
+
+ // Not in my module, have to look it up by name. This is the primary path
+ // taken by the TypeRef case, i.e. we've resolved a TypeRef to a TypeDef/Module
+ // pair.
+ LPCUTF8 pszNameSpace;
+ LPCUTF8 pszClassName;
+ if (FAILED(pInternalImport->GetNameOfTypeRef(
+ typeDefOrRef,
+ &pszNameSpace,
+ &pszClassName)))
+ {
+ typeHnd = TypeHandle();
+ }
+ else
+ {
+ if (fNoResolutionScope)
+ {
+ // The Everett C++ compiler can generate a TypeRef with a nil
+ // ResolutionScope (RS=0) and no corresponding TypeDef for unmanaged
+ // valuetypes that are referenced only by pointers to them,
+ // so we can legally fail to load here without raising an exception
+ typeHnd = ClassLoader::LoadTypeByNameThrowing(pFoundModule->GetAssembly(),
+ pszNameSpace,
+ pszClassName,
+ ClassLoader::ReturnNullIfNotFound,
+ tokenNotToLoad==tdAllTypes ? ClassLoader::DontLoadTypes : ClassLoader::LoadTypes,
+ level);
+
+ if(typeHnd.IsNull() && bReturnNullOkWhenNoResolutionScope)
+ {
+ fNotFoundAction = ReturnNullIfNotFound;
+ RETURN(typeHnd);
+ }
+ }
+ else
+ {
+ NameHandle nameHandle(pModule, typeDefOrRef);
+ nameHandle.SetName(pszNameSpace, pszClassName);
+ nameHandle.SetTokenNotToLoad(tokenNotToLoad);
+ typeHnd = pFoundModule->GetClassLoader()->
+ LoadTypeHandleThrowIfFailed(&nameHandle, level,
+ pFoundModule->IsReflection() ? NULL : pFoundModule);
+ }
+ }
+
+#ifndef DACCESS_COMPILE
+ if (!(typeHnd.IsNull()))
+ pModule->StoreTypeRef(typeDefOrRef, typeHnd);
+#endif
+ }
+ }
+ else
+ {
+ // This is the mdtTypeDef case...
+ typeHnd = LoadTypeDefThrowing(pModule, typeDefOrRef,
+ fNotFoundAction,
+ fUninstantiated,
+ tokenNotToLoad,
+ level);
+ }
+ }
+ TypeHandle thRes = typeHnd;
+
+ // reject the load if it's an uninstantiated TypeDef/TypeRef
+ // being used inappropriately.
+ if ((fUninstantiated == FailIfUninstDefOrRef) && !typeHnd.IsNull() && typeHnd.IsGenericTypeDefinition())
+ thRes = TypeHandle();
+
+ // perform the check to throw when the thing is not found
+ if ((fNotFoundAction == ThrowIfNotFound) && thRes.IsNull() && (tokenNotToLoad != tdAllTypes))
+ {
+#ifndef DACCESS_COMPILE
+ pModule->GetAssembly()->ThrowTypeLoadException(pModule->GetMDImport(),
+ typeDefOrRef,
+ IDS_CLASSLOAD_GENERAL);
+#else
+ DacNotImpl();
+#endif
+ }
+
+ RETURN(thRes);
+}
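+
+// Illustrative usage sketch (pModule and tkRef are hypothetical): the common
+// "resolve a token that appears in IL" call, throwing on failure and rejecting
+// uninstantiated generic definitions.
+//
+//   TypeHandle th = ClassLoader::LoadTypeDefOrRefThrowing(
+//       pModule, tkRef,
+//       ClassLoader::ThrowIfNotFound,
+//       ClassLoader::FailIfUninstDefOrRef,
+//       tdNoTypes,       // tokenNotToLoad: no token is off-limits
+//       CLASS_LOADED);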
+
+/*static*/
+BOOL
+ClassLoader::ResolveTokenToTypeDefThrowing(
+ Module * pTypeRefModule,
+ mdTypeRef typeRefToken,
+ Module ** ppTypeDefModule,
+ mdTypeDef * pTypeDefToken,
+ Loader::LoadFlag loadFlag,
+ BOOL * pfUsesTypeForwarder) // The semantics of this parameter: set to TRUE if a type forwarder is found; it is never set to FALSE.
+{
+ CONTRACT(BOOL)
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ MODE_ANY;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ PRECONDITION(CheckPointer(pTypeRefModule));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ // It's a TypeDef already
+ if (TypeFromToken(typeRefToken) == mdtTypeDef)
+ {
+ if (ppTypeDefModule != NULL)
+ *ppTypeDefModule = pTypeRefModule;
+ if (pTypeDefToken != NULL)
+ *pTypeDefToken = typeRefToken;
+ RETURN TRUE;
+ }
+
+ TypeHandle typeHnd = pTypeRefModule->LookupTypeRef(typeRefToken);
+
+ // Type is already (partially) loaded and cached in the module's TypeRef table
+ // Do not return here if we are checking for type forwarders
+ if (!typeHnd.IsNull() && (pfUsesTypeForwarder == NULL))
+ {
+ if (ppTypeDefModule != NULL)
+ *ppTypeDefModule = typeHnd.GetModule();
+ if (pTypeDefToken != NULL)
+ *pTypeDefToken = typeHnd.GetCl();
+ RETURN TRUE;
+ }
+
+ BOOL fNoResolutionScope; //not used
+ Module * pFoundRefModule = Assembly::FindModuleByTypeRef(
+ pTypeRefModule,
+ typeRefToken,
+ loadFlag,
+ &fNoResolutionScope);
+
+ if (pFoundRefModule == NULL)
+ { // We didn't find the TypeRef anywhere
+ RETURN FALSE;
+ }
+
+ // If checking for type forwarders, then we can see if a type forwarder was used based on the output of
+ // pFoundRefModule and typeHnd (if typeHnd is set)
+ if (!typeHnd.IsNull() && (pfUsesTypeForwarder != NULL))
+ {
+ if (typeHnd.GetModule() != pFoundRefModule)
+ {
+ *pfUsesTypeForwarder = TRUE;
+ }
+
+ if (ppTypeDefModule != NULL)
+ *ppTypeDefModule = typeHnd.GetModule();
+ if (pTypeDefToken != NULL)
+ *pTypeDefToken = typeHnd.GetCl();
+ RETURN TRUE;
+ }
+
+ // Not in my module, have to look it up by name
+ LPCUTF8 pszNameSpace;
+ LPCUTF8 pszClassName;
+ if (FAILED(pTypeRefModule->GetMDImport()->GetNameOfTypeRef(typeRefToken, &pszNameSpace, &pszClassName)))
+ {
+ RETURN FALSE;
+ }
+ NameHandle nameHandle(pTypeRefModule, typeRefToken);
+ nameHandle.SetName(pszNameSpace, pszClassName);
+ if (loadFlag != Loader::Load)
+ {
+ nameHandle.SetTokenNotToLoad(tdAllTypes);
+ }
+
+ mdToken foundTypeDef;
+ Module * pFoundModule;
+ mdExportedType foundExportedType;
+ Module * pSourceModule = pFoundRefModule;
+
+ for (UINT32 nTypeForwardingChainSize = 0; nTypeForwardingChainSize < const_cMaxTypeForwardingChainSize; nTypeForwardingChainSize++)
+ {
+ foundTypeDef = mdTokenNil;
+ pFoundModule = NULL;
+ foundExportedType = mdTokenNil;
+ if (!pSourceModule->GetClassLoader()->FindClassModuleThrowing(
+ &nameHandle,
+ &typeHnd,
+ &foundTypeDef,
+ &pFoundModule,
+ &foundExportedType,
+ NULL,
+ pSourceModule->IsReflection() ? NULL : pSourceModule,
+ loadFlag))
+ {
+ RETURN FALSE;
+ }
+
+ // Type is already loaded and cached in the loader's by-name table
+ if (!typeHnd.IsNull())
+ {
+ if ((typeHnd.GetModule() != pFoundRefModule) && (pfUsesTypeForwarder != NULL))
+ { // We followed at least one type forwarder to resolve the type
+ *pfUsesTypeForwarder = TRUE;
+ }
+ if (ppTypeDefModule != NULL)
+ *ppTypeDefModule = typeHnd.GetModule();
+ if (pTypeDefToken != NULL)
+ *pTypeDefToken = typeHnd.GetCl();
+ RETURN TRUE;
+ }
+
+ if (pFoundModule == NULL)
+ { // Module was probably not loaded
+ RETURN FALSE;
+ }
+
+ if (TypeFromToken(foundExportedType) != mdtExportedType)
+ { // It's not an exported type
+ _ASSERTE(foundExportedType == mdTokenNil);
+
+ if ((pFoundModule != pFoundRefModule) && (pfUsesTypeForwarder != NULL))
+ { // We followed at least one type forwarder to resolve the type
+ *pfUsesTypeForwarder = TRUE;
+ }
+ if (pTypeDefToken != NULL)
+ *pTypeDefToken = foundTypeDef;
+ if (ppTypeDefModule != NULL)
+ *ppTypeDefModule = pFoundModule;
+ RETURN TRUE;
+ }
+ // It's an exported type
+
+ // Repeat the search for the type in the newly found module
+ pSourceModule = pFoundModule;
+ }
+ // Type forwarding chain is too long
+ RETURN FALSE;
+} // ClassLoader::ResolveTokenToTypeDefThrowing
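+
+// Illustrative usage sketch (pRefModule and tkRef are hypothetical): detecting
+// whether a TypeRef resolves through a type forwarder without forcing new loads.
+//
+//   Module *pDefModule = NULL;
+//   mdTypeDef tkDef = mdTypeDefNil;
+//   BOOL fForwarded = FALSE;
+//   if (ClassLoader::ResolveTokenToTypeDefThrowing(
+//           pRefModule, tkRef, &pDefModule, &tkDef, Loader::DontLoad, &fForwarded))
+//   {
+//       // fForwarded was set to TRUE only if at least one forwarder was
+//       // followed; the callee never resets it to FALSE.
+//   }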
+
+#ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+//static
+VOID
+ClassLoader::GetEnclosingClassThrowing(
+ IMDInternalImport * pInternalImport,
+ Module * pModule,
+ mdTypeDef cl,
+ mdTypeDef * tdEnclosing)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(tdEnclosing);
+ *tdEnclosing = mdTypeDefNil;
+
+ HRESULT hr = pInternalImport->GetNestedClassProps(cl, tdEnclosing);
+
+ if (FAILED(hr))
+ {
+ if (hr != CLDB_E_RECORD_NOTFOUND)
+ COMPlusThrowHR(hr);
+ return;
+ }
+
+ if (TypeFromToken(*tdEnclosing) != mdtTypeDef)
+ pModule->GetAssembly()->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_ENCLOSING);
+} // ClassLoader::GetEnclosingClassThrowing
+
+
+//---------------------------------------------------------------------------------------
+//
+// Load a parent type or implemented interface type.
+//
+// If this is an instantiated type represented by a type spec, then instead of attempting to load the
+// exact type, load an approximate instantiation in which all reference types are replaced by Object.
+// The exact instantiated types will be loaded later by LoadInstantiatedInfo.
+// We do this to avoid cycles early in class loading caused by definitions such as
+// struct M : ICloneable<M> // load ICloneable<object>
+// class C<T> : D<C<T>,int> for any T // load D<object,int>
+//
+//static
+TypeHandle
+ClassLoader::LoadApproxTypeThrowing(
+ Module * pModule,
+ mdToken tok,
+ SigPointer * pSigInst,
+ const SigTypeContext * pClassTypeContext)
+{
+ CONTRACT(TypeHandle)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pSigInst, NULL_OK));
+ PRECONDITION(CheckPointer(pModule));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ IMDInternalImport * pInternalImport = pModule->GetMDImport();
+
+ if (TypeFromToken(tok) == mdtTypeSpec)
+ {
+ ULONG cSig;
+ PCCOR_SIGNATURE pSig;
+ IfFailThrowBF(pInternalImport->GetTypeSpecFromToken(tok, &pSig, &cSig), BFA_METADATA_CORRUPT, pModule);
+
+ SigPointer sigptr = SigPointer(pSig, cSig);
+ CorElementType type = ELEMENT_TYPE_END;
+ IfFailThrowBF(sigptr.GetElemType(&type), BFA_BAD_SIGNATURE, pModule);
+
+ // The only kind of type spec that we recognise is an instantiated type
+ if (type != ELEMENT_TYPE_GENERICINST)
+ pModule->GetAssembly()->ThrowTypeLoadException(pInternalImport, tok, IDS_CLASSLOAD_GENERAL);
+
+ // Of these, we outlaw instantiated value classes (they can't be interfaces and can't be subclassed)
+ IfFailThrowBF(sigptr.GetElemType(&type), BFA_BAD_SIGNATURE, pModule);
+
+ if (type != ELEMENT_TYPE_CLASS)
+ pModule->GetAssembly()->ThrowTypeLoadException(pInternalImport, tok, IDS_CLASSLOAD_GENERAL);
+
+ mdToken genericTok = 0;
+ IfFailThrowBF(sigptr.GetToken(&genericTok), BFA_BAD_SIGNATURE, pModule);
+ IfFailThrowBF(sigptr.GetData(NULL), BFA_BAD_SIGNATURE, pModule);
+
+ if (pSigInst != NULL)
+ *pSigInst = sigptr;
+
+ // Try to load the generic type itself
+ THROW_BAD_FORMAT_MAYBE(
+ ((TypeFromToken(genericTok) == mdtTypeRef) || (TypeFromToken(genericTok) == mdtTypeDef)),
+ BFA_UNEXPECTED_GENERIC_TOKENTYPE,
+ pModule);
+ TypeHandle genericTypeTH = LoadTypeDefOrRefThrowing(
+ pModule,
+ genericTok,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef,
+ tdNoTypes,
+ CLASS_LOAD_APPROXPARENTS);
+
+ // We load interfaces as very approximate types - just the generic
+ // interface itself. We fix this up in LoadInstantiatedInfo.
+ // This allows us to load recursive interfaces on structs such
+ // as "struct VC : I<VC>". The details of the interface
+ // are not currently needed during the first phase
+ // of setting up the method table.
+ if (genericTypeTH.IsInterface())
+ {
+ RETURN genericTypeTH;
+ }
+ else
+ {
+ // approxTypes, i.e. approximate reference types by Object, i.e. load the canonical type
+ RETURN SigPointer(pSig, cSig).GetTypeHandleThrowing(
+ pModule,
+ pClassTypeContext,
+ ClassLoader::LoadTypes,
+ CLASS_LOAD_APPROXPARENTS,
+ TRUE /*dropGenericArgumentLevel*/);
+ }
+ }
+ else
+ {
+ if (pSigInst != NULL)
+ *pSigInst = SigPointer();
+ RETURN LoadTypeDefOrRefThrowing(
+ pModule,
+ tok,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef,
+ tdNoTypes,
+ CLASS_LOAD_APPROXPARENTS);
+ }
+} // ClassLoader::LoadApproxTypeThrowing
+
+
+//---------------------------------------------------------------------------------------
+//
+//static
+MethodTable *
+ClassLoader::LoadApproxParentThrowing(
+ Module * pModule,
+ mdToken cl,
+ SigPointer * pParentInst,
+ const SigTypeContext * pClassTypeContext)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ mdTypeRef crExtends;
+ MethodTable * pParentMethodTable = NULL;
+ TypeHandle parentType;
+ DWORD dwAttrClass;
+ Assembly * pAssembly = pModule->GetAssembly();
+ IMDInternalImport * pInternalImport = pModule->GetMDImport();
+
+ // Initialize the return value.
+ *pParentInst = SigPointer();
+
+ // Now load all dependencies of this class
+ if (FAILED(pInternalImport->GetTypeDefProps(
+ cl,
+ &dwAttrClass, // AttrClass
+ &crExtends)))
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ if (RidFromToken(crExtends) != mdTokenNil)
+ {
+ // Do an "approximate" load of the parent, replacing reference types in the instantiation by Object
+ // This is to avoid cycles in the loader e.g. on class C : D<C> or class C<T> : D<C<T>>
+ // We fix up the exact parent later in LoadInstantiatedInfo
+ parentType = LoadApproxTypeThrowing(pModule, crExtends, pParentInst, pClassTypeContext);
+
+ pParentMethodTable = parentType.GetMethodTable();
+
+ if (pParentMethodTable == NULL)
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_PARENTNULL);
+
+ // cannot inherit from an interface
+ if (pParentMethodTable->IsInterface())
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_PARENTINTERFACE);
+
+ if (IsTdInterface(dwAttrClass))
+ {
+ // Interfaces must extend from Object
+ if (! pParentMethodTable->IsObjectClass())
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_INTERFACEOBJECT);
+ }
+ }
+
+ return pParentMethodTable;
+} // ClassLoader::LoadApproxParentThrowing
+
+// Perform a single phase of class loading
+// It is the caller's responsibility to lock
+/*static*/
+TypeHandle ClassLoader::DoIncrementalLoad(TypeKey *pTypeKey, TypeHandle typeHnd, ClassLoadLevel currentLevel)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pTypeKey));
+ PRECONDITION(currentLevel >= CLASS_LOAD_BEGIN && currentLevel < CLASS_LOADED);
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ if (LoggingOn(LF_CLASSLOADER, LL_INFO10000))
+ {
+ SString name;
+ TypeString::AppendTypeKeyDebug(name, pTypeKey);
+ LOG((LF_CLASSLOADER, LL_INFO10000, "PHASEDLOAD: About to do incremental load of type %S (%p) from level %s\n", name.GetUnicode(), typeHnd.AsPtr(), classLoadLevelName[currentLevel]));
+ }
+#endif
+
+ // Level is BEGIN if and only if type handle is null
+ CONSISTENCY_CHECK((currentLevel == CLASS_LOAD_BEGIN) == typeHnd.IsNull());
+
+ switch (currentLevel)
+ {
+ // Attain at least level CLASS_LOAD_UNRESTORED (if just locating type in ngen image)
+ // or at least level CLASS_LOAD_APPROXPARENTS (if creating type for the first time)
+ case CLASS_LOAD_BEGIN :
+ {
+ IBCLoggerAwareAllocMemTracker amTracker;
+ typeHnd = CreateTypeHandleForTypeKey(pTypeKey, &amTracker);
+ CONSISTENCY_CHECK(!typeHnd.IsNull());
+ TypeHandle published = PublishType(pTypeKey, typeHnd);
+ if (published == typeHnd)
+ amTracker.SuppressRelease();
+ typeHnd = published;
+ }
+ break;
+
+ case CLASS_LOAD_UNRESTOREDTYPEKEY :
+#ifdef FEATURE_PREJIT
+ typeHnd.DoRestoreTypeKey();
+#endif
+ break;
+
+ // Attain level CLASS_LOAD_APPROXPARENTS, starting with unrestored class
+ case CLASS_LOAD_UNRESTORED :
+#ifdef FEATURE_PREJIT
+ {
+ CONSISTENCY_CHECK(!typeHnd.IsRestored_NoLogging());
+ if (typeHnd.IsTypeDesc())
+ typeHnd.AsTypeDesc()->Restore();
+ else
+ typeHnd.AsMethodTable()->Restore();
+ }
+#endif
+ break;
+
+ // Attain level CLASS_LOAD_EXACTPARENTS
+ case CLASS_LOAD_APPROXPARENTS :
+ if (!typeHnd.IsTypeDesc())
+ {
+ LoadExactParents(typeHnd.AsMethodTable());
+ }
+ break;
+
+ case CLASS_LOAD_EXACTPARENTS :
+ case CLASS_DEPENDENCIES_LOADED :
+ case CLASS_LOADED :
+ break;
+
+ }
+
+ if (typeHnd.GetLoadLevel() >= CLASS_LOAD_EXACTPARENTS)
+ {
+ Notify(typeHnd);
+ }
+
+ return typeHnd;
+}
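+
+// Illustrative sketch of how callers drive this state machine; compare the
+// real loop in LoadTypeHandleForTypeKeyNoLock below.
+//
+//   TypeHandle th;                        // null if and only if CLASS_LOAD_BEGIN
+//   ClassLoadLevel cur = CLASS_LOAD_BEGIN;
+//   while (cur < targetLevel)
+//   {
+//       th = ClassLoader::DoIncrementalLoad(pTypeKey, th, cur);
+//       cur = th.GetLoadLevel();          // guaranteed to have advanced
+//   }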
+
+/*static*/
+// For non-canonical instantiations of generic types, create a fresh type by replicating the canonical instantiation
+// For canonical instantiations of generic types, create a brand new method table
+// For other constructed types, create a type desc and template method table if necessary
+// For all other types, create a method table
+TypeHandle ClassLoader::CreateTypeHandleForTypeKey(TypeKey* pKey, AllocMemTracker* pamTracker)
+{
+ CONTRACT(TypeHandle)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pKey));
+
+ POSTCONDITION(RETVAL.CheckMatchesKey(pKey));
+ MODE_ANY;
+ }
+ CONTRACT_END
+
+ TypeHandle typeHnd = TypeHandle();
+
+ if (!pKey->IsConstructed())
+ {
+ typeHnd = CreateTypeHandleForTypeDefThrowing(pKey->GetModule(),
+ pKey->GetTypeToken(),
+ pKey->GetInstantiation(),
+ pamTracker);
+ }
+ else if (pKey->HasInstantiation())
+ {
+#ifdef FEATURE_FULL_NGEN
+ // Try to find the type in an NGEN'd image.
+ typeHnd = TryFindDynLinkZapType(pKey);
+
+ if (!typeHnd.IsNull())
+ {
+#ifdef _DEBUG
+ if (LoggingOn(LF_CLASSLOADER, LL_INFO10000))
+ {
+ SString name;
+ TypeString::AppendTypeKeyDebug(name, pKey);
+ LOG((LF_CLASSLOADER, LL_INFO10000, "GENERICS:CreateTypeHandleForTypeKey: found dyn-link ngen type %S with pointer %p in module %S\n", name.GetUnicode(), typeHnd.AsPtr(),
+ typeHnd.GetLoaderModule()->GetDebugName()));
+ }
+#endif
+ if (typeHnd.GetLoadLevel() == CLASS_LOAD_UNRESTOREDTYPEKEY)
+ {
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ typeHnd.DoRestoreTypeKey();
+ }
+ }
+ else
+#endif // FEATURE_FULL_NGEN
+ {
+ if (IsCanonicalGenericInstantiation(pKey->GetInstantiation()))
+ {
+ typeHnd = CreateTypeHandleForTypeDefThrowing(pKey->GetModule(),
+ pKey->GetTypeToken(),
+ pKey->GetInstantiation(),
+ pamTracker);
+ }
+ else
+ {
+ typeHnd = CreateTypeHandleForNonCanonicalGenericInstantiation(pKey,
+ pamTracker);
+ }
+#if defined(_DEBUG) && !defined(CROSSGEN_COMPILE)
+ if (Nullable::IsNullableType(typeHnd))
+ Nullable::CheckFieldOffsets(typeHnd);
+#endif
+ }
+ }
+ else if (pKey->GetKind() == ELEMENT_TYPE_FNPTR)
+ {
+ Module *pLoaderModule = ComputeLoaderModule(pKey);
+ pLoaderModule->GetLoaderAllocator()->EnsureInstantiation(NULL, Instantiation(pKey->GetRetAndArgTypes(), pKey->GetNumArgs() + 1));
+
+ PREFIX_ASSUME(pLoaderModule!=NULL);
+ DWORD numArgs = pKey->GetNumArgs();
+ BYTE* mem = (BYTE*) pamTracker->Track(pLoaderModule->GetAssembly()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(FnPtrTypeDesc)) + S_SIZE_T(sizeof(TypeHandle)) * S_SIZE_T(numArgs)));
+
+ typeHnd = TypeHandle(new(mem) FnPtrTypeDesc(pKey->GetCallConv(), numArgs, pKey->GetRetAndArgTypes()));
+ }
+ else
+ {
+ Module *pLoaderModule = ComputeLoaderModule(pKey);
+ PREFIX_ASSUME(pLoaderModule!=NULL);
+
+ CorElementType kind = pKey->GetKind();
+ TypeHandle paramType = pKey->GetElementType();
+ MethodTable *templateMT;
+
+ // Create a new type descriptor and insert into constructed type table
+ if (CorTypeInfo::IsArray(kind))
+ {
+ DWORD rank = pKey->GetRank();
+ THROW_BAD_FORMAT_MAYBE((kind != ELEMENT_TYPE_ARRAY) || rank > 0, BFA_MDARRAY_BADRANK, pLoaderModule);
+ THROW_BAD_FORMAT_MAYBE((kind != ELEMENT_TYPE_SZARRAY) || rank == 1, BFA_SDARRAY_BADRANK, pLoaderModule);
+
+ // Arrays of BYREFS not allowed
+ if (paramType.GetInternalCorElementType() == ELEMENT_TYPE_BYREF ||
+ paramType.GetInternalCorElementType() == ELEMENT_TYPE_TYPEDBYREF)
+ {
+ ThrowTypeLoadException(pKey, IDS_CLASSLOAD_CANTCREATEARRAYCLASS);
+ }
+
+ // We really don't need this check anymore.
+ if (rank > MAX_RANK)
+ {
+ ThrowTypeLoadException(pKey, IDS_CLASSLOAD_RANK_TOOLARGE);
+ }
+
+ templateMT = pLoaderModule->CreateArrayMethodTable(paramType, kind, rank, pamTracker);
+
+ BYTE* mem = (BYTE*) pamTracker->Track(pLoaderModule->GetAssembly()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(ArrayTypeDesc))));
+ typeHnd = TypeHandle(new(mem) ArrayTypeDesc(templateMT, paramType));
+ }
+ else
+ {
+ // no parameterized type allowed on a reference
+ if (paramType.GetInternalCorElementType() == ELEMENT_TYPE_BYREF ||
+ paramType.GetInternalCorElementType() == ELEMENT_TYPE_TYPEDBYREF)
+ {
+ ThrowTypeLoadException(pKey, IDS_CLASSLOAD_GENERAL);
+ }
+
+ // let <Type>* type have a method table
+ // System.UIntPtr's method table is used for types like int*, void *, string * etc.
+ if (kind == ELEMENT_TYPE_PTR)
+ templateMT = MscorlibBinder::GetElementType(ELEMENT_TYPE_U);
+ else
+ templateMT = NULL;
+
+ BYTE* mem = (BYTE*) pamTracker->Track(pLoaderModule->GetAssembly()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(ParamTypeDesc))));
+ typeHnd = TypeHandle(new(mem) ParamTypeDesc(kind, templateMT, paramType));
+ }
+ }
+
+ RETURN typeHnd;
+}
+
+// Publish a type (and possibly member information) in the loader's
+// tables. Types are published before they are fully loaded. In
+// particular, exact parent info (base class and interfaces) is loaded
+// in a later phase.
+/*static*/
+TypeHandle ClassLoader::PublishType(TypeKey *pTypeKey, TypeHandle typeHnd)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(typeHnd));
+ PRECONDITION(CheckPointer(pTypeKey));
+
+ // Key must match that of the handle
+ PRECONDITION(typeHnd.CheckMatchesKey(pTypeKey));
+
+ // Don't publish array template method tables; these are accessed only through type descs
+ PRECONDITION(typeHnd.IsTypeDesc() || !typeHnd.AsMethodTable()->IsArray());
+ }
+ CONTRACTL_END;
+
+
+ if (pTypeKey->IsConstructed())
+ {
+ Module *pLoaderModule = ComputeLoaderModule(pTypeKey);
+ EETypeHashTable *pTable = pLoaderModule->GetAvailableParamTypes();
+
+ CrstHolder ch(&pLoaderModule->GetClassLoader()->m_AvailableTypesLock);
+
+ // The type could have been loaded by a different thread as a side effect of avoiding deadlocks caused by LoadsTypeViolation.
+ TypeHandle existing = pTable->GetValue(pTypeKey);
+ if (!existing.IsNull())
+ return existing;
+
+ pTable->InsertValue(typeHnd);
+
+#ifdef _DEBUG
+ // Checks to help ensure that the mscorlib in the ngen process does not get contaminated with pointers to the compilation domains.
+ if (pLoaderModule->IsSystem() && IsCompilationProcess() && pLoaderModule->HasNativeImage())
+ {
+ CorElementType kind = pTypeKey->GetKind();
+ MethodTable *typeHandleMethodTable = typeHnd.GetMethodTable();
+ if ((typeHandleMethodTable != NULL) && (typeHandleMethodTable->GetLoaderAllocator() != pLoaderModule->GetLoaderAllocator()))
+ {
+ _ASSERTE(!"MethodTable of type loaded into mscorlib during NGen is not from mscorlib!");
+ }
+ if ((kind != ELEMENT_TYPE_FNPTR) && (kind != ELEMENT_TYPE_VAR) && (kind != ELEMENT_TYPE_MVAR))
+ {
+ if ((kind == ELEMENT_TYPE_SZARRAY) || (kind == ELEMENT_TYPE_ARRAY) || (kind == ELEMENT_TYPE_BYREF) || (kind == ELEMENT_TYPE_PTR) || (kind == ELEMENT_TYPE_VALUETYPE))
+ {
+ // Check to ensure param value is also part of mscorlib.
+ if (pTypeKey->GetElementType().GetLoaderAllocator() != pLoaderModule->GetLoaderAllocator())
+ {
+ _ASSERTE(!"Param value of type key used to load type during NGEN not located within mscorlib yet type is placed into mscorlib");
+ }
+ }
+ else if (kind == ELEMENT_TYPE_FNPTR)
+ {
+ // Check to ensure the parameter types of fnptr are in mscorlib
+ for (DWORD i = 0; i <= pTypeKey->GetNumArgs(); i++)
+ {
+ if (pTypeKey->GetRetAndArgTypes()[i].GetLoaderAllocator() != pLoaderModule->GetLoaderAllocator())
+ {
+ _ASSERTE(!"Ret or Arg type of function pointer type key used to load type during NGEN not located within mscorlib yet type is placed into mscorlib");
+ }
+ }
+ }
+ else if (kind == ELEMENT_TYPE_CLASS)
+ {
+ // Check to ensure that the generic parameters are all within mscorlib
+ for (DWORD i = 0; i < pTypeKey->GetNumGenericArgs(); i++)
+ {
+ if (pTypeKey->GetInstantiation()[i].GetLoaderAllocator() != pLoaderModule->GetLoaderAllocator())
+ {
+ _ASSERTE(!"Instantiation parameter of generic class type key used to load type during NGEN not located within mscorlib yet type is placed into mscorlib");
+ }
+ }
+ }
+ else
+ {
+ // Should not be able to get here
+ _ASSERTE(!"Unknown type key type");
+ }
+ }
+ }
+#endif // DEBUG
+ }
+ else
+ {
+ Module *pModule = pTypeKey->GetModule();
+ mdTypeDef typeDef = pTypeKey->GetTypeToken();
+
+ CrstHolder ch(&pModule->GetClassLoader()->m_AvailableTypesLock);
+
+ // ! We cannot fail after this point.
+ CANNOTTHROWCOMPLUSEXCEPTION();
+ FAULT_FORBID();
+
+ // The type could have been loaded by a different thread as a side effect of avoiding deadlocks caused by LoadsTypeViolation.
+ TypeHandle existing = pModule->LookupTypeDef(typeDef);
+ if (!existing.IsNull())
+ return existing;
+
+ MethodTable *pMT = typeHnd.AsMethodTable();
+
+ MethodTable::IntroducedMethodIterator it(pMT);
+ for (; it.IsValid(); it.Next())
+ {
+ MethodDesc * pMD = it.GetMethodDesc();
+ CONSISTENCY_CHECK(pMD != NULL && pMD->GetMethodTable() == pMT);
+ if (!pMD->IsUnboxingStub())
+ {
+ pModule->EnsuredStoreMethodDef(pMD->GetMemberDef(), pMD);
+ }
+ }
+
+ ApproxFieldDescIterator fdIterator(pMT, ApproxFieldDescIterator::ALL_FIELDS);
+ FieldDesc* pFD;
+
+ while ((pFD = fdIterator.Next()) != NULL)
+ {
+ if (pFD->GetEnclosingMethodTable() == pMT)
+ {
+ pModule->EnsuredStoreFieldDef(pFD->GetMemberDef(), pFD);
+ }
+ }
+
+ // Publish the type last - to ensure that nobody can see it until all the method and field RID maps are filled in
+ pModule->EnsuredStoreTypeDef(typeDef, typeHnd);
+ }
+
+ return typeHnd;
+}
+
+// Notify profiler and debugger that a type load has completed
+// Also adjust perf counters
+/*static*/
+void ClassLoader::Notify(TypeHandle typeHnd)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(typeHnd));
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CLASSLOADER, LL_INFO1000, "Notify: %p %s\n", typeHnd.AsPtr(), typeHnd.IsTypeDesc() ? "typedesc" : typeHnd.AsMethodTable()->GetDebugClassName()));
+
+ if (typeHnd.IsTypeDesc())
+ return;
+
+ MethodTable * pMT = typeHnd.AsMethodTable();
+
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackClasses());
+ // We don't tell profilers about typedescs, as per IF above. Also, we don't
+ // tell profilers about:
+ if (
+ // ...generics with unbound variables
+ (!pMT->ContainsGenericVariables()) &&
+ // ...or array method tables
+ // (This check is mainly for NGEN restore, as JITted code won't hit
+ // this code path for array method tables anyway)
+ (!pMT->IsArray()))
+ {
+ LOG((LF_CLASSLOADER, LL_INFO1000, "Notifying profiler of Started1 %p %s\n", pMT, pMT->GetDebugClassName()));
+ // Record successful load of the class for the profiler
+ g_profControlBlock.pProfInterface->ClassLoadStarted(TypeHandleToClassID(typeHnd));
+
+ //
+ // Profiler can turn off TrackClasses during the Started() callback. Need to
+ // retest the flag here.
+ //
+ if (CORProfilerTrackClasses())
+ {
+ LOG((LF_CLASSLOADER, LL_INFO1000, "Notifying profiler of Finished1 %p %s\n", pMT, pMT->GetDebugClassName()));
+ g_profControlBlock.pProfInterface->ClassLoadFinished(TypeHandleToClassID(typeHnd),
+ S_OK);
+ }
+ }
+ END_PIN_PROFILER();
+ }
+#endif //PROFILING_SUPPORTED
+
+ g_IBCLogger.LogMethodTableAccess(pMT);
+
+ if (pMT->IsTypicalTypeDefinition())
+ {
+ LOG((LF_CLASSLOADER, LL_INFO100, "Successfully loaded class %s\n", pMT->GetDebugClassName()));
+
+#ifdef DEBUGGING_SUPPORTED
+ {
+ Module * pModule = pMT->GetModule();
+ // Update metadata for dynamic module.
+ pModule->UpdateDynamicMetadataIfNeeded();
+ }
+
+ if (CORDebuggerAttached())
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "NotifyDebuggerLoad clsload 2239 class %s\n", pMT->GetDebugClassName()));
+ typeHnd.NotifyDebuggerLoad(NULL, FALSE);
+ }
+#endif // DEBUGGING_SUPPORTED
+
+#if defined(ENABLE_PERF_COUNTERS)
+ GetPerfCounters().m_Loading.cClassesLoaded ++;
+#endif
+ }
+}
+
+
+//-----------------------------------------------------------------------------
+// Common helper for LoadTypeHandleForTypeKey and LoadTypeHandleForTypeKeyNoLock.
+// Makes the root level call to kick off the transitive closure walk for
+// the final level pushes.
+//-----------------------------------------------------------------------------
+static void PushFinalLevels(TypeHandle typeHnd, ClassLoadLevel targetLevel, const InstantiationContext *pInstContext)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ LOADS_TYPE(targetLevel);
+ }
+ CONTRACTL_END
+
+
+ // This phase brings the type and all its transitive dependencies to their
+ // final state, sans the IsFullyLoaded bit.
+ if (targetLevel >= CLASS_DEPENDENCIES_LOADED)
+ {
+ BOOL fBailed = FALSE;
+ typeHnd.DoFullyLoad(NULL, CLASS_DEPENDENCIES_LOADED, NULL, &fBailed, pInstContext);
+ }
+
+ // This phase does access/constraint and other type-safety checks on the type
+ // and on its transitive dependencies.
+ if (targetLevel == CLASS_LOADED)
+ {
+ DFLPendingList pendingList;
+ BOOL fBailed = FALSE;
+
+ typeHnd.DoFullyLoad(NULL, CLASS_LOADED, &pendingList, &fBailed, pInstContext);
+
+
+ // In the case of a circular dependency, one or more types will have
+ // had their promotions deferred.
+ //
+ // If we got to this point, all checks have successfully passed on
+ // the transitive closure (otherwise, DoFullyLoad would have thrown.)
+ //
+ // So we can go ahead and mark everyone as fully loaded.
+ //
+ UINT numTH = pendingList.Count();
+ TypeHandle *pTHPending = pendingList.Table();
+ for (UINT i = 0; i < numTH; i++)
+ {
+ // NOTE: It is possible for duplicates to appear in this list so
+ // don't do any operation that isn't idempotent.
+
+ pTHPending[i].SetIsFullyLoaded();
+ }
+ }
+}
+
+
+//
+TypeHandle ClassLoader::LoadTypeHandleForTypeKey(TypeKey *pTypeKey,
+ TypeHandle typeHnd,
+ ClassLoadLevel targetLevel/*=CLASS_LOADED*/,
+ const InstantiationContext *pInstContext/*=NULL*/)
+{
+
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ LOADS_TYPE(targetLevel);
+ }
+ CONTRACTL_END
+
+ GCX_PREEMP();
+
+ // Type loading can be recursive. Probe for sufficient stack.
+ //
+ // Execution of the FINALLY in LoadTypeHandleForTypeKey_Body can eat
+ // a lot of stack because LoadTypeHandleForTypeKey_Inner can rethrow
+ // any non-SO exceptions that it takes, so ensure that we have plenty
+ // of stack before getting into it (>24 pages on AMD64; remember
+ // that the number of pages probed is 2*N on AMD64).
+ INTERIOR_STACK_PROBE_FOR(GetThread(),20);
+
+#ifdef _DEBUG
+ if (LoggingOn(LF_CLASSLOADER, LL_INFO1000))
+ {
+ SString name;
+ TypeString::AppendTypeKeyDebug(name, pTypeKey);
+ LOG((LF_CLASSLOADER, LL_INFO10000, "PHASEDLOAD: LoadTypeHandleForTypeKey for type %S to level %s\n", name.GetUnicode(), classLoadLevelName[targetLevel]));
+ CrstHolder unresolvedClassLockHolder(&m_UnresolvedClassLock);
+ m_pUnresolvedClassHash->Dump();
+ }
+#endif
+
+ // When using domain neutral assemblies (and not eagerly propagating dependency loads),
+ // it's possible to get here without having injected the module into the current app domain.
+ // GetDomainFile will accomplish that.
+
+ if (!pTypeKey->IsConstructed())
+ {
+ pTypeKey->GetModule()->GetDomainFile();
+ }
+
+ ClassLoadLevel currentLevel = typeHnd.IsNull() ? CLASS_LOAD_BEGIN : typeHnd.GetLoadLevel();
+ ClassLoadLevel targetLevelUnderLock = targetLevel < CLASS_DEPENDENCIES_LOADED ? targetLevel : (ClassLoadLevel) (CLASS_DEPENDENCIES_LOADED-1);
+ if (currentLevel < targetLevelUnderLock)
+ {
+ typeHnd = LoadTypeHandleForTypeKey_Body(pTypeKey,
+ typeHnd,
+ targetLevelUnderLock);
+ _ASSERTE(!typeHnd.IsNull());
+ }
+ _ASSERTE(typeHnd.GetLoadLevel() >= targetLevelUnderLock);
+
+ PushFinalLevels(typeHnd, targetLevel, pInstContext);
+
+ END_INTERIOR_STACK_PROBE;
+
+ return typeHnd;
+}
+
+//
+TypeHandle ClassLoader::LoadTypeHandleForTypeKeyNoLock(TypeKey *pTypeKey,
+ ClassLoadLevel targetLevel/*=CLASS_LOADED*/,
+ const InstantiationContext *pInstContext/*=NULL*/)
+{
+
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ LOADS_TYPE(targetLevel);
+ PRECONDITION(CheckPointer(pTypeKey));
+ PRECONDITION(targetLevel >= 0 && targetLevel <= CLASS_LOADED);
+ }
+ CONTRACTL_END
+
+ GCX_PREEMP();
+
+ TypeHandle typeHnd = TypeHandle();
+
+ // Type loading can be recursive. Probe for sufficient stack.
+ INTERIOR_STACK_PROBE_FOR(GetThread(),8);
+
+ ClassLoadLevel currentLevel = CLASS_LOAD_BEGIN;
+ ClassLoadLevel targetLevelUnderLock = targetLevel < CLASS_DEPENDENCIES_LOADED ? targetLevel : (ClassLoadLevel) (CLASS_DEPENDENCIES_LOADED-1);
+ while (currentLevel < targetLevelUnderLock)
+ {
+ typeHnd = DoIncrementalLoad(pTypeKey, typeHnd, currentLevel);
+ CONSISTENCY_CHECK(typeHnd.GetLoadLevel() > currentLevel);
+ currentLevel = typeHnd.GetLoadLevel();
+ }
+
+ PushFinalLevels(typeHnd, targetLevel, pInstContext);
+
+ END_INTERIOR_STACK_PROBE;
+
+ return typeHnd;
+}
+
+//---------------------------------------------------------------------------------------
+//
+class PendingTypeLoadHolder
+{
+ Thread * m_pThread;
+ PendingTypeLoadEntry * m_pEntry;
+ PendingTypeLoadHolder * m_pPrevious;
+
+public:
+ PendingTypeLoadHolder(PendingTypeLoadEntry * pEntry)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pThread = GetThread();
+ m_pEntry = pEntry;
+
+ m_pPrevious = m_pThread->GetPendingTypeLoad();
+ m_pThread->SetPendingTypeLoad(this);
+ }
+
+ ~PendingTypeLoadHolder()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(m_pThread->GetPendingTypeLoad() == this);
+ m_pThread->SetPendingTypeLoad(m_pPrevious);
+ }
+
+ static bool CheckForDeadLockOnCurrentThread(PendingTypeLoadEntry * pEntry)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ PendingTypeLoadHolder * pCurrent = GetThread()->GetPendingTypeLoad();
+
+ while (pCurrent != NULL)
+ {
+ if (pCurrent->m_pEntry == pEntry)
+ return true;
+
+ pCurrent = pCurrent->m_pPrevious;
+ }
+
+ return false;
+ }
+};
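+
+// Illustrative sketch: each thread keeps a singly linked stack of the
+// PendingTypeLoadEntry records it currently owns, so a cycle is detected by a
+// simple walk. If thread T is loading A, which needs B, which needs A again on
+// the same thread (pEntryA and pEntryB are hypothetical):
+//
+//   PendingTypeLoadHolder hA(pEntryA);  // T's chain: A
+//   PendingTypeLoadHolder hB(pEntryB);  // T's chain: B -> A
+//   PendingTypeLoadHolder::CheckForDeadLockOnCurrentThread(pEntryA); // true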
+
+//---------------------------------------------------------------------------------------
+//
+TypeHandle
+ClassLoader::LoadTypeHandleForTypeKey_Body(
+ TypeKey * pTypeKey,
+ TypeHandle typeHnd,
+ ClassLoadLevel targetLevel)
+{
+ CONTRACT(TypeHandle)
+ {
+ STANDARD_VM_CHECK;
+ POSTCONDITION(!typeHnd.IsNull() && typeHnd.GetLoadLevel() >= targetLevel);
+ }
+ CONTRACT_END
+
+ if (!pTypeKey->IsConstructed())
+ {
+ Module *pModule = pTypeKey->GetModule();
+ mdTypeDef cl = pTypeKey->GetTypeToken();
+
+ STRESS_LOG2(LF_CLASSLOADER, LL_INFO100000, "LoadTypeHandle: Loading Class from Module %p token %x\n", pModule, cl);
+
+#ifdef _DEBUG
+ IMDInternalImport* pInternalImport = pModule->GetMDImport();
+ LPCUTF8 className;
+ LPCUTF8 nameSpace;
+ if (FAILED(pInternalImport->GetNameOfTypeDef(cl, &className, &nameSpace)))
+ {
+ className = nameSpace = "Invalid TypeDef record";
+ }
+ if (g_pConfig->ShouldBreakOnClassLoad(className))
+ CONSISTENCY_CHECK_MSGF(false, ("BreakOnClassLoad: typename '%s' ", className));
+#endif
+ }
+
+retry:
+ ReleaseHolder<PendingTypeLoadEntry> pLoadingEntry;
+
+ CrstHolderWithState unresolvedClassLockHolder(&m_UnresolvedClassLock);
+
+ // Is it in the hash of classes currently being loaded?
+ pLoadingEntry = m_pUnresolvedClassHash->GetValue(pTypeKey);
+ if (pLoadingEntry)
+ {
+ pLoadingEntry->AddRef();
+
+ // It is in the hash, which means that another thread is waiting for it (or that we are
+ // already loading this class on this thread, which should never happen, since that implies
+ // a recursive dependency).
+ unresolvedClassLockHolder.Release();
+
+ //
+ // Before waiting, check one last time whether the type handle is already
+ // sufficiently loaded; this prevents deadlocks
+ //
+ {
+ if (typeHnd.IsNull())
+ {
+ typeHnd = LookupTypeHandleForTypeKey(pTypeKey);
+ }
+
+ if (!typeHnd.IsNull())
+ {
+ if (typeHnd.GetLoadLevel() >= targetLevel)
+ RETURN typeHnd;
+ }
+ }
+
+ if (PendingTypeLoadHolder::CheckForDeadLockOnCurrentThread(pLoadingEntry))
+ {
+ // Attempting recursive load
+ ClassLoader::ThrowTypeLoadException(pTypeKey, IDS_CLASSLOAD_GENERAL);
+ }
+
+ //
+ // Code that violates the type load-level ordering rules depends on the type load failing in the case of a cyclic
+ // dependency that would otherwise lead to deadlock. We speculatively proceed with the type load so that it fails in
+ // the right spot, in a backward-compatible way. If the type load succeeds anyway, we let only one type win in PublishType.
+ //
+ if (typeHnd.IsNull() && GetThread()->HasThreadStateNC(Thread::TSNC_LoadsTypeViolation))
+ {
+ PendingTypeLoadHolder ptlh(pLoadingEntry);
+ typeHnd = DoIncrementalLoad(pTypeKey, TypeHandle(), CLASS_LOAD_BEGIN);
+ goto retry;
+ }
+
+ {
+ // Wait for class to be loaded by another thread. This is where we start tracking the
+ // entry, so there is an implicit Acquire in our use of Assign here.
+ CrstHolder loadingEntryLockHolder(&pLoadingEntry->m_Crst);
+ _ASSERTE(pLoadingEntry->HasLock());
+ }
+
+ // Result of other thread loading the class
+ HRESULT hr = pLoadingEntry->m_hrResult;
+
+ if (FAILED(hr)) {
+
+ //
+ // Redo the lookup one more time and return a valid type if possible. The other thread could
+ // have hit an error while loading the type to a higher level than we need.
+ //
+ {
+ if (typeHnd.IsNull())
+ {
+ typeHnd = LookupTypeHandleForTypeKey(pTypeKey);
+ }
+
+ if (!typeHnd.IsNull())
+ {
+ if (typeHnd.GetLoadLevel() >= targetLevel)
+ RETURN typeHnd;
+ }
+ }
+
+ if (hr == E_ABORT) {
+ LOG((LF_CLASSLOADER, LL_INFO10, "need to retry LoadTypeHandle: %x\n", hr));
+ goto retry;
+ }
+
+ LOG((LF_CLASSLOADER, LL_INFO10, "Failed to load in other entry: %x\n", hr));
+
+ if (hr == E_OUTOFMEMORY) {
+ COMPlusThrowOM();
+ }
+
+ pLoadingEntry->ThrowException();
+ }
+
+ // Get a pointer to the EEClass being loaded
+ typeHnd = pLoadingEntry->m_typeHandle;
+
+ if (!typeHnd.IsNull())
+ {
+ // If the type load on the other thread loaded the type to the needed level, return it here.
+ if (typeHnd.GetLoadLevel() >= targetLevel)
+ RETURN typeHnd;
+ }
+
+ // The type load on the other thread did not load the type "enough". Begin the type load
+ // process again to cause us to load to the needed level.
+ goto retry;
+ }
+
+ if (typeHnd.IsNull())
+ {
+ // The class was not being loaded. However, it may have already been loaded after our
+ // first LoadTypeHandleThrowIfFailed() and before taking the lock.
+ typeHnd = LookupTypeHandleForTypeKey(pTypeKey);
+ }
+
+ ClassLoadLevel currentLevel = CLASS_LOAD_BEGIN;
+ if (!typeHnd.IsNull())
+ {
+ currentLevel = typeHnd.GetLoadLevel();
+ if (currentLevel >= targetLevel)
+ RETURN typeHnd;
+ }
+
+ // It was not loaded, and it is not being loaded, so we must load it. Create a new LoadingEntry
+ // and acquire it immediately so that other threads will block.
+ pLoadingEntry = new PendingTypeLoadEntry(*pTypeKey, typeHnd); // this atomically creates a crst and acquires it
+
+ if (!(m_pUnresolvedClassHash->InsertValue(pLoadingEntry)))
+ {
+ COMPlusThrowOM();
+ }
+
+ // Leave the global lock, so that other threads may now start waiting on our class's lock
+ unresolvedClassLockHolder.Release();
+
+ EX_TRY
+ {
+ PendingTypeLoadHolder ptlh(pLoadingEntry);
+
+ TRIGGERS_TYPELOAD();
+
+ while (currentLevel < targetLevel)
+ {
+ typeHnd = DoIncrementalLoad(pTypeKey, typeHnd, currentLevel);
+ CONSISTENCY_CHECK(typeHnd.GetLoadLevel() > currentLevel);
+ currentLevel = typeHnd.GetLoadLevel();
+
+ // If other threads are waiting for this load, unblock them as soon as possible to prevent deadlocks.
+ if (pLoadingEntry->HasWaiters())
+ break;
+ }
+
+ _ASSERTE(!typeHnd.IsNull());
+ pLoadingEntry->SetResult(typeHnd);
+ }
+ EX_HOOK
+ {
+ LOG((LF_CLASSLOADER, LL_INFO10, "Caught an exception loading: %x, %0x (Module)\n", pTypeKey->GetTypeToken(), pTypeKey->GetModule()));
+
+ if (!GetThread()->HasThreadStateNC(Thread::TSNC_LoadsTypeViolation))
+ {
+ // Fix up the loading entry.
+ Exception *pException = GET_EXCEPTION();
+ pLoadingEntry->SetException(pException);
+ }
+
+ // Unlink this class from the unresolved class list.
+ unresolvedClassLockHolder.Acquire();
+ m_pUnresolvedClassHash->DeleteValue(pTypeKey);
+
+ // Release the lock before proceeding. The unhandled exception filters take a number of locks
+ // that have ordering violations with this lock.
+ unresolvedClassLockHolder.Release();
+ }
+ EX_END_HOOK;
+
+ // Unlink this class from the unresolved class list.
+ unresolvedClassLockHolder.Acquire();
+ m_pUnresolvedClassHash->DeleteValue(pTypeKey);
+
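+ // If we broke out of the incremental-load loop early to unblock waiters, retry so that this
+ // thread still drives the type up to the requested level.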
+ if (currentLevel < targetLevel)
+ goto retry;
+
+ RETURN typeHnd;
+} // ClassLoader::LoadTypeHandleForTypeKey_Body
+
+#endif //!DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+//static
+TypeHandle
+ClassLoader::LoadArrayTypeThrowing(
+ TypeHandle elemType,
+ CorElementType arrayKind,
+ unsigned rank, //=0
+ LoadTypesFlag fLoadTypes, //=LoadTypes
+ ClassLoadLevel level)
+{
+ CONTRACT(TypeHandle)
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ if (FORBIDGC_LOADER_USE_ENABLED() || fLoadTypes != LoadTypes) { LOADS_TYPE(CLASS_LOAD_BEGIN); } else { LOADS_TYPE(level); }
+ if (fLoadTypes == DontLoadTypes) SO_TOLERANT; else SO_INTOLERANT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ POSTCONDITION(CheckPointer(RETVAL, ((fLoadTypes == LoadTypes) ? NULL_NOT_OK : NULL_OK)));
+ }
+ CONTRACT_END
+
+ CorElementType predefinedElementType = ELEMENT_TYPE_END;
+
+ // Try finding it in our cache of primitive SD arrays
+ if (arrayKind == ELEMENT_TYPE_SZARRAY) {
+ predefinedElementType = elemType.GetSignatureCorElementType();
+ if (predefinedElementType <= ELEMENT_TYPE_R8) {
+ ArrayTypeDesc* typeDesc = g_pPredefinedArrayTypes[predefinedElementType];
+ if (typeDesc != 0)
+ RETURN(TypeHandle(typeDesc));
+ }
+ // This call to AsPtr is somewhat bogus and only used
+ // as an optimization. If the TypeHandle is really a TypeDesc
+ // then the equality checks for the optimizations below will
+ // fail. Thus ArrayMT should not be used elsewhere in this function
+ else if (elemType.AsPtr() == PTR_VOID(g_pObjectClass)) {
+ // Code duplicated because Object[]'s SigCorElementType is E_T_CLASS, not OBJECT
+ ArrayTypeDesc* typeDesc = g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT];
+ if (typeDesc != 0)
+ RETURN(TypeHandle(typeDesc));
+ predefinedElementType = ELEMENT_TYPE_OBJECT;
+ }
+ else if (elemType.AsPtr() == PTR_VOID(g_pStringClass)) {
+ // Code duplicated because String[]'s SigCorElementType is E_T_CLASS, not STRING
+ ArrayTypeDesc* typeDesc = g_pPredefinedArrayTypes[ELEMENT_TYPE_STRING];
+ if (typeDesc != 0)
+ RETURN(TypeHandle(typeDesc));
+ predefinedElementType = ELEMENT_TYPE_STRING;
+ }
+ else {
+ predefinedElementType = ELEMENT_TYPE_END;
+ }
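+ // SZARRAY always denotes a single-dimensional, zero-based array, so its rank is 1 by definition.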
+ rank = 1;
+ }
+
+#ifndef DACCESS_COMPILE
+ // To avoid loading useless shared instantiations, normalize shared instantiations to the canonical form
+ // (e.g. List<_Canon>[] -> _Canon[])
+ // The denormalized shared instantiations should be needed only during JITing, so it is fine to skip this
+ // for DACCESS_COMPILE.
+ if (elemType.IsCanonicalSubtype())
+ {
+ elemType = ClassLoader::CanonicalizeGenericArg(elemType);
+ }
+#endif
+
+ TypeKey key(arrayKind, elemType, FALSE, rank);
+ TypeHandle th = LoadConstructedTypeThrowing(&key, fLoadTypes, level);
+
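+ // Cache fully loaded primitive/Object/String SZARRAY types so that the fast-path lookup above can hit next time.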
+ if (predefinedElementType != ELEMENT_TYPE_END && !th.IsNull() && th.IsFullyLoaded())
+ {
+ g_pPredefinedArrayTypes[predefinedElementType] = th.AsArray();
+ }
+
+ RETURN(th);
+} // ClassLoader::LoadArrayTypeThrowing
+
+#ifndef DACCESS_COMPILE
+
+VOID ClassLoader::AddAvailableClassDontHaveLock(Module *pModule,
+ mdTypeDef classdef,
+ AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+#ifdef FEATURE_COMINTEROP
+ _ASSERTE(!pModule->GetAssembly()->IsWinMD()); // WinMD files should never get into this path, otherwise provide szWinRtNamespacePrefix
+#endif
+
+ CrstHolder ch(&m_AvailableClassLock);
+ AddAvailableClassHaveLock(
+ pModule,
+ classdef,
+ pamTracker,
+ NULL, // szWinRtNamespacePrefix
+ 0); // cchWinRtNamespacePrefix
+}
+
+// This routine must be single threaded! The reason is that there are situations which allow
+// the same class name to have two different mdTypeDef tokens (for example, we load two different DLLs
+// simultaneously, and they have some common class files, or we convert the same class file
+// simultaneously on two threads). The problem is that we do not want to overwrite the old
+// <classname> -> pModule mapping with the new one, because this may cause identity problems.
+//
+// This routine assumes you already have the lock. Use AddAvailableClassDontHaveLock() if you
+// don't have it.
+//
+// Also validates that TypeDef namespace begins with szWinRTNamespacePrefix (if it is not NULL).
+// The prefix should be NULL for normal non-WinRT .NET assemblies.
+//
+VOID ClassLoader::AddAvailableClassHaveLock(
+ Module * pModule,
+ mdTypeDef classdef,
+ AllocMemTracker * pamTracker,
+ LPCSTR szWinRtNamespacePrefix,
+ DWORD cchWinRtNamespacePrefix) // Optimization for faster prefix comparison implementation
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ EEClassHashTable *pClassHash = pModule->GetAvailableClassHash();
+ EEClassHashTable *pClassCaseInsHash = pModule->GetAvailableClassCaseInsHash();
+
+ LPCUTF8 pszName;
+ LPCUTF8 pszNameSpace;
+ HashDatum ThrowawayData;
+ IMDInternalImport *pMDImport = pModule->GetMDImport();
+ if (FAILED(pMDImport->GetNameOfTypeDef(classdef, &pszName, &pszNameSpace)))
+ {
+ pszName = pszNameSpace = "Invalid TypeDef token";
+ pModule->GetAssembly()->ThrowBadImageException(pszNameSpace, pszName, BFA_INVALID_TOKEN);
+ }
+
+ EEClassHashEntry_t *pBucket;
+ mdTypeDef enclosing;
+ if (SUCCEEDED(pMDImport->GetNestedClassProps(classdef, &enclosing))) {
+ // nested type
+
+ LPCUTF8 pszEnclosingName;
+ LPCUTF8 pszEnclosingNameSpace;
+ mdTypeDef enclEnclosing;
+
+ // Find this type's encloser's entry in the available table.
+ // We'll save a pointer to it in the new hash entry for this type.
+ BOOL fNestedEncl = SUCCEEDED(pMDImport->GetNestedClassProps(enclosing, &enclEnclosing));
+
+ EEClassHashTable::LookupContext sContext;
+ if (FAILED(pMDImport->GetNameOfTypeDef(enclosing, &pszEnclosingName, &pszEnclosingNameSpace)))
+ {
+ pszName = pszNameSpace = "Invalid TypeDef token";
+ pModule->GetAssembly()->ThrowBadImageException(pszNameSpace, pszName, BFA_INVALID_TOKEN);
+ }
+ if ((pBucket = pClassHash->GetValue(pszEnclosingNameSpace,
+ pszEnclosingName,
+ &ThrowawayData,
+ fNestedEncl,
+ &sContext)) != NULL) {
+ if (fNestedEncl) {
+ // Find the entry for the enclosing class - NOTE, this assumes that the
+ // enclosing class's TypeDef or ExportedType was inserted previously,
+ // which assumes that, when enumerating TypeDefs, we get the enclosing class first
+ while ((!CompareNestedEntryWithTypeDef(pMDImport,
+ enclEnclosing,
+ pClassHash,
+ pBucket->GetEncloser())) &&
+ (pBucket = pClassHash->FindNextNestedClass(pszEnclosingNameSpace,
+ pszEnclosingName,
+ &ThrowawayData,
+ &sContext)) != NULL);
+ }
+
+ if (!pBucket) // Enclosing type not found in hash table
+ pModule->GetAssembly()->ThrowBadImageException(pszNameSpace, pszName, BFA_ENCLOSING_TYPE_NOT_FOUND);
+
+ // In this hash table, if the lower bit is set, it means a Module, otherwise it means EEClass*
+ ThrowawayData = EEClassHashTable::CompressClassDef(classdef);
+ InsertValue(pClassHash, pClassCaseInsHash, pszNameSpace, pszName, ThrowawayData, pBucket, pamTracker);
+ }
+ }
+ else {
+ // Don't add duplicate top-level classes. Top-level classes are
+ // added to the beginning of the bucket, while nested classes are
+ // added to the end. So, a duplicate top-level class could hide
+ // the previous type's EEClass* entry in the hash table.
+ EEClassHashEntry_t *pCaseInsEntry = NULL;
+ LPUTF8 pszLowerCaseNS = NULL;
+ LPUTF8 pszLowerCaseName = NULL;
+
+ if (pClassCaseInsHash) {
+ CreateCanonicallyCasedKey(pszNameSpace, pszName, &pszLowerCaseNS, &pszLowerCaseName);
+ pCaseInsEntry = pClassCaseInsHash->AllocNewEntry(pamTracker);
+ }
+
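+ // Look for an existing top-level entry with the same namespace and name so that duplicates can be detected.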
+ EEClassHashEntry_t *pEntry = pClassHash->FindItem(pszNameSpace, pszName, FALSE, NULL);
+ if (pEntry) {
+ HashDatum Data = pEntry->GetData();
+
+ if (((size_t)Data & EECLASSHASH_TYPEHANDLE_DISCR) &&
+ ((size_t)Data & EECLASSHASH_MDEXPORT_DISCR)) {
+
+ // it's an ExportedType - check the 'already seen' bit; if it is on, report a class loading
+ // exception, otherwise set it
+ if ((size_t)Data & EECLASSHASH_ALREADYSEEN)
+ pModule->GetAssembly()->ThrowBadImageException(pszNameSpace, pszName, BFA_MULT_TYPE_SAME_NAME);
+ else {
+ Data = (HashDatum)((size_t)Data | EECLASSHASH_ALREADYSEEN);
+ pEntry->SetData(Data);
+ }
+ }
+ else {
+ // We want to throw an exception for a duplicate typedef.
+ // However, this used to be allowed in 1.0/1.1, and some third-party DLLs have
+ // been obfuscated so that they have duplicate private typedefs.
+ // We must allow this for old assemblies for app compat reasons
+#ifdef FEATURE_CORECLR
+#ifdef FEATURE_LEGACYNETCF
+ if (!RuntimeIsLegacyNetCF(0))
+#endif
+ {
+ pModule->GetAssembly()->ThrowBadImageException(pszNameSpace, pszName, BFA_MULT_TYPE_SAME_NAME);
+ }
+#else
+ LPCSTR pszVersion = NULL;
+ if (FAILED(pModule->GetMDImport()->GetVersionString(&pszVersion)))
+ {
+ pModule->GetAssembly()->ThrowBadImageException(pszNameSpace, pszName, BFA_MULT_TYPE_SAME_NAME);
+ }
+
+ SString ssVersion(SString::Utf8, pszVersion);
+ SString ssV1(SString::Literal, "v1.");
+
+ AdjustImageRuntimeVersion(&ssVersion);
+
+ // If not "v1.*", throw an exception
+ if (!ssVersion.BeginsWith(ssV1))
+ pModule->GetAssembly()->ThrowBadImageException(pszNameSpace, pszName, BFA_MULT_TYPE_SAME_NAME);
+#endif
+ }
+ }
+ else {
+ pEntry = pClassHash->AllocNewEntry(pamTracker);
+
+ CANNOTTHROWCOMPLUSEXCEPTION();
+ FAULT_FORBID();
+
+ pClassHash->InsertValueUsingPreallocatedEntry(pEntry, pszNameSpace, pszName, EEClassHashTable::CompressClassDef(classdef), NULL);
+
+ if (pClassCaseInsHash)
+ pClassCaseInsHash->InsertValueUsingPreallocatedEntry(pCaseInsEntry, pszLowerCaseNS, pszLowerCaseName, pEntry, pEntry->GetEncloser());
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // Check WinRT namespace prefix if required
+ if (szWinRtNamespacePrefix != NULL)
+ {
+ DWORD dwAttr;
+ if (FAILED(pMDImport->GetTypeDefProps(classdef, &dwAttr, NULL)))
+ {
+ pModule->GetAssembly()->ThrowBadImageException(pszNameSpace, pszName, BFA_INVALID_TOKEN);
+ }
+
+ // Check only public WinRT types that are not nested (i.e. only types available for binding, excluding NoPIA)
+ if (IsTdPublic(dwAttr) && IsTdWindowsRuntime(dwAttr))
+ {
+ // Guaranteed by the caller - code:ClassLoader::PopulateAvailableClassHashTable
+ _ASSERTE(cchWinRtNamespacePrefix == strlen(szWinRtNamespacePrefix));
+
+ // Now make sure the namespace is, or begins with, the namespace prefix (note: 'MyN' should not match namespace 'MyName')
+ // Note: Case insensitive comparison function has to be in sync with Win8 implementation
+ // (ExtractExactCaseNamespaceSegmentFromMetadataFile in com\WinRT\WinTypes\TypeResolution\NamespaceResolution.cpp)
+ BOOL fIsNamespaceSubstring = (pszNameSpace != NULL) &&
+ ((strncmp(pszNameSpace, szWinRtNamespacePrefix, cchWinRtNamespacePrefix) == 0) ||
+ (_strnicmp(pszNameSpace, szWinRtNamespacePrefix, cchWinRtNamespacePrefix) == 0));
+ BOOL fIsSubNamespace = fIsNamespaceSubstring &&
+ ((pszNameSpace[cchWinRtNamespacePrefix] == '\0') ||
+ (pszNameSpace[cchWinRtNamespacePrefix] == '.'));
+ if (!fIsSubNamespace)
+ {
+ pModule->GetAssembly()->ThrowBadImageException(pszNameSpace, pszName, BFA_WINRT_INVALID_NAMESPACE_FOR_TYPE);
+ }
+ }
+ }
+#endif // FEATURE_COMINTEROP
+ }
+}
+
+VOID ClassLoader::AddExportedTypeDontHaveLock(Module *pManifestModule,
+ mdExportedType cl,
+ AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ CrstHolder ch(&m_AvailableClassLock);
+ AddExportedTypeHaveLock(
+ pManifestModule,
+ cl,
+ pamTracker);
+}
+
+VOID ClassLoader::AddExportedTypeHaveLock(Module *pManifestModule,
+ mdExportedType cl,
+ AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+
+ mdToken mdImpl;
+ LPCSTR pszName;
+ LPCSTR pszNameSpace;
+ IMDInternalImport* pAsmImport = pManifestModule->GetMDImport();
+ if (FAILED(pAsmImport->GetExportedTypeProps(
+ cl,
+ &pszNameSpace,
+ &pszName,
+ &mdImpl,
+ NULL, // type def
+ NULL))) // flags
+ {
+ pManifestModule->GetAssembly()->ThrowBadImageException(pszNameSpace, pszName, BFA_INVALID_TOKEN);
+ }
+
+ HashDatum ThrowawayData;
+
+ if (TypeFromToken(mdImpl) == mdtExportedType)
+ {
+ // nested class
+ LPCUTF8 pszEnclosingNameSpace;
+ LPCUTF8 pszEnclosingName;
+ mdToken nextImpl;
+ if (FAILED(pAsmImport->GetExportedTypeProps(
+ mdImpl,
+ &pszEnclosingNameSpace,
+ &pszEnclosingName,
+ &nextImpl,
+ NULL, // type def
+ NULL))) // flags
+ {
+ pManifestModule->GetAssembly()->ThrowBadImageException(pszNameSpace, pszName, BFA_INVALID_TOKEN);
+ }
+
+ // Find the entry for the enclosing class - NOTE, this assumes that the
+ // enclosing class's ExportedType was inserted previously, which assumes that,
+ // when enumerating ExportedTypes, we get the enclosing class first
+ EEClassHashEntry_t *pBucket;
+ EEClassHashTable::LookupContext sContext;
+ if ((pBucket = pManifestModule->GetAvailableClassHash()->GetValue(pszEnclosingNameSpace,
+ pszEnclosingName,
+ &ThrowawayData,
+ TypeFromToken(nextImpl) == mdtExportedType,
+ &sContext)) != NULL) {
+ do {
+ // check to see if this is the correct class
+ if (EEClassHashTable::UncompressModuleAndClassDef(ThrowawayData) == mdImpl) {
+ ThrowawayData = EEClassHashTable::CompressClassDef(cl);
+
+ // we explicitly don't check for the case insensitive hash table because we know it can't have been created yet
+ pManifestModule->GetAvailableClassHash()->InsertValue(pszNameSpace, pszName, ThrowawayData, pBucket, pamTracker);
+ }
+ pBucket = pManifestModule->GetAvailableClassHash()->FindNextNestedClass(pszEnclosingNameSpace, pszEnclosingName, &ThrowawayData, &sContext);
+ } while (pBucket);
+ }
+
+ // If the encloser is not in the hash table, this nested class
+ // was defined in the manifest module, so it doesn't need to be added
+ return;
+ }
+ else {
+ // Defined in the manifest module - add to the hash table by TypeDef instead
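+ // An mdFileNil implementation token presumably means the ExportedType points at the manifest
+ // module itself, whose TypeDefs are added separately, so there is nothing to do here.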
+ if (mdImpl == mdFileNil)
+ return;
+
+ // Don't add duplicate top-level classes
+ // In this hash table, if the lower bit is set, it means a Module, otherwise it means EEClass*
+ ThrowawayData = EEClassHashTable::CompressClassDef(cl);
+ // ThrowawayData is an IN/OUT param. On the way in, it is the value to insert if the entry does
+ // not exist yet. On the way out, it points to the value stored in the hash table.
+ BOOL bFound;
+ pManifestModule->GetAvailableClassHash()->InsertValueIfNotFound(pszNameSpace, pszName, &ThrowawayData, NULL, FALSE, &bFound, pamTracker);
+ if (bFound) {
+
+ // Check for duplicate ExportedTypes
+ // Let it slide if it's pointing to the same type
+ mdToken foundTypeImpl;
+ if ((size_t)ThrowawayData & EECLASSHASH_MDEXPORT_DISCR)
+ {
+ mdExportedType foundExportedType = EEClassHashTable::UncompressModuleAndClassDef(ThrowawayData);
+ if (FAILED(pAsmImport->GetExportedTypeProps(
+ foundExportedType,
+ NULL, // namespace
+ NULL, // name
+ &foundTypeImpl,
+ NULL, // TypeDef
+ NULL))) // flags
+ {
+ pManifestModule->GetAssembly()->ThrowBadImageException(pszNameSpace, pszName, BFA_INVALID_TOKEN);
+ }
+ }
+ else
+ {
+ foundTypeImpl = mdFileNil;
+ }
+
+ if (mdImpl != foundTypeImpl)
+ {
+ pManifestModule->GetAssembly()->ThrowBadImageException(pszNameSpace, pszName, BFA_MULT_TYPE_SAME_NAME);
+ }
+ }
+ }
+}
+
+static MethodTable* GetEnclosingMethodTable(MethodTable *pMT)
+{
+ CONTRACT(MethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ POSTCONDITION(RETVAL == NULL || RETVAL->IsTypicalTypeDefinition());
+ }
+ CONTRACT_END;
+
+ MethodTable *pmtEnclosing = NULL;
+
+ // In the common case, the method table will be either shared or in the AppDomain we're currently
+ // running in. If this is true, we can just access its enclosing method table directly.
+ //
+ // However, if the current method table is actually in another AppDomain (for instance, we're reflecting
+ // across AppDomains), then we cannot get its enclosing type in our AppDomain since doing that may involve
+ // loading the enclosing type. Instead, we need to transition back to the original domain (which we
+ // should already be running in higher up on the stack) and get the method table we're looking for.
+
+ if (pMT->GetDomain()->IsSharedDomain() || pMT->GetDomain()->AsAppDomain() == GetAppDomain())
+ {
+ pmtEnclosing = pMT->LoadEnclosingMethodTable();
+ }
+ else
+ {
+ GCX_COOP();
+ ENTER_DOMAIN_PTR(pMT->GetDomain()->AsAppDomain(), ADV_RUNNINGIN);
+ pmtEnclosing = pMT->LoadEnclosingMethodTable();
+ END_DOMAIN_TRANSITION;
+ }
+
+ RETURN pmtEnclosing;
+}
+
+StaticAccessCheckContext::StaticAccessCheckContext(MethodDesc* pCallerMethod)
+{
+ CONTRACTL
+ {
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(CheckPointer(pCallerMethod));
+ }
+ CONTRACTL_END;
+
+ m_pCallerMethod = pCallerMethod;
+ m_pCallerMT = m_pCallerMethod->GetMethodTable();
+ m_pCallerAssembly = m_pCallerMT->GetAssembly();
+}
+
+StaticAccessCheckContext::StaticAccessCheckContext(MethodDesc* pCallerMethod, MethodTable* pCallerType)
+{
+ CONTRACTL
+ {
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(CheckPointer(pCallerMethod, NULL_OK));
+ PRECONDITION(CheckPointer(pCallerType));
+ }
+ CONTRACTL_END;
+
+ m_pCallerMethod = pCallerMethod;
+ m_pCallerMT = pCallerType;
+ m_pCallerAssembly = pCallerType->GetAssembly();
+}
+
+// Critical callers do not need the extra access checks
+bool StaticAccessCheckContext::IsCallerCritical()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pCallerMethod == NULL || !Security::IsMethodTransparent(m_pCallerMethod))
+ {
+ return true;
+ }
+
+ return false;
+}
+
+
+#ifndef FEATURE_CORECLR
+
+//******************************************************************************
+// This function determines whether a Type is accessible from
+// outside of the assembly it lives in.
+
+static BOOL IsTypeVisibleOutsideAssembly(MethodTable* pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ DWORD dwProtection;
+ // check all types in the nesting chain, as long as the inner types are public
+ while (IsTdPublic(dwProtection = pMT->GetClass()->GetProtection()) ||
+ IsTdNestedPublic(dwProtection))
+ {
+ // if type is nested, check outer type, too
+ if (IsTdNested(dwProtection))
+ {
+ pMT = GetEnclosingMethodTable(pMT);
+ }
+ // otherwise, type is visible outside of the assembly
+ else
+ {
+ return TRUE;
+ }
+ }
+ return FALSE;
+} // static BOOL IsTypeVisibleOutsideAssembly(MethodTable* pMT)
+
+#endif //!FEATURE_CORECLR
+
+//******************************************************************************
+
+// static
+AccessCheckOptions* AccessCheckOptions::s_pNormalAccessChecks;
+
+//******************************************************************************
+
+void AccessCheckOptions::Startup()
+{
+ STANDARD_VM_CONTRACT;
+
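+ // Create the shared options object used for normal accessibility checks.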
+ s_pNormalAccessChecks = new AccessCheckOptions(
+ AccessCheckOptions::kNormalAccessibilityChecks,
+ NULL,
+ FALSE,
+ (MethodTable *)NULL);
+}
+
+//******************************************************************************
+AccessCheckOptions::AccessCheckOptions(
+ const AccessCheckOptions & templateOptions,
+ BOOL throwIfTargetIsInaccessible,
+ BOOL skipCheckForCriticalCode /*=FALSE*/) :
+ m_pAccessContext(templateOptions.m_pAccessContext)
+{
+ WRAPPER_NO_CONTRACT;
+
+ Initialize(
+ templateOptions.m_accessCheckType,
+ throwIfTargetIsInaccessible,
+ templateOptions.m_pTargetMT,
+ templateOptions.m_pTargetMethod,
+ templateOptions.m_pTargetField,
+ skipCheckForCriticalCode);
+}
+
+//******************************************************************************
+// This function should only be called when normal accessibility is not possible.
+// It returns TRUE if the target can be accessed.
+// Otherwise, it either returns FALSE or throws an exception, depending on the value of throwIfTargetIsInaccessible.
+
+BOOL AccessCheckOptions::DemandMemberAccess(AccessCheckContext *pContext, MethodTable * pTargetMT, BOOL visibilityCheck) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_accessCheckType != kNormalAccessibilityChecks);
+ PRECONDITION(CheckPointer(pContext));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_accessCheckType != kNormalAccessibilityChecks);
+
+ if (NingenEnabled())
+ {
+ // NinGen should always perform normal accessibility checks
+ _ASSERTE(false);
+
+ if (m_fThrowIfTargetIsInaccessible)
+ {
+ ThrowAccessException(pContext, pTargetMT, NULL, FALSE);
+ }
+
+ return FALSE;
+ }
+
+ if (pTargetMT && pTargetMT->GetAssembly()->IsDisabledPrivateReflection())
+ {
+ if (m_fThrowIfTargetIsInaccessible)
+ {
+ ThrowAccessException(pContext, pTargetMT, NULL, FALSE);
+ }
+
+ return FALSE;
+ }
+
+ BOOL canAccessTarget = FALSE;
+
+#ifndef CROSSGEN_COMPILE
+#ifdef FEATURE_CORECLR
+
+ BOOL fAccessingFrameworkCode = FALSE;
+
+ // In CoreCLR kRestrictedMemberAccess means that one can access private/internal
+ // classes/members in app code.
+ if (m_accessCheckType != kMemberAccess && pTargetMT)
+ {
+ // m_accessCheckType must be kRestrictedMemberAccess if we are running in partial trust (PT).
+ _ASSERTE(GetAppDomain()->GetSecurityDescriptor()->IsFullyTrusted() ||
+ m_accessCheckType == kRestrictedMemberAccess);
+
+ if (visibilityCheck)
+ {
+ // In CoreCLR RMA means visibility checks always succeed if the target is user code.
+ if ((m_accessCheckType == kRestrictedMemberAccess || m_accessCheckType == kRestrictedMemberAccessNoTransparency) &&
+ !Security::IsMicrosoftPlatform(pTargetMT->GetAssembly()->GetSecurityDescriptor()))
+ return TRUE;
+
+ // Accessing private types/members in platform code.
+ fAccessingFrameworkCode = TRUE;
+ }
+ else
+ {
+ // We allow all transparency checks to succeed in LCG methods and reflection invocation.
+ if (m_accessCheckType == kNormalAccessNoTransparency || m_accessCheckType == kRestrictedMemberAccessNoTransparency)
+ return TRUE;
+ }
+ }
+
+ // Always allow interop (NULL) callers full access.
+ if (pContext->IsCalledFromInterop())
+ return TRUE;
+
+ MethodDesc* pCallerMD = pContext->GetCallerMethod();
+
+ // Platform critical code is exempted from all accessibility rules, regardless of the AccessCheckType.
+ if (pCallerMD != NULL &&
+ !Security::IsMethodTransparent(pCallerMD)
+ && Security::IsMicrosoftPlatform(pCallerMD->GetAssembly()->GetSecurityDescriptor()))
+ {
+ return TRUE;
+ }
+
+ // No Access
+ if (m_fThrowIfTargetIsInaccessible)
+ {
+ ThrowAccessException(pContext, pTargetMT, NULL, fAccessingFrameworkCode);
+ }
+
+#else // FEATURE_CORECLR
+
+ GCX_COOP();
+
+ // Overriding the rules of visibility checks in Win8 immersive: no access is allowed to internal
+ // code in the framework even in full trust, unless the caller is also framework code.
+ if ( (m_accessCheckType == kUserCodeOnlyRestrictedMemberAccess ||
+ m_accessCheckType == kUserCodeOnlyRestrictedMemberAccessNoTransparency) &&
+ visibilityCheck )
+ {
+ IAssemblyName *pIAssemblyName = pTargetMT->GetAssembly()->GetFusionAssemblyName();
+
+ HRESULT hr = Fusion::Util::IsAnyFrameworkAssembly(pIAssemblyName);
+
+ // S_OK: pIAssemblyName is a framework assembly.
+ // S_FALSE: pIAssemblyName is not a framework assembly.
+ // Other values: pIAssemblyName is an invalid name.
+ if (hr == S_OK)
+ {
+ if (pContext->IsCalledFromInterop())
+ return TRUE;
+
+ // If the caller method is NULL and we are not called from interop,
+ // this is not a normal method access check (e.g. a CA accessibility check),
+ // so the access check should fail in this case.
+ hr = S_FALSE;
+
+ MethodDesc* pCallerMD = pContext->GetCallerMethod();
+ if (pCallerMD != NULL)
+ {
+ pIAssemblyName = pCallerMD->GetAssembly()->GetFusionAssemblyName();
+ hr = Fusion::Util::IsAnyFrameworkAssembly(pIAssemblyName);
+ }
+
+ // The caller is not framework code.
+ if (hr != S_OK)
+ {
+ if (m_fThrowIfTargetIsInaccessible)
+ ThrowAccessException(pContext, pTargetMT, NULL, TRUE);
+ else
+ return FALSE;
+ }
+ }
+ }
+
+ EX_TRY
+ {
+ if (m_accessCheckType == kMemberAccess)
+ {
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, REFLECTION_MEMBER_ACCESS);
+ }
+ else
+ {
+ _ASSERTE(m_accessCheckType == kRestrictedMemberAccess ||
+ m_accessCheckType == kUserCodeOnlyRestrictedMemberAccess ||
+ (m_accessCheckType == kUserCodeOnlyRestrictedMemberAccessNoTransparency && visibilityCheck));
+
+ // The JIT guarantees that pTargetMT has been fully loaded and is ready to execute by this point, but reflection doesn't.
+ // So GetSecurityDescriptor could AV because the DomainAssembly cannot be found.
+ // For now we avoid this by calling EnsureActive aggressively. We might want to move this to the reflection code in the future:
+ // ReflectionInvocation::PerformVisibilityCheck, PerformSecurityCheckHelper, COMDelegate::BindToMethodName/Info, etc.
+ // We don't need to call EnsureInstanceActive because we will be doing access checks on all the generic arguments anyway, so
+ // EnsureActive will be called on every one of them if needed.
+ pTargetMT->EnsureActive();
+
+ IAssemblySecurityDescriptor * pTargetSecurityDescriptor = pTargetMT->GetModule()->GetSecurityDescriptor();
+ _ASSERTE(pTargetSecurityDescriptor != NULL);
+
+ if (m_pAccessContext != NULL)
+ {
+ // If we have a context, use it to do the demand
+ Security::ReflectionTargetDemand(REFLECTION_MEMBER_ACCESS,
+ pTargetSecurityDescriptor,
+ m_pAccessContext);
+ }
+ else
+ {
+ // Just do a normal Demand
+ Security::ReflectionTargetDemand(REFLECTION_MEMBER_ACCESS, pTargetSecurityDescriptor);
+ }
+ }
+
+ canAccessTarget = TRUE;
+ }
+ EX_CATCH
+ {
+ canAccessTarget = FALSE;
+
+ if (m_fThrowIfTargetIsInaccessible)
+ {
+ ThrowAccessException(pContext, pTargetMT, GET_EXCEPTION());
+ }
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+#endif // FEATURE_CORECLR
+#endif // CROSSGEN_COMPILE
+
+ return canAccessTarget;
+}
+
+//******************************************************************************
+// pFailureMT - the MethodTable that we were trying to access. It can be null
+ // if the failure is not because of a specific type. This will be a
+ // component of the instantiation of m_pTargetMT/m_pTargetMethod/m_pTargetField.
+
+void AccessCheckOptions::ThrowAccessException(
+ AccessCheckContext* pContext,
+ MethodTable* pFailureMT, /* = NULL */
+ Exception* pInnerException, /* = NULL */
+ BOOL fAccessingFrameworkCode /* = FALSE */) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pContext));
+ PRECONDITION(CheckPointer(pInnerException, NULL_OK));
+ PRECONDITION(m_fThrowIfTargetIsInaccessible);
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ MethodDesc* pCallerMD = pContext->GetCallerMethod();
+
+ if (m_pTargetMT != NULL)
+ {
+ // If we know the specific type that caused the failure, display it.
+ // Else display the whole type that we are trying to access.
+ MethodTable * pMT = (pFailureMT != NULL) ? pFailureMT : m_pTargetMT;
+ ThrowTypeAccessException(pContext, pMT, 0, pInnerException, fAccessingFrameworkCode);
+ }
+ else if (m_pTargetMethod != NULL)
+ {
+ // If the caller and target method are non-null and the same, then this means that we're checking to see
+ // if the method has access to itself in order to validate that it has access to its parameter types,
+ // containing type, and return type. In this case, throw a more informative TypeAccessException to
+ // describe the error that occurred (for instance, "this method doesn't have access to one of its
+ // parameter types", rather than "this method doesn't have access to itself").
+ // We only want to do this if we know the exact type that caused the problem, otherwise fall back to
+ // throwing the standard MethodAccessException.
+ if (pCallerMD != NULL && m_pTargetMethod == pCallerMD && pFailureMT != NULL)
+ {
+ ThrowTypeAccessException(pContext, pFailureMT, 0, pInnerException, fAccessingFrameworkCode);
+ }
+ else
+ {
+ ThrowMethodAccessException(pContext, m_pTargetMethod, 0, pInnerException, fAccessingFrameworkCode);
+ }
+ }
+ else
+ {
+ _ASSERTE(m_pTargetField != NULL);
+ ThrowFieldAccessException(pContext, m_pTargetField, 0, pInnerException, fAccessingFrameworkCode);
+ }
+}
+
+//******************************************************************************
+// This will do a security demand if appropriate.
+// If access is not possible, this will either throw an exception or return FALSE
+BOOL AccessCheckOptions::DemandMemberAccessOrFail(AccessCheckContext *pContext, MethodTable * pTargetMT, BOOL visibilityCheck) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // m_fSkipCheckForCriticalCode is only ever set to true for CanAccessMemberForExtraChecks.
+ // For legacy compat we allow the access check to succeed for all AccessCheckTypes if the caller is critical.
+ if (m_fSkipCheckForCriticalCode)
+ {
+ if (pContext->IsCalledFromInterop() ||
+ !Security::IsMethodTransparent(pContext->GetCallerMethod()))
+ return TRUE;
+ }
+
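+ // Under normal accessibility checks there is no demand that could grant access; fail (or throw)
+ // unless the caller explicitly ignores access checks to the target assembly.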
+ if (DoNormalAccessibilityChecks())
+ {
+ if (pContext->GetCallerAssembly()->IgnoresAccessChecksTo(pTargetMT->GetAssembly()))
+ {
+ return TRUE;
+ }
+
+#if defined(FEATURE_CORECLR) && defined(CROSSGEN_COMPILE)
+ CONSISTENCY_CHECK_MSGF(!pContext->GetCallerAssembly()->GetManifestFile()->IsProfileAssembly(),
+ ("Accessibility check failed while compiling platform assembly. Are you missing FriendAccessAllowed attribute? Caller: %s %s %s Target: %s",
+ pContext->GetCallerAssembly() ? pContext->GetCallerAssembly()->GetSimpleName() : "",
+ pContext->GetCallerMT() ? pContext->GetCallerMT()->GetDebugClassName() : "",
+ pContext->GetCallerMethod() ? pContext->GetCallerMethod()->GetName() : "",
+ pTargetMT ? pTargetMT->GetDebugClassName() : ""));
+#endif
+
+ if (m_fThrowIfTargetIsInaccessible)
+ {
+ ThrowAccessException(pContext, pTargetMT);
+ }
+
+ return FALSE;
+ }
+
+ return DemandMemberAccess(pContext, pTargetMT, visibilityCheck);
+}
+
+//******************************************************************************
+// This should be called if access to the target is not possible.
+// This will either throw an exception or return FALSE.
+BOOL AccessCheckOptions::FailOrThrow(AccessCheckContext *pContext) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pContext));
+ }
+ CONTRACTL_END;
+
+ // m_fSkipCheckForCriticalCode is only ever set to true for CanAccessMemberForExtraChecks.
+ // For legacy compat we allow the access check to succeed for all AccessCheckTypes if the caller is critical.
+ if (m_fSkipCheckForCriticalCode)
+ {
+ if (pContext->IsCalledFromInterop() ||
+ !Security::IsMethodTransparent(pContext->GetCallerMethod()))
+ return TRUE;
+ }
+
+ if (m_fThrowIfTargetIsInaccessible)
+ {
+ ThrowAccessException(pContext);
+ }
+
+ return FALSE;
+}
+
+// Generate access exception context strings that are due to potential security misconfiguration
+void GetAccessExceptionAdditionalContextForSecurity(Assembly *pAccessingAssembly,
+ Assembly *pTargetAssembly,
+ BOOL isTransparencyError,
+ BOOL fAccessingFrameworkCode,
+ StringArrayList *pContextInformation)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pAccessingAssembly));
+ PRECONDITION(CheckPointer(pTargetAssembly));
+ PRECONDITION(CheckPointer(pContextInformation));
+ }
+ CONTRACTL_END;
+
+ if (fAccessingFrameworkCode)
+ {
+ SString accessingFrameworkCodeError;
+ EEException::GetResourceMessage(IDS_E_ACCESSING_PRIVATE_FRAMEWORK_CODE, accessingFrameworkCodeError);
+
+ pContextInformation->Append(accessingFrameworkCodeError);
+ }
+
+#ifndef FEATURE_CORECLR
+ if (isTransparencyError)
+ {
+ ModuleSecurityDescriptor *pMSD = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pAccessingAssembly);
+
+ // If the accessing assembly is APTCA and using level 2 transparency, then transparency errors may be
+ // because APTCA newly opts such assemblies into being fully transparent.
+ if (pMSD->IsMixedTransparency() && !pAccessingAssembly->GetSecurityTransparencyBehavior()->DoesUnsignedImplyAPTCA())
+ {
+ SString callerDisplayName;
+ pAccessingAssembly->GetDisplayName(callerDisplayName);
+
+ SString level2AptcaTransparencyError;
+ EEException::GetResourceMessage(IDS_ACCESS_EXCEPTION_CONTEXT_LEVEL2_APTCA, level2AptcaTransparencyError, callerDisplayName);
+
+ pContextInformation->Append(level2AptcaTransparencyError);
+ }
+
+ // If the accessing assembly is fully transparent and it is partially trusted, then transparency
+ // errors may be because the CLR forced the assembly to be transparent due to its trust level.
+ if (pMSD->IsAllTransparentDueToPartialTrust())
+ {
+ _ASSERTE(pMSD->IsAllTransparent());
+ SString callerDisplayName;
+ pAccessingAssembly->GetDisplayName(callerDisplayName);
+
+ SString partialTrustTransparencyError;
+ EEException::GetResourceMessage(IDS_ACCESS_EXCEPTION_CONTEXT_PT_TRANSPARENT, partialTrustTransparencyError, callerDisplayName);
+
+ pContextInformation->Append(partialTrustTransparencyError);
+ }
+ }
+#endif // FEATURE_CORECLR
+
+#if defined(FEATURE_APTCA) && !defined(CROSSGEN_COMPILE)
+ // If the target assembly is conditionally APTCA, then it may need to have been enabled in the domain
+ SString conditionalAptcaContext = Security::GetConditionalAptcaAccessExceptionContext(pTargetAssembly);
+ if (!conditionalAptcaContext.IsEmpty())
+ {
+ pContextInformation->Append(conditionalAptcaContext);
+ }
+
+ // If the target assembly is APTCA killbitted, then indicate that as well
+ SString aptcaKillBitContext = Security::GetAptcaKillBitAccessExceptionContext(pTargetAssembly);
+ if (!aptcaKillBitContext.IsEmpty())
+ {
+ pContextInformation->Append(aptcaKillBitContext);
+ }
+#endif // FEATURE_APTCA && !CROSSGEN_COMPILE
+}
+
+// Generate additional context about the root cause of an access exception which may help in debugging it (for
+ // instance v4 APTCA implying transparency, or conditional APTCA not being enabled). If no additional
+// context is available, then this returns SString.Empty.
+SString GetAdditionalAccessExceptionContext(Assembly *pAccessingAssembly,
+ Assembly *pTargetAssembly,
+ BOOL isTransparencyError,
+ BOOL fAccessingFrameworkCode)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pAccessingAssembly));
+ PRECONDITION(CheckPointer(pTargetAssembly));
+ }
+ CONTRACTL_END;
+
+ StringArrayList contextComponents;
+
+ // See if the exception may have been caused by security
+ GetAccessExceptionAdditionalContextForSecurity(pAccessingAssembly,
+ pTargetAssembly,
+ isTransparencyError,
+ fAccessingFrameworkCode,
+ &contextComponents);
+
+ // Append each component of additional context we found into the additional context string in its own
+ // paragraph.
+ SString additionalContext;
+ for (DWORD i = 0; i < contextComponents.GetCount(); ++i)
+ {
+ SString contextComponent = contextComponents.Get(i);
+ if (!contextComponent.IsEmpty())
+ {
+ additionalContext.Append(W("\n\n"));
+ additionalContext.Append(contextComponent);
+ }
+ }
+
+ return additionalContext;
+}
+
+void DECLSPEC_NORETURN ThrowFieldAccessException(AccessCheckContext* pContext,
+ FieldDesc *pFD,
+ UINT messageID /* = 0 */,
+ Exception *pInnerException /* = NULL */,
+ BOOL fAccessingFrameworkCode /* = FALSE */)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pContext));
+ PRECONDITION(CheckPointer(pFD));
+ }
+ CONTRACTL_END;
+
+ BOOL isTransparencyError = FALSE;
+
+ MethodDesc* pCallerMD = pContext->GetCallerMethod();
+ if (pCallerMD != NULL)
+ isTransparencyError = !Security::CheckCriticalAccess(pContext, NULL, pFD, NULL);
+
+ ThrowFieldAccessException(pCallerMD,
+ pFD,
+ isTransparencyError,
+ messageID,
+ pInnerException,
+ fAccessingFrameworkCode);
+}
+
+void DECLSPEC_NORETURN ThrowFieldAccessException(MethodDesc* pCallerMD,
+ FieldDesc *pFD,
+ BOOL isTransparencyError,
+ UINT messageID /* = 0 */,
+ Exception *pInnerException /* = NULL */,
+ BOOL fAccessingFrameworkCode /* = FALSE */)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCallerMD, NULL_OK));
+ PRECONDITION(CheckPointer(pFD));
+ }
+ CONTRACTL_END;
+
+ if (pCallerMD != NULL)
+ {
+ if (messageID == 0)
+ {
+ // Figure out if we can give a specific reason why this field access was rejected - for instance, if
+ // we see that the caller is transparent and accessing a critical field, then we can put that
+ // information into the exception message.
+ if (isTransparencyError)
+ {
+ messageID = IDS_E_CRITICAL_FIELD_ACCESS_DENIED;
+ }
+ else
+ {
+ messageID = IDS_E_FIELDACCESS;
+ }
+ }
+
+ SString strAdditionalContext = GetAdditionalAccessExceptionContext(pCallerMD->GetAssembly(),
+ pFD->GetApproxEnclosingMethodTable()->GetAssembly(),
+ isTransparencyError,
+ fAccessingFrameworkCode);
+
+ EX_THROW_WITH_INNER(EEFieldException, (pFD, pCallerMD, strAdditionalContext, messageID), pInnerException);
+ }
+ else
+ {
+ EX_THROW_WITH_INNER(EEFieldException, (pFD), pInnerException);
+ }
+}
+
+void DECLSPEC_NORETURN ThrowMethodAccessException(AccessCheckContext* pContext,
+ MethodDesc *pCalleeMD,
+ UINT messageID /* = 0 */,
+ Exception *pInnerException /* = NULL */,
+ BOOL fAccessingFrameworkCode /* = FALSE */)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pContext));
+ PRECONDITION(CheckPointer(pCalleeMD));
+ }
+ CONTRACTL_END;
+
+ BOOL isTransparencyError = FALSE;
+
+ MethodDesc* pCallerMD = pContext->GetCallerMethod();
+ if (pCallerMD != NULL)
+ isTransparencyError = !Security::CheckCriticalAccess(pContext, pCalleeMD, NULL, NULL);
+
+ ThrowMethodAccessException(pCallerMD,
+ pCalleeMD,
+ isTransparencyError,
+ messageID,
+ pInnerException,
+ fAccessingFrameworkCode);
+}
+
+void DECLSPEC_NORETURN ThrowMethodAccessException(MethodDesc* pCallerMD,
+ MethodDesc *pCalleeMD,
+ BOOL isTransparencyError,
+ UINT messageID /* = 0 */,
+ Exception *pInnerException /* = NULL */,
+ BOOL fAccessingFrameworkCode /* = FALSE */)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCallerMD, NULL_OK));
+ PRECONDITION(CheckPointer(pCalleeMD));
+ }
+ CONTRACTL_END;
+
+ if (pCallerMD != NULL)
+ {
+ if (messageID == 0)
+ {
+ // Figure out if we can give a specific reason why this method access was rejected - for instance, if
+ // we see that the caller is transparent and the callee is critical, then we can put that
+ // information into the exception message.
+ if (isTransparencyError)
+ {
+ messageID = IDS_E_CRITICAL_METHOD_ACCESS_DENIED;
+ }
+ else
+ {
+ messageID = IDS_E_METHODACCESS;
+ }
+ }
+
+ SString strAdditionalContext = GetAdditionalAccessExceptionContext(pCallerMD->GetAssembly(),
+ pCalleeMD->GetAssembly(),
+ isTransparencyError,
+ fAccessingFrameworkCode);
+
+ EX_THROW_WITH_INNER(EEMethodException, (pCalleeMD, pCallerMD, strAdditionalContext, messageID), pInnerException);
+ }
+ else
+ {
+ EX_THROW_WITH_INNER(EEMethodException, (pCalleeMD), pInnerException);
+ }
+}
+
+void DECLSPEC_NORETURN ThrowTypeAccessException(AccessCheckContext* pContext,
+ MethodTable *pMT,
+ UINT messageID /* = 0 */,
+ Exception *pInnerException /* = NULL */,
+ BOOL fAccessingFrameworkCode /* = FALSE */)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pContext));
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ BOOL isTransparencyError = FALSE;
+
+ MethodDesc* pCallerMD = pContext->GetCallerMethod();
+ if (pCallerMD != NULL)
+ isTransparencyError = !Security::CheckCriticalAccess(pContext, NULL, NULL, pMT);
+
+ ThrowTypeAccessException(pCallerMD,
+ pMT,
+ isTransparencyError,
+ messageID,
+ pInnerException,
+ fAccessingFrameworkCode);
+}
+
+void DECLSPEC_NORETURN ThrowTypeAccessException(MethodDesc* pCallerMD,
+ MethodTable *pMT,
+ BOOL isTransparencyError,
+ UINT messageID /* = 0 */,
+ Exception *pInnerException /* = NULL */,
+ BOOL fAccessingFrameworkCode /* = FALSE */)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCallerMD, NULL_OK));
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ if (pCallerMD != NULL)
+ {
+ if (messageID == 0)
+ {
+ // Figure out if we can give a specific reason why this type access was rejected - for instance, if
+ // we see that the caller is transparent and is accessing a critical type, then we can put that
+ // information into the exception message.
+ if (isTransparencyError)
+ {
+ messageID = IDS_E_CRITICAL_TYPE_ACCESS_DENIED;
+ }
+ else
+ {
+ messageID = IDS_E_TYPEACCESS;
+ }
+ }
+
+ SString strAdditionalContext = GetAdditionalAccessExceptionContext(pCallerMD->GetAssembly(),
+ pMT->GetAssembly(),
+ isTransparencyError,
+ fAccessingFrameworkCode);
+
+ EX_THROW_WITH_INNER(EETypeAccessException, (pMT, pCallerMD, strAdditionalContext, messageID), pInnerException);
+ }
+ else
+ {
+ EX_THROW_WITH_INNER(EETypeAccessException, (pMT), pInnerException);
+ }
+}
+
+//******************************************************************************
+// This function determines whether a method [if transparent]
+// can access a specified target (e.g. Type, Method, Field)
+static BOOL CheckTransparentAccessToCriticalCode(
+ AccessCheckContext* pContext,
+ DWORD dwMemberAccess,
+ MethodTable* pTargetMT,
+ MethodDesc* pOptionalTargetMethod,
+ FieldDesc* pOptionalTargetField,
+ MethodTable* pOptionalTargetType,
+ const AccessCheckOptions & accessCheckOptions)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pContext));
+ PRECONDITION(accessCheckOptions.TransparencyCheckNeeded());
+ }
+ CONTRACTL_END;
+
+ // At most one of these should be non-NULL
+ _ASSERTE(1 >= ((pOptionalTargetMethod ? 1 : 0) +
+ (pOptionalTargetField ? 1 : 0) +
+ (pOptionalTargetType ? 1 : 0)));
+
+#ifndef FEATURE_CORECLR
+ if (pTargetMT->GetAssembly()->GetSecurityTransparencyBehavior()->DoesPublicImplyTreatAsSafe())
+ {
+ // @ telesto: public => TAS in non-coreclr only. The intent is to remove this ifdef and remove
+ // public => TAS in all flavors/branches.
+ // check if the target member is accessible outside the assembly
+ if (IsMdPublic(dwMemberAccess) && IsTypeVisibleOutsideAssembly(pTargetMT))
+ {
+ return TRUE;
+ }
+ }
+#endif // !FEATURE_CORECLR
+
+ // if the caller [Method] is transparent, do special security checks
+ // check if security disallows access to target member
+ if (!Security::CheckCriticalAccess(
+ pContext,
+ pOptionalTargetMethod,
+ pOptionalTargetField,
+ pOptionalTargetType))
+ {
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ SecurityTransparent::LogTransparencyError(pContext->GetCallerMethod(), "Transparent code accessing a critical type, method, or field", pOptionalTargetMethod);
+ }
+ if (!g_pConfig->DisableTransparencyEnforcement())
+#endif // _DEBUG
+ {
+ return accessCheckOptions.DemandMemberAccessOrFail(pContext, pTargetMT, FALSE /*visibilityCheck*/);
+ }
+ }
+
+ return TRUE;
+} // static BOOL CheckTransparentAccessToCriticalCode
+
+//---------------------------------------------------------------------------------------
+//
+ // Checks to see if access to a member with assembly visibility is allowed.
+//
+// Arguments:
+// pAccessingAssembly - The assembly requesting access to the internal member
+// pTargetAssembly - The assembly which contains the target member
+// pOptionalTargetField - Internal field being accessed OR
+ // pOptionalTargetMethod - Internal method being accessed OR
+// pOptionalTargetType - Internal type being accessed
+//
+// Return Value:
+// TRUE if pTargetAssembly is pAccessingAssembly, or if pTargetAssembly allows
+// pAccessingAssembly friend access to the target. FALSE otherwise.
+//
+
+static BOOL AssemblyOrFriendAccessAllowed(Assembly *pAccessingAssembly,
+ Assembly *pTargetAssembly,
+ FieldDesc *pOptionalTargetField,
+ MethodDesc *pOptionalTargetMethod,
+ MethodTable *pOptionalTargetType)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pAccessingAssembly));
+ PRECONDITION(CheckPointer(pTargetAssembly));
+ PRECONDITION(pOptionalTargetField != NULL || pOptionalTargetMethod != NULL || pOptionalTargetType != NULL);
+ PRECONDITION(pOptionalTargetField == NULL || pOptionalTargetMethod == NULL);
+ }
+ CONTRACTL_END;
+
+ if (pAccessingAssembly == pTargetAssembly)
+ {
+ return TRUE;
+ }
+
+ if (pAccessingAssembly->IgnoresAccessChecksTo(pTargetAssembly))
+ {
+ return TRUE;
+ }
+
+#if defined(FEATURE_REMOTING) && !defined(CROSSGEN_COMPILE)
+ else if (pAccessingAssembly->GetDomain() != pTargetAssembly->GetDomain() &&
+ pAccessingAssembly->GetFusionAssemblyName()->IsEqual(pTargetAssembly->GetFusionAssemblyName(), ASM_CMPF_NAME | ASM_CMPF_PUBLIC_KEY_TOKEN) == S_OK)
+ {
+ // If we're accessing an internal type across AppDomains, we'll end up saying that an assembly is
+ // not allowed to access internal types in itself, since the Assembly *'s will not compare equal.
+ // This ends up being confusing for users who don't have a deep understanding of the loader and type
+ // system, and also creates different behavior if your assembly is shared vs unshared (if you are
+ // shared, your Assembly *'s will match since they're in the shared domain).
+ //
+ // In order to ease the confusion, we'll consider assemblies to be friends of themselves in this
+ // scenario -- if a name and public key match succeeds, we'll grant internal access across domains.
+ return TRUE;
+ }
+#endif // FEATURE_REMOTING && !CROSSGEN_COMPILE
+ else if (pOptionalTargetField != NULL)
+ {
+ return pTargetAssembly->GrantsFriendAccessTo(pAccessingAssembly, pOptionalTargetField);
+ }
+ else if (pOptionalTargetMethod != NULL)
+ {
+ return pTargetAssembly->GrantsFriendAccessTo(pAccessingAssembly, pOptionalTargetMethod);
+ }
+ else
+ {
+ return pTargetAssembly->GrantsFriendAccessTo(pAccessingAssembly, pOptionalTargetType);
+ }
+}
+
+//******************************************************************************
+ // This function determines whether the instantiation of a generic method
+ // is accessible from some given class.
+/* static */
+BOOL ClassLoader::CanAccessMethodInstantiation( // True if access is legal, false otherwise.
+ AccessCheckContext* pContext,
+ MethodDesc* pOptionalTargetMethod, // The desired method; if NULL, return TRUE
+ const AccessCheckOptions & accessCheckOptions)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pContext));
+ }
+ CONTRACTL_END
+
+ // If there is no target method just allow access.
+ // NB: the caller may just be checking access to a field or class, so we allow for NULL.
+ if (!pOptionalTargetMethod)
+ return TRUE;
+
+ // Is the desired target an instantiated generic method?
+ if (pOptionalTargetMethod->HasMethodInstantiation())
+ { // check that the current class has access
+ // to all of the instantiating classes.
+ Instantiation inst = pOptionalTargetMethod->GetMethodInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle th = inst[i];
+
+ MethodTable* pMT = th.GetMethodTableOfElementType();
+
+ // Either a TypeVarTypeDesc or a FnPtrTypeDesc. No access check needed.
+ if (pMT == NULL)
+ continue;
+
+ if (!CanAccessClass(
+ pContext,
+ pMT,
+ th.GetAssembly(),
+ accessCheckOptions))
+ {
+ return FALSE;
+ }
+ }
+ // If we are here, the current class has access to all of the target's instantiating args.
+ }
+ return TRUE;
+}
+
+//******************************************************************************
+// This function determines whether a target class is accessible from
+// some given class.
+// CanAccessClass does the following checks:
+// 1. Transparency check on the target class
+// 2. Recursively calls CanAccessClass on the generic arguments of the target class if it is generic.
+ // 3. Visibility check on the target class; if the target class is nested, this will be translated
+ // to a member access check on the enclosing type (calling CanAccess with the appropriate dwProtection).
+//
+/* static */
+BOOL ClassLoader::CanAccessClass( // True if access is legal, false otherwise.
+ AccessCheckContext* pContext, // The caller context
+ MethodTable* pTargetClass, // The desired target class.
+ Assembly* pTargetAssembly, // Assembly containing the target class.
+ const AccessCheckOptions & accessCheckOptions,
+ BOOL checkTargetTypeTransparency)// = TRUE
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pContext));
+ PRECONDITION(CheckPointer(pTargetClass));
+ }
+ CONTRACTL_END
+
+ // If there is no target class, allow access.
+ // @todo: what does that mean?
+ //if (!pTargetClass)
+ // return TRUE;
+
+ // check transparent/critical on type
+ // Note that dwMemberAccess is of no use here since we don't have a target method yet. It really should be made an optional arg.
+ // For now, we pass in mdPublic.
+ if (checkTargetTypeTransparency && accessCheckOptions.TransparencyCheckNeeded())
+ {
+ if (!CheckTransparentAccessToCriticalCode(
+ pContext,
+ mdPublic,
+ pTargetClass,
+ NULL,
+ NULL,
+ pTargetClass,
+ accessCheckOptions))
+ {
+ // no need to call accessCheckOptions.DemandMemberAccessOrFail here because
+ // CheckTransparentAccessToCriticalCode does that already
+ return FALSE;
+ }
+ }
+
+ // Step 2: Recursively call CanAccessClass on the generic type arguments
+ // Is the desired target a generic instantiation?
+ if (pTargetClass->HasInstantiation())
+ { // Yes, so before going any further, check that the current class has access
+ // to all of the instantiating classes.
+ Instantiation inst = pTargetClass->GetInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle th = inst[i];
+
+ MethodTable* pMT = th.GetMethodTableOfElementType();
+
+ // Either a TypeVarTypeDesc or a FnPtrTypeDesc. No access check needed.
+ if (pMT == NULL)
+ continue;
+
+ if (!CanAccessClass(
+ pContext,
+ pMT,
+ th.GetAssembly(),
+ accessCheckOptions,
+ checkTargetTypeTransparency))
+ {
+ // no need to call accessCheckOptions.DemandMemberAccessOrFail here because the base case in
+ // CanAccessClass does that already
+ return FALSE;
+ }
+ }
+ // If we are here, the current class has access to all of the desired target's instantiating args.
+ // Now, check whether the current class has access to the desired target itself.
+ }
+
+ // Step 3: Visibility Check
+ if (!pTargetClass->GetClass()->IsNested())
+ { // a non-nested class can be either all public or accessible only from its own assembly (and friends).
+ if (IsTdPublic(pTargetClass->GetClass()->GetProtection()))
+ {
+ return TRUE;
+ }
+ else
+ {
+ // Always allow interop callers full access.
+ if (pContext->IsCalledFromInterop())
+ return TRUE;
+
+ Assembly* pCurrentAssembly = pContext->GetCallerAssembly();
+ _ASSERTE(pCurrentAssembly != NULL);
+
+ if (AssemblyOrFriendAccessAllowed(pCurrentAssembly,
+ pTargetAssembly,
+ NULL,
+ NULL,
+ pTargetClass))
+ {
+ return TRUE;
+ }
+ else
+ {
+ return accessCheckOptions.DemandMemberAccessOrFail(pContext, pTargetClass, TRUE /*visibilityCheck*/);
+ }
+ }
+ }
+
+ // If we are here, the desired target class is nested. Translate the type flags
+ // to corresponding method access flags. We need to make a note if friend access was allowed to the
+ // type being checked since we're not passing it directly to the recursive call to CanAccess, and
+ // instead are just passing in the dwProtectionFlags.
+ DWORD dwProtection = pTargetClass->GetClass()->GetProtection();
+
+ switch(dwProtection) {
+ case tdNestedPublic:
+ dwProtection = mdPublic;
+ break;
+ case tdNestedFamily:
+ dwProtection = mdFamily;
+ break;
+ case tdNestedPrivate:
+ dwProtection = mdPrivate;
+ break;
+ case tdNestedFamORAssem:
+ // If we can access the class because we have assembly or friend access, we have satisfied the
+ // FamORAssem accessibility, so we can simplify it down to public. Otherwise we require that
+ // family access be allowed to grant access.
+ case tdNestedFamANDAssem:
+ // If we don't grant assembly or friend access to the target class, then there is no way we
+ // could satisfy the FamANDAssem requirement. Otherwise, since we have satisfied the Assm
+ // portion, we only need to check for the Fam portion.
+ case tdNestedAssembly:
+ // If we don't grant assembly or friend access to the target class, and that class has assembly
+ // protection, we can fail the request now. Otherwise we can check to make sure a public member
+ // of the outer class is allowed, since we have satisfied the target's accessibility rules.
+
+ // Always allow interop callers full access.
+ if (pContext->IsCalledFromInterop())
+ return TRUE;
+
+ if (AssemblyOrFriendAccessAllowed(pContext->GetCallerAssembly(), pTargetAssembly, NULL, NULL, pTargetClass))
+ dwProtection = (dwProtection == tdNestedFamANDAssem) ? mdFamily : mdPublic;
+ else if (dwProtection == tdNestedFamORAssem)
+ dwProtection = mdFamily;
+ else
+ return accessCheckOptions.DemandMemberAccessOrFail(pContext, pTargetClass, TRUE /*visibilityCheck*/);
+
+ break;
+
+ default:
+ THROW_BAD_FORMAT_MAYBE(!"Unexpected class visibility flag value", BFA_BAD_VISIBILITY, pTargetClass);
+ }
+
+ // The desired target class is nested, so translate the class access request into
+ // a member access request. That is, if the current class is trying to access A::B,
+ // check if it can access things in A with the visibility of B.
+ // So, pass A as the desired target class and visibility of B within A as the member access
+ // We've already done transparency check above. No need to do it again.
+ return ClassLoader::CanAccess(
+ pContext,
+ GetEnclosingMethodTable(pTargetClass),
+ pTargetAssembly,
+ dwProtection,
+ NULL,
+ NULL,
+ accessCheckOptions,
+ FALSE,
+ FALSE);
+} // BOOL ClassLoader::CanAccessClass()
+
+//******************************************************************************
+ // This is a front-end to CheckAccessMember that handles the nested class scope. If the caller
+ // can't access the target from the current point and is a nested class, then try from the
+ // enclosing class.
+ // It does two things in addition to CheckAccessMember:
+ // 1. If the caller class doesn't have access to the target, see if the enclosing class does.
+// 2. CanAccessMemberForExtraChecks which checks whether the caller class has access to
+// the signature of the target method or field.
+//
+// checkTargetMethodTransparency is set to FALSE only when the check is for JIT-compilation
+// because the JIT has a mechanism to insert a callout for the case where
+// we need to perform the currentMD <-> TargetMD check at runtime.
+
+/* static */
+BOOL ClassLoader::CanAccess( // TRUE if access is allowed, FALSE otherwise.
+ AccessCheckContext* pContext, // The caller context
+ MethodTable* pTargetMT, // The class containing the desired target member.
+ Assembly* pTargetAssembly, // Assembly containing that class.
+ DWORD dwMemberAccess, // Member access flags of the desired target member (as method bits).
+ MethodDesc* pOptionalTargetMethod, // The target method; NULL if the target is not a method or
+ // there is no need to check the method's instantiation.
+ FieldDesc* pOptionalTargetField, // or the desired field; if NULL, return TRUE
+ const AccessCheckOptions & accessCheckOptions, // = s_NormalAccessChecks
+ BOOL checkTargetMethodTransparency, // = TRUE
+ BOOL checkTargetTypeTransparency) // = TRUE
+{
+ CONTRACT(BOOL)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pContext));
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ // Recursive: CanAccess->CheckAccessMember->CanAccessClass->CanAccess
+ INTERIOR_STACK_PROBE(GetThread());
+
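+ // Use a non-throwing copy of the options for the initial check: a failure here is not final,
+ // since the enclosing class may still have access.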
+ AccessCheckOptions accessCheckOptionsNoThrow(accessCheckOptions, FALSE);
+
+ if (!CheckAccessMember(pContext,
+ pTargetMT,
+ pTargetAssembly,
+ dwMemberAccess,
+ pOptionalTargetMethod,
+ pOptionalTargetField,
+ // Suppress exceptions for nested classes since this is not a hard-failure,
+ // and we can do additional checks
+ accessCheckOptionsNoThrow,
+ checkTargetMethodTransparency,
+ checkTargetTypeTransparency))
+ {
+ // If we're here, CheckAccessMember didn't allow access.
+ BOOL canAccess = FALSE;
+
+ // If the current class is nested, there may be an enclosing class that might have access
+ // to the target. And if pCurrentMT == NULL, the current class is global, and so there
+ // is no enclosing class.
+ MethodTable* pCurrentMT = pContext->GetCallerMT();
+
+ // if this is called from interop, the CheckAccessMember call above should have already succeeded.
+ _ASSERTE(!pContext->IsCalledFromInterop());
+
+ BOOL isNestedClass = (pCurrentMT && pCurrentMT->GetClass()->IsNested());
+
+ if (isNestedClass)
+ {
+ // A nested class also has access to anything that the enclosing class does, so
+ // recursively check whether the enclosing class can access the desired target member.
+ MethodTable * pEnclosingMT = GetEnclosingMethodTable(pCurrentMT);
+
+ StaticAccessCheckContext accessContext(pContext->GetCallerMethod(),
+ pEnclosingMT,
+ pContext->GetCallerAssembly());
+
+ // On failure, do not throw from inside this call since that will cause the exception message
+ // to refer to the enclosing type.
+ canAccess = ClassLoader::CanAccess(
+ &accessContext,
+ pTargetMT,
+ pTargetAssembly,
+ dwMemberAccess,
+ pOptionalTargetMethod,
+ pOptionalTargetField,
+ accessCheckOptionsNoThrow,
+ checkTargetMethodTransparency,
+ checkTargetTypeTransparency);
+ }
+
+ if (!canAccess)
+ {
+ BOOL fail = accessCheckOptions.FailOrThrow(pContext);
+ RETURN_FROM_INTERIOR_PROBE(fail);
+ }
+ }
+
+ // For member access, we do additional checks to ensure that the specific member can
+ // be accessed
+
+ if (!CanAccessMemberForExtraChecks(
+ pContext,
+ pTargetMT,
+ pOptionalTargetMethod,
+ pOptionalTargetField,
+ accessCheckOptions,
+ checkTargetMethodTransparency))
+ {
+ RETURN_FROM_INTERIOR_PROBE(FALSE);
+ }
+
+ RETURN_FROM_INTERIOR_PROBE(TRUE);
+
+ END_INTERIOR_STACK_PROBE;
+} // BOOL ClassLoader::CanAccess()
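+
+// A C# sketch (hypothetical types) of the nested-caller scenario handled above:
+// Inner can legally reach Base.m_prot because its enclosing class Outer derives
+// from Base, even though Inner itself does not.
+/*
+class Base
+{
+    protected int m_prot;
+}
+class Outer : Base
+{
+    class Inner
+    {
+        int Read(Outer o) { return o.m_prot; }
+    }
+}
+*/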
+
+//******************************************************************************
+// Performs additional checks for member access
+
+BOOL ClassLoader::CanAccessMemberForExtraChecks(
+ AccessCheckContext* pContext,
+ MethodTable* pTargetExactMT,
+ MethodDesc* pOptionalTargetMethod,
+ FieldDesc* pOptionalTargetField,
+ const AccessCheckOptions & accessCheckOptions,
+ BOOL checkTargetMethodTransparency)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pContext));
+ }
+ CONTRACTL_END;
+
+ // Critical callers do not need the extra checks
+ // This early-out saves the cost of all the subsequent work
+ if (pContext->IsCallerCritical())
+ {
+ return TRUE;
+ }
+
+ if (pOptionalTargetMethod == NULL && pOptionalTargetField == NULL)
+ return TRUE;
+
+ _ASSERTE((pOptionalTargetMethod == NULL) != (pOptionalTargetField == NULL));
+
+    // We should always do checks on member signatures, but for backward compatibility we skip them
+    // for critical callers. Since resolving the caller here might incur a stack walk,
+    // we delay the check until DemandMemberAccessOrFail time.
+ AccessCheckOptions legacyAccessCheckOptions(accessCheckOptions, accessCheckOptions.Throws(), TRUE);
+
+ if (pOptionalTargetMethod)
+ {
+ // A method is accessible only if all the types in the signature
+ // are also accessible.
+ if (!CanAccessSigForExtraChecks(pContext,
+ pOptionalTargetMethod,
+ pTargetExactMT,
+ legacyAccessCheckOptions,
+ checkTargetMethodTransparency))
+ {
+ return FALSE;
+ }
+ }
+ else
+ {
+ _ASSERTE(pOptionalTargetField != NULL);
+
+ // A field is accessible only if the field type is also accessible
+
+ TypeHandle fieldType = pOptionalTargetField->GetExactFieldType(TypeHandle(pTargetExactMT));
+ CorElementType fieldCorType = fieldType.GetSignatureCorElementType();
+
+ MethodTable * pFieldTypeMT = fieldType.GetMethodTableOfElementType();
+
+ // No access check needed on a generic variable or a function pointer
+ if (pFieldTypeMT != NULL)
+ {
+ if (!CanAccessClassForExtraChecks(pContext,
+ pFieldTypeMT,
+ pFieldTypeMT->GetAssembly(),
+ legacyAccessCheckOptions,
+ TRUE))
+ {
+ return FALSE;
+ }
+ }
+ }
+
+ return TRUE;
+}
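+
+// Example of the field-type rule above (hypothetical types; csc rejects this shape,
+// but raw IL can express it): accessing T.F requires access to Hidden as well,
+// since the field's type is part of the member's signature.
+/*
+internal class Hidden { }
+public class T
+{
+    public Hidden F;    // visible declaration, inaccessible type: the extra
+}                       // check fails for callers outside the assembly.
+*/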
+
+//******************************************************************************
+// Can all the types in the signature of the pTargetMethodSig be accessed?
+//
+// "ForExtraChecks" means that we only do extra checks (security and transparency)
+// instead of the usual loader visibility checks. Post V2, we can enable all checks.
+
+BOOL ClassLoader::CanAccessSigForExtraChecks( // TRUE if access is allowed, FALSE otherwise.
+ AccessCheckContext* pContext,
+    MethodDesc* pTargetMethodSig, // The target method. If this is a shared method, pTargetExactMT gives
+                                  // additional information about the exact type.
+    MethodTable* pTargetExactMT, // The exact owning type, used as the instantiation context for shared methods.
+ const AccessCheckOptions & accessCheckOptions,
+ BOOL checkTargetTransparency)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pContext));
+ }
+ CONTRACTL_END;
+
+ MetaSig sig(pTargetMethodSig, TypeHandle(pTargetExactMT));
+
+ // First, check the return type
+
+ TypeHandle retType = sig.GetRetTypeHandleThrowing();
+ MethodTable * pRetMT = retType.GetMethodTableOfElementType();
+
+ // No access check needed on a generic variable or a function pointer
+ if (pRetMT != NULL)
+ {
+ if (!CanAccessClassForExtraChecks(pContext,
+ pRetMT,
+ retType.GetAssembly(),
+ accessCheckOptions,
+ checkTargetTransparency))
+ {
+ return FALSE;
+ }
+ }
+
+ //
+ // Now walk all the arguments in the signature
+ //
+
+ for (CorElementType argType = sig.NextArg(); argType != ELEMENT_TYPE_END; argType = sig.NextArg())
+ {
+ TypeHandle thArg = sig.GetLastTypeHandleThrowing();
+
+ MethodTable * pArgMT = thArg.GetMethodTableOfElementType();
+
+ // Either a TypeVarTypeDesc or a FnPtrTypeDesc. No access check needed.
+ if (pArgMT == NULL)
+ continue;
+
+        BOOL canAccessElement = CanAccessClassForExtraChecks(
+            pContext,
+            pArgMT,
+            thArg.GetAssembly(),
+            accessCheckOptions,
+            checkTargetTransparency);
+        if (!canAccessElement)
+ {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
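+
+// The walk above covers the return type and every argument type. A hypothetical
+// method that fails it (again expressible in raw IL even though csc rejects it):
+/*
+internal class Secret { }
+public class Api
+{
+    public Secret Roundtrip(Secret s) { return s; }   // both the return type and the
+}                                                     // argument type must be accessible
+*/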
+
+//******************************************************************************
+// Can the type be accessed?
+//
+// "ForExtraChecks" means that we only do extra checks (security and transparency)
+// instead of the usual loader visibility checks. Post V2, we can enable all checks.
+
+BOOL ClassLoader::CanAccessClassForExtraChecks( // True if access is legal, false otherwise.
+ AccessCheckContext* pContext,
+ MethodTable* pTargetClass, // The desired target class.
+ Assembly* pTargetAssembly, // Assembly containing that class.
+ const AccessCheckOptions & accessCheckOptions,
+ BOOL checkTargetTypeTransparency)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pContext));
+ }
+ CONTRACTL_END;
+
+    // ------------- Old comment begins ------------
+    // Critical callers do not need the extra checks
+    // TODO: can we enable full access checks now?
+    // ------------- Old comment ends ------------
+
+    // We shouldn't bypass the accessibility check on member signatures for FT/Critical callers.
+
+ return CanAccessClass(pContext,
+ pTargetClass,
+ pTargetAssembly,
+ accessCheckOptions,
+ checkTargetTypeTransparency);
+}
+
+//******************************************************************************
+// This is the helper function for the corresponding CanAccess()
+// It does the following checks:
+// 1. CanAccessClass on pTargetMT
+// 2. CanAccessMethodInstantiation if the pOptionalTargetMethod is provided and is generic.
+// 3. Transparency check on pTargetMT, pOptionalTargetMethod and pOptionalTargetField.
+// 4. Visibility check on dwMemberAccess (on pTargetMT)
+
+/* static */
+BOOL ClassLoader::CheckAccessMember( // TRUE if access is allowed, false otherwise.
+ AccessCheckContext* pContext,
+ MethodTable* pTargetMT, // The class containing the desired target member.
+ Assembly* pTargetAssembly, // Assembly containing that class.
+ DWORD dwMemberAccess, // Member access flags of the desired target member (as method bits).
+    MethodDesc* pOptionalTargetMethod, // The target method; NULL if the target is not a method or
+                                       // there is no need to check the method's instantiation.
+    FieldDesc* pOptionalTargetField, // The target field; NULL if there is no target field.
+ const AccessCheckOptions & accessCheckOptions,
+ BOOL checkTargetMethodTransparency,
+ BOOL checkTargetTypeTransparency
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pContext));
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+    // We're trying to access a member that is contained in the class pTargetMT, so we need to
+    // check whether we have access to pTargetMT itself from the current point before worrying
+    // about access to the member within the class.
+ if (!CanAccessClass(pContext,
+ pTargetMT,
+ pTargetAssembly,
+ accessCheckOptions,
+ checkTargetTypeTransparency))
+ {
+ return FALSE;
+ }
+
+ // If we are trying to access a generic method, we have to ensure its instantiation is accessible.
+ // Note that we need to perform transparency checks on the instantiation even if we have
+    // checkTargetMethodTransparency set to false, since generic type parameters by design do not affect
+ // the transparency of the generic method that is closing over them. This means standard transparency
+ // checks between caller and closed callee may succeed even if the callee's closure includes a critical type.
+ if (!CanAccessMethodInstantiation(
+ pContext,
+ pOptionalTargetMethod,
+ accessCheckOptions))
+ {
+ return FALSE;
+ }
+
+    // pOptionalTargetMethod and pOptionalTargetField can never both be non-NULL.
+ _ASSERTE(pOptionalTargetMethod == NULL || pOptionalTargetField == NULL);
+
+ // Perform transparency checks
+ // We don't need to do transparency check against pTargetMT here because
+ // it was already done in CanAccessClass above.
+
+    if (accessCheckOptions.TransparencyCheckNeeded() &&
+        ((checkTargetMethodTransparency && pOptionalTargetMethod) ||
+         pOptionalTargetField))
+ {
+ if (!CheckTransparentAccessToCriticalCode(
+ pContext,
+ dwMemberAccess,
+ pTargetMT,
+ pOptionalTargetMethod,
+ pOptionalTargetField,
+ NULL,
+ accessCheckOptions))
+ {
+ return FALSE;
+ }
+ }
+
+ if (IsMdPublic(dwMemberAccess))
+ {
+ return TRUE;
+ }
+
+ // Always allow interop callers full access.
+ if (pContext->IsCalledFromInterop())
+ return TRUE;
+
+ MethodTable* pCurrentMT = pContext->GetCallerMT();
+
+ if (IsMdPrivateScope(dwMemberAccess))
+ {
+ if (pCurrentMT != NULL && pCurrentMT->GetModule() == pTargetMT->GetModule())
+ {
+ return TRUE;
+ }
+ else
+ {
+ return accessCheckOptions.DemandMemberAccessOrFail(pContext, pTargetMT, TRUE /*visibilityCheck*/);
+ }
+ }
+
+
+#ifdef _DEBUG
+ if (pTargetMT == NULL &&
+ (IsMdFamORAssem(dwMemberAccess) ||
+ IsMdFamANDAssem(dwMemberAccess) ||
+ IsMdFamily(dwMemberAccess))) {
+ THROW_BAD_FORMAT_MAYBE(!"Family flag is not allowed on global functions", BFA_FAMILY_ON_GLOBAL, pTargetMT);
+ }
+#endif
+
+ if (pTargetMT == NULL ||
+ IsMdAssem(dwMemberAccess) ||
+ IsMdFamORAssem(dwMemberAccess) ||
+ IsMdFamANDAssem(dwMemberAccess))
+ {
+ // If the member has Assembly accessibility, grant access if the current
+ // class is in the same assembly as the desired target member, or if the
+ // desired target member's assembly grants friend access to the current
+ // assembly.
+ // @todo: What does it mean for the target class to be NULL?
+
+ Assembly* pCurrentAssembly = pContext->GetCallerAssembly();
+
+ // pCurrentAssembly should never be NULL, unless we are called from interop,
+ // in which case we should have already returned TRUE.
+ _ASSERTE(pCurrentAssembly != NULL);
+
+ const BOOL fAssemblyOrFriendAccessAllowed = AssemblyOrFriendAccessAllowed(pCurrentAssembly,
+ pTargetAssembly,
+ pOptionalTargetField,
+ pOptionalTargetMethod,
+ pTargetMT);
+
+ if ((pTargetMT == NULL || IsMdAssem(dwMemberAccess) || IsMdFamORAssem(dwMemberAccess)) &&
+ fAssemblyOrFriendAccessAllowed)
+ {
+ return TRUE;
+ }
+ else if (IsMdFamANDAssem(dwMemberAccess) &&
+ !fAssemblyOrFriendAccessAllowed)
+ {
+ return accessCheckOptions.DemandMemberAccessOrFail(pContext, pTargetMT, TRUE /*visibilityCheck*/);
+ }
+ }
+
+ // Nested classes can access all members of the parent class.
+ while(pCurrentMT != NULL)
+ {
+ //@GENERICSVER:
+ if (pTargetMT->HasSameTypeDefAs(pCurrentMT))
+ return TRUE;
+
+ if (IsMdPrivate(dwMemberAccess))
+ {
+ if (!pCurrentMT->GetClass()->IsNested())
+ {
+ return accessCheckOptions.DemandMemberAccessOrFail(pContext, pTargetMT, TRUE /*visibilityCheck*/);
+ }
+ }
+ else if (IsMdFamORAssem(dwMemberAccess) || IsMdFamily(dwMemberAccess) || IsMdFamANDAssem(dwMemberAccess))
+ {
+ if (CanAccessFamily(pCurrentMT, pTargetMT))
+ {
+ return TRUE;
+ }
+ }
+
+ pCurrentMT = GetEnclosingMethodTable(pCurrentMT);
+ }
+
+ return accessCheckOptions.DemandMemberAccessOrFail(pContext, pTargetMT, TRUE /*visibilityCheck*/);
+}
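+
+// Summary of the dwMemberAccess dispatch above (metadata visibility -> rule applied):
+//   mdPublic       -> always accessible
+//   mdPrivateScope -> accessible only from the same module
+//   mdAssem        -> same assembly, or a friend assembly (InternalsVisibleTo)
+//   mdPrivate      -> the declaring type, or a type nested in it
+//   mdFamily       -> CanAccessFamily (caller is the type or derives from it)
+//   mdFamORAssem   -> family OR assembly; mdFamANDAssem -> family AND assembly
+// On any failure, the final say goes to DemandMemberAccessOrFail.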
+
+// The family check is actually in two parts (Partition I, 8.5.3.2). The first part:
+//
+// ...accessible to referents that support the same type
+// (i.e., an exact type and all of the types that inherit
+// from it).
+//
+// Translation: pCurrentClass must be the same type as pTargetClass or a derived class. (i.e. Derived
+// can access Base.protected but Unrelated cannot access Base.protected).
+//
+// The second part:
+//
+// For verifiable code (see §8.8), there is an additional
+// requirement that can require a runtime check: the reference
+// shall be made through an item whose exact type supports
+// the exact type of the referent. That is, the item whose
+// member is being accessed shall inherit from the type
+// performing the access.
+//
+// Translation: The C++ protected rule. For those unfamiliar, it means that:
+// if you have:
+// GrandChild : Child
+// and
+// Child : Parent
+// and
+// Parent {
+// protected:
+// int protectedField;
+// }
+//
+// Child::function(GrandChild * o) {
+// o->protectedField; //This access is legal.
+// }
+//
+// GrandChild:function2(Child * o) {
+// o->protectedField; //This access is illegal.
+// }
+//
+// The reason for this rule is that if you had:
+// Sibling : Parent
+//
+// Child::function3( Sibling * o ) {
+// o->protectedField; //This access is illegal
+// }
+//
+// This is intuitively correct. However, you need to prevent:
+// Child::function4( Sibling * o ) {
+// ((Parent*)o)->protectedField;
+// }
+//
+// Which means that you must access protected fields through a type that is yourself or one of your
+// derived types.
+
+//This checks the first part of the rule above.
+/* static */
+BOOL ClassLoader::CanAccessFamily(
+ MethodTable *pCurrentClass,
+ MethodTable *pTargetClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTargetClass));
+ }
+ CONTRACTL_END
+
+ _ASSERTE(pCurrentClass);
+ _ASSERTE(pTargetClass);
+
+ //Look to see if Current is a child of the Target.
+ while (pCurrentClass) {
+ MethodTable *pCurInstance = pCurrentClass;
+
+ while (pCurInstance) {
+            //This is correct: csc is incredibly lax about generics. Essentially, if you are a subclass
+            //of any instantiation of a generic type, it lets you access it. Since the standard is
+            //totally unclear, mirror that behavior here.
+ if (pCurInstance->HasSameTypeDefAs(pTargetClass)) {
+ return TRUE;
+ }
+
+ pCurInstance = pCurInstance->GetParentMethodTable();
+ }
+
+        //Looking at 8.5.3, a protected member of a nested class in a parent type is also
+        //accessible.
+ pCurrentClass = GetEnclosingMethodTable(pCurrentClass);
+ }
+
+ return FALSE;
+}
+
+//If the instance is an inner class, this also succeeds if the outer class conforms to 8.5.3.2. A nested
+//class is enclosed inside the enclosing class' open type, so we need to ignore generic variables. That
+//also helps us with:
+/*
+class Base {
+ protected int m_family;
+}
+class Derived<T> : Base {
+ class Inner {
+ public int function(Derived<T> d) {
+ return d.m_family;
+ }
+ }
+}
+*/
+
+//Because the inner T is not the same T as the enclosing T (accessing generic variables is a CLS rule,
+//not a CLI rule), we see that as a comparison between Derived<T> and Derived<T'>, which CanCastTo rejects.
+//Instead we just compare the typedefs of the two types, which ignores all generic parameters (formal
+//or not).
+
+BOOL CanAccessFamilyVerificationEnclosingHelper(MethodTable * pMTCurrentEnclosingClass,
+ TypeHandle thInstanceClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(pMTCurrentEnclosingClass);
+
+ if (thInstanceClass.IsGenericVariable())
+ {
+ //In this case it is a TypeVarTypeDesc (i.e. T). If this access would be legal due to a
+ //constraint:
+ //
+ /*
+ public class My<T>
+ {
+ public class Inner<U> where U : My<T>
+ {
+ public int foo(U u)
+ {
+ return u.field;
+ }
+ }
+ protected int field;
+ }
+ */
+ //We need to find the generic class constraint. (The above is legal because U must be a My<T> which makes this
+ //legal by 8.5.3.2)
+ // There may only be 1 class constraint on a generic parameter
+
+ // Get the constraints on this generic variable
+ // At most 1 of them is a class constraint.
+ // That class constraint methodtable can go through the normal search for matching typedef logic below
+ TypeVarTypeDesc *tyvar = thInstanceClass.AsGenericVariable();
+ DWORD numConstraints;
+ TypeHandle *constraints = tyvar->GetConstraints(&numConstraints, CLASS_DEPENDENCIES_LOADED);
+ if (constraints == NULL)
+ {
+ // If we did not find a class constraint, we cannot generate a methodtable to search for
+ return FALSE;
+ }
+ else
+ {
+ for (DWORD i = 0; i < numConstraints; i++)
+ {
+ if (!constraints[i].IsInterface())
+ {
+ // We have found the class constraint on this TypeVarTypeDesc
+ // Recurse on the found class constraint. It is possible that this constraint may also be a TypeVarTypeDesc
+//class Outer4<T>
+//{
+// protected int field;
+//
+// public class Inner<U,V> where V:U where U : Outer4<T>
+// {
+// public int Method(V param) { return (++param.field); }
+// }
+//}
+ return CanAccessFamilyVerificationEnclosingHelper(pMTCurrentEnclosingClass, constraints[i]);
+ }
+ }
+ // If we did not find a class constraint, we cannot generate a methodtable to search for
+ return FALSE;
+ }
+ }
+ do
+ {
+ MethodTable * pAccessor = pMTCurrentEnclosingClass;
+ //If thInstanceClass is a MethodTable, we should only be doing the TypeDef comparison (see
+ //above).
+ if (!thInstanceClass.IsTypeDesc())
+ {
+ MethodTable *pInstanceMT = thInstanceClass.AsMethodTable();
+
+ // This is a CanCastTo implementation for classes, assuming we should ignore generic instantiation parameters.
+ do
+ {
+ if (pAccessor->HasSameTypeDefAs(pInstanceMT))
+ return TRUE;
+ pInstanceMT = pInstanceMT->GetParentMethodTable();
+ }while(pInstanceMT);
+ }
+ else
+ {
+ // Leave this logic in place for now, as I'm not fully confident it can't happen, and we are very close to RTM
+ // This logic was originally written to handle TypeVarTypeDescs, but those are now handled above.
+ _ASSERTE(FALSE);
+ if (thInstanceClass.CanCastTo(TypeHandle(pAccessor)))
+ return TRUE;
+ }
+
+ pMTCurrentEnclosingClass = GetEnclosingMethodTable(pMTCurrentEnclosingClass);
+ }while(pMTCurrentEnclosingClass);
+ return FALSE;
+}
+
+
+//This checks the verification only part of the rule above.
+//From the example above:
+// GrandChild::function2(Child * o) {
+// o->protectedField; //This access is illegal.
+// }
+// pCurrentClass is GrandChild and pTargetClass is Child. This check is completely unnecessary for statics,
+// but by legacy convention you can use GrandChild for pTargetClass in that case.
+
+BOOL ClassLoader::CanAccessFamilyVerification(TypeHandle thCurrentClass,
+ TypeHandle thInstanceClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+ PRECONDITION(!thCurrentClass.IsNull());
+ PRECONDITION(!thCurrentClass.IsTypeDesc());
+ }
+ CONTRACTL_END
+
+ //Check to see if Instance is equal to or derived from pCurrentClass.
+ //
+ //In some cases the type we have for the instance type is actually a TypeVarTypeDesc. In those cases we
+ //need to check against the constraints (You're accessing a member through a 'T' with a type constraint
+ //that makes this legal). For those cases, CanCastTo does what I want.
+ MethodTable * pAccessor = thCurrentClass.GetMethodTable();
+ if (thInstanceClass.CanCastTo(TypeHandle(pAccessor)))
+ return TRUE;
+
+    //ArrayTypeDescs are the only TypeDescs that have methods, and their methods don't have IL. No other
+    //TypeDescs need to be handled here, so only run this on MethodTables.
+ if (!thInstanceClass.IsNull())
+ {
+ return CanAccessFamilyVerificationEnclosingHelper(pAccessor, thInstanceClass);
+ }
+ return FALSE;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void
+ClassLoader::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ DAC_ENUM_DTHIS();
+
+ EMEM_OUT(("MEM: %p ClassLoader\n", dac_cast<TADDR>(this)));
+
+ if (m_pAssembly.IsValid())
+ {
+ ModuleIterator modIter = GetAssembly()->IterateModules();
+
+ while (modIter.Next())
+ {
+ modIter.GetModule()->EnumMemoryRegions(flags, true);
+ }
+ }
+}
+
+#endif // #ifdef DACCESS_COMPILE
diff --git a/src/vm/clsload.hpp b/src/vm/clsload.hpp
new file mode 100644
index 0000000000..2b56a8e20e
--- /dev/null
+++ b/src/vm/clsload.hpp
@@ -0,0 +1,1081 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: clsload.hpp
+//
+
+
+
+//
+
+//
+// ============================================================================
+
+#ifndef _H_CLSLOAD
+#define _H_CLSLOAD
+
+#include "crst.h"
+#include "eehash.h"
+#include "vars.hpp"
+#include "stubmgr.h"
+#include "typehandle.h"
+#include "object.h" // only needed for def. of PTRARRAYREF
+#include "classloadlevel.h"
+#include "specstrings.h"
+#include "simplerwlock.hpp"
+#include "classhash.h"
+
+// SystemDomain is a friend of ClassLoader.
+class SystemDomain;
+class Assembly;
+class ClassLoader;
+class TypeKey;
+class PendingTypeLoadEntry;
+class PendingTypeLoadTable;
+class EEClass;
+class Thread;
+class EETypeHashTable;
+class IAssemblySecurityDescriptor;
+class DynamicResolver;
+class SigPointer;
+
+// Hash table parameter for unresolved class hash
+#define UNRESOLVED_CLASS_HASH_BUCKETS 8
+
+// This is the information required to look up a type in the loader. Besides the
+// basic name there is the metadata information for the type, whether the
+// name is case sensitive, and tokens not to load. This last item allows
+// the loader to prevent a type from being recursively loaded.
+typedef enum NameHandleTable
+{
+ nhCaseSensitive = 0,
+ nhCaseInsensitive = 1
+} NameHandleTable;
+
+
+class NameHandle
+{
+ friend class ClassLoader;
+
+ LPCUTF8 m_nameSpace;
+ LPCUTF8 m_name;
+
+ PTR_Module m_pTypeScope;
+ mdToken m_mdType;
+ mdToken m_mdTokenNotToLoad;
+ NameHandleTable m_WhichTable;
+ PTR_EEClassHashEntry m_pBucket;
+
+public:
+
+ NameHandle()
+ {
+ LIMITED_METHOD_CONTRACT;
+ memset((void*) this, NULL, sizeof(*this));
+ }
+
+ NameHandle(LPCUTF8 name) :
+ m_nameSpace(NULL),
+ m_name(name),
+ m_pTypeScope(PTR_NULL),
+ m_mdType(mdTokenNil),
+ m_mdTokenNotToLoad(tdNoTypes),
+ m_WhichTable(nhCaseSensitive),
+ m_pBucket(PTR_NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ NameHandle(LPCUTF8 nameSpace, LPCUTF8 name) :
+ m_nameSpace(nameSpace),
+ m_name(name),
+ m_pTypeScope(PTR_NULL),
+ m_mdType(mdTokenNil),
+ m_mdTokenNotToLoad(tdNoTypes),
+ m_WhichTable(nhCaseSensitive),
+ m_pBucket(PTR_NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ }
+
+#ifndef BINDER
+ NameHandle(Module* pModule, mdToken token) :
+ m_nameSpace(NULL),
+ m_name(NULL),
+ m_pTypeScope(pModule),
+ m_mdType(token),
+ m_mdTokenNotToLoad(tdNoTypes),
+ m_WhichTable(nhCaseSensitive),
+ m_pBucket(PTR_NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ }
+#endif // !BINDER
+
+ NameHandle(const NameHandle & p)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ m_nameSpace = p.m_nameSpace;
+ m_name = p.m_name;
+ m_pTypeScope = p.m_pTypeScope;
+ m_mdType = p.m_mdType;
+ m_mdTokenNotToLoad = p.m_mdTokenNotToLoad;
+ m_WhichTable = p.m_WhichTable;
+ m_pBucket = p.m_pBucket;
+ }
+
+ void SetName(LPCUTF8 pName)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_name = pName;
+ }
+
+ void SetName(LPCUTF8 pNameSpace, LPCUTF8 pName)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC_HOST_ONLY;
+
+ m_nameSpace = pNameSpace;
+ m_name = pName;
+ }
+
+ LPCUTF8 GetName() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_name;
+ }
+
+ LPCUTF8 GetNameSpace() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_nameSpace;
+ }
+
+ void SetTypeToken(Module* pModule, mdToken mdToken)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pTypeScope = dac_cast<PTR_Module>(pModule);
+ m_mdType = mdToken;
+ }
+
+ PTR_Module GetTypeModule() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_pTypeScope;
+ }
+
+ mdToken GetTypeToken() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_mdType;
+ }
+
+ void SetTokenNotToLoad(mdToken mdtok)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC; // "this" must be a host address
+ m_mdTokenNotToLoad = mdtok;
+ }
+
+ mdToken GetTokenNotToLoad() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_mdTokenNotToLoad;
+ }
+
+ void SetCaseInsensitive()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_WhichTable = nhCaseInsensitive;
+ }
+
+ NameHandleTable GetTable() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_WhichTable;
+ }
+
+ void SetBucket(EEClassHashEntry_t * pBucket)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC; // "this" must be a host address
+ m_pBucket = dac_cast<PTR_EEClassHashEntry>(pBucket);
+ }
+
+
+ EEClassHashEntry_t * GetBucket()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_pBucket;
+ }
+
+ static BOOL OKToLoad(mdToken token, mdToken tokenNotToLoad)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
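+        // A load is vetoed either by an exact match against the token-not-to-load,
+        // or by the tdAllTypes sentinel, which suppresses all loads.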
+ return (token == 0 || token != tokenNotToLoad) && tokenNotToLoad != tdAllTypes;
+ }
+
+ BOOL OKToLoad()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return OKToLoad(m_mdType, m_mdTokenNotToLoad);
+ }
+
+};
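+
+// A minimal usage sketch of NameHandle (hypothetical lookup; values illustrative):
+/*
+    NameHandle name("System.Collections", "ArrayList");
+    name.SetCaseInsensitive();          // route the lookup to the case-insensitive table
+    name.SetTokenNotToLoad(tdAllTypes); // probe only; never trigger a type load
+    _ASSERTE(!name.OKToLoad());
+*/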
+
+//-------------------------------------------------------------------------------------------
+//
+// Introducing AccessCheckContext so that we can defer caller resolution as much as possible.
+// Stack walk is expensive and we should avoid it if we can determine accessibility without
+// knowing the caller. For example, public transparent APIs without link demand should always
+// be accessible.
+// We will have two types of AccessCheckContext.
+// 1. StaticAccessCheckContext is used by JIT and other places where the caller is statically known.
+// 2. RefSecContext is used by reflection and resolves the caller by performing a stack walk.
+//
+//-------------------------------------------------------------------------------------------
+class AccessCheckContext
+{
+public:
+ virtual MethodDesc* GetCallerMethod() = 0; // The method that wants access.
+ virtual MethodTable* GetCallerMT() = 0; // The class that wants access; NULL if interop caller.
+ virtual Assembly* GetCallerAssembly() = 0; // Assembly containing that class.
+ virtual bool IsCalledFromInterop() = 0;
+ virtual bool IsCallerCritical() = 0; // Can we do a quick check for caller's transparency status?
+};
+
+class StaticAccessCheckContext : public AccessCheckContext
+{
+public:
+
+ StaticAccessCheckContext(MethodDesc* pCallerMethod, MethodTable* pCallerType, Assembly* pCallerAssembly)
+ : m_pCallerMethod(pCallerMethod),
+ m_pCallerMT(pCallerType),
+ m_pCallerAssembly(pCallerAssembly)
+ {
+ CONTRACTL
+ {
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(CheckPointer(pCallerMethod, NULL_OK));
+ PRECONDITION(CheckPointer(pCallerType, NULL_OK));
+ PRECONDITION(CheckPointer(pCallerAssembly));
+ }
+ CONTRACTL_END;
+ }
+
+ StaticAccessCheckContext(MethodDesc* pCallerMethod);
+
+ StaticAccessCheckContext(MethodDesc* pCallerMethod, MethodTable* pCallerType);
+
+ virtual MethodDesc* GetCallerMethod()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCallerMethod;
+ }
+
+ virtual MethodTable* GetCallerMT()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCallerMT;
+ }
+
+ virtual Assembly* GetCallerAssembly()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_pCallerAssembly;
+ }
+
+ virtual bool IsCalledFromInterop()
+ {
+ WRAPPER_NO_CONTRACT;
+ return false;
+ }
+
+ virtual bool IsCallerCritical();
+
+private:
+ MethodDesc* m_pCallerMethod;
+ MethodTable* m_pCallerMT;
+ Assembly* m_pCallerAssembly;
+};
+
+//******************************************************************************
+// This type specifies the kind of accessibility checks to perform.
+// On failure, it can be configured to either return FALSE or to throw an exception.
+class AccessCheckOptions
+{
+public:
+ enum AccessCheckType
+ {
+ // Used by statically compiled code.
+ // Desktop: Just do normal accessibility checks. No security demands.
+ // CoreCLR: Just do normal accessibility checks.
+ kNormalAccessibilityChecks,
+
+        // Used only for resource loading and reflection invocation when the target is remoted.
+        // Desktop: If normal accessibility checks fail, return TRUE if a demand for MemberAccess succeeds.
+        // CoreCLR: If normal accessibility checks fail, return TRUE if the caller is Security(Safe)Critical.
+        kMemberAccess,
+
+        // Used by Reflection invocation and DynamicMethod with RestrictedSkipVisibility.
+        // Desktop: If normal accessibility checks fail, return TRUE if a demand for RestrictedMemberAccess
+        // plus the grant set of the target assembly succeeds.
+        // CoreCLR: If normal accessibility checks fail, return TRUE if the callee is App transparent code (in a user assembly).
+ kRestrictedMemberAccess,
+
+ // Used by normal DynamicMethods in full trust CoreCLR
+ // CoreCLR: Do normal visibility checks but bypass transparency checks.
+ kNormalAccessNoTransparency,
+
+ // Used by DynamicMethods with restrictedSkipVisibility in full trust CoreCLR
+ // CoreCLR: Do RestrictedMemberAcess visibility checks but bypass transparency checks.
+ kRestrictedMemberAccessNoTransparency,
+
+#ifndef FEATURE_CORECLR
+ // Used by DynamicMethod with kRestrictedMemberAccess in Win8 immersive mode.
+ // Desktop: Equals kNormalAccessibilityChecks for non-framework code calling framework code,
+ // kRestrictedMemberAccess otherwise.
+ kUserCodeOnlyRestrictedMemberAccess,
+
+ // A variation of kUserCodeOnlyRestrictedMemberAccess, but without transparency checks.
+        // This is used for reflection invocation in Win8 immersive when all domains on the call stack are full trust.
+ // This is an optimization to avoid stackwalks for transparency checks in full trust.
+ // Note that both kUserCodeOnlyRestrictedMemberAccess and kUserCodeOnlyRestrictedMemberAccessNoTransparency
+ // are needed because we restrict user code from accessing framework internals in Win8 immersive even in full trust.
+ kUserCodeOnlyRestrictedMemberAccessNoTransparency
+#endif
+ };
+
+ AccessCheckOptions(
+ AccessCheckType accessCheckType,
+ DynamicResolver * pAccessContext,
+ BOOL throwIfTargetIsInaccessible,
+ MethodTable * pTargetMT);
+
+ AccessCheckOptions(
+ AccessCheckType accessCheckType,
+ DynamicResolver * pAccessContext,
+ BOOL throwIfTargetIsInaccessible,
+ MethodDesc * pTargetMD);
+
+ AccessCheckOptions(
+ AccessCheckType accessCheckType,
+ DynamicResolver * pAccessContext,
+ BOOL throwIfTargetIsInaccessible,
+ FieldDesc * pTargetFD);
+
+ AccessCheckOptions(
+ const AccessCheckOptions & templateAccessCheckOptions,
+ BOOL throwIfTargetIsInaccessible,
+ BOOL skipCheckForCriticalCode = FALSE);
+
+    // Follow standard rules for doing accessibility checks
+ BOOL DoNormalAccessibilityChecks() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_accessCheckType == kNormalAccessibilityChecks;
+ }
+
+ // Do visibility checks including security demands for reflection access to members
+ BOOL DoReflectionAccessibilityChecks() const
+ {
+ WRAPPER_NO_CONTRACT;
+ return !DoNormalAccessibilityChecks();
+ }
+
+ BOOL Throws() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fThrowIfTargetIsInaccessible;
+ }
+
+ BOOL DemandMemberAccessOrFail(AccessCheckContext *pContext, MethodTable * pTargetMT, BOOL visibilityCheck) const;
+ BOOL FailOrThrow(AccessCheckContext *pContext) const;
+
+ BOOL TransparencyCheckNeeded() const
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_CORECLR
+ return (m_accessCheckType != kNormalAccessNoTransparency && m_accessCheckType != kRestrictedMemberAccessNoTransparency);
+#else //FEATURE_CORECLR
+ return (m_accessCheckType != kUserCodeOnlyRestrictedMemberAccessNoTransparency);
+#endif //FEATURE_CORECLR
+ }
+
+ static AccessCheckOptions* s_pNormalAccessChecks;
+
+ static void Startup();
+
+private:
+ void Initialize(
+ AccessCheckType accessCheckType,
+ BOOL throwIfTargetIsInaccessible,
+ MethodTable * pTargetMT,
+ MethodDesc * pTargetMD,
+ FieldDesc * pTargetFD,
+ BOOL skipCheckForCriticalCode = FALSE);
+
+ BOOL DemandMemberAccess(AccessCheckContext *pContext, MethodTable * pTargetMT, BOOL visibilityCheck) const;
+
+ void ThrowAccessException(
+ AccessCheckContext* pContext,
+ MethodTable* pFailureMT = NULL,
+ Exception* pInnerException = NULL,
+ BOOL fAccessingFrameworkCode = FALSE) const;
+
+ MethodTable * m_pTargetMT;
+ MethodDesc * m_pTargetMethod;
+ FieldDesc * m_pTargetField;
+
+ AccessCheckType m_accessCheckType;
+ // The context used to determine if access is allowed. It is the resolver that carries the compressed-stack used to do the Demand.
+ // If this is NULL, the access is checked against the current call-stack.
+ // This is non-NULL only for m_accessCheckType==kRestrictedMemberAccess
+ DynamicResolver * m_pAccessContext;
+ // If the target is not accessible, should the API return FALSE, or should it throw an exception?
+ BOOL m_fThrowIfTargetIsInaccessible;
+ // flag to enable legacy behavior in ClassLoader::CanAccessMemberForExtraChecks.
+ BOOL m_fSkipCheckForCriticalCode;
+};
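+
+// A hedged construction sketch (hypothetical call site): normal accessibility
+// checks against pTargetMT, throwing on failure.
+/*
+    AccessCheckOptions opts(AccessCheckOptions::kNormalAccessibilityChecks,
+                            NULL,      // no DynamicResolver access context
+                            TRUE,      // throw if the target is inaccessible
+                            pTargetMT);
+*/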
+
+void DECLSPEC_NORETURN ThrowFieldAccessException(MethodDesc *pCallerMD,
+ FieldDesc *pFD,
+ BOOL isTransparencyError,
+ UINT messageID = 0,
+ Exception *pInnerException = NULL,
+ BOOL fAccessingFrameworkCode = FALSE);
+
+void DECLSPEC_NORETURN ThrowMethodAccessException(MethodDesc *pCallerMD,
+ MethodDesc *pCalleeMD,
+ BOOL isTransparencyError,
+ UINT messageID = 0,
+ Exception *pInnerException = NULL,
+ BOOL fAccessingFrameworkCode = FALSE);
+
+void DECLSPEC_NORETURN ThrowTypeAccessException(MethodDesc *pCallerMD,
+ MethodTable *pMT,
+ BOOL isTransparencyError,
+ UINT messageID = 0,
+ Exception *pInnerException = NULL,
+ BOOL fAccessingFrameworkCode = FALSE);
+
+void DECLSPEC_NORETURN ThrowFieldAccessException(AccessCheckContext* pContext,
+ FieldDesc *pFD,
+ UINT messageID = 0,
+ Exception *pInnerException = NULL,
+ BOOL fAccessingFrameworkCode = FALSE);
+
+void DECLSPEC_NORETURN ThrowMethodAccessException(AccessCheckContext* pContext,
+ MethodDesc *pCalleeMD,
+ UINT messageID = 0,
+ Exception *pInnerException = NULL,
+ BOOL fAccessingFrameworkCode = FALSE);
+
+void DECLSPEC_NORETURN ThrowTypeAccessException(AccessCheckContext* pContext,
+ MethodTable *pMT,
+ UINT messageID = 0,
+ Exception *pInnerException = NULL,
+ BOOL fAccessingFrameworkCode = FALSE);
+
+
+//---------------------------------------------------------------------------------------
+//
+class ClassLoader
+{
+ friend class PendingTypeLoadEntry;
+ friend class MethodTableBuilder;
+ friend class AppDomain;
+ friend class Assembly;
+ friend class Module;
+ friend class CLRPrivTypeCacheWinRT;
+ friend class CLRPrivTypeCacheReflectionOnlyWinRT;
+
+ // the following two classes are friends because they will call LoadTypeHandleForTypeKey by token directly
+ friend class COMDynamicWrite;
+ friend class COMModule;
+
+private:
+ // Classes for which load is in progress
+ PendingTypeLoadTable * m_pUnresolvedClassHash;
+ CrstExplicitInit m_UnresolvedClassLock;
+
+ // Protects addition of elements to module's m_pAvailableClasses.
+ // (indeed thus protects addition of elements to any m_pAvailableClasses in any
+ // of the modules managed by this loader)
+ CrstExplicitInit m_AvailableClassLock;
+
+ CrstExplicitInit m_AvailableTypesLock;
+
+ // Do we have any modules which need to have their classes added to
+ // the available list?
+ Volatile<LONG> m_cUnhashedModules;
+
+ // Back reference to the assembly
+ PTR_Assembly m_pAssembly;
+
+public:
+
+#ifdef _DEBUG
+ DWORD m_dwDebugMethods;
+ DWORD m_dwDebugFieldDescs; // Doesn't include anything we don't allocate a FieldDesc for
+ DWORD m_dwDebugClasses;
+ DWORD m_dwDebugDuplicateInterfaceSlots;
+ DWORD m_dwGCSize;
+ DWORD m_dwInterfaceMapSize;
+ DWORD m_dwMethodTableSize;
+ DWORD m_dwVtableData;
+ DWORD m_dwStaticFieldData;
+ DWORD m_dwFieldDescData;
+ DWORD m_dwMethodDescData;
+ size_t m_dwEEClassData;
+#endif
+
+public:
+ ClassLoader(Assembly *pAssembly);
+ ~ClassLoader();
+
+private:
+
+ VOID PopulateAvailableClassHashTable(Module *pModule,
+ AllocMemTracker *pamTracker);
+
+ void LazyPopulateCaseInsensitiveHashTables();
+
+ // Lookup the hash table entry from the hash table
+ EEClassHashEntry_t *GetClassValue(NameHandleTable nhTable,
+ NameHandle *pName,
+ HashDatum *pData,
+ EEClassHashTable **ppTable,
+ Module* pLookInThisModuleOnly);
+
+
+public:
+ //#LoaderModule
+ // LoaderModule determines in which module an item gets placed.
+    // For everything except parameterized types and methods the choice is easy.
+ //
+ // If NGEN'ing we may choose to place the item into the current module (which is different from runtime behavior).
+ //
+ // The rule for determining the loader module must ensure that a type or method never outlives its loader module
+ // with respect to app-domain unloading
+ static Module * ComputeLoaderModule(MethodTable * pMT,
+ mdToken token, // the token of the method
+ Instantiation methodInst); // the type arguments to the method (if any)
+ static Module * ComputeLoaderModule(TypeKey * typeKey);
+ inline static PTR_Module ComputeLoaderModuleForFunctionPointer(TypeHandle * pRetAndArgTypes, DWORD NumArgsPlusRetType);
+ inline static PTR_Module ComputeLoaderModuleForParamType(TypeHandle paramType);
+
+private:
+ static PTR_Module ComputeLoaderModuleWorker(Module *pDefinitionModule, // the module that declares the generic type or method
+ mdToken token,
+ Instantiation classInst, // the type arguments to the type (if any)
+ Instantiation methodInst); // the type arguments to the method (if any)
+
+#ifndef BINDER
+ BOOL
+ FindClassModuleThrowing(
+ const NameHandle * pName,
+ TypeHandle * pType,
+ mdToken * pmdClassToken,
+ Module ** ppModule,
+ mdToken * pmdFoundExportedType,
+ EEClassHashEntry_t ** ppEntry,
+ Module * pLookInThisModuleOnly,
+ Loader::LoadFlag loadFlag);
+#endif // !BINDER
+
+ static PTR_Module ComputeLoaderModuleForCompilation(Module *pDefinitionModule, // the module that declares the generic type or method
+ mdToken token,
+ Instantiation classInst, // the type arguments to the type (if any)
+ Instantiation methodInst); // the type arguments to the method (if any)
+
+public:
+ void Init(AllocMemTracker *pamTracker);
+
+ PTR_Assembly GetAssembly();
+ DomainAssembly* GetDomainAssembly(AppDomain *pDomain = NULL);
+
+ void FreeModules();
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ //==================================================================================
+ // Main entry points to class loader
+ // Organized as follows:
+ // by token:
+ // TypeDef
+ // TypeDefOrRef
+ // TypeDefOrRefOrSpec
+ // by constructed type:
+ // ArrayType
+ // PointerOrByrefType
+ // FnPtrType
+ // GenericInstantiation
+ // by name:
+ // ByName
+    //  Each takes an fLoadTypes parameter with the following semantics:
+ // fLoadTypes=DontLoadTypes: if type isn't already in the loader's table, return NULL
+ // fLoadTypes=LoadTypes: if type isn't already in the loader's table, then create it
+ // Each comes in two variants, LoadXThrowing and LoadXNoThrow, the latter being just
+    //  an exception-handling wrapper around the former.
+ //
+ // Each also allows types to be loaded only up to a particular level (see classloadlevel.h).
+ // The class loader itself makes use of these levels to "break" recursion across
+ // generic instantiations. External clients should leave the parameter at its default
+ // value (CLASS_LOADED).
+ //==================================================================================
+
+public:
+
+ // We use enums for these flags so that we can easily search the codebase to
+ // determine where the flags are set to their non-default values.
+ //
+ // This enum tells us what to do if the load fails. If ThrowIfNotFound is used
+ // with a HRESULT-returning NOTHROW function then it actually indicates that
+ // an error-HRESULT will be returned.
+ // The ThrowButNullV11McppWorkaround value means ThrowIfNotFound, except when the case
+ // of a Nil ResolutionScope for a value type (erroneously generated by Everett MCPP
+ // compiler.)
+ typedef enum { ThrowIfNotFound, ReturnNullIfNotFound, ThrowButNullV11McppWorkaround } NotFoundAction;
+
+    // This flag indicates whether we should accept an uninstantiated (naked) TypeDef or TypeRef
+ // for a generic type definition, where "uninstantiated" means "not used as part of
+ // a TypeSpec"
+ typedef enum { FailIfUninstDefOrRef, PermitUninstDefOrRef } PermitUninstantiatedFlag;
+
+ // This flag indicates whether we want to "load" the type if it isn't already in the
+ // loader's tables and has reached the load level desired.
+ typedef enum { LoadTypes, DontLoadTypes } LoadTypesFlag;
+
+
+ // Load types by token (Def, Ref and Spec)
+ static TypeHandle LoadTypeDefThrowing(Module *pModule,
+ mdToken typeDef,
+ NotFoundAction fNotFound = ThrowIfNotFound,
+ PermitUninstantiatedFlag fUninstantiated = FailIfUninstDefOrRef,
+ mdToken tokenNotToLoad = tdNoTypes,
+ ClassLoadLevel level = CLASS_LOADED,
+ Instantiation * pTargetInstantiation = NULL /* used to verify arity of the loaded type */);
+
+ static TypeHandle LoadTypeDefOrRefThrowing(Module *pModule,
+ mdToken typeRefOrDef,
+ NotFoundAction fNotFound = ThrowIfNotFound,
+ PermitUninstantiatedFlag fUninstantiated = FailIfUninstDefOrRef,
+ mdToken tokenNotToLoad = tdNoTypes,
+ ClassLoadLevel level = CLASS_LOADED);
+
+ static TypeHandle LoadTypeDefOrRefOrSpecThrowing(Module *pModule,
+ mdToken typeRefOrDefOrSpec,
+ const SigTypeContext *pTypeContext,
+ NotFoundAction fNotFound = ThrowIfNotFound,
+ PermitUninstantiatedFlag fUninstantiated = FailIfUninstDefOrRef,
+ LoadTypesFlag fLoadTypes = LoadTypes,
+ ClassLoadLevel level = CLASS_LOADED,
+ BOOL dropGenericArgumentLevel = FALSE,
+ const Substitution *pSubst = NULL /* substitution to apply if the token is a type spec with generic variables */ );
+
+ // Load constructed types by providing their constituents
+ static TypeHandle LoadPointerOrByrefTypeThrowing(CorElementType typ,
+ TypeHandle baseType,
+ LoadTypesFlag fLoadTypes = LoadTypes,
+ ClassLoadLevel level = CLASS_LOADED);
+
+ // The resulting type behaves like the unmanaged view of a given value type.
+ static TypeHandle LoadNativeValueTypeThrowing(TypeHandle baseType,
+ LoadTypesFlag fLoadTypes = LoadTypes,
+ ClassLoadLevel level = CLASS_LOADED);
+
+ static TypeHandle LoadArrayTypeThrowing(TypeHandle baseType,
+ CorElementType typ = ELEMENT_TYPE_SZARRAY,
+ unsigned rank = 0,
+ LoadTypesFlag fLoadTypes = LoadTypes,
+ ClassLoadLevel level = CLASS_LOADED);
+
+ static TypeHandle LoadFnptrTypeThrowing(BYTE callConv,
+ DWORD numArgs,
+ TypeHandle* retAndArgTypes,
+ LoadTypesFlag fLoadTypes = LoadTypes,
+ ClassLoadLevel level = CLASS_LOADED);
+
+ // Load types by name
+ static TypeHandle LoadTypeByNameThrowing(Assembly *pAssembly,
+ LPCUTF8 nameSpace,
+ LPCUTF8 name,
+ NotFoundAction fNotFound = ThrowIfNotFound,
+ LoadTypesFlag fLoadTypes = LoadTypes,
+ ClassLoadLevel level = CLASS_LOADED);
+
+#ifndef BINDER
+ // Resolve a TypeRef to a TypeDef
+ // (Just a no-op on TypeDefs)
+ // Return FALSE if operation failed (e.g. type does not exist)
+ // *pfUsesTypeForwarder is set to TRUE if a type forwarder is found. It is never set to FALSE.
+ static BOOL ResolveTokenToTypeDefThrowing(Module * pTypeRefModule,
+ mdTypeRef typeRefToken,
+ Module ** ppTypeDefModule,
+ mdTypeDef * pTypeDefToken,
+ Loader::LoadFlag loadFlag = Loader::Load,
+ BOOL * pfUsesTypeForwarder = NULL);
+#endif // !BINDER
+
+ static void EnsureLoaded(TypeHandle typeHnd, ClassLoadLevel level = CLASS_LOADED);
+ static void TryEnsureLoaded(TypeHandle typeHnd, ClassLoadLevel level = CLASS_LOADED);
+
+public:
+ // Look up a class by name
+ //
+ // Guaranteed to only return NULL if pName->OKToLoad() returns FALSE.
+ // Thus when type loads are enabled this will return non-null.
+ TypeHandle LoadTypeHandleThrowIfFailed(NameHandle* pName, ClassLoadLevel level = CLASS_LOADED,
+ Module* pLookInThisModuleOnly=NULL);
+
+public:
+ // Looks up class in the local module table, if it is there it succeeds,
+ // Otherwise it fails, This is meant only for optimizations etc
+ static TypeHandle LookupTypeDefOrRefInModule(Module *pModule, mdToken cl, ClassLoadLevel *pLoadLevel = NULL);
+
+private:
+
+ VOID AddAvailableClassDontHaveLock(Module *pModule,
+ mdTypeDef classdef,
+ AllocMemTracker *pamTracker);
+
+ VOID AddAvailableClassHaveLock(Module * pModule,
+ mdTypeDef classdef,
+ AllocMemTracker * pamTracker,
+ LPCSTR szWinRtNamespacePrefix,
+ DWORD cchWinRtNamespacePrefix);
+
+ VOID AddExportedTypeDontHaveLock(Module *pManifestModule,
+ mdExportedType cl,
+ AllocMemTracker *pamTracker);
+
+ VOID AddExportedTypeHaveLock(Module *pManifestModule,
+ mdExportedType cl,
+ AllocMemTracker *pamTracker);
+
+public:
+
+    // For a generic type instance, return the representative within the class of
+ // all type handles that share code. For example,
+ // <int> --> <int>,
+ // <object> --> <__Canon>,
+ // <string> --> <__Canon>,
+ // <List<string>> --> <__Canon>,
+ // <Struct<string>> --> <Struct<__Canon>>
+ //
+ // If the code for the type handle is not shared then return
+ // the type handle itself.
+ static TypeHandle CanonicalizeGenericArg(TypeHandle genericArg);
+
+ // Determine if the specified type representation induces a sharable
+ // set of compatible instantiations when used as a type parameter to
+ // a generic type or method.
+ //
+ // For example, when sharing at reference types "object" and "Struct<object>"
+ // both induce sets of compatible instantiations, e.g. when used to build types
+ // "List<object>" and "List<Struct<object>>" respectively.
+ static BOOL IsSharableInstantiation(Instantiation inst);
+
+ // Determine if it is normalized canonical generic instantiation.
+ // Dictionary<__Canon, __Canon> -> TRUE
+ // Dictionary<__Canon, int> -> TRUE
+ // Dictionary<__Canon, String> -> FALSE
+ static BOOL IsCanonicalGenericInstantiation(Instantiation inst);
+
+ // Determine if it is the entirely-canonical generic instantiation
+ // Dictionary<__Canon, __Canon> -> TRUE
+ // Dictionary<anything else> -> FALSE
+ static BOOL IsTypicalSharedInstantiation(Instantiation inst);
+
+ // Return TRUE if inst is the typical instantiation for the type or method specified by pModule/token
+ static BOOL IsTypicalInstantiation(Module *pModule, mdToken token, Instantiation inst);
+
+ // Load canonical shared instantiation for type key (each instantiation argument is
+ // substituted by CanonicalizeGenericArg)
+ static TypeHandle LoadCanonicalGenericInstantiation(TypeKey *pTypeKey,
+ LoadTypesFlag fLoadTypes/*=LoadTypes*/,
+ ClassLoadLevel level/*=CLASS_LOADED*/);
+
+ // Create a generic instantiation.
+ // If typeDef is not a generic type then throw an exception
+ // If its arity does not match nGenericClassArgCount then throw an exception
+ // The pointer to the instantiation is not persisted e.g. the type parameters can be stack-allocated.
+ // If inst=NULL then <__Canon,...,__Canon> is assumed
+ // If fLoadTypes=DontLoadTypes then the type handle is not created if it is not
+ // already present in the tables.
+ static TypeHandle LoadGenericInstantiationThrowing(Module *pModule,
+ mdTypeDef typeDef,
+ Instantiation inst,
+ LoadTypesFlag fLoadTypes = LoadTypes,
+ ClassLoadLevel level = CLASS_LOADED,
+ const InstantiationContext *pInstContext = NULL,
+ BOOL fFromNativeImage = FALSE);
+
+// Public access Check APIs
+public:
+
+ static BOOL CanAccessClass(
+ AccessCheckContext* pContext,
+ MethodTable* pTargetClass,
+ Assembly* pTargetAssembly,
+ const AccessCheckOptions & accessCheckOptions = *AccessCheckOptions::s_pNormalAccessChecks,
+ BOOL checkTargetTypeTransparency = TRUE);
+
+ static BOOL CanAccess(
+ AccessCheckContext* pContext,
+ MethodTable* pTargetClass,
+ Assembly* pTargetAssembly,
+ DWORD dwMemberAttrs,
+ MethodDesc* pOptionalTargetMethod,
+ FieldDesc* pOptionalTargetField,
+ const AccessCheckOptions & accessCheckOptions = *AccessCheckOptions::s_pNormalAccessChecks,
+ BOOL checkTargetMethodTransparency = TRUE,
+ BOOL checkTargetTypeTransparency = TRUE);
+
+ static BOOL CanAccessClassForExtraChecks(
+ AccessCheckContext* pContext,
+ MethodTable* pTargetClass,
+ Assembly* pTargetAssembly,
+ const AccessCheckOptions & accessCheckOptions,
+ BOOL checkTargetTypeTransparency);
+
+ static BOOL CanAccessFamilyVerification(
+ TypeHandle thCurrentClass,
+ TypeHandle thInstanceClass);
+
+private:
+ // Access check helpers
+ static BOOL CanAccessMethodInstantiation(
+ AccessCheckContext* pContext,
+ MethodDesc* pOptionalTargetMethod,
+ const AccessCheckOptions & accessCheckOptions);
+
+ static BOOL CanAccessMemberForExtraChecks(
+ AccessCheckContext* pContext,
+ MethodTable* pTargetExactMT,
+ MethodDesc* pOptionalTargetMethod,
+ FieldDesc* pOptionalTargetField,
+ const AccessCheckOptions & accessCheckOptions,
+ BOOL checkTargetMethodTransparency);
+
+ static BOOL CanAccessSigForExtraChecks(
+ AccessCheckContext* pContext,
+ MethodDesc* pTargetMethodSig,
+ MethodTable* pTargetExactMT,
+ const AccessCheckOptions & accessCheckOptions,
+ BOOL checkTargetTransparency);
+
+ static BOOL CanAccessFamily(
+ MethodTable* pCurrentClass,
+ MethodTable* pTargetClass);
+
+ static BOOL CheckAccessMember(
+ AccessCheckContext* pContext,
+ MethodTable* pTargetClass,
+ Assembly* pTargetAssembly,
+ DWORD dwMemberAttrs,
+ MethodDesc* pOptionalTargetMethod,
+ FieldDesc* pOptionalTargetField,
+ const AccessCheckOptions & accessCheckOptions = *AccessCheckOptions::s_pNormalAccessChecks,
+ BOOL checkTargetMethodTransparency = TRUE,
+ BOOL checkTargetTypeTransparency = TRUE);
+
+
+public:
+ //Creates a key with both the namespace and name converted to lowercase and
+ //made into a proper namespace-path.
+ VOID CreateCanonicallyCasedKey(LPCUTF8 pszNameSpace, LPCUTF8 pszName,
+ __out LPUTF8 *ppszOutNameSpace, __out LPUTF8 *ppszOutName);
+
+ static HRESULT FindTypeDefByExportedType(IMDInternalImport *pCTImport,
+ mdExportedType mdCurrent,
+ IMDInternalImport *pTDImport,
+ mdTypeDef *mtd);
+
+#ifndef BINDER
+ class AvailableClasses_LockHolder : public CrstHolder
+ {
+ public:
+ AvailableClasses_LockHolder(ClassLoader *classLoader)
+ : CrstHolder(&classLoader->m_AvailableClassLock)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ };
+#endif // !BINDER
+
+ friend class AvailableClasses_LockHolder;
+
+private:
+ static TypeHandle LoadConstructedTypeThrowing(TypeKey *pKey,
+ LoadTypesFlag fLoadTypes = LoadTypes,
+ ClassLoadLevel level = CLASS_LOADED,
+ const InstantiationContext *pInstContext = NULL);
+
+ static TypeHandle LookupTypeKeyUnderLock(TypeKey *pKey,
+ EETypeHashTable *pTable,
+ CrstBase *pLock);
+
+ static TypeHandle LookupTypeKey(TypeKey *pKey,
+ EETypeHashTable *pTable,
+ CrstBase *pLock,
+ BOOL fCheckUnderLock);
+
+ static TypeHandle LookupInLoaderModule(TypeKey* pKey, BOOL fCheckUnderLock);
+#ifdef FEATURE_PREJIT
+ static TypeHandle LookupInPreferredZapModule(TypeKey* pKey, BOOL fCheckUnderLock);
+#endif // FEATURE_PREJIT
+
+ // Lookup a handle in the appropriate table
+ // (declaring module for TypeDef or loader-module for constructed types)
+ static TypeHandle LookupTypeHandleForTypeKey(TypeKey *pTypeKey);
+ static TypeHandle LookupTypeHandleForTypeKeyInner(TypeKey *pTypeKey, BOOL fCheckUnderLock);
+
+ static void DECLSPEC_NORETURN ThrowTypeLoadException(TypeKey *pKey, UINT resIDWhy);
+
+
+ BOOL IsNested(NameHandle* pName, mdToken *mdEncloser);
+    static BOOL IsNested(Module *pModule, mdToken typeDefOrRef, mdToken *mdEncloser);
+
+public:
+ // Helpers for FindClassModule()
+ BOOL CompareNestedEntryWithTypeDef(IMDInternalImport *pImport,
+ mdTypeDef mdCurrent,
+ EEClassHashTable *pClassHash,
+ PTR_EEClassHashEntry pEntry);
+ BOOL CompareNestedEntryWithTypeRef(IMDInternalImport *pImport,
+ mdTypeRef mdCurrent,
+ EEClassHashTable *pClassHash,
+ PTR_EEClassHashEntry pEntry);
+ BOOL CompareNestedEntryWithExportedType(IMDInternalImport *pImport,
+ mdExportedType mdCurrent,
+ EEClassHashTable *pClassHash,
+ PTR_EEClassHashEntry pEntry);
+
+ //Attempts to find/load/create a type handle but does not throw
+ // if used in "find" mode.
+ TypeHandle LoadTypeHandleThrowing(NameHandle* pName, ClassLoadLevel level = CLASS_LOADED,
+ Module* pLookInThisModuleOnly=NULL);
+
+private:
+
+#ifndef DACCESS_COMPILE
+ // Perform a single phase of class loading
+ // If no type handle has yet been created, typeHnd is null.
+ static TypeHandle DoIncrementalLoad(TypeKey *pTypeKey,
+ TypeHandle typeHnd,
+ ClassLoadLevel workLevel);
+
+ // Phase CLASS_LOAD_CREATE of class loading
+ static TypeHandle CreateTypeHandleForTypeKey(TypeKey *pTypeKey,
+ AllocMemTracker *pamTracker);
+
+ // Publish the type in the loader's tables
+ static TypeHandle PublishType(TypeKey *pTypeKey, TypeHandle typeHnd);
+
+ // Notify profiler and debugger that a type load has completed
+ // Also update perf counters
+ static void Notify(TypeHandle typeHnd);
+
+ // Phase CLASS_LOAD_EXACTPARENTS of class loading
+ // Load exact parents and interfaces and dependent structures (generics dictionary, vtable fixes)
+ static void LoadExactParents(MethodTable *pMT);
+
+ static void LoadExactParentAndInterfacesTransitively(MethodTable *pMT);
+
+
+ static TypeHandle TryFindDynLinkZapType(TypeKey* pKey);
+
+ // Create a non-canonical instantiation of a generic type based off the canonical instantiation
+ // (For example, MethodTable for List<string> is based on the MethodTable for List<__Canon>)
+ static TypeHandle CreateTypeHandleForNonCanonicalGenericInstantiation(TypeKey *pTypeKey,
+ AllocMemTracker *pamTracker);
+
+ // Loads a class. This is the inner call from the multi-threaded load. This load must
+ // be protected in some manner.
+ // If we're attempting to load a fresh instantiated type then genericArgs should be filled in
+
+ static TypeHandle CreateTypeHandleForTypeDefThrowing(Module *pModule,
+ mdTypeDef cl,
+ Instantiation inst,
+ AllocMemTracker *pamTracker);
+
+ // The token must be a type def. GC must be enabled.
+ // If we're attempting to load a fresh instantiated type then genericArgs should be filled in
+ TypeHandle LoadTypeHandleForTypeKey(TypeKey *pTypeKey,
+ TypeHandle typeHnd,
+ ClassLoadLevel level = CLASS_LOADED,
+ const InstantiationContext *pInstContext = NULL);
+
+ TypeHandle LoadTypeHandleForTypeKeyNoLock(TypeKey *pTypeKey,
+ ClassLoadLevel level = CLASS_LOADED,
+ const InstantiationContext *pInstContext = NULL);
+
+ // Used for initial loading of parent class and implemented interfaces
+ // When tok represents an instantiated type return an *approximate* instantiated
+ // type (where reference type arguments are replaced by Object)
+ static
+ TypeHandle
+ LoadApproxTypeThrowing(
+ Module * pModule,
+ mdToken tok,
+ SigPointer * pSigInst,
+ const SigTypeContext * pClassTypeContext);
+
+ // Returns the parent of a token. The token must be a typedef.
+ // If the parent is a shared constructed type (e.g. class C : List<string>) then
+ // only the canonical instantiation is loaded at this point.
+ // This is to avoid cycles in the loader e.g. on class C : D<C> or class C<T> : D<C<T>>
+ // We fix up the exact parent later in LoadInstantiatedInfo.
+ static
+ MethodTable *
+ LoadApproxParentThrowing(
+ Module * pModule,
+ mdToken cl,
+ SigPointer * pParentInst,
+ const SigTypeContext * pClassTypeContext);
+
+ // Locates the enclosing class of a token if any. The token must be a typedef.
+ static VOID GetEnclosingClassThrowing(IMDInternalImport *pInternalImport,
+ Module *pModule,
+ mdTypeDef cl,
+ mdTypeDef *tdEnclosing);
+
+ // Insert the class in the classes hash table and if needed in the case insensitive one
+ EEClassHashEntry_t *InsertValue(EEClassHashTable *pClassHash,
+ EEClassHashTable *pClassCaseInsHash,
+ LPCUTF8 pszNamespace,
+ LPCUTF8 pszClassName,
+ HashDatum Data,
+ EEClassHashEntry_t *pEncloser,
+ AllocMemTracker *pamTracker);
+
+#ifndef BINDER
+ // don't call this directly.
+ TypeHandle LoadTypeHandleForTypeKey_Body(TypeKey *pTypeKey,
+ TypeHandle typeHnd,
+ ClassLoadLevel targetLevel);
+#endif //!BINDER
+#endif //!DACCESS_COMPILE
+
+}; // class ClassLoader
+
+#endif /* _H_CLSLOAD */
diff --git a/src/vm/clsload.inl b/src/vm/clsload.inl
new file mode 100644
index 0000000000..f2ba34bddb
--- /dev/null
+++ b/src/vm/clsload.inl
@@ -0,0 +1,157 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: clsload.inl
+//
+
+//
+
+//
+// ============================================================================
+
+
+#ifndef _CLSLOAD_INL_
+#define _CLSLOAD_INL_
+
+inline PTR_Assembly ClassLoader::GetAssembly()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_pAssembly;
+}
+
+inline PTR_Module ClassLoader::ComputeLoaderModuleForFunctionPointer(TypeHandle* pRetAndArgTypes, DWORD NumArgsPlusRetType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+ return ComputeLoaderModuleWorker(NULL,
+ 0,
+ Instantiation(pRetAndArgTypes, NumArgsPlusRetType),
+ Instantiation());
+}
+
+inline PTR_Module ClassLoader::ComputeLoaderModuleForParamType(TypeHandle paramType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ //
+ // Call GetLoaderModule directly instead of ComputeLoaderModuleWorker to avoid exponential recursion for collectible types
+ //
+    // It is safe to do this even during NGen because we do not create duplicate copies of arrays and other param types
+ // (see code:CEEPreloader::TriageTypeForZap).
+ //
+ return paramType.GetLoaderModule();
+}
+
+//******************************************************************************
+
+inline void AccessCheckOptions::Initialize(
+ AccessCheckType accessCheckType,
+ BOOL throwIfTargetIsInaccessible,
+ MethodTable * pTargetMT,
+ MethodDesc * pTargetMethod,
+ FieldDesc * pTargetField,
+ BOOL skipCheckForCriticalCode /*=FALSE*/)
+{
+ CONTRACTL
+ {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ // At most one of these can be non-NULL. They can all be NULL if:
+ // 1. we are doing a normal accessibility check, or
+ // 2. we are not going to throw an exception if the accessibility check fails
+ PRECONDITION(accessCheckType == kNormalAccessibilityChecks ||
+ !throwIfTargetIsInaccessible ||
+ ((pTargetMT ? 1 : 0) + (pTargetMethod ? 1 : 0) + (pTargetField ? 1 : 0)) == 1);
+ // m_pAccessContext can only be set for kRestrictedMemberAccess
+#ifdef FEATURE_CORECLR
+ PRECONDITION(m_pAccessContext == NULL ||
+ accessCheckType == AccessCheckOptions::kRestrictedMemberAccess);
+#else
+ PRECONDITION(m_pAccessContext == NULL ||
+ accessCheckType == AccessCheckOptions::kUserCodeOnlyRestrictedMemberAccess ||
+ accessCheckType == AccessCheckOptions::kRestrictedMemberAccess);
+#endif
+ }
+ CONTRACTL_END;
+
+ m_accessCheckType = accessCheckType;
+ m_fThrowIfTargetIsInaccessible = throwIfTargetIsInaccessible;
+ m_pTargetMT = pTargetMT;
+ m_pTargetMethod = pTargetMethod;
+ m_pTargetField = pTargetField;
+ m_fSkipCheckForCriticalCode = skipCheckForCriticalCode;
+}
+
+//******************************************************************************
+
+inline AccessCheckOptions::AccessCheckOptions(
+ AccessCheckType accessCheckType,
+ DynamicResolver * pAccessContext,
+ BOOL throwIfTargetIsInaccessible,
+ MethodTable * pTargetMT) :
+ m_pAccessContext(pAccessContext)
+{
+ WRAPPER_NO_CONTRACT;
+
+ Initialize(
+ accessCheckType,
+ throwIfTargetIsInaccessible,
+ pTargetMT,
+ NULL,
+ NULL);
+}
+
+inline AccessCheckOptions::AccessCheckOptions(
+ AccessCheckType accessCheckType,
+ DynamicResolver * pAccessContext,
+ BOOL throwIfTargetIsInaccessible,
+ MethodDesc * pTargetMethod) :
+ m_pAccessContext(pAccessContext)
+{
+ WRAPPER_NO_CONTRACT;
+
+ Initialize(
+ accessCheckType,
+ throwIfTargetIsInaccessible,
+ NULL,
+ pTargetMethod,
+ NULL);
+}
+
+inline AccessCheckOptions::AccessCheckOptions(
+ AccessCheckType accessCheckType,
+ DynamicResolver * pAccessContext,
+ BOOL throwIfTargetIsInaccessible,
+ FieldDesc * pTargetField) :
+ m_pAccessContext(pAccessContext)
+{
+ WRAPPER_NO_CONTRACT;
+
+ Initialize(
+ accessCheckType,
+ throwIfTargetIsInaccessible,
+ NULL,
+ NULL,
+ pTargetField);
+}
+
+#endif // _CLSLOAD_INL_
+
diff --git a/src/vm/codeman.cpp b/src/vm/codeman.cpp
new file mode 100644
index 0000000000..b73eced187
--- /dev/null
+++ b/src/vm/codeman.cpp
@@ -0,0 +1,6516 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// codeman.cpp - a management class for handling multiple code managers
+//
+
+//
+
+#include "common.h"
+#include "jitinterface.h"
+#include "corjit.h"
+#include "eetwain.h"
+#include "eeconfig.h"
+#include "excep.h"
+#include "appdomain.hpp"
+#include "codeman.h"
+#include "nibblemapmacros.h"
+#include "generics.h"
+#include "dynamicmethod.h"
+#include "eemessagebox.h"
+#include "eventtrace.h"
+#include "threadsuspend.h"
+
+#include "exceptionhandling.h"
+
+#include "rtlfunctions.h"
+
+#include "jitperf.h"
+#include "shimload.h"
+#include "debuginfostore.h"
+#include "strsafe.h"
+
+#ifdef _WIN64
+#define CHECK_DUPLICATED_STRUCT_LAYOUTS
+#include "../debug/daccess/fntableaccess.h"
+#endif // _WIN64
+
+#define MAX_M_ALLOCATED (16 * 1024)
+
+// Default number of jump stubs in a jump stub block
+#define DEFAULT_JUMPSTUBS_PER_BLOCK 32
+
+SPTR_IMPL(EECodeManager, ExecutionManager, m_pDefaultCodeMan);
+
+SPTR_IMPL(EEJitManager, ExecutionManager, m_pEEJitManager);
+#ifdef FEATURE_PREJIT
+SPTR_IMPL(NativeImageJitManager, ExecutionManager, m_pNativeImageJitManager);
+#endif
+#ifdef FEATURE_READYTORUN
+SPTR_IMPL(ReadyToRunJitManager, ExecutionManager, m_pReadyToRunJitManager);
+#endif
+
+#ifndef DACCESS_COMPILE
+Volatile<RangeSection *> ExecutionManager::m_CodeRangeList = NULL;
+Volatile<LONG> ExecutionManager::m_dwReaderCount = 0;
+Volatile<LONG> ExecutionManager::m_dwWriterLock = 0;
+#else
+SPTR_IMPL(RangeSection, ExecutionManager, m_CodeRangeList);
+SVAL_IMPL(LONG, ExecutionManager, m_dwReaderCount);
+SVAL_IMPL(LONG, ExecutionManager, m_dwWriterLock);
+#endif
+
+#ifndef DACCESS_COMPILE
+
+CrstStatic ExecutionManager::m_JumpStubCrst;
+CrstStatic ExecutionManager::m_RangeCrst;
+
+#endif // DACCESS_COMPILE
+
+#if defined(_TARGET_AMD64_) && !defined(DACCESS_COMPILE) // We don't do this on ARM just amd64
+
+// Support for new style unwind information (to allow OS to stack crawl JIT compiled code).
+
+typedef NTSTATUS (WINAPI* RtlAddGrowableFunctionTableFnPtr) (
+ PVOID *DynamicTable, RUNTIME_FUNCTION* FunctionTable, ULONG EntryCount,
+ ULONG MaximumEntryCount, ULONG_PTR rangeStart, ULONG_PTR rangeEnd);
+typedef VOID (WINAPI* RtlGrowFunctionTableFnPtr) (PVOID DynamicTable, ULONG NewEntryCount);
+typedef VOID (WINAPI* RtlDeleteGrowableFunctionTableFnPtr) (PVOID DynamicTable);
+
+// OS entry points (only exist on Win8 and above)
+static RtlAddGrowableFunctionTableFnPtr pRtlAddGrowableFunctionTable;
+static RtlGrowFunctionTableFnPtr pRtlGrowFunctionTable;
+static RtlDeleteGrowableFunctionTableFnPtr pRtlDeleteGrowableFunctionTable;
+static Volatile<bool> RtlUnwindFtnsInited;
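+
+// Illustrative lifecycle of these entry points (a sketch; Register/UnRegister below show the
+// real usage):
+//
+//   PVOID handle = NULL;
+//   pRtlAddGrowableFunctionTable(&handle, pTable, curCount, maxCount, rangeStart, rangeEnd);
+//   ...
+//   pRtlGrowFunctionTable(handle, newCurCount);     // after appending sorted entries to pTable
+//   ...
+//   pRtlDeleteGrowableFunctionTable(handle);        // when the code range goes away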
+
+// statics for UnwindInfoTable
+Crst* UnwindInfoTable::s_pUnwindInfoTableLock = NULL;
+Volatile<bool> UnwindInfoTable::s_publishingActive = false;
+
+
+#if _DEBUG
+// Fake functions on Win7 checked builds to exercise the code paths; they are no-ops
+NTSTATUS WINAPI FakeRtlAddGrowableFunctionTable (
+ PVOID *DynamicTable, RUNTIME_FUNCTION* FunctionTable, ULONG EntryCount,
+ ULONG MaximumEntryCount, ULONG_PTR rangeStart, ULONG_PTR rangeEnd) { *DynamicTable = (PVOID) 1; return 0; }
+VOID WINAPI FakeRtlGrowFunctionTable (PVOID DynamicTable, ULONG NewEntryCount) { }
+VOID WINAPI FakeRtlDeleteGrowableFunctionTable (PVOID DynamicTable) {}
+#endif
+
+/****************************************************************************/
+// Initialize the entry points for the new Win8 unwind info publishing functions.
+// Return true if initialization is successful (i.e., the functions exist).
+
+bool InitUnwindFtns()
+{
+ CONTRACTL {
+ NOTHROW;
+ } CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+ if (!RtlUnwindFtnsInited)
+ {
+ HINSTANCE hNtdll = WszGetModuleHandle(W("ntdll.dll"));
+ if (hNtdll != NULL)
+ {
+ void* growFunctionTable = GetProcAddress(hNtdll, "RtlGrowFunctionTable");
+ void* deleteGrowableFunctionTable = GetProcAddress(hNtdll, "RtlDeleteGrowableFunctionTable");
+ void* addGrowableFunctionTable = GetProcAddress(hNtdll, "RtlAddGrowableFunctionTable");
+
+            // All or nothing: AddGrowableFunctionTable is assigned last (it serves as the marker)
+ if (growFunctionTable != NULL &&
+ deleteGrowableFunctionTable != NULL &&
+ addGrowableFunctionTable != NULL)
+ {
+ pRtlGrowFunctionTable = (RtlGrowFunctionTableFnPtr) growFunctionTable;
+ pRtlDeleteGrowableFunctionTable = (RtlDeleteGrowableFunctionTableFnPtr) deleteGrowableFunctionTable;
+ pRtlAddGrowableFunctionTable = (RtlAddGrowableFunctionTableFnPtr) addGrowableFunctionTable;
+ }
+ // Don't call FreeLibrary(hNtdll) because GetModuleHandle did *NOT* increment the reference count!
+ }
+ else
+ {
+#if _DEBUG
+ pRtlGrowFunctionTable = FakeRtlGrowFunctionTable;
+ pRtlDeleteGrowableFunctionTable = FakeRtlDeleteGrowableFunctionTable;
+ pRtlAddGrowableFunctionTable = FakeRtlAddGrowableFunctionTable;
+#endif
+ }
+ RtlUnwindFtnsInited = true;
+ }
+ return (pRtlAddGrowableFunctionTable != NULL);
+#else // !FEATURE_PAL
+ return false;
+#endif // !FEATURE_PAL
+}
+
+/****************************************************************************/
+UnwindInfoTable::UnwindInfoTable(ULONG_PTR rangeStart, ULONG_PTR rangeEnd, ULONG size)
+{
+ STANDARD_VM_CONTRACT;
+ _ASSERTE(s_pUnwindInfoTableLock->OwnedByCurrentThread());
+ _ASSERTE((rangeEnd - rangeStart) <= 0x7FFFFFFF);
+
+ cTableCurCount = 0;
+ cTableMaxCount = size;
+ cDeletedEntries = 0;
+ iRangeStart = rangeStart;
+ iRangeEnd = rangeEnd;
+ hHandle = NULL;
+ pTable = new RUNTIME_FUNCTION[cTableMaxCount];
+}
+
+/****************************************************************************/
+UnwindInfoTable::~UnwindInfoTable()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+ _ASSERTE(s_publishingActive);
+
+    // We do this lock free because too many places still want no-trigger. It should be OK.
+    // It would be cleaner if we could take the lock (i.e., if we did not have to be GC_NOTRIGGER).
+ UnRegister();
+ delete[] pTable;
+}
+
+/*****************************************************************************/
+void UnwindInfoTable::Register()
+{
+ _ASSERTE(s_pUnwindInfoTableLock->OwnedByCurrentThread());
+ EX_TRY
+ {
+ hHandle = NULL;
+ NTSTATUS ret = pRtlAddGrowableFunctionTable(&hHandle, pTable, cTableCurCount, cTableMaxCount, iRangeStart, iRangeEnd);
+ if (ret != STATUS_SUCCESS)
+ {
+ _ASSERTE(!"Failed to publish UnwindInfo (ignorable)");
+ hHandle = NULL;
+ STRESS_LOG3(LF_JIT, LL_ERROR, "UnwindInfoTable::Register ERROR %x creating table [%p, %p]", ret, iRangeStart, iRangeEnd);
+ }
+ else
+ {
+ STRESS_LOG3(LF_JIT, LL_INFO100, "UnwindInfoTable::Register Handle: %p [%p, %p]", hHandle, iRangeStart, iRangeEnd);
+ }
+ }
+ EX_CATCH
+ {
+ hHandle = NULL;
+ STRESS_LOG2(LF_JIT, LL_ERROR, "UnwindInfoTable::Register Exception while creating table [%p, %p]",
+ iRangeStart, iRangeEnd);
+ _ASSERTE(!"Failed to publish UnwindInfo (ignorable)");
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+/*****************************************************************************/
+void UnwindInfoTable::UnRegister()
+{
+ PVOID handle = hHandle;
+ hHandle = 0;
+ if (handle != 0)
+ {
+ STRESS_LOG3(LF_JIT, LL_INFO100, "UnwindInfoTable::UnRegister Handle: %p [%p, %p]", handle, iRangeStart, iRangeEnd);
+ pRtlDeleteGrowableFunctionTable(handle);
+ }
+}
+
+/*****************************************************************************/
+// Add 'data' to the linked list whose head is pointed at by 'unwindInfoPtr'
+//
+/* static */
+void UnwindInfoTable::AddToUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, RUNTIME_FUNCTION* data,
+ TADDR rangeStart, TADDR rangeEnd)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+ _ASSERTE(data->BeginAddress <= RUNTIME_FUNCTION__EndAddress(data, rangeStart));
+ _ASSERTE(RUNTIME_FUNCTION__EndAddress(data, rangeStart) <= (rangeEnd-rangeStart));
+ _ASSERTE(unwindInfoPtr != NULL);
+
+ if (!s_publishingActive)
+ return;
+
+ CrstHolder ch(s_pUnwindInfoTableLock);
+
+ UnwindInfoTable* unwindInfo = *unwindInfoPtr;
+    // Was the original list null? If so, lazily initialize.
+ if (unwindInfo == NULL)
+ {
+        // We could choose the average method size estimate dynamically based on past experience;
+        // for now, 128 is the estimated size of an average method, so we can predict approximately
+        // how many RUNTIME_FUNCTION entries are needed for each chunk we allocate.
+
+ ULONG size = (ULONG) ((rangeEnd - rangeStart) / 128) + 1;
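+        // Worked example (illustrative): a 1MB code range reserves (0x100000 / 128) + 1 = 8193
+        // RUNTIME_FUNCTION entries up front.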
+
+        // To ensure the growing logic gets exercised, make the size much smaller in debug builds.
+ INDEBUG(size = size / 4 + 1);
+ unwindInfo = (PTR_UnwindInfoTable)new UnwindInfoTable(rangeStart, rangeEnd, size);
+ unwindInfo->Register();
+ *unwindInfoPtr = unwindInfo;
+ }
+ _ASSERTE(unwindInfo != NULL); // If new had failed, we would have thrown OOM
+ _ASSERTE(unwindInfo->cTableCurCount <= unwindInfo->cTableMaxCount);
+ _ASSERTE(unwindInfo->iRangeStart == rangeStart);
+ _ASSERTE(unwindInfo->iRangeEnd == rangeEnd);
+
+    // A NULL handle means we had a failure publishing to the OS; in this case we give up
+ if (unwindInfo->hHandle == NULL)
+ return;
+
+    // Check for the fast path: we are appending to the end of an UnwindInfoTable that still has space
+ if (unwindInfo->cTableCurCount < unwindInfo->cTableMaxCount)
+ {
+ if (unwindInfo->cTableCurCount == 0 ||
+ unwindInfo->pTable[unwindInfo->cTableCurCount-1].BeginAddress < data->BeginAddress)
+ {
+            // Yeah, we can simply add to the end of the table and we are done!
+ unwindInfo->pTable[unwindInfo->cTableCurCount] = *data;
+ unwindInfo->cTableCurCount++;
+
+ // Add to the function table
+ pRtlGrowFunctionTable(unwindInfo->hHandle, unwindInfo->cTableCurCount);
+
+ STRESS_LOG5(LF_JIT, LL_INFO1000, "AddToUnwindTable Handle: %p [%p, %p] ADDING 0x%xp TO END, now 0x%x entries",
+ unwindInfo->hHandle, unwindInfo->iRangeStart, unwindInfo->iRangeEnd,
+ data->BeginAddress, unwindInfo->cTableCurCount);
+ return;
+ }
+ }
+
+    // OK, we need to reallocate the table and reregister. First figure out our 'desiredSpace'.
+    // We could imagine being much more efficient for 'bulk' updates, but we don't try,
+    // because we assume that this is rare and we want to keep the code simple.
+
+ int usedSpace = unwindInfo->cTableCurCount - unwindInfo->cDeletedEntries;
+ int desiredSpace = usedSpace * 5 / 4 + 1; // Increase by 20%
+    // Be more aggressive if we used all of our space.
+ if (usedSpace == unwindInfo->cTableMaxCount)
+ desiredSpace = usedSpace * 3 / 2 + 1; // Increase by 50%
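+    // Worked example (illustrative): with 100 live entries and some spare capacity the new size
+    // is 100 * 5 / 4 + 1 = 126; if the table was completely full it is 100 * 3 / 2 + 1 = 151.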
+
+ STRESS_LOG7(LF_JIT, LL_INFO100, "AddToUnwindTable Handle: %p [%p, %p] SLOW Realloc Cnt 0x%x Max 0x%x NewMax 0x%x, Adding %x",
+ unwindInfo->hHandle, unwindInfo->iRangeStart, unwindInfo->iRangeEnd,
+ unwindInfo->cTableCurCount, unwindInfo->cTableMaxCount, desiredSpace, data->BeginAddress);
+
+ UnwindInfoTable* newTab = new UnwindInfoTable(unwindInfo->iRangeStart, unwindInfo->iRangeEnd, desiredSpace);
+
+ // Copy in the entries, removing deleted entries and adding the new entry wherever it belongs
+ int toIdx = 0;
+ bool inserted = false; // Have we inserted 'data' into the table
+ for(ULONG fromIdx = 0; fromIdx < unwindInfo->cTableCurCount; fromIdx++)
+ {
+ if (!inserted && data->BeginAddress < unwindInfo->pTable[fromIdx].BeginAddress)
+ {
+ STRESS_LOG1(LF_JIT, LL_INFO100, "AddToUnwindTable Inserted at MID position 0x%x", toIdx);
+ newTab->pTable[toIdx++] = *data;
+ inserted = true;
+ }
+ if (unwindInfo->pTable[fromIdx].UnwindData != 0) // A 'non-deleted' entry
+ newTab->pTable[toIdx++] = unwindInfo->pTable[fromIdx];
+ }
+ if (!inserted)
+ {
+ STRESS_LOG1(LF_JIT, LL_INFO100, "AddToUnwindTable Inserted at END position 0x%x", toIdx);
+ newTab->pTable[toIdx++] = *data;
+ }
+ newTab->cTableCurCount = toIdx;
+ STRESS_LOG2(LF_JIT, LL_INFO100, "AddToUnwindTable New size 0x%x max 0x%x",
+ newTab->cTableCurCount, newTab->cTableMaxCount);
+ _ASSERTE(newTab->cTableCurCount <= newTab->cTableMaxCount);
+
+ // Unregister the old table
+ *unwindInfoPtr = 0;
+ unwindInfo->UnRegister();
+
+ // Note that there is a short time when we are not publishing...
+
+ // Register the new table
+ newTab->Register();
+ *unwindInfoPtr = newTab;
+
+ delete unwindInfo;
+}
+
+/*****************************************************************************/
+/* static */ void UnwindInfoTable::RemoveFromUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, TADDR baseAddress, TADDR entryPoint)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+ _ASSERTE(unwindInfoPtr != NULL);
+
+ if (!s_publishingActive)
+ return;
+ CrstHolder ch(s_pUnwindInfoTableLock);
+
+ UnwindInfoTable* unwindInfo = *unwindInfoPtr;
+ if (unwindInfo != NULL)
+ {
+ DWORD relativeEntryPoint = (DWORD)(entryPoint - baseAddress);
+ STRESS_LOG3(LF_JIT, LL_INFO100, "RemoveFromUnwindInfoTable Removing %p BaseAddress %p rel %x",
+ entryPoint, baseAddress, relativeEntryPoint);
+ for(ULONG i = 0; i < unwindInfo->cTableCurCount; i++)
+ {
+ if (unwindInfo->pTable[i].BeginAddress <= relativeEntryPoint &&
+ relativeEntryPoint < RUNTIME_FUNCTION__EndAddress(&unwindInfo->pTable[i], unwindInfo->iRangeStart))
+ {
+ if (unwindInfo->pTable[i].UnwindData != 0)
+ unwindInfo->cDeletedEntries++;
+ unwindInfo->pTable[i].UnwindData = 0; // Mark the entry for deletion
+ STRESS_LOG1(LF_JIT, LL_INFO100, "RemoveFromUnwindInfoTable Removed entry 0x%x", i);
+ return;
+ }
+ }
+ }
+ STRESS_LOG2(LF_JIT, LL_WARNING, "RemoveFromUnwindInfoTable COULD NOT FIND %p BaseAddress %p",
+ entryPoint, baseAddress);
+}
+
+/****************************************************************************/
+// Publish the stack unwind data 'data', which is relative to 'baseAddress',
+// to the operating system in a way ETW stack tracing can use.
+
+/* static */ void UnwindInfoTable::PublishUnwindInfoForMethod(TADDR baseAddress, RUNTIME_FUNCTION* unwindInfo, int unwindInfoCount)
+{
+ STANDARD_VM_CONTRACT;
+ if (!s_publishingActive)
+ return;
+
+ TADDR entry = baseAddress + unwindInfo->BeginAddress;
+ RangeSection * pRS = ExecutionManager::FindCodeRange(entry, ExecutionManager::GetScanFlags());
+ _ASSERTE(pRS != NULL);
+ if (pRS != NULL)
+ {
+ for(int i = 0; i < unwindInfoCount; i++)
+ AddToUnwindInfoTable(&pRS->pUnwindInfoTable, &unwindInfo[i], pRS->LowAddress, pRS->HighAddress);
+ }
+}
+
+/*****************************************************************************/
+/* static */ void UnwindInfoTable::UnpublishUnwindInfoForMethod(TADDR entryPoint)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+ if (!s_publishingActive)
+ return;
+
+ RangeSection * pRS = ExecutionManager::FindCodeRange(entryPoint, ExecutionManager::GetScanFlags());
+ _ASSERTE(pRS != NULL);
+ if (pRS != NULL)
+ {
+ _ASSERTE(pRS->pjit->GetCodeType() == (miManaged | miIL));
+ if (pRS->pjit->GetCodeType() == (miManaged | miIL))
+ {
+ // This cast is justified because only EEJitManager's have the code type above.
+ EEJitManager* pJitMgr = (EEJitManager*)(pRS->pjit);
+ CodeHeader * pHeader = pJitMgr->GetCodeHeaderFromStartAddress(entryPoint);
+ for(ULONG i = 0; i < pHeader->GetNumberOfUnwindInfos(); i++)
+ RemoveFromUnwindInfoTable(&pRS->pUnwindInfoTable, pRS->LowAddress, pRS->LowAddress + pHeader->GetUnwindInfo(i)->BeginAddress);
+ }
+ }
+}
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+extern StubUnwindInfoHeapSegment *g_StubHeapSegments;
+#endif // STUBLINKER_GENERATES_UNWIND_INFO
+
+extern CrstStatic g_StubUnwindInfoHeapSegmentsCrst;
+/*****************************************************************************/
+// Publish all existing JIT compiled methods by iterating through the code heap
+// Note that because we need to keep the entries in order we have to hold
+// s_pUnwindInfoTableLock so that all entries get inserted in the correct order.
+// (we rely on heapIterator walking the methods in a heap section in order).
+
+/* static */ void UnwindInfoTable::PublishUnwindInfoForExistingMethods()
+{
+ STANDARD_VM_CONTRACT;
+ {
+        // CodeHeapIterator holds the m_CodeHeapCritSec, which ensures code heaps don't get deallocated while being walked
+ EEJitManager::CodeHeapIterator heapIterator(NULL, NULL);
+
+ // Currently m_CodeHeapCritSec is given the CRST_UNSAFE_ANYMODE flag which allows it to be taken in a GC_NOTRIGGER
+        // region but also disallows GC_TRIGGERS. We need GC_TRIGGERS because we take another lock. Ideally we would
+        // fix m_CodeHeapCritSec to not have the CRST_UNSAFE_ANYMODE flag, but I have currently reached my threshold for fixing
+ // contracts.
+ CONTRACT_VIOLATION(GCViolation);
+
+ while(heapIterator.Next())
+ {
+ MethodDesc *pMD = heapIterator.GetMethod();
+ if(pMD)
+ {
+ _ASSERTE(!pMD->IsZapped());
+
+ PCODE methodEntry =(PCODE) heapIterator.GetMethodCode();
+ RangeSection * pRS = ExecutionManager::FindCodeRange(methodEntry, ExecutionManager::GetScanFlags());
+ _ASSERTE(pRS != NULL);
+ _ASSERTE(pRS->pjit->GetCodeType() == (miManaged | miIL));
+ if (pRS != NULL && pRS->pjit->GetCodeType() == (miManaged | miIL))
+ {
+ // This cast is justified because only EEJitManager's have the code type above.
+ EEJitManager* pJitMgr = (EEJitManager*)(pRS->pjit);
+ CodeHeader * pHeader = pJitMgr->GetCodeHeaderFromStartAddress(methodEntry);
+ int unwindInfoCount = pHeader->GetNumberOfUnwindInfos();
+ for(int i = 0; i < unwindInfoCount; i++)
+ AddToUnwindInfoTable(&pRS->pUnwindInfoTable, pHeader->GetUnwindInfo(i), pRS->LowAddress, pRS->HighAddress);
+ }
+ }
+ }
+ }
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ // Enumerate all existing stubs
+ CrstHolder crst(&g_StubUnwindInfoHeapSegmentsCrst);
+ for (StubUnwindInfoHeapSegment* pStubHeapSegment = g_StubHeapSegments; pStubHeapSegment; pStubHeapSegment = pStubHeapSegment->pNext)
+ {
+ // The stubs are in reverse order, so we reverse them so they are in memory order
+ CQuickArrayList<StubUnwindInfoHeader*> list;
+ for (StubUnwindInfoHeader *pHeader = pStubHeapSegment->pUnwindHeaderList; pHeader; pHeader = pHeader->pNext)
+ list.Push(pHeader);
+
+ for(int i = (int) list.Size()-1; i >= 0; --i)
+ {
+ StubUnwindInfoHeader *pHeader = list[i];
+ AddToUnwindInfoTable(&pStubHeapSegment->pUnwindInfoTable, &pHeader->FunctionEntry,
+ (TADDR) pStubHeapSegment->pbBaseAddress, (TADDR) pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment);
+ }
+ }
+#endif // STUBLINKER_GENERATES_UNWIND_INFO
+}
+
+/*****************************************************************************/
+// turn on the publishing of unwind info. Called when the ETW rundown provider
+// is turned on.
+
+/* static */ void UnwindInfoTable::PublishUnwindInfo(bool publishExisting)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ if (s_publishingActive)
+ return;
+
+ // If we don't have the APIs we need, give up
+ if (!InitUnwindFtns())
+ return;
+
+ EX_TRY
+ {
+ // Create the lock
+ Crst* newCrst = new Crst(CrstUnwindInfoTableLock);
+ if (InterlockedCompareExchangeT(&s_pUnwindInfoTableLock, newCrst, NULL) == NULL)
+ {
+ s_publishingActive = true;
+ if (publishExisting)
+ PublishUnwindInfoForExistingMethods();
+ }
+ else
+ delete newCrst; // we were in a race and failed, throw away the Crst we made.
+
+ } EX_CATCH {
+ STRESS_LOG1(LF_JIT, LL_ERROR, "Exception happened when doing unwind Info rundown. EIP of last AV = %p", g_LastAccessViolationEIP);
+ _ASSERTE(!"Exception thrown while publishing 'catchup' ETW unwind information");
+ s_publishingActive = false; // Try to minimize damage.
+ } EX_END_CATCH(SwallowAllExceptions);
+}
+
+#endif // defined(_TARGET_AMD64_) && !defined(DACCESS_COMPILE)
+
+/*-----------------------------------------------------------------------------
+  This is a listing of which methods use which synchronization mechanism
+ in the EEJitManager.
+//-----------------------------------------------------------------------------
+
+Setters of EEJitManager::m_CodeHeapCritSec
+-----------------------------------------------
+allocCode
+allocGCInfo
+allocEHInfo
+allocJumpStubBlock
+ResolveEHClause
+RemoveJitData
+Unload
+ReleaseReferenceToHeap
+JitCodeToMethodInfo
+
+
+Need EEJitManager::m_CodeHeapCritSec to be set
+-----------------------------------------------
+NewCodeHeap
+allocCodeRaw
+GetCodeHeapList
+GetCodeHeap
+RemoveCodeHeapFromDomainList
+DeleteCodeHeap
+AddRangeToJitHeapCache
+DeleteJitHeapCache
+
+*/
+
+
+#if !defined(DACCESS_COMPILE)
+EEJitManager::CodeHeapIterator::CodeHeapIterator(BaseDomain *pDomainFilter, LoaderAllocator *pLoaderAllocatorFilter)
+ : m_lockHolder(&(ExecutionManager::GetEEJitManager()->m_CodeHeapCritSec)), m_Iterator(NULL, 0, NULL, 0)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pHeapList = NULL;
+ m_pDomain = pDomainFilter;
+ m_pLoaderAllocator = pLoaderAllocatorFilter;
+ m_pHeapList = ExecutionManager::GetEEJitManager()->GetCodeHeapList();
+ if(m_pHeapList)
+ new (&m_Iterator) MethodSectionIterator((const void *)m_pHeapList->mapBase, (COUNT_T)m_pHeapList->maxCodeHeapSize, m_pHeapList->pHdrMap, (COUNT_T)HEAP2MAPSIZE(ROUND_UP_TO_PAGE(m_pHeapList->maxCodeHeapSize)));
+};
+
+EEJitManager::CodeHeapIterator::~CodeHeapIterator()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+}
+
+BOOL EEJitManager::CodeHeapIterator::Next()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(!m_pHeapList)
+ return FALSE;
+
+ while(1)
+ {
+ if(!m_Iterator.Next())
+ {
+ m_pHeapList = m_pHeapList->GetNext();
+ if(!m_pHeapList)
+ return FALSE;
+ new (&m_Iterator) MethodSectionIterator((const void *)m_pHeapList->mapBase, (COUNT_T)m_pHeapList->maxCodeHeapSize, m_pHeapList->pHdrMap, (COUNT_T)HEAP2MAPSIZE(ROUND_UP_TO_PAGE(m_pHeapList->maxCodeHeapSize)));
+ }
+ else
+ {
+ BYTE * code = m_Iterator.GetMethodCode();
+ CodeHeader * pHdr = (CodeHeader *)(code - sizeof(CodeHeader));
+ m_pCurrent = !pHdr->IsStubCodeBlock() ? pHdr->GetMethodDesc() : NULL;
+ if (m_pDomain && m_pCurrent)
+ {
+ BaseDomain *pCurrentBaseDomain = m_pCurrent->GetDomain();
+ if(pCurrentBaseDomain != m_pDomain)
+ continue;
+ }
+
+ // LoaderAllocator filter
+ if (m_pLoaderAllocator && m_pCurrent)
+ {
+ LoaderAllocator *pCurrentLoaderAllocator = m_pCurrent->GetLoaderAllocatorForCode();
+ if(pCurrentLoaderAllocator != m_pLoaderAllocator)
+ continue;
+ }
+
+ return TRUE;
+ }
+ }
+}
+#endif // !DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// ReaderLockHolder::ReaderLockHolder takes the reader lock, checks for the writer lock,
+// and either aborts if the writer lock is held (when host calls are disallowed), or yields
+// until the writer lock is released, keeping the reader lock. All of this happens in the
+// ReaderLockHolder constructor.
+//
+// The writer lock cannot be taken if there are any readers. The WriterLockHolder functions
+// take the writer lock and check for any readers. If there are any, the WriterLockHolder
+// functions release the writer lock and yield to wait for the readers to be done.
+
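+// Typical reader-side use (a sketch; actual callers are listed further below, e.g.
+// ExecutionManager::FindCodeRange):
+//
+//   ReaderLockHolder rlh(hostCallPreference);
+//   if (!rlh.Acquired())
+//       return NULL;                  // a writer is active and we may not wait
+//   ... walk m_CodeRangeList ...
+//   // the reader count is dropped by the holder's destructor
+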
+ExecutionManager::ReaderLockHolder::ReaderLockHolder(HostCallPreference hostCallPreference /*=AllowHostCalls*/)
+{
+ CONTRACTL {
+ NOTHROW;
+ if (hostCallPreference == AllowHostCalls) { HOST_CALLS; } else { HOST_NOCALLS; }
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ CAN_TAKE_LOCK;
+ } CONTRACTL_END;
+
+ IncCantAllocCount();
+
+ FastInterlockIncrement(&m_dwReaderCount);
+
+ EE_LOCK_TAKEN(GetPtrForLockContract());
+
+ if (VolatileLoad(&m_dwWriterLock) != 0)
+ {
+ if (hostCallPreference != AllowHostCalls)
+ {
+ // Rats, writer lock is held. Gotta bail. Since the reader count was already
+ // incremented, we're technically still blocking writers at the moment. But
+ // the holder who called us is about to call DecrementReader in its
+ // destructor and unblock writers.
+ return;
+ }
+
+ YIELD_WHILE ((VolatileLoad(&m_dwWriterLock) != 0));
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// See code:ExecutionManager::ReaderLockHolder::ReaderLockHolder. This just decrements the reader count.
+
+ExecutionManager::ReaderLockHolder::~ReaderLockHolder()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ FastInterlockDecrement(&m_dwReaderCount);
+ DecCantAllocCount();
+
+ EE_LOCK_RELEASED(GetPtrForLockContract());
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Returns whether the reader lock is acquired
+
+BOOL ExecutionManager::ReaderLockHolder::Acquired()
+{
+ LIMITED_METHOD_CONTRACT;
+ return VolatileLoad(&m_dwWriterLock) == 0;
+}
+
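+// Writer acquisition protocol (as implemented below): optimistically increment the writer
+// count, then check for readers; if any are present, roll the increment back, re-allow
+// suspension, yield, and retry until no readers are observed.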
+ExecutionManager::WriterLockHolder::WriterLockHolder()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK;
+ } CONTRACTL_END;
+
+ _ASSERTE(m_dwWriterLock == 0);
+
+ // Signal to a debugger that this thread cannot stop now
+ IncCantStopCount();
+
+ IncCantAllocCount();
+
+ DWORD dwSwitchCount = 0;
+ while (TRUE)
+ {
+ // While this thread holds the writer lock, we must not try to suspend it
+ // or allow a profiler to walk its stack
+ Thread::IncForbidSuspendThread();
+
+ FastInterlockIncrement(&m_dwWriterLock);
+ if (m_dwReaderCount == 0)
+ break;
+ FastInterlockDecrement(&m_dwWriterLock);
+
+ // Before we loop and retry, it's safe to suspend or hijack and inspect
+ // this thread
+ Thread::DecForbidSuspendThread();
+
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+ EE_LOCK_TAKEN(GetPtrForLockContract());
+}
+
+ExecutionManager::WriterLockHolder::~WriterLockHolder()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ FastInterlockDecrement(&m_dwWriterLock);
+
+ // Writer lock released, so it's safe again for this thread to be
+ // suspended or have its stack walked by a profiler
+ Thread::DecForbidSuspendThread();
+
+ DecCantAllocCount();
+
+ // Signal to a debugger that it's again safe to stop this thread
+ DecCantStopCount();
+
+ EE_LOCK_RELEASED(GetPtrForLockContract());
+}
+
+#else
+
+// For DAC builds, we only care whether the writer lock is held.
+// If it is, we will assume the locked data is in an inconsistent
+// state and throw. We never actually take the lock.
+// Note: Throws
+ExecutionManager::ReaderLockHolder::ReaderLockHolder(HostCallPreference hostCallPreference /*=AllowHostCalls*/)
+{
+ SUPPORTS_DAC;
+
+ if (m_dwWriterLock != 0)
+ {
+ ThrowHR(CORDBG_E_PROCESS_NOT_SYNCHRONIZED);
+ }
+}
+
+ExecutionManager::ReaderLockHolder::~ReaderLockHolder()
+{
+}
+
+#endif // DACCESS_COMPILE
+
+/*-----------------------------------------------------------------------------
+  This is a listing of which methods use which synchronization mechanism
+ in the ExecutionManager
+//-----------------------------------------------------------------------------
+
+==============================================================================
+ExecutionManager::ReaderLockHolder and ExecutionManager::WriterLockHolder
+Protect the callers of ExecutionManager::GetRangeSection from heap deletions
+while walking RangeSections. You need to take a reader lock before reading
+m_CodeRangeList and hold it while walking the lists.
+
+Uses ReaderLockHolder (allows multiple readers with no writers)
+-----------------------------------------
+ExecutionManager::FindCodeRange
+ExecutionManager::FindZapModule
+ExecutionManager::EnumMemoryRegions
+
+Uses WriterLockHolder (allows single writer and no readers)
+-----------------------------------------
+ExecutionManager::AddRangeHelper
+ExecutionManager::DeleteRangeHelper
+
+*/
+
+//-----------------------------------------------------------------------------
+
+#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+#define EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
+#endif
+
+#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
+// Function fragments can be used for Hot/Cold splitting, for expressing large functions, or for 'ShrinkWrapping',
+// which delays saving and restoring some callee-saved registers until later inside the body of the method.
+// (It's assumed that the JIT will not emit any ShrinkWrapping-style methods.)
+// For these cases, multiple RUNTIME_FUNCTION entries (a.k.a. function fragments) are used to define
+// all the regions of the function or funclet. One of these function fragments covers the beginning of the
+// function/funclet, including the prolog section, and is referred to as the 'Host Record'.
+// This function returns TRUE if the inspected RUNTIME_FUNCTION entry is NOT a host record.
+
+BOOL IsFunctionFragment(TADDR baseAddress, PTR_RUNTIME_FUNCTION pFunctionEntry)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE((pFunctionEntry->UnwindData & 3) == 0); // The unwind data must be an RVA; we don't support packed unwind format
+ DWORD unwindHeader = *(PTR_DWORD)(baseAddress + pFunctionEntry->UnwindData);
+ _ASSERTE((0 == ((unwindHeader >> 18) & 3)) || !"unknown unwind data format, version != 0");
+#if defined(_TARGET_ARM_)
+
+    // On ARM, it's assumed that the prolog is always at the beginning of the function and cannot be split.
+ // Given that, there are 4 possible ways to fragment a function:
+ // 1. Prolog only:
+ // 2. Prolog and some epilogs:
+ // 3. Epilogs only:
+ // 4. No Prolog or epilog
+ //
+ // Function fragments describing 1 & 2 are host records, 3 & 4 are not.
+    // For 3 & 4, the .xdata record's F bit is set to 1, clearly marking what is NOT a host record.
+
+ _ASSERTE((pFunctionEntry->BeginAddress & THUMB_CODE) == THUMB_CODE); // Sanity check: it's a thumb address
+ DWORD Fbit = (unwindHeader >> 22) & 0x1; // F "fragment" bit
+ return (Fbit == 1);
+#elif defined(_TARGET_ARM64_)
+
+    // ARM64 is a little bit more flexible, in the sense that it supports partial prologs. However, only one of the
+    // prolog regions is allowed to alter SP, and that's the Host Record. Partial prologs are used in ShrinkWrapping
+    // scenarios, which are not supported, hence we don't need to worry about them. Discarding partial prologs
+    // simplifies identifying a host record a lot.
+ //
+ // 1. Prolog only: The host record. Epilog Count and E bit are all 0.
+    // 2. Prolog and some epilogs: The host record with accompanying epilog-only records
+    // 3. Epilogs only: First unwind code is a phantom prolog (starting with an end_c, indicating an empty prolog)
+    // 4. No prologs or epilogs: Epilog Count = 1 and Epilog Start Index points to end_c (as if it's case #2 with empty epilog codes)
+ //
+
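+    // Bit layout of the .xdata unwind header as consumed below (illustrative):
+    //   bit  21     : E bit (single-epilog form)
+    //   bits 22..26 : Epilog Count (the index of the first epilog unwind code when E == 1)
+    //   bits 27..31 : Code Words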
+ int EpilogCount = (int)(unwindHeader >> 22) & 0x1F;
+ int CodeWords = unwindHeader >> 27;
+ PTR_DWORD pUnwindCodes = (PTR_DWORD)(baseAddress + pFunctionEntry->UnwindData);
+ if ((CodeWords == 0) && (EpilogCount == 0))
+ pUnwindCodes++;
+ BOOL Ebit = (unwindHeader >> 21) & 0x1;
+ if (Ebit)
+ {
+ // EpilogCount is the index of the first unwind code that describes the one and only epilog
+        // The unwind codes immediately follow the unwindHeader
+ pUnwindCodes++;
+ }
+ else if (EpilogCount != 0)
+ {
+ // EpilogCount is the number of exception scopes defined right after the unwindHeader
+ pUnwindCodes += EpilogCount+1;
+ }
+ else
+ {
+ return FALSE;
+ }
+
+ if ((*pUnwindCodes & 0xFF) == 0xE5) // Phantom prolog
+ return TRUE;
+
+
+#else
+ PORTABILITY_ASSERT("IsFunctionFragnent - NYI on this platform");
+#endif
+ return FALSE;
+}
+
+#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
+
+
+#ifndef DACCESS_COMPILE
+
+//**********************************************************************************
+// IJitManager
+//**********************************************************************************
+IJitManager::IJitManager()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_runtimeSupport = ExecutionManager::GetDefaultCodeManager();
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+// When we unload an appdomain, we need to make sure that any threads that are crawling through
+// our heap or rangelist are out. For cooperative-mode threads, we know that they will have
+// been stopped when we suspend the EE so they won't be touching an element that is about to be deleted.
+// However, pre-emptive mode threads could be stalled right on top of the element we want
+// to delete, so we need to apply the reader lock to them and wait for them to drain.
+ExecutionManager::ScanFlag ExecutionManager::GetScanFlags()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+ BEGIN_GETTHREAD_ALLOWED;
+
+ Thread *pThread = GetThread();
+
+ if (!pThread)
+ return ScanNoReaderLock;
+
+ // If this thread is hijacked by a profiler and crawling its own stack,
+ // we do need to take the lock
+ if (pThread->GetProfilerFilterContext() != NULL)
+ return ScanReaderLock;
+
+ if (pThread->PreemptiveGCDisabled() || (pThread == ThreadSuspend::GetSuspensionThread()))
+ return ScanNoReaderLock;
+
+ END_GETTHREAD_ALLOWED;
+
+ return ScanReaderLock;
+#else
+ return ScanNoReaderLock;
+#endif
+}
+
+#ifdef DACCESS_COMPILE
+
+void IJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ DAC_ENUM_VTHIS();
+ if (m_runtimeSupport.IsValid())
+ {
+ m_runtimeSupport->EnumMemoryRegions(flags);
+ }
+}
+
+#endif // #ifdef DACCESS_COMPILE
+
+#if defined(WIN64EXCEPTIONS)
+
+PTR_VOID GetUnwindDataBlob(TADDR moduleBase, PTR_RUNTIME_FUNCTION pRuntimeFunction, /* out */ SIZE_T * pSize)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#if defined(_TARGET_AMD64_)
+ PTR_UNWIND_INFO pUnwindInfo(dac_cast<PTR_UNWIND_INFO>(moduleBase + RUNTIME_FUNCTION__GetUnwindInfoAddress(pRuntimeFunction)));
+
+ *pSize = ALIGN_UP(offsetof(UNWIND_INFO, UnwindCode) +
+ sizeof(UNWIND_CODE) * pUnwindInfo->CountOfUnwindCodes +
+ sizeof(ULONG) /* personality routine is always present */,
+ sizeof(DWORD));
+
+ return pUnwindInfo;
+
+#elif defined(_TARGET_ARM_)
+
+    // If this function uses packed unwind data then at least one of the two least significant bits
+    // will be non-zero. If this is the case then there will be no xdata record to enumerate.
+ _ASSERTE((pRuntimeFunction->UnwindData & 0x3) == 0);
+
+ // compute the size of the unwind info
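+    // Header layout assumed here (illustrative): if bits 23..31 of the first word are all zero,
+    // this is the extended form and the counts live in the second word (epilog scopes in bits
+    // 0..15, unwind words in bits 16..23); otherwise the counts are packed into the first word
+    // (epilog scopes in bits 23..27, unwind words in bits 28..31).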
+ PTR_TADDR xdata = dac_cast<PTR_TADDR>(pRuntimeFunction->UnwindData + moduleBase);
+
+ ULONG epilogScopes = 0;
+ ULONG unwindWords = 0;
+ ULONG size = 0;
+
+ if ((xdata[0] >> 23) != 0)
+ {
+ size = 4;
+ epilogScopes = (xdata[0] >> 23) & 0x1f;
+ unwindWords = (xdata[0] >> 28) & 0x0f;
+ }
+ else
+ {
+ size = 8;
+ epilogScopes = xdata[1] & 0xffff;
+ unwindWords = (xdata[1] >> 16) & 0xff;
+ }
+
+ if (!(xdata[0] & (1 << 21)))
+ size += 4 * epilogScopes;
+
+ size += 4 * unwindWords;
+
+ _ASSERTE(xdata[0] & (1 << 20)); // personality routine should be always present
+ size += 4;
+
+ *pSize = size;
+ return xdata;
+#else
+ PORTABILITY_ASSERT("GetUnwindDataBlob");
+ return NULL;
+#endif
+}
+
+// GetFuncletStartAddress returns the starting address of the function or funclet indicated by the EECodeInfo address.
+TADDR IJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo)
+{
+ PTR_RUNTIME_FUNCTION pFunctionEntry = pCodeInfo->GetFunctionEntry();
+
+#ifdef _TARGET_AMD64_
+ _ASSERTE((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0);
+#endif
+
+ TADDR baseAddress = pCodeInfo->GetModuleBase();
+ TADDR funcletStartAddress = baseAddress + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry);
+
+#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
+ // Is the RUNTIME_FUNCTION a fragment? If so, we need to walk backwards until we find the first
+ // non-fragment RUNTIME_FUNCTION, and use that one. This happens when we have very large functions
+ // and multiple RUNTIME_FUNCTION entries per function or funclet. However, all but the first will
+ // have the "F" bit set in the unwind data, indicating a fragment (with phantom prolog unwind codes).
+
+ for (;;)
+ {
+ if (!IsFunctionFragment(baseAddress, pFunctionEntry))
+ {
+ // This is not a fragment; we're done
+ break;
+ }
+
+ // We found a fragment. Walk backwards in the RUNTIME_FUNCTION array until we find a non-fragment.
+ // We're guaranteed to find one, because we require that a fragment live in a function or funclet
+ // that has a prolog, which will have non-fragment .xdata.
+ --pFunctionEntry;
+
+ funcletStartAddress = baseAddress + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry);
+ }
+#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
+
+ return funcletStartAddress;
+}
+
+BOOL IJitManager::IsFunclet(EECodeInfo * pCodeInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ TADDR funcletStartAddress = GetFuncletStartAddress(pCodeInfo);
+ TADDR methodStartAddress = pCodeInfo->GetStartAddress();
+
+ return (funcletStartAddress != methodStartAddress);
+}
+
+BOOL IJitManager::IsFilterFunclet(EECodeInfo * pCodeInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!pCodeInfo->IsFunclet())
+ return FALSE;
+
+ TADDR funcletStartAddress = GetFuncletStartAddress(pCodeInfo);
+
+ // This assumes no hot/cold splitting for funclets
+
+ _ASSERTE(FitsInU4(pCodeInfo->GetCodeAddress() - funcletStartAddress));
+ DWORD relOffsetWithinFunclet = static_cast<DWORD>(pCodeInfo->GetCodeAddress() - funcletStartAddress);
+
+ _ASSERTE(pCodeInfo->GetRelOffset() >= relOffsetWithinFunclet);
+ DWORD funcletStartOffset = pCodeInfo->GetRelOffset() - relOffsetWithinFunclet;
+
+ EH_CLAUSE_ENUMERATOR pEnumState;
+ unsigned EHCount = InitializeEHEnumeration(pCodeInfo->GetMethodToken(), &pEnumState);
+ _ASSERTE(EHCount > 0);
+
+ EE_ILEXCEPTION_CLAUSE EHClause;
+ for (ULONG i = 0; i < EHCount; i++)
+ {
+ GetNextEHClause(&pEnumState, &EHClause);
+
+ // Duplicate clauses are always listed at the end, so when we hit a duplicate clause,
+ // we have already visited all of the normal clauses.
+ if (IsDuplicateClause(&EHClause))
+ {
+ break;
+ }
+
+ if (IsFilterHandler(&EHClause))
+ {
+ if (EHClause.FilterOffset == funcletStartOffset)
+ {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+#else // WIN64EXCEPTIONS
+
+FORCEINLINE PTR_VOID GetUnwindDataBlob(TADDR moduleBase, PTR_RUNTIME_FUNCTION pRuntimeFunction, /* out */ SIZE_T * pSize)
+{
+ *pSize = 0;
+ return dac_cast<PTR_VOID>(pRuntimeFunction->UnwindData + moduleBase);
+}
+
+#endif // WIN64EXCEPTIONS
+
+
+#ifndef CROSSGEN_COMPILE
+
+#ifndef DACCESS_COMPILE
+
+//**********************************************************************************
+// EEJitManager
+//**********************************************************************************
+
+EEJitManager::EEJitManager()
+ :
+ // CRST_DEBUGGER_THREAD - We take this lock on debugger thread during EnC add method, among other things
+ // CRST_TAKEN_DURING_SHUTDOWN - We take this lock during shutdown if ETW is on (to do rundown)
+ m_CodeHeapCritSec( CrstSingleUseLock,
+ CrstFlags(CRST_UNSAFE_ANYMODE|CRST_DEBUGGER_THREAD|CRST_TAKEN_DURING_SHUTDOWN)),
+ m_EHClauseCritSec( CrstSingleUseLock )
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ m_pCodeHeap = NULL;
+ m_jit = NULL;
+ m_JITCompiler = NULL;
+#ifdef _TARGET_AMD64_
+ m_JITCompilerOther = NULL;
+#endif
+#ifdef ALLOW_SXS_JIT
+ m_alternateJit = NULL;
+ m_AltJITCompiler = NULL;
+ m_AltJITRequired = false;
+#endif
+
+ m_dwCPUCompileFlags = 0;
+
+ m_cleanupList = NULL;
+}
+
+#if defined(_TARGET_AMD64_)
+extern "C" DWORD __stdcall getcpuid(DWORD arg, unsigned char result[16]);
+#endif // defined(_TARGET_AMD64_)
+
+void EEJitManager::SetCpuInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ //
+ // NOTE: This function needs to be kept in sync with Zapper::CompileAssembly()
+ //
+
+ DWORD dwCPUCompileFlags = 0;
+
+#if defined(_TARGET_X86_)
+ // NOTE: if you're adding any flags here, you probably should also be doing it
+ // for ngen (zapper.cpp)
+ CORINFO_CPU cpuInfo;
+ GetSpecificCpuInfo(&cpuInfo);
+
+ switch (CPU_X86_FAMILY(cpuInfo.dwCPUType))
+ {
+ case CPU_X86_PENTIUM_4:
+ dwCPUCompileFlags |= CORJIT_FLG_TARGET_P4;
+ break;
+ default:
+ break;
+ }
+
+ if (CPU_X86_USE_CMOV(cpuInfo.dwFeatures))
+ {
+ dwCPUCompileFlags |= CORJIT_FLG_USE_CMOV |
+ CORJIT_FLG_USE_FCOMI;
+ }
+
+ if (CPU_X86_USE_SSE2(cpuInfo.dwFeatures))
+ {
+ dwCPUCompileFlags |= CORJIT_FLG_USE_SSE2;
+ }
+#elif defined(_TARGET_AMD64_)
+ unsigned char buffer[16];
+ DWORD maxCpuId = getcpuid(0, buffer);
+ if (maxCpuId >= 0)
+ {
+ // getcpuid executes cpuid with eax set to its first argument, and ecx cleared.
+ // It returns the resulting eax in buffer[0-3], ebx in buffer[4-7], ecx in buffer[8-11],
+ // and edx in buffer[12-15].
+ // We will set the following flags:
+ // CORJIT_FLG_USE_SSE3_4 if the following feature bits are set (input EAX of 1)
+ // SSE3 - ECX bit 0 (buffer[8] & 0x01)
+ // SSSE3 - ECX bit 9 (buffer[9] & 0x02)
+ // SSE4.1 - ECX bit 19 (buffer[10] & 0x08)
+ // SSE4.2 - ECX bit 20 (buffer[10] & 0x10)
+ // CORJIT_FLG_USE_AVX if the following feature bit is set (input EAX of 1):
+ // AVX - ECX bit 28 (buffer[11] & 0x10)
+ // CORJIT_FLG_USE_AVX2 if the following feature bit is set (input EAX of 0x07 and input ECX of 0):
+ // AVX2 - EBX bit 5 (buffer[4] & 0x20)
+        //     CORJIT_FLG_USE_AVX_512 is not currently set, but is defined so that it can be used in the future
+        //     without having to update the VM and JIT synchronously.
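+        // Worked example of the bit arithmetic above (illustrative): ECX bit 19 (SSE4.1) lands in
+        // byte 19 / 8 = 2 of the ECX slot, i.e. buffer[8 + 2] = buffer[10], at bit 19 % 8 = 3,
+        // hence the 0x08 mask.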
+ (void) getcpuid(1, buffer);
+ // If SSE2 is not enabled, there is no point in checking the rest.
+ // SSE2 is bit 26 of EDX (buffer[15] & 0x04)
+ // TODO: Determine whether we should break out the various SSE options further.
+ if ((buffer[15] & 0x04) != 0) // SSE2
+ {
+ if (((buffer[8] & 0x01) != 0) && // SSE3
+ ((buffer[9] & 0x02) != 0) && // SSSE3
+ ((buffer[10] & 0x08) != 0) && // SSE4.1
+ ((buffer[10] & 0x10) != 0)) // SSE4.2
+ {
+ dwCPUCompileFlags |= CORJIT_FLG_USE_SSE3_4;
+ }
+ if ((buffer[11] & 0x10) != 0)
+ {
+ dwCPUCompileFlags |= CORJIT_FLG_USE_AVX;
+ if (maxCpuId >= 0x07)
+ {
+ (void) getcpuid(0x07, buffer);
+ if ((buffer[4] & 0x20) != 0)
+ {
+ dwCPUCompileFlags |= CORJIT_FLG_USE_AVX2;
+ }
+ }
+ }
+ static ConfigDWORD fFeatureSIMD;
+ if (fFeatureSIMD.val(CLRConfig::EXTERNAL_FeatureSIMD) != 0)
+ {
+ dwCPUCompileFlags |= CORJIT_FLG_FEATURE_SIMD;
+ }
+ }
+ }
+#endif // defined(_TARGET_AMD64_)
+
+ m_dwCPUCompileFlags = dwCPUCompileFlags;
+}
+
+// LoadAndInitializeJIT: load the JIT dll into the process, and initialize it (call the UtilCode initialization function,
+// check the JIT-EE interface GUID, etc.)
+//
+// Parameters:
+//
+// pwzJitName - The filename of the JIT .dll file to load. E.g., "altjit.dll".
+// phJit - On return, *phJit is the Windows module handle of the loaded JIT dll. It will be NULL if the load failed.
+// ppICorJitCompiler - On return, *ppICorJitCompiler is the ICorJitCompiler* returned by the JIT's getJit() entrypoint.
+// It is NULL if the JIT returns a NULL interface pointer, or if the JIT-EE interface GUID is mismatched.
+// Note that if the given JIT is loaded, but the interface is mismatched, then *phJit will be legal and non-NULL
+// even though *ppICorJitCompiler is NULL. This allows the caller to unload the JIT dll, if necessary
+// (nobody does this today).
+//
+static void LoadAndInitializeJIT(LPCWSTR pwzJitName, OUT HINSTANCE* phJit, OUT ICorJitCompiler** ppICorJitCompiler)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(phJit != NULL);
+ _ASSERTE(ppICorJitCompiler != NULL);
+
+ *phJit = NULL;
+ *ppICorJitCompiler = NULL;
+
+ HRESULT hr = E_FAIL;
+
+#ifdef FEATURE_MERGE_JIT_AND_ENGINE
+ WCHAR CoreClrFolder[MAX_PATH + 1];
+ extern HINSTANCE g_hThisInst;
+ if (WszGetModuleFileName(g_hThisInst, CoreClrFolder, MAX_PATH))
+ {
+ WCHAR *filePtr = wcsrchr(CoreClrFolder, W('\\'));
+ if (filePtr)
+ {
+ filePtr[1] = W('\0');
+ wcscat_s(CoreClrFolder, MAX_PATH, pwzJitName);
+ *phJit = CLRLoadLibrary(CoreClrFolder);
+ if (*phJit != NULL)
+ {
+ hr = S_OK;
+ }
+ }
+ }
+#else
+ hr = g_pCLRRuntime->LoadLibrary(pwzJitName, phJit);
+#endif
+
+ if (SUCCEEDED(hr))
+ {
+ EX_TRY
+ {
+ typedef void (__stdcall* psxsJitStartup) (CoreClrCallbacks const &);
+ psxsJitStartup sxsJitStartupFn = (psxsJitStartup) GetProcAddress(*phJit, "sxsJitStartup");
+
+ if (sxsJitStartupFn)
+ {
+ CoreClrCallbacks cccallbacks = GetClrCallbacks();
+ (*sxsJitStartupFn) (cccallbacks);
+
+ typedef ICorJitCompiler* (__stdcall* pGetJitFn)();
+ pGetJitFn getJitFn = (pGetJitFn) GetProcAddress(*phJit, "getJit");
+
+ if (getJitFn)
+ {
+ ICorJitCompiler* pICorJitCompiler = (*getJitFn)();
+ if (pICorJitCompiler != NULL)
+ {
+ GUID versionId;
+ memset(&versionId, 0, sizeof(GUID));
+ pICorJitCompiler->getVersionIdentifier(&versionId);
+
+ if (memcmp(&versionId, &JITEEVersionIdentifier, sizeof(GUID)) != 0)
+ {
+ // Mismatched version ID. Fail the load.
+ LOG((LF_JIT, LL_FATALERROR, "Mismatched JIT version identifier"));
+ }
+ else
+ {
+ // The JIT has loaded and passed the version identifier test, so publish the JIT interface to the caller.
+ *ppICorJitCompiler = pICorJitCompiler;
+ }
+ }
+ }
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+}
+
+#ifdef FEATURE_MERGE_JIT_AND_ENGINE
+EXTERN_C ICorJitCompiler* __stdcall getJit();
+#endif // FEATURE_MERGE_JIT_AND_ENGINE
+
+// Set this to the result of LoadJIT as a courtesy to code:CorCompileGetRuntimeDll
+extern HMODULE s_ngenCompilerDll;
+
+BOOL EEJitManager::LoadJIT()
+{
+ STANDARD_VM_CONTRACT;
+
+ // If the JIT is already loaded, don't take the lock.
+ if (IsJitLoaded())
+ return TRUE;
+
+ // Abuse m_EHClauseCritSec to ensure that the JIT is loaded on one thread only
+ CrstHolder chRead(&m_EHClauseCritSec);
+
+ // Did someone load the JIT before we got the lock?
+ if (IsJitLoaded())
+ return TRUE;
+
+ SetCpuInfo();
+
+ ICorJitCompiler* newJitCompiler = NULL;
+
+#ifdef FEATURE_MERGE_JIT_AND_ENGINE
+
+ typedef ICorJitCompiler* (__stdcall* pGetJitFn)();
+ pGetJitFn getJitFn = (pGetJitFn) getJit;
+ EX_TRY
+ {
+ newJitCompiler = (*getJitFn)();
+
+ // We don't need to call getVersionIdentifier(), since the JIT is linked together with the VM.
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+#else // !FEATURE_MERGE_JIT_AND_ENGINE
+
+ m_JITCompiler = NULL;
+#ifdef _TARGET_AMD64_
+ m_JITCompilerOther = NULL;
+#endif
+
+ LoadAndInitializeJIT(ExecutionManager::GetJitName(), &m_JITCompiler, &newJitCompiler);
+
+ // Set as a courtesy to code:CorCompileGetRuntimeDll
+ s_ngenCompilerDll = m_JITCompiler;
+
+#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
+ // If COMPLUS_UseLegacyJit=1, then we fall back to compatjit.dll.
+ //
+ // This fallback mechanism was introduced for Visual Studio "14" Preview, when JIT64 (the legacy JIT) was replaced with
+    // RyuJIT. It was desired to provide a fallback mechanism in case compatibility problems (or other bugs)
+ // were discovered by customers. Setting this COMPLUS variable to 1 does not affect NGEN: existing NGEN images continue
+ // to be used, and all subsequent NGEN compilations continue to use the new JIT.
+ //
+ // If this is a compilation process, then we don't allow specifying a fallback JIT. This is a case where, when NGEN'ing,
+ // we sometimes need to JIT some things (such as when we are NGEN'ing mscorlib). In that case, we want to use exactly
+ // the same JIT as NGEN uses. And NGEN doesn't follow the COMPLUS_UseLegacyJit=1 switch -- it always uses clrjit.dll.
+ //
+ // Note that we always load and initialize the default JIT. This is to handle cases where obfuscators rely on
+ // LoadLibrary("clrjit.dll") returning the module handle of the JIT, and then they call GetProcAddress("getJit") to get
+ // the EE-JIT interface. They also do this without also calling sxsJitStartup()!
+ //
+ // In addition, for reasons related to servicing, we only use RyuJIT when the registry value UseRyuJIT (type DWORD), under
+ // key HKLM\SOFTWARE\Microsoft\.NETFramework, is set to 1. Otherwise, we fall back to JIT64. Note that if this value
+ // is set, we also must use JIT64 for all NGEN compilations as well.
+ //
+ // See the document "RyuJIT Compatibility Fallback Specification.docx" for details.
+
+ bool fUseRyuJit = (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_UseRyuJit) == 1); // uncached access, since this code is run no more than one time
+
+ // ****** TODO: Until the registry value is set by the .NET 4.6 installer, we pretend .NET 4.6 has been installed, which causes
+ // ****** RyuJit to be used by default.
+ fUseRyuJit = true;
+
+ if ((!IsCompilationProcess() || !fUseRyuJit) && // Use RyuJIT for all NGEN, unless we're falling back to JIT64 for everything.
+ (newJitCompiler != nullptr)) // the main JIT must successfully load before we try loading the fallback JIT
+ {
+ BOOL fUsingCompatJit = FALSE;
+
+ if (!fUseRyuJit)
+ {
+ fUsingCompatJit = TRUE;
+ }
+
+ if (!fUsingCompatJit)
+ {
+ DWORD useLegacyJit = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_UseLegacyJit); // uncached access, since this code is run no more than one time
+ if (useLegacyJit == 1)
+ {
+ fUsingCompatJit = TRUE;
+ }
+ }
+
+#if defined(FEATURE_APPX_BINDER)
+ if (!fUsingCompatJit)
+ {
+ // AppX applications don't have a .config file for per-app configuration. So, we allow the placement of a single
+ // distinguished file, "UseLegacyJit.txt" in the root of the app's package to indicate that the app should fall
+ // back to JIT64. This same file is also used to prevent this app from participating in AutoNgen.
+ if (AppX::IsAppXProcess())
+ {
+ WCHAR szPathName[MAX_PATH];
+ UINT32 cchPathName = MAX_PATH;
+ if (AppX::FindFileInCurrentPackage(L"UseLegacyJit.txt", &cchPathName, szPathName, PACKAGE_FILTER_HEAD) == S_OK)
+ {
+ fUsingCompatJit = TRUE;
+ }
+ }
+ }
+#endif // FEATURE_APPX_BINDER
+
+ if (fUsingCompatJit)
+ {
+ // Now, load the compat jit and initialize it.
+
+ LPWSTR pwzJitName = MAKEDLLNAME_W(L"compatjit");
+
+ // Note: if the compatjit fails to load, we ignore it, and continue to use the main JIT for
+ // everything. You can imagine a policy where if the user requests the compatjit, and we fail
+ // to load it, that we fail noisily. We don't do that currently.
+ ICorJitCompiler* fallbackICorJitCompiler;
+ LoadAndInitializeJIT(pwzJitName, &m_JITCompilerOther, &fallbackICorJitCompiler);
+ if (fallbackICorJitCompiler != nullptr)
+ {
+ // Tell the main JIT to fall back to the "fallback" JIT compiler, in case some
+ // obfuscator tries to directly call the main JIT's getJit() function.
+ newJitCompiler->setRealJit(fallbackICorJitCompiler);
+ }
+ }
+ }
+#endif // defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
+
+#endif // !FEATURE_MERGE_JIT_AND_ENGINE
+
+#ifdef ALLOW_SXS_JIT
+
+ // Do not load altjit.dll unless COMPLUS_AltJit is set.
+ // Even if the main JIT fails to load, if the user asks for an altjit we try to load it.
+ // This allows us to display load error messages for loading altjit.
+
+ ICorJitCompiler* newAltJitCompiler = NULL;
+
+ LPWSTR altJitConfig;
+ IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AltJit, &altJitConfig));
+
+ m_AltJITCompiler = NULL;
+
+ if (altJitConfig != NULL)
+ {
+ // Load the altjit into the system.
+ // Note: altJitName must be declared as a const otherwise assigning the string
+ // constructed by MAKEDLLNAME_W() to altJitName will cause a build break on Unix.
+ LPCWSTR altJitName;
+ IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AltJitName, (LPWSTR*)&altJitName));
+
+ if (altJitName == NULL)
+ {
+ altJitName = MAKEDLLNAME_W(W("protojit"));
+ }
+
+ LoadAndInitializeJIT(altJitName, &m_AltJITCompiler, &newAltJitCompiler);
+ }
+
+#endif // ALLOW_SXS_JIT
+
+ // Publish the compilers.
+
+#ifdef ALLOW_SXS_JIT
+ m_AltJITRequired = (altJitConfig != NULL);
+ m_alternateJit = newAltJitCompiler;
+#endif // ALLOW_SXS_JIT
+
+ m_jit = newJitCompiler;
+
+ // Failing to load the main JIT is a failure.
+ // If the user requested an altjit and we failed to load an altjit, that is also a failure.
+    // In either failure case, we'll rip down the VM, so there is no need to clean up (unload) either JIT that did load successfully.
+ return IsJitLoaded();
+}
+
+#ifndef CROSSGEN_COMPILE
+//**************************************************************************
+
+CodeFragmentHeap::CodeFragmentHeap(LoaderAllocator * pAllocator, StubCodeBlockKind kind)
+ : m_pAllocator(pAllocator), m_pFreeBlocks(NULL), m_kind(kind),
+      // CRST_DEBUGGER_THREAD - We take this lock on the debugger thread during EnC add method
+ m_CritSec(CrstCodeFragmentHeap, CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD))
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+void CodeFragmentHeap::AddBlock(VOID * pMem, size_t dwSize)
+{
+ LIMITED_METHOD_CONTRACT;
+ FreeBlock * pBlock = (FreeBlock *)pMem;
+ pBlock->m_pNext = m_pFreeBlocks;
+ pBlock->m_dwSize = dwSize;
+ m_pFreeBlocks = pBlock;
+}
+
+void CodeFragmentHeap::RemoveBlock(FreeBlock ** ppBlock)
+{
+ LIMITED_METHOD_CONTRACT;
+ FreeBlock * pBlock = *ppBlock;
+ *ppBlock = pBlock->m_pNext;
+ ZeroMemory(pBlock, sizeof(FreeBlock));
+}
+
+TaggedMemAllocPtr CodeFragmentHeap::RealAllocAlignedMem(size_t dwRequestedSize
+ ,unsigned dwAlignment
+#ifdef _DEBUG
+ ,__in __in_z const char *szFile
+ ,int lineNum
+#endif
+ )
+{
+ CrstHolder ch(&m_CritSec);
+
+ dwRequestedSize = ALIGN_UP(dwRequestedSize, sizeof(TADDR));
+
+ if (dwRequestedSize < sizeof(FreeBlock))
+ dwRequestedSize = sizeof(FreeBlock);
+
+ // We will try to batch up allocation of small blocks into one large allocation
+#define SMALL_BLOCK_THRESHOLD 0x100
+ SIZE_T nFreeSmallBlocks = 0;
+
+ FreeBlock ** ppBestFit = NULL;
+ FreeBlock ** ppFreeBlock = &m_pFreeBlocks;
+ while (*ppFreeBlock != NULL)
+ {
+ FreeBlock * pFreeBlock = *ppFreeBlock;
+ if (((BYTE *)pFreeBlock + pFreeBlock->m_dwSize) - (BYTE *)ALIGN_UP(pFreeBlock, dwAlignment) >= (SSIZE_T)dwRequestedSize)
+ {
+ if (ppBestFit == NULL || pFreeBlock->m_dwSize < (*ppBestFit)->m_dwSize)
+ ppBestFit = ppFreeBlock;
+ }
+ else
+ {
+ if (pFreeBlock->m_dwSize < SMALL_BLOCK_THRESHOLD)
+ nFreeSmallBlocks++;
+ }
+ ppFreeBlock = &(*ppFreeBlock)->m_pNext;
+ }
+
+ VOID * pMem;
+ SIZE_T dwSize;
+ if (ppBestFit != NULL)
+ {
+ pMem = *ppBestFit;
+ dwSize = (*ppBestFit)->m_dwSize;
+
+ RemoveBlock(ppBestFit);
+ }
+ else
+ {
+ dwSize = dwRequestedSize;
+ if (dwSize < SMALL_BLOCK_THRESHOLD)
+ dwSize = 4 * SMALL_BLOCK_THRESHOLD;
+ pMem = ExecutionManager::GetEEJitManager()->allocCodeFragmentBlock(dwSize, dwAlignment, m_pAllocator, m_kind);
+ }
+
+ SIZE_T dwExtra = (BYTE *)ALIGN_UP(pMem, dwAlignment) - (BYTE *)pMem;
+ _ASSERTE(dwSize >= dwExtra + dwRequestedSize);
+ SIZE_T dwRemaining = dwSize - (dwExtra + dwRequestedSize);
+
+ // Avoid accumulation of too many small blocks. The more small free blocks we have, the more picky we are going to be about adding new ones.
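+ // Illustration (hypothetical numbers): with SMALL_BLOCK_THRESHOLD = 0x100,
+ // every small free block already on the list raises the bar by 0x10 bytes,
+ // so with 4 small blocks queued a remainder must be at least
+ // max(sizeof(FreeBlock), sizeof(StubPrecode)) + 0x40 bytes (or at least
+ // 0x100 bytes outright) to be put back on the free list.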
+ if ((dwRemaining >= max(sizeof(FreeBlock), sizeof(StubPrecode)) + (SMALL_BLOCK_THRESHOLD / 0x10) * nFreeSmallBlocks) || (dwRemaining >= SMALL_BLOCK_THRESHOLD))
+ {
+ AddBlock((BYTE *)pMem + dwExtra + dwRequestedSize, dwRemaining);
+ dwSize -= dwRemaining;
+ }
+
+ TaggedMemAllocPtr tmap;
+ tmap.m_pMem = pMem;
+ tmap.m_dwRequestedSize = dwSize;
+ tmap.m_pHeap = this;
+ tmap.m_dwExtra = dwExtra;
+#ifdef _DEBUG
+ tmap.m_szFile = szFile;
+ tmap.m_lineNum = lineNum;
+#endif
+ return tmap;
+}
+
+void CodeFragmentHeap::RealBackoutMem(void *pMem
+ , size_t dwSize
+#ifdef _DEBUG
+ , __in __in_z const char *szFile
+ , int lineNum
+ , __in __in_z const char *szAllocFile
+ , int allocLineNum
+#endif
+ )
+{
+ CrstHolder ch(&m_CritSec);
+
+ _ASSERTE(dwSize >= sizeof(FreeBlock));
+
+ ZeroMemory((BYTE *)pMem, dwSize);
+
+ //
+ // Try to coalesce blocks if possible
+ //
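+ // Both directions are checked on each pass: a free block that starts right at
+ // the end of [pMem..pMem+dwSize) is absorbed forward (pMem stays), and a free
+ // block that ends right at pMem absorbs us backward (pMem moves to its start).
+ // E.g., freeing the middle piece of three adjacent blocks collapses all three
+ // into a single free block.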
+ FreeBlock ** ppFreeBlock = &m_pFreeBlocks;
+ while (*ppFreeBlock != NULL)
+ {
+ FreeBlock * pFreeBlock = *ppFreeBlock;
+
+ if ((BYTE *)pFreeBlock == (BYTE *)pMem + dwSize)
+ {
+ // pMem stays the same; the following free block is absorbed.
+ dwSize += pFreeBlock->m_dwSize;
+ RemoveBlock(ppFreeBlock);
+ continue;
+ }
+ else
+ if ((BYTE *)pFreeBlock + pFreeBlock->m_dwSize == (BYTE *)pMem)
+ {
+ pMem = pFreeBlock;
+ dwSize += pFreeBlock->m_dwSize;
+ RemoveBlock(ppFreeBlock);
+ continue;
+ }
+
+ ppFreeBlock = &(*ppFreeBlock)->m_pNext;
+ }
+
+ AddBlock(pMem, dwSize);
+}
+#endif // CROSSGEN_COMPILE
+
+//**************************************************************************
+
+LoaderCodeHeap::LoaderCodeHeap(size_t * pPrivatePCLBytes)
+ : m_LoaderHeap(pPrivatePCLBytes,
+ 0, // RangeList *pRangeList
+ TRUE), // BOOL fMakeExecutable
+ m_cbMinNextPad(0)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+void ThrowOutOfMemoryWithinRange()
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ EX_THROW(EEMessageException, (kOutOfMemoryException, IDS_EE_OUT_OF_MEMORY_WITHIN_RANGE));
+}
+
+HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap *pJitMetaHeap)
+{
+ CONTRACT(HeapList *) {
+ THROWS;
+ GC_NOTRIGGER;
+ POSTCONDITION(CheckPointer(RETVAL));
+ } CONTRACT_END;
+
+ size_t * pPrivatePCLBytes = NULL;
+ size_t reserveSize = pInfo->getReserveSize();
+ size_t initialRequestSize = pInfo->getRequestSize();
+ const BYTE * loAddr = pInfo->m_loAddr;
+ const BYTE * hiAddr = pInfo->m_hiAddr;
+
+ // Make sure that what we are reserving will fit inside a DWORD
+ if (reserveSize != (DWORD) reserveSize)
+ {
+ _ASSERTE(!"reserveSize does not fit in a DWORD");
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+
+#ifdef ENABLE_PERF_COUNTERS
+ pPrivatePCLBytes = &(GetPerfCounters().m_Loading.cbLoaderHeapSize);
+#endif
+
+ LOG((LF_JIT, LL_INFO100,
+ "Request new LoaderCodeHeap::CreateCodeHeap(%08x, %08x, for loader allocator" FMT_ADDR "in" FMT_ADDR ".." FMT_ADDR ")\n",
+ (DWORD) reserveSize, (DWORD) initialRequestSize, DBG_ADDR(pInfo->m_pAllocator), DBG_ADDR(loAddr), DBG_ADDR(hiAddr)
+ ));
+
+ NewHolder<LoaderCodeHeap> pCodeHeap(new LoaderCodeHeap(pPrivatePCLBytes));
+
+ BYTE * pBaseAddr = NULL;
+ DWORD dwSizeAcquiredFromInitialBlock = 0;
+
+ pBaseAddr = (BYTE *)pInfo->m_pAllocator->GetCodeHeapInitialBlock(loAddr, hiAddr, (DWORD)initialRequestSize, &dwSizeAcquiredFromInitialBlock);
+ if (pBaseAddr != NULL)
+ {
+ pCodeHeap->m_LoaderHeap.SetReservedRegion(pBaseAddr, dwSizeAcquiredFromInitialBlock, FALSE);
+ }
+ else
+ {
+ if (loAddr != NULL || hiAddr != NULL)
+ {
+ pBaseAddr = ClrVirtualAllocWithinRange(loAddr, hiAddr,
+ reserveSize, MEM_RESERVE, PAGE_NOACCESS);
+ if (!pBaseAddr)
+ ThrowOutOfMemoryWithinRange();
+ }
+ else
+ {
+ pBaseAddr = ClrVirtualAllocExecutable(reserveSize, MEM_RESERVE, PAGE_NOACCESS);
+ if (!pBaseAddr)
+ ThrowOutOfMemory();
+ }
+ pCodeHeap->m_LoaderHeap.SetReservedRegion(pBaseAddr, reserveSize, TRUE);
+ }
+
+
+ // This first allocation is critical, as it sets up the loader heap info correctly.
+ HeapList *pHp = (HeapList*)pCodeHeap->m_LoaderHeap.AllocMem(sizeof(HeapList));
+
+ pHp->pHeap = pCodeHeap;
+
+ size_t heapSize = pCodeHeap->m_LoaderHeap.GetReservedBytesFree();
+ size_t nibbleMapSize = HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heapSize));
+
+ pHp->startAddress = (TADDR)pHp + sizeof(HeapList);
+
+ pHp->endAddress = pHp->startAddress;
+ pHp->maxCodeHeapSize = heapSize;
+
+ _ASSERTE(heapSize >= initialRequestSize);
+
+ // We do not need to memset this memory, since ClrVirtualAlloc() guarantees that the memory is zero.
+ // Furthermore, if we avoid writing to it, these pages don't come into our working set.
+
+ pHp->bFull = false;
+ pHp->bFullForJumpStubs = false;
+
+ pHp->cBlocks = 0;
+
+ pHp->mapBase = ROUND_DOWN_TO_PAGE(pHp->startAddress); // round down to the next lower page boundary
+ pHp->pHdrMap = (DWORD*)(void*)pJitMetaHeap->AllocMem(S_SIZE_T(nibbleMapSize));
+
+ LOG((LF_JIT, LL_INFO100,
+ "Created new CodeHeap(" FMT_ADDR ".." FMT_ADDR ")\n",
+ DBG_ADDR(pHp->startAddress), DBG_ADDR(pHp->startAddress+pHp->maxCodeHeapSize)
+ ));
+
+#ifdef _WIN64
+ emitJump(pHp->CLRPersonalityRoutine, (void *)ProcessCLRException);
+#endif
+
+ pCodeHeap.SuppressRelease();
+ RETURN pHp;
+}
+
+void * LoaderCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ if (m_cbMinNextPad > (SSIZE_T)header) header = m_cbMinNextPad;
+
+ void * p = m_LoaderHeap.AllocMemForCode_NoThrow(header, size, alignment);
+ if (p == NULL)
+ return NULL;
+
+ // If the next allocation would otherwise start in the same nibble map bucket,
+ // pad this one so that the next allocation begins in a fresh bucket.
+ // Note that m_cbMinNextPad can be negative.
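+ // Worked example (hypothetical values, assuming 32-byte buckets): for an
+ // allocation at p = 0x1000 with size = 0x11, the first bucket boundary past p
+ // is ALIGN_UP(0x1001, 32) = 0x1020, so m_cbMinNextPad = 0x1020 - 0x1011 = 0xF
+ // and the next allocation's header is padded so the next allocation starts in
+ // a fresh bucket. If the allocation already crosses the boundary, the value
+ // is negative and no padding is requested.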
+ m_cbMinNextPad = ALIGN_UP((SIZE_T)p + 1, BYTES_PER_BUCKET) - ((SIZE_T)p + size);
+
+ return p;
+}
+
+void CodeHeapRequestInfo::Init()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION((m_hiAddr == 0) ||
+ ((m_loAddr < m_hiAddr) &&
+ ((m_loAddr + m_requestSize) < m_hiAddr)));
+ } CONTRACTL_END;
+
+ if (m_pAllocator == NULL)
+ m_pAllocator = m_pMD->GetLoaderAllocatorForCode();
+ m_isDynamicDomain = (m_pMD != NULL) ? m_pMD->IsLCGMethod() : false;
+ m_isCollectible = m_pAllocator->IsCollectible() ? true : false;
+}
+
+#ifdef WIN64EXCEPTIONS
+
+#ifdef _WIN64
+extern "C" PRUNTIME_FUNCTION GetRuntimeFunctionCallback(IN ULONG64 ControlPc,
+ IN PVOID Context)
+#else
+extern "C" PRUNTIME_FUNCTION GetRuntimeFunctionCallback(IN ULONG ControlPc,
+ IN PVOID Context)
+#endif
+{
+ WRAPPER_NO_CONTRACT;
+
+ PRUNTIME_FUNCTION prf = NULL;
+
+ // We must preserve the last error so that GCStress=4 EH processing doesn't kill it.
+ BEGIN_PRESERVE_LAST_ERROR;
+
+#ifdef ENABLE_CONTRACTS
+ // See comment in code:Thread::SwitchIn and SwitchOut.
+ Thread *pThread = GetThread();
+ if (!(pThread && pThread->HasThreadStateNC(Thread::TSNC_InTaskSwitch)))
+ {
+
+ // Some 64-bit OOM tests use the hosting interface to re-enter the CLR via
+ // RtlVirtualUnwind to track unique stacks at each failure point. RtlVirtualUnwind can
+ // result in the EEJitManager taking a reader lock. This, in turn, results in a
+ // CANNOT_TAKE_LOCK contract violation if a CANNOT_TAKE_LOCK function were on the stack
+ // at the time. While it's theoretically possible for "real" hosts also to re-enter the
+ // CLR via RtlVirtualUnwind, generally they don't, and we'd actually like to catch a real
+ // host causing such a contract violation. Therefore, we'd like to suppress such contract
+ // asserts when these OOM tests are running, but continue to enforce the contracts by
+ // default. This function returns whether to suppress locking violations.
+ CONDITIONAL_CONTRACT_VIOLATION(
+ TakesLockViolation,
+ g_pConfig->SuppressLockViolationsOnReentryFromOS());
+#endif // ENABLE_CONTRACTS
+
+ EECodeInfo codeInfo((PCODE)ControlPc);
+ if (codeInfo.IsValid())
+ prf = codeInfo.GetFunctionEntry();
+
+ LOG((LF_EH, LL_INFO1000000, "GetRuntimeFunctionCallback(%p) returned %p\n", ControlPc, prf));
+
+#ifdef ENABLE_CONTRACTS
+ }
+#endif // ENABLE_CONTRACTS
+
+ END_PRESERVE_LAST_ERROR;
+
+ return prf;
+}
+#endif // WIN64EXCEPTIONS
+
+HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapList *pADHeapList)
+{
+ CONTRACT(HeapList *) {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
+ POSTCONDITION(CheckPointer(RETVAL));
+ } CONTRACT_END;
+
+ size_t initialRequestSize = pInfo->getRequestSize();
+ size_t minReserveSize = VIRTUAL_ALLOC_RESERVE_GRANULARITY; // ( 64 KB)
+
+#ifdef _WIN64
+ if (pInfo->m_hiAddr == 0)
+ {
+ if (pADHeapList->m_CodeHeapList.Count() > CODE_HEAP_SIZE_INCREASE_THRESHOLD)
+ {
+ minReserveSize *= 4; // Increase the code heap size to 256 KB for workloads with a lot of code.
+ }
+
+ // For non-DynamicDomains that don't have a loAddr/hiAddr range
+ // we bump up the reserve size for the 64-bit platforms
+ if (!pInfo->IsDynamicDomain())
+ {
+ minReserveSize *= 4; // CodeHeaps are larger on AMD64 (256 KB to 1024 KB)
+ }
+ }
+#endif
+
+ // <BUGNUM> VSW 433293 </BUGNUM>
+ // SETUP_NEW_BLOCK reserves the first sizeof(LoaderHeapBlock) bytes for LoaderHeapBlock.
+ // In other words, the first m_pAllocPtr starts at sizeof(LoaderHeapBlock) bytes
+ // after the allocated memory. Therefore, we need to take it into account.
+ size_t requestAndHeadersSize = sizeof(LoaderHeapBlock) + sizeof(HeapList) + initialRequestSize;
+
+ size_t reserveSize = requestAndHeadersSize;
+ if (reserveSize < minReserveSize)
+ reserveSize = minReserveSize;
+ reserveSize = ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY);
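+ // E.g. (illustrative, with the default 64 KB minimum): a 0x2000-byte request
+ // plus the two headers stays below the minimum, so reserveSize becomes 64 KB;
+ // a 100 KB request is rounded up to the next 64 KB multiple, i.e. 128 KB.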
+
+ pInfo->setReserveSize(reserveSize);
+
+ HeapList *pHp = NULL;
+
+ DWORD flags = RangeSection::RANGE_SECTION_CODEHEAP;
+
+ if (pInfo->IsDynamicDomain())
+ {
+ flags |= RangeSection::RANGE_SECTION_COLLECTIBLE;
+ pHp = HostCodeHeap::CreateCodeHeap(pInfo, this);
+ }
+ else
+ {
+ LoaderHeap *pJitMetaHeap = pADHeapList->m_pAllocator->GetLowFrequencyHeap();
+
+ if (pInfo->IsCollectible())
+ flags |= RangeSection::RANGE_SECTION_COLLECTIBLE;
+
+ pHp = LoaderCodeHeap::CreateCodeHeap(pInfo, pJitMetaHeap);
+ }
+
+ _ASSERTE (pHp != NULL);
+ _ASSERTE (pHp->maxCodeHeapSize >= initialRequestSize);
+
+ pHp->SetNext(GetCodeHeapList());
+
+ EX_TRY
+ {
+ TADDR pStartRange = (TADDR) pHp;
+ TADDR pEndRange = (TADDR) &((BYTE*)pHp->startAddress)[pHp->maxCodeHeapSize];
+
+ ExecutionManager::AddCodeRange(pStartRange,
+ pEndRange,
+ this,
+ (RangeSection::RangeSectionFlags)flags,
+ pHp);
+ //
+ // add a table to cover each range in the range list
+ //
+ InstallEEFunctionTable(
+ (PVOID)pStartRange, // this is just an ID that gets passed to RtlDeleteFunctionTable;
+ (PVOID)pStartRange,
+ (ULONG)((ULONG64)pEndRange - (ULONG64)pStartRange),
+ GetRuntimeFunctionCallback,
+ this,
+ DYNFNTABLE_JIT);
+ }
+ EX_CATCH
+ {
+ // If we failed to alloc memory in ExecutionManager::AddCodeRange()
+ // then we will delete the LoaderHeap that we allocated
+
+ // pHp is allocated in pHeap, so we only need to delete the LoaderHeap itself
+ delete pHp->pHeap;
+
+ pHp = NULL;
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ if (pHp == NULL)
+ {
+ ThrowOutOfMemory();
+ }
+
+ m_pCodeHeap = pHp;
+
+ HeapList **ppHeapList = pADHeapList->m_CodeHeapList.AppendThrowing();
+ *ppHeapList = pHp;
+
+ RETURN(pHp);
+}
+
+void* EEJitManager::allocCodeRaw(CodeHeapRequestInfo *pInfo,
+ size_t header, size_t blockSize, unsigned align,
+ HeapList ** ppCodeHeap /* Writeback, Can be null */ )
+{
+ CONTRACT(void *) {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
+ POSTCONDITION(CheckPointer(RETVAL));
+ } CONTRACT_END;
+
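+ // Request enough space for the worst-case alignment: a block that must be
+ // aligned to 'align' may need up to (align-1) bytes of slack in front of it
+ // (e.g. at most 15 wasted bytes for a 16-byte-aligned request).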
+ pInfo->setRequestSize(header+blockSize+(align-1));
+
+ // Initialize the writeback value to NULL if a non-NULL pointer was provided
+ if (ppCodeHeap)
+ *ppCodeHeap = NULL;
+
+ void * mem = NULL;
+
+ bool bForJumpStubs = (pInfo->m_loAddr != 0) || (pInfo->m_hiAddr != 0);
+ bool bUseCachedDynamicCodeHeap = pInfo->IsDynamicDomain();
+
+ HeapList * pCodeHeap;
+
+ for (;;)
+ {
+ // Avoid going through the full list in the common case - try to use the most recently used codeheap
+ if (bUseCachedDynamicCodeHeap)
+ {
+ pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap;
+ pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap = NULL;
+ }
+ else
+ {
+ pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedCodeHeap;
+ pInfo->m_pAllocator->m_pLastUsedCodeHeap = NULL;
+ }
+
+
+ // If we are using a cached code heap for jump stubs, ensure that it meets the [loAddr..hiAddr] constraint
+ if (bForJumpStubs && pCodeHeap && !CanUseCodeHeap(pInfo, pCodeHeap))
+ {
+ pCodeHeap = NULL;
+ }
+
+ // If we don't have a cached code heap or can't use it, get a code heap
+ if (pCodeHeap == NULL)
+ {
+ pCodeHeap = GetCodeHeap(pInfo);
+ if (pCodeHeap == NULL)
+ break;
+ }
+
+#ifdef _WIN64
+ if (!bForJumpStubs)
+ {
+ //
+ // Keep a small reserve at the end of the code heap for jump stubs. It reduces the
+ // chance that we won't be able to allocate a jump stub because of a lack of suitable address space.
+ //
+ // It is not a perfect solution. Ideally, we would be able to either ensure that jump stub
+ // allocation won't fail or handle jump stub allocation gracefully (see DevDiv #381823 and
+ // related bugs for details).
+ //
+ size_t reserveForJumpStubs = pCodeHeap->maxCodeHeapSize / 64;
+
+ size_t minReserveForJumpStubs = sizeof(CodeHeader) +
+ sizeof(JumpStubBlockHeader) + (size_t) DEFAULT_JUMPSTUBS_PER_BLOCK * BACK_TO_BACK_JUMP_ALLOCATE_SIZE +
+ CODE_SIZE_ALIGN + BYTES_PER_BUCKET;
+
+ // Reserve only if the size can fit a cluster of jump stubs
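+ // (Illustrative: a 1 MB code heap reserves 1 MB / 64 = 16 KB for jump stubs,
+ // which comfortably exceeds the minimum; small heaps skip the reserve.)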
+ if (reserveForJumpStubs > minReserveForJumpStubs)
+ {
+ size_t occupiedSize = pCodeHeap->endAddress - pCodeHeap->startAddress;
+
+ if (occupiedSize + pInfo->getRequestSize() + reserveForJumpStubs > pCodeHeap->maxCodeHeapSize)
+ {
+ pCodeHeap->SetHeapFull();
+ continue;
+ }
+ }
+ }
+#endif
+
+ mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align);
+ if (mem != NULL)
+ break;
+
+ // The current heap couldn't handle our request. Mark it as full.
+ if (bForJumpStubs)
+ pCodeHeap->SetHeapFullForJumpStubs();
+ else
+ pCodeHeap->SetHeapFull();
+ }
+
+ if (mem == NULL)
+ {
+ // Let us create a new heap.
+
+ DomainCodeHeapList *pList = GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator);
+ if (pList == NULL)
+ {
+ // not found so need to create the first one
+ pList = CreateCodeHeapList(pInfo);
+ _ASSERTE(pList == GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator));
+ }
+ _ASSERTE(pList);
+
+ pCodeHeap = NewCodeHeap(pInfo, pList);
+ _ASSERTE(pCodeHeap);
+
+ mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align);
+ if (mem == NULL)
+ ThrowOutOfMemory();
+ _ASSERTE(mem);
+ }
+
+ if (bUseCachedDynamicCodeHeap)
+ {
+ pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap = pCodeHeap;
+ }
+ else
+ {
+ pInfo->m_pAllocator->m_pLastUsedCodeHeap = pCodeHeap;
+ }
+
+
+ // Record the pCodeHeap value into ppCodeHeap, if a non-NULL pointer was provided
+ if (ppCodeHeap)
+ *ppCodeHeap = pCodeHeap;
+
+ _ASSERTE((TADDR)mem >= pCodeHeap->startAddress);
+
+ if (((TADDR) mem)+blockSize > (TADDR)pCodeHeap->endAddress)
+ {
+ // Update the CodeHeap endAddress
+ pCodeHeap->endAddress = (TADDR)mem+blockSize;
+ }
+
+ RETURN(mem);
+}
+
+CodeHeader* EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, CorJitAllocMemFlag flag
+#ifdef WIN64EXCEPTIONS
+ , UINT nUnwindInfos
+ , TADDR * pModuleBase
+#endif
+ )
+{
+ CONTRACT(CodeHeader *) {
+ THROWS;
+ GC_NOTRIGGER;
+ POSTCONDITION(CheckPointer(RETVAL));
+ } CONTRACT_END;
+
+ //
+ // Alignment
+ //
+
+ unsigned alignment = CODE_SIZE_ALIGN;
+
+ if ((flag & CORJIT_ALLOCMEM_FLG_16BYTE_ALIGN) != 0)
+ {
+ alignment = max(alignment, 16);
+ }
+
+#if !defined(_WIN64) && !defined(_TARGET_ARM_)
+ // when not optimizing for code size, 8-byte align the method entry point, so that
+ // the JIT can in turn 8-byte align the loop entry headers.
+ //
+ // when ReJIT is enabled, 8-byte-align the method entry point so that we may use an
+ // 8-byte interlocked operation to atomically poke the top most bytes (e.g., to
+ // redirect the rejit jmp-stamp at the top of the method from the prestub to the
+ // rejitted code, or to reinstate original code on a revert).
+ else if ((g_pConfig->GenOptimizeType() != OPT_SIZE) ||
+ ReJitManager::IsReJITEnabled())
+ {
+ alignment = max(alignment, 8);
+ }
+#endif
+
+ //
+ // Compute header layout
+ //
+
+ SIZE_T totalSize = blockSize;
+
+#if defined(USE_INDIRECT_CODEHEADER)
+ SIZE_T realHeaderSize = offsetof(RealCodeHeader, unwindInfos[0]) + (sizeof(RUNTIME_FUNCTION) * nUnwindInfos);
+
+ // if this is a LCG method then we will be allocating the RealCodeHeader
+ // following the code so that the code block can be removed easily by
+ // the LCG code heap.
+ if (pMD->IsLCGMethod())
+ {
+ totalSize = ALIGN_UP(totalSize, sizeof(void*)) + realHeaderSize;
+ static_assert_no_msg(CODE_SIZE_ALIGN >= sizeof(void*));
+ }
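+ // Resulting LCG layout (sketch): [ CodeHeader | code ... | pad | RealCodeHeader ],
+ // so the method body and both headers live in, and are freed with, a single
+ // block of the LCG code heap.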
+#endif // USE_INDIRECT_CODEHEADER
+
+ CodeHeader * pCodeHdr = NULL;
+
+ CodeHeapRequestInfo requestInfo(pMD);
+
+ // Scope the lock
+ {
+ CrstHolder ch(&m_CodeHeapCritSec);
+
+ HeapList *pCodeHeap = NULL;
+
+ TADDR pCode = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), totalSize, alignment, &pCodeHeap);
+
+ _ASSERTE(pCodeHeap);
+
+ if (pMD->IsLCGMethod())
+ {
+ pMD->AsDynamicMethodDesc()->GetLCGMethodResolver()->m_recordCodePointer = (void*) pCode;
+ }
+
+ _ASSERTE(IS_ALIGNED(pCode, alignment));
+
+ JIT_PERF_UPDATE_X86_CODE_SIZE(totalSize);
+
+ // Initialize the CodeHeader *BEFORE* we publish this code range via the nibble
+ // map so that we don't have to harden readers against uninitialized data.
+ // However, because we hold the lock, this initialization should be fast and cheap!
+
+ pCodeHdr = ((CodeHeader *)pCode) - 1;
+
+#ifdef USE_INDIRECT_CODEHEADER
+ if (pMD->IsLCGMethod())
+ {
+ pCodeHdr->SetRealCodeHeader((BYTE*)pCode + ALIGN_UP(blockSize, sizeof(void*)));
+ }
+ else
+ {
+ // TODO: think about the CodeHeap carrying around a RealCodeHeader chunking mechanism
+ //
+ // allocate the real header in the low frequency heap
+ BYTE* pRealHeader = (BYTE*)(void*)pMD->GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(realHeaderSize));
+ pCodeHdr->SetRealCodeHeader(pRealHeader);
+ }
+#endif
+
+ pCodeHdr->SetDebugInfo(NULL);
+ pCodeHdr->SetEHInfo(NULL);
+ pCodeHdr->SetGCInfo(NULL);
+ pCodeHdr->SetMethodDesc(pMD);
+#ifdef WIN64EXCEPTIONS
+ pCodeHdr->SetNumberOfUnwindInfos(nUnwindInfos);
+ *pModuleBase = (TADDR)pCodeHeap;
+#endif
+
+ NibbleMapSet(pCodeHeap, pCode, TRUE);
+ }
+
+ RETURN(pCodeHdr);
+}
+
+EEJitManager::DomainCodeHeapList *EEJitManager::GetCodeHeapList(MethodDesc *pMD, LoaderAllocator *pAllocator, BOOL fDynamicOnly)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
+ } CONTRACTL_END;
+
+ DomainCodeHeapList *pList = NULL;
+ DomainCodeHeapList **ppList = NULL;
+ int count = 0;
+
+ // get the appropriate list of heaps
+ // pMD is NULL for NGen modules during Module::LoadTokenTables
+ if (fDynamicOnly || (pMD != NULL && pMD->IsLCGMethod()))
+ {
+ ppList = m_DynamicDomainCodeHeaps.Table();
+ count = m_DynamicDomainCodeHeaps.Count();
+ }
+ else
+ {
+ ppList = m_DomainCodeHeaps.Table();
+ count = m_DomainCodeHeaps.Count();
+ }
+
+ // this is a virtual call - pull it out of the loop
+ BOOL fCanUnload = pAllocator->CanUnload();
+
+ // look for a DomainCodeHeapList
+ for (int i=0; i < count; i++)
+ {
+ if (ppList[i]->m_pAllocator == pAllocator ||
+ !fCanUnload && !ppList[i]->m_pAllocator->CanUnload())
+ {
+ pList = ppList[i];
+ break;
+ }
+ }
+ return pList;
+}
+
+HeapList* EEJitManager::GetCodeHeap(CodeHeapRequestInfo *pInfo)
+{
+ CONTRACT(HeapList *) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
+ } CONTRACT_END;
+
+ HeapList *pResult = NULL;
+
+ _ASSERTE(pInfo->m_pAllocator != NULL);
+
+ // loop through the m_DomainCodeHeaps to find the AppDomain
+ // if not found, then create it
+ DomainCodeHeapList *pList = GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator);
+ if (pList)
+ {
+ // Set pResult to the largest non-full HeapList
+ // that also satisfies the [loAddr..hiAddr] constraint
+ for (int i=0; i < pList->m_CodeHeapList.Count(); i++)
+ {
+ HeapList *pCurrent = pList->m_CodeHeapList[i];
+
+ // Validate that the code heap can be used for the current request
+ if(CanUseCodeHeap(pInfo, pCurrent))
+ {
+ if (pResult == NULL)
+ {
+ // pCurrent is the first (and possibly only) heap that would satisfy the request
+ pResult = pCurrent;
+ }
+ // We use the initial creation size as a discriminator (i.e., the largest heap)
+ else if (pResult->maxCodeHeapSize < pCurrent->maxCodeHeapSize)
+ {
+ pResult = pCurrent;
+ }
+ }
+ }
+ }
+
+ RETURN (pResult);
+}
+
+bool EEJitManager::CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCodeHeap)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
+ } CONTRACTL_END;
+
+ bool retVal = false;
+
+ if ((pInfo->m_loAddr == 0) && (pInfo->m_hiAddr == 0))
+ {
+ if (!pCodeHeap->IsHeapFull())
+ {
+ // We have no constraint, so this non-full heap will be able to satisfy our request
+ if (pInfo->IsDynamicDomain())
+ {
+ retVal = true;
+ }
+ else
+ {
+ BYTE * lastAddr = (BYTE *) pCodeHeap->startAddress + pCodeHeap->maxCodeHeapSize;
+
+ BYTE * loRequestAddr = (BYTE *) pCodeHeap->endAddress;
+ BYTE * hiRequestAddr = loRequestAddr + pInfo->getRequestSize() + BYTES_PER_BUCKET;
+ if (hiRequestAddr <= lastAddr)
+ {
+ retVal = true;
+ }
+ }
+ }
+ }
+ else
+ {
+ if (!pCodeHeap->IsHeapFullForJumpStubs())
+ {
+ // We also check to see whether an allocation in this heap would satisfy
+ // the [loAddr..hiAddr] requirement
+
+ // Calculate the byte range that can ever be returned by
+ // an allocation in this HeapList element
+ //
+ BYTE * firstAddr = (BYTE *) pCodeHeap->startAddress;
+ BYTE * lastAddr = (BYTE *) pCodeHeap->startAddress + pCodeHeap->maxCodeHeapSize;
+
+ _ASSERTE(pCodeHeap->startAddress <= pCodeHeap->endAddress);
+ _ASSERTE(firstAddr <= lastAddr);
+
+ if (pInfo->IsDynamicDomain())
+ {
+ // We check to see if every allocation in this heap
+ // will satisfy the [loAddr..hiAddr] requirement.
+ //
+ // Dynamic domains use a free list allocator,
+ // thus we can receive any address in the range
+ // when calling AllocMemory with a DynamicDomain
+
+ // [firstAddr .. lastAddr] must be entirely within
+ // [pInfo->m_loAddr .. pInfo->m_hiAddr]
+ //
+ if ((pInfo->m_loAddr <= firstAddr) &&
+ (lastAddr <= pInfo->m_hiAddr))
+ {
+ // This heap will always satisfy our constraint
+ retVal = true;
+ }
+ }
+ else // non-DynamicDomain
+ {
+ // Calculate the byte range that would be allocated for the
+ // next allocation request into [loRequestAddr..hiRequestAddr]
+ //
+ BYTE * loRequestAddr = (BYTE *) pCodeHeap->endAddress;
+ BYTE * hiRequestAddr = loRequestAddr + pInfo->getRequestSize() + BYTES_PER_BUCKET;
+ _ASSERTE(loRequestAddr <= hiRequestAddr);
+
+ // loRequestAddr and hiRequestAddr must be entirely within
+ // [pInfo->m_loAddr .. pInfo->m_hiAddr]
+ // additionally hiRequestAddr must also be less than
+ // or equal to lastAddr
+ //
+ if ((pInfo->m_loAddr <= loRequestAddr) &&
+ (hiRequestAddr <= pInfo->m_hiAddr) &&
+ (hiRequestAddr <= lastAddr))
+ {
+ // This heap will be able to satisfy our constraint
+ retVal = true;
+ }
+ }
+ }
+ }
+
+ return retVal;
+}
+
+EEJitManager::DomainCodeHeapList * EEJitManager::CreateCodeHeapList(CodeHeapRequestInfo *pInfo)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
+ } CONTRACTL_END;
+
+ NewHolder<DomainCodeHeapList> pNewList(new DomainCodeHeapList());
+ pNewList->m_pAllocator = pInfo->m_pAllocator;
+
+ DomainCodeHeapList **ppList = NULL;
+ if (pInfo->IsDynamicDomain())
+ ppList = m_DynamicDomainCodeHeaps.AppendThrowing();
+ else
+ ppList = m_DomainCodeHeaps.AppendThrowing();
+ *ppList = pNewList;
+
+ return pNewList.Extract();
+}
+
+LoaderHeap *EEJitManager::GetJitMetaHeap(MethodDesc *pMD)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ LoaderAllocator *pAllocator = pMD->GetLoaderAllocator();
+ _ASSERTE(pAllocator);
+
+ return pAllocator->GetLowFrequencyHeap();
+}
+
+BYTE* EEJitManager::allocGCInfo(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ MethodDesc* pMD = pCodeHeader->GetMethodDesc();
+ // Sadly, for lightweight code generation (LCG) we need the check here. We should change GetJitMetaHeap.
+ if (pMD->IsLCGMethod())
+ {
+ CrstHolder ch(&m_CodeHeapCritSec);
+ pCodeHeader->SetGCInfo((BYTE*)(void*)pMD->AsDynamicMethodDesc()->GetResolver()->GetJitMetaHeap()->New(blockSize));
+ }
+ else
+ {
+ pCodeHeader->SetGCInfo((BYTE*) (void*)GetJitMetaHeap(pMD)->AllocMem(S_SIZE_T(blockSize)));
+ }
+ _ASSERTE(pCodeHeader->GetGCInfo()); // AllocMem throws if there's not enough memory
+ JIT_PERF_UPDATE_X86_CODE_SIZE(blockSize);
+
+ * pAllocationSize = blockSize; // Store the allocation size so we can back it out later.
+
+ return(pCodeHeader->GetGCInfo());
+}
+
+void* EEJitManager::allocEHInfoRaw(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ MethodDesc* pMD = pCodeHeader->GetMethodDesc();
+ void * mem = NULL;
+
+ // Sadly, for lightweight code generation (LCG) we need the check here. We should change GetJitMetaHeap.
+ if (pMD->IsLCGMethod())
+ {
+ CrstHolder ch(&m_CodeHeapCritSec);
+ mem = (void*)pMD->AsDynamicMethodDesc()->GetResolver()->GetJitMetaHeap()->New(blockSize);
+ }
+ else
+ {
+ mem = (void*)GetJitMetaHeap(pMD)->AllocMem(S_SIZE_T(blockSize));
+ }
+ _ASSERTE(mem); // AllocMem throws if there's not enough memory
+
+ JIT_PERF_UPDATE_X86_CODE_SIZE(blockSize);
+
+ * pAllocationSize = blockSize; // Store the allocation size so we can back it out later.
+
+ return(mem);
+}
+
+
+EE_ILEXCEPTION* EEJitManager::allocEHInfo(CodeHeader* pCodeHeader, unsigned numClauses, size_t * pAllocationSize)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ // Note - pCodeHeader->phdrJitEHInfo - sizeof(size_t) contains the number of EH clauses
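+ // Layout of the raw allocation (sketch): a size_t holding numClauses comes
+ // first, and the published EH info pointer (pCodeHeader->GetEHInfo()) points
+ // just past it, at the EE_ILEXCEPTION header followed by the clause array.
+ // InitializeEHEnumeration later reads numClauses back from the size_t
+ // immediately preceding the published pointer.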
+
+ DWORD temp = EE_ILEXCEPTION::Size(numClauses);
+ DWORD blockSize = 0;
+ if (!ClrSafeInt<DWORD>::addition(temp, sizeof(size_t), blockSize))
+ COMPlusThrowOM();
+
+ BYTE *EHInfo = (BYTE*)allocEHInfoRaw(pCodeHeader, blockSize, pAllocationSize);
+
+ pCodeHeader->SetEHInfo((EE_ILEXCEPTION*) (EHInfo + sizeof(size_t)));
+ pCodeHeader->GetEHInfo()->Init(numClauses);
+ *((size_t *)EHInfo) = numClauses;
+ return(pCodeHeader->GetEHInfo());
+}
+
+JumpStubBlockHeader * EEJitManager::allocJumpStubBlock(MethodDesc* pMD, DWORD numJumps,
+ BYTE * loAddr, BYTE * hiAddr,
+ LoaderAllocator *pLoaderAllocator)
+{
+ CONTRACT(JumpStubBlockHeader *) {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(loAddr < hiAddr);
+ PRECONDITION(pLoaderAllocator != NULL);
+ POSTCONDITION(CheckPointer(RETVAL));
+ } CONTRACT_END;
+
+ _ASSERTE((sizeof(JumpStubBlockHeader) % CODE_SIZE_ALIGN) == 0);
+ _ASSERTE(numJumps < MAX_M_ALLOCATED);
+
+ size_t blockSize = sizeof(JumpStubBlockHeader) + (size_t) numJumps * BACK_TO_BACK_JUMP_ALLOCATE_SIZE;
+
+ HeapList *pCodeHeap = NULL;
+ CodeHeapRequestInfo requestInfo(pMD, pLoaderAllocator, loAddr, hiAddr);
+
+ TADDR mem;
+ JumpStubBlockHeader * pBlock;
+
+ // Scope the lock
+ {
+ CrstHolder ch(&m_CodeHeapCritSec);
+
+ mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(TADDR), blockSize, CODE_SIZE_ALIGN, &pCodeHeap);
+
+ // CodeHeader comes immediately before the block
+ CodeHeader * pCodeHdr = (CodeHeader *) (mem - sizeof(CodeHeader));
+ pCodeHdr->SetStubCodeBlockKind(STUB_CODE_BLOCK_JUMPSTUB);
+
+ NibbleMapSet(pCodeHeap, mem, TRUE);
+
+ pBlock = (JumpStubBlockHeader *)mem;
+
+ _ASSERTE(IS_ALIGNED(pBlock, CODE_SIZE_ALIGN));
+
+ JIT_PERF_UPDATE_X86_CODE_SIZE(blockSize);
+ }
+
+ pBlock->m_next = NULL;
+ pBlock->m_used = 0;
+ pBlock->m_allocated = numJumps;
+ if (pMD && pMD->IsLCGMethod())
+ pBlock->SetHostCodeHeap(static_cast<HostCodeHeap*>(pCodeHeap->pHeap));
+ else
+ pBlock->SetLoaderAllocator(pLoaderAllocator);
+
+ LOG((LF_JIT, LL_INFO1000, "Allocated new JumpStubBlockHeader for %d stubs at" FMT_ADDR " in loader allocator " FMT_ADDR "\n",
+ numJumps, DBG_ADDR(pBlock) , DBG_ADDR(pLoaderAllocator) ));
+
+ RETURN(pBlock);
+}
+
+void * EEJitManager::allocCodeFragmentBlock(size_t blockSize, unsigned alignment, LoaderAllocator *pLoaderAllocator, StubCodeBlockKind kind)
+{
+ CONTRACT(void *) {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(pLoaderAllocator != NULL);
+ POSTCONDITION(CheckPointer(RETVAL));
+ } CONTRACT_END;
+
+ HeapList *pCodeHeap = NULL;
+ CodeHeapRequestInfo requestInfo(NULL, pLoaderAllocator, NULL, NULL);
+
+ TADDR mem;
+
+ // Scope the lock
+ {
+ CrstHolder ch(&m_CodeHeapCritSec);
+
+ mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(TADDR), blockSize, alignment, &pCodeHeap);
+
+ // CodeHeader comes immediately before the block
+ CodeHeader * pCodeHdr = (CodeHeader *) (mem - sizeof(CodeHeader));
+ pCodeHdr->SetStubCodeBlockKind(kind);
+
+ NibbleMapSet(pCodeHeap, (TADDR)mem, TRUE);
+ }
+
+ RETURN((void *)mem);
+}
+
+#endif // !DACCESS_COMPILE
+
+
+PTR_VOID EEJitManager::GetGCInfo(const METHODTOKEN& MethodToken)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ return GetCodeHeader(MethodToken)->GetGCInfo();
+}
+
+// creates an enumeration and returns the number of EH clauses
+unsigned EEJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)
+{
+ LIMITED_METHOD_CONTRACT;
+ EE_ILEXCEPTION * EHInfo = GetCodeHeader(MethodToken)->GetEHInfo();
+
+ pEnumState->iCurrentPos = 0; // since the EH info is not compressed, the clause number is used to do the enumeration
+ pEnumState->pExceptionClauseArray = NULL;
+
+ if (!EHInfo)
+ return 0;
+
+ pEnumState->pExceptionClauseArray = dac_cast<TADDR>(EHInfo->EHClause(0));
+ return *(dac_cast<PTR_unsigned>(dac_cast<TADDR>(EHInfo) - sizeof(size_t)));
+}
+
+PTR_EXCEPTION_CLAUSE_TOKEN EEJitManager::GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
+ EE_ILEXCEPTION_CLAUSE* pEHClauseOut)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ unsigned iCurrentPos = pEnumState->iCurrentPos;
+ pEnumState->iCurrentPos++;
+
+ EE_ILEXCEPTION_CLAUSE* pClause = &(dac_cast<PTR_EE_ILEXCEPTION_CLAUSE>(pEnumState->pExceptionClauseArray)[iCurrentPos]);
+ *pEHClauseOut = *pClause;
+ return dac_cast<PTR_EXCEPTION_CLAUSE_TOKEN>(pClause);
+}
+
+#ifndef DACCESS_COMPILE
+TypeHandle EEJitManager::ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
+ CrawlFrame *pCf)
+{
+ // We don't want to use a runtime contract here since this codepath is used during
+ // the processing of a hard SO. Contracts use a significant amount of stack
+ // which we can't afford for those cases.
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ _ASSERTE(NULL != pCf);
+ _ASSERTE(NULL != pEHClause);
+ _ASSERTE(IsTypedHandler(pEHClause));
+
+
+ TypeHandle typeHnd = TypeHandle();
+ mdToken typeTok = mdTokenNil;
+
+ {
+ CrstHolder chRead(&m_EHClauseCritSec);
+ if (HasCachedTypeHandle(pEHClause))
+ {
+ typeHnd = TypeHandle::FromPtr(pEHClause->TypeHandle);
+ }
+ else
+ {
+ typeTok = pEHClause->ClassToken;
+ }
+ }
+
+ if (!typeHnd.IsNull())
+ {
+ return typeHnd;
+ }
+
+ MethodDesc* pMD = pCf->GetFunction();
+ Module* pModule = pMD->GetModule();
+ PREFIX_ASSUME(pModule != NULL);
+
+ SigTypeContext typeContext(pMD);
+ VarKind k = hasNoVars;
+
+ // In the vast majority of cases the code under the "if" below
+ // will not be executed.
+ //
+ // First grab the representative instantiations. For code
+ // shared by multiple generic instantiations these are the
+ // canonical (representative) instantiations.
+ if (TypeFromToken(typeTok) == mdtTypeSpec)
+ {
+ PCCOR_SIGNATURE pSig;
+ ULONG cSig;
+ IfFailThrow(pModule->GetMDImport()->GetTypeSpecFromToken(typeTok, &pSig, &cSig));
+
+ SigPointer psig(pSig, cSig);
+ k = psig.IsPolyType(&typeContext);
+
+ // Grab the active class and method instantiation. This exact instantiation is only
+ // needed in the corner case of "generic" exception catching in shared
+ // generic code. We don't need the exact instantiation if the token
+ // doesn't contain E_T_VAR or E_T_MVAR.
+ if ((k & hasSharableVarsMask) != 0)
+ {
+ Instantiation classInst;
+ Instantiation methodInst;
+ pCf->GetExactGenericInstantiations(&classInst, &methodInst);
+ SigTypeContext::InitTypeContext(pMD,classInst, methodInst,&typeContext);
+ }
+ }
+
+ typeHnd = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, typeTok, &typeContext,
+ ClassLoader::ReturnNullIfNotFound);
+
+ // If the type (pModule,typeTok) was not loaded or not
+ // restored then the exception object won't have this type, because an
+ // object of this type has not been allocated.
+ if (typeHnd.IsNull())
+ return typeHnd;
+
+ // We can cache any exception specification except:
+ // - If the type contains type variables in generic code,
+ // e.g. catch E<T> where T is a type variable.
+ // We CANNOT cache E<T> in non-shared instantiations of generic code because
+ // there is only one EHClause cache for the IL, shared across all instantiations.
+ //
+ if((k & hasAnyVarsMask) == 0)
+ {
+ CrstHolder chWrite(&m_EHClauseCritSec);
+
+ // Note another thread might have beaten us to it ...
+ if (!HasCachedTypeHandle(pEHClause))
+ {
+ // We should never cache a NULL typeHnd.
+ _ASSERTE(!typeHnd.IsNull());
+ pEHClause->TypeHandle = typeHnd.AsPtr();
+ SetHasCachedTypeHandle(pEHClause);
+ }
+ else
+ {
+ // If we raced in here with another thread and got held up on the lock, then we just need to return the
+ // type handle that the other thread put into the clause.
+ // The typeHnd we found and the typeHnd the racing thread found should always be the same.
+ _ASSERTE(typeHnd.AsPtr() == pEHClause->TypeHandle);
+ typeHnd = TypeHandle::FromPtr(pEHClause->TypeHandle);
+ }
+ }
+ return typeHnd;
+}
+
+void EEJitManager::RemoveJitData (CodeHeader * pCHdr, size_t GCinfo_len, size_t EHinfo_len)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ MethodDesc* pMD = pCHdr->GetMethodDesc();
+
+ if (pMD->IsLCGMethod()) {
+
+ void * codeStart = (pCHdr + 1);
+
+ {
+ CrstHolder ch(&m_CodeHeapCritSec);
+
+ LCGMethodResolver * pResolver = pMD->AsDynamicMethodDesc()->GetLCGMethodResolver();
+
+ // Clear the pointer only if it matches what we are about to free. There may be cases where the JIT is reentered and
+ // the method JITed multiple times.
+ if (pResolver->m_recordCodePointer == codeStart)
+ pResolver->m_recordCodePointer = NULL;
+ }
+
+#if defined(_TARGET_AMD64_)
+ // Remove the unwind information (if applicable)
+ UnwindInfoTable::UnpublishUnwindInfoForMethod((TADDR)codeStart);
+#endif // defined(_TARGET_AMD64_)
+
+ HostCodeHeap* pHeap = HostCodeHeap::GetCodeHeap((TADDR)codeStart);
+ FreeCodeMemory(pHeap, codeStart);
+
+ // We are leaking GCInfo and EHInfo. They will be freed once the dynamic method is destroyed.
+
+ return;
+ }
+
+ {
+ CrstHolder ch(&m_CodeHeapCritSec);
+
+ HeapList *pHp = GetCodeHeapList();
+
+ while (pHp && ((pHp->startAddress > (TADDR)pCHdr) ||
+ (pHp->endAddress < (TADDR)pCHdr + sizeof(CodeHeader))))
+ {
+ pHp = pHp->GetNext();
+ }
+
+ _ASSERTE(pHp && pHp->pHdrMap);
+ _ASSERTE(pHp && pHp->cBlocks);
+
+ // Better to just return than AV?
+ if (pHp == NULL)
+ return;
+
+ NibbleMapSet(pHp, (TADDR)(pCHdr + 1), FALSE);
+ }
+
+ // Backout the GCInfo
+ if (GCinfo_len > 0) {
+ GetJitMetaHeap(pMD)->BackoutMem(pCHdr->GetGCInfo(), GCinfo_len);
+ }
+
+ // Backout the EHInfo
+ BYTE *EHInfo = (BYTE *)pCHdr->GetEHInfo();
+ if (EHInfo) {
+ EHInfo -= sizeof(size_t);
+
+ _ASSERTE(EHinfo_len>0);
+ GetJitMetaHeap(pMD)->BackoutMem(EHInfo, EHinfo_len);
+ }
+
+ // <TODO>
+ // TODO: Although we have backed out the GCInfo and EHInfo, we haven't actually backed out the
+ // code buffer itself. As a result, we might leak the CodeHeap if jitting fails after
+ // the code buffer is allocated.
+ //
+ // However, fixing this appears non-trivial.
+ // Here are some of the reasons:
+ // (1) AllocCode calls into AllocCodeRaw to allocate the code buffer in the CodeHeap. The exact size
+ // of the code buffer is not known until the alignment is calculated deep on the stack.
+ // (2) AllocCodeRaw is called in 3 different places. We might need to remember the
+ // information for these places.
+ // (3) AllocCodeRaw might create a new CodeHeap. We should remember exactly which
+ // CodeHeap is used to allocate the code buffer.
+ //
+ // Fortunately, this is not a severe leak since the CodeHeap will be reclaimed on appdomain unload.
+ //
+ // </TODO>
+ return;
+}
+
+// An appdomain is being unloaded, so delete any data associated with it. We have to do this in two
+// stages. In the first stage, we remove the elements from the list. In the second stage, which occurs
+// after a GC, we know that only threads that were in preemptive mode prior to the GC could possibly
+// still be looking at an element that is about to be deleted. All such threads are guarded with a
+// reader count, so if the count is 0 we can safely delete; otherwise we must add the element to the
+// cleanup list to be deleted later. We know there can only be one unload at a time, so we can use a
+// single variable to hold the unlinked, but not yet deleted, elements.
+void EEJitManager::Unload(LoaderAllocator *pAllocator)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ CrstHolder ch(&m_CodeHeapCritSec);
+
+ DomainCodeHeapList **ppList = m_DomainCodeHeaps.Table();
+ int count = m_DomainCodeHeaps.Count();
+
+ for (int i=0; i < count; i++) {
+ if (ppList[i]->m_pAllocator== pAllocator) {
+ DomainCodeHeapList *pList = ppList[i];
+ m_DomainCodeHeaps.DeleteByIndex(i);
+
+ // pHeapList is allocated in pHeap, so we only need to delete the LoaderHeap itself
+ count = pList->m_CodeHeapList.Count();
+ for (i=0; i < count; i++) {
+ HeapList *pHeapList = pList->m_CodeHeapList[i];
+ DeleteCodeHeap(pHeapList);
+ }
+
+ // It is OK to delete here, since anyone accessing the DomainCodeHeapList structure holds the critical section.
+ delete pList;
+
+ break;
+ }
+ }
+ ppList = m_DynamicDomainCodeHeaps.Table();
+ count = m_DynamicDomainCodeHeaps.Count();
+ for (int i=0; i < count; i++) {
+ if (ppList[i]->m_pAllocator== pAllocator) {
+ DomainCodeHeapList *pList = ppList[i];
+ m_DynamicDomainCodeHeaps.DeleteByIndex(i);
+
+ // pHeapList is allocated in pHeap, so we only need to delete the CodeHeap itself
+ count = pList->m_CodeHeapList.Count();
+ for (i=0; i < count; i++) {
+ HeapList *pHeapList = pList->m_CodeHeapList[i];
+ // m_DynamicDomainCodeHeaps should only contain HostCodeHeap.
+ RemoveFromCleanupList(static_cast<HostCodeHeap*>(pHeapList->pHeap));
+ DeleteCodeHeap(pHeapList);
+ }
+
+ // It is OK to delete here, since anyone accessing the DomainCodeHeapList structure holds the critical section.
+ delete pList;
+
+ break;
+ }
+ }
+
+ ResetCodeAllocHint();
+}
+
+EEJitManager::DomainCodeHeapList::DomainCodeHeapList()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pAllocator = NULL;
+}
+
+EEJitManager::DomainCodeHeapList::~DomainCodeHeapList()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+void EEJitManager::RemoveCodeHeapFromDomainList(CodeHeap *pHeap, LoaderAllocator *pAllocator)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
+ } CONTRACTL_END;
+
+ // get the AppDomain heap list for pAllocator in m_DynamicDomainCodeHeaps
+ DomainCodeHeapList *pList = GetCodeHeapList(NULL, pAllocator, TRUE);
+
+ // go through the heaps and find and remove pHeap
+ int count = pList->m_CodeHeapList.Count();
+ for (int i = 0; i < count; i++) {
+ HeapList *pHeapList = pList->m_CodeHeapList[i];
+ if (pHeapList->pHeap == pHeap) {
+ // found the heap to remove. If this is the only heap we remove the whole DomainCodeHeapList
+ // otherwise we just remove this heap
+ if (count == 1) {
+ m_DynamicDomainCodeHeaps.Delete(pList);
+ delete pList;
+ }
+ else
+ pList->m_CodeHeapList.Delete(i);
+
+ // if this heaplist is cached in the loader allocator, we must clear it
+ if (pAllocator->m_pLastUsedDynamicCodeHeap == ((void *) pHeapList))
+ {
+ pAllocator->m_pLastUsedDynamicCodeHeap = NULL;
+ }
+
+ break;
+ }
+ }
+}
+
+void EEJitManager::FreeCodeMemory(HostCodeHeap *pCodeHeap, void * codeStart)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ CrstHolder ch(&m_CodeHeapCritSec);
+
+ // FreeCodeMemory is only supported on LCG methods,
+ // so pCodeHeap can only be a HostCodeHeap.
+
+ // clean up the NibbleMap
+ NibbleMapSet(pCodeHeap->m_pHeapList, (TADDR)codeStart, FALSE);
+
+ // The caller of this method doesn't call HostCodeHeap->FreeMemForCode
+ // directly because the operation should be protected by m_CodeHeapCritSec.
+ pCodeHeap->FreeMemForCode(codeStart);
+}
+
+void ExecutionManager::CleanupCodeHeaps()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (g_fProcessDetach || (GCHeap::IsGCInProgress() && ::IsGCThread()));
+
+ GetEEJitManager()->CleanupCodeHeaps();
+}
+
+void EEJitManager::CleanupCodeHeaps()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (g_fProcessDetach || (GCHeap::IsGCInProgress() && ::IsGCThread()));
+
+ CrstHolder ch(&m_CodeHeapCritSec);
+
+ if (m_cleanupList == NULL)
+ return;
+
+ HostCodeHeap *pHeap = m_cleanupList;
+ m_cleanupList = NULL;
+
+ while (pHeap)
+ {
+ HostCodeHeap *pNextHeap = pHeap->m_pNextHeapToRelease;
+
+ DWORD allocCount = pHeap->m_AllocationCount;
+ if (allocCount == 0)
+ {
+ LOG((LF_BCL, LL_INFO100, "Level2 - Destryoing CodeHeap [0x%p, vt(0x%x)] - ref count 0\n", pHeap, *(size_t*)pHeap));
+ RemoveCodeHeapFromDomainList(pHeap, pHeap->m_pAllocator);
+ DeleteCodeHeap(pHeap->m_pHeapList);
+ }
+ else
+ {
+ LOG((LF_BCL, LL_INFO100, "Level2 - Restoring CodeHeap [0x%p, vt(0x%x)] - ref count %d\n", pHeap, *(size_t*)pHeap, allocCount));
+ }
+ pHeap = pNextHeap;
+ }
+}
+
+void EEJitManager::RemoveFromCleanupList(HostCodeHeap *pCodeHeap)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
+ } CONTRACTL_END;
+
+ HostCodeHeap *pHeap = m_cleanupList;
+ HostCodeHeap *pPrevHeap = NULL;
+ while (pHeap)
+ {
+ if (pHeap == pCodeHeap)
+ {
+ if (pPrevHeap)
+ {
+ // remove current heap from list
+ pPrevHeap->m_pNextHeapToRelease = pHeap->m_pNextHeapToRelease;
+ }
+ else
+ {
+ m_cleanupList = pHeap->m_pNextHeapToRelease;
+ }
+ break;
+ }
+ pPrevHeap = pHeap;
+ pHeap = pHeap->m_pNextHeapToRelease;
+ }
+}
+
+void EEJitManager::AddToCleanupList(HostCodeHeap *pCodeHeap)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
+ } CONTRACTL_END;
+
+ // It may happen that the current heap's ref count goes to 0 and later on, before it is destroyed,
+ // it gets reused for another dynamic method.
+ // It's then possible that the ref count reaches 0 multiple times. If so, we simply don't add it again.
+ // Also, on cleanup we check that the ref count is actually 0.
+ HostCodeHeap *pHeap = m_cleanupList;
+ while (pHeap)
+ {
+ if (pHeap == pCodeHeap)
+ {
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p, vt(0x%x)] - Already in list\n", pCodeHeap, *(size_t*)pCodeHeap));
+ break;
+ }
+ pHeap = pHeap->m_pNextHeapToRelease;
+ }
+ if (pHeap == NULL)
+ {
+ pCodeHeap->m_pNextHeapToRelease = m_cleanupList;
+ m_cleanupList = pCodeHeap;
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p, vt(0x%x)] - ref count %d - Adding to cleanup list\n", pCodeHeap, *(size_t*)pCodeHeap, pCodeHeap->m_AllocationCount));
+ }
+}
+
+void EEJitManager::DeleteCodeHeap(HeapList *pHeapList)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
+ } CONTRACTL_END;
+
+ HeapList *pHp = GetCodeHeapList();
+ if (pHp == pHeapList)
+ m_pCodeHeap = pHp->GetNext();
+ else
+ {
+ HeapList *pHpNext = pHp->GetNext();
+
+ while (pHpNext != pHeapList)
+ {
+ pHp = pHpNext;
+ _ASSERTE(pHp != NULL); // should always find the HeapList
+ pHpNext = pHp->GetNext();
+ }
+ pHp->SetNext(pHeapList->GetNext());
+ }
+
+ DeleteEEFunctionTable((PVOID)pHeapList);
+
+ ExecutionManager::DeleteRange((TADDR)pHeapList);
+
+ LOG((LF_JIT, LL_INFO100, "DeleteCodeHeap start" FMT_ADDR "end" FMT_ADDR "\n",
+ (const BYTE*)pHeapList->startAddress,
+ (const BYTE*)pHeapList->endAddress ));
+
+ // pHeapList is allocated in pHeap, so we only need to delete the CodeHeap itself.
+ // !!! For SoC, the compiler inserts code to write a special cookie at pHeapList->pHeap after the delete operator, at least for debug code.
+ // !!! Since pHeapList is deleted at the same time as pHeap, this causes an AV.
+ // delete pHeapList->pHeap;
+ CodeHeap* pHeap = pHeapList->pHeap;
+ delete pHeap;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+static CodeHeader * GetCodeHeaderFromDebugInfoRequest(const DebugInfoRequest & request)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ TADDR address = (TADDR) request.GetStartAddress();
+ _ASSERTE(address != NULL);
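+ // Strip the low-order bits from the start address (e.g. the THUMB bit on ARM)
+ // before stepping back over the CodeHeader that precedes the code.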
+
+ CodeHeader * pHeader = dac_cast<PTR_CodeHeader>(address & ~3) - 1;
+ _ASSERTE(pHeader != NULL);
+
+ return pHeader;
+}
+
+//-----------------------------------------------------------------------------
+// Get vars from Jit Store
+//-----------------------------------------------------------------------------
+BOOL EEJitManager::GetBoundariesAndVars(
+ const DebugInfoRequest & request,
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ OUT ULONG32 * pcMap,
+ OUT ICorDebugInfo::OffsetMapping **ppMap,
+ OUT ULONG32 * pcVars,
+ OUT ICorDebugInfo::NativeVarInfo **ppVars)
+{
+ CONTRACTL {
+ THROWS; // on OOM.
+ GC_NOTRIGGER; // getting vars shouldn't trigger
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ CodeHeader * pHdr = GetCodeHeaderFromDebugInfoRequest(request);
+ _ASSERTE(pHdr != NULL);
+
+ PTR_BYTE pDebugInfo = pHdr->GetDebugInfo();
+
+ // No header created, which means no jit information is available.
+ if (pDebugInfo == NULL)
+ return FALSE;
+
+ // Uncompress. This allocates memory and may throw.
+ CompressDebugInfo::RestoreBoundariesAndVars(
+ fpNew, pNewData, // allocators
+ pDebugInfo, // input
+ pcMap, ppMap,
+ pcVars, ppVars); // output
+
+ return TRUE;
+}
+
+#ifdef DACCESS_COMPILE
+void CodeHeader::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, IJitManager* pJitMan)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ DAC_ENUM_DTHIS();
+
+#ifdef USE_INDIRECT_CODEHEADER
+ this->pRealCodeHeader.EnumMem();
+#endif // USE_INDIRECT_CODEHEADER
+
+ if (this->GetDebugInfo() != NULL)
+ {
+ CompressDebugInfo::EnumMemoryRegions(flags, this->GetDebugInfo());
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Enumerate for minidumps.
+//-----------------------------------------------------------------------------
+void EEJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ DebugInfoRequest request;
+ PCODE addrCode = pMD->GetNativeCode();
+ request.InitFromStartingAddr(pMD, addrCode);
+
+ CodeHeader * pHeader = GetCodeHeaderFromDebugInfoRequest(request);
+
+ pHeader->EnumMemoryRegions(flags, NULL);
+}
+#endif // DACCESS_COMPILE
+
+PCODE EEJitManager::GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset)
+{
+ WRAPPER_NO_CONTRACT;
+
+ CodeHeader * pHeader = GetCodeHeader(MethodToken);
+ return pHeader->GetCodeStartAddress() + relOffset;
+}
+
+BOOL EEJitManager::JitCodeToMethodInfo(
+ RangeSection * pRangeSection,
+ PCODE currentPC,
+ MethodDesc ** ppMethodDesc,
+ EECodeInfo * pCodeInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ _ASSERTE(pRangeSection != NULL);
+
+ TADDR start = dac_cast<PTR_EEJitManager>(pRangeSection->pjit)->FindMethodCode(pRangeSection, currentPC);
+ if (start == NULL)
+ return FALSE;
+
+ CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader));
+ if (pCHdr->IsStubCodeBlock())
+ return FALSE;
+
+ _ASSERTE(pCHdr->GetMethodDesc()->SanityCheck());
+
+ if (pCodeInfo)
+ {
+ pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(pCHdr));
+
+ // This can be counted on for Jitted code. For NGEN code in the case
+ // where we have hot/cold splitting this isn't valid and we need to
+ // take into account cold code.
+ pCodeInfo->m_relOffset = (DWORD)(PCODEToPINSTR(currentPC) - pCHdr->GetCodeStartAddress());
+
+#ifdef WIN64EXCEPTIONS
+ // Computed lazily by code:EEJitManager::LazyGetFunctionEntry
+ pCodeInfo->m_pFunctionEntry = NULL;
+#endif
+ }
+
+ if (ppMethodDesc)
+ {
+ *ppMethodDesc = pCHdr->GetMethodDesc();
+ }
+ return TRUE;
+}
+
+StubCodeBlockKind EEJitManager::GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ TADDR start = dac_cast<PTR_EEJitManager>(pRangeSection->pjit)->FindMethodCode(pRangeSection, currentPC);
+ if (start == NULL)
+ return STUB_CODE_BLOCK_NOCODE;
+ CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader));
+ return pCHdr->IsStubCodeBlock() ? pCHdr->GetStubCodeBlockKind() : STUB_CODE_BLOCK_MANAGED;
+}
+
+TADDR EEJitManager::FindMethodCode(PCODE currentPC)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ RangeSection * pRS = ExecutionManager::FindCodeRange(currentPC, ExecutionManager::GetScanFlags());
+ if (pRS == NULL || (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) == 0)
+ return STUB_CODE_BLOCK_NOCODE;
+ return dac_cast<PTR_EEJitManager>(pRS->pjit)->FindMethodCode(pRS, currentPC);
+}
+
+// Finds the header corresponding to the code at offset "delta".
+// Returns NULL if there is no header for the given "delta"
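+//
+// The nibble map records, for each 32-byte bucket of the code heap, either 0
+// (no method starts in that bucket) or the 1-based position of a method start
+// within the bucket. Lookup sketch (illustrative, assuming 4-byte code
+// alignment): for delta = 0x105, startPos = 0x105 / 32 = 8 and
+// offset = (0x105 % 32) / 4 + 1 = 2; if the nibble for bucket 8 is nonzero
+// and <= 2 the method starts in that bucket, otherwise we scan backwards
+// through earlier nibbles (skipping all-zero DWORDs) to the closest
+// preceding method start.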
+
+TADDR EEJitManager::FindMethodCode(RangeSection * pRangeSection, PCODE currentPC)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(pRangeSection != NULL);
+
+ HeapList *pHp = dac_cast<PTR_HeapList>(pRangeSection->pHeapListOrZapModule);
+
+ if ((currentPC < pHp->startAddress) ||
+ (currentPC > pHp->endAddress))
+ {
+ return NULL;
+ }
+
+ TADDR base = pHp->mapBase;
+ TADDR delta = currentPC - base;
+ PTR_DWORD pMap = pHp->pHdrMap;
+ PTR_DWORD pMapStart = pMap;
+
+ DWORD tmp;
+
+ size_t startPos = ADDR2POS(delta); // align to 32-byte buckets
+ // ( == index into the array of nibbles)
+ DWORD offset = ADDR2OFFS(delta); // this is the offset inside the bucket + 1
+
+ _ASSERTE(offset == (offset & NIBBLE_MASK));
+
+ pMap += (startPos >> LOG2_NIBBLES_PER_DWORD); // points to the proper DWORD of the map
+
+ // get DWORD and shift down our nibble
+
+ PREFIX_ASSUME(pMap != NULL);
+ tmp = VolatileLoadWithoutBarrier<DWORD>(pMap) >> POS2SHIFTCOUNT(startPos);
+
+ if ((tmp & NIBBLE_MASK) && ((tmp & NIBBLE_MASK) <= offset) )
+ {
+ return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
+ }
+
+ // Is there a header in the remainder of the DWORD ?
+ tmp = tmp >> NIBBLE_SIZE;
+
+ if (tmp)
+ {
+ startPos--;
+ while (!(tmp & NIBBLE_MASK))
+ {
+ tmp = tmp >> NIBBLE_SIZE;
+ startPos--;
+ }
+ return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
+ }
+
+ // We skipped the remainder of the DWORD,
+ // so we must set startPos to the highest position of
+ // previous DWORD, unless we are already on the first DWORD
+
+ if (startPos < NIBBLES_PER_DWORD)
+ return NULL;
+
+ startPos = ((startPos >> LOG2_NIBBLES_PER_DWORD) << LOG2_NIBBLES_PER_DWORD) - 1;
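+ // i.e. round startPos down to its DWORD boundary, then step to the last
+ // nibble of the previous DWORD (e.g. startPos 13 -> 8 -> 7).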
+
+ // Skip "headerless" DWORDS
+
+ while (pMapStart < pMap && 0 == (tmp = VolatileLoadWithoutBarrier<DWORD>(--pMap)))
+ {
+ startPos -= NIBBLES_PER_DWORD;
+ }
+
+ // This helps to catch degenerate error cases. It relies on the fact that
+ // startPos can never be bigger than MAX_UINT.
+ if (((INT_PTR)startPos) < 0)
+ return NULL;
+
+ // Find the nibble with the header in the DWORD
+
+ while (startPos && !(tmp & NIBBLE_MASK))
+ {
+ tmp = tmp >> NIBBLE_SIZE;
+ startPos--;
+ }
+
+ if (startPos == 0 && tmp == 0)
+ return NULL;
+
+ return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
+}
+
+#if !defined(DACCESS_COMPILE)
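+// NibbleMapSet writes (or clears, when bSet is FALSE) the single nibble that
+// describes the bucket containing pCode. Sketch (illustrative): for bucket
+// index 8 the nibble lives in map DWORD 8 / 8 = 1; 'mask' zeroes exactly that
+// nibble, 'value' is the 1-based in-bucket offset shifted into the same
+// position, and the combined store is a single DWORD write so that
+// FindMethodCode can read the map without synchronization.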
+void EEJitManager::NibbleMapSet(HeapList * pHp, TADDR pCode, BOOL bSet)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ // Currently all callers to this method ensure EEJitManager::m_CodeHeapCritSec
+ // is held.
+ _ASSERTE(m_CodeHeapCritSec.OwnedByCurrentThread());
+
+ _ASSERTE(pCode >= pHp->mapBase);
+
+ size_t delta = pCode - pHp->mapBase;
+
+ size_t pos = ADDR2POS(delta);
+ DWORD value = bSet?ADDR2OFFS(delta):0;
+
+ DWORD index = (DWORD) (pos >> LOG2_NIBBLES_PER_DWORD);
+ DWORD mask = ~((DWORD) HIGHEST_NIBBLE_MASK >> ((pos & NIBBLES_PER_DWORD_MASK) << LOG2_NIBBLE_SIZE));
+
+ value = value << POS2SHIFTCOUNT(pos);
+
+ PTR_DWORD pMap = pHp->pHdrMap;
+
+ // Assert that we don't overwrite an existing offset
+ // (either this is a reset, or the nibble is empty)
+ _ASSERTE(!value || !((*(pMap+index))& ~mask));
+
+ // It is important for this update to be atomic. Synchronization would be required with FindMethodCode otherwise.
+ *(pMap+index) = ((*(pMap+index))&mask)|value;
+
+ pHp->cBlocks += (bSet ? 1 : -1);
+}
+#endif // !DACCESS_COMPILE
+
+#if defined(WIN64EXCEPTIONS)
+PTR_RUNTIME_FUNCTION EEJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ if (!pCodeInfo->IsValid())
+ {
+ return NULL;
+ }
+
+ CodeHeader * pHeader = GetCodeHeader(pCodeInfo->GetMethodToken());
+
+ DWORD address = RUNTIME_FUNCTION__BeginAddress(pHeader->GetUnwindInfo(0)) + pCodeInfo->GetRelOffset();
+
+ // We need the module base address to calculate the end address of a function from the functionEntry.
+ // Thus, save it off right now.
+ TADDR baseAddress = pCodeInfo->GetModuleBase();
+
+ // NOTE: We could binary search here, if it would be helpful (e.g., large number of funclets)
+ for (UINT iUnwindInfo = 0; iUnwindInfo < pHeader->GetNumberOfUnwindInfos(); iUnwindInfo++)
+ {
+ PTR_RUNTIME_FUNCTION pFunctionEntry = pHeader->GetUnwindInfo(iUnwindInfo);
+
+ if (RUNTIME_FUNCTION__BeginAddress(pFunctionEntry) <= address && address < RUNTIME_FUNCTION__EndAddress(pFunctionEntry, baseAddress))
+ {
+ return pFunctionEntry;
+ }
+ }
+
+ return NULL;
+}
+
+DWORD EEJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ CodeHeader * pCH = GetCodeHeader(MethodToken);
+ TADDR moduleBase = JitTokenToModuleBase(MethodToken);
+
+ _ASSERTE(pCH->GetNumberOfUnwindInfos() >= 1);
+
+ DWORD parentBeginRva = RUNTIME_FUNCTION__BeginAddress(pCH->GetUnwindInfo(0));
+
+ DWORD nFunclets = 0;
+ for (COUNT_T iUnwindInfo = 1; iUnwindInfo < pCH->GetNumberOfUnwindInfos(); iUnwindInfo++)
+ {
+ PTR_RUNTIME_FUNCTION pFunctionEntry = pCH->GetUnwindInfo(iUnwindInfo);
+
+#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
+ if (IsFunctionFragment(moduleBase, pFunctionEntry))
+ {
+ // This is a fragment (not the funclet beginning); skip it
+ continue;
+ }
+#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
+
+ DWORD funcletBeginRva = RUNTIME_FUNCTION__BeginAddress(pFunctionEntry);
+ DWORD relParentOffsetToFunclet = funcletBeginRva - parentBeginRva;
+
+ if (nFunclets < dwLength)
+ pStartFuncletOffsets[nFunclets] = relParentOffsetToFunclet;
+ nFunclets++;
+ }
+
+ return nFunclets;
+}
+
+#if defined(DACCESS_COMPILE)
+// This function is basically like RtlLookupFunctionEntry(), except that it works with DAC
+// to read the function entries out of process. Also, it can only look up function entries
+// inside mscorwks.dll, since DAC doesn't know anything about other unmanaged DLLs.
+void GetUnmanagedStackWalkInfo(IN ULONG64 ControlPc,
+ OUT UINT_PTR* pModuleBase,
+ OUT UINT_PTR* pFuncEntry)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (pModuleBase)
+ {
+ *pModuleBase = NULL;
+ }
+
+ if (pFuncEntry)
+ {
+ *pFuncEntry = NULL;
+ }
+
+ PEDecoder peDecoder(DacGlobalBase());
+
+ SIZE_T baseAddr = dac_cast<TADDR>(peDecoder.GetBase());
+ SIZE_T cbSize = (SIZE_T)peDecoder.GetVirtualSize();
+
+ // Check if the control PC is inside mscorwks.
+ if ( (baseAddr <= ControlPc) &&
+ (ControlPc < (baseAddr + cbSize))
+ )
+ {
+ if (pModuleBase)
+ {
+ *pModuleBase = baseAddr;
+ }
+
+ if (pFuncEntry)
+ {
+ // Check if there is a static function table.
+            COUNT_T cbFunctionTableSize = 0;
+            TADDR pExceptionDir = peDecoder.GetDirectoryEntryData(IMAGE_DIRECTORY_ENTRY_EXCEPTION, &cbFunctionTableSize);
+
+ if (pExceptionDir != NULL)
+ {
+ // Do a binary search on the static function table of mscorwks.dll.
+ HRESULT hr = E_FAIL;
+ TADDR taFuncEntry;
+ T_RUNTIME_FUNCTION functionEntry;
+
+ DWORD dwLow = 0;
+                DWORD dwHigh = cbFunctionTableSize / sizeof(RUNTIME_FUNCTION);
+ DWORD dwMid = 0;
+
+ while (dwLow <= dwHigh)
+ {
+ dwMid = (dwLow + dwHigh) >> 1;
+ taFuncEntry = pExceptionDir + dwMid * sizeof(RUNTIME_FUNCTION);
+ hr = DacReadAll(taFuncEntry, &functionEntry, sizeof(functionEntry), false);
+ if (FAILED(hr))
+ {
+ return;
+ }
+
+ if (ControlPc < baseAddr + functionEntry.BeginAddress)
+ {
+ dwHigh = dwMid - 1;
+ }
+ else if (ControlPc >= baseAddr + RUNTIME_FUNCTION__EndAddress(&functionEntry, baseAddr))
+ {
+ dwLow = dwMid + 1;
+ }
+ else
+ {
+ _ASSERTE(pFuncEntry);
+ *pFuncEntry = (UINT_PTR)(T_RUNTIME_FUNCTION*)PTR_RUNTIME_FUNCTION(taFuncEntry);
+ break;
+ }
+ }
+
+ if (dwLow > dwHigh)
+ {
+ _ASSERTE(*pFuncEntry == NULL);
+ }
+ }
+ }
+ }
+}
+#endif // DACCESS_COMPILE
+
+extern "C" void GetRuntimeStackWalkInfo(IN ULONG64 ControlPc,
+ OUT UINT_PTR* pModuleBase,
+ OUT UINT_PTR* pFuncEntry)
+{
+
+ WRAPPER_NO_CONTRACT;
+
+ BEGIN_PRESERVE_LAST_ERROR;
+
+ BEGIN_ENTRYPOINT_VOIDRET;
+
+ if (pModuleBase)
+ *pModuleBase = NULL;
+ if (pFuncEntry)
+ *pFuncEntry = NULL;
+
+ EECodeInfo codeInfo((PCODE)ControlPc);
+ if (!codeInfo.IsValid())
+ {
+#if defined(DACCESS_COMPILE)
+ GetUnmanagedStackWalkInfo(ControlPc, pModuleBase, pFuncEntry);
+#endif // DACCESS_COMPILE
+ goto Exit;
+ }
+
+ if (pModuleBase)
+ {
+ *pModuleBase = (UINT_PTR)codeInfo.GetModuleBase();
+ }
+
+ if (pFuncEntry)
+ {
+ *pFuncEntry = (UINT_PTR)(PT_RUNTIME_FUNCTION)codeInfo.GetFunctionEntry();
+ }
+
+Exit:
+ END_ENTRYPOINT_VOIDRET;
+
+ END_PRESERVE_LAST_ERROR;
+}
+#endif // WIN64EXCEPTIONS
+
+#ifdef DACCESS_COMPILE
+
+void EEJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ IJitManager::EnumMemoryRegions(flags);
+
+ //
+ // Save all of the code heaps.
+ //
+
+ HeapList* heap;
+
+ for (heap = m_pCodeHeap; heap; heap = heap->GetNext())
+ {
+ DacEnumHostDPtrMem(heap);
+
+ if (heap->pHeap.IsValid())
+ {
+ heap->pHeap->EnumMemoryRegions(flags);
+ }
+
+ DacEnumMemoryRegion(heap->startAddress, (ULONG32)
+ (heap->endAddress - heap->startAddress));
+
+ if (heap->pHdrMap.IsValid())
+ {
+ ULONG32 nibbleMapSize = (ULONG32)
+ HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heap->maxCodeHeapSize));
+ DacEnumMemoryRegion(dac_cast<TADDR>(heap->pHdrMap), nibbleMapSize);
+ }
+ }
+}
+#endif // #ifdef DACCESS_COMPILE
+
+#endif // CROSSGEN_COMPILE
+
+
+#ifndef DACCESS_COMPILE
+
+//*******************************************************
+// Execution Manager
+//*******************************************************
+
+// Init statics
+void ExecutionManager::Init()
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ m_JumpStubCrst.Init(CrstJumpStubCache, CrstFlags(CRST_UNSAFE_ANYMODE|CRST_DEBUGGER_THREAD));
+
+ m_RangeCrst.Init(CrstExecuteManRangeLock, CRST_UNSAFE_ANYMODE);
+
+ m_pDefaultCodeMan = new EECodeManager();
+
+#ifndef CROSSGEN_COMPILE
+ m_pEEJitManager = new EEJitManager();
+#endif
+#ifdef FEATURE_PREJIT
+ m_pNativeImageJitManager = new NativeImageJitManager();
+#endif
+
+#ifdef FEATURE_READYTORUN
+ m_pReadyToRunJitManager = new ReadyToRunJitManager();
+#endif
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+//**************************************************************************
+RangeSection *
+ExecutionManager::FindCodeRange(PCODE currentPC, ScanFlag scanFlag)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ if (currentPC == NULL)
+ return NULL;
+
+ if (scanFlag == ScanReaderLock)
+ return FindCodeRangeWithLock(currentPC);
+
+ return GetRangeSection(currentPC);
+}
+
+//**************************************************************************
+NOINLINE // Make sure that the slow path with lock won't affect the fast path
+RangeSection *
+ExecutionManager::FindCodeRangeWithLock(PCODE currentPC)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ ReaderLockHolder rlh;
+ return GetRangeSection(currentPC);
+}
+
+//**************************************************************************
+MethodDesc * ExecutionManager::GetCodeMethodDesc(PCODE currentPC)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ EECodeInfo codeInfo(currentPC);
+ if (!codeInfo.IsValid())
+ return NULL;
+ return codeInfo.GetMethodDesc();
+}
+
+//**************************************************************************
+BOOL ExecutionManager::IsManagedCode(PCODE currentPC)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ if (currentPC == NULL)
+ return FALSE;
+
+ if (GetScanFlags() == ScanReaderLock)
+ return IsManagedCodeWithLock(currentPC);
+
+ return IsManagedCodeWorker(currentPC);
+}
+
+//**************************************************************************
+NOINLINE // Make sure that the slow path with lock won't affect the fast path
+BOOL ExecutionManager::IsManagedCodeWithLock(PCODE currentPC)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ ReaderLockHolder rlh;
+ return IsManagedCodeWorker(currentPC);
+}
+
+//**************************************************************************
+BOOL ExecutionManager::IsManagedCode(PCODE currentPC, HostCallPreference hostCallPreference /*=AllowHostCalls*/, BOOL *pfFailedReaderLock /*=NULL*/)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+#ifdef DACCESS_COMPILE
+ return IsManagedCode(currentPC);
+#else
+ if (hostCallPreference == AllowHostCalls)
+ {
+ return IsManagedCode(currentPC);
+ }
+
+ ReaderLockHolder rlh(hostCallPreference);
+ if (!rlh.Acquired())
+ {
+ _ASSERTE(pfFailedReaderLock != NULL);
+ *pfFailedReaderLock = TRUE;
+ return FALSE;
+ }
+
+ return IsManagedCodeWorker(currentPC);
+#endif
+}
+
+//**************************************************************************
+// Assumes that the ExecutionManager reader/writer lock is taken or that
+// it is safe not to take it.
+BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ // This may get called for arbitrary code addresses. Note that the lock is
+ // taken over the call to JitCodeToMethodInfo too so that nobody pulls out
+ // the range section from underneath us.
+
+ RangeSection * pRS = GetRangeSection(currentPC);
+ if (pRS == NULL)
+ return FALSE;
+
+ if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
+ {
+#ifndef CROSSGEN_COMPILE
+        // Typically if we find a Jit Manager we are inside a managed method,
+        // but we could also be in a stub, so we check for that as well, and we
+        // don't consider a stub to be real managed code.
+ TADDR start = dac_cast<PTR_EEJitManager>(pRS->pjit)->FindMethodCode(pRS, currentPC);
+ if (start == NULL)
+ return FALSE;
+ CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader));
+ if (!pCHdr->IsStubCodeBlock())
+ return TRUE;
+#endif
+ }
+#ifdef FEATURE_READYTORUN
+ else
+ if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)
+ {
+ if (dac_cast<PTR_ReadyToRunJitManager>(pRS->pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL))
+ return TRUE;
+ }
+#endif
+ else
+ {
+#ifdef FEATURE_PREJIT
+ // Check that we are in the range with true managed code. We don't
+ // consider jump stubs or precodes to be real managed code.
+
+ Module * pModule = dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
+
+ NGenLayoutInfo * pLayoutInfo = pModule->GetNGenLayoutInfo();
+
+ if (pLayoutInfo->m_CodeSections[0].IsInRange(currentPC) ||
+ pLayoutInfo->m_CodeSections[1].IsInRange(currentPC) ||
+ pLayoutInfo->m_CodeSections[2].IsInRange(currentPC))
+ {
+ return TRUE;
+ }
+#endif
+ }
+
+ return FALSE;
+}
+
+#ifndef DACCESS_COMPILE
+
+//**************************************************************************
+// Clear the caches for all JITs loaded.
+//
+void ExecutionManager::ClearCaches( void )
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ GetEEJitManager()->ClearCache();
+}
+
+//**************************************************************************
+// Check if caches for any JITs loaded need to be cleaned
+//
+BOOL ExecutionManager::IsCacheCleanupRequired( void )
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ return GetEEJitManager()->IsCacheCleanupRequired();
+}
+
+#ifndef FEATURE_MERGE_JIT_AND_ENGINE
+/*********************************************************************/
+// This static method returns the name of the jit dll
+//
+LPWSTR ExecutionManager::GetJitName()
+{
+ STANDARD_VM_CONTRACT;
+
+ LPWSTR pwzJitName;
+
+ // Try to obtain a name for the jit library from the env. variable
+ IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_JitName, &pwzJitName));
+
+ if (NULL == pwzJitName)
+ {
+ pwzJitName = MAKEDLLNAME_W(W("clrjit"));
+ }
+
+ return pwzJitName;
+}
+#endif // FEATURE_MERGE_JIT_AND_ENGINE
+
+#endif // #ifndef DACCESS_COMPILE
+
+RangeSection* ExecutionManager::GetRangeSection(TADDR addr)
+{
+ CONTRACTL {
+ NOTHROW;
+ HOST_NOCALLS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ RangeSection * pHead = m_CodeRangeList;
+
+ if (pHead == NULL)
+ {
+ return NULL;
+ }
+
+ RangeSection *pCurr = pHead;
+ RangeSection *pLast = NULL;
+
+#ifndef DACCESS_COMPILE
+ RangeSection *pLastUsedRS = (pCurr != NULL) ? pCurr->pLastUsed : NULL;
+
+ if (pLastUsedRS != NULL)
+ {
+        // Positive case: addr falls inside the cached range section.
+ if ((addr >= pLastUsedRS->LowAddress) &&
+ (addr < pLastUsedRS->HighAddress) )
+ {
+ return pLastUsedRS;
+ }
+
+ RangeSection * pNextAfterLastUsedRS = pLastUsedRS->pnext;
+
+        // Negative case: addr falls in the gap below the cached section and
+        // above the next (lower) section, if any, so no section contains it.
+ if ((addr < pLastUsedRS->LowAddress) &&
+ (pNextAfterLastUsedRS == NULL || addr >= pNextAfterLastUsedRS->HighAddress))
+ {
+ return NULL;
+ }
+ }
+#endif
+
+ while (pCurr != NULL)
+ {
+ // See if addr is in [pCurr->LowAddress .. pCurr->HighAddress)
+ if (pCurr->LowAddress <= addr)
+ {
+            // Since the list is sorted in descending order, once addr is at or
+            // above pCurr->HighAddress, all subsequent sections lie lower
+            // still, so we are done.
+ if (addr >= pCurr->HighAddress)
+ {
+ // we'll return NULL and put pLast into pLastUsed
+ pCurr = NULL;
+ }
+ else
+ {
+ // addr must be in [pCurr->LowAddress .. pCurr->HighAddress)
+ _ASSERTE((pCurr->LowAddress <= addr) && (addr < pCurr->HighAddress));
+
+ // Found the matching RangeSection
+ // we'll return pCurr and put it into pLastUsed
+ pLast = pCurr;
+ }
+
+ break;
+ }
+ pLast = pCurr;
+ pCurr = pCurr->pnext;
+ }
+
+#ifndef DACCESS_COMPILE
+    // Cache pCurr as pLastUsed in the head node, unless we are on an MP system
+    // with many CPUs, where this sort of caching actually diminishes scaling
+    // during server GC because many processors write to a common location.
+ if (g_SystemInfo.dwNumberOfProcessors < 4 || !GCHeap::IsServerHeap() || !GCHeap::IsGCInProgress())
+ pHead->pLastUsed = pLast;
+#endif
+
+ return pCurr;
+}
+
+RangeSection* ExecutionManager::GetRangeSectionAndPrev(RangeSection *pHead, TADDR addr, RangeSection** ppPrev)
+{
+ WRAPPER_NO_CONTRACT;
+
+ RangeSection *pCurr;
+ RangeSection *pPrev;
+ RangeSection *result = NULL;
+
+ for (pPrev = NULL, pCurr = pHead;
+ pCurr != NULL;
+ pPrev = pCurr, pCurr = pCurr->pnext)
+ {
+ // See if addr is in [pCurr->LowAddress .. pCurr->HighAddress)
+ if (pCurr->LowAddress > addr)
+ continue;
+
+ if (addr >= pCurr->HighAddress)
+ break;
+
+ // addr must be in [pCurr->LowAddress .. pCurr->HighAddress)
+ _ASSERTE((pCurr->LowAddress <= addr) && (addr < pCurr->HighAddress));
+
+ // Found the matching RangeSection
+ result = pCurr;
+
+ // Write back pPrev to ppPrev if it is non-null
+ if (ppPrev != NULL)
+ *ppPrev = pPrev;
+
+ break;
+ }
+
+ // If we failed to find a match write NULL to ppPrev if it is non-null
+ if ((ppPrev != NULL) && (result == NULL))
+ {
+ *ppPrev = NULL;
+ }
+
+ return result;
+}
+
+/* static */
+PTR_Module ExecutionManager::FindZapModule(TADDR currentData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ STATIC_CONTRACT_HOST_CALLS;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ ReaderLockHolder rlh;
+
+ RangeSection * pRS = GetRangeSection(currentData);
+ if (pRS == NULL)
+ return NULL;
+
+ if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
+ return NULL;
+
+#ifdef FEATURE_READYTORUN
+ if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)
+ return NULL;
+#endif
+
+ return dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
+}
+
+/* static */
+PTR_Module ExecutionManager::FindModuleForGCRefMap(TADDR currentData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ RangeSection * pRS = FindCodeRange(currentData, ExecutionManager::GetScanFlags());
+ if (pRS == NULL)
+ return NULL;
+
+ if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
+ return NULL;
+
+#ifdef FEATURE_READYTORUN
+ // RANGE_SECTION_READYTORUN is intentionally not filtered out here
+#endif
+
+ return dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
+}
+
+#ifndef DACCESS_COMPILE
+
+/* NGenMem depends on this entrypoint */
+NOINLINE
+void ExecutionManager::AddCodeRange(TADDR pStartRange,
+ TADDR pEndRange,
+ IJitManager * pJit,
+ RangeSection::RangeSectionFlags flags,
+ void * pHp)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pJit));
+ PRECONDITION(CheckPointer(pHp));
+ } CONTRACTL_END;
+
+ AddRangeHelper(pStartRange,
+ pEndRange,
+ pJit,
+ flags,
+ dac_cast<TADDR>(pHp));
+}
+
+#ifdef FEATURE_PREJIT
+
+void ExecutionManager::AddNativeImageRange(TADDR StartRange,
+ SIZE_T Size,
+ Module * pModule)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pModule));
+ } CONTRACTL_END;
+
+ AddRangeHelper(StartRange,
+ StartRange + Size,
+ GetNativeImageJitManager(),
+ RangeSection::RANGE_SECTION_NONE,
+ dac_cast<TADDR>(pModule));
+}
+#endif
+
+void ExecutionManager::AddRangeHelper(TADDR pStartRange,
+ TADDR pEndRange,
+ IJitManager * pJit,
+ RangeSection::RangeSectionFlags flags,
+ TADDR pHeapListOrZapModule)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ HOST_CALLS;
+ PRECONDITION(pStartRange < pEndRange);
+ PRECONDITION(pHeapListOrZapModule != NULL);
+ } CONTRACTL_END;
+
+ RangeSection *pnewrange = new RangeSection;
+
+ _ASSERTE(pEndRange > pStartRange);
+
+ pnewrange->LowAddress = pStartRange;
+ pnewrange->HighAddress = pEndRange;
+ pnewrange->pjit = pJit;
+ pnewrange->pnext = NULL;
+ pnewrange->flags = flags;
+ pnewrange->pLastUsed = NULL;
+ pnewrange->pHeapListOrZapModule = pHeapListOrZapModule;
+#if defined(_TARGET_AMD64_)
+ pnewrange->pUnwindInfoTable = NULL;
+#endif // defined(_TARGET_AMD64_)
+ {
+ CrstHolder ch(&m_RangeCrst); // Acquire the Crst before linking in a new RangeList
+
+ RangeSection * current = m_CodeRangeList;
+ RangeSection * previous = NULL;
+
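+        // The list is sorted by descending LowAddress: find the first existing
+        // section that starts below the new range and link the new section in
+        // just before it (or at the end of the list if none does).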
+ if (current != NULL)
+ {
+ while (true)
+ {
+ // Sort addresses top down so that more recently created ranges
+ // will populate the top of the list
+ if (pnewrange->LowAddress > current->LowAddress)
+ {
+ // Asserts if ranges are overlapping
+ _ASSERTE(pnewrange->LowAddress >= current->HighAddress);
+ pnewrange->pnext = current;
+
+ if (previous == NULL) // insert new head
+ {
+ m_CodeRangeList = pnewrange;
+ }
+ else
+ { // insert in the middle
+ previous->pnext = pnewrange;
+ }
+ break;
+ }
+
+ RangeSection * next = current->pnext;
+ if (next == NULL) // insert at end of list
+ {
+ current->pnext = pnewrange;
+ break;
+ }
+
+ // Continue walking the RangeSection list
+ previous = current;
+ current = next;
+ }
+ }
+ else
+ {
+ m_CodeRangeList = pnewrange;
+ }
+ }
+}
+
+// Deletes a single range starting at pStartRange
+void ExecutionManager::DeleteRange(TADDR pStartRange)
+{
+ CONTRACTL {
+ NOTHROW; // If this becomes throwing, then revisit the queuing of deletes below.
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ RangeSection *pCurr = NULL;
+ {
+ // Acquire the Crst before unlinking a RangeList.
+ // NOTE: The Crst must be acquired BEFORE we grab the writer lock, as the
+ // writer lock forces us into a forbid suspend thread region, and it's illegal
+ // to enter a Crst after the forbid suspend thread region is entered
+ CrstHolder ch(&m_RangeCrst);
+
+ // Acquire the WriterLock and prevent any readers from walking the RangeList.
+ // This also forces us to enter a forbid suspend thread region, to prevent
+ // hijacking profilers from grabbing this thread and walking it (the walk may
+ // require the reader lock, which would cause a deadlock).
+ WriterLockHolder wlh;
+
+ RangeSection *pPrev = NULL;
+
+ pCurr = GetRangeSectionAndPrev(m_CodeRangeList, pStartRange, &pPrev);
+
+ // pCurr points at the Range that needs to be unlinked from the RangeList
+ if (pCurr != NULL)
+ {
+
+            // If pPrev is NULL then the head of this list is to be deleted
+ if (pPrev == NULL)
+ {
+ m_CodeRangeList = pCurr->pnext;
+ }
+ else
+ {
+ _ASSERT(pPrev->pnext == pCurr);
+
+ pPrev->pnext = pCurr->pnext;
+ }
+
+ // Clear the cache pLastUsed in the head node (if any)
+ RangeSection * head = m_CodeRangeList;
+ if (head != NULL)
+ {
+ head->pLastUsed = NULL;
+ }
+
+ //
+ // Cannot delete pCurr here because we own the WriterLock and if this is
+ // a hosted scenario then the hosting api callback cannot occur in a forbid
+ // suspend region, which the writer lock is.
+ //
+ }
+ }
+
+ //
+ // Now delete the node
+ //
+ if (pCurr != NULL)
+ {
+#if defined(_TARGET_AMD64_)
+ if (pCurr->pUnwindInfoTable != 0)
+ delete pCurr->pUnwindInfoTable;
+#endif // defined(_TARGET_AMD64_)
+ delete pCurr;
+ }
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void ExecutionManager::EnumRangeList(RangeSection* list,
+ CLRDataEnumMemoryFlags flags)
+{
+ while (list != NULL)
+ {
+ // If we can't read the target memory, stop immediately so we don't work
+ // with broken data.
+ if (!DacEnumMemoryRegion(dac_cast<TADDR>(list), sizeof(*list)))
+ break;
+
+ if (list->pjit.IsValid())
+ {
+ list->pjit->EnumMemoryRegions(flags);
+ }
+
+ if (!(list->flags & RangeSection::RANGE_SECTION_CODEHEAP))
+ {
+ PTR_Module pModule = dac_cast<PTR_Module>(list->pHeapListOrZapModule);
+
+ if (pModule.IsValid())
+ {
+ pModule->EnumMemoryRegions(flags, true);
+ }
+ }
+
+ list = list->pnext;
+#if defined (_DEBUG)
+        // Test hook: when testing on debug builds, we want an easy way to test
+        // that the while loop correctly terminates in the face of ridiculous
+        // stuff from the target.
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DumpGeneration_IntentionallyCorruptDataFromTarget) == 1)
+ {
+ // Force us to struggle on with something bad.
+ if (list == NULL)
+ {
+ list = (RangeSection *)&flags;
+ }
+ }
+#endif // (_DEBUG)
+
+ }
+}
+
+void ExecutionManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ STATIC_CONTRACT_HOST_CALLS;
+
+ ReaderLockHolder rlh;
+
+ //
+ // Report the global data portions.
+ //
+
+ m_CodeRangeList.EnumMem();
+ m_pDefaultCodeMan.EnumMem();
+
+ //
+ // Walk structures and report.
+ //
+
+ if (m_CodeRangeList.IsValid())
+ {
+ EnumRangeList(m_CodeRangeList, flags);
+ }
+}
+#endif // #ifdef DACCESS_COMPILE
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+
+void ExecutionManager::Unload(LoaderAllocator *pLoaderAllocator)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ // a size of 0 is a signal to Nirvana to flush the entire cache
+ FlushInstructionCache(GetCurrentProcess(),0,0);
+
+ /* StackwalkCacheEntry::EIP is an address into code. Since we are
+ unloading the code, we need to invalidate the cache. Otherwise,
+       it's possible that another appdomain might generate code at the very
+ same address, and we might incorrectly think that the old
+ StackwalkCacheEntry corresponds to it. So flush the cache.
+ */
+ StackwalkCache::Invalidate(pLoaderAllocator);
+
+ JumpStubCache * pJumpStubCache = (JumpStubCache *)pLoaderAllocator->m_pJumpStubCache;
+ if (pJumpStubCache != NULL)
+ {
+ delete pJumpStubCache;
+ pLoaderAllocator->m_pJumpStubCache = NULL;
+ }
+
+ GetEEJitManager()->Unload(pLoaderAllocator);
+}
+
+PCODE ExecutionManager::jumpStub(MethodDesc* pMD, PCODE target,
+ BYTE * loAddr, BYTE * hiAddr,
+ LoaderAllocator *pLoaderAllocator)
+{
+ CONTRACT(PCODE) {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pLoaderAllocator != NULL || pMD != NULL);
+ PRECONDITION(loAddr < hiAddr);
+ POSTCONDITION(RETVAL != NULL);
+ } CONTRACT_END;
+
+ PCODE jumpStub = NULL;
+
+ if (pLoaderAllocator == NULL)
+ pLoaderAllocator = pMD->GetLoaderAllocatorForCode();
+ _ASSERTE(pLoaderAllocator != NULL);
+
+ CrstHolder ch(&m_JumpStubCrst);
+
+ JumpStubCache * pJumpStubCache = (JumpStubCache *)pLoaderAllocator->m_pJumpStubCache;
+ if (pJumpStubCache == NULL)
+ {
+ pJumpStubCache = new JumpStubCache();
+ pLoaderAllocator->m_pJumpStubCache = pJumpStubCache;
+ }
+
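+    // A single target may have several jump stubs, one per addressable range,
+    // so walk all cache entries for 'target' and pick one that falls inside
+    // [loAddr, hiAddr].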
+ for (JumpStubTable::KeyIterator i = pJumpStubCache->m_Table.Begin(target),
+ end = pJumpStubCache->m_Table.End(target); i != end; i++)
+ {
+ jumpStub = i->m_jumpStub;
+
+ _ASSERTE(jumpStub != NULL);
+
+        // Is the matching entry within the requested range?
+ if (((TADDR)loAddr <= jumpStub) && (jumpStub <= (TADDR)hiAddr))
+ {
+ RETURN(jumpStub);
+ }
+ }
+
+ // If we get here we need to create a new jump stub
+ // add or change the jump stub table to point at the new one
+ jumpStub = getNextJumpStub(pMD, target, loAddr, hiAddr, pLoaderAllocator); // this statement can throw
+
+ _ASSERTE(((TADDR)loAddr <= jumpStub) && (jumpStub <= (TADDR)hiAddr));
+
+ LOG((LF_JIT, LL_INFO10000, "Add JumpStub to" FMT_ADDR "at" FMT_ADDR "\n",
+ DBG_ADDR(target), DBG_ADDR(jumpStub) ));
+
+ RETURN(jumpStub);
+}
+
+PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target,
+ BYTE * loAddr, BYTE * hiAddr, LoaderAllocator *pLoaderAllocator)
+{
+ CONTRACT(PCODE) {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(pLoaderAllocator != NULL);
+ PRECONDITION(m_JumpStubCrst.OwnedByCurrentThread());
+ POSTCONDITION(RETVAL != NULL);
+ } CONTRACT_END;
+
+ BYTE * jumpStub = NULL;
+ bool isLCG = pMD && pMD->IsLCGMethod();
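+    // LCG methods keep their jump stub blocks on the method's resolver so the
+    // blocks can be freed together with the method; all other methods share
+    // the per-LoaderAllocator block list hanging off the JumpStubCache.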
+ JumpStubBlockHeader ** ppHead = isLCG ? &(pMD->AsDynamicMethodDesc()->GetLCGMethodResolver()->m_jumpStubBlock) : &(((JumpStubCache *)(pLoaderAllocator->m_pJumpStubCache))->m_pBlocks);
+ JumpStubBlockHeader * curBlock = *ppHead;
+
+ while (curBlock)
+ {
+ _ASSERTE(pLoaderAllocator == (isLCG ? curBlock->GetHostCodeHeap()->GetAllocator() : curBlock->GetLoaderAllocator()));
+
+ if (curBlock->m_used < curBlock->m_allocated)
+ {
+ jumpStub = (BYTE *) curBlock + sizeof(JumpStubBlockHeader) + ((size_t) curBlock->m_used * BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
+
+ if ((loAddr <= jumpStub) && (jumpStub <= hiAddr))
+ {
+ // We will update curBlock->m_used at "DONE"
+ goto DONE;
+ }
+ }
+
+ curBlock = curBlock->m_next;
+ }
+
+ // If we get here then we need to allocate a new JumpStubBlock
+
+ // allocJumpStubBlock will allocate from the LoaderCodeHeap for normal methods and HostCodeHeap for LCG methods
+ // this can throw an OM exception
+ curBlock = ExecutionManager::GetEEJitManager()->allocJumpStubBlock(pMD, DEFAULT_JUMPSTUBS_PER_BLOCK, loAddr, hiAddr, pLoaderAllocator);
+
+ jumpStub = (BYTE *) curBlock + sizeof(JumpStubBlockHeader) + ((size_t) curBlock->m_used * BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
+
+ _ASSERTE((loAddr <= jumpStub) && (jumpStub <= hiAddr));
+
+ curBlock->m_next = *ppHead;
+ *ppHead = curBlock;
+
+DONE:
+
+ _ASSERTE((curBlock->m_used < curBlock->m_allocated));
+
+#ifdef _TARGET_ARM64_
+ // 8-byte alignment is required on ARM64
+ _ASSERTE(((UINT_PTR)jumpStub & 7) == 0);
+#endif
+
+ emitBackToBackJump(jumpStub, (void*) target);
+
+ if (isLCG)
+ {
+        // Always get a new jump stub for an LCG method. We don't share jump
+        // stubs among different LCG methods so that the jump stubs used by
+        // each LCG method can be cleaned up individually. There is not much
+        // benefit to sharing jump stubs within one LCG method anyway.
+ }
+ else
+ {
+ JumpStubCache * pJumpStubCache = (JumpStubCache *)pLoaderAllocator->m_pJumpStubCache;
+ _ASSERTE(pJumpStubCache != NULL);
+
+ JumpStubEntry entry;
+
+ entry.m_target = target;
+ entry.m_jumpStub = (PCODE)jumpStub;
+
+ pJumpStubCache->m_Table.Add(entry);
+ }
+
+ curBlock->m_used++;
+
+ RETURN((PCODE)jumpStub);
+}
+#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
+
+#ifdef FEATURE_PREJIT
+//***************************************************************************************
+//***************************************************************************************
+
+#ifndef DACCESS_COMPILE
+
+NativeImageJitManager::NativeImageJitManager()
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+PTR_VOID NativeImageJitManager::GetGCInfo(const METHODTOKEN& MethodToken)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ PTR_RUNTIME_FUNCTION pRuntimeFunction = dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader);
+ TADDR baseAddress = JitTokenToModuleBase(MethodToken);
+
+#ifndef DACCESS_COMPILE
+ if (g_IBCLogger.InstrEnabled())
+ {
+ PTR_NGenLayoutInfo pNgenLayout = JitTokenToZapModule(MethodToken)->GetNGenLayoutInfo();
+ PTR_MethodDesc pMD = NativeUnwindInfoLookupTable::GetMethodDesc(pNgenLayout, pRuntimeFunction, baseAddress);
+ g_IBCLogger.LogMethodGCInfoAccess(pMD);
+ }
+#endif
+
+ SIZE_T nUnwindDataSize;
+ PTR_VOID pUnwindData = GetUnwindDataBlob(baseAddress, pRuntimeFunction, &nUnwindDataSize);
+
+    // GC info immediately follows the unwind data
+ return dac_cast<PTR_BYTE>(pUnwindData) + nUnwindDataSize;
+}
+
+unsigned NativeImageJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ NGenLayoutInfo * pNgenLayout = JitTokenToZapModule(MethodToken)->GetNGenLayoutInfo();
+
+    // Early out if the method doesn't have the EH info bit set.
+ if (!NativeUnwindInfoLookupTable::HasExceptionInfo(pNgenLayout, PTR_RUNTIME_FUNCTION(MethodToken.m_pCodeHeader)))
+ return 0;
+
+ PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pExceptionLookupTable = dac_cast<PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE>(pNgenLayout->m_ExceptionInfoLookupTable.StartAddress());
+ _ASSERTE(pExceptionLookupTable != NULL);
+
+ SIZE_T size = pNgenLayout->m_ExceptionInfoLookupTable.Size();
+ COUNT_T numLookupTableEntries = (COUNT_T)(size / sizeof(CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY));
+    // at least 2 entries (1 valid entry + 1 sentinel entry)
+ _ASSERTE(numLookupTableEntries >= 2);
+
+ DWORD methodStartRVA = (DWORD)(JitTokenToStartAddress(MethodToken) - JitTokenToModuleBase(MethodToken));
+
+ COUNT_T ehInfoSize = 0;
+ DWORD exceptionInfoRVA = NativeExceptionInfoLookupTable::LookupExceptionInfoRVAForMethod(pExceptionLookupTable,
+ numLookupTableEntries,
+ methodStartRVA,
+ &ehInfoSize);
+ if (exceptionInfoRVA == 0)
+ return 0;
+
+ pEnumState->iCurrentPos = 0;
+ pEnumState->pExceptionClauseArray = JitTokenToModuleBase(MethodToken) + exceptionInfoRVA;
+
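+    // ehInfoSize is the byte size of this method's EH clause array (the
+    // distance to the next entry's EH info), so dividing by the clause size
+    // yields the number of EH clauses for this method.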
+ return ehInfoSize / sizeof(CORCOMPILE_EXCEPTION_CLAUSE);
+}
+
+PTR_EXCEPTION_CLAUSE_TOKEN NativeImageJitManager::GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
+ EE_ILEXCEPTION_CLAUSE* pEHClauseOut)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ unsigned iCurrentPos = pEnumState->iCurrentPos;
+ pEnumState->iCurrentPos++;
+
+ CORCOMPILE_EXCEPTION_CLAUSE* pClause = &(dac_cast<PTR_CORCOMPILE_EXCEPTION_CLAUSE>(pEnumState->pExceptionClauseArray)[iCurrentPos]);
+
+    // Copy to the caller's output parameter. This is a nice abstraction for the
+    // future: if we want to compress the clause encoding, we can do so without
+    // affecting the call sites.
+ pEHClauseOut->TryStartPC = pClause->TryStartPC;
+ pEHClauseOut->TryEndPC = pClause->TryEndPC;
+ pEHClauseOut->HandlerStartPC = pClause->HandlerStartPC;
+ pEHClauseOut->HandlerEndPC = pClause->HandlerEndPC;
+ pEHClauseOut->Flags = pClause->Flags;
+ pEHClauseOut->FilterOffset = pClause->FilterOffset;
+
+ return dac_cast<PTR_EXCEPTION_CLAUSE_TOKEN>(pClause);
+}
+
+#ifndef DACCESS_COMPILE
+
+TypeHandle NativeImageJitManager::ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
+ CrawlFrame* pCf)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ _ASSERTE(NULL != pCf);
+ _ASSERTE(NULL != pEHClause);
+ _ASSERTE(IsTypedHandler(pEHClause));
+
+ MethodDesc *pMD = PTR_MethodDesc(pCf->GetFunction());
+
+ _ASSERTE(pMD != NULL);
+
+ Module* pModule = pMD->GetModule();
+ PREFIX_ASSUME(pModule != NULL);
+
+ SigTypeContext typeContext(pMD);
+ VarKind k = hasNoVars;
+
+ mdToken typeTok = pEHClause->ClassToken;
+
+ // In the vast majority of cases the code under the "if" below
+ // will not be executed.
+ //
+ // First grab the representative instantiations. For code
+ // shared by multiple generic instantiations these are the
+ // canonical (representative) instantiation.
+ if (TypeFromToken(typeTok) == mdtTypeSpec)
+ {
+ PCCOR_SIGNATURE pSig;
+ ULONG cSig;
+ IfFailThrow(pModule->GetMDImport()->GetTypeSpecFromToken(typeTok, &pSig, &cSig));
+
+ SigPointer psig(pSig, cSig);
+ k = psig.IsPolyType(&typeContext);
+
+ // Grab the active class and method instantiation. This exact instantiation is only
+ // needed in the corner case of "generic" exception catching in shared
+ // generic code. We don't need the exact instantiation if the token
+ // doesn't contain E_T_VAR or E_T_MVAR.
+ if ((k & hasSharableVarsMask) != 0)
+ {
+ Instantiation classInst;
+ Instantiation methodInst;
+ pCf->GetExactGenericInstantiations(&classInst,&methodInst);
+ SigTypeContext::InitTypeContext(pMD,classInst, methodInst,&typeContext);
+ }
+ }
+
+ return ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, typeTok, &typeContext,
+ ClassLoader::ReturnNullIfNotFound);
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+//-----------------------------------------------------------------------------
+// Ngen info manager
+//-----------------------------------------------------------------------------
+BOOL NativeImageJitManager::GetBoundariesAndVars(
+ const DebugInfoRequest & request,
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ OUT ULONG32 * pcMap,
+ OUT ICorDebugInfo::OffsetMapping **ppMap,
+ OUT ULONG32 * pcVars,
+ OUT ICorDebugInfo::NativeVarInfo **ppVars)
+{
+ CONTRACTL {
+ THROWS; // on OOM.
+ GC_NOTRIGGER; // getting vars shouldn't trigger
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ // We want the module that the code is instantiated in, not necessarily the one
+ // that it was declared in. This only matters for ngen-generics.
+ MethodDesc * pMD = request.GetMD();
+ Module * pModule = pMD->GetZapModule();
+ PREFIX_ASSUME(pModule != NULL);
+
+ PTR_BYTE pDebugInfo = pModule->GetNativeDebugInfo(pMD);
+
+ // No header created, which means no jit information is available.
+ if (pDebugInfo == NULL)
+ return FALSE;
+
+ // Uncompress. This allocates memory and may throw.
+ CompressDebugInfo::RestoreBoundariesAndVars(
+ fpNew, pNewData, // allocators
+ pDebugInfo, // input
+ pcMap, ppMap,
+ pcVars, ppVars); // output
+
+ return TRUE;
+}
+
+#ifdef DACCESS_COMPILE
+//
+// Need to write out debug info
+//
+void NativeImageJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD)
+{
+ SUPPORTS_DAC;
+
+ Module * pModule = pMD->GetZapModule();
+ PREFIX_ASSUME(pModule != NULL);
+ PTR_BYTE pDebugInfo = pModule->GetNativeDebugInfo(pMD);
+
+ if (pDebugInfo != NULL)
+ {
+ CompressDebugInfo::EnumMemoryRegions(flags, pDebugInfo);
+ }
+}
+#endif
+
+PCODE NativeImageJitManager::GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset)
+{
+ WRAPPER_NO_CONTRACT;
+
+ MethodRegionInfo methodRegionInfo;
+ JitTokenToMethodRegionInfo(MethodToken, &methodRegionInfo);
+
+ if (relOffset < methodRegionInfo.hotSize)
+ return methodRegionInfo.hotStartAddress + relOffset;
+
+ SIZE_T coldOffset = relOffset - methodRegionInfo.hotSize;
+ _ASSERTE(coldOffset < methodRegionInfo.coldSize);
+ return methodRegionInfo.coldStartAddress + coldOffset;
+}
+
+BOOL NativeImageJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection,
+ PCODE currentPC,
+ MethodDesc** ppMethodDesc,
+ EECodeInfo * pCodeInfo)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ TADDR currentInstr = PCODEToPINSTR(currentPC);
+
+ Module * pModule = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule);
+
+ NGenLayoutInfo * pLayoutInfo = pModule->GetNGenLayoutInfo();
+ DWORD iRange = 0;
+
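+    // The NGen image carries up to three code sections. Section 2 is the cold
+    // code section (it has its own cold code map, used below); sections 0 and
+    // 1 hold the remaining (typically hot and unprofiled) method bodies.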
+ if (pLayoutInfo->m_CodeSections[0].IsInRange(currentInstr))
+ {
+ iRange = 0;
+ }
+ else
+ if (pLayoutInfo->m_CodeSections[1].IsInRange(currentInstr))
+ {
+ iRange = 1;
+ }
+ else
+ if (pLayoutInfo->m_CodeSections[2].IsInRange(currentInstr))
+ {
+ iRange = 2;
+ }
+ else
+ {
+ return FALSE;
+ }
+
+ TADDR ImageBase = pRangeSection->LowAddress;
+
+ DWORD RelativePc = (DWORD)(currentInstr - ImageBase);
+
+ PTR_RUNTIME_FUNCTION FunctionEntry;
+
+ if (iRange == 2)
+ {
+ int ColdMethodIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(RelativePc,
+ pLayoutInfo->m_pRuntimeFunctions[2],
+ 0,
+ pLayoutInfo->m_nRuntimeFunctions[2] - 1);
+
+ if (ColdMethodIndex < 0)
+ return FALSE;
+
+#ifdef WIN64EXCEPTIONS
+ // Save the raw entry
+ int RawColdMethodIndex = ColdMethodIndex;
+
+ PTR_CORCOMPILE_COLD_METHOD_ENTRY pColdCodeMap = pLayoutInfo->m_ColdCodeMap;
+
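+        // Cold code map entries with a zero mainFunctionEntryRVA belong to
+        // funclets or fragments; walk backwards to the entry for the main cold
+        // body, which records the RVA of the method's main function entry.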
+ while (pColdCodeMap[ColdMethodIndex].mainFunctionEntryRVA == 0)
+ ColdMethodIndex--;
+
+ FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(ImageBase + pColdCodeMap[ColdMethodIndex].mainFunctionEntryRVA);
+#else
+ DWORD ColdUnwindData = pLayoutInfo->m_pRuntimeFunctions[2][ColdMethodIndex].UnwindData;
+ _ASSERTE((ColdUnwindData & RUNTIME_FUNCTION_INDIRECT) != 0);
+ FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(ImageBase + (ColdUnwindData & ~RUNTIME_FUNCTION_INDIRECT));
+#endif
+
+ if (ppMethodDesc)
+ {
+ DWORD methodDescRVA;
+
+ COUNT_T iIndex = (COUNT_T)(FunctionEntry - pLayoutInfo->m_pRuntimeFunctions[0]);
+ if (iIndex >= pLayoutInfo->m_nRuntimeFunctions[0])
+ {
+ iIndex = (COUNT_T)(FunctionEntry - pLayoutInfo->m_pRuntimeFunctions[1]);
+ _ASSERTE(iIndex < pLayoutInfo->m_nRuntimeFunctions[1]);
+ methodDescRVA = pLayoutInfo->m_MethodDescs[1][iIndex];
+ }
+ else
+ {
+ methodDescRVA = pLayoutInfo->m_MethodDescs[0][iIndex];
+ }
+ _ASSERTE(methodDescRVA != NULL);
+
+ // Note that the MethodDesc does not have to be restored. (It happens when we are called
+ // from SetupGcCoverageForNativeMethod.)
+ *ppMethodDesc = PTR_MethodDesc((methodDescRVA & ~HAS_EXCEPTION_INFO_MASK) + ImageBase);
+ }
+
+ if (pCodeInfo)
+ {
+ PTR_RUNTIME_FUNCTION ColdFunctionTable = pLayoutInfo->m_pRuntimeFunctions[2];
+
+ PTR_RUNTIME_FUNCTION ColdFunctionEntry = ColdFunctionTable + ColdMethodIndex;
+ DWORD coldCodeOffset = (DWORD)(RelativePc - RUNTIME_FUNCTION__BeginAddress(ColdFunctionEntry));
+ pCodeInfo->m_relOffset = pLayoutInfo->m_ColdCodeMap[ColdMethodIndex].hotCodeSize + coldCodeOffset;
+
+ // We are using RUNTIME_FUNCTION as METHODTOKEN
+ pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(FunctionEntry));
+
+#ifdef WIN64EXCEPTIONS
+ PTR_RUNTIME_FUNCTION RawColdFunctionEntry = ColdFunctionTable + RawColdMethodIndex;
+#ifdef _TARGET_AMD64_
+ if ((RawColdFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) != 0)
+ {
+ RawColdFunctionEntry = PTR_RUNTIME_FUNCTION(ImageBase + (RawColdFunctionEntry->UnwindData & ~RUNTIME_FUNCTION_INDIRECT));
+ }
+#endif // _TARGET_AMD64_
+ pCodeInfo->m_pFunctionEntry = RawColdFunctionEntry;
+#endif
+ }
+ }
+ else
+ {
+ PTR_DWORD pRuntimeFunctionLookupTable = dac_cast<PTR_DWORD>(pLayoutInfo->m_UnwindInfoLookupTable[iRange]);
+
+ _ASSERTE(pRuntimeFunctionLookupTable != NULL);
+
+ DWORD RelativeToCodeStart = (DWORD)(currentInstr - dac_cast<TADDR>(pLayoutInfo->m_CodeSections[iRange].StartAddress()));
+ COUNT_T iStrideIndex = RelativeToCodeStart / RUNTIME_FUNCTION_LOOKUP_STRIDE;
+
+        // The lookup table may not be big enough to cover the entire code range if there was padding inserted during NGen image layout.
+        // In that case the last lookup table entry covers the rest of the code range.
+ if (iStrideIndex >= pLayoutInfo->m_UnwindInfoLookupTableEntryCount[iRange])
+ iStrideIndex = pLayoutInfo->m_UnwindInfoLookupTableEntryCount[iRange] - 1;
+
+ int Low = pRuntimeFunctionLookupTable[iStrideIndex];
+ int High = pRuntimeFunctionLookupTable[iStrideIndex+1];
+
+ PTR_RUNTIME_FUNCTION FunctionTable = pLayoutInfo->m_pRuntimeFunctions[iRange];
+ PTR_DWORD pMethodDescs = pLayoutInfo->m_MethodDescs[iRange];
+
+ int MethodIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(RelativePc,
+ FunctionTable,
+ Low,
+ High);
+
+ if (MethodIndex < 0)
+ return FALSE;
+
+#ifdef WIN64EXCEPTIONS
+ // Save the raw entry
+        PTR_RUNTIME_FUNCTION RawFunctionEntry = FunctionTable + MethodIndex;
+
+ // Skip funclets to get the method desc
+ while (pMethodDescs[MethodIndex] == 0)
+ MethodIndex--;
+#endif
+
+ FunctionEntry = FunctionTable + MethodIndex;
+
+ if (ppMethodDesc)
+ {
+ DWORD methodDescRVA = pMethodDescs[MethodIndex];
+ _ASSERTE(methodDescRVA != NULL);
+
+ // Note that the MethodDesc does not have to be restored. (It happens when we are called
+ // from SetupGcCoverageForNativeMethod.)
+ *ppMethodDesc = PTR_MethodDesc((methodDescRVA & ~HAS_EXCEPTION_INFO_MASK) + ImageBase);
+
+            // We are likely executing the code already or going to execute it soon. However, there are
+            // a few cases, like code:MethodTable::GetMethodDescForSlot, where that is not the case. Log
+            // the code access here to keep those cases from touching cold code maps.
+ g_IBCLogger.LogMethodCodeAccess(*ppMethodDesc);
+ }
+
+        // Get the function entry that corresponds to the real method desc.
+ _ASSERTE(RelativePc >= RUNTIME_FUNCTION__BeginAddress(FunctionEntry));
+
+ if (pCodeInfo)
+ {
+ pCodeInfo->m_relOffset = (DWORD)
+ (RelativePc - RUNTIME_FUNCTION__BeginAddress(FunctionEntry));
+
+ // We are using RUNTIME_FUNCTION as METHODTOKEN
+ pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(FunctionEntry));
+
+#ifdef WIN64EXCEPTIONS
+ AMD64_ONLY(_ASSERTE((RawFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0));
+ pCodeInfo->m_pFunctionEntry = RawFunctionEntry;
+#endif
+ }
+ }
+
+ return TRUE;
+}
+
+#if defined(WIN64EXCEPTIONS)
+PTR_RUNTIME_FUNCTION NativeImageJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ if (!pCodeInfo->IsValid())
+ {
+ return NULL;
+ }
+
+ // code:NativeImageJitManager::JitCodeToMethodInfo computes PTR_RUNTIME_FUNCTION eagerly. This path is only
+ // reachable via EECodeInfo::GetMainFunctionInfo, and so we can just return the main entry.
+ _ASSERTE(pCodeInfo->GetRelOffset() == 0);
+
+ return dac_cast<PTR_RUNTIME_FUNCTION>(pCodeInfo->GetMethodToken().m_pCodeHeader);
+}
+
+TADDR NativeImageJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ NGenLayoutInfo * pLayoutInfo = JitTokenToZapModule(pCodeInfo->GetMethodToken())->GetNGenLayoutInfo();
+
+ if (pLayoutInfo->m_CodeSections[2].IsInRange(pCodeInfo->GetCodeAddress()))
+ {
+ // If the address is in the cold section, then we assume it is cold main function
+ // code, NOT a funclet. So, don't do the backward walk: just return the start address
+ // of the main function.
+ // @ARMTODO: Handle hot/cold splitting with EH funclets
+ return pCodeInfo->GetStartAddress();
+ }
+#endif
+
+ return IJitManager::GetFuncletStartAddress(pCodeInfo);
+}
+
+static void GetFuncletStartOffsetsHelper(PCODE pCodeStart, SIZE_T size, SIZE_T ofsAdj,
+ PTR_RUNTIME_FUNCTION pFunctionEntry, TADDR moduleBase,
+ DWORD * pnFunclets, DWORD* pStartFuncletOffsets, DWORD dwLength)
+{
+ _ASSERTE(FitsInU4((pCodeStart + size) - moduleBase));
+ DWORD endAddress = (DWORD)((pCodeStart + size) - moduleBase);
+
+ // Entries are sorted and terminated by sentinel value (DWORD)-1
+ for ( ; RUNTIME_FUNCTION__BeginAddress(pFunctionEntry) < endAddress; pFunctionEntry++)
+ {
+#ifdef _TARGET_AMD64_
+ _ASSERTE((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0);
+#endif
+
+#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
+ if (IsFunctionFragment(moduleBase, pFunctionEntry))
+ {
+ // This is a fragment (not the funclet beginning); skip it
+ continue;
+ }
+#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
+
+ if (*pnFunclets < dwLength)
+ {
+ TADDR funcletStartAddress = (moduleBase + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry)) + ofsAdj;
+ _ASSERTE(FitsInU4(funcletStartAddress - pCodeStart));
+ pStartFuncletOffsets[*pnFunclets] = (DWORD)(funcletStartAddress - pCodeStart);
+ }
+ (*pnFunclets)++;
+ }
+}
+
+DWORD NativeImageJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ PTR_RUNTIME_FUNCTION pFirstFuncletFunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader) + 1;
+
+ TADDR moduleBase = JitTokenToModuleBase(MethodToken);
+ DWORD nFunclets = 0;
+ MethodRegionInfo regionInfo;
+ JitTokenToMethodRegionInfo(MethodToken, &regionInfo);
+
+ // pFirstFuncletFunctionEntry will work for ARM when passed to GetFuncletStartOffsetsHelper()
+ // even if it is a fragment of the main body and not a RUNTIME_FUNCTION for the beginning
+ // of the first hot funclet, because GetFuncletStartOffsetsHelper() will skip all the function
+ // fragments until the first funclet, if any, is found.
+
+ GetFuncletStartOffsetsHelper(regionInfo.hotStartAddress, regionInfo.hotSize, 0,
+ pFirstFuncletFunctionEntry, moduleBase,
+ &nFunclets, pStartFuncletOffsets, dwLength);
+
+    // There are no funclets in the cold section on ARM yet
+ // @ARMTODO: support hot/cold splitting in functions with EH
+#if !defined(_TARGET_ARM_) && !defined(_TARGET_ARM64_)
+ if (regionInfo.coldSize != NULL)
+ {
+ NGenLayoutInfo * pLayoutInfo = JitTokenToZapModule(MethodToken)->GetNGenLayoutInfo();
+
+ int iColdMethodIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(
+ (DWORD)(regionInfo.coldStartAddress - moduleBase),
+ pLayoutInfo->m_pRuntimeFunctions[2],
+ 0,
+ pLayoutInfo->m_nRuntimeFunctions[2] - 1);
+
+ PTR_RUNTIME_FUNCTION pFunctionEntry = pLayoutInfo->m_pRuntimeFunctions[2] + iColdMethodIndex;
+
+ _ASSERTE(regionInfo.coldStartAddress == moduleBase + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry));
+
+#ifdef _TARGET_AMD64_
+ // Skip cold part of the method body
+ if ((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) != 0)
+ pFunctionEntry++;
+#endif
+
+ GetFuncletStartOffsetsHelper(regionInfo.coldStartAddress, regionInfo.coldSize, regionInfo.hotSize,
+ pFunctionEntry, moduleBase,
+ &nFunclets, pStartFuncletOffsets, dwLength);
+ }
+#endif // !_TARGET_ARM_ && !_TARGET_ARM64_
+
+ return nFunclets;
+}
+
+BOOL NativeImageJitManager::IsFilterFunclet(EECodeInfo * pCodeInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!pCodeInfo->IsFunclet())
+ return FALSE;
+
+ //
+    // The generic IsFilterFunclet implementation touches the exception handling
+    // tables. That is bad for the working set, because it is sometimes called
+    // during GC stackwalks. The optimized version for native images does not
+    // touch the exception handling tables.
+ //
+
+ NGenLayoutInfo * pLayoutInfo = JitTokenToZapModule(pCodeInfo->GetMethodToken())->GetNGenLayoutInfo();
+
+ SIZE_T size;
+ PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pCodeInfo->GetFunctionEntry(), &size);
+ _ASSERTE(pUnwindData != NULL);
+
+ // Personality routine is always the last element of the unwind data
+ DWORD rvaPersonalityRoutine = *(dac_cast<PTR_DWORD>(dac_cast<TADDR>(pUnwindData) + size) - 1);
+
+ BOOL fRet = (pLayoutInfo->m_rvaFilterPersonalityRoutine == rvaPersonalityRoutine);
+
+ // Verify that the optimized implementation is in sync with the slow implementation
+ _ASSERTE(fRet == IJitManager::IsFilterFunclet(pCodeInfo));
+
+ return fRet;
+}
+
+#endif // WIN64EXCEPTIONS
+
+StubCodeBlockKind NativeImageJitManager::GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Module * pZapModule = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule);
+
+ if (pZapModule->IsZappedPrecode(currentPC))
+ {
+ return STUB_CODE_BLOCK_PRECODE;
+ }
+
+ NGenLayoutInfo * pLayoutInfo = pZapModule->GetNGenLayoutInfo();
+ _ASSERTE(pLayoutInfo != NULL);
+
+ if (pLayoutInfo->m_JumpStubs.IsInRange(currentPC))
+ {
+ return STUB_CODE_BLOCK_JUMPSTUB;
+ }
+
+ if (pLayoutInfo->m_StubLinkStubs.IsInRange(currentPC))
+ {
+ return STUB_CODE_BLOCK_STUBLINK;
+ }
+
+ if (pLayoutInfo->m_VirtualMethodThunks.IsInRange(currentPC))
+ {
+ return STUB_CODE_BLOCK_VIRTUAL_METHOD_THUNK;
+ }
+
+ if (pLayoutInfo->m_ExternalMethodThunks.IsInRange(currentPC))
+ {
+ return STUB_CODE_BLOCK_EXTERNAL_METHOD_THUNK;
+ }
+
+ return STUB_CODE_BLOCK_UNKNOWN;
+}
+
+PTR_Module NativeImageJitManager::JitTokenToZapModule(const METHODTOKEN& MethodToken)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<PTR_Module>(MethodToken.m_pRangeSection->pHeapListOrZapModule);
+}
+void NativeImageJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken,
+ MethodRegionInfo * methodRegionInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ _ASSERTE(methodRegionInfo != NULL);
+
+ //
+ // Initialize methodRegionInfo assuming that the method is entirely hot. This is the common
+ // case (either binary is not procedure split or the current method is all hot). We can
+ // adjust these values later if necessary.
+ //
+
+ methodRegionInfo->hotStartAddress = JitTokenToStartAddress(MethodToken);
+ methodRegionInfo->hotSize = GetCodeManager()->GetFunctionSize(GetGCInfo(MethodToken));
+ methodRegionInfo->coldStartAddress = 0;
+ methodRegionInfo->coldSize = 0;
+
+ RangeSection *rangeSection = MethodToken.m_pRangeSection;
+ PREFIX_ASSUME(rangeSection != NULL);
+
+ Module * pModule = dac_cast<PTR_Module>(rangeSection->pHeapListOrZapModule);
+
+ NGenLayoutInfo * pLayoutInfo = pModule->GetNGenLayoutInfo();
+
+ //
+ // If this module is not procedure split, then we're done.
+ //
+ if (pLayoutInfo->m_CodeSections[2].Size() == 0)
+ return;
+
+ //
+ // Perform a binary search in the cold range section until we find our method
+ //
+
+ TADDR ImageBase = rangeSection->LowAddress;
+
+ int Low = 0;
+ int High = pLayoutInfo->m_nRuntimeFunctions[2] - 1;
+
+ PTR_RUNTIME_FUNCTION pRuntimeFunctionTable = pLayoutInfo->m_pRuntimeFunctions[2];
+ PTR_CORCOMPILE_COLD_METHOD_ENTRY pColdCodeMap = pLayoutInfo->m_ColdCodeMap;
+
+ while (Low <= High)
+ {
+ int Middle = Low + (High - Low) / 2;
+
+ int ColdMethodIndex = Middle;
+
+ PTR_RUNTIME_FUNCTION FunctionEntry;
+
+#ifdef WIN64EXCEPTIONS
+ while (pColdCodeMap[ColdMethodIndex].mainFunctionEntryRVA == 0)
+ ColdMethodIndex--;
+
+ FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(ImageBase + pColdCodeMap[ColdMethodIndex].mainFunctionEntryRVA);
+#else
+ DWORD ColdUnwindData = pRuntimeFunctionTable[ColdMethodIndex].UnwindData;
+ _ASSERTE((ColdUnwindData & RUNTIME_FUNCTION_INDIRECT) != 0);
+ FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(ImageBase + (ColdUnwindData & ~RUNTIME_FUNCTION_INDIRECT));
+#endif
+
+ if (FunctionEntry == dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader))
+ {
+ PTR_RUNTIME_FUNCTION ColdFunctionEntry = pRuntimeFunctionTable + ColdMethodIndex;
+
+ methodRegionInfo->coldStartAddress = ImageBase + RUNTIME_FUNCTION__BeginAddress(ColdFunctionEntry);
+
+ //
+ // At this point methodRegionInfo->hotSize is set to the total size of
+ // the method obtained from the GC info (we set that in the init code above).
+            // Use that and pColdCodeMap[ColdMethodIndex].hotCodeSize to compute the hot and cold code sizes.
+ //
+
+ ULONG hotCodeSize = pColdCodeMap[ColdMethodIndex].hotCodeSize;
+
+ methodRegionInfo->coldSize = methodRegionInfo->hotSize - hotCodeSize;
+ methodRegionInfo->hotSize = hotCodeSize;
+
+ return;
+ }
+ else if (FunctionEntry < dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader))
+ {
+ Low = Middle + 1;
+ }
+ else
+ {
+ // Use ColdMethodIndex to take advantage of entries skipped while looking for method start
+ High = ColdMethodIndex - 1;
+ }
+ }
+
+ //
+ // We didn't find it. Therefore this method doesn't have a cold section.
+ //
+
+ return;
+}
+
+#ifdef DACCESS_COMPILE
+
+void NativeImageJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ IJitManager::EnumMemoryRegions(flags);
+}
+
+#if defined(WIN64EXCEPTIONS)
+
+//
+// To locate an entry in the function entry table (the program exceptions data directory), the debugger
+// performs a binary search over the table. This function reports the entries that are encountered in the
+// binary search.
+//
+// Parameters:
+// pRtf: The target function table entry to be located
+// pNativeLayout: A pointer to the loaded native layout for the module containing pRtf
+//
+static void EnumRuntimeFunctionEntriesToFindEntry(PTR_RUNTIME_FUNCTION pRtf, PTR_PEImageLayout pNativeLayout)
+{
+ pRtf.EnumMem();
+
+ if (pNativeLayout == NULL)
+ {
+ return;
+ }
+
+ IMAGE_DATA_DIRECTORY * pProgramExceptionsDirectory = pNativeLayout->GetDirectoryEntry(IMAGE_DIRECTORY_ENTRY_EXCEPTION);
+ if (!pProgramExceptionsDirectory ||
+ (pProgramExceptionsDirectory->Size == 0) ||
+ (pProgramExceptionsDirectory->Size % sizeof(RUNTIME_FUNCTION) != 0))
+ {
+        // Program exceptions directory malformed
+ return;
+ }
+
+ PTR_BYTE moduleBase(pNativeLayout->GetBase());
+ PTR_RUNTIME_FUNCTION firstFunctionEntry(moduleBase + pProgramExceptionsDirectory->VirtualAddress);
+
+ if (pRtf < firstFunctionEntry ||
+ ((dac_cast<TADDR>(pRtf) - dac_cast<TADDR>(firstFunctionEntry)) % sizeof(RUNTIME_FUNCTION) != 0))
+ {
+ // Program exceptions directory malformatted
+        // Program exceptions directory malformed
+ }
+
+// Review conversion of size_t to ULONG.
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable:4267)
+#endif // defined(_MSC_VER)
+
+ ULONG indexToLocate = pRtf - firstFunctionEntry;
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif // defined(_MSC_VER)
+
+ ULONG low = 0; // index in the function entry table of low end of search range
+ ULONG high = (pProgramExceptionsDirectory->Size)/sizeof(RUNTIME_FUNCTION) - 1; // index of high end of search range
+ ULONG mid = (low + high) /2; // index of entry to be compared
+
+ if (indexToLocate > high)
+ {
+ return;
+ }
+
+ while (indexToLocate != mid)
+ {
+ PTR_RUNTIME_FUNCTION functionEntry = firstFunctionEntry + mid;
+ functionEntry.EnumMem();
+ if (indexToLocate > mid)
+ {
+ low = mid + 1;
+ }
+ else
+ {
+ high = mid - 1;
+ }
+ mid = (low + high) /2;
+ _ASSERTE( low <= mid && mid <= high );
+ }
+}
+
+//
+// EnumMemoryRegionsForMethodUnwindInfo - enumerate the memory necessary to read the unwind info for the
+// specified method.
+//
+// Note that in theory, a dump generation library could save the unwind information itself without help
+// from us, since it's stored in the image in the standard function table layout for Win64. However,
+// dump-generation libraries assume that the image will be available at debug time, and if the image
+// isn't available then it is acceptable for stackwalking to break. For ngen images (which are created
+// on the client), it usually isn't possible to have the image available at debug time, and so for minidumps
+// we must explicitly ensure the unwind information is saved into the dump.
+//
+// Arguments:
+// flags - EnumMem flags
+//    pCodeInfo - EECodeInfo for the method in question
+//
+void NativeImageJitManager::EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo)
+{
+ // Get the RUNTIME_FUNCTION entry for this method
+ PTR_RUNTIME_FUNCTION pRtf = pCodeInfo->GetFunctionEntry();
+
+ if (pRtf==NULL)
+ {
+ return;
+ }
+
+ // Enumerate the function entry and other entries needed to locate it in the program exceptions directory
+ Module * pModule = JitTokenToZapModule(pCodeInfo->GetMethodToken());
+ EnumRuntimeFunctionEntriesToFindEntry(pRtf, pModule->GetFile()->GetLoadedNative());
+
+ SIZE_T size;
+ PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pRtf, &size);
+ if (pUnwindData != NULL)
+ DacEnumMemoryRegion(PTR_TO_TADDR(pUnwindData), size);
+}
+
+#endif //WIN64EXCEPTIONS
+#endif // #ifdef DACCESS_COMPILE
+
+// Return start of exception info for a method, or 0 if the method has no EH info
+DWORD NativeExceptionInfoLookupTable::LookupExceptionInfoRVAForMethod(PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pExceptionLookupTable,
+ COUNT_T numLookupEntries,
+ DWORD methodStartRVA,
+ COUNT_T* pSize)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ _ASSERTE(pExceptionLookupTable != NULL);
+
+ COUNT_T start = 0;
+ COUNT_T end = numLookupEntries - 2;
+
+    // The last entry in the lookup table (at index numLookupEntries - 1) is a
+    // sentinel entry; it exists so the number of EH clauses for the last real
+    // entry can be computed, which is why 'end' stops at numLookupEntries - 2.
+    _ASSERTE(pExceptionLookupTable->ExceptionLookupEntry(numLookupEntries-1)->MethodStartRVA == (DWORD)-1);
+
+    // Binary search the lookup table.
+    // Linear search is faster once we get down to a small number of entries.
+ while (end - start > 10)
+ {
+ COUNT_T middle = start + (end - start) / 2;
+
+ _ASSERTE(start < middle && middle < end);
+
+ DWORD rva = pExceptionLookupTable->ExceptionLookupEntry(middle)->MethodStartRVA;
+
+ if (methodStartRVA < rva)
+ {
+ end = middle - 1;
+ }
+ else
+ {
+ start = middle;
+ }
+ }
+
+ for (COUNT_T i = start; i <= end; ++i)
+ {
+ DWORD rva = pExceptionLookupTable->ExceptionLookupEntry(i)->MethodStartRVA;
+ if (methodStartRVA == rva)
+ {
+ CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY *pEntry = pExceptionLookupTable->ExceptionLookupEntry(i);
+
+            // Get the count of EH clause entries
+ CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY * pNextEntry = pExceptionLookupTable->ExceptionLookupEntry(i + 1);
+ *pSize = pNextEntry->ExceptionInfoRVA - pEntry->ExceptionInfoRVA;
+
+ return pEntry->ExceptionInfoRVA;
+ }
+ }
+
+ // Not found
+ return 0;
+}
+
+int NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(DWORD RelativePc,
+ PTR_RUNTIME_FUNCTION pRuntimeFunctionTable,
+ int Low,
+ int High)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+
+#ifdef _TARGET_ARM_
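+    // Thumb code addresses have the low bit set, and the BeginAddress values
+    // in the runtime function table carry that bit, so set it on RelativePc
+    // before comparing.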
+ RelativePc |= THUMB_CODE;
+#endif
+
+ // Entries are sorted and terminated by sentinel value (DWORD)-1
+
+ // Binary search the RUNTIME_FUNCTION table
+    // Use linear search once we get down to a small number of elements
+    // to avoid binary search overhead.
+ while (High - Low > 10)
+ {
+ int Middle = Low + (High - Low) / 2;
+
+ PTR_RUNTIME_FUNCTION pFunctionEntry = pRuntimeFunctionTable + Middle;
+ if (RelativePc < pFunctionEntry->BeginAddress)
+ {
+ High = Middle - 1;
+ }
+ else
+ {
+ Low = Middle;
+ }
+ }
+
+ for (int i = Low; i <= High; ++i)
+ {
+        // This is safe because the entries are terminated by the sentinel value (DWORD)-1
+ PTR_RUNTIME_FUNCTION pNextFunctionEntry = pRuntimeFunctionTable + (i + 1);
+
+ if (RelativePc < pNextFunctionEntry->BeginAddress)
+ {
+ PTR_RUNTIME_FUNCTION pFunctionEntry = pRuntimeFunctionTable + i;
+ if (RelativePc >= pFunctionEntry->BeginAddress)
+ {
+ return i;
+ }
+ break;
+ }
+ }
+
+ return -1;
+}
+
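+// The per-section MethodDesc RVA tables overload the low bit of each RVA as a
+// "has exception info" flag (HAS_EXCEPTION_INFO_MASK); the flag is masked off
+// before an RVA is turned into a MethodDesc pointer.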
+BOOL NativeUnwindInfoLookupTable::HasExceptionInfo(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ DWORD methodDescRVA = NativeUnwindInfoLookupTable::GetMethodDescRVA(pNgenLayout, pMainRuntimeFunction);
+ return (methodDescRVA & HAS_EXCEPTION_INFO_MASK);
+}
+
+PTR_MethodDesc NativeUnwindInfoLookupTable::GetMethodDesc(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction, TADDR moduleBase)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ DWORD methodDescRVA = NativeUnwindInfoLookupTable::GetMethodDescRVA(pNgenLayout, pMainRuntimeFunction);
+ return PTR_MethodDesc((methodDescRVA & ~HAS_EXCEPTION_INFO_MASK) + moduleBase);
+}
+
+DWORD NativeUnwindInfoLookupTable::GetMethodDescRVA(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ COUNT_T iIndex = (COUNT_T)(pMainRuntimeFunction - pNgenLayout->m_pRuntimeFunctions[0]);
+ DWORD rva = 0;
+ if (iIndex >= pNgenLayout->m_nRuntimeFunctions[0])
+ {
+ iIndex = (COUNT_T)(pMainRuntimeFunction - pNgenLayout->m_pRuntimeFunctions[1]);
+ _ASSERTE(iIndex < pNgenLayout->m_nRuntimeFunctions[1]);
+ rva = pNgenLayout->m_MethodDescs[1][iIndex];
+ }
+ else
+ {
+ rva = pNgenLayout->m_MethodDescs[0][iIndex];
+ }
+ _ASSERTE(rva != 0);
+
+ return rva;
+}
+
+#endif // FEATURE_PREJIT
+
+#ifndef DACCESS_COMPILE
+
+//-----------------------------------------------------------------------------
+
+
+// Nirvana Support
+
+MethodDesc* __stdcall Nirvana_FindMethodDesc(PCODE ptr, BYTE*& hotStartAddress, size_t& hotSize, BYTE*& coldStartAddress, size_t & coldSize)
+{
+ EECodeInfo codeInfo(ptr);
+ if (!codeInfo.IsValid())
+ return NULL;
+
+ IJitManager::MethodRegionInfo methodRegionInfo;
+ codeInfo.GetMethodRegionInfo(&methodRegionInfo);
+
+ hotStartAddress = (BYTE*)methodRegionInfo.hotStartAddress;
+ hotSize = methodRegionInfo.hotSize;
+ coldStartAddress = (BYTE*)methodRegionInfo.coldStartAddress;
+ coldSize = methodRegionInfo.coldSize;
+
+ return codeInfo.GetMethodDesc();
+}
+
+
+bool Nirvana_GetMethodInfo(MethodDesc * pMD, BYTE*& hotStartAddress, size_t& hotSize, BYTE*& coldStartAddress, size_t & coldSize)
+{
+ EECodeInfo codeInfo(pMD->GetNativeCode());
+ if (!codeInfo.IsValid())
+ return false;
+
+ IJitManager::MethodRegionInfo methodRegionInfo;
+ codeInfo.GetMethodRegionInfo(&methodRegionInfo);
+
+ hotStartAddress = (BYTE*)methodRegionInfo.hotStartAddress;
+ hotSize = methodRegionInfo.hotSize;
+ coldStartAddress = (BYTE*)methodRegionInfo.coldStartAddress;
+ coldSize = methodRegionInfo.coldSize;
+
+ return true;
+}
+
+
+#include "sigformat.h"
+
+__forceinline bool Nirvana_PrintMethodDescWorker(__in_ecount(iBuffer) char * szBuffer, size_t iBuffer, MethodDesc * pMD, const char * pSigString)
+{
+ if (iBuffer == 0)
+ return false;
+
+ szBuffer[0] = '\0';
+ pSigString = strchr(pSigString, ' ');
+
+ if (pSigString == NULL)
+ return false;
+
+ ++pSigString;
+
+ LPCUTF8 pNamespace;
+ LPCUTF8 pClassName = pMD->GetMethodTable()->GetFullyQualifiedNameInfo(&pNamespace);
+
+ if (pClassName == NULL)
+ return false;
+
+ if (*pNamespace != 0)
+ {
+ if(FAILED(StringCchPrintfA(szBuffer, iBuffer, "%s.%s.%s", pNamespace, pClassName, pSigString)))
+ return false;
+ }
+ else
+ {
+ if(FAILED(StringCchPrintfA(szBuffer, iBuffer, "%s.%s", pClassName, pSigString)))
+ return false;
+ }
+
+ _ASSERTE(szBuffer[0] != '\0');
+
+ return true;
+}
+
+bool __stdcall Nirvana_PrintMethodDesc(__in_ecount(iBuffer) char * szBuffer, size_t iBuffer, MethodDesc * pMD)
+{
+ bool fResult = false;
+
+ EX_TRY
+ {
+ NewHolder<SigFormat> pSig = new SigFormat(pMD, NULL, false);
+ fResult = Nirvana_PrintMethodDescWorker(szBuffer, iBuffer, pMD, pSig->GetCString());
+ }
+ EX_CATCH
+ {
+ fResult = false;
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return fResult;
+}
+
+
+// Nirvana_Dummy() is a dummy function that is exported privately by ordinal only.
+// The sole purpose of this function is to reference Nirvana_FindMethodDesc(),
+// Nirvana_GetMethodInfo(), and Nirvana_PrintMethodDesc() so that they are not
+// inlined or removed by the compiler or the linker.
+
+DWORD __stdcall Nirvana_Dummy()
+{
+ LIMITED_METHOD_CONTRACT;
+ void * funcs[] = {
+ (void*)Nirvana_FindMethodDesc,
+ (void*)Nirvana_GetMethodInfo,
+ (void*)Nirvana_PrintMethodDesc
+ };
+
+ size_t n = sizeof(funcs) / sizeof(funcs[0]);
+
+ size_t sum = 0;
+ for (size_t i = 0; i < n; ++i)
+ sum += (size_t)funcs[i];
+
+ return (DWORD)sum;
+}
+
+
+#endif // #ifndef DACCESS_COMPILE
+
+
+#ifdef FEATURE_PREJIT
+
+MethodIterator::MethodIterator(PTR_Module pModule, MethodIteratorOptions mio)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ Init(pModule, pModule->GetNativeImage(), mio);
+}
+
+MethodIterator::MethodIterator(PTR_Module pModule, PEDecoder * pPEDecoder, MethodIteratorOptions mio)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ Init(pModule, pPEDecoder, mio);
+}
+
+void MethodIterator::Init(PTR_Module pModule, PEDecoder * pPEDecoder, MethodIteratorOptions mio)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ m_ModuleBase = dac_cast<TADDR>(pPEDecoder->GetBase());
+
+ methodIteratorOptions = mio;
+
+ m_pNgenLayout = pModule->GetNGenLayoutInfo();
+
+ m_fHotMethodsDone = FALSE;
+ m_CurrentRuntimeFunctionIndex = -1;
+ m_CurrentColdRuntimeFunctionIndex = 0;
+}
+
+BOOL MethodIterator::Next()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ m_CurrentRuntimeFunctionIndex++;
+
+ if (!m_fHotMethodsDone)
+ {
+ //iterate the hot methods
+ if (methodIteratorOptions & Hot)
+ {
+#ifdef WIN64EXCEPTIONS
+ // Skip to the next method, stepping over method fragments and funclets.
+ while (m_CurrentRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[0])
+ {
+ if (m_pNgenLayout->m_MethodDescs[0][m_CurrentRuntimeFunctionIndex] != 0)
+ return TRUE;
+ m_CurrentRuntimeFunctionIndex++;
+ }
+#else
+ if (m_CurrentRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[0])
+ return TRUE;
+#endif
+ }
+ m_CurrentRuntimeFunctionIndex = 0;
+ m_fHotMethodsDone = TRUE;
+ }
+
+ if (methodIteratorOptions & Unprofiled)
+ {
+#ifdef WIN64EXCEPTIONS
+ // Skip to the next method, stepping over method fragments and funclets.
+ while (m_CurrentRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[1])
+ {
+ if (m_pNgenLayout->m_MethodDescs[1][m_CurrentRuntimeFunctionIndex] != 0)
+ return TRUE;
+ m_CurrentRuntimeFunctionIndex++;
+ }
+#else
+ if (m_CurrentRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[1])
+ return TRUE;
+#endif
+ }
+
+ return FALSE;
+}
+
+PTR_MethodDesc MethodIterator::GetMethodDesc()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return NativeUnwindInfoLookupTable::GetMethodDesc(m_pNgenLayout, GetRuntimeFunction(), m_ModuleBase);
+}
+
+PTR_VOID MethodIterator::GetGCInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // get the gc info from the RT function
+ SIZE_T size;
+ PTR_VOID pUnwindData = GetUnwindDataBlob(m_ModuleBase, GetRuntimeFunction(), &size);
+ return (PTR_VOID)((PTR_BYTE)pUnwindData + size);
+}
+
+TADDR MethodIterator::GetMethodStartAddress()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_ModuleBase + RUNTIME_FUNCTION__BeginAddress(GetRuntimeFunction());
+}
+
+TADDR MethodIterator::GetMethodColdStartAddress()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PTR_RUNTIME_FUNCTION CurrentFunctionEntry = GetRuntimeFunction();
+
+ //
+ // Catch up with hot code
+ //
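+ // Both the hot methods being iterated and m_ColdCodeMap are sorted consistently
+ // by the method's hot entry point, so this index only ever advances across calls
+ // (a merge-join); we never rescan the cold map from the beginning.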
+ for ( ; m_CurrentColdRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[2]; m_CurrentColdRuntimeFunctionIndex++)
+ {
+ PTR_RUNTIME_FUNCTION ColdFunctionEntry = m_pNgenLayout->m_pRuntimeFunctions[2] + m_CurrentColdRuntimeFunctionIndex;
+
+ PTR_RUNTIME_FUNCTION FunctionEntry;
+
+#ifdef WIN64EXCEPTIONS
+ DWORD MainFunctionEntryRVA = m_pNgenLayout->m_ColdCodeMap[m_CurrentColdRuntimeFunctionIndex].mainFunctionEntryRVA;
+
+ if (MainFunctionEntryRVA == 0)
+ continue;
+
+ FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(m_ModuleBase + MainFunctionEntryRVA);
+#else
+ DWORD ColdUnwindData = ColdFunctionEntry->UnwindData;
+ _ASSERTE((ColdUnwindData & RUNTIME_FUNCTION_INDIRECT) != 0);
+ FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(m_ModuleBase + (ColdUnwindData & ~RUNTIME_FUNCTION_INDIRECT));
+#endif
+
+ if (CurrentFunctionEntry == FunctionEntry)
+ {
+ // we found a match
+ return m_ModuleBase + RUNTIME_FUNCTION__BeginAddress(ColdFunctionEntry);
+ }
+ else if (CurrentFunctionEntry < FunctionEntry)
+ {
+ // method does not have cold code
+ return NULL;
+ }
+ }
+
+ return NULL;
+}
+
+PTR_RUNTIME_FUNCTION MethodIterator::GetRuntimeFunction()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(m_CurrentRuntimeFunctionIndex >= 0);
+ _ASSERTE(m_CurrentRuntimeFunctionIndex < (m_fHotMethodsDone ? m_pNgenLayout->m_nRuntimeFunctions[1] : m_pNgenLayout->m_nRuntimeFunctions[0]));
+ return (m_fHotMethodsDone ? m_pNgenLayout->m_pRuntimeFunctions[1] : m_pNgenLayout->m_pRuntimeFunctions[0]) + m_CurrentRuntimeFunctionIndex;
+}
+
+ULONG MethodIterator::GetHotCodeSize()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(GetMethodColdStartAddress() != NULL);
+ return m_pNgenLayout->m_ColdCodeMap[m_CurrentColdRuntimeFunctionIndex].hotCodeSize;
+}
+
+void MethodIterator::GetMethodRegionInfo(IJitManager::MethodRegionInfo *methodRegionInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ methodRegionInfo->hotStartAddress = GetMethodStartAddress();
+ methodRegionInfo->coldStartAddress = GetMethodColdStartAddress();
+
+ methodRegionInfo->hotSize = ExecutionManager::GetNativeImageJitManager()->GetCodeManager()->GetFunctionSize(GetGCInfo());
+ methodRegionInfo->coldSize = 0;
+
+ if (methodRegionInfo->coldStartAddress != NULL)
+ {
+ //
+ // At this point methodRegionInfo->hotSize is set to the total size of
+ // the method obtained from the GC info (we set that in the init code above).
+ // Use that and pCMH->hotCodeSize to compute the hot and cold code sizes.
+ //
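+ // For example: a total method size of 0x500 from the GC info combined with a
+ // recorded hot code size of 0x300 yields hotSize = 0x300 and coldSize = 0x200.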
+
+ ULONG hotCodeSize = GetHotCodeSize();
+
+ methodRegionInfo->coldSize = methodRegionInfo->hotSize - hotCodeSize;
+ methodRegionInfo->hotSize = hotCodeSize;
+ }
+}
+
+#endif // FEATURE_PREJIT
+
+
+
+#ifdef FEATURE_READYTORUN
+
+//***************************************************************************************
+//***************************************************************************************
+
+#ifndef DACCESS_COMPILE
+
+ReadyToRunJitManager::ReadyToRunJitManager()
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+ReadyToRunInfo * ReadyToRunJitManager::JitTokenToReadyToRunInfo(const METHODTOKEN& MethodToken)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ return dac_cast<PTR_Module>(MethodToken.m_pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo();
+}
+
+PTR_RUNTIME_FUNCTION ReadyToRunJitManager::JitTokenToRuntimeFunction(const METHODTOKEN& MethodToken)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ return dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader);
+}
+
+TADDR ReadyToRunJitManager::JitTokenToStartAddress(const METHODTOKEN& MethodToken)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ return JitTokenToModuleBase(MethodToken) +
+ RUNTIME_FUNCTION__BeginAddress(dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader));
+}
+
+PTR_VOID ReadyToRunJitManager::GetGCInfo(const METHODTOKEN& MethodToken)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(MethodToken);
+ TADDR baseAddress = JitTokenToModuleBase(MethodToken);
+
+#ifndef DACCESS_COMPILE
+ if (g_IBCLogger.InstrEnabled())
+ {
+ ReadyToRunInfo * pInfo = JitTokenToReadyToRunInfo(MethodToken);
+ MethodDesc * pMD = pInfo->GetMethodDescForEntryPoint(JitTokenToStartAddress(MethodToken));
+ g_IBCLogger.LogMethodGCInfoAccess(pMD);
+ }
+#endif
+
+ SIZE_T nUnwindDataSize;
+ PTR_VOID pUnwindData = GetUnwindDataBlob(baseAddress, pRuntimeFunction, &nUnwindDataSize);
+
+ // GCInfo immediately follows the unwind data
+ return dac_cast<PTR_BYTE>(pUnwindData) + nUnwindDataSize;
+}
+
+unsigned ReadyToRunJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(MethodToken);
+
+ IMAGE_DATA_DIRECTORY * pExceptionInfoDir = pReadyToRunInfo->FindSection(READYTORUN_SECTION_EXCEPTION_INFO);
+ if (pExceptionInfoDir == NULL)
+ return 0;
+
+ PEImageLayout * pLayout = pReadyToRunInfo->GetImage();
+
+ PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pExceptionLookupTable = dac_cast<PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE>(pLayout->GetRvaData(pExceptionInfoDir->VirtualAddress));
+
+ COUNT_T numLookupTableEntries = (COUNT_T)(pExceptionInfoDir->Size / sizeof(CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY));
+ // at least 2 entries (1 valid entry + 1 sentinel entry)
+ _ASSERTE(numLookupTableEntries >= 2);
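+ // Each entry pairs a MethodStartRVA with an ExceptionInfoRVA; a method's EH info
+ // size is the difference between consecutive ExceptionInfoRVA values, which is why
+ // the table must be terminated by a sentinel entry.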
+
+ DWORD methodStartRVA = (DWORD)(JitTokenToStartAddress(MethodToken) - JitTokenToModuleBase(MethodToken));
+
+ COUNT_T ehInfoSize = 0;
+ DWORD exceptionInfoRVA = NativeExceptionInfoLookupTable::LookupExceptionInfoRVAForMethod(pExceptionLookupTable,
+ numLookupTableEntries,
+ methodStartRVA,
+ &ehInfoSize);
+ if (exceptionInfoRVA == 0)
+ return 0;
+
+ pEnumState->iCurrentPos = 0;
+ pEnumState->pExceptionClauseArray = JitTokenToModuleBase(MethodToken) + exceptionInfoRVA;
+
+ return ehInfoSize / sizeof(CORCOMPILE_EXCEPTION_CLAUSE);
+}
+
+PTR_EXCEPTION_CLAUSE_TOKEN ReadyToRunJitManager::GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
+ EE_ILEXCEPTION_CLAUSE* pEHClauseOut)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ unsigned iCurrentPos = pEnumState->iCurrentPos;
+ pEnumState->iCurrentPos++;
+
+ CORCOMPILE_EXCEPTION_CLAUSE* pClause = &(dac_cast<PTR_CORCOMPILE_EXCEPTION_CLAUSE>(pEnumState->pExceptionClauseArray)[iCurrentPos]);
+
+ // Copy to the output parameter. This is a nice abstraction for the future:
+ // if we want to compress the clause encoding, we can do so without affecting the call sites
+ pEHClauseOut->TryStartPC = pClause->TryStartPC;
+ pEHClauseOut->TryEndPC = pClause->TryEndPC;
+ pEHClauseOut->HandlerStartPC = pClause->HandlerStartPC;
+ pEHClauseOut->HandlerEndPC = pClause->HandlerEndPC;
+ pEHClauseOut->Flags = pClause->Flags;
+ pEHClauseOut->FilterOffset = pClause->FilterOffset;
+
+ return dac_cast<PTR_EXCEPTION_CLAUSE_TOKEN>(pClause);
+}
+
+StubCodeBlockKind ReadyToRunJitManager::GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DWORD rva = (DWORD)(currentPC - pRangeSection->LowAddress);
+
+ ReadyToRunInfo * pReadyToRunInfo = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo();
+
+ IMAGE_DATA_DIRECTORY * pDelayLoadMethodCallThunksDir = pReadyToRunInfo->FindSection(READYTORUN_SECTION_DELAYLOAD_METHODCALL_THUNKS);
+ if (pDelayLoadMethodCallThunksDir != NULL)
+ {
+ if (pDelayLoadMethodCallThunksDir->VirtualAddress <= rva
+ && rva < pDelayLoadMethodCallThunksDir->VirtualAddress + pDelayLoadMethodCallThunksDir->Size)
+ return STUB_CODE_BLOCK_METHOD_CALL_THUNK;
+ }
+
+ return STUB_CODE_BLOCK_UNKNOWN;
+}
+
+#ifndef DACCESS_COMPILE
+
+TypeHandle ReadyToRunJitManager::ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
+ CrawlFrame* pCf)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ _ASSERTE(NULL != pCf);
+ _ASSERTE(NULL != pEHClause);
+ _ASSERTE(IsTypedHandler(pEHClause));
+
+ MethodDesc *pMD = PTR_MethodDesc(pCf->GetFunction());
+
+ _ASSERTE(pMD != NULL);
+
+ Module* pModule = pMD->GetModule();
+ PREFIX_ASSUME(pModule != NULL);
+
+ SigTypeContext typeContext(pMD);
+ VarKind k = hasNoVars;
+
+ mdToken typeTok = pEHClause->ClassToken;
+
+ // In the vast majority of cases the code under the "if" below
+ // will not be executed.
+ //
+ // First grab the representative instantiations. For code
+ // shared by multiple generic instantiations these are the
+ // canonical (representative) instantiation.
+ if (TypeFromToken(typeTok) == mdtTypeSpec)
+ {
+ PCCOR_SIGNATURE pSig;
+ ULONG cSig;
+ IfFailThrow(pModule->GetMDImport()->GetTypeSpecFromToken(typeTok, &pSig, &cSig));
+
+ SigPointer psig(pSig, cSig);
+ k = psig.IsPolyType(&typeContext);
+
+ // Grab the active class and method instantiation. This exact instantiation is only
+ // needed in the corner case of "generic" exception catching in shared
+ // generic code. We don't need the exact instantiation if the token
+ // doesn't contain E_T_VAR or E_T_MVAR.
+ if ((k & hasSharableVarsMask) != 0)
+ {
+ Instantiation classInst;
+ Instantiation methodInst;
+ pCf->GetExactGenericInstantiations(&classInst,&methodInst);
+ SigTypeContext::InitTypeContext(pMD,classInst, methodInst,&typeContext);
+ }
+ }
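+ // Illustrative example (not code from this file): in shared generic code such as
+ // void M<T>() { try { ... } catch (MyException<T>) { ... } }
+ // the catch token is a typespec mentioning T, so resolving the handler type needs
+ // the exact instantiation recovered from the crawl frame above.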
+
+ return ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, typeTok, &typeContext,
+ ClassLoader::ReturnNullIfNotFound);
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+//-----------------------------------------------------------------------------
+// Ngen info manager
+//-----------------------------------------------------------------------------
+BOOL ReadyToRunJitManager::GetBoundariesAndVars(
+ const DebugInfoRequest & request,
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ OUT ULONG32 * pcMap,
+ OUT ICorDebugInfo::OffsetMapping **ppMap,
+ OUT ULONG32 * pcVars,
+ OUT ICorDebugInfo::NativeVarInfo **ppVars)
+{
+ CONTRACTL {
+ THROWS; // on OOM.
+ GC_NOTRIGGER; // getting vars shouldn't trigger
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ EECodeInfo codeInfo(request.GetStartAddress());
+ if (!codeInfo.IsValid())
+ return FALSE;
+
+ ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(codeInfo.GetMethodToken());
+ PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(codeInfo.GetMethodToken());
+
+ PTR_BYTE pDebugInfo = pReadyToRunInfo->GetDebugInfo(pRuntimeFunction);
+ if (pDebugInfo == NULL)
+ return FALSE;
+
+ // Uncompress. This allocates memory and may throw.
+ CompressDebugInfo::RestoreBoundariesAndVars(
+ fpNew, pNewData, // allocators
+ pDebugInfo, // input
+ pcMap, ppMap,
+ pcVars, ppVars); // output
+
+ return TRUE;
+}
+
+#ifdef DACCESS_COMPILE
+//
+// Need to write out debug info
+//
+void ReadyToRunJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD)
+{
+ SUPPORTS_DAC;
+
+ EECodeInfo codeInfo(pMD->GetNativeCode());
+ if (!codeInfo.IsValid())
+ return;
+
+ ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(codeInfo.GetMethodToken());
+ PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(codeInfo.GetMethodToken());
+
+ PTR_BYTE pDebugInfo = pReadyToRunInfo->GetDebugInfo(pRuntimeFunction);
+ if (pDebugInfo == NULL)
+ return;
+
+ CompressDebugInfo::EnumMemoryRegions(flags, pDebugInfo);
+}
+#endif
+
+PCODE ReadyToRunJitManager::GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset)
+{
+ WRAPPER_NO_CONTRACT;
+
+ MethodRegionInfo methodRegionInfo;
+ JitTokenToMethodRegionInfo(MethodToken, &methodRegionInfo);
+
+ if (relOffset < methodRegionInfo.hotSize)
+ return methodRegionInfo.hotStartAddress + relOffset;
+
+ SIZE_T coldOffset = relOffset - methodRegionInfo.hotSize;
+ _ASSERTE(coldOffset < methodRegionInfo.coldSize);
+ return methodRegionInfo.coldStartAddress + coldOffset;
+}
+
+BOOL ReadyToRunJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection,
+ PCODE currentPC,
+ MethodDesc** ppMethodDesc,
+ OUT EECodeInfo * pCodeInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ // READYTORUN: FUTURE: Hot-cold splitting
+
+ TADDR currentInstr = PCODEToPINSTR(currentPC);
+
+ TADDR ImageBase = pRangeSection->LowAddress;
+
+ DWORD RelativePc = (DWORD)(currentInstr - ImageBase);
+
+ Module * pModule = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule);
+ ReadyToRunInfo * pInfo = pModule->GetReadyToRunInfo();
+
+ COUNT_T nRuntimeFunctions = pInfo->m_nRuntimeFunctions;
+ PTR_RUNTIME_FUNCTION pRuntimeFunctions = pInfo->m_pRuntimeFunctions;
+
+ int MethodIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(RelativePc,
+ pRuntimeFunctions,
+ 0,
+ nRuntimeFunctions - 1);
+
+ if (MethodIndex < 0)
+ return FALSE;
+
+#ifdef WIN64EXCEPTIONS
+ // Save the raw entry
+ PTR_RUNTIME_FUNCTION RawFunctionEntry = pRuntimeFunctions + MethodIndex;
+
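+ // The index found above may refer to a funclet or method fragment, which has no
+ // MethodDesc mapping of its own; walk backwards until we reach the RUNTIME_FUNCTION
+ // for the containing method's entry point.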
+ MethodDesc *pMethodDesc;
+ while ((pMethodDesc = pInfo->GetMethodDescForEntryPoint(ImageBase + RUNTIME_FUNCTION__BeginAddress(pRuntimeFunctions + MethodIndex))) == NULL)
+ MethodIndex--;
+#endif
+
+ PTR_RUNTIME_FUNCTION FunctionEntry = pRuntimeFunctions + MethodIndex;
+
+ if (ppMethodDesc)
+ {
+#ifdef WIN64EXCEPTIONS
+ *ppMethodDesc = pMethodDesc;
+#else
+ *ppMethodDesc = pInfo->GetMethodDescForEntryPoint(ImageBase + RUNTIME_FUNCTION__BeginAddress(FunctionEntry));
+#endif
+ _ASSERTE(*ppMethodDesc != NULL);
+ }
+
+ if (pCodeInfo)
+ {
+ pCodeInfo->m_relOffset = (DWORD)
+ (RelativePc - RUNTIME_FUNCTION__BeginAddress(FunctionEntry));
+
+ // We are using RUNTIME_FUNCTION as METHODTOKEN
+ pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(FunctionEntry));
+
+#ifdef WIN64EXCEPTIONS
+ AMD64_ONLY(_ASSERTE((RawFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0));
+ pCodeInfo->m_pFunctionEntry = RawFunctionEntry;
+#endif
+ }
+
+ return TRUE;
+}
+
+#if defined(WIN64EXCEPTIONS)
+PTR_RUNTIME_FUNCTION ReadyToRunJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ if (!pCodeInfo->IsValid())
+ {
+ return NULL;
+ }
+
+ // code:ReadyToRunJitManager::JitCodeToMethodInfo computes PTR_RUNTIME_FUNCTION eagerly. This path is only
+ // reachable via EECodeInfo::GetMainFunctionInfo, and so we can just return the main entry.
+ _ASSERTE(pCodeInfo->GetRelOffset() == 0);
+
+ return dac_cast<PTR_RUNTIME_FUNCTION>(pCodeInfo->GetMethodToken().m_pCodeHeader);
+}
+
+TADDR ReadyToRunJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // READYTORUN: FUTURE: Hot-cold splitting
+
+ return IJitManager::GetFuncletStartAddress(pCodeInfo);
+}
+
+DWORD ReadyToRunJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength)
+{
+ PTR_RUNTIME_FUNCTION pFirstFuncletFunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader) + 1;
+
+ TADDR moduleBase = JitTokenToModuleBase(MethodToken);
+ DWORD nFunclets = 0;
+ MethodRegionInfo regionInfo;
+ JitTokenToMethodRegionInfo(MethodToken, &regionInfo);
+
+ // pFirstFuncletFunctionEntry will work for ARM when passed to GetFuncletStartOffsetsHelper()
+ // even if it is a fragment of the main body and not a RUNTIME_FUNCTION for the beginning
+ // of the first hot funclet, because GetFuncletStartOffsetsHelper() will skip all the function
+ // fragments until the first funclet, if any, is found.
+
+ GetFuncletStartOffsetsHelper(regionInfo.hotStartAddress, regionInfo.hotSize, 0,
+ pFirstFuncletFunctionEntry, moduleBase,
+ &nFunclets, pStartFuncletOffsets, dwLength);
+
+ // READYTORUN: FUTURE: Hot/cold splitting
+
+ return nFunclets;
+}
+
+BOOL ReadyToRunJitManager::IsFilterFunclet(EECodeInfo * pCodeInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!pCodeInfo->IsFunclet())
+ return FALSE;
+
+ // Get address of the personality routine for the function being queried.
+ SIZE_T size;
+ PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pCodeInfo->GetFunctionEntry(), &size);
+ _ASSERTE(pUnwindData != NULL);
+
+ // Personality routine is always the last element of the unwind data
+ DWORD rvaPersonalityRoutine = *(dac_cast<PTR_DWORD>(dac_cast<TADDR>(pUnwindData) + size) - 1);
+
+ // Get the personality routine for the first function in the module, which is guaranteed not to be a funclet.
+ ReadyToRunInfo * pInfo = JitTokenToReadyToRunInfo(pCodeInfo->GetMethodToken());
+ if (pInfo->m_nRuntimeFunctions == 0)
+ return FALSE;
+
+ PTR_VOID pFirstUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pInfo->m_pRuntimeFunctions, &size);
+ _ASSERTE(pFirstUnwindData != NULL);
+ DWORD rvaFirstPersonalityRoutine = *(dac_cast<PTR_DWORD>(dac_cast<TADDR>(pFirstUnwindData) + size) - 1);
+
+ // Compare the two personality routines. If they are different, then the current function is a filter funclet.
+ BOOL fRet = (rvaPersonalityRoutine != rvaFirstPersonalityRoutine);
+
+ // Verify that the optimized implementation is in sync with the slow implementation
+ _ASSERTE(fRet == IJitManager::IsFilterFunclet(pCodeInfo));
+
+ return fRet;
+}
+
+#endif // WIN64EXCEPTIONS
+
+void ReadyToRunJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken,
+ MethodRegionInfo * methodRegionInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ PRECONDITION(methodRegionInfo != NULL);
+ } CONTRACTL_END;
+
+ // READYTORUN: FUTURE: Hot-cold splitting
+
+ methodRegionInfo->hotStartAddress = JitTokenToStartAddress(MethodToken);
+ methodRegionInfo->hotSize = GetCodeManager()->GetFunctionSize(GetGCInfo(MethodToken));
+ methodRegionInfo->coldStartAddress = 0;
+ methodRegionInfo->coldSize = 0;
+}
+
+#ifdef DACCESS_COMPILE
+
+void ReadyToRunJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ IJitManager::EnumMemoryRegions(flags);
+}
+
+#if defined(WIN64EXCEPTIONS)
+
+//
+// EnumMemoryRegionsForMethodUnwindInfo - enumerate the memory necessary to read the unwind info for the
+// specified method.
+//
+// Note that in theory, a dump generation library could save the unwind information itself without help
+// from us, since it's stored in the image in the standard function table layout for Win64. However,
+// dump-generation libraries assume that the image will be available at debug time, and if the image
+// isn't available then it is acceptable for stackwalking to break. For ngen images (which are created
+// on the client), it usually isn't possible to have the image available at debug time, and so for minidumps
+// we must explicitly ensure the unwind information is saved into the dump.
+//
+// Arguments:
+// flags - EnumMem flags
+// pMD - MethodDesc for the method in question
+//
+void ReadyToRunJitManager::EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo)
+{
+ // Get the RUNTIME_FUNCTION entry for this method
+ PTR_RUNTIME_FUNCTION pRtf = pCodeInfo->GetFunctionEntry();
+
+ if (pRtf==NULL)
+ {
+ return;
+ }
+
+ // Enumerate the function entry and other entries needed to locate it in the program exceptions directory
+ ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(pCodeInfo->GetMethodToken());
+ EnumRuntimeFunctionEntriesToFindEntry(pRtf, pReadyToRunInfo->GetImage());
+
+ SIZE_T size;
+ PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pRtf, &size);
+ if (pUnwindData != NULL)
+ DacEnumMemoryRegion(PTR_TO_TADDR(pUnwindData), size);
+}
+
+#endif //WIN64EXCEPTIONS
+#endif // #ifdef DACCESS_COMPILE
+
+#endif // FEATURE_READYTORUN
diff --git a/src/vm/codeman.h b/src/vm/codeman.h
new file mode 100644
index 0000000000..8dc1d0992a
--- /dev/null
+++ b/src/vm/codeman.h
@@ -0,0 +1,1830 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+
+/******************************************************************************
+
+Module Name:
+
+ codeman.h
+
+Abstract:
+
+ Wrapper to facilitate multiple JIT compiler support in the COM+ Runtime
+
+ The ExecutionManager is responsible for managing the RangeSections.
+ Given an IP, it can find the RangeSection which holds that IP.
+
+ RangeSections contain the JITted code. Each RangeSection knows the
+ IJitManager which created it.
+
+ An IJitManager knows about which method bodies live in each RangeSection.
+ It can handle methods of one given CodeType. It can map a method body to
+ a MethodDesc. It knows where the GCInfo about the method lives.
+ Today, we have three IJitManagers, viz.
+ 1. EEJitManager for JIT-compiled code generated by clrjit.dll
+ 2. NativeImageJitManager for NGen'ed code.
+ 3. ReadyToRunJitManager for ReadyToRun images.
+
+ An ICodeManager knows how to crack a specific format of GCInfo. There is
+ a default format (handled by ExecutionManager::GetDefaultCodeManager())
+ which can be shared by different IJitManagers/IJitCompilers.
+
+ An ICorJitCompiler knows how to generate code for a method IL, and produce
+ GCInfo in a format which the corresponding IJitManager's ICodeManager
+ can handle.
+
+ ExecutionManager
+ |
+ +-----------+---------------+---------------+-----------+--- ...
+ | | | |
+ CodeType | CodeType |
+ | | | |
+ v v v v
++---------------+ +--------+<---- R +---------------+ +--------+<---- R
+|ICorJitCompiler|<---->|IJitMan |<---- R |ICorJitCompiler|<---->|IJitMan |<---- R
++---------------+ +--------+<---- R +---------------+ +--------+<---- R
+ | x . | x .
+ | \ . | \ .
+ v \ . v \ .
+ +--------+ R +--------+ R
+ |ICodeMan| |ICodeMan| (RangeSections)
+ +--------+ +--------+
+
+******************************************************************************/
+
+#ifndef __CODEMAN_HPP__
+
+#define __CODEMAN_HPP__
+
+#include "crst.h"
+#include "eetwain.h"
+#include "ceeload.h"
+#include "jitinterface.h"
+#include "debuginfostore.h"
+#include "shash.h"
+#include "pedecoder.h"
+
+class MethodDesc;
+class ICorJitCompiler;
+class IJitManager;
+class EEJitManager;
+class NativeImageJitManager;
+class ReadyToRunJitManager;
+class ExecutionManager;
+class Thread;
+class CrawlFrame;
+struct EE_ILEXCEPTION;
+struct EE_ILEXCEPTION_CLAUSE;
+typedef struct
+{
+ unsigned iCurrentPos;
+ TADDR pExceptionClauseArray;
+} EH_CLAUSE_ENUMERATOR;
+class EECodeInfo;
+
+#define PAGE_MASK (PAGE_SIZE-1)
+#define PAGE_ALIGN ~(PAGE_MASK)
+#define ROUND_DOWN_TO_PAGE(x) ( (size_t) (x) & PAGE_ALIGN)
+#define ROUND_UP_TO_PAGE(x) (((size_t) (x) + PAGE_MASK) & PAGE_ALIGN)
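+// For example, assuming a 4K page (PAGE_SIZE == 0x1000):
+// ROUND_DOWN_TO_PAGE(0x1234) == 0x1000 and ROUND_UP_TO_PAGE(0x1234) == 0x2000.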
+
+enum StubCodeBlockKind : int
+{
+ STUB_CODE_BLOCK_UNKNOWN,
+ STUB_CODE_BLOCK_JUMPSTUB,
+ STUB_CODE_BLOCK_PRECODE,
+ STUB_CODE_BLOCK_DYNAMICHELPER,
+ // Last valid value. Note that the definition is duplicated in debug\daccess\fntableaccess.cpp
+ STUB_CODE_BLOCK_LAST = 0xF,
+ // Placeholders returned by code:GetStubCodeBlockKind
+ STUB_CODE_BLOCK_NOCODE,
+ STUB_CODE_BLOCK_MANAGED,
+ STUB_CODE_BLOCK_STUBLINK,
+ // Placeholders used by NGen images
+ STUB_CODE_BLOCK_VIRTUAL_METHOD_THUNK,
+ STUB_CODE_BLOCK_EXTERNAL_METHOD_THUNK,
+ // Placeholders used by ReadyToRun images
+ STUB_CODE_BLOCK_METHOD_CALL_THUNK,
+};
+
+//-----------------------------------------------------------------------------
+// Method header which exists just before the code.
+// Every IJitManager could have its own format for the header.
+// Today CodeHeader is used by the EEJitManager.
+
+#ifdef USE_INDIRECT_CODEHEADER
+typedef DPTR(struct _hpRealCodeHdr) PTR_RealCodeHeader;
+typedef DPTR(struct _hpCodeHdr) PTR_CodeHeader;
+
+#else // USE_INDIRECT_CODEHEADER
+typedef DPTR(struct _hpCodeHdr) PTR_CodeHeader;
+
+#endif // USE_INDIRECT_CODEHEADER
+
+#ifdef USE_INDIRECT_CODEHEADER
+typedef struct _hpRealCodeHdr
+#else // USE_INDIRECT_CODEHEADER
+typedef struct _hpCodeHdr
+#endif // USE_INDIRECT_CODEHEADER
+{
+public:
+ PTR_BYTE phdrDebugInfo;
+
+ // Note - *(&(pCodeHeader->phdrJitEHInfo) - sizeof(size_t))
+ // contains the number of EH clauses, See EEJitManager::allocEHInfo
+ PTR_EE_ILEXCEPTION phdrJitEHInfo;
+ PTR_BYTE phdrJitGCInfo;
+
+ PTR_MethodDesc phdrMDesc;
+
+#ifdef WIN64EXCEPTIONS
+ DWORD nUnwindInfos;
+ RUNTIME_FUNCTION unwindInfos[0];
+#endif // WIN64EXCEPTIONS
+
+public:
+#ifndef USE_INDIRECT_CODEHEADER
+ //
+ // Note: the JITted code follows immediately after the MethodDesc*
+ //
+ PTR_BYTE GetDebugInfo()
+ {
+ SUPPORTS_DAC;
+
+ return phdrDebugInfo;
+ }
+ PTR_EE_ILEXCEPTION GetEHInfo()
+ {
+ return phdrJitEHInfo;
+ }
+ PTR_BYTE GetGCInfo()
+ {
+ SUPPORTS_DAC;
+ return phdrJitGCInfo;
+ }
+ PTR_MethodDesc GetMethodDesc()
+ {
+ SUPPORTS_DAC;
+ return phdrMDesc;
+ }
+ TADDR GetCodeStartAddress()
+ {
+ SUPPORTS_DAC;
+ return dac_cast<TADDR>(dac_cast<PTR_CodeHeader>(this) + 1);
+ }
+ StubCodeBlockKind GetStubCodeBlockKind()
+ {
+ SUPPORTS_DAC;
+ return (StubCodeBlockKind)dac_cast<TADDR>(phdrMDesc);
+ }
+ BOOL IsStubCodeBlock()
+ {
+ SUPPORTS_DAC;
+ // Note that it is important for this comparison to be unsigned
+ return dac_cast<TADDR>(phdrMDesc) <= (TADDR)STUB_CODE_BLOCK_LAST;
+ }
+
+ void SetDebugInfo(PTR_BYTE pDI)
+ {
+ phdrDebugInfo = pDI;
+ }
+ void SetEHInfo(PTR_EE_ILEXCEPTION pEH)
+ {
+ phdrJitEHInfo = pEH;
+ }
+ void SetGCInfo(PTR_BYTE pGC)
+ {
+ phdrJitGCInfo = pGC;
+ }
+ void SetMethodDesc(PTR_MethodDesc pMD)
+ {
+ phdrMDesc = pMD;
+ }
+ void SetStubCodeBlockKind(StubCodeBlockKind kind)
+ {
+ phdrMDesc = (PTR_MethodDesc)kind;
+ }
+#endif // !USE_INDIRECT_CODEHEADER
+
+// if we're using the indirect codeheaders then all enumeration is done by the code header
+#ifndef USE_INDIRECT_CODEHEADER
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, IJitManager* pJitMan);
+#endif // DACCESS_COMPILE
+#endif // USE_INDIRECT_CODEHEADER
+#ifdef USE_INDIRECT_CODEHEADER
+} RealCodeHeader;
+#else // USE_INDIRECT_CODEHEADER
+} CodeHeader;
+#endif // USE_INDIRECT_CODEHEADER
+
+#ifdef USE_INDIRECT_CODEHEADER
+typedef struct _hpCodeHdr
+{
+ PTR_RealCodeHeader pRealCodeHeader;
+
+public:
+ PTR_BYTE GetDebugInfo()
+ {
+ SUPPORTS_DAC;
+ return pRealCodeHeader->phdrDebugInfo;
+ }
+ PTR_EE_ILEXCEPTION GetEHInfo()
+ {
+ return pRealCodeHeader->phdrJitEHInfo;
+ }
+ PTR_BYTE GetGCInfo()
+ {
+ SUPPORTS_DAC;
+ return pRealCodeHeader->phdrJitGCInfo;
+ }
+ PTR_MethodDesc GetMethodDesc()
+ {
+ SUPPORTS_DAC;
+ return pRealCodeHeader->phdrMDesc;
+ }
+ TADDR GetCodeStartAddress()
+ {
+ SUPPORTS_DAC;
+ return dac_cast<PCODE>(dac_cast<PTR_CodeHeader>(this) + 1);
+ }
+ StubCodeBlockKind GetStubCodeBlockKind()
+ {
+ SUPPORTS_DAC;
+ return (StubCodeBlockKind)dac_cast<TADDR>(pRealCodeHeader);
+ }
+ BOOL IsStubCodeBlock()
+ {
+ SUPPORTS_DAC;
+ // Note that it is important for this comparison to be unsigned
+ return dac_cast<TADDR>(pRealCodeHeader) <= (TADDR)STUB_CODE_BLOCK_LAST;
+ }
+
+ void SetRealCodeHeader(BYTE* pRCH)
+ {
+ pRealCodeHeader = PTR_RealCodeHeader((RealCodeHeader*)pRCH);
+ }
+
+ void SetDebugInfo(PTR_BYTE pDI)
+ {
+ pRealCodeHeader->phdrDebugInfo = pDI;
+ }
+ void SetEHInfo(PTR_EE_ILEXCEPTION pEH)
+ {
+ pRealCodeHeader->phdrJitEHInfo = pEH;
+ }
+ void SetGCInfo(PTR_BYTE pGC)
+ {
+ pRealCodeHeader->phdrJitGCInfo = pGC;
+ }
+ void SetMethodDesc(PTR_MethodDesc pMD)
+ {
+ pRealCodeHeader->phdrMDesc = pMD;
+ }
+ void SetStubCodeBlockKind(StubCodeBlockKind kind)
+ {
+ pRealCodeHeader = (PTR_RealCodeHeader)kind;
+ }
+
+#if defined(WIN64EXCEPTIONS)
+ UINT GetNumberOfUnwindInfos()
+ {
+ SUPPORTS_DAC;
+ return pRealCodeHeader->nUnwindInfos;
+ }
+ void SetNumberOfUnwindInfos(UINT nUnwindInfos)
+ {
+ LIMITED_METHOD_CONTRACT;
+ pRealCodeHeader->nUnwindInfos = nUnwindInfos;
+ }
+ PTR_RUNTIME_FUNCTION GetUnwindInfo(UINT iUnwindInfo)
+ {
+ SUPPORTS_DAC;
+ _ASSERTE(iUnwindInfo < GetNumberOfUnwindInfos());
+ return dac_cast<PTR_RUNTIME_FUNCTION>(
+ PTR_TO_MEMBER_TADDR(RealCodeHeader, pRealCodeHeader, unwindInfos) + iUnwindInfo * sizeof(RUNTIME_FUNCTION));
+ }
+#endif // WIN64EXCEPTIONS
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, IJitManager* pJitMan);
+#endif // DACCESS_COMPILE
+
+} CodeHeader;
+#endif // USE_INDIRECT_CODEHEADER
+
+
+//-----------------------------------------------------------------------------
+// This is a structure used to consolidate the information that we
+// need when creating new code heaps.
+// When creating new JumpStubs we have a constraint that the address used
+// should be in the range [loAddr..hiAddr]
+//
+struct CodeHeapRequestInfo
+{
+ MethodDesc * m_pMD;
+ LoaderAllocator* m_pAllocator;
+ const BYTE * m_loAddr; // lowest address to use to satisfy our request (0 -- don't care)
+ const BYTE * m_hiAddr; // highest address to use to satisfy our request (0 -- don't care)
+ size_t m_requestSize; // minimum size that must be made available
+ size_t m_reserveSize; // Amount that VirtualAlloc will reserve
+ bool m_isDynamicDomain;
+ bool m_isCollectible;
+
+ bool IsDynamicDomain() { return m_isDynamicDomain; }
+ bool IsCollectible() { return m_isCollectible; }
+
+ size_t getRequestSize() { return m_requestSize; }
+ void setRequestSize(size_t requestSize) { m_requestSize = requestSize; }
+
+ size_t getReserveSize() { return m_reserveSize; }
+ void setReserveSize(size_t reserveSize) { m_reserveSize = reserveSize; }
+
+ void Init();
+
+ CodeHeapRequestInfo(MethodDesc *pMD)
+ : m_pMD(pMD), m_pAllocator(0),
+ m_loAddr(0), m_hiAddr(0),
+ m_requestSize(0), m_reserveSize(0)
+ { WRAPPER_NO_CONTRACT; Init(); }
+
+ CodeHeapRequestInfo(MethodDesc *pMD, LoaderAllocator* pAllocator,
+ BYTE * loAddr, BYTE * hiAddr)
+ : m_pMD(pMD), m_pAllocator(pAllocator),
+ m_loAddr(loAddr), m_hiAddr(hiAddr),
+ m_requestSize(0), m_reserveSize(0)
+ { WRAPPER_NO_CONTRACT; Init(); }
+};
+
+//-----------------------------------------------------------------------------
+//
+// A CodeHeap is the abstraction the IJitManager uses to allocate memory
+// needed for the jitting of a method.
+// The CodeHeap works together with the HeapList to manage a contiguous block of memory.
+// The CodeHeap is a non-growable chunk of memory (it can be reserved and
+// committed on demand).
+//
+// A CodeHeap is naturally protected from multiple threads by the code heap
+// critical section - m_pCodeHeapCritSec - so if the implementation of the heap
+// is only for the code manager, no locking needs to occur.
+// It's important however that a delete operation on the CodeHeap (if any) happens
+// via EEJitManager::FreeCodeMemory(HostCodeHeap*, void*)
+//
+// The heap to be created depends on the MethodDesc that is being compiled.
+// Standard code uses the LoaderCodeHeap, a heap based on the LoaderHeap.
+// DynamicMethods - and only those - use a HostCodeHeap, a heap that does
+// normal Alloc/Free so reclamation can be performed.
+//
+// The convention is that every heap implementation would have a static create
+// function that returns a HeapList. The HeapList *must* be properly initialized
+// on return except for the next pointer
+//
+
+typedef VPTR(class CodeHeap) PTR_CodeHeap;
+
+class CodeHeap
+{
+ VPTR_BASE_VTABLE_CLASS(CodeHeap)
+
+public:
+
+#ifdef DACCESS_COMPILE
+ CodeHeap() {}
+#endif
+
+ // virtual dtor. Clean up heap
+ virtual ~CodeHeap() {}
+
+ // Alloc the specified numbers of bytes for code. Returns NULL if the request does not fit
+ // Space for header is reserved immediately before. It is not included in size.
+ virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment) = 0;
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) = 0;
+#endif
+
+protected:
+ friend class EEJitManager;
+};
+
+//-----------------------------------------------------------------------------
+// The HeapList works together with the CodeHeap to manage a contiguous block of memory.
+//
+// A single HeapList contains code only for a single AppDomain. EEJitManager uses
+// EEJitManager::DomainCodeHeapList to keep a list of HeapLists for each AppDomain.
+
+// The number of code heaps at which we increase the size of new code heaps.
+#define CODE_HEAP_SIZE_INCREASE_THRESHOLD 5
+
+typedef DPTR(struct _HeapList) PTR_HeapList;
+
+typedef struct _HeapList
+{
+ PTR_HeapList hpNext;
+
+ PTR_CodeHeap pHeap;
+
+ TADDR startAddress;
+ TADDR endAddress; // the current end of the used portion of the Heap
+
+ TADDR mapBase; // "startAddress" rounded down to PAGE_SIZE. pHdrMap is relative to this address
+ PTR_DWORD pHdrMap; // bit array used to find the start of methods
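+ // (the "nibble map" maintained via EEJitManager::NibbleMapSet; each 4-bit
+ // entry records where, if anywhere, a method starts within a small
+ // fixed-size bucket of the heap)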
+
+ size_t maxCodeHeapSize;// Size of the entire contiguous block of memory
+ DWORD cBlocks; // Number of allocations
+ bool bFull; // Heap is considered full; do not use for new allocations
+ bool bFullForJumpStubs; // Heap is considered full; do not use for new allocations of jump stubs
+
+#if defined(_TARGET_AMD64_)
+ BYTE CLRPersonalityRoutine[JUMP_ALLOCATE_SIZE]; // jump thunk to personality routine
+#elif defined(_TARGET_ARM64_)
+ UINT32 CLRPersonalityRoutine[JUMP_ALLOCATE_SIZE/sizeof(UINT32)]; // jump thunk to personality routine
+#endif
+
+ PTR_HeapList GetNext()
+ { SUPPORTS_DAC; return hpNext; }
+
+ void SetNext(PTR_HeapList next)
+ { hpNext = next; }
+
+ void SetHeapFull()
+ { VolatileStore(&bFull, true); }
+
+ bool IsHeapFull()
+ { return VolatileLoad(&bFull); }
+
+ void SetHeapFullForJumpStubs()
+ { VolatileStore(&bFullForJumpStubs, true); }
+
+ bool IsHeapFullForJumpStubs()
+ { return VolatileLoad(&bFullForJumpStubs); }
+
+} HeapList;
+
+//-----------------------------------------------------------------------------
+// Implementation of the standard CodeHeap.
+// Use the ExplicitControlLoaderHeap for allocations
+// (Check the base class above - CodeHeap - for comments on the functions)
+//
+typedef VPTR(class LoaderCodeHeap) PTR_LoaderCodeHeap;
+
+class LoaderCodeHeap : CodeHeap
+{
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+#endif
+
+ VPTR_VTABLE_CLASS(LoaderCodeHeap, CodeHeap)
+
+private:
+ ExplicitControlLoaderHeap m_LoaderHeap;
+ SSIZE_T m_cbMinNextPad;
+
+ LoaderCodeHeap(size_t * pPrivatePCLBytes);
+
+public:
+ static HeapList* CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap *pJitMetaHeap);
+
+public:
+ virtual ~LoaderCodeHeap()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment) DAC_EMPTY_RET(NULL);
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_LoaderHeap.EnumMemoryRegions(flags);
+ }
+#endif
+};
+
+#if defined(_WIN64)
+// On non-x86 platforms, the OS defines UnwindInfo (accessed from RUNTIME_FUNCTION
+// structures) to support the ability to unwind the stack. Unfortunately the pre-Win8
+// APIs defined a callback API for publishing this data dynamically that ETW does
+// not use (and really can't because the walk happens in the kernel). In Win8
+// new APIs were defined that allow incremental publishing via a table.
+//
+// UnwindInfoTable is a class that wraps the OS APIs that we use to publish
+// this table. Its job is to allocate the table, deallocate it when we are
+// done and allow us to add new entries one at a time (AddToUnwindInfoTable)
+//
+// Each RangeSection has a UnwindInfoTable which holds the
+// RUNTIME_FUNCTION array as well as other bookkeeping (the current and maximum
+// size of the array, and the handle used to publish it to the OS.
+//
+// Ideally we would just use this new API when it is available; however, to minimize
+// risk and to make the change perfectly pay-for-play, we use the original mechanism
+// ALWAYS, and in addition publish via the Table ONLY WHEN ETW JIT events are turned
+// on.
+//
+// This class implements a 'catchup' routine that allows us to publish existing JITTed
+// methods when ETW turns on. Currently this is 'sticky' (once we start publishing
+// both ways, we do so for the life of the process).
+//
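+// Illustrative flow (a sketch, not code quoted from this file): after a method is
+// JITted, the code manager would call
+// UnwindInfoTable::PublishUnwindInfoForMethod(baseAddress, unwindInfos, nUnwindInfos);
+// and on method unload
+// UnwindInfoTable::UnpublishUnwindInfoForMethod(entryPoint);
+// Table-based ETW publishing is switched on lazily via PublishUnwindInfo(true).
+//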
+typedef DPTR(class UnwindInfoTable) PTR_UnwindInfoTable;
+class UnwindInfoTable {
+public:
+ // All public functions are thread-safe.
+
+ // These are wrapper functions over the UnwindInfoTable functions that are specific to JIT compile code
+ static void PublishUnwindInfoForMethod(TADDR baseAddress, RUNTIME_FUNCTION* unwindInfo, int unwindInfoCount);
+ static void UnpublishUnwindInfoForMethod(TADDR entryPoint);
+
+ // These are lower level functions that assume you have found the list of UnwindInfoTable entries
+ // These are used by the stublinker and the high-level method functions above
+ static void AddToUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, RUNTIME_FUNCTION* data, TADDR rangeStart, TADDR rangeEnd);
+ static void RemoveFromUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, TADDR baseAddress, TADDR entryPoint);
+
+ // By default this publishing is off, this routine turns it on (and optionally publishes existing methods)
+ static void PublishUnwindInfo(bool publishExisting);
+ ~UnwindInfoTable();
+
+private:
+ void UnRegister();
+ void Register();
+ UnwindInfoTable(ULONG_PTR rangeStart, ULONG_PTR rangeEnd, ULONG size);
+ static void PublishUnwindInfoForExistingMethods();
+
+private:
+ static Volatile<bool> s_publishingActive; // Publishing to ETW is turned on
+ static class Crst* s_pUnwindInfoTableLock; // lock protects all public UnwindInfoTable functions
+
+ PVOID hHandle; // OS handle for a published RUNTIME_FUNCTION table
+ ULONG_PTR iRangeStart; // Start of memory described by this table
+ ULONG_PTR iRangeEnd; // End of memory described by this table
+ RUNTIME_FUNCTION* pTable; // The actual list of method unwind info, sorted by address
+ ULONG cTableCurCount;
+ ULONG cTableMaxCount;
+ int cDeletedEntries; // Number of slots we removed.
+};
+
+#endif // defined(_WIN64)
+
+//-----------------------------------------------------------------------------
+// The ExecutionManager uses RangeSection as the abstraction of a contiguous
+// address range to track the code heaps.
+
+typedef DPTR(struct RangeSection) PTR_RangeSection;
+
+struct RangeSection
+{
+ TADDR LowAddress;
+ TADDR HighAddress;
+
+ PTR_IJitManager pjit; // The owner of this address range
+
+#ifndef DACCESS_COMPILE
+ // Volatile because the list can be walked lock-free
+ Volatile<RangeSection *> pnext; // link rangesections in a sorted list
+#else
+ PTR_RangeSection pnext;
+#endif
+
+ PTR_RangeSection pLastUsed; // for the head node only: a link to the range section that was used most recently
+
+ enum RangeSectionFlags
+ {
+ RANGE_SECTION_NONE = 0x0,
+ RANGE_SECTION_COLLECTIBLE = 0x1,
+ RANGE_SECTION_CODEHEAP = 0x2,
+#ifdef FEATURE_READYTORUN
+ RANGE_SECTION_READYTORUN = 0x4,
+#endif
+ };
+
+ DWORD flags;
+
+ // union
+ // {
+ // PTR_CodeHeap pCodeHeap; // valid if RANGE_SECTION_HEAP is set
+ // PTR_Module pZapModule; // valid if RANGE_SECTION_HEAP is not set
+ // };
+ TADDR pHeapListOrZapModule;
+#if defined(_WIN64)
+ PTR_UnwindInfoTable pUnwindInfoTable; // Points to unwind information for this memory range.
+#endif // defined(_WIN64)
+};
+
+/*****************************************************************************/
+
+#ifdef CROSSGEN_COMPILE
+#define CodeFragmentHeap LoaderHeap
+#else
+
+//
+// A simple linked-list based allocator that exposes a code heap as a loader heap for allocation of precodes.
+// The loader-heap-like interface is necessary to support backout. It is also conveniently used to reduce space overhead
+// for small blocks that are common for precodes.
+//
+// Allocating precodes on the code heap keeps them close to other code; this reduces the need for jump stubs and thus the
+// chance that we run into a bogus OOM because we cannot allocate memory in a particular memory range.
+//
+class CodeFragmentHeap : public ILoaderHeapBackout
+{
+ PTR_LoaderAllocator m_pAllocator;
+
+ struct FreeBlock
+ {
+ DPTR(FreeBlock) m_pNext; // Next block
+ SIZE_T m_dwSize; // Size of this block (includes size of FreeBlock)
+ };
+ typedef DPTR(FreeBlock) PTR_FreeBlock;
+
+ PTR_FreeBlock m_pFreeBlocks;
+ StubCodeBlockKind m_kind;
+
+ Crst m_CritSec;
+
+ void AddBlock(VOID * pMem, size_t dwSize);
+ void RemoveBlock(FreeBlock ** ppBlock);
+
+public:
+ CodeFragmentHeap(LoaderAllocator * pAllocator, StubCodeBlockKind kind);
+
+ TaggedMemAllocPtr RealAllocAlignedMem(size_t dwRequestedSize
+ ,unsigned dwAlignment
+#ifdef _DEBUG
+ ,__in __in_z const char *szFile
+ ,int lineNum
+#endif
+ );
+
+ virtual void RealBackoutMem(void *pMem
+ , size_t dwSize
+#ifdef _DEBUG
+ , __in __in_z const char *szFile
+ , int lineNum
+ , __in __in_z const char *szAllocFile
+ , int allocLineNum
+#endif
+ ) DAC_EMPTY();
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(enum CLRDataEnumMemoryFlags flags)
+ {
+ WRAPPER_NO_CONTRACT;
+ DAC_ENUM_DTHIS();
+ }
+#endif
+};
+#endif // CROSSGEN_COMPILE
+
+typedef DPTR(class CodeFragmentHeap) PTR_CodeFragmentHeap;
+
+//-----------------------------------------------------------------------------
+//
+// Manages the CodeHeap for some of the RangeSections in the ExecutionManager
+//
+//-----------------------------------------------------------------------------
+
+class IJitManager
+{
+ VPTR_BASE_VTABLE_CLASS(IJitManager)
+
+public:
+ struct MethodRegionInfo
+ {
+ TADDR hotStartAddress;
+ size_t hotSize;
+ TADDR coldStartAddress;
+ size_t coldSize;
+ };
+
+#ifndef DACCESS_COMPILE
+ IJitManager();
+#endif // !DACCESS_COMPILE
+
+ virtual DWORD GetCodeType() = 0;
+
+ // Used to read debug info.
+ // 1) Caller passes an allocator which these functions use to allocate memory.
+ // This is b/c the store may need to decompress the information just to figure out the size.
+ // 2) Note that these methods use Uncompressed (Normal) jit data.
+ // Compression is just an implementation detail.
+ // 3) These throw on OOM (exceptional case), and may return
+ // FALSE if no data is available (not exceptional)
+
+ virtual BOOL GetBoundariesAndVars(
+ const DebugInfoRequest & request,
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ OUT ULONG32 * pcMap,
+ OUT ICorDebugInfo::OffsetMapping **ppMap,
+ OUT ULONG32 * pcVars,
+ OUT ICorDebugInfo::NativeVarInfo **ppVars) = 0;
+
+ virtual BOOL JitCodeToMethodInfo(
+ RangeSection * pRangeSection,
+ PCODE currentPC,
+ MethodDesc** ppMethodDesc,
+ OUT EECodeInfo * pCodeInfo) = 0;
+
+ virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset) = 0;
+
+ virtual TADDR JitTokenToStartAddress(const METHODTOKEN& MethodToken)=0;
+ virtual void JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, MethodRegionInfo *methodRegionInfo) = 0;
+ virtual unsigned InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)=0;
+ virtual PTR_EXCEPTION_CLAUSE_TOKEN GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
+ EE_ILEXCEPTION_CLAUSE* pEHclause)=0;
+#ifndef DACCESS_COMPILE
+ virtual TypeHandle ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
+ CrawlFrame *pCf)=0;
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual PTR_VOID GetGCInfo(const METHODTOKEN& MethodToken)=0;
+
+ TADDR JitTokenToModuleBase(const METHODTOKEN& MethodToken);
+
+#if defined(WIN64EXCEPTIONS)
+ virtual PTR_RUNTIME_FUNCTION LazyGetFunctionEntry(EECodeInfo * pCodeInfo) = 0;
+
+ // GetFuncletStartAddress returns the starting address of the function or funclet indicated by the EECodeInfo address.
+ virtual TADDR GetFuncletStartAddress(EECodeInfo * pCodeInfo);
+
+ virtual DWORD GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength) = 0;
+
+ BOOL IsFunclet(EECodeInfo * pCodeInfo);
+ virtual BOOL IsFilterFunclet(EECodeInfo * pCodeInfo);
+#endif // WIN64EXCEPTIONS
+
+ virtual StubCodeBlockKind GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC) = 0;
+
+ // DAC-specific virtual functions.
+ // Note that these MUST occur below any other virtual function definitions to ensure that the vtable in
+ // DAC builds is compatible with the non-DAC one so that DAC virtual dispatch will work correctly.
+#if defined(DACCESS_COMPILE)
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ virtual void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD) = 0;
+#if defined(WIN64EXCEPTIONS)
+ // Enumerate the memory necessary to retrieve the unwind info for a specific method
+ virtual void EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo) = 0;
+#endif // WIN64EXCEPTIONS
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+ void SetCodeManager(ICodeManager *codeMgr)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_runtimeSupport = codeMgr;
+ }
+#endif // !DACCESS_COMPILE
+
+ ICodeManager *GetCodeManager()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_runtimeSupport;
+ }
+
+protected:
+ PTR_ICodeManager m_runtimeSupport;
+};
+
+//-----------------------------------------------------------------------------
+
+class HostCodeHeap;
+typedef VPTR(class HostCodeHeap) PTR_HostCodeHeap;
+
+typedef VPTR(class EEJitManager) PTR_EEJitManager;
+typedef VPTR(class NativeImageJitManager) PTR_NativeImageJitManager;
+typedef VPTR(class ReadyToRunJitManager) PTR_ReadyToRunJitManager;
+
+struct JumpStubBlockHeader
+{
+ JumpStubBlockHeader * m_next;
+ UINT32 m_used;
+ UINT32 m_allocated;
+
+ LoaderAllocator* GetLoaderAllocator()
+ {
+ _ASSERTE(m_zero == 0);
+ return m_Allocator;
+ }
+
+ void SetLoaderAllocator(LoaderAllocator * loaderAllocator)
+ {
+ m_zero = 0;
+ m_Allocator = loaderAllocator;
+ }
+
+ HostCodeHeap* GetHostCodeHeap()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(m_zero == -1);
+ return m_CodeHeap;
+ }
+
+ void SetHostCodeHeap(HostCodeHeap * hostCodeHeap)
+ {
+ m_zero = -1;
+ m_CodeHeap = hostCodeHeap;
+ }
+
+private:
+ union {
+ HostCodeHeap *m_CodeHeap;
+ LoaderAllocator *m_Allocator;
+ };
+
+ INT64 m_zero; // 0 for normal methods and -1 for LCG methods
+};
+
+
+/*****************************************************************************/
+
+class EEJitManager :public IJitManager
+{
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+#endif
+ friend class CheckDuplicatedStructLayouts;
+ friend class CodeHeapIterator;
+
+ VPTR_VTABLE_CLASS(EEJitManager, IJitManager)
+
+public:
+
+ // Failing to load the main JIT is a failure.
+ // If the user requested an altjit and we failed to load an altjit, that is also a failure.
+ BOOL IsJitLoaded()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_jit != NULL)
+#ifdef ALLOW_SXS_JIT
+ && (!m_AltJITRequired || (m_alternateJit != NULL))
+#endif // ALLOW_SXS_JIT
+ ;
+ }
+
+#ifdef ALLOW_SXS_JIT
+ BOOL IsMainJitLoaded()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_jit != NULL);
+ }
+
+ BOOL IsAltJitLoaded()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_alternateJit != NULL);
+ }
+#endif // ALLOW_SXS_JIT
+
+ VOID ClearCache()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ if( m_jit != NULL )
+ {
+ m_jit->clearCache();
+ }
+#ifdef ALLOW_SXS_JIT
+ if( m_alternateJit != NULL )
+ {
+ m_alternateJit->clearCache();
+ }
+#endif // ALLOW_SXS_JIT
+ }
+
+ BOOL IsCacheCleanupRequired()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ BOOL ret = FALSE;
+
+ if( m_jit != NULL )
+ {
+ if (m_jit->isCacheCleanupRequired())
+ ret = TRUE;
+ }
+
+#ifdef ALLOW_SXS_JIT
+ if( !ret && m_alternateJit != NULL )
+ {
+ if (m_alternateJit->isCacheCleanupRequired())
+ ret = TRUE;
+ }
+#endif // ALLOW_SXS_JIT
+
+ return ret;
+ }
+
+#ifndef DACCESS_COMPILE
+ EEJitManager();
+
+ // No destructor necessary. Only one instance of this class that is destroyed at process shutdown.
+ // ~EEJitManager();
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual DWORD GetCodeType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (miManaged | miIL);
+ }
+
+ // Used to read debug info.
+ virtual BOOL GetBoundariesAndVars(
+ const DebugInfoRequest & request,
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ OUT ULONG32 * pcMap,
+ OUT ICorDebugInfo::OffsetMapping **ppMap,
+ OUT ULONG32 * pcVars,
+ OUT ICorDebugInfo::NativeVarInfo **ppVars);
+
+ virtual BOOL JitCodeToMethodInfo(RangeSection * pRangeSection,
+ PCODE currentPC,
+ MethodDesc ** ppMethodDesc,
+ EECodeInfo * pCodeInfo);
+
+ virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset);
+
+ virtual TADDR JitTokenToStartAddress(const METHODTOKEN& MethodToken);
+ virtual void JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, MethodRegionInfo *methodRegionInfo);
+
+ virtual unsigned InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState);
+ virtual PTR_EXCEPTION_CLAUSE_TOKEN GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
+ EE_ILEXCEPTION_CLAUSE* pEHclause);
+#ifndef DACCESS_COMPILE
+ virtual TypeHandle ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
+ CrawlFrame *pCf);
+#endif // #ifndef DACCESS_COMPILE
+ PTR_VOID GetGCInfo(const METHODTOKEN& MethodToken);
+#ifndef DACCESS_COMPILE
+ void RemoveJitData(CodeHeader * pCHdr, size_t GCinfo_len, size_t EHinfo_len);
+
+ void Unload(LoaderAllocator* pAllocator);
+ void CleanupCodeHeaps();
+
+ BOOL LoadJIT();
+
+ CodeHeader* allocCode(MethodDesc* pFD, size_t blockSize, CorJitAllocMemFlag flag
+#ifdef WIN64EXCEPTIONS
+ , UINT nUnwindInfos
+ , TADDR * pModuleBase
+#endif
+ );
+ void allocEntryChunk(MethodDescChunk *pMDChunk);
+ BYTE * allocGCInfo(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize);
+ EE_ILEXCEPTION* allocEHInfo(CodeHeader* pCodeHeader, unsigned numClauses, size_t * pAllocationSize);
+ JumpStubBlockHeader* allocJumpStubBlock(MethodDesc* pMD, DWORD numJumps,
+ BYTE * loAddr, BYTE * hiAddr,
+ LoaderAllocator *pLoaderAllocator);
+
+ void * allocCodeFragmentBlock(size_t blockSize, unsigned alignment, LoaderAllocator *pLoaderAllocator, StubCodeBlockKind kind);
+#endif // #ifndef DACCESS_COMPILE
+
+ static CodeHeader * GetCodeHeader(const METHODTOKEN& MethodToken);
+ static CodeHeader * GetCodeHeaderFromStartAddress(TADDR methodStartAddress);
+
+#if defined(WIN64EXCEPTIONS)
+ // Compute function entry lazily. Do not call directly. Use EECodeInfo::GetFunctionEntry instead.
+ virtual PTR_RUNTIME_FUNCTION LazyGetFunctionEntry(EECodeInfo * pCodeInfo);
+
+ virtual DWORD GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength);
+#endif // WIN64EXCEPTIONS
+
+ virtual StubCodeBlockKind GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC);
+
+#if defined(DACCESS_COMPILE)
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ virtual void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD);
+#if defined(WIN64EXCEPTIONS)
+ // Enumerate the memory necessary to retrieve the unwind info for a specific method
+ virtual void EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo)
+ {
+ // We don't need to explicitly enumerate the memory for unwind information for JITted methods because
+ // it is stored using the Win64 standard dynamic function table mechanism, and dump generation code knows
+ // it needs to call our code:OutOfProcessFunctionTableCallback in order to save the function table including
+ // unwind information at dump generation time (since it's dynamic, it will not be otherwise
+ // available at debug time).
+ }
+#endif // WIN64EXCEPTIONS
+#endif // DACCESS_COMPILE
+
+ // Heap Management functions
+ void NibbleMapSet(HeapList * pHp, TADDR pCode, BOOL bSet);
+
+ static TADDR FindMethodCode(RangeSection * pRangeSection, PCODE currentPC);
+ static TADDR FindMethodCode(PCODE currentPC);
+
+#ifndef DACCESS_COMPILE
+ void FreeCodeMemory(HostCodeHeap *pCodeHeap, void * codeStart);
+ void RemoveFromCleanupList(HostCodeHeap *pCodeHeap);
+ void AddToCleanupList(HostCodeHeap *pCodeHeap);
+ void DeleteCodeHeap(HeapList *pHeapList);
+ void RemoveCodeHeapFromDomainList(CodeHeap *pHeap, LoaderAllocator *pAllocator);
+#endif
+
+private :
+ struct DomainCodeHeapList {
+ LoaderAllocator *m_pAllocator;
+ CDynArray<HeapList *> m_CodeHeapList;
+ DomainCodeHeapList();
+ ~DomainCodeHeapList();
+ };
+#ifndef DACCESS_COMPILE
+ HeapList* NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapList *pADHeapList);
+ HeapList* GetCodeHeap(CodeHeapRequestInfo *pInfo);
+ bool CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCodeHeap);
+ void* allocCodeRaw(CodeHeapRequestInfo *pInfo,
+ size_t header, size_t blockSize, unsigned align,
+ HeapList ** ppCodeHeap /* Writeback, Can be null */ );
+
+ DomainCodeHeapList *GetCodeHeapList(MethodDesc *pMD, LoaderAllocator *pAllocator, BOOL fDynamicOnly = FALSE);
+ DomainCodeHeapList *CreateCodeHeapList(CodeHeapRequestInfo *pInfo);
+ LoaderHeap* GetJitMetaHeap(MethodDesc *pMD);
+
+ HeapList * GetCodeHeapList()
+ {
+ return m_pCodeHeap;
+ }
+
+protected :
+ void * allocEHInfoRaw(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize);
+private :
+#endif // #ifndef DACCESS_COMPILE
+
+ PTR_HeapList m_pCodeHeap;
+
+protected :
+ Crst m_CodeHeapCritSec;
+
+#if !defined(DACCESS_COMPILE)
+public:
+ class CodeHeapIterator
+ {
+ CrstHolder m_lockHolder;
+ HeapList *m_pHeapList;
+ LoaderAllocator *m_pLoaderAllocator;
+ BaseDomain *m_pDomain;
+ MethodSectionIterator m_Iterator;
+ MethodDesc *m_pCurrent;
+
+ public:
+ CodeHeapIterator(BaseDomain *pDomainFilter = NULL, LoaderAllocator *pLoaderAllocatorFilter = NULL);
+ ~CodeHeapIterator();
+ BOOL Next();
+
+ MethodDesc *GetMethod()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCurrent;
+ }
+
+ TADDR GetMethodCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (TADDR)m_Iterator.GetMethodCode();
+ }
+ };
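+
+ // Illustrative usage sketch (an assumption for clarity, not part of the
+ // original source): walk the jitted methods, optionally filtered by domain.
+ //
+ //     EEJitManager::CodeHeapIterator it(/* pDomainFilter */ NULL);
+ //     while (it.Next())
+ //     {
+ //         MethodDesc *pMD = it.GetMethod(); // check for NULL before use
+ //         TADDR pCode = it.GetMethodCode();
+ //     }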
+#endif // !DACCESS_COMPILE
+
+private:
+ DWORD m_dwCPUCompileFlags;
+
+ void SetCpuInfo();
+
+public:
+ inline DWORD GetCPUCompileFlags()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwCPUCompileFlags;
+ }
+
+private :
+ PTR_HostCodeHeap m_cleanupList;
+ // When EH clauses are resolved, we need to atomically update the TypeHandle.
+ Crst m_EHClauseCritSec;
+
+ // Must hold the critical section to access this structure.
+ CUnorderedArray<DomainCodeHeapList *, 5> m_DomainCodeHeaps;
+ CUnorderedArray<DomainCodeHeapList *, 5> m_DynamicDomainCodeHeaps;
+
+public:
+ ICorJitCompiler * m_jit;
+ HINSTANCE m_JITCompiler;
+#ifdef _TARGET_AMD64_
+ HINSTANCE m_JITCompilerOther; // Stores the handle of the legacy JIT, if one is loaded.
+#endif
+
+#ifdef ALLOW_SXS_JIT
+ // Put these at the end so that we don't mess up the offsets in the DAC.
+ ICorJitCompiler * m_alternateJit;
+ HINSTANCE m_AltJITCompiler;
+ bool m_AltJITRequired;
+#endif //ALLOW_SXS_JIT
+};
+
+
+//*****************************************************************************
+//
+// This class manages IJitManagers and ICorJitCompilers. It has only static
+// members. It should never be constructed.
+//
+//*****************************************************************************
+
+class ExecutionManager
+{
+ friend class CorExternalDataAccess;
+
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+#endif
+
+public:
+ static void Init();
+
+ enum ScanFlag
+ {
+ // When this is passed to a function, it must directly acquire a reader lock
+ // before it may continue
+ ScanReaderLock,
+
+ // This means the function need not directly acquire a reader lock; however, it
+ // may call other functions that may require other reader locks (e.g.,
+ // ExecutionManager::FindJitMan may be called with ScanNoReaderLock, but
+ // still calls IJitManager::JitCodeToMethodInfo which acquires its own
+ // IJitManager reader lock)
+ ScanNoReaderLock
+ };
+
+ // Returns default scan flag for current thread
+ static ScanFlag GetScanFlags();
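+
+ // Illustrative sketch (an assumption, not part of the original source) of how
+ // the scan flags are typically threaded through a lookup:
+ //
+ //     RangeSection * pRS = FindCodeRange(ip, GetScanFlags());
+ //     IJitManager * pJM = (pRS != NULL) ? pRS->pjit : NULL;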
+
+ // Returns whether currentPC is in managed code. Returns false for jump stubs on WIN64.
+ static BOOL IsManagedCode(PCODE currentPC);
+
+ // Special version with profiler hook
+ static BOOL IsManagedCode(PCODE currentPC, HostCallPreference hostCallPreference, BOOL *pfFailedReaderLock);
+
+ // Returns methodDesc for given PC
+ static MethodDesc * GetCodeMethodDesc(PCODE currentPC);
+
+ static IJitManager* FindJitMan(PCODE currentPC)
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ RangeSection * pRange = FindCodeRange(currentPC, GetScanFlags());
+ return (pRange != NULL) ? pRange->pjit : NULL;
+ }
+
+ static RangeSection * FindCodeRange(PCODE currentPC, ScanFlag scanFlag);
+
+ static BOOL IsCollectibleMethod(const METHODTOKEN& MethodToken);
+
+ class ReaderLockHolder
+ {
+ public:
+ ReaderLockHolder(HostCallPreference hostCallPreference = AllowHostCalls);
+ ~ReaderLockHolder();
+
+ BOOL Acquired();
+ };
+
+#ifdef _WIN64
+ static ULONG GetCLRPersonalityRoutineValue()
+ {
+ LIMITED_METHOD_CONTRACT;
+ static_assert_no_msg(offsetof(HeapList, CLRPersonalityRoutine) ==
+ (size_t)((ULONG)offsetof(HeapList, CLRPersonalityRoutine)));
+ return offsetof(HeapList, CLRPersonalityRoutine);
+ }
+#endif // _WIN64
+
+ static EEJitManager * GetEEJitManager()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pEEJitManager;
+ }
+
+#ifdef FEATURE_PREJIT
+ static NativeImageJitManager * GetNativeImageJitManager()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pNativeImageJitManager;
+ }
+#endif
+
+#ifdef FEATURE_READYTORUN
+ static ReadyToRunJitManager * GetReadyToRunJitManager()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pReadyToRunJitManager;
+ }
+#endif
+
+ static void ClearCaches( void );
+ static BOOL IsCacheCleanupRequired();
+
+ static LPWSTR GetJitName();
+
+ static void Unload(LoaderAllocator *pLoaderAllocator);
+
+ static void AddCodeRange(TADDR StartRange, TADDR EndRange,
+ IJitManager* pJit,
+ RangeSection::RangeSectionFlags flags,
+ void * pHp);
+
+ static void AddNativeImageRange(TADDR StartRange,
+ SIZE_T Size,
+ Module * pModule);
+
+ static void DeleteRange(TADDR StartRange);
+
+ static void CleanupCodeHeaps();
+
+ static ICodeManager* GetDefaultCodeManager()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (ICodeManager *)m_pDefaultCodeMan;
+ }
+
+ static PTR_Module FindZapModule(TADDR currentData);
+
+ // FindZapModule flavor to be used during GC to find GCRefMap
+ static PTR_Module FindModuleForGCRefMap(TADDR currentData);
+
+ static RangeSection* GetRangeSectionAndPrev(RangeSection *pRS, TADDR addr, RangeSection **ppPrev);
+
+#ifdef DACCESS_COMPILE
+ static void EnumRangeList(RangeSection* list,
+ CLRDataEnumMemoryFlags flags);
+ static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+#ifndef DACCESS_COMPILE
+ static PCODE jumpStub(MethodDesc* pMD,
+ PCODE target,
+ BYTE * loAddr,
+ BYTE * hiAddr,
+ LoaderAllocator *pLoaderAllocator = NULL);
+#endif
+
+private :
+ static RangeSection * FindCodeRangeWithLock(PCODE currentPC);
+
+ static BOOL IsManagedCodeWithLock(PCODE currentPC);
+ static BOOL IsManagedCodeWorker(PCODE currentPC);
+
+ static RangeSection* GetRangeSection(TADDR addr);
+
+ SPTR_DECL(EECodeManager, m_pDefaultCodeMan);
+
+ SPTR_DECL(EEJitManager, m_pEEJitManager);
+#ifdef FEATURE_PREJIT
+ SPTR_DECL(NativeImageJitManager, m_pNativeImageJitManager);
+#endif
+#ifdef FEATURE_READYTORUN
+ SPTR_DECL(ReadyToRunJitManager, m_pReadyToRunJitManager);
+#endif
+
+ static CrstStatic m_JumpStubCrst;
+ static CrstStatic m_RangeCrst; // Acquire before writing into m_CodeRangeList and m_DataRangeList
+
+ // Infrastructure to manage readers so we can lock them out and delete domain data.
+ // Make the reader count volatile because we have an order dependency in READER_INCREMENT.
+#ifndef DACCESS_COMPILE
+ static Volatile<RangeSection *> m_CodeRangeList;
+ static Volatile<LONG> m_dwReaderCount;
+ static Volatile<LONG> m_dwWriterLock;
+#else
+ SPTR_DECL(RangeSection, m_CodeRangeList);
+ SVAL_DECL(LONG, m_dwReaderCount);
+ SVAL_DECL(LONG, m_dwWriterLock);
+#endif
+
+#ifndef DACCESS_COMPILE
+ class WriterLockHolder
+ {
+ public:
+ WriterLockHolder();
+ ~WriterLockHolder();
+ };
+#endif
+
+#if defined(_DEBUG)
+ // The LOCK_TAKEN/RELEASED macros need a "pointer" to the lock object to do
+ // comparisons between takes & releases (and to provide debugging info to the
+ // developer). Since Inc/Dec Reader/Writer are static, there's no object to
+ // use. So we just use the pointer to m_dwReaderCount. Note that both
+ // readers & writers use this same pointer, which follows the general convention
+ // of other ReaderWriter locks in the EE code base: each reader/writer locking object
+ // instance protects only 1 piece of data or code. Readers & writers both access the
+ // same locking object & shared resource, so conceptually they would share the same
+ // lock pointer.
+ static void * GetPtrForLockContract()
+ {
+ return (void *) &m_dwReaderCount;
+ }
+#endif // defined(_DEBUG)
+
+ static void AddRangeHelper(TADDR StartRange,
+ TADDR EndRange,
+ IJitManager* pJit,
+ RangeSection::RangeSectionFlags flags,
+ TADDR pHeapListOrZapModule);
+ static void DeleteRangeHelper(RangeSection** ppRangeList,
+ TADDR StartRange);
+
+#ifndef DACCESS_COMPILE
+ static PCODE getNextJumpStub(MethodDesc* pMD,
+ PCODE target,
+ BYTE * loAddr, BYTE * hiAddr,
+ LoaderAllocator *pLoaderAllocator);
+#endif
+
+private:
+ // ***************************************************************************
+ // Hashtable for JumpStubs for jitted code
+
+ struct JumpStubEntry {
+ PCODE m_target;
+ PCODE m_jumpStub;
+ };
+
+ class JumpStubTraits : public DefaultSHashTraits<JumpStubEntry>
+ {
+ public:
+ typedef PCODE key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e.m_target;
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return k1 == k2;
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef _WIN64
+ return (count_t) ((size_t) k ^ ((size_t) k >> 32));
+#else
+ return (count_t)(size_t)k;
+#endif
+ }
+
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; JumpStubEntry e; e.m_target = NULL; e.m_jumpStub = NULL; return e; }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e.m_target == NULL; }
+ static const element_t Deleted() { LIMITED_METHOD_CONTRACT; JumpStubEntry e; e.m_target = (PCODE)-1; e.m_jumpStub = NULL; return e; }
+ static bool IsDeleted(const element_t &e) { LIMITED_METHOD_CONTRACT; return e.m_target == (PCODE)-1; }
+ };
+ typedef SHash<JumpStubTraits> JumpStubTable;
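+
+ // Worked example for the 64-bit Hash above (illustration only): folding the
+ // high half into the low half lets both halves of the target address
+ // contribute to the bucket index. For k = 0x00007FFA12345678:
+ //     k >> 32 = 0x0000000000007FFA
+ //     (count_t)(k ^ (k >> 32)) = 0x12342982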
+
+ struct JumpStubCache
+ {
+ JumpStubCache()
+ : m_pBlocks(NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ JumpStubBlockHeader * m_pBlocks;
+ JumpStubTable m_Table;
+ };
+};
+
+inline CodeHeader * EEJitManager::GetCodeHeader(const METHODTOKEN& MethodToken)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(!MethodToken.IsNull());
+ return dac_cast<PTR_CodeHeader>(MethodToken.m_pCodeHeader);
+}
+
+inline CodeHeader * EEJitManager::GetCodeHeaderFromStartAddress(TADDR methodStartAddress)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(methodStartAddress != NULL);
+ ARM_ONLY(_ASSERTE((methodStartAddress & THUMB_CODE) == 0));
+ return dac_cast<PTR_CodeHeader>(methodStartAddress - sizeof(CodeHeader));
+}
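+
+// Memory layout assumed by the arithmetic above (sketch): the CodeHeader is
+// placed immediately before the jitted code it describes, so subtracting
+// sizeof(CodeHeader) from the code start address recovers the header:
+//
+//     [ CodeHeader | method code ... ]
+//                  ^
+//                  methodStartAddress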
+
+inline TADDR EEJitManager::JitTokenToStartAddress(const METHODTOKEN& MethodToken)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ CodeHeader * pCH = GetCodeHeader(MethodToken);
+ return pCH->GetCodeStartAddress();
+}
+
+inline void EEJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken,
+ MethodRegionInfo * methodRegionInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ PRECONDITION(methodRegionInfo != NULL);
+ } CONTRACTL_END;
+
+ methodRegionInfo->hotStartAddress = JitTokenToStartAddress(MethodToken);
+ methodRegionInfo->hotSize = GetCodeManager()->GetFunctionSize(GetGCInfo(MethodToken));
+ methodRegionInfo->coldStartAddress = 0;
+ methodRegionInfo->coldSize = 0;
+}
+
+
+//-----------------------------------------------------------------------------
+#ifdef FEATURE_PREJIT
+
+//*****************************************************************************
+// Stub JitManager for managed native (NGen) images.
+
+class NativeImageJitManager : public IJitManager
+{
+ VPTR_VTABLE_CLASS(NativeImageJitManager, IJitManager)
+
+public:
+#ifndef DACCESS_COMPILE
+ NativeImageJitManager();
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual DWORD GetCodeType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (miManaged | miNative);
+ }
+
+ // Used to read debug info.
+ virtual BOOL GetBoundariesAndVars(
+ const DebugInfoRequest & request,
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ OUT ULONG32 * pcMap,
+ OUT ICorDebugInfo::OffsetMapping **ppMap,
+ OUT ULONG32 * pcVars,
+ OUT ICorDebugInfo::NativeVarInfo **ppVars);
+
+ virtual BOOL JitCodeToMethodInfo(RangeSection * pRangeSection,
+ PCODE currentPC,
+ MethodDesc ** ppMethodDesc,
+ EECodeInfo * pCodeInfo);
+
+ virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset);
+
+ static PTR_Module JitTokenToZapModule(const METHODTOKEN& MethodToken);
+ virtual TADDR JitTokenToStartAddress(const METHODTOKEN& MethodToken);
+ virtual void JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, MethodRegionInfo * methodRegionInfo);
+
+ virtual unsigned InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState);
+
+ virtual PTR_EXCEPTION_CLAUSE_TOKEN GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
+ EE_ILEXCEPTION_CLAUSE* pEHclause);
+
+#ifndef DACCESS_COMPILE
+ virtual TypeHandle ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
+ CrawlFrame *pCf);
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual PTR_VOID GetGCInfo(const METHODTOKEN& MethodToken);
+
+#if defined(WIN64EXCEPTIONS)
+ virtual PTR_RUNTIME_FUNCTION LazyGetFunctionEntry(EECodeInfo * pCodeInfo);
+
+ virtual TADDR GetFuncletStartAddress(EECodeInfo * pCodeInfo);
+ virtual DWORD GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength);
+ virtual BOOL IsFilterFunclet(EECodeInfo * pCodeInfo);
+#endif // WIN64EXCEPTIONS
+
+ virtual StubCodeBlockKind GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC);
+
+#if defined(DACCESS_COMPILE)
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ virtual void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD);
+#if defined(WIN64EXCEPTIONS)
+ // Enumerate the memory necessary to retrieve the unwind info for a specific method
+ virtual void EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo);
+#endif //WIN64EXCEPTIONS
+#endif //DACCESS_COMPILE
+};
+
+class NativeExceptionInfoLookupTable
+{
+public:
+ static DWORD LookupExceptionInfoRVAForMethod(PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pTable,
+ COUNT_T numLookupEntries,
+ DWORD methodStartRVA,
+ COUNT_T* pSize);
+};
+
+class NativeUnwindInfoLookupTable
+{
+public:
+ static int LookupUnwindInfoForMethod(DWORD codeOffset,
+ PTR_RUNTIME_FUNCTION pRuntimeFunctionTable,
+ int StartIndex,
+ int EndIndex);
+
+ static BOOL HasExceptionInfo(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction);
+ static PTR_MethodDesc GetMethodDesc(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction, TADDR moduleBase);
+
+private:
+ static DWORD GetMethodDescRVA(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction);
+};
+
+inline TADDR NativeImageJitManager::JitTokenToStartAddress(const METHODTOKEN& MethodToken)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ return JitTokenToModuleBase(MethodToken) +
+ RUNTIME_FUNCTION__BeginAddress(dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader));
+}
+
+#endif // FEATURE_PREJIT
+
+#ifdef FEATURE_READYTORUN
+
+class ReadyToRunJitManager : public IJitManager
+{
+ VPTR_VTABLE_CLASS(ReadyToRunJitManager, IJitManager)
+
+public:
+#ifndef DACCESS_COMPILE
+ ReadyToRunJitManager();
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual DWORD GetCodeType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (miManaged | miNative);
+ }
+
+ // Used to read debug info.
+ virtual BOOL GetBoundariesAndVars(
+ const DebugInfoRequest & request,
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ OUT ULONG32 * pcMap,
+ OUT ICorDebugInfo::OffsetMapping **ppMap,
+ OUT ULONG32 * pcVars,
+ OUT ICorDebugInfo::NativeVarInfo **ppVars);
+
+ virtual BOOL JitCodeToMethodInfo(RangeSection * pRangeSection,
+ PCODE currentPC,
+ MethodDesc** ppMethodDesc,
+ OUT EECodeInfo * pCodeInfo);
+
+ virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset);
+
+ static ReadyToRunInfo * JitTokenToReadyToRunInfo(const METHODTOKEN& MethodToken);
+ static PTR_RUNTIME_FUNCTION JitTokenToRuntimeFunction(const METHODTOKEN& MethodToken);
+
+ virtual TADDR JitTokenToStartAddress(const METHODTOKEN& MethodToken);
+ virtual void JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, MethodRegionInfo * methodRegionInfo);
+
+ virtual unsigned InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState);
+
+ virtual PTR_EXCEPTION_CLAUSE_TOKEN GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
+ EE_ILEXCEPTION_CLAUSE* pEHclause);
+
+#ifndef DACCESS_COMPILE
+ virtual TypeHandle ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
+ CrawlFrame *pCf);
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual PTR_VOID GetGCInfo(const METHODTOKEN& MethodToken);
+
+#if defined(WIN64EXCEPTIONS)
+ virtual PTR_RUNTIME_FUNCTION LazyGetFunctionEntry(EECodeInfo * pCodeInfo);
+
+ virtual TADDR GetFuncletStartAddress(EECodeInfo * pCodeInfo);
+ virtual DWORD GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength);
+ virtual BOOL IsFilterFunclet(EECodeInfo * pCodeInfo);
+#endif // WIN64EXCEPTIONS
+
+ virtual StubCodeBlockKind GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC);
+
+#if defined(DACCESS_COMPILE)
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ virtual void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD);
+#if defined(WIN64EXCEPTIONS)
+ // Enumerate the memory necessary to retrieve the unwind info for a specific method
+ virtual void EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo);
+#endif //WIN64EXCEPTIONS
+#endif //DACCESS_COMPILE
+};
+
+#endif
+
+//*****************************************************************************
+// EECodeInfo provides information about code at a particular address:
+// - Start of the method and relative offset
+// - GC Info of the method
+// etc.
+//
+// EECodeInfo caches information from IJitManager and thus avoids
+// querying IJitManager repeatedly for the same data.
+//
+class EECodeInfo
+{
+ friend BOOL EEJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection, PCODE currentPC, MethodDesc** ppMethodDesc, EECodeInfo * pCodeInfo);
+ friend BOOL NativeImageJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection, PCODE currentPC, MethodDesc** ppMethodDesc, EECodeInfo * pCodeInfo);
+#ifdef FEATURE_READYTORUN
+ friend BOOL ReadyToRunJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection, PCODE currentPC, MethodDesc** ppMethodDesc, EECodeInfo * pCodeInfo);
+#endif
+
+public:
+ EECodeInfo();
+
+ EECodeInfo(PCODE codeAddress)
+ {
+ Init(codeAddress);
+ }
+
+ // Explicit initialization
+ void Init(PCODE codeAddress);
+ void Init(PCODE codeAddress, ExecutionManager::ScanFlag scanFlag);
+
+ TADDR GetSavedMethodCode();
+
+ TADDR GetStartAddress();
+
+ BOOL IsValid()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pJM != NULL;
+ }
+
+ IJitManager* GetJitManager()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(m_pJM != NULL);
+ return m_pJM;
+ }
+
+ ICodeManager* GetCodeManager()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetJitManager()->GetCodeManager();
+ }
+
+ const METHODTOKEN& GetMethodToken()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_methodToken;
+ }
+
+ // This returns a pointer to the start of an instruction; conceptually, a PINSTR.
+ TADDR GetCodeAddress()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return PCODEToPINSTR(m_codeAddress);
+ }
+
+ MethodDesc * GetMethodDesc()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pMD;
+ }
+
+ DWORD GetRelOffset()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_relOffset;
+ }
+
+ PTR_VOID GetGCInfo()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetJitManager()->GetGCInfo(GetMethodToken());
+ }
+
+ void GetMethodRegionInfo(IJitManager::MethodRegionInfo *methodRegionInfo)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetJitManager()->JitTokenToMethodRegionInfo(GetMethodToken(), methodRegionInfo);
+ }
+
+ TADDR GetModuleBase()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetJitManager()->JitTokenToModuleBase(GetMethodToken());
+ }
+
+#ifdef WIN64EXCEPTIONS
+ PTR_RUNTIME_FUNCTION GetFunctionEntry();
+ BOOL IsFunclet() { WRAPPER_NO_CONTRACT; return GetJitManager()->IsFunclet(this); }
+ EECodeInfo GetMainFunctionInfo();
+ ULONG GetFixedStackSize();
+
+#if defined(_TARGET_AMD64_)
+ BOOL HasFrameRegister();
+#endif // _TARGET_AMD64_
+
+#else // WIN64EXCEPTIONS
+ ULONG GetFixedStackSize()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetCodeManager()->GetFrameSize(GetGCInfo());
+ }
+#endif // WIN64EXCEPTIONS
+
+#if defined(_TARGET_AMD64_)
+ void GetOffsetsFromUnwindInfo(ULONG* pRSPOffset, ULONG* pRBPOffset);
+
+#if defined(_DEBUG) && defined(HAVE_GCCOVER)
+ // Find first funclet inside (pvFuncletStart, pvFuncletStart + cbCode)
+ static LPVOID findNextFunclet (LPVOID pvFuncletStart, SIZE_T cbCode, LPVOID *ppvFuncletEnd);
+#endif // _DEBUG && HAVE_GCCOVER
+#endif // _TARGET_AMD64_
+
+private:
+ PCODE m_codeAddress;
+ METHODTOKEN m_methodToken;
+ MethodDesc *m_pMD;
+ IJitManager *m_pJM;
+ DWORD m_relOffset;
+#ifdef WIN64EXCEPTIONS
+ PTR_RUNTIME_FUNCTION m_pFunctionEntry;
+#endif // WIN64EXCEPTIONS
+
+#ifdef _TARGET_AMD64_
+ // Simple helper to return a pointer to the UNWIND_INFO given the offset to the unwind info.
+ UNWIND_INFO * GetUnwindInfoHelper(ULONG unwindInfoOffset);
+#endif // _TARGET_AMD64_
+};
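+
+// Minimal usage sketch (an illustration, not part of the original source):
+// resolve an instruction pointer to its method and native offset.
+//
+//     EECodeInfo codeInfo(ip); // ip is a PCODE inside managed code
+//     if (codeInfo.IsValid())
+//     {
+//         MethodDesc * pMD = codeInfo.GetMethodDesc();
+//         DWORD relOffset = codeInfo.GetRelOffset(); // native offset from method start
+//     }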
+
+#include "codeman.inl"
+
+
+#ifdef FEATURE_PREJIT
+class MethodSectionIterator;
+
+//
+// MethodIterator class is used to iterate all the methods in an ngen image.
+// It will match and report hot (and cold, if any) sections of a method at the same time.
+//
+class MethodIterator
+{
+public:
+ enum MethodIteratorOptions
+ {
+ Hot = 0x1,
+ Unprofiled = 0x2,
+ All = Hot | Unprofiled
+ };
+private:
+ TADDR m_ModuleBase;
+ MethodIteratorOptions methodIteratorOptions;
+
+ NGenLayoutInfo * m_pNgenLayout;
+ BOOL m_fHotMethodsDone;
+ COUNT_T m_CurrentRuntimeFunctionIndex;
+ COUNT_T m_CurrentColdRuntimeFunctionIndex;
+
+ void Init(PTR_Module pModule, PEDecoder * pPEDecoder, MethodIteratorOptions mio);
+
+ public:
+ MethodIterator(PTR_Module pModule, MethodIteratorOptions mio = All);
+ MethodIterator(PTR_Module pModule, PEDecoder * pPEDecoder, MethodIteratorOptions mio = All);
+
+ BOOL Next();
+
+ PTR_MethodDesc GetMethodDesc();
+ PTR_VOID GetGCInfo();
+ TADDR GetMethodStartAddress();
+ TADDR GetMethodColdStartAddress();
+ ULONG GetHotCodeSize();
+
+ PTR_RUNTIME_FUNCTION GetRuntimeFunction();
+
+ void GetMethodRegionInfo(IJitManager::MethodRegionInfo *methodRegionInfo);
+};
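+
+// Illustrative usage sketch (an assumption, not part of the original source):
+//
+//     MethodIterator mi(pModule); // defaults to iterating all methods (MethodIterator::All)
+//     while (mi.Next())
+//     {
+//         PTR_MethodDesc pMD = mi.GetMethodDesc();
+//         TADDR hotStart = mi.GetMethodStartAddress();
+//     }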
+#endif //FEATURE_PREJIT
+
+void ThrowOutOfMemoryWithinRange();
+
+#endif // !__CODEMAN_HPP__
diff --git a/src/vm/codeman.inl b/src/vm/codeman.inl
new file mode 100644
index 0000000000..da6de9fec6
--- /dev/null
+++ b/src/vm/codeman.inl
@@ -0,0 +1,17 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+inline BOOL ExecutionManager::IsCollectibleMethod(const METHODTOKEN& MethodToken)
+{
+ WRAPPER_NO_CONTRACT;
+ return MethodToken.m_pRangeSection->flags & RangeSection::RANGE_SECTION_COLLECTIBLE;
+}
+
+inline TADDR IJitManager::JitTokenToModuleBase(const METHODTOKEN& MethodToken)
+{
+ return MethodToken.m_pRangeSection->LowAddress;
+}
diff --git a/src/vm/comcache.cpp b/src/vm/comcache.cpp
new file mode 100644
index 0000000000..efb429cae0
--- /dev/null
+++ b/src/vm/comcache.cpp
@@ -0,0 +1,1627 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#include "common.h"
+
+#include <crtwrap.h>
+#include "comcache.h"
+#include "runtimecallablewrapper.h"
+#include "mtx.h"
+#include "contxt.h"
+#include "ctxtcall.h"
+#include "win32threadpool.h"
+#include "mdaassistants.h"
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+#include "olecontexthelpers.h"
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+
+//================================================================
+// Guid definitions.
+const GUID IID_IEnterActivityWithNoLock = { 0xd7174f82, 0x36b8, 0x4aa8, { 0x80, 0x0a, 0xe9, 0x63, 0xab, 0x2d, 0xfa, 0xb9 } };
+const GUID IID_IFuncEvalAbort = { 0xde6844f6, 0x95ac, 0x4e83, { 0x90, 0x8d, 0x9b, 0x1b, 0xea, 0x2f, 0xe0, 0x8c } };
+
+// Sanity check, to find stress bug #82137
+VOID IUnkEntry::CheckValidIUnkEntry()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (IsDisconnected())
+ {
+ COMPlusThrow(kInvalidComObjectException, IDS_EE_COM_OBJECT_NO_LONGER_HAS_WRAPPER);
+ }
+}
+
+// Version that returns an HR instead of throwing.
+HRESULT IUnkEntry::HRCheckValidIUnkEntry()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (IsDisconnected())
+ {
+ return COR_E_INVALIDCOMOBJECT;
+ }
+
+ return S_OK;
+}
+
+// Returns IErrorInfo corresponding to the exception injected by the debugger to abort a func eval,
+// or NULL if there is no such exception.
+static IErrorInfo *CheckForFuncEvalAbortNoThrow(HRESULT hr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // the managed exception thrown by the debugger is translated to EXCEPTION_COMPLUS by COM
+ if (hr == EXCEPTION_COMPLUS)
+ {
+ GCX_PREEMP();
+
+ // we recognize the ones thrown by the debugger by QI'ing the IErrorInfo for a special IID
+ ReleaseHolder<IErrorInfo> pErrorInfo;
+ if (SafeGetErrorInfo(&pErrorInfo) == S_OK)
+ {
+ ReleaseHolder<IUnknown> pUnk;
+ if (SafeQueryInterface(pErrorInfo, IID_IFuncEvalAbort, &pUnk) == S_OK)
+ {
+ // QI succeeded, this is a func eval abort
+ return pErrorInfo.Extract();
+ }
+ else
+ {
+ // QI failed, put the IErrorInfo back
+ LeaveRuntimeHolderNoThrow lrh((size_t)SetErrorInfo);
+ if (SUCCEEDED(lrh.GetHR()))
+ {
+ SetErrorInfo(0, pErrorInfo);
+ }
+ }
+ }
+ }
+
+ return NULL;
+}
+
+// Rethrows the exception injected by the debugger to abort a func eval, or does nothing if there is no such exception.
+static void CheckForFuncEvalAbort(HRESULT hr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ IErrorInfo *pErrorInfo = CheckForFuncEvalAbortNoThrow(hr);
+ if (pErrorInfo != NULL)
+ {
+ // COMPlusThrowHR internally releases the pErrorInfo
+ COMPlusThrowHR(hr, pErrorInfo);
+ }
+}
+
+//+-------------------------------------------------------------------------
+//
+// Function: STDAPI_(LPSTREAM) CreateMemStm(DWORD cb, BYTE** ppBuf))
+// Create a stream in the memory
+//
+STDAPI_(LPSTREAM) CreateMemStm(DWORD cb, BYTE** ppBuf)
+{
+ CONTRACT(LPSTREAM)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(CONTRACT_RETURN NULL);
+ PRECONDITION(CheckPointer(ppBuf, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ LPSTREAM pstm = NULL;
+
+ BYTE* pMem = new(nothrow) BYTE[cb];
+ if (pMem)
+ {
+ HRESULT hr = CInMemoryStream::CreateStreamOnMemory(pMem, cb, &pstm, TRUE);
+ _ASSERTE(hr == S_OK || pstm == NULL);
+ }
+
+ if(ppBuf)
+ *ppBuf = pMem;
+
+ RETURN pstm;
+}
+
+//=====================================================================
+// HRESULT wCoMarshalInterThreadInterfaceInStream
+HRESULT wCoMarshalInterThreadInterfaceInStream(REFIID riid,
+ LPUNKNOWN pUnk,
+ LPSTREAM *ppStm)
+{
+#ifdef PLATFORM_CE
+ return E_NOTIMPL;
+#endif // PLATFORM_CE
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(ppStm));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ LPSTREAM pStm = NULL;
+
+ DWORD mshlFlgs = MSHLFLAGS_NORMAL;
+
+ ULONG lSize;
+ hr = CoGetMarshalSizeMax(&lSize, IID_IUnknown, pUnk, MSHCTX_INPROC, NULL, mshlFlgs);
+
+ if (hr == S_OK)
+ {
+ // Create a stream
+ pStm = CreateMemStm(lSize, NULL);
+
+ if (pStm != NULL)
+ {
+ // Marshal the interface into the stream TABLE STRONG
+ hr = CoMarshalInterface(pStm, riid, pUnk, MSHCTX_INPROC, NULL, mshlFlgs);
+ }
+ else
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ // Reset the stream to the beginning
+ LARGE_INTEGER li;
+ LISet32(li, 0);
+ ULARGE_INTEGER li2;
+ pStm->Seek(li, STREAM_SEEK_SET, &li2);
+
+ // Set the return value
+ *ppStm = pStm;
+ }
+ else
+ {
+ // Cleanup if failure
+ SafeReleasePreemp(pStm);
+ *ppStm = NULL;
+ }
+
+ // Return the result
+ return hr;
+}
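+
+// Illustrative usage sketch (an assumption, not part of the original source):
+// marshal an interface into a stream in one context, then rehydrate a proxy
+// in another context with the standard COM unmarshal call.
+//
+//     LPSTREAM pStm = NULL;
+//     HRESULT hr = wCoMarshalInterThreadInterfaceInStream(IID_IUnknown, pUnk, &pStm);
+//     // ... later, from a different apartment/context ...
+//     IUnknown *pProxy = NULL;
+//     if (SUCCEEDED(hr) && pStm != NULL)
+//         hr = CoUnmarshalInterface(pStm, IID_IUnknown, (void **)&pProxy);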
+
+//================================================================
+// Struct passed in to DoCallback.
+struct CtxEntryEnterContextCallbackData
+{
+ PFNCTXCALLBACK m_pUserCallbackFunc;
+ LPVOID m_pUserData;
+ LPVOID m_pCtxCookie;
+ HRESULT m_UserCallbackHR;
+#ifdef MDA_SUPPORTED
+ CLREvent* m_hTimeoutEvent;
+#endif
+};
+
+//================================================================
+// Static members.
+CtxEntryCache* CtxEntryCache::s_pCtxEntryCache = NULL;
+
+CtxEntryCache::CtxEntryCache()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_Lock.Init(LOCK_COMCTXENTRYCACHE);
+ LockOwner lock = {&m_Lock, IsOwnerOfSpinLock};
+}
+
+CtxEntryCache::~CtxEntryCache()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for (SHash<CtxEntryCacheTraits>::Iterator it = m_CtxEntryHash.Begin(); it != m_CtxEntryHash.End(); it++)
+ {
+ CtxEntry *pCtxEntry = (CtxEntry *)*it;
+ _ASSERTE(pCtxEntry);
+ LPVOID CtxCookie = pCtxEntry->GetCtxCookie();
+ m_CtxEntryHash.Remove(CtxCookie);
+
+ LOG((LF_INTEROP, LL_INFO100, "Leaked CtxEntry %8.8x with CtxCookie %8.8x, ref count %d\n", pCtxEntry, pCtxEntry->GetCtxCookie(), pCtxEntry->m_dwRefCount));
+ pCtxEntry->m_dwRefCount = 0;
+ delete pCtxEntry;
+ }
+}
+
+
+void CtxEntryCache::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ // This should never be called more than once.
+ PRECONDITION(NULL == s_pCtxEntryCache);
+ }
+ CONTRACTL_END;
+
+ // Allocate the one and only instance of the context entry cache.
+ s_pCtxEntryCache = new CtxEntryCache();
+}
+
+CtxEntryCache* CtxEntryCache::GetCtxEntryCache()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(s_pCtxEntryCache));
+ }
+ CONTRACTL_END;
+
+ return s_pCtxEntryCache;
+}
+
+CtxEntry* CtxEntryCache::CreateCtxEntry(LPVOID pCtxCookie, Thread * pSTAThread)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CtxEntry * pCtxEntry = NULL;
+ Thread * pThread = GetThread();
+
+ // If we don't already have a context entry for the context cookie,
+ // we need to create one.
+ NewHolder<CtxEntry> pNewCtxEntry = new CtxEntry(pCtxCookie, pSTAThread);
+ // Triggers GC; can't happen while we hold the spin lock.
+ pNewCtxEntry->Init();
+
+ {
+ TAKE_SPINLOCK_AND_DONOT_TRIGGER_GC(&m_Lock);
+
+ // double check for race
+ pCtxEntry = m_CtxEntryHash.Lookup(pCtxCookie);
+ if (pCtxEntry == NULL)
+ {
+ // We successfully allocated and initialized the entry.
+ m_CtxEntryHash.Add(pNewCtxEntry);
+ pCtxEntry = pNewCtxEntry.Extract();
+ }
+ // We must have an entry now; we need to addref it before
+ // we leave the lock.
+ pCtxEntry->AddRef ();
+ }
+
+ return pCtxEntry;
+}
+
+CtxEntry* CtxEntryCache::FindCtxEntry(LPVOID pCtxCookie, Thread *pThread)
+{
+ CtxEntry *pCtxEntry = NULL;
+ Thread *pSTAThread = NULL;
+
+ CONTRACT (CtxEntry*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pCtxCookie));
+ POSTCONDITION(CheckPointer(RETVAL));
+ POSTCONDITION(pCtxCookie == pCtxEntry->GetCtxCookie());
+ POSTCONDITION(pSTAThread == pCtxEntry->GetSTAThread());
+ }
+ CONTRACT_END;
+
+ // Find our STA (if any)
+ if (pThread->GetApartment() == Thread::AS_InSTA)
+ {
+ // We are in an STA thread. But we may be in an NA context, so do an extra
+ // check for that case.
+ BOOL fNAContext;
+
+ // try the simple cache on Thread first
+ if (pCtxCookie != pThread->GetLastSTACtxCookie(&fNAContext))
+ {
+ APTTYPE type;
+ fNAContext = (SUCCEEDED(GetCurrentApartmentTypeNT5((IObjectContext *)pCtxCookie, &type)) && type == APTTYPE_NA);
+ pThread->SetLastSTACtxCookie(pCtxCookie, fNAContext);
+ }
+
+ if (!fNAContext)
+ pSTAThread = pThread;
+ }
+
+ ASSERT (GetThread ());
+ BOOL bFound = FALSE;
+
+ ACQUIRE_SPINLOCK_NO_HOLDER(&m_Lock, pThread);
+ {
+ // Try to find a context entry for the context cookie.
+ pCtxEntry = m_CtxEntryHash.Lookup(pCtxCookie);
+ if (pCtxEntry)
+ {
+ // We must have an entry now; we need to addref it before
+ // we leave the lock.
+ pCtxEntry->AddRef ();
+ bFound = TRUE;
+ }
+ }
+ RELEASE_SPINLOCK_NO_HOLDER(&m_Lock, pThread);
+
+ if (!bFound)
+ {
+ pCtxEntry = CreateCtxEntry(pCtxCookie, pSTAThread);
+ }
+
+ // Return the found or allocated entry.
+ RETURN pCtxEntry;
+}
+
+
+void CtxEntryCache::TryDeleteCtxEntry(LPVOID pCtxCookie)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCtxCookie));
+ }
+ CONTRACTL_END;
+
+ BOOL bDelete = FALSE;
+ CtxEntry *pCtxEntry = NULL;
+
+ {
+ TAKE_SPINLOCK_AND_DONOT_TRIGGER_GC(&m_Lock);
+
+ // Try to find a context entry for the context cookie.
+ pCtxEntry = m_CtxEntryHash.Lookup(pCtxCookie);
+ if (pCtxEntry)
+ {
+ // If the ref count of the context entry is still 0, then we can
+ // remove the ctx entry and delete it.
+ if (pCtxEntry->m_dwRefCount == 0)
+ {
+ // First remove the context entry from the list.
+ m_CtxEntryHash.Remove(pCtxCookie);
+
+ // We need to unlock the context entry cache before we delete the
+ // context entry, since deleting it can cause Release to be called on
+ // an IP, which can re-enter the runtime and cause a deadlock.
+ // We can now safely delete the context entry.
+ bDelete = TRUE;
+ }
+ }
+ }
+
+ if (bDelete)
+ {
+ delete pCtxEntry;
+ }
+}
+
+//================================================================
+// Get the RCW associated with this IUnkEntry
+// We assert inside Init that this IUnkEntry is indeed within a RCW
+RCW *IUnkEntry::GetRCW()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (RCW *) (((LPBYTE) this) - offsetof(RCW, m_UnkEntry));
+}
+
+//================================================================
+// Initialize the entry
+void IUnkEntry::Init(
+ IUnknown *pUnk,
+ BOOL bIsFreeThreaded,
+ Thread *pThread
+ DEBUGARG(RCW *pRCW)
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ INDEBUG(PRECONDITION(CheckPointer(pRCW));)
+ }
+ CONTRACTL_END;
+
+ // Make sure this IUnkEntry is part of a RCW so that we can get back to the RCW through offset
+ // if we have to
+ _ASSERTE(((LPBYTE)pRCW) + offsetof(RCW, m_UnkEntry) == (LPBYTE) this);
+
+ // Find our context cookie
+ LPVOID pCtxCookie = GetCurrentCtxCookie();
+ _ASSERTE(pCtxCookie);
+
+ // Set up IUnkEntry's state.
+ if (bIsFreeThreaded)
+ m_pCtxEntry = NULL;
+ else
+ m_pCtxEntry = CtxEntryCache::GetCtxEntryCache()->FindCtxEntry(pCtxCookie, pThread);
+
+ m_pUnknown = pUnk;
+ m_pCtxCookie = pCtxCookie;
+ m_pStream = NULL;
+
+ // Sanity check this IUnkEntry.
+ CheckValidIUnkEntry();
+}
+
+//================================================================
+// Release the interface pointer held by the IUnkEntry.
+VOID IUnkEntry::ReleaseInterface(RCW *pRCW)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ if (g_fProcessDetach)
+ {
+ // The Release call is unsafe if the process is going away (calls into
+ // DLLs we don't know are even mapped).
+ LogInteropLeak(this);
+ }
+ else
+ {
+ // now release the IUnknown that we hold
+ if ((m_pUnknown != 0) && (m_pUnknown != (IUnknown *)0xBADF00D))
+ {
+ ULONG cbRef = SafeReleasePreemp(m_pUnknown, pRCW);
+ LogInteropRelease(m_pUnknown, cbRef, "IUnkEntry::Free: Releasing the held ref");
+ }
+
+ // mark the entry as dead
+ m_pUnknown = (IUnknown *)0xBADF00D;
+ }
+}
+
+//================================================================
+// Free the IUnknown entry. ReleaseInterface must have been called.
+VOID IUnkEntry::Free()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(g_fProcessDetach || m_pUnknown == (IUnknown *)0xBADF00D);
+ }
+ CONTRACTL_END;
+
+ // Log the de-allocation of the IUnknown entry.
+ LOG((LF_INTEROP, LL_INFO10000, "IUnkEntry::Free called for context 0x%08X, to release entry with m_pUnknown %p, on thread %p\n", m_pCtxCookie, m_pUnknown, GetThread()));
+
+ if (g_fProcessDetach)
+ {
+ IStream *pOldStream = m_pStream;
+ if (InterlockedExchangeT(&m_pStream, NULL) == pOldStream)
+ SafeReleasePreemp(pOldStream);
+ }
+ else
+ {
+ IStream *pStream = m_pStream;
+ m_pStream = NULL;
+
+ // This will release the stream, the object in the stream, and the memory on which the stream was created
+ if (pStream)
+ SafeReleaseStream(pStream);
+
+ }
+
+ // Release the ref count we have on the CtxEntry.
+ CtxEntry *pEntry = GetCtxEntry();
+ if (pEntry)
+ {
+ pEntry->Release();
+ m_pCtxEntry = NULL;
+ }
+}
+
+//================================================================
+// Get IUnknown for the current context from IUnkEntry
+IUnknown* IUnkEntry::GetIUnknownForCurrContext(bool fNoAddRef)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, (fNoAddRef ? NULL_OK : NULL_NOT_OK)));
+ }
+ CONTRACT_END;
+
+ IUnknown* pUnk = NULL;
+ LPVOID pCtxCookie = GetCurrentCtxCookie();
+ _ASSERTE(pCtxCookie);
+
+ CheckValidIUnkEntry();
+
+ if (m_pCtxCookie == pCtxCookie || IsFreeThreaded())
+ {
+ pUnk = GetRawIUnknown_NoAddRef();
+
+ if (!fNoAddRef)
+ {
+ RCW_VTABLEPTR(GetRCW());
+ ULONG cbRef = SafeAddRef(pUnk);
+ LogInteropAddRef(pUnk, cbRef, "IUnkEntry::GetIUnknownForCurrContext: Addref pUnk, passing ref to caller");
+ }
+ }
+
+ if (pUnk == NULL && !fNoAddRef)
+ pUnk = UnmarshalIUnknownForCurrContext();
+
+ RETURN pUnk;
+}
+
+//================================================================
+// Unmarshal IUnknown for the current context from IUnkEntry
+IUnknown* IUnkEntry::UnmarshalIUnknownForCurrContext()
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(!IsFreeThreaded());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ HRESULT hrCDH = S_OK;
+ IUnknown* pUnk = NULL;
+ BOOL fRetry = TRUE;
+ BOOL fUnmarshalFailed = FALSE;
+ BOOL fCallHelper = FALSE;
+
+ CheckValidIUnkEntry();
+
+ _ASSERTE(GetCtxEntry() != NULL);
+
+
+ if(IsMarshalingInhibited() && (m_pCtxCookie != GetCurrentCtxCookie()))
+ {
+ // We want to use an interface in a different context but it can't be marshalled.
+ LOG((LF_INTEROP, LL_INFO100, "IUnkEntry::GetIUnknownForCurrContext failed as the COM object has inhibited marshaling"));
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOTCAST_NOMARSHAL);
+ }
+
+ // Make sure we are in preemptive GC mode before we call out to COM.
+ GCX_PREEMP();
+
+ // Need to synchronize
+ while (fRetry)
+ {
+ // Marshal the interface to the stream if it hasn't been done yet.
+ if (m_pStream == NULL)
+ {
+ // If the context transition failed, we'll return a failure HRESULT.
+ // Otherwise, we return S_OK but m_pStream will stay NULL.
+ hrCDH = MarshalIUnknownToStreamCallback(this);
+ CheckForFuncEvalAbort(hrCDH);
+
+#ifdef MDA_SUPPORTED
+ if (FAILED(hrCDH))
+ {
+ MDA_TRIGGER_ASSISTANT(DisconnectedContext, ReportViolationDisconnected(m_pCtxCookie, hrCDH));
+ }
+ else if (m_pStream == NULL)
+ {
+ MDA_TRIGGER_ASSISTANT(NotMarshalable, ReportViolation());
+ }
+#endif
+ }
+
+ if (TryUpdateEntry())
+ {
+ // If the interface is not marshalable or if we failed to
+ // enter the context, then we don't have any choice but to
+ // use the raw IP.
+ if (m_pStream == NULL)
+ {
+ // We retrieved an IP so stop retrying.
+ fRetry = FALSE;
+
+ // Give out this IUnknown we are holding
+ pUnk = GetRawIUnknown_NoAddRef();
+
+ RCW_VTABLEPTR(GetRCW());
+ ULONG cbRef = SafeAddRefPreemp(pUnk);
+
+ LogInteropAddRef(pUnk, cbRef, "UnmarshalIUnknownForCurrContext handing out raw IUnknown");
+ }
+ else
+ {
+ // we got control for this entry
+ // GetInterface for the current context
+ HRESULT hr;
+ hr = CoUnmarshalInterface(m_pStream, IID_IUnknown, (void **)&pUnk);
+
+ // If the objref in the stream times out, we need to go and marshal into the
+ // stream once again.
+ if (FAILED(hr))
+ {
+ _ASSERTE(m_pStream);
+
+ CheckForFuncEvalAbort(hr);
+
+ // This should release the stream, the object in the stream, and the memory on which the stream was created
+ SafeReleaseStream(m_pStream);
+ m_pStream = NULL;
+
+ // If unmarshal failed twice, then bail out.
+ if (fUnmarshalFailed)
+ {
+ fRetry = FALSE;
+
+ // Handing out m_pUnknown in this case would be incorrect. We should fix other places that are doing the same thing in Dev10.
+ // To minimize code changes, throw E_NOINTERFACE instead.
+ COMPlusThrowHR(E_NOINTERFACE);
+ }
+
+ // Remember we failed to unmarshal.
+ fUnmarshalFailed = TRUE;
+ }
+ else
+ {
+ // Reset the stream to the beginning
+ LARGE_INTEGER li;
+ LISet32(li, 0);
+ ULARGE_INTEGER li2;
+ m_pStream->Seek(li, STREAM_SEEK_SET, &li2);
+
+ // Marshal the interface into the stream with appropriate flags
+ hr = CoMarshalInterface(m_pStream,
+ IID_IUnknown, pUnk, MSHCTX_INPROC, NULL, MSHLFLAGS_NORMAL);
+
+ if (FAILED(hr))
+ {
+ CheckForFuncEvalAbort(hr);
+
+ // The proxy is no longer valid. This sometimes manifests itself as
+ // a failure during re-marshaling to the stream. When this happens,
+ // we need to release the pUnk we extracted and the stream, and try to
+ // re-create the stream. We don't want to release the stream data since
+ // we already extracted the proxy from the stream and released it.
+ RCW_VTABLEPTR(GetRCW());
+ SafeReleasePreemp(pUnk);
+
+ SafeReleasePreemp(m_pStream);
+ m_pStream = NULL;
+ }
+ else
+ {
+ // Reset the stream to the beginning
+ LISet32(li, 0);
+ m_pStream->Seek(li, STREAM_SEEK_SET, &li2);
+
+ // We managed to unmarshal the IP from the stream, stop retrying.
+ fRetry = FALSE;
+ }
+ }
+ }
+
+ // Done with the entry.
+ EndUpdateEntry();
+ }
+ else
+ {
+ //================================================================
+ // We can potentially collide with the COM+ activity lock so spawn off
+ // another call that does its stream marshalling on the stack without
+ // the need to do locking.
+ fCallHelper = TRUE;
+ fRetry = FALSE;
+ }
+ }
+
+ if (fCallHelper)
+ {
+ // If we hit a collision earlier, spawn off helper that repeats this operation without locking.
+ pUnk = UnmarshalIUnknownForCurrContextHelper();
+ }
+
+ RETURN pUnk;
+}
+
+//================================================================
+// Release the stream. This will force UnmarshalIUnknownForCurrContext to transition
+// into the context that owns the IP and re-marshal it to the stream.
+void IUnkEntry::ReleaseStream()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // This should release the stream, the object in the stream, and the memory on which the stream was created
+ if (m_pStream)
+ {
+ SafeReleaseStream(m_pStream);
+ m_pStream = NULL;
+ }
+}
+
+// Indicates if the COM component being wrapped by the IUnkEntry aggregates the FTM
+bool IUnkEntry::IsFreeThreaded()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetRCW()->IsFreeThreaded();
+}
+
+// Indicates if the COM component being wrapped by the IUnkEntry implements INoMarshal.
+bool IUnkEntry::IsMarshalingInhibited()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetRCW()->IsMarshalingInhibited();
+}
+
+// Helper function to marshal the IUnknown pointer to the stream.
+static HRESULT MarshalIUnknownToStreamHelper(IUnknown * pUnknown, IStream ** ppStream)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ IStream *pStream = NULL;
+
+ GCX_PREEMP();
+
+ // ensure we register this cookie
+ HRESULT hr = wCoMarshalInterThreadInterfaceInStream(IID_IUnknown, pUnknown, &pStream);
+
+ if ((hr == REGDB_E_IIDNOTREG) ||
+ (hr == E_FAIL) ||
+ (hr == E_NOINTERFACE) ||
+ (hr == E_INVALIDARG) ||
+ (hr == E_UNEXPECTED))
+ {
+ // Interface is not marshallable.
+ pStream = NULL;
+ hr = S_OK;
+ }
+
+ *ppStream = pStream;
+
+ return hr;
+}
+
+//================================================================
+struct StreamMarshalData
+{
+ IUnkEntry * m_pUnkEntry;
+ IStream * m_pStream;
+};
+// Variant of the marshaling callback used when the entry's update lock is held
+HRESULT IUnkEntry::MarshalIUnknownToStreamCallback2(LPVOID pData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pData));
+ PRECONDITION(g_fProcessDetach == FALSE);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ StreamMarshalData *psmd = reinterpret_cast<StreamMarshalData *>(pData);
+
+ // This should never be called during process detach.
+
+ hr = psmd->m_pUnkEntry->HRCheckValidIUnkEntry();
+ if (hr != S_OK)
+ {
+ // Interface not marshallable
+ // We'll know marshaling failed because m_pStream == NULL
+ return S_OK;
+ }
+
+ LPVOID pCurrentCtxCookie = GetCurrentCtxCookie();
+ _ASSERTE(pCurrentCtxCookie);
+
+ if (pCurrentCtxCookie == psmd->m_pUnkEntry->m_pCtxCookie)
+ {
+ // We are in the right context; marshal the IUnknown to the
+ // stream directly.
+ hr = MarshalIUnknownToStreamHelper(psmd->m_pUnkEntry->m_pUnknown, &psmd->m_pStream);
+ }
+ else
+ {
+ // Transition into the context to marshal the IUnknown to
+ // the stream.
+ _ASSERTE(psmd->m_pUnkEntry->GetCtxEntry() != NULL);
+ hr = psmd->m_pUnkEntry->GetCtxEntry()->EnterContext(MarshalIUnknownToStreamCallback2, psmd);
+ }
+
+ return hr;
+}
+
+//================================================================
+// Unmarshal IUnknown for the current context if the lock is held
+IUnknown* IUnkEntry::UnmarshalIUnknownForCurrContextHelper()
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(!IsFreeThreaded());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ HRESULT hrCDH = S_OK;
+ IUnknown * pUnk = NULL;
+ SafeComHolder<IStream> spStream;
+
+ CheckValidIUnkEntry();
+
+ // Make sure we are in preemptive GC mode before we call out to COM.
+ GCX_PREEMP();
+
+ // Marshal the interface to the stream. Any call to this function
+ // would be from another apartment, so marshaling is needed.
+ StreamMarshalData smd = {this, NULL};
+
+ // If the context transition failed, we'll return a failure HRESULT.
+ // Otherwise, we return S_OK but m_pStream will stay NULL.
+ hrCDH = MarshalIUnknownToStreamCallback2(&smd);
+
+ spStream = smd.m_pStream;
+ smd.m_pStream = NULL;
+
+ CheckForFuncEvalAbort(hrCDH);
+
+#ifdef MDA_SUPPORTED
+ if (FAILED(hrCDH))
+ {
+ MDA_TRIGGER_ASSISTANT(DisconnectedContext, ReportViolationDisconnected(m_pCtxCookie, hrCDH));
+ }
+ else if(spStream == NULL)
+ {
+ MDA_TRIGGER_ASSISTANT(NotMarshalable, ReportViolation());
+ }
+#endif
+
+ // If the interface is not marshalable or if we failed to
+ // enter the context, then we don't have any choice but to
+ // use the raw IP.
+ if (spStream == NULL)
+ {
+ // Give out this IUnknown we are holding
+ pUnk = GetRawIUnknown_NoAddRef();
+
+ RCW_VTABLEPTR(GetRCW());
+ ULONG cbRef = SafeAddRefPreemp(pUnk);
+
+ LogInteropAddRef(pUnk, cbRef, "UnmarshalIUnknownForCurrContext handing out raw IUnknown");
+ }
+ else
+ {
+ // we got control for this entry
+ // GetInterface for the current context
+ HRESULT hr;
+ hr = CoUnmarshalInterface(spStream, IID_IUnknown, reinterpret_cast<void**>(&pUnk));
+ spStream.Release();
+
+ if (FAILED(hr))
+ {
+ CheckForFuncEvalAbort(hr);
+
+ // Give out this IUnknown we are holding
+ pUnk = GetRawIUnknown_NoAddRef();
+
+ RCW_VTABLEPTR(GetRCW());
+ ULONG cbRef = SafeAddRefPreemp(pUnk);
+
+ LogInteropAddRef(pUnk, cbRef, "UnmarshalIUnknownForCurrContext handing out raw IUnknown");
+ }
+ }
+
+ RETURN pUnk;
+}
+
+//================================================================
+// Callback called to marshal the IUnknown into a stream lazily.
+HRESULT IUnkEntry::MarshalIUnknownToStreamCallback(LPVOID pData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pData));
+ PRECONDITION(g_fProcessDetach == FALSE);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ IUnkEntry *pUnkEntry = (IUnkEntry *)pData;
+
+ // This should never be called during process detach.
+
+ hr = pUnkEntry->HRCheckValidIUnkEntry();
+ if (hr != S_OK)
+ {
+ // Interface not marshallable
+ // We'll know marshaling failed because m_pStream == NULL
+ return S_OK;
+ }
+
+ LPVOID pCurrentCtxCookie = GetCurrentCtxCookie();
+ _ASSERTE(pCurrentCtxCookie);
+
+ if (pCurrentCtxCookie == pUnkEntry->m_pCtxCookie)
+ {
+ // We are in the right context; marshal the IUnknown to the
+ // stream directly.
+ hr = pUnkEntry->MarshalIUnknownToStream();
+ }
+ else
+ {
+ _ASSERTE(pUnkEntry->GetCtxEntry() != NULL);
+
+ // Transition into the context to marshal the IUnknown to
+ // the stream.
+ hr = pUnkEntry->GetCtxEntry()->EnterContext(MarshalIUnknownToStreamCallback, pUnkEntry);
+ }
+
+ return hr;
+}
+
+//================================================================
+// Helper function to determine if a COM component aggregates the
+// FTM.
+bool IUnkEntry::IsComponentFreeThreaded(IUnknown *pUnk)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ }
+ CONTRACTL_END;
+
+ // First see if the object implements the IAgileObject marker interface
+ SafeComHolderPreemp<IAgileObject> pAgileObject;
+ HRESULT hr = SafeQueryInterfacePreemp(pUnk, IID_IAgileObject, (IUnknown**)&pAgileObject);
+ LogInteropQI(pUnk, IID_IAgileObject, hr, "IUnkEntry::IsComponentFreeThreaded: QI for IAgileObject");
+
+ if (SUCCEEDED(hr))
+ {
+ return true;
+ }
+ else
+ {
+ SafeComHolderPreemp<IMarshal> pMarshal = NULL;
+
+ // If not, then we can try to determine if the component aggregates the FTM via IMarshal.
+ hr = SafeQueryInterfacePreemp(pUnk, IID_IMarshal, (IUnknown **)&pMarshal);
+ LogInteropQI(pUnk, IID_IMarshal, hr, "IUnkEntry::IsComponentFreeThreaded: QI for IMarshal");
+ if (SUCCEEDED(hr))
+ {
+ CLSID clsid;
+
+ // The COM component implements IMarshal so we now check to see if the un-marshal class
+ // for this IMarshal is the FTM's un-marshaler.
+ hr = pMarshal->GetUnmarshalClass(IID_IUnknown, NULL, MSHCTX_INPROC, NULL, MSHLFLAGS_NORMAL, &clsid);
+ if (SUCCEEDED(hr) && clsid == CLSID_InProcFreeMarshaler)
+ {
+ // The unmarshal class is indeed the FTM's unmarshaler, so this object
+ // is free threaded.
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+//================================================================
+// Helper function to marshal the IUnknown pointer to the stream.
+HRESULT IUnkEntry::MarshalIUnknownToStream()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ // This must always be called in the right context.
+ PRECONDITION(m_pCtxCookie == GetCurrentCtxCookie());
+ }
+ CONTRACTL_END;
+
+ IStream *pStream = NULL;
+
+ GCX_PREEMP();
+
+ HRESULT hr = S_OK;
+
+ // ensure we register this cookie
+ IUnknown *pUnk = m_pUnknown;
+ if (pUnk == (IUnknown *)0xBADF00D)
+ {
+ hr = COR_E_INVALIDCOMOBJECT;
+ }
+ else
+ {
+ hr = wCoMarshalInterThreadInterfaceInStream(IID_IUnknown, pUnk, &pStream);
+
+ if ((hr == REGDB_E_IIDNOTREG) ||
+ (hr == E_FAIL) ||
+ (hr == E_NOINTERFACE) ||
+ (hr == E_INVALIDARG) ||
+ (hr == E_UNEXPECTED))
+ {
+ // Interface is not marshallable.
+ pStream = NULL;
+ hr = S_OK;
+ }
+ }
+
+ // Try to set the stream in the IUnkEntry. If another thread already set it,
+ // then we need to release the stream we just set up.
+ if (FastInterlockCompareExchangePointer(&m_pStream, pStream, NULL) != NULL)
+ SafeReleaseStream(pStream);
+
+ return hr;
+}
+
+
+// Method to try to start updating the entry.
+bool IUnkEntry::TryUpdateEntry()
+{
+ WRAPPER_NO_CONTRACT;
+
+ CtxEntry *pOldEntry = m_pCtxEntry;
+ if (((DWORD_PTR)pOldEntry & 1) == 0)
+ {
+ CtxEntry *pNewEntry = (CtxEntry *)((DWORD_PTR)pOldEntry | 1);
+ return (InterlockedExchangeT(&m_pCtxEntry, pNewEntry) == pOldEntry);
+ }
+ return false;
+}
+
+// Method to end updating the entry.
+VOID IUnkEntry::EndUpdateEntry()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ CtxEntry *pOldEntry = m_pCtxEntry;
+
+ // we should hold the lock
+ _ASSERTE(((DWORD_PTR)pOldEntry & 1) == 1);
+
+ CtxEntry *pNewEntry = (CtxEntry *)((DWORD_PTR)pOldEntry & ~1);
+
+ // and it's us who resets the bit
+ VERIFY(InterlockedExchangeT(&m_pCtxEntry, pNewEntry) == pOldEntry);
+}
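+
+// Note on the scheme above (an explanatory sketch, not original commentary):
+// the low bit of m_pCtxEntry doubles as an update lock, relying on CtxEntry
+// pointers being at least two-byte aligned. A typical guarded update is:
+//
+//     if (TryUpdateEntry()) // atomically sets the tag bit
+//     {
+//         // ... update m_pStream etc. ...
+//         EndUpdateEntry(); // clears the tag bit
+//     }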
+
+
+#ifdef MDA_SUPPORTED
+
+// Default to a 60 second timeout
+#define MDA_CONTEXT_SWITCH_DEADLOCK_TIMEOUT 60000
+#define MDA_CONTEXT_SWITCH_DEADLOCK_ITERATION_COUNT 1000
+
+struct MDAContextSwitchDeadlockArgs
+{
+ CLREvent* hEvent;
+ LPVOID OriginContext;
+ LPVOID DestinationContext;
+};
+
+DWORD WINAPI MDAContextSwitchDeadlockThreadProc(LPVOID lpParameter)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(MDA_GET_ASSISTANT(ContextSwitchDeadlock)));
+ }
+ CONTRACTL_END;
+
+ // We need to ensure a thread object has been set up since we will toggle to cooperative GC mode
+ // inside the wait loop.
+ Thread *pThread = SetupThreadNoThrow();
+ if (pThread == NULL)
+ {
+ // If we failed to allocate the thread object, we will skip running the watchdog thread
+ // and simply return.
+ return 0;
+ }
+
+ DWORD retval = 0;
+ NewHolder<MDAContextSwitchDeadlockArgs> args = (MDAContextSwitchDeadlockArgs*)lpParameter;
+
+ // This interesting piece of code allows us to avoid firing the MDA when a process is
+ // being debugged while the context transition is in progress. It is needed because
+ // CLREvent::Wait will time out after the specified amount of wall-clock time, even if the
+ // process is broken into the debugger for a portion of the time. By splitting the
+ // wait into a bunch of smaller waits, we allow many more step/continue operations to
+ // occur before we signal the timeout.
+ for (int i = 0; i < MDA_CONTEXT_SWITCH_DEADLOCK_ITERATION_COUNT; i++)
+ {
+ retval = args->hEvent->Wait(MDA_CONTEXT_SWITCH_DEADLOCK_TIMEOUT / MDA_CONTEXT_SWITCH_DEADLOCK_ITERATION_COUNT, FALSE);
+ if (retval != WAIT_TIMEOUT)
+ break;
+
+ // Transition to cooperative GC mode and back. This will allow us to stop the timeout
+ // if we are broken in while managed only debugging.
+ {
+ GCX_COOP();
+ }
+ }
+
+ if (retval == WAIT_TIMEOUT)
+ {
+ // We didn't transition into the context within the allotted timeout period.
+ // We'll fire the MDA and close the event, but we can't delete it, as the
+ // thread may still complete the transition and attempt to signal the event.
+ // So we'll just leak it and let the transition thread recognize that the
+ // event is no longer valid so it can simply delete it.
+ MDA_TRIGGER_ASSISTANT(ContextSwitchDeadlock, ReportDeadlock(args->OriginContext, args->DestinationContext));
+
+ args->hEvent->CloseEvent();
+ return 1;
+ }
+
+ delete args->hEvent;
+
+ return 0;
+}
+
+
+void QueueMDAThread(CtxEntryEnterContextCallbackData* data)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(data->m_hTimeoutEvent));
+ }
+ CONTRACTL_END;
+
+ MDAContextSwitchDeadlockArgs* args = NULL;
+
+ EX_TRY
+ {
+ args = new MDAContextSwitchDeadlockArgs;
+
+ args->OriginContext = GetCurrentCtxCookie();
+ args->DestinationContext = data->m_pCtxCookie;
+ args->hEvent = data->m_hTimeoutEvent;
+
+ // Will execute in the Default AppDomain
+ ThreadpoolMgr::QueueUserWorkItem(MDAContextSwitchDeadlockThreadProc, (LPVOID)args, WT_EXECUTELONGFUNCTION);
+ }
+ EX_CATCH
+ {
+ delete data->m_hTimeoutEvent;
+ data->m_hTimeoutEvent = NULL;
+
+ if (args)
+ delete args;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+#endif // MDA_SUPPORTED
+
+
+// Initialize the entry, returns true on success (i.e. the entry was free).
+bool InterfaceEntry::Init(MethodTable* pMT, IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // It is important the fields be set in this order.
+ if (InterlockedCompareExchangeT(&m_pUnknown, pUnk, NULL) == NULL)
+ {
+ m_pMT = (IE_METHODTABLE_PTR)pMT;
+ return true;
+ }
+ return false;
+}
+
+// Helper to determine if the entry is free.
+BOOL InterfaceEntry::IsFree()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pUnknown.Load() == NULL;
+}
+
+void InterfaceEntry::Free()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // We use the m_pUnknown field to synchronize access to the entry so that's the only
+ // one we need to reset. After all, the set of interfaces that the object is known to
+ // support is one of the most important debugging cues so let's keep m_pMT intact.
+ m_pUnknown.Store(NULL);
+}
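+
+// Illustrative sketch of how the three methods above compose (the Claim helper
+// is hypothetical): m_pUnknown doubles as the ownership flag, so whichever
+// thread wins the InterlockedCompareExchangeT in Init owns the slot and may
+// then fill in m_pMT.
+//
+//     InterfaceEntry *Claim(InterfaceEntry *rgEntries, size_t cEntries,
+//                           MethodTable *pMT, IUnknown *pUnk)
+//     {
+//         for (size_t i = 0; i < cEntries; i++)
+//             if (rgEntries[i].IsFree() && rgEntries[i].Init(pMT, pUnk))
+//                 return &rgEntries[i];   // won the CAS on m_pUnknown
+//         return NULL;                    // cache full; caller uses a slow path
+//     }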
+
+//================================================================
+// Constructor for the context entry.
+CtxEntry::CtxEntry(LPVOID pCtxCookie, Thread *pSTAThread)
+: m_pCtxCookie(pCtxCookie)
+, m_pObjCtx(NULL)
+, m_dwRefCount(0)
+, m_pSTAThread(pSTAThread)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+//================================================================
+// Destructor for the context entry.
+CtxEntry::~CtxEntry()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_dwRefCount == 0);
+ }
+ CONTRACTL_END;
+
+ // If the context is a valid context then release it.
+ if (m_pObjCtx && !g_fProcessDetach)
+ {
+ SafeRelease(m_pObjCtx);
+ m_pObjCtx = NULL;
+ }
+
+ // Set the context cookie to 0xBADF00D to indicate the current context
+ // has been deleted.
+ m_pCtxCookie = (LPVOID)0xBADF00D;
+}
+
+//================================================================
+// Initialization method for the context entry.
+VOID CtxEntry::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+
+ // Make sure COM has been started
+ PRECONDITION(g_fComStarted == TRUE);
+ }
+ CONTRACTL_END;
+
+ // Retrieve the IObjectContext.
+ HRESULT hr = GetCurrentObjCtx(&m_pObjCtx);
+
+ // In case the call to GetCurrentObjCtx fails (which should never really happen)
+ // we will throw an exception.
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+}
+
+
+// Add a reference to the CtxEntry.
+DWORD CtxEntry::AddRef()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ULONG cbRef = FastInterlockIncrement((LONG*)&m_dwRefCount);
+ LOG((LF_INTEROP, LL_INFO100, "CtxEntry::AddRef %8.8x with %d\n", this, cbRef));
+ return cbRef;
+}
+
+
+//================================================================
+// Method to decrement the ref count of the context entry.
+DWORD CtxEntry::Release()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_dwRefCount > 0);
+ }
+ CONTRACTL_END;
+
+ LPVOID pCtxCookie = m_pCtxCookie;
+
+ LONG cbRef = FastInterlockDecrement((LONG*)&m_dwRefCount);
+ LOG((LF_INTEROP, LL_INFO100, "CtxEntry::Release %8.8x with %d\n", this, cbRef));
+
+ // If the ref count falls to 0, try and delete the ctx entry.
+ // This might not end up deleting it if another thread tries to
+ // retrieve this ctx entry at the same time this one tries
+ // to delete it.
+ if (cbRef == 0)
+ CtxEntryCache::GetCtxEntryCache()->TryDeleteCtxEntry(pCtxCookie);
+
+ // WARNING: The this pointer cannot be used at this point.
+ return cbRef;
+}
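+
+// Illustrative sketch of the intended AddRef/Release lifecycle (assuming, as
+// the TryDeleteCtxEntry call above implies, that FindCtxEntry hands back an
+// entry with a reference already taken):
+//
+//     CtxEntry *pEntry = CtxEntryCache::GetCtxEntryCache()->FindCtxEntry(pCookie, pSTAThread);
+//     // ... use the entry, e.g. pEntry->EnterContext(...) ...
+//     pEntry->Release();   // may delete the entry; never touch pEntry afterwards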
+
+//================================================================
+// Method to transition into the context and call the callback
+// from within the context.
+HRESULT CtxEntry::EnterContext(PFNCTXCALLBACK pCallbackFunc, LPVOID pData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pCallbackFunc));
+ PRECONDITION(CheckPointer(pData));
+ // This should not be called if this context is the current context.
+ PRECONDITION(m_pCtxCookie != GetCurrentCtxCookie());
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // If we are in process detach, we cannot safely try to enter another context
+ // since we don't know if OLE32 is still loaded.
+ if (g_fProcessDetach)
+ {
+ LOG((LF_INTEROP, LL_INFO100, "Entering into context 0x%08x has failed since we are in process detach\n", m_pCtxCookie));
+ return RPC_E_DISCONNECTED;
+ }
+
+ // Make sure we are in preemptive GC mode before we call out to COM.
+ GCX_PREEMP();
+
+ // Prepare the information struct passed into the callback.
+ CtxEntryEnterContextCallbackData CallbackInfo;
+ CallbackInfo.m_pUserCallbackFunc = pCallbackFunc;
+ CallbackInfo.m_pUserData = pData;
+ CallbackInfo.m_pCtxCookie = m_pCtxCookie;
+ CallbackInfo.m_UserCallbackHR = E_FAIL;
+#ifdef MDA_SUPPORTED
+ CallbackInfo.m_hTimeoutEvent = NULL;
+
+ MdaContextSwitchDeadlock* mda = MDA_GET_ASSISTANT(ContextSwitchDeadlock);
+ if (mda)
+ {
+ EX_TRY
+ {
+ CallbackInfo.m_hTimeoutEvent = new CLREvent();
+ CallbackInfo.m_hTimeoutEvent->CreateAutoEvent(FALSE);
+ }
+ EX_CATCH
+ {
+ if (CallbackInfo.m_hTimeoutEvent)
+ {
+ delete CallbackInfo.m_hTimeoutEvent;
+ CallbackInfo.m_hTimeoutEvent = NULL;
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+#endif // MDA_SUPPORTED
+
+ // Retrieve the IContextCallback interface from the IObjectContext.
+ SafeComHolderPreemp<IContextCallback> pCallback;
+ hr = SafeQueryInterfacePreemp(m_pObjCtx, IID_IContextCallback, (IUnknown**)&pCallback);
+ LogInteropQI(m_pObjCtx, IID_IContextCallback, hr, "QI for IID_IContextCallback");
+ _ASSERTE(SUCCEEDED(hr) && pCallback);
+
+ // Setup the callback data structure with the callback Args
+ ComCallData callBackData;
+ callBackData.dwDispid = 0;
+ callBackData.dwReserved = 0;
+ callBackData.pUserDefined = &CallbackInfo;
+
+#ifdef MDA_SUPPORTED
+ // Make sure we don't deadlock when trying to enter the context.
+ if (mda && CallbackInfo.m_hTimeoutEvent)
+ {
+ QueueMDAThread(&CallbackInfo);
+ }
+#endif
+
+ EX_TRY
+ {
+ LeaveRuntimeHolder lrHolder(**(size_t**)(IContextCallback*)pCallback);
+ hr = ((IContextCallback*)pCallback)->ContextCallback(EnterContextCallback, &callBackData, IID_IEnterActivityWithNoLock, 2, NULL);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (FAILED(hr))
+ {
+ // If the transition failed because of an aborted func eval, simply propagate
+ // the HRESULT/IErrorInfo back to the caller as we cannot throw here.
+ SafeComHolder<IErrorInfo> pErrorInfo = CheckForFuncEvalAbortNoThrow(hr);
+ if (pErrorInfo != NULL)
+ {
+ LOG((LF_INTEROP, LL_INFO100, "Entering into context 0x%08X has failed since the debugger is blocking it\n", m_pCtxCookie));
+
+ // put the IErrorInfo back
+ {
+ LeaveRuntimeHolderNoThrow lrh((size_t)SetErrorInfo);
+ if (SUCCEEDED(lrh.GetHR()))
+ {
+ SetErrorInfo(0, pErrorInfo);
+ }
+ }
+ }
+ else
+ {
+ // The context is disconnected so we cannot transition into it.
+ LOG((LF_INTEROP, LL_INFO100, "Entering into context 0x%08X has failed since the context has disconnected\n", m_pCtxCookie));
+ }
+ }
+
+ return hr;
+}
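+
+// Illustrative sketch of a caller driving EnterContext. The callback name and
+// argument struct are hypothetical; the only requirements are the
+// PFNCTXCALLBACK signature and that pData stays alive for the duration of the
+// call. Note that EnterContext returns the transition HRESULT; the callback
+// reports its own status through the pData structure it is handed.
+//
+//     struct MyArgs { IUnknown *pUnk; HRESULT hrResult; };
+//
+//     static HRESULT DoWorkInContext(LPVOID pData)   // PFNCTXCALLBACK
+//     {
+//         MyArgs *pArgs = (MyArgs *)pData;
+//         // ... runs inside the target COM context ...
+//         pArgs->hrResult = S_OK;
+//         return S_OK;
+//     }
+//
+//     MyArgs args = { pSomeUnk, E_FAIL };
+//     HRESULT hr = pCtxEntry->EnterContext(DoWorkInContext, &args);
+//     if (SUCCEEDED(hr))
+//         hr = args.hrResult;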
+
+
+//================================================================
+// Callback function called by DoCallback.
+HRESULT __stdcall CtxEntry::EnterContextCallback(ComCallData* pComCallData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_NOT_MAINLINE;
+ PRECONDITION(CheckPointer(pComCallData));
+ }
+ CONTRACTL_END;
+
+ // Retrieve the callback data.
+ CtxEntryEnterContextCallbackData *pData = (CtxEntryEnterContextCallbackData*)pComCallData->pUserDefined;
+
+
+#ifdef MDA_SUPPORTED
+ // If active, signal the MDA watcher so we don't accidentally trigger a timeout.
+ MdaContextSwitchDeadlock* mda = MDA_GET_ASSISTANT(ContextSwitchDeadlock);
+ if (mda)
+ {
+ // If our watchdog worker is still waiting on us, the event will be valid.
+ if (pData->m_hTimeoutEvent->IsValid())
+ {
+ pData->m_hTimeoutEvent->Set();
+ }
+ else
+ {
+ // If we did timeout, we will have already cleaned up the event...just delete it now.
+ delete pData->m_hTimeoutEvent;
+ }
+ }
+#endif // MDA_SUPPORTED
+
+ Thread *pThread = GetThread();
+
+ // Make sure the thread has been set before we call the user callback function.
+ if (!pThread)
+ {
+ // We are in the middle of shutdown and there is no way we can add a new
+ // thread, so just return RPC_E_DISCONNECTED; see the ContextCallback call
+ // in EnterContext above for why we return this SCODE.
+ if(g_fEEShutDown)
+ return RPC_E_DISCONNECTED;
+
+ // Otherwise, we need to create a managed thread object for this new thread
+ else
+ {
+ HRESULT hr;
+ pThread = SetupThreadNoThrow(&hr);
+ if (pThread == NULL)
+ return hr;
+ }
+ }
+
+ // At this point we should be in the right context (on NT4). If not, the
+ // actual apartment state for this thread may have changed and we have
+ // stale info in our thread or in the CtxEntry.
+ LPVOID pCtxCookie = GetCurrentCtxCookie();
+ _ASSERTE(pCtxCookie);
+ if (pData->m_pCtxCookie != pCtxCookie)
+ return RPC_E_DISCONNECTED;
+
+ // Call the user callback function and store the return value in the
+ // callback data.
+ pData->m_UserCallbackHR = pData->m_pUserCallbackFunc(pData->m_pUserData);
+
+ // Return S_OK to indicate the context transition was successful.
+ return S_OK;
+}
diff --git a/src/vm/comcache.h b/src/vm/comcache.h
new file mode 100644
index 0000000000..77d3e26860
--- /dev/null
+++ b/src/vm/comcache.h
@@ -0,0 +1,308 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ComCache.h
+//
+
+//
+// Classes/Structures used to represent and store info on COM interfaces and contexts.
+
+
+#ifndef _H_COMCACHE
+#define _H_COMCACHE
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+#include "contxt.h"
+#include "ctxtcall.h"
+
+//================================================================
+// Forward declarations.
+class CtxEntryCache;
+class CtxEntry;
+class Thread;
+
+//================================================================
+// OLE32 helpers.
+HRESULT wCoMarshalInterThreadInterfaceInStream(REFIID riid, LPUNKNOWN pUnk, LPSTREAM* ppStm);
+STDAPI_(LPSTREAM) CreateMemStm(DWORD cb, BYTE** ppBuf);
+
+
+typedef DPTR(CtxEntry) PTR_CtxEntry;
+
+//==============================================================
+// An entry representing a COM+ 1.0 context or an apartment.
+class CtxEntry
+{
+ // The CtxEntryCache needs to be able to see the internals
+ // of the CtxEntry.
+ friend CtxEntryCache;
+
+ // NewHolder<CtxEntry> needs to be able to call the destructor of CtxEntry.
+ // DISABLE Warning C4396, the inline specifier cannot be used when a friend declaration refers to a specialization of a function template
+#pragma warning(push) // store original warning levels
+#pragma warning(disable: 4396)
+ friend void Delete<CtxEntry>(CtxEntry *);
+#pragma warning(pop) // restore original warning levels
+
+
+private:
+ // Disallow creation and deletion of the CtxEntries.
+ CtxEntry(LPVOID pCtxCookie, Thread* pSTAThread);
+ ~CtxEntry();
+
+ // Initialization method called from the CtxEntryCache.
+ VOID Init();
+
+public:
+ // Add a reference to the CtxEntry.
+ DWORD AddRef();
+
+ // Release a reference to the CtxEntry.
+ DWORD Release();
+
+ // Function to enter the context. The specified callback function will
+ // be called from within the context.
+ HRESULT EnterContext(PFNCTXCALLBACK pCallbackFunc, LPVOID pData);
+
+ // Accessor for the context cookie.
+ LPVOID GetCtxCookie()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCtxCookie;
+ }
+
+ // Accessor for the STA thread.
+ Thread* GetSTAThread()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pSTAThread;
+ }
+
+private:
+ // Callback function called by DoCallback.
+ static HRESULT __stdcall EnterContextCallback(ComCallData* pData);
+
+ LPVOID m_pCtxCookie; // The OPAQUE context cookie.
+ IUnknown* m_pObjCtx; // The object context interface.
+ DWORD m_dwRefCount; // The ref count.
+ Thread* m_pSTAThread; // STA thread associated with the context, if any
+};
+
+//==============================================================
+// IUnkEntry: represent a single COM component
+struct IUnkEntry
+{
+ // The context entry needs to be a friend to be able to call InitSpecial.
+ friend CtxEntry;
+ // RCW needs to access IUnkEntry
+ friend RCW;
+
+#ifdef _DEBUG
+ // Does not throw if m_pUnknown is no longer valid, debug only.
+ IUnknown *GetRawIUnknown_NoAddRef_NoThrow()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pUnknown != NULL && m_pUnknown != (IUnknown*)0xBADF00D);
+
+ return m_pUnknown;
+ }
+#endif // _DEBUG
+
+ IUnknown *GetRawIUnknown_NoAddRef()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ IUnknown *pUnk = m_pUnknown;
+#ifndef DACCESS_COMPILE
+ if (pUnk == (IUnknown *)0xBADF00D)
+ {
+ // All callers of this method have checked the pUnk before, so this must be a race.
+ COMPlusThrow(kInvalidComObjectException, IDS_EE_COM_OBJECT_RELEASE_RACE);
+ }
+#endif // !DACCESS_COMPILE
+
+ return pUnk;
+ }
+
+ LPVOID GetCtxCookie()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pCtxCookie;
+ }
+
+ // Is the RCW disconnected from its COM object?
+ inline bool IsDisconnected()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_pUnknown == (IUnknown*)0xBADF00D ||
+ (GetCtxEntry() != NULL && m_pCtxCookie != GetCtxEntry()->GetCtxCookie()));
+ }
+
+
+private :
+ // Initialize the entry, returns true if we are in an STA.
+ // We assert inside Init that this IUnkEntry is indeed within a RCW
+ void Init(IUnknown* pUnk, BOOL bIsFreeThreaded, Thread *pThread DEBUGARG(RCW *pRCW));
+
+ // Release the interface pointer held by the IUnkEntry.
+ VOID ReleaseInterface(RCW *pRCW);
+
+ // Free the IUnknown entry. ReleaseInterface must have been called.
+ VOID Free();
+
+ // Get the RCW associated with this IUnkEntry
+ // We assert inside Init that this IUnkEntry is indeed within a RCW
+ RCW *GetRCW();
+
+ // Get IUnknown for the current context from IUnkEntry
+ IUnknown* GetIUnknownForCurrContext(bool fNoAddRef);
+
+ // Unmarshal IUnknown for the current context from IUnkEntry
+ IUnknown* UnmarshalIUnknownForCurrContext();
+
+ // Release the stream. This will force UnmarshalIUnknownForCurrContext to transition
+ // into the context that owns the IP and re-marshal it to the stream.
+ void ReleaseStream();
+
+ // Indicates if the COM component being wrapped by the IUnkEntry aggregates the FTM
+ inline bool IsFreeThreaded();
+
+ // Indicates if the COM component being wrapped by the IUnkEntry implements INoMarshal.
+ inline bool IsMarshalingInhibited();
+
+ VOID CheckValidIUnkEntry();
+
+ HRESULT HRCheckValidIUnkEntry();
+
+ // Unmarshal IUnknown for the current context if the lock is held
+ IUnknown* UnmarshalIUnknownForCurrContextHelper();
+
+ // Variant of MarshalIUnknownToStreamCallback used when the lock is held; it
+ // works on a stack-allocated stream instead of the member variable stream.
+ static HRESULT MarshalIUnknownToStreamCallback2(LPVOID pData);
+
+ // Callback called to marshal the IUnknown into a stream lazily.
+ static HRESULT MarshalIUnknownToStreamCallback(LPVOID pData);
+
+ // Helper function called from MarshalIUnknownToStreamCallback.
+ HRESULT MarshalIUnknownToStream();
+
+ // Method to try to start updating the entry.
+ bool TryUpdateEntry();
+
+ // Method to end updating the entry.
+ VOID EndUpdateEntry();
+
+ // Helper function to determine if a COM component aggregates the FTM.
+ static bool IsComponentFreeThreaded(IUnknown *pUnk);
+
+ inline PTR_CtxEntry GetCtxEntry()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ PTR_CtxEntry pCtxEntry = dac_cast<PTR_CtxEntry>(dac_cast<TADDR>(m_pCtxEntry) & ~1);
+
+ return pCtxEntry;
+ }
+
+ // Context cookie at the point where we acquired the interface pointer
+ LPVOID m_pCtxCookie;
+
+ // Context entry representing the context where we acquired the interface pointer.
+ // We use the lowest bit for synchronization and we rely on the fact that the
+ // context itself (the rest of the bits) does not change throughout the lifetime
+ // of this object.
+ PTR_CtxEntry m_pCtxEntry;
+
+ // IUnknown interface
+ IUnknown* m_pUnknown;
+
+ // IStream used for marshalling
+ IStream* m_pStream;
+};
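+
+// Illustrative sketch (hypothetical helpers) of the low-bit locking scheme
+// described for m_pCtxEntry above: CtxEntry pointers are aligned, so bit 0 is
+// free to act as a "busy" flag while the remaining bits always denote the same
+// context entry. This is roughly what TryUpdateEntry/EndUpdateEntry amount to:
+//
+//     bool TryLock(volatile TADDR *pTagged)
+//     {
+//         TADDR unlocked = *pTagged & ~(TADDR)1;
+//         return InterlockedCompareExchangeT(pTagged, unlocked | 1, unlocked) == unlocked;
+//     }
+//
+//     void Unlock(volatile TADDR *pTagged) { *pTagged &= ~(TADDR)1; }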
+
+// Don't use this directly as the methodtable could have been released
+// by an AD Unload.
+typedef MethodTable* IE_METHODTABLE_PTR;
+
+//==============================================================
+// Interface Entry represents a single COM IP
+struct InterfaceEntry
+{
+ // Initialize the entry, returns true on success (i.e. the entry was free).
+ bool Init(MethodTable* pMT, IUnknown* pUnk);
+
+ // Helper to determine if the entry is free.
+ BOOL IsFree();
+
+ // Mark the entry as free.
+ void Free();
+
+ // Members of the entry. These must be volatile so the compiler
+ // will not try to optimize reads and writes to them.
+ Volatile<IE_METHODTABLE_PTR> m_pMT; // Interface asked for
+ Volatile<IUnknown*> m_pUnknown; // Result of query
+};
+
+class CtxEntryCacheTraits : public DefaultSHashTraits<CtxEntry *>
+{
+public:
+ typedef LPVOID key_t;
+ static CtxEntry *Null() { LIMITED_METHOD_CONTRACT; return NULL; }
+ static bool IsNull(CtxEntry *e) { LIMITED_METHOD_CONTRACT; return (e == NULL); }
+ static const LPVOID GetKey(CtxEntry *e) { LIMITED_METHOD_CONTRACT; return e->GetCtxCookie(); }
+ static count_t Hash(LPVOID key) { LIMITED_METHOD_CONTRACT; return (count_t)(size_t)key; }
+ static BOOL Equals(LPVOID lhs, LPVOID rhs) { LIMITED_METHOD_CONTRACT; return (lhs == rhs); }
+ static CtxEntry *Deleted() { LIMITED_METHOD_CONTRACT; return (CtxEntry *)-1; }
+ static bool IsDeleted(CtxEntry *e) { LIMITED_METHOD_CONTRACT; return e == (CtxEntry *)-1; }
+};
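+
+// With these traits, the SHash below keys each CtxEntry by its raw context
+// cookie, so a lookup is a plain pointer-hash probe. A minimal (illustrative)
+// use:
+//
+//     SHash<CtxEntryCacheTraits> hash;
+//     CtxEntry *pEntry = hash.Lookup(pCtxCookie);   // NULL when not cached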
+
+//==============================================================
+// The cache of context entries.
+class CtxEntryCache
+{
+ // The CtxEntry needs to be able to call some of the private
+ // methods of the CtxEntryCache.
+ friend CtxEntry;
+
+private:
+ // Disallow creation and deletion of the CtxEntryCache.
+ CtxEntryCache();
+ ~CtxEntryCache();
+
+public:
+ // Static initialization routine for the CtxEntryCache.
+ static VOID Init();
+
+ // Static accessor for the one and only instance of the CtxEntryCache.
+ static CtxEntryCache *GetCtxEntryCache();
+
+ // Method to retrieve/create a CtxEntry for the specified context cookie.
+ CtxEntry *FindCtxEntry(LPVOID pCtxCookie, Thread *pSTAThread);
+
+private:
+ CtxEntry * CreateCtxEntry(LPVOID pCtxCookie, Thread * pSTAThread);
+
+ // Helper function called from the CtxEntry.
+ void TryDeleteCtxEntry(LPVOID pCtxCookie);
+
+ SHash<CtxEntryCacheTraits> m_CtxEntryHash;
+
+ // spin lock for fast synchronization
+ SpinLock m_Lock;
+
+ // The one and only instance for the context entry cache.
+ static CtxEntryCache* s_pCtxEntryCache;
+};
+
+#endif
diff --git a/src/vm/comcallablewrapper.cpp b/src/vm/comcallablewrapper.cpp
new file mode 100644
index 0000000000..059234f153
--- /dev/null
+++ b/src/vm/comcallablewrapper.cpp
@@ -0,0 +1,6770 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//--------------------------------------------------------------------------
+// ComCallableWrapper.cpp
+//
+// Implementation for various Wrapper classes
+//
+// COMWrapper : COM callable wrappers for CLR interfaces
+//
+
+//--------------------------------------------------------------------------
+
+
+#include "common.h"
+#include "clrtypes.h"
+
+#include "comcallablewrapper.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+
+#include "object.h"
+#include "field.h"
+#include "method.hpp"
+#include "class.h"
+#include "runtimecallablewrapper.h"
+#include "olevariant.h"
+#include "cachelinealloc.h"
+#include "threads.h"
+#include "ceemain.h"
+#include "excep.h"
+#include "stublink.h"
+#include "cgensys.h"
+#include "comtoclrcall.h"
+#include "clrtocomcall.h"
+#include "objecthandle.h"
+#include "comutilnative.h"
+#include "eeconfig.h"
+#include "interoputil.h"
+#include "dispex.h"
+#include "perfcounters.h"
+#include "guidfromname.h"
+#include "security.h"
+#include "comconnectionpoints.h"
+#include <objsafe.h> // IID_IObjectSafety
+#include "virtualcallstub.h"
+#include "contractimpl.h"
+#include "caparser.h"
+#include "appdomain.inl"
+#include "rcwwalker.h"
+#include "windowsruntimebufferhelper.h"
+#include "winrttypenameconverter.h"
+
+#ifdef MDA_SUPPORTED
+const int DEBUG_AssertSlots = 50;
+#endif
+
+// The enum that describes the value of the IDispatchImplAttribute custom attribute.
+enum IDispatchImplType
+{
+ SystemDefinedImpl = 0,
+ InternalImpl = 1,
+ CompatibleImpl = 2
+};
+
+// The enum that describes the value of System.Runtime.InteropServices.CustomQueryInterfaceResult.
+// It is the return value of the method System.Runtime.InteropServices.ICustomQueryInterface.GetInterface.
+enum CustomQueryInterfaceResult
+{
+ Handled = 0,
+ NotHandled = 1,
+ Failed = 2
+};
+
+typedef CQuickArray<MethodTable*> CQuickEEClassPtrs;
+
+// Startup and shutdown lock
+CrstStatic g_CreateWrapperTemplateCrst;
+
+
+// This is the prestub that is used for Com calls entering COM+
+extern "C" VOID ComCallPreStub();
+
+class NewCCWHolderBase : public HolderBase<ComCallWrapper *>
+{
+
+protected:
+ NewCCWHolderBase(ComCallWrapper *pValue)
+ : HolderBase<ComCallWrapper *>(pValue)
+ {
+ }
+
+ // BaseHolder only initializes BASE with one parameter, so I had to
+ // use a separate function to set the cache which will be used in the release
+ void SetCache(ComCallWrapperCache *pCache)
+ {
+ m_pCache = pCache;
+ }
+
+ void DoAcquire()
+ {
+ // Do nothing
+ }
+
+ void DoRelease()
+ {
+ this->m_value->FreeWrapper(m_pCache);
+ }
+
+
+private :
+ ComCallWrapperCache *m_pCache;
+};
+
+typedef ComCallWrapper *ComCallWrapperPtr;
+
+// This is used to hold a newly created CCW. It will release the CCW (and linked wrappers)
+// upon exit, if SuppressRelease() is not called. It doesn't try to release the SimpleComCallWrapper
+// or destroy the handle
+// I need to use BaseHolder instead of BaseWrapper because BaseHolder allows me to use a class as BASE
+//
+class NewCCWHolder : public BaseHolder<ComCallWrapperPtr, NewCCWHolderBase>
+{
+public :
+ NewCCWHolder(ComCallWrapperCache *pCache)
+ {
+ SetCache(pCache);
+ }
+
+ ComCallWrapperPtr& operator=(ComCallWrapperPtr p)
+ {
+ Assign(p);
+ return m_value;
+ }
+
+ FORCEINLINE const ComCallWrapperPtr &operator->()
+ {
+ return this->m_value;
+ }
+
+ operator ComCallWrapperPtr()
+ {
+ return m_value;
+ }
+};
+
+// Calls Destruct on ComCallMethodDesc's in an array - used as backout code when laying out ComMethodTable.
+void DestructComCallMethodDescs(ArrayList *pDescArray)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ ArrayList::Iterator i = pDescArray->Iterate();
+ while (i.Next())
+ {
+ ComCallMethodDesc *pCMD = (ComCallMethodDesc *)i.GetElement();
+ pCMD->Destruct();
+ }
+}
+
+typedef Wrapper<ArrayList *, DoNothing<ArrayList *>, DestructComCallMethodDescs> ComCallMethodDescArrayHolder;
+
+//--------------------------------------------------------------------------
+// IsDuplicateClassItfMD(MethodDesc *pMD, unsigned int ix)
+// Determines if the specified method desc is a duplicate.
+// Note that this method should only be called to determine duplicates on
+// the class interface.
+//--------------------------------------------------------------------------
+bool IsDuplicateClassItfMD(MethodDesc *pMD, unsigned int ix)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ if (!pMD->IsDuplicate())
+ return false;
+ if (pMD->GetSlot() == ix)
+ return false;
+
+ return true;
+}
+
+//--------------------------------------------------------------------------
+// IsDuplicateClassItfMD(MethodDesc *pMD, unsigned int ix)
+// Determines if the specified method desc is a duplicate.
+// Note that this method should only be called to determine duplicates on
+// the class interface.
+//--------------------------------------------------------------------------
+bool IsDuplicateClassItfMD(InteropMethodTableSlotData *pInteropMD, unsigned int ix)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pInteropMD));
+ }
+ CONTRACTL_END;
+
+ if (!pInteropMD->IsDuplicate())
+ return false;
+ if (pInteropMD->GetSlot() == ix)
+ return false;
+
+ return true;
+}
+
+bool IsOverloadedComVisibleMember(MethodDesc *pMD, MethodDesc *pParentMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(CheckPointer(pParentMD));
+ }
+ CONTRACTL_END;
+
+ // Array methods should never be exposed to COM.
+ if (pMD->IsArray())
+ return FALSE;
+
+ // If this is the same MethodDesc, then it is not an overload at all
+ if (pMD == pParentMD)
+ return FALSE;
+
+ // If the new member is not visible from COM then it isn't an overloaded public member.
+ if (!IsMethodVisibleFromCom(pMD))
+ return FALSE;
+
+ // If the old member is visible from COM then the new one is not a public overload.
+ if (IsMethodVisibleFromCom(pParentMD))
+ return FALSE;
+
+ // The new member is a COM visible overload of a non COM visible member.
+ return TRUE;
+}
+
+bool IsNewComVisibleMember(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ // Array methods should never be exposed to COM.
+ if (pMD->IsArray())
+ return FALSE;
+
+ // Check to see if the member is visible from COM.
+ return IsMethodVisibleFromCom(pMD) ? true : false;
+}
+
+bool IsStrictlyUnboxed(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = pMD->GetMethodTable();
+
+ MethodTable::MethodIterator it(pMT);
+ for (; it.IsValid(); it.Next()) {
+ MethodDesc *pCurrMD = it.GetMethodDesc();
+ if (pCurrMD->GetMemberDef() == pMD->GetMemberDef())
+ return false;
+ }
+
+ return true;
+}
+
+void FillInComVtableSlot(SLOT* pComVtable, // must point to the first slot after the "extra slots" (e.g. IUnknown/IDispatch slots)
+ UINT uComSlot, // must be relative to pComVtable
+ ComCallMethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pComVtable));
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ pComVtable[uComSlot] = (SLOT)(((BYTE*)pMD - COMMETHOD_CALL_PRESTUB_SIZE)ARM_ONLY(+THUMB_CODE));
+}
+
+
+
+ComCallMethodDesc* ComMethodTable::ComCallMethodDescFromSlot(unsigned i)
+{
+ CONTRACT(ComCallMethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ ComCallMethodDesc* pCMD = NULL;
+
+ SLOT* rgVtable = (SLOT*)((ComMethodTable *)this+1);
+
+// NOTE: make sure to keep this in sync with FillInComVtableSlot
+ pCMD = (ComCallMethodDesc*)(((BYTE *)rgVtable[i]) + COMMETHOD_CALL_PRESTUB_SIZE ARM_ONLY(-THUMB_CODE));
+
+ RETURN pCMD;
+}
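+
+// Illustrative note: FillInComVtableSlot and ComCallMethodDescFromSlot are
+// exact inverses. Each vtable slot stores the address of the prestub code
+// that immediately precedes its ComCallMethodDesc in memory, so the two
+// mappings are pure pointer arithmetic:
+//
+//     slot = (BYTE*)pCMD - COMMETHOD_CALL_PRESTUB_SIZE  (+ THUMB_CODE on ARM)
+//     pCMD = (BYTE*)slot + COMMETHOD_CALL_PRESTUB_SIZE  (- THUMB_CODE on ARM)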
+
+//--------------------------------------------------------------------------
+// Determines if the Compatible IDispatch implementation is required for
+// the specified class.
+//--------------------------------------------------------------------------
+bool IsOleAutDispImplRequiredForClass(MethodTable *pClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pClass));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ const BYTE * pVal;
+ ULONG cbVal;
+ Assembly * pAssembly = pClass->GetAssembly();
+ IDispatchImplType DispImplType = SystemDefinedImpl;
+
+ if (pClass->IsWinRTObjectType() || pClass->IsExportedToWinRT())
+ {
+ // IDispatch is not supported in WinRT
+ return false;
+ }
+
+ // First check for the IDispatchImplType custom attribute first.
+ hr = pClass->GetMDImport()->GetCustomAttributeByName(pClass->GetCl(), INTEROP_IDISPATCHIMPL_TYPE, (const void**)&pVal, &cbVal);
+ if (hr == S_OK)
+ {
+ CustomAttributeParser cap(pVal, cbVal);
+ IfFailThrow(cap.SkipProlog());
+ UINT8 u1;
+ IfFailThrow(cap.GetU1(&u1));
+
+ DispImplType = (IDispatchImplType)u1;
+ if ((DispImplType > 2) || (DispImplType < 0))
+ DispImplType = SystemDefinedImpl;
+ }
+
+ // If the custom attribute was set to something other than system defined then we will use that.
+ if (DispImplType != SystemDefinedImpl)
+ return (bool) (DispImplType == CompatibleImpl);
+
+ // Check to see if the assembly has the IDispatchImplType attribute set.
+ hr = pAssembly->GetManifestImport()->GetCustomAttributeByName(pAssembly->GetManifestToken(), INTEROP_IDISPATCHIMPL_TYPE, (const void**)&pVal, &cbVal);
+ if (hr == S_OK)
+ {
+ CustomAttributeParser cap(pVal, cbVal);
+ IfFailThrow(cap.SkipProlog());
+ UINT8 u1;
+ IfFailThrow(cap.GetU1(&u1));
+
+ DispImplType = (IDispatchImplType)u1;
+ if ((DispImplType > 2) || (DispImplType < 0))
+ DispImplType = SystemDefinedImpl;
+ }
+
+ // If the custom attribute was set to something other than system defined then we will use that.
+ if (DispImplType != SystemDefinedImpl)
+ return (bool) (DispImplType == CompatibleImpl);
+
+ // The registry key check was removed as part of registry cleanup bug 45978;
+ // the net effect is that we always return false here.
+ return false;
+}
+
+MethodTable* RefineProxy(OBJECTREF pServer)
+{
+ CONTRACT (MethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ MethodTable* pRefinedClass = NULL;
+
+#ifdef FEATURE_REMOTING
+ GCPROTECT_BEGIN(pServer);
+ if (pServer->IsTransparentProxy())
+ {
+ // if we have a transparent proxy let us refine it fully
+ // before giving it out to unmanaged code
+ REFLECTCLASSBASEREF refClass= CRemotingServices::GetClass(pServer);
+ pRefinedClass = refClass->GetType().GetMethodTable();
+ }
+ GCPROTECT_END();
+#endif
+
+ RETURN pRefinedClass;
+}
+
+//--------------------------------------------------------------------------
+// This routine is called anytime a com method is invoked for the first time.
+// It is responsible for generating the real stub.
+//
+// This function's only caller is the ComPreStub.
+//
+// For the duration of the prestub, the current Frame on the stack
+// will be a PrestubMethodFrame (which derives from FramedMethodFrame.)
+// Hence, things such as exceptions and gc will work normally.
+//
+// On rare occasions, the ComPrestub may get called twice because two
+// threads try to call the same method simultaneously.
+//--------------------------------------------------------------------------
+extern "C" PCODE ComPreStubWorker(ComPrestubMethodFrame *pPFrame, UINT64 *pErrorReturn)
+{
+ CONTRACT (PCODE)
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ PRECONDITION(CheckPointer(pPFrame));
+ PRECONDITION(CheckPointer(pErrorReturn));
+ }
+ CONTRACT_END;
+
+ HRESULT hr = S_OK;
+ PCODE retAddr = NULL;
+
+ BEGIN_ENTRYPOINT_VOIDRET;
+
+ PCODE pStub = NULL;
+ BOOL fNonTransientExceptionThrown = FALSE;
+
+ ComCallMethodDesc *pCMD = pPFrame->GetComCallMethodDesc();
+ IUnknown *pUnk = *(IUnknown **)pPFrame->GetPointerToArguments();
+
+ OBJECTREF pThrowable = NULL;
+
+ if (!CanRunManagedCode())
+ {
+ hr = E_PROCESS_SHUTDOWN_REENTRY;
+ }
+ else
+ {
+ Thread* pThread = SetupThreadNoThrow();
+ if (pThread == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ else
+ {
+ // Transition to cooperative GC mode before we start setting up the stub.
+ GCX_COOP();
+
+ // The PreStub allocates memory for the frame, but doesn't link it
+ // into the chain or fully initialize it. Do so now.
+ pPFrame->Init();
+ pPFrame->Push();
+
+ ComCallWrapper *pWrap = NULL;
+
+ GCPROTECT_BEGIN(pThrowable)
+ {
+ // We need a try/catch around the code to enter the domain since entering
+ // an AppDomain can throw an exception.
+ EX_TRY
+ {
+ // check for invalid wrappers in the debug build
+ // in the retail all bets are off
+ pWrap = ComCallWrapper::GetWrapperFromIP(pUnk);
+ _ASSERTE(pWrap->IsWrapperActive() || pWrap->IsAggregated());
+
+ // Make sure we're not trying to call on the class interface of a class with ComVisible(false) members
+ // in its hierarchy.
+ if ((pCMD->IsFieldCall()) || (NULL == pCMD->GetInterfaceMethodDesc() && !pCMD->GetMethodDesc()->IsInterface()))
+ {
+ // If we have a fieldcall or a null interface MD, we could be dealing with the IClassX interface.
+ ComMethodTable* pComMT = ComMethodTable::ComMethodTableFromIP(pUnk);
+ pComMT->CheckParentComVisibility(FALSE);
+ }
+
+ ENTER_DOMAIN_ID_PREDICATED(pWrap->GetDomainID(), !!pWrap->NeedToSwitchDomains(pThread))
+ {
+ OBJECTREF pADThrowable = NULL;
+
+ BOOL fExceptionThrown = FALSE;
+
+ GCPROTECT_BEGIN(pADThrowable);
+ {
+ if (pCMD->IsMethodCall())
+ {
+ // We need to ensure all valuetypes are loaded in
+ // the target domain so that GC can happen later
+
+ EX_TRY
+ {
+ MethodDesc* pTargetMD = pCMD->GetMethodDesc();
+ MetaSig::EnsureSigValueTypesLoaded(pTargetMD);
+
+ if (pCMD->IsWinRTCtor() || pCMD->IsWinRTStatic() || pCMD->IsWinRTRedirectedMethod())
+ {
+ // Activation, static method invocation, and call through a redirected interface may be the first
+ // managed code that runs in the module. Fully load it here so we don't have to call EnsureInstanceActive
+ // on every activation/static call.
+ pTargetMD->GetMethodTable()->EnsureInstanceActive();
+ }
+ }
+ EX_CATCH
+ {
+ pADThrowable = GET_THROWABLE();
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+ }
+
+ if (pADThrowable != NULL)
+ {
+ // Transform the exception into an HRESULT. This also sets up
+ // an IErrorInfo on the current thread for the exception.
+ hr = SetupErrorInfo(pADThrowable, pCMD);
+ pADThrowable = NULL;
+ fExceptionThrown = TRUE;
+ }
+ }
+ GCPROTECT_END();
+
+ if(!fExceptionThrown)
+ {
+ GCPROTECT_BEGIN(pADThrowable);
+ {
+ // We need a try/catch around the call to the worker since we need
+ // to transform any exceptions into HRESULTs. We want to do this
+ // inside the AppDomain of the CCW.
+ EX_TRY
+ {
+ GCX_PREEMP();
+ pStub = ComCall::GetComCallMethodStub(pCMD);
+ }
+ EX_CATCH
+ {
+ fNonTransientExceptionThrown = !GET_EXCEPTION()->IsTransient();
+ pADThrowable = GET_THROWABLE();
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ if (pADThrowable != NULL)
+ {
+#ifdef MDA_SUPPORTED
+ if (fNonTransientExceptionThrown)
+ {
+ MDA_TRIGGER_ASSISTANT(InvalidMemberDeclaration, ReportViolation(pCMD, &pADThrowable));
+ }
+#endif
+
+ // Transform the exception into an HRESULT. This also sets up
+ // an IErrorInfo on the current thread for the exception.
+ hr = SetupErrorInfo(pADThrowable, pCMD);
+ pADThrowable = NULL;
+ }
+ }
+ GCPROTECT_END();
+ }
+ }
+ END_DOMAIN_TRANSITION;
+ }
+ EX_CATCH
+ {
+ pThrowable = GET_THROWABLE();
+
+ // If an exception was thrown while transitioning back to the original
+ // AppDomain then we can't use the stub and must report an error.
+ pStub = NULL;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (pThrowable != NULL)
+ {
+ // Transform the exception into an HRESULT. This also sets up
+ // an IErrorInfo on the current thread for the exception.
+ hr = SetupErrorInfo(pThrowable, pCMD);
+ pThrowable = NULL;
+ }
+ }
+ GCPROTECT_END();
+
+ // Unlink the PrestubMethodFrame.
+ pPFrame->Pop();
+
+ if (pStub)
+ {
+ // Now, replace the prestub with the new stub.
+ static_assert((COMMETHOD_CALL_PRESTUB_SIZE - COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET) % DATA_ALIGNMENT == 0,
+ "The call target in COM prestub must be aligned so we can guarantee atomicity of updates");
+
+ UINT_PTR* ppofs = (UINT_PTR*) (((BYTE*)pCMD) - COMMETHOD_CALL_PRESTUB_SIZE + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET);
+
+ *ppofs = ((UINT_PTR)pStub
+#ifdef _TARGET_X86_
+ - (size_t)pCMD
+#endif
+ );
+
+ // Return the address of the prepad. The prepad will regenerate the hidden parameter and due
+ // to the update above will execute the new stub code the second time around.
+ retAddr = (PCODE)(((BYTE*)pCMD - COMMETHOD_CALL_PRESTUB_SIZE)ARM_ONLY(+THUMB_CODE));
+
+ goto Exit;
+ }
+ }
+ }
+
+ // We failed to set up the stub so we need to report an error to the caller.
+ //
+ // IMPORTANT: No floating point operations can occur after this point!
+ //
+ *pErrorReturn = 0;
+ if (pCMD->IsNativeHResultRetVal())
+ *pErrorReturn = hr;
+ else if (pCMD->IsNativeBoolRetVal())
+ *pErrorReturn = 0;
+ else if (pCMD->IsNativeR4RetVal())
+ setFPReturn(4, CLR_NAN_32);
+ else if (pCMD->IsNativeR8RetVal())
+ setFPReturn(8, CLR_NAN_64);
+ else
+ _ASSERTE(pCMD->IsNativeVoidRetVal());
+
+#ifdef _TARGET_X86_
+ // Number of bytes to pop is upper half of the return value on x86
+ *(((INT32 *)pErrorReturn) + 1) = pCMD->GetNumStackBytes();
+#endif
+
+ retAddr = NULL;
+
+Exit:
+
+ END_ENTRYPOINT_VOIDRET;
+
+ RETURN retAddr;
+}
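+
+// Illustrative note on the failure path above: on x86 the 8-byte *pErrorReturn
+// doubles as a packed descriptor for the assembly prestub, roughly:
+//
+//     low  32 bits -> value to place in EAX (the HRESULT, or 0 for bool/void)
+//     high 32 bits -> number of argument bytes to pop (stdcall stack cleanup)
+//
+// Floating point failures travel via setFPReturn instead, which is why no FP
+// operation may occur after the error value has been set up.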
+
+FORCEINLINE void CPListRelease(CQuickArray<ConnectionPoint*>* value)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (value)
+ {
+ // Delete all the connection points.
+ for (UINT i = 0; i < value->Size(); i++)
+ delete (*value)[i];
+
+ // Delete the list itself.
+ delete value;
+ }
+}
+
+typedef CQuickArray<ConnectionPoint*> CPArray;
+
+FORCEINLINE void CPListDoNothing(CPArray*)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+class CPListHolder : public Wrapper<CPArray*, CPListDoNothing, CPListRelease, NULL>
+{
+public:
+ CPListHolder(CPArray* p = NULL)
+ : Wrapper<CPArray*, CPListDoNothing, CPListRelease, NULL>(p)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ FORCEINLINE void operator=(CPArray* p)
+ {
+ WRAPPER_NO_CONTRACT;
+ Wrapper<CPArray*, CPListDoNothing, CPListRelease, NULL>::operator=(p);
+ }
+};
+
+
+WeakReferenceImpl::WeakReferenceImpl(SimpleComCallWrapper *pSimpleWrapper, Thread *pCurrentThread)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(pCurrentThread == GetThread());
+ }
+ CONTRACTL_END;
+
+ //
+ // Create a short weak handle in the current domain and use it to track the lifetime of the object in this domain.
+ // It is a short weak handle so that we can avoid clients calling into an object that will be/is being/has been finalized.
+ // We do NOT use the appdomain of the CCW because the object could be domain-agile and could bleed through the
+ // appdomain boundary. In that case, the object becomes a different object from the user's perspective, and the fact
+ // that it is the same object is just an optimization. Therefore, we always create a new WeakReferenceImpl
+ // instance based on the current domain, as if we were dealing with a copy of the object.
+ //
+ AppDomain *pDomain = pCurrentThread->GetDomain();
+
+ m_adid = pDomain->GetId();
+ m_pContext = pCurrentThread->GetContext();
+
+ {
+ GCX_COOP_THREAD_EXISTS(pCurrentThread);
+ m_ppObject = pDomain->CreateShortWeakHandle(pSimpleWrapper->GetObjectRef());
+ }
+
+ // Start with ref count = 1
+ AddRef();
+}
+
+WeakReferenceImpl::~WeakReferenceImpl()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Ignore the HR. Cleanup must return HR though (due to BEGIN_EXTERNAL_ENTRYPOINT)
+ Cleanup();
+}
+
+HRESULT WeakReferenceImpl::Cleanup()
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ //
+ // Destroy the handle if the AppDomain is still there
+ // The AppDomain is the domain where this WeakReferenceImpl is created
+ //
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ AppDomainFromIDHolder ad(m_adid, TRUE);
+
+ if (!ad.IsUnloaded())
+ DestroyShortWeakHandle(m_ppObject);
+
+ m_ppObject = NULL;
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return S_OK;
+}
+
+struct WeakReferenceResolveCallbackArgs
+{
+ WeakReferenceImpl *pThis;
+ Thread *pThread;
+ GUID iid;
+ IInspectable **ppvObject;
+ HRESULT *pHR;
+};
+
+HRESULT STDMETHODCALLTYPE WeakReferenceImpl::Resolve(REFIID riid, IInspectable **ppvObject)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ if (ppvObject == NULL)
+ return E_INVALIDARG;
+
+ *ppvObject = NULL;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ Thread *pThread = GET_THREAD();
+
+ WeakReferenceResolveCallbackArgs args = { this, pThread, riid, ppvObject, &hr };
+
+ //
+ // Transition to the right domain
+ // WeakReference is bound to the domain where this WeakReference is created, so we must
+ // transition to the domain of WeakReference, not the domain of the CCW, as they might be different
+ // if the CCW is agile.
+ //
+ if (pThread->GetDomain()->GetId() == m_adid)
+ {
+ Resolve_Callback(&args);
+ }
+ else
+ {
+ GCX_COOP_THREAD_EXISTS(pThread);
+
+ pThread->DoContextCallBack(
+ m_adid,
+ m_pContext,
+ (Context::ADCallBackFcnType)Resolve_Callback_SwitchToPreemp,
+ (LPVOID)&args);
+ }
+
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+void WeakReferenceImpl::Resolve_Callback(LPVOID lpData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(lpData));
+ }
+ CONTRACTL_END;
+
+ WeakReferenceResolveCallbackArgs *lpArgs = reinterpret_cast<WeakReferenceResolveCallbackArgs *>(lpData);
+
+ *(lpArgs->pHR) = lpArgs->pThis->ResolveInternal(lpArgs->pThread, lpArgs->iid, lpArgs->ppvObject);
+}
+
+void WeakReferenceImpl::Resolve_Callback_SwitchToPreemp(LPVOID lpData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(lpData));
+ }
+ CONTRACTL_END;
+
+ WeakReferenceResolveCallbackArgs *lpArgs = reinterpret_cast<WeakReferenceResolveCallbackArgs *>(lpData);
+
+ GCX_PREEMP_THREAD_EXISTS(lpArgs->pThread);
+
+ Resolve_Callback(lpData);
+}
+
+//
+// Resolving WeakReference into a IInspectable*
+// Must be called in the right domain where this WeakReference is created
+//
+HRESULT WeakReferenceImpl::ResolveInternal(Thread *pThread, REFIID riid, IInspectable **ppvObject)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ppvObject));
+ PRECONDITION(AppDomain::GetCurrentDomain()->GetId() == m_adid);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ SafeComHolder<IUnknown> pUnk;
+
+ {
+ GCX_COOP_THREAD_EXISTS(pThread);
+
+ OBJECTREF refTarget = NULL;
+ GCPROTECT_BEGIN_THREAD(pThread, refTarget);
+ refTarget = ObjectFromHandle(m_ppObject);
+ if (refTarget != NULL)
+ {
+ //
+ // Retrieve the wrapper
+ //
+ // NOTE: Even though the object is alive, the old CCW (where the weak reference was created)
+ // could be gone if:
+ // 1. the object is domain-agile (for example, string), and
+ // 2. the domain A where the object used to live is unloaded, and
+ // 3. the domain B where we create the WeakReferenceImpl is different from domain A
+ //
+ // In the above case, the object is alive, the CCW in domain A is neutered,
+ // and InlineGetWrapper creates a new CCW.
+ // This means you might get a different IUnknown* identity from Resolve in this case,
+ // but this has always been the case since Whidbey. If we were to fix the identity
+ // problem, we would need to make sure that:
+ // 1. we hand out a different CCW for each domain (instead of having "agile" CCWs), and
+ // 2. we keep per-domain SyncBlockInfo on the SyncBlock
+ //
+ CCWHolder pWrap = ComCallWrapper::InlineGetWrapper(&refTarget);
+
+ //
+ // Retrieve the pUnk pointer and AddRef
+ //
+ pUnk = pWrap->GetBasicIP();
+ }
+ GCPROTECT_END();
+ }
+
+ if (pUnk != NULL)
+ {
+ hr = Unknown_QueryInterface(pUnk, riid, (void **)ppvObject);
+ }
+
+ return hr;
+
+}
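+
+// Illustrative sketch of how a native WinRT client would consume this
+// implementation through the standard IWeakReferenceSource/IWeakReference
+// contract (pSource is a hypothetical IWeakReferenceSource*):
+//
+//     SafeComHolder<IWeakReference> pWeakRef;
+//     IfFailThrow(pSource->GetWeakReference(&pWeakRef));
+//     // ... later ...
+//     SafeComHolder<IInspectable> pTarget;
+//     if (SUCCEEDED(pWeakRef->Resolve(IID_IInspectable, &pTarget)) && pTarget != NULL)
+//     {
+//         // target is still alive; use pTarget
+//     }
+//     // Note: S_OK with a NULL *ppvObject means the target has been collected.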
+
+NOINLINE void LogCCWRefCountChange_BREAKPOINT(ComCallWrapper *pCCW)
+{
+ LIMITED_METHOD_CONTRACT;
+ // Empty function to put breakpoint on when debugging CCW ref-counting issues.
+ // At this point *(pCCW->m_ppThis) is the managed object wrapped by the CCW.
+
+ // Bogus code so the function is not optimized away.
+ if (pCCW == NULL)
+ DebugBreak();
+}
+
+void SimpleComCallWrapper::BuildRefCountLogMessage(LPCWSTR wszOperation, StackSString &ssMessage, ULONG dwEstimatedRefCount)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ // There's no way to get the class name if the AD is unloaded, so we just skip it in
+ // that case. Note that we do log the AD unload event in SimpleComCallWrapper::Neuter so there
+ // should still be enough useful information in the log.
+ AppDomainFromIDHolder ad(GetRawDomainID(), TRUE);
+ if (!ad.IsUnloaded())
+ {
+ LPCUTF8 pszClassName;
+ LPCUTF8 pszNamespace;
+ if (SUCCEEDED(m_pMT->GetMDImport()->GetNameOfTypeDef(m_pMT->GetCl(), &pszClassName, &pszNamespace)))
+ {
+ OBJECTHANDLE handle = GetMainWrapper()->GetRawObjectHandle();
+ Object* obj = NULL;
+ if (handle != NULL)
+ obj = OBJECTREFToObject(ObjectFromHandle(handle));
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, CCWRefCountChange))
+ FireEtwCCWRefCountChange(handle, obj, this, dwEstimatedRefCount, (LONGLONG) ad.GetAddress(),
+ pszClassName, pszNamespace, wszOperation, GetClrInstanceId());
+
+ if (g_pConfig->ShouldLogCCWRefCountChange(pszClassName, pszNamespace))
+ {
+ EX_TRY
+ {
+ StackSString ssClassName;
+ TypeString::AppendType(ssClassName, TypeHandle(m_pMT));
+
+ ssMessage.Printf(W("LogCCWRefCountChange[%s]: '%s', Object=poi(%p)"),
+ wszOperation, // %s operation
+ ssClassName.GetUnicode(), // %s type name
+ handle); // %p Object
+ }
+ EX_CATCH
+ { }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+ }
+ }
+}
+
+// static
+void SimpleComCallWrapper::LogRefCount(ComCallWrapper *pWrap, StackSString &ssMessage, ULONG dwRefCountToLog)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!ssMessage.IsEmpty())
+ {
+ EX_TRY
+ {
+ ssMessage.AppendPrintf(W(", RefCount=%u\n"), dwRefCountToLog);
+ WszOutputDebugString(ssMessage.GetUnicode());
+ }
+ EX_CATCH
+ { }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ LogCCWRefCountChange_BREAKPOINT(pWrap);
+ }
+}
+
+LONGLONG SimpleComCallWrapper::ReleaseImplWithLogging(LONGLONG * pRefCount)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ LONGLONG newRefCount;
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), goto NoLog );
+
+ StackSString ssMessage;
+ ComCallWrapper *pWrap = GetMainWrapper();
+ BuildRefCountLogMessage(W("Release"), ssMessage, GET_EXT_COM_REF(READ_REF(*pRefCount)-1));
+
+ // Decrement the ref count
+ newRefCount = ::InterlockedDecrement64(pRefCount);
+
+ LogRefCount(pWrap, ssMessage, GET_EXT_COM_REF(newRefCount));
+
+ END_SO_INTOLERANT_CODE;
+ return newRefCount;
+
+#ifdef FEATURE_STACK_PROBE // this code is unreachable if FEATURE_STACK_PROBE is not defined
+NoLog:
+ // Decrement the ref count
+ return ::InterlockedDecrement64(pRefCount);
+#endif // FEATURE_STACK_PROBE
+}
+
+
+//--------------------------------------------------------------------------
+// simple ComCallWrapper for all simple std interfaces, that are not used very often
+// like IProvideClassInfo, ISupportsErrorInfo etc.
+//--------------------------------------------------------------------------
+SimpleComCallWrapper::SimpleComCallWrapper()
+{
+ WRAPPER_NO_CONTRACT;
+
+ memset(this, 0, sizeof(SimpleComCallWrapper));
+}
+
+//--------------------------------------------------------------------------
+// VOID SimpleComCallWrapper::Cleanup()
+//--------------------------------------------------------------------------
+VOID SimpleComCallWrapper::Cleanup()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // in case the caller still holds on to the IP
+ for (int i = 0; i < enum_LastStdVtable; i++)
+ {
+ m_rgpVtable[i] = 0;
+ }
+
+ m_pWrap = NULL;
+ m_pMT = NULL;
+
+ if (HasOverlappedRef())
+ {
+ if (m_operlappedPtr)
+ {
+ WindowsRuntimeBufferHelper::ReleaseOverlapped(m_operlappedPtr);
+ m_operlappedPtr = NULL;
+ }
+ UnMarkOverlappedRef();
+ }
+ else
+ {
+ if (m_pCPList)
+ {
+ for (UINT i = 0; i < m_pCPList->Size(); i++)
+ delete (*m_pCPList)[i];
+
+ delete m_pCPList;
+ m_pCPList = NULL;
+ }
+ }
+
+ // if this object was made agile, then we will have stashed away the original handle
+ // so we must release it if the AD wasn't unloaded
+ if (IsAgile() && m_hOrigDomainHandle)
+ {
+ // the domain which the original handle belongs to might be already unloaded
+ if (GetRawDomainID()==::GetAppDomain()->GetId())
+ DestroyRefcountedHandle(m_hOrigDomainHandle);
+ else
+ {
+ GCX_COOP();
+ {
+ AppDomainFromIDHolder ad(GetRawDomainID(), TRUE);
+ if (!ad.IsUnloaded())
+ DestroyRefcountedHandle(m_hOrigDomainHandle);
+ }
+ }
+ m_hOrigDomainHandle = NULL;
+ }
+
+ if (m_pTemplate)
+ {
+ m_pTemplate->Release();
+ m_pTemplate = NULL;
+ }
+
+ if (m_pAuxData)
+ {
+ delete m_pAuxData;
+ m_pAuxData = NULL;
+ }
+}
+
+
+VOID SimpleComCallWrapper::Neuter(bool fSkipHandleCleanup)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(m_pSyncBlock));
+ PRECONDITION(!IsNeutered());
+ // fSkipHandleCleanup only safe under AppX process because there are no appDomains
+ PRECONDITION(!(fSkipHandleCleanup && !AppX::IsAppXProcess()));
+ }
+ CONTRACTL_END;
+
+ STRESS_LOG1 (LF_INTEROP, LL_INFO100, "Neutering CCW 0x%p\n", this->GetMainWrapper());
+
+ // Disconnect the object from the CCW
+ // Starting now, if this object gets passed out
+ // to unmanaged code, it will create a new CCW tied
+ // to the domain it was passed out from.
+ InteropSyncBlockInfo* pInteropInfo = m_pSyncBlock->GetInteropInfoNoCreate();
+ if (pInteropInfo)
+ pInteropInfo->SetCCW(NULL);
+
+ // NULL the syncblock entry - we can't hang onto this anymore as the syncblock will be killed asynchronously to us.
+ ResetSyncBlock();
+
+ if (!fSkipHandleCleanup)
+ {
+ // Disconnect the CCW from the object
+ // Calls made on this CCW will no longer succeed.
+ // The CCW has been neutered.
+ // do this for each of the CCWs
+ m_pWrap->Neuter();
+ }
+
+ // NULL the context, we shall only use m_dwDomainId from this point on
+ m_pContext = NULL;
+
+ // NULL the handles of DispatchMemberInfo if the AppDomain hosting them has been unloaded
+ DispatchExInfo *pDispatchExInfo = GetDispatchExInfo();
+ if (pDispatchExInfo)
+ {
+ pDispatchExInfo->DestroyMemberInfoHandles();
+ }
+
+ StackSString ssMessage;
+ ComCallWrapper *pWrap = m_pWrap;
+ if (g_pConfig->LogCCWRefCountChangeEnabled())
+ {
+ BuildRefCountLogMessage(W("Neuter"), ssMessage, GET_EXT_COM_REF(READ_REF(m_llRefCount) | CLEANUP_SENTINEL));
+ }
+
+ // Set the neutered bit on the ref-count.
+ LONGLONG *pRefCount = &m_llRefCount;
+ LONGLONG oldRefCount = *pRefCount;
+ LONGLONG newRefCount = oldRefCount | CLEANUP_SENTINEL;
+ while (InterlockedCompareExchange64((LONGLONG*)pRefCount, newRefCount, oldRefCount) != oldRefCount)
+ {
+ oldRefCount = *pRefCount;
+ newRefCount = oldRefCount | CLEANUP_SENTINEL;
+ }
+
+ // IMPORTANT: Do not touch instance fields or any other data associated with the CCW beyond this
+ // point unless newRefCount equals CLEANUP_SENTINEL (it's the only case when we know that Release
+ // could not swoop in and destroy our data structures).
+
+ if (g_pConfig->LogCCWRefCountChangeEnabled())
+ {
+ LogRefCount(pWrap, ssMessage, GET_EXT_COM_REF(newRefCount));
+ }
+
+ // If we hit the sentinel value, it's our responsibility to clean up.
+ if (newRefCount == CLEANUP_SENTINEL)
+ m_pWrap->Cleanup();
+}
+
+//--------------------------------------------------------------------------
+//destructor
+//--------------------------------------------------------------------------
+SimpleComCallWrapper::~SimpleComCallWrapper()
+{
+ WRAPPER_NO_CONTRACT;
+
+ Cleanup();
+}
+
+//--------------------------------------------------------------------------
+// Creates a simple wrapper off the process heap (thus avoiding any debug
+// memory tracking) and initializes the memory to zero
+// static
+//--------------------------------------------------------------------------
+//
+SimpleComCallWrapper* SimpleComCallWrapper::CreateSimpleWrapper()
+{
+ CONTRACT (SimpleComCallWrapper*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ NewHolder<SimpleComCallWrapper> pWrapper = new SimpleComCallWrapper;
+ memset (pWrapper, 0, sizeof(SimpleComCallWrapper));
+
+ pWrapper.SuppressRelease();
+ RETURN pWrapper;
+}
+
+//--------------------------------------------------------------------------
+// Init, with the MethodTable, pointer to the vtable of the interface
+// and the main ComCallWrapper if the interface needs it
+//--------------------------------------------------------------------------
+void SimpleComCallWrapper::InitNew(OBJECTREF oref, ComCallWrapperCache *pWrapperCache, ComCallWrapper* pWrap,
+ ComCallWrapper *pClassWrap, Context *pContext, SyncBlock *pSyncBlock,
+ ComCallWrapperTemplate* pTemplate)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(oref != NULL);
+ PRECONDITION(CheckPointer(pWrap));
+ PRECONDITION(CheckPointer(pWrapperCache, NULL_OK));
+ PRECONDITION(CheckPointer(pContext));
+ PRECONDITION(CheckPointer(pSyncBlock, NULL_OK));
+ PRECONDITION(CheckPointer(pTemplate));
+ PRECONDITION(m_pSyncBlock == NULL);
+ PRECONDITION(CheckPointer(g_pExceptionClass));
+ }
+ CONTRACTL_END;
+
+ MethodTable* pMT = pTemplate->GetClassType().GetMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+
+#ifdef FEATURE_REMOTING
+ if (CRemotingServices::IsTransparentProxy(OBJECTREFToObject(oref)))
+ m_flags |= enum_IsObjectTP;
+#endif
+
+ m_pMT = pMT;
+ m_pWrap = pWrap;
+ m_pClassWrap = pClassWrap;
+ m_pWrapperCache = pWrapperCache;
+ m_pTemplate = pTemplate;
+ m_pTemplate->AddRef();
+
+ m_pOuter = NULL;
+
+ m_pSyncBlock = pSyncBlock;
+ m_pContext = pContext;
+ m_dwDomainId = pContext->GetDomain()->GetId();
+ m_hOrigDomainHandle = NULL;
+
+ //@TODO: CTS, when we transition into the correct context before creating a wrapper
+ // then uncomment the next line
+ //_ASSERTE(pContext == GetCurrentContext());
+
+ if (pMT->IsComObjectType())
+ m_flags |= enum_IsExtendsCom;
+
+#ifdef _DEBUG
+ for (int i = 0; i < enum_LastStdVtable; i++)
+ _ASSERTE(GetStdInterfaceKind((IUnknown*)(&g_rgStdVtables[i])) == i);
+#endif // _DEBUG
+
+ for (int i = 0; i < enum_LastStdVtable; i++)
+ m_rgpVtable[i] = g_rgStdVtables[i];
+
+ // If the managed object extends a COM base class then we need to set IProvideClassInfo
+ // to NULL until we determine if we need to use the IProvideClassInfo of the base class
+ // or the one of the managed class.
+ if (IsExtendsCOMObject())
+ m_rgpVtable[enum_IProvideClassInfo] = NULL;
+
+ // IStringable is valid only on classes that are exposed to WinRT.
+ m_rgpVtable[enum_IStringable] = NULL;
+
+ // IErrorInfo is valid only for exception classes
+ m_rgpVtable[enum_IErrorInfo] = NULL;
+
+ // IDispatchEx is valid only for classes that have expando capabilities.
+ m_rgpVtable[enum_IDispatchEx] = NULL;
+}
+
+//--------------------------------------------------------------------------
+// ReInit,with the new sync block and the urt context
+//--------------------------------------------------------------------------
+void SimpleComCallWrapper::ReInit(SyncBlock* pSyncBlock)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pSyncBlock));
+ }
+ CONTRACTL_END;
+
+ m_pSyncBlock = pSyncBlock;
+}
+
+//--------------------------------------------------------------------------
+// Returns TRUE if the ICustomQI implementation returns Handled or Failed for IID_IMarshal.
+//--------------------------------------------------------------------------
+BOOL SimpleComCallWrapper::CustomQIRespondsToIMarshal()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(GetComCallWrapperTemplate()->SupportsICustomQueryInterface());
+ }
+ CONTRACTL_END;
+
+ if ((m_flags & enum_CustomQIRespondsToIMarshal_Inited) == 0)
+ {
+ DWORD newFlags = enum_CustomQIRespondsToIMarshal_Inited;
+
+ SafeComHolder<IUnknown> pUnk;
+ if (ComCallWrapper::GetComIPFromCCW_HandleCustomQI(GetMainWrapper(), IID_IMarshal, NULL, &pUnk))
+ {
+ newFlags |= enum_CustomQIRespondsToIMarshal;
+ }
+ FastInterlockOr((ULONG *)&m_flags, newFlags);
+ }
+
+ return (m_flags & enum_CustomQIRespondsToIMarshal);
+}
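+
+// Illustrative sketch of the lazily-computed flag-pair pattern used above
+// (enum_X, enum_X_Inited, and ComputeX are hypothetical): one bit records
+// "answer computed", another the answer itself, and both are published with a
+// single atomic OR, so racing callers either recompute (which is idempotent)
+// or read a consistent pair.
+//
+//     if ((m_flags & enum_X_Inited) == 0)
+//     {
+//         DWORD newFlags = enum_X_Inited;
+//         if (ComputeX())                     // safe to run more than once
+//             newFlags |= enum_X;
+//         FastInterlockOr((ULONG *)&m_flags, newFlags);
+//     }
+//     return (m_flags & enum_X);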
+
+//--------------------------------------------------------------------------
+// Initializes the information used for exposing exceptions to COM.
+//--------------------------------------------------------------------------
+void SimpleComCallWrapper::InitExceptionInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_rgpVtable[enum_IErrorInfo] = g_rgStdVtables[enum_IErrorInfo];
+}
+
+//--------------------------------------------------------------------------
+// Initializes the IDispatchEx information.
+//--------------------------------------------------------------------------
+void SimpleComCallWrapper::InitDispatchExInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+
+ // Make sure the class supports at least IReflect.
+ PRECONDITION(SupportsIReflect(m_pMT));
+ }
+ CONTRACTL_END;
+
+ SimpleCCWAuxData *pAuxData = GetOrCreateAuxData();
+ if (pAuxData->m_pDispatchExInfo)
+ return;
+
+ // Create the DispatchExInfo object.
+ NewHolder<DispatchExInfo> pDispExInfo = new DispatchExInfo(this, m_pMT, SupportsIExpando(m_pMT));
+
+ // Synchronize the DispatchExInfo with the actual expando object.
+ pDispExInfo->SynchWithManagedView();
+
+ // Swap the lock into the class member in a thread safe manner.
+ if (NULL == FastInterlockCompareExchangePointer(&pAuxData->m_pDispatchExInfo, pDispExInfo.GetValue(), NULL))
+ pDispExInfo.SuppressRelease();
+
+ // Set the vtable entry to ensure that the next QI call will return immediately.
+ m_rgpVtable[enum_IDispatchEx] = g_rgStdVtables[enum_IDispatchEx];
+}
+
+void SimpleComCallWrapper::SetUpCPList()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CQuickArray<MethodTable *> SrcItfList;
+
+ _ASSERTE(!HasOverlappedRef());
+
+ // If the list has already been set up, then return.
+ if (m_pCPList)
+ return;
+
+ // Retrieve the list of COM source interfaces for the managed class.
+ GetComSourceInterfacesForClass(m_pMT, SrcItfList);
+
+ // Call the helper to do the rest of the set up.
+ SetUpCPListHelper(SrcItfList.Ptr(), (int)SrcItfList.Size());
+}
+
+
+void SimpleComCallWrapper::SetUpCPListHelper(MethodTable **apSrcItfMTs, int cSrcItfs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(apSrcItfMTs));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!HasOverlappedRef());
+
+ CPListHolder pCPList = NULL;
+ ComCallWrapper *pWrap = GetMainWrapper();
+ int NumCPs = 0;
+
+ // Allocate the list of connection points.
+ pCPList = CreateCPArray();
+ pCPList->AllocThrows(cSrcItfs);
+
+ for (int i = 0; i < cSrcItfs; i++)
+ {
+        // Create a CP helper through which CP operations will be done.
+ // Should we throw here instead of ignoring creation errors?
+ ConnectionPoint *pCP = TryCreateConnectionPoint(pWrap, apSrcItfMTs[i]);
+ if (pCP != NULL)
+ {
+ // Add the connection point to the list.
+ (*pCPList)[NumCPs++] = pCP;
+ }
+
+ }
+
+    // Now that we know the actual number of connection points we were
+ // able to hook up, resize the array.
+ pCPList->Shrink(NumCPs);
+
+    // Finally, set the connection point list in the simple wrapper. If no
+    // other thread has set it already, suppress the holder's release to
+    // indicate that ownership has been transferred to the simple wrapper.
+ if (InterlockedCompareExchangeT(&m_pCPList, pCPList.GetValue(), NULL) == NULL)
+ pCPList.SuppressRelease();
+}
+
+ConnectionPoint *SimpleComCallWrapper::TryCreateConnectionPoint(ComCallWrapper *pWrap, MethodTable *pEventMT)
+{
+ CONTRACT (ConnectionPoint*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrap));
+ PRECONDITION(CheckPointer(pEventMT));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ ConnectionPoint *pCP = NULL;
+
+ EX_TRY
+ {
+ pCP = CreateConnectionPoint(pWrap, pEventMT);
+ }
+ EX_CATCH
+ {
+ pCP = NULL;
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+
+ RETURN pCP;
+}
+
+ConnectionPoint *SimpleComCallWrapper::CreateConnectionPoint(ComCallWrapper *pWrap, MethodTable *pEventMT)
+{
+ CONTRACT (ConnectionPoint*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pWrap));
+ PRECONDITION(CheckPointer(pEventMT));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN (new ConnectionPoint(pWrap, pEventMT));
+}
+
+CQuickArray<ConnectionPoint*> *SimpleComCallWrapper::CreateCPArray()
+{
+ CONTRACT (CQuickArray<ConnectionPoint*>*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN (new CQuickArray<ConnectionPoint*>());
+}
+
+//--------------------------------------------------------------------------
+// Returns TRUE if the simple wrapper represents a COM+ exception object.
+//--------------------------------------------------------------------------
+BOOL SimpleComCallWrapper::SupportsExceptions(MethodTable *pClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pClass, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ while (pClass != NULL)
+ {
+ if (pClass == g_pExceptionClass)
+ return TRUE;
+
+ pClass = pClass->GetComPlusParentMethodTable();
+ }
+ return FALSE;
+}
+
+//--------------------------------------------------------------------------
+// Returns TRUE if pClass represents a class and is exposed to WinRT.
+//--------------------------------------------------------------------------
+BOOL SimpleComCallWrapper::SupportsIStringable(MethodTable *pClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pClass, NULL_OK));
+ }
+ CONTRACTL_END;
+
+    // Support IStringable if the MethodTable represents a class.
+ if (pClass != NULL
+ && IsTdClass(pClass->GetAttrClass())
+ )
+ {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+//--------------------------------------------------------------------------
+// Returns TRUE if the COM+ object that this wrapper represents implements
+// IReflect.
+//--------------------------------------------------------------------------
+BOOL SimpleComCallWrapper::SupportsIReflect(MethodTable *pClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pClass));
+ }
+ CONTRACTL_END;
+
+ // We want to disallow passing out IDispatchEx for Type inheritors to close a security hole.
+ if (pClass == g_pRuntimeTypeClass)
+ return FALSE;
+
+ if (MscorlibBinder::IsClass(pClass, CLASS__CLASS_INTROSPECTION_ONLY))
+ return FALSE;
+
+ if (MscorlibBinder::IsClass(pClass, CLASS__TYPE_BUILDER))
+ return FALSE;
+
+ if (MscorlibBinder::IsClass(pClass, CLASS__TYPE))
+ return FALSE;
+
+ if (MscorlibBinder::IsClass(pClass, CLASS__ENUM_BUILDER))
+ return FALSE;
+
+    // Check to see if the MethodTable associated with the wrapper implements IReflect.
+ return pClass->ImplementsInterface(MscorlibBinder::GetClass(CLASS__IREFLECT));
+}
+
+//--------------------------------------------------------------------------
+// Returns TRUE if the COM+ object that this wrapper represents implements
+// IExpando.
+//--------------------------------------------------------------------------
+BOOL SimpleComCallWrapper::SupportsIExpando(MethodTable *pClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pClass));
+ }
+ CONTRACTL_END;
+
+ // Check to see if the MethodTable associated with the wrapper implements IExpando.
+ return pClass->ImplementsInterface(MscorlibBinder::GetClass(CLASS__IEXPANDO));
+}
+
+// NOINLINE to prevent RCWHolder from forcing caller to push/pop an FS:0 handler
+NOINLINE BOOL SimpleComCallWrapper::ShouldUseManagedIProvideClassInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BOOL bUseManagedIProvideClassInfo = TRUE;
+
+ // Retrieve the MethodTable of the wrapper.
+ ComCallWrapper *pMainWrap = GetMainWrapper();
+
+ // Only extensible RCW's should go down this code path.
+ _ASSERTE(pMainWrap->IsExtendsCOMObject());
+
+ MethodTable * pObjectMT = pMainWrap->GetSimpleWrapper()->GetMethodTable();
+ MethodTable * pMT = pObjectMT;
+
+ // Find the first COM visible IClassX starting at the bottom of the hierarchy and
+ // going up the inheritance chain.
+ while (pMT != NULL)
+ {
+ if (IsTypeVisibleFromCom(TypeHandle(pMT)))
+ break;
+ pMT = pMT->GetComPlusParentMethodTable();
+ }
+
+ // Since this is an extensible RCW if the CLR classes that derive from the COM component
+ // are not visible then we will give out the COM component's IProvideClassInfo.
+ if (pMT == NULL || pMT == g_pObjectClass)
+ {
+ SyncBlock* pSyncBlock = GetSyncBlock();
+ _ASSERTE(pSyncBlock);
+
+ RCWHolder pRCW(GetThread());
+ RCWPROTECT_BEGIN(pRCW, pSyncBlock);
+
+ bUseManagedIProvideClassInfo = !pRCW->SupportsIProvideClassInfo();
+
+ RCWPROTECT_END(pRCW);
+ }
+
+ // Object should always be visible if we return TRUE
+ _ASSERTE(!bUseManagedIProvideClassInfo || pMT != NULL);
+
+ return bUseManagedIProvideClassInfo;
+}
+
+
+// QI for well-known interfaces from within the runtime: fetch directly by index instead of comparing GUIDs.
+// The returned interface is AddRef'd.
+IUnknown* SimpleComCallWrapper::QIStandardInterface(Enum_StdInterfaces index)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ // assert for valid index
+ PRECONDITION(index < enum_LastStdVtable);
+ INSTANCE_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ IUnknown* pIntf = NULL;
+
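+    // Note: a COM interface pointer is just a pointer to a vtable pointer, so
+    // the address of the cached vtable slot itself serves as the interface
+    // pointer; no separate per-interface object needs to be allocated.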
+ if (m_rgpVtable[index] != NULL)
+ {
+ pIntf = (IUnknown*)&m_rgpVtable[index];
+ }
+ else if (index == enum_IProvideClassInfo)
+ {
+        // If the class has a visible managed part, or if the base class does
+        // not implement IProvideClassInfo, then use the one on the managed class.
+ if (ShouldUseManagedIProvideClassInfo())
+ {
+ // Set up the vtable pointer so that next time we don't have to determine
+ // that the IProvideClassInfo is provided by the managed class.
+ m_rgpVtable[enum_IProvideClassInfo] = g_rgStdVtables[enum_IProvideClassInfo];
+
+ // Return the interface pointer to the standard IProvideClassInfo interface.
+ pIntf = (IUnknown*)&m_rgpVtable[enum_IProvideClassInfo];
+ }
+ }
+ else if (index == enum_IErrorInfo)
+ {
+ if (SupportsExceptions(m_pMT))
+ {
+ // Initialize the exception info before we return the interface.
+ InitExceptionInfo();
+ pIntf = (IUnknown*)&m_rgpVtable[enum_IErrorInfo];
+ }
+ }
+ else if(index == enum_IStringable)
+ {
+ if(SupportsIStringable(m_pMT))
+ {
+ // Set up the vtable pointer so that next time we don't have to determine
+ // that the IStringable is provided by the managed class.
+ m_rgpVtable[enum_IStringable] = g_rgStdVtables[enum_IStringable];
+
+ // Return the interface pointer to the standard IStringable interface.
+ pIntf = (IUnknown*)&m_rgpVtable[enum_IStringable];
+ }
+ }
+
+ else if (index == enum_IDispatchEx)
+ {
+#ifdef FEATURE_CORECLR
+ if (AppX::IsAppXProcess())
+ {
+ RETURN NULL;
+ }
+#endif // FEATURE_CORECLR
+
+ if (SupportsIReflect(m_pMT))
+ {
+ // Initialize the DispatchExInfo before we return the interface.
+ InitDispatchExInfo();
+ pIntf = (IUnknown*)&m_rgpVtable[enum_IDispatchEx];
+ }
+ }
+
+ // If we found what we were looking for, then AddRef the wrapper.
+ // Note that we don't do SafeAddRef(pIntf) because it's overkill to
+ // go via IUnknown when we already have the wrapper in-hand.
+ if (pIntf)
+ {
+ if (index == enum_InnerUnknown)
+ this->AddRef();
+ else
+ this->AddRefWithAggregationCheck();
+ }
+
+ RETURN pIntf;
+}
+
+#include <optsmallperfcritical.h> // improves CCW QI perf by ~10%
+
+#define IS_EQUAL_GUID(refguid,data1,data2,data3, data4,data5,data6,data7,data8,data9,data10,data11) \
+ ((((DWORD*)&refguid)[0] == (data1)) && \
+ (((DWORD*)&refguid)[1] == ((data3<<16)|data2)) && \
+ (((DWORD*)&refguid)[2] == ((data7<<24)|(data6<<16)|(data5<<8)|data4)) && \
+ (((DWORD*)&refguid)[3] == ((data11<<24)|(data10<<16)|(data9<<8)|data8))) \
+
+#define IS_EQUAL_GUID_LOW_12_BYTES(refguid,data1,data2,data3, data4,data5,data6,data7,data8,data9,data10,data11) \
+ ((((DWORD*)&refguid)[1] == ((data3<<16)|data2)) && \
+ (((DWORD*)&refguid)[2] == ((data7<<24)|(data6<<16)|(data5<<8)|data4)) && \
+ (((DWORD*)&refguid)[3] == ((data11<<24)|(data10<<16)|(data9<<8)|data8))) \
+
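+// Worked example: IID_IUnknown {00000000-0000-0000-C000-000000000046} laid out
+// as four little-endian DWORDs is
+//   [0] 0x00000000   (Data1)
+//   [1] 0x00000000   ((Data3 << 16) | Data2)
+//   [2] 0x000000C0   (first four bytes of Data4: C0 00 00 00)
+//   [3] 0x46000000   (last four bytes of Data4: 00 00 00 46)
+// which is exactly what IS_EQUAL_GUID(riid, 0x00000000,0x0000,0x0000,
+// 0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x46) compares against.
+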
+#define HANDLE_IID_INLINE(itfEnum,data1,data2,data3, data4,data5,data6,data7,data8,data9,data10,data11) \
+ CASE_IID_INLINE(itfEnum,data1,data2,data3, data4,data5,data6,data7,data8,data9,data10,data11) \
+ { \
+ RETURN QIStandardInterface(itfEnum); \
+ } \
+ break; \
+
+#define CASE_IID_INLINE(itfEnum,data1,data2,data3, data4,data5,data6,data7,data8,data9,data10,data11) \
+ case data1: \
+ if (IS_EQUAL_GUID_LOW_12_BYTES(riid,data1,data2,data3, data4,data5,data6,data7,data8,data9,data10,data11)) \
+
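+// For reference, HANDLE_IID_INLINE(enum_IErrorInfo, 0x1CF2B120,0x547D,0x101B,
+// 0x8E,0x65,0x08,0x00,0x2B,0x2B,0xD1,0x19) expands to roughly:
+//
+//   case 0x1CF2B120:
+//       if (IS_EQUAL_GUID_LOW_12_BYTES(riid, 0x1CF2B120, ...))
+//       {
+//           RETURN QIStandardInterface(enum_IErrorInfo);
+//       }
+//       break;
+//
+// i.e. the switch in QIStandardInterface(REFIID) below dispatches on Data1 and
+// only then verifies the remaining 12 bytes of the IID.
+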
+#define IS_KNOWN_INTERFACE_CONTRACT(iid) \
+ CONTRACT(bool) \
+ { \
+ MODE_ANY; \
+ NOTHROW; \
+ GC_NOTRIGGER; \
+ SO_TOLERANT; \
+ POSTCONDITION(RETVAL == !!IsEqualGUID(iid, riid)); \
+ } \
+ CONTRACT_END; \
+
+inline bool IsIUnknown(REFIID riid)
+{
+ IS_KNOWN_INTERFACE_CONTRACT(IID_IUnknown);
+ RETURN IS_EQUAL_GUID(riid, 0x00000000,0x0000,0x0000,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x46);
+}
+inline bool IsIInspectable(REFIID riid)
+{
+ IS_KNOWN_INTERFACE_CONTRACT(IID_IInspectable);
+ RETURN IS_EQUAL_GUID(riid, 0xAF86E2E0,0xB12D,0x4c6a,0x9C,0x5A,0xD7,0xAA,0x65,0x10,0x1E,0x90);
+}
+inline bool IsIDispatch(REFIID riid)
+{
+ IS_KNOWN_INTERFACE_CONTRACT(IID_IDispatch);
+ RETURN IS_EQUAL_GUID(riid, 0x00020400,0x0000,0x0000,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x46);
+}
+inline bool IsIManagedObject(REFIID riid)
+{
+ IS_KNOWN_INTERFACE_CONTRACT(IID_IManagedObject);
+ RETURN IS_EQUAL_GUID(riid, 0xC3FCC19E,0xA970,0x11D2,0x8B,0x5A,0x00,0xA0,0xC9,0xB7,0xC9,0xC4);
+}
+inline bool IsGUID_NULL(REFIID riid)
+{
+ IS_KNOWN_INTERFACE_CONTRACT(GUID_NULL);
+ RETURN IS_EQUAL_GUID(riid, 0x00000000,0x0000,0x0000,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00);
+}
+inline bool IsIAgileObject(REFIID riid)
+{
+ IS_KNOWN_INTERFACE_CONTRACT(IID_IAgileObject);
+ RETURN IS_EQUAL_GUID(riid, 0x94ea2b94,0xe9cc,0x49e0,0xc0,0xff,0xee,0x64,0xca,0x8f,0x5b,0x90);
+}
+inline bool IsIErrorInfo(REFIID riid)
+{
+ IS_KNOWN_INTERFACE_CONTRACT(IID_IErrorInfo);
+ RETURN IS_EQUAL_GUID(riid, 0x1CF2B120,0x547D,0x101B,0x8E,0x65,0x08,0x00,0x2B,0x2B,0xD1,0x19);
+}
+
+// QI for well known interfaces from within the runtime based on an IID.
+IUnknown* SimpleComCallWrapper::QIStandardInterface(REFIID riid)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // IID_IMarshal 00000003-0000-0000-C000-000000000046
+ // IID_IWeakReferenceSource 00000038-0000-0000-C000-000000000046
+ // IID_IErrorInfo 1CF2B120-547D-101B-8E65-08002B2BD119
+ // IID_ICCW 64BD43F8-BFEE-4EC4-B7EB-2935158DAE21
+ // IID_ICustomPropertyProvider 7C925755-3E48-42B4-8677-76372267033F
+ // IID_IAgileObject 94ea2b94-e9cc-49e0-c0ff-ee64ca8f5b90
+ // IID_IDispatchEx A6EF9860-C720-11d0-9337-00A0C90DCAA9
+ // IID_IProvideClassInfo B196B283-BAB4-101A-B69C-00AA00341D07
+ // IID_IConnectionPointContainer B196B284-BAB4-101A-B69C-00AA00341D07
+ // IID_IManagedObject c3fcc19e-a970-11d2-8b5a-00a0c9b7c9c4
+ // IID_IObjectSafety CB5BDC81-93C1-11cf-8F20-00805F2CD064
+ // IID_ISupportErrorInfo DF0B3D60-548F-101B-8E65-08002B2BD119
+    // IID_IStringable                  96369F54-8EB6-48f0-ABCE-C1B211E627C3
+
+ // Switch on the first DWORD since they're all (currently) unique.
+ switch (riid.Data1)
+ {
+ HANDLE_IID_INLINE(enum_IMarshal ,0x00000003,0x0000,0x0000,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x46);
+ HANDLE_IID_INLINE(enum_IErrorInfo ,0x1CF2B120,0x547D,0x101B,0x8E,0x65,0x08,0x00,0x2B,0x2B,0xD1,0x19);
+ HANDLE_IID_INLINE(enum_ICCW ,0x64BD43F8,0xBFEE,0x4EC4,0xB7,0xEB,0x29,0x35,0x15,0x8D,0xAE,0x21);
+ HANDLE_IID_INLINE(enum_ICustomPropertyProvider ,0x7C925755,0x3E48,0x42B4,0x86,0x77,0x76,0x37,0x22,0x67,0x03,0x3F); // hit1, above
+ HANDLE_IID_INLINE(enum_IDispatchEx ,0xA6EF9860,0xC720,0x11d0,0x93,0x37,0x00,0xA0,0xC9,0x0D,0xCA,0xA9); // hit3, !=
+ HANDLE_IID_INLINE(enum_ISupportsErrorInfo ,0xDF0B3D60,0x548F,0x101B,0x8E,0x65,0x08,0x00,0x2B,0x2B,0xD1,0x19);
+ HANDLE_IID_INLINE(enum_IStringable ,0x96369f54,0x8eb6,0x48f0,0xab,0xce,0xc1,0xb2,0x11,0xe6,0x27,0xc3);
+
+ CASE_IID_INLINE( enum_IProvideClassInfo ,0xB196B283,0xBAB4,0x101A,0xB6,0x9C,0x00,0xAA,0x00,0x34,0x1D,0x07) // hit4, !=
+ {
+ // respond only if this is a classic COM interop scenario
+ MethodTable *pClassMT = GetMethodTable();
+ if (!pClassMT->IsExportedToWinRT() && !pClassMT->IsWinRTObjectType())
+ {
+ RETURN QIStandardInterface(enum_IProvideClassInfo);
+ }
+ }
+ break;
+
+ CASE_IID_INLINE( enum_IConnectionPointContainer,0xB196B284,0xBAB4,0x101A,0xB6,0x9C,0x00,0xAA,0x00,0x34,0x1D,0x07) // b196b284 101abab4 aa009cb6 071d3400
+ {
+ // respond only if this is a classic COM interop scenario
+ MethodTable *pClassMT = GetMethodTable();
+ if (!pClassMT->IsExportedToWinRT() && !pClassMT->IsWinRTObjectType())
+ {
+ RETURN QIStandardInterface(enum_IConnectionPointContainer);
+ }
+ }
+ break;
+
+ CASE_IID_INLINE( enum_IWeakReferenceSource ,0x00000038,0x0000,0x0000,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x46)
+ {
+ // respond only if this type implements a WinRT interface
+ if (m_pTemplate->SupportsIInspectable())
+ RETURN QIStandardInterface(enum_IWeakReferenceSource);
+ }
+ break;
+
+ CASE_IID_INLINE( enum_IManagedObject ,0xc3fcc19e,0xa970,0x11d2,0x8b,0x5a,0x00,0xa0,0xc9,0xb7,0xc9,0xc4) // hit2, below, !=
+ {
+ // Check whether the type of the object wrapped by this CCW is exported to WinRT. This is the only
+ // case where we are sure that it's not a classic COM interop scenario and we can fail the QI for
+ // IManagedObject. Otherwise check the AppDomain setting.
+ MethodTable *pClassMT = GetMethodTable();
+ if (!pClassMT->IsExportedToWinRT() && !pClassMT->IsWinRTObjectType() && GetAppDomain()->GetPreferComInsteadOfManagedRemoting() == FALSE)
+ RETURN QIStandardInterface(enum_IManagedObject);
+ }
+ break;
+
+ CASE_IID_INLINE( enum_IObjectSafety ,0xCB5BDC81,0x93C1,0x11cf,0x8F,0x20,0x00,0x80,0x5F,0x2C,0xD0,0x64)
+ {
+ // Don't implement IObjectSafety by default.
+ // Use IObjectSafety only for IE Hosting or similar hosts
+ // which create sandboxed AppDomains.
+ // Unconditionally implementing IObjectSafety would allow
+        // untrusted scripts to use managed components.
+ // Managed components could implement their own IObjectSafety to
+ // override this.
+ BOOL bShouldProvideIObjectSafety=FALSE;
+ {
+ GCX_COOP();
+ AppDomainFromIDHolder pDomain(GetDomainID(), FALSE);
+ if (!pDomain.IsUnloaded())
+ bShouldProvideIObjectSafety=!pDomain->GetSecurityDescriptor()->IsFullyTrusted();
+ }
+
+ if(bShouldProvideIObjectSafety)
+ RETURN QIStandardInterface(enum_IObjectSafety);
+ }
+ break;
+
+ CASE_IID_INLINE( enum_IAgileObject ,0x94ea2b94,0xe9cc,0x49e0,0xc0,0xff,0xee,0x64,0xca,0x8f,0x5b,0x90)
+ {
+        // Don't implement IAgileObject if we are aggregated, if we are in a non-AppX process, if the object explicitly implements IMarshal,
+ // or if its ICustomQI returns Failed or Handled for IID_IMarshal (compat).
+ //
+        // The AppX check was primarily done to ensure that we don't break VS in classic mode when it loads the desktop CLR since it needs
+ // objects to be non-Agile. In Apollo, we had objects agile in CoreCLR and even though we introduced AppX support in PhoneBlue,
+ // we should not constrain object agility using the desktop constraint, especially since VS does not rely on CoreCLR for its
+ // desktop execution.
+ //
+ // Keeping the Apollo behaviour also ensures that we allow SL 8.1 scenarios (which do not pass the AppX flag like the modern host)
+        // to use CoreDispatcher for async, in the expected manner, as the OS implementation for CoreDispatcher expects objects to be Agile.
+ if (!IsAggregated()
+#if !defined(FEATURE_CORECLR)
+ && AppX::IsAppXProcess()
+#endif // !defined(FEATURE_CORECLR)
+ )
+ {
+ ComCallWrapperTemplate *pTemplate = GetComCallWrapperTemplate();
+ if (!pTemplate->ImplementsIMarshal())
+ {
+ if (!pTemplate->SupportsICustomQueryInterface() || !CustomQIRespondsToIMarshal())
+ {
+ RETURN QIStandardInterface(enum_IAgileObject);
+ }
+ }
+ }
+ }
+ break;
+ }
+
+ RETURN NULL;
+}
+#include <optdefault.h>
+
+//--------------------------------------------------------------------------
+// Init the outer unknown for aggregation support
+//--------------------------------------------------------------------------
+void SimpleComCallWrapper::InitOuter(IUnknown* pOuter)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pOuter, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (pOuter != NULL)
+ m_pOuter = pOuter;
+
+ MarkAggregated();
+}
+
+//--------------------------------------------------------------------------
+// Reset the outer unknown
+//--------------------------------------------------------------------------
+void SimpleComCallWrapper::ResetOuter()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ m_pOuter = NULL;
+
+ if (IsAggregated())
+ UnMarkAggregated();
+}
+
+
+//--------------------------------------------------------------------------
+// Get Outer Unknown on the correct thread
+//--------------------------------------------------------------------------
+IUnknown* SimpleComCallWrapper::GetOuter()
+{
+ CONTRACT (IUnknown*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ if (m_pClassWrap)
+ {
+ // Forward to the real wrapper if this CCW represents a variant interface
+ RETURN m_pClassWrap->GetSimpleWrapper()->GetOuter();
+ }
+
+ RETURN m_pOuter;
+}
+
+BOOL SimpleComCallWrapper::FindConnectionPoint(REFIID riid, IConnectionPoint **ppCP)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ppCP));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!HasOverlappedRef());
+
+ // If the connection point list hasn't been set up yet, then set it up now.
+ if (!m_pCPList)
+ SetUpCPList();
+
+ // Search through the list for a connection point for the requested IID.
+
+    // Go to preemptive mode early to prevent multiple GC mode switches.
+ GCX_PREEMP();
+
+ for (UINT i = 0; i < m_pCPList->Size(); i++)
+ {
+ ConnectionPoint *pCP = (*m_pCPList)[i];
+ if (pCP->GetIID() == riid)
+ {
+ // We found a connection point for the requested IID.
+ HRESULT hr = SafeQueryInterfacePreemp(pCP, IID_IConnectionPoint, (IUnknown**)ppCP);
+ _ASSERTE(hr == S_OK);
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+void SimpleComCallWrapper::EnumConnectionPoints(IEnumConnectionPoints **ppEnumCP)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(ppEnumCP));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!HasOverlappedRef());
+
+ // If the connection point list hasn't been set up yet, then set it up now.
+ if (!m_pCPList)
+ SetUpCPList();
+
+ // Create a new connection point enum.
+ ComCallWrapper *pWrap = GetMainWrapper();
+ NewHolder<ConnectionPointEnum>pCPEnum = new ConnectionPointEnum(pWrap, m_pCPList);
+
+ // Retrieve the IEnumConnectionPoints interface. This cannot fail.
+ HRESULT hr = SafeQueryInterface((IUnknown*)pCPEnum, IID_IEnumConnectionPoints, (IUnknown**)ppEnumCP);
+ _ASSERTE(hr == S_OK);
+
+ pCPEnum.SuppressRelease();
+}
+
+//--------------------------------------------------------------------------
+// COM called wrappers on COM+ objects
+// Purpose: Expose COM+ objects as COM classic Interfaces
+// Reqmts: Wrapper has to have the same layout as the COM2 interface
+//
+// The wrapper objects are aligned at 16 bytes, and the original this
+// pointer is replicated every 16 bytes, so for any COM2 interface
+// within the wrapper, the original 'this' can be obtained by masking
+// low 4 bits of COM2 IP.
+//
+// 16 byte aligned COM2 Vtable
+// +-----------+
+// | Org. this |
+// +-----------+ +-----+
+// COM2 IP-->| VTable ptr|----------------------------->|slot1|
+// +-----------+ +-----+ +-----+
+// COM2 IP-->| VTable ptr|---------->|slot1| |slot2|
+// +-----------+ +-----+ + +
+// | VTable ptr| | ....| | ... |
+// +-----------+ + + + +
+// | Org. this | |slotN| |slotN|
+// + + +-----+ +-----+
+// | .... |
+// + +
+// | |
+// +-----------+
+//
+//
+// VTables and Stubs: interfaces can share stub code, but we need different
+//           vtables for different interfaces so the stub can jump to the
+//           interface-specific marshalling code.
+// Stubs:    adjust the 'this' pointer and jump to the appropriate address;
+//           based on the method signature, the stub jumps to the appropriate
+//           code to marshal and unmarshal the parameters and results.
+//
+//--------------------------------------------------------------------------
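+
+// A minimal sketch of the recovery step this layout enables (illustrative
+// only; the real lookup uses enum_ThisMask and the exact alignment comes from
+// the cache line allocator):
+//
+//   ComCallWrapper* WrapperFromComIP(void* pComIP)
+//   {
+//       // Clearing the low alignment bits of an interface pointer handed out
+//       // from the wrapper yields the start of the wrapper, where the
+//       // original 'this' is replicated.
+//       return (ComCallWrapper*)((DWORD_PTR)pComIP & enum_ThisMask);
+//   }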
+
+
+//--------------------------------------------------------------------------
+// void ComCallWrapper::MarkHandleWeak()
+// mark the wrapper as holding a weak handle to the object
+//--------------------------------------------------------------------------
+
+void ComCallWrapper::MarkHandleWeak()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+#ifdef _DEBUG
+ AppDomainFromIDHolder ad(GetSimpleWrapper()->GetDomainID(), TRUE);
+ _ASSERTE(!ad.IsUnloaded());
+#endif
+ SyncBlock* pSyncBlock = GetSyncBlock();
+ _ASSERTE(pSyncBlock);
+
+ GetSimpleWrapper()->MarkHandleWeak();
+}
+
+//--------------------------------------------------------------------------
+// void ComCallWrapper::ResetHandleStrength()
+// mark the wrapper as not having a weak handle
+//--------------------------------------------------------------------------
+
+void ComCallWrapper::ResetHandleStrength()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ AppDomainFromIDHolder ad(GetSimpleWrapper()->GetDomainID(), TRUE);
+ _ASSERTE(!ad.IsUnloaded());
+#endif
+ SyncBlock* pSyncBlock = GetSyncBlock();
+ _ASSERTE(pSyncBlock);
+
+ GetSimpleWrapper()->ResetHandleStrength();
+}
+
+//--------------------------------------------------------------------------
+// void ComCallWrapper::InitializeOuter(IUnknown* pOuter)
+// init outer unknown, aggregation support
+//--------------------------------------------------------------------------
+void ComCallWrapper::InitializeOuter(IUnknown* pOuter)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pOuter, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ GetSimpleWrapper()->InitOuter(pOuter);
+}
+
+
+//--------------------------------------------------------------------------
+// BOOL ComCallWrapper::IsAggregated()
+// check if the wrapper is aggregated
+//--------------------------------------------------------------------------
+BOOL ComCallWrapper::IsAggregated()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetSimpleWrapper()->IsAggregated();
+}
+
+
+//--------------------------------------------------------------------------
+// BOOL ComCallWrapper::IsObjectTP()
+// check if the wrapper is for a transparent proxy (TP) object
+//--------------------------------------------------------------------------
+BOOL ComCallWrapper::IsObjectTP()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetSimpleWrapper()->IsObjectTP();
+}
+
+
+
+//--------------------------------------------------------------------------
+// BOOL ComCallWrapper::IsExtendsCOMObject()
+// check if the wrapper is for a managed object that extends a COM object
+//--------------------------------------------------------------------------
+BOOL ComCallWrapper::IsExtendsCOMObject()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return GetSimpleWrapper()->IsExtendsCOMObject();
+}
+
+//--------------------------------------------------------------------------
+// HRESULT ComCallWrapper::GetInnerUnknown(void** ppv)
+// aggregation support, get inner unknown
+//--------------------------------------------------------------------------
+HRESULT ComCallWrapper::GetInnerUnknown(void **ppv)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ppv));
+ PRECONDITION(GetSimpleWrapper()->GetOuter() != NULL);
+ }
+ CONTRACTL_END;
+
+ return GetSimpleWrapper()->GetInnerUnknown(ppv);
+}
+
+//--------------------------------------------------------------------------
+// Get Outer Unknown on the correct thread
+//--------------------------------------------------------------------------
+IUnknown* ComCallWrapper::GetOuter()
+{
+ CONTRACT (IUnknown*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN GetSimpleWrapper()->GetOuter();
+}
+
+//--------------------------------------------------------------------------
+// SyncBlock* ComCallWrapper::GetSyncBlock()
+//--------------------------------------------------------------------------
+SyncBlock* ComCallWrapper::GetSyncBlock()
+{
+ CONTRACT (SyncBlock*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN GetSimpleWrapper()->GetSyncBlock();
+}
+
+//--------------------------------------------------------------------------
+//ComCallWrapper* ComCallWrapper::CopyFromTemplate(ComCallWrapperTemplate* pTemplate,
+// OBJECTREF* pRef)
+// create a wrapper and initialize it from the template
+//--------------------------------------------------------------------------
+ComCallWrapper* ComCallWrapper::CopyFromTemplate(ComCallWrapperTemplate* pTemplate,
+ ComCallWrapperCache *pWrapperCache,
+ OBJECTHANDLE oh)
+{
+ CONTRACT (ComCallWrapper*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pTemplate));
+ PRECONDITION(CheckPointer(pWrapperCache));
+ PRECONDITION(oh != NULL);
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // num interfaces on the object
+ size_t numInterfaces = pTemplate->GetNumInterfaces();
+
+ // we have a template, create a wrapper and initialize from the template
+    // alloc wrapper, aligned to 32 bytes (64 bytes on _WIN64)
+ if (pWrapperCache->IsDomainUnloading())
+ COMPlusThrow(kAppDomainUnloadedException);
+
+ NewCCWHolder pStartWrapper(pWrapperCache);
+ pStartWrapper = (ComCallWrapper*)pWrapperCache->GetCacheLineAllocator()->
+#ifdef _WIN64
+ GetCacheLine64();
+ _ASSERTE(sizeof(ComCallWrapper) <= 64);
+#else
+ GetCacheLine32();
+ _ASSERTE(sizeof(ComCallWrapper) <= 32);
+#endif
+
+ if (pStartWrapper == NULL)
+ COMPlusThrowOM();
+
+ LOG((LF_INTEROP, LL_INFO100, "ComCallWrapper::CopyFromTemplate on Object %8.8x, Wrapper %8.8x\n", oh, pStartWrapper));
+
+ // addref commgr
+ pWrapperCache->AddRef();
+
+ // store the object handle
+ pStartWrapper->m_ppThis = oh;
+
+ // The first slot will hold the Basic interface.
+ // The second slot will hold the IClassX interface which will be generated on the fly.
+ unsigned blockIndex = 0;
+ if (pTemplate->RepresentsVariantInterface())
+ {
+ // interface CCW doesn't need the basic ComMT, it will fall back to its class CCW
+ // for anything but the one variant interface it represents
+ pStartWrapper->m_rgpIPtr[blockIndex++] = NULL;
+ }
+ else
+ {
+ pStartWrapper->m_rgpIPtr[blockIndex++] = (SLOT *)(pTemplate->GetBasicComMT() + 1);
+ }
+ pStartWrapper->m_rgpIPtr[blockIndex++] = NULL;
+
+ ComCallWrapper* pWrapper = pStartWrapper;
+ for (unsigned i =0; i< numInterfaces; i++)
+ {
+ if (blockIndex >= NumVtablePtrs)
+ {
+            // alloc wrapper, aligned to 32 bytes (64 bytes on _WIN64)
+ ComCallWrapper* pNewWrapper = (ComCallWrapper*)pWrapperCache->GetCacheLineAllocator()->
+#ifdef _WIN64
+ GetCacheLine64();
+ _ASSERTE(sizeof(ComCallWrapper) <= 64);
+#else
+ GetCacheLine32();
+ _ASSERTE(sizeof(ComCallWrapper) <= 32);
+#endif
+
+ _ASSERTE(0 == (((DWORD_PTR)pNewWrapper) & ~enum_ThisMask));
+
+ // Link the wrapper
+ SetNext(pWrapper, pNewWrapper);
+
+ blockIndex = 0; // reset block index
+ if (pNewWrapper == NULL)
+ {
+ RETURN NULL;
+ }
+
+ pWrapper = pNewWrapper;
+
+ // initialize the object reference
+ pWrapper->m_ppThis = oh;
+ }
+
+ pWrapper->m_rgpIPtr[blockIndex++] = pTemplate->GetVTableSlot(i);
+ }
+
+ // If the wrapper is part of a chain, then set the terminator.
+ if (pWrapper != pStartWrapper)
+ SetNext(pWrapper, LinkedWrapperTerminator);
+
+ pStartWrapper.SuppressRelease();
+
+ RETURN pStartWrapper;
+}
+
+//--------------------------------------------------------------------------
+// void ComCallWrapper::Cleanup(ComCallWrapper* pWrap)
+// clean up: release the GC-registered reference and free the wrapper
+//--------------------------------------------------------------------------
+void ComCallWrapper::Cleanup()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pSimpleWrapper);
+
+ // Save it into a variable to observe a consistent state
+ LONGLONG llRefCount = m_pSimpleWrapper->GetRealRefCount();
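+    // GetRealRefCount packs the COM ref count, the Jupiter ref count and the
+    // cleanup sentinel bit into one 64-bit value, so a single read yields a
+    // consistent snapshot of all three (see GET_COM_REF / GET_JUPITER_REF).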
+
+ LOG((LF_INTEROP, LL_INFO100,
+ "Calling ComCallWrapper::Cleanup on CCW 0x%p. cbRef = 0x%x, cbJupiterRef = 0x%x, IsPegged = %d, GlobalPeggingFlag = %d\n",
+ this, GET_COM_REF(llRefCount), GET_JUPITER_REF(llRefCount), IsPegged(), RCWWalker::IsGlobalPeggingOn()));
+
+ if (GET_COM_REF(llRefCount) != 0)
+ {
+ // _ASSERTE(g_fEEShutDown == TRUE);
+ // could be either in shutdown or forced GC in appdomain unload
+ // there are external COM references to this wrapper
+ // so let us just forget about cleaning now
+ // when the ref-count reaches 0, we will
+ // do the cleanup anyway
+ return;
+ }
+
+ // If COMRef == 0 && JupiterRef > 0 && !Neutered
+ if ((GET_JUPITER_REF(llRefCount)) != 0 &&
+ (llRefCount & SimpleComCallWrapper::CLEANUP_SENTINEL) == 0)
+ {
+ LOG((LF_INTEROP, LL_INFO100, "Neutering ComCallWrapper 0x%p: COM Ref = 0 but Jupiter Ref > 0\n", this));
+
+ //
+ // AppX ONLY:
+ //
+ // Skip cleaning up the handle on the CCW to avoid QueryInterface_ICCW crashing when
+ // accessing the handle, otherwise we could run into a race where the CCW is being neutered
+ // and has set m_ppThis to NULL before it sets the 'neutered' bit.
+ //
+ // It is only safe to do so under AppX because there are no AppDomains in AppX so there is
+ // no need to cleanup the handle immediately
+ //
+        // We'll clean up the handle later when Jupiter releases the final ref
+ //
+ bool fSkipHandleCleanup = AppX::IsAppXProcess();
+ m_pSimpleWrapper->Neuter(fSkipHandleCleanup);
+
+ return;
+ }
+
+ STRESS_LOG1 (LF_INTEROP, LL_INFO100, "Cleaning up CCW 0x%p\n", this);
+
+ // Retrieve the COM call wrapper cache before we clear anything
+ ComCallWrapperCache *pWrapperCache = m_pSimpleWrapper->GetWrapperCache();
+
+ BOOL fOwnsHandle = FALSE;
+ SyncBlock* pSyncBlock = m_pSimpleWrapper->GetSyncBlock();
+
+ // only the "root" CCW owns the handle
+ // Even though we don't use this for native deriving from managed scenarios,
+    // we still use multiple CCWs for variance
+ fOwnsHandle = !(GetComCallWrapperTemplate()->RepresentsVariantInterface());
+
+ // This CCW may have belonged to an object that was killed when its AD was unloaded, but the CCW has a positive RefCount.
+ // In this case, the SyncBlock and/or InteropSyncBlockInfo will be null.
+ if (pSyncBlock)
+ {
+ InteropSyncBlockInfo* pInteropInfo = pSyncBlock->GetInteropInfoNoCreate();
+
+ if (pInteropInfo)
+ {
+ // Disconnect the object from the CCW
+ // Starting now, if this object gets passed out
+ // to unmanaged code, it will create a new CCW tied
+ // to the domain it was passed out from.
+ pInteropInfo->SetCCW(NULL);
+
+ // NULL the syncblock entry - we can't hang onto this anymore as the syncblock will be killed asynchronously to us.
+ m_pSimpleWrapper->ResetSyncBlock();
+
+ // Check for an associated RCW
+ RCWHolder pRCW(GetThread());
+ pRCW.InitNoCheck(pSyncBlock);
+ NewRCWHolder pNewRCW = pRCW.GetRawRCWUnsafe();
+
+ if (!pRCW.IsNull())
+ {
+ // Remove the RCW from the cache
+ RCWCache* pCache = RCWCache::GetRCWCacheNoCreate();
+ _ASSERTE(pCache);
+
+ {
+ // Switch to cooperative mode for RCWCache::LockHolder::LockHolder (COOPERATIVE)
+ GCX_COOP();
+
+ RCWCache::LockHolder lh(pCache);
+ pCache->RemoveWrapper(&pRCW);
+ }
+ }
+ }
+ }
+
+ // get this info before the simple wrapper gets cleaned up.
+ ADID domainId=CURRENT_APPDOMAIN_ID;
+ if (m_pSimpleWrapper)
+ {
+ m_pSimpleWrapper->Cleanup();
+ domainId=m_pSimpleWrapper->GetDomainID();
+ }
+
+ if (g_fEEStarted || m_pSimpleWrapper->GetOuter() == NULL)
+ {
+ delete m_pSimpleWrapper;
+ ClearSimpleWrapper(this);
+ }
+
+ {
+ // Switch to cooperative mode for AppDomainFromIDHolder
+ // AppDomainFromIDHolder.Assign might forbid GC and AppDomainFromIDHolder.Release might re-enable GC.
+ // The state is stored in ClrDebugState, which GCX_COOP() macros will push into stack & pop from stack
+ // So use GCX_COOP() around all these statements for AppDomainFromIDHolder
+ GCX_COOP();
+
+ // deregister the handle, in the first block. If no domain, then it's already done
+ AppDomainFromIDHolder pTgtDomain;
+ if (domainId != CURRENT_APPDOMAIN_ID)
+ {
+ pTgtDomain.Assign(domainId, FALSE);
+ }
+
+ if (fOwnsHandle && m_ppThis && !pTgtDomain.IsUnloaded())
+ {
+ LOG((LF_INTEROP, LL_INFO100, "ComCallWrapper::Cleanup on Object %8.8x\n", m_ppThis));
+ ClearHandle();
+ }
+
+ pTgtDomain.Release();
+ }
+
+ m_ppThis = NULL;
+ FreeWrapper(pWrapperCache);
+}
+
+//--------------------------------------------------------------------------
+// void ComCallWrapper::Neuter()
+// walk the CCW list and clear all handles to the object
+//--------------------------------------------------------------------------
+void ComCallWrapper::Neuter()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ClearHandle();
+
+ ComCallWrapper* pWrap = this;
+ while (pWrap != NULL)
+ {
+ ComCallWrapper* pTempWrap = ComCallWrapper::GetNext(pWrap);
+ pWrap->m_ppThis = NULL;
+ pWrap = pTempWrap;
+ }
+}
+
+
+//--------------------------------------------------------------------------
+// void ComCallWrapper::ClearHandle()
+// clear the ref-counted handle
+//--------------------------------------------------------------------------
+void ComCallWrapper::ClearHandle()
+{
+ WRAPPER_NO_CONTRACT;
+
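+    // The interlocked compare-exchange guarantees that if several paths (e.g.
+    // Cleanup and Neuter) race to clear the handle, exactly one destroys it.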
+ OBJECTHANDLE pThis = m_ppThis;
+ if (FastInterlockCompareExchangePointer(&m_ppThis, NULL, pThis) == pThis)
+ {
+ DestroyRefcountedHandle(pThis);
+ }
+}
+
+
+//--------------------------------------------------------------------------
+// void ComCallWrapper::FreeWrapper(ComCallWrapper* pWrap)
+// walk the list and free all wrappers
+//--------------------------------------------------------------------------
+void ComCallWrapper::FreeWrapper(ComCallWrapperCache *pWrapperCache)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pWrapperCache));
+ }
+ CONTRACTL_END;
+
+ {
+ ComCallWrapperCache::LockHolder lh(pWrapperCache);
+
+ ComCallWrapper* pWrap2 = IsLinked() ? GetNext(this) : NULL;
+
+ while (pWrap2 != NULL)
+ {
+ ComCallWrapper* pTempWrap = GetNext(pWrap2);
+ #ifdef _WIN64
+ pWrapperCache->GetCacheLineAllocator()->FreeCacheLine64(pWrap2);
+ #else //_WIN64
+ pWrapperCache->GetCacheLineAllocator()->FreeCacheLine32(pWrap2);
+ #endif //_WIN64
+ pWrap2 = pTempWrap;
+ }
+ #ifdef _WIN64
+ pWrapperCache->GetCacheLineAllocator()->FreeCacheLine64(this);
+ #else //_WIN64
+ pWrapperCache->GetCacheLineAllocator()->FreeCacheLine32(this);
+ #endif //_WIN64
+ }
+
+ // release ccw mgr
+ pWrapperCache->Release();
+}
+
+void ComCallWrapper::DoScriptingSecurityCheck()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // If the object is shared or agile, and the current domain doesn't have
+ // UmgdCodePermission, we fail the call.
+ AppDomain* pCurrDomain = GetThread()->GetDomain();
+ ADID currID = pCurrDomain->GetId();
+
+ ADID ccwID = m_pSimpleWrapper->GetRawDomainID();
+
+ if (currID != ccwID)
+ {
+ IApplicationSecurityDescriptor* pASD = pCurrDomain->GetSecurityDescriptor();
+
+ if (!pASD->CanCallUnmanagedCode())
+ Security::ThrowSecurityException(g_SecurityPermissionClassName, SPFLAGSUNMANAGEDCODE);
+ }
+}
+
+//--------------------------------------------------------------------------
+//ComCallWrapper* ComCallWrapper::CreateWrapper(OBJECTREF* ppObj, ComCallWrapperTemplate *pTemplate, ComCallWrapper *pClassCCW)
+// this function should be called only with pre-emptive GC disabled
+// GCProtect the object ref being passed in, as this code could enable gc
+//--------------------------------------------------------------------------
+ComCallWrapper* ComCallWrapper::CreateWrapper(OBJECTREF* ppObj, ComCallWrapperTemplate *pTemplate, ComCallWrapper *pClassCCW)
+{
+ CONTRACT(ComCallWrapper *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(ppObj != NULL);
+ PRECONDITION(CheckPointer(pTemplate, NULL_OK));
+ PRECONDITION(CheckPointer(pClassCCW, NULL_OK));
+ PRECONDITION(pTemplate == NULL || !pTemplate->RepresentsVariantInterface() || pClassCCW != NULL);
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ ComCallWrapper* pStartWrapper = NULL;
+ OBJECTREF pServer = NULL;
+
+ GCPROTECT_BEGIN(pServer);
+
+ pServer = *ppObj;
+
+#ifdef FEATURE_REMOTING
+ Context *pContext = Context::GetExecutionContext(pServer);
+#else
+ Context *pContext = GetAppDomain()->GetDefaultContext();
+#endif
+
+ // Force Refine the object if it is a transparent proxy
+ RefineProxy(pServer);
+
+ // grab the sync block from the server
+ SyncBlock* pSyncBlock = pServer->GetSyncBlock();
+
+ pSyncBlock->SetPrecious();
+
+    // If the object belongs to a domain-neutral class, we need to allocate the wrapper in the default domain.
+    // The object is potentially agile, so if we allocate out of the current domain and then hand the wrapper
+    // out to multiple domains, we might never release it for that object and hence never unload the CCWC.
+ ComCallWrapperCache *pWrapperCache = NULL;
+ TypeHandle thClass = pServer->GetTrueTypeHandle();
+
+ //
+ // Collectible types do not support com interop
+ //
+ if (thClass.GetMethodTable()->Collectible())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleCOM"));
+ }
+
+ if (thClass.IsDomainNeutral())
+ {
+ pWrapperCache = SystemDomain::System()->DefaultDomain()->GetComCallWrapperCache();
+ }
+ else
+ {
+ pWrapperCache = pContext->GetDomain()->GetComCallWrapperCache();
+ }
+
+
+ {
+ // check if somebody beat us to it
+ pStartWrapper = GetWrapperForObject(pServer, pTemplate);
+
+ if (pStartWrapper == NULL)
+ {
+ if (pTemplate == NULL)
+ {
+ // get the wrapper template from object's type if it was not passed explicitly
+ pTemplate = ComCallWrapperTemplate::GetTemplate(thClass);
+ }
+
+ // Make sure the CCW will be destroyed when exception happens
+ // Also keep pWrapperCache alive within this scope
+ // It needs to be destroyed after ComCallWrapperCache::LockHolder otherwise there would be a lock violation
+ NewCCWHolder pNewCCW(pWrapperCache);
+
+ // Now we'll take the lock in a place where we won't be calling managed code and check again.
+ {
+ ComCallWrapperCache::LockHolder lh(pWrapperCache);
+
+ pStartWrapper = GetWrapperForObject(pServer, pTemplate);
+ if (pStartWrapper == NULL)
+ {
+ Wrapper<OBJECTHANDLE, DoNothing, DestroyRefcountedHandle> oh;
+
+ ComCallWrapper *pRootWrapper = GetWrapperForObject(pServer, NULL);
+ if (pRootWrapper == NULL)
+ {
+                        // create a handle for the object. This creates a handle in the current domain. We can't tell
+                        // if the object is agile in non-checked builds, so we trust that our checking works and when we
+ // attempt to hand this out to another domain then we will assume that the object is truly
+ // agile and will convert the handle to a global handle.
+ oh = pContext->GetDomain()->CreateRefcountedHandle(NULL);
+ _ASSERTE(oh);
+ }
+ else
+ {
+ // if the object already has a CCW, we reuse the handle
+ oh = pRootWrapper->GetObjectHandle();
+ oh.SuppressRelease();
+ }
+
+ // copy from template
+ pNewCCW = CopyFromTemplate(pTemplate, pWrapperCache, oh);
+
+ NewHolder<SimpleComCallWrapper> pSimpleWrap = SimpleComCallWrapper::CreateSimpleWrapper();
+
+ pSimpleWrap->InitNew(pServer, pWrapperCache, pNewCCW, pClassCCW, pContext, pSyncBlock, pTemplate);
+
+ InitSimpleWrapper(pNewCCW, pSimpleWrap);
+
+ if (pRootWrapper == NULL)
+ {
+ // store the object in the handle - this must happen before we publish the CCW
+ // in the sync block, so that other threads don't see a CCW pointing to nothing
+ StoreObjectInHandle( oh, pServer );
+
+ // finally, store the wrapper for the object in the sync block
+ pSyncBlock->GetInteropInfo()->SetCCW(pNewCCW);
+ }
+ else
+ {
+ // link the wrapper to the existing chain of CCWs
+ while (ComCallWrapper::GetNext(pRootWrapper) != NULL)
+ {
+ pRootWrapper = ComCallWrapper::GetNext(pRootWrapper);
+ }
+ ComCallWrapper::SetNext(pRootWrapper, pNewCCW);
+ }
+
+ oh.SuppressRelease();
+ pNewCCW.SuppressRelease();
+ pSimpleWrap.SuppressRelease();
+
+ pStartWrapper = pNewCCW;
+ }
+ }
+ }
+ }
+ GCPROTECT_END();
+
+ RETURN pStartWrapper;
+}
+
+
+//--------------------------------------------------------------------------
+// signed ComCallWrapper::GetIndexForIntfMT(ComCallWrapperTemplate *pTemplate, MethodTable *pIntfMT)
+// check if the interface is supported; returns an index into the IMap,
+// or -1 if pIntfMT is not supported
+//--------------------------------------------------------------------------
+signed ComCallWrapper::GetIndexForIntfMT(ComCallWrapperTemplate *pTemplate, MethodTable *pIntfMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTemplate));
+ PRECONDITION(CheckPointer(pIntfMT));
+ }
+ CONTRACTL_END;
+
+ for (unsigned j = 0; j < pTemplate->GetNumInterfaces(); j++)
+ {
+ ComMethodTable *pItfComMT = (ComMethodTable *)pTemplate->GetVTableSlot(j) - 1;
+ if (pItfComMT->GetMethodTable()->IsEquivalentTo(pIntfMT))
+ return j;
+ }
+
+ // oops, iface not found
+ return -1;
+}
+
+//--------------------------------------------------------------------------
+// SLOT** ComCallWrapper::GetComIPLocInWrapper(ComCallWrapper* pWrap, unsigned iIndex)
+// identify the location within the wrapper where the vtable for this index will
+// be stored
+//--------------------------------------------------------------------------
+SLOT** ComCallWrapper::GetComIPLocInWrapper(ComCallWrapper* pWrap, unsigned iIndex)
+{
+ CONTRACT (SLOT**)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrap));
+ PRECONDITION(iIndex > 1); // We should never attempt to get the basic or IClassX interface here.
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ SLOT** pTearOff = NULL;
+ while (iIndex >= NumVtablePtrs)
+ {
+ //@todo delayed creation support
+ _ASSERTE(pWrap->IsLinked() != 0);
+ pWrap = GetNext(pWrap);
+ iIndex-= NumVtablePtrs;
+ }
+ _ASSERTE(pWrap != NULL);
+ pTearOff = (SLOT **)&pWrap->m_rgpIPtr[iIndex];
+
+ RETURN pTearOff;
+}
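+
+// Illustrative walk of the loop above (assuming, for the sake of the example,
+// NumVtablePtrs == 5): a request for iIndex == 12 advances the chain twice
+// (12 -> 7 -> 2) and returns the address of m_rgpIPtr[2] in the third wrapper.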
+
+//--------------------------------------------------------------------------
+// Get IClassX interface pointer from the wrapper. This method will also
+// lay out the IClassX COM method table if it has not yet been laid out.
+// The returned interface is AddRef'd.
+//--------------------------------------------------------------------------
+IUnknown* ComCallWrapper::GetIClassXIP(bool inspectionOnly)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ ComCallWrapper *pWrap = this;
+ IUnknown *pIntf = NULL;
+ ComMethodTable* pIClassXComMT = NULL;
+
+ // The IClassX VTable pointer is in the start wrapper.
+ if (pWrap->IsLinked())
+ pWrap = ComCallWrapper::GetStartWrapper(pWrap);
+
+ SLOT* slot = pWrap->m_rgpIPtr[Slot_IClassX];
+ if (NULL == slot)
+ {
+ if (inspectionOnly)
+ RETURN NULL;
+
+ // Get the IClassX ComMethodTable (create if it doesn't exist),
+ // and set it into the vtable map.
+ pIClassXComMT = m_pSimpleWrapper->m_pTemplate->GetClassComMT();
+ pWrap->m_rgpIPtr[Slot_IClassX] = (SLOT *)(pIClassXComMT + 1);
+ }
+ else
+ {
+ pIClassXComMT = (ComMethodTable*)slot - 1;
+ }
+
+    // Lay out the IClassX COM method table if it has not yet been laid out.
+ if (!pIClassXComMT->IsLayoutComplete())
+ {
+ // We won't attempt to lay out the class if we are only trying to
+ // passively inspect the interface.
+ if (inspectionOnly)
+ RETURN NULL;
+ else
+ pIClassXComMT->LayOutClassMethodTable();
+ }
+
+ // Return the IClassX vtable pointer.
+ pIntf = (IUnknown*)&pWrap->m_rgpIPtr[Slot_IClassX];
+
+ // If we are only inspecting, don't addref.
+ if (inspectionOnly)
+ RETURN pIntf;
+
+ // AddRef the wrapper.
+ // Note that we don't do SafeAddRef(pIntf) because it's overkill to
+ // go via IUnknown when we already have the wrapper in-hand.
+ ULONG cbRef = pWrap->AddRefWithAggregationCheck();
+
+ // 0xbadF00d implies the AddRef didn't go through
+ RETURN ((cbRef != 0xbadf00d) ? pIntf : NULL);
+}
+
+IUnknown* ComCallWrapper::GetBasicIP(bool inspectionOnly)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // If the legacy switch is set, we'll always return the IClassX IP
+ // when QIing for IUnknown or IDispatch.
+ // Whidbey Tactics has decided to make this opt-in rather than
+    // opt-out for now. @todo: remove the check for the legacy switch.
+ if ((g_pConfig == NULL || !g_pConfig->NewComVTableLayout()) && GetComCallWrapperTemplate()->SupportsIClassX())
+ RETURN GetIClassXIP(inspectionOnly);
+
+ ComCallWrapper *pWrap = this;
+ IUnknown *pIntf = NULL;
+
+    // The basic VTable pointer is in the start wrapper.
+ if (pWrap->IsLinked())
+ pWrap = ComCallWrapper::GetStartWrapper(pWrap);
+
+ ComMethodTable* pIBasicComMT = (ComMethodTable*)pWrap->m_rgpIPtr[Slot_Basic] - 1;
+ _ASSERTE(pIBasicComMT);
+
+ // Lay out the basic COM method table if it has not yet been laid out.
+ if (!pIBasicComMT->IsLayoutComplete())
+ {
+ if (inspectionOnly)
+ RETURN NULL;
+ else
+ pIBasicComMT->LayOutBasicMethodTable();
+ }
+
+ // Return the basic vtable pointer.
+ pIntf = (IUnknown*)&pWrap->m_rgpIPtr[Slot_Basic];
+
+ // If we are not addref'ing the IUnknown (for passive inspection like ETW), return it now.
+ if (inspectionOnly)
+ RETURN pIntf;
+
+ // AddRef the wrapper.
+ // Note that we don't do SafeAddRef(pIntf) because it's overkill to
+ // go via IUnknown when we already have the wrapper in-hand.
+ ULONG cbRef = pWrap->AddRefWithAggregationCheck();
+
+ // 0xbadF00d implies the AddRef didn't go through
+ RETURN ((cbRef != 0xbadf00d) ? pIntf : NULL);
+}
+
+struct InvokeICustomQueryInterfaceGetInterfaceArgs
+{
+ ComCallWrapper *pWrap;
+ GUID *pGuid;
+ IUnknown **ppUnk;
+ CustomQueryInterfaceResult *pRetVal;
+};
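+
+// The struct above packages the arguments so they can be passed through the
+// LPVOID-taking context/AppDomain callback machinery below.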
+
+VOID __stdcall InvokeICustomQueryInterfaceGetInterface_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+ InvokeICustomQueryInterfaceGetInterfaceArgs *pArgs = (InvokeICustomQueryInterfaceGetInterfaceArgs*)ptr;
+
+ {
+ GCX_COOP();
+ OBJECTREF pObj = pArgs->pWrap->GetObjectRef();
+
+ GCPROTECT_BEGIN(pObj);
+
+ // 1. Get MD
+ MethodDesc *pMD = pArgs->pWrap->GetSimpleWrapper()->GetComCallWrapperTemplate()->GetICustomQueryInterfaceGetInterfaceMD();
+
+ // 2. Get Object Handle
+ OBJECTHANDLE hndCustomQueryInterface = pArgs->pWrap->GetObjectHandle();
+
+        // 3. Construct the MethodDescCallSite
+ MethodDescCallSite GetInterface(pMD, hndCustomQueryInterface);
+
+ ARG_SLOT Args[] = {
+ ObjToArgSlot(pObj),
+ PtrToArgSlot(pArgs->pGuid),
+ PtrToArgSlot(pArgs->ppUnk),
+ };
+
+ *(pArgs->pRetVal) = (CustomQueryInterfaceResult)GetInterface.Call_RetArgSlot(Args);
+ GCPROTECT_END();
+ }
+}
+
+VOID InvokeICustomQueryInterfaceGetInterface_AppDomainTransition(LPVOID ptr, ADID targetADID, Context *pTargetContext)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+ Thread *pThread = GetThread();
+ GCX_COOP_THREAD_EXISTS(pThread);
+ pThread->DoContextCallBack(
+ targetADID,
+ pTargetContext,
+ (Context::ADCallBackFcnType)InvokeICustomQueryInterfaceGetInterface_CallBack,
+ ptr);
+}
+
+// Returns a covariant supertype of pMT with the given IID or NULL if not found.
+// static
+MethodTable *ComCallWrapper::FindCovariantSubtype(MethodTable *pMT, REFIID riid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(pMT->IsInterface() && pMT->HasVariance());
+ }
+ CONTRACTL_END;
+
+ Instantiation inst = pMT->GetInstantiation();
+
+ // we only support one type parameter
+ if (inst.GetNumArgs() != 1)
+ return NULL;
+
+ // and it must be covariant
+ if (pMT->GetClass()->GetVarianceOfTypeParameter(0) != gpCovariant)
+ return NULL;
+
+ TypeHandle thArg = inst[0];
+
+ // arrays are not allowed, valuetypes don't support covariance
+ if (thArg.IsTypeDesc() || thArg.IsArray() || thArg.IsValueType())
+ return NULL;
+
+ // if the argument is System.Object, there's no covariant supertype
+ if (thArg.IsObjectType())
+ return NULL;
+
+ GUID guid;
+ TypeHandle thBaseClass = thArg.GetParent();
+
+ // try base classes first (this includes System.Object if thArg is an interface)
+ while (!thBaseClass.IsNull())
+ {
+        // The internal base classes do not have a GUID. Skip them to avoid a
+        // confusing exception being thrown and swallowed inside GetGuidNoThrow.
+ if (thBaseClass != TypeHandle(g_pBaseCOMObject) && thBaseClass != TypeHandle(g_pBaseRuntimeClass))
+ {
+ Instantiation newInst(&thBaseClass, 1);
+ MethodTable *pItfMT = TypeHandle(pMT).Instantiate(newInst).AsMethodTable();
+
+ if (SUCCEEDED(pItfMT->GetGuidNoThrow(&guid, FALSE, FALSE)) && guid == riid)
+ {
+ return pItfMT;
+ }
+ }
+
+ thBaseClass = thBaseClass.GetParent();
+ }
+
+ // now try implemented interfaces
+ MethodTable::InterfaceMapIterator it = thArg.AsMethodTable()->IterateInterfaceMap();
+ while (it.Next())
+ {
+ TypeHandle thNewArg = TypeHandle(it.GetInterface());
+ Instantiation newInst(&thNewArg, 1);
+
+ MethodTable *pItfMT = TypeHandle(pMT).Instantiate(newInst).AsMethodTable();
+
+ if (SUCCEEDED(pItfMT->GetGuidNoThrow(&guid, FALSE, FALSE)) && guid == riid)
+ {
+ return pItfMT;
+ }
+ }
+
+ return NULL;
+}
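+
+// Illustrative example (type names hypothetical): if pMT is IEnumerable<Derived>
+// and riid is the IID of IEnumerable<Base>, the base class walk instantiates
+// IEnumerable<Base>, finds that its GUID matches riid, and returns its
+// MethodTable. Only covariance with a single generic parameter is handled here;
+// contravariant matches are not found.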
+
+// Like GetComIPFromCCW, but will try to find riid/pIntfMT among interfaces implemented by this object that have variance.
+IUnknown* ComCallWrapper::GetComIPFromCCWUsingVariance(REFIID riid, MethodTable* pIntfMT, GetComIPFromCCW::flags flags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(GetComCallWrapperTemplate()->SupportsVariantInterface());
+ PRECONDITION(!GetComCallWrapperTemplate()->RepresentsVariantInterface());
+ }
+ CONTRACTL_END;
+
+ // try the fast per-ComCallWrapperTemplate cache first
+ ComCallWrapperTemplate::IIDToInterfaceTemplateCache *pCache = GetComCallWrapperTemplate()->GetOrCreateIIDToInterfaceTemplateCache();
+
+ GUID local_iid;
+ const IID *piid = &riid;
+ if (InlineIsEqualGUID(riid, GUID_NULL))
+ {
+ // we have a fast IID -> ComCallWrapperTemplate cache so we need the IID first
+ _ASSERTE(pIntfMT != NULL);
+
+ if (FAILED(pIntfMT->GetGuidNoThrow(&local_iid, TRUE)))
+ {
+ return NULL;
+ }
+ piid = &local_iid;
+ }
+
+ ComCallWrapperTemplate *pIntfTemplate = NULL;
+ if (pCache->LookupInterfaceTemplate(*piid, &pIntfTemplate))
+ {
+ // we've seen a QI for this IID before
+ if (pIntfTemplate == NULL)
+ {
+ // and it failed, so we can return immediately
+ return NULL;
+ }
+
+ // make sure we pick up the MT stored in the ComMT because that's the WinRT one (for example
+ // IIterable<object>) against which GetComIPFromCCW_VariantInterface is comparing its MT argument
+ _ASSERTE(pIntfMT == NULL || pIntfMT == pIntfTemplate->GetComMTForIndex(0)->GetMethodTable());
+ pIntfMT = pIntfTemplate->GetComMTForIndex(0)->GetMethodTable();
+ }
+ else if (pIntfMT == NULL)
+ {
+ _ASSERTE(riid != GUID_NULL);
+
+ // Here we are, handling a QI for an IID that we don't recognize because the managed object we are wrapping
+ // does not implement that exact interface. However, it may implement another interface which is castable to
+ // what we are looking for via co- or contra-variance. The problem is that the IID computation algorithm is
+ // a one-way function so there's no way to deduce the interface while knowing only the IID.
+
+ // try the AD-wide cache
+ pIntfMT = GetAppDomain()->LookupTypeByGuid(riid);
+
+ if (pIntfMT == NULL)
+ {
+ // Now we should enumerate all types that are "related" to our object and try to find a match. This has
+ // a couple of issues. It can take a long time (imagine a type with n covariant generic parameters and
+        // us holding a type where these are instantiated with classes, each with a hierarchy m levels deep
+ // - we are looking at loading m^n instantiations which may not be feasible and would make QI too slow).
+ // And it will not work for contravariance anyway (it's not quite possible to enumerate all subtypes of
+ // a given generic argument).
+ //
+ // We'll perform a simplified check which is limited only to covariance with one generic parameter (luckily
+ // all WinRT variant types currently fall into this bucket).
+ //
+ TypeHandle thClass = GetComCallWrapperTemplate()->GetClassType();
+
+ ComCallWrapperTemplate::CCWInterfaceMapIterator it(thClass, NULL, false);
+ while (it.Next())
+ {
+ MethodTable *pImplementedIntfMT = it.GetInterface();
+ if (pImplementedIntfMT->HasVariance())
+ {
+ pIntfMT = FindCovariantSubtype(pImplementedIntfMT, riid);
+ if (pIntfMT != NULL)
+ break;
+ }
+ }
+ }
+ }
+
+ if (pIntfMT == NULL || !pIntfMT->IsInterface())
+ {
+ // we did not recognize the IID - cache the negative result
+ pCache->InsertInterfaceTemplate(*piid, NULL);
+ }
+ else
+ {
+ if (pIntfTemplate == NULL)
+ {
+            // we have an interface type but not the corresponding template, so we have to do some extra work
+ MethodTable *pVariantIntfMT = NULL;
+ if (!pIntfMT->HasVariance())
+ {
+ // We may be passed a WinRT interface which does not have variance from .NET type system
+ // point of view. Simply replace it with the corresponding .NET type if it is the case.
+ pVariantIntfMT = RCW::GetVariantMethodTable(pIntfMT);
+ }
+ else
+ {
+ pVariantIntfMT = pIntfMT;
+ }
+
+ TypeHandle thClass = GetComCallWrapperTemplate()->GetClassType();
+ if (pVariantIntfMT != NULL && thClass.CanCastTo(pVariantIntfMT))
+ {
+ _ASSERTE_MSG(!thClass.GetMethodTable()->ImplementsInterface(pVariantIntfMT), "This should have been taken care of by GetComIPFromCCW");
+
+ // At this point, conceptually we would like to add a new ComMethodTable to the ComCallWrapperTemplate
+ // representing pMT because we just discovered an interface that the unmanaged side is interested in.
+ // However, this does not fit very well to the overall CCW architecture and could use a lot of memory
+ // (each class that implements IEnumerable<T> may end up with a ComMethodTable for IEnumerable<object>
+ // for example) so we sacrifice a bit of run-time perf for this not-so-mainline scenario and create a
+ // CCW specifically for IEnumerable<object> that is shared by all CCWs.
+
+ // get the per-interface CCW template for pIntfMT
+ pIntfTemplate = ComCallWrapperTemplate::GetTemplate(pVariantIntfMT);
+ }
+
+ // cache the pIntfTemplate (may be NULL)
+ pCache->InsertInterfaceTemplate(*piid, pIntfTemplate);
+ }
+
+ if (pIntfTemplate != NULL)
+ {
+ // get a CCW for the template, associated with our object
+ CCWHolder pCCW;
+ {
+ GCX_COOP();
+ OBJECTREF oref = NULL;
+
+ GCPROTECT_BEGIN(oref);
+ {
+ oref = GetObjectRef();
+ pCCW = InlineGetWrapper(&oref, pIntfTemplate, this);
+ }
+ GCPROTECT_END();
+ }
+
+ // and let the per-interface CCW handle the QI
+ return GetComIPFromCCW_VariantInterface(pCCW, riid, pIntfMT, flags, pIntfTemplate);
+ }
+ }
+
+ return NULL;
+}
+
+// static
+inline IUnknown * ComCallWrapper::GetComIPFromCCW_VisibilityCheck(
+ IUnknown * pIntf, MethodTable * pIntfMT, ComMethodTable * pIntfComMT,
+ GetComIPFromCCW::flags flags)
+{
+ CONTRACT(IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pIntf));
+ PRECONDITION(CheckPointer(pIntfComMT));
+ }
+ CONTRACT_END;
+
+ // Ensure that the interface we are passing out was defined in trusted code.
+ if ((!(flags & GetComIPFromCCW::SuppressSecurityCheck) && pIntfComMT->IsDefinedInUntrustedCode()) ||
+ // Do a visibility check if needed.
+ ((flags & GetComIPFromCCW::CheckVisibility) && (!pIntfComMT->IsComVisible())))
+ {
+ // If not, fail to return the interface.
+ SafeRelease(pIntf);
+ RETURN NULL;
+ }
+ RETURN pIntf;
+}
+
+// static
+IUnknown * ComCallWrapper::GetComIPFromCCW_VariantInterface(
+ ComCallWrapper * pWrap, REFIID riid, MethodTable * pIntfMT,
+ GetComIPFromCCW::flags flags, ComCallWrapperTemplate * pTemplate)
+{
+ CONTRACT(IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrap));
+ }
+ CONTRACT_END;
+
+ IUnknown* pIntf = NULL;
+ ComMethodTable *pIntfComMT = NULL;
+
+ // we are only going to respond to the one interface that this CCW represents
+ pIntfComMT = pTemplate->GetComMTForIndex(0);
+ if (pIntfComMT->GetIID() == riid || (pIntfMT != NULL && GetIndexForIntfMT(pTemplate, pIntfMT) == 0))
+ {
+ SLOT **ppVtable = GetComIPLocInWrapper(pWrap, Slot_FirstInterface);
+ _ASSERTE(*ppVtable != NULL); // this should point to COM Vtable or interface vtable
+
+ if (!pIntfComMT->IsLayoutComplete() && !pIntfComMT->LayOutInterfaceMethodTable(NULL))
+ {
+ RETURN NULL;
+ }
+
+ // The interface pointer is the pointer to the vtable.
+ pIntf = (IUnknown*)ppVtable;
+
+ // AddRef the wrapper.
+ // Note that we don't do SafeAddRef(pIntf) because it's overkill to
+ // go via IUnknown when we already have the wrapper in-hand.
+ pWrap->AddRefWithAggregationCheck();
+
+ RETURN GetComIPFromCCW_VisibilityCheck(pIntf, pIntfMT, pIntfComMT, flags);
+ }
+
+ // for anything else, fall back to the CCW representing the "parent" class
+ RETURN GetComIPFromCCW(pWrap->GetSimpleWrapper()->GetClassWrapper(), riid, pIntfMT, flags);
+}
+
+// static
+bool ComCallWrapper::GetComIPFromCCW_HandleCustomQI(
+ ComCallWrapper * pWrap, REFIID riid, MethodTable * pIntfMT, IUnknown ** ppUnkOut)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrap));
+ }
+ CONTRACTL_END;
+
+ // Customize QI: We call the method System.Runtime.InteropServices.ICustomQueryInterface
+ // GetInterface implemented by user to do the customized QI work.
+ CustomQueryInterfaceResult retVal = Handled;
+
+ // prepare the GUID
+ GUID guid;
+ if (IsEqualGUID(riid, GUID_NULL) && pIntfMT != NULL)
+ {
+ // riid is null, we retrieve the guid from the methodtable
+ pIntfMT->GetGuid(&guid, true);
+ }
+ else
+ {
+        // copy riid so the user cannot modify it
+ guid = riid;
+ }
+
+ InvokeICustomQueryInterfaceGetInterfaceArgs args = {pWrap, &guid, ppUnkOut, &retVal};
+
+ ADID targetADID;
+ Context *pTargetContext;
+ if (pWrap->NeedToSwitchDomains(GetThread(), &targetADID, &pTargetContext))
+ InvokeICustomQueryInterfaceGetInterface_AppDomainTransition(&args, targetADID, pTargetContext);
+ else
+ InvokeICustomQueryInterfaceGetInterface_CallBack(&args);
+    // return if the user has already handled the QI
+ if (retVal == Handled)
+ return true;
+    // return NULL if the user wants to fail the QI
+ if (retVal == Failed)
+ {
+ *ppUnkOut = NULL;
+ return true;
+ }
+    // make sure the user returned one of the known result values
+ _ASSERTE(retVal == NotHandled);
+ return false;
+}
+
+// A MODE_ANY helper to get the MethodTable of the 'this' object. This helper keeps
+// the GCX_COOP transition out of the caller (it implies a holder which implies an
+// FS:0 handler on x86).
+MethodTable * ComCallWrapper::GetMethodTableOfObjectRef()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+ return GetObjectRef()->GetTrueMethodTable();
+}
+
+// static
+IUnknown * ComCallWrapper::GetComIPFromCCW_HandleExtendsCOMObject(
+ ComCallWrapper * pWrap, REFIID riid, MethodTable * pIntfMT,
+ ComCallWrapperTemplate * pTemplate, signed imapIndex, unsigned intfIndex)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BOOL bDelegateToBase = FALSE;
+ if (imapIndex != -1)
+ {
+ MethodTable * pMT = pWrap->GetMethodTableOfObjectRef();
+
+        // Check if this index is actually an interface implemented by us.
+        // If it belongs to the base COM guy then we can hand the call
+        // over to him.
+ if (pMT->IsWinRTObjectType())
+ {
+ bDelegateToBase = pTemplate->GetComMTForIndex(intfIndex)->IsWinRTTrivialAggregate();
+ }
+ else
+ {
+ MethodTable::InterfaceMapIterator intIt = pMT->IterateInterfaceMapFrom(intfIndex);
+
+ // If the number of slots is 0, then no need to proceed
+ if (intIt.GetInterface()->GetNumVirtuals() != 0)
+ {
+ MethodDesc *pClsMD = NULL;
+
+ // Find the implementation for the first slot of the interface
+ DispatchSlot impl(pMT->FindDispatchSlot(intIt.GetInterface()->GetTypeID(), 0));
+ CONSISTENCY_CHECK(!impl.IsNull());
+
+ // Get the MethodDesc for this slot in the class
+ pClsMD = impl.GetMethodDesc();
+
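+                // If the implementation of that slot comes from an interface or from the
+                // imported COM class itself, managed code has not overridden the interface,
+                // so it is safe to let the base COM object answer the QI.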
+ MethodTable * pClsMT = pClsMD->GetMethodTable();
+ if (pClsMT->IsInterface() || pClsMT->IsComImport())
+ bDelegateToBase = TRUE;
+ }
+ else
+ {
+ // The interface has no methods so we cannot override it. Because of this
+ // it makes sense to delegate to the base COM component.
+ bDelegateToBase = TRUE;
+ }
+ }
+ }
+ else
+ {
+ // If we don't implement the interface, we delegate to base
+ bDelegateToBase = TRUE;
+ }
+
+ if (bDelegateToBase)
+ {
+ // This is an interface of the base COM guy so delegate the call to him
+ SyncBlock* pBlock = pWrap->GetSyncBlock();
+ _ASSERTE(pBlock);
+
+ SafeComHolder<IUnknown> pUnk;
+
+ RCWHolder pRCW(GetThread());
+ RCWPROTECT_BEGIN(pRCW, pBlock);
+
+ pUnk = (pIntfMT != NULL) ? pRCW->GetComIPFromRCW(pIntfMT)
+ : pRCW->GetComIPFromRCW(riid);
+
+ RCWPROTECT_END(pRCW);
+ return pUnk.Extract();
+ }
+
+ return NULL;
+}
+
+//--------------------------------------------------------------------------
+// IUnknown* ComCallWrapper::GetComIPfromCCW(ComCallWrapper *pWrap, REFIID riid, MethodTable* pIntfMT, BOOL bCheckVisibility)
+// Get an interface from wrapper, based on riid or pIntfMT. The returned interface is AddRef'd.
+//--------------------------------------------------------------------------
+// static
+IUnknown* ComCallWrapper::GetComIPFromCCW(ComCallWrapper *pWrap, REFIID riid, MethodTable* pIntfMT,
+ GetComIPFromCCW::flags flags)
+{
+ CONTRACT(IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrap));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
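+    // Rough order of the QI resolution below: the IUnknown fast path, the AppX-only
+    // IErrorInfo fast path, the user's ICustomQueryInterface (if any), the
+    // IInspectable/IDispatch/IManagedObject standard interfaces, the template's
+    // interface map (matched by IID or by MethodTable), variance-based lookup, and
+    // finally delegation to the base COM class for objects that extend one.
+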
+ // scan the wrapper
+ if (pWrap->IsLinked())
+ pWrap = ComCallWrapper::GetStartWrapper(pWrap);
+
+ ComCallWrapperTemplate *pTemplate = pWrap->GetSimpleWrapper()->GetComCallWrapperTemplate();
+
+    // We should not be getting a CCW that represents a variant interface, as GetWrapperFromIP will always
+    // convert an IP to the main CCW
+ _ASSERTE(!pTemplate->RepresentsVariantInterface());
+
+ if (IsIUnknown(riid))
+ {
+ // We don't do visibility checks on IUnknown.
+ RETURN pWrap->GetBasicIP();
+ }
+
+ if (IsIErrorInfo(riid) && AppX::IsAppXProcess())
+ {
+ // Don't let the user override the default IErrorInfo implementation in AppX because
+ // Jupiter uses it for WER. See code:GetExceptionDescription for details.
+ SimpleComCallWrapper* pSimpleWrap = pWrap->GetSimpleWrapper();
+ RETURN pSimpleWrap->QIStandardInterface(enum_IErrorInfo);
+ }
+
+ if (!(flags & GetComIPFromCCW::SuppressCustomizedQueryInterface)
+ && pTemplate->SupportsICustomQueryInterface())
+ {
+ // Customize QI: We call the method System.Runtime.InteropServices.ICustomQueryInterface
+ // GetInterface implemented by user to do the customized QI work.
+ IUnknown * pUnkCustomQIResult = NULL;
+ if (GetComIPFromCCW_HandleCustomQI(pWrap, riid, pIntfMT, &pUnkCustomQIResult))
+ RETURN pUnkCustomQIResult;
+ }
+
+ if (IsIInspectable(riid))
+ {
+        // ICustomPropertyProvider will be the canonical IInspectable for every managed object
+ SimpleComCallWrapper* pSimpleWrap = pWrap->GetSimpleWrapper();
+ RETURN pSimpleWrap->QIStandardInterface(enum_ICustomPropertyProvider);
+ }
+ if (IsIDispatch(riid))
+ {
+#ifdef FEATURE_CORECLR
+ if (AppX::IsAppXProcess())
+ {
+ RETURN NULL;
+ }
+#endif // FEATURE_CORECLR
+
+        // We don't do visibility checks on IDispatch.
+ RETURN pWrap->GetIDispatchIP();
+ }
+ if (IsIManagedObject(riid))
+ {
+ // If we are aggregated and somehow the aggregator delegated a QI on
+        // IManagedObject to us, fail the request so we don't accidentally get a
+ // COM+ caller linked directly to us.
+ if (!pWrap->IsObjectTP() && pWrap->GetSimpleWrapper()->GetOuter() != NULL)
+ RETURN NULL;
+
+ if (pIntfMT == NULL)
+ {
+ SimpleComCallWrapper* pSimpleWrap = pWrap->GetSimpleWrapper();
+ IUnknown * pIntf = pSimpleWrap->QIStandardInterface(riid);
+ if (pIntf)
+ RETURN pIntf;
+ }
+ }
+
+ signed imapIndex = -1;
+ if (pIntfMT == NULL)
+ {
+ if (IsGUID_NULL(riid)) // there's no interface with GUID_NULL IID so we can bail out right away
+ RETURN NULL;
+
+ // Go through all the implemented methods except the COM imported class interfaces
+ // and compare the IID's to find the requested one.
+ for (unsigned j = 0; j < pTemplate->GetNumInterfaces(); j++)
+ {
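+            // The template's vtable slot points just past the ComMethodTable header for
+            // that interface, so backing up by one ComMethodTable recovers the header.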
+ ComMethodTable *pItfComMT = (ComMethodTable *)pTemplate->GetVTableSlot(j) - 1;
+ if (pItfComMT && !pItfComMT->IsComClassItf())
+ {
+ if (InlineIsEqualGUID(pItfComMT->GetIID(), riid))
+ {
+ pIntfMT = pItfComMT->GetMethodTable();
+ imapIndex = j;
+ break;
+ }
+ }
+ }
+
+ if (imapIndex == -1)
+ {
+ // Check for the standard interfaces.
+ SimpleComCallWrapper* pSimpleWrap = pWrap->GetSimpleWrapper();
+ IUnknown * pIntf = pSimpleWrap->QIStandardInterface(riid);
+ if (pIntf)
+ RETURN pIntf;
+
+ pIntf = GetComIPFromCCW_ForIID_Worker(pWrap, riid, pIntfMT, flags, pTemplate);
+ if (pIntf)
+ RETURN pIntf;
+ }
+ }
+ else
+ {
+ imapIndex = GetIndexForIntfMT(pTemplate, pIntfMT);
+
+ if (!pIntfMT->IsInterface())
+ {
+ IUnknown * pIntf = GetComIPFromCCW_ForIntfMT_Worker(pWrap, pIntfMT, flags);
+ if (pIntf)
+ RETURN pIntf;
+ }
+ }
+
+    // At this point, all of the 'fast' special cases have already returned and we're
+    // left with either no interface found (imapIndex == -1) or a user-code-implemented
+    // interface found ((imapIndex != -1) && (pIntfMT != NULL)).
+
+ unsigned intfIndex = imapIndex;
+ if (imapIndex != -1)
+ {
+ // We don't support QI calls for non-WinRT interfaces that have generic arguments.
+ _ASSERTE(pIntfMT != NULL);
+ if (pIntfMT->HasInstantiation() && !pIntfMT->SupportsGenericInterop(TypeHandle::Interop_NativeToManaged, MethodTable::modeProjected))
+ {
+ COMPlusThrow(kInvalidOperationException, IDS_EE_ATTEMPT_TO_CREATE_GENERIC_CCW);
+ }
+
+ // The first block has one slot for the IClassX vtable pointer
+ // and one slot for the basic vtable pointer.
+ imapIndex += Slot_FirstInterface;
+ }
+ else if (pTemplate->SupportsVariantInterface())
+ {
+ // We haven't found an interface corresponding to the incoming pIntfMT/IID because we don't implement it.
+ // However, we could still implement an interface that is castable to pIntfMT/IID via co-/contra-variance.
+ IUnknown * pIntf = pWrap->GetComIPFromCCWUsingVariance(riid, pIntfMT, flags);
+ if (pIntf != NULL)
+ {
+ RETURN pIntf;
+ }
+ }
+
+    // COM+ objects that extend from COM classes are special,
+    // unless the CCW points to a TP, in which case the COM object
+    // is remote, so let the calls go through the CCW.
+    // Also, if we're being asked for just an IInspectable, we don't need to do this (we may be in the process
+    // of activating our aggregated object so can't use the RCW yet) - this is analogous to how IUnknown is handled
+    // specially with GetBasicIP at the top of this function.
+ if (pWrap->IsExtendsCOMObject() && !pWrap->IsObjectTP() && !IsEqualGUID(riid, IID_IInspectable))
+ {
+ IUnknown * pIntf = GetComIPFromCCW_HandleExtendsCOMObject(pWrap, riid, pIntfMT,
+ pTemplate, imapIndex, intfIndex);
+ if (pIntf)
+ RETURN pIntf;
+ }
+
+ // check if interface is supported
+ if (imapIndex == -1)
+ RETURN NULL;
+
+ // interface method table != NULL
+ _ASSERTE(pIntfMT != NULL);
+
+ // IUnknown* loc within the wrapper
+ SLOT** ppVtable = GetComIPLocInWrapper(pWrap, imapIndex);
+ _ASSERTE(*ppVtable != NULL); // this should point to COM Vtable or interface vtable
+
+ // Finish laying out the interface COM method table if it has not been done yet.
+ ComMethodTable *pItfComMT = ComMethodTable::ComMethodTableFromIP((IUnknown*)ppVtable);
+ if (!pItfComMT->IsLayoutComplete())
+ {
+ MethodTable *pClassMT;
+ if (pItfComMT->IsWinRTFactoryInterface() || pItfComMT->IsWinRTStaticInterface())
+ {
+ // use the runtime class instead of the factory class
+ pClassMT = pTemplate->GetWinRTRuntimeClass();
+ }
+ else
+ {
+ pClassMT = pTemplate->GetClassType().GetMethodTable();
+ }
+ if (!pItfComMT->LayOutInterfaceMethodTable(pClassMT))
+ RETURN NULL;
+ }
+
+ // AddRef the wrapper.
+ // Note that we don't do SafeAddRef(pIntf) because it's overkill to
+ // go via IUnknown when we already have the wrapper in-hand.
+ ULONG cbRef = pWrap->AddRefWithAggregationCheck();
+
+ // 0xbadF00d implies the AddRef didn't go through
+ if (cbRef == 0xbadf00d)
+ RETURN NULL;
+
+ // The interface pointer is the pointer to the vtable.
+ IUnknown * pIntf = (IUnknown*)ppVtable;
+ // Retrieve the COM method table from the interface.
+ ComMethodTable * pIntfComMT = ComMethodTable::ComMethodTableFromIP(pIntf);
+
+ // Manual inlining of GetComIPFromCCW_VisibilityCheck() for common case.
+ if (// Ensure that the interface we are passing out was defined in trusted code.
+ (!(flags & GetComIPFromCCW::SuppressSecurityCheck) && pIntfComMT->IsDefinedInUntrustedCode())
+ // Do a visibility check if needed.
+ || ((flags & GetComIPFromCCW::CheckVisibility) && (!pIntfComMT->IsComVisible())))
+ {
+ // If not, fail to return the interface.
+ SafeRelease(pIntf);
+ pIntf = NULL;
+ }
+ RETURN pIntf;
+}
+
+// static
+IUnknown * ComCallWrapper::GetComIPFromCCW_ForIID_Worker(
+ ComCallWrapper * pWrap, REFIID riid, MethodTable * pIntfMT, GetComIPFromCCW::flags flags,
+ ComCallWrapperTemplate * pTemplate)
+{
+ CONTRACT(IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrap));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ ComMethodTable * pIntfComMT = NULL;
+ MethodTable * pMT = pWrap->GetMethodTableOfObjectRef();
+
+    // At this point, it must be that the IID is one of the IClassX IIDs or
+    // it isn't implemented on this class. We'll have to search through and set
+    // up the entire hierarchy to determine which it is.
+ if (IsIClassX(pMT, riid, &pIntfComMT))
+ {
+        // If the class that this IClassX was generated for is marked
+ // as ClassInterfaceType.AutoDual or AutoDisp, or it is a WinRT
+ // delegate, then give out the IClassX IP.
+ if (pIntfComMT->GetClassInterfaceType() == clsIfAutoDual || pIntfComMT->GetClassInterfaceType() == clsIfAutoDisp ||
+ pIntfComMT->IsWinRTDelegate())
+ {
+            // Make sure all the base classes of the class this IClassX corresponds to
+ // are visible to COM.
+ pIntfComMT->CheckParentComVisibility(FALSE);
+
+            // Give out the IClassX of this class because the IID matches one of the IClassX interfaces in the hierarchy.
+            // This assumes any IClassX implementation derives from the base class's IClassX implementation.
+ IUnknown * pIntf = pWrap->GetIClassXIP();
+ RETURN GetComIPFromCCW_VisibilityCheck(pIntf, pIntfMT, pIntfComMT, flags);
+ }
+ }
+
+ RETURN NULL;
+}
+
+// static
+IUnknown * ComCallWrapper::GetComIPFromCCW_ForIntfMT_Worker(
+ ComCallWrapper * pWrap, MethodTable * pIntfMT, GetComIPFromCCW::flags flags)
+{
+ CONTRACT(IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrap));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ MethodTable * pMT = pWrap->GetMethodTableOfObjectRef();
+
+ // class method table
+ if (pMT->CanCastToClass(pIntfMT))
+ {
+ // Make sure we're not trying to pass out a generic-based class interface (except for WinRT delegates)
+ if (pMT->HasInstantiation() && !pMT->SupportsGenericInterop(TypeHandle::Interop_NativeToManaged))
+ {
+ COMPlusThrow(kInvalidOperationException, IDS_EE_ATTEMPT_TO_CREATE_GENERIC_CCW);
+ }
+
+ // Retrieve the COM method table for the requested interface.
+ ComCallWrapperTemplate *pIntfCCWTemplate = ComCallWrapperTemplate::GetTemplate(TypeHandle(pIntfMT));
+ if (pIntfCCWTemplate->SupportsIClassX())
+ {
+ ComMethodTable * pIntfComMT = pIntfCCWTemplate->GetClassComMT();
+
+            // If the class that this IClassX was generated for is marked
+ // as ClassInterfaceType.AutoDual or AutoDisp, or it is a WinRT
+ // delegate, then give out the IClassX IP.
+ if (pIntfComMT->GetClassInterfaceType() == clsIfAutoDual || pIntfComMT->GetClassInterfaceType() == clsIfAutoDisp ||
+ pIntfComMT->IsWinRTDelegate())
+ {
+                // Make sure all the base classes of the class this IClassX corresponds to
+ // are visible to COM.
+ pIntfComMT->CheckParentComVisibility(FALSE);
+
+                // Give out IClassX
+ IUnknown * pIntf = pWrap->GetIClassXIP();
+ RETURN GetComIPFromCCW_VisibilityCheck(pIntf, pIntfMT, pIntfComMT, flags);
+ }
+ }
+ }
+ RETURN NULL;
+}
+
+
+//--------------------------------------------------------------------------
+// Get an interface from wrapper, based on riid or pIntfMT. The returned interface is AddRef'd.
+//--------------------------------------------------------------------------
+IUnknown* ComCallWrapper::GetComIPFromCCWNoThrow(ComCallWrapper *pWrap, REFIID riid, MethodTable* pIntfMT,
+ GetComIPFromCCW::flags flags)
+{
+ CONTRACT(IUnknown*)
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrap));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ IUnknown * pUnk = NULL;
+
+ EX_TRY
+ {
+ pUnk = GetComIPFromCCW(pWrap, riid, pIntfMT, flags);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ RETURN pUnk;
+}
+
+//--------------------------------------------------------------------------
+// Get the IDispatch interface pointer for the wrapper.
+// The returned interface is AddRef'd.
+//--------------------------------------------------------------------------
+IDispatch* ComCallWrapper::GetIDispatchIP()
+{
+ CONTRACT (IDispatch*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ SimpleComCallWrapper* pSimpleWrap = GetSimpleWrapper();
+ MethodTable* pMT = pSimpleWrap->GetMethodTable();
+
+ // Retrieve the type of the default interface for the class.
+    // WinRT types always have DefaultInterfaceType_IUnknown and therefore won't support IDispatch
+ TypeHandle hndDefItfClass;
+ DefaultInterfaceType DefItfType = GetDefaultInterfaceForClassWrapper(TypeHandle(pMT), &hndDefItfClass);
+
+ if ((DefItfType == DefaultInterfaceType_AutoDual) || (DefItfType == DefaultInterfaceType_AutoDispatch))
+ {
+ // Make sure we release the BasicIP we're about to get.
+ SafeComHolder<IUnknown> pBasic = GetBasicIP();
+ ComMethodTable* pCMT = ComMethodTable::ComMethodTableFromIP(pBasic);
+ pCMT->CheckParentComVisibility(TRUE);
+ }
+
+ // If the class implements IReflect then use the IDispatchEx implementation.
+ // WinRT objects cannot implement IReflect as WinMDExp doesn't support exporting non-WinRT interfaces
+ if (SimpleComCallWrapper::SupportsIReflect(pMT))
+ {
+        // The class implements IReflect so let it handle IDispatch calls.
+ // We will do this by exposing the IDispatchEx implementation of IDispatch.
+ RETURN (IDispatch *)pSimpleWrap->QIStandardInterface(IID_IDispatchEx);
+ }
+
+ // Get the correct default interface
+ switch (DefItfType)
+ {
+ case DefaultInterfaceType_Explicit:
+ {
+ _ASSERTE(!hndDefItfClass.IsNull());
+ _ASSERTE(hndDefItfClass.IsInterface());
+
+ CorIfaceAttr ifaceType = hndDefItfClass.GetMethodTable()->GetComInterfaceType();
+ if (IsDispatchBasedItf(ifaceType))
+ {
+ RETURN (IDispatch*)GetComIPFromCCW(this, GUID_NULL, hndDefItfClass.GetMethodTable(),
+ GetComIPFromCCW::SuppressSecurityCheck);
+ }
+ else
+ {
+ RETURN NULL;
+ }
+ }
+
+ case DefaultInterfaceType_IUnknown:
+ {
+ RETURN NULL;
+ }
+
+ case DefaultInterfaceType_AutoDual:
+ case DefaultInterfaceType_AutoDispatch:
+ {
+ RETURN (IDispatch*)GetBasicIP();
+ }
+
+ case DefaultInterfaceType_BaseComClass:
+ {
+ SyncBlock* pBlock = GetSyncBlock();
+ _ASSERTE(pBlock);
+
+ SafeComHolder<IDispatch> pDisp;
+
+ RCWHolder pRCW(GetThread());
+ RCWPROTECT_BEGIN(pRCW, pBlock);
+
+ pDisp = pRCW->GetIDispatch();
+
+ RCWPROTECT_END(pRCW);
+ RETURN pDisp.Extract();
+ }
+
+ default:
+ {
+ _ASSERTE(!"Invalid default interface type!");
+ RETURN NULL;
+ }
+ }
+}
+
+// MakeAgile needs the object passed in because it has to set it in the new handle
+// If the original handle is from an unloaded appdomain, it will no longer be valid
+// so we won't be able to get the object.
+void ComCallWrapper::MakeAgile(OBJECTREF pObj)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+
+ // if this assert fires, then we've called addref from a place where we need to
+ // make the object agile but we haven't supplied the object. Need to change the caller.
+ PRECONDITION(pObj != NULL);
+ }
+ CONTRACTL_END;
+
+ OBJECTHANDLE origHandle = m_ppThis;
+ RCOBJECTHANDLEHolder agileHandle = SharedDomain::GetDomain()->CreateRefcountedHandle(pObj);
+ _ASSERTE(agileHandle);
+
+ SimpleComCallWrapper *pSimpleWrap = GetSimpleWrapper();
+ ComCallWrapperCache *pWrapperCache = pSimpleWrap->GetWrapperCache();
+
+
+ // lock the wrapper cache so nobody else can update to agile while we are
+ {
+ ComCallWrapperCache::LockHolder lh(pWrapperCache);
+
+ if (pSimpleWrap->IsAgile())
+ {
+ // someone beat us to it - let the holder destroy it and return
+ return;
+ }
+
+ // Update all the wrappers to use the agile handle.
+ ComCallWrapper *pWrap = this;
+ while (pWrap)
+ {
+ pWrap->m_ppThis = agileHandle;
+ pWrap = GetNext(pWrap);
+ }
+
+ // so all the handles are updated - now update the simple wrapper
+        // keep the lock so someone else doesn't try to make it agile at the same time
+ pSimpleWrap->MakeAgile(origHandle);
+ }
+
+ agileHandle.SuppressRelease();
+}
+
+//--------------------------------------------------------------------------
+// ComCallable wrapper manager
+// constructor
+//--------------------------------------------------------------------------
+ComCallWrapperCache::ComCallWrapperCache() :
+ m_lock(CrstCOMWrapperCache),
+ m_cbRef(0),
+ m_pCacheLineAllocator(NULL),
+ m_pDomain(NULL)
+{
+ WRAPPER_NO_CONTRACT;
+
+}
+
+
+//-------------------------------------------------------------------
+// ComCallable wrapper manager
+// destructor
+//-------------------------------------------------------------------
+ComCallWrapperCache::~ComCallWrapperCache()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_INTEROP, LL_INFO100, "ComCallWrapperCache::~ComCallWrapperCache %8.8x in domain [%d] %8.8x %S\n",
+ this, GetDomain()?GetDomain()->GetId().m_dwId:0,
+ GetDomain(), GetDomain() ? GetDomain()->GetFriendlyNameForLogging() : NULL));
+
+ if (m_pCacheLineAllocator)
+ {
+ delete m_pCacheLineAllocator;
+ m_pCacheLineAllocator = NULL;
+ }
+
+ AppDomain *pDomain = GetDomain(); // don't use member directly, need to mask off flags
+ if (pDomain)
+ {
+ // clear hook in AppDomain as we're going away
+ pDomain->ResetComCallWrapperCache();
+ }
+}
+
+
+//-------------------------------------------------------------------
+// ComCallable wrapper manager
+// Create/Init method
+//-------------------------------------------------------------------
+ComCallWrapperCache *ComCallWrapperCache::Create(AppDomain *pDomain)
+{
+ CONTRACT (ComCallWrapperCache*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pDomain));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ NewHolder<ComCallWrapperCache> pWrapperCache = new ComCallWrapperCache();
+
+ LOG((LF_INTEROP, LL_INFO100, "ComCallWrapperCache::Create %8.8x in domain %8.8x %S\n",
+ (ComCallWrapperCache *)pWrapperCache, pDomain, pDomain->GetFriendlyName(FALSE)));
+
+ NewHolder<CCacheLineAllocator> line = new CCacheLineAllocator;
+
+ pWrapperCache->m_pDomain = pDomain;
+ pWrapperCache->m_pCacheLineAllocator = line;
+
+ pWrapperCache->AddRef();
+
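+    // Construction can no longer fail at this point - suppress the holders so the
+    // allocator and the cache are not torn down when this scope exits.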
+ line.SuppressRelease();
+ pWrapperCache.SuppressRelease();
+ RETURN pWrapperCache;
+}
+
+
+void ComCallWrapperCache::Neuter()
+{
+ WRAPPER_NO_CONTRACT;
+
+ LOG((LF_INTEROP, LL_INFO100, "ComCallWrapperCache::Neuter %8.8x in domain [%d] %8.8x %S\n",
+ this, GetDomain()?GetDomain()->GetId().m_dwId:0,
+ GetDomain(),GetDomain() ? GetDomain()->GetFriendlyNameForLogging() : NULL));
+ ClearDomain();
+}
+
+
+//-------------------------------------------------------------------
+// ComCallable wrapper manager
+// LONG AddRef()
+//-------------------------------------------------------------------
+LONG ComCallWrapperCache::AddRef()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ COUNTER_ONLY(GetPerfCounters().m_Interop.cCCW++);
+
+ LONG i = FastInterlockIncrement(&m_cbRef);
+ LOG((LF_INTEROP, LL_INFO100, "ComCallWrapperCache::Addref %8.8x with %d in domain [%d] %8.8x %S\n",
+ this, i, GetDomain()?GetDomain()->GetId().m_dwId:0,
+ GetDomain(), GetDomain() ? GetDomain()->GetFriendlyNameForLogging() : NULL));
+
+ return i;
+}
+
+//-------------------------------------------------------------------
+// ComCallable wrapper manager
+// LONG Release()
+//-------------------------------------------------------------------
+LONG ComCallWrapperCache::Release()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ COUNTER_ONLY(GetPerfCounters().m_Interop.cCCW--);
+
+ LONG i = FastInterlockDecrement(&m_cbRef);
+ _ASSERTE(i >= 0);
+
+ LOG((LF_INTEROP, LL_INFO100, "ComCallWrapperCache::Release %8.8x with %d in domain [%d] %8.8x %S\n",
+ this, i, GetDomain()?GetDomain()->GetId().m_dwId:0,
+ GetDomain(), GetDomain() ? GetDomain()->GetFriendlyNameForLogging() : NULL));
+    if (i == 0)
+ delete this;
+
+ return i;
+}
+
+
+//--------------------------------------------------------------------------
+// void ComMethodTable::Cleanup()
+// free the stubs and the vtable
+//--------------------------------------------------------------------------
+void ComMethodTable::Cleanup()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ unsigned cbExtraSlots = GetNumExtraSlots(GetInterfaceType());
+ unsigned cbSlots = m_cbSlots;
+
+ SLOT* pComVtable = (SLOT *)(this + 1);
+
+ // If we have created and laid out the method desc then we need to delete them.
+ if (IsLayoutComplete())
+ {
+#ifdef PROFILING_SUPPORTED
+ // We used to issue the COMClassicVTableDestroyed callback from here.
+ // However, that causes an AV. At this point the MethodTable is gone
+ // (as the AppDomain containing it has been unloaded), but the ComMethodTable
+ // still points to it. The code here used to wrap a TypeHandle around the
+ // MethodTable pointer, cast to a ClassID, and then call COMClassicVTableDestroyed.
+ // But the act of casting to a TypeHandle invokes debug-code to verify the
+ // MethodTable, which causes an AV.
+ //
+ // For now, we're not issuing the COMClassicVTableDestroyed callback anymore.
+ // <REVISIT_TODO>Reexamine the profiling API around
+ // CCWs and move the callback elsewhere and / or rethink the current
+ // set of CCW callbacks to mirror reality more accurately.</REVISIT_TODO>
+#endif // PROFILING_SUPPORTED
+
+ for (unsigned i = cbExtraSlots; i < cbSlots+cbExtraSlots; i++)
+ {
+ // Don't bother grabbing the ComCallMethodDesc if the method represented by the
+ // current vtable slot doesn't belong to the current ComMethodTable.
+ if (!OwnedbyThisMT(i))
+ {
+ continue;
+ }
+
+ // ComCallMethodDescFromSlot returns NULL when the
+ // ComCallMethodDesc has already been cleaned up.
+ ComCallMethodDesc* pCMD = ComCallMethodDescFromSlot(i);
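+            // A slot still holding (SLOT)-1 is an empty entry in a sparse vtable
+            // (see LayOutInterfaceMethodTable), so there is nothing to destruct.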
+ if ( (pComVtable[i] == (SLOT)-1 ) ||
+ (pCMD == NULL)
+ )
+ {
+ continue;
+ }
+
+            // All the stubs that are in a COM->COM+ VTable are to the generic
+            // helpers (g_pGenericComCallStubFields, etc.). So all we do is
+            // discard the resources held by the ComCallMethodDesc.
+ pCMD->Destruct();
+ }
+ }
+
+ if (m_pDispatchInfo)
+ delete m_pDispatchInfo;
+ if (m_pMDescr)
+ DeleteExecutable(m_pMDescr);
+ if (m_pITypeInfo && !g_fProcessDetach)
+ SafeRelease(m_pITypeInfo);
+
+ DeleteExecutable(this);
+}
+
+
+//--------------------------------------------------------------------------
+// Lays out the members of a ComMethodTable that represents an IClassX.
+//--------------------------------------------------------------------------
+void ComMethodTable::LayOutClassMethodTable()
+{
+ CONTRACTL
+ {
+ PRECONDITION(m_pMT != NULL);
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ if (IsWinRTDelegate())
+ {
+ // IClassX for WinRT delegates is a special interface
+ LayOutDelegateMethodTable();
+ return;
+ }
+
+ GCX_PREEMP();
+
+ unsigned i;
+ IDispatchVtable* pDispVtable;
+ SLOT *pComVtable;
+ unsigned cbPrevSlots = 0;
+ unsigned cbAlloc = 0;
+ NewExecutableHolder<BYTE> pMDMemoryPtr = NULL;
+ BYTE* pMethodDescMemory = NULL;
+ unsigned cbNumParentVirtualMethods = 0;
+ unsigned cbTotalParentFields = 0;
+ unsigned cbParentComMTSlots = 0;
+ MethodTable* pComPlusParentClass = m_pMT->GetComPlusParentMethodTable();
+ MethodTable* pParentClass = m_pMT->GetParentMethodTable();
+ MethodTable* pCurrParentClass = pParentClass;
+ MethodTable* pCurrMT = m_pMT;
+ InteropMethodTableData *pCurrParentInteropMT = NULL;
+ InteropMethodTableData *pCurrInteropMT = NULL;
+ ComMethodTable* pParentComMT = NULL;
+ const unsigned cbExtraSlots = GetNumExtraSlots(ifDual);
+ CQuickEEClassPtrs apClassesToProcess;
+ int cClassesToProcess = 0;
+
+ //
+ // If we have a parent ensure its IClassX COM method table is laid out.
+ //
+
+ if (pComPlusParentClass)
+ {
+ pParentComMT = ComCallWrapperTemplate::SetupComMethodTableForClass(pComPlusParentClass, TRUE);
+ cbParentComMTSlots = pParentComMT->m_cbSlots;
+ }
+
+ LOG((LF_INTEROP, LL_INFO1000, "LayOutClassMethodTable: %s, parent: %s, this: %p\n", m_pMT->GetDebugClassName(), pParentClass ? pParentClass->GetDebugClassName() : 0, this));
+
+ //
+ // Allocate a temporary space to generate the vtable into.
+ //
+
+ S_UINT32 cbTempVtable = (S_UINT32(m_cbSlots) + S_UINT32(cbExtraSlots)) * S_UINT32(sizeof(SLOT));
+
+ if (cbTempVtable.IsOverflow())
+ ThrowHR(COR_E_OVERFLOW);
+
+ NewArrayHolder<BYTE> pTempVtable = new BYTE[cbTempVtable.Value()];
+ pDispVtable = (IDispatchVtable *)pTempVtable.GetValue();
+
+ //
+ // Set up the IUnknown and IDispatch methods.
+ //
+
+ // Setup IUnknown vtable
+ pDispVtable->m_qi = (SLOT)Unknown_QueryInterface;
+ pDispVtable->m_addref = (SLOT)Unknown_AddRef;
+ pDispVtable->m_release = (SLOT)Unknown_Release;
+
+
+ // Set up the common portion of the IDispatch vtable.
+ pDispVtable->m_GetTypeInfoCount = (SLOT)Dispatch_GetTypeInfoCount_Wrapper;
+ pDispVtable->m_GetTypeInfo = (SLOT)Dispatch_GetTypeInfo_Wrapper;
+
+ // If the class interface is a pure disp interface then we need to use the
+    // internal implementation of IDispatch for GetIDsOfNames and Invoke.
+ if (GetClassInterfaceType() == clsIfAutoDisp)
+ {
+ // Use the internal implementation.
+ pDispVtable->m_GetIDsOfNames = (SLOT)InternalDispatchImpl_GetIDsOfNames_Wrapper;
+ pDispVtable->m_Invoke = (SLOT)InternalDispatchImpl_Invoke_Wrapper;
+ }
+ else
+ {
+ // We need to set the entry points to the Dispatch versions which determine
+ // which implementation to use at runtime based on the class that implements
+ // the interface.
+ pDispVtable->m_GetIDsOfNames = (SLOT)Dispatch_GetIDsOfNames_Wrapper;
+ pDispVtable->m_Invoke = (SLOT)Dispatch_Invoke_Wrapper;
+ }
+
+
+ //
+ // Lay out the portion of the vtable containing the methods of the class.
+ //
+ // Note that we only do this if the class doesn't have any generic instantiations
+    // in its hierarchy.
+ //
+ ArrayList NewCOMMethodDescs;
+ ComCallMethodDescArrayHolder NewCOMMethodDescsHolder(&NewCOMMethodDescs);
+
+ unsigned cbNewSlots = 0;
+
+ //
+ // Copy the members down from our parent's template
+ // We guarantee to have at least all the slots from parent's template
+ //
+
+ pComVtable = ((SLOT*)pDispVtable) + cbExtraSlots;
+ if (pParentComMT)
+ {
+ SLOT *pPrevComVtable = ((SLOT *)(pParentComMT + 1)) + cbExtraSlots;
+ CopyMemory(pComVtable, pPrevComVtable, sizeof(SLOT) * cbParentComMTSlots);
+ cbPrevSlots = cbParentComMTSlots;
+ }
+
+ if (!m_pMT->HasGenericClassInstantiationInHierarchy())
+ {
+ //
+ // Allocate method desc's for the rest of the slots.
+ //
+ unsigned cbMethodDescs = (COMMETHOD_PREPAD + sizeof(ComCallMethodDesc)) * (m_cbSlots - cbParentComMTSlots);
+ cbAlloc = cbMethodDescs;
+ if (cbAlloc > 0)
+ {
+ pMDMemoryPtr = (BYTE*) new (executable) BYTE[cbAlloc + sizeof(UINT_PTR)];
+ pMethodDescMemory = (BYTE*)pMDMemoryPtr;
+
+ // initialize the method desc memory to zero
+ FillMemory(pMethodDescMemory, cbAlloc, 0x0);
+
+ *(UINT_PTR *)pMethodDescMemory = cbMethodDescs; // fill in the size of the method desc's
+
+ // move past the size
+ pMethodDescMemory += sizeof(UINT_PTR);
+ }
+
+ _ASSERTE(0 == (((DWORD_PTR)pMethodDescMemory) & (sizeof(void*)-1)));
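+
+        // Rough shape of each blob carved out of this allocation (a sketch):
+        //
+        //   [ COMMETHOD_PREPAD bytes - later patched with a CALL to ComCallPreStub ]
+        //   [ ComCallMethodDesc                                                    ]
+        //
+        // The vtable slot for the method is filled with the address of the emitted
+        // stub, which is how ComCallMethodDescFromSlot can later recover the method
+        // desc from a slot during cleanup.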
+
+ //
+ // Create an array of all the classes that need to be laid out.
+ //
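+        // (The array runs from the most-derived class at index 0 up to the first managed
+        // (COM+) parent at the end; the processing loop below walks it from the base end
+        // toward the most-derived class, so base-class members receive their vtable
+        // slots before derived-class members.)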
+
+ do
+ {
+ apClassesToProcess.ReSizeThrows(cClassesToProcess + 2);
+ apClassesToProcess[cClassesToProcess++] = pCurrMT;
+ pCurrMT = pCurrMT->GetParentMethodTable();
+ }
+ while (pCurrMT != pComPlusParentClass);
+ apClassesToProcess[cClassesToProcess++] = pCurrMT;
+
+ //
+ // Set up the COM call method desc's for all the methods and fields that were introduced
+ // between the current class and its parent COM+ class. This includes any methods on
+ // COM classes.
+ //
+ for (cClassesToProcess -= 2; cClassesToProcess >= 0; cClassesToProcess--)
+ {
+ //
+ // Retrieve the current class and the current parent class.
+ //
+
+ pCurrMT = apClassesToProcess[cClassesToProcess];
+ pCurrInteropMT = pCurrMT->GetComInteropData();
+ _ASSERTE(pCurrInteropMT);
+
+ pCurrParentClass = apClassesToProcess[cClassesToProcess + 1];
+
+
+ //
+ // Retrieve the number of fields and vtable methods on the parent class.
+ //
+
+ if (pCurrParentClass)
+ {
+ cbTotalParentFields = pCurrParentClass->GetNumInstanceFields();
+ pCurrParentInteropMT = pCurrParentClass->GetComInteropData();
+ _ASSERTE(pCurrParentInteropMT);
+ cbNumParentVirtualMethods = pCurrParentInteropMT->cVTable;
+ }
+
+
+ //
+ // Set up the COM call method desc's for methods that were not public in the parent class
+ // but were made public in the current class.
+ //
+
+ for (i = 0; i < cbNumParentVirtualMethods; i++)
+ {
+ MethodDesc* pMD = NULL;
+ InteropMethodTableSlotData *pCurrInteropMD = NULL;
+ pCurrInteropMD = &pCurrInteropMT->pVTable[i];
+ pMD = pCurrInteropMD->pMD;
+ MethodDesc* pParentMD = NULL;
+ InteropMethodTableSlotData *pCurrParentInteropMD = NULL;
+ pCurrParentInteropMD = &pCurrParentInteropMT->pVTable[i];
+ pParentMD = pCurrParentInteropMD->pMD;
+
+ if (pMD &&
+ !(pCurrInteropMD ? IsDuplicateClassItfMD(pCurrInteropMD, i) : IsDuplicateClassItfMD(pMD, i)) &&
+ IsOverloadedComVisibleMember(pMD, pParentMD))
+ {
+ // some bytes are reserved for CALL xxx before the method desc
+ ComCallMethodDesc* pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ NewCOMMethodDescs.Append(pNewMD);
+
+ pNewMD->InitMethod(pMD, NULL);
+
+ emitCOMStubCall(pNewMD, GetEEFuncEntryPoint(ComCallPreStub));
+
+ FillInComVtableSlot(pComVtable, cbPrevSlots++, pNewMD);
+
+ pMethodDescMemory += (COMMETHOD_PREPAD + sizeof(ComCallMethodDesc));
+ }
+ }
+
+
+ //
+ // Set up the COM call method desc's for all newly introduced public methods.
+ //
+
+ unsigned cbNumVirtualMethods = 0;
+ cbNumVirtualMethods = pCurrInteropMT->cVTable;
+ for (i = cbNumParentVirtualMethods; i < cbNumVirtualMethods; i++)
+ {
+ MethodDesc* pMD = NULL;
+ InteropMethodTableSlotData *pCurrInteropMD = NULL;
+ pCurrInteropMD = &pCurrInteropMT->pVTable[i];
+ pMD = pCurrInteropMD->pMD;
+
+ if (pMD &&
+ !(pCurrInteropMD ? IsDuplicateClassItfMD(pCurrInteropMD, i) : IsDuplicateClassItfMD(pMD, i)) &&
+ IsNewComVisibleMember(pMD))
+ {
+ // some bytes are reserved for CALL xxx before the method desc
+ ComCallMethodDesc* pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ NewCOMMethodDescs.Append(pNewMD);
+
+ pNewMD->InitMethod(pMD, NULL);
+
+ emitCOMStubCall(pNewMD, GetEEFuncEntryPoint(ComCallPreStub));
+
+ FillInComVtableSlot(pComVtable, cbPrevSlots++, pNewMD);
+
+ pMethodDescMemory += (COMMETHOD_PREPAD + sizeof(ComCallMethodDesc));
+ }
+ }
+
+
+ //
+ // Add the non virtual methods introduced on the current class.
+ //
+
+ MethodTable::MethodIterator it(pCurrMT);
+ for (; it.IsValid(); it.Next())
+ {
+ if (!it.IsVirtual()) {
+ MethodDesc* pMD = it.GetMethodDesc();
+
+ if (pMD != NULL && !IsDuplicateClassItfMD(pMD, it.GetSlotNumber()) &&
+ IsNewComVisibleMember(pMD) && !pMD->IsStatic() && !pMD->IsCtor()
+ && (!pCurrMT->IsValueType() || (GetClassInterfaceType() != clsIfAutoDual && IsStrictlyUnboxed(pMD))))
+ {
+ // some bytes are reserved for CALL xxx before the method desc
+ ComCallMethodDesc* pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ NewCOMMethodDescs.Append(pNewMD);
+
+ pNewMD->InitMethod(pMD, NULL);
+
+ emitCOMStubCall(pNewMD, GetEEFuncEntryPoint(ComCallPreStub));
+
+ FillInComVtableSlot(pComVtable, cbPrevSlots++, pNewMD);
+
+ pMethodDescMemory += (COMMETHOD_PREPAD + sizeof(ComCallMethodDesc));
+ }
+ }
+ }
+
+
+ //
+ // Set up the COM call method desc's for the public fields defined in the current class.
+ //
+
+ // <TODO>check this approximation - we may be losing exact type information </TODO>
+ ApproxFieldDescIterator fdIterator(pCurrMT, ApproxFieldDescIterator::INSTANCE_FIELDS);
+ FieldDesc* pFD = NULL;
+ while ((pFD = fdIterator.Next()) != NULL)
+ {
+ if (IsMemberVisibleFromCom(pCurrMT, pFD->GetMemberDef(), mdTokenNil)) // if it is a public field grab it
+ {
+ // set up a getter method
+ // some bytes are reserved for CALL xxx before the method desc
+ ComCallMethodDesc* pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ NewCOMMethodDescs.Append(pNewMD);
+
+ pNewMD->InitField(pFD, TRUE);
+
+ emitCOMStubCall(pNewMD, GetEEFuncEntryPoint(ComCallPreStub));
+
+ FillInComVtableSlot(pComVtable, cbPrevSlots++, pNewMD);
+
+                    pMethodDescMemory += (COMMETHOD_PREPAD + sizeof(ComCallMethodDesc));
+
+ // setup a setter method
+ // some bytes are reserved for CALL xxx before the method desc
+ pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ NewCOMMethodDescs.Append(pNewMD);
+
+ pNewMD->InitField(pFD, FALSE);
+
+ emitCOMStubCall(pNewMD, GetEEFuncEntryPoint(ComCallPreStub));
+
+ FillInComVtableSlot(pComVtable, cbPrevSlots++, pNewMD);
+
+                    pMethodDescMemory += (COMMETHOD_PREPAD + sizeof(ComCallMethodDesc));
+ }
+ }
+ }
+ }
+ _ASSERTE(m_cbSlots == cbPrevSlots);
+
+ {
+ // Take the lock and copy data from the temporary vtable to this instance
+ CrstHolder ch(&g_CreateWrapperTemplateCrst);
+
+ if (IsLayoutComplete())
+ return;
+
+ // IDispatch vtable follows the header
+ CopyMemory(this + 1, pDispVtable, cbTempVtable.Value());
+
+ // Set the layout complete flag and release the lock.
+ m_Flags |= enum_LayoutComplete;
+
+ // We've successfully laid out the class method table so we need to suppress the release of the
+ // memory for the ComCallMethodDescs and store it inside the ComMethodTable so we can
+ // release it when we clean up the ComMethodTable.
+ m_pMDescr = (BYTE*)pMDMemoryPtr;
+ pMDMemoryPtr.SuppressRelease();
+ NewCOMMethodDescsHolder.SuppressRelease();
+ }
+
+ LOG((LF_INTEROP, LL_INFO1000, "LayOutClassMethodTable: %s, parent: %s, this: %p [DONE]\n", m_pMT->GetDebugClassName(), pParentClass ? pParentClass->GetDebugClassName() : 0, this));
+}
+
+
+BOOL ComMethodTable::CheckSigTypesCanBeLoaded(MethodTable *pItfClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pItfClass));
+ }
+ CONTRACTL_END;
+
+ if (!IsLayoutComplete())
+ {
+ if (!IsSigClassLoadChecked())
+ {
+ BOOL fCheckSuccess = TRUE;
+ unsigned cbSlots = pItfClass->GetNumVirtuals();
+
+ GCX_COOP();
+ OBJECTREF pThrowable = NULL;
+ GCPROTECT_BEGIN(pThrowable);
+ {
+ EX_TRY
+ {
+ // check the sigs of the methods to see if we can load
+ // all the classes
+ for (unsigned i = 0; i < cbSlots; i++)
+ {
+ MethodDesc* pIntfMD = m_pMT->GetMethodDescForSlot(i);
+ MetaSig::CheckSigTypesCanBeLoaded(pIntfMD);
+ }
+ }
+ EX_CATCH_THROWABLE(&pThrowable);
+
+ if (pThrowable != NULL)
+ {
+ HRESULT hr = SetupErrorInfo(pThrowable);
+
+ if (hr == COR_E_TYPELOAD)
+ {
+ // We only care about TypeLoadExceptions here. If one occurs, it means
+                        // that some of the types in the signature could not be loaded, so this
+                        // interface is not usable from COM.
+ SetSigClassCannotLoad();
+ }
+ else
+ {
+ // We want to rethrow any other exceptions that occur.
+ COMPlusThrow(pThrowable);
+ }
+ }
+ }
+ GCPROTECT_END();
+
+ SetSigClassLoadChecked();
+ }
+
+ _ASSERTE(IsSigClassLoadChecked() != 0);
+
+ // check if all types loaded successfully
+ if (IsSigClassCannotLoad())
+ {
+ LogInterop(W("CLASS LOAD FAILURE: in Interface method signature"));
+ return FALSE;
+ }
+ }
+ return TRUE;
+}
+
+
+
+//--------------------------------------------------------------------------
+// Lay out the members of a ComMethodTable that represents an interface.
+//--------------------------------------------------------------------------
+BOOL ComMethodTable::LayOutInterfaceMethodTable(MethodTable* pClsMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pClsMT, NULL_OK));
+ PRECONDITION(pClsMT == NULL || !pClsMT->IsInterface());
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ MethodTable *pItfClass = m_pMT;
+ CorIfaceAttr ItfType = m_pMT->GetComInterfaceType();
+ ULONG cbExtraSlots = GetNumExtraSlots(ItfType);
+
+ BYTE *pMethodDescMemory = NULL;
+ IUnkVtable* pUnkVtable;
+ SLOT *pComVtable;
+ unsigned i;
+
+#ifndef FEATURE_CORECLR
+    // Skip this unnecessary and expensive check for CoreCLR
+ if (!CheckSigTypesCanBeLoaded(pItfClass))
+ return FALSE;
+#endif
+
+ LOG((LF_INTEROP, LL_INFO1000, "LayOutInterfaceMethodTable: %s, this: %p\n", pItfClass->GetDebugClassName(), this));
+
+ unsigned cbSlots = pItfClass->GetNumVirtuals();
+
+ //
+ // Allocate a temporary space to generate the vtable into.
+ //
+ S_UINT32 cbTempVtable = (S_UINT32(m_cbSlots) + S_UINT32(cbExtraSlots)) * S_UINT32(sizeof(SLOT));
+ cbTempVtable += S_UINT32(cbSlots) * S_UINT32((COMMETHOD_PREPAD + sizeof(ComCallMethodDesc)));
+
+ if (cbTempVtable.IsOverflow())
+ ThrowHR(COR_E_OVERFLOW);
+
+ NewArrayHolder<BYTE> pTempVtable = new BYTE[cbTempVtable.Value()];
+
+ pUnkVtable = (IUnkVtable *)pTempVtable.GetValue();
+ pComVtable = ((SLOT*)pUnkVtable) + cbExtraSlots;
+
+ // Set all vtable slots to -1 for sparse vtables. That way we catch attempts
+ // to access empty slots quickly and, during cleanup, we can tell empty
+ // slots from full ones.
+ if (m_pMT->IsSparseForCOMInterop())
+        memset(pComVtable, -1, m_cbSlots * sizeof(SLOT));
+
+ // Method descs are at the end of the vtable
+ // m_cbSlots interfaces methods + IUnk methods
+ pMethodDescMemory = (BYTE *)&pComVtable[m_cbSlots];
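+
+    // Shape of the temporary buffer at this point (a sketch):
+    //
+    //   pUnkVtable        -> | QI | AddRef | Release | IDispatch/IInspectable slots, if any |
+    //   pComVtable        -> | interface slot 0 | ... | interface slot m_cbSlots-1 |
+    //   pMethodDescMemory -> per-method [ COMMETHOD_PREPAD | ComCallMethodDesc ] blobs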
+
+ // Setup IUnk vtable
+ pUnkVtable->m_qi = (SLOT)Unknown_QueryInterface;
+ pUnkVtable->m_addref = (SLOT)Unknown_AddRef;
+ pUnkVtable->m_release = (SLOT)Unknown_Release;
+
+ if (IsDispatchBasedItf(ItfType))
+ {
+ // Setup the IDispatch vtable.
+ IDispatchVtable* pDispVtable = (IDispatchVtable*)pUnkVtable;
+
+ // Set up the common portion of the IDispatch vtable.
+ pDispVtable->m_GetTypeInfoCount = (SLOT)Dispatch_GetTypeInfoCount_Wrapper;
+ pDispVtable->m_GetTypeInfo = (SLOT)Dispatch_GetTypeInfo_Wrapper;
+
+ // If the interface is a pure disp interface then we need to use the internal
+ // implementation since OleAut does not support invoking on pure disp interfaces.
+ if (ItfType == ifDispatch)
+ {
+ // Use the internal implementation.
+ pDispVtable->m_GetIDsOfNames = (SLOT)InternalDispatchImpl_GetIDsOfNames_Wrapper;
+ pDispVtable->m_Invoke = (SLOT)InternalDispatchImpl_Invoke_Wrapper;
+ }
+ else
+ {
+ // We need to set the entry points to the Dispatch versions which determine
+            // which implementation to use at runtime based on the class that implements
+ // the interface.
+ pDispVtable->m_GetIDsOfNames = (SLOT)Dispatch_GetIDsOfNames_Wrapper;
+ pDispVtable->m_Invoke = (SLOT)Dispatch_Invoke_Wrapper;
+ }
+ }
+ else if (ItfType == ifInspectable)
+ {
+ // Setup the IInspectable vtable.
+ IInspectableVtable *pInspVtable = (IInspectableVtable *)pUnkVtable;
+
+ pInspVtable->m_GetIIDs = (SLOT)Inspectable_GetIIDs_Wrapper;
+ pInspVtable->m_GetRuntimeClassName = (SLOT)Inspectable_GetRuntimeClassName_Wrapper;
+ pInspVtable->m_GetTrustLevel = (SLOT)Inspectable_GetTrustLevel_Wrapper;
+ }
+
+ ArrayList NewCOMMethodDescs;
+ ComCallMethodDescArrayHolder NewCOMMethodDescsHolder(&NewCOMMethodDescs);
+
+ for (i = 0; i < cbSlots; i++)
+ {
+ // Some space for a CALL xx xx xx xx stub is reserved before the beginning of the MethodDesc
+ ComCallMethodDesc* pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ NewCOMMethodDescs.Append(pNewMD);
+
+ MethodDesc* pIntfMD = m_pMT->GetMethodDescForSlot(i);
+
+ if (m_pMT->HasInstantiation())
+ {
+ pIntfMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pIntfMD,
+ m_pMT,
+ FALSE, // forceBoxedEntryPoint
+ Instantiation(), // methodInst
+ FALSE, // allowInstParam
+ TRUE); // forceRemotableMethod
+ }
+
+ MethodDesc *pClassMD = NULL;
+ if (IsWinRTFactoryInterface())
+ {
+            // lookup the .ctor corresponding to this factory interface method
+ pClassMD = ComCall::GetCtorForWinRTFactoryMethod(pClsMT, pIntfMD);
+ _ASSERTE(pClassMD->IsCtor());
+ }
+ else if (IsWinRTStaticInterface())
+ {
+            // lookup the static method corresponding to this static interface method
+ pClassMD = ComCall::GetStaticForWinRTFactoryMethod(pClsMT, pIntfMD);
+ _ASSERTE(pClassMD->IsStatic());
+ }
+ else if (IsWinRTRedirectedInterface())
+ {
+ pClassMD = WinRTInterfaceRedirector::GetStubMethodForRedirectedInterface(
+ GetWinRTRedirectedInterfaceIndex(),
+ i,
+ TypeHandle::Interop_NativeToManaged,
+ FALSE,
+ m_pMT->GetInstantiation());
+ }
+ else if (pClsMT != NULL)
+ {
+ DispatchSlot impl(pClsMT->FindDispatchSlotForInterfaceMD(pIntfMD));
+ pClassMD = impl.GetMethodDesc();
+ }
+
+ if (pClassMD != NULL)
+ {
+ pNewMD->InitMethod(pClassMD, pIntfMD, IsWinRTRedirectedInterface());
+ }
+ else
+ {
+ // we will perform interface dispatch at run-time
+ // note that even in fully statically typed WinRT, we don't always have an implementation
+ // MethodDesc in the hierarchy because our metadata adapter does not make these up for
+ // redirected interfaces
+ pNewMD->InitMethod(pIntfMD, NULL, IsWinRTRedirectedInterface());
+ }
+
+ pMethodDescMemory += (COMMETHOD_PREPAD + sizeof(ComCallMethodDesc));
+ }
+
+ {
+ // Take the lock and copy data from the temporary vtable to this instance
+ CrstHolder ch(&g_CreateWrapperTemplateCrst);
+
+ if (IsLayoutComplete())
+ return TRUE;
+
+ // IUnk vtable follows the header
+ CopyMemory(this + 1, pUnkVtable, cbTempVtable.Value());
+
+ // Finish by emitting stubs and initializing the slots
+ pUnkVtable = (IUnkVtable *)(this + 1);
+ pComVtable = ((SLOT*)pUnkVtable) + cbExtraSlots;
+
+ // Method descs are at the end of the vtable
+ // m_cbSlots interfaces methods + IUnk methods
+ pMethodDescMemory = (BYTE *)&pComVtable[m_cbSlots];
+
+ for (i = 0; i < cbSlots; i++)
+ {
+ ComCallMethodDesc* pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ MethodDesc* pIntfMD = m_pMT->GetMethodDescForSlot(i);
+
+ emitCOMStubCall(pNewMD, GetEEFuncEntryPoint(ComCallPreStub));
+
+ UINT slotIndex = (pIntfMD->GetComSlot() - cbExtraSlots);
+ FillInComVtableSlot(pComVtable, slotIndex, pNewMD);
+
+ pMethodDescMemory += (COMMETHOD_PREPAD + sizeof(ComCallMethodDesc));
+ }
+
+ // Set the layout complete flag and release the lock.
+ m_Flags |= enum_LayoutComplete;
+ NewCOMMethodDescsHolder.SuppressRelease();
+ }
+
+#ifdef PROFILING_SUPPORTED
+ // Notify profiler of the CCW, so it can avoid double-counting.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackCCW());
+#if defined(_DEBUG)
+ WCHAR rIID[40]; // {00000000-0000-0000-0000-000000000000}
+ GuidToLPWSTR(m_IID, rIID, lengthof(rIID));
+ LOG((LF_CORPROF, LL_INFO100, "COMClassicVTableCreated Class:%hs, IID:%ls, vTbl:%#08x\n",
+ pItfClass->GetDebugClassName(), rIID, pUnkVtable));
+#else
+ LOG((LF_CORPROF, LL_INFO100, "COMClassicVTableCreated Class:%#x, IID:{%08x-...}, vTbl:%#08x\n",
+ pItfClass, m_IID.Data1, pUnkVtable));
+#endif
+ g_profControlBlock.pProfInterface->COMClassicVTableCreated((ClassID) TypeHandle(pItfClass).AsPtr(),
+ m_IID,
+ pUnkVtable,
+ m_cbSlots+cbExtraSlots);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ LOG((LF_INTEROP, LL_INFO1000, "LayOutInterfaceMethodTable: %s, this: %p [DONE]\n", pItfClass->GetDebugClassName(), this));
+
+ return TRUE;
+}
+
+void ComMethodTable::LayOutBasicMethodTable()
+{
+ CONTRACTL
+ {
+ PRECONDITION(m_pMT != NULL);
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+
+ IDispatchVtable* pDispVtable;
+
+ LOG((LF_INTEROP, LL_INFO1000, "LayOutBasicMethodTable: %s, this: %p\n", m_pMT->GetDebugClassName(), this));
+
+ //
+ // Set up the IUnknown and IDispatch methods. Each thread will write exactly the same values to the
+    // slots so we let it run concurrently and execute a memory barrier by doing FastInterlockOr at the end.
+ //
+
+ // IDispatch vtable follows the header
+ pDispVtable = (IDispatchVtable*)(this + 1);
+
+ // Setup IUnknown vtable
+ pDispVtable->m_qi = (SLOT)Unknown_QueryInterface;
+ pDispVtable->m_addref = (SLOT)Unknown_AddRef;
+ pDispVtable->m_release = (SLOT)Unknown_Release;
+
+
+ // Set up the common portion of the IDispatch vtable.
+ pDispVtable->m_GetTypeInfoCount = (SLOT)Dispatch_GetTypeInfoCount_Wrapper;
+ pDispVtable->m_GetTypeInfo = (SLOT)Dispatch_GetTypeInfo_Wrapper;
+
+ // If the class interface is a pure disp interface then we need to use the
+    // internal implementation of IDispatch for GetIDsOfNames and Invoke.
+ if (GetClassInterfaceType() == clsIfAutoDisp)
+ {
+ // Use the internal implementation.
+ pDispVtable->m_GetIDsOfNames = (SLOT)InternalDispatchImpl_GetIDsOfNames_Wrapper;
+ pDispVtable->m_Invoke = (SLOT)InternalDispatchImpl_Invoke_Wrapper;
+ }
+ else
+ {
+ // We need to set the entry points to the Dispatch versions which determine
+ // which implementation to use at runtime based on the class that implements
+ // the interface.
+ pDispVtable->m_GetIDsOfNames = (SLOT)Dispatch_GetIDsOfNames_Wrapper;
+ pDispVtable->m_Invoke = (SLOT)Dispatch_Invoke_Wrapper;
+ }
+
+#ifdef MDA_SUPPORTED
+#ifndef _DEBUG
+ // Only lay these out if the MDA is active when in retail.
+ if (NULL != MDA_GET_ASSISTANT(DirtyCastAndCallOnInterface))
+#endif
+ // Layout the assert stub slots so that people doing dirty casts get an assert telling
+ // them what's wrong.
+ {
+ SLOT* assertSlot = ((SLOT*)(pDispVtable + 1));
+ for (int i = 0; i < DEBUG_AssertSlots; i++)
+ {
+ assertSlot[i] = (SLOT)DirtyCast_Assert;
+ }
+ }
+#endif
+
+ //
+ // Set the layout complete flag.
+ //
+ FastInterlockOr((DWORD *)&m_Flags, enum_LayoutComplete);
+
+ LOG((LF_INTEROP, LL_INFO1000, "LayOutClassMethodTable: %s, this: %p [DONE]\n", m_pMT->GetDebugClassName(), this));
+}
+
+//--------------------------------------------------------------------------
+// Lay out a ComMethodTable that represents a WinRT delegate interface.
+//--------------------------------------------------------------------------
+void ComMethodTable::LayOutDelegateMethodTable()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(IsWinRTDelegate());
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ MethodTable *pDelegateMT = m_pMT;
+ ULONG cbExtraSlots = GetNumExtraSlots(ifVtable);
+
+ BYTE *pMethodDescMemory = NULL;
+ IUnkVtable* pUnkVtable;
+ SLOT *pComVtable;
+
+ // If this is a redirected delegate, then we need to get its WinRT ABI definition type
+ WinMDAdapter::RedirectedTypeIndex index = static_cast<WinMDAdapter::RedirectedTypeIndex>(0);
+ if (WinRTDelegateRedirector::ResolveRedirectedDelegate(pDelegateMT, &index))
+ {
+ pDelegateMT = WinRTDelegateRedirector::GetWinRTTypeForRedirectedDelegateIndex(index);
+
+ if (m_pMT->HasInstantiation())
+ {
+ pDelegateMT = TypeHandle(pDelegateMT).Instantiate(m_pMT->GetInstantiation()).AsMethodTable();
+ }
+ }
+
+ LOG((LF_INTEROP, LL_INFO1000, "LayOutDelegateMethodTable: %s, this: %p\n", pDelegateMT->GetDebugClassName(), this));
+
+ unsigned cbSlots = 1; // one slot for the Invoke method
+
+ //
+ // Allocate a temporary space to generate the vtable into.
+ //
+ S_UINT32 cbTempVtable = (S_UINT32(m_cbSlots) + S_UINT32(cbExtraSlots)) * S_UINT32(sizeof(SLOT));
+ cbTempVtable += S_UINT32(cbSlots) * S_UINT32((COMMETHOD_PREPAD + sizeof(ComCallMethodDesc)));
+
+ if (cbTempVtable.IsOverflow())
+ ThrowHR(COR_E_OVERFLOW);
+
+ NewArrayHolder<BYTE> pTempVtable = new BYTE[cbTempVtable.Value()];
+
+ pUnkVtable = (IUnkVtable *)pTempVtable.GetValue();
+ pComVtable = ((SLOT*)pUnkVtable) + cbExtraSlots;
+
+ // Method descs are at the end of the vtable
+ // m_cbSlots interfaces methods + IUnk methods
+ pMethodDescMemory = (BYTE *)&pComVtable[m_cbSlots];
+
+ // Setup IUnk vtable
+ pUnkVtable->m_qi = (SLOT)Unknown_QueryInterface;
+ pUnkVtable->m_addref = (SLOT)Unknown_AddRef;
+ pUnkVtable->m_release = (SLOT)Unknown_Release;
+
+ // Some space for a CALL xx xx xx xx stub is reserved before the beginning of the MethodDesc
+ ComCallMethodDescHolder NewMDHolder = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ MethodDesc* pInvokeMD = ((DelegateEEClass *)(pDelegateMT->GetClass()))->m_pInvokeMethod;
+
+ if (pInvokeMD->IsSharedByGenericInstantiations())
+ {
+ // we need an exact MD to represent the call
+ pInvokeMD = InstantiatedMethodDesc::FindOrCreateExactClassMethod(pDelegateMT, pInvokeMD);
+ }
+
+ NewMDHolder.GetValue()->InitMethod(pInvokeMD, NULL);
+ _ASSERTE(cbSlots == 1);
+
+ {
+ // Take the lock and copy data from the temporary vtable to this instance
+ CrstHolder ch(&g_CreateWrapperTemplateCrst);
+
+ if (IsLayoutComplete())
+ return;
+
+ // IUnk vtable follows the header
+ CopyMemory(this + 1, pUnkVtable, cbTempVtable.Value());
+
+ // Finish by emitting stubs and initializing the slots
+ pUnkVtable = (IUnkVtable *)(this + 1);
+ pComVtable = ((SLOT *)pUnkVtable) + cbExtraSlots;
+
+ // Method descs are at the end of the vtable
+ // m_cbSlots delegate methods + IUnk methods
+ pMethodDescMemory = (BYTE *)&pComVtable[m_cbSlots];
+
+ ComCallMethodDesc* pNewMD = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+ emitCOMStubCall(pNewMD, GetEEFuncEntryPoint(ComCallPreStub));
+
+ FillInComVtableSlot(pComVtable, 0, pNewMD);
+ _ASSERTE(cbSlots == 1);
+
+ // Set the layout complete flag and release the lock.
+ m_Flags |= enum_LayoutComplete;
+ NewMDHolder.SuppressRelease();
+ }
+
+#ifdef PROFILING_SUPPORTED
+ // Notify profiler of the CCW, so it can avoid double-counting.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackCCW());
+#if defined(_DEBUG)
+ WCHAR rIID[40]; // {00000000-0000-0000-0000-000000000000}
+ GuidToLPWSTR(m_IID, rIID, lengthof(rIID));
+ LOG((LF_CORPROF, LL_INFO100, "COMClassicVTableCreated Class:%hs, IID:%ls, vTbl:%#08x\n",
+ pDelegateMT->GetDebugClassName(), rIID, pUnkVtable));
+#else
+ LOG((LF_CORPROF, LL_INFO100, "COMClassicVTableCreated Class:%#x, IID:{%08x-...}, vTbl:%#08x\n",
+ pDelegateMT, m_IID.Data1, pUnkVtable));
+#endif
+ g_profControlBlock.pProfInterface->COMClassicVTableCreated((ClassID) TypeHandle(pDelegateMT).AsPtr(),
+ m_IID,
+ pUnkVtable,
+ m_cbSlots+cbExtraSlots);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ LOG((LF_INTEROP, LL_INFO1000, "LayOutDelegateMethodTable: %s, this: %p [DONE]\n", pDelegateMT->GetDebugClassName(), this));
+}
+
+//--------------------------------------------------------------------------
+// Retrieves the DispatchInfo associated with the COM method table. If
+// the DispatchInfo has not been initialized yet, it is initialized.
+//--------------------------------------------------------------------------
+DispatchInfo *ComMethodTable::GetDispatchInfo()
+{
+ CONTRACT (DispatchInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
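+ // Create-then-publish (sketch): the DispatchInfo is built outside any lock
+ // and published with an interlocked compare-exchange; if another thread
+ // wins the race, the holder simply frees the loser's copy.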
+ if (!m_pDispatchInfo)
+ {
+ // Create the DispatchInfo object.
+ NewHolder<DispatchInfo> pDispInfo = new DispatchInfo(m_pMT);
+
+ // Synchronize the DispatchInfo with the actual expando object.
+ pDispInfo->SynchWithManagedView();
+
+ // Publish the DispatchInfo into the class member in a thread-safe manner.
+ if (NULL == FastInterlockCompareExchangePointer(&m_pDispatchInfo, pDispInfo.GetValue(), NULL))
+ pDispInfo.SuppressRelease();
+
+ }
+
+ RETURN m_pDispatchInfo;
+}
+
+//--------------------------------------------------------------------------
+// Set an ITypeInfo pointer for the method table.
+//--------------------------------------------------------------------------
+void ComMethodTable::SetITypeInfo(ITypeInfo *pNew)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pNew));
+ }
+ CONTRACTL_END;
+
+ SafeComHolder<ITypeInfo> pOld;
+ pOld = InterlockedExchangeT(&m_pITypeInfo, pNew);
+
+ // ITypeInfo pointers are refcounted: AddRef the new pointer and release
+ // the old one, unless they are the same object.
+ if (pNew == pOld)
+ pOld.SuppressRelease();
+ else
+ SafeAddRef(pNew);
+}
+
+//--------------------------------------------------------------------------
+// Return the parent ComMethodTable.
+//--------------------------------------------------------------------------
+ComMethodTable *ComMethodTable::GetParentClassComMT()
+{
+ CONTRACT (ComMethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(IsIClassX());
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ MethodTable *pParentComPlusMT = m_pMT->GetComPlusParentMethodTable();
+ if (!pParentComPlusMT)
+ RETURN NULL;
+
+ ComCallWrapperTemplate *pTemplate = pParentComPlusMT->GetComCallWrapperTemplate();
+ if (!pTemplate)
+ RETURN NULL;
+
+ RETURN pTemplate->GetClassComMT();
+}
+
+//---------------------------------------------------------
+// ComCallWrapperTemplate::IIDToInterfaceTemplateCache
+//---------------------------------------------------------
+
+// Perf-critical cache lookup code; in particular, we want InlineIsEqualGUID to be inlined.
+#include <optsmallperfcritical.h>
+
+// Looks up an interface template in the cache.
+bool ComCallWrapperTemplate::IIDToInterfaceTemplateCache::LookupInterfaceTemplate(REFIID riid, ComCallWrapperTemplate **ppTemplate)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SpinLock::Holder lock(&m_lock);
+
+ for (SIZE_T i = 0; i < CACHE_SIZE; i++)
+ {
+ // is the item in use?
+ if (!m_items[i].IsFree())
+ {
+ // does the IID match?
+ if (InlineIsEqualGUID(m_items[i].m_iid, riid))
+ {
+ // mark the item as hot to help avoid eviction
+ m_items[i].MarkHot();
+ *ppTemplate = m_items[i].GetTemplate();
+ return true;
+ }
+ }
+ }
+
+ *ppTemplate = NULL;
+ return false;
+}
+
+#include <optdefault.h>
+
+// Inserts an interface template in the cache. If the cache is full and an item needs to be evicted,
+// it tries to find one that hasn't been recently used.
+void ComCallWrapperTemplate::IIDToInterfaceTemplateCache::InsertInterfaceTemplate(REFIID riid, ComCallWrapperTemplate *pTemplate)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SpinLock::Holder lock(&m_lock);
+
+ for (SIZE_T i = 0; i < CACHE_SIZE; i++)
+ {
+ // is the item free?
+ if (m_items[i].IsFree())
+ {
+ m_items[i].m_iid = riid;
+ m_items[i].SetTemplate(pTemplate);
+ return;
+ }
+ }
+
+ // the cache is full - find an item to evict and reset all items to "cold"
+ SIZE_T index_to_evict = 0;
+ for (SIZE_T i = 0; i < CACHE_SIZE; i++)
+ {
+ // is the item cold?
+ if (!m_items[i].IsHot())
+ {
+ index_to_evict = i;
+ }
+ m_items[i].MarkCold();
+ }
+
+ m_items[index_to_evict].m_iid = riid;
+ m_items[index_to_evict].SetTemplate(pTemplate);
+}
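+
+// Usage sketch: on the QI path the cache is consulted first and filled on a
+// miss (NULL is a legal cached value, recording a negative result):
+//
+//     ComCallWrapperTemplate *pItfTemplate;
+//     if (!pCache->LookupInterfaceTemplate(riid, &pItfTemplate))
+//     {
+//         pItfTemplate = ...; // resolve the template the slow way
+//         pCache->InsertInterfaceTemplate(riid, pItfTemplate);
+//     }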
+
+//---------------------------------------------------------
+// ComCallWrapperTemplate::CCWInterfaceMapIterator
+//---------------------------------------------------------
+ComCallWrapperTemplate::CCWInterfaceMapIterator::CCWInterfaceMapIterator(TypeHandle thClass, WinRTManagedClassFactory *pClsFact, bool fIterateRedirectedInterfaces)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = thClass.GetMethodTable();
+
+ // iterate interface map of the type
+ MethodTable::InterfaceMapIterator it = pMT->IterateInterfaceMap();
+ while (it.Next())
+ {
+ MethodTable *pItfMT = it.GetInterface();
+ AppendInterface(pItfMT, false);
+
+ if (fIterateRedirectedInterfaces && WinRTSupported())
+ {
+ WinMDAdapter::RedirectedTypeIndex redirectedIndex;
+ if (WinRTInterfaceRedirector::ResolveRedirectedInterface(pItfMT, &redirectedIndex) && pItfMT->IsLegalNonArrayWinRTType())
+ {
+ MethodTable *pWinRTItfMT = WinRTInterfaceRedirector::GetWinRTTypeForRedirectedInterfaceIndex(redirectedIndex);
+ if (pItfMT->HasInstantiation())
+ {
+ _ASSERTE(pWinRTItfMT->HasInstantiation());
+ pWinRTItfMT = TypeHandle(pWinRTItfMT).Instantiate(pItfMT->GetInstantiation()).GetMethodTable();
+ }
+
+ AppendInterface(pWinRTItfMT, true).m_RedirectedIndex = redirectedIndex;
+ }
+ }
+ }
+
+ // handle single dimensional arrays
+ if (WinRTSupported() && thClass.IsArray() && !pMT->IsMultiDimArray())
+ {
+ // We treat arrays as if they implemented IIterable<T>, IVector<T>, and IVectorView<T> (WinRT only)
+ TypeHandle thGenArg = thClass.AsArray()->GetArrayElementTypeHandle();
+ Instantiation inst(&thGenArg, 1);
+
+ BinderClassID id = (fIterateRedirectedInterfaces ? CLASS__IITERABLE : CLASS__IENUMERABLEGENERIC);
+ MethodTable *pWinRTItfMT = TypeHandle(MscorlibBinder::GetClass(id)).Instantiate(inst).AsMethodTable();
+
+ // if this IIterable<T>/IEnumerable<T> is an invalid WinRT type, skip it, so that the ComCallWrapperTemplate is still usable
+ if (pWinRTItfMT->IsLegalNonArrayWinRTType())
+ {
+ // append IIterable<T>/IEnumerable<T>
+ AppendInterface(pWinRTItfMT, fIterateRedirectedInterfaces).m_RedirectedIndex = WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IEnumerable;
+
+ // append IVector<T>/IList<T>
+ id = (fIterateRedirectedInterfaces ? CLASS__IVECTOR : CLASS__ILISTGENERIC);
+ pWinRTItfMT = TypeHandle(MscorlibBinder::GetClass(id)).Instantiate(inst).AsMethodTable();
+ _ASSERTE(pWinRTItfMT->IsLegalNonArrayWinRTType());
+
+ AppendInterface(pWinRTItfMT, fIterateRedirectedInterfaces).m_RedirectedIndex = WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IList;
+
+ // append IVectorView<T>/IReadOnlyList<T>
+ id = (fIterateRedirectedInterfaces ? CLASS__IVECTORVIEW : CLASS__IREADONLYLISTGENERIC);
+ pWinRTItfMT = TypeHandle(MscorlibBinder::GetClass(id)).Instantiate(inst).AsMethodTable();
+ _ASSERTE(pWinRTItfMT->IsLegalNonArrayWinRTType());
+
+ AppendInterface(pWinRTItfMT, fIterateRedirectedInterfaces).m_RedirectedIndex = WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IReadOnlyList;
+ }
+ }
+
+ // add factory and static interfaces
+ if (pClsFact != NULL)
+ {
+ SArray<MethodTable *> *pExtraInterfaces = pClsFact->GetFactoryInterfaces();
+ if (pExtraInterfaces != NULL)
+ {
+ COUNT_T NumInterfaces = pExtraInterfaces->GetCount();
+ for (COUNT_T i = 0; i < NumInterfaces; i++)
+ {
+ AppendInterface((*pExtraInterfaces)[i], false).m_dwIsFactoryInterface = true;
+ }
+ }
+
+ pExtraInterfaces = pClsFact->GetStaticInterfaces();
+ if (pExtraInterfaces != NULL)
+ {
+ COUNT_T NumInterfaces = pExtraInterfaces->GetCount();
+ for (COUNT_T i = 0; i < NumInterfaces; i++)
+ {
+ AppendInterface((*pExtraInterfaces)[i], false).m_dwIsStaticInterface = true;
+ }
+ }
+ }
+
+ Reset();
+}
+
+// Append a new interface to the m_Interfaces array.
+ComCallWrapperTemplate::CCWInterfaceMapIterator::InterfaceProps &ComCallWrapperTemplate::CCWInterfaceMapIterator::AppendInterface(MethodTable *pItfMT, bool isRedirected)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ InterfaceProps &props = *m_Interfaces.Append();
+
+ props.m_pItfMT = pItfMT;
+ props.m_RedirectedIndex = (WinMDAdapter::RedirectedTypeIndex)-1;
+ props.m_dwIsRedirectedInterface = isRedirected;
+ props.m_dwIsFactoryInterface = false;
+ props.m_dwIsStaticInterface = false;
+
+ return props;
+}
+
+//---------------------------------------------------------
+// One-time init
+//---------------------------------------------------------
+/*static*/
+void ComCallWrapperTemplate::Init()
+{
+ WRAPPER_NO_CONTRACT;
+
+ g_CreateWrapperTemplateCrst.Init(CrstWrapperTemplate, (CrstFlags)(CRST_REENTRANCY | CRST_HOST_BREAKABLE));
+}
+
+ComCallWrapperTemplate::ComCallWrapperTemplate()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+//--------------------------------------------------------------------------
+// void ComCallWrapperTemplate::Cleanup()
+// Cleanup the template
+//--------------------------------------------------------------------------
+void ComCallWrapperTemplate::Cleanup()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ PRECONDITION(!m_thClass.IsNull());
+ }
+ CONTRACTL_END;
+
+ for (unsigned j = 0; j < m_cbInterfaces; j++)
+ {
+ SLOT* pComVtable = m_rgpIPtr[j];
+
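+ // m_rgpIPtr[j] points just past the ComMethodTable header (at the first
+ // COM vtable slot), so stepping back one ComMethodTable recovers the header.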
+ if (pComVtable != 0)
+ {
+ ComMethodTable* pHeader = (ComMethodTable*)pComVtable-1;
+ pHeader->Release(); // release the vtable
+ }
+
+#ifdef _DEBUG
+ m_rgpIPtr[j] = (SLOT *)(size_t)INVALID_POINTER_CD;
+#endif
+ }
+
+ if (m_pClassComMT)
+ m_pClassComMT->Release();
+
+ if (m_pBasicComMT)
+ m_pBasicComMT->Release();
+
+ if (m_pIIDToInterfaceTemplateCache)
+ delete m_pIIDToInterfaceTemplateCache;
+
+ delete [] (BYTE *)this; // allocated as new BYTE[], so delete through BYTE*
+}
+
+
+LONG ComCallWrapperTemplate::AddRef()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return InterlockedIncrement(&m_cbRefCount);
+}
+
+LONG ComCallWrapperTemplate::Release()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_cbRefCount > 0);
+ }
+ CONTRACTL_END;
+
+ // use a local here because Cleanup will delete the object,
+ // so we can no longer access members afterwards
+ LONG cbRef = InterlockedDecrement(&m_cbRefCount);
+ if (cbRef == 0)
+ Cleanup();
+
+ return cbRef;
+}
+
+ComMethodTable* ComCallWrapperTemplate::GetClassComMT()
+{
+ CONTRACT (ComMethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(SupportsIClassX());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // First check the cache
+ if (m_pClassComMT)
+ RETURN m_pClassComMT;
+
+ MethodTable *pMT = m_thClass.GetMethodTable();
+
+ // Preload the policy for these classes before we take the lock.
+ for (MethodTable* pMethodTable = pMT; pMethodTable != NULL; pMethodTable = pMethodTable->GetParentMethodTable())
+ {
+ Security::CanCallUnmanagedCode(pMethodTable->GetModule());
+ }
+
+ // We haven't set it up yet, generate one.
+ ComMethodTable* pClassComMT;
+ if (pMT->IsDelegate() && (pMT->IsProjectedFromWinRT() || WinRTTypeNameConverter::IsRedirectedType(pMT)))
+ {
+ // WinRT delegates have a special class vtable
+ pClassComMT = CreateComMethodTableForDelegate(pMT);
+ }
+ else
+ {
+ pClassComMT = CreateComMethodTableForClass(pMT);
+ }
+ pClassComMT->AddRef();
+
+ // Cache it.
+ if (InterlockedCompareExchangeT(&m_pClassComMT, pClassComMT, NULL) != NULL)
+ {
+ pClassComMT->Release();
+ }
+
+ RETURN m_pClassComMT;
+}
+
+ComMethodTable* ComCallWrapperTemplate::GetComMTForItf(MethodTable *pItfMT)
+{
+ CONTRACT (ComMethodTable*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pItfMT));
+ PRECONDITION(pItfMT->IsInterface());
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // Look through all the implemented interfaces to see if the specified
+ // one is present yet.
+ for (UINT iItf = 0; iItf < m_cbInterfaces; iItf++)
+ {
+ ComMethodTable* pItfComMT = (ComMethodTable *)m_rgpIPtr[iItf] - 1;
+ if (pItfComMT && (pItfComMT->m_pMT == pItfMT))
+ RETURN pItfComMT;
+ }
+
+ // The class does not implement the specified interface.
+ RETURN NULL;
+}
+
+ComMethodTable* ComCallWrapperTemplate::GetComMTForIndex(ULONG ulItfIndex)
+{
+ CONTRACT (ComMethodTable*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(ulItfIndex < m_cbInterfaces);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ ComMethodTable *pItfComMT = (ComMethodTable *)m_rgpIPtr[ulItfIndex] - 1;
+ RETURN pItfComMT;
+}
+
+ComMethodTable* ComCallWrapperTemplate::GetBasicComMT()
+{
+ CONTRACT (ComMethodTable*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN m_pBasicComMT;
+}
+
+
+ULONG ComCallWrapperTemplate::GetNumInterfaces()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_cbInterfaces;
+}
+
+SLOT* ComCallWrapperTemplate::GetVTableSlot(ULONG index)
+{
+ CONTRACT (SLOT*)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(index >= 0 && index < m_cbInterfaces);
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN m_rgpIPtr[index];
+}
+
+BOOL ComCallWrapperTemplate::HasInvisibleParent()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_flags & enum_InvisibleParent);
+}
+
+// Determines whether the template is for a type that can be safely marshalled to
+// an out of proc COM client
+BOOL ComCallWrapperTemplate::IsSafeTypeForMarshalling()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ if (m_flags & enum_IsSafeTypeForMarshalling)
+ {
+ return TRUE;
+ }
+
+ if ((CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AllowDComReflection) != 0))
+ {
+ return TRUE;
+ }
+
+ BOOL isSafe = TRUE;
+ PTR_MethodTable pMt = this->GetClassType().GetMethodTable();
+ EX_TRY
+ {
+ // Do casting checks so that we handle derived types as well. The base blocked types are:
+ // System.Reflection.Assembly, System.Reflection.MemberInfo, System.Reflection.Module,
+ // System.Reflection.MethodBody, and System.Reflection.ParameterInfo.
+ // Some interesting derived types that get blocked as a result are:
+ // System.Type, System.Reflection.TypeInfo, System.Reflection.MethodInfo, and System.Reflection.FieldInfo
+ if (pMt->CanCastToClass(MscorlibBinder::GetClass(CLASS__ASSEMBLYBASE)) ||
+ pMt->CanCastToClass(MscorlibBinder::GetClass(CLASS__MEMBER)) ||
+ pMt->CanCastToClass(MscorlibBinder::GetClass(CLASS__MODULEBASE)) ||
+ pMt->CanCastToClass(MscorlibBinder::GetClass(CLASS__METHOD_BODY)) ||
+ pMt->CanCastToClass(MscorlibBinder::GetClass(CLASS__PARAMETER)))
+ {
+ isSafe = FALSE;
+ }
+ }
+ EX_CATCH
+ {
+ isSafe = FALSE;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (isSafe)
+ {
+ FastInterlockOr(&m_flags, enum_IsSafeTypeForMarshalling);
+ }
+
+ return isSafe;
+}
+
+//--------------------------------------------------------------------------
+// Checks to see if the parent of the current class interface is visible to COM.
+// Throws an InvalidOperationException if not.
+//--------------------------------------------------------------------------
+void ComCallWrapperTemplate::CheckParentComVisibility(BOOL fForIDispatch)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ // Throw an exception to report the error.
+ if (!CheckParentComVisibilityNoThrow(fForIDispatch))
+ COMPlusThrow(kInvalidOperationException, IDS_EE_COM_INVISIBLE_PARENT);
+}
+
+BOOL ComCallWrapperTemplate::CheckParentComVisibilityNoThrow(BOOL fForIDispatch)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ // If the parent is visible to COM then everything is ok.
+ if (!HasInvisibleParent())
+ return TRUE;
+
+#ifdef MDA_SUPPORTED
+ // Fire an MDA to help people diagnose the fact they are attempting to
+ // expose a class with a non COM visible base class to COM.
+ MDA_TRIGGER_ASSISTANT(NonComVisibleBaseClass, ReportViolation(m_thClass.GetMethodTable(), fForIDispatch));
+#endif
+
+ return FALSE;
+}
+
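+//--------------------------------------------------------------------------
+// Returns the default interface for the class. The computed
+// DefaultInterfaceType is memoized lock-free: it is OR'ed into the low bits
+// of m_flags together with the enum_DefaultInterfaceTypeComputed bit, so
+// subsequent calls read it straight out of the flags.
+//--------------------------------------------------------------------------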
+DefaultInterfaceType ComCallWrapperTemplate::GetDefaultInterface(MethodTable **ppDefaultItf)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if ((m_flags & enum_DefaultInterfaceTypeComputed) == 0)
+ {
+ // we have not computed the default interface yet
+ TypeHandle th;
+ DefaultInterfaceType defItfType = GetDefaultInterfaceForClassInternal(m_thClass, &th);
+
+ _ASSERTE(th.IsNull() || !th.IsTypeDesc());
+ m_pDefaultItf = th.AsMethodTable();
+
+ FastInterlockOr(&m_flags, enum_DefaultInterfaceTypeComputed | (DWORD)defItfType);
+ }
+
+ *ppDefaultItf = m_pDefaultItf;
+ return (DefaultInterfaceType)(m_flags & enum_DefaultInterfaceType);
+}
+
+//--------------------------------------------------------------------------
+// Creates a ComMethodTable for a class's IClassX.
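+//
+// The slot count is computed as (sketch):
+//
+//     cbTotalSlots = cbParentComMTSlots          // slots inherited from the
+//                                                // parent class's IClassX
+//                  + cbNewPublicFields * 2       // a getter and a setter per
+//                                                // newly COM-visible field
+//                  + cbNewPublicMethods;         // newly COM-visible methods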
+//--------------------------------------------------------------------------
+ComMethodTable* ComCallWrapperTemplate::CreateComMethodTableForClass(MethodTable *pClassMT)
+{
+ CONTRACT (ComMethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pClassMT));
+ PRECONDITION(!pClassMT->IsInterface());
+ PRECONDITION(!pClassMT->GetComPlusParentMethodTable() || pClassMT->GetComPlusParentMethodTable()->GetComCallWrapperTemplate());
+ PRECONDITION(SupportsIClassX());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ unsigned cbNewPublicFields = 0;
+ unsigned cbNewPublicMethods = 0;
+ MethodTable* pComPlusParentClass = pClassMT->GetComPlusParentMethodTable();
+ MethodTable* pParentClass = pClassMT->GetParentMethodTable();
+ MethodTable* pCurrParentClass = pComPlusParentClass;
+ MethodTable* pCurrMT = pClassMT;
+ InteropMethodTableData* pCurrParentInteropMT = NULL;
+ InteropMethodTableData* pCurrInteropMT = NULL;
+ CorClassIfaceAttr ClassItfType = pClassMT->GetComClassInterfaceType();
+ ComMethodTable *pParentComMT = NULL;
+ unsigned cbTotalParentFields = 0;
+ unsigned cbNumParentVirtualMethods = 0;
+ unsigned cbParentComMTSlots = 0;
+ unsigned i;
+ const unsigned cbExtraSlots = ComMethodTable::GetNumExtraSlots(ifDual);
+ CQuickEEClassPtrs apClassesToProcess;
+ int cClassesToProcess = 0;
+
+ // If the specified class has a parent, retrieve information about it.
+ // This ensures we always have space for the parent's slots.
+ if (pComPlusParentClass)
+ {
+ ComCallWrapperTemplate *pComPlusParentTemplate = pComPlusParentClass->GetComCallWrapperTemplate();
+ _ASSERTE(pComPlusParentTemplate);
+ pParentComMT = pComPlusParentTemplate->GetClassComMT();
+ cbParentComMTSlots = pParentComMT->GetNumSlots();
+ }
+
+ // We only set up the members of the class interface if the class doesn't
+ // have any generic instantiation in its hierarchy.
+ if (!pClassMT->HasGenericClassInstantiationInHierarchy())
+ {
+ LOG((LF_INTEROP, LL_INFO1000, "CreateComMethodTableForClass %s\n", pClassMT->GetClass()->GetDebugClassName()));
+ LOG((LF_INTEROP, LL_INFO1000, "parent class: %s\n", (pComPlusParentClass) ? pParentComMT->GetMethodTable()->GetClass()->GetDebugClassName() : 0));
+
+
+ // Create an array of all the classes for which we need to compute the added members.
+ do
+ {
+ apClassesToProcess.ReSizeThrows(cClassesToProcess + 2);
+ apClassesToProcess[cClassesToProcess++] = pCurrMT;
+ pCurrMT = pCurrMT->GetParentMethodTable();
+ }
+ while (pCurrMT != pComPlusParentClass);
+ apClassesToProcess[cClassesToProcess++] = pCurrMT;
+
+ // Compute the number of methods and fields that were added between our parent
+ // COM+ class and the current class. This includes methods on COM classes
+ // between the current class and its parent COM+ class.
+ for (cClassesToProcess -= 2; cClassesToProcess >= 0; cClassesToProcess--)
+ {
+ // Retrieve the current class and the current parent class.
+ pCurrMT = apClassesToProcess[cClassesToProcess];
+ pCurrInteropMT = pCurrMT->GetComInteropData();
+ _ASSERTE(pCurrInteropMT);
+
+ pCurrParentClass = apClassesToProcess[cClassesToProcess + 1];
+
+ // Retrieve the number of fields and vtable methods on the parent class.
+ if (pCurrParentClass)
+ {
+ cbTotalParentFields = pCurrParentClass->GetNumInstanceFields();
+ pCurrParentInteropMT = pCurrParentClass->GetComInteropData();
+ _ASSERTE(pCurrParentInteropMT);
+ cbNumParentVirtualMethods = pCurrParentInteropMT->cVTable;
+ }
+
+ // Compute the number of methods that were private but made public on this class.
+ for (i = 0; i < cbNumParentVirtualMethods; i++)
+ {
+ MethodDesc* pMD = NULL;
+ InteropMethodTableSlotData *pCurrInteropMD = NULL;
+ pCurrInteropMD = &pCurrInteropMT->pVTable[i];
+ pMD = pCurrInteropMD->pMD;
+
+ MethodDesc* pParentMD = NULL;
+ InteropMethodTableSlotData *pCurrParentInteropMD = NULL;
+ pCurrParentInteropMD = &pCurrParentInteropMT->pVTable[i];
+ pParentMD = pCurrParentInteropMD->pMD;
+
+ if (pMD &&
+ !(pCurrInteropMD ? IsDuplicateClassItfMD(pCurrInteropMD, i) : IsDuplicateClassItfMD(pMD, i)) &&
+ IsOverloadedComVisibleMember(pMD, pParentMD))
+ {
+ cbNewPublicMethods++;
+ }
+ }
+
+ // Compute the number of public methods that were added.
+ unsigned cbNumVirtualMethods = 0;
+ cbNumVirtualMethods = pCurrInteropMT->cVTable;
+
+ for (i = cbNumParentVirtualMethods; i < cbNumVirtualMethods; i++)
+ {
+ MethodDesc* pMD = NULL;
+ InteropMethodTableSlotData *pCurrInteropMD = NULL;
+ pCurrInteropMD = &pCurrInteropMT->pVTable[i];
+ pMD = pCurrInteropMD->pMD;
+
+ if (pMD &&
+ !(pCurrInteropMD ? IsDuplicateClassItfMD(pCurrInteropMD, i) : IsDuplicateClassItfMD(pMD, i)) &&
+ IsNewComVisibleMember(pMD))
+ {
+ cbNewPublicMethods++;
+ }
+ }
+
+ // Add the non virtual methods introduced on the current class.
+ MethodTable::MethodIterator it(pCurrMT);
+ for (; it.IsValid(); it.Next())
+ {
+ if (!it.IsVirtual())
+ {
+ MethodDesc* pMD = it.GetMethodDesc();
+ if (pMD && !IsDuplicateClassItfMD(pMD, it.GetSlotNumber()) && IsNewComVisibleMember(pMD) &&
+ !pMD->IsStatic() && !pMD->IsCtor() &&
+ (!pCurrMT->IsValueType() || (ClassItfType != clsIfAutoDual && IsStrictlyUnboxed(pMD))))
+ {
+ cbNewPublicMethods++;
+ }
+ }
+ }
+
+ // Compute the number of new public fields this class introduces.
+ // <TODO>check this approximation </TODO>
+ ApproxFieldDescIterator fdIterator(pCurrMT, ApproxFieldDescIterator::INSTANCE_FIELDS);
+ FieldDesc* pFD;
+
+ while ((pFD = fdIterator.Next()) != NULL)
+ {
+ if (IsMemberVisibleFromCom(pCurrMT, pFD->GetMemberDef(), mdTokenNil))
+ cbNewPublicFields++;
+ }
+ }
+ }
+
+
+ // Alloc space for the class method table, includes getter and setter
+ // for public fields
+ S_UINT32 cbNewSlots = S_UINT32(cbNewPublicFields) * S_UINT32(2) + S_UINT32(cbNewPublicMethods);
+ S_UINT32 cbTotalSlots = S_UINT32(cbParentComMTSlots) + cbNewSlots;
+
+ LOG((LF_INTEROP, LL_INFO1000, "cbExtraSlots: %d\n", cbExtraSlots));
+ LOG((LF_INTEROP, LL_INFO1000, "cbParentComMTSlots: %d\n", cbParentComMTSlots));
+ LOG((LF_INTEROP, LL_INFO1000, "cbNewSlots: %d\n", cbNewSlots.IsOverflow() ? 0 : cbNewSlots.Value()));
+ LOG((LF_INTEROP, LL_INFO1000, " cbNewPublicFields: %d\n", cbNewPublicFields));
+ LOG((LF_INTEROP, LL_INFO1000, " cbNewPublicMethods: %d\n", cbNewPublicMethods));
+ LOG((LF_INTEROP, LL_INFO1000, "cbTotalSlots: %d\n", cbTotalSlots.IsOverflow() ? 0 : cbTotalSlots.Value()));
+
+ // Alloc COM vtable & method descs
+ S_UINT32 cbVtable = (cbTotalSlots + S_UINT32(cbExtraSlots)) * S_UINT32(sizeof(SLOT));
+ S_UINT32 cbToAlloc = S_UINT32(sizeof(ComMethodTable)) + cbVtable;
+
+ if (cbToAlloc.IsOverflow())
+ ThrowHR(COR_E_OVERFLOW);
+
+ NewExecutableHolder<ComMethodTable> pComMT = (ComMethodTable*) new (executable) BYTE[cbToAlloc.Value()];
+
+ _ASSERTE(!cbNewSlots.IsOverflow() && !cbTotalSlots.IsOverflow() && !cbVtable.IsOverflow());
+
+
+ // set up the header
+ pComMT->m_ptReserved = (SLOT)(size_t)0xDEADC0FF; // reserved
+ pComMT->m_pMT = pClassMT; // pointer to the class method table
+ pComMT->m_cbRefCount = 0;
+ pComMT->m_pMDescr = NULL;
+ pComMT->m_pITypeInfo = NULL;
+ pComMT->m_pDispatchInfo = NULL;
+ pComMT->m_cbSlots = cbTotalSlots.Value(); // number of slots not counting IDisp methods.
+ pComMT->m_IID = GUID_NULL;
+
+
+ // Set the flags.
+ pComMT->m_Flags = enum_ClassVtableMask | ClassItfType;
+
+ // Determine if the interface is visible from COM.
+ if (IsTypeVisibleFromCom(TypeHandle(pComMT->m_pMT)))
+ pComMT->m_Flags |= enum_ComVisible;
+
+ if (!Security::CanCallUnmanagedCode(pComMT->m_pMT->GetModule()))
+ {
+ pComMT->m_Flags |= enum_IsUntrusted;
+ }
+
+
+#if _DEBUG
+ {
+ // In debug set all the vtable slots to 0xDEADCA11.
+ SLOT *pComVTable = (SLOT*)(pComMT + 1);
+ for (unsigned iComSlots = 0; iComSlots < cbTotalSlots.Value() + cbExtraSlots; iComSlots++)
+ *(pComVTable + iComSlots) = (SLOT)(size_t)0xDEADCA11;
+ }
+#endif
+
+ LOG((LF_INTEROP, LL_INFO1000, "---------- end of CreateComMethodTableForClass %s -----------\n", pClassMT->GetClass()->GetDebugClassName()));
+
+ pComMT.SuppressRelease();
+ RETURN pComMT;
+}
+
+//--------------------------------------------------------------------------
+// Creates a ComMethodTable for an interface.
+//--------------------------------------------------------------------------
+ComMethodTable* ComCallWrapperTemplate::CreateComMethodTableForInterface(MethodTable* pInterfaceMT)
+{
+ CONTRACT (ComMethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pInterfaceMT));
+ PRECONDITION(pInterfaceMT->IsInterface());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ MethodTable *pItfClass = pInterfaceMT;
+ CorIfaceAttr ItfType = pInterfaceMT->GetComInterfaceType();
+ ULONG cbExtraSlots = ComMethodTable::GetNumExtraSlots(ItfType);
+
+ LOG((LF_INTEROP, LL_INFO1000, "CreateComMethodTableForInterface %s\n", pItfClass->GetDebugClassName()));
+
+ // @todo get slots off the methodtable
+ unsigned cbSlots = pInterfaceMT->GetNumVirtuals();
+ unsigned cbComSlots = pInterfaceMT->IsSparseForCOMInterop() ? pInterfaceMT->GetClass()->GetSparseCOMInteropVTableMap()->GetNumVTableSlots() : cbSlots;
+
+ LOG((LF_INTEROP, LL_INFO1000, "cbExtraSlots = %d\n", cbExtraSlots));
+ LOG((LF_INTEROP, LL_INFO1000, "cbComSlots = %d\n", cbComSlots));
+ LOG((LF_INTEROP, LL_INFO1000, "cbSlots = %d\n", cbSlots));
+
+ S_UINT32 cbVtable = (S_UINT32(cbComSlots) + S_UINT32(cbExtraSlots)) * S_UINT32(sizeof(SLOT));
+ S_UINT32 cbMethDescs = S_UINT32(cbSlots) * S_UINT32((COMMETHOD_PREPAD + sizeof(ComCallMethodDesc)));
+ S_UINT32 cbToAlloc = S_UINT32(sizeof(ComMethodTable)) + cbVtable + cbMethDescs;
+
+ if (cbToAlloc.IsOverflow())
+ ThrowHR(COR_E_OVERFLOW);
+
+ NewExecutableHolder<ComMethodTable> pComMT = (ComMethodTable*) new (executable) BYTE[cbToAlloc.Value()];
+
+ _ASSERTE(!cbVtable.IsOverflow() && !cbMethDescs.IsOverflow());
+
+ // set up the header
+ pComMT->m_ptReserved = (SLOT)(size_t)0xDEADC0FF; // reserved
+ pComMT->m_pMT = pInterfaceMT; // pointer to the interface's method table
+ pComMT->m_cbSlots = cbComSlots; // number of slots not counting IUnk
+ pComMT->m_cbRefCount = 0;
+ pComMT->m_pMDescr = NULL;
+ pComMT->m_pITypeInfo = NULL;
+ pComMT->m_pDispatchInfo = NULL;
+
+ // Set the flags.
+ pComMT->m_Flags = ItfType;
+
+ // Set the IID of the interface.
+ pInterfaceMT->GetGuid(&pComMT->m_IID, TRUE);
+ pComMT->m_Flags |= enum_GuidGenerated;
+
+ // Determine if the interface is visible from COM.
+ if (IsTypeVisibleFromCom(TypeHandle(pComMT->m_pMT)))
+ pComMT->m_Flags |= enum_ComVisible;
+
+ // Determine if the interface is a COM imported class interface.
+ if (pItfClass->GetClass()->IsComClassInterface())
+ pComMT->m_Flags |= enum_ComClassItf;
+
+ if (!Security::CanCallUnmanagedCode(pComMT->m_pMT->GetModule()))
+ {
+ pComMT->m_Flags |= enum_IsUntrusted;
+ }
+
+#ifdef _DEBUG
+ {
+ // In debug set all the vtable slots to 0xDEADCA11.
+ SLOT *pComVTable = (SLOT*)(pComMT + 1);
+ for (unsigned iComSlots = 0; iComSlots < cbComSlots + cbExtraSlots; iComSlots++)
+ *(pComVTable + iComSlots) = (SLOT)(size_t)0xDEADCA11;
+ }
+#endif
+
+ MethodTable *pMT = m_thClass.GetMethodTable();
+ MethodTable *pParentMT = pMT->GetParentMethodTable();
+ if (pParentMT != NULL && pParentMT->IsProjectedFromWinRT())
+ {
+ // Determine if this class has overridden any methods on the interface. If not, we'll
+ // set a flag so that when a QI comes in, we return the base WinRT object directly.
+ if (pMT->HasSameInterfaceImplementationAsParent(pInterfaceMT, pParentMT))
+ {
+ pComMT->m_Flags |= enum_IsWinRTTrivialAggregate;
+ }
+ }
+
+ LOG((LF_INTEROP, LL_INFO1000, "---------- end of CreateComMethodTableForInterface %s -----------\n", pItfClass->GetDebugClassName()));
+
+ pComMT.SuppressRelease();
+ RETURN pComMT;
+}
+
+ComMethodTable* ComCallWrapperTemplate::CreateComMethodTableForBasic(MethodTable* pMT)
+{
+ CONTRACT (ComMethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ const unsigned cbExtraSlots = ComMethodTable::GetNumExtraSlots(ifDispatch);
+ CorClassIfaceAttr ClassItfType = pMT->GetComClassInterfaceType();
+
+ LOG((LF_INTEROP, LL_INFO1000, "CreateComMethodTableForBasic %s\n", pMT->GetDebugClassName()));
+
+ unsigned cbVtable = cbExtraSlots * sizeof(SLOT);
+ unsigned cbToAlloc = sizeof(ComMethodTable) + cbVtable;
+
+#ifdef MDA_SUPPORTED
+#ifndef _DEBUG
+ // In retail builds, only add these slots if the MDA is active.
+ if (NULL != MDA_GET_ASSISTANT(DirtyCastAndCallOnInterface))
+#endif
+ {
+ // Add some extra slots that will assert to catch dirty casts.
+ cbToAlloc += sizeof(SLOT) * DEBUG_AssertSlots;
+ }
+#endif
+
+ NewExecutableHolder<ComMethodTable> pComMT = (ComMethodTable*) new (executable) BYTE[cbToAlloc];
+
+ // set up the header
+ pComMT->m_ptReserved = (SLOT)(size_t)0xDEADC0FF;
+ pComMT->m_pMT = pMT;
+ pComMT->m_cbSlots = 0; // number of slots not counting IUnk
+ pComMT->m_cbRefCount = 0;
+ pComMT->m_pMDescr = NULL;
+ pComMT->m_pITypeInfo = NULL;
+ pComMT->m_pDispatchInfo = NULL;
+
+ // Initialize the flags.
+ pComMT->m_Flags = enum_IsBasic;
+ pComMT->m_Flags |= enum_ClassVtableMask | ClassItfType;
+
+ // Set the IID of the interface.
+ pComMT->m_IID = IID_IUnknown;
+ pComMT->m_Flags |= enum_GuidGenerated;
+
+ // Determine if the interface is visible from COM.
+ if (IsTypeVisibleFromCom(TypeHandle(pComMT->m_pMT)))
+ pComMT->m_Flags |= enum_ComVisible;
+
+ // Determine if the interface is a COM imported class interface.
+ if (pMT->GetClass()->IsComClassInterface())
+ pComMT->m_Flags |= enum_ComClassItf;
+
+ if (!Security::CanCallUnmanagedCode(pMT->GetModule()))
+ {
+ pComMT->m_Flags |= enum_IsUntrusted;
+ }
+
+#ifdef MDA_SUPPORTED
+#ifdef _DEBUG
+ {
+ // In debug set all the vtable slots to 0xDEADCA11.
+ SLOT *pComVTable = (SLOT*)(pComMT + 1);
+ for (unsigned iComSlots = 0; iComSlots < DEBUG_AssertSlots + cbExtraSlots; iComSlots++)
+ *(pComVTable + iComSlots) = (SLOT)(size_t)0xDEADCA11;
+ }
+#endif
+#endif
+
+
+ LOG((LF_INTEROP, LL_INFO1000, "---------- end of CreateComMethodTableForBasic %s -----------\n", pMT->GetDebugClassName()));
+
+ pComMT.SuppressRelease();
+ RETURN pComMT;
+}
+
+//--------------------------------------------------------------------------
+// Creates a ComMethodTable for a WinRT-exposed delegate class.
+//--------------------------------------------------------------------------
+ComMethodTable* ComCallWrapperTemplate::CreateComMethodTableForDelegate(MethodTable *pDelegateMT)
+{
+ CONTRACT (ComMethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pDelegateMT));
+ PRECONDITION(pDelegateMT->IsDelegate());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ const unsigned cbExtraSlots = ComMethodTable::GetNumExtraSlots(ifVtable);
+ const unsigned cbTotalSlots = 1; // one slot for the Invoke method
+
+ LOG((LF_INTEROP, LL_INFO1000, "CreateComMethodTableForDelegate %s\n", pDelegateMT->GetClass()->GetDebugClassName()));
+
+ // Alloc space for the delegate COM method table
+ LOG((LF_INTEROP, LL_INFO1000, "cbExtraSlots: %d\n", cbExtraSlots));
+ LOG((LF_INTEROP, LL_INFO1000, "cbTotalSlots: %d\n", cbTotalSlots));
+
+ // Alloc COM vtable & method descs
+ S_UINT32 cbVtable = (S_UINT32(cbTotalSlots) + S_UINT32(cbExtraSlots)) * S_UINT32(sizeof(SLOT));
+ S_UINT32 cbMethDescs = S_UINT32(cbTotalSlots) * S_UINT32((COMMETHOD_PREPAD + sizeof(ComCallMethodDesc)));
+ S_UINT32 cbToAlloc = S_UINT32(sizeof(ComMethodTable)) + cbVtable + cbMethDescs;
+
+ if (cbToAlloc.IsOverflow())
+ ThrowHR(COR_E_OVERFLOW);
+
+ NewExecutableHolder<ComMethodTable> pComMT = (ComMethodTable*) new (executable) BYTE[cbToAlloc.Value()];
+
+ // set up the header
+ pComMT->m_ptReserved = (SLOT)(size_t)0xDEADC0FF; // reserved
+ pComMT->m_pMT = pDelegateMT;
+ pComMT->m_cbRefCount = 0;
+ pComMT->m_pMDescr = NULL;
+ pComMT->m_pITypeInfo = NULL;
+ pComMT->m_pDispatchInfo = NULL;
+ pComMT->m_cbSlots = cbTotalSlots; // number of slots not counting IUnknown methods.
+
+ // Set the flags.
+ pComMT->m_Flags = enum_ClassVtableMask | clsIfNone | enum_ComVisible | enum_IsWinRTDelegate;
+
+ MethodTable *pMTForIID = pDelegateMT;
+ WinMDAdapter::RedirectedTypeIndex index = static_cast<WinMDAdapter::RedirectedTypeIndex>(0);
+ if (WinRTDelegateRedirector::ResolveRedirectedDelegate(pDelegateMT, &index))
+ {
+ pMTForIID = WinRTDelegateRedirector::GetWinRTTypeForRedirectedDelegateIndex(index);
+
+ if (pDelegateMT->HasInstantiation())
+ {
+ pMTForIID = TypeHandle(pMTForIID).Instantiate(pDelegateMT->GetInstantiation()).AsMethodTable();
+ }
+ }
+ pMTForIID->GetGuid(&pComMT->m_IID, TRUE);
+
+ pComMT->m_Flags |= enum_GuidGenerated;
+
+ if (!Security::CanCallUnmanagedCode(pComMT->m_pMT->GetModule()))
+ {
+ pComMT->m_Flags |= enum_IsUntrusted;
+ }
+
+#if _DEBUG
+ {
+ // In debug set all the vtable slots to 0xDEADCA11.
+ SLOT *pComVTable = (SLOT*)(pComMT + 1);
+ for (unsigned iComSlots = 0; iComSlots < cbTotalSlots + cbExtraSlots; iComSlots++)
+ *(pComVTable + iComSlots) = (SLOT)(size_t)0xDEADCA11;
+ }
+#endif
+
+ LOG((LF_INTEROP, LL_INFO1000, "---------- end of CreateComMethodTableForDelegate %s -----------\n", pDelegateMT->GetClass()->GetDebugClassName()));
+
+ pComMT.SuppressRelease();
+ RETURN pComMT;
+}
+
+//--------------------------------------------------------------------------
+// Creates a ComMethodTable for an interface and stores it in the m_rgpIPtr array.
+//--------------------------------------------------------------------------
+ComMethodTable *ComCallWrapperTemplate::InitializeForInterface(MethodTable *pParentMT, MethodTable *pItfMT, DWORD dwIndex)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ComMethodTable *pItfComMT = NULL;
+ if (m_pParent != NULL)
+ {
+ pItfComMT = m_pParent->GetComMTForItf(pItfMT);
+ if (pItfComMT != NULL)
+ {
+ if (pItfComMT->IsWinRTTrivialAggregate())
+ {
+ // if the parent COM MT is a trivial aggregate, we must verify that the same is true at this level
+ if (!m_thClass.GetMethodTable()->HasSameInterfaceImplementationAsParent(pItfMT, pParentMT))
+ {
+ // the interface is implemented by parent but this class reimplemented/overrode
+ // its method(s) so we will need to build a new COM vtable for it
+ pItfComMT = NULL;
+ }
+ }
+ else
+ {
+ // if the parent COM MT is not a trivial aggregate, a simple MethodTable slot check is enough
+ if (!m_thClass.GetMethodTable()->ImplementsInterfaceWithSameSlotsAsParent(pItfMT, pParentMT))
+ {
+ // the interface is implemented by parent but this class reimplemented
+ // its method(s) so we will need to build a new COM vtable for it
+ pItfComMT = NULL;
+ }
+ }
+ }
+ }
+
+ if (pItfComMT == NULL)
+ {
+ // we couldn't use parent's vtable so we create a new one
+ pItfComMT = CreateComMethodTableForInterface(pItfMT);
+ }
+
+ m_rgpIPtr[dwIndex] = (SLOT*)(pItfComMT + 1);
+ pItfComMT->AddRef();
+
+ if (pItfMT->HasVariance())
+ {
+ m_flags |= enum_SupportsVariantInterface;
+ }
+
+ // update pItfMT in case code:CreateComMethodTableForInterface decided to redirect the interface
+ pItfMT = pItfComMT->GetMethodTable();
+ if (pItfMT == MscorlibBinder::GetExistingClass(CLASS__ICUSTOM_QUERYINTERFACE))
+ {
+ m_flags |= enum_ImplementsICustomQueryInterface;
+ }
+ else if (pItfMT->GetComInterfaceType() == ifInspectable)
+ {
+ m_flags |= enum_SupportsIInspectable;
+ }
+ else if (InlineIsEqualGUID(pItfComMT->GetIID(), IID_IMarshal))
+ {
+ // detect IMarshal so we can handle IAgileObject in a backward compatible way
+ m_flags |= enum_ImplementsIMarshal;
+ }
+
+ return pItfComMT;
+}
+
+
+//--------------------------------------------------------------------------
+// ComCallWrapperTemplate* ComCallWrapperTemplate::CreateTemplate(TypeHandle thClass, WinRTManagedClassFactory *pClsFact)
+// Create a template wrapper, which is cached in the class and used for
+// initializing other wrappers for instances of the class.
+//--------------------------------------------------------------------------
+ComCallWrapperTemplate* ComCallWrapperTemplate::CreateTemplate(TypeHandle thClass, WinRTManagedClassFactory *pClsFact /* = NULL */)
+{
+ CONTRACT (ComCallWrapperTemplate*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(!thClass.IsNull());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ GCX_PREEMP();
+
+ if (pClsFact == NULL && !thClass.IsTypeDesc() && !thClass.AsMethodTable()->HasCCWTemplate())
+ {
+ // Canonicalize the class type because we are going to stick the template pointer to EEClass.
+ thClass = thClass.GetCanonicalMethodTable();
+ }
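+ // (For example, under shared generics two reference-type instantiations
+ // such as List<string> and List<Exception> share a canonical method table,
+ // so they also end up sharing a single CCW template.)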
+ MethodTable *pMT = thClass.GetMethodTable();
+
+ MethodTable *pParentMT = pMT->GetComPlusParentMethodTable();
+ ComCallWrapperTemplate *pParentTemplate = NULL;
+ unsigned iItf = 0;
+
+ // Create the parent's template if it has not been created yet.
+ if (pParentMT)
+ {
+ pParentTemplate = pParentMT->GetComCallWrapperTemplate();
+ if (!pParentTemplate)
+ pParentTemplate = CreateTemplate(pParentMT);
+ }
+
+ // Preload the security policy for the implemented interfaces
+ CCWInterfaceMapIterator it(thClass, pClsFact, true);
+ while (it.Next())
+ {
+ Module *pModule = it.GetInterface()->GetModule();
+ Security::CanCallUnmanagedCode(pModule);
+ }
+
+ // Num interfaces in the template.
+ unsigned numInterfaces = it.GetCount();
+
+ // Check to see if another thread has already set up the template.
+ {
+ // Move this inside the scope so it is destroyed before its memory is.
+ ComCallWrapperTemplateHolder pTemplate = NULL;
+
+ if (pClsFact != NULL)
+ pTemplate = pClsFact->GetComCallWrapperTemplate();
+ else
+ pTemplate = thClass.GetComCallWrapperTemplate();
+
+ if (pTemplate)
+ {
+ pTemplate.SuppressRelease();
+ RETURN pTemplate;
+ }
+
+ // Allocate the template.
+ pTemplate = (ComCallWrapperTemplate*)new BYTE[sizeof(ComCallWrapperTemplate) + numInterfaces * sizeof(SLOT)];
+
+ // Store the information required by the template.
+ // Also, eagerly set vars to NULL, in case we are interrupted during construction
+ // and try to destruct the template.
+ memset(pTemplate->m_rgpIPtr, 0, numInterfaces * sizeof(SLOT));
+ pTemplate->m_thClass = thClass;
+ pTemplate->m_cbInterfaces = numInterfaces;
+ pTemplate->m_pParent = pParentTemplate;
+ pTemplate->m_cbRefCount = 1;
+ pTemplate->m_pClassComMT = NULL; // Defer setting this up.
+ pTemplate->m_pBasicComMT = NULL;
+ pTemplate->m_pDefaultItf = NULL;
+ pTemplate->m_pWinRTRuntimeClass = (pClsFact != NULL ? pClsFact->GetClass() : NULL);
+ pTemplate->m_pICustomQueryInterfaceGetInterfaceMD = NULL;
+ pTemplate->m_pIIDToInterfaceTemplateCache = NULL;
+ pTemplate->m_flags = 0;
+
+ // Determine the COM visibility of classes in our hierarchy.
+ pTemplate->DetermineComVisibility();
+
+ // Eagerly create the basic CMT.
+ pTemplate->m_pBasicComMT = pTemplate->CreateComMethodTableForBasic(pMT);
+ pTemplate->m_pBasicComMT->AddRef();
+
+ if (ClassSupportsIClassX(pMT))
+ {
+ // we will allow building IClassX for the class
+ pTemplate->m_flags |= enum_SupportsIClassX;
+ }
+
+ if (pMT->IsArray() && !pMT->IsMultiDimArray())
+ {
+ // SZ arrays support covariant interfaces
+ pTemplate->m_flags |= enum_SupportsVariantInterface;
+ }
+
+ if (IsOleAutDispImplRequiredForClass(pMT))
+ {
+ // Determine what IDispatch implementation this class should use
+ pTemplate->m_flags |= enum_UseOleAutDispatchImpl;
+ }
+
+ // Eagerly create the interface CMTs.
+ // When we iterate the interfaces implemented by the method table, we can
+ // check whether each interface supports ICustomQueryInterface.
+ MscorlibBinder::GetClass(CLASS__ICUSTOM_QUERYINTERFACE);
+
+ it.Reset();
+ while (it.Next())
+ {
+ MethodTable *pItfMT = it.GetInterface();
+ ComMethodTable *pItfComMT = pTemplate->InitializeForInterface(pParentMT, pItfMT, it.GetIndex());
+
+ if (it.IsRedirectedInterface())
+ pItfComMT->SetWinRTRedirectedInterfaceIndex(it.GetRedirectedInterfaceIndex());
+ else if (it.IsFactoryInterface())
+ pItfComMT->SetIsWinRTFactoryInterface();
+ else if (it.IsStaticInterface())
+ pItfComMT->SetIsWinRTStaticInterface();
+ }
+
+ if (pClsFact != NULL)
+ {
+ // Cache the template in the class factory
+ if (!pClsFact->SetComCallWrapperTemplate(pTemplate))
+ {
+ // another thread beat us to it
+ pTemplate = pClsFact->GetComCallWrapperTemplate();
+ _ASSERTE(pTemplate != NULL);
+
+ pTemplate.SuppressRelease();
+ RETURN pTemplate;
+ }
+ }
+ else
+ {
+ // Cache the template in class.
+ if (!thClass.SetComCallWrapperTemplate(pTemplate))
+ {
+ // another thread beat us to it
+ pTemplate = thClass.GetComCallWrapperTemplate();
+ _ASSERTE(pTemplate != NULL);
+
+ pTemplate.SuppressRelease();
+ RETURN pTemplate;
+ }
+ }
+ pTemplate.SuppressRelease();
+
+#ifdef PROFILING_SUPPORTED
+ // Notify profiler of the CCW, so it can avoid double-counting.
+ if (pTemplate->SupportsIClassX())
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackCCW());
+ // When under the profiler, we'll eagerly generate the IClassX CMT.
+ pTemplate->GetClassComMT();
+
+ IID IClassXIID = GUID_NULL;
+ SLOT *pComVtable = (SLOT *)(pTemplate->m_pClassComMT + 1);
+
+ // If the class is visible from COM, then give out the IClassX IID.
+ if (pTemplate->m_pClassComMT->IsComVisible())
+ GenerateClassItfGuid(thClass, &IClassXIID);
+
+#if defined(_DEBUG)
+ WCHAR rIID[40]; // {00000000-0000-0000-0000-000000000000}
+ GuidToLPWSTR(IClassXIID, rIID, lengthof(rIID));
+ SString ssName;
+ thClass.GetName(ssName);
+ LOG((LF_CORPROF, LL_INFO100, "COMClassicVTableCreated Class:%ls, IID:%ls, vTbl:%#08x\n",
+ ssName.GetUnicode(), rIID, pComVtable));
+#else
+ LOG((LF_CORPROF, LL_INFO100, "COMClassicVTableCreated TypeHandle:%#x, IID:{%08x-...}, vTbl:%#08x\n",
+ thClass.AsPtr(), IClassXIID.Data1, pComVtable));
+#endif
+ g_profControlBlock.pProfInterface->COMClassicVTableCreated(
+ (ClassID) thClass.AsPtr(), IClassXIID, pComVtable,
+ pTemplate->m_pClassComMT->m_cbSlots +
+ ComMethodTable::GetNumExtraSlots(pTemplate->m_pClassComMT->GetInterfaceType()));
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+ RETURN pTemplate;
+ }
+}
+
+//--------------------------------------------------------------------------
+// Creates a new Template for just one interface.
+//--------------------------------------------------------------------------
+ComCallWrapperTemplate *ComCallWrapperTemplate::CreateTemplateForInterface(MethodTable *pItfMT)
+{
+ CONTRACT (ComCallWrapperTemplate*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pItfMT));
+ PRECONDITION(pItfMT->IsInterface());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ GCX_PREEMP();
+
+ // Num interfaces in the template.
+ unsigned numInterfaces = 1;
+
+ // Check to see if another thread has already set up the template.
+ ComCallWrapperTemplateHolder pTemplate = pItfMT->GetComCallWrapperTemplate();
+ if (pTemplate)
+ {
+ pTemplate.SuppressRelease();
+ RETURN pTemplate;
+ }
+
+ // Allocate the template.
+ pTemplate = (ComCallWrapperTemplate *)new BYTE[sizeof(ComCallWrapperTemplate) + numInterfaces * sizeof(SLOT)];
+
+ // Store the information required by the template.
+ // Also, eagerly set vars to NULL, in case we are interrupted during construction
+ // and try to destruct the template.
+ memset(pTemplate->m_rgpIPtr, 0, numInterfaces * sizeof(SLOT));
+ pTemplate->m_thClass = TypeHandle(pItfMT);
+ pTemplate->m_cbInterfaces = numInterfaces;
+ pTemplate->m_pParent = NULL;
+ pTemplate->m_cbRefCount = 1;
+ pTemplate->m_pClassComMT = NULL;
+ pTemplate->m_pBasicComMT = NULL;
+ pTemplate->m_pDefaultItf = pItfMT;
+ pTemplate->m_pWinRTRuntimeClass = NULL;
+ pTemplate->m_pICustomQueryInterfaceGetInterfaceMD = NULL;
+ pTemplate->m_pIIDToInterfaceTemplateCache = NULL;
+ pTemplate->m_flags = enum_RepresentsVariantInterface;
+
+ // Initialize the one ComMethodTable
+ ComMethodTable *pItfComMT;
+
+ WinMDAdapter::RedirectedTypeIndex redirectedInterfaceIndex;
+ if (WinRTInterfaceRedirector::ResolveRedirectedInterface(pItfMT, &redirectedInterfaceIndex))
+ {
+ // pItfMT is redirected, initialize the ComMethodTable accordingly
+ MethodTable *pWinRTItfMT = WinRTInterfaceRedirector::GetWinRTTypeForRedirectedInterfaceIndex(redirectedInterfaceIndex);
+ pWinRTItfMT = TypeHandle(pWinRTItfMT).Instantiate(pItfMT->GetInstantiation()).GetMethodTable();
+
+ pItfComMT = pTemplate->InitializeForInterface(NULL, pWinRTItfMT, 0);
+ pItfComMT->SetWinRTRedirectedInterfaceIndex(redirectedInterfaceIndex);
+ }
+ else
+ {
+ pItfComMT = pTemplate->InitializeForInterface(NULL, pItfMT, 0);
+ }
+
+ // Cache the template on the interface.
+ if (!pItfMT->SetComCallWrapperTemplate(pTemplate))
+ {
+ // another thread beat us to it
+ pTemplate = pItfMT->GetComCallWrapperTemplate();
+ _ASSERTE(pTemplate != NULL);
+ }
+
+ pTemplate.SuppressRelease();
+ RETURN pTemplate;
+}
+
+void ComCallWrapperTemplate::DetermineComVisibility()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_flags &= (~enum_InvisibleParent);
+
+ // If the legacy config switch is set, we skip this and leave everything visible.
+ if ((g_pConfig == NULL) || (g_pConfig->LegacyComHierarchyVisibility() == FALSE))
+ {
+ // If there is no parent, leave it as false.
+ if (m_pParent == NULL)
+ return;
+
+ // If our parent has an invisible parent
+ if (m_pParent->HasInvisibleParent())
+ {
+ m_flags |= enum_InvisibleParent;
+ }
+ // If our parent is invisible
+ else if (!IsTypeVisibleFromCom(m_pParent->m_thClass))
+ {
+ m_flags |= enum_InvisibleParent;
+ }
+ }
+}
+
+//--------------------------------------------------------------------------
+// ComCallWrapperTemplate* ComCallWrapperTemplate::GetTemplate(TypeHandle thType)
+// Look for a template on the type; if one doesn't exist, create it.
+//--------------------------------------------------------------------------
+ComCallWrapperTemplate* ComCallWrapperTemplate::GetTemplate(TypeHandle thType)
+{
+ CONTRACT (ComCallWrapperTemplate*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+
+ // Check to see if the specified class already has a template set up.
+ ComCallWrapperTemplate* pTemplate = thType.GetComCallWrapperTemplate();
+ if (pTemplate)
+ RETURN pTemplate;
+
+ // Create the template and return it. CreateTemplate will take care of synchronization.
+ if (thType.IsInterface())
+ {
+ RETURN CreateTemplateForInterface(thType.AsMethodTable());
+ }
+ else
+ {
+ RETURN CreateTemplate(thType);
+ }
+}
+
+//--------------------------------------------------------------------------
+// ComMethodTable *ComCallWrapperTemplate::SetupComMethodTableForClass(MethodTable *pMT, BOOL bLayOutComMT)
+// Sets up the wrapper template for the specified class and sets up a COM
+// method table for the IClassX interface of the specified class. If the
+// bLayOutComMT flag is set and the IClassX COM method table has not been
+// laid out yet, it will be laid out here.
+//--------------------------------------------------------------------------
+ComMethodTable *ComCallWrapperTemplate::SetupComMethodTableForClass(MethodTable *pMT, BOOL bLayOutComMT)
+{
+ CONTRACT (ComMethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(!pMT->IsInterface());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // Retrieve the COM call wrapper template for the class.
+ ComCallWrapperTemplate *pTemplate = GetTemplate(pMT);
+
+ // Retrieve the IClassX COM method table.
+ ComMethodTable *pIClassXComMT = pTemplate->GetClassComMT();
+ _ASSERTE(pIClassXComMT);
+
+ // Lay out the IClassX COM method table if it hasn't been laid out yet and
+ // the bLayOutComMT flag is set.
+ if (!pIClassXComMT->IsLayoutComplete() && bLayOutComMT)
+ {
+ pIClassXComMT->LayOutClassMethodTable();
+ _ASSERTE(pIClassXComMT->IsLayoutComplete());
+ }
+
+ RETURN pIClassXComMT;
+}
+
+
+MethodDesc * ComCallWrapperTemplate::GetICustomQueryInterfaceGetInterfaceMD()
+{
+ CONTRACT (MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_flags & enum_ImplementsICustomQueryInterface);
+ }
+ CONTRACT_END;
+
+ if (m_pICustomQueryInterfaceGetInterfaceMD == NULL)
+ m_pICustomQueryInterfaceGetInterfaceMD = m_thClass.GetMethodTable()->GetMethodDescForInterfaceMethod(
+ MscorlibBinder::GetMethod(METHOD__ICUSTOM_QUERYINTERFACE__GET_INTERFACE));
+ RETURN m_pICustomQueryInterfaceGetInterfaceMD;
+}
+
+ComCallWrapperTemplate::IIDToInterfaceTemplateCache *ComCallWrapperTemplate::GetOrCreateIIDToInterfaceTemplateCache()
+{
+ CONTRACT (IIDToInterfaceTemplateCache *)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ IIDToInterfaceTemplateCache *pCache = m_pIIDToInterfaceTemplateCache.Load();
+ if (pCache == NULL)
+ {
+ pCache = new IIDToInterfaceTemplateCache();
+
+ IIDToInterfaceTemplateCache *pOldCache = InterlockedCompareExchangeT(&m_pIIDToInterfaceTemplateCache, pCache, NULL);
+ if (pOldCache != NULL)
+ {
+ delete pCache;
+ RETURN pOldCache;
+ }
+ }
+ RETURN pCache;
+}
+
+
+//--------------------------------------------------------------------------
+// Module* ComCallMethodDesc::GetModule()
+// Get Module
+//--------------------------------------------------------------------------
+Module* ComCallMethodDesc::GetModule()
+{
+ CONTRACT (Module*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION( IsFieldCall() ? (m_pFD != NULL) : (m_pMD != NULL) );
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ MethodTable* pClass = (IsFieldCall()) ? m_pFD->GetEnclosingMethodTable() : m_pMD->GetMethodTable();
+ _ASSERTE(pClass != NULL);
+
+ RETURN pClass->GetModule();
+}
diff --git a/src/vm/comcallablewrapper.h b/src/vm/comcallablewrapper.h
new file mode 100644
index 0000000000..d198166631
--- /dev/null
+++ b/src/vm/comcallablewrapper.h
@@ -0,0 +1,2690 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: Com Callable wrapper classes
+**
+
+===========================================================*/
+
+#ifndef _COMCALLABLEWRAPPER_H
+#define _COMCALLABLEWRAPPER_H
+
+#ifdef FEATURE_COMINTEROP
+
+#include "vars.hpp"
+#include "stdinterfaces.h"
+#include "threads.h"
+#include "objecthandle.h"
+#include "comutilnative.h"
+#include "spinlock.h"
+#include "comtoclrcall.h"
+#include "dispatchinfo.h"
+#include "wrappers.h"
+#include "internalunknownimpl.h"
+#include "rcwwalker.h"
+#include "util.hpp"
+
+class CCacheLineAllocator;
+class ConnectionPoint;
+class MethodTable;
+class ComCallWrapper;
+struct SimpleComCallWrapper;
+class RCWHolder;
+struct ComMethodTable;
+
+typedef DPTR(struct SimpleComCallWrapper) PTR_SimpleComCallWrapper;
+
+// Terminator that indicates the end of a chain of linked wrappers.
+#define LinkedWrapperTerminator (PTR_ComCallWrapper)-1
+
+class ComCallWrapperCache
+{
+ enum
+ {
+ AD_IS_UNLOADING = 0x01,
+ };
+
+public:
+ // Encapsulate a CrstHolder, so that clients of our lock don't have to know
+ // the details of our implementation.
+ class LockHolder : public CrstHolder
+ {
+ public:
+ LockHolder(ComCallWrapperCache *pCache)
+ : CrstHolder(&pCache->m_lock)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ };
+
+ ComCallWrapperCache();
+ ~ComCallWrapperCache();
+
+ // create a new WrapperCache (one per domain)
+ static ComCallWrapperCache* Create(AppDomain *pDomain);
+
+ // Called when the domain is going away. We may have outstanding references to this cache,
+ // so we keep it around in a neutered state.
+ void Neuter();
+
+ // refcount
+ LONG AddRef();
+ LONG Release();
+
+ CCacheLineAllocator* GetCacheLineAllocator()
+ {
+ CONTRACT (CCacheLineAllocator*)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN m_pCacheLineAllocator;
+ }
+
+ AppDomain* GetDomain()
+ {
+ CONTRACT (AppDomain*)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN ((AppDomain*)((size_t)m_pDomain & ~AD_IS_UNLOADING));
+ }
+
+ void ClearDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pDomain = (AppDomain *)AD_IS_UNLOADING;
+ }
+
+ void SetDomainIsUnloading()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pDomain = (AppDomain*)((size_t)m_pDomain | AD_IS_UNLOADING);
+ }
+
+ void ResetDomainIsUnloading()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pDomain = (AppDomain*)((size_t)m_pDomain & (~AD_IS_UNLOADING));
+ }
+
+ BOOL IsDomainUnloading()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return ((size_t)m_pDomain & AD_IS_UNLOADING) != 0;
+ }
+
+private:
+ LONG m_cbRef;
+ CCacheLineAllocator* m_pCacheLineAllocator;
+ AppDomain* m_pDomain;
+
+ // lock for synchronization
+ Crst m_lock;
+};
+
+
+//---------------------------------------------------------------------------------
+// COM called wrappers on CLR objects
+// Purpose: Expose CLR objects as COM classic Interfaces
+// Reqmts: Wrapper has to have the same layout as the COM2 interface
+//
+// The wrapper objects are aligned at 16 bytes, and the original this
+// pointer is replicated every 16 bytes, so for any COM2 interface
+// within the wrapper, the original 'this' can be obtained by masking
+// low 4 bits of COM2 IP.
+//
+// 16 byte aligned COM2 Vtable
+// +-----------+
+// | Org. this |
+// +-----------+ +-----+
+// COM2 IP-->| VTable ptr|----------------------------->|slot1|
+// +-----------+ +-----+ +-----+
+// COM2 IP-->| VTable ptr|---------->|slot1| |slot2|
+// +-----------+ +-----+ + +
+// | VTable ptr| | ....| | ... |
+// +-----------+ + + + +
+// | Org. this | |slotN| |slotN|
+// + + +-----+ +-----+
+// | .... |
+// + +
+// | |
+// +-----------+
+//
+//
+// The first slot of the first CCW is used to hold the basic interface -
+// an interface that implements the methods of IUnknown & IDispatch. The basic
+// interface's IDispatch implementation will call through to the class methods
+// as if it were the class interface.
+//
+// The second slot of the first CCW is used to hold the IClassX interface -
+// an interface that implements IUnknown, IDispatch, and a custom interface
+// that contains all of the members of the class and its hierarchy. This
+// will only be generated on demand and is only usable if the class and all
+// of its parents are visible to COM.
+//
+// VTable and Stubs: Stub code can be shared, but we need different vtables
+// for different interfaces so that a stub can jump to the
+// appropriate marshalling code.
+// Stubs: Adjust the 'this' pointer and jump to the appropriate
+// address; based on the method signature, the stub jumps to the
+// appropriate code to marshal the parameters and unmarshal the results.
+//
+//
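+// For illustration (a sketch of the scheme above): since the original 'this'
+// is replicated at every 16-byte boundary, recovering the wrapper from any
+// COM IP handed out by the CCW amounts to masking the low four bits:
+//
+//     ComCallWrapper *pWrap = (ComCallWrapper*)((SIZE_T)pUnk & ~(SIZE_T)0xF);
+//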
+//--------------------------------------------------------------------------------
+
+class WinRTManagedClassFactory;
+
+//--------------------------------------------------------------------------------
+// COM callable wrappers for CLR objects
+//--------------------------------------------------------------------------------
+typedef DPTR(class ComCallWrapperTemplate) PTR_ComCallWrapperTemplate;
+class ComCallWrapperTemplate
+{
+ friend class ClrDataAccess;
+
+public:
+ // Small "L1" cache to speed up QI's on CCWs with variance. It caches both positive and negative
+ // results (i.e. also keeps track of IIDs that the QI doesn't respond to).
+ class IIDToInterfaceTemplateCache
+ {
+ enum
+ {
+ // There is a small number of types that the class is castable to via variance and QI'ed for
+ // (typically just IFoo<object> where the class implements IFoo<IBar> and IFoo is covariant).
+ // There is also some number of IIDs QI'ed for by external code (e.g. Jupiter) that we won't
+ // recognize - this number is potentially unbounded so even if this was a different data
+ // structure, we would want to limit its size. Simple sequentially searched array seems to
+ // work the best both in terms of memory footprint and lookup performance.
+ CACHE_SIZE = 16,
+ };
+
+ struct CacheItem
+ {
+ IID m_iid;
+
+ // The lowest bit indicates whether this item is being used (since NULL is a legal value).
+ // The second lowest bit indicates whether the item has been accessed since the last eviction.
+            // The rest of the bits contain the ComCallWrapperTemplate pointer.
+ SIZE_T m_pTemplate;
+
+ bool IsFree()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_pTemplate == 0);
+ }
+
+ bool IsHot()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((m_pTemplate & 0x2) == 0x2);
+ }
+
+ ComCallWrapperTemplate *GetTemplate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (ComCallWrapperTemplate *)(m_pTemplate & ~0x3);
+ }
+
+ void SetTemplate(ComCallWrapperTemplate *pTemplate)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pTemplate = ((SIZE_T)pTemplate | 0x1);
+ }
+
+ void MarkHot()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pTemplate |= 0x2;
+ }
+
+ void MarkCold()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pTemplate &= ~0x2;
+ }
+ };
+
+ // array of cache items
+ CacheItem m_items[CACHE_SIZE];
+
+ // spin lock to protect concurrent access to m_items
+ SpinLock m_lock;
+
+ public:
+ IIDToInterfaceTemplateCache()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ ZeroMemory(this, sizeof(IIDToInterfaceTemplateCache));
+ m_lock.Init(LOCK_TYPE_DEFAULT);
+ }
+
+ bool LookupInterfaceTemplate(REFIID riid, ComCallWrapperTemplate **ppTemplate);
+ void InsertInterfaceTemplate(REFIID riid, ComCallWrapperTemplate *pTemplate);
+ };
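+
+    // Illustrative usage sketch only (pCache, riid, and ComputeTemplate are
+    // assumed/hypothetical names, not part of this header): look up first,
+    // compute and insert on a miss. Note that a NULL template is a legal,
+    // cacheable "negative" answer.
+    //
+    //     ComCallWrapperTemplate *pTemplate;
+    //     if (!pCache->LookupInterfaceTemplate(riid, &pTemplate))
+    //     {
+    //         pTemplate = ComputeTemplate(riid);  // hypothetical helper
+    //         pCache->InsertInterfaceTemplate(riid, pTemplate);
+    //     }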
+
+ // Iterates COM-exposed interfaces of a class. Handles arrays which support IIterable<T>,
+ // IVector<T>, and IVectorView<T>, as well as WinRT class factories which support factory
+ // and static interfaces. It is also aware of redirected interfaces - both the .NET and the
+    // corresponding WinRT type are reported.
+ class CCWInterfaceMapIterator
+ {
+ private:
+ struct InterfaceProps
+ {
+ MethodTable *m_pItfMT;
+
+ WinMDAdapter::RedirectedTypeIndex m_RedirectedIndex; // valid if m_dwIsRedirectedInterface is set
+
+ DWORD m_dwIsRedirectedInterface : 1;
+ DWORD m_dwIsFactoryInterface : 1;
+ DWORD m_dwIsStaticInterface : 1;
+ };
+
+ StackSArray<InterfaceProps> m_Interfaces;
+ COUNT_T m_Index;
+
+ inline const InterfaceProps &GetCurrentInterfaceProps() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Interfaces[(COUNT_T)m_Index];
+ }
+
+ InterfaceProps &AppendInterface(MethodTable *pItfMT, bool isRedirected);
+
+ public:
+ CCWInterfaceMapIterator(TypeHandle thClass, WinRTManagedClassFactory *pClsFact, bool fIterateRedirectedInterfaces);
+
+ BOOL Next()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (++m_Index < GetCount());
+ }
+
+ MethodTable *GetInterface() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetCurrentInterfaceProps().m_pItfMT;
+ }
+
+ DWORD GetIndex() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Index;
+ }
+
+ DWORD GetCount() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Interfaces.GetCount();
+ }
+
+ void Reset()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_Index = (COUNT_T)-1;
+ }
+
+ BOOL IsFactoryInterface() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetCurrentInterfaceProps().m_dwIsFactoryInterface;
+ }
+
+ BOOL IsStaticInterface() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetCurrentInterfaceProps().m_dwIsStaticInterface;
+ }
+
+ BOOL IsRedirectedInterface() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetCurrentInterfaceProps().m_dwIsRedirectedInterface;
+ }
+
+ WinMDAdapter::RedirectedTypeIndex GetRedirectedInterfaceIndex() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetCurrentInterfaceProps().m_RedirectedIndex;
+ }
+ };
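+
+    // Illustrative iteration pattern (a sketch, not part of the interface):
+    // Reset() positions the index before the first element, so Next() must be
+    // called before each access, including the first:
+    //
+    //     it.Reset();
+    //     while (it.Next())
+    //         Visit(it.GetInterface());   // Visit is a hypothetical callback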
+
+ // Static initializer run at startup.
+ static void Init();
+
+ // Template accessor, creates a template if one is not already cached.
+ static ComCallWrapperTemplate* GetTemplate(TypeHandle thClass);
+
+ // Ref-count the templates
+ LONG AddRef();
+ LONG Release();
+
+ // Properties
+ ComMethodTable* GetClassComMT();
+ ComMethodTable* GetComMTForItf(MethodTable *pItfMT);
+ ComMethodTable* GetComMTForIndex(ULONG ulItfIndex);
+ ComMethodTable* GetBasicComMT();
+ ULONG GetNumInterfaces();
+ SLOT* GetVTableSlot(ULONG index);
+ BOOL HasInvisibleParent();
+ void CheckParentComVisibility(BOOL fForIDispatch);
+ BOOL CheckParentComVisibilityNoThrow(BOOL fForIDispatch);
+
+ // Calls GetDefaultInterfaceForClassInternal and caches the result.
+ DefaultInterfaceType GetDefaultInterface(MethodTable **ppDefaultItf);
+
+ // Sets up the class method table for the IClassX and also lays it out.
+ static ComMethodTable *SetupComMethodTableForClass(MethodTable *pMT, BOOL bLayOutComMT);
+
+ MethodDesc * GetICustomQueryInterfaceGetInterfaceMD();
+
+ IIDToInterfaceTemplateCache *GetOrCreateIIDToInterfaceTemplateCache();
+
+ BOOL SupportsICustomQueryInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_flags & enum_ImplementsICustomQueryInterface);
+ }
+
+ BOOL SupportsIInspectable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_flags & enum_SupportsIInspectable);
+ }
+
+ BOOL SupportsVariantInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_flags & enum_SupportsVariantInterface);
+ }
+
+ BOOL RepresentsVariantInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_flags & enum_RepresentsVariantInterface);
+ }
+
+ BOOL IsUseOleAutDispatchImpl()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_flags & enum_UseOleAutDispatchImpl);
+ }
+
+ BOOL ImplementsIMarshal()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_flags & enum_ImplementsIMarshal);
+ }
+
+ BOOL SupportsIClassX()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_flags & enum_SupportsIClassX);
+ }
+
+ TypeHandle GetClassType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_thClass;
+ }
+
+ MethodTable *GetWinRTRuntimeClass()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pWinRTRuntimeClass;
+ }
+
+ BOOL IsSafeTypeForMarshalling();
+
+ // Creates a new Template and caches it on the MethodTable/ArrayTypeDesc or class factory.
+ static ComCallWrapperTemplate *CreateTemplate(TypeHandle thClass, WinRTManagedClassFactory *pClsFact = NULL);
+
+ // Creates a new Template for just one interface. Used for lazily created CCWs for interfaces with variance.
+ static ComCallWrapperTemplate *CreateTemplateForInterface(MethodTable *pItfMT);
+
+private:
+
+ enum ComCallWrapperTemplateFlags
+ {
+ // first 3 bits are interpreted as DefaultInterfaceType
+ enum_DefaultInterfaceType = 0x7,
+ enum_DefaultInterfaceTypeComputed = 0x10,
+
+ enum_InvisibleParent = 0x20,
+ enum_ImplementsICustomQueryInterface = 0x40,
+ enum_SupportsIInspectable = 0x80,
+ enum_SupportsIClassX = 0x100,
+
+ enum_SupportsVariantInterface = 0x200, // this is a template for a class that implements an interface with variance
+ enum_RepresentsVariantInterface = 0x400, // this is a template for an interface with variance
+
+ enum_UseOleAutDispatchImpl = 0x800, // the class is decorated with IDispatchImplAttribute(CompatibleImpl)
+
+ enum_ImplementsIMarshal = 0x1000, // the class implements a managed interface with Guid == IID_IMarshal
+
+ enum_IsSafeTypeForMarshalling = 0x2000, // The class can be safely marshalled out of process via DCOM
+ };
+
+ // Hide the constructor
+ ComCallWrapperTemplate();
+
+ // Cleanup called when the ref-count hits zero.
+ void Cleanup();
+
+ // Helper method called by code:CreateTemplate.
+ ComMethodTable* InitializeForInterface(MethodTable *pParentMT, MethodTable *pItfMT, DWORD dwIndex);
+
+    // Create a non-laid-out COM method table for the specified class or interface.
+ ComMethodTable* CreateComMethodTableForClass(MethodTable *pClassMT);
+ ComMethodTable* CreateComMethodTableForInterface(MethodTable* pInterfaceMT);
+ ComMethodTable* CreateComMethodTableForBasic(MethodTable* pClassMT);
+ ComMethodTable* CreateComMethodTableForDelegate(MethodTable *pDelegateMT);
+
+ void DetermineComVisibility();
+
+private:
+ LONG m_cbRefCount;
+ ComCallWrapperTemplate* m_pParent;
+ TypeHandle m_thClass;
+ MethodTable* m_pDefaultItf;
+ MethodTable* m_pWinRTRuntimeClass;
+ ComMethodTable* m_pClassComMT;
+ ComMethodTable* m_pBasicComMT;
+ DWORD m_flags;
+ MethodDesc* m_pICustomQueryInterfaceGetInterfaceMD;
+ Volatile<IIDToInterfaceTemplateCache *> m_pIIDToInterfaceTemplateCache;
+ ULONG m_cbInterfaces;
+ SLOT* m_rgpIPtr[1];
+};
+
+inline void ComCallWrapperTemplateRelease(ComCallWrapperTemplate *value)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (value)
+ {
+ value->Release();
+ }
+}
+
+typedef Wrapper<ComCallWrapperTemplate *, DoNothing<ComCallWrapperTemplate *>, ComCallWrapperTemplateRelease, NULL> ComCallWrapperTemplateHolder;
+
+
+//--------------------------------------------------------------------------------
+// Header on top of Vtables that we create for COM callable interfaces
+//--------------------------------------------------------------------------------
+#pragma pack(push)
+#pragma pack(1)
+
+struct IUnkVtable
+{
+ SLOT m_qi; // IUnk::QI
+ SLOT m_addref; // IUnk::AddRef
+ SLOT m_release; // IUnk::Release
+};
+
+struct IDispatchVtable : IUnkVtable
+{
+ // idispatch methods
+ SLOT m_GetTypeInfoCount;
+ SLOT m_GetTypeInfo;
+ SLOT m_GetIDsOfNames;
+ SLOT m_Invoke;
+};
+
+struct IInspectableVtable : IUnkVtable
+{
+ SLOT m_GetIIDs;
+ SLOT m_GetRuntimeClassName;
+ SLOT m_GetTrustLevel;
+};
+
+enum Masks
+{
+ enum_InterfaceTypeMask = 0x00000003,
+ enum_ClassInterfaceTypeMask = 0x00000003,
+ enum_ClassVtableMask = 0x00000004,
+ enum_LayoutComplete = 0x00000010,
+ enum_ComVisible = 0x00000040,
+ enum_SigClassCannotLoad = 0x00000080,
+ enum_SigClassLoadChecked = 0x00000100,
+ enum_ComClassItf = 0x00000200,
+ enum_GuidGenerated = 0x00000400,
+ enum_IsUntrusted = 0x00001000,
+ enum_IsBasic = 0x00002000,
+ enum_IsWinRTDelegate = 0x00004000,
+ enum_IsWinRTTrivialAggregate = 0x00008000,
+ enum_IsWinRTFactoryInterface = 0x00010000,
+ enum_IsWinRTStaticInterface = 0x00020000,
+ enum_IsWinRTRedirectedInterface = 0x00040000,
+
+ enum_WinRTRedirectedInterfaceMask = 0xFF000000, // the highest byte contains redirected interface index
+};
+
+typedef DPTR(struct ComMethodTable) PTR_ComMethodTable;
+struct ComMethodTable
+{
+ friend class ComCallWrapperTemplate;
+
+ // Cleanup, frees all the stubs and the vtable
+ void Cleanup();
+
+    // The appropriate lay-out method below must be called before the COM method
+    // table is exposed to COM or before any methods are called on it.
+ void LayOutClassMethodTable();
+ BOOL LayOutInterfaceMethodTable(MethodTable* pClsMT);
+ void LayOutBasicMethodTable();
+ void LayOutDelegateMethodTable();
+
+ // Accessor for the IDispatch information.
+ DispatchInfo* GetDispatchInfo();
+
+ LONG AddRef()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return InterlockedIncrement(&m_cbRefCount);
+ }
+
+ LONG Release()
+ {
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(m_cbRefCount > 0);
+ }
+ CONTRACTL_END;
+
+        // Use a local variable here because Cleanup() will delete the object,
+        // so we can no longer reference members afterwards.
+ LONG cbRef = InterlockedDecrement(&m_cbRefCount);
+ if (cbRef == 0)
+ Cleanup();
+
+ return cbRef;
+ }
+
+ CorIfaceAttr GetInterfaceType()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (IsIClassXOrBasicItf())
+ return ifDual;
+ else
+ return (CorIfaceAttr)(m_Flags & enum_InterfaceTypeMask);
+ }
+
+ CorClassIfaceAttr GetClassInterfaceType()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsIClassXOrBasicItf());
+ return (CorClassIfaceAttr)(m_Flags & enum_ClassInterfaceTypeMask);
+ }
+
+ BOOL IsDefinedInUntrustedCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_Flags & enum_IsUntrusted) ? TRUE : FALSE;
+ }
+
+ BOOL IsIClassX()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (IsIClassXOrBasicItf() && !IsBasic());
+ }
+
+ BOOL IsIClassXOrBasicItf()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_Flags & enum_ClassVtableMask) != 0;
+ }
+
+ BOOL IsComClassItf()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_Flags & enum_ComClassItf) != 0;
+ }
+
+ BOOL IsLayoutComplete()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_Flags & enum_LayoutComplete) != 0;
+ }
+
+ BOOL IsComVisible()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_Flags & enum_ComVisible) != 0;
+ }
+
+ BOOL IsBasic()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_Flags & enum_IsBasic) != 0;
+ }
+
+ BOOL IsWinRTDelegate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_Flags & enum_IsWinRTDelegate) != 0;
+ }
+
+ BOOL IsWinRTTrivialAggregate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_Flags & enum_IsWinRTTrivialAggregate) != 0;
+ }
+
+ BOOL IsWinRTFactoryInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_Flags & enum_IsWinRTFactoryInterface) != 0;
+ }
+
+ BOOL IsWinRTStaticInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_Flags & enum_IsWinRTStaticInterface) != 0;
+ }
+
+ VOID SetIsWinRTFactoryInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_Flags |= enum_IsWinRTFactoryInterface;
+ }
+
+ VOID SetIsWinRTStaticInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_Flags |= enum_IsWinRTStaticInterface;
+ }
+
+ BOOL IsWinRTRedirectedInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_Flags & enum_IsWinRTRedirectedInterface) != 0;
+ }
+
+ WinMDAdapter::RedirectedTypeIndex GetWinRTRedirectedInterfaceIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (WinMDAdapter::RedirectedTypeIndex)((m_Flags & enum_WinRTRedirectedInterfaceMask) >> 24);
+ }
+
+ void SetWinRTRedirectedInterfaceIndex(WinMDAdapter::RedirectedTypeIndex index)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_Flags |= ((size_t)index << 24);
+ m_Flags |= enum_IsWinRTRedirectedInterface;
+ _ASSERTE(GetWinRTRedirectedInterfaceIndex() == index);
+ }
+
+ BOOL HasInvisibleParent()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((ComCallWrapperTemplate*)m_pMT->GetComCallWrapperTemplate())->HasInvisibleParent();
+ }
+
+ BOOL IsSigClassLoadChecked()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_Flags & enum_SigClassLoadChecked) != 0;
+ }
+
+ BOOL IsSigClassCannotLoad()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 0 != (m_Flags & enum_SigClassCannotLoad);
+ }
+
+ VOID SetSigClassCannotLoad()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_Flags |= enum_SigClassCannotLoad;
+ }
+
+ VOID SetSigClassLoadChecked()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_Flags |= enum_SigClassLoadChecked;
+ }
+
+ DWORD GetSlots()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_cbSlots;
+ }
+
+ ITypeInfo *GetITypeInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pITypeInfo;
+ }
+
+ void SetITypeInfo(ITypeInfo *pITI);
+
+ static WORD GetNumExtraSlots(CorIfaceAttr ItfType)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ switch (ItfType)
+ {
+ case ifVtable: return (sizeof(IUnkVtable) / sizeof(SLOT));
+ case ifInspectable: return (sizeof(IInspectableVtable) / sizeof(SLOT));
+ default: return (sizeof(IDispatchVtable) / sizeof(SLOT));
+ }
+ }
+
+ // Gets the ComCallMethodDesc out of a Vtable slot correctly for all platforms
+ ComCallMethodDesc* ComCallMethodDescFromSlot(unsigned i);
+
+ BOOL IsSlotAField(unsigned i)
+ {
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(IsLayoutComplete());
+ PRECONDITION(i < m_cbSlots);
+ }
+ CONTRACTL_END;
+
+ i += GetNumExtraSlots(GetInterfaceType());
+ ComCallMethodDesc* pCMD = ComCallMethodDescFromSlot(i);
+ return pCMD->IsFieldCall();
+ }
+
+ MethodDesc* GetMethodDescForSlot(unsigned i)
+ {
+ CONTRACT (MethodDesc*)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(IsLayoutComplete());
+ PRECONDITION(i < m_cbSlots);
+ PRECONDITION(!IsSlotAField(i));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ i += GetNumExtraSlots(GetInterfaceType());
+
+ ComCallMethodDesc* pCMD;
+
+ pCMD = ComCallMethodDescFromSlot(i);
+ _ASSERTE(pCMD->IsMethodCall());
+
+ RETURN pCMD->GetMethodDesc();
+ }
+
+ ComCallMethodDesc* GetFieldCallMethodDescForSlot(unsigned i)
+ {
+ CONTRACT (ComCallMethodDesc*)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(IsLayoutComplete());
+ PRECONDITION(i < m_cbSlots);
+ PRECONDITION(IsSlotAField(i));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ i += GetNumExtraSlots(GetInterfaceType());
+ ComCallMethodDesc* pCMD = ComCallMethodDescFromSlot(i);
+
+ _ASSERTE(pCMD->IsFieldCall());
+ RETURN (ComCallMethodDesc *)pCMD;
+ }
+
+ BOOL OwnedbyThisMT(unsigned slotIndex)
+ {
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!IsIClassXOrBasicItf())
+ return TRUE;
+
+ if (m_pMDescr != NULL)
+ {
+ // These are the methods from the default interfaces such as IUnknown.
+ unsigned cbExtraSlots = GetNumExtraSlots(GetInterfaceType());
+
+ // Refer to ComMethodTable::LayOutClassMethodTable().
+ ULONG cbSize = *(ULONG *)m_pMDescr;
+ ULONG cbNewSlots = cbSize / (COMMETHOD_PREPAD + sizeof(ComCallMethodDesc));
+ _ASSERTE( (cbSize % (COMMETHOD_PREPAD + sizeof(ComCallMethodDesc))) == 0);
+
+ // m_cbSlots is the total number of methods in addition to the ones from the
+ // default interfaces. cbNewSlots is the total number of methods introduced
+ // by this class (== m_cbSlots - <slots from parent MT>).
+ return (slotIndex >= (cbExtraSlots + m_cbSlots - cbNewSlots));
+ }
+
+ return FALSE;
+ }
+
+ ComMethodTable *GetParentClassComMT();
+
+ static inline PTR_ComMethodTable ComMethodTableFromIP(PTR_IUnknown pUnk)
+ {
+ CONTRACT (PTR_ComMethodTable)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pUnk));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ PTR_ComMethodTable pMT = dac_cast<PTR_ComMethodTable>(*PTR_TADDR(pUnk) - sizeof(ComMethodTable));
+
+ // validate the object
+ _ASSERTE((SLOT)(size_t)0xDEADC0FF == pMT->m_ptReserved );
+
+ RETURN pMT;
+ }
+
+ ULONG GetNumSlots()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_cbSlots;
+ }
+
+ PTR_MethodTable GetMethodTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pMT;
+ }
+
+
+ inline REFIID GetIID()
+ {
+        // Cannot use a normal CONTRACT since the return type is a reference
+        // type, which causes problems with normal CONTRACTs.
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Generate the IClassX IID if it hasn't been generated yet.
+ if (!(m_Flags & enum_GuidGenerated))
+ {
+ GenerateClassItfGuid(TypeHandle(m_pMT), &m_IID);
+ m_Flags |= enum_GuidGenerated;
+ }
+
+ return m_IID;
+ }
+
+ void CheckParentComVisibility(BOOL fForIDispatch)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ ((ComCallWrapperTemplate*)m_pMT->GetComCallWrapperTemplate())->CheckParentComVisibility(fForIDispatch);
+ }
+
+ BOOL CheckParentComVisibilityNoThrow(BOOL fForIDispatch)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return ((ComCallWrapperTemplate*)m_pMT->GetComCallWrapperTemplate())->CheckParentComVisibilityNoThrow(fForIDispatch);
+ }
+
+private:
+ // Helper methods.
+ BOOL CheckSigTypesCanBeLoaded(MethodTable *pItfClass);
+
+ SLOT m_ptReserved; //= (SLOT) 0xDEADC0FF; reserved
+ PTR_MethodTable m_pMT; // pointer to the VMs method table
+ ULONG m_cbSlots; // number of slots in the interface (excluding IUnk/IDisp)
+ LONG m_cbRefCount; // ref-count the vtable as it is being shared
+ size_t m_Flags; // make sure this is initialized to zero
+ LPVOID m_pMDescr; // pointer to methoddescr.s owned by this MT
+ ITypeInfo* m_pITypeInfo; // cached pointer to ITypeInfo
+ DispatchInfo* m_pDispatchInfo; // The dispatch info used to expose IDispatch to COM.
+ IID m_IID; // The IID of the interface.
+};
+
+#pragma pack(pop)
+
+
+struct GetComIPFromCCW
+{
+ enum flags
+ {
+ None = 0,
+ CheckVisibility = 1,
+ SuppressSecurityCheck = 2,
+ SuppressCustomizedQueryInterface = 4
+ };
+};
+
+inline GetComIPFromCCW::flags operator|(GetComIPFromCCW::flags lhs, GetComIPFromCCW::flags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<GetComIPFromCCW::flags>(static_cast<DWORD>(lhs) | static_cast<DWORD>(rhs));
+}
+inline GetComIPFromCCW::flags operator|=(GetComIPFromCCW::flags & lhs, GetComIPFromCCW::flags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<GetComIPFromCCW::flags>(static_cast<DWORD>(lhs) | static_cast<DWORD>(rhs));
+ return lhs;
+}
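+
+// Usage note (illustrative only): the overloads above let callers combine flags, e.g.
+//
+//     GetComIPFromCCW::flags f = GetComIPFromCCW::CheckVisibility;
+//     f |= GetComIPFromCCW::SuppressCustomizedQueryInterface;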
+
+class ComCallWrapper
+{
+ friend class MarshalNative;
+ friend class ClrDataAccess;
+
+private:
+ enum
+ {
+#ifdef _WIN64
+ NumVtablePtrs = 5,
+ enum_ThisMask = ~0x3f, // mask on IUnknown ** to get at the OBJECT-REF handle
+#else
+
+ NumVtablePtrs = 5,
+ enum_ThisMask = ~0x1f, // mask on IUnknown ** to get at the OBJECT-REF handle
+#endif
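+        // Derived from the masks above (an observation, not a contract): the
+        // wrapper block is 64 bytes on 64-bit and 32 bytes on 32-bit, so masking
+        // the low 6 (resp. 5) bits of any vtable-pointer slot address inside it
+        // yields the start of the wrapper.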
+ Slot_IClassX = 1,
+ Slot_Basic = 0,
+
+ Slot_FirstInterface = 2,
+ };
+
+public:
+ ADID GetDomainID();
+
+ // The first overload respects the is-agile flag and context, the other two respect the flag but
+ // ignore the context (this is mostly for back compat reasons, new code should call the first overload).
+ BOOL NeedToSwitchDomains(Thread *pThread, ADID *pTargetADID, Context **ppTargetContext);
+ BOOL NeedToSwitchDomains(Thread *pThread);
+ BOOL NeedToSwitchDomains(ADID appdomainID);
+
+ void MakeAgile(OBJECTREF pObj);
+ void CheckMakeAgile(OBJECTREF pObj);
+
+ VOID ResetHandleStrength();
+ VOID MarkHandleWeak();
+
+ BOOL IsHandleWeak();
+
+ OBJECTHANDLE GetObjectHandle();
+ OBJECTHANDLE GetRawObjectHandle() { LIMITED_METHOD_CONTRACT; return m_ppThis; } // no NULL check
+
+protected:
+ // don't instantiate this class directly
+ ComCallWrapper()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+ ~ComCallWrapper()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ void Init();
+
+#ifndef DACCESS_COMPILE
+ inline static void SetNext(ComCallWrapper* pWrap, ComCallWrapper* pNextWrapper)
+ {
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrap));
+ PRECONDITION(CheckPointer(pNextWrapper, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ pWrap->m_pNext = pNextWrapper;
+ }
+#endif // !DACCESS_COMPILE
+
+ inline static PTR_ComCallWrapper GetNext(PTR_ComCallWrapper pWrap)
+ {
+ CONTRACT (PTR_ComCallWrapper)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pWrap));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN (LinkedWrapperTerminator == pWrap->m_pNext ? NULL : pWrap->m_pNext);
+ }
+
+ // Helper to perform a security check for passing out CCWs late-bound to scripting code.
+ void DoScriptingSecurityCheck();
+
+    // Helper to create a wrapper; pClassCCW must be specified if pTemplate->RepresentsVariantInterface()
+ static ComCallWrapper* CreateWrapper(OBJECTREF* pObj, ComCallWrapperTemplate *pTemplate, ComCallWrapper *pClassCCW);
+
+ // helper to get the IUnknown* within a wrapper
+ static SLOT** GetComIPLocInWrapper(ComCallWrapper* pWrap, unsigned iIndex);
+
+ // helper to get index within the interface map for an interface that matches
+ // the interface MT
+ static signed GetIndexForIntfMT(ComCallWrapperTemplate *pTemplate, MethodTable *pIntfMT);
+
+ // helper to get wrapper from sync block
+ static PTR_ComCallWrapper GetStartWrapper(PTR_ComCallWrapper pWrap);
+
+ // helper to create a wrapper from a template
+ static ComCallWrapper* CopyFromTemplate(ComCallWrapperTemplate* pTemplate,
+ ComCallWrapperCache *pWrapperCache,
+ OBJECTHANDLE oh);
+
+ // helper to find a covariant supertype of pMT with the given IID
+ static MethodTable *FindCovariantSubtype(MethodTable *pMT, REFIID riid);
+
+ // Like GetComIPFromCCW, but will try to find riid/pIntfMT among interfaces implemented by this
+    // object that have variance. Assumes that a call to GetComIPFromCCW with the same arguments has failed.
+ IUnknown *GetComIPFromCCWUsingVariance(REFIID riid, MethodTable *pIntfMT, GetComIPFromCCW::flags flags);
+
+ static IUnknown * GetComIPFromCCW_VariantInterface(
+ ComCallWrapper * pWrap, REFIID riid, MethodTable * pIntfMT, GetComIPFromCCW::flags flags,
+ ComCallWrapperTemplate * pTemplate);
+
+ inline static IUnknown * GetComIPFromCCW_VisibilityCheck(
+ IUnknown * pIntf, MethodTable * pIntfMT, ComMethodTable * pIntfComMT,
+ GetComIPFromCCW::flags flags);
+
+ static IUnknown * GetComIPFromCCW_HandleExtendsCOMObject(
+ ComCallWrapper * pWrap, REFIID riid, MethodTable * pIntfMT,
+ ComCallWrapperTemplate * pTemplate, signed imapIndex, unsigned intfIndex);
+
+ static IUnknown * GetComIPFromCCW_ForIID_Worker(
+ ComCallWrapper * pWrap, REFIID riid, MethodTable * pIntfMT, GetComIPFromCCW::flags flags,
+ ComCallWrapperTemplate * pTemplate);
+
+ static IUnknown * GetComIPFromCCW_ForIntfMT_Worker(
+ ComCallWrapper * pWrap, MethodTable * pIntfMT, GetComIPFromCCW::flags flags);
+
+
+public:
+ static bool GetComIPFromCCW_HandleCustomQI(
+ ComCallWrapper * pWrap, REFIID riid, MethodTable * pIntfMT, IUnknown ** ppUnkOut);
+
+ // walk the list and free all blocks
+ void FreeWrapper(ComCallWrapperCache *pWrapperCache);
+
+ BOOL IsWrapperActive();
+
+ // IsLinkedWrapper
+ inline BOOL IsLinked()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ }
+ CONTRACTL_END;
+
+ return m_pNext != NULL;
+ }
+
+
+    // Accessor for the wrapper object stored in the sync block;
+    // the wrapper is not guaranteed to be present.
+ inline static PTR_ComCallWrapper GetWrapperForObject(OBJECTREF pObj, ComCallWrapperTemplate *pTemplate = NULL)
+ {
+ CONTRACT (PTR_ComCallWrapper)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SUPPORTS_DAC;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ PTR_SyncBlock pSync = pObj->PassiveGetSyncBlock();
+ if (!pSync)
+ RETURN NULL;
+
+ PTR_InteropSyncBlockInfo pInteropInfo = pSync->GetInteropInfoNoCreate();
+ if (!pInteropInfo)
+ RETURN NULL;
+
+ PTR_ComCallWrapper pCCW = pInteropInfo->GetCCW();
+
+ if (pTemplate != NULL)
+ {
+ // make sure we use the right CCW - the object may have multiple CCWs associated
+ // with it which were created based on different CCW templates
+ while (pCCW != NULL && pCCW->GetComCallWrapperTemplate() != pTemplate)
+ {
+ pCCW = GetNext(pCCW);
+ }
+ }
+
+ RETURN pCCW;
+ }
+
+ // get inner unknown
+ HRESULT GetInnerUnknown(void **pv);
+
+ // Init outer unknown
+ void InitializeOuter(IUnknown* pOuter);
+
+ // is the object aggregated by a COM component
+ BOOL IsAggregated();
+
+ // is the object a transparent proxy
+ BOOL IsObjectTP();
+
+    // does the object extend from (aggregate) a COM component
+ BOOL IsExtendsCOMObject();
+
+ // get syncblock stored in the simplewrapper
+ SyncBlock* GetSyncBlock();
+
+ // get the CCW template this wrapper is based on
+ PTR_ComCallWrapperTemplate GetComCallWrapperTemplate();
+
+ // get outer unk
+ IUnknown* GetOuter();
+
+ // Get IClassX interface pointer from the wrapper.
+    // The inspectionOnly parameter should only be passed as true if you are
+ // only passively inspecting the value and not using the interface (such as
+ // passing out the pointer via ETW or in the dac).
+ IUnknown* GetIClassXIP(bool inspectionOnly=false);
+
+ // Get the basic interface pointer from the wrapper.
+    // The inspectionOnly parameter should only be passed as true if you are
+ // only passively inspecting the value and not using the interface (such as
+ // passing out the pointer via ETW or in the dac).
+ IUnknown* GetBasicIP(bool inspectionOnly=false);
+
+ // Get the IDispatch interface pointer from the wrapper.
+ IDispatch *GetIDispatchIP();
+
+    // Get the ObjectRef from the wrapper - this is called by GetObjectRef and GetStartWrapper.
+    // Needed because GetDomainSynchronized calls GetStartWrapper, which calls
+    // GetObjectRef, which would otherwise cause a nasty bit of infinite recursion.
+ inline OBJECTREF GetObjectRef()
+ {
+ CONTRACT (OBJECTREF)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(m_ppThis));
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ if (m_ppThis == NULL)
+ {
+ // Force a fail fast if this CCW is already neutered
+ AccessNeuteredCCW_FailFast();
+ }
+
+ RETURN ObjectFromHandle(m_ppThis);
+ }
+
+ //
+ // Force a fail fast for better diagnostics
+    // Don't inline, so that this call shows up in the callstack.
+ //
+ NOINLINE void AccessNeuteredCCW_FailFast()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_ACCESSING_CCW);
+ }
+
+ // A MODE_ANY helper to get the MethodTable of the 'this' object. This helper keeps
+ // the GCX_COOP transition out of the caller (it implies a holder which implies an
+ // FS:0 handler on x86).
+ MethodTable* GetMethodTableOfObjectRef();
+
+ // clean up an object wrapper
+ void Cleanup();
+
+ // If the object gets collected while the CCW is still active, neuter (disconnect) the CCW.
+ void Neuter();
+ void ClearHandle();
+
+    // Fast access to the wrapper for a COM+ object: performs an inline check
+    // and calls out of line to create the wrapper if needed; the out-of-line
+    // version might cause GC to be enabled.
+ static ComCallWrapper* __stdcall InlineGetWrapper(OBJECTREF* pObj, ComCallWrapperTemplate *pTemplate = NULL,
+ ComCallWrapper *pClassCCW = NULL);
+
+ // Get RefCount
+ inline ULONG GetRefCount();
+
+ // AddRef a wrapper
+ inline ULONG AddRef();
+
+    // AddRef a wrapper, delegating to the outer unknown if the object is aggregated
+ inline ULONG AddRefWithAggregationCheck();
+
+ // Release for a Wrapper object
+ inline ULONG Release();
+
+ // Get Jupiter RefCount
+ inline ULONG GetJupiterRefCount();
+
+ // AddRef Jupiter Ref Count
+ // Jupiter Ref count becomes strong ref if pegged, otherwise weak ref
+ inline ULONG AddJupiterRef();
+
+ // Release Jupiter Ref Count
+ // Jupiter Ref count becomes strong ref if pegged, otherwise weak ref
+ inline ULONG ReleaseJupiterRef();
+
+ // Return whether this CCW is pegged or not by Jupiter
+ inline BOOL IsPegged();
+
+ // Return whether this CCW is pegged or not (either by Jupiter, or globally)
+ // We globally peg every Jupiter CCW outside Gen 2 GCs
+ inline BOOL IsConsideredPegged();
+
+ // Initialize the simple wrapper.
+ static void InitSimpleWrapper(ComCallWrapper* pWrap, SimpleComCallWrapper* pSimpleWrap);
+
+ // Clear the simple wrapper. This must be called on the start wrapper.
+ static void ClearSimpleWrapper(ComCallWrapper* pWrap);
+
+ //Get Simple wrapper, for std interfaces such as IProvideClassInfo
+ PTR_SimpleComCallWrapper GetSimpleWrapper()
+ {
+ CONTRACT (PTR_SimpleComCallWrapper)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL));
+ SUPPORTS_DAC;
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ RETURN m_pSimpleWrapper;
+ }
+
+
+ // Get wrapper from IP, for std. interfaces like IDispatch
+ inline static PTR_ComCallWrapper GetWrapperFromIP(PTR_IUnknown pUnk);
+
+#if !defined(DACCESS_COMPILE)
+ inline static ComCallWrapper* GetStartWrapperFromIP(IUnknown* pUnk)
+ {
+ CONTRACT (ComCallWrapper*)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pUnk));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ ComCallWrapper* pWrap = GetWrapperFromIP(pUnk);
+ if (pWrap->IsLinked())
+ pWrap = GetStartWrapper(pWrap);
+
+ RETURN pWrap;
+ }
+#endif // DACCESS_COMPILE
+
+ // Get an interface from wrapper, based on riid or pIntfMT
+ static IUnknown* GetComIPFromCCW(ComCallWrapper *pWrap, REFIID riid, MethodTable* pIntfMT, GetComIPFromCCW::flags flags = GetComIPFromCCW::None);
+ static IUnknown* GetComIPFromCCWNoThrow(ComCallWrapper *pWrap, REFIID riid, MethodTable* pIntfMT, GetComIPFromCCW::flags flags = GetComIPFromCCW::None);
+
+
+private:
+ // pointer to OBJECTREF
+ OBJECTHANDLE m_ppThis;
+
+ // Pointer to the simple wrapper.
+ PTR_SimpleComCallWrapper m_pSimpleWrapper;
+
+ // Block of vtable pointers.
+ SLOT* m_rgpIPtr[NumVtablePtrs];
+
+ // Pointer to the next wrapper.
+ PTR_ComCallWrapper m_pNext;
+};
+
+FORCEINLINE void CCWRelease(ComCallWrapper* p)
+{
+ WRAPPER_NO_CONTRACT;
+
+ p->Release();
+}
+
+class CCWHolder : public Wrapper<ComCallWrapper*, CCWHolderDoNothing, CCWRelease, NULL>
+{
+public:
+ CCWHolder(ComCallWrapper* p = NULL)
+ : Wrapper<ComCallWrapper*, CCWHolderDoNothing, CCWRelease, NULL>(p)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ FORCEINLINE void operator=(ComCallWrapper* p)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ Wrapper<ComCallWrapper*, CCWHolderDoNothing, CCWRelease, NULL>::operator=(p);
+ }
+};
+
+typedef DPTR(class WeakReferenceImpl) PTR_WeakReferenceImpl;
+
+//
+// Represents a domain-bound weak reference to the object (not the CCW)
+//
+class WeakReferenceImpl : public IUnknownCommon<IWeakReference>
+{
+private:
+ ADID m_adid; // AppDomain ID of where this weak reference is created
+ Context *m_pContext; // Saved context
+ OBJECTHANDLE m_ppObject; // Short weak global handle points back to the object,
+ // created in domain ID = m_adid
+
+public:
+ WeakReferenceImpl(SimpleComCallWrapper *pSimpleWrapper, Thread *pCurrentThread);
+ virtual ~WeakReferenceImpl();
+
+ // IWeakReference methods
+ virtual HRESULT STDMETHODCALLTYPE Resolve(REFIID riid, IInspectable **ppvObject);
+
+private :
+ static void Resolve_Callback(LPVOID lpData);
+ static void Resolve_Callback_SwitchToPreemp(LPVOID lpData);
+
+ HRESULT ResolveInternal(Thread *pThread, REFIID riid, IInspectable **ppvObject);
+
+ HRESULT Cleanup();
+};
+
+//
+// Uncommonly used data on Simple CCW
+// Created on-demand
+//
+// We used to have two fields here; now there is only one, but the structure is
+// kept in case we want to put more stuff in it later
+//
+struct SimpleCCWAuxData
+{
+ VolatilePtr<DispatchExInfo> m_pDispatchExInfo; // Information required by the IDispatchEx standard interface
+ // Not available on WinRT types
+
+ SimpleCCWAuxData()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pDispatchExInfo = NULL;
+ }
+
+ ~SimpleCCWAuxData()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pDispatchExInfo)
+ {
+ delete m_pDispatchExInfo;
+ m_pDispatchExInfo = NULL;
+ }
+ }
+};
+
+//--------------------------------------------------------------------------------
+// Simple ComCallWrapper for all the simple standard interfaces that are not
+// used very often, such as IProvideClassInfo, ISupportsErrorInfo, etc.
+//--------------------------------------------------------------------------------
+struct SimpleComCallWrapper
+{
+private:
+ friend class ComCallWrapper;
+ friend class ClrDataAccess;
+ friend class WeakReferenceImpl;
+
+ enum SimpleComCallWrapperFlags
+ {
+ enum_IsAggregated = 0x1,
+ enum_IsExtendsCom = 0x2,
+ enum_IsHandleWeak = 0x4,
+ enum_IsObjectTP = 0x8,
+ enum_IsAgile = 0x10,
+ enum_IsPegged = 0x80,
+ enum_HasOverlappedRef = 0x100,
+ enum_CustomQIRespondsToIMarshal = 0x200,
+ enum_CustomQIRespondsToIMarshal_Inited = 0x400,
+ };
+
+public :
+ enum : LONGLONG
+ {
+ CLEANUP_SENTINEL = 0x0000000080000000, // Sentinel -> 1 bit
+ COM_REFCOUNT_MASK = 0x000000007FFFFFFF, // COM -> 31 bits
+ JUPITER_REFCOUNT_MASK = 0xFFFFFFFF00000000, // Jupiter -> 32 bits
+ JUPITER_REFCOUNT_SHIFT = 32,
+ JUPITER_REFCOUNT_INC = 0x0000000100000000,
+        EXT_COM_REFCOUNT_MASK     = 0x00000000FFFFFFFF,     // For back-compat, preserve the high bit so that external code can observe it
+ ALL_REFCOUNT_MASK = 0xFFFFFFFF7FFFFFFF,
+ };
+
+ #define GET_JUPITER_REF(x) ((ULONG)(((x) & SimpleComCallWrapper::JUPITER_REFCOUNT_MASK) >> SimpleComCallWrapper::JUPITER_REFCOUNT_SHIFT))
+ #define GET_COM_REF(x) ((ULONG)((x) & SimpleComCallWrapper::COM_REFCOUNT_MASK))
+ #define GET_EXT_COM_REF(x) ((ULONG)((x) & SimpleComCallWrapper::EXT_COM_REFCOUNT_MASK))
+
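+    // On 32-bit platforms a plain 64-bit read is not atomic, so READ_REF below
+    // uses a no-op InterlockedCompareExchange64 (compare 0, exchange 0) to read
+    // m_llRefCount atomically; on 64-bit an aligned read already is.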
+#ifdef _WIN64
+ #define READ_REF(x) (x)
+#else
+ #define READ_REF(x) (::InterlockedCompareExchange64((LONGLONG *)&x, 0, 0))
+#endif
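+
+    // Worked example (illustrative): m_llRefCount == 0x0000000200000003 decodes
+    // to GET_JUPITER_REF == 2 and GET_COM_REF == 3 with the cleanup sentinel
+    // clear, while 0x0000000080000000 is the sentinel alone with both ref
+    // counts at zero.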
+
+public:
+ HRESULT IErrorInfo_hr();
+ BSTR IErrorInfo_bstrDescription();
+ BSTR IErrorInfo_bstrSource();
+ BSTR IErrorInfo_bstrHelpFile();
+ DWORD IErrorInfo_dwHelpContext();
+ GUID IErrorInfo_guid();
+
+ // non virtual methods
+ SimpleComCallWrapper();
+
+ VOID Cleanup();
+
+ // Used to neuter a CCW if its AD is being unloaded underneath it.
+ VOID Neuter(bool fSkipHandleCleanup = false);
+
+ ~SimpleComCallWrapper();
+
+
+ VOID ResetSyncBlock()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pSyncBlock = NULL;
+ }
+
+ SyncBlock* GetSyncBlock()
+ {
+ CONTRACT (SyncBlock*)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ RETURN m_pSyncBlock;
+ }
+
+ // Init pointer to the vtable of the interface
+ // and the main ComCallWrapper if the interface needs it
+ void InitNew(OBJECTREF oref, ComCallWrapperCache *pWrapperCache, ComCallWrapper* pWrap,
+ ComCallWrapper *pClassWrap, Context* pContext, SyncBlock* pSyncBlock,
+ ComCallWrapperTemplate* pTemplate);
+
+    // used to reconnect the wrapper to a new object
+ void ReInit(SyncBlock* pSyncBlock);
+
+ void InitOuter(IUnknown* pOuter);
+
+ void ResetOuter();
+
+ IUnknown* GetOuter();
+
+ // get inner unknown
+ HRESULT GetInnerUnknown(void **ppv)
+ {
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ppv));
+ }
+ CONTRACTL_END;
+
+ *ppv = QIStandardInterface(enum_InnerUnknown);
+ if (*ppv)
+ return S_OK;
+ else
+ return E_NOINTERFACE;
+ }
+
+ OBJECTREF GetObjectRef()
+ {
+ CONTRACT (OBJECTREF)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ RETURN (GetMainWrapper()->GetObjectRef());
+ }
+
+ ComCallWrapperCache* GetWrapperCache()
+ {
+ CONTRACT (ComCallWrapperCache*)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN m_pWrapperCache;
+ }
+
+ // Connection point helper methods.
+ BOOL FindConnectionPoint(REFIID riid, IConnectionPoint **ppCP);
+ void EnumConnectionPoints(IEnumConnectionPoints **ppEnumCP);
+
+ ADID GetDomainID()
+ {
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (IsAgile())
+ return GetThread()->GetDomain()->GetId();
+
+ return m_dwDomainId;
+ }
+
+ ADID GetRawDomainID()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_dwDomainId;
+ }
+
+#ifndef CROSSGEN_COMPILE
+ inline BOOL NeedToSwitchDomains(Thread *pThread, ADID *pTargetADID, Context **ppTargetContext)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (IsAgile())
+ return FALSE;
+
+ if (m_dwDomainId == pThread->GetDomain()->GetId() && m_pContext == pThread->GetContext())
+ return FALSE;
+
+ // we intentionally don't provide any other way to read m_pContext so the caller always
+ // gets ADID & Context that are guaranteed to be in sync (note that GetDomainID() lies
+ // if the CCW is agile and using it together with m_pContext leads to issues)
+ *pTargetADID = m_dwDomainId;
+ *ppTargetContext = m_pContext;
+
+ return TRUE;
+ }
+
+    // If you call this, check the result before accessing any pointers that
+    // may be invalid.
+ inline BOOL NeedToSwitchDomains(ADID appdomainID)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ // Check for a direct domain ID match first -- this is more common than agile wrappers.
+ return (m_dwDomainId != appdomainID) && !IsAgile();
+ }
+#endif //CROSSGEN_COMPILE
+
+ BOOL ShouldBeAgile()
+ {
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return (!IsAgile() && GetThread()->GetDomain()->GetId()!= m_dwDomainId);
+ }
+
+ void MakeAgile(OBJECTHANDLE origHandle)
+ {
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ m_hOrigDomainHandle = origHandle;
+ FastInterlockOr((ULONG*)&m_flags, enum_IsAgile);
+ }
+
+ BOOL IsAgile()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_flags & enum_IsAgile;
+ }
+
+ BOOL IsObjectTP()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_flags & enum_IsObjectTP;
+ }
+
+ // is the object aggregated by a COM component
+ BOOL IsAggregated()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_flags & enum_IsAggregated;
+ }
+
+ void MarkAggregated()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ FastInterlockOr((ULONG*)&m_flags, enum_IsAggregated);
+ }
+
+ void UnMarkAggregated()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ FastInterlockAnd((ULONG*)&m_flags, ~enum_IsAggregated);
+ }
+
+ BOOL IsHandleWeak()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_flags & enum_IsHandleWeak;
+ }
+
+ void MarkHandleWeak()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ FastInterlockOr((ULONG*)&m_flags, enum_IsHandleWeak);
+ }
+
+ VOID ResetHandleStrength()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ FastInterlockAnd((ULONG*)&m_flags, ~enum_IsHandleWeak);
+ }
+
+ // is the object extends from (aggregates) a COM component
+ BOOL IsExtendsCOMObject()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_flags & enum_IsExtendsCom;
+ }
+
+ inline BOOL IsPegged()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_flags & enum_IsPegged;
+ }
+
+ inline void MarkPegged()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ FastInterlockOr((ULONG*)&m_flags, enum_IsPegged);
+ }
+
+ inline void UnMarkPegged()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ FastInterlockAnd((ULONG*)&m_flags, ~enum_IsPegged);
+ }
+
+ inline BOOL HasOverlappedRef()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_flags & enum_HasOverlappedRef;
+ }
+
+ // Used for the creation and deletion of simple wrappers
+ static SimpleComCallWrapper* CreateSimpleWrapper();
+
+ // Determines if the type associated with the ComCallWrapper supports exceptions.
+ static BOOL SupportsExceptions(MethodTable *pMT);
+ static BOOL SupportsIStringable(MethodTable *pMT);
+
+ // Determines if the type supports IReflect / IExpando.
+ static BOOL SupportsIReflect(MethodTable *pMT);
+ static BOOL SupportsIExpando(MethodTable *pMT);
+
+ NOINLINE BOOL ShouldUseManagedIProvideClassInfo();
+
+ //--------------------------------------------------------------------------
+ // Retrieves the simple wrapper from an IUnknown pointer that is for one
+ // of the interfaces exposed by the simple wrapper.
+ //--------------------------------------------------------------------------
+ static PTR_SimpleComCallWrapper GetWrapperFromIP(PTR_IUnknown pUnk)
+ {
+ CONTRACT (SimpleComCallWrapper*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pUnk));
+ POSTCONDITION(CheckPointer(RETVAL));
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ int i = GetStdInterfaceKind(pUnk);
+ PTR_SimpleComCallWrapper pSimpleWrapper = dac_cast<PTR_SimpleComCallWrapper>(dac_cast<TADDR>(pUnk) - sizeof(LPBYTE) * i - offsetof(SimpleComCallWrapper,m_rgpVtable));
+
+        // We should never get back a built-in interface from a SimpleCCW that represents a variant interface
+ _ASSERTE(pSimpleWrapper->m_pClassWrap == NULL);
+
+ RETURN pSimpleWrapper;
+ }
+
+ // get the main wrapper
+ PTR_ComCallWrapper GetMainWrapper()
+ {
+ CONTRACT (PTR_ComCallWrapper)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ INSTANCE_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL));
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ RETURN m_pWrap;
+ }
+
+ inline PTR_ComCallWrapper GetClassWrapper()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(m_pMT->IsInterface());
+ _ASSERTE(m_pClassWrap != NULL);
+
+ return m_pClassWrap;
+ }
+
+ inline ULONG GetRefCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return GET_COM_REF(READ_REF(m_llRefCount));
+ }
+
+ // Returns the unmarked raw ref count
+ // Make sure we always make a copy of the value instead of inlining
+ NOINLINE LONGLONG GetRealRefCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return READ_REF(m_llRefCount);
+ }
+
+ inline BOOL IsNeutered()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return !!(READ_REF(m_llRefCount) & CLEANUP_SENTINEL);
+ }
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+ // CCW refcount logging consists of two steps. BuildRefCountLogMessage is an instance method which
+ // must be called at a point where the CCW is guaranteed to be alive. LogRefCount is static because
+ // we generally don't know the new refcount (the one we want to log) until the CCW is at risk of
+ // having been destroyed by other threads.
+ void BuildRefCountLogMessage(LPCWSTR wszOperation, StackSString &ssMessage, ULONG dwEstimatedRefCount);
+ static void LogRefCount(ComCallWrapper *pWrap, StackSString &ssMessage, ULONG dwRefCountToLog);
+
+ NOINLINE HRESULT LogCCWAddRef(ULONG newRefCount)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ SetupForComCallHRNoHostNotifNoCheckCanRunManagedCode();
+
+ // we can safely assume that the CCW is still alive since this is an AddRef
+ StackSString ssMessage;
+ BuildRefCountLogMessage(W("AddRef"), ssMessage, newRefCount);
+ LogRefCount(GetMainWrapper(), ssMessage, newRefCount);
+
+ return S_OK;
+ }
+
+ inline ULONG AddRef()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (m_pClassWrap)
+ {
+ // Forward to the real wrapper if this CCW represents a variant interface
+ return m_pClassWrap->GetSimpleWrapper()->AddRef();
+ }
+
+ LONGLONG newRefCount = ::InterlockedIncrement64(&m_llRefCount);
+ if (g_pConfig->LogCCWRefCountChangeEnabled())
+ {
+ LogCCWAddRef(GET_EXT_COM_REF(newRefCount));
+ }
+ return GET_EXT_COM_REF(newRefCount);
+ }
+
+ inline ULONG AddRefWithAggregationCheck()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // aggregation check
+ IUnknown* pOuter = this->GetOuter();
+ if (pOuter != NULL)
+ return SafeAddRef(pOuter);
+
+ return this->AddRef();
+ }
+
+private:
+ LONGLONG ReleaseImplWithLogging(LONGLONG * pRefCount);
+
+ NOINLINE void ReleaseImplCleanup()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (!CanRunManagedCode())
+ return;
+ SO_INTOLERANT_CODE_NOTHROW(GetThread(), return; );
+ ReverseEnterRuntimeHolderNoThrow REHolder;
+ if (CLRTaskHosted())
+ {
+ HRESULT hr = REHolder.AcquireNoThrow();
+ if (FAILED(hr))
+ return;
+ }
+
+ m_pWrap->Cleanup();
+ }
+public:
+
+ inline ULONG Release()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (m_pClassWrap)
+ {
+ // Forward to the real wrapper if this CCW represents a variant interface
+ return m_pClassWrap->GetSimpleWrapper()->Release();
+ }
+
+ LONGLONG *pRefCount = &m_llRefCount;
+ ULONG ulComRef = GET_COM_REF(READ_REF(*pRefCount));
+
+ if (ulComRef <= 0)
+ {
+ _ASSERTE(!"Invalid Release() call on already released object. A managed object exposed to COM is being over-released from unmanaged code");
+ return -1;
+ }
+
+ // Null the outer pointer if refcount is about to drop to 0. We cannot perform this
+ // operation after decrementing the refcount as that would race with the finalizer
+ // that may clean this CCW up any time after the refcount drops to 0. With this pre-
+ // decrement reset, we are racing with other Release's and may call ResetOuter multiple
+ // times (which is fine - it's thread safe and idempotent) or call it when the refcount
+ // doesn't really drop to 0 (which is also fine - it would have dropped to 0 under
+ // slightly different timing and the COM client is responsible for preventing this).
+ if (ulComRef == 1)
+ ResetOuter();
+
+ LONGLONG newRefCount;
+ if (g_pConfig->LogCCWRefCountChangeEnabled())
+ {
+ newRefCount = ReleaseImplWithLogging(pRefCount);
+ }
+ else
+ {
+ // Decrement the ref count
+ newRefCount = ::InterlockedDecrement64(pRefCount);
+ }
+
+ // IMPORTANT: Do not touch instance fields or any other data associated with the CCW beyond this
+ // point unless newRefCount equals CLEANUP_SENTINEL (it's the only case when we know that Neuter
+ // or another Release could not swoop in and destroy our data structures).
+
+ // If we hit the sentinel value in COM ref count and jupiter ref count == 0, it's our responsibility to clean up.
+ if (newRefCount == CLEANUP_SENTINEL)
+ {
+ ReleaseImplCleanup();
+ return 0;
+ }
+
+ return GET_EXT_COM_REF(newRefCount);
+ }
+
+ inline ULONG AddJupiterRef()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ LONGLONG llOldRefCount;
+ LONGLONG llNewRefCount;
+
+ do {
+ llOldRefCount = m_llRefCount;
+ llNewRefCount = llOldRefCount + JUPITER_REFCOUNT_INC;
+ } while (InterlockedCompareExchange64(&m_llRefCount, llNewRefCount, llOldRefCount) != llOldRefCount);
+
+ LOG((LF_INTEROP, LL_INFO1000,
+ "SimpleComCallWrapper::AddJupiterRef() called on SimpleComCallWrapper 0x%p, cbRef = 0x%x, cbJupiterRef = 0x%x\n", this, GET_COM_REF(llNewRefCount), GET_JUPITER_REF(llNewRefCount)));
+
+ return GET_JUPITER_REF(llNewRefCount);
+ }
+
+ inline ULONG ReleaseJupiterRef()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ LONGLONG llOldRefCount;
+ LONGLONG llNewRefCount;
+
+ do {
+ llOldRefCount = m_llRefCount;
+ llNewRefCount = llOldRefCount - JUPITER_REFCOUNT_INC;
+ } while (InterlockedCompareExchange64(&m_llRefCount, llNewRefCount, llOldRefCount) != llOldRefCount);
+
+ LOG((LF_INTEROP, LL_INFO1000,
+ "SimpleComCallWrapper::ReleaseJupiterRef() called on SimpleComCallWrapper 0x%p, cbRef = 0x%x, cbJupiterRef = 0x%x\n", this, GET_COM_REF(llNewRefCount), GET_JUPITER_REF(llNewRefCount)));
+
+ if (llNewRefCount == CLEANUP_SENTINEL)
+ {
+ // If we hit the sentinel value, it's our responsibility to clean up.
+ m_pWrap->Cleanup();
+ }
+
+ return GET_JUPITER_REF(llNewRefCount);
+ }
+
+#endif // !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+
+ inline ULONG GetJupiterRefCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return GET_JUPITER_REF(READ_REF(m_llRefCount));
+ }
+
+ MethodTable* GetMethodTable()
+ {
+ CONTRACT (MethodTable*)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN m_pMT;
+ }
+
+ DispatchExInfo* GetDispatchExInfo()
+ {
+ CONTRACT (DispatchExInfo*)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ if (m_pAuxData.Load() == NULL)
+ RETURN NULL;
+ else
+ RETURN m_pAuxData->m_pDispatchExInfo;
+ }
+
+ BOOL SupportsICustomQueryInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pTemplate->SupportsICustomQueryInterface();
+ }
+
+ PTR_ComCallWrapperTemplate GetComCallWrapperTemplate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pTemplate;
+ }
+
+ // Creates new AddRef-ed IWeakReference*
+ IWeakReference *CreateWeakReference(Thread *pCurrentThread)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(pCurrentThread == GetThread());
+ }
+ CONTRACTL_END;
+
+ // Create a WeakReferenceImpl with RefCount = 1
+ // No need to call AddRef
+ WeakReferenceImpl *pWeakRef = new WeakReferenceImpl(this, pCurrentThread);
+
+ return pWeakRef;
+ }
+
+ void StoreOverlappedPointer(LPOVERLAPPED lpOverlapped)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ this->m_operlappedPtr = lpOverlapped;
+ MarkOverlappedRef();
+ }
+
+ // Returns TRUE if the ICustomQI implementation returns Handled or Failed for IID_IMarshal.
+ BOOL CustomQIRespondsToIMarshal();
+
+ SimpleCCWAuxData *GetOrCreateAuxData()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pAuxData.Load())
+ return m_pAuxData;
+
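+        // Create-and-publish race: if another thread publishes first, the CAS
+        // fails and the NewHolder deletes our redundant instance; otherwise
+        // SuppressRelease keeps the one we just published.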
+ NewHolder<SimpleCCWAuxData> pAuxData = new SimpleCCWAuxData();
+ if (InterlockedCompareExchangeT(&m_pAuxData, (SimpleCCWAuxData *)pAuxData, NULL) == NULL)
+ pAuxData.SuppressRelease();
+
+ return m_pAuxData;
+ }
+
+private:
+ // Methods to initialize the DispatchEx and exception info.
+ void InitExceptionInfo();
+ void InitDispatchExInfo();
+
+ // Methods to set up the connection point list.
+ void SetUpCPList();
+ void SetUpCPListHelper(MethodTable **apSrcItfMTs, int cSrcItfs);
+ ConnectionPoint *CreateConnectionPoint(ComCallWrapper *pWrap, MethodTable *pEventMT);
+ ConnectionPoint *TryCreateConnectionPoint(ComCallWrapper *pWrap, MethodTable *pEventMT);
+ CQuickArray<ConnectionPoint*> *CreateCPArray();
+
+    // QI for well-known interfaces from within the runtime via direct fetch, instead of GUID comparisons
+ IUnknown* QIStandardInterface(Enum_StdInterfaces index);
+
+ // QI for well known interfaces from within the runtime based on an IID.
+ IUnknown* QIStandardInterface(REFIID riid);
+
+ // These values are never used at the same time, so we can save a few bytes for each CCW by using a union.
+ // Use the inline methods HasOverlappedRef(), MarkOverlappedRef(), and UnMarkOverlappedRef() to differentiate
+ // how this union is to be interpreted.
+ union
+ {
+ CQuickArray<ConnectionPoint*>* m_pCPList;
+ LPOVERLAPPED m_operlappedPtr;
+ };
+
+    // syncblock for the ObjectRef
+ SyncBlock* m_pSyncBlock;
+
+    // outer unknown cookie
+ IUnknown* m_pOuter;
+
+ // pointer to an array of std. vtables
+ SLOT const* m_rgpVtable[enum_LastStdVtable];
+
+ PTR_ComCallWrapper m_pWrap; // the first ComCallWrapper associated with this SimpleComCallWrapper
+ PTR_ComCallWrapper m_pClassWrap; // the first ComCallWrapper associated with the class (only if m_pMT is an interface)
+ MethodTable* m_pMT;
+ Context* m_pContext;
+ ComCallWrapperCache* m_pWrapperCache;
+ PTR_ComCallWrapperTemplate m_pTemplate;
+
+ // when we make the object agile, need to save off the original handle so we can clean
+ // it up when the object goes away.
+ // <TODO>Would be nice to overload one of the other values with this, but then
+ // would have to synchronize on it too</TODO>
+ OBJECTHANDLE m_hOrigDomainHandle;
+
+    // Points to uncommonly used data that is dynamically allocated
+ VolatilePtr<SimpleCCWAuxData> m_pAuxData;
+
+ ADID m_dwDomainId;
+
+ DWORD m_flags;
+
+ // This maintains both COM ref and Jupiter ref in 64-bit
+ LONGLONG m_llRefCount;
+
+ inline void MarkOverlappedRef()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ FastInterlockOr((ULONG*)&m_flags, enum_HasOverlappedRef);
+ }
+
+ inline void UnMarkOverlappedRef()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ FastInterlockAnd((ULONG*)&m_flags, ~enum_HasOverlappedRef);
+ }
+};
+
+inline OBJECTHANDLE ComCallWrapper::GetObjectHandle()
+{
+ CONTRACT (OBJECTHANDLE)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_COOPERATIVE;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN m_ppThis;
+}
+
+//--------------------------------------------------------------------------------
+// ComCallWrapper* ComCallWrapper::InlineGetWrapper(OBJECTREF* ppObj, ComCallWrapperTemplate *pTemplate)
+// returns the wrapper for the object, if not yet created, creates one
+// returns null for out of memory scenarios.
+// Note: the wrapper is returned AddRef'd and should be Released when finished
+// with.
+//--------------------------------------------------------------------------------
+inline ComCallWrapper* __stdcall ComCallWrapper::InlineGetWrapper(OBJECTREF* ppObj, ComCallWrapperTemplate *pTemplate /*= NULL*/,
+ ComCallWrapper *pClassCCW /*= NULL*/)
+{
+ CONTRACT (ComCallWrapper*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(ppObj));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // get the wrapper for this com+ object
+ ComCallWrapper* pWrap = GetWrapperForObject(*ppObj, pTemplate);
+
+ if (NULL == pWrap)
+ {
+ pWrap = CreateWrapper(ppObj, pTemplate, pClassCCW);
+ }
+ _ASSERTE(pTemplate == NULL || pTemplate == pWrap->GetSimpleWrapper()->GetComCallWrapperTemplate());
+
+ // All threads will have the same resulting CCW at this point, and
+ // they should all check to see if the CCW they got back is
+ // appropriate for the current AD. If not, then we must mark the
+ // CCW as agile.
+ // If we are creating a CCW that represents a variant interface, use the pClassCCW (which is the main CCW)
+ ComCallWrapper *pMainWrap;
+ if (pClassCCW)
+ pMainWrap = pClassCCW;
+ else
+ pMainWrap = pWrap;
+
+ pMainWrap->CheckMakeAgile(*ppObj);
+
+    // If the object is agile and this domain doesn't have UmgdCodePermission,
+    // fail the call.
+ if (pMainWrap->GetSimpleWrapper()->IsAgile())
+ pMainWrap->DoScriptingSecurityCheck();
+
+ pWrap->AddRef();
+
+ RETURN pWrap;
+}
+
+#ifndef CROSSGEN_COMPILE
+
+inline BOOL ComCallWrapper::NeedToSwitchDomains(Thread *pThread, ADID *pTargetADID, Context **ppTargetContext)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetSimpleWrapper()->NeedToSwitchDomains(pThread, pTargetADID, ppTargetContext);
+}
+
+inline BOOL ComCallWrapper::NeedToSwitchDomains(Thread *pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ return NeedToSwitchDomains(pThread->GetDomain()->GetId());
+}
+
+
+inline BOOL ComCallWrapper::NeedToSwitchDomains(ADID appdomainID)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetSimpleWrapper()->NeedToSwitchDomains(appdomainID);
+}
+
+#endif // CROSSGEN_COMPILE
+
+inline ADID ComCallWrapper::GetDomainID()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetSimpleWrapper()->GetDomainID();
+}
+
+
+inline void ComCallWrapper::CheckMakeAgile(OBJECTREF pObj)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (GetSimpleWrapper()->ShouldBeAgile())
+ MakeAgile(pObj);
+}
+
+inline ULONG ComCallWrapper::GetRefCount()
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ INSTANCE_CHECK;
+ }
+ CONTRACTL_END;
+
+ return m_pSimpleWrapper->GetRefCount();
+}
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+inline ULONG ComCallWrapper::AddRef()
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ SO_TOLERANT;
+ INSTANCE_CHECK;
+ }
+ CONTRACTL_END;
+
+ return m_pSimpleWrapper->AddRef();
+}
+
+inline ULONG ComCallWrapper::AddRefWithAggregationCheck()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pSimpleWrapper->AddRefWithAggregationCheck();
+}
+
+inline ULONG ComCallWrapper::Release()
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ SO_TOLERANT;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(m_pSimpleWrapper));
+ }
+ CONTRACTL_END;
+
+ return m_pSimpleWrapper->Release();
+}
+
+inline ULONG ComCallWrapper::AddJupiterRef()
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ INSTANCE_CHECK;
+ }
+ CONTRACTL_END;
+
+ return m_pSimpleWrapper->AddJupiterRef();
+}
+
+inline ULONG ComCallWrapper::ReleaseJupiterRef()
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(m_pSimpleWrapper));
+ }
+ CONTRACTL_END;
+
+ return m_pSimpleWrapper->ReleaseJupiterRef();
+}
+
+inline void ComCallWrapper::InitSimpleWrapper(ComCallWrapper* pWrap, SimpleComCallWrapper* pSimpleWrap)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrap));
+ PRECONDITION(CheckPointer(pSimpleWrap));
+ PRECONDITION(pSimpleWrap->GetMainWrapper() == pWrap);
+ }
+ CONTRACTL_END;
+
+ while (pWrap)
+ {
+ pWrap->m_pSimpleWrapper = pSimpleWrap;
+ pWrap = GetNext(pWrap);
+ }
+}
+
+inline void ComCallWrapper::ClearSimpleWrapper(ComCallWrapper* pWrap)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrap));
+ }
+ CONTRACTL_END;
+
+ // clear the m_pSimpleWrapper field in all wrappers that share the same SimpleComCallWrapper
+ SimpleComCallWrapper *pSimpleWrapper = pWrap->m_pSimpleWrapper;
+
+ while (pWrap && pWrap->m_pSimpleWrapper == pSimpleWrapper)
+ {
+ pWrap->m_pSimpleWrapper = NULL;
+ pWrap = GetNext(pWrap);
+ }
+}
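+
+// Illustrative only: both helpers above walk the chain of linked wrapper
+// blocks that share one SimpleComCallWrapper, following GetNext from block to
+// block. The traversal shape, sketched:
+//
+//   for (ComCallWrapper *p = pStart; p != NULL; p = GetNext(p))
+//       ; // every block visited shares the same m_pSimpleWrapper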
+#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
+
+inline BOOL ComCallWrapper::IsPegged()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ }
+ CONTRACTL_END;
+
+ return m_pSimpleWrapper->IsPegged();
+}
+
+inline BOOL ComCallWrapper::IsConsideredPegged()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_pSimpleWrapper->IsPegged() || RCWWalker::IsGlobalPeggingOn();
+}
+
+inline ULONG ComCallWrapper::GetJupiterRefCount()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ }
+ CONTRACTL_END;
+
+ return m_pSimpleWrapper->GetJupiterRefCount();
+}
+
+
+
+inline PTR_ComCallWrapper ComCallWrapper::GetWrapperFromIP(PTR_IUnknown pUnk)
+{
+ CONTRACT (PTR_ComCallWrapper)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pUnk));
+ POSTCONDITION(CheckPointer(RETVAL));
+ SUPPORTS_DAC;
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ // This code path may be exercised from out-of-process. Unfortunately, we need to manipulate the
+ // target address here, and so we need to do some non-trivial casting. First, cast the PTR type
+ // to the target address, then mask off the least significant bits, and finally use the result
+ // as a target address to instantiate a ComCallWrapper. The line below is equivalent to:
+ // ComCallWrapper* pWrap = (ComCallWrapper*)((size_t)pUnk & enum_ThisMask);
+ PTR_ComCallWrapper pWrap = dac_cast<PTR_ComCallWrapper>(dac_cast<TADDR>(pUnk) & enum_ThisMask);
+
+ // Use class wrapper if this CCW represents a variant interface
+ PTR_ComCallWrapper pClassWrapper = pWrap->GetSimpleWrapper()->m_pClassWrap;
+ if (pClassWrapper)
+ {
+ _ASSERTE(pClassWrapper->GetSimpleWrapper()->m_pClassWrap == NULL);
+
+ RETURN pClassWrapper;
+ }
+
+ RETURN pWrap;
+}
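+
+// Illustrative only: the masking above works because wrappers are allocated
+// with enough alignment that the low bits of any interface pointer inside them
+// can be discarded to recover the wrapper's base address. A sketch of the same
+// idea with a hypothetical mask value (the real one is enum_ThisMask):
+//
+//   const size_t kMask = ~(size_t)0x3F;  // assumes 64-byte-aligned wrappers
+//   ComCallWrapper *pWrap = (ComCallWrapper*)((size_t)pSomeItf & kMask);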
+
+//--------------------------------------------------------------------------
+// PTR_ComCallWrapper ComCallWrapper::GetStartWrapper(PTR_ComCallWrapper pWrap)
+// Get the outermost (start) wrapper, given a linked wrapper, by going through
+// its SimpleComCallWrapper.
+//--------------------------------------------------------------------------
+inline PTR_ComCallWrapper ComCallWrapper::GetStartWrapper(PTR_ComCallWrapper pWrap)
+{
+ CONTRACT (PTR_ComCallWrapper)
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pWrap));
+ PRECONDITION(pWrap->IsLinked());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ PTR_SimpleComCallWrapper pSimpleWrap = pWrap->GetSimpleWrapper();
+ RETURN (pSimpleWrap->GetMainWrapper());
+}
+
+//--------------------------------------------------------------------------
+// PTR_ComCallWrapperTemplate ComCallWrapper::GetComCallWrapperTemplate()
+inline PTR_ComCallWrapperTemplate ComCallWrapper::GetComCallWrapperTemplate()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetSimpleWrapper()->GetComCallWrapperTemplate();
+}
+
+//--------------------------------------------------------------------------
+// BOOL ComCallWrapper::IsHandleWeak()
+// check if the wrapper has been deactivated
+// Moved here to make DAC build happy and hopefully get it inlined
+//--------------------------------------------------------------------------
+inline BOOL ComCallWrapper::IsHandleWeak()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SimpleComCallWrapper* pSimpleWrap = GetSimpleWrapper();
+ _ASSERTE(pSimpleWrap);
+
+ return pSimpleWrap->IsHandleWeak();
+}
+
+inline BOOL ComCallWrapper::IsWrapperActive()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Since it's called by GCPromote, we assume that this is the start wrapper.
+
+ LONGLONG llRefCount = m_pSimpleWrapper->GetRealRefCount();
+ ULONG cbRef = GET_COM_REF(llRefCount);
+ ULONG cbJupiterRef = GET_JUPITER_REF(llRefCount);
+
+ // We only consider jupiter ref count to be a "strong" ref count if it is pegged and it is alive
+ // Note that there is no concern for resurrecting this CCW in the next Gen0/1 GC
+ // because this CCW will be promoted to Gen 2 very quickly
+ BOOL bHasJupiterStrongRefCount = (cbJupiterRef > 0 && IsConsideredPegged());
+
+ BOOL bHasStrongCOMRefCount = ((cbRef > 0) || bHasJupiterStrongRefCount);
+
+ BOOL bIsWrapperActive = (bHasStrongCOMRefCount && !IsHandleWeak());
+
+ LOG((LF_INTEROP, LL_INFO1000,
+ "CCW 0x%p: cbRef = 0x%x, cbJupiterRef = 0x%x, IsPegged = %d, GlobalPegging = %d, IsHandleWeak = %d\n",
+ this,
+ cbRef, cbJupiterRef, IsPegged(), RCWWalker::IsGlobalPeggingOn(), IsHandleWeak()));
+ LOG((LF_INTEROP, LL_INFO1000, "CCW 0x%p: IsWrapperActive returned %d\n", this, bIsWrapperActive));
+
+ return bIsWrapperActive;
+}
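+
+// Illustrative only: GET_COM_REF and GET_JUPITER_REF above unpack one 64-bit
+// value read in a single operation, so the COM and Jupiter counts form a
+// consistent snapshot. A sketch of such a packing (the field layout here is an
+// assumption, not necessarily the engine's actual encoding):
+//
+//   LONGLONG llRef = ...;                               // one atomic read
+//   ULONG cbRef        = (ULONG)(llRef & 0xFFFFFFFF);   // low half: COM refs
+//   ULONG cbJupiterRef = (ULONG)(llRef >> 32);          // high half: Jupiter refs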
+
+
+#endif // FEATURE_COMINTEROP
+
+#endif // _COMCALLABLEWRAPPER_H
diff --git a/src/vm/comconnectionpoints.cpp b/src/vm/comconnectionpoints.cpp
new file mode 100644
index 0000000000..8de6a06415
--- /dev/null
+++ b/src/vm/comconnectionpoints.cpp
@@ -0,0 +1,1309 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: ComConnectionPoints.cpp
+//
+
+// ===========================================================================
+// Implementation of the classes used to expose connection points to COM.
+// ===========================================================================
+
+
+#include "common.h"
+
+#include "comconnectionpoints.h"
+#include "comcallablewrapper.h"
+
+//------------------------------------------------------------------------------------------
+// Implementation of helper class used to expose connection points
+//------------------------------------------------------------------------------------------
+
+void ConnectionPoint::Advise_Wrapper(LPVOID ptr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ Advise_Args *pArgs = (Advise_Args *)ptr;
+ pArgs->pThis->AdviseWorker(pArgs->pUnk, pArgs->pdwCookie);
+}
+
+void ConnectionPoint::Unadvise_Wrapper(LPVOID ptr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ Unadvise_Args *pArgs = (Unadvise_Args *)ptr;
+ pArgs->pThis->UnadviseWorker(pArgs->dwCookie);
+}
+
+void ConnectionPoint::GetConnectionPointContainer_Wrapper(LPVOID ptr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ GetConnectionPointContainer_Args *pArgs = (GetConnectionPointContainer_Args *)ptr;
+ *pArgs->ppCPC = pArgs->pThis->GetConnectionPointContainerWorker();
+}
+
+ConnectionPoint::ConnectionPoint(ComCallWrapper *pWrap, MethodTable *pEventMT)
+: m_pOwnerWrap(pWrap)
+, m_pTCEProviderMT(pWrap->GetSimpleWrapper()->GetMethodTable())
+, m_pEventItfMT(pEventMT)
+, m_Lock(CrstInterop)
+, m_cbRefCount(0)
+, m_apEventMethods(NULL)
+, m_NumEventMethods(0)
+, m_pLastInserted(NULL)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrap));
+ PRECONDITION(CheckPointer(pEventMT));
+ }
+ CONTRACTL_END;
+
+ // Retrieve the connection IID.
+ pEventMT->GetGuid(&m_rConnectionIID, TRUE);
+
+ // Set up the event methods.
+ SetupEventMethods();
+}
+
+ConnectionPoint::~ConnectionPoint()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_apEventMethods)
+ delete []m_apEventMethods;
+}
+
+HRESULT __stdcall ConnectionPoint::QueryInterface(REFIID riid, void** ppv)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(ppv, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Verify the arguments.
+ if (!ppv)
+ return E_POINTER;
+
+ // Initialize the out parameters.
+ *ppv = NULL;
+
+ SetupForComCallHR();
+
+ if (riid == IID_IConnectionPoint)
+ {
+ *ppv = static_cast<IConnectionPoint*>(this);
+ }
+ else if (riid == IID_IUnknown)
+ {
+ *ppv = static_cast<IUnknown*>(this);
+ }
+ else
+ {
+ return E_NOINTERFACE;
+ }
+
+ ULONG cbRef = SafeAddRefPreemp((IUnknown*)*ppv);
+ //@TODO(CLE) AddRef logging that doesn't use QI
+
+ return S_OK;
+}
+
+ULONG __stdcall ConnectionPoint::AddRef()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ SetupForComCallHR();
+
+ // The connection point objects share the CCW's ref count.
+ return m_pOwnerWrap->AddRef();
+}
+
+ULONG __stdcall ConnectionPoint::Release()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ SetupForComCallHR();
+
+ HRESULT hr = S_OK;
+ ULONG cbRef = -1;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ // The connection point objects share the CCW's ref count.
+ cbRef = m_pOwnerWrap->Release();
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return cbRef;
+}
+
+HRESULT __stdcall ConnectionPoint::GetConnectionInterface(IID *pIID)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pIID, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Verify the arguments.
+ if (!pIID)
+ return E_POINTER;
+
+ // Initialize the out parameters.
+ *pIID = GUID_NULL;
+
+ SetupForComCallHR();
+
+ *pIID = m_rConnectionIID;
+ return S_OK;
+}
+
+HRESULT __stdcall ConnectionPoint::GetConnectionPointContainer(IConnectionPointContainer **ppCPC)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(ppCPC, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Verify the arguments.
+ if (!ppCPC)
+ return E_POINTER;
+
+ // Initialize the out parameters.
+ *ppCPC = NULL;
+
+ SetupForComCallHR();
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+ Thread *pThread = GET_THREAD();
+
+ ADID targetADID;
+ Context *pTargetContext;
+ if (m_pOwnerWrap->NeedToSwitchDomains(pThread, &targetADID, &pTargetContext))
+ {
+ GetConnectionPointContainer_Args args = {this, ppCPC};
+ pThread->DoContextCallBack(targetADID, pTargetContext, GetConnectionPointContainer_Wrapper, &args);
+ }
+ else
+ {
+ *ppCPC = GetConnectionPointContainerWorker();
+ }
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+HRESULT __stdcall ConnectionPoint::Advise(IUnknown *pUnk, DWORD *pdwCookie)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk, NULL_OK));
+ PRECONDITION(CheckPointer(pdwCookie, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Verify the arguments.
+ if (!pUnk || !pdwCookie)
+ return E_POINTER;
+
+ // Initialize the out parameters.
+ *pdwCookie = NULL;
+
+ SetupForComCallHR();
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+ Thread *pThread = GET_THREAD();
+
+ ADID targetADID;
+ Context *pTargetContext;
+ if (m_pOwnerWrap->NeedToSwitchDomains(pThread, &targetADID, &pTargetContext))
+ {
+ Advise_Args args = {this, pUnk, pdwCookie};
+ pThread->DoContextCallBack(targetADID, pTargetContext, Advise_Wrapper, &args);
+ }
+ else
+ {
+ AdviseWorker(pUnk, pdwCookie);
+ }
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+HRESULT __stdcall ConnectionPoint::Unadvise(DWORD dwCookie)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Verify the arguments.
+ if (dwCookie == 0)
+ return CONNECT_E_NOCONNECTION;
+
+ SetupForComCallHR();
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+ Thread *pThread = GET_THREAD();
+
+ ADID targetADID;
+ Context *pTargetContext;
+ if (m_pOwnerWrap->NeedToSwitchDomains(pThread, &targetADID, &pTargetContext))
+ {
+ Unadvise_Args args = {this, dwCookie};
+ pThread->DoContextCallBack(targetADID, pTargetContext, Unadvise_Wrapper, &args);
+ }
+ else
+ {
+ UnadviseWorker(dwCookie);
+ }
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+HRESULT __stdcall ConnectionPoint::EnumConnections(IEnumConnections **ppEnum)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(ppEnum, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Verify the arguments.
+ if (!ppEnum)
+ return E_POINTER;
+
+ // Initialize the out parameters.
+ *ppEnum = NULL;
+
+ SetupForComCallHR();
+
+ ConnectionEnum *pConEnum = new(nothrow) ConnectionEnum(this);
+ if (!pConEnum)
+ return E_OUTOFMEMORY;
+
+ // Retrieve the IEnumConnections interface. This cannot fail.
+ HRESULT hr = SafeQueryInterfacePreemp((IUnknown*)pConEnum, IID_IEnumConnections, (IUnknown**)ppEnum);
+ LogInteropQI((IUnknown*)pConEnum, IID_IEnumConnections, hr, "ConnectionPoint::EnumConnections: QIing for IID_IEnumConnections");
+ _ASSERTE(hr == S_OK);
+
+ return hr;
+}
+
+IConnectionPointContainer *ConnectionPoint::GetConnectionPointContainerWorker()
+{
+ CONTRACT(IConnectionPointContainer*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // Retrieve the IConnectionPointContainer from the owner wrapper.
+ RETURN (IConnectionPointContainer*)
+ ComCallWrapper::GetComIPFromCCW(m_pOwnerWrap, IID_IConnectionPointContainer, NULL);
+}
+
+void ConnectionPoint::AdviseWorker(IUnknown *pUnk, DWORD *pdwCookie)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(pdwCookie));
+ }
+ CONTRACTL_END;
+
+ SafeComHolder<IUnknown> pEventItf = NULL;
+ HRESULT hr;
+
+ // Make sure we have a pointer to the interface and not to another IUnknown.
+ hr = SafeQueryInterface(pUnk, m_rConnectionIID, &pEventItf );
+ LogInteropQI(pUnk, m_rConnectionIID, hr, "ConnectionPoint::AdviseWorker: QIing for correct interface");
+
+ if (FAILED(hr) || !pEventItf)
+ COMPlusThrowHR(CONNECT_E_CANNOTCONNECT);
+
+ COMOBJECTREF pEventItfObj = NULL;
+ OBJECTREF pTCEProviderObj = NULL;
+
+ GCPROTECT_BEGIN(pEventItfObj)
+ GCPROTECT_BEGIN(pTCEProviderObj)
+ {
+ // Create a COM+ object ref to wrap the event interface.
+ GetObjectRefFromComIP((OBJECTREF*)&pEventItfObj, pUnk, NULL);
+ IfNullThrow(pEventItfObj);
+
+ // Get the TCE provider COM+ object from the wrapper
+ pTCEProviderObj = m_pOwnerWrap->GetObjectRef();
+
+ for (int cEventMethod = 0; cEventMethod < m_NumEventMethods; cEventMethod++)
+ {
+ // If the managed object supports the event, call the AddEventX method.
+ if (m_apEventMethods[cEventMethod].m_pEventMethod)
+ InvokeProviderMethod( pTCEProviderObj, (OBJECTREF) pEventItfObj, m_apEventMethods[cEventMethod].m_pAddMethod, m_apEventMethods[cEventMethod].m_pEventMethod );
+ }
+
+ // Allocate the object handle and the connection cookie.
+ OBJECTHANDLEHolder phndEventItfObj = GetAppDomain()->CreateHandle((OBJECTREF)pEventItfObj);
+ ConnectionCookieHolder pConCookie = ConnectionCookie::CreateConnectionCookie(phndEventItfObj);
+
+ // pConCookie owns the handle now and will destroy it on exception
+ phndEventItfObj.SuppressRelease();
+
+ // Add the connection cookie to the list.
+ InsertWithLock(pConCookie);
+
+ // Everything went ok so hand back the cookie id.
+ *pdwCookie = pConCookie->m_id;
+
+ pConCookie.SuppressRelease();
+ }
+ GCPROTECT_END();
+ GCPROTECT_END();
+}
+
+void ConnectionPoint::UnadviseWorker(DWORD dwCookie)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ COMOBJECTREF pEventItfObj = NULL;
+ OBJECTREF pTCEProviderObj = NULL;
+
+ GCPROTECT_BEGIN(pEventItfObj)
+ GCPROTECT_BEGIN(pTCEProviderObj)
+ {
+ // The cookie is actually a connection cookie.
+ ConnectionCookieHolder pConCookie = FindWithLock(dwCookie);
+
+ // Retrieve the COM+ object from the cookie which in fact is the object handle.
+ pEventItfObj = (COMOBJECTREF) ObjectFromHandle(pConCookie->m_hndEventProvObj);
+ if (!pEventItfObj)
+ COMPlusThrowHR(E_INVALIDARG);
+
+ // Get the object from the wrapper
+ pTCEProviderObj = m_pOwnerWrap->GetObjectRef();
+
+ for (int cEventMethod = 0; cEventMethod < m_NumEventMethods; cEventMethod++)
+ {
+ // If the managed object supports the event, call the RemoveEventX method.
+ if (m_apEventMethods[cEventMethod].m_pEventMethod)
+ {
+ InvokeProviderMethod(pTCEProviderObj, (OBJECTREF) pEventItfObj, m_apEventMethods[cEventMethod].m_pRemoveMethod, m_apEventMethods[cEventMethod].m_pEventMethod);
+ }
+ }
+
+ // Remove the connection cookie from the list.
+ FindAndRemoveWithLock(pConCookie);
+ }
+ GCPROTECT_END();
+ GCPROTECT_END();
+}
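+
+// Illustrative only: a sketch of how a native client drives the two workers
+// above through the public IConnectionPoint methods (error handling elided;
+// pSink is a hypothetical sink object implementing the source interface):
+//
+//   DWORD dwCookie = 0;
+//   hr = pCP->Advise(pSink, &dwCookie);  // hook the sink up to the managed events
+//   ...
+//   hr = pCP->Unadvise(dwCookie);        // unhook using the cookie handed back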
+
+void ConnectionPoint::SetupEventMethods()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ // Keep track of the number of unsupported events.
+ int cNonSupportedEvents = 0;
+
+ // Retrieve the total number of event methods present on the source interface.
+ int cMaxNumEventMethods = m_pEventItfMT->GetNumMethods();
+
+ // If there are no methods then there is nothing to do.
+ if (cMaxNumEventMethods == 0)
+ return;
+
+ // Allocate the event method tables.
+ NewArrayHolder<EventMethodInfo> EventMethodInfos = new EventMethodInfo[cMaxNumEventMethods];
+
+ // Find all the real event methods needed to be able to advise on the current connection point.
+ int NumEventMethods = 0;
+ for (int cEventMethod = 0; cEventMethod < cMaxNumEventMethods; cEventMethod++)
+ {
+ // Retrieve the method descriptor for the current method on the event interface.
+ MethodDesc *pEventMethodDesc = m_pEventItfMT->GetMethodDescForSlot(cEventMethod);
+ if (!pEventMethodDesc)
+ continue;
+
+ // Store the event method on the source interface.
+ EventMethodInfos[NumEventMethods].m_pEventMethod = pEventMethodDesc;
+
+ // Retrieve and store the add and remove methods for the event.
+ EventMethodInfos[NumEventMethods].m_pAddMethod = FindProviderMethodDesc(pEventMethodDesc, EventAdd);
+ EventMethodInfos[NumEventMethods].m_pRemoveMethod = FindProviderMethodDesc(pEventMethodDesc, EventRemove);
+
+ // Make sure we have found both the add and the remove methods.
+ if (!EventMethodInfos[NumEventMethods].m_pAddMethod || !EventMethodInfos[NumEventMethods].m_pRemoveMethod)
+ {
+ cNonSupportedEvents++;
+ continue;
+ }
+
+ // Increment the real number of event methods on the source interface.
+ NumEventMethods++;
+ }
+
+ // If the interface has methods and the object does not support any of them,
+ // we fail the connection.
+ if ((NumEventMethods == 0) && (cNonSupportedEvents > 0))
+ COMPlusThrowHR(CONNECT_E_NOCONNECTION);
+
+ // Now that the struct is totally setup, we'll set the members.
+ m_NumEventMethods = NumEventMethods;
+ m_apEventMethods = EventMethodInfos;
+ EventMethodInfos.SuppressRelease();
+}
+
+MethodDesc *ConnectionPoint::FindProviderMethodDesc( MethodDesc *pEventMethodDesc, EnumEventMethods Method )
+{
+ CONTRACT (MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pEventMethodDesc));
+ PRECONDITION(Method == EventAdd || Method == EventRemove);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END
+
+ // Retrieve the event method.
+ MethodDesc *pProvMethodDesc =
+ MemberLoader::FindEventMethod(m_pTCEProviderMT, pEventMethodDesc->GetName(), Method, MemberLoader::FM_IgnoreCase);
+ if (!pProvMethodDesc)
+ RETURN NULL;
+
+ // Validate that the signature of the delegate is the expected signature.
+ MetaSig Sig(pProvMethodDesc);
+ if (Sig.NextArg() != ELEMENT_TYPE_CLASS)
+ RETURN NULL;
+
+ // <TODO>@TODO: this ignores the type of failure - try GetLastTypeHandleThrowing()</TODO>
+ TypeHandle DelegateType = Sig.GetLastTypeHandleNT();
+ if (DelegateType.IsNull())
+ RETURN NULL;
+
+ PCCOR_SIGNATURE pEventMethSig;
+ DWORD cEventMethSig;
+ pEventMethodDesc->GetSig(&pEventMethSig, &cEventMethSig);
+ MethodDesc *pInvokeMD = MemberLoader::FindMethod(DelegateType.GetMethodTable(),
+ "Invoke",
+ pEventMethSig,
+ cEventMethSig,
+ pEventMethodDesc->GetModule());
+
+ if (!pInvokeMD)
+ RETURN NULL;
+
+ // The requested method exists and has the appropriate signature.
+ RETURN pProvMethodDesc;
+}
+
+void ConnectionPoint::InvokeProviderMethod( OBJECTREF pProvider, OBJECTREF pSubscriber, MethodDesc *pProvMethodDesc, MethodDesc *pEventMethodDesc )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pProvMethodDesc));
+ PRECONDITION(CheckPointer(pEventMethodDesc));
+ }
+ CONTRACTL_END;
+
+ GCPROTECT_BEGIN (pSubscriber);
+ GCPROTECT_BEGIN (pProvider);
+ {
+ // Create a method signature to extract the type of the delegate.
+ MetaSig MethodSig( pProvMethodDesc);
+ _ASSERTE( 1 == MethodSig.NumFixedArgs() );
+
+ // Go to the first argument.
+ CorElementType ArgType = MethodSig.NextArg();
+ _ASSERTE( ELEMENT_TYPE_CLASS == ArgType );
+
+ // Retrieve the EE class representing the argument.
+ MethodTable *pDelegateCls = MethodSig.GetLastTypeHandleThrowing().GetMethodTable();
+
+ // Make sure we activate the assembly containing the target method desc
+ pEventMethodDesc->EnsureActive();
+
+ // Allocate an object based on the method table of the delegate class.
+ OBJECTREF pDelegate = pDelegateCls->Allocate();
+
+ GCPROTECT_BEGIN( pDelegate );
+ {
+ // Initialize the delegate using the arguments structure.
+ // <TODO>Generics: ensure we get the right MethodDesc here and in similar places</TODO>
+ // Accept both void (object, native int) and void (object, native uint)
+ MethodDesc *pDlgCtorMD = MemberLoader::FindConstructor(pDelegateCls, &gsig_IM_Obj_IntPtr_RetVoid);
+ if (pDlgCtorMD == NULL)
+ pDlgCtorMD = MemberLoader::FindConstructor(pDelegateCls, &gsig_IM_Obj_UIntPtr_RetVoid);
+
+ // The loader is responsible for only accepting well-formed delegate classes.
+ _ASSERTE(pDlgCtorMD);
+
+ MethodDescCallSite dlgCtor(pDlgCtorMD);
+
+ ARG_SLOT CtorArgs[3] = { ObjToArgSlot(pDelegate),
+ ObjToArgSlot(pSubscriber),
+ (ARG_SLOT)pEventMethodDesc->GetMultiCallableAddrOfCode()
+ };
+ dlgCtor.Call(CtorArgs);
+
+ MethodDescCallSite prov(pProvMethodDesc, &pProvider);
+
+ // Do the actual invocation of the provider method.
+ ARG_SLOT Args[2] = { ObjToArgSlot( pProvider ), ObjToArgSlot( pDelegate ) };
+ prov.Call(Args);
+ }
+ GCPROTECT_END();
+ }
+ GCPROTECT_END();
+ GCPROTECT_END();
+}
+
+void ConnectionPoint::InsertWithLock(ConnectionCookie* pConCookie)
+{
+ WRAPPER_NO_CONTRACT;
+
+ LockHolder lh(this);
+
+ bool fDone = false;
+
+ //
+ // handle special cases
+ //
+
+ if (NULL == m_pLastInserted)
+ {
+ //
+ // Special case 1: List is empty.
+ //
+ CONSISTENCY_CHECK(NULL == m_ConnectionList.GetHead());
+
+ pConCookie->m_id = 1;
+ m_ConnectionList.InsertHead(pConCookie);
+ fDone = true;
+ }
+
+ if (!fDone && ((NULL != m_pLastInserted->m_Link.m_pNext) || (idUpperLimit == m_pLastInserted->m_id)))
+ {
+ //
+ // Special case 2: Last inserted is somewhere in the middle of the list or we last
+ // inserted the max token id (we've wrapped around) and ID 1 is not
+ // taken.
+ //
+ CONSISTENCY_CHECK(NULL != m_ConnectionList.GetHead());
+
+ if (1 != m_ConnectionList.GetHead()->m_id)
+ {
+ // if ID 1 is not taken, we can just insert there.
+ pConCookie->m_id = 1;
+ m_ConnectionList.InsertHead(pConCookie);
+ fDone = true;
+ }
+ }
+
+ //
+ // General cases
+ //
+ if (!fDone)
+ {
+ ConnectionCookie* pLocationToStartSearchForInsertPoint = NULL;
+ ConnectionCookie* pInsertionPoint = NULL;
+
+ if (NULL == m_pLastInserted->m_Link.m_pNext)
+ {
+ if (idUpperLimit == m_pLastInserted->m_id)
+ {
+ CONSISTENCY_CHECK(1 == m_ConnectionList.GetHead()->m_id); // should be handled by special case #2
+
+ // we need to wrap around
+ // scan from head for first hole, insert there
+ pLocationToStartSearchForInsertPoint = m_ConnectionList.GetHead();
+ }
+ else
+ {
+ // Most common case: insert at tail, incrementing ID
+ pInsertionPoint = m_pLastInserted;
+ pLocationToStartSearchForInsertPoint = NULL; // don't do any searching, just append
+ }
+ }
+ else
+ {
+ // scan from m_pLastInserted for first hole, insert there
+ pLocationToStartSearchForInsertPoint = m_pLastInserted;
+ }
+
+ if (NULL != pLocationToStartSearchForInsertPoint)
+ {
+ // Starting from pLocationToStartSearchForInsertPoint, scan list to find a
+ // discontinuity in the IDs. Insert there.
+
+ ConnectionCookie* pCurrentNode = pLocationToStartSearchForInsertPoint;
+
+ //
+ // limit case is where we've wrapped around the whole list and found the initial start point again.
+ //
+ while (true)
+ {
+ if (NULL == pCurrentNode->m_Link.m_pNext)
+ {
+ if (pCurrentNode->m_id < idUpperLimit)
+ {
+ // if we reach the end of the list and we have free IDs, let's use them.
+ break;
+ }
+ pCurrentNode = m_ConnectionList.GetHead();
+ }
+ else
+ {
+ ConnectionCookie* pNext = CONTAINING_RECORD(pCurrentNode->m_Link.m_pNext, ConnectionCookie, m_Link);
+ if ((pCurrentNode->m_id + 1) < pNext->m_id)
+ {
+ break;
+ }
+ pCurrentNode = pNext;
+ }
+
+ if (pCurrentNode == pLocationToStartSearchForInsertPoint)
+ {
+ // we came back to the node where we started which means that there's no gap and
+ // all IDs up to idUpperLimit are taken
+ EX_THROW(HRException, (CONNECT_E_ADVISELIMIT));
+ }
+ }
+
+ pInsertionPoint = pCurrentNode;
+ }
+
+
+ CONSISTENCY_CHECK(NULL != pInsertionPoint);
+ CONSISTENCY_CHECK(idUpperLimit != pInsertionPoint->m_id);
+
+#ifdef _DEBUG
+ ConnectionCookie* pNextCookieNode = CONTAINING_RECORD(pInsertionPoint->m_Link.m_pNext, ConnectionCookie, m_Link);
+ DWORD idNew = pInsertionPoint->m_id + 1;
+ CONSISTENCY_CHECK(NULL == pNextCookieNode ||
+ ((pInsertionPoint->m_id < idNew) &&
+ (idNew < pNextCookieNode->m_id)));
+#endif // _DEBUG
+
+ pConCookie->m_id = pInsertionPoint->m_id + 1;
+ pInsertionPoint->m_Link.InsertAfter(&pConCookie->m_Link);
+ }
+
+ m_pLastInserted = pConCookie;
+}
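+
+// Illustrative only: in the common case the routine above hands out
+// m_pLastInserted->m_id + 1; after wrapping past idUpperLimit it scans the
+// ID-sorted list for the first gap. The invariant it relies on, sketched over
+// a plain sorted array of IDs:
+//
+//   DWORD next = 1;                  // smallest candidate ID
+//   for (DWORD id : ids)             // ids sorted ascending
+//   {
+//       if (id != next) break;       // found a hole
+//       next++;
+//   }
+//   // 'next' is the first unused ID (or max+1 if the prefix is dense)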
+
+ConnectionCookie* ConnectionPoint::FindWithLock(DWORD idOfCookie)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ConnectionCookie* pCurrentNode;
+
+ {
+ LockHolder lh(this);
+
+ pCurrentNode = m_ConnectionList.GetHead();
+
+ while (pCurrentNode && (pCurrentNode->m_id != idOfCookie))
+ {
+ pCurrentNode = CONTAINING_RECORD(pCurrentNode->m_Link.m_pNext, ConnectionCookie, m_Link);
+ }
+ }
+
+ if (NULL == pCurrentNode)
+ {
+ EX_THROW(HRException, (CONNECT_E_NOCONNECTION));
+ }
+
+ return pCurrentNode;
+}
+
+
+void ConnectionPoint::FindAndRemoveWithLock(ConnectionCookie* pConCookie)
+{
+ WRAPPER_NO_CONTRACT;
+
+ LockHolder lh(this);
+
+ m_ConnectionList.FindAndRemove(pConCookie);
+
+ if (pConCookie == m_pLastInserted)
+ {
+ m_pLastInserted = m_ConnectionList.GetHead();
+ }
+}
+
+ConnectionPointEnum::ConnectionPointEnum(ComCallWrapper *pOwnerWrap, CQuickArray<ConnectionPoint*> *pCPList)
+: m_pOwnerWrap(pOwnerWrap)
+, m_pCPList(pCPList)
+, m_CurrPos(0)
+, m_cbRefCount(0)
+, m_Lock(CrstInterop)
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_pOwnerWrap->AddRef();
+}
+
+ConnectionPointEnum::~ConnectionPointEnum()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pOwnerWrap)
+ m_pOwnerWrap->Release();
+}
+
+HRESULT __stdcall ConnectionPointEnum::QueryInterface(REFIID riid, void** ppv)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(ppv, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Verify the arguments.
+ if (!ppv)
+ return E_POINTER;
+
+ // Initialize the out parameters.
+ *ppv = NULL;
+
+ SetupForComCallHR();
+
+ if (riid == IID_IEnumConnectionPoints)
+ {
+ *ppv = static_cast<IEnumConnectionPoints*>(this);
+ }
+ else if (riid == IID_IUnknown)
+ {
+ *ppv = static_cast<IUnknown*>(this);
+ }
+ else
+ {
+ return E_NOINTERFACE;
+ }
+
+ ULONG cbRef = SafeAddRefPreemp((IUnknown*)*ppv);
+ //@TODO(CLE) AddRef logging that doesn't use QI
+
+ return S_OK;
+}
+
+ULONG __stdcall ConnectionPointEnum::AddRef()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ SetupForComCallHR();
+
+ LONG i = FastInterlockIncrement((LONG*)&m_cbRefCount );
+ return i;
+}
+
+ULONG __stdcall ConnectionPointEnum::Release()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ SetupForComCallHR();
+
+ HRESULT hr = S_OK;
+ ULONG cbRef = -1;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ cbRef = FastInterlockDecrement((LONG*)&m_cbRefCount );
+ _ASSERTE(cbRef >=0);
+ if (cbRef == 0)
+ delete this;
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return cbRef;
+}
+
+HRESULT __stdcall ConnectionPointEnum::Next(ULONG cConnections, IConnectionPoint **ppCP, ULONG *pcFetched)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(ppCP, NULL_OK));
+ PRECONDITION(CheckPointer(pcFetched, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Verify the arguments.
+ if (NULL == ppCP)
+ return E_POINTER;
+
+ // Initialize the out parameters.
+ if (pcFetched)
+ *pcFetched = 0;
+
+ SetupForComCallHR();
+
+ UINT cFetched;
+
+ // Acquire the lock before we start traversing the connection point list.
+ {
+ LockHolder lh(this);
+
+ for (cFetched = 0; cFetched < cConnections && m_CurrPos < m_pCPList->Size(); cFetched++, m_CurrPos++)
+ {
+ ppCP[cFetched] = (*m_pCPList)[m_CurrPos];
+ SafeAddRefPreemp(ppCP[cFetched]);
+ }
+
+ if (pcFetched)
+ *pcFetched = cFetched;
+ }
+
+ return cFetched == cConnections ? S_OK : S_FALSE;
+}
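+
+// Illustrative only: the standard consumption loop for this enumerator. Next
+// returns S_FALSE, a success code, once fewer than the requested number of
+// connection points remain, so callers should loop on the fetched count:
+//
+//   IConnectionPoint *rgCP[8];
+//   ULONG cFetched = 0;
+//   while (SUCCEEDED(pEnum->Next(8, rgCP, &cFetched)) && cFetched > 0)
+//   {
+//       for (ULONG i = 0; i < cFetched; i++)
+//           rgCP[i]->Release();      // caller owns each fetched reference
+//   }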
+
+HRESULT __stdcall ConnectionPointEnum::Skip(ULONG cConnections)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ SetupForComCallHR();
+
+ // Acquire the lock before we start traversing the connection point list.
+ {
+ LockHolder lh(this);
+
+ if(m_CurrPos + cConnections <= m_pCPList->Size())
+ {
+ // There are enough connection points left in the list to allow
+ // us to skip the required number.
+ m_CurrPos += cConnections;
+ return S_OK;
+ }
+ else
+ {
+ // There aren't enough connection points left so set the current
+ // position to the end of the list and return S_FALSE to indicate
+ // we couldn't skip the requested number.
+ m_CurrPos = (UINT)m_pCPList->Size();
+ return S_FALSE;
+ }
+ }
+}
+
+HRESULT __stdcall ConnectionPointEnum::Reset()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ SetupForComCallHR();
+
+ // Acquire the lock before we start traversing the connection point list.
+ {
+ LockHolder lh(this);
+ m_CurrPos = 0;
+ }
+
+ return S_OK;
+}
+
+HRESULT __stdcall ConnectionPointEnum::Clone(IEnumConnectionPoints **ppEnum)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(ppEnum, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Verify the arguments.
+ if (!ppEnum)
+ return E_POINTER;
+
+ // Initialize the out parameters.
+ *ppEnum = NULL;
+
+ SetupForComCallHR();
+
+ ConnectionPointEnum *pCPEnum;
+ {
+ CONTRACT_VIOLATION(ThrowsViolation); //ConnectionPointEnum throws
+ pCPEnum = new(nothrow) ConnectionPointEnum(m_pOwnerWrap, m_pCPList);
+ }
+ if (!pCPEnum)
+ return E_OUTOFMEMORY;
+
+ HRESULT hr = SafeQueryInterfacePreemp(pCPEnum, IID_IEnumConnectionPoints, (IUnknown**)ppEnum);
+ LogInteropQI(pCPEnum, IID_IEnumConnectionPoints, hr, "ConnectionPointEnum::Clone: QIing for IID_IEnumConnectionPoints");
+
+ return hr;
+}
+
+ConnectionEnum::ConnectionEnum(ConnectionPoint *pConnectionPoint)
+: m_pConnectionPoint(pConnectionPoint)
+, m_CurrCookie(pConnectionPoint->GetCookieList()->GetHead())
+, m_cbRefCount(0)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ ULONG cbRef = SafeAddRefPreemp(m_pConnectionPoint);
+ //@TODO(CLE) AddRef logging that doesn't use QI
+}
+
+ConnectionEnum::~ConnectionEnum()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ ULONG cbRef = SafeReleasePreemp(m_pConnectionPoint);
+ LogInteropRelease(m_pConnectionPoint, cbRef, "ConnectionEnum::~ConnectionEnum: Releasing the connection point object");
+}
+
+HRESULT __stdcall ConnectionEnum::QueryInterface(REFIID riid, void** ppv)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(ppv, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Verify the arguments.
+ if (!ppv)
+ return E_POINTER;
+
+ // Initialize the out parameters.
+ *ppv = NULL;
+
+ SetupForComCallHR();
+
+ if (riid == IID_IEnumConnections)
+ {
+ *ppv = static_cast<IEnumConnections*>(this);
+ }
+ else if (riid == IID_IUnknown)
+ {
+ *ppv = static_cast<IUnknown*>(this);
+ }
+ else
+ {
+ return E_NOINTERFACE;
+ }
+
+ ULONG cbRef = SafeAddRefPreemp((IUnknown*)*ppv);
+ //@TODO(CLE) AddRef logging that doesn't use QI
+
+ return S_OK;
+}
+
+ULONG __stdcall ConnectionEnum::AddRef()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ SetupForComCallHR();
+
+ LONG i = FastInterlockIncrement((LONG*)&m_cbRefCount);
+ return i;
+}
+
+ULONG __stdcall ConnectionEnum::Release()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ SetupForComCallHR();
+
+ LONG i = FastInterlockDecrement((LONG*)&m_cbRefCount);
+ _ASSERTE(i >=0);
+ if (i == 0)
+ delete this;
+
+ return i;
+}
+
+HRESULT __stdcall ConnectionEnum::Next(ULONG cConnections, CONNECTDATA* rgcd, ULONG *pcFetched)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(rgcd, NULL_OK));
+ PRECONDITION(CheckPointer(pcFetched, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Verify the arguments.
+ if (NULL == rgcd)
+ return E_POINTER;
+
+ // Initialize the out parameters.
+ if (pcFetched)
+ *pcFetched = 0;
+
+ SetupForComCallHR();
+
+ HRESULT hr = S_OK;
+ UINT cFetched;
+ CONNECTIONCOOKIELIST *pConnectionList = m_pConnectionPoint->GetCookieList();
+
+ // Acquire the connection point's lock before we start traversing the connection list.
+ {
+ ConnectionPoint::LockHolder lh(m_pConnectionPoint);
+
+ {
+ // Switch to cooperative GC mode before we manipulate OBJECTREFs.
+ GCX_COOP();
+
+ for (cFetched = 0; cFetched < cConnections && m_CurrCookie; cFetched++)
+ {
+ {
+ CONTRACT_VIOLATION(ThrowsViolation);
+ rgcd[cFetched].pUnk = GetComIPFromObjectRef((OBJECTREF*)m_CurrCookie->m_hndEventProvObj, ComIpType_Unknown, NULL);
+ rgcd[cFetched].dwCookie = m_CurrCookie->m_id;
+ }
+ m_CurrCookie = pConnectionList->GetNext(m_CurrCookie);
+ }
+ }
+
+ // Leave the lock now that we are done traversing the list.
+ }
+
+ // Set the count of fetched connections if the caller desires it.
+ if (pcFetched)
+ *pcFetched = cFetched;
+
+ return cFetched == cConnections ? S_OK : S_FALSE;
+}
+
+HRESULT __stdcall ConnectionEnum::Skip(ULONG cConnections)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ SetupForComCallHR();
+
+ HRESULT hr = S_FALSE;
+ CONNECTIONCOOKIELIST *pConnectionList = m_pConnectionPoint->GetCookieList();
+
+ {
+ ConnectionPoint::LockHolder lh(m_pConnectionPoint);
+
+ // Try and skip the requested number of connections.
+ while (m_CurrCookie && cConnections)
+ {
+ m_CurrCookie = pConnectionList->GetNext(m_CurrCookie);
+ cConnections--;
+ }
+ // Leave the lock now that we are done traversing the list.
+ }
+
+ // Check to see if we succeeded.
+ return cConnections == 0 ? S_OK : S_FALSE;
+}
+
+HRESULT __stdcall ConnectionEnum::Reset()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ SetupForComCallHR();
+
+ // Set the current cookie back to the head of the list. We must acquire the
+ // connection point lock before we touch the list.
+ ConnectionPoint::LockHolder lh(m_pConnectionPoint);
+
+ m_CurrCookie = m_pConnectionPoint->GetCookieList()->GetHead();
+
+ return S_OK;
+}
+
+HRESULT __stdcall ConnectionEnum::Clone(IEnumConnections **ppEnum)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(ppEnum, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Verify the arguments.
+ if (!ppEnum)
+ return E_POINTER;
+
+ // Initialize the out parameters.
+ *ppEnum = NULL;
+
+ // This should set up an SO_INTOLERANT region; why doesn't it?
+ SetupForComCallHR();
+
+ ConnectionEnum *pConEnum = new(nothrow) ConnectionEnum(m_pConnectionPoint);
+ if (!pConEnum)
+ return E_OUTOFMEMORY;
+
+ HRESULT hr = SafeQueryInterfacePreemp(pConEnum, IID_IEnumConnections, (IUnknown**)ppEnum);
+ LogInteropQI(pConEnum, IID_IEnumConnections, hr, "ConnectionEnum::Clone: QIing for IID_IEnumConnections");
+
+ return hr;
+}
diff --git a/src/vm/comconnectionpoints.h b/src/vm/comconnectionpoints.h
new file mode 100644
index 0000000000..2efa6159af
--- /dev/null
+++ b/src/vm/comconnectionpoints.h
@@ -0,0 +1,255 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: ComConnectionPoints.h
+//
+
+// ===========================================================================
+// Declaration of the classes used to expose connection points to COM.
+// ===========================================================================
+
+
+#pragma once
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+#include "vars.hpp"
+#include "comcallablewrapper.h"
+#include "comdelegate.h"
+
+//------------------------------------------------------------------------------------------
+// Definition of helper class used to expose connection points
+//------------------------------------------------------------------------------------------
+
+// Structure containing information regarding the methods that make up an event.
+struct EventMethodInfo
+{
+ MethodDesc* m_pEventMethod;
+ MethodDesc* m_pAddMethod;
+ MethodDesc* m_pRemoveMethod;
+};
+
+
+// Structure passed out as a cookie when Advise is called.
+struct ConnectionCookie
+{
+ ConnectionCookie(OBJECTHANDLE hndEventProvObj) : m_hndEventProvObj(hndEventProvObj)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(NULL != hndEventProvObj);
+ }
+ CONTRACTL_END;
+ }
+
+ ~ConnectionCookie()
+ {
+ WRAPPER_NO_CONTRACT;
+ DestroyHandle(m_hndEventProvObj);
+ }
+
+ // Currently called only from Cooperative mode.
+ static ConnectionCookie* CreateConnectionCookie(OBJECTHANDLE hndEventProvObj)
+ {
+ CONTRACT (ConnectionCookie*)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(NULL != hndEventProvObj);
+ }
+ CONTRACT_END;
+
+ RETURN (new ConnectionCookie(hndEventProvObj));
+ }
+
+ SLink m_Link;
+ OBJECTHANDLE m_hndEventProvObj;
+ DWORD m_id;
+};
+
+FORCEINLINE void ConnectionCookieRelease(ConnectionCookie* p)
+{
+ WRAPPER_NO_CONTRACT;
+
+ delete p;
+}
+
+// Connection cookie holder used to ensure the cookies are deleted when required.
+class ConnectionCookieHolder : public Wrapper<ConnectionCookie*, ConnectionCookieDoNothing, ConnectionCookieRelease, NULL>
+{
+public:
+ ConnectionCookieHolder(ConnectionCookie* p = NULL)
+ : Wrapper<ConnectionCookie*, ConnectionCookieDoNothing, ConnectionCookieRelease, NULL>(p)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ FORCEINLINE void operator=(ConnectionCookie* p)
+ {
+ WRAPPER_NO_CONTRACT;
+ Wrapper<ConnectionCookie*, ConnectionCookieDoNothing, ConnectionCookieRelease, NULL>::operator=(p);
+ }
+};
+
+// List of connection cookies.
+typedef SList<ConnectionCookie, true> CONNECTIONCOOKIELIST;
+
+// ConnectionPoint class. This class implements IConnectionPoint and does the mapping
+// from a CP handler to a TCE provider.
+class ConnectionPoint : public IConnectionPoint
+{
+public:
+ // Encapsulate CrstHolder, so that clients of our lock don't have to know the
+ // details of its implementation.
+ class LockHolder : public CrstHolder
+ {
+ public:
+ LockHolder(ConnectionPoint *pCP) : CrstHolder(&pCP->m_Lock)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ };
+
+ ConnectionPoint( ComCallWrapper *pWrap, MethodTable *pEventMT );
+ ~ConnectionPoint();
+
+ HRESULT __stdcall QueryInterface(REFIID riid, void** ppv);
+ ULONG __stdcall AddRef();
+ ULONG __stdcall Release();
+
+ HRESULT __stdcall GetConnectionInterface( IID *pIID );
+ HRESULT __stdcall GetConnectionPointContainer( IConnectionPointContainer **ppCPC );
+ HRESULT __stdcall Advise( IUnknown *pUnk, DWORD *pdwCookie );
+ HRESULT __stdcall Unadvise( DWORD dwCookie );
+ HRESULT __stdcall EnumConnections( IEnumConnections **ppEnum );
+
+ REFIID GetIID()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_rConnectionIID;
+ }
+
+ CONNECTIONCOOKIELIST *GetCookieList()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_ConnectionList;
+ }
+
+private:
+ // Structures used for the AD callback wrappers.
+ struct GetConnectionPointContainer_Args
+ {
+ ConnectionPoint *pThis;
+ IConnectionPointContainer **ppCPC;
+ };
+
+ struct Advise_Args
+ {
+ ConnectionPoint *pThis;
+ IUnknown *pUnk;
+ DWORD *pdwCookie;
+ };
+
+ struct Unadvise_Args
+ {
+ ConnectionPoint *pThis;
+ DWORD dwCookie;
+ };
+
+ // Worker methods.
+ void AdviseWorker(IUnknown *pUnk, DWORD *pdwCookie);
+ void UnadviseWorker( DWORD dwCookie );
+ IConnectionPointContainer *GetConnectionPointContainerWorker();
+
+ // AD callback wrappers.
+ static void GetConnectionPointContainer_Wrapper(LPVOID ptr);
+ static void Advise_Wrapper(LPVOID ptr);
+ static void Unadvise_Wrapper(LPVOID ptr);
+
+ // Helper methods.
+ void SetupEventMethods();
+ MethodDesc *FindProviderMethodDesc( MethodDesc *pEventMethodDesc, EnumEventMethods MethodType );
+ void InvokeProviderMethod( OBJECTREF pProvider, OBJECTREF pSubscriber, MethodDesc *pProvMethodDesc, MethodDesc *pEventMethodDesc );
+ void InsertWithLock(ConnectionCookie* pConCookie);
+ void FindAndRemoveWithLock(ConnectionCookie* pConCookie);
+ ConnectionCookie* FindWithLock(DWORD idOfCookie);
+
+ ComCallWrapper* m_pOwnerWrap;
+ GUID m_rConnectionIID;
+ MethodTable* m_pTCEProviderMT;
+ MethodTable* m_pEventItfMT;
+ Crst m_Lock;
+ CONNECTIONCOOKIELIST m_ConnectionList;
+ EventMethodInfo* m_apEventMethods;
+ int m_NumEventMethods;
+ ULONG m_cbRefCount;
+ ConnectionCookie* m_pLastInserted;
+
+ const static DWORD idUpperLimit = 0xFFFFFFFF;
+};
+
+// Enumeration of connection points.
+class ConnectionPointEnum : IEnumConnectionPoints
+{
+public:
+ // Encapsulate CrstHolder, so that clients of our lock don't have to know the
+ // details of its implementation.
+ class LockHolder : public CrstHolder
+ {
+ public:
+ LockHolder(ConnectionPointEnum *pCP) : CrstHolder(&pCP->m_Lock)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ };
+
+ ConnectionPointEnum(ComCallWrapper *pOwnerWrap, CQuickArray<ConnectionPoint*> *pCPList);
+ ~ConnectionPointEnum();
+
+ HRESULT __stdcall QueryInterface(REFIID riid, void** ppv);
+ ULONG __stdcall AddRef();
+ ULONG __stdcall Release();
+
+ HRESULT __stdcall Next(ULONG cConnections, IConnectionPoint **ppCP, ULONG *pcFetched);
+ HRESULT __stdcall Skip(ULONG cConnections);
+ HRESULT __stdcall Reset();
+ HRESULT __stdcall Clone(IEnumConnectionPoints **ppEnum);
+
+private:
+ ComCallWrapper* m_pOwnerWrap;
+ CQuickArray<ConnectionPoint*>* m_pCPList;
+ UINT m_CurrPos;
+ ULONG m_cbRefCount;
+ Crst m_Lock;
+};
+
+// Enumeration of connections.
+class ConnectionEnum : IEnumConnections
+{
+public:
+ ConnectionEnum(ConnectionPoint *pConnectionPoint);
+ ~ConnectionEnum();
+
+ HRESULT __stdcall QueryInterface(REFIID riid, void** ppv);
+ ULONG __stdcall AddRef();
+ ULONG __stdcall Release();
+
+ HRESULT __stdcall Next(ULONG cConnections, CONNECTDATA* rgcd, ULONG *pcFetched);
+ HRESULT __stdcall Skip(ULONG cConnections);
+ HRESULT __stdcall Reset();
+ HRESULT __stdcall Clone(IEnumConnections **ppEnum);
+
+private:
+ ConnectionPoint* m_pConnectionPoint;
+ ConnectionCookie* m_CurrCookie;
+ ULONG m_cbRefCount;
+};
diff --git a/src/vm/comdatetime.cpp b/src/vm/comdatetime.cpp
new file mode 100644
index 0000000000..1314278480
--- /dev/null
+++ b/src/vm/comdatetime.cpp
@@ -0,0 +1,126 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#include "object.h"
+#include "excep.h"
+#include "frames.h"
+#include "vars.hpp"
+#include "comdatetime.h"
+
+const INT64 COMDateTime::TicksPerMillisecond = 10000;
+const INT64 COMDateTime::TicksPerSecond = TicksPerMillisecond * 1000;
+const INT64 COMDateTime::TicksPerMinute = TicksPerSecond * 60;
+const INT64 COMDateTime::TicksPerHour = TicksPerMinute * 60;
+const INT64 COMDateTime::TicksPerDay = TicksPerHour * 24;
+
+const INT64 COMDateTime::MillisPerSecond = 1000;
+const INT64 COMDateTime::MillisPerDay = MillisPerSecond * 60 * 60 * 24;
+
+const int COMDateTime::DaysPer4Years = 365 * 4 + 1;
+const int COMDateTime::DaysPer100Years = DaysPer4Years * 25 - 1;
+const int COMDateTime::DaysPer400Years = DaysPer100Years * 4 + 1;
+
+// Number of days from 1/1/0001 to 1/1/10000
+const int COMDateTime::DaysTo10000 = DaysPer400Years * 25 - 366;
+
+const int COMDateTime::DaysTo1899 = DaysPer400Years * 4 + DaysPer100Years * 3 - 367;
+
+const INT64 COMDateTime::DoubleDateOffset = DaysTo1899 * TicksPerDay;
+
+// OA Min Date is Jan 1, 100 AD. This is after converting to ticks.
+const INT64 COMDateTime::OADateMinAsTicks = (DaysPer100Years - 365) * TicksPerDay;
+
+// All OA dates must be greater than (not >=) OADateMinAsDouble
+const double COMDateTime::OADateMinAsDouble = -657435.0;
+
+// All OA dates must be less than (not <=) OADateMaxAsDouble
+const double COMDateTime::OADateMaxAsDouble = 2958466.0;
+
+const INT64 COMDateTime::MaxTicks = DaysTo10000 * TicksPerDay;
+const INT64 COMDateTime::MaxMillis = DaysTo10000 * MillisPerDay;
+
+const INT64 TicksMask = I64(0x3FFFFFFFFFFFFFFF);
+
+// This function is duplicated in DateTime.cs
+INT64 COMDateTime::DoubleDateToTicks(const double d)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ // Make sure this date is a valid OleAut date. This is the check from the internal
+ // OleAut macro IsValidDate, found in oledisp.h. Eventually at least the 64 bit
+ // build of oleaut will define these gregorian max and min values as public constants.
+ // The check done this way will take care of NaN
+ if (!(d < OADateMaxAsDouble) || !(d > OADateMinAsDouble))
+ COMPlusThrow(kArgumentException, W("Arg_OleAutDateInvalid"));
+
+ // Conversion to int64 will not cause an overflow here, as at this point the "d" is in between OADateMinAsDouble and OADateMaxAsDouble
+ INT64 millis = (INT64)(d * MillisPerDay + (d >= 0? 0.5: -0.5));
+ if (millis < 0) millis -= (millis % MillisPerDay) * 2;
+ // When d is very close to -1 or 1, rounding can leave us with exactly one day, in which case millis % MillisPerDay is 0 and the adjustment above is a no-op.
+ millis += DoubleDateOffset / TicksPerMillisecond;
+
+ if (millis < 0 || millis >= MaxMillis)
+ {
+ COMPlusThrow(kArgumentException, W("Arg_OleAutDateScale")); // Cannot be equal to MaxMillis.
+ }
+ return millis * TicksPerMillisecond;
+}
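+
+// Illustrative only: a worked instance of the conversion above. The OA date
+// 1.25 means one day after 12/30/1899 plus a quarter of a day, i.e.
+// 12/31/1899 06:00:00:
+//
+//   millis = (INT64)(1.25 * MillisPerDay + 0.5)        // 108,000,000 ms
+//   millis += DoubleDateOffset / TicksPerMillisecond   // rebase to 1/1/0001
+//   ticks  = millis * TicksPerMillisecond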
+
+// This function is duplicated in DateTime.cs
+double COMDateTime::TicksToDoubleDate(INT64 ticks)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ // Workaround to handle uninitialized DateTime objects in the CLR.
+ // See the explanation in DateTime.cs's TicksToOADate function.
+
+ // Strip off the extra kind state
+ ticks = (ticks & TicksMask);
+
+ if (ticks == 0)
+ return 0.0; // OA's 0 date (12/30/1899).
+
+ if (ticks < OADateMinAsTicks)
+ {
+ // We've special-cased day 0 (01/01/0001 in the Gregorian Calendar) such that
+ // the date can be used to represent a DateTime that contains only a time. OA
+ // uses day 0 (12/30/1899) for the same purpose, so we'll map from our day 0
+ // to their day 0.
+ if (ticks < TicksPerDay)
+ ticks+=DoubleDateOffset;
+ else
+ COMPlusThrow(kOverflowException, W("Arg_OleAutDateInvalid"));
+ }
+
+ INT64 millis = (ticks - DoubleDateOffset) / TicksPerMillisecond;
+ if (millis < 0)
+ {
+ INT64 frac = millis % MillisPerDay;
+ if (frac != 0) millis -= (MillisPerDay + frac) * 2;
+ }
+
+ double d = (double)millis / MillisPerDay;
+
+ // Make sure this date is a valid OleAut date. This is the check from the internal
+ // OleAut macro IsValidDate, found in oledisp.h. Eventually at least the 64 bit
+ // build of oleaut will define these gregorian max and min values as public constants.
+ _ASSERTE(d < OADateMaxAsDouble && d > OADateMinAsDouble);
+
+ return d;
+}
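+
+// Illustrative only: for negative OA dates the fraction still encodes a
+// forward-running time of day, which is what the remainder folding above
+// preserves. For example, -1.25 decodes to 12/29/1899 06:00:00 (one whole day
+// before 12/30/1899, then a quarter day forward), not to 18:00:00.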
diff --git a/src/vm/comdatetime.h b/src/vm/comdatetime.h
new file mode 100644
index 0000000000..fef4fefbd3
--- /dev/null
+++ b/src/vm/comdatetime.h
@@ -0,0 +1,50 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _COMDATETIME_H_
+#define _COMDATETIME_H_
+
+#include <oleauto.h>
+#include "fcall.h"
+
+#include <pshpack1.h>
+
+class COMDateTime {
+ static const INT64 TicksPerMillisecond;
+ static const INT64 TicksPerSecond;
+ static const INT64 TicksPerMinute;
+ static const INT64 TicksPerHour;
+ static const INT64 TicksPerDay;
+
+ static const INT64 MillisPerSecond;
+ static const INT64 MillisPerDay;
+
+ static const int DaysPer4Years;
+ static const int DaysPer100Years;
+ static const int DaysPer400Years;
+ // Number of days from 1/1/0001 to 1/1/10000
+ static const int DaysTo10000;
+
+ static const int DaysTo1899;
+
+ static const INT64 DoubleDateOffset;
+ static const INT64 OADateMinAsTicks; // in ticks
+ static const double OADateMinAsDouble;
+ static const double OADateMaxAsDouble;
+
+ static const INT64 MaxTicks;
+ static const INT64 MaxMillis;
+
+public:
+
+ // Native util functions for other classes.
+ static INT64 DoubleDateToTicks(const double d); // From OleAut Date
+ static double TicksToDoubleDate(const INT64 ticks);
+};
+
+#include <poppack.h>
+
+#endif // _COMDATETIME_H_
diff --git a/src/vm/comdelegate.cpp b/src/vm/comdelegate.cpp
new file mode 100644
index 0000000000..f461931f0e
--- /dev/null
+++ b/src/vm/comdelegate.cpp
@@ -0,0 +1,4000 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: COMDelegate.cpp
+//
+
+// This module contains the implementation of the native methods for the
+// Delegate class.
+//
+
+
+#include "common.h"
+#include "comdelegate.h"
+#include "invokeutil.h"
+#include "excep.h"
+#include "class.h"
+#include "field.h"
+#include "dllimportcallback.h"
+#include "dllimport.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "eeconfig.h"
+#include "mdaassistants.h"
+#include "cgensys.h"
+#include "asmconstants.h"
+#include "security.h"
+#include "virtualcallstub.h"
+#include "callingconvention.h"
+#ifdef FEATURE_COMINTEROP
+#include "comcallablewrapper.h"
+#endif // FEATURE_COMINTEROP
+
+#define DELEGATE_MARKER_UNMANAGEDFPTR -1
+
+
+#ifndef DACCESS_COMPILE
+
+static PCODE GetVirtualCallStub(MethodDesc *method, TypeHandle scopeType)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM()); // from MetaSig::SizeOfArgStack
+ }
+ CONTRACTL_END;
+
+ //TODO: depending on what we decide for generic methods, we may want to move this check to a better place
+ if (method->IsGenericMethodDefinition() || method->HasMethodInstantiation())
+ {
+ COMPlusThrow(kNotSupportedException);
+ }
+
+ // need to grab a virtual dispatch stub
+ // method can be on a canonical MethodTable, we need to allocate the stub on the loader allocator associated with the exact type instantiation.
+ VirtualCallStubManager *pVirtualStubManager = scopeType.GetMethodTable()->GetLoaderAllocator()->GetVirtualCallStubManager();
+ PCODE pTargetCall = pVirtualStubManager->GetCallStub(scopeType, method);
+ _ASSERTE(pTargetCall);
+ return pTargetCall;
+}
+
+#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
+
+// ShuffleOfs not needed
+
+#elif defined(_TARGET_X86_)
+
+// Return an encoded shuffle entry describing a general register or stack offset that needs to be shuffled.
+static UINT16 ShuffleOfs(INT ofs, UINT stackSizeDelta = 0)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (TransitionBlock::IsStackArgumentOffset(ofs))
+ {
+ ofs = (ofs - TransitionBlock::GetOffsetOfReturnAddress()) + stackSizeDelta;
+
+ if (ofs >= ShuffleEntry::REGMASK)
+ {
+ // method takes too many stack args
+ COMPlusThrow(kNotSupportedException);
+ }
+ }
+ else
+ {
+ ofs -= TransitionBlock::GetOffsetOfArgumentRegisters();
+ ofs |= ShuffleEntry::REGMASK;
+ }
+
+ return static_cast<UINT16>(ofs);
+}
+
+#else // Portable default implementation
+
+// Helpers used when calculating shuffle array entries in GenerateShuffleArray below.
+
+// Return true if the current argument still has slots left to shuffle in general registers or on the stack
+// (currently we never shuffle floating point registers since there's no need).
+static bool AnythingToShuffle(ArgLocDesc * pArg)
+{
+ return (pArg->m_cGenReg > 0) || (pArg->m_cStack > 0);
+}
+
+// Return an encoded shuffle entry describing a general register or stack offset that needs to be shuffled.
+static UINT16 ShuffleOfs(ArgLocDesc * pArg)
+{
+ // Shuffle any registers first (the order matters since otherwise we could end up shuffling a stack slot
+ // over a register we later need to shuffle down as well).
+ if (pArg->m_cGenReg > 0)
+ {
+ pArg->m_cGenReg--;
+ return (UINT16)(ShuffleEntry::REGMASK | pArg->m_idxGenReg++);
+ }
+
+ // If we get here we must have at least one stack slot left to shuffle (this method should only be called
+ // when AnythingToShuffle(pArg) == true).
+ _ASSERTE(pArg->m_cStack > 0);
+ pArg->m_cStack--;
+
+ // Delegates cannot handle overly large argument stacks due to shuffle entry encoding limitations.
+ if (pArg->m_idxStack >= ShuffleEntry::REGMASK)
+ COMPlusThrow(kNotSupportedException);
+
+ return (UINT16)(pArg->m_idxStack++);
+}
+
+#endif
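+
+// Illustrative only: the decoding side of the 16-bit offsets produced by the
+// ShuffleOfs helpers above. The bit flagged by ShuffleEntry::REGMASK separates
+// register operands from stack-slot operands:
+//
+//   UINT16 ofs = ...;
+//   if (ofs & ShuffleEntry::REGMASK)
+//       { int reg  = ofs & ~ShuffleEntry::REGMASK; /* move via register */ }
+//   else
+//       { int slot = ofs;                          /* move via stack slot */ }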
+
+VOID GenerateShuffleArray(MethodDesc* pInvoke, MethodDesc *pTargetMeth, SArray<ShuffleEntry> * pShuffleEntryArray)
+{
+ STANDARD_VM_CONTRACT;
+
+ ShuffleEntry entry;
+ ZeroMemory(&entry, sizeof(entry));
+
+#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
+ MetaSig msig(pInvoke);
+ ArgIterator argit(&msig);
+
+ if (argit.HasRetBuffArg())
+ {
+ if (!pTargetMeth->IsStatic())
+ {
+ // Use ELEMENT_TYPE_END to signal the special handling required by
+ // instance method with return buffer. "this" needs to come from
+ // the first argument.
+ entry.argtype = ELEMENT_TYPE_END;
+ pShuffleEntryArray->Append(entry);
+
+ msig.NextArgNormalized();
+ }
+ else
+ {
+ entry.argtype = ELEMENT_TYPE_PTR;
+ pShuffleEntryArray->Append(entry);
+ }
+ }
+
+ CorElementType sigType;
+
+ while ((sigType = msig.NextArgNormalized()) != ELEMENT_TYPE_END)
+ {
+ ZeroMemory(&entry, sizeof(entry));
+ entry.argtype = sigType;
+ pShuffleEntryArray->Append(entry);
+ }
+
+ ZeroMemory(&entry, sizeof(entry));
+ entry.srcofs = ShuffleEntry::SENTINEL;
+ pShuffleEntryArray->Append(entry);
+
+#elif defined(_TARGET_X86_)
+ // Must create independent msigs to prevent the argiterators from
+ // interfering with each other.
+ MetaSig sSigSrc(pInvoke);
+ MetaSig sSigDst(pTargetMeth);
+
+ _ASSERTE(sSigSrc.HasThis());
+
+ ArgIterator sArgPlacerSrc(&sSigSrc);
+ ArgIterator sArgPlacerDst(&sSigDst);
+
+ UINT stackSizeSrc = sArgPlacerSrc.SizeOfArgStack();
+ UINT stackSizeDst = sArgPlacerDst.SizeOfArgStack();
+
+ if (stackSizeDst > stackSizeSrc)
+ {
+ // we can drop arguments but we can never make them up - this is definitely not allowed
+ COMPlusThrow(kVerificationException);
+ }
+
+ UINT stackSizeDelta = stackSizeSrc - stackSizeDst;
+
+ INT ofsSrc, ofsDst;
+
+ // if the function is non static we need to place the 'this' first
+ if (!pTargetMeth->IsStatic())
+ {
+ entry.srcofs = ShuffleOfs(sArgPlacerSrc.GetNextOffset());
+ entry.dstofs = ShuffleEntry::REGMASK | 4;
+ pShuffleEntryArray->Append(entry);
+ }
+ else if (sArgPlacerSrc.HasRetBuffArg())
+ {
+ // the first register is used for 'this'
+ entry.srcofs = ShuffleOfs(sArgPlacerSrc.GetRetBuffArgOffset());
+ entry.dstofs = ShuffleOfs(sArgPlacerDst.GetRetBuffArgOffset(), stackSizeDelta);
+ if (entry.srcofs != entry.dstofs)
+ pShuffleEntryArray->Append(entry);
+ }
+
+ while (TransitionBlock::InvalidOffset != (ofsSrc = sArgPlacerSrc.GetNextOffset()))
+ {
+ ofsDst = sArgPlacerDst.GetNextOffset();
+
+ int cbSize = sArgPlacerDst.GetArgSize();
+
+ do
+ {
+ entry.srcofs = ShuffleOfs(ofsSrc);
+ entry.dstofs = ShuffleOfs(ofsDst, stackSizeDelta);
+
+ ofsSrc += STACK_ELEM_SIZE;
+ ofsDst += STACK_ELEM_SIZE;
+
+ if (entry.srcofs != entry.dstofs)
+ pShuffleEntryArray->Append(entry);
+
+ cbSize -= STACK_ELEM_SIZE;
+ }
+ while (cbSize > 0);
+ }
+
+ if (stackSizeDelta != 0)
+ {
+ // Emit code to move the return address
+ entry.srcofs = 0; // retaddress is assumed to be at esp
+ entry.dstofs = static_cast<UINT16>(stackSizeDelta);
+ pShuffleEntryArray->Append(entry);
+ }
+
+ entry.srcofs = ShuffleEntry::SENTINEL;
+ entry.dstofs = static_cast<UINT16>(stackSizeDelta);
+ pShuffleEntryArray->Append(entry);
+
+#else // Portable default implementation
+ MetaSig sSigSrc(pInvoke);
+ MetaSig sSigDst(pTargetMeth);
+
+ // Initialize helpers that determine how each argument for the source and destination signatures is placed
+ // in registers or on the stack.
+ ArgIterator sArgPlacerSrc(&sSigSrc);
+ ArgIterator sArgPlacerDst(&sSigDst);
+
+ INT ofsSrc;
+ INT ofsDst;
+ ArgLocDesc sArgSrc;
+ ArgLocDesc sArgDst;
+
+ // If the target method is non-static (this happens for open instance delegates), we need to account for
+ // the implicit this parameter.
+ if (sSigDst.HasThis())
+ {
+ // The this pointer is an implicit argument for the destination signature. But on the source side it's
+ // just another regular argument and needs to be iterated over by sArgPlacerSrc and the MetaSig.
+ sArgPlacerSrc.GetArgLoc(sArgPlacerSrc.GetNextOffset(), &sArgSrc);
+
+ sArgPlacerSrc.GetThisLoc(&sArgDst);
+
+ entry.srcofs = ShuffleOfs(&sArgSrc);
+ entry.dstofs = ShuffleOfs(&sArgDst);
+
+ pShuffleEntryArray->Append(entry);
+ }
+
+ // Handle any return buffer argument.
+ if (sArgPlacerDst.HasRetBuffArg())
+ {
+ // The return buffer argument is implicit in both signatures.
+
+ sArgPlacerSrc.GetRetBuffArgLoc(&sArgSrc);
+ sArgPlacerDst.GetRetBuffArgLoc(&sArgDst);
+
+ entry.srcofs = ShuffleOfs(&sArgSrc);
+ entry.dstofs = ShuffleOfs(&sArgDst);
+
+ // Depending on the type of target method (static vs instance) the return buffer argument may end up
+ // in the same register in both signatures. So we only append the entry in the case where it's not a
+ // no-op (i.e. the source and destination offsets are different).
+ if (entry.srcofs != entry.dstofs)
+ pShuffleEntryArray->Append(entry);
+ }
+
+ // Iterate all the regular arguments, mapping source registers and stack locations to the corresponding
+ // destination locations.
+ while ((ofsSrc = sArgPlacerSrc.GetNextOffset()) != TransitionBlock::InvalidOffset)
+ {
+ ofsDst = sArgPlacerDst.GetNextOffset();
+
+ // Find the argument location mapping for both source and destination signature. A single argument can
+ // occupy a floating point register (in which case we don't need to do anything, they're not shuffled)
+ // or some combination of general registers and the stack.
+ sArgPlacerSrc.GetArgLoc(ofsSrc, &sArgSrc);
+ sArgPlacerDst.GetArgLoc(ofsDst, &sArgDst);
+
+ // Shuffle each slot in the argument (register or stack slot) from source to destination.
+ while (AnythingToShuffle(&sArgSrc))
+ {
+ // Locate the next slot to shuffle in the source and destination and encode the transfer into a
+ // shuffle entry.
+ entry.srcofs = ShuffleOfs(&sArgSrc);
+ entry.dstofs = ShuffleOfs(&sArgDst);
+
+ // Only emit this entry if it's not a no-op (i.e. the source and destination locations are
+ // different).
+ if (entry.srcofs != entry.dstofs)
+ pShuffleEntryArray->Append(entry);
+ }
+
+ // We should have run out of slots to shuffle in the destination at the same time as the source.
+ _ASSERTE(!AnythingToShuffle(&sArgDst));
+ }
+
+ entry.srcofs = ShuffleEntry::SENTINEL;
+ entry.dstofs = 0;
+ pShuffleEntryArray->Append(entry);
+#endif
+}
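+
+// Worked example (illustrative only; the register numbering is an assumption, it varies
+// by target ABI): on the portable path, for a delegate bound to a static target
+// int F(int a, int b), the source (Invoke) side passes (this, a, b) while the target
+// expects only (a, b). If a and b arrive in general registers 1 and 2 and must land in
+// registers 0 and 1, the generated array would look roughly like:
+//
+//   { srcofs = REGMASK|1, dstofs = REGMASK|0 }   // a: reg 1 -> reg 0
+//   { srcofs = REGMASK|2, dstofs = REGMASK|1 }   // b: reg 2 -> reg 1
+//   { srcofs = SENTINEL,  dstofs = 0 }           // terminator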
+
+
+class ShuffleThunkCache : public StubCacheBase
+{
+private:
+ //---------------------------------------------------------
+ // Compile a static delegate shufflethunk. Always returns
+ // STANDALONE since we don't interpret these things.
+ //---------------------------------------------------------
+ virtual void CompileStub(const BYTE *pRawStub,
+ StubLinker *pstublinker)
+ {
+ STANDARD_VM_CONTRACT;
+
+ ((CPUSTUBLINKER*)pstublinker)->EmitShuffleThunk((ShuffleEntry*)pRawStub);
+ }
+
+ //---------------------------------------------------------
+ // Tells the StubCacheBase the length of a ShuffleEntryArray.
+ //---------------------------------------------------------
+ virtual UINT Length(const BYTE *pRawStub)
+ {
+ LIMITED_METHOD_CONTRACT;
+ ShuffleEntry *pse = (ShuffleEntry*)pRawStub;
+ while (pse->srcofs != ShuffleEntry::SENTINEL)
+ {
+ pse++;
+ }
+ return sizeof(ShuffleEntry) * (UINT)(1 + (pse - (ShuffleEntry*)pRawStub));
+ }
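+
+ // For example (illustrative): an array of three real entries followed by the
+ // sentinel reports a length of 4 * sizeof(ShuffleEntry).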
+
+ virtual void AddStub(const BYTE* pRawStub, Stub* pNewStub)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef CROSSGEN_COMPILE
+ DelegateInvokeStubManager::g_pManager->AddStub(pNewStub);
+#endif
+ }
+};
+
+ShuffleThunkCache *COMDelegate::m_pShuffleThunkCache = NULL;
+MulticastStubCache *COMDelegate::m_pSecureDelegateStubCache = NULL;
+MulticastStubCache *COMDelegate::m_pMulticastStubCache = NULL;
+
+CrstStatic COMDelegate::s_DelegateToFPtrHashCrst;
+PtrHashMap* COMDelegate::s_pDelegateToFPtrHash = NULL;
+
+
+// One time init.
+void COMDelegate::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ s_DelegateToFPtrHashCrst.Init(CrstDelegateToFPtrHash, CRST_UNSAFE_ANYMODE);
+
+ s_pDelegateToFPtrHash = ::new PtrHashMap();
+
+ LockOwner lock = {&COMDelegate::s_DelegateToFPtrHashCrst, IsOwnerOfCrst};
+ s_pDelegateToFPtrHash->Init(TRUE, &lock);
+
+ m_pShuffleThunkCache = new ShuffleThunkCache();
+ m_pMulticastStubCache = new MulticastStubCache();
+ m_pSecureDelegateStubCache = new MulticastStubCache();
+}
+
+#ifdef FEATURE_COMINTEROP
+ComPlusCallInfo * COMDelegate::PopulateComPlusCallInfo(MethodTable * pDelMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DelegateEEClass * pClass = (DelegateEEClass *)pDelMT->GetClass();
+
+ // set up the ComPlusCallInfo if it does not exist already
+ if (pClass->m_pComPlusCallInfo == NULL)
+ {
+ LoaderHeap *pHeap = pDelMT->GetLoaderAllocator()->GetHighFrequencyHeap();
+ ComPlusCallInfo *pTemp = (ComPlusCallInfo *)(void *)pHeap->AllocMem(S_SIZE_T(sizeof(ComPlusCallInfo)));
+
+ pTemp->m_cachedComSlot = ComMethodTable::GetNumExtraSlots(ifVtable);
+ pTemp->InitStackArgumentSize();
+
+ InterlockedCompareExchangeT(EnsureWritablePages(&pClass->m_pComPlusCallInfo), pTemp, NULL);
+ }
+
+ *EnsureWritablePages(&pClass->m_pComPlusCallInfo->m_pInterfaceMT) = pDelMT;
+
+ return pClass->m_pComPlusCallInfo;
+}
+#endif // FEATURE_COMINTEROP
+
+// We need a LoaderHeap that lives at least as long as the DelegateEEClass, but ideally no longer
+LoaderHeap *DelegateEEClass::GetStubHeap()
+{
+ return m_pInvokeMethod->GetLoaderAllocator()->GetStubHeap();
+}
+
+
+Stub* COMDelegate::SetupShuffleThunk(MethodTable * pDelMT, MethodDesc *pTargetMeth)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ DelegateEEClass * pClass = (DelegateEEClass *)pDelMT->GetClass();
+
+ MethodDesc *pMD = pClass->m_pInvokeMethod;
+
+ StackSArray<ShuffleEntry> rShuffleEntryArray;
+ GenerateShuffleArray(pMD, pTargetMeth, &rShuffleEntryArray);
+
+ Stub* pShuffleThunk = m_pShuffleThunkCache->Canonicalize((const BYTE *)&rShuffleEntryArray[0]);
+ if (!pShuffleThunk)
+ {
+ COMPlusThrowOM();
+ }
+
+ g_IBCLogger.LogEEClassCOWTableAccess(pDelMT);
+
+ EnsureWritablePages(pClass);
+
+ if (!pTargetMeth->IsStatic() && pTargetMeth->HasRetBuffArg())
+ {
+ if (FastInterlockCompareExchangePointer(&pClass->m_pInstRetBuffCallStub, pShuffleThunk, NULL ) != NULL)
+ {
+ pShuffleThunk->DecRef();
+ pShuffleThunk = pClass->m_pInstRetBuffCallStub;
+ }
+ }
+ else
+ {
+ if (FastInterlockCompareExchangePointer(&pClass->m_pStaticCallStub, pShuffleThunk, NULL ) != NULL)
+ {
+ pShuffleThunk->DecRef();
+ pShuffleThunk = pClass->m_pStaticCallStub;
+ }
+ }
+
+ return pShuffleThunk;
+}
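+
+// Usage sketch (illustrative): callers first check for a thunk already cached on the
+// DelegateEEClass and only fall back to this helper when it is missing, e.g.
+//
+//   Stub *pThunk = pDelCls->m_pStaticCallStub;
+//   if (!pThunk)
+//       pThunk = COMDelegate::SetupShuffleThunk(pDelMT, pTargetMeth);
+//   pDelegate->SetMethodPtr(pThunk->GetEntryPoint());
+//
+// This is the pattern used by BindToMethod and DelegateConstruct below.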
+
+
+#ifndef CROSSGEN_COMPILE
+
+FCIMPL5(FC_BOOL_RET, COMDelegate::BindToMethodName,
+ Object *refThisUNSAFE,
+ Object *targetUNSAFE,
+ ReflectClassBaseObject *pMethodTypeUNSAFE,
+ StringObject* methodNameUNSAFE,
+ int flags)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ DELEGATEREF refThis;
+ OBJECTREF target;
+ STRINGREF methodName;
+ REFLECTCLASSBASEREF refMethodType;
+ } gc;
+
+ gc.refThis = (DELEGATEREF) ObjectToOBJECTREF(refThisUNSAFE);
+ gc.target = (OBJECTREF) targetUNSAFE;
+ gc.methodName = (STRINGREF) methodNameUNSAFE;
+ gc.refMethodType = (REFLECTCLASSBASEREF) ObjectToOBJECTREF(pMethodTypeUNSAFE);
+
+ TypeHandle methodType = gc.refMethodType->GetType();
+
+ //We should throw an exception if the assembly doesn't have run access.
+ //That would be a breaking change from V2.
+ //
+ //Assembly *pAssem = methodType.GetAssembly();
+ //if (pAssem->IsDynamic() && !pAssem->HasRunAccess())
+ // FCThrowRes(kNotSupportedException, W("NotSupported_DynamicAssemblyNoRunAccess"));
+
+ MethodDesc *pMatchingMethod = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ // Caching of MethodDescs (impl and decl) for MethodTable slots provided a significant
+ // performance gain in some reflection emit scenarios.
+ MethodTable::AllowMethodDataCaching();
+
+#ifdef FEATURE_LEGACYNETCF
+ // NetCF has done relaxed signature matching unconditionally
+ if (GetAppDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ flags |= DBF_RelaxedSignature;
+#endif
+
+ TypeHandle targetType((gc.target != NULL) ? gc.target->GetTrueMethodTable() : NULL);
+ // get the invoke of the delegate
+ MethodTable * pDelegateType = gc.refThis->GetMethodTable();
+ MethodDesc* pInvokeMeth = COMDelegate::FindDelegateInvokeMethod(pDelegateType);
+ _ASSERTE(pInvokeMeth);
+
+ //
+ // now loop through the methods looking for a match
+ //
+
+ // get the name in UTF8 format
+ SString wszName(SString::Literal, gc.methodName->GetBuffer());
+ StackScratchBuffer utf8Name;
+ LPCUTF8 szNameStr = wszName.GetUTF8(utf8Name);
+
+ // pick a proper compare function
+ typedef int (__cdecl *UTF8StringCompareFuncPtr)(const char *, const char *);
+ UTF8StringCompareFuncPtr StrCompFunc = (flags & DBF_CaselessMatching) ? stricmpUTF8 : strcmp;
+
+ // search the type hierarchy
+ MethodTable *pMTOrig = methodType.GetMethodTable()->GetCanonicalMethodTable();
+ for (MethodTable *pMT = pMTOrig; pMT != NULL; pMT = pMT->GetParentMethodTable())
+ {
+ MethodTable::MethodIterator it(pMT);
+ it.MoveToEnd();
+ for (; it.IsValid() && (pMT == pMTOrig || !it.IsVirtual()); it.Prev())
+ {
+ MethodDesc *pCurMethod = it.GetDeclMethodDesc();
+
+ // We can't match generic methods (since no instantiation information has been provided).
+ if (pCurMethod->IsGenericMethodDefinition())
+ continue;
+
+ if ((pCurMethod != NULL) && (StrCompFunc(szNameStr, pCurMethod->GetName()) == 0))
+ {
+ // found a matching string, get an associated method desc if needed
+ // Use unboxing stubs for instance and virtual methods on value types.
+ // If this is an open delegate to an instance method, BindToMethod will rebind it to the non-unboxing method.
+ // Open delegate
+ // Static: never use unboxing stub
+ // BindToMethodInfo/Name will bind to the non-unboxing stub. BindToMethod will reinforce that.
+ // Instance: We only support binding to an unboxed value type reference here, so we must never use an unboxing stub
+ // BindToMethodInfo/Name will bind to the unboxing stub. BindToMethod will rebind to the non-unboxing stub.
+ // Virtual: trivial (not allowed)
+ // Closed delegate
+ // Static: never use unboxing stub
+ // BindToMethodInfo/Name will bind to the non-unboxing stub.
+ // Instance: always use unboxing stub
+ // BindToMethodInfo/Name will bind to the unboxing stub.
+ // Virtual: always use unboxing stub
+ // BindToMethodInfo/Name will bind to the unboxing stub.
+
+ pCurMethod =
+ MethodDesc::FindOrCreateAssociatedMethodDesc(pCurMethod,
+ methodType.GetMethodTable(),
+ (!pCurMethod->IsStatic() && pCurMethod->GetMethodTable()->IsValueType()),
+ pCurMethod->GetMethodInstantiation(),
+ false /* do not allow code with a shared-code calling convention to be returned */,
+ true /* Ensure that methods on generic interfaces are returned as instantiated method descs */);
+ BOOL fIsOpenDelegate;
+ if (!COMDelegate::IsMethodDescCompatible((gc.target == NULL) ? TypeHandle() : gc.target->GetTrueTypeHandle(),
+ methodType,
+ pCurMethod,
+ gc.refThis->GetTypeHandle(),
+ pInvokeMeth,
+ flags,
+ &fIsOpenDelegate))
+ {
+ // Signature doesn't match, skip.
+ continue;
+ }
+
+ if (!COMDelegate::ValidateSecurityTransparency(pCurMethod, gc.refThis->GetTypeHandle().AsMethodTable()))
+ {
+ // violates security transparency rules, skip.
+ continue;
+ }
+
+ // Found the target that matches the signature and satisfies security transparency rules.
+ // Initialize the delegate to point to the target method.
+ BindToMethod(&gc.refThis,
+ &gc.target,
+ pCurMethod,
+ methodType.GetMethodTable(),
+ fIsOpenDelegate,
+ TRUE);
+
+ pMatchingMethod = pCurMethod;
+ goto done;
+ }
+ }
+ }
+ done:
+ ;
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(pMatchingMethod != NULL);
+}
+FCIMPLEND
+
+
+FCIMPL5(FC_BOOL_RET, COMDelegate::BindToMethodInfo, Object* refThisUNSAFE, Object* targetUNSAFE, ReflectMethodObject *pMethodUNSAFE, ReflectClassBaseObject *pMethodTypeUNSAFE, int flags)
+{
+ FCALL_CONTRACT;
+
+ BOOL result = TRUE;
+
+ struct _gc
+ {
+ DELEGATEREF refThis;
+ OBJECTREF refFirstArg;
+ REFLECTCLASSBASEREF refMethodType;
+ REFLECTMETHODREF refMethod;
+ } gc;
+
+ gc.refThis = (DELEGATEREF) ObjectToOBJECTREF(refThisUNSAFE);
+ gc.refFirstArg = ObjectToOBJECTREF(targetUNSAFE);
+ gc.refMethodType = (REFLECTCLASSBASEREF) ObjectToOBJECTREF(pMethodTypeUNSAFE);
+ gc.refMethod = (REFLECTMETHODREF) ObjectToOBJECTREF(pMethodUNSAFE);
+
+ MethodTable *pMethMT = gc.refMethodType->GetType().GetMethodTable();
+ MethodDesc *method = gc.refMethod->GetMethod();
+
+ //We should throw an exception if the assembly doesn't have run access.
+ //That would be a breaking change from V2.
+ //
+ //Assembly *pAssem = pMethMT->GetAssembly();
+ //if (pAssem->IsDynamic() && !pAssem->HasRunAccess())
+ // FCThrowRes(kNotSupportedException, W("NotSupported_DynamicAssemblyNoRunAccess"));
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ // Assert to track down VS#458689.
+ _ASSERTE(gc.refThis != gc.refFirstArg);
+
+ // A generic method had better be instantiated (we can't dispatch to an uninstantiated one).
+ if (method->IsGenericMethodDefinition())
+ COMPlusThrow(kArgumentException, W("Arg_DlgtTargMeth"));
+
+ // get the invoke of the delegate
+ MethodTable * pDelegateType = gc.refThis->GetMethodTable();
+ MethodDesc* pInvokeMeth = COMDelegate::FindDelegateInvokeMethod(pDelegateType);
+ _ASSERTE(pInvokeMeth);
+
+ // See the comment in BindToMethodName
+ method =
+ MethodDesc::FindOrCreateAssociatedMethodDesc(method,
+ pMethMT,
+ (!method->IsStatic() && pMethMT->IsValueType()),
+ method->GetMethodInstantiation(),
+ false /* do not allow code with a shared-code calling convention to be returned */,
+ true /* Ensure that methods on generic interfaces are returned as instantiated method descs */);
+
+ BOOL fIsOpenDelegate;
+ if (COMDelegate::IsMethodDescCompatible((gc.refFirstArg == NULL) ? TypeHandle() : gc.refFirstArg->GetTrueTypeHandle(),
+ TypeHandle(pMethMT),
+ method,
+ gc.refThis->GetTypeHandle(),
+ pInvokeMeth,
+ flags,
+ &fIsOpenDelegate) &&
+ COMDelegate::ValidateSecurityTransparency(method, gc.refThis->GetTypeHandle().AsMethodTable()) )
+ {
+ // Initialize the delegate to point to the target method.
+ BindToMethod(&gc.refThis,
+ &gc.refFirstArg,
+ method,
+ pMethMT,
+ fIsOpenDelegate,
+ !(flags & DBF_SkipSecurityChecks));
+ }
+ else
+ result = FALSE;
+
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(result);
+}
+FCIMPLEND
+
+// This method is called (in the late bound case only) once a target method has been decided on. All the consistency checks
+// (signature matching etc.) have been done at this point and the only major reason we could fail now is on security grounds
+// (someone trying to create a delegate over a method that's not visible to them for instance). This method will initialize the
+// delegate (wrapping it in a secure delegate if necessary). Upon return the delegate should be ready for invocation.
+void COMDelegate::BindToMethod(DELEGATEREF *pRefThis,
+ OBJECTREF *pRefFirstArg,
+ MethodDesc *pTargetMethod,
+ MethodTable *pExactMethodType,
+ BOOL fIsOpenDelegate,
+ BOOL fCheckSecurity)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pRefThis));
+ PRECONDITION(CheckPointer(pRefFirstArg, NULL_OK));
+ PRECONDITION(CheckPointer(pTargetMethod));
+ PRECONDITION(CheckPointer(pExactMethodType));
+ }
+ CONTRACTL_END;
+
+ // We might have to wrap the delegate in a secure delegate depending on the location of the target method. The following local
+ // keeps track of the real (i.e. non-secure) delegate whether or not this is required.
+ DELEGATEREF refRealDelegate = NULL;
+ GCPROTECT_BEGIN(refRealDelegate);
+
+ // Security checks (i.e. whether the creator of the delegate is allowed to access the target method) are the norm. They are only
+ // disabled when:
+ // 1. this is called by deserialization to recreate an existing delegate instance, where such checks are unwarranted.
+ // 2. this is called from DynamicMethod.CreateDelegate which doesn't need an access check.
+ if (fCheckSecurity)
+ {
+ MethodTable *pInstanceMT = pExactMethodType;
+ bool targetPossiblyRemoted = false;
+
+ if (fIsOpenDelegate)
+ {
+ _ASSERTE(pRefFirstArg == NULL || *pRefFirstArg == NULL);
+
+#ifdef FEATURE_REMOTING
+ if (!pTargetMethod->IsStatic())
+ {
+ // Open-instance delegate may have remoted target if the method is declared by
+ // an interface, by a type deriving from MarshalByRefObject, or by System.Object.
+ // The following condition is necessary but not sufficient as it's always possible
+ // to invoke the delegate on a local instance. Precise check would require doing
+ // the check at invocation time. We are secure because we demand MemberAccess when
+ // there is a possibility that the invocation will be remote.
+ //
+ MethodTable *pMT = pTargetMethod->GetMethodTable();
+ targetPossiblyRemoted = (pMT == g_pObjectClass || pMT->IsInterface() || pMT->IsMarshaledByRef());
+ }
+#endif // FEATURE_REMOTING
+ }
+ else
+ {
+ // closed-static is OK and we can check the target in the closed-instance case
+ pInstanceMT = (*pRefFirstArg == NULL ? NULL : (*pRefFirstArg)->GetMethodTable());
+#ifdef FEATURE_REMOTING
+ targetPossiblyRemoted = InvokeUtil::IsTargetRemoted(pTargetMethod, pInstanceMT);
+#endif
+ }
+
+ RefSecContext sCtx(InvokeUtil::GetInvocationAccessCheckType(targetPossiblyRemoted));
+
+ // Check visibility of the target method. If it's an instance method, we have to pass the type
+ // of the instance being accessed which we get from the first argument or from the method itself.
+ // The type of the instance is necessary for visibility checks of protected methods.
+ InvokeUtil::CheckAccessMethod(&sCtx,
+ pExactMethodType,
+ pTargetMethod->IsStatic() ? NULL : pInstanceMT,
+ pTargetMethod);
+
+ // Trip any link demands the target method requires.
+ InvokeUtil::CheckLinktimeDemand(&sCtx,
+ pTargetMethod);
+
+ // Ask for skip verification if a delegate over a .ctor or .cctor is requested.
+ if (pTargetMethod->IsClassConstructorOrCtor())
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
+
+#ifdef FEATURE_COMINTEROP
+ // Check if it's a COM object and if so, demand unmanaged code permission.
+ // <TODO> I think we need a target check here. Investigate. </TODO>
+ if (pTargetMethod && pTargetMethod->GetMethodTable()->IsComObjectType())
+ Security::SpecialDemand(SSWT_DEMAND_FROM_NATIVE, SECURITY_UNMANAGED_CODE);
+#endif // FEATURE_COMINTEROP
+
+ // Devdiv bug 296229: dangerous methods are those that make security decisions based on
+ // the result of stack walks. When a delegate to such a method is invoked asynchronously
+ // the stackwalker will stop at the remoting code and consider the caller unmanaged code.
+ // Unmanaged code is allowed to bypass any security check.
+ if (InvokeUtil::IsDangerousMethod(pTargetMethod))
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, REFLECTION_MEMBER_ACCESS);
+
+ // Check whether the creator of the delegate lives in the same assembly as the target method. If not, and they aren't fully
+ // trusted, we have to make this delegate a secure wrapper and allocate a new inner delegate to represent the real target.
+ MethodDesc *pCreatorMethod = sCtx.GetCallerMethod();
+ if (NeedsSecureDelegate(pCreatorMethod, sCtx.GetCallerDomain(), pTargetMethod))
+ refRealDelegate = CreateSecureDelegate(*pRefThis, pCreatorMethod, pTargetMethod);
+ }
+
+ // If we didn't wrap the real delegate in a secure delegate then the real delegate is the one passed in.
+ if (refRealDelegate == NULL)
+ {
+ if (NeedsWrapperDelegate(pTargetMethod))
+ refRealDelegate = CreateSecureDelegate(*pRefThis, NULL, pTargetMethod);
+ else
+ refRealDelegate = *pRefThis;
+ }
+
+ pTargetMethod->EnsureActive();
+
+ if (fIsOpenDelegate)
+ {
+ _ASSERTE(pRefFirstArg == NULL || *pRefFirstArg == NULL);
+
+ // Open delegates use themselves as the target (which handily allows their shuffle thunks to locate additional data at
+ // invocation time).
+ refRealDelegate->SetTarget(refRealDelegate);
+
+ // We need to shuffle arguments for open delegates since the first argument on the calling side is not meaningful to the
+ // callee.
+ MethodTable * pDelegateMT = (*pRefThis)->GetMethodTable();
+ DelegateEEClass *pDelegateClass = (DelegateEEClass*)pDelegateMT->GetClass();
+ Stub *pShuffleThunk = NULL;
+
+ // Look for a thunk cached on the delegate class first. Note we need a different thunk for instance methods with a
+ // hidden return buffer argument because the extra argument switches places with the target when coming from the caller.
+ if (!pTargetMethod->IsStatic() && pTargetMethod->HasRetBuffArg())
+ pShuffleThunk = pDelegateClass->m_pInstRetBuffCallStub;
+ else
+ pShuffleThunk = pDelegateClass->m_pStaticCallStub;
+
+ // If we haven't already setup a shuffle thunk go do it now (which will cache the result automatically).
+ if (!pShuffleThunk)
+ pShuffleThunk = SetupShuffleThunk(pDelegateMT, pTargetMethod);
+
+ // Indicate that the delegate will jump to the shuffle thunk rather than directly to the target method.
+ refRealDelegate->SetMethodPtr(pShuffleThunk->GetEntryPoint());
+
+ // Use stub dispatch for all virtuals.
+ // <TODO> Investigate not using this for non-interface virtuals. </TODO>
+ // The virtual dispatch stub doesn't work on unboxed value type objects which don't have MT pointers.
+ // Since open instance delegates on value type methods require unboxed objects we cannot use the
+ // virtual dispatch stub for them. On the other hand, virtual methods on value types don't need
+ // to be dispatched because value types cannot be derived from. So we treat them like non-virtual methods.
+ if (pTargetMethod->IsVirtual() && !pTargetMethod->GetMethodTable()->IsValueType())
+ {
+ // Since this is an open delegate over a virtual method we cannot virtualize the call target now. So the shuffle thunk
+ // needs to jump to another stub (this time provided by the VirtualStubManager) that will virtualize the call at
+ // runtime.
+ PCODE pTargetCall = GetVirtualCallStub(pTargetMethod, TypeHandle(pExactMethodType));
+ refRealDelegate->SetMethodPtrAux(pTargetCall);
+ refRealDelegate->SetInvocationCount((INT_PTR)(void *)pTargetMethod);
+ }
+ else
+ {
+ // <TODO> If VSD isn't compiled in this gives the wrong result for virtuals (we need run time virtualization). </TODO>
+ // Reflection or the code in BindToMethodName will pass us the unboxing stub for non-static methods on value types. But
+ // for open invocation on value type methods the actual reference will be passed so we need the unboxed method desc
+ // instead.
+ if (pTargetMethod->IsUnboxingStub())
+ {
+ // We want a MethodDesc which is not an unboxing stub, but is an instantiating stub if needed.
+ pTargetMethod = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pTargetMethod,
+ pExactMethodType,
+ FALSE /* don't want unboxing entry point */,
+ pTargetMethod->GetMethodInstantiation(),
+ FALSE /* don't want MD that requires inst. arguments */,
+ true /* Ensure that methods on generic interfaces are returned as instantiated method descs */);
+ }
+
+ // The method must not require any extra hidden instantiation arguments.
+ _ASSERTE(!pTargetMethod->RequiresInstArg());
+
+ // Note that it is important to cache pTargetCode in local variable to avoid GC hole.
+ // GetMultiCallableAddrOfCode() can trigger GC.
+ PCODE pTargetCode = pTargetMethod->GetMultiCallableAddrOfCode();
+ refRealDelegate->SetMethodPtrAux(pTargetCode);
+ }
+ }
+ else
+ {
+ PCODE pTargetCode = NULL;
+
+ // For virtual methods we can (and should) virtualize the call now (so we don't have to insert a thunk to do so at runtime).
+ // <TODO>
+ // Remove the following if we decide we won't cope with this case on late bound.
+ // We can get virtual delegates closed over null through this code path, so be careful to handle that case (no need to
+ // virtualize since we're just going to throw NullRefException at invocation time).
+ // </TODO>
+ if (pTargetMethod->IsVirtual() &&
+ *pRefFirstArg != NULL &&
+ pTargetMethod->GetMethodTable() != (*pRefFirstArg)->GetMethodTable())
+ pTargetCode = pTargetMethod->GetMultiCallableAddrOfVirtualizedCode(pRefFirstArg, pTargetMethod->GetMethodTable());
+ else
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+ if (pTargetMethod->IsStatic() && pTargetMethod->HasRetBuffArg())
+ pTargetCode = pTargetMethod->GetLoaderAllocatorForCode()->GetFuncPtrStubs()->GetFuncPtrStub(pTargetMethod, PRECODE_THISPTR_RETBUF);
+ else
+#endif // HAS_THISPTR_RETBUF_PRECODE
+ pTargetCode = pTargetMethod->GetMultiCallableAddrOfCode();
+ _ASSERTE(pTargetCode);
+
+ refRealDelegate->SetTarget(*pRefFirstArg);
+ refRealDelegate->SetMethodPtr(pTargetCode);
+ }
+
+ LoaderAllocator *pLoaderAllocator = pTargetMethod->GetLoaderAllocator();
+
+ if (pLoaderAllocator->IsCollectible())
+ refRealDelegate->SetMethodBase(pLoaderAllocator->GetExposedObject());
+
+ GCPROTECT_END();
+}
+
+#ifdef FEATURE_CORECLR
+// On the CoreCLR, we don't allow non-full-trust delegates to be marshaled out (or created; CorHost::CreateDelegate ensures that).
+// This helper function checks if we have a full-trust delegate with AllowReversePInvokeCallsAttribute targets.
+BOOL COMDelegate::IsFullTrustDelegate(DELEGATEREF pDelegate)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_WINDOWSPHONE
+ // we always allow reverse p/invokes on the phone. The OS provides the sandbox.
+ return TRUE;
+#else
+ if (IsSecureDelegate(pDelegate))
+ {
+ // A secure delegate implies that the creator and target are different, and that the creator is not fully trusted.
+ return FALSE;
+ }
+ else
+ {
+ // It suffices to look at the target assembly and check if that is fully trusted:
+ // if the creator is the same as the target, we're done.
+ // If the creator is not the same as the target, then the only interesting case is when the creator is not FT,
+ // and that's captured by the secure delegate case above.
+ // The target method yields the target assembly. Target method is not determinable for certain cases:
+ // - Open Virtual Delegates
+ // For those cases we play it safe and return FALSE from this function
+ if (pDelegate->GetInvocationCount() != 0)
+ {
+ // From MulticastDelegate.cs (MulticastDelegate.Equals):
+ // there are 4 kind of delegate kinds that fall into this bucket
+ // 1- Multicast (_invocationList is Object[])
+ // 2- Secure (_invocationList is Delegate)
+ // 3- Unmanaged FntPtr (_invocationList == null)
+ // 4- Open virtual (_invocationCount == MethodDesc of target)
+ // (_invocationList == null, or _invocationList is a LoaderAllocator or DynamicResolver)
+
+ OBJECTREF invocationList = pDelegate->GetInvocationList();
+ if (invocationList != NULL)
+ {
+ MethodTable *pMT = invocationList->GetTrueMethodTable();
+ // Has to be a multicast delegate, or an inner open virtual delegate of a collectible secure delegate,
+ // since we already checked for secure delegates above.
+ _ASSERTE(!pMT->IsDelegate());
+
+ if (!pMT->IsArray())
+ {
+ // open Virtual delegate: conservatively return FALSE
+ return FALSE;
+ }
+
+ // Given a multicast delegate we walk the list and make sure all targets are FullTrust.
+ // Yes, this is a recursive call to IsFullTrustDelegate, but we should hit a stack overflow
+ // only in the same cases where invoking that delegate would hit a stack overflow.
+ PTRARRAYREF delegateArrayRef = (PTRARRAYREF) invocationList;
+
+ int numDelegates = delegateArrayRef->GetNumComponents();
+ for (int i = 0; i < numDelegates; i++)
+ {
+ DELEGATEREF innerDel = (DELEGATEREF)delegateArrayRef->GetAt(i);
+ _ASSERTE(innerDel->GetMethodTable()->IsDelegate());
+ if (!IsFullTrustDelegate(innerDel))
+ {
+ // If we find even one non full-trust target in the list, return FALSE
+ return FALSE;
+ }
+ }
+ // All targets in the multicast delegate are FullTrust, so this multicast delegate is
+ // also FullTrust
+ return TRUE;
+ }
+ else
+ {
+ if (pDelegate->GetInvocationCount() == DELEGATE_MARKER_UNMANAGEDFPTR)
+ {
+ // Delegate to unmanaged function pointer - FullTrust
+ return TRUE;
+ }
+
+ //
+ // open Virtual delegate: conservatively return FALSE
+ return FALSE;
+ }
+ }
+ // Regular delegate. Let's just look at the target Method
+ MethodDesc* pMD = GetMethodDesc((OBJECTREF)pDelegate);
+ if (pMD != NULL)
+ {
+ // The target must be decorated with AllowReversePInvokeCallsAttribute
+ if (!IsMethodAllowedToSinkReversePInvoke(pMD)) return FALSE;
+
+ return pMD->GetModule()->GetSecurityDescriptor()->IsFullyTrusted();
+ }
+ }
+ // Default:
+ return FALSE;
+#endif //FEATURE_WINDOWSPHONE
+}
+
+// Checks whether the method is decorated with AllowReversePInvokeCallsAttribute.
+BOOL COMDelegate::IsMethodAllowedToSinkReversePInvoke(MethodDesc *pMD)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_WINDOWSPHONE
+ // we always allow reverse p/invokes on the phone. The OS provides the sandbox.
+ return TRUE;
+#else
+ return (S_OK == pMD->GetMDImport()->GetCustomAttributeByName(
+ pMD->GetMemberDef(),
+ "System.Runtime.InteropServices.AllowReversePInvokeCallsAttribute",
+ NULL,
+ NULL));
+#endif // FEATURE_WINDOWSPHONE
+}
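+
+// For reference, a managed target opts in by carrying the attribute checked above.
+// C# sketch (illustrative only; the attribute is parameterless and only its presence
+// is examined here):
+//
+//   [System.Runtime.InteropServices.AllowReversePInvokeCalls]
+//   static void OnNativeCallback(int code) { /* ... */ }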
+#endif // FEATURE_CORECLR
+
+// Marshals a delegate to an unmanaged callback.
+LPVOID COMDelegate::ConvertToCallback(OBJECTREF pDelegateObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ if (!pDelegateObj)
+ return NULL;
+
+ DELEGATEREF pDelegate = (DELEGATEREF) pDelegateObj;
+
+ PCODE pCode;
+ GCPROTECT_BEGIN(pDelegate);
+
+ MethodTable* pMT = pDelegate->GetMethodTable();
+ DelegateEEClass* pClass = (DelegateEEClass*)(pMT->GetClass());
+
+#ifdef FEATURE_CORECLR
+ // On the CoreCLR, we only allow marshaling out delegates that we can guarantee are full-trust delegates
+ if (!IsFullTrustDelegate(pDelegate))
+ {
+ StackSString strDelegateType;
+ TypeString::AppendType(strDelegateType, pMT, TypeString::FormatNamespace | TypeString::FormatAngleBrackets| TypeString::FormatSignature);
+ COMPlusThrow(kSecurityException, IDS_E_DELEGATE_FULLTRUST_ARPIC_1, strDelegateType.GetUnicode());
+ }
+#endif
+
+ if (pMT->HasInstantiation())
+ COMPlusThrowArgumentException(W("delegate"), W("Argument_NeedNonGenericType"));
+
+ if (pMT->Collectible())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleDelegateMarshal"));
+
+ // If we are a delegate originally created from an unmanaged function pointer, we will simply return
+ // that function pointer.
+ if (DELEGATE_MARKER_UNMANAGEDFPTR == pDelegate->GetInvocationCount())
+ {
+ pCode = pDelegate->GetMethodPtrAux();
+ }
+ else
+ {
+ UMEntryThunk* pUMEntryThunk = NULL;
+ SyncBlock* pSyncBlock = pDelegate->GetSyncBlock();
+
+ InteropSyncBlockInfo* pInteropInfo = pSyncBlock->GetInteropInfo();
+
+ pUMEntryThunk = (UMEntryThunk*)pInteropInfo->GetUMEntryThunk();
+
+ if (!pUMEntryThunk)
+ {
+
+ UMThunkMarshInfo *pUMThunkMarshInfo = pClass->m_pUMThunkMarshInfo;
+ MethodDesc *pInvokeMeth = FindDelegateInvokeMethod(pMT);
+
+ if (!pUMThunkMarshInfo)
+ {
+ GCX_PREEMP();
+
+ pUMThunkMarshInfo = new UMThunkMarshInfo();
+ pUMThunkMarshInfo->LoadTimeInit(pInvokeMeth);
+
+ g_IBCLogger.LogEEClassCOWTableAccess(pMT);
+ EnsureWritablePages(pClass);
+ if (FastInterlockCompareExchangePointer(&(pClass->m_pUMThunkMarshInfo),
+ pUMThunkMarshInfo,
+ NULL ) != NULL)
+ {
+ delete pUMThunkMarshInfo;
+ pUMThunkMarshInfo = pClass->m_pUMThunkMarshInfo;
+ }
+ }
+
+ _ASSERTE(pUMThunkMarshInfo != NULL);
+ _ASSERTE(pUMThunkMarshInfo == pClass->m_pUMThunkMarshInfo);
+
+ pUMEntryThunk = UMEntryThunk::CreateUMEntryThunk();
+ Holder<UMEntryThunk *, DoNothing, UMEntryThunk::FreeUMEntryThunk> umHolder;
+ umHolder.Assign(pUMEntryThunk);
+
+ // multicast. go thru Invoke
+ OBJECTHANDLE objhnd = GetAppDomain()->CreateLongWeakHandle(pDelegate);
+ _ASSERTE(objhnd != NULL);
+
+ // This target should not ever be used. We are storing it in the thunk for better diagnostics of "call on collected delegate" crashes.
+ PCODE pManagedTargetForDiagnostics = (pDelegate->GetMethodPtrAux() != NULL) ? pDelegate->GetMethodPtrAux() : pDelegate->GetMethodPtr();
+
+ // MethodDesc is passed in for profiling to know the method desc of the target
+ pUMEntryThunk->LoadTimeInit(
+ pManagedTargetForDiagnostics,
+ objhnd,
+ pUMThunkMarshInfo, pInvokeMeth,
+ GetAppDomain()->GetId());
+
+#ifdef FEATURE_WINDOWSPHONE
+ // Perform the runtime initialization lazily for better startup time. Lazy initialization
+ // has a worse diagnostic experience (the invalid marshaling directive exception is thrown
+ // lazily on the first call instead of during delegate creation), but it should be ok
+ // for CoreCLR on phone because reverse p-invoke is for internal use only.
+#else
+ {
+ GCX_PREEMP();
+
+ pUMEntryThunk->RunTimeInit();
+ }
+#endif
+
+ if (!pInteropInfo->SetUMEntryThunk(pUMEntryThunk))
+ {
+ pUMEntryThunk = (UMEntryThunk*)pInteropInfo->GetUMEntryThunk();
+ }
+ else
+ {
+ umHolder.SuppressRelease();
+ // Insert the delegate handle / UMEntryThunk* into the hash
+ LPVOID key = (LPVOID)pUMEntryThunk;
+
+ // Assert that the entry isn't already in the hash.
+ _ASSERTE((LPVOID)INVALIDENTRY == COMDelegate::s_pDelegateToFPtrHash->LookupValue((UPTR)key, 0));
+
+ {
+ CrstHolder ch(&COMDelegate::s_DelegateToFPtrHashCrst);
+ COMDelegate::s_pDelegateToFPtrHash->InsertValue((UPTR)key, pUMEntryThunk->GetObjectHandle());
+ }
+ }
+
+ _ASSERTE(pUMEntryThunk != NULL);
+ _ASSERTE(pUMEntryThunk == (UMEntryThunk*)pInteropInfo->GetUMEntryThunk());
+
+ }
+ pCode = (PCODE)pUMEntryThunk->GetCode();
+ }
+
+ GCPROTECT_END();
+ return (LPVOID)pCode;
+}
+
+// Marshals an unmanaged callback to Delegate
+//static
+OBJECTREF COMDelegate::ConvertToDelegate(LPVOID pCallback, MethodTable* pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (!pCallback)
+ {
+ return NULL;
+ }
+
+ //////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // Check if this callback was originally a managed method passed out to unmanaged code.
+ //
+
+ UMEntryThunk* pUMEntryThunk = NULL;
+
+#ifdef MDA_SUPPORTED
+ if (MDA_GET_ASSISTANT(InvalidFunctionPointerInDelegate))
+ {
+ EX_TRY
+ {
+ AVInRuntimeImplOkayHolder AVOkay;
+ pUMEntryThunk = UMEntryThunk::Decode(pCallback);
+ }
+ EX_CATCH
+ {
+ MDA_TRIGGER_ASSISTANT(InvalidFunctionPointerInDelegate, ReportViolation(pCallback));
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+ else
+#endif // MDA_SUPPORTED
+ {
+ pUMEntryThunk = UMEntryThunk::Decode(pCallback);
+ }
+
+ // Look up the callsite in the hash; if found, we can map this call back to its managed function.
+ // Otherwise, we'll treat this as an unmanaged callsite.
+ // Make sure that the pointer doesn't have the value of 1, which is our hash table's deleted-item marker.
+ LPVOID DelegateHnd = (pUMEntryThunk != NULL) && ((UPTR)pUMEntryThunk != (UPTR)1)
+ ? COMDelegate::s_pDelegateToFPtrHash->LookupValue((UPTR)pUMEntryThunk, 0)
+ : (LPVOID)INVALIDENTRY;
+
+ if (DelegateHnd != (LPVOID)INVALIDENTRY)
+ {
+ // Found a managed callsite
+ OBJECTREF pDelegate = NULL;
+ GCPROTECT_BEGIN(pDelegate);
+
+ pDelegate = ObjectFromHandle((OBJECTHANDLE)DelegateHnd);
+
+ // Make sure we're not trying to sneak into another domain.
+ SyncBlock* pSyncBlock = pDelegate->GetSyncBlock();
+ _ASSERTE(pSyncBlock);
+
+ InteropSyncBlockInfo* pInteropInfo = pSyncBlock->GetInteropInfo();
+ _ASSERTE(pInteropInfo);
+
+ pUMEntryThunk = (UMEntryThunk*)pInteropInfo->GetUMEntryThunk();
+ _ASSERTE(pUMEntryThunk);
+
+ if (pUMEntryThunk->GetDomainId() != GetAppDomain()->GetId())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_DelegateMarshalToWrongDomain"));
+
+#ifdef FEATURE_CORECLR
+ // On the CoreCLR, we only allow marshaling out delegates that we can guarantee are full-trust delegates
+ if (!IsFullTrustDelegate((DELEGATEREF)pDelegate))
+ {
+ COMPlusThrow(kSecurityException, IDS_E_DELEGATE_FULLTRUST_ARPIC_2);
+ }
+#endif
+
+ GCPROTECT_END();
+ return pDelegate;
+ }
+
+
+ //////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // This is an unmanaged callsite. We need to create a new delegate.
+ //
+ // The delegate's invoke method will point to a call thunk.
+ // The call thunk will internally shuffle the args, set up a DelegateTransitionFrame, marshal the args,
+ // call the UM Function located at m_pAuxField, unmarshal the args, and return.
+ // Invoke -> CallThunk -> ShuffleThunk -> Frame -> Marshal -> Call AuxField -> UnMarshal
+
+ DelegateEEClass* pClass = (DelegateEEClass*)pMT->GetClass();
+ MethodDesc* pMD = FindDelegateInvokeMethod(pMT);
+
+ if (pMT->Collectible())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleDelegateMarshal"));
+
+ PREFIX_ASSUME(pClass != NULL);
+
+ //////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // Get or create the marshaling stub information
+ //
+
+ PCODE pMarshalStub = pClass->m_pMarshalStub;
+ if (pMarshalStub == NULL)
+ {
+ GCX_PREEMP();
+
+ DWORD dwStubFlags = pMT->ClassRequiresUnmanagedCodeCheck() ? NDIRECTSTUB_FL_HASDECLARATIVESECURITY : 0;
+ pMarshalStub = GetStubForInteropMethod(pMD, dwStubFlags, &(pClass->m_pForwardStubMD));
+
+ // Save this new stub on the DelegateEEClass.
+ EnsureWritablePages(dac_cast<PVOID>(&pClass->m_pMarshalStub), sizeof(PCODE));
+ InterlockedCompareExchangeT<PCODE>(&pClass->m_pMarshalStub, pMarshalStub, NULL);
+
+ pMarshalStub = pClass->m_pMarshalStub;
+ }
+
+ // The IL marshaling stub performs the function of the shuffle thunk - it simply omits 'this' in
+ // the call to unmanaged code. The stub recovers the unmanaged target from the delegate instance.
+
+ _ASSERTE(pMarshalStub != NULL);
+
+ //////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // Wire up the stubs to the new delegate instance.
+ //
+
+ LOG((LF_INTEROP, LL_INFO10000, "Created delegate for function pointer: entrypoint: %p\n", pMarshalStub));
+
+ // Create the new delegate
+ DELEGATEREF delObj = (DELEGATEREF) pMT->Allocate();
+
+ {
+ // delObj is not protected
+ GCX_NOTRIGGER();
+
+ // Wire up the unmanaged call stub to the delegate.
+ delObj->SetTarget(delObj); // We are the "this" object
+
+ // For X86, we save the entry point in the delegate's method pointer and the UM Callsite in the aux pointer.
+ delObj->SetMethodPtr(pMarshalStub);
+ delObj->SetMethodPtrAux((PCODE)pCallback);
+
+ // Also, mark this delegate as an unmanaged function pointer wrapper.
+ delObj->SetInvocationCount(DELEGATE_MARKER_UNMANAGEDFPTR);
+ }
+
+#if defined(_TARGET_X86_)
+ GCPROTECT_BEGIN(delObj);
+
+ Stub *pInterceptStub = NULL;
+
+ {
+ GCX_PREEMP();
+
+ MethodDesc *pStubMD = pClass->m_pForwardStubMD;
+ _ASSERTE(pStubMD != NULL && pStubMD->IsILStub());
+
+#ifndef FEATURE_CORECLR
+ if (pStubMD->AsDynamicMethodDesc()->HasCopyCtorArgs())
+ {
+ // static stub that gets its arguments in a thread-static field
+ pInterceptStub = NDirect::GetStubForCopyCtor();
+ }
+#endif // !FEATURE_CORECLR
+
+#ifdef MDA_SUPPORTED
+ if (MDA_GET_ASSISTANT(PInvokeStackImbalance))
+ {
+ pInterceptStub = GenerateStubForMDA(pMD, pStubMD, pCallback, pInterceptStub);
+ }
+#endif // MDA_SUPPORTED
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (NDirect::IsHostHookEnabled() && CallNeedsHostHook((size_t)pCallback))
+ {
+ pInterceptStub = GenerateStubForHost(
+ pMD,
+ pStubMD,
+ pCallback,
+ pInterceptStub);
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ }
+
+ if (pInterceptStub != NULL)
+ {
+ // install the outer-most stub into the sync block
+ SyncBlock *pSyncBlock = delObj->GetSyncBlock();
+
+ InteropSyncBlockInfo *pInteropInfo = pSyncBlock->GetInteropInfo();
+ VERIFY(pInteropInfo->SetInterceptStub(pInterceptStub));
+ }
+
+ GCPROTECT_END();
+#endif // defined(_TARGET_X86_)
+
+#ifdef FEATURE_CORECLR
+ // On the CoreCLR, we only allow marshaling out delegates that we can guarantee are full-trust delegates
+ if (!IsFullTrustDelegate(delObj))
+ {
+ COMPlusThrow(kSecurityException, IDS_E_DELEGATE_FULLTRUST_ARPIC_2);
+ }
+#endif
+
+ return delObj;
+}
+
+#ifdef FEATURE_COMINTEROP
+// Marshals a WinRT delegate interface pointer to a managed Delegate
+//static
+OBJECTREF COMDelegate::ConvertWinRTInterfaceToDelegate(IUnknown *pIdentity, MethodTable* pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pIdentity));
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ MethodDesc* pMD = FindDelegateInvokeMethod(pMT);
+
+ if (pMT->Collectible())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleDelegateMarshal"));
+
+ if (pMD->IsSharedByGenericInstantiations())
+ {
+ // we need an exact MD to represent the call
+ pMD = InstantiatedMethodDesc::FindOrCreateExactClassMethod(pMT, pMD);
+ }
+ else
+ {
+ // set up ComPlusCallInfo
+ PopulateComPlusCallInfo(pMT);
+ }
+
+ ComPlusCallInfo *pComInfo = ComPlusCallInfo::FromMethodDesc(pMD);
+ PCODE pMarshalStub = (pComInfo == NULL ? NULL : pComInfo->m_pILStub);
+
+ if (pMarshalStub == NULL)
+ {
+ GCX_PREEMP();
+
+ DWORD dwStubFlags = NDIRECTSTUB_FL_COM | NDIRECTSTUB_FL_WINRT | NDIRECTSTUB_FL_WINRTDELEGATE;
+
+ if (pMT->ClassRequiresUnmanagedCodeCheck())
+ dwStubFlags |= NDIRECTSTUB_FL_HASDECLARATIVESECURITY;
+
+ pMarshalStub = GetStubForInteropMethod(pMD, dwStubFlags);
+
+ // At this point we must have a non-NULL ComPlusCallInfo
+ pComInfo = ComPlusCallInfo::FromMethodDesc(pMD);
+ _ASSERTE(pComInfo != NULL);
+
+ // Save this new stub on the ComPlusCallInfo
+ InterlockedCompareExchangeT<PCODE>(EnsureWritablePages(&pComInfo->m_pILStub), pMarshalStub, NULL);
+
+ pMarshalStub = pComInfo->m_pILStub;
+ }
+
+ _ASSERTE(pMarshalStub != NULL);
+
+ //////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // Wire up the stub to the new delegate instance.
+ //
+
+ LOG((LF_INTEROP, LL_INFO10000, "Created delegate for WinRT interface: pUnk: %p\n", pIdentity));
+
+ // Create the new delegate
+ DELEGATEREF delObj = (DELEGATEREF) pMT->Allocate();
+
+ {
+ // delObj is not protected
+ GCX_NOTRIGGER();
+
+ // Wire up the unmanaged call stub to the delegate.
+ delObj->SetTarget(delObj); // We are the "this" object
+
+ // We save the entry point in the delegate's method pointer and the identity pUnk in the aux pointer.
+ delObj->SetMethodPtr(pMarshalStub);
+ delObj->SetMethodPtrAux((PCODE)pIdentity);
+
+ // Also, mark this delegate as an unmanaged function pointer wrapper.
+ delObj->SetInvocationCount(DELEGATE_MARKER_UNMANAGEDFPTR);
+ }
+
+ return delObj;
+}
+#endif // FEATURE_COMINTEROP
+
+void COMDelegate::ValidateDelegatePInvoke(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ if (pMD->IsSynchronized())
+ COMPlusThrow(kTypeLoadException, IDS_EE_NOSYNCHRONIZED);
+
+ if (pMD->MethodDesc::IsVarArg())
+ COMPlusThrow(kNotSupportedException, IDS_EE_VARARG_NOT_SUPPORTED);
+}
+
+// static
+PCODE COMDelegate::GetStubForILStub(EEImplMethodDesc* pDelegateMD, MethodDesc** ppStubMD, DWORD dwStubFlags)
+{
+ CONTRACT(PCODE)
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pDelegateMD));
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+ ValidateDelegatePInvoke(pDelegateMD);
+
+ dwStubFlags |= NDIRECTSTUB_FL_DELEGATE;
+
+ RETURN NDirect::GetStubForILStub(pDelegateMD, ppStubMD, dwStubFlags);
+}
+
+#endif // CROSSGEN_COMPILE
+
+
+// static
+MethodDesc* COMDelegate::GetILStubMethodDesc(EEImplMethodDesc* pDelegateMD, DWORD dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodTable *pMT = pDelegateMD->GetMethodTable();
+
+#ifdef FEATURE_COMINTEROP
+ if (pMT->IsWinRTDelegate())
+ {
+ dwStubFlags |= NDIRECTSTUB_FL_COM | NDIRECTSTUB_FL_WINRT | NDIRECTSTUB_FL_WINRTDELEGATE;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ dwStubFlags |= NDIRECTSTUB_FL_DELEGATE;
+ }
+
+ if (pMT->ClassRequiresUnmanagedCodeCheck())
+ dwStubFlags |= NDIRECTSTUB_FL_HASDECLARATIVESECURITY;
+
+ PInvokeStaticSigInfo sigInfo(pDelegateMD);
+ return NDirect::CreateCLRToNativeILStub(&sigInfo, dwStubFlags, pDelegateMD);
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+FCIMPL2(FC_BOOL_RET, COMDelegate::CompareUnmanagedFunctionPtrs, Object *refDelegate1UNSAFE, Object *refDelegate2UNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(refDelegate1UNSAFE != NULL);
+ PRECONDITION(refDelegate2UNSAFE != NULL);
+ }
+ CONTRACTL_END;
+
+ DELEGATEREF refD1 = (DELEGATEREF) ObjectToOBJECTREF(refDelegate1UNSAFE);
+ DELEGATEREF refD2 = (DELEGATEREF) ObjectToOBJECTREF(refDelegate2UNSAFE);
+ BOOL ret = FALSE;
+
+ // Make sure this is an unmanaged function pointer wrapped in a delegate.
+ CONSISTENCY_CHECK(DELEGATE_MARKER_UNMANAGEDFPTR == refD1->GetInvocationCount());
+ CONSISTENCY_CHECK(DELEGATE_MARKER_UNMANAGEDFPTR == refD2->GetInvocationCount());
+
+ ret = (refD1->GetMethodPtr() == refD2->GetMethodPtr() &&
+ refD1->GetMethodPtrAux() == refD2->GetMethodPtrAux());
+
+ FC_RETURN_BOOL(ret);
+}
+FCIMPLEND
+
+
+void COMDelegate::RemoveEntryFromFPtrHash(UPTR key)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Remove this entry from the lookup hash.
+ CrstHolder ch(&COMDelegate::s_DelegateToFPtrHashCrst);
+ COMDelegate::s_pDelegateToFPtrHash->DeleteValue(key, NULL);
+}
+
+FCIMPL2(PCODE, COMDelegate::GetCallStub, Object* refThisUNSAFE, PCODE method)
+{
+ FCALL_CONTRACT;
+
+ PCODE target = NULL;
+
+ DELEGATEREF refThis = (DELEGATEREF)ObjectToOBJECTREF(refThisUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+ MethodDesc *pMeth = MethodTable::GetMethodDescForSlotAddress((PCODE)method);
+ _ASSERTE(pMeth);
+ _ASSERTE(!pMeth->IsStatic() && pMeth->IsVirtual());
+ target = GetVirtualCallStub(pMeth, TypeHandle(pMeth->GetMethodTable()));
+ refThis->SetInvocationCount((INT_PTR)(void*)pMeth);
+ HELPER_METHOD_FRAME_END();
+ return target;
+}
+FCIMPLEND
+
+FCIMPL3(PCODE, COMDelegate::AdjustTarget, Object* refThisUNSAFE, Object* targetUNSAFE, PCODE method)
+{
+ FCALL_CONTRACT;
+
+ if (targetUNSAFE == NULL)
+ FCThrow(kArgumentNullException);
+
+ OBJECTREF refThis = ObjectToOBJECTREF(refThisUNSAFE);
+ OBJECTREF target = ObjectToOBJECTREF(targetUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refThis, target);
+
+ _ASSERTE(refThis);
+ _ASSERTE(method);
+
+ MethodTable *pRealMT = target->GetTrueMethodTable();
+
+ MethodTable *pMT = target->GetMethodTable();
+ _ASSERTE((NULL == pMT) || pMT->IsTransparentProxy() || !pRealMT->IsContextful());
+
+ MethodDesc *pMeth = Entry2MethodDesc(method, pRealMT);
+ _ASSERTE(pMeth);
+ _ASSERTE(!pMeth->IsStatic());
+
+ // close delegates
+ MethodTable* pMTTarg = target->GetMethodTable();
+ MethodTable* pMTMeth = pMeth->GetMethodTable();
+
+ BOOL isComObject = false;
+
+#ifdef FEATURE_COMINTEROP
+ isComObject = pMTTarg->IsComObjectType();
+ if (isComObject)
+ DoUnmanagedCodeAccessCheck(pMeth);
+#endif // FEATURE_COMINTEROP
+
+ if (!pMT->IsTransparentProxy())
+ {
+ MethodDesc *pCorrectedMethod = pMeth;
+
+ if (pMTMeth != pMTTarg)
+ {
+ //They cast to an interface before creating the delegate, so we now need
+ //to figure out where this actually lives before we continue.
+ //<TODO>@perf: Grovelling with a signature is really slow. Speed this up.</TODO>
+ if (pCorrectedMethod->IsInterface())
+ {
+ // No need to resolve the interface based method desc to a class based
+ // one for COM objects because we invoke directly thru the interface MT.
+ if (!isComObject)
+ {
+ // <TODO>it looks like we need to pass an ownerType in here.
+ // Why can we take a delegate to an interface method anyway? </TODO>
+ //
+ pCorrectedMethod = pMTTarg->FindDispatchSlotForInterfaceMD(pCorrectedMethod).GetMethodDesc();
+ _ASSERTE(pCorrectedMethod != NULL);
+ }
+ }
+ }
+
+ // Use the Unboxing stub for value class methods, since the value
+ // class is constructed using the boxed instance.
+ if (pMTTarg->IsValueType() && !pCorrectedMethod->IsUnboxingStub())
+ {
+ // those should have been ruled out at jit time (code:COMDelegate::GetDelegateCtor)
+ _ASSERTE((pMTMeth != g_pValueTypeClass) && (pMTMeth != g_pObjectClass));
+ pCorrectedMethod->CheckRestore();
+ pCorrectedMethod = pMTTarg->GetBoxedEntryPointMD(pCorrectedMethod);
+ _ASSERTE(pCorrectedMethod != NULL);
+ }
+
+ if (pMeth != pCorrectedMethod)
+ {
+ method = pCorrectedMethod->GetMultiCallableAddrOfCode();
+ }
+ }
+ HELPER_METHOD_FRAME_END();
+
+ return method;
+}
+FCIMPLEND
+
+#if defined(_MSC_VER) && !defined(FEATURE_PAL)
+// VC++ Compiler intrinsic.
+extern "C" void * _ReturnAddress(void);
+#endif // _MSC_VER && !FEATURE_PAL
+
+// This is the single constructor for all Delegates. The compiler
+// doesn't provide an implementation of the Delegate constructor. We
+// provide that implementation through an ECall call to this method.
+FCIMPL3(void, COMDelegate::DelegateConstruct, Object* refThisUNSAFE, Object* targetUNSAFE, PCODE method)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ DELEGATEREF refThis;
+ OBJECTREF target;
+ } gc;
+
+ gc.refThis = (DELEGATEREF) ObjectToOBJECTREF(refThisUNSAFE);
+ gc.target = (OBJECTREF) targetUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ // Via reflection you can pass in just about any value for the method,
+ // so we do some basic verification up front to prevent EE exceptions.
+ if (method == NULL)
+ COMPlusThrowArgumentNull(W("method"));
+
+ void* pRetAddr = _ReturnAddress();
+ MethodDesc * pCreatorMethod = ExecutionManager::GetCodeMethodDesc((PCODE)pRetAddr);
+
+ _ASSERTE(gc.refThis);
+ _ASSERTE(method);
+
+ // programmers could feed garbage data to DelegateConstruct().
+ // It's difficult to validate a method code pointer, but at least we'll
+ // try to catch the easy garbage.
+ _ASSERTE(isMemoryReadable(method, 1));
+
+ MethodTable *pMTTarg = NULL;
+ MethodTable *pRealMT = NULL;
+
+ if (gc.target != NULL)
+ {
+ pMTTarg = gc.target->GetMethodTable();
+ pRealMT = gc.target->GetTrueMethodTable();
+ }
+
+ MethodDesc *pMethOrig = Entry2MethodDesc(method, pRealMT);
+ MethodDesc *pMeth = pMethOrig;
+
+ //
+ // If target is a contextful class, then it must be a proxy
+ //
+ _ASSERTE((NULL == pMTTarg) || pMTTarg->IsTransparentProxy() || !pRealMT->IsContextful());
+
+ MethodTable* pDelMT = gc.refThis->GetMethodTable();
+
+ LOG((LF_STUBS, LL_INFO1000, "In DelegateConstruct: for delegate type %s binding to method %s::%s%s, static = %d\n",
+ pDelMT->GetDebugClassName(),
+ pMeth->m_pszDebugClassName, pMeth->m_pszDebugMethodName, pMeth->m_pszDebugMethodSignature, pMeth->IsStatic()));
+
+ _ASSERTE(pMeth);
+
+#ifdef _DEBUG
+ // Assert that everything is OK...This is not some bogus
+ // address...Very unlikely that the code below would work
+ // for a random address in memory....
+ MethodTable* p = pMeth->GetMethodTable();
+ _ASSERTE(p);
+ _ASSERTE(p->ValidateWithPossibleAV());
+#endif // _DEBUG
+
+ if (Nullable::IsNullableType(pMeth->GetMethodTable()))
+ COMPlusThrow(kNotSupportedException);
+
+ DelegateEEClass *pDelCls = (DelegateEEClass*)pDelMT->GetClass();
+ MethodDesc *pDelegateInvoke = COMDelegate::FindDelegateInvokeMethod(pDelMT);
+
+ MetaSig invokeSig(pDelegateInvoke);
+ MetaSig methodSig(pMeth);
+ UINT invokeArgCount = invokeSig.NumFixedArgs();
+ UINT methodArgCount = methodSig.NumFixedArgs();
+ BOOL isStatic = pMeth->IsStatic();
+ if (!isStatic)
+ {
+ methodArgCount++; // count 'this'
+ }
+
+ // do we need a secure delegate?
+
+ // Devdiv bug 296229: dangerous methods are those that make security decisions based on
+ // the result of stack walks. When a delegate to such a method is invoked asynchronously
+ // the stackwalker will stop at the remoting code and consider the caller unmanaged code.
+ // Unmanaged code is allowed to bypass any security check.
+ if (InvokeUtil::IsDangerousMethod(pMeth))
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, REFLECTION_MEMBER_ACCESS);
+
+ if (NeedsSecureDelegate(pCreatorMethod, GetAppDomain(), pMeth))
+ gc.refThis = CreateSecureDelegate(gc.refThis, pCreatorMethod, pMeth);
+ else if (NeedsWrapperDelegate(pMeth))
+ gc.refThis = CreateSecureDelegate(gc.refThis, NULL, pMeth);
+
+ if (pMeth->GetLoaderAllocator()->IsCollectible())
+ gc.refThis->SetMethodBase(pMeth->GetLoaderAllocator()->GetExposedObject());
+
+ // Open delegates.
+ if (invokeArgCount == methodArgCount)
+ {
+ // set the target
+ gc.refThis->SetTarget(gc.refThis);
+
+ // set the shuffle thunk
+ Stub *pShuffleThunk = NULL;
+ if (!pMeth->IsStatic() && pMeth->HasRetBuffArg())
+ pShuffleThunk = pDelCls->m_pInstRetBuffCallStub;
+ else
+ pShuffleThunk = pDelCls->m_pStaticCallStub;
+ if (!pShuffleThunk)
+ pShuffleThunk = SetupShuffleThunk(pDelMT, pMeth);
+
+ gc.refThis->SetMethodPtr(pShuffleThunk->GetEntryPoint());
+
+ // set the ptr aux according to what is needed: for virtual methods we need to go through virtual stub dispatch
+ if (!pMeth->IsStatic() && pMeth->IsVirtual() && !pMeth->GetMethodTable()->IsValueType())
+ {
+ PCODE pTargetCall = GetVirtualCallStub(pMeth, TypeHandle(pMeth->GetMethodTable()));
+ gc.refThis->SetMethodPtrAux(pTargetCall);
+ gc.refThis->SetInvocationCount((INT_PTR)(void *)pMeth);
+ }
+ else
+ {
+ gc.refThis->SetMethodPtrAux(method);
+ }
+
+ }
+ else
+ {
+ MethodTable* pMTMeth = pMeth->GetMethodTable();
+
+ if (!pMeth->IsStatic())
+ {
+ if (pMTTarg)
+ {
+ // We can skip the demand if SuppressUnmanagedCodePermission is present on the class.
+ // In the case where we are setting up a delegate for a COM event sink, we can also
+ // skip the check if the source interface is defined in fully trusted code or if
+ // the source interface is a disp-only interface.
+ BOOL isComObject = false;
+#ifdef FEATURE_COMINTEROP
+ isComObject = pMTTarg->IsComObjectType();
+ if (isComObject)
+ DoUnmanagedCodeAccessCheck(pMeth);
+#endif // FEATURE_COMINTEROP
+
+ if (!pMTTarg->IsTransparentProxy())
+ {
+ if (pMTMeth != pMTTarg)
+ {
+ // They cast to an interface before creating the delegate, so we now need
+ // to figure out where this actually lives before we continue.
+ // <TODO>@perf: We should never be using this path to invoke on an interface -
+ // that should always be resolved when we are creating the delegate </TODO>
+ if (pMeth->IsInterface())
+ {
+ // No need to resolve the interface based method desc to a class based
+ // one for COM objects because we invoke directly thru the interface MT.
+ if (!isComObject)
+ {
+ // <TODO>it looks like we need to pass an ownerType in here.
+ // Why can we take a delegate to an interface method anyway? </TODO>
+ //
+ pMeth = pMTTarg->FindDispatchSlotForInterfaceMD(pMeth).GetMethodDesc();
+ if (pMeth == NULL)
+ {
+ COMPlusThrow(kArgumentException, W("Arg_DlgtTargMeth"));
+ }
+ }
+ }
+ }
+
+ g_IBCLogger.LogMethodTableAccess(pMTTarg);
+
+ // Use the Unboxing stub for value class methods, since the value
+ // class is constructed using the boxed instance.
+ //
+ // <NICE> We could get the JIT to recognise all delegate creation sequences and
+ // ensure the thing is always a BoxedEntryPointStub anyway </NICE>
+
+ if (pMTMeth->IsValueType() && !pMeth->IsUnboxingStub())
+ {
+ // If these are Object/ValueType.ToString() etc.,
+ // we don't need an unboxing stub.
+
+ if ((pMTMeth != g_pValueTypeClass)
+ && (pMTMeth != g_pObjectClass))
+ {
+ pMeth->CheckRestore();
+ pMeth = pMTTarg->GetBoxedEntryPointMD(pMeth);
+ _ASSERTE(pMeth != NULL);
+ }
+ }
+ // Only update the code address if we've decided to go to a different target...
+ // <NICE> We should make sure the code address that the JIT provided to us is always the right one anyway,
+ // so we don't have to do all this mucking about. </NICE>
+ if (pMeth != pMethOrig)
+ {
+ method = pMeth->GetMultiCallableAddrOfCode();
+ }
+ }
+ }
+
+ if (gc.target == NULL)
+ {
+ COMPlusThrow(kArgumentException, W("Arg_DlgtNullInst"));
+ }
+ }
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+ else if (pMeth->HasRetBuffArg())
+ method = pMeth->GetLoaderAllocatorForCode()->GetFuncPtrStubs()->GetFuncPtrStub(pMeth, PRECODE_THISPTR_RETBUF);
+#endif // HAS_THISPTR_RETBUF_PRECODE
+
+ gc.refThis->SetTarget(gc.target);
+ gc.refThis->SetMethodPtr((PCODE)(void *)method);
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+#ifdef FEATURE_COMINTEROP
+void COMDelegate::DoUnmanagedCodeAccessCheck(MethodDesc* pMeth)
+{
+ // Skip if SuppressUnmanagedCodePermission is present
+ if (pMeth->RequiresLinktimeCheck())
+ {
+ // Check whether this is actually a SuppressUnmanagedCodePermission attribute and
+ // if so, don't do a demand
+#ifndef FEATURE_CORECLR
+ MethodTable* pMTMeth = pMeth->GetMethodTable();
+ if (pMTMeth->GetMDImport()->GetCustomAttributeByName(pMeth->GetMethodTable()->GetCl(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_OK ||
+ pMTMeth->GetMDImport()->GetCustomAttributeByName(pMeth->GetMemberDef(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_OK)
+#endif
+ {
+ return;
+ }
+ }
+
+ // If this method is defined directly on an interface, get that interface
+ // Otherwise, from the class get the interface that this method is defined on.
+ // Based on this interface, skip the check if the interface is DispatchOnly or
+ // if the interface is defined in fully-trusted code.
+ if (pMeth->IsComPlusCall())
+ {
+ ComPlusCallMethodDesc *pCMD = (ComPlusCallMethodDesc *)pMeth;
+ MethodTable* pMTItf = (pCMD->m_pComPlusCallInfo == NULL ? NULL : pCMD->m_pComPlusCallInfo->m_pInterfaceMT);
+
+ // If the interface methodtable is null, then the ComPlusCallMethodDesc hasn't been set up yet.
+ if (pMTItf == NULL)
+ {
+ GCX_PREEMP();
+ pMeth->DoPrestub(NULL);
+ pMTItf = ((ComPlusCallMethodDesc*)pMeth)->m_pComPlusCallInfo->m_pInterfaceMT;
+ }
+ else
+ {
+ pMTItf->CheckRestore();
+ }
+
+ if (pMTItf->GetComInterfaceType() == ifDispatch)
+ {
+ return;
+ }
+ else if (Security::CanCallUnmanagedCode(pMTItf->GetModule()))
+ {
+ return;
+ }
+ }
+
+ Security::SpecialDemand(SSWT_DEMAND_FROM_NATIVE, SECURITY_UNMANAGED_CODE);
+}
+#endif // FEATURE_COMINTEROP
+
+
+MethodDesc *COMDelegate::GetMethodDesc(OBJECTREF orDelegate)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ MethodDesc *pMethodHandle = NULL;
+
+ DELEGATEREF thisDel = (DELEGATEREF) orDelegate;
+ DELEGATEREF innerDel = NULL;
+
+ INT_PTR count = thisDel->GetInvocationCount();
+ if (count != 0)
+ {
+ // this is one of the following:
+ // - multicast - _invocationList is Array && _invocationCount != 0
+ // - unmanaged ftn ptr - _invocationList == NULL && _invocationCount == -1
+ // - secure delegate - _invocationList is Delegate && _invocationCount != 0
+ // - virtual delegate - _invocationList == null && _invocationCount == (target MethodDesc)
+ // or _invocationList points to a LoaderAllocator/DynamicResolver (inner open virtual delegate of a Secure Delegate)
+ // in the secure delegate case we want to unwrap and return the method desc of the inner delegate
+ // in the other cases we return the method desc for the invoke
+ innerDel = (DELEGATEREF) thisDel->GetInvocationList();
+ bool fOpenVirtualDelegate = false;
+
+ if (innerDel != NULL)
+ {
+ MethodTable *pMT = innerDel->GetMethodTable();
+ if (pMT->IsDelegate())
+ return GetMethodDesc(innerDel);
+ if (!pMT->IsArray())
+ {
+ // must be a virtual one
+ fOpenVirtualDelegate = true;
+ }
+ }
+ else
+ {
+ if (count != DELEGATE_MARKER_UNMANAGEDFPTR)
+ {
+ // must be a virtual one
+ fOpenVirtualDelegate = true;
+ }
+ }
+
+ if (fOpenVirtualDelegate)
+ pMethodHandle = (MethodDesc*)thisDel->GetInvocationCount();
+ else
+ pMethodHandle = FindDelegateInvokeMethod(thisDel->GetMethodTable());
+ }
+ else
+ {
+ // Next, check for an open delegate
+ PCODE code = thisDel->GetMethodPtrAux();
+
+ if (code != NULL)
+ {
+ // Note that MethodTable::GetMethodDescForSlotAddress is significantly faster than Entry2MethodDesc
+ pMethodHandle = MethodTable::GetMethodDescForSlotAddress(code);
+ }
+ else
+ {
+ MethodTable * pMT = NULL;
+
+ // Must be a normal delegate
+ code = thisDel->GetMethodPtr();
+
+ OBJECTREF orThis = thisDel->GetTarget();
+ if (orThis!=NULL)
+ {
+ pMT = orThis->GetTrueMethodTable();
+ }
+
+ pMethodHandle = Entry2MethodDesc(code, pMT);
+ }
+ }
+
+ _ASSERTE(pMethodHandle);
+ return pMethodHandle;
+}
+
+OBJECTREF COMDelegate::GetTargetObject(OBJECTREF obj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF targetObject = NULL;
+
+ DELEGATEREF thisDel = (DELEGATEREF) obj;
+ OBJECTREF innerDel = NULL;
+
+ if (thisDel->GetInvocationCount() != 0)
+ {
+ // this is one of the following:
+ // - multicast
+ // - unmanaged ftn ptr
+ // - secure delegate
+ // - virtual delegate - _invocationList == null && _invocationCount == (target MethodDesc)
+ // or _invocationList points to a LoaderAllocator/DynamicResolver (inner open virtual delegate of a Secure Delegate)
+ // in the secure delegate case we want to unwrap and return the object of the inner delegate
+ innerDel = (DELEGATEREF) thisDel->GetInvocationList();
+ if (innerDel != NULL)
+ {
+ MethodTable *pMT = innerDel->GetMethodTable();
+ if (pMT->IsDelegate())
+ {
+ targetObject = GetTargetObject(innerDel);
+ }
+ }
+ }
+
+ if (targetObject == NULL)
+ targetObject = thisDel->GetTarget();
+
+ return targetObject;
+}
+
+BOOL COMDelegate::IsTrueMulticastDelegate(OBJECTREF delegate)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ BOOL isMulticast = FALSE;
+
+ size_t invocationCount = ((DELEGATEREF)delegate)->GetInvocationCount();
+ if (invocationCount)
+ {
+ OBJECTREF invocationList = ((DELEGATEREF)delegate)->GetInvocationList();
+ if (invocationList != NULL)
+ {
+ MethodTable *pMT = invocationList->GetMethodTable();
+ isMulticast = pMT->IsArray();
+ }
+ }
+
+ return isMulticast;
+}
+
+PCODE COMDelegate::TheDelegateInvokeStub()
+{
+ CONTRACT(PCODE)
+ {
+ STANDARD_VM_CHECK;
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+#ifdef _TARGET_X86_
+ static PCODE s_pInvokeStub;
+
+ if (s_pInvokeStub == NULL)
+ {
+ CPUSTUBLINKER sl;
+ sl.EmitDelegateInvoke();
+ // Process-wide singleton stub that never unloads
+ Stub *pCandidate = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap(), NEWSTUB_FL_MULTICAST);
+
+ if (InterlockedCompareExchangeT<PCODE>(&s_pInvokeStub, pCandidate->GetEntryPoint(), NULL) != NULL)
+ {
+ // if we are here someone managed to set the stub before us so we release the current
+ pCandidate->DecRef();
+ }
+ }
+
+ RETURN s_pInvokeStub;
+#else
+ RETURN GetEEFuncEntryPoint(SinglecastDelegateInvokeStub);
+#endif // _TARGET_X86_
+}
+
+// Get the cpu stub for a delegate invoke.
+PCODE COMDelegate::GetInvokeMethodStub(EEImplMethodDesc* pMD)
+{
+ CONTRACT(PCODE)
+ {
+ STANDARD_VM_CHECK;
+ POSTCONDITION(RETVAL != NULL);
+
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACT_END;
+
+ PCODE ret = NULL;
+ MethodTable * pDelMT = pMD->GetMethodTable();
+ DelegateEEClass* pClass = (DelegateEEClass*) pDelMT->GetClass();
+
+ if (pMD == pClass->m_pInvokeMethod)
+ {
+ // Validate the invoke method, which at the moment just means checking the calling convention
+
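+ // (The first byte of a method signature encodes its calling convention, so the
+ // dereference below checks that Invoke is an instance method using the default
+ // managed calling convention.)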
+ if (*pMD->GetSig() != (IMAGE_CEE_CS_CALLCONV_HASTHIS | IMAGE_CEE_CS_CALLCONV_DEFAULT))
+ COMPlusThrow(kInvalidProgramException);
+
+ ret = COMDelegate::TheDelegateInvokeStub();
+ }
+#ifdef FEATURE_REMOTING
+ else if (pMD == pClass->m_pBeginInvokeMethod)
+ {
+ CRemotingServices::EnsureRemotingStarted();
+
+ if (!ValidateBeginInvoke(pClass))
+ COMPlusThrow(kInvalidProgramException);
+
+ ret = CTPMethodTable::GetDelegateStubEntryPoint();
+ }
+ else if (pMD == pClass->m_pEndInvokeMethod)
+ {
+ CRemotingServices::EnsureRemotingStarted();
+
+ if (!ValidateEndInvoke(pClass))
+ COMPlusThrow(kInvalidProgramException);
+
+ ret = CTPMethodTable::GetDelegateStubEntryPoint();
+ }
+#endif // FEATURE_REMOTING
+ else
+ {
+#ifndef FEATURE_REMOTING
+
+ // Since we do not support asynchronous delegates in CoreCLR, we must ensure that it was indeed an async delegate call
+ // and not an invalid-delegate-layout condition.
+ //
+ // If the call was indeed for async delegate invocation, we will just throw an exception.
+ if ((pMD == pClass->m_pBeginInvokeMethod) || (pMD == pClass->m_pEndInvokeMethod))
+ {
+ COMPlusThrow(kNotSupportedException);
+ }
+
+#endif //FEATURE_REMOTING
+
+ _ASSERTE(!"Bad Delegate layout");
+ COMPlusThrow(kInvalidProgramException);
+ }
+
+ RETURN ret;
+}
+
+FCIMPL1(Object*, COMDelegate::InternalAlloc, ReflectClassBaseObject * pTargetUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF refTarget = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTargetUNSAFE);
+ OBJECTREF refRetVal = NULL;
+ TypeHandle targetTH = refTarget->GetType();
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refTarget);
+
+ _ASSERTE(targetTH.GetMethodTable() != NULL && targetTH.GetMethodTable()->IsDelegate());
+
+ refRetVal = targetTH.GetMethodTable()->Allocate();
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(refRetVal);
+}
+FCIMPLEND
+
+FCIMPL1(Object*, COMDelegate::InternalAllocLike, Object* pThis)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refRetVal = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ _ASSERTE(pThis->GetMethodTable() != NULL && pThis->GetMethodTable()->IsDelegate());
+
+ refRetVal = pThis->GetMethodTable()->AllocateNoChecks();
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(refRetVal);
+}
+FCIMPLEND
+
+FCIMPL2(FC_BOOL_RET, COMDelegate::InternalEqualTypes, Object* pThis, Object *pThat)
+{
+ FCALL_CONTRACT;
+
+ MethodTable *pThisMT = pThis->GetMethodTable();
+ MethodTable *pThatMT = pThat->GetMethodTable();
+
+ _ASSERTE(pThisMT != NULL && pThisMT->IsDelegate());
+ _ASSERTE(pThatMT != NULL);
+
+ BOOL bResult = (pThisMT == pThatMT);
+
+ if (!bResult)
+ {
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+ bResult = pThisMT->IsEquivalentTo(pThatMT);
+ HELPER_METHOD_FRAME_END();
+ }
+
+ FC_RETURN_BOOL(bResult);
+}
+FCIMPLEND
+
+#endif // CROSSGEN_COMPILE
+
+
+BOOL COMDelegate::NeedsSecureDelegate(MethodDesc* pCreatorMethod, AppDomain *pCreatorDomain, MethodDesc* pTargetMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_CAS_POLICY
+ return FALSE;
+#else
+ if (pCreatorMethod)
+ {
+ Assembly* pTargetAssembly = pTargetMD->GetAssembly();
+ Assembly* pCreatorAssembly = pCreatorMethod->GetAssembly();
+ if (pCreatorAssembly != pTargetAssembly)
+ {
+ // We don't need a secure delegate if everything in the AppDomain is fully trusted.
+ if (!pCreatorDomain->GetSecurityDescriptor()->DomainMayContainPartialTrustCode())
+ return FALSE;
+
+ IAssemblySecurityDescriptor *pCreatorAsd = pCreatorAssembly->GetSecurityDescriptor(pCreatorDomain);
+
+ // We should also create secure delegates for anonymously hosted dynamic methods which
+ // are themselves full trust (although transparent) yet can be created from partial trust.
+ if (!pCreatorAsd->IsFullyTrusted() ||
+ pCreatorAssembly->GetDomainAssembly(pCreatorDomain) == pCreatorDomain->GetAnonymouslyHostedDynamicMethodsAssembly())
+ {
+ return TRUE;
+ }
+
+ // Note that if we begin to support using an NGEN image which is not fully trusted, we may need
+ // to force on secure delegates as the grant set of the image may not match between NGEN time
+ // and runtime.
+ }
+ }
+
+ return FALSE;
+
+#endif // FEATURE_CAS_POLICY
+}
+
+BOOL COMDelegate::NeedsWrapperDelegate(MethodDesc* pTargetMD)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _TARGET_ARM_
+ // On ARM, VSD expects r4 to contain the indirection cell. However, r4 is a non-volatile register
+ // and its value must be preserved. So we need to erect a frame and store the indirection cell in r4 before calling
+ // virtual stub dispatch. Erecting a frame is already done by secure delegates, so the secure delegate infrastructure
+ // can easily be used for our purpose.
+ // Set the needsSecureDelegate flag in order to erect a frame. (The secure delegate stub also loads the right value in r4.)
+ if (!pTargetMD->IsStatic() && pTargetMD->IsVirtual() && !pTargetMD->GetMethodTable()->IsValueType())
+ return TRUE;
+#endif
+
+ return FALSE;
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+// to create a secure delegate wrapper we need:
+// - the delegate to forward to -> _invocationList
+// - the creator assembly -> _methodAuxPtr
+// - the delegate invoke MethodDesc -> _count
+// the 2 fields used for invocation will contain:
+// - the delegate itself -> _pORField
+// - the secure stub -> _pFPField
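+// An illustrative sketch of the resulting object graph (same field mapping as above):
+//   outer: _target = outer itself, _methodPtr = secure invoke stub,
+//          _methodPtrAux = creator MethodDesc, _invocationCount = Invoke MethodDesc,
+//          _invocationList = inner delegate
+//   inner: freshly allocated below and returned; the caller fills in the actual
+//          target and method pointer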
+DELEGATEREF COMDelegate::CreateSecureDelegate(DELEGATEREF delegate, MethodDesc* pCreatorMethod, MethodDesc* pTargetMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ MethodTable *pDelegateType = delegate->GetMethodTable();
+ MethodDesc *pMD = ((DelegateEEClass*)(pDelegateType->GetClass()))->m_pInvokeMethod;
+ // allocate the object
+ struct _gc {
+ DELEGATEREF refSecDel;
+ DELEGATEREF innerDel;
+ } gc;
+ gc.refSecDel = delegate;
+ gc.innerDel = NULL;
+
+ GCPROTECT_BEGIN(gc);
+
+ // set the proper fields
+ //
+
+ // Object reference field...
+ gc.refSecDel->SetTarget(gc.refSecDel);
+
+ // save the secure invoke stub. GetSecureInvoke() can trigger GC.
+ PCODE tmp = GetSecureInvoke(pMD);
+ gc.refSecDel->SetMethodPtr(tmp);
+ // save the assembly
+ gc.refSecDel->SetMethodPtrAux((PCODE)(void *)pCreatorMethod);
+ // save the delegate MethodDesc for the frame
+ gc.refSecDel->SetInvocationCount((INT_PTR)pMD);
+
+ // save the delegate to forward to
+ gc.innerDel = (DELEGATEREF) pDelegateType->Allocate();
+ gc.refSecDel->SetInvocationList(gc.innerDel);
+
+ if (pCreatorMethod != NULL)
+ {
+ // If the pCreatorMethod is a collectible method, then stash a reference to the
+ // LoaderAllocator/DynamicResolver of the collectible assembly/method in the invocationList
+ // of the inner delegate
+ // (The invocationList of the inner delegate is the only field guaranteed to be unused for
+ // other purposes at this time.)
+ if (pCreatorMethod->IsLCGMethod())
+ {
+ OBJECTREF refCollectible = pCreatorMethod->AsDynamicMethodDesc()->GetLCGMethodResolver()->GetManagedResolver();
+ gc.innerDel->SetInvocationList(refCollectible);
+ }
+ else if (pCreatorMethod->GetLoaderAllocator()->IsCollectible())
+ {
+ OBJECTREF refCollectible = pCreatorMethod->GetLoaderAllocator()->GetExposedObject();
+ gc.innerDel->SetInvocationList(refCollectible);
+ }
+ }
+
+ GCPROTECT_END();
+
+ return gc.innerDel;
+}
+
+// InternalGetMethodInfo
+// This method will get the MethodInfo for a delegate
+FCIMPL1(ReflectMethodObject *, COMDelegate::FindMethodHandle, Object* refThisIn)
+{
+ FCALL_CONTRACT;
+
+ MethodDesc* pMD = NULL;
+ REFLECTMETHODREF pRet = NULL;
+ OBJECTREF refThis = ObjectToOBJECTREF(refThisIn);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+
+ pMD = GetMethodDesc(refThis);
+ pRet = pMD->GetStubMethodInfo();
+ HELPER_METHOD_FRAME_END();
+
+ return (ReflectMethodObject*)OBJECTREFToObject(pRet);
+}
+FCIMPLEND
+
+FCIMPL2(FC_BOOL_RET, COMDelegate::InternalEqualMethodHandles, Object *refLeftIn, Object *refRightIn)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refLeft = ObjectToOBJECTREF(refLeftIn);
+ OBJECTREF refRight = ObjectToOBJECTREF(refRightIn);
+ BOOL fRet = FALSE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refLeft, refRight);
+
+ MethodDesc* pMDLeft = GetMethodDesc(refLeft);
+ MethodDesc* pMDRight = GetMethodDesc(refRight);
+ fRet = pMDLeft == pMDRight;
+
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(fRet);
+}
+FCIMPLEND
+
+FCIMPL1(MethodDesc*, COMDelegate::GetInvokeMethod, Object* refThisIn)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refThis = ObjectToOBJECTREF(refThisIn);
+ MethodTable * pDelMT = refThis->GetMethodTable();
+
+ MethodDesc* pMD = ((DelegateEEClass*)(pDelMT->GetClass()))->m_pInvokeMethod;
+ _ASSERTE(pMD);
+ return pMD;
+}
+FCIMPLEND
+
+#ifdef FEATURE_STUBS_AS_IL
+FCIMPL1(PCODE, COMDelegate::GetMulticastInvoke, Object* refThisIn)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refThis = ObjectToOBJECTREF(refThisIn);
+ MethodTable *pDelegateMT = refThis->GetMethodTable();
+
+ DelegateEEClass *delegateEEClass = ((DelegateEEClass*)(pDelegateMT->GetClass()));
+ Stub *pStub = delegateEEClass->m_pMultiCastInvokeStub;
+ if (pStub == NULL)
+ {
+ MethodDesc* pMD = delegateEEClass->m_pInvokeMethod;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ GCX_PREEMP();
+
+ MetaSig sig(pMD);
+
+ BOOL fReturnVal = !sig.IsReturnTypeVoid();
+
+ SigTypeContext emptyContext;
+ ILStubLinker sl(pMD->GetModule(), pMD->GetSignature(), &emptyContext, pMD, TRUE, TRUE, FALSE);
+
+ ILCodeStream *pCode = sl.NewCodeStream(ILStubLinker::kDispatch);
+
+ DWORD dwInvocationCountNum = pCode->NewLocal(ELEMENT_TYPE_I4);
+ DWORD dwLoopCounterNum = pCode->NewLocal(ELEMENT_TYPE_I4);
+
+ DWORD dwReturnValNum = -1;
+ if(fReturnVal)
+ dwReturnValNum = pCode->NewLocal(sig.GetRetTypeHandleNT());
+
+ ILCodeLabel *nextDelegate = pCode->NewCodeLabel();
+ ILCodeLabel *endOfMethod = pCode->NewCodeLabel();
+
+ // Get count of delegates
+ pCode->EmitLoadThis();
+ pCode->EmitLDFLD(pCode->GetToken(MscorlibBinder::GetField(FIELD__MULTICAST_DELEGATE__INVOCATION_COUNT)));
+ pCode->EmitSTLOC(dwInvocationCountNum);
+
+ // initialize counter
+ pCode->EmitLDC(0);
+ pCode->EmitSTLOC(dwLoopCounterNum);
+
+ //Label_nextDelegate:
+ pCode->EmitLabel(nextDelegate);
+
+ // compare LoopCounter with InvocationCount. If equal then branch to Label_endOfMethod
+ pCode->EmitLDLOC(dwLoopCounterNum);
+ pCode->EmitLDLOC(dwInvocationCountNum);
+ pCode->EmitBEQ(endOfMethod);
+
+ // Load next delegate from array using LoopCounter as index
+ pCode->EmitLoadThis();
+ pCode->EmitLDFLD(pCode->GetToken(MscorlibBinder::GetField(FIELD__MULTICAST_DELEGATE__INVOCATION_LIST)));
+ pCode->EmitLDLOC(dwLoopCounterNum);
+ pCode->EmitLDELEM_REF();
+
+ // Load the arguments
+ UINT paramCount = 0;
+ while(paramCount < sig.NumFixedArgs())
+ pCode->EmitLDARG(paramCount++);
+
+ // call the delegate
+ pCode->EmitCALL(pCode->GetToken(pMD), sig.NumFixedArgs(), fReturnVal);
+
+ // Save return value.
+ if(fReturnVal)
+ pCode->EmitSTLOC(dwReturnValNum);
+
+ // increment counter
+ pCode->EmitLDLOC(dwLoopCounterNum);
+ pCode->EmitLDC(1);
+ pCode->EmitADD();
+ pCode->EmitSTLOC(dwLoopCounterNum);
+
+#ifdef DEBUGGING_SUPPORTED
+ pCode->EmitLoadThis();
+ pCode->EmitLDLOC(dwLoopCounterNum);
+ pCode->EmitCALL(METHOD__STUBHELPERS__MULTICAST_DEBUGGER_TRACE_HELPER, 2, 0);
+#endif // DEBUGGING_SUPPORTED
+
+ // branch to next delegate
+ pCode->EmitBR(nextDelegate);
+
+ //Label_endOfMethod
+ pCode->EmitLabel(endOfMethod);
+
+ // load the return value; the value from the last delegate call is what gets returned
+ if(fReturnVal)
+ pCode->EmitLDLOC(dwReturnValNum);
+
+ // return
+ pCode->EmitRET();
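+ // Taken together, the IL emitted above behaves roughly like the following sketch
+ // (illustrative pseudocode only, not the actual stub):
+ //
+ //   int count = this->_invocationCount;
+ //   int i = 0;
+ //   while (i < count) {
+ //       ret = Invoke(this->_invocationList[i], args...); // call the next inner delegate
+ //       i++;
+ //       MulticastDebuggerTraceHelper(this, i);           // debugger support only
+ //   }
+ //   return ret; // the result of the last delegate invoked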
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cbSig;
+
+ pMD->GetSig(&pSig,&cbSig);
+
+ MethodDesc* pStubMD = ILStubCache::CreateAndLinkNewILStubMethodDesc(pMD->GetLoaderAllocator(),
+ pMD->GetMethodTable(),
+ ILSTUB_MULTICASTDELEGATE_INVOKE,
+ pMD->GetModule(),
+ pSig, cbSig,
+ NULL,
+ &sl);
+
+ pStub = Stub::NewStub(JitILStub(pStubMD));
+
+ g_IBCLogger.LogEEClassCOWTableAccess(pDelegateMT);
+
+ InterlockedCompareExchangeT<PTR_Stub>(EnsureWritablePages(&delegateEEClass->m_pMultiCastInvokeStub), pStub, NULL);
+
+ HELPER_METHOD_FRAME_END();
+ }
+
+ return pStub->GetEntryPoint();
+}
+FCIMPLEND
+
+#else // FEATURE_STUBS_AS_IL
+
+FCIMPL1(PCODE, COMDelegate::GetMulticastInvoke, Object* refThisIn)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refThis = ObjectToOBJECTREF(refThisIn);
+ MethodTable *pDelegateMT = refThis->GetMethodTable();
+
+ DelegateEEClass *delegateEEClass = ((DelegateEEClass*)(pDelegateMT->GetClass()));
+ Stub *pStub = delegateEEClass->m_pMultiCastInvokeStub;
+ if (pStub == NULL)
+ {
+ MethodDesc* pMD = delegateEEClass->m_pInvokeMethod;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ GCX_PREEMP();
+
+ MetaSig sig(pMD);
+
+ UINT_PTR hash = CPUSTUBLINKER::HashMulticastInvoke(&sig);
+
+ pStub = m_pMulticastStubCache->GetStub(hash);
+ if (!pStub)
+ {
+ CPUSTUBLINKER sl;
+
+ LOG((LF_CORDB,LL_INFO10000, "COMD::GIMS making a multicast delegate\n"));
+
+ sl.EmitMulticastInvoke(hash);
+
+ // The cache is process-wide, based on signature. It never unloads
+ Stub *pCandidate = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap(), NEWSTUB_FL_MULTICAST);
+
+ Stub *pWinner = m_pMulticastStubCache->AttemptToSetStub(hash,pCandidate);
+ pCandidate->DecRef();
+ if (!pWinner)
+ COMPlusThrowOM();
+
+ LOG((LF_CORDB,LL_INFO10000, "Putting a MC stub at 0x%x (code:0x%x)\n",
+ pWinner, (BYTE*)pWinner+sizeof(Stub)));
+
+ pStub = pWinner;
+ }
+
+ g_IBCLogger.LogEEClassCOWTableAccess(pDelegateMT);
+
+ // we don't need to do an InterlockedCompareExchange here - the m_pMulticastStubCache->AttemptToSetStub
+ // will make sure all threads racing here will get the same stub, so they'll all store the same value
+ EnsureWritablePages(&delegateEEClass->m_pMultiCastInvokeStub);
+ delegateEEClass->m_pMultiCastInvokeStub = pStub;
+
+ HELPER_METHOD_FRAME_END();
+ }
+
+ return pStub->GetEntryPoint();
+}
+FCIMPLEND
+#endif // FEATURE_STUBS_AS_IL
+
+#ifdef FEATURE_STUBS_AS_IL
+PCODE COMDelegate::GetSecureInvoke(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_CAS_POLICY
+#error GetSecureInvoke not implemented
+#else
+ UNREACHABLE();
+#endif
+}
+#else // FEATURE_STUBS_AS_IL
+PCODE COMDelegate::GetSecureInvoke(MethodDesc* pMD)
+{
+ CONTRACT (PCODE)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+ GCX_PREEMP();
+
+ MetaSig sig(pMD);
+
+ UINT_PTR hash = CPUSTUBLINKER::HashMulticastInvoke(&sig);
+
+ Stub *pStub = m_pSecureDelegateStubCache->GetStub(hash);
+ if (!pStub)
+ {
+ CPUSTUBLINKER sl;
+
+ LOG((LF_CORDB,LL_INFO10000, "COMD::GIMS making a secure delegate invoke stub\n"));
+ sl.EmitSecureDelegateInvoke(hash);
+
+ // The cache is process-wide, based on signature. It never unloads
+ Stub *pCandidate = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap(), NEWSTUB_FL_MULTICAST);
+
+ Stub *pWinner = m_pSecureDelegateStubCache->AttemptToSetStub(hash, pCandidate);
+ pCandidate->DecRef();
+ if (!pWinner)
+ COMPlusThrowOM();
+
+ LOG((LF_CORDB,LL_INFO10000, "Putting a MC stub at 0x%x (code:0x%x)\n",
+ pWinner, (BYTE*)pWinner+sizeof(Stub)));
+
+ pStub = pWinner;
+ }
+ RETURN (pStub->GetEntryPoint());
+}
+#endif // FEATURE_STUBS_AS_IL
+
+#endif // CROSSGEN_COMPILE
+
+
+static BOOL IsLocationAssignable(TypeHandle fromHandle, TypeHandle toHandle, BOOL relaxedMatch, BOOL fromHandleIsBoxed)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Identical types are obviously compatible.
+ if (fromHandle == toHandle)
+ return TRUE;
+
+ // Byref parameters can never be allowed relaxed matching since type safety will always be violated in one
+ // of the two directions (in or out). Checking one of the types is enough since a byref type is never
+ // compatible with a non-byref type.
+ if (fromHandle.IsByRef())
+ relaxedMatch = FALSE;
+
+ // If we allow relaxed matching then any subtype of toHandle is probably
+ // compatible (definitely so if we know fromHandle is coming from a boxed
+ // value such as we get from the bound argument in a closed delegate).
+ if (relaxedMatch && fromHandle.CanCastTo(toHandle))
+ {
+ // If the fromHandle isn't boxed then we need to be careful since
+ // non-object reference arguments aren't going to be compatible with
+ // object reference locations (there's no implicit boxing going to happen
+ // for us).
+ if (!fromHandleIsBoxed)
+ {
+ // Check that the "objrefness" of source and destination matches. In
+ // reality there are only three objref classes that would have
+ // passed the CanCastTo above given a value type source (Object,
+ // ValueType and Enum), but why hard code these in when we can be
+ // more robust?
+ if (fromHandle.IsGenericVariable())
+ {
+ TypeVarTypeDesc *fromHandleVar = fromHandle.AsGenericVariable();
+
+ // We need to check whether constraints of fromHandle have been loaded, because the
+ // CanCastTo operation might have made its decision without enumerating constraints
+ // (e.g. when toHandle is System.Object).
+ if (!fromHandleVar->ConstraintsLoaded())
+ fromHandleVar->LoadConstraints(CLASS_DEPENDENCIES_LOADED);
+
+ if (toHandle.IsGenericVariable())
+ {
+ TypeVarTypeDesc *toHandleVar = toHandle.AsGenericVariable();
+
+ // Constraints of toHandleVar were not touched by CanCastTo.
+ if (!toHandleVar->ConstraintsLoaded())
+ toHandleVar->LoadConstraints(CLASS_DEPENDENCIES_LOADED);
+
+ // Both handles are type variables. The following table lists all possible combinations.
+ //
+ // In brackets are results of IsConstrainedAsObjRef/IsConstrainedAsValueType
+ //
+ //    To:          | [FALSE/FALSE]       | [FALSE/TRUE]           | [TRUE/FALSE]
+ //  From:          |                     |                        |
+ // --------------------------------------------------------------------------------------
+ //  [FALSE/FALSE]  | ERROR               | NEVER HAPPENS          | ERROR
+ //                 | we know nothing     |                        | From may be a VT
+ // --------------------------------------------------------------------------------------
+ //  [FALSE/TRUE]   | ERROR               | OK                     | ERROR
+ //                 | To may be an ObjRef | both are VT            | mismatch
+ // --------------------------------------------------------------------------------------
+ //  [TRUE/FALSE]   | OK (C# compat)      | ERROR - mismatch and   | OK
+ //                 | (*)                 | no such instantiation  | both are ObjRef
+ // --------------------------------------------------------------------------------------
+
+ if (fromHandleVar->ConstrainedAsObjRef())
+ {
+ // (*) Normally we would need to check whether toHandleVar is also constrained
+ // as ObjRef here and fail if it's not. However, the C# compiler currently
+ // allows the toHandleVar constraint to be omitted and infers it. We have to
+ // follow the same rule to avoid introducing a breaking change.
+ //
+ // Example:
+ // class Gen<T, U> where T : class, U
+ //
+ // For the sake of delegate co(ntra)variance, U is also regarded as being
+ // constrained as ObjRef even though it has no constraints.
+
+ if (toHandleVar->ConstrainedAsValueType())
+ {
+ // reference type / value type mismatch
+ return FALSE;
+ }
+ }
+ else
+ {
+ if (toHandleVar->ConstrainedAsValueType())
+ {
+ // If toHandleVar is constrained as value type, fromHandle must be as well.
+ _ASSERTE(fromHandleVar->ConstrainedAsValueType());
+ }
+ else
+ {
+ // It was not possible to prove that the variables are both reference types
+ // or both value types.
+ return FALSE;
+ }
+ }
+ }
+ else
+ {
+ // We need toHandle to be an ObjRef and fromHandle to be constrained as ObjRef,
+ // or toHandle to be a value type and fromHandle to be constrained as a value
+ // type (which must be this specific value type actually as value types are sealed).
+
+ // Constraints of fromHandle must ensure that it will be ObjRef if toHandle is an
+ // ObjRef, and a value type if toHandle is not an ObjRef.
+ if (CorTypeInfo::IsObjRef_NoThrow(toHandle.GetInternalCorElementType()))
+ {
+ if (!fromHandleVar->ConstrainedAsObjRef())
+ return FALSE;
+ }
+ else
+ {
+ if (!fromHandleVar->ConstrainedAsValueType())
+ return FALSE;
+ }
+ }
+ }
+ else
+ {
+ _ASSERTE(!toHandle.IsGenericVariable());
+
+ // The COR element types have all the information we need.
+ if (CorTypeInfo::IsObjRef_NoThrow(fromHandle.GetInternalCorElementType()) !=
+ CorTypeInfo::IsObjRef_NoThrow(toHandle.GetInternalCorElementType()))
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+ }
+ else
+ {
+ // They are not compatible, yet enums can go into each other if their underlying element
+ // type is the same (e.g. an int argument is assignable to an int-based enum parameter).
+ if (toHandle.GetVerifierCorElementType() == fromHandle.GetVerifierCorElementType()
+ && (toHandle.IsEnum() || fromHandle.IsEnum()))
+ return TRUE;
+
+ }
+
+ return FALSE;
+}
+
+MethodDesc* COMDelegate::FindDelegateInvokeMethod(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pMT->IsDelegate());
+
+ MethodDesc * pMD = ((DelegateEEClass*)pMT->GetClass())->m_pInvokeMethod;
+ if (pMD == NULL)
+ COMPlusThrowNonLocalized(kMissingMethodException, W("Invoke"));
+ return pMD;
+}
+
+BOOL COMDelegate::IsDelegateInvokeMethod(MethodDesc *pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ MethodTable *pMT = pMD->GetMethodTable();
+ _ASSERTE(pMT->IsDelegate());
+
+ return (pMD == ((DelegateEEClass *)pMT->GetClass())->m_pInvokeMethod);
+}
+
+BOOL COMDelegate::IsMethodDescCompatible(TypeHandle thFirstArg,
+ TypeHandle thExactMethodType,
+ MethodDesc *pTargetMethod,
+ TypeHandle thDelegate,
+ MethodDesc *pInvokeMethod,
+ int flags,
+ BOOL *pfIsOpenDelegate)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Handle easy cases first -- if there's a constraint on whether the target method is static or instance we can check that very
+ // quickly.
+ if (flags & DBF_StaticMethodOnly && !pTargetMethod->IsStatic())
+ return FALSE;
+ if (flags & DBF_InstanceMethodOnly && pTargetMethod->IsStatic())
+ return FALSE;
+
+ // we don't allow you to bind to methods on Nullable<T> because the unboxing stubs don't know how to
+ // handle this case.
+ if (!pTargetMethod->IsStatic() && Nullable::IsNullableType(pTargetMethod->GetMethodTable()))
+ return FALSE;
+
+ // Have to be careful with automatically generated array methods (Get, Set, etc.). The TypeHandle here may actually be one
+ // of the "special case" MethodTables (such as Object[]) instead of an ArrayTypeDesc and our TypeHandle CanCastTo code can't
+ // cope with all the different possible combinations. In general we want to normalize the TypeHandle into an ArrayTypeDesc
+ // for these cases.
+ if (thExactMethodType.IsArrayType() && !thExactMethodType.IsArray())
+ {
+ TypeHandle thElement = thExactMethodType.AsMethodTable()->GetApproxArrayElementTypeHandle();
+ CorElementType etElement = thExactMethodType.AsMethodTable()->GetInternalCorElementType();
+ unsigned uRank = thExactMethodType.AsMethodTable()->GetRank();
+
+ thExactMethodType = ClassLoader::LoadArrayTypeThrowing(thElement,
+ etElement,
+ uRank,
+ ClassLoader::DontLoadTypes);
+ }
+
+ // Get signatures for the delegate invoke and target methods.
+ MetaSig sigInvoke(pInvokeMethod, thDelegate);
+ MetaSig sigTarget(pTargetMethod, thExactMethodType);
+
+ // Check that there is no vararg mismatch.
+ if (sigInvoke.IsVarArg() != sigTarget.IsVarArg())
+ return FALSE;
+
+ // The relationship between the number of arguments on the delegate invoke and target methods tells us a lot about the type of
+ // delegate we'll create (open or closed over the first argument). We're getting the fixed argument counts here, which are all
+ // the arguments apart from any implicit 'this' pointers.
+ // On the delegate invoke side (the caller) the total number of arguments is the number of fixed args to Invoke plus one if the
+ // delegate is closed over an argument (i.e. that argument is provided at delegate creation time).
+ // On the target method side (the callee) the total number of arguments is the number of fixed args plus one if the target is an
+ // instance method.
+ // These two totals should match for any compatible delegate and target method.
+ UINT numFixedInvokeArgs = sigInvoke.NumFixedArgs();
+ UINT numFixedTargetArgs = sigTarget.NumFixedArgs();
+ UINT numTotalTargetArgs = numFixedTargetArgs + (pTargetMethod->IsStatic() ? 0 : 1);
+
+ // Determine whether the match (if it is otherwise compatible) would result in an open or closed delegate or is just completely
+ // out of whack.
+ BOOL fIsOpenDelegate;
+ if (numTotalTargetArgs == numFixedInvokeArgs)
+ // All arguments provided by invoke, delegate must be open.
+ fIsOpenDelegate = TRUE;
+ else if (numTotalTargetArgs == numFixedInvokeArgs + 1)
+ // One too few arguments provided by invoke, delegate must be closed.
+ fIsOpenDelegate = FALSE;
+ else
+ // Target method cannot possibly match the invoke method.
+ return FALSE;
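+ // Worked example (illustrative): binding 'void D(string)' to an instance method
+ // 'void C.M(string)' gives numFixedInvokeArgs == 1 and numTotalTargetArgs == 2,
+ // so the delegate must be closed over a C instance; binding it to 'void C.M()'
+ // gives numTotalTargetArgs == 1 == numFixedInvokeArgs, i.e. an open delegate.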
+
+ // Deal with cases where the caller wants a specific type of delegate.
+ if (flags & DBF_OpenDelegateOnly && !fIsOpenDelegate)
+ return FALSE;
+ if (flags & DBF_ClosedDelegateOnly && fIsOpenDelegate)
+ return FALSE;
+
+ // If the target (or first argument) is null, the delegate type would be closed and the caller explicitly doesn't want to allow
+ // closing over null then filter that case now.
+ if (flags & DBF_NeverCloseOverNull && thFirstArg.IsNull() && !fIsOpenDelegate)
+ return FALSE;
+
+ // If, on the other hand, we're looking at an open delegate but the caller has provided a target it's also not a match.
+ if (fIsOpenDelegate && !thFirstArg.IsNull())
+ return FALSE;
+
+ // **********OLD COMMENT**********
+ // We don't allow open delegates over virtual value type methods. That's because we currently have no way to allow the first
+ // argument of the invoke method to be specified in such a way that the passed value would be both compatible with the target
+ // method and type safe. Virtual methods always have an objref instance (they depend on this for the vtable lookup algorithm) so
+ // we can't take a Foo& first argument like other value type methods. We also can't accept System.Object or System.ValueType in
+ // the invoke signature since that's not specific enough and would allow type safety violations.
+ // Someday we may invent a boxing stub which would take a Foo& passed in and box it before dispatch. This is unlikely given that
+ // it's a lot of work for an edge case (especially considering that open delegates over value types are always going to be
+ // tightly bound to the specific value type). It would also be an odd case where merely calling a delegate would involve an
+ // allocation and thus potential failure before you even entered the method.
+ // So for now we simply disallow this case.
+ // **********OLD COMMENT END**********
+ // Actually we allow them now. We will treat them like non-virtual methods.
+
+
+ // If we get here the basic shape of the signatures match up for either an open or closed delegate. Now we need to verify that
+ // those signatures are type compatible. This is complicated somewhat by the matrix of delegate type to target method types
+ // (open static vs closed instance etc.). Where we get the first argument type on the invoke side is controlled by open vs
+ // closed: closed delegates get the type from the target, open from the first invoke method argument (which is always a fixed
+ // arg). Similarly the location of the first argument type on the target method side is based on static vs instance (static from
+ // the first fixed arg, instance from the type of the method).
+
+ TypeHandle thFirstInvokeArg;
+ TypeHandle thFirstTargetArg;
+
+ // There is one edge case for an open static delegate which takes no arguments. In that case we're nearly done, just compare the
+ // return types.
+ if (numTotalTargetArgs == 0)
+ {
+ _ASSERTE(pTargetMethod->IsStatic());
+ _ASSERTE(fIsOpenDelegate);
+
+ goto CheckReturnType;
+ }
+
+ // Invoke side first...
+ if (fIsOpenDelegate)
+ {
+ // No bound arguments, take first type from invoke signature.
+ if (sigInvoke.NextArgNormalized() == ELEMENT_TYPE_END)
+ return FALSE;
+ thFirstInvokeArg = sigInvoke.GetLastTypeHandleThrowing();
+ }
+ else
+ // We have one bound argument and the type of that is what we must compare first.
+ thFirstInvokeArg = thFirstArg;
+
+ // And now the first target method argument for comparison...
+ if (pTargetMethod->IsStatic())
+ {
+ // The first argument for a static method is the first fixed arg.
+ if (sigTarget.NextArgNormalized() == ELEMENT_TYPE_END)
+ return FALSE;
+ thFirstTargetArg = sigTarget.GetLastTypeHandleThrowing();
+
+ // Delegates closed over static methods have a further constraint: the first argument of the target must be an object
+ // reference type (otherwise the argument shuffling logic could get complicated). For example, a
+ // static target whose first parameter is 'string' can be closed over, but one taking 'int' cannot.
+ if (!fIsOpenDelegate)
+ {
+ if (thFirstTargetArg.IsGenericVariable())
+ {
+ // If the first argument of the target is a generic variable, it must be constrained to be an object reference.
+ TypeVarTypeDesc *varFirstTargetArg = thFirstTargetArg.AsGenericVariable();
+ if (!varFirstTargetArg->ConstrainedAsObjRef())
+ return FALSE;
+ }
+ else
+ {
+ // Otherwise the code:CorElementType of the argument must be classified as an object reference.
+ CorElementType etFirstTargetArg = thFirstTargetArg.GetInternalCorElementType();
+ if (!CorTypeInfo::IsObjRef(etFirstTargetArg))
+ return FALSE;
+ }
+ }
+ }
+ else
+ {
+ // The type of the first argument to an instance method is from the method type.
+ thFirstTargetArg = thExactMethodType;
+
+ // If the delegate is open and the target method is on a value type or primitive then the first argument of the invoke
+ // method must be a reference to that type, so promote the type we got from the method to a byref
+ // (e.g. an open delegate over an instance method of a struct Foo must take a Foo& first argument). (We don't need to
+ // do this for the closed instance case because there we got the invocation side type from the first arg passed in, i.e.
+ // it's had the ref stripped from it implicitly.)
+ if (fIsOpenDelegate)
+ {
+ CorElementType etFirstTargetArg = thFirstTargetArg.GetInternalCorElementType();
+ if (etFirstTargetArg <= ELEMENT_TYPE_R8 ||
+ etFirstTargetArg == ELEMENT_TYPE_VALUETYPE ||
+ etFirstTargetArg == ELEMENT_TYPE_I ||
+ etFirstTargetArg == ELEMENT_TYPE_U)
+ thFirstTargetArg = thFirstTargetArg.MakeByRef();
+ }
+ }
+
+ // Now we have enough data to compare the first arguments on the invoke and target side. Skip this if we are closed over null
+ // (we don't have enough type information for the match but it doesn't matter because the null matches all object reference
+ // types, which our first arg must be in this case). We always relax signature matching for the first argument of an instance
+ // method, since it's always allowable to call the method on a more derived type. In cases where we're closed over the first
+ // argument we know that argument is boxed (because it was passed to us as an object). We provide this information to
+ // IsLocationAssignable because it relaxes signature matching for some important cases (e.g. passing a value type to an argument
+ // typed as Object).
+ if (!thFirstInvokeArg.IsNull())
+ if (!IsLocationAssignable(thFirstInvokeArg,
+ thFirstTargetArg,
+ !pTargetMethod->IsStatic() || flags & DBF_RelaxedSignature,
+ !fIsOpenDelegate))
+ return FALSE;
+
+ // Loop over the remaining fixed args, the list should be one to one at this point.
+ while (TRUE)
+ {
+ CorElementType etInvokeArg = sigInvoke.NextArgNormalized();
+ CorElementType etTargetArg = sigTarget.NextArgNormalized();
+ if (etInvokeArg == ELEMENT_TYPE_END || etTargetArg == ELEMENT_TYPE_END)
+ {
+ // We've reached the end of one signature. We better be at the end of the other or it's not a match.
+ if (etInvokeArg != etTargetArg)
+ return FALSE;
+ break;
+ }
+ else
+ {
+ TypeHandle thInvokeArg = sigInvoke.GetLastTypeHandleThrowing();
+ TypeHandle thTargetArg = sigTarget.GetLastTypeHandleThrowing();
+
+ if (!IsLocationAssignable(thInvokeArg, thTargetArg, flags & DBF_RelaxedSignature, FALSE))
+ return FALSE;
+ }
+ }
+
+ CheckReturnType:
+
+ // Almost there, just compare the return types (remember that the assignment is in the other direction here, from callee to
+ // caller, so switch the order of the arguments to IsLocationAssignable).
+ // If we ever relax this we have to think about how to unbox this arg in the Nullable<T> case also.
+ if (!IsLocationAssignable(sigTarget.GetRetTypeHandleThrowing(),
+ sigInvoke.GetRetTypeHandleThrowing(),
+ flags & DBF_RelaxedSignature,
+ FALSE))
+ return FALSE;
+
+ // We must have a match.
+ if (pfIsOpenDelegate)
+ *pfIsOpenDelegate = fIsOpenDelegate;
+ return TRUE;
+}
+
+MethodDesc* COMDelegate::GetDelegateCtor(TypeHandle delegateType, MethodDesc *pTargetMethod, DelegateCtorArgs *pCtorData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodDesc *pRealCtor = NULL;
+
+ MethodTable *pDelMT = delegateType.AsMethodTable();
+ DelegateEEClass *pDelCls = (DelegateEEClass*)(pDelMT->GetClass());
+
+ MethodDesc *pDelegateInvoke = COMDelegate::FindDelegateInvokeMethod(pDelMT);
+
+ MetaSig invokeSig(pDelegateInvoke);
+ MetaSig methodSig(pTargetMethod);
+ UINT invokeArgCount = invokeSig.NumFixedArgs();
+ UINT methodArgCount = methodSig.NumFixedArgs();
+ BOOL isStatic = pTargetMethod->IsStatic();
+ LoaderAllocator *pTargetMethodLoaderAllocator = pTargetMethod->GetLoaderAllocator();
+ BOOL isCollectible = pTargetMethodLoaderAllocator->IsCollectible();
+ // A method that may be instantiated over a collectible type and is static will require a delegate
+ // that has the _methodBase field filled in with the LoaderAllocator of the collectible assembly
+ // associated with the instantiation.
+ BOOL fMaybeCollectibleAndStatic = FALSE;
+
+ if (isStatic)
+ {
+ // When this method is called and the method being considered is shared, we typically
+ // are passed a wrapper method for the explicit canonical instantiation. It would be illegal
+ // to actually call that method, but the JIT uses it as a proxy for the real instantiated
+ // method, so the MethodDesc APIs cannot be made to report it as the shared MethodDesc.
+ // Hence, this collection of checks detects whether the MethodDesc being used is a normal
+ // MethodDesc for shared code, or a wrapper MethodDesc corresponding to the actually
+ // uncallable instantiation over __Canon.
+ if (pTargetMethod->GetMethodTable()->IsSharedByGenericInstantiations())
+ {
+ fMaybeCollectibleAndStatic = TRUE;
+ }
+ else if (pTargetMethod->IsSharedByGenericMethodInstantiations())
+ {
+ fMaybeCollectibleAndStatic = TRUE;
+ }
+ else if (pTargetMethod->HasMethodInstantiation())
+ {
+ Instantiation instantiation = pTargetMethod->GetMethodInstantiation();
+ for (DWORD iParam = 0; iParam < instantiation.GetNumArgs(); iParam++)
+ {
+ if (instantiation[iParam] == g_pCanonMethodTableClass)
+ {
+ fMaybeCollectibleAndStatic = TRUE;
+ break;
+ }
+ }
+ }
+ }
+
+ // If this might be collectible and is static, then we will go down the slow path. Implementing
+ // yet another fast path would require a methoddesc parameter, but hopefully isn't necessary.
+ if (fMaybeCollectibleAndStatic)
+ return NULL;
+
+ if (!isStatic)
+ methodArgCount++; // count 'this'
+ MethodDesc *pCallerMethod = (MethodDesc*)pCtorData->pMethod;
+ BOOL needsSecureDelegate = NeedsSecureDelegate(pCallerMethod, GetAppDomain(), pTargetMethod);
+
+ if (!needsSecureDelegate && NeedsWrapperDelegate(pTargetMethod))
+ {
+ // If we need a wrapper even though it is not a secure delegate, go through the slow path
+ return NULL;
+ }
+
+ // If this is a secure delegate case, and the secure delegate would have a pointer to a collectible
+ // method in it, then use the slow path. This could be optimized with a set of fast paths.
+ if (needsSecureDelegate && (pCallerMethod->IsLCGMethod() || pCallerMethod->GetLoaderAllocator()->IsCollectible()))
+ return NULL;
+
+ // Force the slow path for nullable so that we can give the user an error in the case where the verifier is not run.
+ MethodTable* pMT = pTargetMethod->GetMethodTable();
+ if (!pTargetMethod->IsStatic() && Nullable::IsNullableType(pMT))
+ return NULL;
+
+#ifdef FEATURE_COMINTEROP
+ // We'll always force classic COM types to go down the slow path for security checks.
+ if ((pMT->IsComObjectType() && !pMT->IsWinRTObjectType()) ||
+ (pMT->IsComImport() && !pMT->IsProjectedFromWinRT()))
+ {
+ return NULL;
+ }
+#endif
+
+ // Devdiv bug 296229: if the target method is dangerous, force the delegate creation to go through the
+ // slow path, where we will do a demand to ensure security.
+ if (InvokeUtil::IsDangerousMethod(pTargetMethod))
+ return NULL;
+
+ // DELEGATE KINDS TABLE
+ //
+ //                                  _target        _methodPtr             _methodPtrAux       _invocationList   _invocationCount
+ //
+ // 1- Instance closed               'this' ptr     target method          null                null              0
+ // 2- Instance open non-virt        delegate       shuffle thunk          target method       null              0
+ // 3- Instance open virtual         delegate       Virtual-stub dispatch  method id           null              0
+ // 4- Static closed                 first arg      target method          null                null              0
+ // 5- Static closed (special sig)   delegate       specialSig thunk       target method       first arg         0
+ // 6- Static opened                 delegate       shuffle thunk          target method       null              0
+ // 7- Secure                        delegate       call thunk             MethodDesc (frame)  target delegate   creator assembly
+ //
+ // Delegate invoke arg count == target method arg count - 2, 3, 6
+ // Delegate invoke arg count == 1 + target method arg count - 1, 4, 5
+ //
+ // 1, 4 - MulticastDelegate.ctor1 (simply assign _target and _methodPtr)
+ // 5 - MulticastDelegate.ctor2 (see table, takes 3 args)
+ // 2, 6 - MulticastDelegate.ctor3 (take shuffle thunk)
+ // 3 - MulticastDelegate.ctor4 (take shuffle thunk, retrieve MethodDesc) ???
+ //
+ // 7 - Needs special handling
+ //
+ // With collectible types, we need to fill the _methodBase field in with a value that represents the LoaderAllocator of the target method
+ // if the delegate is not a closed instance delegate.
+ //
+ // There are two techniques that will work for this.
+ // One is to simply use the slow path. We use this for unusual constructs. It is rather slow.
+ // We will use this for the secure variants
+ //
+ // Another is to pass a gchandle to the delegate ctor. This is fastest, but only works if we can predict the gc handle at this time.
+ // We will use this for the non-secure variants.
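+ //
+ // For instance (illustrative): 'new Action(obj.M)' for a non-virtual instance method M
+ // is kind 1 - _target = obj, _methodPtr = M's code address, all other fields null/0 -
+ // while an open instance binding such as 'new Action<C>(C.M)' is kind 2 and goes
+ // through a shuffle thunk instead.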
+
+ // Collectible secure delegates can go down the slow path
+ if (isCollectible && needsSecureDelegate)
+ return NULL;
+
+ if (invokeArgCount == methodArgCount)
+ {
+ // case 2, 3, 6
+ //@TODO:NEWVTWORK: Might need changing.
+ // The virtual dispatch stub doesn't work on unboxed value type objects which don't have MT pointers.
+ // Since open virtual (delegate kind 3) delegates on value type methods require unboxed objects we cannot use the
+ // virtual dispatch stub for them. On the other hand, virtual methods on value types don't need
+ // to be dispatched because value types cannot be derived from. So we treat them like non-virtual methods (delegate kind 2).
+ if (!isStatic && pTargetMethod->IsVirtual() && !pTargetMethod->GetMethodTable()->IsValueType())
+ {
+ // case 3
+ if (needsSecureDelegate)
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_SECURE_VIRTUAL_DISPATCH);
+ else if (isCollectible)
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_COLLECTIBLE_VIRTUAL_DISPATCH);
+ else
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_VIRTUAL_DISPATCH);
+ }
+ else
+ {
+ // case 2, 6
+ if (needsSecureDelegate)
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_SECURE_OPENED);
+ else if (isCollectible)
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_COLLECTIBLE_OPENED);
+ else
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_OPENED);
+ }
+ Stub *pShuffleThunk = NULL;
+ if (!pTargetMethod->IsStatic() && pTargetMethod->HasRetBuffArg())
+ pShuffleThunk = pDelCls->m_pInstRetBuffCallStub;
+ else
+ pShuffleThunk = pDelCls->m_pStaticCallStub;
+
+ if (!pShuffleThunk)
+ pShuffleThunk = SetupShuffleThunk(pDelMT, pTargetMethod);
+ pCtorData->pArg3 = (void*)pShuffleThunk->GetEntryPoint();
+ if (needsSecureDelegate)
+ {
+ // need to fill the info for the secure delegate
+ pCtorData->pArg4 = (void *)GetSecureInvoke(pDelegateInvoke);
+ pCtorData->pArg5 = pCallerMethod;
+ }
+ else if (isCollectible)
+ {
+ pCtorData->pArg4 = pTargetMethodLoaderAllocator->GetLoaderAllocatorObjectHandle();
+ }
+ }
+ else
+ {
+ // case 1, 4, 5
+ //TODO: need to differentiate on 5
+ _ASSERTE(invokeArgCount + 1 == methodArgCount);
+
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+ // Force closed delegates over static methods with return buffer to go via
+ // the slow path to create ThisPtrRetBufPrecode
+ if (isStatic && pTargetMethod->HasRetBuffArg())
+ return NULL;
+#endif
+
+ // Under the conditions below the delegate ctor needs to perform some heavy operation,
+ // either to resolve the interface call to the real target or to get the unboxing stub (or both).
+ BOOL needsRuntimeInfo = !pTargetMethod->IsStatic() &&
+ (pTargetMethod->IsInterface() ||
+ (pTargetMethod->GetMethodTable()->IsValueType() && !pTargetMethod->IsUnboxingStub()));
+
+ if (needsSecureDelegate)
+ {
+ if (needsRuntimeInfo)
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_SECURE_RT_CLOSED);
+ else
+ {
+ if (!isStatic)
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_SECURE_CLOSED);
+ else
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_SECURE_CLOSED_STATIC);
+ }
+
+ // need to fill the info for the secure delegate
+ pCtorData->pArg3 = (void *)GetSecureInvoke(pDelegateInvoke);
+ pCtorData->pArg4 = pCallerMethod;
+ }
+ else
+ {
+ if (needsRuntimeInfo)
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_RT_CLOSED);
+ else
+ {
+ if (!isStatic)
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_CLOSED);
+ else
+ {
+ if (isCollectible)
+ {
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_COLLECTIBLE_CLOSED_STATIC);
+ pCtorData->pArg3 = pTargetMethodLoaderAllocator->GetLoaderAllocatorObjectHandle();
+ }
+ else
+ {
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_CLOSED_STATIC);
+ }
+ }
+ }
+ }
+ }
+
+ return pRealCtor;
+}
+
+
+/*@GENERICSVER: new (works for generics too)
+ Does a static validation of parameters passed into a delegate constructor.
+
+
+ For "new Delegate(obj.method)" where method is statically typed as "C::m" and
+ the static type of obj is D (some subclass of C)...
+
+ Params:
+ instHnd : Static type of the instance, from which pFtn is obtained. Ignored if pFtn
+ is static (i.e. D)
+ ftnParentHnd: Parent of the MethodDesc, pFtn, used to create the delegate (i.e. type C)
+ pFtn : (possibly shared) MethodDesc of the function pointer used to create the delegate (i.e. C::m)
+ pDlgt : The delegate type (i.e. Delegate)
+ module: The module scoping methodMemberRef and delegateConstructorMemberRef
+ methodMemberRef: the MemberRef, MemberDef or MemberSpec of the target method (i.e. a mdToken for C::m)
+ delegateConstructorMemberRef: the MemberRef, MemberDef or MemberSpec of the delegate constructor (i.e. a mdToken for Delegate::.ctor)
+
+ Validates the following conditions:
+ 1. If the function (pFtn) is not static, pInst should be equal to the type where
+ pFtn is defined or pInst should be a parent of pFtn's type.
+ 2. The signature of the function should be compatible with the signature
+ of the Invoke method of the delegate type.
+ The signature is retrieved from module, methodMemberRef and delegateConstructorMemberRef
+
+ NB: Although some of these arguments are redundant, we pass them in to avoid looking up
+ information that should already be available.
+ Instead of comparing type handles modulo some context, the method directly compares metadata to avoid
+ loading classes referenced in the method signatures (hence the need for the module and member refs).
+ Also, because this method works directly on metadata, without allowing any additional instantiation of the
+ free type variables in the signature of the method or delegate constructor, this code
+ // will *only* verify a constructor application at the typical (i.e. formal) instantiation.
+*/
+/* static */
+BOOL COMDelegate::ValidateCtor(TypeHandle instHnd,
+ TypeHandle ftnParentHnd,
+ MethodDesc *pFtn,
+ TypeHandle dlgtHnd,
+ BOOL *pfIsOpenDelegate)
+
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pFtn));
+ PRECONDITION(!dlgtHnd.IsNull());
+ PRECONDITION(!ftnParentHnd.IsNull());
+
+ INJECT_FAULT(COMPlusThrowOM()); // from MetaSig::CompareElementType
+ }
+ CONTRACTL_END;
+
+ DelegateEEClass *pdlgEEClass = (DelegateEEClass*)dlgtHnd.AsMethodTable()->GetClass();
+ PREFIX_ASSUME(pdlgEEClass != NULL);
+ MethodDesc *pDlgtInvoke = pdlgEEClass->m_pInvokeMethod;
+ if (pDlgtInvoke == NULL)
+ return FALSE;
+ return IsMethodDescCompatible(instHnd, ftnParentHnd, pFtn, dlgtHnd, pDlgtInvoke, DBF_RelaxedSignature, pfIsOpenDelegate);
+}
+
+
+// This method checks the delegate type transparency rules.
+// It returns TRUE if the transparency rules are obeyed and FALSE otherwise
+//
+// The Partial Trust Silverlight (SL2, SL4, and PT SL5) rule is:
+// 1. Critical delegates can only be bound to critical target methods
+// 2. Transparent/SafeCritical delegates can only be bound to Transparent/SafeCritical target methods
+//
+// The Full Trust Silverlight rule FOR NOW is: anything is allowed
+// The Desktop rule FOR NOW is: anything is allowed
+//
+// This is called by JIT in early bound delegate creation to determine whether the delegate transparency
+// check is POSSIBLY needed. If the code is shared between appdomains of different trust levels, it is
+ // possible that the check is needed in some domains but not in others. So we need to make that distinction
+// at run time in JIT_DelegateSecurityCheck.
+
+/* static */
+BOOL COMDelegate::ValidateSecurityTransparency(MethodDesc *pFtn, MethodTable *pdlgMT)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_CORECLR
+ if (GetAppDomain()->GetSecurityDescriptor()->IsFullyTrusted())
+ return TRUE;
+
+ BOOL fCriticalDelegate = Security::IsTypeCritical(pdlgMT) && !Security::IsTypeSafeCritical(pdlgMT);
+ BOOL fCriticalTarget = Security::IsMethodCritical(pFtn) && !Security::IsMethodSafeCritical(pFtn);
+
+ // returns true if:
+ // 1. the delegate is critical and the target method is critical, or
+ // 2. the delegate is transparent/safecritical and the target method is transparent/safecritical
+ return (fCriticalDelegate == fCriticalTarget);
+#else
+ return TRUE;
+#endif // !FEATURE_CORECLR
+}
+
+
+BOOL COMDelegate::ValidateBeginInvoke(DelegateEEClass* pClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pClass));
+ PRECONDITION(CheckPointer(pClass->m_pBeginInvokeMethod));
+
+ // insert fault. Can the binder throw an OOM?
+ }
+ CONTRACTL_END;
+
+ if (pClass->m_pInvokeMethod == NULL)
+ return FALSE;
+
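+ // For reference, the shape validated below is the standard async pattern: given
+ // 'R Invoke(A1, ..., An)', BeginInvoke must look like
+ //   IAsyncResult BeginInvoke(A1, ..., An, AsyncCallback callback, object state)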
+ // We check the signatures under the typical instantiation of the possibly generic class
+ MetaSig beginInvokeSig(pClass->m_pBeginInvokeMethod->LoadTypicalMethodDefinition());
+ MetaSig invokeSig(pClass->m_pInvokeMethod->LoadTypicalMethodDefinition());
+
+ if (beginInvokeSig.GetCallingConventionInfo() != (IMAGE_CEE_CS_CALLCONV_HASTHIS | IMAGE_CEE_CS_CALLCONV_DEFAULT))
+ return FALSE;
+
+ if (beginInvokeSig.NumFixedArgs() != invokeSig.NumFixedArgs() + 2)
+ return FALSE;
+
+ if (beginInvokeSig.GetRetTypeHandleThrowing() != TypeHandle(MscorlibBinder::GetClass(CLASS__IASYNCRESULT)))
+ return FALSE;
+
+ while(invokeSig.NextArg() != ELEMENT_TYPE_END)
+ {
+ beginInvokeSig.NextArg();
+ if (beginInvokeSig.GetLastTypeHandleThrowing() != invokeSig.GetLastTypeHandleThrowing())
+ return FALSE;
+ }
+
+ beginInvokeSig.NextArg();
+ if (beginInvokeSig.GetLastTypeHandleThrowing()!= TypeHandle(MscorlibBinder::GetClass(CLASS__ASYNCCALLBACK)))
+ return FALSE;
+
+ beginInvokeSig.NextArg();
+ if (beginInvokeSig.GetLastTypeHandleThrowing()!= TypeHandle(g_pObjectClass))
+ return FALSE;
+
+ if (beginInvokeSig.NextArg() != ELEMENT_TYPE_END)
+ return FALSE;
+
+ return TRUE;
+}
+
+BOOL COMDelegate::ValidateEndInvoke(DelegateEEClass* pClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pClass));
+ PRECONDITION(CheckPointer(pClass->m_pEndInvokeMethod));
+
+ // insert fault. Can the binder throw an OOM?
+ }
+ CONTRACTL_END;
+
+ if (pClass->m_pInvokeMethod == NULL)
+ return FALSE;
+
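+ // For reference, the shape validated below is the standard async pattern: given
+ // 'R Invoke(...)', EndInvoke must look like
+ //   R EndInvoke(<each byref argument of Invoke>, IAsyncResult result)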
+ // We check the signatures under the typical instantiation of the possibly generic class
+ MetaSig endInvokeSig(pClass->m_pEndInvokeMethod->LoadTypicalMethodDefinition());
+ MetaSig invokeSig(pClass->m_pInvokeMethod->LoadTypicalMethodDefinition());
+
+ if (endInvokeSig.GetCallingConventionInfo() != (IMAGE_CEE_CS_CALLCONV_HASTHIS | IMAGE_CEE_CS_CALLCONV_DEFAULT))
+ return FALSE;
+
+ if (endInvokeSig.GetRetTypeHandleThrowing() != invokeSig.GetRetTypeHandleThrowing())
+ return FALSE;
+
+ CorElementType type;
+ while((type = invokeSig.NextArg()) != ELEMENT_TYPE_END)
+ {
+ if (type == ELEMENT_TYPE_BYREF)
+ {
+ endInvokeSig.NextArg();
+ if (endInvokeSig.GetLastTypeHandleThrowing() != invokeSig.GetLastTypeHandleThrowing())
+ return FALSE;
+ }
+ }
+
+ if (endInvokeSig.NextArg() == ELEMENT_TYPE_END)
+ return FALSE;
+
+ if (endInvokeSig.GetLastTypeHandleThrowing() != TypeHandle(MscorlibBinder::GetClass(CLASS__IASYNCRESULT)))
+ return FALSE;
+
+ if (endInvokeSig.NextArg() != ELEMENT_TYPE_END)
+ return FALSE;
+
+ return TRUE;
+}
+
+BOOL COMDelegate::IsSecureDelegate(DELEGATEREF dRef)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+ DELEGATEREF innerDel = NULL;
+ if (dRef->GetInvocationCount() != 0)
+ {
+ innerDel = (DELEGATEREF) dRef->GetInvocationList();
+ if (innerDel != NULL && innerDel->GetMethodTable()->IsDelegate())
+ {
+ // We have a secure delegate
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+#endif // !DACCESS_COMPILE
+
+
+// Decides if pMT derives from Delegate.
+BOOL COMDelegate::IsDelegate(MethodTable *pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ return (pMT == g_pDelegateClass) || (pMT == g_pMulticastDelegateClass) || pMT->IsDelegate();
+}
+
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+
+
+// Helper to construct an UnhandledExceptionEventArgs. This may fail for out-of-memory or
+// other reasons. Currently, we fall back on passing a NULL eventargs to the event sink.
+// Another possibility is to have two shared immutable instances (one for isTerminating and
+// another for !isTerminating). These must be immutable because we perform no synchronization
+// around delivery of unhandled exceptions. They occur in a free-threaded manner on various
+// threads.
+//
+// It doesn't add much value to communicate the isTerminating flag under these unusual
+// conditions.
+static void TryConstructUnhandledExceptionArgs(OBJECTREF *pThrowable,
+ BOOL isTerminating,
+ OBJECTREF *pOutEventArgs)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pThrowable != NULL && IsProtectedByGCFrame(pThrowable));
+ _ASSERTE(pOutEventArgs != NULL && IsProtectedByGCFrame(pOutEventArgs));
+ _ASSERTE(*pOutEventArgs == NULL);
+
+ EX_TRY
+ {
+ MethodTable *pMT = MscorlibBinder::GetClass(CLASS__UNHANDLED_EVENTARGS);
+ *pOutEventArgs = AllocateObject(pMT);
+
+ MethodDescCallSite ctor(METHOD__UNHANDLED_EVENTARGS__CTOR, pOutEventArgs);
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(*pOutEventArgs),
+ ObjToArgSlot(*pThrowable),
+ BoolToArgSlot(isTerminating)
+ };
+
+ ctor.Call(args);
+ }
+ EX_CATCH
+ {
+ *pOutEventArgs = NULL; // arguably better than a half-constructed object
+
+ // It's not even worth asserting, because these aren't our bugs. At
+ // some point, an MDA may be warranted.
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+
+// Helper to dispatch a single unhandled exception notification, swallowing anything
+// that goes wrong.
+static void InvokeUnhandledSwallowing(OBJECTREF *pDelegate,
+ OBJECTREF *pDomain,
+ OBJECTREF *pEventArgs)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pDelegate != NULL && IsProtectedByGCFrame(pDelegate));
+ _ASSERTE(pDomain != NULL && IsProtectedByGCFrame(pDomain));
+ _ASSERTE(pEventArgs == NULL || IsProtectedByGCFrame(pEventArgs));
+
+ EX_TRY
+ {
+ // We check both FEATURE_ defines here since, without the CSE feature, this
+ // aspect of the notification feature is pointless. And guarding only on
+ // FEATURE_CORRUPTING_EXCEPTIONS would enable this code for builds that don't
+ // support FEATURE_EXCEPTION_NOTIFICATIONS, like CoreCLR. We don't want that
+ // to happen either.
+#if defined(FEATURE_CORRUPTING_EXCEPTIONS) && defined(FEATURE_EXCEPTION_NOTIFICATIONS)
+ BOOL fCanMethodHandleException = g_pConfig->LegacyCorruptedStateExceptionsPolicy();
+ if (!fCanMethodHandleException)
+ {
+ // CSE policy has not been overridden - proceed with our checks.
+ //
+ // Notifications for CSE are only delivered if the delegate target follows CSE rules.
+ // So, get the corruption severity of the active exception that has gone unhandled.
+ //
+ // By Default, assume that the active exception is not corrupting.
+ CorruptionSeverity severity = NotCorrupting;
+ Thread *pCurThread = GetThread();
+ _ASSERTE(pCurThread != NULL);
+ ThreadExceptionState *pExState = pCurThread->GetExceptionState();
+ if (pExState->IsExceptionInProgress())
+ {
+ // If an exception is active, it implies we have a tracker for it.
+ // Hence, get the corruption severity from the active exception tracker.
+ severity = pExState->GetCurrentExceptionTracker()->GetCorruptionSeverity();
+ _ASSERTE(severity > NotSet);
+ }
+
+ // Notifications are delivered based upon corruption severity of the exception
+ fCanMethodHandleException = ExceptionNotifications::CanDelegateBeInvokedForException(pDelegate, severity);
+ if (!fCanMethodHandleException)
+ {
+ LOG((LF_EH, LL_INFO100, "InvokeUnhandledSwallowing: ADUEN Delegate cannot be invoked for corruption severity %d\n",
+ severity));
+ }
+ }
+
+ if (fCanMethodHandleException)
+#endif // defined(FEATURE_CORRUPTING_EXCEPTIONS) && defined(FEATURE_EXCEPTION_NOTIFICATIONS)
+ {
+ // We've already exercised the prestub on the method returned by COMDelegate::GetMethodDesc,
+ // as part of wiring up a reliable event sink. Deliver the notification.
+ ExceptionNotifications::DeliverExceptionNotification(UnhandledExceptionHandler, pDelegate, pDomain, pEventArgs);
+ }
+ }
+ EX_CATCH
+ {
+ // It's not even worth asserting, because these aren't our bugs. At
+ // some point, an MDA may be warranted.
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+
+// Cannot combine SEH and C++ exceptions in one method. Split out from InvokeNotify.
+static void InvokeNotifyInner(OBJECTREF *pDelegate, OBJECTREF *pDomain)
+{
+ // static contract, since we use SEH.
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ _ASSERTE(pDelegate != NULL && IsProtectedByGCFrame(pDelegate));
+ _ASSERTE(pDomain != NULL && IsProtectedByGCFrame(pDomain));
+
+ struct Param : ThreadBaseExceptionFilterParam
+ {
+ OBJECTREF *pDelegate;
+ OBJECTREF *pDomain;
+ } param;
+ param.location = SystemNotification;
+ param.pDelegate = pDelegate;
+ param.pDomain = pDomain;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(DELEGATEREF(*pParam->pDelegate)->GetMethodPtr());
+
+ DECLARE_ARGHOLDER_ARRAY(args, 3);
+
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(DELEGATEREF(*pParam->pDelegate)->GetTarget());
+ args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(*pParam->pDomain);
+ args[ARGNUM_2] = NULL;
+
+ CALL_MANAGED_METHOD_NORET(args);
+ }
+ PAL_EXCEPT_FILTER(ThreadBaseExceptionFilter)
+ {
+ _ASSERTE(!"ThreadBaseExceptionFilter returned EXECUTE_HANDLER.");
+ }
+ PAL_ENDTRY;
+}
+
+
+
+// Helper to dispatch a single event notification. If anything goes wrong, we cause
+// an unhandled exception notification to occur out of our first pass, and then we
+// swallow and continue.
+static void InvokeNotify(OBJECTREF *pDelegate, OBJECTREF *pDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pDelegate != NULL && IsProtectedByGCFrame(pDelegate));
+ _ASSERTE(pDomain != NULL && IsProtectedByGCFrame(pDomain));
+
+ STRESS_LOG2(LF_GC, LL_INFO1000, "Distributing reliable event: MethodPtr=%p MethodPtrAux=%p\n",
+ DELEGATEREF(*pDelegate)->GetMethodPtr(),
+ DELEGATEREF(*pDelegate)->GetMethodPtrAux());
+
+ // All reliable events should be delivered on the finalizer thread.
+ _ASSERTE(IsFinalizerThread());
+
+ INDEBUG(Thread* pThread = GetThread());
+
+ // This is an early check for a condition that we assert in Thread::InternalReset, called from DoOneFinalization later.
+ _ASSERTE(!pThread->HasCriticalRegion());
+ _ASSERTE(!pThread->HasThreadAffinity());
+
+ EX_TRY
+ {
+ InvokeNotifyInner(pDelegate, pDomain);
+ }
+ EX_CATCH
+ {
+ // It's not even worth asserting, because these aren't our bugs. At
+ // some point, an MDA may be warranted.
+ // This is an early check for a condition that we assert in Thread::InternalReset, called from DoOneFinalization later.
+ _ASSERTE(!pThread->HasCriticalRegion());
+ _ASSERTE(!pThread->HasThreadAffinity());
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ // This is an early check for a condition that we assert in Thread::InternalReset, called from DoOneFinalization later.
+ _ASSERTE(!pThread->HasCriticalRegion());
+ _ASSERTE(!pThread->HasThreadAffinity());
+}
+
+
+// For critical system events, ensure that each handler gets a notification --
+// even if prior handlers in the chain have thrown an exception. Also, try
+// to deliver an unhandled exception event if we ever swallow an exception
+// out of a reliable notification. Note that the add_ event handlers are
+// responsible for any reliable preparation of the target, like eager JITting.
+void DistributeEventReliably(OBJECTREF *pDelegate, OBJECTREF *pDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pDelegate != NULL && IsProtectedByGCFrame(pDelegate));
+ _ASSERTE(pDomain != NULL && IsProtectedByGCFrame(pDomain));
+
+ Thread *pThread = GetThread();
+
+ EX_TRY
+ {
+ struct _gc
+ {
+ PTRARRAYREF Array;
+ OBJECTREF InnerDelegate;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.Array = (PTRARRAYREF) ((DELEGATEREF)(*pDelegate))->GetInvocationList();
+ if (gc.Array == NULL || !gc.Array->GetMethodTable()->IsArray())
+ {
+ InvokeNotify(pDelegate, pDomain);
+ }
+ else
+ {
+ // The _invocationCount could be less than the array size, if we are sharing
+ // immutable arrays cleverly.
+ INT_PTR invocationCount = ((DELEGATEREF)(*pDelegate))->GetInvocationCount();
+
+ _ASSERTE(FitsInU4(invocationCount));
+ DWORD cnt = static_cast<DWORD>(invocationCount);
+
+ _ASSERTE(cnt <= gc.Array->GetNumComponents());
+
+ for (DWORD i=0; i<cnt; i++)
+ {
+ gc.InnerDelegate = gc.Array->m_Array[i];
+ InvokeNotify(&gc.InnerDelegate, pDomain);
+ if (pThread->IsAbortRequested())
+ {
+ pThread->UnmarkThreadForAbort(Thread::TAR_Thread);
+ }
+ }
+ }
+ GCPROTECT_END();
+ }
+ EX_CATCH
+ {
+ // It's not even worth asserting, because these aren't our bugs. At
+ // some point, an MDA may be warranted.
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+// The unhandled exception event is a little easier to distribute, because
+// we simply swallow any failures and proceed to the next event sink.
+void DistributeUnhandledExceptionReliably(OBJECTREF *pDelegate,
+ OBJECTREF *pDomain,
+ OBJECTREF *pThrowable,
+ BOOL isTerminating)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pDelegate != NULL && IsProtectedByGCFrame(pDelegate));
+ _ASSERTE(pDomain != NULL && IsProtectedByGCFrame(pDomain));
+ _ASSERTE(pThrowable != NULL && IsProtectedByGCFrame(pThrowable));
+
+ EX_TRY
+ {
+ struct _gc
+ {
+ PTRARRAYREF Array;
+ OBJECTREF InnerDelegate;
+ OBJECTREF EventArgs;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ // Try to construct an UnhandledExceptionEventArgs out of pThrowable & isTerminating.
+ // If unsuccessful, the best we can do is pass NULL.
+ TryConstructUnhandledExceptionArgs(pThrowable, isTerminating, &gc.EventArgs);
+
+ gc.Array = (PTRARRAYREF) ((DELEGATEREF)(*pDelegate))->GetInvocationList();
+ if (gc.Array == NULL || !gc.Array->GetMethodTable()->IsArray())
+ {
+ InvokeUnhandledSwallowing(pDelegate, pDomain, &gc.EventArgs);
+ }
+ else
+ {
+ // The _invocationCount could be less than the array size, if we are sharing
+ // immutable arrays cleverly.
+ INT_PTR invocationCount = ((DELEGATEREF)(*pDelegate))->GetInvocationCount();
+
+ _ASSERTE(FitsInU4(invocationCount));
+ DWORD cnt = static_cast<DWORD>(invocationCount);
+
+ _ASSERTE(cnt <= gc.Array->GetNumComponents());
+
+ for (DWORD i=0; i<cnt; i++)
+ {
+ gc.InnerDelegate = gc.Array->m_Array[i];
+ InvokeUnhandledSwallowing(&gc.InnerDelegate, pDomain, &gc.EventArgs);
+ }
+ }
+ GCPROTECT_END();
+ }
+ EX_CATCH
+ {
+ // It's not even worth asserting, because these aren't our bugs. At
+ // some point, an MDA may be warranted.
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
diff --git a/src/vm/comdelegate.h b/src/vm/comdelegate.h
new file mode 100644
index 0000000000..18562169f4
--- /dev/null
+++ b/src/vm/comdelegate.h
@@ -0,0 +1,243 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: COMDelegate.h
+//
+// This module contains the native methods for the Delegate class.
+//
+
+
+#ifndef _COMDELEGATE_H_
+#define _COMDELEGATE_H_
+
+class Stub;
+class ShuffleThunkCache;
+
+#include "cgensys.h"
+#include "dllimportcallback.h"
+#include "stubcache.h"
+
+typedef ArgBasedStubCache MulticastStubCache;
+
+VOID GenerateShuffleArray(MethodDesc* pInvoke, MethodDesc *pTargetMeth, struct ShuffleEntry * pShuffleEntryArray, size_t nEntries);
+
+
+// This class represents the native methods for the Delegate class
+class COMDelegate
+{
+private:
+ // friend VOID CPUSTUBLINKER::EmitMulticastInvoke(...);
+ // friend VOID CPUSTUBLINKER::EmitShuffleThunk(...);
+ friend class CPUSTUBLINKER;
+ friend class DelegateInvokeStubManager;
+ friend class SecureDelegateFrame;
+ friend BOOL MulticastFrame::TraceFrame(Thread *thread, BOOL fromPatch,
+ TraceDestination *trace, REGDISPLAY *regs);
+
+ static MulticastStubCache* m_pSecureDelegateStubCache;
+ static MulticastStubCache* m_pMulticastStubCache;
+
+ static CrstStatic s_DelegateToFPtrHashCrst; // Lock for the following hash.
+ static PtrHashMap* s_pDelegateToFPtrHash; // Hash table containing the Delegate->FPtr pairs
+ // passed out to unmanaged code.
+public:
+ static ShuffleThunkCache *m_pShuffleThunkCache;
+
+ //REVIEW: reconcile initialization, one init?
+ // One time init.
+ static void Init();
+
+ static FCDECL3(void, DelegateConstruct, Object* refThis, Object* target, PCODE method);
+
+ static FCDECL1(Object*, InternalAlloc, ReflectClassBaseObject* target);
+ static FCDECL1(Object*, InternalAllocLike, Object* pThis);
+ static FCDECL2(FC_BOOL_RET, InternalEqualTypes, Object* pThis, Object *pThat);
+
+ static FCDECL3(PCODE, AdjustTarget, Object* refThis, Object* target, PCODE method);
+ static FCDECL2(PCODE, GetCallStub, Object* refThis, PCODE method);
+
+ static FCDECL5(FC_BOOL_RET, BindToMethodName, Object* refThisUNSAFE, Object* targetUNSAFE, ReflectClassBaseObject *pMethodTypeUNSAFE, StringObject* methodNameUNSAFE, int flags);
+
+ static FCDECL5(FC_BOOL_RET, BindToMethodInfo, Object* refThisUNSAFE, Object* targetUNSAFE, ReflectMethodObject *method, ReflectClassBaseObject *pMethodTypeUNSAFE, int flags);
+
+ // This gets the MethodInfo for a delegate, creating it if necessary
+ static FCDECL1(ReflectMethodObject*, FindMethodHandle, Object* refThis);
+ static FCDECL2(FC_BOOL_RET, InternalEqualMethodHandles, Object *refLeftIn, Object *refRightIn);
+
+ // Get the invoke method for the delegate. Used to transition delegates to multicast delegates.
+ static FCDECL1(PCODE, GetMulticastInvoke, Object* refThis);
+ static FCDECL1(MethodDesc*, GetInvokeMethod, Object* refThis);
+ static PCODE GetSecureInvoke(MethodDesc* pMD);
+ // determines whether the delegate needs to be wrapped for non-security reasons
+ static BOOL NeedsWrapperDelegate(MethodDesc* pTargetMD);
+ // determines whether the delegate needs to be wrapped in a secure delegate
+ static BOOL NeedsSecureDelegate(MethodDesc* pCreatorMethod, AppDomain *pCreatorDomain, MethodDesc* pTargetMD);
+ // on entry delegate points to the delegate to wrap
+ static DELEGATEREF CreateSecureDelegate(DELEGATEREF delegate, MethodDesc* pCreatorMethod, MethodDesc* pTargetMD);
+
+ // Marshals a delegate to an unmanaged callback.
+ static LPVOID ConvertToCallback(OBJECTREF pDelegate);
+
+ // Marshals an unmanaged callback to a Delegate.
+ static OBJECTREF ConvertToDelegate(LPVOID pCallback, MethodTable* pMT);
+
+#ifdef FEATURE_COMINTEROP
+ // Marshals a WinRT delegate interface pointer to a managed Delegate
+ static OBJECTREF ConvertWinRTInterfaceToDelegate(IUnknown *pUnk, MethodTable* pMT);
+
+ static ComPlusCallInfo * PopulateComPlusCallInfo(MethodTable * pDelMT);
+#endif // FEATURE_COMINTEROP
+
+ // Checks whether two delegates wrapping function pointers have the same unmanaged target
+ static FCDECL2(FC_BOOL_RET, CompareUnmanagedFunctionPtrs, Object *refDelegate1UNSAFE, Object *refDelegate2UNSAFE);
+
+ static PCODE GetStubForILStub(EEImplMethodDesc* pDelegateMD, MethodDesc** ppStubMD, DWORD dwStubFlags);
+ static MethodDesc* GetILStubMethodDesc(EEImplMethodDesc* pDelegateMD, DWORD dwStubFlags);
+
+ static void ValidateDelegatePInvoke(MethodDesc* pMD);
+
+ static void RemoveEntryFromFPtrHash(UPTR key);
+
+ // Decides if pMT derives from Delegate.
+ static BOOL IsDelegate(MethodTable *pMT);
+
+ // Decides if this is a secure delegate
+ static BOOL IsSecureDelegate(DELEGATEREF dRef);
+
+ // Get the cpu stub for a delegate invoke.
+ static PCODE GetInvokeMethodStub(EEImplMethodDesc* pMD);
+
+ // get the one single delegate invoke stub
+ static PCODE TheDelegateInvokeStub();
+
+#ifdef _TARGET_X86_
+#ifdef MDA_SUPPORTED
+ static Stub *GenerateStubForMDA(MethodDesc *pInvokeMD, MethodDesc *pStubMD, LPVOID pNativeTarget, Stub *pInnerStub);
+#endif // MDA_SUPPORTED
+ static Stub *GenerateStubForHost(MethodDesc *pInvokeMD, MethodDesc *pStubMD, LPVOID pNativeTarget, Stub *pInnerStub);
+#endif // _TARGET_X86_
+
+#ifdef FEATURE_COMINTEROP
+ static void DoUnmanagedCodeAccessCheck(MethodDesc* pMeth);
+#endif // FEATURE_COMINTEROP
+
+ static MethodDesc * __fastcall GetMethodDesc(OBJECTREF obj);
+ static OBJECTREF GetTargetObject(OBJECTREF obj);
+
+ static BOOL IsTrueMulticastDelegate(OBJECTREF delegate);
+
+#ifdef FEATURE_CORECLR
+ static BOOL IsMethodAllowedToSinkReversePInvoke(MethodDesc *pMD);
+#endif
+
+private:
+#ifdef FEATURE_CORECLR
+ static BOOL IsFullTrustDelegate(DELEGATEREF pDelegate);
+#endif
+ static Stub* SetupShuffleThunk(MethodTable * pDelMT, MethodDesc *pTargetMeth);
+
+public:
+ static MethodDesc* FindDelegateInvokeMethod(MethodTable *pMT);
+ static BOOL IsDelegateInvokeMethod(MethodDesc *pMD);
+
+ static BOOL IsMethodDescCompatible(TypeHandle thFirstArg,
+ TypeHandle thExactMethodType,
+ MethodDesc *pTargetMethod,
+ TypeHandle thDelegate,
+ MethodDesc *pInvokeMethod,
+ int flags,
+ BOOL *pfIsOpenDelegate);
+ static MethodDesc* GetDelegateCtor(TypeHandle delegateType, MethodDesc *pTargetMethod, DelegateCtorArgs *pCtorData);
+ //@GENERICSVER: new (suitable for generics)
+ // Method to do static validation of delegate .ctor
+ static BOOL ValidateCtor(TypeHandle objHnd, TypeHandle ftnParentHnd, MethodDesc *pFtn, TypeHandle dlgtHnd, BOOL *pfIsOpenDelegate);
+ static BOOL ValidateSecurityTransparency(MethodDesc *pFtn, MethodTable *pdlgMT); // enforce the transparency rules
+
+private:
+ static BOOL ValidateBeginInvoke(DelegateEEClass* pClass); // make certain the BeginInvoke method is consistent with the Invoke method
+ static BOOL ValidateEndInvoke(DelegateEEClass* pClass); // make certain the EndInvoke method is consistent with the Invoke method
+
+ static void BindToMethod(DELEGATEREF *pRefThis,
+ OBJECTREF *pRefFirstArg,
+ MethodDesc *pTargetMethod,
+ MethodTable *pExactMethodType,
+ BOOL fIsOpenDelegate,
+ BOOL fCheckSecurity);
+};
+
+// These flags affect the way BindToMethodInfo and BindToMethodName are allowed to bind a delegate to a target method. Their
+// values must be kept in sync with the definition in bcl\system\delegate.cs.
+enum DelegateBindingFlags
+{
+ DBF_StaticMethodOnly = 0x00000001, // Can only bind to static target methods
+ DBF_InstanceMethodOnly = 0x00000002, // Can only bind to instance (including virtual) methods
+ DBF_OpenDelegateOnly = 0x00000004, // Only allow the creation of delegates open over the 1st argument
+ DBF_ClosedDelegateOnly = 0x00000008, // Only allow the creation of delegates closed over the 1st argument
+ DBF_NeverCloseOverNull = 0x00000010, // A null target will never be considered as a possible null 1st argument
+ DBF_CaselessMatching = 0x00000020, // Use case insensitive lookup for methods matched by name
+ DBF_SkipSecurityChecks = 0x00000040, // Skip security checks (visibility, link demand etc.)
+ DBF_RelaxedSignature = 0x00000080, // Allow relaxed signature matching (co/contra variance)
+};
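+
+// These flags combine bitwise. For example (illustrative only, not a combination
+// mandated by any particular call site), requesting an open delegate over a
+// static method with case-insensitive name lookup would pass
+// (DBF_StaticMethodOnly | DBF_OpenDelegateOnly | DBF_CaselessMatching).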
+
+void DistributeEventReliably(OBJECTREF *pDelegate,
+ OBJECTREF *pDomain);
+
+void DistributeUnhandledExceptionReliably(OBJECTREF *pDelegate,
+ OBJECTREF *pDomain,
+ OBJECTREF *pThrowable,
+ BOOL isTerminating);
+
+// We want no unused bits in ShuffleEntry, since unused bits can make
+// equivalent ShuffleEntry arrays look inequivalent and defeat our
+// hashing.
+#include <pshpack1.h>
+
+// To handle a call to a static delegate, we create an array of ShuffleEntry
+// structures. Each entry instructs the shuffler to move a chunk of bytes.
+// The size of the chunk is StackElemSize (typically a DWORD): long arguments
+// have to be expressed as multiple ShuffleEntry's.
+//
+// The ShuffleEntry array serves two purposes:
+//
+// 1. A platform-independent blueprint for creating the platform-specific
+// shuffle thunk.
+// 2. A hash key for finding the shared shuffle thunk for a particular
+// signature.
+struct ShuffleEntry
+{
+ enum {
+ REGMASK = 0x8000,
+ OFSMASK = 0x7fff,
+ SENTINEL = 0xffff,
+ };
+
+#if defined(_TARGET_AMD64_) && !defined(UNIX_AMD64_ABI)
+ union {
+ UINT16 srcofs;
+ CorElementType argtype; // AMD64: shuffle array is just types
+ };
+#else
+
+ // Special values:
+ // -1 - indicates end of shuffle array: stacksizedelta
+ // == difference in stack size between virtual and static sigs.
+ // high bit - indicates a register argument: mask it off and
+ // the result is an offset into ArgumentRegisters.
+
+ UINT16 srcofs;
+
+ union {
+ UINT16 dstofs; //if srcofs != SENTINEL
+ UINT16 stacksizedelta; //if dstofs == SENTINEL
+ };
+#endif // _TARGET_AMD64_ && !UNIX_AMD64_ABI
+};
+
+
+#include <poppack.h>
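+
+// Illustrative (hypothetical) shuffle array for a non-AMD64 thunk that moves one
+// stack slot into the first argument register -- an example layout only, not one
+// produced by any particular signature:
+//
+// { /* srcofs */ 0x0004, /* dstofs */ ShuffleEntry::REGMASK | 0 } // stack offset 4 -> ArgumentRegisters[0]
+// { /* srcofs */ ShuffleEntry::SENTINEL, /* stacksizedelta */ 0 } // terminator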
+
+void __stdcall DoDelegateInvokeForHostCheck(Object* pDelegate);
+
+#endif // _COMDELEGATE_H_
diff --git a/src/vm/comdependenthandle.cpp b/src/vm/comdependenthandle.cpp
new file mode 100644
index 0000000000..19df2154a6
--- /dev/null
+++ b/src/vm/comdependenthandle.cpp
@@ -0,0 +1,77 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: COMDependentHandle.cpp
+//
+
+//
+// FCall's for the DependentHandle class
+//
+// Handle functions require cooperative mode, making these fcalls poor candidates for QCall conversion.
+//
+
+
+#include "common.h"
+#include "comdependenthandle.h"
+
+
+
+FCIMPL3(VOID, DependentHandle::nInitialize, Object *_primary, Object *_secondary, OBJECTHANDLE *outHandle)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(outHandle != NULL && *outHandle == NULL); // Multiple initializations disallowed
+
+ OBJECTREF primary(_primary);
+ OBJECTREF secondary(_secondary);
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ // Create the handle.
+ *outHandle = GetAppDomain()->CreateDependentHandle(primary, secondary);
+
+ HELPER_METHOD_FRAME_END_POLL();
+
+}
+FCIMPLEND
+
+
+
+FCIMPL1(VOID, DependentHandle::nFree, OBJECTHANDLE handle)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(handle != NULL);
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ DestroyDependentHandle(handle);
+
+ HELPER_METHOD_FRAME_END();
+
+}
+FCIMPLEND
+
+
+
+FCIMPL2(VOID, DependentHandle::nGetPrimary, OBJECTHANDLE handle, Object **outPrimary)
+{
+ FCALL_CONTRACT;
+ _ASSERTE(handle != NULL && outPrimary != NULL);
+ *outPrimary = OBJECTREFToObject(ObjectFromHandle(handle));
+}
+FCIMPLEND
+
+
+
+FCIMPL3(VOID, DependentHandle::nGetPrimaryAndSecondary, OBJECTHANDLE handle, Object **outPrimary, Object **outSecondary)
+{
+ FCALL_CONTRACT;
+ _ASSERTE(handle != NULL && outPrimary != NULL && outSecondary != NULL);
+ *outPrimary = OBJECTREFToObject(ObjectFromHandle(handle));
+ *outSecondary = OBJECTREFToObject(GetDependentHandleSecondary(handle));
+}
+FCIMPLEND
+
diff --git a/src/vm/comdependenthandle.h b/src/vm/comdependenthandle.h
new file mode 100644
index 0000000000..1d3251bab4
--- /dev/null
+++ b/src/vm/comdependenthandle.h
@@ -0,0 +1,52 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: COMDependentHandle.h
+//
+
+//
+// FCall's for the DependentHandle class
+//
+
+
+#ifndef __COMDEPENDENTHANDLE_H__
+#define __COMDEPENDENTHANDLE_H__
+
+#include "fcall.h"
+
+// A dependent handle is conceptually a tuple containing two object references:
+//
+// * A primary object (think key)
+// * A secondary object (think value)
+//
+// The reference to the primary object is (long) weak (it will not keep the object alive). However, the
+// reference to the secondary object is (long) weak if the primary object is dead, and strong if the primary
+// object is alive. (Hence it is a 'dependent' handle, since the strength of the secondary reference depends
+// on the primary.)
+//
+// The effect of these semantics is that, while the DependentHandle exists, the system behaves as
+// if there were a normal strong reference from the primary object to the secondary one.
+//
+// The usefulness of a DependentHandle is that it allows other objects to be 'attached' to a given object. By
+// having a hash table whose entries are dependent handles, you can attach arbitrary objects to another
+// object.
+//
+// If you attempted to do this with an ordinary table, the value would have to be a strong reference, which,
+// if it points back to the key, forms a loop that the GC will not be able to break. DependentHandle is
+// effectively a way of informing the GC about the dependent relationship between the key and the value so
+// that such cycles can be broken.
+//
+// Almost all of the interesting code for DependentHandle is in code:Ref_ScanDependentHandles.
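+//
+// A minimal sketch of the lifetime rule, using the VM-internal calls that appear
+// in comdependenthandle.cpp (illustrative only):
+//
+// OBJECTHANDLE h = GetAppDomain()->CreateDependentHandle(key, value);
+// // While 'key' is reachable, the GC also keeps 'value' alive.
+// // Once 'key' dies, 'value' (if otherwise unreachable) becomes collectible.
+// DestroyDependentHandle(h); // explicit free, as DependentHandle::nFree does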
+class DependentHandle
+{
+public:
+ static FCDECL3(VOID, nInitialize, Object *primary, Object *secondary, OBJECTHANDLE *outHandle);
+ static FCDECL2(VOID, nGetPrimary, OBJECTHANDLE handle, Object **outPrimary);
+ static FCDECL3(VOID, nGetPrimaryAndSecondary, OBJECTHANDLE handle, Object **outPrimary, Object **outSecondary);
+ static FCDECL1(VOID, nFree, OBJECTHANDLE handle);
+};
+
+#endif
+
diff --git a/src/vm/comdynamic.cpp b/src/vm/comdynamic.cpp
new file mode 100644
index 0000000000..13be5f7008
--- /dev/null
+++ b/src/vm/comdynamic.cpp
@@ -0,0 +1,1898 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+////////////////////////////////////////////////////////////////////////////////
+// COMDynamic.cpp
+// This module defines the native methods that are used for dynamic IL generation.
+
+////////////////////////////////////////////////////////////////////////////////
+
+
+#include "common.h"
+#include "field.h"
+#include "comdynamic.h"
+#include "commodule.h"
+#include "reflectclasswriter.h"
+#include "corerror.h"
+#include "iceefilegen.h"
+#include "strongname.h"
+#include "ceefilegenwriter.h"
+#include "typekey.h"
+
+
+//This structure is used in SetMethodIL to walk the exceptions.
+//It maps to System.Reflection.Emit.ExceptionHandler class
+//DO NOT MOVE ANY OF THE FIELDS
+#include <pshpack1.h>
+struct ExceptionInstance {
+ INT32 m_exceptionType;
+ INT32 m_start;
+ INT32 m_end;
+ INT32 m_filterOffset;
+ INT32 m_handle;
+ INT32 m_handleEnd;
+ INT32 m_type;
+};
+#include <poppack.h>
+
+
+//*************************************************************
+//
+// Defining a type into metadata of this dynamic module
+//
+//*************************************************************
+INT32 QCALLTYPE COMDynamicWrite::DefineGenericParam(QCall::ModuleHandle pModule,
+ LPCWSTR wszFullName,
+ INT32 tkParent,
+ INT32 attributes,
+ INT32 position,
+ INT32 * pConstraintTokens)
+{
+ QCALL_CONTRACT;
+
+ mdTypeDef classE = mdTokenNil;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ IfFailThrow(pRCW->GetEmitter()->DefineGenericParam(
+ tkParent, position, attributes, wszFullName, 0, (mdToken *)pConstraintTokens, &classE));
+
+ END_QCALL;
+
+ return (INT32)classE;
+}
+
+INT32 QCALLTYPE COMDynamicWrite::DefineType(QCall::ModuleHandle pModule,
+ LPCWSTR wszFullName,
+ INT32 tkParent,
+ INT32 attributes,
+ INT32 tkEnclosingType,
+ INT32 * pInterfaceTokens)
+{
+ QCALL_CONTRACT;
+
+ mdTypeDef classE = mdTokenNil;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ HRESULT hr;
+
+ if (RidFromToken(tkEnclosingType))
+ {
+ // defining nested type
+ hr = pRCW->GetEmitter()->DefineNestedType(wszFullName,
+ attributes,
+ tkParent == 0 ? mdTypeRefNil : tkParent,
+ (mdToken *)pInterfaceTokens,
+ tkEnclosingType,
+ &classE);
+ }
+ else
+ {
+ // top level type
+ hr = pRCW->GetEmitter()->DefineTypeDef(wszFullName,
+ attributes,
+ tkParent == 0 ? mdTypeRefNil : tkParent,
+ (mdToken *)pInterfaceTokens,
+ &classE);
+ }
+
+ if (hr == META_S_DUPLICATE)
+ {
+ COMPlusThrow(kArgumentException, W("Argument_DuplicateTypeName"));
+ }
+
+ if (FAILED(hr)) {
+ _ASSERTE(hr == E_OUTOFMEMORY || !"DefineTypeDef Failed");
+ COMPlusThrowHR(hr);
+ }
+
+ AllocMemTracker amTracker;
+ pModule->GetClassLoader()->AddAvailableClassDontHaveLock(pModule,
+ classE,
+ &amTracker);
+ amTracker.SuppressRelease();
+
+ END_QCALL;
+
+ return (INT32)classE;
+}
+
+// This function will reset the parent class in metadata
+void QCALLTYPE COMDynamicWrite::SetParentType(QCall::ModuleHandle pModule, INT32 tdType, INT32 tkParent)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ IfFailThrow( pRCW->GetEmitHelper()->SetTypeParent(tdType, tkParent) );
+
+ END_QCALL;
+}
+
+// This function will add another interface impl
+void QCALLTYPE COMDynamicWrite::AddInterfaceImpl(QCall::ModuleHandle pModule, INT32 tdType, INT32 tkInterface)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ IfFailThrow( pRCW->GetEmitHelper()->AddInterfaceImpl(tdType, tkInterface) );
+
+ END_QCALL;
+}
+
+// This function will create a method within the class
+INT32 QCALLTYPE COMDynamicWrite::DefineMethodSpec(QCall::ModuleHandle pModule, INT32 tkParent, LPCBYTE pSignature, INT32 sigLength)
+{
+ QCALL_CONTRACT;
+
+ mdMethodDef memberE = mdTokenNil;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ // Define the Method
+ IfFailThrow( pRCW->GetEmitter()->DefineMethodSpec(tkParent, //ParentTypeDef
+ (PCCOR_SIGNATURE)pSignature, //Blob value of a COM+ signature
+ sigLength, //Size of the signature blob
+ &memberE) ); //[OUT]methodToken
+
+ END_QCALL;
+
+ return (INT32) memberE;
+}
+
+INT32 QCALLTYPE COMDynamicWrite::DefineMethod(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, LPCBYTE pSignature, INT32 sigLength, INT32 attributes)
+{
+ QCALL_CONTRACT;
+
+ mdMethodDef memberE = mdTokenNil;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ // Define the Method
+ IfFailThrow( pRCW->GetEmitter()->DefineMethod(tkParent, //ParentTypeDef
+ wszName, //Name of Member
+ attributes, //Member Attributes (public, etc);
+ (PCCOR_SIGNATURE)pSignature, //Blob value of a COM+ signature
+ sigLength, //Size of the signature blob
+ 0, //Code RVA
+ miIL | miManaged, //Implementation Flags is default to managed IL
+ &memberE) ); //[OUT]methodToken
+
+ END_QCALL;
+
+ return (INT32) memberE;
+}
+
+/*================================DefineField=================================
+**Action:
+**Returns:
+**Arguments:
+**Exceptions:
+==============================================================================*/
+mdFieldDef QCALLTYPE COMDynamicWrite::DefineField(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, LPCBYTE pSignature, INT32 sigLength, INT32 attr)
+{
+ QCALL_CONTRACT;
+
+ mdFieldDef retVal = mdTokenNil;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ //Emit the field.
+ IfFailThrow( pRCW->GetEmitter()->DefineField(tkParent,
+ wszName, attr,
+ (PCCOR_SIGNATURE)pSignature, sigLength,
+ ELEMENT_TYPE_VOID, NULL,
+ (ULONG) -1, &retVal) );
+
+
+ END_QCALL;
+
+ return retVal;
+}
+
+// This method computes the same result as COR_ILMETHOD_SECT_EH::Size(...) but
+// does so in a way that detects overflow if the number of exception clauses is
+// too great (in which case an OOM exception is thrown). We do this rather than
+// modifying COR_ILMETHOD_SECT_EH::Size because that routine is published in the
+// SDK and can't take breaking changes and because the overflow support (and
+// exception mechanism) we're using is only available to the VM.
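+//
+// The two candidate sizes computed below (each with an overflow check) are:
+// small: sizeof(COR_ILMETHOD_SECT_EH_SMALL) + (n-1)*sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL)
+// fat: sizeof(COR_ILMETHOD_SECT_EH_FAT) + (n-1)*sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT)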
+UINT32 ExceptionHandlingSize(unsigned uNumExceptions, COR_ILMETHOD_SECT_EH_CLAUSE_FAT* pClauses)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (uNumExceptions == 0)
+ return 0;
+
+ // Speculatively compute the size for the slim version of the header.
+ S_UINT32 uSmallSize = S_UINT32(sizeof(COR_ILMETHOD_SECT_EH_SMALL)) +
+ (S_UINT32(sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL)) * (S_UINT32(uNumExceptions - 1)));
+
+ if (uSmallSize.IsOverflow())
+ COMPlusThrowOM();
+
+ if (uSmallSize.Value() > COR_ILMETHOD_SECT_SMALL_MAX_DATASIZE)
+ goto FatCase;
+
+ // Check whether any of the clauses won't fit in the slim case.
+ for (UINT32 i = 0; i < uNumExceptions; i++) {
+ COR_ILMETHOD_SECT_EH_CLAUSE_FAT* pFatClause = (COR_ILMETHOD_SECT_EH_CLAUSE_FAT*)&pClauses[i];
+ if (pFatClause->GetTryOffset() > 0xFFFF ||
+ pFatClause->GetTryLength() > 0xFF ||
+ pFatClause->GetHandlerOffset() > 0xFFFF ||
+ pFatClause->GetHandlerLength() > 0xFF) {
+ goto FatCase;
+ }
+ }
+
+ _ASSERTE(uSmallSize.Value() == COR_ILMETHOD_SECT_EH::Size(uNumExceptions, pClauses));
+ return uSmallSize.Value();
+
+ FatCase:
+ S_UINT32 uFatSize = S_UINT32(sizeof(COR_ILMETHOD_SECT_EH_FAT)) +
+ (S_UINT32(sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT)) * (S_UINT32(uNumExceptions - 1)));
+
+ if (uFatSize.IsOverflow())
+ COMPlusThrowOM();
+
+ _ASSERTE(uFatSize.Value() == COR_ILMETHOD_SECT_EH::Size(uNumExceptions, pClauses));
+ return uFatSize.Value();
+}
+
+
+// SetMethodIL -- This function will create a method within the class
+void QCALLTYPE COMDynamicWrite::SetMethodIL(QCall::ModuleHandle pModule,
+ INT32 tk,
+ BOOL fIsInitLocal,
+ LPCBYTE pBody,
+ INT32 cbBody,
+ LPCBYTE pLocalSig,
+ INT32 sigLength,
+ UINT16 maxStackSize,
+ ExceptionInstance * pExceptions,
+ INT32 numExceptions,
+ INT32 * pTokenFixups,
+ INT32 numTokenFixups)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ _ASSERTE(pLocalSig);
+
+ PCCOR_SIGNATURE pcSig = (PCCOR_SIGNATURE)pLocalSig;
+ _ASSERTE(*pcSig == IMAGE_CEE_CS_CALLCONV_LOCAL_SIG);
+
+ mdSignature pmLocalSigToken;
+ if (sigLength==2 && pcSig[0]==0 && pcSig[1]==0)
+ {
+ //This is an empty local variable sig
+ pmLocalSigToken=0;
+ }
+ else
+ {
+ IfFailThrow(pRCW->GetEmitter()->GetTokenFromSig( pcSig, sigLength, &pmLocalSigToken));
+ }
+
+ COR_ILMETHOD_FAT fatHeader;
+
+ // Set fatHeader.Flags to CorILMethod_InitLocals if the user wants to zero-init the stack frame.
+ //
+ fatHeader.SetFlags(fIsInitLocal ? CorILMethod_InitLocals : 0);
+ fatHeader.SetMaxStack(maxStackSize);
+ fatHeader.SetLocalVarSigTok(pmLocalSigToken);
+ fatHeader.SetCodeSize(cbBody);
+ bool moreSections = (numExceptions != 0);
+
+ unsigned codeSizeAligned = fatHeader.GetCodeSize();
+ if (moreSections)
+ codeSizeAligned = AlignUp(codeSizeAligned, 4); // to ensure the EH section is aligned
+ unsigned headerSize = COR_ILMETHOD::Size(&fatHeader, numExceptions != 0);
+
+ //Create the exception handlers.
+ CQuickArray<COR_ILMETHOD_SECT_EH_CLAUSE_FAT> clauses;
+ if (numExceptions > 0)
+ {
+ clauses.AllocThrows(numExceptions);
+
+ for (int i = 0; i < numExceptions; i++)
+ {
+ clauses[i].SetFlags((CorExceptionFlag)(pExceptions[i].m_type));
+ clauses[i].SetTryOffset(pExceptions[i].m_start);
+ clauses[i].SetTryLength(pExceptions[i].m_end - pExceptions[i].m_start);
+ clauses[i].SetHandlerOffset(pExceptions[i].m_handle);
+ clauses[i].SetHandlerLength(pExceptions[i].m_handleEnd - pExceptions[i].m_handle);
+ if (pExceptions[i].m_type == COR_ILEXCEPTION_CLAUSE_FILTER)
+ {
+ clauses[i].SetFilterOffset(pExceptions[i].m_filterOffset);
+ }
+ else if (pExceptions[i].m_type!=COR_ILEXCEPTION_CLAUSE_FINALLY)
+ {
+ clauses[i].SetClassToken(pExceptions[i].m_exceptionType);
+ }
+ else
+ {
+ clauses[i].SetClassToken(mdTypeRefNil);
+ }
+ }
+ }
+
+ unsigned ehSize = ExceptionHandlingSize(numExceptions, clauses.Ptr());
+ S_UINT32 totalSizeSafe = S_UINT32(headerSize) + S_UINT32(codeSizeAligned) + S_UINT32(ehSize);
+ if (totalSizeSafe.IsOverflow())
+ COMPlusThrowOM();
+ UINT32 totalSize = totalSizeSafe.Value();
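+ // The method body buffer is laid out as:
+ // [header (headerSize)][IL code, DWORD-aligned when an EH section follows
+ // (codeSizeAligned)][EH section (ehSize)]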
+ ICeeGen* pGen = pRCW->GetCeeGen();
+ BYTE* buf = NULL;
+ ULONG methodRVA;
+ pGen->AllocateMethodBuffer(totalSize, &buf, &methodRVA);
+ if (buf == NULL)
+ COMPlusThrowOM();
+
+ _ASSERTE(buf != NULL);
+ _ASSERTE((((size_t) buf) & 3) == 0); // header is dword aligned
+
+#ifdef _DEBUG
+ BYTE* endbuf = &buf[totalSize];
+#endif
+
+ BYTE * startBuf = buf;
+
+ // Emit the header
+ buf += COR_ILMETHOD::Emit(headerSize, &fatHeader, moreSections, buf);
+
+ //Emit the code
+ //The fatHeader.CodeSize check is a workaround to see if we have an interface or an
+ //abstract method (neither of which has an IL body). Force enough verification in
+ //native to ensure that this is true.
+ if (fatHeader.GetCodeSize()!=0) {
+ memcpy(buf, pBody, fatHeader.GetCodeSize());
+ }
+ buf += codeSizeAligned;
+
+ // Emit the eh
+ CQuickArray<ULONG> ehTypeOffsets;
+ if (numExceptions > 0)
+ {
+ // Allocate space for the offsets to the TypeTokens in the Exception headers
+ // in the IL stream.
+ ehTypeOffsets.AllocThrows(numExceptions);
+
+ // Emit the eh. This will update the array ehTypeOffsets with offsets
+ // to Exception type tokens. The offsets are with reference to the
+ // beginning of eh section.
+ buf += COR_ILMETHOD_SECT_EH::Emit(ehSize, numExceptions, clauses.Ptr(),
+ false, buf, ehTypeOffsets.Ptr());
+ }
+ _ASSERTE(buf == endbuf);
+
+ //Get the IL Section.
+ HCEESECTION ilSection;
+ IfFailThrow(pGen->GetIlSection(&ilSection));
+
+ // Token Fixup data...
+ ULONG ilOffset = methodRVA + headerSize;
+
+ //Add all of the relocs based on the info which I saved from ILGenerator.
+
+ //Add the Token Fixups
+ for (int iTokenFixup=0; iTokenFixup<numTokenFixups; iTokenFixup++)
+ {
+ IfFailThrow(pGen->AddSectionReloc(ilSection, pTokenFixups[iTokenFixup] + ilOffset, ilSection, srRelocMapToken));
+ }
+
+ // Add token fixups for exception type tokens.
+ for (int iException=0; iException < numExceptions; iException++)
+ {
+ if (ehTypeOffsets[iException] != (ULONG) -1)
+ {
+ IfFailThrow(pGen->AddSectionReloc(
+ ilSection,
+ ehTypeOffsets[iException] + codeSizeAligned + ilOffset,
+ ilSection, srRelocMapToken));
+ }
+ }
+
+ //nasty interface workaround. What does this mean for abstract methods?
+ if (fatHeader.GetCodeSize() != 0)
+ {
+ // Add the starting address of the IL blob to the IL blob hash table.
+ // We need to find this information from out of process for debugger inspection
+ // APIs, so we have to store it where we can get it later.
+ pModule->SetDynamicIL(mdToken(tk), TADDR(startBuf), FALSE);
+
+ DWORD dwImplFlags;
+
+ //Set the RVA of the method.
+ IfFailThrow(pRCW->GetMDImport()->GetMethodImplProps(tk, NULL, &dwImplFlags));
+ dwImplFlags |= (miManaged | miIL);
+ IfFailThrow(pRCW->GetEmitter()->SetMethodProps(tk, (DWORD) -1, methodRVA, dwImplFlags));
+ }
+
+ END_QCALL;
+}
+
+void QCALLTYPE COMDynamicWrite::TermCreateClass(QCall::ModuleHandle pModule, INT32 tk, QCall::ObjectHandleOnStack retType)
+{
+ QCALL_CONTRACT;
+
+ TypeHandle typeHnd;
+
+ BEGIN_QCALL;
+
+ _ASSERTE(pModule->GetReflectionModule()->GetClassWriter());
+
+ // Use the same service, regardless of whether we are generating a normal
+ // class, or the special class for the module that holds global functions
+ // & methods.
+ pModule->GetReflectionModule()->AddClass(tk);
+
+ // manually load the class if it is not the global type
+ if (!IsNilToken(tk))
+ {
+ TypeKey typeKey(pModule, tk);
+ typeHnd = pModule->GetClassLoader()->LoadTypeHandleForTypeKey(&typeKey, TypeHandle());
+ }
+
+ if (!typeHnd.IsNull())
+ {
+ GCX_COOP();
+ retType.Set(typeHnd.GetManagedClassObject());
+ }
+
+ END_QCALL;
+
+ return;
+}
+
+/*============================SetPInvokeData============================
+**Action:
+**Returns:
+**Arguments:
+**Exceptions:
+==============================================================================*/
+void QCALLTYPE COMDynamicWrite::SetPInvokeData(QCall::ModuleHandle pModule, LPCWSTR wszDllName, LPCWSTR wszFunctionName, INT32 token, INT32 linkFlags)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ mdModuleRef mrImportDll = mdTokenNil;
+ IfFailThrow(pRCW->GetEmitter()->DefineModuleRef(wszDllName, &mrImportDll));
+
+ IfFailThrow(pRCW->GetEmitter()->DefinePinvokeMap(
+ token, // the method token
+ linkFlags, // the mapping flags
+ wszFunctionName, // function name
+ mrImportDll));
+
+ IfFailThrow(pRCW->GetEmitter()->SetMethodProps(token, (DWORD) -1, 0x0, miIL));
+
+ END_QCALL;
+}
+
+/*============================DefineProperty============================
+**Action:
+**Returns:
+**Arguments:
+**Exceptions:
+==============================================================================*/
+INT32 QCALLTYPE COMDynamicWrite::DefineProperty(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, INT32 attr, LPCBYTE pSignature, INT32 sigLength)
+{
+ QCALL_CONTRACT;
+
+ mdProperty pr = mdTokenNil;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ // Define the Property
+ IfFailThrow(pRCW->GetEmitter()->DefineProperty(
+ tkParent, // ParentTypeDef
+ wszName, // Name of Member
+ attr, // property Attributes (prDefaultProperty, etc);
+ (PCCOR_SIGNATURE)pSignature, // Blob value of a COM+ signature
+ sigLength, // Size of the signature blob
+ ELEMENT_TYPE_VOID, // don't specify the default value
+ 0, // no default value
+ (ULONG) -1, // optional length
+ mdMethodDefNil, // no setter
+ mdMethodDefNil, // no getter
+ NULL, // no other methods
+ &pr));
+
+ END_QCALL;
+
+ return (INT32)pr;
+}
+
+/*============================DefineEvent============================
+**Action:
+**Returns:
+**Arguments:
+**Exceptions:
+==============================================================================*/
+INT32 QCALLTYPE COMDynamicWrite::DefineEvent(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, INT32 attr, INT32 tkEventType)
+{
+ QCALL_CONTRACT;
+
+ mdProperty ev = mdTokenNil;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ // Define the Event
+ IfFailThrow(pRCW->GetEmitHelper()->DefineEventHelper(
+ tkParent, // ParentTypeDef
+ wszName, // Name of Member
+ attr, // property Attributes (prDefaultProperty, etc);
+ tkEventType, // the event type. Can be TypeDef or TypeRef
+ &ev));
+
+ END_QCALL;
+
+ return (INT32)ev;
+}
+
+/*============================DefineMethodSemantics============================
+**Action:
+**Returns:
+**Arguments:
+**Exceptions:
+==============================================================================*/
+void QCALLTYPE COMDynamicWrite::DefineMethodSemantics(QCall::ModuleHandle pModule, INT32 tkAssociation, INT32 attr, INT32 tkMethod)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ // Define the MethodSemantics
+ IfFailThrow(pRCW->GetEmitHelper()->DefineMethodSemanticsHelper(
+ tkAssociation,
+ attr,
+ tkMethod));
+
+ END_QCALL;
+}
+
+/*============================SetMethodImpl============================
+** To set a Method's Implementation flags
+==============================================================================*/
+void QCALLTYPE COMDynamicWrite::SetMethodImpl(QCall::ModuleHandle pModule, INT32 tkMethod, INT32 attr)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ // Set the methodimpl flags
+ IfFailThrow(pRCW->GetEmitter()->SetMethodImplFlags(
+ tkMethod,
+ attr)); // change the impl flags
+
+ END_QCALL;
+}
+
+/*============================DefineMethodImpl============================
+** Define a MethodImpl record
+==============================================================================*/
+void QCALLTYPE COMDynamicWrite::DefineMethodImpl(QCall::ModuleHandle pModule, UINT32 tkType, UINT32 tkBody, UINT32 tkDecl)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ // Define the MethodImpl record
+ IfFailThrow(pRCW->GetEmitter()->DefineMethodImpl(
+ tkType,
+ tkBody,
+ tkDecl));
+
+ END_QCALL;
+}
+
+/*============================GetTokenFromSig============================
+**Action:
+**Returns:
+**Arguments:
+**Exceptions:
+==============================================================================*/
+INT32 QCALLTYPE COMDynamicWrite::GetTokenFromSig(QCall::ModuleHandle pModule, LPCBYTE pSignature, INT32 sigLength)
+{
+ QCALL_CONTRACT;
+
+ mdSignature retVal = 0;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ _ASSERTE(pSignature);
+
+ // Define the signature
+ IfFailThrow(pRCW->GetEmitter()->GetTokenFromSig(
+ pSignature, // Signature blob
+ sigLength, // blob length
+ &retVal)); // returned token
+
+ END_QCALL;
+
+ return (INT32)retVal;
+}
+
+/*============================SetParamInfo============================
+**Action: Helper to set parameter information
+**Returns:
+**Arguments:
+**Exceptions:
+==============================================================================*/
+INT32 QCALLTYPE COMDynamicWrite::SetParamInfo(QCall::ModuleHandle pModule, UINT32 tkMethod, UINT32 iSequence, UINT32 iAttributes, LPCWSTR wszParamName)
+{
+ QCALL_CONTRACT;
+
+ mdParamDef retVal = 0;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ // Define the parameter
+ IfFailThrow(pRCW->GetEmitter()->DefineParam(
+ tkMethod,
+ iSequence, // sequence of the parameter
+ wszParamName,
+ iAttributes, // parameter attributes
+ ELEMENT_TYPE_VOID,
+ 0,
+ (ULONG) -1,
+ &retVal));
+
+ END_QCALL;
+
+ return (INT32)retVal;
+}
+
+#ifndef FEATURE_CORECLR
+/*============================CWSetMarshal============================
+**Action: Helper to set marshal information
+**Returns:
+**Arguments:
+**Exceptions:
+==============================================================================*/
+void QCALLTYPE COMDynamicWrite::SetFieldMarshal(QCall::ModuleHandle pModule, UINT32 tk, LPCBYTE pMarshal, INT32 cbMarshal)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ _ASSERTE(pMarshal);
+
+ // Set the field marshal information
+ IfFailThrow(pRCW->GetEmitter()->SetFieldMarshal(
+ tk,
+ (PCCOR_SIGNATURE)pMarshal, // marshal blob
+ cbMarshal)); // blob length
+
+ END_QCALL;
+}
+#endif
+
+/*============================SetConstantValue============================
+**Action: Helper to set constant value to field or parameter
+**Returns:
+**Arguments:
+**Exceptions:
+==============================================================================*/
+void QCALLTYPE COMDynamicWrite::SetConstantValue(QCall::ModuleHandle pModule, UINT32 tk, DWORD valueCorType, LPVOID pValue)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ HRESULT hr;
+
+ if (TypeFromToken(tk) == mdtFieldDef)
+ {
+ hr = pRCW->GetEmitter()->SetFieldProps(
+ tk, // [IN] The FieldDef.
+ ULONG_MAX, // [IN] Field attributes.
+ valueCorType, // [IN] Flag for the value type, selected ELEMENT_TYPE_*
+ pValue, // [IN] Constant value.
+ (ULONG) -1); // [IN] Optional length.
+ }
+ else if (TypeFromToken(tk) == mdtProperty)
+ {
+ hr = pRCW->GetEmitter()->SetPropertyProps(
+ tk, // [IN] The PropertyDef.
+ ULONG_MAX, // [IN] Property attributes.
+ valueCorType, // [IN] Flag for the value type, selected ELEMENT_TYPE_*
+ pValue, // [IN] Constant value.
+ (ULONG) -1, // [IN] Optional length.
+ mdMethodDefNil, // [IN] Getter method.
+ mdMethodDefNil, // [IN] Setter method.
+ NULL); // [IN] Other methods.
+ }
+ else
+ {
+ hr = pRCW->GetEmitter()->SetParamProps(
+ tk, // [IN] The ParamDef.
+ NULL,
+ ULONG_MAX, // [IN] Parameter attributes.
+ valueCorType, // [IN] Flag for the value type, selected ELEMENT_TYPE_*
+ pValue, // [IN] Constant value.
+ (ULONG) -1); // [IN] Optional length.
+ }
+ if (FAILED(hr)) {
+ _ASSERTE(!"Setting the default value failed");
+ COMPlusThrow(kArgumentException, W("Argument_BadConstantValue"));
+ }
+
+ END_QCALL;
+}
+
+/*============================SetFieldLayoutOffset============================
+**Action: set fieldlayout of a field
+**Returns:
+**Arguments:
+**Exceptions:
+==============================================================================*/
+void QCALLTYPE COMDynamicWrite::SetFieldLayoutOffset(QCall::ModuleHandle pModule, INT32 tkField, INT32 iOffset)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ // Set the field layout
+ IfFailThrow(pRCW->GetEmitHelper()->SetFieldLayoutHelper(
+ tkField, // field
+ iOffset)); // layout offset
+
+ END_QCALL;
+}
+
+
+/*============================SetClassLayout============================
+**Action:
+**Returns:
+**Arguments:
+**Exceptions:
+==============================================================================*/
+void QCALLTYPE COMDynamicWrite::SetClassLayout(QCall::ModuleHandle pModule, INT32 tk, INT32 iPackSize, UINT32 iTotalSize)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter* pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ // Define the packing size and total size of a class
+ IfFailThrow(pRCW->GetEmitter()->SetClassLayout(
+ tk, // Typedef
+ iPackSize, // packing size
+ NULL, // no field layout
+ iTotalSize)); // total size for the type
+
+ END_QCALL;
+}
+
+/*===============================UpdateMethodRVAs===============================
+**Action: Update the RVAs in all of the methods associated with a particular typedef
+** prior to emitting them to a PE.
+**Returns: Void
+**Arguments:
+**Exceptions:
+==============================================================================*/
+void COMDynamicWrite::UpdateMethodRVAs(IMetaDataEmit *pEmitNew,
+ IMetaDataImport *pImportNew,
+ ICeeFileGen *pCeeFileGen,
+ HCEEFILE ceeFile,
+ mdTypeDef td,
+ HCEESECTION sdataSection)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ HCORENUM hEnum=0;
+ ULONG methRVA;
+ ULONG newMethRVA;
+ ULONG sdataSectionRVA = 0;
+ mdMethodDef md;
+ mdFieldDef fd;
+ ULONG count;
+ DWORD dwFlags=0;
+ DWORD implFlags=0;
+ HRESULT hr;
+
+ // Look at the typedef flags. Skip tdimport classes.
+ if (!IsNilToken(td))
+ {
+ IfFailGo(pImportNew->GetTypeDefProps(td, 0,0,0, &dwFlags, 0));
+ if (IsTdImport(dwFlags))
+ goto ErrExit;
+ }
+
+ //Get an enumerator and use it to walk all of the methods defined by td.
+ while ((hr = pImportNew->EnumMethods(
+ &hEnum,
+ td,
+ &md,
+ 1,
+ &count)) == S_OK) {
+
+ IfFailGo( pImportNew->GetMethodProps(
+ md,
+ NULL,
+ NULL, // don't get method name
+ 0,
+ NULL,
+ &dwFlags,
+ NULL,
+ NULL,
+ &methRVA,
+ &implFlags) );
+
+ // If this method isn't implemented here, don't bother correcting its RVA.
+ // Otherwise, get the correct RVA from our ICeeFileGen and put it back into our local
+ // copy of the metadata
+ //
+ if ( IsMdAbstract(dwFlags) || IsMdPinvokeImpl(dwFlags) ||
+ IsMiNative(implFlags) || IsMiRuntime(implFlags) ||
+ IsMiForwardRef(implFlags))
+ {
+ continue;
+ }
+
+ IfFailGo( pCeeFileGen->GetMethodRVA(ceeFile, methRVA, &newMethRVA) );
+ IfFailGo( pEmitNew->SetRVA(md, newMethRVA) );
+ }
+
+ if (hEnum) {
+ pImportNew->CloseEnum( hEnum);
+ }
+ hEnum = 0;
+
+ // Walk through all of the fields belonging to this TypeDef. If a field is marked fdHasFieldRVA, we need to
+ // update its RVA value.
+ while ((hr = pImportNew->EnumFields(
+ &hEnum,
+ td,
+ &fd,
+ 1,
+ &count)) == S_OK) {
+
+ IfFailGo( pImportNew->GetFieldProps(
+ fd,
+ NULL, // don't need the parent class
+ NULL, // don't get method name
+ 0,
+ NULL,
+ &dwFlags, // field flags
+ NULL, // don't need the signature
+ NULL,
+ NULL, // don't need the constant value
+ 0,
+ NULL) );
+
+ if ( IsFdHasFieldRVA(dwFlags) )
+ {
+ if (sdataSectionRVA == 0)
+ {
+ IfFailGo( pCeeFileGen->GetSectionCreate (ceeFile, ".sdata", sdReadWrite, &(sdataSection)) );
+ IfFailGo( pCeeFileGen->GetSectionRVA(sdataSection, &sdataSectionRVA) );
+ }
+
+ IfFailGo( pImportNew->GetRVA(fd, &methRVA, NULL) );
+ newMethRVA = methRVA + sdataSectionRVA;
+ IfFailGo( pEmitNew->SetFieldRVA(fd, newMethRVA) );
+ }
+ }
+
+ if (hEnum) {
+ pImportNew->CloseEnum( hEnum);
+ }
+ hEnum = 0;
+
+ErrExit:
+ if (FAILED(hr)) {
+ _ASSERTE(!"UpdateRVA failed");
+ COMPlusThrowHR(hr);
+ }
+}
+
+void QCALLTYPE COMDynamicWrite::DefineCustomAttribute(QCall::ModuleHandle pModule, INT32 token, INT32 conTok, LPCBYTE pBlob, INT32 cbBlob, BOOL toDisk, BOOL updateCompilerFlags)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter* pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ HRESULT hr;
+ mdCustomAttribute retToken;
+
+ if (toDisk && pRCW->GetOnDiskEmitter())
+ {
+ hr = pRCW->GetOnDiskEmitter()->DefineCustomAttribute(
+ token,
+ conTok,
+ pBlob,
+ cbBlob,
+ &retToken);
+ }
+ else
+ {
+ hr = pRCW->GetEmitter()->DefineCustomAttribute(
+ token,
+ conTok,
+ pBlob,
+ cbBlob,
+ &retToken);
+ }
+
+ if (FAILED(hr))
+ {
+ // See if the metadata engine gave us any error information.
+ SafeComHolderPreemp<IErrorInfo> pIErrInfo;
+ BSTRHolder bstrMessage;
+ if (SafeGetErrorInfo(&pIErrInfo) == S_OK)
+ {
+ if (SUCCEEDED(pIErrInfo->GetDescription(&bstrMessage)) && bstrMessage != NULL)
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_CA_EX, bstrMessage);
+ }
+
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_CA);
+ }
+
+ if (updateCompilerFlags)
+ {
+ DWORD flags = 0;
+ DWORD mask = ~(DACF_OBSOLETE_TRACK_JIT_INFO | DACF_IGNORE_PDBS | DACF_ALLOW_JIT_OPTS) & DACF_CONTROL_FLAGS_MASK;
+
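+ // The DebuggableAttribute blob begins with the standard custom attribute
+ // prolog (pBlob[0..1] == 01 00); pBlob[2] and pBlob[3] carry the
+ // DebuggingModes bits inspected below.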
+ if ((cbBlob != 6) && (cbBlob != 8))
+ {
+ _ASSERTE(!"COMDynamicWrite::CWInternalCreateCustomAttribute - unexpected size for DebuggableAttribute\n");
+ }
+ else if ( !((pBlob[0] == 1) && (pBlob[1] == 0)) )
+ {
+ _ASSERTE(!"COMDynamicWrite::CWInternalCreateCustomAttribute - bad format for DebuggableAttribute\n");
+ }
+
+ if (pBlob[2] & 0x1)
+ {
+ flags |= DACF_OBSOLETE_TRACK_JIT_INFO;
+ }
+
+ if (pBlob[2] & 0x2)
+ {
+ flags |= DACF_IGNORE_PDBS;
+ }
+
+ if ( ((pBlob[2] & 0x1) == 0) || (pBlob[3] == 0) )
+ {
+ flags |= DACF_ALLOW_JIT_OPTS;
+ }
+
+ Assembly* pAssembly = pModule->GetAssembly();
+ DomainAssembly* pDomainAssembly = pAssembly->GetDomainAssembly();
+
+ // Dynamic assemblies should be 1:1 with DomainAssemblies.
+ _ASSERTE(!pAssembly->IsDomainNeutral());
+
+ DWORD actualFlags;
+ actualFlags = ((DWORD)pDomainAssembly->GetDebuggerInfoBits() & mask) | flags;
+ pDomainAssembly->SetDebuggerInfoBits((DebuggerAssemblyControlFlags)actualFlags);
+
+ actualFlags = ((DWORD)pAssembly->GetDebuggerInfoBits() & mask) | flags;
+ pAssembly->SetDebuggerInfoBits((DebuggerAssemblyControlFlags)actualFlags);
+
+ ModuleIterator i = pAssembly->IterateModules();
+ while (i.Next())
+ {
+ actualFlags = ((DWORD)(i.GetModule()->GetDebuggerInfoBits()) & mask) | flags;
+ i.GetModule()->SetDebuggerInfoBits((DebuggerAssemblyControlFlags)actualFlags);
+ }
+ }
+
+ END_QCALL;
+}
+
+void ManagedBitnessFlagsToUnmanagedBitnessFlags(
+ INT32 portableExecutableKind, INT32 imageFileMachine,
+ DWORD* pPeFlags, DWORD* pCorhFlags)
+{
+ if (portableExecutableKind & peILonly)
+ *pCorhFlags |= COMIMAGE_FLAGS_ILONLY;
+
+ if (portableExecutableKind & pe32BitPreferred)
+ COR_SET_32BIT_PREFERRED(*pCorhFlags);
+
+ if (portableExecutableKind & pe32BitRequired)
+ COR_SET_32BIT_REQUIRED(*pCorhFlags);
+
+ *pPeFlags |= ICEE_CREATE_FILE_CORMAIN_STUB;
+
+ if (imageFileMachine == IMAGE_FILE_MACHINE_I386)
+ *pPeFlags |= ICEE_CREATE_MACHINE_I386|ICEE_CREATE_FILE_PE32;
+
+ else if (imageFileMachine == IMAGE_FILE_MACHINE_IA64)
+ *pPeFlags |= ICEE_CREATE_MACHINE_IA64|ICEE_CREATE_FILE_PE64;
+
+ else if (imageFileMachine == IMAGE_FILE_MACHINE_AMD64)
+ *pPeFlags |= ICEE_CREATE_MACHINE_AMD64|ICEE_CREATE_FILE_PE64;
+
+ else if (imageFileMachine == IMAGE_FILE_MACHINE_ARMNT)
+ *pPeFlags |= ICEE_CREATE_MACHINE_ARM|ICEE_CREATE_FILE_PE32;
+}
+
+#ifndef FEATURE_CORECLR
+//=============================PreSavePEFile=====================================*/
+// PreSave the PEFile
+//==============================================================================*/
+void QCALLTYPE COMDynamicWrite::PreSavePEFile(QCall::ModuleHandle pModule, INT32 portableExecutableKind, INT32 imageFileMachine)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter *pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ DWORD peFlags = 0, corhFlags = 0;
+ ManagedBitnessFlagsToUnmanagedBitnessFlags(portableExecutableKind, imageFileMachine, &peFlags, &corhFlags);
+ IfFailThrow(pRCW->EnsureCeeFileGenCreated(corhFlags, peFlags));
+
+ ICeeFileGen *pCeeFileGen = pRCW->GetCeeFileGen();
+ HCEEFILE ceeFile = pRCW->GetHCEEFILE();
+ _ASSERTE(ceeFile && pCeeFileGen);
+
+ // We should not have the on disk emitter yet
+ if (pRCW->GetOnDiskEmitter() != NULL)
+ pRCW->SetOnDiskEmitter(NULL);
+
+ // Get the dispenser.
+ SafeComHolderPreemp<IMetaDataDispenserEx> pDisp;
+ IfFailThrow(MetaDataGetDispenser(CLSID_CorMetaDataDispenser, IID_IMetaDataDispenserEx, (void**)&pDisp));
+
+ //Get the emitter and the importer
+ IMetaDataImport *pImport = pRCW->GetRWImporter();
+ IMetaDataEmit *pEmit = pRCW->GetEmitter();
+ _ASSERTE((pEmit != NULL ) && (pImport != NULL));
+
+ // Set the option on the dispenser to turn on duplicate checking for TypeDef and ModuleRef
+ VARIANT varOption;
+ V_VT(&varOption) = VT_UI4;
+ V_I4(&varOption) = MDDupDefault | MDDupTypeDef | MDDupModuleRef | MDDupExportedType | MDDupAssemblyRef | MDDupFile | MDDupAssembly;
+ IfFailThrow(pDisp->SetOption(MetaDataCheckDuplicatesFor, &varOption));
+
+ V_VT(&varOption) = VT_UI4;
+ V_I4(&varOption) = MDRefToDefNone;
+ IfFailThrow(pDisp->SetOption(MetaDataRefToDefCheck, &varOption));
+
+ V_VT(&varOption) = VT_UI4;
+ V_I4(&varOption) = MergeManifest;
+ IfFailThrow(pDisp->SetOption(MetaDataMergerOptions, &varOption));
+
+ //Define an empty scope
+ SafeComHolderPreemp<IMetaDataEmit> pEmitNew;
+ IfFailThrow(pDisp->DefineScope(CLSID_CorMetaDataRuntime, 0, IID_IMetaDataEmit, (IUnknown**)&pEmitNew));
+
+ // Tokens can move upon merge. Get the IMapToken from the CeeFileGen that is created for save
+ // and pass it to Merge to receive token-movement notifications.
+ // Note that this is not a long-term fix. We are relying on the fact that the tokens embedded
+ // in the PE cannot move after the merge. These tokens are TypeDef, TypeRef, MethodDef, FieldDef, MemberRef,
+ // TypeSpec, UserString. If this is no longer true, this can break!
+ //
+ // Note that we don't need to release pIMapToken because it is not AddRef'ed in the GetIMapTokenIfaceEx.
+ //
+ IUnknown *pUnknown = NULL;
+ IfFailThrow(pCeeFileGen->GetIMapTokenIfaceEx(ceeFile, pEmit, &pUnknown));
+
+ SafeComHolderPreemp<IMapToken> pIMapToken;
+ IfFailThrow(SafeQueryInterfacePreemp(pUnknown, IID_IMapToken, (IUnknown**) &pIMapToken));
+
+ // get the unmanaged writer.
+ ISymUnmanagedWriter *pWriter = pModule->GetReflectionModule()->GetISymUnmanagedWriter();
+ SafeComHolderPreemp<CSymMapToken> pSymMapToken(new CSymMapToken(pWriter, pIMapToken));
+
+ //Merge the old tokens into the new (empty) scope
+ //This is a copy.
+ IfFailThrow(pEmitNew->Merge(pImport, pSymMapToken, NULL));
+ IfFailThrow(pEmitNew->MergeEnd());
+
+ // Update the Module name in the new scope.
+ CQuickArray<WCHAR> cqModuleName;
+ ULONG cchName;
+
+ IfFailThrow(pImport->GetScopeProps(0, 0, &cchName, 0));
+
+ cqModuleName.ReSizeThrows(cchName);
+
+ IfFailThrow(pImport->GetScopeProps(cqModuleName.Ptr(), cchName, &cchName, 0));
+ IfFailThrow(pEmitNew->SetModuleProps(cqModuleName.Ptr()));
+
+ // Cache pEmitNew on the RCW.
+ pRCW->SetOnDiskEmitter(pEmitNew);
+
+ END_QCALL;
+} // COMDynamicWrite::PreSavePEFile
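+
+// A rough sketch of the assumed save pipeline (not spelled out in this file):
+// the managed AssemblyBuilder.Save path calls PreSavePEFile to merge the
+// module's metadata into a fresh on-disk scope, emits any to-disk custom
+// attributes against that on-disk emitter, and then calls SavePEFile below to
+// lay out sections, fix up method RVAs, and write the image.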
+
+//=============================SavePEFile=====================================*/
+// Save the PEFile to disk
+//==============================================================================*/
+void QCALLTYPE COMDynamicWrite::SavePEFile(QCall::ModuleHandle pModule, LPCWSTR wszPeName, UINT32 entryPoint, UINT32 fileKind, BOOL isManifestFile)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ HRESULT hr=S_OK;
+ HCORENUM hTypeDefs=0;
+ mdTypeDef td;
+ ULONG count;
+ IMetaDataImport *pImportNew = 0;
+ ULONG newMethRVA;
+ DWORD metaDataSize;
+ BYTE *metaData;
+ ULONG metaDataOffset;
+ HCEESECTION pILSection;
+ ISymUnmanagedWriter *pWriter = NULL;
+
+ if (wszPeName==NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_String"));
+ if (wszPeName[0] == '\0')
+ COMPlusThrow(kFormatException, W("Format_StringZeroLength"));
+
+ Assembly * pAssembly = pModule->GetAssembly();
+ _ASSERTE( pAssembly );
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ ICeeFileGen * pCeeFileGen = pRCW->GetCeeFileGen();
+ HCEEFILE ceeFile = pRCW->GetHCEEFILE();
+ _ASSERTE(ceeFile && pCeeFileGen);
+
+ IMetaDataEmit * pEmitNew = pRCW->GetOnDiskEmitter();
+ _ASSERTE(pEmitNew);
+
+ //Get the emitter and the importer
+
+ if (pAssembly->IsDynamic() && isManifestFile)
+ {
+ // manifest is stored in this file
+
+ // Allocate space for a strong name signature if an originator was supplied
+ // (this doesn't strong name the assembly, but it makes it possible to do so
+ // as a post processing step).
+ if (pAssembly->IsStrongNamed())
+ IfFailGo(pAssembly->AllocateStrongNameSignature(pCeeFileGen, ceeFile));
+ }
+
+ //Set the Output FileName
+ IfFailGo( pCeeFileGen->SetOutputFileName(ceeFile, (LPWSTR)wszPeName) );
+
+ //Set the entry point, or set the DLL switch below if we're creating a DLL.
+ if (entryPoint!=0)
+ {
+ IfFailGo( pCeeFileGen->SetEntryPoint(ceeFile, entryPoint) );
+ }
+
+ switch (fileKind)
+ {
+ case Dll:
+ {
+ IfFailGo( pCeeFileGen->SetDllSwitch(ceeFile, true) );
+ break;
+ }
+ case WindowApplication:
+ {
+ // Windows GUI application. Set the SubSystem
+ IfFailGo( pCeeFileGen->SetSubsystem(ceeFile, IMAGE_SUBSYSTEM_WINDOWS_GUI, CEE_IMAGE_SUBSYSTEM_MAJOR_VERSION, CEE_IMAGE_SUBSYSTEM_MINOR_VERSION) );
+ break;
+ }
+ case ConsoleApplication:
+ {
+ // Console application. Set the SubSystem
+ IfFailGo( pCeeFileGen->SetSubsystem(ceeFile, IMAGE_SUBSYSTEM_WINDOWS_CUI, CEE_IMAGE_SUBSYSTEM_MAJOR_VERSION, CEE_IMAGE_SUBSYSTEM_MINOR_VERSION) );
+ break;
+ }
+ default:
+ {
+ _ASSERTE(!"Unknown file kind!");
+ break;
+ }
+ }
+
+ IfFailGo( pCeeFileGen->GetIlSection(ceeFile, &pILSection) );
+ IfFailGo( pEmitNew->GetSaveSize(cssAccurate, &metaDataSize) );
+ IfFailGo( pCeeFileGen->GetSectionBlock(pILSection, metaDataSize, sizeof(DWORD), (void**) &metaData) );
+ IfFailGo( pCeeFileGen->GetSectionDataLen(pILSection, &metaDataOffset) );
+ metaDataOffset -= metaDataSize;
+
+ // get the unmanaged writer.
+ pWriter = pModule->GetReflectionModule()->GetISymUnmanagedWriter();
+ IfFailGo( EmitDebugInfoBegin(pModule, pCeeFileGen, ceeFile, pILSection, wszPeName, pWriter) );
+
+ if (pAssembly->IsDynamic() && pRCW->m_ulResourceSize)
+ {
+ // There are manifest resources in this file
+
+ IfFailGo( pCeeFileGen->GetMethodRVA(ceeFile, 0, &newMethRVA) );
+
+ // Point to manifest resource
+ IfFailGo( pCeeFileGen->SetManifestEntry( ceeFile, pRCW->m_ulResourceSize, newMethRVA ) );
+ }
+
+ IfFailGo( pCeeFileGen->LinkCeeFile(ceeFile) );
+
+ // Get the import interface from the new Emit interface.
+ IfFailGo( pEmitNew->QueryInterface(IID_IMetaDataImport, (void **)&pImportNew));
+
+
+ //Enumerate the TypeDefs and update method RVAs.
+ while ((hr = pImportNew->EnumTypeDefs( &hTypeDefs, &td, 1, &count)) == S_OK)
+ {
+ UpdateMethodRVAs(pEmitNew, pImportNew, pCeeFileGen, ceeFile, td, pModule->GetReflectionModule()->m_sdataSection);
+ }
+
+ if (hTypeDefs)
+ {
+ pImportNew->CloseEnum(hTypeDefs);
+ }
+ hTypeDefs=0;
+
+ //Update Global Methods.
+ UpdateMethodRVAs(pEmitNew, pImportNew, pCeeFileGen, ceeFile, 0, pModule->GetReflectionModule()->m_sdataSection);
+
+
+ //Emit the MetaData
+ // IfFailGo( pCeeFileGen->EmitMetaDataEx(ceeFile, pEmitNew));
+ IfFailGo( pCeeFileGen->EmitMetaDataAt(ceeFile, pEmitNew, pILSection, metaDataOffset, metaData, metaDataSize) );
+
+ // finish the debugging info emitting after the metadata save so that token remap will be caught correctly
+ IfFailGo( EmitDebugInfoEnd(pModule, pCeeFileGen, ceeFile, pILSection, wszPeName, pWriter) );
+
+ //Generate the CeeFile
+ IfFailGo(pCeeFileGen->GenerateCeeFile(ceeFile) );
+
+ // Strong name sign the resulting assembly if required.
+ if (pAssembly->IsDynamic() && isManifestFile && pAssembly->IsStrongNamed())
+ IfFailGo(pAssembly->SignWithStrongName((LPWSTR)wszPeName));
+
+ErrExit:
+
+ pRCW->SetOnDiskEmitter(NULL);
+
+ //Release the interfaces. This should free some of the associated resources.
+ if (pImportNew)
+ pImportNew->Release();
+
+ //Release our interfaces if we allocated them to begin with
+ pRCW->DestroyCeeFileGen();
+
+ //Check all file IO errors. If so, throw IOException. Otherwise, just throw the hr.
+ if (FAILED(hr))
+ {
+ if (HRESULT_FACILITY(hr) == FACILITY_WIN32)
+ {
+ if (IsWin32IOError(HRESULT_CODE(hr)))
+ {
+ SString hrMessage;
+ GenerateTopLevelHRExceptionMessage(hr, hrMessage);
+ COMPlusThrowHR(COR_E_IO, IDS_EE_GENERIC, hrMessage.GetUnicode());
+ }
+ else
+ {
+ COMPlusThrowHR(hr);
+ }
+ }
+ COMPlusThrowHR(hr);
+ }
+
+ END_QCALL;
+}
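+
+// Worked example of the section arithmetic used above (an inference from this
+// call sequence, not documented here): GetSectionBlock grows the IL section by
+// metaDataSize, and GetSectionDataLen then reports the length *after* that
+// growth, so
+//     metaDataOffset = newSectionLen - metaDataSize
+// is the start of the reserved block that EmitMetaDataAt later fills in.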
+
+#endif // FEATURE_CORECLR
+
+//=============================EmitDebugInfoBegin============================*/
+// Phase 1 of emit debugging directory and symbol file.
+//===========================================================================*/
+HRESULT COMDynamicWrite::EmitDebugInfoBegin(Module *pModule,
+ ICeeFileGen *pCeeFileGen,
+ HCEEFILE ceeFile,
+ HCEESECTION pILSection,
+ const WCHAR *filename,
+ ISymUnmanagedWriter *pWriter)
+{
+ CONTRACT(HRESULT) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+
+ PRECONDITION(CheckPointer(pWriter, NULL_OK));
+ PRECONDITION(CheckPointer(pCeeFileGen));
+ PRECONDITION(CheckPointer(pModule));
+
+ POSTCONDITION(SUCCEEDED(RETVAL));
+ }
+ CONTRACT_END;
+
+ HRESULT hr = S_OK;
+
+ // If we were emitting symbols for this dynamic module, go ahead
+ // and fill out the debug directory and save off the symbols now.
+ if (pWriter != NULL)
+ {
+ IMAGE_DEBUG_DIRECTORY debugDirIDD;
+ DWORD debugDirDataSize;
+ BYTE *debugDirData;
+
+ // Grab the debug info.
+ IfFailGo(pWriter->GetDebugInfo(NULL, 0, &debugDirDataSize, NULL));
+
+
+ // Is there any debug info to emit?
+ if (debugDirDataSize > 0)
+ {
+ // Make some room for the data.
+ debugDirData = (BYTE*)_alloca(debugDirDataSize);
+
+ // Actually get the data now.
+ IfFailGo(pWriter->GetDebugInfo(&debugDirIDD,
+ debugDirDataSize,
+ NULL,
+ debugDirData));
+
+
+ // Grab the timestamp of the PE file.
+ DWORD fileTimeStamp;
+
+
+ IfFailGo(pCeeFileGen->GetFileTimeStamp(ceeFile, &fileTimeStamp));
+
+
+ // Fill in the directory entry.
+ debugDirIDD.TimeDateStamp = VAL32(fileTimeStamp);
+ debugDirIDD.AddressOfRawData = 0;
+
+ // Grab memory in the section for our stuff.
+ HCEESECTION sec = pILSection;
+ BYTE *de;
+
+ IfFailGo(pCeeFileGen->GetSectionBlock(sec,
+ sizeof(debugDirIDD) +
+ debugDirDataSize,
+ 4,
+ (void**) &de) );
+
+
+ // Where did we get that memory?
+ ULONG deOffset;
+ IfFailGo(pCeeFileGen->GetSectionDataLen(sec, &deOffset));
+
+
+ deOffset -= (sizeof(debugDirIDD) + debugDirDataSize);
+
+ // Setup a reloc so that the address of the raw data is
+ // setup correctly.
+ debugDirIDD.PointerToRawData = VAL32(deOffset + sizeof(debugDirIDD));
+
+ IfFailGo(pCeeFileGen->AddSectionReloc(
+ sec,
+ deOffset +
+ offsetof(IMAGE_DEBUG_DIRECTORY, PointerToRawData),
+ sec, srRelocFilePos));
+
+
+
+ // Emit the directory entry.
+ IfFailGo(pCeeFileGen->SetDirectoryEntry(
+ ceeFile,
+ sec,
+ IMAGE_DIRECTORY_ENTRY_DEBUG,
+ sizeof(debugDirIDD),
+ deOffset));
+
+
+ // Copy the debug directory into the section.
+ memcpy(de, &debugDirIDD, sizeof(debugDirIDD));
+ memcpy(de + sizeof(debugDirIDD), debugDirData, debugDirDataSize);
+
+ }
+ }
+ErrExit:
+ RETURN(hr);
+}
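+
+// Layout produced above, for reference (one contiguous block in the IL
+// section, offsets relative to the section start):
+//
+//   deOffset:                        IMAGE_DEBUG_DIRECTORY (the header)
+//   deOffset + sizeof(directory):    raw debug data (debugDirDataSize bytes)
+//
+// PointerToRawData points at the second part; the AddSectionReloc call turns
+// that section offset into a file position once the final layout is known.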
+
+
+//=============================EmitDebugInfoEnd==============================*/
+// Phase 2 of emit debugging directory and symbol file.
+//===========================================================================*/
+HRESULT COMDynamicWrite::EmitDebugInfoEnd(Module *pModule,
+ ICeeFileGen *pCeeFileGen,
+ HCEEFILE ceeFile,
+ HCEESECTION pILSection,
+ const WCHAR *filename,
+ ISymUnmanagedWriter *pWriter)
+{
+ CONTRACT(HRESULT) {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(CONTRACT_RETURN(E_OUTOFMEMORY));
+
+ PRECONDITION(CheckPointer(pWriter, NULL_OK));
+ PRECONDITION(CheckPointer(pCeeFileGen));
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACT_END;
+
+ HRESULT hr = S_OK;
+
+ CGrowableStream *pStream = NULL;
+
+ // If we were emitting symbols for this dynamic module, go ahead
+ // and fill out the debug directory and save off the symbols now.
+ if (pWriter != NULL)
+ {
+ // Now go ahead and save off the symbol file and release the
+ // writer.
+ IfFailGo( pWriter->Close() );
+
+
+
+
+ // How big of a stream do we have now?
+ pStream = pModule->GetInMemorySymbolStream();
+ _ASSERTE(pStream != NULL);
+
+ STATSTG SizeData = {0};
+ DWORD streamSize = 0;
+
+ IfFailGo(pStream->Stat(&SizeData, STATFLAG_NONAME));
+
+ streamSize = SizeData.cbSize.u.LowPart;
+
+ if (SizeData.cbSize.u.HighPart > 0)
+ {
+ IfFailGo( E_OUTOFMEMORY );
+
+ }
+
+ SIZE_T fnLen = wcslen(filename);
+ const WCHAR *dot = wcsrchr(filename, W('.'));
+ SIZE_T dotOffset = dot ? dot - filename : fnLen;
+
+ size_t len = dotOffset + 6;
+ WCHAR *fn = (WCHAR*)_alloca(len * sizeof(WCHAR));
+ wcsncpy_s(fn, len, filename, dotOffset);
+
+ fn[dotOffset] = W('.');
+ fn[dotOffset + 1] = W('p');
+ fn[dotOffset + 2] = W('d');
+ fn[dotOffset + 3] = W('b');
+ fn[dotOffset + 4] = W('\0');
+
+ HandleHolder pdbFile(WszCreateFile(fn,
+ GENERIC_WRITE,
+ 0,
+ NULL,
+ CREATE_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL,
+ NULL));
+
+ if (pdbFile != INVALID_HANDLE_VALUE)
+ {
+ DWORD dummy;
+ BOOL succ = WriteFile(pdbFile,
+ pStream->GetRawBuffer().StartAddress(),
+ streamSize,
+ &dummy, NULL);
+
+ if (!succ)
+ {
+ IfFailGo( HRESULT_FROM_GetLastError() );
+
+ }
+
+ }
+ else
+ {
+ IfFailGo( HRESULT_FROM_GetLastError() );
+
+ }
+ }
+
+ErrExit:
+ // No one else will ever need this writer again...
+ pModule->GetReflectionModule()->SetISymUnmanagedWriter(NULL);
+// pModule->GetReflectionModule()->SetSymbolStream(NULL);
+
+ RETURN(hr);
+}
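+
+// Illustrative sketch (not part of the original change): EmitDebugInfoEnd
+// derives the symbol file name by replacing everything from the last '.'
+// onward with ".pdb", or appending it when there is no extension. A
+// standalone equivalent using only the C runtime (the helper name is made up):
+#if 0 // example only
+#include <wchar.h>
+
+static void MakePdbName(const wchar_t *peName, wchar_t *out, size_t cchOut)
+{
+    if (cchOut < 5)                      // need room for L".pdb" plus the NUL
+        return;
+    const wchar_t *dot = wcsrchr(peName, L'.');
+    size_t stem = dot ? (size_t)(dot - peName) : wcslen(peName);
+    if (stem > cchOut - 5)
+        stem = cchOut - 5;
+    wmemcpy(out, peName, stem);          // copy the name up to the last dot
+    wcscpy(out + stem, L".pdb");         // then splice in the new extension
+}
+#endif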
+
+
+#ifndef FEATURE_CORECLR
+//==============================================================================
+// Define external file for native resource.
+//==============================================================================
+void QCALLTYPE COMDynamicWrite::DefineNativeResourceFile(QCall::ModuleHandle pModule, LPCWSTR pwzFileName, INT32 portableExecutableKind, INT32 imageFileMachine)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ DWORD peFlags = 0, corhFlags = 0;
+ ManagedBitnessFlagsToUnmanagedBitnessFlags(portableExecutableKind, imageFileMachine, &peFlags, &corhFlags);
+ IfFailThrow( pRCW->EnsureCeeFileGenCreated(corhFlags, peFlags) );
+
+ ICeeFileGen * pCeeFileGen = pRCW->GetCeeFileGen();
+ HCEEFILE ceeFile = pRCW->GetHCEEFILE();
+ _ASSERTE(ceeFile && pCeeFileGen);
+
+ // Set the resource file name.
+ IfFailThrow( pCeeFileGen->SetResourceFileName(ceeFile, (LPWSTR)pwzFileName) );
+
+ END_QCALL;
+} // void __stdcall COMDynamicWrite::DefineNativeResourceFile()
+
+//==============================================================================
+// Define array of bytes for native resource.
+//==============================================================================
+void QCALLTYPE COMDynamicWrite::DefineNativeResourceBytes(QCall::ModuleHandle pModule, LPCBYTE pbResource, INT32 cbResource, INT32 portableExecutableKind, INT32 imageFileMachine)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ DWORD peFlags = 0, corhFlags = 0;
+ ManagedBitnessFlagsToUnmanagedBitnessFlags(portableExecutableKind, imageFileMachine, &peFlags, &corhFlags);
+ IfFailThrow( pRCW->EnsureCeeFileGenCreated(corhFlags, peFlags) );
+
+ ICeeFileGen * pCeeFileGen = pRCW->GetCeeFileGen();
+ HCEEFILE ceeFile = pRCW->GetHCEEFILE();
+ _ASSERTE(ceeFile && pCeeFileGen);
+
+ // Set the resource stream.
+ HCEESECTION ceeSection = NULL;
+ IfFailThrow( pCeeFileGen->GetSectionCreate(ceeFile, ".rsrc", sdReadOnly, &ceeSection) );
+
+ void * pvResource;
+ IfFailThrow( pCeeFileGen->GetSectionBlock(ceeSection, cbResource, 1, &pvResource) );
+ memcpy(pvResource, pbResource, cbResource);
+
+ END_QCALL;
+} // void __stdcall COMDynamicWrite::DefineNativeResourceBytes()
+
+//=============================AddResource=====================================*/
+ // ecall for adding an embedded resource to this module
+//==============================================================================*/
+void QCALLTYPE COMDynamicWrite::AddResource(QCall::ModuleHandle pModule, LPCWSTR pName, LPCBYTE pResBytes, INT32 resByteCount, UINT32 uFileTk, UINT32 iAttribute, INT32 portableExecutableKind, INT32 imageFileMachine)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ DWORD peFlags = 0, corhFlags = 0;
+ ManagedBitnessFlagsToUnmanagedBitnessFlags(portableExecutableKind, imageFileMachine, &peFlags, &corhFlags);
+ IfFailThrow( pRCW->EnsureCeeFileGenCreated(corhFlags, peFlags) );
+
+ Assembly * pAssembly = pModule->GetAssembly();
+ _ASSERTE( pAssembly && pAssembly->IsDynamic() );
+
+ ICeeFileGen * pCeeFileGen = pRCW->GetCeeFileGen();
+ HCEEFILE ceeFile = pRCW->GetHCEEFILE();
+ _ASSERTE(ceeFile && pCeeFileGen);
+
+ IMetaDataEmit * pOnDiskEmit = pRCW->GetOnDiskEmitter();
+
+ // First, put it into the .rdata section. The only reason we choose the .rdata section at
+ // the moment is that it is the first section in the PE file, so we don't need to deal with
+ // relocs. (It is also unclear how to handle a reloc with CeeFileGen when the reloc
+ // position is not in the same file.)
+
+ // Get the .rdata section
+ HCEESECTION hSection;
+ IfFailThrow( pCeeFileGen->GetRdataSection(ceeFile, &hSection) );
+
+ // the current section data length is the RVA
+ ULONG ulOffset;
+ IfFailThrow( pCeeFileGen->GetSectionDataLen(hSection, &ulOffset) );
+
+ // Allocate a block of space from the .rdata section
+ BYTE * pbBuffer;
+ IfFailThrow( pCeeFileGen->GetSectionBlock(
+ hSection, // from .rdata section
+ resByteCount + sizeof(DWORD), // number of bytes that we need
+ 1, // alignment
+ (void**) &pbBuffer) );
+
+ // now copy over the resource
+ memcpy( pbBuffer, &resByteCount, sizeof(DWORD) );
+ memcpy( pbBuffer + sizeof(DWORD), pResBytes, resByteCount );
+
+ // track the total resource size so far. The size is actually the offset into the section
+ // after writing the resource out
+ IfFailThrow( pCeeFileGen->GetSectionDataLen(hSection, &pRCW->m_ulResourceSize) );
+
+ mdFile tkFile = RidFromToken(uFileTk) ? uFileTk : mdFileNil;
+ mdManifestResource mr;
+
+ if (tkFile != mdFileNil)
+ {
+ SafeComHolderPreemp<IMetaDataAssemblyEmit> pOnDiskAssemblyEmit;
+
+ IfFailThrow( pOnDiskEmit->QueryInterface(IID_IMetaDataAssemblyEmit, (void **) &pOnDiskAssemblyEmit) );
+
+ // The resource is stored in a file other than the manifest file
+ IfFailThrow(pOnDiskAssemblyEmit->DefineManifestResource(
+ pName,
+ mdFileNil, // implementation -- should be file token of this module in the manifest
+ ulOffset, // offset to this file -- need to be adjusted upon save
+ iAttribute, // resource flag
+ &mr)); // manifest resource token
+ }
+
+ // Add an entry into the ManifestResource table for this resource
+ // The RVA is ulOffset
+ SafeComHolderPreemp<IMetaDataAssemblyEmit> pAssemEmitter(pAssembly->GetOnDiskMDAssemblyEmitter());
+ IfFailThrow(pAssemEmitter->DefineManifestResource(
+ pName,
+ tkFile, // implementation -- should be file token of this module in the manifest
+ ulOffset, // offset to this file -- need to be adjusted upon save
+ iAttribute, // resource flag
+ &mr)); // manifest resource token
+
+ pRCW->m_tkFile = tkFile;
+
+ END_QCALL;
+}
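+
+// Resource layout written above, for reference: each embedded resource is a
+// DWORD byte count followed by the raw bytes, appended to .rdata. ulOffset
+// (captured before the copy) is the section-relative start of this entry, and
+// pRCW->m_ulResourceSize ends up as the running end offset after all entries.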
+
+#endif // FEATURE_CORECLR
+
+//============================AddDeclarativeSecurity============================*/
+// Add a declarative security serialized blob and a security action code to a
+// given parent (class or method).
+//==============================================================================*/
+void QCALLTYPE COMDynamicWrite::AddDeclarativeSecurity(QCall::ModuleHandle pModule, INT32 tk, DWORD action, LPCBYTE pBlob, INT32 cbBlob)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+
+ mdPermission tkPermission;
+ HRESULT hr = pRCW->GetEmitHelper()->AddDeclarativeSecurityHelper(tk,
+ action,
+ pBlob,
+ cbBlob,
+ &tkPermission);
+ IfFailThrow(hr);
+
+ if (hr == META_S_DUPLICATE)
+ {
+ COMPlusThrow(kInvalidOperationException, IDS_EE_DUPLICATE_DECLSEC);
+ }
+
+ END_QCALL;
+}
+
+
+CSymMapToken::CSymMapToken(ISymUnmanagedWriter *pWriter, IMapToken *pMapToken)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // we know that the COM implementation is ours, so we use mode-any to avoid
+ // having to switch modes
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ m_cRef = 1;
+ m_pWriter = pWriter;
+ m_pMapToken = pMapToken;
+ if (m_pWriter)
+ m_pWriter->AddRef();
+ if (m_pMapToken)
+ m_pMapToken->AddRef();
+} // CSymMapToken::CSymMapToken()
+
+
+
+//*********************************************************************
+//
+// CSymMapToken's destructor
+//
+//*********************************************************************
+CSymMapToken::~CSymMapToken()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // we know that the COM implementation is ours, so we use mode-any to avoid
+ // having to switch modes
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ if (m_pWriter)
+ m_pWriter->Release();
+ if (m_pMapToken)
+ m_pMapToken->Release();
+} // CSymMapToken::~CSymMapToken()
+
+
+ULONG CSymMapToken::AddRef()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ return InterlockedIncrement(&m_cRef);
+} // CSymMapToken::AddRef()
+
+
+
+ULONG CSymMapToken::Release()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ ULONG cRef = InterlockedDecrement(&m_cRef);
+ if (!cRef)
+ delete this;
+ return (cRef);
+} // CSymMapToken::Release()
+
+
+HRESULT CSymMapToken::QueryInterface(REFIID riid, void **ppUnk)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ *ppUnk = 0;
+
+ if (riid == IID_IMapToken)
+ *ppUnk = (IUnknown *) (IMapToken *) this;
+ else
+ return (E_NOINTERFACE);
+ AddRef();
+ return (S_OK);
+} // CSymMapToken::QueryInterface
+
+
+
+//*********************************************************************
+//
+// catching the token mapping
+//
+//*********************************************************************
+HRESULT CSymMapToken::Map(
+ mdToken tkFrom,
+ mdToken tkTo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = NOERROR;
+ if (m_pWriter)
+ IfFailGo( m_pWriter->RemapToken(tkFrom, tkTo) );
+ if (m_pMapToken)
+ IfFailGo( m_pMapToken->Map(tkFrom, tkTo) );
+ErrExit:
+ return hr;
+}
+
diff --git a/src/vm/comdynamic.h b/src/vm/comdynamic.h
new file mode 100644
index 0000000000..bb898bb776
--- /dev/null
+++ b/src/vm/comdynamic.h
@@ -0,0 +1,202 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+////////////////////////////////////////////////////////////////////////////////
+// COMDynamic.h
+// This module defines the native methods that are used for Dynamic IL generation
+
+////////////////////////////////////////////////////////////////////////////////
+
+#ifndef _COMDYNAMIC_H_
+#define _COMDYNAMIC_H_
+
+#include "iceefilegen.h"
+#include "dbginterface.h"
+
+typedef enum PEFileKinds {
+ Dll = 0x1,
+ ConsoleApplication = 0x2,
+ WindowApplication = 0x3,
+} PEFileKinds;
+
+struct ExceptionInstance;
+
+// COMDynamicWrite
+// This class defines all the methods that implement the dynamic IL creation process
+// inside reflection.
+class COMDynamicWrite
+{
+private:
+
+ static void UpdateMethodRVAs(IMetaDataEmit*, IMetaDataImport*, ICeeFileGen *, HCEEFILE, mdTypeDef td, HCEESECTION sdataSection);
+
+public:
+ // This function will create the class's metadata definition
+ static
+ INT32 QCALLTYPE DefineType(QCall::ModuleHandle pModule,
+ LPCWSTR wszFullName,
+ INT32 tkParent,
+ INT32 attributes,
+ INT32 tkEnclosingType,
+ INT32 * pInterfaceTokens);
+
+ static
+ INT32 QCALLTYPE DefineGenericParam(QCall::ModuleHandle pModule,
+ LPCWSTR wszFullName,
+ INT32 tkParent,
+ INT32 attributes,
+ INT32 position,
+ INT32 * pConstraintTokens);
+
+ // This function will reset the parent class in metadata
+ static
+ void QCALLTYPE SetParentType(QCall::ModuleHandle pModule, INT32 tdType, INT32 tkParent);
+
+ // This function will add another interface impl
+ static
+ void QCALLTYPE AddInterfaceImpl(QCall::ModuleHandle pModule, INT32 tdType, INT32 tkInterface);
+
+ // This function will create a method within the class
+ static
+ INT32 QCALLTYPE DefineMethod(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, LPCBYTE pSignature, INT32 sigLength, INT32 attributes);
+
+ static
+ INT32 QCALLTYPE DefineMethodSpec(QCall::ModuleHandle pModule, INT32 tkParent, LPCBYTE pSignature, INT32 sigLength);
+
+ // This function will create a method within the class
+ static
+ void QCALLTYPE SetMethodIL(QCall::ModuleHandle pModule,
+ INT32 tk,
+ BOOL fIsInitLocal,
+ LPCBYTE pBody,
+ INT32 cbBody,
+ LPCBYTE pLocalSig,
+ INT32 sigLength,
+ UINT16 maxStackSize,
+ ExceptionInstance * pExceptions,
+ INT32 numExceptions,
+ INT32 * pTokenFixups,
+ INT32 numTokenFixups);
+
+ static
+ void QCALLTYPE TermCreateClass(QCall::ModuleHandle pModule, INT32 tk, QCall::ObjectHandleOnStack retType);
+
+ static
+ mdFieldDef QCALLTYPE DefineField(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, LPCBYTE pSignature, INT32 sigLength, INT32 attr);
+
+ static
+ void QCALLTYPE PreSavePEFile(QCall::ModuleHandle pModule, INT32 portableExecutableKind, INT32 imageFileMachine);
+
+ static
+ void QCALLTYPE SavePEFile(QCall::ModuleHandle pModule, LPCWSTR wszPeName, UINT32 entryPoint, UINT32 fileKind, BOOL isManifestFile);
+
+#ifndef FEATURE_CORECLR
+ static
+ void QCALLTYPE DefineNativeResourceFile(QCall::ModuleHandle pModule, LPCWSTR pwzFileName, INT32 portableExecutableKind, INT32 imageFileMachine);
+
+ static
+ void QCALLTYPE DefineNativeResourceBytes(QCall::ModuleHandle pModule, LPCBYTE pbResource, INT32 cbResource, INT32 portableExecutableKind, INT32 imageFileMachine);
+
+ static
+ void QCALLTYPE AddResource(QCall::ModuleHandle pModule, LPCWSTR pName, LPCBYTE pResBytes, INT32 resByteCount, UINT32 uFileTk, UINT32 iAttribute, INT32 portableExecutableKind, INT32 imageFileMachine);
+#endif // !FEATURE_CORECLR
+
+ // not an ecall!
+ static HRESULT EmitDebugInfoBegin(
+ Module *pModule,
+ ICeeFileGen *pCeeFileGen,
+ HCEEFILE ceeFile,
+ HCEESECTION pILSection,
+ const WCHAR *filename,
+ ISymUnmanagedWriter *pWriter);
+
+ // not an ecall!
+ static HRESULT EmitDebugInfoEnd(
+ Module *pModule,
+ ICeeFileGen *pCeeFileGen,
+ HCEEFILE ceeFile,
+ HCEESECTION pILSection,
+ const WCHAR *filename,
+ ISymUnmanagedWriter *pWriter);
+
+ static
+ void QCALLTYPE SetPInvokeData(QCall::ModuleHandle pModule, LPCWSTR wszDllName, LPCWSTR wszFunctionName, INT32 token, INT32 linkFlags);
+
+ static
+ INT32 QCALLTYPE DefineProperty(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, INT32 attr, LPCBYTE pSignature, INT32 sigLength);
+
+ static
+ INT32 QCALLTYPE DefineEvent(QCall::ModuleHandle pModule, INT32 tkParent, LPCWSTR wszName, INT32 attr, INT32 tkEventType);
+
+ // functions to set Setter, Getter, Reset, TestDefault, and other methods
+ static
+ void QCALLTYPE DefineMethodSemantics(QCall::ModuleHandle pModule, INT32 tkAssociation, INT32 attr, INT32 tkMethod);
+
+ // functions to set method's implementation flag
+ static
+ void QCALLTYPE SetMethodImpl(QCall::ModuleHandle pModule, INT32 tkMethod, INT32 attr);
+
+ // functions to create MethodImpl record
+ static
+ void QCALLTYPE DefineMethodImpl(QCall::ModuleHandle pModule, UINT32 tkType, UINT32 tkBody, UINT32 tkDecl);
+
+ // Get a metadata token for a standalone signature
+ static
+ INT32 QCALLTYPE GetTokenFromSig(QCall::ModuleHandle pModule, LPCBYTE pSignature, INT32 sigLength);
+
+ // Set Field offset
+ static
+ void QCALLTYPE SetFieldLayoutOffset(QCall::ModuleHandle pModule, INT32 tkField, INT32 iOffset);
+
+ // Set classlayout info
+ static
+ void QCALLTYPE SetClassLayout(QCall::ModuleHandle pModule, INT32 tk, INT32 iPackSize, UINT32 iTotalSize);
+
+ // Set a custom attribute
+ static
+ void QCALLTYPE DefineCustomAttribute(QCall::ModuleHandle pModule, INT32 token, INT32 conTok, LPCBYTE pBlob, INT32 cbBlob, BOOL toDisk, BOOL updateCompilerFlags);
+
+ // functions to set ParamInfo
+ static
+ INT32 QCALLTYPE SetParamInfo(QCall::ModuleHandle pModule, UINT32 tkMethod, UINT32 iSequence, UINT32 iAttributes, LPCWSTR wszParamName);
+
+#ifndef FEATURE_CORECLR
+ // functions to set FieldMarshal
+ static
+ void QCALLTYPE SetFieldMarshal(QCall::ModuleHandle pModule, UINT32 tk, LPCBYTE pMarshal, INT32 cbMarshal);
+#endif
+ // functions to set default value
+ static
+ void QCALLTYPE SetConstantValue(QCall::ModuleHandle pModule, UINT32 tk, DWORD valueType, LPVOID pValue);
+
+ // functions to add declarative security
+ static
+ void QCALLTYPE AddDeclarativeSecurity(QCall::ModuleHandle pModule, INT32 tk, DWORD action, LPCBYTE pBlob, INT32 cbBlob);
+};
+
+
+
+//*********************************************************************
+//
+// This CSymMapToken class implements IMapToken. It is used to catch
+// token remap information from Merge and send the notification to CeeFileGen
+// and the SymbolWriter
+//
+//*********************************************************************
+class CSymMapToken : public IMapToken
+{
+public:
+ STDMETHODIMP QueryInterface(REFIID riid, PVOID *pp);
+ STDMETHODIMP_(ULONG) AddRef();
+ STDMETHODIMP_(ULONG) Release();
+ STDMETHODIMP Map(mdToken tkImp, mdToken tkEmit);
+ CSymMapToken(ISymUnmanagedWriter *pWriter, IMapToken *pMapToken);
+ ~CSymMapToken();
+private:
+ LONG m_cRef;
+ ISymUnmanagedWriter *m_pWriter;
+ IMapToken *m_pMapToken;
+};
+
+#endif // _COMDYNAMIC_H_
diff --git a/src/vm/cominterfacemarshaler.cpp b/src/vm/cominterfacemarshaler.cpp
new file mode 100644
index 0000000000..6c88710797
--- /dev/null
+++ b/src/vm/cominterfacemarshaler.cpp
@@ -0,0 +1,1335 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: ComInterfaceMarshaler.cpp
+//
+
+//
+
+
+#include "common.h"
+
+#include "vars.hpp"
+#include "excep.h"
+#include "stdinterfaces.h"
+#include "interoputil.h"
+#include "comcallablewrapper.h"
+#include "runtimecallablewrapper.h"
+#include "cominterfacemarshaler.h"
+#include "interopconverter.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#include "appdomainhelper.h"
+#include "crossdomaincalls.h"
+#endif
+#include "notifyexternals.h"
+#include "comdelegate.h"
+#include "winrttypenameconverter.h"
+#include "olecontexthelpers.h"
+
+
+//--------------------------------------------------------------------------------
+// COMInterfaceMarshaler::COMInterfaceMarshaler()
+// ctor
+//--------------------------------------------------------------------------------
+COMInterfaceMarshaler::COMInterfaceMarshaler()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pWrapperCache = RCWCache::GetRCWCache();
+ _ASSERTE(m_pWrapperCache);
+
+ m_pUnknown = NULL;
+ m_pIdentity = NULL;
+ m_pIManaged = NULL;
+
+ INDEBUG(m_fFlagsInited = false;)
+ m_fIsRemote = false;
+ m_fIReference = false;
+ m_fIReferenceArray = false;
+ m_fNonRCWType = false;
+ m_flags = RCW::CF_None;
+ m_pCallback = NULL;
+ m_pThread = NULL;
+
+ m_dwServerSyncBlockIndex = 0;
+}
+
+//--------------------------------------------------------------------------------
+// COMInterfaceMarshaler::~COMInterfaceMarshaler()
+// dtor
+//--------------------------------------------------------------------------------
+COMInterfaceMarshaler::~COMInterfaceMarshaler()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pIManaged)
+ {
+ ULONG cbRef = SafeRelease(m_pIManaged);
+ LogInteropRelease(m_pIManaged, cbRef, "COMInterfaceMarshaler::~COMInterfaceMarshaler: Releasing IManaged interface");
+ m_pIManaged = NULL;
+ }
+}
+
+//--------------------------------------------------------------------------------
+// VOID COMInterfaceMarshaler::Init(IUnknown* pUnk, MethodTable* pClassMT, Thread *pThread, DWORD flags)
+// init
+//--------------------------------------------------------------------------------
+VOID COMInterfaceMarshaler::Init(IUnknown* pUnk, MethodTable* pClassMT, Thread *pThread, DWORD flags /*= RCW::CF_None*/)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(pClassMT, NULL_OK));
+ PRECONDITION(CheckPointer(pThread));
+ PRECONDITION(m_typeHandle.IsNull() && m_pUnknown == NULL && m_pIdentity == NULL);
+ }
+ CONTRACTL_END;
+
+ // NOTE ** this struct is temporary,
+ // so NO ADDREF of the COM Interface pointers
+ m_pUnknown = pUnk;
+
+ // for now use the IUnknown as the Identity
+ m_pIdentity = pUnk;
+
+ m_typeHandle = TypeHandle(pClassMT);
+
+ m_pThread = pThread;
+
+ m_flags = flags;
+
+ if (!SupportsIInspectable())
+ {
+ if (!m_typeHandle.IsNull() && m_typeHandle.IsProjectedFromWinRT())
+ m_flags |= RCW::CF_SupportsIInspectable;
+ }
+}
+
+//--------------------------------------------------------------------------------
+// VOID COMInterfaceMarshaler::InitializeFlags()
+//--------------------------------------------------------------------------------
+VOID COMInterfaceMarshaler::InitializeFlags()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(false == m_fFlagsInited);
+ PRECONDITION(NULL == m_pIManaged);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (SupportsIInspectable() || GetAppDomain()->GetPreferComInsteadOfManagedRemoting())
+ {
+ // The user has set the flag to disable the IManagedObject check, or we know that the object
+ // supports IInspectable, so COM remoting will be used. We have to be careful here and only use
+ // the CF_SupportsIInspectable flag that came in statically. All managed objects support
+ // IInspectable, so performing the check after CF_SupportsIInspectable has been updated based
+ // on QI(IID_IInspectable) would break classic managed <-> managed COM interop scenarios.
+ hr = E_NOINTERFACE;
+ }
+
+
+ if (SUCCEEDED(hr))
+ {
+ hr = SafeQueryInterface(m_pUnknown, IID_IManagedObject, (IUnknown**)&m_pIManaged);
+ LogInteropQI(m_pUnknown, IID_IManagedObject, hr, "COMInterfaceMarshaler::InitializeFlags: QI for IManagedObject");
+ }
+
+ if (hr == S_OK)
+ {
+ _ASSERTE(m_pIManaged);
+ BSTRHolder bstrProcessGUID;
+
+ {
+ GCX_PREEMP();
+
+ INT_PTR pCCW;
+ IfFailThrow(m_pIManaged->GetObjectIdentity(&bstrProcessGUID, (int*)&m_dwServerDomainId, &pCCW));
+
+ // we may get back a pointer-sized value but only the lower 32-bits are guaranteed to be valid and
+ // contain syncblock index of the managed object
+ m_dwServerSyncBlockIndex = (DWORD)pCCW;
+ }
+
+ // If the call had failed we would have thrown above, because
+ // GetObjectIdentity shouldn't fail; one case where it can fail
+ // is when JIT activation of the object failed.
+ _ASSERTE(bstrProcessGUID != NULL);
+
+ // compare the strings to check if this is in-proc
+ BSTR processGuid = GetProcessGUID();
+
+ if (NULL == processGuid)
+ ThrowOutOfMemory();
+
+ m_fIsRemote = (wcscmp((WCHAR *)bstrProcessGUID, processGuid) != 0);
+ }
+
+ INDEBUG(m_fFlagsInited = true;)
+}
+
+// Returns true if the type is WinRT-redirected and requires special marshaler functionality
+// to convert an interface pointer to its corresponding managed instance.
+static bool IsRedirectedToNonRCWType(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pMT == nullptr)
+ {
+ return false;
+ }
+
+ WinMDAdapter::RedirectedTypeIndex index;
+ if (!WinRTTypeNameConverter::ResolveRedirectedType(pMT, &index))
+ {
+ return false;
+ }
+
+ if (index == WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_KeyValuePair)
+ {
+ // we need to convert IKeyValuePair to boxed KeyValuePair
+ return true;
+ }
+
+ // redirected runtime classes are not RCWs
+ WinMDAdapter::WinMDTypeKind kind;
+ WinMDAdapter::GetRedirectedTypeInfo(index, nullptr, nullptr, nullptr, nullptr, nullptr, &kind);
+
+ return kind == WinMDAdapter::WinMDTypeKind_Runtimeclass;
+}
+
+//--------------------------------------------------------------------------------
+// VOID COMInterfaceMarshaler::InitializeObjectClass()
+//--------------------------------------------------------------------------------
+VOID COMInterfaceMarshaler::InitializeObjectClass(IUnknown *pIncomingIP)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!DontResolveClass())
+ {
+
+ // If we are not in an APPX process, and an object could have a strongly typed RCW as a COM CoClass,
+ // we prefer that to the WinRT class. This preserves compatibility for existing code.
+ // If we are in an APPX process we do not check for IProvideClassInfo.
+ if (m_typeHandle.IsNull() && !AppX::IsAppXProcess() && !m_fIsRemote)
+ {
+ EX_TRY
+ {
+ m_typeHandle = GetClassFromIProvideClassInfo(m_pUnknown);
+
+ if (!m_typeHandle.IsNull() && !m_typeHandle.IsComObjectType())
+ {
+ m_typeHandle = TypeHandle(); // Clear the existing one.
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+ if(!m_typeHandle.IsNull())
+ return;
+ }
+
+ // Note that the actual type may be a subtype of m_typeHandle if it's not sealed.
+ if ((m_typeHandle.IsNull() || !m_typeHandle.GetMethodTable()->IsSealed()) && WinRTSupported())
+ {
+ bool fInspectable = SupportsIInspectable();
+ EX_TRY
+ {
+ // QI for IInspectable first. m_fInspectable at this point contains information about the interface
+ // pointer that we could gather from the signature or API call. But, since an object can be acquired
+ // as a plain IUnknown and later started being treated as a WinRT object, we always eagerly QI for
+ // IInspectable as part of the IInspectable::GetRuntimeClassName call. Also note that we may discover
+ // this IInspectable is really a IReference<T> or IReferenceArray<T> for WinRT-compatible T's.
+ TypeHandle typeHandle = GetClassFromIInspectable(pIncomingIP, &fInspectable, &m_fIReference, &m_fIReferenceArray);
+
+ if (!typeHandle.IsNull())
+ {
+ // GetRuntimeClassName could return an interface or a projected value type name
+ if (m_fIReference || m_fIReferenceArray)
+ {
+ // this has already been pre-processed - it is the IReference/IReferenceArray generic argument
+ m_typeHandle = typeHandle;
+ }
+ if (typeHandle.IsInterface())
+ {
+ m_itfTypeHandle = typeHandle;
+ }
+ else if (IsRedirectedToNonRCWType(typeHandle.GetMethodTable()))
+ {
+ m_typeHandle = typeHandle;
+ m_fNonRCWType = true;
+ }
+ else if (!typeHandle.IsValueType())
+ {
+ // if the type returned from GetRuntimeClassName is a class, it must be derived from __ComObject
+ // or be a WinRT delegate for us to be able to build an RCW for it
+ if (typeHandle.IsComObjectType() ||
+ (!typeHandle.IsTypeDesc() && typeHandle.GetMethodTable()->IsDelegate() && (typeHandle.IsProjectedFromWinRT() || WinRTTypeNameConverter::IsRedirectedType(typeHandle.GetMethodTable()))))
+ {
+ m_typeHandle = typeHandle;
+ }
+ }
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ if (fInspectable)
+ {
+ m_flags |= RCW::CF_SupportsIInspectable;
+ }
+ else
+ {
+ _ASSERTE_MSG(m_typeHandle.IsNull() || !SupportsIInspectable(),
+ "Acquired an object which should be IInspectable according to metadata but the QI failed.");
+ }
+ }
+ }
+
+ if (m_typeHandle.IsNull())
+ m_typeHandle = TypeHandle(g_pBaseCOMObject);
+}
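+
+// Resolution order implemented above, summarized: (1) keep any statically
+// known class; else (2) outside AppX and for in-proc objects, try the COM
+// coclass advertised via IProvideClassInfo; else (3) when WinRT is supported,
+// ask IInspectable::GetRuntimeClassName, which may yield an interface, an
+// IReference/IReferenceArray generic argument, or a redirected non-RCW type;
+// finally (4) fall back to plain __ComObject.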
+
+//--------------------------------------------------------------------
+// OBJECTREF COMInterfaceMarshaler::GetCCWObject()
+//--------------------------------------------------------------------
+OBJECTREF COMInterfaceMarshaler::GetCCWObject()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(!m_fIsRemote);
+ }
+ CONTRACTL_END;
+
+ OBJECTREF oref = NULL;
+
+ if (m_dwServerSyncBlockIndex != 0)
+ {
+ AppDomain* pCurrDomain = m_pThread->GetDomain();
+ if (m_dwServerDomainId == pCurrDomain->GetId())
+ {
+ // if we are in the right AD, we know for sure that the object is still alive
+ // since we keep the CCW addref'ed and the AD could not have been unloaded
+ oref = ObjectToOBJECTREF(g_pSyncTable[m_dwServerSyncBlockIndex].m_Object);
+ }
+ else
+ {
+ // otherwise we have to make sure that the AD hasn't been unloaded
+ AppDomainFromIDHolder ad(m_dwServerDomainId, TRUE);
+ if (!ad.IsUnloaded())
+ {
+ oref = ObjectToOBJECTREF(g_pSyncTable[m_dwServerSyncBlockIndex].m_Object);
+ }
+ }
+ }
+
+ return oref;
+}
+
+//--------------------------------------------------------------------
+// OBJECTREF COMInterfaceMarshaler::HandleInProcManagedComponents()
+//--------------------------------------------------------------------
+OBJECTREF COMInterfaceMarshaler::HandleInProcManagedComponent()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(!m_fIsRemote);
+ }
+ CONTRACTL_END;
+
+ AppDomain* pCurrDomain = m_pThread->GetDomain();
+
+ OBJECTREF oref = NULL;
+ if (m_dwServerDomainId == pCurrDomain->GetId())
+ {
+ oref = GetCCWObject();
+ }
+ else
+ {
+#ifdef FEATURE_CORECLR
+ _ASSERTE(!"NYI");
+ COMPlusThrowHR(COR_E_NOTSUPPORTED);
+#else // FEATURE_CORECLR
+ // TODO: we could probably cache the object on a per-AppDomain basis,
+ // using the CCW as the key
+ OBJECTREF pwrap = NULL;
+ GCPROTECT_BEGIN(pwrap);
+ {
+ pwrap = GetCCWObject();
+ oref = AppDomainHelper::CrossContextCopyFrom(m_dwServerDomainId, &pwrap);
+ }
+ GCPROTECT_END();
+#endif // FEATURE_CORECLR
+ }
+
+ return oref;
+}
+
+#ifdef FEATURE_REMOTING
+
+OBJECTREF COMInterfaceMarshaler::GetObjectForRemoteManagedComponentNoThrow()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF oref = NULL;
+
+ EX_TRY
+ {
+ oref = GetObjectForRemoteManagedComponent();
+ }
+ EX_CATCH
+ {
+ oref = NULL;
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ return oref;
+}
+
+
+//--------------------------------------------------------------------
+// OBJECTREF COMInterfaceMarshaler::GetObjectForRemoteManagedComponent()
+// setup managed proxy to remote object
+//--------------------------------------------------------------------
+OBJECTREF COMInterfaceMarshaler::GetObjectForRemoteManagedComponent()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_fIsRemote == true);
+ PRECONDITION(CheckPointer(m_pIManaged));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF oref = NULL;
+
+ GCPROTECT_BEGIN(oref)
+ {
+ BSTR bstr;
+ HRESULT hr;
+
+ {
+ GCX_PREEMP();
+ hr = m_pIManaged->GetSerializedBuffer(&bstr);
+ }
+
+ if (hr == S_OK)
+ {
+ if (bstr != NULL)
+ {
+ // this could throw an exception
+ // also this would free up the BSTR that we pass in
+ BOOL fLegacyMode = (GetAppDomain()->GetComOrRemotingFlag() == COMorRemoting_LegacyMode);
+ oref = ConvertBSTRToObject(bstr, !fLegacyMode);
+
+ if (oref != NULL)
+ {
+ // setup a COM call wrapper
+ ComCallWrapper* pComCallWrap = ComCallWrapper::InlineGetWrapper(&oref);
+ _ASSERTE(pComCallWrap != NULL);
+
+ // InlineGetWrapper AddRef's the wrapper
+ pComCallWrap->Release();
+ }
+ }
+ }
+ else
+ {
+ COMPlusThrowHR(hr);
+ }
+ }
+ GCPROTECT_END();
+
+ return oref;
+}
+#endif // FEATURE_REMOTING
+
+//--------------------------------------------------------------------------------
+// void COMInterfaceMarshaler::CreateObjectRef(BOOL fDuplicate, OBJECTREF *pComObj)
+// Creates an RCW of the proper type.
+//--------------------------------------------------------------------------------
+void COMInterfaceMarshaler::CreateObjectRef(BOOL fDuplicate, OBJECTREF *pComObj, IUnknown **ppIncomingIP, MethodTable *pIncomingItfMT, bool bIncomingIPAddRefed)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsProtectedByGCFrame(pComObj));
+ PRECONDITION(!m_typeHandle.IsNull());
+ PRECONDITION(m_typeHandle.IsComObjectType() || (m_typeHandle.GetMethodTable()->IsDelegate() && (m_typeHandle.GetMethodTable()->IsProjectedFromWinRT() || WinRTTypeNameConverter::IsRedirectedType(m_typeHandle.GetMethodTable()))));
+ PRECONDITION(m_pThread == GetThread());
+ PRECONDITION(pIncomingItfMT == NULL || pIncomingItfMT->IsInterface());
+ }
+ CONTRACTL_END;
+
+ BOOL fExisting = FALSE;
+
+ // instantiate an instance of m_typeHandle
+ if (*pComObj != NULL)
+ {
+ // the instance already exists and was passed in *pComObj
+ fExisting = TRUE;
+ }
+ else if (m_typeHandle.IsComObjectType())
+ {
+ // ordinary RCW
+ *pComObj = ComObject::CreateComObjectRef(m_typeHandle.GetMethodTable());
+ }
+ else
+ {
+ // If delegates were to take this path, we need to fix the identity in MethodPtrAux later
+ _ASSERTE(!(m_flags & RCW::CF_QueryForIdentity));
+
+ // delegate backed by a WinRT interface pointer
+ *pComObj = COMDelegate::ConvertWinRTInterfaceToDelegate(m_pIdentity, m_typeHandle.GetMethodTable());
+ }
+
+ // make sure we "pin" the syncblock before switching to preemptive mode
+ SyncBlock *pSB = (*pComObj)->GetSyncBlock();
+ pSB->SetPrecious();
+ DWORD dwSyncBlockIndex = pSB->GetSyncBlockIndex();
+
+ NewRCWHolder pNewRCW;
+ pNewRCW = RCW::CreateRCW(m_pUnknown, dwSyncBlockIndex, m_flags, m_typeHandle.GetMethodTable());
+
+ if (fDuplicate)
+ {
+ // let us fix the identity to be the wrapper,
+ // so looking up this IUnknown won't return this wrapper
+ // this would allow users to call WrapIUnknownWithCOMObject
+ // to create duplicate wrappers
+ pNewRCW->m_pIdentity = pNewRCW;
+ m_pIdentity = (IUnknown*)(LPVOID)pNewRCW;
+ }
+ else if (m_flags & RCW::CF_QueryForIdentity)
+ {
+ // pNewRCW has the real Identity in this case and we need to use it to insert into RCW cache
+ m_pIdentity = (IUnknown *)pNewRCW->m_pIdentity;
+ }
+
+ // If the class is an extensible RCW (managed class deriving from a ComImport class)
+ if (fExisting)
+ {
+ MethodTable *pClassMT = (*pComObj)->GetMethodTable();
+ if (pClassMT != g_pBaseCOMObject && pClassMT->IsExtensibleRCW())
+ {
+ // WinRT scenario: we're initializing an RCW for a managed object that is
+ // already in the process of being constructed (we're at the point of calling
+ // the base class ctor).
+ // Just mark the RCW as aggregated (in this scenario we don't go down
+ // ComClassFactory::CreateAggregatedInstance)
+ pNewRCW->MarkURTAggregated();
+ }
+ }
+ else
+ {
+ if (m_typeHandle.GetMethodTable() != g_pBaseCOMObject && m_typeHandle.GetMethodTable()->IsExtensibleRCW())
+ {
+ // Normal COM aggregation case - we're just in the process of allocating the object
+ // If the managed class has a default constructor then call it
+ MethodDesc *pCtorMD = m_typeHandle.GetMethodTable()->GetDefaultConstructor();
+ if (pCtorMD)
+ {
+ PREPARE_NONVIRTUAL_CALLSITE_USING_METHODDESC(pCtorMD);
+ DECLARE_ARGHOLDER_ARRAY(CtorArgs, 1);
+ CtorArgs[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(*pComObj);
+
+ // Call the ctor...
+ CALL_MANAGED_METHOD_NORET(CtorArgs);
+ }
+ }
+ }
+
+ // We expect that, at most, the first entry will already be allocated.
+ // (SetJupiterObject gets the first shot at this.)
+ int nNextFreeIdx = pNewRCW->m_aInterfaceEntries[0].IsFree() ? 0 : 1;
+
+ // Only cache WinRT interfaces
+ // Note that we can't use SupportsIInspectable here because we could be talking to a CCW
+ // which supports IInspectable by default
+ if (ppIncomingIP != NULL &&
+ *ppIncomingIP != NULL &&
+ pIncomingItfMT != NULL &&
+ pIncomingItfMT->IsLegalNonArrayWinRTType())
+ {
+ _ASSERTE(pIncomingItfMT->IsInterface());
+ _ASSERTE(pNewRCW->m_aInterfaceEntries[nNextFreeIdx].IsFree());
+
+ //
+ // The incoming interface pointer is of type m_pItfMT
+ // Cache the result into RCW for better performance and for variance support
+ // For example, GetFilesAsync() returns Windows.Storage.StorageFileView and this type
+ // is not in any WinMD. Because GetFilesAsync actually returns IVectorView<StorageFile>,
+ // we know this RCW supports this interface, and putting it into the cache would make sure
+ // casting this RCW to IVectorView<object> works
+ //
+ pNewRCW->m_aInterfaceEntries[nNextFreeIdx++].Init(pIncomingItfMT, *ppIncomingIP);
+
+ // Don't hold ref count if RCW is aggregated
+ if (!pNewRCW->IsURTAggregated())
+ {
+ if (bIncomingIPAddRefed)
+ {
+ // Transfer the ref from ppIncomingIP to internal cache
+ // This will only happen in WinRT scenarios to reduce risk of this change
+ *ppIncomingIP = NULL;
+ }
+ else
+ {
+ // Otherwise AddRef by ourselves
+ RCW_VTABLEPTR(pNewRCW);
+ SafeAddRef(*ppIncomingIP);
+ }
+
+ RCWWalker::AfterInterfaceAddRef(pNewRCW);
+ }
+
+ // Save GetEnumerator method if necessary
+ // Do this after we "AddRef" on ppIncomingIP otherwise we would call Release on it
+ // without a AddRef
+ pNewRCW->SetGetEnumeratorMethod(pIncomingItfMT);
+ }
+
+ if (!m_itfTypeHandle.IsNull() && !m_itfTypeHandle.IsTypeDesc())
+ {
+ MethodTable *pItfMT = m_itfTypeHandle.AsMethodTable();
+
+ // Just in case we've already cached it with pIncomingItfMT
+ if (pItfMT != pIncomingItfMT)
+ {
+ // We know that the object supports pItfMT but we don't have the right interface pointer at this point
+ // (*ppIncomingIP is not necessarily the right one) so we'll QI for it. Note that this is not just a
+ // perf optimization, we need to store pItfMT in the RCW in case it has variance and/or provide the
+ // non-generic IEnumerable::GetEnumerator method.
+
+ IID iid;
+ SafeComHolder<IUnknown> pItfIP;
+
+ if (SUCCEEDED(pNewRCW->CallQueryInterface(pItfMT, Instantiation(), &iid, &pItfIP)))
+ {
+ _ASSERTE(pNewRCW->m_aInterfaceEntries[nNextFreeIdx].IsFree());
+
+ pNewRCW->m_aInterfaceEntries[nNextFreeIdx].Init(pItfMT, pItfIP);
+
+ // Don't hold ref count if RCW is aggregated
+ if (!pNewRCW->IsURTAggregated())
+ {
+ pItfIP.SuppressRelease();
+
+ RCWWalker::AfterInterfaceAddRef(pNewRCW);
+ }
+ }
+ }
+ }
+
+
+ {
+ // Make sure that RCWHolder is declared before GC is forbidden - its destructor may trigger GC.
+ RCWHolder pRCW(m_pThread);
+ pRCW.InitNoCheck(pNewRCW);
+
+ // We may get back an RCW from another STA thread (mainly in WinRT factory cache scenario,
+ // as those factories are typically singleton), and we can only touch the RCW if we hold the lock,
+ // otherwise we may AV if the STA thread dies and takes the RCW with it
+ RCWCache::LockHolder lh(m_pWrapperCache);
+
+ GCX_FORBID();
+
+ // see if somebody beat us to it..
+ BOOL fInserted = m_pWrapperCache->FindOrInsertWrapper_NoLock(m_pIdentity, &pRCW, !fExisting);
+ if (!fInserted)
+ {
+ // Somebody beat us to creating a wrapper. Let's determine whether we should insert our
+ // wrapper as a duplicate, or use the other wrapper that is already in the cache
+
+ // If the object instance already exists, we have no choice but to insert this wrapper
+ // as a duplicate. If we return the one that is already in the cache, we would return
+ // a different object!
+ BOOL fInsertAsDuplicateWrapper = fExisting;
+
+ if (!fInsertAsDuplicateWrapper)
+ {
+ // Shall we use the RCW that is already in the cache?
+ if (m_pCallback && !m_pCallback->ShouldUseThisRCW(pRCW.GetRawRCWUnsafe()))
+ {
+ // No - let's insert our wrapper as a duplicate instead
+ fInsertAsDuplicateWrapper = TRUE;
+
+ // Initialize pRCW again and make sure pRCW is indeed our new wrapper
+ pRCW.UnInit();
+ pRCW.InitNoCheck(pNewRCW);
+ }
+ }
+
+ if (fInsertAsDuplicateWrapper)
+ {
+ // we need to keep this wrapper separate so we'll insert it with the alternate identity
+ // (just as if fDuplicate was TRUE)
+ pNewRCW->m_pIdentity = pNewRCW;
+ m_pIdentity = (IUnknown*)(LPVOID)pNewRCW;
+
+ fInserted = m_pWrapperCache->FindOrInsertWrapper_NoLock(m_pIdentity, &pRCW, !fExisting);
+ _ASSERTE(fInserted);
+
+ pNewRCW.SuppressRelease();
+
+ if (m_pCallback)
+ m_pCallback->OnRCWCreated(pRCW.GetRawRCWUnsafe());
+ }
+ else
+ {
+ // Somebody beat us to creating the wrapper. Let's use that one
+ if (m_pCallback)
+ m_pCallback->OnRCWCacheHit(pRCW.GetRawRCWUnsafe());
+
+ // grab the new object
+ *pComObj = (OBJECTREF)pRCW->GetExposedObject();
+ }
+ }
+ else
+ {
+ // If we did insert this wrapper in the table, make sure we don't delete it.
+ pNewRCW.SuppressRelease();
+
+ if (m_pCallback)
+ m_pCallback->OnRCWCreated(pRCW.GetRawRCWUnsafe());
+ }
+ }
+
+ _ASSERTE(*pComObj != NULL);
+
+#ifdef _DEBUG
+ if (!m_typeHandle.IsNull() && m_typeHandle.IsComObjectType())
+ {
+ // make sure this object supports all the COM Interfaces in the class
+ EnsureCOMInterfacesSupported(*pComObj, m_typeHandle.GetMethodTable());
+ }
+#endif
+}
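+
+// Outcome matrix for the FindOrInsertWrapper_NoLock race above, summarized:
+// if our wrapper was inserted we keep it (SuppressRelease) and raise
+// OnRCWCreated; if another thread won, we either adopt the cached RCW
+// (OnRCWCacheHit) or, when the managed object already existed or the callback
+// rejects the cached RCW, re-insert ours under its own identity as a
+// deliberate duplicate.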
+
+
+// OBJECTREF COMInterfaceMarshaler::IReferenceUnbox()
+//--------------------------------------------------------------------------------
+
+void COMInterfaceMarshaler::IReferenceUnbox(IUnknown **ppIncomingIP, OBJECTREF *poref, bool bIncomingIPAddRefed)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_fIReference);
+ PRECONDITION(m_pThread == GetThread());
+ }
+ CONTRACTL_END;
+
+ OBJECTREF unboxed = NULL;
+ _ASSERTE(m_typeHandle.AsMethodTable()->IsLegalNonArrayWinRTType());
+
+ // Create a temporary RCW. Call into managed. Let managed query for a closed generic instantiation
+ // like IReference<Int32> (including the GUID calculation & QI) then call the Value property. That
+ // will use the existing interop code to safely marshal the value.
+ // Also, make sure we create a duplicate RCW in this case so that next time we won't end up getting
+ // this RCW from cache
+ COMInterfaceMarshaler marshaler;
+
+ DWORD flags = RCW::CF_DontResolveClass | RCW::CF_NeedUniqueObject;
+
+ marshaler.Init(m_pUnknown, g_pBaseCOMObject, m_pThread, flags);
+
+ if (m_pCallback)
+ marshaler.SetCallback(m_pCallback);
+
+ OBJECTREF oref = marshaler.FindOrCreateObjectRefInternal(ppIncomingIP, /* pIncomingItfMT = */ NULL, bIncomingIPAddRefed);
+
+ IReferenceOrIReferenceArrayUnboxWorker(oref, m_typeHandle, FALSE, poref);
+}
+
+// void COMInterfaceMarshaler::IReferenceOrIReferenceArrayUnboxWorker()
+//--------------------------------------------------------------------------------
+
+// static
+void COMInterfaceMarshaler::IReferenceOrIReferenceArrayUnboxWorker(OBJECTREF oref, TypeHandle thT, BOOL fIsIReferenceArray, OBJECTREF *porefResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ GCPROTECT_BEGIN(oref);
+
+ // Get IReference<SomeType> or IReferenceArray<SomeType>
+ Instantiation inst(&thT, 1);
+ TypeHandle openType;
+ MethodDesc* pMD = NULL;
+ if (fIsIReferenceArray)
+ {
+ openType = TypeHandle(MscorlibBinder::GetClass(CLASS__CLRIREFERENCEARRAYIMPL));
+ pMD = MscorlibBinder::GetMethod(METHOD__CLRIREFERENCEARRAYIMPL__UNBOXHELPER);
+ }
+ else
+ {
+ openType = TypeHandle(MscorlibBinder::GetClass(CLASS__CLRIREFERENCEIMPL));
+ pMD = MscorlibBinder::GetMethod(METHOD__CLRIREFERENCEIMPL__UNBOXHELPER);
+ }
+ TypeHandle closedType = openType.Instantiate(inst);
+
+ // Call managed helper to get the real unboxed object now
+ MethodDesc* method = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pMD,
+ closedType.AsMethodTable(),
+ FALSE,
+ Instantiation(),
+ FALSE);
+ _ASSERTE(method != NULL);
+
+ MethodDescCallSite unboxHelper(method);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(oref),
+ };
+
+ // Call CLRIReferenceImpl::UnboxHelper(Object) or CLRIReferenceArrayImpl::UnboxHelper(Object)
+ *porefResult = unboxHelper.Call_RetOBJECTREF(args);
+ GCPROTECT_END();
+}
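+
+// Sketch of what the worker above does for, say, IReference<Int32>: it closes
+// CLRIReferenceImpl<T> (or CLRIReferenceArrayImpl<T>) over the WinRT-legal T,
+// finds that instantiation's UnboxHelper(Object), and invokes it on the
+// temporary RCW so the existing generic interop marshaling produces the boxed
+// managed value.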
+
+// void COMInterfaceMarshaler::IKeyValuePairUnboxWorker()
+//--------------------------------------------------------------------------------
+
+// static
+void COMInterfaceMarshaler::IKeyValuePairUnboxWorker(OBJECTREF oref, OBJECTREF *porefResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ GCPROTECT_BEGIN(oref);
+
+ _ASSERTE(oref->GetMethodTable()->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__CLRIKEYVALUEPAIRIMPL)));
+
+ MethodDesc *method = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ MscorlibBinder::GetMethod(METHOD__CLRIKEYVALUEPAIRIMPL__UNBOXHELPER),
+ oref->GetMethodTable(),
+ FALSE,
+ Instantiation(),
+ FALSE);
+ _ASSERTE(method != NULL);
+
+ MethodDescCallSite unboxHelper(method);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(oref),
+ };
+
+ // Call CLRIKeyValuePair::UnboxHelper(Object)
+ *porefResult = unboxHelper.Call_RetOBJECTREF(args);
+ GCPROTECT_END();
+}
+
+// OBJECTREF COMInterfaceMarshaler::IReferenceArrayUnbox()
+//--------------------------------------------------------------------------------
+
+void COMInterfaceMarshaler::IReferenceArrayUnbox(IUnknown **ppIncomingIP, OBJECTREF *poref, bool bIncomingIPAddRefed)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_fIReferenceArray);
+ PRECONDITION(m_pThread == GetThread());
+ }
+ CONTRACTL_END;
+
+ OBJECTREF unboxed = NULL;
+ // Remember that all reference-type array method tables are shared.
+ TypeHandle elementType = m_typeHandle.GetElementType();
+ _ASSERTE(elementType.AsMethodTable()->IsLegalNonArrayWinRTType());
+
+ // Create a temporary RCW. Call into managed. Let managed query for a closed generic instantiation
+ // like IReferenceArray<Int32> (including the GUID calculation & QI) then call the Value property. That
+ // will use the existing interop code to safely marshal the value.
+ // Also, make sure we create a duplicate RCW in this case so that next time we won't end up getting
+ // this RCW from the cache.
+ COMInterfaceMarshaler marshaler;
+
+ DWORD flags = RCW::CF_DontResolveClass | RCW::CF_NeedUniqueObject;
+
+ marshaler.Init(m_pUnknown, g_pBaseCOMObject, m_pThread, flags);
+
+ if (m_pCallback)
+ marshaler.SetCallback(m_pCallback);
+
+ OBJECTREF oref = marshaler.FindOrCreateObjectRefInternal(ppIncomingIP, /* pIncomingItfMT = */ NULL, bIncomingIPAddRefed);
+
+ IReferenceOrIReferenceArrayUnboxWorker(oref, elementType, TRUE, poref);
+}
+
+void COMInterfaceMarshaler::MarshalToNonRCWType(OBJECTREF *poref)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_fNonRCWType);
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsRedirectedToNonRCWType(m_typeHandle.GetMethodTable()));
+
+ struct
+ {
+ OBJECTREF refMarshaled;
+ STRINGREF refRawURI;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ WinMDAdapter::RedirectedTypeIndex index = static_cast<WinMDAdapter::RedirectedTypeIndex>(-1);
+ WinRTTypeNameConverter::ResolveRedirectedType(m_typeHandle.GetMethodTable(), &index);
+ _ASSERTE(index != -1);
+
+ GCPROTECT_BEGIN(gc)
+
+ switch (index)
+ {
+ case WinMDAdapter::RedirectedTypeIndex_System_Uri:
+ {
+ WinRtString hsRawUri;
+ {
+ GCX_PREEMP();
+
+ SafeComHolderPreemp<ABI::Windows::Foundation::IUriRuntimeClass> pUriRuntimeClass;
+ HRESULT hr = SafeQueryInterfacePreemp(m_pUnknown, ABI::Windows::Foundation::IID_IUriRuntimeClass, (IUnknown **) &pUriRuntimeClass);
+ LogInteropQI(m_pUnknown, ABI::Windows::Foundation::IID_IUriRuntimeClass, hr, "IUriRuntimeClass");
+ IfFailThrow(hr);
+
+ IfFailThrow(pUriRuntimeClass->get_RawUri(hsRawUri.Address()));
+ }
+
+ UINT32 cchRawUri;
+ LPCWSTR pwszRawUri = hsRawUri.GetRawBuffer(&cchRawUri);
+ gc.refRawURI = StringObject::NewString(pwszRawUri, cchRawUri);
+
+ UriMarshalingInfo *pUriMarshalingInfo = GetAppDomain()->GetMarshalingData()->GetUriMarshalingInfo();
+ MethodDesc* pSystemUriCtorMD = pUriMarshalingInfo->GetSystemUriCtorMD();
+
+ MethodTable *pMTSystemUri = pUriMarshalingInfo->GetSystemUriType().AsMethodTable();
+ pMTSystemUri->EnsureInstanceActive();
+ gc.refMarshaled = AllocateObject(pMTSystemUri, false);
+
+ MethodDescCallSite uriCtor(pSystemUriCtorMD);
+ ARG_SLOT ctorArgs[] =
+ {
+ ObjToArgSlot(gc.refMarshaled),
+ ObjToArgSlot(gc.refRawURI)
+ };
+ uriCtor.Call(ctorArgs);
+ }
+ break;
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_KeyValuePair:
+ {
+ MethodDesc *pMD = MscorlibBinder::GetMethod(METHOD__KEYVALUEPAIRMARSHALER__CONVERT_TO_MANAGED_BOX);
+
+ pMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pMD,
+ pMD->GetMethodTable(),
+ FALSE, // forceBoxedEntryPoint
+ m_typeHandle.GetInstantiation(), // methodInst
+ FALSE, // allowInstParam
+ TRUE); // forceRemotableMethod
+
+ MethodDescCallSite marshalMethod(pMD);
+ ARG_SLOT methodArgs[] =
+ {
+ PtrToArgSlot(m_pUnknown)
+ };
+ gc.refMarshaled = marshalMethod.Call_RetOBJECTREF(methodArgs);
+ }
+ break;
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Specialized_NotifyCollectionChangedEventArgs:
+ case WinMDAdapter::RedirectedTypeIndex_System_ComponentModel_PropertyChangedEventArgs:
+ {
+ MethodDesc *pMD;
+ EventArgsMarshalingInfo *pInfo = GetAppDomain()->GetMarshalingData()->GetEventArgsMarshalingInfo();
+
+ if (index == WinMDAdapter::RedirectedTypeIndex_System_Collections_Specialized_NotifyCollectionChangedEventArgs)
+ pMD = pInfo->GetWinRTNCCEventArgsToSystemNCCEventArgsMD();
+ else
+ pMD = pInfo->GetWinRTPCEventArgsToSystemPCEventArgsMD();
+
+ MethodDescCallSite marshalMethod(pMD);
+ ARG_SLOT methodArgs[] =
+ {
+ PtrToArgSlot(m_pUnknown)
+ };
+ gc.refMarshaled = marshalMethod.Call_RetOBJECTREF(methodArgs);
+ }
+ break;
+
+ default:
+ {
+ // If we get here then there is a new redirected type being introduced to the system. You must
+ // add code to marshal that type above. Additionally, code may need to be added to GetComIPFromObjectRef,
+ // in order to handle the reverse case.
+ UNREACHABLE();
+ }
+ }
+
+ *poref = gc.refMarshaled;
+
+ GCPROTECT_END();
+}
+
+
+// OBJECTREF COMInterfaceMarshaler::HandleTPComponents()
+//--------------------------------------------------------------------------------
+
+OBJECTREF COMInterfaceMarshaler::HandleTPComponents()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(m_pIManaged));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_REMOTING
+ OBJECTREF oref = NULL;
+
+ if (m_fIsRemote || CRemotingServices::IsTransparentProxy(OBJECTREFToObject(GetCCWObject())))
+ {
+ if (!m_fIsRemote)
+ {
+ oref = HandleInProcManagedComponent();
+ }
+ else
+ {
+ if (!m_typeHandle.IsNull() && !m_typeHandle.IsComObjectType())
+ {
+ // if the user wants explicit calls,
+ // we'd better serialize/deserialize
+ oref = GetObjectForRemoteManagedComponent();
+ }
+ else
+ {
+ oref = GetObjectForRemoteManagedComponentNoThrow();
+ }
+ }
+
+ if (oref != NULL)
+ {
+ OBJECTREF realProxy = ObjectToOBJECTREF(CRemotingServices::GetRealProxy(OBJECTREFToObject(oref)));
+ if(realProxy != NULL)
+ {
+ // set the DCOM proxy on the real proxy
+ GCPROTECT_BEGIN(oref)
+ {
+ CRemotingServices::CallSetDCOMProxy(realProxy, m_pUnknown);
+ }
+ GCPROTECT_END();
+ return oref;
+ }
+ else
+ {
+ return oref;
+ }
+ }
+ }
+#endif // FEATURE_REMOTING
+
+ return NULL;
+}
+
+//--------------------------------------------------------------------------------
+// OBJECTREF COMInterfaceMarshaler::FindOrCreateObjectRef()
+// Find the wrapper for this COM IP; we may have to create one if not found.
+// It returns null in out-of-memory scenarios. It also notices if we have
+// an IP that is disguised as an unmanaged object but is actually sitting on
+// top of a managed object.
+//
+// The ppIncomingIP parameter serves two purposes - it lets COMInterfaceMarshaler call methods on the
+// interface pointer that came in from unmanaged code (pUnk could be the result of QI'ing such an IP for IUnknown),
+// and it also implements the CF_SuppressAddRef flag in a reliable way by assigning NULL to *ppIncomingIP if and
+// only if COMInterfaceMarshaler ended up creating a new RCW which took ownership of the interface pointer.
+//
+// If pIncomingItfMT is not NULL, we'll cache *ppIncomingIP into the created RCW, so that
+// 1) RCW variance works even if we can't load the right type from RuntimeClassName but the method returns an interface
+// 2) we avoid a second QI for the same interface type
+//--------------------------------------------------------------------------------
+
+OBJECTREF COMInterfaceMarshaler::FindOrCreateObjectRef(IUnknown **ppIncomingIP, MethodTable *pIncomingItfMT /* = NULL */)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return FindOrCreateObjectRefInternal(ppIncomingIP, pIncomingItfMT, /* bIncomingIPAddRefed = */ true);
+}
+
+OBJECTREF COMInterfaceMarshaler::FindOrCreateObjectRef(IUnknown *pIncomingIP, MethodTable *pIncomingItfMT /* = NULL */)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return FindOrCreateObjectRefInternal(&pIncomingIP, pIncomingItfMT, /* bIncomingIPAddRefed = */ false);
+}
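+
+// Illustrative call-site sketch (hypothetical, not part of this change):
+//
+//   COMInterfaceMarshaler marshaler;
+//   marshaler.Init(pUnk, g_pBaseCOMObject, GetThread());
+//   OBJECTREF oref = marshaler.FindOrCreateObjectRef(pUnk);
+//
+// The two overloads differ only in interface-pointer ownership: the IUnknown**
+// overload may take over the caller's reference (see the CF_SuppressAddRef notes
+// above), while the IUnknown* overload never does.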
+
+OBJECTREF COMInterfaceMarshaler::FindOrCreateObjectRefInternal(IUnknown **ppIncomingIP, MethodTable *pIncomingItfMT, bool bIncomingIPAddRefed)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_pThread == GetThread());
+ PRECONDITION(pIncomingItfMT == NULL || pIncomingItfMT->IsInterface());
+ }
+ CONTRACTL_END;
+
+ OBJECTREF oref = NULL;
+
+ // (I)
+ // Initial check in our cache
+ // Skip if we want a unique object.
+ if (!NeedUniqueObject())
+ {
+ // Protect oref as SafeAddRef may trigger GC
+ GCPROTECT_BEGIN_THREAD(m_pThread, oref);
+
+ {
+ // We may get back an RCW from another STA thread (mainly in the WinRT factory cache scenario,
+ // as those factories are typically singletons), and we can only touch the RCW if we hold the lock;
+ // otherwise we may AV if the STA thread dies and takes the RCW with it.
+ RCWCache::LockHolder lh(m_pWrapperCache);
+
+ RCWHolder pRCW(m_pThread);
+ m_pWrapperCache->FindWrapperInCache_NoLock(
+ m_pIdentity,
+ &pRCW);
+ if (!pRCW.IsNull())
+ {
+ bool bShouldUseThisRCW = true;
+
+ if (m_pCallback)
+ bShouldUseThisRCW = m_pCallback->ShouldUseThisRCW(pRCW.GetRawRCWUnsafe());
+
+ if (bShouldUseThisRCW)
+ {
+ oref = (OBJECTREF)pRCW->GetExposedObject();
+ if (m_pCallback)
+ m_pCallback->OnRCWCacheHit(pRCW.GetRawRCWUnsafe());
+ }
+ }
+ }
+
+ GCPROTECT_END();
+
+ if (oref != NULL)
+ return oref;
+ }
+
+ // (II)
+ // Initialize all our flags
+ // this should set up all the info we need
+ InitializeFlags();
+
+ // (III)
+ // check for IManaged interface
+ if (m_pIManaged)
+ {
+ oref = HandleTPComponents();
+ if (oref != NULL)
+ return oref;
+ }
+
+ // (IV)
+ // okay, let's create a wrapper and an instance for this IUnknown
+
+ // Find a suitable class to instantiate the instance
+ if (ppIncomingIP != NULL)
+ {
+ InitializeObjectClass(*ppIncomingIP);
+ }
+ else
+ {
+ InitializeObjectClass(m_pUnknown);
+ }
+
+
+ GCPROTECT_BEGIN_THREAD(m_pThread, oref)
+ {
+ if (m_fIReference)
+ IReferenceUnbox(ppIncomingIP, &oref, bIncomingIPAddRefed);
+ else if (m_fIReferenceArray)
+ IReferenceArrayUnbox(ppIncomingIP, &oref, bIncomingIPAddRefed);
+ else if (m_fNonRCWType)
+ MarshalToNonRCWType(&oref);
+ else
+ CreateObjectRef(NeedUniqueObject(), &oref, ppIncomingIP, pIncomingItfMT, bIncomingIPAddRefed);
+ }
+ GCPROTECT_END();
+
+ return oref;
+}
+
+VOID COMInterfaceMarshaler::InitializeExistingComObject(OBJECTREF *pComObj, IUnknown **ppIncomingIP)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(!m_typeHandle.IsNull());
+ PRECONDITION(IsProtectedByGCFrame(pComObj));
+ }
+ CONTRACTL_END;
+
+ CreateObjectRef(NeedUniqueObject(), pComObj, ppIncomingIP, /* pIncomingItfMT = */ NULL, /* bIncomingIPAddRefed = */ true);
+}
+
+//--------------------------------------------------------------------------------
+// Helper to wrap an IUnknown with COM object
+//--------------------------------------------------------------------------------
+OBJECTREF COMInterfaceMarshaler::WrapWithComObject()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF oref = NULL;
+ GCPROTECT_BEGIN(oref)
+ {
+ CreateObjectRef(
+ TRUE, // fDuplicate
+ &oref, // pComObj
+ NULL, // ppIncomingIP
+ NULL, // pIncomingItfMT
+ false // bIncomingIPAddRefed
+ );
+ }
+ GCPROTECT_END();
+
+ return oref;
+}
+
+//--------------------------------------------------------------------------------
+// VOID EnsureCOMInterfacesSupported(OBJECTREF oref, MethodTable* pClassMT)
+// Make sure the oref supports all the COM interfaces in the class
+VOID COMInterfaceMarshaler::EnsureCOMInterfacesSupported(OBJECTREF oref, MethodTable* pClassMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pClassMT));
+ PRECONDITION(pClassMT->IsComObjectType());
+ }
+ CONTRACTL_END;
+
+ // Make sure the COM object supports all the COM imported interfaces that the new
+ // wrapper class implements.
+ GCPROTECT_BEGIN(oref);
+ MethodTable::InterfaceMapIterator it = pClassMT->IterateInterfaceMap();
+
+ while (it.Next())
+ {
+ MethodTable *pItfMT = it.GetInterface();
+ if (!pItfMT)
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOT_COERCE_COMOBJECT);
+
+ if (pItfMT->IsComImport())
+ {
+ if (!Object::SupportsInterface(oref, pItfMT))
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOT_COERCE_COMOBJECT);
+ }
+ }
+
+ GCPROTECT_END();
+}
+
+bool COMInterfaceMarshaler::SupportsIInspectable()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_flags & RCW::CF_SupportsIInspectable) != 0;
+}
+
+bool COMInterfaceMarshaler::DontResolveClass()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_flags & RCW::CF_DontResolveClass) != 0;
+}
+
+bool COMInterfaceMarshaler::NeedUniqueObject()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_flags & RCW::CF_NeedUniqueObject) != 0;
+}
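+
+// Illustrative flag usage (mirroring the unbox paths above): callers OR together
+// RCW creation flags before Init, e.g.
+//
+//   DWORD flags = RCW::CF_DontResolveClass | RCW::CF_NeedUniqueObject;
+//   marshaler.Init(pUnk, g_pBaseCOMObject, pThread, flags);
+//
+// and the three accessors above simply test the corresponding bit in m_flags.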
diff --git a/src/vm/cominterfacemarshaler.h b/src/vm/cominterfacemarshaler.h
new file mode 100644
index 0000000000..7836c52871
--- /dev/null
+++ b/src/vm/cominterfacemarshaler.h
@@ -0,0 +1,112 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: ComInterfaceMarshaler.h
+//
+
+//
+
+
+#ifndef _H_COMInterfaceMarshaler_
+#define _H_COMInterfaceMarshaler_
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+class ICOMInterfaceMarshalerCallback
+{
+public:
+ // Callback invoked when we create an RCW and insert it into the cache
+ virtual void OnRCWCreated(RCW *pRCW) = 0;
+
+ // Callback invoked when we get an RCW back from the cache
+ virtual void OnRCWCacheHit(RCW *pRCW) = 0;
+
+ // Callback invoked to determine whether we should use this RCW.
+ // Return true if COMInterfaceMarshaler should use this RCW;
+ // return false if it should skip this RCW and proceed to create
+ // a duplicate one instead.
+ virtual bool ShouldUseThisRCW(RCW *pRCW) = 0;
+};
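+
+// Illustrative sketch (hypothetical, not part of this change): a minimal callback
+// that always accepts a cached RCW and merely observes creation/cache-hit events
+// could look like this:
+//
+//   class NoopMarshalerCallback : public ICOMInterfaceMarshalerCallback
+//   {
+//   public:
+//       virtual void OnRCWCreated(RCW *pRCW)     { /* observe creation */ }
+//       virtual void OnRCWCacheHit(RCW *pRCW)    { /* observe cache hit */ }
+//       virtual bool ShouldUseThisRCW(RCW *pRCW) { return true; }
+//   };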
+
+//--------------------------------------------------------------------------------
+// class ComInterfaceMarshaler
+//--------------------------------------------------------------------------------
+class COMInterfaceMarshaler
+{
+public:
+ COMInterfaceMarshaler();
+ virtual ~COMInterfaceMarshaler();
+
+ VOID Init(IUnknown* pUnk, MethodTable* pClassMT, Thread *pThread, DWORD flags = 0); // see RCW::CreationFlags
+
+ // Sets an ICOMInterfaceMarshalerCallback pointer to be called when an RCW is created or retrieved from the cache.
+ // Note that the caller owns the lifetime of the callback object and must keep it
+ // alive until the last call to any function on COMInterfaceMarshaler.
+ VOID SetCallback(ICOMInterfaceMarshalerCallback *pCallback)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pCallback != NULL);
+ m_pCallback = pCallback;
+ }
+
+ VOID InitializeFlags();
+
+ VOID InitializeObjectClass(IUnknown *pIncomingIP);
+
+ OBJECTREF FindOrCreateObjectRef(IUnknown **ppIncomingIP, MethodTable *pIncomingItfMT = NULL);
+ OBJECTREF FindOrCreateObjectRef(IUnknown *pIncomingIP, MethodTable *pIncomingItfMT = NULL);
+
+ OBJECTREF WrapWithComObject();
+
+ VOID InitializeExistingComObject(OBJECTREF *pComObj, IUnknown **ppIncomingIP);
+
+ static void IReferenceOrIReferenceArrayUnboxWorker(OBJECTREF oref, TypeHandle thT, BOOL fIsIReferenceArray, OBJECTREF *porefResult);
+ static void IKeyValuePairUnboxWorker(OBJECTREF oref, OBJECTREF *porefResult);
+
+private:
+ OBJECTREF GetCCWObject();
+ OBJECTREF HandleInProcManagedComponent();
+ OBJECTREF HandleTPComponents();
+ OBJECTREF GetObjectForRemoteManagedComponent();
+ OBJECTREF GetObjectForRemoteManagedComponentNoThrow();
+
+ OBJECTREF FindOrCreateObjectRefInternal(IUnknown **ppIncomingIP, MethodTable *pIncomingItfMT, bool bIncomingIPAddRefed);
+ VOID CreateObjectRef(BOOL fDuplicate, OBJECTREF *pComObj, IUnknown **ppIncomingIP, MethodTable *pIncomingItfMT, bool bIncomingIPAddRefed);
+ void IReferenceUnbox(IUnknown **ppIncomingIP, OBJECTREF *poref, bool bIncomingIPAddRefed);
+ void IReferenceArrayUnbox(IUnknown **ppIncomingIP, OBJECTREF *poref, bool bIncomingIPAddRefed);
+ void MarshalToNonRCWType(OBJECTREF *poref);
+ static VOID EnsureCOMInterfacesSupported(OBJECTREF oref, MethodTable* pClassMT);
+
+ inline bool SupportsIInspectable();
+ inline bool DontResolveClass();
+ inline bool NeedUniqueObject();
+
+ RCWCache* m_pWrapperCache; // initialization info
+ IUnknown* m_pUnknown; // NOT AddRef'ed
+ IUnknown* m_pIdentity; // NOT AddRef'ed
+ TypeHandle m_typeHandle; // set at Init time; computed later if the initial value is NULL. Needs to represent all array information too.
+ TypeHandle m_itfTypeHandle; // an interface supported by the object as returned from GetRuntimeClassName
+ IManagedObject* m_pIManaged; // AddRef'ed - computed info
+ Thread* m_pThread; // Current thread - avoid calling GetThread multiple times
+
+ INDEBUG(bool m_fFlagsInited;)
+ bool m_fIsRemote;
+ bool m_fIReference; // Is this an IReference<T> (ie, a WinRT "boxed" value type)
+ bool m_fIReferenceArray; // Is this an IReferenceArray<T> (ie, an array wrapped in a WinRT interface)
+ bool m_fNonRCWType; // Is this redirected to a non-RCW CLR type
+
+ DWORD m_flags;
+
+ ICOMInterfaceMarshalerCallback *m_pCallback; // Callback invoked when we create an RCW or get one back from the cache
+
+ // For Transparent Proxies
+ ADID m_dwServerDomainId;
+ DWORD m_dwServerSyncBlockIndex;
+};
+
+
+#endif // #ifndef _H_COMInterfaceMarshaler_
diff --git a/src/vm/comisolatedstorage.cpp b/src/vm/comisolatedstorage.cpp
new file mode 100644
index 0000000000..fd31a079fd
--- /dev/null
+++ b/src/vm/comisolatedstorage.cpp
@@ -0,0 +1,1070 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//============================================================
+//
+// Class: COMIsolatedStorage
+//
+// Purpose: Native Implementation of IsolatedStorage
+//
+
+//
+
+//============================================================
+
+
+#include "common.h"
+#include "excep.h"
+#include "eeconfig.h"
+#include "qcall.h"
+#include "comisolatedstorage.h"
+
+#ifdef FEATURE_ISOSTORE
+
+#include <shlobj.h>
+
+#ifndef FEATURE_ISOSTORE_LIGHT
+#define IS_ROAMING(x) ((x) & ISS_ROAMING_STORE)
+#endif // !FEATURE_ISOSTORE_LIGHT
+
+void DECLSPEC_NORETURN COMIsolatedStorage::ThrowISS(HRESULT hr)
+{
+ STANDARD_VM_CONTRACT;
+
+ if ((hr >= ISS_E_ISOSTORE_START) && (hr <= ISS_E_ISOSTORE_END))
+ {
+ switch (hr)
+ {
+ case ISS_E_ISOSTORE :
+ case ISS_E_OPEN_STORE_FILE :
+ case ISS_E_OPEN_FILE_MAPPING :
+ case ISS_E_MAP_VIEW_OF_FILE :
+ case ISS_E_GET_FILE_SIZE :
+ case ISS_E_CREATE_MUTEX :
+ case ISS_E_LOCK_FAILED :
+ case ISS_E_FILE_WRITE :
+ case ISS_E_SET_FILE_POINTER :
+ case ISS_E_CREATE_DIR :
+ case ISS_E_CORRUPTED_STORE_FILE :
+ case ISS_E_STORE_VERSION :
+ case ISS_E_FILE_NOT_MAPPED :
+ case ISS_E_BLOCK_SIZE_TOO_SMALL :
+ case ISS_E_ALLOC_TOO_LARGE :
+ case ISS_E_USAGE_WILL_EXCEED_QUOTA :
+ case ISS_E_TABLE_ROW_NOT_FOUND :
+ case ISS_E_DEPRECATE :
+ case ISS_E_CALLER :
+ case ISS_E_PATH_LENGTH :
+ case ISS_E_MACHINE :
+ case ISS_E_STORE_NOT_OPEN :
+ case ISS_E_MACHINE_DACL :
+ COMPlusThrowHR(hr);
+ break;
+
+ default :
+ _ASSERTE(!"Unknown hr");
+ }
+ }
+
+ COMPlusThrowHR(hr);
+}
+
+#ifndef FEATURE_ISOSTORE_LIGHT
+StackWalkAction COMIsolatedStorage::StackWalkCallBack(
+ CrawlFrame* pCf, PVOID ppv)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ // Get the function descriptor for this frame...
+ MethodDesc *pMeth = pCf->GetFunction();
+ MethodTable *pMT = pMeth->GetMethodTable();
+
+ // Skip the Isolated Store and all its subclasses.
+ // <TODO>@Todo : This will work for now, but need to walk up to the base class
+ // @Todo : Work out the JIT inlining issues</TODO>
+
+ if ((MscorlibBinder::IsClass(pMT, CLASS__ISS_STORE)) ||
+ (MscorlibBinder::IsClass(pMT, CLASS__ISS_STORE_FILE)) ||
+ (MscorlibBinder::IsClass(pMT, CLASS__ISS_STORE_FILE_STREAM)))
+ {
+ LOG((LF_STORE, LL_INFO10000, "StackWalk Continue %s\n",
+ pMeth->m_pszDebugMethodName));
+ return SWA_CONTINUE;
+ }
+
+ *(PVOID *)ppv = pMeth->GetModule()->GetAssembly();
+
+ return SWA_ABORT;
+}
+
+void QCALLTYPE COMIsolatedStorage::GetCaller(QCall::ObjectHandleOnStack retAssembly)
+{
+ QCALL_CONTRACT;
+
+ DomainAssembly * pDomainAssembly = NULL;
+
+ BEGIN_QCALL;
+
+ Assembly * pAssembly = NULL;
+ StackWalkAction result;
+
+ {
+ GCX_COOP();
+ result = StackWalkFunctions(GetThread(), StackWalkCallBack, (VOID*)&pAssembly);
+ }
+
+ if (result == SWA_FAILED)
+ ThrowISS(ISS_E_CALLER);
+
+ if (pAssembly == NULL)
+ ThrowISS(ISS_E_CALLER);
+
+#ifdef _DEBUG
+ LOG((LF_STORE, LL_INFO10000, "StackWalk Found %s\n", pAssembly->GetSimpleName()));
+#endif
+
+ pDomainAssembly = pAssembly->GetDomainAssembly();
+
+ GCX_COOP();
+ retAssembly.Set(pDomainAssembly->GetExposedAssemblyObject());
+ END_QCALL;
+
+ return;
+}
+#endif // !FEATURE_ISOSTORE_LIGHT
+
+// static
+UINT64 QCALLTYPE COMIsolatedStorageFile::GetUsage(__in_opt AccountingInfo * pAI)
+{
+ QCALL_CONTRACT;
+
+ UINT64 retVal = 0;
+ BEGIN_QCALL;
+
+ if (pAI == NULL)
+ COMIsolatedStorage::ThrowISS(ISS_E_STORE_NOT_OPEN);
+
+ PREFIX_ASSUME(pAI != NULL);
+
+ HRESULT hr = pAI->GetUsage(&retVal);
+
+ if (FAILED(hr))
+ COMIsolatedStorage::ThrowISS(hr);
+
+ END_QCALL;
+ return retVal;
+}
+
+// static
+void QCALLTYPE COMIsolatedStorageFile::Close(__in_opt AccountingInfo * pAI)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ if (pAI != NULL)
+ delete pAI;
+
+ END_QCALL;
+}
+
+//static
+BOOL QCALLTYPE COMIsolatedStorageFile::Lock(__in AccountingInfo * pAI,
+ BOOL fLock)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(pAI != NULL);
+ } CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ if (fLock)
+ AccountingInfo::AcquireLock(pAI);
+ else
+ AccountingInfo::ReleaseLock(pAI);
+
+ END_QCALL;
+
+ // AcquireLock will throw if it fails; ReleaseLock will assert on failure
+ return TRUE;
+}
+
+// static
+AccountingInfo * QCALLTYPE COMIsolatedStorageFile::Open(LPCWSTR wszFileName,
+ LPCWSTR wszSyncName)
+{
+ CONTRACT(AccountingInfo *)
+ {
+ QCALL_CHECK;
+ PRECONDITION(wszFileName != NULL);
+ PRECONDITION(wszSyncName != NULL);
+ POSTCONDITION(RETVAL != NULL);
+ } CONTRACT_END;
+
+ AccountingInfo * pReturn = NULL;
+ BEGIN_QCALL;
+
+ AccountingInfo *pAI = new AccountingInfo(wszFileName, wszSyncName);
+
+ HRESULT hr = pAI->Init();
+
+ if (FAILED(hr))
+ COMIsolatedStorage::ThrowISS(hr);
+
+ pReturn = pAI;
+
+ END_QCALL;
+ RETURN(pReturn);
+}
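+
+// Lifecycle sketch (illustrative; the local names are hypothetical): the managed
+// side drives these QCALLs roughly as
+//
+//   AccountingInfo *pAI = COMIsolatedStorageFile::Open(wszStoreFile, wszSyncName);
+//   COMIsolatedStorageFile::Reserve(pAI, qwQuota, cbRequest, FALSE);  // reserve space
+//   ...
+//   COMIsolatedStorageFile::Reserve(pAI, qwQuota, cbRequest, TRUE);   // free it again
+//   COMIsolatedStorageFile::Close(pAI);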
+
+// static
+void QCALLTYPE COMIsolatedStorageFile::Reserve(__in_opt AccountingInfo * pAI,
+ UINT64 qwQuota,
+ UINT64 qwReserve,
+ BOOL fFree)
+{
+ QCALL_CONTRACT;
+ BEGIN_QCALL;
+
+ if (pAI == NULL)
+ COMIsolatedStorage::ThrowISS(ISS_E_STORE_NOT_OPEN);
+
+ PREFIX_ASSUME(pAI != NULL);
+ HRESULT hr = pAI->Reserve(qwQuota, qwReserve, fFree);
+
+ if (FAILED(hr))
+ {
+#ifdef _DEBUG
+ if (fFree) {
+ LOG((LF_STORE, LL_INFO10000, "free 0x%x failed\n",
+ (int)(qwReserve)));
+ }
+ else {
+ LOG((LF_STORE, LL_INFO10000, "reserve 0x%x failed\n",
+ (int)(qwReserve)));
+ }
+#endif
+ COMIsolatedStorage::ThrowISS(hr);
+ }
+
+#ifdef _DEBUG
+ if (fFree) {
+ LOG((LF_STORE, LL_INFO10000, "free 0x%x\n",
+ (int)(qwReserve)));
+ } else {
+ LOG((LF_STORE, LL_INFO10000, "reserve 0x%x\n",
+ (int)(qwReserve)));
+ }
+#endif
+
+ END_QCALL;
+}
+
+// static
+BOOL QCALLTYPE COMIsolatedStorageFile::GetQuota(__in_opt AccountingInfo * pAI,
+ __out INT64 * qwQuota)
+{
+ QCALL_CONTRACT;
+ BOOL retVal = false;
+ BEGIN_QCALL;
+
+ if (pAI == NULL)
+ COMIsolatedStorage::ThrowISS(ISS_E_STORE_NOT_OPEN);
+
+ PREFIX_ASSUME(pAI != NULL);
+
+ retVal = pAI->GetQuota(qwQuota);
+
+ END_QCALL;
+
+ return retVal;
+}
+
+// static
+void QCALLTYPE COMIsolatedStorageFile::SetQuota(__in_opt AccountingInfo * pAI,
+ INT64 qwQuota)
+{
+ QCALL_CONTRACT;
+ BEGIN_QCALL;
+
+ if (pAI == NULL)
+ COMIsolatedStorage::ThrowISS(ISS_E_STORE_NOT_OPEN);
+
+ PREFIX_ASSUME(pAI != NULL);
+
+ pAI->SetQuota(qwQuota);
+
+ END_QCALL;
+}
+
+// static
+void QCALLTYPE COMIsolatedStorageFile::GetRootDir(DWORD dwFlags,
+ QCall::StringHandleOnStack retRootDir)
+{
+ QCALL_CONTRACT;
+ BEGIN_QCALL;
+
+ WCHAR wszPath[MAX_PATH + 1];
+ GetRootDirInternal(dwFlags, wszPath, COUNTOF(wszPath));
+ retRootDir.Set(wszPath);
+
+ END_QCALL;
+}
+
+#ifndef FEATURE_ISOSTORE_LIGHT
+// static
+void QCALLTYPE COMIsolatedStorageFile::CreateDirectoryWithDacl(LPCWSTR wszPath)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(wszPath != NULL);
+ } CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ SECURITY_ATTRIBUTES *pSecAttrib = NULL;
+
+ SECURITY_ATTRIBUTES SecAttrib;
+ SECURITY_DESCRIPTOR sd;
+ NewArrayHolder<ACL> pDacl = NULL;
+
+ memset(&SecAttrib, 0, sizeof(SecAttrib));
+
+ BOOL ret = InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION);
+ if (!ret)
+ COMIsolatedStorage::ThrowISS(ISS_E_MACHINE_DACL);
+
+ HRESULT hr = GetMachineStoreDacl(&pDacl);
+ if (FAILED(hr))
+ COMIsolatedStorage::ThrowISS(ISS_E_MACHINE_DACL);
+
+ ret = SetSecurityDescriptorDacl(&sd, TRUE, pDacl, FALSE);
+ if (!ret)
+ COMIsolatedStorage::ThrowISS(ISS_E_MACHINE_DACL);
+
+ SecAttrib.nLength = sizeof(SECURITY_ATTRIBUTES);
+ SecAttrib.bInheritHandle = FALSE;
+ SecAttrib.lpSecurityDescriptor = &sd;
+ pSecAttrib = &SecAttrib;
+
+ CreateDirectoryIfNotPresent(wszPath, pSecAttrib);
+
+ END_QCALL;
+}
+
+// Get the machine location for Isolated Storage
+BOOL COMIsolatedStorageFile::GetMachineStoreDirectory (__out_ecount(cchMachineStorageRoot) WCHAR * wszMachineStorageRoot,
+ DWORD cchMachineStorageRoot)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ HRESULT hr = WszSHGetFolderPath(NULL,
+ CSIDL_COMMON_APPDATA | CSIDL_FLAG_CREATE,
+ NULL,
+ SHGFP_TYPE_CURRENT,
+ cchMachineStorageRoot,
+ wszMachineStorageRoot);
+ LOG((LF_STORE, LL_INFO10000, "GetMachineStoreDirectory returned [%#x].\n", hr));
+ return SUCCEEDED(hr);
+}
+
+// Creates a DACL for the machine store directory so that
+// everyone may create directories beneath it.
+// This method should only be called on NT platforms.
+
+HRESULT COMIsolatedStorageFile::GetMachineStoreDacl(PACL * ppAcl)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+ SID_IDENTIFIER_AUTHORITY siaWorld = SECURITY_WORLD_SID_AUTHORITY;
+ SID_IDENTIFIER_AUTHORITY siaNTAuth = SECURITY_NT_AUTHORITY;
+ SID_IDENTIFIER_AUTHORITY siaCreatorOwnerAuthority = SECURITY_CREATOR_SID_AUTHORITY;
+ PSID pEveryoneSid = NULL;
+ PSID pAdminsSid = NULL;
+ PSID pCreatorOwnerSid = NULL;
+
+ //
+ // prepare Sids representing the world and admins
+ //
+
+ if (!AllocateAndInitializeSid(&siaWorld,
+ 1,
+ SECURITY_WORLD_RID,
+ 0, 0, 0, 0, 0, 0, 0,
+ &pEveryoneSid)) {
+ hr = HRESULT_FROM_GetLastError();
+ goto ErrorExit;
+ }
+
+ if (!AllocateAndInitializeSid(&siaNTAuth,
+ 2,
+ SECURITY_BUILTIN_DOMAIN_RID,
+ DOMAIN_ALIAS_RID_ADMINS,
+ 0, 0, 0, 0, 0, 0,
+ &pAdminsSid)) {
+ hr = HRESULT_FROM_GetLastError();
+ goto ErrorExit;
+ }
+
+ if (!AllocateAndInitializeSid(&siaCreatorOwnerAuthority,
+ 1,
+ SECURITY_CREATOR_OWNER_RID,
+ 0, 0, 0, 0, 0, 0, 0,
+ &pCreatorOwnerSid)) {
+ hr = HRESULT_FROM_GetLastError();
+ goto ErrorExit;
+ }
+
+ //
+ // compute size of new Acl
+ //
+
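+ // (Note: ACCESS_ALLOWED_ACE already embeds the first DWORD of its SID as the
+ // SidStart field, so each of the three ACEs contributes
+ // sizeof(ACCESS_ALLOWED_ACE) - sizeof(DWORD) bytes of fixed header plus the
+ // full length of its SID.)
+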
+ DWORD dwAclSize = sizeof(ACL)
+ + 3 * (sizeof(ACCESS_ALLOWED_ACE) - sizeof(DWORD))
+ + GetLengthSid(pEveryoneSid)
+ + GetLengthSid(pAdminsSid)
+ + GetLengthSid(pCreatorOwnerSid);
+
+ *ppAcl = new ACL[dwAclSize / sizeof(ACL) + 1];
+ if (!InitializeAcl(*ppAcl, dwAclSize, ACL_REVISION)) {
+ hr = HRESULT_FROM_GetLastError();
+ goto ErrorExit;
+ }
+
+ if (!AddAccessAllowedAce(*ppAcl,
+ ACL_REVISION,
+ (FILE_GENERIC_WRITE | FILE_GENERIC_READ) & (~WRITE_DAC),
+ pEveryoneSid)) {
+ hr = HRESULT_FROM_GetLastError();
+ goto ErrorExit;
+ }
+
+ if (!AddAccessAllowedAce(*ppAcl,
+ ACL_REVISION,
+ FILE_ALL_ACCESS,
+ pAdminsSid)) {
+ hr = HRESULT_FROM_GetLastError();
+ goto ErrorExit;
+ }
+
+ if (!AddAccessAllowedAce(*ppAcl,
+ ACL_REVISION,
+ FILE_ALL_ACCESS,
+ pCreatorOwnerSid)) {
+ hr = HRESULT_FROM_GetLastError();
+ goto ErrorExit;
+ }
+
+ //
+ // make ACL inheritable
+ //
+
+ PACCESS_ALLOWED_ACE pAce;
+ for (DWORD index = 0; index < 3; index++) {
+ if(!GetAce(*ppAcl, index, (void **) &pAce)) {
+ hr = HRESULT_FROM_GetLastError();
+ goto ErrorExit;
+ }
+ pAce->Header.AceFlags = CONTAINER_INHERIT_ACE | OBJECT_INHERIT_ACE;
+ }
+
+ErrorExit:
+ if (NULL != pEveryoneSid)
+ FreeSid(pEveryoneSid);
+ if (NULL != pAdminsSid)
+ FreeSid(pAdminsSid);
+ if (NULL != pCreatorOwnerSid)
+ FreeSid(pCreatorOwnerSid);
+
+ LOG((LF_STORE, LL_INFO10000, "GetMachineStoreDacl returned error code [%#x].\n", hr));
+
+ return hr;
+}
+#endif // !FEATURE_ISOSTORE_LIGHT
+
+// Throws on error
+void COMIsolatedStorageFile::CreateDirectoryIfNotPresent(__in_z const WCHAR *path, LPSECURITY_ATTRIBUTES lpSecurityAttributes)
+{
+ STANDARD_VM_CONTRACT;
+
+ LONG lresult;
+
+ // Check if the directory is already present
+ lresult = WszGetFileAttributes(path);
+
+ if (lresult == -1)
+ {
+ if (!WszCreateDirectory(path, lpSecurityAttributes))
+ {
+ COMIsolatedStorage::ThrowISS(ISS_E_CREATE_DIR);
+ }
+ }
+ else if ((lresult & FILE_ATTRIBUTE_DIRECTORY) == 0)
+ {
+ COMIsolatedStorage::ThrowISS(ISS_E_CREATE_DIR);
+ }
+}
+
+// Synchronized by the managed caller
+
+#ifdef FEATURE_ISOSTORE_LIGHT
+
+const WCHAR* const g_relativePath[] = {
+ W("\\CoreIsolatedStorage")
+};
+
+#define nRelativePathLen ( \
+ sizeof("\\CoreIsolatedStorage") + 1)
+
+#else // FEATURE_ISOSTORE_LIGHT
+
+const WCHAR* const g_relativePath[] = {
+ W("\\IsolatedStorage")
+};
+
+#define nRelativePathLen ( \
+ sizeof("\\IsolatedStorage") + 1)
+
+#endif // FEATURE_ISOSTORE_LIGHT
+
+#define nSubDirs (sizeof(g_relativePath)/sizeof(g_relativePath[0]))
+
+void COMIsolatedStorageFile::GetRootDirInternal(
+ DWORD dwFlags, __in_ecount(cPath) WCHAR *path, DWORD cPath)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(cPath > 1);
+ PRECONDITION(cPath <= MAX_PATH + 1);
+ } CONTRACTL_END;
+
+ ULONG len;
+
+ --cPath; // To be safe.
+ path[cPath] = 0;
+
+ // Get roaming or local App Data locations
+ BOOL res;
+
+#ifdef FEATURE_ISOSTORE_LIGHT
+ res = GetUserDir(path, cPath, FALSE);
+#else
+ if ((dwFlags & ISS_MACHINE_STORE) == 0)
+ res = GetUserDir(path, cPath, IS_ROAMING(dwFlags));
+ else
+ res = GetMachineStoreDirectory(path, cPath);
+#endif // !FEATURE_ISOSTORE_LIGHT
+ LOG((LF_STORE, LL_INFO10000, "The isolated storage root directory location is [%S].\n", path));
+
+ if (!res)
+ {
+ COMIsolatedStorage::ThrowISS(ISS_E_CREATE_DIR);
+ }
+
+ len = (ULONG)wcslen(path);
+
+ if ((len + nRelativePathLen + 1) > cPath)
+ COMIsolatedStorage::ThrowISS(ISS_E_PATH_LENGTH);
+
+ CreateDirectoryIfNotPresent(path);
+
+ // Create the store directory if necessary
+ for (unsigned int i=0; i<nSubDirs; ++i)
+ {
+ wcscat_s(path, cPath, g_relativePath[i]);
+ CreateDirectoryIfNotPresent(path);
+ }
+
+ wcscat_s(path, cPath, W("\\"));
+}
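+
+// Example (illustrative): with FEATURE_ISOSTORE_LIGHT the code above takes the user
+// data directory returned by GetUserDir, appends "\\CoreIsolatedStorage" and a
+// trailing "\\", and creates each directory along the way if it is not yet present.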
+
+#define WSZ_GLOBAL W("Global\\")
+
+//--------------------------------------------------------------------------
+// The file name is used to open / create the file.
+// A synchronization object will also be created using this name
+// with '\' replaced by '-'
+//--------------------------------------------------------------------------
+AccountingInfo::AccountingInfo(const WCHAR *wszFileName, const WCHAR *wszSyncName) :
+ m_hFile(INVALID_HANDLE_VALUE),
+ m_hMapping(NULL),
+ m_hLock(NULL),
+ m_pData(NULL)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _DEBUG
+ m_dwNumLocks = 0;
+#endif
+
+ int buffLen;
+ buffLen = (int)wcslen(wszFileName) + 1;
+
+ NewArrayHolder<WCHAR> pwszFileName(new WCHAR[buffLen]);
+
+ // The string length is known, so a memcpy here would be faster; however, it
+ // would make the code here and below less readable, and this is not a very
+ // frequent operation, so there is no real perf gain. The same comment applies
+ // to the string copy that follows.
+
+ wcscpy_s(pwszFileName, buffLen, wszFileName);
+
+ _ASSERTE(((int)wcslen(pwszFileName) + 1) <= buffLen);
+
+ // Allocate the Mutex name
+ buffLen = (int)wcslen(wszSyncName) + (sizeof(WSZ_GLOBAL) / sizeof(WCHAR)) + 1;
+
+ NewArrayHolder<WCHAR> pwszName(new WCHAR[buffLen]);
+
+ wcscpy_s(pwszName, buffLen, WSZ_GLOBAL);
+ wcscat_s(pwszName, buffLen, wszSyncName);
+
+ _ASSERTE(((int)wcslen(pwszName) + 1) <= buffLen);
+
+ pwszFileName.SuppressRelease();
+ pwszName.SuppressRelease();
+
+ // Now publish the strings
+ m_wszFileName = pwszFileName;
+ m_wszName = pwszName;
+}
+
+//--------------------------------------------------------------------------
+// Frees memory, and open handles
+//--------------------------------------------------------------------------
+AccountingInfo::~AccountingInfo()
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }CONTRACTL_END;
+
+ if (m_pData)
+ CLRUnmapViewOfFile(m_pData);
+
+ if (m_hMapping != NULL)
+ CloseHandle(m_hMapping);
+
+ if (m_hFile != INVALID_HANDLE_VALUE)
+ CloseHandle(m_hFile);
+
+ if (m_hLock != NULL)
+ CloseHandle(m_hLock);
+
+ if (m_wszFileName)
+ delete [] m_wszFileName;
+
+ if (m_wszName)
+ delete [] m_wszName;
+
+ _ASSERTE(m_dwNumLocks == 0);
+}
+
+//--------------------------------------------------------------------------
+// Init should be called before Reserve / GetUsage is called.
+// Creates the file if necessary
+//--------------------------------------------------------------------------
+HRESULT AccountingInfo::Init()
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(m_hLock == NULL); // Init was called multiple times on this object without calling Close
+ } CONTRACTL_END;
+
+ // Create the synchronization object
+
+ HRESULT hr = S_OK;
+ m_hLock = WszCreateMutex(NULL, FALSE /* Initially not owned */, m_wszName);
+
+ if (m_hLock == NULL)
+ IfFailGo(ISS_E_CREATE_MUTEX);
+
+ // Init was called multiple times on this object without calling Close
+
+ _ASSERTE(m_hFile == INVALID_HANDLE_VALUE);
+
+ {
+ // The default DACL is fine here since we've already set the DACL on the root
+ m_hFile = WszCreateFile(m_wszFileName,
+ GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ NULL,
+ OPEN_ALWAYS,
+ FILE_FLAG_RANDOM_ACCESS,
+ NULL);
+
+ if (m_hFile == INVALID_HANDLE_VALUE)
+ IfFailGo(ISS_E_OPEN_STORE_FILE);
+ }
+
+ // If this file was created for the first time, then create the accounting
+ // record and set to zero
+ {
+ AccountingInfoLockHolder pAI(this);
+
+ DWORD dwLow = 0, dwHigh = 0; // For checking file size
+ QWORD qwSize;
+
+ dwLow = ::GetFileSize(m_hFile, &dwHigh);
+
+ if ((dwLow == 0xFFFFFFFF) && (GetLastError() != NO_ERROR))
+ {
+ IfFailGo(ISS_E_GET_FILE_SIZE);
+ }
+
+ qwSize = ((QWORD)dwHigh << 32) | dwLow;
+
+ if (qwSize < sizeof(ISS_RECORD))
+ {
+ DWORD dwWrite;
+
+ // Need to create the initial file
+ NewArrayHolder<BYTE> pb(new BYTE[sizeof(ISS_RECORD)]);
+
+ memset(pb, 0, sizeof(ISS_RECORD));
+
+ dwWrite = 0;
+
+ if ((WriteFile(m_hFile, pb, sizeof(ISS_RECORD), &dwWrite, NULL)
+ == 0) || (dwWrite != sizeof(ISS_RECORD)))
+ {
+ IfFailGo(ISS_E_FILE_WRITE);
+ }
+ }
+
+ // The lock is released when the holder goes out of scope here
+ }
+ErrExit:
+ ;
+ return hr;
+}
+
+//--------------------------------------------------------------------------
+// Get the amount of quota saved on disk. Some hosts may allow users of
+// IsolatedStorage to increase the quota. If so, we persist this data.
+// If there is no saved quota, this method returns FALSE.
+//--------------------------------------------------------------------------
+BOOL AccountingInfo::GetQuota(
+ INT64 *qwQuota)
+{
+ STANDARD_VM_CONTRACT;
+
+ BOOL retVal = FALSE;
+ HRESULT hr = S_OK;
+ {
+ AccountingInfoLockHolder pAI(this);
+
+ hr = Map();
+
+ if (SUCCEEDED(hr))
+ {
+ if(m_pISSRecord->dwVersion >= 1) {
+ *qwQuota = m_pISSRecord->qwQuota;
+ retVal = TRUE;
+ } else {
+ *qwQuota = 0;
+ retVal = FALSE;
+ }
+ Unmap();
+ }
+ else
+ {
+ *qwQuota = 0;
+ retVal = FALSE;
+ }
+ }
+ return retVal;
+}
+
+//--------------------------------------------------------------------------
+// Sets the amount of quota saved on disk. Some hosts may allow users of
+// IsolatedStorage to increase the quota. If so, we persist this data.
+//--------------------------------------------------------------------------
+void AccountingInfo::SetQuota(
+ INT64 qwQuota)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+ {
+ AccountingInfoLockHolder pAI(this);
+
+ hr = Map();
+
+ if (SUCCEEDED(hr))
+ {
+ m_pISSRecord->dwVersion = (m_pISSRecord->dwVersion >= 1) ? m_pISSRecord->dwVersion : 1;
+ m_pISSRecord->qwQuota = qwQuota;
+ Unmap();
+ }
+ }
+}
+
+//--------------------------------------------------------------------------
+// Reserves space (increments cUsage)
+// This method is synchronized. If quota + request > limit, the method fails
+//--------------------------------------------------------------------------
+HRESULT AccountingInfo::Reserve(
+ ISS_USAGE cLimit, // The max allowed
+ ISS_USAGE cRequest, // amount of space (request / free)
+ BOOL fFree) // TRUE will free, FALSE will reserve
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+ {
+ AccountingInfoLockHolder pAI(this);
+
+ hr = Map();
+
+ if (SUCCEEDED(hr))
+ {
+ if (fFree)
+ {
+ if (m_pISSRecord->cUsage > cRequest)
+ m_pISSRecord->cUsage -= cRequest;
+ else
+ m_pISSRecord->cUsage = 0;
+ }
+ else
+ {
+ if ((m_pISSRecord->cUsage + cRequest) > cLimit)
+ hr = ISS_E_USAGE_WILL_EXCEED_QUOTA;
+ else
+ // Safe to increment usage.
+ m_pISSRecord->cUsage += cRequest;
+ }
+
+ Unmap();
+ }
+ // The lock is released when the holder goes out of scope here
+ }
+
+ return hr;
+}
+
+//--------------------------------------------------------------------------
+// This method is not synchronized, so the information may not be current.
+// This implies that "pass if (Request + GetUsage() < Limit)" is an error!
+// Use the Reserve() method instead.
+//--------------------------------------------------------------------------
+HRESULT AccountingInfo::GetUsage(ISS_USAGE *pcUsage) // pcUsage - [out]
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+ {
+ AccountingInfoLockHolder pAI(this);
+
+ hr = Map();
+
+ if (SUCCEEDED(hr))
+ {
+ *pcUsage = m_pISSRecord->cUsage;
+
+ Unmap();
+ }
+ // The lock is released when the holder goes out of scope here
+ }
+ return hr;
+}
+
+//--------------------------------------------------------------------------
+// Maps the store file into memory
+//--------------------------------------------------------------------------
+HRESULT AccountingInfo::Map()
+{
+ STANDARD_VM_CONTRACT;
+
+ // Mapping will fail if filesize is 0
+ if (m_hMapping == NULL)
+ {
+ m_hMapping = WszCreateFileMapping(
+ m_hFile,
+ NULL,
+ PAGE_READWRITE,
+ 0,
+ 0,
+ NULL);
+
+ if (m_hMapping == NULL)
+ return ISS_E_OPEN_FILE_MAPPING;
+ }
+
+ _ASSERTE(m_pData == NULL);
+
+ m_pData = (PBYTE) CLRMapViewOfFile(
+ m_hMapping,
+ FILE_MAP_WRITE,
+ 0,
+ 0,
+ 0);
+
+ if (m_pData == NULL)
+ return ISS_E_MAP_VIEW_OF_FILE;
+
+ return S_OK;
+}
+
+//--------------------------------------------------------------------------
+// Unmaps the store file from memory
+//--------------------------------------------------------------------------
+void AccountingInfo::Unmap()
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }CONTRACTL_END;
+
+
+ if (m_pData)
+ {
+ CLRUnmapViewOfFile(m_pData);
+ m_pData = NULL;
+ }
+}
+
+//--------------------------------------------------------------------------
+// Close the store file, and file mapping
+//--------------------------------------------------------------------------
+void AccountingInfo::Close()
+{
+ WRAPPER_NO_CONTRACT;
+ Unmap();
+
+ if (m_hMapping != NULL)
+ {
+ CloseHandle(m_hMapping);
+ m_hMapping = NULL;
+ }
+
+ if (m_hFile != INVALID_HANDLE_VALUE)
+ {
+ CloseHandle(m_hFile);
+ m_hFile = INVALID_HANDLE_VALUE;
+ }
+
+ if (m_hLock != NULL)
+ {
+ CloseHandle(m_hLock);
+ m_hLock = NULL;
+ }
+
+#ifdef _DEBUG
+ _ASSERTE(m_dwNumLocks == 0);
+#endif
+}
+
+//--------------------------------------------------------------------------
+// Machine wide Lock
+//--------------------------------------------------------------------------
+HRESULT AccountingInfo::Lock()
+{
+ STANDARD_VM_CONTRACT;
+
+ // Lock is intended to be used for inter-process/thread synchronization.
+
+#ifdef _DEBUG
+ _ASSERTE(m_hLock);
+
+ LOG((LF_STORE, LL_INFO10000, "Lock %S, thread 0x%x start..\n",
+ m_wszName, GetCurrentThreadId()));
+#endif
+
+ DWORD dwRet;
+ {
+ // m_hLock is a mutex
+ Thread::BeginThreadAffinityAndCriticalRegion();
+ dwRet = WaitForSingleObject(m_hLock, INFINITE);
+ }
+
+#ifdef _DEBUG
+ if (dwRet == WAIT_OBJECT_0)
+ InterlockedIncrement((LPLONG)&m_dwNumLocks);
+
+ switch (dwRet)
+ {
+ case WAIT_OBJECT_0:
+ LOG((LF_STORE, LL_INFO10000, "Lock %S, thread 0x%x - WAIT_OBJECT_0\n",
+ m_wszName, GetCurrentThreadId()));
+ break;
+
+ case WAIT_ABANDONED:
+ LOG((LF_STORE, LL_INFO10000, "Lock %S, thread 0x%x - WAIT_ABANDONED\n",
+ m_wszName, GetCurrentThreadId()));
+ break;
+
+ case WAIT_FAILED:
+ LOG((LF_STORE, LL_INFO10000, "Lock %S, thread 0x%x - WAIT_FAILED\n",
+ m_wszName, GetCurrentThreadId()));
+ break;
+
+ case WAIT_TIMEOUT:
+ LOG((LF_STORE, LL_INFO10000, "Lock %S, thread 0x%x - WAIT_TIMEOUT\n",
+ m_wszName, GetCurrentThreadId()));
+ break;
+
+ default:
+ LOG((LF_STORE, LL_INFO10000, "Lock %S, thread 0x%x - 0x%x\n",
+ m_wszName, GetCurrentThreadId(), dwRet));
+ break;
+ }
+
+#endif
+
+ if ((dwRet == WAIT_OBJECT_0) || (dwRet == WAIT_ABANDONED))
+ return S_OK;
+
+ return ISS_E_LOCK_FAILED;
+}
+
+//--------------------------------------------------------------------------
+// Unlock the store
+//--------------------------------------------------------------------------
+void AccountingInfo::Unlock()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+#ifdef _DEBUG
+ _ASSERTE(m_hLock);
+ _ASSERTE(m_dwNumLocks >= 1);
+
+ LOG((LF_STORE, LL_INFO10000, "Unlock %S, thread 0x%x\n",
+ m_wszName, GetCurrentThreadId()));
+#endif
+
+ BOOL released;
+ released = ReleaseMutex(m_hLock);
+ _ASSERTE(released);
+
+#ifdef _DEBUG
+ InterlockedDecrement((LPLONG)&m_dwNumLocks);
+#endif
+
+ Thread::EndThreadAffinityAndCriticalRegion();
+}
+
+#endif // FEATURE_ISOSTORE
diff --git a/src/vm/comisolatedstorage.h b/src/vm/comisolatedstorage.h
new file mode 100644
index 0000000000..38051d9b87
--- /dev/null
+++ b/src/vm/comisolatedstorage.h
@@ -0,0 +1,203 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//============================================================
+//
+// Class: COMIsolatedStorage
+//
+//
+// Purpose: Native Implementation of IsolatedStorage
+//
+
+//
+
+//============================================================
+
+
+#ifndef __COMISOLATEDSTORAGE_h__
+#define __COMISOLATEDSTORAGE_h__
+#ifdef FEATURE_ISOSTORE
+
+// Dependency in managed : System.IO.IsolatedStorage.IsolatedStorage.cs
+#ifndef FEATURE_ISOSTORE_LIGHT
+#define ISS_ROAMING_STORE 0x08
+#define ISS_MACHINE_STORE 0x10
+#endif // !FEATURE_ISOSTORE_LIGHT
+
+class COMIsolatedStorage
+{
+public:
+#ifndef FEATURE_ISOSTORE_LIGHT
+ static
+ void QCALLTYPE GetCaller(QCall::ObjectHandleOnStack retAssembly);
+#endif // !FEATURE_ISOSTORE_LIGHT
+
+ static void DECLSPEC_NORETURN ThrowISS(HRESULT hr);
+
+private:
+#ifndef FEATURE_ISOSTORE_LIGHT
+ static StackWalkAction StackWalkCallBack(CrawlFrame* pCf, PVOID ppv);
+#endif // !FEATURE_ISOSTORE_LIGHT
+};
+
+// --- [ Structure of data that gets persisted on disk ] -------------(Begin)
+
+// non-standard extension: 0-length arrays in struct
+#ifdef _MSC_VER
+#pragma warning(disable:4200)
+#endif
+#include <pshpack1.h>
+
+typedef unsigned __int64 QWORD;
+
+typedef QWORD ISS_USAGE;
+
+// Accounting Information
+typedef struct
+{
+ ISS_USAGE cUsage; // The amount of resource used
+
+ DWORD dwVersion; // Version of bookkeeping file on disk (so we know the layout)
+ QWORD qwQuota; // Quota stored on disk (persisted if increased by the host)
+ QWORD qwReserved[5]; // For future use, set to 0
+ DWORD dwReserved; // For future use, set to 0
+
+} ISS_RECORD;
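+
+// Layout note (informal): with 1-byte packing the record occupies
+// 8 (cUsage) + 4 (dwVersion) + 8 (qwQuota) + 5*8 (qwReserved) + 4 (dwReserved) = 64 bytes;
+// AccountingInfo::Init() zero-fills exactly sizeof(ISS_RECORD) bytes when it
+// creates a fresh store file.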
+
+#include <poppack.h>
+#ifdef _MSC_VER
+#pragma warning(default:4200)
+#endif
+
+// --- [ Structure of data that gets persisted on disk ] ---------------(End)
+
+class AccountingInfo
+{
+public:
+
+ // The file name is used to open / create the file.
+ // A synchronization object will also be created using the sync name
+
+ AccountingInfo(const WCHAR *wszFileName, const WCHAR *wszSyncName);
+
+ // Init should be called before Reserve / GetUsage is called.
+
+ HRESULT Init(); // Creates the file if necessary
+
+ // Reserves space (increments cUsage)
+ // This method is synchronized. If quota + request > limit, the method fails
+
+ HRESULT Reserve(
+ ISS_USAGE cLimit, // The max allowed
+ ISS_USAGE cRequest, // amount of space (request / free)
+ BOOL fFree); // TRUE will free, FALSE will reserve
+
+ // This method is not synchronized, so the information may not be current.
+ // This implies that "pass if (Request + GetUsage() < Limit)" is an error!
+ // Use the Reserve() method instead.
+
+ HRESULT GetUsage(
+ ISS_USAGE *pcUsage); // [out] The amount of space / resource used
+
+ BOOL GetQuota(
+ INT64 *qwQuota); // [out] The Quota stored on disk (if there is one)
+
+ void SetQuota(
+ INT64 qwQuota); // [in] The quota to persist to disk
+
+ // Frees cached pointers and closes handles
+
+ ~AccountingInfo();
+
+ static void AcquireLock(AccountingInfo *pAI) {
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = pAI->Lock();
+ if (FAILED(hr)) COMIsolatedStorage::ThrowISS(hr);
+ }
+ static void ReleaseLock(AccountingInfo *pAI) {
+ WRAPPER_NO_CONTRACT;
+ pAI->Unlock();
+ }
+ typedef Holder<AccountingInfo *, AccountingInfo::AcquireLock, AccountingInfo::ReleaseLock> AccountingInfoLockHolder;
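+
+ // Usage sketch (illustrative): the holder acquires the machine-wide lock on
+ // construction and releases it when it goes out of scope, e.g.
+ //
+ //   {
+ //       AccountingInfoLockHolder pAI(this);
+ //       ... // map the file and read/update the ISS_RECORD
+ //   }   // lock released here, even if an exception is thrown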
+
+private:
+ HRESULT Lock(); // Machine wide Lock
+ void Unlock(); // Unlock the store
+
+ HRESULT Map(); // Maps the store file into memory
+ void Unmap(); // Unmaps the store file from memory
+ void Close(); // Close the store file, and file mapping
+
+ WCHAR *m_wszFileName; // The file name
+ HANDLE m_hFile; // File handle for the file
+ HANDLE m_hMapping; // File mapping for the memory mapped file
+
+ // members used for synchronization
+ WCHAR *m_wszName; // The name of the mutex object
+ HANDLE m_hLock; // Handle to the Mutex object
+
+#ifdef _DEBUG
+ ULONG m_dwNumLocks; // The number of locks owned by this object
+#endif
+
+ union {
+ PBYTE m_pData; // The start of file stream
+ ISS_RECORD *m_pISSRecord;
+ };
+};
+
+class COMIsolatedStorageFile
+{
+public:
+ static
+ void QCALLTYPE GetRootDir(DWORD dwFlags,
+ QCall::StringHandleOnStack retRootDir);
+
+ static
+ UINT64 QCALLTYPE GetUsage(__in_opt AccountingInfo * pAI);
+
+ static
+ void QCALLTYPE Reserve(__in_opt AccountingInfo * pAI,
+ UINT64 qwQuota,
+ UINT64 qwReserve,
+ BOOL fFree);
+
+ static
+ BOOL QCALLTYPE GetQuota(__in_opt AccountingInfo * pAI,
+ __out INT64 * qwQuota);
+
+ static
+ void QCALLTYPE SetQuota(__in_opt AccountingInfo * pAI,
+ INT64 qwQuota);
+
+ static
+ AccountingInfo * QCALLTYPE Open(LPCWSTR wszFileName,
+ LPCWSTR wszSyncName);
+
+ static
+ void QCALLTYPE Close(__in_opt AccountingInfo * pAI);
+
+ static
+ BOOL QCALLTYPE Lock(__in AccountingInfo * handle,
+ BOOL fLock);
+
+#ifndef FEATURE_ISOSTORE_LIGHT
+ // create the machine store root directory and apply the correct DACL
+ static
+ void QCALLTYPE CreateDirectoryWithDacl(LPCWSTR wszPath);
+#endif // !FEATURE_ISOSTORE_LIGHT
+
+private:
+
+ static void GetRootDirInternal(DWORD dwFlags, __in_ecount(cPath) WCHAR *path, DWORD cPath);
+ static void CreateDirectoryIfNotPresent(__in_z const WCHAR *path, LPSECURITY_ATTRIBUTES lpSecurityAttributes = NULL);
+#ifndef FEATURE_ISOSTORE_LIGHT
+ static BOOL GetMachineStoreDirectory(__out_ecount(cPath) WCHAR *path, DWORD cPath);
+ static HRESULT GetMachineStoreDacl(PACL *ppAcl);
+#endif // !FEATURE_ISOSTORE_LIGHT
+};
+#endif // FEATURE_ISOSTORE
+
+#endif // __COMISOLATEDSTORAGE_h__
+
diff --git a/src/vm/commemoryfailpoint.cpp b/src/vm/commemoryfailpoint.cpp
new file mode 100644
index 0000000000..ac86cf475a
--- /dev/null
+++ b/src/vm/commemoryfailpoint.cpp
@@ -0,0 +1,44 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Class: COMMemoryFailPoint
+**
+**
+** Purpose: Native methods for System.Runtime.MemoryFailPoint.
+** These are to implement memory gates to limit allocations
+** when progress will likely result in an OOM.
+**
+===========================================================*/
+#include "common.h"
+
+#include "frames.h"
+#include "commemoryfailpoint.h"
+
+// Need to know the maximum segment size for both the normal GC heap and the
+// large object heap, as well as the top user-accessible address within the
+// address space (i.e., theoretically 2^31 - 1 on a 32-bit machine, but a tad
+// lower in practice). This helps with 32-bit machines running in
+// 3 GB mode.
+FCIMPL2(void, COMMemoryFailPoint::GetMemorySettings, UINT64* pMaxGCSegmentSize, UINT64* pTopOfMemory)
+{
+ FCALL_CONTRACT;
+
+ GCHeap * pGC = GCHeap::GetGCHeap();
+ size_t segment_size = pGC->GetValidSegmentSize(FALSE);
+ size_t large_segment_size = pGC->GetValidSegmentSize(TRUE);
+ _ASSERTE(segment_size < SIZE_T_MAX && large_segment_size < SIZE_T_MAX);
+ if (segment_size > large_segment_size)
+ *pMaxGCSegmentSize = (UINT64) segment_size;
+ else
+ *pMaxGCSegmentSize = (UINT64) large_segment_size;
+
+ // GetTopMemoryAddress returns a void*, which can't be cast
+ // directly to a UINT64 without causing an error from GCC.
+ void * topOfMem = GetTopMemoryAddress();
+ *pTopOfMemory = (UINT64) (size_t) topOfMem;
+}
+FCIMPLEND
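+
+// Worked example (illustrative numbers): if the normal GC segment size were 64 MB
+// and the large object heap segment size were 128 MB, *pMaxGCSegmentSize would be
+// set to the larger value, 128 MB, which the managed MemoryFailPoint code can use
+// when sizing its memory gate.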
diff --git a/src/vm/commemoryfailpoint.h b/src/vm/commemoryfailpoint.h
new file mode 100644
index 0000000000..c492293365
--- /dev/null
+++ b/src/vm/commemoryfailpoint.h
@@ -0,0 +1,29 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Class: COMMemoryFailPoint
+**
+**
+** Purpose: Native methods for System.Runtime.MemoryFailPoint.
+** These are to implement memory gates to limit allocations
+** when progress will likely result in an OOM.
+**
+**
+===========================================================*/
+
+#ifndef _COMMEMORYFAILPOINT_H
+#define _COMMEMORYFAILPOINT_H
+
+#include "fcall.h"
+
+class COMMemoryFailPoint
+{
+public:
+ static FCDECL2(void, GetMemorySettings, UINT64* pMaxGCSegmentSize, UINT64* pTopOfMemory);
+};
+
+#endif // _COMMEMORYFAILPOINT_H
diff --git a/src/vm/commethodrental.cpp b/src/vm/commethodrental.cpp
new file mode 100644
index 0000000000..8276849e5c
--- /dev/null
+++ b/src/vm/commethodrental.cpp
@@ -0,0 +1,121 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+////////////////////////////////////////////////////////////////////////////////
+
+
+
+#include "common.h"
+#include "commethodrental.h"
+#include "corerror.h"
+
+#ifdef FEATURE_METHOD_RENTAL
+// SwapMethodBody
+// This method will take the rgMethod as the new function body for a given method.
+//
+
+void QCALLTYPE COMMethodRental::SwapMethodBody(EnregisteredTypeHandle cls, INT32 tkMethod, LPVOID rgMethod, INT32 iSize, INT32 flags, QCall::StackCrawlMarkHandle stackMark)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ BYTE *pNewCode = NULL;
+ MethodDesc *pMethodDesc;
+ ReflectionModule *module;
+ ICeeGen* pGen;
+ ULONG methodRVA;
+ HRESULT hr;
+
+ if ( cls == NULL)
+ {
+ COMPlusThrowArgumentNull(W("cls"));
+ }
+
+ MethodTable *pMethodTable = TypeHandle::FromPtr(cls).GetMethodTable();
+ PREFIX_ASSUME(pMethodTable != NULL);
+ module = (ReflectionModule *) pMethodTable->GetModule();
+ pGen = module->GetCeeGen();
+
+ Assembly* caller = SystemDomain::GetCallersAssembly( stackMark );
+
+ _ASSERTE( caller != NULL && "Unable to get calling assembly" );
+ _ASSERTE( module->GetCreatingAssembly() != NULL && "ReflectionModule must have a creating assembly to be used with method rental" );
+
+ if (module->GetCreatingAssembly() != caller)
+ {
+ COMPlusThrow(kSecurityException);
+ }
+
+ // Find the methoddesc given the method token
+ pMethodDesc = MemberLoader::FindMethod(pMethodTable, tkMethod);
+ if (pMethodDesc == NULL)
+ {
+ COMPlusThrowArgumentException(W("methodtoken"), NULL);
+ }
+ if (pMethodDesc->GetMethodTable() != pMethodTable || pMethodDesc->GetNumGenericClassArgs() != 0 || pMethodDesc->GetNumGenericMethodArgs() != 0)
+ {
+ COMPlusThrowArgumentException(W("methodtoken"), W("Argument_TypeDoesNotContainMethod"));
+ }
+ hr = pGen->AllocateMethodBuffer(iSize, &pNewCode, &methodRVA);
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+
+ if (pNewCode == NULL)
+ {
+ COMPlusThrowOM();
+ }
+
+ // <TODO>
+ // if method desc is pointing to the post-jitted native code block,
+ // we want to recycle this code block
+
+ // @todo: SEH handling. Will we need to support a method that can throw an exception?
+ // If not, add an assertion to make sure that the method header contains no SEH.
+
+ // @todo: figure out a way not to copy the code block.
+
+ // @todo: add link time security check. This function can be executed only if fully trusted.</TODO>
+
+ // copy the new function body to the buffer
+ memcpy(pNewCode, (void *) rgMethod, iSize);
+
+ // add the starting address of the il blob to the il blob hash table
+ // we need to find this information from out of process for debugger inspection
+ // APIs so we have to store this information where we can get it later
+ module->SetDynamicIL(mdToken(tkMethod), TADDR(pNewCode), FALSE);
+
+ // Reset the MethodDesc back to the unjitted state
+ pMethodDesc->Reset();
+
+ if (flags)
+ {
+ // JITImmediate
+#if _DEBUG
+ COR_ILMETHOD* ilHeader = pMethodDesc->GetILHeader(TRUE);
+ _ASSERTE(((BYTE *)ilHeader) == pNewCode);
+#endif
+ COR_ILMETHOD_DECODER header((COR_ILMETHOD *)pNewCode, pMethodDesc->GetMDImport(), NULL);
+
+ // minimum validation on the correctness of method header
+ if (header.GetCode() == NULL)
+ COMPlusThrowHR(VLDTR_E_MD_BADHEADER);
+
+#ifdef FEATURE_INTERPRETER
+ pMethodDesc->MakeJitWorker(&header, CORJIT_FLG_MAKEFINALCODE, 0);
+#else // !FEATURE_INTERPRETER
+ pMethodDesc->MakeJitWorker(&header, 0, 0);
+#endif // !FEATURE_INTERPRETER
+ }
+
+ // add feature:
+ // If SQL generates classes with an inheritance hierarchy, we may need to
+ // check the whole vtable for duplicate entries.
+
+ END_QCALL;
+
+} // COMMethodRental::SwapMethodBody
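+
+// Informal usage note: the `flags` argument is effectively a boolean - any nonzero
+// value requests "JIT immediately", which compiles the swapped-in body right away
+// via MakeJitWorker instead of waiting for the next invocation of the method.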
+
+
+#endif // FEATURE_METHOD_RENTAL
diff --git a/src/vm/commethodrental.h b/src/vm/commethodrental.h
new file mode 100644
index 0000000000..09a31650cd
--- /dev/null
+++ b/src/vm/commethodrental.h
@@ -0,0 +1,30 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+////////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef _COMMETHODRENTAL_H_
+#define _COMMETHODRENTAL_H_
+
+#include "excep.h"
+#include "fcall.h"
+
+#ifdef FEATURE_METHOD_RENTAL
+// COMMethodRental
+// This class implements SwapMethodBody for our MethodRenting story
+class COMMethodRental
+{
+public:
+
+ // COMMethodRental.SwapMethodBody -- this function will swap an existing method body with
+ // a new method body
+ //
+ static
+ void QCALLTYPE SwapMethodBody(EnregisteredTypeHandle cls, INT32 tkMethod, LPVOID rgMethod, INT32 iSize, INT32 flags, QCall::StackCrawlMarkHandle stackMark);
+};
+#endif // FEATURE_METHOD_RENTAL
+
+#endif //_COMMETHODRENTAL_H_
diff --git a/src/vm/commodule.cpp b/src/vm/commodule.cpp
new file mode 100644
index 0000000000..59c433e5c4
--- /dev/null
+++ b/src/vm/commodule.cpp
@@ -0,0 +1,1325 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#include "commodule.h"
+#include "comdynamic.h"
+#include "reflectclasswriter.h"
+#include "class.h"
+#include "corpolicy.h"
+#include "security.h"
+#include "ceesectionstring.h"
+#include <cor.h>
+#include "typeparse.h"
+#include "typekey.h"
+#include "ildbsymlib.h"
+
+
+//===============================================================================================
+// CreateISymWriterforDynamicModule:
+// Helper to create a ISymUnmanagedWriter instance and hook it up to a newly created dynamic
+// module. This object is used to capture debugging information (source line info, etc.)
+// for the dynamic module. This function determines the appropriate symbol format type
+// (ILDB or PDB), and in the case of PDB (Windows desktop only) loads diasymreader.dll.
+//
+// Arguments:
+// mod - The ReflectionModule for the new dynamic module
+// filenameTemp - the filename at which the module may be saved (ignored if no save access)
+//
+// Return value:
+// The address where the new writer instance has been stored
+//===============================================================================================
+static ISymUnmanagedWriter **CreateISymWriterForDynamicModule(ReflectionModule *mod, const WCHAR *wszFilename)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(mod->IsReflection());
+
+ // Determine which symbol format to use. For Silverlight 2.0 RTM we use ILDB mode to address security
+ // and portability issues with diasymreader.
+ //
+ // For desktop builds we'll eventually want to make ILDB the default, but we need to emit PDB format if
+ // the symbols can be saved to disk to preserve back compat.
+ //
+ ESymbolFormat symFormatToUse = eSymbolFormatILDB;
+
+#ifndef FEATURE_CORECLR // On desktop only we still use PDB format if the symbols are savable to disk
+ if(mod->GetAssembly()->HasSaveAccess())
+ {
+ symFormatToUse = eSymbolFormatPDB;
+ }
+#endif
+
+ static ConfigDWORD dbgForcePDBSymbols;
+ if(dbgForcePDBSymbols.val_DontUse_(W("DbgForcePDBSymbols"), 0) == 1)
+ {
+ symFormatToUse = eSymbolFormatPDB;
+ }
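+
+ // (Assumption based on the standard ConfigDWORD mechanism: this knob is
+ // typically driven by the COMPlus_DbgForcePDBSymbols environment variable
+ // or registry value.)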
+
+ // Create a stream for the symbols to be emitted into. This
+ // lives on the Module for the life of the Module.
+ SafeComHolder<CGrowableStream> pStream(new CGrowableStream());
+
+ mod->SetInMemorySymbolStream(pStream, symFormatToUse);
+
+ // Create an ISymUnmanagedWriter and initialize it with the
+ // stream and the proper file name. This symbol writer will be
+ // replaced with new ones periodically as the symbols get
+ // retrieved by the debugger.
+ SafeComHolder<ISymUnmanagedWriter> pWriter;
+
+ HRESULT hr;
+ if (symFormatToUse == eSymbolFormatILDB)
+ {
+ // Create an ILDB symbol writer from the ildbsymbols library statically linked in
+ hr = IldbSymbolsCreateInstance(CLSID_CorSymWriter_SxS,
+ IID_ISymUnmanagedWriter,
+ (void**)&pWriter);
+ }
+ else
+ {
+ _ASSERTE(symFormatToUse == eSymbolFormatPDB);
+ hr = FakeCoCreateInstanceEx(CLSID_CorSymWriter_SxS,
+ GetInternalSystemDirectory(),
+ IID_ISymUnmanagedWriter,
+ (void**)&pWriter,
+ NULL);
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ {
+ GCX_PREEMP();
+
+ // The other reference is given to the Sym Writer,
+ // but the writer takes its own reference.
+ hr = pWriter->Initialize(mod->GetEmitter(),
+ wszFilename,
+ (IStream*)pStream,
+ TRUE);
+ }
+ if (SUCCEEDED(hr))
+ {
+ mod->GetReflectionModule()->SetISymUnmanagedWriter(pWriter.Extract());
+
+ // Return the address of where we've got our
+ // ISymUnmanagedWriter stored so we can pass it over
+ // to the managed symbol writer object that most of
+ // reflection emit will use to write symbols.
+ return mod->GetISymUnmanagedWriterAddr();
+ }
+ else
+ {
+ COMPlusThrowHR(hr);
+ }
+ }
+ else
+ {
+ COMPlusThrowHR(hr);
+ }
+}
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+//****************************************
+// This function creates a dynamic module underneath the current assembly.
+//****************************************
+void QCALLTYPE COMModule::DefineDynamicModule(QCall::AssemblyHandle pContainingAssembly, BOOL emitSymbolInfo, LPCWSTR pModuleName, LPCWSTR pFilename, QCall::StackCrawlMarkHandle stackMark, LPVOID* ppInternalSymWriter, QCall::ObjectHandleOnStack retModule, BOOL fIsTransient, INT32* ptkFile)
+{
+ QCALL_CONTRACT;
+
+ ReflectionModule * mod = NULL;
+
+ BEGIN_QCALL;
+
+ Assembly * pAssembly = pContainingAssembly->GetAssembly();
+ _ASSERTE(pAssembly);
+
+ // Always create a dynamic module. Note that the name conflict
+ // checking is done on the managed side.
+
+ mod = pAssembly->CreateDynamicModule(pModuleName, pFilename, fIsTransient, ptkFile);
+
+ mod->SetCreatingAssembly( SystemDomain::GetCallersAssembly( stackMark ) );
+
+ // If we need to emit symbol info, we setup the proper symbol
+ // writer for this module now.
+ if (emitSymbolInfo)
+ {
+ ISymUnmanagedWriter **pWriter = CreateISymWriterForDynamicModule(mod, pFilename);
+ if (ppInternalSymWriter)
+ {
+ *ppInternalSymWriter = pWriter;
+ }
+ }
+
+ GCX_COOP();
+ retModule.Set(mod->GetExposedObject());
+ END_QCALL;
+
+ return;
+}
+#endif //FEATURE_MULTIMODULE_ASSEMBLIES
+//===============================================================================================
+// Attaches an unmanaged symwriter to a newly created dynamic module.
+//===============================================================================================
+FCIMPL2(LPVOID, COMModule::nCreateISymWriterForDynamicModule, ReflectModuleBaseObject* reflectionModuleUNSAFE, StringObject* filenameUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTMODULEBASEREF refModule = (REFLECTMODULEBASEREF)ObjectToOBJECTREF(reflectionModuleUNSAFE);
+
+ ReflectionModule *mod = (ReflectionModule*)refModule->GetModule();
+ STRINGREF filename = (STRINGREF)filenameUNSAFE;
+
+ LPVOID pInternalSymWriter = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(filename, refModule);
+
+ SString name;
+ if (filename != NULL)
+ {
+ filename->GetSString(name);
+ }
+
+ GCX_PREEMP();
+ pInternalSymWriter = CreateISymWriterForDynamicModule(mod, name.GetUnicode());
+
+ HELPER_METHOD_FRAME_END();
+
+ return pInternalSymWriter;
+
+} // COMModule::nCreateISymWriterForDynamicModule
+FCIMPLEND
+
+//**************************************************
+// LoadInMemoryTypeByName
+// Explicitly loading an in memory type
+// <TODO>@todo: this function does not deal with nested types correctly yet.
+// We will need to parse the full name by finding "+" for the enclosing type, etc.;
+// a sketch of that lookup follows this function.</TODO>
+//**************************************************
+void QCALLTYPE COMModule::LoadInMemoryTypeByName(QCall::ModuleHandle pModule, LPCWSTR wszFullName)
+{
+ QCALL_CONTRACT;
+
+ TypeHandle typeHnd;
+
+ BEGIN_QCALL;
+
+ if (!pModule->IsReflection())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_NonReflectedType"));
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ // It is OK to use the public import API because this is a dynamic module anyway. We also receive the Unicode
+ // full name as a parameter.
+ IMetaDataImport * pImport = pRCW->GetRWImporter();
+
+ if (wszFullName == NULL)
+ IfFailThrow( E_FAIL );
+
+ // look up the handle
+ mdTypeDef td;
+ HRESULT hr = pImport->FindTypeDefByName(wszFullName, mdTokenNil, &td);
+ if (FAILED(hr))
+ {
+ if (hr != CLDB_E_RECORD_NOTFOUND)
+ COMPlusThrowHR(hr);
+
+ // Get the UTF8 version of strFullName
+ MAKE_UTF8PTR_FROMWIDE(szFullName, wszFullName);
+ pModule->GetAssembly()->ThrowTypeLoadException(szFullName, IDS_CLASSLOAD_GENERAL);
+ }
+
+ TypeKey typeKey(pModule, td);
+ typeHnd = pModule->GetClassLoader()->LoadTypeHandleForTypeKey(&typeKey, TypeHandle());
+
+ END_QCALL;
+
+ return;
+}
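+
+// A sketch of the nested-type lookup the TODO above calls for (hypothetical
+// code, not wired in): split the name at '+' and resolve inner types against
+// the enclosing type's token, since IMetaDataImport::FindTypeDefByName takes
+// the enclosing class as its second argument:
+//
+//   mdTypeDef tdOuter, tdInner;
+//   IfFailThrow(pImport->FindTypeDefByName(W("Outer"), mdTokenNil, &tdOuter));
+//   IfFailThrow(pImport->FindTypeDefByName(W("Inner"), tdOuter, &tdInner));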
+
+//**************************************************
+// GetTypeRef
+// This function will return the type token given a fully qualified name. If the type
+// is defined locally, we will return the TypeDef token; otherwise we will return a TypeRef token
+// with the proper resolution scope calculated.
+// wszFullName is escaped (TYPE_NAME_RESERVED_CHAR). It should not be a byref name, and should not
+// contain an enclosing type name, assembly name, or generic argument list.
+//**************************************************
+mdTypeRef QCALLTYPE COMModule::GetTypeRef(QCall::ModuleHandle pModule,
+ LPCWSTR wszFullName,
+ QCall::ModuleHandle pRefedModule,
+ LPCWSTR wszRefedModuleFileName,
+ INT32 tkResolutionArg)
+{
+ QCALL_CONTRACT;
+
+ mdTypeRef tr = 0;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ IMetaDataEmit * pEmit = pRCW->GetEmitter();
+ IMetaDataImport * pImport = pRCW->GetRWImporter();
+
+ if (wszFullName == NULL) {
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_String"));
+ }
+
+ InlineSString<128> ssNameUnescaped;
+ LPCWSTR wszTemp = wszFullName;
+
+ WCHAR c;
+ while(0 != (c = *wszTemp++))
+ {
+ if ( c == W('\\') &&
+ IsTypeNameReservedChar(*wszTemp) )
+ {
+ ssNameUnescaped.Append(*wszTemp++);
+ }
+ else
+ {
+ _ASSERTE( ! IsTypeNameReservedChar(c) );
+ ssNameUnescaped.Append(c);
+ }
+ }
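+
+ // For example, the escaped name My\+Type (a literal '+' in the simple name)
+ // unescapes to My+Type here; an unescaped reserved character trips the
+ // assert above.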
+
+ LPCWSTR wszFullNameUnescaped = ssNameUnescaped.GetUnicode();
+
+ Assembly * pThisAssembly = pModule->GetClassLoader()->GetAssembly();
+ Assembly * pRefedAssembly = pRefedModule->GetClassLoader()->GetAssembly();
+
+ if (pModule == pRefedModule)
+ {
+ // referenced type is from the same module so we must be able to find a TypeDef.
+ IfFailThrow(pImport->FindTypeDefByName(
+ wszFullNameUnescaped,
+ RidFromToken(tkResolutionArg) ? tkResolutionArg : mdTypeDefNil,
+ &tr));
+ }
+ else
+ {
+ mdToken tkResolution = mdTokenNil;
+ if (RidFromToken(tkResolutionArg))
+ {
+ // reference to nested type
+ tkResolution = tkResolutionArg;
+ }
+ else
+ {
+ // reference to top level type
+ if ( pThisAssembly != pRefedAssembly )
+ {
+ SafeComHolderPreemp<IMetaDataAssemblyEmit> pAssemblyEmit;
+
+ // Generate AssemblyRef
+ IfFailThrow( pEmit->QueryInterface(IID_IMetaDataAssemblyEmit, (void **) &pAssemblyEmit) );
+ tkResolution = pThisAssembly->AddAssemblyRef(pRefedAssembly, pAssemblyEmit);
+
+ // Add the assembly ref token and the manifest module it refers to into this module's rid map.
+ // This is needed regardless of whether the dynamic assembly has run access. Even in Save-only
+ // or Reflection-only mode, CreateType() of the referencing type may still need the referenced
+ // type to be resolved and loaded, e.g. if the referencing type is a subclass of the referenced type.
+ //
+ // Don't cache if there is already an assembly associated with the token. The assembly ref resolution
+ // can be ambiguous because reflection emit does not require unique assembly names.
+ // We always let the first association win. Ideally, we would disallow this situation by throwing an
+ // exception, but that would be a breaking change.
+ if(pModule->LookupAssemblyRef(tkResolution) == NULL)
+ {
+ pModule->ForceStoreAssemblyRef(tkResolution, pRefedAssembly);
+ }
+ }
+ else
+ {
+ _ASSERTE(pModule != pRefedModule);
+ _ASSERTE(wszRefedModuleFileName != NULL);
+
+ // Generate ModuleRef
+ IfFailThrow(pEmit->DefineModuleRef(wszRefedModuleFileName, &tkResolution));
+ }
+ }
+
+ IfFailThrow( pEmit->DefineTypeRefByName(tkResolution, wszFullNameUnescaped, &tr) );
+ }
+
+ END_QCALL;
+
+ return tr;
+}
+
+
+/*=============================GetArrayMethodToken==============================
+**Action:
+**Returns:
+**Arguments: QCall::ModuleHandle pModule
+** INT32 tkTypeSpec
+** LPCWSTR wszMethodName
+** LPCBYTE pSignature
+** INT32 sigLength
+**Exceptions:
+==============================================================================*/
+INT32 QCALLTYPE COMModule::GetArrayMethodToken(QCall::ModuleHandle pModule,
+ INT32 tkTypeSpec,
+ LPCWSTR wszMethodName,
+ LPCBYTE pSignature,
+ INT32 sigLength)
+{
+ QCALL_CONTRACT;
+
+ mdMemberRef memberRefE = mdTokenNil;
+
+ BEGIN_QCALL;
+
+ if (!wszMethodName)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_String"));
+ if (!tkTypeSpec)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Type"));
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ HRESULT hr = pRCW->GetEmitter()->DefineMemberRef(tkTypeSpec, wszMethodName, (PCCOR_SIGNATURE)pSignature, sigLength, &memberRefE);
+ if (FAILED(hr))
+ {
+ _ASSERTE(!"Failed on DefineMemberRef");
+ COMPlusThrowHR(hr);
+ }
+
+ END_QCALL;
+
+ return (INT32)memberRefE;
+}
+
+
+//******************************************************************************
+//
+// GetMemberRef
+// This function will return a MemberRef token given a MethodDef or FieldDef token and the module where it is defined.
+//
+//******************************************************************************
+INT32 QCALLTYPE COMModule::GetMemberRef(QCall::ModuleHandle pModule, QCall::ModuleHandle pRefedModule, INT32 tr, INT32 token)
+{
+ QCALL_CONTRACT;
+
+ mdMemberRef memberRefE = 0;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE( pRCW );
+
+ LPCUTF8 szName;
+ ULONG cbComSig;
+ PCCOR_SIGNATURE pvComSig;
+
+ if (TypeFromToken(token) == mdtMethodDef)
+ {
+ IfFailThrow(pRefedModule->GetMDImport()->GetNameOfMethodDef(token, &szName));
+ IfFailThrow(pRefedModule->GetMDImport()->GetSigOfMethodDef(token, &cbComSig, &pvComSig));
+ }
+ else
+ {
+ IfFailThrow(pRefedModule->GetMDImport()->GetNameOfFieldDef(token, &szName));
+ IfFailThrow(pRefedModule->GetMDImport()->GetSigOfFieldDef(token, &cbComSig, &pvComSig));
+ }
+
+ MAKE_WIDEPTR_FROMUTF8(wzName, szName);
+
+ // Translate the method sig into this scope
+ //
+ Assembly * pRefedAssembly = pRefedModule->GetAssembly();
+ Assembly * pRefingAssembly = pModule->GetAssembly();
+
+ if (pRefedAssembly->IsCollectible() && pRefedAssembly != pRefingAssembly)
+ {
+ if (pRefingAssembly->IsCollectible())
+ pRefingAssembly->GetLoaderAllocator()->EnsureReference(pRefedAssembly->GetLoaderAllocator());
+ else
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleBoundNonCollectible"));
+ }
+
+ SafeComHolderPreemp<IMetaDataAssemblyEmit> pAssemblyEmit;
+ IfFailThrow( pRefingAssembly->GetManifestModule()->GetEmitter()->QueryInterface(IID_IMetaDataAssemblyEmit, (void **) &pAssemblyEmit) );
+
+ CQuickBytes qbNewSig;
+ ULONG cbNewSig;
+
+ IfFailThrow( pRefedModule->GetMDImport()->TranslateSigWithScope(
+ pRefedAssembly->GetManifestImport(),
+ NULL, 0, // hash value
+ pvComSig,
+ cbComSig,
+ pAssemblyEmit, // Emit assembly scope.
+ pRCW->GetEmitter(),
+ &qbNewSig,
+ &cbNewSig) );
+
+ mdTypeRef tref;
+
+ if (TypeFromToken(tr) == mdtTypeDef)
+ {
+ // define a TypeRef using the TypeDef
+ DefineTypeRefHelper(pRCW->GetEmitter(), tr, &tref);
+ }
+ else
+ tref = tr;
+
+ // Define the memberRef
+ IfFailThrow( pRCW->GetEmitter()->DefineMemberRef(tref, wzName, (PCCOR_SIGNATURE) qbNewSig.Ptr(), cbNewSig, &memberRefE) );
+
+ END_QCALL;
+
+ // assign output parameter
+ return (INT32)memberRefE;
+}
+
+
+//******************************************************************************
+//
+// Return a TypeRef token given a TypeDef token from the same emit scope
+//
+//******************************************************************************
+void COMModule::DefineTypeRefHelper(
+ IMetaDataEmit *pEmit, // given emit scope
+ mdTypeDef td, // given typedef in the emit scope
+ mdTypeRef *ptr) // return typeref
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pEmit));
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ CQuickBytes qb;
+ WCHAR* szTypeDef = (WCHAR*) qb.AllocThrows((MAX_CLASSNAME_LENGTH+1) * sizeof(WCHAR));
+ mdToken rs; // resolution scope
+ DWORD dwFlags;
+
+ SafeComHolder<IMetaDataImport> pImport;
+ IfFailThrow( pEmit->QueryInterface(IID_IMetaDataImport, (void **)&pImport) );
+ IfFailThrow( pImport->GetTypeDefProps(td, szTypeDef, MAX_CLASSNAME_LENGTH, NULL, &dwFlags, NULL) );
+ if ( IsTdNested(dwFlags) )
+ {
+ mdToken tdNested;
+ IfFailThrow( pImport->GetNestedClassProps(td, &tdNested) );
+ DefineTypeRefHelper( pEmit, tdNested, &rs);
+ }
+ else
+ rs = TokenFromRid( 1, mdtModule );
+
+ IfFailThrow( pEmit->DefineTypeRefByName( rs, szTypeDef, ptr) );
+} // DefineTypeRefHelper
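+
+// For example: for a type Inner nested in Outer, the recursion above first
+// defines a TypeRef for Outer with the module token (0x00000001) as its
+// resolution scope, then defines the TypeRef for Inner with Outer's TypeRef
+// as its resolution scope.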
+
+
+//******************************************************************************
+//
+// Return a MemberRef token given a RuntimeMethodInfo
+//
+//******************************************************************************
+INT32 QCALLTYPE COMModule::GetMemberRefOfMethodInfo(QCall::ModuleHandle pModule, INT32 tr, MethodDesc * pMeth)
+{
+ QCALL_CONTRACT;
+
+ mdMemberRef memberRefE = 0;
+
+ BEGIN_QCALL;
+
+ if (!pMeth)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Obj"));
+
+ // Otherwise, we want to return a MemberRef token.
+ if (pMeth->IsArray())
+ {
+ _ASSERTE(!"Should not have come here!");
+ COMPlusThrow(kNotSupportedException);
+ }
+
+ if (pMeth->GetMethodTable()->GetModule() == pModule)
+ {
+ // If the passed-in method is defined in the same module, just return the MethodDef token
+ memberRefE = pMeth->GetMemberDef();
+ }
+ else
+ {
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ LPCUTF8 szName;
+ IfFailThrow(pMeth->GetMDImport()->GetNameOfMethodDef(pMeth->GetMemberDef(), &szName));
+
+ ULONG cbComSig;
+ PCCOR_SIGNATURE pvComSig;
+ IfFailThrow(pMeth->GetMDImport()->GetSigOfMethodDef(pMeth->GetMemberDef(), &cbComSig, &pvComSig));
+
+ // Translate the method sig into this scope
+ Assembly * pRefedAssembly = pMeth->GetModule()->GetAssembly();
+ Assembly * pRefingAssembly = pModule->GetAssembly();
+
+ SafeComHolderPreemp<IMetaDataAssemblyEmit> pAssemblyEmit;
+ IfFailThrow( pRefingAssembly->GetManifestModule()->GetEmitter()->QueryInterface(IID_IMetaDataAssemblyEmit, (void **) &pAssemblyEmit) );
+
+ CQuickBytes qbNewSig;
+ ULONG cbNewSig;
+
+ if (pRefedAssembly->IsCollectible() && pRefedAssembly != pRefingAssembly)
+ {
+ if (pRefingAssembly->IsCollectible())
+ pRefingAssembly->GetLoaderAllocator()->EnsureReference(pRefedAssembly->GetLoaderAllocator());
+ else
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleBoundNonCollectible"));
+ }
+
+ IfFailThrow( pMeth->GetMDImport()->TranslateSigWithScope(
+ pRefedAssembly->GetManifestImport(),
+ NULL, 0, // hash blob value
+ pvComSig,
+ cbComSig,
+ pAssemblyEmit, // Emit assembly scope.
+ pRCW->GetEmitter(),
+ &qbNewSig,
+ &cbNewSig) );
+
+ // translate the name to unicode string
+ MAKE_WIDEPTR_FROMUTF8(wszName, szName);
+
+ // Define the memberRef
+ IfFailThrow( pRCW->GetEmitter()->DefineMemberRef(tr, wszName, (PCCOR_SIGNATURE) qbNewSig.Ptr(), cbNewSig, &memberRefE) );
+ }
+
+ END_QCALL;
+
+ return memberRefE;
+}
+
+
+//******************************************************************************
+//
+// Return a MemberRef token given a RuntimeFieldInfo
+//
+//******************************************************************************
+mdMemberRef QCALLTYPE COMModule::GetMemberRefOfFieldInfo(QCall::ModuleHandle pModule, mdTypeDef tr, void * th, mdFieldDef tkField)
+{
+ QCALL_CONTRACT;
+
+ mdMemberRef memberRefE = 0;
+
+ BEGIN_QCALL;
+
+ if (TypeFromToken(tr) == mdtTypeDef)
+ {
+ // If the passed-in field is defined in the same module, just return the FieldDef token
+ memberRefE = tkField;
+ }
+ else
+ {
+ TypeHandle typeHandle = TypeHandle::FromPtr(th);
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ // get the field name and sig
+ Module * pRefedModule = typeHandle.GetModule();
+ IMDInternalImport * pRefedMDImport = pRefedModule->GetMDImport();
+
+ LPCUTF8 szName;
+ IfFailThrow(pRefedMDImport->GetNameOfFieldDef(tkField, &szName));
+
+ ULONG cbComSig;
+ PCCOR_SIGNATURE pvComSig;
+ IfFailThrow(pRefedMDImport->GetSigOfFieldDef(tkField, &cbComSig, &pvComSig));
+
+ // translate the name to unicode string
+ MAKE_WIDEPTR_FROMUTF8(wszName, szName);
+
+ Assembly * pRefedAssembly = pRefedModule->GetAssembly();
+ Assembly * pRefingAssembly = pModule->GetAssembly();
+
+ if (pRefedAssembly->IsCollectible() && pRefedAssembly != pRefingAssembly)
+ {
+ if (pRefingAssembly->IsCollectible())
+ pRefingAssembly->GetLoaderAllocator()->EnsureReference(pRefedAssembly->GetLoaderAllocator());
+ else
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleBoundNonCollectible"));
+ }
+ SafeComHolderPreemp<IMetaDataAssemblyEmit> pAssemblyEmit;
+ IfFailThrow( pRefingAssembly->GetManifestModule()->GetEmitter()->QueryInterface(IID_IMetaDataAssemblyEmit, (void **) &pAssemblyEmit) );
+
+ // Translate the field signature into this scope
+ CQuickBytes qbNewSig;
+ ULONG cbNewSig;
+
+ IfFailThrow( pRefedMDImport->TranslateSigWithScope(
+ pRefedAssembly->GetManifestImport(),
+ NULL, 0, // hash value
+ pvComSig,
+ cbComSig,
+ pAssemblyEmit, // Emit assembly scope.
+ pRCW->GetEmitter(),
+ &qbNewSig,
+ &cbNewSig) );
+
+ IfFailThrow( pRCW->GetEmitter()->DefineMemberRef(tr, wszName, (PCCOR_SIGNATURE) qbNewSig.Ptr(), cbNewSig, &memberRefE) );
+ }
+
+ END_QCALL;
+
+ return memberRefE;
+}
+
+//******************************************************************************
+//
+// Return a MemberRef token given a Signature
+//
+//******************************************************************************
+INT32 QCALLTYPE COMModule::GetMemberRefFromSignature(QCall::ModuleHandle pModule,
+ INT32 tr,
+ LPCWSTR wszMemberName,
+ LPCBYTE pSignature,
+ INT32 sigLength)
+{
+ QCALL_CONTRACT;
+
+ mdMemberRef memberRefE = mdTokenNil;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ IfFailThrow( pRCW->GetEmitter()->DefineMemberRef(tr,
+ wszMemberName,
+ pSignature,
+ sigLength,
+ &memberRefE) );
+
+ END_QCALL;
+
+ return memberRefE;
+}
+
+//******************************************************************************
+//
+// SetFieldRVAContent
+// This function is used to set the FieldRVA with the content data
+//
+//******************************************************************************
+void QCALLTYPE COMModule::SetFieldRVAContent(QCall::ModuleHandle pModule, INT32 tkField, LPCBYTE pContent, INT32 length)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ ICeeGen * pGen = pRCW->GetCeeGen();
+
+ ReflectionModule * pReflectionModule = pModule->GetReflectionModule();
+
+ // Create the .sdata section if not created
+ if (pReflectionModule->m_sdataSection == 0)
+ IfFailThrow( pGen->GetSectionCreate (".sdata", sdReadWrite, &pReflectionModule->m_sdataSection) );
+
+ // Get the size of current .sdata section. This will be the RVA for this field within the section
+ DWORD dwRVA = 0;
+ IfFailThrow( pGen->GetSectionDataLen(pReflectionModule->m_sdataSection, &dwRVA) );
+ dwRVA = (dwRVA + sizeof(DWORD)-1) & ~(sizeof(DWORD)-1);
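+ // (Standard align-up to a DWORD boundary: e.g. an RVA of 13 rounds up to 16,
+ // and 16 stays 16.)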
+
+ // allocate the space in .sdata section
+ void * pvBlob;
+ IfFailThrow( pGen->GetSectionBlock(pReflectionModule->m_sdataSection, length, sizeof(DWORD), (void**) &pvBlob) );
+
+ // copy over the initialized data if specified
+ if (pContent != NULL)
+ memcpy(pvBlob, pContent, length);
+
+ // Set the FieldRVA in the metadata. Note that this is not the final RVA in the image when saving to disk; we will do another round of fixup upon save.
+ IfFailThrow( pRCW->GetEmitter()->SetFieldRVA(tkField, dwRVA) );
+
+ END_QCALL;
+}
+
+
+//******************************************************************************
+//
+// GetStringConstant
+// If this is a dynamic module, this routine will define a new
+// string constant or return the token of an existing constant.
+//
+//******************************************************************************
+mdString QCALLTYPE COMModule::GetStringConstant(QCall::ModuleHandle pModule, LPCWSTR pwzValue, INT32 iLength)
+{
+ QCALL_CONTRACT;
+
+ mdString strRef = mdTokenNil;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ _ASSERTE(pwzValue != NULL);
+
+ HRESULT hr = pRCW->GetEmitter()->DefineUserString(pwzValue, iLength, &strRef);
+ if (FAILED(hr)) {
+ _ASSERTE(hr == E_OUTOFMEMORY || !"Unknown failure in DefineUserString");
+ COMPlusThrowHR(hr);
+ }
+
+ END_QCALL;
+
+ return strRef;
+}
+
+
+/*=============================SetModuleName====================================
+// SetModuleName
+==============================================================================*/
+void QCALLTYPE COMModule::SetModuleName(QCall::ModuleHandle pModule, LPCWSTR wszModuleName)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ IfFailThrow( pRCW->GetEmitter()->SetModuleProps(wszModuleName) );
+
+ END_QCALL;
+}
+
+//******************************************************************************
+//
+// Determine whether this module is transient
+//
+//******************************************************************************
+BOOL QCALLTYPE COMModule::IsTransient(QCall::ModuleHandle pModule)
+{
+ QCALL_CONTRACT;
+
+ BOOL fIsTransient = FALSE;
+
+ BEGIN_QCALL;
+
+ /* Only reflection modules can be transient */
+ if (pModule->IsReflection())
+ fIsTransient = pModule->GetReflectionModule()->IsTransient();
+
+ END_QCALL;
+
+ return fIsTransient;
+}
+
+//******************************************************************************
+//
+// Return a type spec token given a byte array
+//
+//******************************************************************************
+mdTypeSpec QCALLTYPE COMModule::GetTokenFromTypeSpec(QCall::ModuleHandle pModule, LPCBYTE pSignature, INT32 sigLength)
+{
+ QCALL_CONTRACT;
+
+ mdTypeSpec ts = mdTokenNil;
+
+ BEGIN_QCALL;
+
+ RefClassWriter * pRCW = pModule->GetReflectionModule()->GetClassWriter();
+ _ASSERTE(pRCW);
+
+ IfFailThrow(pRCW->GetEmitter()->GetTokenFromTypeSpec((PCCOR_SIGNATURE)pSignature, sigLength, &ts));
+
+ END_QCALL;
+
+ return ts;
+}
+
+
+// GetType
+// Given a class name, this method will look for that class
+// within the module.
+void QCALLTYPE COMModule::GetType(QCall::ModuleHandle pModule, LPCWSTR wszName, BOOL bThrowOnError, BOOL bIgnoreCase, QCall::ObjectHandleOnStack retType)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(wszName));
+ }
+ CONTRACTL_END;
+
+ TypeHandle retTypeHandle;
+
+ BEGIN_QCALL;
+
+ GCX_COOP();
+
+ DomainAssembly *pAssembly = pModule->GetDomainAssembly();
+ _ASSERTE(pAssembly);
+
+ OBJECTREF keepAlive = NULL;
+ GCPROTECT_BEGIN(keepAlive);
+
+ {
+ GCX_PREEMP();
+
+ BOOL prohibitAsmQualifiedName = TRUE;
+
+#ifdef FEATURE_LEGACYNETCF
+ // The NetCF type name parser allowed the assembly name to be overridden here
+ if (GetAppDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ prohibitAsmQualifiedName = FALSE;
+#endif
+
+ // Load the class from this assembly (fail if it is in a different one).
+ retTypeHandle = TypeName::GetTypeManaged(wszName, pAssembly, bThrowOnError, bIgnoreCase, pAssembly->IsIntrospectionOnly(), prohibitAsmQualifiedName, NULL, FALSE, &keepAlive);
+ }
+
+ // Verify that it's in 'this' module
+ // But, if it's in a different assembly than expected, that's okay, because
+ // it just means that it's been type forwarded.
+ if (!retTypeHandle.IsNull())
+ {
+ if ( (retTypeHandle.GetModule() != pModule) &&
+ (retTypeHandle.GetModule()->GetAssembly() == pModule->GetAssembly()) )
+ retTypeHandle = TypeHandle();
+ }
+
+ if (!retTypeHandle.IsNull())
+ {
+ GCX_COOP();
+ retType.Set(retTypeHandle.GetManagedClassObject());
+ }
+ GCPROTECT_END();
+
+ END_QCALL;
+
+ return;
+}
+
+
+// GetScopeName
+// This routine will return the metadata scope name of the module as a String
+void QCALLTYPE COMModule::GetScopeName(QCall::ModuleHandle pModule, QCall::StringHandleOnStack retString)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ LPCSTR szName = NULL;
+
+ if (pModule->IsResource())
+ {
+ IfFailThrow(pModule->GetAssembly()->GetManifestImport()->GetFileProps(
+ pModule->GetModuleRef(),
+ &szName,
+ NULL,
+ NULL,
+ NULL));
+ }
+ else
+ {
+ if (!pModule->GetMDImport()->IsValidToken(pModule->GetMDImport()->GetModuleFromScope()))
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ IfFailThrow(pModule->GetMDImport()->GetScopeProps(&szName, 0));
+ }
+
+ retString.Set(szName);
+
+ END_QCALL;
+}
+
+static bool StringEndsWith(LPCWSTR pwzString, LPCWSTR pwzCandidate)
+{
+ size_t stringLength = wcslen(pwzString);
+ size_t candidateLength = wcslen(pwzCandidate);
+
+ if (candidateLength > stringLength || stringLength == 0 || candidateLength == 0)
+ {
+ return false;
+ }
+
+ LPCWSTR pwzStringEnd = pwzString + stringLength - candidateLength;
+
+ return !_wcsicmp(pwzStringEnd, pwzCandidate);
+}
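+
+// Usage example: StringEndsWith(W("Foo.ni.dll"), W(".ni.dll")) returns true;
+// the comparison is case-insensitive (_wcsicmp), which the native image name
+// fix-up in GetFullyQualifiedName below relies on.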
+
+/*============================GetFullyQualifiedName=============================
+**Action:
+**Returns:
+**Arguments:
+**Exceptions:
+==============================================================================*/
+void QCALLTYPE COMModule::GetFullyQualifiedName(QCall::ModuleHandle pModule, QCall::StringHandleOnStack retString)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ HRESULT hr = S_OK;
+
+ WCHAR wszBuffer[64];
+
+ if (pModule->IsPEFile())
+ {
+ LPCWSTR fileName = pModule->GetPath();
+ if (*fileName != 0) {
+#ifndef FEATURE_CORECLR
+ // workaround - lie about where mscorlib is. Mscorlib is now loaded out of the GAC,
+ // but some apps query its location to find the system directory. (Notably CodeDOM)
+ if (pModule->IsSystem())
+ retString.Set(SystemDomain::System()->BaseLibrary());
+ else
+#endif // !FEATURE_CORECLR
+ {
+#ifdef FEATURE_WINDOWSPHONE
+ //
+ // On Phone we use only native images without any concept of the matching IL image
+ // To stop native image filenames leaking through to apps, fudge Reflection::get_Name
+ // so apps see Foo.dll instead of Foo.ni.dll
+ //
+ if (pModule->GetFile()->GetAssembly()->GetILimage()->IsTrustedNativeImage())
+ {
+ WCHAR fileNameWithoutNi[MAX_PATH];
+
+ wcscpy_s(fileNameWithoutNi, MAX_PATH, fileName);
+
+ if (StringEndsWith(fileName, W(".ni.dll")))
+ {
+ wcscpy_s(fileNameWithoutNi + wcslen(fileNameWithoutNi) - wcslen(W(".ni.dll")), MAX_PATH, W(".dll"));
+ }
+ else if (StringEndsWith(fileName, W(".ni.exe")))
+ {
+ wcscpy_s(fileNameWithoutNi + wcslen(fileNameWithoutNi) - wcslen(W(".ni.exe")), MAX_PATH, W(".exe"));
+ }
+ else if (StringEndsWith(fileName, W(".ni.winmd")))
+ {
+ wcscpy_s(fileNameWithoutNi + wcslen(fileNameWithoutNi) - wcslen(W(".ni.winmd")), MAX_PATH, W(".winmd"));
+ }
+
+ retString.Set(fileNameWithoutNi);
+ }
+ else
+#endif
+ retString.Set(fileName);
+ }
+ } else {
+ hr = UtilLoadStringRC(IDS_EE_NAME_UNKNOWN, wszBuffer, sizeof( wszBuffer ) / sizeof( WCHAR ), true );
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+ retString.Set(wszBuffer);
+ }
+ }
+ else
+ {
+ hr = UtilLoadStringRC(IDS_EE_NAME_INMEMORYMODULE, wszBuffer, sizeof( wszBuffer ) / sizeof( WCHAR ), true );
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+ retString.Set(wszBuffer);
+ }
+
+ END_QCALL;
+}
+
+/*===================================GetHINSTANCE===============================
+**Action: Returns the hinst for this module.
+**Returns:
+**Arguments: refThis
+**Exceptions: None.
+==============================================================================*/
+HINSTANCE QCALLTYPE COMModule::GetHINSTANCE(QCall::ModuleHandle pModule)
+{
+ QCALL_CONTRACT;
+
+ HMODULE hMod = (HMODULE)0;
+
+ BEGIN_QCALL;
+
+ // This returns the base address - this will work for either HMODULE or HCORMODULES
+ // Other modules should have zero base
+ PEFile *pPEFile = pModule->GetFile();
+ if (!pPEFile->IsDynamic() && !pPEFile->IsResource())
+ {
+ hMod = (HMODULE) pModule->GetFile()->GetManagedFileContents();
+ }
+
+ //If we don't have an hMod, set it to -1 so that callers know that none is
+ //available
+ if (!hMod) {
+ hMod = (HMODULE)-1;
+ }
+
+ END_QCALL;
+
+ return (HINSTANCE)hMod;
+}
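+
+// (The -1 sentinel surfaces to managed callers: Marshal.GetHINSTANCE is
+// documented to return (IntPtr)(-1) when a module has no HINSTANCE.)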
+
+static Object* GetTypesInner(Module* pModule);
+
+// GetTypes will return an array containing all of the classes
+// that are defined within this Module.
+FCIMPL1(Object*, COMModule::GetTypes, ReflectModuleBaseObject* pModuleUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refRetVal = NULL;
+ REFLECTMODULEBASEREF refModule = (REFLECTMODULEBASEREF)ObjectToOBJECTREF(pModuleUNSAFE);
+ if (refModule == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ Module *pModule = refModule->GetModule();
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refRetVal, refModule);
+
+ refRetVal = (OBJECTREF) GetTypesInner(pModule);
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(refRetVal);
+}
+FCIMPLEND
+
+Object* GetTypesInner(Module* pModule)
+{
+ CONTRACT(Object*) {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+
+ PRECONDITION(CheckPointer(pModule));
+
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ DWORD dwNumTypeDefs = 0;
+ DWORD i;
+ IMDInternalImport *pInternalImport;
+ PTRARRAYREF refArrClasses = NULL;
+ PTRARRAYREF xcept = NULL;
+ DWORD cXcept = 0;
+ bool bSystemAssembly; // Don't expose transparent proxy
+ int AllocSize = 0;
+ MethodTable* pMT = NULL;
+
+ if (pModule->IsResource())
+ {
+ refArrClasses = (PTRARRAYREF) AllocateObjectArray(0, MscorlibBinder::GetClass(CLASS__CLASS));
+ RETURN(OBJECTREFToObject(refArrClasses));
+ }
+
+ GCPROTECT_BEGIN(refArrClasses);
+ GCPROTECT_BEGIN(xcept);
+
+ pInternalImport = pModule->GetMDImport();
+
+ HENUMTypeDefInternalHolder hEnum(pInternalImport);
+ // Get the count of typedefs
+ hEnum.EnumTypeDefInit();
+
+ dwNumTypeDefs = pInternalImport->EnumTypeDefGetCount(&hEnum);
+
+ // Allocate the COM+ array
+ bSystemAssembly = (pModule->GetAssembly() == SystemDomain::SystemAssembly());
+#ifdef FEATURE_REMOTING
+ // we skip the TransparentProxy type if this is mscorlib, so we can make the array one element smaller
+ AllocSize = !bSystemAssembly ? dwNumTypeDefs : dwNumTypeDefs - 1;
+#else
+ AllocSize = dwNumTypeDefs;
+#endif
+ refArrClasses = (PTRARRAYREF) AllocateObjectArray(AllocSize, MscorlibBinder::GetClass(CLASS__CLASS));
+
+ int curPos = 0;
+ OBJECTREF throwable = 0;
+ mdTypeDef tdCur = mdTypeDefNil;
+
+ GCPROTECT_BEGIN(throwable);
+ // Now create each COM+ Class object and insert it into the array.
+ while (pInternalImport->EnumTypeDefNext(&hEnum, &tdCur))
+ {
+ // Get the VM class for the current class token
+ TypeHandle curClass;
+
+ EX_TRY {
+ curClass = ClassLoader::LoadTypeDefOrRefThrowing(pModule, tdCur,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ }
+ EX_CATCH_THROWABLE(&throwable);
+
+ if (throwable != 0) {
+ // Lazily allocate an array to store the exceptions in
+ if (xcept == NULL)
+ xcept = (PTRARRAYREF) AllocateObjectArray(dwNumTypeDefs,g_pExceptionClass);
+
+ _ASSERTE(cXcept < dwNumTypeDefs);
+ xcept->SetAt(cXcept++, throwable);
+ throwable = 0;
+ continue;
+ }
+
+ _ASSERTE("LoadClass failed." && !curClass.IsNull());
+
+ pMT = curClass.GetMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+
+ if (pMT->IsTransparentProxy())
+ {
+ // Don't expose transparent proxy
+ _ASSERTE(bSystemAssembly);
+ continue;
+ }
+
+ // Get the COM+ Class object
+ OBJECTREF refCurClass = pMT->GetManagedClassObject();
+ _ASSERTE("GetManagedClassObject failed." && refCurClass != NULL);
+
+ _ASSERTE(curPos < AllocSize);
+ refArrClasses->SetAt(curPos++, refCurClass);
+ }
+ GCPROTECT_END(); //throwable
+
+ // check if there were exceptions thrown
+ if (cXcept > 0) {
+ PTRARRAYREF xceptRet = NULL;
+ GCPROTECT_BEGIN(xceptRet);
+
+ xceptRet = (PTRARRAYREF) AllocateObjectArray(cXcept,g_pExceptionClass);
+ for (i=0;i<cXcept;i++) {
+ xceptRet->SetAt(i, xcept->GetAt(i));
+ }
+ OBJECTREF except = InvokeUtil::CreateClassLoadExcept((OBJECTREF*) &refArrClasses,(OBJECTREF*) &xceptRet);
+ COMPlusThrow(except);
+
+ GCPROTECT_END();
+ }
+
+ // We should have filled the array exactly.
+ _ASSERTE(curPos == AllocSize);
+
+ // Assign the return value to the COM+ array
+ GCPROTECT_END();
+ GCPROTECT_END();
+
+ RETURN(OBJECTREFToObject(refArrClasses));
+}
+
+#if defined(FEATURE_X509) && defined(FEATURE_CAS_POLICY)
+//+--------------------------------------------------------------------------
+//
+// Member: COMModule::GetSignerCertificate()
+//
+// Synopsis: Gets the certificate with which the module was signed.
+//
+// Effects: Creates an X509Certificate and returns it.
+//
+// Arguments: None.
+//
+// Returns: OBJECTREF to an X509Certificate object containing the
+// signer certificate.
+//
+
+//
+//---------------------------------------------------------------------------
+
+void QCALLTYPE COMModule::GetSignerCertificate(QCall::ModuleHandle pModule, QCall::ObjectHandleOnStack retData)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ PCOR_TRUST pCorTrust = NULL;
+ IAssemblySecurityDescriptor* pSecDesc = NULL;
+ PBYTE pbSigner = NULL;
+ DWORD cbSigner = 0;
+
+ // ******** Get the security descriptor ********
+
+ // Get a pointer to the module security descriptor
+ pSecDesc = pModule->GetSecurityDescriptor();
+ _ASSERTE(pSecDesc);
+
+ // ******** Get COR_TRUST info from module security descriptor ********
+ if (FAILED(pSecDesc->LoadSignature(&pCorTrust)))
+ {
+ COMPlusThrow(kArgumentNullException, W("InvalidOperation_MetaDataError"));
+ }
+
+ if( pCorTrust )
+ {
+ // Get a pointer to the signer certificate information in the COR_TRUST
+ pbSigner = pCorTrust->pbSigner;
+ cbSigner = pCorTrust->cbSigner;
+
+ if( pbSigner && cbSigner )
+ {
+ retData.SetByteArray(pbSigner, cbSigner);
+ }
+ }
+
+ END_QCALL;
+}
+#endif // #if defined(FEATURE_X509) && defined(FEATURE_CAS_POLICY)
+
+FCIMPL1(FC_BOOL_RET, COMModule::IsResource, ReflectModuleBaseObject* pModuleUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ if (pModuleUNSAFE == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ FC_RETURN_BOOL(pModuleUNSAFE->GetModule()->IsResource());
+}
+FCIMPLEND
+
+#ifdef FEATURE_CORECLR
+
+//---------------------------------------------------------------------
+// Helper code for PunkSafeHandle class. This does the Release in the
+// safehandle's critical finalizer.
+//---------------------------------------------------------------------
+static VOID __stdcall DReleaseTarget(IUnknown *punk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (punk)
+ {
+ punk->Release();
+ }
+}
+
+
+//---------------------------------------------------------------------
+// Helper code for PunkSafeHandle class. This returns the function that performs
+// the Release() for the safehandle's critical finalizer.
+//---------------------------------------------------------------------
+FCIMPL0(void*, COMPunkSafeHandle::nGetDReleaseTarget)
+{
+ FCALL_CONTRACT;
+
+ return (void*)DReleaseTarget;
+}
+FCIMPLEND
+#endif //FEATURE_CORECLR
+
+
diff --git a/src/vm/commodule.h b/src/vm/commodule.h
new file mode 100644
index 0000000000..2d912c2849
--- /dev/null
+++ b/src/vm/commodule.h
@@ -0,0 +1,144 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+////////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef _COMModule_H_
+#define _COMModule_H_
+
+#include "invokeutil.h"
+
+class Module;
+
+class COMModule
+{
+public:
+ // Attaches an unmanaged symwriter to a newly created dynamic module.
+ static FCDECL2(LPVOID, nCreateISymWriterForDynamicModule, ReflectModuleBaseObject* reflectionModuleUNSAFE, StringObject* filenameUNSAFE);
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ // DefineDynamicModule
+ // This method will create a dynamic module given an assembly
+ static
+ void QCALLTYPE DefineDynamicModule(QCall::AssemblyHandle pContainingAssembly, BOOL emitSymbolInfo, LPCWSTR pModuleName, LPCWSTR pFilename, QCall::StackCrawlMarkHandle stackMark, LPVOID* ppInternalSymWriter, QCall::ObjectHandleOnStack retModule, BOOL fIsTransient, INT32* ptkFile);
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+ // IsTransient
+ // Determine if a Module is transient
+ static
+ BOOL QCALLTYPE IsTransient(QCall::ModuleHandle pModule);
+
+ // GetTypeRef
+ // This function will return the class token for the named element.
+ static
+ mdTypeRef QCALLTYPE GetTypeRef(QCall::ModuleHandle pModule,
+ LPCWSTR wszFullName,
+ QCall::ModuleHandle pRefedModule,
+ LPCWSTR wszRefedModuleFileName,
+ INT32 tkResolution);
+
+ // LoadInMemoryTypeByName
+ // This function explicitly loads an in-memory type by name.
+ static
+ void QCALLTYPE LoadInMemoryTypeByName(QCall::ModuleHandle pModule, LPCWSTR wszFullName);
+
+
+ // SetFieldRVAContent
+ // This function is used to set the FieldRVA with the content data
+ static
+ void QCALLTYPE SetFieldRVAContent(QCall::ModuleHandle pModule, INT32 tkField, LPCBYTE pContent, INT32 length);
+
+
+ //GetArrayMethodToken
+ static
+ INT32 QCALLTYPE GetArrayMethodToken(QCall::ModuleHandle pModule,
+ INT32 tkTypeSpec,
+ LPCWSTR wszMethodName,
+ LPCBYTE pSignature,
+ INT32 sigLength);
+
+ // GetMemberRefToken
+ // This function will return the MemberRef token
+ static
+ INT32 QCALLTYPE GetMemberRef(QCall::ModuleHandle pModule, QCall::ModuleHandle pRefedModule, INT32 tr, INT32 token);
+
+ // This function returns a MemberRef token given a RuntimeMethodInfo
+ static
+ INT32 QCALLTYPE GetMemberRefOfMethodInfo(QCall::ModuleHandle pModule, INT32 tr, MethodDesc * method);
+
+
+ // GetMemberRefOfFieldInfo
+ // This function will return a memberRef token given a FieldInfo
+ static
+ mdMemberRef QCALLTYPE GetMemberRefOfFieldInfo(QCall::ModuleHandle pModule, mdTypeDef tr, EnregisteredTypeHandle th, mdFieldDef tkField);
+
+ // GetMemberRefFromSignature
+ // This function will return the MemberRef token given the signature from managed code
+ static
+ INT32 QCALLTYPE GetMemberRefFromSignature(QCall::ModuleHandle pModule,
+ INT32 tr,
+ LPCWSTR wszMemberName,
+ LPCBYTE pSignature,
+ INT32 sigLength);
+
+ // GetTokenFromTypeSpec
+ static
+ mdTypeSpec QCALLTYPE GetTokenFromTypeSpec(QCall::ModuleHandle pModule, LPCBYTE pSignature, INT32 sigLength);
+
+ // GetType
+ // Given a class name, this method will look for that type
+ // within the module.
+ static
+ void QCALLTYPE GetType(QCall::ModuleHandle pModule, LPCWSTR wszName, BOOL bThrowOnError, BOOL bIgnoreCase, QCall::ObjectHandleOnStack retType);
+
+ // GetTypes will return an array containing all of the classes
+ // that are defined within this Module.
+ static FCDECL1(Object*, GetTypes, ReflectModuleBaseObject* pModuleUNSAFE);
+
+ // GetStringConstant
+ // If this is a dynamic module, this routine will define a new
+ // string constant or return the token of an existing constant.
+ static
+ mdString QCALLTYPE GetStringConstant(QCall::ModuleHandle pModule, LPCWSTR pwzValue, INT32 iLength);
+
+#if defined(FEATURE_X509) && defined(FEATURE_CAS_POLICY)
+ /*X509Certificate*/
+ static
+ void QCALLTYPE GetSignerCertificate(QCall::ModuleHandle pModule, QCall::ObjectHandleOnStack retData);
+#endif // #if defined(FEATURE_X509) && defined(FEATURE_CAS_POLICY)
+
+ static
+ void QCALLTYPE SetModuleName(QCall::ModuleHandle pModule, LPCWSTR wszModuleName);
+
+ static FCDECL1(FC_BOOL_RET, IsResource, ReflectModuleBaseObject* pModuleUNSAFE);
+
+ static FCDECL1(Object*, GetMethods, ReflectModuleBaseObject* refThisUNSAFE);
+
+ static
+ void QCALLTYPE GetScopeName(QCall::ModuleHandle pModule, QCall::StringHandleOnStack retString);
+
+ static
+ void QCALLTYPE GetFullyQualifiedName(QCall::ModuleHandle pModule, QCall::StringHandleOnStack retString);
+
+ static
+ HINSTANCE QCALLTYPE GetHINSTANCE(QCall::ModuleHandle pModule);
+
+ static void DefineTypeRefHelper(
+ IMetaDataEmit *pEmit, // given emit scope
+ mdTypeDef td, // given typedef in the emit scope
+ mdTypeRef *ptr); // return typeref
+
+};
+
+class COMPunkSafeHandle
+{
+ public:
+#ifdef FEATURE_CORECLR
+ static FCDECL0(void*, nGetDReleaseTarget);
+#endif
+};
+
+#endif
diff --git a/src/vm/common.cpp b/src/vm/common.cpp
new file mode 100644
index 0000000000..dee10dcd55
--- /dev/null
+++ b/src/vm/common.cpp
@@ -0,0 +1,8 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+
diff --git a/src/vm/common.h b/src/vm/common.h
new file mode 100644
index 0000000000..5d214818f1
--- /dev/null
+++ b/src/vm/common.h
@@ -0,0 +1,528 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// common.h - precompiled headers include for the COM+ Execution Engine
+//
+
+//
+
+
+#ifndef _common_h_
+#define _common_h_
+
+#ifdef CLR_STANDALONE_BINDER
+
+#ifndef CLR_PRIV_BINDER_FOR_MDILBIND
+#include "..\tools\mdilbind\common.h"
+#endif //!CLR_PRIV_BINDER_FOR_MDILBIND
+
+#else //CLR_STANDALONE_BINDER
+
+
+#if defined(_MSC_VER) && defined(_X86_) && !defined(FPO_ON)
+#pragma optimize("y", on) // Small critical routines, don't put in EBP frame
+#define FPO_ON 1
+#define COMMON_TURNED_FPO_ON 1
+#endif
+
+#define USE_COM_CONTEXT_DEF
+
+#if defined(_DEBUG) && !defined(CROSSGEN_COMPILE)
+#define DEBUG_REGDISPLAY
+#endif
+
+#ifdef _MSC_VER
+
+ // These don't seem useful, so turning them off is no big deal
+#pragma warning(disable:4201) // nameless struct/union
+#pragma warning(disable:4510) // can't generate default constructor
+//#pragma warning(disable:4511) // can't generate copy constructor
+#pragma warning(disable:4512) // can't generate assignment operator
+#pragma warning(disable:4610) // user defined constructor required
+#pragma warning(disable:4211) // nonstandard extension used (char name[0] in structs)
+#pragma warning(disable:4268) // 'const' static/global data initialized with compiler generated default constructor fills the object with zeros
+#pragma warning(disable:4238) // nonstandard extension used : class rvalue used as lvalue
+#pragma warning(disable:4291) // no matching operator delete found
+#pragma warning(disable:4345) // behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized
+
+ // Depending on the code base, you may want to not disable these
+#pragma warning(disable:4245) // assigning signed / unsigned
+//#pragma warning(disable:4146) // unary minus applied to unsigned
+//#pragma warning(disable:4244) // loss of data int -> char ..
+#pragma warning(disable:4127) // conditional expression is constant
+#pragma warning(disable:4100) // unreferenced formal parameter
+
+#pragma warning(1:4189) // local variable initialized but not used
+
+#ifndef DEBUG
+#pragma warning(disable:4505) // unreferenced local function has been removed
+//#pragma warning(disable:4702) // unreachable code
+#pragma warning(disable:4313) // 'format specifier' in format string conflicts with argument %d of type 'type'
+#endif // !DEBUG
+
+ // CONSIDER put these back in
+#pragma warning(disable:4063) // bad switch value for enum (only in Disasm.cpp)
+#pragma warning(disable:4710) // function not inlined
+#pragma warning(disable:4527) // user-defined destructor required
+#pragma warning(disable:4513) // destructor could not be generated
+
+ // <TODO>TODO we really probably need this one put back in!!!</TODO>
+//#pragma warning(disable:4701) // local variable may be used without being initialized
+#endif // _MSC_VER
+
+#define _CRT_DEPENDENCY_ //this code depends on the crt file functions
+
+
+#include <stdint.h>
+#include <winwrap.h>
+
+
+#include <windef.h>
+#include <winnt.h>
+#include <stdlib.h>
+#include <wchar.h>
+#include <objbase.h>
+#include <stddef.h>
+#include <float.h>
+#include <math.h>
+#include <time.h>
+#include <limits.h>
+
+#include <olectl.h>
+
+#ifdef _MSC_VER
+//The non-inline CRT versions of these are faster than the inline intrinsics
+#pragma function(memcpy,memcmp,strcmp,strcpy,strlen,strcat)
+#endif // _MSC_VER
+
+#include "volatile.h"
+
+// make all the unsafe redefinitions available
+#include "unsafe.h"
+
+#include <../../debug/inc/dbgtargetcontext.h>
+
+//-----------------------------------------------------------------------------------------------------------
+
+#include "compatibilityflags.h"
+extern BOOL GetCompatibilityFlag(CompatibilityFlag flag);
+#ifndef FEATURE_CORECLR
+extern DWORD* GetGlobalCompatibilityFlags();
+#endif // !FEATURE_CORECLR
+
+#include "strongname.h"
+#include "stdmacros.h"
+
+#define POISONC ((UINT_PTR)((sizeof(int *) == 4)?0xCCCCCCCCL:I64(0xCCCCCCCCCCCCCCCC)))
+
+#include "ndpversion.h"
+#include "switches.h"
+#include "holder.h"
+#include "classnames.h"
+#include "util.hpp"
+#include "corpriv.h"
+//#include "WarningControl.h"
+
+#include <daccess.h>
+
+typedef VPTR(class LoaderAllocator) PTR_LoaderAllocator;
+typedef VPTR(class AppDomain) PTR_AppDomain;
+typedef VPTR(class AppDomainBaseObject) PTR_AppDomainBaseObject;
+typedef DPTR(class ArrayBase) PTR_ArrayBase;
+typedef DPTR(class ArrayTypeDesc) PTR_ArrayTypeDesc;
+typedef DPTR(class Assembly) PTR_Assembly;
+typedef DPTR(class AssemblyBaseObject) PTR_AssemblyBaseObject;
+typedef DPTR(class AssemblyNameBaseObject) PTR_AssemblyNameBaseObject;
+typedef VPTR(class BaseDomain) PTR_BaseDomain;
+typedef DPTR(class ClassLoader) PTR_ClassLoader;
+typedef DPTR(class ComCallMethodDesc) PTR_ComCallMethodDesc;
+typedef VPTR(class CompilationDomain) PTR_CompilationDomain;
+typedef DPTR(class ComPlusCallMethodDesc) PTR_ComPlusCallMethodDesc;
+typedef VPTR(class DebugInterface) PTR_DebugInterface;
+typedef DPTR(class Dictionary) PTR_Dictionary;
+typedef VPTR(class DomainAssembly) PTR_DomainAssembly;
+typedef VPTR(class DomainFile) PTR_DomainFile;
+typedef VPTR(class DomainModule) PTR_DomainModule;
+typedef DPTR(struct FailedAssembly) PTR_FailedAssembly;
+typedef VPTR(class EditAndContinueModule) PTR_EditAndContinueModule;
+typedef DPTR(class EEClass) PTR_EEClass;
+typedef DPTR(class DelegateEEClass) PTR_DelegateEEClass;
+typedef DPTR(struct DomainLocalModule) PTR_DomainLocalModule;
+typedef VPTR(class EECodeManager) PTR_EECodeManager;
+typedef DPTR(class EEConfig) PTR_EEConfig;
+typedef VPTR(class EEDbgInterfaceImpl) PTR_EEDbgInterfaceImpl;
+typedef VPTR(class DebugInfoManager) PTR_DebugInfoManager;
+typedef DPTR(class FieldDesc) PTR_FieldDesc;
+typedef VPTR(class Frame) PTR_Frame;
+typedef VPTR(class ICodeManager) PTR_ICodeManager;
+typedef VPTR(class IJitManager) PTR_IJitManager;
+typedef VPTR(struct IUnknown) PTR_IUnknown;
+typedef DPTR(class InstMethodHashTable) PTR_InstMethodHashTable;
+typedef DPTR(class MetaSig) PTR_MetaSig;
+typedef DPTR(class MethodDesc) PTR_MethodDesc;
+typedef DPTR(class MethodDescChunk) PTR_MethodDescChunk;
+typedef DPTR(class MethodImpl) PTR_MethodImpl;
+typedef DPTR(class MethodTable) PTR_MethodTable;
+typedef DPTR(class MscorlibBinder) PTR_MscorlibBinder;
+typedef VPTR(class Module) PTR_Module;
+typedef DPTR(class NDirectMethodDesc) PTR_NDirectMethodDesc;
+typedef VPTR(class Thread) PTR_Thread;
+typedef DPTR(class Object) PTR_Object;
+typedef DPTR(PTR_Object) PTR_PTR_Object;
+typedef DPTR(class ObjHeader) PTR_ObjHeader;
+typedef DPTR(class Precode) PTR_Precode;
+typedef VPTR(class ReflectionModule) PTR_ReflectionModule;
+typedef DPTR(class ReflectClassBaseObject) PTR_ReflectClassBaseObject;
+typedef DPTR(class ReflectMethodObject) PTR_ReflectMethodObject;
+typedef DPTR(class ReflectFieldObject) PTR_ReflectFieldObject;
+typedef DPTR(class ReflectModuleBaseObject) PTR_ReflectModuleBaseObject;
+typedef DPTR(class ReJitManager) PTR_ReJitManager;
+typedef DPTR(struct ReJitInfo) PTR_ReJitInfo;
+typedef DPTR(struct SharedReJitInfo) PTR_SharedReJitInfo;
+typedef DPTR(class StringObject) PTR_StringObject;
+typedef DPTR(class StringBufferObject) PTR_StringBufferObject;
+typedef DPTR(class TypeHandle) PTR_TypeHandle;
+typedef VPTR(class VirtualCallStubManager) PTR_VirtualCallStubManager;
+typedef VPTR(class VirtualCallStubManagerManager) PTR_VirtualCallStubManagerManager;
+typedef VPTR(class GCHeap) PTR_GCHeap;
+
+//
+// _UNCHECKED_OBJECTREF is for code that can't deal with DEBUG OBJECTREFs
+//
+typedef PTR_Object _UNCHECKED_OBJECTREF;
+typedef DPTR(PTR_Object) PTR_UNCHECKED_OBJECTREF;
+
+#ifdef USE_CHECKED_OBJECTREFS
+class OBJECTREF;
+#else
+typedef PTR_Object OBJECTREF;
+#endif
+typedef DPTR(OBJECTREF) PTR_OBJECTREF;
+typedef DPTR(PTR_OBJECTREF) PTR_PTR_OBJECTREF;
+
+EXTERN_C Thread* STDCALL GetThread();
+BOOL SetThread(Thread*);
+
+// This is a mechanism by which macros can make the Thread pointer available to inner scopes
+// that is robust to code changes. If the outer Thread is no longer available for some reason
+// (e.g. code refactoring), this GET_THREAD() macro will fall back to calling GetThread().
+const bool CURRENT_THREAD_AVAILABLE = false;
+Thread * const CURRENT_THREAD = NULL;
+#define GET_THREAD() (CURRENT_THREAD_AVAILABLE ? CURRENT_THREAD : GetThread())
+
+#define MAKE_CURRENT_THREAD_AVAILABLE() \
+ Thread * __pThread = GET_THREAD(); \
+ MAKE_CURRENT_THREAD_AVAILABLE_EX(__pThread)
+
+#define MAKE_CURRENT_THREAD_AVAILABLE_EX(__pThread) \
+ Thread * CURRENT_THREAD = __pThread; \
+ const bool CURRENT_THREAD_AVAILABLE = true; \
+ (void)CURRENT_THREAD_AVAILABLE; /* silence "local variable initialized but not used" warning */ \
+
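+// Usage sketch (hypothetical function, for illustration): cache the Thread
+// pointer once so that macros expanded later in the body compile GET_THREAD()
+// down to the cached local instead of a TLS lookup:
+//
+//   void Foo()
+//   {
+//       MAKE_CURRENT_THREAD_AVAILABLE();    // declares a local CURRENT_THREAD
+//       // ... code that expands macros using GET_THREAD() ...
+//       Thread * pThread = GET_THREAD();    // folds to the cached local
+//   }
+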
+#ifndef DACCESS_COMPILE
+EXTERN_C AppDomain* STDCALL GetAppDomain();
+#endif //!DACCESS_COMPILE
+
+inline void RetailBreak()
+{
+#ifdef _TARGET_X86_
+ __asm int 3
+#else
+ DebugBreak();
+#endif
+}
+
+extern BOOL isMemoryReadable(const TADDR start, unsigned len);
+
+#ifndef memcpyUnsafe_f
+#define memcpyUnsafe_f
+
+// use this when you want to memcpy something that contains GC refs
+FORCEINLINE void* memcpyUnsafe(void *dest, const void *src, size_t len)
+{
+ WRAPPER_NO_CONTRACT;
+ return memcpy(dest, src, len);
+}
+
+#endif // !memcpyUnsafe_f
+
+//
+// By default logging, and debug GC are enabled under debug
+//
+// These can be enabled in non-debug builds by removing the #ifdef _DEBUG,
+// allowing one to log/check_gc a free build.
+//
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+
+ // You should be using CopyValueClass if you are doing a memcpy
+ // in the GC heap.
+ #if !defined(memcpy)
+ FORCEINLINE void* memcpyNoGCRefs(void * dest, const void * src, size_t len) {
+ WRAPPER_NO_CONTRACT;
+ return memcpy(dest, src, len);
+ }
+ extern "C" void * __cdecl GCSafeMemCpy(void *, const void *, size_t);
+ #define memcpy(dest, src, len) GCSafeMemCpy(dest, src, len)
+ #endif // !defined(memcpy)
+
+ #if !defined(CHECK_APP_DOMAIN_LEAKS)
+ #define CHECK_APP_DOMAIN_LEAKS 1
+ #endif
+#else // !_DEBUG && !DACCESS_COMPILE && !CROSSGEN_COMPILE
+ FORCEINLINE void* memcpyNoGCRefs(void * dest, const void * src, size_t len) {
+ WRAPPER_NO_CONTRACT;
+
+ return memcpy(dest, src, len);
+ }
+#endif // !_DEBUG && !DACCESS_COMPILE && !CROSSGEN_COMPILE
+
+namespace Loader
+{
+ typedef enum
+ {
+ Load, //should load
+ DontLoad, //should not load
+ SafeLookup //take no locks, no allocations
+ } LoadFlag;
+}
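+
+// A LoadFlag is threaded through loader entry points to pick a policy; e.g.
+// a lookup on a GC or debugger path would pass Loader::SafeLookup (no locks,
+// no allocations), while ordinary type resolution passes Loader::Load.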
+
+
+// src/inc
+#include "utilcode.h"
+#include "log.h"
+#include "loaderheap.h"
+#include "fixuppointer.h"
+#include "lazycow.h"
+
+// src/vm
+#include "util.hpp"
+#include "ibclogger.h"
+#include "eepolicy.h"
+
+#include "vars.hpp"
+#include "crst.h"
+#include "argslot.h"
+#include "stublink.h"
+#include "cgensys.h"
+#include "ceemain.h"
+#include "hash.h"
+#include "eecontract.h"
+#include "pedecoder.h"
+#include "sstring.h"
+#include "slist.h"
+
+#include "eeconfig.h"
+
+#include "spinlock.h"
+#include "objecthandle.h"
+#include "declsec.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "stdinterfaces.h"
+#endif
+
+#include "typehandle.h"
+#include "perfcounters.h"
+#include "methodtable.h"
+#include "typectxt.h"
+
+#include "eehash.h"
+
+#include "handletable.h"
+#include "vars.hpp"
+#include "eventstore.hpp"
+
+#include "synch.h"
+#include "regdisp.h"
+#include "stackframe.h"
+#include "gms.h"
+#include "stackprobe.h"
+#include "fcall.h"
+#include "syncblk.h"
+#include "gcdesc.h"
+#include "specialstatics.h"
+#include "object.h" // <NICE> We should not really need to put this so early... </NICE>
+#include "gchelpers.h"
+#include "pefile.h"
+#include "clrex.h"
+#include "clsload.hpp" // <NICE> We should not really need to put this so early... </NICE>
+#include "siginfo.hpp"
+#include "binder.h"
+#include "jitinterface.h" // <NICE> We should not really need to put this so early... </NICE>
+#include "ceeload.h"
+#include "memberload.h"
+#include "genericdict.h"
+#include "class.h"
+#include "codeman.h"
+#include "threads.h"
+#include "clrex.inl"
+#ifdef FEATURE_COMINTEROP
+ // These need to be included *after* threads.h so that they can properly use LeaveRuntimeHolder
+ #include "windowsruntime.h"
+ #include "windowsstring.h"
+#endif
+#include "loaderallocator.hpp"
+#include "appdomain.hpp"
+#include "appdomain.inl"
+#include "assembly.hpp"
+#include "pefile.inl"
+#include "excep.h"
+#include "method.hpp"
+#include "callingconvention.h"
+#include "frames.h"
+#include "qcall.h"
+#include "callhelpers.h"
+
+#include "stackwalk.h"
+#include "stackingallocator.h"
+#include "interoputil.h"
+#include "wrappers.h"
+#include "dynamicmethod.h"
+#include "mixedmode.hpp"
+
+#include "gcstress.h"
+
+#ifndef DACCESS_COMPILE
+
+inline VOID UnsafeEEEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+ if (CLRTaskHosted()) {
+ Thread::BeginThreadAffinity();
+ }
+ UnsafeEnterCriticalSection(lpCriticalSection);
+ INCTHREADLOCKCOUNT();
+}
+
+inline VOID UnsafeEELeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ UnsafeLeaveCriticalSection(lpCriticalSection);
+ DECTHREADLOCKCOUNT();
+ if (CLRTaskHosted()) {
+ Thread::EndThreadAffinity();
+ }
+}
+
+inline BOOL UnsafeEETryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+ BOOL fEnteredCriticalSection = UnsafeTryEnterCriticalSection(lpCriticalSection);
+ if(fEnteredCriticalSection)
+ {
+ INCTHREADLOCKCOUNT();
+ }
+ return fEnteredCriticalSection;
+}
+
+#endif // !DACCESS_COMPILE
+
+HRESULT EnsureRtlFunctions();
+HINSTANCE GetModuleInst();
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+//
+// Strong memory model. No memory barrier necessary before writing object references into GC heap.
+//
+#define GCHeapMemoryBarrier()
+#else
+//
+// The weak memory model forces us to raise memory barriers before writing object references into the GC heap. This is required
+// both for security and to make most managed code written against the strong memory model work. Under normal circumstances, this
+// memory barrier is part of the GC write barrier. However, there are a few places in the VM that set cards manually without going
+// through the regular GC write barrier. These places need to use this macro. This macro is usually used before a memcpy-like
+// operation followed by SetCardsAfterBulkCopy.
+//
+#define GCHeapMemoryBarrier() MemoryBarrier()
+#endif
+
+
+// use this when you want to memcpy something that contains GC refs
+void memmoveGCRefs(void *dest, const void *src, size_t len);
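+
+// Illustrative sketch (not part of the original source) of the pattern the
+// comment above describes; argument lists are elided/hypothetical:
+//
+//   GCHeapMemoryBarrier();            // no-op on strong-model targets
+//   memcpy(dest, src, len);           // bulk copy containing object refs
+//   SetCardsAfterBulkCopy(...);       // set the cards manually
+//
+// memmoveGCRefs (declared above) packages this pattern for callers.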
+
+
+#if defined(_DEBUG)
+
+// This catches CANNOTTHROW macros that occur outside the scope of a CONTRACT.
+// Note that it's important for m_CannotThrowLineNums to be NULL.
+struct DummyGlobalContract
+{
+ int *m_CannotThrowLineNums; //= NULL;
+ LPVOID *m_CannotThrowRecords; //= NULL;
+};
+
+extern DummyGlobalContract ___contract;
+
+#endif // defined(_DEBUG)
+
+
+// All files get to see all of these .inl files to make sure all files
+// get the benefit of inlining.
+#include "ceeload.inl"
+#include "typedesc.inl"
+#include "class.inl"
+#include "methodtable.inl"
+#include "typehandle.inl"
+#include "object.inl"
+#include "clsload.inl"
+#include "domainfile.inl"
+#include "handletable.inl"
+#include "method.inl"
+#include "stackprobe.inl"
+#include "syncblk.inl"
+#include "threads.inl"
+#include "eehash.inl"
+#include "mscorcfg.h"
+#ifdef FEATURE_COMINTEROP
+#include "WinRTRedirector.h"
+#include "winrtredirector.inl"
+#endif // FEATURE_COMINTEROP
+
+inline HRESULT CreateConfigStreamHelper(LPCWSTR filename, IStream** pOutStream)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+    HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+        hr = CreateConfigStream(filename, pOutStream);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+
+#if defined(COMMON_TURNED_FPO_ON)
+#pragma optimize("", on) // Go back to command line default optimizations
+#undef COMMON_TURNED_FPO_ON
+#undef FPO_ON
+#endif
+
+extern INT64 g_PauseTime; // Total duration of all pauses in the runtime
+extern Volatile<BOOL> g_IsPaused; // True if the runtime is Paused for FAS
+extern CLREventStatic g_ClrResumeEvent; // Event fired when the runtime is resumed after a Pause for FAS
+INT64 AdditionalWait(INT64 sPauseTime, INT64 sTime, INT64 expDuration);
+
+#endif // CLR_STANDALONE_BINDER
+
+#endif // !_common_h_
+
+
diff --git a/src/vm/commtmemberinfomap.cpp b/src/vm/commtmemberinfomap.cpp
new file mode 100644
index 0000000000..0fb9109e72
--- /dev/null
+++ b/src/vm/commtmemberinfomap.cpp
@@ -0,0 +1,1583 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*============================================================
+**
+** Header: Map associated with a ComMethodTable that contains
+** information on its members.
+===========================================================*/
+
+#include "common.h"
+
+#include "commtmemberinfomap.h"
+#include "comcallablewrapper.h"
+#include "tlbexport.h"
+#include "field.h"
+#include "caparser.h"
+
+#define BASE_OLEAUT_DISPID 0x60020000
+
+static LPCWSTR szDefaultValue = W("Value");
+static LPCWSTR szGetEnumerator = W("GetEnumerator");
+
+// ============================================================================
+// This structure and class definition are used to implement the hash table
+// used to make sure that there are no duplicate class names.
+// ============================================================================
+struct WSTRHASH : HASHLINK
+{
+ LPCWSTR szName; // Ptr to hashed string.
+};
+
+class CWStrHash : public CChainedHash<WSTRHASH>
+{
+public:
+ virtual bool InUse(WSTRHASH *pItem)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pItem));
+ }
+ CONTRACTL_END;
+
+ return (pItem->szName != NULL);
+ }
+
+ virtual void SetFree(WSTRHASH *pItem)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pItem));
+ }
+ CONTRACTL_END;
+
+ pItem->szName = NULL;
+ }
+
+ virtual ULONG Hash(const void *pData)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pData));
+ }
+ CONTRACTL_END;
+
+ // Do case-insensitive hash
+ return (HashiString(reinterpret_cast<LPCWSTR>(pData)));
+ }
+
+ virtual int Cmp(const void *pData, void *pItem)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pItem));
+ PRECONDITION(CheckPointer(pData));
+ }
+ CONTRACTL_END;
+
+ return SString::_wcsicmp(reinterpret_cast<LPCWSTR>(pData),reinterpret_cast<WSTRHASH*>(pItem)->szName);
+ }
+}; // class CWStrHash : public CChainedHash<WSTRHASH>
+
+
+// ============================================================================
+// Token and module pair hashtable.
+// ============================================================================
+EEHashEntry_t * EEModuleTokenHashTableHelper::AllocateEntry(EEModuleTokenPair *pKey, BOOL bDeepCopy, void *pHeap)
+{
+ CONTRACT (EEHashEntry_t*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(CONTRACT_RETURN NULL);
+ PRECONDITION(CheckPointer(pKey));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ _ASSERTE(!bDeepCopy && "Deep copy is not supported by the EEModuleTokenHashTableHelper");
+
+ EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(EEModuleTokenPair)];
+ if (!pEntry)
+ RETURN NULL;
+
+ EEModuleTokenPair *pEntryKey = (EEModuleTokenPair *) pEntry->Key;
+ pEntryKey->m_tk = pKey->m_tk;
+ pEntryKey->m_pModule = pKey->m_pModule;
+
+ RETURN pEntry;
+} // EEHashEntry_t * EEModuleTokenHashTableHelper::AllocateEntry()
+
+
+void EEModuleTokenHashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pEntry));
+ }
+ CONTRACTL_END;
+
+ delete [] (BYTE*)pEntry;
+} // void EEModuleTokenHashTableHelper::DeleteEntry()
+
+
+BOOL EEModuleTokenHashTableHelper::CompareKeys(EEHashEntry_t *pEntry, EEModuleTokenPair *pKey)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pEntry));
+ PRECONDITION(CheckPointer(pKey));
+ }
+ CONTRACTL_END;
+
+ EEModuleTokenPair *pEntryKey = (EEModuleTokenPair*) pEntry->Key;
+
+ // Compare the token.
+ if (pEntryKey->m_tk != pKey->m_tk)
+ return FALSE;
+
+ // Compare the module.
+ if (pEntryKey->m_pModule != pKey->m_pModule)
+ return FALSE;
+
+ return TRUE;
+} // BOOL EEModuleTokenHashTableHelper::CompareKeys()
+
+
+DWORD EEModuleTokenHashTableHelper::Hash(EEModuleTokenPair *pKey)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pKey));
+ }
+ CONTRACTL_END;
+
+ size_t val = (size_t) ((DWORD_PTR)pKey->m_tk + (DWORD_PTR)pKey->m_pModule);
+#ifdef _TARGET_X86_
+ return (DWORD)val;
+#else
+ // @TODO IA64: Is this a good hashing mechanism on IA64?
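+    // Module pointers are pointer-aligned (8 bytes on 64-bit), so the low
+    // bits of the sum carry little information; shifting them out spreads
+    // entries across hash buckets more evenly.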
+ return (DWORD)(val >> 3);
+#endif
+} // DWORD EEModuleTokenHashTableHelper::Hash()
+
+
+EEModuleTokenPair *EEModuleTokenHashTableHelper::GetKey(EEHashEntry_t *pEntry)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pEntry));
+ }
+ CONTRACTL_END;
+
+ return (EEModuleTokenPair*)pEntry->Key;
+} // EEModuleTokenPair *EEModuleTokenHashTableHelper::GetKey()
+
+
+// ============================================================================
+// ComMethodTable member info map.
+// ============================================================================
+void ComMTMemberInfoMap::Init(size_t sizeOfPtr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ mdTypeDef td; // Token for the class.
+ BYTE const *pData; // Pointer to a custom attribute blob.
+ ULONG cbData; // Size of a custom attribute blob.
+
+ // Get the TypeDef and some info about it.
+ td = m_pMT->GetCl();
+
+ m_bHadDuplicateDispIds = FALSE;
+
+ // See if there is a default property.
+ m_DefaultProp[0] = 0; // init to 'none'.
+ hr = m_pMT->GetMDImport()->GetCustomAttributeByName(
+ td, INTEROP_DEFAULTMEMBER_TYPE, reinterpret_cast<const void**>(&pData), &cbData);
+ if (hr == S_FALSE)
+ {
+ hr = m_pMT->GetMDImport()->GetCustomAttributeByName(
+ td, "System.Reflection.DefaultMemberAttribute", reinterpret_cast<const void**>(&pData), &cbData);
+ }
+
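+    // Note: a serialized custom attribute blob starts with the two-byte
+    // prolog 0x0001 (bytes 01 00), which is what the pData checks below test
+    // for, in addition to requiring room for a string after the prolog.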
+ if (hr == S_OK && cbData > 5 && pData[0] == 1 && pData[1] == 0)
+ {
+ CustomAttributeParser cap(pData, cbData);
+
+        // The prolog was already verified by the checks above, so this
+        // re-validation is technically redundant; it is left in place to
+        // avoid causing a breaking change.
+ VERIFY(SUCCEEDED(cap.ValidateProlog()));
+
+ LPCUTF8 szString;
+ ULONG cbString;
+ if (SUCCEEDED(cap.GetNonNullString(&szString, &cbString)))
+ {
+ // Copy the data, then null terminate (CA blob's string may not be).
+ m_DefaultProp.ReSizeThrows(cbString+1);
+ memcpy(m_DefaultProp.Ptr(), szString, cbString);
+ m_DefaultProp[cbString] = 0;
+ }
+ }
+
+ // Set up the properties for the type.
+ if (m_pMT->IsInterface())
+ SetupPropsForInterface(sizeOfPtr);
+ else
+ SetupPropsForIClassX(sizeOfPtr);
+
+    // Initialize the hashtable.
+ m_TokenToComMTMethodPropsMap.Init((DWORD)m_MethodProps.Size(), NULL, NULL);
+
+ // Populate the hashtable that maps from token to member info.
+ PopulateMemberHashtable();
+} // HRESULT ComMTMemberInfoMap::Init()
+
+
+ComMTMethodProps *ComMTMemberInfoMap::GetMethodProps(mdToken tk, Module *pModule)
+{
+ CONTRACT (ComMTMethodProps*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pModule));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ EEModuleTokenPair TokenModulePair(tk, pModule);
+ HashDatum Data;
+
+ if (m_TokenToComMTMethodPropsMap.GetValue(&TokenModulePair, &Data))
+ RETURN (ComMTMethodProps *)Data;
+
+ RETURN NULL;
+} // ComMTMethodProps *ComMTMemberInfoMap::GetMethodProps()
+
+
+void ComMTMemberInfoMap::SetupPropsForIClassX(size_t sizeOfPtr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ComMethodTable *pCMT; // ComMethodTable for the Class Vtable.
+ MethodDesc *pMeth; // A method descriptor.
+ ComCallMethodDesc *pFieldMeth; // A method descriptor for a field.
+ FieldDesc *pField; // Actual FieldDesc for field.
+ DWORD nSlots; // Number of vtable slots.
+ UINT i; // Loop control.
+ LPCUTF8 pszName; // A name in UTF8.
+ CQuickArray<WCHAR> rName; // A name.
+ ULONG dispid; // A dispid.
+ SHORT oVftBase; // Offset in vtable, if not system defined.
+ int cVisibleMembers = 0; // The count of methods that are visible to COM.
+ HRESULT hr = S_OK; // A result.
+ DWORD dwTIFlags = 0; // TypeLib flags.
+
+ // Get the vtable for the class.
+ pCMT = ComCallWrapperTemplate::SetupComMethodTableForClass(m_pMT, TRUE);
+ nSlots = pCMT->GetSlots();
+
+    // IDispatch derived: 7 base vtable slots (IUnknown's 3 plus IDispatch's 4).
+    oVftBase = 7 * (SHORT)sizeOfPtr;
+
+ // Build array of descriptive information.
+ m_MethodProps.ReSizeThrows(nSlots);
+ for (i=0; i<nSlots; ++i)
+ {
+ if (pCMT->IsSlotAField(i))
+ {
+ // Fields better come in pairs.
+ _ASSERTE(i < nSlots-1);
+
+ pFieldMeth = pCMT->GetFieldCallMethodDescForSlot(i);
+ pField = pFieldMeth->GetFieldDesc();
+
+ DWORD dwFlags;
+ IfFailThrow(pField->GetMDImport()->GetFieldDefProps(pField->GetMemberDef(), &dwFlags));
+ BOOL bReadOnly = IsFdInitOnly(dwFlags) || IsFdLiteral(dwFlags);
+ BOOL bFieldVisibleFromCom = IsMemberVisibleFromCom(pField->GetApproxEnclosingMethodTable(), pField->GetMemberDef(), mdTokenNil);
+
+ // Get the assigned dispid, or DISPID_UNKNOWN.
+ hr = pField->GetMDImport()->GetDispIdOfMemberDef(pField->GetMemberDef(), &dispid);
+
+ IfFailThrow(pField->GetMDImport()->GetNameOfFieldDef(pField->GetMemberDef(), &pszName));
+ IfFailThrow(Utf2Quick(pszName, rName));
+ ULONG cchpName = ((int)wcslen(rName.Ptr())) + 1;
+ m_MethodProps[i].pName = reinterpret_cast<WCHAR*>(m_sNames.Alloc(cchpName * sizeof(WCHAR)));
+
+ m_MethodProps[i].pMeth = (MethodDesc*)pFieldMeth;
+            // It's safe to do the following cast because FieldSemanticOffset is 100, msSetter = 1, msGetter = 2.
+ m_MethodProps[i].semantic = static_cast<USHORT>(FieldSemanticOffset + (pFieldMeth->IsFieldGetter() ? msGetter : msSetter));
+ m_MethodProps[i].property = mdPropertyNil;
+ wcscpy_s(m_MethodProps[i].pName, cchpName, rName.Ptr());
+ m_MethodProps[i].dispid = dispid;
+ m_MethodProps[i].oVft = 0;
+ m_MethodProps[i].bMemberVisible = bFieldVisibleFromCom && (!bReadOnly || pFieldMeth->IsFieldGetter());
+ m_MethodProps[i].bFunction2Getter = FALSE;
+
+ ++i;
+ pFieldMeth = pCMT->GetFieldCallMethodDescForSlot(i);
+ m_MethodProps[i].pMeth = (MethodDesc*)pFieldMeth;
+            // It's safe to do the following cast because FieldSemanticOffset is 100, msSetter = 1, msGetter = 2.
+ m_MethodProps[i].semantic = static_cast<USHORT>(FieldSemanticOffset + (pFieldMeth->IsFieldGetter() ? msGetter : msSetter));
+ m_MethodProps[i].property = i - 1;
+ m_MethodProps[i].dispid = dispid;
+ m_MethodProps[i].oVft = 0;
+ m_MethodProps[i].bMemberVisible = bFieldVisibleFromCom && (!bReadOnly || pFieldMeth->IsFieldGetter());
+ m_MethodProps[i].bFunction2Getter = FALSE;
+ }
+ else
+ {
+ // Retrieve the method desc on the current class. This involves looking up the method
+ // desc in the vtable if it is a virtual method.
+ pMeth = pCMT->GetMethodDescForSlot(i);
+ if (pMeth->IsVirtual())
+ {
+ WORD wSlot = InteropMethodTableData::GetSlotForMethodDesc(m_pMT, pMeth);
+ _ASSERTE(wSlot != MethodTable::NO_SLOT);
+ pMeth = m_pMT->GetComInteropData()->pVTable[wSlot].pMD;
+ }
+ m_MethodProps[i].pMeth = pMeth;
+
+ // Retrieve the properties of the method.
+ GetMethodPropsForMeth(pMeth, i, m_MethodProps, m_sNames);
+
+ // Turn off dispids that look system-assigned.
+ if (m_MethodProps[i].dispid >= 0x40000000 && m_MethodProps[i].dispid <= 0x7fffffff)
+ m_MethodProps[i].dispid = DISPID_UNKNOWN;
+ }
+ }
+
+ // COM+ supports properties in which the getter and setter have different signatures,
+ // but TypeLibs do not. Look for mismatched signatures, and break apart the properties.
+ for (i=0; i<nSlots; ++i)
+ {
+ // Is it a property, but not a field? Fields only have one signature, so they are always OK.
+ if (TypeFromToken(m_MethodProps[i].property) != mdtProperty &&
+ m_MethodProps[i].semantic < FieldSemanticOffset)
+ {
+ // Get the indices of the getter and setter.
+ size_t ixSet, ixGet;
+
+ if (m_MethodProps[i].semantic == msGetter)
+ {
+ ixGet = i, ixSet = m_MethodProps[i].property;
+ }
+ else
+ {
+ _ASSERTE(m_MethodProps[i].semantic == msSetter);
+ ixSet = i, ixGet = m_MethodProps[i].property;
+ }
+
+ // Get the signatures.
+ PCCOR_SIGNATURE pbGet, pbSet;
+ ULONG cbGet, cbSet;
+ pMeth = pCMT->GetMethodDescForSlot((unsigned)ixSet);
+ pMeth->GetSig(&pbSet, &cbSet);
+
+ pMeth = pCMT->GetMethodDescForSlot((unsigned)ixGet);
+ pMeth->GetSig(&pbGet, &cbGet);
+
+ // Now reuse ixGet, ixSet to index through signature.
+ ixGet = ixSet = 0;
+
+ // Eat calling conventions.
+ ULONG callconv;
+ ixGet += CorSigUncompressData(&pbGet[ixGet], &callconv);
+ _ASSERTE((callconv & IMAGE_CEE_CS_CALLCONV_MASK) != IMAGE_CEE_CS_CALLCONV_FIELD);
+ ixSet += CorSigUncompressData(&pbSet[ixSet], &callconv);
+ _ASSERTE((callconv & IMAGE_CEE_CS_CALLCONV_MASK) != IMAGE_CEE_CS_CALLCONV_FIELD);
+
+ // Argument count.
+ ULONG acGet, acSet;
+ ixGet += CorSigUncompressData(&pbGet[ixGet], &acGet);
+ ixSet += CorSigUncompressData(&pbSet[ixSet], &acSet);
+
+            // Setter must take exactly one more parameter.
+ if (acSet != acGet+1)
+ goto UnLink;
+
+ // All matched, so on to next.
+ continue;
+
+
+ // Unlink the properties, and turn them into ordinary functions.
+UnLink:
+ // Get the indices of the getter and setter (again).
+ if (m_MethodProps[i].semantic == msGetter)
+ ixGet = i, ixSet = m_MethodProps[i].property;
+ else
+ ixSet = i, ixGet = m_MethodProps[i].property;
+
+ // Eliminate the semantics.
+ m_MethodProps[ixGet].semantic = 0;
+ m_MethodProps[ixSet].semantic = 0;
+
+ // Decorate the names.
+ // These are the names of properties when properties don't have signatures
+ // that match, and the "get" and "set" below don't have to match the CLS
+ // property names. This is an obscure corner case.
+ m_MethodProps[i].pName = m_MethodProps[m_MethodProps[i].property].pName;
+            WCHAR *pNewName;
+            // String length + "get"/"set" prefix (3 chars, over-allocated to 4) + null terminator.
+            ULONG cchpNewName = ((ULONG)wcslen(m_MethodProps[ixGet].pName)) + 4 + 1;
+            pNewName = reinterpret_cast<WCHAR*>(m_sNames.Alloc(cchpNewName * sizeof(WCHAR)));
+            wcscpy_s(pNewName, cchpNewName, W("get"));
+            wcscat_s(pNewName, cchpNewName, m_MethodProps[ixGet].pName);
+            m_MethodProps[ixGet].pName = pNewName;
+            // Recompute the buffer size for the setter; its name may differ from the getter's.
+            cchpNewName = ((ULONG)wcslen(m_MethodProps[ixSet].pName)) + 4 + 1;
+            pNewName = reinterpret_cast<WCHAR*>(m_sNames.Alloc(cchpNewName * sizeof(WCHAR)));
+            wcscpy_s(pNewName, cchpNewName, W("set"));
+            wcscat_s(pNewName, cchpNewName, m_MethodProps[ixSet].pName);
+            m_MethodProps[ixSet].pName = pNewName;
+
+ // If the methods share a dispid, kill them both.
+ if (m_MethodProps[ixGet].dispid == m_MethodProps[ixSet].dispid)
+ m_MethodProps[ixGet].dispid = m_MethodProps[ixSet].dispid = DISPID_UNKNOWN;
+
+ // Unlink from each other.
+ m_MethodProps[i].property = mdPropertyNil;
+
+ }
+ }
+
+ // Assign vtable offsets.
+ for (i = 0; i < nSlots; ++i)
+ {
+ SHORT oVft = oVftBase + static_cast<SHORT>(i * sizeOfPtr);
+ m_MethodProps[i].oVft = oVft;
+ }
+
+ // Resolve duplicate dispids.
+ EliminateDuplicateDispIds(m_MethodProps, nSlots);
+
+ // Pick something for the "Value".
+ AssignDefaultMember(m_MethodProps, m_sNames, nSlots);
+
+ // Check to see if there is something to assign DISPID_NEWENUM to.
+ AssignNewEnumMember(m_MethodProps, m_sNames, nSlots);
+
+ // Resolve duplicate names.
+ EliminateDuplicateNames(m_MethodProps, m_sNames, nSlots);
+
+ // Do some PROPERTYPUT/PROPERTYPUTREF translation.
+ FixupPropertyAccessors(m_MethodProps, m_sNames, nSlots);
+
+ // Fix up all properties so that they point to their shared name.
+ for (i=0; i<nSlots; ++i)
+ {
+ if (TypeFromToken(m_MethodProps[i].property) != mdtProperty)
+ {
+ m_MethodProps[i].pName = m_MethodProps[m_MethodProps[i].property].pName;
+ m_MethodProps[i].dispid = m_MethodProps[m_MethodProps[i].property].dispid;
+ }
+ }
+
+ // Assign default dispids.
+ AssignDefaultDispIds();
+} // void ComMTMemberInfoMap::SetupPropsForIClassX()
+
+
+void ComMTMemberInfoMap::SetupPropsForInterface(size_t sizeOfPtr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ULONG iMD; // Loop control.
+ SHORT oVftBase; // Offset in vtable, if not system defined.
+ CorIfaceAttr ifaceType; // Is this interface [dual]?
+ MethodDesc *pMeth; // A MethodDesc.
+ CQuickArray<int> rSlotMap; // Array to map vtable slots.
+ DWORD nSlots; // Number of vtable slots.
+ ULONG ulComSlotMin = ULONG_MAX; // Find first COM+ slot.
+ ULONG ulComSlotMax = 0; // Find last COM+ slot.
+ int bSlotRemap = false; // True if slots need to be mapped, due to holes.
+ HRESULT hr = S_OK;
+
+ // Retrieve the number of vtable slots the interface has.
+ nSlots = m_pMT->GetNumVirtuals();
+
+ // IDispatch, IUnknown, or IInspectable derived?
+ ifaceType = (m_pMT->IsInterface() ? m_pMT->GetComInterfaceType() : ifDual);
+ oVftBase = ComMethodTable::GetNumExtraSlots(ifaceType) * (SHORT)sizeOfPtr;
+
+ // Find lowest slot number.
+ for (iMD=0; iMD < nSlots; ++iMD)
+ {
+ MethodDesc* pMD = m_pMT->GetMethodDescForSlot(iMD);
+ _ASSERTE(pMD != NULL);
+ ULONG tmp = pMD->GetComSlot();
+
+ if (tmp < ulComSlotMin)
+ ulComSlotMin = tmp;
+ if (tmp > ulComSlotMax)
+ ulComSlotMax = tmp;
+ }
+
+ // Used a couple of times.
+ MethodTable::MethodIterator it(m_pMT);
+
+ if (ulComSlotMax-ulComSlotMin >= nSlots)
+ {
+ bSlotRemap = true;
+
+ // Resize the array.
+ rSlotMap.ReSizeThrows(ulComSlotMax+1);
+
+ // Init to "slot not used" value of -1.
+ memset(rSlotMap.Ptr(), -1, rSlotMap.Size()*sizeof(int));
+
+ // See which vtable slots are used.
+ it.MoveToBegin();
+ for (; it.IsValid(); it.Next())
+ {
+ if (it.IsVirtual())
+ {
+ MethodDesc* pMD = it.GetMethodDesc();
+ _ASSERTE(pMD != NULL);
+ ULONG tmp = pMD->GetComSlot();
+ rSlotMap[tmp] = 0;
+ }
+ }
+
+ // Assign incrementing table indices to the slots.
+ ULONG ix=0;
+ for (iMD=0; iMD<=ulComSlotMax; ++iMD)
+ if (rSlotMap[iMD] != -1)
+ rSlotMap[iMD] = ix++;
+ }
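+
+    // (E.g. if only COM slots 0, 2 and 5 are in use, the remapping above
+    // assigns them table indices 0, 1 and 2, compacting out the holes.)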
+
+ // Iterate over the members in the interface and build the list of methods.
+ m_MethodProps.ReSizeThrows(nSlots);
+ it.MoveToBegin();
+ for (; it.IsValid(); it.Next())
+ {
+ if (it.IsVirtual())
+ {
+ pMeth = it.GetMethodDesc();
+ if (pMeth != NULL)
+ {
+ ULONG ixSlot = pMeth->GetComSlot();
+ if (bSlotRemap)
+ ixSlot = rSlotMap[ixSlot];
+ else
+ ixSlot -= ulComSlotMin;
+
+ m_MethodProps[ixSlot].pMeth = pMeth;
+ }
+ }
+ }
+
+ // Now have a list of methods in vtable order. Go through and build names, semantic.
+ for (iMD=0; iMD < nSlots; ++iMD)
+ {
+ pMeth = m_MethodProps[iMD].pMeth;
+ GetMethodPropsForMeth(pMeth, iMD, m_MethodProps, m_sNames);
+ }
+
+ // Assign vtable offsets.
+ for (iMD=0; iMD < nSlots; ++iMD)
+ {
+        SHORT oVft = oVftBase + static_cast<SHORT>((m_MethodProps[iMD].pMeth->GetComSlot() - ulComSlotMin) * sizeOfPtr);
+ m_MethodProps[iMD].oVft = oVft;
+ }
+
+ // Resolve duplicate dispids.
+ EliminateDuplicateDispIds(m_MethodProps, nSlots);
+
+ // Pick something for the "Value".
+ AssignDefaultMember(m_MethodProps, m_sNames, nSlots);
+
+ // Check to see if there is something to assign DISPID_NEWENUM to.
+ AssignNewEnumMember(m_MethodProps, m_sNames, nSlots);
+
+ // Take care of name collisions due to overloading, inheritance.
+ EliminateDuplicateNames(m_MethodProps, m_sNames, nSlots);
+
+ // Do some PROPERTYPUT/PROPERTYPUTREF translation.
+ FixupPropertyAccessors(m_MethodProps, m_sNames, nSlots);
+
+ // Fix up all properties so that they point to their shared name.
+ for (iMD=0; iMD < m_pMT->GetNumVirtuals(); ++iMD)
+ {
+ if (TypeFromToken(m_MethodProps[iMD].property) != mdtProperty)
+ {
+ m_MethodProps[iMD].pName = m_MethodProps[m_MethodProps[iMD].property].pName;
+ m_MethodProps[iMD].dispid = m_MethodProps[m_MethodProps[iMD].property].dispid;
+ }
+ }
+
+ // If the interface is IDispatch based, then assign the default dispids.
+ if (IsDispatchBasedItf(ifaceType))
+ AssignDefaultDispIds();
+} // void ComMTMemberInfoMap::SetupPropsForInterface()
+
+
+// ============================================================================
+// Given a MethodDesc*, get the name of the method or property that the
+// method is a getter/setter for, plus the semantic for getter/setter.
+// In the case of properties, look for a previous getter/setter for this
+// property, and if found, link them, so that only one name participates in
+// name decoration.
+// ============================================================================
+void ComMTMemberInfoMap::GetMethodPropsForMeth(
+ MethodDesc *pMeth, // MethodDesc * for method.
+ int ix, // Slot.
+ CQuickArray<ComMTMethodProps> &rProps, // Array of method property information.
+ CDescPool &sNames) // Pool of possibly decorated names.
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMeth));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ LPCUTF8 pszName; // Name in UTF8.
+ CQuickArray<WCHAR> rName; // Buffer for unicode conversion.
+ LPCWSTR pName; // Pointer to a name, after possible substitution.
+ mdProperty pd; // Property token.
+    LPCUTF8 pPropName; // Pointer to property name.
+ ULONG uSemantic; // Property semantic.
+ ULONG dispid; // A property dispid.
+
+ // Get any user-assigned dispid.
+ rProps[ix].dispid = pMeth->GetComDispid();
+
+ // Assume system-defined vtable offsets.
+ rProps[ix].oVft = 0;
+
+ // Generally don't munge function into a getter.
+ rProps[ix].bFunction2Getter = FALSE;
+
+ // See if there is property information for this member.
+ hr = pMeth->GetModule()->GetPropertyInfoForMethodDef(pMeth->GetMemberDef(), &pd, &pPropName, &uSemantic);
+ IfFailThrow(hr);
+
+ if (hr == S_OK)
+ {
+ // There is property information.
+        // See if a method is already associated with this property.
+ rProps[ix].property = pd;
+ int i;
+ for (i=ix-1; i>=0; --i)
+ {
+ // Same property in same scope?
+ if (rProps[i].property == pd &&
+ rProps[i].pMeth->GetMDImport() == pMeth->GetMDImport())
+ {
+ rProps[ix].property = i;
+ break;
+ }
+ }
+
+ // If there wasn't another method for this property, save the name on
+ // this method, for duplicate elimination.
+ if (i < 0)
+ {
+ // Save the name. Have to convert from UTF8.
+ int iLen = WszMultiByteToWideChar(CP_UTF8, 0, pPropName, -1, 0, 0);
+ rProps[ix].pName = reinterpret_cast<WCHAR*>(sNames.Alloc(iLen*sizeof(WCHAR)));
+ if (rProps[ix].pName == NULL)
+ {
+ ThrowHR(E_OUTOFMEMORY);
+ }
+ WszMultiByteToWideChar(CP_UTF8, 0, pPropName, -1, rProps[ix].pName, iLen);
+
+ // Check whether the property has a dispid attribute.
+ hr = pMeth->GetMDImport()->GetDispIdOfMemberDef(pd, &dispid);
+ if (dispid != DISPID_UNKNOWN)
+ rProps[ix].dispid = dispid;
+
+ // If this is the default property, and the method or property doesn't have a dispid already,
+ // use DISPID_DEFAULT.
+ if (rProps[ix].dispid == DISPID_UNKNOWN)
+ {
+ if (strcmp(pPropName, m_DefaultProp.Ptr()) == 0)
+ {
+ rProps[ix].dispid = DISPID_VALUE;
+
+ // Don't want to try to set multiple as default property.
+ m_DefaultProp[0] = 0;
+ }
+ }
+ }
+
+ // Save the semantic.
+ rProps[ix].semantic = static_cast<USHORT>(uSemantic);
+
+ // Determine if the property is visible from COM.
+ rProps[ix].bMemberVisible = IsMethodVisibleFromCom(pMeth) ? 1 : 0;
+ }
+ else
+ {
+ // Not a property, just an ordinary method.
+ rProps[ix].property = mdPropertyNil;
+ rProps[ix].semantic = 0;
+
+ // Get the name.
+ pszName = pMeth->GetName();
+ if (pszName == NULL)
+ ThrowHR(E_FAIL);
+
+ if (stricmpUTF8(pszName, szInitName) == 0)
+ {
+ pName = szInitNameUse;
+ }
+ else
+ {
+ IfFailThrow(Utf2Quick(pszName, rName));
+ pName = rName.Ptr();
+
+ // If this is a "ToString" method, make it a property get.
+ if (SString::_wcsicmp(pName, szDefaultToString) == 0)
+ {
+ rProps[ix].semantic = msGetter;
+ rProps[ix].bFunction2Getter = TRUE;
+ }
+ }
+
+ ULONG len = ((int)wcslen(pName)) + 1;
+ rProps[ix].pName = reinterpret_cast<WCHAR*>(sNames.Alloc(len * sizeof(WCHAR)));
+ if (rProps[ix].pName == NULL)
+ {
+ ThrowHR(E_OUTOFMEMORY);
+ }
+ wcscpy_s(rProps[ix].pName, len, pName);
+
+ // Determine if the method is visible from COM.
+ rProps[ix].bMemberVisible = !pMeth->IsArray() && IsMethodVisibleFromCom(pMeth);
+ }
+} // void ComMTMemberInfoMap::GetMethodPropsForMeth()
+
+
+
+
+// ============================================================================
+// Process the function names for an interface, checking for duplicates. If
+// any duplicates are found, decorate the names with "_n".
+//
+// NOTE: Two implementations are provided, one using nested for-loops and a
+// second which implements a hashtable. The first is faster when
+// the number of elements is less than 20, otherwise the hashtable
+// is the way to go. The code-size of the first implementation is 574
+// bytes. The hashtable code is 1120 bytes.
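+//
+//        Worked example: with members "Item", "item" and "ITEM" (names are
+//        compared case-insensitively), the first is kept as-is and the other
+//        two are decorated to "item_2" and "ITEM_3".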
+// ============================================================================
+void ComMTMemberInfoMap::EliminateDuplicateNames(
+ CQuickArray<ComMTMethodProps> &rProps, // Array of method property information.
+ CDescPool &sNames, // Pool of possibly decorated names.
+ UINT nSlots) // Count of entries
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CQuickBytes qb;
+ UINT iCur;
+ CorIfaceAttr ifaceType; // VTBL, Dispinterface, IDispatch, or IInspectable
+ ULONG cBaseNames; // Count of names in base interface.
+ BOOL bDup; // Is the name a duplicate?
+ HRESULT hr = S_OK;
+ const size_t cchrcName = MAX_CLASSNAME_LENGTH;
+ LPWSTR rcName = (LPWSTR)qb.AllocThrows(cchrcName * sizeof(WCHAR));
+
+ // Tables of names of methods on IUnknown, IDispatch, and IInspectable.
+ static const LPCWSTR rBaseNames_Dispatch[] =
+ {
+ W("QueryInterface"),
+ W("AddRef"),
+ W("Release"),
+ W("GetTypeInfoCount"),
+ W("GetTypeInfo"),
+ W("GetIDsOfNames"),
+ W("Invoke")
+ };
+
+ static const LPCWSTR rBaseNames_Inspectable[] =
+ {
+ W("QueryInterface"),
+ W("AddRef"),
+ W("Release"),
+ W("GetIIDs"),
+ W("GetRuntimeClassName"),
+ W("GetTrustLevel")
+ };
+
+ // Determine which names are in the base interface.
+ ifaceType = (m_pMT->IsInterface() ? m_pMT->GetComInterfaceType() : ifDual);
+ const LPCWSTR * rBaseNames = (ifaceType == ifInspectable ? rBaseNames_Inspectable : rBaseNames_Dispatch);
+
+    // Is it a pure dispinterface?
+ if (ifaceType == ifDispatch)
+ {
+ cBaseNames = 0;
+ }
+ else
+ {
+        // Otherwise it is IUnknown, IDispatch, or IInspectable derived.
+ cBaseNames = ComMethodTable::GetNumExtraSlots(ifaceType);
+ }
+
+ // we're wasting time if there aren't at least two items!
+ if (nSlots < 2 && cBaseNames == 0)
+ return;
+
+ else if (nSlots < 20)
+ {
+ // Eliminate duplicates.
+ for (iCur=0; iCur<nSlots; ++iCur)
+ {
+ UINT iTst, iSuffix, iTry;
+
+ // If a property with an associated (lower indexed) property, don't need to examine it.
+ if (TypeFromToken(rProps[iCur].property) != mdtProperty)
+ continue;
+
+ // If the member is not visible to COM then we don't need to examine it.
+ if (!rProps[iCur].bMemberVisible)
+ continue;
+
+ // Check for duplicate with already accepted member names.
+ bDup = FALSE;
+ for (iTst=0; !bDup && iTst<iCur; ++iTst)
+ {
+ // If a property with an associated (lower indexed) property, don't need to examine it.
+ if (TypeFromToken(rProps[iTst].property) != mdtProperty)
+ continue;
+
+ // If the member is not visible to COM then we don't need to examine it.
+ if (!rProps[iTst].bMemberVisible)
+ continue;
+
+ if (SString::_wcsicmp(rProps[iCur].pName, rProps[iTst].pName) == 0)
+ bDup = TRUE;
+ }
+
+ // If OK with other members, check with base interface names.
+ for (iTst=0; !bDup && iTst<cBaseNames; ++iTst)
+ {
+ if (SString::_wcsicmp(rProps[iCur].pName, rBaseNames[iTst]) == 0)
+ bDup = TRUE;
+ }
+
+ // If the name is a duplicate, decorate it.
+ if (bDup)
+ {
+ // Duplicate.
+ DWORD cchName = (DWORD) wcslen(rProps[iCur].pName);
+ if (cchName > MAX_CLASSNAME_LENGTH-cchDuplicateDecoration)
+ cchName = MAX_CLASSNAME_LENGTH-cchDuplicateDecoration;
+
+ wcsncpy_s(rcName, cchrcName, rProps[iCur].pName, cchName);
+ LPWSTR pSuffix = rcName + cchName;
+
+ for (iSuffix=2; ; ++iSuffix)
+ {
+ // Form a new name.
+ _snwprintf_s(pSuffix, cchDuplicateDecoration, _TRUNCATE, szDuplicateDecoration, iSuffix);
+
+ // Compare against ALL names.
+ for (iTry=0; iTry<nSlots; ++iTry)
+ {
+ // If a property with an associated (lower indexed) property,
+ // or iTry is the same as iCur, don't need to examine it.
+ if (TypeFromToken(rProps[iTry].property) != mdtProperty || iTry == iCur)
+ continue;
+ if (SString::_wcsicmp(rProps[iTry].pName, rcName) == 0)
+ break;
+ }
+
+ // Did we make it all the way through? If so, we have a winner.
+ if (iTry == nSlots)
+ break;
+ }
+
+ // Remember the new name.
+ ULONG len = ((int)wcslen(rcName)) + 1;
+ rProps[iCur].pName = reinterpret_cast<WCHAR*>(sNames.Alloc(len * sizeof(WCHAR)));
+ if (rProps[iCur].pName == NULL)
+ {
+ ThrowHR(E_OUTOFMEMORY);
+ }
+ wcscpy_s(rProps[iCur].pName, len, rcName);
+
+ // Don't need to look at this iCur any more, since we know it is completely unique.
+ }
+ }
+ }
+ else
+ {
+
+ CWStrHash htNames;
+ WSTRHASH *pItem;
+ CUnorderedArray<ULONG, 10> uaDuplicates; // array to keep track of non-unique names
+
+ // Add the base interface names. Already know there are no duplicates there.
+ for (iCur=0; iCur<cBaseNames; ++iCur)
+ {
+ pItem = htNames.Add(rBaseNames[iCur]);
+ IfNullThrow(pItem);
+ pItem->szName = rBaseNames[iCur];
+ }
+
+ for (iCur=0; iCur<nSlots; iCur++)
+ {
+ // If a property with an associated (lower indexed) property, don't need to examine it.
+ if (TypeFromToken(rProps[iCur].property) != mdtProperty)
+ continue;
+
+ // If the member is not visible to COM then we don't need to examine it.
+ if (!rProps[iCur].bMemberVisible)
+ continue;
+
+ // see if name is already in table
+ if (htNames.Find(rProps[iCur].pName) == NULL)
+ {
+ // name not found, so add it.
+ pItem = htNames.Add(rProps[iCur].pName);
+ IfNullThrow(pItem);
+ pItem->szName = rProps[iCur].pName;
+ }
+ else
+ {
+ // name is a duplicate, so keep track of this index for later decoration
+ ULONG *piAppend = uaDuplicates.Append();
+ IfNullThrow(piAppend);
+ *piAppend = iCur;
+ }
+ }
+
+ ULONG i;
+ ULONG iSize = uaDuplicates.Count();
+ ULONG *piTable = uaDuplicates.Table();
+
+ for (i = 0; i < iSize; i++)
+ {
+ // get index to decorate
+ iCur = piTable[i];
+
+ // Copy name into local buffer
+ DWORD cchName = (DWORD) wcslen(rProps[iCur].pName);
+ if (cchName > MAX_CLASSNAME_LENGTH-cchDuplicateDecoration)
+ cchName = MAX_CLASSNAME_LENGTH-cchDuplicateDecoration;
+
+ wcsncpy_s(rcName, cchrcName, rProps[iCur].pName, cchName);
+
+ LPWSTR pSuffix = rcName + cchName;
+ UINT iSuffix = 2;
+
+ // We know this is a duplicate, so immediately decorate name.
+ do
+ {
+ _snwprintf_s(pSuffix, cchDuplicateDecoration, _TRUNCATE, szDuplicateDecoration, iSuffix);
+ iSuffix++;
+ // keep going while we find this name in the hashtable
+ } while (htNames.Find(rcName) != NULL);
+
+ // Now rcName has an acceptable (unique) name. Remember the new name.
+ ULONG len = ((int)wcslen(rcName)) + 1;
+ rProps[iCur].pName = reinterpret_cast<WCHAR*>(sNames.Alloc(len * sizeof(WCHAR)));
+ if (rProps[iCur].pName == NULL)
+ {
+ ThrowHR(E_OUTOFMEMORY);
+ }
+ wcscpy_s(rProps[iCur].pName, len, rcName);
+
+ // Stick it in the table.
+ pItem = htNames.Add(rProps[iCur].pName);
+ IfNullThrow(pItem);
+ pItem->szName = rProps[iCur].pName;
+ }
+ }
+} // void ComMTMemberInfoMap::EliminateDuplicateNames()
+
+
+// ============================================================================
+// Process the dispids for an interface, checking for duplicates. If
+// any duplicates are found, change them to DISPID_UNKNOWN.
+// ============================================================================
+void ComMTMemberInfoMap::EliminateDuplicateDispIds(
+ CQuickArray<ComMTMethodProps> &rProps, // Array of method property information.
+ UINT nSlots) // Count of entries
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr=S_OK; // A result.
+ UINT ix; // Loop control.
+ UINT cDispids = 0; // Dispids actually assigned.
+ CQuickArray<ULONG> rDispid; // Array of dispids.
+
+ // Count the Dispids.
+ for (ix=0; ix<nSlots; ++ix)
+ {
+ if (TypeFromToken(rProps[ix].property) == mdtProperty && rProps[ix].dispid != DISPID_UNKNOWN && rProps[ix].bMemberVisible)
+ ++cDispids;
+ }
+
+ // If not at least two, can't be a duplicate.
+ if (cDispids < 2)
+ return;
+
+ // Make space for the dispids.
+ rDispid.ReSizeThrows(cDispids);
+
+ // Collect the Dispids.
+ cDispids = 0;
+ for (ix=0; ix<nSlots; ++ix)
+ {
+ if (TypeFromToken(rProps[ix].property) == mdtProperty && rProps[ix].dispid != DISPID_UNKNOWN && rProps[ix].bMemberVisible)
+ rDispid[cDispids++] = rProps[ix].dispid;
+ }
+
+ // Sort the dispids. Scope avoids "initialization bypassed by goto" error.
+ {
+ CQuickSort<ULONG> sorter(rDispid.Ptr(), cDispids);
+ sorter.Sort();
+ }
+
+ // Look through the sorted dispids, looking for duplicates.
+ for (ix=0; ix<cDispids-1; ++ix)
+ {
+ // If a duplicate is found...
+ if (rDispid[ix] == rDispid[ix+1])
+ {
+ m_bHadDuplicateDispIds = TRUE;
+
+ // iterate over all slots...
+ for (UINT iy=0; iy<nSlots; ++iy)
+ {
+ // and replace every instance of the duplicate dispid with DISPID_UNKNOWN.
+ if (rProps[iy].dispid == rDispid[ix])
+ {
+ // Mark the dispid so the system will assign one.
+ rProps[iy].dispid = DISPID_UNKNOWN;
+ }
+ }
+ }
+
+ // Skip through the duplicate range.
+ while (ix <cDispids-1 && rDispid[ix] == rDispid[ix+1])
+ ++ix;
+ }
+} // HRESULT ComMTMemberInfoMap::EliminateDuplicateDispIds()
+
+
+// ============================================================================
+// Assign a default member based on "Value" or "ToString", unless there is
+// a dispid of 0.
+// ============================================================================
+void ComMTMemberInfoMap::AssignDefaultMember(
+ CQuickArray<ComMTMethodProps> &rProps, // Array of method property information.
+ CDescPool &sNames, // Pool of possibly decorated names.
+ UINT nSlots) // Count of entries
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ int ix; // Loop control.
+ int defDispid=-1; // Default via dispid.
+ int defValueProp=-1; // Default via szDefaultValue on a method.
+ int defValueMeth=-1; // Default via szDefaultValue on a property.
+ int defToString=-1; // Default via szDefaultToString.
+ int *pDef=0; // Pointer to one of the def* variables.
+ LPWSTR pName=NULL; // Pointer to a name.
+ ULONG cbSig=0; // Size of Cor signature.
+ ULONG ixSig=0; // Index into COM+ signature.
+ ULONG callconv=0; // A member's calling convention.
+ ULONG cParams=0; // A member's parameter count.
+ ULONG retval=0; // A default member's return type.
+ PCCOR_SIGNATURE pbSig; // Pointer to Cor signature.
+
+ for (ix=0; ix<(int)nSlots; ++ix)
+ {
+ // If this is the explicit default, done.
+ if (rProps[ix].dispid == DISPID_VALUE)
+ {
+ defDispid = ix;
+ break;
+ }
+
+ // If this has an assigned dispid, honor it.
+ if (rProps[ix].dispid != DISPID_UNKNOWN)
+ continue;
+
+ // Skip linked properties and non-properties.
+ if (TypeFromToken(rProps[ix].property) != mdtProperty)
+ continue;
+
+ pName = rProps[ix].pName;
+ if (SString::_wcsicmp(pName, szDefaultValue) == 0)
+ {
+ if (rProps[ix].semantic != 0)
+ pDef = &defValueProp;
+ else
+ pDef = &defValueMeth;
+ }
+ else if (SString::_wcsicmp(pName, szDefaultToString) == 0)
+ {
+ pDef = &defToString;
+ }
+
+ // If a potential match was found, see if it is "simple" enough. A field is OK;
+ // a property get function is OK if it takes 0 params; a put is OK with 1.
+ if (pDef)
+ {
+ // Fields are by definition simple enough, so only check if some sort of func.
+ if (rProps[ix].semantic < FieldSemanticOffset)
+ {
+ // Get the signature, skip the calling convention, get the param count.
+ rProps[ix].pMeth->GetSig(&pbSig, &cbSig);
+ ixSig = CorSigUncompressData(pbSig, &callconv);
+ _ASSERTE(callconv != IMAGE_CEE_CS_CALLCONV_FIELD);
+ ixSig += CorSigUncompressData(&pbSig[ixSig], &cParams);
+
+ // If too many params, don't consider this one any more.
+ if (cParams > 1 || (cParams == 1 && rProps[ix].semantic != msSetter))
+ pDef = 0;
+ }
+ // If we made it through the above checks, save the index of this member.
+ if (pDef)
+ *pDef = ix, pDef = 0;
+ }
+ }
+
+ // If there wasn't a DISPID_VALUE already assigned...
+ if (defDispid == -1)
+ {
+        // Was there a "Value" or a "ToString"?
+ if (defValueMeth > -1)
+ defDispid = defValueMeth;
+ else if (defValueProp > -1)
+ defDispid = defValueProp;
+ else if (defToString > -1)
+ defDispid = defToString;
+
+ // Make it the "Value"
+ if (defDispid >= 0)
+ rProps[defDispid].dispid = DISPID_VALUE;
+ }
+ else
+ {
+ // This was a pre-assigned DISPID_VALUE. If it is a function, try to
+ // turn into a propertyget.
+ if (rProps[defDispid].semantic == 0)
+ {
+ // See if the function returns anything.
+ rProps[defDispid].pMeth->GetSig(&pbSig, &cbSig);
+ PREFIX_ASSUME(pbSig != NULL);
+
+ ixSig = CorSigUncompressData(pbSig, &callconv);
+ _ASSERTE(callconv != IMAGE_CEE_CS_CALLCONV_FIELD);
+ ixSig += CorSigUncompressData(&pbSig[ixSig], &cParams);
+ ixSig += CorSigUncompressData(&pbSig[ixSig], &retval);
+ if (retval != ELEMENT_TYPE_VOID)
+ {
+ rProps[defDispid].semantic = msGetter;
+ rProps[defDispid].bFunction2Getter = TRUE;
+ }
+ }
+ }
+} // void ComMTMemberInfoMap::AssignDefaultMember()
+
+
+// ============================================================================
+// Assign a DISPID_NEWENUM member based on "GetEnumerator", unless there is
+// already a member with DISPID_NEWENUM.
+// ============================================================================
+void ComMTMemberInfoMap::AssignNewEnumMember(
+ CQuickArray<ComMTMethodProps> &rProps, // Array of method property information.
+ CDescPool &sNames, // Pool of possibly decorated names.
+ UINT nSlots) // Count of entries
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // HRESULT.
+ int ix; // Loop control.
+ int enumDispid=-1; // Default via dispid.
+ int badEnumDispid=-1; // Misdefined default via dispid.
+ int enumGetEnumMeth=-1; // Default via szGetEnumerator on a method.
+ int *pNewEnum=0; // Pointer to one of the def* variables.
+ ULONG elem; // The element type.
+ mdToken tkTypeRef; // Token for a TypeRef/TypeDef
+ LPWSTR pName; // Pointer to a name.
+ ULONG cbSig; // Size of Cor signature.
+ ULONG ixSig; // Index into COM+ signature.
+ ULONG callconv; // A member's calling convention.
+ ULONG cParams; // A member's parameter count.
+ MethodDesc *pMeth; // A method desc.
+ LPCUTF8 pclsname; // Class name for ELEMENT_TYPE_CLASS.
+
+ CQuickArray<CHAR> rName; // Library name.
+ PCCOR_SIGNATURE pbSig; // Pointer to Cor signature.
+
+ for (ix=0; ix<(int)nSlots; ++ix)
+ {
+ // If we previously found a poorly defined newenum member, we need to clear it.
+ if (badEnumDispid != -1)
+ {
+ rProps[badEnumDispid].dispid = DISPID_UNKNOWN;
+ badEnumDispid = -1;
+ }
+
+ // In case we have a poorly defined newenum member, we need to remember it.
+ if (rProps[ix].dispid == DISPID_NEWENUM)
+ badEnumDispid = ix;
+
+ // Only consider method.
+ if (rProps[ix].semantic != 0)
+ continue;
+
+ // Skip any members that have explicitly assigned DISPID's unless it's the newenum dispid.
+ if ((rProps[ix].dispid != DISPID_UNKNOWN) && (rProps[ix].dispid != DISPID_NEWENUM))
+ continue;
+
+ // Check to see if the member is GetEnumerator.
+ pName = rProps[ix].pName;
+ if (SString::_wcsicmp(pName, szGetEnumerator) != 0)
+ continue;
+
+ pMeth = rProps[ix].pMeth;
+
+ // Get the signature, skip the calling convention, get the param count.
+ pMeth->GetSig(&pbSig, &cbSig);
+ PREFIX_ASSUME(pbSig != NULL);
+
+ ixSig = CorSigUncompressData(pbSig, &callconv);
+ _ASSERTE(callconv != IMAGE_CEE_CS_CALLCONV_FIELD);
+ ixSig += CorSigUncompressData(&pbSig[ixSig], &cParams);
+
+ // If too many params, don't consider this one any more. Also disregard
+ // this method if it doesn't have a return type.
+ if (cParams != 0 || ixSig >= cbSig)
+ continue;
+
+ ixSig += CorSigUncompressData(&pbSig[ixSig], &elem);
+ if (elem != ELEMENT_TYPE_CLASS)
+ continue;
+
+ // Get the TD/TR.
+ ixSig = CorSigUncompressToken(&pbSig[ixSig], &tkTypeRef);
+
+ LPCUTF8 pNS;
+ if (TypeFromToken(tkTypeRef) == mdtTypeDef)
+ {
+ // Get the name of the TypeDef.
+ if (FAILED(pMeth->GetMDImport()->GetNameOfTypeDef(tkTypeRef, &pclsname, &pNS)))
+ {
+ continue;
+ }
+ }
+ else
+ {
+ // Get the name of the TypeRef.
+ _ASSERTE(TypeFromToken(tkTypeRef) == mdtTypeRef);
+ if (FAILED(pMeth->GetMDImport()->GetNameOfTypeRef(tkTypeRef, &pNS, &pclsname)))
+ {
+ continue;
+ }
+ }
+
+ if (pNS)
+ {
+            // Prepend the namespace to the class name.
+ rName.ReSizeThrows((int)(strlen(pclsname)+strlen(pNS)+2));
+ strcpy_s(rName.Ptr(), rName.Size(), pNS);
+ strcat_s(rName.Ptr(), rName.Size(), NAMESPACE_SEPARATOR_STR);
+ strcat_s(rName.Ptr(), rName.Size(), pclsname);
+ pclsname = rName.Ptr();
+ }
+
+ // Make sure the returned type is an IEnumerator.
+ if (stricmpUTF8(pclsname, g_CollectionsEnumeratorClassName) != 0)
+ continue;
+
+ // If assigned the newenum dispid, that's it.
+ if (rProps[ix].dispid == DISPID_NEWENUM)
+ {
+ enumDispid = ix;
+ break;
+ }
+
+ // The method is a valid GetEnumerator method.
+ enumGetEnumMeth = ix;
+ }
+
+ // If there wasn't a DISPID_NEWENUM already assigned...
+ if (enumDispid == -1)
+ {
+ // If there was a GetEnumerator then give it DISPID_NEWENUM.
+ if (enumGetEnumMeth > -1)
+ rProps[enumGetEnumMeth].dispid = DISPID_NEWENUM;
+ }
+} // void ComMTMemberInfoMap::AssignNewEnumMember()
+
+
+// ============================================================================
+// For each property set and let functions, determine PROPERTYPUT and
+// PROPERTYPUTREF.
+// ============================================================================
+void ComMTMemberInfoMap::FixupPropertyAccessors(
+ CQuickArray<ComMTMethodProps> &rProps, // Array of method property information.
+ CDescPool &sNames, // Pool of possibly decorated names.
+ UINT nSlots) // Count of entries
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ UINT ix; // Loop control.
+ UINT j; // Inner loop.
+ int iSet; // Index of Set method.
+ int iOther; // Index of Other method.
+
+ for (ix=0; ix<nSlots; ++ix)
+ {
+ // Skip linked properties and non-properties.
+ if (TypeFromToken(rProps[ix].property) != mdtProperty)
+ continue;
+
+ // What is this one?
+ switch (rProps[ix].semantic)
+ {
+ case msSetter:
+ iSet = ix;
+ iOther = -1;
+ break;
+ case msOther:
+ iOther = ix;
+ iSet = -1;
+ break;
+ default:
+ iSet = iOther = -1;
+ }
+
+ // Look for the others.
+ for (j=ix+1; j<nSlots && (iOther == -1 || iSet == -1); ++j)
+ {
+ if ((UINT)rProps[j].property == ix)
+ {
+ // Found one -- what is it?
+ switch (rProps[j].semantic)
+ {
+ case msSetter:
+ _ASSERTE(iSet == -1);
+ iSet = j;
+ break;
+ case msOther:
+ _ASSERTE(iOther == -1);
+ iOther = j;
+ break;
+ }
+ }
+ }
+
+ // If both, or neither, or "VB Specific Let" (msOther) only, keep as-is.
+ if (((iSet == -1) == (iOther == -1)) || (iSet == -1))
+ continue;
+
+ _ASSERTE(iSet != -1 && iOther == -1);
+
+ // Get the signature.
+ MethodDesc *pMeth = rProps[iSet].pMeth;
+ MetaSigExport msig(pMeth);
+
+ UINT numArgs = msig.NumFixedArgs();
+ for (DWORD i = 0; i < numArgs; i++)
+ msig.NextArg();
+
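+        // The setter's last argument decides the translation: a VB-style
+        // object reference keeps the accessor a true setter (PROPERTYPUTREF),
+        // anything else is demoted to msOther (plain PROPERTYPUT/let).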
+ if (msig.IsVbRefType())
+ rProps[iSet].semantic = msSetter;
+ else
+ rProps[iSet].semantic = msOther;
+
+ }
+} // void ComMTMemberInfoMap::FixupPropertyAccessors()
+
+
+void ComMTMemberInfoMap::AssignDefaultDispIds()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Assign the DISPID's using the same algorithm OLEAUT uses.
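+    // E.g. the member in slot 0 gets dispid 0x60020000 and slot 3 gets
+    // 0x60020003; a field's getter/setter pair shares the dispid of its
+    // first slot, and linked property accessors inherit their property's.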
+ DWORD nSlots = (DWORD)m_MethodProps.Size();
+ for (DWORD i = 0; i < nSlots; i++)
+ {
+ // Retrieve the properties for the current member.
+ ComMTMethodProps *pProps = &m_MethodProps[i];
+
+ if (pProps->dispid == DISPID_UNKNOWN)
+ {
+ if (pProps->semantic > FieldSemanticOffset)
+ {
+ // We are dealing with a field.
+                // Field methods always come in pairs; assert before touching the partner slot.
+                _ASSERTE(i + 1 < nSlots && m_MethodProps[i + 1].property == i);
+
+                pProps->dispid = BASE_OLEAUT_DISPID + i;
+                m_MethodProps[i + 1].dispid = BASE_OLEAUT_DISPID + i;
+
+                // Skip the next method since we just assigned its dispid.
+                i++;
+ }
+ else if (pProps->property == mdPropertyNil)
+ {
+ // Make sure that this is either a real method or a method transformed into a getter.
+ _ASSERTE(pProps->semantic == 0 || pProps->semantic == msGetter);
+
+ // We are dealing with a method.
+ pProps->dispid = BASE_OLEAUT_DISPID + i;
+
+ }
+ else
+ {
+ // We are dealing with a property.
+ if (TypeFromToken(pProps->property) == mdtProperty)
+ {
+ pProps->dispid = BASE_OLEAUT_DISPID + i;
+ }
+ else
+ {
+ pProps->dispid = m_MethodProps[pProps->property].dispid;
+ }
+ }
+ }
+ }
+} // void ComMTMemberInfoMap::AssignDefaultDispIds()
+
+
+void ComMTMemberInfoMap::PopulateMemberHashtable()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DWORD nSlots = (DWORD)m_MethodProps.Size();
+
+ // Go through the members and add them to the hashtable.
+ for (DWORD i = 0; i < nSlots; i++)
+ {
+ // Retrieve the properties for the current member.
+ ComMTMethodProps *pProps = &m_MethodProps[i];
+
+ if (pProps->semantic > FieldSemanticOffset)
+ {
+ // We are dealing with a field.
+ ComCallMethodDesc *pFieldMeth = reinterpret_cast<ComCallMethodDesc*>(pProps->pMeth);
+ FieldDesc *pFD = pFieldMeth->GetFieldDesc();
+
+ // Insert the member into the hashtable.
+ EEModuleTokenPair Key(pFD->GetMemberDef(), pFD->GetModule());
+ m_TokenToComMTMethodPropsMap.InsertValue(&Key, (HashDatum)pProps);
+
+ // Skip the next method since field methods always come in pairs.
+ _ASSERTE(i + 1 < nSlots && m_MethodProps[i + 1].property == i);
+ i++;
+ }
+ else if (pProps->property == mdPropertyNil)
+ {
+ // Make sure that this is either a real method or a method transformed into a getter.
+ _ASSERTE(pProps->semantic == 0 || pProps->semantic == msGetter);
+
+ // We are dealing with a method.
+ MethodDesc *pMD = pProps->pMeth;
+ EEModuleTokenPair Key(pMD->GetMemberDef(), pMD->GetModule());
+ m_TokenToComMTMethodPropsMap.InsertValue(&Key, (HashDatum)pProps);
+ }
+ else
+ {
+ // We are dealing with a property.
+ if (TypeFromToken(pProps->property) == mdtProperty)
+ {
+ // This is the first method of the property.
+ MethodDesc *pMD = pProps->pMeth;
+ EEModuleTokenPair Key(pProps->property, pMD->GetModule());
+ m_TokenToComMTMethodPropsMap.InsertValue(&Key, (HashDatum)pProps);
+ }
+ }
+ }
+} // void ComMTMemberInfoMap::PopulateMemberHashtable()
diff --git a/src/vm/commtmemberinfomap.h b/src/vm/commtmemberinfomap.h
new file mode 100644
index 0000000000..f98403e63e
--- /dev/null
+++ b/src/vm/commtmemberinfomap.h
@@ -0,0 +1,221 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*============================================================
+**
+** Header: Map associated with a ComMethodTable that contains
+** information on its members.
+===========================================================*/
+
+#ifndef _COMMTMEMBERINFOMAP_H
+#define _COMMTMEMBERINFOMAP_H
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+#include "vars.hpp"
+
+
+// Forward declarations.
+struct ComMethodTable;
+class CDescPool;
+class MethodDesc;
+
+
+// Constants.
+static const unsigned int FieldSemanticOffset = 100;
+static LPCSTR szInitName = COR_CTOR_METHOD_NAME; // not unicode
+static LPCWSTR szInitNameUse = W("Init");
+static LPCWSTR szDefaultToString = W("ToString");
+static LPCWSTR szDuplicateDecoration = W("_%d");
+static const int cchDuplicateDecoration = 10; // max is _16777215 (0xffffff)
+static const int cbDuplicateDecoration = 20; // max is _16777215 (0xffffff)
+
+
+// Properties of a method in a ComMethodTable.
+struct ComMTMethodProps
+{
+ MethodDesc* pMeth; // MethodDesc for the method.
+ LPWSTR pName; // The method name. May be a property name.
+ mdToken property; // Property associated with a name. May be the token,
+ // the index of an associated member, or -1;
+ ULONG dispid; // The dispid to use for the method. Get from metadata
+ // or determine from "Value" or "ToString".
+ USHORT semantic; // Semantic of the property, if any.
+ SHORT oVft; // vtable offset, if not auto-assigned.
+ SHORT bMemberVisible; // A flag indicating that the member is visible from COM
+ SHORT bFunction2Getter; // If true, function was munged to getter
+};
+
+
+
+//*****************************************************************************
+// Class to perform memory management for building FuncDesc's etc. for
+// TypeLib creation. Memory is not moved as the heap is expanded, and
+// all of the allocations are cleaned up in the destructor.
+//*****************************************************************************
+class CDescPool : public StgPool
+{
+public:
+ CDescPool() : StgPool()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ IfFailThrow(InitNew());
+ }
+
+ // Allocate some bytes from the pool.
+ BYTE* Alloc(ULONG nBytes)
+ {
+ CONTRACT (BYTE*)
+ {
+ DISABLED(THROWS); // Fix when StgPool throws
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ BYTE *pRslt;
+ if (!Grow(nBytes))
+ RETURN NULL;
+ pRslt = GetNextLocation();
+ SegAllocate(nBytes);
+ RETURN pRslt;
+ }
+
+ // Allocate and clear some bytes.
+ BYTE* AllocZero(ULONG nBytes)
+ {
+ CONTRACT (BYTE*)
+ {
+ DISABLED(THROWS); // Fix when StgPool throws
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ BYTE *pRslt = Alloc(nBytes);
+ if (pRslt)
+ memset(pRslt, 0, nBytes);
+ RETURN pRslt;
+ }
+}; // class CDescPool : public StgPool
+
+
+
+// Token and module pair.
+class EEModuleTokenPair
+{
+public:
+ mdToken m_tk;
+ Module * m_pModule;
+
+ EEModuleTokenPair() : m_tk(0), m_pModule(NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ EEModuleTokenPair(mdToken tk, Module *pModule) : m_tk(tk), m_pModule(pModule)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+};
+
+
+
+// Token and module pair hashtable helper.
+class EEModuleTokenHashTableHelper
+{
+public:
+ static EEHashEntry_t* AllocateEntry(EEModuleTokenPair* pKey, BOOL bDeepCopy, AllocationHeap Heap);
+ static void DeleteEntry(EEHashEntry_t* pEntry, AllocationHeap Heap);
+ static BOOL CompareKeys(EEHashEntry_t* pEntry, EEModuleTokenPair *pKey);
+ static DWORD Hash(EEModuleTokenPair* pKey);
+ static EEModuleTokenPair* GetKey(EEHashEntry_t* pEntry);
+};
+
+
+
+// Token and module pair hashtable.
+typedef EEHashTable<EEModuleTokenPair *, EEModuleTokenHashTableHelper, FALSE> EEModuleTokenHashTable;
+
+
+
+// Map associated with a ComMethodTable that contains information on its members.
+class ComMTMemberInfoMap
+{
+public:
+ ComMTMemberInfoMap(MethodTable *pMT) : m_pMT(pMT)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_DefaultProp.ReSizeThrows(1);
+ m_DefaultProp[0] = 0;
+ }
+
+ // Initialize the map.
+ void Init(size_t sizeOfPtr);
+
+ // Retrieve the member information for a given token.
+ ComMTMethodProps *GetMethodProps(mdToken tk, Module *pModule);
+
+ // Retrieves all the method properties.
+ CQuickArray<ComMTMethodProps> &GetMethods()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_MethodProps;
+ }
+
+ BOOL HadDuplicateDispIds()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_bHadDuplicateDispIds;
+ }
+
+private:
+ // Helper functions.
+ void SetupPropsForIClassX(size_t sizeOfPtr);
+ void SetupPropsForInterface(size_t sizeOfPtr);
+ void GetMethodPropsForMeth(MethodDesc *pMeth, int ix, CQuickArray<ComMTMethodProps> &rProps, CDescPool &sNames);
+ void EliminateDuplicateDispIds(CQuickArray<ComMTMethodProps> &rProps, UINT nSlots);
+ void EliminateDuplicateNames(CQuickArray<ComMTMethodProps> &rProps, CDescPool &sNames, UINT nSlots);
+ void AssignDefaultMember(CQuickArray<ComMTMethodProps> &rProps, CDescPool &sNames, UINT nSlots);
+ void AssignNewEnumMember(CQuickArray<ComMTMethodProps> &rProps, CDescPool &sNames, UINT nSlots);
+ void FixupPropertyAccessors(CQuickArray<ComMTMethodProps> &rProps, CDescPool &sNames, UINT nSlots);
+ void AssignDefaultDispIds();
+ void PopulateMemberHashtable();
+
+ EEModuleTokenHashTable m_TokenToComMTMethodPropsMap;
+ CQuickArray<ComMTMethodProps> m_MethodProps;
+ MethodTable * m_pMT;
+ CQuickArray<CHAR> m_DefaultProp;
+ CDescPool m_sNames;
+ BOOL m_bHadDuplicateDispIds;
+};
+
+#endif // _COMMTMEMBERINFOMAP_H
+
+
+
+
+
+
+
diff --git a/src/vm/compactlayoutwriter.cpp b/src/vm/compactlayoutwriter.cpp
new file mode 100644
index 0000000000..686393aad3
--- /dev/null
+++ b/src/vm/compactlayoutwriter.cpp
@@ -0,0 +1,4116 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+
+//
+
+#include "common.h"
+
+#include "compactlayoutwriter.h"
+#include "zapper.h"
+#include "..\zap\zapwriter.h"
+#include "..\zap\zapimage.h"
+#include "..\zap\wellknowntypes.h"
+#include "sigbuilder.h"
+#include "winrthelpers.h"
+#include "caparser.h"
+
+#define TRITON_STRESS_IMPL
+#include "tritonstress.h"
+enum LoadFailureEnum
+{
+ ThrowOnLoadFailure,
+ ReturnNullOnLoadFailure,
+};
+
+
+class ByteStreamWriter
+{
+private:
+ BYTE *m_bufStart;
+ BYTE *m_bufPtr;
+ BYTE *m_bufEnd;
+ size_t m_bufSize;
+
+ void Grow()
+ {
+ STANDARD_VM_CONTRACT;
+
+ size_t newBufSize = m_bufSize*2;
+ BYTE *newBuffer = new BYTE[newBufSize];
+ for (size_t i = 0; i < m_bufSize; i++)
+ newBuffer[i] = m_bufStart[i];
+ delete[] m_bufStart;
+ m_bufPtr = newBuffer + (m_bufPtr - m_bufStart);
+ m_bufStart = newBuffer;
+ m_bufEnd = newBuffer + newBufSize;
+ m_bufSize = newBufSize;
+ }
+
+public:
+ ByteStreamWriter()
+ {
+ STANDARD_VM_CONTRACT;
+
+ m_bufSize = 100;
+ m_bufStart = new BYTE[m_bufSize];
+ m_bufPtr = m_bufStart;
+ m_bufEnd = m_bufStart + m_bufSize;
+ }
+
+ void Reset()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_bufPtr = m_bufStart;
+ }
+
+ BYTE *GetBuffer(size_t &size)
+ {
+ LIMITED_METHOD_CONTRACT;
+ size = m_bufPtr - m_bufStart;
+ return m_bufStart;
+ }
+
+ void WriteByte(BYTE b)
+ {
+ STANDARD_VM_CONTRACT;
+
+ *m_bufPtr++ = b;
+ if (m_bufPtr >= m_bufEnd)
+ Grow();
+ }
+
+ void WriteWord(WORD w)
+ {
+ STANDARD_VM_CONTRACT;
+
+ WriteByte((BYTE)w);
+ WriteByte((BYTE)(w >> 8));
+ }
+
+ void WriteDWord(DWORD d)
+ {
+ STANDARD_VM_CONTRACT;
+
+ WriteWord((WORD)d);
+ WriteWord((WORD)(d >> 16));
+ }
+
+ void WriteUnsigned(DWORD d)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (d < 128)
+ {
+ WriteByte((BYTE)(d*2 + 0));
+ }
+ else if (d < 128*128)
+ {
+ WriteByte((BYTE)(d*4 + 1));
+ WriteByte((BYTE)(d >> 6));
+ }
+ else if (d < 128*128*128)
+ {
+ WriteByte((BYTE)(d*8 + 3));
+ WriteByte((BYTE)(d >> 5));
+ WriteByte((BYTE)(d >> 13));
+ }
+ else if (d < 128*128*128*128)
+ {
+ WriteByte((BYTE)(d*16 + 7));
+ WriteByte((BYTE)(d >> 4));
+ WriteByte((BYTE)(d >> 12));
+ WriteByte((BYTE)(d >> 20));
+ }
+ else
+ {
+ WriteByte(15);
+ WriteDWord(d);
+ }
+ }
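+
+ // Illustrative example (not in the original source): WriteUnsigned(1000)
+ // takes the d < 128*128 branch and emits (BYTE)(1000*4 + 1) = 0xA1 followed
+ // by (BYTE)(1000 >> 6) = 0x0F. The low bits of the first byte act as a
+ // length tag (0 = 1 byte, 01 = 2 bytes, 011 = 3 bytes, 0111 = 4 bytes,
+ // 1111 = marker byte 15 plus a raw DWORD), so a reader would presumably
+ // decode (0xA1 >> 2) + (0x0F << 6) = 40 + 960 = 1000.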
+
+ void WriteSigned(INT32 i)
+ {
+ STANDARD_VM_CONTRACT;
+
+ DWORD d = (DWORD)i;
+ if (d + 64 < 128)
+ {
+ WriteByte((BYTE)(d*2 + 0));
+ }
+ else if (d + 64*128 < 128*128)
+ {
+ WriteByte((BYTE)(d*4 + 1));
+ WriteByte((BYTE)(d >> 6));
+ }
+ else if (d + 64*128*128 < 128*128*128)
+ {
+ WriteByte((BYTE)(d*8 + 3));
+ WriteByte((BYTE)(d >> 5));
+ WriteByte((BYTE)(d >> 13));
+ }
+ else if (d + 64*128*128*128 < 128*128*128*128)
+ {
+ WriteByte((BYTE)(d*16 + 7));
+ WriteByte((BYTE)(d >> 4));
+ WriteByte((BYTE)(d >> 12));
+ WriteByte((BYTE)(d >> 20));
+ }
+ else
+ {
+ WriteByte(15);
+ WriteDWord(d);
+ }
+ }
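+
+ // Illustrative example (not in the original source): WriteSigned(-3) wraps
+ // to d = 0xFFFFFFFD; the unsigned test d + 64 < 128 selects values in
+ // [-64, 63], so a single byte (BYTE)(d*2) = 0xFA is emitted. A reader would
+ // presumably decode it with an arithmetic shift: ((INT8)0xFA) >> 1 = -3.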
+};
+
+enum CompactLayoutToken
+{
+ CLT_INVALID,
+
+ CLT_START_TYPE,
+ CLT_SMALL_START_TYPE,
+ CLT_SIMPLE_START_TYPE,
+ CLT_MODEST_START_TYPE,
+
+ CLT_END_TYPE,
+
+ CLT_IMPLEMENT_INTERFACE,
+
+ CLT_ADVANCE_ENCLOSING_TYPEDEF,
+ CLT_ADVANCE_METHODDEF,
+ CLT_ADVANCE_METHODDEF_SHORT_MINUS_8,
+ CLT_ADVANCE_METHODDEF_SHORT_0 = CLT_ADVANCE_METHODDEF_SHORT_MINUS_8 + 8,
+ CLT_ADVANCE_METHODDEF_SHORT_PLUS_8 = CLT_ADVANCE_METHODDEF_SHORT_0 + 8,
+
+ CLT_ADVANCE_FIELDDEF,
+ CLT_ADVANCE_FIELDDEF_SHORT_MINUS_8,
+ CLT_ADVANCE_FIELDDEF_SHORT_0 = CLT_ADVANCE_FIELDDEF_SHORT_MINUS_8 + 8,
+ CLT_ADVANCE_FIELDDEF_SHORT_PLUS_8 = CLT_ADVANCE_FIELDDEF_SHORT_0 + 8,
+
+ CLT_FIELD_OFFSET,
+
+ CLT_IMPLEMENT_INTERFACE_METHOD,
+
+ CLT_METHOD,
+ CLT_NORMAL_METHOD,
+ CLT_SIMPLE_METHOD,
+
+ CLT_PINVOKE_METHOD = CLT_SIMPLE_METHOD + 32,
+ CLT_METHOD_IMPL,
+
+ CLT_FIELD_INSTANCE,
+ CLT_FIELD_STATIC,
+ CLT_FIELD_THREADLOCAL,
+ CLT_FIELD_CONTEXTLOCAL,
+ CLT_FIELD_RVA,
+
+ CLT_FIELD_SIMPLE,
+
+ CLT_FIELD_MAX = CLT_FIELD_SIMPLE + 16,
+
+ CLT_DLLEXPORT_METHOD = CLT_FIELD_MAX,
+ CLT_RUNTIME_IMPORT_METHOD,
+ CLT_RUNTIME_EXPORT_METHOD,
+
+ CLT_GENERIC_TYPE_1, // prefix before CLT_START_TYPE : this is a generic type with 1 type arg
+ CLT_GENERIC_TYPE_2, // prefix before CLT_START_TYPE : this is a generic type with 2 type args
+ CLT_GENERIC_TYPE_N, // prefix before CLT_START_TYPE : this is a generic type with N type args (byte follows)
+
+ CLT_PACK, // unsigned follows specifying the maximum field alignment
+ CLT_SIZE, // unsigned follows specifying the struct size
+
+ CLT_GENERIC_PARAM, // unsigned follows specifying generic param rid, i.e. token = mdtGenericParam + rid
+
+ CLT_NATIVE_FIELD, // unsigned follows specifying native type along with flags specifying what other
+ // information follows (see NFI_*flags):
+ // - size
+ // - flags
+ // - type token 1
+ // - type token 2
+
+ CLT_GUIDINFO, // guid info for interfaces - guid itself followed by flags
+
+ CLT_STUB_METHOD, // IL stub method
+
+ CLT_TYPE_FLAGS, // additional type information (if necessary)
+
+ CLT_SPECIAL_TYPE, // unsigned follows describing the specific type. This CTL code must not exist in versionable mdil. (See SPECIAL_TYPE enum)
+ // This is used to encode information to the binder that would be inappropriate to put into MDIL directly and is runtime specific.
+
+ CLT_LAST,
+};
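+
+// Note on the opcode values above: several of them are the base of a range
+// rather than a single opcode. The CLT_ADVANCE_*_SHORT values reserve 17
+// codes each for token deltas in [-8, +8] (see AdvanceMethodDef and
+// AdvanceFieldDef below), CLT_SIMPLE_METHOD reserves 32 codes for indices
+// into the common method-attribute table in Method(), and CLT_FIELD_SIMPLE
+// reserves 16 codes for the common field encodings in Field(). This lets
+// the most frequent records fit in a single opcode byte.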
+
+struct InlineContext
+{
+ static const int MAX_TYPE_ARGS = 64*1024;
+ struct TypeArg
+ {
+ PCCOR_SIGNATURE pSig;
+ size_t cbSig;
+ };
+ Module *m_currentModule;
+ Module *m_inlineeModule;
+ ULONG classTypeArgCount;
+ TypeArg *classTypeArgs;
+ ULONG methodTypeArgCount;
+ TypeArg *methodTypeArgs;
+
+ InlineContext(Module *currentModule)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_currentModule = currentModule;
+ m_inlineeModule = currentModule;
+ classTypeArgCount = 0;
+ classTypeArgs = NULL;
+ methodTypeArgCount = 0;
+ methodTypeArgs = NULL;
+ }
+
+ InlineContext(const InlineContext &inlineContext)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ memcpy(this, &inlineContext, sizeof(InlineContext));
+ }
+
+ bool IsTrivial()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_currentModule == m_inlineeModule && classTypeArgCount == 0 && methodTypeArgCount == 0;
+ }
+
+ Module *GetModule()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_inlineeModule;
+ }
+};
+
+class TokenToSig
+{
+ struct Entry
+ {
+ mdToken m_token;
+ PCCOR_SIGNATURE m_pSig;
+ ULONG m_cbSig;
+ };
+ Entry * m_entries;
+ ULONG m_capacity;
+ ULONG m_count;
+
+ void Grow()
+ {
+ STANDARD_VM_CONTRACT;
+
+ m_capacity = m_capacity > 0 ? m_capacity*2 : 10;
+ Entry *newEntries = new Entry[m_capacity];
+ memcpy(newEntries, m_entries, sizeof(Entry)*m_count);
+ delete[] m_entries; // allocated with new[], so use array delete
+ m_entries = newEntries;
+ }
+
+ PCCOR_SIGNATURE Find(mdToken token, ULONG &cbSig)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // simple linear search for now - replace with hash if appropriate
+ for (ULONG i = 0; i < m_count; i++)
+ {
+ if (m_entries[i].m_token == token)
+ {
+ cbSig = m_entries[i].m_cbSig;
+ return m_entries[i].m_pSig;
+ }
+ }
+ return NULL;
+ }
+
+public:
+ TokenToSig()
+ {
+ STANDARD_VM_CONTRACT;
+
+ m_count = 0;
+ m_capacity = 10;
+ m_entries = new Entry[m_capacity];
+ }
+
+ PCCOR_SIGNATURE Get(mdToken token, ULONG &cbSig)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ PCCOR_SIGNATURE pSig = Find(token, cbSig);
+ assert(pSig != NULL);
+ return pSig;
+ }
+
+ void Set(mdToken token, PCCOR_SIGNATURE pSig, ULONG &cbSig)
+ {
+ STANDARD_VM_CONTRACT;
+
+ ULONG oldCbSig;
+ PCCOR_SIGNATURE oldPSig = Find(token, oldCbSig);
+ if (oldPSig != NULL)
+ {
+ // we can't quite assert that because there may be modifiers
+ // in the signatures that are irrelevant (and ignored) for the time being
+ // assert(cbSig == oldCbSig && memcmp(pSig, oldPSig, cbSig) == 0);
+ return;
+ }
+ if (m_count >= m_capacity)
+ {
+ Grow();
+ }
+ COR_SIGNATURE *newSig = new COR_SIGNATURE[cbSig];
+ memcpy(newSig, pSig, cbSig);
+ Entry *entry = &m_entries[m_count++];
+ entry->m_token = token;
+ entry->m_pSig = newSig;
+ entry->m_cbSig = cbSig;
+ }
+};
+
+class CompactLayoutWriter : public ICompactLayoutWriter
+{
+private:
+ // Reset to an empty stream at Reset. EndType will store the contents of the stream.
+ ByteStreamWriter *m_stream;
+
+ // Reset to new values at each Reset, StartType will fill in with meaningful values
+ DWORD m_typeDefToken;
+ DWORD m_prevFieldDefToken;
+ DWORD m_prevMethodDefToken;
+ DWORD m_typeFlags;
+
+ // Accumulates over the lifetime of the process and is used once.
+ ByteStreamWriter *m_stubAssocStream;
+ DWORD m_prevStubDefToken;
+ DWORD m_stubMethodCount;
+#ifdef _DEBUG
+ bool m_generatingStubs;
+#endif // _DEBUG
+
+ // Accumulates over the lifetime of the process and is safe for use even if the type that initially added data fails to be generated.
+ TokenToSig m_tokenToSig;
+
+ // Constant, and never modified
+ Module * const m_pModule;
+ ZapImage * const m_pZapImage;
+
+ void AdvanceEnclosingTypeDef(unsigned enclosingTypeToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (enclosingTypeToken != 0)
+ {
+ assert(TypeFromToken(enclosingTypeToken) == mdtTypeDef);
+ m_stream->WriteByte(CLT_ADVANCE_ENCLOSING_TYPEDEF);
+ m_stream->WriteSigned(RidFromToken(enclosingTypeToken));
+ }
+ }
+
+ void AdvanceMethodDef(unsigned methodToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ int tokenDiff = methodToken - m_prevMethodDefToken - 1;
+ if (tokenDiff != 0)
+ {
+ if (-8 <= tokenDiff && tokenDiff <= 8)
+ m_stream->WriteByte(BYTE(CLT_ADVANCE_METHODDEF_SHORT_0 + tokenDiff));
+ else
+ {
+ m_stream->WriteByte(CLT_ADVANCE_METHODDEF);
+ m_stream->WriteSigned(tokenDiff);
+ }
+ }
+ m_prevMethodDefToken = methodToken;
+ }
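+
+ // Illustrative example (not in the original source): if the previous
+ // methoddef token was 0x06000004 and the next is 0x06000005, tokenDiff is 0
+ // and nothing is written - the reader advances by one token implicitly.
+ // For 0x06000008 the diff is 3, emitted as the single opcode byte
+ // CLT_ADVANCE_METHODDEF_SHORT_0 + 3.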
+
+ void AdvanceFieldDef(unsigned fieldToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ assert(TypeFromToken(fieldToken) == mdtFieldDef);
+ int tokenDiff = fieldToken - m_prevFieldDefToken - 1;
+ if (tokenDiff != 0)
+ {
+ if (-8 <= tokenDiff && tokenDiff <= 8)
+ m_stream->WriteByte(BYTE(CLT_ADVANCE_FIELDDEF_SHORT_0 + tokenDiff));
+ else
+ {
+ m_stream->WriteByte(CLT_ADVANCE_FIELDDEF);
+ m_stream->WriteSigned(tokenDiff);
+ }
+ }
+ m_prevFieldDefToken = fieldToken;
+ }
+
+ // Read NeutralResourcesLanguageAttribute and store results
+ void EmitNeutralResourceData(IMDInternalImport *pMDImport)
+ {
+ STANDARD_VM_CONTRACT;
+
+ mdToken token;
+ IfFailThrow(pMDImport->GetAssemblyFromScope(&token));
+
+ const BYTE *pVal = NULL;
+ ULONG cbVal = 0;
+
+ LPCUTF8 cultureName = NULL;
+ ULONG cultureNameLength = 0;
+ INT16 fallbackLocation = 0;
+ // Check for the existence of the attribute.
+ HRESULT hr = pMDImport->GetCustomAttributeByName(token, "System.Resources.NeutralResourcesLanguageAttribute", (const void **)&pVal, &cbVal);
+ if (hr == S_OK)
+ {
+ CustomAttributeParser cap(pVal, cbVal);
+ IfFailThrow(cap.SkipProlog());
+ IfFailThrow(cap.GetString(&cultureName, &cultureNameLength));
+ IfFailThrow(cap.GetI2(&fallbackLocation));
+#ifdef FEATURE_LEGACYNETCF
+ if (m_pModule->GetDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ {
+ fallbackLocation = 0; // UltimateResourceFallbackLocation.MainAssembly
+ }
+#endif
+ }
+
+ DWORD cultureNameId = EmitName(cultureName);
+
+ m_pZapImage->SetNeutralResourceInfo(cultureNameLength, cultureNameId, fallbackLocation);
+ }
+
+ void WriteTypeDefOrRef(unsigned typeToken, ByteStreamWriter *stream)
+ {
+ STANDARD_VM_CONTRACT;
+
+ DWORD encoding = RidFromToken(typeToken) << 2;
+ switch (TypeFromToken(typeToken))
+ {
+ case 0: assert(typeToken == 0); break;
+ case mdtTypeDef: encoding += 1; break;
+ case mdtTypeRef: encoding += 2; break;
+ case mdtTypeSpec: encoding += 3; break;
+ default: assert(0); encoding = (typeToken << 2) + 3; break;
+ }
+ stream->WriteUnsigned(encoding);
+ }
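+
+ // Illustrative example (not in the original source): typedef token
+ // 0x02000005 has rid 5 and is encoded as (5 << 2) + 1 = 0x15; the two low
+ // bits distinguish nil/typedef/typeref/typespec and the remaining bits
+ // carry the rid, so small tokens fit into a single WriteUnsigned byte.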
+
+ void WriteMethodDefOrRef(unsigned methodToken, ByteStreamWriter *stream)
+ {
+ STANDARD_VM_CONTRACT;
+
+ DWORD encoding = RidFromToken(methodToken) << 2;
+ switch (TypeFromToken(methodToken))
+ {
+ case 0: assert(methodToken == 0); break;
+ case mdtMethodDef: encoding += 1; break;
+ case mdtMemberRef: encoding += 2; break;
+ case mdtMethodSpec: encoding += 3; break;
+ default: assert(0); encoding = (methodToken << 2); break;
+ }
+ stream->WriteUnsigned(encoding);
+ }
+
+public:
+ CompactLayoutWriter(Module *pModule, ZapImage *pZapImage) :
+ m_pModule(pModule),
+ m_pZapImage(pZapImage)
+ {
+ STANDARD_VM_CONTRACT;
+
+ ULONG assembly = 0;
+ ULONG locale = 0;
+ IMDInternalImport *pMDImport = pModule->GetMDImport();
+ LPCSTR pszName = NULL;
+ AssemblyMetaDataInternal metaData;
+ DWORD flags = 0;
+ HRESULT hr;
+
+ m_prevFieldDefToken = 0;
+ m_prevMethodDefToken = 0;
+
+ m_stream = new ByteStreamWriter();
+
+ HENUMInternalHolder hEnum(pMDImport);
+ hEnum.EnumAllInit(mdtMethodDef);
+ m_prevStubDefToken = TokenFromRid(hEnum.EnumGetCount(), mdtMethodDef);
+
+ m_stubAssocStream = new ByteStreamWriter;
+ m_stubMethodCount = 0;
+ INDEBUG(m_generatingStubs = false);
+
+ // initialize string buffer (with empty string being first)
+ if (m_pZapImage->m_namePool.GetCount() == 0) {
+ m_pZapImage->m_namePool.SetCount(1);
+ m_pZapImage->m_namePool[0] = 0;
+ }
+
+ hr = pMDImport->GetAssemblyProps(TokenFromRid(1, mdtAssembly),
+ NULL, NULL, // not yet interested in public key data
+ NULL, // not interested in HashAlgID
+ &pszName, //
+ &metaData,
+ &flags);
+ if (hr == S_OK) {
+
+ // WindowsRuntime assembly names are annotated with the first winrt type in them.
+#ifdef FEATURE_COMINTEROP
+ if ((flags & afContentType_Mask) == afContentType_WindowsRuntime)
+ {
+ LPCSTR szNameSpace;
+ LPCSTR szTypeName;
+ LPCWSTR wszAssemblyPath = pModule->GetAssembly()->GetManifestFile()->GetPath();
+ SString ssFakeNameSpaceAllocationBuffer;
+
+ IfFailThrow(GetFirstWinRTTypeDef(pMDImport, &szNameSpace, &szTypeName, wszAssemblyPath, &ssFakeNameSpaceAllocationBuffer));
+
+ StackSString sNamespaceAndType(SString::Utf8, pszName);
+ sNamespaceAndType.Append(W("!"));
+ sNamespaceAndType.AppendUTF8(szNameSpace);
+ sNamespaceAndType.Append(W("."));
+ sNamespaceAndType.AppendUTF8(szTypeName);
+
+ StackScratchBuffer scratchBufferUtf8;
+ assembly = EmitName(sNamespaceAndType.GetUTF8(scratchBufferUtf8));
+ }
+ else
+#endif
+ {
+ assembly = EmitName(pszName);
+ }
+ locale = EmitName(metaData.szLocale);
+ }
+
+ pZapImage->SetAssemblyNameAndLocale(assembly, locale, &metaData);
+ EmitNeutralResourceData(pMDImport);
+ }
+
+ // This is used to prepare the CompactLayoutWriter for writing out another type.
+ virtual void Reset()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_stream->Reset();
+ m_typeDefToken = 0;
+ m_prevFieldDefToken = 0x04000000;
+ m_prevMethodDefToken = 0x06000000;
+ m_typeFlags = 0;
+ }
+
+ // This is a prefix for generic types
+ virtual
+ void GenericType(DWORD typeArgCount)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (typeArgCount == 1)
+ m_stream->WriteByte(CLT_GENERIC_TYPE_1);
+ else if (typeArgCount == 2)
+ m_stream->WriteByte(CLT_GENERIC_TYPE_2);
+ else
+ {
+ m_stream->WriteByte(CLT_GENERIC_TYPE_N);
+ m_stream->WriteUnsigned(typeArgCount);
+ }
+ }
+
+ // This starts serialization/deserialization of new type
+ virtual
+ void StartType( DWORD flags, // CorTypeAttr plus perhaps other flags
+ DWORD typeDefToken, // typedef token for this type
+ DWORD baseTypeToken, // type this type is derived from, if any
+ DWORD enclosingTypeToken, // type this type is nested in, if any
+ DWORD interfaceCount, // how many times ImplementInterface() will be called
+ DWORD fieldCount, // how many times Field() will be called
+ DWORD methodCount, // how many times Method() will be called
+ DWORD newVirtualMethodCount, // how many new virtuals this type defines
+ DWORD overrideVirtualMethodCount ) // how many virtuals this type overrides
+ {
+ STANDARD_VM_CONTRACT;
+
+ // we write out all types before we start generating stubs
+ assert(!m_generatingStubs);
+
+ m_typeDefToken = typeDefToken;
+
+ AdvanceEnclosingTypeDef(enclosingTypeToken);
+
+ if (interfaceCount == 0 && newVirtualMethodCount == 0 && overrideVirtualMethodCount == 0)
+ {
+ if (fieldCount <= 7)
+ {
+ m_stream->WriteByte(CLT_SMALL_START_TYPE);
+ m_stream->WriteUnsigned(flags);
+ WriteTypeDefOrRef(baseTypeToken, m_stream);
+ m_stream->WriteUnsigned(fieldCount + methodCount*8);
+ }
+ else
+ {
+ m_stream->WriteByte(CLT_SIMPLE_START_TYPE);
+ m_stream->WriteUnsigned(flags);
+ WriteTypeDefOrRef(baseTypeToken, m_stream);
+ m_stream->WriteUnsigned(fieldCount);
+ m_stream->WriteUnsigned(methodCount);
+ }
+ }
+ else if (interfaceCount <= 3 && newVirtualMethodCount <= 3)
+ {
+ m_stream->WriteByte(CLT_MODEST_START_TYPE);
+ m_stream->WriteUnsigned(flags);
+ WriteTypeDefOrRef(baseTypeToken, m_stream);
+ m_stream->WriteUnsigned(fieldCount);
+ m_stream->WriteUnsigned(methodCount);
+ m_stream->WriteUnsigned(interfaceCount + newVirtualMethodCount*4 + overrideVirtualMethodCount*16);
+ }
+ else
+ {
+ m_stream->WriteByte(CLT_START_TYPE);
+ m_stream->WriteUnsigned(flags);
+ WriteTypeDefOrRef(baseTypeToken, m_stream);
+ m_stream->WriteUnsigned(interfaceCount);
+ m_stream->WriteUnsigned(fieldCount);
+ m_stream->WriteUnsigned(methodCount);
+ m_stream->WriteUnsigned(newVirtualMethodCount);
+ m_stream->WriteUnsigned(overrideVirtualMethodCount);
+ }
+
+ m_typeFlags = flags;
+ m_prevFieldDefToken = 0x04000000;
+ m_prevMethodDefToken = 0x06000000;
+ }
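+
+ // Illustrative example of the format selection above (not in the original
+ // source): a type with no interfaces or virtuals, 2 fields and 3 methods
+ // uses the SMALL form and packs both counts into one unsigned,
+ // 2 + 3*8 = 26; a reader would presumably split this back out as
+ // (26 & 7) fields and (26 >> 3) methods.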
+
+ // Call once for each interface implemented by the
+ // class directly (not those implemented in base classes)
+ virtual
+ void ImplementInterface( DWORD interfaceTypeToken )
+ {
+ STANDARD_VM_CONTRACT;
+
+ m_stream->WriteByte(CLT_IMPLEMENT_INTERFACE);
+ WriteTypeDefOrRef(interfaceTypeToken, m_stream);
+ }
+
+ virtual
+ void ExtendedTypeFlags( DWORD flags )
+ {
+ STANDARD_VM_CONTRACT;
+
+ m_stream->WriteByte(CLT_TYPE_FLAGS);
+ m_stream->WriteUnsigned(flags);
+ }
+
+ virtual
+ void SpecialType( SPECIAL_TYPE type)
+ {
+ STANDARD_VM_CONTRACT;
+
+ m_stream->WriteByte(CLT_SPECIAL_TYPE);
+ m_stream->WriteUnsigned((DWORD)type);
+ }
+
+ // Call once for each field the class declares directly.
+ // valueTypeToken is non-0 iff fieldType == ELEMENT_TYPE_VALUETYPE;
+ // not all CorElementTypes may be allowed - TBD
+ virtual
+ void Field( DWORD fieldToken, // an mdFieldDef
+ FieldStorage fieldStorage,
+ FieldProtection fieldProtection,
+ CorElementType fieldType,
+ DWORD fieldOffset,
+ DWORD valueTypeToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ AdvanceFieldDef(fieldToken);
+
+ // We should have an explicit field offset iff the containing type has explicit layout or we have an RVA field
+ assert((fieldOffset != ~0) == (IsTdExplicitLayout(m_typeFlags) != 0 || fieldStorage == FS_RVA));
+ if (fieldOffset != ~0)
+ {
+ m_stream->WriteByte(CLT_FIELD_OFFSET);
+ m_stream->WriteUnsigned(fieldOffset);
+ }
+
+// assert((fieldType == ELEMENT_TYPE_VALUETYPE) == (valueTypeToken != 0));
+// disabled for now - fires for some generic stuff (?)
+ if ((fieldType == ELEMENT_TYPE_VALUETYPE) != (valueTypeToken != 0))
+ {
+// printf("fieldType = %d valueTypeToken = %08x\n", fieldType, valueTypeToken);
+ }
+ assert((unsigned)fieldStorage < 8);
+
+ assert(CLT_FIELD_INSTANCE + FS_INSTANCE == CLT_FIELD_INSTANCE);
+ assert(CLT_FIELD_INSTANCE + FS_STATIC == CLT_FIELD_STATIC);
+ assert(CLT_FIELD_INSTANCE + FS_THREADLOCAL == CLT_FIELD_THREADLOCAL);
+ assert(CLT_FIELD_INSTANCE + FS_CONTEXTLOCAL == CLT_FIELD_CONTEXTLOCAL);
+ assert(CLT_FIELD_INSTANCE + FS_RVA == CLT_FIELD_RVA);
+
+ assert(
+ fieldType == ELEMENT_TYPE_I1 ||
+ fieldType == ELEMENT_TYPE_BOOLEAN ||
+ fieldType == ELEMENT_TYPE_U1 ||
+ fieldType == ELEMENT_TYPE_I2 ||
+ fieldType == ELEMENT_TYPE_U2 ||
+ fieldType == ELEMENT_TYPE_CHAR ||
+ fieldType == ELEMENT_TYPE_I4 ||
+ fieldType == ELEMENT_TYPE_U4 ||
+ fieldType == ELEMENT_TYPE_I8 ||
+ fieldType == ELEMENT_TYPE_I ||
+ fieldType == ELEMENT_TYPE_U ||
+ fieldType == ELEMENT_TYPE_U8 ||
+ fieldType == ELEMENT_TYPE_R4 ||
+ fieldType == ELEMENT_TYPE_R8 ||
+ fieldType == ELEMENT_TYPE_CLASS ||
+ fieldType == ELEMENT_TYPE_VALUETYPE ||
+ fieldType == ELEMENT_TYPE_PTR ||
+ fieldType == ELEMENT_TYPE_FNPTR
+ );
+
+ assert((unsigned)fieldType < 32);
+ assert((unsigned)fieldProtection < 8);
+
+ static DWORD encodingTable[16] =
+ {
+ 0x0112, // 9369 (sum = 9369)
+ 0x1112, // 4106 (sum = 13475)
+ 0x0608, // 3805 (sum = 17280)
+ 0x0108, // 3245 (sum = 20525)
+ 0x0102, // 2110 (sum = 22635)
+ 0x0312, // 1690 (sum = 24325)
+ 0x0612, // 1364 (sum = 25689)
+ 0x1108, // 1234 (sum = 26923)
+ 0x0308, // 910 (sum = 27833)
+ 0x1612, // 815 (sum = 28648)
+ 0x0111, // 762 (sum = 29410)
+ 0x1312, // 742 (sum = 30152)
+ 0x0618, // 665 (sum = 30817)
+ 0x0309, // 627 (sum = 31444)
+ 0x0609, // 414 (sum = 31858)
+ 0x0311, // 409 (sum = 32267)
+ };
+
+ DWORD encoding = fieldStorage*16*256 + fieldProtection*256 + fieldType;
+ DWORD encodingIndex;
+ for (encodingIndex = 0; encodingIndex < sizeof(encodingTable)/sizeof(encodingTable[0]); encodingIndex++)
+ {
+ if (encoding == encodingTable[encodingIndex])
+ break;
+ }
+ if (encodingIndex < sizeof(encodingTable)/sizeof(encodingTable[0]))
+ {
+ m_stream->WriteByte(BYTE(CLT_FIELD_SIMPLE + encodingIndex));
+ }
+ else
+ {
+ m_stream->WriteByte(BYTE(CLT_FIELD_INSTANCE + (unsigned)fieldStorage));
+ m_stream->WriteByte(BYTE((unsigned)fieldProtection*32 + (unsigned)fieldType));
+ }
+ if (fieldType == ELEMENT_TYPE_VALUETYPE)
+ {
+ WriteTypeDefOrRef(valueTypeToken, m_stream);
+ }
+ }
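+
+ // Decoding the table above (illustrative, not in the original source):
+ // each entry packs fieldStorage into bits 12 and up, fieldProtection into
+ // bits 8-11, and the CorElementType into the low byte. The most frequent
+ // entry 0x0112 is thus an instance field (FS_INSTANCE == 0) with protection
+ // value 1 and field type 0x12 (ELEMENT_TYPE_CLASS); the counts in the
+ // comments appear to be frequency data used to choose the 16 entries.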
+
+ // call once for each method implementing a contract
+ // in an interface. For parameters, see OverrideMethod.
+ virtual
+ void ImplementInterfaceMethod(DWORD declToken,
+ DWORD implToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ AdvanceMethodDef(implToken);
+
+ m_stream->WriteByte(CLT_IMPLEMENT_INTERFACE_METHOD);
+ WriteMethodDefOrRef(declToken, m_stream);
+ }
+
+ // call once for each method, including those mentioned in
+ // OverrideMethod, NewVirtual, ImplementInterfaceMethod
+ virtual
+ void Method(DWORD methodAttrs,
+ DWORD implFlags,
+ DWORD implHints, // have to figure out how exactly to do this so it's not so tied to the CLR implementation
+ DWORD methodToken,
+ DWORD overriddenMethodToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ AdvanceMethodDef(methodToken);
+
+ DWORD encodingIndex = 0xffff;
+
+ if (methodAttrs < 0x10000 && implFlags < 0x100 && implHints < 0x100)
+ {
+ // common method attribute values - implHints in upper 8 bits,
+ // implFlags in following 8 bits, methodAttrs in bottom 16 bits
+ static DWORD encodingTable[32] =
+ {
+ 0x00000886, // 0886, 00, 00: 17837 (sum = 17837)
+ 0x00000081, // 0081, 00, 00: 9726 (sum = 27563)
+ 0x060005c6, // 05c6, 00, 06: 7042 (sum = 34605)
+ 0x00000086, // 0086, 00, 00: 6841 (sum = 41446)
+ 0x000000c6, // 00c6, 00, 00: 5922 (sum = 47368)
+ 0x00000083, // 0083, 00, 00: 5217 (sum = 52585)
+ 0x000008c6, // 08c6, 00, 00: 4369 (sum = 56954)
+ 0x10001886, // 1886, 00, 10: 4129 (sum = 61083)
+ 0x00000883, // 0883, 00, 00: 3662 (sum = 64745)
+ 0x00000096, // 0096, 00, 00: 3577 (sum = 68322)
+ 0x000000c4, // 00c4, 00, 00: 3042 (sum = 71364)
+ 0x00000896, // 0896, 00, 00: 2860 (sum = 74224)
+ 0x000001e1, // 01e1, 00, 00: 2586 (sum = 76810)
+ 0x00000093, // 0093, 00, 00: 2574 (sum = 79384)
+ 0x00000091, // 0091, 00, 00: 2544 (sum = 81928)
+ 0x30001886, // 1886, 00, 30: 2449 (sum = 84377)
+ 0x000001c6, // 01c6, 00, 00: 2179 (sum = 86556)
+ 0x000001e6, // 01e6, 00, 00: 2028 (sum = 88584)
+ 0x000001c4, // 01c4, 00, 00: 1835 (sum = 90419)
+ 0x10001883, // 1883, 00, 10: 1812 (sum = 92231)
+ 0x000009c6, // 09c6, 00, 00: 1706 (sum = 93937)
+ 0x40001891, // 1891, 00, 40: 1482 (sum = 95419)
+ 0x06000dc6, // 0dc6, 00, 06: 1473 (sum = 96892)
+ 0x030301c6, // 01c6, 03, 03: 1449 (sum = 98341)
+ 0x02802096, // 2096, 80, 02: 1235 (sum = 99576)
+ 0x000002c3, // 02c3, 00, 00: 1144 (sum = 100720)
+ 0x02802093, // 2093, 80, 02: 1090 (sum = 101810)
+ 0x00000881, // 0881, 00, 00: 926 (sum = 102736)
+ 0x000009e6, // 09e6, 00, 00: 903 (sum = 103639)
+ 0x000009e1, // 09e1, 00, 00: 860 (sum = 104499)
+ 0x00000084, // 0084, 00, 00: 720 (sum = 105219)
+ 0x068005c6, // 05c6, 80, 06: 653 (sum = 105872)
+ };
+
+ DWORD encoding = (implHints<<24) + (implFlags <<16) + methodAttrs;
+ for (encodingIndex = 0; encodingIndex < sizeof(encodingTable)/sizeof(encodingTable[0]); encodingIndex++)
+ {
+ if (encoding == encodingTable[encodingIndex])
+ break;
+ }
+ }
+ if (encodingIndex < 32)
+ {
+ m_stream->WriteByte(BYTE(CLT_SIMPLE_METHOD + encodingIndex));
+ }
+ else if (implFlags == 0 && implHints == 0)
+ {
+ m_stream->WriteByte(CLT_NORMAL_METHOD);
+
+ methodAttrs ^= mdHideBySig; // this being the default for C#, it's on for most of the base libs
+
+ m_stream->WriteUnsigned(methodAttrs);
+ }
+ else
+ {
+ m_stream->WriteByte(CLT_METHOD);
+
+ methodAttrs ^= mdHideBySig; // this being the default for C#, it's on for most of the base libs
+
+ m_stream->WriteUnsigned(methodAttrs);
+ m_stream->WriteUnsigned(implFlags);
+ m_stream->WriteUnsigned(implHints);
+ }
+ // if the method is not virtual, or it's newslot, it can't override anything
+ assert((IsMdVirtual(methodAttrs) && !IsMdNewSlot(methodAttrs)) || overriddenMethodToken == 0);
+ if (IsMdVirtual(methodAttrs) && !IsMdNewSlot(methodAttrs))
+ WriteMethodDefOrRef(overriddenMethodToken, m_stream);
+ }
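+
+ // Decoding the table above (illustrative, not in the original source):
+ // each entry is (implHints << 24) + (implFlags << 16) + methodAttrs, so
+ // 0x060005c6 means implHints 0x06, implFlags 0x00, methodAttrs 0x05c6 -
+ // matching the "05c6, 00, 06" annotation. Each of the 32 entries encodes
+ // as the single opcode byte CLT_SIMPLE_METHOD + index.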
+
+ // call once for each PInvoke method
+ virtual
+ void PInvokeMethod( DWORD methodAttrs,
+ DWORD implFlags,
+ DWORD implHints, // have to figure out how exactly to do this so it's not so tied to the CLR implementation
+ DWORD methodToken,
+ LPCSTR moduleName,
+ LPCSTR entryPointName,
+ WORD wLinkFlags)
+ {
+ STANDARD_VM_CONTRACT;
+
+ AdvanceMethodDef(methodToken);
+
+ m_stream->WriteByte(CLT_PINVOKE_METHOD);
+
+ methodAttrs ^= mdHideBySig; // this being the default for C#, it's on for most of the base libs
+
+ m_stream->WriteUnsigned(methodAttrs);
+ m_stream->WriteUnsigned(implFlags);
+
+ DWORD entryPointNameIndexOrOrdinal;
+ if (entryPointName != NULL && entryPointName[0] == '#')
+ {
+ // this is import-by-ordinal
+ char *endPtr;
+ entryPointNameIndexOrOrdinal = strtoul(&entryPointName[1], &endPtr, 10);
+ assert(*endPtr == '\0');
+ implHints |= IH_BY_ORDINAL;
+ }
+ else
+ {
+ // this is import-by-name
+ entryPointNameIndexOrOrdinal = EmitName(entryPointName);
+ }
+ m_stream->WriteUnsigned(implHints);
+
+ // the method should not be virtual
+ assert(!IsMdVirtual(methodAttrs));
+
+ m_stream->WriteUnsigned(EmitName(moduleName));
+
+ m_stream->WriteUnsigned(entryPointNameIndexOrOrdinal);
+
+ m_stream->WriteUnsigned(wLinkFlags); // calling convention, Ansi/Unicode, ...
+ }
+
+ // call once for each DllExport method (Redhawk only feature, at least for now)
+ virtual
+ void DllExportMethod( DWORD methodAttrs,
+ DWORD implFlags,
+ DWORD implHints, // have to figure out how exactly to do this so it's not so tied to the CLR implementation
+ DWORD methodToken,
+ LPCSTR entryPointName,
+ DWORD callingConvention)
+ {
+ STANDARD_VM_CONTRACT;
+
+ AdvanceMethodDef(methodToken);
+
+ m_stream->WriteByte(CLT_DLLEXPORT_METHOD);
+
+ methodAttrs ^= mdHideBySig; // this being the default for C#, it's on for most of the base libs
+
+ m_stream->WriteUnsigned(methodAttrs);
+ m_stream->WriteUnsigned(implFlags);
+
+ DWORD entryPointNameIndexOrOrdinal;
+ if (entryPointName[0] == '#')
+ {
+ // this is export-by-ordinal
+ char *endPtr;
+ entryPointNameIndexOrOrdinal = strtoul(&entryPointName[1], &endPtr, 10);
+ assert(*endPtr == '\0');
+ implHints |= IH_BY_ORDINAL;
+ }
+ else
+ {
+ // this is export-by-name
+ entryPointNameIndexOrOrdinal = EmitName(entryPointName);
+ }
+ m_stream->WriteUnsigned(implHints);
+
+ // the method should not be virtual
+ assert(!IsMdVirtual(methodAttrs));
+
+ // in fact the method should be static
+ assert(IsMdStatic(methodAttrs));
+
+ m_stream->WriteUnsigned(entryPointNameIndexOrOrdinal);
+ m_stream->WriteUnsigned(callingConvention);
+ }
+
+ virtual
+ void StubMethod( DWORD dwMethodFlags,
+ DWORD sigToken,
+ DWORD methodToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ assert(m_generatingStubs || m_prevMethodDefToken == 0x06000000);
+ INDEBUG(m_generatingStubs = true);
+
+ AdvanceMethodDef(methodToken);
+
+ m_stream->WriteByte(CLT_STUB_METHOD);
+ m_stream->WriteUnsigned(dwMethodFlags);
+
+ if (dwMethodFlags & SF_NEEDS_STUB_SIGNATURE)
+ {
+ assert(TypeFromToken(sigToken) == mdtSignature);
+ assert(RidFromToken(sigToken) > 0);
+
+ m_stream->WriteUnsigned(RidFromToken(sigToken));
+ }
+
+ m_stubMethodCount++;
+ }
+
+ virtual
+ void StubAssociation( DWORD ownerToken,
+ DWORD *stubTokens,
+ size_t numStubs)
+ {
+ STANDARD_VM_CONTRACT;
+
+ // note that we may be generating associations without previously calling StubMethod
+ // if the stub method is an ordinary (as opposed to dynamically generated) method
+ WriteMethodDefOrRef(ownerToken, m_stubAssocStream);
+ m_stubAssocStream->WriteUnsigned((DWORD)numStubs);
+
+ for (size_t i = 0; i < numStubs; i++)
+ {
+ WriteMethodDefOrRef(stubTokens[i], m_stubAssocStream);
+ }
+ }
+
+ // call once for each method impl
+ virtual
+ void MethodImpl(DWORD declToken,
+ DWORD implToken )
+ {
+ STANDARD_VM_CONTRACT;
+
+ AdvanceMethodDef(implToken);
+
+ m_stream->WriteByte(CLT_METHOD_IMPL);
+ WriteMethodDefOrRef(declToken, m_stream);
+ }
+
+ // set an explicit size for explicit layout structs
+ virtual
+ void SizeType(DWORD size)
+ {
+ STANDARD_VM_CONTRACT;
+
+ m_stream->WriteByte(CLT_SIZE);
+ m_stream->WriteUnsigned(size);
+ }
+
+ // specify the packing size
+ virtual
+ void PackType(DWORD packingSize)
+ {
+ STANDARD_VM_CONTRACT;
+
+ m_stream->WriteByte(CLT_PACK);
+ m_stream->WriteUnsigned(packingSize);
+ }
+
+ // specify a generic parameter to a type or method
+ virtual
+ void GenericParameter(DWORD genericParamToken, DWORD flags)
+ {
+ STANDARD_VM_CONTRACT;
+
+ assert(TypeFromToken(genericParamToken) == mdtGenericParam);
+ m_stream->WriteByte(CLT_GENERIC_PARAM);
+ m_stream->WriteUnsigned(RidFromToken(genericParamToken));
+ m_stream->WriteUnsigned(flags);
+ }
+
+ // specify a field representation on the native side
+ virtual
+ void NativeField(DWORD fieldToken, // an mdFieldDef
+ DWORD nativeType, // really an NStructFieldType
+ DWORD nativeOffset,
+ DWORD count,
+ DWORD flags,
+ DWORD typeToken1,
+ DWORD typeToken2)
+ {
+ STANDARD_VM_CONTRACT;
+
+ assert(TypeFromToken(fieldToken) == mdtFieldDef);
+ AdvanceFieldDef(fieldToken);
+
+ if (nativeOffset != ~0)
+ {
+ m_stream->WriteByte(CLT_FIELD_OFFSET);
+ m_stream->WriteUnsigned(nativeOffset);
+ }
+ m_stream->WriteByte(CLT_NATIVE_FIELD);
+
+ // we encode the native type together with flags that tell us
+ // what other information follows
+ enum NativeInformationFlags
+ {
+ NFI_TYPEMASK = 0x3F, // we assume the native type fits into this
+ NFI_FIRSTFLAG = 0x40,
+ NFI_COUNT = NFI_FIRSTFLAG,
+ NFI_FLAGS = NFI_COUNT<<1,
+ NFI_TYPETOKEN1 = NFI_FLAGS<<1,
+ NFI_TYPETOKEN2 = NFI_TYPETOKEN1<<1,
+ };
+
+ assert((nativeType & NFI_TYPEMASK) == nativeType);
+ if (typeToken1 != 0)
+ nativeType |= NFI_TYPETOKEN1;
+ if (count != 0)
+ nativeType |= NFI_COUNT;
+ if (flags != 0)
+ nativeType |= NFI_FLAGS;
+ if (typeToken2 != 0)
+ nativeType |= NFI_TYPETOKEN2;
+
+ m_stream->WriteUnsigned(nativeType);
+
+ if (nativeType & NFI_COUNT)
+ m_stream->WriteUnsigned(count);
+
+ if (nativeType & NFI_FLAGS)
+ m_stream->WriteUnsigned(flags);
+
+ if (nativeType & NFI_TYPETOKEN1)
+ m_stream->WriteUnsigned(typeToken1);
+
+ if (nativeType & NFI_TYPETOKEN2)
+ m_stream->WriteUnsigned(typeToken2);
+ }
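+
+ // Illustrative example (not in the original source): a fixed-size array
+ // field with count == 10 and no extra flags or type tokens sets NFI_COUNT,
+ // so the stream holds CLT_NATIVE_FIELD, then nativeType | 0x40, then 10;
+ // a reader consults the same flag bits to know which values follow.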
+
+ // specify guid info for interface types
+ virtual
+ void GuidInformation(GuidInfo *guidInfo)
+ {
+ STANDARD_VM_CONTRACT;
+
+ m_stream->WriteByte(CLT_GUIDINFO);
+
+ // write the actual guid data verbatim
+ BYTE *guidData = (BYTE *)&guidInfo->m_Guid;
+ for (size_t i = 0; i < sizeof(guidInfo->m_Guid); i++)
+ m_stream->WriteByte(guidData[i]);
+
+ // write the flag - use WriteUnsigned for future extensibility
+ m_stream->WriteUnsigned(guidInfo->m_bGeneratedFromName);
+ }
+
+ // end the description of the type
+ virtual
+ void EndType()
+ {
+ STANDARD_VM_CONTRACT;
+ TritonStress(TritonStress_GenerateCTL, this->m_typeDefToken, 0, TritonStressFlag_MainModule);
+
+ m_stream->WriteByte(CLT_END_TYPE);
+
+ size_t size;
+ BYTE *buffer = m_stream->GetBuffer(size);
+
+ m_pZapImage->FlushCompactLayoutData(this->m_typeDefToken, buffer, (ULONG)size);
+ }
+
+ ULONG FindOrCreateExtModuleID(Module *module)
+ {
+ STANDARD_VM_CONTRACT;
+
+ IMDInternalImport *pMDImport = module->GetMDImport();
+ LPCSTR assemblyName = NULL;
+ pMDImport->GetAssemblyProps(TokenFromRid(1, mdtAssembly), NULL, NULL, NULL, &assemblyName, NULL, NULL);
+ ULONG emittedName;
+ if (assemblyName == NULL)
+ {
+ pMDImport->GetScopeProps(&assemblyName, NULL);
+ emittedName = EmitName(assemblyName);
+ }
+ else
+ {
+ emittedName = EmitAssemblyName(pMDImport, TokenFromRid(1, mdtAssembly));
+ }
+
+ COUNT_T tableSize = m_pZapImage->m_extModRef.GetCount();
+ for (COUNT_T i = 1; i < tableSize; i++)
+ {
+ // Take advantage of the emitted name hash that ensures that there will only be one copy of an emitted name.
+ if (m_pZapImage->m_extModRef[i].name == emittedName)
+ {
+ return i;
+ }
+ }
+
+ // not found, create a new entry
+ m_pZapImage->m_extModRef.SetCount(tableSize + 1);
+ m_pZapImage->m_extModRef[tableSize].name = emittedName;
+ m_pZapImage->m_extModRef[tableSize].flags = ZapImage::ExtModRef::NO_FLAGS;
+
+ return tableSize;
+ }
+
+ ULONG FindOrCreateExtTypeRef(MethodTable *pMT)
+ {
+ STANDARD_VM_CONTRACT;
+
+ DWORD typeToken = pMT->GetCl();
+ ULONG typeOrdinal = RidFromToken(typeToken);
+ ULONG moduleID = FindOrCreateExtModuleID(pMT->GetModule());
+
+ COUNT_T tableSize = m_pZapImage->m_extTypeRef.GetCount();
+ COUNT_T tableSize2 = m_pZapImage->m_extTypeRefExtend.GetCount();
+
+ for (COUNT_T i = 1; i < tableSize; i++)
+ {
+ if (typeOrdinal == m_pZapImage->m_extTypeRef[i].ordinal && moduleID == m_pZapImage->m_extTypeRef[i].module)
+ return i;
+ }
+
+ // not found, create a new entry
+
+ LPCUTF8 pszNamespace;
+ LPCUTF8 pszName;
+ pszName = pMT->GetFullyQualifiedNameInfo(&pszNamespace);
+ ULONG offsName = EmitName(pszName);
+ ULONG offsNamespace = EmitName(pszNamespace);
+ ULONG resolutionScope = 0;
+ mdToken tkEncloser = 0;
+
+ if (SUCCEEDED(pMT->GetModule()->GetMDImport()->GetNestedClassProps(pMT->GetCl(), &tkEncloser)))
+ {
+ EEClass *eeClass = LoadTypeDef(pMT->GetModule(), tkEncloser);
+ assert(eeClass != 0);
+ resolutionScope = FindOrCreateExtTypeRef(eeClass->GetMethodTable());
+
+ // Re-acquire tableSize and tableSize2 as the above call to FindOrCreateExtTypeRef may have invalidated
+ // the existing local variables.
+ tableSize = m_pZapImage->m_extTypeRef.GetCount();
+ tableSize2 = m_pZapImage->m_extTypeRefExtend.GetCount();
+ }
+
+ m_pZapImage->m_extTypeRef.SetCount(tableSize + 1);
+ m_pZapImage->m_extTypeRef[tableSize].module = moduleID;
+ m_pZapImage->m_extTypeRef[tableSize].ordinal = typeOrdinal;
+
+ m_pZapImage->m_extTypeRefExtend.SetCount(tableSize2 + 1);
+
+ m_pZapImage->m_extTypeRefExtend[tableSize2].name = offsName;
+ m_pZapImage->m_extTypeRefExtend[tableSize2].name_space = offsNamespace;
+ m_pZapImage->m_extTypeRefExtend[tableSize2].resolutionScope = resolutionScope;
+
+ return tableSize;
+ }
+
+ mdMemberRef FindOrCreateExtMemberRef(DWORD typeToken, ULONG isField, ULONG memberOrdinal, LPCUTF8 pszName, Module *pModule, mdToken tkDefToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ assert(TypeFromToken(typeToken) == mdtTypeRef || TypeFromToken(typeToken) == mdtTypeSpec);
+ ULONG typeRid = RidFromToken(typeToken);
+ unsigned isTypeSpec = TypeFromToken(typeToken) == mdtTypeSpec;
+
+ COUNT_T tableSize = m_pZapImage->m_extMemberRef.GetCount();
+ COUNT_T tableSize2 = m_pZapImage->m_extMemberRefExtend.GetCount();
+
+ for (COUNT_T i = 1; i < tableSize; i++)
+ {
+ if (isTypeSpec == m_pZapImage->m_extMemberRef[i].isTypeSpec &&
+ typeRid == m_pZapImage->m_extMemberRef[i].typeRid &&
+ isField == m_pZapImage->m_extMemberRef[i].isField &&
+ memberOrdinal == m_pZapImage->m_extMemberRef[i].ordinal)
+ {
+ return TokenFromRid(i, mdtMemberRef);
+ }
+ }
+
+ // not found, create a new entry
+ ULONG memberRefName = EmitName(pszName);
+
+ InlineContext context(pModule);
+ PCCOR_SIGNATURE pSig;
+ DWORD cbSig;
+ IMDInternalImport *pMDImport = pModule->GetMDImport();
+
+ switch (TypeFromToken(tkDefToken))
+ {
+ case mdtMethodDef:
+ IfFailThrow(pMDImport->GetSigOfMethodDef(tkDefToken, &cbSig, &pSig));
+ break;
+
+ case mdtFieldDef:
+ IfFailThrow(pMDImport->GetSigOfFieldDef(tkDefToken, &cbSig, &pSig));
+ break;
+ default:
+ assert(!"bad token type");
+ return 0;
+ }
+
+ SigBuilder sigBuilder;
+ EncodeMemberRefSignature(&context, pSig, cbSig, sigBuilder);
+
+ DWORD size;
+ BYTE *newBuffer = (BYTE *)sigBuilder.GetSignature(&size);
+
+ BOOL fCreateNewSig = TRUE;
+ ULONG offsOfSig = 0;
+
+ // Check to see if we've already created this signature
+ for (COUNT_T i = 1; i < tableSize2; i++)
+ {
+ COUNT_T offs = m_pZapImage->m_extMemberRefExtend[i].signature;
+ BYTE *oldBuffer = &m_pZapImage->m_compactLayoutBuffer[offs];
+ if (memcmp(oldBuffer, newBuffer, size) == 0)
+ {
+ fCreateNewSig = FALSE;
+ offsOfSig = offs;
+ break;
+ }
+ }
+
+ m_pZapImage->m_extMemberRef.SetCount(tableSize + 1);
+ m_pZapImage->m_extMemberRef[tableSize].isTypeSpec = isTypeSpec;
+ m_pZapImage->m_extMemberRef[tableSize].typeRid = typeRid;
+ m_pZapImage->m_extMemberRef[tableSize].isField = isField;
+ m_pZapImage->m_extMemberRef[tableSize].ordinal = memberOrdinal;
+
+ m_pZapImage->m_extMemberRefExtend.SetCount(tableSize2 + 1);
+ m_pZapImage->m_extMemberRefExtend[tableSize2].name = memberRefName;
+ m_pZapImage->m_extMemberRefExtend[tableSize2].signature = offsOfSig;
+
+ mdToken extMemberRef2Token = TokenFromRid(tableSize2, mdtMemberRef);
+ if (fCreateNewSig)
+ {
+ m_pZapImage->FlushCompactLayoutData(extMemberRef2Token, newBuffer, size);
+ }
+
+ return TokenFromRid(tableSize, mdtMemberRef);
+ }
+
+ mdMethodSpec FindOrCreateMethodSpec(ByteStreamWriter &stream)
+ {
+ STANDARD_VM_CONTRACT;
+
+ size_t size;
+ BYTE *newBuffer = stream.GetBuffer(size);
+ COUNT_T methodSpecCount = m_pZapImage->m_methodSpecToOffs.GetCount();
+ for (COUNT_T i = 1; i < methodSpecCount; i++)
+ {
+ COUNT_T offs = m_pZapImage->m_methodSpecToOffs[i];
+ BYTE *oldBuffer = &m_pZapImage->m_compactLayoutBuffer[offs];
+ if (memcmp(oldBuffer, newBuffer, size) == 0)
+ return TokenFromRid(i, mdtMethodSpec);
+ }
+ // not found, hence create
+ m_pZapImage->m_methodSpecToOffs.SetCount(methodSpecCount+1);
+ mdMethodSpec resultToken = TokenFromRid(methodSpecCount, mdtMethodSpec);
+ m_pZapImage->FlushCompactLayoutData(resultToken, newBuffer, (COUNT_T)size);
+ return resultToken;
+ }
+
+ mdSignature FindOrCreateSignature(ByteStreamWriter &stream)
+ {
+ STANDARD_VM_CONTRACT;
+
+ size_t size;
+ BYTE *newBuffer = stream.GetBuffer(size);
+ COUNT_T signatureCount = m_pZapImage->m_signatureToOffs.GetCount();
+ for (COUNT_T i = 1; i < signatureCount; i++)
+ {
+ COUNT_T offs = m_pZapImage->m_signatureToOffs[i];
+ BYTE *oldBuffer = &m_pZapImage->m_compactLayoutBuffer[offs];
+ if (memcmp(oldBuffer, newBuffer, size) == 0)
+ return TokenFromRid(mdtSignature, i);
+ }
+ // not found, hence create
+ m_pZapImage->m_signatureToOffs.SetCount(signatureCount+1);
+ mdSignature resultToken = TokenFromRid(signatureCount, mdtSignature);
+ m_pZapImage->FlushCompactLayoutData(resultToken, newBuffer, (COUNT_T)size);
+ return resultToken;
+ }
+
+ void EncodeMethodSpec(mdToken parentToken, PCCOR_SIGNATURE pSig, DWORD cbSig, ByteStreamWriter *stream)
+ {
+ STANDARD_VM_CONTRACT;
+
+ SigPointer sig(pSig, cbSig);
+ WriteMethodDefOrRef(parentToken, stream);
+ BYTE b;
+ sig.GetByte(&b);
+ assert(b == IMAGE_CEE_CS_CALLCONV_GENERICINST);
+ ULONG typeArgCount;
+ sig.GetData(&typeArgCount);
+ stream->WriteUnsigned(typeArgCount);
+ for (ULONG typeArgIndex = 0; typeArgIndex < typeArgCount; typeArgIndex++)
+ EncodeType(&sig, stream);
+ }
+
+ void EncodeMemberRefSignature(InlineContext *context, PCCOR_SIGNATURE pSig, DWORD cbSig, SigBuilder &sigBuilder)
+ {
+ STANDARD_VM_CONTRACT;
+
+ SigPointer sp(pSig, cbSig);
+
+ BYTE b;
+ IfFailThrow(sp.GetByte(&b));
+ sigBuilder.AppendByte(b);
+
+ ULONG paramCount;
+
+ if (b == IMAGE_CEE_CS_CALLCONV_FIELD)
+ {
+ // FieldSigs are encoded like methodRefSigs with 0 parameters
+ paramCount = 0;
+ }
+ else
+ {
+ if ((b & IMAGE_CEE_CS_CALLCONV_GENERIC) != 0)
+ {
+ ULONG genericArgCount;
+
+ // Copy Generic Argument Count
+ IfFailThrow(sp.GetData(&genericArgCount));
+ sigBuilder.AppendData(genericArgCount);
+ }
+
+ // Copy Parameter count
+ IfFailThrow(sp.GetData(&paramCount));
+ sigBuilder.AppendData(paramCount);
+ }
+
+ // Copy Parameters and return value across (return value is param 0)
+ for (ULONG paramIndex = 0; paramIndex <= paramCount; paramIndex++)
+ {
+ IfFailThrow(sp.PeekByte(&b));
+ if (b == ELEMENT_TYPE_SENTINEL)
+ {
+ IfFailThrow(sp.GetByte(&b));
+ sigBuilder.AppendByte(b);
+ }
+
+ expandSignature(sp, context, sigBuilder);
+ }
+
+ return;
+ }
+
+ mdMethodSpec FindOrCreateMethodSpec(mdToken parentToken, SigBuilder &sb)
+ {
+ STANDARD_VM_CONTRACT;
+
+ // recode this as a CTL sig
+ DWORD cbSig;
+ PCCOR_SIGNATURE pSig = (PCCOR_SIGNATURE)sb.GetSignature(&cbSig);
+ ByteStreamWriter stream;
+ EncodeMethodSpec(parentToken, pSig, cbSig, &stream);
+ // look it up in the CTL tokens we have already generated
+ mdToken methodSpecToken = FindOrCreateMethodSpec(stream);
+ // associate the methodSpecToken with the IL sig
+ m_tokenToSig.Set(methodSpecToken, pSig, cbSig);
+ return methodSpecToken;
+ }
+
+ void EncodeSignature(PCCOR_SIGNATURE pSig, DWORD cbSig, ByteStreamWriter *stream)
+ {
+ STANDARD_VM_CONTRACT;
+
+ SigPointer sig(pSig, cbSig);
+ BYTE b;
+ sig.GetByte(&b);
+ stream->WriteByte(b);
+ ULONG argCount;
+ sig.GetData(&argCount);
+ stream->WriteUnsigned(argCount);
+ for (ULONG argIndex = 0; argIndex <= argCount; argIndex++)
+ EncodeType(&sig, stream);
+ }
+
+ mdSignature FindOrCreateSignature(SigBuilder &sb)
+ {
+ STANDARD_VM_CONTRACT;
+
+ // recode this as a CTL sig
+ DWORD cbSig;
+ PCCOR_SIGNATURE pSig = (PCCOR_SIGNATURE)sb.GetSignature(&cbSig);
+ ByteStreamWriter stream;
+ EncodeSignature(pSig, cbSig, &stream);
+ // look it up in the CTL tokens we have already generated
+ mdToken signatureToken = FindOrCreateSignature(stream);
+ // associate the signatureToken with the IL sig
+ m_tokenToSig.Set(signatureToken, pSig, cbSig);
+ return TokenFromRid(RidFromToken(signatureToken), mdtSignature);
+ }
+
+ DWORD GetToken(MethodTable *pMT)
+ {
+ STANDARD_VM_CONTRACT;
+
+ DWORD typeToken = pMT->GetCl();
+ if (pMT->GetModule() != m_pModule)
+ {
+ ULONG extTypeID = FindOrCreateExtTypeRef(pMT);
+ typeToken = TokenFromRid(extTypeID, mdtTypeRef);
+ }
+ return typeToken;
+ }
+
+ void Encode(TypeHandle th, ByteStreamWriter *stream)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (th.IsGenericVariable())
+ {
+ TypeVarTypeDesc *tvtd = th.AsGenericVariable();
+ stream->WriteByte(BYTE(tvtd->GetInternalCorElementType()));
+ stream->WriteUnsigned(tvtd->GetIndex());
+ }
+ else if (!th.IsTypeDesc())
+ {
+ Encode(th.AsMethodTable(), stream);
+ }
+ else if (th.IsArray())
+ {
+ ArrayTypeDesc *atd = th.AsArray();
+ CorElementType elType = atd->GetInternalCorElementType();
+ stream->WriteByte((BYTE)elType);
+ Encode(atd->GetArrayElementTypeHandle(), stream);
+ if (elType == ELEMENT_TYPE_ARRAY)
+ {
+ stream->WriteUnsigned(atd->GetRank());
+ stream->WriteUnsigned(0);
+ stream->WriteUnsigned(0);
+ }
+ }
+ else
+ {
+ assert(!"NYI");
+ }
+ }
+
+ void Encode(MethodTable *pMT, ByteStreamWriter *stream)
+ {
+ STANDARD_VM_CONTRACT;
+
+ CorElementType elType = pMT->GetSignatureCorElementType();
+ if (pMT->HasInstantiation())
+ {
+ stream->WriteByte(ELEMENT_TYPE_GENERICINST);
+ elType = pMT->IsValueType() ? ELEMENT_TYPE_VALUETYPE : ELEMENT_TYPE_CLASS;
+ stream->WriteByte(BYTE(elType));
+ WriteTypeDefOrRef(GetToken(pMT), stream);
+ stream->WriteUnsigned(pMT->GetNumGenericArgs());
+ Instantiation instantiation = pMT->GetInstantiation();
+ for (DWORD i = 0; i < instantiation.GetNumArgs(); i++)
+ {
+ Encode(instantiation[i], stream);
+ }
+ }
+ else
+ {
+ stream->WriteByte(BYTE(elType));
+ if (elType == ELEMENT_TYPE_CLASS || elType == ELEMENT_TYPE_VALUETYPE)
+ WriteTypeDefOrRef(GetToken(pMT), stream);
+ else
+ assert(elType <= ELEMENT_TYPE_STRING || elType == ELEMENT_TYPE_I || elType == ELEMENT_TYPE_U);
+ }
+ }
+
+ mdTypeSpec GetTypeSpecToken(ByteStreamWriter *stream)
+ {
+ STANDARD_VM_CONTRACT;
+
+ size_t size;
+ BYTE *buffer = stream->GetBuffer(size);
+ for (COUNT_T i = 1; i < m_pZapImage->m_typeSpecToOffs.GetCount(); i++)
+ {
+ COUNT_T startOffs = m_pZapImage->m_typeSpecToOffs[i];
+ if (memcmp(&m_pZapImage->m_compactLayoutBuffer[startOffs], buffer, size) == 0)
+ return TokenFromRid(i, mdtTypeSpec);
+ }
+ // we have not found it - let's add it to the CTL type spec table
+
+ // we need to give it a new token and expand the table
+ DWORD typeSpecToken = TokenFromRid(m_pZapImage->m_typeSpecToOffs.GetCount(), mdtTypeSpec);
+
+ m_pZapImage->m_typeSpecToOffs.SetCount(m_pZapImage->m_typeSpecToOffs.GetCount()+1);
+ m_pZapImage->FlushCompactLayoutData(typeSpecToken, buffer, (ULONG)size);
+
+ return typeSpecToken;
+ }
+
+ // translate a method def in another module or on a generic type
+ // into a member ref in our own module (which we may have to create)
+ virtual
+ mdMemberRef GetTokenForMethodDesc(MethodDesc *methodDesc, MethodTable *pMT)
+ {
+ STANDARD_VM_CONTRACT;
+
+ // check for the trivial case - non-generic method in the same module
+ if (methodDesc->GetModule() == m_pModule && !methodDesc->HasClassOrMethodInstantiation())
+ return methodDesc->GetMemberDef();
+
+ Module *module = methodDesc->GetModule();
+ if (pMT == NULL)
+ pMT = methodDesc->GetMethodTable();
+
+ // translate an external typedef into a typeref
+ mdToken typeToken = GetToken(pMT);
+
+ if (pMT->HasInstantiation())
+ {
+ ByteStreamWriter stream;
+
+ Encode(pMT, &stream);
+
+ typeToken = GetTypeSpecToken(&stream);
+ }
+
+ if (methodDesc->HasMethodInstantiation())
+ {
+ _ASSERTE(methodDesc->IsGenericMethodDefinition());
+ if (TypeFromToken(typeToken) == mdtTypeDef)
+ return methodDesc->GetMemberDef();
+ }
+
+ HENUMInternalHolder hEnumMethodDef(module->GetMDImport());
+ hEnumMethodDef.EnumInit(mdtMethodDef, pMT->GetCl());
+ ULONG methodCount = hEnumMethodDef.EnumGetCount();
+ mdMethodDef firstMethodDefToken;
+ hEnumMethodDef.EnumNext(&firstMethodDefToken);
+ mdMethodDef methodDefToken = methodDesc->GetMemberDef();
+ ULONG methodOrdinal = methodDefToken - firstMethodDefToken;
+ assert(methodOrdinal < methodCount);
+
+ return FindOrCreateExtMemberRef(typeToken, FALSE, methodOrdinal, methodDesc->GetNameOnNonArrayClass(), methodDesc->GetModule(), methodDefToken);
+ }
+
+ // we get passed a generic instantiation - find or create a type spec
+ // token for it
+ virtual
+ mdTypeSpec GetTypeSpecToken(PCCOR_SIGNATURE pGenericInstSig, DWORD cbGenericInstSig)
+ {
+ STANDARD_VM_CONTRACT;
+
+ IMDInternalImport *pMDImport = m_pModule->GetMDImport();
+
+ // search linearly through the existing type specs to see if we have a match
+ HENUMInternalHolder hEnumTypeSpec(pMDImport);
+ hEnumTypeSpec.EnumAllInit(mdtTypeSpec);
+ ULONG typeSpecCount = hEnumTypeSpec.EnumGetCount();
+ mdTypeSpec typeSpecToken;
+ while (hEnumTypeSpec.EnumNext(&typeSpecToken))
+ {
+ PCCOR_SIGNATURE pSig;
+ ULONG cbSig;
+ pMDImport->GetTypeSpecFromToken(typeSpecToken, &pSig, &cbSig);
+
+ // does this type spec match the generic instantiation?
+ if (cbGenericInstSig == cbSig && memcmp(pGenericInstSig, pSig, cbSig) == 0)
+ return typeSpecToken;
+ }
+
+ ByteStreamWriter stream;
+
+ // we need to encode it
+ SigPointer sig(pGenericInstSig, cbGenericInstSig);
+ EncodeType(&sig, &stream);
+
+ typeSpecToken = GetTypeSpecToken(&stream);
+
+ m_tokenToSig.Set(typeSpecToken, pGenericInstSig, cbGenericInstSig);
+
+ return typeSpecToken;
+ }
+
+ virtual
+ mdToken GetTokenForType(MethodTable *pMT)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (pMT == NULL)
+ return 0;
+
+ mdToken typeToken = GetToken(pMT);
+
+ if (pMT->HasInstantiation())
+ {
+ ByteStreamWriter stream;
+
+ Encode(pMT, &stream);
+
+ typeToken = GetTypeSpecToken(&stream);
+ }
+ return typeToken;
+ }
+
+ void GetSignatureForType(TypeHandle th, SigBuilder *pSig)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (!th.IsTypeDesc())
+ {
+ MethodTable *pMT = th.AsMethodTable();
+ CorElementType elType = pMT->GetSignatureCorElementType();
+ if (pMT->HasInstantiation())
+ {
+ pSig->AppendElementType(ELEMENT_TYPE_GENERICINST);
+ pSig->AppendElementType(pMT->IsValueType() ? ELEMENT_TYPE_VALUETYPE : ELEMENT_TYPE_CLASS);
+ pSig->AppendToken(GetToken(pMT));
+ pSig->AppendData(pMT->GetNumGenericArgs());
+ Instantiation inst = pMT->GetInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle t = inst[i];
+ CONSISTENCY_CHECK(!t.IsNull() && !t.IsEncodedFixup());
+ GetSignatureForType(t, pSig);
+ }
+ }
+ else
+ {
+ pSig->AppendElementType(elType);
+ if (elType == ELEMENT_TYPE_CLASS || elType == ELEMENT_TYPE_VALUETYPE)
+ pSig->AppendToken(GetToken(pMT));
+ else
+ assert(elType <= ELEMENT_TYPE_STRING || elType == ELEMENT_TYPE_I || elType == ELEMENT_TYPE_U);
+ }
+ }
+ else
+ {
+ TypeDesc *pDesc = th.AsTypeDesc();
+ CorElementType et = pDesc->GetInternalCorElementType();
+ switch (et)
+ {
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_SZARRAY:
+ {
+ pSig->AppendElementType(et);
+ GetSignatureForType(th.GetTypeParam(), pSig);
+ break;
+ }
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ pSig->AppendElementType((CorElementType)ELEMENT_TYPE_NATIVE_VALUETYPE);
+ GetSignatureForType(th.GetTypeParam(), pSig);
+ break;
+ }
+
+ case ELEMENT_TYPE_ARRAY:
+ {
+ pSig->AppendElementType(et);
+ GetSignatureForType(th.GetTypeParam(), pSig);
+
+ ArrayTypeDesc *arrayDesc = th.AsArray();
+
+ ULONG rank = arrayDesc->GetRank();
+ pSig->AppendData(rank);
+
+ if (rank != 0)
+ {
+ pSig->AppendData(0); // sizes
+ pSig->AppendData(0); // bounds
+ }
+ break;
+ }
+
+ default:
+ {
+ UNREACHABLE_MSG("Unexpected typedesc type");
+ }
+ }
+ }
+ }
+
+ virtual
+ mdToken GetTokenForType(CORINFO_CLASS_HANDLE type)
+ {
+ STANDARD_VM_CONTRACT;
+
+ TypeHandle th(type);
+
+ if (!th.IsTypeDesc())
+ {
+ return GetTokenForType(th.AsMethodTable());
+ }
+ else
+ {
+ SigBuilder sb;
+ GetSignatureForType(th, &sb);
+
+ DWORD cbSig;
+ PCCOR_SIGNATURE pSig = (PCCOR_SIGNATURE)sb.GetSignature(&cbSig);
+
+ return GetTypeSpecToken(pSig, cbSig);
+ }
+ }
+
+ struct NameHashElement
+ {
+ const CHAR *GetKey()
+ {
+ return m_name;
+ }
+ CHAR *m_name;
+ ULONG m_nameOffs;
+ };
+
+ class NameHashElementTraits : public StringSHashTraits<NameHashElement, CHAR>
+ {
+public:
+ static inline void OnDestructPerEntryCleanupAction(NameHashElement *e)
+ {
+ STANDARD_VM_CONTRACT;
+ delete [] e->m_name;
+ }
+ static const bool s_DestructPerEntryCleanupAction = true;
+ };
+
+ typedef SHash<NameHashElementTraits> NameHash;
+ NameHash m_nameHash;
+
+ ULONG EmitName(LPCSTR name)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (name == NULL || *name == 0)
+ {
+ return 0; // again, empty string at offset 0;
+ }
+
+ NameHashElement *pCacheElement = m_nameHash.Lookup(name);
+ if (pCacheElement != NULL)
+ return pCacheElement->m_nameOffs;
+
+ // emit name into the name pool, return offset
+ ULONG nameOffs = m_pZapImage->m_namePool.GetCount();
+ ULONG nameLength = (ULONG)(strlen(name) + 1);
+ m_pZapImage->m_namePool.SetCount(nameOffs + nameLength);
+ memcpy(&m_pZapImage->m_namePool[(COUNT_T)nameOffs], name, nameLength);
+
+ // Insert into hash
+ NewHolder<NameHashElement> pNewCacheElement = new NameHashElement();
+ size_t cchNameBuffer = strlen(name) + 1;
+ pNewCacheElement->m_name = new CHAR[cchNameBuffer];
+ strcpy_s(pNewCacheElement->m_name, cchNameBuffer, name);
+ pNewCacheElement->m_nameOffs = nameOffs;
+
+ m_nameHash.Add(pNewCacheElement);
+ pNewCacheElement.SuppressRelease();
+
+ return nameOffs;
+ }
+
+ EEClass *LoadTypeRef(Module *module, mdTypeRef typeRefToken, LoadFailureEnum loadflag = ThrowOnLoadFailure)
+ {
+ STANDARD_VM_CONTRACT;
+
+ const size_t NUM_ARGS = 1024;
+ TypeHandle instArgs[NUM_ARGS];
+ for (size_t i = 0; i < NUM_ARGS; i++)
+ {
+ instArgs[i] = TypeHandle(g_pCanonMethodTableClass);
+ }
+ Instantiation typeInstantiation(instArgs, NUM_ARGS);
+ Instantiation methodInstantiation(instArgs, NUM_ARGS);
+ SigTypeContext typeContext(typeInstantiation, methodInstantiation);
+
+ EEClass *result = NULL;
+ if (loadflag == ReturnNullOnLoadFailure)
+ {
+ EX_TRY
+ {
+ // load the type ref
+ TypeHandle th = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(module, typeRefToken, &typeContext,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ if (!th.IsTypeDesc())
+ result = th.AsMethodTable()->GetClass();
+ else
+ result = NULL;
+ }
+ EX_CATCH
+ {
+ // do nothing
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions)
+ }
+ else
+ {
+ // load the type ref
+ TypeHandle th = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(module, typeRefToken, &typeContext,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ if (!th.IsTypeDesc())
+ result = th.AsMethodTable()->GetClass();
+ else
+ {
+ IfFailThrow(E_UNEXPECTED);
+ }
+ }
+
+ return result;
+ }
+
+ EEClass *LoadTypeDef(Module *module, mdTypeDef typeDefToken, LoadFailureEnum loadflag = ThrowOnLoadFailure)
+ {
+ STANDARD_VM_CONTRACT;
+
+ const size_t NUM_ARGS = 1024;
+ TypeHandle instArgs[NUM_ARGS];
+ for (size_t i = 0; i < NUM_ARGS; i++)
+ {
+ instArgs[i] = TypeHandle(g_pCanonMethodTableClass);
+ }
+ Instantiation typeInstantiation(instArgs, NUM_ARGS);
+ Instantiation methodInstantiation(instArgs, NUM_ARGS);
+ SigTypeContext typeContext(typeInstantiation, methodInstantiation);
+
+ EEClass *result = NULL;
+ if (loadflag == ReturnNullOnLoadFailure)
+ {
+ EX_TRY
+ {
+ // load the type ref
+ TypeHandle th = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(module, typeDefToken, &typeContext,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ if (!th.IsTypeDesc())
+ result = th.AsMethodTable()->GetClass();
+ else
+ result = NULL;
+ }
+ EX_CATCH
+ {
+ // do nothing
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions)
+ }
+ else
+ {
+ // load the type def
+ TypeHandle th = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(module, typeDefToken, &typeContext,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ if (!th.IsTypeDesc())
+ result = th.AsMethodTable()->GetClass();
+ else
+ {
+ IfFailThrow(E_UNEXPECTED);
+ }
+ }
+
+ return result;
+ }
+
+ MethodDesc *LoadMethod(Module *module, mdMemberRef memberRefToken, LoadFailureEnum loadflag = ThrowOnLoadFailure)
+ {
+ STANDARD_VM_CONTRACT;
+
+ const size_t NUM_ARGS = 1024;
+ TypeHandle instArgs[NUM_ARGS];
+ for (size_t i = 0; i < NUM_ARGS; i++)
+ {
+ instArgs[i] = TypeHandle(g_pCanonMethodTableClass);
+ }
+ Instantiation typeInstantiation(instArgs, NUM_ARGS);
+ Instantiation methodInstantiation(instArgs, NUM_ARGS);
+ SigTypeContext typeContext(typeInstantiation, methodInstantiation);
+
+ MethodDesc *result = NULL;
+
+ /// MDIL_NEEDS_REVIEW
+ /// GetMethodDescFromMemberDefOrRefOrSpecThrowing has apparently been renamed (the "Throwing" suffix was dropped) - confirm this.
+ /// The same seems to be true for GetFieldDescFromMemberRefThrowing.
+
+ EX_TRY
+ {
+ result = MemberLoader::GetMethodDescFromMemberDefOrRefOrSpec(module, memberRefToken, &typeContext, FALSE, TRUE);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions)
+
+ if (result != NULL)
+ return result;
+
+ // retry with strictMetadataChecks = TRUE - the above may have failed because of generic constraint checking
+ if (loadflag == ReturnNullOnLoadFailure)
+ {
+ EX_TRY
+ {
+ result = MemberLoader::GetMethodDescFromMemberDefOrRefOrSpec(module, memberRefToken, &typeContext, TRUE, TRUE);
+ }
+ EX_CATCH
+ {
+ // do nothing
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions)
+ }
+ else
+ {
+ result = MemberLoader::GetMethodDescFromMemberDefOrRefOrSpec(module, memberRefToken, &typeContext, TRUE, TRUE);
+ }
+
+ return result;
+ }
+
+ FieldDesc *LoadField(Module *module, mdMemberRef memberRefToken, LoadFailureEnum loadflag = ThrowOnLoadFailure)
+ {
+ STANDARD_VM_CONTRACT;
+
+ const size_t NUM_ARGS = 1024;
+ TypeHandle instArgs[NUM_ARGS];
+ for (size_t i = 0; i < NUM_ARGS; i++)
+ {
+ instArgs[i] = TypeHandle(g_pCanonMethodTableClass);
+ }
+ Instantiation typeInstantiation(instArgs, NUM_ARGS);
+ Instantiation methodInstantiation(instArgs, NUM_ARGS);
+ SigTypeContext typeContext(typeInstantiation, methodInstantiation);
+
+ FieldDesc *result = NULL;
+
+ if (loadflag == ReturnNullOnLoadFailure)
+ {
+ EX_TRY
+ {
+ result = MemberLoader::GetFieldDescFromMemberDefOrRef(module, memberRefToken, &typeContext, true);
+ }
+ EX_CATCH
+ {
+ // do nothing
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions)
+ }
+ else
+ {
+ result = MemberLoader::GetFieldDescFromMemberDefOrRef(module, memberRefToken, &typeContext, true);
+ }
+
+ return result;
+ }
+
+ void EncodeType(SigPointer *sig, ByteStreamWriter *stream)
+ {
+ STANDARD_VM_CONTRACT;
+
+ BYTE b;
+ sig->GetByte(&b);
+ switch (b)
+ {
+ case ELEMENT_TYPE_VOID:
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_TYPEDBYREF:
+ case ELEMENT_TYPE_OBJECT:
+ stream->WriteByte(b);
+ break;
+
+ // every case above PTR is a simple type
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_NATIVE_VALUETYPE:
+ stream->WriteByte(b);
+ EncodeType(sig, stream);
+ break;
+
+ // Please use case ELEMENT_TYPE_VALUETYPE. case ELEMENT_TYPE_VALUECLASS is deprecated.
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_CLASS:
+ {
+ stream->WriteByte(b);
+ mdToken typeToken;
+ sig->GetToken(&typeToken);
+ WriteTypeDefOrRef(typeToken, stream);
+ break;
+ }
+
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ {
+ stream->WriteByte(b);
+ ULONG index;
+ sig->GetData(&index);
+ stream->WriteUnsigned(index);
+ }
+ break;
+
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ stream->WriteByte(b);
+ EncodeType(sig, stream);
+ ULONG typeArgCount;
+ sig->GetData(&typeArgCount);
+ stream->WriteUnsigned(typeArgCount);
+ for (ULONG typeArgIndex = 0; typeArgIndex < typeArgCount; typeArgIndex++)
+ {
+ EncodeType(sig, stream);
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_ARRAY:
+ {
+ stream->WriteByte(b);
+ EncodeType(sig, stream);
+ ULONG rank;
+ sig->GetData(&rank);
+ stream->WriteUnsigned(rank);
+ ULONG boundCount;
+ sig->GetData(&boundCount);
+ stream->WriteUnsigned(boundCount);
+ for (unsigned i = 0; i < boundCount; i++)
+ {
+ ULONG bound;
+ sig->GetData(&bound);
+ stream->WriteUnsigned(bound);
+ }
+ ULONG lowerBoundCount;
+ sig->GetData(&lowerBoundCount);
+ stream->WriteUnsigned(lowerBoundCount);
+ for (unsigned i = 0; i < lowerBoundCount; i++)
+ {
+ ULONG lowerBound;
+ sig->GetData(&lowerBound);
+ stream->WriteUnsigned(lowerBound);
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_CMOD_REQD:
+ case ELEMENT_TYPE_CMOD_OPT:
+ {
+ mdToken typeToken;
+ sig->GetToken(&typeToken);
+ EncodeType(sig, stream);
+ }
+ break;
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+ stream->WriteByte(b);
+ ULONG callingConvInfo;
+ sig->GetCallingConvInfo(&callingConvInfo);
+ stream->WriteUnsigned(callingConvInfo);
+ ULONG argCount;
+ sig->GetData(&argCount);
+ stream->WriteUnsigned(argCount);
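+ // note: <= because the return type is encoded in addition to the arguments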
+ for (ULONG argIndex = 0; argIndex <= argCount; argIndex++)
+ {
+ EncodeType(sig, stream);
+ }
+ }
+ break;
+
+ default:
+ stream->WriteByte(ELEMENT_TYPE_I);
+ printf("type spec not yet impemented: %x\n", b);
+ break;
+ }
+ }
+
+ DWORD TypeDefOfPrimitive(CorElementType elType)
+ {
+ STANDARD_VM_CONTRACT;
+
+ MethodTable *pMT = MscorlibBinder::GetElementType(elType);
+ if (pMT == NULL)
+ {
+ printf("Primitive type not found: 0x%x\n", elType);
+ return 0;
+ }
+ else
+ return pMT->GetCl();
+ }
+
+ DWORD TypeDefOfNamedType(__in_z char *nameSpace, __in_z char *name)
+ {
+ STANDARD_VM_CONTRACT;
+
+ MethodTable *pMT = ClassLoader::LoadTypeByNameThrowing( m_pModule->GetAssembly(), nameSpace, name,
+ ClassLoader::ReturnNullIfNotFound,
+ // == FailIfNotLoadedOrNotRestored
+ ClassLoader::LoadTypes,
+ CLASS_LOADED).AsMethodTable();
+ if (pMT == NULL)
+ {
+ printf("Named type not found: %s.%s\n", nameSpace, name);
+ return 0;
+ }
+ else
+ return pMT->GetCl();
+ }
+
+ IAssemblyName *CreateAssemblyNameFromAssemblyToken(IMDInternalImport *pMDImport, mdToken assemblyToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ LPCSTR assemblyName;
+ AssemblyMetaDataInternal asmMetadataInternal = {0};
+ const void *pvPublicKeyToken;
+ ULONG dwPublicKeyToken;
+ DWORD dwAssemblyRefFlags;
+
+ if (TypeFromToken(assemblyToken) == mdtAssemblyRef)
+ {
+ // Gather assembly ref information
+ IfFailThrow(pMDImport->GetAssemblyRefProps(assemblyToken, &pvPublicKeyToken, &dwPublicKeyToken, &assemblyName, &asmMetadataInternal, NULL, NULL, &dwAssemblyRefFlags));
+ }
+ else
+ {
+ IfFailThrow(pMDImport->GetAssemblyProps(assemblyToken, &pvPublicKeyToken, &dwPublicKeyToken, NULL, &assemblyName, &asmMetadataInternal, &dwAssemblyRefFlags));
+ }
+
+ SString szName(SString::Utf8, assemblyName);
+
+ // Create AssemblyName object
+ ReleaseHolder<IAssemblyName> pName;
+
+ IfFailThrow(CreateAssemblyNameObject(&pName, szName.GetUnicode(), 0, NULL));
+
+ IfFailThrow(pName->SetProperty(ASM_NAME_MAJOR_VERSION, &asmMetadataInternal.usMajorVersion, sizeof(WORD)));
+ IfFailThrow(pName->SetProperty(ASM_NAME_MINOR_VERSION, &asmMetadataInternal.usMinorVersion, sizeof(WORD)));
+ IfFailThrow(pName->SetProperty(ASM_NAME_REVISION_NUMBER, &asmMetadataInternal.usRevisionNumber, sizeof(WORD)));
+ IfFailThrow(pName->SetProperty(ASM_NAME_BUILD_NUMBER, &asmMetadataInternal.usBuildNumber, sizeof(WORD)));
+
+ if (asmMetadataInternal.szLocale)
+ {
+ SString szLocaleString;
+ szLocaleString.SetUTF8(asmMetadataInternal.szLocale);
+ IfFailThrow(pName->SetProperty(ASM_NAME_CULTURE, szLocaleString.GetUnicode(), (szLocaleString.GetCount() + 1) * sizeof(WCHAR)));
+ }
+
+ // See if the assembly[def] is retargetable (i.e., for a generic assembly).
+ if (IsAfRetargetable(dwAssemblyRefFlags)) {
+ BOOL bTrue = TRUE;
+ IfFailThrow(pName->SetProperty(ASM_NAME_RETARGET, &bTrue, sizeof(bTrue)));
+ }
+
+ // Set public key or public key token
+ if (IsAfPublicKey(dwAssemblyRefFlags)) {
+ IfFailThrow(pName->SetProperty(((pvPublicKeyToken && dwPublicKeyToken) ? (ASM_NAME_PUBLIC_KEY) : (ASM_NAME_NULL_PUBLIC_KEY)),
+ pvPublicKeyToken, dwPublicKeyToken * sizeof(BYTE)));
+ }
+ else {
+ IfFailThrow(pName->SetProperty(((pvPublicKeyToken && dwPublicKeyToken) ? (ASM_NAME_PUBLIC_KEY_TOKEN) : (ASM_NAME_NULL_PUBLIC_KEY_TOKEN)),
+ pvPublicKeyToken, dwPublicKeyToken * sizeof(BYTE)));
+ }
+
+ // Set Content Type
+ if (!IsAfContentType_Default(dwAssemblyRefFlags))
+ {
+ if (IsAfContentType_WindowsRuntime(dwAssemblyRefFlags))
+ {
+ DWORD dwContentType = AssemblyContentType_WindowsRuntime;
+ IfFailThrow(pName->SetProperty(ASM_NAME_CONTENT_TYPE, (LPBYTE)&dwContentType, sizeof(dwContentType)));
+ }
+ else
+ {
+ IfFailThrow(COR_E_BADIMAGEFORMAT);
+ }
+ }
+
+ pName.SuppressRelease();
+ return pName;
+ }
+
+ void GetAssemblyDisplayNameFromIAssemblyName(IAssemblyName *pAssemblyName, SString *pStringName)
+ {
+ STANDARD_VM_CONTRACT;
+
+ DWORD cchDisplayName = 0;
+ HRESULT hr = pAssemblyName->GetDisplayName(NULL, &cchDisplayName, ASM_DISPLAYF_FULL);
+ if (hr != HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ {
+ IfFailThrow(hr);
+ }
+
+ IfFailThrow(pAssemblyName->GetDisplayName(pStringName->OpenUnicodeBuffer(cchDisplayName), &cchDisplayName, ASM_DISPLAYF_FULL));
+ pStringName->CloseBuffer(cchDisplayName-1);
+ }
+
+ ULONG EmitAssemblyName(IMDInternalImport *pMDImport, mdToken assemblyToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ ReleaseHolder<IAssemblyName> pAsmName = CreateAssemblyNameFromAssemblyToken(pMDImport, assemblyToken);
+ StackSString ssAssemblyName;
+ GetAssemblyDisplayNameFromIAssemblyName(pAsmName, &ssAssemblyName);
+ StackScratchBuffer scratchBufferUtf8;
+
+ return EmitName(ssAssemblyName.GetUTF8(scratchBufferUtf8));
+ }
+
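+ // Walk the AssemblyRef, ModuleRef, TypeRef, TypeSpec, MemberRef and
+ // MethodSpec tables of the IL metadata and populate the corresponding
+ // external reference tables of the ZapImage.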
+ void CreateExternalReferences()
+ {
+ STANDARD_VM_CONTRACT;
+
+ IMDInternalImport *pMDImport = m_pModule->GetMDImport();
+
+ HENUMInternalHolder hEnumAssemblyRef(pMDImport);
+ hEnumAssemblyRef.EnumAllInit(mdtAssemblyRef);
+ COUNT_T assemblyRefCount = hEnumAssemblyRef.EnumGetCount();
+ m_pZapImage->m_extModRef.SetCount(assemblyRefCount+1);
+ mdAssemblyRef assemblyRefToken;
+// printf("Assembly refs: %d\n", assemblyRefCount);
+
+ // Initialize values
+ m_pZapImage->m_extModRef[0].name = 0xdeafbeef;
+ m_pZapImage->m_extModRef[0].flags = ZapImage::ExtModRef::NO_FLAGS;
+
+ while (hEnumAssemblyRef.EnumNext(&assemblyRefToken))
+ {
+ COUNT_T rid = RidFromToken(assemblyRefToken);
+ m_pZapImage->m_extModRef[rid].name = EmitAssemblyName(pMDImport, assemblyRefToken);
+ m_pZapImage->m_extModRef[rid].flags = ZapImage::ExtModRef::IS_FROM_IL_METADATA;
+// printf(" %d = %s\n", rid, assemblyName);
+ }
+
+
+ HENUMInternalHolder hEnumModuleRef(pMDImport);
+ hEnumModuleRef.EnumAllInit(mdtModuleRef);
+ COUNT_T moduleRefCount = hEnumModuleRef.EnumGetCount();
+ m_pZapImage->m_extModRef.SetCount(assemblyRefCount+moduleRefCount+1);
+// printf("Module refs: %d\n", moduleRefCount);
+ mdModuleRef moduleRefToken;
+ while (hEnumModuleRef.EnumNext(&moduleRefToken))
+ {
+ COUNT_T rid = RidFromToken(moduleRefToken);
+ // we set the module names lazily because we don't want names from PInvokes
+ // (e.g. kernel32.dll) to end up in here. We set the name only when we
+ // encounter a type ref from this module
+ m_pZapImage->m_extModRef[assemblyRefCount + rid].name = 0;
+ m_pZapImage->m_extModRef[assemblyRefCount + rid].flags = ZapImage::ExtModRef::IS_MODULE_REF;
+ }
+
+ HENUMInternalHolder hEnumTypeRef(pMDImport);
+ hEnumTypeRef.EnumAllInit(mdtTypeRef);
+ ULONG typeRefCount = hEnumTypeRef.EnumGetCount();
+ mdTypeRef typeRefToken;
+
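+ // reserve extra space for type refs injected later by the compilation process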
+ m_pZapImage->m_extTypeRef.Preallocate((typeRefCount + 1) * 2);
+ m_pZapImage->m_extTypeRefExtend.Preallocate((typeRefCount + 1) * 2);
+
+ m_pZapImage->m_extTypeRef.SetCount(typeRefCount+1);
+ // Initialize (unused) value
+ m_pZapImage->m_extTypeRef[0].module = 0;
+ m_pZapImage->m_extTypeRef[0].ordinal = 0;
+ while (hEnumTypeRef.EnumNext(&typeRefToken))
+ {
+ COUNT_T rid = RidFromToken(typeRefToken);
+ LPCSTR nameSpace;
+ LPCSTR typeName;
+ pMDImport->GetNameOfTypeRef(typeRefToken, &nameSpace, &typeName);
+// printf("%08x %s.%s", typeRefToken, nameSpace, typeName);
+ mdToken scopeToken;
+ pMDImport->GetResolutionScopeOfTypeRef(typeRefToken, &scopeToken);
+
+ // We found a case where a typeref had itself as the resolution scope,
+ // possibly to hinder reverse engineering, so limit the number of
+ // iterations and pretend the type has not been loaded if we reach the
+ // limit (bug #286371).
+ int iter = 0;
+ const int maxIter = 1000;
+ while (pMDImport->IsValidToken(scopeToken) && TypeFromToken(scopeToken) == mdtTypeRef && iter < maxIter)
+ {
+ pMDImport->GetNameOfTypeRef(scopeToken, &nameSpace, &typeName);
+// printf(" from %08x %s.%s", scopeToken, nameSpace, typeName);
+ pMDImport->GetResolutionScopeOfTypeRef(scopeToken, &scopeToken);
+ iter++;
+ }
+
+ EEClass *extType = NULL;
+ if (iter < maxIter)
+ extType = LoadTypeRef(m_pModule, typeRefToken, ReturnNullOnLoadFailure);
+ m_pZapImage->m_extTypeRef[rid].ordinal =
+ extType ? RidFromToken(extType->GetMethodTable()->GetCl())
+ : 0;
+
+ COUNT_T scopeRid = RidFromToken(scopeToken);
+ if (!pMDImport->IsValidToken(scopeToken))
+ {
+ m_pZapImage->m_extTypeRef[rid].module = 0;
+ }
+ else if (TypeFromToken(scopeToken) == mdtAssemblyRef)
+ {
+ if (extType != NULL)
+ {
+ Module *extModule = extType->GetMethodTable()->GetModule();
+
+ // Use the assembly ref rid as module id, and let the binder do any resolution to correct
+ // assembly, or any type forwarding necessary. This allows type-forwarding detection from
+ // within signatures to operate properly.
+ scopeRid = RidFromToken(scopeToken);
+
+ LoadHintEnum loadHint = LoadDefault;
+ LoadHintEnum defaultLoadHint = LoadDefault;
+ m_pZapImage->GetCompileInfo()->GetLoadHint((CORINFO_ASSEMBLY_HANDLE)m_pModule->GetAssembly(),
+ (CORINFO_ASSEMBLY_HANDLE)extModule->GetAssembly(),
+ &loadHint,
+ &defaultLoadHint);
+ if (loadHint == LoadAlways)
+ {
+ m_pZapImage->m_extModRef[scopeRid].flags =
+ (ZapImage::ExtModRef::ExtModRefFlags)(m_pZapImage->m_extModRef[scopeRid].flags | ZapImage::ExtModRef::IS_EAGERLY_BOUND);
+ }
+ }
+ m_pZapImage->m_extTypeRef[rid].module = scopeRid;
+ }
+ else if (TypeFromToken(scopeToken) == mdtModuleRef)
+ {
+// assert(!"TypeRef with ModuleRef scope currently not supported");
+ COUNT_T moduleRid = assemblyRefCount + scopeRid;
+ m_pZapImage->m_extTypeRef[rid].module = moduleRid;
+
+ // set the module name because now we know the module is referenced
+ if (m_pZapImage->m_extModRef[moduleRid].name == 0)
+ {
+ LPCSTR moduleName;
+ assert((m_pZapImage->m_extModRef[moduleRid].flags & ZapImage::ExtModRef::IS_MODULE_REF) != 0);
+ pMDImport->GetModuleRefProps(scopeToken, &moduleName);
+ m_pZapImage->m_extModRef[moduleRid].name = EmitName(moduleName);
+ m_pZapImage->m_extModRef[moduleRid].flags =
+ (ZapImage::ExtModRef::ExtModRefFlags)(m_pZapImage->m_extModRef[moduleRid].flags | ZapImage::ExtModRef::IS_LOCAL_MODULE);
+ }
+ }
+ else if (TypeFromToken(scopeToken) == mdtModule)
+ {
+ // haven't figured this case out yet - skip for now;
+ // there are also bogus entries with a resolution scope of 0 (??)
+ assert(scopeToken == TokenFromRid(1, mdtModule) || scopeToken == 0);
+ m_pZapImage->m_extTypeRef[rid].module = 0;
+ }
+ else if (TypeFromToken(scopeToken) == mdtTypeRef)
+ {
+ // this must be the case of the long (or infinite)
+ // typeref chain - give up
+ assert(iter >= maxIter);
+ m_pZapImage->m_extTypeRef[rid].module = 0;
+ }
+ else
+ {
+ // hmm, ignore these cases for now
+ assert(!"NYI");
+// printf("scopeToken = %08x\n", scopeToken);
+ }
+ }
+
+ HENUMInternalHolder hEnumTypeSpec(pMDImport);
+ hEnumTypeSpec.EnumAllInit(mdtTypeSpec);
+ ULONG typeSpecCount = hEnumTypeSpec.EnumGetCount();
+ m_pZapImage->m_typeSpecToOffs.SetCount(typeSpecCount+1);
+
+ // init (unused) data
+ m_pZapImage->m_typeSpecToOffs[0] = 0;
+
+ mdTypeSpec typeSpecToken;
+ size_t typeSpecSize = 0;
+ while (hEnumTypeSpec.EnumNext(&typeSpecToken))
+ {
+ PCCOR_SIGNATURE pSig;
+ ULONG cbSig;
+ pMDImport->GetTypeSpecFromToken(typeSpecToken, &pSig, &cbSig);
+
+ //printf("%08x: ", typeSpecToken);
+ //for (ULONG i = 0; i < cbSig; i++)
+ // printf("%02x ", pSig[i]);
+ //printf("\n");
+
+ SigPointer sig(pSig, cbSig);
+ EncodeType(&sig, m_stream);
+
+ size_t size;
+ BYTE *buffer = m_stream->GetBuffer(size);
+ m_pZapImage->FlushCompactLayoutData(typeSpecToken, buffer, (ULONG)size);
+ m_stream->Reset();
+ typeSpecSize += size;
+ }
+// printf("total encoded typespec size is %u for %d typespecs - average %5.1f\n", typeSpecSize, typeSpecCount, (double)typeSpecSize/typeSpecCount);
+
+ HENUMInternalHolder hEnumMemberRef(pMDImport);
+ hEnumMemberRef.EnumAllInit(mdtMemberRef);
+ ULONG memberRefCount = hEnumMemberRef.EnumGetCount();
+
+ m_pZapImage->m_extMemberRef.Preallocate((memberRefCount + 1) * 2);
+ m_pZapImage->m_extMemberRefExtend.Preallocate((memberRefCount + 1) * 2);
+
+ m_pZapImage->m_extMemberRef.SetCount(memberRefCount+1);
+ mdMemberRef memberRefToken;
+
+ // Initialize (unused) value
+ m_pZapImage->m_extMemberRef[0].isTypeSpec = false;
+ m_pZapImage->m_extMemberRef[0].typeRid = 0;
+ m_pZapImage->m_extMemberRef[0].isField = false;
+ m_pZapImage->m_extMemberRef[0].ordinal = 0;
+
+ while (hEnumMemberRef.EnumNext(&memberRefToken))
+ {
+ PCCOR_SIGNATURE pSig;
+ ULONG cbSig;
+ LPCSTR name;
+ IfFailThrow(pMDImport->GetNameAndSigOfMemberRef(memberRefToken, &pSig, &cbSig, &name));
+ mdToken parentToken;
+ IfFailThrow(pMDImport->GetParentOfMemberRef(memberRefToken, &parentToken));
+
+// printf("%08x Parent = %08x Name = %s\n", memberRefToken, parentToken, name);
+ DWORD extRefRid = RidFromToken(memberRefToken);
+ if (TypeFromToken(parentToken) != mdtTypeRef && TypeFromToken(parentToken) != mdtTypeSpec && TypeFromToken(parentToken) != mdtTypeDef)
+ {
+ m_pZapImage->m_extMemberRef[(COUNT_T)extRefRid].isTypeSpec = false;
+ m_pZapImage->m_extMemberRef[(COUNT_T)extRefRid].typeRid = 0;
+ m_pZapImage->m_extMemberRef[(COUNT_T)extRefRid].isField = FALSE;
+ if (TypeFromToken(parentToken) == mdtMethodDef)
+ {
+ m_pZapImage->m_extMemberRef[(COUNT_T)extRefRid].ordinal = RidFromToken(parentToken);
+ }
+ else
+ {
+ m_pZapImage->m_extMemberRef[(COUNT_T)extRefRid].ordinal = 0;
+ printf("MemberRef parent is %08x - giving up\n", parentToken);
+ }
+ continue;
+ }
+
+ EEClass *extType = NULL;
+ if (TypeFromToken(parentToken) == mdtTypeDef)
+ {
+ extType = LoadTypeDef(m_pModule, parentToken, ReturnNullOnLoadFailure);
+
+ // Create a TypeSpec token to point to the type. (The MDIL file format does not
+ // allow a TypeDef to parent a MemberRef, but IL does. We work around this by simply
+ // converting the typedef into a typespec.)
+
+ ByteStreamWriter stream;
+ if ((extType == NULL) || !extType->GetMethodTable()->IsValueType())
+ {
+ // If extType is NULL, whether or not the type is a valuetype does not matter.
+ stream.WriteByte(ELEMENT_TYPE_CLASS);
+ }
+ else
+ {
+ stream.WriteByte(ELEMENT_TYPE_VALUETYPE);
+ }
+ WriteTypeDefOrRef(parentToken, &stream);
+
+ // Get the new TypeSpec token.
+ parentToken = GetTypeSpecToken(&stream);
+ }
+ else
+ {
+ extType = LoadTypeRef(m_pModule, parentToken, ReturnNullOnLoadFailure);
+ }
+
+ m_pZapImage->m_extMemberRef[(COUNT_T)extRefRid].isTypeSpec = TypeFromToken(parentToken) == mdtTypeSpec;
+ m_pZapImage->m_extMemberRef[(COUNT_T)extRefRid].typeRid = RidFromToken(parentToken);
+
+ mdToken extTypeDefToken = 0;
+ Module *extModule = NULL;
+ if (extType != NULL)
+ {
+ extTypeDefToken = extType->GetMethodTable()->GetCl();
+ extModule = extType->GetMethodTable()->GetModule();
+ }
+
+ SigPointer sigPointer(pSig, cbSig);
+ ULONG callingConv;
+ sigPointer.GetCallingConv(&callingConv);
+
+ if (callingConv != IMAGE_CEE_CS_CALLCONV_FIELD)
+ {
+ MethodDesc *pMD = LoadMethod(m_pModule, memberRefToken, ReturnNullOnLoadFailure);
+ ULONG ordinal;
+ if (pMD == NULL)
+ {
+ ordinal = ~0;
+ }
+ else
+ {
+ mdMethodDef methodDefToken = pMD->GetMemberDef();
+ if (IsNilToken(methodDefToken))
+ {
+ // this should only happen for arrays
+ assert(pMD->GetClass()->GetInternalCorElementType() == ELEMENT_TYPE_ARRAY
+ || pMD->GetClass()->GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY);
+ if (pMD->GetClass() != extType)
+ {
+ extType = pMD->GetClass();
+ extTypeDefToken = extType->GetMethodTable()->GetCl();
+ extModule = extType->GetMethodTable()->GetModule();
+ if (TypeFromToken(parentToken) == mdtTypeRef)
+ {
+ ULONG parentOrdinal = FindOrCreateExtTypeRef(extType->GetMethodTable());
+ m_pZapImage->m_extMemberRef[(COUNT_T)extRefRid].typeRid = parentOrdinal;
+ }
+ }
+ ordinal = pMD->GetSlot();
+ }
+ // handle the case where the member specified is in a baseclass of the type
+ // referenced. In this case we do not generate a proper method index and
+ // defer to binding by name/signature.
+ else if (pMD->GetClass() != extType)
+ {
+ ordinal = ~0;
+ }
+ else
+ {
+ ordinal = GetMethodOrdinal(extModule, extTypeDefToken, methodDefToken);
+ }
+ }
+ m_pZapImage->m_extMemberRef[(COUNT_T)extRefRid].isField = FALSE;
+ m_pZapImage->m_extMemberRef[(COUNT_T)extRefRid].ordinal = ordinal;
+ }
+ else
+ {
+ ULONG ordinal;
+ FieldDesc *pFD = LoadField(m_pModule, memberRefToken, ReturnNullOnLoadFailure);
+ if (pFD == NULL)
+ ordinal = ~0;
+ else
+ {
+ // handle the case where the member specified is in a baseclass of the type
+ // referenced. In this case what we generate here is not as versionable as
+ // the original IL; this is something we need to think about...
+ if (pFD->GetApproxEnclosingMethodTable()->GetClass() != extType)
+ {
+ extType = pFD->GetApproxEnclosingMethodTable()->GetClass();
+ extTypeDefToken = extType->GetMethodTable()->GetCl();
+ extModule = extType->GetMethodTable()->GetModule();
+ ULONG parentOrdinal = FindOrCreateExtTypeRef(extType->GetMethodTable());
+ m_pZapImage->m_extMemberRef[(COUNT_T)extRefRid].typeRid = parentOrdinal;
+ }
+ ordinal = GetFieldOrdinal(extModule, extTypeDefToken, pFD->GetMemberDef());
+ }
+ m_pZapImage->m_extMemberRef[(COUNT_T)extRefRid].isField = TRUE;
+ m_pZapImage->m_extMemberRef[(COUNT_T)extRefRid].ordinal = ordinal;
+ }
+ }
+
+ HENUMInternalHolder hEnumMethodSpec(pMDImport);
+ hEnumMethodSpec.EnumAllInit(mdtMethodSpec);
+ ULONG methodSpecCount = hEnumMethodSpec.EnumGetCount();
+ m_pZapImage->m_methodSpecToOffs.SetCount(methodSpecCount+1);
+ mdMethodSpec methodSpecToken;
+ size_t methodSpecSize = 0;
+
+ // Initialize (unused) value
+ m_pZapImage->m_methodSpecToOffs[0] = 0;
+ while (hEnumMethodSpec.EnumNext(&methodSpecToken))
+ {
+ PCCOR_SIGNATURE pSig;
+ ULONG cbSig;
+ unsigned parentToken;
+ pMDImport->GetMethodSpecProps(methodSpecToken, &parentToken, &pSig, &cbSig);
+
+ //printf("%08x: %08x ", methodSpecToken, parentToken);
+ //for (ULONG i = 0; i < cbSig; i++)
+ // printf("%02x ", pSig[i]);
+ //printf("\n");
+
+ if (TypeFromToken(parentToken) == mdtMemberRef)
+ {
+ MethodDesc *pMD = LoadMethod(m_pModule, methodSpecToken, ReturnNullOnLoadFailure);
+ if (pMD != NULL)
+ {
+ COUNT_T extRefRid = RidFromToken(parentToken);
+ assert(m_pZapImage->m_extMemberRef[extRefRid].isField == FALSE);
+
+ MethodTable *pMT = pMD->GetMethodTable();
+ DWORD extTypeDefToken = pMT->GetCl();
+ Module *extModule = pMT->GetModule();
+ assert(extTypeDefToken != 0);
+ DWORD methodDefToken = pMD->GetMemberDef();
+ HENUMInternalHolder hEnumMethodDef(extModule->GetMDImport());
+ hEnumMethodDef.EnumInit(mdtMethodDef, extTypeDefToken);
+ ULONG methodCount = hEnumMethodDef.EnumGetCount();
+ mdMethodDef firstMethodDefToken;
+ hEnumMethodDef.EnumNext(&firstMethodDefToken);
+ ULONG ordinal = methodDefToken - firstMethodDefToken;
+ assert(ordinal < methodCount);
+
+ if (m_pZapImage->m_extMemberRef[extRefRid].ordinal == 0x7fff)
+ m_pZapImage->m_extMemberRef[extRefRid].ordinal = ordinal;
+ else
+ assert(m_pZapImage->m_extMemberRef[extRefRid].ordinal == ordinal);
+ }
+ }
+
+ EncodeMethodSpec(parentToken, pSig, cbSig, m_stream);
+
+ size_t size;
+ BYTE *buffer = m_stream->GetBuffer(size);
+ m_pZapImage->FlushCompactLayoutData(methodSpecToken, buffer, (ULONG)size);
+ m_stream->Reset();
+ methodSpecSize += size;
+ }
+
+ m_pZapImage->m_signatureToOffs.SetCount(1);
+
+ // Initialize (unused) value
+ m_pZapImage->m_signatureToOffs[0] = 0;
+
+// printf("total encoded method spec size is %u for %d methodspecs - average %5.1f\n", methodSpecSize, methodSpecCount, (double)methodSpecSize/methodSpecCount);
+
+ if (m_pModule->IsSystem())
+ {
+ DWORD wellKnownTypes[WKT_COUNT];
+ wellKnownTypes[WKT_OBJECT] = TypeDefOfPrimitive(ELEMENT_TYPE_OBJECT);
+ wellKnownTypes[WKT_STRING] = TypeDefOfPrimitive(ELEMENT_TYPE_STRING);
+ wellKnownTypes[WKT_VALUETYPE] = g_pValueTypeClass->GetCl();
+ wellKnownTypes[WKT_ENUM] = g_pEnumClass->GetCl();
+ wellKnownTypes[WKT_ARRAY] = g_pArrayClass->GetCl();
+
+ wellKnownTypes[WKT_BOOLEAN] = TypeDefOfPrimitive(ELEMENT_TYPE_BOOLEAN);
+ wellKnownTypes[WKT_VOID] = TypeDefOfPrimitive(ELEMENT_TYPE_VOID);
+ wellKnownTypes[WKT_CHAR] = TypeDefOfPrimitive(ELEMENT_TYPE_CHAR);
+ wellKnownTypes[WKT_I1] = TypeDefOfPrimitive(ELEMENT_TYPE_I1);
+ wellKnownTypes[WKT_U1] = TypeDefOfPrimitive(ELEMENT_TYPE_U1);
+ wellKnownTypes[WKT_I2] = TypeDefOfPrimitive(ELEMENT_TYPE_I2);
+ wellKnownTypes[WKT_U2] = TypeDefOfPrimitive(ELEMENT_TYPE_U2);
+ wellKnownTypes[WKT_I4] = TypeDefOfPrimitive(ELEMENT_TYPE_I4);
+ wellKnownTypes[WKT_U4] = TypeDefOfPrimitive(ELEMENT_TYPE_U4);
+ wellKnownTypes[WKT_I8] = TypeDefOfPrimitive(ELEMENT_TYPE_I8);
+ wellKnownTypes[WKT_U8] = TypeDefOfPrimitive(ELEMENT_TYPE_U8);
+ wellKnownTypes[WKT_R4] = TypeDefOfPrimitive(ELEMENT_TYPE_R4);
+ wellKnownTypes[WKT_R8] = TypeDefOfPrimitive(ELEMENT_TYPE_R8);
+ wellKnownTypes[WKT_I] = TypeDefOfPrimitive(ELEMENT_TYPE_I);
+ wellKnownTypes[WKT_U] = TypeDefOfPrimitive(ELEMENT_TYPE_U);
+
+#ifndef FEATURE_CORECLR
+ wellKnownTypes[WKT_MARSHALBYREFOBJECT] = TypeDefOfNamedType("System", "MarshalByRefObject");
+#else
+ wellKnownTypes[WKT_MARSHALBYREFOBJECT] = 0;
+#endif
+ wellKnownTypes[WKT_MULTICASTDELEGATE] = g_pMulticastDelegateClass->GetCl();
+ wellKnownTypes[WKT_NULLABLE] = g_pNullableClass->GetCl();
+ wellKnownTypes[WKT_CANON] = g_pCanonMethodTableClass->GetCl();
+#ifndef FEATURE_CORECLR
+ wellKnownTypes[WKT_TRANSPARENTPROXY] = TypeDefOfNamedType("System.Runtime.Remoting.Proxies", g_TransparentProxyName);
+#else
+ wellKnownTypes[WKT_TRANSPARENTPROXY] = 0;
+#endif
+#ifdef FEATURE_COMINTEROP
+ wellKnownTypes[WKT_COMOBJECT] = g_pBaseCOMObject->GetCl();
+ wellKnownTypes[WKT_WINDOWS_RUNTIME_OBJECT] = TypeDefOfNamedType("System.Runtime.InteropServices.WindowsRuntime", "RuntimeClass");
+#else
+ wellKnownTypes[WKT_COMOBJECT] = 0;
+ wellKnownTypes[WKT_WINDOWS_RUNTIME_OBJECT] = 0;
+#endif
+ wellKnownTypes[WKT_CONTEXTBOUNDOBJECT] = TypeDefOfNamedType("System", "ContextBoundObject");
+ // for (int i = WKT_FIRST; i < WKT_COUNT; i++)
+ // printf("Well known type %u = %08x\n", i, wellKnownTypes[i]);
+
+ wellKnownTypes[WKT_DECIMAL] = TypeDefOfNamedType("System", "Decimal");
+
+ wellKnownTypes[WKT_TYPEDREFERENCE] = TypeDefOfNamedType("System", "TypedReference");
+
+ m_pZapImage->FlushWellKnownTypes(wellKnownTypes, WKT_COUNT);
+ }
+ }
+
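+ // Return a token for the type of one argument or local in a signature:
+ // value types yield their type token, plain reference types need no token
+ // (0 is returned), and generic instantiations of value types as well as
+ // type variables are resolved to a matching typespec token.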
+ virtual
+ mdToken GetTokenForType(InlineContext *inlineContext, CORINFO_ARG_LIST_HANDLE argList)
+ {
+ STANDARD_VM_CONTRACT;
+
+ PCCOR_SIGNATURE pSig = (PCCOR_SIGNATURE)argList;
+ SigBuilder sigBuilder;
+ if (inlineContext != NULL && !inlineContext->IsTrivial())
+ {
+ // expand the signature plugging in the type arguments
+ SigParser sp1(pSig);
+ expandSignature(sp1, inlineContext, sigBuilder);
+
+ DWORD cSig;
+ pSig = (PCCOR_SIGNATURE)sigBuilder.GetSignature(&cSig);
+ }
+ SigParser sp(pSig);
+ CorElementType elType;
+ sp.GetElemType(&elType);
+ while (elType == ELEMENT_TYPE_CMOD_REQD || elType == ELEMENT_TYPE_CMOD_OPT || elType == ELEMENT_TYPE_PINNED)
+ {
+ if (elType != ELEMENT_TYPE_PINNED)
+ {
+ mdToken modifierToken;
+ sp.GetToken(&modifierToken);
+ }
+ sp.GetElemType(&elType);
+ }
+ switch (elType)
+ {
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ mdToken structTypeToken = 0;
+ sp.GetToken(&structTypeToken);
+ return structTypeToken;
+ }
+
+ case ELEMENT_TYPE_TYPEDBYREF:
+ return GetTokenForType(g_TypedReferenceMT);
+
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ // if this is an instantiation of a generic class rather
+ // than a generic value type, we don't need a token, because
+ // this is going to be a reference type no matter what the instantiation is
+ sp.PeekElemType(&elType);
+ if (elType == ELEMENT_TYPE_CLASS)
+ return 0;
+
+ // skip the type token of the generic type
+ sp.SkipExactlyOne();
+ // skip the instantiation arguments
+ ULONG typeArgCount;
+ sp.GetData(&typeArgCount);
+ for (ULONG typeArgIndex = 0; typeArgIndex < typeArgCount; typeArgIndex++)
+ {
+ sp.SkipExactlyOne();
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_VAR:
+ {
+ ULONG argIndex;
+ sp.GetData(&argIndex);
+ break;
+ }
+
+ case ELEMENT_TYPE_MVAR:
+ {
+ ULONG argIndex;
+ sp.GetData(&argIndex);
+ break;
+ }
+
+ case ELEMENT_TYPE_INTERNAL:
+ {
+ CORINFO_CLASS_HANDLE type;
+ sp.GetPointer((void **)&type);
+ return GetTokenForType(type);
+ }
+
+ case ELEMENT_TYPE_VOID:
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_FNPTR:
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_SZARRAY:
+ return 0;
+
+ default:
+ // oops?
+ assert(!"signature not yet supported");
+ break;
+ }
+
+ // Ok, we parsed to the end of this argument or local in the signature -
+ // now try to find a matching typespec
+ PCCOR_SIGNATURE end = sp.GetPtr();
+ ULONG length = (ULONG)(end - pSig);
+
+ return GetTypeSpecToken(pSig, length);
+ }
+
+ virtual
+ mdToken GetTokenForMethod(CORINFO_METHOD_HANDLE method)
+ {
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *pMD = GetMethod(method);
+ InlineContext context(pMD->GetModule());
+
+ return TranslateToken(&context, pMD->GetMemberDef());
+ }
+
+ virtual
+ mdToken GetTokenForField(CORINFO_FIELD_HANDLE field)
+ {
+ STANDARD_VM_CONTRACT;
+
+ FieldDesc *pFD = GetField(field);
+ InlineContext context(pFD->GetModule());
+
+ return TranslateToken(&context, pFD->GetMemberDef());
+ }
+
+ virtual
+ mdToken GetTokenForSignature(PCCOR_SIGNATURE sig)
+ {
+ STANDARD_VM_CONTRACT;
+
+ SigBuilder sigBuilder;
+ SigParser sp(sig);
+
+ BYTE b;
+ sp.GetByte(&b);
+ sigBuilder.AppendByte(b);
+
+ ULONG argCount;
+ sp.GetData(&argCount);
+ sigBuilder.AppendData(argCount);
+
+ for (ULONG argIndex = 0; argIndex <= argCount; argIndex++)
+ expandSignature(sp, NULL, sigBuilder);
+
+ return FindOrCreateSignature(sigBuilder);
+ }
+
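+ // Copy one type from sp into sigBuilder, translating embedded tokens into
+ // this module's token space and substituting the inline context's type
+ // arguments for ELEMENT_TYPE_VAR/MVAR when a context is supplied.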
+ void expandSignature(SigParser &sp, InlineContext *context, SigBuilder &sigBuilder)
+ {
+ STANDARD_VM_CONTRACT;
+
+ BYTE elType;
+ sp.GetByte(&elType);
+ mdToken token;
+ while (elType == ELEMENT_TYPE_CMOD_REQD || elType == ELEMENT_TYPE_CMOD_OPT || elType == ELEMENT_TYPE_PINNED)
+ {
+ sigBuilder.AppendByte(elType);
+ if (elType != ELEMENT_TYPE_PINNED)
+ {
+ sp.GetToken(&token);
+ token = TranslateToken(context, token);
+ sigBuilder.AppendToken(token);
+ }
+ sp.GetByte(&elType);
+ }
+ switch (elType)
+ {
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_CLASS:
+ {
+ sigBuilder.AppendByte(elType);
+ sp.GetToken(&token);
+ token = TranslateToken(context, token);
+ sigBuilder.AppendToken(token);
+ break;
+ }
+
+ case ELEMENT_TYPE_INTERNAL:
+ {
+ TypeHandle th;
+ sp.GetPointer((void **)&th);
+
+ if (th.IsNativeValueType())
+ {
+ sigBuilder.AppendByte((CorElementType)ELEMENT_TYPE_NATIVE_VALUETYPE);
+ th = th.GetTypeParam();
+ }
+ token = GetTokenForType((CORINFO_CLASS_HANDLE)th.AsPtr());
+
+ sigBuilder.AppendElementType(th.IsValueType() ? ELEMENT_TYPE_VALUETYPE : ELEMENT_TYPE_CLASS);
+ sigBuilder.AppendToken(token);
+ break;
+ }
+
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ sigBuilder.AppendByte(elType);
+ sp.GetByte(&elType);
+ assert(elType == ELEMENT_TYPE_CLASS || elType == ELEMENT_TYPE_VALUETYPE);
+ sigBuilder.AppendByte(elType);
+ sp.GetToken(&token);
+ token = TranslateToken(context, token);
+ sigBuilder.AppendToken(token);
+ ULONG typeArgCount;
+ sp.GetData(&typeArgCount);
+ sigBuilder.AppendData(typeArgCount);
+ for (ULONG typeArgIndex = 0; typeArgIndex < typeArgCount; typeArgIndex++)
+ {
+ expandSignature(sp, context, sigBuilder);
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_VAR:
+ {
+ ULONG argIndex;
+ sp.GetData(&argIndex);
+ if (context != NULL && context->classTypeArgCount != 0)
+ {
+ _ASSERTE(argIndex < context->classTypeArgCount);
+
+ // a type argument to be expanded
+ SigParser argSp(context->classTypeArgs[argIndex].pSig, (DWORD)context->classTypeArgs[argIndex].cbSig);
+ expandSignature(argSp, NULL, sigBuilder);
+ }
+ else
+ {
+ // a type argument to be just copied
+ sigBuilder.AppendByte(elType);
+ sigBuilder.AppendData(argIndex);
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_MVAR:
+ {
+ ULONG argIndex;
+ sp.GetData(&argIndex);
+ if (context != NULL && context->methodTypeArgCount != 0)
+ {
+ _ASSERTE(argIndex < context->methodTypeArgCount);
+
+ // a type argument to be expanded
+ SigParser argSp(context->methodTypeArgs[argIndex].pSig, (DWORD)context->methodTypeArgs[argIndex].cbSig);
+ expandSignature(argSp, NULL, sigBuilder);
+ }
+ else
+ {
+ // a type argument to be just copied
+ sigBuilder.AppendByte(elType);
+ sigBuilder.AppendData(argIndex);
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_VOID:
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_TYPEDBYREF:
+ sigBuilder.AppendByte(elType);
+ break;
+
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_NATIVE_VALUETYPE:
+ sigBuilder.AppendByte(elType);
+ expandSignature(sp, context, sigBuilder);
+ break;
+
+ case ELEMENT_TYPE_ARRAY:
+ {
+ sigBuilder.AppendByte(elType);
+ expandSignature(sp, context, sigBuilder);
+ ULONG rank;
+ sp.GetData(&rank);
+ sigBuilder.AppendData(rank);
+ ULONG boundCount;
+ sp.GetData(&boundCount);
+ sigBuilder.AppendData(boundCount);
+ for (ULONG i = 0; i < boundCount; i++)
+ {
+ ULONG bound;
+ sp.GetData(&bound);
+ sigBuilder.AppendData(bound);
+ }
+ ULONG lowerBoundCount;
+ sp.GetData(&lowerBoundCount);
+ sigBuilder.AppendData(lowerBoundCount);
+ for (ULONG i = 0; i < lowerBoundCount; i++)
+ {
+ ULONG bound;
+ sp.GetData(&bound);
+ sigBuilder.AppendData(bound);
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+ BYTE callConv;
+ sp.GetByte(&callConv);
+
+ ULONG argCount;
+ sp.GetData(&argCount);
+
+ sigBuilder.AppendByte(elType);
+ sigBuilder.AppendByte(callConv);
+ sigBuilder.AppendData(argCount);
+
+ for (ULONG i = 0; i <= argCount; i++)
+ {
+ expandSignature(sp, context, sigBuilder);
+ }
+ }
+ break;
+
+ default:
+ // oops?
+ assert(!"signature not yet supported");
+ break;
+ }
+ }
+
+ void expandFieldOrMethodSignature(PCCOR_SIGNATURE pSig, ULONG cbSig, InlineContext *context, SigBuilder &sigBuilder)
+ {
+ STANDARD_VM_CONTRACT;
+
+ SigParser sp(pSig, cbSig);
+ ULONG callingConvention;
+ sp.GetCallingConvInfo(&callingConvention);
+ sigBuilder.AppendByte((BYTE)callingConvention);
+ if (callingConvention == IMAGE_CEE_CS_CALLCONV_FIELD)
+ {
+ // nothing to do here...
+ }
+ else
+ {
+ if (callingConvention & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ {
+ // uncompress number of generic type args
+ ULONG genericArgCount;
+ sp.GetData(&genericArgCount);
+ sigBuilder.AppendData(genericArgCount);
+ }
+ // uncompress number of args
+ ULONG argCount;
+ sp.GetData(&argCount);
+ sigBuilder.AppendData(argCount);
+ }
+ expandSignature(sp, context, sigBuilder);
+ }
+
+ virtual
+ mdToken GetParentOfMemberRef(CORINFO_MODULE_HANDLE scope, mdMemberRef memberRefToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ unsigned parentToken = 0;
+ IMDInternalImport *pMDImport = NULL;
+
+ HRESULT hr = E_FAIL;
+ if (!IsDynamicScope(scope))
+ {
+ Module *module = GetModule(scope);
+ pMDImport = module->GetMDImport();
+ hr = pMDImport->GetParentOfMemberRef(memberRefToken, &parentToken);
+ }
+
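+ // If the memberref is not in the IL metadata, it must be a token injected
+ // by the compilation process; recover its parent from the ext member ref table.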
+ if (FAILED(hr))
+ {
+ COUNT_T memberRefRid = RidFromToken(memberRefToken);
+ COUNT_T typeRid = m_pZapImage->m_extMemberRef[memberRefRid].typeRid;
+ if (m_pZapImage->m_extMemberRef[memberRefRid].isTypeSpec)
+ parentToken = TokenFromRid(typeRid, mdtTypeSpec);
+ else
+ parentToken = TokenFromRid(typeRid, mdtTypeRef);
+ }
+ // For varargs, a memberref can point to a methodDef
+ if (TypeFromToken(parentToken) == mdtMethodDef)
+ {
+ IfFailThrow(pMDImport->GetParentToken(parentToken, &parentToken));
+ }
+ return parentToken;
+ }
+
+ virtual
+ mdToken GetArrayElementToken(CORINFO_MODULE_HANDLE scope, mdTypeSpec arrayTypeToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ assert(TypeFromToken(arrayTypeToken) == mdtTypeSpec);
+ assert(!IsDynamicScope(scope));
+ Module *module = GetModule(scope);
+ PCCOR_SIGNATURE pSig = NULL;
+ ULONG cbSig;
+ IMDInternalImport *pMDImport = module->GetMDImport();
+ pMDImport->GetTypeSpecFromToken(arrayTypeToken, &pSig, &cbSig);
+ assert(pSig[0] == ELEMENT_TYPE_ARRAY);
+
+ InlineContext context(module);
+
+ return GetTokenForType(&context, (CORINFO_ARG_LIST_HANDLE)&pSig[1]);
+ }
+
+ PCCOR_SIGNATURE GetSigOfTypeSpec(Module *module, mdTypeSpec typeSpecToken, ULONG &cbSig)
+ {
+ STANDARD_VM_CONTRACT;
+
+ IMDInternalImport *pMDImport = module->GetMDImport();
+ PCCOR_SIGNATURE pSig = 0;
+ if (FAILED(pMDImport->GetTypeSpecFromToken(typeSpecToken, &pSig, &cbSig)))
+ {
+ // this could be a new token injected by the compilation process -
+ // in this case it has to be local to the current module
+ assert(module == m_pModule);
+
+ pSig = m_tokenToSig.Get(typeSpecToken, cbSig);
+ assert(pSig != NULL);
+ }
+ return pSig;
+ }
+
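+ // Decode the variable-length unsigned encoding used in the compact layout
+ // stream: the count of consecutive low-order 1 bits in the first byte (plus
+ // one) gives the total byte count, and the remaining bits hold the value,
+ // little-endian. For example, 21 encodes in a single byte as
+ // (21 << 1) | 0 == 0x2A; a five-byte encoding stores the full 32-bit value
+ // in the trailing four bytes.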
+ unsigned ReadUnsigned(BYTE *buffer)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ unsigned firstByte = buffer[0];
+ unsigned numberOfBytes = 1;
+ while (firstByte & 1)
+ {
+ numberOfBytes++;
+ firstByte >>= 1;
+ }
+ switch (numberOfBytes)
+ {
+ case 1: return (buffer[0]) >> 1;
+ case 2: return (buffer[0] + buffer[1]*256) >> 2;
+ case 3: return (buffer[0] + buffer[1]*256 + buffer[2]*(256*256)) >> 3;
+ case 4: return (buffer[0] + buffer[1]*256 + buffer[2]*(256*256) + buffer[3]*(256*256*256)) >> 4;
+ case 5: return (buffer[1] + buffer[2]*256 + buffer[3]*(256*256) + buffer[4]*(256*256*256));
+ default: assert(!"invalid encoding"); return 0;
+ }
+ }
+
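+ // The low two bits of the encoding select the token type (methoddef,
+ // memberref or methodspec); the remaining bits are the RID.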
+ mdToken ReadMethodDefOrRef(BYTE *buffer)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ static const mdToken tokenTypes[4] = { 0, mdtMethodDef, mdtMemberRef, mdtMethodSpec };
+ unsigned encoding = ReadUnsigned(buffer);
+ return TokenFromRid(tokenTypes[encoding & 0x3], encoding>>2);
+ }
+
+ unsigned GetParentOfMethodSpec(Module *module, mdMethodSpec methodSpecToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ IMDInternalImport *pMDImport = module->GetMDImport();
+ unsigned parentToken = 0;
+ PCCOR_SIGNATURE pSig = 0;
+ ULONG cbSig = 0;
+ if (SUCCEEDED(pMDImport->GetMethodSpecProps(methodSpecToken, &parentToken, &pSig, &cbSig)))
+ {
+ return parentToken;
+ }
+
+ // this could be a new token injected by the compilation process - in this case
+ // it HAS to be local to the current module
+ assert(module == m_pModule);
+
+ COUNT_T methodSpecRid = RidFromToken(methodSpecToken);
+ COUNT_T offs = m_pZapImage->m_methodSpecToOffs[methodSpecRid];
+ BYTE *buffer = &m_pZapImage->m_compactLayoutBuffer[offs];
+ return ReadMethodDefOrRef(buffer);
+ }
+
+ PCCOR_SIGNATURE GetSigOfMethodSpec(Module *module, mdMethodSpec methodSpecToken, ULONG &cbSig)
+ {
+ STANDARD_VM_CONTRACT;
+
+ IMDInternalImport *pMDImport = module->GetMDImport();
+ unsigned parentToken = 0;
+ PCCOR_SIGNATURE pSig = 0;
+ if (SUCCEEDED(pMDImport->GetMethodSpecProps(methodSpecToken, &parentToken, &pSig, &cbSig)))
+ {
+ return pSig;
+ }
+
+ // this could be a new token injected by the compilation process - in this case
+ // it HAS to be local to the current module
+ assert(module == m_pModule);
+
+ return m_tokenToSig.Get(methodSpecToken, cbSig);
+ }
+
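+ // Populate an inline context from the token used at the call site: method
+ // type arguments come from a methodspec signature, class type arguments
+ // from the parent typespec, and a value-type constraint redirects the
+ // parent to the constraint type (see the rationale below).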
+ void FillInlineContext(InlineContext *inlineContext, InlineContext *outerContext, unsigned methodOrFieldToken, unsigned constraintTypeToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ Module *module = outerContext == NULL ? m_pModule : outerContext->GetModule();
+ IMDInternalImport *pMDImport = module ->GetMDImport();
+ unsigned parentToken = 0;
+ _ASSERTE(inlineContext->IsTrivial());
+ switch (TypeFromToken(methodOrFieldToken))
+ {
+ case mdtMethodDef:
+ break;
+
+ case mdtFieldDef:
+ break;
+
+ case mdtMemberRef:
+ parentToken = GetParentOfMemberRef((CORINFO_MODULE_HANDLE)module, methodOrFieldToken);
+ break;
+
+ case mdtMethodSpec:
+ {
+ parentToken = GetParentOfMethodSpec(module, methodOrFieldToken);
+ ULONG cbSig = 0;
+ PCCOR_SIGNATURE pSig = GetSigOfMethodSpec(module, methodOrFieldToken, cbSig);
+ SigPointer sp(pSig, cbSig);
+ BYTE callingConvention;
+ sp.GetByte(&callingConvention);
+ assert(callingConvention == IMAGE_CEE_CS_CALLCONV_GENERICINST);
+ ULONG typeArgCount;
+ sp.GetData(&typeArgCount);
+ _ASSERTE(typeArgCount < InlineContext::MAX_TYPE_ARGS);
+ inlineContext->methodTypeArgCount = typeArgCount;
+ inlineContext->methodTypeArgs = new InlineContext::TypeArg[typeArgCount];
+ for (ULONG typeArgIndex = 0; typeArgIndex < typeArgCount; typeArgIndex++)
+ {
+ inlineContext->methodTypeArgs[typeArgIndex].pSig = sp.GetPtr();
+ sp.SkipExactlyOne();
+ inlineContext->methodTypeArgs[typeArgIndex].cbSig = sp.GetPtr() - inlineContext->methodTypeArgs[typeArgIndex].pSig;
+ }
+ switch (TypeFromToken(parentToken))
+ {
+ case mdtMethodDef:
+ break;
+
+ case mdtMemberRef:
+ parentToken = GetParentOfMemberRef((CORINFO_MODULE_HANDLE)module, parentToken);
+ break;
+
+ default:
+ assert(!"bad token type");
+ return;
+ }
+ }
+ break;
+ }
+
+ if (constraintTypeToken != 0)
+ {
+ // Rationale: we will only inline a method different from the declared
+ // method if the definition/implementation is in a value type,
+ // otherwise we let the normal virtual or interface dispatch happen
+ // (see MethodTable::TryResolveConstraintMethodApprox)
+ //
+ // if the constraint type is a value type, then the
+ // method we're inlining is defined in the value type
+ //
+ // this only matters if this is a generic value type instantiation
+ // if the implementation is from a non-generic value type,
+ // the instantiation type arguments will not matter, because there can be no references to them
+
+ if (TypeFromToken(constraintTypeToken) == mdtTypeSpec)
+ {
+ ULONG cbctSig;
+ PCCOR_SIGNATURE pctSig = GetSigOfTypeSpec(module, constraintTypeToken, cbctSig);
+
+ SigParser sp((PCCOR_SIGNATURE)pctSig, cbctSig);
+
+ CorElementType elType;
+ sp.GetElemType(&elType);
+ if (elType == ELEMENT_TYPE_GENERICINST)
+ {
+ sp.GetElemType(&elType);
+ assert(elType == ELEMENT_TYPE_CLASS || elType == ELEMENT_TYPE_VALUETYPE);
+ if (elType == ELEMENT_TYPE_VALUETYPE)
+ parentToken = constraintTypeToken;
+ }
+ }
+ }
+
+ if (TypeFromToken(parentToken) == mdtTypeSpec)
+ {
+ ULONG cbtSig;
+ PCCOR_SIGNATURE ptSig = GetSigOfTypeSpec(module, parentToken, cbtSig);
+
+ SigParser sp((PCCOR_SIGNATURE)ptSig, cbtSig);
+
+ CorElementType elType;
+ sp.GetElemType(&elType);
+ if (elType == ELEMENT_TYPE_GENERICINST)
+ {
+ sp.GetElemType(&elType);
+ assert(elType == ELEMENT_TYPE_CLASS || elType == ELEMENT_TYPE_VALUETYPE);
+ sp.GetToken(&parentToken);
+ // skip the instantiation arguments
+ ULONG typeArgCount;
+ sp.GetData(&typeArgCount);
+ _ASSERTE(typeArgCount < InlineContext::MAX_TYPE_ARGS);
+ inlineContext->classTypeArgCount = typeArgCount;
+ inlineContext->classTypeArgs = new InlineContext::TypeArg[typeArgCount];
+ for (ULONG typeArgIndex = 0; typeArgIndex < typeArgCount; typeArgIndex++)
+ {
+ inlineContext->classTypeArgs[typeArgIndex].pSig = sp.GetPtr();
+ sp.SkipExactlyOne();
+ inlineContext->classTypeArgs[typeArgIndex].cbSig = sp.GetPtr() - inlineContext->classTypeArgs[typeArgIndex].pSig;
+ }
+ }
+ }
+ }
+
+ PCCOR_SIGNATURE GetSigOfMemberRef(Module *module, mdMemberRef memberRefToken, ULONG &cbSig)
+ {
+ STANDARD_VM_CONTRACT;
+
+ LPCSTR szName_Ignore;
+ IMDInternalImport *pMDImport = module->GetMDImport();
+ PCCOR_SIGNATURE pSig = 0;
+ if (FAILED(pMDImport->GetNameAndSigOfMemberRef(memberRefToken, &pSig, &cbSig, &szName_Ignore)))
+ {
+ // this could be a new token injected by the compilation process - in this case
+ // it HAS to be local to the current module
+ assert(module == m_pModule);
+
+ pSig = m_tokenToSig.Get(memberRefToken, cbSig);
+ assert(pSig != NULL);
+ }
+ return pSig;
+ }
+
+ virtual
+ mdToken GetTypeTokenForFieldOrMethod(mdToken fieldOrMethodToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ Module *module = m_pModule;
+ IMDInternalImport *pMDImport = module->GetMDImport();
+ PCCOR_SIGNATURE pSig = 0;
+ ULONG cbSig = 0;
+ unsigned parentToken = 0;
+ InlineContext context(m_pModule);
+
+ FillInlineContext(&context, NULL, fieldOrMethodToken, 0);
+ switch (TypeFromToken(fieldOrMethodToken))
+ {
+ case mdtMethodDef:
+ IfFailThrow(pMDImport->GetSigOfMethodDef(fieldOrMethodToken, &cbSig, &pSig));
+ break;
+
+ case mdtFieldDef:
+ IfFailThrow(pMDImport->GetSigOfFieldDef(fieldOrMethodToken, &cbSig, &pSig));
+ break;
+
+ case mdtMemberRef:
+ pSig = GetSigOfMemberRef(module, fieldOrMethodToken, cbSig);
+ parentToken = GetParentOfMemberRef((CORINFO_MODULE_HANDLE)module, fieldOrMethodToken);
+ break;
+
+ case mdtMethodSpec:
+ {
+ parentToken = GetParentOfMethodSpec(module, fieldOrMethodToken);
+
+ switch (TypeFromToken(parentToken))
+ {
+ case mdtMethodDef:
+ IfFailThrow(pMDImport->GetSigOfMethodDef(parentToken, &cbSig, &pSig));
+ break;
+
+ case mdtMemberRef:
+ pSig = GetSigOfMemberRef(module, parentToken, cbSig);
+ parentToken = GetParentOfMemberRef((CORINFO_MODULE_HANDLE)module, parentToken);
+ break;
+
+ default:
+ assert(!"bad token type");
+ return 0;
+ }
+ break;
+ }
+
+ default:
+ assert(!"bad token type");
+ return 0;
+ }
+
+ BYTE *sigPtr = (BYTE *)pSig;
+ BYTE callingConvention = *sigPtr++;
+ pSig = (PCCOR_SIGNATURE)sigPtr;
+ if (callingConvention == IMAGE_CEE_CS_CALLCONV_FIELD)
+ {
+ // nothing to do here...
+ }
+ else
+ {
+ if (callingConvention & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ {
+ // uncompress number of generic type args
+ CorSigUncompressData(pSig);
+ }
+ // uncompress number of args
+ CorSigUncompressData(pSig);
+ }
+
+ return GetTokenForType(&context, (CORINFO_ARG_LIST_HANDLE)pSig);
+ }
+
+ virtual
+ mdToken GetEnclosingClassToken(InlineContext *inlineContext, CORINFO_METHOD_HANDLE methHnd)
+ {
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *methodDesc = (MethodDesc *)methHnd;
+ assert(methodDesc->GetModule() == (inlineContext != NULL ? inlineContext->m_inlineeModule : m_pModule));
+ MethodTable *pMT = methodDesc->GetMethodTable();
+
+ unsigned typeToken = GetToken(pMT);
+ if (!pMT->HasInstantiation())
+ return typeToken;
+
+ // encode type<!0,!1,...> and get a typespec token for it
+ SigBuilder sb;
+ sb.AppendElementType(ELEMENT_TYPE_GENERICINST);
+ CorElementType elType = pMT->IsValueType() ? ELEMENT_TYPE_VALUETYPE : ELEMENT_TYPE_CLASS;
+ sb.AppendElementType(elType);
+ sb.AppendToken(TranslateToken(inlineContext, typeToken));
+ sb.AppendData(pMT->GetNumGenericArgs());
+ Instantiation instantiation = pMT->GetInstantiation();
+ for (DWORD i = 0; i < instantiation.GetNumArgs(); i++)
+ {
+ if (inlineContext != NULL && inlineContext->classTypeArgCount != 0)
+ {
+ _ASSERTE(i < inlineContext->classTypeArgCount);
+
+ // a type argument to be expanded
+ SigParser argSp(inlineContext->classTypeArgs[i].pSig, (DWORD)inlineContext->classTypeArgs[i].cbSig);
+ expandSignature(argSp, NULL, sb);
+ }
+ else
+ {
+ sb.AppendElementType(ELEMENT_TYPE_VAR);
+ sb.AppendData(i);
+ }
+ }
+ DWORD cbSig;
+ PCCOR_SIGNATURE pSig = (PCCOR_SIGNATURE)sb.GetSignature(&cbSig);
+
+ return GetTypeSpecToken(pSig, cbSig);
+ }
+
+ virtual
+ mdToken GetCurrentMethodToken(InlineContext *inlineContext, CORINFO_METHOD_HANDLE methHnd)
+ {
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *methodDesc = (MethodDesc *)methHnd;
+ MethodTable *methodTable = methodDesc->GetMethodTable();
+ Module *methodModule = methodTable->GetModule();
+ unsigned methodToken = methodDesc->GetMemberDef();
+ if (!methodDesc->HasClassOrMethodInstantiation() && methodModule == m_pModule)
+ return methodToken;
+ unsigned enclosingClassToken = GetEnclosingClassToken(inlineContext, methHnd);
+ if (TypeFromToken(enclosingClassToken) != mdtTypeDef)
+ {
+ HENUMInternalHolder hEnumMethodDef(methodModule->GetMDImport());
+ hEnumMethodDef.EnumInit(mdtMethodDef, methodTable->GetCl());
+ ULONG methodCount = hEnumMethodDef.EnumGetCount();
+ mdMethodDef firstMethodDefToken;
+ hEnumMethodDef.EnumNext(&firstMethodDefToken);
+ ULONG ordinal = methodToken - firstMethodDefToken;
+ assert(ordinal < methodCount);
+ methodToken = FindOrCreateExtMemberRef(enclosingClassToken, FALSE, ordinal, methodDesc->GetNameOnNonArrayClass(), methodModule, methodToken);
+ }
+ if (!methodDesc->HasMethodInstantiation())
+ return methodToken;
+
+ // encode methodToken<!0,!1,...> and get a methodspec token for it
+ SigBuilder sb;
+ sb.AppendByte(IMAGE_CEE_CS_CALLCONV_GENERICINST);
+ sb.AppendData(methodDesc->GetNumGenericMethodArgs());
+ Instantiation instantiation = methodDesc->GetMethodInstantiation();
+ for (DWORD i = 0; i < methodDesc->GetNumGenericMethodArgs(); i++)
+ {
+ if (inlineContext != NULL && inlineContext->methodTypeArgCount != 0)
+ {
+ _ASSERTE(i < inlineContext->methodTypeArgCount);
+
+ // a type argument to be expanded
+ SigParser argSp(inlineContext->methodTypeArgs[i].pSig, (DWORD)inlineContext->methodTypeArgs[i].cbSig);
+ expandSignature(argSp, NULL, sb);
+ }
+ else
+ {
+ sb.AppendElementType(ELEMENT_TYPE_MVAR);
+ sb.AppendData(i);
+ }
+ }
+
+ return FindOrCreateMethodSpec(methodToken, sb);
+ }
+
+ virtual
+ bool IsDynamicScope(CORINFO_MODULE_HANDLE scope)
+ {
+ STANDARD_VM_CONTRACT;
+ return ::IsDynamicScope(scope);
+ }
+
+ virtual
+ InlineContext *ComputeInlineContext(InlineContext *outerContext, unsigned inlinedMethodToken, unsigned constraintTypeToken, CORINFO_METHOD_HANDLE methHnd)
+ {
+ STANDARD_VM_CONTRACT;
+
+ InlineContext inlineContext(m_pModule);
+
+ FillInlineContext(&inlineContext, outerContext, inlinedMethodToken, constraintTypeToken);
+
+ MethodDesc *inlineeMethod = (MethodDesc *)methHnd;
+ inlineContext.m_inlineeModule = inlineeMethod->GetModule();
+
+ if (inlineContext.IsTrivial())
+ return NULL;
+ else
+ return new InlineContext(inlineContext);
+ }
+
+ ULONG GetFieldOrdinal(Module *module, mdTypeDef parentToken, mdFieldDef fieldDefToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ IMDInternalImport *pMDImport = module->GetMDImport();
+ HENUMInternalHolder hEnumFieldDef(pMDImport);
+ hEnumFieldDef.EnumInit(mdtFieldDef, parentToken);
+ ULONG fieldCount = hEnumFieldDef.EnumGetCount();
+ mdFieldDef firstFieldDefToken;
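+ // skip literal (const) fields: ordinals are assigned relative to the first
+ // non-literal field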
+ while (hEnumFieldDef.EnumNext(&firstFieldDefToken))
+ {
+ DWORD fieldAttr;
+ IfFailThrow(pMDImport->GetFieldDefProps(firstFieldDefToken, &fieldAttr));
+ if (!IsFdLiteral(fieldAttr))
+ break;
+ }
+ ULONG ordinal = fieldDefToken - firstFieldDefToken;
+ assert(ordinal < fieldCount);
+ return ordinal;
+ }
+
+ virtual DWORD GetFieldOrdinal(CORINFO_MODULE_HANDLE tokenScope, unsigned fieldToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ assert(TypeFromToken(fieldToken) == mdtFieldDef);
+ Module *module = GetModule(tokenScope);
+ mdToken parentToken;
+ IMDInternalImport *pMDImport = module->GetMDImport();
+ IfFailThrow(pMDImport->GetParentToken(fieldToken, &parentToken));
+ return GetFieldOrdinal(module, parentToken, fieldToken);
+ }
+
+ ULONG GetMethodOrdinal(Module *module, mdTypeDef parentToken, mdMethodDef methodDefToken)
+ {
+ STANDARD_VM_CONTRACT;
+
+ assert(parentToken != 0);
+ HENUMInternalHolder hEnumMethodDef(module->GetMDImport());
+ hEnumMethodDef.EnumInit(mdtMethodDef, parentToken);
+ ULONG methodCount = hEnumMethodDef.EnumGetCount();
+ mdMethodDef firstMethodDefToken;
+ hEnumMethodDef.EnumNext(&firstMethodDefToken);
+ ULONG ordinal = methodDefToken - firstMethodDefToken;
+ assert(ordinal < methodCount);
+ return ordinal;
+ }
+
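+ // Record the signature for a token created during translation: the original
+ // signature is expanded into this module's token space and cached so later
+ // lookups of the new token can find it.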
+ void SetTranslatedSig(Module *module, mdToken translatedToken, PCCOR_SIGNATURE pOrgSig, ULONG cbOrgSig)
+ {
+ STANDARD_VM_CONTRACT;
+
+ InlineContext inlineContext(m_pModule);
+ inlineContext.m_inlineeModule = module;
+
+ SigBuilder sigBuilder;
+ expandFieldOrMethodSignature(pOrgSig, cbOrgSig, &inlineContext, sigBuilder);
+ ULONG cbTranslatedSig;
+ PCCOR_SIGNATURE pTranslatedSig = (PCCOR_SIGNATURE)sigBuilder.GetSignature(&cbTranslatedSig);
+ m_tokenToSig.Set(translatedToken, pTranslatedSig, cbTranslatedSig);
+ }
+
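+ // Translate a metadata token (possibly from an inlinee's module) into a
+ // token that is meaningful in the current module, creating ext type refs,
+ // ext member refs, typespecs or methodspecs as needed.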
+ virtual
+ unsigned TranslateToken(InlineContext *inlineContext, mdToken token)
+ {
+ STANDARD_VM_CONTRACT;
+
+ Module *module = inlineContext == NULL ? m_pModule : inlineContext->GetModule();
+ IMDInternalImport *pMDImport = module->GetMDImport();
+
+ switch (TypeFromToken(token))
+ {
+ case mdtTypeDef:
+ {
+ if (module == m_pModule)
+ return token;
+
+ EEClass *eeClass = LoadTypeDef(module, token);
+ assert(eeClass != 0);
+ MethodTable *pMT = eeClass->GetMethodTable();
+
+ ULONG typeRefRid = FindOrCreateExtTypeRef(pMT);
+ return TokenFromRid(mdtTypeRef, typeRefRid);
+ }
+ break;
+
+ case mdtFieldDef:
+ {
+ if (module == m_pModule)
+ return token;
+ mdToken parentToken;
+ IfFailThrow(pMDImport->GetParentToken(token, &parentToken));
+ mdToken translatedParentToken = TranslateToken(inlineContext, parentToken);
+ ULONG fieldOrdinal = GetFieldOrdinal(module, parentToken, token);
+
+ LPCUTF8 pszName;
+ IfFailThrow(pMDImport->GetNameOfFieldDef(token, &pszName));
+
+ mdToken translatedToken = FindOrCreateExtMemberRef(translatedParentToken, true, fieldOrdinal, pszName, module, token);
+ ULONG cbSig;
+ PCCOR_SIGNATURE pSig;
+ IfFailThrow(pMDImport->GetSigOfFieldDef(token, &cbSig, &pSig));
+ SetTranslatedSig(module, translatedToken, pSig, cbSig);
+ return translatedToken;
+ }
+
+ case mdtMethodDef:
+ {
+ if (module == m_pModule)
+ return token;
+ mdToken parentToken;
+ IfFailThrow(pMDImport->GetParentToken(token, &parentToken));
+ mdToken translatedParentToken = TranslateToken(inlineContext, parentToken);
+ ULONG methodOrdinal = GetMethodOrdinal(module, parentToken, token);
+
+ LPCUTF8 pszName;
+ IfFailThrow(pMDImport->GetNameOfMethodDef(token, &pszName));
+
+ mdToken translatedToken = FindOrCreateExtMemberRef(translatedParentToken, false, methodOrdinal, pszName, module, token);
+ ULONG cbSig;
+ PCCOR_SIGNATURE pSig;
+ IfFailThrow(pMDImport->GetSigOfMethodDef(token, &cbSig, &pSig));
+ SetTranslatedSig(module, translatedToken, pSig, cbSig);
+ return translatedToken;
+ }
+
+ case mdtTypeRef:
+ {
+ if (module == m_pModule)
+ return token;
+ EEClass *eeClass = LoadTypeRef(module, token);
+ assert(eeClass != 0);
+ MethodTable *pMT = eeClass->GetMethodTable();
+ if (pMT->GetModule() == m_pModule)
+ {
+ // if this happens to be from our own module, we can just
+ // return the typedef token
+ return pMT->GetCl();
+ }
+ else
+ {
+ ULONG typeRefRid = FindOrCreateExtTypeRef(pMT);
+ return TokenFromRid(mdtTypeRef, typeRefRid);
+ }
+ }
+
+ case mdtSignature:
+ // we don't know how to translate these if they are from another module
+ assert(module == m_pModule);
+ return token;
+
+ case mdtMemberRef:
+ {
+ mdToken parentToken = GetParentOfMemberRef((CORINFO_MODULE_HANDLE)module, token);
+ mdToken translatedParentToken = TranslateToken(inlineContext, parentToken);
+ if (translatedParentToken == parentToken && module == m_pModule)
+ return token;
+ ULONG cbSig;
+ PCCOR_SIGNATURE pSig;
+ LPCSTR szName;
+ IfFailThrow(pMDImport->GetNameAndSigOfMemberRef(token, &pSig, &cbSig, &szName));
+ assert(cbSig >= 2);
+ SigPointer sigPointer(pSig, cbSig);
+ ULONG callingConv;
+ sigPointer.GetCallingConv(&callingConv);
+
+ ULONG memberOrdinal;
+ mdToken tkMemberDefToken;
+ Module *pModuleMemberDef;
+
+ if (callingConv != IMAGE_CEE_CS_CALLCONV_FIELD)
+ {
+ MethodDesc *method = LoadMethod(module, token);
+ assert(method != NULL);
+ EEClass *eeClass = method->GetClass();
+ Module *methodModule = pModuleMemberDef = eeClass->GetMethodTable()->GetModule();
+ mdToken methodDefToken = tkMemberDefToken = method->GetMemberDef();
+ // if this happens to be in a typedef from our own module, we can just
+ // return the methoddef token
+ if (TypeFromToken(translatedParentToken) == mdtTypeDef)
+ return methodDefToken;
+ mdToken typeDefToken = eeClass->GetMethodTable()->GetCl();
+ memberOrdinal = GetMethodOrdinal(methodModule, typeDefToken, methodDefToken);
+ }
+ else
+ {
+ FieldDesc *field = LoadField(module, token);
+ assert(field != NULL);
+ EEClass *eeClass = field->GetApproxEnclosingMethodTable()->GetClass();
+ Module *fieldModule = pModuleMemberDef = eeClass->GetMethodTable()->GetModule();
+ mdToken fieldDefToken = tkMemberDefToken = field->GetMemberDef();
+ // if this happens to be in a typedef from our own module, we can just
+ // return the fielddef token
+ if (TypeFromToken(translatedParentToken) == mdtTypeDef)
+ return fieldDefToken;
+ mdToken typeDefToken = eeClass->GetMethodTable()->GetCl();
+ memberOrdinal = GetFieldOrdinal(fieldModule, typeDefToken, fieldDefToken);
+ }
+ mdToken translatedToken = FindOrCreateExtMemberRef(translatedParentToken, callingConv == IMAGE_CEE_CS_CALLCONV_FIELD, memberOrdinal, szName, pModuleMemberDef, tkMemberDefToken);
+ SetTranslatedSig(module, translatedToken, pSig, cbSig);
+ return translatedToken;
+ }
+ break;
+
+ case mdtTypeSpec:
+ {
+ ULONG cbSig;
+ PCCOR_SIGNATURE pSig = GetSigOfTypeSpec(module, token, cbSig);
+
+ SigBuilder sigBuilder;
+
+ // expand the signature plugging in the type arguments
+ SigParser sp(pSig, cbSig);
+ expandSignature(sp, inlineContext, sigBuilder);
+
+ pSig = (PCCOR_SIGNATURE)sigBuilder.GetSignature(&cbSig);
+
+ mdToken translatedToken = GetTypeSpecToken(pSig, cbSig);
+
+ return translatedToken;
+ }
+ break;
+
+ case mdtMethodSpec:
+ {
+ mdToken parentToken = GetParentOfMethodSpec(module, token);
+ mdToken translatedParentToken = TranslateToken(inlineContext, parentToken);
+ ULONG cbSig;
+ PCCOR_SIGNATURE pSig = GetSigOfMethodSpec(module, token, cbSig);
+ SigBuilder sigBuilder;
+ SigParser sp(pSig, cbSig);
+ BYTE b;
+ sp.GetByte(&b);
+ assert(b == IMAGE_CEE_CS_CALLCONV_GENERICINST);
+ sigBuilder.AppendByte(b);
+ ULONG typeArgCount;
+ sp.GetData(&typeArgCount);
+ sigBuilder.AppendData(typeArgCount);
+ for (ULONG typeArgIndex = 0; typeArgIndex < typeArgCount; typeArgIndex++)
+ expandSignature(sp, inlineContext, sigBuilder);
+ mdToken translatedToken = FindOrCreateMethodSpec(translatedParentToken, sigBuilder);
+ return translatedToken;
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Unexpected token type");
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ virtual
+ CorInfoType GetFieldElementType(unsigned fieldToken, CORINFO_MODULE_HANDLE scope, CORINFO_METHOD_HANDLE methHnd, ICorJitInfo *info)
+ {
+ STANDARD_VM_CONTRACT;
+
+ Module *module = GetModule(scope);
+
+ IMDInternalImport *pMDImport = module->GetMDImport();
+ PCCOR_SIGNATURE pSig = 0;
+ ULONG cbSig = 0;
+ InlineContext context(m_pModule);
+
+ SigBuilder sigBuilder;
+ FillInlineContext(&context, NULL, fieldToken, 0);
+ if (TypeFromToken(fieldToken) == mdtTypeSpec)
+ {
+ pSig = GetSigOfTypeSpec(module, fieldToken, cbSig);
+ }
+ else
+ {
+ if (TypeFromToken(fieldToken) == mdtFieldDef)
+ IfFailThrow(pMDImport->GetSigOfFieldDef(fieldToken, &cbSig, &pSig));
+ else
+ {
+ assert(TypeFromToken(fieldToken) == mdtMemberRef);
+ pSig = GetSigOfMemberRef(module, fieldToken, cbSig);
+ }
+ assert(cbSig >= 2);
+ BYTE *sigPtr = (BYTE *)pSig;
+ BYTE callingConvention = *sigPtr++;
+ cbSig--;
+ pSig = (PCCOR_SIGNATURE)sigPtr;
+ assert(callingConvention == IMAGE_CEE_CS_CALLCONV_FIELD);
+ }
+ if (!context.IsTrivial())
+ {
+ // expand the signature plugging in the type arguments
+ SigParser sp1(pSig, cbSig);
+ expandSignature(sp1, &context, sigBuilder);
+
+ pSig = (PCCOR_SIGNATURE)sigBuilder.GetSignature(&cbSig);
+ }
+ SigParser sp(pSig, cbSig);
+ CorElementType elType;
+ sp.GetElemType(&elType);
+ while (elType == ELEMENT_TYPE_CMOD_REQD || elType == ELEMENT_TYPE_CMOD_OPT)
+ {
+ mdToken token;
+ sp.GetToken(&token);
+ sp.GetElemType(&elType);
+ }
+ while (true)
+ {
+ switch (elType)
+ {
+ case ELEMENT_TYPE_END: return CORINFO_TYPE_UNDEF;
+ case ELEMENT_TYPE_VOID: return CORINFO_TYPE_VOID;
+ case ELEMENT_TYPE_BOOLEAN: return CORINFO_TYPE_BOOL;
+ case ELEMENT_TYPE_CHAR: return CORINFO_TYPE_CHAR;
+ case ELEMENT_TYPE_I1: return CORINFO_TYPE_BYTE;
+ case ELEMENT_TYPE_U1: return CORINFO_TYPE_UBYTE;
+ case ELEMENT_TYPE_I2: return CORINFO_TYPE_SHORT;
+ case ELEMENT_TYPE_U2: return CORINFO_TYPE_USHORT;
+ case ELEMENT_TYPE_I4: return CORINFO_TYPE_INT;
+ case ELEMENT_TYPE_U4: return CORINFO_TYPE_UINT;
+ case ELEMENT_TYPE_I8: return CORINFO_TYPE_LONG;
+ case ELEMENT_TYPE_U8: return CORINFO_TYPE_ULONG;
+ case ELEMENT_TYPE_R4: return CORINFO_TYPE_FLOAT;
+ case ELEMENT_TYPE_R8: return CORINFO_TYPE_DOUBLE;
+ case ELEMENT_TYPE_I: return CORINFO_TYPE_NATIVEINT;
+ case ELEMENT_TYPE_U: return CORINFO_TYPE_NATIVEUINT;
+
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_SZARRAY: return CORINFO_TYPE_CLASS;
+
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_FNPTR: return CORINFO_TYPE_PTR;
+
+ case ELEMENT_TYPE_BYREF: return CORINFO_TYPE_BYREF;
+
+ case ELEMENT_TYPE_TYPEDBYREF:return CORINFO_TYPE_REFANY;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ mdToken typeToken;
+ sp.GetToken(&typeToken);
+ assert(TypeFromToken(typeToken) == mdtTypeDef || TypeFromToken(typeToken) == mdtTypeRef);
+ EEClass *eeClass = LoadTypeRef(module, typeToken);
+ if (eeClass != NULL && eeClass->GetMethodTable()->IsEnum())
+ {
+ elType = eeClass->GetInternalCorElementType();
+ continue;
+ }
+ }
+ return CORINFO_TYPE_VALUECLASS;
+
+ case ELEMENT_TYPE_GENERICINST:
+ sp.GetElemType(&elType);
+ return elType == ELEMENT_TYPE_CLASS ? CORINFO_TYPE_CLASS : CORINFO_TYPE_VALUECLASS;
+
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ {
+ ULONG argIndex;
+ sp.GetData(&argIndex);
+ CORINFO_CLASS_HANDLE typeParameter = info->getTypeParameter(methHnd, elType == ELEMENT_TYPE_VAR, argIndex);
+ return info->asCorInfoType(typeParameter);
+ }
+
+ default:
+ assert(!"unexpected element type");
+ return CORINFO_TYPE_NATIVEINT;
+ }
+ break;
+ }
+ }
+
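+	// Example (hypothetical field, for illustration only): for a field whose
+	// type is an enum with underlying type 'short', the VALUETYPE case above
+	// resolves the enum and loops, so the method returns CORINFO_TYPE_SHORT
+	// rather than CORINFO_TYPE_VALUECLASS.
+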
+ virtual
+ mdToken GetNextStubToken()
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (RidFromToken(m_prevStubDefToken) == 0xFFFFFF)
+ {
+ // we ran out of methoddefs
+ return mdMethodDefNil;
+ }
+ return ++m_prevStubDefToken;
+ }
+
+ virtual
+ void Flush()
+ {
+ STANDARD_VM_CONTRACT;
+
+ CopyUserStringPool();
+ }
+
+ virtual
+ void FlushStubData()
+ {
+ STANDARD_VM_CONTRACT;
+
+ size_t size, sizeSize, assocSize;
+ BYTE *buffer = m_stream->GetBuffer(size);
+ BYTE *assocBuffer = m_stubAssocStream->GetBuffer(assocSize);
+
+		// Append the stub method count; the bytes between the old length
+		// 'size' and the new length 'sizeSize' are the encoded count.
+		m_stream->WriteUnsigned(m_stubMethodCount);
+		m_stream->GetBuffer(sizeSize);
+
+		// Hand the encoded count, the stub data, and the owner->stub
+		// associations over to the zap image.
+		m_pZapImage->FlushStubData(buffer + size,
+								   (COUNT_T)(sizeSize - size),
+								   buffer, (COUNT_T)size,
+								   assocBuffer, (COUNT_T)assocSize);
+ }
+
+ void CopyUserStringPool()
+ {
+ STANDARD_VM_CONTRACT;
+#ifdef REDHAWK
+ IMDInternalImport *pMDImport = m_pModule->GetMDImport();
+ void *table;
+ size_t tableSize;
+ pMDImport->GetTableInfoWithIndex(TBL_COUNT + MDPoolUSBlobs, &table, (void **)&tableSize);
+//		printf("User string pool at %p taking %u bytes\n", table, tableSize);
+ m_pZapImage->FlushUserStringPool((BYTE *)table, (ULONG)tableSize);
+#endif
+ }
+};
+
+// static
+ICompactLayoutWriter *ICompactLayoutWriter::MakeCompactLayoutWriter(Module *pModule, ZapImage *pZapImage)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pZapImage->DoCompactLayout())
+ {
+ CompactLayoutWriter *compactLayoutWriter = new CompactLayoutWriter(pModule, pZapImage);
+ compactLayoutWriter->CreateExternalReferences();
+
+ pZapImage->SetCompactLayoutWriter(compactLayoutWriter);
+
+ return compactLayoutWriter;
+ }
+ else
+ {
+ return NULL;
+ }
+}
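+
+// Hypothetical usage sketch (editorial illustration; the caller and call
+// order are assumptions, not code from this change):
+//
+//     ICompactLayoutWriter *pWriter =
+//         ICompactLayoutWriter::MakeCompactLayoutWriter(pModule, pZapImage);
+//     if (pWriter != NULL)            // NULL when DoCompactLayout() is off
+//     {
+//         pWriter->Reset();           // prepare to write CTL for one type
+//         // ... StartType / Field / Method / EndType per type ...
+//         pWriter->Flush();           // copies the user string pool
+//     }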
diff --git a/src/vm/compactlayoutwriter.h b/src/vm/compactlayoutwriter.h
new file mode 100644
index 0000000000..56ba1a69ae
--- /dev/null
+++ b/src/vm/compactlayoutwriter.h
@@ -0,0 +1,333 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+
+//
+
+#include "corhdr.h"
+
+#if !defined(_CompactLayoutWriter) && defined(MDIL)
+#define _CompactLayoutWriter
+#include "mdil.h"
+
+class ZapImage;
+
+struct InlineContext;
+
+class ICompactLayoutWriter
+{
+public:
+ // Prepare to write CTL for a type.
+ virtual
+ void Reset() = 0;
+
+ virtual
+ void GenericType(DWORD typeArgCount) = 0;
+
+ enum ComFlags // additional flags on types besides CorTypeAttr
+ {
+ CF_COMOBJECTTYPE = 0x80000000,
+ CF_COMCLASSINTERFACE = 0x40000000,
+ CF_COMEVENTINTERFACE = 0x20000000,
+
+ // com interface types
+ CF_IFACEATTRMASK = 0x18000000,
+ CF_DUAL = 0x00000000,
+ CF_VTABLE = 0x08000000,
+ CF_DISPATCH = 0x10000000,
+ CF_INSPECTABLE = 0x18000000,
+
+ // interfaces don't have finalizers so we can
+ // reuse the bits interfaces use to specify dispatch type
+ CF_FINALIZER = 0x08000000,
+ CF_CRITICALFINALIZER = 0x10000000,
+
+ // fixed value type statics
+ CF_FIXED_ADDRESS_VT_STATICS = 0x04000000,
+
+ // type equivalence on struct parameters
+ CF_DEPENDS_ON_COM_IMPORT_STRUCTS = 0x02000000,
+
+ // is type equivalent?
+ CF_TYPE_EQUIVALENT = 0x01000000,
+
+ CF_CONTAINS_STACK_PTR = 0x00800000,
+
+ // this type has the UnsafeValueType CA attached to it
+ CF_UNSAFEVALUETYPE = 0x00400000,
+ };
+
+ enum SecurityFlags
+ {
+ SF_UNKNOWN = 0,
+ SF_TRANSPARENT = 1,
+ SF_ALL_TRANSPARENT = 2,
+		SF_CRITICAL			= 3,
+ SF_CRITICAL_TAS = 4,
+ SF_ALLCRITICAL = 5,
+ SF_ALLCRITICAL_TAS = 6,
+ SF_TAS_NOTCRITICAL = 7,
+ };
+
+ virtual
+ void StartType( DWORD flags, // CorTypeAttr plus perhaps other flags
+ DWORD typeDefToken, // typedef token for this type
+ DWORD baseTypeToken, // type this type is derived from, if any
+ DWORD enclosingTypeToken, // type this type is nested in, if any
+ DWORD interfaceCount, // how many times ImplementInterface() will be called
+ DWORD fieldCount, // how many times Field() will be called
+ DWORD methodCount, // how many times Method() will be called
+ DWORD newVirtualMethodCount, // how many new virtuals this type defines
+ DWORD overrideVirtualMethodCount ) = 0;// how many virtuals this type overrides
+
+ // Call once for each interface implemented by the
+ // class directly (not those implemented in base classes)
+ virtual
+ void ImplementInterface( DWORD interfaceTypeToken ) = 0;
+
+
+ virtual
+ void ExtendedTypeFlags( DWORD flags ) = 0;
+
+ virtual
+ void SpecialType( SPECIAL_TYPE type) = 0;
+
+	enum FieldStorage
+ {
+ FS_INSTANCE,
+ FS_STATIC,
+ FS_THREADLOCAL,
+ FS_CONTEXTLOCAL,
+ FS_RVA
+ };
+
+ enum FieldProtection // parallels CorFieldAttr
+ {
+ FP_PRIVATE_SCOPE = 0x0, // Member not referenceable.
+ FP_PRIVATE = 0x1, // Accessible only by the parent type.
+ FP_FAM_AND_ASSEM = 0x2, // Accessible by sub-types only in this Assembly.
+		FP_ASSEMBLY			= 0x3,	// Accessible by anyone in the Assembly.
+		FP_FAMILY			= 0x4,	// Accessible only by type and sub-types.
+		FP_FAM_OR_ASSEM		= 0x5,	// Accessible by sub-types anywhere, plus anyone in the assembly.
+		FP_PUBLIC			= 0x6,	// Accessible by anyone who has visibility to this scope.
+ };
+
+ // Call once for each field the class declares directly
+ // valueTypeToken is non-0 iff fieldType == ELEMENT_TYPE_VALUETYPE
+ // not all CorElementTypes are allowed
+ virtual
+ void Field( DWORD fieldToken, // an mdFieldDef
+ FieldStorage fieldStorage,
+ FieldProtection fieldProtection,
+ CorElementType fieldType,
+ DWORD fieldOffset,
+ DWORD valueTypeToken) = 0;
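+
+	// For illustration (hypothetical tokens and offsets): an instance field
+	// of struct type would be reported as
+	//     Field(fieldDefToken, FS_INSTANCE, FP_PRIVATE,
+	//           ELEMENT_TYPE_VALUETYPE, offset, valueTypeToken);
+	// while, say, a static int field passes ELEMENT_TYPE_I4 and 0 for
+	// valueTypeToken.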
+
+ // call once for each method implementing a contract
+ // in an interface.
+ // declToken is the token of the method that is implemented
+ // implToken is the method body that implements it
+ virtual
+ void ImplementInterfaceMethod(DWORD declToken,
+ DWORD implToken) = 0;
+
+ enum ImplHints
+ {
+ IH_CTOR = 0x0010,
+ IH_DEFAULT_CTOR = 0x0020, // this one will have IH_CTOR set also
+ IH_CCTOR = 0x0040,
+
+		IH_DELEGATE_INVOKE			= 0x0080,	// this is the Invoke method of a delegate
+		IH_DELEGATE_BEGIN_INVOKE	= 0x0090,	// this is the BeginInvoke method of a delegate
+		IH_DELEGATE_END_INVOKE		= 0x00A0,	// this is the EndInvoke method of a delegate
+
+ IH_TRANSPARENCY_MASK = 0x0C00,
+ IH_TRANSPARENCY_NO_INFO = 0x0000,
+ IH_TRANSPARENCY_TRANSPARENT = 0x0400,
+ IH_TRANSPARENCY_CRITICAL = 0x0800,
+ IH_TRANSPARENCY_TREAT_AS_SAFE = 0x0C00,
+
+ IH_BY_ORDINAL = 0x1000, // this applies only to DllImport and DllExport methods
+ IH_IS_VERIFIED = 0x2000, // IH_IS_VERIFIED and IH_IS_VERIFIABLE match
+ IH_IS_VERIFIABLE = 0x4000, // IsVerified and IsVerifiable on the MethodDesc
+
+		IH_HASMETHODIMPL			= 0x8000,	// Method overrides a (non-interface) method via MethodImpl,
+											// which will be reported via MethodImpl later on
+ };
+
+ enum StubMethodFlags
+ {
+ SF_PINVOKE = 0,
+ SF_DELEGATE_PINVOKE = 1,
+ SF_CALLI_PINVOKE = 2,
+ SF_REVERSE_PINVOKE = 3,
+ SF_CLR_TO_COM = 4,
+ SF_COM_TO_CLR = 5,
+ SF_STUB_KIND_MASK = 0x0000000f,
+
+ SF_HAS_COPY_CONSTRUCTED_ARGS = 0x00000010,
+ SF_NEEDS_STUB_SIGNATURE = 0x00000020,
+
+ SF_STACK_ARG_SIZE_MASK = 0xFFFC0000,
+ };
+
+ enum CorElementTypeMDIL
+ {
+ ELEMENT_TYPE_NATIVE_VALUETYPE = 0x08 | ELEMENT_TYPE_MODIFIER
+ };
+
+ // call once for each method except PInvoke methods
+ virtual
+ void Method(DWORD methodAttrs,
+ DWORD implFlags,
+				DWORD methodAttrs,
+				DWORD implFlags,
+				DWORD implHints,	// have to figure out exactly how we do this so it's not so tied to the CLR implementation
+ DWORD methodToken,
+ DWORD overriddenMethodToken) = 0;
+
+ // call once for each PInvoke method
+ virtual
+ void PInvokeMethod( DWORD methodAttrs,
+ DWORD implFlags,
+				DWORD implHints,	// have to figure out exactly how we do this so it's not so tied to the CLR implementation
+ DWORD methodToken,
+ LPCSTR moduleName,
+ LPCSTR entryPointName,
+ WORD wLinkFlags) = 0;
+
+ // call once for each DllExport method (Redhawk only feature, at least for now)
+ virtual
+ void DllExportMethod( DWORD methodAttrs,
+ DWORD implFlags,
+				DWORD implHints,	// have to figure out exactly how we do this so it's not so tied to the CLR implementation
+ DWORD methodToken,
+ LPCSTR entryPointName,
+ DWORD callingConvention) = 0;
+
+ virtual
+ void StubMethod( DWORD dwMethodFlags,
+ DWORD sigToken,
+ DWORD methodToken) = 0;
+
+ virtual
+ void StubAssociation( DWORD ownerToken,
+ DWORD *stubTokens,
+ size_t numStubs) = 0;
+
+ // call once for each method impl
+ virtual
+ void MethodImpl(DWORD declToken,
+ DWORD implToken ) = 0;
+
+ // set an explicit size for explicit layout structs
+ virtual
+ void SizeType(DWORD size) = 0;
+
+ // specify the packing size
+ virtual
+ void PackType(DWORD packingSize) = 0;
+
+ // specify a generic parameter to a type or method
+ virtual
+ void GenericParameter(DWORD genericParamToken, DWORD flags) = 0;
+
+ enum NativeFlags
+ {
+ NF_BESTFITMAP = 0x01,
+ NF_THROWONUNMAPPABLECHAR = 0x02,
+
+ // sometimes we store a VARTYPE in the upper bits - this is how much to shift
+ NF_VARTYPE_SHIFT = 8,
+ };
+
+ // specify a field representation on the native side
+ virtual
+ void NativeField(DWORD fieldToken, // an mdFieldDef
+ DWORD nativeType, // really an NStructFieldType
+ DWORD nativeOffset,
+ DWORD count,
+ DWORD flags,
+ DWORD typeToken1,
+ DWORD typeToken2) = 0;
+
+ // specify guid info for interface types
+ virtual
+ void GuidInformation(GuidInfo *guidInfo) = 0;
+
+ // end the description of the type
+ virtual
+ void EndType() = 0;
+
+ // return a token for a method desc
+ // this is trivial in the case of a non-generic method in our own module, not so trivial
+ // otherwise - we may have to generate new moduleref, typeref, typespec, methodspec tokens
+ virtual
+ mdMemberRef GetTokenForMethodDesc(MethodDesc *methodDesc, MethodTable *pMT = NULL) = 0;
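+
+	// E.g. (illustrative): for a non-generic method defined in this module the
+	// result is simply its mdMethodDef; for an instantiation defined in another
+	// module the writer may first have to fabricate moduleref/typeref/typespec/
+	// methodspec tokens, as the comment above notes.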
+
+ virtual
+ mdTypeSpec GetTypeSpecToken(PCCOR_SIGNATURE pSig, DWORD cbSig) = 0;
+
+ virtual
+ mdToken GetTokenForType(MethodTable *pMT) = 0;
+
+ virtual
+ mdToken GetTokenForType(CORINFO_CLASS_HANDLE type) = 0;
+
+ virtual
+ mdToken GetTokenForType(InlineContext *inlineContext, CORINFO_ARG_LIST_HANDLE argList) = 0;
+
+ virtual
+ mdToken GetTokenForMethod(CORINFO_METHOD_HANDLE method) = 0;
+
+ virtual
+ mdToken GetTokenForField(CORINFO_FIELD_HANDLE field) = 0;
+
+ virtual
+ mdToken GetTokenForSignature(PCCOR_SIGNATURE sig) = 0;
+
+ virtual
+ mdToken GetTypeTokenForFieldOrMethod(mdToken fieldOrMethodToken) = 0;
+
+ virtual
+ mdToken GetEnclosingClassToken(InlineContext *inlineContext, CORINFO_METHOD_HANDLE methHnd) = 0;
+
+ virtual
+ InlineContext *ComputeInlineContext(InlineContext *outerContext, unsigned inlinedMethodToken, unsigned constraintTypeToken, CORINFO_METHOD_HANDLE methHnd) = 0;
+
+ virtual
+ DWORD GetFieldOrdinal(CORINFO_MODULE_HANDLE tokenScope, unsigned fieldToken) = 0;
+
+ virtual
+ unsigned TranslateToken(InlineContext *inlineContext, unsigned token) = 0;
+
+ virtual
+ CorInfoType GetFieldElementType(unsigned fieldToken, CORINFO_MODULE_HANDLE scope, CORINFO_METHOD_HANDLE methHnd, ICorJitInfo *info) = 0;
+
+ virtual
+ mdToken GetParentOfMemberRef(CORINFO_MODULE_HANDLE scope, mdMemberRef memberRefToken) = 0;
+
+ virtual
+ mdToken GetArrayElementToken(CORINFO_MODULE_HANDLE scope, mdTypeSpec arrayTypeToken) = 0;
+
+ virtual
+ mdToken GetCurrentMethodToken(InlineContext *inlineContext, CORINFO_METHOD_HANDLE methHnd) = 0;
+
+ virtual
+ bool IsDynamicScope(CORINFO_MODULE_HANDLE scope) = 0;
+
+ virtual
+ mdToken GetNextStubToken() = 0;
+
+ virtual
+ void Flush() = 0;
+
+ virtual
+ void FlushStubData() = 0;
+
+ static ICompactLayoutWriter *MakeCompactLayoutWriter(Module *pModule, ZapImage *pZapImage);
+};
+
+#endif
diff --git a/src/vm/compatibilityswitch.cpp b/src/vm/compatibilityswitch.cpp
new file mode 100644
index 0000000000..0c825eea2d
--- /dev/null
+++ b/src/vm/compatibilityswitch.cpp
@@ -0,0 +1,108 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#include "clrconfig.h"
+#include "compatibilityswitch.h"
+
+FCIMPL2(FC_BOOL_RET, CompatibilitySwitch::IsEnabled, StringObject* switchNameUNSAFE, CLR_BOOL onlyDB)
+{
+ FCALL_CONTRACT;
+
+ if (!switchNameUNSAFE)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidSwitchName"));
+
+ BOOL result = TRUE;
+
+ STRINGREF name = (STRINGREF) switchNameUNSAFE;
+ VALIDATEOBJECTREF(name);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(name);
+
+ CLRConfig::ConfigDWORDInfo info;
+ info.name = name->GetBuffer();
+ if(onlyDB)
+ {
+ // for public managed apis we ignore checking in registry/config/env
+ // only check in windows appcompat DB
+ info.options = CLRConfig::IgnoreEnv |
+ CLRConfig::IgnoreHKLM |
+ CLRConfig::IgnoreHKCU |
+ CLRConfig::IgnoreConfigFiles;
+ }
+ else
+ {
+ // for mscorlib (i.e. which use internal apis) also check in
+ // registry/config/env in addition to windows appcompat DB
+ info.options = CLRConfig::EEConfig_default;
+ }
+
+ // default value is disabled
+ info.defaultValue = 0;
+ result = CLRConfig::IsConfigEnabled(info);
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(result);
+}
+FCIMPLEND
+
+
+FCIMPL2(StringObject*, CompatibilitySwitch::GetValue, StringObject* switchNameUNSAFE, CLR_BOOL onlyDB) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ if (!switchNameUNSAFE)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidSwitchName"));
+
+ STRINGREF name = (STRINGREF) switchNameUNSAFE;
+ VALIDATEOBJECTREF(name);
+
+ STRINGREF refName = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(name);
+ CLRConfig::ConfigStringInfo info;
+ info.name = name->GetBuffer();
+ if(onlyDB)
+ {
+ // for public managed apis we ignore checking in registry/config/env
+ // only check in windows appcompat DB
+ info.options = CLRConfig::IgnoreEnv |
+ CLRConfig::IgnoreHKLM |
+ CLRConfig::IgnoreHKCU |
+ CLRConfig::IgnoreConfigFiles;
+ }
+ else
+ {
+ // for mscorlib (i.e. which use internal apis) also check in
+ // registry/config/env in addition to windows appcompat DB
+ info.options = CLRConfig::EEConfig_default;
+ }
+ LPWSTR strVal = CLRConfig::GetConfigValue(info);
+ refName = StringObject::NewString(strVal);
+ HELPER_METHOD_FRAME_END();
+
+ return (StringObject*)OBJECTREFToObject(refName);
+}
+FCIMPLEND
+
+FCIMPL0(StringObject*, CompatibilitySwitch::GetAppContextOverrides) {
+ CONTRACTL{
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ STRINGREF refName = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+ LPWSTR strVal = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_AppContextSwitchOverrides);
+ refName = StringObject::NewString(strVal);
+ HELPER_METHOD_FRAME_END();
+
+ return (StringObject*)OBJECTREFToObject(refName);
+}
+FCIMPLEND
diff --git a/src/vm/compatibilityswitch.h b/src/vm/compatibilityswitch.h
new file mode 100644
index 0000000000..37ddd23a40
--- /dev/null
+++ b/src/vm/compatibilityswitch.h
@@ -0,0 +1,27 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef _COMPATIBILITYSWITCH_H_
+#define _COMPATIBILITYSWITCH_H_
+
+#include "object.h"
+#include "typehandle.h"
+#include "fcall.h"
+#include "field.h"
+#include "typectxt.h"
+
+class CompatibilitySwitch
+{
+public:
+ static FCDECL2(FC_BOOL_RET, IsEnabled, StringObject* switchNameUNSAFE, CLR_BOOL onlyDB);
+ static FCDECL2(StringObject*, GetValue, StringObject *switchNameUNSAFE, CLR_BOOL onlyDB);
+ static FCDECL0(StringObject*, GetAppContextOverrides);
+};
+
+
+#endif
+
diff --git a/src/vm/compile.cpp b/src/vm/compile.cpp
new file mode 100644
index 0000000000..c9c0603c61
--- /dev/null
+++ b/src/vm/compile.cpp
@@ -0,0 +1,8171 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: compile.cpp
+//
+
+//
+// Support for zap compiler and zap files
+// ===========================================================================
+
+
+
+#include "common.h"
+
+#ifdef FEATURE_PREJIT
+
+#include <corcompile.h>
+
+#include "assemblyspec.hpp"
+
+#include "compile.h"
+#include "excep.h"
+#include "field.h"
+#include "security.h"
+#include "eeconfig.h"
+#include "zapsig.h"
+#include "gcrefmap.h"
+
+#ifndef FEATURE_CORECLR
+#include "corsym.h"
+#endif // FEATURE_CORECLR
+
+#include "virtualcallstub.h"
+#include "typeparse.h"
+#include "typestring.h"
+#include "constrainedexecutionregion.h"
+#include "dllimport.h"
+#include "comdelegate.h"
+#include "stringarraylist.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "clrtocomcall.h"
+#include "comtoclrcall.h"
+#include "winrttypenameconverter.h"
+#endif // FEATURE_COMINTEROP
+
+#include "dllimportcallback.h"
+#include "caparser.h"
+#include "sigbuilder.h"
+#include "cgensys.h"
+#include "peimagelayout.inl"
+
+#if defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_APPX_BINDER)
+#include "appxutil.h"
+#include "clrprivbinderappx.h"
+#include "clrprivtypecachewinrt.h"
+#endif // defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_APPX_BINDER)
+
+#ifdef FEATURE_COMINTEROP
+#include "clrprivbinderwinrt.h"
+#include "winrthelpers.h"
+#endif
+
+#ifdef CROSSGEN_COMPILE
+#include "crossgenroresolvenamespace.h"
+#endif
+
+#include <cvinfo.h>
+
+#ifdef MDIL
+#include <mdil.h>
+#endif
+#include "tritonstress.h"
+
+#ifdef CROSSGEN_COMPILE
+CompilationDomain * theDomain;
+#endif
+
+VerboseLevel g_CorCompileVerboseLevel = CORCOMPILE_NO_LOG;
+
+//
+// CEECompileInfo implements most of ICorCompileInfo
+//
+
+HRESULT CEECompileInfo::Startup( BOOL fForceDebug,
+ BOOL fForceProfiling,
+ BOOL fForceInstrument)
+{
+ SystemDomain::SetCompilationOverrides(fForceDebug,
+ fForceProfiling,
+ fForceInstrument);
+
+ HRESULT hr = S_OK;
+
+ m_fCachingOfInliningHintsEnabled = TRUE;
+ m_fGeneratingNgenPDB = FALSE;
+
+ _ASSERTE(!g_fEEStarted && !g_fEEInit && "You cannot run the EE inside an NGEN compilation process");
+
+ if (!g_fEEStarted && !g_fEEInit)
+ {
+#ifdef CROSSGEN_COMPILE
+ GetSystemInfo(&g_SystemInfo);
+
+ theDomain = new CompilationDomain(fForceDebug,
+ fForceProfiling,
+ fForceInstrument);
+#endif
+
+ // When NGEN'ing this call may execute EE code, e.g. the managed code to set up
+ // the SharedDomain.
+ hr = InitializeEE(COINITEE_DEFAULT);
+ }
+
+ //
+ // JIT interface expects to be called with
+ // preemptive GC enabled
+ //
+ if (SUCCEEDED(hr)) {
+#ifdef _DEBUG
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+#endif
+
+ GCX_PREEMP_NO_DTOR();
+ }
+
+ return hr;
+}
+
+HRESULT CEECompileInfo::CreateDomain(ICorCompilationDomain **ppDomain,
+ IMetaDataAssemblyEmit *pEmitter,
+ BOOL fForceDebug,
+ BOOL fForceProfiling,
+ BOOL fForceInstrument,
+ BOOL fForceFulltrustDomain
+#ifdef MDIL
+ , MDILCompilationFlags mdilCompilationFlags
+#endif
+ )
+{
+ STANDARD_VM_CONTRACT;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+#ifndef CROSSGEN_COMPILE
+ AppDomainCreationHolder<CompilationDomain> pCompilationDomain;
+
+ pCompilationDomain.Assign(new CompilationDomain(fForceDebug,
+ fForceProfiling,
+ fForceInstrument));
+#else
+ CompilationDomain * pCompilationDomain = theDomain;
+#endif
+
+ {
+ SystemDomain::LockHolder lh;
+ pCompilationDomain->Init(
+#ifdef MDIL
+ mdilCompilationFlags
+#endif
+ );
+ }
+
+ if (pEmitter)
+ pCompilationDomain->SetDependencyEmitter(pEmitter);
+
+#if defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_APPX_BINDER)
+ if (AppX::IsAppXProcess())
+ {
+ HRESULT hr = S_OK;
+ ReleaseHolder<ICLRPrivBinder> pBinderInterface;
+ CLRPrivBinderAppX * pBinder = CLRPrivBinderAppX::GetOrCreateBinder();
+
+ IfFailThrow(pBinder->QueryInterface(IID_ICLRPrivBinder, &pBinderInterface));
+ pCompilationDomain->SetLoadContextHostBinder(pBinderInterface);
+ }
+#endif // defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_APPX_BINDER)
+
+#ifdef DEBUGGING_SUPPORTED
+ // Notify the debugger here, before the thread transitions into the
+ // AD to finish the setup, and before any assemblies are loaded into it.
+ SystemDomain::PublishAppDomainAndInformDebugger(pCompilationDomain);
+#endif // DEBUGGING_SUPPORTED
+
+ pCompilationDomain->LoadSystemAssemblies();
+
+ pCompilationDomain->SetupSharedStatics();
+
+ *ppDomain = static_cast<ICorCompilationDomain*>(pCompilationDomain);
+
+ {
+ GCX_COOP();
+
+ ENTER_DOMAIN_PTR(pCompilationDomain,ADV_COMPILATION)
+ {
+#ifdef FEATURE_CORECLR
+ if (fForceFulltrustDomain)
+ ((ApplicationSecurityDescriptor *)pCompilationDomain->GetSecurityDescriptor())->SetGrantedPermissionSet(NULL, NULL, 0xFFFFFFFF);
+#endif
+
+#ifndef CROSSGEN_COMPILE
+#ifndef FEATURE_CORECLR
+ pCompilationDomain->InitializeHashing(NULL);
+#endif // FEATURE_CORECLR
+#endif
+ pCompilationDomain->InitializeDomainContext(TRUE, NULL, NULL);
+
+#ifndef CROSSGEN_COMPILE
+#ifdef FEATURE_CORECLR
+
+ if (!NingenEnabled())
+ {
+ APPDOMAINREF adRef = (APPDOMAINREF)pCompilationDomain->GetExposedObject();
+ GCPROTECT_BEGIN(adRef);
+ MethodDescCallSite initializeSecurity(METHOD__APP_DOMAIN__INITIALIZE_DOMAIN_SECURITY);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(adRef),
+ ObjToArgSlot(NULL),
+ ObjToArgSlot(NULL),
+ ObjToArgSlot(NULL),
+ static_cast<ARG_SLOT>(FALSE)
+ };
+ initializeSecurity.Call(args);
+ GCPROTECT_END();
+ }
+#endif //FEATURE_CORECLR
+#endif
+
+ {
+ GCX_PREEMP();
+
+			// We load assemblies as domain-bound (however, they're compiled as domain neutral)
+#ifdef FEATURE_LOADER_OPTIMIZATION
+#ifdef FEATURE_FUSION
+ if (NingenEnabled())
+ {
+ pCompilationDomain->SetSharePolicy(AppDomain::SHARE_POLICY_NEVER);
+ }
+ else
+ {
+ pCompilationDomain->SetupLoaderOptimization(AppDomain::SHARE_POLICY_NEVER);
+ }
+#else //FEATURE_FUSION
+ pCompilationDomain->SetSharePolicy(AppDomain::SHARE_POLICY_NEVER);
+#endif //FEATURE_FUSION
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+#ifdef FEATURE_FUSION
+ CorCompileConfigFlags flags = PEFile::GetNativeImageConfigFlags(pCompilationDomain->m_fForceDebug,
+ pCompilationDomain->m_fForceProfiling,
+ pCompilationDomain->m_fForceInstrument);
+
+ FusionBind::SetApplicationContextDWORDProperty(GetAppDomain()->GetFusionContext(),
+ ACTAG_ZAP_CONFIG_FLAGS, flags);
+#endif //FEATURE_FUSION
+ }
+
+ pCompilationDomain->SetFriendlyName(W("Compilation Domain"));
+ if (!NingenEnabled())
+ {
+ Security::SetDefaultAppDomainProperty(pCompilationDomain->GetSecurityDescriptor());
+ pCompilationDomain->GetSecurityDescriptor()->FinishInitialization();
+ }
+ SystemDomain::System()->LoadDomain(pCompilationDomain);
+
+#ifndef CROSSGEN_COMPILE
+ pCompilationDomain.DoneCreating();
+#endif
+ }
+ END_DOMAIN_TRANSITION;
+ }
+
+ COOPERATIVE_TRANSITION_END();
+
+ return S_OK;
+}
+
+
+HRESULT CEECompileInfo::DestroyDomain(ICorCompilationDomain *pDomain)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifndef CROSSGEN_COMPILE
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ GCX_COOP();
+
+ CompilationDomain *pCompilationDomain = (CompilationDomain *) pDomain;
+
+ // DDB 175659: Make sure that canCallNeedsRestore() returns FALSE during compilation
+ // domain shutdown.
+ pCompilationDomain->setCannotCallNeedsRestore();
+
+ pCompilationDomain->Unload(TRUE);
+
+ COOPERATIVE_TRANSITION_END();
+#endif
+
+ return S_OK;
+}
+
+HRESULT MakeCrossDomainCallbackWorker(
+ CROSS_DOMAIN_CALLBACK pfnCallback,
+ LPVOID pArgs)
+{
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ HRESULT hrRetVal = E_UNEXPECTED;
+ BEGIN_SO_TOLERANT_CODE(GetThread());
+ hrRetVal = pfnCallback(pArgs);
+ END_SO_TOLERANT_CODE;
+ return hrRetVal;
+}
+
+HRESULT CEECompileInfo::MakeCrossDomainCallback(
+ ICorCompilationDomain* pDomain,
+ CROSS_DOMAIN_CALLBACK pfnCallback,
+ LPVOID pArgs)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hrRetVal = E_UNEXPECTED;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ {
+ // Switch to cooperative mode to switch appdomains
+ GCX_COOP();
+
+ ENTER_DOMAIN_PTR((CompilationDomain*)pDomain,ADV_COMPILATION)
+ {
+ //
+ // Switch to preemptive mode on before calling back into
+ // the zapper
+ //
+
+ GCX_PREEMP();
+
+ hrRetVal = MakeCrossDomainCallbackWorker(pfnCallback, pArgs);
+ }
+ END_DOMAIN_TRANSITION;
+ }
+
+ COOPERATIVE_TRANSITION_END();
+
+ return hrRetVal;
+}
+
+#ifdef TRITON_STRESS_NEED_IMPL
+int LogToSvcLogger(LPCWSTR format, ...)
+{
+ STANDARD_VM_CONTRACT;
+
+ StackSString s;
+
+ va_list args;
+ va_start(args, format);
+ s.VPrintf(format, args);
+ va_end(args);
+
+ GetSvcLogger()->Printf(W("%s"), s.GetUnicode());
+
+ return 0;
+}
+#endif
+
+HRESULT CEECompileInfo::LoadAssemblyByPath(
+ LPCWSTR wzPath,
+
+ // Normally this is FALSE, but crossgen /CreatePDB sets this to TRUE, so it can
+ // explicitly load an NI by path
+ BOOL fExplicitBindToNativeImage,
+
+ CORINFO_ASSEMBLY_HANDLE *pHandle)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ Assembly * pAssembly;
+ HRESULT hrProcessLibraryBitnessMismatch = S_OK;
+ bool verifyingImageIsAssembly = false;
+
+	// We don't want to do a LoadFrom, since LoadFrom does not work with ngen. Instead,
+	// read the metadata from the file and do a bind based on that.
+
+ EX_TRY
+ {
+ // Pre-open the image so we can grab some metadata to help initialize the
+ // binder's AssemblySpec, which we'll use later to load the assembly for real.
+
+ PEImageHolder pImage;
+
+#if defined(CROSSGEN_COMPILE) && !defined(FEATURE_CORECLR)
+		// If the path is not absolute (no backslash, drive letter, or forward
+		// slash), look for the assembly on the platform path list first
+		if (wcschr(wzPath, '\\') == NULL && wcschr(wzPath, ':') == NULL && wcschr(wzPath, '/') == NULL)
+ {
+ CompilationDomain::FindImage(wzPath,
+ fExplicitBindToNativeImage ? MDInternalImport_NoCache : MDInternalImport_Default, &pImage);
+ }
+#endif
+
+ if (pImage == NULL)
+ {
+ pImage = PEImage::OpenImage(
+ wzPath,
+
+				// If we're explicitly binding to an NGEN image, we do not want to cache
+				// this PEImage for use later, as pointers that need fixup (e.g.,
+				// Module::m_pModuleSecurityDescriptor) would not be valid then.
+				// Normal caching is done when we open the image "for real" further down,
+				// when we call LoadDomainAssembly().
+				fExplicitBindToNativeImage ? MDInternalImport_NoCache : MDInternalImport_Default);
+ }
+
+#if defined(FEATURE_WINDOWSPHONE)
+ verifyingImageIsAssembly = true;
+#endif // FEATURE_WINDOWSPHONE
+ if (fExplicitBindToNativeImage && !pImage->HasReadyToRunHeader())
+ {
+ pImage->VerifyIsNIAssembly();
+ }
+ else
+ {
+ pImage->VerifyIsAssembly();
+ }
+
+ verifyingImageIsAssembly = false;
+
+ // Check to make sure the bitness of the assembly matches the bitness of the process
+ // we will be loading it into and store the result. If a COR_IMAGE_ERROR gets thrown
+ // by LoadAssembly then we can blame it on bitness mismatch. We do the check here
+ // and not in the CATCH to distinguish between the COR_IMAGE_ERROR that can be thrown by
+ // VerifyIsAssembly (not necessarily a bitness mismatch) and that from LoadAssembly
+#ifdef _WIN64
+ if (pImage->Has32BitNTHeaders())
+ {
+ hrProcessLibraryBitnessMismatch = PEFMT_E_32BIT;
+ }
+#else
+ if (!pImage->Has32BitNTHeaders())
+ {
+ hrProcessLibraryBitnessMismatch = PEFMT_E_64BIT;
+ }
+#endif
+
+ AssemblySpec spec;
+ spec.InitializeSpec(TokenFromRid(1, mdtAssembly), pImage->GetMDImport(), NULL, FALSE);
+
+ if (spec.IsMscorlib())
+ {
+ pAssembly = SystemDomain::System()->SystemAssembly();
+ }
+ else
+ {
+ AppDomain * pDomain = AppDomain::GetCurrentDomain();
+
+ PEAssemblyHolder pAssemblyHolder;
+ BOOL isWinRT = FALSE;
+
+#ifdef FEATURE_COMINTEROP
+ isWinRT = spec.IsContentType_WindowsRuntime();
+ if (isWinRT)
+ {
+ LPCSTR szNameSpace;
+ LPCSTR szTypeName;
+				// It does not make sense to pass the file name to receive a fake type name for empty WinMDs, because we would use that name
+				// for binding in the next call to BindAssemblySpec, which would fail for a fake WinRT type name.
+				// We will throw/return the error instead, and the caller will recognize it and react by not creating the ngen image -
+ // see code:Zapper::ComputeDependenciesInCurrentDomain
+ IfFailThrow(::GetFirstWinRTTypeDef(pImage->GetMDImport(), &szNameSpace, &szTypeName, NULL, NULL));
+ spec.SetWindowsRuntimeType(szNameSpace, szTypeName);
+ }
+#endif //FEATURE_COMINTEROP
+
+ // If there is a host binder then use it to bind the assembly.
+ if (pDomain->HasLoadContextHostBinder() || isWinRT)
+ {
+ pAssemblyHolder = pDomain->BindAssemblySpec(&spec, TRUE, FALSE);
+ }
+ else
+ {
+#ifdef FEATURE_FUSION
+ SafeComHolder<IBindResult> pNativeFusionAssembly;
+ SafeComHolder<IFusionBindLog> pFusionLog;
+ SafeComHolder<IAssembly> pFusionAssembly;
+
+ IfFailThrow(ExplicitBind(wzPath, pDomain->GetFusionContext(), EXPLICITBIND_FLAGS_EXE,
+ NULL, &pFusionAssembly, &pNativeFusionAssembly, &pFusionLog));
+
+ pAssemblyHolder = PEAssembly::Open(pFusionAssembly, pNativeFusionAssembly, pFusionLog, FALSE, FALSE);
+#else //FEATURE_FUSION
+ //ExplicitBind
+ CoreBindResult bindResult;
+ spec.SetCodeBase(pImage->GetPath());
+ spec.Bind(
+ pDomain,
+ TRUE, // fThrowOnFileNotFound
+ &bindResult,
+
+ // fNgenExplicitBind: Generally during NGEN / MDIL compilation, this is
+ // TRUE, meaning "I am NGEN, and I am doing an explicit bind to the IL
+ // image, so don't infer the NI and try to open it, because I already
+ // have it open". But if we're executing crossgen /CreatePDB, this should
+ // be FALSE so that downstream code doesn't assume we're explicitly
+ // trying to bind to an IL image (we're actually explicitly trying to
+ // open an NI).
+ !fExplicitBindToNativeImage,
+
+ // fExplicitBindToNativeImage: Most callers want this FALSE; but crossgen
+ // /CreatePDB explicitly specifies NI names to open, and cannot assume
+ // that IL assemblies will be available.
+ fExplicitBindToNativeImage
+ );
+ pAssemblyHolder = PEAssembly::Open(&bindResult,FALSE,FALSE);
+#endif //FEATURE_FUSION
+ }
+
+ // Now load assembly into domain.
+ DomainAssembly * pDomainAssembly = pDomain->LoadDomainAssembly(&spec, pAssemblyHolder, FILE_LOAD_BEGIN);
+
+#ifndef FEATURE_APPX_BINDER
+ if (spec.CanUseWithBindingCache() && pDomainAssembly->CanUseWithBindingCache())
+ pDomain->AddAssemblyToCache(&spec, pDomainAssembly);
+#endif
+
+#if defined(CROSSGEN_COMPILE) && !defined(FEATURE_CORECLR)
+ pDomain->ToCompilationDomain()->ComputeAssemblyHardBindList(pAssemblyHolder->GetPersistentMDImport());
+#endif
+
+#ifdef MDIL
+ // MDIL is generated as a special mode of ngen.exe or coregen.exe; normally these two utilities
+ // would not generate anything if a native image for the requested assembly already exists.
+ // Of course, this is not the desired behavior when generating MDIL - it should always work.
+ // We need to prevent loading of the native image when we are generating MDIL.
+ if (!GetAppDomain()->IsMDILCompilationDomain())
+#endif
+ {
+ // Mark the assembly before it gets fully loaded and NGen image dependencies are verified. This is necessary
+ // to allow skipping compilation if there is NGen image already.
+ pDomainAssembly->GetFile()->SetSafeToHardBindTo();
+ }
+
+ pAssembly = pDomain->LoadAssembly(&spec, pAssemblyHolder, FILE_LOADED);
+
+ // Add a dependency to the current assembly. This is done to match the behavior
+ // of LoadAssemblyFusion, so that the same native image is generated whether we
+ // ngen install by file name or by assembly name.
+ pDomain->ToCompilationDomain()->AddDependency(&spec, pAssemblyHolder);
+ }
+
+#ifdef MDIL
+ if (GetAppDomain()->IsMDILCompilationDomain())
+ TritonStressStartup(LogToSvcLogger);
+#endif
+
+ // Kind of a workaround - if we could have loaded this assembly via normal load,
+
+ *pHandle = CORINFO_ASSEMBLY_HANDLE(pAssembly);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (verifyingImageIsAssembly && hr != S_OK)
+ {
+ hr = NGEN_E_FILE_NOT_ASSEMBLY;
+ }
+ else if ( hrProcessLibraryBitnessMismatch != S_OK && ( hr == COR_E_BADIMAGEFORMAT || hr == HRESULT_FROM_WIN32(ERROR_BAD_EXE_FORMAT) ) )
+ {
+ hr = hrProcessLibraryBitnessMismatch;
+ }
+
+ COOPERATIVE_TRANSITION_END();
+
+ return hr;
+}
+
+#ifdef FEATURE_FUSION
+
+// Simple helper that factors out code common to LoadAssemblyByIAssemblyName and
+// LoadAssemblyByName
+static HRESULT LoadAssemblyByIAssemblyNameWorker(
+ IAssemblyName *pAssemblyName,
+ CORINFO_ASSEMBLY_HANDLE *pHandle)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ Assembly *pAssembly;
+
+ AssemblySpec spec;
+ spec.InitializeSpec(pAssemblyName, NULL, FALSE);
+
+ if (spec.IsMscorlib())
+ {
+ pAssembly = SystemDomain::System()->SystemAssembly();
+ }
+ else
+ {
+
+ DomainAssembly * pDomainAssembly = spec.LoadDomainAssembly(FILE_LOAD_BEGIN);
+
+ // Mark the assembly before it gets fully loaded and NGen image dependencies are verified. This is necessary
+ // to allow skipping compilation if there is NGen image already.
+ pDomainAssembly->GetFile()->SetSafeToHardBindTo();
+
+ pAssembly = spec.LoadAssembly(FILE_LOADED);
+ }
+
+#ifdef MDIL
+ if (GetAppDomain()->IsMDILCompilationDomain())
+ TritonStressStartup(LogToSvcLogger);
+#endif
+
+ //
+ // Return the module handle
+ //
+
+ *pHandle = CORINFO_ASSEMBLY_HANDLE(pAssembly);
+
+ return S_OK;
+}
+
+HRESULT CEECompileInfo::LoadAssemblyByName(
+ LPCWSTR wzName,
+ CORINFO_ASSEMBLY_HANDLE *pHandle)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ EX_TRY
+ {
+ ReleaseHolder<IAssemblyName> pAssemblyName;
+ IfFailThrow(CreateAssemblyNameObject(&pAssemblyName, wzName, CANOF_PARSE_DISPLAY_NAME, NULL));
+ IfFailThrow(LoadAssemblyByIAssemblyNameWorker(pAssemblyName, pHandle));
+ }
+ EX_CATCH_HRESULT(hr);
+
+ COOPERATIVE_TRANSITION_END();
+
+ return hr;
+}
+
+HRESULT CEECompileInfo::LoadAssemblyRef(
+ IMDInternalImport *pAssemblyImport,
+ mdAssemblyRef ref,
+ CORINFO_ASSEMBLY_HANDLE *pHandle,
+ IAssemblyName **refAssemblyName /*=NULL*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ ReleaseHolder<IAssemblyName> pAssemblyName;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ EX_TRY
+ {
+ Assembly *pAssembly;
+
+ if (refAssemblyName)
+ *refAssemblyName = NULL;
+
+ AssemblySpec spec;
+ spec.InitializeSpec(ref, pAssemblyImport, NULL, FALSE);
+
+ if (spec.HasBindableIdentity())
+ {
+ if (refAssemblyName)
+ {
+ IfFailThrow(spec.CreateFusionName(&pAssemblyName));
+ }
+
+ pAssembly = spec.LoadAssembly(FILE_LOADED);
+
+ //
+ // Return the module handle
+ //
+
+ *pHandle = CORINFO_ASSEMBLY_HANDLE(pAssembly);
+ }
+ else
+ { // Cannot load assembly refs with non-unique id.
+ hr = S_FALSE;
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ COOPERATIVE_TRANSITION_END();
+
+ if (refAssemblyName != NULL && pAssemblyName != NULL)
+ {
+ *refAssemblyName = pAssemblyName.Extract();
+ }
+
+ return hr;
+}
+
+HRESULT CEECompileInfo::LoadAssemblyByIAssemblyName(
+ IAssemblyName *pAssemblyName,
+ CORINFO_ASSEMBLY_HANDLE *pHandle
+ )
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ EX_TRY
+ {
+ IfFailThrow(LoadAssemblyByIAssemblyNameWorker(pAssemblyName, pHandle));
+ }
+ EX_CATCH_HRESULT(hr);
+
+ COOPERATIVE_TRANSITION_END();
+
+ return hr;
+}
+
+#endif //FEATURE_FUSION
+
+#ifdef FEATURE_COMINTEROP
+HRESULT CEECompileInfo::LoadTypeRefWinRT(
+ IMDInternalImport *pAssemblyImport,
+ mdTypeRef ref,
+ CORINFO_ASSEMBLY_HANDLE *pHandle)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ ReleaseHolder<IAssemblyName> pAssemblyName;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ EX_TRY
+ {
+ Assembly *pAssembly;
+
+ mdToken tkResolutionScope;
+ pAssemblyImport->GetResolutionScopeOfTypeRef(ref, &tkResolutionScope);
+
+ if(TypeFromToken(tkResolutionScope) == mdtAssemblyRef)
+ {
+ DWORD dwAssemblyRefFlags;
+ IfFailThrow(pAssemblyImport->GetAssemblyRefProps(tkResolutionScope, NULL, NULL,
+ NULL, NULL,
+ NULL, NULL, &dwAssemblyRefFlags));
+ if (IsAfContentType_WindowsRuntime(dwAssemblyRefFlags))
+ {
+ LPCSTR psznamespace;
+ LPCSTR pszname;
+ pAssemblyImport->GetNameOfTypeRef(ref, &psznamespace, &pszname);
+ AssemblySpec spec;
+ spec.InitializeSpec(tkResolutionScope, pAssemblyImport, NULL, FALSE);
+ spec.SetWindowsRuntimeType(psznamespace, pszname);
+
+ _ASSERTE(spec.HasBindableIdentity());
+
+ pAssembly = spec.LoadAssembly(FILE_LOADED);
+
+ //
+ // Return the module handle
+ //
+
+ *pHandle = CORINFO_ASSEMBLY_HANDLE(pAssembly);
+ }
+ else
+ {
+ hr = S_FALSE;
+ }
+ }
+ else
+ {
+ hr = S_FALSE;
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ COOPERATIVE_TRANSITION_END();
+
+ return hr;
+}
+#endif
+
+BOOL CEECompileInfo::IsInCurrentVersionBubble(CORINFO_MODULE_HANDLE hModule)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return ((Module*)hModule)->IsInCurrentVersionBubble();
+}
+
+HRESULT CEECompileInfo::LoadAssemblyModule(
+ CORINFO_ASSEMBLY_HANDLE assembly,
+ mdFile file,
+ CORINFO_MODULE_HANDLE *pHandle)
+{
+ STANDARD_VM_CONTRACT;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ Assembly *pAssembly = (Assembly*) assembly;
+
+ Module *pModule = pAssembly->GetManifestModule()->LoadModule(GetAppDomain(), file, TRUE)->GetModule();
+
+ //
+ // Return the module handle
+ //
+
+ *pHandle = CORINFO_MODULE_HANDLE(pModule);
+
+ COOPERATIVE_TRANSITION_END();
+
+ return S_OK;
+}
+
+#ifndef FEATURE_CORECLR
+BOOL CEECompileInfo::SupportsAutoNGen(CORINFO_ASSEMBLY_HANDLE assembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ Assembly *pAssembly = (Assembly*) assembly;
+ return pAssembly->SupportsAutoNGen();
+}
+
+HRESULT CEECompileInfo::SetCachedSigningLevel(HANDLE hNI, HANDLE *pModules, COUNT_T nModules)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ HMODULE hKernel32 = WszLoadLibrary(W("kernel32.dll"));
+ typedef BOOL (WINAPI *SetCachedSigningLevel_t)
+ (__in_ecount(Count) PHANDLE SourceFiles, __in ULONG Count, __in ULONG Flags, __in HANDLE TargetFile);
+ SetCachedSigningLevel_t SetCachedSigningLevel
+ = (SetCachedSigningLevel_t)GetProcAddress(hKernel32, "SetCachedSigningLevel");
+ if (SetCachedSigningLevel == NULL)
+ {
+ return S_OK;
+ }
+
+ StackSArray<PEImage*> images;
+ PEImage::GetAll(images);
+
+ StackSArray<HANDLE> handles;
+ for (StackSArray<PEImage*>::Iterator i = images.Begin(), end = images.End(); i != end; i++)
+ {
+ if (!(*i)->IsFile())
+ {
+ continue;
+ }
+ HANDLE hFile = (*i)->GetFileHandleLocking();
+ handles.Append(hFile);
+ }
+
+    if (!SetCachedSigningLevel(handles.GetElements(), handles.GetCount(), 0, hNI))
+    {
+        hr = HRESULT_FROM_WIN32(GetLastError());
+        _ASSERTE(FAILED(hr));
+        goto ErrExit;
+    }
+ for (COUNT_T i = 0; i < nModules; i++)
+ {
+ if (!SetCachedSigningLevel(handles.GetElements(), handles.GetCount(), 0, pModules[i]))
+ {
+ hr = HRESULT_FROM_WIN32(GetLastError());
+ _ASSERTE(FAILED(hr));
+ goto ErrExit;
+ }
+ }
+
+ErrExit:
+ return hr;
+}
+#endif
+
+BOOL CEECompileInfo::CheckAssemblyZap(
+ CORINFO_ASSEMBLY_HANDLE assembly,
+ __out_ecount_opt(*cAssemblyManifestModulePath)
+ LPWSTR assemblyManifestModulePath,
+ LPDWORD cAssemblyManifestModulePath)
+{
+ STANDARD_VM_CONTRACT;
+
+ BOOL result = FALSE;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ Assembly *pAssembly = (Assembly*) assembly;
+
+ if (pAssembly->GetManifestFile()->HasNativeImage())
+ {
+ PEImage *pImage = pAssembly->GetManifestFile()->GetPersistentNativeImage();
+
+ if (assemblyManifestModulePath != NULL)
+ {
+ DWORD length = pImage->GetPath().GetCount();
+ if (length > *cAssemblyManifestModulePath)
+ {
+ length = *cAssemblyManifestModulePath - 1;
+ wcsncpy_s(assemblyManifestModulePath, *cAssemblyManifestModulePath, pImage->GetPath(), length);
+ assemblyManifestModulePath[length] = 0;
+ }
+ else
+ wcscpy_s(assemblyManifestModulePath, *cAssemblyManifestModulePath, pImage->GetPath());
+ }
+
+ result = TRUE;
+ }
+
+ COOPERATIVE_TRANSITION_END();
+
+ return result;
+}
+
+#ifdef MDIL
+DWORD CEECompileInfo::GetMdilModuleSecurityFlags(
+ CORINFO_ASSEMBLY_HANDLE assembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ Assembly *pAssembly = (Assembly*) assembly;
+
+ ModuleSecurityDescriptor *pMSD = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pAssembly);
+
+ MDILHeader::Flags securityFlags = MDILHeader::MdilModuleSecurityDescriptorFlags_None;
+
+ // Is Microsoft Platform
+ if (pMSD->IsMicrosoftPlatform())
+ securityFlags = (MDILHeader::Flags)(securityFlags | MDILHeader::MdilModuleSecurityDescriptorFlags_IsMicrosoftPlatform);
+
+ // Is every method and type in the assembly transparent
+ if (pMSD->IsAllTransparent())
+ securityFlags = (MDILHeader::Flags)(securityFlags | MDILHeader::MdilModuleSecurityDescriptorFlags_IsAllTransparent);
+
+ // Is every method and type introduced by the assembly critical
+ if (pMSD->IsAllCritical())
+ securityFlags = (MDILHeader::Flags)(securityFlags | MDILHeader::MdilModuleSecurityDescriptorFlags_IsAllCritical);
+
+ // Combined with IsAllCritical - is every method and type introduced by the assembly safe critical
+ if (pMSD->IsTreatAsSafe())
+ securityFlags = (MDILHeader::Flags)(securityFlags | MDILHeader::MdilModuleSecurityDescriptorFlags_IsTreatAsSafe);
+
+ // Does the assembly not care about transparency, and wants the CLR to take care of making sure everything
+ // is annotated properly in the assembly.
+ if (pMSD->IsOpportunisticallyCritical())
+ securityFlags = (MDILHeader::Flags)(securityFlags | MDILHeader::MdilModuleSecurityDescriptorFlags_IsOpportunisticallyCritical);
+
+ // Partial trust assemblies are forced all-transparent under some conditions. This
+ // tells us whether that is true for this particular assembly.
+ if (pMSD->IsAllTransparentDueToPartialTrust())
+ securityFlags = (MDILHeader::Flags)(securityFlags | MDILHeader::MdilModuleSecurityDescriptorFlags_TransparentDueToPartialTrust);
+
+#ifdef FEATURE_APTCA
+ if (pMSD->IsAPTCA())
+#endif
+ securityFlags = (MDILHeader::Flags)(securityFlags | MDILHeader::MdilModuleSecurityDescriptorFlags_IsAPTCA);
+
+#ifndef FEATURE_CORECLR
+ // Can fully trusted transparent code bypass verification
+ if (pMSD->CanTransparentCodeSkipVerification())
+ securityFlags = (MDILHeader::Flags)(securityFlags | MDILHeader::MdilModuleSecurityDescriptorFlags_SkipFullTrustVerification);
+#endif // !FEATURE_CORECLR
+
+ return (DWORD)securityFlags;
+}
+
+BOOL CEECompileInfo::CompilerRelaxationNoStringInterningPermitted(
+ CORINFO_ASSEMBLY_HANDLE assembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ Assembly *pAssembly = (Assembly*) assembly;
+
+ return pAssembly->GetManifestModule()->IsNoStringInterning();
+}
+
+BOOL CEECompileInfo::RuntimeCompatibilityWrapExceptions(
+ CORINFO_ASSEMBLY_HANDLE assembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ Assembly *pAssembly = (Assembly*) assembly;
+
+ return pAssembly->GetManifestModule()->IsRuntimeWrapExceptions();
+}
+
+DWORD CEECompileInfo::CERReliabilityContract(
+ CORINFO_ASSEMBLY_HANDLE assembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ Assembly *pAssembly = (Assembly*) assembly;
+
+ return pAssembly->GetManifestModule()->GetReliabilityContract();
+}
+
+#endif // MDIL
+
+HRESULT CEECompileInfo::SetCompilationTarget(CORINFO_ASSEMBLY_HANDLE assembly,
+ CORINFO_MODULE_HANDLE module)
+{
+ STANDARD_VM_CONTRACT;
+
+ Assembly *pAssembly = (Assembly *) assembly;
+ Module *pModule = (Module *) module;
+
+ CompilationDomain *pDomain = (CompilationDomain *) GetAppDomain();
+ pDomain->SetTarget(pAssembly, pModule);
+
+ if (!pAssembly->IsSystem())
+ {
+        // It is possible to get through a compile without calling BindAssemblySpec on mscorlib. This
+        // is because refs to mscorlib are short-circuited in a number of places. So we will explicitly
+        // add it to our dependencies.
+
+ AssemblySpec mscorlib;
+ mscorlib.InitializeSpec(SystemDomain::SystemFile());
+ GetAppDomain()->BindAssemblySpec(&mscorlib,TRUE,FALSE);
+
+ if (!SystemDomain::SystemFile()->HasNativeImage())
+ {
+ if (!CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NgenAllowMscorlibSoftbind))
+ {
+ return NGEN_E_SYS_ASM_NI_MISSING;
+ }
+ }
+ }
+
+ return S_OK;
+}
+
+IMDInternalImport *
+ CEECompileInfo::GetAssemblyMetaDataImport(CORINFO_ASSEMBLY_HANDLE assembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ IMDInternalImport * import;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ import = ((Assembly*)assembly)->GetManifestImport();
+ import->AddRef();
+
+ COOPERATIVE_TRANSITION_END();
+
+ return import;
+}
+
+IMDInternalImport *
+ CEECompileInfo::GetModuleMetaDataImport(CORINFO_MODULE_HANDLE scope)
+{
+ STANDARD_VM_CONTRACT;
+
+ IMDInternalImport * import;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ import = ((Module*)scope)->GetMDImport();
+ import->AddRef();
+
+ COOPERATIVE_TRANSITION_END();
+
+ return import;
+}
+
+CORINFO_MODULE_HANDLE
+ CEECompileInfo::GetAssemblyModule(CORINFO_ASSEMBLY_HANDLE assembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ CANNOTTHROWCOMPLUSEXCEPTION();
+
+ return (CORINFO_MODULE_HANDLE) ((Assembly*)assembly)->GetManifestModule();
+}
+
+PEDecoder * CEECompileInfo::GetModuleDecoder(CORINFO_MODULE_HANDLE scope)
+{
+ STANDARD_VM_CONTRACT;
+
+ PEDecoder *result;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ //
+ // Note that we go ahead and return the native image if we are using that.
+ // It contains everything we need to ngen. However, the caller must be
+ // aware and check for the native image case, since some fields will need to come
+ // from the CORCOMPILE_ZAP_HEADER rather than the PE headers.
+ //
+
+ PEFile *pFile = ((Module *) scope)->GetFile();
+
+ if (pFile->HasNativeImage())
+ result = pFile->GetLoadedNative();
+ else
+ result = pFile->GetLoadedIL();
+
+ COOPERATIVE_TRANSITION_END();
+
+ return result;
+
+}
+
+void CEECompileInfo::GetModuleFileName(CORINFO_MODULE_HANDLE scope,
+ SString &result)
+{
+ STANDARD_VM_CONTRACT;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ result.Set(((Module*)scope)->GetPath());
+
+ COOPERATIVE_TRANSITION_END();
+}
+
+CORINFO_ASSEMBLY_HANDLE
+ CEECompileInfo::GetModuleAssembly(CORINFO_MODULE_HANDLE module)
+{
+ STANDARD_VM_CONTRACT;
+
+ CANNOTTHROWCOMPLUSEXCEPTION();
+
+ return (CORINFO_ASSEMBLY_HANDLE) GetModule(module)->GetAssembly();
+}
+
+#ifdef MDIL
+HRESULT CEECompileInfo::ShouldCompile(CORINFO_METHOD_HANDLE method)
+{
+ STANDARD_VM_CONTRACT;
+ MethodDesc * pMD = (MethodDesc *)method;
+
+ BOOL fIsMinimalMDIL = GetAppDomain()->IsMinimalMDILCompilationDomain();
+ if (fIsMinimalMDIL && !pMD->HasClassOrMethodInstantiation())
+ return S_FALSE;
+
+ if (GetAppDomain()->IsNoMDILCompilationDomain())
+ return S_FALSE;
+
+ return S_OK;
+}
+#endif // MDIL
+
+#ifdef FEATURE_FUSION
+HRESULT CEECompileInfo::GetAssemblyName(
+ CORINFO_ASSEMBLY_HANDLE hAssembly,
+ DWORD dwFlags,
+ __out_z LPWSTR wzAssemblyName,
+ LPDWORD pcchAssemblyName)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(hAssembly != NULL);
+ if (hAssembly == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ Assembly *pAssembly = (Assembly *) hAssembly;
+ IAssemblyName * pAssemblyName = pAssembly->GetFusionAssemblyName();
+ if (dwFlags == GANF_Default)
+ {
+ hr = pAssemblyName->GetDisplayName(wzAssemblyName, pcchAssemblyName, 0);
+ }
+ else if (dwFlags == GANF_Simple)
+ {
+ DWORD cbAssemblyName = *pcchAssemblyName * sizeof(WCHAR);
+ hr = pAssemblyName->GetProperty(ASM_NAME_NAME, (LPVOID)wzAssemblyName, &cbAssemblyName);
+ *pcchAssemblyName = cbAssemblyName / sizeof(WCHAR);
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+#endif //FEATURE_FUSION
+
+#ifdef CROSSGEN_COMPILE
+//
+// Small wrapper to avoid having too many crossgen ifdefs
+//
+class AssemblyForLoadHint
+{
+ IMDInternalImport * m_pMDImport;
+public:
+ AssemblyForLoadHint(IMDInternalImport * pMDImport)
+ : m_pMDImport(pMDImport)
+ {
+ }
+
+ IMDInternalImport * GetManifestImport()
+ {
+ return m_pMDImport;
+ }
+
+ LPCSTR GetSimpleName()
+ {
+ LPCSTR name = "";
+ IfFailThrow(m_pMDImport->GetAssemblyProps(TokenFromRid(1, mdtAssembly), NULL, NULL, NULL, &name, NULL, NULL));
+ return name;
+ }
+
+ void GetDisplayName(SString &result, DWORD flags = 0)
+ {
+ PEAssembly::GetFullyQualifiedAssemblyName(m_pMDImport, TokenFromRid(1, mdtAssembly), result, flags);
+ }
+
+ BOOL IsSystem()
+ {
+ return FALSE;
+ }
+};
+#endif
+
+//-----------------------------------------------------------------------------
+// For an assembly with a full name of "Foo, Version=2.0.0.0, Culture=neutral",
+// we want any of these attributes specifications to match:
+// DependencyAttribute("Foo", LoadHint.Always)
+// DependencyAttribute("Foo,", LoadHint.Always)
+// DependencyAttribute("Foo, Version=2.0.0.0, Culture=neutral", LoadHint.Always)
+// The second case of "Foo," is needed only for intra-V2 compat as
+// it was supported at one point during V2. We may be able to get rid of it.
+template <typename ASSEMBLY>
+BOOL IsAssemblySpecifiedInCA(ASSEMBLY * pAssembly, SString dependencyNameFromCA)
+{
+ STANDARD_VM_CONTRACT;
+
+ // First, check for this:
+ // DependencyAttribute("Foo", LoadHint.Always)
+ StackSString simpleName(SString::Utf8, pAssembly->GetSimpleName());
+ if (simpleName.EqualsCaseInsensitive(dependencyNameFromCA, PEImage::GetFileSystemLocale()))
+ return TRUE;
+
+ // Now, check for this:
+ // DependencyAttribute("Foo,", LoadHint.Always)
+ SString comma(W(","));
+ StackSString simpleNameWithComma(simpleName, comma);
+ if (simpleNameWithComma.EqualsCaseInsensitive(dependencyNameFromCA, PEImage::GetFileSystemLocale()))
+ return TRUE;
+
+ // Finally:
+ // DependencyAttribute("Foo, Version=2.0.0.0, Culture=neutral", LoadHint.Always)
+ StackSString fullName;
+ pAssembly->GetDisplayName(fullName);
+ if (fullName.EqualsCaseInsensitive(dependencyNameFromCA))
+ return TRUE;
+
+ return FALSE;
+}
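+
+// Worked example (names illustrative): for a full name of
+// "Foo, Version=2.0.0.0, Culture=neutral" the checks above accept, in order:
+//
+//     "Foo"                                    - simple name
+//     "Foo,"                                   - simple name + comma (legacy)
+//     "Foo, Version=2.0.0.0, Culture=neutral"  - full display name
+//
+// Anything else, e.g. "Foo, Version=1.0.0.0, Culture=neutral", does not match.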
+
+template <typename ASSEMBLY>
+void GetLoadHint(ASSEMBLY * pAssembly, ASSEMBLY *pAssemblyDependency,
+ LoadHintEnum *loadHint, LoadHintEnum *defaultLoadHint = NULL)
+{
+ STANDARD_VM_CONTRACT;
+
+ *loadHint = LoadDefault;
+
+ if (g_pConfig->NgenHardBind() == EEConfig::NGEN_HARD_BIND_ALL)
+ *loadHint = LoadAlways;
+
+ const BYTE *pbAttr; // Custom attribute data as a BYTE*.
+ ULONG cbAttr; // Size of custom attribute data.
+ mdToken mdAssembly;
+
+ // Look for the binding custom attribute
+ {
+ IMDInternalImport *pImport = pAssembly->GetManifestImport();
+
+ IfFailThrow(pImport->GetAssemblyFromScope(&mdAssembly));
+
+ MDEnumHolder hEnum(pImport); // Enumerator for custom attributes
+ IfFailThrow(pImport->EnumCustomAttributeByNameInit(mdAssembly, DEPENDENCY_TYPE, &hEnum));
+
+ mdCustomAttribute tkAttribute; // A custom attribute on this assembly.
+ while (pImport->EnumNext(&hEnum, &tkAttribute))
+ {
+ // Get raw custom attribute.
+ IfFailThrow(pImport->GetCustomAttributeAsBlob(tkAttribute, (const void**)&pbAttr, &cbAttr));
+
+ CustomAttributeParser cap(pbAttr, cbAttr);
+
+ IfFailThrow(cap.ValidateProlog());
+
+ // Extract string from custom attribute
+ LPCUTF8 szString;
+ ULONG cbString;
+ IfFailThrow(cap.GetNonNullString(&szString, &cbString));
+
+ // Convert the string to Unicode.
+ StackSString dependencyNameFromCA(SString::Utf8, szString, cbString);
+
+ if (IsAssemblySpecifiedInCA(pAssemblyDependency, dependencyNameFromCA))
+ {
+ // Get dependency setting
+ UINT32 u4;
+ IfFailThrow(cap.GetU4(&u4));
+ *loadHint = (LoadHintEnum)u4;
+ break;
+ }
+ }
+ }
+
+	// If no preference is specified, look for the built-in assembly preference
+ if (*loadHint == LoadDefault || defaultLoadHint != NULL)
+ {
+ IMDInternalImport *pImportDependency = pAssemblyDependency->GetManifestImport();
+
+ IfFailThrow(pImportDependency->GetAssemblyFromScope(&mdAssembly));
+
+ HRESULT hr = pImportDependency->GetCustomAttributeByName(mdAssembly,
+ DEFAULTDEPENDENCY_TYPE,
+ (const void**)&pbAttr, &cbAttr);
+ IfFailThrow(hr);
+
+ // Parse the attribute
+ if (hr == S_OK)
+ {
+ CustomAttributeParser cap(pbAttr, cbAttr);
+ IfFailThrow(cap.ValidateProlog());
+
+ // Get default bind setting
+ UINT32 u4 = 0;
+ IfFailThrow(cap.GetU4(&u4));
+
+ if (pAssemblyDependency->IsSystem() && CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NgenAllowMscorlibSoftbind))
+ {
+ u4 = LoadDefault;
+ }
+
+ if (defaultLoadHint)
+ *defaultLoadHint = (LoadHintEnum) u4;
+ else
+ *loadHint = (LoadHintEnum) u4;
+ }
+ }
+}
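+
+// Precedence summary (restating the logic above, no new behavior):
+//   1. Start from LoadDefault, or LoadAlways under NGEN_HARD_BIND_ALL.
+//   2. A DependencyAttribute on the depending assembly that names the
+//      dependency overrides the starting value.
+//   3. Otherwise (or whenever the caller passes defaultLoadHint) the
+//      dependency's own DefaultDependencyAttribute is consulted; the
+//      mscorlib soft-bind config switch can downgrade it to LoadDefault.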
+
+HRESULT CEECompileInfo::GetLoadHint(CORINFO_ASSEMBLY_HANDLE hAssembly,
+ CORINFO_ASSEMBLY_HANDLE hAssemblyDependency,
+ LoadHintEnum *loadHint,
+ LoadHintEnum *defaultLoadHint // for MDIL we want to separate the default load hint on the assembly
+ // from the load hint on the dependency
+ )
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ Assembly *pAssembly = (Assembly *) hAssembly;
+ Assembly *pAssemblyDependency = (Assembly *) hAssemblyDependency;
+
+ ::GetLoadHint(pAssembly, pAssemblyDependency, loadHint, defaultLoadHint);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+HRESULT CEECompileInfo::GetAssemblyVersionInfo(CORINFO_ASSEMBLY_HANDLE hAssembly,
+ CORCOMPILE_VERSION_INFO *pInfo)
+{
+ STANDARD_VM_CONTRACT;
+
+ Assembly *pAssembly = (Assembly *) hAssembly;
+
+ pAssembly->GetDomainAssembly()->GetCurrentVersionInfo(pInfo);
+
+ return S_OK;
+}
+
+void CEECompileInfo::GetAssemblyCodeBase(CORINFO_ASSEMBLY_HANDLE hAssembly, SString &result)
+{
+ STANDARD_VM_CONTRACT;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ Assembly *pAssembly = (Assembly *)hAssembly;
+ _ASSERTE(pAssembly != NULL);
+
+ pAssembly->GetCodeBase(result);
+
+ COOPERATIVE_TRANSITION_END();
+}
+
+//=================================================================================
+
+void FakePromote(PTR_PTR_Object ppObj, ScanContext *pSC, DWORD dwFlags)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ _ASSERTE(*ppObj == NULL);
+ *(CORCOMPILE_GCREFMAP_TOKENS *)ppObj = (dwFlags & GC_CALL_INTERIOR) ? GCREFMAP_INTERIOR : GCREFMAP_REF;
+}
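+
+// Illustration (hypothetical signature): scanning 'static void F(Object o,
+// int i, ref int r)' over the zeroed fake frame leaves GCREFMAP_REF in o's
+// slot, nothing in i's slot, and GCREFMAP_INTERIOR in r's slot; GetCallRefMap
+// below encodes these tokens positionally.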
+
+//=================================================================================
+
+void FakePromoteCarefully(promote_func *fn, Object **ppObj, ScanContext *pSC, DWORD dwFlags)
+{
+ (*fn)(ppObj, pSC, dwFlags);
+}
+
+//=================================================================================
+
+void FakeGcScanRoots(MetaSig& msig, ArgIterator& argit, MethodDesc * pMD, BYTE * pFrame)
+{
+ STANDARD_VM_CONTRACT;
+
+ ScanContext sc;
+
+ // Encode generic instantiation arg
+ if (argit.HasParamType())
+ {
+ // Note that intrinsic array methods have hidden instantiation arg too, but it is not reported to GC
+ if (pMD->RequiresInstMethodDescArg())
+ *(CORCOMPILE_GCREFMAP_TOKENS *)(pFrame + argit.GetParamTypeArgOffset()) = GCREFMAP_METHOD_PARAM;
+ else
+ if (pMD->RequiresInstMethodTableArg())
+ *(CORCOMPILE_GCREFMAP_TOKENS *)(pFrame + argit.GetParamTypeArgOffset()) = GCREFMAP_TYPE_PARAM;
+ }
+
+ // If the function has a this pointer, add it to the mask
+ if (argit.HasThis())
+ {
+ BOOL interior = pMD->GetMethodTable()->IsValueType() && !pMD->IsUnboxingStub();
+
+ FakePromote((Object **)(pFrame + argit.GetThisOffset()), &sc, interior ? GC_CALL_INTERIOR : 0);
+ }
+
+ if (argit.IsVarArg())
+ {
+ *(CORCOMPILE_GCREFMAP_TOKENS *)(pFrame + argit.GetVASigCookieOffset()) = GCREFMAP_VASIG_COOKIE;
+
+ // We are done for varargs - the remaining arguments are reported via vasig cookie
+ return;
+ }
+
+ // Also if the method has a return buffer, then it is the first argument, and could be an interior ref,
+ // so always promote it.
+ if (argit.HasRetBuffArg())
+ {
+ FakePromote((Object **)(pFrame + argit.GetRetBuffArgOffset()), &sc, GC_CALL_INTERIOR);
+ }
+
+ //
+ // Now iterate the arguments
+ //
+
+ // Cycle through the arguments, and call msig.GcScanRoots for each
+ int argOffset;
+ while ((argOffset = argit.GetNextOffset()) != TransitionBlock::InvalidOffset)
+ {
+ msig.GcScanRoots(pFrame + argOffset, &FakePromote, &sc, &FakePromoteCarefully);
+ }
+}
+
+void CEECompileInfo::GetCallRefMap(CORINFO_METHOD_HANDLE hMethod, GCRefMapBuilder * pBuilder)
+{
+#ifdef _DEBUG
+ DWORD dwInitialLength = pBuilder->GetBlobLength();
+ UINT nTokensWritten = 0;
+#endif
+
+ MethodDesc *pMD = (MethodDesc *)hMethod;
+
+ MetaSig msig(pMD);
+ ArgIterator argit(&msig);
+
+ UINT nStackBytes = argit.SizeOfFrameArgumentArray();
+
+ // Allocate a fake stack
+ CQuickBytes qbFakeStack;
+ qbFakeStack.AllocThrows(sizeof(TransitionBlock) + nStackBytes);
+ memset(qbFakeStack.Ptr(), 0, qbFakeStack.Size());
+
+ BYTE * pFrame = (BYTE *)qbFakeStack.Ptr();
+
+ // Fill it in
+ FakeGcScanRoots(msig, argit, pMD, pFrame);
+
+ //
+ // Encode the ref map
+ //
+
+ UINT nStackSlots;
+
+#ifdef _TARGET_X86_
+ UINT cbStackPop = argit.CbStackPop();
+ pBuilder->WriteStackPop(cbStackPop / sizeof(TADDR));
+
+ nStackSlots = nStackBytes / sizeof(TADDR) + NUM_ARGUMENT_REGISTERS;
+#else
+ nStackSlots = (sizeof(TransitionBlock) + nStackBytes - TransitionBlock::GetOffsetOfArgumentRegisters()) / sizeof(TADDR);
+#endif
+
+ for (UINT pos = 0; pos < nStackSlots; pos++)
+ {
+ int ofs;
+
+#ifdef _TARGET_X86_
+ ofs = (pos < NUM_ARGUMENT_REGISTERS) ?
+ (TransitionBlock::GetOffsetOfArgumentRegisters() + ARGUMENTREGISTERS_SIZE - (pos + 1) * sizeof(TADDR)) :
+ (TransitionBlock::GetOffsetOfArgs() + (pos - NUM_ARGUMENT_REGISTERS) * sizeof(TADDR));
+#else
+ ofs = TransitionBlock::GetOffsetOfArgumentRegisters() + pos * sizeof(TADDR);
+#endif
+
+ CORCOMPILE_GCREFMAP_TOKENS token = *(CORCOMPILE_GCREFMAP_TOKENS *)(pFrame + ofs);
+
+ if (token != 0)
+ {
+ INDEBUG(nTokensWritten++;)
+ pBuilder->WriteToken(pos, token);
+ }
+ }
+
+ // We are done
+ pBuilder->Flush();
+
+#ifdef _DEBUG
+ //
+ // Verify that decoder produces what got encoded
+ //
+
+ DWORD dwFinalLength;
+ PVOID pBlob = pBuilder->GetBlob(&dwFinalLength);
+
+ UINT nTokensDecoded = 0;
+
+ GCRefMapDecoder decoder((BYTE *)pBlob + dwInitialLength);
+
+#ifdef _TARGET_X86_
+ _ASSERTE(decoder.ReadStackPop() * sizeof(TADDR) == cbStackPop);
+#endif
+
+ while (!decoder.AtEnd())
+ {
+ int pos = decoder.CurrentPos();
+ int token = decoder.ReadToken();
+
+ int ofs;
+
+#ifdef _TARGET_X86_
+ ofs = (pos < NUM_ARGUMENT_REGISTERS) ?
+ (TransitionBlock::GetOffsetOfArgumentRegisters() + ARGUMENTREGISTERS_SIZE - (pos + 1) * sizeof(TADDR)) :
+ (TransitionBlock::GetOffsetOfArgs() + (pos - NUM_ARGUMENT_REGISTERS) * sizeof(TADDR));
+#else
+ ofs = TransitionBlock::GetOffsetOfArgumentRegisters() + pos * sizeof(TADDR);
+#endif
+
+ if (token != 0)
+ {
+ _ASSERTE(*(CORCOMPILE_GCREFMAP_TOKENS *)(pFrame + ofs) == token);
+ nTokensDecoded++;
+ }
+ }
+
+ // Verify that all tokens got decoded.
+ _ASSERTE(nTokensWritten == nTokensDecoded);
+#endif // _DEBUG
+}
+
+void CEECompileInfo::CompressDebugInfo(
+ IN ICorDebugInfo::OffsetMapping * pOffsetMapping,
+ IN ULONG iOffsetMapping,
+ IN ICorDebugInfo::NativeVarInfo * pNativeVarInfo,
+ IN ULONG iNativeVarInfo,
+ IN OUT SBuffer * pDebugInfoBuffer
+ )
+{
+ STANDARD_VM_CONTRACT;
+
+ CompressDebugInfo::CompressBoundariesAndVars(pOffsetMapping, iOffsetMapping, pNativeVarInfo, iNativeVarInfo, pDebugInfoBuffer, NULL);
+}
+
+HRESULT CEECompileInfo::GetBaseJitFlags(
+ IN CORINFO_METHOD_HANDLE hMethod,
+ OUT DWORD *pFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *pMD = (MethodDesc *)hMethod;
+ *pFlags = CEEInfo::GetBaseCompileFlags(pMD);
+
+ return S_OK;
+}
+
+//=================================================================================
+
+#ifdef _DEBUG
+
+static struct
+{
+ size_t total;
+ size_t noEmbed;
+ size_t array;
+ size_t primitives;
+ size_t szarray;
+} embedStats;
+
+#endif // _DEBUG
+
+BOOL CEEPreloader::CanEmbedClassID(CORINFO_CLASS_HANDLE typeHandle)
+{
+ STANDARD_VM_CONTRACT;
+
+ TypeHandle hnd = (TypeHandle) typeHandle;
+ return m_image->CanEagerBindToTypeHandle(hnd) &&
+ !hnd.AsMethodTable()->NeedsCrossModuleGenericsStaticsInfo();
+}
+
+BOOL CEEPreloader::CanEmbedModuleID(CORINFO_MODULE_HANDLE moduleHandle)
+{
+ STANDARD_VM_CONTRACT;
+
+ return m_image->CanEagerBindToModule((Module *)moduleHandle);
+}
+
+BOOL CEEPreloader::CanEmbedModuleHandle(CORINFO_MODULE_HANDLE moduleHandle)
+{
+ STANDARD_VM_CONTRACT;
+
+ return m_image->CanEagerBindToModule((Module *)moduleHandle);
+}
+
+BOOL CEEPreloader::CanEmbedClassHandle(CORINFO_CLASS_HANDLE typeHandle)
+{
+ STANDARD_VM_CONTRACT;
+
+ TypeHandle hnd = (TypeHandle) typeHandle;
+
+ BOOL decision = m_image->CanEagerBindToTypeHandle(hnd);
+
+#ifdef _DEBUG
+ embedStats.total++;
+
+ if (!decision)
+ embedStats.noEmbed++;
+
+ if (hnd.IsArray())
+ {
+ embedStats.array++;
+
+ CorElementType arrType = hnd.AsArray()->GetInternalCorElementType();
+ if (arrType == ELEMENT_TYPE_SZARRAY)
+ embedStats.szarray++;
+
+ CorElementType elemType = hnd.AsArray()->GetArrayElementTypeHandle().GetInternalCorElementType();
+ if (elemType <= ELEMENT_TYPE_R8)
+ embedStats.primitives++;
+ }
+#endif // _DEBUG
+ return decision;
+}
+
+
+/*static*/ BOOL CanEmbedMethodDescViaContext(MethodDesc * pMethod, MethodDesc * pContext)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pContext != NULL)
+ {
+ _ASSERTE(pContext->GetLoaderModule() == GetAppDomain()->ToCompilationDomain()->GetTargetModule());
+
+ // a method can always embed its own handle
+ if (pContext == pMethod)
+ {
+ return TRUE;
+ }
+
+ // Methods that are tightly bound to the same method table can
+ // always refer to each other directly. This check allows methods
+ // within one speculative generic instantiation to call each
+ // other directly.
+ //
+ if ((pContext->GetMethodTable() == pMethod->GetMethodTable()) &&
+ pContext->IsTightlyBoundToMethodTable() &&
+ pMethod->IsTightlyBoundToMethodTable())
+ {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
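+
+// For example, under the tight-binding rule above, two methods of the same
+// speculative instantiation, say List<SomeType>::Add calling List<SomeType>::Grow,
+// may embed each other's handles even when the instantiation itself cannot be
+// eagerly bound. (The type and method names are illustrative only.)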
+
+BOOL CEEPreloader::CanEmbedMethodHandle(CORINFO_METHOD_HANDLE methodHandle,
+ CORINFO_METHOD_HANDLE contextHandle)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc * pContext = GetMethod(contextHandle);
+ MethodDesc * pMethod = GetMethod(methodHandle);
+
+ if (CanEmbedMethodDescViaContext(pMethod, pContext))
+ return TRUE;
+
+ return m_image->CanEagerBindToMethodDesc(pMethod);
+}
+
+BOOL CEEPreloader::CanEmbedFieldHandle(CORINFO_FIELD_HANDLE fieldHandle)
+{
+ STANDARD_VM_CONTRACT;
+
+ return m_image->CanEagerBindToFieldDesc((FieldDesc *) fieldHandle);
+}
+
+void* CEECompileInfo::GetStubSize(void *pStubAddress, DWORD *pSizeToCopy)
+{
+ CONTRACT(void*)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(pStubAddress && pSizeToCopy);
+ }
+ CONTRACT_END;
+
+ Stub *stub = Stub::RecoverStubAndSize((TADDR)pStubAddress, pSizeToCopy);
+ _ASSERTE(*pSizeToCopy > sizeof(Stub));
+ RETURN stub;
+}
+
+HRESULT CEECompileInfo::GetStubClone(void *pStub, BYTE *pBuffer, DWORD dwBufferSize)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pStub == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ return (reinterpret_cast<Stub *>(pStub)->CloneStub(pBuffer, dwBufferSize));
+}
+
+HRESULT CEECompileInfo::GetTypeDef(CORINFO_CLASS_HANDLE classHandle,
+ mdTypeDef *token)
+{
+ STANDARD_VM_CONTRACT;
+
+ CANNOTTHROWCOMPLUSEXCEPTION();
+
+ TypeHandle hClass(classHandle);
+
+ *token = hClass.GetCl();
+
+ return S_OK;
+}
+
+HRESULT CEECompileInfo::GetMethodDef(CORINFO_METHOD_HANDLE methodHandle,
+ mdMethodDef *token)
+{
+ STANDARD_VM_CONTRACT;
+
+ CANNOTTHROWCOMPLUSEXCEPTION();
+
+ *token = ((MethodDesc*)methodHandle)->GetMemberDef();
+
+ return S_OK;
+}
+
+/*********************************************************************/
+// Used to determine if a methodHandle can be embedded in an ngen image.
+// This depends on which things are persisted by the CEEPreloader.
+
+BOOL CEEPreloader::CanEmbedFunctionEntryPoint(
+ CORINFO_METHOD_HANDLE methodHandle,
+ CORINFO_METHOD_HANDLE contextHandle, /* = NULL */
+ CORINFO_ACCESS_FLAGS accessFlags /*=CORINFO_ACCESS_ANY*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc * pMethod = GetMethod(methodHandle);
+ MethodDesc * pContext = GetMethod(contextHandle);
+
+ // IsRemotingInterceptedViaVirtualDispatch is a rather special case.
+ //
+ // Other remoting intercepts are implemented in one of two ways:
+ // (1) in DoPrestub (for non-virtual calls), or
+ // (2) by transparent proxy vtables, where all the entries in the vtable
+ // go to the same code.
+ //
+ // However, when calling virtual functions non-virtually, the JIT interface
+ // returns a pointer to the code for the function in a stub
+ // (see GetNonVirtualEntryPointForVirtualMethod).
+ // Thus we cannot embed non-virtual calls to these functions because we
+ // don't save these stubs. Unlike most other remoting stubs, these ones
+ // are NOT inserted by DoPrestub.
+ //
+ if (((accessFlags & CORINFO_ACCESS_THIS) == 0) &&
+ (pMethod->IsRemotingInterceptedViaVirtualDispatch()) )
+ {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+BOOL CEEPreloader::DoesMethodNeedRestoringBeforePrestubIsRun(
+ CORINFO_METHOD_HANDLE methodHandle)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc * ftn = GetMethod(methodHandle);
+
+ // The restore mechanism for InstantiatedMethodDescs (IMDs) is complicated, and causes
+ // circular dependencies with the GC if we hardbind to the prestub/precode
+ // of an unrestored IMD. As such, we're eliminating hardbinding to unrestored MethodDescs
+ // that belong to generic types.
+
+ //@TODO: The reduction may be overkill, and we may consider refining the cases.
+
+ // Specifically, InstantiatedMethodDescs can have preferred zap modules different from
+ // the zap modules of their owning types. As such, in a soft-binding case a MethodDesc
+ // may not be able to trace back to its owning Module without hitting an unrestored
+ // fixup token. For example, the 64-bit JIT cannot yet provide generic type arguments
+ // and uses instantiating stubs to call static methods on generic types. If such a stub
+ // belongs to a module other than the module in which the generic type is declared, then
+ // it is possible for the MethodTable::m_pEEClass or the EEClass::m_pModule pointers to
+ // be unrestored. The complication arises when a call to the prestub/precode of such
+ // an unrestored IMD causes us to try to restore the IMD; this in turn causes us to
+ // transition to preemptive GC, and the GC then needs the metadata signature from the IMD
+ // to iterate its arguments. But since we're currently restoring the IMD, we may not be
+ // able to get to the signature, and as such we're stuck.
+
+ // The same problem exists for instantiation arguments. We may need the instantiation
+ // arguments while walking the signature during GC, and if they are not restored we're stuck.
+
+ if (ftn->HasClassOrMethodInstantiation())
+ {
+ if (ftn->NeedsRestore(m_image))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+BOOL CEEPreloader::CanSkipDependencyActivation(CORINFO_METHOD_HANDLE context,
+ CORINFO_MODULE_HANDLE moduleFrom,
+ CORINFO_MODULE_HANDLE moduleTo)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Can't skip any fixups for speculative generic instantiations
+ if (Module::GetPreferredZapModuleForMethodDesc(GetMethod(context)) != m_image->GetModule())
+ return FALSE;
+
+ // We don't need a fixup for eagerly bound dependencies since we are going to have
+ // an unconditional one already.
+ return m_image->CanEagerBindToModule((Module *)moduleTo);
+}
+
+CORINFO_MODULE_HANDLE CEEPreloader::GetPreferredZapModuleForClassHandle(
+ CORINFO_CLASS_HANDLE classHnd)
+{
+ STANDARD_VM_CONTRACT;
+
+ return CORINFO_MODULE_HANDLE(Module::GetPreferredZapModuleForTypeHandle(TypeHandle(classHnd)));
+}
+
+// This method is called directly from zapper
+extern BOOL CanDeduplicateCode(CORINFO_METHOD_HANDLE method, CORINFO_METHOD_HANDLE duplicateMethod);
+
+BOOL CanDeduplicateCode(CORINFO_METHOD_HANDLE method, CORINFO_METHOD_HANDLE duplicateMethod)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ // For now, deduplication is supported for IL stubs only
+ DynamicMethodDesc * pMethod = GetMethod(method)->AsDynamicMethodDesc();
+ DynamicMethodDesc * pDuplicateMethod = GetMethod(duplicateMethod)->AsDynamicMethodDesc();
+
+ //
+ // Make sure that the return types match (for code:Thread::HijackThread)
+ //
+
+#ifdef _TARGET_X86_
+ MetaSig msig1(pMethod);
+ MetaSig msig2(pDuplicateMethod);
+ if (!msig1.HasFPReturn() != !msig2.HasFPReturn())
+ return FALSE;
+#endif // _TARGET_X86_
+
+ if (pMethod->ReturnsObject() != pDuplicateMethod->ReturnsObject())
+ return FALSE;
+
+ //
+ // Make sure that the IL stub flags match
+ //
+
+ if (pMethod->GetExtendedFlags() != pDuplicateMethod->GetExtendedFlags())
+ return FALSE;
+
+ return TRUE;
+}
+
+void CEEPreloader::NoteDeduplicatedCode(CORINFO_METHOD_HANDLE method, CORINFO_METHOD_HANDLE duplicateMethod)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifndef FEATURE_FULL_NGEN // Deduplication
+ DuplicateMethodEntry e;
+ e.pMD = GetMethod(method);
+ e.pDuplicateMD = GetMethod(duplicateMethod);
+ m_duplicateMethodsHash.Add(e);
+#endif
+}
+
+HRESULT CEECompileInfo::GetFieldDef(CORINFO_FIELD_HANDLE fieldHandle,
+ mdFieldDef *token)
+{
+ STANDARD_VM_CONTRACT;
+
+ CANNOTTHROWCOMPLUSEXCEPTION();
+
+ *token = ((FieldDesc*)fieldHandle)->GetMemberDef();
+
+ return S_OK;
+}
+
+void CEECompileInfo::EncodeModuleAsIndexes(CORINFO_MODULE_HANDLE fromHandle,
+ CORINFO_MODULE_HANDLE handle,
+ DWORD* pAssemblyIndex,
+ DWORD* pModuleIndex,
+ IMetaDataAssemblyEmit* pAssemblyEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ Module *fromModule = GetModule(fromHandle);
+ Assembly *fromAssembly = fromModule->GetAssembly();
+
+ Module *module = GetModule(handle);
+ Assembly *assembly = module->GetAssembly();
+
+ if (assembly == fromAssembly)
+ *pAssemblyIndex = 0;
+ else
+ {
+ UPTR result;
+ mdToken token;
+
+ CompilationDomain *pDomain = GetAppDomain()->ToCompilationDomain();
+
+ RefCache *pRefCache = pDomain->GetRefCache(fromModule);
+ if (!pRefCache)
+ ThrowOutOfMemory();
+
+
+ if (!assembly->GetManifestFile()->HasBindableIdentity())
+ {
+ // If the module that we'd like to encode for a later fixup doesn't have
+ // a bindable identity, then this will fail at runtime. So, we ask the
+ // compilation domain for a matching assembly with a bindable identity.
+ // This is possible because this module must have been bound in the past,
+ // and the compilation domain will keep track of at least one corresponding
+ // bindable identity.
+ AssemblySpec defSpec;
+ defSpec.InitializeSpec(assembly->GetManifestFile());
+
+ AssemblySpec* pRefSpec = pDomain->FindAssemblyRefSpecForDefSpec(&defSpec);
+ _ASSERTE(pRefSpec != nullptr);
+
+ IfFailThrow(pRefSpec->EmitToken(pAssemblyEmit, &token, TRUE, TRUE));
+ token += fromModule->GetAssemblyRefMax();
+ }
+ else
+ {
+ result = pRefCache->m_sAssemblyRefMap.LookupValue((UPTR)assembly, NULL);
+
+ if (result == (UPTR)INVALIDENTRY)
+ token = fromModule->FindAssemblyRef(assembly);
+ else
+ token = (mdAssemblyRef) result;
+
+ if (IsNilToken(token))
+ {
+ token = fromAssembly->AddAssemblyRef(assembly, pAssemblyEmit);
+ token += fromModule->GetAssemblyRefMax();
+ }
+ }
+
+ *pAssemblyIndex = RidFromToken(token);
+
+ pRefCache->m_sAssemblyRefMap.InsertValue((UPTR) assembly, (UPTR)token);
+ }
+
+ if (module == assembly->GetManifestModule())
+ *pModuleIndex = 0;
+ else
+ {
+ _ASSERTE(module->GetModuleRef() != mdFileNil);
+ *pModuleIndex = RidFromToken(module->GetModuleRef());
+ }
+
+ COOPERATIVE_TRANSITION_END();
+}
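+
+// For illustration: if the referenced assembly is already known to the referencing
+// module under AssemblyRef token 0x23000002, the encoded assembly index is simply
+// RID(0x23000002) = 2. When a fresh ref has to be emitted into pAssemblyEmit instead,
+// the token is biased by GetAssemblyRefMax() so that emitted refs occupy an index
+// range disjoint from the module's pre-existing AssemblyRefs.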
+
+void CEECompileInfo::EncodeClass(
+ CORINFO_MODULE_HANDLE referencingModule,
+ CORINFO_CLASS_HANDLE classHandle,
+ SigBuilder * pSigBuilder,
+ LPVOID pEncodeModuleContext,
+ ENCODEMODULE_CALLBACK pfnEncodeModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ TypeHandle th(classHandle);
+
+ ZapSig zapSig((Module *)referencingModule, pEncodeModuleContext, ZapSig::NormalTokens,
+ (EncodeModuleCallback) pfnEncodeModule, NULL);
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ BOOL fSuccess;
+ fSuccess = zapSig.GetSignatureForTypeHandle(th, pSigBuilder);
+ _ASSERTE(fSuccess);
+
+ COOPERATIVE_TRANSITION_END();
+}
+
+CORINFO_MODULE_HANDLE CEECompileInfo::GetLoaderModuleForMscorlib()
+{
+ STANDARD_VM_CONTRACT;
+
+ return CORINFO_MODULE_HANDLE(SystemDomain::SystemModule());
+}
+
+CORINFO_MODULE_HANDLE CEECompileInfo::GetLoaderModuleForEmbeddableType(CORINFO_CLASS_HANDLE clsHnd)
+{
+ STANDARD_VM_CONTRACT;
+
+ TypeHandle t = TypeHandle(clsHnd);
+ return CORINFO_MODULE_HANDLE(t.GetLoaderModule());
+}
+
+CORINFO_MODULE_HANDLE CEECompileInfo::GetLoaderModuleForEmbeddableMethod(CORINFO_METHOD_HANDLE methHnd)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *pMD = GetMethod(methHnd);
+ return CORINFO_MODULE_HANDLE(pMD->GetLoaderModule());
+}
+
+CORINFO_MODULE_HANDLE CEECompileInfo::GetLoaderModuleForEmbeddableField(CORINFO_FIELD_HANDLE fieldHnd)
+{
+ STANDARD_VM_CONTRACT;
+
+ FieldDesc *pFD = (FieldDesc *) fieldHnd;
+ return CORINFO_MODULE_HANDLE(pFD->GetLoaderModule());
+}
+
+void CEECompileInfo::EncodeMethod(
+ CORINFO_MODULE_HANDLE referencingModule,
+ CORINFO_METHOD_HANDLE handle,
+ SigBuilder * pSigBuilder,
+ LPVOID pEncodeModuleContext,
+ ENCODEMODULE_CALLBACK pfnEncodeModule,
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken)
+{
+ STANDARD_VM_CONTRACT;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+ MethodDesc *pMethod = GetMethod(handle);
+
+ BOOL fSuccess;
+ fSuccess = ZapSig::EncodeMethod(pMethod,
+ (Module *) referencingModule,
+ pSigBuilder,
+ pEncodeModuleContext,
+ pfnEncodeModule, NULL,
+ pResolvedToken, pConstrainedResolvedToken);
+ _ASSERTE(fSuccess);
+
+ COOPERATIVE_TRANSITION_END();
+}
+
+mdToken CEECompileInfo::TryEncodeMethodAsToken(
+ CORINFO_METHOD_HANDLE handle,
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_MODULE_HANDLE * referencingModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc * pMethod = GetMethod(handle);
+
+#ifdef FEATURE_READYTORUN_COMPILER
+ if (IsReadyToRunCompilation())
+ {
+ _ASSERTE(pResolvedToken != NULL);
+
+ Module * pReferencingModule = (Module *)pResolvedToken->tokenScope;
+
+ if (!pReferencingModule->IsInCurrentVersionBubble())
+ return mdTokenNil;
+
+ unsigned methodToken = pResolvedToken->token;
+
+ switch (TypeFromToken(methodToken))
+ {
+ case mdtMethodDef:
+ if (pReferencingModule->LookupMethodDef(methodToken) != pMethod)
+ return mdTokenNil;
+ break;
+
+ case mdtMemberRef:
+ if (pReferencingModule->LookupMemberRefAsMethod(methodToken) != pMethod)
+ return mdTokenNil;
+ break;
+
+ default:
+ return mdTokenNil;
+ }
+
+ *referencingModule = CORINFO_MODULE_HANDLE(pReferencingModule);
+ return methodToken;
+ }
+#endif // FEATURE_READYTORUN_COMPILER
+
+ Module *pModule = pMethod->GetModule();
+ if (!pModule->IsInCurrentVersionBubble())
+ {
+ Module * pTargetModule = GetAppDomain()->ToCompilationDomain()->GetTargetModule();
+ *referencingModule = CORINFO_MODULE_HANDLE(pTargetModule);
+ return pTargetModule->LookupMemberRefByMethodDesc(pMethod);
+ }
+ else
+ {
+ mdToken defToken = pMethod->GetMemberDef();
+ if (pModule->LookupMethodDef(defToken) == pMethod)
+ {
+ *referencingModule = CORINFO_MODULE_HANDLE(pModule);
+ return defToken;
+ }
+ }
+
+ return mdTokenNil;
+}
+
+DWORD CEECompileInfo::TryEncodeMethodSlot(CORINFO_METHOD_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc * pMethod = GetMethod(handle);
+
+#ifdef FEATURE_READYTORUN_COMPILER
+ if (IsReadyToRunCompilation())
+ {
+ // We can only encode real interface methods as slots
+ if (!pMethod->IsInterface() || pMethod->IsStatic())
+ return (DWORD)-1;
+
+ // And only if the interface lives in the current version bubble.
+ // It may be possible to relax this restriction if we can guarantee that the external interfaces are
+ // really not changing. We will play it safe for now.
+ if (!pMethod->GetModule()->IsInCurrentVersionBubble())
+ return (DWORD)-1;
+ }
+#endif
+
+ return pMethod->GetSlot();
+}
+
+void EncodeTypeInDictionarySignature(
+ Module * pInfoModule,
+ SigPointer ptr,
+ SigBuilder * pSigBuilder,
+ LPVOID encodeContext,
+ ENCODEMODULE_CALLBACK pfnEncodeModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ CorElementType typ = ELEMENT_TYPE_END;
+ IfFailThrow(ptr.GetElemType(&typ));
+
+ if (typ == ELEMENT_TYPE_INTERNAL)
+ {
+ TypeHandle th;
+
+ IfFailThrow(ptr.GetPointer((void**)&th));
+
+ ZapSig zapSig(pInfoModule, encodeContext, ZapSig::NormalTokens,
+ (EncodeModuleCallback) pfnEncodeModule, NULL);
+
+ //
+ // Write class
+ //
+ BOOL fSuccess;
+ fSuccess = zapSig.GetSignatureForTypeHandle(th, pSigBuilder);
+ _ASSERTE(fSuccess);
+
+ return;
+ }
+ else if (typ == ELEMENT_TYPE_GENERICINST)
+ {
+ //
+ // SigParser expects ELEMENT_TYPE_MODULE_ZAPSIG to be before ELEMENT_TYPE_GENERICINST
+ //
+ SigPointer peek(ptr);
+ ULONG instType = 0;
+ IfFailThrow(peek.GetData(&instType));
+ _ASSERTE(instType == ELEMENT_TYPE_INTERNAL);
+
+ TypeHandle th;
+ IfFailThrow(peek.GetPointer((void **)&th));
+
+ Module * pTypeHandleModule = th.GetModule();
+
+ if (!pTypeHandleModule->IsInCurrentVersionBubble())
+ {
+ pTypeHandleModule = GetAppDomain()->ToCompilationDomain()->GetTargetModule();
+ }
+
+ if (pTypeHandleModule != pInfoModule)
+ {
+ DWORD index = pfnEncodeModule(encodeContext, (CORINFO_MODULE_HANDLE)pTypeHandleModule);
+ _ASSERTE(index != ENCODE_MODULE_FAILED);
+
+ pSigBuilder->AppendElementType((CorElementType) ELEMENT_TYPE_MODULE_ZAPSIG);
+ pSigBuilder->AppendData(index);
+ }
+
+ pSigBuilder->AppendElementType(ELEMENT_TYPE_GENERICINST);
+
+ EncodeTypeInDictionarySignature(pTypeHandleModule, ptr, pSigBuilder, encodeContext, pfnEncodeModule);
+ IfFailThrow(ptr.SkipExactlyOne());
+
+ ULONG argCnt = 0; // Get number of parameters
+ IfFailThrow(ptr.GetData(&argCnt));
+ pSigBuilder->AppendData(argCnt);
+
+ while (argCnt--)
+ {
+ EncodeTypeInDictionarySignature(pInfoModule, ptr, pSigBuilder, encodeContext, pfnEncodeModule);
+ IfFailThrow(ptr.SkipExactlyOne());
+ }
+
+ return;
+ }
+
+ pSigBuilder->AppendElementType(typ);
+
+ if (!CorIsPrimitiveType(typ))
+ {
+ switch (typ)
+ {
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ {
+ ULONG varNum;
+ // Copy the variable number
+ IfFailThrow(ptr.GetData(&varNum));
+ pSigBuilder->AppendData(varNum);
+ }
+ break;
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_TYPEDBYREF:
+ break;
+
+ case ELEMENT_TYPE_BYREF: //fallthru
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_PINNED:
+ case ELEMENT_TYPE_SZARRAY:
+ EncodeTypeInDictionarySignature(pInfoModule, ptr, pSigBuilder, encodeContext, pfnEncodeModule);
+ IfFailThrow(ptr.SkipExactlyOne());
+ break;
+
+ case ELEMENT_TYPE_ARRAY:
+ {
+ EncodeTypeInDictionarySignature(pInfoModule, ptr, pSigBuilder, encodeContext, pfnEncodeModule);
+ IfFailThrow(ptr.SkipExactlyOne());
+
+ ULONG rank = 0; // Get rank
+ IfFailThrow(ptr.GetData(&rank));
+ pSigBuilder->AppendData(rank);
+
+ if (rank)
+ {
+ ULONG nsizes = 0;
+ IfFailThrow(ptr.GetData(&nsizes));
+ pSigBuilder->AppendData(nsizes);
+
+ while (nsizes--)
+ {
+ ULONG data = 0;
+ IfFailThrow(ptr.GetData(&data));
+ pSigBuilder->AppendData(data);
+ }
+
+ ULONG nlbounds = 0;
+ IfFailThrow(ptr.GetData(&nlbounds));
+ pSigBuilder->AppendData(nlbounds);
+
+ while (nlbounds--)
+ {
+ ULONG data = 0;
+ IfFailThrow(ptr.GetData(&data));
+ pSigBuilder->AppendData(data);
+ }
+ }
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Unexpected element in signature");
+ }
+ }
+}
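+
+// For illustration (a sketch with made-up indices): encoding List<Foo>, where the
+// List`1 definition lives in a module other than pInfoModule, produces roughly
+//
+// ELEMENT_TYPE_MODULE_ZAPSIG, <index from pfnEncodeModule>,
+// ELEMENT_TYPE_GENERICINST, <encoding of List`1>, 1 /* argCnt */, <encoding of Foo>
+//
+// The module override names the module of the generic type definition; each type
+// argument is then encoded recursively and may carry its own override.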
+
+void CEECompileInfo::EncodeGenericSignature(
+ LPVOID signature,
+ BOOL fMethod,
+ SigBuilder * pSigBuilder,
+ LPVOID encodeContext,
+ ENCODEMODULE_CALLBACK pfnEncodeModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ Module * pInfoModule = MscorlibBinder::GetModule();
+
+ SigPointer ptr((PCCOR_SIGNATURE)signature);
+
+ ULONG entryKind; // DictionaryEntryKind
+ IfFailThrow(ptr.GetData(&entryKind));
+ pSigBuilder->AppendData(entryKind);
+
+ if (!fMethod)
+ {
+ ULONG dictionaryIndex = 0;
+ IfFailThrow(ptr.GetData(&dictionaryIndex));
+
+ pSigBuilder->AppendData(dictionaryIndex);
+ }
+
+ switch (entryKind)
+ {
+ case DeclaringTypeHandleSlot:
+ EncodeTypeInDictionarySignature(pInfoModule, ptr, pSigBuilder, encodeContext, pfnEncodeModule);
+ IfFailThrow(ptr.SkipExactlyOne());
+ // fall through
+
+ case TypeHandleSlot:
+ EncodeTypeInDictionarySignature(pInfoModule, ptr, pSigBuilder, encodeContext, pfnEncodeModule);
+ IfFailThrow(ptr.SkipExactlyOne());
+ break;
+
+ case ConstrainedMethodEntrySlot:
+ EncodeTypeInDictionarySignature(pInfoModule, ptr, pSigBuilder, encodeContext, pfnEncodeModule);
+ IfFailThrow(ptr.SkipExactlyOne());
+ // fall through
+
+ case MethodDescSlot:
+ case MethodEntrySlot:
+ case DispatchStubAddrSlot:
+ {
+ EncodeTypeInDictionarySignature(pInfoModule, ptr, pSigBuilder, encodeContext, pfnEncodeModule);
+ IfFailThrow(ptr.SkipExactlyOne());
+
+ ULONG methodFlags;
+ IfFailThrow(ptr.GetData(&methodFlags));
+ pSigBuilder->AppendData(methodFlags);
+
+ if ((methodFlags & ENCODE_METHOD_SIG_SlotInsteadOfToken) == 0)
+ {
+ EncodeTypeInDictionarySignature(pInfoModule, ptr, pSigBuilder, encodeContext, pfnEncodeModule);
+ IfFailThrow(ptr.SkipExactlyOne());
+ }
+
+ ULONG tokenOrSlot;
+ IfFailThrow(ptr.GetData(&tokenOrSlot));
+ pSigBuilder->AppendData(tokenOrSlot);
+
+ if (methodFlags & ENCODE_METHOD_SIG_MethodInstantiation)
+ {
+ DWORD nGenericMethodArgs;
+ IfFailThrow(ptr.GetData(&nGenericMethodArgs));
+ pSigBuilder->AppendData(nGenericMethodArgs);
+
+ for (DWORD i = 0; i < nGenericMethodArgs; i++)
+ {
+ EncodeTypeInDictionarySignature(pInfoModule, ptr, pSigBuilder, encodeContext, pfnEncodeModule);
+ IfFailThrow(ptr.SkipExactlyOne());
+ }
+ }
+ }
+ break;
+
+ case FieldDescSlot:
+ {
+ EncodeTypeInDictionarySignature(pInfoModule, ptr, pSigBuilder, encodeContext, pfnEncodeModule);
+ IfFailThrow(ptr.SkipExactlyOne());
+
+ DWORD fieldIndex;
+ IfFailThrow(ptr.GetData(&fieldIndex));
+ pSigBuilder->AppendData(fieldIndex);
+ }
+ break;
+
+ default:
+ _ASSERTE(false);
+ }
+
+ ULONG dictionarySlot;
+ IfFailThrow(ptr.GetData(&dictionarySlot));
+ pSigBuilder->AppendData(dictionarySlot);
+}
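+
+// The resulting stream is, schematically:
+//
+// <entryKind> [<dictionaryIndex>, for type dictionaries only] <kind-specific payload> <dictionarySlot>
+//
+// For a TypeHandleSlot entry the payload is a single type encoding; for a
+// MethodEntrySlot it is a type, the method flags, an optional second type (present
+// when a token rather than a slot follows), the token-or-slot value, and an
+// optional method instantiation.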
+
+void CEECompileInfo::EncodeField(
+ CORINFO_MODULE_HANDLE referencingModule,
+ CORINFO_FIELD_HANDLE handle,
+ SigBuilder * pSigBuilder,
+ LPVOID encodeContext,
+ ENCODEMODULE_CALLBACK pfnEncodeModule,
+ CORINFO_RESOLVED_TOKEN * pResolvedToken)
+{
+ STANDARD_VM_CONTRACT;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ ZapSig::EncodeField(GetField(handle),
+ (Module *) referencingModule,
+ pSigBuilder,
+ encodeContext,
+ pfnEncodeModule,
+ pResolvedToken);
+
+ COOPERATIVE_TRANSITION_END();
+}
+
+BOOL CEECompileInfo::IsEmptyString(mdString token,
+ CORINFO_MODULE_HANDLE module)
+{
+ STANDARD_VM_CONTRACT;
+
+ BOOL fRet = FALSE;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ EEStringData strData;
+ ((Module *)module)->InitializeStringData(token, &strData, NULL);
+ fRet = (strData.GetCharCount() == 0);
+
+ COOPERATIVE_TRANSITION_END();
+
+ return fRet;
+}
+
+#ifdef FEATURE_READYTORUN_COMPILER
+CORCOMPILE_FIXUP_BLOB_KIND CEECompileInfo::GetFieldBaseOffset(
+ CORINFO_CLASS_HANDLE classHnd,
+ DWORD * pBaseOffset)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodTable * pMT = (MethodTable *)classHnd;
+ Module * pModule = pMT->GetModule();
+
+ if (!pMT->IsLayoutFixedInCurrentVersionBubble())
+ {
+ return pMT->IsValueType() ? ENCODE_CHECK_FIELD_OFFSET : ENCODE_FIELD_OFFSET;
+ }
+
+ if (pMT->IsValueType())
+ {
+ // Value type with layout fixed in the version bubble: no field-offset fixup needed
+ return (CORCOMPILE_FIXUP_BLOB_KIND)0;
+ }
+
+ if (pMT->GetParentMethodTable()->IsInheritanceChainLayoutFixedInCurrentVersionBubble())
+ {
+ // The whole base-class chain has fixed layout, so instance field offsets are stable
+ return (CORCOMPILE_FIXUP_BLOB_KIND)0;
+ }
+
+ if (pMT->HasLayout())
+ {
+ // We won't try to be smart for classes with layout.
+ // They are complex to get right, and very rare anyway.
+ return ENCODE_FIELD_OFFSET;
+ }
+
+ *pBaseOffset = ReadyToRunInfo::GetFieldBaseOffset(pMT);
+ return ENCODE_FIELD_BASE_OFFSET;
+}
+
+BOOL CEECompileInfo::NeedsTypeLayoutCheck(CORINFO_CLASS_HANDLE classHnd)
+{
+ STANDARD_VM_CONTRACT;
+
+ TypeHandle th(classHnd);
+
+ if (th.IsTypeDesc())
+ return FALSE;
+
+ MethodTable * pMT = th.AsMethodTable();
+
+ if (!pMT->IsValueType())
+ return FALSE;
+
+ return !pMT->IsLayoutFixedInCurrentVersionBubble();
+}
+
+extern void ComputeGCRefMap(MethodTable * pMT, BYTE * pGCRefMap, size_t cbGCRefMap);
+
+void CEECompileInfo::EncodeTypeLayout(CORINFO_CLASS_HANDLE classHandle, SigBuilder * pSigBuilder)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodTable * pMT = TypeHandle(classHandle).AsMethodTable();
+ _ASSERTE(pMT->IsValueType());
+
+ DWORD dwSize = pMT->GetNumInstanceFieldBytes();
+ DWORD dwAlignment = CEEInfo::getClassAlignmentRequirementStatic(pMT);
+
+ DWORD dwFlags = 0;
+
+#ifdef FEATURE_HFA
+ if (pMT->IsHFA())
+ dwFlags |= READYTORUN_LAYOUT_HFA;
+#endif
+
+ // Request checks for everything: alignment and GC layout
+ dwFlags |= READYTORUN_LAYOUT_Alignment;
+ if (dwAlignment == sizeof(void *))
+ dwFlags |= READYTORUN_LAYOUT_Alignment_Native;
+
+ dwFlags |= READYTORUN_LAYOUT_GCLayout;
+ if (!pMT->ContainsPointers())
+ dwFlags |= READYTORUN_LAYOUT_GCLayout_Empty;
+
+ pSigBuilder->AppendData(dwFlags);
+
+ // Size is checked unconditionally
+ pSigBuilder->AppendData(dwSize);
+
+#ifdef FEATURE_HFA
+ if (dwFlags & READYTORUN_LAYOUT_HFA)
+ {
+ pSigBuilder->AppendData(pMT->GetHFAType());
+ }
+#endif
+
+ if ((dwFlags & READYTORUN_LAYOUT_Alignment) && !(dwFlags & READYTORUN_LAYOUT_Alignment_Native))
+ {
+ pSigBuilder->AppendData(dwAlignment);
+ }
+
+ if ((dwFlags & READYTORUN_LAYOUT_GCLayout) && !(dwFlags & READYTORUN_LAYOUT_GCLayout_Empty))
+ {
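+ // One bit per pointer-sized slot, rounded up to whole bytes: e.g. a 24-byte
+ // struct on a 64-bit target covers 3 slots and needs 1 byte of GC ref map.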
+ size_t cbGCRefMap = (dwSize / sizeof(TADDR) + 7) / 8;
+ _ASSERTE(cbGCRefMap > 0);
+
+ BYTE * pGCRefMap = (BYTE *)_alloca(cbGCRefMap);
+
+ ComputeGCRefMap(pMT, pGCRefMap, cbGCRefMap);
+
+ for (size_t i = 0; i < cbGCRefMap; i++)
+ pSigBuilder->AppendByte(pGCRefMap[i]);
+ }
+}
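+
+// The resulting layout blob is, schematically:
+//
+// <dwFlags> <dwSize> [<HFA type>] [<alignment>] [<GC ref map bytes>]
+//
+// where <HFA type> appears iff READYTORUN_LAYOUT_HFA is set, and the alignment and
+// GC ref map fields appear iff their flag is set and the corresponding
+// _Native/_Empty shortcut flag is clear.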
+
+BOOL CEECompileInfo::AreAllClassesFullyLoaded(CORINFO_MODULE_HANDLE moduleHandle)
+{
+ STANDARD_VM_CONTRACT;
+
+ return ((Module *)moduleHandle)->AreAllClassesFullyLoaded();
+}
+
+#endif // FEATURE_READYTORUN_COMPILER
+
+
+#define OMFConst_Read 0x0001
+#define OMFConst_Write 0x0002
+#define OMFConst_Exec 0x0004
+#define OMFConst_F32Bit 0x0008
+#define OMFConst_ReservedBits1 0x00f0
+#define OMFConst_FSel 0x0100
+#define OMFConst_FAbs 0x0200
+#define OMFConst_ReservedBits2 0x0C00
+#define OMFConst_FGroup 0x1000
+#define OMFConst_ReservedBits3 0xE000
+
+#define OMF_StandardText (OMFConst_FSel|OMFConst_F32Bit|OMFConst_Exec|OMFConst_Read) // 0x10D
+#define OMF_SentinelType (OMFConst_FAbs|OMFConst_F32Bit) // 0x208
+
+
+// ----------------------------------------------------------------------------
+// NGEN PDB SUPPORT
+//
+// The NGEN PDB format consists of structs stacked together into buffers, which are
+// passed to the PDB API. For a description of the structures, see
+// InternalApis\vctools\inc\cvinfo.h.
+//
+// The interface to the PDB used below is NGEN-specific, and is exposed via
+// diasymreader.dll. For a description of this interface, see ISymNGenWriter2 inside
+// public\devdiv\inc\corsym.h and debugger\sh\symwrtr\ngenpdbwriter.h,cpp
+// ----------------------------------------------------------------------------
+
+
+BOOL CEECompileInfo::GetIsGeneratingNgenPDB()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_fGeneratingNgenPDB;
+}
+
+void CEECompileInfo::SetIsGeneratingNgenPDB(BOOL fGeneratingNgenPDB)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ m_fGeneratingNgenPDB = fGeneratingNgenPDB;
+}
+
+BOOL IsNgenPDBCompilationProcess()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return IsCompilationProcess() && g_pCEECompileInfo->GetIsGeneratingNgenPDB();
+}
+
+// This is the prototype of "CreateNGenPdbWriter" exported by diasymreader.dll
+typedef HRESULT (__stdcall *CreateNGenPdbWriter_t)(const WCHAR *pwszNGenImagePath, const WCHAR *pwszPdbPath, void **ppvObj);
+
+// Allocator passed in when requesting boundaries information for the PDB
+BYTE* SimpleNew(void *, size_t cBytes)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BYTE * p = new BYTE[cBytes];
+ return p;
+}
+
+// By PDB convention, any IPs that don't map to source code (e.g., prolog or epilog)
+// are mapped to the special line number 0xFeeFee.
+const int kUnmappedIP = 0xFeeFee;
+
+
+// ----------------------------------------------------------------------------
+// Simple pair of offsets for each source file name. Pair includes its offset into the
+// PDB string table, and its offset in the files checksum table.
+//
+struct DocNameOffsets
+{
+ ULONG32 m_dwStrTableOffset;
+ ULONG32 m_dwChksumTableOffset;
+ DocNameOffsets(ULONG32 dwStrTableOffset, ULONG32 dwChksumTableOffset)
+ : m_dwStrTableOffset(dwStrTableOffset), m_dwChksumTableOffset(dwChksumTableOffset)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ DocNameOffsets()
+ : m_dwStrTableOffset((ULONG32) -1), m_dwChksumTableOffset((ULONG32) -1)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+};
+
+
+// ----------------------------------------------------------------------------
+// This is used when creating the hash table which maps source file names to
+// DocNameOffsets instances. The only interesting stuff here is that:
+// * Equality is determined by a case-insensitive comparison on the source file
+// names
+// * Hashing is done by hashing the source file names
+//
+struct DocNameToOffsetMapTraits : public NoRemoveSHashTraits < MapSHashTraits<LPCSTR, DocNameOffsets> >
+{
+public:
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (k1 == NULL && k2 == NULL)
+ return TRUE;
+ if (k1 == NULL || k2 == NULL)
+ return FALSE;
+ return _stricmp(k1, k2) == 0;
+ }
+
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (k == NULL)
+ return 0;
+ else
+ return HashiStringA(k);
+ }
+
+ typedef LPCSTR KEY;
+ typedef DocNameOffsets VALUE;
+ typedef NoRemoveSHashTraits < MapSHashTraits<LPCSTR, DocNameOffsets> > PARENT;
+ typedef PARENT::element_t element_t;
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; return element_t((KEY)0,VALUE((ULONG32) -1, (ULONG32) -1)); }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e.Key() == (KEY)0; }
+};
+
+
+// ----------------------------------------------------------------------------
+// Hash table that maps the UTF-8 string of a source file name to its corresponding
+// DocNameOffsets
+//
+class DocNameToOffsetMap : public SHash<DocNameToOffsetMapTraits>
+{
+ typedef SHash<DocNameToOffsetMapTraits> PARENT;
+ typedef LPCSTR KEY;
+ typedef DocNameOffsets VALUE;
+
+public:
+ void Add(KEY key, VALUE value)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(key != (KEY)0);
+ }
+ CONTRACTL_END;
+
+ PARENT::Add(KeyValuePair<KEY,VALUE>(key, value));
+ }
+
+ void AddOrReplace(KEY key, VALUE value)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(key != (KEY)0);
+ }
+ CONTRACTL_END;
+
+ PARENT::AddOrReplace(KeyValuePair<KEY,VALUE>(key, value));
+ }
+
+ BOOL Lookup(KEY key, VALUE* pValue)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(key != (KEY)0);
+ }
+ CONTRACTL_END;
+
+ const KeyValuePair<KEY,VALUE> *pRet = PARENT::LookupPtr(key);
+ if (pRet == NULL)
+ return FALSE;
+
+ *pValue = pRet->Value();
+ return TRUE;
+ }
+};
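+
+// Typical usage (a sketch; szUtf8FileName and dwStrTableOffset are placeholder names):
+//
+// DocNameToOffsetMap map;
+// map.Add(szUtf8FileName, DocNameOffsets(dwStrTableOffset, (ULONG32) -1));
+// DocNameOffsets offsets;
+// if (map.Lookup(szUtf8FileName, &offsets)) { /* use offsets.m_dwStrTableOffset */ }
+//
+// Lookups are case-insensitive, per DocNameToOffsetMapTraits::Equals.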
+
+// ----------------------------------------------------------------------------
+// Simple class to sort ICorDebugInfo::OffsetMapping arrays by IL offset
+//
+class QuickSortILNativeMapByIL : public CQuickSort<ICorDebugInfo::OffsetMapping>
+{
+ public:
+ QuickSortILNativeMapByIL(
+ ICorDebugInfo::OffsetMapping * rgMap,
+ int cEntries)
+ : CQuickSort<ICorDebugInfo::OffsetMapping>(rgMap, cEntries)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ int Compare(ICorDebugInfo::OffsetMapping * pFirst,
+ ICorDebugInfo::OffsetMapping * pSecond)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (pFirst->ilOffset < pSecond->ilOffset)
+ return -1;
+ else if (pFirst->ilOffset == pSecond->ilOffset)
+ return 0;
+ else
+ return 1;
+ }
+};
+
+// ----------------------------------------------------------------------------
+// Simple structure used when merging the JIT manager's IL-to-native maps
+// (ICorDebugInfo::OffsetMapping) with the IL PDB's source-to-IL map.
+//
+struct MapIndexPair
+{
+public:
+ // Index into ICorDebugInfo::OffsetMapping
+ ULONG32 m_iIlNativeMap;
+
+ // Corresponding index into the IL PDB's sequence point arrays
+ ULONG32 m_iSeqPoints;
+
+ MapIndexPair() :
+ m_iIlNativeMap((ULONG32) -1),
+ m_iSeqPoints((ULONG32) -1)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+};
+
+// ----------------------------------------------------------------------------
+// Simple class to sort MapIndexPairs by native IP offset. A MapIndexPair sorts "earlier"
+// if its m_iIlNativeMap index gives you an IP offset (i.e.,
+// m_rgIlNativeMap[m_iIlNativeMap].nativeOffset) that is smaller.
+//
+class QuickSortMapIndexPairsByNativeOffset : public CQuickSort<MapIndexPair>
+{
+ public:
+ QuickSortMapIndexPairsByNativeOffset(
+ MapIndexPair * rgMap,
+ int cEntries,
+ ICorDebugInfo::OffsetMapping * rgIlNativeMap,
+ ULONG32 cIlNativeMap)
+ : CQuickSort<MapIndexPair>(rgMap, cEntries),
+ m_rgIlNativeMap(rgIlNativeMap),
+ m_cIlNativeMap(cIlNativeMap)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ int Compare(MapIndexPair * pFirst,
+ MapIndexPair * pSecond)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pFirst->m_iIlNativeMap < m_cIlNativeMap);
+ _ASSERTE(pSecond->m_iIlNativeMap < m_cIlNativeMap);
+
+ DWORD dwFirstNativeOffset = m_rgIlNativeMap[pFirst->m_iIlNativeMap].nativeOffset;
+ DWORD dwSecondNativeOffset = m_rgIlNativeMap[pSecond->m_iIlNativeMap].nativeOffset;
+
+ if (dwFirstNativeOffset < dwSecondNativeOffset)
+ return -1;
+ else if (dwFirstNativeOffset == dwSecondNativeOffset)
+ return 0;
+ else
+ return 1;
+ }
+
+protected:
+ ICorDebugInfo::OffsetMapping * m_rgIlNativeMap;
+ ULONG32 m_cIlNativeMap;
+};
+
+// ----------------------------------------------------------------------------
+// The following 3 classes contain the code to generate PDBs
+//
+
+// NGEN always generates PDBs with public symbols lists (so tools can map IP ranges to
+// methods). This bitmask indicates what extra info should be added to the PDB
+enum PDBExtraData
+{
+ // Add string table subsection, files checksum subsection, and lines subsection to
+ // allow tools to map IP ranges to source lines.
+ kPDBLines = 0x00000001,
+};
+
+
+// ----------------------------------------------------------------------------
+// Manages generating all PDB data for an NGENd image. One of these is instantiated per
+// run of "ngen createpdb"
+//
+class NGenPdbWriter
+{
+private:
+ CreateNGenPdbWriter_t m_Create;
+ HMODULE m_hModule;
+ ReleaseHolder<ISymUnmanagedBinder> m_pBinder;
+ LPCWSTR m_wszPdbPath;
+ DWORD m_dwExtraData;
+ LPCWSTR m_wszManagedPDBSearchPath;
+
+public:
+ NGenPdbWriter (LPCWSTR wszNativeImagePath, LPCWSTR wszPdbPath, DWORD dwExtraData, LPCWSTR wszManagedPDBSearchPath)
+ : m_Create(NULL),
+ m_hModule(NULL),
+ m_wszPdbPath(wszPdbPath),
+ m_dwExtraData(dwExtraData),
+ m_wszManagedPDBSearchPath(wszManagedPDBSearchPath)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ HRESULT Load()
+ {
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ m_hModule = WszLoadLibrary(W("diasymreader.dll"));
+ if (m_hModule == NULL)
+ {
+ // Capture the error code before logging, since logging may clobber it
+ DWORD dwLastError = GetLastError();
+ GetSvcLogger()->Printf(
+ W("Unable to load diasymreader.dll. Please ensure that version 11 or greater of diasymreader.dll is on the path. You can typically find this DLL in the desktop .NET install directory for 4.5 or greater. Error='%d'\n"),
+ dwLastError);
+ return HRESULT_FROM_WIN32(dwLastError);
+ }
+
+ m_Create = reinterpret_cast<CreateNGenPdbWriter_t>(GetProcAddress(m_hModule, "CreateNGenPdbWriter"));
+ if (m_Create == NULL)
+ {
+ // Capture the error code before logging, since logging may clobber it
+ DWORD dwLastError = GetLastError();
+ GetSvcLogger()->Printf(
+ W("An incorrect version of diasymreader.dll was found. Please ensure that version 11 or greater of diasymreader.dll is on the path. You can typically find this DLL in the desktop .NET install directory for 4.5 or greater. Error='%d'\n"),
+ dwLastError);
+ return HRESULT_FROM_WIN32(dwLastError);
+ }
+
+ if ((m_dwExtraData & kPDBLines) != 0)
+ {
+ hr = FakeCoCreateInstanceEx(
+ CLSID_CorSymBinder_SxS,
+ NULL,
+ IID_ISymUnmanagedBinder,
+ (void**)&m_pBinder,
+ NULL);
+ }
+
+ return hr;
+ }
+
+ HRESULT WritePDBDataForModule(Module * pModule);
+
+ ~NGenPdbWriter()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_hModule)
+ FreeLibrary(m_hModule);
+
+ m_Create = NULL;
+ }
+};
+
+// ----------------------------------------------------------------------------
+// Manages generating all PDB data for an EE Module. Directly responsible for writing the
+// string table and file checksum subsections. One of these is instantiated per Module
+// found when using the ModuleIterator over the CORINFO_ASSEMBLY_HANDLE corresponding to
+// this invocation of NGEN createpdb.
+//
+class NGenModulePdbWriter
+{
+private:
+ // Simple holder to coordinate the PDB calls to OpenModW and CloseMod on a given PDB
+ // Mod *.
+ class PDBModHolder
+ {
+ private:
+ ReleaseHolder<ISymNGenWriter2> m_pWriter;
+ LPBYTE m_pMod;
+
+ public:
+ PDBModHolder()
+ : m_pWriter(NULL),
+ m_pMod(NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ ~PDBModHolder()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if ((m_pWriter != NULL) && (m_pMod != NULL))
+ {
+ m_pWriter->CloseMod(m_pMod);
+ }
+ }
+
+ HRESULT Open(ISymNGenWriter2 * pWriter, LPCWSTR wszModule, LPCWSTR wszObjFile)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(m_pWriter == NULL);
+
+ m_pWriter = pWriter;
+ m_pWriter->AddRef();
+
+ _ASSERTE(m_pMod == NULL);
+
+ HRESULT hr = m_pWriter->OpenModW(wszModule, wszObjFile, &m_pMod);
+ if (FAILED(hr))
+ {
+ m_pMod = NULL;
+ }
+ return hr;
+ }
+
+ LPBYTE GetModPtr()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(m_pMod != NULL);
+ return m_pMod;
+ }
+ };
+
+private:
+ // This holder ensures we delete a half-generated PDB file if we manage to create it
+ // on disk, but fail at some point after it was created. When NGenModulePdbWriter is
+ // destroyed, m_deletePDBFileHolder's destructor will delete the PDB file if there
+ // was a prior error.
+ //
+ //************* NOTE! *************
+ //
+ // These members should appear FIRST so that they get destructed last. That way, if
+ // we encounter an error generating the PDB file, we ensure that we release all PDB
+ // interfaces and close the PDB file BEFORE this holder tries to *delete* the PDB
+ // file. Also, keep these two in this relative order, so that m_deletePDBFileHolder
+ // is destructed before m_wszPDBFilePath.
+ WCHAR m_wszPDBFilePath[MAX_PATH];
+ DeleteFileHolder m_deletePDBFileHolder;
+ //
+ // ************* NOTE! *************
+
+ CreateNGenPdbWriter_t m_Create;
+ LPCWSTR m_wszPdbPath;
+ ReleaseHolder<ISymNGenWriter2> m_pWriter;
+ Module * m_pModule;
+ DWORD m_dwExtraData;
+ LPCWSTR m_wszManagedPDBSearchPath;
+
+ // Interfaces for reading IL PDB info
+ ReleaseHolder<ISymUnmanagedBinder> m_pBinder;
+ ReleaseHolder<ISymUnmanagedReader> m_pReader;
+ NewInterfaceArrayHolder<ISymUnmanagedDocument> m_rgpDocs; // All docs in the PDB Mod
+ ULONG32 m_cDocs;
+
+ // Keeps track of source file names and how they map to offsets in the relevant PDB
+ // subsections.
+ DocNameToOffsetMap m_docNameToOffsetMap;
+
+ // Holds a PDB Mod *
+ PDBModHolder m_pdbMod;
+
+ // Buffer in which to store the entire string table (i.e., list of all source file
+ // names). This buffer is held alive as long as m_docNameToOffsetMap is needed, as
+ // the latter contains offsets into this buffer.
+ NewArrayHolder<BYTE> m_rgbStringTableSubsection;
+
+ HRESULT InitILPdbData();
+ HRESULT WriteStringTable();
+ HRESULT WriteFileChecksums();
+
+public:
+ NGenModulePdbWriter(CreateNGenPdbWriter_t Create, LPCWSTR wszPdbPath, DWORD dwExtraData, ISymUnmanagedBinder * pBinder, Module * pModule, LPCWSTR wszManagedPDBSearchPath)
+ : m_Create(Create),
+ m_wszPdbPath(wszPdbPath),
+ m_pWriter(NULL),
+ m_pModule(pModule),
+ m_dwExtraData(dwExtraData),
+ m_wszManagedPDBSearchPath(wszManagedPDBSearchPath),
+ m_pBinder(pBinder)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pBinder != NULL)
+ m_pBinder->AddRef();
+
+ ZeroMemory(m_wszPDBFilePath, sizeof(m_wszPDBFilePath));
+ }
+
+ HRESULT WritePDBData();
+
+ HRESULT WriteMethodPDBData(PEImageLayout * pLoadedLayout, USHORT iCodeSection, BYTE *pCodeBase, MethodDesc * hotDesc, PCODE start);
+};
+
+// ----------------------------------------------------------------------------
+// Manages generating the lines subsection in the PDB data for a given managed method.
+// One of these is instantiated per managed method we find when iterating through all
+// methods in a Module.
+//
+class NGenMethodLinesPdbWriter
+{
+private:
+ ISymNGenWriter2 * m_pWriter;
+ LPBYTE m_pMod;
+ ISymUnmanagedReader * m_pReader;
+ MethodDesc * m_hotDesc;
+ PCODE m_start;
+ USHORT m_iCodeSection;
+ TADDR m_addrCodeSection;
+ const IJitManager::MethodRegionInfo * m_pMethodRegionInfo;
+ EECodeInfo * m_pCodeInfo;
+ DocNameToOffsetMap * m_pDocNameToOffsetMap;
+
+ // IL-to-native map from JIT manager
+ ULONG32 m_cIlNativeMap;
+ NewArrayHolder<ICorDebugInfo::OffsetMapping> m_rgIlNativeMap;
+
+ // IL PDB info for this one method
+ NewInterfaceArrayHolder<ISymUnmanagedDocument> m_rgpDocs; // Source files defining this method.
+ NewArrayHolder<ULONG32> m_rgilOffsets; // Array of IL offsets for this method
+ NewArrayHolder<ULONG32> m_rgnLineStarts; // Array of source lines for this method
+ ULONG32 m_cSeqPoints; // Count of above two parallel arrays
+
+ HRESULT WriteLinesSubsection(
+ ULONG32 ulCodeStartOffset,
+ ULONG32 cbCode,
+ MapIndexPair * rgMapIndexPairs,
+ ULONG32 cMapIndexPairs);
+
+ BOOL FinalizeLinesFileBlock(
+ CV_DebugSLinesFileBlockHeader_t * pLinesFileBlockHeader,
+ CV_Line_t * pLineBlockStart,
+ CV_Line_t * pLineBlockAfterEnd);
+
+public:
+ NGenMethodLinesPdbWriter(
+ ISymNGenWriter2 * pWriter,
+ LPBYTE pMod,
+ ISymUnmanagedReader * pReader,
+ MethodDesc * hotDesc,
+ PCODE start,
+ USHORT iCodeSection,
+ TADDR addrCodeSection,
+ const IJitManager::MethodRegionInfo * pMethodRegionInfo,
+ EECodeInfo * pCodeInfo,
+ DocNameToOffsetMap * pDocNameToOffsetMap)
+ : m_pWriter(pWriter),
+ m_pMod(pMod),
+ m_pReader(pReader),
+ m_hotDesc(hotDesc),
+ m_start(start),
+ m_iCodeSection(iCodeSection),
+ m_addrCodeSection(addrCodeSection),
+ m_pMethodRegionInfo(pMethodRegionInfo),
+ m_pCodeInfo(pCodeInfo),
+ m_pDocNameToOffsetMap(pDocNameToOffsetMap),
+ m_cIlNativeMap(0),
+ m_cSeqPoints(0)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ HRESULT WritePDBData();
+};
+
+// ----------------------------------------------------------------------------
+// NGenPdbWriter implementation
+
+
+
+//---------------------------------------------------------------------------------------
+//
+// Coordinates calling all the other classes & methods to generate PDB info for the
+// given Module
+//
+// Arguments:
+// pModule - EE Module to write PDB data for
+//
+
+HRESULT NGenPdbWriter::WritePDBDataForModule(Module * pModule)
+{
+ STANDARD_VM_CONTRACT;
+ NGenModulePdbWriter ngenModulePdbWriter(m_Create, m_wszPdbPath, m_dwExtraData, m_pBinder, pModule, m_wszManagedPDBSearchPath);
+ return ngenModulePdbWriter.WritePDBData();
+}
+
+
+// ----------------------------------------------------------------------------
+// NGenModulePdbWriter implementation
+
+
+//---------------------------------------------------------------------------------------
+//
+// Writes out all source files into the string table subsection for the PDB Mod*
+// controlled by this NGenModulePdbWriter. Updates m_docNameToOffsetMap to add string
+// table offset for each source file as it gets added.
+//
+HRESULT NGenModulePdbWriter::WriteStringTable()
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(m_pWriter != NULL);
+
+ HRESULT hr;
+ UINT64 cbStringTableEstimate =
+ sizeof(DWORD) +
+ sizeof(CV_DebugSSubsectionHeader_t) +
+ m_cDocs * (MAX_PATH + 1);
+ if (!FitsIn<ULONG32>(cbStringTableEstimate))
+ {
+ return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+
+ m_rgbStringTableSubsection = new BYTE[ULONG32(cbStringTableEstimate)];
+ LPBYTE pbStringTableSubsectionCur = m_rgbStringTableSubsection;
+
+ // Subsection signature
+ *((DWORD *) pbStringTableSubsectionCur) = CV_SIGNATURE_C13;
+ pbStringTableSubsectionCur += sizeof(DWORD);
+
+ // Subsection header
+ CV_DebugSSubsectionHeader_t * pSubSectHeader = (CV_DebugSSubsectionHeader_t *) pbStringTableSubsectionCur;
+ memset(pSubSectHeader, 0, sizeof(*pSubSectHeader));
+ pSubSectHeader->type = DEBUG_S_STRINGTABLE;
+ pbStringTableSubsectionCur += sizeof(*pSubSectHeader);
+ // pSubSectHeader->cbLen counts the number of bytes that appear AFTER the subsection
+ // header above (i.e., the size of the string table itself). We'll fill out
+ // pSubSectHeader->cbLen below, once it's calculated
+
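+ // The subsection built here is, schematically:
+ //
+ // DWORD signature (CV_SIGNATURE_C13)
+ // CV_DebugSSubsectionHeader_t header (type = DEBUG_S_STRINGTABLE, cbLen patched below)
+ // char[] NUL-terminated UTF-8 file names, back to back
+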
+ LPBYTE pbStringTableStart = pbStringTableSubsectionCur;
+
+ // The actual strings
+ for (ULONG32 i=0; i < m_cDocs; i++)
+ {
+ WCHAR wszURL[MAX_PATH];
+ ULONG32 cchURL;
+ hr = m_rgpDocs[i]->GetURL(_countof(wszURL), &cchURL, wszURL);
+ if (FAILED(hr))
+ return hr;
+
+ int cbWritten = WideCharToMultiByte(
+ CP_UTF8,
+ 0, // dwFlags
+ wszURL,
+ -1, // i.e., input is NULL-terminated
+ (LPSTR) pbStringTableSubsectionCur, // output: UTF8 string starts here
+ ULONG32(cbStringTableEstimate) -
+ int(pbStringTableSubsectionCur - m_rgbStringTableSubsection), // Available space
+ NULL, // lpDefaultChar
+ NULL // lpUsedDefaultChar
+ );
+ if (cbWritten == 0)
+ return HRESULT_FROM_WIN32(GetLastError());
+
+ // Remember the string table offset for later
+ m_docNameToOffsetMap.AddOrReplace(
+ (LPCSTR) pbStringTableSubsectionCur,
+ DocNameOffsets(
+ ULONG32(pbStringTableSubsectionCur - pbStringTableStart),
+ (ULONG32) -1));
+
+ pbStringTableSubsectionCur += cbWritten;
+ if (pbStringTableSubsectionCur >= (m_rgbStringTableSubsection + cbStringTableEstimate))
+ return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+
+ // Now that we know pSubSectHeader->cbLen, fill it in
+ pSubSectHeader->cbLen = CV_off32_t(pbStringTableSubsectionCur - pbStringTableStart);
+
+ // Subsection is now filled out, so use the PDB API to add it
+ hr = m_pWriter->ModAddSymbols(
+ m_pdbMod.GetModPtr(),
+ m_rgbStringTableSubsection,
+ int(pbStringTableSubsectionCur - m_rgbStringTableSubsection));
+ if (FAILED(hr))
+ return hr;
+
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// This takes care of actually loading the IL PDB itself, and initializing the
+// ISymUnmanaged* interfaces with module-level data from the IL PDB.
+//
+HRESULT NGenModulePdbWriter::InitILPdbData()
+{
+ // Load the managed PDB
+
+ ReleaseHolder<IUnknown> pUnk = NULL;
+ HRESULT hr = m_pModule->GetReadablePublicMetaDataInterface(ofReadOnly, IID_IMetaDataImport, (LPVOID *) &pUnk);
+ if (FAILED(hr))
+ {
+ GetSvcLogger()->Printf(
+ W("Unable to obtain metadata for '%s' Error: '0x%x'.\n"),
+ LPCWSTR(m_pModule->GetFile()->GetILimage()->GetPath()),
+ hr);
+ return hr;
+ }
+
+ hr = m_pBinder->GetReaderForFile(
+ pUnk,
+ m_pModule->GetFile()->GetILimage()->GetPath(),
+ m_wszManagedPDBSearchPath,
+ &m_pReader);
+ if (FAILED(hr))
+ {
+ GetSvcLogger()->Printf(
+ W("Unable to find managed PDB matching '%s'. Managed PDB search path: '%s'\n"),
+ LPCWSTR(m_pModule->GetFile()->GetILimage()->GetPath()),
+ (((m_wszManagedPDBSearchPath == NULL) || (*m_wszManagedPDBSearchPath == W('\0'))) ?
+ W("(not specified)") :
+ m_wszManagedPDBSearchPath));
+ return hr;
+ }
+
+ GetSvcLogger()->Log(W("Loaded managed PDB"));
+
+ // Grab the full path of the managed PDB so we can log it
+ WCHAR wszIlPdbPath[MAX_PATH];
+ ULONG32 cchIlPdbPath;
+ hr = m_pReader->GetSymbolStoreFileName(
+ _countof(wszIlPdbPath),
+ &cchIlPdbPath,
+ wszIlPdbPath);
+ if (FAILED(hr))
+ {
+ GetSvcLogger()->Log(W("\n"));
+ }
+ else
+ {
+ GetSvcLogger()->Printf(W(": '%s'\n"), wszIlPdbPath);
+ }
+
+ // Read all source files names from the IL PDB
+ ULONG32 cDocs;
+ hr = m_pReader->GetDocuments(
+ 0, // cDocsRequested
+ &cDocs,
+ NULL // Array
+ );
+ if (FAILED(hr))
+ return hr;
+
+ m_rgpDocs = new ISymUnmanagedDocument * [cDocs];
+ hr = m_pReader->GetDocuments(
+ cDocs,
+ &m_cDocs,
+ m_rgpDocs);
+ if (FAILED(hr))
+ return hr;
+ // Commit m_rgpDocs to calling Release() on each ISymUnmanagedDocument* in the array
+ m_rgpDocs.SetElementCount(m_cDocs);
+
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// This manages writing all Module-level data to the PDB, including public symbols,
+// string table, files checksum, section contribution table, and, indirectly, the lines
+// subsection
+//
+HRESULT NGenModulePdbWriter::WritePDBData()
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(m_pWriter == NULL);
+
+ HRESULT hr;
+
+ // This will try to open the managed PDB if lines info was requested. This is a
+ // likely failure point, so intentionally do this before creating the NGEN PDB file
+ // on disk.
+ if ((m_dwExtraData & kPDBLines) != 0)
+ {
+ hr = InitILPdbData();
+ if (FAILED(hr))
+ return hr;
+ }
+
+ // Create the PDB file we will write into.
+
+ _ASSERTE(m_Create != NULL);
+ _ASSERTE(m_pModule != NULL);
+
+ PEImageLayout * pLoadedLayout = m_pModule->GetFile()->GetLoaded();
+
+ ReleaseHolder<ISymNGenWriter> pWriter1;
+ hr = m_Create(pLoadedLayout->GetPath(), m_wszPdbPath, &pWriter1);
+ if (FAILED(hr))
+ return hr;
+
+ hr = pWriter1->QueryInterface(IID_ISymNGenWriter2, (LPVOID*) &m_pWriter);
+ if (FAILED(hr))
+ {
+ GetSvcLogger()->Printf(
+ W("An incorrect version of diasymreader.dll was found. Please ensure that version 11 or greater of diasymreader.dll is on the path. You can typically find this DLL in the desktop .NET install directory for 4.5 or greater. Error='0x%x'\n"),
+ hr);
+ return hr;
+ }
+
+ // PDB file is now created. Get its path and initialize the holder so the PDB file
+ // can be deleted if we don't make it successfully to the end
+
+ hr = m_pWriter->QueryPDBNameExW(m_wszPDBFilePath, _countof(m_wszPDBFilePath));
+ if (SUCCEEDED(hr))
+ {
+ // A failure in QueryPDBNameExW above isn't fatal--it just means we can't
+ // initialize m_deletePDBFileHolder, and thus may leave the PDB file on disk if
+ // there's *another* error later on. And if we do hit another error, NGEN will
+ // still return an error exit code, so the worst we'll have is a bogus PDB file
+ // that no one should expect works anyway.
+ m_deletePDBFileHolder.Assign(m_wszPDBFilePath);
+ }
+
+ if ((m_dwExtraData & kPDBLines) != 0)
+ {
+ hr = m_pdbMod.Open(m_pWriter, pLoadedLayout->GetPath(), m_pModule->GetPath());
+ if (FAILED(hr))
+ return hr;
+
+ hr = WriteStringTable();
+ if (FAILED(hr))
+ return hr;
+
+ hr = WriteFileChecksums();
+ if (FAILED(hr))
+ return hr;
+ }
+
+ COUNT_T sectionCount = pLoadedLayout->GetNumberOfSections();
+ IMAGE_SECTION_HEADER *section = pLoadedLayout->FindFirstSection();
+ COUNT_T sectionIndex = 0;
+ USHORT iCodeSection = 0;
+ BYTE *pCodeBase = NULL;
+ while (sectionIndex < sectionCount)
+ {
+ hr = m_pWriter->AddSection((USHORT)(sectionIndex + 1),
+ OMF_StandardText,
+ 0,
+ section[sectionIndex].SizeOfRawData);
+ if (FAILED(hr))
+ return hr;
+
+ if (strcmp((const char *)&section[sectionIndex].Name[0], ".text") == 0) {
+ _ASSERTE((iCodeSection == 0) && (pCodeBase == NULL));
+ iCodeSection = (USHORT)(sectionIndex + 1);
+ pCodeBase = (BYTE *)section[sectionIndex].VirtualAddress;
+ }
+
+ if ((m_dwExtraData & kPDBLines) != 0)
+ {
+ // In order to support the DIA RVA-to-lines API against the PDB we're
+ // generating, we need to update the section contribution table with each
+ // section we add.
+ hr = m_pWriter->ModAddSecContribEx(
+ m_pdbMod.GetModPtr(),
+ (USHORT)(sectionIndex + 1),
+ 0,
+ section[sectionIndex].SizeOfRawData,
+ section[sectionIndex].Characteristics,
+ 0, // dwDataCrc
+ 0 // dwRelocCrc
+ );
+ if (FAILED(hr))
+ return hr;
+ }
+
+ sectionIndex++;
+ }
+
+ _ASSERTE(iCodeSection != 0);
+ _ASSERTE(pCodeBase != NULL);
+
+ if ((m_dwExtraData & kPDBLines) != 0)
+ {
+ // To support lines info, we need a "dummy" section, indexed as 0, for use as a
+ // sentinel when MSPDB sets up its section contribution table
+ hr = m_pWriter->AddSection(0, // Dummy section 0
+ OMF_SentinelType,
+ 0,
+ 0xFFFFffff);
+ if (FAILED(hr))
+ return hr;
+ }
+
+#ifdef FEATURE_READYTORUN_COMPILER
+ if (pLoadedLayout->HasReadyToRunHeader())
+ {
+ ReadyToRunInfo::MethodIterator mi(m_pModule->GetReadyToRunInfo());
+ while (mi.Next())
+ {
+ MethodDesc *hotDesc = mi.GetMethodDesc();
+
+ hr = WriteMethodPDBData(pLoadedLayout, iCodeSection, pCodeBase, hotDesc, mi.GetMethodStartAddress());
+ if (FAILED(hr))
+ return hr;
+ }
+ }
+ else
+#endif // FEATURE_READYTORUN_COMPILER
+ {
+ MethodIterator mi(m_pModule);
+ while (mi.Next())
+ {
+ MethodDesc *hotDesc = mi.GetMethodDesc();
+ hotDesc->CheckRestore();
+
+ hr = WriteMethodPDBData(pLoadedLayout, iCodeSection, pCodeBase, hotDesc, mi.GetMethodStartAddress());
+ if (FAILED(hr))
+ return hr;
+ }
+ }
+
+ // We made it successfully to the end, so don't delete the PDB file.
+ m_deletePDBFileHolder.SuppressRelease();
+ return S_OK;
+}
+
+HRESULT NGenModulePdbWriter::WriteMethodPDBData(PEImageLayout * pLoadedLayout, USHORT iCodeSection, BYTE *pCodeBase, MethodDesc * hotDesc, PCODE start)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr;
+
+ EECodeInfo codeInfo(start);
+ _ASSERTE(codeInfo.IsValid());
+
+ IJitManager::MethodRegionInfo methodRegionInfo;
+ codeInfo.GetMethodRegionInfo(&methodRegionInfo);
+
+ PCODE pHotCodeStart = methodRegionInfo.hotStartAddress;
+ _ASSERTE(pHotCodeStart);
+
+ PCODE pColdCodeStart = methodRegionInfo.coldStartAddress;
+
+ // Hot name
+ {
+ SString fullName;
+ TypeString::AppendMethodInternal(
+ fullName,
+ hotDesc,
+ TypeString::FormatNamespace | TypeString::FormatSignature);
+
+ BSTRHolder hotNameHolder(SysAllocString(fullName.GetUnicode()));
+ hr = m_pWriter->AddSymbol(hotNameHolder,
+ iCodeSection,
+ (pHotCodeStart - (TADDR)pLoadedLayout->GetBase() - (TADDR)pCodeBase));
+ if (FAILED(hr))
+ return hr;
+ }
+
+ // Cold name
+ {
+ if (pColdCodeStart) {
+
+ SString fullNameCold;
+ fullNameCold.Append(W("[COLD] "));
+ TypeString::AppendMethodInternal(
+ fullNameCold,
+ hotDesc,
+ TypeString::FormatNamespace | TypeString::FormatSignature);
+
+ BSTRHolder coldNameHolder(SysAllocString(fullNameCold.GetUnicode()));
+ hr = m_pWriter->AddSymbol(coldNameHolder,
+ iCodeSection,
+ (pColdCodeStart - (TADDR)pLoadedLayout->GetBase() - (TADDR)pCodeBase));
+
+ if (FAILED(hr))
+ return hr;
+
+ }
+ }
+
+ // Offset / lines mapping
+ if (((m_dwExtraData & kPDBLines) != 0) &&
+
+ // Skip functions that are too big for PDB lines format
+ FitsIn<DWORD>(methodRegionInfo.hotSize) &&
+ FitsIn<DWORD>(methodRegionInfo.coldSize))
+ {
+ NGenMethodLinesPdbWriter methodLinesWriter(
+ m_pWriter,
+ m_pdbMod.GetModPtr(),
+ m_pReader,
+ hotDesc,
+ start,
+ iCodeSection,
+ (TADDR)pLoadedLayout->GetBase() + (TADDR)pCodeBase,
+ &methodRegionInfo,
+ &codeInfo,
+ &m_docNameToOffsetMap);
+
+ hr = methodLinesWriter.WritePDBData();
+ if (FAILED(hr))
+ return hr;
+ }
+
+ return S_OK;
+}
+
+// ----------------------------------------------------------------------------
+// Handles writing the file checksums subsection to the PDB
+//
+HRESULT NGenModulePdbWriter::WriteFileChecksums()
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(m_pWriter != NULL);
+
+ // The file checksums subsection of the PDB (i.e., "DEBUG_S_FILECHKSMS"), is a blob
+ // consisting of a few structs stacked one after the other:
+ //
+ // * (1) DWORD = CV_SIGNATURE_C13 -- the usual subsection signature DWORD
+ // * (2) CV_DebugSSubsectionHeader_t -- the usual subsection header, with type =
+ // DEBUG_S_FILECHKSMS
+ // * (3) Blob consisting of an array of checksum data -- the format of this piece is
+ // not defined via structs (not sure why), but is defined in
+ // vctools\PDB\doc\lines.docx
+ //
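+ // As an illustrative sketch (not normative; assumes two MD5-checksummed
+ // documents), the finished blob looks like:
+ //     [DWORD: CV_SIGNATURE_C13]
+ //     [CV_DebugSSubsectionHeader_t: type = DEBUG_S_FILECHKSMS, cbLen = ...]
+ //     [ULONG32 string-table offset][BYTE cb=16][BYTE alg=CHKSUM_TYPE_MD5][16 checksum bytes][pad to 4]
+ //     [ULONG32 string-table offset][BYTE cb=16][BYTE alg=CHKSUM_TYPE_MD5][16 checksum bytes][pad to 4]
+ //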
+ HRESULT hr;
+
+ // PDB format requires that the checksum size can always be expressed in a BYTE.
+ const BYTE kcbEachChecksumEstimate = 0xFF;
+
+ UINT64 cbChecksumSubsectionEstimate =
+ sizeof(DWORD) +
+ sizeof(CV_DebugSSubsectionHeader_t) +
+ m_cDocs * kcbEachChecksumEstimate;
+ if (!FitsIn<ULONG32>(cbChecksumSubsectionEstimate))
+ {
+ return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+
+ NewArrayHolder<BYTE> rgbChksumSubsection(new BYTE[ULONG32(cbChecksumSubsectionEstimate)]);
+ LPBYTE pbChksumSubsectionCur = rgbChksumSubsection;
+
+ // (1) Subsection signature
+ *((DWORD *) pbChksumSubsectionCur) = CV_SIGNATURE_C13;
+ pbChksumSubsectionCur += sizeof(DWORD);
+
+ // (2) Subsection header
+ CV_DebugSSubsectionHeader_t * pSubSectHeader = (CV_DebugSSubsectionHeader_t *) pbChksumSubsectionCur;
+ memset(pSubSectHeader, 0, sizeof(*pSubSectHeader));
+ pSubSectHeader->type = DEBUG_S_FILECHKSMS;
+ pbChksumSubsectionCur += sizeof(*pSubSectHeader);
+ // pSubSectHeader->cbLen to be filled in later once we know the size
+
+ LPBYTE pbChksumDataStart = pbChksumSubsectionCur;
+
+ // (3) Iterate through source files, steal their checksum info from the IL PDB, and
+ // write it into the NGEN PDB.
+ for (ULONG32 i=0; i < m_cDocs; i++)
+ {
+ WCHAR wszURL[MAX_PATH];
+ char szURL[MAX_PATH];
+ ULONG32 cchURL;
+ hr = m_rgpDocs[i]->GetURL(_countof(wszURL), &cchURL, wszURL);
+ if (FAILED(hr))
+ return hr;
+
+ int cbWritten = WideCharToMultiByte(
+ CP_UTF8,
+ 0, // dwFlags
+ wszURL,
+ -1, // i.e., input is NULL-terminated
+ szURL, // output: UTF8 string starts here
+ _countof(szURL), // Available space
+ NULL, // lpDefaultChar
+ NULL // lpUsedDefaultChar
+ );
+ if (cbWritten == 0)
+ return HRESULT_FROM_WIN32(GetLastError());
+
+ // find offset into string table and add to blob; meanwhile update hash to
+ // remember the offset into the cksum table
+ const KeyValuePair<LPCSTR,DocNameOffsets> * pMapEntry =
+ m_docNameToOffsetMap.LookupPtr(szURL);
+ if (pMapEntry == NULL)
+ {
+ // Should never happen, as it implies we found a source file that was never
+ // written to the string table
+ return E_UNEXPECTED;
+ }
+ DocNameOffsets docNameOffsets(pMapEntry->Value());
+ docNameOffsets.m_dwChksumTableOffset = ULONG32(pbChksumSubsectionCur - pbChksumDataStart);
+
+ // Update the map with the new docNameOffsets that contains the cksum table
+ // offset as well. Note that we must ensure the key (LPCSTR) remains the same
+ // (thus we explicitly ask for the Key()). This class guarantees that string
+ // pointer (which comes from the string table buffer field) will remain allocated
+ // as long as the map is.
+ m_docNameToOffsetMap.AddOrReplace(pMapEntry->Key(), docNameOffsets);
+ * (ULONG32 *) pbChksumSubsectionCur = docNameOffsets.m_dwStrTableOffset;
+ pbChksumSubsectionCur += sizeof(ULONG32);
+
+ // Checksum algorithm and bytes
+
+ BYTE rgbChecksum[kcbEachChecksumEstimate];
+ ULONG32 cbChecksum = 0;
+ BYTE bChecksumAlgorithmType = CHKSUM_TYPE_NONE;
+ GUID guidChecksumAlgorithm;
+ hr = m_rgpDocs[i]->GetCheckSumAlgorithmId(&guidChecksumAlgorithm);
+ if (SUCCEEDED(hr))
+ {
+ // If we got the checksum algorithm, we can write it all out to the buffer.
+ // Else, we'll just omit the checksum info
+ if (memcmp(&guidChecksumAlgorithm, &CorSym_SourceHash_MD5, sizeof(GUID)) == 0)
+ bChecksumAlgorithmType = CHKSUM_TYPE_MD5;
+ else if (memcmp(&guidChecksumAlgorithm, &CorSym_SourceHash_SHA1, sizeof(GUID)) == 0)
+ bChecksumAlgorithmType = CHKSUM_TYPE_SHA1;
+ }
+
+ if (bChecksumAlgorithmType != CHKSUM_TYPE_NONE)
+ {
+ hr = m_rgpDocs[i]->GetCheckSum(sizeof(rgbChecksum), &cbChecksum, rgbChecksum);
+ if (FAILED(hr) || !FitsIn<BYTE>(cbChecksum))
+ {
+ // Should never happen, but just in case checksum data is invalid, just put
+ // no checksum into the NGEN PDB
+ bChecksumAlgorithmType = CHKSUM_TYPE_NONE;
+ cbChecksum = 0;
+ }
+ }
+
+ // checksum length & algorithm
+ *pbChksumSubsectionCur = (BYTE) cbChecksum;
+ pbChksumSubsectionCur++;
+ *pbChksumSubsectionCur = bChecksumAlgorithmType;
+ pbChksumSubsectionCur++;
+
+ // checksum data bytes
+ memcpy(pbChksumSubsectionCur, rgbChecksum, cbChecksum);
+ pbChksumSubsectionCur += cbChecksum;
+
+ // Must align to the next 4-byte boundary
+ LPBYTE pbChksumSubsectionCurAligned = (LPBYTE) ALIGN_UP(pbChksumSubsectionCur, 4);
+ memset(pbChksumSubsectionCur, 0, pbChksumSubsectionCurAligned-pbChksumSubsectionCur);
+ pbChksumSubsectionCur = pbChksumSubsectionCurAligned;
+ }
+
+ // Now that we know pSubSectHeader->cbLen, fill it in
+ pSubSectHeader->cbLen = CV_off32_t(pbChksumSubsectionCur - pbChksumDataStart);
+
+ // Subsection is now filled out, so add it
+ hr = m_pWriter->ModAddSymbols(
+ m_pdbMod.GetModPtr(),
+ rgbChksumSubsection,
+ int(pbChksumSubsectionCur - rgbChksumSubsection));
+ if (FAILED(hr))
+ return hr;
+
+ return S_OK;
+}
+
+// ----------------------------------------------------------------------------
+// NGenMethodLinesPdbWriter implementation
+
+
+//---------------------------------------------------------------------------------------
+//
+// Manages the writing of all lines-file subsections required for a given method. If a
+// method is hot/cold split, this will write two lines-file subsections to the PDB--one
+// for the hot region, and one for the cold.
+//
+
+HRESULT NGenMethodLinesPdbWriter::WritePDBData()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (m_hotDesc->IsNoMetadata())
+ {
+ // IL stubs will not have data in the IL PDB, so just skip them.
+ return S_OK;
+ }
+
+ //
+ // First, we'll need to merge the IL-to-native map from the JIT manager with the
+ // IL-to-source map from the IL PDB. This merging is done into a single piece that
+ // includes all regions of the code when it's split
+ //
+
+ // Grab the IL-to-native map from the JIT manager
+ DebugInfoRequest debugInfoRequest;
+ debugInfoRequest.InitFromStartingAddr(m_hotDesc, m_start);
+ BOOL fSuccess = m_pCodeInfo->GetJitManager()->GetBoundariesAndVars(
+ debugInfoRequest,
+ SimpleNew, NULL, // Allocator
+ &m_cIlNativeMap,
+ &m_rgIlNativeMap,
+ NULL, NULL);
+ if (!fSuccess)
+ {
+ // Shouldn't happen, but just skip this method if it does
+ return S_OK;
+ }
+
+ // We will traverse this IL-to-native map (from the JIT) in parallel with the
+ // source-to-IL map provided by the IL PDB (below). Both need to be sorted by IL so
+ // we can easily find matching entries in the two maps
+ QuickSortILNativeMapByIL sorterByIl(m_rgIlNativeMap, m_cIlNativeMap);
+ sorterByIl.Sort();
+
+ // Now grab the IL-to-source map from the IL PDB (known simply as "sequence
+ // points" in the IL PDB API)
+
+ ReleaseHolder<ISymUnmanagedMethod> pMethod;
+ HRESULT hr = m_pReader->GetMethod(
+ m_hotDesc->GetMemberDef(),
+ &pMethod);
+ if (FAILED(hr))
+ {
+ // Ignore any methods not included in the IL PDB. Although we've already
+ // excluded LCG & IL stubs from methods we're considering, there can still be
+ // methods in the NGEN module that are not in the IL PDB (e.g., implicit ctors).
+ return S_OK;
+ }
+
+ ULONG32 cSeqPointsExpected;
+ hr = pMethod->GetSequencePointCount(&cSeqPointsExpected);
+ if (FAILED(hr))
+ {
+ // Should never happen, but we can just skip this function if the IL PDB can't
+ // find sequence point info
+ return S_OK;
+ }
+
+ ULONG32 cSeqPointsReturned;
+ m_rgilOffsets = new ULONG32[cSeqPointsExpected];
+ m_rgpDocs = new ISymUnmanagedDocument * [cSeqPointsExpected];
+ m_rgnLineStarts = new ULONG32[cSeqPointsExpected];
+
+ // This is guaranteed to return the sequence points sorted in order of the IL
+ // offsets (m_rgilOffsets)
+ hr = pMethod->GetSequencePoints(
+ cSeqPointsExpected,
+ &cSeqPointsReturned,
+ m_rgilOffsets,
+ m_rgpDocs,
+ m_rgnLineStarts,
+ NULL, // ColumnStarts not needed
+ NULL, // LineEnds not needed
+ NULL); // ColumnEnds not needed
+ if (FAILED(hr))
+ {
+ // Shouldn't happen, but just skip this method if it does
+ return S_OK;
+ }
+ // Commit m_rgpDocs to calling Release() on all ISymUnmanagedDocument* returned into
+ // the array.
+ m_rgpDocs.SetElementCount(cSeqPointsReturned);
+
+ // Now merge the two maps together into an array of MapIndexPair structures. Traverse
+ // both maps in parallel (both ordered by IL offset), looking for IL offset matches.
+ // Range matching: If an entry in the IL-to-native map has no matching entry in the
+ // IL PDB, then seek up in the IL PDB to the previous sequence point and merge to
+ // that (assuming that previous sequence point from the IL PDB did not already have
+ // an exact match to some other entry in the IL-to-native map).
+ ULONG32 cMapIndexPairsMax = m_cIlNativeMap;
+ NewArrayHolder<MapIndexPair> rgMapIndexPairs(new MapIndexPair [cMapIndexPairsMax]);
+ ULONG32 iSeqPoints = 0;
+
+ // Keep track (via iSeqPointLastUnmatched) of the most recent entry in the IL PDB
+ // that we passed over because it had no matching entry in the IL-to-native map. We
+ // may use this to do a range-match if necessary. We'll set iSeqPointLastUnmatched to
+ // the currently iterated IL PDB entry after our cursor in the IL-to-native map
+ // passed it by, but only if fCurSeqPointMatched is FALSE
+ ULONG32 iSeqPointLastUnmatched = (ULONG32) -1;
+ BOOL fCurSeqPointMatched = FALSE;
+
+ ULONG32 iIlNativeMap = 0;
+ ULONG32 iMapIndexPairs = 0;
+
+ // Traverse IL PDB entries and IL-to-native map entries (both sorted by IL) in
+ // parallel
+ //
+ // * Record matching indices in our output map, rgMapIndexPairs, indexed by
+ // iMapIndexPairs.
+ //
+ // * We will have at most m_cIlNativeMap entries in rgMapIndexPairs by the time
+ // we're done. (Each IL-to-native map entry will be considered for inclusion
+ // in this output. Those IL-to-native map entries with a match in the IL PDB
+ // will be included; the rest are skipped.)
+ //
+ // * iSeqPointLastUnmatched != -1 iff it equals a prior entry in the IL PDB that
+ // we skipped over because it could not be exactly matched to an entry in the
+ // IL-to-native map. In such a case, it will be considered for a
+ // range-match to the next IL-to-native map entry
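+ //
+ // As a worked example (IL offsets hypothetical): given sequence points at
+ // IL 0x00, 0x05, 0x10 and IL-to-native entries at IL 0x00, 0x08, 0x0C, 0x10,
+ // the entries at IL 0x00 and 0x10 match exactly. The entry at IL 0x08 has no
+ // exact match, but the sequence point at IL 0x05 went unmatched, so 0x08
+ // range-matches back to it. The entry at IL 0x0C finds no remaining unmatched
+ // sequence point and is skipped. The resulting (m_iIlNativeMap, m_iSeqPoints)
+ // pairs are (0, 0), (1, 1), (3, 2).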
+ while (iIlNativeMap < m_cIlNativeMap)
+ {
+ _ASSERTE (iMapIndexPairs < cMapIndexPairsMax);
+
+ // IP addresses that map to "special" places (prolog, epilog, or
+ // other hidden code) will just map to 0xFeeFee, as per convention
+ if ((m_rgIlNativeMap[iIlNativeMap].ilOffset == NO_MAPPING) ||
+ (m_rgIlNativeMap[iIlNativeMap].ilOffset == PROLOG) ||
+ (m_rgIlNativeMap[iIlNativeMap].ilOffset == EPILOG))
+ {
+ rgMapIndexPairs[iMapIndexPairs].m_iIlNativeMap = iIlNativeMap;
+ rgMapIndexPairs[iMapIndexPairs].m_iSeqPoints = kUnmappedIP;
+ iMapIndexPairs++;
+
+ // If we were remembering a prior unmatched entry in the IL PDB, reset it
+ iSeqPointLastUnmatched = (ULONG32) -1;
+
+ // Advance il-native map, NOT il-source map
+ iIlNativeMap++;
+ continue;
+ }
+
+ // Cases below actually look at the IL PDB sequence point, so ensure it's still
+ // in range; otherwise, we're done.
+ if (iSeqPoints >= cSeqPointsReturned)
+ break;
+
+ if (m_rgIlNativeMap[iIlNativeMap].ilOffset < m_rgilOffsets[iSeqPoints])
+ {
+ // Our cursor over the IL-to-native map is behind the
+ // IL-to-source map
+
+ if (iSeqPointLastUnmatched != (ULONG32) -1)
+ {
+ // Range matching: This IL-to-native entry is behind our cursor in the
+ // IL-to-source map, but it is also ahead of the previous (unmatched)
+ // entry in the IL-to-source map. So this is a case where the JIT
+ // generated IL-to-native entries that surround, without exactly
+ // matching, that previous IL-to-source entry. So match this entry to
+ // that previous (unmatched) entry in the IL-to-source map.
+ _ASSERTE(m_rgilOffsets[iSeqPointLastUnmatched] < m_rgIlNativeMap[iIlNativeMap].ilOffset);
+ rgMapIndexPairs[iMapIndexPairs].m_iIlNativeMap = iIlNativeMap;
+ rgMapIndexPairs[iMapIndexPairs].m_iSeqPoints = iSeqPointLastUnmatched;
+ iMapIndexPairs++;
+
+ // Reset our memory of the last unmatched entry in the IL PDB
+ iSeqPointLastUnmatched = (ULONG32) -1;
+ }
+
+ // Go to next ilnative map entry
+ iIlNativeMap++;
+ continue;
+ }
+
+ if (m_rgilOffsets[iSeqPoints] < m_rgIlNativeMap[iIlNativeMap].ilOffset)
+ {
+ // Our cursor over the IL-to-native map is ahead of the IL-to-source
+ // map, so go to the next IL-to-source entry. Remember that we're passing over
+ // this entry in the IL-to-source map, in case we choose to match to it later.
+ if (!fCurSeqPointMatched)
+ {
+ iSeqPointLastUnmatched = iSeqPoints;
+ }
+ iSeqPoints++;
+ fCurSeqPointMatched = FALSE;
+ continue;
+ }
+
+ // At a match
+ _ASSERTE(m_rgilOffsets[iSeqPoints] == m_rgIlNativeMap[iIlNativeMap].ilOffset);
+ rgMapIndexPairs[iMapIndexPairs].m_iIlNativeMap = iIlNativeMap;
+ rgMapIndexPairs[iMapIndexPairs].m_iSeqPoints = iSeqPoints;
+
+ // If we were remembering a prior unmatched entry in the IL PDB, reset it
+ iSeqPointLastUnmatched = (ULONG32) -1;
+
+ // Advance il-native map, do not advance il-source map in case the next il-native
+ // entry matches this current il-source map entry, but remember that this current
+ // il-source map entry has found an exact match
+ iMapIndexPairs++;
+ iIlNativeMap++;
+ fCurSeqPointMatched = TRUE;
+ }
+
+ ULONG32 cMapIndexPairs = iMapIndexPairs;
+
+ // PDB format requires the lines array to be sorted by IP offset
+ QuickSortMapIndexPairsByNativeOffset sorterByIp(rgMapIndexPairs, cMapIndexPairs, m_rgIlNativeMap, m_cIlNativeMap);
+ sorterByIp.Sort();
+
+ //
+ // Now that the maps are merged and sorted, determine whether there's a hot/cold
+ // split, where that split is, and then call WriteLinesSubsection to write out each
+ // region into its own lines-file subsection
+ //
+
+ // Find the point where the code got split
+ ULONG32 iMapIndexPairsFirstEntryInColdSection = cMapIndexPairs;
+ for (iMapIndexPairs = 0; iMapIndexPairs < cMapIndexPairs; iMapIndexPairs++)
+ {
+ DWORD dwNativeOffset = m_rgIlNativeMap[rgMapIndexPairs[iMapIndexPairs].m_iIlNativeMap].nativeOffset;
+ if (dwNativeOffset >= m_pMethodRegionInfo->hotSize)
+ {
+ iMapIndexPairsFirstEntryInColdSection = iMapIndexPairs;
+ break;
+ }
+ }
+
+ // Adjust the cold offsets (if any) to be relative to the cold start
+ for (iMapIndexPairs = iMapIndexPairsFirstEntryInColdSection; iMapIndexPairs < cMapIndexPairs; iMapIndexPairs++)
+ {
+ DWORD dwNativeOffset = m_rgIlNativeMap[rgMapIndexPairs[iMapIndexPairs].m_iIlNativeMap].nativeOffset;
+ _ASSERTE (dwNativeOffset >= m_pMethodRegionInfo->hotSize);
+
+ // Adjust offset so it's relative to the cold region start
+ dwNativeOffset -= DWORD(m_pMethodRegionInfo->hotSize);
+ _ASSERTE(dwNativeOffset < m_pMethodRegionInfo->coldSize);
+ m_rgIlNativeMap[rgMapIndexPairs[iMapIndexPairs].m_iIlNativeMap].nativeOffset = dwNativeOffset;
+ }
+
+ // Write out the hot region into its own lines-file subsection
+ hr = WriteLinesSubsection(
+ ULONG32(m_pMethodRegionInfo->hotStartAddress - m_addrCodeSection),
+ ULONG32(m_pMethodRegionInfo->hotSize),
+ rgMapIndexPairs,
+ iMapIndexPairsFirstEntryInColdSection);
+ if (FAILED(hr))
+ return hr;
+
+ // If there was a hot/cold split, write a separate lines-file subsection for the cold
+ // region
+ if (iMapIndexPairsFirstEntryInColdSection < cMapIndexPairs)
+ {
+ hr = WriteLinesSubsection(
+ ULONG32(m_pMethodRegionInfo->coldStartAddress - m_addrCodeSection),
+ ULONG32(m_pMethodRegionInfo->coldSize),
+ &rgMapIndexPairs[iMapIndexPairsFirstEntryInColdSection],
+ cMapIndexPairs - iMapIndexPairsFirstEntryInColdSection);
+ if (FAILED(hr))
+ return hr;
+ }
+
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Helper called by NGenMethodLinesPdbWriter::WritePDBData to do the actual PDB writing of a single
+// lines-subsection. This is called once for the hot region and once for the cold
+// region of a given method that has been split, so a split method gets two
+// lines-subsections.
+//
+// Arguments:
+// * ulCodeStartOffset - Offset, relative to the code section, at which this region
+// of code begins
+// * cbCode - Size in bytes of this region of code
+// * rgMapIndexPairs - Array of indices forming the merged data from the JIT
+// Manager's IL-to-native map and the IL PDB's IL-to-source map. It is assumed
+// that this array has indices sorted such that the native offsets increase
+// * cMapIndexPairs - Size in entries of above array.
+//
+// Assumptions:
+// rgMapIndexPairs must be sorted in order of nativeOffset, i.e.,
+// m_rgIlNativeMap[rgMapIndexPairs[i].m_iIlNativeMap].nativeOffset increases with i.
+//
+
+HRESULT NGenMethodLinesPdbWriter::WriteLinesSubsection(
+ ULONG32 ulCodeStartOffset,
+ ULONG32 cbCode,
+ MapIndexPair * rgMapIndexPairs,
+ ULONG32 cMapIndexPairs)
+{
+ STANDARD_VM_CONTRACT;
+
+ // The lines subsection of the PDB (i.e., "DEBUG_S_LINES"), is a blob consisting of a
+ // few structs stacked one after the other:
+ //
+ // * (1) DWORD = CV_SIGNATURE_C13 -- the usual subsection signature DWORD
+ // * (2) CV_DebugSSubsectionHeader_t -- the usual subsection header, with type =
+ // DEBUG_S_LINES
+ // * (3) CV_DebugSLinesHeader_t -- a single header for the entire subsection. Its
+ // purpose is to specify the native function being described, and to specify the
+ // size of the variable-sized "blocks" that follow
+ // * (4) CV_DebugSLinesFileBlockHeader_t -- For each block, you get one of these. A
+ // block is defined by a set of sequence points that map to the same source
+ // file. While iterating through the offsets, we need to define new blocks
+ // whenever the source file changes. In C#, this typically only happens when
+ // you advance to (or away from) an unmapped IP (0xFeeFee).
+ // * (5) CV_Line_t (Line array entries) -- For each block, you get several line
+ // array entries, one entry for the beginning of each sequence point.
+
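+ // A sketch of the finished blob for a method whose sequence points span two
+ // source files (illustrative, not normative):
+ //     [DWORD: CV_SIGNATURE_C13]
+ //     [CV_DebugSSubsectionHeader_t: type = DEBUG_S_LINES]
+ //     [CV_DebugSLinesHeader_t: offCon/segCon/cbCon identify this code region]
+ //     [CV_DebugSLinesFileBlockHeader_t for file A][CV_Line_t entries...]
+ //     [CV_DebugSLinesFileBlockHeader_t for file B][CV_Line_t entries...]
+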
+ HRESULT hr;
+
+ UINT64 cbLinesSubsectionEstimate =
+ sizeof(DWORD) +
+ sizeof(CV_DebugSSubsectionHeader_t) +
+ sizeof(CV_DebugSLinesHeader_t) +
+ // Worst case: assume each sequence point will require its own
+ // CV_DebugSLinesFileBlockHeader_t
+ (cMapIndexPairs * (sizeof(CV_DebugSLinesFileBlockHeader_t) + sizeof(CV_Line_t)));
+ if (!FitsIn<ULONG32>(cbLinesSubsectionEstimate))
+ {
+ return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+
+ NewArrayHolder<BYTE> rgbLinesSubsection(new BYTE[ULONG32(cbLinesSubsectionEstimate)]);
+ LPBYTE pbLinesSubsectionCur = rgbLinesSubsection;
+
+ // * (1) DWORD = CV_SIGNATURE_C13 -- the usual subsection signature DWORD
+ *((DWORD *) pbLinesSubsectionCur) = CV_SIGNATURE_C13;
+ pbLinesSubsectionCur += sizeof(DWORD);
+
+ // * (2) CV_DebugSSubsectionHeader_t
+ CV_DebugSSubsectionHeader_t * pSubSectHeader = (CV_DebugSSubsectionHeader_t *) pbLinesSubsectionCur;
+ memset(pSubSectHeader, 0, sizeof(*pSubSectHeader));
+ pSubSectHeader->type = DEBUG_S_LINES;
+ // pSubSectHeader->cbLen to be filled in later once we know the size
+ pbLinesSubsectionCur += sizeof(*pSubSectHeader);
+
+ // * (3) CV_DebugSLinesHeader_t
+ CV_DebugSLinesHeader_t * pLinesHeader = (CV_DebugSLinesHeader_t *) pbLinesSubsectionCur;
+ memset(pLinesHeader, 0, sizeof(*pLinesHeader));
+ pLinesHeader->offCon = ulCodeStartOffset;
+ pLinesHeader->segCon = m_iCodeSection;
+ pLinesHeader->flags = 0; // 0 means line info, but not column info, is included
+ pLinesHeader->cbCon = cbCode;
+ pbLinesSubsectionCur += sizeof(*pLinesHeader);
+
+ // The loop below takes care of
+ // * (4) CV_DebugSLinesFileBlockHeader_t
+ // * (5) CV_Line_t (Line array entries)
+ //
+ BOOL fAtLeastOneBlockWritten = FALSE;
+ CV_DebugSLinesFileBlockHeader_t * pLinesFileBlockHeader = NULL;
+ CV_Line_t * pLineCur = NULL;
+ CV_Line_t * pLineBlockStart = NULL;
+ BOOL fBeginNewBlock = TRUE;
+ ULONG32 iSeqPointsPrev = (ULONG32) -1;
+ DWORD dwNativeOffsetPrev = (DWORD) -1;
+ WCHAR wszURLPrev[MAX_PATH];
+ memset(&wszURLPrev, 0, sizeof(wszURLPrev));
+ LPBYTE pbEnd = NULL;
+
+ for (ULONG32 iMapIndexPairs=0; iMapIndexPairs < cMapIndexPairs; iMapIndexPairs++)
+ {
+ ULONG32 iSeqPoints = rgMapIndexPairs[iMapIndexPairs].m_iSeqPoints;
+ ULONG32 iIlNativeMap = rgMapIndexPairs[iMapIndexPairs].m_iIlNativeMap;
+
+ // Sometimes the JIT manager will give us duplicate IPs in the IL-to-native
+ // offset mapping. PDB format frowns on that. Since rgMapIndexPairs is being
+ // iterated in native offset order, it's easy to find these dupes right now, and
+ // skip all but the first map containing a given IP offset.
+ if (m_rgIlNativeMap[iIlNativeMap].nativeOffset == dwNativeOffsetPrev)
+ {
+ // Found a native offset dupe. Since we've already assigned
+ // dwNativeOffsetPrev, ignore the current map entry
+ continue;
+ }
+ dwNativeOffsetPrev = m_rgIlNativeMap[iIlNativeMap].nativeOffset;
+
+ if ((iSeqPoints != kUnmappedIP) && (iSeqPoints != iSeqPointsPrev))
+ {
+ // This is the first iteration where we're looking at this iSeqPoints. So
+ // check whether the document name has changed on us. If it has, that means
+ // we need to start a new block.
+ WCHAR wszURL[MAX_PATH];
+ ULONG32 cchURL;
+ hr = m_rgpDocs[iSeqPoints]->GetURL(_countof(wszURL), &cchURL, wszURL);
+ if (FAILED(hr))
+ {
+ // Skip function if IL PDB has data missing
+ return S_OK;
+ }
+
+ // wszURL is the best we have for a unique identifier of documents. See
+ // whether the previous document's URL is different
+ if (_wcsicmp(wszURL, wszURLPrev) != 0)
+ {
+ // New document. Update wszURLPrev, and remember that we need to start a
+ // new file block
+ if (wcscpy_s(wszURLPrev, _countof(wszURLPrev), wszURL) != 0)
+ {
+ continue;
+ }
+ fBeginNewBlock = TRUE;
+ }
+
+ iSeqPointsPrev = iSeqPoints;
+ }
+ if (fBeginNewBlock)
+ {
+ // We've determined that we need to start a new block. So perform fixups
+ // against the previous block (if any) first
+ if (FinalizeLinesFileBlock(pLinesFileBlockHeader, pLineBlockStart, pLineCur))
+ {
+ fAtLeastOneBlockWritten = TRUE;
+ }
+ else if (pLinesFileBlockHeader != NULL)
+ {
+ // Previous block had no usable data. So rewind back to the previous
+ // block header, and we'll start there with the next block
+ pbLinesSubsectionCur = LPBYTE(pLinesFileBlockHeader);
+ pLineCur = (CV_Line_t *) pbLinesSubsectionCur;
+ }
+
+ // Now get the info we'll need for the next block
+ char szURL[MAX_PATH];
+ int cbWritten = WideCharToMultiByte(
+ CP_UTF8,
+ 0, // dwFlags
+ wszURLPrev,
+ -1, // i.e., input is NULL-terminated
+ szURL, // output: UTF8 string starts here
+ _countof(szURL), // Available space
+ NULL, // lpDefaultChar
+ NULL // lpUsedDefaultChar
+ );
+ if (cbWritten == 0)
+ continue;
+
+ DocNameOffsets docNameOffsets;
+ BOOL fExists = m_pDocNameToOffsetMap->Lookup(szURL, &docNameOffsets);
+ if (fExists)
+ {
+ _ASSERTE(docNameOffsets.m_dwChksumTableOffset != (ULONG32) -1);
+ }
+ else
+ {
+ // We may get back an invalid document in the 0xFeeFee case (i.e., a
+ // sequence point that intentionally doesn't map back to a publicly
+ // available source code line). In that case, we'll use the bogus cksum
+ // offset of -1 for now, and verify we're in the 0xFeeFee case later on
+ // (see code:NGenMethodLinesPdbWriter::FinalizeLinesFileBlock).
+ _ASSERTE(szURL[0] == '\0');
+ _ASSERTE(docNameOffsets.m_dwChksumTableOffset == (ULONG32) -1);
+ }
+
+
+ // * (4) CV_DebugSLinesFileBlockHeader_t
+ if (pLineCur == NULL)
+ {
+ // First lines file block, so begin the block header immediately after the
+ // subsection headers
+ pLinesFileBlockHeader = (CV_DebugSLinesFileBlockHeader_t *) pbLinesSubsectionCur;
+ }
+ else
+ {
+ // We've had blocks before this one, so add this block at our current
+ // location in the blob
+ pLinesFileBlockHeader = (CV_DebugSLinesFileBlockHeader_t *) pLineCur;
+ }
+
+ // PDB structure sizes guarantee this is the case, though the PDB docs are
+ // explicit that each lines-file block header must be 4-byte aligned.
+ _ASSERTE(IS_ALIGNED(pLinesFileBlockHeader, 4));
+
+ memset(pLinesFileBlockHeader, 0, sizeof(*pLinesFileBlockHeader));
+ pLinesFileBlockHeader->offFile = docNameOffsets.m_dwChksumTableOffset;
+ // pLinesFileBlockHeader->nLines to be filled in when block is complete
+ // pLinesFileBlockHeader->cbBlock to be filled in when block is complete
+
+ pLineCur = (CV_Line_t *) (pLinesFileBlockHeader + 1);
+ pLineBlockStart = pLineCur;
+ fBeginNewBlock = FALSE;
+ }
+
+
+ pLineCur->offset = m_rgIlNativeMap[iIlNativeMap].nativeOffset;
+ pLineCur->linenumStart =
+ (iSeqPoints == kUnmappedIP) ?
+ kUnmappedIP :
+ m_rgnLineStarts[iSeqPoints];
+ pLineCur->deltaLineEnd = 0;
+ pLineCur->fStatement = 1;
+ pLineCur++;
+ } // for (ULONG32 iMapIndexPairs=0; iMapIndexPairs < cMapIndexPairs; iMapIndexPairs++)
+
+ if (pLineCur == NULL)
+ {
+ // There was no lines data for this function, so don't write anything
+ return S_OK;
+ }
+
+ // Perform fixups against the last block we wrote
+ if (FinalizeLinesFileBlock(pLinesFileBlockHeader, pLineBlockStart, pLineCur))
+ fAtLeastOneBlockWritten = TRUE;
+
+ if (!fAtLeastOneBlockWritten)
+ {
+ // There were no valid blocks to write for this function, so don't bother
+ // calling the PDB writing API. No problem.
+ return S_OK;
+ }
+
+ // Now that we know pSubSectHeader->cbLen, fill it in
+ pSubSectHeader->cbLen = CV_off32_t(LPBYTE(pLineCur) - LPBYTE(pLinesHeader));
+
+ // Subsection is now filled out, so add it.
+ hr = m_pWriter->ModAddSymbols(
+ m_pMod,
+ rgbLinesSubsection,
+
+ // The size we pass here is the size of the entire byte array that we pass in.
+ int(LPBYTE(pLineCur) - rgbLinesSubsection));
+
+ if (FAILED(hr))
+ return hr;
+
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Performs final fixups on the last lines-file block we completed, specifically writing
+// in the size of the block, now that it's known. Also responsible for determining
+// whether there is even any data to write in the first place.
+//
+// Arguments:
+// * pLinesFileBlockHeader - lines-file block header to write to
+// * pLineBlockStart - First CV_Line_t * of this block
+// * pLineBlockAfterEnd - Last CV_Line_t * of this block plus 1
+//
+// Return Value:
+// * TRUE: lines-file block was nonempty, and is now finalized
+// * FALSE: lines-file block was empty, and caller should toss it out.
+//
+
+BOOL NGenMethodLinesPdbWriter::FinalizeLinesFileBlock(
+ CV_DebugSLinesFileBlockHeader_t * pLinesFileBlockHeader,
+ CV_Line_t * pLineBlockStart,
+ CV_Line_t * pLineBlockAfterEnd)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (pLinesFileBlockHeader == NULL)
+ {
+ // If a given function has no sequence points at all, pLinesFileBlockHeader can
+ // be NULL. No problem
+ return FALSE;
+ }
+
+ if (pLineBlockStart == pLineBlockAfterEnd)
+ {
+ // If we start a lines file block and then realize that there are no entries
+ // (i.e., no valid sequence points to map), then we end up with an empty block.
+ // No problem, just skip the block.
+ return FALSE;
+ }
+
+ _ASSERTE(pLineBlockStart != NULL);
+ _ASSERTE(pLineBlockAfterEnd != NULL);
+ _ASSERTE(pLineBlockAfterEnd > pLineBlockStart);
+
+ if (pLinesFileBlockHeader->offFile == (ULONG32) -1)
+ {
+ // The file offset we set for this block is invalid. This should be due to the
+ // 0xFeeFee case (i.e., sequence points that intentionally don't map back to a
+ // publicly available source code line). Fix up the offset to be valid (point it
+ // at the first file), but the offset will generally be ignored by the PDB
+ // reader.
+#ifdef _DEBUG
+ {
+ for (CV_Line_t * pLineCur = pLineBlockStart; pLineCur < pLineBlockAfterEnd; pLineCur++)
+ {
+ _ASSERTE(pLineCur->linenumStart == kUnmappedIP);
+ }
+ }
+#endif // _DEBUG
+ pLinesFileBlockHeader->offFile = 0;
+ }
+
+ // Now that we know the size of the block, finish filling out the lines file block
+ // header
+ pLinesFileBlockHeader->nLines = CV_off32_t(pLineBlockAfterEnd - pLineBlockStart);
+ pLinesFileBlockHeader->cbBlock = pLinesFileBlockHeader->nLines * sizeof(CV_Line_t);
+
+ return TRUE;
+}
+
+
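+// ----------------------------------------------------------------------------
+// Entry point for NGen PDB generation (this is what NGen's createpdb action
+// ultimately drives). Writes a PDB into pPdbPath for each native or
+// ReadyToRun module of the given assembly; pdbLines controls whether
+// source-line mappings are emitted in addition to symbols.
+//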
+HRESULT __stdcall CreatePdb(CORINFO_ASSEMBLY_HANDLE hAssembly, BSTR pNativeImagePath, BSTR pPdbPath, BOOL pdbLines, BSTR pManagedPdbSearchPath)
+{
+ STANDARD_VM_CONTRACT;
+
+ NGenPdbWriter pdbWriter(
+ pNativeImagePath,
+ pPdbPath,
+ pdbLines ? kPDBLines : 0,
+ pManagedPdbSearchPath);
+ IfFailThrow(pdbWriter.Load());
+
+ Assembly *pAssembly = reinterpret_cast<Assembly *>(hAssembly);
+ _ASSERTE(pAssembly);
+ _ASSERTE(pNativeImagePath);
+ _ASSERTE(pPdbPath);
+
+ ModuleIterator moduleIterator = pAssembly->IterateModules();
+ Module *pModule = NULL;
+ BOOL fAtLeastOneNativeModuleFound = FALSE;
+
+ while (moduleIterator.Next())
+ {
+ pModule = moduleIterator.GetModule();
+
+ if (pModule->HasNativeImage() || pModule->IsReadyToRun())
+ {
+ IfFailThrow(pdbWriter.WritePDBDataForModule(pModule));
+ fAtLeastOneNativeModuleFound = TRUE;
+ }
+ }
+
+ if (!fAtLeastOneNativeModuleFound)
+ {
+ GetSvcLogger()->Printf(
+ W("Failed to generate PDB: assembly '%s' contains no native images.\n"),
+ pNativeImagePath);
+ return E_FAIL;
+ }
+
+ GetSvcLogger()->Printf(
+ W("Successfully generated PDB for native assembly '%s'.\n"),
+ pNativeImagePath);
+ return S_OK;
+}
+
+
+// End of PDB writing code
+// ----------------------------------------------------------------------------
+
+
+BOOL CEEPreloader::CanPrerestoreEmbedClassHandle(CORINFO_CLASS_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsReadyToRunCompilation())
+ return FALSE;
+
+ TypeHandle th(handle);
+
+ return m_image->CanPrerestoreEagerBindToTypeHandle(th, NULL);
+}
+
+BOOL CEEPreloader::CanPrerestoreEmbedMethodHandle(CORINFO_METHOD_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsReadyToRunCompilation())
+ return FALSE;
+
+ MethodDesc *pMD = (MethodDesc*) handle;
+
+ return m_image->CanPrerestoreEagerBindToMethodDesc(pMD, NULL);
+}
+
+ICorCompilePreloader * CEECompileInfo::PreloadModule(CORINFO_MODULE_HANDLE module,
+ ICorCompileDataStore *pData,
+ CorProfileData *profileData)
+{
+ STANDARD_VM_CONTRACT;
+
+ NewHolder<CEEPreloader> pPreloader(new CEEPreloader((Module *) module, pData));
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ if (PartialNGenStressPercentage() == 0)
+ {
+ pPreloader->Preload(profileData);
+ }
+
+ COOPERATIVE_TRANSITION_END();
+
+ return pPreloader.Extract();
+}
+
+void CEECompileInfo::SetAssemblyHardBindList(
+ __in_ecount( cHardBindList )
+ LPWSTR *pHardBindList,
+ DWORD cHardBindList)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifndef FEATURE_CORECLR // hardbinding
+ GetAppDomain()->ToCompilationDomain()->SetAssemblyHardBindList(pHardBindList, cHardBindList);
+#endif
+}
+
+HRESULT CEECompileInfo::SetVerboseLevel(
+ IN VerboseLevel level)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+ g_CorCompileVerboseLevel = level;
+ return hr;
+}
+
+//
+// Preloader:
+//
+CEEPreloader::CEEPreloader(Module *pModule,
+ ICorCompileDataStore *pData)
+ : m_pData(pData)
+{
+ m_image = new DataImage(pModule, this);
+
+ CONSISTENCY_CHECK(pModule == GetAppDomain()->ToCompilationDomain()->GetTargetModule());
+
+ GetAppDomain()->ToCompilationDomain()->SetTargetImage(m_image, this);
+
+#ifdef FEATURE_FULL_NGEN
+ m_fSpeculativeTriage = FALSE;
+ m_fDictionariesPopulated = FALSE;
+#endif
+}
+
+CEEPreloader::~CEEPreloader()
+{
+ WRAPPER_NO_CONTRACT;
+ delete m_image;
+}
+
+void CEEPreloader::Preload(CorProfileData * profileData)
+{
+ STANDARD_VM_CONTRACT;
+
+ bool doNothingNgen = false;
+#ifdef _DEBUG
+ static ConfigDWORD fDoNothingNGen;
+ doNothingNgen = !!fDoNothingNGen.val(CLRConfig::INTERNAL_ZapDoNothing);
+#endif
+
+ if (!doNothingNgen)
+ {
+ m_image->GetModule()->SetProfileData(profileData);
+ m_image->GetModule()->ExpandAll(m_image);
+ }
+
+ // Triage all items created by initial expansion.
+ // We will try to accept all items created by initial expansion.
+ TriageForZap(TRUE);
+}
+
+//
+// ICorCompilerPreloader
+//
+
+DWORD CEEPreloader::MapMethodEntryPoint(CORINFO_METHOD_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *pMD = GetMethod(handle);
+ Precode * pPrecode = pMD->GetSavedPrecode(m_image);
+
+ return m_image->GetRVA(pPrecode);
+}
+
+DWORD CEEPreloader::MapClassHandle(CORINFO_CLASS_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ TypeHandle th = TypeHandle::FromPtr(handle);
+ if (th.IsTypeDesc())
+ return m_image->GetRVA(th.AsTypeDesc()) | 2;
+ else
+ return m_image->GetRVA(th.AsMethodTable());
+}
+
+DWORD CEEPreloader::MapMethodHandle(CORINFO_METHOD_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ return m_image->GetRVA(handle);
+}
+
+DWORD CEEPreloader::MapFieldHandle(CORINFO_FIELD_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ return m_image->GetRVA(handle);
+}
+
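+// Returns the RVA of the method's m_pNDirectTarget slot within its writable
+// data -- the location that gets patched with the resolved P/Invoke target at
+// runtime.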
+DWORD CEEPreloader::MapAddressOfPInvokeFixup(CORINFO_METHOD_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *pMD = GetMethod(handle);
+
+ _ASSERTE(pMD->IsNDirect());
+ NDirectWriteableData * pMDWriteableData = ((NDirectMethodDesc *)pMD)->GetWriteableData();
+
+ return m_image->GetRVA(pMDWriteableData) + offsetof(NDirectWriteableData, m_pNDirectTarget);
+}
+
+DWORD CEEPreloader::MapGenericHandle(CORINFO_GENERIC_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ return m_image->GetRVA(handle);
+}
+
+DWORD CEEPreloader::MapModuleIDHandle(CORINFO_MODULE_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ return m_image->GetRVA(handle) + (DWORD)Module::GetOffsetOfModuleID();
+}
+
+CORINFO_METHOD_HANDLE CEEPreloader::NextUncompiledMethod()
+{
+ STANDARD_VM_CONTRACT;
+
+ // If we have run out of methods to compile, ensure that we have code for all methods
+ // that we are about to save.
+ if (m_uncompiledMethods.GetCount() == 0)
+ {
+#ifdef FEATURE_FULL_NGEN
+ if (!m_fSpeculativeTriage)
+ {
+ // We take one shot at smarter elimination of speculative instantiations
+ // that are guaranteed to be found in other modules
+ TriageSpeculativeInstantiations();
+ m_fSpeculativeTriage = TRUE;
+ }
+#endif
+
+ if (m_uncompiledMethods.GetCount() == 0)
+ {
+#ifdef FEATURE_FULL_NGEN
+ if (!m_fDictionariesPopulated)
+ {
+ // Prepopulate dictionaries. Only the first population is done in an expansive way.
+ m_image->GetModule()->PrepopulateDictionaries(m_image, FALSE);
+ m_fDictionariesPopulated = TRUE;
+ }
+ else
+#endif
+ {
+ // The subsequent populations are done in a non-expansive way (they won't load new types)
+ m_image->GetModule()->PrepopulateDictionaries(m_image, TRUE);
+ }
+
+ // Make sure that we have generated code for all instantiations that we are going to save
+ // The new items that we encounter here were most likely side effects of verification or failed inlining,
+ // so do not try to save them eagerly.
+ while (TriageForZap(FALSE)) {
+ // Loop as long as new types are added
+ }
+ }
+ }
+
+ // Take next uncompiled method
+ COUNT_T count = m_uncompiledMethods.GetCount();
+ if (count == 0)
+ return NULL;
+
+ MethodDesc * pMD = m_uncompiledMethods[count - 1];
+ m_uncompiledMethods.SetCount(count - 1);
+
+#ifdef _DEBUG
+ if (LoggingOn(LF_ZAP, LL_INFO10000))
+ {
+ StackSString methodString;
+ TypeString::AppendMethodDebug(methodString, pMD);
+
+ LOG((LF_ZAP, LL_INFO10000, "CEEPreloader::NextUncompiledMethod: %S\n", methodString.GetUnicode()));
+ }
+#endif // _DEBUG
+
+ return (CORINFO_METHOD_HANDLE) pMD;
+}
+
+void CEEPreloader::AddMethodToTransitiveClosureOfInstantiations(CORINFO_METHOD_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ TriageMethodForZap(GetMethod(handle), TRUE);
+}
+
+BOOL CEEPreloader::IsMethodInTransitiveClosureOfInstantiations(CORINFO_METHOD_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *pMD = GetMethod(handle);
+
+ return (m_acceptedMethods.Lookup(pMD) != NULL) && (m_rejectedMethods.Lookup(pMD) == NULL);
+}
+
+BOOL CEEPreloader::IsTypeInTransitiveClosureOfInstantiations(CORINFO_CLASS_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ TypeHandle th = (TypeHandle) handle;
+
+ return (m_acceptedTypes.Lookup(th) != NULL) && (m_rejectedTypes.Lookup(th) == NULL);
+}
+
+void CEEPreloader::MethodReferencedByCompiledCode(CORINFO_METHOD_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifndef FEATURE_FULL_NGEN // Unreferenced methods
+ //
+ // Keep track of methods that are actually referenced by the code. We use this information
+ // to avoid generating code for unreferenced methods not visible outside the assembly.
+ // These methods are very unlikely to ever be used at runtime because they can
+ // only ever be called via private reflection.
+ //
+ MethodDesc *pMD = GetMethod(handle);
+
+ const CompileMethodEntry * pEntry = m_compileMethodsHash.LookupPtr(pMD);
+ if (pEntry != NULL)
+ {
+ if (pEntry->fReferenced)
+ return;
+ const_cast<CompileMethodEntry *>(pEntry)->fReferenced = true;
+
+ if (pEntry->fScheduled)
+ return;
+ m_uncompiledMethods.Append(pMD);
+ }
+ else
+ {
+ CompileMethodEntry entry;
+ entry.pMD = pMD;
+ entry.fReferenced = true;
+ entry.fScheduled = false;
+ m_compileMethodsHash.Add(entry);
+ }
+
+ if (pMD->IsWrapperStub())
+ MethodReferencedByCompiledCode((CORINFO_METHOD_HANDLE)pMD->GetWrappedMethodDesc());
+#endif // FEATURE_FULL_NGEN
+}
+
+BOOL CEEPreloader::IsUncompiledMethod(CORINFO_METHOD_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *pMD = GetMethod(handle);
+
+#ifndef FEATURE_FULL_NGEN // Unreferenced methods
+ const CompileMethodEntry * pEntry = m_compileMethodsHash.LookupPtr(pMD);
+ return (pEntry != NULL) && (pEntry->fScheduled || !pEntry->fReferenced);
+#else
+ return m_compileMethodsHash.LookupPtr(pMD) != NULL;
+#endif
+}
+
+#ifdef MDIL
+#define ELEMENT_TYPE_NULLABLE ((CorElementType)0x17)
+#define ELEMENT_TYPE_NULLABLE_CANON ((CorElementType)0x1f)
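+// Maps a representative element type to the MethodTable used as an
+// instantiation argument when enumerating "popular" generic flavors below
+// (e.g., ELEMENT_TYPE_VALUETYPE is represented by System.Decimal and
+// ELEMENT_TYPE_CLASS by the canonical method table).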
+MethodTable *GetMethodTable(CorElementType elType)
+{
+ switch (elType)
+ {
+ default:
+ assert(!"unexpected element type");
+ return g_pCanonMethodTableClass;
+
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ return MscorlibBinder::LoadPrimitiveType(elType);
+
+ case ELEMENT_TYPE_VALUETYPE:
+ return MscorlibBinder::GetClass(CLASS__DECIMAL);
+
+ case ELEMENT_TYPE_CLASS:
+ return g_pCanonMethodTableClass;
+
+ case ELEMENT_TYPE_NULLABLE:
+ {
+ MethodTable *pInstMT = NULL;
+ EX_TRY
+ {
+ TypeHandle int32TH = GetMethodTable(ELEMENT_TYPE_I4);
+ pInstMT = ClassLoader::LoadGenericInstantiationThrowing(g_pNullableClass->GetModule(),
+ g_pNullableClass->GetCl(),
+ Instantiation(&int32TH, 1),
+ ClassLoader::LoadTypes
+ ).GetMethodTable();
+ }
+ EX_CATCH
+ {
+ // do nothing
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return pInstMT;
+ }
+
+ case ELEMENT_TYPE_NULLABLE_CANON:
+ {
+ MethodTable *pInstMT = NULL;
+ EX_TRY
+ {
+ TypeHandle canonTH = g_pCanonMethodTableClass;
+ pInstMT = ClassLoader::LoadGenericInstantiationThrowing(g_pNullableClass->GetModule(),
+ g_pNullableClass->GetCl(),
+ Instantiation(&canonTH, 1),
+ ClassLoader::LoadTypes
+ ).GetMethodTable();
+ }
+ EX_CATCH
+ {
+ // do nothing
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return pInstMT;
+ }
+ }
+}
+
+void CEEPreloader::AddMDILCodeFlavorsToUncompiledMethods(CORINFO_METHOD_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+#if 0
+ static CorElementType popularFlavors[] =
+ {
+ ELEMENT_TYPE_CLASS,
+ ELEMENT_TYPE_VALUETYPE,
+ ELEMENT_TYPE_I4,
+// ELEMENT_TYPE_NULLABLE,
+ ELEMENT_TYPE_R8,
+ ELEMENT_TYPE_U1,
+ ELEMENT_TYPE_BOOLEAN,
+ ELEMENT_TYPE_U2,
+ ELEMENT_TYPE_I8,
+ ELEMENT_TYPE_U4,
+ ELEMENT_TYPE_U8,
+// ELEMENT_TYPE_NULLABLE_CANON,
+ };
+
+ MethodDesc *pMD = GetMethod(handle);
+ MethodTable *pMT = pMD->GetMethodTable();
+
+ DWORD nGenericClassArgs = pMT->GetNumGenericArgs();
+ DWORD nGenericMethodArgs = pMD->GetNumGenericMethodArgs();
+ DWORD nGenericArgs = nGenericClassArgs + nGenericMethodArgs;
+ if (nGenericArgs == 0)
+ return;
+ DWORD flavorCount = nGenericArgs <= 1 ? COUNTOF(popularFlavors) : COUNTOF(popularFlavors)*COUNTOF(popularFlavors);
+ DWORD flavorsPerArg = nGenericArgs <= 1 ? COUNTOF(popularFlavors) : COUNTOF(popularFlavors);
+
+ for (DWORD flavor = 1; flavor < flavorCount; flavor++)
+ {
+ // First instantiate the declaring type at <...,...,...>
+
+ DWORD dwAllocSize = 0;
+ if (!ClrSafeInt<DWORD>::multiply(sizeof(TypeHandle), nGenericClassArgs, dwAllocSize))
+ ThrowHR(COR_E_OVERFLOW);
+
+ CQuickBytes qbGenericClassArgs;
+ TypeHandle* pGenericClassArgs = reinterpret_cast<TypeHandle*>(qbGenericClassArgs.AllocThrows(dwAllocSize));
+
+ DWORD remainder = flavor;
+
+ for (DWORD i = 0; i < nGenericClassArgs; i++)
+ {
+ pGenericClassArgs[i] = GetMethodTable(popularFlavors[remainder % flavorsPerArg]);
+ remainder /= flavorsPerArg;
+ }
+
+ MethodTable *pInstMT = NULL;
+
+ EX_TRY
+ {
+ pInstMT = ClassLoader::LoadGenericInstantiationThrowing(pMT->GetModule(),
+ pMT->GetCl(),
+ Instantiation(pGenericClassArgs, nGenericClassArgs),
+ ClassLoader::LoadTypes
+ ).GetMethodTable();
+ }
+ EX_CATCH
+ {
+ // do nothing
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ if (pInstMT == NULL)
+ {
+ continue;
+ }
+
+ // Now instantiate the method at <__Canon,...,__Canon>, creating the shared code.
+ // This will not create an instantiating stub just yet.
+ CQuickBytes qbGenericMethodArgs;
+ TypeHandle *genericMethodArgs = NULL;
+
+ // The rest of this method instantiates a generic method
+ // Instantiate at "__Canon" if a NULL "genericMethodArgs" is given
+ if (nGenericMethodArgs)
+ {
+ dwAllocSize = 0;
+ if (!ClrSafeInt<DWORD>::multiply(sizeof(TypeHandle), nGenericMethodArgs, dwAllocSize))
+ ThrowHR(COR_E_OVERFLOW);
+
+ genericMethodArgs = reinterpret_cast<TypeHandle*>(qbGenericMethodArgs.AllocThrows(dwAllocSize));
+
+
+ for (DWORD i =0; i < nGenericMethodArgs; i++)
+ {
+ genericMethodArgs[i] = GetMethodTable(popularFlavors[remainder % flavorsPerArg]);
+ remainder /= flavorsPerArg;
+ }
+ }
+
+ MethodDesc *pInstMD = NULL;
+ EX_TRY
+ {
+ pInstMD = MethodDesc::FindOrCreateAssociatedMethodDesc( pMD,
+ pMT,
+ FALSE, /* don't get unboxing entry point */
+ Instantiation(genericMethodArgs, nGenericMethodArgs),
+ TRUE,
+ FALSE,
+ TRUE);
+ }
+ EX_CATCH
+ {
+ // do nothing
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ if (pInstMD != NULL)
+ AddToUncompiledMethods(pInstMD);
+ }
+#endif
+}
+#endif
+
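+// Returns true if the type is visible outside its assembly. For example, a
+// nested public type is accessible only if its entire enclosing chain is, and
+// a generic instantiation is accessible only if all of its type arguments are.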
+static bool IsTypeAccessibleOutsideItsAssembly(TypeHandle th)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (th.IsTypeDesc())
+ {
+ if (th.AsTypeDesc()->HasTypeParam())
+ return IsTypeAccessibleOutsideItsAssembly(th.AsTypeDesc()->GetTypeParam());
+
+ return true;
+ }
+
+ MethodTable * pMT = th.AsMethodTable();
+
+ if (pMT == g_pCanonMethodTableClass)
+ return true;
+
+ switch (pMT->GetClass()->GetProtection())
+ {
+ case tdPublic:
+ break;
+ case tdNestedPublic:
+ case tdNestedFamily:
+ case tdNestedFamORAssem:
+ {
+ MethodTable * pMTEnclosing = pMT->LoadEnclosingMethodTable();
+ if (pMTEnclosing == NULL)
+ return false;
+ if (!IsTypeAccessibleOutsideItsAssembly(pMTEnclosing))
+ return false;
+ }
+ break;
+
+ default:
+ return false;
+ }
+
+ if (pMT->HasInstantiation())
+ {
+ Instantiation instantiation = pMT->GetInstantiation();
+ for (DWORD i = 0; i < instantiation.GetNumArgs(); i++)
+ {
+ if (!IsTypeAccessibleOutsideItsAssembly(instantiation[i]))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool IsMethodAccessibleOutsideItsAssembly(MethodDesc * pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Note that this ignores unrestricted friend access (InternalsVisibleTo). The
+ // FriendAccessAllowed attribute can be used to prevent methods from getting trimmed if necessary.
+ if (pMD->GetMDImport()->GetCustomAttributeByName(pMD->GetMemberDef(), FRIEND_ACCESS_ALLOWED_ATTRIBUTE_TYPE, NULL, NULL) == S_OK)
+ return true;
+
+ switch (pMD->GetAttrs() & mdMemberAccessMask)
+ {
+ case mdFamily:
+ case mdFamORAssem:
+ case mdPublic:
+ break;
+
+ default:
+ return false;
+ }
+
+ if (!IsTypeAccessibleOutsideItsAssembly(pMD->GetMethodTable()))
+ return false;
+
+ if (pMD->HasMethodInstantiation())
+ {
+ Instantiation instantiation = pMD->GetMethodInstantiation();
+ for (DWORD i = 0; i < instantiation.GetNumArgs(); i++)
+ {
+ if (!IsTypeAccessibleOutsideItsAssembly(instantiation[i]))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool IsMethodCallableOutsideItsAssembly(MethodDesc * pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef MDIL
+ // Tracking of referenced methods does not kick in during MDIL compilation, so disable this optimization.
+ if (GetAppDomain()->IsMDILCompilationDomain())
+ return true;
+#endif
+
+ // Virtual methods can be called via interfaces, etc. We would need to do
+ // more analysis to trim them. For now, assume that they can be referenced outside this assembly.
+ if (pMD->IsVirtual())
+ return true;
+
+ // Constructors (both class and instance) are often used with reflection. Always generate code for them.
+ if (pMD->IsClassConstructorOrCtor())
+ return true;
+
+ if (IsMethodAccessibleOutsideItsAssembly(pMD))
+ return true;
+
+ return false;
+}
+
+BOOL IsGenericTooDeeplyNested(TypeHandle t);
+void CEEPreloader::AddToUncompiledMethods(MethodDesc *pMD, BOOL fForStubs)
+{
+ STANDARD_VM_CONTRACT;
+
+ // TriageTypeForZap() and TriageMethodForZap() should ensure this.
+ _ASSERTE(m_image->GetModule() == pMD->GetLoaderModule());
+
+ if (!fForStubs)
+ {
+ if (!pMD->IsIL())
+ return;
+
+ if (!pMD->MayHaveNativeCode() && !pMD->IsWrapperStub())
+ return;
+ }
+
+#ifdef MDIL
+ BOOL fIsMinimalMDIL = GetAppDomain()->IsMinimalMDILCompilationDomain();
+ if (fIsMinimalMDIL && (fForStubs || !pMD->HasClassOrMethodInstantiation()))
+ return;
+#endif // MDIL
+
+ // If it's already been compiled, don't add it to the set of uncompiled methods
+ if (m_image->GetCodeAddress(pMD) != NULL)
+ return;
+
+ // If it's already in the queue to be compiled don't add it again
+ const CompileMethodEntry * pEntry = m_compileMethodsHash.LookupPtr(pMD);
+
+#ifndef FEATURE_FULL_NGEN // Unreferenced methods
+ if (pEntry != NULL)
+ {
+ if (pEntry->fScheduled)
+ return;
+
+ if (!pEntry->fReferenced)
+ return;
+
+ const_cast<CompileMethodEntry *>(pEntry)->fScheduled = true;
+ }
+ else
+ {
+ // The unreferenced methods optimization works for generic methods and methods on generic types only.
+ // Non-generic methods take a different path.
+ //
+ // It is unclear whether it is worth enabling it for non-generic methods too. The benefit
+ // for non-generic methods is small, and the non-generic methods are more likely to be called
+ // via private reflection.
+
+ bool fSchedule = fForStubs || IsMethodCallableOutsideItsAssembly(pMD);
+
+ CompileMethodEntry entry;
+ entry.pMD = pMD;
+ entry.fScheduled = fSchedule;
+ entry.fReferenced = false;
+ m_compileMethodsHash.Add(entry);
+
+ if (!fSchedule)
+ return;
+ }
+#else // // FEATURE_FULL_NGEN
+ // Schedule the method for compilation
+ if (pEntry != NULL)
+ return;
+ CompileMethodEntry entry;
+ entry.pMD = pMD;
+ m_compileMethodsHash.Add(entry);
+#endif // FEATURE_FULL_NGEN
+
+ if (pMD->HasMethodInstantiation())
+ {
+ Instantiation instantiation = pMD->GetMethodInstantiation();
+ for (DWORD i = 0; i < instantiation.GetNumArgs(); i++)
+ {
+ if (IsGenericTooDeeplyNested(instantiation[i]))
+ return;
+ }
+ }
+
+ // Add it to the set of uncompiled methods
+ m_uncompiledMethods.Append(pMD);
+}
+
+//
+// Used to validate instantiations produced by the production rules before we actually try to instantiate them.
+//
+static BOOL CanSatisfyConstraints(Instantiation typicalInst, Instantiation candidateInst)
+{
+ STANDARD_VM_CONTRACT;
+
+ // The dependency must be of the form C<T> --> D<T>
+ _ASSERTE(typicalInst.GetNumArgs() == candidateInst.GetNumArgs());
+ if (typicalInst.GetNumArgs() != candidateInst.GetNumArgs())
+ return FALSE;
+
+ SigTypeContext typeContext(candidateInst, Instantiation());
+
+ for (DWORD i = 0; i < candidateInst.GetNumArgs(); i++)
+ {
+ TypeHandle thArg = candidateInst[i];
+
+ // If this is "__Canon" and we are code sharing then we can't rule out that some
+ // compatible instantiation may meet the constraints
+ if (thArg == TypeHandle(g_pCanonMethodTableClass))
+ continue;
+
+ // Otherwise we approximate, and just assume that we have "parametric" constraints
+ // of the form "T : IComparable<T>" rather than "odd" constraints such as "T : IComparable<string>".
+ // That is, we assume checking the constraint at the canonical type is sufficient
+ // to tell us if the constraint holds for all compatible types.
+ //
+ // For example of where this does not hold, consider if
+ // class C<T>
+ // class D<T> where T : IComparable<T>
+ // struct Struct<T> : IComparable<string>
+ // Assume we generate C<Struct<object>>. Now the constraint
+ // Struct<object> : IComparable<object>
+ // does not hold, so we do not generate the instantiation, even though strictly speaking
+ // the compatible instantiation C<Struct<string>> will satisfy the constraint
+ // Struct<string> : IComparable<string>
+
+ TypeVarTypeDesc* tyvar = typicalInst[i].AsGenericVariable();
+
+ tyvar->LoadConstraints();
+
+ if (!tyvar->SatisfiesConstraints(&typeContext,thArg)) {
+#ifdef _DEBUG
+ /*
+ // In case we want to know which illegal instantiations we ngen'ed
+ StackSString candidateInstName;
+ StackScratchBuffer buffer;
+ thArg.GetName(candidateInstName);
+ char output[1024];
+ sprintf(output, "Generics TypeDependencyAttribute processing: Couldn't satisfy a constraint. Class with Attribute: %s Bad candidate instantiated type: %s\r\n", pMT->GetDebugClassName(), candidateInstName.GetANSI(buffer));
+ OutputDebugStringA(output);
+ */
+#endif
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+
+//
+// This method has duplicated logic from bcl\system\collections\generic\comparer.cs
+//
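+// For example, for T = int (which implements IComparable<int>) the
+// ObjectComparer`1 dependency is rewritten to GenericComparer`1, and for
+// T = Nullable<int> it is rewritten to NullableComparer`1 instantiated over
+// the underlying int.
+//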
+static void SpecializeComparer(SString& ss, Instantiation& inst)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (inst.GetNumArgs() != 1) {
+ _ASSERTE(!"Improper use of a TypeDependencyAttribute for Comparer");
+ return;
+ }
+
+ TypeHandle elemTypeHnd = inst[0];
+
+ //
+ // Override the default ObjectComparer for special cases
+ //
+ if (elemTypeHnd.CanCastTo(
+ TypeHandle(MscorlibBinder::GetClass(CLASS__ICOMPARABLEGENERIC)).Instantiate(Instantiation(&elemTypeHnd, 1))))
+ {
+ ss.Set(W("System.Collections.Generic.GenericComparer`1"));
+ return;
+ }
+
+ if (Nullable::IsNullableType(elemTypeHnd))
+ {
+ Instantiation nullableInst = elemTypeHnd.AsMethodTable()->GetInstantiation();
+ if (nullableInst[0].CanCastTo(
+ TypeHandle(MscorlibBinder::GetClass(CLASS__ICOMPARABLEGENERIC)).Instantiate(nullableInst)))
+ {
+ ss.Set(W("System.Collections.Generic.NullableComparer`1"));
+ inst = nullableInst;
+ return;
+ }
+ }
+}
+
+//
+// This method has duplicated logic from bcl\system\collections\generic\equalitycomparer.cs
+//
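+// For example, T = int maps to GenericEqualityComparer`1, T = Nullable<int>
+// maps to NullableEqualityComparer`1 over the underlying int, and enum types
+// map to one of the EnumEqualityComparer variants chosen by underlying type.
+//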
+static void SpecializeEqualityComparer(SString& ss, Instantiation& inst)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (inst.GetNumArgs() != 1) {
+ _ASSERTE(!"Improper use of a TypeDependencyAttribute for EqualityComparer");
+ return;
+ }
+
+ TypeHandle elemTypeHnd = inst[0];
+
+ //
+ // Override the default ObjectEqualityComparer for special cases
+ //
+ if (elemTypeHnd.CanCastTo(
+ TypeHandle(MscorlibBinder::GetClass(CLASS__IEQUATABLEGENERIC)).Instantiate(Instantiation(&elemTypeHnd, 1))))
+ {
+ ss.Set(W("System.Collections.Generic.GenericEqualityComparer`1"));
+ return;
+ }
+
+ if (Nullable::IsNullableType(elemTypeHnd))
+ {
+ Instantiation nullableInst = elemTypeHnd.AsMethodTable()->GetInstantiation();
+ if (nullableInst[0].CanCastTo(
+ TypeHandle(MscorlibBinder::GetClass(CLASS__IEQUATABLEGENERIC)).Instantiate(nullableInst)))
+ {
+ ss.Set(W("System.Collections.Generic.NullableEqualityComparer`1"));
+ inst = nullableInst;
+ return;
+ }
+ }
+
+ if (elemTypeHnd.IsEnum())
+ {
+ // Note: We have different comparers for short and sbyte enums because for those types we
+ // need to make sure we call GetHashCode on the actual underlying type, since its
+ // implementation is more complex than for the other types.
+ CorElementType et = elemTypeHnd.GetVerifierCorElementType();
+ if (et == ELEMENT_TYPE_I4 ||
+ et == ELEMENT_TYPE_U4 ||
+ et == ELEMENT_TYPE_U2 ||
+ et == ELEMENT_TYPE_U1)
+ {
+ ss.Set(W("System.Collections.Generic.EnumEqualityComparer`1"));
+ return;
+ }
+ else if (et == ELEMENT_TYPE_I2)
+ {
+ ss.Set(W("System.Collections.Generic.ShortEnumEqualityComparer`1"));
+ return;
+ }
+ else if (et == ELEMENT_TYPE_I1)
+ {
+ ss.Set(W("System.Collections.Generic.SByteEnumEqualityComparer`1"));
+ return;
+ }
+ else if (et == ELEMENT_TYPE_I8 ||
+ et == ELEMENT_TYPE_U8)
+ {
+ ss.Set(W("System.Collections.Generic.LongEnumEqualityComparer`1"));
+ return;
+ }
+ }
+}
+
+#ifdef FEATURE_COMINTEROP
+// Instantiation of WinRT types defined in a non-WinRT module. This check is required to generate marshaling stubs for
+// instantiations of shadow WinRT types like EventHandler<ITracingStatusChangedEventArgs> in mscorlib.
+static BOOL IsInstantiationOfShadowWinRTType(MethodTable * pMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ Instantiation inst = pMT->GetInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle th = inst[i];
+ if (th.IsProjectedFromWinRT() && !th.GetModule()->IsWindowsRuntimeModule())
+ return TRUE;
+ }
+ return FALSE;
+}
+#endif
+
+void CEEPreloader::ApplyTypeDependencyProductionsForType(TypeHandle t)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Only actual types
+ if (t.IsTypeDesc())
+ return;
+
+ MethodTable * pMT = t.AsMethodTable();
+
+ if (!pMT->HasInstantiation() || pMT->ContainsGenericVariables())
+ return;
+
+#ifdef FEATURE_COMINTEROP
+ // At run-time, generic redirected interfaces and delegates need matching instantiations
+ // of other types/methods in order to be marshaled across the interop boundary.
+ if (m_image->GetModule()->IsWindowsRuntimeModule() || IsInstantiationOfShadowWinRTType(pMT))
+ {
+ // We only apply WinRT dependencies when compiling .winmd assemblies since redirected
+ // types are heavily used in non-WinRT code as well and would bloat native images.
+ if (pMT->IsLegalNonArrayWinRTType())
+ {
+ TypeHandle thWinRT;
+ WinMDAdapter::RedirectedTypeIndex index;
+ if (WinRTInterfaceRedirector::ResolveRedirectedInterface(pMT, &index))
+ {
+ // redirected interface needs the mscorlib-local definition of the corresponding WinRT type
+ MethodTable *pWinRTMT = WinRTInterfaceRedirector::GetWinRTTypeForRedirectedInterfaceIndex(index);
+ thWinRT = TypeHandle(pWinRTMT);
+
+ // and matching stub methods
+ WORD wNumSlots = pWinRTMT->GetNumVirtuals();
+ for (WORD i = 0; i < wNumSlots; i++)
+ {
+ MethodDesc *pAdapterMD = WinRTInterfaceRedirector::GetStubMethodForRedirectedInterface(
+ index,
+ i,
+ TypeHandle::Interop_NativeToManaged,
+ FALSE,
+ pMT->GetInstantiation());
+
+ TriageMethodForZap(pAdapterMD, TRUE);
+ }
+ }
+ if (WinRTDelegateRedirector::ResolveRedirectedDelegate(pMT, &index))
+ {
+ // redirected delegate needs the mscorlib-local definition of the corresponding WinRT type
+ thWinRT = TypeHandle(WinRTDelegateRedirector::GetWinRTTypeForRedirectedDelegateIndex(index));
+ }
+
+ if (!thWinRT.IsNull())
+ {
+ thWinRT = thWinRT.Instantiate(pMT->GetInstantiation());
+ TriageTypeForZap(thWinRT, TRUE);
+ }
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ pMT = pMT->GetCanonicalMethodTable();
+
+ // The TypeDependencyAttribute is currently only allowed on mscorlib types.
+ // Don't even look for the attribute on types in other assemblies.
+ if(!pMT->GetModule()->IsSystem()) {
+ return;
+ }
+
+ // Part 1. - check for an NGEN production rule specified by a use of CompilerServices.TypeDependencyAttribute
+ // e.g. C<T> --> D<T>
+ //
+ // For example, if C<int> is generated then we produce D<int>.
+ //
+ // Normally NGEN can detect such productions through the process of compilation, but there are some
+ // legitimate uses of reflection to generate generic instantiations which NGEN cannot detect.
+ // In particular, D<T> will typically have more constraints than C<T>, e.g.
+ // class D<T> where T : IComparable<T>
+ // Uses of dynamic constraints are an example: consider making a Comparer<T>, where we can have a
+ // FastComparer<T> where T : IComparable<T>, while the "slow" version checks for the non-generic
+ // IComparer interface.
+ // Also, T[] : IList<T>, IReadOnlyList<T>, and both of those interfaces should have a type dependency on SZArrayHelper's generic methods.
+ //
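+ // For example, mscorlib's Comparer<T> carries
+ //     [TypeDependency("System.Collections.Generic.ObjectComparer`1")]
+ // so NGENing Comparer<int> also produces ObjectComparer<int> (or one of the
+ // specialized comparers substituted via SpecializeComparer above).
+ //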
+ IMDInternalImport *pImport = pMT->GetMDImport();
+ HRESULT hr;
+
+ _ASSERTE(pImport);
+ //walk all of the TypeDependencyAttributes
+ MDEnumHolder hEnum(pImport);
+ hr = pImport->EnumCustomAttributeByNameInit(pMT->GetCl(),
+ g_CompilerServicesTypeDependencyAttribute, &hEnum);
+ if (SUCCEEDED(hr))
+ {
+ mdCustomAttribute tkAttribute;
+ const BYTE *pbAttr;
+ ULONG cbAttr;
+
+ while (pImport->EnumNext(&hEnum, &tkAttribute))
+ {
+ // Get the attribute and validate its format.
+ if (FAILED(pImport->GetCustomAttributeAsBlob(
+ tkAttribute,
+ reinterpret_cast<const void **>(&pbAttr),
+ &cbAttr)))
+ {
+ continue;
+ }
+
+ CustomAttributeParser cap(pbAttr, cbAttr);
+ if (FAILED(cap.SkipProlog()))
+ continue;
+
+ LPCUTF8 szString;
+ ULONG cbString;
+ if (FAILED(cap.GetNonNullString(&szString, &cbString)))
+ continue;
+
+ StackSString ss(SString::Utf8, szString, cbString);
+ Instantiation inst = pMT->GetInstantiation();
+
+#ifndef FEATURE_FULL_NGEN
+ // Do not expand non-canonical instantiations. They are not that expensive to create at runtime
+ // using code:ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation if necessary.
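+ // (e.g. the shared canonical D<__Canon> is expanded here, while a
+ // non-canonical D<int> can be materialized cheaply at runtime instead)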
+ if (!ClassLoader::IsCanonicalGenericInstantiation(inst))
+ continue;
+#endif
+
+ if (ss.Equals(W("System.Collections.Generic.ObjectComparer`1")))
+ {
+ SpecializeComparer(ss, inst);
+ }
+ else
+ if (ss.Equals(W("System.Collections.Generic.ObjectEqualityComparer`1")))
+ {
+ SpecializeEqualityComparer(ss, inst);
+ }
+
+ // Try to load the class using its name as a fully qualified name. If that fails,
+ // then we try to load it in the assembly of the current class.
+ TypeHandle typicalDepTH = TypeName::GetTypeUsingCASearchRules(ss.GetUnicode(), pMT->GetAssembly());
+
+ _ASSERTE(!typicalDepTH.IsNull());
+ // This attribute is currently only allowed to refer to mscorlib types
+ _ASSERTE(typicalDepTH.GetModule()->IsSystem());
+ if (!typicalDepTH.GetModule()->IsSystem())
+ continue;
+
+ // For IList<T>, ICollection<T>, IEnumerable<T>, IReadOnlyCollection<T> & IReadOnlyList<T>, include SZArrayHelper's
+ // generic methods (or at least the relevant ones) in the ngen image in
+ // case someone casts a T[] to an IList<T> (or ICollection<T> or IEnumerable<T>, etc).
+ if (MscorlibBinder::IsClass(typicalDepTH.AsMethodTable(), CLASS__SZARRAYHELPER))
+ {
+#ifdef FEATURE_FULL_NGEN
+ if (pMT->GetNumGenericArgs() != 1 || !pMT->IsInterface()) {
+ _ASSERTE(!"Improper use of a TypeDependencyAttribute for SZArrayHelper");
+ continue;
+ }
+ TypeHandle elemTypeHnd = pMT->GetInstantiation()[0];
+ if (elemTypeHnd.IsValueType())
+ ApplyTypeDependencyForSZArrayHelper(pMT, elemTypeHnd);
+#endif
+ continue;
+ }
+
+ _ASSERTE(typicalDepTH.IsTypicalTypeDefinition());
+ if (!typicalDepTH.IsTypicalTypeDefinition())
+ continue;
+
+ // It certainly can't be immediately recursive...
+ _ASSERTE(!typicalDepTH.GetMethodTable()->HasSameTypeDefAs(pMT));
+
+ // We want to rule out some cases where we know for sure that the generated type
+ // won't satisfy its constraints. However, some generated types may represent
+ // canonicals in sets of shared instantiations, which we still need to accept.
+
+ if (CanSatisfyConstraints(typicalDepTH.GetInstantiation(), inst))
+ {
+ TypeHandle instDepTH =
+ ClassLoader::LoadGenericInstantiationThrowing(typicalDepTH.GetModule(), typicalDepTH.GetCl(), inst);
+
+ _ASSERTE(!instDepTH.ContainsGenericVariables());
+ _ASSERTE(instDepTH.GetNumGenericArgs() == typicalDepTH.GetNumGenericArgs());
+ _ASSERTE(instDepTH.GetMethodTable()->HasSameTypeDefAs(typicalDepTH.GetMethodTable()));
+
+ // OK, add the generated type to the dependency set
+ TriageTypeForZap(instDepTH, TRUE);
+ }
+ }
+ }
+} // CEEPreloader::ApplyTypeDependencyProductionsForType
+
+
+// Given IEnumerable<Foo>, we want to add System.SZArrayHelper.GetEnumerator<Foo>
+// to the ngen image. This way we can cast a T[] to an IList<T> and
+// use methods on it (from SZArrayHelper) without pulling in the JIT.
+// Do the same for ICollection<T>/IReadOnlyCollection<T> and
+// IList<T>/IReadOnlyList<T>, but only add the relevant methods
+// from those interfaces.
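+//
+// For example (C#, illustrative):
+//
+//     int[] arr = new int[] { 1, 2, 3 };
+//     IList<int> list = arr;      // T[] cast to IList<T>
+//     int count = list.Count;     // executes SZArrayHelper.get_Count<int>
+//
+// With those instantiations saved in the image, the calls above need no JIT.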
+void CEEPreloader::ApplyTypeDependencyForSZArrayHelper(MethodTable * pInterfaceMT, TypeHandle elemTypeHnd)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(elemTypeHnd.AsMethodTable()->IsValueType());
+
+ // We expect this to only be called for IList<T>/IReadOnlyList<T>, ICollection<T>/IReadOnlyCollection<T>, IEnumerable<T>.
+ _ASSERTE(pInterfaceMT->IsInterface());
+ _ASSERTE(pInterfaceMT->GetNumGenericArgs() == 1);
+
+ // This is the list of methods that don't throw exceptions on SZArrayHelper.
+ static const BinderMethodID SZArrayHelperMethodIDs[] = {
+ // Read-only methods that are present on both regular and read-only interfaces.
+ METHOD__SZARRAYHELPER__GETENUMERATOR,
+ METHOD__SZARRAYHELPER__GET_COUNT,
+ METHOD__SZARRAYHELPER__GET_ITEM,
+ // The rest of the methods are present on regular interfaces only.
+ METHOD__SZARRAYHELPER__SET_ITEM,
+ METHOD__SZARRAYHELPER__COPYTO,
+ METHOD__SZARRAYHELPER__INDEXOF,
+ METHOD__SZARRAYHELPER__CONTAINS };
+
+ static const int cReadOnlyMethods = 3;
+ static const int cAllMethods = 7;
+
+ static const BinderMethodID LastMethodOnGenericArrayInterfaces[] = {
+ METHOD__SZARRAYHELPER__GETENUMERATOR, // Last method of IEnumerable<T>
+ METHOD__SZARRAYHELPER__REMOVE, // Last method of ICollection<T>.
+ METHOD__SZARRAYHELPER__REMOVEAT, // Last method of IList<T>
+ };
+
+ // Verify that the binder IDs are laid out in the expected order in mscorlib.h.
+#if _DEBUG
+ for(unsigned int i=0; i < NumItems(LastMethodOnGenericArrayInterfaces) - 1; i++) {
+ _ASSERTE(LastMethodOnGenericArrayInterfaces[i] < LastMethodOnGenericArrayInterfaces[i+1]);
+ }
+#endif
+
+ MethodTable* pExactMT = MscorlibBinder::GetClass(CLASS__SZARRAYHELPER);
+
+ // Subtract one from the non-generic IEnumerable that the generic IEnumerable<T>
+ // inherits from.
+ unsigned inheritanceDepth = pInterfaceMT->GetNumInterfaces() - 1;
+ PREFIX_ASSUME(0 <= inheritanceDepth && inheritanceDepth < NumItems(LastMethodOnGenericArrayInterfaces));
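+ // For example: IEnumerable<T> implements only IEnumerable -> depth 0;
+ // ICollection<T> adds IEnumerable<T> -> depth 1; IList<T> adds
+ // ICollection<T> and IEnumerable<T> -> depth 2 (and similarly for the
+ // read-only variants).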
+
+ // Read-only interfaces happen to always have exactly one virtual method
+ bool fIsReadOnly = pInterfaceMT->GetNumVirtuals() == 1;
+
+ for(int i=0; i < (fIsReadOnly ? cReadOnlyMethods : cAllMethods); i++)
+ {
+ // Check whether the method applies for this type.
+ if (SZArrayHelperMethodIDs[i] > LastMethodOnGenericArrayInterfaces[inheritanceDepth])
+ continue;
+
+ MethodDesc * pPrimaryMD = MscorlibBinder::GetMethod(SZArrayHelperMethodIDs[i]);
+
+ MethodDesc * pInstantiatedMD = MethodDesc::FindOrCreateAssociatedMethodDesc(pPrimaryMD,
+ pExactMT, false, Instantiation(&elemTypeHnd, 1), false);
+
+ TriageMethodForZap(pInstantiatedMD, true);
+ }
+}
+
+
+void CEEPreloader::AddTypeToTransitiveClosureOfInstantiations(CORINFO_CLASS_HANDLE handle)
+{
+ STANDARD_VM_CONTRACT;
+
+ TriageTypeForZap((TypeHandle) handle, TRUE);
+}
+
+const unsigned MAX_ZAP_INSTANTIATION_NESTING = 10;
+
+BOOL IsGenericTooDeeplyNested(TypeHandle t)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ // If this type is nested more than N levels deep, do not add it to the
+ // closure. Build a worklist for a DFS over the depth of instantiation.
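+ //
+ // For example, with MAX_ZAP_INSTANTIATION_NESTING == 10, a value-type
+ // instantiation shaped like S<S<S<...S<int>...>>> that nests more than
+ // ten levels deep is reported as too deep and is not expanded.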
+
+ //the current index in the queue we're visiting
+ int currentQueueIdx; //use -1 to indicate that we're done.
+ //the current generic arg type.
+ TypeHandle currentVisitingType[MAX_ZAP_INSTANTIATION_NESTING];
+
+ //the ordinal in the GetInstantiation for the current type (over [0,
+ //GetNumGenericArg())
+ unsigned currentGenericArgEdge[MAX_ZAP_INSTANTIATION_NESTING];
+
+ //initialize the DFS.
+ memset(currentGenericArgEdge, 0, sizeof(currentGenericArgEdge));
+ currentVisitingType[0] = t;
+ currentQueueIdx = 0;
+
+ while( currentQueueIdx >= 0 )
+ {
+ //see if we're done with this node
+ if( currentVisitingType[currentQueueIdx].GetNumGenericArgs()
+ <= currentGenericArgEdge[currentQueueIdx] )
+ {
+ --currentQueueIdx;
+ }
+ else
+ {
+ //more edges to visit. So visit one edge
+ _ASSERTE(currentGenericArgEdge[currentQueueIdx] < currentVisitingType[currentQueueIdx].GetNumGenericArgs());
+ TypeHandle current = currentVisitingType[currentQueueIdx].GetInstantiation()[currentGenericArgEdge[currentQueueIdx]];
+ ++currentGenericArgEdge[currentQueueIdx];
+ //only value types cause a problem because of "approximate" type
+ //loading, so only worry about scanning value type arguments.
+ if( current.HasInstantiation() && current.IsValueType() )
+ {
+ //new edge. Make sure there is space in the queue.
+ if( (currentQueueIdx + 1) >= (int)NumItems(currentGenericArgEdge) )
+ {
+ //exceeded the allowable depth. Stop processing.
+ return TRUE;
+ }
+ else
+ {
+ ++currentQueueIdx;
+ currentGenericArgEdge[currentQueueIdx] = 0;
+ currentVisitingType[currentQueueIdx] = current;
+ }
+ }
+ }
+ }
+
+ return FALSE;
+}
+
+void CEEPreloader::TriageTypeForZap(TypeHandle th, BOOL fAcceptIfNotSure, BOOL fExpandDependencies)
+{
+ STANDARD_VM_CONTRACT;
+
+ // We care about param types only
+ if (th.IsTypicalTypeDefinition() && !th.IsTypeDesc())
+ return;
+
+ // We care about types from our module only
+ if (m_image->GetModule() != th.GetLoaderModule())
+ return;
+
+ // Check if we have decided to accept this type already.
+ if (m_acceptedTypes.Lookup(th) != NULL)
+ return;
+
+ // Check if we have decided to reject this type already.
+ if (m_rejectedTypes.Lookup(th) != NULL)
+ return;
+
+ enum { Investigate, Accepted, Rejected } triage = Investigate;
+
+ const char * rejectReason = NULL;
+
+ // TypeVarTypeDescs are saved via links from code:Module::m_GenericParamToDescMap
+ if (th.IsGenericVariable())
+ {
+ triage = Rejected;
+ rejectReason = "type is a Generic variable";
+ goto Done;
+ }
+
+ /* Consider this example:
+
+ class A<T> {}
+ class B<U> : A<U> {}
+
+ class C<V> : B<V>
+ {
+ void foo<W>()
+ {
+ typeof(C<W>);
+ typeof(B<A<W>>);
+
+ typeof(List<V>);
+ }
+ }
+
+ The open instantiations can be divided into the following 3 categories:
+
+ 1. A<T>, B<U>, A<U>, C<V>, B<V>, A<V> are open instantiations involving
+ ELEMENT_TYPE_VARs that need to be saved in the ngen image.
+ 2. List<V> is an instantiations that also involves ELEMENT_TYPE_VARs.
+ However, it need not be saved since it will only be needed during the
+ verification of foo<W>().
+ 3. C<W>, A<W>, B<A<W>> are open instantiations involving ELEMENT_TYPE_MVARs
+ that need not be saved since they will only be needed during the
+ verification of foo<W>().
+
+ Distinguishing between 1 and 2 requires walking C<V> and determining
+ which ones are field/parent/interface types required by c<V>. However,
+ category 3 is easy to detect, and can easily be pruned out. Hence,
+ we pass in methodTypeVarsOnly=TRUE here.
+ */
+ if (th.ContainsGenericVariables(TRUE/*methodTypeVarsOnly*/))
+ {
+ triage = Rejected;
+ rejectReason = "type contains method generic variables";
+ goto Done;
+ }
+
+ // Filter out weird cases we do not care about.
+ if (!m_image->GetModule()->GetAvailableParamTypes()->ContainsValue(th))
+ {
+ triage = Rejected;
+ rejectReason = "type is not in the current module";
+ goto Done;
+ }
+
+ // Reject invalid generic instantiations. They will not be fully loaded
+ // as they will throw a TypeLoadException before they reach CLASS_LOAD_LEVEL_FINAL.
+ if (!th.IsFullyLoaded())
+ {
+ // This may load new types.
+ ClassLoader::TryEnsureLoaded(th);
+
+ if (!th.IsFullyLoaded())
+ {
+ triage = Rejected;
+ rejectReason = "type could not be fully loaded, possibly because it does not satisfy its constraints";
+ goto Done;
+ }
+ }
+
+ // Do not save any types containing generic class parameters from another module
+ Module *pOpenModule;
+ pOpenModule = th.GetDefiningModuleForOpenType();
+ if (pOpenModule != NULL && pOpenModule != m_image->GetModule())
+ {
+ triage = Rejected;
+ rejectReason = "type contains generic variables from another module";
+ goto Done;
+ }
+
+ // Always store items in their preferred zap module even if we are not sure
+ if (Module::GetPreferredZapModuleForTypeHandle(th) == m_image->GetModule())
+ {
+ triage = Accepted;
+ goto Done;
+ }
+
+#ifdef FEATURE_FULL_NGEN
+ // Only save arrays and other param types in their preferred zap modules,
+ // i.e. never duplicate them.
+ if (th.IsTypeDesc() || th.IsArrayType())
+ {
+ triage = Rejected;
+ rejectReason = "type is a TypeDesc";
+ goto Done;
+ }
+
+ {
+ // Do not save instantiations found in one of our hardbound dependencies
+ PtrHashMap::PtrIterator iter = GetAppDomain()->ToCompilationDomain()->IterateHardBoundModules();
+ for (/**/; !iter.end(); ++iter)
+ {
+ Module * hardBoundModule = (Module*)iter.GetValue();
+ if (hardBoundModule->GetAvailableParamTypes()->ContainsValue(th))
+ {
+ triage = Rejected;
+ rejectReason = "type was found in a hardbound dependency";
+ goto Done;
+ }
+ }
+ }
+
+ // We are not really sure about this type. Accept it only if we have been asked to.
+ if (fAcceptIfNotSure)
+ {
+ if (!m_fSpeculativeTriage)
+ {
+ // We will take a look later before we actually start compiling the instantiations
+ m_speculativeTypes.Append(th);
+ m_acceptedTypes.Add(th);
+ return;
+ }
+
+ triage = Accepted;
+ goto Done;
+ }
+#else
+ rejectReason = "type is not in the preferred module";
+ triage = Rejected;
+#endif
+
+Done:
+ switch (triage)
+ {
+ case Accepted:
+ m_acceptedTypes.Add(th);
+ if (fExpandDependencies)
+ {
+ ExpandTypeDependencies(th);
+ }
+ break;
+
+ case Rejected:
+
+ m_rejectedTypes.Add(th);
+
+#ifdef LOGGING
+ // It is expensive to call th.GetName, only do it when we are actually logging
+ if (LoggingEnabled())
+ {
+ SString typeName;
+ th.GetName(typeName);
+ LOG((LF_ZAP, LL_INFO10000, "TriageTypeForZap rejects %S (%08x) because %s\n",
+ typeName.GetUnicode(), th.AsPtr(), rejectReason));
+ }
+#endif
+ break;
+
+ default:
+ // We have not found a compelling reason to accept or reject the type yet. Maybe next time...
+ break;
+ }
+}
+
+void CEEPreloader::ExpandTypeDependencies(TypeHandle th)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (th.IsTypeDesc())
+ return;
+
+ MethodTable* pMT = th.AsMethodTable();
+
+ if (pMT->IsCanonicalMethodTable())
+ {
+ // Cutoff infinite recursion.
+ if (!IsGenericTooDeeplyNested(th))
+ {
+ // Make sure all methods are compiled
+ // We only want to check the method bodies owned by this type,
+ // and not any method bodies owned by a parent type, as the
+ // parent type may not get saved in this ngen image.
+ MethodTable::IntroducedMethodIterator itr(pMT);
+ for (/**/; itr.IsValid(); itr.Next())
+ {
+ AddToUncompiledMethods(itr.GetMethodDesc(), FALSE);
+ }
+ }
+ }
+ else
+ {
+ // Make sure canonical method table is saved
+ TriageTypeForZap(pMT->GetCanonicalMethodTable(), TRUE);
+ }
+
+ if (pMT->SupportsGenericInterop(TypeHandle::Interop_ManagedToNative))
+ {
+ MethodTable::IntroducedMethodIterator itr(pMT->GetCanonicalMethodTable());
+ for (/**/; itr.IsValid(); itr.Next())
+ {
+ MethodDesc *pMD = itr.GetMethodDesc();
+
+ if (!pMD->HasMethodInstantiation())
+ {
+ if (pMT->IsInterface() || !pMD->IsSharedByGenericInstantiations())
+ {
+ pMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pMD,
+ pMT,
+ FALSE, // forceBoxedEntryPoint
+ Instantiation(), // methodInst
+ FALSE, // allowInstParam
+ TRUE); // forceRemotableMethod
+ }
+ else
+ {
+ _ASSERTE(pMT->IsDelegate());
+ pMD = InstantiatedMethodDesc::FindOrCreateExactClassMethod(pMT, pMD);
+ }
+
+ AddToUncompiledMethods(pMD, TRUE);
+ }
+ }
+ }
+
+ // Make sure parent type is saved
+ TriageTypeForZap(pMT->GetParentMethodTable(), TRUE);
+
+ // Make sure all instantiation arguments are saved
+ Instantiation inst = pMT->GetInstantiation();
+ for (DWORD iArg = 0; iArg < inst.GetNumArgs(); iArg++)
+ {
+ TriageTypeForZap(inst[iArg], TRUE);
+ }
+
+ // Make sure all interfaces implemented by the class are saved
+ MethodTable::InterfaceMapIterator intIterator = pMT->IterateInterfaceMap();
+ while (intIterator.Next())
+ {
+ TriageTypeForZap(intIterator.GetInterface(), TRUE);
+ }
+
+ // Make sure approximate types for all fields are saved
+ ApproxFieldDescIterator fdIterator(pMT, ApproxFieldDescIterator::ALL_FIELDS);
+ FieldDesc* pFD;
+ while ((pFD = fdIterator.Next()) != NULL)
+ {
+ if (pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ {
+ TriageTypeForZap(pFD->GetFieldTypeHandleThrowing(), TRUE);
+ }
+ }
+
+ // Make sure types for all generic static fields are saved
+
+ if (pMT->HasGenericsStaticsInfo())
+ {
+ FieldDesc *pGenStaticFields = pMT->GetGenericsStaticFieldDescs();
+ DWORD nFields = pMT->GetNumStaticFields();
+ for (DWORD iField = 0; iField < nFields; iField++)
+ {
+ FieldDesc* pField = &pGenStaticFields[iField];
+ if (pField->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ {
+ TriageTypeForZap(pField->GetFieldTypeHandleThrowing(), TRUE);
+ }
+ }
+ }
+
+ // Expand type using the custom rules. May load new types.
+ ApplyTypeDependencyProductionsForType(th);
+}
+
+// Triage instantiations of generic methods
+
+void CEEPreloader::TriageMethodForZap(MethodDesc* pMD, BOOL fAcceptIfNotSure, BOOL fExpandDependencies)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Submit the method type for triage
+ TriageTypeForZap(TypeHandle(pMD->GetMethodTable()), fAcceptIfNotSure);
+
+ // We care about instantiated methods only
+ if (pMD->IsTypicalMethodDefinition())
+ return;
+
+ // We care about methods from our module only
+ if (m_image->GetModule() != pMD->GetLoaderModule())
+ return;
+
+ // Check if we have decided to accept this method already.
+ if (m_acceptedMethods.Lookup(pMD) != NULL)
+ return;
+
+ // Check if we have decided to reject this method already.
+ if (m_rejectedMethods.Lookup(pMD) != NULL)
+ return;
+
+ enum { Investigate, Accepted, Rejected } triage = Investigate;
+
+ const char * rejectReason = NULL;
+
+ // Do not save open methods
+ if (pMD->ContainsGenericVariables())
+ {
+ triage = Rejected;
+ rejectReason = "method contains method generic variables";
+ goto Done;
+ }
+
+ // Filter out other weird cases we do not care about.
+ if (!m_image->GetModule()->GetInstMethodHashTable()->ContainsMethodDesc(pMD))
+ {
+ triage = Rejected;
+ rejectReason = "method is not in the current module";
+ goto Done;
+ }
+
+ // Always store items in their preferred zap module even if we are not sure
+ if (Module::GetPreferredZapModuleForMethodDesc(pMD) == m_image->GetModule())
+ {
+ triage = Accepted;
+ goto Done;
+ }
+
+#ifdef FEATURE_FULL_NGEN
+ {
+ // Do not save instantiations found in one of our hardbound dependencies
+ PtrHashMap::PtrIterator iter = GetAppDomain()->ToCompilationDomain()->IterateHardBoundModules();
+ for (/**/; !iter.end(); ++iter)
+ {
+ Module * hardBoundModule = (Module*)iter.GetValue();
+ if (hardBoundModule->GetInstMethodHashTable()->ContainsMethodDesc(pMD))
+ {
+ triage = Rejected;
+ rejectReason = "method was found in a hardbound dependency";
+ goto Done;
+ }
+ }
+ }
+
+ // We are not really sure about this method. Accept it only if we have been asked to.
+ if (fAcceptIfNotSure)
+ {
+ // It does not seem worth it to go through extra hoops to eliminate redundant
+ // speculative method instantiations from softbound dependencies like we do for types
+ // if (!m_fSpeculativeTriage)
+ // {
+ // // We will take a look later before we actually start compiling the instantiations
+ // ...
+ // }
+
+ triage = Accepted;
+ goto Done;
+ }
+#else
+ triage = Rejected;
+#endif
+
+Done:
+ switch (triage)
+ {
+ case Accepted:
+ m_acceptedMethods.Add(pMD);
+ if (fExpandDependencies)
+ {
+ ExpandMethodDependencies(pMD);
+ }
+ break;
+
+ case Rejected:
+ m_rejectedMethods.Add(pMD);
+ LOG((LF_ZAP, LL_INFO10000, "TriageMethodForZap rejects %s (%08x) because %s\n",
+ pMD->m_pszDebugMethodName, pMD, rejectReason));
+ break;
+
+ default:
+ // We have not found a compelling reason to accept or reject the method yet. Maybe next time...
+ break;
+ }
+}
+
+void CEEPreloader::ExpandMethodDependencies(MethodDesc * pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ AddToUncompiledMethods(pMD, FALSE);
+
+ {
+ // Make sure all instantiation arguments are saved
+ Instantiation inst = pMD->GetMethodInstantiation();
+ for (DWORD iArg = 0; iArg < inst.GetNumArgs(); iArg++)
+ {
+ TriageTypeForZap(inst[iArg], TRUE);
+ }
+ }
+
+ // Make sure to add wrapped method desc
+ if (pMD->IsWrapperStub())
+ TriageMethodForZap(pMD->GetWrappedMethodDesc(), TRUE);
+}
+
+void CEEPreloader::TriageTypeFromSoftBoundModule(TypeHandle th, Module * pSoftBoundModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ // We care about types from our module only
+ if (m_image->GetModule() != th.GetLoaderModule())
+ return;
+
+ // Nothing to do if we have rejected the type already.
+ if (m_rejectedTypes.Lookup(th) != NULL)
+ return;
+
+ // We only make guarantees about types living in their own PZM
+ if (Module::GetPreferredZapModuleForTypeHandle(th) != pSoftBoundModule)
+ return;
+
+ // Reject the type - it is guaranteed to be saved in PZM
+ m_rejectedTypes.Add(th);
+
+ if (!th.IsTypeDesc())
+ {
+ // Reject the canonical method table if possible.
+ MethodTable* pMT = th.AsMethodTable();
+ if (!pMT->IsCanonicalMethodTable())
+ TriageTypeFromSoftBoundModule(pMT->GetCanonicalMethodTable(), pSoftBoundModule);
+
+ // Reject parent method table if possible.
+ TriageTypeFromSoftBoundModule(pMT->GetParentMethodTable(), pSoftBoundModule);
+
+ // Reject all interfaces implemented by the type if possible.
+ MethodTable::InterfaceMapIterator intIterator = pMT->IterateInterfaceMap();
+ while (intIterator.Next())
+ {
+ TriageTypeFromSoftBoundModule(intIterator.GetInterface(), pSoftBoundModule);
+ }
+
+ // It does not seem worth it to reject the remaining items
+ // expanded by CEEPreloader::ExpandTypeDependencies here.
+ }
+}
+
+static TypeHandle TryToLoadTypeSpecHelper(Module * pModule, PCCOR_SIGNATURE pSig, ULONG cSig)
+{
+ STANDARD_VM_CONTRACT;
+
+ TypeHandle th;
+
+ EX_TRY
+ {
+ SigPointer p(pSig, cSig);
+ SigTypeContext typeContext; // empty context is OK: encoding should not contain type variables.
+
+ th = p.GetTypeHandleThrowing(pModule, &typeContext, ClassLoader::DontLoadTypes);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return th;
+}
+
+#ifdef FEATURE_FULL_NGEN
+void CEEPreloader::TriageTypeSpecsFromSoftBoundModule(Module * pSoftBoundModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+ // Reject all typespecs that are guaranteed to be found in the soft-bound PZM
+ //
+
+ IMDInternalImport *pInternalImport = pSoftBoundModule->GetMDImport();
+
+ HENUMInternalHolder hEnum(pInternalImport);
+ hEnum.EnumAllInit(mdtTypeSpec);
+
+ mdToken tk;
+ while (pInternalImport->EnumNext(&hEnum, &tk))
+ {
+ ULONG cSig;
+ PCCOR_SIGNATURE pSig;
+
+ if (FAILED(pInternalImport->GetTypeSpecFromToken(tk, &pSig, &cSig)))
+ {
+ pSig = NULL;
+ cSig = 0;
+ }
+
+ // Check all type specs that do not contain variables
+ if (SigPointer(pSig, cSig).IsPolyType(NULL) == hasNoVars)
+ {
+ TypeHandle th = TryToLoadTypeSpecHelper(pSoftBoundModule, pSig, cSig);
+
+ if (th.IsNull())
+ continue;
+
+ TriageTypeFromSoftBoundModule(th, pSoftBoundModule);
+ }
+ }
+}
+
+void CEEPreloader::TriageSpeculativeType(TypeHandle th)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Nothing to do if we have rejected the type already
+ if (m_rejectedTypes.Lookup(th) != NULL)
+ return;
+
+ Module * pPreferredZapModule = Module::GetPreferredZapModuleForTypeHandle(th);
+ BOOL fHardBoundPreferredZapModule = FALSE;
+
+ //
+ // Even though we have done this check already earlier, do it again here in case we have picked up
+ // any eager-bound dependency in the meantime
+ //
+ // Do not save instantiations found in one of our eager-bound dependencies
+ PtrHashMap::PtrIterator iter = GetAppDomain()->ToCompilationDomain()->IterateHardBoundModules();
+ for (/**/; !iter.end(); ++iter)
+ {
+ Module * hardBoundModule = (Module*)iter.GetValue();
+ if (hardBoundModule->GetAvailableParamTypes()->ContainsValue(th))
+ {
+ m_rejectedTypes.Add(th);
+ return;
+ }
+
+ if (hardBoundModule == pPreferredZapModule)
+ {
+ fHardBoundPreferredZapModule = TRUE;
+ }
+ }
+
+ if (!fHardBoundPreferredZapModule && !pPreferredZapModule->AreTypeSpecsTriaged())
+ {
+ // Reject all types that are guaranteed to be instantiated in soft bound PZM
+ TriageTypeSpecsFromSoftBoundModule(pPreferredZapModule);
+ pPreferredZapModule->SetTypeSpecsTriaged();
+
+ if (m_rejectedTypes.Lookup(th) != NULL)
+ return;
+ }
+
+ // We have no other option but to accept and expand the type
+ ExpandTypeDependencies(th);
+}
+
+void CEEPreloader::TriageSpeculativeInstantiations()
+{
+ STANDARD_VM_CONTRACT;
+
+ // Get definitive triage answer for speculative types that we have run into earlier
+ // Note that m_speculativeTypes may be growing as this loop runs
+ for (COUNT_T i = 0; i < m_speculativeTypes.GetCount(); i++)
+ {
+ TriageSpeculativeType(m_speculativeTypes[i]);
+ }
+
+ // We are done - the array of speculative types is no longer necessary
+ m_speculativeTypes.Clear();
+}
+#endif // FEATURE_FULL_NGEN
+
+BOOL CEEPreloader::TriageForZap(BOOL fAcceptIfNotSure, BOOL fExpandDependencies)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD dwNumTypes = m_image->GetModule()->GetAvailableParamTypes()->GetCount();
+ DWORD dwNumMethods = m_image->GetModule()->GetInstMethodHashTable()->GetCount();
+
+ // Triage types
+ {
+ // Create a local copy in case new elements are added to the hashtable while the triage runs
+ InlineSArray<TypeHandle, 20> pTypes;
+
+ // Make sure the iterator is destroyed before there is a chance of loading new types
+ {
+ EETypeHashTable* pTable = m_image->GetModule()->GetAvailableParamTypes();
+
+ EETypeHashTable::Iterator it(pTable);
+ EETypeHashEntry *pEntry;
+ while (pTable->FindNext(&it, &pEntry))
+ {
+ TypeHandle th = pEntry->GetTypeHandle();
+ if (m_acceptedTypes.Lookup(th) == NULL && m_rejectedTypes.Lookup(th) == NULL)
+ pTypes.Append(th);
+ }
+ }
+
+ for(COUNT_T i = 0; i < pTypes.GetCount(); i ++)
+ {
+ TriageTypeForZap(pTypes[i], fAcceptIfNotSure, fExpandDependencies);
+ }
+ }
+
+ // Triage methods
+ {
+ // Create a local copy in case new elements are added to the hashtable while the triage runs
+ InlineSArray<MethodDesc*, 20> pMethods;
+
+ // Make sure the iterator is destroyed before there is a chance of loading new methods
+ {
+ InstMethodHashTable* pTable = m_image->GetModule()->GetInstMethodHashTable();
+
+ InstMethodHashTable::Iterator it(pTable);
+ InstMethodHashEntry *pEntry;
+ while (pTable->FindNext(&it, &pEntry))
+ {
+ MethodDesc* pMD = pEntry->GetMethod();
+ if (m_acceptedMethods.Lookup(pMD) == NULL && m_rejectedMethods.Lookup(pMD) == NULL)
+ pMethods.Append(pMD);
+ }
+ }
+
+ for(COUNT_T i = 0; i < pMethods.GetCount(); i ++)
+ {
+ TriageMethodForZap(pMethods[i], fAcceptIfNotSure, fExpandDependencies);
+ }
+ }
+
+ // Returns TRUE if new types or methods have been added by the triage
+ return (dwNumTypes != m_image->GetModule()->GetAvailableParamTypes()->GetCount()) ||
+ (dwNumMethods != m_image->GetModule()->GetInstMethodHashTable()->GetCount());
+}
+
+void CEEPreloader::PrePrepareMethodIfNecessary(CORINFO_METHOD_HANDLE hMethod)
+{
+ STANDARD_VM_CONTRACT;
+
+ ::PrePrepareMethodIfNecessary(hMethod);
+}
+
+static void SetStubMethodDescOnInteropMethodDesc(MethodDesc* pInteropMD, MethodDesc* pStubMD, bool fReverseStub)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ // We store NGENed stubs on these MethodDesc types
+ PRECONDITION(pInteropMD->IsNDirect() || pInteropMD->IsComPlusCall() || pInteropMD->IsGenericComPlusCall() || pInteropMD->IsEEImpl());
+ }
+ CONTRACTL_END;
+
+ if (pInteropMD->IsNDirect())
+ {
+ _ASSERTE(!fReverseStub);
+ NDirectMethodDesc* pNMD = (NDirectMethodDesc*)pInteropMD;
+ pNMD->ndirect.m_pStubMD.SetValue(pStubMD);
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (pInteropMD->IsComPlusCall() || pInteropMD->IsGenericComPlusCall())
+ {
+ _ASSERTE(!fReverseStub);
+ ComPlusCallInfo *pComInfo = ComPlusCallInfo::FromMethodDesc(pInteropMD);
+ pComInfo->m_pStubMD.SetValue(pStubMD);
+ }
+#endif // FEATURE_COMINTEROP
+ else if (pInteropMD->IsEEImpl())
+ {
+ DelegateEEClass* pDelegateClass = (DelegateEEClass*)pInteropMD->GetClass();
+ if (fReverseStub)
+ {
+ pDelegateClass->m_pReverseStubMD = pStubMD;
+ }
+ else
+ {
+#ifdef FEATURE_COMINTEROP
+ // We don't currently NGEN both the P/Invoke and WinRT stubs for WinRT delegates.
+ // If that changes, this function will need an extra parameter to tell what kind
+ // of stub is being passed.
+ if (pInteropMD->GetMethodTable()->IsWinRTDelegate())
+ {
+ pDelegateClass->m_pComPlusCallInfo->m_pStubMD.SetValue(pStubMD);
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ pDelegateClass->m_pForwardStubMD = pStubMD;
+ }
+ }
+ }
+ else
+ {
+ UNREACHABLE_MSG("unexpected type of MethodDesc");
+ }
+}
+
+MethodDesc * CEEPreloader::CompileMethodStubIfNeeded(
+ MethodDesc *pMD,
+ MethodDesc *pStubMD,
+ ICorCompilePreloader::CORCOMPILE_CompileStubCallback pfnCallback,
+ LPVOID pCallbackContext)
+{
+ STANDARD_VM_CONTRACT;
+
+ LOG((LF_ZAP, LL_INFO10000, "NGEN_ILSTUB: %s::%s -> %s::%s\n",
+ pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, pStubMD->m_pszDebugClassName, pStubMD->m_pszDebugMethodName));
+
+ // It is possible that the StubMD is a normal method pointed to by InteropStubMethodAttribute,
+ // and in that case we don't need to compile it here.
+ if (pStubMD->IsDynamicMethod())
+ {
+ if (!pStubMD->AsDynamicMethodDesc()->GetILStubResolver()->IsCompiled())
+ {
+ DWORD dwJitFlags = pStubMD->AsDynamicMethodDesc()->GetILStubResolver()->GetJitFlags();
+
+ pfnCallback(pCallbackContext, (CORINFO_METHOD_HANDLE)pStubMD, dwJitFlags);
+ }
+
+#ifndef FEATURE_FULL_NGEN // Deduplication
+ const DuplicateMethodEntry * pDuplicate = m_duplicateMethodsHash.LookupPtr(pStubMD);
+ if (pDuplicate != NULL)
+ return pDuplicate->pDuplicateMD;
+#endif
+ }
+
+ // We do not store IL stubs, so if compilation failed for one,
+ // it does not make sense to keep the MethodDesc corresponding to the IL.
+ if (pStubMD->IsILStub() && m_image->GetCodeAddress(pStubMD) == NULL)
+ pStubMD = NULL;
+
+ return pStubMD;
+}
+
+void CEEPreloader::GenerateMethodStubs(
+ CORINFO_METHOD_HANDLE hMethod,
+ bool fNgenProfilerImage,
+ CORCOMPILE_CompileStubCallback pfnCallback,
+ LPVOID pCallbackContext)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(hMethod != NULL && pfnCallback != NULL);
+ }
+ CONTRACTL_END;
+
+ MethodDesc* pMD = GetMethod(hMethod);
+ MethodDesc* pStubMD = NULL;
+
+#ifdef MDIL
+ // Do not generate IL stubs when generating MDIL
+ // This prevents versionability concerns around IL stubs exposing internal
+ // implementation details of the CLR.
+ if (GetAppDomain()->IsMDILCompilationDomain())
+ return;
+#endif
+
+ // Do not generate IL stubs when generating ReadyToRun images.
+ // This prevents versionability concerns around IL stubs exposing internal
+ // implementation details of the CLR.
+ if (IsReadyToRunCompilation())
+ return;
+
+ DWORD dwNGenStubFlags = NDIRECTSTUB_FL_NGENEDSTUB;
+
+ if (fNgenProfilerImage)
+ dwNGenStubFlags |= NDIRECTSTUB_FL_NGENEDSTUBFORPROFILING;
+
+ //
+ // Generate IL stubs. If this fails, we go through the normal NGEN path.
+ // Catch any exceptions that occur when we try to create the IL_STUB.
+ //
+ EX_TRY
+ {
+ //
+ // Take care of forward stubs
+ //
+ if (pMD->IsNDirect())
+ {
+ NDirectMethodDesc* pNMD = (NDirectMethodDesc*)pMD;
+ PInvokeStaticSigInfo sigInfo;
+ NDirect::PopulateNDirectMethodDesc(pNMD, &sigInfo);
+ pStubMD = NDirect::GetILStubMethodDesc((NDirectMethodDesc*)pMD, &sigInfo, dwNGenStubFlags);
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (pMD->IsComPlusCall() || pMD->IsGenericComPlusCall())
+ {
+ if (MethodNeedsForwardComStub(pMD, m_image))
+ {
+ // Look for predefined IL stubs in forward com interop scenario.
+ // If we've found a stub, that's what we'll use
+ DWORD dwStubFlags;
+ ComPlusCall::PopulateComPlusCallMethodDesc(pMD, &dwStubFlags);
+ if (FAILED(FindPredefinedILStubMethod(pMD, dwStubFlags, &pStubMD)))
+ {
+ pStubMD = ComPlusCall::GetILStubMethodDesc(pMD, dwStubFlags | dwNGenStubFlags);
+ }
+ }
+ }
+#endif // FEATURE_COMINTEROP
+ else if (pMD->IsEEImpl())
+ {
+ MethodTable* pMT = pMD->GetMethodTable();
+ CONSISTENCY_CHECK(pMT->IsDelegate());
+
+ // we can filter out non-WinRT generic delegates right off the top
+ if (!pMD->HasClassOrMethodInstantiation() || pMT->IsProjectedFromWinRT()
+#ifdef FEATURE_COMINTEROP
+ || WinRTTypeNameConverter::IsRedirectedType(pMT)
+#endif // FEATURE_COMINTEROP
+ )
+ {
+ if (COMDelegate::IsDelegateInvokeMethod(pMD)) // build forward stub
+ {
+#ifdef FEATURE_COMINTEROP
+ if ((pMT->IsProjectedFromWinRT() || WinRTTypeNameConverter::IsRedirectedType(pMT)) &&
+ (!pMT->HasInstantiation() || pMT->SupportsGenericInterop(TypeHandle::Interop_ManagedToNative))) // filter out shared generics
+ {
+ // Build the stub for all WinRT delegates, these will definitely be used for interop.
+ if (pMT->IsLegalNonArrayWinRTType())
+ {
+ COMDelegate::PopulateComPlusCallInfo(pMT);
+ pStubMD = COMDelegate::GetILStubMethodDesc((EEImplMethodDesc *)pMD, dwNGenStubFlags);
+ }
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ // Build the stub only if the delegate is decorated with UnmanagedFunctionPointerAttribute.
+ // Forward delegate stubs are rare so we require this opt-in to avoid bloating NGEN images.
+
+ if (S_OK == pMT->GetMDImport()->GetCustomAttributeByName(
+ pMT->GetCl(), g_UnmanagedFunctionPointerAttribute, NULL, NULL))
+ {
+ pStubMD = COMDelegate::GetILStubMethodDesc((EEImplMethodDesc *)pMD, dwNGenStubFlags);
+ }
+ }
+ }
+ }
+ }
+
+ // compile the forward stub
+ if (pStubMD != NULL)
+ {
+ pStubMD = CompileMethodStubIfNeeded(pMD, pStubMD, pfnCallback, pCallbackContext);
+
+ // We store the MethodDesc of the stub on the NDirectMethodDesc/ComPlusCallMethodDesc/DelegateEEClass
+ // so that we can recover the stub MethodDesc at prestub time, do the fixups, and wire up the native code
+ if (pStubMD != NULL)
+ {
+ SetStubMethodDescOnInteropMethodDesc(pMD, pStubMD, false /* fReverseStub */);
+ pStubMD = NULL;
+ }
+
+ }
+ }
+ EX_CATCH
+ {
+ LOG((LF_ZAP, LL_WARNING, "NGEN_ILSTUB: Generating forward interop stub FAILED: %s::%s\n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+ }
+ EX_END_CATCH(RethrowTransientExceptions);
+
+ //
+ // Now take care of reverse P/Invoke stubs for delegates
+ //
+ if (pMD->IsEEImpl() && COMDelegate::IsDelegateInvokeMethod(pMD))
+ {
+ // Reverse P/Invoke is not supported for generic methods and WinRT delegates
+ if (!pMD->HasClassOrMethodInstantiation() && !pMD->GetMethodTable()->IsProjectedFromWinRT())
+ {
+ EX_TRY
+ {
+#ifdef _TARGET_X86_
+ // on x86, we call the target directly if Invoke has a no-marshal signature
+ if (NDirect::MarshalingRequired(pMD))
+#endif // _TARGET_X86_
+ {
+ PInvokeStaticSigInfo sigInfo(pMD);
+ pStubMD = UMThunkMarshInfo::GetILStubMethodDesc(pMD, &sigInfo, NDIRECTSTUB_FL_DELEGATE | dwNGenStubFlags);
+
+ if (pStubMD != NULL)
+ {
+ // compile the reverse stub
+ pStubMD = CompileMethodStubIfNeeded(pMD, pStubMD, pfnCallback, pCallbackContext);
+
+ // We store the MethodDesc of the Stub on the DelegateEEClass
+ if (pStubMD != NULL)
+ {
+ SetStubMethodDescOnInteropMethodDesc(pMD, pStubMD, true /* fReverseStub */);
+ }
+ }
+ }
+ }
+ EX_CATCH
+ {
+ LOG((LF_ZAP, LL_WARNING, "NGEN_ILSTUB: Generating reverse interop stub for delegate FAILED: %s::%s\n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+ }
+ EX_END_CATCH(RethrowTransientExceptions);
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ //
+ // And finally generate reverse COM stubs
+ //
+ EX_TRY
+ {
+ // The method doesn't have to have a special type to be exposed to COM, in particular it doesn't
+ // have to be ComPlusCallMethodDesc. However, it must have certain properties (custom attributes,
+ // public visibility, etc.)
+ if (MethodNeedsReverseComStub(pMD))
+ {
+ // initialize ComCallMethodDesc
+ ComCallMethodDesc ccmd;
+ ComCallMethodDescHolder ccmdHolder(&ccmd);
+ ccmd.InitMethod(pMD, NULL);
+
+ // generate the IL stub
+ DWORD dwStubFlags;
+ ComCall::PopulateComCallMethodDesc(&ccmd, &dwStubFlags);
+ pStubMD = ComCall::GetILStubMethodDesc(pMD, dwStubFlags | dwNGenStubFlags);
+
+ if (pStubMD != NULL)
+ {
+ // compile the reverse stub
+ pStubMD = CompileMethodStubIfNeeded(pMD, pStubMD, pfnCallback, pCallbackContext);
+
+ if (pStubMD != NULL)
+ {
+ // store the stub in a hash table on the module
+ m_image->GetModule()->GetStubMethodHashTable()->InsertMethodDesc(pMD, pStubMD);
+ }
+ }
+ }
+ }
+ EX_CATCH
+ {
+ LOG((LF_ZAP, LL_WARNING, "NGEN_ILSTUB: Generating reverse interop stub FAILED: %s::%s\n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+ }
+ EX_END_CATCH(RethrowTransientExceptions);
+#endif // FEATURE_COMINTEROP
+}
+
+bool CEEPreloader::IsDynamicMethod(CORINFO_METHOD_HANDLE hMethod)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc* pMD = GetMethod(hMethod);
+
+ if (pMD)
+ {
+ return pMD->IsDynamicMethod();
+ }
+
+ return false;
+}
+
+// Set method profiling flags for layout of EE datastructures
+void CEEPreloader::SetMethodProfilingFlags(CORINFO_METHOD_HANDLE hMethod, DWORD flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(hMethod != NULL);
+ _ASSERTE(flags != 0);
+
+ return m_image->SetMethodProfilingFlags(GetMethod(hMethod), flags);
+}
+
+/*********************************************************************/
+// canSkipMethodPreparation: Is there a need for all calls from
+// NGEN'd code to a particular MethodDesc to go through DoPrestub,
+// depending on the method semantics? If so, return FALSE.
+//
+// This is used to rule out both ngen-hardbinds and intra-ngen-module
+// direct calls.
+//
+// The cases where direct calls are not allowed are typically where
+// a stub must be inserted by DoPrestub (we do not save stubs) or where
+// we haven't saved the code for some reason or another, or where fixups
+// are required in the MethodDesc.
+//
+// callerHnd=NULL implies any/unspecified caller.
+//
+// Note that there may be other requirements for going through the prestub
+// which vary based on the scenario. These need to be handled separately
+
+bool CEEPreloader::CanSkipMethodPreparation (
+ CORINFO_METHOD_HANDLE callerHnd,
+ CORINFO_METHOD_HANDLE calleeHnd,
+ CorInfoIndirectCallReason *pReason,
+ CORINFO_ACCESS_FLAGS accessFlags/*=CORINFO_ACCESS_ANY*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ bool result = false;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ MethodDesc * calleeMD = (MethodDesc *)calleeHnd;
+ MethodDesc * callerMD = (MethodDesc *)callerHnd;
+
+ {
+ result = calleeMD->CanSkipDoPrestub(callerMD, pReason, accessFlags);
+ }
+
+ COOPERATIVE_TRANSITION_END();
+
+ return result;
+}
+
+CORINFO_METHOD_HANDLE CEEPreloader::LookupMethodDef(mdMethodDef token)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *pMD = MemberLoader::GetMethodDescFromMethodDef(
+ m_image->GetModule(),
+ token,
+ FALSE);
+
+ // READYTORUN: FUTURE: Generics
+ if (IsReadyToRunCompilation() && pMD->HasClassOrMethodInstantiation())
+ return NULL;
+
+ pMD = pMD->FindOrCreateTypicalSharedInstantiation();
+
+ return CORINFO_METHOD_HANDLE(pMD);
+}
+
+CorCompileILRegion CEEPreloader::GetILRegion(mdMethodDef token)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Since we are running managed code during NGen, the inlining hints may be
+ // changing underneath us as the code is JITed. We need to prevent the inlining
+ // hints from changing once we start to use them to place IL in the image.
+ g_pCEECompileInfo->DisableCachingOfInliningHints();
+
+ // Default if there is something completely wrong, e.g. the type failed to load.
+ // We may need the IL at runtime.
+ CorCompileILRegion region = CORCOMPILE_ILREGION_WARM;
+
+ EX_TRY
+ {
+ MethodDesc *pMD = m_image->GetModule()->LookupMethodDef(token);
+
+ if (pMD == NULL || !pMD->GetMethodTable()->IsFullyLoaded())
+ {
+ // Something is completely wrong - use the default
+ }
+ else
+ if (m_image->IsStored(pMD))
+ {
+ if (pMD->IsNotInline())
+ {
+ if (pMD->HasClassOrMethodInstantiation())
+ {
+ region = CORCOMPILE_ILREGION_GENERICS;
+ }
+ else
+ {
+ region = CORCOMPILE_ILREGION_COLD;
+ }
+ }
+ else
+ if (Security::MethodIsVisibleOutsideItsAssembly(pMD))
+ {
+ // We are inlining only leaf methods, except for mscorlib. Thus we can assume that only methods
+ // visible outside their assembly are likely to be inlined.
+ region = CORCOMPILE_ILREGION_INLINEABLE;
+ }
+ else
+ {
+ // We may still need the IL of the non-visible methods for inlining in certain scenarios:
+ // dynamically emitted IL, friend assemblies, or JITing of generic instantiations
+ region = CORCOMPILE_ILREGION_WARM;
+ }
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return region;
+}
+
+CORINFO_CLASS_HANDLE CEEPreloader::FindTypeForProfileEntry(CORBBTPROF_BLOB_PARAM_SIG_ENTRY * profileBlobEntry)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(profileBlobEntry->blob.type == ParamTypeSpec);
+
+ if (PartialNGenStressPercentage() != 0)
+ return CORINFO_CLASS_HANDLE( NULL );
+
+ Module * pModule = GetAppDomain()->ToCompilationDomain()->GetTargetModule();
+ TypeHandle th = pModule->LoadIBCTypeHelper(profileBlobEntry);
+
+ return CORINFO_CLASS_HANDLE(th.AsPtr());
+}
+
+CORINFO_METHOD_HANDLE CEEPreloader::FindMethodForProfileEntry(CORBBTPROF_BLOB_PARAM_SIG_ENTRY * profileBlobEntry)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(profileBlobEntry->blob.type == ParamMethodSpec);
+
+ if (PartialNGenStressPercentage() != 0)
+ return CORINFO_METHOD_HANDLE( NULL );
+
+ Module * pModule = GetAppDomain()->ToCompilationDomain()->GetTargetModule();
+ MethodDesc * pMethod = pModule->LoadIBCMethodHelper(profileBlobEntry);
+
+ return CORINFO_METHOD_HANDLE( pMethod );
+}
+
+void CEEPreloader::ReportInlining(CORINFO_METHOD_HANDLE inliner, CORINFO_METHOD_HANDLE inlinee)
+{
+ STANDARD_VM_CONTRACT;
+ m_image->ReportInlining(inliner, inlinee);
+}
+
+void CEEPreloader::Link()
+{
+ STANDARD_VM_CONTRACT;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ m_image->PreSave();
+
+ m_image->GetModule()->Save(m_image);
+ m_image->GetModule()->Arrange(m_image);
+ m_image->GetModule()->Fixup(m_image);
+
+ m_image->PostSave();
+
+ COOPERATIVE_TRANSITION_END();
+}
+
+void CEEPreloader::FixupRVAs()
+{
+ STANDARD_VM_CONTRACT;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ m_image->FixupRVAs();
+
+ COOPERATIVE_TRANSITION_END();
+}
+
+void CEEPreloader::SetRVAsForFields(IMetaDataEmit * pEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ m_image->SetRVAsForFields(pEmit);
+
+ COOPERATIVE_TRANSITION_END();
+}
+
+void CEEPreloader::GetRVAFieldData(mdFieldDef fd, PVOID * ppData, DWORD * pcbSize, DWORD * pcbAlignment)
+{
+ STANDARD_VM_CONTRACT;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+ FieldDesc * pFD = m_image->GetModule()->LookupFieldDef(fd);
+ if (pFD == NULL)
+ ThrowHR(COR_E_TYPELOAD);
+
+ _ASSERTE(pFD->IsILOnlyRVAField());
+
+ UINT size = pFD->LoadSize();
+
+ //
+ // Compute an alignment for the data based on the alignment
+ // of the RVA. We'll align up to 8 bytes.
+ //
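+ // For example, an RVA of 0x0004 with size >= 4 yields align == 4: the loop
+ // below doubles align for as long as the remaining RVA bits stay even
+ // (capped at 8 bytes and at the data size).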
+
+ UINT align = 1;
+ DWORD rva = pFD->GetOffset();
+ DWORD rvaTemp = rva;
+
+ while ((rvaTemp&1) == 0 && align < 8 && align < size)
+ {
+ align <<= 1;
+ rvaTemp >>= 1;
+ }
+
+
+ *ppData = pFD->GetStaticAddressHandle(NULL);
+ *pcbSize = size;
+ *pcbAlignment = align;
+
+ COOPERATIVE_TRANSITION_END();
+}
+
+ULONG CEEPreloader::Release()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ delete this;
+ return 0;
+}
+
+void CEEPreloader::Error(mdToken token, Exception * pException)
+{
+ STANDARD_VM_CONTRACT;
+
+ StackSString msg;
+
+#ifdef CROSSGEN_COMPILE
+ pException->GetMessage(msg);
+#else
+ {
+ GCX_COOP();
+
+ // Going through the throwable gives more verbose error messages in certain cases that our tests depend on.
+ OBJECTREF throwable = NingenEnabled() ? NULL : CLRException::GetThrowableFromException(pException);
+
+ if (throwable != NULL)
+ {
+ GetExceptionMessage(throwable, msg);
+ }
+ else
+ {
+ pException->GetMessage(msg);
+ }
+ }
+#endif
+
+ m_pData->Error(token, pException->GetHR(), msg.GetUnicode());
+}
+
+CEEInfo *g_pCEEInfo = NULL;
+
+ICorDynamicInfo * __stdcall GetZapJitInfo()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (g_pCEEInfo == NULL)
+ {
+ CEEInfo * p = new CEEInfo();
+ if (InterlockedCompareExchangeT(&g_pCEEInfo, p, NULL) != NULL)
+ delete p;
+ }
+
+ return g_pCEEInfo;
+}
+
+CEECompileInfo *g_pCEECompileInfo = NULL;
+
+ICorCompileInfo * __stdcall GetCompileInfo()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (g_pCEECompileInfo == NULL)
+ {
+ CEECompileInfo * p = new CEECompileInfo();
+ if (InterlockedCompareExchangeT(&g_pCEECompileInfo, p, NULL) != NULL)
+ delete p;
+ }
+
+ return g_pCEECompileInfo;
+}
+
+//
+// CompilationDomain
+//
+
+CompilationDomain::CompilationDomain(BOOL fForceDebug,
+ BOOL fForceProfiling,
+ BOOL fForceInstrument)
+ : m_fForceDebug(fForceDebug),
+ m_fForceProfiling(fForceProfiling),
+ m_fForceInstrument(fForceInstrument),
+ m_pTargetAssembly(NULL),
+ m_pTargetModule(NULL),
+ m_pTargetImage(NULL),
+ m_pEmit(NULL),
+ m_pDependencyRefSpecs(NULL),
+ m_pDependencies(NULL),
+ m_cDependenciesCount(0),
+ m_cDependenciesAlloc(0)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifndef FEATURE_CORECLR // hardbinding
+ m_hardBoundModules.Init(FALSE, NULL);
+ m_cantHardBindModules.Init(FALSE, NULL);
+ m_useHardBindList = FALSE;
+#endif
+}
+
+void CompilationDomain::ReleaseDependencyEmitter()
+{
+ m_pDependencyRefSpecs.Release();
+
+ m_pEmit.Release();
+}
+
+CompilationDomain::~CompilationDomain()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pDependencies != NULL)
+ delete [] m_pDependencies;
+
+ ReleaseDependencyEmitter();
+
+ for (unsigned i = 0; i < m_rRefCaches.Size(); i++)
+ {
+ delete m_rRefCaches[i];
+ m_rRefCaches[i]=NULL;
+ }
+
+}
+
+void CompilationDomain::Init(
+#ifdef MDIL
+ MDILCompilationFlags mdilCompilationFlags
+#endif
+ )
+{
+ STANDARD_VM_CONTRACT;
+
+#ifndef CROSSGEN_COMPILE
+ AppDomain::Init();
+#endif
+
+#ifndef CROSSGEN_COMPILE
+ // allocate a Virtual Call Stub Manager for the compilation domain
+ InitVSD();
+#endif
+
+ Security::SetDefaultAppDomainProperty(GetSecurityDescriptor());
+ SetCompilationDomain();
+#ifdef MDIL
+ if (mdilCompilationFlags & MDILCompilationFlags_CreateMDIL)
+ SetMDILCompilationDomain();
+ if (mdilCompilationFlags & MDILCompilationFlags_MinimalMDIL)
+ SetMinimalMDILCompilationDomain();
+ if (mdilCompilationFlags & MDILCompilationFlags_NoMDIL)
+ SetNoMDILCompilationDomain();
+#endif
+
+#ifndef FEATURE_CORECLR
+ // We need the Compilation Domain to be homogeneous. We've already forced everything to be full trust.
+ // However, CheckZapSecurity needs this to be set, so set it here.
+ GetSecurityDescriptor()->SetHomogeneousFlag(TRUE);
+#endif // !FEATURE_CORECLR
+
+#ifdef _DEBUG
+ g_pConfig->DisableGenerateStubForHost();
+#endif
+}
+
+HRESULT CompilationDomain::AddDependencyEntry(PEAssembly *pFile,
+ mdAssemblyRef ref,
+ mdAssemblyRef def)
+{
+#ifdef _DEBUG
+ // This method is not thread-safe. This is OK because it is only called during NGen
+ // compilation, which is effectively single-threaded. The following code verifies that
+ // we're not called on multiple threads.
+ static volatile LONG threadId = 0;
+ if (threadId == 0)
+ {
+ InterlockedCompareExchange(&threadId, GetCurrentThreadId(), 0);
+ }
+ _ASSERTE((LONG)GetCurrentThreadId() == threadId);
+#endif // _DEBUG
+
+ _ASSERTE((pFile == NULL) == (def == mdAssemblyRefNil));
+
+ if (m_cDependenciesCount == m_cDependenciesAlloc)
+ {
+ // Save the new count in a local variable. Can't update m_cDependenciesAlloc until the new
+ // CORCOMPILE_DEPENDENCY array is allocated, otherwise an out-of-memory exception from new[]
+ // operator would put the data in an inconsistent state, causing heap corruption later.
+ USHORT cNewDependenciesAlloc = m_cDependenciesAlloc == 0 ? 20 : m_cDependenciesAlloc * 2;
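+ // (Illustrative growth sequence: 0 -> 20 -> 40 -> 80 ... entries.)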
+
+ // Grow m_pDependencies
+
+ NewArrayHolder<CORCOMPILE_DEPENDENCY> pNewDependencies(new CORCOMPILE_DEPENDENCY[cNewDependenciesAlloc]);
+ {
+ // This block must execute transactionally. No throwing allowed. No bailing allowed.
+ FAULT_FORBID();
+
+ memset(pNewDependencies, 0, cNewDependenciesAlloc*sizeof(CORCOMPILE_DEPENDENCY));
+
+ if (m_pDependencies)
+ {
+ memcpy(pNewDependencies, m_pDependencies,
+ m_cDependenciesCount*sizeof(CORCOMPILE_DEPENDENCY));
+
+ delete [] m_pDependencies;
+ }
+
+ m_pDependencies = pNewDependencies.Extract();
+ m_cDependenciesAlloc = cNewDependenciesAlloc;
+ }
+ }
+
+ CORCOMPILE_DEPENDENCY *pDependency = &m_pDependencies[m_cDependenciesCount++];
+
+ // Clear memory so that we won't write random data into the zapped file
+ ZeroMemory(pDependency, sizeof(CORCOMPILE_DEPENDENCY));
+
+ pDependency->dwAssemblyRef = ref;
+
+ pDependency->dwAssemblyDef = def;
+
+ pDependency->signNativeImage = INVALID_NGEN_SIGNATURE;
+#ifdef FEATURE_APTCA
+ pDependency->dependencyInfo = CorCompileDependencyInfo(0);
+#endif //FEATURE_APTCA
+
+ if (pFile)
+ {
+ DomainAssembly *pAssembly = GetAppDomain()->LoadDomainAssembly(NULL, pFile, FILE_LOAD_CREATE, NULL);
+ // Note that this can trigger an assembly load (of mscorlib)
+ pAssembly->GetOptimizedIdentitySignature(&pDependency->signAssemblyDef);
+
+#if defined(FEATURE_APTCA) || !defined(FEATURE_CORECLR)
+ ReleaseHolder<IMDInternalImport> pAssemblyMD(pFile->GetMDImportWithRef());
+#endif
+
+#ifdef FEATURE_APTCA
+ // determine if there's an APTCA reference, before we retrieve the target file version (for killbit)
+ TokenSecurityDescriptorFlags assemblySecurityAttributes =
+ TokenSecurityDescriptor::ReadSecurityAttributes(pAssemblyMD, TokenFromRid(1, mdtAssembly));
+
+ pFile->AddRef();
+
+ BOOL fIsAptca = assemblySecurityAttributes & (TokenSecurityDescriptorFlags_APTCA
+ | TokenSecurityDescriptorFlags_ConditionalAPTCA);
+ if (fIsAptca)
+ {
+ // get the file path
+ LPCWSTR pwszPath = pFile->GetPath().GetUnicode();
+ if (pwszPath == NULL)
+ {
+ return E_FAIL;
+ }
+ // Retrieve the file version based on the file path (using the Watson OS wrapper).
+ // Ignore failures (e.g. the platform doesn't support file versions, or version info is missing).
+ if (FAILED(GetFileVersion(pwszPath, &pDependency->uliFileVersion)))
+ {
+ fIsAptca = FALSE;
+ }
+ }
+ if (fIsAptca)
+ {
+ pDependency->dependencyInfo = CorCompileDependencyInfo(pDependency->dependencyInfo
+ | CORCOMPILE_DEPENDENCY_IS_APTCA);
+ }
+
+ if (assemblySecurityAttributes & TokenSecurityDescriptorFlags_ConditionalAPTCA)
+ {
+ pDependency->dependencyInfo = CorCompileDependencyInfo(pDependency->dependencyInfo
+ | CORCOMPILE_DEPENDENCY_IS_CAPTCA);
+ }
+#endif //FEATURE_APTCA
+
+#ifdef FEATURE_CORECLR // hardbinding
+ //
+ // This is done in CompilationDomain::CanEagerBindToZapFile with full support for hardbinding
+ //
+ if (pFile->IsSystem() && pFile->HasNativeImage())
+ {
+ CORCOMPILE_VERSION_INFO * pNativeVersion = pFile->GetLoadedNative()->GetNativeVersionInfo();
+ pDependency->signNativeImage = pNativeVersion->signature;
+ }
+#endif
+
+#ifndef FEATURE_CORECLR
+ // Find the architecture of the dependency, using algorithm from Fusion GetRuntimeVersionForAssembly.
+ // Normally, when an assembly is loaded at runtime, Fusion determines its architecture based on the
+ // metadata. However, if assembly load is skipped due to presence of native image, then Fusion needs
+ // to get assembly architecture from another source. For assemblies in GAC, the GAC structure provides
+ // architecture data. For assemblies outside of GAC, however, no other source of info is available.
+ // So we calculate the architecture now and store it in the native image, to make it available to Fusion.
+ // The algorithm here must exactly match the algorithm in GetRuntimeVersionForAssembly.
+ LPCSTR pszPERuntime;
+ IfFailThrow(pAssemblyMD->GetVersionString(&pszPERuntime));
+
+ if (SString::_strnicmp(pszPERuntime, "v1.0", 4) != 0 &&
+ SString::_strnicmp(pszPERuntime, "v1.1", 4) != 0 &&
+ SString::_stricmp(pszPERuntime, "Standard CLI 2002") != 0)
+ {
+ // Get the PE architecture of this dependency, similar to PEAssembly::GetFusionProcessorArchitecture.
+ // The difference is when NI is loaded, PEAssembly::GetFusionProcessorArchitecture returns the
+ // architecture of the NI (which is never processor neutral), but we want the architecture
+ // associated with the IL image.
+ DWORD dwPEKind, dwMachine;
+ if (pFile->HasNativeImage())
+ {
+ // CrossGen can load an NI without loading the corresponding IL image, in which case
+ // PEAssembly::GetILImage() actually returns an NI. Thus we need specific code to handle NI.
+ PEImageHolder pImage(pFile->GetNativeImageWithRef());
+ pImage->GetNativeILPEKindAndMachine(&dwPEKind, &dwMachine);
+ }
+ else
+ {
+ pFile->GetILimage()->GetPEKindAndMachine(&dwPEKind, &dwMachine);
+ }
+
+ DWORD dwAssemblyFlags = 0;
+ IfFailThrow(pAssemblyMD->GetAssemblyProps(TokenFromRid(1, mdtAssembly),
+ NULL, NULL, NULL,
+ NULL, NULL, &dwAssemblyFlags));
+
+ PEKIND peKind;
+ if (SUCCEEDED(TranslatePEToArchitectureType(
+ (CorPEKind)dwPEKind,
+ dwMachine,
+ dwAssemblyFlags,
+ &peKind)))
+ {
+ CorCompileDependencyInfo peKindShifted = CorCompileDependencyInfo(peKind << CORCOMPILE_DEPENDENCY_PEKIND_SHIFT);
+ _ASSERTE(peKindShifted == (peKindShifted & CORCOMPILE_DEPENDENCY_PEKIND_MASK));
+ pDependency->dependencyInfo = CorCompileDependencyInfo(pDependency->dependencyInfo
+ | (peKindShifted & CORCOMPILE_DEPENDENCY_PEKIND_MASK));
+ }
+ }
+#endif // !FEATURE_CORECLR
+ }
+
+ return S_OK;
+}
+
+HRESULT CompilationDomain::AddDependency(AssemblySpec *pRefSpec,
+ PEAssembly * pFile)
+{
+ HRESULT hr;
+
+ //
+ // Record the dependency
+ //
+
+ // This assert prevents dependencies from silently being loaded without being recorded.
+ _ASSERTE(m_pEmit);
+
+ // Normalize any reference to mscorlib; we don't want to record other non-canonical
+ // mscorlib references in the ngen image since fusion doesn't understand how to bind them.
+ // (Not to mention the fact that they are redundant.)
+ AssemblySpec spec;
+ if (pRefSpec->IsMscorlib())
+ {
+ _ASSERTE(pFile); // mscorlib had better not be missing
+ if (!pFile)
+ return E_UNEXPECTED;
+
+ // Don't store a binding from mscorlib to itself.
+ if (m_pTargetAssembly == SystemDomain::SystemAssembly())
+ return S_OK;
+
+ spec.InitializeSpec(pFile);
+ pRefSpec = &spec;
+ }
+ else if (m_pTargetAssembly == NULL && pFile)
+ {
+ // If target assembly is still NULL, we must be loading either the target assembly or mscorlib.
+ // Mscorlib is already handled above, so we must be loading the target assembly if we get here.
+ // Use the assembly name given in the target assembly so that the native image is deterministic
+ // regardless of how the target assembly is specified on the command line.
+ spec.InitializeSpec(pFile);
+ if (spec.IsStrongNamed() && spec.HasPublicKey())
+ {
+ spec.ConvertPublicKeyToToken();
+ }
+ pRefSpec = &spec;
+ }
+ else if (pRefSpec->IsStrongNamed() && pRefSpec->HasPublicKey())
+ {
+ // Normalize to always use public key token. Otherwise we may insert one reference
+ // using public key, and another reference using public key token.
+ spec.CopyFrom(pRefSpec);
+ spec.ConvertPublicKeyToToken();
+ pRefSpec = &spec;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // Only cache ref specs that have a unique identity. This is needed to avoid caching
+ // things like WinRT type specs, which would benefit very little from being cached.
+ if (!pRefSpec->HasUniqueIdentity())
+ {
+ // Successful bind of a reference with a non-unique assembly identity.
+ _ASSERTE(pRefSpec->IsContentType_WindowsRuntime());
+
+ AssemblySpec defSpec;
+ if (pFile != NULL)
+ {
+ defSpec.InitializeSpec(pFile);
+
+ // Windows Runtime Native Image binding depends on details exclusively described by the definition winmd file.
+ // Therefore we can actually drop the existing ref spec here entirely.
+ // Also, Windows Runtime Native Image binding uses the simple name of the ref spec as the
+ // resolution rule for PreBind when finding definition assemblies.
+ // See comment on CLRPrivBinderWinRT::PreBind for further details.
+ pRefSpec = &defSpec;
+ }
+
+ // Unfortunately, we don't have any choice regarding failures (pFile == NULL) because
+ // there is no value to canonicalize on (i.e., a def spec created from a non-NULL
+ // pFile) and so we must cache all non-unique-assembly-id failures.
+ const AssemblySpecDefRefMapEntry * pEntry = m_dependencyDefRefMap.LookupPtr(&defSpec);
+ if (pFile == NULL || pEntry == NULL)
+ {
+ mdAssemblyRef refToken = mdAssemblyRefNil;
+ IfFailRet(pRefSpec->EmitToken(m_pEmit, &refToken, TRUE, TRUE));
+
+ mdAssemblyRef defToken = mdAssemblyRefNil;
+ if (pFile != NULL)
+ {
+ IfFailRet(defSpec.EmitToken(m_pEmit, &defToken, TRUE, TRUE));
+
+ NewHolder<AssemblySpec> pNewDefSpec = new AssemblySpec();
+ pNewDefSpec->CopyFrom(&defSpec);
+ pNewDefSpec->CloneFields();
+
+ NewHolder<AssemblySpec> pNewRefSpec = new AssemblySpec();
+ pNewRefSpec->CopyFrom(pRefSpec);
+ pNewRefSpec->CloneFields();
+
+ _ASSERTE(m_dependencyDefRefMap.LookupPtr(pNewDefSpec) == NULL);
+
+ AssemblySpecDefRefMapEntry e;
+ e.m_pDef = pNewDefSpec;
+ e.m_pRef = pNewRefSpec;
+ m_dependencyDefRefMap.Add(e);
+
+ pNewDefSpec.SuppressRelease();
+ pNewRefSpec.SuppressRelease();
+ }
+
+ IfFailRet(AddDependencyEntry(pFile, refToken, defToken));
+ }
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ //
+ // See if we've already added the contents of the ref
+ // Else, emit token for the ref
+ //
+
+ if (m_pDependencyRefSpecs->Store(pRefSpec))
+ return S_OK;
+
+ mdAssemblyRef refToken;
+ IfFailRet(pRefSpec->EmitToken(m_pEmit, &refToken));
+
+ //
+ // Make a spec for the bound assembly
+ //
+
+ mdAssemblyRef defToken = mdAssemblyRefNil;
+
+ // All dependencies of a shared assembly need to be shared. So for a shared
+ // assembly, we want to remember the missing assembly ref during ngen, so that
+ // we can probe eagerly for the dependency at load time, and make sure that
+ // it is loaded as shared.
+ // In such a case, pFile will be NULL
+ if (pFile)
+ {
+ AssemblySpec assemblySpec;
+ assemblySpec.InitializeSpec(pFile);
+
+ IfFailRet(assemblySpec.EmitToken(m_pEmit, &defToken));
+ }
+
+ //
+ // Add the entry. Include the PEFile if we are not doing explicit bindings.
+ //
+
+ IfFailRet(AddDependencyEntry(pFile, refToken, defToken));
+ }
+
+ return S_OK;
+}
+
+//----------------------------------------------------------------------------
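+// Maps an assembly def spec back to the ref spec it was bound from. The map is
+// populated only for references without a unique identity (the
+// FEATURE_COMINTEROP / WinRT path in AddDependency above), via
+// m_dependencyDefRefMap.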
+AssemblySpec* CompilationDomain::FindAssemblyRefSpecForDefSpec(
+ AssemblySpec* pDefSpec)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (pDefSpec == nullptr)
+ return nullptr;
+
+ const AssemblySpecDefRefMapEntry * pEntry = m_dependencyDefRefMap.LookupPtr(pDefSpec);
+ _ASSERTE(pEntry != NULL);
+
+ return (pEntry != NULL) ? pEntry->m_pRef : NULL;
+}
+
+#ifndef FEATURE_CORECLR // hardbinding
+//----------------------------------------------------------------------------
+// Was the assembly asked to be hard-bound to?
+
+BOOL CompilationDomain::IsInHardBindRequestList(Assembly * pAssembly)
+{
+ return IsInHardBindRequestList(pAssembly->GetManifestFile());
+}
+
+BOOL CompilationDomain::IsInHardBindRequestList(PEAssembly * pAssembly)
+{
+ if (!m_useHardBindList)
+ return FALSE;
+
+ StackSString displayName;
+ pAssembly->GetDisplayName(displayName);
+
+ for (COUNT_T i = 0; i < m_assemblyHardBindList.GetCount(); i++)
+ {
+ if (displayName.Equals(m_assemblyHardBindList[i]))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+BOOL CompilationDomain::IsSafeToHardBindTo(PEAssembly * pAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+ // The dependency worker does not have m_useHardBindList set.
+ // We do want to allow all possible native images to be loaded in this case.
+ if (!m_useHardBindList)
+ return TRUE;
+
+ if (CompilationDomain::IsInHardBindRequestList(pAssembly))
+ return TRUE;
+
+ return FALSE;
+}
+
+PtrHashMap::PtrIterator CompilationDomain::IterateHardBoundModules()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_hardBoundModules.begin();
+}
+
+void CompilationDomain::SetAssemblyHardBindList(
+ __in_ecount( cHardBindList )
+ LPWSTR *pHardBindList,
+ DWORD cHardBindList)
+{
+ m_assemblyHardBindList.SetCount(0);
+
+ for (DWORD i = 0; i < cHardBindList; i++)
+ {
+ SString s(pHardBindList[i]);
+ m_assemblyHardBindList.Append(s);
+ }
+
+ m_useHardBindList = TRUE;
+}
+#endif // FEATURE_CORECLR
+
+//----------------------------------------------------------------------------
+// Is it OK to embed direct pointers to an ngen dependency?
+// true if hardbinding is OK, false otherwise
+//
+// targetModule - The pointer points into the native image of this Module.
+// If this native image gets relocated, the native image of
+// the source Module is invalidated unless the embedded
+// pointer can be fixed up appropriately.
+// limitToHardBindList - If TRUE, only hard-bind to dependencies that were
+//                       explicitly asked for (via the hard-bind list);
+//                       if FALSE, hard-binding to other dependencies is also OK.
+
+BOOL CompilationDomain::CanEagerBindToZapFile(Module *targetModule, BOOL limitToHardBindList)
+{
+ // We do this check before checking the hashtables because m_cantHardBindModules
+ // will contain non-manifest modules. However, we do want them to be able
+ // to hard-bind to themselves
+ if (targetModule == m_pTargetModule)
+ {
+ return TRUE;
+ }
+
+#ifdef FEATURE_CORECLR // hardbinding
+ //
+ // CoreCLR does not have attributes for fine grained eager binding control.
+ // We hard bind to mscorlib.dll only.
+ //
+ return targetModule->IsSystem();
+#else
+    // Now, look up the hash tables to avoid doing the heavy-duty work every time
+
+ if (m_cantHardBindModules.LookupValue((UPTR)targetModule, targetModule) !=
+ LPVOID(INVALIDENTRY))
+ {
+ return FALSE;
+ }
+
+ if (m_hardBoundModules.LookupValue((UPTR)targetModule, targetModule) !=
+ LPVOID(INVALIDENTRY))
+ {
+ return TRUE;
+ }
+
+ const char * logMsg = NULL;
+
+ EEConfig::NgenHardBindType ngenHardBindConfig = g_pConfig->NgenHardBind();
+
+ if (ngenHardBindConfig == EEConfig::NGEN_HARD_BIND_NONE)
+ {
+ logMsg = "COMPlus_HardPrejitEnabled=0 is specified";
+ goto CANNOT_HARD_BIND;
+ }
+
+ if (ngenHardBindConfig == EEConfig::NGEN_HARD_BIND_ALL)
+ {
+ // COMPlus_HardPrejitEnabled=2 is specified
+ limitToHardBindList = FALSE;
+ }
+
+ if (!targetModule->HasNativeImage())
+ {
+ logMsg = "dependency does not have a native image (check FusLogVw for reason)";
+ goto CANNOT_HARD_BIND;
+ }
+
+ // The loader/Fusion cannot currently guarantee that a non-manifest module of a
+ // hardbound dependency gets eagerly loaded.
+ if (!targetModule->GetFile()->IsAssembly())
+ {
+ logMsg = "dependency is a non-manifest module";
+ goto CANNOT_HARD_BIND;
+ }
+
+ // Don't hard bind to modules not on the list
+ if (limitToHardBindList && m_useHardBindList)
+ {
+ if (!IsInHardBindRequestList(targetModule->GetAssembly()))
+ {
+ logMsg = "dependency was not found in m_assemblyHardBindList";
+ goto CANNOT_HARD_BIND;
+ }
+ }
+
+ // Mark targetModule as a hard dependency
+ //
+ m_hardBoundModules.InsertValue((UPTR)targetModule, targetModule);
+
+ // Update m_pDependencies for the corresponding assembly, to reflect the fact
+ // that we are hard-binding to its native image
+ //
+ PEAssembly * pTargetAssembly;
+ pTargetAssembly = targetModule->GetFile()->GetAssembly();
+ UpdateDependencyEntryForHardBind(pTargetAssembly);
+
+ logMsg = "new dependency";
+
+ // Try to hardbind to the hardbound dependency closure as there is
+ // no extra cost in doing so
+ IncludeHardBindClosure(pTargetAssembly);
+
+ LOG((LF_ZAP, LL_INFO100, "Success CanEagerBindToZapFile: %S (%s)\n",
+ targetModule->GetDebugName(), logMsg));
+
+ return TRUE;
+
+CANNOT_HARD_BIND:
+
+ m_cantHardBindModules.InsertValue((UPTR)targetModule, targetModule);
+
+    // If this module was explicitly requested for hard binding, warn that we could not honor the request.
+ if (targetModule->GetFile()->IsAssembly() &&
+ IsInHardBindRequestList(targetModule->GetAssembly()))
+ {
+ StackSString displayName;
+ targetModule->GetAssembly()->GetDisplayName(displayName);
+
+ GetSvcLogger()->Printf(LogLevel_Warning, W("WARNING: Cannot hardbind to %s because %S\n"),
+ displayName.GetUnicode(), logMsg);
+ }
+
+ if (logMsg)
+ {
+ LOG((LF_ZAP, LL_INFO100, "Failure CanEagerBindToZapFile: %S (%s)\n",
+ targetModule->GetDebugName(), logMsg));
+ }
+
+ return FALSE;
+#endif // FEATURE_CORECLR
+}
+
+#if defined(CROSSGEN_COMPILE) && !defined(FEATURE_CORECLR)
+
+SArray<LPCWSTR> * s_pPlatformAssembliesPaths;
+
+void ZapperSetPlatformAssembliesPaths(SString &platformAssembliesPaths)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(s_pPlatformAssembliesPaths == NULL);
+ s_pPlatformAssembliesPaths = new SArray<LPCWSTR>();
+
+ SString strPaths(platformAssembliesPaths);
+ if (strPaths.IsEmpty())
+ return;
+
+ for (SString::Iterator i = strPaths.Begin(); i != strPaths.End(); )
+ {
+        // Skip over any leading or consecutive semicolons (empty entries)
+ if (strPaths.Skip(i, W(';')))
+ {
+ continue;
+ }
+
+ SString::Iterator iEnd = i; // Where current assembly name ends
+ SString::Iterator iNext; // Where next assembly name starts
+ if (strPaths.Find(iEnd, W(';')))
+ {
+ iNext = iEnd + 1;
+ }
+ else
+ {
+ iNext = iEnd = strPaths.End();
+ }
+
+ _ASSERTE(i < iEnd);
+ if(i != iEnd)
+ {
+ SString strPath(strPaths, i, iEnd);
+
+ SString strFullPath;
+ Clr::Util::Win32::GetFullPathName(strPath, strFullPath, NULL);
+
+ NewArrayHolder<WCHAR> wszFullPath = DuplicateStringThrowing(strFullPath.GetUnicode());
+ s_pPlatformAssembliesPaths->Append(wszFullPath);
+ wszFullPath.SuppressRelease();
+ }
+ i = iNext;
+ }
+}
+
+BOOL CompilationDomain::FindImage(const SString& fileName, MDInternalImportFlags flags, PEImage ** ppImage)
+{
+ if (s_pPlatformAssembliesPaths == NULL)
+ return FALSE;
+
+ for (COUNT_T i = 0; i < s_pPlatformAssembliesPaths->GetCount(); i++)
+ {
+ SString sPath((*s_pPlatformAssembliesPaths)[i]);
+ if (sPath[sPath.GetCount() - 1] != '\\')
+ sPath.Append(W("\\"));
+ sPath.Append(fileName);
+
+ if (!FileExists(sPath))
+ continue;
+
+ // Normalize the path to maintain identity
+ SString sFullPath;
+ Clr::Util::Win32::GetFullPathName(sPath, sFullPath, NULL);
+
+ PEImageHolder image(PEImage::OpenImage(sFullPath, flags));
+
+ PEImageLayoutHolder pLayout(image->GetLayout(
+ (flags == MDInternalImport_NoCache) ? PEImageLayout::LAYOUT_FLAT : PEImageLayout::LAYOUT_MAPPED,
+ PEImage::LAYOUT_CREATEIFNEEDED));
+
+ if (!pLayout->HasNTHeaders())
+ continue;
+
+ if (!pLayout->IsNativeMachineFormat())
+ {
+ // Check for platform agnostic IL
+ if (!pLayout->IsPlatformNeutral())
+ continue;
+ }
+
+ *ppImage = image.Extract();
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+BOOL CompilationDomain::IsInHardBindList(SString& simpleName)
+{
+ for (COUNT_T i = 0; i < m_assemblyHardBindList.GetCount(); i++)
+ {
+ if (simpleName.Equals(m_assemblyHardBindList[i]))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+void CompilationDomain::ComputeAssemblyHardBindList(IMDInternalImport * pImport)
+{
+ AssemblyForLoadHint assembly(pImport);
+
+ HENUMInternalHolder hEnum(pImport);
+ hEnum.EnumAllInit(mdtAssemblyRef);
+
+ mdAssembly token;
+ while (pImport->EnumNext(&hEnum, &token))
+ {
+ LPCSTR pszName;
+ IfFailThrow(pImport->GetAssemblyRefProps(token, NULL, NULL,
+ &pszName, NULL,
+ NULL, NULL, NULL));
+
+ SString sSimpleName(SString::Utf8, pszName);
+
+ SString sFileName(sSimpleName, W(".dll"));
+
+ PEImageHolder pDependencyImage;
+
+ if (!FindImage(sFileName, MDInternalImport_Default, &pDependencyImage))
+ continue;
+
+ AssemblyForLoadHint assemblyDependency(pDependencyImage->GetMDImport());
+
+ LoadHintEnum loadHint;
+ ::GetLoadHint(&assembly, &assemblyDependency, &loadHint);
+
+ if (loadHint == LoadAlways)
+ {
+ GetSvcLogger()->Printf(W("Hardbinding to %s\n"), sSimpleName.GetUnicode());
+
+ if (!IsInHardBindList(sSimpleName))
+ {
+ m_assemblyHardBindList.Append(sSimpleName);
+ }
+ }
+ }
+
+ // Note that we are not setting m_useHardBindList to TRUE here. When we load the NGen image, we are good to hardbind.
+ // m_useHardBindList = TRUE;
+}
+#endif
+
+void CompilationDomain::SetTarget(Assembly *pAssembly, Module *pModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ m_pTargetAssembly = pAssembly;
+ m_pTargetModule = pModule;
+}
+
+void CompilationDomain::SetTargetImage(DataImage *pImage, CEEPreloader * pPreloader)
+{
+ STANDARD_VM_CONTRACT;
+
+ m_pTargetImage = pImage;
+ m_pTargetPreloader = pPreloader;
+
+ _ASSERTE(pImage->GetModule() == GetTargetModule());
+}
+
+#if defined(FEATURE_CORECLR) || defined(CROSSGEN_COMPILE)
+void ReportMissingDependency(Exception * e)
+{
+ // Avoid duplicate error messages
+ if (FAILED(g_hrFatalError))
+ return;
+
+ SString s;
+
+ e->GetMessage(s);
+ GetSvcLogger()->Printf(LogLevel_Error, W("Error: %s\n"), s.GetUnicode());
+
+ g_hrFatalError = COR_E_FILELOAD;
+}
+#endif
+
+PEAssembly *CompilationDomain::BindAssemblySpec(
+ AssemblySpec *pSpec,
+ BOOL fThrowOnFileNotFound,
+ BOOL fRaisePrebindEvents,
+ StackCrawlMark *pCallerStackMark,
+ AssemblyLoadSecurity *pLoadSecurity,
+ BOOL fUseHostBinderIfAvailable)
+{
+ PEAssembly *pFile = NULL;
+ //
+ // Do the binding
+ //
+
+ EX_TRY
+ {
+ //
+ // Use normal binding rules
+ // (possibly with our custom IApplicationContext)
+ //
+ pFile = AppDomain::BindAssemblySpec(
+ pSpec,
+ fThrowOnFileNotFound,
+ fRaisePrebindEvents,
+ pCallerStackMark,
+ pLoadSecurity,
+ fUseHostBinderIfAvailable);
+ }
+ EX_HOOK
+ {
+#if defined(FEATURE_CORECLR) || defined(CROSSGEN_COMPILE)
+ if (!g_fNGenMissingDependenciesOk)
+ {
+ ReportMissingDependency(GET_EXCEPTION());
+ EX_RETHROW;
+ }
+#endif
+
+ //
+ // Record missing dependencies
+ //
+#ifdef FEATURE_COMINTEROP
+ if (!g_fNGenWinMDResilient || pSpec->HasUniqueIdentity())
+#endif
+ {
+ IfFailThrow(AddDependency(pSpec, NULL));
+ }
+ }
+ EX_END_HOOK
+
+#ifdef FEATURE_COMINTEROP
+ if (!g_fNGenWinMDResilient || pSpec->HasUniqueIdentity())
+#endif
+ {
+ IfFailThrow(AddDependency(pSpec, pFile));
+ }
+
+ return pFile;
+}
+
+HRESULT
+ CompilationDomain::SetContextInfo(LPCWSTR path, BOOL isExe)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+#ifdef FEATURE_FUSION
+ if (isExe)
+ {
+ if (NingenEnabled())
+ {
+ WCHAR buf[MAX_PATH + sizeof(CONFIGURATION_EXTENSION)/sizeof(WCHAR) + 1];
+ if (0 != wcscpy_s(buf, sizeof(buf)/sizeof(*buf), path))
+ {
+ COMPlusThrowHR(COR_E_PATHTOOLONG);
+ }
+ WCHAR *pSlash = wcsrchr(buf, W('\\'));
+ if (!pSlash)
+ {
+ COMPlusThrowHR(COR_E_BAD_PATHNAME);
+ }
+
+ *(pSlash + 1) = W('\0');
+ hr = m_pFusionContext->Set(ACTAG_APP_BASE_URL, buf, (DWORD)((wcslen(buf) + 1) * sizeof(WCHAR)), 0);
+ if (FAILED(hr))
+ {
+ COMPlusThrowHR(hr);
+ }
+
+ if (0 != wcscpy_s(buf, sizeof(buf)/sizeof(*buf), path + (pSlash - buf) + 1))
+ {
+ COMPlusThrowHR(COR_E_PATHTOOLONG);
+ }
+
+ if (0 != wcscat_s(buf, sizeof(buf)/sizeof(*buf), CONFIGURATION_EXTENSION))
+ {
+ COMPlusThrowHR(COR_E_PATHTOOLONG);
+ }
+ hr = m_pFusionContext->Set(ACTAG_APP_CONFIG_FILE, buf, (DWORD)((wcslen(buf) + 1) * sizeof(WCHAR)), 0);
+ if (FAILED(hr))
+ {
+ COMPlusThrowHR(hr);
+ }
+ }
+ else
+ {
+ SetupExecutableFusionContext(path);
+ }
+ }
+ else
+ {
+ hr = m_pFusionContext->Set(ACTAG_APP_BASE_URL,
+ (void*) path, (DWORD) ((wcslen(path)+1) * sizeof(WCHAR)),
+ 0);
+ }
+#endif //FEATURE_FUSION
+
+ COOPERATIVE_TRANSITION_END();
+
+ return hr;
+}
+
+void CompilationDomain::SetDependencyEmitter(IMetaDataAssemblyEmit *pEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ pEmit->AddRef();
+ m_pEmit = pEmit;
+
+ m_pDependencyRefSpecs = new AssemblySpecHash();
+}
+
+#ifndef FEATURE_CORECLR // hardbinding
+/* Update m_pDependencies for the corresponding assembly, to reflect the fact
+ that we are hard-binding to its native image
+ */
+
+void CompilationDomain::UpdateDependencyEntryForHardBind(PEAssembly * pDependencyAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(pDependencyAssembly->HasBindableIdentity()); // Currently no hard deps on WinMD files.
+ }
+ CONTRACTL_END;
+ AssemblySpec assemblySpec;
+ assemblySpec.InitializeSpec(pDependencyAssembly);
+
+ mdAssemblyRef defToken;
+ IfFailThrow(assemblySpec.EmitToken(m_pEmit, &defToken));
+
+ CORCOMPILE_DEPENDENCY * pDep = m_pDependencies;
+
+ for (unsigned i = 0; i < m_cDependenciesCount; i++, pDep++)
+ {
+ if (pDep->dwAssemblyDef == defToken)
+ {
+ PEImage * pNativeImage = pDependencyAssembly->GetPersistentNativeImage();
+ CORCOMPILE_VERSION_INFO * pNativeVersion = pNativeImage->GetLoadedLayout()->GetNativeVersionInfo();
+ _ASSERTE(pDep->signNativeImage == INVALID_NGEN_SIGNATURE ||
+ pDep->signNativeImage == pNativeVersion->signature);
+ pDep->signNativeImage = pNativeVersion->signature;
+ return;
+ }
+ }
+
+ // We should have found and updated the corresponding dependency
+ _ASSERTE(!"This should be unreachable");
+}
+
+// pAssembly is a hardbound ngen dependency of m_pTargetModule.
+// Try to hardbind to the hardbound dependency closure as there is
+// no extra cost in doing so, and it will help generate better code
+//
+
+void CompilationDomain::IncludeHardBindClosure(PEAssembly * pAssembly)
+{
+ CONTRACTL {
+ PRECONDITION(pAssembly->GetPersistentNativeImage() != NULL);
+ } CONTRACTL_END;
+
+ PEImageLayout *pNativeImage = pAssembly->GetLoadedNative();
+ COUNT_T cDependencies;
+ CORCOMPILE_DEPENDENCY *pDependencies = pNativeImage->GetNativeDependencies(&cDependencies);
+ CORCOMPILE_DEPENDENCY *pDependenciesEnd = pDependencies + cDependencies;
+
+ for (/**/; pDependencies < pDependenciesEnd; pDependencies++)
+ {
+ // Ignore "soft" dependencies
+
+ if (pDependencies->signNativeImage == INVALID_NGEN_SIGNATURE)
+ continue;
+
+ // Load the manifest file for the given hardbound name-assembly-spec.
+
+ AssemblySpec name;
+ name.InitializeSpec(pDependencies->dwAssemblyRef,
+ pAssembly->GetPersistentNativeImage()->GetNativeMDImport(),
+ FindAssembly(pAssembly),
+ FALSE);
+
+ DomainAssembly * pDependency = name.LoadDomainAssembly(FILE_LOADED);
+
+ // Since pAssembly hardbinds to pDependency, pDependency better
+ // have a native image
+ _ASSERTE(pDependency->GetFile()->HasNativeImage());
+
+ //
+ // Now add pDependency as a hard dependency of m_pTargetModule.
+ // We pass in limitToHardBindList=FALSE as it is OK to hardbind even if
+ // pDependency is not in the hardbound list.
+ //
+
+ CanEagerBindToZapFile(pDependency->GetLoadedModule(), FALSE);
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Check if we were successfully able to hardbind to the requested dependency
+
+void CompilationDomain::CheckHardBindToZapFile(SString dependencyNameFromCA)
+{
+ // First check if we were successfully able to hard-bind
+
+ for (PtrHashMap::PtrIterator hbItr = m_hardBoundModules.begin(); !hbItr.end(); ++hbItr)
+ {
+ Module * pModule = (Module*) hbItr.GetValue();
+
+ if (IsAssemblySpecifiedInCA(pModule->GetAssembly(), dependencyNameFromCA))
+ {
+ // We did successfully use "dependencyNameFromCA"
+ return;
+ }
+ }
+
+ // Next, check if we failed to hard-bind. CompilationDomain::CanEagerBindToZapFile()
+ // would have logged a warning message with the cause of the soft-bind
+
+ for (PtrHashMap::PtrIterator sbItr = m_cantHardBindModules.begin(); !sbItr.end(); ++sbItr)
+ {
+ Module * pModule = (Module*) sbItr.GetValue();
+
+ if (IsAssemblySpecifiedInCA(pModule->GetAssembly(), dependencyNameFromCA))
+ {
+ if (!IsInHardBindRequestList(pModule->GetAssembly()))
+ {
+ // CompilationDomain::CanEagerBindToZapFile() does not give a warning
+ // message for the cyclic dependency case, since the NGEN service
+ // breaks the cycle by overriding the CA. So give a message here instead.
+ GetSvcLogger()->Printf(LogLevel_Warning, W("WARNING: Dependency attribute for %s is being ignored, possibly because of cyclic dependencies.\n"),
+ dependencyNameFromCA.GetUnicode());
+ }
+ return;
+ }
+ }
+
+ // Finally, it looks like the assembly was either not loaded, or that
+ // there was no reason to even try to hard-bind.
+
+ GetSvcLogger()->Printf(LogLevel_Warning, W("WARNING: Cannot hardbind to %s because it is not loaded\n"),
+ dependencyNameFromCA.GetUnicode());
+}
+
+//-----------------------------------------------------------------------------
+// Check if we were successfully able to hardbind to all the requested
+// dependencies.
+
+void CompilationDomain::CheckLoadHints()
+{
+ if (!m_useHardBindList)
+ return;
+
+ if (g_pConfig->NgenHardBind() == EEConfig::NGEN_HARD_BIND_NONE)
+ return;
+
+ // Look for the binding custom attribute
+
+ IMDInternalImport * pImport = m_pTargetAssembly->GetManifestImport();
+ _ASSERTE(pImport);
+
+ MDEnumHolder hEnum(pImport); // Enumerator for custom attributes
+ if (FAILED(pImport->EnumCustomAttributeByNameInit(m_pTargetAssembly->GetManifestToken(),
+ DEPENDENCY_TYPE,
+ &hEnum)))
+ {
+ return;
+ }
+
+ mdCustomAttribute tkAttribute; // A custom attribute on this assembly.
+ const BYTE *pbAttr; // Custom attribute data as a BYTE*.
+ ULONG cbAttr; // Size of custom attribute data.
+
+ while (pImport->EnumNext(&hEnum, &tkAttribute))
+ { // Get raw custom attribute.
+ IfFailThrow(pImport->GetCustomAttributeAsBlob(tkAttribute, (const void**)&pbAttr, &cbAttr));
+
+ CustomAttributeParser cap(pbAttr, cbAttr);
+ if (FAILED(cap.ValidateProlog()))
+ {
+ THROW_BAD_FORMAT(BFA_BAD_CA_HEADER, m_pTargetAssembly->GetManifestModule());
+ }
+
+ LPCUTF8 szString;
+ ULONG cbString;
+ if (FAILED(cap.GetNonNullString(&szString, &cbString)))
+ {
+ THROW_BAD_FORMAT(BFA_BAD_CA_STRING, m_pTargetAssembly->GetManifestModule());
+ }
+
+ UINT32 u4;
+ IfFailThrow(cap.GetU4(&u4));
+ LoadHintEnum loadHint = (LoadHintEnum)u4;
+
+ if (loadHint != LoadAlways)
+ continue;
+
+ // Convert the string to Unicode.
+ StackSString dependencyNameFromCA(SString::Utf8, szString, cbString);
+
+ CheckHardBindToZapFile(dependencyNameFromCA);
+ }
+}
+#endif // FEATURE_CORECLR
+
+HRESULT
+ CompilationDomain::GetDependencies(CORCOMPILE_DEPENDENCY **ppDependencies,
+ DWORD *pcDependencies)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifndef FEATURE_CORECLR // hardbinding
+ CheckLoadHints();
+#endif
+
+ //
+ // Return the bindings.
+ //
+
+ *ppDependencies = m_pDependencies;
+ *pcDependencies = m_cDependenciesCount;
+
+ // Cannot add any more dependencies
+ ReleaseDependencyEmitter();
+
+ return S_OK;
+}
+
+#ifdef FEATURE_FUSION
+HRESULT
+ CompilationDomain::GetIBindContext(IBindContext **ppBindCtx)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+
+ ReleaseHolder<IBindContext> pBindCtx;
+ if (HasLoadContextHostBinder())
+ {
+ IfFailRet(GetCurrentLoadContextHostBinder()->QueryInterface(__uuidof(IBindContext), &pBindCtx));
+ }
+ else
+ {
+ GetBindContextFromApplicationContext(BaseDomain::GetFusionContext(), &pBindCtx); // Can't fail
+ }
+
+ *ppBindCtx = pBindCtx.Extract();
+ return S_OK;
+}
+#endif
+
+#ifdef CROSSGEN_COMPILE
+HRESULT CompilationDomain::SetPlatformWinmdPaths(LPCWSTR pwzPlatformWinmdPaths)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_COMINTEROP
+    // Create the array list on the heap since it will be handed off to the Crossgen RoResolveNamespace mockup, which keeps it for the life of the process
+ StringArrayList *saPaths = new StringArrayList();
+
+ SString strPaths(pwzPlatformWinmdPaths);
+ if (!strPaths.IsEmpty())
+ {
+ for (SString::Iterator i = strPaths.Begin(); i != strPaths.End(); )
+ {
+            // Skip over any leading or consecutive semicolons (empty entries)
+ if (strPaths.Skip(i, W(';')))
+ {
+ continue;
+ }
+
+ SString::Iterator iEnd = i; // Where current assembly name ends
+ SString::Iterator iNext; // Where next assembly name starts
+ if (strPaths.Find(iEnd, W(';')))
+ {
+ iNext = iEnd + 1;
+ }
+ else
+ {
+ iNext = iEnd = strPaths.End();
+ }
+
+ _ASSERTE(i < iEnd);
+ if(i != iEnd)
+ {
+ saPaths->Append(SString(strPaths, i, iEnd));
+ }
+ i = iNext;
+ }
+ }
+ Crossgen::SetFirstPartyWinMDPaths(saPaths);
+#endif // FEATURE_COMINTEROP
+
+ return S_OK;
+}
+#endif // CROSSGEN_COMPILE
+
+#endif // FEATURE_PREJIT
diff --git a/src/vm/compile.h b/src/vm/compile.h
new file mode 100644
index 0000000000..9c5a62de5f
--- /dev/null
+++ b/src/vm/compile.h
@@ -0,0 +1,919 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: compile.h
+//
+// Interfaces and support for the zap compiler and zap files
+//
+
+// ===========================================================================
+
+
+/*
+
+The preloader is used to serialize internal EE data structures in the
+zapped image. The object model looks like the following:
+
+ +--------------------+
+ | |
+ | ZapperModule |
+ | |
+ +--------------------+
+ |
+ *
+ ICorCompileDataStore Zapper
+
+ =====================================================
+
+ ICorCompilePreloader EE
+ *
+ |
+ +--------------------+
+ | |
+ | CEEPreloader |
+ | |
+ +--------------------+
+ |
+ *
+ DataImage::IDataStore
+
+
+ +--------------------+
+ | |
+ | DataImage |
+ | |
+ +--------------------+
+
+ZapperModule - Created by the zapper for each module. It implements the
+ ICorCompileDataStore interface that the preloader uses to
+ allocate space for the EE data structures. Currently it
+ allocates space in a single PE section (though the DataImage
+ has logic to further subdivide the space into subsections).
+
+CEEPreloader - Created by ZapperModule in order to serialize EE
+ data structures. It implements two interfaces.
+ ICorCompilePreloader is used by ZapperModule to inquire
+ about the offsets of various EE data structures inside
+ the preloader section. DataImage::IDataStore is used
+ by DataImage to manage the PE section memory, and the
+ implementation in the CEEPreloader mostly forwards the calls
+ to the zapper (ICorCompileDataStore).
+
+DataImage - Created by CEEPreloader to keep track of memory used by
+ EE data structures. Even though it uses only one PE
+ section, it allows the EE to allocate memory in multiple
+ subsections. This is accomplished by splitting the work into
+ three phases (there are comments in dataimage.h that explain
+ this in detail).
+
+
+The CEEPreloader is created when ZapperModule::Preload calls
+m_zapper->m_pEECompileInfo->PreloadModule. PreloadModule creates
+the CEEPreloader and then calls its Preload method, which explicitly
+loads all the EE objects into memory (Module::ExpandAll), and then
+allocates space for them in the preloader section (Module::Save).
+
+Each EE data structure that needs to be serialized implements a Save
+method. A Save method is required to:
+1) Store all of its data (including strings and other buffers that it
+ uses) in the preloader section. This is accomplished by calling on
+ one of the DataImage storage methods (such as DataImage::StoreStructure).
+2) Call the Save method on the objects that it owns. The interesting
+ part of the hierarchy looks like:
+
+ Module::Save
+ MethodTable::Save (in profile order)
+ EEClass::Save
+ MethodDescChunk::Save (method desc chunks can be split into hot
+ and cold based on profile info)
+ MethodDesc::Save
+
+Note that while the architecture requires the data structures in the
+preloader sections to look like their EE counterparts, it is possible
+to work around that limitation by constructing multiple submappings of
+these data structures. Sometimes the submappings require a change to the actual
+data (e.g. each method desc has information that tells you how far it is
+from the MethodDescChunk, and that needs to change when reordering method
+descs). In such cases you create new copies of that memory and construct
+a regular copying map for each of the new updated copies (DataImage::StoreStructure),
+and a pointer update map for each of the original EE data structures
+(DataImage::StoreStructureUsingSurrogate). See MethodDescChunk::Save for
+an example on how to do this.
+
+Fixups: once everything has been laid out in memory, the ZapperModule
+calls CEEPreloader::Link to generate fixups for the data. CEEPreloader::Link
+calls Module::Fixup, which results in a data structure walk very similar to
+that of Module::Save. Each data structure calls one of the FixupPointerField
+methods on the DataImage, which in turn forwards the call to
+CEEPreloader::AddFixup, which forwards it to the zapper
+(ZapperModule::AddFixup).
+
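+To make the Save pattern concrete, here is a minimal illustrative sketch of a
+Save method for a hypothetical EE data structure (MyStruct and its fields are
+made up for illustration; the actual DataImage storage methods take additional
+arguments such as the item kind, and the real hierarchy is rooted at
+Module::Save as shown above):
+
+    void MyStruct::Save(DataImage *image)
+    {
+        // 1) Store this structure itself in the preloader section.
+        image->StoreStructure(this, sizeof(MyStruct), DataImage::ITEM_MODULE);
+
+        // 2) Store any buffers that this structure owns.
+        image->StoreStructure(m_pName, (ULONG)(strlen(m_pName) + 1),
+                              DataImage::ITEM_MODULE);
+
+        // 3) Recurse into the objects that this structure owns.
+        m_pOwnedChild->Save(image);
+    }
+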
+*/
+
+#ifndef COMPILE_H_
+#define COMPILE_H_
+
+#ifndef FEATURE_PREJIT
+#error FEATURE_PREJIT is required for this file
+#endif // FEATURE_PREJIT
+
+struct ZapperLoaderModuleTableKey {
+ ZapperLoaderModuleTableKey(Module *pDefinitionModule,
+ mdToken token,
+ Instantiation classInst,
+ Instantiation methodInst)
+ : m_inst(classInst, methodInst)
+ { WRAPPER_NO_CONTRACT;
+ this->m_pDefinitionModule = pDefinitionModule;
+ this->m_token = token; }
+
+ Module *m_pDefinitionModule;
+ mdToken m_token;
+ SigTypeContext m_inst;
+} ;
+
+struct ZapperLoaderModuleTableEntry {
+ ZapperLoaderModuleTableEntry(): key(0,0,Instantiation(),Instantiation()) { WRAPPER_NO_CONTRACT; this->result = 0; }
+ ZapperLoaderModuleTableEntry(const ZapperLoaderModuleTableKey &_key,Module *_result)
+ : key(_key)
+ { this->result = _result; }
+
+ ZapperLoaderModuleTableKey key;
+ Module *result;
+} ;
+
+class ZapperLoaderModuleTableTraits : public NoRemoveSHashTraits<DefaultSHashTraits<ZapperLoaderModuleTableEntry> >
+{
+
+public:
+ typedef const ZapperLoaderModuleTableKey *key_t;
+ static const ZapperLoaderModuleTableKey * GetKey(const ZapperLoaderModuleTableEntry &e) { return &e.key; }
+ static count_t Hash(const ZapperLoaderModuleTableKey * k)
+ {
+ LIMITED_METHOD_CONTRACT;
+
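+        // djb2-style combining: hash = hash * 33 ^ nextValue.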
+ DWORD dwHash = 5381;
+
+ dwHash = ((dwHash << 5) + dwHash) ^ (unsigned int)(SIZE_T)k->m_pDefinitionModule;
+ dwHash = ((dwHash << 5) + dwHash) ^ (unsigned int)(SIZE_T)k->m_token;
+ dwHash = ((dwHash << 5) + dwHash) ^ EEInstantiationHashTableHelper:: Hash(&k->m_inst);
+ return dwHash;
+ }
+
+ static BOOL Equals(const ZapperLoaderModuleTableKey *e1, const ZapperLoaderModuleTableKey *e2)
+ {
+ WRAPPER_NO_CONTRACT;
+ return e1->m_pDefinitionModule == e2->m_pDefinitionModule &&
+ e1->m_token == e2->m_token &&
+ SigTypeContext::Equal(&e1->m_inst, &e2->m_inst);
+ }
+ static const ZapperLoaderModuleTableEntry Null()
+ { return ZapperLoaderModuleTableEntry(); }
+
+ static bool IsNull(const ZapperLoaderModuleTableEntry &e)
+ { LIMITED_METHOD_CONTRACT; return e.key.m_pDefinitionModule == 0 && e.key.m_token == 0 && e.key.m_inst.IsEmpty(); }
+
+};
+
+
+typedef SHash<ZapperLoaderModuleTableTraits> ZapperLoaderModuleTable;
+
+class CEECompileInfo : public ICorCompileInfo
+{
+ public:
+ HRESULT Startup( BOOL fForceDebug,
+ BOOL fForceProfiling,
+ BOOL fForceInstrument);
+
+ HRESULT CreateDomain(ICorCompilationDomain **ppDomain,
+ IMetaDataAssemblyEmit *pEmitter,
+ BOOL fForceDebug,
+ BOOL fForceProfiling,
+ BOOL fForceInstrument,
+ BOOL fForceFulltrustDomain
+#ifdef MDIL
+ , MDILCompilationFlags mdilCompilationFlags
+#endif
+ );
+
+ HRESULT MakeCrossDomainCallback(
+ ICorCompilationDomain* pDomain,
+ CROSS_DOMAIN_CALLBACK pfnCallback,
+ LPVOID pArgs);
+
+ HRESULT DestroyDomain(ICorCompilationDomain *pDomain);
+
+ HRESULT LoadAssemblyByPath(LPCWSTR wzPath,
+ BOOL fExplicitBindToNativeImage,
+ CORINFO_ASSEMBLY_HANDLE *pHandle);
+
+#ifdef FEATURE_FUSION
+ HRESULT LoadAssemblyByName(LPCWSTR wzName,
+ CORINFO_ASSEMBLY_HANDLE *pHandle);
+
+ HRESULT LoadAssemblyRef(IMDInternalImport *pAssemblyImport,
+ mdAssemblyRef ref,
+ CORINFO_ASSEMBLY_HANDLE *pHandle,
+ IAssemblyName **refAssemblyName = NULL);
+
+ HRESULT LoadAssemblyByIAssemblyName(
+ IAssemblyName *pAssemblyName,
+ CORINFO_ASSEMBLY_HANDLE *pHandle
+ );
+
+#endif //FEATURE_FUSION
+
+#ifdef FEATURE_COMINTEROP
+ HRESULT LoadTypeRefWinRT(IMDInternalImport *pAssemblyImport,
+ mdTypeRef ref,
+ CORINFO_ASSEMBLY_HANDLE *pHandle);
+#endif
+
+ BOOL IsInCurrentVersionBubble(CORINFO_MODULE_HANDLE hModule);
+
+ HRESULT LoadAssemblyModule(CORINFO_ASSEMBLY_HANDLE assembly,
+ mdFile file,
+ CORINFO_MODULE_HANDLE *pHandle);
+
+#ifndef FEATURE_CORECLR
+ // Check if the assembly supports automatic NGen
+ BOOL SupportsAutoNGen(CORINFO_ASSEMBLY_HANDLE assembly);
+
+ HRESULT SetCachedSigningLevel(HANDLE hNI, HANDLE *pModules, COUNT_T nModules);
+#endif
+
+ BOOL CheckAssemblyZap(
+ CORINFO_ASSEMBLY_HANDLE assembly,
+ __out_ecount_opt(*cAssemblyManifestModulePath)
+ LPWSTR assemblyManifestModulePath,
+ LPDWORD cAssemblyManifestModulePath);
+
+#ifdef MDIL
+ DWORD GetMdilModuleSecurityFlags(
+ CORINFO_ASSEMBLY_HANDLE assembly);
+
+ BOOL CompilerRelaxationNoStringInterningPermitted(
+ CORINFO_ASSEMBLY_HANDLE assembly);
+
+ BOOL RuntimeCompatibilityWrapExceptions(
+ CORINFO_ASSEMBLY_HANDLE assembly);
+
+ DWORD CERReliabilityContract(
+ CORINFO_ASSEMBLY_HANDLE assembly);
+#endif //MDIL
+
+ HRESULT SetCompilationTarget(CORINFO_ASSEMBLY_HANDLE assembly,
+ CORINFO_MODULE_HANDLE module);
+
+ IMDInternalImport * GetAssemblyMetaDataImport(CORINFO_ASSEMBLY_HANDLE scope);
+
+ IMDInternalImport * GetModuleMetaDataImport(CORINFO_MODULE_HANDLE scope);
+
+ CORINFO_MODULE_HANDLE GetAssemblyModule(CORINFO_ASSEMBLY_HANDLE module);
+
+ CORINFO_ASSEMBLY_HANDLE GetModuleAssembly(CORINFO_MODULE_HANDLE module);
+
+ PEDecoder * GetModuleDecoder(CORINFO_MODULE_HANDLE scope);
+
+ void GetModuleFileName(CORINFO_MODULE_HANDLE module,
+ SString &result);
+
+ void EncodeModuleAsIndexes( CORINFO_MODULE_HANDLE fromHandle,
+ CORINFO_MODULE_HANDLE handle,
+ DWORD *pAssemblyIndex,
+ DWORD *pModuleIndex,
+ IMetaDataAssemblyEmit *pAssemblyEmit);
+
+ void EncodeClass( CORINFO_MODULE_HANDLE referencingModule,
+ CORINFO_CLASS_HANDLE classHandle,
+ SigBuilder *pSigBuilder,
+ LPVOID encodeContext,
+ ENCODEMODULE_CALLBACK pfnEncodeModule);
+
+ void EncodeMethod( CORINFO_MODULE_HANDLE referencingModule,
+ CORINFO_METHOD_HANDLE methHnd,
+ SigBuilder *pSigBuilder,
+ LPVOID encodeContext,
+ ENCODEMODULE_CALLBACK pfnEncodeModule,
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken);
+
+ virtual mdToken TryEncodeMethodAsToken(CORINFO_METHOD_HANDLE handle,
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_MODULE_HANDLE * referencingModule);
+
+ virtual DWORD TryEncodeMethodSlot(CORINFO_METHOD_HANDLE handle);
+
+ void EncodeField( CORINFO_MODULE_HANDLE referencingModule,
+ CORINFO_FIELD_HANDLE handle,
+ SigBuilder *pSigBuilder,
+ LPVOID encodeContext,
+ ENCODEMODULE_CALLBACK pfnEncodeModule,
+ CORINFO_RESOLVED_TOKEN * pResolvedToken);
+
+ // Encode generic dictionary signature
+ virtual void EncodeGenericSignature(
+ LPVOID signature,
+ BOOL fMethod,
+ SigBuilder * pSigBuilder,
+ LPVOID encodeContext,
+ ENCODEMODULE_CALLBACK pfnEncodeModule);
+
+
+ BOOL IsEmptyString(mdString token,
+ CORINFO_MODULE_HANDLE module);
+
+ BOOL IsCachingOfInliningHintsEnabled()
+ {
+ return m_fCachingOfInliningHintsEnabled;
+ }
+
+ void DisableCachingOfInliningHints()
+ {
+ m_fCachingOfInliningHintsEnabled = FALSE;
+ }
+
+ HRESULT GetTypeDef( CORINFO_CLASS_HANDLE classHandle,
+ mdTypeDef *token);
+ HRESULT GetMethodDef( CORINFO_METHOD_HANDLE methodHandle,
+ mdMethodDef *token);
+ HRESULT GetFieldDef( CORINFO_FIELD_HANDLE fieldHandle,
+ mdFieldDef *token);
+
+ void SetAssemblyHardBindList(__in_ecount( cHardBindList )
+ LPWSTR *pHardBindList,
+ DWORD cHardBindList);
+
+ CORINFO_MODULE_HANDLE GetLoaderModuleForMscorlib();
+ CORINFO_MODULE_HANDLE GetLoaderModuleForEmbeddableType(CORINFO_CLASS_HANDLE classHandle);
+ CORINFO_MODULE_HANDLE GetLoaderModuleForEmbeddableMethod(CORINFO_METHOD_HANDLE methodHandle);
+ CORINFO_MODULE_HANDLE GetLoaderModuleForEmbeddableField(CORINFO_FIELD_HANDLE fieldHandle);
+
+ ICorCompilePreloader * PreloadModule(CORINFO_MODULE_HANDLE moduleHandle,
+ ICorCompileDataStore *pData,
+ CorProfileData *profileData);
+
+#if MDIL
+ HRESULT ShouldCompile(CORINFO_METHOD_HANDLE methodHandle);
+#endif // MDIL
+
+#ifdef FEATURE_FUSION
+ HRESULT GetAssemblyName(
+ CORINFO_ASSEMBLY_HANDLE hAssembly,
+ DWORD dwFlags,
+ __out_z LPWSTR wzAssemblyName,
+ LPDWORD cchAssemblyName);
+#endif //FEATURE_FUSION
+
+ HRESULT GetLoadHint(CORINFO_ASSEMBLY_HANDLE hAssembly,
+ CORINFO_ASSEMBLY_HANDLE hAssemblyDependency,
+ LoadHintEnum *loadHint,
+ LoadHintEnum *defaultLoadHint = NULL // for MDIL we want to separate the default load hint on the assembly
+ // from the load hint on the dependency
+ );
+
+ HRESULT GetAssemblyVersionInfo(CORINFO_ASSEMBLY_HANDLE Handle,
+ CORCOMPILE_VERSION_INFO *pInfo);
+
+ void GetAssemblyCodeBase(CORINFO_ASSEMBLY_HANDLE hAssembly,
+ SString &result);
+
+ void GetCallRefMap(CORINFO_METHOD_HANDLE hMethod,
+ GCRefMapBuilder * pBuilder);
+
+ void CompressDebugInfo(
+ IN ICorDebugInfo::OffsetMapping * pOffsetMapping,
+ IN ULONG iOffsetMapping,
+ IN ICorDebugInfo::NativeVarInfo * pNativeVarInfo,
+ IN ULONG iNativeVarInfo,
+ IN OUT SBuffer * pDebugInfoBuffer);
+
+ HRESULT SetVerboseLevel(
+ IN VerboseLevel level);
+
+ HRESULT GetBaseJitFlags(
+ IN CORINFO_METHOD_HANDLE hMethod,
+ OUT DWORD *pFlags);
+
+#ifdef _WIN64
+ SIZE_T getPersonalityValue();
+#endif
+
+ void* GetStubSize(void *pStubAddress, DWORD *pSizeToCopy);
+
+ HRESULT GetStubClone(void *pStub, BYTE *pBuffer, DWORD dwBufferSize);
+
+ BOOL GetIsGeneratingNgenPDB();
+ void SetIsGeneratingNgenPDB(BOOL fGeneratingNgenPDB);
+
+#ifdef FEATURE_READYTORUN_COMPILER
+ CORCOMPILE_FIXUP_BLOB_KIND GetFieldBaseOffset(
+ CORINFO_CLASS_HANDLE classHnd,
+ DWORD * pBaseOffset);
+
+ BOOL NeedsTypeLayoutCheck(CORINFO_CLASS_HANDLE classHnd);
+ void EncodeTypeLayout(CORINFO_CLASS_HANDLE classHandle, SigBuilder * pSigBuilder);
+
+ BOOL AreAllClassesFullyLoaded(CORINFO_MODULE_HANDLE moduleHandle);
+#endif
+
+ //--------------------------------------------------------------------
+ // ZapperLoaderModules and the ZapperLoaderModuleTable
+ //
+ // When NGEN'ing we want to adjust the
+    // places where some items (e.g. generic instantiations) are placed, in order to get some of them
+ // placed into the module we are compiling. However, the
+ // results of ComputeLoaderModule must be stable for the duration
+ // of an entire instance of the VM, i.e. for the duration of a compilation
+ // process. Thus each time we place an item into a non-standard LoaderModule we record
+ // that fact.
+
+ Module *LookupZapperLoaderModule(const ZapperLoaderModuleTableKey *pKey)
+ {
+ WRAPPER_NO_CONTRACT;
+ const ZapperLoaderModuleTableEntry *pEntry = m_ZapperLoaderModuleTable.LookupPtr(pKey);
+ if (pEntry)
+ return pEntry->result;
+ return NULL;
+ }
+
+ void RecordZapperLoaderModule(const ZapperLoaderModuleTableKey *pKey,
+ Module *pZapperLoaderModuleTable)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ ZapperLoaderModuleTableEntry entry(*pKey, pZapperLoaderModuleTable);
+ m_ZapperLoaderModuleTable.Add(entry);
+ }
+
+ ZapperLoaderModuleTable m_ZapperLoaderModuleTable;
+
+private:
+ BOOL m_fCachingOfInliningHintsEnabled;
+ BOOL m_fGeneratingNgenPDB;
+};
+
+extern CEECompileInfo *g_pCEECompileInfo;
+
+BOOL IsNgenPDBCompilationProcess();
+
+//
+// See comment at top of file for an explanation on the preloader
+// architecture.
+//
+
+class CEEPreloader : public ICorCompilePreloader
+{
+ private:
+ DataImage *m_image;
+ ICorCompileDataStore *m_pData;
+
+ class MethodSetTraits : public NoRemoveSHashTraits< DefaultSHashTraits<MethodDesc *> >
+ {
+ public:
+ typedef MethodDesc *key_t;
+ static MethodDesc * GetKey(MethodDesc *md) { return md; }
+ static count_t Hash(MethodDesc *md) { return (count_t) (UINT_PTR) md; }
+ static BOOL Equals(MethodDesc *md1, MethodDesc *md2)
+ {
+ return md1 == md2;
+ }
+ };
+
+ class TypeSetTraits : public NoRemoveSHashTraits< DefaultSHashTraits<TypeHandle> >
+ {
+ public:
+ typedef TypeHandle key_t;
+ static const TypeHandle Null() { return TypeHandle(); }
+ static bool IsNull(const TypeHandle &th) { return !!th.IsNull(); }
+ static TypeHandle GetKey(TypeHandle th) { return th; }
+ static count_t Hash(TypeHandle th) { return (count_t) th.AsTAddr(); }
+ static BOOL Equals(TypeHandle th1, TypeHandle th2) { return th1 == th2; }
+ };
+
+ // Cached results of instantiations triage
+ SHash<TypeSetTraits> m_acceptedTypes;
+ SHash<MethodSetTraits> m_acceptedMethods;
+ SHash<TypeSetTraits> m_rejectedTypes;
+ SHash<MethodSetTraits> m_rejectedMethods;
+
+#ifdef FEATURE_FULL_NGEN
+ // Tentatively accepted instantiations
+ InlineSArray<TypeHandle, 20> m_speculativeTypes;
+ BOOL m_fSpeculativeTriage;
+ BOOL m_fDictionariesPopulated;
+#endif
+
+ struct CompileMethodEntry
+ {
+ MethodDesc * pMD;
+#ifndef FEATURE_FULL_NGEN // Unreferenced methods
+ bool fReferenced; // true when this method was referenced by other code
+ bool fScheduled; // true when this method was scheduled for compilation
+#endif
+ };
+
+ class CompileMethodSetTraits : public NoRemoveSHashTraits< DefaultSHashTraits<CompileMethodEntry> >
+ {
+ public:
+ typedef MethodDesc *key_t;
+ static MethodDesc * GetKey(CompileMethodEntry e) { return e.pMD; }
+ static count_t Hash(MethodDesc *md) { return (count_t) (UINT_PTR) md; }
+ static BOOL Equals(MethodDesc *md1, MethodDesc *md2)
+ {
+ return md1 == md2;
+ }
+ static const CompileMethodEntry Null() { CompileMethodEntry e; e.pMD = NULL; return e; }
+ static bool IsNull(const CompileMethodEntry &e) { return e.pMD == NULL; }
+ };
+
+ SHash<CompileMethodSetTraits> m_compileMethodsHash;
+
+ // Array of methods that we need to compile.
+ SArray<MethodDesc*> m_uncompiledMethods;
+
+ struct DuplicateMethodEntry
+ {
+ MethodDesc * pMD;
+ MethodDesc * pDuplicateMD;
+ };
+
+ class DuplicateMethodTraits : public NoRemoveSHashTraits< DefaultSHashTraits<DuplicateMethodEntry> >
+ {
+ public:
+ typedef MethodDesc *key_t;
+ static MethodDesc * GetKey(DuplicateMethodEntry e) { return e.pMD; }
+ static count_t Hash(MethodDesc *md) { return (count_t) (UINT_PTR) md; }
+ static BOOL Equals(MethodDesc *md1, MethodDesc *md2)
+ {
+ return md1 == md2;
+ }
+ static const DuplicateMethodEntry Null() { DuplicateMethodEntry e; e.pMD = NULL; return e; }
+ static bool IsNull(const DuplicateMethodEntry &e) { return e.pMD == NULL; }
+ };
+
+ SHash<DuplicateMethodTraits> m_duplicateMethodsHash;
+
+ MethodDesc * CompileMethodStubIfNeeded(
+ MethodDesc *pMD,
+ MethodDesc *pStubMD,
+ ICorCompilePreloader::CORCOMPILE_CompileStubCallback pfnCallback,
+ LPVOID pCallbackContext);
+
+ public:
+ CEEPreloader(Module *pModule,
+ ICorCompileDataStore *pData);
+ ~CEEPreloader();
+
+ void Preload(CorProfileData * profileData);
+ DataImage * GetDataImage() { LIMITED_METHOD_CONTRACT; return m_image; }
+ ICorCompileDataStore * GetDataStore() { LIMITED_METHOD_CONTRACT; return m_pData; }
+
+ //
+ // ICorCompilerPreloader
+ //
+
+ DWORD MapMethodEntryPoint(CORINFO_METHOD_HANDLE handle);
+ DWORD MapClassHandle(CORINFO_CLASS_HANDLE handle);
+ DWORD MapMethodHandle(CORINFO_METHOD_HANDLE handle);
+ DWORD MapFieldHandle(CORINFO_FIELD_HANDLE handle);
+ DWORD MapAddressOfPInvokeFixup(CORINFO_METHOD_HANDLE handle);
+ DWORD MapGenericHandle(CORINFO_GENERIC_HANDLE handle);
+ DWORD MapModuleIDHandle(CORINFO_MODULE_HANDLE handle);
+
+ void AddMethodToTransitiveClosureOfInstantiations(CORINFO_METHOD_HANDLE handle);
+ void AddTypeToTransitiveClosureOfInstantiations(CORINFO_CLASS_HANDLE handle);
+ BOOL IsMethodInTransitiveClosureOfInstantiations(CORINFO_METHOD_HANDLE handle);
+ BOOL IsTypeInTransitiveClosureOfInstantiations(CORINFO_CLASS_HANDLE handle);
+
+ void MethodReferencedByCompiledCode(CORINFO_METHOD_HANDLE handle);
+
+ BOOL IsUncompiledMethod(CORINFO_METHOD_HANDLE handle);
+#ifdef MDIL
+ void AddMDILCodeFlavorsToUncompiledMethods(CORINFO_METHOD_HANDLE handle);
+#endif
+
+private:
+ void AddToUncompiledMethods(MethodDesc *pMethod, BOOL fForStubs);
+
+ void ApplyTypeDependencyProductionsForType(TypeHandle t);
+ void ApplyTypeDependencyForSZArrayHelper(MethodTable * pInterfaceMT, TypeHandle elemTypeHnd);
+
+ friend class Module;
+ void TriageTypeForZap(TypeHandle th, BOOL fAcceptIfNotSure, BOOL fExpandDependencies = TRUE);
+ void TriageMethodForZap(MethodDesc* pMethod, BOOL fAcceptIfNotSure, BOOL fExpandDependencies = TRUE);
+
+ void ExpandTypeDependencies(TypeHandle th);
+ void ExpandMethodDependencies(MethodDesc * pMD);
+
+ void TriageTypeSpecsFromSoftBoundModule(Module * pSoftBoundModule);
+ void TriageTypeFromSoftBoundModule(TypeHandle th, Module * pSoftBoundModule);
+ void TriageSpeculativeType(TypeHandle th);
+ void TriageSpeculativeInstantiations();
+
+ // Returns TRUE if new types or methods have been added by the triage
+ BOOL TriageForZap(BOOL fAcceptIfNotSure, BOOL fExpandDependencies = TRUE);
+
+public:
+ CORINFO_METHOD_HANDLE NextUncompiledMethod();
+
+ void PrePrepareMethodIfNecessary(CORINFO_METHOD_HANDLE hMethod);
+
+ void GenerateMethodStubs(
+ CORINFO_METHOD_HANDLE hMethod,
+ bool fNgenProfileImage,
+ CORCOMPILE_CompileStubCallback pfnCallback,
+ LPVOID pCallbackContext);
+
+ bool IsDynamicMethod(CORINFO_METHOD_HANDLE hMethod);
+ void SetMethodProfilingFlags(CORINFO_METHOD_HANDLE hMethod, DWORD flags);
+
+ bool CanSkipMethodPreparation (
+ CORINFO_METHOD_HANDLE callerHnd, /* IN */
+ CORINFO_METHOD_HANDLE calleeHnd, /* IN */
+ CorInfoIndirectCallReason *pReason = NULL,
+ CORINFO_ACCESS_FLAGS accessFlags = CORINFO_ACCESS_ANY);
+
+ BOOL CanEmbedClassID (CORINFO_CLASS_HANDLE typeHandle);
+ BOOL CanEmbedModuleID (CORINFO_MODULE_HANDLE moduleHandle);
+ BOOL CanEmbedModuleHandle(CORINFO_MODULE_HANDLE moduleHandle);
+ BOOL CanEmbedClassHandle (CORINFO_CLASS_HANDLE typeHandle);
+ BOOL CanEmbedMethodHandle(CORINFO_METHOD_HANDLE methodHandle,
+ CORINFO_METHOD_HANDLE contextHandle);
+ BOOL CanEmbedFieldHandle (CORINFO_FIELD_HANDLE fieldHandle);
+
+ BOOL CanPrerestoreEmbedClassHandle (CORINFO_CLASS_HANDLE classHnd);
+ BOOL CanPrerestoreEmbedMethodHandle(CORINFO_METHOD_HANDLE methodHnd);
+
+ BOOL CanEmbedFunctionEntryPoint(CORINFO_METHOD_HANDLE methodHandle,
+ CORINFO_METHOD_HANDLE contextHandle,
+ CORINFO_ACCESS_FLAGS accessFlags = CORINFO_ACCESS_ANY);
+
+ BOOL DoesMethodNeedRestoringBeforePrestubIsRun(CORINFO_METHOD_HANDLE methodHandle);
+
+ BOOL CanSkipDependencyActivation(CORINFO_METHOD_HANDLE context,
+ CORINFO_MODULE_HANDLE moduleFrom,
+ CORINFO_MODULE_HANDLE moduleTo);
+
+ CORINFO_MODULE_HANDLE GetPreferredZapModuleForClassHandle(CORINFO_CLASS_HANDLE classHnd);
+
+ void NoteDeduplicatedCode(CORINFO_METHOD_HANDLE method, CORINFO_METHOD_HANDLE duplicateMethod);
+
+ CORINFO_METHOD_HANDLE LookupMethodDef(mdMethodDef token);
+
+ CorCompileILRegion GetILRegion(mdMethodDef token);
+
+ CORINFO_CLASS_HANDLE FindTypeForProfileEntry(CORBBTPROF_BLOB_PARAM_SIG_ENTRY * profileBlobEntry);
+ CORINFO_METHOD_HANDLE FindMethodForProfileEntry(CORBBTPROF_BLOB_PARAM_SIG_ENTRY * profileBlobEntry);
+
+ void ReportInlining(CORINFO_METHOD_HANDLE inliner, CORINFO_METHOD_HANDLE inlinee);
+
+ void Link();
+ void FixupRVAs();
+
+ void SetRVAsForFields(IMetaDataEmit * pEmit);
+
+ void GetRVAFieldData(mdFieldDef fd, PVOID * ppData, DWORD * pcbSize, DWORD * pcbAlignment);
+
+ ULONG Release();
+
+ void Error(mdToken token, Exception * pException);
+};
+
+
+struct RefCache
+{
+ RefCache(Module *pModule)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+
+ m_pModule = pModule;
+
+ {
+ // HashMap::Init can throw due to OOM. Our ctor can't. Since this whole
+ // thing is for use inside CEECompileInfo methods, it doesn't make sense to
+ // use an exception model. Thus we probably have to move the hashmap init
+            // calls out of the ctor so we can catch these exceptions and translate them to
+            // HRESULTs.
+ //
+ CONTRACT_VIOLATION(ThrowsViolation|FaultViolation);
+
+ m_sAssemblyRefMap.Init(FALSE,NULL);
+ }
+ }
+
+ Module *m_pModule;
+
+ HashMap m_sAssemblyRefMap;
+};
+
+struct AssemblySpecDefRefMapEntry {
+ AssemblySpec * m_pDef;
+ AssemblySpec * m_pRef;
+};
+
+class AssemblySpecDefRefMapTraits : public NoRemoveSHashTraits<DefaultSHashTraits<AssemblySpecDefRefMapEntry> >
+{
+public:
+ typedef const AssemblySpec *key_t;
+ static const AssemblySpec * GetKey(const AssemblySpecDefRefMapEntry &e) { return e.m_pDef; }
+
+ static count_t Hash(const AssemblySpec * k)
+ {
+ return const_cast<AssemblySpec *>(k)->Hash();
+ }
+
+ static BOOL Equals(const AssemblySpec * lhs, const AssemblySpec * rhs)
+ {
+ return const_cast<AssemblySpec *>(lhs)->CompareEx(const_cast<AssemblySpec *>(rhs), AssemblySpec::ASC_DefinitionEquality);
+ }
+
+ static const AssemblySpecDefRefMapEntry Null() { AssemblySpecDefRefMapEntry e; e.m_pDef = NULL; return e; }
+ static bool IsNull(const AssemblySpecDefRefMapEntry &e) { return e.m_pDef == NULL; }
+
+ void OnDestructPerEntryCleanupAction(const AssemblySpecDefRefMapEntry& e)
+ {
+ WRAPPER_NO_CONTRACT;
+ delete e.m_pDef;
+ delete e.m_pRef;
+ }
+ static const bool s_DestructPerEntryCleanupAction = true;
+};
+
+typedef SHash<AssemblySpecDefRefMapTraits> AssemblySpecMapDefRefMapTable;
+
+class CompilationDomain : public AppDomain,
+ public ICorCompilationDomain
+{
+#ifndef FEATURE_CORECLR
+ VPTR_MULTI_VTABLE_CLASS(CompilationDomain, AppDomain);
+#endif
+
+ public:
+ BOOL m_fForceDebug;
+ BOOL m_fForceProfiling;
+ BOOL m_fForceInstrument;
+
+ // TODO: During ngen, we need to determine whether we can call NeedsRestore
+ // before the preloader has been initialized. This is accomplished via this
+ // method. This code needs to be cleaned up. See bug #284709 for background.
+    BOOL canCallNeedsRestore() { return (m_pTargetImage != NULL); }
+
+ // DDB 175659: Make sure that canCallNeedsRestore() returns FALSE during compilation
+ // domain shutdown.
+ void setCannotCallNeedsRestore() { m_pTargetImage = NULL; }
+
+ private:
+
+ Assembly *m_pTargetAssembly; // Assembly being compiled
+ Module *m_pTargetModule; // Module currently being compiled. Needed for multi-module assemblies
+ DataImage *m_pTargetImage; // Data image
+ CEEPreloader *m_pTargetPreloader;
+
+ ReleaseHolder<IMetaDataAssemblyEmit> m_pEmit;
+
+ NewHolder<AssemblySpecHash> m_pDependencyRefSpecs;
+
+ AssemblySpecMapDefRefMapTable m_dependencyDefRefMap;
+
+ CORCOMPILE_DEPENDENCY *m_pDependencies;
+ USHORT m_cDependenciesCount, m_cDependenciesAlloc;
+
+ CQuickArray<RefCache*> m_rRefCaches;
+
+ HRESULT AddDependencyEntry(PEAssembly *pFile, mdAssemblyRef ref,mdAssemblyRef def);
+ void ReleaseDependencyEmitter();
+
+#ifndef FEATURE_CORECLR // hardbinding
+ PtrHashMap m_hardBoundModules; // Hard dependency on native image of these dependency modules
+ PtrHashMap m_cantHardBindModules;
+ void UpdateDependencyEntryForHardBind(PEAssembly * pDependencyAssembly);
+ void IncludeHardBindClosure(PEAssembly * pDependencyAssembly);
+ void CheckHardBindToZapFile(SString dependencyNameFromCustomAttribute);
+ void CheckLoadHints();
+#endif
+
+ public:
+
+#ifndef DACCESS_COMPILE
+ CompilationDomain(BOOL fForceDebug = FALSE,
+ BOOL fForceProfiling = FALSE,
+ BOOL fForceInstrument = FALSE);
+ ~CompilationDomain();
+#endif
+
+ void Init(
+#ifdef MDIL
+ MDILCompilationFlags mdilCompilationFlags
+#endif
+ );
+
+ HRESULT AddDependency(AssemblySpec *pRefSpec, PEAssembly *pFile);
+
+ AssemblySpec* FindAssemblyRefSpecForDefSpec(
+ AssemblySpec* pDefSpec);
+
+ PEAssembly *BindAssemblySpec(
+ AssemblySpec *pSpec,
+ BOOL fThrowOnFileNotFound,
+ BOOL fRaisePrebindEvents,
+ StackCrawlMark *pCallerStackMark = NULL,
+ AssemblyLoadSecurity *pLoadSecurity = NULL,
+ BOOL fUseHostBinderIfAvailable = TRUE) DAC_EMPTY_RET(NULL);
+
+ BOOL CanEagerBindToZapFile(Module *targetModule, BOOL limitToHardBindList = TRUE);
+
+#ifndef FEATURE_CORECLR // hardbinding
+ PtrHashMap::PtrIterator IterateHardBoundModules();
+
+ // List of full display names of assemblies to hard-bind to
+ SArray<SString,FALSE> m_assemblyHardBindList;
+ BOOL m_useHardBindList;
+ BOOL IsInHardBindRequestList(Assembly * pAssembly);
+ BOOL IsInHardBindRequestList(PEAssembly * pAssembly);
+ BOOL IsSafeToHardBindTo(PEAssembly * pAssembly);
+
+ void SetAssemblyHardBindList(
+ __in_ecount( cHardBindList )
+ LPWSTR *pHardBindList,
+ DWORD cHardBindList);
+#endif
+
+#if defined(CROSSGEN_COMPILE) && !defined(FEATURE_CORECLR)
+ void ComputeAssemblyHardBindList(IMDInternalImport * pImport);
+ BOOL IsInHardBindList(SString& simpleName);
+
+ static BOOL FindImage(const SString& fileName, MDInternalImportFlags flags, PEImage ** ppImage);
+#endif
+
+ // Returns NULL on out-of-memory
+ RefCache *GetRefCache(Module *pModule)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return NULL;);
+ }
+ CONTRACTL_END
+
+ unsigned uSize = (unsigned) m_rRefCaches.Size();
+ for (unsigned i = 0; i < uSize; i++)
+ if (m_rRefCaches[i]->m_pModule == pModule)
+ return m_rRefCaches[i];
+
+ // Add a new cache entry
+ HRESULT hr;
+
+ if (FAILED(hr = m_rRefCaches.ReSizeNoThrow(uSize + 1)))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return NULL;
+ }
+
+ m_rRefCaches[uSize] = new (nothrow) RefCache(pModule);
+ return m_rRefCaches[uSize];
+ }
+
+ void SetTarget(Assembly * pAssembly, Module *pModule);
+
+ void SetTargetImage(DataImage * pImage, CEEPreloader * pPreloader);
+ DataImage * GetTargetImage() { LIMITED_METHOD_CONTRACT; return m_pTargetImage; }
+
+ Assembly * GetTargetAssembly()
+ { LIMITED_METHOD_CONTRACT; return m_pTargetAssembly; }
+ Module * GetTargetModule()
+ { LIMITED_METHOD_CONTRACT; return m_pTargetModule; }
+
+ // ICorCompilationDomain
+
+ HRESULT SetContextInfo(LPCWSTR exePath, BOOL isExe) DAC_EMPTY_RET(E_FAIL);
+ HRESULT GetDependencies(CORCOMPILE_DEPENDENCY **ppDependencies,
+ DWORD *cDependencies) DAC_EMPTY_RET(E_FAIL);
+#ifdef FEATURE_FUSION
+ HRESULT GetIBindContext(IBindContext **ppBindCtx) DAC_EMPTY_RET(E_FAIL);
+#endif
+
+#ifdef CROSSGEN_COMPILE
+ HRESULT SetPlatformWinmdPaths(LPCWSTR pwzPlatformWinmdPaths) DAC_EMPTY_RET(E_FAIL);
+#endif
+
+ void SetDependencyEmitter(IMetaDataAssemblyEmit *pEmitter);
+};
+
+#endif // COMPILE_H_
diff --git a/src/vm/comreflectioncache.hpp b/src/vm/comreflectioncache.hpp
new file mode 100644
index 0000000000..f99b89b1d6
--- /dev/null
+++ b/src/vm/comreflectioncache.hpp
@@ -0,0 +1,270 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef __COMReflectionCache_hpp__
+#define __COMReflectionCache_hpp__
+
+#include <stddef.h>
+#include "class.h"
+#include "threads.h"
+#include "simplerwlock.hpp"
+
+class ReflectClass;
+
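+// ReflectionCache is a small fixed-size cache guarded by a single reader/writer
+// lock (SimpleRWLock). Each cached entry carries a "stamp" that is bumped on
+// every cache hit; once the cache is full, AddToCache reuses the slot with the
+// smallest stamp, giving an approximate LRU eviction policy. AdjustStamp halves
+// all stamps when the shared counter gets large so that it never overflows.
+// Lookups first consult a direct-mapped hash table and then fall back to a
+// linear scan over slots whose recorded hash matches.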
+template <class Element, class CacheType, int CacheSize> class ReflectionCache
+: public SimpleRWLock
+{
+public:
+ ReflectionCache()
+ : SimpleRWLock(COOPERATIVE, LOCK_REFLECTCACHE)
+ {
+ WRAPPER_NO_CONTRACT;
+ index = 0;
+ currentStamp = 0;
+ }
+
+ void Init();
+
+ BOOL GetFromCache(Element *pElement, CacheType& rv)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pElement));
+ }
+ CONTRACTL_END;
+
+ this->EnterRead();
+
+ rv = 0;
+ int i = SlotInCache(pElement);
+ BOOL fGotIt = (i != CacheSize);
+ if (fGotIt)
+ {
+ rv = m_pResult[i].element.GetValue();
+ m_pResult[i].stamp = InterlockedIncrement(&currentStamp);
+ }
+ this->LeaveRead();
+
+ if (fGotIt)
+ AdjustStamp(FALSE);
+
+ return fGotIt;
+ }
+
+ void AddToCache(Element *pElement, CacheType obj)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pElement));
+ }
+ CONTRACTL_END;
+
+ this->EnterWrite();
+        currentStamp++; // no need for InterlockedIncrement because the write lock is taken
+ int i = SlotInCache(pElement);
+ if (i == CacheSize)
+ {
+ int slot = index;
+ // Not in cache.
+ if (slot == CacheSize)
+ {
+ // Reuse a slot.
+ slot = 0;
+ LONG minStamp = m_pResult[0].stamp;
+ for (i = 1; i < CacheSize; i ++)
+ {
+ if (m_pResult[i].stamp < minStamp)
+ {
+ slot = i;
+ minStamp = m_pResult[i].stamp;
+ }
+ }
+ }
+ else
+ m_pResult[slot].element.InitValue();
+
+ m_pResult[slot].element = *pElement;
+ m_pResult[slot].element.SetValue(obj);
+ m_pResult[slot].stamp = currentStamp;
+
+ UpdateHashTable(pElement->GetHash(), slot);
+
+ if (index < CacheSize)
+ index++;
+ }
+ AdjustStamp(TRUE);
+ this->LeaveWrite();
+ }
+
+private:
+ // Lock must have been taken before calling this.
+ int SlotInCache(Element *pElement)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(LockTaken());
+ PRECONDITION(CheckPointer(pElement));
+ }
+ CONTRACTL_END;
+
+ if (index == 0)
+ return CacheSize;
+
+ size_t hash = pElement->GetHash();
+
+ int slot = m_pHashTable[hash%CacheSize].slot;
+ if (slot == -1)
+ return CacheSize;
+
+ if (m_pResult[slot].element == *pElement)
+ return slot;
+
+ for (int i = 0; i < index; i ++)
+ {
+ if (i != slot && m_pHashTable[i].hash == hash)
+ {
+ if (m_pResult[i].element == *pElement)
+ return i;
+ }
+ }
+
+ return CacheSize;
+ }
+
+ void AdjustStamp(BOOL hasWriterLock)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
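+        // Once the shared stamp counter reaches bit 30, halve every stamp.
+        // This prevents overflow while preserving the relative ordering that
+        // the approximate-LRU eviction in AddToCache relies on.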
+ if ((currentStamp & 0x40000000) == 0)
+ return;
+ if (!hasWriterLock)
+ {
+ _ASSERTE (!LockTaken());
+ this->EnterWrite();
+ }
+ else
+ _ASSERTE (IsWriterLock());
+
+ if (currentStamp & 0x40000000)
+ {
+ currentStamp >>= 1;
+ for (int i = 0; i < index; i ++)
+ m_pResult[i].stamp >>= 1;
+ }
+ if (!hasWriterLock)
+ this->LeaveWrite();
+ }
+
+ void UpdateHashTable(SIZE_T hash, int slot)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsWriterLock());
+ }
+ CONTRACTL_END;
+
+ m_pHashTable[slot].hash = hash;
+ m_pHashTable[hash%CacheSize].slot = slot;
+ }
+
+ struct CacheTable
+ {
+ Element element;
+ int stamp;
+ } *m_pResult;
+
+ struct HashTable
+ {
+ size_t hash; // Record hash value for each slot
+ int slot; // The slot corresponding to the hash.
+ } *m_pHashTable;
+
+ int index;
+ LONG currentStamp;
+};
+
+
+#ifdef FEATURE_COMINTEROP
+
+#define ReflectionMaxCachedNameLength 23
+struct DispIDCacheElement
+{
+ MethodTable *pMT;
+ int strNameLength;
+ LCID lcid;
+ DISPID DispId;
+ WCHAR strName[ReflectionMaxCachedNameLength+1];
+ DispIDCacheElement ()
+ : pMT (NULL), strNameLength(0), lcid(0), DispId(0)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ BOOL operator==(const DispIDCacheElement& var) const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (pMT == var.pMT && strNameLength == var.strNameLength
+ && lcid == var.lcid && wcscmp (strName, var.strName) == 0);
+ }
+
+ DispIDCacheElement& operator= (const DispIDCacheElement& var)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE (var.strNameLength <= ReflectionMaxCachedNameLength);
+ pMT = var.pMT;
+ strNameLength = var.strNameLength;
+ lcid = var.lcid;
+ wcscpy_s (strName, COUNTOF(strName), var.strName);
+ return *this;
+ }
+
+ DISPID GetValue ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return DispId;
+ }
+
+ void InitValue ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ void SetValue (DISPID Id)
+ {
+ LIMITED_METHOD_CONTRACT;
+ DispId = Id;
+ }
+
+ size_t GetHash ()
+ {
+ LIMITED_METHOD_CONTRACT;
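+        // Cheap mix of the identity fields; hash collisions are resolved by
+        // the full operator== comparison, which also compares the name.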
+ return (size_t)pMT + strNameLength + (lcid << 4);
+ }
+};
+
+typedef ReflectionCache <DispIDCacheElement, DISPID, 128> DispIDCache;
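+
+// A minimal usage sketch for the cache above (hypothetical caller; real call
+// sites also check that the member name fits in ReflectionMaxCachedNameLength
+// and resolve the DISPID through the COM interface on a miss):
+//
+//     DispIDCacheElement elem;
+//     elem.pMT = pMT;
+//     elem.strNameLength = (int)wcslen(wszMember);
+//     elem.lcid = lcid;
+//     wcscpy_s(elem.strName, COUNTOF(elem.strName), wszMember);
+//
+//     DISPID dispid;
+//     if (!pCache->GetFromCache(&elem, dispid))
+//     {
+//         // ... resolve dispid, e.g. via IDispatch::GetIDsOfNames ...
+//         pCache->AddToCache(&elem, dispid);
+//     }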
+
+#endif // FEATURE_COMINTEROP
+
+#endif // __COMReflectionCache_hpp__
diff --git a/src/vm/comreflectioncache.inl b/src/vm/comreflectioncache.inl
new file mode 100644
index 0000000000..df64b2ecbd
--- /dev/null
+++ b/src/vm/comreflectioncache.inl
@@ -0,0 +1,32 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#ifndef __COMReflectionCache_inl__
+#define __COMReflectionCache_inl__
+
+#ifndef DACCESS_COMPILE
+
+template <class Element, class CacheType, int CacheSize>
+void ReflectionCache<Element, CacheType, CacheSize>::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pResult = (CacheTable *)(void *) ::GetAppDomain()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(CacheSize) * S_SIZE_T(sizeof(CacheTable)));
+ m_pHashTable = (HashTable *)(void *) ::GetAppDomain()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(CacheSize) * S_SIZE_T(sizeof(HashTable)));
+
+
+ for (int i = 0; i < CacheSize; i ++)
+ m_pHashTable[i].slot = -1;
+}
+
+#endif //!DACCESS_COMPILE
+
+#endif // __COMReflectionCache_inl__
diff --git a/src/vm/comsynchronizable.cpp b/src/vm/comsynchronizable.cpp
new file mode 100644
index 0000000000..a4c08af0f6
--- /dev/null
+++ b/src/vm/comsynchronizable.cpp
@@ -0,0 +1,2243 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*============================================================
+**
+** Header: COMSynchronizable.cpp
+**
+** Purpose: Native methods on System.SynchronizableObject
+** and its subclasses.
+**
+**
+===========================================================*/
+
+#include "common.h"
+
+#include <object.h>
+#include "threads.h"
+#include "excep.h"
+#include "vars.hpp"
+#include "field.h"
+#include "security.h"
+#include "comsynchronizable.h"
+#include "dbginterface.h"
+#include "comdelegate.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "eeconfig.h"
+#include "stackcompressor.h"
+#ifdef FEATURE_REMOTING
+#include "appdomainhelper.h"
+#include "objectclone.h"
+#else
+#include "callhelpers.h"
+#endif
+#include "appdomain.hpp"
+#include "appdomain.inl"
+#ifdef FEATURE_REMOTING
+#include "crossdomaincalls.h"
+#endif
+
+#include "newapis.h"
+
+// To include definition of CAPTURE_BUCKETS_AT_TRANSITION
+#include "exstate.h"
+
+// The two threads need to communicate some information. Any object references must
+// be reported to the GC, hence the handles below.
+struct SharedState
+{
+ OBJECTHANDLE m_Threadable;
+ OBJECTHANDLE m_ThreadStartArg;
+ Thread *m_Internal;
+ OBJECTHANDLE m_Principal;
+
+ SharedState(OBJECTREF threadable, OBJECTREF threadStartArg, Thread *internal, OBJECTREF principal)
+ {
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ THROWS; // From CreateHandle()
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ AppDomainFromIDHolder ad(internal->GetKickOffDomainId(), TRUE);
+ if (ad.IsUnloaded())
+ COMPlusThrow(kAppDomainUnloadedException);
+
+ m_Threadable = ad->CreateHandle(threadable);
+ m_ThreadStartArg = ad->CreateHandle(threadStartArg);
+
+ m_Internal = internal;
+
+ m_Principal = ad->CreateHandle(principal);
+ }
+
+ ~SharedState()
+ {
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // It's important to have no GC rendez-vous point between the checking and the clean-up below.
+ // The three handles below could be in an appdomain which is just starting to be unloaded, or an appdomain
+ // which has been unloaded already. Thus, we need to check whether the appdomain is still valid before
+ // we do the clean-up. Since we suspend all runtime threads when we try to do the unload, there will be no
+ // race condition between the checking and the clean-up as long as this thread cannot be suspended in between.
+ AppDomainFromIDHolder ad(m_Internal->GetKickOffDomainId(), TRUE);
+ if (!ad.IsUnloaded())
+ {
+ DestroyHandle(m_Threadable);
+ DestroyHandle(m_ThreadStartArg);
+ DestroyHandle(m_Principal);
+ }
+ }
+};
+
+
+// For the following helpers, we make no attempt to synchronize. The app developer
+// is responsible for managing his own race conditions.
+//
+// Note: if the internal Thread is NULL, this implies that the exposed object has
+// finalized and then been resurrected.
+static inline BOOL ThreadNotStarted(Thread *t)
+{
+ WRAPPER_NO_CONTRACT;
+ return (t && t->IsUnstarted() && !t->HasValidThreadHandle());
+}
+
+static inline BOOL ThreadIsRunning(Thread *t)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ return (t &&
+ (t->m_State & (Thread::TS_ReportDead|Thread::TS_Dead)) == 0 &&
+ (CLRTaskHosted()? t->GetHostTask()!=NULL:t->HasValidThreadHandle()));
+#else // !FEATURE_INCLUDE_ALL_INTERFACES
+ return (t &&
+ (t->m_State & (Thread::TS_ReportDead|Thread::TS_Dead)) == 0 &&
+ (t->HasValidThreadHandle()));
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+
+static inline BOOL ThreadIsDead(Thread *t)
+{
+ WRAPPER_NO_CONTRACT;
+ return (t == 0 || t->IsDead());
+}
+
+
+// Map our exposed notion of thread priorities into the enumeration that NT uses.
+static INT32 MapToNTPriority(INT32 ours)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ INT32 NTPriority = 0;
+
+ switch (ours)
+ {
+ case ThreadNative::PRIORITY_LOWEST:
+ NTPriority = THREAD_PRIORITY_LOWEST;
+ break;
+
+ case ThreadNative::PRIORITY_BELOW_NORMAL:
+ NTPriority = THREAD_PRIORITY_BELOW_NORMAL;
+ break;
+
+ case ThreadNative::PRIORITY_NORMAL:
+ NTPriority = THREAD_PRIORITY_NORMAL;
+ break;
+
+ case ThreadNative::PRIORITY_ABOVE_NORMAL:
+ NTPriority = THREAD_PRIORITY_ABOVE_NORMAL;
+ break;
+
+ case ThreadNative::PRIORITY_HIGHEST:
+ NTPriority = THREAD_PRIORITY_HIGHEST;
+ break;
+
+ default:
+ COMPlusThrow(kArgumentOutOfRangeException, W("Argument_InvalidFlag"));
+ }
+ return NTPriority;
+}
+
+
+// Map to our exposed notion of thread priorities from the enumeration that NT uses.
+INT32 MapFromNTPriority(INT32 NTPriority)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ INT32 ours = 0;
+
+ if (NTPriority <= THREAD_PRIORITY_LOWEST)
+ {
+ // managed code does not support IDLE. Map it to PRIORITY_LOWEST.
+ ours = ThreadNative::PRIORITY_LOWEST;
+ }
+ else if (NTPriority >= THREAD_PRIORITY_HIGHEST)
+ {
+ ours = ThreadNative::PRIORITY_HIGHEST;
+ }
+ else if (NTPriority == THREAD_PRIORITY_BELOW_NORMAL)
+ {
+ ours = ThreadNative::PRIORITY_BELOW_NORMAL;
+ }
+ else if (NTPriority == THREAD_PRIORITY_NORMAL)
+ {
+ ours = ThreadNative::PRIORITY_NORMAL;
+ }
+ else if (NTPriority == THREAD_PRIORITY_ABOVE_NORMAL)
+ {
+ ours = ThreadNative::PRIORITY_ABOVE_NORMAL;
+ }
+ else
+ {
+ _ASSERTE (!"not supported priority");
+ ours = ThreadNative::PRIORITY_NORMAL;
+ }
+ return ours;
+}
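+// Note: the two maps above are inverses on the five managed priority levels;
+// OS-only levels clamp on the way in, e.g. THREAD_PRIORITY_IDLE maps to
+// PRIORITY_LOWEST and THREAD_PRIORITY_TIME_CRITICAL maps to PRIORITY_HIGHEST.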
+
+
+void ThreadNative::KickOffThread_Worker(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ KickOffThread_Args *args = (KickOffThread_Args *) ptr;
+ _ASSERTE(ObjectFromHandle(args->share->m_Threadable) != NULL);
+ args->retVal = 0;
+
+ // we are saving the delegate and result primarily for debugging
+ struct _gc
+ {
+ OBJECTREF orPrincipal;
+ OBJECTREF orThreadStartArg;
+ OBJECTREF orDelegate;
+ OBJECTREF orResult;
+ OBJECTREF orThread;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ Thread *pThread;
+ pThread = GetThread();
+ _ASSERTE(pThread);
+ GCPROTECT_BEGIN(gc);
+ BEGIN_SO_INTOLERANT_CODE(pThread);
+
+ gc.orPrincipal = ObjectFromHandle(args->share->m_Principal);
+
+#ifdef FEATURE_IMPERSONATION
+ // Push the initial security principal object (if any) onto the
+ // managed thread.
+ if (gc.orPrincipal != NULL)
+ {
+ gc.orThread = args->pThread->GetExposedObject();
+ MethodDescCallSite setPrincipalInternal(METHOD__THREAD__SET_PRINCIPAL_INTERNAL, &gc.orThread);
+ ARG_SLOT argsToSetPrincipal[2];
+ argsToSetPrincipal[0] = ObjToArgSlot(gc.orThread);
+ argsToSetPrincipal[1] = ObjToArgSlot(gc.orPrincipal);
+ setPrincipalInternal.Call(argsToSetPrincipal);
+ }
+#endif
+
+ gc.orDelegate = ObjectFromHandle(args->share->m_Threadable);
+ gc.orThreadStartArg = ObjectFromHandle(args->share->m_ThreadStartArg);
+
+ // We cannot call the Delegate Invoke method directly from ECall. The
+ // stub has not been created for non multicast delegates. Instead, we
+ // will invoke the Method on the OR stored in the delegate directly.
+ // If there are changes to the signature of the ThreadStart delegate
+ // this code will need to change. I've noted this in the Thread start
+ // class.
+
+ delete args->share;
+ args->share = 0;
+
+ MethodDesc *pMeth = ((DelegateEEClass*)( gc.orDelegate->GetMethodTable()->GetClass() ))->m_pInvokeMethod;
+ _ASSERTE(pMeth);
+ MethodDescCallSite invokeMethod(pMeth, &gc.orDelegate);
+
+ if (MscorlibBinder::IsClass(gc.orDelegate->GetMethodTable(), CLASS__PARAMETERIZEDTHREADSTART))
+ {
+ //Parameterized ThreadStart
+ ARG_SLOT arg[2];
+
+ arg[0] = ObjToArgSlot(gc.orDelegate);
+ arg[1] = ObjToArgSlot(gc.orThreadStartArg);
+ invokeMethod.Call(arg);
+ }
+ else
+ {
+ //Simple ThreadStart
+ ARG_SLOT arg[1];
+
+ arg[0] = ObjToArgSlot(gc.orDelegate);
+ invokeMethod.Call(arg);
+ }
+ STRESS_LOG2(LF_SYNC, LL_INFO10, "Managed thread exiting normally for delegate %p Type %pT\n", OBJECTREFToObject(gc.orDelegate), (size_t) gc.orDelegate->GetMethodTable());
+
+ END_SO_INTOLERANT_CODE;
+ GCPROTECT_END();
+}
+
+// Helper to avoid two EX_TRY/EX_CATCH blocks in one function
+static void PulseAllHelper(Thread* pThread)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ DISABLED(NOTHROW);
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ // GetExposedObject() will either throw, or we have a valid object. Note
+ // that we re-acquire it each time, since it may move during calls.
+ pThread->GetExposedObject()->EnterObjMonitor();
+ pThread->GetExposedObject()->PulseAll();
+ pThread->GetExposedObject()->LeaveObjMonitor();
+ }
+ EX_CATCH
+ {
+ // just keep going...
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+// When an exposed thread is started by Win32, this is where it starts.
+ULONG __stdcall ThreadNative::KickOffThread(void* pass)
+{
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ ULONG retVal = 0;
+ // Before we do anything else, get Setup so that we have a real thread.
+
+ // Our thread isn't setup yet, so we can't use the standard probe
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return E_FAIL);
+
+ KickOffThread_Args args;
+ // don't have a separate variable because this can be updated in the worker
+ args.share = (SharedState *) pass;
+ args.pThread = args.share->m_Internal;
+
+ Thread* pThread = args.pThread;
+
+ _ASSERTE(pThread != NULL);
+
+ BOOL ok = TRUE;
+
+ {
+ EX_TRY
+ {
+ CExecutionEngine::CheckThreadState(0);
+ }
+ EX_CATCH
+ {
+ // OOM might be thrown from CheckThreadState, so it's important
+ // that we don't rethrow it; if we do then the process will die
+ // because there are no installed handlers at this point, so
+ // swallow the exception. this will set the thread's state to
+ // FailStarted which will result in a ThreadStartException being
+ // thrown from the thread that attempted to start this one.
+ if (!GET_EXCEPTION()->IsTransient() && !SwallowUnhandledExceptions())
+ EX_RETHROW;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ if (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL)
+ {
+ // We cannot proceed without per-thread state; mark the start as failed.
+ pThread->SetThreadState(Thread::TS_FailStarted);
+ pThread->DetachThread(FALSE);
+ // !!! Do not touch any field of Thread object. The Thread object is subject to delete
+ // !!! after DetachThread call.
+ ok = FALSE;
+ }
+ }
+
+ if (ok)
+ {
+ ok = pThread->HasStarted();
+ }
+
+ if (ok)
+ {
+ // Do not swallow the unhandled exception here
+ //
+
+ // Fire ETW event to correlate with the thread that created current thread
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadRunning))
+ FireEtwThreadRunning(pThread, GetClrInstanceId());
+
+ // We have a sticky problem here.
+ //
+ // Under some circumstances, the context of 'this' doesn't match the context
+ // of the thread. Today this can only happen if the thread is marked for an
+ // STA. If so, the delegate that is stored in the object may not be directly
+ // suitable for invocation. Instead, we need to call through a proxy so that
+ // the correct context transitions occur.
+ //
+ // All the changes occur inside HasStarted(), which will switch this thread
+ // over to a brand new STA as necessary. We have to notice this happening, so
+ // we can adjust the delegate we are going to invoke on.
+
+ _ASSERTE(GetThread() == pThread); // Now that it's started
+ ManagedThreadBase::KickOff(pThread->GetKickOffDomainId(), KickOffThread_Worker, &args);
+
+ // If TS_FailStarted is set then the args are deleted in ThreadNative::StartInner
+ if ((args.share) && !pThread->HasThreadState(Thread::TS_FailStarted))
+ {
+ delete args.share;
+ }
+
+ PulseAllHelper(pThread);
+
+ GCX_PREEMP_NO_DTOR();
+
+ pThread->ClearThreadCPUGroupAffinity();
+
+ DestroyThread(pThread);
+ }
+
+ END_SO_INTOLERANT_CODE;
+
+ return retVal;
+}
+
+
+FCIMPL3(void, ThreadNative::Start, ThreadBaseObject* pThisUNSAFE, Object* pPrincipalUNSAFE, StackCrawlMark* pStackMark)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ StartInner(pThisUNSAFE, pPrincipalUNSAFE, pStackMark);
+
+ HELPER_METHOD_FRAME_END_POLL();
+}
+FCIMPLEND
+
+// Start up a thread, which by now should be in the ThreadStore's Unstarted list.
+void ThreadNative::StartInner(ThreadBaseObject* pThisUNSAFE, Object* pPrincipalUNSAFE, StackCrawlMark* pStackMark)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ struct _gc
+ {
+ OBJECTREF pPrincipal;
+ THREADBASEREF pThis;
+ } gc;
+
+ gc.pPrincipal = (OBJECTREF) pPrincipalUNSAFE;
+ gc.pThis = (THREADBASEREF) pThisUNSAFE;
+
+ GCPROTECT_BEGIN(gc);
+
+ if (gc.pThis == NULL)
+ COMPlusThrow(kNullReferenceException, W("NullReference_This"));
+
+ Thread *pNewThread = gc.pThis->GetInternal();
+ if (pNewThread == NULL)
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
+
+ _ASSERTE(GetThread() != NULL); // Current thread wandered in!
+
+ gc.pThis->EnterObjMonitor();
+
+ EX_TRY
+ {
+ // Is the thread already started? You can't restart a thread.
+ if (!ThreadNotStarted(pNewThread))
+ {
+ COMPlusThrow(kThreadStateException, IDS_EE_THREADSTART_STATE);
+ }
+
+ OBJECTREF threadable = gc.pThis->GetDelegate();
+ OBJECTREF threadStartArg = gc.pThis->GetThreadStartArg();
+ gc.pThis->SetDelegate(NULL);
+ gc.pThis->SetThreadStartArg(NULL);
+
+ // This can never happen, because we construct it with a valid one and then
+ // we never let you change it (because SetStart is private).
+ _ASSERTE(threadable != NULL);
+
+ // Allocate this away from our stack, so we can unwind without affecting
+ // KickOffThread. It is inside a GCFrame, so we can enable GC now.
+ NewHolder<SharedState> share(new SharedState(threadable, threadStartArg, pNewThread, gc.pPrincipal));
+
+ pNewThread->IncExternalCount();
+
+ // Fire an ETW event to mark the current thread as the launcher of the new thread
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadCreating))
+ FireEtwThreadCreating(pNewThread, GetClrInstanceId());
+
+ // As soon as we create the new thread, it is eligible for suspension, etc.
+ // So it gets transitioned to cooperative mode before this call returns to
+ // us. It is our duty to start it running immediately, so that GC isn't blocked.
+
+ BOOL success = pNewThread->CreateNewThread(
+ pNewThread->RequestedThreadStackSize() /* 0 stackSize override*/,
+ KickOffThread, share);
+
+ if (!success)
+ {
+ pNewThread->DecExternalCount(FALSE);
+ COMPlusThrowOM();
+ }
+
+ // After we have established the thread handle, we can check m_Priority.
+ // This ordering is required to eliminate the race condition on setting the
+ // priority of a thread just as it starts up.
+ pNewThread->SetThreadPriority(MapToNTPriority(gc.pThis->m_Priority));
+ pNewThread->ChooseThreadCPUGroupAffinity();
+
+ FastInterlockOr((ULONG *) &pNewThread->m_State, Thread::TS_LegalToJoin);
+
+ DWORD ret;
+ ret = pNewThread->StartThread();
+
+ // When running under a user mode native debugger there is a race
+ // between the moment we've created the thread (in CreateNewThread) and
+ // the moment we resume it (in StartThread); the debugger may receive
+ // the "ct" (create thread) notification, and it will attempt to
+ // suspend/resume all threads in the process. Now imagine the debugger
+ // resumes this thread first, and only later does it try to resume the
+ // newly created thread. In these conditions our call to ResumeThread
+ // may come before the debugger's call to ResumeThread actually causing
+ // ret to equal 2.
+ // We cannot use IsDebuggerPresent() in the condition below because the
+ // debugger may have been detached between the time it got the notification
+ // and the moment we execute the test below.
+ _ASSERTE(ret == 1 || ret == 2);
+
+ {
+ GCX_PREEMP();
+
+ // Synchronize with HasStarted.
+ YIELD_WHILE (!pNewThread->HasThreadState(Thread::TS_FailStarted) &&
+ pNewThread->HasThreadState(Thread::TS_Unstarted));
+ }
+
+ if (!pNewThread->HasThreadState(Thread::TS_FailStarted))
+ {
+ share.SuppressRelease(); // we have handed off ownership of the shared struct
+ }
+ else
+ {
+ share.Release();
+ PulseAllHelper(pNewThread);
+ pNewThread->HandleThreadStartupFailure();
+ }
+ }
+ EX_CATCH
+ {
+ gc.pThis->LeaveObjMonitor();
+ EX_RETHROW;
+ }
+ EX_END_CATCH_UNREACHABLE;
+
+ gc.pThis->LeaveObjMonitor();
+
+ GCPROTECT_END();
+}
+
+FCIMPL1(void, ThreadNative::Abort, ThreadBaseObject* pThis)
+{
+ FCALL_CONTRACT;
+
+ if (pThis == NULL)
+ FCThrowVoid(kNullReferenceException);
+
+ THREADBASEREF thisRef(pThis);
+ // We need to keep the managed Thread object alive so that we can call UserAbort on
+ // unmanaged thread object.
+ HELPER_METHOD_FRAME_BEGIN_1(thisRef);
+
+ Thread *thread = thisRef->GetInternal();
+ if (thread == NULL)
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
+#ifdef _DEBUG
+ DWORD testAbort = g_pConfig->GetHostTestThreadAbort();
+ if (testAbort != 0) {
+ thread->UserAbort(Thread::TAR_Thread, testAbort == 1 ? EEPolicy::TA_Safe : EEPolicy::TA_Rude, INFINITE, Thread::UAC_Normal);
+ }
+ else
+#endif
+ thread->UserAbort(Thread::TAR_Thread, EEPolicy::TA_V1Compatible, INFINITE, Thread::UAC_Normal);
+
+ if (thread->CatchAtSafePoint())
+ CommonTripThread();
+ HELPER_METHOD_FRAME_END_POLL();
+}
+FCIMPLEND
+
+FCIMPL1(void, ThreadNative::ResetAbort, ThreadBaseObject* pThis)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(pThis);
+ VALIDATEOBJECT(pThis);
+
+ Thread *thread = pThis->GetInternal();
+ // We do not allow user to reset rude thread abort in MustRun code.
+ if (thread && thread->IsRudeAbort())
+ {
+ return;
+ }
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ if (thread == NULL)
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
+ thread->UserResetAbort(Thread::TAR_Thread);
+ thread->ClearAborted();
+ HELPER_METHOD_FRAME_END_POLL();
+}
+FCIMPLEND
+
+#ifndef FEATURE_CORECLR
+// You can only suspend a running thread.
+FCIMPL1(void, ThreadNative::Suspend, ThreadBaseObject* pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ if (pThisUNSAFE == NULL)
+ FCThrowResVoid(kNullReferenceException, W("NullReference_This"));
+
+ Thread *thread = pThisUNSAFE->GetInternal();
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+#ifdef MDA_SUPPORTED
+ MDA_TRIGGER_ASSISTANT(DangerousThreadingAPI, ReportViolation(W("System.Threading.Thread.Suspend")));
+#endif
+
+ if (!ThreadIsRunning(thread))
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_SUSPEND_NON_RUNNING);
+
+ thread->UserSuspendThread();
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+// You can only resume a thread that is in the user-suspended state. (This puts a large
+// burden on the app developer, but we want him to think carefully about race
+// conditions. Precise errors give him a chance of sorting out his logic.)
+FCIMPL1(void, ThreadNative::Resume, ThreadBaseObject* pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ if (pThisUNSAFE == NULL)
+ FCThrowResVoid(kNullReferenceException, W("NullReference_This"));
+
+ Thread *thread = pThisUNSAFE->GetInternal();
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ // UserResumeThread() will return 0 if there isn't a user suspension for us to
+ // clear.
+ if (!ThreadIsRunning(thread))
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_RESUME_NON_RUNNING);
+
+ if (thread->UserResumeThread() == 0)
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_RESUME_NON_USER_SUSPEND);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+#endif // FEATURE_CORECLR
+
+// Note that you can manipulate the priority of a thread that hasn't started yet,
+// or one that is running. But you get an exception if you manipulate the priority
+// of a thread that has died.
+FCIMPL1(INT32, ThreadNative::GetPriority, ThreadBaseObject* pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ if (pThisUNSAFE==NULL)
+ FCThrowRes(kNullReferenceException, W("NullReference_This"));
+
+ // validate the handle
+ if (ThreadIsDead(pThisUNSAFE->GetInternal()))
+ FCThrowEx(kThreadStateException, IDS_EE_THREAD_DEAD_PRIORITY, NULL, NULL, NULL);
+
+ return pThisUNSAFE->m_Priority;
+}
+FCIMPLEND
+
+FCIMPL2(void, ThreadNative::SetPriority, ThreadBaseObject* pThisUNSAFE, INT32 iPriority)
+{
+ FCALL_CONTRACT;
+
+ int priority;
+ Thread *thread;
+
+ THREADBASEREF pThis = (THREADBASEREF) pThisUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(pThis);
+
+ if (pThis==NULL)
+ {
+ COMPlusThrow(kNullReferenceException, W("NullReference_This"));
+ }
+
+ // translate the priority (validating as well)
+ priority = MapToNTPriority(iPriority); // can throw; needs a frame
+
+ // validate the thread
+ thread = pThis->GetInternal();
+
+ if (ThreadIsDead(thread))
+ {
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_DEAD_PRIORITY, NULL, NULL, NULL);
+ }
+
+ INT32 oldPriority = pThis->m_Priority;
+
+ // Eliminate the race condition by establishing m_Priority before we check for if
+ // the thread is running. See ThreadNative::Start() for the other half.
+ pThis->m_Priority = iPriority;
+
+ if (!thread->SetThreadPriority(priority))
+ {
+ pThis->m_Priority = oldPriority;
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_PRIORITY_FAIL, NULL, NULL, NULL);
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+// This service can be called on unstarted and dead threads. For unstarted ones, the
+// next wait will be interrupted. For dead ones, this service quietly does nothing.
+FCIMPL1(void, ThreadNative::Interrupt, ThreadBaseObject* pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ if (pThisUNSAFE==NULL)
+ FCThrowResVoid(kNullReferenceException, W("NullReference_This"));
+
+ Thread *thread = pThisUNSAFE->GetInternal();
+
+ if (thread == 0)
+ FCThrowExVoid(kThreadStateException, IDS_EE_THREAD_CANNOT_GET, NULL, NULL, NULL);
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ thread->UserInterrupt(Thread::TI_Interrupt);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, ThreadNative::IsAlive, ThreadBaseObject* pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ if (pThisUNSAFE==NULL)
+ FCThrowRes(kNullReferenceException, W("NullReference_This"));
+
+ THREADBASEREF thisRef(pThisUNSAFE);
+ BOOL ret = false;
+
+ // Keep managed Thread object alive, since the native object's
+ // lifetime is tied to the managed object's finalizer. And with
+ // resurrection, it may be possible to get a dangling pointer here -
+ // consider both protecting thisRef and setting the managed object's
+ // Thread* to NULL in the GC's ScanForFinalization method.
+ HELPER_METHOD_FRAME_BEGIN_RET_1(thisRef);
+
+ Thread *thread = thisRef->GetInternal();
+
+ if (thread == 0)
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
+
+ ret = ThreadIsRunning(thread);
+
+ HELPER_METHOD_POLL();
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(ret);
+}
+FCIMPLEND
+
+FCIMPL2(FC_BOOL_RET, ThreadNative::Join, ThreadBaseObject* pThisUNSAFE, INT32 Timeout)
+{
+ FCALL_CONTRACT;
+
+ BOOL retVal = FALSE;
+ THREADBASEREF pThis = (THREADBASEREF) pThisUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(pThis);
+
+ if (pThis==NULL)
+ COMPlusThrow(kNullReferenceException, W("NullReference_This"));
+
+ // validate the timeout
+ if ((Timeout < 0) && (Timeout != INFINITE_TIMEOUT))
+ COMPlusThrowArgumentOutOfRange(W("millisecondsTimeout"), W("ArgumentOutOfRange_NeedNonNegOrNegative1"));
+
+ retVal = DoJoin(pThis, Timeout);
+
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(retVal);
+}
+FCIMPLEND
+
+#undef Sleep
+FCIMPL1(void, ThreadNative::Sleep, INT32 iTime)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ // validate the sleep time
+ if ((iTime < 0) && (iTime != INFINITE_TIMEOUT))
+ COMPlusThrowArgumentOutOfRange(W("millisecondsTimeout"), W("ArgumentOutOfRange_NeedNonNegOrNegative1"));
+
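+ // Sleep in a loop: if the runtime was paused while we slept (g_PauseTime
+ // advanced), AdditionalWait computes how much of the timeout is still owed;
+ // a result of 0 means the full (unpaused) sleep has been served.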
+ while(true)
+ {
+ INT64 sPauseTime = g_PauseTime;
+ INT64 sTime = CLRGetTickCount64();
+ GetThread()->UserSleep(iTime);
+ iTime = (INT32)AdditionalWait(sPauseTime, sTime, iTime);
+ if(iTime == 0)
+ break;
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+#define Sleep(dwMilliseconds) Dont_Use_Sleep(dwMilliseconds)
+
+FCIMPL1(INT32, ThreadNative::GetManagedThreadId, ThreadBaseObject* th) {
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED();
+ if (th == NULL)
+ FCThrow(kNullReferenceException);
+
+ return th->GetManagedThreadId();
+}
+FCIMPLEND
+
+NOINLINE static Object* GetCurrentThreadHelper()
+{
+ FCALL_CONTRACT;
+ FC_INNER_PROLOG(ThreadNative::GetCurrentThread);
+ OBJECTREF refRetVal = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, refRetVal);
+ refRetVal = GetThread()->GetExposedObject();
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+ return OBJECTREFToObject(refRetVal);
+}
+
+FCIMPL0(Object*, ThreadNative::GetCurrentThread)
+{
+ FCALL_CONTRACT;
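+ // Fast path: read the exposed Thread object straight out of the handle and
+ // only fall back to the framed helper when it has not been created yet.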
+ OBJECTHANDLE ExposedObject = GetThread()->m_ExposedObject;
+ _ASSERTE(ExposedObject != 0); //Thread's constructor always initializes its GCHandle
+ Object* result = *((Object**) ExposedObject);
+ if (result != 0)
+ return result;
+
+ FC_INNER_RETURN(Object*, GetCurrentThreadHelper());
+}
+FCIMPLEND
+
+
+FCIMPL3(void, ThreadNative::SetStart, ThreadBaseObject* pThisUNSAFE, Object* pDelegateUNSAFE, INT32 iRequestedStackSize)
+{
+ FCALL_CONTRACT;
+
+ if (pThisUNSAFE==NULL)
+ FCThrowResVoid(kNullReferenceException, W("NullReference_This"));
+
+ THREADBASEREF pThis = (THREADBASEREF) pThisUNSAFE;
+ OBJECTREF pDelegate = (OBJECTREF ) pDelegateUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_2(pThis, pDelegate);
+
+ _ASSERTE(pThis != NULL);
+ _ASSERTE(pDelegate != NULL); // Thread's constructor validates this
+
+ if (pThis->m_InternalThread == NULL)
+ {
+ // if we don't have an internal Thread object associated with this exposed object,
+ // now is our first opportunity to create one.
+ Thread *unstarted = SetupUnstartedThread();
+
+ PREFIX_ASSUME(unstarted != NULL);
+
+ if (GetThread()->GetDomain()->IgnoreUnhandledExceptions())
+ {
+ unstarted->SetThreadStateNC(Thread::TSNC_IgnoreUnhandledExceptions);
+ }
+
+ pThis->SetInternal(unstarted);
+ pThis->SetManagedThreadId(unstarted->GetThreadId());
+ unstarted->SetExposedObject(pThis);
+ unstarted->RequestedThreadStackSize(iRequestedStackSize);
+ }
+
+ // save off the delegate
+ pThis->SetDelegate(pDelegate);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+// Set whether or not this is a background thread.
+FCIMPL2(void, ThreadNative::SetBackground, ThreadBaseObject* pThisUNSAFE, CLR_BOOL isBackground)
+{
+ FCALL_CONTRACT;
+
+ if (pThisUNSAFE==NULL)
+ FCThrowResVoid(kNullReferenceException, W("NullReference_This"));
+
+ // validate the thread
+ Thread *thread = pThisUNSAFE->GetInternal();
+
+ if (ThreadIsDead(thread))
+ FCThrowExVoid(kThreadStateException, IDS_EE_THREAD_DEAD_STATE, NULL, NULL, NULL);
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ thread->SetBackground(isBackground);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+// Return whether or not this is a background thread.
+FCIMPL1(FC_BOOL_RET, ThreadNative::IsBackground, ThreadBaseObject* pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ if (pThisUNSAFE==NULL)
+ FCThrowRes(kNullReferenceException, W("NullReference_This"));
+
+ // validate the thread
+ Thread *thread = pThisUNSAFE->GetInternal();
+
+ if (ThreadIsDead(thread))
+ FCThrowEx(kThreadStateException, IDS_EE_THREAD_DEAD_STATE, NULL, NULL, NULL);
+
+ FC_RETURN_BOOL(thread->IsBackground());
+}
+FCIMPLEND
+
+
+// Deliver the state of the thread as a consistent set of bits.
+// This is duplicated in VM\EEDbgInterfaceImpl.h's
+// CorDebugUserState GetUserState( Thread *pThread ),
+// so propagate changes to both functions.
+FCIMPL1(INT32, ThreadNative::GetThreadState, ThreadBaseObject* pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ INT32 res = 0;
+ Thread::ThreadState state;
+
+ if (pThisUNSAFE==NULL)
+ FCThrowRes(kNullReferenceException, W("NullReference_This"));
+
+ // validate the thread. Failure here implies that the thread was finalized
+ // and then resurrected.
+ Thread *thread = pThisUNSAFE->GetInternal();
+
+ if (!thread)
+ FCThrowEx(kThreadStateException, IDS_EE_THREAD_CANNOT_GET, NULL, NULL, NULL);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ // grab a snapshot
+ state = thread->GetSnapshotState();
+
+ if (state & Thread::TS_Background)
+ res |= ThreadBackground;
+
+ if (state & Thread::TS_Unstarted)
+ res |= ThreadUnstarted;
+
+ // Don't report a StopRequested if the thread has actually stopped.
+ if (state & Thread::TS_Dead)
+ {
+ if (state & Thread::TS_Aborted)
+ res |= ThreadAborted;
+ else
+ res |= ThreadStopped;
+ }
+ else
+ {
+ if (state & Thread::TS_AbortRequested)
+ res |= ThreadAbortRequested;
+ }
+
+ if (state & Thread::TS_Interruptible)
+ res |= ThreadWaitSleepJoin;
+
+ // Don't report a SuspendRequested if the thread has actually Suspended.
+ if ((state & Thread::TS_UserSuspendPending) &&
+ (state & Thread::TS_SyncSuspended)
+ )
+ {
+ res |= ThreadSuspended;
+ }
+ else if (state & Thread::TS_UserSuspendPending)
+ {
+ res |= ThreadSuspendRequested;
+ }
+
+ HELPER_METHOD_POLL();
+ HELPER_METHOD_FRAME_END();
+
+ return res;
+}
+FCIMPLEND
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+// Indicate whether the thread will host an STA (this may fail if the thread has
+// already been made part of the MTA, use GetApartmentState or the return state
+// from this routine to check for this).
+FCIMPL3(INT32, ThreadNative::SetApartmentState, ThreadBaseObject* pThisUNSAFE, INT32 iState, CLR_BOOL fireMDAOnMismatch)
+{
+ FCALL_CONTRACT;
+
+ if (pThisUNSAFE==NULL)
+ FCThrowRes(kNullReferenceException, W("NullReference_This"));
+
+ INT32 retVal = ApartmentUnknown;
+ BOOL ok = TRUE;
+ THREADBASEREF pThis = (THREADBASEREF) pThisUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(pThis);
+
+ // Translate state input. ApartmentUnknown is not an acceptable input state.
+ // Throw an exception here rather than pass it through to the internal
+ // routine, which asserts.
+ Thread::ApartmentState state = Thread::AS_Unknown;
+ if (iState == ApartmentSTA)
+ state = Thread::AS_InSTA;
+ else if (iState == ApartmentMTA)
+ state = Thread::AS_InMTA;
+ else if (iState == ApartmentUnknown)
+ state = Thread::AS_Unknown;
+ else
+ COMPlusThrow(kArgumentOutOfRangeException, W("ArgumentOutOfRange_Enum"));
+
+ Thread *thread = pThis->GetInternal();
+ if (!thread)
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
+
+ {
+ pThis->EnterObjMonitor();
+
+ // We can only change the apartment if the thread is unstarted or
+ // running, and if it's running we have to be in the thread's
+ // context.
+ if ((!ThreadNotStarted(thread) && !ThreadIsRunning(thread)) ||
+ (!ThreadNotStarted(thread) && (GetThread() != thread)))
+ ok = FALSE;
+ else
+ {
+ EX_TRY
+ {
+ state = thread->SetApartment(state, fireMDAOnMismatch == TRUE);
+ }
+ EX_CATCH
+ {
+ pThis->LeaveObjMonitor();
+ EX_RETHROW;
+ }
+ EX_END_CATCH_UNREACHABLE;
+ }
+
+ pThis->LeaveObjMonitor();
+ }
+
+
+ // Now it's safe to throw exceptions again.
+ if (!ok)
+ COMPlusThrow(kThreadStateException);
+
+ // Translate state back into external form
+ if (state == Thread::AS_InSTA)
+ retVal = ApartmentSTA;
+ else if (state == Thread::AS_InMTA)
+ retVal = ApartmentMTA;
+ else if (state == Thread::AS_Unknown)
+ retVal = ApartmentUnknown;
+ else
+ _ASSERTE(!"Invalid state returned from SetApartment");
+
+ HELPER_METHOD_FRAME_END();
+
+ return retVal;
+}
+FCIMPLEND
+
+// Return whether the thread hosts an STA, is a member of the MTA or is not
+// currently initialized for COM.
+FCIMPL1(INT32, ThreadNative::GetApartmentState, ThreadBaseObject* pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ INT32 retVal = 0;
+
+ THREADBASEREF refThis = (THREADBASEREF) ObjectToOBJECTREF(pThisUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+
+ if (refThis == NULL)
+ {
+ COMPlusThrow(kNullReferenceException, W("NullReference_This"));
+ }
+
+ Thread* thread = refThis->GetInternal();
+
+ if (ThreadIsDead(thread))
+ {
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_DEAD_STATE);
+ }
+
+ Thread::ApartmentState state = thread->GetApartment();
+
+#ifdef FEATURE_COMINTEROP
+ if (state == Thread::AS_Unknown)
+ {
+ // If the CLR hasn't started COM yet, start it up and attempt the call again.
+ // We do this in order to minimize the number of situations under which we return
+ // ApartmentState.Unknown to our callers.
+ if (!g_fComStarted)
+ {
+ EnsureComStarted();
+ state = thread->GetApartment();
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ // Translate state into external form
+ retVal = ApartmentUnknown;
+ if (state == Thread::AS_InSTA)
+ {
+ retVal = ApartmentSTA;
+ }
+ else if (state == Thread::AS_InMTA)
+ {
+ retVal = ApartmentMTA;
+ }
+ else if (state == Thread::AS_Unknown)
+ {
+ retVal = ApartmentUnknown;
+ }
+ else
+ {
+ _ASSERTE(!"Invalid state returned from GetApartment");
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ return retVal;
+}
+FCIMPLEND
+
+
+// Attempt to eagerly set the apartment state during thread startup.
+FCIMPL1(void, ThreadNative::StartupSetApartmentState, ThreadBaseObject* pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ THREADBASEREF refThis = (THREADBASEREF) ObjectToOBJECTREF(pThisUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_1(refThis);
+
+ if (refThis == NULL)
+ {
+ COMPlusThrow(kNullReferenceException, W("NullReference_This"));
+ }
+
+ Thread* thread = refThis->GetInternal();
+
+ if (!ThreadNotStarted(thread))
+ COMPlusThrow(kThreadStateException, IDS_EE_THREADSTART_STATE);
+
+ // Assert that the thread hasn't been started yet.
+ _ASSERTE(Thread::TS_Unstarted & thread->GetSnapshotState());
+
+ if ((g_pConfig != NULL) && !g_pConfig->LegacyApartmentInitPolicy())
+ {
+ Thread::ApartmentState as = thread->GetExplicitApartment();
+ if (as == Thread::AS_Unknown)
+ {
+ thread->SetApartment(Thread::AS_InMTA, TRUE);
+ }
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+void ReleaseThreadExternalCount(Thread * pThread)
+{
+ WRAPPER_NO_CONTRACT;
+ pThread->DecExternalCount(FALSE);
+}
+
+typedef Holder<Thread *, DoNothing, ReleaseThreadExternalCount> ThreadExternalCountHolder;
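+// RAII holder: does nothing on acquisition and drops the external ref count on
+// destruction, so no exit path from DoJoin below can leak the count it takes.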
+
+// Wait for the thread to die
+BOOL ThreadNative::DoJoin(THREADBASEREF DyingThread, INT32 timeout)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(DyingThread != NULL);
+ PRECONDITION((timeout >= 0) || (timeout == INFINITE_TIMEOUT));
+ }
+ CONTRACTL_END;
+
+ Thread * DyingInternal = DyingThread->GetInternal();
+
+ // Validate the handle. It's valid to Join a thread that's not running -- so
+ // long as it was once started.
+ if (DyingInternal == 0 ||
+ !(DyingInternal->m_State & Thread::TS_LegalToJoin))
+ {
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_NOTSTARTED);
+ }
+
+ // Don't grab the handle until we know it has started, to eliminate the race
+ // condition.
+ if (ThreadIsDead(DyingInternal) || !DyingInternal->HasValidThreadHandle())
+ return TRUE;
+
+ DWORD dwTimeOut32 = (timeout == INFINITE_TIMEOUT
+ ? INFINITE
+ : (DWORD) timeout);
+
+ // There is a race here. DyingThread is going to close its thread handle.
+ // If we grab the handle and then DyingThread closes it, we will wait forever
+ // in DoAppropriateWait.
+ int RefCount = DyingInternal->IncExternalCount();
+ if (RefCount == 1)
+ {
+ // !!! We resurrect the Thread Object.
+ // !!! We will keep the Thread ref count to be 1 so that we will not try
+ // !!! to destroy the Thread Object again.
+ // !!! Do not call DecExternalCount here!
+ _ASSERTE (!DyingInternal->HasValidThreadHandle());
+ return TRUE;
+ }
+
+ ThreadExternalCountHolder dyingInternalHolder(DyingInternal);
+
+ if (!DyingInternal->HasValidThreadHandle())
+ {
+ return TRUE;
+ }
+
+ GCX_PREEMP();
+ DWORD rv = DyingInternal->JoinEx(dwTimeOut32, (WaitMode)(WaitMode_Alertable/*alertable*/|WaitMode_InDeadlock));
+
+ switch(rv)
+ {
+ case WAIT_OBJECT_0:
+ return TRUE;
+
+ case WAIT_TIMEOUT:
+ break;
+
+ case WAIT_FAILED:
+ if(!DyingInternal->HasValidThreadHandle())
+ return TRUE;
+ break;
+
+ default:
+ _ASSERTE(!"This return code is not understood \n");
+ break;
+ }
+
+ return FALSE;
+}
+
+
+// We don't get a constructor for ThreadBaseObject, so we rely on the fact that this
+// method is only called once, out of SetStart. Since SetStart is private/native
+// and only called from the constructor, we'll only get called here once to set it
+// up and once (with NULL) to tear it down. The 'null' can only come from Finalize
+// because the constructor throws if it doesn't get a valid delegate.
+void ThreadBaseObject::SetDelegate(OBJECTREF delegate)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+#ifdef APPDOMAIN_STATE
+ if (delegate != NULL)
+ {
+ AppDomain *pDomain = delegate->GetAppDomain();
+ Thread *pThread = GetInternal();
+ AppDomain *kickoffDomain = pThread->GetKickOffDomain();
+ _ASSERTE_ALL_BUILDS("clr/src/VM/COMSynchronizable.cpp", !pDomain || pDomain == kickoffDomain);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/COMSynchronizable.cpp", kickoffDomain == GetThread()->GetDomain());
+ }
+#endif
+
+ SetObjectReferenceUnchecked( (OBJECTREF *)&m_Delegate, delegate );
+
+ // If the delegate is being set then initialize the other data members.
+ if (m_Delegate != NULL)
+ {
+ // Initialize the thread priority to normal.
+ m_Priority = ThreadNative::PRIORITY_NORMAL;
+ }
+}
+
+
+// If the exposed object is created after-the-fact, for an existing thread, we call
+// InitExisting on it. This is the other "construction", as opposed to SetDelegate.
+void ThreadBaseObject::InitExisting()
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ Thread *pThread = GetInternal();
+ _ASSERTE (pThread);
+ switch (pThread->GetThreadPriority())
+ {
+ case THREAD_PRIORITY_LOWEST:
+ case THREAD_PRIORITY_IDLE:
+ m_Priority = ThreadNative::PRIORITY_LOWEST;
+ break;
+
+ case THREAD_PRIORITY_BELOW_NORMAL:
+ m_Priority = ThreadNative::PRIORITY_BELOW_NORMAL;
+ break;
+
+ case THREAD_PRIORITY_NORMAL:
+ m_Priority = ThreadNative::PRIORITY_NORMAL;
+ break;
+
+ case THREAD_PRIORITY_ABOVE_NORMAL:
+ m_Priority = ThreadNative::PRIORITY_ABOVE_NORMAL;
+ break;
+
+ case THREAD_PRIORITY_HIGHEST:
+ case THREAD_PRIORITY_TIME_CRITICAL:
+ m_Priority = ThreadNative::PRIORITY_HIGHEST;
+ break;
+
+ case THREAD_PRIORITY_ERROR_RETURN:
+ _ASSERTE(FALSE);
+ m_Priority = ThreadNative::PRIORITY_NORMAL;
+ break;
+
+ default:
+ m_Priority = ThreadNative::PRIORITY_NORMAL;
+ break;
+ }
+
+}
+
+#ifndef FEATURE_LEAK_CULTURE_INFO
+OBJECTREF ThreadBaseObject::GetManagedThreadCulture(BOOL bUICulture)
+{
+ CONTRACTL {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // This is the case when we're building mscorlib and haven't yet created
+ // the system assembly.
+ if (SystemDomain::System()->SystemAssembly()==NULL || g_fForbidEnterEE) {
+ return NULL;
+ }
+
+ OBJECTREF *pCurrentCulture = NULL;
+ Thread *pThread = GetInternal();
+ FieldDesc *pFD = NULL;
+
+ if (bUICulture)
+ {
+ pFD = pThread->managedThreadCurrentUICulture;
+ }
+ else
+ {
+ pFD = pThread->managedThreadCurrentCulture;
+ }
+
+ if (pFD != NULL)
+ {
+ pCurrentCulture = (OBJECTREF*)pThread->GetStaticFieldAddrNoCreate(pFD, NULL);
+ if (pCurrentCulture)
+ {
+ return *pCurrentCulture;
+ }
+ }
+
+ return NULL;
+}
+
+CULTUREINFOBASEREF ThreadBaseObject::GetCurrentUserCulture()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return (CULTUREINFOBASEREF)GetManagedThreadCulture(false);
+}
+
+CULTUREINFOBASEREF ThreadBaseObject::GetCurrentUICulture()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return (CULTUREINFOBASEREF)GetManagedThreadCulture(true);
+}
+
+// If the thread pool thread switched appdomains and the culture was set, the culture won't be
+// reset for the second appdomain. It's impossible to do general cleanup of thread pool threads
+// because we don't have the right extensible infrastructure for it. For example, if the second
+// appdomain was in a different CLR you won't be able to reset the culture without introducing
+// a new cross-CLR communication mechanism. However, note that this isn't a user scenario in
+// CoreCLR anyway.
+void ThreadBaseObject::ResetCurrentUserCulture()
+{
+ WRAPPER_NO_CONTRACT;
+ ResetManagedThreadCulture(false);
+}
+
+void ThreadBaseObject::ResetCurrentUICulture()
+{
+ WRAPPER_NO_CONTRACT;
+ ResetManagedThreadCulture(true);
+}
+
+void ThreadBaseObject::ResetManagedThreadCulture(BOOL bUICulture)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // This is the case when we're building mscorlib and haven't yet created
+ // the system assembly.
+ if (SystemDomain::System()->SystemAssembly()==NULL || g_fForbidEnterEE) {
+ return;
+ }
+
+ Thread *pThread = GetInternal();
+ FieldDesc *pFD = NULL;
+
+ if (bUICulture)
+ {
+ pFD = pThread->managedThreadCurrentUICulture;
+ }
+ else
+ {
+ pFD = pThread->managedThreadCurrentCulture;
+ }
+
+ if (pFD != NULL)
+ {
+ OBJECTREF *pCulture = NULL;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
+ pCulture = (OBJECTREF*)pThread->GetStaticFieldAddrNoCreate(pFD, NULL);
+ if (pCulture)
+ {
+ SetObjectReferenceUnchecked(pCulture, NULL);
+ }
+ END_SO_INTOLERANT_CODE;
+
+ }
+}
+
+#endif // FEATURE_LEAK_CULTURE_INFO
+
+
+FCIMPL1(void, ThreadNative::Finalize, ThreadBaseObject* pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ // This function is intentionally blank.
+ // See comment in code:MethodTable::CallFinalizer.
+
+ _ASSERTE (!"Should not be called");
+
+ FCUnique(0x21);
+}
+FCIMPLEND
+
+#ifdef FEATURE_COMINTEROP
+FCIMPL1(void, ThreadNative::DisableComObjectEagerCleanup, ThreadBaseObject* pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(pThisUNSAFE != NULL);
+ VALIDATEOBJECT(pThisUNSAFE);
+ Thread *pThread = pThisUNSAFE->GetInternal();
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ if (pThread == NULL)
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
+
+ pThread->SetDisableComObjectEagerCleanup();
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+#endif //FEATURE_COMINTEROP
+
+#ifdef FEATURE_LEAK_CULTURE_INFO
+FCIMPL1(FC_BOOL_RET, ThreadNative::SetThreadUILocale, StringObject* localeNameUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ BOOL result = TRUE;
+
+ STRINGREF name = (STRINGREF) localeNameUNSAFE;
+ VALIDATEOBJECTREF(name);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ LCID lcid=NewApis::LocaleNameToLCID(name->GetBuffer(),0);
+ if (lcid == 0)
+ {
+ ThrowHR(HRESULT_FROM_WIN32(GetLastError()));
+ }
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTaskManager *manager = CorHost2::GetHostTaskManager();
+ if (manager) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ result = (manager->SetUILocale(lcid) == S_OK);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(result);
+}
+FCIMPLEND
+#endif // FEATURE_LEAK_CULTURE_INFO
+
+FCIMPL0(Object*, ThreadNative::GetDomain)
+{
+ FCALL_CONTRACT;
+
+ APPDOMAINREF refRetVal = NULL;
+
+ Thread* thread = GetThread();
+
+ if ((thread) && (thread->GetDomain()))
+ {
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refRetVal);
+ refRetVal = (APPDOMAINREF) thread->GetDomain()->GetExposedObject();
+ HELPER_METHOD_FRAME_END();
+ }
+
+ return OBJECTREFToObject(refRetVal);
+}
+FCIMPLEND
+
+#ifdef _TARGET_X86_
+__declspec(naked) LPVOID __fastcall ThreadNative::FastGetDomain()
+{
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_SO_TOLERANT;
+
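+ // Hand-tuned x86 fast path mirroring the portable implementation below:
+ // return the current domain's exposed object, or null if the domain or the
+ // object has not been created yet. No frame is set up, hence the strict
+ // NOTHROW/GC_NOTRIGGER contracts above.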
+ __asm {
+ call GetAppDomain
+ test eax, eax
+ je done
+ mov eax, dword ptr [eax]AppDomain.m_ExposedObject
+ test eax, eax
+ je done
+ mov eax, dword ptr [eax]
+done:
+ ret
+ }
+}
+#else // _TARGET_X86_
+LPVOID F_CALL_CONV ThreadNative::FastGetDomain()
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Thread *pThread;
+ AppDomain *pDomain;
+ OBJECTHANDLE ExposedObject;
+
+ pDomain = GetAppDomain();
+ if (!pDomain) {
+ return NULL;
+ }
+ ExposedObject = pDomain->m_ExposedObject;
+ if (ExposedObject) {
+ return *(LPVOID *)ExposedObject;
+ }
+ return NULL;
+}
+#endif // _TARGET_X86_
+
+#ifdef FEATURE_REMOTING
+// This is just a helper method that lets BCL get to the managed context
+// from the contextID.
+FCIMPL1(Object*, ThreadNative::GetContextFromContextID, LPVOID ContextID)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF rv = NULL;
+ Context* pCtx = (Context *) ContextID;
+ // Get the managed context backing this unmanaged context
+ rv = pCtx->GetExposedObjectRaw();
+
+ // This assert maintains the following invariant:
+ // Only default unmanaged contexts can have a null managed context
+ // (All non-default contexts are created as managed contexts first, and then
+ // hooked to the unmanaged context)
+ _ASSERTE((rv != NULL) || (pCtx->GetDomain()->GetDefaultContext() == pCtx));
+
+ return OBJECTREFToObject(rv);
+}
+FCIMPLEND
+
+
+FCIMPL6(Object*, ThreadNative::InternalCrossContextCallback, ThreadBaseObject* refThis, ContextBaseObject* refContext, LPVOID contextID, INT32 appDomainId, Object* oDelegateUNSAFE, PtrArray* oArgsUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(refThis != NULL);
+ VALIDATEOBJECT(refThis);
+ Thread *pThread = refThis->GetInternal();
+ Context *pCtx = (Context *)contextID;
+
+
+ _ASSERTE(pCtx && (refContext == NULL || pCtx->GetExposedObjectRaw() == NULL ||
+ ObjectToOBJECTREF(refContext) == pCtx->GetExposedObjectRaw()));
+ LOG((LF_APPDOMAIN, LL_INFO1000, "ThreadNative::InternalCrossContextCallback: %p, %p\n", refContext, pCtx));
+ // Install our frame. We have to put it here before we put the helper frame on the stack.
+
+ // Set the VM context.
+
+ struct _gc {
+ OBJECTREF oRetVal;
+ OBJECTREF oDelegate;
+ OBJECTREF oArgs;
+ // We need to report the managed context object because it may become unreachable in the caller,
+ // however we have to keep it alive, otherwise its finalizer could free the unmanaged internal context
+ OBJECTREF oContext;
+ } gc;
+
+ gc.oRetVal = NULL;
+ gc.oDelegate = ObjectToOBJECTREF(oDelegateUNSAFE);
+ gc.oArgs = ObjectToOBJECTREF(oArgsUNSAFE);
+ gc.oContext = ObjectToOBJECTREF(refContext);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ if (pThread == NULL)
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
+
+#ifdef _DEBUG
+ MethodDesc* pTargetMD = COMDelegate::GetMethodDesc(gc.oDelegate);
+ _ASSERTE(pTargetMD->IsStatic());
+#endif
+
+ // If we have a non-zero appDomain index, this is a x-domain call
+ // We must verify that the AppDomain is not unloaded
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__THREAD__COMPLETE_CROSSCONTEXTCALLBACK);
+
+ AppDomainFromIDHolder ad;
+ if (appDomainId != 0)
+ {
+ //
+ // NOTE: there is a potential race between the time we retrieve the app domain pointer,
+ // and the time which this thread enters the domain.
+ //
+ // To solve the race, we rely on the fact that there is a thread sync
+ // between releasing an app domain's handle, and destroying the app domain. Thus
+ // it is important that we not go into preemptive gc mode in that window.
+ //
+ {
+ ad.Assign(ADID(appDomainId), TRUE);
+
+ if (ad.IsUnloaded() || !ad->CanThreadEnter(pThread))
+ COMPlusThrow(kAppDomainUnloadedException, W("Remoting_AppDomainUnloaded"));
+ }
+ }
+
+ // Verify that the Context is valid.
+ if ( !Context::ValidateContext(pCtx) )
+ COMPlusThrow(kRemotingException, W("Remoting_InvalidContext"));
+
+ DEBUG_ASSURE_NO_RETURN_BEGIN(COMSYNCH)
+
+ FrameWithCookie<ContextTransitionFrame> frame;
+
+ Context* pCurrContext = pThread->GetContext();
+ bool fTransition = (pCurrContext != pCtx);
+ BOOL fSameDomain = (appDomainId==0) || (pCurrContext->GetDomain()->GetId() == (ADID)appDomainId);
+ _ASSERTE( fTransition || fSameDomain);
+ if (fTransition)
+ {
+ if (appDomainId != 0)
+ ad->EnterContext(pThread, pCtx, &frame);
+ else
+ pThread->EnterContextRestricted(pCtx, &frame);
+ }
+ ad.Release();
+
+
+ LOG((LF_EH, LL_INFO100, "MSCORLIB_ENTER_CONTEXT( %s::%s ): %s\n",
+ pTargetMD->m_pszDebugClassName,
+ pTargetMD->m_pszDebugMethodName,
+ fTransition ? "ENTERED" : "NOP"));
+
+ Exception* pOriginalException=NULL;
+
+ EX_TRY
+ {
+ DECLARE_ARGHOLDER_ARRAY(callArgs, 2);
+
+#if CHECK_APP_DOMAIN_LEAKS
+ // We're passing the delegate object to another appdomain
+ // without marshaling, that is OK - it's a static function delegate
+ // but we should mark it as agile then.
+ gc.oDelegate->SetSyncBlockAppDomainAgile();
+#endif
+ callArgs[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.oDelegate);
+ callArgs[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(gc.oArgs);
+
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD_RETREF(gc.oRetVal, OBJECTREF, callArgs);
+ }
+ EX_CATCH
+ {
+ LOG((LF_EH, LL_INFO100, "MSCORLIB_CONTEXT_TRANSITION( %s::%s ): exception in flight\n", pTargetMD->m_pszDebugClassName, pTargetMD->m_pszDebugMethodName));
+
+ if (!fTransition || fSameDomain)
+ {
+ if (fTransition)
+ {
+ GCX_FORBID();
+ pThread->ReturnToContext(&frame);
+ }
+#ifdef FEATURE_TESTHOOKS
+ if (appDomainId!=0)
+ {
+ TESTHOOKCALL(LeftAppDomain(appDomainId));
+ }
+#endif
+ EX_RETHROW;
+ }
+
+ pOriginalException=EXTRACT_EXCEPTION();
+ CAPTURE_BUCKETS_AT_TRANSITION(pThread, CLRException::GetThrowableFromException(pOriginalException));
+ goto lAfterCtxUnwind;
+ }
+ EX_END_CATCH_UNREACHABLE;
+ if (0)
+ {
+lAfterCtxUnwind:
+ LOG((LF_EH, LL_INFO100, "MSCORLIB_RaiseCrossContextException( %s::%s )\n", pTargetMD->m_pszDebugClassName, pTargetMD->m_pszDebugMethodName));
+ pThread->RaiseCrossContextException(pOriginalException,&frame);
+ }
+
+ LOG((LF_EH, LL_INFO100, "MSCORLIB_LEAVE_CONTEXT_TRANSITION( %s::%s )\n", pTargetMD->m_pszDebugClassName, pTargetMD->m_pszDebugMethodName));
+
+ if (fTransition)
+ {
+ GCX_FORBID();
+ pThread->ReturnToContext(&frame);
+ }
+#ifdef FEATURE_TESTHOOKS
+ if(appDomainId!=0)
+ {
+ TESTHOOKCALL(LeftAppDomain(appDomainId));
+ }
+#endif
+
+ DEBUG_ASSURE_NO_RETURN_END(COMSYNCH)
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(gc.oRetVal);
+}
+FCIMPLEND
+#endif //FEATURE_REMOTING
+
+//
+// nativeGetSafeCulture is used when the culture is requested from the thread object.
+// We have to check the culture in the FCALL because inside an FCALL the thread cannot
+// be interrupted to unload another app domain.
+// The concern here is that if the thread holds a subclassed culture object and somebody
+// requests it from another app domain, we must not keep any reference to that culture
+// object, because the app domain that created it may get unloaded while the culture
+// object survives; its type metadata would be gone, and the GC would crash the first
+// time it touched the object after the app domain unload.
+//
+#ifdef FEATURE_LEAK_CULTURE_INFO
+FCIMPL4(FC_BOOL_RET, ThreadNative::nativeGetSafeCulture,
+ ThreadBaseObject* threadUNSAFE,
+ int appDomainId,
+ CLR_BOOL isUI,
+ OBJECTREF* safeCulture)
+{
+ FCALL_CONTRACT;
+
+ THREADBASEREF thread(threadUNSAFE);
+
+ CULTUREINFOBASEREF pCulture = isUI ? thread->GetCurrentUICulture() : thread->GetCurrentUserCulture();
+ if (pCulture != NULL) {
+ if (pCulture->IsSafeCrossDomain() || pCulture->GetCreatedDomainID() == ADID(appDomainId)) {
+ SetObjectReference(safeCulture, pCulture, pCulture->GetAppDomain());
+ } else {
+ FC_RETURN_BOOL(FALSE);
+ }
+ }
+ FC_RETURN_BOOL(TRUE);
+}
+FCIMPLEND
+#endif // FEATURE_LEAK_CULTURE_INFO
+
+#ifndef FEATURE_LEAK_CULTURE_INFO
+void QCALLTYPE ThreadNative::nativeInitCultureAccessors()
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ Thread* pThread = GetThread();
+ pThread->InitCultureAccessors();
+
+ END_QCALL;
+}
+#endif // FEATURE_LEAK_CULTURE_INFO
+
+
+void QCALLTYPE ThreadNative::InformThreadNameChange(QCall::ThreadHandle thread, LPCWSTR name, INT32 len)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ Thread* pThread = &(*thread);
+
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
+ if (name == NULL)
+ {
+ g_profControlBlock.pProfInterface->ThreadNameChanged((ThreadID)pThread, 0, NULL);
+ }
+ else
+ {
+ g_profControlBlock.pProfInterface->ThreadNameChanged((ThreadID)pThread, len, (WCHAR*)name);
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+
+#ifdef DEBUGGING_SUPPORTED
+ if (CORDebuggerAttached())
+ {
+ _ASSERTE(NULL != g_pDebugInterface);
+ g_pDebugInterface->NameChangeEvent(NULL, pThread);
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ END_QCALL;
+}
+
+UINT64 QCALLTYPE ThreadNative::GetProcessDefaultStackSize()
+{
+ QCALL_CONTRACT;
+
+ SIZE_T reserve = 0;
+ SIZE_T commit = 0;
+
+ BEGIN_QCALL;
+
+ if (!Thread::GetProcessDefaultStackSize(&reserve, &commit))
+ reserve = 1024 * 1024;
+
+ END_QCALL;
+
+ return (UINT64)reserve;
+}
+
+
+FCIMPL0(void, ThreadNative::BeginCriticalRegion)
+{
+ FCALL_CONTRACT;
+ if (CLRHosted())
+ {
+ GetThread()->BeginCriticalRegion_NoCheck();
+ }
+}
+FCIMPLEND
+
+FCIMPL0(void, ThreadNative::EndCriticalRegion)
+{
+ FCALL_CONTRACT;
+ if (CLRHosted())
+ {
+ GetThread()->EndCriticalRegion_NoCheck();
+ }
+}
+FCIMPLEND
+
+FCIMPL0(void, ThreadNative::BeginThreadAffinity)
+{
+ FCALL_CONTRACT;
+ Thread::BeginThreadAffinity();
+}
+FCIMPLEND
+
+FCIMPL0(void, ThreadNative::EndThreadAffinity)
+{
+ FCALL_CONTRACT;
+ Thread::EndThreadAffinity();
+}
+FCIMPLEND
+
+
+FCIMPL1(FC_BOOL_RET, ThreadNative::IsThreadpoolThread, ThreadBaseObject* thread)
+{
+ FCALL_CONTRACT;
+
+ if (thread==NULL)
+ FCThrowRes(kNullReferenceException, W("NullReference_This"));
+
+ Thread *pThread = thread->GetInternal();
+
+ if (pThread == NULL)
+ FCThrowEx(kThreadStateException, IDS_EE_THREAD_DEAD_STATE, NULL, NULL, NULL);
+
+ BOOL ret = pThread->IsThreadPoolThread();
+
+ FC_GC_POLL_RET();
+
+ FC_RETURN_BOOL(ret);
+}
+FCIMPLEND
+
+
+FCIMPL1(void, ThreadNative::SpinWait, int iterations)
+{
+ FCALL_CONTRACT;
+
+ //
+ // If we're not going to spin for long, it's ok to remain in cooperative mode.
+ // The threshold is determined by the cost of entering preemptive mode; if we're
+ // spinning for less than that number of cycles, then switching to preemptive
+ // mode won't help a GC start any faster. That number is right around 1000000
+ // on my machine.
+ //
+ if (iterations <= 1000000)
+ {
+ for(int i = 0; i < iterations; i++)
+ YieldProcessor();
+ return;
+ }
+
+ //
+ // Too many iterations; better switch to preemptive mode to avoid stalling a GC.
+ //
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+ GCX_PREEMP();
+
+ for(int i = 0; i < iterations; i++)
+ YieldProcessor();
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+BOOL QCALLTYPE ThreadNative::YieldThread()
+{
+ QCALL_CONTRACT;
+
+ BOOL ret = FALSE;
+
+ BEGIN_QCALL
+
+ ret = __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+
+ END_QCALL
+
+ return ret;
+}
+
+#ifdef FEATURE_COMPRESSEDSTACK
+FCIMPL2(void*, ThreadNative::SetAppDomainStack, ThreadBaseObject* pThis, SafeHandle* hcsUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ void* pRet = NULL;
+ SAFEHANDLE hcsSAFE = (SAFEHANDLE) hcsUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(hcsSAFE);
+
+
+ void* unmanagedCompressedStack = NULL;
+ if (hcsSAFE != NULL)
+ {
+ unmanagedCompressedStack = (void *)hcsSAFE->GetHandle();
+ }
+
+
+ VALIDATEOBJECT(pThis);
+ Thread *pThread = pThis->GetInternal();
+ if (pThread == NULL)
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
+
+ pRet = StackCompressor::SetAppDomainStack(pThread, unmanagedCompressedStack);
+ HELPER_METHOD_FRAME_END_POLL();
+ return pRet;
+}
+FCIMPLEND
+
+
+FCIMPL2(void, ThreadNative::RestoreAppDomainStack, ThreadBaseObject* pThis, void* appDomainStack)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ VALIDATEOBJECT(pThis);
+ Thread *pThread = pThis->GetInternal();
+ if (pThread == NULL)
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
+
+ StackCompressor::RestoreAppDomainStack(pThread, appDomainStack);
+ HELPER_METHOD_FRAME_END_POLL();
+}
+FCIMPLEND
+#endif //#ifdef FEATURE_COMPRESSEDSTACK
+
+FCIMPL0(void, ThreadNative::FCMemoryBarrier)
+{
+ FCALL_CONTRACT;
+
+ MemoryBarrier();
+ FC_GC_POLL();
+}
+FCIMPLEND
+
+FCIMPL2(void, ThreadNative::SetAbortReason, ThreadBaseObject* pThisUNSAFE, Object* pObject)
+{
+ FCALL_CONTRACT;
+
+ if (pThisUNSAFE==NULL)
+ FCThrowResVoid(kNullReferenceException, W("NullReference_This"));
+
+ OBJECTREF refObject = static_cast<OBJECTREF>(pObject);
+
+ Thread *pThread = pThisUNSAFE->GetInternal();
+
+ // If the OBJECTHANDLE is not 0, the abort reason has already been set, so just return.
+ if (pThread != NULL && pThread->m_AbortReason != 0)
+ return;
+
+ // Set up a frame in case of GC or EH
+ HELPER_METHOD_FRAME_BEGIN_1(refObject)
+
+ if (pThread == NULL)
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
+
+ // Get the AppDomain ID for the AppDomain on the currently running thread.
+ // NOTE: the currently running thread may be different from this thread object!
+ AppDomain *pCurrentDomain = GetThread()->GetDomain();
+ ADID adid = pCurrentDomain->GetId();
+
+ // Create an OBJECTHANDLE for the object.
+ OBJECTHANDLE oh = pCurrentDomain->CreateHandle(refObject);
+
+ // Scope the lock to peeking at and updating the two fields on the Thread object.
+ { // Atomically check whether the OBJECTHANDLE has been set, and if not,
+ // store it and the ADID of the object.
+ // NOTE: get the lock on this thread object, not on the executing thread.
+ Thread::AbortRequestLockHolder lock(pThread);
+ if (pThread->m_AbortReason == 0)
+ {
+ pThread->m_AbortReason = oh;
+ pThread->m_AbortReasonDomainID = adid;
+ // Clear the local OBJECTHANDLE so we know it is now owned by the Thread object.
+ oh = 0;
+ }
+ }
+
+ // If the OBJECTHANDLE created above was not stored onto the Thread object, then
+ // another thread beat this one to the update. Destroy the unused OBJECTHANDLE
+ // created above.
+ if (oh != 0)
+ {
+ DestroyHandle(oh);
+ }
+
+ HELPER_METHOD_FRAME_END()
+
+}
+FCIMPLEND
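+// For illustration (standard C++, not CoreCLR code): the lock-scoped
+// "first writer wins" publication above can be sketched as follows, with
+// DestroyHandleSketch standing in for DestroyHandle:
+//
+//   #include <mutex>
+//
+//   struct ThreadSketch { std::mutex lock; void* abortReason = nullptr; };
+//   void DestroyHandleSketch(void*);       // hypothetical
+//
+//   void PublishAbortReason(ThreadSketch& t, void* newHandle)
+//   {
+//       {
+//           std::lock_guard<std::mutex> hold(t.lock);
+//           if (t.abortReason == nullptr)
+//           {
+//               t.abortReason = newHandle; // we won; the thread owns it now
+//               newHandle = nullptr;
+//           }
+//       }
+//       if (newHandle != nullptr)          // we lost the race
+//           DestroyHandleSketch(newHandle);
+//   }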
+
+#ifndef FEATURE_CORECLR // core clr does not support abort reason
+FCIMPL1(Object*, ThreadNative::GetAbortReason, ThreadBaseObject *pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ if (pThisUNSAFE==NULL)
+ FCThrowRes(kNullReferenceException, W("NullReference_This"));
+
+ OBJECTREF refRetVal = NULL;
+ Thread *pThread = pThisUNSAFE->GetInternal();
+
+ // Set up a frame in case of GC or EH
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refRetVal)
+
+ if (pThread == NULL)
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
+
+ // While the ExceptionInfo probably will be *set* from a different
+ // thread, it should only be *read* from the current thread.
+ _ASSERTE(GetThread() == pThread);
+
+ // Set cooperative mode, to avoid AD unload while we're working.
+ GCX_COOP();
+
+ OBJECTHANDLE oh=NULL;
+ ADID adid;
+ // Scope the lock to reading the two fields on the Thread object.
+ { // Atomically get the OBJECTHANDLE and ADID of the object
+ // NOTE: get the lock on this thread object, not on the executing thread.
+ Thread::AbortRequestLockHolder lock(pThread);
+ oh = pThread->m_AbortReason;
+ adid = pThread->m_AbortReasonDomainID;
+ }
+
+ // If the OBJECTHANDLE is not 0...
+ if (oh != 0)
+ {
+
+ AppDomain *pCurrentDomain = pThread->GetDomain();
+ // See if the appdomain is equal to the appdomain of the currently running
+ // thread.
+
+ if (pCurrentDomain->GetId() == adid)
+ { // Same appdomain; just return object from the OBJECTHANDLE
+ refRetVal = ObjectFromHandle(oh);
+ }
+ else
+ { // Otherwise, try to marshal the object from the other AppDomain
+ ENTER_DOMAIN_ID(adid);
+ CrossAppDomainClonerCallback cadcc;
+ ObjectClone Cloner(&cadcc, CrossAppDomain, FALSE);
+ refRetVal = Cloner.Clone(ObjectFromHandle(oh), GetAppDomain(), pCurrentDomain, NULL);
+ Cloner.RemoveGCFrames();
+ END_DOMAIN_TRANSITION;
+ }
+ }
+
+ HELPER_METHOD_FRAME_END()
+
+ return OBJECTREFToObject(refRetVal);
+}
+FCIMPLEND
+#endif // !FEATURE_CORECLR
+
+FCIMPL1(void, ThreadNative::ClearAbortReason, ThreadBaseObject* pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ if (pThisUNSAFE==NULL)
+ FCThrowResVoid(kNullReferenceException, W("NullReference_This"));
+
+ Thread *pThread = pThisUNSAFE->GetInternal();
+
+ // Clearing from managed code can only happen on the current thread.
+ _ASSERTE(pThread == GetThread());
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ if (pThread == NULL)
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_CANNOT_GET);
+
+ pThread->ClearAbortReason();
+
+ HELPER_METHOD_FRAME_END();
+
+}
+FCIMPLEND
+
+
diff --git a/src/vm/comsynchronizable.h b/src/vm/comsynchronizable.h
new file mode 100644
index 0000000000..8b10e11e8e
--- /dev/null
+++ b/src/vm/comsynchronizable.h
@@ -0,0 +1,157 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*============================================================
+**
+** Header: COMSynchronizable.h
+**
+** Purpose: Native methods on System.SynchronizableObject
+** and its subclasses.
+**
+**
+===========================================================*/
+
+#ifndef _COMSYNCHRONIZABLE_H
+#define _COMSYNCHRONIZABLE_H
+
+#include "field.h" // For FieldDesc definition.
+
+//
+// Each function that we call into from native code gets only one argument,
+// which is actually a pointer to its stack of arguments. Our structs
+// for accessing these are defined below.
+//
+
+struct SharedState;
+
+class ThreadNative
+{
+friend class ThreadBaseObject;
+
+public:
+
+ enum
+ {
+ PRIORITY_LOWEST = 0,
+ PRIORITY_BELOW_NORMAL = 1,
+ PRIORITY_NORMAL = 2,
+ PRIORITY_ABOVE_NORMAL = 3,
+ PRIORITY_HIGHEST = 4,
+ };
+
+ enum
+ {
+ ThreadStopRequested = 1,
+ ThreadSuspendRequested = 2,
+ ThreadBackground = 4,
+ ThreadUnstarted = 8,
+ ThreadStopped = 16,
+ ThreadWaitSleepJoin = 32,
+ ThreadSuspended = 64,
+ ThreadAbortRequested = 128,
+ ThreadAborted = 256,
+ };
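+ // For illustration: these values are distinct powers of two, so a thread's
+ // state is a bitmask that can carry several of them at once, e.g.:
+ //
+ //   // hypothetical helper
+ //   inline bool IsStoppedOrAborted(INT32 state)
+ //   {
+ //       return (state & (ThreadStopped | ThreadAborted)) != 0;
+ //   }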
+
+ enum
+ {
+ ApartmentSTA = 0,
+ ApartmentMTA = 1,
+ ApartmentUnknown = 2
+ };
+
+ static LPVOID F_CALL_CONV FastGetCurrentThread();
+ static LPVOID F_CALL_CONV FastGetDomain();
+
+ static void StartInner(ThreadBaseObject* pThisUNSAFE, Object* pPrincipalUNSAFE, StackCrawlMark* pStackMark);
+
+ static FCDECL1(void, Abort, ThreadBaseObject* pThis);
+ static FCDECL1(void, ResetAbort, ThreadBaseObject* pThis);
+ static FCDECL3(void, Start, ThreadBaseObject* pThisUNSAFE, Object* pPrincipalUNSAFE, StackCrawlMark* pStackMark);
+#ifndef FEATURE_CORECLR
+ static FCDECL1(void, Suspend, ThreadBaseObject* pThisUNSAFE);
+ static FCDECL1(void, Resume, ThreadBaseObject* pThisUNSAFE);
+#endif // FEATURE_CORECLR
+ static FCDECL1(INT32, GetPriority, ThreadBaseObject* pThisUNSAFE);
+ static FCDECL2(void, SetPriority, ThreadBaseObject* pThisUNSAFE, INT32 iPriority);
+ static FCDECL1(void, Interrupt, ThreadBaseObject* pThisUNSAFE);
+ static FCDECL1(FC_BOOL_RET, IsAlive, ThreadBaseObject* pThisUNSAFE);
+ static FCDECL2(FC_BOOL_RET, Join, ThreadBaseObject* pThisUNSAFE, INT32 Timeout);
+#undef Sleep
+ static FCDECL1(void, Sleep, INT32 iTime);
+#define Sleep(a) Dont_Use_Sleep(a)
+ static FCDECL3(void, SetStart, ThreadBaseObject* pThisUNSAFE, Object* pDelegateUNSAFE, INT32 iRequestedStackSize);
+ static FCDECL2(void, SetBackground, ThreadBaseObject* pThisUNSAFE, CLR_BOOL isBackground);
+ static FCDECL1(FC_BOOL_RET, IsBackground, ThreadBaseObject* pThisUNSAFE);
+ static FCDECL1(INT32, GetThreadState, ThreadBaseObject* pThisUNSAFE);
+ static FCDECL1(INT32, GetThreadContext, ThreadBaseObject* pThisUNSAFE);
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ static FCDECL1(INT32, GetApartmentState, ThreadBaseObject* pThis);
+ static FCDECL3(INT32, SetApartmentState, ThreadBaseObject* pThisUNSAFE, INT32 iState, CLR_BOOL fireMDAOnMismatch);
+ static FCDECL1(void, StartupSetApartmentState, ThreadBaseObject* pThis);
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ static FCDECL0(Object*, GetDomain);
+#ifdef FEATURE_REMOTING
+ static FCDECL1(Object*, GetContextFromContextID, LPVOID ContextID);
+ static FCDECL6(Object*, InternalCrossContextCallback, ThreadBaseObject* refThis, ContextBaseObject* refContext, LPVOID contextID, INT32 appDomainId, Object* oDelegateUNSAFE, PtrArray* oArgsUNSAFE);
+#endif
+#ifdef FEATURE_LEAK_CULTURE_INFO
+ static FCDECL4(FC_BOOL_RET, nativeGetSafeCulture, ThreadBaseObject* threadUNSAFE, int appDomainId, CLR_BOOL isUI, OBJECTREF *safeCulture);
+#else
+ static void QCALLTYPE nativeInitCultureAccessors();
+#endif
+
+ static
+ void QCALLTYPE InformThreadNameChange(QCall::ThreadHandle thread, LPCWSTR name, INT32 len);
+
+ static
+ UINT64 QCALLTYPE GetProcessDefaultStackSize();
+
+ static FCDECL1(INT32, GetManagedThreadId, ThreadBaseObject* th);
+ static FCDECL0(void, BeginCriticalRegion);
+ static FCDECL0(void, EndCriticalRegion);
+ static FCDECL0(void, BeginThreadAffinity);
+ static FCDECL0(void, EndThreadAffinity);
+ static FCDECL1(void, SpinWait, int iterations);
+ static BOOL QCALLTYPE YieldThread();
+ static FCDECL0(Object*, GetCurrentThread);
+ static FCDECL1(void, Finalize, ThreadBaseObject* pThis);
+#ifdef FEATURE_COMINTEROP
+ static FCDECL1(void, DisableComObjectEagerCleanup, ThreadBaseObject* pThis);
+#endif //FEATURE_COMINTEROP
+#ifdef FEATURE_LEAK_CULTURE_INFO
+ static FCDECL1(FC_BOOL_RET, SetThreadUILocale, StringObject* localeNameUNSAFE);
+#endif // FEATURE_LEAK_CULTURE_INFO
+ static FCDECL1(FC_BOOL_RET, IsThreadpoolThread, ThreadBaseObject* thread);
+#ifdef FEATURE_COMPRESSEDSTACK
+ static FCDECL2(void*, SetAppDomainStack, ThreadBaseObject* pThis, SafeHandle* hcsUNSAFE);
+ static FCDECL2(void, RestoreAppDomainStack, ThreadBaseObject* pThis, void* appDomainStack);
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+
+ static FCDECL0(void, FCMemoryBarrier);
+ static FCDECL1(void, SetIsThreadStaticsArray, Object* pObject);
+
+ static FCDECL2(void, SetAbortReason, ThreadBaseObject* pThisUNSAFE, Object* pObject);
+#ifndef FEATURE_CORECLR
+ static FCDECL1(Object*, GetAbortReason, ThreadBaseObject* pThisUNSAFE);
+#endif
+ static FCDECL1(void, ClearAbortReason, ThreadBaseObject* pThisUNSAFE);
+
+private:
+
+ struct KickOffThread_Args {
+ Thread *pThread;
+ SharedState *share;
+ ULONG retVal;
+ };
+
+ static void KickOffThread_Worker(LPVOID /* KickOffThread_Args* */);
+ static ULONG __stdcall KickOffThread(void *pass);
+ static BOOL DoJoin(THREADBASEREF DyingThread, INT32 timeout);
+};
+
+
+#endif // _COMSYNCHRONIZABLE_H
+
diff --git a/src/vm/comthreadpool.cpp b/src/vm/comthreadpool.cpp
new file mode 100644
index 0000000000..98616526ef
--- /dev/null
+++ b/src/vm/comthreadpool.cpp
@@ -0,0 +1,1018 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*============================================================
+**
+** Header: COMThreadPool.cpp
+**
+** Purpose: Native methods on System.ThreadPool
+** and its inner classes
+**
+**
+===========================================================*/
+
+/********************************************************************************************************************/
+#include "common.h"
+#include "comdelegate.h"
+#include "comthreadpool.h"
+#include "threadpoolrequest.h"
+#include "win32threadpool.h"
+#include "class.h"
+#include "object.h"
+#include "field.h"
+#include "excep.h"
+#include "security.h"
+#include "eeconfig.h"
+#include "corhost.h"
+#include "nativeoverlapped.h"
+#include "comsynchronizable.h"
+#ifdef FEATURE_REMOTING
+#include "crossdomaincalls.h"
+#else
+#include "callhelpers.h"
+#endif
+#include "appdomain.inl"
+/*****************************************************************************************************/
+#ifdef _DEBUG
+void LogCall(MethodDesc* pMD, LPCUTF8 api)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ LPCUTF8 cls = pMD->GetMethodTable()->GetDebugClassName();
+ LPCUTF8 name = pMD->GetName();
+
+ LOG((LF_THREADPOOL,LL_INFO1000,"%s: ", api));
+ LOG((LF_THREADPOOL, LL_INFO1000,
+ " calling %s.%s\n", cls, name));
+}
+#else
+#define LogCall(pMd,api)
+#endif
+
+VOID
+AcquireDelegateInfo(DelegateInfo *pDelInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+VOID
+ReleaseDelegateInfo(DelegateInfo *pDelInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ // The release methods of holders can be called with preemptive GC enabled. Ensure we're in cooperative mode
+ // before calling pDelInfo->Release(), since that requires coop mode.
+ GCX_COOP();
+
+ pDelInfo->Release();
+ ThreadpoolMgr::RecycleMemory( pDelInfo, ThreadpoolMgr::MEMTYPE_DelegateInfo );
+}
+
+//typedef Holder<DelegateInfo *, AcquireDelegateInfo, ReleaseDelegateInfo> DelegateInfoHolder;
+
+typedef Wrapper<DelegateInfo *, AcquireDelegateInfo, ReleaseDelegateInfo> DelegateInfoHolder;
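+// For illustration (standard C++, not CoreCLR code): Wrapper pairs an acquire
+// with a guaranteed release on scope exit, much like a unique_ptr with a
+// custom deleter; SuppressRelease() plays the role of release():
+//
+//   #include <memory>
+//
+//   using DelegateInfoSketch =
+//       std::unique_ptr<DelegateInfo, void (*)(DelegateInfo*)>;
+//
+//   DelegateInfoSketch info(AcquireSketch(), ReleaseSketch); // hypothetical fns
+//   // ... populate the object; any throw here runs ReleaseSketch ...
+//   DelegateInfo* owned = info.release(); // success: ownership handed off,
+//                                         // mirroring SuppressRelease() below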
+
+/*****************************************************************************************************/
+// Caller has to GC-protect the OBJECTREFs being passed in
+DelegateInfo *DelegateInfo::MakeDelegateInfo(AppDomain *pAppDomain,
+ OBJECTREF *state,
+ OBJECTREF *waitEvent,
+ OBJECTREF *registeredWaitHandle)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ if (state != NULL || waitEvent != NULL || registeredWaitHandle != NULL)
+ {
+ MODE_COOPERATIVE;
+ }
+ else
+ {
+ MODE_ANY;
+ }
+ PRECONDITION(state == NULL || IsProtectedByGCFrame(state));
+ PRECONDITION(waitEvent == NULL || IsProtectedByGCFrame(waitEvent));
+ PRECONDITION(registeredWaitHandle == NULL || IsProtectedByGCFrame(registeredWaitHandle));
+ PRECONDITION(CheckPointer(pAppDomain));
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ // If there were any DelegateInfos waiting to be released, they'll get flushed now
+ ThreadpoolMgr::FlushQueueOfTimerInfos();
+
+ DelegateInfoHolder delegateInfo = (DelegateInfo*) ThreadpoolMgr::GetRecycledMemory(ThreadpoolMgr::MEMTYPE_DelegateInfo);
+
+ delegateInfo->m_appDomainId = pAppDomain->GetId();
+
+ if (state != NULL)
+ delegateInfo->m_stateHandle = pAppDomain->CreateHandle(*state);
+ else
+ delegateInfo->m_stateHandle = NULL;
+
+ if (waitEvent != NULL)
+ delegateInfo->m_eventHandle = pAppDomain->CreateHandle(*waitEvent);
+ else
+ delegateInfo->m_eventHandle = NULL;
+
+ if (registeredWaitHandle != NULL)
+ delegateInfo->m_registeredWaitHandle = pAppDomain->CreateHandle(*registeredWaitHandle);
+ else
+ delegateInfo->m_registeredWaitHandle = NULL;
+
+ delegateInfo->m_overridesCount = 0;
+ delegateInfo->m_hasSecurityInfo = FALSE;
+
+ delegateInfo.SuppressRelease();
+
+ return delegateInfo;
+}
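+// Usage note: the callers in this file (CorRegisterWaitForSingleObject and
+// CreateAppDomainTimer) follow the same shape - build the DelegateInfo under
+// a holder, hand it to ThreadpoolMgr, and call SuppressRelease() only once
+// the native registration has taken ownership.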
+
+/*****************************************************************************************************/
+FCIMPL2(FC_BOOL_RET, ThreadPoolNative::CorSetMaxThreads,DWORD workerThreads, DWORD completionPortThreads)
+{
+ FCALL_CONTRACT;
+
+ BOOL bRet = FALSE;
+ HELPER_METHOD_FRAME_BEGIN_RET_0(); // Eventually calls BEGIN_SO_INTOLERANT_CODE_NOTHROW
+
+ bRet = ThreadpoolMgr::SetMaxThreads(workerThreads,completionPortThreads);
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(bRet);
+}
+FCIMPLEND
+
+/*****************************************************************************************************/
+FCIMPL2(VOID, ThreadPoolNative::CorGetMaxThreads,DWORD* workerThreads, DWORD* completionPortThreads)
+{
+ FCALL_CONTRACT;
+
+ ThreadpoolMgr::GetMaxThreads(workerThreads,completionPortThreads);
+ return;
+}
+FCIMPLEND
+
+/*****************************************************************************************************/
+FCIMPL2(FC_BOOL_RET, ThreadPoolNative::CorSetMinThreads,DWORD workerThreads, DWORD completionPortThreads)
+{
+ FCALL_CONTRACT;
+
+ BOOL bRet = FALSE;
+ HELPER_METHOD_FRAME_BEGIN_RET_0(); // Eventually calls BEGIN_SO_INTOLERANT_CODE_NOTHROW
+
+ bRet = ThreadpoolMgr::SetMinThreads(workerThreads,completionPortThreads);
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(bRet);
+}
+FCIMPLEND
+
+/*****************************************************************************************************/
+FCIMPL2(VOID, ThreadPoolNative::CorGetMinThreads,DWORD* workerThreads, DWORD* completionPortThreads)
+{
+ FCALL_CONTRACT;
+
+ ThreadpoolMgr::GetMinThreads(workerThreads,completionPortThreads);
+ return;
+}
+FCIMPLEND
+
+/*****************************************************************************************************/
+FCIMPL2(VOID, ThreadPoolNative::CorGetAvailableThreads,DWORD* workerThreads, DWORD* completionPortThreads)
+{
+ FCALL_CONTRACT;
+
+ ThreadpoolMgr::GetAvailableThreads(workerThreads,completionPortThreads);
+ return;
+}
+FCIMPLEND
+
+/*****************************************************************************************************/
+
+FCIMPL0(VOID, ThreadPoolNative::NotifyRequestProgress)
+{
+ FCALL_CONTRACT;
+
+ ThreadpoolMgr::NotifyWorkItemCompleted();
+
+ if (ThreadpoolMgr::ShouldAdjustMaxWorkersActive())
+ {
+ DangerousNonHostedSpinLockTryHolder tal(&ThreadpoolMgr::ThreadAdjustmentLock);
+ if (tal.Acquired())
+ {
+ HELPER_METHOD_FRAME_BEGIN_0();
+ ThreadpoolMgr::AdjustMaxWorkersActive();
+ HELPER_METHOD_FRAME_END();
+ }
+ else
+ {
+ // the lock is held by someone else, so they will take care of this for us.
+ }
+ }
+}
+FCIMPLEND
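+// For illustration (standard C++, not CoreCLR code): the try-acquire-or-skip
+// idiom above - "if someone else already holds the adjustment lock, they will
+// do the adjustment" - looks like:
+//
+//   #include <mutex>
+//
+//   std::mutex g_adjustmentLock;          // stand-in for ThreadAdjustmentLock
+//   void AdjustSketch();                  // hypothetical adjustment work
+//
+//   void MaybeAdjust()
+//   {
+//       if (g_adjustmentLock.try_lock())  // never blocks
+//       {
+//           AdjustSketch();
+//           g_adjustmentLock.unlock();
+//       }
+//       // else: the current lock holder performs the adjustment for us
+//   }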
+
+FCIMPL1(VOID, ThreadPoolNative::ReportThreadStatus, CLR_BOOL isWorking)
+{
+ FCALL_CONTRACT;
+ ThreadpoolMgr::ReportThreadStatus(isWorking);
+}
+FCIMPLEND
+
+FCIMPL0(FC_BOOL_RET, ThreadPoolNative::NotifyRequestComplete)
+{
+ FCALL_CONTRACT;
+
+ ThreadpoolMgr::NotifyWorkItemCompleted();
+
+ //
+ // Now we may need to do one or both of the following: reset the thread's state, and/or
+ // perform a "worker thread adjustment" (i.e., invoke Hill Climbing). We try to avoid these
+ // because they require an expensive helper method frame. So, before setting up the frame,
+ // we first try a minimal thread reset, check whether it covered everything that was needed,
+ // and ask ThreadpoolMgr whether a thread adjustment is due.
+ //
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+
+ INT32 priority = pThread->ResetManagedThreadObjectInCoopMode(ThreadNative::PRIORITY_NORMAL);
+
+ bool needReset =
+ priority != ThreadNative::PRIORITY_NORMAL ||
+ pThread->HasThreadStateNC(Thread::TSNC_SOWorkNeeded) ||
+ !pThread->IsBackground() ||
+ pThread->HasCriticalRegion() ||
+ pThread->HasThreadAffinity();
+
+ bool shouldAdjustWorkers = ThreadpoolMgr::ShouldAdjustMaxWorkersActive();
+
+ //
+ // If it's time for a thread adjustment, try to get the lock. This is just a "try," it won't block,
+ // so it's ok to do this in cooperative mode. If we can't get the lock, then some other thread is
+ // already doing the thread adjustment, so we needn't bother.
+ //
+ DangerousNonHostedSpinLockTryHolder tal(&ThreadpoolMgr::ThreadAdjustmentLock, shouldAdjustWorkers);
+ if (!tal.Acquired())
+ shouldAdjustWorkers = false;
+
+ if (needReset || shouldAdjustWorkers)
+ {
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ if (shouldAdjustWorkers)
+ {
+ ThreadpoolMgr::AdjustMaxWorkersActive();
+ tal.Release();
+ }
+
+ if (needReset)
+ pThread->InternalReset (FALSE, TRUE, TRUE, FALSE);
+
+ HELPER_METHOD_FRAME_END();
+ }
+
+ //
+ // Finally, ask ThreadpoolMgr whether it's ok to keep running work on this thread. Maybe Hill Climbing
+ // wants this thread back.
+ //
+ BOOL result = ThreadpoolMgr::ShouldWorkerKeepRunning() ? TRUE : FALSE;
+ FC_RETURN_BOOL(result);
+}
+FCIMPLEND
+
+
+/*****************************************************************************************************/
+
+void QCALLTYPE ThreadPoolNative::InitializeVMTp(CLR_BOOL* pEnableWorkerTracking)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ ThreadpoolMgr::EnsureInitialized();
+ *pEnableWorkerTracking = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_EnableWorkerTracking) ? TRUE : FALSE;
+ END_QCALL;
+}
+
+
+FCIMPL0(FC_BOOL_RET, ThreadPoolNative::IsThreadPoolHosted)
+{
+ FCALL_CONTRACT;
+
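+ // FCUnique emits a unique marker so this otherwise-trivial body is not
+ // folded together with identical FCALL bodies by the compiler/linker
+ // (assumed purpose, per the usual FCUnique idiom).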
+ FCUnique(0x22);
+
+ FC_RETURN_BOOL(ThreadpoolMgr::IsThreadPoolHosted());
+}
+FCIMPLEND
+
+/*****************************************************************************************************/
+
+struct RegisterWaitForSingleObjectCallback_Args
+{
+ DelegateInfo *delegateInfo;
+ BOOLEAN TimerOrWaitFired;
+};
+
+static VOID
+RegisterWaitForSingleObjectCallback_Worker(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF orState = NULL;
+
+ GCPROTECT_BEGIN( orState );
+
+ RegisterWaitForSingleObjectCallback_Args *args = (RegisterWaitForSingleObjectCallback_Args *) ptr;
+ orState = ObjectFromHandle(((DelegateInfo*) args->delegateInfo)->m_stateHandle);
+
+#ifdef _DEBUG
+ MethodDesc *pMeth = MscorlibBinder::GetMethod(METHOD__TPWAITORTIMER_HELPER__PERFORM_WAITORTIMER_CALLBACK);
+ LogCall(pMeth,"RWSOCallback");
+#endif
+
+ // Caution: the args are not protected; we have to guarantee there's no GC from here until
+ // the managed call happens.
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__TPWAITORTIMER_HELPER__PERFORM_WAITORTIMER_CALLBACK);
+ DECLARE_ARGHOLDER_ARRAY(arg, 2);
+ arg[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(orState);
+ arg[ARGNUM_1] = DWORD_TO_ARGHOLDER(args->TimerOrWaitFired);
+
+ // Call the method...
+ CALL_MANAGED_METHOD_NORET(arg);
+
+ GCPROTECT_END();
+}
+
+
+void ResetThreadSecurityState(Thread* pThread)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (pThread)
+ {
+ pThread->ResetSecurityInfo();
+ }
+}
+
+// this holder resets our thread's security state
+typedef Holder<Thread*, DoNothing<Thread*>, ResetThreadSecurityState> ThreadSecurityStateHolder;
+
+VOID NTAPI RegisterWaitForSingleObjectCallback(PVOID delegateInfo, BOOLEAN TimerOrWaitFired)
+{
+ Thread* pThread = GetThread();
+ if (pThread == NULL)
+ {
+ ClrFlsSetThreadType(ThreadType_Threadpool_Worker);
+ pThread = SetupThreadNoThrow();
+ if (pThread == NULL) {
+ return;
+ }
+ }
+
+ CONTRACTL
+ {
+ MODE_PREEMPTIVE; // Worker thread will be in preempt mode. We switch to coop below.
+ THROWS;
+ GC_TRIGGERS;
+
+ PRECONDITION(CheckPointer(delegateInfo));
+ }
+ CONTRACTL_END;
+
+ // This thread should not have any locks held at entry point.
+ _ASSERTE(pThread->m_dwLockCount == 0);
+
+ GCX_COOP();
+
+ // this holder resets our thread's security state when exiting this scope
+ ThreadSecurityStateHolder secState(pThread);
+
+ RegisterWaitForSingleObjectCallback_Args args = { ((DelegateInfo*) delegateInfo), TimerOrWaitFired };
+
+ ManagedThreadBase::ThreadPool(((DelegateInfo*) delegateInfo)->m_appDomainId, RegisterWaitForSingleObjectCallback_Worker, &args);
+
+ // We should have released all locks.
+ _ASSERTE(g_fEEShutDown || pThread->m_dwLockCount == 0 || pThread->m_fRudeAborted);
+ return;
+}
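+// For illustration: this wait callback, BindIoCompletionCallbackStubEx, and
+// AppDomainTimerCallback below all share one prologue - a native threadpool
+// thread may arrive with no runtime Thread attached, so each callback
+// attaches one on first use:
+//
+//   Thread* EnsureRuntimeThread()     // sketch of the shared prologue
+//   {
+//       Thread* p = GetThread();
+//       if (p == NULL)
+//       {
+//           ClrFlsSetThreadType(ThreadType_Threadpool_Worker);
+//           p = SetupThreadNoThrow(); // may fail under OOM
+//       }
+//       return p;                     // NULL means "give up quietly"
+//   }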
+
+void ThreadPoolNative::Init()
+{
+
+}
+
+
+FCIMPL7(LPVOID, ThreadPoolNative::CorRegisterWaitForSingleObject,
+ Object* waitObjectUNSAFE,
+ Object* stateUNSAFE,
+ UINT32 timeout,
+ CLR_BOOL executeOnlyOnce,
+ Object* registeredWaitObjectUNSAFE,
+ StackCrawlMark* stackMark,
+ CLR_BOOL compressStack)
+{
+ FCALL_CONTRACT;
+
+ HANDLE handle = 0;
+ struct _gc
+ {
+ WAITHANDLEREF waitObject;
+ OBJECTREF state;
+ OBJECTREF registeredWaitObject;
+ } gc;
+ gc.waitObject = (WAITHANDLEREF) ObjectToOBJECTREF(waitObjectUNSAFE);
+ gc.state = (OBJECTREF) stateUNSAFE;
+ gc.registeredWaitObject = (OBJECTREF) registeredWaitObjectUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc); // Eventually calls BEGIN_SO_INTOLERANT_CODE_NOTHROW
+
+ if(gc.waitObject == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Obj"));
+
+ _ASSERTE(gc.registeredWaitObject != NULL);
+
+ ULONG flag = executeOnlyOnce ? WAIT_SINGLE_EXECUTION | WAIT_FREE_CONTEXT : WAIT_FREE_CONTEXT;
+
+ HANDLE hWaitHandle = gc.waitObject->GetWaitHandle();
+ _ASSERTE(hWaitHandle);
+
+ Thread* pCurThread = GetThread();
+ _ASSERTE( pCurThread);
+
+ AppDomain* appDomain = pCurThread->GetDomain();
+ _ASSERTE(appDomain);
+
+ DelegateInfoHolder delegateInfo = DelegateInfo::MakeDelegateInfo(appDomain,
+ &gc.state,
+ (OBJECTREF *)&gc.waitObject,
+ &gc.registeredWaitObject);
+
+ if (compressStack)
+ {
+ delegateInfo->SetThreadSecurityInfo( pCurThread, stackMark );
+ }
+
+
+
+ if (!(ThreadpoolMgr::RegisterWaitForSingleObject(&handle,
+ hWaitHandle,
+ RegisterWaitForSingleObjectCallback,
+ (PVOID) delegateInfo,
+ (ULONG) timeout,
+ flag)))
+
+ {
+ _ASSERTE(GetLastError() != ERROR_CALL_NOT_IMPLEMENTED);
+
+ COMPlusThrowWin32();
+ }
+
+ delegateInfo.SuppressRelease();
+ HELPER_METHOD_FRAME_END();
+ return (LPVOID) handle;
+}
+FCIMPLEND
+
+
+VOID QueueUserWorkItemManagedCallback(PVOID pArg)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ _ASSERTE(NULL != pArg);
+
+ // This thread should not have any locks held at entry point.
+ _ASSERTE(GetThread()->m_dwLockCount == 0);
+
+ bool* wasNotRecalled = (bool*)pArg;
+
+ MethodDescCallSite dispatch(METHOD__TP_WAIT_CALLBACK__PERFORM_WAIT_CALLBACK);
+ *wasNotRecalled = dispatch.Call_RetBool(NULL);
+}
+
+
+BOOL QCALLTYPE ThreadPoolNative::RequestWorkerThread()
+{
+ QCALL_CONTRACT;
+
+ BOOL res = FALSE;
+
+ BEGIN_QCALL;
+
+ ThreadpoolMgr::SetAppDomainRequestsActive();
+
+ res = ThreadpoolMgr::QueueUserWorkItem(NULL,
+ NULL,
+ 0,
+ FALSE);
+ if (!res)
+ {
+ if (GetLastError() == ERROR_CALL_NOT_IMPLEMENTED)
+ COMPlusThrow(kNotSupportedException);
+ else
+ COMPlusThrowWin32();
+ }
+
+ END_QCALL;
+ return res;
+}
+
+
+/********************************************************************************************************************/
+
+FCIMPL2(FC_BOOL_RET, ThreadPoolNative::CorUnregisterWait, LPVOID WaitHandle, Object* objectToNotify)
+{
+ FCALL_CONTRACT;
+
+ BOOL retVal = false;
+ SAFEHANDLEREF refSH = (SAFEHANDLEREF) ObjectToOBJECTREF(objectToNotify);
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refSH); // Eventually calls BEGIN_SO_INTOLERANT_CODE_NOTHROW
+
+ HANDLE hWait = (HANDLE) WaitHandle;
+ HANDLE hObjectToNotify = NULL;
+
+ ThreadpoolMgr::WaitInfo *pWaitInfo = (ThreadpoolMgr::WaitInfo *)hWait;
+ _ASSERTE(pWaitInfo != NULL);
+
+ ThreadpoolMgr::WaitInfoHolder wiHolder(NULL);
+
+ if (refSH != NULL)
+ {
+ // Create a GCHandle in the WaitInfo, so that it can hold on to the safe handle
+ pWaitInfo->ExternalEventSafeHandle = GetAppDomain()->CreateHandle(NULL);
+ pWaitInfo->handleOwningAD = GetAppDomain()->GetId();
+
+ // Holder will now release the objecthandle in the face of exceptions
+ wiHolder.Assign(pWaitInfo);
+
+ // Store SafeHandle in object handle. Holder will now release both safehandle and objecthandle
+ // in case of exceptions
+ StoreObjectInHandle(pWaitInfo->ExternalEventSafeHandle, refSH);
+
+ // Acquire safe handle to examine its handle, then release.
+ SafeHandleHolder shHolder(&refSH);
+
+ if (refSH->GetHandle() == INVALID_HANDLE_VALUE)
+ {
+ hObjectToNotify = INVALID_HANDLE_VALUE;
+ // We no longer need the ObjectHandle, the refcount on the safehandle, etc.
+ wiHolder.Release();
+ _ASSERTE(pWaitInfo->ExternalEventSafeHandle == NULL);
+ }
+ }
+
+ _ASSERTE(hObjectToNotify == NULL || hObjectToNotify == INVALID_HANDLE_VALUE);
+
+ // When hObjectToNotify is NULL, ExternalEventSafeHandle contains the event to notify (if it is non-NULL).
+ // When hObjectToNotify is INVALID_HANDLE_VALUE, UnregisterWaitEx blocks until disposal is complete.
+ retVal = ThreadpoolMgr::UnregisterWaitEx(hWait, hObjectToNotify);
+
+ if (retVal)
+ wiHolder.SuppressRelease();
+
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(retVal);
+}
+FCIMPLEND
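+// Usage note: hObjectToNotify above ends up in exactly one of two states -
+// NULL (no SafeHandle was passed, or the event travels via
+// ExternalEventSafeHandle instead) or INVALID_HANDLE_VALUE (ask
+// UnregisterWaitEx to block until the wait is fully disposed); see the assert
+// and comment before the UnregisterWaitEx call.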
+
+/********************************************************************************************************************/
+FCIMPL1(void, ThreadPoolNative::CorWaitHandleCleanupNative, LPVOID WaitHandle)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0(); // Eventually calls BEGIN_SO_INTOLERANT_CODE_NOTHROW
+
+ HANDLE hWait = (HANDLE)WaitHandle;
+ ThreadpoolMgr::WaitHandleCleanup(hWait);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+/********************************************************************************************************************/
+
+/********************************************************************************************************************/
+
+struct BindIoCompletion_Args
+{
+ DWORD ErrorCode;
+ DWORD numBytesTransferred;
+ LPOVERLAPPED lpOverlapped;
+ BOOL *pfProcessed;
+};
+
+void SetAsyncResultProperties(
+ OVERLAPPEDDATAREF overlapped,
+ DWORD dwErrorCode,
+ DWORD dwNumBytes
+)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ ASYNCRESULTREF asyncResult = overlapped->m_asyncResult;
+ // Only FileStream is expected to have a null delegate, in which
+ // case we do the necessary book-keeping here. However, for robustness,
+ // we make sure that the asyncResult is indeed an instance of
+ // FileStreamAsyncResult.
+ if (asyncResult->GetMethodTable() == g_pAsyncFileStream_AsyncResultClass)
+ {
+ // Handle reading from & writing to closed pipes. It's possible for
+ // an async read on a pipe to be issued and then the pipe is closed,
+ // returning this error. This may very well be necessary. -BG
+ if (dwErrorCode == ERROR_BROKEN_PIPE || dwErrorCode == ERROR_NO_DATA)
+ dwErrorCode = 0;
+ asyncResult->SetErrorCode(dwErrorCode);
+ asyncResult->SetNumBytes(dwNumBytes);
+ asyncResult->SetCompletedAsynchronously();
+ asyncResult->SetIsComplete();
+
+ // Signal the event - the OS does not do this for us.
+ WAITHANDLEREF waitHandle = asyncResult->GetWaitHandle();
+ HANDLE h = waitHandle->GetWaitHandle();
+ if ((h != NULL) && (h != (HANDLE) -1))
+ UnsafeSetEvent(h);
+ }
+}
+
+VOID BindIoCompletionCallBack_Worker(LPVOID args)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ DWORD ErrorCode = ((BindIoCompletion_Args *)args)->ErrorCode;
+ DWORD numBytesTransferred = ((BindIoCompletion_Args *)args)->numBytesTransferred;
+ LPOVERLAPPED lpOverlapped = ((BindIoCompletion_Args *)args)->lpOverlapped;
+
+ OVERLAPPEDDATAREF overlapped = ObjectToOVERLAPPEDDATAREF(OverlappedDataObject::GetOverlapped(lpOverlapped));
+
+ GCPROTECT_BEGIN(overlapped);
+ *(((BindIoCompletion_Args *)args)->pfProcessed) = TRUE;
+ // we set processed to TRUE, now it's our responsibility to guarantee proper cleanup
+
+#ifdef _DEBUG
+ MethodDesc *pMeth = MscorlibBinder::GetMethod(METHOD__IOCB_HELPER__PERFORM_IOCOMPLETION_CALLBACK);
+ LogCall(pMeth,"IOCallback");
+#endif
+
+ if (overlapped->m_iocb != NULL)
+ {
+ // Caution: the args are not protected; we have to guarantee there's no GC from here until
+ // the managed call happens.
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__IOCB_HELPER__PERFORM_IOCOMPLETION_CALLBACK);
+ DECLARE_ARGHOLDER_ARRAY(arg, 3);
+ arg[ARGNUM_0] = DWORD_TO_ARGHOLDER(ErrorCode);
+ arg[ARGNUM_1] = DWORD_TO_ARGHOLDER(numBytesTransferred);
+ arg[ARGNUM_2] = PTR_TO_ARGHOLDER(lpOverlapped);
+
+ // Call the method...
+ CALL_MANAGED_METHOD_NORET(arg);
+ }
+ else
+ { // no user delegate to call back
+ _ASSERTE((overlapped->m_iocbHelper == NULL) || !"This is benign, but should be optimized");
+
+ // we cannot do this at threadpool initialization time since mscorlib may not have been loaded
+ if (!g_pAsyncFileStream_AsyncResultClass)
+ {
+ g_pAsyncFileStream_AsyncResultClass = MscorlibBinder::GetClass(CLASS__FILESTREAM_ASYNCRESULT);
+ }
+
+ SetAsyncResultProperties(overlapped, ErrorCode, numBytesTransferred);
+ }
+ GCPROTECT_END();
+}
+
+
+void __stdcall BindIoCompletionCallbackStubEx(DWORD ErrorCode,
+ DWORD numBytesTransferred,
+ LPOVERLAPPED lpOverlapped,
+ BOOL setStack)
+{
+ Thread* pThread = GetThread();
+ if (pThread == NULL)
+ {
+ // TODO: how do we notify user of OOM here?
+ ClrFlsSetThreadType(ThreadType_Threadpool_Worker);
+ pThread = SetupThreadNoThrow();
+ if (pThread == NULL) {
+ return;
+ }
+ }
+
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ // This thread should not have any locks held at entry point.
+ _ASSERTE(pThread->m_dwLockCount == 0);
+
+ LOG((LF_INTEROP, LL_INFO10000, "In IO_CallBackStub thread 0x%x retCode 0x%x, overlap 0x%x\n", pThread, ErrorCode, lpOverlapped));
+
+ GCX_COOP();
+
+ // NOTE: there is a potential race between the time we retrieve the app domain pointer,
+ // and the time which this thread enters the domain.
+ //
+ // To solve the race, we rely on the fact that there is a thread sync (via GC)
+ // between releasing an app domain's handle, and destroying the app domain. Thus
+ // it is important that we not go into preemptive gc mode in that window.
+ //
+
+ // IMPORTANT: do not GC-protect overlapped here - it belongs to another appdomain,
+ // so if it stops being pinned it should be able to go away.
+ OVERLAPPEDDATAREF overlapped = ObjectToOVERLAPPEDDATAREF(OverlappedDataObject::GetOverlapped(lpOverlapped));
+ AppDomainFromIDHolder appDomain(ADID(overlapped->GetAppDomainId()), TRUE);
+ BOOL fProcessed = FALSE;
+ if (!appDomain.IsUnloaded())
+ {
+ // this holder resets our thread's security state when exiting this scope,
+ // but only if setStack is TRUE.
+ Thread* pHolderThread = NULL;
+ if (setStack)
+ {
+ pHolderThread = pThread;
+ }
+
+ ThreadSecurityStateHolder secState(pHolderThread);
+
+ BindIoCompletion_Args args = {ErrorCode, numBytesTransferred, lpOverlapped, &fProcessed};
+ appDomain.Release();
+ ManagedThreadBase::ThreadPool(ADID(overlapped->GetAppDomainId()), BindIoCompletionCallBack_Worker, &args);
+ }
+
+
+
+
+ LOG((LF_INTEROP, LL_INFO10000, "Leaving IO_CallBackStub thread 0x%x retCode 0x%x, overlap 0x%x\n", pThread, ErrorCode, lpOverlapped));
+ // We should have released all locks.
+ _ASSERTE(g_fEEShutDown || pThread->m_dwLockCount == 0 || pThread->m_fRudeAborted);
+ return;
+}
+
+void WINAPI BindIoCompletionCallbackStub(DWORD ErrorCode,
+ DWORD numBytesTransferred,
+ LPOVERLAPPED lpOverlapped)
+{
+ WRAPPER_NO_CONTRACT;
+ BindIoCompletionCallbackStubEx(ErrorCode, numBytesTransferred, lpOverlapped, TRUE);
+
+#ifndef FEATURE_PAL
+ extern Volatile<ULONG> g_fCompletionPortDrainNeeded;
+
+ Thread *pThread = GetThread();
+ if (g_fCompletionPortDrainNeeded && pThread)
+ {
+ // We have started draining the completion port.
+ // The next job picked up by this thread will come after our special marker.
+ if (!pThread->IsCompletionPortDrained())
+ {
+ pThread->MarkCompletionPortDrained();
+ }
+ }
+#endif // !FEATURE_PAL
+}
+
+FCIMPL1(FC_BOOL_RET, ThreadPoolNative::CorBindIoCompletionCallback, HANDLE fileHandle)
+{
+ FCALL_CONTRACT;
+
+ BOOL retVal = FALSE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0(); // Eventually calls BEGIN_SO_INTOLERANT_CODE_NOTHROW
+
+ HANDLE hFile = (HANDLE) fileHandle;
+ DWORD errCode = 0;
+
+ retVal = ThreadpoolMgr::BindIoCompletionCallback(hFile,
+ BindIoCompletionCallbackStub,
+ 0, // reserved, must be 0
+ OUT errCode);
+ if (!retVal)
+ {
+ if (errCode == ERROR_CALL_NOT_IMPLEMENTED)
+ COMPlusThrow(kPlatformNotSupportedException);
+ else
+ {
+ SetLastError(errCode);
+ COMPlusThrowWin32();
+ }
+ }
+
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(retVal);
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, ThreadPoolNative::CorPostQueuedCompletionStatus, LPOVERLAPPED lpOverlapped)
+{
+ FCALL_CONTRACT;
+
+ OVERLAPPEDDATAREF overlapped = ObjectToOVERLAPPEDDATAREF(OverlappedDataObject::GetOverlapped(lpOverlapped));
+
+ BOOL res = FALSE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(overlapped); // Eventually calls BEGIN_SO_INTOLERANT_CODE_NOTHROW
+
+ // OS doesn't signal handle, so do it here
+ overlapped->Internal = 0;
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadPoolIOEnqueue))
+ FireEtwThreadPoolIOEnqueue(lpOverlapped, OBJECTREFToObject(overlapped), false, GetClrInstanceId());
+
+ res = ThreadpoolMgr::PostQueuedCompletionStatus(lpOverlapped,
+ BindIoCompletionCallbackStub);
+
+ if (!res)
+ {
+ if (GetLastError() == ERROR_CALL_NOT_IMPLEMENTED)
+ COMPlusThrow(kPlatformNotSupportedException);
+ else
+ COMPlusThrowWin32();
+ }
+
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(res);
+}
+FCIMPLEND
+
+
+/********************************************************************************************************************/
+
+
+/******************************************************************************************/
+/* */
+/* Timer Functions */
+/* */
+/******************************************************************************************/
+
+void AppDomainTimerCallback_Worker(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ MethodDesc *pMeth = MscorlibBinder::GetMethod(METHOD__TIMER_QUEUE__APPDOMAIN_TIMER_CALLBACK);
+ LogCall(pMeth,"AppDomainTimerCallback");
+#endif
+
+ MethodDescCallSite(METHOD__TIMER_QUEUE__APPDOMAIN_TIMER_CALLBACK).Call(NULL);
+}
+
+VOID WINAPI AppDomainTimerCallback(PVOID delegateInfo, BOOLEAN timerOrWaitFired)
+{
+ Thread* pThread = GetThread();
+ if (pThread == NULL)
+ {
+ // TODO: how do we notify user of OOM here?
+ ClrFlsSetThreadType(ThreadType_Threadpool_Worker);
+ pThread = SetupThreadNoThrow();
+ if (pThread == NULL) {
+ return;
+ }
+ }
+
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+
+ PRECONDITION(CheckPointer(delegateInfo));
+ }
+ CONTRACTL_END;
+
+ // This thread should not have any locks held at entry point.
+ _ASSERTE(pThread->m_dwLockCount == 0);
+
+ GCX_COOP();
+
+ {
+ ThreadSecurityStateHolder secState(pThread);
+ ManagedThreadBase::ThreadPool(((DelegateInfo*)delegateInfo)->m_appDomainId, AppDomainTimerCallback_Worker, NULL);
+ }
+
+ // We should have released all locks.
+ _ASSERTE(g_fEEShutDown || pThread->m_dwLockCount == 0 || pThread->m_fRudeAborted);
+}
+
+HANDLE QCALLTYPE AppDomainTimerNative::CreateAppDomainTimer(INT32 dueTime)
+{
+ QCALL_CONTRACT;
+
+ HANDLE hTimer = NULL;
+ BEGIN_QCALL;
+
+ _ASSERTE(dueTime >= 0);
+
+ AppDomain* pAppDomain = GetThread()->GetDomain();
+ ADID adid = pAppDomain->GetId();
+
+ DelegateInfoHolder delegateInfo = DelegateInfo::MakeDelegateInfo(
+ pAppDomain,
+ NULL,
+ NULL,
+ NULL);
+
+ BOOL res = ThreadpoolMgr::CreateTimerQueueTimer(
+ &hTimer,
+ (WAITORTIMERCALLBACK)AppDomainTimerCallback,
+ (PVOID)delegateInfo,
+ (ULONG)dueTime,
+ (ULONG)-1 /* this timer doesn't repeat */,
+ 0 /* no flags */);
+
+ if (!res)
+ {
+ if (GetLastError() == ERROR_CALL_NOT_IMPLEMENTED)
+ COMPlusThrow(kNotSupportedException);
+ else
+ COMPlusThrowWin32();
+ }
+ else
+ {
+ delegateInfo.SuppressRelease();
+ }
+
+ END_QCALL;
+ return hTimer;
+}
+
+BOOL QCALLTYPE AppDomainTimerNative::DeleteAppDomainTimer(HANDLE hTimer)
+{
+ QCALL_CONTRACT;
+
+ BOOL res = FALSE;
+ BEGIN_QCALL;
+
+ _ASSERTE(hTimer != NULL && hTimer != INVALID_HANDLE_VALUE);
+ res = ThreadpoolMgr::DeleteTimerQueueTimer(hTimer, NULL);
+
+ if (!res)
+ {
+ DWORD errorCode = ::GetLastError();
+ if (errorCode != ERROR_IO_PENDING)
+ COMPlusThrowWin32(HRESULT_FROM_WIN32(errorCode));
+ }
+
+ END_QCALL;
+ return res;
+}
+
+
+BOOL QCALLTYPE AppDomainTimerNative::ChangeAppDomainTimer(HANDLE hTimer, INT32 dueTime)
+{
+ QCALL_CONTRACT;
+
+ BOOL res = FALSE;
+ BEGIN_QCALL;
+
+ _ASSERTE(hTimer != NULL && hTimer != INVALID_HANDLE_VALUE);
+ _ASSERTE(dueTime >= 0);
+
+ res = ThreadpoolMgr::ChangeTimerQueueTimer(
+ hTimer,
+ (ULONG)dueTime,
+ (ULONG)-1 /* this timer doesn't repeat */);
+
+ if (!res)
+ COMPlusThrowWin32();
+
+ END_QCALL;
+ return res;
+}
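+// Usage note: all three QCALLs above treat the native timer as one-shot - the
+// (ULONG)-1 period passed to ThreadpoolMgr means "does not repeat" - so any
+// repetition is presumably driven from the managed timer queue by calling
+// ChangeAppDomainTimer with the next due time.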
diff --git a/src/vm/comthreadpool.h b/src/vm/comthreadpool.h
new file mode 100644
index 0000000000..45f01d69c7
--- /dev/null
+++ b/src/vm/comthreadpool.h
@@ -0,0 +1,84 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*============================================================
+**
+** Header: COMThreadPool.h
+**
+** Purpose: Native methods on System.ThreadPool
+** and its inner classes
+**
+**
+===========================================================*/
+
+#ifndef _COMTHREADPOOL_H
+#define _COMTHREADPOOL_H
+
+#include "delegateinfo.h"
+#include "nativeoverlapped.h"
+
+class ThreadPoolNative
+{
+
+public:
+
+ static void Init();
+
+ static FCDECL2(FC_BOOL_RET, CorSetMaxThreads, DWORD workerThreads, DWORD completionPortThreads);
+ static FCDECL2(VOID, CorGetMaxThreads, DWORD* workerThreads, DWORD* completionPortThreads);
+ static FCDECL2(FC_BOOL_RET, CorSetMinThreads, DWORD workerThreads, DWORD completionPortThreads);
+ static FCDECL2(VOID, CorGetMinThreads, DWORD* workerThreads, DWORD* completionPortThreads);
+ static FCDECL2(VOID, CorGetAvailableThreads, DWORD* workerThreads, DWORD* completionPortThreads);
+
+ static FCDECL0(VOID, NotifyRequestProgress);
+ static FCDECL0(FC_BOOL_RET, NotifyRequestComplete);
+
+ static void QCALLTYPE InitializeVMTp(CLR_BOOL* pEnableWorkerTracking);
+
+ static FCDECL1(void, ReportThreadStatus, CLR_BOOL isWorking);
+
+ static FCDECL0(FC_BOOL_RET, IsThreadPoolHosted);
+
+
+ static FCDECL7(LPVOID, CorRegisterWaitForSingleObject,
+ Object* waitObjectUNSAFE,
+ Object* stateUNSAFE,
+ UINT32 timeout,
+ CLR_BOOL executeOnlyOnce,
+ Object* registeredWaitObjectUNSAFE,
+ StackCrawlMark* stackMark,
+ CLR_BOOL compressStack);
+
+ static BOOL QCALLTYPE RequestWorkerThread();
+
+ static FCDECL1(FC_BOOL_RET, CorPostQueuedCompletionStatus, LPOVERLAPPED lpOverlapped);
+ static FCDECL2(FC_BOOL_RET, CorUnregisterWait, LPVOID WaitHandle, Object * objectToNotify);
+ static FCDECL1(void, CorWaitHandleCleanupNative, LPVOID WaitHandle);
+ static FCDECL1(FC_BOOL_RET, CorBindIoCompletionCallback, HANDLE fileHandle);
+};
+
+class AppDomainTimerNative
+{
+public:
+ static HANDLE QCALLTYPE CreateAppDomainTimer(INT32 dueTime);
+ static BOOL QCALLTYPE ChangeAppDomainTimer(HANDLE hTimer, INT32 dueTime);
+ static BOOL QCALLTYPE DeleteAppDomainTimer(HANDLE hTimer);
+};
+
+void ResetThreadSecurityState(Thread* pThread);
+VOID QueueUserWorkItemManagedCallback(PVOID pArg);
+void WINAPI BindIoCompletionCallbackStub(DWORD ErrorCode,
+ DWORD numBytesTransferred,
+ LPOVERLAPPED lpOverlapped);
+void SetAsyncResultProperties(
+ OVERLAPPEDDATAREF overlapped,
+ DWORD dwErrorCode,
+ DWORD dwNumBytes);
+
+// this holder resets our thread's security state
+typedef Holder<Thread*, DoNothing<Thread*>, ResetThreadSecurityState> ThreadSecurityStateHolder;
+
+#endif
diff --git a/src/vm/comtoclrcall.cpp b/src/vm/comtoclrcall.cpp
new file mode 100644
index 0000000000..42c551e244
--- /dev/null
+++ b/src/vm/comtoclrcall.cpp
@@ -0,0 +1,2075 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// ==++==
+//
+
+//
+// ==--==
+//
+// File: COMtoCLRCall.cpp
+//
+
+//
+// COM to CLR call support.
+//
+
+
+#include "common.h"
+
+#include "vars.hpp"
+#include "clrtypes.h"
+#include "stublink.h"
+#include "excep.h"
+#include "comtoclrcall.h"
+#include "cgensys.h"
+#include "method.hpp"
+#include "siginfo.hpp"
+#include "comcallablewrapper.h"
+#include "field.h"
+#include "security.h"
+#include "virtualcallstub.h"
+#include "dllimport.h"
+#include "mlinfo.h"
+#include "dbginterface.h"
+#include "mdaassistants.h"
+#include "sigbuilder.h"
+#include "notifyexternals.h"
+#include "comdelegate.h"
+#include "finalizerthread.h"
+
+#ifdef _DEBUG
+#define FORCEINLINE_NONDEBUG
+#else
+#define FORCEINLINE_NONDEBUG FORCEINLINE
+#endif
+
+#if !defined(DACCESS_COMPILE)
+
+#ifdef _TARGET_X86_
+static PCODE g_pGenericComCallStubFields = NULL;
+static PCODE g_pGenericComCallStub = NULL;
+#endif
+
+UINT64 FieldCallWorker(Thread *pThread, ComMethodFrame* pFrame);
+void FieldCallWorkerDebuggerWrapper(Thread *pThread, ComMethodFrame* pFrame);
+void FieldCallWorkerBody(Thread *pThread, ComMethodFrame* pFrame);
+extern "C" HRESULT STDCALL StubRareDisableHRWorker(Thread *pThread);
+
+#ifndef CROSSGEN_COMPILE
+//---------------------------------------------------------
+// void SetupGenericStubs()
+//
+// Throws on failure
+//---------------------------------------------------------
+static void SetupGenericStubs()
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _TARGET_X86_
+ if ( (g_pGenericComCallStubFields != NULL) && (g_pGenericComCallStub != NULL))
+ return;
+
+ StubHolder<Stub> candidateCall, candidateFields;
+
+ // Build each one. If we get a collision on replacement, favor the one that's
+ // already there. (We have lifetime issues with these, because they are used
+ // in every VTable without refcounting, so we don't want them to change
+ // underneath us).
+
+ // Allocate both stubs before setting - if an error occurs, we'll free the
+ // memory via holder objects and throw.
+ candidateCall = ComCall::CreateGenericComCallStub(FALSE/*notField*/);
+ candidateFields = ComCall::CreateGenericComCallStub(TRUE/*Field*/);
+
+ if (InterlockedCompareExchangeT<PCODE>(&g_pGenericComCallStub, candidateCall->GetEntryPoint(), 0) == 0)
+ candidateCall.SuppressRelease();
+
+ if (InterlockedCompareExchangeT<PCODE>(&g_pGenericComCallStubFields, candidateFields->GetEntryPoint(), 0) == 0)
+ candidateFields.SuppressRelease();
+#endif // _TARGET_X86_
+}
+
+#ifdef PROFILING_SUPPORTED
+// The sole purpose of this helper is to transition into preemptive mode
+// and then call the profiler transition callbacks. We can't use the GCX_PREEMP
+// in a function with SEH (such as COMToCLRWorkerBody()).
+NOINLINE
+void ProfilerTransitionCallbackHelper(MethodDesc* pMD, Thread* pThread, COR_PRF_TRANSITION_REASON reason)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(CheckPointer(pThread));
+ PRECONDITION(CORProfilerTrackTransitions());
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP_THREAD_EXISTS(pThread);
+
+ if (reason == COR_PRF_TRANSITION_CALL)
+ {
+ ProfilerUnmanagedToManagedTransitionMD(pMD, COR_PRF_TRANSITION_CALL);
+ }
+ else
+ {
+ ProfilerManagedToUnmanagedTransitionMD(pMD, COR_PRF_TRANSITION_RETURN);
+ }
+}
+#endif // PROFILING_SUPPORTED
+
+// Disable when calling into managed code from a place that fails via HRESULT
+extern "C" HRESULT STDCALL StubRareDisableHRWorker(Thread *pThread)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ HRESULT hr = S_OK;
+
+ // Do not add a CONTRACT here. We haven't set up SEH. We rely
+ // on HandleThreadAbort dealing with this situation properly.
+
+ // @todo - We need to probe here, but can't introduce destructors etc.
+ BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
+
+
+ // WARNING!!!!
+ // When we start executing here, we are actually in cooperative mode. But we
+ // haven't synchronized with the barrier to reentry yet. So we are in a highly
+ // dangerous mode. If we call managed code, we will potentially be active in
+ // the GC heap, even as GCs are occurring!
+
+ // Check for ShutDown scenario. This happens only when we have initiated shutdown
+ // and someone is trying to call in after the CLR is suspended. In that case, we
+ // must either raise an unmanaged exception or return an HRESULT, depending on the
+ // expectations of our caller.
+ if (!CanRunManagedCode())
+ {
+ hr = E_PROCESS_SHUTDOWN_REENTRY;
+ }
+ else
+ {
+ // We must do the following in this order, because otherwise we would be constructing
+ // the exception for the abort without synchronizing with the GC. Also, we have no
+ // CLR SEH set up, despite the fact that we may throw a ThreadAbortException.
+ pThread->RareDisablePreemptiveGC();
+ EX_TRY
+ {
+ pThread->HandleThreadAbort();
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ // should always be in coop mode here
+ _ASSERTE(pThread->PreemptiveGCDisabled());
+
+ END_CONTRACT_VIOLATION;
+
+ // Note that this code does not properly handle rare signatures that do not return an HRESULT
+
+ return hr;
+}
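+// For illustration (plain C++, not CoreCLR code): the EX_TRY/EX_CATCH above is
+// the standard "no exceptions across an HRESULT boundary" shape, with
+// ComErrorSketch standing in for the runtime's exception type:
+//
+//   struct ComErrorSketch { HRESULT hr; };
+//   void DoWorkThatMayThrow();        // hypothetical
+//
+//   HRESULT Boundary() noexcept
+//   {
+//       try { DoWorkThatMayThrow(); return S_OK; }
+//       catch (const ComErrorSketch& e) { return e.hr; } // map to HRESULT
+//       catch (...) { return E_FAIL; }
+//   }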
+
+#ifdef _TARGET_X86_
+
+// defined in i386\asmhelpers.asm
+extern "C" ARG_SLOT __fastcall COMToCLRDispatchHelper(
+ INT_PTR dwArgECX,
+ INT_PTR dwArgEDX,
+ PCODE pTarget,
+ PCODE pSecretArg,
+ INT_PTR *pInputStack,
+ WORD wOutputStackSlots,
+ UINT16 *pOutputStackOffsets,
+ Frame *pCurFrame);
+
+
+inline static void InvokeStub(ComCallMethodDesc *pCMD, PCODE pManagedTarget, OBJECTREF orThis, ComMethodFrame *pFrame, Thread *pThread,
+ UINT64* pRetValOut)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ INT_PTR *pInputStack = (INT_PTR *)pFrame->GetPointerToArguments();
+ PCODE pStubEntryPoint = pCMD->GetILStub();
+
+ INT_PTR EDX = (pCMD->m_wSourceSlotEDX == (UINT16)-1 ? NULL : pInputStack[pCMD->m_wSourceSlotEDX]);
+
+ ARG_SLOT retVal = 0;
+
+ // Managed code is generally "THROWS" and we have no exception handler here that the contract system can
+ // see. We ensure that we don't get exceptions here by generating a try/catch in the IL stub that covers
+ // any possible throw points, including all calls within the stub to helpers.
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonILStubWillNotThrow);
+
+ //
+ // NOTE! We do not use BEGIN_CALL_TO_MANAGEDEX around this call because we stayed in the SO_TOLERANT
+ // mode and COMToCLRDispatchHelper is responsible for pushing/popping the CPFH into the FS:0 chain.
+ //
+
+ *pRetValOut = COMToCLRDispatchHelper(
+ *((INT_PTR *) &orThis), // pArgECX
+ EDX, // pArgEDX
+ pStubEntryPoint, // pTarget
+ pManagedTarget, // pSecretArg
+ pInputStack, // pInputStack
+ pCMD->m_wStubStackSlotCount, // wOutputStackSlots
+ pCMD->m_pwStubStackSlotOffsets, // pOutputStackOffsets
+ pThread->GetFrame()); // pCurFrame
+}
+
+#else // _TARGET_X86_
+
+// defined in amd64\GenericComCallStubs.asm
+extern "C" ARG_SLOT COMToCLRDispatchHelper(
+ DWORD dwStackSlots,
+ ComMethodFrame *pFrame,
+ PCODE pTarget,
+ PCODE pR10,
+ INT_PTR pDangerousThis);
+
+
+inline static void InvokeStub(ComCallMethodDesc *pCMD, PCODE pManagedTarget, OBJECTREF orThis, ComMethodFrame *pFrame, Thread *pThread,
+ UINT64* pRetValOut)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ARG_SLOT retVal = 0;
+ PCODE pStubEntryPoint = pCMD->GetILStub();
+
+ INT_PTR dangerousThis;
+ *(OBJECTREF *)&dangerousThis = orThis;
+
+ DWORD dwStackSlots = pCMD->GetNumStackBytes() / STACK_ELEM_SIZE;
+
+ // Managed code is generally "THROWS" and we have no exception handler here that the contract system can
+ // see. We ensure that we don't get exceptions here by generating a try/catch in the IL stub that covers
+ // any possible throw points, including all calls within the stub to helpers.
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonILStubWillNotThrow);
+
+ //
+ // NOTE! We do not use BEGIN_CALL_TO_MANAGEDEX around this call because we stayed in the SO_TOLERANT
+ // mode and we have no need to push/pop FS:0 on non-x86 Windows platforms.
+ //
+
+ *pRetValOut = COMToCLRDispatchHelper(
+ dwStackSlots, // dwStackSlots
+ pFrame, // pFrame
+ pStubEntryPoint, // pTarget
+ pManagedTarget, // pSecretArg
+ dangerousThis); // pDangerousThis
+}
+
+#endif // _TARGET_X86_
+
+NOINLINE
+void InvokeStub_Hosted(ComCallMethodDesc *pCMD, PCODE pManagedTarget, OBJECTREF orThis, ComMethodFrame *pFrame, Thread *pThread,
+ UINT64* pRetValOut)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(CLRTaskHosted());
+
+ ReverseEnterRuntimeHolderNoThrow REHolder;
+ HRESULT hr = REHolder.AcquireNoThrow();
+ if (FAILED(hr))
+ {
+ *pRetValOut = hr;
+ return;
+ }
+
+ InvokeStub(pCMD, pManagedTarget, orThis, pFrame, pThread, pRetValOut);
+}
+
+#if defined(_MSC_VER) && !defined(_DEBUG)
+#pragma optimize("t", on) // optimize for speed
+#endif
+
+OBJECTREF COMToCLRGetObjectAndTarget_Delegate(ComCallWrapper * pWrap, PCODE * ppManagedTargetOut)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ DELEGATEREF pDelObj = (DELEGATEREF)pWrap->GetObjectRef();
+ _ASSERTE(pDelObj->GetMethodTable()->IsDelegate());
+
+ // We don't have to go through the Invoke slot because we know what the delegate
+ // target is. This is the same optimization that reverse P/Invoke stubs do.
+ *ppManagedTargetOut = (PCODE)pDelObj->GetMethodPtr();
+ return pDelObj->GetTarget();
+}
+
+// returns true on success, false otherwise
+NOINLINE // keep the EH tax out of our caller
+bool COMToCLRGetObjectAndTarget_WinRTCtor(Thread * pThread, MethodDesc * pRealMD, ComCallMethodDesc * pCMD, PCODE * ppManagedTargetOut,
+ OBJECTREF* pObjectOut, UINT64* pRetValOut)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // Ctor is not virtual and operates on a newly created object.
+ _ASSERTE(!pCMD->IsVirtual());
+
+ *pObjectOut = NULL;
+ *ppManagedTargetOut = pRealMD->GetSingleCallableAddrOfCode();
+ MethodTable *pMT = pRealMD->GetMethodTable();
+
+ // We should not see an unsealed class here
+ _ASSERTE(pMT->IsSealed());
+
+ // we know for sure that we are allocating a new object
+
+ // @TODO: move this object allocation into the IL stub to avoid the try/catch and SO-intolerant region.
+
+ bool fSuccess = true;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, { *pRetValOut = COR_E_STACKOVERFLOW; return false; } );
+
+ EX_TRY
+ {
+ *pObjectOut = AllocateObject(pMT);
+ }
+ EX_CATCH
+ {
+ fSuccess = false;
+ *pRetValOut = SetupErrorInfo(GET_THROWABLE());
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_SO_INTOLERANT_CODE;
+
+ return fSuccess;
+}
+
+FORCEINLINE_NONDEBUG
+OBJECTREF COMToCLRGetObjectAndTarget_Virtual(ComCallWrapper * pWrap, MethodDesc * pRealMD, ComCallMethodDesc * pCMD, PCODE * ppManagedTargetOut)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pObject = pWrap->GetObjectRef();
+
+ MethodTable *pMT = pObject->GetMethodTable();
+
+ if (pMT->IsTransparentProxy() || pRealMD->IsInterface())
+ {
+ // For transparent proxies, we need to call on the interface method desc if
+ // this method represents an interface method and not an IClassX method.
+ *ppManagedTargetOut = pCMD->GetCallMethodDesc()->GetSingleCallableAddrOfCode();
+ }
+ else if (pWrap->IsAggregated() && pWrap->GetComCallWrapperTemplate()->GetClassType().IsExportedToWinRT())
+ {
+ // We know the slot number for this method desc; grab the actual
+ // address from the vtable for this slot. The slot number should
+ // remain the same throughout the hierarchy.
+ //
+ // This is the WinRT inheritance case where we want to always call the method as
+ // most recently implemented in the managed world.
+ *ppManagedTargetOut = pWrap->GetComCallWrapperTemplate()->GetClassType().GetMethodTable()->GetSlot(pCMD->GetSlot());
+ }
+ else
+ {
+ // We know the slot number for this method desc; grab the actual
+ // address from the vtable for this slot. The slot number should
+ // remain the same throughout the hierarchy.
+ *ppManagedTargetOut = pMT->GetSlotForVirtual(pCMD->GetSlot());
+ }
+ return pObject;
+}
+
+FORCEINLINE_NONDEBUG
+OBJECTREF COMToCLRGetObjectAndTarget_NonVirtual(ComCallWrapper * pWrap, MethodDesc * pRealMD, ComCallMethodDesc * pCMD, PCODE * ppManagedTargetOut)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ //NOTE: No need to optimize for stub dispatch since non-virtuals are retrieved quickly.
+ *ppManagedTargetOut = pRealMD->GetSingleCallableAddrOfCode();
+
+ return pWrap->GetObjectRef();
+}
+
+FORCEINLINE_NONDEBUG
+void COMToCLRInvokeTarget(PCODE pManagedTarget, OBJECTREF pObject, ComCallMethodDesc * pCMD,
+ ComMethodFrame * pFrame, Thread * pThread, UINT64* pRetValOut)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef DEBUGGING_SUPPORTED
+ if (CORDebuggerTraceCall())
+ {
+ g_pDebugInterface->TraceCall((const BYTE *)pManagedTarget);
+ }
+#endif // DEBUGGING_SUPPORTED
+
+
+ if (CLRTaskHosted())
+ {
+ InvokeStub_Hosted(pCMD, pManagedTarget, pObject, pFrame, pThread, pRetValOut);
+ }
+ else
+ {
+ InvokeStub(pCMD, pManagedTarget, pObject, pFrame, pThread, pRetValOut);
+ }
+}
+
+bool COMToCLRWorkerBody_SecurityCheck(ComCallMethodDesc * pCMD, MethodDesc * pMD, Thread * pThread, UINT64 * pRetValOut)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ bool result = true;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, { *pRetValOut = COR_E_STACKOVERFLOW; return false; } );
+
+ EX_TRY
+ {
+
+ // Need to check for the presence of a security link demand on the target
+ // method. If we're hosted inside of an app domain with security, we perform
+ // the link demand against that app domain's grant set.
+ Security::CheckLinkDemandAgainstAppDomain(pMD);
+
+ if (pCMD->IsEarlyBoundUnsafe())
+ COMPlusThrow(kSecurityException);
+
+ }
+ EX_CATCH
+ {
+ *pRetValOut = SetupErrorInfo(GET_THROWABLE());
+ result = false;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_SO_INTOLERANT_CODE;
+
+ return result;
+}
+
+NOINLINE
+void COMToCLRWorkerBody_Rare(Thread * pThread, ComMethodFrame * pFrame, ComCallWrapper * pWrap,
+ MethodDesc * pRealMD, ComCallMethodDesc * pCMD, DWORD maskedFlags,
+ UINT64 * pRetValOut)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ PCODE pManagedTarget;
+ OBJECTREF pObject;
+
+ int fpReturnSize = 0;
+ if (maskedFlags & enum_NeedsSecurityCheck)
+ {
+ if (!COMToCLRWorkerBody_SecurityCheck(pCMD, pRealMD, pThread, pRetValOut))
+ return;
+ }
+ if (maskedFlags & enum_NativeR8Retval)
+ fpReturnSize = 8;
+ if (maskedFlags & enum_NativeR4Retval)
+ fpReturnSize = 4;
+
+ maskedFlags &= ~(enum_NeedsSecurityCheck|enum_NativeR4Retval|enum_NativeR8Retval);
+
+ CONSISTENCY_CHECK(maskedFlags != ( enum_IsWinRTCtor|enum_IsVirtual));
+ CONSISTENCY_CHECK(maskedFlags != (enum_IsDelegateInvoke|enum_IsWinRTCtor|enum_IsVirtual));
+ CONSISTENCY_CHECK(maskedFlags != (enum_IsDelegateInvoke|enum_IsWinRTCtor ));
+ switch (maskedFlags)
+ {
+ case enum_IsDelegateInvoke|enum_IsVirtual:
+ case enum_IsDelegateInvoke: pObject = COMToCLRGetObjectAndTarget_Delegate(pWrap, &pManagedTarget); break;
+ case enum_IsVirtual: pObject = COMToCLRGetObjectAndTarget_Virtual(pWrap, pRealMD, pCMD, &pManagedTarget); break;
+ case 0: pObject = COMToCLRGetObjectAndTarget_NonVirtual(pWrap, pRealMD, pCMD, &pManagedTarget); break;
+ case enum_IsWinRTCtor:
+ if (!COMToCLRGetObjectAndTarget_WinRTCtor(pThread, pRealMD, pCMD, &pManagedTarget, &pObject, pRetValOut))
+ return;
+ break;
+ default: UNREACHABLE();
+ }
+
+ COMToCLRInvokeTarget(pManagedTarget, pObject, pCMD, pFrame, pThread, pRetValOut);
+
+ if (fpReturnSize != 0)
+ getFPReturn(fpReturnSize, (INT64*)pRetValOut);
+
+#if defined(PROFILING_SUPPORTED)
+ // Notify the profiler of the return out of the runtime.
+ if (CORProfilerTrackTransitions())
+ {
+ ProfilerTransitionCallbackHelper(pRealMD, pThread, COR_PRF_TRANSITION_RETURN);
+ }
+#endif // PROFILING_SUPPORTED
+
+ return;
+}
+
+
+// This is the factored out body of COMToCLRWorker.
+FORCEINLINE_NONDEBUG
+void COMToCLRWorkerBody(
+ Thread * pThread,
+ ComMethodFrame * pFrame,
+ ComCallWrapper * pWrap,
+ UINT64 * pRetValOut)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ ComCallMethodDesc* pCMD = pFrame->GetComCallMethodDesc();
+ MethodDesc *pRealMD = pCMD->GetMethodDesc();
+
+#if defined(PROFILING_SUPPORTED)
+ // @TODO: PERF: x86: we are making profiler callbacks in the StubLinker stub as well as here.
+ // The checks for these callbacks add about 5% to the path length, so we should remove these
+ // callbacks in the next SxS release because they are redundant.
+ //
+ // Notify the profiler of the call into the runtime.
+ // 32-bit does this callback in the stubs before calling into COMToCLRWorker().
+ BOOL fNotifyProfiler = CORProfilerTrackTransitions();
+ if (fNotifyProfiler)
+ {
+ ProfilerTransitionCallbackHelper(pRealMD, pThread, COR_PRF_TRANSITION_CALL);
+ }
+#endif // PROFILING_SUPPORTED
+
+ LOG((LF_STUBS, LL_INFO1000000, "Calling COMToCLRWorker %s::%s \n", pRealMD->m_pszDebugClassName, pRealMD->m_pszDebugMethodName));
+
+ //
+ // In order to find the managed target code address and target object, we need to know
+ // what scenario we're in. We do this by switching on the flags of interest. We include
+ // the NeedsSecurityCheck flag in the calculation even though it's really orthogonal so
+ // that the faster case--where no security check is needed--can be matched immediately.
+ //
+ PCODE pManagedTarget;
+ OBJECTREF pObject;
+
+ DWORD mask = (
+ enum_NeedsSecurityCheck |
+ enum_IsDelegateInvoke |
+ enum_IsWinRTCtor |
+ enum_IsVirtual |
+ enum_NativeR4Retval |
+ enum_NativeR8Retval);
+ DWORD maskedFlags = pCMD->GetFlags() & mask;
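+
+ // Illustrative dispatch (hypothetical flag values): a plain non-virtual instance
+ // call leaves maskedFlags == 0 and takes the fastest case below, while a virtual
+ // method with an R8 return carries enum_IsVirtual|enum_NativeR8Retval and falls
+ // into the default case, where COMToCLRWorkerBody_Rare strips the retval and
+ // security bits before re-dispatching.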
+
+ CONSISTENCY_CHECK(maskedFlags != ( enum_IsWinRTCtor|enum_IsVirtual));
+ CONSISTENCY_CHECK(maskedFlags != (enum_IsDelegateInvoke|enum_IsWinRTCtor|enum_IsVirtual));
+ CONSISTENCY_CHECK(maskedFlags != (enum_IsDelegateInvoke|enum_IsWinRTCtor ));
+ switch (maskedFlags)
+ {
+ case enum_IsDelegateInvoke|enum_IsVirtual:
+ case enum_IsDelegateInvoke: pObject = COMToCLRGetObjectAndTarget_Delegate(pWrap, &pManagedTarget); break;
+ case enum_IsVirtual: pObject = COMToCLRGetObjectAndTarget_Virtual(pWrap, pRealMD, pCMD, &pManagedTarget); break;
+ case 0: pObject = COMToCLRGetObjectAndTarget_NonVirtual(pWrap, pRealMD, pCMD, &pManagedTarget); break;
+ case enum_IsWinRTCtor:
+ if (!COMToCLRGetObjectAndTarget_WinRTCtor(pThread, pRealMD, pCMD, &pManagedTarget, &pObject, pRetValOut))
+ return;
+ break;
+ default:
+ COMToCLRWorkerBody_Rare(pThread, pFrame, pWrap, pRealMD, pCMD, maskedFlags, pRetValOut);
+ return;
+ }
+
+ COMToCLRInvokeTarget(pManagedTarget, pObject, pCMD, pFrame, pThread, pRetValOut);
+
+#if defined(PROFILING_SUPPORTED)
+ // Notify the profiler of the return out of the runtime.
+ if (fNotifyProfiler)
+ {
+ ProfilerTransitionCallbackHelper(pRealMD, pThread, COR_PRF_TRANSITION_RETURN);
+ }
+#endif // PROFILING_SUPPORTED
+
+ return;
+}
+
+void COMToCLRWorkerBody_SOIntolerant(Thread * pThread, ComMethodFrame * pFrame, ComCallWrapper * pWrap, UINT64 * pRetValOut)
+{
+ STATIC_CONTRACT_THROWS; // THROWS due to END_SO_TOLERANT_CODE
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ BEGIN_SO_TOLERANT_CODE(pThread);
+
+ COMToCLRWorkerBody(pThread, pFrame, pWrap, pRetValOut);
+
+ END_SO_TOLERANT_CODE;
+}
+
+#ifdef _TARGET_X86_
+// On x86, we do not want the non-AD-transition path to push an extra FS:0 handler just to
+// pop off the ComMethodFrame. On non-x86, we have a personality routine that does this
+// (ReverseComUnwindFrameChainHandler), but on x86 we will latch onto the typical CPFH by
+// pushing COMPlusFrameHandlerRevCom as the FS:0 handler instead of COMPlusFrameHandler.
+// COMPlusFrameHandlerRevCom will look at the Frame chain from the current Frame up to
+// the ComMethodFrame and, if it finds a ContextTransitionFrame, it will do nothing.
+// Otherwise, it will unwind the Frame chain up to the ComMethodFrame. So here we latch
+// onto the AD transition rethrow as the point at which to unwind the Frame chain up to
+// the ComMethodFrame.
+#define REVERSE_COM_RETHROW_HOOK(pFrame) { ComMethodFrame::DoSecondPassHandlerCleanup(pFrame); }
+#else
+#define REVERSE_COM_RETHROW_HOOK(pFrame) NULL
+#endif // _TARGET_X86_
+
+NOINLINE
+void COMToCLRWorkerBodyWithADTransition(
+ Thread * pThread,
+ ComMethodFrame * pFrame,
+ ComCallWrapper * pWrap,
+ UINT64 * pRetValOut)
+{
+ CONTRACTL
+ {
+ NOTHROW; // Although CSE can be thrown
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ BOOL fEnteredDomain = FALSE;
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, { *pRetValOut = COR_E_STACKOVERFLOW; return; } );
+ EX_TRY
+ {
+ bool fNeedToTranslateTAEtoADUE = false;
+ ADID pTgtDomain = pWrap->GetDomainID();
+ ENTER_DOMAIN_ID(pTgtDomain)
+ {
+ fEnteredDomain = TRUE;
+ COMToCLRWorkerBody_SOIntolerant(pThread, pFrame, pWrap, pRetValOut);
+
+ //
+ // Below is some logic adapted from Thread::RaiseCrossContextExceptionHelper, which we now
+ // bypass because the IL stub is catching the ThreadAbortException instead of a proper domain
+ // transition, where the logic typically resides. This code applies some policy to transform
+ // the ThreadAbortException into an AppDomainUnloadedException and sets up the HRESULT and
+ // IErrorInfo accordingly.
+ //
+
+ // If the IL stub caught a TAE...
+ if (COR_E_THREADABORTED == ((HRESULT)*pRetValOut))
+ {
+ // ...first, make sure it was actually an HRESULT return value...
+ ComCallMethodDesc* pCMD = pFrame->GetComCallMethodDesc();
+ if (pCMD->IsNativeHResultRetVal())
+ {
+ // There may be multiple AD transitions on the stack so the current unload boundary may
+ // not be the transition frame that was set up to make our AD switch. Detect that by
+ // comparing the unload boundary's Next with our ComMethodFrame, and translate
+ // the exception to ADUE only if they match. Otherwise the exception should stay as TAE.
+
+ Frame* pUnloadBoundary = pThread->GetUnloadBoundaryFrame();
+ // ...and we are at an unload boundary with a pending unload...
+ if ( ( pUnloadBoundary != NULL
+ && (pUnloadBoundary->Next() == pFrame
+ && pThread->ShouldChangeAbortToUnload(pUnloadBoundary, pUnloadBoundary))
+ )
+ // ... or we don't have an unload boundary, but we're otherwise unloading
+ // this domain from another thread (and we aren't the finalizer)...
+ || ( (NULL == pUnloadBoundary)
+ && (pThread->GetDomain() == SystemDomain::AppDomainBeingUnloaded())
+ && (pThread != SystemDomain::System()->GetUnloadingThread())
+ && (pThread != FinalizerThread::GetFinalizerThread())
+ )
+ )
+ {
+ // ... we take note and then create an ADUE in the domain we're returning to.
+ fNeedToTranslateTAEtoADUE = true;
+ }
+ }
+ }
+ }
+ END_DOMAIN_TRANSITION;
+
+ if (fNeedToTranslateTAEtoADUE)
+ {
+ EEResourceException ex(kAppDomainUnloadedException, W("Remoting_AppDomainUnloaded_ThreadUnwound"));
+ OBJECTREF oEx = CLRException::GetThrowableFromException(&ex);
+ *pRetValOut = SetupErrorInfo(oEx, pFrame->GetComCallMethodDesc());
+ }
+ }
+ EX_CATCH
+ {
+ *pRetValOut = SetupErrorInfo(GET_THROWABLE(), pFrame->GetComCallMethodDesc());
+ }
+ EX_END_CATCH(
+ RethrowCorruptingExceptionsExAndHookRethrow(
+ // If it was thrown at us from the IL stub (which will evaluate the CE policy), then we must
+ // rethrow it here. But we should swallow exceptions generated by our domain transition.
+ fEnteredDomain,
+ REVERSE_COM_RETHROW_HOOK(pThread->GetFrame())
+ ));
+
+ END_SO_INTOLERANT_CODE;
+}
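+
+// Note that the TAE-to-ADUE translation above runs after END_DOMAIN_TRANSITION, so
+// the AppDomainUnloadedException and its IErrorInfo are created in the domain we
+// returned to rather than in the unloading target domain.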
+
+
+//------------------------------------------------------------------
+// UINT64 __stdcall COMToCLRWorker(Thread *pThread,
+// ComMethodFrame* pFrame)
+//------------------------------------------------------------------
+extern "C" UINT64 __stdcall COMToCLRWorker(Thread *pThread, ComMethodFrame* pFrame)
+{
+ CONTRACTL
+ {
+ NOTHROW; // Although CSE can be thrown
+ GC_TRIGGERS;
+#if defined(_TARGET_X86_)
+ MODE_COOPERATIVE; // X86 sets up COOP in stublinker-generated stub
+#else
+ // This contract is disabled because user code can illegally reenter here through no fault of the
+ // CLR (i.e. it's a user code bug), so we shouldn't be popping ASSERT dialogs in those cases. Note
+ // that this reentrancy check is already done by the stublinker-generated stub on x86, so it's OK
+ // to leave the MODE_ contract enabled on x86.
+ DISABLED(MODE_PREEMPTIVE);
+#endif
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pFrame));
+ PRECONDITION(CheckPointer(pThread, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ UINT64 retVal = 0;
+
+ ComCallMethodDesc* pCMD = pFrame->GetComCallMethodDesc();
+
+#if !defined(_TARGET_X86_)
+ //
+ // The following code is a transcription of the code that is generated by CreateGenericComCallStub. The
+ // idea is that we needn't really do this work in either static assembly code or dynamically
+ // generated code since the benefit/cost ratio is low. There are some minor differences in the below
+ // code, compared to x86. First, the reentrancy and loader lock checks are optionally compiled into the
+ // stub on x86, depending on whether or not the corresponding MDAs are active at stub-generation time.
+ // We must check each time at runtime here because we're using static code.
+ //
+ HRESULT hr = S_OK;
+
+ pThread = GetThread();
+ if (NULL == pThread)
+ {
+ pThread = SetupThreadNoThrow();
+ if (pThread == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ goto ErrorExit;
+ }
+ }
+
+ // Check for an illegal coop->coop transition. We may fire the Reentrancy MDA as a result.
+ if (pThread->PreemptiveGCDisabled())
+ HasIllegalReentrancy();
+
+ // Attempt to switch GC modes. Note that this is performed manually just like in the x86 stub because
+ // we have additional checks for shutdown races, MDAs, and thread abort that are performed only when
+ // g_TrapReturningThreads is set.
+ pThread->m_fPreemptiveGCDisabled.StoreWithoutBarrier(1);
+ if (g_TrapReturningThreads.LoadWithoutBarrier())
+ {
+ hr = StubRareDisableHRWorker(pThread);
+ if (S_OK != hr)
+ goto ErrorExit;
+ }
+
+#ifdef MDA_SUPPORTED
+ // Check for and trigger the LoaderLock MDA
+ if (ShouldCheckLoaderLock())
+ {
+ BOOL IsHeld;
+ if (AuxUlibIsDLLSynchronizationHeld(&IsHeld) && IsHeld)
+ {
+ MDA_TRIGGER_ASSISTANT(LoaderLock, ReportViolation(0));
+ }
+ }
+#endif // MDA_SUPPORTED
+
+ // Initialize the frame's VPTR and GS cookie.
+ *((TADDR*)pFrame) = ComMethodFrame::GetMethodFrameVPtr();
+ *pFrame->GetGSCookiePtr() = GetProcessGSCookie();
+ // Link frame into the chain.
+ pFrame->Push(pThread);
+
+#endif // !_TARGET_X86_
+
+ _ASSERTE(pThread);
+
+ // At this point we should be in cooperative GC mode (regardless of whether the
+ // switch happened in the stub or in the worker).
+ _ASSERTE(pThread->PreemptiveGCDisabled());
+
+ {
+#ifndef _TARGET_X86_
+ if (pCMD->IsFieldCall())
+ {
+ retVal = FieldCallWorker(pThread, pFrame);
+ }
+ else
+#endif // !_TARGET_X86_
+ {
+ IUnknown **pip = (IUnknown **)pFrame->GetPointerToArguments();
+ IUnknown *pUnk = (IUnknown *)*pip;
+ _ASSERTE(pUnk != NULL);
+
+ // Obtain the managed 'this' for the call
+ ComCallWrapper *pWrap = ComCallWrapper::GetWrapperFromIP(pUnk);
+ _ASSERTE(pWrap != NULL);
+ if (pWrap->NeedToSwitchDomains(pThread))
+ {
+ COMToCLRWorkerBodyWithADTransition(pThread, pFrame, pWrap, &retVal);
+ }
+ else
+ {
+ // This is the common case that needs to be fast: we are in the right domain and
+ // all we have to do is marshal the parameters and deliver the call.
+ COMToCLRWorkerBody(pThread, pFrame, pWrap, &retVal);
+ }
+ }
+ }
+
+#ifndef _TARGET_X86_
+ // Note: the EH subsystem will handle resetting the frame chain and setting
+ // the correct GC mode on exception.
+ pFrame->Pop(pThread);
+ pThread->EnablePreemptiveGC();
+#endif
+
+ LOG((LF_STUBS, LL_INFO1000000, "COMToCLRWorker leave\n"));
+
+ // The call was successful. If the native return type is a floating point
+ // value, then we need to set the floating point registers appropriately.
+ if (pCMD->IsNativeFloatingPointRetVal()) // single check skips both cases
+ {
+ if (pCMD->IsNativeR4RetVal())
+ setFPReturn(4, retVal);
+ else
+ setFPReturn(8, retVal);
+ }
+ return retVal;
+
+#ifndef _TARGET_X86_
+ErrorExit:
+ if (pThread->PreemptiveGCDisabled())
+ pThread->EnablePreemptiveGC();
+
+ // The call failed so we need to report an error to the caller.
+ if (pCMD->IsNativeHResultRetVal())
+ {
+ _ASSERTE(FAILED(hr));
+ retVal = hr;
+ }
+ else if (pCMD->IsNativeBoolRetVal())
+ retVal = 0;
+ else if (pCMD->IsNativeR4RetVal())
+ setFPReturn(4, CLR_NAN_32);
+ else if (pCMD->IsNativeR8RetVal())
+ setFPReturn(8, CLR_NAN_64);
+ else
+ _ASSERTE(pCMD->IsNativeVoidRetVal());
+ return retVal;
+#endif // _TARGET_X86_
+}
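+
+// Summary of the failure-path return encoding above:
+//   HRESULT retval -> the failing HRESULT itself
+//   bool retval    -> 0
+//   R4/R8 retval   -> NaN loaded into the FP return register
+//   void retval    -> nothing to report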
+
+#if defined(_MSC_VER) && !defined(_DEBUG)
+#pragma optimize("", on) // restore settings
+#endif
+
+
+static UINT64 __stdcall FieldCallWorker(Thread *pThread, ComMethodFrame* pFrame)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ ENTRY_POINT;
+ PRECONDITION(CheckPointer(pThread));
+ PRECONDITION(CheckPointer(pFrame));
+ }
+ CONTRACTL_END;
+
+
+#ifdef MDA_SUPPORTED
+ MDA_TRIGGER_ASSISTANT(GcUnmanagedToManaged, TriggerGC());
+#endif
+
+ LOG((LF_STUBS, LL_INFO1000000, "FieldCallWorker enter\n"));
+
+ HRESULT hrRetVal = S_OK;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, return COR_E_STACKOVERFLOW);
+ // BEGIN_ENTRYPOINT_NOTHROW_WITH_THREAD(pThread);
+
+ IUnknown** pip = (IUnknown **)pFrame->GetPointerToArguments();
+ IUnknown* pUnk = (IUnknown *)*pip;
+ _ASSERTE(pUnk != NULL);
+
+ ComCallWrapper* pWrap = ComCallWrapper::GetWrapperFromIP(pUnk);
+ _ASSERTE(pWrap != NULL);
+
+ GCX_ASSERT_COOP();
+ OBJECTREF pThrowable = NULL;
+ GCPROTECT_BEGIN(pThrowable);
+ {
+ if (!pWrap->NeedToSwitchDomains(pThread))
+ {
+ // This is the common case that needs to be fast: we are in the right domain and
+ // all we have to do is marshal the parameters and deliver the call. We still have to
+ // set up an EX_TRY/EX_CATCH to transform any exceptions that were thrown into
+ // HRESULTs.
+ EX_TRY
+ {
+ FieldCallWorkerDebuggerWrapper(pThread, pFrame);
+ }
+ EX_CATCH
+ {
+ pThrowable = GET_THROWABLE();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (pThrowable != NULL)
+ {
+ // Transform the exception into an HRESULT. This also sets up
+ // an IErrorInfo on the current thread for the exception.
+ hrRetVal = SetupErrorInfo(pThrowable, pFrame->GetComCallMethodDesc());
+ pThrowable = NULL;
+ }
+ }
+ else
+ {
+ ADID pTgtDomain = pWrap->GetDomainID();
+ if (!pTgtDomain.m_dwId)
+ {
+ hrRetVal = COR_E_APPDOMAINUNLOADED;
+ }
+ else
+ {
+ // We need a try/catch around the code to enter the domain since entering
+ // an AppDomain can throw an exception.
+ EX_TRY
+ {
+ ENTER_DOMAIN_ID(pTgtDomain)
+ {
+ // Set up a new GC protection frame for any exceptions thrown inside the AppDomain. Do
+ // this so we can be sure we don't leak an AppDomain-specific object outside the
+ // lifetime of the AppDomain (which can happen if an AppDomain unload causes us to
+ // unwind out via a ThreadAbortException).
+ OBJECTREF pAppDomainThrowable = NULL;
+ GCPROTECT_BEGIN(pAppDomainThrowable);
+ {
+ // We need a try/catch around the call to the worker since we need
+ // to transform any exceptions into HRESULTs. We want to do this
+ // inside the AppDomain of the CCW.
+ EX_TRY
+ {
+ FieldCallWorkerDebuggerWrapper(pThread, pFrame);
+ }
+ EX_CATCH
+ {
+ pAppDomainThrowable = GET_THROWABLE();
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ if (pAppDomainThrowable != NULL)
+ {
+ // Transform the exception into an HRESULT. This also sets up
+ // an IErrorInfo on the current thread for the exception.
+ hrRetVal = SetupErrorInfo(pAppDomainThrowable, pFrame->GetComCallMethodDesc());
+ pAppDomainThrowable = NULL;
+ }
+ }
+ GCPROTECT_END();
+ }
+ END_DOMAIN_TRANSITION;
+ }
+ EX_CATCH
+ {
+ // Transform the exception into an HRESULT. This also sets up
+ // an IErrorInfo on the current thread for the exception.
+ pThrowable = GET_THROWABLE();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (pThrowable != NULL)
+ {
+ // Transform the exception into an HRESULT. This also sets up
+ // an IErrorInfo on the current thread for the exception.
+ hrRetVal = SetupErrorInfo(pThrowable, pFrame->GetComCallMethodDesc());
+ pThrowable = NULL;
+ }
+ }
+ }
+ }
+
+ GCPROTECT_END();
+
+#ifdef MDA_SUPPORTED
+ MDA_TRIGGER_ASSISTANT(GcManagedToUnmanaged, TriggerGC());
+#endif
+
+ LOG((LF_STUBS, LL_INFO1000000, "FieldCallWorker leave\n"));
+
+ END_SO_INTOLERANT_CODE;
+ //END_ENTRYPOINT_NOTHROW_WITH_THREAD;
+
+ return hrRetVal;
+}
+
+static void FieldCallWorkerDebuggerWrapper(Thread *pThread, ComMethodFrame* pFrame)
+{
+ // Use static contracts b/c we have SEH.
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ struct Param : public NotifyOfCHFFilterWrapperParam {
+ Thread* pThread;
+ } param;
+ param.pFrame = pFrame;
+ param.pThread = pThread;
+
+ // @todo - we have a PAL_TRY/PAL_EXCEPT here as a general (cross-platform) way to get a 1st-pass
+ // filter. If that's bad perf, we could inline an FS:0 handler for x86-only; and then inline
+ // both this wrapper and the main body.
+ PAL_TRY(Param *, pParam, &param)
+ {
+ FieldCallWorkerBody(pParam->pThread, (ComMethodFrame*)pParam->pFrame);
+ }
+ PAL_EXCEPT_FILTER(NotifyOfCHFFilterWrapper)
+ {
+ // Should never reach here b/c handler should always continue search.
+ _ASSERTE(false);
+ }
+ PAL_ENDTRY
+}
+
+static void FieldCallWorkerBody(Thread *pThread, ComMethodFrame* pFrame)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY; // Dependent on machine type (X86 sets COOP in stub)
+ PRECONDITION(CheckPointer(pThread));
+ PRECONDITION(CheckPointer(pFrame));
+ }
+ CONTRACTL_END;
+
+ ReverseEnterRuntimeHolder REHolder(TRUE);
+
+ IUnknown** pip = (IUnknown **)pFrame->GetPointerToArguments();
+ IUnknown* pUnk = (IUnknown *)*pip;
+ _ASSERTE(pUnk != NULL);
+
+ ComCallWrapper* pWrap = ComCallWrapper::GetWrapperFromIP(pUnk);
+ _ASSERTE(pWrap != NULL);
+
+ ComCallMethodDesc *pCMD = pFrame->GetComCallMethodDesc();
+ _ASSERTE(pCMD->IsFieldCall());
+ _ASSERTE(pCMD->IsNativeHResultRetVal());
+
+#ifdef PROFILING_SUPPORTED
+ // Notify the profiler of the call into the runtime.
+ // 32-bit does this callback in the stubs before calling into FieldCallWorker().
+ if (CORProfilerTrackTransitions())
+ {
+ MethodDesc* pMD = pCMD->GetMethodDesc();
+ ProfilerTransitionCallbackHelper(pMD, pThread, COR_PRF_TRANSITION_CALL);
+ }
+#endif // PROFILING_SUPPORTED
+
+ if (pCMD->IsEarlyBoundUnsafe())
+ {
+ COMPlusThrow(kSecurityException);
+ }
+
+ UINT64 retVal;
+ InvokeStub(pCMD, NULL, pWrap->GetObjectRef(), pFrame, pThread, &retVal);
+
+#ifdef PROFILING_SUPPORTED
+ // Notify the profiler of the return out of the runtime.
+ if (CORProfilerTrackTransitions())
+ {
+ MethodDesc* pMD = pCMD->GetMethodDesc();
+ ProfilerTransitionCallbackHelper(pMD, pThread, COR_PRF_TRANSITION_RETURN);
+ }
+#endif // PROFILING_SUPPORTED
+}
+
+//---------------------------------------------------------
+PCODE ComCallMethodDesc::CreateCOMToCLRStub(DWORD dwStubFlags, MethodDesc **ppStubMD)
+{
+ CONTRACT(PCODE)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(ppStubMD));
+ POSTCONDITION(CheckPointer(*ppStubMD));
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+ MethodDesc * pStubMD;
+
+ if (IsFieldCall())
+ {
+ FieldDesc *pFD = GetFieldDesc();
+ pStubMD = ComCall::GetILStubMethodDesc(pFD, dwStubFlags);
+ }
+ else
+ {
+ // if this represents a ctor or static, use the class method (i.e. the actual ctor or static)
+ MethodDesc *pMD = ((IsWinRTCtor() || IsWinRTStatic()) ? GetMethodDesc() : GetCallMethodDesc());
+
+ // first see if we have an NGENed stub
+ pStubMD = GetStubMethodDescFromInteropMethodDesc(pMD, dwStubFlags);
+ if (pStubMD != NULL)
+ {
+ pStubMD = RestoreNGENedStub(pStubMD);
+ }
+ if (pStubMD == NULL)
+ {
+ // no NGENed stub - create a new one
+ pStubMD = ComCall::GetILStubMethodDesc(pMD, dwStubFlags);
+ }
+ }
+
+ *ppStubMD = pStubMD;
+
+#ifdef _TARGET_X86_
+ // make sure our native stack computation in code:ComCallMethodDesc.InitNativeInfo is right
+ _ASSERTE(HasMarshalError() || !pStubMD->IsILStub() || pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize() == m_StackBytes);
+#else // _TARGET_X86_
+ if (pStubMD->IsILStub())
+ {
+ m_StackBytes = pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize();
+ _ASSERTE(m_StackBytes == pStubMD->SizeOfArgStack());
+ }
+ else
+ {
+ m_StackBytes = pStubMD->SizeOfArgStack();
+ }
+#endif // _TARGET_X86_
+
+ RETURN JitILStub(pStubMD);
+}
+
+//---------------------------------------------------------
+void ComCallMethodDesc::InitRuntimeNativeInfo(MethodDesc *pStubMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pStubMD));
+ }
+ CONTRACTL_END;
+
+#ifdef _TARGET_X86_
+ // Parse the stub signature to figure out how we're going to transform the incoming arguments
+ // into stub arguments (i.e. ECX and possibly EDX get enregisterable args, stack gets reversed).
+
+ MetaSig msig(pStubMD);
+ ArgIterator argit(&msig);
+
+ UINT dwArgStack = argit.SizeOfArgStack();
+ if (!FitsInU2(dwArgStack))
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIGTOOCOMPLEX);
+
+ NewArrayHolder<UINT16> pwStubStackSlotOffsets;
+ UINT16 *pOutputStack = NULL;
+
+ UINT16 wStubStackSlotCount = static_cast<UINT16>(dwArgStack) / STACK_ELEM_SIZE;
+ if (wStubStackSlotCount > 0)
+ {
+ pwStubStackSlotOffsets = new UINT16[wStubStackSlotCount];
+ pOutputStack = pwStubStackSlotOffsets + wStubStackSlotCount;
+ }
+
+ UINT16 wSourceSlotEDX = (UINT16)-1;
+
+ int numRegistersUsed = 0;
+ UINT16 wInputStack = 0;
+
+ // process the 'this' argument
+ if (!pStubMD->IsStatic())
+ {
+ numRegistersUsed++;
+ wInputStack += STACK_ELEM_SIZE;
+ }
+
+ // process the return buffer parameter
+ if (argit.HasRetBuffArg())
+ {
+ numRegistersUsed++;
+ wSourceSlotEDX = wInputStack / STACK_ELEM_SIZE;
+ wInputStack += STACK_ELEM_SIZE;
+ }
+
+ // process ordinary parameters
+ for (UINT i = msig.NumFixedArgs(); i > 0; i--)
+ {
+ TypeHandle thValueType;
+ CorElementType type = msig.NextArgNormalized(&thValueType);
+
+ UINT cbSize = MetaSig::GetElemSize(type, thValueType);
+
+ if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type))
+ {
+ wSourceSlotEDX = wInputStack / STACK_ELEM_SIZE;
+ wInputStack += STACK_ELEM_SIZE;
+ }
+ else
+ {
+ // we may need more stack slots for larger parameters
+ pOutputStack -= StackElemSize(cbSize) / STACK_ELEM_SIZE;
+ for (UINT slot = 0; slot < (StackElemSize(cbSize) / STACK_ELEM_SIZE); slot++)
+ {
+ pOutputStack[slot] = wInputStack;
+ wInputStack += STACK_ELEM_SIZE;
+ }
+ }
+ }
+
+ // write the computed data into this ComCallMethodDesc
+ m_dwSlotInfo = (wSourceSlotEDX | (wStubStackSlotCount << 16));
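+ // Packing sketch (hypothetical values): wSourceSlotEDX == 1 and
+ // wStubStackSlotCount == 3 pack into m_dwSlotInfo == 0x00030001; the union in
+ // ComCallMethodDesc lets the two UINT16 fields be updated as a single UINT32.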
+ if (pwStubStackSlotOffsets != NULL)
+ {
+ if (FastInterlockCompareExchangePointer(&m_pwStubStackSlotOffsets, pwStubStackSlotOffsets.GetValue(), NULL) == NULL)
+ {
+ pwStubStackSlotOffsets.SuppressRelease();
+ }
+ }
+
+ //
+ // Fill in return thunk with proper native arg size.
+ //
+
+ BYTE *pMethodDescMemory = ((BYTE*)this) + GetOffsetOfReturnThunk();
+
+ //
+ // encodes a "ret nativeArgSize" to return and
+ // pop the args off the stack
+ //
+ pMethodDescMemory[0] = 0xc2;
+
+ UINT16 nativeArgSize = GetNumStackBytes();
+
+ if (!(nativeArgSize < 0x7fff))
+ COMPlusThrow(kTypeLoadException, IDS_EE_SIGTOOCOMPLEX);
+
+ *(SHORT *)&pMethodDescMemory[1] = nativeArgSize;
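+
+ // Resulting thunk bytes for a hypothetical nativeArgSize of 8: C2 08 00,
+ // i.e. "ret 8", which pops the caller-pushed arguments on return.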
+
+ FlushInstructionCache(GetCurrentProcess(), pMethodDescMemory, sizeof pMethodDescMemory[0] + sizeof(SHORT));
+#endif // _TARGET_X86_
+}
+#endif //CROSSGEN_COMPILE
+
+void ComCallMethodDesc::InitMethod(MethodDesc *pMD, MethodDesc *pInterfaceMD, BOOL fRedirectedInterface /* = FALSE */)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ m_flags = pMD->IsVirtual() ? enum_IsVirtual : 0;
+
+ m_pMD = pMD;
+ m_pInterfaceMD = PTR_MethodDesc(pInterfaceMD);
+ m_pILStub = NULL;
+
+#ifdef _TARGET_X86_
+ m_dwSlotInfo = 0;
+ m_pwStubStackSlotOffsets = NULL;
+#endif // _TARGET_X86_
+
+ if (fRedirectedInterface)
+ m_flags |= enum_IsWinRTRedirected;
+
+ // check whether this is a WinRT ctor/static/event method
+ MethodDesc *pCallMD = GetCallMethodDesc();
+ MethodTable *pCallMT = pCallMD->GetMethodTable();
+ if (pCallMT->IsProjectedFromWinRT() || pCallMT->IsExportedToWinRT())
+ {
+ m_flags |= enum_IsWinRTCall;
+
+ if (pMD->IsCtor())
+ {
+ m_flags |= enum_IsWinRTCtor;
+ }
+ else
+ {
+ if (pMD->IsStatic())
+ m_flags |= enum_IsWinRTStatic;
+ }
+ }
+
+ if (!SystemDomain::GetCurrentDomain()->IsCompilationDomain())
+ {
+ // Initialize the native type information (size of native stack, native retval flags, etc.).
+ InitNativeInfo();
+
+ // If this interface method is implemented on a class which lives
+ // in an assembly without UnmanagedCodePermission, then
+ // we mark the ComCallMethodDesc as unsafe for being called early-bound.
+ Module* pModule = pMD->GetModule();
+ if (!Security::CanCallUnmanagedCode(pModule))
+ {
+ m_flags |= (enum_NeedsSecurityCheck | enum_IsEarlyBoundUnsafe);
+ }
+ else if (pMD->RequiresLinktimeCheck())
+ {
+ // remember that we have to call Security::CheckLinkDemandAgainstAppDomain at invocation time
+ m_flags |= enum_NeedsSecurityCheck;
+ }
+ }
+
+ if (pMD->IsEEImpl() && COMDelegate::IsDelegateInvokeMethod(pMD))
+ {
+ m_flags |= enum_IsDelegateInvoke;
+ }
+}
+
+void ComCallMethodDesc::InitField(FieldDesc* pFD, BOOL isGetter)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pFD));
+ }
+ CONTRACTL_END;
+
+ m_pFD = pFD;
+ m_pILStub = NULL;
+
+#ifdef _TARGET_X86_
+ m_dwSlotInfo = 0;
+ m_pwStubStackSlotOffsets = NULL;
+#endif // _TARGET_X86_
+
+ m_flags = enum_IsFieldCall; // mark the attribute as a field
+ m_flags |= isGetter ? enum_IsGetter : 0;
+
+ if (!SystemDomain::GetCurrentDomain()->IsCompilationDomain())
+ {
+ // Initialize the native type information (size of native stack, native retval flags, etc.).
+ InitNativeInfo();
+
+ // If this interface method is implemented on a class which lives
+ // in an assembly without UnmanagedCodePermission, then
+ // we mark the ComCallMethodDesc as unsafe for being called early-bound.
+ Module* pModule = pFD->GetModule();
+ if (!Security::CanCallUnmanagedCode(pModule))
+ {
+ m_flags |= enum_IsEarlyBoundUnsafe;
+ }
+ }
+}
+
+// Initialize the member's native type information (size of native stack, native retval flags, etc).
+// It is unfortunate that we have to touch all this metadata at creation time. The reason for this
+// is that we need to know the size of the native stack to be able to return to unmanaged code in
+// case ComPrestub fails. If it fails because the target appdomain has already been unloaded, it is
+// too late to make this computation - the metadata is no longer available.
+void ComCallMethodDesc::InitNativeInfo()
+{
+ CONTRACT_VOID
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!IsNativeInfoInitialized());
+ }
+ CONTRACT_END;
+
+ m_StackBytes = (UINT16)-1;
+
+ EX_TRY
+ {
+#ifdef _TARGET_X86_
+ // On x86, this method has to compute the size of the arguments because we need to know the size
+ // of the native stack to be able to return to unmanaged code
+ UINT16 nativeArgSize;
+#endif
+
+ if (IsFieldCall())
+ {
+ FieldDesc *pFD = GetFieldDesc();
+ _ASSERTE(pFD != NULL);
+
+#ifdef _DEBUG
+ LPCUTF8 szDebugName = pFD->GetDebugName();
+ LPCUTF8 szDebugClassName = pFD->GetEnclosingMethodTable()->GetDebugClassName();
+
+ if (g_pConfig->ShouldBreakOnComToClrNativeInfoInit(szDebugName))
+ CONSISTENCY_CHECK_MSGF(false, ("BreakOnComToClrNativeInfoInit: '%s' ", szDebugName));
+#endif // _DEBUG
+
+#ifdef _TARGET_X86_
+ MetaSig fsig(pFD);
+ fsig.NextArg();
+
+ // Look up the best fit mapping info via Assembly & Interface level attributes
+ BOOL BestFit = TRUE;
+ BOOL ThrowOnUnmappableChar = FALSE;
+ ReadBestFitCustomAttribute(fsig.GetModule()->GetMDImport(), pFD->GetEnclosingMethodTable()->GetCl(), &BestFit, &ThrowOnUnmappableChar);
+
+ MarshalInfo info(fsig.GetModule(), fsig.GetArgProps(), fsig.GetSigTypeContext(), pFD->GetMemberDef(), MarshalInfo::MARSHAL_SCENARIO_COMINTEROP,
+ (CorNativeLinkType)0, (CorNativeLinkFlags)0,
+ FALSE, 0, fsig.NumFixedArgs(), BestFit, ThrowOnUnmappableChar, FALSE, NULL, FALSE
+#ifdef _DEBUG
+ , szDebugName, szDebugClassName, 0
+#endif
+ );
+
+ if (IsFieldGetter())
+ {
+ // getter takes 'this' and the output argument by-ref
+ nativeArgSize = sizeof(void *) + sizeof(void *);
+ }
+ else
+ {
+ info.SetupArgumentSizes();
+
+ // setter takes 'this' and the input argument by-value
+ nativeArgSize = sizeof(void *) + info.GetNativeArgSize();
+ }
+#endif // _TARGET_X86_
+
+ // Field calls always return HRESULTs.
+ m_flags |= enum_NativeHResultRetVal;
+ }
+ else
+ {
+ MethodDesc *pMD = GetCallMethodDesc();
+
+#ifdef _DEBUG
+ LPCUTF8 szDebugName = pMD->m_pszDebugMethodName;
+ LPCUTF8 szDebugClassName = pMD->m_pszDebugClassName;
+
+ if (g_pConfig->ShouldBreakOnComToClrNativeInfoInit(szDebugName))
+ CONSISTENCY_CHECK_MSGF(false, ("BreakOnComToClrNativeInfoInit: '%s' ", szDebugName));
+#endif // _DEBUG
+
+ MethodTable * pMT = pMD->GetMethodTable();
+ IMDInternalImport * pInternalImport = pMT->GetMDImport();
+
+ mdMethodDef md = pMD->GetMemberDef();
+
+ ULONG ulCodeRVA;
+ DWORD dwImplFlags;
+ IfFailThrow(pInternalImport->GetMethodImplProps(md, &ulCodeRVA, &dwImplFlags));
+
+ // Determine if we need to do HRESULT munging for this method.
+ BOOL fPreserveSig = IsMiPreserveSig(dwImplFlags);
+
+#ifndef _TARGET_X86_
+ if (!fPreserveSig)
+ {
+ // PreserveSig=false methods always return HRESULTs.
+ m_flags |= enum_NativeHResultRetVal;
+ goto Done;
+ }
+#endif
+
+ MetaSig msig(pMD);
+
+#ifndef _TARGET_X86_
+ if (msig.IsReturnTypeVoid())
+ {
+ // The method has a void return type on the native side.
+ m_flags |= enum_NativeVoidRetVal;
+ goto Done;
+ }
+#endif
+
+ BOOL WinRTType = pMT->IsProjectedFromWinRT();
+
+ // Look up the best fit mapping info via Assembly & Interface level attributes
+ BOOL BestFit = TRUE;
+ BOOL ThrowOnUnmappableChar = FALSE;
+
+ // Marshaling is fully described by the parameter type in WinRT. BestFit custom attributes
+ // are not going to affect the marshaling behavior.
+ if (!WinRTType)
+ {
+ ReadBestFitCustomAttribute(pMD, &BestFit, &ThrowOnUnmappableChar);
+ }
+
+ int numArgs = msig.NumFixedArgs();
+
+ // Collects ParamDef information in an indexed array where element 0 represents
+ // the return type.
+ mdParamDef *params = (mdParamDef*)_alloca((numArgs+1) * sizeof(mdParamDef));
+ CollateParamTokens(pInternalImport, md, numArgs, params);
+
+#ifdef _TARGET_X86_
+ // If this is a method call then check to see if we need to do LCID conversion.
+ int iLCIDArg = GetLCIDParameterIndex(pMD);
+ if (iLCIDArg != -1)
+ iLCIDArg++;
+
+ nativeArgSize = sizeof(void*);
+
+ int iArg = 1;
+ CorElementType mtype;
+ while (ELEMENT_TYPE_END != (mtype = msig.NextArg()))
+ {
+ // Check to see if this is the parameter after which we need to read the LCID.
+ if (iArg == iLCIDArg)
+ nativeArgSize += StackElemSize(sizeof(LCID));
+
+ MarshalInfo info(msig.GetModule(), msig.GetArgProps(), msig.GetSigTypeContext(), params[iArg],
+ WinRTType ? MarshalInfo::MARSHAL_SCENARIO_WINRT : MarshalInfo::MARSHAL_SCENARIO_COMINTEROP,
+ (CorNativeLinkType)0, (CorNativeLinkFlags)0,
+ TRUE, iArg, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, pMD, FALSE
+#ifdef _DEBUG
+ , szDebugName, szDebugClassName, iArg
+#endif
+ );
+
+ if (info.GetMarshalType() == MarshalInfo::MARSHAL_TYPE_UNKNOWN)
+ {
+ nativeArgSize += StackElemSize(sizeof(LPVOID));
+ m_flags |= enum_HasMarshalError;
+ }
+ else
+ {
+ info.SetupArgumentSizes();
+
+ nativeArgSize += info.GetNativeArgSize();
+
+ if (info.GetMarshalType() == MarshalInfo::MARSHAL_TYPE_HIDDENLENGTHARRAY)
+ {
+ // count the hidden length
+ nativeArgSize += info.GetHiddenLengthParamStackSize();
+ }
+ }
+
+ ++iArg;
+ }
+
+ // Check to see if this is the parameter after which we need to read the LCID.
+ if (iArg == iLCIDArg)
+ nativeArgSize += StackElemSize(sizeof(LCID));
+#endif // _TARGET_X86_
+
+
+ //
+ // Return value
+ //
+
+#ifndef _TARGET_X86_
+ // Handled above
+ _ASSERTE(!msig.IsReturnTypeVoid());
+#else
+ if (msig.IsReturnTypeVoid())
+ {
+ if (!fPreserveSig)
+ {
+ // PreserveSig=false methods always return HRESULTs.
+ m_flags |= enum_NativeHResultRetVal;
+ }
+ else
+ {
+ // The method has a void return type on the native side.
+ m_flags |= enum_NativeVoidRetVal;
+ }
+
+ goto Done;
+ }
+#endif // _TARGET_X86_
+
+ {
+ MarshalInfo info(msig.GetModule(), msig.GetReturnProps(), msig.GetSigTypeContext(), params[0],
+ WinRTType ? MarshalInfo::MARSHAL_SCENARIO_WINRT : MarshalInfo::MARSHAL_SCENARIO_COMINTEROP,
+ (CorNativeLinkType)0, (CorNativeLinkFlags)0,
+ FALSE, 0, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, pMD, FALSE
+#ifdef _DEBUG
+ ,szDebugName, szDebugClassName, 0
+#endif
+ );
+
+#ifndef _TARGET_X86_
+ // Handled above
+ _ASSERTE(fPreserveSig);
+#else
+ if (!fPreserveSig)
+ {
+ // PreserveSig=false methods always return HRESULTs.
+ m_flags |= enum_NativeHResultRetVal;
+
+ // count the output by-ref argument
+ nativeArgSize += sizeof(void *);
+
+ if (info.GetMarshalType() == MarshalInfo::MARSHAL_TYPE_HIDDENLENGTHARRAY)
+ {
+ // count the output hidden length
+ nativeArgSize += info.GetHiddenLengthParamStackSize();
+ }
+
+ goto Done;
+ }
+#endif // _TARGET_X86_
+
+ // Ignore the secret return buffer argument - we don't allow returning
+ // structures by value in COM interop.
+ if (info.IsFpuReturn())
+ {
+ if (info.GetMarshalType() == MarshalInfo::MARSHAL_TYPE_FLOAT)
+ {
+ m_flags |= enum_NativeR4Retval;
+ }
+ else
+ {
+ _ASSERTE(info.GetMarshalType() == MarshalInfo::MARSHAL_TYPE_DOUBLE);
+ m_flags |= enum_NativeR8Retval;
+ }
+ }
+ else
+ {
+ CorElementType returnType = msig.GetReturnType();
+ if (returnType == ELEMENT_TYPE_I4 || returnType == ELEMENT_TYPE_U4)
+ {
+ // If the method is PreserveSig=true and returns either an I4 or a U4, then we
+ // will assume the user wants to return an HRESULT in case of failure.
+ m_flags |= enum_NativeHResultRetVal;
+ }
+ else if (info.GetMarshalType() == MarshalInfo::MARSHAL_TYPE_DATE)
+ {
+ // DateTime is returned as an OLEAUT DATE which is actually an R8.
+ m_flags |= enum_NativeR8Retval;
+ }
+ else
+ {
+ // The method doesn't return an FP value nor should we treat it as returning
+ // an HRESULT so we will return 0 in case of failure.
+ m_flags |= enum_NativeBoolRetVal;
+ }
+ }
+ }
+ }
+
+Done:
+
+#ifdef _TARGET_X86_
+ // The above algorithm to compute nativeArgSize is x86-specific. We will compute
+ // the correct value later for other platforms.
+ m_StackBytes = nativeArgSize;
+#endif
+
+ m_flags |= enum_NativeInfoInitialized;
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTransientExceptions)
+
+ RETURN;
+}
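+
+// Note: exceptions above are swallowed (transient ones are rethrown), leaving
+// enum_NativeInfoInitialized unset and m_StackBytes at its (UINT16)-1 sentinel,
+// i.e. "native stack size unknown".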
+
+SpinLock* ComCall::s_plock=NULL;
+
+//---------------------------------------------------------
+// One-time init
+//---------------------------------------------------------
+/*static*/
+void ComCall::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ s_plock = new SpinLock();
+ s_plock->Init(LOCK_COMCALL);
+}
+
+//
+/*static*/
+void ComCall::PopulateComCallMethodDesc(ComCallMethodDesc *pCMD, DWORD *pdwStubFlags)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pCMD));
+ PRECONDITION(CheckPointer(pdwStubFlags));
+ }
+ CONTRACTL_END;
+
+ DWORD dwStubFlags = NDIRECTSTUB_FL_COM | NDIRECTSTUB_FL_REVERSE_INTEROP;
+
+ BOOL BestFit = TRUE;
+ BOOL ThrowOnUnmappableChar = FALSE;
+
+ if (pCMD->IsFieldCall())
+ {
+ if (pCMD->IsFieldGetter())
+ dwStubFlags |= NDIRECTSTUB_FL_FIELDGETTER;
+ else
+ dwStubFlags |= NDIRECTSTUB_FL_FIELDSETTER;
+
+ FieldDesc *pFD = pCMD->GetFieldDesc();
+ _ASSERTE(IsMemberVisibleFromCom(pFD->GetApproxEnclosingMethodTable(), pFD->GetMemberDef(), mdTokenNil) && "Calls are not permitted on this member since it isn't visible from COM. The only way you can have reached this code path is if your native interface doesn't match the managed interface.");
+
+ MethodTable *pMT = pFD->GetEnclosingMethodTable();
+ ReadBestFitCustomAttribute(pMT->GetMDImport(), pMT->GetCl(), &BestFit, &ThrowOnUnmappableChar);
+ }
+ else
+ {
+ MethodDesc *pMD = pCMD->GetCallMethodDesc();
+ _ASSERTE(IsMethodVisibleFromCom(pMD) && "Calls are not permitted on this member since it isn't visible from COM. The only way you can have reached this code path is if your native interface doesn't match the managed interface.");
+
+ MethodTable *pMT = pMD->GetMethodTable();
+ if (pMT->IsProjectedFromWinRT() || pMT->IsExportedToWinRT() || pCMD->IsWinRTRedirectedMethod())
+ {
+ dwStubFlags |= NDIRECTSTUB_FL_WINRT;
+
+ if (pMT->IsDelegate())
+ dwStubFlags |= NDIRECTSTUB_FL_WINRTDELEGATE;
+ else if (pCMD->IsWinRTCtor())
+ {
+ dwStubFlags |= NDIRECTSTUB_FL_WINRTCTOR;
+ }
+ else
+ {
+ if (pCMD->IsWinRTStatic())
+ dwStubFlags |= NDIRECTSTUB_FL_WINRTSTATIC;
+ }
+ }
+ else
+ {
+ // Marshaling is fully described by the parameter type in WinRT. BestFit custom attributes
+ // are not going to affect the marshaling behavior.
+ ReadBestFitCustomAttribute(pMD, &BestFit, &ThrowOnUnmappableChar);
+ }
+ }
+
+ if (BestFit)
+ dwStubFlags |= NDIRECTSTUB_FL_BESTFIT;
+
+ if (ThrowOnUnmappableChar)
+ dwStubFlags |= NDIRECTSTUB_FL_THROWONUNMAPPABLECHAR;
+
+ //
+ // fill in out param
+ //
+ *pdwStubFlags = dwStubFlags;
+}
+
+#ifndef CROSSGEN_COMPILE
+#ifdef _TARGET_X86_
+//---------------------------------------------------------
+// Creates the generic ComCall stub.
+//
+// Throws in case of error.
+//---------------------------------------------------------
+/*static*/
+Stub* ComCall::CreateGenericComCallStub(BOOL isFieldAccess)
+{
+ CONTRACT (Stub*)
+ {
+ STANDARD_VM_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ CPUSTUBLINKER sl;
+ CPUSTUBLINKER *psl = &sl;
+
+ // These new CodeLabels are allocated on a
+ // "throwaway" heap. Do not worry about
+ // deallocating them if one of the allocations
+ // ends up throwing an OOM exception.
+
+ CodeLabel* rgRareLabels[] = {
+ psl->NewCodeLabel(),
+ psl->NewCodeLabel(),
+ psl->NewCodeLabel()
+ };
+
+
+ CodeLabel* rgRejoinLabels[] = {
+ psl->NewCodeLabel(),
+ psl->NewCodeLabel(),
+ psl->NewCodeLabel()
+ };
+
+ // Pop ComCallMethodDesc* pushed by prestub
+ psl->X86EmitPopReg(kEAX);
+
+ // emit the initial prolog
+ // NOTE: Don't profile field accesses yet.
+ psl->EmitComMethodStubProlog(ComMethodFrame::GetMethodFrameVPtr(),
+ rgRareLabels,
+ rgRejoinLabels,
+ !isFieldAccess);
+
+ // ******* NOTE ********
+ // We now have a frame on the stack that is unprotected by an SEH handler. If we take an
+ // SO before getting into the target, we'll have a corrupted frame chain. In EmitComMethodStubProlog
+ // we probe-by-touch for 4 DWORDS to ensure that we can set up the SEH handler before linking in
+ // the frame. So long as we don't use more than that here (currently 3 DWORDS: the two args plus
+ // the return address), we are OK. If we decrement ESP more than an additional DWORD here before
+ // calling the target, we will need to probe farther.
+
+ psl->X86EmitPushReg(kESI); // push frame as an ARG
+ psl->X86EmitPushReg(kEBX); // push ebx (push current thread as ARG)
+ LPVOID pTarget = isFieldAccess ? (LPVOID)FieldCallWorker : (LPVOID)COMToCLRWorker;
+ psl->X86EmitCall(psl->NewExternalCodeLabel(pTarget), 8);
+
+ // emit the epilog
+ // NOTE: Don't profile field accesses yet.
+ psl->EmitSharedComMethodStubEpilog(ComMethodFrame::GetMethodFrameVPtr(), rgRareLabels, rgRejoinLabels,
+ ComCallMethodDesc::GetOffsetOfReturnThunk(), !isFieldAccess);
+
+ // Process-wide stubs that never unload.
+ RETURN (psl->Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap()));
+}
+#endif // _TARGET_X86_
+
+//---------------------------------------------------------
+// Either creates or retrieves from the cache, a stub to
+// invoke ComCall methods. Each call refcounts the returned stub.
+// This routine throws an exception rather than returning
+// NULL.
+//---------------------------------------------------------
+/*static*/
+PCODE ComCall::GetComCallMethodStub(ComCallMethodDesc *pCMD)
+{
+ CONTRACT (PCODE)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pCMD));
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+ SetupGenericStubs();
+
+ // The stub we return is one of two shared generic stubs: one for method calls
+ // and one for field accesses. The generic stub parameterizes
+ // its behavior based on the ComCallMethodDesc.
+
+ PCODE pTempILStub = NULL;
+ DWORD dwStubFlags;
+
+ PopulateComCallMethodDesc(pCMD, &dwStubFlags);
+
+ MethodDesc *pStubMD;
+ pTempILStub = pCMD->CreateCOMToCLRStub(dwStubFlags, &pStubMD);
+
+ // Compute stack layout and prepare the return thunk on x86
+ pCMD->InitRuntimeNativeInfo(pStubMD);
+
+ InterlockedCompareExchangeT<PCODE>(pCMD->GetAddrOfILStubField(), pTempILStub, NULL);
+
+#ifdef _TARGET_X86_
+ // Finally, we need to build a stub that represents the entire call. This
+ // is always generic.
+ RETURN (pCMD->IsFieldCall() ? g_pGenericComCallStubFields : g_pGenericComCallStub);
+#else
+ RETURN GetEEFuncEntryPoint(GenericComCallStub);
+#endif
+}
+#endif // CROSSGEN_COMPILE
+
+// Called both at run-time and by NGEN - generates method stub.
+/*static*/
+MethodDesc* ComCall::GetILStubMethodDesc(MethodDesc *pCallMD, DWORD dwStubFlags)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pCallMD));
+ PRECONDITION(SF_IsReverseCOMStub(dwStubFlags));
+ }
+ CONTRACTL_END;
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+
+ // Get the call signature information
+ StubSigDesc sigDesc(pCallMD);
+
+ return NDirect::CreateCLRToNativeILStub(&sigDesc,
+ (CorNativeLinkType)0,
+ (CorNativeLinkFlags)0,
+ (CorPinvokeMap)0,
+ dwStubFlags);
+}
+
+// Called at run-time - generates field access stub. We don't currently NGEN field access stubs
+// as the scenario is too rare to justify the extra NGEN logic. The workaround is trivial - make
+// the field non-public and add a public property to access it.
+/*static*/
+MethodDesc* ComCall::GetILStubMethodDesc(FieldDesc *pFD, DWORD dwStubFlags)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pFD));
+ PRECONDITION(SF_IsFieldGetterStub(dwStubFlags) || SF_IsFieldSetterStub(dwStubFlags));
+ }
+ CONTRACTL_END;
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+
+ // Get the field signature information
+ pFD->GetSig(&pSig, &cSig);
+
+ return NDirect::CreateFieldAccessILStub(pSig,
+ cSig,
+ pFD->GetModule(),
+ pFD->GetMemberDef(),
+ dwStubFlags,
+ pFD);
+}
+
+// static
+MethodDesc *ComCall::GetCtorForWinRTFactoryMethod(MethodTable *pClsMT, MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(pClsMT->IsSealed());
+ }
+ CONTRACTL_END;
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+ pMD->GetSig(&pSig, &cSig);
+ SigParser sig(pSig, cSig);
+
+ ULONG numArgs;
+ CorElementType et;
+
+ IfFailThrow(sig.GetCallingConv(NULL)); // calling convention
+ IfFailThrow(sig.GetData(&numArgs)); // number of args
+ IfFailThrow(sig.SkipExactlyOne()); // skip return type
+
+ SigBuilder sigBuilder;
+ sigBuilder.AppendByte(IMAGE_CEE_CS_CALLCONV_HASTHIS);
+ sigBuilder.AppendData(numArgs);
+
+ // ctor returns void
+ sigBuilder.AppendElementType(ELEMENT_TYPE_VOID);
+
+ sig.GetSignature(&pSig, &cSig);
+
+ // parameter types are identical for sealed classes
+ sigBuilder.AppendBlob((const PVOID)pSig, cSig);
+
+ pSig = (PCCOR_SIGNATURE)sigBuilder.GetSignature(&cSig);
+
+ MethodDesc *pCtorMD = MemberLoader::FindMethod(pClsMT, COR_CTOR_METHOD_NAME, pSig, cSig, pMD->GetModule());
+
+ if (pCtorMD == NULL)
+ {
+ SString ctorMethodName(SString::Utf8, COR_CTOR_METHOD_NAME);
+ COMPlusThrowNonLocalized(kMissingMethodException, ctorMethodName.GetUnicode());
+ }
+ return pCtorMD;
+}
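+
+// Sketch of the signature rewrite above (hypothetical factory method):
+//   factory interface:  Foo CreateInstance(int32 x)
+//   ctor looked up:     instance void .ctor(int32 x)   // HASTHIS, void return, same params
+// The parameter blob is reused verbatim, which is valid because the class is sealed.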
+
+// static
+MethodDesc *ComCall::GetStaticForWinRTFactoryMethod(MethodTable *pClsMT, MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+ pMD->GetSig(&pSig, &cSig);
+ SigParser sig(pSig, cSig);
+
+ IfFailThrow(sig.GetCallingConv(NULL)); // calling convention
+
+ SigBuilder sigBuilder;
+ sigBuilder.AppendByte(IMAGE_CEE_CS_CALLCONV_DEFAULT);
+
+ // number of parameters, return type, and parameter types are identical
+ sig.GetSignature(&pSig, &cSig);
+ sigBuilder.AppendBlob((const PVOID)pSig, cSig);
+
+ pSig = (PCCOR_SIGNATURE)sigBuilder.GetSignature(&cSig);
+
+ MethodDesc *pStaticMD = MemberLoader::FindMethod(pClsMT, pMD->GetName(), pSig, cSig, pMD->GetModule());
+
+ if (pStaticMD == NULL)
+ {
+ SString staticMethodName(SString::Utf8, pMD->GetName());
+ COMPlusThrowNonLocalized(kMissingMethodException, staticMethodName.GetUnicode());
+ }
+ return pStaticMD;
+}
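+
+// Unlike the ctor case, the static lookup keeps the parameter count, return type
+// and parameter types verbatim and only normalizes the calling convention to
+// IMAGE_CEE_CS_CALLCONV_DEFAULT (no 'this').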
+
+#endif // DACCESS_COMPILE
diff --git a/src/vm/comtoclrcall.h b/src/vm/comtoclrcall.h
new file mode 100644
index 0000000000..cd9d69f97a
--- /dev/null
+++ b/src/vm/comtoclrcall.h
@@ -0,0 +1,483 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: COMtoCLRCall.h
+//
+
+//
+
+
+#ifndef __COMTOCLRCALL_H__
+#define __COMTOCLRCALL_H__
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+#include "util.hpp"
+#include "spinlock.h"
+
+enum ComCallFlags
+{
+ enum_IsVirtual = 0x0001, // If true the method is virtual on the managed side
+ enum_IsFieldCall = 0x0002, // is field call
+ enum_IsGetter = 0x0004, // is field call getter
+ enum_NativeInfoInitialized = 0x0008, // Has the native info been initialized
+ enum_NativeR4Retval = 0x0010, // Native ret val is an R4
+ enum_NativeR8Retval = 0x0020, // Native ret val is an R8
+ enum_NativeHResultRetVal = 0x0040, // Native ret val is an HRESULT
+ enum_NativeBoolRetVal = 0x0080, // Native ret val is 0 in the case of failure
+ enum_NativeVoidRetVal = 0x0100, // Native ret val is void
+ enum_IsEarlyBoundUnsafe = 0x0200, // Is unsafe to be called early-bound
+ enum_HasMarshalError = 0x0400, // The signature is not marshalable and m_StackBytes is a guess
+ enum_IsDelegateInvoke = 0x0800, // The method is an 'Invoke' on a delegate
+ enum_NeedsSecurityCheck = 0x1000, // Security check is needed at every invocation
+ enum_IsWinRTCall = 0x2000, // The method is declared on a WinRT interface/delegate
+ enum_IsWinRTCtor = 0x4000, // The method is a WinRT constructor
+ enum_IsWinRTStatic = 0x8000, // The method is a WinRT static
+ enum_IsWinRTRedirected = 0x10000, // The method is declared on a redirected WinRT interface
+};
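+
+// Example combination (hypothetical): a virtual method on a WinRT interface that
+// returns an R8 would carry enum_IsVirtual | enum_IsWinRTCall | enum_NativeR8Retval
+// once InitNativeInfo has run (which also sets enum_NativeInfoInitialized).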
+
+
+//=======================================================================
+// class com call
+//=======================================================================
+
+#if !defined(DACCESS_COMPILE)
+class ComCall
+{
+public:
+ // Encapsulate a SpinLockHolder, so that clients of our lock don't have to know
+ // the details of our implementation.
+ class LockHolder : public SpinLockHolder
+ {
+ public:
+ LockHolder()
+ : SpinLockHolder(ComCall::s_plock)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ };
+
+
+ //---------------------------------------------------------
+ // One-time init
+ //---------------------------------------------------------
+ static void Init();
+
+ //
+ static void PopulateComCallMethodDesc(ComCallMethodDesc *pCMD, DWORD *pdwStubFlags);
+
+ // helper to create a generic stub for com calls
+ static Stub* CreateGenericComCallStub(BOOL isFieldAccess);
+
+ //---------------------------------------------------------
+ // Either creates or retrieves from the cache, a stub to
+ // invoke COM-to-CLR calls.
+ // Each call refcounts the returned stub.
+ // This routine throws an exception rather than returning
+ // NULL.
+ //---------------------------------------------------------
+ static PCODE GetComCallMethodStub(ComCallMethodDesc *pMD);
+
+ // pCallMD is either interface or class method - the one returned by
+ // code:ComCallMethodDesc.GetCallMethodDesc on the ComCallMethodDesc
+ // that owns the stub; pFD is the target field
+ static MethodDesc* GetILStubMethodDesc(MethodDesc *pCallMD, DWORD dwStubFlags);
+ static MethodDesc* GetILStubMethodDesc(FieldDesc *pFD, DWORD dwStubFlags);
+
+ static MethodDesc *GetCtorForWinRTFactoryMethod(MethodTable *pClsMT, MethodDesc *pMD);
+ static MethodDesc *GetStaticForWinRTFactoryMethod(MethodTable *pClsMT, MethodDesc *pMD);
+
+private:
+ ComCall() {LIMITED_METHOD_CONTRACT;}; // prevent "new"'s on this class
+
+ static SpinLock* s_plock;
+};
+#endif // DACCESS_COMPILE
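+
+// Typical flow (sketch): the prestub calls ComCall::GetComCallMethodStub(pCMD), which
+// populates the stub flags, creates and JITs the IL stub via CreateCOMToCLRStub,
+// prepares the x86 return thunk via InitRuntimeNativeInfo, and hands back the shared
+// generic stub that drives the invocation.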
+
+//-----------------------------------------------------------------------
+// Operations specific to ComCall methods. This is not a code:MethodDesc.
+//-----------------------------------------------------------------------
+
+class ComCallMethodDesc
+{
+ friend void InvokeStub(ComCallMethodDesc *pCMD, PCODE pManagedTarget, OBJECTREF orThis, ComMethodFrame *pFrame, Thread *pThread, UINT64* pRetValOut);
+
+public:
+ // init method
+ void InitMethod(MethodDesc *pMD, MethodDesc *pInterfaceMD, BOOL fRedirectedInterface = FALSE);
+
+ // init field
+ void InitField(FieldDesc* pField, BOOL isGetter);
+
+ // is field call
+ BOOL IsFieldCall()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_flags & enum_IsFieldCall);
+ }
+
+ BOOL IsEarlyBoundUnsafe()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_flags & enum_IsEarlyBoundUnsafe);
+ }
+
+ BOOL NeedsSecurityCheck()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_flags & enum_NeedsSecurityCheck);
+ }
+
+ BOOL IsMethodCall()
+ {
+ WRAPPER_NO_CONTRACT;
+ return !IsFieldCall();
+ }
+
+ // is field getter
+ BOOL IsFieldGetter()
+ {
+ CONTRACT (BOOL)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(IsFieldCall());
+ }
+ CONTRACT_END;
+
+ RETURN (m_flags & enum_IsGetter);
+ }
+
+ // is a virtual method
+ BOOL IsVirtual()
+ {
+ CONTRACT (BOOL)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(IsMethodCall());
+ }
+ CONTRACT_END;
+
+ RETURN (m_flags & enum_IsVirtual);
+ }
+
+ BOOL IsNativeR4RetVal()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_flags & enum_NativeR4Retval;
+ }
+
+ BOOL IsNativeR8RetVal()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_flags & enum_NativeR8Retval;
+ }
+
+ BOOL IsNativeFloatingPointRetVal()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_flags & (enum_NativeR4Retval | enum_NativeR8Retval);
+ }
+
+ BOOL IsNativeHResultRetVal()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_flags & enum_NativeHResultRetVal;
+ }
+
+ BOOL IsNativeBoolRetVal()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_flags & enum_NativeBoolRetVal;
+ }
+
+ BOOL IsNativeVoidRetVal()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_flags & enum_NativeVoidRetVal;
+ }
+
+ BOOL HasMarshalError()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_flags & enum_HasMarshalError;
+ }
+
+ BOOL IsDelegateInvoke()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_flags & enum_IsDelegateInvoke;
+ }
+
+ BOOL IsWinRTCall()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_flags & enum_IsWinRTCall;
+ }
+
+ BOOL IsWinRTCtor()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_flags & enum_IsWinRTCtor;
+ }
+
+ BOOL IsWinRTStatic()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_flags & enum_IsWinRTStatic;
+ }
+
+ BOOL IsWinRTRedirectedMethod()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_flags & enum_IsWinRTRedirected;
+ }
+
+ BOOL IsNativeInfoInitialized()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_flags & enum_NativeInfoInitialized;
+ }
+
+ DWORD GetFlags()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_flags;
+ }
+
+ // get method desc
+ MethodDesc* GetMethodDesc()
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(!IsFieldCall());
+ PRECONDITION(CheckPointer(m_pMD));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN m_pMD;
+ }
+
+ // get interface method desc
+ MethodDesc* GetInterfaceMethodDesc()
+ {
+ CONTRACT (MethodDesc *)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(!IsFieldCall());
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ RETURN m_pInterfaceMD;
+ }
+
+ // get interface method desc if non-NULL, class method desc otherwise
+ MethodDesc* GetCallMethodDesc()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ MethodDesc *pMD = GetInterfaceMethodDesc();
+ if (pMD == NULL)
+ pMD = GetMethodDesc();
+ _ASSERTE(pMD != NULL);
+
+ return pMD;
+ }
+
+ // get field desc
+ FieldDesc* GetFieldDesc()
+ {
+ CONTRACT (FieldDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(IsFieldCall());
+ PRECONDITION(CheckPointer(m_pFD));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN m_pFD;
+ }
+
+ // get module
+ Module* GetModule();
+
+ PCODE *GetAddrOfILStubField()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_pILStub;
+ }
+
+ PCODE GetILStub()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pILStub;
+ }
+
+ // get slot number for the method
+ unsigned GetSlot()
+ {
+ CONTRACT (unsigned)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(IsMethodCall());
+ PRECONDITION(CheckPointer(m_pMD));
+ }
+ CONTRACT_END;
+
+ RETURN m_pMD->GetSlot();
+ }
+
+ // get num stack bytes to pop
+ UINT16 GetNumStackBytes()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(m_flags & enum_NativeInfoInitialized);
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ return m_StackBytes;
+ }
+
+ static DWORD GetOffsetOfReturnThunk()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return -COMMETHOD_PREPAD;
+ }
+
+ static DWORD GetOffsetOfMethodDesc()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((DWORD) offsetof(class ComCallMethodDesc, m_pMD));
+ }
+
+ // get call sig
+ PCCOR_SIGNATURE GetSig(DWORD *pcbSigSize = NULL)
+ {
+ CONTRACT (PCCOR_SIGNATURE)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(IsMethodCall());
+ PRECONDITION(CheckPointer(m_pMD));
+ }
+ CONTRACT_END;
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cbSigSize;
+
+ m_pMD->GetSig(&pSig, &cbSigSize);
+
+ if (pcbSigSize != NULL)
+ {
+ *pcbSigSize = cbSigSize;
+ }
+
+ RETURN pSig;
+ }
+
+ // Discard all the resources owned by this ComCallMethodDesc.
+ void Destruct()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef _TARGET_X86_
+ if (m_pwStubStackSlotOffsets != NULL)
+ delete [] m_pwStubStackSlotOffsets;
+#endif // _TARGET_X86_
+ }
+
+ static void ReleaseComCallMethodDesc(ComCallMethodDesc *pCMD)
+ {
+ WRAPPER_NO_CONTRACT;
+ pCMD->Destruct();
+ }
+
+ PCODE CreateCOMToCLRStub(DWORD dwStubFlags, MethodDesc **ppStubMD);
+ void InitRuntimeNativeInfo(MethodDesc *pStubMD);
+
+private:
+ // Initialize the member's native type information (size of native stack, native retval flags, etc).
+ void InitNativeInfo();
+
+ // see ComCallFlags enum above
+ DWORD m_flags;
+ union
+ {
+ struct
+ {
+ MethodDesc* m_pMD;
+ PTR_MethodDesc m_pInterfaceMD;
+ };
+ FieldDesc* m_pFD;
+ };
+
+ PCODE m_pILStub; // IL stub for COM to CLR call, invokes GetCallMethodDesc()
+
+ // Platform specific data needed for efficient IL stub invocation:
+#ifdef _TARGET_X86_
+ union
+ {
+ struct
+ {
+ // Index of the stack slot that gets stuffed into EDX when calling the stub.
+ UINT16 m_wSourceSlotEDX;
+
+ // Number of stack slots expected by the IL stub.
+ UINT16 m_wStubStackSlotCount;
+ };
+ // Combination of m_wSourceSlotEDX and m_wStubStackSlotCount for atomic updates.
+ UINT32 m_dwSlotInfo;
+ };
+
+ // This is an array of m_wStubStackSlotCount numbers where each element is the offset
+ // on the source stack where the particular stub stack slot should be copied from.
+ UINT16 *m_pwStubStackSlotOffsets;
+#endif // _TARGET_X86_
+
+ // Number of stack bytes pushed by the unmanaged caller.
+ UINT16 m_StackBytes;
+};
+
+typedef Holder<ComCallMethodDesc *, DoNothing<ComCallMethodDesc *>, ComCallMethodDesc::ReleaseComCallMethodDesc> ComCallMethodDescHolder;
+
+#endif // __COMTOCLRCALL_H__
diff --git a/src/vm/comtypelibconverter.cpp b/src/vm/comtypelibconverter.cpp
new file mode 100644
index 0000000000..481870b711
--- /dev/null
+++ b/src/vm/comtypelibconverter.cpp
@@ -0,0 +1,792 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: COMTypeLibConverter.cpp
+**
+**
+** Purpose: Implementation of the native methods used by the
+** typelib converter.
+**
+**
+===========================================================*/
+
+#include "common.h"
+
+#include "comtypelibconverter.h"
+#include "runtimecallablewrapper.h"
+#include "assembly.hpp"
+#include "debugmacros.h"
+#include <tlbimpexp.h>
+#include "..\md\inc\imptlb.h"
+#include <tlbutils.h>
+#include "posterror.h"
+
+BOOL COMTypeLibConverter::m_bInitialized = FALSE;
+
+void COMTypeLibConverter::TypeLibImporterWrapper(
+ ITypeLib *pITLB, // Typelib to import.
+ LPCWSTR szFname, // Name of the typelib, if known.
+ LPCWSTR szNamespace, // Optional namespace override.
+ IMetaDataEmit *pEmit, // Metadata scope to which to emit.
+ Assembly *pAssembly, // Assembly containing the imported module.
+ Module *pModule, // Module we are emitting into.
+ ITypeLibImporterNotifySink *pNotify,// Callback interface.
+ TlbImporterFlags flags, // Importer flags.
+ CImportTlb **ppImporter) // The importer.
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pITLB));
+ PRECONDITION(CheckPointer(szFname, NULL_OK));
+ PRECONDITION(CheckPointer(szNamespace, NULL_OK));
+ PRECONDITION(CheckPointer(pEmit));
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(pNotify));
+ PRECONDITION(CheckPointer(ppImporter));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+
+ // Retrieve flag indicating whether runtime or linktime interface
+ // security checks are required.
+ BOOL bUnsafeInterfaces = (BOOL)(flags & TlbImporter_UnsafeInterfaces);
+
+ // Determine if we import SAFEARRAYs as System.Array.
+ BOOL bSafeArrayAsSysArray = (BOOL)(flags & TlbImporter_SafeArrayAsSystemArray);
+
+ // Determine if we are doing the [out,retval] transformation on disp only interfaces.
+ BOOL bTransformDispRetVals = (BOOL)(flags & TlbImporter_TransformDispRetVals);
+
+ // Determine if we are prevented from adding members to classes.
+ BOOL bPreventClassMembers = (BOOL)(flags & TlbImporter_PreventClassMembers);
+
+ // Determine if we are marking value classes as serializable.
+ BOOL bSerializableValueClasses = (BOOL)(flags & TlbImporter_SerializableValueClasses);
+
+ // Create and initialize a TypeLib importer.
+ NewPreempHolder<CImportTlb> pImporter = CImportTlb::CreateImporter(szFname, pITLB, true, bUnsafeInterfaces, bSafeArrayAsSysArray, bTransformDispRetVals, bPreventClassMembers, bSerializableValueClasses);
+ if (!pImporter)
+ COMPlusThrowOM();
+
+ // If a namespace is specified, use it.
+ if (szNamespace)
+ pImporter->SetNamespace(szNamespace);
+
+ // Set the various pointers.
+ hr = pImporter->SetMetaData(pEmit);
+ _ASSERTE(SUCCEEDED(hr) && "Couldn't get IMetaDataEmit* from Module");
+ if (FAILED(hr))
+ COMPlusThrowArgumentNull(W("pEmit"));
+
+ pImporter->SetNotification(pNotify);
+ pImporter->SetAssembly(pAssembly);
+ pImporter->SetModule(pModule);
+
+ // Do the conversion.
+ hr = pImporter->Import();
+ if (SUCCEEDED(hr))
+ {
+ *ppImporter = pImporter;
+ pImporter.SuppressRelease();
+ }
+ else
+ {
+ COMPlusThrowHR(hr, kGetErrorInfo);
+ }
+} // void COMTypeLibConverter::TypeLibImporterWrapper()
+
+
+void COMTypeLibConverter::ConvertAssemblyToTypeLibInternal(OBJECTREF* ppAssembly,
+ STRINGREF* ppTypeLibName,
+ DWORD Flags,
+ OBJECTREF* ppNotifySink,
+ OBJECTREF* pRetObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION (IsProtectedByGCFrame (ppAssembly));
+ PRECONDITION (IsProtectedByGCFrame (ppTypeLibName));
+ PRECONDITION (IsProtectedByGCFrame (ppNotifySink));
+ PRECONDITION (IsProtectedByGCFrame (pRetObj));
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ Assembly *pAssembly=0; // Assembly to export.
+
+ NewArrayHolder<WCHAR> szTypeLibName=0; // The name for the typelib.
+ SafeComHolder<ITypeLib> pTLB=0; // The new typelib.
+ SafeComHolder<ITypeLibExporterNotifySink> pINotify=0; // Callback parameter.
+
+ // Make sure the COMTypeLibConverter has been initialized.
+ if (!m_bInitialized)
+ Init();
+
+ // Validate flags
+ if ((Flags & ~TlbExporter_ValidFlags) != 0)
+ COMPlusThrowArgumentOutOfRange(W("flags"), W("Argument_InvalidFlag"));
+
+ // Retrieve the callback.
+ if (*ppNotifySink == NULL)
+ COMPlusThrowArgumentNull(W("notifySink"));
+
+ pINotify = (ITypeLibExporterNotifySink*)GetComIPFromObjectRef(ppNotifySink, MscorlibBinder::GetClass(CLASS__ITYPE_LIB_EXPORTER_NOTIFY_SINK));
+ if (!pINotify)
+ COMPlusThrow(kArgumentException, W("Arg_NoImporterCallback"));
+
+ // If a name was specified then copy it to a temporary string.
+ if (*ppTypeLibName != NULL)
+ {
+ int TypeLibNameLen = (*ppTypeLibName)->GetStringLength();
+ szTypeLibName = new WCHAR[TypeLibNameLen + 1];
+ memcpyNoGCRefs(szTypeLibName, (*ppTypeLibName)->GetBuffer(), TypeLibNameLen * sizeof(WCHAR));
+ szTypeLibName[TypeLibNameLen] = 0;
+ }
+
+ // Retrieve the assembly from the AssemblyBuilder argument.
+ if (*ppAssembly == NULL)
+ COMPlusThrowNonLocalized(kArgumentNullException, W("assembly"));
+
+ pAssembly = ((ASSEMBLYREF)*ppAssembly)->GetAssembly();
+ _ASSERTE(pAssembly);
+
+ if (IsAfContentType_WindowsRuntime(pAssembly->GetFlags()))
+ COMPlusThrow(kArgumentException, W("Argument_AssemblyWinMD"));
+
+ {
+ GCX_PREEMP();
+ ExportTypeLibFromLoadedAssembly(pAssembly, szTypeLibName, &pTLB, pINotify, Flags);
+ }
+
+ // Make sure we got a typelib back.
+ _ASSERTE(pTLB);
+
+ // Convert the ITypeLib interface pointer to a COM+ object.
+ GetObjectRefFromComIP(pRetObj, pTLB, NULL);
+}
+
+// static
+void COMTypeLibConverter::LoadType(
+ Module * pModule,
+ mdTypeDef cl,
+ TlbImporterFlags Flags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pThrowable = NULL;
+
+ GCPROTECT_BEGIN(pThrowable)
+ {
+ EX_TRY
+ {
+ // Load the EE class that represents the type, so that
+ // the TypeDefToMethodTable rid map contains this entry
+ // (They were going to be loaded, anyway, to generate comtypes)
+ TypeHandle typeHnd;
+ typeHnd = ClassLoader::LoadTypeDefThrowing(pModule, cl,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ }
+ EX_CATCH
+ {
+ pThrowable = GET_THROWABLE();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (pThrowable != NULL)
+ {
+ // Only spit out the special wrapped message when PreventClassMembers is not set.
+ if ((Flags & TlbImporter_PreventClassMembers) == 0)
+ {
+ struct _gc
+ {
+ OBJECTREF pInnerException;
+ OBJECTREF pThrowable;
+ STRINGREF pMsg;
+ } gc;
+
+ gc.pInnerException = NULL;
+ gc.pThrowable = NULL;
+ gc.pMsg = NULL;
+
+ GCPROTECT_BEGIN(gc);
+ {
+ MethodTable* pMT = MscorlibBinder::GetException(kSystemException);
+
+ gc.pThrowable = AllocateObject(pMT);
+ gc.pInnerException = pThrowable;
+ ResMgrGetString(W("Arg_ImporterLoadFailure"), &gc.pMsg);
+
+ MethodDescCallSite exceptionCtor(METHOD__SYSTEM_EXCEPTION__STR_EX_CTOR, &gc.pThrowable);
+
+ ARG_SLOT args[] = { ObjToArgSlot(gc.pThrowable),
+ ObjToArgSlot(gc.pMsg),
+ ObjToArgSlot(gc.pInnerException) };
+
+ exceptionCtor.Call(args);
+
+ COMPlusThrow(gc.pThrowable);
+ }
+ GCPROTECT_END();
+ }
+
+ COMPlusThrow(pThrowable);
+ }
+ }
+ GCPROTECT_END();
+}
+
+void COMTypeLibConverter::ConvertTypeLibToMetadataInternal(OBJECTREF* ppTypeLib,
+ OBJECTREF* ppAsmBldr,
+ OBJECTREF* ppModBldr,
+ STRINGREF* ppNamespace,
+ TlbImporterFlags Flags,
+ OBJECTREF* ppNotifySink,
+ OBJECTREF* pEventItfInfoList)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(IsProtectedByGCFrame (ppTypeLib));
+ PRECONDITION(IsProtectedByGCFrame (ppAsmBldr));
+ PRECONDITION(IsProtectedByGCFrame (ppModBldr));
+ PRECONDITION(IsProtectedByGCFrame (ppNamespace));
+ PRECONDITION(IsProtectedByGCFrame (ppNotifySink));
+ PRECONDITION(IsProtectedByGCFrame (pEventItfInfoList));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ Module *pModule = NULL; // ModuleBuilder parameter.
+ Assembly *pAssembly = NULL; // AssemblyBuilder parameter.
+ REFLECTMODULEBASEREF pReflect = NULL; // ReflectModule passed as param.
+ int cTypeDefs; // Count of imported TypeDefs.
+ int i; // Loop control.
+ mdTypeDef cl; // An imported TypeDef.
+
+ NewArrayHolder<WCHAR> szNamespace = NULL; // The namespace to put the type in.
+ NewPreempHolder<CImportTlb> pImporter = NULL; // The importer used to import the typelib.
+ SafeComHolder<ITypeLib> pTLB = NULL; // TypeLib parameter.
+ SafeComHolder<ITypeLibImporterNotifySink> pINotify = NULL; // Callback parameter.
+
+ // Make sure the COMTypeLibConverter has been initialized.
+ if (!m_bInitialized)
+ Init();
+
+ // Validate the flags.
+ if ((Flags & ~TlbImporter_ValidFlags) != 0)
+ COMPlusThrowArgumentOutOfRange(W("flags"), W("Argument_InvalidFlag"));
+
+ // Retrieve the callback.
+ MethodTable * pSinkMT = MscorlibBinder::GetClass(CLASS__ITYPE_LIB_IMPORTER_NOTIFY_SINK);
+ pINotify = (ITypeLibImporterNotifySink*)GetComIPFromObjectRef(ppNotifySink, pSinkMT);
+ if (!pINotify)
+ COMPlusThrow(kArgumentException, W("Arg_NoImporterCallback"));
+
+ pReflect = (REFLECTMODULEBASEREF) *ppModBldr;
+ _ASSERTE(pReflect);
+
+
+ pModule = pReflect->GetModule();
+ _ASSERTE(pModule);
+
+ // Suppress capturing while we dispatch events. This is a performance optimization to avoid
+ // re-serializing metadata between each type. Instead, we suppress serialization while we bake all
+ // the types and then re-enable it at the end (when this holder goes out of scope).
+ _ASSERTE(pModule->IsReflection());
+ ReflectionModule::SuppressMetadataCaptureHolder holderCapture(pModule->GetReflectionModule());
+
+
+ // Retrieve the assembly from the AssemblyBuilder argument.
+ pAssembly = ((ASSEMBLYREF)*ppAsmBldr)->GetAssembly();
+ _ASSERTE(pAssembly);
+
+ // Retrieve a pointer to the ITypeLib interface.
+ pTLB = (ITypeLib*)GetComIPFromObjectRef(ppTypeLib, IID_ITypeLib);
+ if (!pTLB)
+ COMPlusThrow(kArgumentException, W("Arg_NoITypeLib"));
+
+ // If a namespace was specified then copy it to a temporary string.
+ if (*ppNamespace != NULL)
+ {
+ int NamespaceLen = (*ppNamespace)->GetStringLength();
+ szNamespace = new WCHAR[NamespaceLen + 1];
+ memcpyNoGCRefs(szNamespace, (*ppNamespace)->GetBuffer(), NamespaceLen * sizeof(WCHAR));
+ szNamespace[NamespaceLen] = 0;
+ }
+
+ // Switch to preemptive GC before we call out to COM.
+ {
+ GCX_PREEMP();
+
+ // Have to wrap the CImportTlb object in a call, because it has a destructor.
+ TypeLibImporterWrapper(pTLB, NULL /*filename*/, szNamespace,
+ pModule->GetEmitter(), pAssembly, pModule, pINotify,
+ Flags, &pImporter);
+ }
+
+ // Enumerate the types imported from the typelib, and add them to the assembly's available type table.
+ IMDInternalImport* pInternalImport = pModule->GetMDImport();
+ HENUMTypeDefInternalHolder hEnum(pInternalImport);
+
+ hEnum.EnumTypeDefInit();
+ cTypeDefs = pInternalImport->EnumTypeDefGetCount(&hEnum);
+
+ for (i=0; i<cTypeDefs; ++i)
+ {
+ BOOL success = pInternalImport->EnumTypeDefNext(&hEnum, &cl);
+ _ASSERTE(success);
+
+ pAssembly->AddType(pModule, cl);
+ }
+
+ // Allocate an empty array
+ CreateItfInfoList(pEventItfInfoList);
+
+#ifdef _DEBUG
+ if (!g_pConfig->TlbImpSkipLoading())
+ {
+#endif // _DEBUG
+ pInternalImport->EnumReset(&hEnum);
+ for (i=0; i<cTypeDefs; ++i)
+ {
+ BOOL success = pInternalImport->EnumTypeDefNext(&hEnum, &cl);
+ _ASSERTE(success);
+
+ LoadType(pModule, cl, Flags);
+ }
+
+ // Retrieve the event interface list.
+ GetEventItfInfoList(pImporter, pAssembly, pEventItfInfoList);
+#ifdef _DEBUG
+ }
+#endif // _DEBUG
+}
+
+void COMTypeLibConverter::CreateItfInfoList(OBJECTREF* pEventItfInfoList)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(IsProtectedByGCFrame (pEventItfInfoList));
+ }
+ CONTRACTL_END;
+
+ // Allocate the array list that will contain the event sources.
+ SetObjectReference(pEventItfInfoList,
+ AllocateObject(MscorlibBinder::GetClass(CLASS__ARRAY_LIST)),
+ SystemDomain::GetCurrentDomain());
+
+ MethodDescCallSite ctor(METHOD__ARRAY_LIST__CTOR, pEventItfInfoList);
+
+ // Call the ArrayList constructor.
+ ARG_SLOT CtorArgs[] =
+ {
+ ObjToArgSlot(*pEventItfInfoList)
+ };
+ ctor.Call(CtorArgs);
+}
+
+//*****************************************************************************
+//*****************************************************************************
+void COMTypeLibConverter::GetEventItfInfoList(CImportTlb *pImporter, Assembly *pAssembly, OBJECTREF *pEventItfInfoList)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pImporter));
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(IsProtectedByGCFrame (pEventItfInfoList));
+ }
+ CONTRACTL_END;
+
+ UINT i;
+ CQuickArray<ImpTlbEventInfo*> qbEvInfoList;
+
+ // Retrieve the list of event interfaces.
+ pImporter->GetEventInfoList(qbEvInfoList);
+
+ // Iterate over TypeInfos.
+ for (i = 0; i < qbEvInfoList.Size(); i++)
+ {
+ // Retrieve the Add method desc for the ArrayList.
+ MethodDescCallSite addMeth(METHOD__ARRAY_LIST__ADD, pEventItfInfoList);
+
+ // Retrieve the event interface info for the current CoClass.
+ OBJECTREF EventItfInfoObj = GetEventItfInfo(pAssembly, qbEvInfoList[i]);
+ _ASSERTE(EventItfInfoObj);
+
+ // Add the event interface info to the list.
+ ARG_SLOT AddArgs[] = {
+ ObjToArgSlot(*pEventItfInfoList),
+ ObjToArgSlot(EventItfInfoObj)
+ };
+ addMeth.Call(AddArgs);
+ }
+} // void COMTypeLibConverter::GetEventItfInfoList()
+
+//*****************************************************************************
+// Initialize the COMTypeLibConverter.
+//*****************************************************************************
+void COMTypeLibConverter::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Ensure COM is started up.
+ EnsureComStarted();
+
+ // Set the initialized flag to TRUE.
+ m_bInitialized = TRUE;
+} // void COMTypeLibConverter::Init()
+
+//*****************************************************************************
+// Given an imported class in an assembly, generate a list of event sources.
+//*****************************************************************************
+OBJECTREF COMTypeLibConverter::GetEventItfInfo(Assembly *pAssembly, ImpTlbEventInfo *pImpTlbEventInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(CheckPointer(pImpTlbEventInfo));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF RetObj = NULL;
+
+ struct _gc
+ {
+ OBJECTREF EventItfInfoObj;
+ STRINGREF EventItfNameStrObj;
+ STRINGREF SrcItfNameStrObj;
+ STRINGREF EventProvNameStrObj;
+ OBJECTREF AssemblyObj;
+ OBJECTREF SrcItfAssemblyObj;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc)
+ {
+ // Create the EventSource object.
+ gc.EventItfInfoObj = AllocateObject(MscorlibBinder::GetClass(CLASS__TCE_EVENT_ITF_INFO));
+
+ // Retrieve the assembly object.
+ gc.AssemblyObj = pAssembly->GetExposedObject();
+
+ // Retrieve the source interface assembly object (may be the same assembly).
+ gc.SrcItfAssemblyObj = pImpTlbEventInfo->SrcItfAssembly->GetExposedObject();
+
+ // Prepare the constructor arguments.
+ gc.EventItfNameStrObj = StringObject::NewString(pImpTlbEventInfo->szEventItfName);
+ gc.SrcItfNameStrObj = StringObject::NewString(pImpTlbEventInfo->szSrcItfName);
+ gc.EventProvNameStrObj = StringObject::NewString(pImpTlbEventInfo->szEventProviderName);
+
+ MethodDescCallSite ctor(METHOD__TCE_EVENT_ITF_INFO__CTOR, &gc.EventItfInfoObj);
+
+ // Call the EventItfInfo constructor.
+ ARG_SLOT CtorArgs[] = {
+ ObjToArgSlot(gc.EventItfInfoObj),
+ ObjToArgSlot(gc.EventItfNameStrObj),
+ ObjToArgSlot(gc.SrcItfNameStrObj),
+ ObjToArgSlot(gc.EventProvNameStrObj),
+ ObjToArgSlot(gc.AssemblyObj),
+ ObjToArgSlot(gc.SrcItfAssemblyObj),
+ };
+ ctor.Call(CtorArgs);
+
+ RetObj = gc.EventItfInfoObj;
+ }
+ GCPROTECT_END();
+
+ return RetObj;
+} // OBJECTREF COMTypeLibConverter::GetEventItfInfo()
+
+//*****************************************************************************
+// Given the string persisted from a TypeLib export, recreate the assembly
+// reference.
+//*****************************************************************************
+mdAssemblyRef DefineAssemblyRefForExportedAssembly(
+ LPCWSTR pszFullName, // Full name of the assembly.
+ IUnknown *pIMeta) // Metadata emit interface.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pszFullName));
+ PRECONDITION(CheckPointer(pIMeta));
+ }
+ CONTRACTL_END;
+
+ mdAssemblyRef ar=0;
+ HRESULT hr; // A result.
+ AssemblySpec spec; // "Name" of assembly.
+ CQuickArray<char> rBuf;
+ int iLen;
+ SafeComHolder<IMetaDataAssemblyEmit> pMeta=0; // Emit interface.
+
+ iLen = WszWideCharToMultiByte(CP_ACP,0, pszFullName,-1, 0,0, 0,0);
+ IfFailGo(rBuf.ReSizeNoThrow(iLen+1));
+ WszWideCharToMultiByte(CP_ACP,0, pszFullName,-1, rBuf.Ptr(),iLen+1, 0,0);
+
+ // Restore the AssemblySpec data.
+ IfFailGo(spec.Init(rBuf.Ptr()));
+
+ // Make sure we have the correct pointer type.
+ IfFailGo(SafeQueryInterface(pIMeta, IID_IMetaDataAssemblyEmit, (IUnknown**)&pMeta));
+
+ // Create the assemblyref token.
+ IfFailGo(spec.EmitToken(pMeta, &ar));
+
+ErrExit:
+ return ar;
+} // mdAssemblyRef DefineAssemblyRefForExportedAssembly()
+
+//*****************************************************************************
+// Public helper function used by typelib converter to create AssemblyRef
+// for a referenced typelib.
+//*****************************************************************************
+extern mdAssemblyRef DefineAssemblyRefForImportedTypeLib(
+ void *pvAssembly, // Assembly importing the typelib.
+ void *pvModule, // Module importing the typelib.
+ IUnknown *pIMeta, // IMetaData* from import module.
+ IUnknown *pIUnk, // IUnknown to referenced Assembly.
+ BSTR *pwzNamespace, // The namespace of the resolved assembly.
+ BSTR *pwzAsmName, // The name of the resolved assembly.
+ Assembly **ppAssemblyRef) // The resolved assembly.
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pvAssembly));
+ PRECONDITION(CheckPointer(pvModule));
+ PRECONDITION(CheckPointer(pIMeta));
+ PRECONDITION(CheckPointer(pIUnk));
+ PRECONDITION(CheckPointer(pwzNamespace));
+ PRECONDITION(CheckPointer(pwzAsmName));
+ PRECONDITION(CheckPointer(ppAssemblyRef, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // This is a workaround to allow an untyped param. The real fix is to move imptlb
+ // out of the metadata project and into this one, where it could reference any of
+ // this project's .h files directly.
+ Assembly* pAssembly = reinterpret_cast<Assembly*>(pvAssembly);
+ Module* pTypeModule = reinterpret_cast<Module*>(pvModule);
+ HRESULT hr;
+ Assembly* pRefdAssembly = NULL;
+ IMetaDataEmit* pEmitter = NULL;
+ MethodTable* pAssemblyClass = NULL;
+ mdAssemblyRef ar = mdAssemblyRefNil;
+ Module* pManifestModule = NULL;
+ mdTypeDef td = 0;
+ LPCSTR szName = NULL;
+ LPCSTR szNamespace = NULL;
+ CQuickBytes qb;
+ WCHAR* wszBuff = (WCHAR*) qb.AllocThrows((MAX_CLASSNAME_LENGTH+1) * sizeof(WCHAR));
+ SString szRefdAssemblyName;
+ IMDInternalImport* pRefdMDImport = NULL;
+ SafeComHolder<IMetaDataAssemblyEmit> pAssemEmitter = NULL;
+
+ GCX_COOP();
+
+ // Initialize the output strings to NULL.
+ *pwzNamespace = NULL;
+ *pwzAsmName = NULL;
+ BSTRHolder local_pwzNamespace = NULL;
+ BSTRHolder local_pwzAsmName = NULL;
+
+ // Get the Referenced Assembly object from the IUnknown.
+ PREFIX_ASSUME(pIUnk != NULL);
+ ASSEMBLYREF RefdAsmObj = NULL;
+ GCPROTECT_BEGIN(RefdAsmObj);
+ GetObjectRefFromComIP((OBJECTREF*)&RefdAsmObj, pIUnk, pAssemblyClass);
+ PREFIX_ASSUME(RefdAsmObj != NULL);
+
+ // Get the internal assembly from the assembly object.
+ pRefdAssembly = RefdAsmObj->GetAssembly();
+ GCPROTECT_END();
+ PREFIX_ASSUME(pRefdAssembly != NULL);
+
+ // Return the assembly if asked for
+ if (ppAssemblyRef)
+ *ppAssemblyRef = pRefdAssembly;
+
+ // Get the manifest module for the importing and the referenced assembly.
+ pManifestModule = pAssembly->GetManifestModule();
+
+ // Define the AssemblyRef in the global assembly.
+ pEmitter = pManifestModule->GetEmitter();
+ _ASSERTE(pEmitter);
+ IfFailGo(SafeQueryInterface(pEmitter, IID_IMetaDataAssemblyEmit, (IUnknown**) &pAssemEmitter));
+ ar = pAssembly->AddAssemblyRef(pRefdAssembly, pAssemEmitter);
+ pAssemEmitter.Release();
+
+ // Record the assembly ref token, and the assembly it refers to, in the manifest module's rid map.
+ pManifestModule->StoreAssemblyRef(ar, pRefdAssembly);
+
+ // Add assembly ref in module manifest.
+ IfFailGo(SafeQueryInterface(pIMeta, IID_IMetaDataAssemblyEmit, (IUnknown**) &pAssemEmitter));
+ ar = pAssembly->AddAssemblyRef(pRefdAssembly, pAssemEmitter);
+
+ // Record the assembly ref token, and the assembly it refers to, in the rid map of the module
+ // we are emitting into.
+ pTypeModule->StoreAssemblyRef(ar, pRefdAssembly);
+
+ // Retrieve the first typedef in the assembly.
+ {
+ ModuleIterator i = pRefdAssembly->IterateModules();
+ Module *pRefdModule = NULL;
+
+ while (i.Next())
+ {
+ pRefdModule = i.GetModule();
+ pRefdMDImport = pRefdModule->GetMDImport();
+ HENUMTypeDefInternalHolder hTDEnum(pRefdMDImport);
+
+ IfFailGo(hTDEnum.EnumTypeDefInitNoThrow());
+
+ if (pRefdMDImport->EnumTypeDefNext(&hTDEnum, &td) == true)
+ {
+ IfFailGo(pRefdMDImport->GetNameOfTypeDef(td, &szName, &szNamespace));
+ break;
+ }
+ }
+ }
+
+ // DefineAssemblyRefForImportedTypeLib should never be called for assemblies that
+ // do not contain any types, so we had better have found one.
+ _ASSERTE(szNamespace);
+
+ // Give the namespace back to the caller.
+ WszMultiByteToWideChar(CP_UTF8,0, szNamespace, -1, wszBuff, MAX_CLASSNAME_LENGTH);
+ local_pwzNamespace = SysAllocString(wszBuff);
+ IfNullGo(local_pwzNamespace);
+
+ // Give the assembly name back to the caller.
+ pRefdAssembly->GetDisplayName(szRefdAssemblyName);
+ local_pwzAsmName = SysAllocString(szRefdAssemblyName);
+ IfNullGo(local_pwzAsmName);
+
+ErrExit:
+ if (FAILED(hr))
+ {
+ ar = mdAssemblyRefNil;
+ }
+ else
+ {
+ local_pwzNamespace.SuppressRelease();
+ local_pwzAsmName.SuppressRelease();
+ *pwzNamespace = local_pwzNamespace;
+ *pwzAsmName = local_pwzAsmName;
+ }
+
+ return ar;
+} // mdAssemblyRef DefineAssemblyRefForImportedTypeLib()
+
+
+
+//*****************************************************************************
+// The typelib exporter entry point.
+//*****************************************************************************
+FCIMPL4(Object*, COMTypeLibConverter::ConvertAssemblyToTypeLib, Object* AssemblyUNSAFE, StringObject* TypeLibNameUNSAFE, DWORD Flags, Object* NotifySinkUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF RetObj = NULL;
+ struct _gc
+ {
+ OBJECTREF Assembly;
+ STRINGREF TypeLibName;
+ OBJECTREF NotifySink;
+ OBJECTREF RetObj;
+ } gc;
+
+ gc.Assembly = (OBJECTREF) AssemblyUNSAFE;
+ gc.TypeLibName = (STRINGREF) TypeLibNameUNSAFE;
+ gc.NotifySink = (OBJECTREF) NotifySinkUNSAFE;
+ gc.RetObj = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ ConvertAssemblyToTypeLibInternal(&gc.Assembly, &gc.TypeLibName, Flags, &gc.NotifySink, &gc.RetObj);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(gc.RetObj);
+} // Object* COMTypeLibConverter::ConvertAssemblyToTypeLib()
+FCIMPLEND
+
+//*****************************************************************************
+// Import a typelib as metadata. Doesn't add TCE adapters.
+//*****************************************************************************
+FCIMPL7(void, COMTypeLibConverter::ConvertTypeLibToMetadata, Object* TypeLibUNSAFE, Object* AsmBldrUNSAFE, Object* ModBldrUNSAFE, StringObject* NamespaceUNSAFE, TlbImporterFlags Flags, Object* NotifySinkUNSAFE, OBJECTREF* pEventItfInfoList)
+{
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ OBJECTREF TypeLib;
+ OBJECTREF AsmBldr;
+ OBJECTREF ModBldr;
+ STRINGREF Namespace;
+ OBJECTREF NotifySink;
+ } gc;
+
+ gc.TypeLib = (OBJECTREF) TypeLibUNSAFE;
+ gc.AsmBldr = (OBJECTREF) AsmBldrUNSAFE;
+ gc.ModBldr = (OBJECTREF) ModBldrUNSAFE;
+ gc.Namespace = (STRINGREF) NamespaceUNSAFE;
+ gc.NotifySink = (OBJECTREF) NotifySinkUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ ASSUME_BYREF_FROM_JIT_STACK_BEGIN(pEventItfInfoList);
+ ConvertTypeLibToMetadataInternal(&gc.TypeLib, &gc.AsmBldr, &gc.ModBldr, &gc.Namespace, Flags, &gc.NotifySink, pEventItfInfoList);
+ ASSUME_BYREF_FROM_JIT_STACK_END();
+
+ HELPER_METHOD_FRAME_END();
+} // void COMTypeLibConverter::ConvertTypeLibToMetadata()
+FCIMPLEND
diff --git a/src/vm/comtypelibconverter.h b/src/vm/comtypelibconverter.h
new file mode 100644
index 0000000000..641b9735bd
--- /dev/null
+++ b/src/vm/comtypelibconverter.h
@@ -0,0 +1,108 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: COMTypeLibConverter.h
+**
+**
+** Purpose: Definition of the native methods used by the
+** typelib converter.
+**
+**
+===========================================================*/
+
+#ifndef _COMTYPELIBCONVERTER_H
+#define _COMTYPELIBCONVERTER_H
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+#ifndef FEATURE_COMINTEROP_TLB_SUPPORT
+#error FEATURE_COMINTEROP_TLB_SUPPORT is required for this file
+#endif // FEATURE_COMINTEROP_TLB_SUPPORT
+
+#include "vars.hpp"
+
+struct ITypeLibImporterNotifySink;
+class ImpTlbEventInfo;
+
+
+enum TlbImporterFlags
+{
+ TlbImporter_PrimaryInteropAssembly = 0x00000001, // Generate a PIA.
+ TlbImporter_UnsafeInterfaces = 0x00000002, // Generate unsafe interfaces.
+ TlbImporter_SafeArrayAsSystemArray = 0x00000004, // Safe array import control.
+ TlbImporter_TransformDispRetVals = 0x00000008, // Disp only itf [out, retval] transformation.
+ TlbImporter_PreventClassMembers = 0x00000010, // Prevent adding members to class.
+ TlbImporter_SerializableValueClasses = 0x00000020, // Mark value classes as serializable.
+ TlbImporter_ImportAsX86 = 0x00000100, // Import to a 32-bit assembly
+ TlbImporter_ImportAsX64 = 0x00000200, // Import to an x64 assembly
+ TlbImporter_ImportAsItanium = 0x00000400, // Import to an itanium assembly
+ TlbImporter_ImportAsAgnostic = 0x00000800, // Import to an agnostic assembly
+ TlbImporter_ReflectionOnlyLoading = 0x00001000, // Use ReflectionOnly loading.
+ TlbImporter_NoDefineVersionResource = 0x00002000, // Don't call AssemblyBuilder.DefineVersionResource
+ TlbImporter_ImportAsArm = 0x00004000, // Import to an ARM assembly
+ TlbImporter_ValidFlags = TlbImporter_PrimaryInteropAssembly |
+ TlbImporter_UnsafeInterfaces |
+ TlbImporter_SafeArrayAsSystemArray |
+ TlbImporter_TransformDispRetVals |
+ TlbImporter_PreventClassMembers |
+ TlbImporter_SerializableValueClasses |
+ TlbImporter_ImportAsX86 |
+ TlbImporter_ImportAsX64 |
+ TlbImporter_ImportAsItanium |
+ TlbImporter_ImportAsAgnostic |
+ TlbImporter_ReflectionOnlyLoading |
+ TlbImporter_NoDefineVersionResource |
+ TlbImporter_ImportAsArm
+};
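+
+// (Hypothetical caller sketch: the flags combine bitwise, and the converter
+// entry points reject anything outside TlbImporter_ValidFlags:
+//   TlbImporterFlags flags = (TlbImporterFlags)(TlbImporter_SafeArrayAsSystemArray |
+//                                               TlbImporter_ImportAsX86);
+//   _ASSERTE((flags & ~TlbImporter_ValidFlags) == 0); // mirrors the runtime check
+// )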
+
+// Note that the second hex digit is reserved
+enum TlbExporterFlags
+{
+ TlbExporter_OnlyReferenceRegistered = 0x00000001, // Only reference an external typelib if it is registered.
+ TlbExporter_CallerResolvedReferences = 0x00000002, // Always allow caller to resolve typelib references first
+ TlbExporter_OldNames = 0x00000004, // Do not ignore non COM visible types when doing name decoration.
+// TlbExporter_Unused = 0x00000008, // This is currently unused - feel free to use this for another switch
+ TlbExporter_ExportAs32Bit = 0x00000010, // Export the type library using 32-bit semantics
+ TlbExporter_ExportAs64Bit = 0x00000020, // Export the type library using 64-bit semantics
+// TlbExporter_Reserved = 0x00000040, // Do not use this
+// TlbExporter_Reserved = 0x00000080, // Do not use this
+ TlbExporter_ValidFlags = TlbExporter_OnlyReferenceRegistered |
+ TlbExporter_CallerResolvedReferences |
+ TlbExporter_OldNames |
+ TlbExporter_ExportAs32Bit |
+ TlbExporter_ExportAs64Bit
+};
+
+#define TlbExportAsMask 0x000000F0
+#define TlbExportAs32Bit(x) ((TlbExportAsMask & x) == TlbExporter_ExportAs32Bit)
+#define TlbExportAs64Bit(x) ((TlbExportAsMask & x) == TlbExporter_ExportAs64Bit)
+#define TlbExportAsDefault(x) ((!TlbExportAs32Bit(x)) && (!TlbExportAs64Bit(x)))
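+
+// (Worked example of the mask logic: TlbExportAs32Bit(x) holds only when the
+// ExportAs nibble equals TlbExporter_ExportAs32Bit exactly, so a value with
+// both 0x10 and 0x20 set matches neither macro and TlbExportAsDefault(x) is true.)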
+
+class COMTypeLibConverter
+{
+public:
+ static FCDECL4(Object*, ConvertAssemblyToTypeLib, Object* AssemblyUNSAFE, StringObject* TypeLibNameUNSAFE, DWORD Flags, Object* NotifySinkUNSAFE);
+ static FCDECL7(void, ConvertTypeLibToMetadata, Object* TypeLibUNSAFE, Object* AsmBldrUNSAFE, Object* ModBldrUNSAFE, StringObject* NamespaceUNSAFE, TlbImporterFlags Flags, Object* NotifySinkUNSAFE, OBJECTREF* pEventItfInfoList);
+
+private:
+ static void Init();
+ static void CreateItfInfoList(OBJECTREF* pEventItfInfoList);
+ static void GetEventItfInfoList(CImportTlb *pImporter, Assembly *pAssembly, OBJECTREF *pEventItfInfoList);
+ static OBJECTREF GetEventItfInfo(Assembly *pAssembly, ImpTlbEventInfo *pImpTlbEventInfo);
+ static void TypeLibImporterWrapper(ITypeLib *pITLB, LPCWSTR szFname, LPCWSTR szNamespace, IMetaDataEmit *pEmit, Assembly *pAssembly, Module *pModule, ITypeLibImporterNotifySink *pNotify, TlbImporterFlags flags, CImportTlb **ppImporter);
+
+ static void ConvertAssemblyToTypeLibInternal(OBJECTREF* ppAssembly, STRINGREF* ppTypeLibName, DWORD Flags, OBJECTREF* ppNotifySink, OBJECTREF* pRetObj);
+ static void LoadType(Module * pModule,
+ mdTypeDef cl,
+ TlbImporterFlags Flags);
+ static void ConvertTypeLibToMetadataInternal(OBJECTREF* ppTypeLib, OBJECTREF* ppAsmBldr, OBJECTREF* ppModBldr, STRINGREF* ppNamespace, TlbImporterFlags Flags, OBJECTREF* ppNotifySink, OBJECTREF* pEventItfInfoList);
+
+ static BOOL m_bInitialized;
+};
+
+#endif // _COMTYPELIBCONVERTER_H
diff --git a/src/vm/comutilnative.cpp b/src/vm/comutilnative.cpp
new file mode 100644
index 0000000000..8c27b2f478
--- /dev/null
+++ b/src/vm/comutilnative.cpp
@@ -0,0 +1,3102 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+/*============================================================
+**
+** File: COMUtilNative
+**
+**
+**
+** Purpose: A dumping ground for classes which aren't large
+** enough to get their own file in the EE.
+**
+**
+**
+===========================================================*/
+#include "common.h"
+#include "object.h"
+#include "excep.h"
+#include "vars.hpp"
+#include "comutilnative.h"
+
+#include "utilcode.h"
+#include "frames.h"
+#include "field.h"
+#include "winwrap.h"
+#include "gc.h"
+#include "fcall.h"
+#include "invokeutil.h"
+#include "eeconfig.h"
+#include "typestring.h"
+#include "sha1.h"
+#include "finalizerthread.h"
+
+#ifdef FEATURE_COMINTEROP
+ #include "comcallablewrapper.h"
+ #include "comcache.h"
+#endif // FEATURE_COMINTEROP
+
+#define STACK_OVERFLOW_MESSAGE W("StackOverflowException")
+
+//These are defined in System.ParseNumbers and should be kept in sync.
+#define PARSE_TREATASUNSIGNED 0x200
+#define PARSE_TREATASI1 0x400
+#define PARSE_TREATASI2 0x800
+#define PARSE_ISTIGHT 0x1000
+#define PARSE_NOSPACE 0x2000
+
+
+//
+//
+// PARSENUMBERS (and helper functions)
+//
+//
+
+/*===================================IsDigit====================================
+**Returns a bool indicating whether the character passed in represents a **
+**digit.
+==============================================================================*/
+bool IsDigit(WCHAR c, int radix, int *result)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(result));
+ }
+ CONTRACTL_END;
+
+ if (IS_DIGIT(c)) {
+ *result = DIGIT_TO_INT(c);
+ }
+ else if (c>='A' && c<='Z') {
+ //+10 is necessary because A is actually 10, etc.
+ *result = c-'A'+10;
+ }
+ else if (c>='a' && c<='z') {
+ //+10 is necessary because a is actually 10, etc.
+ *result = c-'a'+10;
+ }
+ else {
+ *result = -1;
+ }
+
+ if ((*result >=0) && (*result < radix))
+ return true;
+
+ return false;
+}
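+
+// (Worked example: IsDigit('A', 16, &v) stores 10 in v and returns true, while
+// IsDigit('A', 10, &v) also stores 10 but returns false because 10 >= radix.)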
+
+INT32 wtoi(__in_ecount(length) WCHAR* wstr, DWORD length)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(wstr));
+ PRECONDITION(length >= 0);
+ }
+ CONTRACTL_END;
+
+ DWORD i = 0;
+ int value;
+ INT32 result = 0;
+
+ while ( (i < length) && (IsDigit(wstr[i], 10 ,&value)) ) {
+ //Read all of the digits and convert to a number
+ result = result*10 + value;
+ i++;
+ }
+
+ return result;
+}
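+
+// (Worked example: wtoi(W("42abc"), 5) consumes '4' and '2', stops at 'a'
+// because it is not a base-10 digit, and returns 42.)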
+
+INT32 ParseNumbers::GrabInts(const INT32 radix, __in_ecount(length) WCHAR *buffer, const int length, int *i, BOOL isUnsigned)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(buffer));
+ PRECONDITION(CheckPointer(i));
+ PRECONDITION(*i >= 0);
+ PRECONDITION(length >= 0);
+ PRECONDITION( radix==2 || radix==8 || radix==10 || radix==16 );
+ }
+ CONTRACTL_END;
+
+ UINT32 result=0;
+ int value;
+ UINT32 maxVal;
+
+ // Allow all non-decimal numbers to set the sign bit.
+ if (radix==10 && !isUnsigned) {
+ maxVal = (0x7FFFFFFF / 10);
+
+ //Read all of the digits and convert to a number
+ while (*i<length&&(IsDigit(buffer[*i],radix,&value))) {
+ // Check for overflows - this is sufficient & correct.
+ if (result > maxVal || ((INT32)result)<0)
+ COMPlusThrow(kOverflowException, W("Overflow_Int32"));
+ result = result*radix + value;
+ (*i)++;
+ }
+ if ((INT32)result<0 && result!=0x80000000)
+ COMPlusThrow(kOverflowException, W("Overflow_Int32"));
+
+ }
+ else {
+ maxVal = ((UINT32) -1) / radix;
+
+ //Read all of the digits and convert to a number
+ while (*i<length&&(IsDigit(buffer[*i],radix,&value))) {
+ // Check for overflows - this is sufficient & correct.
+ if (result > maxVal)
+ COMPlusThrow(kOverflowException, W("Overflow_UInt32"));
+ // the above check won't cover 4294967296 to 4294967299
+ UINT32 temp = result*radix + value;
+ if( temp < result) { // this means overflow as well
+ COMPlusThrow(kOverflowException, W("Overflow_UInt32"));
+ }
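+ // (Worked example: for an unsigned base-10 parse, maxVal is 429496729; a
+ // result of exactly 429496729 passes the first check, but 429496729*10 + 6
+ // == 4294967296 wraps to 0, so the temp < result test above catches the overflow.)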
+
+ result = temp;
+ (*i)++;
+ }
+ }
+ return(INT32) result;
+}
+
+INT64 ParseNumbers::GrabLongs(const INT32 radix, __in_ecount(length) WCHAR *buffer, const int length, int *i, BOOL isUnsigned)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(buffer));
+ PRECONDITION(CheckPointer(i));
+ PRECONDITION(*i >= 0);
+ PRECONDITION(length >= 0);
+ }
+ CONTRACTL_END;
+
+ UINT64 result=0;
+ int value;
+ UINT64 maxVal;
+
+ // Allow all non-decimal numbers to set the sign bit.
+ if (radix==10 && !isUnsigned) {
+ maxVal = (UI64(0x7FFFFFFFFFFFFFFF) / 10);
+
+ //Read all of the digits and convert to a number
+ while (*i<length&&(IsDigit(buffer[*i],radix,&value))) {
+ // Check for overflows - this is sufficient & correct.
+ if (result > maxVal || ((INT64)result)<0)
+ COMPlusThrow(kOverflowException, W("Overflow_Int64"));
+ result = result*radix + value;
+ (*i)++;
+ }
+ if ((INT64)result<0 && result!=UI64(0x8000000000000000))
+ COMPlusThrow(kOverflowException, W("Overflow_Int64"));
+
+ }
+ else {
+ maxVal = ((UINT64) -1L) / radix;
+
+ //Read all of the digits and convert to a number
+ while (*i<length&&(IsDigit(buffer[*i],radix,&value))) {
+ // Check for overflows - this is sufficient & correct.
+ if (result > maxVal)
+ COMPlusThrow(kOverflowException, W("Overflow_UInt64"));
+
+ UINT64 temp = result*radix + value;
+ if( temp < result) { // this means overflow as well
+ COMPlusThrow(kOverflowException, W("Overflow_UInt64"));
+ }
+ result = temp;
+
+ (*i)++;
+ }
+ }
+ return(INT64) result;
+}
+
+void EatWhiteSpace(__in_ecount(length) WCHAR *buffer, int length, int *i)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(buffer));
+ PRECONDITION(CheckPointer(i));
+ PRECONDITION(length >= 0);
+ }
+ CONTRACTL_END;
+
+ for (; *i<length && COMCharacter::nativeIsWhiteSpace(buffer[*i]); (*i)++);
+}
+
+FCIMPL5_VII(LPVOID, ParseNumbers::LongToString, INT64 n, INT32 radix, INT32 width, CLR_CHAR paddingChar, INT32 flags)
+{
+ FCALL_CONTRACT;
+
+ LPVOID rv = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ bool isNegative = false;
+ int index=0;
+ int charVal;
+ UINT64 l;
+ INT32 i;
+ INT32 buffLength=0;
+ WCHAR buffer[67]; // Longest possible string: 64 binary digits plus a 3-character radix prefix.
+
+ if (radix<MinRadix || radix>MaxRadix)
+ COMPlusThrowArgumentException(W("radix"), W("Arg_InvalidBase"));
+
+ //If the number is negative, make it positive and remember the sign.
+ if (n<0) {
+ isNegative=true;
+
+ // For base 10, write out -num, but other bases write out the
+ // 2's complement bit pattern
+ if (10==radix)
+ l = (UINT64)(-n);
+ else
+ l = (UINT64)n;
+ }
+ else {
+ l=(UINT64)n;
+ }
+
+ if (flags&PrintAsI1)
+ l = l&0xFF;
+ else if (flags&PrintAsI2)
+ l = l&0xFFFF;
+ else if (flags&PrintAsI4)
+ l=l&0xFFFFFFFF;
+
+ //Special case the 0.
+ if (0==l) {
+ buffer[0]='0';
+ index=1;
+ }
+ else {
+ //Pull apart the number and put the digits (in reverse order) into the buffer.
+ for (index=0; l>0; l=l/radix, index++) {
+ if ((charVal=(int)(l%radix))<10)
+ buffer[index] = (WCHAR)(charVal + '0');
+ else
+ buffer[index] = (WCHAR)(charVal + 'a' - 10);
+ }
+ }
+
+ //If they want the base, append that to the string (in reverse order)
+ if (radix!=10 && ((flags&PrintBase)!=0)) {
+ if (16==radix) {
+ buffer[index++]='x';
+ buffer[index++]='0';
+ }
+ else if (8==radix) {
+ buffer[index++]='0';
+ }
+ else if ((flags&PrintRadixBase)!=0) {
+ buffer[index++]='#';
+ buffer[index++]=((radix%10)+'0');
+ buffer[index++]=((static_cast<char>(radix)/10)+'0');
+ }
+ }
+
+ if (10==radix) {
+ //If it was negative, append the sign.
+ if (isNegative) {
+ buffer[index++]='-';
+ }
+
+ //else if they requested, add the '+';
+ else if ((flags&PrintSign)!=0) {
+ buffer[index++]='+';
+ }
+
+ //If they requested a leading space, put it on.
+ else if ((flags&PrefixSpace)!=0) {
+ buffer[index++]=' ';
+ }
+ }
+
+ //Figure out the size of our string.
+ if (width<=index)
+ buffLength=index;
+ else
+ buffLength=width;
+
+ STRINGREF Local = StringObject::NewString(buffLength);
+ WCHAR *LocalBuffer = Local->GetBuffer();
+
+ //Put the characters into the String in reverse order
+ //Fill the remaining space -- if there is any --
+ //with the correct padding character.
+ if ((flags&LeftAlign)!=0) {
+ for (i=0; i<index; i++) {
+ LocalBuffer[i]=buffer[index-i-1];
+ }
+ for (;i<buffLength; i++) {
+ LocalBuffer[i]=paddingChar;
+ }
+ }
+ else {
+ for (i=0; i<index; i++) {
+ LocalBuffer[buffLength-i-1]=buffer[i];
+ }
+ for (int j=buffLength-i-1; j>=0; j--) {
+ LocalBuffer[j]=paddingChar;
+ }
+ }
+
+ *((STRINGREF *)&rv)=Local;
+
+ HELPER_METHOD_FRAME_END();
+
+ return rv;
+}
+FCIMPLEND
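+
+// (Worked example with the PrintBase flag: LongToString(255, 16, 0, '0',
+// PrintBase) collects the digits in reverse ("f", "f"), appends the reversed
+// "x", "0" prefix, and the final reversing copy produces "0xff".)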
+
+
+FCIMPL5(LPVOID, ParseNumbers::IntToString, INT32 n, INT32 radix, INT32 width, CLR_CHAR paddingChar, INT32 flags)
+{
+ FCALL_CONTRACT;
+
+ LPVOID rv = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ bool isNegative = false;
+ int index=0;
+ int charVal;
+ int buffLength;
+ int i;
+ UINT32 l;
+ WCHAR buffer[66]; // Generously sized; 32 binary digits plus prefix and sign is the worst case.
+
+ if (radix<MinRadix || radix>MaxRadix)
+ COMPlusThrowArgumentException(W("radix"), W("Arg_InvalidBase"));
+
+ //If the number is negative, make it positive and remember the sign.
+ //If the number is MIN_VALUE, this will still be negative, so we'll have to
+ //special case this later.
+ if (n<0) {
+ isNegative=true;
+ // For base 10, write out -num, but other bases write out the
+ // 2's complement bit pattern
+ if (10==radix)
+ l = (UINT32)(-n);
+ else
+ l = (UINT32)n;
+ }
+ else {
+ l=(UINT32)n;
+ }
+
+ //The conversion to a UINT will sign extend the number. In order to ensure
+ //that we only get as many bits as we expect, we chop the number.
+ if (flags&PrintAsI1) {
+ l = l&0xFF;
+ }
+ else if (flags&PrintAsI2) {
+ l = l&0xFFFF;
+ }
+ else if (flags&PrintAsI4) {
+ l=l&0xFFFFFFFF;
+ }
+
+ //Special case the 0.
+ if (0==l) {
+ buffer[0]='0';
+ index=1;
+ }
+ else {
+ do {
+ charVal = l%radix;
+ l=l/radix;
+ if (charVal<10) {
+ buffer[index++] = (WCHAR)(charVal + '0');
+ }
+ else {
+ buffer[index++] = (WCHAR)(charVal + 'a' - 10);
+ }
+ }
+ while (l!=0);
+ }
+
+ //If they want the base, append that to the string (in reverse order)
+ if (radix!=10 && ((flags&PrintBase)!=0)) {
+ if (16==radix) {
+ buffer[index++]='x';
+ buffer[index++]='0';
+ }
+ else if (8==radix) {
+ buffer[index++]='0';
+ }
+ }
+
+ if (10==radix) {
+ //If it was negative, append the sign.
+ if (isNegative) {
+ buffer[index++]='-';
+ }
+
+ //else if they requested, add the '+';
+ else if ((flags&PrintSign)!=0) {
+ buffer[index++]='+';
+ }
+
+ //If they requested a leading space, put it on.
+ else if ((flags&PrefixSpace)!=0) {
+ buffer[index++]=' ';
+ }
+ }
+
+ //Figure out the size of our string.
+ if (width<=index) {
+ buffLength=index;
+ }
+ else {
+ buffLength=width;
+ }
+
+ STRINGREF Local = StringObject::NewString(buffLength);
+ WCHAR *LocalBuffer = Local->GetBuffer();
+
+ //Put the characters into the String in reverse order
+ //Fill the remaining space -- if there is any --
+ //with the correct padding character.
+ if ((flags&LeftAlign)!=0) {
+ for (i=0; i<index; i++) {
+ LocalBuffer[i]=buffer[index-i-1];
+ }
+ for (;i<buffLength; i++) {
+ LocalBuffer[i]=paddingChar;
+ }
+ }
+ else {
+ for (i=0; i<index; i++) {
+ LocalBuffer[buffLength-i-1]=buffer[i];
+ }
+ for (int j=buffLength-i-1; j>=0; j--) {
+ LocalBuffer[j]=paddingChar;
+ }
+ }
+
+ *((STRINGREF *)&rv)=Local;
+
+ HELPER_METHOD_FRAME_END();
+
+ return rv;
+}
+FCIMPLEND
+
+
+/*===================================FixRadix===================================
+**It's possible that we parsed the radix in a base other than 10 by accident.
+**This method will take that number, verify that it only contained valid base 10
+**digits, and then do the conversion to base 10. If it contained invalid digits,
+**they tried to pass us a radix such as 1A, so we throw a FormatException.
+**
+**Args: oldVal: The value that we had actually parsed in some arbitrary base.
+** oldBase: The base in which we actually did the parsing.
+**
+**Returns: oldVal as if it had been parsed as a base-10 number.
+**Exceptions: FormatException if either of the digits in the radix isn't a
+** valid base-10 digit.
+==============================================================================*/
+int FixRadix(int oldVal, int oldBase)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ int firstDigit = (oldVal/oldBase);
+ int secondDigit = (oldVal%oldBase);
+
+ if ((firstDigit>=10) || (secondDigit>=10))
+ COMPlusThrow(kFormatException, W("Format_BadBase"));
+
+ return(firstDigit*10)+secondDigit;
+}
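+
+// (Worked example: the string "16" accidentally parsed as hex yields
+// oldVal == 0x16 == 22; FixRadix(22, 16) recovers digits 1 and 6 and returns 16.)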
+
+/*=================================StringToLong=================================
+**Action: Parses the given string as a 64-bit integer in the given radix,
+** honoring the PARSE_* flags.
+**Returns: The parsed value, negated if a leading '-' was consumed in base 10.
+**Exceptions: ArgumentException for an unsupported radix, FormatException for
+** unparsable input, OverflowException if the value does not fit.
+==============================================================================*/
+FCIMPL4(INT64, ParseNumbers::StringToLong, StringObject * s, INT32 radix, INT32 flags, INT32 *currPos)
+{
+ FCALL_CONTRACT;
+
+ INT64 result = 0;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(s);
+
+ int sign = 1;
+ WCHAR *input;
+ int length;
+ int i;
+ int grabNumbersStart=0;
+ INT32 r;
+
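+ // I1/I2 truncation never applies to 64-bit parsing; the assert below enforces that.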
+ _ASSERTE((flags & PARSE_TREATASI1) == 0 && (flags & PARSE_TREATASI2) == 0);
+
+ if (s) {
+ i = currPos ? *currPos : 0;
+
+ //Do some radix checking.
+ //A radix of -1 says to use whatever base is spec'd on the number.
+ //Parse in Base10 until we figure out what the base actually is.
+ r = (-1==radix)?10:radix;
+
+ if (r!=2 && r!=10 && r!=8 && r!=16)
+ COMPlusThrow(kArgumentException, W("Arg_InvalidBase"));
+
+ s->RefInterpretGetStringValuesDangerousForGC(&input, &length);
+
+ if (i<0 || i>=length)
+ COMPlusThrowArgumentOutOfRange(W("startIndex"), W("ArgumentOutOfRange_Index"));
+
+ //Get rid of the whitespace and then check that we've still got some digits to parse.
+ if (!(flags & PARSE_ISTIGHT) && !(flags & PARSE_NOSPACE)) {
+ EatWhiteSpace(input,length,&i);
+ if (i==length)
+ COMPlusThrow(kFormatException, W("Format_EmptyInputString"));
+ }
+
+ //Check for a sign
+ if (input[i]=='-') {
+ if (r != 10)
+ COMPlusThrow(kArgumentException, W("Arg_CannotHaveNegativeValue"));
+
+ if (flags & PARSE_TREATASUNSIGNED)
+ COMPlusThrow(kOverflowException, W("Overflow_NegativeUnsigned"));
+
+ sign = -1;
+ i++;
+ }
+ else if (input[i]=='+') {
+ i++;
+ }
+
+ if ((radix==-1 || radix==16) && (i+1<length) && input[i]=='0') {
+ if (input[i+1]=='x' || input [i+1]=='X') {
+ r=16;
+ i+=2;
+ }
+ }
+
+ grabNumbersStart=i;
+ result = GrabLongs(r,input,length,&i, (flags & PARSE_TREATASUNSIGNED));
+
+ //Check if they passed us a string with no parsable digits.
+ if (i==grabNumbersStart)
+ COMPlusThrow(kFormatException, W("Format_NoParsibleDigits"));
+
+ if (flags & PARSE_ISTIGHT) {
+ //If we've got effluvia left at the end of the string, complain.
+ if (i<length)
+ COMPlusThrow(kFormatException, W("Format_ExtraJunkAtEnd"));
+ }
+
+ //Put the current index back into the correct place.
+ if (currPos != NULL) *currPos = i;
+
+ //Return the value properly signed.
+ if ((UINT64) result==UI64(0x8000000000000000) && sign==1 && r==10 && !(flags & PARSE_TREATASUNSIGNED))
+ COMPlusThrow(kOverflowException, W("Overflow_Int64"));
+
+ if (r == 10)
+ result *= sign;
+ }
+ else {
+ result = 0;
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ return result;
+}
+FCIMPLEND
+
+FCIMPL4(INT32, ParseNumbers::StringToInt, StringObject * s, INT32 radix, INT32 flags, INT32* currPos)
+{
+ FCALL_CONTRACT;
+
+ INT32 result = 0;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(s);
+
+ int sign = 1;
+ WCHAR *input;
+ int length;
+ int i;
+ int grabNumbersStart=0;
+ INT32 r;
+
+ // TreatAsI1 and TreatAsI2 are mutually exclusive.
+ _ASSERTE(!((flags & PARSE_TREATASI1) != 0 && (flags & PARSE_TREATASI2) != 0));
+
+ if (s) {
+ //They're required to tell me where to start parsing.
+ i = currPos ? (*currPos) : 0;
+
+ //Do some radix checking.
+ //A radix of -1 says to use whatever base is spec'd on the number.
+ //Parse in Base10 until we figure out what the base actually is.
+ r = (-1==radix)?10:radix;
+
+ if (r!=2 && r!=10 && r!=8 && r!=16)
+ COMPlusThrow(kArgumentException, W("Arg_InvalidBase"));
+
+ s->RefInterpretGetStringValuesDangerousForGC(&input, &length);
+
+ if (i<0 || i>=length)
+ COMPlusThrowArgumentOutOfRange(W("startIndex"), W("ArgumentOutOfRange_Index"));
+
+ //Get rid of the whitespace and then check that we've still got some digits to parse.
+ if (!(flags & PARSE_ISTIGHT) && !(flags & PARSE_NOSPACE)) {
+ EatWhiteSpace(input,length,&i);
+ if (i==length)
+ COMPlusThrow(kFormatException, W("Format_EmptyInputString"));
+ }
+
+ //Check for a sign
+ if (input[i]=='-') {
+ if (r != 10)
+ COMPlusThrow(kArgumentException, W("Arg_CannotHaveNegativeValue"));
+
+ if (flags & PARSE_TREATASUNSIGNED)
+ COMPlusThrow(kOverflowException, W("Overflow_NegativeUnsigned"));
+
+ sign = -1;
+ i++;
+ }
+ else if (input[i]=='+') {
+ i++;
+ }
+
+ //Consume the 0x if we're in an unknown base or in base-16.
+ if ((radix==-1||radix==16) && (i+1<length) && input[i]=='0') {
+ if (input[i+1]=='x' || input [i+1]=='X') {
+ r=16;
+ i+=2;
+ }
+ }
+
+ grabNumbersStart=i;
+ result = GrabInts(r,input,length,&i, (flags & PARSE_TREATASUNSIGNED));
+
+ //Check if they passed us a string with no parsable digits.
+ if (i==grabNumbersStart)
+ COMPlusThrow(kFormatException, W("Format_NoParsibleDigits"));
+
+ if (flags & PARSE_ISTIGHT) {
+ //If we've got effluvia left at the end of the string, complain.
+ if (i<(length))
+ COMPlusThrow(kFormatException, W("Format_ExtraJunkAtEnd"));
+ }
+
+ //Put the current index back into the correct place.
+ if (currPos != NULL) *currPos = i;
+
+ //Return the value properly signed.
+ if (flags & PARSE_TREATASI1) {
+ if ((UINT32)result > 0xFF)
+ COMPlusThrow(kOverflowException, W("Overflow_SByte"));
+
+ // result looks positive when parsed as an I4
+ _ASSERTE(sign==1 || r==10);
+ }
+ else if (flags & PARSE_TREATASI2) {
+ if ((UINT32)result > 0xFFFF)
+ COMPlusThrow(kOverflowException, W("Overflow_Int16"));
+
+ // result looks positive when parsed as an I4
+ _ASSERTE(sign==1 || r==10);
+ }
+ else if ((UINT32) result==0x80000000U && sign==1 && r==10 && !(flags & PARSE_TREATASUNSIGNED)) {
+ COMPlusThrow(kOverflowException, W("Overflow_Int32"));
+ }
+
+ if (r == 10)
+ result *= sign;
+ }
+ else {
+ result = 0;
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ return result;
+}
+FCIMPLEND
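+
+// (Worked example of the radix == -1 path above: parsing "0x1A" starts with
+// r == 10, the leading "0x" switches r to 16 and advances past the prefix,
+// and GrabInts then yields 26.)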
+
+//
+//
+// EXCEPTION NATIVE
+//
+//
+FCIMPL1(FC_BOOL_RET, ExceptionNative::IsImmutableAgileException, Object* pExceptionUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ ASSERT(pExceptionUNSAFE != NULL);
+
+ OBJECTREF pException = (OBJECTREF) pExceptionUNSAFE;
+
+ // The preallocated exception objects may be used from multiple AppDomains
+ // and therefore must remain immutable from the application's perspective.
+ FC_RETURN_BOOL(CLRException::IsPreallocatedExceptionObject(pException));
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, ExceptionNative::IsTransient, INT32 hresult)
+{
+ FCALL_CONTRACT;
+
+ FC_RETURN_BOOL(Exception::IsTransient(hresult));
+}
+FCIMPLEND
+
+#ifndef FEATURE_CORECLR
+
+FCIMPL3(StringObject *, ExceptionNative::StripFileInfo, Object *orefExcepUNSAFE, StringObject *orefStrUNSAFE, CLR_BOOL isRemoteStackTrace)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF orefExcep = ObjectToOBJECTREF(orefExcepUNSAFE);
+ STRINGREF orefStr = (STRINGREF)ObjectToOBJECTREF(orefStrUNSAFE);
+
+ if (orefStr == NULL)
+ {
+ return NULL;
+ }
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(orefExcep, orefStr);
+
+ if (isRemoteStackTrace)
+ {
+ if (!AppX::IsAppXProcess() && ExceptionTypeOverridesStackTraceGetter(orefExcep->GetMethodTable()))
+ {
+ // In classic processes, the remote stack trace could have been generated using a custom get_StackTrace
+ // override, which means that we would not be able to parse it - strip the whole string by returning NULL.
+ orefStr = NULL;
+ }
+ }
+
+ if (orefStr != NULL)
+ {
+ SString stackTrace;
+ orefStr->GetSString(stackTrace);
+
+ StripFileInfoFromStackTrace(stackTrace);
+
+ orefStr = AllocateString(stackTrace);
+ }
+
+ HELPER_METHOD_FRAME_END();
+ return (StringObject *)OBJECTREFToObject(orefStr);
+}
+FCIMPLEND
+
+#endif // !FEATURE_CORECLR
+
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+// This FCall sets a flag against the thread exception state to indicate to
+// IL_Throw and the StackTraceInfo implementation to account for the fact
+// that we have restored a foreign exception dispatch details.
+//
+// Refer to the respective methods for details on how they use this flag.
+FCIMPL0(VOID, ExceptionNative::PrepareForForeignExceptionRaise)
+{
+ FCALL_CONTRACT;
+
+ PTR_ThreadExceptionState pCurTES = GetThread()->GetExceptionState();
+
+ // Set a flag against the TES to indicate this is a foreign exception raise.
+ pCurTES->SetRaisingForeignException();
+}
+FCIMPLEND
+
+// Given an exception object, this method will extract the stacktrace and dynamic method array and set them up for return to the caller.
+FCIMPL3(VOID, ExceptionNative::GetStackTracesDeepCopy, Object* pExceptionObjectUnsafe, Object **pStackTraceUnsafe, Object **pDynamicMethodsUnsafe)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ ASSERT(pExceptionObjectUnsafe != NULL);
+ ASSERT(pStackTraceUnsafe != NULL);
+ ASSERT(pDynamicMethodsUnsafe != NULL);
+
+ struct _gc
+ {
+ StackTraceArray stackTrace;
+ StackTraceArray stackTraceCopy;
+ EXCEPTIONREF refException;
+ PTRARRAYREF dynamicMethodsArray; // Object array of Managed Resolvers
+ PTRARRAYREF dynamicMethodsArrayCopy; // Copy of the object array of Managed Resolvers
+ };
+ _gc gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ // GC protect the array reference
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ // Get the exception object reference
+ gc.refException = (EXCEPTIONREF)(ObjectToOBJECTREF(pExceptionObjectUnsafe));
+
+ // Fetch the stacktrace details from the exception under a lock
+ gc.refException->GetStackTrace(gc.stackTrace, &gc.dynamicMethodsArray);
+
+ bool fHaveStackTrace = false;
+ bool fHaveDynamicMethodArray = false;
+
+ if ((unsigned)gc.stackTrace.Size() > 0)
+ {
+ // Deepcopy the array
+ gc.stackTraceCopy.CopyFrom(gc.stackTrace);
+ fHaveStackTrace = true;
+ }
+
+ if (gc.dynamicMethodsArray != NULL)
+ {
+ // Get the number of elements in the dynamic methods array
+ unsigned cOrigDynamic = gc.dynamicMethodsArray->GetNumComponents();
+
+ // ..and allocate a new array. This can trigger GC or throw under OOM.
+ gc.dynamicMethodsArrayCopy = (PTRARRAYREF)AllocateObjectArray(cOrigDynamic, g_pObjectClass);
+
+ // Deepcopy references to the new array we just allocated
+ memmoveGCRefs(gc.dynamicMethodsArrayCopy->GetDataPtr(), gc.dynamicMethodsArray->GetDataPtr(),
+ cOrigDynamic * sizeof(Object *));
+
+ fHaveDynamicMethodArray = true;
+ }
+
+ // Prep to return
+ *pStackTraceUnsafe = fHaveStackTrace?OBJECTREFToObject(gc.stackTraceCopy.Get()):NULL;
+ *pDynamicMethodsUnsafe = fHaveDynamicMethodArray?OBJECTREFToObject(gc.dynamicMethodsArrayCopy):NULL;
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+// Given an exception object and deep copied instances of a stacktrace and/or dynamic method array, this method will set the latter in the exception object instance.
+FCIMPL3(VOID, ExceptionNative::SaveStackTracesFromDeepCopy, Object* pExceptionObjectUnsafe, Object *pStackTraceUnsafe, Object *pDynamicMethodsUnsafe)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ ASSERT(pExceptionObjectUnsafe != NULL);
+
+ struct _gc
+ {
+ StackTraceArray stackTrace;
+ EXCEPTIONREF refException;
+ PTRARRAYREF dynamicMethodsArray; // Object array of Managed Resolvers
+ };
+ _gc gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ // GC protect the array reference
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ // Get the exception object reference
+ gc.refException = (EXCEPTIONREF)(ObjectToOBJECTREF(pExceptionObjectUnsafe));
+
+ if (pStackTraceUnsafe != NULL)
+ {
+ // Copy the stacktrace
+ StackTraceArray stackTraceArray((I1ARRAYREF)ObjectToOBJECTREF(pStackTraceUnsafe));
+ gc.stackTrace.Swap(stackTraceArray);
+ }
+
+ gc.dynamicMethodsArray = NULL;
+ if (pDynamicMethodsUnsafe != NULL)
+ {
+ gc.dynamicMethodsArray = (PTRARRAYREF)ObjectToOBJECTREF(pDynamicMethodsUnsafe);
+ }
+
+ // If there is no stacktrace, then there cannot be any dynamic method array. Thus,
+ // save stacktrace only when we have it.
+ if (gc.stackTrace.Size() > 0)
+ {
+ // Save the stacktrace details in the exception under a lock
+ gc.refException->SetStackTrace(gc.stackTrace, gc.dynamicMethodsArray);
+ }
+ else
+ {
+ gc.refException->SetNullStackTrace();
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+// This method performs a deep copy of the stack trace array.
+FCIMPL1(Object*, ExceptionNative::CopyStackTrace, Object* pStackTraceUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ ASSERT(pStackTraceUNSAFE != NULL);
+
+ struct _gc
+ {
+ StackTraceArray stackTrace;
+ StackTraceArray stackTraceCopy;
+ _gc(I1ARRAYREF refStackTrace)
+ : stackTrace(refStackTrace)
+ {}
+ };
+ _gc gc((I1ARRAYREF)(ObjectToOBJECTREF(pStackTraceUNSAFE)));
+
+ // GC protect the array reference
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ // Deepcopy the array
+ gc.stackTraceCopy.CopyFrom(gc.stackTrace);
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(gc.stackTraceCopy.Get());
+}
+FCIMPLEND
+
+// This method performs a deep copy of the dynamic method array.
+FCIMPL1(Object*, ExceptionNative::CopyDynamicMethods, Object* pDynamicMethodsUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ ASSERT(pDynamicMethodsUNSAFE != NULL);
+
+ struct _gc
+ {
+ PTRARRAYREF dynamicMethodsArray; // Object array of Managed Resolvers
+ PTRARRAYREF dynamicMethodsArrayCopy; // Copy of the object array of Managed Resolvers
+ _gc()
+ {}
+ };
+ _gc gc;
+ ZeroMemory(&gc, sizeof(gc));
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ gc.dynamicMethodsArray = (PTRARRAYREF)(ObjectToOBJECTREF(pDynamicMethodsUNSAFE));
+
+ // Get the number of elements in the array
+ unsigned cOrigDynamic = gc.dynamicMethodsArray->GetNumComponents();
+ // ..and allocate a new array. This can trigger GC or throw under OOM.
+ gc.dynamicMethodsArrayCopy = (PTRARRAYREF)AllocateObjectArray(cOrigDynamic, g_pObjectClass);
+
+ // Copy references to the new array we just allocated
+ memmoveGCRefs(gc.dynamicMethodsArrayCopy->GetDataPtr(), gc.dynamicMethodsArray->GetDataPtr(),
+ cOrigDynamic * sizeof(Object *));
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(gc.dynamicMethodsArrayCopy);
+}
+FCIMPLEND
+
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+BSTR BStrFromString(STRINGREF s)
+{
+ CONTRACTL
+ {
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ WCHAR *wz;
+ int cch;
+ BSTR bstr;
+
+ if (s == NULL)
+ return NULL;
+
+ s->RefInterpretGetStringValuesDangerousForGC(&wz, &cch);
+
+ bstr = SysAllocString(wz);
+ if (bstr == NULL)
+ COMPlusThrowOM();
+
+ return bstr;
+}
+
+static BSTR GetExceptionDescription(OBJECTREF objException)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION( IsException(objException->GetMethodTable()) );
+ }
+ CONTRACTL_END;
+
+ BSTR bstrDescription;
+
+ STRINGREF MessageString = NULL;
+ GCPROTECT_BEGIN(MessageString)
+ GCPROTECT_BEGIN(objException)
+ {
+#ifdef FEATURE_APPX
+ if (AppX::IsAppXProcess())
+ {
+ // In AppX, call Exception.ToString(false, false) which returns a string containing the exception class
+ // name and callstack without file paths/names. This is used for unhandled exception bucketing/analysis.
+ MethodDescCallSite getMessage(METHOD__EXCEPTION__TO_STRING, &objException);
+
+ ARG_SLOT GetMessageArgs[] =
+ {
+ ObjToArgSlot(objException),
+ BoolToArgSlot(false), // needFileLineInfo
+ BoolToArgSlot(false) // needMessage
+ };
+ MessageString = getMessage.Call_RetSTRINGREF(GetMessageArgs);
+ }
+ else
+#endif // FEATURE_APPX
+ {
+ // read Exception.Message property
+ MethodDescCallSite getMessage(METHOD__EXCEPTION__GET_MESSAGE, &objException);
+
+ ARG_SLOT GetMessageArgs[] = { ObjToArgSlot(objException)};
+ MessageString = getMessage.Call_RetSTRINGREF(GetMessageArgs);
+
+ // If the message string is empty, then use the exception class name.
+ if (MessageString == NULL || MessageString->GetStringLength() == 0) {
+ // call GetClassName
+ MethodDescCallSite getClassName(METHOD__EXCEPTION__GET_CLASS_NAME, &objException);
+ ARG_SLOT GetClassNameArgs[] = { ObjToArgSlot(objException)};
+ MessageString = getClassName.Call_RetSTRINGREF(GetClassNameArgs);
+ _ASSERTE(MessageString != NULL && MessageString->GetStringLength() != 0);
+ }
+ }
+
+ // Allocate the description BSTR.
+ int DescriptionLen = MessageString->GetStringLength();
+ bstrDescription = SysAllocStringLen(MessageString->GetBuffer(), DescriptionLen);
+ }
+ GCPROTECT_END();
+ GCPROTECT_END();
+
+ return bstrDescription;
+}
+
+static BSTR GetExceptionSource(OBJECTREF objException)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION( IsException(objException->GetMethodTable()) );
+ }
+ CONTRACTL_END;
+
+ STRINGREF refRetVal;
+ GCPROTECT_BEGIN(objException)
+
+ // read Exception.Source property
+ MethodDescCallSite getSource(METHOD__EXCEPTION__GET_SOURCE, &objException);
+
+ ARG_SLOT GetSourceArgs[] = { ObjToArgSlot(objException)};
+
+ refRetVal = getSource.Call_RetSTRINGREF(GetSourceArgs);
+
+ GCPROTECT_END();
+ return BStrFromString(refRetVal);
+}
+
+static void GetExceptionHelp(OBJECTREF objException, BSTR *pbstrHelpFile, DWORD *pdwHelpContext)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(IsException(objException->GetMethodTable()));
+ PRECONDITION(CheckPointer(pbstrHelpFile));
+ PRECONDITION(CheckPointer(pdwHelpContext));
+ }
+ CONTRACTL_END;
+
+ *pdwHelpContext = 0;
+
+ GCPROTECT_BEGIN(objException);
+
+ // read Exception.HelpLink property
+ MethodDescCallSite getHelpLink(METHOD__EXCEPTION__GET_HELP_LINK, &objException);
+
+ ARG_SLOT GetHelpLinkArgs[] = { ObjToArgSlot(objException)};
+ *pbstrHelpFile = BStrFromString(getHelpLink.Call_RetSTRINGREF(GetHelpLinkArgs));
+
+ GCPROTECT_END();
+
+ // parse the help file to check for the presence of helpcontext
+ int len = SysStringLen(*pbstrHelpFile);
+ int pos = len;
+ WCHAR *pwstr = *pbstrHelpFile;
+ if (pwstr) {
+ BOOL fFoundPound = FALSE;
+
+ for (pos = len - 1; pos >= 0; pos--) {
+ if (pwstr[pos] == W('#')) {
+ fFoundPound = TRUE;
+ break;
+ }
+ }
+
+ if (fFoundPound) {
+ int PoundPos = pos;
+ int NumberStartPos = -1;
+ BOOL bNumberStarted = FALSE;
+ BOOL bNumberFinished = FALSE;
+ BOOL bInvalidDigitsFound = FALSE;
+
+ _ASSERTE(pwstr[pos] == W('#'));
+
+ // Check to see if the string to the right of the pound is a valid number.
+ for (pos++; pos < len; pos++) {
+ if (bNumberFinished) {
+ if (!COMCharacter::nativeIsWhiteSpace(pwstr[pos])) {
+ bInvalidDigitsFound = TRUE;
+ break;
+ }
+ }
+ else if (bNumberStarted) {
+ if (COMCharacter::nativeIsWhiteSpace(pwstr[pos])) {
+ bNumberFinished = TRUE;
+ }
+ else if (!COMCharacter::nativeIsDigit(pwstr[pos])) {
+ bInvalidDigitsFound = TRUE;
+ break;
+ }
+ }
+ else {
+ if (COMCharacter::nativeIsDigit(pwstr[pos])) {
+ NumberStartPos = pos;
+ bNumberStarted = TRUE;
+ }
+ else if (!COMCharacter::nativeIsWhiteSpace(pwstr[pos])) {
+ bInvalidDigitsFound = TRUE;
+ break;
+ }
+ }
+ }
+
+ if (bNumberStarted && !bInvalidDigitsFound) {
+ // Grab the help context and remove it from the help file.
+ *pdwHelpContext = (DWORD)wtoi(&pwstr[NumberStartPos], len - NumberStartPos);
+
+ // Allocate a new help file string of the right length.
+ BSTR strOld = *pbstrHelpFile;
+ *pbstrHelpFile = SysAllocStringLen(strOld, PoundPos);
+ SysFreeString(strOld);
+ if (!*pbstrHelpFile)
+ COMPlusThrowOM();
+ }
+ }
+ }
+}
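+
+// A minimal standalone sketch of the "helpfile#helpcontext" split performed
+// above, for illustration only (ParseHelpLink is a hypothetical name and this
+// block is not compiled into the runtime): the number after the last '#' may
+// be surrounded by whitespace but must consist of digits only.
+#if 0
+#include <cwctype>
+#include <cstdlib>
+#include <string>
+
+static bool ParseHelpLink(const std::wstring& link, std::wstring* file, unsigned long* context)
+{
+    size_t pound = link.rfind(L'#');
+    if (pound == std::wstring::npos)
+        return false;                                        // no '#': the whole string is the file
+
+    size_t pos = pound + 1;
+    while (pos < link.size() && iswspace(link[pos])) pos++;  // skip leading whitespace
+    size_t start = pos;
+    while (pos < link.size() && iswdigit(link[pos])) pos++;  // consume the digits
+    size_t end = pos;
+    while (pos < link.size() && iswspace(link[pos])) pos++;  // skip trailing whitespace
+
+    if (start == end || pos != link.size())
+        return false;                                        // no digits, or junk after them
+
+    *context = wcstoul(link.c_str() + start, NULL, 10);      // e.g. L"app.chm# 101 " -> 101
+    *file = link.substr(0, pound);                           // ...with file "app.chm"
+    return true;
+}
+#endif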
+
+// NOTE: caller cleans up any partially initialized BSTRs in pED
+void ExceptionNative::GetExceptionData(OBJECTREF objException, ExceptionData *pED)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsException(objException->GetMethodTable()));
+ PRECONDITION(CheckPointer(pED));
+ }
+ CONTRACTL_END;
+
+ ZeroMemory(pED, sizeof(ExceptionData));
+
+ if (objException->GetMethodTable() == g_pStackOverflowExceptionClass) {
+ // In a low stack situation, most everything else in here will fail.
+ // <TODO>@TODO: We're not turning the guard page back on here, yet.</TODO>
+ pED->hr = COR_E_STACKOVERFLOW;
+ pED->bstrDescription = SysAllocString(STACK_OVERFLOW_MESSAGE);
+ return;
+ }
+
+ GCPROTECT_BEGIN(objException);
+ pED->hr = GetExceptionHResult(objException);
+ pED->bstrDescription = GetExceptionDescription(objException);
+ pED->bstrSource = GetExceptionSource(objException);
+ GetExceptionHelp(objException, &pED->bstrHelpFile, &pED->dwHelpContext);
+ GCPROTECT_END();
+ return;
+}
+
+#ifdef FEATURE_COMINTEROP
+
+HRESULT SimpleComCallWrapper::IErrorInfo_hr()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetExceptionHResult(this->GetObjectRef());
+}
+
+BSTR SimpleComCallWrapper::IErrorInfo_bstrDescription()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetExceptionDescription(this->GetObjectRef());
+}
+
+BSTR SimpleComCallWrapper::IErrorInfo_bstrSource()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetExceptionSource(this->GetObjectRef());
+}
+
+BSTR SimpleComCallWrapper::IErrorInfo_bstrHelpFile()
+{
+ WRAPPER_NO_CONTRACT;
+ BSTR bstrHelpFile;
+ DWORD dwHelpContext;
+ GetExceptionHelp(this->GetObjectRef(), &bstrHelpFile, &dwHelpContext);
+ return bstrHelpFile;
+}
+
+DWORD SimpleComCallWrapper::IErrorInfo_dwHelpContext()
+{
+ WRAPPER_NO_CONTRACT;
+ BSTR bstrHelpFile;
+ DWORD dwHelpContext;
+ GetExceptionHelp(this->GetObjectRef(), &bstrHelpFile, &dwHelpContext);
+ SysFreeString(bstrHelpFile);
+ return dwHelpContext;
+}
+
+GUID SimpleComCallWrapper::IErrorInfo_guid()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GUID_NULL;
+}
+
+#endif // FEATURE_COMINTEROP
+
+FCIMPL0(EXCEPTION_POINTERS*, ExceptionNative::GetExceptionPointers)
+{
+ FCALL_CONTRACT;
+
+ EXCEPTION_POINTERS* retVal = NULL;
+
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+
+ if (pThread->IsExceptionInProgress())
+ {
+ retVal = pThread->GetExceptionState()->GetExceptionPointers();
+ }
+
+ return retVal;
+}
+FCIMPLEND
+
+FCIMPL0(INT32, ExceptionNative::GetExceptionCode)
+{
+ FCALL_CONTRACT;
+
+ INT32 retVal = 0;
+
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+
+ if (pThread->IsExceptionInProgress())
+ {
+ retVal = pThread->GetExceptionState()->GetExceptionCode();
+ }
+
+ return retVal;
+}
+FCIMPLEND
+
+
+//
+// This must be implemented as an FCALL because managed code cannot
+// swallow a thread abort exception without resetting the abort,
+// which we don't want to do. Additionally, we can run into deadlocks
+// if we use the ResourceManager to do resource lookups - it requires
+// taking managed locks when initializing Globalization & Security,
+// but a thread abort on a separate thread initializing those same
+// systems would also do a resource lookup via the ResourceManager.
+// We've deadlocked in CompareInfo.GetCompareInfo &
+// Environment.GetResourceString. It's not practical to take all of
+// our locks within CER's to avoid this problem - just use the CLR's
+// unmanaged resources.
+//
+void QCALLTYPE ExceptionNative::GetMessageFromNativeResources(ExceptionMessageKind kind, QCall::StringHandleOnStack retMesg)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ SString buffer;
+ HRESULT hr = S_OK;
+ const WCHAR * wszFallbackString = NULL;
+
+ switch(kind) {
+ case ThreadAbort:
+ hr = buffer.LoadResourceAndReturnHR(CCompRC::Error, IDS_EE_THREAD_ABORT);
+ if (FAILED(hr)) {
+ wszFallbackString = W("Thread was being aborted.");
+ }
+ break;
+
+ case ThreadInterrupted:
+ hr = buffer.LoadResourceAndReturnHR(CCompRC::Error, IDS_EE_THREAD_INTERRUPTED);
+ if (FAILED(hr)) {
+ wszFallbackString = W("Thread was interrupted from a waiting state.");
+ }
+ break;
+
+ case OutOfMemory:
+ hr = buffer.LoadResourceAndReturnHR(CCompRC::Error, IDS_EE_OUT_OF_MEMORY);
+ if (FAILED(hr)) {
+ wszFallbackString = W("Insufficient memory to continue the execution of the program.");
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Unknown ExceptionMessageKind value!");
+ }
+ if (FAILED(hr)) {
+ STRESS_LOG1(LF_BCL, LL_ALWAYS, "LoadResource error: %x", hr);
+ _ASSERTE(wszFallbackString != NULL);
+ retMesg.Set(wszFallbackString);
+ }
+ else {
+ retMesg.Set(buffer);
+ }
+
+ END_QCALL;
+}
+
+
+// BlockCopy
+// This method copies a block of bytes from one primitive array to another
+// based upon an offset into each and a byte count.
+FCIMPL5(VOID, Buffer::BlockCopy, ArrayBase *src, int srcOffset, ArrayBase *dst, int dstOffset, int count)
+{
+ FCALL_CONTRACT;
+
+ // Verify that both the src and dst are Arrays of primitive
+ // types.
+ // <TODO>@TODO: We need to check for booleans</TODO>
+ if (src==NULL || dst==NULL)
+ FCThrowArgumentNullVoid((src==NULL) ? W("src") : W("dst"));
+
+ // Size of the Arrays in bytes
+ SIZE_T srcLen = src->GetNumComponents() * src->GetComponentSize();
+ SIZE_T dstLen = srcLen;
+
+ // We only want to allow arrays of primitives, no Objects.
+ const CorElementType srcET = src->GetArrayElementType();
+ if (!CorTypeInfo::IsPrimitiveType_NoThrow(srcET))
+ FCThrowArgumentVoid(W("src"), W("Arg_MustBePrimArray"));
+
+ if (src != dst) {
+ const CorElementType dstET = dst->GetArrayElementType();
+ if (!CorTypeInfo::IsPrimitiveType_NoThrow(dstET))
+ FCThrowArgumentVoid(W("dest"), W("Arg_MustBePrimArray"));
+ dstLen = dst->GetNumComponents() * dst->GetComponentSize();
+ }
+
+ if (srcOffset < 0 || dstOffset < 0 || count < 0) {
+ const wchar_t* str = W("srcOffset");
+ if (dstOffset < 0) str = W("dstOffset");
+ if (count < 0) str = W("count");
+ FCThrowArgumentOutOfRangeVoid(str, W("ArgumentOutOfRange_NeedNonNegNum"));
+ }
+
+ if (srcLen < (SIZE_T)srcOffset + (SIZE_T)count || dstLen < (SIZE_T)dstOffset + (SIZE_T)count) {
+ FCThrowArgumentVoid(NULL, W("Argument_InvalidOffLen"));
+ }
+
+ if (count > 0) {
+ memmove(dst->GetDataPtr() + dstOffset, src->GetDataPtr() + srcOffset, count);
+ }
+
+ FC_GC_POLL();
+}
+FCIMPLEND
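+
+// Note (illustration): because the copy above goes through memmove rather than
+// memcpy, src and dst may be the same array with overlapping ranges -- e.g.
+// BlockCopy(a, 0, a, 4, 10) shifts bytes 0..9 of 'a' to offset 4 in place
+// with well-defined results.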
+
+
+// InternalBlockCopy
+// This method copies a block of bytes from one primitive array to another
+// based upon an offset into each and a byte count.
+FCIMPL5(VOID, Buffer::InternalBlockCopy, ArrayBase *src, int srcOffset, ArrayBase *dst, int dstOffset, int count)
+{
+ FCALL_CONTRACT;
+
+ // @TODO: We should consider writing this in managed code. We probably
+ // cannot easily do this though - how do we get at the array's data?
+
+ // Unfortunately, we must do a check to make sure we're writing within
+ // the bounds of the array. This ensures that we neither overwrite memory
+ // elsewhere in the system nor write out junk, which can happen if multiple
+ // threads interact with our IO classes simultaneously without being
+ // thread-safe. Throw here.
+ // Unfortunately this even applies to setting our internal buffers to
+ // null. We don't want to debug races between Close and Read or Write.
+ if (src == NULL || dst == NULL)
+ FCThrowResVoid(kIndexOutOfRangeException, W("IndexOutOfRange_IORaceCondition"));
+
+ SIZE_T srcLen = src->GetNumComponents() * src->GetComponentSize();
+ SIZE_T dstLen = srcLen;
+ if (src != dst)
+ dstLen = dst->GetNumComponents() * dst->GetComponentSize();
+
+ if (srcOffset < 0 || dstOffset < 0 || count < 0)
+ FCThrowResVoid(kIndexOutOfRangeException, W("IndexOutOfRange_IORaceCondition"));
+
+ if (srcLen < (SIZE_T)srcOffset + (SIZE_T)count || dstLen < (SIZE_T)dstOffset + (SIZE_T)count)
+ FCThrowResVoid(kIndexOutOfRangeException, W("IndexOutOfRange_IORaceCondition"));
+
+ _ASSERTE(srcOffset >= 0);
+ _ASSERTE((src->GetNumComponents() * src->GetComponentSize()) - (unsigned) srcOffset >= (unsigned) count);
+ _ASSERTE((dst->GetNumComponents() * dst->GetComponentSize()) - (unsigned) dstOffset >= (unsigned) count);
+ _ASSERTE(dstOffset >= 0);
+ _ASSERTE(count >= 0);
+
+ // Copy the data.
+ memmove(dst->GetDataPtr() + dstOffset, src->GetDataPtr() + srcOffset, count);
+
+ FC_GC_POLL();
+}
+FCIMPLEND
+
+void QCALLTYPE Buffer::MemMove(void *dst, void *src, size_t length)
+{
+ QCALL_CONTRACT;
+
+#if defined(FEATURE_CORECLR) && !defined(FEATURE_CORESYSTEM)
+ // Callers of memcpy do expect and handle access violations in some scenarios.
+ // Access violations in the runtime dll are turned into fail fast by the vectored exception handler by default.
+ // We need to suppress this behavior for CoreCLR using AVInRuntimeImplOkayHolder because memcpy is statically linked in.
+ AVInRuntimeImplOkayHolder avOk;
+#endif
+
+ memmove(dst, src, length);
+}
+
+// Returns a bool to indicate if the array is of primitive types or not.
+FCIMPL1(FC_BOOL_RET, Buffer::IsPrimitiveTypeArray, ArrayBase *arrayUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(arrayUNSAFE != NULL);
+
+ // Check the type from the contained element's handle
+ TypeHandle elementTH = arrayUNSAFE->GetArrayElementTypeHandle();
+ BOOL fIsPrimitiveTypeArray = CorTypeInfo::IsPrimitiveType_NoThrow(elementTH.GetVerifierCorElementType());
+
+ FC_RETURN_BOOL(fIsPrimitiveTypeArray);
+
+}
+FCIMPLEND
+
+// Gets a particular byte out of the array. The array can't be an array of Objects - it
+// must be a primitive array.
+FCIMPL2(FC_UINT8_RET, Buffer::GetByte, ArrayBase *arrayUNSAFE, INT32 index)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(arrayUNSAFE != NULL);
+ _ASSERTE(index >=0 && index < ((INT32)(arrayUNSAFE->GetComponentSize() * arrayUNSAFE->GetNumComponents())));
+
+ UINT8 bData = *((BYTE*)arrayUNSAFE->GetDataPtr() + index);
+ return bData;
+}
+FCIMPLEND
+
+// Sets a particular byte in an array. The array can't be an array of Objects - it
+// must be a primitive array.
+//
+// Semantically the bData argument is of type BYTE but FCallCheckSignature expects the
+// type to be UINT8 and raises an error if this isn't the case when
+// COMPlus_ConsistencyCheck is set.
+FCIMPL3(VOID, Buffer::SetByte, ArrayBase *arrayUNSAFE, INT32 index, UINT8 bData)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(arrayUNSAFE != NULL);
+ _ASSERTE(index >=0 && index < ((INT32)(arrayUNSAFE->GetComponentSize() * arrayUNSAFE->GetNumComponents())));
+
+ *((BYTE*)arrayUNSAFE->GetDataPtr() + index) = (BYTE) bData;
+}
+FCIMPLEND
+
+// Returns the length in bytes of an array containing
+// primitive type elements
+FCIMPL1(INT32, Buffer::ByteLength, ArrayBase* arrayUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(arrayUNSAFE != NULL);
+
+ SIZE_T iRetVal = arrayUNSAFE->GetNumComponents() * arrayUNSAFE->GetComponentSize();
+
+ // This API is exposed both as Buffer.ByteLength and is also used indirectly in argument
+ // checks for Buffer.GetByte/SetByte.
+ //
+ // If somebody called Get/SetByte on 2GB+ arrays, there is a decent chance that
+ // the computation of the index has overflowed. Thus we intentionally always
+ // throw on 2GB+ arrays in Get/SetByte argument checks (even for indices <2GB)
+ // to prevent people from silently running into a trap.
+ if (iRetVal > INT32_MAX)
+ FCThrow(kOverflowException);
+
+ return (INT32)iRetVal;
+}
+FCIMPLEND
+
+//
+// GCInterface
+//
+MethodDesc *GCInterface::m_pCacheMethod=NULL;
+
+UINT64 GCInterface::m_ulMemPressure = 0;
+UINT64 GCInterface::m_ulThreshold = MIN_GC_MEMORYPRESSURE_THRESHOLD;
+INT32 GCInterface::m_gc_counts[3] = {0,0,0};
+CrstStatic GCInterface::m_MemoryPressureLock;
+
+UINT64 GCInterface::m_addPressure[NEW_PRESSURE_COUNT] = {0, 0, 0, 0}; // history of memory pressure additions
+UINT64 GCInterface::m_remPressure[NEW_PRESSURE_COUNT] = {0, 0, 0, 0}; // history of memory pressure removals
+
+// incremented after a gen2 GC has been detected,
+// (m_iteration % NEW_PRESSURE_COUNT) is used as an index into m_addPressure and m_remPressure
+UINT GCInterface::m_iteration = 0;
+
+FCIMPL0(int, GCInterface::GetGcLatencyMode)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED();
+
+ int result = (INT32)GCHeap::GetGCHeap()->GetGcLatencyMode();
+ return result;
+}
+FCIMPLEND
+
+FCIMPL1(int, GCInterface::SetGcLatencyMode, int newLatencyMode)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED();
+
+ return GCHeap::GetGCHeap()->SetGcLatencyMode(newLatencyMode);
+}
+FCIMPLEND
+
+FCIMPL0(int, GCInterface::GetLOHCompactionMode)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED();
+
+ int result = (INT32)GCHeap::GetGCHeap()->GetLOHCompactionMode();
+ return result;
+}
+FCIMPLEND
+
+FCIMPL1(void, GCInterface::SetLOHCompactionMode, int newLOHCompactionMode)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED();
+
+ GCHeap::GetGCHeap()->SetLOHCompactionMode(newLOHCompactionMode);
+}
+FCIMPLEND
+
+
+FCIMPL2(FC_BOOL_RET, GCInterface::RegisterForFullGCNotification, UINT32 gen2Percentage, UINT32 lohPercentage)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED();
+
+ FC_RETURN_BOOL(GCHeap::GetGCHeap()->RegisterForFullGCNotification(gen2Percentage, lohPercentage));
+}
+FCIMPLEND
+
+FCIMPL0(FC_BOOL_RET, GCInterface::CancelFullGCNotification)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED();
+ FC_RETURN_BOOL(GCHeap::GetGCHeap()->CancelFullGCNotification());
+}
+FCIMPLEND
+
+FCIMPL1(int, GCInterface::WaitForFullGCApproach, int millisecondsTimeout)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ DISABLED(GC_TRIGGERS); // can't use this in an FCALL because we're in forbid-GC mode until we set up an H_M_F.
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ int result = 0;
+
+ //We don't need to check the top end because the GC will take care of that.
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ DWORD dwMilliseconds = ((millisecondsTimeout == -1) ? INFINITE : millisecondsTimeout);
+ result = GCHeap::GetGCHeap()->WaitForFullGCApproach(dwMilliseconds);
+
+ HELPER_METHOD_FRAME_END();
+
+ return result;
+}
+FCIMPLEND
+
+FCIMPL1(int, GCInterface::WaitForFullGCComplete, int millisecondsTimeout)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ DISABLED(GC_TRIGGERS); // can't use this in an FCALL because we're in forbid-GC mode until we set up an H_M_F.
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ int result = 0;
+
+ //We don't need to check the top end because the GC will take care of that.
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ DWORD dwMilliseconds = ((millisecondsTimeout == -1) ? INFINITE : millisecondsTimeout);
+ result = GCHeap::GetGCHeap()->WaitForFullGCComplete(dwMilliseconds);
+
+ HELPER_METHOD_FRAME_END();
+
+ return result;
+}
+FCIMPLEND
+
+/*================================GetGeneration=================================
+**Action: Returns the generation in which args->obj is found.
+**Returns: The generation in which args->obj is found.
+**Arguments: args->obj -- The object to locate.
+**Exceptions: ArgumentException if args->obj is null.
+==============================================================================*/
+FCIMPL1(int, GCInterface::GetGeneration, Object* objUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ if (objUNSAFE == NULL)
+ FCThrowArgumentNull(W("obj"));
+
+ int result = (INT32)GCHeap::GetGCHeap()->WhichGeneration(objUNSAFE);
+ FC_GC_POLL_RET();
+ return result;
+}
+FCIMPLEND
+
+/*================================CollectionCount=================================
+**Action: Returns the number of collections for this generation since the beginning of the life of the process
+**Returns: The collection count.
+**Arguments: args->generation -- The generation
+**Exceptions: Argument exception if args->generation is < 0 or > GetMaxGeneration();
+==============================================================================*/
+FCIMPL2(int, GCInterface::CollectionCount, INT32 generation, INT32 getSpecialGCCount)
+{
+ FCALL_CONTRACT;
+
+ //We've already checked this in GC.cs, so we'll just assert it here.
+ _ASSERTE(generation >= 0);
+
+ //We don't need to check the top end because the GC will take care of that.
+ int result = (INT32)GCHeap::GetGCHeap()->CollectionCount(generation, getSpecialGCCount);
+ FC_GC_POLL_RET();
+ return result;
+}
+FCIMPLEND
+
+/*===============================GetGenerationWR================================
+**Action: Returns the generation in which the object pointed to by a WeakReference is found.
+**Returns:
+**Arguments: args->handle -- the OBJECTHANDLE to the object which we're locating.
+**Exceptions: ArgumentException if handle points to an object which is not accessible.
+==============================================================================*/
+FCIMPL1(int, GCInterface::GetGenerationWR, LPVOID handle)
+{
+ FCALL_CONTRACT;
+
+ int iRetVal = 0;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ OBJECTREF temp;
+ temp = ObjectFromHandle((OBJECTHANDLE) handle);
+ if (temp == NULL)
+ COMPlusThrowArgumentNull(W("weak handle"));
+
+ iRetVal = (INT32)GCHeap::GetGCHeap()->WhichGeneration(OBJECTREFToObject(temp));
+
+ HELPER_METHOD_FRAME_END();
+
+ return iRetVal;
+}
+FCIMPLEND
+
+/*================================GetTotalMemory================================
+**Action: Returns the total number of bytes in use
+**Returns: The total number of bytes in use
+**Arguments: None
+**Exceptions: None
+==============================================================================*/
+INT64 QCALLTYPE GCInterface::GetTotalMemory()
+{
+ QCALL_CONTRACT;
+
+ INT64 iRetVal = 0;
+
+ BEGIN_QCALL;
+
+ GCX_COOP();
+ iRetVal = (INT64) GCHeap::GetGCHeap()->GetTotalBytesInUse();
+
+ END_QCALL;
+
+ return iRetVal;
+}
+
+/*==============================Collect=========================================
+**Action: Collects all generations <= args->generation
+**Returns: void
+**Arguments: args->generation: The maximum generation to collect
+**Exceptions: Argument exception if args->generation is < 0 or > GetMaxGeneration();
+==============================================================================*/
+void QCALLTYPE GCInterface::Collect(INT32 generation, INT32 mode)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ //We've already checked this in GC.cs, so we'll just assert it here.
+ _ASSERTE(generation >= -1);
+
+ //We don't need to check the top end because the GC will take care of that.
+
+ GCX_COOP();
+ GCHeap::GetGCHeap()->GarbageCollect(generation, FALSE, mode);
+
+ END_QCALL;
+}
+
+
+/*==========================WaitForPendingFinalizers============================
+**Action: Run all Finalizers that haven't been run.
+**Arguments: None
+**Exceptions: None
+==============================================================================*/
+void QCALLTYPE GCInterface::WaitForPendingFinalizers()
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ FinalizerThread::FinalizerThreadWait();
+
+ END_QCALL;
+}
+
+
+/*===============================GetMaxGeneration===============================
+**Action: Returns the largest GC generation
+**Returns: The largest GC Generation
+**Arguments: None
+**Exceptions: None
+==============================================================================*/
+FCIMPL0(int, GCInterface::GetMaxGeneration)
+{
+ FCALL_CONTRACT;
+
+ return(INT32)GCHeap::GetGCHeap()->GetMaxGeneration();
+}
+FCIMPLEND
+
+
+/*==============================SuppressFinalize================================
+**Action: Indicate that an object's finalizer should not be run by the system
+**Arguments: Object of interest
+**Exceptions: None
+==============================================================================*/
+FCIMPL1(void, GCInterface::SuppressFinalize, Object *obj)
+{
+ FCALL_CONTRACT;
+
+ // Checked by the caller
+ _ASSERTE(obj != NULL);
+
+ if (!obj->GetMethodTable ()->HasFinalizer())
+ return;
+
+ GCHeap::GetGCHeap()->SetFinalizationRun(obj);
+ FC_GC_POLL();
+}
+FCIMPLEND
+
+
+/*============================ReRegisterForFinalize==============================
+**Action: Indicate that an object's finalizer should be run by the system.
+**Arguments: Object of interest
+**Exceptions: None
+==============================================================================*/
+FCIMPL1(void, GCInterface::ReRegisterForFinalize, Object *obj)
+{
+ FCALL_CONTRACT;
+
+ // Checked by the caller
+ _ASSERTE(obj != NULL);
+
+ if (obj->GetMethodTable()->HasFinalizer())
+ {
+ HELPER_METHOD_FRAME_BEGIN_1(obj);
+ GCHeap::GetGCHeap()->RegisterForFinalization(-1, obj);
+ HELPER_METHOD_FRAME_END();
+ }
+}
+FCIMPLEND
+
+FORCEINLINE UINT64 GCInterface::InterlockedAdd (UINT64 *pAugend, UINT64 addend) {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ UINT64 oldMemValue;
+ UINT64 newMemValue;
+
+ do {
+ oldMemValue = *pAugend;
+ newMemValue = oldMemValue + addend;
+
+ // check for overflow
+ if (newMemValue < oldMemValue)
+ {
+ newMemValue = UINT64_MAX;
+ }
+ } while (InterlockedCompareExchange64((LONGLONG*) pAugend, (LONGLONG) newMemValue, (LONGLONG) oldMemValue) != (LONGLONG) oldMemValue);
+
+ return newMemValue;
+}
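+
+// For example (illustration): with *pAugend == UINT64_MAX - 2 and addend == 10,
+// the raw sum wraps around, the overflow check fires, and the CAS publishes
+// UINT64_MAX -- the pressure counter saturates instead of wrapping to a small
+// value and silently losing pressure.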
+
+FORCEINLINE UINT64 GCInterface::InterlockedSub(UINT64 *pMinuend, UINT64 subtrahend) {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ UINT64 oldMemValue;
+ UINT64 newMemValue;
+
+ do {
+ oldMemValue = *pMinuend;
+ newMemValue = oldMemValue - subtrahend;
+
+ // check for underflow
+ if (newMemValue > oldMemValue)
+ newMemValue = 0;
+
+ } while (InterlockedCompareExchange64((LONGLONG*) pMinuend, (LONGLONG) newMemValue, (LONGLONG) oldMemValue) != (LONGLONG) oldMemValue);
+
+ return newMemValue;
+}
+
+void QCALLTYPE GCInterface::_AddMemoryPressure(UINT64 bytesAllocated)
+{
+ QCALL_CONTRACT;
+
+ // AddMemoryPressure could cause a GC, so we need a frame
+ BEGIN_QCALL;
+ AddMemoryPressure(bytesAllocated);
+ END_QCALL;
+}
+
+void GCInterface::AddMemoryPressure(UINT64 bytesAllocated)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SendEtwAddMemoryPressureEvent(bytesAllocated);
+
+ UINT64 newMemValue = InterlockedAdd(&m_ulMemPressure, bytesAllocated);
+
+ if (newMemValue > m_ulThreshold)
+ {
+ INT32 gen_collect = 0;
+ {
+ GCX_PREEMP();
+ CrstHolder holder(&m_MemoryPressureLock);
+
+ // to avoid collecting too often, take the max threshold of the linear and geometric growth
+ // heuristics.
+ UINT64 addMethod;
+ UINT64 multMethod;
+ UINT64 bytesAllocatedMax = (UINT64_MAX - m_ulThreshold) / 8;
+
+ if (bytesAllocated >= bytesAllocatedMax) // overflow check
+ {
+ addMethod = UINT64_MAX;
+ }
+ else
+ {
+ addMethod = m_ulThreshold + bytesAllocated * 8;
+ }
+
+ multMethod = newMemValue + newMemValue / 10;
+ if (multMethod < newMemValue) // overflow check
+ {
+ multMethod = UINT64_MAX;
+ }
+
+ m_ulThreshold = (addMethod > multMethod) ? addMethod : multMethod;
+ for (int i = 0; i <= 1; i++)
+ {
+ if ((GCHeap::GetGCHeap()->CollectionCount(i) / RELATIVE_GC_RATIO) > GCHeap::GetGCHeap()->CollectionCount(i + 1))
+ {
+ gen_collect = i + 1;
+ break;
+ }
+ }
+ }
+
+ PREFIX_ASSUME(gen_collect <= 2);
+
+ if ((gen_collect == 0) || (m_gc_counts[gen_collect] == GCHeap::GetGCHeap()->CollectionCount(gen_collect)))
+ {
+ GarbageCollectModeAny(gen_collect);
+ }
+
+ for (int i = 0; i < 3; i++)
+ {
+ m_gc_counts [i] = GCHeap::GetGCHeap()->CollectionCount(i);
+ }
+ }
+}
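+
+// Worked example of the threshold update above (illustrative numbers): with
+// m_ulThreshold == 100000 (MIN_GC_MEMORYPRESSURE_THRESHOLD) and a 50000-byte
+// addition bringing newMemValue to 150000, the linear heuristic gives
+// 100000 + 8 * 50000 == 500000 and the geometric one 150000 + 15000 == 165000;
+// the new threshold is max(500000, 165000) == 500000, so substantially more
+// pressure must accumulate before the next pressure-induced collection.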
+
+#ifdef _WIN64
+const unsigned MIN_MEMORYPRESSURE_BUDGET = 4 * 1024 * 1024; // 4 MB
+#else // _WIN64
+const unsigned MIN_MEMORYPRESSURE_BUDGET = 3 * 1024 * 1024; // 3 MB
+#endif // _WIN64
+
+const unsigned MAX_MEMORYPRESSURE_RATIO = 10; // 40 MB or 30 MB
+
+
+// Resets pressure accounting after a gen2 GC has occurred.
+void GCInterface::CheckCollectionCount()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ GCHeap * pHeap = GCHeap::GetGCHeap();
+
+ if (m_gc_counts[2] != pHeap->CollectionCount(2))
+ {
+ for (int i = 0; i < 3; i++)
+ {
+ m_gc_counts[i] = pHeap->CollectionCount(i);
+ }
+
+ m_iteration++;
+
+ UINT p = m_iteration % NEW_PRESSURE_COUNT;
+
+ m_addPressure[p] = 0; // new pressure will be accumulated here
+ m_remPressure[p] = 0;
+ }
+}
+
+
+// New AddMemoryPressure implementation (used by RCW and the CLRServicesImpl class)
+//
+// 1. Less sensitive than the original implementation (start budget 3 MB)
+// 2. Focuses more on newly added memory pressure
+// 3. Budget adjusted by effectiveness of last 3 triggered GC (add / remove ratio, max 10x)
+// 4. Budget maxed with 30% of current managed GC size
+// 5. If Gen2 GC is happening naturally, ignore past pressure
+//
+// Here's a brief description of the ideal algorithm for Add/Remove memory pressure:
+// Do a GC when (HeapStart < X * MemPressureGrowth) where
+// - HeapStart is GC Heap size after doing the last GC
+// - MemPressureGrowth is the net of Add and Remove since the last GC
+// - X is proportional to our guess of the unmanaged memory death rate per GC interval,
+// and would be calculated based on historic data using standard exponential approximation:
+// Xnew = UMDeath/UMTotal * 0.5 + Xprev
+//
+void GCInterface::NewAddMemoryPressure(UINT64 bytesAllocated)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CheckCollectionCount();
+
+ UINT p = m_iteration % NEW_PRESSURE_COUNT;
+
+ UINT64 newMemValue = InterlockedAdd(&m_addPressure[p], bytesAllocated);
+
+ static_assert(NEW_PRESSURE_COUNT == 4, "NewAddMemoryPressure contains unrolled loops which depend on NEW_PRESSURE_COUNT");
+
+ UINT64 add = m_addPressure[0] + m_addPressure[1] + m_addPressure[2] + m_addPressure[3] - m_addPressure[p];
+ UINT64 rem = m_remPressure[0] + m_remPressure[1] + m_remPressure[2] + m_remPressure[3] - m_remPressure[p];
+
+ STRESS_LOG4(LF_GCINFO, LL_INFO10000, "AMP Add: %I64u => added=%I64u total_added=%I64u total_removed=%I64u",
+ bytesAllocated, newMemValue, add, rem);
+
+ SendEtwAddMemoryPressureEvent(bytesAllocated);
+
+ if (newMemValue >= MIN_MEMORYPRESSURE_BUDGET)
+ {
+ UINT64 budget = MIN_MEMORYPRESSURE_BUDGET;
+
+ if (m_iteration >= NEW_PRESSURE_COUNT) // wait until we have enough data points
+ {
+ // Adjust according to effectiveness of GC
+ // Scale budget according to past m_addPressure / m_remPressure ratio
+ if (add >= rem * MAX_MEMORYPRESSURE_RATIO)
+ {
+ budget = MIN_MEMORYPRESSURE_BUDGET * MAX_MEMORYPRESSURE_RATIO;
+ }
+ else if (add > rem)
+ {
+ CONSISTENCY_CHECK(rem != 0);
+
+ // Avoid overflow by calculating addPressure / remPressure as fixed point (1 = 1024)
+ budget = (add * 1024 / rem) * budget / 1024;
+ }
+ }
+
+ // If still over budget, check current managed heap size
+ if (newMemValue >= budget)
+ {
+ GCHeap *pGCHeap = GCHeap::GetGCHeap();
+ UINT64 heapOver3 = pGCHeap->GetCurrentObjSize() / 3;
+
+ if (budget < heapOver3) // Max
+ {
+ budget = heapOver3;
+ }
+
+ if (newMemValue >= budget)
+ {
+ // last check - if we would exceed 20% of GC "duty cycle", do not trigger GC at this time
+ if ((pGCHeap->GetNow() - pGCHeap->GetLastGCStartTime(2)) > (pGCHeap->GetLastGCDuration(2) * 5))
+ {
+ STRESS_LOG6(LF_GCINFO, LL_INFO10000, "AMP Budget: pressure=%I64u ? budget=%I64u (total_added=%I64u, total_removed=%I64u, mng_heap=%I64u) pos=%d",
+ newMemValue, budget, add, rem, heapOver3 * 3, m_iteration);
+
+ GarbageCollectModeAny(2);
+
+ CheckCollectionCount();
+ }
+ }
+ }
+ }
+}
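+
+// Worked example of the budget scaling above (illustrative numbers, 64-bit, so
+// MIN_MEMORYPRESSURE_BUDGET == 4 MB): with add == 40 MB and rem == 8 MB over
+// the last three intervals, the fixed-point ratio is 40MB * 1024 / 8MB == 5120,
+// so budget == 5120 * 4MB / 1024 == 20 MB; if the managed heap is currently
+// 90 MB, heapOver3 == 30 MB wins and becomes the effective budget.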
+
+void QCALLTYPE GCInterface::_RemoveMemoryPressure(UINT64 bytesAllocated)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ RemoveMemoryPressure(bytesAllocated);
+ END_QCALL;
+}
+
+void GCInterface::RemoveMemoryPressure(UINT64 bytesAllocated)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SendEtwRemoveMemoryPressureEvent(bytesAllocated);
+
+ UINT64 newMemValue = InterlockedSub(&m_ulMemPressure, bytesAllocated);
+ UINT64 new_th;
+ UINT64 bytesAllocatedMax = (m_ulThreshold / 4);
+ UINT64 addMethod;
+ UINT64 multMethod = (m_ulThreshold - m_ulThreshold / 20); // can never underflow
+ if (bytesAllocated >= bytesAllocatedMax) // protect against underflow
+ {
+ m_ulThreshold = MIN_GC_MEMORYPRESSURE_THRESHOLD;
+ return;
+ }
+ else
+ {
+ addMethod = m_ulThreshold - bytesAllocated * 4;
+ }
+
+ new_th = (addMethod < multMethod) ? addMethod : multMethod;
+
+ if (newMemValue <= new_th)
+ {
+ GCX_PREEMP();
+ CrstHolder holder(&m_MemoryPressureLock);
+ if (new_th > MIN_GC_MEMORYPRESSURE_THRESHOLD)
+ m_ulThreshold = new_th;
+ else
+ m_ulThreshold = MIN_GC_MEMORYPRESSURE_THRESHOLD;
+
+ for (int i = 0; i < 3; i++)
+ {
+ m_gc_counts [i] = GCHeap::GetGCHeap()->CollectionCount(i);
+ }
+ }
+}
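+
+// Worked example of the shrink heuristic above (illustrative numbers): with
+// m_ulThreshold == 800000 and bytesAllocated == 50000 (below the 200000
+// underflow cutoff), the linear method gives 800000 - 4 * 50000 == 600000 and
+// the geometric one 800000 - 40000 == 760000; new_th == min(600000, 760000)
+// == 600000, and it is installed only once the remaining pressure has dropped
+// to or below it.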
+
+void GCInterface::NewRemoveMemoryPressure(UINT64 bytesAllocated)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CheckCollectionCount();
+
+ UINT p = m_iteration % NEW_PRESSURE_COUNT;
+
+ SendEtwRemoveMemoryPressureEvent(bytesAllocated);
+
+ InterlockedAdd(&m_remPressure[p], bytesAllocated);
+
+ STRESS_LOG2(LF_GCINFO, LL_INFO10000, "AMP Remove: %I64u => removed=%I64u",
+ bytesAllocated, m_remPressure[p]);
+}
+
+inline void GCInterface::SendEtwAddMemoryPressureEvent(UINT64 bytesAllocated)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ FireEtwIncreaseMemoryPressure(bytesAllocated, GetClrInstanceId());
+}
+
+// Out-of-line helper to avoid EH prolog/epilog in functions that otherwise don't throw.
+NOINLINE void GCInterface::SendEtwRemoveMemoryPressureEvent(UINT64 bytesAllocated)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ FireEtwDecreaseMemoryPressure(bytesAllocated, GetClrInstanceId());
+ }
+ EX_CATCH
+ {
+ // Ignore failures
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+// Out-of-line helper to avoid EH prolog/epilog in functions that otherwise don't throw.
+NOINLINE void GCInterface::GarbageCollectModeAny(int generation)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+ GCHeap::GetGCHeap()->GarbageCollect(generation, FALSE, collection_non_blocking);
+}
+
+//
+// COMInterlocked
+//
+
+#include <optsmallperfcritical.h>
+
+FCIMPL2(INT32,COMInterlocked::Exchange, INT32 *location, INT32 value)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+ return FastInterlockExchange((LONG *) location, value);
+}
+FCIMPLEND
+
+FCIMPL2_IV(INT64,COMInterlocked::Exchange64, INT64 *location, INT64 value)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+ return FastInterlockExchangeLong((INT64 *) location, value);
+}
+FCIMPLEND
+
+FCIMPL2(LPVOID,COMInterlocked::ExchangePointer, LPVOID *location, LPVOID value)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+ FCUnique(0x15);
+ return FastInterlockExchangePointer(location, value);
+}
+FCIMPLEND
+
+FCIMPL3(INT32, COMInterlocked::CompareExchange, INT32* location, INT32 value, INT32 comparand)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+ return FastInterlockCompareExchange((LONG*)location, value, comparand);
+}
+FCIMPLEND
+
+FCIMPL4(INT32, COMInterlocked::CompareExchangeReliableResult, INT32* location, INT32 value, INT32 comparand, CLR_BOOL* succeeded)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+ INT32 result = FastInterlockCompareExchange((LONG*)location, value, comparand);
+ if (result == comparand)
+ *succeeded = true;
+
+ return result;
+}
+FCIMPLEND
+
+FCIMPL3_IVV(INT64, COMInterlocked::CompareExchange64, INT64* location, INT64 value, INT64 comparand)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+ return FastInterlockCompareExchangeLong((INT64*)location, value, comparand);
+}
+FCIMPLEND
+
+FCIMPL3(LPVOID,COMInterlocked::CompareExchangePointer, LPVOID *location, LPVOID value, LPVOID comparand)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+ FCUnique(0x59);
+ return FastInterlockCompareExchangePointer(location, value, comparand);
+}
+FCIMPLEND
+
+FCIMPL2_IV(float,COMInterlocked::ExchangeFloat, float *location, float value)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+ LONG ret = FastInterlockExchange((LONG *) location, *(LONG*)&value);
+ return *(float*)&ret;
+}
+FCIMPLEND
+
+FCIMPL2_IV(double,COMInterlocked::ExchangeDouble, double *location, double value)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+
+ INT64 ret = FastInterlockExchangeLong((INT64 *) location, *(INT64*)&value);
+ return *(double*)&ret;
+}
+FCIMPLEND
+
+FCIMPL3_IVV(float,COMInterlocked::CompareExchangeFloat, float *location, float value, float comparand)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+ LONG ret = (LONG)FastInterlockCompareExchange((LONG*) location, *(LONG*)&value, *(LONG*)&comparand);
+ return *(float*)&ret;
+}
+FCIMPLEND
+
+FCIMPL3_IVV(double,COMInterlocked::CompareExchangeDouble, double *location, double value, double comparand)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+ INT64 ret = (INT64)FastInterlockCompareExchangeLong((INT64*) location, *(INT64*)&value, *(INT64*)&comparand);
+ return *(double*)&ret;
+}
+FCIMPLEND
+
+FCIMPL2(LPVOID,COMInterlocked::ExchangeObject, LPVOID*location, LPVOID value)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+ LPVOID ret = FastInterlockExchangePointer(location, value);
+#ifdef _DEBUG
+ Thread::ObjectRefAssign((OBJECTREF *)location);
+#endif
+ ErectWriteBarrier((OBJECTREF*) location, ObjectToOBJECTREF((Object*) value));
+ return ret;
+}
+FCIMPLEND
+
+FCIMPL2_VV(void,COMInterlocked::ExchangeGeneric, TypedByRef location, TypedByRef value)
+{
+ FCALL_CONTRACT;
+
+ LPVOID* loc = (LPVOID*)location.data;
+ if( NULL == loc) {
+ FCThrowVoid(kNullReferenceException);
+ }
+
+ LPVOID val = *(LPVOID*)value.data;
+ *(LPVOID*)value.data = FastInterlockExchangePointer(loc, val);
+#ifdef _DEBUG
+ Thread::ObjectRefAssign((OBJECTREF *)loc);
+#endif
+ ErectWriteBarrier((OBJECTREF*) loc, ObjectToOBJECTREF((Object*) val));
+}
+FCIMPLEND
+
+FCIMPL3_VVI(void,COMInterlocked::CompareExchangeGeneric, TypedByRef location, TypedByRef value, LPVOID comparand)
+{
+ FCALL_CONTRACT;
+
+ LPVOID* loc = (LPVOID*)location.data;
+ LPVOID val = *(LPVOID*)value.data;
+ if( NULL == loc) {
+ FCThrowVoid(kNullReferenceException);
+ }
+
+ LPVOID ret = FastInterlockCompareExchangePointer(loc, val, comparand);
+ *(LPVOID*)value.data = ret;
+ if(ret == comparand)
+ {
+#ifdef _DEBUG
+ Thread::ObjectRefAssign((OBJECTREF *)loc);
+#endif
+ ErectWriteBarrier((OBJECTREF*) loc, ObjectToOBJECTREF((Object*) val));
+ }
+}
+FCIMPLEND
+
+FCIMPL3(LPVOID,COMInterlocked::CompareExchangeObject, LPVOID *location, LPVOID value, LPVOID comparand)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+ // <TODO>@todo: only set ref if is updated</TODO>
+ LPVOID ret = FastInterlockCompareExchangePointer(location, value, comparand);
+ if (ret == comparand) {
+#ifdef _DEBUG
+ Thread::ObjectRefAssign((OBJECTREF *)location);
+#endif
+ ErectWriteBarrier((OBJECTREF*) location, ObjectToOBJECTREF((Object*) value));
+ }
+ return ret;
+}
+FCIMPLEND
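+
+// Note (illustration): the write barrier above runs only when the CAS actually
+// stored 'value'. Card-marking after a failed exchange would be merely
+// wasteful, but skipping it after a successful store of a young object into an
+// old location could let an ephemeral GC miss the new reference.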
+
+FCIMPL2(INT32,COMInterlocked::ExchangeAdd32, INT32 *location, INT32 value)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+ return FastInterlockExchangeAdd((LONG *) location, value);
+}
+FCIMPLEND
+
+FCIMPL2_IV(INT64,COMInterlocked::ExchangeAdd64, INT64 *location, INT64 value)
+{
+ FCALL_CONTRACT;
+
+ if( NULL == location) {
+ FCThrow(kNullReferenceException);
+ }
+
+ return FastInterlockExchangeAddLong((INT64 *) location, value);
+}
+FCIMPLEND
+
+#include <optdefault.h>
+
+
+
+FCIMPL6(INT32, ManagedLoggingHelper::GetRegistryLoggingValues, CLR_BOOL* bLoggingEnabled, CLR_BOOL* bLogToConsole, INT32 *iLogLevel, CLR_BOOL* bPerfWarnings, CLR_BOOL* bCorrectnessWarnings, CLR_BOOL* bSafeHandleStackTraces)
+{
+ FCALL_CONTRACT;
+
+ INT32 logFacility = 0;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ *bLoggingEnabled = (bool)(g_pConfig->GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_LogEnable, 0)!=0);
+ *bLogToConsole = (bool)(g_pConfig->GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_LogToConsole, 0)!=0);
+ *iLogLevel = (INT32)(g_pConfig->GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_LogLevel, 0));
+ logFacility = (INT32)(g_pConfig->GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_ManagedLogFacility, 0));
+ *bPerfWarnings = (bool)(g_pConfig->GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_BCLPerfWarnings, 0)!=0);
+ *bCorrectnessWarnings = (bool)(g_pConfig->GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_BCLCorrectnessWarnings, 0)!=0);
+ *bSafeHandleStackTraces = (bool)(g_pConfig->GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SafeHandleStackTraces, 0)!=0);
+
+ HELPER_METHOD_FRAME_END();
+
+ return logFacility;
+}
+FCIMPLEND
+
+// Return true if the valuetype does not contain pointers and is tightly packed
+FCIMPL1(FC_BOOL_RET, ValueTypeHelper::CanCompareBits, Object* obj)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(obj != NULL);
+ MethodTable* mt = obj->GetMethodTable();
+ FC_RETURN_BOOL(!mt->ContainsPointers() && !mt->IsNotTightlyPacked());
+}
+FCIMPLEND
+
+FCIMPL2(FC_BOOL_RET, ValueTypeHelper::FastEqualsCheck, Object* obj1, Object* obj2)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(obj1 != NULL);
+ _ASSERTE(obj2 != NULL);
+ _ASSERTE(!obj1->GetMethodTable()->ContainsPointers());
+ _ASSERTE(obj1->GetSize() == obj2->GetSize());
+
+ TypeHandle pTh = obj1->GetTypeHandle();
+
+ FC_RETURN_BOOL(memcmp(obj1->GetData(),obj2->GetData(),pTh.GetSize()) == 0);
+}
+FCIMPLEND
+
+static BOOL CanUseFastGetHashCodeHelper(MethodTable *mt)
+{
+ LIMITED_METHOD_CONTRACT;
+ return !mt->ContainsPointers() && !mt->IsNotTightlyPacked();
+}
+
+static INT32 FastGetValueTypeHashCodeHelper(MethodTable *mt, void *pObjRef)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ PRECONDITION(CanUseFastGetHashCodeHelper(mt));
+ } CONTRACTL_END;
+
+ INT32 hashCode = 0;
+ INT32 *pObj = (INT32*)pObjRef;
+
+ // This is a struct with no refs and no "strange" offsets; just walk the object and XOR the bits.
+ INT32 size = mt->GetNumInstanceFieldBytes();
+ for (INT32 i = 0; i < (INT32)(size / sizeof(INT32)); i++)
+ hashCode ^= *pObj++;
+
+ return hashCode;
+}
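+
+// A standalone sketch of the XOR fold above, for illustration only (the Point
+// struct and XorFoldHash are hypothetical and this block is not compiled):
+// the "no refs, tightly packed" precondition is what makes a raw bit-fold safe.
+#if 0
+#include <cstdint>
+#include <cstddef>
+
+struct Point { int32_t x; int32_t y; };     // no object refs, no padding holes
+
+static int32_t XorFoldHash(const void* p, size_t size)
+{
+    int32_t hash = 0;
+    const int32_t* cursor = (const int32_t*)p;
+    for (size_t i = 0; i < size / sizeof(int32_t); i++)
+        hash ^= cursor[i];                  // Point{3, 5} hashes to 3 ^ 5 == 6
+    return hash;                            // tail bytes < 4 are ignored, as above
+}
+#endif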
+
+static INT32 RegularGetValueTypeHashCode(MethodTable *mt, void *pObjRef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ INT32 hashCode = 0;
+ INT32 *pObj = (INT32*)pObjRef;
+
+ // While we shouldn't get here directly from ValueTypeHelper::GetHashCode, if we recurse we need to
+ // be able to handle getting the hashcode for an embedded structure whose hashcode is computed by the fast path.
+ if (CanUseFastGetHashCodeHelper(mt))
+ {
+ return FastGetValueTypeHashCodeHelper(mt, pObjRef);
+ }
+ else
+ {
+ // It's getting ugly, so fall back to the old managed-code behavior: grab the first
+ // non-null field and return its hash code, or the field's bits themselves as the hash code.
+ // <TODO> Note that the old behavior has already been broken for value types
+ // that qualify for CanUseFastGetHashCodeHelper. So maybe we should
+ // change the implementation here to use all fields instead of just the 1st one.
+ // </TODO>
+ //
+ // <TODO> check this approximation - we may be losing exact type information </TODO>
+ ApproxFieldDescIterator fdIterator(mt, ApproxFieldDescIterator::INSTANCE_FIELDS);
+ INT32 count = (INT32)fdIterator.Count();
+
+ if (count != 0)
+ {
+ for (INT32 i = 0; i < count; i++)
+ {
+ FieldDesc *field = fdIterator.Next();
+ _ASSERTE(!field->IsRVA());
+ void *pFieldValue = (BYTE *)pObj + field->GetOffsetUnsafe();
+ if (field->IsObjRef())
+ {
+ // if we get an object reference we get the hash code out of that
+ if (*(Object**)pFieldValue != NULL)
+ {
+
+ OBJECTREF fieldObjRef = ObjectToOBJECTREF(*(Object **) pFieldValue);
+ GCPROTECT_BEGIN(fieldObjRef);
+
+ MethodDescCallSite getHashCode(METHOD__OBJECT__GET_HASH_CODE, &fieldObjRef);
+
+ // Make the call.
+ ARG_SLOT arg[1] = {ObjToArgSlot(fieldObjRef)};
+ hashCode = getHashCode.Call_RetI4(arg);
+
+ GCPROTECT_END();
+ }
+ else
+ {
+ // null object reference, try next
+ continue;
+ }
+ }
+ else
+ {
+ UINT fieldSize = field->LoadSize();
+ INT32 *pValue = (INT32*)pFieldValue;
+ CorElementType fieldType = field->GetFieldType();
+ if (fieldType != ELEMENT_TYPE_VALUETYPE)
+ {
+ for (INT32 j = 0; j < (INT32)(fieldSize / sizeof(INT32)); j++)
+ hashCode ^= *pValue++;
+ }
+ else
+ {
+ // got another value type. Get the type
+ TypeHandle fieldTH = field->LookupFieldTypeHandle(); // the type was loaded already
+ _ASSERTE(!fieldTH.IsNull());
+ hashCode = RegularGetValueTypeHashCode(fieldTH.GetMethodTable(), pValue);
+ }
+ }
+ break;
+ }
+ }
+ }
+ return hashCode;
+}
+
+// The default implementation of GetHashCode() for all value types.
+// Note that this implementation reveals the value of the fields.
+// So if the value type contains any sensitive information it should
+// implement its own GetHashCode().
+FCIMPL1(INT32, ValueTypeHelper::GetHashCode, Object* objUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ if (objUNSAFE == NULL)
+ FCThrow(kNullReferenceException);
+
+ OBJECTREF obj = ObjectToOBJECTREF(objUNSAFE);
+ VALIDATEOBJECTREF(obj);
+
+ INT32 hashCode = 0;
+ MethodTable *pMT = objUNSAFE->GetMethodTable();
+
+ // We don't want to expose the method table pointer in the hash code
+ // Let's use the typeID instead.
+ UINT32 typeID = pMT->LookupTypeID();
+ if (typeID == TypeIDProvider::INVALID_TYPE_ID)
+ {
+ // If the typeID has yet to be generated, fall back to GetTypeID
+ // This only needs to be done once per MethodTable
+ HELPER_METHOD_FRAME_BEGIN_RET_1(obj);
+ typeID = pMT->GetTypeID();
+ HELPER_METHOD_FRAME_END();
+ }
+
+ // To get less colliding and more evenly distributed hash codes,
+ // we munge the class index with two big prime numbers
+ hashCode = typeID * 711650207 + 2506965631U;
+
+ if (CanUseFastGetHashCodeHelper(pMT))
+ {
+ hashCode ^= FastGetValueTypeHashCodeHelper(pMT, obj->UnBox());
+ }
+ else
+ {
+ HELPER_METHOD_FRAME_BEGIN_RET_1(obj);
+ hashCode ^= RegularGetValueTypeHashCode(pMT, obj->UnBox());
+ HELPER_METHOD_FRAME_END();
+ }
+
+ return hashCode;
+}
+FCIMPLEND
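+
+// For example (illustration): two value types whose instances happen to share
+// the same bit pattern still hash apart, because consecutive typeIDs seed the
+// hash 711650207 apart before the field bits are XORed in.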
+
+static LONG s_dwSeed;
+
+FCIMPL1(INT32, ValueTypeHelper::GetHashCodeOfPtr, LPVOID ptr)
+{
+ FCALL_CONTRACT;
+
+ INT32 hashCode = (INT32)((INT64)(ptr));
+
+ if (hashCode == 0)
+ {
+ return 0;
+ }
+
+ DWORD dwSeed = s_dwSeed;
+
+ // Initialize s_dwSeed lazily
+ if (dwSeed == 0)
+ {
+ // We use the first non-zero pointer as the seed; all hash codes are based off of that.
+ // This is to make sure that we only reveal relative memory addresses and never absolute ones.
+ dwSeed = hashCode;
+ InterlockedCompareExchange(&s_dwSeed, dwSeed, 0);
+ dwSeed = s_dwSeed;
+ }
+ _ASSERTE(dwSeed != 0);
+
+ return hashCode - dwSeed;
+}
+FCIMPLEND
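+
+// For example (illustration): if the first non-zero pointer hashed is 0x7f0010,
+// it becomes the seed and hashes to 0; a later pointer 0x7f0030 hashes to 0x20.
+// Only deltas between pointers are revealed, never absolute addresses.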
+
+#ifndef FEATURE_CORECLR
+FCIMPL1(OBJECTHANDLE, SizedRefHandle::Initialize, Object* _obj)
+{
+ FCALL_CONTRACT;
+
+ OBJECTHANDLE result = 0;
+ OBJECTREF obj(_obj);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ result = GetAppDomain()->CreateSizedRefHandle(obj);
+
+ HELPER_METHOD_FRAME_END();
+
+ return result;
+}
+FCIMPLEND
+
+FCIMPL1(VOID, SizedRefHandle::Free, OBJECTHANDLE handle)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(handle != NULL);
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ DestroySizedRefHandle(handle);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL1(LPVOID, SizedRefHandle::GetTarget, OBJECTHANDLE handle)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(handle != NULL);
+
+ OBJECTREF objRef = NULL;
+
+ objRef = ObjectFromHandle(handle);
+
+ FCUnique(0x33);
+ return *((LPVOID*)&objRef);
+}
+FCIMPLEND
+
+FCIMPL1(INT64, SizedRefHandle::GetApproximateSize, OBJECTHANDLE handle)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(handle != NULL);
+
+ return (INT64)HndGetHandleExtraInfo(handle);
+}
+FCIMPLEND
+#endif //!FEATURE_CORECLR
+
+#ifdef FEATURE_CORECLR
+COMNlsHashProvider COMNlsHashProvider::s_NlsHashProvider;
+#endif // FEATURE_CORECLR
+
+
+COMNlsHashProvider::COMNlsHashProvider()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+ bUseRandomHashing = FALSE;
+ pEntropy = NULL;
+ pDefaultSeed = NULL;
+#endif // FEATURE_RANDOMIZED_STRING_HASHING
+}
+
+INT32 COMNlsHashProvider::HashString(LPCWSTR szStr, SIZE_T strLen, BOOL forceRandomHashing, INT64 additionalEntropy)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_RANDOMIZED_STRING_HASHING
+ _ASSERTE(forceRandomHashing == false);
+ _ASSERTE(additionalEntropy == 0);
+#endif
+
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+ if(bUseRandomHashing || forceRandomHashing)
+ {
+ int marvinResult[SYMCRYPT_MARVIN32_RESULT_SIZE / sizeof(int)];
+
+ if(additionalEntropy == 0)
+ {
+ SymCryptMarvin32(GetDefaultSeed(), (PCBYTE) szStr, strLen * sizeof(WCHAR), (PBYTE) &marvinResult);
+ }
+ else
+ {
+ SYMCRYPT_MARVIN32_EXPANDED_SEED seed;
+ CreateMarvin32Seed(additionalEntropy, &seed);
+ SymCryptMarvin32(&seed, (PCBYTE) szStr, strLen * sizeof(WCHAR), (PBYTE) &marvinResult);
+ }
+
+ return marvinResult[0] ^ marvinResult[1];
+ }
+ else
+ {
+#endif // FEATURE_RANDOMIZED_STRING_HASHING
+ return ::HashString(szStr);
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+ }
+#endif // FEATURE_RANDOMIZED_STRING_HASHING
+}
+
+
+INT32 COMNlsHashProvider::HashSortKey(PCBYTE pSrc, SIZE_T cbSrc, BOOL forceRandomHashing, INT64 additionalEntropy)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_RANDOMIZED_STRING_HASHING
+ _ASSERTE(forceRandomHashing == false);
+ _ASSERTE(additionalEntropy == 0);
+#endif
+
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+ if(bUseRandomHashing || forceRandomHashing)
+ {
+ int marvinResult[SYMCRYPT_MARVIN32_RESULT_SIZE / sizeof(int)];
+
+ // Sort keys are terminated with a null byte, which the old algorithm didn't hash,
+ // so we don't hash it with Marvin32 either.
+ if(additionalEntropy == 0)
+ {
+ SymCryptMarvin32(GetDefaultSeed(), pSrc, cbSrc - 1, (PBYTE) &marvinResult);
+ }
+ else
+ {
+ SYMCRYPT_MARVIN32_EXPANDED_SEED seed;
+ CreateMarvin32Seed(additionalEntropy, &seed);
+ SymCryptMarvin32(&seed, pSrc, cbSrc - 1, (PBYTE) &marvinResult);
+ }
+
+ return marvinResult[0] ^ marvinResult[1];
+ }
+ else
+ {
+#endif // FEATURE_RANDOMIZED_STRING_HASHING
+ // OK, let's build the hashcode -- mostly lifted from GetHashCode() in String.cs, for strings.
+ int hash1 = 5381;
+ int hash2 = hash1;
+ const BYTE *pB = pSrc;
+ BYTE c;
+
+ while (pB != 0 && *pB != 0) {
+ hash1 = ((hash1 << 5) + hash1) ^ *pB;
+ c = pB[1];
+
+ //
+ // FUTURE: Update NewAPis::LCMapStringEx to perhaps use a different, bug-free Win32 API on Win2k3 to work around the issue discussed below.
+ //
+ // On Win2k3 Server, LCMapStringEx(LCMAP_SORTKEY) output does not correspond to CompareString in all cases, breaking the .NET GetHashCode<->Equality contract.
+ // Due to a fluke in our GetHashCode method, we avoided this issue because we break out of the loop on the binary-zero byte.
+ //
+ if (c == 0)
+ break;
+
+ hash2 = ((hash2 << 5) + hash2) ^ c;
+ pB += 2;
+ }
+
+ return hash1 + (hash2 * 1566083941);
+
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+ }
+#endif // FEATURE_RANDOMIZED_STRING_HASHING
+
+}
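+
+// The non-randomized loop above is the two-lane djb2 variant from String.cs:
+// hash1 folds even-indexed bytes and hash2 odd-indexed ones, two bytes per
+// step, with both lanes stopping at the first zero byte in either slot.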
+
+INT32 COMNlsHashProvider::HashiStringKnownLower80(LPCWSTR szStr, INT32 strLen, BOOL forceRandomHashing, INT64 additionalEntropy)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_RANDOMIZED_STRING_HASHING
+ _ASSERTE(forceRandomHashing == false);
+ _ASSERTE(additionalEntropy == 0);
+#endif
+
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+ if(bUseRandomHashing || forceRandomHashing)
+ {
+ WCHAR buf[SYMCRYPT_MARVIN32_INPUT_BLOCK_SIZE * 8];
+ SYMCRYPT_MARVIN32_STATE marvinState;
+ SYMCRYPT_MARVIN32_EXPANDED_SEED seed;
+
+ if(additionalEntropy == 0)
+ {
+ SymCryptMarvin32Init(&marvinState, GetDefaultSeed());
+ }
+ else
+ {
+ CreateMarvin32Seed(additionalEntropy, &seed);
+ SymCryptMarvin32Init(&marvinState, &seed);
+ }
+
+ LPCWSTR szEnd = szStr + strLen;
+
+ const UINT A_TO_Z_RANGE = (UINT)('z' - 'a');
+
+ while (szStr != szEnd)
+ {
+ size_t count = (sizeof(buf) / sizeof(buf[0]));
+
+ if ((size_t)(szEnd - szStr) < count)
+ count = (size_t)(szEnd - szStr);
+
+ for (size_t i = 0; i<count; i++)
+ {
+ WCHAR c = szStr[i];
+
+ if ((UINT)(c - 'a') <= A_TO_Z_RANGE) // if (c >='a' && c <= 'z')
+ {
+ // If we have a lowercase ASCII character, clearing bit 0x20
+ // makes it the corresponding uppercase character.
+ c &= ~0x20;
+ }
+
+ buf[i] = c;
+ }
+
+ szStr += count;
+
+ SymCryptMarvin32Append(&marvinState, (PCBYTE) &buf, sizeof(WCHAR) * count);
+ }
+
+ int marvinResult[SYMCRYPT_MARVIN32_RESULT_SIZE / sizeof(int)];
+ SymCryptMarvin32Result(&marvinState, (PBYTE) &marvinResult);
+ return marvinResult[0] ^ marvinResult[1];
+ }
+ else
+ {
+#endif // FEATURE_RANDOMIZED_STRING_HASHING
+ return ::HashiStringKnownLower80(szStr);
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+ }
+#endif // FEATURE_RANDOMIZED_STRING_HASHING
+}
+
+
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+void COMNlsHashProvider::InitializeDefaultSeed()
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PCBYTE pEntropy = GetEntropy();
+ AllocMemHolder<SYMCRYPT_MARVIN32_EXPANDED_SEED> pSeed = GetAppDomain()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(SYMCRYPT_MARVIN32_EXPANDED_SEED)));
+ SymCryptMarvin32ExpandSeed(pSeed, pEntropy, SYMCRYPT_MARVIN32_SEED_SIZE);
+
+ if(InterlockedCompareExchangeT(&pDefaultSeed, (PCSYMCRYPT_MARVIN32_EXPANDED_SEED) pSeed, NULL) == NULL)
+ {
+ pSeed.SuppressRelease();
+ }
+}
+
+PCSYMCRYPT_MARVIN32_EXPANDED_SEED COMNlsHashProvider::GetDefaultSeed()
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(pDefaultSeed == NULL)
+ {
+ InitializeDefaultSeed();
+ }
+
+ return pDefaultSeed;
+}
+
+PCBYTE COMNlsHashProvider::GetEntropy()
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(pEntropy == NULL)
+ {
+ HCRYPTPROV hCryptProv;
+ AllocMemHolder<BYTE> pNewEntropy = GetAppDomain()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(SYMCRYPT_MARVIN32_SEED_SIZE));
+
+ WszCryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT);
+ CryptGenRandom(hCryptProv, SYMCRYPT_MARVIN32_SEED_SIZE, pNewEntropy);
+ CryptReleaseContext(hCryptProv, 0);
+
+ if(InterlockedCompareExchangeT(&pEntropy, (PBYTE) pNewEntropy, NULL) == NULL)
+ {
+ pNewEntropy.SuppressRelease();
+ }
+ }
+
+ return (PCBYTE) pEntropy;
+}
+
+
+void COMNlsHashProvider::CreateMarvin32Seed(INT64 additionalEntropy, PSYMCRYPT_MARVIN32_EXPANDED_SEED pExpandedMarvinSeed)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ INT64 *pEntropy = (INT64*) GetEntropy();
+ INT64 entropy;
+
+ entropy = *pEntropy ^ additionalEntropy;
+
+ SymCryptMarvin32ExpandSeed(pExpandedMarvinSeed, (PCBYTE) &entropy, SYMCRYPT_MARVIN32_SEED_SIZE);
+}
+#endif // FEATURE_RANDOMIZED_STRING_HASHING
diff --git a/src/vm/comutilnative.h b/src/vm/comutilnative.h
new file mode 100644
index 0000000000..6e652dc32d
--- /dev/null
+++ b/src/vm/comutilnative.h
@@ -0,0 +1,306 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+/*============================================================
+**
+** Header: COMUtilNative
+**
+**
+** Purpose: A dumping ground for classes which aren't large
+** enough to get their own file in the VM.
+**
+**
+===========================================================*/
+#ifndef _COMUTILNATIVE_H_
+#define _COMUTILNATIVE_H_
+
+#include "object.h"
+#include "util.hpp"
+#include "cgensys.h"
+#include "fcall.h"
+#include "qcall.h"
+#include "windows.h"
+#undef GetCurrentTime
+
+#ifndef FEATURE_CORECLR
+#include <winnls.h>
+#endif
+
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+#pragma warning(push)
+#pragma warning(disable:4324)
+#if !defined(CROSS_COMPILE) && defined(_TARGET_ARM_)
+#include "arm_neon.h"
+#endif
+#include "marvin32.h"
+#pragma warning(pop)
+#endif
+
+//
+//
+// PARSE NUMBERS
+//
+//
+
+#define MinRadix 2
+#define MaxRadix 36
+
+class ParseNumbers {
+
+ enum FmtFlags {
+ LeftAlign = 0x1, //Ensure that these conform to the values specified in the managed files.
+ CenterAlign = 0x2,
+ RightAlign = 0x4,
+ PrefixSpace = 0x8,
+ PrintSign = 0x10,
+ PrintBase = 0x20,
+ TreatAsUnsigned = 0x10,
+ PrintAsI1 = 0x40,
+ PrintAsI2 = 0x80,
+ PrintAsI4 = 0x100,
+ PrintRadixBase = 0x200,
+ AlternateForm = 0x400};
+
+public:
+
+ static INT32 GrabInts(const INT32 radix, __in_ecount(length) WCHAR *buffer, const int length, int *i, BOOL isUnsigned);
+ static INT64 GrabLongs(const INT32 radix, __in_ecount(length) WCHAR *buffer, const int length, int *i, BOOL isUnsigned);
+
+ static FCDECL5(LPVOID, IntToString, INT32 l, INT32 radix, INT32 width, CLR_CHAR paddingChar, INT32 flags);
+ static FCDECL5_VII(LPVOID, LongToString, INT64 l, INT32 radix, INT32 width, CLR_CHAR paddingChar, INT32 flags);
+ static FCDECL4(INT32, StringToInt, StringObject * s, INT32 radix, INT32 flags, INT32* currPos);
+ static FCDECL4(INT64, StringToLong, StringObject * s, INT32 radix, INT32 flags, INT32* currPos);
+};
+
+//
+//
+// EXCEPTION NATIVE
+//
+//
+
+void FreeExceptionData(ExceptionData *pedata);
+
+class ExceptionNative
+{
+private:
+ enum ExceptionMessageKind {
+ ThreadAbort = 1,
+ ThreadInterrupted = 2,
+ OutOfMemory = 3
+ };
+
+public:
+ static FCDECL1(FC_BOOL_RET, IsImmutableAgileException, Object* pExceptionUNSAFE);
+ static FCDECL1(FC_BOOL_RET, IsTransient, INT32 hresult);
+ static FCDECL3(StringObject *, StripFileInfo, Object *orefExcepUNSAFE, StringObject *orefStrUNSAFE, CLR_BOOL isRemoteStackTrace);
+ static void QCALLTYPE GetMessageFromNativeResources(ExceptionMessageKind kind, QCall::StringHandleOnStack retMesg);
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ static FCDECL0(VOID, PrepareForForeignExceptionRaise);
+ static FCDECL1(Object*, CopyStackTrace, Object* pStackTraceUNSAFE);
+ static FCDECL1(Object*, CopyDynamicMethods, Object* pDynamicMethodsUNSAFE);
+ static FCDECL3(VOID, GetStackTracesDeepCopy, Object* pExceptionObjectUnsafe, Object **pStackTraceUnsafe, Object **pDynamicMethodsUnsafe);
+ static FCDECL3(VOID, SaveStackTracesFromDeepCopy, Object* pExceptionObjectUnsafe, Object *pStackTraceUnsafe, Object *pDynamicMethodsUnsafe);
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+
+ // NOTE: caller cleans up any partially initialized BSTRs in pED
+ static void GetExceptionData(OBJECTREF, ExceptionData *);
+
+ // Note: these are on the PInvoke class to hide these from the user.
+ static FCDECL0(EXCEPTION_POINTERS*, GetExceptionPointers);
+ static FCDECL0(INT32, GetExceptionCode);
+};
+
+
+//
+// Buffer
+//
+class Buffer {
+public:
+
+ // BlockCopy
+ // This method copies from one primitive array to another based
+ // upon an offset into each and a byte count.
+ static FCDECL5(VOID, BlockCopy, ArrayBase *src, int srcOffset, ArrayBase *dst, int dstOffset, int count);
+ static FCDECL5(VOID, InternalBlockCopy, ArrayBase *src, int srcOffset, ArrayBase *dst, int dstOffset, int count);
+ static FCDECL2(FC_UINT8_RET, GetByte, ArrayBase *arrayUNSAFE, INT32 index);
+ static FCDECL3(VOID, SetByte, ArrayBase *arrayUNSAFE, INT32 index, UINT8 bData);
+ static FCDECL1(FC_BOOL_RET, IsPrimitiveTypeArray, ArrayBase *arrayUNSAFE);
+ static FCDECL1(INT32, ByteLength, ArrayBase *arrayUNSAFE);
+
+ static void QCALLTYPE MemMove(void *dst, void *src, size_t length);
+};
+
+#define MIN_GC_MEMORYPRESSURE_THRESHOLD 100000
+#define RELATIVE_GC_RATIO 8
+
+const UINT NEW_PRESSURE_COUNT = 4;
+
+class GCInterface {
+private:
+
+ static MethodDesc *m_pCacheMethod;
+ static UINT64 m_ulMemPressure;
+ static UINT64 m_ulThreshold;
+ static INT32 m_gc_counts[3];
+
+ static UINT64 m_addPressure[NEW_PRESSURE_COUNT];
+ static UINT64 m_remPressure[NEW_PRESSURE_COUNT];
+ static UINT m_iteration;
+
+public:
+ static CrstStatic m_MemoryPressureLock;
+
+ static FORCEINLINE UINT64 InterlockedAdd(UINT64 *pAugend, UINT64 addend);
+ static FORCEINLINE UINT64 InterlockedSub(UINT64 *pMinuend, UINT64 subtrahend);
+
+ static FCDECL0(int, GetGcLatencyMode);
+ static FCDECL1(int, SetGcLatencyMode, int newLatencyMode);
+ static FCDECL0(int, GetLOHCompactionMode);
+ static FCDECL1(void, SetLOHCompactionMode, int newLOHCompactionMode);
+ static FCDECL2(FC_BOOL_RET, RegisterForFullGCNotification, UINT32 gen2Percentage, UINT32 lohPercentage);
+ static FCDECL0(FC_BOOL_RET, CancelFullGCNotification);
+ static FCDECL1(int, WaitForFullGCApproach, int millisecondsTimeout);
+ static FCDECL1(int, WaitForFullGCComplete, int millisecondsTimeout);
+ static FCDECL1(int, GetGenerationWR, LPVOID handle);
+ static FCDECL1(int, GetGeneration, Object* objUNSAFE);
+
+ static
+ INT64 QCALLTYPE GetTotalMemory();
+
+ static
+ void QCALLTYPE Collect(INT32 generation, INT32 mode);
+
+ static
+ void QCALLTYPE WaitForPendingFinalizers();
+
+ static FCDECL0(int, GetMaxGeneration);
+ static FCDECL1(void, KeepAlive, Object *obj);
+ static FCDECL1(void, SuppressFinalize, Object *obj);
+ static FCDECL1(void, ReRegisterForFinalize, Object *obj);
+ static FCDECL2(int, CollectionCount, INT32 generation, INT32 getSpecialGCCount);
+
+ static
+ void QCALLTYPE _AddMemoryPressure(UINT64 bytesAllocated);
+
+ static
+ void QCALLTYPE _RemoveMemoryPressure(UINT64 bytesAllocated);
+
+ static void RemoveMemoryPressure(UINT64 bytesAllocated);
+ static void AddMemoryPressure(UINT64 bytesAllocated);
+ NOINLINE static void SendEtwRemoveMemoryPressureEvent(UINT64 bytesAllocated);
+ static void SendEtwAddMemoryPressureEvent(UINT64 bytesAllocated);
+
+ // New less sensitive implementation of Add/RemoveMemoryPressure:
+ static void CheckCollectionCount();
+ static void NewRemoveMemoryPressure(UINT64 bytesAllocated);
+ static void NewAddMemoryPressure(UINT64 bytesAllocated);
+
+private:
+ // Out-of-line helper to avoid EH prolog/epilog in functions that otherwise don't throw.
+ NOINLINE static void GarbageCollectModeAny(int generation);
+};
+
+class COMInterlocked
+{
+public:
+ static FCDECL2(INT32, Exchange, INT32 *location, INT32 value);
+ static FCDECL2_IV(INT64, Exchange64, INT64 *location, INT64 value);
+ static FCDECL2(LPVOID, ExchangePointer, LPVOID* location, LPVOID value);
+ static FCDECL3(INT32, CompareExchange, INT32* location, INT32 value, INT32 comparand);
+ static FCDECL4(INT32, CompareExchangeReliableResult, INT32* location, INT32 value, INT32 comparand, CLR_BOOL* succeeded);
+ static FCDECL3_IVV(INT64, CompareExchange64, INT64* location, INT64 value, INT64 comparand);
+ static FCDECL3(LPVOID, CompareExchangePointer, LPVOID* location, LPVOID value, LPVOID comparand);
+ static FCDECL2_IV(float, ExchangeFloat, float *location, float value);
+ static FCDECL2_IV(double, ExchangeDouble, double *location, double value);
+ static FCDECL3_IVV(float, CompareExchangeFloat, float *location, float value, float comparand);
+ static FCDECL3_IVV(double, CompareExchangeDouble, double *location, double value, double comparand);
+ static FCDECL2(LPVOID, ExchangeObject, LPVOID* location, LPVOID value);
+ static FCDECL3(LPVOID, CompareExchangeObject, LPVOID* location, LPVOID value, LPVOID comparand);
+ static FCDECL2(INT32, ExchangeAdd32, INT32 *location, INT32 value);
+ static FCDECL2_IV(INT64, ExchangeAdd64, INT64 *location, INT64 value);
+ static FCDECL2_VV(void, ExchangeGeneric, TypedByRef location, TypedByRef value);
+ static FCDECL3_VVI(void, CompareExchangeGeneric, TypedByRef location, TypedByRef value, LPVOID comparand);
+};
+
+class ManagedLoggingHelper {
+
+public:
+ static FCDECL6(INT32, GetRegistryLoggingValues, CLR_BOOL* bLoggingEnabled, CLR_BOOL* bLogToConsole, INT32 *bLogLevel, CLR_BOOL* bPerfWarnings, CLR_BOOL* bCorrectnessWarnings, CLR_BOOL* bSafeHandleStackTraces);
+};
+
+class ValueTypeHelper {
+public:
+ static FCDECL1(FC_BOOL_RET, CanCompareBits, Object* obj);
+ static FCDECL2(FC_BOOL_RET, FastEqualsCheck, Object* obj1, Object* obj2);
+ static FCDECL1(INT32, GetHashCode, Object* objRef);
+ static FCDECL1(INT32, GetHashCodeOfPtr, LPVOID ptr);
+};
+
+#ifndef FEATURE_CORECLR
+class SizedRefHandle
+{
+public:
+ static FCDECL1(OBJECTHANDLE, Initialize, Object* _obj);
+ static FCDECL1(VOID, Free, OBJECTHANDLE handle);
+ static FCDECL1(LPVOID, GetTarget, OBJECTHANDLE handle);
+ static FCDECL1(INT64, GetApproximateSize, OBJECTHANDLE handle);
+};
+
+typedef BOOL (*PFN_IS_NLS_DEFINED_STRING)(NLS_FUNCTION, DWORD, LPNLSVERSIONINFO, LPCWSTR, INT);
+typedef INT (*PFN_COMPARE_STRING_EX)(LPCWSTR, DWORD, LPCWSTR, INT, LPCWSTR, INT, LPNLSVERSIONINFO, LPVOID, LPARAM);
+typedef INT (*PFN_LC_MAP_STRING_EX)(LPCWSTR, DWORD, LPCWSTR, INT, LPWSTR, INT, LPNLSVERSIONINFO, LPVOID, LPARAM);
+typedef INT (*PFN_FIND_NLS_STRING_EX)(LPCWSTR, DWORD, LPCWSTR, INT, LPCWSTR, INT, LPINT, LPNLSVERSIONINFO, LPVOID, LPARAM);
+typedef INT (*PFN_COMPARE_STRING_ORDINAL)(LPCWSTR, INT, LPCWSTR, INT, BOOL);
+typedef BOOL (*PFN_GET_NLS_VERSION_EX)(NLS_FUNCTION, LPCWSTR, LPNLSVERSIONINFOEX);
+typedef INT (*PFN_FIND_STRING_ORDINAL)(DWORD, LPCWSTR, INT, LPCWSTR, INT, BOOL);
+
+class COMNlsCustomSortLibrary {
+public:
+ PFN_IS_NLS_DEFINED_STRING pIsNLSDefinedString;
+ PFN_COMPARE_STRING_EX pCompareStringEx;
+ PFN_LC_MAP_STRING_EX pLCMapStringEx;
+ PFN_FIND_NLS_STRING_EX pFindNLSStringEx;
+ PFN_COMPARE_STRING_ORDINAL pCompareStringOrdinal;
+ PFN_GET_NLS_VERSION_EX pGetNLSVersionEx;
+ PFN_FIND_STRING_ORDINAL pFindStringOrdinal;
+};
+#endif //!FEATURE_CORECLR
+
+typedef const BYTE * PCBYTE;
+
+class COMNlsHashProvider {
+public:
+ COMNlsHashProvider();
+
+ INT32 HashString(LPCWSTR szStr, SIZE_T strLen, BOOL forceRandomHashing, INT64 additionalEntropy);
+ INT32 HashSortKey(PCBYTE pSrc, SIZE_T cbSrc, BOOL forceRandomHashing, INT64 additionalEntropy);
+ INT32 HashiStringKnownLower80(LPCWSTR lpszStr, INT32 strLen, BOOL forceRandomHashing, INT64 additionalEntropy);
+
+#ifdef FEATURE_CORECLR
+ static COMNlsHashProvider s_NlsHashProvider;
+#endif // FEATURE_CORECLR
+
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+ void SetUseRandomHashing(BOOL useRandomHashing) { LIMITED_METHOD_CONTRACT; bUseRandomHashing = useRandomHashing; }
+ BOOL GetUseRandomHashing() { LIMITED_METHOD_CONTRACT; return bUseRandomHashing; }
+
+
+private:
+ BOOL bUseRandomHashing;
+ PBYTE pEntropy;
+ PCSYMCRYPT_MARVIN32_EXPANDED_SEED pDefaultSeed;
+
+ PCBYTE GetEntropy();
+ PCSYMCRYPT_MARVIN32_EXPANDED_SEED GetDefaultSeed();
+ void InitializeDefaultSeed();
+ void CreateMarvin32Seed(INT64 additionalEntropy, PSYMCRYPT_MARVIN32_EXPANDED_SEED pExpandedMarvinSeed);
+#endif // FEATURE_RANDOMIZED_STRING_HASHING
+};
+
+#endif // _COMUTILNATIVE_H_
diff --git a/src/vm/comwaithandle.cpp b/src/vm/comwaithandle.cpp
new file mode 100644
index 0000000000..3c56cb9c36
--- /dev/null
+++ b/src/vm/comwaithandle.cpp
@@ -0,0 +1,453 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*============================================================
+**
+** COMWaitHandle.cpp
+**
+** Purpose: Native methods on System.WaitHandle
+**
+**
+===========================================================*/
+#include "common.h"
+#include "object.h"
+#include "field.h"
+#include "excep.h"
+#include "comwaithandle.h"
+
+
+//-----------------------------------------------------------------------------
+// ObjArrayHolder : ideal for holding a managed array of items. Will run
+// the ACQUIRE method sequentially on each item. The ACQUIRE method
+// may fail; if it does, only release the ones we've acquired.
+// Note: If a GC occurs during the ACQUIRE or RELEASE methods, you'll have to
+// explicitly gc protect the objectref.
+//-----------------------------------------------------------------------------
+template <typename TYPE, void (*ACQUIRE)(TYPE), void (*RELEASEF)(TYPE)>
+class ObjArrayHolder
+{
+
+public:
+ ObjArrayHolder() {
+ LIMITED_METHOD_CONTRACT;
+ m_numAcquired = 0;
+ m_pValues = NULL;
+ }
+
+ // Assuming ACQUIRE can throw an exception, we must put this logic
+ // somewhere outside of the constructor. In C++, the destructor won't be
+ // run if the constructor didn't complete.
+ void Initialize(const unsigned int numElements, PTRARRAYREF* pValues) {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(m_numAcquired == 0);
+ m_numElements = numElements;
+ m_pValues = pValues;
+ for (unsigned int i=0; i<m_numElements; i++) {
+ TYPE value = (TYPE) (*m_pValues)->GetAt(i);
+ ACQUIRE(value);
+ m_numAcquired++;
+ }
+ }
+
+ ~ObjArrayHolder() {
+ WRAPPER_NO_CONTRACT;
+
+ GCX_COOP();
+ for (unsigned int i=0; i<m_numAcquired; i++) {
+ TYPE value = (TYPE) (*m_pValues)->GetAt(i);
+ RELEASEF(value);
+ }
+ }
+
+private:
+ unsigned int m_numElements;
+ unsigned int m_numAcquired;
+ PTRARRAYREF* m_pValues;
+
+ FORCEINLINE ObjArrayHolder<TYPE, ACQUIRE, RELEASEF> &operator=(const ObjArrayHolder<TYPE, ACQUIRE, RELEASEF> &holder)
+ {
+ _ASSERTE(!"No assignment allowed");
+ return *this;
+ }
+
+ FORCEINLINE ObjArrayHolder(const ObjArrayHolder<TYPE, ACQUIRE, RELEASEF> &holder)
+ {
+ _ASSERTE(!"No copy construction allowed");
+ }
+};
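+
+// Illustrative usage (not part of the original sources; AcquireFoo/ReleaseFoo are
+// hypothetical): acquire every element of a managed array, and if anything throws,
+// release only the elements that were actually acquired.
+//
+//     ObjArrayHolder<FOOREF, AcquireFoo, ReleaseFoo> holder;
+//     holder.Initialize(numElements, &pValues);  // may throw part-way through
+//     ...                                        // use the acquired items
+//     // the destructor releases exactly m_numAcquired items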
+
+void AcquireSafeHandleFromWaitHandle(WAITHANDLEREF wh)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ MODE_COOPERATIVE;
+ PRECONDITION(wh != NULL);
+ } CONTRACTL_END;
+
+ _ASSERTE(!wh->IsTransparentProxy());
+
+ SAFEHANDLEREF sh = wh->GetSafeHandle();
+ if (sh == NULL)
+ COMPlusThrow(kObjectDisposedException);
+ sh->AddRef();
+}
+
+void ReleaseSafeHandleFromWaitHandle(WAITHANDLEREF wh)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ PRECONDITION(wh != NULL);
+ } CONTRACTL_END;
+
+ SAFEHANDLEREF sh = wh->GetSafeHandle();
+ _ASSERTE(sh);
+ sh->Release();
+}
+
+typedef ObjArrayHolder<WAITHANDLEREF, AcquireSafeHandleFromWaitHandle, ReleaseSafeHandleFromWaitHandle> WaitHandleArrayHolder;
+
+INT64 AdditionalWait(INT64 sPauseTime, INT64 sTime, INT64 expDuration)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(g_PauseTime >= sPauseTime);
+
+ INT64 pauseTime = g_PauseTime - sPauseTime;
+ // No pause occurred since this wait started
+ if(pauseTime <= 0)
+ return 0;
+
+ INT64 actDuration = CLRGetTickCount64() - sTime;
+
+ // In case the CLR is paused in the middle of a wait, this method calculates how much
+ // the wait has to be adjusted to account for the CLR freeze. Essentially all
+ // pause duration has to be considered as "time that never existed".
+ //
+ // Two cases exist; consider that a 10 sec wait is issued.
+ // Case 1: All pauses happened before the wait completes. Hence just the
+ // pause time needs to be added back at the end of the wait
+ // 0 3 8 10
+ // |-----------|###################|------>
+ // 5-sec pause
+ // ....................>
+ // Additional 5 sec wait
+ // |=========================>
+ //
+ // Case 2: Pauses ended after the wait completes.
+ // 3 seconds of wait were left when the pause started at 7, so we need to add them back
+ // 0 7 10
+ // |---------------------------|###########>
+ // 5-sec pause 12
+ // ...................>
+ // Additional 3 sec wait
+ // |==================>
+ //
+ // Both cases can be expressed in the same calculation
+ // pauseTime: sum of all pauses that were triggered after the timer was started
+ // expDuration: expected duration of the wait (without any pauses), 10 in the example
+ // actDuration: time when the wait finished. Since the CLR is frozen during the pause, it's
+ // the max of the timeout and the pause end. In case 1 it's 10, in case 2 it's 12
+ INT64 additional = expDuration - (actDuration - pauseTime);
+ if(additional < 0)
+ additional = 0;
+
+ return additional;
+}
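+
+// Worked example (not part of the original sources), using the case-2 numbers above:
+// expDuration = 10, the pause starts at t=7 and lasts 5 seconds, so the wait returns
+// at t=12 (actDuration = 12) and pauseTime = 5. Then
+//     additional = expDuration - (actDuration - pauseTime) = 10 - (12 - 5) = 3
+// i.e. the caller still owes 3 seconds of real waiting, matching the diagram.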
+
+FCIMPL4(INT32, WaitHandleNative::CorWaitOneNative, SafeHandle* safeWaitHandleUNSAFE, INT32 timeout, CLR_BOOL hasThreadAffinity, CLR_BOOL exitContext)
+{
+ FCALL_CONTRACT;
+
+ INT32 retVal = 0;
+ SAFEHANDLEREF sh(safeWaitHandleUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_RET_1(sh);
+
+ _ASSERTE(sh != NULL);
+
+ Thread* pThread = GET_THREAD();
+
+ DWORD res = (DWORD) -1;
+
+ Context* targetContext;
+ targetContext = pThread->GetContext();
+ _ASSERTE(targetContext);
+ Context* defaultContext;
+ defaultContext = pThread->GetDomain()->GetDefaultContext();
+ _ASSERTE(defaultContext);
+#ifndef FEATURE_CORECLR
+ // DoAppropriateWait calls LeaveRuntime/EnterRuntime which may cause the current
+ // fiber to be re-scheduled.
+ ThreadAffinityAndCriticalRegionHolder affinityAndCriticalRegionHolder(hasThreadAffinity);
+#endif
+ SafeHandleHolder shh(&sh);
+ // Note that SafeHandle is a GC object, and RequestCallback and
+ // DoAppropriateWait work on an array of handles. Don't pass the address
+ // of the handle field - that's a GC hole. Instead, pass this temp
+ // array.
+ HANDLE handles[1];
+ handles[0] = sh->GetHandle();
+#ifdef FEATURE_REMOTING
+ if (exitContext != NULL &&
+ targetContext != defaultContext)
+ {
+ Context::WaitArgs waitOneArgs = {1, handles, TRUE, timeout, TRUE, &res};
+ Context::CallBackInfo callBackInfo = {Context::Wait_callback, (void*) &waitOneArgs};
+ Context::RequestCallBack(CURRENT_APPDOMAIN_ID,defaultContext, &callBackInfo);
+ }
+ else
+#else
+ _ASSERTE(exitContext == NULL || targetContext == defaultContext);
+#endif
+ {
+ // Support for pause/resume (FXFREEZE)
+ while(true)
+ {
+ INT64 sPauseTime = g_PauseTime;
+ INT64 sTime = CLRGetTickCount64();
+ res = pThread->DoAppropriateWait(1,handles,TRUE,timeout, WaitMode_Alertable /*alertable*/);
+ if(res != WAIT_TIMEOUT)
+ break;
+ timeout = (INT32)AdditionalWait(sPauseTime, sTime, timeout);
+ if(timeout == 0)
+ break;
+ }
+ }
+
+ retVal = res;
+
+#ifndef FEATURE_CORECLR
+ if (res == WAIT_OBJECT_0 && hasThreadAffinity) {
+ affinityAndCriticalRegionHolder.SuppressRelease();
+ }
+ else if(res == WAIT_ABANDONED_0) {
+ // WAIT_ABANDONED means the specified object is a mutex object that was not released by the thread
+ // that owned the mutex object before the owning thread terminated.
+ // Ownership of the mutex object is granted to the calling thread, and the mutex is set to nonsignaled.
+ _ASSERTE(hasThreadAffinity);
+ affinityAndCriticalRegionHolder.SuppressRelease();
+ }
+#endif
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
+
+FCIMPL4(INT32, WaitHandleNative::CorWaitMultipleNative, Object* waitObjectsUNSAFE, INT32 timeout, CLR_BOOL exitContext, CLR_BOOL waitForAll)
+{
+ FCALL_CONTRACT;
+
+ INT32 retVal = 0;
+ OBJECTREF waitObjects = (OBJECTREF) waitObjectsUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(waitObjects);
+
+ _ASSERTE(waitObjects);
+
+ Thread* pThread = GET_THREAD();
+
+ PTRARRAYREF pWaitObjects = (PTRARRAYREF)waitObjects; // array of objects on which to wait
+ int numWaiters = pWaitObjects->GetNumComponents();
+
+ // Note: this should really be FEATURE_COMINTEROP_APARTMENT_SUPPORT.
+ // Because it's not, CoreCLR will allow WaitAll on STA threads.
+ // But fixing this would be a breaking change at this point, since we already shipped
+ // SL 2 and 3 this way.
+ // We'll also check for FEATURE_CORECLR here, so that if we enable FEATURE_COMINTEROP
+ // on CoreCLR we won't break anyone.
+ // Perhaps in a future release we can fix this, if we aren't quite so concerned about
+ // compatibility....
+#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORECLR)
+ if (waitForAll && numWaiters > 1 && pThread->GetApartment() == Thread::AS_InSTA) {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_WaitAllSTAThread"));
+ }
+#endif // FEATURE_COMINTEROP && !FEATURE_CORECLR
+
+ WaitHandleArrayHolder arrayHolder;
+ arrayHolder.Initialize(numWaiters, (PTRARRAYREF*) &waitObjects);
+
+ pWaitObjects = (PTRARRAYREF)waitObjects; // array of objects on which to wait
+ HANDLE* internalHandles = (HANDLE*) _alloca(numWaiters*sizeof(HANDLE));
+ BOOL *hasThreadAffinity = (BOOL*) _alloca(numWaiters*sizeof(BOOL));
+
+ BOOL mayRequireThreadAffinity = FALSE;
+ for (int i=0;i<numWaiters;i++)
+ {
+ WAITHANDLEREF waitObject = (WAITHANDLEREF) pWaitObjects->m_Array[i];
+ _ASSERTE(waitObject != NULL);
+
+ // If the size of the array is 1 and m_handle is INVALID_HANDLE then WaitForMultipleObjectsEx will
+ // return ERROR_INVALID_HANDLE, but DoAppropriateWait will convert that to WAIT_OBJECT_0, i.e. success.
+ // This behavior seems wrong, but someone explicitly coded that condition so it must have been for a reason.
+ internalHandles[i] = waitObject->m_handle;
+
+ // m_hasThreadAffinity is set for Mutex only
+ hasThreadAffinity[i] = waitObject->m_hasThreadAffinity;
+ if (hasThreadAffinity[i]) {
+ mayRequireThreadAffinity = TRUE;
+ }
+ }
+
+ DWORD res = (DWORD) -1;
+ ThreadAffinityHolder affinityHolder(mayRequireThreadAffinity);
+ Context* targetContext;
+ targetContext = pThread->GetContext();
+ _ASSERTE(targetContext);
+ Context* defaultContext;
+ defaultContext = pThread->GetDomain()->GetDefaultContext();
+ _ASSERTE(defaultContext);
+#ifdef FEATURE_REMOTING
+ if (exitContext != NULL &&
+ targetContext != defaultContext)
+ {
+ Context::WaitArgs waitMultipleArgs = {numWaiters, internalHandles, waitForAll, timeout, TRUE, &res};
+ Context::CallBackInfo callBackInfo = {Context::Wait_callback, (void*) &waitMultipleArgs};
+ Context::RequestCallBack(CURRENT_APPDOMAIN_ID,defaultContext, &callBackInfo);
+ }
+ else
+#else
+ _ASSERTE(exitContext == NULL || targetContext == defaultContext);
+#endif
+ {
+ // Support for pause/resume (FXFREEZE)
+ while(true)
+ {
+ INT64 sPauseTime = g_PauseTime;
+ INT64 sTime = CLRGetTickCount64();
+ res = pThread->DoAppropriateWait(numWaiters, internalHandles, waitForAll, timeout, WaitMode_Alertable /*alertable*/);
+ if(res != WAIT_TIMEOUT)
+ break;
+ timeout = (INT32)AdditionalWait(sPauseTime, sTime, timeout);
+ if(timeout == 0)
+ break;
+ }
+ }
+
+ if (mayRequireThreadAffinity) {
+ if (waitForAll) {
+ if (res >= (DWORD) WAIT_OBJECT_0 && res < (DWORD) WAIT_OBJECT_0 + numWaiters) {
+ for (int i = 0; i < numWaiters; i ++) {
+ if (hasThreadAffinity[i]) {
+ Thread::BeginThreadAffinityAndCriticalRegion();
+ }
+ }
+ }
+ // If some mutex is abandoned
+ else if (res >= (DWORD) WAIT_ABANDONED_0 && res < (DWORD) WAIT_ABANDONED_0+numWaiters) {
+ for (int i = 0; i < numWaiters; i ++) {
+ if (hasThreadAffinity[i])
+ {
+ if (WaitForSingleObject(internalHandles[i],0) == WAIT_OBJECT_0)
+ {
+ BOOL result;
+ result = ReleaseMutex(internalHandles[i]);
+ _ASSERTE (result);
+ Thread::BeginThreadAffinityAndCriticalRegion();
+ }
+ }
+ }
+ }
+ }
+ else {
+ if ( res >= (DWORD)WAIT_OBJECT_0 && res < (DWORD)WAIT_OBJECT_0 + numWaiters) {
+ if (hasThreadAffinity[res - WAIT_OBJECT_0]) {
+ Thread::BeginThreadAffinityAndCriticalRegion();
+ }
+ }
+ else if (res >= (DWORD)WAIT_ABANDONED_0 && res < (DWORD)WAIT_ABANDONED_0 + numWaiters) {
+ _ASSERTE (hasThreadAffinity[res - WAIT_ABANDONED_0]);
+ Thread::BeginThreadAffinityAndCriticalRegion();
+ }
+ }
+ }
+ retVal = res;
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
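+
+// Illustrative note (not part of the original sources): WaitForMultipleObjects-style
+// APIs encode which handle completed in the return value, which is why the code above
+// tests ranges rather than single values:
+//
+//     if (res >= WAIT_OBJECT_0 && res < WAIT_OBJECT_0 + n)
+//         // handle (res - WAIT_OBJECT_0) was signaled
+//     else if (res >= WAIT_ABANDONED_0 && res < WAIT_ABANDONED_0 + n)
+//         // mutex (res - WAIT_ABANDONED_0) was abandoned by its owning thread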
+
+#ifndef FEATURE_CORECLR
+FCIMPL5(INT32, WaitHandleNative::CorSignalAndWaitOneNative, SafeHandle* safeWaitHandleSignalUNSAFE,SafeHandle* safeWaitHandleWaitUNSAFE, INT32 timeout, CLR_BOOL hasThreadAffinity, CLR_BOOL exitContext)
+{
+ FCALL_CONTRACT;
+
+ INT32 retVal = 0;
+ SAFEHANDLEREF shSignal(safeWaitHandleSignalUNSAFE);
+ SAFEHANDLEREF shWait(safeWaitHandleWaitUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(shSignal,shWait);
+
+ if(shSignal == NULL || shWait == NULL)
+ COMPlusThrow(kObjectDisposedException);
+
+ _ASSERTE(safeWaitHandleSignalUNSAFE != NULL);
+ _ASSERTE( safeWaitHandleWaitUNSAFE != NULL);
+
+
+ Thread* pThread = GET_THREAD();
+
+#ifdef FEATURE_COMINTEROP
+ if (pThread->GetApartment() == Thread::AS_InSTA) {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_SignalAndWaitSTAThread")); //<TODO> Change this message
+ }
+#endif
+
+ DWORD res = (DWORD) -1;
+
+ Context* targetContext = pThread->GetContext();
+ _ASSERTE(targetContext);
+ Context* defaultContext = pThread->GetDomain()->GetDefaultContext();
+ _ASSERTE(defaultContext);
+
+ // DoSignalAndWait calls LeaveRuntime/EnterRuntime which may cause the current
+ // fiber to be re-scheduled.
+ ThreadAffinityAndCriticalRegionHolder affinityAndCriticalRegionHolder(hasThreadAffinity);
+
+ SafeHandleHolder shhSignal(&shSignal);
+ SafeHandleHolder shhWait(&shWait);
+ // Don't pass the address of the handle field
+ // - that's a GC hole. Instead, pass this temp array.
+ HANDLE handles[2];
+ handles[0] = shSignal->GetHandle();
+ handles[1] = shWait->GetHandle();
+#ifdef FEATURE_REMOTING
+ if (exitContext != NULL &&
+ targetContext != defaultContext)
+ {
+ Context::SignalAndWaitArgs signalAndWaitArgs = {handles, timeout, TRUE, &res};
+ Context::CallBackInfo callBackInfo = {Context::SignalAndWait_callback, (void*) &signalAndWaitArgs};
+ Context::RequestCallBack(CURRENT_APPDOMAIN_ID,defaultContext, &callBackInfo);
+ }
+ else
+#else
+ _ASSERTE(exitContext == NULL || targetContext == defaultContext);
+#endif
+ {
+ res = pThread->DoSignalAndWait(handles,timeout,TRUE /*alertable*/);
+ }
+
+ if (res == WAIT_OBJECT_0 && hasThreadAffinity) {
+ affinityAndCriticalRegionHolder.SuppressRelease();
+ }
+ else if(res == WAIT_ABANDONED_0) {
+ _ASSERTE(hasThreadAffinity);
+ affinityAndCriticalRegionHolder.SuppressRelease();
+ }
+
+ retVal = res;
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
+#endif // !FEATURE_CORECLR
+
+
diff --git a/src/vm/comwaithandle.h b/src/vm/comwaithandle.h
new file mode 100644
index 0000000000..029dbfb537
--- /dev/null
+++ b/src/vm/comwaithandle.h
@@ -0,0 +1,29 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*============================================================
+**
+** Header: COMWaitHandle.h
+**
+** Purpose: Native methods on System.WaitHandle
+**
+**
+===========================================================*/
+
+#ifndef _COM_WAITABLE_HANDLE_H
+#define _COM_WAITABLE_HANDLE_H
+
+
+class WaitHandleNative
+{
+public:
+ static FCDECL4(INT32, CorWaitOneNative, SafeHandle* safeWaitHandleUNSAFE, INT32 timeout, CLR_BOOL hasThreadAffinity, CLR_BOOL exitContext);
+ static FCDECL4(INT32, CorWaitMultipleNative, Object* waitObjectsUNSAFE, INT32 timeout, CLR_BOOL exitContext, CLR_BOOL waitForAll);
+#ifndef FEATURE_CORECLR
+ static FCDECL5(INT32, CorSignalAndWaitOneNative, SafeHandle* safeWaitHandleSignalUNSAFE, SafeHandle* safeWaitHandleWaitUNSAFE, INT32 timeout, CLR_BOOL hasThreadAffinity, CLR_BOOL exitContext);
+#endif
+};
+#endif
diff --git a/src/vm/confighelper.cpp b/src/vm/confighelper.cpp
new file mode 100644
index 0000000000..a55db18c37
--- /dev/null
+++ b/src/vm/confighelper.cpp
@@ -0,0 +1,310 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// ConfigHelper.cpp
+//
+//*****************************************************************************
+//
+// XML Helper so that NodeFactory can be implemented in Managed code
+//
+
+
+
+#include "common.h"
+
+#include "confighelper.h"
+
+ConfigFactory::ConfigFactory(OBJECTREF *pFactory)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pFactory != NULL);
+ Initialize(pFactory);
+ AddRef();
+}
+
+HRESULT STDMETHODCALLTYPE ConfigFactory::NotifyEvent(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ XML_NODEFACTORY_EVENT iEvt)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_ANY;
+ NOTHROW;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return COR_E_STACKOVERFLOW)
+
+ EX_TRY
+ {
+ GetNotifyEventFunctionality()(iEvt);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ END_SO_INTOLERANT_CODE
+
+ return hr;
+}
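+
+// Illustrative sketch (not part of the original sources): every parser callback in
+// this file follows the same shape -- guard against stack overflow, then translate
+// any managed exception into an HRESULT for the native XML parser:
+//
+//     HRESULT hr = S_OK;
+//     BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return COR_E_STACKOVERFLOW)
+//     EX_TRY { /* call into the managed factory */ }
+//     EX_CATCH_HRESULT(hr);
+//     END_SO_INTOLERANT_CODE
+//     return hr;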
+
+//---------------------------------------------------------------------------
+HRESULT STDMETHODCALLTYPE ConfigFactory::BeginChildren(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ XML_NODE_INFO __RPC_FAR *pNodeInfo)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_ANY;
+ NOTHROW;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return COR_E_STACKOVERFLOW)
+ EX_TRY
+ {
+ GetBeginChildrenFunctionality()(pNodeInfo->dwSize,
+ pNodeInfo->dwSubType,
+ pNodeInfo->dwType,
+ pNodeInfo->fTerminal,
+ pNodeInfo->pwcText,
+ pNodeInfo->ulLen,
+ pNodeInfo->ulNsPrefixLen);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ END_SO_INTOLERANT_CODE
+
+ return hr;
+
+}
+
+//---------------------------------------------------------------------------
+HRESULT STDMETHODCALLTYPE ConfigFactory::EndChildren(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ BOOL fEmptyNode,
+ /* [in] */ XML_NODE_INFO __RPC_FAR *pNodeInfo)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_ANY;
+ NOTHROW;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return COR_E_STACKOVERFLOW)
+
+ EX_TRY
+ {
+ GetEndChildrenFunctionality()(fEmptyNode,
+ pNodeInfo->dwSize,
+ pNodeInfo->dwSubType,
+ pNodeInfo->dwType,
+ pNodeInfo->fTerminal,
+ pNodeInfo->pwcText,
+ pNodeInfo->ulLen,
+ pNodeInfo->ulNsPrefixLen);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ END_SO_INTOLERANT_CODE
+
+ return hr;
+}
+
+//---------------------------------------------------------------------------
+HRESULT STDMETHODCALLTYPE ConfigFactory::CreateNode(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ PVOID pNode,
+ /* [in] */ USHORT cNumRecs,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR * __RPC_FAR apNodeInfo)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_ANY;
+ NOTHROW;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return COR_E_STACKOVERFLOW)
+
+ EX_TRY
+ {
+ DWORD i;
+ WCHAR wstr[128];
+ WCHAR *pString = wstr;
+ DWORD dwString = sizeof(wstr)/sizeof(WCHAR);
+
+ for( i = 0; i < cNumRecs; i++) {
+ if ( apNodeInfo[i]->ulLen >= dwString) {
+ dwString = apNodeInfo[i]->ulLen+1;
+ if(pString != wstr) delete [] pString;
+ pString = new(nothrow) WCHAR[dwString];
+ IfNullGo(pString);
+ }
+
+ pString[apNodeInfo[i]->ulLen] = W('\0');
+ wcsncpy_s(pString, dwString, apNodeInfo[i]->pwcText, apNodeInfo[i]->ulLen);
+
+ if(i == 0) {
+ GetCreateNodeFunctionality()(apNodeInfo[i]->dwSize,
+ apNodeInfo[i]->dwSubType,
+ apNodeInfo[i]->dwType,
+ apNodeInfo[i]->fTerminal,
+ pString,
+ apNodeInfo[i]->ulLen,
+ apNodeInfo[i]->ulNsPrefixLen);
+ }
+ else {
+ GetCreateAttributeFunctionality()(apNodeInfo[i]->dwSize,
+ apNodeInfo[i]->dwSubType,
+ apNodeInfo[i]->dwType,
+ apNodeInfo[i]->fTerminal,
+ pString,
+ apNodeInfo[i]->ulLen,
+ apNodeInfo[i]->ulNsPrefixLen);
+ }
+
+ if (FAILED(hr))
+ break;
+ }
+ if(pString != wstr) delete [] pString;
+ErrExit:;
+ }
+ EX_CATCH_HRESULT(hr);
+
+ END_SO_INTOLERANT_CODE
+ return hr;
+}
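+
+// Illustrative sketch (not part of the original sources): CreateNode above sizes its
+// text buffer with a common "stack first, heap on demand" pattern ('needed' is
+// hypothetical shorthand for apNodeInfo[i]->ulLen):
+//
+//     WCHAR wstr[128];                    // covers the common case with no allocation
+//     WCHAR *pString = wstr;
+//     if (needed >= sizeof(wstr)/sizeof(WCHAR)) {
+//         pString = new (nothrow) WCHAR[needed + 1];   // rare case: grow onto the heap
+//     }
+//     ...
+//     if (pString != wstr) delete [] pString;          // only free what we allocated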
+
+
+
+STDAPI GetXMLObjectEx(IXMLParser **ppv);
+
+//
+//Helper routines to call into managed Node Factory
+//
+
+HRESULT ConfigNative::RunInternal(OBJECTREF *pFactory, LPCWSTR filename)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END
+
+ HRESULT hr = S_OK;
+ SafeComHolder<IXMLParser> pIXMLParser;
+ SafeComHolder<ConfigFactory> helperfactory;
+ SafeComHolder<IStream> pFile;
+ if (!pFactory){
+ return E_POINTER;
+ }
+
+ hr = CreateConfigStreamHelper(filename,&pFile);
+ if(FAILED(hr))
+ return hr;
+
+ hr = GetXMLObjectEx(&pIXMLParser);
+ if(FAILED(hr))
+ return hr;
+
+ helperfactory = new (nothrow)ConfigFactory(pFactory); // RefCount = 1
+ if ( ! helperfactory) {
+ return E_OUTOFMEMORY;
+ }
+
+ hr = pIXMLParser->SetInput(pFile); // filestream's RefCount=2
+ if(FAILED(hr))
+ return hr;
+
+ hr = pIXMLParser->SetFactory(helperfactory); // factory's RefCount=2
+ if(FAILED(hr))
+ return hr;
+
+ // On X86, we emit a call to LogUMTransition which needs us to be in preemptive GC mode
+ // Since we are done dealing with REF's after the call to ConfigFactory constructor,
+ // it is safe to switch to preemptive mode here
+ {
+ GCX_PREEMP();
+ hr = pIXMLParser->Run(-1);
+ }
+
+ if (hr == (HRESULT) XML_E_MISSINGROOT) // empty file
+ hr=S_OK;
+ return hr;
+}
+
+//
+// Entrypoint to return an Helper interface which Managed code can call to build managed Node factory
+//
+
+FCIMPL2(void, ConfigNative::RunParser, Object* refHandlerUNSAFE, StringObject* strFileNameUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refHandler = (OBJECTREF) refHandlerUNSAFE;
+ STRINGREF strFileName = (STRINGREF) strFileNameUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_2(refHandler, strFileName);
+
+ HRESULT hr;
+ WCHAR* pString;
+ int iString;
+ LPWSTR pFileName;
+ CQuickBytes qb;
+
+ if (refHandler == NULL) {
+ COMPlusThrowArgumentNull(W("handler"));
+ }
+
+ if (strFileName == NULL) {
+ COMPlusThrowArgumentNull(W("fileName"));
+ }
+
+ //Get string data.
+ strFileName->RefInterpretGetStringValuesDangerousForGC(&pString, &iString);
+
+ S_UINT32 bufSize = (S_UINT32(iString) + S_UINT32(1)) * S_UINT32(sizeof(WCHAR));
+ _ASSERTE(!bufSize.IsOverflow());
+ if(bufSize.IsOverflow())
+ {
+ ThrowWin32(ERROR_ARITHMETIC_OVERFLOW);
+ }
+
+ pFileName = (LPWSTR) qb.AllocThrows(bufSize.Value());
+ memcpy(pFileName, pString, bufSize.Value());
+
+ hr = RunInternal(&refHandler, pFileName);
+
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
diff --git a/src/vm/confighelper.h b/src/vm/confighelper.h
new file mode 100644
index 0000000000..c46368b92c
--- /dev/null
+++ b/src/vm/confighelper.h
@@ -0,0 +1,204 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// ConfigHelper.h
+//
+//*****************************************************************************
+//
+// These are unmanaged definitions of interfaces used to call Managed Node Factories.
+// If you make any changes, please make corresponding changes in \src\bcl\system\__xmlparser.cs
+//
+
+
+#ifndef _CONFIGHELPER_H
+#define _CONFIGHELPER_H
+
+#include <mscoree.h>
+#include <xmlparser.h>
+#include <mscorcfg.h>
+#include "unknwn.h"
+#include "../xmlparser/_reference.h"
+#include "../xmlparser/_unknown.h"
+#include "comdelegate.h"
+
+class ConfigFactory : public _unknown<IXMLNodeFactory, &IID_IXMLNodeFactory>
+{
+ #define ICONFIGHANDLER_CALLBACK_COUNT 6
+ OBJECTREF *m_pManagedFactory;
+ LPVOID eventCallbacks[ICONFIGHANDLER_CALLBACK_COUNT];
+
+ // We assume the offsets as per the object layout of ConfigTreeParser defined in CfgParser.cs
+ // Any changes made at either place must be propagated to the other
+ LPVOID GetCallbackAtOffset(DWORD dwOffset)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(dwOffset < ICONFIGHANDLER_CALLBACK_COUNT);
+ }
+ CONTRACTL_END;
+
+ PTRARRAYREF refAllDelegates = (PTRARRAYREF)ObjectToOBJECTREF((Object *)((*m_pManagedFactory)->GetPtrOffset(0)));
+ _ASSERTE(refAllDelegates->GetNumComponents()==ICONFIGHANDLER_CALLBACK_COUNT);
+ return COMDelegate::ConvertToCallback(refAllDelegates->GetAt(dwOffset));
+ }
+
+ void Initialize(OBJECTREF *pFactory)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pFactory != NULL);
+ }
+ CONTRACTL_END;
+
+ m_pManagedFactory = pFactory;
+ EX_TRY
+ {
+ for(int i=0; i<ICONFIGHANDLER_CALLBACK_COUNT; i++)
+ {
+ eventCallbacks[i] = GetCallbackAtOffset(i);
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ typedef VOID (STDMETHODCALLTYPE *NotifyEventCallback)(
+ /* [in] */ XML_NODEFACTORY_EVENT iEvt);
+
+ NotifyEventCallback GetNotifyEventFunctionality()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(eventCallbacks[0] != NULL);
+ return (NotifyEventCallback)eventCallbacks[0];
+ }
+
+ typedef VOID (STDMETHODCALLTYPE *BeginChildrenCallback)(
+ /* [in] */ DWORD dwSize,
+ /* [in] */ DWORD dwSubType,
+ /* [in] */ DWORD dwType,
+ /* [in] */ BOOL fTerminal,
+ /* [in] */ LPCWSTR pwcText,
+ /* [in] */ DWORD ulLen,
+ /* [in] */ DWORD ulNsPrefixLen);
+
+ BeginChildrenCallback GetBeginChildrenFunctionality()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(eventCallbacks[1] != NULL);
+ return (BeginChildrenCallback)eventCallbacks[1];
+ }
+
+ typedef VOID (STDMETHODCALLTYPE *EndChildrenCallback)(
+ /* [in] */ BOOL fEmpty,
+ /* [in] */ DWORD dwSize,
+ /* [in] */ DWORD dwSubType,
+ /* [in] */ DWORD dwType,
+ /* [in] */ BOOL fTerminal,
+ /* [in] */ LPCWSTR pwcText,
+ /* [in] */ DWORD ulLen,
+ /* [in] */ DWORD ulNsPrefixLen);
+
+ EndChildrenCallback GetEndChildrenFunctionality()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(eventCallbacks[2] != NULL);
+ return (EndChildrenCallback)eventCallbacks[2];
+ }
+
+ typedef VOID (STDMETHODCALLTYPE *ErrorCallback)(
+ /* [in] */ DWORD dwSize,
+ /* [in] */ DWORD dwSubType,
+ /* [in] */ DWORD dwType,
+ /* [in] */ BOOL fTerminal,
+ /* [in] */ LPCWSTR pwcText,
+ /* [in] */ DWORD ulLen,
+ /* [in] */ DWORD ulNsPrefixLen);
+
+ ErrorCallback GetErrorFunctionality()
+ {
+ _ASSERTE(eventCallbacks[3] != NULL);
+ return (ErrorCallback)eventCallbacks[3];
+ }
+
+ typedef VOID (STDMETHODCALLTYPE *CreateNodeCallback)(
+ /* [in] */ DWORD dwSize,
+ /* [in] */ DWORD dwSubType,
+ /* [in] */ DWORD dwType,
+ /* [in] */ BOOL fTerminal,
+ /* [in] */ LPCWSTR pwcText,
+ /* [in] */ DWORD ulLen,
+ /* [in] */ DWORD ulNsPrefixLen);
+
+ CreateNodeCallback GetCreateNodeFunctionality()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(eventCallbacks[4] != NULL);
+ return (CreateNodeCallback)eventCallbacks[4];
+ }
+
+ typedef VOID (STDMETHODCALLTYPE *CreateAttributeCallback)(
+ /* [in] */ DWORD dwSize,
+ /* [in] */ DWORD dwSubType,
+ /* [in] */ DWORD dwType,
+ /* [in] */ BOOL fTerminal,
+ /* [in] */ LPCWSTR pwcText,
+ /* [in] */ DWORD ulLen,
+ /* [in] */ DWORD ulNsPrefixLen);
+
+ CreateAttributeCallback GetCreateAttributeFunctionality()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(eventCallbacks[5] != NULL);
+ return (CreateAttributeCallback)eventCallbacks[5];
+ }
+ #undef ICONFIGHANDLER_CALLBACK_COUNT
+
+public:
+ ConfigFactory(OBJECTREF *pFactory);
+
+ HRESULT STDMETHODCALLTYPE NotifyEvent(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ XML_NODEFACTORY_EVENT iEvt);
+
+ HRESULT STDMETHODCALLTYPE BeginChildren(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR pNodeInfo);
+
+ HRESULT STDMETHODCALLTYPE EndChildren(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ BOOL fEmptyNode,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR pNodeInfo);
+
+ HRESULT STDMETHODCALLTYPE Error(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ HRESULT hrErrorCode,
+ /* [in] */ USHORT cNumRecs,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR * __RPC_FAR apNodeInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return hrErrorCode;
+ }
+
+ HRESULT STDMETHODCALLTYPE CreateNode(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ PVOID pNodeParent,
+ /* [in] */ USHORT cNumRecs,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR * __RPC_FAR apNodeInfo);
+};
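+
+// Illustrative note (not part of the original sources): the factory converts each
+// managed delegate to a native function pointer once, at construction time, so every
+// parser event is a single indexed call:
+//
+//     eventCallbacks[i] = COMDelegate::ConvertToCallback(refAllDelegates->GetAt(i));
+//     ...
+//     GetCreateNodeFunctionality()(...);   // i.e. ((CreateNodeCallback)eventCallbacks[4])(...)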
+
+class ConfigNative
+{
+ static HRESULT RunInternal(OBJECTREF *pFactory, LPCWSTR filename);
+
+public:
+ static FCDECL2(void, RunParser, Object* refHandlerUNSAFE, StringObject* strFileNameUNSAFE);
+};
+
+#endif // _CONFIGHELPER_H
diff --git a/src/vm/constrainedexecutionregion.cpp b/src/vm/constrainedexecutionregion.cpp
new file mode 100644
index 0000000000..b820c77348
--- /dev/null
+++ b/src/vm/constrainedexecutionregion.cpp
@@ -0,0 +1,2266 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// Methods to support the implementation of Constrained Execution Regions (CERs). This includes logic to walk the IL of methods to
+// determine the statically determinable call graph and prepare each submethod (jit, prepopulate generic dictionaries etc.,
+// everything needed to ensure that the runtime won't generate implicit failure points during the execution of said call graph).
+//
+
+//
+
+
+#include "common.h"
+#include <openum.h>
+#include <mdaassistants.h>
+#include <constrainedexecutionregion.h>
+#include <ecmakey.h>
+#include <typestring.h>
+#include <jitinterface.h>
+
+#ifdef FEATURE_PREJIT
+#include <compile.h>
+#endif
+
+
+// Internal debugging support. Would be nice to use the common logging code but we've run out of unique facility codes and the debug
+// info we spew out is of interest to a limited audience anyhow.
+#ifdef _DEBUG
+
+#define CER_NOISY_PREPARE 0x00000001
+#define CER_NOISY_RESTORE 0x00000002
+#define CER_NOISY_CONTRACTS 0x00000004
+#define CER_NOISY_WARNINGS 0x00000008
+#define CER_NOISY_NGEN_STATS 0x00000010
+
+DWORD g_dwCerLogActions = 0xffffffff;
+DWORD GetCerLoggingOptions()
+{
+ WRAPPER_NO_CONTRACT;
+ if (g_dwCerLogActions != 0xffffffff)
+ return g_dwCerLogActions;
+ return g_dwCerLogActions = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_CerLogging);
+}
+
+#define CER_LOG(_reason, _msg) do { if (GetCerLoggingOptions() & CER_NOISY_##_reason) printf _msg; } while (false)
+#else
+#define CER_LOG(_reason, _msg)
+#endif
+
+
+// Enumeration used to determine the number of inline data bytes included inside a given IL instruction (except for the case of a
+// SWITCH instruction, where a dynamic calculation is required).
+enum
+{
+ ArgBytes_InlineNone = 0, // no inline args
+ ArgBytes_InlineVar = 2, // local variable (U2 (U1 if Short on))
+ ArgBytes_InlineI = 4, // a signed integer (I4 (I1 if Short on))
+ ArgBytes_InlineR = 8, // a real number (R8 (R4 if Short on))
+ ArgBytes_InlineBrTarget = 4, // branch target (I4 (I1 if Short on))
+ ArgBytes_InlineI8 = 8,
+ ArgBytes_InlineMethod = 4, // method token (U4)
+ ArgBytes_InlineField = 4, // field token (U4)
+ ArgBytes_InlineType = 4, // type token (U4)
+ ArgBytes_InlineString = 4, // string TOKEN (U4)
+ ArgBytes_InlineSig = 4, // signature tok (U4)
+ ArgBytes_InlineRVA = 4, // ldptr token (U4)
+ ArgBytes_InlineTok = 4, // a meta-data token of unknown type (U4)
+ ArgBytes_InlineSwitch = 4, // count (U4), pcrel1 (U4) .... pcrelN (U4)
+ ArgBytes_ShortInlineVar = 1,
+ ArgBytes_ShortInlineI = 1,
+ ArgBytes_ShortInlineR = 4,
+ ArgBytes_ShortInlineBrTarget = 1
+};
+
+// Build an array of argument byte counts as described above by extracting the 'args' field of each entry in opcode.def.
+#define OPDEF(c, s, pop, push, args, type, l, s1, s2, ctrl) ArgBytes_##args,
+const BYTE g_rOpArgs[] = {
+#include <opcode.def>
+};
+#undef OPDEF
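+
+// Illustrative note (not part of the original sources): this is the classic "X-macro"
+// technique. Given opcode.def entries of the shape
+//     OPDEF(CEE_LDARG_0, "ldarg.0", Pop0, Push1, InlineNone, ...)
+// redefining OPDEF to expand to ArgBytes_##args turns each entry into the matching
+// enum value (here ArgBytes_InlineNone), so g_rOpArgs[opcode] yields the operand byte
+// count for that opcode.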
+
+
+// Global cache of methods and their reliability contract state.
+PtrHashCache *g_pMethodContractCache = NULL;
+
+
+// Private method forward references.
+bool IsPcrReference(Module *pModule, mdToken tkMethod);
+MethodContext *TokenToMethodDesc(Module *pModule, mdToken tokMethod, SigTypeContext *pTypeContext);
+TypeHandle GetTypeFromMemberDefOrRefOrSpecThrowing(Module *pModule,
+ mdToken tokMethod,
+ SigTypeContext *pTypeContext);
+
+bool MethodCallGraphPreparer::ShouldGatherExplicitCERCallInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // If we're partially processing a method body (at the top of the call graph), we need to fetch exception handling
+ // information to determine possible ranges of interesting IL (potentially each finally and catch clause).
+ //
+ // And if we are probing for stack overflow, we need to know if the explicit CER region contains any calls out, in
+ // which case we want to probe in the call to PrepareConstrainedExecutionRegions. This will ensure that we don't
+ // take an SO in boundary code and not be able to call the CER. When stack probing is disabled, we rip the process
+ // if we take an SO anywhere but managed, or if we take an SO with a CER on the stack. For NGEN images, we need
+ // to always probe because stack probing may be enabled in the runtime, but if we haven't probed in the NGEN image
+ // then we could take an SO in boundary code and not be able to crawl the stack to know that we've skipped a CER and
+ // need to tear the process.
+ //
+ // Additionally, if the MDA for illegal PrepareConstrainedRegions call positioning is enabled we gather this information for
+ // all methods in the graph.
+ return !m_fEntireMethod
+#ifdef MDA_SUPPORTED
+ || MDA_GET_ASSISTANT(IllegalPrepareConstrainedRegion)
+#endif
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ || m_fNgen
+#endif
+ || g_pConfig->ProbeForStackOverflow();
+}
+
+MethodCallGraphPreparer::MethodCallGraphPreparer(MethodDesc *pRootMD, SigTypeContext *pRootTypeContext, bool fEntireMethod, bool fExactTypeContext, bool fIgnoreVirtualCERCallMDA)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pRootMD));
+ PRECONDITION(CheckPointer(pRootTypeContext));
+ } CONTRACTL_END;
+
+ // Canonicalize value type unboxing stubs into their underlying method desc.
+ if (pRootMD->IsUnboxingStub())
+ pRootMD = pRootMD->GetWrappedMethodDesc();
+
+ m_pRootMD = pRootMD;
+ m_pRootTypeContext = pRootTypeContext;
+ m_fEntireMethod = fEntireMethod;
+ m_fExactTypeContext = fExactTypeContext;
+ m_fIgnoreVirtualCERCallMDA = fIgnoreVirtualCERCallMDA;
+
+ m_pEHClauses = NULL;
+ m_cEHClauses = 0;
+ m_pCerPrepInfo = NULL;
+ m_pMethodDecoder = NULL;
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ m_fNgen = false;
+#endif
+
+ m_pThread = GetThread();
+ m_fPartialPreparation = false;
+ m_fMethodHasCallsWithinExplicitCer = false;
+}
+
+// Walk the call graph of the method given by pRootMD (and type context in pRootTypeContext which provides instantiation information
+// for generic methods/classes).
+//
+// If fEntireMethod is true then the entire body of pRootMD is scanned for callsites, otherwise we assume that one or more CER
+// exception handlers exist in the method and only the finally and catch blocks of such handlers are scanned for graph roots.
+//
+// Each method we come across in the call graph (excluding late bound invocation destinations precipitated by virtual or interface
+// calls) is jitted and has any generic dictionary information we can determine at jit time prepopulated. This includes implicit
+// cctor invocations. If this method is called at ngen time we will attach extra fixup information to the affected method to ensure
+// that fixing up the root method of the graph will cause all methods in the graph to be fixed up at that point also.
+//
+// Some generic dictionary entries may not be prepopulated if unbound type variables exist at the root of the call tree. Such cases
+// will be ignored (as for the virtual/interface dispatch case we assume the caller will use an out-of-band mechanism to pre-prepare
+// these entries explicitly).
+bool MethodCallGraphPreparer::Run()
+{
+ STANDARD_VM_CONTRACT;
+
+ // Avoid recursion while jitting methods for another preparation.
+ if (!m_pThread->GetCerPreparationState()->CanPreparationProceed(m_pRootMD, m_pRootTypeContext))
+ return TRUE; // Assume the worst
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ // Determine if we're being called in order to provide an ngen image. This impacts whether we actually prepare methods and the
+ // type of tracking data we produce. Ideally we'd call GetAppDomain()->IsCompilationDomain() here, but we have to deal with the
+ // problem of ngen'ing mscorlib. Mscorlib code is always shared and some of it is run before the compilation domain is fully
+ // created (so we'd end up with some methods being prepared without saving any ngen metadata to that effect). So instead we
+ // check to see whether this is an ngen process. This will catch those first few mscorlib methods.
+ m_fNgen = IsCompilationProcess() != FALSE;
+
+ // We keep a hash table of CERs we've processed on the module object of the root method. See if any work has been done on this
+ // CER before. We store different data for ngen and non-ngen cases.
+ if (m_fNgen) {
+
+ // Pretty simple in ngen case -- if we've stored a context record for this method at all then we've already processed it.
+ if (m_pRootMD->GetModule()->IsNgenCerRootMethod(m_pRootMD)) {
+ m_pCerPrepInfo = m_pRootMD->GetModule()->GetCerPrepInfo(m_pRootMD);
+
+ // We always store CerPrepInfo if the method has calls, so if we haven't stored
+ // anything then we know it doesn't have any calls
+ return (m_pCerPrepInfo && m_pCerPrepInfo->m_fMethodHasCallsWithinExplicitCer);
+ }
+ } else
+#endif
+ {
+ // The non-ngen case (normal jit, call to PrepareMethod etc).
+ m_pCerPrepInfo = m_pRootMD->GetModule()->GetCerPrepInfo(m_pRootMD);
+ if (m_pCerPrepInfo) {
+
+ // Check for the "everything's done" case.
+ if (m_pCerPrepInfo->m_fFullyPrepared)
+ return m_pCerPrepInfo->m_fMethodHasCallsWithinExplicitCer;
+
+ // Check for the "we can't do anything" case (see below for descriptions of that).
+ if (m_pCerPrepInfo->m_fRequiresInstantiation && !m_fExactTypeContext)
+ return m_pCerPrepInfo->m_fMethodHasCallsWithinExplicitCer;
+
+ // Check for the "need to prepare per-instantiation, but we've already done this one" case.
+ if (m_pCerPrepInfo->m_fRequiresInstantiation) {
+ HashDatum sDatum;
+ if (m_pCerPrepInfo->m_sIsInitAtInstHash.GetValue(m_pRootTypeContext, &sDatum))
+ return m_pCerPrepInfo->m_fMethodHasCallsWithinExplicitCer;
+ }
+ }
+ }
+
+ // We can't deal with generic methods or methods on generic types that may have some representative type parameters in their
+ // instantiation (i.e. some reference types indicated by Object rather than the exact type). The jit will tend to pass us these
+ // since it shares code between instantiations over reference types. We can't prepare methods like these completely -- even
+ // though we can jit all the method bodies the code might require generic dictionary information at the class or method level
+ // that is populated at runtime and can introduce failure points. So we reject such methods immediately (they will need to be
+ // prepared at non-jit time by an explicit call to PrepareMethod with a fully instantiated method).
+ //
+ // In the case where the type context is marked as suspect (m_fExactTypeContext == false) there are a number of possibilities for
+ // bad methods the jit will pass us:
+ // 1) We're passed a MethodDesc that is shared between instantiations (bogus because exact method descs are never shared).
+ // 2) We're passed a MethodDesc that's an instantiating stub (bogus because non-shared methods don't need this).
+ // 3) We're passed a MethodDesc that has generic variables in its instantiations (I've seen this during ngen).
+ //
+ // Technically we could do a little better than this -- we could determine whether any of the representative type parameters are
+ // actually used within the CER call graph itself. But this would require us to understand the IL at a much deeper level (i.e.
+ // parse every instruction that could take a type or member spec and pull apart those specs to see if a type var is used). Plus
+ // we couldn't make this determination until we've prepared the entire region and the result is rather brittle from the code
+ // author's point of view (i.e. we might prepare a CER automatically one day but stop doing so after some relatively subtle changes
+ // in the source code).
+ m_fPartialPreparation = m_pRootMD->IsSharedByGenericInstantiations() || m_pRootMD->IsInstantiatingStub() || m_pRootMD->ContainsGenericVariables();
+ if (!m_fExactTypeContext && m_fPartialPreparation) {
+#ifdef MDA_SUPPORTED
+ MDA_TRIGGER_ASSISTANT(OpenGenericCERCall, ReportViolation(m_pRootMD));
+#endif
+ CER_LOG(WARNINGS, ("CER: %s has open type parameters and can't be pre-prepared\n", m_pRootMD->m_pszDebugMethodName));
+
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ if (!m_fNgen)
+#endif
+ {
+ // Set up a prep info structure for this method if it's not there already (the create method takes care of races).
+ if (m_pCerPrepInfo == NULL)
+ m_pCerPrepInfo = m_pRootMD->GetModule()->CreateCerPrepInfo(m_pRootMD);
+
+ // We may be racing to update the structure at this point but that's OK since the flag we're setting is never cleared once
+ // it's set and is always guaranteed to be set before we rely on its value (setting it here is just a performance thing,
+ // letting us early-out on multiple attempts to prepare this CER from the jit).
+ m_pCerPrepInfo->m_fRequiresInstantiation = true;
+ }
+
+ if (! g_pConfig->ProbeForStackOverflow())
+ {
+ return FALSE;
+ }
+ m_pCerPrepInfo = m_pRootMD->GetModule()->GetCerPrepInfo(m_pRootMD);
+
+ // We always store CerPrepInfo if the method has calls, so if we haven't stored
+ // anything then we know it doesn't have any calls
+ return (m_pCerPrepInfo && m_pCerPrepInfo->m_fMethodHasCallsWithinExplicitCer);
+
+ }
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ // If we've been called for a shared generic root method and the exact instantiation (this can happen because ngen lets code
+ // execute under some circumstances) we don't currently support saving this information in the ngen image (we don't have a
+ // format for the instantiation info). We just ignore the preparation in this case (it will be prepared at runtime).
+ if (m_fNgen && m_fPartialPreparation)
+ return TRUE;
+#endif
+
+ // Prevent inlining of the root method (otherwise it's hard to tell where ThreadAbort exceptions should be delayed). Note that
+ // MethodDesc::SetNotInline is thread safe.
+ m_pRootMD->SetNotInline(true);
+
+ // Remember the checkpoint for all of our allocations. Keep it in a holder so they'll be unwound if we throw an exception past
+ // here.
+ CheckPointHolder sCheckpoint(m_pThread->m_MarshalAlloc.GetCheckpoint());
+
+ // Push the current method as the one and only method to process so far.
+ m_sLeftToProcess.Push(MethodContext::PerThreadAllocate(m_pRootMD, m_pRootTypeContext));
+
+ MethodContext *pContext = NULL; // The current MethodContext we're processing
+
+ // Iterate until we run out of methods to process.
+ while ((pContext = m_sLeftToProcess.Pop()) != NULL) {
+
+ // Restore the MD if necessary. In particular, if this is an instantiating stub and the wrapped MethodDesc could
+ // not be hard bound, then we'll need to restore that pointer before getting it.
+ pContext->m_pMethodDesc->CheckRestore();
+
+ // Transfer the method to the already seen stack immediately (we don't want to loop infinitely in the case of method
+ // recursion).
+ m_sAlreadySeen.Push(pContext);
+
+ // Check if the enclosing class requires a static class constructor to be run. If so, we need to prepare that method as
+ // though it were any other call.
+ if (pContext->m_pMethodDesc->GetMethodTable()->HasClassConstructor()) {
+
+ // Decode target method into MethodDesc and new SigTypeContext.
+ // The type context is easy to derive here : .cctors never have any method type parameters and the class instantiations
+ // are those of the method we're currently parsing, so can be simply copied down.
+ MethodDesc *pCctor = pContext->m_pMethodDesc->GetCanonicalMethodTable()->GetClassConstructor();
+ SigTypeContext sCctorTypeContext(pCctor, pContext->m_sTypeContext.m_classInst, Instantiation());
+ MethodContext *pCctorContext = MethodContext::PerThreadAllocate(pCctor, &sCctorTypeContext);
+
+ // Only process this cctor the first time we find it in this call graph.
+ if (!m_sAlreadySeen.IsInStack(pCctorContext) && !m_sLeftToProcess.IsInStack(pCctorContext))
+ m_sLeftToProcess.Push(pCctorContext);
+ }
+
+ // Skip further processing if this method doesn't have an IL body (note that we assume the method we entered with was IL, so
+ // we don't need to bother with partial method processing).
+ if (!pContext->m_pMethodDesc->IsIL()) {
+ _ASSERTE(m_fEntireMethod);
+ continue;
+ }
+
+ // Locate the IL body of the current method. May have to account for the fact that the current method desc is an
+ // instantiating stub and burrow down for the real method desc.
+ MethodDesc *pRealMethod = pContext->m_pMethodDesc;
+ if (pRealMethod->IsInstantiatingStub()) {
+ _ASSERTE(!pRealMethod->ContainsGenericVariables());
+ pRealMethod = pRealMethod->GetWrappedMethodDesc();
+ }
+
+ COR_ILMETHOD* pILHeader = pRealMethod->GetILHeader();
+
+ // Skip malformed methods. (We should always have a method with IL for well-formed images here.)
+ if (pILHeader == NULL) {
+ continue;
+ }
+
+ COR_ILMETHOD_DECODER method(pILHeader);
+ m_pMethodDecoder = &method;
+
+ // We want to re-fetch the EH clauses for the current method so that we can scan its handlers
+ GetEHClauses();
+
+ LookForInterestingCallsites(pContext);
+
+ // Whatever we've done, we're definitely not processing the top-level method at this point (so we'll be processing full
+ // method bodies from now on).
+ m_fEntireMethod = true;
+ }
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ if (!m_fNgen)
+#endif
+ {
+ // Set up a prep info structure for this method if it's not there already (the create method takes care of races).
+ // This needs to happen before we start JITing the methods as part of preparation. The JIT needs to know
+ // about the CER root in CEEInfo::canTailCall.
+ if (m_pCerPrepInfo == NULL)
+ m_pCerPrepInfo = m_pRootMD->GetModule()->CreateCerPrepInfo(m_pRootMD);
+ }
+
+ // Prevent infinite recursion by recording on the thread which roots we're currently preparing.
+ ThreadPreparingCerHolder sCerHolder(this);
+
+ // Once we get here we've run out of methods to process and have recorded each method we visited in the m_sAlreadySeen stack. Now
+ // it's time to prepare each of these methods (jit, prepopulate generic dictionaries etc.).
+ PrepareMethods();
+
+ return RecordResults();
+}
+
+
+// Determine whether a CER preparation for the given root method (with type context for generic instantiation
+// if necessary) can go ahead given any current preparation already being performed on the current thread.
+BOOL MethodCallGraphPreparer::CanPreparationProceed(MethodDesc * pMD, SigTypeContext * pTypeContext)
+{
+ WRAPPER_NO_CONTRACT;
+ MethodCallGraphPreparer * pCurrPrep = this;
+ while (pCurrPrep)
+ {
+ // Is the preparation request for the root method of the current preparer?
+ if (pMD == pCurrPrep->m_pRootMD && SigTypeContext::Equal(pTypeContext, pCurrPrep->m_pRootTypeContext))
+ {
+ // We're already preparing this root, return FALSE to turn the request into a no-op and avoid
+ // infinite recursion.
+ return FALSE;
+ }
+
+ pCurrPrep = pCurrPrep->m_pNext;
+ }
+
+ // We found no previous preparation for the same root, so the request can proceed.
+ return TRUE;
+}
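+
+// Illustrative scenario (hypothetical methods): preparing root A causes some method B in its call
+// graph to be jitted, and jitting B in turn asks to prepare A. The inner request finds A already on
+// the chain of in-flight preparers above and returns FALSE, so it becomes a no-op instead of
+// recursing forever.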
+
+// Methods that push and pop thread local state used to determine if a re-entrant preparation request should
+// complete immediately as a no-op (because it would lead to an infinite recursion) or should proceed
+// recursively.
+
+//static
+void MethodCallGraphPreparer::BeginPrepareCerForHolder(MethodCallGraphPreparer * pPrepState)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ Thread * pThread = pPrepState->m_pThread;
+ pPrepState->m_pNext = pThread->GetCerPreparationState();
+ pThread->SetCerPreparationState(pPrepState);
+}
+
+//static
+void MethodCallGraphPreparer::EndPrepareCerForHolder(MethodCallGraphPreparer * pPrepState)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ Thread * pThread = pPrepState->m_pThread;
+ _ASSERTE(pThread && pThread->GetCerPreparationState() == pPrepState);
+ pThread->SetCerPreparationState(pPrepState->m_pNext);
+}
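+
+// Taken together, these two helpers maintain a simple intrusive stack of in-flight preparers,
+// threaded through m_pNext and rooted in the thread's CER preparation state (sketch: innermost
+// preparer -> enclosing preparer -> NULL). CanPreparationProceed above walks this chain from the
+// innermost preparer outwards.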
+
+
+void MethodCallGraphPreparer::GetEHClauses()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (! ShouldGatherExplicitCERCallInfo())
+ {
+ return;
+ }
+
+ m_cEHClauses = 0;
+ m_pEHClauses = NULL; // we use the StackingAllocator, so don't have to delete the previous storage
+
+ COR_ILMETHOD_SECT_EH const * pEH = m_pMethodDecoder->EH;
+ if (pEH == NULL || pEH->EHCount() == 0)
+ {
+ return;
+ }
+
+ m_cEHClauses = pEH->EHCount();
+ m_pEHClauses = new (&m_pThread->m_MarshalAlloc) EHClauseRange[m_cEHClauses];
+
+ for (DWORD i = 0; i < m_cEHClauses; i++)
+ {
+ COR_ILMETHOD_SECT_EH_CLAUSE_FAT sEHClauseBuffer;
+ const COR_ILMETHOD_SECT_EH_CLAUSE_FAT *pEHClause;
+
+ pEHClause = (COR_ILMETHOD_SECT_EH_CLAUSE_FAT*)pEH->EHClause(i, &sEHClauseBuffer);
+
+ // The algorithm below assumes handlers are located after their associated try blocks. If this turns out to be a
+ // false assumption we need to move to a two pass technique (or defer callsite handling in some other fashion until
+ // we've scanned the IL for all calls to our preparation marker method).
+ if (!(pEHClause->GetTryOffset() < pEHClause->GetHandlerOffset()))
+ {
+ COMPlusThrowHR(COR_E_NOTSUPPORTED, IDS_EE_NOTSUPPORTED_CATCHBEFORETRY);
+ }
+
+ m_pEHClauses[i].m_dwTryOffset = pEHClause->GetTryOffset();
+ m_pEHClauses[i].m_dwHandlerOffset = pEHClause->GetHandlerOffset();
+ m_pEHClauses[i].m_dwHandlerLength = pEHClause->GetHandlerLength();
+ m_pEHClauses[i].m_fActive = false;
+
+ //printf("Try: %u Handler: %u -> %u\n", pEHClause->GetTryOffset(), pEHClause->GetHandlerOffset(), pEHClause->GetHandlerOffset() + pEHClause->GetHandlerLength() - 1);
+ }
+
+}
+
+void MethodCallGraphPreparer::MarkEHClauseActivatedByCERCall(MethodContext *pContext, BYTE *pbIL, DWORD cbIL)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD dwOffset = (DWORD)(SIZE_T)((pbIL + ArgBytes_InlineTok) - (BYTE*)m_pMethodDecoder->Code);
+
+ // Additionally we need to cope with the fact that VB and C# (for debug builds) can generate NOP instructions
+ // between the PCR call and the beginning of the try block. So we're potentially looking for the
+ // intersection of the try with a range of instructions. Count the number of consecutive NOP instructions
+ // which follow the call.
+ DWORD dwLength = 0;
+ BYTE *pbTmpIL = pbIL + ArgBytes_InlineTok;
+ while (pbTmpIL < (pbIL + cbIL) && *pbTmpIL++ == CEE_NOP)
+ {
+ dwLength++;
+ }
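+
+ // Illustrative IL shape we're matching here (offsets hypothetical):
+ //
+ // IL_0000: call void System.Runtime.CompilerServices.RuntimeHelpers::PrepareConstrainedRegions()
+ // IL_0005: nop (debug-build padding)
+ // IL_0006: nop
+ // .try starting at IL_0006, i.e. within [dwOffset, dwOffset + dwLength]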
+
+ bool fMatched = false;
+ for (DWORD i = 0; i < m_cEHClauses; i++)
+ {
+ if (m_pEHClauses[i].m_dwTryOffset >= dwOffset &&
+ m_pEHClauses[i].m_dwTryOffset <= (dwOffset + dwLength))
+ {
+ fMatched = true;
+ m_pEHClauses[i].m_fActive = true;
+ }
+ }
+ if (!fMatched)
+ {
+#if defined(_DEBUG) || defined(MDA_SUPPORTED)
+ DWORD dwPCROffset = (DWORD)(SIZE_T)((pbIL - 1) - (BYTE*)m_pMethodDecoder->Code);
+#endif // defined(_DEBUG) || defined(MDA_SUPPORTED)
+#ifdef MDA_SUPPORTED
+ MDA_TRIGGER_ASSISTANT(IllegalPrepareConstrainedRegion, ReportViolation(pContext->m_pMethodDesc, dwPCROffset));
+#endif
+ CER_LOG(WARNINGS, ("CER: %s: Invalid call to PrepareConstrainedRegions() at IL +%04X\n",
+ pContext->m_pMethodDesc->m_pszDebugMethodName, dwPCROffset));
+ }
+}
+
+bool MethodCallGraphPreparer::CheckIfCallsiteWithinCER(DWORD dwOffset)
+{
+ STANDARD_VM_CONTRACT;
+
+ //printf("Found: %s at %u\n", pCallTarget->m_pMethodDesc->m_pszDebugMethodName, dwOffset);
+
+ // Search all the EH regions we know about.
+ for (DWORD i = 0; i < m_cEHClauses; i++)
+ {
+ bool fCallsiteWithinCER = false;
+ if (! m_pEHClauses[i].m_fActive)
+ {
+ // clause not CER-active so skip it
+ continue;
+ }
+ if (dwOffset >= (m_pEHClauses[i].m_dwHandlerOffset + m_pEHClauses[i].m_dwHandlerLength))
+ {
+ // offset beyond clause, so skip it
+ continue;
+ }
+ if (dwOffset >= m_pEHClauses[i].m_dwTryOffset)
+ {
+ // For stack probing optimization, we care if either the try or the handler has a call. If neither
+ // does, then we can optimize the probe out.
+ m_fMethodHasCallsWithinExplicitCer = true;
+ if (dwOffset >= m_pEHClauses[i].m_dwHandlerOffset)
+ {
+ fCallsiteWithinCER = true;
+ }
+ }
+ // Only terminate if we got a positive result (i.e. the callsite is within a hardened clause).
+ // We can't terminate early in the negative case because the callsite could be nested
+ // in another EH region which may be hardened.
+ if (fCallsiteWithinCER == true)
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+// Iterate through the body of the method looking for interesting call sites.
+void MethodCallGraphPreparer::LookForInterestingCallsites(MethodContext *pContext)
+{
+ STANDARD_VM_CONTRACT;
+
+ BYTE *pbIL = (BYTE*)m_pMethodDecoder->Code;
+ DWORD cbIL = m_pMethodDecoder->GetCodeSize();
+
+ while (cbIL) {
+
+ // Read the IL op.
+ DWORD dwOp = *pbIL++; cbIL--;
+
+ // Handle prefix codes (only CEE_PREFIX1 is legal so far).
+ if (dwOp == CEE_PREFIX1) {
+ if (!cbIL)
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ dwOp = 256 + *pbIL++; cbIL--;
+ } else if (dwOp >= CEE_PREFIX7)
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
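+
+ // (Two-byte opcodes are encoded as a 0xFE prefix byte followed by a second byte, e.g. the
+ // constrained. prefix; mapping them to 256 + <second byte> gives every opcode a unique index
+ // into the per-opcode argument-length table consulted below.)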
+
+ // We're interested in NEWOBJ, JMP, CALL and CALLVIRT (we can't trace through CALLI). We include CALLVIRT because C#
+ // routinely calls non-virtual instance methods this way in order to get 'this'-pointer null checking. We prepare NEWOBJ
+ // because that covers the corner case of value types which can be constructed with no failure path.
+ if (dwOp == CEE_CALL || dwOp == CEE_CALLVIRT || dwOp == CEE_NEWOBJ || dwOp == CEE_JMP) {
+
+ if (cbIL < sizeof(DWORD))
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+
+ // Decode target method into MethodDesc and new SigTypeContext.
+ mdToken tkCallTarget = (mdToken)GET_UNALIGNED_VAL32(pbIL);
+ MethodContext *pCallTarget = TokenToMethodDesc(pContext->m_pMethodDesc->GetModule(), tkCallTarget, &pContext->m_sTypeContext);
+
+ // Check whether we've found a call to our own preparation marker method.
+ if (pCallTarget->m_pMethodDesc == g_pPrepareConstrainedRegionsMethod) {
+
+ if (ShouldGatherExplicitCERCallInfo()) {
+ // If we're preparing a partial method these callsites are significant (we mark which EH clauses are now
+ // 'activated' by proximity to this marker method call). Look for EH regions that are 'activated' by the call to
+ // PrepareConstrainedRegions by comparing the IL offset of the start of the try to the offset immediately after
+ // the callsite (remember to account for the rest of the CALLVIRT instruction we haven't skipped yet).
+ MarkEHClauseActivatedByCERCall(pContext, pbIL, cbIL);
+ }
+
+ // Record the fact that we found a method in the CER which is the root of a sub-CER. This is important since the
+ // rude thread abort protection algorithm relies on root CER methods being marked as such.
+ pContext->m_fRoot = true;
+ }
+
+ // Determine if this was really a virtual call (we discard those since we can't reliably determine the call target).
+ bool fNonVirtualCall = dwOp == CEE_CALL || !pCallTarget->m_pMethodDesc->IsVirtual() || pCallTarget->m_pMethodDesc->IsFinal();
+
+ // When we're only processing qualified catch / finally handlers, we need to compute whether this call site
+ // lands in one of them. The callsite is always within a CER if we are processing the full method.
+ // If we have stack overflow probing on, we also check whether the CER try or finally makes any calls.
+ bool fCallsiteWithinCerInThisFunction = false;
+ if (!m_fEntireMethod || g_pConfig->ProbeForStackOverflow()) {
+ DWORD dwOffset = (DWORD)(SIZE_T)((pbIL - 1) - (BYTE*)m_pMethodDecoder->Code);
+ fCallsiteWithinCerInThisFunction = CheckIfCallsiteWithinCER(dwOffset);
+ }
+ bool fCallsiteWithinCer = m_fEntireMethod || fCallsiteWithinCerInThisFunction;
+
+ // Check for the presence of some sort of reliability contract (on the method, class or assembly). This will
+ // determine whether we log an error, ignore the method or treat it as part of the prepared call graph.
+ ReliabilityContractLevel eLevel = RCL_UNKNOWN;
+ if (fNonVirtualCall && // Ignore virtual calls
+ fCallsiteWithinCer && // And calls outside CERs
+ !m_sAlreadySeen.IsInStack(pCallTarget) && // And methods we've seen before
+ !m_sLeftToProcess.IsInStack(pCallTarget) && // And methods we've already queued for processing
+ (eLevel = CheckForReliabilityContract(pCallTarget->m_pMethodDesc)) >= RCL_PREPARE_CONTRACT) // And unreliable methods
+ m_sLeftToProcess.Push(pCallTarget); // Otherwise add this method to the list to process
+ else if (fCallsiteWithinCer) {
+#if defined(_DEBUG) || defined(MDA_SUPPORTED)
+ DWORD dwOffset = (DWORD)(SIZE_T)((pbIL - 1) - (BYTE*)m_pMethodDecoder->Code);
+#endif // defined(_DEBUG) || defined(MDA_SUPPORTED)
+ if (eLevel == RCL_NO_CONTRACT) {
+ // Method was sufficiently unreliable for us to warn interested users that something may be amiss. Do this
+ // through MDA logging.
+#ifdef MDA_SUPPORTED
+ MDA_TRIGGER_ASSISTANT(InvalidCERCall, ReportViolation(pContext->m_pMethodDesc, pCallTarget->m_pMethodDesc, dwOffset));
+#endif
+ CER_LOG(WARNINGS, ("CER: %s +%04X -> %s: weak contract\n", pContext->ToString(), dwOffset, pCallTarget->ToString()));
+ } else if (!fNonVirtualCall && !m_fIgnoreVirtualCERCallMDA) {
+ // Warn users about virtual calls in CERs (so they can go back and consider which target methods need to be
+ // prepared ahead of time).
+#ifdef MDA_SUPPORTED
+ MDA_TRIGGER_ASSISTANT(VirtualCERCall, ReportViolation(pContext->m_pMethodDesc, pCallTarget->m_pMethodDesc, dwOffset));
+#endif
+ CER_LOG(WARNINGS, ("CER: %s +%04X -> %s: virtual call\n", pContext->ToString(), dwOffset, pCallTarget->ToString()));
+ }
+ }
+ }
+
+ // Skip the rest of the current IL instruction. Look up the table built statically at the top of this module for most
+ // instructions, but CEE_SWITCH requires special processing (the length of that instruction depends on a count DWORD
+ // embedded right after the opcode).
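+ // Worked example: a switch with 3 targets is encoded as the opcode byte followed by a count DWORD
+ // and three jump-offset DWORDs, so after reading the opcode we skip (1 + 3) * sizeof(DWORD) = 16
+ // bytes of operand.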
+ if (dwOp == CEE_SWITCH) {
+ if (cbIL < sizeof(DWORD))
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ DWORD dwTargets = GET_UNALIGNED_VAL32(pbIL);
+ if (dwTargets >= (MAXDWORD / sizeof(DWORD)))
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT); // multiplication below would overflow
+ DWORD cbSwitch = (1 + dwTargets) * sizeof(DWORD);
+ if (cbIL < cbSwitch)
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ pbIL += cbSwitch;
+ cbIL -= cbSwitch;
+ } else {
+ if (dwOp >= _countof(g_rOpArgs))
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ DWORD cbOp = g_rOpArgs[dwOp];
+ if (cbIL < cbOp)
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ pbIL += cbOp;
+ cbIL -= cbOp;
+ }
+
+ } // End of IL parsing loop
+}
+
+void MethodCallGraphPreparer::PrepareMethods()
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _DEBUG
+ DWORD dwCount = 0;
+ if (GetCerLoggingOptions())
+ {
+ CER_LOG(PREPARE, ("---------------------------------------------------------------\n"));
+ SString ssMethod;
+ TypeString::AppendMethodInternal(ssMethod, m_pRootMD, TypeString::FormatNamespace | TypeString::FormatStubInfo);
+ CER_LOG(PREPARE, ("Preparing from %S\n", ssMethod.GetUnicode()));
+ }
+#endif
+
+ MethodContext *pContext; // The current MethodContext we're processing
+
+ while ((pContext = m_sAlreadySeen.Pop()) != NULL) {
+ MethodDesc *pMD = pContext->m_pMethodDesc;
+
+#ifndef CROSSGEN_COMPILE
+ // Jitting. Don't need to do this for the ngen case.
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ if (!m_fNgen)
+#endif
+ {
+ // Also skip the jit for the root method in the activated-from-jit case (where this would result in a recursive
+ // jit). We'd cope with this just fine; the main reason for this logic is to avoid unbalancing some profiler event
+ // counts that upset some of our test cases. This is safe in the face of multiple instantiations of the same method
+ // because in the jit-activated case (where we're told the root type context is not exact) we exit early if the root
+ // method desc isn't 'unique' (i.e. independent of the type context).
+ if (m_fExactTypeContext || pMD != m_pRootMD) {
+
+ // Jit the method we traced.
+ if (pMD->IsPointingToPrestub())
+ {
+ pMD->EnsureActive();
+ pMD->DoPrestub(NULL);
+ }
+
+ // If we traced an instantiating stub we need to jit the wrapped (real) method as well.
+ if (pMD->IsInstantiatingStub()) {
+ _ASSERTE(!pMD->ContainsGenericVariables());
+ MethodDesc *pRealMD = pMD->GetWrappedMethodDesc();
+ if (pRealMD->IsPointingToPrestub())
+ {
+ pMD->EnsureActive();
+ pRealMD->DoPrestub(NULL);
+ }
+ }
+ }
+
+ // Remember sub-CER root methods for further processing in RecordResults. We need to build CerPrepInfo structures for
+ // these just the same as top-level CERs since we may wander in to them by a route that doesn't include the top-level CER
+ // and the thread abort deflection algorithm relies on each CER root method being marked by a CerPrepInfo. Defer this
+ // processing to RecordResults since we aren't guaranteed to have prepared all the methods of the sub-graph at this
+ // point.
+ if (pContext->m_fRoot && pMD != m_pRootMD)
+ m_sPersist.Push(pContext);
+ }
+#endif // CROSSGEN_COMPILE
+
+ // Prepare generic dictionaries (both class and method as needed). We do this even in the ngen scenario, trying to get
+ // as many slots filled as possible. By the looks of it, it's possible that not all of these entries will make it across
+ // to runtime (the fixup code seems to give up on some of the more complex entries; we're not sure of the details). But we'll
+ // do the best we can here to hopefully minimize any real work on the other side.
+
+ // Don't use the direct PrepopulateDictionary method on MethodTable here, it takes binding considerations into account
+ // (which we don't care about).
+ DictionaryLayout *pClassDictLayout = pMD->GetClass()->GetDictionaryLayout();
+ if (pClassDictLayout) {
+ // Translate the representative method table we can find from our method desc into an exact instantiation using the
+ // type context we have.
+ MethodTable *pMT = TypeHandle(pMD->GetMethodTable()).Instantiate(pContext->m_sTypeContext.m_classInst).AsMethodTable();
+
+ pMT->GetDictionary()->PrepopulateDictionary(NULL, pMT, false);
+
+ // The dictionary may have overflowed in which case we need to prepopulate the jit's lookup cache as well.
+ PrepopulateGenericHandleCache(pClassDictLayout, NULL, pMT);
+ }
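+
+ // Illustrative example (hypothetical types): if pMD is List<T>.Sort() being prepared for
+ // List<string>, the method table reachable from the shared method desc may be the representative
+ // List<__Canon>, and instantiating it over the captured class instantiation yields the exact
+ // List<string> method table whose dictionary we want to fill.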
+
+ // Don't use the direct PrepopulateDictionary method on MethodDesc here, it appears to use a representative class
+ // instantiation (and we have the exact one handy).
+ DictionaryLayout *pMethDictLayout = pMD->GetDictionaryLayout();
+ if (pMethDictLayout) {
+ pMD->GetMethodDictionary()->PrepopulateDictionary(pMD, NULL, false);
+
+ // The dictionary may have overflowed in which case we need to prepopulate the jit's lookup cache as well.
+ PrepopulateGenericHandleCache(pMethDictLayout, pMD, NULL);
+ }
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ // Keep some of the method contexts around for the ngen case (the ones that might still need fixup at runtime). We'll
+ // write them into a persisted data structure in the next step.
+ // @todo: We use a horrible workaround here to get round the fact that while ngen'ing mscorlib we may prepare some of its
+ // methods before we've had a chance to start up the compilation domain (mscorlib code is shared and used by the ngen
+ // process itself). So we can't blindly call NeedsRestore() on an mscorlib method since that code asserts we're in the
+ // compilation domain. Instead, if we're in the ngen process and we're outside the compilation domain we're going to
+ // assume that the method doesn't need restoration. This only affects a handful of methods (six at last count, all to do
+ // with security safe handles or some CERs in remoting).
+ if (m_fNgen) {
+ if (GetAppDomain() == NULL ||
+ !GetAppDomain()->IsCompilationDomain() ||
+ !(GetAppDomain()->ToCompilationDomain()->canCallNeedsRestore()) ||
+ !(GetAppDomain()->ToCompilationDomain()->GetTargetImage()->CanPrerestoreEagerBindToMethodDesc(pMD, NULL))||
+ pMD->HasClassOrMethodInstantiation() ||
+ pMD->IsNDirect() ||
+ pMD->IsComPlusCall() ||
+ pMD->IsFCall() ||
+ pContext->m_fRoot)
+ m_sPersist.Push(pContext);
+ }
+#endif
+
+#ifdef _DEBUG
+ CER_LOG(PREPARE, (" %s\n", pContext->ToString()));
+ dwCount++;
+#endif
+ }
+
+#ifdef _DEBUG
+ CER_LOG(PREPARE, ("Prepared a total of %u methods\n", dwCount));
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ if (m_fNgen)
+ CER_LOG(PREPARE, ("Saved data for %u of them in the ngen image\n", m_sPersist.GetCount()));
+#endif
+ CER_LOG(PREPARE, ("---------------------------------------------------------------\n"));
+#endif
+}
+
+// Common code used in creating/looking up a CerPrepInfo and initializing/updating it.
+void InitPrepInfo(MethodDesc *pMD, SigTypeContext *pTypeContext, bool fMethodHasCallsWithinExplicitCer)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ } CONTRACTL_END;
+
+ // Lookup or allocate the CerPrepInfo.
+ CerPrepInfo *pInfo = pMD->GetModule()->CreateCerPrepInfo(pMD);
+
+ pInfo->m_fMethodHasCallsWithinExplicitCer = fMethodHasCallsWithinExplicitCer;
+
+ // Work out if this was a partial preparation.
+ bool fPartialPreparation = pMD->IsSharedByGenericInstantiations() ||
+ pMD->IsInstantiatingStub() ||
+ pMD->ContainsGenericVariables();
+
+ // Simple case first: if this isn't a partial preparation (no pesky unbound type vars to worry about), then the method is
+ // now fully prepared.
+ if (!fPartialPreparation) {
+ pInfo->m_fFullyPrepared = true;
+ return;
+ }
+
+ // Else we know we require per-instantiation initialization. We need to update a hash table to record the preparation we did
+ // in this case, and that requires taking a mutex. We could check that nobody beat us to it first, but that will hardly ever
+ // happen, so it's not really worth it. So just acquire the mutex right away.
+ CrstHolder sHolder(pMD->GetModule()->GetCerCrst());
+
+ pInfo->m_fRequiresInstantiation = true;
+
+ // Add an entry to a hash that records which instantiations we've prep'd for (again, only if someone hasn't beaten us).
+ HashDatum sDatum;
+ if (!pInfo->m_sIsInitAtInstHash.GetValue(pTypeContext, &sDatum))
+ {
+ pInfo->m_sIsInitAtInstHash.InsertKeyAsValue(pTypeContext);
+ }
+}
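+
+// Illustrative example of the partial preparation path above (hypothetical types): preparing
+// Foo<T>.Bar() through its shared (__Canon) code can't fill the instantiation-specific slots for
+// every possible T, so rather than marking the method fully prepared we record the specific type
+// context we prepared (say, Foo<string>) in m_sIsInitAtInstHash.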
+
+bool MethodCallGraphPreparer::RecordResults()
+{
+ STANDARD_VM_CONTRACT;
+
+ // Preparation has been successful, record what we've done in a manner consistent with whether we're ngen'ing or running for
+ // real.
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ // If we're ngen'ing an image we save our progress as a list of method contexts that might need restoration at runtime (since
+ // even with prejitting there are some things that need to be prepared at runtime). This list goes into a per module table (the
+ // module in question being that of the root method in the CER).
+ if (m_fNgen) {
+
+ // We have the list of MethodContexts ready, but they're in cheap storage that will go away once we exit this method.
+ // We have to copy them to heap memory, and since we know exactly how many there are we can allocate a
+ // single array with a more compact form of MethodContext for each element. We allocate an extra sentinel value for the end
+ // of the list. This means we can store just a pointer to the list without a count (the code that accesses this list cares
+ // about keeping the list heads compact and densely packed and doesn't care about counting the elements in the list).
+ DWORD cContexts = m_sPersist.GetCount();
+ LoaderHeap *pHeap = m_pRootMD->GetAssembly()->GetLowFrequencyHeap();
+ MethodContextElement *pContexts = (MethodContextElement*)(void*)pHeap->AllocMem(S_SIZE_T(sizeof(MethodContextElement)) * (S_SIZE_T(cContexts) + S_SIZE_T(1)));
+ DWORD i = 0;
+
+ MethodContext *pContext; // The current MethodContext we're processing
+ while ((pContext = m_sPersist.Pop()) != NULL) {
+ pContexts[i].m_pMethodDesc.SetValue(pContext->m_pMethodDesc);
+
+ MethodTable * pExactMT = NULL;
+ if (!pContext->m_sTypeContext.m_classInst.IsEmpty())
+ {
+ pExactMT = TypeHandle(pContext->m_pMethodDesc->GetMethodTable()).Instantiate(pContext->m_sTypeContext.m_classInst).AsMethodTable();
+ _ASSERTE(pExactMT->HasInstantiation());
+ }
+ else
+ {
+ _ASSERTE(!pContext->m_pMethodDesc->GetMethodTable()->HasInstantiation());
+ }
+ pContexts[i].m_pExactMT.SetValue(pExactMT);
+
+ i++;
+
+ // Keep the context round for a little longer if the method in question was the root of a sub-CER.
+ if (pContext->m_fRoot)
+ m_sRootMethods.Push(pContext);
+ }
+
+ // The sentinel entry terminating the list is the final, already-zeroed element (loader heap allocations are zero-initialized).
+ _ASSERTE(i == cContexts);
+
+ // Add list representing this CER to the per-module table (keyed by root method desc).
+ m_pRootMD->GetModule()->AddCerListToRootTable(m_pRootMD, pContexts);
+
+ // If this did have a call from an explicit PCER range, create a PrepInfo for it so that we can
+ // quickly grab that information later when we jit that method. This allows us to optimize the probe
+ // away if there are no calls from the PCER range. This is an issue when we've prepared a method
+ // as part of a CER call from another method, but haven't ngened that method yet. When we get
+ // around to finally ngening that method, we want to be able to optimize the CER probe out if
+ // we can, but don't want to reprepare the method.
+ if (g_pConfig->ProbeForStackOverflow() && m_fMethodHasCallsWithinExplicitCer)
+ {
+ if (m_pCerPrepInfo == NULL)
+ m_pCerPrepInfo = m_pRootMD->GetModule()->CreateCerPrepInfo(m_pRootMD);
+ m_pCerPrepInfo->m_fMethodHasCallsWithinExplicitCer = TRUE;
+ }
+
+ // We need to be careful with sub-CERs in the ngen case. In the jit case they're dealt with automatically (preparing a
+ // super-CER always completely prepares a sub-CER). But in the ngen case we potentially need to run further preparation
+ // steps at the point that a CER root is executed for the first time. If the sub-root is encountered before the super-root
+ // then the sub-CER won't have been prepared correctly.
+ // We solve this simply by recursively running this routine over the methods we noted were sub-roots earlier (this list
+ // doesn't include the main root). We could potentially do a little better than this given that we've calculated the
+ // super-graph, but this is complicated somewhat by the fact that we don't retain the graph structure (i.e. we can't extract
+ // sub-graphs easily) and the effort seems wasted just to avoid a little CPU time and stack space just for the ngen creation
+ // scenario.
+ while ((pContext = m_sRootMethods.Pop()) != NULL)
+ {
+ MethodCallGraphPreparer mgcp(pContext->m_pMethodDesc, &pContext->m_sTypeContext, false, false);
+ mgcp.Run();
+ }
+
+ return m_fMethodHasCallsWithinExplicitCer;
+ }
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+ // This is the runtime (non-ngen case). Record our progress in an info structure placed in a hash table hung off the module
+ // which owns the root method desc in the CER. The methods which create this info structure handle race conditions (to
+ // ensure we don't leak memory or data), but the updates to the info structure itself might not require any serialization
+ // (the values are 'latched' -- recomputation should yield the same result). The exception is updates to the more complex
+ // data fields (lists and hash tables), which require serialization to prevent corruption of the basic data structure.
+
+ // Process sub-CER roots first. We need to build CerPrepInfo structures for these just the same as top-level CERs since we may
+ // wander in to them by a route that doesn't include the top-level CER and the thread abort deflection algorithm relies on each
+ // CER root method being marked by a CerPrepInfo.
+ MethodContext *pContext;
+ while ((pContext = m_sPersist.Pop()) != NULL) {
+ _ASSERTE(pContext->m_fRoot);
+
+ // @todo: need to flow fMethodHasCallsWithinExplicitCer information through method contexts. For now just make a
+ // conservative, safe choice.
+ InitPrepInfo(pContext->m_pMethodDesc, &pContext->m_sTypeContext, true);
+ }
+
+ // Now process the top-level CER.
+ InitPrepInfo(m_pRootMD, m_pRootTypeContext, m_fMethodHasCallsWithinExplicitCer);
+
+ return m_fMethodHasCallsWithinExplicitCer;
+}
+
+// Determines whether the given method contains a CER root that can be pre-prepared (i.e. prepared at jit time).
+bool ContainsPrePreparableCerRoot(MethodDesc *pMD)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ } CONTRACTL_END;
+
+ // Deal with exotic cases (non-IL methods and the like).
+ if (!pMD->IsIL() || pMD->IsAbstract())
+ return false;
+
+ // And cases where we can't jit prepare (because the code is shared between instantiations).
+ if (pMD->IsSharedByGenericInstantiations() || pMD->IsInstantiatingStub() || pMD->ContainsGenericVariables())
+ return false;
+
+ // Otherwise we have a trickier calculation. We don't want to force the jit of the method at this point (may cause infinite
+ // recursion problems when we're called from the jit in the presence of call cycles). Instead we walk the top-level of the
+ // method IL using the same algorithm as PrepareMethodCallGraph.
+
+ // Locate the IL body of the current method. May have to account for the fact that the current method desc is an
+ // instantiating stub and burrow down for the real method desc.
+ MethodDesc *pRealMethod = pMD;
+ if (pRealMethod->IsInstantiatingStub()) {
+ _ASSERTE(!pRealMethod->ContainsGenericVariables());
+ pRealMethod = pRealMethod->GetWrappedMethodDesc();
+ }
+ COR_ILMETHOD_DECODER method(pRealMethod->GetILHeader());
+ BYTE *pbIL = (BYTE*)method.Code;
+ DWORD cbIL = method.GetCodeSize();
+
+ // Look for exception handling information for the method. If there isn't any then we know there can't be a CER rooted here.
+ COR_ILMETHOD_SECT_EH const * pEH = method.EH;
+ if (pEH == NULL || pEH->EHCount() == 0)
+ return false;
+
+ // Iterate through the body of the method looking for interesting call sites.
+ while (cbIL) {
+
+ // Read the IL op.
+ DWORD dwOp = *pbIL++; cbIL--;
+
+ // Handle prefix codes (only CEE_PREFIX1 is legal so far).
+ if (dwOp == CEE_PREFIX1) {
+ if (!cbIL)
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ dwOp = 256 + *pbIL++; cbIL--;
+ if (dwOp >= CEE_ILLEGAL)
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ } else if (dwOp >= CEE_PREFIX7)
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+
+ // We'll only ever see CALL instructions targeting PrepareConstrainedRegions (well, those are the ones we're
+ // interested in anyway).
+ if (dwOp == CEE_CALL)
+ {
+ if (cbIL < sizeof(DWORD))
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ if (IsPcrReference(pMD->GetModule(), (mdToken)GET_UNALIGNED_VAL32(pbIL)))
+ return true;
+ }
+
+ // Skip the rest of the current IL instruction. Look up the table built statically at the top of this module for most
+ // instructions, but CEE_SWITCH requires special processing (the length of that instruction depends on a count DWORD
+ // embedded right after the opcode).
+ if (dwOp == CEE_SWITCH) {
+ if (cbIL < sizeof(DWORD))
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ DWORD dwTargets = GET_UNALIGNED_VAL32(pbIL);
+ if (dwTargets >= (MAXDWORD / sizeof(DWORD)))
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT); // multiplication below would overflow
+ DWORD cbSwitch = (1 + dwTargets) * sizeof(DWORD);
+ if (cbIL < cbSwitch)
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ pbIL += cbSwitch;
+ cbIL -= cbSwitch;
+ } else {
+ if (dwOp >= _countof(g_rOpArgs))
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ DWORD cbOp = g_rOpArgs[dwOp];
+ if (cbIL < cbOp)
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ pbIL += cbOp;
+ cbIL -= cbOp;
+ }
+
+ } // End of IL parsing loop
+
+ // If we get here then there was no CER-root.
+ return false;
+}
+
+// The name of the PrepareConstrainedRegions method, broken down into its components (the code below scans for these directly in the
+// metadata).
+#define PCR_METHOD "PrepareConstrainedRegions"
+#define PCR_TYPE "RuntimeHelpers"
+#define PCR_NAMESPACE "System.Runtime.CompilerServices"
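+
+// For reference, the (illustrative) C# pattern this machinery scans for looks like:
+//
+//     RuntimeHelpers.PrepareConstrainedRegions();
+//     try {
+//         // normal code
+//     }
+//     finally {
+//         // constrained code, prepared ahead of time
+//     }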
+
+// Given a token and a module scoping it, determine if that token is a reference to PrepareConstrainedRegions. We want to do this
+// without loading any random types since we're called in a context where type loading is prohibited.
+bool IsPcrReference(Module *pModule, mdToken tkMethod)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pModule));
+ } CONTRACTL_END;
+
+ IMDInternalImport *pImport = pModule->GetMDImport();
+
+ // Validate that the token is one that we can handle.
+ if (!pImport->IsValidToken(tkMethod) || (TypeFromToken(tkMethod) != mdtMethodDef &&
+ TypeFromToken(tkMethod) != mdtMethodSpec &&
+ TypeFromToken(tkMethod) != mdtMemberRef))
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_METHOD_TOKEN);
+
+ // No reason to see a method spec for a call to something as simple as PrepareConstrainedRegions.
+ if (TypeFromToken(tkMethod) == mdtMethodSpec)
+ return false;
+
+ // If it's a method def then the module had better be mscorlib.
+ if (TypeFromToken(tkMethod) == mdtMethodDef) {
+ if (pModule->GetAssembly()->GetManifestModule() == SystemDomain::SystemAssembly()->GetManifestModule())
+ return tkMethod == g_pPrepareConstrainedRegionsMethod->GetMemberDef();
+ else
+ return false;
+ }
+
+ // That leaves the cross module reference case.
+ _ASSERTE(TypeFromToken(tkMethod) == mdtMemberRef);
+
+ // First get the method name and signature and validate it.
+ PCCOR_SIGNATURE pSig;
+ DWORD cbSig;
+ LPCSTR szMethod;
+ IfFailThrow(pImport->GetNameAndSigOfMemberRef(tkMethod, &pSig, &cbSig, &szMethod));
+
+ {
+ SigParser sig(pSig, cbSig);
+ ULONG nCallingConvention;
+ ULONG nArgumentsCount;
+ BYTE bReturnType;
+
+ // Signature is easy: void PCR().
+ // Must be a static method signature.
+ if (FAILED(sig.GetCallingConvInfo(&nCallingConvention)))
+ return false;
+ if (nCallingConvention != IMAGE_CEE_CS_CALLCONV_DEFAULT)
+ return false;
+ // With no arguments.
+ if (FAILED(sig.GetData(&nArgumentsCount)))
+ return false;
+ if (nArgumentsCount != 0)
+ return false;
+ // And a void return type.
+ if (FAILED(sig.GetByte(&bReturnType)))
+ return false;
+ if (bReturnType != (BYTE)ELEMENT_TYPE_VOID)
+ return false;
+ }
+
+ // Validate the name.
+ if (strcmp(szMethod, PCR_METHOD) != 0)
+ return false;
+
+ // The method looks OK, move up to the type and validate that.
+ mdToken tkType;
+ IfFailThrow(pImport->GetParentOfMemberRef(tkMethod, &tkType));
+
+ if (!pImport->IsValidToken(tkType))
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_TOKEN);
+
+ // If the parent is not a type ref then this isn't our target (we assume that mscorlib never uses a member ref to target
+ // PrepareConstrainedRegions; the assert below checks that, and if it ever fails we need to add some additional logic below).
+ _ASSERTE(TypeFromToken(tkType) != mdtTypeDef ||
+ pModule->GetAssembly()->GetManifestModule() != SystemDomain::SystemAssembly()->GetManifestModule());
+ if (TypeFromToken(tkType) != mdtTypeRef)
+ return false;
+
+ // Get the type name and validate it.
+ LPCSTR szNamespace;
+ LPCSTR szType;
+ IfFailThrow(pImport->GetNameOfTypeRef(tkType, &szNamespace, &szType));
+
+ if (strcmp(szType, PCR_TYPE) != 0)
+ return false;
+ if (strcmp(szNamespace, PCR_NAMESPACE) != 0)
+ return false;
+
+ // Type is OK as well. Check the assembly reference.
+ mdToken tkScope;
+ IfFailThrow(pImport->GetResolutionScopeOfTypeRef(tkType, &tkScope));
+
+ if (TypeFromToken(tkScope) != mdtAssemblyRef)
+ return false;
+ if (!pImport->IsValidToken(tkScope))
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_TOKEN);
+ }
+
+ // Fetch the name and public key or public key token.
+ BYTE *pbPublicKeyOrToken;
+ DWORD cbPublicKeyOrToken;
+ LPCSTR szAssembly;
+ DWORD dwAssemblyFlags;
+ IfFailThrow(pImport->GetAssemblyRefProps(
+ tkScope,
+ (const void**)&pbPublicKeyOrToken,
+ &cbPublicKeyOrToken,
+ &szAssembly,
+ NULL, // AssemblyMetaDataInternal: we don't care about version, culture etc.
+ NULL, // Hash value pointer, obsolete information
+ NULL, // Byte count for above
+ &dwAssemblyFlags));
+
+ // Validate the name.
+ if (stricmpUTF8(szAssembly, g_psBaseLibraryName) != 0)
+ return false;
+
+ // And the public key or token, whichever was burned into the reference by the compiler. For mscorlib this is the ECMA key or
+ // token.
+ if (IsAfPublicKeyToken(dwAssemblyFlags)) {
+ if (cbPublicKeyOrToken != sizeof(g_rbNeutralPublicKeyToken) ||
+ memcmp(pbPublicKeyOrToken, g_rbNeutralPublicKeyToken, cbPublicKeyOrToken) != 0)
+ return false;
+ } else {
+ if (cbPublicKeyOrToken != sizeof(g_rbNeutralPublicKey) ||
+ memcmp(pbPublicKeyOrToken, g_rbNeutralPublicKey, cbPublicKeyOrToken) != 0)
+ return false;
+ }
+
+ // If we get here we've finally proved the call target was indeed PrepareConstrainedRegions. Whew.
+ return true;
+}
+
+// Prepares a method as a CER root. In some scenarios we set
+// fIgnoreVirtualCERCallMDA=TRUE, this happens when we want to ignore firing a
+// VirtualCERCall MDA because we know for sure that the virtual methods are
+// already prepared. A good example of this case is preparing
+// g_pExecuteBackoutCodeHelperMethod method.
+void PrepareMethodDesc(MethodDesc* pMD, Instantiation classInst, Instantiation methodInst, BOOL onlyContractedMethod, BOOL fIgnoreVirtualCERCallMDA)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+#ifdef FEATURE_PREJIT
+ // This method may have some ngen fixup information provided, in which case we just check that it's been restored and can
+ // dispense with the preparation altogether.
+ Module *pModule = pMD->GetModule();
+ if (pModule->IsNgenCerRootMethod(pMD))
+ {
+ pMD->CheckRestore();
+ pModule->RestoreCer(pMD);
+ return;
+ }
+#endif
+
+ // If we are only going to prepare contracted methods and this method does
+ // not have a contract then we just return.
+ if (onlyContractedMethod && CheckForReliabilityContract(pMD) < RCL_BASIC_CONTRACT)
+ {
+ return;
+ }
+
+ SigTypeContext sTypeContext(pMD, classInst, methodInst);
+ MethodCallGraphPreparer mcgp(pMD, &sTypeContext, true, true, fIgnoreVirtualCERCallMDA == TRUE);
+ mcgp.Run();
+}
+
+// Prepares the critical finalizer call graph for the given object type (which
+// must derive from CriticalFinalizerObject). This involves preparing at least
+// the finalizer method and possibly some others (for SafeHandle and
+// CriticalHandle derivations). If a module pointer is supplied then only the
+// critical methods introduced in that module are prepared (this is used at
+// ngen time to ensure that we're only generating ngen preparation info for the
+// targeted module).
+void PrepareCriticalFinalizerObject(MethodTable *pMT, Module *pModule)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ } CONTRACTL_END;
+
+ // Have we prepared this type before?
+ if (pMT->CriticalTypeHasBeenPrepared())
+ return;
+
+ GCX_PREEMP();
+
+ // Restore the method table if necessary.
+ pMT->CheckRestore();
+
+ // Determine the interesting parent class (either SafeHandle, CriticalHandle or CriticalFinalizerObject).
+ MethodTable *pSafeHandleClass = MscorlibBinder::GetClass(CLASS__SAFE_HANDLE);
+ MethodTable *pCriticalHandleClass = MscorlibBinder::GetClass(CLASS__CRITICAL_HANDLE);
+ MethodTable *pParent = pMT;
+ while (pParent) {
+ if (pParent == g_pCriticalFinalizerObjectClass ||
+ pParent == pSafeHandleClass ||
+ pParent == pCriticalHandleClass) {
+ break;
+ }
+ pParent = pParent->GetParentMethodTable();
+ }
+ _ASSERTE(pParent != NULL);
+
+ BinderMethodID rgMethods[5];
+ int nMethods;
+
+ // Prepare the method or methods based on the parent class.
+ if (pParent == pSafeHandleClass) {
+ rgMethods[0] = METHOD__CRITICAL_FINALIZER_OBJECT__FINALIZE;
+ rgMethods[1] = METHOD__SAFE_HANDLE__RELEASE_HANDLE;
+ rgMethods[2] = METHOD__SAFE_HANDLE__GET_IS_INVALID;
+ rgMethods[3] = METHOD__SAFE_HANDLE__DISPOSE;
+ rgMethods[4] = METHOD__SAFE_HANDLE__DISPOSE_BOOL;
+ nMethods = 5;
+ } else if (pParent == pCriticalHandleClass) {
+ rgMethods[0] = METHOD__CRITICAL_FINALIZER_OBJECT__FINALIZE;
+ rgMethods[1] = METHOD__CRITICAL_HANDLE__RELEASE_HANDLE;
+ rgMethods[2] = METHOD__CRITICAL_HANDLE__GET_IS_INVALID;
+ rgMethods[3] = METHOD__CRITICAL_HANDLE__DISPOSE;
+ rgMethods[4] = METHOD__CRITICAL_HANDLE__DISPOSE_BOOL;
+ nMethods = 5;
+ } else {
+ _ASSERTE(pParent == g_pCriticalFinalizerObjectClass);
+ rgMethods[0] = METHOD__CRITICAL_FINALIZER_OBJECT__FINALIZE;
+ nMethods = 1;
+ }
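+
+ // Illustrative rationale for the method clusters above: a SafeHandle's critical finalization
+ // runs Finalize -> Dispose(bool) -> ReleaseHandle / get_IsInvalid, so the whole group must be
+ // prepared together for the finalizer to be reliable in the face of thread aborts and OOM.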
+
+ for (int iMethod = 0; iMethod < nMethods; iMethod++)
+ {
+ // Prepare a (possibly virtual) method on an instance. The method is identified via a binder ID, so the initial
+ // declaration of the method must reside within mscorlib. We might have ngen fixup information for the method and can avoid direct
+ // preparation as well.
+
+ MethodDesc *pPrepMethod = pMT->GetMethodDescForSlot(MscorlibBinder::GetMethod(rgMethods[iMethod])->GetSlot());
+#ifdef FEATURE_PREJIT
+ if (pPrepMethod->GetModule()->IsNgenCerRootMethod(pPrepMethod)) {
+ pPrepMethod->GetModule()->RestoreCer(pPrepMethod);
+ }
+ else
+ if (IsCompilationProcess() && pPrepMethod->IsAbstract()) {
+ // Skip abstract methods during NGen (we should not ever get abstract methods here at runtime)
+ }
+ else
+#endif
+ {
+ if (pModule == NULL || pPrepMethod->GetModule() == pModule) {
+ SigTypeContext _sTypeContext(pPrepMethod, TypeHandle(pMT));
+ MethodCallGraphPreparer mcgp(pPrepMethod, &_sTypeContext, true, true);
+ mcgp.Run();
+ }
+ }
+ }
+
+ // Note the fact that we've prepared this type before to prevent repetition of the work above. (Repetition is harmless in
+ // all other respects, though, so there's no need to worry about racing to set this flag.)
+ pMT->SetCriticalTypeHasBeenPrepared();
+}
+
+#ifdef _DEBUG
+
+static const char * const g_rszContractNames[] = { "RCL_NO_CONTRACT", "RCL_BASIC_CONTRACT", "RCL_PREPARE_CONTRACT" };
+static DWORD g_dwContractChecks = 0;
+
+#define ReturnContractLevel(_level) do { \
+ g_dwContractChecks++; \
+ if ((g_dwContractChecks % 100) == 0 && g_pMethodContractCache) \
+ g_pMethodContractCache->DbgDumpStats(); \
+ ReliabilityContractLevel __level = (_level); \
+ CER_LOG(CONTRACTS, ("%s -- %s\n", pMD->m_pszDebugMethodName, g_rszContractNames[__level])); \
+ return __level; \
+} while (false)
+#else
+#define ReturnContractLevel(_level) return (_level)
+#endif
+
+// Look for reliability contracts at the method, class and assembly level and parse them to extract the information we're interested
+// in from a runtime preparation viewpoint. This information is abstracted in the form of the ReliabilityContractLevel enumeration.
+ReliabilityContractLevel CheckForReliabilityContract(MethodDesc *pMD)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ } CONTRACTL_END;
+
+ // We are attempting to abstract reliability contracts for the given method into three different buckets: those methods that
+ // will cause an error (or a MDA report at least) during preparation (RCL_NO_CONTRACT), those we allow but don't prepare
+ // (RCL_BASIC_CONTRACT) and those we allow and prepare (RCL_PREPARE_CONTRACT).
+ //
+ // We place methods into the first bucket below that matches:
+ // RCL_NO_CONTRACT -- Methods with no consistency guarantee or whose consistency states they may corrupt the appdomain or process.
+ // RCL_BASIC_CONTRACT -- Methods that state CER.None (or don't specify a CER attribute)
+ // RCL_PREPARE_CONTRACT -- Methods that state CER.MayFail or CER.Success
+ //
+ // We look for reliability contracts at three levels: method, class and assembly. Definitions found at the method level override
+ // those at the class and assembly level and those at the class level override assembly settings.
+ //
+ // In the interests of efficiency we cache contract information in a number of ways. Firstly we look at a hash of recently
+ // queried MethodDescs. This contains authoritative answers (assembly/class/method information has already been composed so on a
+ // hit we don't need to look anywhere else). This cache is allocated lazily, never grows (newer items eventually displace older
+ // ones), is global, requires no locks and is never freed. The idea is to limit the amount of working set we ever occupy while
+ // keeping the CPU usage as low as possible. Typical usages of this method involve querying a small number of methods in a stack
+ // walk, possibly multiple times, so a small hash cache should work reasonably well here.
+ //
+ // On a miss we're going to have to bite the bullet and look at the assembly, class and method. The assembly and class cache
+ // this information at load (ngen) time though, so they're not so expensive (class level data is cached on the EEClass, so it's
+ // cold data, but the most performance sensitive scenario in which we're called here, ThreadAbort, isn't all that hot).
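+ //
+ // Illustrative mapping from BCL attribute forms to the buckets above:
+ // [ReliabilityContract(Consistency.MayCorruptProcess, Cer.None)] -> RCL_NO_CONTRACT
+ // [ReliabilityContract(Consistency.WillNotCorruptState, Cer.None)] -> RCL_BASIC_CONTRACT
+ // [ReliabilityContract(Consistency.WillNotCorruptState, Cer.Success)] -> RCL_PREPARE_CONTRACT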
+
+ // Check the cache first, it contains a raw contract level.
+ ReliabilityContractLevel eLevel;
+ if (g_pMethodContractCache && g_pMethodContractCache->Lookup(pMD, (DWORD*)&eLevel))
+ ReturnContractLevel(eLevel);
+
+ // Start at the method level and work up until we've found enough information to make a decision. The contract level is composed
+ // in an encoded DWORD form that lets us track both parts of the state (consistency and cer) and whether each has been supplied
+ // yet. See the RC_* macros for encoding details.
+ DWORD dwMethodContractInfo = GetReliabilityContract(pMD->GetMDImport(), pMD->GetMemberDef());
+ if (RC_INCOMPLETE(dwMethodContractInfo)) {
+ DWORD dwClassContractInfo = pMD->GetClass()->GetReliabilityContract();
+ dwMethodContractInfo = RC_MERGE(dwMethodContractInfo, dwClassContractInfo);
+ if (RC_INCOMPLETE(dwMethodContractInfo)) {
+ DWORD dwAssemblyContractInfo = pMD->GetModule()->GetReliabilityContract();
+ dwMethodContractInfo = RC_MERGE(dwMethodContractInfo, dwAssemblyContractInfo);
+ }
+ }
+
+ // We've got an answer, so attempt to cache it for the next time.
+
+ // First check we have a cache (we allocate it lazily).
+ if (g_pMethodContractCache == NULL) {
+ PtrHashCache *pCache = new (nothrow) PtrHashCache();
+ if (pCache)
+ if (FastInterlockCompareExchangePointer(&g_pMethodContractCache, pCache, NULL) != NULL)
+ delete pCache;
+ }
+
+ // We still might not have a cache in low memory situations. That's OK.
+ if (g_pMethodContractCache)
+ g_pMethodContractCache->Add(pMD, RC_ENCODED_TO_LEVEL(dwMethodContractInfo));
+
+ ReturnContractLevel(RC_ENCODED_TO_LEVEL(dwMethodContractInfo));
+}
+
+
+// Macro used to handle failures in the routine below.
+#define IfFailRetRcNull(_hr) do { if (FAILED(_hr)) return RC_NULL; } while (false)
+
+// Look for a reliability contract attached to the given metadata token in the given scope. Return the result as an encoded DWORD
+// (see the RC_ENCODE macro).
+DWORD GetReliabilityContract(IMDInternalImport *pImport, mdToken tkParent)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pImport));
+ } CONTRACTL_END;
+
+ HRESULT hr;
+ DWORD dwResult = RC_NULL;
+
+ // Sadly we only have two unmanaged APIs available to us for looking at custom attributes. One looks up attributes by name but
+ // only returns the byte blob, not the attribute ctor information (which we need to parse the blob), while the other returns
+ // everything but requires us to enumerate all attributes on a given token looking for the one we're interested in. To keep the
+ // cost down we probe for the existence of the attribute using the first API and then use the enumeration method if we get a
+ // hit.
+ hr = pImport->GetCustomAttributeByName(tkParent, RELIABILITY_CONTRACT_NAME, NULL, NULL);
+ if (hr == S_FALSE)
+ return RC_NULL;
+
+ IfFailRetRcNull(hr);
+
+ // Got at least one contract against this parent. Enumerate them all (filtering by name).
+ MDEnumHolder hEnum(pImport);
+ hr = pImport->SafeAndSlowEnumCustomAttributeByNameInit(tkParent, RELIABILITY_CONTRACT_NAME, &hEnum);
+ _ASSERTE(hr != S_FALSE);
+ IfFailRetRcNull(hr);
+
+ // Enumerate over all the contracts.
+ mdToken tkContract;
+ while (S_OK == pImport->SafeAndSlowEnumCustomAttributeByNameNext(tkParent, RELIABILITY_CONTRACT_NAME, &hEnum, &tkContract)) {
+
+ // Get the attribute type (token of the ctor used) since we need this information in order to parse the blob we'll find
+ // next.
+ mdToken tkAttrType;
+ IfFailRetRcNull(pImport->GetCustomAttributeProps(tkContract, &tkAttrType));
+ if (!pImport->IsValidToken(tkAttrType))
+ continue;
+
+ // The token should be a member ref or method def.
+ // Get the signature of the ctor so we know which version has been called.
+ PCCOR_SIGNATURE pSig;
+ DWORD cbSig;
+ LPCSTR szName_Ignore;
+ if (TypeFromToken(tkAttrType) == mdtMemberRef)
+ {
+ IfFailRetRcNull(pImport->GetNameAndSigOfMemberRef(tkAttrType, &pSig, &cbSig, &szName_Ignore));
+ }
+ else
+ {
+ if (TypeFromToken(tkAttrType) != mdtMethodDef)
+ continue;
+ IfFailRetRcNull(pImport->GetNameAndSigOfMethodDef(tkAttrType, &pSig, &cbSig, &szName_Ignore));
+ }
+
+ // Only two signatures are supported: the null sig '()' and the full sig '(Consistency, CER)'.
+ // Set a boolean based on which one was provided.
+ bool fNullCtor;
+ ULONG eCallConv;
+
+ SigPointer sig(pSig, cbSig);
+
+ // Check the calling convention is what we expect (default convention on an instance method).
+ IfFailRetRcNull(sig.GetCallingConvInfo(&eCallConv));
+ _ASSERTE(eCallConv == (IMAGE_CEE_CS_CALLCONV_DEFAULT | IMAGE_CEE_CS_CALLCONV_HASTHIS));
+ if (eCallConv != (IMAGE_CEE_CS_CALLCONV_DEFAULT | IMAGE_CEE_CS_CALLCONV_HASTHIS))
+ IfFailRetRcNull(COR_E_BADIMAGEFORMAT);
+
+ // If so, the next datum is the count of arguments, and this is all we need to determine which ctor has been used.
+ ULONG dwArgs;
+ IfFailRetRcNull(sig.GetData(&dwArgs));
+ _ASSERTE(dwArgs == 0 || dwArgs == 2);
+ if (dwArgs != 0 && dwArgs != 2)
+ IfFailRetRcNull(COR_E_BADIMAGEFORMAT);
+
+ fNullCtor = dwArgs == 0;
+
+ // Now we know how to parse the blob, let's fetch a pointer to it.
+ BYTE const *pbData;
+ DWORD cbData;
+ IfFailRetRcNull(pImport->GetCustomAttributeAsBlob(tkContract, (const void **)&pbData, &cbData));
+
+ // Check serialization format (we support version 1 only).
+ if (cbData < sizeof(WORD) || GET_UNALIGNED_VAL16(pbData) != 1)
+ IfFailRetRcNull(COR_E_BADIMAGEFORMAT);
+ pbData += sizeof(WORD);
+ cbData -= sizeof(WORD);
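+
+ // Illustrative serialized blob for [ReliabilityContract(Consistency.WillNotCorruptState, Cer.Success)],
+ // assuming the standard custom attribute serialization format and the BCL enum ordinals:
+ //
+ // 01 00 -- prolog (version 1)
+ // 03 00 00 00 -- Consistency.WillNotCorruptState
+ // 02 00 00 00 -- Cer.Success
+ // 00 00 -- zero named property/field pairs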
+
+ // Parse ctor arguments if we have any.
+ if (!fNullCtor) {
+
+ // We assume the enums are based on DWORDS.
+ if (cbData < (sizeof(DWORD) * 2))
+ IfFailRetRcNull(COR_E_BADIMAGEFORMAT);
+
+ // Consistency first.
+ DWORD dwConsistency = GET_UNALIGNED_VAL32(pbData);
+ pbData += sizeof(DWORD);
+ cbData -= sizeof(DWORD);
+ if (dwConsistency > RC_CONSISTENCY_CORRUPT_NOTHING)
+ IfFailRetRcNull(COR_E_BADIMAGEFORMAT);
+
+ // Followed by Cer.
+ DWORD dwCer = GET_UNALIGNED_VAL32(pbData);
+ pbData += sizeof(DWORD);
+ cbData -= sizeof(DWORD);
+ if (dwCer > RC_CER_SUCCESS)
+ IfFailRetRcNull(COR_E_BADIMAGEFORMAT);
+
+ dwResult = RC_MERGE(dwResult, RC_ENCODE(dwConsistency, dwCer));
+ }
+
+ // Get the count of field/property, value pairs.
+ if (cbData < sizeof(WORD))
+ IfFailRetRcNull(COR_E_BADIMAGEFORMAT);
+ WORD cPairs = GET_UNALIGNED_VAL16(pbData);
+ pbData += sizeof(WORD);
+ cbData -= sizeof(WORD);
+
+ // Iterate over any such pairs, looking for values we haven't picked up yet.
+ for (DWORD i = 0; i < cPairs; i++) {
+
+ // First is a field vs property selector. We expect only properties.
+ if (cbData < sizeof(BYTE) || *(BYTE*)pbData != SERIALIZATION_TYPE_PROPERTY)
+ IfFailRetRcNull(COR_E_BADIMAGEFORMAT);
+ pbData += sizeof(BYTE);
+ cbData -= sizeof(BYTE);
+
+ // Next is the type of the property. It had better be an enum.
+ if (cbData < sizeof(BYTE) || *(BYTE*)pbData != SERIALIZATION_TYPE_ENUM)
+ IfFailRetRcNull(COR_E_BADIMAGEFORMAT);
+ pbData += sizeof(BYTE);
+ cbData -= sizeof(BYTE);
+
+ // Next we have the assembly qualified enum type name. This is preceded by a metadata style packed byte length (the
+ // string itself is 8-bit and not null terminated). Ignore it (just skip across) and we'll key off the property name
+ // (coming up) instead.
+ DWORD cbName;
+ BYTE const * pbPostEncodedLength;
+ IfFailRetRcNull(CPackedLen::SafeGetData(pbData, cbData, &cbName, &pbPostEncodedLength));
+ DWORD cbEncodedLength = static_cast<DWORD>(pbPostEncodedLength - pbData);
+ pbData += cbEncodedLength + cbName;
+ cbData -= cbEncodedLength + cbName;
+
+ // Now we have the name of the property (in a similar format to above).
+ IfFailRetRcNull(CPackedLen::SafeGetData(pbData, cbData, &cbName, &pbPostEncodedLength));
+ cbEncodedLength = static_cast<DWORD>(pbPostEncodedLength - pbData);
+ pbData += cbEncodedLength;
+ cbData -= cbEncodedLength;
+
+ bool fConsistencyProp = false;
+ if (cbName == strlen(RC_CONSISTENCY_PROP_NAME) && strncmp((const char*)pbData, RC_CONSISTENCY_PROP_NAME, cbName) == 0)
+ fConsistencyProp = true;
+ else if (cbName == strlen(RC_CER_PROP_NAME) && strncmp((const char*)pbData, RC_CER_PROP_NAME, cbName) == 0)
+ fConsistencyProp = false;
+ else
+ IfFailRetRcNull(COR_E_BADIMAGEFORMAT);
+ pbData += cbName;
+ cbData -= cbName;
+
+ // And finally the actual enum value (again, we assume the underlying type is a DWORD).
+ if (cbData < sizeof(DWORD))
+ IfFailRetRcNull(COR_E_BADIMAGEFORMAT);
+ DWORD dwValue = GET_UNALIGNED_VAL32(pbData);
+ pbData += sizeof(DWORD);
+ cbData -= sizeof(DWORD);
+
+ if (fConsistencyProp) {
+ if (dwValue > RC_CONSISTENCY_CORRUPT_NOTHING)
+ IfFailRetRcNull(COR_E_BADIMAGEFORMAT);
+ dwResult = RC_MERGE(dwResult, RC_ENCODE(dwValue, RC_CER_UNDEFINED));
+ } else {
+ if (dwValue > RC_CER_SUCCESS)
+ IfFailRetRcNull(COR_E_BADIMAGEFORMAT);
+ dwResult = RC_MERGE(dwResult, RC_ENCODE(RC_CONSISTENCY_UNDEFINED, dwValue));
+ }
+ }
+
+ // Shouldn't have any bytes left in the blob at this stage.
+ _ASSERTE(cbData == 0);
+ }
+
+ // Return the result encoded into the low-order bits of a DWORD (see the RC_* macros above).
+ return dwResult;
+}
+
+// Given a metadata token, a scoping module and a type context, look up the appropriate MethodDesc (and recompute the accompanying
+// type context).
+MethodContext *TokenToMethodDesc(Module *pModule, mdToken tokMethod, SigTypeContext *pTypeContext)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Validate that the token is one that we can handle.
+ if (!pModule->GetMDImport()->IsValidToken(tokMethod) || (TypeFromToken(tokMethod) != mdtMethodDef &&
+ TypeFromToken(tokMethod) != mdtMethodSpec &&
+ TypeFromToken(tokMethod) != mdtMemberRef)) {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_METHOD_TOKEN);
+ }
+
+ // Look up the MethodDesc based on the token and type context.
+ MethodDesc *pMD = MemberLoader::GetMethodDescFromMemberDefOrRefOrSpec(pModule,
+ tokMethod,
+ pTypeContext,
+ TRUE,
+ FALSE);
+
+ // The MethodDesc we get might be shared between several types. If so we'll need to do extra work to locate the exact
+ // class instantiation instead of the default representative one.
+ SigTypeContext sNewTypeContext;
+ if (pMD->IsSharedByGenericInstantiations()) {
+ TypeHandle th = GetTypeFromMemberDefOrRefOrSpecThrowing(pModule,
+ tokMethod,
+ pTypeContext);
+ SigTypeContext::InitTypeContext(pMD, th, &sNewTypeContext);
+ } else
+ SigTypeContext::InitTypeContext(pMD, pMD->GetClassInstantiation(), pMD->GetMethodInstantiation(), &sNewTypeContext);
+
+ return MethodContext::PerThreadAllocate(pMD, &sNewTypeContext);
+}
+
+// Locate an exact type definition given a method token and the type context in which it can be resolved.
+TypeHandle GetTypeFromMemberDefOrRefOrSpecThrowing(Module *pModule,
+ mdToken tokMethod,
+ SigTypeContext *pTypeContext)
+{
+ STANDARD_VM_CONTRACT;
+
+ IMDInternalImport *pImport = pModule->GetMDImport();
+
+ // Convert method specs into the underlying member ref.
+ if (TypeFromToken(tokMethod) == mdtMethodSpec)
+ {
+ PCCOR_SIGNATURE pSig;
+ ULONG cSig;
+ mdMemberRef tkGenericMemberRef;
+
+ IfFailThrow(pImport->GetMethodSpecProps(tokMethod, &tkGenericMemberRef, &pSig, &cSig));
+
+ if (!pImport->IsValidToken(tkGenericMemberRef) ||
+ (TypeFromToken(tkGenericMemberRef) != mdtMethodDef &&
+ TypeFromToken(tkGenericMemberRef) != mdtMemberRef))
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_TOKEN_TYPE);
+ }
+
+ tokMethod = tkGenericMemberRef;
+ }
+
+ // Follow the member ref/def back up to the type def/ref/spec or module (for global methods).
+ if (TypeFromToken(tokMethod) == mdtMemberRef)
+ {
+ IfFailThrow(pImport->GetParentOfMemberRef(tokMethod, &tokMethod));
+
+ // For varargs, a memberref can point to a methodDef
+ if (TypeFromToken(tokMethod) == mdtMethodDef)
+ {
+ if (!pImport->IsValidToken(tokMethod))
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_TOKEN);
+ }
+ IfFailThrow(pImport->GetParentToken(tokMethod, &tokMethod));
+ }
+ }
+ else if (TypeFromToken(tokMethod) == mdtMethodDef)
+ {
+ IfFailThrow(pImport->GetParentToken(tokMethod, &tokMethod));
+ }
+
+ if (!pImport->IsValidToken(tokMethod) || (TypeFromToken(tokMethod) != mdtTypeDef &&
+ TypeFromToken(tokMethod) != mdtTypeRef &&
+ TypeFromToken(tokMethod) != mdtTypeSpec &&
+ TypeFromToken(tokMethod) != mdtModuleRef))
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_TOKEN);
+ }
+
+ // Load the type in question, using a type context if necessary to get an exact representation.
+ TypeHandle th;
+ if (TypeFromToken(tokMethod) == mdtModuleRef) {
+ DomainFile *pNewModule = pModule->LoadModule(GetAppDomain(), tokMethod, FALSE);
+ if (pNewModule != NULL)
+ th = TypeHandle(pNewModule->GetModule()->GetGlobalMethodTable());
+ } else {
+ th = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule,
+ tokMethod,
+ pTypeContext);
+ }
+
+ if (th.IsNull())
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+
+ return th;
+}
+
+// Determine whether the method given as a parameter is the root of a CER.
+// @todo: Need an x86 offset as well and logic to determine whether we're actually in a root-CER portion of the method (if the whole
+// thing isn't the root).
+bool IsCerRootMethod(MethodDesc *pMD)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ // Treat IL stubs as CER roots (marshaling code needs to string together operations without being interrupted by thread aborts).
+ if (pMD->IsILStub())
+ return true;
+
+ // There are also some well-known root methods defined by the system.
+ if (pMD == g_pExecuteBackoutCodeHelperMethod)
+ return true;
+
+ // For now we just look to see whether there is some prep or fixup info stored for this method.
+ Module *pModule = pMD->GetModule();
+
+ if (pModule->GetCerPrepInfo(pMD) != NULL)
+ return true;
+
+#ifdef FEATURE_PREJIT
+ if (pModule->IsNgenCerRootMethod(pMD))
+ return true;
+#endif
+
+ return false;
+}
+
+// Fill the cache of overflowed generic dictionary entries that the jit maintains with all the overflow slots stored so far in the
+// dictionary layout.
+void PrepopulateGenericHandleCache(DictionaryLayout *pDictionaryLayout,
+ MethodDesc *pMD,
+ MethodTable *pMT)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ // Dictionary overflow entries are recorded starting in the second bucket of the dictionary layout.
+ DictionaryLayout *pOverflows = pDictionaryLayout->GetNextLayout();
+
+ while (pOverflows) {
+ for (DWORD i = 0; i < pOverflows->GetMaxSlots(); i++) {
+ DictionaryEntryLayout *pEntry = pOverflows->GetEntryLayout(i);
+
+ // We've finished as soon as we find the first unused slot.
+ if (!pEntry->m_signature)
+ return;
+
+ // We have a valid overflow entry. Determine the handle value given the type context we have and push it into the JIT's
+ // cache.
+ JIT_GenericHandleWorker(pMD, pMT, pEntry->m_signature);
+ }
+ pOverflows = pOverflows->GetNextLayout();
+ }
+}
+
+#ifdef FEATURE_PREJIT
+
+// Prepare the CER rooted at the given method (it's OK to pass a MethodDesc* that doesn't root a CER, in which case the method
+// is a no-op).
+void CerNgenRootTable::Restore(MethodDesc *pRootMD)
+{
+#ifndef CROSSGEN_COMPILE
+ STANDARD_VM_CONTRACT;
+
+ // We don't have a restoration bitmap at ngen time. No matter, we just always claim everything is restored.
+ if (m_pRestoreBitmap == NULL)
+ return;
+
+ // Locate the root index from the table. Failure indicates there's no work to do.
+ DWORD dwIndex = FindIndex(pRootMD);
+ if (dwIndex == NoSuchRoot)
+ return;
+
+ _ASSERTE(m_pRoots[dwIndex].m_pRootMD == pRootMD);
+
+ // Check then mark the fact that we're preparing (to prevent potential recursion).
+ SigTypeContext typeContext;
+ if (!GetThread()->GetCerPreparationState()->CanPreparationProceed(pRootMD, &typeContext))
+ return;
+
+ MethodCallGraphPreparer sPrep(pRootMD, &typeContext, true, true);
+ MethodCallGraphPreparer::ThreadPreparingCerHolder sCerHolder(&sPrep);
+
+#ifdef _DEBUG
+ if (GetCerLoggingOptions())
+ {
+ CER_LOG(RESTORE, ("---------------------------------------------------------------\n"));
+ SString ssRootMethod;
+ TypeString::AppendMethodInternal(ssRootMethod, pRootMD, TypeString::FormatNamespace | TypeString::FormatStubInfo);
+ CER_LOG(RESTORE, ("Restoring CER graph from %S\n", ssRootMethod.GetUnicode()));
+ }
+#endif
+
+ g_IBCLogger.LogCerMethodListReadAccess(pRootMD);
+
+ // Retrieve the CerRoot structure.
+ CerRoot *pRoot = &m_pRoots[dwIndex];
+ _ASSERTE(pRoot->m_pRootMD == pRootMD);
+
+ // Scan the list of methods in the CER (the last one is a sentinel with a NULL MethodDesc*). Restore each method as we go.
+ MethodContextElement *pEntry = pRoot->m_pList;
+ while (pEntry->GetMethodDesc())
+ {
+ // Method desc and type handle pointers may still be tokenized at this point.
+ Module::RestoreMethodDescPointer(&pEntry->m_pMethodDesc);
+ Module::RestoreMethodTablePointer(&pEntry->m_pExactMT);
+
+ g_IBCLogger.LogCerMethodListReadAccess(pEntry->GetMethodDesc());
+
+ MethodDesc * pMD = pEntry->GetMethodDesc();
+
+ // Check whether there are generic dictionaries that need to be pre-populated.
+
+ // Don't use the direct PrepopulateDictionary method here for MethodTable/MethodDesc
+ // - MethodTable: Takes binding considerations into account (which we don't care about)
+ // - MethodDesc: Appears to use a representative class instantiation (and we have the exact one handy)
+ //
+ // Additionally, avoid touching the EEClass if we don't need to
+ MethodTable * pMT = pEntry->GetExactMT();
+ if (pMT != NULL)
+ {
+ // MethodTable
+ DictionaryLayout *pClassDictLayout = pMT->GetClass()->GetDictionaryLayout();
+ if (pClassDictLayout)
+ {
+ pMT->GetDictionary()->PrepopulateDictionary(NULL, pMT, false);
+
+ // The dictionary may have overflowed in which case we need to prepopulate the jit's lookup cache as well.
+ PrepopulateGenericHandleCache(pClassDictLayout, NULL, pMT);
+ }
+
+ // MethodDesc
+ DictionaryLayout *pMethDictLayout = pMD->GetDictionaryLayout();
+ if (pMethDictLayout)
+ {
+ pMD->GetMethodDictionary()->PrepopulateDictionary(pMD, NULL, false);
+
+ // The dictionary may have overflowed in which case we need to prepopulate the jit's lookup cache as well.
+ PrepopulateGenericHandleCache(pMethDictLayout, pMD, NULL);
+ }
+ }
+
+ // Recreate stubs used by P/Invoke, COM calls, or FCalls by exercising the prestub.
+ if (pMD->IsPointingToPrestub() && (pMD->IsNDirect() || pMD->IsComPlusCall() || pMD->IsFCall()))
+ {
+ pMD->EnsureActive();
+ pMD->DoPrestub(NULL);
+ }
+
+#ifdef _DEBUG
+ if (GetCerLoggingOptions())
+ {
+ SString ssMethod;
+ TypeString::AppendMethodInternal(ssMethod, pMD, TypeString::FormatNamespace | TypeString::FormatStubInfo);
+ CER_LOG(RESTORE, (" %S\n", ssMethod.GetUnicode()));
+ }
+#endif
+
+ // Move to next entry.
+ pEntry++;
+ }
+
+ CER_LOG(RESTORE, ("---------------------------------------------------------------\n"));
+
+ // Mark this whole CER region as fixed up by setting a flag in the restore bitmap (kept separate so we can cluster all our page
+ // writes).
+ // Compute the DWORD offset into the flag array and then the mask for the specific bit in that DWORD.
+ DWORD dwOffset = dwIndex / (sizeof(DWORD) * 8);
+ DWORD dwMask = 1 << (dwIndex % (sizeof(DWORD) * 8));
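+ // e.g. dwIndex == 37: dwOffset == 37 / 32 == 1, dwMask == 1 << (37 % 32) == 1 << 5.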
+ EnsureWritablePages(m_pRestoreBitmap, sizeof(DWORD) * SizeOfRestoreBitmap());
+ FastInterlockOr(&m_pRestoreBitmap[dwOffset], dwMask);
+
+ // If we fixed up any methods with their own CERs then we will have implicitly fixed up those too. Mark their fixup records as
+ // completed as well to avoid further unnecessary work.
+ pEntry = pRoot->m_pList;
+ while (pEntry->GetMethodDesc()) {
+ dwIndex = FindIndex(pEntry->GetMethodDesc());
+ if (dwIndex != NoSuchRoot) {
+ dwOffset = dwIndex / (sizeof(DWORD) * 8);
+ dwMask = 1 << (dwIndex % (sizeof(DWORD) * 8));
+ FastInterlockOr(&m_pRestoreBitmap[dwOffset], dwMask);
+ }
+ pEntry++;
+ }
+#endif // CROSSGEN_COMPILE
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+// Add a new root to the table, expanding it as necessary. Note that this method must be called with the CerCrst already held.
+void CerNgenRootTable::AddRoot(MethodDesc *pRootMD, MethodContextElement *pList)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsOwnerOfCrst(pRootMD->GetModule()->GetCerCrst()));
+ } CONTRACTL_END;
+
+ // Ensure we have enough space first.
+ if (m_cRoots == m_cSlots) {
+ DWORD cNewSize = m_cSlots + 16;
+ CerRoot *pNewArray = new CerRoot[cNewSize];
+ memcpyNoGCRefs(pNewArray, m_pRoots, m_cRoots * sizeof(CerRoot));
+ MethodContextElement **pNewRootsInCompilationOrder = new MethodContextElement*[cNewSize];
+ memcpyNoGCRefs(pNewRootsInCompilationOrder, m_pRootsInCompilationOrder, m_cRoots * sizeof(MethodContextElement*) );
+ m_cSlots = cNewSize;
+ delete [] m_pRoots;
+ m_pRoots = pNewArray;
+ delete [] m_pRootsInCompilationOrder;
+ m_pRootsInCompilationOrder = pNewRootsInCompilationOrder;
+ }
+
+ // Fill in the new entry in sorted order.
+ DWORD i;
+ for (i = 0; i < m_cRoots; i++)
+ if ((UPTR) m_pRoots[i].m_pRootMD > (UPTR) pRootMD)
+ break;
+ if (i < m_cRoots)
+ memmove(&m_pRoots[i + 1], &m_pRoots[i], (m_cRoots - i) * sizeof(CerRoot));
+ m_pRoots[i].m_pRootMD = pRootMD;
+ m_pRoots[i].m_pList = pList;
+
+ m_pRootsInCompilationOrder[m_cRoots] = pList;
+
+ m_cRoots++;
+}
+
+// Ngen callouts to help serialize this structure and its children to storage.
+void CerNgenRootTable::Save(DataImage *image, CorProfileData *profileData)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _DEBUG
+ DWORD dwMaxEntries = 0;
+ DWORD dwTotalEntries = 0;
+#endif
+
+ image->StoreStructure(this, sizeof(CerNgenRootTable), DataImage::ITEM_CER_ROOT_TABLE);
+ image->StoreStructure(m_pRoots, m_cRoots * sizeof(CerRoot), DataImage::ITEM_CER_ROOT_TABLE);
+
+ // Create a bitmap of boolean flags (1 bit per flag) indicating whether the CER at a given index in the array has been restored.
+ // This is initially all zero and only filled in at runtime (keep all the flags together this way because they're the only
+ // things we have to write at runtime and we want to keep them as dense as possible).
+ _ASSERTE((SizeOfRestoreBitmap() % sizeof(DWORD)) == 0);
+ m_pRestoreBitmap = new DWORD[SizeOfRestoreBitmap() / sizeof(DWORD)];
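+ // Note: the in-memory copy is deliberately set to all ones below (everything treated as restored) for the
+ // remainder of this ngen process; the copy stored in the image is zeroed during Fixup so the flags start
+ // clear at runtime.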
+ memset(m_pRestoreBitmap, 0xff, SizeOfRestoreBitmap());
+
+ image->StoreStructure(m_pRestoreBitmap,
+ SizeOfRestoreBitmap(),
+ DataImage::ITEM_CER_RESTORE_FLAGS);
+
+ // Next save off the list of MethodContextElements associated with each root.
+ for (DWORD i = 0; i < m_cRoots; i++) {
+ MethodContextElement *pEntry = m_pRootsInCompilationOrder[i];
+
+ // Count entries in list.
+ DWORD cEntries = 0;
+ while (pEntry->GetMethodDesc()) {
+ cEntries++;
+ pEntry++;
+ }
+
+ // Plus one for the sentinel value.
+ cEntries++;
+
+#ifdef _DEBUG
+ dwTotalEntries += cEntries;
+ if (cEntries > dwMaxEntries)
+ dwMaxEntries = cEntries;
+#endif
+
+ // Store this list.
+ image->StoreStructure(m_pRootsInCompilationOrder[i],
+ cEntries * sizeof(MethodContextElement),
+ DataImage::ITEM_CER_METHOD_LIST);
+ }
+
+#ifdef _DEBUG
+ if (m_cRoots > 0) {
+ CER_LOG(NGEN_STATS, ("Saving %u CER roots in ngen image\n", m_cRoots));
+ CER_LOG(NGEN_STATS, (" Max methods in CER: %u\n", dwMaxEntries));
+ CER_LOG(NGEN_STATS, (" Avg methods in CER: %.1f\n", (float)((float)dwTotalEntries / (float)m_cRoots)));
+ } else
+ CER_LOG(NGEN_STATS, ("No CER roots in ngen image\n"));
+#endif
+}
+
+void CerNgenRootTable::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD i;
+
+ // We still use the pointer to the root array even though at runtime the two structures will be adjacent.
+ image->FixupPointerField(this, offsetof(CerNgenRootTable, m_pRoots));
+
+ // Restoration flags are used only at runtime and must start off zeroed.
+ image->FixupPointerField(this, offsetof(CerNgenRootTable, m_pRestoreBitmap));
+ image->ZeroField(m_pRestoreBitmap, 0, SizeOfRestoreBitmap());
+
+ // The root list in compilation order is only used at ngen time, and is not written into the native image.
+ image->ZeroPointerField(this, offsetof(CerNgenRootTable, m_pRootsInCompilationOrder));
+
+ // Fixup all the pointers in the individual CERs.
+ for (i = 0; i < m_cRoots; i++) {
+
+ // For each MethodContextElement in the list we need to fixup a pointer to a MethodDesc and two array pointers (one for any
+ // class instantiation and one for any method instantiation). The actual MethodDescs and TypeHandles themselves are already
+ // fixed up as are the instantiation arrays we point to (they're the ones inside the generic dictionaries of the class/method
+ // concerned).
+ MethodContextElement *pList = m_pRootsInCompilationOrder[i];
+ MethodContextElement *pEntry = pList;
+ while (pEntry->GetMethodDesc()) {
+ image->FixupMethodDescPointer(pList, &pEntry->m_pMethodDesc);
+ image->FixupMethodTablePointer(pList, &pEntry->m_pExactMT);
+ pEntry++;
+ }
+ }
+}
+
+void CerNgenRootTable::FixupRVAs(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD i, j;
+
+ // Now we go back through the root table and sort the entries based on the locations of the root method descs in the new image
+ // (they may be rearranged due to IBC profiling).
+ CerRoot *pNewRoots = (CerRoot*)image->GetImagePointer(m_pRoots);
+ PREFIX_ASSUME(pNewRoots != NULL);
+
+ // Simple insertion sort. Starting at the second element, insert each candidate into its correct location in the sub-list
+ // preceding it (which by definition is already sorted).
+ for (i = 1; i < m_cRoots; i++)
+ {
+ // Look at all of the preceding elements for the first that is larger than the candidate (i.e. should succeed the
+ // candidate in sorted order). If we don't find one then the candidate is already in place and we can proceed to the
+ // next candidate.
+ for (j = 0; j < i; j++)
+ if (image->GetRVA(pNewRoots[j].m_pRootMD) > image->GetRVA(pNewRoots[i].m_pRootMD)) {
+
+ // Need to move candidate element up. Cache its value because we're about to overwrite it.
+ MethodDesc *pTmpRootMD = pNewRoots[i].m_pRootMD;
+ MethodContextElement *pTmpList = pNewRoots[i].m_pList;
+
+ // Shuffle the sorted list one up to make room for the candidate.
+ memmove(&pNewRoots[j + 1], &pNewRoots[j], (i - j) * sizeof(CerRoot));
+
+ // Insert the candidate into position.
+ pNewRoots[j].m_pRootMD = pTmpRootMD;
+ pNewRoots[j].m_pList = pTmpList;
+
+ // The candidate is sorted; move on to the next.
+ break;
+ }
+ }
+
+ // Fixup all the pointers in the individual CERs.
+ for (i = 0; i < m_cRoots; i++) {
+ // Fix up the pointer to the root method and the list of methods in the CER.
+ image->FixupField(m_pRoots, sizeof(CerRoot) * i + offsetof(CerRoot, m_pRootMD),
+ pNewRoots[i].m_pRootMD);
+ image->FixupField(m_pRoots, sizeof(CerRoot) * i + offsetof(CerRoot, m_pList),
+ pNewRoots[i].m_pList);
+ }
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+// Locate the index of a given CerRoot record in the array given the root method. This is used to access the array and to locate the
+// restored flag for the entry in the restored bitmap. NoSuchRoot is returned if the root cannot be found.
+DWORD CerNgenRootTable::FindIndex(MethodDesc *pRootMD)
+{
+ CONTRACTL {
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pRootMD));
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ // The table is guaranteed to be sorted, so we can look up our target with a binary search.
+ if (m_cRoots == 0)
+ return NoSuchRoot;
+ DWORD dwLow = 0;
+ DWORD dwHigh = m_cRoots - 1;
+ while (true) {
+
+ // Take out the simple cases first.
+
+ // The range has only one entry.
+ if (dwLow == dwHigh) {
+ if (m_pRoots[dwLow].m_pRootMD == pRootMD)
+ return dwLow;
+#ifdef _DEBUG
+ for (DWORD i = 0; i < m_cRoots; i++)
+ _ASSERTE(m_pRoots[i].m_pRootMD != pRootMD);
+#endif
+ return NoSuchRoot;
+ }
+
+ // The range has only two entries.
+ if (dwLow == dwHigh - 1) {
+ if (m_pRoots[dwLow].m_pRootMD == pRootMD)
+ return dwLow;
+ if (m_pRoots[dwHigh].m_pRootMD == pRootMD)
+ return dwHigh;
+#ifdef _DEBUG
+ for (DWORD i = 0; i < m_cRoots; i++)
+ _ASSERTE(m_pRoots[i].m_pRootMD != pRootMD);
+#endif
+ return NoSuchRoot;
+ }
+
+ // Now we can compute a midpoint that is definitely distinct and in-between the endpoints.
+ DWORD dwMid = dwLow + ((dwHigh - dwLow) / 2);
+
+ // Did we nail it?
+ if (m_pRoots[dwMid].m_pRootMD == pRootMD)
+ return dwMid;
+
+ // Otherwise adjust our range to be the bit we haven't looked at and iterate.
+ if ((UPTR)m_pRoots[dwMid].m_pRootMD < (UPTR)pRootMD)
+ dwLow = dwMid + 1;
+ else
+ dwHigh = dwMid - 1;
+ }
+}
+
+// Prepare the class if it is derived from CriticalFinalizerObject. This is used at ngen time since such classes are normally
+// prepared at runtime (at instantiation) and would therefore miss the ngen image.
+void PrepareCriticalType(MethodTable * pMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Prepare any class that satisfies the criteria. Pass a pointer to this module so that we'll only prepare any overrides of
+ // the critical methods that were actually introduced here.
+ if (pMT->HasCriticalFinalizer())
+ PrepareCriticalFinalizerObject(pMT, pMT->GetLoaderModule());
+}
+
+// Prepare a method and its statically determinable call graph if a hint attribute has been applied. This is only called at ngen
+// time to save additional preparation information into the ngen image that wouldn't normally be there (and thus lower runtime
+// overheads).
+void PrePrepareMethodIfNecessary(CORINFO_METHOD_HANDLE hMethod)
+{
+ STANDARD_VM_CONTRACT;
+
+ EX_TRY {
+
+ // Translate jit-style method handle into method desc.
+ MethodDesc *pMD = GetMethod(hMethod);
+
+ // Check for the existence of the attribute.
+ IMDInternalImport *pImport = pMD->GetMDImport();
+ mdToken tkMethod = pMD->GetMemberDef();
+ HRESULT hr = pImport->GetCustomAttributeByName(tkMethod,
+ "System.Runtime.ConstrainedExecution.PrePrepareMethodAttribute",
+ NULL, NULL);
+
+ // TODO: We should add IBC probes indicating that methods need to be pre-prepared,
+ // which can then be reflected in the IBC data. We could then add an additional check
+ // here to cover that case and get around this problem with profiling
+ // instead of manual programmer effort.
+
+ // Only prepare if we definitely saw the attribute.
+ if (hr == S_OK) {
+ // Prepare the method and its graph. There should never be any open type parameters (we can't do much at ngen time with these),
+ // so we can pass a null type context.
+ SigTypeContext sTypeContext;
+ MethodCallGraphPreparer mcgp(pMD, &sTypeContext, true, true);
+ mcgp.Run();
+ }
+
+ } EX_CATCH {
+ } EX_END_CATCH(SwallowAllExceptions);
+}
+
+#endif // FEATURE_PREJIT
+
+PtrHashCache::PtrHashCache()
+{
+ LIMITED_METHOD_CONTRACT;
+ ZeroMemory(this, sizeof(*this));
+
+ // First entry in each bucket is a chain index used to evenly distribute inserts within a bucket.
+ _ASSERTE(PHC_CHAIN > 1);
+}
+
+bool PtrHashCache::Lookup(void *pKey, DWORD *pdwValue)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(((UINT_PTR)pKey & PHC_DATA_MASK) == 0);
+
+ DWORD dwBucket = GetHash(pKey);
+
+ // Skip first entry in bucket, it's a sequence number used for insertions.
+ for (DWORD i = 1; i < PHC_CHAIN; i++) {
+ UINT_PTR uipEntry = VolatileLoad<UINT_PTR>(&m_rEntries[(dwBucket * PHC_CHAIN) + i]);
+ if ((uipEntry & ~PHC_DATA_MASK) == (UINT_PTR)pKey) {
+#ifdef _DEBUG
+ FastInterlockIncrement((LONG*)&m_dwHits);
+#endif
+ *pdwValue = uipEntry & PHC_DATA_MASK;
+ return true;
+ }
+ }
+
+#ifdef _DEBUG
+ FastInterlockIncrement((LONG*)&m_dwMisses);
+#endif
+ return false;
+}
+
+void PtrHashCache::Add(void *pKey, DWORD dwValue)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(((UINT_PTR)pKey & PHC_DATA_MASK) == 0);
+ _ASSERTE((dwValue & ~PHC_DATA_MASK) == 0);
+
+ DWORD dwBucket = GetHash(pKey);
+
+ // We keep a sequence number in the first entry of the bucket so that we distribute insertions within the bucket evenly. We're
+ // racing when we update this value, but it doesn't matter if we lose an update (we're a cache after all). We don't bother being
+ // careful to avoid overflowing the value here (we just keep incrementing); we'll do the modulo logic when we insert our value
+ // instead.
+ DWORD dwIndex = static_cast<DWORD>(m_rEntries[dwBucket * PHC_CHAIN]++);
+ dwIndex = (dwIndex % (PHC_CHAIN - 1)) + 1;
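+ // e.g. with PHC_CHAIN == 5, a raw sequence value of 7 maps to slot (7 % 4) + 1 == 4.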
+ m_rEntries[(dwBucket * PHC_CHAIN) + dwIndex] = ((UINT_PTR)pKey & ~PHC_DATA_MASK) | dwValue;
+}
+
+DWORD PtrHashCache::GetHash(void *pKey)
+{
+ LIMITED_METHOD_CONTRACT;
+
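+ // Drop the low four bits (zero for aligned keys) and spread the rest across the buckets,
+ // e.g. pKey == 0x12345670: (0x12345670 >> 4) % PHC_BUCKETS == 19088743 % 29 == 15.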
+ return (DWORD)(((UINT_PTR)pKey >> 4) % PHC_BUCKETS);
+}
+
+#ifdef _DEBUG
+void PtrHashCache::DbgDumpStats()
+{
+#if 0
+ if ((m_dwHits + m_dwMisses) == 0)
+ return;
+
+ printf("Dumping stats for PtrHashCache %08X\n", this);
+ printf(" %u hits, %u misses (%u%% hit rate)\n", m_dwHits, m_dwMisses, (m_dwHits * 100) / (m_dwHits + m_dwMisses));
+ for (DWORD i = 0; i < PHC_BUCKETS; i++)
+ printf(" [%2u] : %u insertions\n", i, m_rEntries[i * PHC_CHAIN]);
+ printf("\n");
+#endif
+}
+#endif
diff --git a/src/vm/constrainedexecutionregion.h b/src/vm/constrainedexecutionregion.h
new file mode 100644
index 0000000000..8c67cd11da
--- /dev/null
+++ b/src/vm/constrainedexecutionregion.h
@@ -0,0 +1,568 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// Methods to support the implementation of Constrained Execution Regions (CERs). This includes logic to walk the IL of methods to
+// determine the statically determinable call graph and prepare each submethod (jit, prepopulate generic dictionaries etc.,
+// everything needed to ensure that the runtime won't generate implicit failure points during the execution of said call graph).
+//
+
+//
+
+
+#ifndef __CONSTRAINED_EXECUTION_REGION_H
+#define __CONSTRAINED_EXECUTION_REGION_H
+
+
+#include <corhlpr.h>
+#include <typestring.h>
+
+
+// An enumeration that abstracts the interesting information (from our point of view) present in a reliability contract decorating a
+// method.
+enum ReliabilityContractLevel
+{
+ RCL_UNKNOWN = -1, // The contract attribute hasn't been read yet
+ RCL_NO_CONTRACT = 0, // No contract (or a fairly useless one) was specified
+ RCL_BASIC_CONTRACT = 1, // The contract promises enough for the method to be a legal member of a CER call graph
+ RCL_PREPARE_CONTRACT = 2, // The contract promises enough to be worth preparing the method as part of a CER call graph
+};
+
+// Various definitions used to parse reliability contracts. These must be kept synchronized with the managed version in
+// BCL\System\Runtime\Reliability\ReliabilityContractAttribute.cs
+
+#define RELIABILITY_CONTRACT_NAME "System.Runtime.ConstrainedExecution.ReliabilityContractAttribute"
+#define RC_CONSISTENCY_PROP_NAME "ConsistencyGuarantee"
+#define RC_CER_PROP_NAME "Cer"
+
+enum {
+ RC_CONSISTENCY_CORRUPT_PROCESS = 0,
+ RC_CONSISTENCY_CORRUPT_APPDOMAIN = 1,
+ RC_CONSISTENCY_CORRUPT_INSTANCE = 2,
+ RC_CONSISTENCY_CORRUPT_NOTHING = 3,
+ RC_CONSISTENCY_UNDEFINED = 4,
+ RC_CER_NONE = 0,
+ RC_CER_MAYFAIL = 1,
+ RC_CER_SUCCESS = 2,
+ RC_CER_UNDEFINED = 3
+};
+
+
+// We compact the reliability contract states above into a single DWORD format that is easy to cache at the assembly and class level
+// opaquely. We also encode in this DWORD whether a given part of the state has been defined yet (an assembly might set a
+// consistency level without specifying a cer level, for instance, and this information is vital when merging states between
+// assembly, class and method levels).
+// The macros below handle the encoding so nobody else needs to know the details.
+
+// The base state for an encoded DWORD: neither consistency nor cer is defined.
+#define RC_NULL RC_ENCODE(RC_CONSISTENCY_UNDEFINED, RC_CER_UNDEFINED)
+
+// Extract the raw consistency value from an encoded DWORD.
+#define RC_CONSISTENCY(_encoded) ((_encoded) >> 2)
+
+// Extract the raw cer value from an encoded DWORD.
+#define RC_CER(_encoded) ((_encoded) & 3)
+
+// Produce an encoded DWORD from a pair of raw consistency and cer values. Values must have been range validated first.
+#define RC_ENCODE(_consistency, _cer) (DWORD)(((_consistency) << 2) | (_cer))
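+// Illustrative encoding using the raw values above: RC_ENCODE(RC_CONSISTENCY_CORRUPT_NOTHING, RC_CER_MAYFAIL)
+// == (3 << 2) | 1 == 0x0D, from which RC_CONSISTENCY recovers 3 and RC_CER recovers 1.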
+
+// Produce an abstracted ReliabilityContractLevel from an encoded DWORD, see CheckForReliabilityContract for details of the rules.
+#define RC_ENCODED_TO_LEVEL(_encoded) \
+ ((RC_CONSISTENCY(_encoded) == RC_CONSISTENCY_UNDEFINED || \
+ RC_CONSISTENCY(_encoded) < RC_CONSISTENCY_CORRUPT_INSTANCE) ? RCL_NO_CONTRACT : \
+ (RC_CER(_encoded) == RC_CER_UNDEFINED || \
+ RC_CER(_encoded) == RC_CER_NONE) ? RCL_BASIC_CONTRACT : \
+ RCL_PREPARE_CONTRACT)
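+// For example, RC_ENCODE(RC_CONSISTENCY_CORRUPT_NOTHING, RC_CER_SUCCESS) maps to RCL_PREPARE_CONTRACT, while
+// RC_ENCODE(RC_CONSISTENCY_CORRUPT_PROCESS, RC_CER_SUCCESS) maps to RCL_NO_CONTRACT (consistency below CORRUPT_INSTANCE).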
+
+// Given two DWORD encodings presumed to come from different scopes (e.g. method and class) merge them to find the effective
+// contract state. It's presumed the first encoding is the most tightly scoped (i.e. method would go first in the example above) and
+// therefore takes precedence.
+#define RC_MERGE(_old, _new) RC_ENCODE((RC_CONSISTENCY(_old) == RC_CONSISTENCY_UNDEFINED) ? \
+ RC_CONSISTENCY(_new) : RC_CONSISTENCY(_old), \
+ (RC_CER(_old) == RC_CER_UNDEFINED) ? \
+ RC_CER(_new) : RC_CER(_old))
+
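+// For example, merging a method encoding that defines only cer (consistency undefined) with a class encoding that
+// defines only consistency yields RC_ENCODE(class consistency, method cer): the tighter scope wins wherever it
+// defines a value.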
+// Return true if either consistency or cer has not been specified in the encoded DWORD given.
+#define RC_INCOMPLETE(_encoded) (RC_CONSISTENCY(_encoded) == RC_CONSISTENCY_UNDEFINED || RC_CER(_encoded) == RC_CER_UNDEFINED)
+
+#ifndef CLR_STANDALONE_BINDER
+
+// Look for reliability contracts at the method, class and assembly level and parse them to extract the information we're interested
+// in from a runtime preparation viewpoint. This information is abstracted in the form of the ReliabilityContractLevel enumeration.
+ReliabilityContractLevel CheckForReliabilityContract(MethodDesc *pMD);
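+// For illustration only (a managed-side sketch, not code in this repo), the attribute this function parses is
+// applied like:
+//
+// [ReliabilityContract(Consistency.WillNotCorruptState, Cer.Success)]
+// static void DoCriticalWork() { /* CER-eligible body */ }
+//
+// with RuntimeHelpers.PrepareMethod/PrepareConstrainedRegions used on the consuming side.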
+
+
+// Structure used to track enough information to identify a method (possibly generic or belonging to a generic class). Includes a
+// MethodDesc pointer and a SigTypeContext (values of class and method type parameters to narrow down the exact method being referred
+// to). Similar to MethodContext (see ConstrainedExecutionRegion.cpp), but without the unneeded list link field (we expect to embed
+// these in arrays, hence the name).
+struct MethodContextElement
+{
+ FixupPointer<PTR_MethodDesc> m_pMethodDesc; // Pointer to a MethodDesc
+ FixupPointer<PTR_MethodTable> m_pExactMT; // Exact type to disambiguate code shared by instantiations
+
+ MethodDesc * GetMethodDesc()
+ {
+ return m_pMethodDesc.GetValue();
+ }
+
+ MethodTable * GetExactMT()
+ {
+ return m_pExactMT.GetValue();
+ }
+};
+
+
+// Base structure used to track which CERs have been prepared so far.
+// These structures are looked up via a per-module hash table using the root method desc as a key.
+// Used to avoid extra work in both the jit and PrepareMethod calls. The latter case is more involved because we support preparing a
+// CER with generic type parameters (the instantiation is passed in along with the method in the PrepareMethod call). In that case
+// we need to track exactly which instantiations we've prepared for a given method.
+struct CerPrepInfo
+{
+ CerPrepInfo() :
+ m_fFullyPrepared(false),
+ m_fRequiresInstantiation(false),
+ m_fMethodHasCallsWithinExplicitCer(false)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (!m_sIsInitAtInstHash.Init(17, NULL, NULL, FALSE))
+ COMPlusThrowOM();
+ }
+
+ bool m_fFullyPrepared; // True implies we've prep'd this once and there are no shared instantiations
+ bool m_fRequiresInstantiation; // True implies that this method is shared amongst multiple instantiations
+ bool m_fMethodHasCallsWithinExplicitCer; // True if method contains calls out from within an explicit PCER range
+ EEInstantiationHashTable m_sIsInitAtInstHash; // Hash of instantiations we've prepared this CER for
+};
+
+
+#ifdef FEATURE_PREJIT
+
+// Structure used to represent a CER by a root method and a list of MethodContextElements that indicate all the methods contained.
+// The MethodContextElement list is terminated with a sentinel entry (m_pMethodDesc set to NULL).
+// Keep this structure small since we'll access the whole array of them randomly at runtime; density is our best friend.
+struct CerRoot
+{
+ MethodDesc *m_pRootMD; // Root method (no type context since it never has type params)
+ MethodContextElement *m_pList; // List of methods in this CER
+};
+
+// Class used to track all the CERs rooted at methods defined within a given module that are discovered at ngen time. This data is
+// then used at runtime to determine when and how to perform necessary restoration work so that the CERs don't encounter any
+// unexpected failure points during execution.
+// During ngen this class keeps a dynamically expanded array of CER roots (both the class and the array are allocated from a win32
+// heap). When we save the image to storage (and thus know the final size of the table) we combine the two so that at runtime
+// they're adjacent and exactly the right size.
+class CerNgenRootTable
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+ DWORD *m_pRestoreBitmap; // Pointer to array of restored flag bits
+ DWORD m_cRoots; // Count of root methods represented
+ DWORD m_cSlots; // Extra empty slots at the tail of the array below (ngen time only)
+ CerRoot *m_pRoots; // Pointer to array of CER roots (sorted by RootMD address)
+ MethodContextElement **m_pRootsInCompilationOrder; // Pointer to array of CerRoot::m_pList (in the order AddRoot is called)
+
+public:
+
+ CerNgenRootTable() :
+ m_pRestoreBitmap(NULL),
+ m_cRoots(0),
+ m_cSlots(0),
+ m_pRoots(NULL),
+ m_pRootsInCompilationOrder(NULL)
+ {
+ }
+
+ ~CerNgenRootTable()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ delete [] m_pRestoreBitmap;
+ delete [] m_pRoots;
+ delete [] m_pRootsInCompilationOrder;
+ }
+
+ // Add a new root to the table, expanding it as necessary. Note that this method must be called with the CerCrst already held.
+ void AddRoot(MethodDesc *pRootMD, MethodContextElement *pList);
+
+ // Retrieve the address of the list of methods for the CER rooted at the given index.
+ inline MethodContextElement *GetList(DWORD dwIndex) { LIMITED_METHOD_CONTRACT; _ASSERTE(dwIndex < m_cRoots); return m_pRoots[dwIndex].m_pList; }
+
+ // Retrieve the address of the list of methods for the CER rooted at the given method. (The root must exist).
+ inline MethodContextElement *GetList(MethodDesc *pRootMD) { WRAPPER_NO_CONTRACT; return GetList(FindIndex(pRootMD)); }
+
+ // Indicate whether the given method has ngen restoration information associated with it.
+ inline bool IsNgenRootMethod(MethodDesc *pRootMD) { WRAPPER_NO_CONTRACT; return FindIndex(pRootMD) != NoSuchRoot; }
+
+ // Prepare the CER rooted at the given method (it's OK to pass a MethodDesc* that doesn't root a CER, in which case the method
+ // is a no-op).
+ void Restore(MethodDesc *pRootMD);
+
+ // Ngen callouts to help serialize this structure and its children to storage.
+ void Save(DataImage *image, CorProfileData *profileData);
+ void Fixup(DataImage *image);
+ void FixupRVAs(DataImage *image);
+
+ // Calculate (in bytes) the size of the bitmap to allocate to record whether each CER has been restored at runtime. Size is
+ // rounded up to DWORD alignment.
+ inline DWORD SizeOfRestoreBitmap()
+ {
+ LIMITED_METHOD_CONTRACT;
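+ // e.g. m_cRoots == 40: ((40 + 31) / 32) * sizeof(DWORD) == 2 * 4 == 8 bytes (64 flag bits).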
+ return ((m_cRoots + 31) / 32) * sizeof(DWORD);
+ }
+
+ inline DWORD GetRootCount() { LIMITED_METHOD_CONTRACT; return m_cRoots; }
+ inline CerRoot *GetRoots() { LIMITED_METHOD_CONTRACT; return m_pRoots; }
+ inline DWORD *GetRestoreBitmap() { LIMITED_METHOD_CONTRACT; return m_pRestoreBitmap; }
+
+private:
+ enum { NoSuchRoot = 0xffffffff };
+
+ // Locate the index of a given CerRoot record in the array given the root method. This is used to access the array and to locate
+ // the restored flag for the entry in the restored bitmap. NoSuchRoot is returned if the root cannot be found.
+ DWORD FindIndex(MethodDesc *pRootMD);
+};
+
+#endif
+
+
+// Default initial size for hash table used to track CerPrepInfo structures on a per-module basis.
+#define CER_DEFAULT_HASH_SIZE 17
+
+
+// Structure used to track a single exception handling range (catch, finally etc.). We build an array of these and then track which
+// ones have become 'activated' by virtue of having their try clause immediately preceded by a call to our preparation marker
+// method. This allows us to determine which call sites in the method body should be followed during method preparation.
+struct EHClauseRange
+{
+ DWORD m_dwTryOffset;
+ DWORD m_dwHandlerOffset;
+ DWORD m_dwHandlerLength;
+ bool m_fActive;
+};
+
+
+// Structure used to track enough information to identify a method (possibly generic or belonging to a generic class). Includes a
+// MethodDesc pointer and a SigTypeContext (values of class and method type parameters to narrow down the exact method being referred
+// to). The structure also contains a next pointer so that it can be placed in a singly linked list (see MethodContextStack below).
+struct MethodContext
+{
+ MethodContext *m_pNext; // Next MethodContext in a MethodContextStack list
+ MethodDesc *m_pMethodDesc; // Pointer to a MethodDesc
+ SigTypeContext m_sTypeContext; // Additional type parameter information to qualify the exact method targeted
+ bool m_fRoot; // Does this method contain a CER root of its own?
+
+ // Allocate and initialize a MethodContext from the per-thread stacking allocator (we assume the checkpoint has already been
+ // taken).
+ static MethodContext* PerThreadAllocate(MethodDesc *pMD, SigTypeContext *pTypeContext)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+
+ MethodContext *pContext = new (&GetThread()->m_MarshalAlloc) MethodContext();
+ pContext->m_pMethodDesc = pMD;
+ pContext->m_sTypeContext = *pTypeContext;
+ pContext->m_fRoot = false;
+
+ return pContext;
+ }
+
+ // Determine if two MethodContexts are equivalent (same MethodDesc pointer and identical arrays of TypeHandles in the
+ // TypeContext).
+ bool Equals(MethodContext *pOther)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (pOther->m_pMethodDesc != m_pMethodDesc)
+ return false;
+
+ if (pOther->m_sTypeContext.m_classInst.GetNumArgs() != m_sTypeContext.m_classInst.GetNumArgs())
+ return false;
+
+ if (pOther->m_sTypeContext.m_methodInst.GetNumArgs() != m_sTypeContext.m_methodInst.GetNumArgs())
+ return false;
+
+ DWORD i;
+
+ for (i = 0; i < m_sTypeContext.m_classInst.GetNumArgs(); i++)
+ if (pOther->m_sTypeContext.m_classInst[i] != m_sTypeContext.m_classInst[i])
+ return false;
+
+ for (i = 0; i < m_sTypeContext.m_methodInst.GetNumArgs(); i++)
+ if (pOther->m_sTypeContext.m_methodInst[i] != m_sTypeContext.m_methodInst[i])
+ return false;
+
+ return true;
+ }
+
+#ifdef _DEBUG
+#define CER_DBG_MAX_OUT 4096
+ char *ToString()
+ {
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ // Support up to two ToString calls before we re-use a buffer and overwrite previous output.
+ static char szOut1[CER_DBG_MAX_OUT];
+ static char szOut2[CER_DBG_MAX_OUT];
+ static char *pszOut = szOut1;
+
+ StackSString ssBuffer;
+ StackScratchBuffer ssScratch;
+
+ TypeString::AppendMethod(ssBuffer, m_pMethodDesc, m_sTypeContext.m_classInst, TypeString::FormatNamespace | TypeString::FormatAngleBrackets);
+ sprintf_s(&pszOut[0], CER_DBG_MAX_OUT, "%s", ssBuffer.GetUTF8(ssScratch));
+
+ char *pszReturn = pszOut;
+ pszOut = pszOut == szOut1 ? szOut2 : szOut1;
+ return pszReturn;
+ }
+#endif
+};
+
+// Maintains a stack of MethodContexts (implemented as a singly linked list with insert and remove operations only at the head).
+class MethodContextStack
+{
+ MethodContext *m_pFirst; // The head of the linked list
+ DWORD m_cElements; // Count of elements in the stack
+
+public:
+
+ // Initialize to an empty list.
+ MethodContextStack()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pFirst = NULL;
+ m_cElements = 0;
+ }
+
+ // Push a MethodContext pointer on the head of the list.
+ void Push(MethodContext *pContext)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ pContext->m_pNext = m_pFirst;
+ m_pFirst = pContext;
+ m_cElements++;
+ }
+
+ // Remove and retrieve the most recently pushed MethodContext. Return NULL if no more entries exist.
+ MethodContext *Pop()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ MethodContext* pContext = m_pFirst;
+ if (pContext == NULL)
+ return NULL;
+
+ m_pFirst = pContext->m_pNext;
+ m_cElements--;
+
+ return pContext;
+ }
+
+ // Return true if a MethodContext equivalent to the argument exists in the stack.
+ bool IsInStack(MethodContext *pMatchContext)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ MethodContext* pContext = m_pFirst;
+ while (pContext) {
+ if (pContext->Equals(pMatchContext))
+ return true;
+ pContext = pContext->m_pNext;
+ }
+
+ return false;
+ }
+
+ // Get count of elements in the stack.
+ DWORD GetCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_cElements;
+ }
+};
+
+
+class MethodCallGraphPreparer
+{
+ MethodDesc *m_pRootMD;
+ SigTypeContext *m_pRootTypeContext;
+
+ COR_ILMETHOD_DECODER *m_pMethodDecoder;
+
+ MethodContextStack m_sLeftToProcess; // MethodContexts we have yet to look at in this call graph
+ MethodContextStack m_sAlreadySeen; // MethodContexts we've already processed at least once
+
+ EHClauseRange *m_pEHClauses; // Array of exception handling clauses in current method (only if !fEntireMethod)
+ DWORD m_cEHClauses; // Number of elements in above array
+ CerPrepInfo *m_pCerPrepInfo; // Context recording how much preparation this region has had
+ MethodContextStack m_sPersist; // MethodContexts we need to keep around past the 'prepare' phase of preparation
+#ifdef FEATURE_PREJIT
+ bool m_fNgen; // True when being called as part of an ngen
+ MethodContextStack m_sRootMethods; // Methods containing a sub-CER (excludes the real root)
+#endif
+ Thread *m_pThread; // Cached managed thread pointer (for allocations and the like)
+ bool m_fPartialPreparation; // True if we have unbound type vars at the CER root and can only prep one instantiation at a time
+
+ bool m_fEntireMethod; // True if we are preparing the entire method
+ bool m_fExactTypeContext;
+ bool m_fMethodHasCallsWithinExplicitCer; // True if method contains calls out from within an explicit PCER range
+
+ bool m_fIgnoreVirtualCERCallMDA; // True if the VirtualCER MDA should not be fired
+
+ MethodCallGraphPreparer * m_pNext; // Links this instance on a per-thread stack used to detect
+ // and defeat recursive preparations
+
+ public:
+ MethodCallGraphPreparer(MethodDesc *pRootMD, SigTypeContext *pRootTypeContext, bool fEntireMethod, bool fExactTypeContext, bool fIgnoreVirtualCERCallMDA = false);
+
+ // Walk the call graph of the method given by m_pRootMD (and type context in m_pRootTypeContext which provides instantiation information
+ // for generic methods/classes).
+ //
+ // If fEntireMethod is true then the entire body of pRootMD is scanned for callsites, otherwise we assume that one or more CER
+ // exception handlers exist in the method and only the finally and catch blocks of such handlers are scanned for graph roots.
+ //
+ // Each method we come across in the call graph (excluding late bound invocation destinations precipitated by virtual or interface
+ // calls) is jitted and has any generic dictionary information we can determine at jit time prepopulated. This includes implicit
+ // cctor invocations. If this method is called at ngen time we will attach extra fixup information to the affected method to ensure
+ // that fixing up the root method of the graph will cause all methods in the graph to be fixed up at that point also.
+ //
+ // Some generic dictionary entries may not be prepopulated if unbound type variables exist at the root of the call tree. Such cases
+ // will be ignored (as for the virtual/interface dispatch case we assume the caller will use an out-of-band mechanism to pre-prepare
+ // these entries explicitly).
+ //
+ // Returns true if the m_pRootMD contains a CER that calls outside the method.
+ //
+ bool Run();
+
+ // Methods used to control re-entrancy on the same thread. Essentially we'd like to avoid all re-entrancy
+ // (since it can lead to unbounded recursion easily) but unfortunately jitting methods during the
+ // preparation phase can cause this both directly (if we spot a sub-root) or indirectly (where implicit
+ // jit execution of a cctor causes a possibly unrelated CER graph to be prepared). The algorithm we use to
+ // avoid this records a stack of preparations attempts on the current thread (implemented via a singly
+ // linked list of the MethodCallGraphPreparer instances). Re-entrant prepare requests become noops if
+ // they're for a root method we're already processing (anywhere in the stack) and run to completion
+ // otherwise. This prevents infinite recursion since it removes at least one method from the intersection
+ // of the CER call graphs on each iteration. Theoretically it might not be the most efficient solution
+ // since there might still be a lot of overlap between graphs, but in practice the number of sub-CER roots
+ // is likely to be small and we won't recurse very far. This will still allow a re-entrant preparation
+ // that is the result of running a cctor to potentially early-out (and thus allow code to run before its
+ // CERs have been fully prepped). But this should only happen when a CER causes (directly or indirectly) a
+ // cctor to run that depends on that CER having been prepared already, which we really can't do much
+ // about.
+ //
+ BOOL CanPreparationProceed(MethodDesc * pMD, SigTypeContext * pTypeContext);
+
+ static void BeginPrepareCerForHolder(MethodCallGraphPreparer *pPrepState);
+ static void EndPrepareCerForHolder(MethodCallGraphPreparer *pPrepState);
+
+ typedef Holder<MethodCallGraphPreparer*, BeginPrepareCerForHolder, EndPrepareCerForHolder> ThreadPreparingCerHolder;
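+ // Typical usage (see CerNgenRootTable::Restore): construct a MethodCallGraphPreparer for the root, then
+ // wrap it in a ThreadPreparingCerHolder for the duration of the preparation so recursive requests can be detected.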
+
+ private:
+ void GetEHClauses();
+ void MarkEHClauseActivatedByCERCall(MethodContext *pContext, BYTE *pbIL, DWORD cbIL);
+ bool CheckIfCallsiteWithinCER(DWORD dwOffset);
+ bool ShouldGatherExplicitCERCallInfo();
+ void LookForInterestingCallsites(MethodContext *pContext);
+ void PrepareMethods();
+ bool RecordResults();
+};
+
+// Determines whether the given method contains a CER root that can be pre-prepared (i.e. prepared at jit time).
+bool ContainsPrePreparableCerRoot(MethodDesc *pMD);
+
+// Prepares the critical finalizer call graph for the given object type (which must derive from CriticalFinalizerObject). This
+// involves preparing at least the finalizer method and possibly some others (for SafeHandle and CriticalHandle derivations). If a
+// module pointer is supplied then only the critical methods introduced in that module are prepared (this is used at ngen time to
+// ensure that we're only generating ngen preparation info for the targeted module).
+void PrepareCriticalFinalizerObject(MethodTable *pMT, Module *pModule = NULL);
+
+void PrepareMethodDesc(MethodDesc* pMD, Instantiation classInst = Instantiation(), Instantiation methodInst = Instantiation(), BOOL onlyContractedMethod = FALSE, BOOL fIgnoreVirtualCERCallMDA = FALSE);
+// Determine whether the method given as a parameter is the root of a CER.
+// @todo: Need an x86 offset as well and logic to determine whether we're actually in a root-CER portion of the method (if the whole
+// thing isn't the root).
+bool IsCerRootMethod(MethodDesc *pMD);
+
+// Fill the cache of overflowed generic dictionary entries that the jit maintains with all the overflow slots stored so far in the
+// dictionary layout.
+void PrepopulateGenericHandleCache(DictionaryLayout *pDictionaryLayout,
+ MethodDesc *pMD,
+ MethodTable *pMT);
+
+DWORD GetReliabilityContract(IMDInternalImport *pImport, mdToken tkParent);
+
+#ifdef FEATURE_PREJIT
+
+// Prepare the class if it is derived from CriticalFinalizerObject. This is used at ngen time since such classes are normally
+// prepared at runtime (at instantiation) and would therefore miss the ngen image.
+void PrepareCriticalType(MethodTable *pMT);
+
+// Prepare a method and its statically determinable call graph if a hint attribute has been applied. This is only called at ngen
+// time to save additional preparation information into the ngen image that wouldn't normally be there (and thus lower runtime
+// overheads).
+void PrePrepareMethodIfNecessary(CORINFO_METHOD_HANDLE hMethod);
+
+#endif
+
+
+// A fixed sized hash table keyed by pointers and storing two bits worth of value for every entry. The value is stored in the low
+// order bits of the keys, so the pointers must be at least DWORD aligned. No hash table expansion occurs so new entries will sooner
+// or later overwrite old. The implementation uses no locks (all accesses are single aligned pointer sized operations and therefore
+// inherently atomic).
+// The purpose of this table is to store a smallish number of reliability contract levels for the most recently queried methods,
+// mainly for the purpose of speeding up thread abort processing (where we will walk the stack probing methods for contracts,
+// sometimes repeatedly). So we use a small fixed sized hash to speed up lookups on average but avoid impacting working set.
+#define PHC_BUCKETS 29
+#define PHC_CHAIN 5
+#define PHC_DATA_MASK 3
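+// An occupied entry packs key and value together as ((UINT_PTR)key & ~PHC_DATA_MASK) | value, so with
+// PHC_DATA_MASK == 3 the low two bits carry the value and the remaining bits identify the key.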
+
+class PtrHashCache
+{
+public:
+ PtrHashCache();
+ bool Lookup(void *pKey, DWORD *pdwValue);
+ void Add(void *pKey, DWORD dwValue);
+
+#ifdef _DEBUG
+ void DbgDumpStats();
+#endif
+
+private:
+ DWORD GetHash(void *pKey);
+
+ UINT_PTR m_rEntries[PHC_BUCKETS * PHC_CHAIN];
+
+#ifdef _DEBUG
+ DWORD m_dwHits;
+ DWORD m_dwMisses;
+#endif
+};
+
+#endif // CLR_STANDALONE_BINDER
+
+#endif
diff --git a/src/vm/context.h b/src/vm/context.h
new file mode 100644
index 0000000000..05247df405
--- /dev/null
+++ b/src/vm/context.h
@@ -0,0 +1,231 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _H_CONTEXT_
+#define _H_CONTEXT_
+
+#include "specialstatics.h"
+#include "fcall.h"
+
+#ifdef FEATURE_COMINTEROP
+class RCWCache;
+#endif // FEATURE_COMINTEROP
+
+typedef DPTR(class Context) PTR_Context;
+
+#ifdef FEATURE_REMOTING
+
+class Context
+{
+public:
+ enum CallbackType
+ {
+ Wait_callback = 0,
+ MonitorWait_callback = 1,
+ ADTransition_callback = 2,
+ SignalAndWait_callback = 3
+ };
+
+ typedef struct
+ {
+ int numWaiters;
+ HANDLE* waitHandles;
+ BOOL waitAll;
+ DWORD millis;
+ BOOL alertable;
+ DWORD* pResult;
+ } WaitArgs;
+
+ typedef struct
+ {
+ HANDLE* waitHandles;
+ DWORD millis;
+ BOOL alertable;
+ DWORD* pResult;
+ } SignalAndWaitArgs;
+
+ typedef struct
+ {
+ INT32 millis;
+ PendingSync* syncState;
+ BOOL* pResult;
+ } MonitorWaitArgs;
+
+
+ typedef struct
+ {
+ enum CallbackType callbackId;
+ void* callbackData;
+ } CallBackInfo;
+
+ typedef void (*ADCallBackFcnType)(LPVOID);
+
+ struct ADCallBackArgs
+ {
+ ADCallBackFcnType pTarget;
+ LPVOID pArguments;
+ };
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+friend class Thread;
+friend class ThreadNative;
+friend class ContextBaseObject;
+friend class CRemotingServices;
+friend struct PendingSync;
+
+ Context(AppDomain *pDomain);
+ ~Context();
+ static void Initialize();
+ PTR_AppDomain GetDomain()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pDomain;
+ }
+
+ // Get and Set the exposed System.Runtime.Remoting.Context
+ // object which corresponds to this context.
+ OBJECTREF GetExposedObject();
+ OBJECTREF GetExposedObjectRaw();
+ PTR_Object GetExposedObjectRawUnchecked();
+ PTR_PTR_Object GetExposedObjectRawUncheckedPtr();
+ void SetExposedObject(OBJECTREF exposed);
+
+ // Query whether the exposed object exists
+ BOOL IsExposedObjectSet();
+
+ static LPVOID GetStaticFieldAddress(FieldDesc *pFD);
+
+ PTR_VOID GetStaticFieldAddrNoCreate(FieldDesc *pFD);
+
+ static Context* CreateNewContext(AppDomain *pDomain);
+
+ static void FreeContext(Context* victim)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(victim));
+ }
+ CONTRACTL_END;
+
+ delete victim;
+ }
+
+ static Context* GetExecutionContext(OBJECTREF pObj);
+ static void RequestCallBack(ADID appDomain, Context* targetCtxID, void* privateData);
+
+ // <TODO>Made public to get around the context GC issue </TODO>
+ static BOOL ValidateContext(Context *pCtx);
+
+ inline STATIC_DATA *GetSharedStaticData()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pSharedStaticData;
+ }
+
+ inline void SetSharedStaticData(STATIC_DATA *pData)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pSharedStaticData = PTR_STATIC_DATA(pData);
+ }
+
+ inline STATIC_DATA *GetUnsharedStaticData()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pUnsharedStaticData;
+ }
+
+ inline void SetUnsharedStaticData(STATIC_DATA *pData)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pUnsharedStaticData = PTR_STATIC_DATA(pData);
+ }
+
+ // Functions called from BCL on a managed context object
+ static FCDECL2(void, SetupInternalContext, ContextBaseObject* pThisUNSAFE, CLR_BOOL bDefault);
+ static FCDECL1(void, CleanupInternalContext, ContextBaseObject* pThisUNSAFE);
+ static FCDECL1(void, ExecuteCallBack, LPVOID privateData);
+
+private:
+ // Static helper functions:
+
+ static void ExecuteWaitCallback(WaitArgs* waitArgs);
+ static void ExecuteMonitorWaitCallback(MonitorWaitArgs* waitArgs);
+ static void ExecuteSignalAndWaitCallback(SignalAndWaitArgs* signalAndWaitArgs);
+ void GetStaticFieldAddressSpecial(FieldDesc *pFD, MethodTable *pMT, int *pSlot, LPVOID *ppvAddress);
+ PTR_VOID CalculateAddressForManagedStatic(int slot);
+
+ // Static Data Members:
+
+ static CrstStatic s_ContextCrst;
+
+
+ // Non-static Data Members:
+ // Pointer to native context static data
+ PTR_STATIC_DATA m_pUnsharedStaticData;
+
+ // Pointer to native context static data
+ PTR_STATIC_DATA m_pSharedStaticData;
+
+ typedef SimpleList<OBJECTHANDLE> ObjectHandleList;
+
+ ObjectHandleList m_PinnedContextStatics;
+
+ // <TODO> CTS. Domains should really be policies on a context and not
+ // entry in the context object. When AppDomains become an attribute of
+ // a context then add the policy.</TODO>
+ PTR_AppDomain m_pDomain;
+
+ OBJECTHANDLE m_ExposedObjectHandle;
+
+ DWORD m_Signature;
+ // NOTE: please maintain the signature as the last member field!!!
+};
+
+FCDECL0(LPVOID, GetPrivateContextsPerfCountersEx);
+
+#else // FEATURE_REMOTING
+
+// if FEATURE_REMOTING is not defined there will be only the default context for each appdomain
+// and contexts will not be exposed to users (so there will be no managed Context class)
+
+class Context
+{
+ PTR_AppDomain m_pDomain;
+
+public:
+#ifndef DACCESS_COMPILE
+ Context(AppDomain *pDomain)
+ {
+ m_pDomain = pDomain;
+ }
+#endif
+
+ PTR_AppDomain GetDomain()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pDomain;
+ }
+
+ static void Initialize()
+ {
+ }
+
+ typedef void (*ADCallBackFcnType)(LPVOID);
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+};
+
+#endif // FEATURE_REMOTING
+
+#endif
diff --git a/src/vm/contexts.cpp b/src/vm/contexts.cpp
new file mode 100644
index 0000000000..038c5e5a45
--- /dev/null
+++ b/src/vm/contexts.cpp
@@ -0,0 +1,940 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// Contexts.CPP
+//
+
+//
+// Implementation for class Context
+//
+
+
+#include "common.h"
+
+#ifdef FEATURE_REMOTING
+
+#include "context.h"
+#include "excep.h"
+#include "field.h"
+#include "remoting.h"
+#include "perfcounters.h"
+#include "specialstatics.h"
+#include "appdomain.inl"
+
+#ifdef FEATURE_COMINTEROP
+#include "runtimecallablewrapper.h"
+#endif // FEATURE_COMINTEROP
+
+#ifndef DACCESS_COMPILE
+
+#define CONTEXT_SIGNATURE (0x2b585443) // CTX+
+#define CONTEXT_DESTROYED (0x2d585443) // CTX-
+
+// Lock for safe operations
+CrstStatic Context::s_ContextCrst;
+
+
+Context::Context(AppDomain *pDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pDomain));
+ }
+ CONTRACTL_END;
+
+ m_pDomain = pDomain;
+ m_Signature = CONTEXT_SIGNATURE;
+
+ // This needs to be a LongWeakHandle since we want to be able
+ // to run finalizers on Proxies while the Context itself is
+ // unreachable. When running the finalizer we will have to
+ // transition into the context like a regular remote call.
+ // If this were a short weak handle, it would cease being updated
+ // as soon as the context became unreachable. By making it a long
+ // weak handle, it stays updated until Context::Finalize is run.
+
+ m_ExposedObjectHandle = pDomain->CreateLongWeakHandle(NULL);
+
+ // Set the pointers to the static data storage
+ m_pUnsharedStaticData = NULL;
+ m_pSharedStaticData = NULL;
+
+ COUNTER_ONLY(GetPerfCounters().m_Context.cContexts++);
+}
+
+Context::~Context()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BOOL fADUnloaded = m_pDomain->NoAccessToHandleTable();
+ if (!fADUnloaded)
+ {
+ DestroyLongWeakHandle(m_ExposedObjectHandle);
+ }
+
+ m_pDomain = NULL;
+ m_Signature = CONTEXT_DESTROYED;
+
+ // Cleanup the static data storage
+ if(m_pUnsharedStaticData)
+ {
+ for(WORD i = 0; i < m_pUnsharedStaticData->cElem; i++)
+ {
+ delete [] (BYTE *) (m_pUnsharedStaticData->dataPtr[i]);
+ }
+ delete [] m_pUnsharedStaticData;
+ m_pUnsharedStaticData = NULL;
+ }
+
+ if(m_pSharedStaticData)
+ {
+ for(WORD i = 0; i < m_pSharedStaticData->cElem; i++)
+ {
+ delete [] (BYTE *) (m_pSharedStaticData->dataPtr[i]);
+ }
+ delete [] m_pSharedStaticData;
+ m_pSharedStaticData = NULL;
+ }
+
+ // Destroy pinning handles associated with this context
+ ObjectHandleList::NodeType* pHandleNode;
+ while ((pHandleNode = m_PinnedContextStatics.UnlinkHead() ) != NULL)
+ {
+ if (!fADUnloaded)
+ {
+ DestroyPinningHandle(pHandleNode->data);
+ }
+ delete pHandleNode;
+ }
+
+ COUNTER_ONLY(GetPerfCounters().m_Context.cContexts--);
+}
+
+Context* Context::CreateNewContext(AppDomain *pDomain)
+{
+ CONTRACT (Context*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pDomain));
+ }
+ CONTRACT_END;
+
+ Context *p = new Context(pDomain);
+ RETURN p;
+}
+
+void Context::Initialize()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Initialize the context critical section
+ s_ContextCrst.Init(CrstContexts, (CrstFlags)(CRST_REENTRANCY|CRST_HOST_BREAKABLE));
+}
+
+BOOL Context::ValidateContext(Context *pCtx)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pCtx));
+ }
+ CONTRACTL_END;
+
+ BOOL bRet = FALSE;
+
+ EX_TRY
+ {
+ if (pCtx->m_Signature == CONTEXT_SIGNATURE)
+ bRet = TRUE;
+ }
+ EX_CATCH
+ {
+ // Swallow exceptions - if not a valid ctx, just return false.
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ return bRet;
+}
+
+// If the object we are creating is a proxy to another appdomain, we want to create the wrapper for the
+// new object in the appdomain of the proxy target.
+Context* Context::GetExecutionContext(OBJECTREF pObj)
+{
+ CONTRACT (Context*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pObj != NULL);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ Context *pContext = NULL;
+ if (pObj->IsTransparentProxy())
+ pContext = CRemotingServices::GetServerContextForProxy(pObj);
+ if (pContext == NULL)
+ pContext = GetAppDomain()->GetDefaultContext();
+
+ RETURN pContext;
+}
+
+OBJECTREF Context::GetExposedObject()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (ObjectFromHandle(m_ExposedObjectHandle) == NULL)
+ {
+ // This call should fault in the managed context for the thread
+ MethodDescCallSite getCurrentContext(METHOD__THREAD__GET_CURRENT_CONTEXT);
+ CONTEXTBASEREF ctx = (CONTEXTBASEREF) getCurrentContext.Call_RetOBJECTREF((ARG_SLOT*)NULL);
+
+ GCPROTECT_BEGIN(ctx);
+ {
+ // Take a lock to make sure that only one thread creates the object.
+ // This locking may be too severe!
+ CrstHolder ch(&s_ContextCrst);
+
+ // Check to see if another thread has not already created the exposed object.
+ if (ObjectFromHandle(m_ExposedObjectHandle) == NULL)
+ {
+ // Keep a weak reference to the exposed object.
+ StoreObjectInHandle(m_ExposedObjectHandle, (OBJECTREF) ctx);
+
+ ctx->SetInternalContext(this);
+ }
+ }
+ GCPROTECT_END();
+
+ }
+ return ObjectFromHandle(m_ExposedObjectHandle);
+}
+
+void Context::SetExposedObject(OBJECTREF exposed)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(exposed != NULL);
+ PRECONDITION(ObjectFromHandle(m_ExposedObjectHandle) == NULL);
+ }
+ CONTRACTL_END;
+
+ StoreObjectInHandle(m_ExposedObjectHandle, exposed);
+}
+
+// This is called by the EE to transition into a context (possibly in
+// another appdomain) and execute the method Context::ExecuteCallBack
+// with the private data provided to this method.
+void Context::RequestCallBack(ADID appDomainID, Context* targetCtxID, void* privateData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(targetCtxID));
+ PRECONDITION(CheckPointer(privateData));
+ PRECONDITION(ValidateContext((Context*)targetCtxID));
+ }
+ CONTRACTL_END;
+ // A warning: don't touch targetCtxID until you have verified appDomainID,
+ // unless the latter is CURRENT_APPDOMAIN_ID
+
+ // Get the current context of the thread. This is assumed to be
+ // the context where the request originated
+ Context *pCurrCtx;
+ pCurrCtx = GetCurrentContext();
+
+ // Check that the target context is not the same (presumably the caller has checked for it).
+ _ASSERTE(pCurrCtx != targetCtxID);
+
+ // Check if we might be going to a context in another appDomain.
+ ADID targetDomainID;
+
+ if (appDomainID == CURRENT_APPDOMAIN_ID)
+ {
+ targetDomainID = (ADID)0;
+ _ASSERTE(targetCtxID->GetDomain()==::GetAppDomain());
+ }
+ else
+ {
+ targetDomainID=appDomainID;
+#ifdef _DEBUG
+ AppDomainFromIDHolder ad(appDomainID, FALSE);
+ if (!ad.IsUnloaded())
+ _ASSERTE(targetCtxID->GetDomain()->GetId()==appDomainID);
+#endif
+ }
+
+ // we need to be in cooperative mode for jitting
+ GCX_COOP();
+
+ MethodDescCallSite callback(METHOD__CONTEXT__CALLBACK);
+
+ ARG_SLOT args[3];
+ args[0] = PtrToArgSlot(targetCtxID);
+ args[1] = PtrToArgSlot(privateData);
+ args[2] = (ARG_SLOT) (size_t)targetDomainID.m_dwId;
+
+ callback.Call(args);
+}
+
+/*** Definitions of callback executions for the various callbacks that are known to EE ***/
+
+// Callback for waits on waithandle
+void Context::ExecuteWaitCallback(WaitArgs* waitArgs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(waitArgs));
+ }
+ CONTRACTL_END;
+
+ Thread* pCurThread = GetThread();
+ _ASSERTE(pCurThread != NULL);
+
+ // DoAppropriateWait switches to preemptive GC before entering the wait
+ *(waitArgs->pResult) = pCurThread->DoAppropriateWait( waitArgs->numWaiters,
+ waitArgs->waitHandles,
+ waitArgs->waitAll,
+ waitArgs->millis,
+ waitArgs->alertable?WaitMode_Alertable:WaitMode_None);
+}
+
+// Callback for monitor wait on objects
+void Context::ExecuteMonitorWaitCallback(MonitorWaitArgs* waitArgs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(waitArgs));
+ }
+ CONTRACTL_END;
+
+ Thread* pCurThread = GetThread();
+ _ASSERTE(pCurThread != NULL);
+
+ GCX_PREEMP();
+
+ *(waitArgs->pResult) = pCurThread->Block(waitArgs->millis,
+ waitArgs->syncState);
+}
+
+// Callback for signalandwait on waithandles
+void Context::ExecuteSignalAndWaitCallback(SignalAndWaitArgs* signalAndWaitArgs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(signalAndWaitArgs));
+ }
+ CONTRACTL_END;
+
+ Thread* pCurThread = GetThread();
+ _ASSERTE(pCurThread != NULL);
+
+ // DoAppropriateWait switches to preemptive GC before entering the wait
+ *(signalAndWaitArgs->pResult) = pCurThread->DoSignalAndWait( signalAndWaitArgs->waitHandles,
+ signalAndWaitArgs->millis,
+ signalAndWaitArgs->alertable);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: Context::GetStaticFieldAddress private
+//
+// Synopsis: Get the address of the field relative to the current context.
+// If an address has not been assigned yet then create one.
+//
+//+----------------------------------------------------------------------------
+LPVOID Context::GetStaticFieldAddress(FieldDesc *pFD)
+{
+ CONTRACT (LPVOID)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pFD));
+ PRECONDITION(!s_ContextCrst.OwnedByCurrentThread());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ LPVOID pvAddress = NULL;
+ Context* pCtx = NULL;
+ // for static field the MethodTable is exact even for generic classes
+ MethodTable* pMT = pFD->GetEnclosingMethodTable();
+ BOOL fIsShared = pMT->IsDomainNeutral();
+ DWORD dwClassOffset = pMT->GetContextStaticsOffset();
+ DWORD currElem = 0;
+ STATIC_DATA* pData;
+
+ // NOTE: if you change this method, you must also change
+ // GetStaticFieldAddrNoCreate below.
+
+ if (dwClassOffset == (DWORD)-1)
+ {
+ dwClassOffset = pMT->AllocateContextStaticsOffset();
+ }
+
+ // Retrieve the current context
+ pCtx = GetCurrentContext();
+ _ASSERTE(NULL != pCtx);
+
+ // Acquire the context lock before accessing the static data pointer
+ {
+ CrstHolder ch(&s_ContextCrst);
+
+ if(!fIsShared)
+ pData = pCtx->m_pUnsharedStaticData;
+ else
+ pData = pCtx->m_pSharedStaticData;
+
+ if(NULL != pData)
+ currElem = pData->cElem;
+
+ // Check whether we have allocated space for storing a pointer to
+ // this class' context static store
+ if(dwClassOffset >= currElem)
+ {
+ // Allocate space for storing pointers
+ DWORD dwNewElem = (currElem == 0 ? 4 : currElem*2);
+
+ // Ensure that we grow to a size larger than the index we intend to use
+ while (dwNewElem <= dwClassOffset)
+ dwNewElem = 2*dwNewElem;
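+
+ // For example: with currElem == 4 and dwClassOffset == 9, dwNewElem
+ // starts at 8 and doubles to 16, the first size that covers index 9.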
+
+ STATIC_DATA *pNew = (STATIC_DATA *)new BYTE[sizeof(STATIC_DATA) + dwNewElem*sizeof(LPVOID)];
+
+ // Set the new count.
+ pNew->cElem = dwNewElem;
+
+ if(NULL != pData)
+ {
+ // Copy the old data into the new data
+ memcpy(&pNew->dataPtr[0], &pData->dataPtr[0], currElem*sizeof(LPVOID));
+
+ // Delete the old data
+ delete [] (BYTE*) pData;
+ }
+
+ // Zero init any new elements.
+ ZeroMemory(&pNew->dataPtr[currElem], (dwNewElem - currElem)* sizeof(LPVOID));
+
+ // Update the locals
+ pData = pNew;
+
+ // Reset the pointers in the context object to point to the
+ // new memory
+ if(!fIsShared)
+ pCtx->m_pUnsharedStaticData = pData;
+ else
+ pCtx->m_pSharedStaticData = pData;
+ }
+
+ _ASSERTE(NULL != pData);
+
+ // Check whether we have to allocate space for
+ // the context local statics of this class
+ if(NULL == pData->dataPtr[dwClassOffset])
+ {
+ DWORD dwSize = pMT->GetContextStaticsSize();
+
+ // Allocate memory for context static fields
+ LPBYTE pFields = new BYTE[dwSize];
+
+ // Initialize the memory allocated for the fields
+ ZeroMemory(pFields, dwSize);
+
+ pData->dataPtr[dwClassOffset] = pFields;
+ }
+
+ _ASSERTE(NULL != pData->dataPtr[dwClassOffset]);
+
+ pvAddress = (LPVOID)((LPBYTE)pData->dataPtr[dwClassOffset] + pFD->GetOffset());
+
+ // For object and value class fields we have to allocate storage in the
+ // __StaticContainer class in the managed heap
+ if(pFD->IsObjRef() || pFD->IsByValue())
+ {
+ // in this case *pvAddress == bucket|index
+ int *pSlot = (int*)pvAddress;
+ pvAddress = NULL;
+ pCtx->GetStaticFieldAddressSpecial(pFD, pMT, pSlot, &pvAddress);
+
+ if (pFD->IsByValue())
+ {
+ _ASSERTE(pvAddress != NULL);
+ pvAddress = (*((OBJECTREF*)pvAddress))->GetData();
+ }
+ // ************************************************
+ // ************** WARNING *************************
+ // Do not provoke GC from here to the point JIT gets
+ // pvAddress back
+ // ************************************************
+ _ASSERTE(*pSlot > 0);
+ }
+ }
+
+ RETURN pvAddress;
+}
+
+
+//+----------------------------------------------------------------------------
+//
+// Method: Context::GetStaticFieldAddressSpecial private
+//
+// Synopsis: Allocate an entry in the __StaticContainer class in the
+// managed heap for static objects and value classes
+//
+//+----------------------------------------------------------------------------
+
+// NOTE: At one point we used to allocate these in the long lived handle table
+// which is per-appdomain. However, that causes them to get rooted and not
+// cleaned up until the appdomain gets unloaded. This is not very desirable
+// since a context static object may hold a reference to the context itself or
+// to a proxy in the context causing a whole lot of garbage to float around.
+// Now (2/13/01) these are allocated from a managed structure rooted in each
+// managed context.
+
+void Context::GetStaticFieldAddressSpecial(FieldDesc *pFD, MethodTable *pMT, int *pSlot, LPVOID *ppvAddress)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pFD));
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(pSlot));
+ PRECONDITION(CheckPointer(ppvAddress));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF *pObjRef = NULL;
+ BOOL bNewSlot = (*pSlot == 0);
+
+ if (bNewSlot)
+ {
+ // ! this line will trigger a GC, don't move it down
+ // ! without protecting the args[] and other OBJECTREFS
+ OBJECTREF orThis = GetExposedObject();
+ GCPROTECT_BEGIN(orThis);
+
+ MethodDescCallSite reserveSlot(METHOD__CONTEXT__RESERVE_SLOT, &orThis);
+
+ // We need to assign a location for this static field.
+ // Call the managed helper
+ ARG_SLOT args[1] =
+ {
+ ObjToArgSlot(orThis)
+ };
+
+ // The managed ReserveSlot method counts on this!
+ _ASSERTE(s_ContextCrst.OwnedByCurrentThread());
+ _ASSERTE(args[0] != 0);
+
+ *pSlot = reserveSlot.Call_RetI4(args);
+
+ _ASSERTE(*pSlot>0);
+
+ GCPROTECT_END();
+
+ // For value class fields we box the value and store a reference
+ // to a boxed version of the value class. This allows the standard GC
+ // algorithm to take care of internal pointers in the value class.
+ if (pFD->IsByValue())
+ {
+ // Extract the type of the field
+ TypeHandle th = pFD->GetFieldTypeHandleThrowing();
+
+ OBJECTHANDLE oh;
+ OBJECTREF obj = MethodTable::AllocateStaticBox(th.GetMethodTable(), pMT->HasFixedAddressVTStatics(), &oh);
+ pObjRef = (OBJECTREF*)CalculateAddressForManagedStatic(*pSlot);
+
+ if (oh != NULL)
+ {
+ ObjectHandleList::NodeType* pNewNode = new ObjectHandleList::NodeType(oh);
+ m_PinnedContextStatics.LinkHead(pNewNode);
+ }
+
+ SetObjectReference( pObjRef, obj, GetAppDomain() );
+ }
+ else
+ {
+ pObjRef = (OBJECTREF*)CalculateAddressForManagedStatic(*pSlot);
+ }
+ }
+ else
+ {
+ // If the field already has a location assigned we go through here
+ pObjRef = (OBJECTREF*)CalculateAddressForManagedStatic(*pSlot);
+ }
+
+ *(ULONG_PTR *)ppvAddress = (ULONG_PTR)pObjRef;
+}
+
+// This is called by the managed context constructor
+FCIMPL2(void, Context::SetupInternalContext, ContextBaseObject* pThisUNSAFE, CLR_BOOL bDefault)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pThisUNSAFE != NULL);
+ PRECONDITION(pThisUNSAFE->m_internalContext == NULL);
+ }
+ CONTRACTL_END;
+
+ CONTEXTBASEREF pThis = (CONTEXTBASEREF) pThisUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(pThis);
+
+ Context *pCtx;
+
+ if (bDefault)
+ {
+ // We have to hook this up with the internal default
+ // context for the current appDomain
+ pCtx = GetThread()->GetDomain()->GetDefaultContext();
+ }
+ else
+ {
+ // Create the unmanaged backing context object
+ pCtx = Context::CreateNewContext(GetThread()->GetDomain());
+ }
+
+ // Set the managed & unmanaged objects to point at each other.
+ pThis->SetInternalContext(pCtx);
+ pCtx->SetExposedObject((OBJECTREF)pThis);
+
+ // Set the AppDomain field in the Managed context object
+ pThis->SetExposedDomain(GetThread()->GetDomain()->GetExposedObject());
+
+ if(bDefault)
+ ((APPDOMAINREF)GetThread()->GetDomain()->GetExposedObject())->SetDefaultContext(pThis);
+
+ COUNTER_ONLY(GetPerfCounters().m_Context.cContexts++);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+// This is called by the managed context finalizer
+FCIMPL1(void, Context::CleanupInternalContext, ContextBaseObject* pThisUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pThisUNSAFE != NULL);
+ }
+ CONTRACTL_END;
+
+ CONTEXTBASEREF pThis = (CONTEXTBASEREF) pThisUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(pThis);
+
+ CONTEXTBASEREF refCtx = pThis;
+
+ Context *pCtx = refCtx->m_internalContext;
+ _ASSERTE(pCtx != NULL);
+
+ if (ValidateContext(pCtx))
+ {
+ LOG((LF_APPDOMAIN, LL_INFO1000, "Context::CleanupInternalContext: %8.8x, %8.8x\n", OBJECTREFToObject(refCtx), pCtx));
+ Context::FreeContext(pCtx);
+ }
+
+ COUNTER_ONLY(GetPerfCounters().m_Context.cContexts--);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+// This is where a callback request made by the EE in Context::RequestCallBack
+// actually gets "executed".
+// At this point we have done a real context transition from the thread's
+// context at the time RequestCallBack was called to the destination context.
+FCIMPL1(void, Context::ExecuteCallBack, LPVOID privateData)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(privateData));
+ }
+ CONTRACTL_END;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ switch (((CallBackInfo*) privateData)->callbackId)
+ {
+ case Wait_callback:
+ {
+ WaitArgs* waitArgs;
+ waitArgs = (WaitArgs*) ((CallBackInfo*) privateData)->callbackData;
+ ExecuteWaitCallback(waitArgs);
+ break;
+ }
+
+ case MonitorWait_callback:
+ {
+ MonitorWaitArgs* waitArgs;
+ waitArgs = (MonitorWaitArgs*) ((CallBackInfo*) privateData)->callbackData;
+ ExecuteMonitorWaitCallback(waitArgs);
+ break;
+ }
+
+ case ADTransition_callback:
+ {
+ ADCallBackArgs* pCallArgs = (ADCallBackArgs*)(((CallBackInfo*) privateData)->callbackData);
+ pCallArgs->pTarget(pCallArgs->pArguments);
+ break;
+ }
+
+ case SignalAndWait_callback:
+ {
+ SignalAndWaitArgs* signalAndWaitArgs;
+ signalAndWaitArgs = (SignalAndWaitArgs*)((CallBackInfo*)privateData)->callbackData;
+ ExecuteSignalAndWaitCallback(signalAndWaitArgs);
+ break;
+ }
+ // Add other callback types here
+ default:
+ _ASSERTE(!"Invalid callback type");
+ break;
+ }
+
+ // This was the EE's entry point to do whatever it wanted to do in
+ // the target context. Returning from here takes us back into the
+ // managed world and transitions back into the original context.
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+
+#ifdef ENABLE_PERF_COUNTERS
+
+FCIMPL0(LPVOID, GetPrivateContextsPerfCountersEx)
+{
+ FCALL_CONTRACT;
+
+ return (LPVOID)GetPrivateContextsPerfCounters();
+}
+FCIMPLEND
+
+
+#else
+FCIMPL0(LPVOID, GetPrivateContextsPerfCountersEx)
+{
+ FCALL_CONTRACT;
+
+ return NULL;
+}
+FCIMPLEND
+
+#endif // ENABLE_PERF_COUNTERS
+
+#endif // DACCESS_COMPILE
+
+// This will NOT create the exposed object if there isn't one!
+OBJECTREF Context::GetExposedObjectRaw()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return ObjectFromHandle(m_ExposedObjectHandle);
+}
+
+
+PTR_Object Context::GetExposedObjectRawUnchecked()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return *PTR_PTR_Object(m_ExposedObjectHandle);
+}
+
+PTR_PTR_Object Context::GetExposedObjectRawUncheckedPtr()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return PTR_PTR_Object(m_ExposedObjectHandle);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: Context::GetStaticFieldAddrNoCreate private
+//
+// Synopsis: Get the address of the field relative to the context given a thread.
+// If an address has not been assigned, return NULL.
+// No creating is allowed.
+//
+//+----------------------------------------------------------------------------
+PTR_VOID Context::GetStaticFieldAddrNoCreate(FieldDesc *pFD)
+{
+ CONTRACT (PTR_VOID)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pFD));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ PTR_VOID pvAddress = NULL;
+ // for static field the MethodTable is exact even for generic classes
+ MethodTable* pMT = pFD->GetEnclosingMethodTable();
+ BOOL fIsShared = pMT->IsDomainNeutral();
+ DWORD dwClassOffset = pMT->GetContextStaticsOffset();
+ DWORD currElem = 0;
+ STATIC_DATA* pData;
+
+ if (dwClassOffset == (DWORD)-1)
+ RETURN NULL;
+
+ if(!fIsShared)
+ pData = m_pUnsharedStaticData;
+ else
+ pData = m_pSharedStaticData;
+
+ if (NULL == pData)
+ RETURN NULL;
+
+ currElem = pData->cElem;
+
+ // Check whether we have allocated space for storing a pointer to
+ // this class' context static store
+ if(dwClassOffset >= currElem || pData->dataPtr[dwClassOffset] == NULL)
+ RETURN NULL;
+
+ _ASSERTE(pData->dataPtr[dwClassOffset] != NULL);
+
+ // We have allocated static storage for this data
+ // Just return the address by getting the offset into the data
+ pvAddress = PTR_VOID(dac_cast<PTR_BYTE>(pData->dataPtr[dwClassOffset]) + pFD->GetOffset());
+
+ if(pFD->IsObjRef() || pFD->IsByValue())
+ {
+ if (*dac_cast<PTR_BYTE>(pvAddress) == NULL)
+ {
+ pvAddress = NULL;
+ LOG((LF_SYNC, LL_ALWAYS, "dbgr: pvAddress = NULL"));
+ }
+ else
+ {
+ pvAddress = CalculateAddressForManagedStatic(*(PTR_int(pvAddress)));
+ LOG((LF_SYNC, LL_ALWAYS, "dbgr: pvAddress = %lx", pvAddress));
+ if (pFD->IsByValue())
+ {
+ _ASSERTE(pvAddress != NULL);
+ pvAddress = (*(PTR_OBJECTREF(pvAddress)))->GetData();
+ }
+ }
+ }
+
+ RETURN pvAddress;
+}
+
+
+// This is used for context relative statics that are object refs.
+// These are stored in a structure in the managed context. The first
+// time through, an index and a bucket are determined and subsequently
+// remembered in the location for the field in the per-context-per-class
+// data structure.
+// Here we map back from the index to the address of the object ref.
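+// Illustrative decode, matching the arithmetic below: a slot value of
+// 0x00020005 yields bucket 2 (slot >> 16) and index 5 (slot & 0xffff);
+// bucket 2 is reached by following element 0 of the bucket chain twice.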
+PTR_VOID Context::CalculateAddressForManagedStatic(int slot)
+{
+ CONTRACT (PTR_VOID)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(this));
+ POSTCONDITION(CheckPointer(RETVAL));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ PTR_OBJECTREF pObjRef;
+ int bucket = (slot>>16);
+ int index = (0x0000ffff&slot);
+
+ // Now determine the address of the static field
+ PTRARRAYREF bucketRef = NULL;
+
+ bucketRef = ((CONTEXTBASEREF)GetExposedObjectRaw())->GetContextStaticsHolder();
+
+ // walk the chain to our bucket
+ while (bucket--)
+ bucketRef = (PTRARRAYREF) bucketRef->GetAt(0);
+
+ // Index 0 is used to point to the next bucket!
+ _ASSERTE(index > 0);
+ pObjRef = PTR_OBJECTREF(bucketRef->GetDataPtr())+index;
+
+ RETURN (PTR_VOID(pObjRef));
+}
+
+#endif // FEATURE_REMOTING
+
+#ifdef DACCESS_COMPILE
+
+void
+Context::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ DAC_ENUM_DTHIS();
+
+ if (m_pDomain.IsValid())
+ {
+ m_pDomain->EnumMemoryRegions(flags, true);
+ }
+}
+#endif // #ifdef DACCESS_COMPILE
diff --git a/src/vm/contractimpl.cpp b/src/vm/contractimpl.cpp
new file mode 100644
index 0000000000..96d2d86567
--- /dev/null
+++ b/src/vm/contractimpl.cpp
@@ -0,0 +1,716 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: contractimpl.cpp
+//
+// Keeps track of contract implementations, used primarily in stub dispatch.
+//
+
+
+//
+
+//
+// ============================================================================
+
+#include "common.h" // Precompiled header
+
+#include "contractimpl.h"
+#include "virtualcallstub.h"
+#include "decodemd.h"
+
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif
+
+#if defined(_DEBUG)
+DummyGlobalContract ___contract;
+#endif
+
+#ifdef LOGGING
+//----------------------------------------------------------------------------
+StubDispatchStats g_sdStats = {0};
+#endif // LOGGING
+
+#ifndef DACCESS_COMPILE
+
+//----------------------------------------------------------------------------
+MethodDesc * DispatchSlot::GetMethodDesc()
+{
+ WRAPPER_NO_CONTRACT;
+ if (IsNull())
+ return NULL;
+ else
+ return MethodTable::GetMethodDescForSlotAddress(GetTarget());
+}
+
+//------------------------------------------------------------------------
+void TypeIDMap::Init(UINT32 idStartValue, UINT32 idIncrementValue, BOOL fUseFatTokensForUniqueness)
+{
+ STANDARD_VM_CONTRACT;
+
+ LockOwner lock = {&m_lock, IsOwnerOfCrst};
+ m_idMap.Init(11, TRUE, &lock);
+ m_mtMap.Init(11, TRUE, &lock);
+ m_idProvider.Init(idStartValue, idIncrementValue);
+ m_entryCount = 0;
+ m_fUseFatIdsForUniqueness = fUseFatTokensForUniqueness;
+}
+
+#endif // !DACCESS_COMPILE
+
+//------------------------------------------------------------------------
+// Returns the ID of the type if found. If not found, returns INVALID_TYPE_ID
+UINT32 TypeIDMap::LookupTypeID(PTR_MethodTable pMT)
+{
+ CONTRACTL {
+ NOTHROW;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(GetThread()));
+ if (GetThread()->PreemptiveGCDisabled()) { GC_NOTRIGGER; } else { GC_TRIGGERS; }
+ } CONTRACTL_END;
+
+ UINT32 id = (UINT32) m_mtMap.LookupValue((UPTR)dac_cast<TADDR>(pMT), 0);
+ _ASSERTE(!m_fUseFatIdsForUniqueness || !pMT->RequiresFatDispatchTokens() || (DispatchToken::RequiresDispatchTokenFat(id, 0)));
+
+ return id;
+}
+
+//------------------------------------------------------------------------
+// Returns the type with the given ID if found. If not found, returns NULL.
+PTR_MethodTable TypeIDMap::LookupType(UINT32 id)
+{
+ CONTRACTL {
+ NOTHROW;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(GetThread()));
+ if (GetThread()->PreemptiveGCDisabled()) { GC_NOTRIGGER; } else { GC_TRIGGERS; }
+ PRECONDITION(id <= TypeIDProvider::MAX_TYPE_ID);
+ } CONTRACTL_END;
+
+ if (!m_idProvider.OwnsID(id))
+ return NULL;
+
+ UPTR ret = m_idMap.LookupValue((UPTR)id, 0);
+ if (ret == static_cast<UPTR>(INVALIDENTRY))
+ return NULL;
+
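+ // GetTypeID stores the MethodTable pointer shifted right by one
+ // (see the InsertValue call there), so shift left to restore it.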
+ ret <<= 1;
+
+ return PTR_MethodTable(ret);
+}
+
+//------------------------------------------------------------------------
+// Returns the ID of the type if found. If not found, assigns the ID and
+// returns the new ID.
+UINT32 TypeIDMap::GetTypeID(PTR_MethodTable pMT)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ // Lookup the value.
+ UINT32 id = LookupTypeID(pMT);
+#ifndef DACCESS_COMPILE
+ // If the value is not in the table, take the lock, get a new ID, and
+ // insert the new pair.
+ if (id == TypeIDProvider::INVALID_TYPE_ID)
+ {
+ // Take the lock
+ CrstHolder lh(&m_lock);
+ // Check to see if someone beat us to the punch
+ id = LookupTypeID(pMT);
+ if (id != TypeIDProvider::INVALID_TYPE_ID)
+ {
+ return id;
+ }
+ // Get the next ID
+ if (m_fUseFatIdsForUniqueness && pMT->RequiresFatDispatchTokens())
+ {
+ id = GetNextFatID();
+ }
+ else
+ {
+ id = GetNextID();
+ }
+
+ CONSISTENCY_CHECK(id <= TypeIDProvider::MAX_TYPE_ID);
+ // Insert the pair, with lookups in both directions
+ CONSISTENCY_CHECK((((UPTR)pMT) & 0x1) == 0);
+ m_idMap.InsertValue((UPTR)id, (UPTR)pMT >> 1);
+ m_mtMap.InsertValue((UPTR)pMT, (UPTR)id);
+ m_entryCount++;
+ CONSISTENCY_CHECK(GetThread()->GetDomain()->IsCompilationDomain() ||
+ (LookupType(id) == pMT));
+ }
+#else // DACCESS_COMPILE
+ if (id == TypeIDProvider::INVALID_TYPE_ID)
+ DacError(E_FAIL);
+#endif // DACCESS_COMPILE
+ // Return the ID for this type.
+ return id;
+} // TypeIDMap::GetTypeID
+
+#ifndef DACCESS_COMPILE
+
+//------------------------------------------------------------------------
+// If TRUE, it points to a matching entry.
+// If FALSE, it is at the insertion point.
+BOOL
+DispatchMapBuilder::Find(
+ DispatchMapTypeID typeID,
+ UINT32 slotNumber,
+ Iterator & it)
+{
+ WRAPPER_NO_CONTRACT;
+ for (; it.IsValid(); it.Next())
+ {
+ if (typeID == it.GetTypeID())
+ {
+ if (slotNumber == it.GetSlotNumber())
+ {
+ return TRUE;
+ }
+ if (slotNumber < it.GetSlotNumber())
+ {
+ return FALSE;
+ }
+ }
+ else if (typeID < it.GetTypeID())
+ {
+ return FALSE;
+ }
+ }
+
+ return FALSE;
+} // DispatchMapBuilder::Find
+
+//------------------------------------------------------------------------
+// If TRUE, contains such an entry.
+// If FALSE, no such entry exists.
+BOOL DispatchMapBuilder::Contains(DispatchMapTypeID typeID, UINT32 slotNumber)
+{
+ WRAPPER_NO_CONTRACT;
+ Iterator it(this);
+ return Find(typeID, slotNumber, it);
+}
+
+//------------------------------------------------------------------------
+void
+DispatchMapBuilder::InsertMDMapping(
+ DispatchMapTypeID typeID,
+ UINT32 slotNumber,
+ MethodDesc * pMDTarget,
+ BOOL fIsMethodImpl)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ // Find a matching entry, or move the iterator to insertion point.
+ Iterator it(this);
+ BOOL fFound = Find(typeID, slotNumber, it);
+
+ // If we find an existing matching entry, fail.
+ if (fFound)
+ {
+ _ASSERTE(false);
+ COMPlusThrowHR(COR_E_TYPELOAD);
+ }
+
+ // Create and initialize a new entry
+ DispatchMapBuilderNode * pNew = NewEntry();
+ pNew->Init(typeID, slotNumber, pMDTarget);
+ if (fIsMethodImpl)
+ pNew->SetIsMethodImpl();
+
+ // Insert at the point of the iterator
+ pNew->m_next = NULL;
+ if (it.IsValid())
+ {
+ pNew->m_next = it.EntryNode();
+ }
+ *(it.EntryNodePtr()) = pNew;
+ m_cEntries++;
+
+} // DispatchMapBuilder::InsertMDMapping
+
+//--------------------------------------------------------------------
+UINT32 DispatchMapBuilder::Iterator::GetTargetSlot()
+{
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ if (GetTargetMD() != NULL)
+ {
+ return EntryNode()->m_pMDTarget->GetSlot();
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+//------------------------------------------------------------------------
+DispatchMapBuilderNode * DispatchMapBuilder::NewEntry()
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM());
+ } CONTRACTL_END;
+
+ return new (m_pAllocator) DispatchMapBuilderNode();
+}
+
+//----------------------------------------------------------------------------
+DispatchMap::DispatchMap(
+ BYTE * pMap,
+ UINT32 cbMap)
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(CheckPointer(pMap));
+ memcpyNoGCRefs(m_rgMap, pMap, cbMap);
+}
+
+//----------------------------------------------------------------------------
+// This mapping consists of a list of entries of the following form:
+// <type, [<slot, (index | slot)>]>. This is implemented as:
+//
+// flag: 0 if the map is a part of a JIT'd module
+// 1 if the map is a part of an NGEN'd module.
+// count: number of types that have entries
+// {
+// type: The ID of the current type being mapped
+// count: Number of subentries for the current type
+// bool: Whether or not the target slot/index values can be negative.
+// {
+// slot: The slot of type that is being mapped
+// index/slot: This is a slot mapping for the current type. The implementation search is
+// modified to <this, slot> and the search is restarted from the initial type.
+// }
+// }
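+//
+// Illustrative delta encoding (assuming ENCODING_TYPE_DELTA == 1; that
+// value is an assumption, not taken from this file): starting from the
+// implicit previous type of -1, type IDs 2 and then 5 are written as the
+// deltas 2 - (-1) - 1 == 2 and 5 - 2 - 1 == 2.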
+void
+DispatchMap::CreateEncodedMapping(
+ MethodTable * pMT,
+ DispatchMapBuilder * pMapBuilder,
+ StackingAllocator * pAllocator,
+ BYTE ** ppbMap,
+ UINT32 * pcbMap)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(pMapBuilder));
+ PRECONDITION(CheckPointer(pAllocator));
+ PRECONDITION(CheckPointer(ppbMap));
+ PRECONDITION(CheckPointer(pcbMap));
+ } CONTRACTL_END;
+
+ /////////////////////////////////
+ // Phase 1 - gather entry counts
+
+ UINT32 cNumTypes = 0;
+ UINT32 cNumEntries = 0;
+
+ {
+ DispatchMapBuilder::Iterator it(pMapBuilder);
+ // We don't want to record overrides or methodImpls in the dispatch map since
+ // we have vtables to track this information.
+ it.SkipThisTypeEntries();
+ if (it.IsValid())
+ {
+ DispatchMapTypeID curType = DispatchMapTypeID::FromUINT32(INVALIDENTRY);
+ do
+ {
+ cNumEntries++;
+ if (curType != it.GetTypeID())
+ {
+ cNumTypes++;
+ curType = it.GetTypeID();
+ }
+ }
+ while (it.Next());
+ }
+ }
+
+ /////////////////////////////////
+ // Phase 2 - allocate space
+
+ // Now that we have stats about the overall absolute maximum map size, we can
+ // allocate some working space in which to create the encoded map.
+ // Sizes: flag==UINT32, typeID==UINT32, slot==UINT32, index/slot==UINT32
+
+ S_UINT32 scbMap = S_UINT32(sizeof(UINT32)) +
+ S_UINT32(cNumTypes) * S_UINT32(sizeof(UINT32)) +
+ S_UINT32(cNumEntries) * S_UINT32((sizeof(UINT32) + sizeof(UINT32)));
+
+ BYTE * pbMap = (BYTE *)pAllocator->Alloc(scbMap);
+
+ /////////////////////////////////
+ // Phase 3 - encode the map
+
+ {
+ // Create the encoder over the newly allocated memory
+ Encoder e(pbMap);
+ // Encode the count of type entries
+ e.Encode((unsigned)cNumTypes);
+ // Start encoding the map
+ DispatchMapBuilder::Iterator it(pMapBuilder);
+ it.SkipThisTypeEntries();
+
+ INT32 curType = -1;
+ INT32 prevType;
+ INT32 deltaType;
+ while (it.IsValid())
+ {
+ // Encode the type ID
+ prevType = curType;
+ curType = (INT32)it.GetTypeID().ToUINT32();
+ deltaType = curType - prevType - ENCODING_TYPE_DELTA;
+ CONSISTENCY_CHECK(0 <= deltaType);
+ e.Encode((unsigned)deltaType);
+ // Variables for slot delta calculations
+ BOOL fHasNegatives = FALSE;
+ // Source slot
+ INT32 curSlot = -1;
+ INT32 prevSlot = -1;
+ // Target slot for virtual mappings
+ INT32 curTargetSlot = -1;
+ INT32 prevTargetSlot = -1;
+ // Count and encode the number of sub entries for this type
+ UINT32 cSubEntries = 0;
+ DispatchMapBuilder::Iterator subIt(it);
+ do
+ {
+ prevTargetSlot = curTargetSlot;
+ curTargetSlot = (INT32)subIt.GetTargetSlot();
+ INT32 deltaTargetSlot = curTargetSlot - prevTargetSlot - ENCODING_TARGET_SLOT_DELTA;
+ if (deltaTargetSlot < 0)
+ {
+ fHasNegatives = TRUE;
+ }
+ cSubEntries++;
+ }
+ while (subIt.Next() && (subIt.GetTypeID().ToUINT32() == (UINT32)curType));
+
+ e.Encode((unsigned)cSubEntries);
+ e.Encode((unsigned)fHasNegatives);
+ e.ContainsNegatives(fHasNegatives);
+ // Iterate each subentry and encode it
+ curTargetSlot = -1;
+ do
+ {
+ // Only virtual targets can be mapped virtually.
+ CONSISTENCY_CHECK((it.GetTargetMD() == NULL) ||
+ it.GetTargetMD()->IsVirtual());
+ // Encode the slot
+ prevSlot = curSlot;
+ curSlot = it.GetSlotNumber();
+ INT32 deltaSlot = curSlot - prevSlot - ENCODING_SLOT_DELTA;
+ CONSISTENCY_CHECK(0 <= deltaSlot);
+ e.Encode((unsigned)deltaSlot);
+
+ // Calculate and encode the target slot delta
+ prevTargetSlot = curTargetSlot;
+ curTargetSlot = (INT32)it.GetTargetSlot();
+ INT32 delta = curTargetSlot - prevTargetSlot - ENCODING_TARGET_SLOT_DELTA;
+
+ if (fHasNegatives)
+ {
+ e.EncodeSigned((signed)delta);
+ }
+ else
+ {
+ CONSISTENCY_CHECK(0 <= delta);
+ e.Encode((unsigned)delta);
+ }
+ }
+ while (it.Next() && it.GetTypeID().ToUINT32() == (UINT32)curType);
+ } // while (it.IsValid())
+
+ // Finish and finalize the map, and set the out params.
+ e.Done();
+ *pcbMap = e.Contents(ppbMap);
+ }
+
+#ifdef _DEBUG
+ // Let's verify the mapping
+ {
+ EncodedMapIterator itMap(*ppbMap);
+ DispatchMapBuilder::Iterator itBuilder(pMapBuilder);
+ itBuilder.SkipThisTypeEntries();
+
+ while (itMap.IsValid())
+ {
+ CONSISTENCY_CHECK(itBuilder.IsValid());
+ DispatchMapEntry * pEntryMap = itMap.Entry();
+ CONSISTENCY_CHECK(pEntryMap->GetTypeID() == itBuilder.GetTypeID());
+ CONSISTENCY_CHECK(pEntryMap->GetTargetSlotNumber() == itBuilder.GetTargetSlot());
+ itMap.Next();
+ itBuilder.Next();
+ }
+
+ CONSISTENCY_CHECK(!itBuilder.IsValid());
+ }
+#endif //_DEBUG
+} // DispatchMap::CreateEncodedMapping
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+//------------------------------------------------------------------------
+void DispatchMap::Save(DataImage * image)
+{
+ STANDARD_VM_CONTRACT;
+
+ CONSISTENCY_CHECK(!image->IsStored(this));
+
+ UINT32 cbMap = GetMapSize();
+ UINT32 cbObj = GetObjectSize(cbMap);
+
+ image->StoreInternedStructure(
+ this,
+ cbObj,
+ DataImage::ITEM_DISPATCH_MAP,
+ sizeof(void *));
+
+#ifdef LOGGING
+ g_sdStats.m_cNGENDispatchMap++;
+ g_sdStats.m_cbNGENDispatchMap += cbObj;
+#endif //LOGGING
+}
+
+//------------------------------------------------------------------------
+void DispatchMap::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+}
+
+#endif //FEATURE_NATIVE_IMAGE_GENERATION
+
+#endif //!DACCESS_COMPILE
+
+//------------------------------------------------------------------------
+UINT32 DispatchMap::GetMapSize()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ EncodedMapIterator it(this);
+ for (; it.IsValid(); it.Next())
+ {
+ }
+ CONSISTENCY_CHECK(dac_cast<TADDR>(it.m_d.End()) > PTR_HOST_MEMBER_TADDR(DispatchMap, this, m_rgMap));
+ return (UINT32)(dac_cast<TADDR>(it.m_d.End()) - PTR_HOST_MEMBER_TADDR(DispatchMap, this, m_rgMap));
+}
+
+#ifdef DACCESS_COMPILE
+
+//------------------------------------------------------------------------
+void DispatchMap::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ DAC_ENUM_DTHIS();
+
+ EMEM_OUT(("MEM: %p DispatchMap\n", dac_cast<TADDR>(this)));
+
+ DacEnumMemoryRegion(PTR_HOST_MEMBER_TADDR(DispatchMap,this,m_rgMap), GetMapSize());
+}
+
+#endif // DACCESS_COMPILE
+
+//--------------------------------------------------------------------
+void DispatchMap::EncodedMapIterator::Invalidate()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ m_numTypes = 0;
+ m_curType = 0;
+ m_numEntries = 0;
+ m_curEntry = 0;
+}
+
+//--------------------------------------------------------------------
+void DispatchMap::EncodedMapIterator::Init(PTR_BYTE pbMap)
+{
+ CONTRACTL {
+ GC_NOTRIGGER;
+ NOTHROW;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pbMap, NULL_OK));
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ if (pbMap != NULL)
+ {
+ // Initialize the map decoder
+ m_d.Init(pbMap);
+ m_numTypes = m_d.Next();
+ m_curType = -1;
+ m_curTypeId = DispatchMapTypeID::FromUINT32(static_cast<UINT32>(-1));
+ m_numEntries = 0;
+ m_curEntry = -1;
+ m_curTargetSlot = static_cast<UINT32>(-1);
+ }
+ else
+ {
+ Invalidate();
+ }
+
+ Next();
+}
+
+//--------------------------------------------------------------------
+DispatchMap::EncodedMapIterator::EncodedMapIterator(MethodTable * pMT)
+{
+ CONTRACTL {
+ GC_NOTRIGGER;
+ NOTHROW;
+ INSTANCE_CHECK;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ if (pMT->HasDispatchMap())
+ {
+ DispatchMap * pMap = pMT->GetDispatchMap();
+ Init(PTR_BYTE(PTR_HOST_MEMBER_TADDR(DispatchMap, pMap, m_rgMap)));
+ }
+ else
+ {
+ Init(NULL);
+ }
+}
+
+//--------------------------------------------------------------------
+// This should be used only when a dispatch map needs to be used
+// separately from its MethodTable.
+DispatchMap::EncodedMapIterator::EncodedMapIterator(DispatchMap * pMap)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ PTR_BYTE pBytes = NULL;
+ if (pMap != NULL)
+ {
+ pBytes = PTR_BYTE(PTR_HOST_MEMBER_TADDR(DispatchMap, pMap,m_rgMap));
+ }
+ Init(pBytes);
+}
+
+//--------------------------------------------------------------------
+DispatchMap::EncodedMapIterator::EncodedMapIterator(PTR_BYTE pbMap)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ Init(pbMap);
+}
+
+//--------------------------------------------------------------------
+BOOL DispatchMap::EncodedMapIterator::Next()
+{
+ CONTRACTL {
+ GC_NOTRIGGER;
+ NOTHROW;
+ INSTANCE_CHECK;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ if (!IsValid())
+ {
+ return FALSE;
+ }
+
+ m_curEntry++;
+ if (m_curEntry == m_numEntries)
+ {
+ m_curType++;
+ if (m_curType == m_numTypes)
+ {
+ return FALSE;
+ }
+ m_curTypeId =
+ DispatchMapTypeID::FromUINT32(
+ (UINT32)((INT32)m_curTypeId.ToUINT32() +
+ (INT32)m_d.Next() +
+ ENCODING_TYPE_DELTA));
+ _ASSERTE(!m_curTypeId.IsThisClass());
+ m_curEntry = 0;
+ m_numEntries = m_d.Next();
+ m_fCurTypeHasNegativeEntries = (BOOL)m_d.Next();
+ m_curSlot = static_cast<UINT32>(-1);
+ m_curTargetSlot = static_cast<UINT32>(-1);
+ CONSISTENCY_CHECK(m_numEntries != 0);
+ }
+
+ // Now gather enough info to initialize the dispatch entry
+
+ // Get the source slot
+ m_curSlot = (UINT32)((INT32)m_curSlot + (INT32)m_d.Next() + ENCODING_SLOT_DELTA);
+
+ // If virtual, get the target virtual slot number
+ m_curTargetSlot =
+ (UINT32)((INT32)m_curTargetSlot +
+ ENCODING_TARGET_SLOT_DELTA +
+ (INT32)(m_fCurTypeHasNegativeEntries ? m_d.NextSigned() : m_d.Next()));
+ m_e.InitVirtualMapping(m_curTypeId, m_curSlot, m_curTargetSlot);
+
+ CONSISTENCY_CHECK(IsValid());
+ return TRUE;
+} // DispatchMap::EncodedMapIterator::Next
+
+//--------------------------------------------------------------------
+DispatchMap::Iterator::Iterator(MethodTable * pMT)
+ : m_mapIt(pMT)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+}
+
+//--------------------------------------------------------------------
+BOOL DispatchMap::Iterator::IsValid()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_mapIt.IsValid();
+}
+
+//--------------------------------------------------------------------
+BOOL DispatchMap::Iterator::Next()
+{
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(!m_mapIt.Entry()->GetTypeID().IsThisClass());
+ if (m_mapIt.IsValid())
+ {
+ m_mapIt.Next();
+ CONSISTENCY_CHECK(!m_mapIt.IsValid() || !m_mapIt.Entry()->GetTypeID().IsThisClass());
+ }
+ return IsValid();
+}
+
+//--------------------------------------------------------------------
+DispatchMapEntry * DispatchMap::Iterator::Entry()
+{
+/*
+ CONTRACTL {
+ INSTANCE_CHECK;
+ MODE_ANY;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ PRECONDITION(IsValid());
+ } CONTRACTL_END;
+*/
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+
+ DispatchMapEntry * pEntry = NULL;
+ if (m_mapIt.IsValid())
+ {
+ pEntry = m_mapIt.Entry();
+ }
+ CONSISTENCY_CHECK(CheckPointer(pEntry));
+ return pEntry;
+}
diff --git a/src/vm/contractimpl.h b/src/vm/contractimpl.h
new file mode 100644
index 0000000000..c8b0f4856a
--- /dev/null
+++ b/src/vm/contractimpl.h
@@ -0,0 +1,1028 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: contractimpl.h
+//
+// Keeps track of contract implementations, used primarily in stub dispatch.
+//
+
+
+//
+
+//
+// ============================================================================
+
+#ifndef CONTRACTIMPL_H_
+#define CONTRACTIMPL_H_
+
+#include "hash.h"
+#include "decodemd.h"
+
+class Module;
+class MethodDesc;
+class StackingAllocator;
+
+// ===========================================================================
+struct DispatchSlot
+{
+protected:
+ PCODE m_slot;
+
+public:
+ //------------------------------------------------------------------------
+ inline DispatchSlot(PCODE slot) : m_slot(slot)
+ { LIMITED_METHOD_CONTRACT; }
+
+ //------------------------------------------------------------------------
+ inline DispatchSlot(const DispatchSlot &slot) : m_slot(slot.m_slot)
+ { LIMITED_METHOD_CONTRACT; }
+
+ //------------------------------------------------------------------------
+ inline DispatchSlot& operator=(PCODE slot)
+ { LIMITED_METHOD_CONTRACT; m_slot = slot; return *this; }
+
+ //------------------------------------------------------------------------
+ inline DispatchSlot& operator=(const DispatchSlot &slot)
+ { LIMITED_METHOD_CONTRACT; m_slot = slot.m_slot; return *this; }
+
+ //------------------------------------------------------------------------
+ inline BOOL IsNull()
+ { LIMITED_METHOD_CONTRACT; return (m_slot == NULL); }
+
+ //------------------------------------------------------------------------
+ inline void SetNull()
+ { LIMITED_METHOD_CONTRACT; m_slot = NULL; }
+
+ //------------------------------------------------------------------------
+ inline PCODE GetTarget()
+ { LIMITED_METHOD_CONTRACT; return m_slot; }
+
+ //------------------------------------------------------------------------
+ MethodDesc *GetMethodDesc();
+}; // struct DispatchSlot
+
+// ===========================================================================
+// This value indicates that a slot number is in reference to the
+// current class. Thus, no TypeID can have a value of 0. This is stored
+// inside a DispatchToken as the TypeID for such cases.
+static const UINT32 TYPE_ID_THIS_CLASS = 0;
+
+
+// ===========================================================================
+// The type IDs used in the dispatch map are relative to the implementing
+// type, and are a discriminated union between:
+// - a special value to indicate "this" class
+// - a special value to indicate that an interface is not implemented by the type
+// - an index into the InterfaceMap
+class DispatchMapTypeID
+{
+private:
+ static const UINT32 const_nFirstInterfaceIndex = 1;
+
+ UINT32 m_typeIDVal;
+ DispatchMapTypeID(UINT32 id) { LIMITED_METHOD_DAC_CONTRACT; m_typeIDVal = id; }
+public:
+ // Constructors
+ static DispatchMapTypeID ThisClassID() { LIMITED_METHOD_CONTRACT; return DispatchMapTypeID(TYPE_ID_THIS_CLASS); }
+ static DispatchMapTypeID InterfaceClassID(UINT32 inum)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(inum + const_nFirstInterfaceIndex > inum);
+ return DispatchMapTypeID(inum + const_nFirstInterfaceIndex);
+ }
+ DispatchMapTypeID() { LIMITED_METHOD_DAC_CONTRACT; m_typeIDVal = TYPE_ID_THIS_CLASS; }
+
+ // Accessors
+ BOOL IsThisClass() const { LIMITED_METHOD_DAC_CONTRACT; return (m_typeIDVal == TYPE_ID_THIS_CLASS); }
+ BOOL IsImplementedInterface() const { LIMITED_METHOD_CONTRACT; return (m_typeIDVal >= const_nFirstInterfaceIndex); }
+ UINT32 GetInterfaceNum() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(IsImplementedInterface());
+ return (m_typeIDVal - const_nFirstInterfaceIndex);
+ }
+
+ // Ordering/equality
+ BOOL operator ==(const DispatchMapTypeID &that) const { LIMITED_METHOD_CONTRACT; return m_typeIDVal == that.m_typeIDVal; }
+ BOOL operator !=(const DispatchMapTypeID &that) const { LIMITED_METHOD_CONTRACT; return m_typeIDVal != that.m_typeIDVal; }
+ BOOL operator <(const DispatchMapTypeID &that) const { LIMITED_METHOD_CONTRACT; return m_typeIDVal < that.m_typeIDVal; }
+
+ // To/from UINT32, for encoding/decoding etc.
+ UINT32 ToUINT32() const { LIMITED_METHOD_DAC_CONTRACT; return m_typeIDVal; }
+ static DispatchMapTypeID FromUINT32(UINT32 x) { LIMITED_METHOD_DAC_CONTRACT; return DispatchMapTypeID(x); }
+}; // class DispatchMapTypeID
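+
+// Example: DispatchMapTypeID::InterfaceClassID(0) stores 1 internally
+// (0 is reserved for "this" class), and GetInterfaceNum() maps it back to 0.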
+
+#ifdef FAT_DISPATCH_TOKENS
+// ===========================================================================
+// This is the structure that is used when the typeId becomes too big to be
+// contained in a regular DispatchToken. DispatchToken is able to encapsulate
+// a DispatchTokenFat*, somewhat like TypeHandle may encapsulate a TypeDesc*.
+struct DispatchTokenFat
+{
+ friend struct DispatchToken;
+ friend class BaseDomain;
+
+ private:
+ UINT32 m_typeId;
+ UINT32 m_slotNum;
+
+ public:
+ DispatchTokenFat(UINT32 typeID, UINT32 slotNumber)
+ : m_typeId(typeID), m_slotNum(slotNumber)
+ {}
+
+ // Equality comparison, used in SHash set.
+ bool operator==(const DispatchTokenFat &other) const
+ { return m_typeId == other.m_typeId && m_slotNum == other.m_slotNum; }
+
+ // Hashing operator, used in SHash set.
+ operator size_t() const
+ { return (size_t)m_typeId ^ (size_t)m_slotNum; }
+}; // struct DispatchTokenFat
+
+typedef DPTR(DispatchTokenFat) PTR_DispatchTokenFat;
+#endif
+
+// ===========================================================================
+// This represents the contract used for code lookups throughout the
+// virtual stub dispatch mechanism. It is important to know that
+// sizeof(DispatchToken) == sizeof(UINT_PTR), which means it can be passed
+// around by value without a problem.
+
+struct DispatchToken
+{
+private:
+ // IMPORTANT: This is the ONLY member of this class.
+ UINT_PTR m_token;
+
+#ifndef _WIN64
+ // NOTE: On 32-bit, we use the uppermost bit to indicate that the
+ // token is really a DispatchTokenFat*, and to recover the pointer
+ // we just shift left by 1; correspondingly, when storing a
+ // DispatchTokenFat* in a DispatchToken, we shift right by 1.
+ static const UINT_PTR MASK_TYPE_ID = 0x00007FFF;
+ static const UINT_PTR MASK_SLOT_NUMBER = 0x0000FFFF;
+
+ static const UINT_PTR SHIFT_TYPE_ID = 0x10;
+ static const UINT_PTR SHIFT_SLOT_NUMBER = 0x0;
+
+#ifdef FAT_DISPATCH_TOKENS
+ static const UINT_PTR FAT_TOKEN_FLAG = 0x80000000;
+#endif // FAT_DISPATCH_TOKENS
+
+ static const UINT_PTR INVALID_TOKEN = 0x7FFFFFFF;
+#else //_WIN64
+ static const UINT_PTR MASK_TYPE_ID = UI64(0x000000007FFFFFFF);
+ static const UINT_PTR MASK_SLOT_NUMBER = UI64(0x000000000000FFFF);
+
+ static const UINT_PTR SHIFT_TYPE_ID = 0x20;
+ static const UINT_PTR SHIFT_SLOT_NUMBER = 0x0;
+
+#ifdef FAT_DISPATCH_TOKENS
+ static const UINT_PTR FAT_TOKEN_FLAG = UI64(0x8000000000000000);
+#endif // FAT_DISPATCH_TOKENS
+
+ static const UINT_PTR INVALID_TOKEN = 0x7FFFFFFFFFFFFFFF;
+#endif //_WIN64
+
+#ifdef FAT_DISPATCH_TOKENS
+ //------------------------------------------------------------------------
+ static inline BOOL IsFat(UINT_PTR token)
+ {
+ return (token & FAT_TOKEN_FLAG) != 0;
+ }
+
+ //------------------------------------------------------------------------
+ static inline DispatchTokenFat* ToFat(UINT_PTR token)
+ {
+ return PTR_DispatchTokenFat(token << 1);
+ }
+#endif
+
+ //------------------------------------------------------------------------
+ // Combines the two values into a single 32-bit number.
+ static UINT_PTR CreateToken(UINT32 typeID, UINT32 slotNumber)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(((UINT_PTR)typeID & MASK_TYPE_ID) == (UINT_PTR)typeID);
+ CONSISTENCY_CHECK(((UINT_PTR)slotNumber & MASK_SLOT_NUMBER) == (UINT_PTR)slotNumber);
+ return ((((UINT_PTR)typeID & MASK_TYPE_ID) << SHIFT_TYPE_ID) |
+ (((UINT_PTR)slotNumber & MASK_SLOT_NUMBER) << SHIFT_SLOT_NUMBER));
+ }
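+
+ // Example (32-bit build): typeID 0x12 and slotNumber 0x0034 encode as
+ // (0x12 << 16) | 0x0034 == 0x00120034.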
+
+ //------------------------------------------------------------------------
+ // Extracts the type ID from a token created by CreateToken
+ static UINT32 DecodeTypeID(UINT_PTR token)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(token != INVALID_TOKEN);
+#ifdef FAT_DISPATCH_TOKENS
+ if (IsFat(token))
+ return ToFat(token)->m_typeId;
+ else
+#endif
+ return ((token >> SHIFT_TYPE_ID) & MASK_TYPE_ID);
+ }
+
+ //------------------------------------------------------------------------
+ // Extracts the slot number from a token created by CreateToken
+ static UINT32 DecodeSlotNumber(UINT_PTR token)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(token != INVALID_TOKEN);
+#ifdef FAT_DISPATCH_TOKENS
+ if (IsFat(token))
+ return ToFat(token)->m_slotNum;
+ else
+#endif
+ return ((token >> SHIFT_SLOT_NUMBER) & MASK_SLOT_NUMBER);
+ }
+
+public:
+
+#ifdef FAT_DISPATCH_TOKENS
+#if !defined(_WIN64)
+ static const UINT32 MAX_TYPE_ID_SMALL = 0x00007FFF;
+#else
+ static const UINT32 MAX_TYPE_ID_SMALL = 0x7FFFFFFF;
+#endif
+#endif // FAT_DISPATCH_TOKENS
+
+ //------------------------------------------------------------------------
+ DispatchToken()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_token = INVALID_TOKEN;
+ }
+
+ DispatchToken(UINT_PTR token)
+ {
+ CONSISTENCY_CHECK(token != INVALID_TOKEN);
+ m_token = token;
+ }
+
+#ifdef FAT_DISPATCH_TOKENS
+ //------------------------------------------------------------------------
+ DispatchToken(DispatchTokenFat *pFat)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK((((UINT_PTR)pFat) & 0x1) == 0);
+ m_token = (UINT_PTR(pFat) >> 1) | FAT_TOKEN_FLAG;
+ }
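+
+ // Illustrative (32-bit): a 2-aligned DispatchTokenFat* such as 0x00A0 is
+ // stored as (0x00A0 >> 1) | FAT_TOKEN_FLAG; ToFat recovers it as
+ // (token << 1), which also shifts the flag bit out of the pointer.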
+
+ //------------------------------------------------------------------------
+ static bool RequiresDispatchTokenFat(UINT32 typeID, UINT32 slotNumber)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return typeID > MAX_TYPE_ID_SMALL
+#ifdef _DEBUG
+ // Stress the overflow mechanism in debug builds.
+ || ((typeID != TYPE_ID_THIS_CLASS) && ((typeID % 7) < 4))
+#endif
+ ;
+ }
+#endif //FAT_DISPATCH_TOKENS
+
+ //------------------------------------------------------------------------
+ inline bool operator==(const DispatchToken &tok) const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_token == tok.m_token;
+ }
+
+ //------------------------------------------------------------------------
+ // Creates a "this" type dispatch token. This means that the type for the
+ // token is implied by the type on which one wishes to invoke. In other
+ // words, the value returned by GetTypeID is TYPE_ID_THIS_CLASS.
+ static DispatchToken CreateDispatchToken(UINT32 slotNumber)
+ {
+ WRAPPER_NO_CONTRACT;
+ return DispatchToken(CreateToken(TYPE_ID_THIS_CLASS, slotNumber));
+ }
+
+ //------------------------------------------------------------------------
+ // Creates a fully qualified type dispatch token. This means that the ID
+ // for the type is encoded directly in the token.
+ static DispatchToken CreateDispatchToken(UINT32 typeID, UINT32 slotNumber)
+ {
+ WRAPPER_NO_CONTRACT;
+ return DispatchToken(CreateToken(typeID, slotNumber));
+ }
+
+ //------------------------------------------------------------------------
+ // Returns the type ID for this dispatch contract
+ inline UINT32 GetTypeID() const
+ {
+ WRAPPER_NO_CONTRACT;
+ return DecodeTypeID(m_token);
+ }
+
+ //------------------------------------------------------------------------
+ // Returns the slot number for this dispatch contract
+ inline UINT32 GetSlotNumber() const
+ {
+ WRAPPER_NO_CONTRACT;
+ return DecodeSlotNumber(m_token);
+ }
+
+ //------------------------------------------------------------------------
+ inline bool IsThisToken() const
+ {
+ WRAPPER_NO_CONTRACT;
+ return (GetTypeID() == TYPE_ID_THIS_CLASS);
+ }
+
+ //------------------------------------------------------------------------
+ inline bool IsTypedToken() const
+ {
+ WRAPPER_NO_CONTRACT;
+ return (!IsThisToken());
+ }
+
+ //------------------------------------------------------------------------
+ static DispatchToken From_SIZE_T(SIZE_T token)
+ {
+ WRAPPER_NO_CONTRACT;
+ return DispatchToken((UINT_PTR)token);
+ }
+
+ //------------------------------------------------------------------------
+ SIZE_T To_SIZE_T() const
+ {
+ WRAPPER_NO_CONTRACT;
+ static_assert_no_msg(sizeof(SIZE_T) == sizeof(UINT_PTR));
+ return (SIZE_T) m_token;
+ }
+
+ //------------------------------------------------------------------------
+ inline BOOL IsValid() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return !(m_token == INVALID_TOKEN);
+ }
+}; // struct DispatchToken
+
+// DispatchToken.m_token should be the only field of DispatchToken.
+static_assert_no_msg(sizeof(DispatchToken) == sizeof(UINT_PTR));
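+
+// Illustrative round trip for values small enough for a thin token:
+//   DispatchToken t = DispatchToken::CreateDispatchToken(typeID, slot);
+//   _ASSERTE(t.GetTypeID() == typeID && t.GetSlotNumber() == slot);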
+
+// ===========================================================================
+class TypeIDProvider
+{
+protected:
+ UINT32 m_nextID;
+ UINT32 m_incSize;
+ UINT32 m_nextFatID;
+
+public:
+ // This is used for an invalid type ID.
+ static const UINT32 INVALID_TYPE_ID = ~0;
+
+ // If we can have more than 2^32-1 types, we'll need to revisit this.
+ static const UINT32 MAX_TYPE_ID = INVALID_TYPE_ID - 1;
+
+ //------------------------------------------------------------------------
+ // Ctor
+ TypeIDProvider()
+ : m_nextID(0), m_incSize(0), m_nextFatID(0)
+ { LIMITED_METHOD_CONTRACT; }
+
+
+ //------------------------------------------------------------------------
+ void Init(UINT32 idStartValue, UINT32 idIncrementValue)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_nextID = idStartValue;
+ m_incSize = idIncrementValue;
+ m_nextFatID = DispatchToken::MAX_TYPE_ID_SMALL + 1;
+ if (m_incSize != 0)
+ {
+ while (!OwnsID(m_nextFatID))
+ {
+ m_nextFatID++;
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ // Returns the next available ID
+ inline UINT32 GetNextID()
+ {
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(m_nextID != 0);
+ PRECONDITION(m_incSize != 0);
+ } CONTRACTL_END;
+ UINT32 id = m_nextID;
+
+ if (id > DispatchToken::MAX_TYPE_ID_SMALL)
+ {
+ return GetNextFatID();
+ }
+
+ if (!ClrSafeInt<UINT32>::addition(m_nextID, m_incSize, m_nextID) ||
+ m_nextID == INVALID_TYPE_ID)
+ {
+ ThrowOutOfMemory();
+ }
+ return id;
+ }
+
+ //------------------------------------------------------------------------
+ // Returns the next available ID
+ inline UINT32 GetNextFatID()
+ {
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(m_nextFatID != 0);
+ PRECONDITION(m_incSize != 0);
+ } CONTRACTL_END;
+ UINT32 id = m_nextFatID;
+ if (!ClrSafeInt<UINT32>::addition(m_nextFatID, m_incSize, m_nextFatID) ||
+ m_nextFatID == INVALID_TYPE_ID)
+ {
+ ThrowOutOfMemory();
+ }
+ return id;
+ }
+
+ //------------------------------------------------------------------------
+ inline BOOL OwnsID(UINT32 id)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((id % m_incSize) == (m_nextID % m_incSize));
+ }
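+
+ // For instance, with Init(2, 2) this provider owns the even IDs
+ // 2, 4, 6, ... and with Init(3, 2) the odd IDs 3, 5, 7, ..., so two
+ // providers started that way never hand out the same ID. (The increment
+ // of 2 is illustrative; see TypeIDMap's starting values below.)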
+}; // class TypeIDProvider
+
+// ===========================================================================
+class TypeIDMap
+{
+protected:
+ HashMap m_idMap;
+ HashMap m_mtMap;
+ Crst m_lock;
+ TypeIDProvider m_idProvider;
+ BOOL m_fUseFatIdsForUniqueness;
+ UINT32 m_entryCount;
+
+#ifndef BINDER
+ //------------------------------------------------------------------------
+ // Returns the next available ID
+ inline UINT32 GetNextID()
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(m_lock.OwnedByCurrentThread());
+ UINT32 id = m_idProvider.GetNextID();
+ CONSISTENCY_CHECK(id != TYPE_ID_THIS_CLASS);
+ return id;
+ }
+
+ //------------------------------------------------------------------------
+ // Returns the next available FAT ID
+ inline UINT32 GetNextFatID()
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(m_lock.OwnedByCurrentThread());
+ UINT32 id = m_idProvider.GetNextFatID();
+ CONSISTENCY_CHECK(id != TYPE_ID_THIS_CLASS);
+ return id;
+ }
+#endif
+
+public:
+ // Starting values for shared and unshared domains
+ enum
+ {
+ STARTING_SHARED_DOMAIN_ID = 0x2,
+ STARTING_UNSHARED_DOMAIN_ID = 0x3,
+ };
+
+ //------------------------------------------------------------------------
+ void Init(UINT32 idStartValue, UINT32 idIncrementValue, BOOL fUseFatTokensForUniqueness);
+
+ //------------------------------------------------------------------------
+ // Ctor
+#ifndef BINDER
+ TypeIDMap()
+ : m_lock(CrstTypeIDMap, CrstFlags(CRST_REENTRANCY))
+ {
+ WRAPPER_NO_CONTRACT;
+ static_assert_no_msg(TypeIDProvider::INVALID_TYPE_ID == static_cast<UINT32>(INVALIDENTRY));
+ }
+#endif
+
+ //------------------------------------------------------------------------
+ // Dtor
+ ~TypeIDMap()
+ { WRAPPER_NO_CONTRACT; }
+
+ //------------------------------------------------------------------------
+ // Returns the ID of the type if found. If not found, returns INVALID_TYPE_ID
+ UINT32 LookupTypeID(PTR_MethodTable pMT);
+
+ //------------------------------------------------------------------------
+ // Returns the type with the given ID if found. If not found, returns NULL.
+ PTR_MethodTable LookupType(UINT32 id);
+
+ //------------------------------------------------------------------------
+ // Returns the ID of the type if found. If not found, assigns the ID and
+ // returns the new ID.
+ UINT32 GetTypeID(PTR_MethodTable pMT);
+
+ //------------------------------------------------------------------------
+ inline UINT32 GetCount()
+ { LIMITED_METHOD_CONTRACT; return m_entryCount; }
+
+ //------------------------------------------------------------------------
+ void Clear()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+ m_idMap.Clear();
+ m_mtMap.Clear();
+ m_idProvider.Init(0, 0);
+ }
+
+ //------------------------------------------------------------------------
+ class Iterator
+ {
+ HashMap::Iterator m_it;
+
+ public:
+ //--------------------------------------------------------------------
+ inline Iterator(TypeIDMap *map)
+ : m_it(map->m_mtMap.begin())
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ //--------------------------------------------------------------------
+ inline BOOL IsValid()
+ {
+ WRAPPER_NO_CONTRACT;
+ return !m_it.end();
+ }
+
+ //--------------------------------------------------------------------
+ inline BOOL Next()
+ {
+ WRAPPER_NO_CONTRACT;
+ // We enumerate only m_mtMap, so each Type->ID entry is seen
+ // exactly once; the reverse ID->Type entries live in m_idMap
+ // and are never visited, avoiding duplicates.
+ ++m_it;
+ return IsValid();
+ }
+
+ //--------------------------------------------------------------------
+ inline MethodTable *GetType()
+ {
+ WRAPPER_NO_CONTRACT;
+ return (MethodTable *) m_it.GetKey();
+ }
+
+ //--------------------------------------------------------------------
+ inline UINT32 GetID()
+ {
+ WRAPPER_NO_CONTRACT;
+ return (UINT32) m_it.GetValue();
+ }
+ };
+}; // class TypeIDMap
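+
+// Illustrative round trip (sketch; the increment value 2 and the variable
+// pMT are assumptions chosen to match the interleaved starting values above):
+//   TypeIDMap map;
+//   map.Init(TypeIDMap::STARTING_UNSHARED_DOMAIN_ID, 2, FALSE);
+//   UINT32 id = map.GetTypeID(pMT);      // assigns an ID on first use
+//   _ASSERTE(map.LookupType(id) == pMT);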
+
+
+// ===========================================================================
+struct DispatchMapEntry
+{
+private:
+ DispatchMapTypeID m_typeID;
+ UINT16 m_slotNumber;
+ UINT16 m_targetSlotNumber;
+
+ enum
+ {
+ e_IS_VALID = 0x1
+ };
+ UINT16 m_flags;
+
+public:
+ //------------------------------------------------------------------------
+ // Initializes this structure.
+ void InitVirtualMapping(
+ DispatchMapTypeID typeID,
+ UINT32 slotNumber,
+ UINT32 targetSlotNumber)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_typeID = typeID;
+ m_slotNumber = (UINT16)slotNumber;
+ m_targetSlotNumber = (UINT16)targetSlotNumber;
+
+ // Set the flags
+ m_flags = e_IS_VALID;
+ }
+
+ //------------------------------------------------------------------------
+ inline DispatchMapTypeID GetTypeID()
+ { LIMITED_METHOD_CONTRACT; return m_typeID; }
+
+ //------------------------------------------------------------------------
+ inline UINT32 GetSlotNumber()
+ { LIMITED_METHOD_CONTRACT; return (UINT32) m_slotNumber; }
+
+ //------------------------------------------------------------------------
+ inline UINT32 GetTargetSlotNumber()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ return (UINT32)m_targetSlotNumber;
+ }
+ inline void SetTargetSlotNumber(UINT32 targetSlotNumber)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ m_targetSlotNumber = (UINT16)targetSlotNumber;
+ }
+
+ //------------------------------------------------------------------------
+ // Ctor - just blanks everything out - need to call Init*Mapping function.
+ inline DispatchMapEntry() : m_flags(0)
+ { LIMITED_METHOD_DAC_CONTRACT; }
+
+ inline BOOL IsValid()
+ { LIMITED_METHOD_CONTRACT; return (m_flags & e_IS_VALID); }
+}; // struct DispatchMapEntry
+
+// ===========================================================================
+// This represents an entry in the dispatch mapping. Conceptually, there is a
+// source to target mapping. There are additional housekeeping flags.
+struct DispatchMapBuilderNode
+{
+ // This represents the type and slot for this mapping
+ DispatchMapTypeID m_typeID;
+ UINT32 m_slotNumber;
+
+ // These represent the target, and type of mapping
+ MethodDesc * m_pMDTarget;
+
+ // Flags
+ UINT32 m_flags;
+
+ enum {
+ e_ENTRY_IS_METHODIMPL = 1
+ };
+
+ // Next entry in the list
+ DispatchMapBuilderNode *m_next;
+
+ //------------------------------------------------------------------------
+ void Init(
+ DispatchMapTypeID typeID,
+ UINT32 slotNumber,
+ MethodDesc * pMDTarget)
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(CheckPointer(pMDTarget, NULL_OK));
+ // Remember type and slot
+ m_typeID = typeID;
+ m_slotNumber = slotNumber;
+ // Set the target MD
+ m_pMDTarget = pMDTarget;
+ // Initialize the flags
+ m_flags = 0;
+ // Default to null link
+ m_next = NULL;
+ }
+
+ //------------------------------------------------------------------------
+ inline BOOL IsMethodImpl()
+ {
+ WRAPPER_NO_CONTRACT;
+ return (m_flags & e_ENTRY_IS_METHODIMPL);
+ }
+
+ //------------------------------------------------------------------------
+ inline void SetIsMethodImpl()
+ {
+ WRAPPER_NO_CONTRACT;
+ m_flags |= e_ENTRY_IS_METHODIMPL;
+ }
+}; // struct DispatchMapBuilderNode
+
+// ===========================================================================
+class DispatchMapBuilder
+{
+public:
+ class Iterator;
+
+ //------------------------------------------------------------------------
+ DispatchMapBuilder(StackingAllocator *allocator)
+ : m_pHead(NULL), m_cEntries(0), m_pAllocator(allocator)
+ { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(CheckPointer(m_pAllocator)); }
+
+ //------------------------------------------------------------------------
+ inline StackingAllocator *GetAllocator()
+ { LIMITED_METHOD_CONTRACT; return m_pAllocator; }
+
+ //------------------------------------------------------------------------
+ // If TRUE, it points to a matching entry.
+ // If FALSE, it is at the insertion point.
+ BOOL Find(DispatchMapTypeID typeID, UINT32 slotNumber, Iterator &it);
+
+ //------------------------------------------------------------------------
+ // If TRUE, contains such an entry.
+ // If FALSE, no such entry exists.
+ BOOL Contains(DispatchMapTypeID typeID, UINT32 slotNumber);
+
+ //------------------------------------------------------------------------
+ // This is used when building a MT, and things such as implementation
+ // table index and chain delta can't be calculated until later on. That's
+ // why we use an MD to get the information later.
+ void InsertMDMapping(
+ DispatchMapTypeID typeID,
+ UINT32 slotNumber,
+ MethodDesc * pMDTarget,
+ BOOL fIsMethodImpl);
+
+ //------------------------------------------------------------------------
+ inline UINT32 Count()
+ { LIMITED_METHOD_CONTRACT; return m_cEntries; }
+
+ //------------------------------------------------------------------------
+ class Iterator
+ {
+ friend class DispatchMapBuilder;
+
+ protected:
+ DispatchMapBuilderNode **m_cur;
+
+ //--------------------------------------------------------------------
+ inline DispatchMapBuilderNode **EntryNodePtr()
+ { LIMITED_METHOD_CONTRACT; return m_cur; }
+
+ //--------------------------------------------------------------------
+ inline DispatchMapBuilderNode *EntryNode()
+ { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(IsValid()); return *m_cur; }
+
+public:
+ //--------------------------------------------------------------------
+ // Creates an iterator that is pointing to the first entry of the map.
+ inline Iterator(DispatchMapBuilder *pMap)
+ : m_cur(&pMap->m_pHead)
+ { LIMITED_METHOD_CONTRACT; }
+
+ //--------------------------------------------------------------------
+    // Creates an iterator that points to the same location as 'it'.
+ inline Iterator(Iterator &it)
+ : m_cur(it.m_cur)
+ { LIMITED_METHOD_CONTRACT; }
+
+ //--------------------------------------------------------------------
+ inline BOOL IsValid()
+ { LIMITED_METHOD_CONTRACT; return (*m_cur != NULL); }
+
+ //--------------------------------------------------------------------
+ inline BOOL Next()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (!IsValid()) {
+ return FALSE;
+ }
+ m_cur = &((*m_cur)->m_next);
+ return (IsValid());
+ }
+
+ //--------------------------------------------------------------------
+ inline DispatchMapTypeID GetTypeID()
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ return EntryNode()->m_typeID;
+ }
+
+ //--------------------------------------------------------------------
+ inline UINT32 GetSlotNumber()
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ return EntryNode()->m_slotNumber;
+ }
+
+ //--------------------------------------------------------------------
+ inline MethodDesc *GetTargetMD()
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ return EntryNode()->m_pMDTarget;
+ }
+
+ //--------------------------------------------------------------------
+ UINT32 GetTargetSlot();
+
+ //--------------------------------------------------------------------
+ inline void SetTarget(MethodDesc *pMDTarget)
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ CONSISTENCY_CHECK(CheckPointer(pMDTarget));
+ EntryNode()->m_pMDTarget = pMDTarget;
+ }
+
+ //--------------------------------------------------------------------
+ inline BOOL IsMethodImpl()
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ return EntryNode()->IsMethodImpl();
+ }
+
+ //--------------------------------------------------------------------
+ inline void SetIsMethodImpl()
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ EntryNode()->SetIsMethodImpl();
+ }
+
+ inline void SkipThisTypeEntries()
+ {
+ LIMITED_METHOD_CONTRACT;
+ while (IsValid() && GetTypeID() == DispatchMapTypeID::ThisClassID())
+ {
+ Next();
+ }
+ }
+ }; // class Iterator
+
+protected:
+ DispatchMapBuilderNode * m_pHead;
+ UINT32 m_cEntries;
+ StackingAllocator * m_pAllocator;
+
+ //------------------------------------------------------------------------
+ DispatchMapBuilderNode * NewEntry();
+
+}; // class DispatchMapBuilder
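+
+// A minimal usage sketch (not compiled): how a caller might populate a
+// DispatchMapBuilder during MethodTable construction and then walk it with
+// the Iterator above. Everything outside the builder API itself
+// (pStackingAllocator, typeID, slotNumber, pMDTarget) is hypothetical.
+#if 0
+void SketchBuildDispatchMap(StackingAllocator *pStackingAllocator,
+                            DispatchMapTypeID  typeID,
+                            UINT32             slotNumber,
+                            MethodDesc        *pMDTarget)
+{
+    DispatchMapBuilder mapBuilder(pStackingAllocator);
+
+    // Record one source (typeID, slotNumber) -> target (pMDTarget) mapping.
+    mapBuilder.InsertMDMapping(typeID, slotNumber, pMDTarget, FALSE /* fIsMethodImpl */);
+
+    // Walk every recorded mapping.
+    for (DispatchMapBuilder::Iterator it(&mapBuilder); it.IsValid(); it.Next())
+    {
+        // e.g. resolve it.GetTypeID() / it.GetSlotNumber() to it.GetTargetMD()
+    }
+}
+#endif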
+
+typedef DPTR(class DispatchMap) PTR_DispatchMap;
+// ===========================================================================
+#ifndef BINDER
+class DispatchMap
+{
+protected:
+ BYTE m_rgMap[0];
+
+ static const INT32 ENCODING_TYPE_DELTA = 1;
+ static const INT32 ENCODING_SLOT_DELTA = 1;
+ static const INT32 ENCODING_TARGET_SLOT_DELTA = 1;
+
+public:
+ //------------------------------------------------------------------------
+    // Need to make sure that you allocate GetObjectSize(cbMap) bytes for any
+ // instance of DispatchMap, as this constructor assumes that m_rgMap is
+ // large enough to store cbMap bytes, which GetObjectSize ensures.
+ DispatchMap(
+ BYTE * pMap,
+ UINT32 cbMap);
+
+ //------------------------------------------------------------------------
+ static void CreateEncodedMapping(
+ MethodTable * pMT,
+ DispatchMapBuilder * pMapBuilder,
+ StackingAllocator * pAllocator,
+ BYTE ** ppbMap,
+ UINT32 * pcbMap);
+
+ //------------------------------------------------------------------------
+ static UINT32 GetObjectSize(UINT32 cbMap)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (UINT32)(sizeof(DispatchMap) + cbMap);
+ }
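+
+    // Allocation sketch (not compiled): DispatchMap is a variable-length
+    // structure, so the buffer must be sized with GetObjectSize before the
+    // object is placement-constructed. 'pMyHeap', 'pbMap' and 'cbMap' are
+    // hypothetical here.
+#if 0
+    void *pv = pMyHeap->Alloc(DispatchMap::GetObjectSize(cbMap));
+    DispatchMap *pMap = new (pv) DispatchMap(pbMap, cbMap);
+#endif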
+
+ //------------------------------------------------------------------------
+ UINT32 GetMapSize();
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+#ifdef FEATURE_PREJIT
+ //------------------------------------------------------------------------
+ void Save(DataImage *image);
+
+ //------------------------------------------------------------------------
+ void Fixup(DataImage *image);
+#endif //FEATURE_PREJIT
+
+ //------------------------------------------------------------------------
+ class EncodedMapIterator
+ {
+ friend class DispatchMap;
+ protected:
+ DispatchMapEntry m_e;
+
+ // These fields are for decoding the implementation map
+ Decoder m_d;
+ // Keep count of the number of types in the list
+ INT32 m_numTypes;
+ INT32 m_curType;
+ DispatchMapTypeID m_curTypeId;
+ BOOL m_fCurTypeHasNegativeEntries;
+
+ // Keep count of the number of entries for the current type
+ INT32 m_numEntries;
+ INT32 m_curEntry;
+ UINT32 m_curSlot;
+
+ UINT32 m_curTargetSlot;
+
+ //--------------------------------------------------------------------
+ void Invalidate();
+
+ //--------------------------------------------------------------------
+ void Init(PTR_BYTE pbMap);
+
+public:
+ //--------------------------------------------------------------------
+ EncodedMapIterator(MethodTable *pMT);
+
+ //--------------------------------------------------------------------
+ // This should be used only when a dispatch map needs to be used
+ // separately from its MethodTable.
+ EncodedMapIterator(DispatchMap *pMap);
+
+ //--------------------------------------------------------------------
+ EncodedMapIterator(PTR_BYTE pbMap);
+
+ //--------------------------------------------------------------------
+ inline BOOL IsValid()
+ { LIMITED_METHOD_DAC_CONTRACT; return (m_curType < m_numTypes); }
+
+ //--------------------------------------------------------------------
+ BOOL Next();
+
+ //--------------------------------------------------------------------
+ inline DispatchMapEntry *Entry()
+ { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(IsValid()); return &m_e; }
+ }; // class EncodedMapIterator
+
+public:
+ //------------------------------------------------------------------------
+ class Iterator
+ {
+ protected:
+ // This is for generating entries from the encoded map
+ EncodedMapIterator m_mapIt;
+
+ public:
+ //--------------------------------------------------------------------
+ Iterator(MethodTable *pMT);
+
+ //--------------------------------------------------------------------
+ BOOL IsValid();
+
+ //--------------------------------------------------------------------
+ BOOL Next();
+
+ //--------------------------------------------------------------------
+ DispatchMapEntry *Entry();
+ }; // class Iterator
+}; // class DispatchMap
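+
+// Enumeration sketch (not compiled): decoding a MethodTable's dispatch map
+// with the Iterator above. 'pMT' is a hypothetical MethodTable pointer.
+#if 0
+void SketchDumpDispatchMap(MethodTable *pMT)
+{
+    for (DispatchMap::Iterator it(pMT); it.IsValid(); it.Next())
+    {
+        DispatchMapEntry *pEntry = it.Entry();
+        // The entry's accessors (e.g. GetTargetSlotNumber()) describe one
+        // source->target mapping.
+    }
+}
+#endif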
+
+#endif // !BINDER
+
+#ifdef LOGGING
+struct StubDispatchStats
+{
+ // DispatchMap stats
+ UINT32 m_cDispatchMap; // Number of DispatchMaps created
+ UINT32 m_cbDispatchMap; // Total size of created maps
+ UINT32 m_cNGENDispatchMap;
+ UINT32 m_cbNGENDispatchMap;
+
+ // Some comparative stats with the old world (simulated)
+ UINT32 m_cVTables; // Number of vtables out there
+ UINT32 m_cVTableSlots; // Total number of slots.
+ UINT32 m_cVTableDuplicateSlots; // Total number of duplicated slots
+
+ UINT32 m_cCacheLookups;
+ UINT32 m_cCacheMisses;
+
+ UINT32 m_cbComInteropData;
+}; // struct StubDispatchStats
+
+extern StubDispatchStats g_sdStats;
+#endif // LOGGING
+
+#endif // !CONTRACTIMPL_H_
diff --git a/src/vm/coreassemblyspec.cpp b/src/vm/coreassemblyspec.cpp
new file mode 100644
index 0000000000..be3d2841ed
--- /dev/null
+++ b/src/vm/coreassemblyspec.cpp
@@ -0,0 +1,693 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ============================================================
+//
+// CoreAssemblySpec.cpp
+//
+
+
+//
+// CoreCLR specific implementation of AssemblySpec and BaseAssemblySpec
+// ============================================================
+
+#include "common.h"
+#include "peimage.h"
+#include "appdomain.inl"
+#include "peimagelayout.inl"
+#include "domainfile.h"
+#include "holder.h"
+#include "../binder/inc/assemblybinder.hpp"
+
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif
+
+#ifndef FEATURE_FUSION
+
+#include "../binder/inc/textualidentityparser.hpp"
+#include "../binder/inc/assemblyidentity.hpp"
+#include "../binder/inc/assembly.hpp"
+#include "../binder/inc/assemblyname.hpp"
+#include "../binder/inc/fusionassemblyname.hpp"
+
+#include "../binder/inc/coreclrbindercommon.h"
+#include "../binder/inc/applicationcontext.hpp"
+#ifndef DACCESS_COMPILE
+
+STDAPI BinderGetImagePath(PEImage *pPEImage,
+ SString &imagePath)
+{
+ HRESULT hr = S_OK;
+
+ _ASSERTE(pPEImage != NULL);
+
+ imagePath.Set(pPEImage->GetPath());
+ return hr;
+}
+
+STDAPI BinderAddRefPEImage(PEImage *pPEImage)
+{
+ HRESULT hr = S_OK;
+
+ if (pPEImage != NULL)
+ {
+ pPEImage->AddRef();
+ }
+
+ return hr;
+}
+
+STDAPI BinderReleasePEImage(PEImage *pPEImage)
+{
+ HRESULT hr = S_OK;
+
+ if (pPEImage != NULL)
+ {
+ pPEImage->Release();
+ }
+
+ return hr;
+}
+
+STDAPI BinderGetDisplayName(PEAssembly *pAssembly,
+ SString &displayName)
+{
+ HRESULT hr = S_OK;
+
+ if (pAssembly != NULL)
+ {
+ pAssembly->GetDisplayName(displayName, ASM_DISPLAYF_FULL);
+ }
+
+ return hr;
+}
+
+
+#ifdef FEATURE_VERSIONING
+
+static VOID ThrowLoadError(AssemblySpec * pSpec, HRESULT hr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ StackSString name;
+ pSpec->GetFileOrDisplayName(0, name);
+ EEFileLoadException::Throw(name, hr);
+}
+
+// See code:BINDER_SPACE::AssemblyBinder::GetAssembly for info on fNgenExplicitBind
+// and fExplicitBindToNativeImage, and see code:CEECompileInfo::LoadAssemblyByPath
+// for an example of how they're used.
+VOID AssemblySpec::Bind(AppDomain *pAppDomain,
+ BOOL fThrowOnFileNotFound,
+ CoreBindResult *pResult,
+ BOOL fNgenExplicitBind /* = FALSE */,
+ BOOL fExplicitBindToNativeImage /* = FALSE */,
+ StackCrawlMark *pCallerStackMark /* = NULL */)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pResult));
+ PRECONDITION(CheckPointer(pAppDomain));
+ PRECONDITION(IsMscorlib() == FALSE); // This should never be called for MSCORLIB (explicit loading)
+ }
+ CONTRACTL_END;
+
+ ReleaseHolder<BINDER_SPACE::Assembly> result;
+ HRESULT hr=S_OK;
+
+ SString assemblyDisplayName;
+
+ pResult->Reset();
+
+ if (m_wszCodeBase==NULL)
+ {
+ GetFileOrDisplayName(0, assemblyDisplayName);
+ }
+
+ // Have a default binding context setup
+ ICLRPrivBinder *pBinder = GetBindingContextFromParentAssembly(pAppDomain);
+
+ // Get the reference to the TPABinder context
+ CLRPrivBinderCoreCLR *pTPABinder = pAppDomain->GetTPABinderContext();
+
+ ReleaseHolder<ICLRPrivAssembly> pPrivAsm;
+ _ASSERTE(pBinder != NULL);
+
+ if (m_wszCodeBase==NULL && IsMscorlibSatellite())
+ {
+ StackSString sSystemDirectory(SystemDomain::System()->SystemDirectory());
+ StackSString tmpString;
+ StackSString sSimpleName;
+ StackSString sCultureName;
+
+ tmpString.SetUTF8(m_pAssemblyName);
+ tmpString.ConvertToUnicode(sSimpleName);
+
+ tmpString.Clear();
+ if ((m_context.szLocale != NULL) && (m_context.szLocale[0] != 0))
+ {
+ tmpString.SetUTF8(m_context.szLocale);
+ tmpString.ConvertToUnicode(sCultureName);
+ }
+
+ hr = CCoreCLRBinderHelper::BindToSystemSatellite(sSystemDirectory, sSimpleName, sCultureName, &pPrivAsm);
+ }
+ else if(m_wszCodeBase==NULL)
+ {
+        // For name-based binding, these arguments should not have been changed from their defaults.
+ _ASSERTE(!fNgenExplicitBind && !fExplicitBindToNativeImage);
+ SafeComHolder<IAssemblyName> pName;
+ hr = CreateAssemblyNameObject(&pName, assemblyDisplayName, CANOF_PARSE_DISPLAY_NAME, NULL);
+ if (SUCCEEDED(hr))
+ {
+ hr = pBinder->BindAssemblyByName(pName, &pPrivAsm);
+ }
+ }
+ else
+ {
+ // BindByWhereRef is supported only for the default (TPA) Binder in CoreCLR.
+ _ASSERTE(pBinder == pTPABinder);
+ if (pBinder != pTPABinder)
+ {
+ // Fail with an exception for better diagnosis.
+ COMPlusThrowHR(COR_E_INVALIDOPERATION);
+ }
+
+ hr = pTPABinder->Bind(assemblyDisplayName,
+ m_wszCodeBase,
+ GetParentAssembly()? GetParentAssembly()->GetFile():NULL,
+ fNgenExplicitBind,
+ fExplicitBindToNativeImage,
+ &pPrivAsm);
+ }
+
+ if(SUCCEEDED(hr))
+ {
+ _ASSERTE(pPrivAsm != nullptr);
+ result = BINDER_SPACE::GetAssemblyFromPrivAssemblyFast(pPrivAsm.Extract());
+ _ASSERTE(result != nullptr);
+ }
+
+ pResult->SetHRBindResult(hr);
+ if (SUCCEEDED(hr))
+ {
+ BOOL fIsInGAC = pAppDomain->IsImageFromTrustedPath(result->GetNativeOrILPEImage());
+ BOOL fIsOnTpaList = FALSE;
+ const SString &sImagePath = result->GetNativeOrILPEImage()->GetPath();
+ if (pTPABinder->IsInTpaList(sImagePath))
+ {
+ fIsOnTpaList = TRUE;
+ }
+ pResult->Init(result,fIsInGAC, fIsOnTpaList);
+ }
+ else if (FAILED(hr) && (fThrowOnFileNotFound || (!Assembly::FileNotFound(hr))))
+ {
+ ThrowLoadError(this, hr);
+ }
+}
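+
+// Caller sketch (not compiled): a plain name-based bind through the method
+// above. 'spec' and 'pAppDomain' are hypothetical; fNgenExplicitBind and
+// fExplicitBindToNativeImage keep their FALSE defaults.
+#if 0
+CoreBindResult bindResult;
+spec.Bind(pAppDomain, TRUE /* fThrowOnFileNotFound */, &bindResult);
+if (bindResult.Found())
+{
+    // ... wrap the result, e.g. in a PEAssembly ...
+}
+#endif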
+
+#endif // FEATURE_VERSIONING
+
+STDAPI BinderAcquirePEImage(LPCWSTR wszAssemblyPath,
+ PEImage **ppPEImage,
+ PEImage **ppNativeImage,
+ BOOL fExplicitBindToNativeImage)
+{
+ HRESULT hr = S_OK;
+
+ _ASSERTE(ppPEImage != NULL);
+
+ EX_TRY
+ {
+ PEImageHolder pImage = NULL;
+ PEImageHolder pNativeImage = NULL;
+
+#ifdef FEATURE_PREJIT
+ // fExplicitBindToNativeImage is set on Phone when we bind to a list of native images and have no IL on device for an assembly
+ if (fExplicitBindToNativeImage)
+ {
+ pNativeImage = PEImage::OpenImage(wszAssemblyPath, MDInternalImport_TrustedNativeImage);
+
+            // Make sure that the native image can actually be opened.
+ hr=pNativeImage->TryOpenFile();
+ if (FAILED(hr))
+ {
+ goto Exit;
+ }
+ }
+ else
+#endif
+ {
+ pImage = PEImage::OpenImage(wszAssemblyPath, MDInternalImport_Default);
+
+            // Make sure that the IL image can actually be opened.
+ hr=pImage->TryOpenFile();
+ if (FAILED(hr))
+ {
+ goto Exit;
+ }
+ }
+
+ if (pImage)
+ *ppPEImage = pImage.Extract();
+
+ if (ppNativeImage)
+ *ppNativeImage = pNativeImage.Extract();
+ }
+ EX_CATCH_HRESULT(hr);
+
+ Exit:
+ return hr;
+}
+
+STDAPI BinderHasNativeHeader(PEImage *pPEImage, BOOL* result)
+{
+ *result = pPEImage->HasNativeHeader();
+ return S_OK;
+}
+
+STDAPI BinderAcquireImport(PEImage *pPEImage,
+ IMDInternalImport **ppIAssemblyMetaDataImport,
+ DWORD *pdwPAFlags,
+ BOOL bNativeImage)
+{
+ HRESULT hr = S_OK;
+
+ _ASSERTE(pPEImage != NULL);
+ _ASSERTE(ppIAssemblyMetaDataImport != NULL);
+ _ASSERTE(pdwPAFlags != NULL);
+
+ EX_TRY
+ {
+ PEImageLayoutHolder pLayout(pPEImage->GetLayout(PEImageLayout::LAYOUT_ANY,PEImage::LAYOUT_CREATEIFNEEDED));
+
+        // CheckCorHeader includes a check of the NT headers too
+ if (!pLayout->CheckCorHeader())
+ IfFailGo(COR_E_ASSEMBLYEXPECTED);
+
+ if (!pLayout->CheckFormat())
+ IfFailGo(COR_E_BADIMAGEFORMAT);
+
+ if (bNativeImage && pPEImage->IsNativeILILOnly())
+ {
+ pPEImage->GetNativeILPEKindAndMachine(&pdwPAFlags[0], &pdwPAFlags[1]);
+ }
+ else
+ {
+ pPEImage->GetPEKindAndMachine(&pdwPAFlags[0], &pdwPAFlags[1]);
+ }
+
+ *ppIAssemblyMetaDataImport = pPEImage->GetMDImport();
+ if (!*ppIAssemblyMetaDataImport)
+ {
+ // Some native images don't contain metadata, to reduce size
+ if (!bNativeImage)
+ IfFailGo(COR_E_BADIMAGEFORMAT);
+ }
+ else
+ (*ppIAssemblyMetaDataImport)->AddRef();
+ }
+ EX_CATCH_HRESULT(hr);
+ErrExit:
+ return hr;
+}
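+
+// Caller sketch (not compiled): the acquire sequence a binder client might
+// use with the helpers above. Error handling is reduced to IfFailThrow and
+// 'wszPath' is hypothetical.
+#if 0
+PEImage *pImage = NULL;
+PEImage *pNativeImage = NULL;
+IfFailThrow(BinderAcquirePEImage(wszPath, &pImage, &pNativeImage, FALSE));
+
+BOOL fHasNativeHeader = FALSE;
+IfFailThrow(BinderHasNativeHeader(pImage, &fHasNativeHeader));
+
+IMDInternalImport *pImport = NULL;
+DWORD dwPAFlags[2];
+IfFailThrow(BinderAcquireImport(pImage, &pImport, dwPAFlags, fHasNativeHeader));
+#endif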
+
+HRESULT BaseAssemblySpec::ParseName()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ GC_TRIGGERS;
+ NOTHROW;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ if (!m_pAssemblyName)
+ return S_OK;
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ NewHolder<BINDER_SPACE::AssemblyIdentityUTF8> pAssemblyIdentity;
+ AppDomain *pDomain = ::GetAppDomain();
+ _ASSERTE(pDomain);
+
+ BINDER_SPACE::ApplicationContext *pAppContext = NULL;
+ IUnknown *pIUnknownBinder = pDomain->GetFusionContext();
+
+ if (pIUnknownBinder != NULL)
+ {
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) && !defined(MDILNIGEN)
+ if (pDomain->GetFusionContext() != pDomain->GetTPABinderContext())
+ {
+ pAppContext = (static_cast<CLRPrivBinderAssemblyLoadContext *>(pIUnknownBinder))->GetAppContext();
+ }
+ else
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) && !defined(MDILNIGEN)
+ {
+ pAppContext = (static_cast<CLRPrivBinderCoreCLR *>(pIUnknownBinder))->GetAppContext();
+ }
+ }
+
+ hr = CCoreCLRBinderHelper::GetAssemblyIdentity(m_pAssemblyName, pAppContext, pAssemblyIdentity);
+
+ if (FAILED(hr))
+ {
+ m_ownedFlags |= BAD_NAME_OWNED;
+ IfFailThrow(hr);
+ }
+
+ SetName(pAssemblyIdentity->GetSimpleNameUTF8());
+
+ if (pAssemblyIdentity->Have(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_VERSION))
+ {
+ m_context.usMajorVersion = (USHORT)pAssemblyIdentity->m_version.GetMajor();
+ m_context.usMinorVersion = (USHORT)pAssemblyIdentity->m_version.GetMinor();
+ m_context.usBuildNumber = (USHORT)pAssemblyIdentity->m_version.GetBuild();
+ m_context.usRevisionNumber = (USHORT)pAssemblyIdentity->m_version.GetRevision();
+ }
+
+ if (pAssemblyIdentity->Have(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_CULTURE))
+ {
+ if (!pAssemblyIdentity->m_cultureOrLanguage.IsEmpty())
+ SetCulture(pAssemblyIdentity->GetCultureOrLanguageUTF8());
+ else
+ SetCulture("");
+ }
+
+ if (pAssemblyIdentity->
+ Have(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_PUBLIC_KEY_TOKEN) ||
+ pAssemblyIdentity->Have(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_PUBLIC_KEY))
+ {
+ m_pbPublicKeyOrToken = const_cast<BYTE *>(pAssemblyIdentity->GetPublicKeyOrTokenArray());
+ m_cbPublicKeyOrToken = pAssemblyIdentity->m_publicKeyOrTokenBLOB.GetSize();
+
+ if (pAssemblyIdentity->Have(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_PUBLIC_KEY))
+ {
+ m_dwFlags |= afPublicKey;
+ }
+ }
+ else if (pAssemblyIdentity->
+ Have(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_PUBLIC_KEY_TOKEN_NULL))
+ {
+ m_pbPublicKeyOrToken = const_cast<BYTE *>(pAssemblyIdentity->GetPublicKeyOrTokenArray());
+ m_cbPublicKeyOrToken = 0;
+ }
+ else
+ {
+ m_pbPublicKeyOrToken = NULL;
+ m_cbPublicKeyOrToken = 0;
+ }
+
+ if (pAssemblyIdentity->
+ Have(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_PROCESSOR_ARCHITECTURE))
+ {
+ switch (pAssemblyIdentity->m_kProcessorArchitecture)
+ {
+ case peI386:
+ m_dwFlags |= afPA_x86;
+ break;
+ case peIA64:
+ m_dwFlags |= afPA_IA64;
+ break;
+ case peAMD64:
+ m_dwFlags |= afPA_AMD64;
+ break;
+ case peARM:
+ m_dwFlags |= afPA_ARM;
+ break;
+ case peMSIL:
+ m_dwFlags |= afPA_MSIL;
+ break;
+ default:
+ IfFailThrow(FUSION_E_INVALID_NAME);
+ }
+ }
+
+
+ if (pAssemblyIdentity->
+ Have(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_RETARGETABLE))
+ {
+ m_dwFlags |= afRetargetable;
+ }
+
+ if (pAssemblyIdentity->
+ Have(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_CONTENT_TYPE))
+ {
+ DWORD dwContentType = pAssemblyIdentity->m_kContentType;
+
+ _ASSERTE((dwContentType == AssemblyContentType_Default) || (dwContentType == AssemblyContentType_WindowsRuntime));
+ if (dwContentType == AssemblyContentType_WindowsRuntime)
+ {
+ m_dwFlags |= afContentType_WindowsRuntime;
+ }
+ }
+
+ CloneFields();
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
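+
+// Worked example (not compiled): what ParseName extracts from a hypothetical
+// textual identity stored in m_pAssemblyName.
+#if 0
+//   "Widgets, Version=1.2.3.4, Culture=neutral, PublicKeyToken=b77a5c561934e089"
+//
+// After a successful ParseName():
+//   simple name               == "Widgets"
+//   m_context.usMajorVersion  == 1,  usMinorVersion   == 2
+//   m_context.usBuildNumber   == 3,  usRevisionNumber == 4
+//   culture                   == "" (neutral)
+//   m_cbPublicKeyOrToken      == 8  (the token bytes); afPublicKey is not set,
+//                                   since this is a token, not a full key
+#endif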
+
+#endif // DACCESS_COMPILE
+
+VOID BaseAssemblySpec::GetFileOrDisplayName(DWORD flags, SString &result) const
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ INJECT_FAULT(ThrowOutOfMemory());
+ PRECONDITION(CheckValue(result));
+ PRECONDITION(result.IsEmpty());
+ }
+ CONTRACTL_END;
+
+ if (m_wszCodeBase)
+ {
+ result.Set(m_wszCodeBase);
+ return;
+ }
+
+ if (flags==0)
+ flags=ASM_DISPLAYF_FULL;
+
+ BINDER_SPACE::AssemblyIdentity assemblyIdentity;
+ SString tmpString;
+
+ tmpString.SetUTF8(m_pAssemblyName);
+
+ if ((m_ownedFlags & BAD_NAME_OWNED) != 0)
+ {
+ // Can't do anything with a broken name
+ tmpString.ConvertToUnicode(result);
+ return;
+ }
+ else
+ {
+ tmpString.ConvertToUnicode(assemblyIdentity.m_simpleName);
+ assemblyIdentity.SetHave(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_SIMPLE_NAME);
+ }
+
+ if( flags & ASM_DISPLAYF_VERSION && m_context.usMajorVersion != 0xFFFF)
+ {
+ assemblyIdentity.m_version.SetFeatureVersion(m_context.usMajorVersion,
+ m_context.usMinorVersion);
+ assemblyIdentity.m_version.SetServiceVersion(m_context.usBuildNumber,
+ m_context.usRevisionNumber);
+ assemblyIdentity.SetHave(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_VERSION);
+ }
+
+ if(flags & ASM_DISPLAYF_CULTURE)
+ {
+ assemblyIdentity.SetHave(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_CULTURE);
+ if ((m_context.szLocale != NULL) && (m_context.szLocale[0] != 0))
+ {
+ tmpString.SetUTF8(m_context.szLocale);
+ tmpString.ConvertToUnicode(assemblyIdentity.m_cultureOrLanguage);
+ }
+ }
+
+ if(flags & ASM_DISPLAYF_PUBLIC_KEY_TOKEN)
+ {
+ if (m_cbPublicKeyOrToken)
+ {
+ assemblyIdentity.SetHave(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_PUBLIC_KEY_TOKEN);
+ if(IsAfPublicKeyToken(m_dwFlags))
+ {
+ assemblyIdentity.m_publicKeyOrTokenBLOB.Set(m_pbPublicKeyOrToken,
+ m_cbPublicKeyOrToken);
+ }
+ else
+ {
+ DWORD cbToken = 0;
+ StrongNameBufferHolder<BYTE> pbToken;
+
+                // Try to compute the token from the public key
+ if (!StrongNameTokenFromPublicKey(m_pbPublicKeyOrToken,
+ m_cbPublicKeyOrToken,
+ &pbToken,
+ &cbToken))
+ {
+ // Throw an exception with details on what went wrong
+ COMPlusThrowHR(StrongNameErrorInfo());
+ }
+
+ assemblyIdentity.m_publicKeyOrTokenBLOB.Set(pbToken, cbToken);
+ }
+ }
+ else
+ {
+ assemblyIdentity.
+ SetHave(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_PUBLIC_KEY_TOKEN_NULL);
+ }
+ }
+
+ if ((flags & ASM_DISPLAYF_PROCESSORARCHITECTURE) && (m_dwFlags & afPA_Mask))
+ {
+ assemblyIdentity.
+ SetHave(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_PROCESSOR_ARCHITECTURE);
+
+ if (m_dwFlags & afPA_MSIL)
+ assemblyIdentity.m_kProcessorArchitecture = peMSIL;
+ else if (m_dwFlags & afPA_x86)
+ assemblyIdentity.m_kProcessorArchitecture = peI386;
+ else if (m_dwFlags & afPA_IA64)
+ assemblyIdentity.m_kProcessorArchitecture = peIA64;
+ else if (m_dwFlags & afPA_AMD64)
+ assemblyIdentity.m_kProcessorArchitecture = peAMD64;
+ else if (m_dwFlags & afPA_ARM)
+ assemblyIdentity.m_kProcessorArchitecture = peARM;
+ }
+
+ if ((flags & ASM_DISPLAYF_RETARGET) && (m_dwFlags & afRetargetable))
+ {
+ assemblyIdentity.SetHave(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_RETARGETABLE);
+ }
+
+ if ((flags & ASM_DISPLAYF_CONTENT_TYPE) && (m_dwFlags & afContentType_Mask) == afContentType_WindowsRuntime)
+ {
+ assemblyIdentity.SetHave(BINDER_SPACE::AssemblyIdentity::IDENTITY_FLAG_CONTENT_TYPE);
+ assemblyIdentity.m_kContentType = AssemblyContentType_WindowsRuntime;
+ }
+
+ IfFailThrow(BINDER_SPACE::TextualIdentityParser::ToString(&assemblyIdentity,
+ assemblyIdentity.m_dwIdentityFlags,
+ result));
+}
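+
+// Caller sketch (not compiled): flags == 0 is treated as ASM_DISPLAYF_FULL,
+// so the two calls below produce the same display name for a hypothetical
+// 'spec' with no code base.
+#if 0
+StackSString name1, name2;
+spec.GetFileOrDisplayName(0, name1);
+spec.GetFileOrDisplayName(ASM_DISPLAYF_FULL, name2);
+_ASSERTE(name1.Equals(name2));
+#endif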
+
+#ifndef FEATURE_CORECLR
+
+//
+// Trivial assembly binder for desktop crossgen
+//
+
+VOID AssemblySpec::Bind(AppDomain *pAppDomain,
+ BOOL fThrowOnFileNotFound,
+ CoreBindResult *pResult,
+ BOOL fNgenExplicitBind /* = FALSE */,
+ BOOL fExplicitBindToNativeImage /* = FALSE */,
+ StackCrawlMark *pCallerStackMark /* = NULL */)
+{
+ PEImageHolder pImage;
+ BOOL fNativeImage = FALSE;
+
+ if (GetCodeBase() != NULL)
+ {
+ // Normalize the path to maintain identity
+ SString sFullAssemblyPath;
+ Clr::Util::Win32::GetFullPathName(GetCodeBase(), sFullAssemblyPath, NULL);
+
+ pImage = PEImage::OpenImage(sFullAssemblyPath, MDInternalImport_Default);
+ }
+ else
+ {
+ SString sSimpleName(SString::Utf8, m_pAssemblyName);
+
+ fNativeImage = pAppDomain->ToCompilationDomain()->IsInHardBindList(sSimpleName);
+
+ SString sFileName(sSimpleName, fNativeImage ? W(".ni.dll") : W(".dll"));
+
+ if (!CompilationDomain::FindImage(sFileName,
+ fNativeImage ? MDInternalImport_TrustedNativeImage : MDInternalImport_Default,
+ &pImage))
+ {
+ sFileName.Set(sSimpleName, fNativeImage ? W(".ni.exe") : W(".exe"));
+
+ if (!CompilationDomain::FindImage(sFileName,
+ fNativeImage ? MDInternalImport_TrustedNativeImage : MDInternalImport_Default,
+ &pImage))
+ {
+ EEFileLoadException::Throw(sSimpleName, COR_E_FILENOTFOUND);
+ }
+ }
+ }
+
+ GetSvcLogger()->Printf(W("Loading %s\n"), pImage->GetPath().GetUnicode());
+
+ NewHolder<BINDER_SPACE::Assembly> pAssembly;
+ pAssembly = new BINDER_SPACE::Assembly();
+
+ pAssembly->m_assemblyPath.Set(pImage->GetPath());
+
+ if (fNativeImage)
+ pAssembly->SetNativePEImage(pImage.Extract());
+ else
+ pAssembly->SetPEImage(pImage.Extract());
+
+ pResult->Init(pAssembly.Extract(), TRUE, TRUE);
+}
+
+VOID AssemblySpec::BindToSystem(BINDER_SPACE::Assembly** ppAssembly)
+{
+ PEImageHolder pImage;
+ BOOL fNativeImage = FALSE;
+
+ _ASSERTE(ppAssembly != nullptr);
+
+ if (g_fAllowNativeImages)
+ {
+ if (CompilationDomain::FindImage(W("mscorlib.ni.dll"), MDInternalImport_TrustedNativeImage, &pImage))
+ fNativeImage = TRUE;
+ }
+
+ if (!fNativeImage)
+ {
+ if (!CompilationDomain::FindImage(W("mscorlib.dll"), MDInternalImport_Default, &pImage))
+ {
+ EEFileLoadException::Throw(W("mscorlib.dll"), COR_E_FILENOTFOUND);
+ }
+ }
+
+ GetSvcLogger()->Printf(W("Loading %s\n"), pImage->GetPath().GetUnicode());
+
+ NewHolder<BINDER_SPACE::Assembly> pAssembly;
+ pAssembly = new BINDER_SPACE::Assembly();
+
+ pAssembly->m_assemblyPath.Set(pImage->GetPath());
+
+ if (fNativeImage)
+ pAssembly->SetNativePEImage(pImage.Extract());
+ else
+ pAssembly->SetPEImage(pImage.Extract());
+
+ *ppAssembly = pAssembly.Extract();
+}
+
+#endif // !FEATURE_CORECLR
+
+#endif // !FEATURE_FUSION
diff --git a/src/vm/corebindresult.cpp b/src/vm/corebindresult.cpp
new file mode 100644
index 0000000000..298bd48643
--- /dev/null
+++ b/src/vm/corebindresult.cpp
@@ -0,0 +1,71 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ============================================================
+//
+// CoreBindResult.cpp
+//
+
+//
+// Implements the CoreBindResult class
+// ============================================================
+
+
+#include "common.h"
+
+#ifdef CLR_STANDALONE_BINDER
+#include "coreclr\corebindresult.h"
+#endif // CLR_STANDALONE_BINDER
+
+#include "../binder/inc/assembly.hpp"
+
+#ifndef FEATURE_FUSION
+#ifndef DACCESS_COMPILE
+
+STDMETHODIMP CoreBindResult::QueryInterface(REFIID riid,
+ void **ppv)
+{
+ HRESULT hr = S_OK;
+
+ if (ppv == NULL)
+ {
+ hr = E_POINTER;
+ }
+ else
+ {
+ if ( IsEqualIID(riid, IID_IUnknown) )
+ {
+ AddRef();
+ *ppv = static_cast<IUnknown *>(this);
+ }
+ else
+ {
+ *ppv = NULL;
+ hr = E_NOINTERFACE;
+ }
+ }
+
+ return hr;
+}
+
+STDMETHODIMP_(ULONG) CoreBindResult::AddRef()
+{
+ return InterlockedIncrement(&m_cRef);
+}
+
+STDMETHODIMP_(ULONG) CoreBindResult::Release()
+{
+ ULONG ulRef = InterlockedDecrement(&m_cRef);
+
+ if (ulRef == 0)
+ {
+ delete this;
+ }
+
+ return ulRef;
+}
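+
+// Refcount sketch (not compiled): CoreBindResult is constructed with
+// m_cRef == 1, so a plain Release() pairs with construction; QueryInterface
+// only satisfies IID_IUnknown.
+#if 0
+CoreBindResult *pResult = new CoreBindResult();       // m_cRef == 1
+IUnknown *pUnk = NULL;
+if (SUCCEEDED(pResult->QueryInterface(IID_IUnknown, (void **)&pUnk)))
+    pUnk->Release();                                  // balances the QI AddRef
+pResult->Release();                                   // destroys the object
+#endif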
+
+
+#endif // DACCESS_COMPILE
+#endif // !FEATURE_FUSION
diff --git a/src/vm/coreclr/.gitmirror b/src/vm/coreclr/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/vm/coreclr/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror. \ No newline at end of file
diff --git a/src/vm/coreclr/corebindresult.h b/src/vm/coreclr/corebindresult.h
new file mode 100644
index 0000000000..aae7752c87
--- /dev/null
+++ b/src/vm/coreclr/corebindresult.h
@@ -0,0 +1,62 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ============================================================
+//
+// CoreBindResult.h
+//
+
+//
+// Declares the CoreBindResult class
+// A BindResult represents the result of an assembly bind and may encapsulate a PEImage, IAssembly, IHostAssembly, etc.
+// This is the CoreCLR implementation of it.
+// ============================================================
+
+#ifndef __CORE_BIND_RESULT_H__
+#define __CORE_BIND_RESULT_H__
+
+#include "../../binder/inc/assembly.hpp"
+
+struct CoreBindResult : public IUnknown
+{
+protected:
+ ReleaseHolder<ICLRPrivAssembly> m_pAssembly;
+ BOOL m_bIsFromGAC;
+ BOOL m_bIsOnTpaList;
+ HRESULT m_hrBindResult;
+ LONG m_cRef;
+
+public:
+
+ // IUnknown methods
+ STDMETHOD(QueryInterface)(REFIID riid,
+ void ** ppv);
+ STDMETHOD_(ULONG, AddRef)();
+ STDMETHOD_(ULONG, Release)();
+
+ // CoreBindResult methods
+ CoreBindResult() : m_cRef(1) {}
+
+ void Init(ICLRPrivAssembly* pAssembly, BOOL bFromGAC, BOOL bIsOnTpaList);
+ void Reset();
+
+ BOOL Found();
+ PEImage* GetPEImage();
+ BOOL IsFromGAC();
+ BOOL IsOnTpaList();
+ BOOL IsMscorlib();
+ void GetBindAssembly(ICLRPrivAssembly** ppAssembly);
+#ifdef FEATURE_PREJIT
+ BOOL HasNativeImage();
+ PEImage* GetNativeImage();
+ void SetNativeImage(PEImage * pNativeImage);
+ PEImage* GetILImage();
+#endif
+ void SetHRBindResult(HRESULT hrBindResult);
+ HRESULT GetHRBindResult();
+};
+
+
+#endif // __CORE_BIND_RESULT_H__
+
diff --git a/src/vm/coreclr/corebindresult.inl b/src/vm/coreclr/corebindresult.inl
new file mode 100644
index 0000000000..9b6cc469de
--- /dev/null
+++ b/src/vm/coreclr/corebindresult.inl
@@ -0,0 +1,130 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ============================================================
+//
+// CoreBindResult.inl
+//
+
+//
+// Implements the CoreBindResult class
+// ============================================================
+
+#ifndef __CORE_BIND_RESULT_INL__
+#define __CORE_BIND_RESULT_INL__
+
+#include "clrprivbinderutil.h"
+
+inline BOOL CoreBindResult::IsFromGAC()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_bIsFromGAC;
+};
+
+inline BOOL CoreBindResult::IsOnTpaList()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_bIsOnTpaList;
+};
+
+inline BOOL CoreBindResult::Found()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_pAssembly!=NULL);
+};
+
+inline BOOL CoreBindResult::IsMscorlib()
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ PRECONDITION(Found());
+ }
+ CONTRACTL_END;
+
+ BINDER_SPACE::Assembly* pAssembly = BINDER_SPACE::GetAssemblyFromPrivAssemblyFast(m_pAssembly);
+#ifndef CROSSGEN_COMPILE
+ return pAssembly->GetAssemblyName()->IsMscorlib();
+#else
+ return (pAssembly->GetPath()).EndsWithCaseInsensitive(SString(L"mscorlib.dll"), PEImage::GetFileSystemLocale());
+#endif
+}
+
+inline void CoreBindResult::GetBindAssembly(ICLRPrivAssembly** ppAssembly)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ PRECONDITION(Found());
+ }
+ CONTRACTL_END;
+
+ m_pAssembly->AddRef();
+ *ppAssembly = m_pAssembly;
+}
+
+
+inline PEImage* CoreBindResult::GetPEImage()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pAssembly?BINDER_SPACE::GetAssemblyFromPrivAssemblyFast(m_pAssembly)->GetNativeOrILPEImage():NULL;
+};
+
+inline void CoreBindResult::Init(ICLRPrivAssembly* pAssembly, BOOL bFromGAC, BOOL bOnTpaList = FALSE)
+{
+ WRAPPER_NO_CONTRACT;
+ m_pAssembly=pAssembly;
+ if(pAssembly)
+ pAssembly->AddRef();
+ m_bIsFromGAC=bFromGAC;
+ m_bIsOnTpaList = bOnTpaList;
+ m_hrBindResult = S_OK;
+}
+
+inline void CoreBindResult::Reset()
+{
+ WRAPPER_NO_CONTRACT;
+ m_pAssembly=NULL;
+ m_bIsFromGAC=FALSE;
+ m_bIsOnTpaList=FALSE;
+ m_hrBindResult = S_OK;
+}
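+
+// Lifecycle sketch (not compiled): a typical Init/inspect/Reset cycle for a
+// CoreBindResult. 'pPrivAsm' is a hypothetical ICLRPrivAssembly*.
+#if 0
+CoreBindResult bindResult;
+bindResult.Init(pPrivAsm, TRUE /* bFromGAC */, TRUE /* bOnTpaList */);  // AddRefs pPrivAsm
+if (bindResult.Found())
+{
+    PEImage *pImage = bindResult.GetPEImage();
+    // ... use the image ...
+}
+bindResult.Reset();  // drops the assembly reference and clears the flags
+#endif
+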
+#ifdef FEATURE_PREJIT
+inline BOOL CoreBindResult::HasNativeImage()
+{
+ LIMITED_METHOD_CONTRACT;
+ BINDER_SPACE::Assembly* pAssembly = BINDER_SPACE::GetAssemblyFromPrivAssemblyFast(m_pAssembly);
+ return pAssembly->GetNativePEImage() != NULL;
+}
+inline PEImage* CoreBindResult::GetNativeImage()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(HasNativeImage());
+ BINDER_SPACE::Assembly* pAssembly = BINDER_SPACE::GetAssemblyFromPrivAssemblyFast(m_pAssembly);
+ return pAssembly->GetNativePEImage();
+}
+
+inline PEImage* CoreBindResult::GetILImage()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pAssembly?BINDER_SPACE::GetAssemblyFromPrivAssemblyFast(m_pAssembly)->GetPEImage():NULL;
+};
+#endif
+
+inline void CoreBindResult::SetHRBindResult(HRESULT hrBindResult)
+{
+ WRAPPER_NO_CONTRACT;
+ m_hrBindResult = hrBindResult;
+}
+
+inline HRESULT CoreBindResult::GetHRBindResult()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_hrBindResult;
+}
+
+#endif // __CORE_BIND_RESULT_INL__
+
diff --git a/src/vm/corhost.cpp b/src/vm/corhost.cpp
new file mode 100644
index 0000000000..37488f09e7
--- /dev/null
+++ b/src/vm/corhost.cpp
@@ -0,0 +1,8936 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// CorHost.cpp
+//
+// Implementation of the CLR hosting interfaces (CorHost and friends).
+//
+
+//*****************************************************************************
+
+#include "common.h"
+
+#include "mscoree.h"
+#include "corhost.h"
+#include "excep.h"
+#include "threads.h"
+#include "jitinterface.h"
+#include "eeconfig.h"
+#include "dbginterface.h"
+#include "ceemain.h"
+#include "rwlock.h"
+#include "hosting.h"
+#include "eepolicy.h"
+#include "clrex.h"
+#ifdef FEATURE_IPCMAN
+#include "ipcmanagerinterface.h"
+#endif // FEATURE_IPCMAN
+#include "comcallablewrapper.h"
+#include "hostexecutioncontext.h"
+#include "invokeutil.h"
+#include "appdomain.inl"
+#include "vars.hpp"
+#include "comdelegate.h"
+#include "dllimportcallback.h"
+#include "eventtrace.h"
+
+#include "win32threadpool.h"
+#include "eventtrace.h"
+#include "finalizerthread.h"
+#include "threadsuspend.h"
+
+#ifndef FEATURE_PAL
+#include "dwreport.h"
+#endif // !FEATURE_PAL
+
+#include "stringarraylist.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "winrttypenameconverter.h"
+#endif
+
+#if defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_APPX_BINDER)
+#include "clrprivbinderappx.h"
+#include "clrprivtypecachewinrt.h"
+#endif
+
+#ifdef FEATURE_WINDOWSPHONE
+#include "thetestkey.h"
+#endif
+
+GVAL_IMPL_INIT(DWORD, g_fHostConfig, 0);
+
+#ifdef FEATURE_IMPLICIT_TLS
+#ifndef __llvm__
+EXTERN_C __declspec(thread) ThreadLocalInfo gCurrentThreadInfo;
+#else // !__llvm__
+EXTERN_C __thread ThreadLocalInfo gCurrentThreadInfo;
+#endif // !__llvm__
+#ifndef FEATURE_PAL
+EXTERN_C UINT32 _tls_index;
+#else // FEATURE_PAL
+UINT32 _tls_index = 0;
+#endif // FEATURE_PAL
+SVAL_IMPL_INIT(DWORD, CExecutionEngine, TlsIndex, _tls_index);
+#else
+SVAL_IMPL_INIT(DWORD, CExecutionEngine, TlsIndex, TLS_OUT_OF_INDEXES);
+#endif
+
+
+#if defined(FEATURE_INCLUDE_ALL_INTERFACES) || defined(FEATURE_WINDOWSPHONE)
+SVAL_IMPL_INIT(ECustomDumpFlavor, CCLRErrorReportingManager, g_ECustomDumpFlavor, DUMP_FLAVOR_Default);
+#endif
+
+#ifndef DACCESS_COMPILE
+
+extern void STDMETHODCALLTYPE EEShutDown(BOOL fIsDllUnloading);
+extern HRESULT STDMETHODCALLTYPE CoInitializeEE(DWORD fFlags);
+extern void PrintToStdOutA(const char *pszString);
+extern void PrintToStdOutW(const WCHAR *pwzString);
+extern BOOL g_fEEHostedStartup;
+
+INT64 g_PauseTime; // Total time in millisecond the CLR has been paused
+Volatile<BOOL> g_IsPaused; // True if the runtime is paused (FAS)
+CLREventStatic g_ClrResumeEvent; // Event that is fired at FAS Resuming
+#ifndef FEATURE_CORECLR
+CLREventStatic g_PauseCompletedEvent; // Set when Pause has completed its work on another thread.
+#endif
+
+#if defined(FEATURE_CORECLR)
+extern BYTE g_rbTestKeyBuffer[];
+#endif
+
+#if !defined(FEATURE_CORECLR)
+//******************************************************************************
+// <TODO>TODO: ICorThreadpool: Move this into a separate file CorThreadpool.cpp
+// after the move to VBL </TODO>
+//******************************************************************************
+
+HRESULT STDMETHODCALLTYPE CorThreadpool::CorRegisterWaitForSingleObject(PHANDLE phNewWaitObject,
+ HANDLE hWaitObject,
+ WAITORTIMERCALLBACK Callback,
+ PVOID Context,
+ ULONG timeout,
+ BOOL executeOnlyOnce,
+ BOOL* pResult)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ ULONG flag = executeOnlyOnce ? WAIT_SINGLE_EXECUTION : 0;
+ *pResult = FALSE;
+ EX_TRY
+ {
+ *pResult = ThreadpoolMgr::RegisterWaitForSingleObject(phNewWaitObject,
+ hWaitObject,
+ Callback,
+ Context,
+ timeout,
+ flag);
+
+ hr = (*pResult ? S_OK : HRESULT_FROM_GetLastError());
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+
+HRESULT STDMETHODCALLTYPE CorThreadpool::CorBindIoCompletionCallback(HANDLE fileHandle,
+ LPOVERLAPPED_COMPLETION_ROUTINE callback)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ BOOL ret = FALSE;
+ DWORD errCode = 0;
+
+ EX_TRY
+ {
+ ret = ThreadpoolMgr::BindIoCompletionCallback(fileHandle,callback,0, errCode);
+ hr = (ret ? S_OK : HRESULT_FROM_WIN32(errCode));
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+
+HRESULT STDMETHODCALLTYPE CorThreadpool::CorUnregisterWait(HANDLE hWaitObject,
+ HANDLE CompletionEvent,
+ BOOL* pResult)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ *pResult = FALSE;
+ EX_TRY
+ {
+
+ *pResult = ThreadpoolMgr::UnregisterWaitEx(hWaitObject,CompletionEvent);
+ hr = (*pResult ? S_OK : HRESULT_FROM_GetLastError());
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+
+}
+
+HRESULT STDMETHODCALLTYPE CorThreadpool::CorQueueUserWorkItem(LPTHREAD_START_ROUTINE Function,
+ PVOID Context,BOOL executeOnlyOnce,
+ BOOL* pResult )
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ *pResult = FALSE;
+ EX_TRY
+ {
+ *pResult = ThreadpoolMgr::QueueUserWorkItem(Function,Context,QUEUE_ONLY);
+ hr = (*pResult ? S_OK : HRESULT_FROM_GetLastError());
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+}
+
+HRESULT STDMETHODCALLTYPE CorThreadpool::CorCallOrQueueUserWorkItem(LPTHREAD_START_ROUTINE Function,
+ PVOID Context,
+ BOOL* pResult )
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+ BEGIN_ENTRYPOINT_NOTHROW;
+ *pResult = FALSE;
+ EX_TRY
+ {
+ *pResult = ThreadpoolMgr::QueueUserWorkItem(Function,Context,CALL_OR_QUEUE);
+ hr = (*pResult ? S_OK : HRESULT_FROM_GetLastError());
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+}
+
+
+HRESULT STDMETHODCALLTYPE CorThreadpool::CorCreateTimer(PHANDLE phNewTimer,
+ WAITORTIMERCALLBACK Callback,
+ PVOID Parameter,
+ DWORD DueTime,
+ DWORD Period,
+ BOOL* pResult)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ *pResult = FALSE;
+ EX_TRY
+ {
+ *pResult = ThreadpoolMgr::CreateTimerQueueTimer(phNewTimer,Callback,Parameter,DueTime,Period,0);
+ hr = (*pResult ? S_OK : HRESULT_FROM_GetLastError());
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+}
+
+
+HRESULT STDMETHODCALLTYPE CorThreadpool::CorDeleteTimer(HANDLE Timer, HANDLE CompletionEvent, BOOL* pResult)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ *pResult = FALSE;
+ EX_TRY
+ {
+ *pResult = ThreadpoolMgr::DeleteTimerQueueTimer(Timer,CompletionEvent);
+ hr = (*pResult ? S_OK : HRESULT_FROM_GetLastError());
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+}
+
+HRESULT STDMETHODCALLTYPE CorThreadpool::CorChangeTimer(HANDLE Timer,
+ ULONG DueTime,
+ ULONG Period,
+ BOOL* pResult)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ *pResult = FALSE;
+ EX_TRY
+ {
+ //CONTRACT_VIOLATION(ThrowsViolation);
+ *pResult = ThreadpoolMgr::ChangeTimerQueueTimer(Timer,DueTime,Period);
+ hr = (*pResult ? S_OK : HRESULT_FROM_GetLastError());
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+}
+
+
+HRESULT STDMETHODCALLTYPE CorThreadpool::CorSetMaxThreads(DWORD MaxWorkerThreads,
+ DWORD MaxIOCompletionThreads)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ BOOL result = FALSE;
+ EX_TRY
+ {
+ result = ThreadpoolMgr::SetMaxThreads(MaxWorkerThreads, MaxIOCompletionThreads);
+ hr = (result ? S_OK : E_FAIL);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+}
+
+HRESULT STDMETHODCALLTYPE CorThreadpool::CorGetMaxThreads(DWORD *MaxWorkerThreads,
+ DWORD *MaxIOCompletionThreads)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ BOOL result = FALSE;
+ EX_TRY
+ {
+ result = ThreadpoolMgr::GetMaxThreads(MaxWorkerThreads, MaxIOCompletionThreads);
+ hr = (result ? S_OK : E_FAIL);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+}
+
+HRESULT STDMETHODCALLTYPE CorThreadpool::CorGetAvailableThreads(DWORD *AvailableWorkerThreads,
+ DWORD *AvailableIOCompletionThreads)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ BOOL result = FALSE;
+ EX_TRY
+ {
+ result = ThreadpoolMgr::GetAvailableThreads(AvailableWorkerThreads, AvailableIOCompletionThreads);
+ hr = (result ? S_OK : E_FAIL);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+#endif // !defined(FEATURE_CORECLR)
+//***************************************************************************
+
+ULONG CorRuntimeHostBase::m_Version = 0;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+static CCLRDebugManager s_CLRDebugManager;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#if defined(FEATURE_INCLUDE_ALL_INTERFACES) || defined(FEATURE_WINDOWSPHONE)
+CCLRErrorReportingManager g_CLRErrorReportingManager;
+#endif // defined(FEATURE_INCLUDE_ALL_INTERFACES) || defined(FEATURE_WINDOWSPHONE)
+
+#ifdef FEATURE_IPCMAN
+static CCLRSecurityAttributeManager s_CLRSecurityAttributeManager;
+#endif // FEATURE_IPCMAN
+
+#endif // !DAC
+
+typedef DPTR(CONNID) PTR_CONNID;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+// Hash table to keep track <connection, name> for SQL fiber support
+class ConnectionNameTable : CHashTableAndData<CNewDataNoThrow>
+{
+ friend class CCLRDebugManager;
+public:
+
+ // Key to match is connection ID.
+ // Returns true if the given HASHENTRY has the same key as the requested key.
+ BOOL Cmp(SIZE_T requestedKey, const HASHENTRY * pEntry)
+ {
+ SUPPORTS_DAC;
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ CONNID keyRequested = (CONNID)requestedKey;
+ CONNID keySearch = dac_cast<PTR_ConnectionNameHashEntry>(pEntry)->m_dwConnectionId;
+ return keyRequested != keySearch;
+ }
+
+ // Hash function
+ ULONG Hash(CONNID dwConnectionId)
+ {
+ SUPPORTS_DAC;
+ LIMITED_METHOD_CONTRACT;
+
+ return (ULONG)(dwConnectionId);
+ }
+
+#ifndef DACCESS_COMPILE
+ // constructor
+ ConnectionNameTable(
+ ULONG iBuckets) : // # of chains we are hashing into.
+ CHashTableAndData<CNewDataNoThrow>(iBuckets)
+ {LIMITED_METHOD_CONTRACT;}
+
+ // destructor
+ ~ConnectionNameTable()
+ {
+ CONTRACTL
+ {
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ NOTHROW;
+ }
+ CONTRACTL_END;
+ HASHFIND hashFind;
+ ConnectionNameHashEntry *pNameEntry;
+
+ pNameEntry = (ConnectionNameHashEntry *)FindFirstEntry(&hashFind);
+ while (pNameEntry != NULL)
+ {
+ if (pNameEntry->m_pwzName)
+ {
+                delete [] pNameEntry->m_pwzName;
+ pNameEntry->m_pwzName = NULL;
+ }
+
+ if (pNameEntry->m_CLRTaskCount != 0)
+ {
+ _ASSERTE(pNameEntry->m_ppCLRTaskArray != NULL);
+ for (UINT i = 0; i < pNameEntry->m_CLRTaskCount; i++)
+ {
+ pNameEntry->m_ppCLRTaskArray[i]->Release();
+ }
+ delete [] pNameEntry->m_ppCLRTaskArray;
+ pNameEntry->m_ppCLRTaskArray = NULL;
+ pNameEntry->m_CLRTaskCount = 0;
+ }
+ pNameEntry = (ConnectionNameHashEntry *)FindNextEntry(&hashFind);
+ }
+ }
+
+ // Add a new connection into hash table.
+ // This function does not throw but return NULL when memory allocation fails.
+ ConnectionNameHashEntry *AddConnection(
+ CONNID dwConnectionId,
+ __in_z WCHAR *pwzName) // We should review this in the future. This API is
+ // public and callable by a host. This SAL annotation
+ // is the best we can do now.
+ {
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ ULONG iHash = Hash(dwConnectionId);
+
+ size_t len = wcslen(pwzName) + 1;
+ WCHAR *pConnName = new (nothrow) WCHAR[len];
+ if (pConnName == NULL)
+ return NULL;
+
+ ConnectionNameHashEntry *pRecord = (ConnectionNameHashEntry *)Add(iHash);
+ if (pRecord)
+ {
+ pRecord->m_dwConnectionId = dwConnectionId;
+ pRecord->m_pwzName = pConnName;
+ wcsncpy_s(pRecord->m_pwzName, len, pwzName, len);
+ pRecord->m_CLRTaskCount = 0;
+ pRecord->m_ppCLRTaskArray = NULL;
+ }
+ else
+ {
+ if (pConnName)
+ delete [] pConnName;
+ }
+
+ return pRecord;
+ }
+
+ // Delete a hash entry given a connection id
+ void DeleteConnection(CONNID dwConnectionId)
+ {
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ ULONG iHash;
+ iHash = Hash(dwConnectionId);
+ ConnectionNameHashEntry * pRecord =
+ reinterpret_cast<ConnectionNameHashEntry *>(Find(iHash, (SIZE_T)dwConnectionId));
+ if (pRecord == NULL)
+ {
+ return;
+ }
+
+ _ASSERTE(pRecord->m_CLRTaskCount == 0 && pRecord->m_ppCLRTaskArray == NULL);
+ if (pRecord->m_pwzName)
+ {
+            delete [] pRecord->m_pwzName;
+ pRecord->m_pwzName = NULL;
+ }
+ Delete(iHash, (HASHENTRY *)pRecord);
+ }
+
+ // return NULL if the given connection id cannot be found.
+ ConnectionNameHashEntry *FindConnection(CONNID dwConnectionId)
+ {
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ ULONG iHash;
+ iHash = Hash(dwConnectionId);
+ return reinterpret_cast<ConnectionNameHashEntry *>(Find(iHash, (SIZE_T)dwConnectionId));
+ }
+#endif // !DAC
+};
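+
+// Usage sketch (not compiled): mapping a host connection id to a name for
+// SQL fiber-mode debugging. The id and name below are hypothetical.
+#if 0
+ConnectionNameTable table(17 /* hash buckets */);
+WCHAR wszName[] = W("worker-connection");
+ConnectionNameHashEntry *pEntry = table.AddConnection(42, wszName);
+if (pEntry != NULL)
+{
+    _ASSERTE(table.FindConnection(42) == pEntry);
+    table.DeleteConnection(42);
+}
+#endif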
+#endif //FEATURE_INCLUDE_ALL_INTERFACES
+
+
+// Keep track connection id and name
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+SPTR_IMPL(ConnectionNameTable, CCLRDebugManager, m_pConnectionNameHash);
+CrstStatic CCLRDebugManager::m_lockConnectionNameTable;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#ifndef DACCESS_COMPILE
+
+
+#if !defined(FEATURE_CORECLR) // simple hosting
+//*****************************************************************************
+// ICorRuntimeHost
+//*****************************************************************************
+extern BOOL g_singleVersionHosting;
+
+// *** ICorRuntimeHost methods ***
+// Returns an object for configuring the runtime prior to
+// it starting. If the runtime has been initialized this
+// routine returns an error. See ICorConfiguration.
+HRESULT CorHost::GetConfiguration(ICorConfiguration** pConfiguration)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+ HRESULT hr=E_FAIL;
+ BEGIN_ENTRYPOINT_NOTHROW;
+    if (CorHost::GetHostVersion() != 1)
+    {
+        hr = HOST_E_INVALIDOPERATION;
+    }
+    else if (!pConfiguration)
+    {
+        hr = E_POINTER;
+    }
+    else if (!m_Started)
+    {
+        *pConfiguration = (ICorConfiguration *) this;
+        AddRef();
+        hr = S_OK;
+    }
+ END_ENTRYPOINT_NOTHROW;
+ // Cannot obtain configuration after the runtime is started
+ return hr;
+}
+
+STDMETHODIMP CorHost::Start(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ BEGIN_ENTRYPOINT_NOTHROW;
+ hr = CorRuntimeHostBase::Start();
+
+ END_ENTRYPOINT_NOTHROW;
+
+ if (hr == S_FALSE)
+ {
+ // This is to keep v1 behavior.
+ hr = S_OK;
+ }
+ return(hr);
+}
+#endif // !defined(FEATURE_CORECLR)
+
+
+// *** ICorRuntimeHost methods ***
+#ifndef FEATURE_CORECLR
+// Returns an object for configuring the runtime prior to
+// it starting. If the runtime has been initialized this
+// routine returns an error. See ICorConfiguration.
+HRESULT CorHost2::GetConfiguration(ICorConfiguration** pConfiguration)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ if (!pConfiguration)
+ return E_POINTER;
+ HRESULT hr=E_FAIL;
+ BEGIN_ENTRYPOINT_NOTHROW;
+ if (!m_Started)
+ {
+ *pConfiguration = (ICorConfiguration *) this;
+ AddRef();
+ hr=S_OK;
+ }
+ END_ENTRYPOINT_NOTHROW;
+ // Cannot obtain configuration after the runtime is started
+ return hr;
+}
+#endif // FEATURE_CORECLR
+
+extern BOOL g_fWeOwnProcess;
+
+CorHost2::CorHost2()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_CORECLR
+ m_fStarted = FALSE;
+ m_fFirstToLoadCLR = FALSE;
+ m_fAppDomainCreated = FALSE;
+
+ // By default, the host is assumed to be unauthenticated and is expected to invoke
+ // ICLRRuntimeHost2::Authenticate to authenticate before invoking
+ // ICLRRuntimeHost2::Start.
+ m_fIsHostAuthenticated = FALSE;
+#endif // FEATURE_CORECLR
+}
+
+static DangerousNonHostedSpinLock lockOnlyOneToInvokeStart;
+
+STDMETHODIMP CorHost2::Start()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ }CONTRACTL_END;
+
+
+ HRESULT hr;
+
+#ifdef FEATURE_CORECLR
+ // Is the host authenticated?
+ if (FALSE == m_fIsHostAuthenticated)
+ {
+ // Attempting to start the runtime without authentication is an invalid operation.
+ return HOST_E_INVALIDOPERATION;
+ }
+#endif // FEATURE_CORECLR
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+#ifdef FEATURE_CORECLR
+ // Ensure that only one thread at a time gets in here
+ DangerousNonHostedSpinLockHolder lockHolder(&lockOnlyOneToInvokeStart);
+
+    // To provide complete Start/Stop semantics in the context of a given host, we check m_fStarted and
+    // allow a host to invoke Start only if it has not already done so. Likewise, a host can invoke Stop
+    // only if it has previously invoked Start.
+    //
+    // This prevents a host from invoking Stop twice and driving the refCount to zero while another
+    // host is using the CLR, since sharing a CLR instance across hosts is a supported CoreCLR scenario.
+
+ if (g_fEEStarted)
+ {
+ hr = S_OK;
+ // CoreCLR is already running - but was Start already invoked by this host?
+ if (m_fStarted)
+ {
+ // This host had already invoked the Start method - return them an error
+ hr = HOST_E_INVALIDOPERATION;
+ }
+ else
+ {
+ // Increment the global (and dynamic) refCount...
+ FastInterlockIncrement(&m_RefCount);
+
+ // And set our flag that this host has invoked the Start...
+ m_fStarted = TRUE;
+ }
+ }
+ else
+#endif // FEATURE_CORECLR
+ {
+        // When managed C++ libraries are in use, it's possible that, with the runtime already
+        // running, MC++ will use CorBindToRuntimeEx to make callbacks into a specific appdomain of its
+        // choice. Now, CorBindToRuntimeEx results in CorHost2::CreateObject being invoked
+ // that will set runtime hosted flag "g_fHostConfig |= CLRHOSTED".
+ //
+ // For the case when managed code started without CLR hosting and MC++ does a
+ // CorBindToRuntimeEx, setting the CLR hosted flag is incorrect.
+ //
+ // Thus, before we attempt to start the runtime, we save the status of it being
+ // already running or not. Next, if we are able to successfully start the runtime
+ // and ONLY if it was not started earlier will we set the hosted flag below.
+ if (!g_fEEStarted)
+ {
+ g_fHostConfig |= CLRHOSTED;
+ }
+
+ hr = CorRuntimeHostBase::Start();
+ if (SUCCEEDED(hr))
+ {
+#ifdef FEATURE_CORECLR
+ // Set our flag that this host invoked the Start method.
+ m_fStarted = TRUE;
+
+                // This host was also the one that loaded the CoreCLR DLL into memory (for this version).
+                // This is a special flag: the host that has it set is allowed to invoke the
+                // Stop method repeatedly (without corresponding Start method invocations).
+ // This is to support scenarios like that of Office where they need to bring down
+ // the CLR at any cost.
+ //
+ // So, if you want to do that, just make sure you are the first host to load the
+ // specific version of CLR in memory AND start it.
+ m_fFirstToLoadCLR = TRUE;
+#endif // FEATURE_CORECLR
+            if (FastInterlockIncrement(&m_RefCount) == 1)
+            {
+                if (g_fWeOwnProcess)
+                {
+                    // The runtime was started by a managed exe. Bump the ref-count so that
+                    // a matching Start/Stop pair does not stop the runtime.
+                    FastInterlockIncrement(&m_RefCount);
+                }
+            }
+ }
+ }
+
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+}
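+
+// Host-side sketch (not compiled, details assumed): the expected
+// Authenticate/Start/Stop sequence against ICLRRuntimeHost2. The
+// GetCLRRuntimeHost entry point and the authentication key constant are
+// assumptions here; builds that do not check authentication ignore the key.
+#if 0
+ICLRRuntimeHost2 *pHost = NULL;
+if (SUCCEEDED(GetCLRRuntimeHost(IID_ICLRRuntimeHost2, (IUnknown **)&pHost)))
+{
+    pHost->Authenticate(CORECLR_HOST_AUTHENTICATION_KEY);  // assumed constant
+    if (SUCCEEDED(pHost->Start()))
+    {
+        // ... create an appdomain, run managed code ...
+        pHost->Stop();
+    }
+    pHost->Release();
+}
+#endif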
+
+// Starts the runtime. This is equivalent to CoInitializeEE();
+HRESULT CorRuntimeHostBase::Start()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ DISABLED(GC_TRIGGERS);
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+ {
+ m_Started = TRUE;
+#ifdef FEATURE_EVENT_TRACE
+ g_fEEHostedStartup = TRUE;
+#endif // FEATURE_EVENT_TRACE
+ hr = InitializeEE(COINITEE_DEFAULT);
+ }
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+#if !defined(FEATURE_CORECLR) // simple hosting
+HRESULT CorHost::Stop()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ ENTRY_POINT;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+    // This must remain a no-op for backwards compatibility reasons.
+ return S_OK;
+}
+#endif // !defined(FEATURE_CORECLR)
+
+HRESULT CorHost2::Stop()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ ENTRY_POINT; // We're bringing the EE down, so no point in probing
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+ if (!g_fEEStarted)
+ {
+ return E_UNEXPECTED;
+ }
+ HRESULT hr=S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+#ifdef FEATURE_CORECLR
+ // Is this host eligible to invoke the Stop method?
+ if ((!m_fStarted) && (!m_fFirstToLoadCLR))
+ {
+ // Well - since this host never invoked Start, it is not eligible to invoke Stop.
+ // Semantically, for such a host, CLR is not available in the process. The only
+ // exception to this condition is the host that first loaded this version of the
+ // CLR and invoked Start method. For details, refer to comments in CorHost2::Start implementation.
+ hr = HOST_E_CLRNOTAVAILABLE;
+ }
+ else
+#endif // FEATURE_CORECLR
+ {
+ while (TRUE)
+ {
+ LONG refCount = m_RefCount;
+ if (refCount == 0)
+ {
+ #ifdef FEATURE_CORECLR
+ hr = HOST_E_CLRNOTAVAILABLE;
+ #else // !FEATURE_CORECLR
+ hr= E_UNEXPECTED;
+ #endif // FEATURE_CORECLR
+ break;
+ }
+            else if (FastInterlockCompareExchange(&m_RefCount, refCount - 1, refCount) == refCount)
+ {
+ #ifdef FEATURE_CORECLR
+ // Indicate that we have got a Stop for a corresponding Start call from the
+ // Host. Semantically, CoreCLR has stopped for them.
+ m_fStarted = FALSE;
+ #endif // FEATURE_CORECLR
+
+                if (refCount > 1)
+                {
+                    hr = S_FALSE;
+                }
+                break;
+ }
+ }
+ }
+#ifndef FEATURE_CORECLR
+ if (hr==S_OK)
+ {
+ EPolicyAction action = GetEEPolicy()->GetDefaultAction(OPR_ProcessExit, NULL);
+ if (action > eExitProcess)
+ {
+ g_fFastExitProcess = 1;
+ }
+ EEShutDown(FALSE);
+ }
+#endif // FEATURE_CORECLR
+ END_ENTRYPOINT_NOTHROW;
+
+#ifndef FEATURE_CORECLR
+ if (hr == S_OK)
+ {
+ if (m_HostControl)
+ {
+ m_HostControl->Release();
+ m_HostControl = NULL;
+ }
+ }
+#endif // FEATURE_CORECLR
+
+ return hr;
+}
+
+#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORECLR)
+
+// Creates a domain in the runtime. The identity array is
+// a pointer to an array TYPE containing IIdentity objects defining
+// the security identity.
+HRESULT CorRuntimeHostBase::CreateDomain(LPCWSTR pwzFriendlyName,
+ IUnknown* pIdentityArray, // Optional
+ IUnknown ** pAppDomain)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ return CreateDomainEx(pwzFriendlyName,
+ NULL,
+ NULL,
+ pAppDomain);
+}
+
+
+// Returns the default domain.
+HRESULT CorRuntimeHostBase::GetDefaultDomain(IUnknown ** pAppDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT;
+ } CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+ if (!g_fEEStarted)
+ return hr;
+
+ if( pAppDomain == NULL)
+ return E_POINTER;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr);
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ if (SystemDomain::System()) {
+ AppDomain* pCom = SystemDomain::System()->DefaultDomain();
+ if(pCom)
+ hr = pCom->GetComIPForExposedObject(pAppDomain);
+ }
+
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+// Returns the current domain.
+HRESULT CorRuntimeHostBase::CurrentDomain(IUnknown ** pAppDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+ if (!g_fEEStarted)
+ return hr;
+
+ if( pAppDomain == NULL) return E_POINTER;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr);
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ AppDomain* pCom = ::GetAppDomain();
+ if(pCom)
+ hr = pCom->GetComIPForExposedObject(pAppDomain);
+
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+#endif // FEATURE_COMINTEROP && !FEATURE_CORECLR
+
+HRESULT CorHost2::GetCurrentAppDomainId(DWORD *pdwAppDomainId)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+    // No point going further if the runtime is not running...
+    // We use CanRunManagedCode() instead of IsRuntimeActive() because it allows us
+    // to perform the test in a form that does not trigger a GC.
+ if (!(g_fEEStarted && CanRunManagedCode(LoaderLockCheck::None))
+#ifdef FEATURE_CORECLR
+ || !m_fStarted
+#endif
+ )
+ {
+ return HOST_E_CLRNOTAVAILABLE;
+ }
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ if(pdwAppDomainId == NULL)
+ {
+ hr = E_POINTER;
+ }
+ else
+ {
+ Thread *pThread = GetThread();
+ if (!pThread)
+ {
+ hr = E_UNEXPECTED;
+ }
+ else
+ {
+ *pdwAppDomainId = SystemDomain::GetCurrentDomain()->GetId().m_dwId;
+ }
+ }
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+HRESULT CorHost2::ExecuteApplication(LPCWSTR pwzAppFullName,
+ DWORD dwManifestPaths,
+ LPCWSTR *ppwzManifestPaths,
+ DWORD dwActivationData,
+ LPCWSTR *ppwzActivationData,
+ int *pReturnValue)
+{
+#ifndef FEATURE_CORECLR
+ // This API should not be called when the EE has already been started.
+ HRESULT hr = E_UNEXPECTED;
+ if (g_fEEStarted)
+ return hr;
+
+ //
+ // We will let unhandled exceptions in the activated application
+ // propagate all the way up, so that ClickOnce semi-trusted apps
+ // can participate in the Dr Watson program, etc...
+ //
+
+ CONTRACTL {
+ THROWS;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ if (!pwzAppFullName)
+ IfFailGo(E_POINTER);
+
+ // Set the information about the application to execute.
+ CorCommandLine::m_pwszAppFullName = (LPWSTR) pwzAppFullName;
+ CorCommandLine::m_dwManifestPaths = dwManifestPaths;
+ CorCommandLine::m_ppwszManifestPaths = (LPWSTR*) ppwzManifestPaths;
+ CorCommandLine::m_dwActivationData = dwActivationData;
+ CorCommandLine::m_ppwszActivationData = (LPWSTR*) ppwzActivationData;
+
+ // Start up the EE.
+ IfFailGo(Start());
+
+ Thread *pThread;
+ pThread = GetThread();
+ if (pThread == NULL)
+ pThread = SetupThreadNoThrow(&hr);
+ if (pThread == NULL)
+ goto ErrExit;
+
+ _ASSERTE (!pThread->PreemptiveGCDisabled());
+
+ hr = S_OK;
+
+ BEGIN_ENTRYPOINT_THROWS_WITH_THREAD(pThread);
+ ENTER_DOMAIN_PTR(SystemDomain::System()->DefaultDomain(),ADV_DEFAULTAD)
+
+ SystemDomain::ActivateApplication(pReturnValue);
+
+ END_DOMAIN_TRANSITION;
+ END_ENTRYPOINT_THROWS_WITH_THREAD;
+
+ErrExit:
+ return hr;
+#else // FEATURE_CORECLR
+ return E_NOTIMPL;
+#endif
+}
+
+#ifdef FEATURE_CORECLR
+HRESULT CorHost2::ExecuteAssembly(DWORD dwAppDomainId,
+ LPCWSTR pwzAssemblyPath,
+ int argc,
+ LPCWSTR* argv,
+ DWORD *pReturnValue)
+{
+ CONTRACTL
+ {
+        THROWS; // Throws, as we do not want to swallow the managed exception
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+    // This is currently supported only in the default domain
+ if (dwAppDomainId != DefaultADID)
+ return HOST_E_INVALIDOPERATION;
+
+ // No point going further if the runtime is not running...
+ if (!IsRuntimeActive() || !m_fStarted)
+ {
+ return HOST_E_CLRNOTAVAILABLE;
+ }
+
+ if(!pwzAssemblyPath)
+ return E_POINTER;
+
+ if(argc < 0)
+ {
+ return E_INVALIDARG;
+ }
+
+ if(argc > 0 && argv == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ HRESULT hr = S_OK;
+
+ Thread *pThread = GetThread();
+ if (pThread == NULL)
+ {
+ pThread = SetupThreadNoThrow(&hr);
+ if (pThread == NULL)
+ {
+ goto ErrExit;
+ }
+ }
+
+ if(SystemDomain::GetCurrentDomain()->GetId().m_dwId != DefaultADID)
+ {
+ return HOST_E_INVALIDOPERATION;
+ }
+
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ _ASSERTE (!pThread->PreemptiveGCDisabled());
+
+ Assembly *pAssembly = AssemblySpec::LoadAssembly(pwzAssemblyPath);
+
+ {
+ GCX_COOP();
+
+ PTRARRAYREF arguments = NULL;
+
+ GCPROTECT_BEGIN(arguments);
+
+ arguments = (PTRARRAYREF)AllocateObjectArray(argc, g_pStringClass);
+
+ for (int i = 0; i < argc; ++i)
+ {
+ STRINGREF argument = StringObject::NewString(argv[i]);
+ arguments->SetAt(i, argument);
+ }
+
+ DWORD retval = pAssembly->ExecuteMainMethod(&arguments);
+ if (pReturnValue)
+ {
+ *pReturnValue = retval;
+ }
+
+ GCPROTECT_END();
+
+ }
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ErrExit:
+
+ return hr;
+}
+#endif
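+// Illustrative sketch, not part of the original sources (the paths and names
+// are hypothetical): ExecuteAssembly runs an assembly's managed entry point in
+// the default domain, marshalling argv into a managed string[] as done above.
+#if 0
+    LPCWSTR args[] = { W("first"), W("second") };
+    DWORD dwExitCode = 0;
+    hr = pHost2->ExecuteAssembly(dwDomainId,              // must identify the default domain (checked above)
+                                 W("C:\\app\\Main.exe"),  // hypothetical assembly path
+                                 _countof(args), args,
+                                 &dwExitCode);
+#endif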
+
+HRESULT CorHost2::ExecuteInDefaultAppDomain(LPCWSTR pwzAssemblyPath,
+ LPCWSTR pwzTypeName,
+ LPCWSTR pwzMethodName,
+ LPCWSTR pwzArgument,
+ DWORD *pReturnValue)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ // No point going further if the runtime is not running...
+ if (!IsRuntimeActive()
+#ifdef FEATURE_CORECLR
+ || !m_fStarted
+#endif
+ )
+ {
+ return HOST_E_CLRNOTAVAILABLE;
+ }
+
+
+#ifndef FEATURE_CORECLR
+ if(! (pwzAssemblyPath && pwzTypeName && pwzMethodName) )
+ return E_POINTER;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ Thread *pThread = GetThread();
+ if (pThread == NULL)
+ {
+ pThread = SetupThreadNoThrow(&hr);
+ if (pThread == NULL)
+ {
+ goto ErrExit;
+ }
+ }
+
+ _ASSERTE (!pThread->PreemptiveGCDisabled());
+
+ EX_TRY
+ {
+ ENTER_DOMAIN_PTR(SystemDomain::System()->DefaultDomain(),ADV_DEFAULTAD)
+
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ Assembly *pAssembly = AssemblySpec::LoadAssembly(pwzAssemblyPath);
+
+ SString szTypeName(pwzTypeName);
+ StackScratchBuffer buff1;
+ const char* szTypeNameUTF8 = szTypeName.GetUTF8(buff1);
+ MethodTable *pMT = ClassLoader::LoadTypeByNameThrowing(pAssembly,
+ NULL,
+ szTypeNameUTF8).AsMethodTable();
+
+ SString szMethodName(pwzMethodName);
+ StackScratchBuffer buff;
+ const char* szMethodNameUTF8 = szMethodName.GetUTF8(buff);
+ MethodDesc *pMethodMD = MemberLoader::FindMethod(pMT, szMethodNameUTF8, &gsig_SM_Str_RetInt);
+
+ if (!pMethodMD)
+ {
+ hr = COR_E_MISSINGMETHOD;
+ }
+ else
+ {
+ GCX_COOP();
+
+ MethodDescCallSite method(pMethodMD);
+
+ STRINGREF sref = NULL;
+ GCPROTECT_BEGIN(sref);
+
+ if (pwzArgument)
+ sref = StringObject::NewString(pwzArgument);
+
+ ARG_SLOT MethodArgs[] =
+ {
+ ObjToArgSlot(sref)
+ };
+ DWORD retval = method.Call_RetI4(MethodArgs);
+ if (pReturnValue)
+ {
+ *pReturnValue = retval;
+ }
+
+ GCPROTECT_END();
+ }
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ END_DOMAIN_TRANSITION;
+ }
+ EX_CATCH_HRESULT(hr);
+
+ErrExit:
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+#else // FEATURE_CORECLR
+ // Ensure that code is not loaded in the Default AppDomain
+ return HOST_E_INVALIDOPERATION;
+#endif
+}
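+// Illustrative sketch, not part of the original sources: on non-CoreCLR builds
+// ExecuteInDefaultAppDomain binds to a method matching gsig_SM_Str_RetInt,
+// i.e. a managed "static int Method(string arg)". The assembly, type, and
+// method names below are hypothetical. (On CoreCLR this entry point returns
+// HOST_E_INVALIDOPERATION, as above.)
+#if 0
+    DWORD dwRet = 0;
+    hr = pHost2->ExecuteInDefaultAppDomain(
+            W("C:\\app\\MyAssembly.dll"),   // pwzAssemblyPath
+            W("MyApp.Entry"),               // pwzTypeName
+            W("Run"),                       // pwzMethodName: static int Run(string)
+            W("hello"),                     // pwzArgument (may be NULL)
+            &dwRet);
+#endif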
+
+HRESULT ExecuteInAppDomainHelper(FExecuteInAppDomainCallback pCallback,
+ void * cookie)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_SO_TOLERANT_CODE(GetThread());
+ hr = pCallback(cookie);
+ END_SO_TOLERANT_CODE;
+
+ return hr;
+}
+
+HRESULT CorHost2::ExecuteInAppDomain(DWORD dwAppDomainId,
+ FExecuteInAppDomainCallback pCallback,
+ void * cookie)
+{
+
+ // No point going further if the runtime is not running...
+ if (!IsRuntimeActive()
+#ifdef FEATURE_CORECLR
+ || !m_fStarted
+#endif // FEATURE_CORECLR
+ )
+ {
+ return HOST_E_CLRNOTAVAILABLE;
+ }
+
+#ifdef FEATURE_CORECLR
+ if(!(m_dwStartupFlags & STARTUP_SINGLE_APPDOMAIN))
+ {
+ // Ensure that code is not loaded in the Default AppDomain
+ if (dwAppDomainId == DefaultADID)
+ return HOST_E_INVALIDOPERATION;
+ }
+#endif // FEATURE_CORECLR
+
+    // Validate the pointer here rather than earlier: there is no point validating it
+    // if the basic checks above fail.
+ if( pCallback == NULL)
+ return E_POINTER;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ ENTRY_POINT; // This is called by a host.
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr);
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+ ENTER_DOMAIN_ID(ADID(dwAppDomainId))
+ {
+ // We are calling an unmanaged function pointer, either an unmanaged function, or a marshaled out delegate.
+ // The thread should be in preemptive mode, and SO_Tolerant.
+ GCX_PREEMP();
+ hr=ExecuteInAppDomainHelper (pCallback, cookie);
+ }
+ END_DOMAIN_TRANSITION;
+ END_EXTERNAL_ENTRYPOINT;
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
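+// Illustrative sketch, not part of the original sources: the shape of the
+// host-supplied callback. Its exact typedef lives in the hosting headers; the
+// calling convention below is assumed. The callback runs in the target domain
+// in preemptive mode, via ExecuteInAppDomainHelper above.
+#if 0
+    HRESULT STDMETHODCALLTYPE MyDomainCallback(void *cookie)
+    {
+        // unmanaged host code; "cookie" is the value passed to ExecuteInAppDomain
+        return S_OK;
+    }
+    // ...
+    hr = pHost2->ExecuteInAppDomain(dwDomainId, MyDomainCallback, /* cookie */ NULL);
+#endif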
+
+#if defined(FEATURE_CORECLR) || defined(FEATURE_HOSTED_BINDER)
+#define EMPTY_STRING_TO_NULL(s) {if(s && s[0] == 0) {s=NULL;};}
+
+HRESULT CorHost2::_CreateAppDomain(
+ LPCWSTR wszFriendlyName,
+ DWORD dwFlags,
+ LPCWSTR wszAppDomainManagerAssemblyName,
+ LPCWSTR wszAppDomainManagerTypeName,
+ int nProperties,
+ LPCWSTR* pPropertyNames,
+ LPCWSTR* pPropertyValues,
+#if defined(FEATURE_HOSTED_BINDER) && !defined(FEATURE_CORECLR)
+ ICLRPrivBinder* pBinder,
+#endif
+ DWORD* pAppDomainID)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ ENTRY_POINT; // This is called by a host.
+ }
+ CONTRACTL_END;
+
+ HRESULT hr=S_OK;
+
+#ifdef FEATURE_CORECLR
+    // This function cannot be called more than once when only a single AppDomain is allowed.
+ if (m_fAppDomainCreated && (m_dwStartupFlags & STARTUP_SINGLE_APPDOMAIN))
+ {
+ return HOST_E_INVALIDOPERATION;
+ }
+#endif
+
+    // Normalize empty strings.
+ EMPTY_STRING_TO_NULL(wszFriendlyName);
+ EMPTY_STRING_TO_NULL(wszAppDomainManagerAssemblyName);
+ EMPTY_STRING_TO_NULL(wszAppDomainManagerTypeName);
+
+ if(pAppDomainID==NULL)
+ return E_POINTER;
+
+#ifdef FEATURE_CORECLR
+ if (!m_fStarted)
+ return HOST_E_INVALIDOPERATION;
+
+#if defined(FEATURE_WINDOWSPHONE) && defined(FEATURE_STRONGNAME_TESTKEY_ALLOWED)
+ if((APPDOMAIN_SET_TEST_KEY & dwFlags) && (m_dwStartupFlags & STARTUP_SINGLE_APPDOMAIN))
+ {
+ const BYTE testKey[] = { TEST_KEY_VALUE };
+ memcpy_s(g_rbTestKeyBuffer + sizeof(GUID)*2, sizeof(testKey), testKey, sizeof(testKey));
+ }
+#endif // defined(FEATURE_WINDOWSPHONE) && defined(FEATURE_STRONGNAME_TESTKEY_ALLOWED)
+
+#endif // FEATURE_CORECLR
+
+ if(wszFriendlyName == NULL)
+ return E_INVALIDARG;
+
+ if((wszAppDomainManagerAssemblyName == NULL) != (wszAppDomainManagerTypeName == NULL))
+ return E_INVALIDARG;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr);
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ AppDomainCreationHolder<AppDomain> pDomain;
+
+#ifdef FEATURE_CORECLR
+    // If the startup flags specify a single AppDomain, return the default domain instead of creating a new one.
+ if(m_dwStartupFlags & STARTUP_SINGLE_APPDOMAIN)
+ {
+ pDomain.Assign(SystemDomain::System()->DefaultDomain());
+ }
+ else
+#endif
+ {
+ AppDomain::CreateUnmanagedObject(pDomain);
+ }
+
+ ETW::LoaderLog::DomainLoad(pDomain, (LPWSTR)wszFriendlyName);
+
+#ifdef FEATURE_CORECLR
+ if (dwFlags & APPDOMAIN_IGNORE_UNHANDLED_EXCEPTIONS)
+ {
+ pDomain->SetIgnoreUnhandledExceptions();
+ }
+
+ // Enable interop for all assemblies if the host has asked us to.
+ if (dwFlags & APPDOMAIN_ENABLE_PINVOKE_AND_CLASSIC_COMINTEROP)
+ {
+ pDomain->SetEnablePInvokeAndClassicComInterop();
+ }
+
+ if (dwFlags & APPDOMAIN_ENABLE_PLATFORM_SPECIFIC_APPS)
+ {
+ pDomain->SetAllowPlatformSpecificAppAssemblies();
+ }
+
+ if (dwFlags & APPDOMAIN_ENABLE_ASSEMBLY_LOADFILE)
+ {
+ pDomain->SetAllowLoadFile();
+ }
+#endif // FEATURE_CORECLR
+
+ if (dwFlags & APPDOMAIN_SECURITY_FORBID_CROSSAD_REVERSE_PINVOKE)
+ pDomain->SetReversePInvokeCannotEnter();
+
+ if (dwFlags & APPDOMAIN_FORCE_TRIVIAL_WAIT_OPERATIONS)
+ pDomain->SetForceTrivialWaitOperations();
+
+#if defined(FEATURE_HOSTED_BINDER) && !defined(FEATURE_CORECLR)
+ if (pBinder != NULL)
+ pDomain->SetLoadContextHostBinder(pBinder);
+#endif
+
+#ifdef PROFILING_SUPPORTED
+ EX_TRY
+#endif
+ {
+ pDomain->SetAppDomainManagerInfo(wszAppDomainManagerAssemblyName,wszAppDomainManagerTypeName,eInitializeNewDomainFlags_None);
+
+ GCX_COOP();
+
+ struct
+ {
+ STRINGREF friendlyName;
+ PTRARRAYREF propertyNames;
+ PTRARRAYREF propertyValues;
+ STRINGREF sandboxName;
+ OBJECTREF setupInfo;
+ OBJECTREF adSetup;
+ } _gc;
+
+ ZeroMemory(&_gc,sizeof(_gc));
+
+ GCPROTECT_BEGIN(_gc)
+ _gc.friendlyName=StringObject::NewString(wszFriendlyName);
+
+ if(nProperties>0)
+ {
+ _gc.propertyNames = (PTRARRAYREF) AllocateObjectArray(nProperties, g_pStringClass);
+ _gc.propertyValues= (PTRARRAYREF) AllocateObjectArray(nProperties, g_pStringClass);
+ for (int i=0;i< nProperties;i++)
+ {
+ STRINGREF obj = StringObject::NewString(pPropertyNames[i]);
+ _gc.propertyNames->SetAt(i, obj);
+
+ obj = StringObject::NewString(pPropertyValues[i]);
+ _gc.propertyValues->SetAt(i, obj);
+
+#ifdef FEATURE_LEGACYNETCF
+ // Look for the "AppDomainCompatSwitch" property and, if it exists, save its value
+ // in the AppDomain object.
+ if ((0 == _wcsicmp(pPropertyNames[i], W("AppDomainCompatSwitch"))) && (pPropertyValues[i] != NULL))
+ {
+                if ((0 == _wcsicmp(pPropertyValues[i], W("WindowsPhone_3.8.0.0"))) ||
+                    (0 == _wcsicmp(pPropertyValues[i], W("WindowsPhone_3.7.0.0"))))
+                {
+                    pDomain->SetAppDomainCompatMode(BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8);
+                }
+                else
+                {
+                    // We currently don't know any other AppDomain compatibility switches.
+                }
+ }
+#endif // FEATURE_LEGACYNETCF
+ }
+ }
+
+ if (dwFlags & APPDOMAIN_SECURITY_SANDBOXED)
+ {
+ _gc.sandboxName = StringObject::NewString(W("Internet"));
+ }
+ else
+ {
+ _gc.sandboxName = StringObject::NewString(W("FullTrust"));
+ }
+
+ MethodDescCallSite prepareDataForSetup(METHOD__APP_DOMAIN__PREPARE_DATA_FOR_SETUP);
+
+ ARG_SLOT args[8];
+ args[0]=ObjToArgSlot(_gc.friendlyName);
+ args[1]=ObjToArgSlot(NULL);
+ args[2]=ObjToArgSlot(NULL);
+ args[3]=ObjToArgSlot(NULL);
+#ifdef FEATURE_CORECLR
+        // CoreCLR shouldn't have dependencies on the parent AppDomain.
+ args[4]=ObjToArgSlot(NULL);
+#else
+ args[4]=PtrToArgSlot(GetAppDomain()->GetSecurityDescriptor());
+#endif //FEATURE_CORECLR
+ args[5]=ObjToArgSlot(_gc.sandboxName);
+ args[6]=ObjToArgSlot(_gc.propertyNames);
+ args[7]=ObjToArgSlot(_gc.propertyValues);
+
+ _gc.setupInfo=prepareDataForSetup.Call_RetOBJECTREF(args);
+
+        //
+        // Get the new flag values and set them on the domain
+        //
+ PTRARRAYREF handleArrayObj = (PTRARRAYREF) ObjectToOBJECTREF(_gc.setupInfo);
+ _gc.adSetup = ObjectToOBJECTREF(handleArrayObj->GetAt(1));
+
+#ifndef FEATURE_CORECLR
+ // We need to setup domain sorting before any other managed code runs in the domain, since that code
+ // could end up caching data based on the sorting mode of the domain.
+ pDomain->InitializeSorting(&_gc.adSetup);
+ pDomain->InitializeHashing(&_gc.adSetup);
+#endif
+
+ pDomain->DoSetup(&_gc.setupInfo);
+
+ pDomain->CacheStringsForDAC();
+
+ GCPROTECT_END();
+
+ *pAppDomainID=pDomain->GetId().m_dwId;
+
+#ifdef FEATURE_CORECLR
+        // If the startup flags specify a single AppDomain, record that the AppDomain has already been created.
+ if(m_dwStartupFlags & STARTUP_SINGLE_APPDOMAIN)
+ {
+ m_fAppDomainCreated = TRUE;
+ }
+#endif
+ }
+#ifdef PROFILING_SUPPORTED
+ EX_HOOK
+ {
+ // Need the first assembly loaded in to get any data on an app domain.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->AppDomainCreationFinished((AppDomainID)(AppDomain*) pDomain, GET_EXCEPTION()->GetHR());
+ END_PIN_PROFILER();
+ }
+ }
+ EX_END_HOOK;
+
+ // Need the first assembly loaded in to get any data on an app domain.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->AppDomainCreationFinished((AppDomainID)(AppDomain*) pDomain, S_OK);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ // DoneCreating releases ownership of AppDomain. After this call, there should be no access to pDomain.
+ pDomain.DoneCreating();
+
+ END_EXTERNAL_ENTRYPOINT;
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+
+}
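+// Illustrative sketch, not part of the original sources: how a CoreCLR host
+// typically feeds the name/value pairs consumed above (they are surfaced to
+// the managed setup code as two parallel string arrays). The property names
+// shown are conventional CoreCLR host properties; the values are hypothetical.
+#if 0
+    LPCWSTR propNames[]  = { W("APPBASE"), W("TRUSTED_PLATFORM_ASSEMBLIES") };
+    LPCWSTR propValues[] = { W("C:\\app"), W("C:\\app\\mscorlib.dll") };
+    DWORD dwDomainId = 0;
+    hr = pHost2->CreateAppDomainWithManager(
+            W("MyDomain"),
+            APPDOMAIN_ENABLE_PINVOKE_AND_CLASSIC_COMINTEROP,
+            NULL, NULL,                 // no AppDomainManager
+            _countof(propNames), propNames, propValues,
+            &dwDomainId);
+#endif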
+
+HRESULT CorHost2::_CreateDelegate(
+ DWORD appDomainID,
+ LPCWSTR wszAssemblyName,
+ LPCWSTR wszClassName,
+ LPCWSTR wszMethodName,
+ INT_PTR* fnPtr)
+{
+
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ ENTRY_POINT; // This is called by a host.
+ }
+ CONTRACTL_END;
+
+ HRESULT hr=S_OK;
+
+ EMPTY_STRING_TO_NULL(wszAssemblyName);
+ EMPTY_STRING_TO_NULL(wszClassName);
+ EMPTY_STRING_TO_NULL(wszMethodName);
+
+ if (fnPtr == NULL)
+ return E_POINTER;
+ *fnPtr = NULL;
+
+ if(wszAssemblyName == NULL)
+ return E_INVALIDARG;
+
+ if(wszClassName == NULL)
+ return E_INVALIDARG;
+
+ if(wszMethodName == NULL)
+ return E_INVALIDARG;
+
+#ifdef FEATURE_CORECLR
+ if (!m_fStarted)
+ return HOST_E_INVALIDOPERATION;
+
+ if(!(m_dwStartupFlags & STARTUP_SINGLE_APPDOMAIN))
+ {
+ // Ensure that code is not loaded in the Default AppDomain
+ if (appDomainID == DefaultADID)
+ return HOST_E_INVALIDOPERATION;
+ }
+#endif
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr);
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ MAKE_UTF8PTR_FROMWIDE(szAssemblyName, wszAssemblyName);
+ MAKE_UTF8PTR_FROMWIDE(szClassName, wszClassName);
+ MAKE_UTF8PTR_FROMWIDE(szMethodName, wszMethodName);
+
+ ADID id;
+ id.m_dwId=appDomainID;
+
+ ENTER_DOMAIN_ID(id)
+
+ GCX_PREEMP();
+
+ AssemblySpec spec;
+ spec.Init(szAssemblyName);
+ Assembly* pAsm=spec.LoadAssembly(FILE_ACTIVE);
+
+        // We have no signature to check, so allowing calls into partially trusted code
+        // could result in an exploit.
+ if (!pAsm->GetSecurityDescriptor()->IsFullyTrusted())
+ ThrowHR(COR_E_SECURITY);
+
+ TypeHandle th=pAsm->GetLoader()->LoadTypeByNameThrowing(pAsm,NULL,szClassName);
+ MethodDesc* pMD=NULL;
+
+ if (!th.IsTypeDesc())
+ {
+ pMD = MemberLoader::FindMethodByName(th.GetMethodTable(), szMethodName, MemberLoader::FM_Unique);
+ if (pMD == NULL)
+ {
+ // try again without the FM_Unique flag (error path)
+ pMD = MemberLoader::FindMethodByName(th.GetMethodTable(), szMethodName, MemberLoader::FM_Default);
+ if (pMD != NULL)
+ {
+ // the method exists but is overloaded
+ ThrowHR(COR_E_AMBIGUOUSMATCH);
+ }
+ }
+ }
+
+ if (pMD==NULL || !pMD->IsStatic() || pMD->ContainsGenericVariables())
+ ThrowHR(COR_E_MISSINGMETHOD);
+
+#ifdef FEATURE_CORECLR
+ // the target method must be decorated with AllowReversePInvokeCallsAttribute
+ if (!COMDelegate::IsMethodAllowedToSinkReversePInvoke(pMD))
+ ThrowHR(COR_E_SECURITY);
+#endif
+
+ UMEntryThunk *pUMEntryThunk = GetAppDomain()->GetUMEntryThunkCache()->GetUMEntryThunk(pMD);
+ *fnPtr = (INT_PTR)pUMEntryThunk->GetCode();
+
+ END_DOMAIN_TRANSITION;
+
+ END_EXTERNAL_ENTRYPOINT;
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
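+// Illustrative sketch, not part of the original sources (assembly/type/method
+// names hypothetical, calling convention assumed): _CreateDelegate hands back
+// a native entry point for a *static*, non-generic managed method; on CoreCLR
+// the target must also be marked [AllowReversePInvokeCalls], per the check above.
+#if 0
+    typedef int (STDMETHODCALLTYPE *PFN_BOOTSTRAP)();
+    INT_PTR pfn = 0;
+    hr = pHost2->CreateDelegate(dwDomainId,
+                                W("MyAssembly, Version=1.0.0.0"),
+                                W("MyApp.Bootstrapper"),
+                                W("Run"),
+                                &pfn);
+    if (SUCCEEDED(hr))
+    {
+        int ret = ((PFN_BOOTSTRAP)pfn)();
+    }
+#endif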
+
+#endif // defined(FEATURE_CORECLR) || defined(FEATURE_HOSTED_BINDER)
+
+#ifdef FEATURE_CORECLR
+HRESULT CorHost2::CreateAppDomainWithManager(
+ LPCWSTR wszFriendlyName,
+ DWORD dwFlags,
+ LPCWSTR wszAppDomainManagerAssemblyName,
+ LPCWSTR wszAppDomainManagerTypeName,
+ int nProperties,
+ LPCWSTR* pPropertyNames,
+ LPCWSTR* pPropertyValues,
+ DWORD* pAppDomainID)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return _CreateAppDomain(
+ wszFriendlyName,
+ dwFlags,
+ wszAppDomainManagerAssemblyName,
+ wszAppDomainManagerTypeName,
+ nProperties,
+ pPropertyNames,
+ pPropertyValues,
+ pAppDomainID);
+}
+
+HRESULT CorHost2::CreateDelegate(
+ DWORD appDomainID,
+ LPCWSTR wszAssemblyName,
+ LPCWSTR wszClassName,
+ LPCWSTR wszMethodName,
+ INT_PTR* fnPtr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return _CreateDelegate(appDomainID, wszAssemblyName, wszClassName, wszMethodName, fnPtr);
+}
+
+// To prevent any unmanaged application from hosting CoreCLR, we require the host to authenticate
+// with the runtime by passing a secret key. This key is a constant value, CORECLR_HOST_AUTHENTICATION_KEY,
+// defined in mscoree.h. If the authentication fails, E_FAIL will be returned. S_OK is returned on
+// successful authentication.
+//
+// If the host is not authenticated and ICLRRuntimeHost2::Start is invoked, it will return
+// HOST_E_INVALIDOPERATION.
+
+HRESULT CorHost2::Authenticate(ULONGLONG authKey)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ ENTRY_POINT; // This is called by a host.
+ }
+ CONTRACTL_END;
+
+ // Initialize...
+ HRESULT hr = E_FAIL;
+
+#ifdef FEATURE_CORECLR
+    // The host is not yet authenticated. We already set this to FALSE in the
+    // CorHost2 constructor; resetting it here is just playing it safe.
+ m_fIsHostAuthenticated = FALSE;
+
+ // Is the authentication key valid?
+ if (CORECLR_HOST_AUTHENTICATION_KEY == authKey ||
+ CORECLR_HOST_AUTHENTICATION_KEY_NONGEN == authKey)
+ {
+ hr = S_OK;
+ m_fIsHostAuthenticated = TRUE;
+ }
+
+ //
+ // For Silverlight 4, we overload this API to tell us if we are not allowed
+ // to use native images.
+ //
+ // This was added to address Silverlight bug #88577: crash loading Silverlight netflix player.
+ // The basic scenario is:
+ // 1. install Silverlight 4. At the end of setup, coregen.exe is invoked asynchronously to generate
+ // native images.
+ // 2. start IE (while native images are still being generated) and load a Silverlight application.
+ // This SL app loads some native images (mscorlib.ni.dll, System.ni.dll), but not others, since
+ // they don't exist yet, as coregen.exe is still working in the background.
+ // 3. Navigate to a different Silverlight app (in the case of the bug, the Netflix movie player). By
+ // now, all the native images have been generated. When we go to load a native image for an
+ // assembly that was not loaded by the first application, we get confused -- we check its native
+ // image dependencies early in loading and it looks good. Then, we choose to use shared assemblies
+ // that it depends on, that were only loaded as IL assemblies by the first Silverlight application.
+ // It is also hard-bound to the native images of these dependent native images. At the end, we
+ // re-check the native image dependencies, and throw an exception (in DomainFile::FinishLoad()).
+ //
+ if (CORECLR_HOST_AUTHENTICATION_KEY_NONGEN == authKey)
+ {
+ g_fAllowNativeImages = false; // The host told us not to use native images in this process
+ }
+
+#else
+    // Host authentication is (currently) only supported for CoreCLR. For all other CLR implementations,
+    // we return S_OK in case someone calls us.
+ hr = S_OK;
+#endif
+
+ return hr;
+}
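+// Illustrative sketch, not part of the original sources: the handshake
+// described above happens before Start(), and the NONGEN key additionally
+// turns native images off for the process.
+#if 0
+    hr = pHost2->Authenticate(CORECLR_HOST_AUTHENTICATION_KEY);   // or ..._NONGEN
+    if (SUCCEEDED(hr))
+        hr = pHost2->Start();
+#endif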
+
+HRESULT CorHost2::RegisterMacEHPort()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ ENTRY_POINT; // This is called by a host.
+ }
+ CONTRACTL_END;
+
+ return S_OK;
+}
+
+HRESULT CorHost2::SetStartupFlags(STARTUP_FLAGS flag)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ ENTRY_POINT; // This is called by a host.
+ }
+ CONTRACTL_END;
+
+ if(g_fEEStarted)
+ return HOST_E_INVALIDOPERATION;
+
+ if (CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_gcServer) != 0)
+ {
+ flag = (STARTUP_FLAGS)(flag | STARTUP_SERVER_GC);
+ }
+
+ m_dwStartupFlags = flag;
+
+ return S_OK;
+}
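+// Illustrative sketch, not part of the original sources: startup flags must be
+// set before the EE starts; note that the UNSUPPORTED_gcServer config knob
+// handled above can force STARTUP_SERVER_GC on regardless of what the host passes.
+#if 0
+    hr = pHost2->SetStartupFlags(
+            (STARTUP_FLAGS)(STARTUP_CONCURRENT_GC | STARTUP_SINGLE_APPDOMAIN));
+#endif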
+
+#endif //FEATURE_CORECLR
+
+#ifndef FEATURE_CORECLR
+void PauseOneAppDomain(AppDomainIterator* pi)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ EX_TRY {
+ ENTER_DOMAIN_PTR(pi->GetDomain(),ADV_ITERATOR);
+
+ MethodDescCallSite(METHOD__APP_DOMAIN__PAUSE).Call(NULL);
+
+ END_DOMAIN_TRANSITION;
+ } EX_CATCH {
+ } EX_END_CATCH(SwallowAllExceptions);
+}
+
+void ResumeOneAppDomain(AppDomainIterator* pi)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ EX_TRY {
+ ENTER_DOMAIN_PTR(pi->GetDomain(),ADV_ITERATOR);
+
+ MethodDescCallSite(METHOD__APP_DOMAIN__RESUME).Call(NULL);
+
+ END_DOMAIN_TRANSITION;
+ } EX_CATCH {
+ } EX_END_CATCH(SwallowAllExceptions);
+}
+
+// see comments in SuspendEEForPause
+DWORD WINAPI SuspendAndResumeForPause(LPVOID arg)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER);
+
+ g_PauseCompletedEvent.Set();
+ g_ClrResumeEvent.Wait(INFINITE, FALSE);
+
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+ return 0;
+}
+
+#endif // !FEATURE_CORECLR
+
+HRESULT SuspendEEForPause()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+#ifdef FEATURE_CORECLR
+ // In CoreCLR, we always resume from the same thread that paused. So we can simply suspend the EE from this thread,
+ // knowing we'll restart from the same thread.
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER);
+#else
+    // In the CLR, we can resume from a different thread than the one that paused. We can't call SuspendEE directly,
+    // because we can't call RestartEE from another thread. So we queue a work item to the ThreadPool to call SuspendEE
+    // and RestartEE on our behalf.
+
+ EX_TRY
+ {
+ if (!ThreadpoolMgr::QueueUserWorkItem(SuspendAndResumeForPause, NULL, QUEUE_ONLY))
+ {
+ hr = HRESULT_FROM_GetLastError();
+ }
+ else
+ {
+ // wait for SuspendEE to complete before returning.
+ g_PauseCompletedEvent.Wait(INFINITE,FALSE);
+ }
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+#endif
+
+ return hr;
+}
+
+HRESULT RestartEEFromPauseAndSetResumeEvent()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+    // see comments in SuspendEEForPause
+#ifdef FEATURE_CORECLR
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+#else
+ // setting the resume event below will restart the EE as well. We don't wait for the restart
+ // to complete, because we'll sync with it next time we go to cooperative mode.
+#endif
+
+ _ASSERTE(g_ClrResumeEvent.IsValid());
+ g_ClrResumeEvent.Set();
+
+ return S_OK;
+}
+
+
+
+CorExecutionManager::CorExecutionManager()
+ : m_dwFlags(0), m_pauseStartTime(0)
+{
+ LIMITED_METHOD_CONTRACT;
+ g_IsPaused = FALSE;
+ g_PauseTime = 0;
+}
+
+HRESULT CorExecutionManager::Pause(DWORD dwAppDomainId, DWORD dwFlags)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ ENTRY_POINT; // This is called by a host.
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+#ifndef FEATURE_CORECLR
+ if (!IsRuntimeActive())
+ return HOST_E_CLRNOTAVAILABLE;
+#endif
+
+ if(g_IsPaused)
+ return E_FAIL;
+
+ EX_TRY
+ {
+ if(!g_ClrResumeEvent.IsValid())
+ g_ClrResumeEvent.CreateManualEvent(FALSE);
+ else
+ g_ClrResumeEvent.Reset();
+
+#ifndef FEATURE_CORECLR
+ if (!g_PauseCompletedEvent.IsValid())
+ g_PauseCompletedEvent.CreateManualEvent(FALSE);
+ else
+ g_PauseCompletedEvent.Reset();
+#endif
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (FAILED(hr))
+ return hr;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ m_dwFlags = dwFlags;
+
+#ifndef FEATURE_CORECLR
+ if ((m_dwFlags & PAUSE_APP_DOMAINS) != 0)
+ {
+ Thread* pThread = SetupThreadNoThrow(&hr);
+ if (pThread != NULL)
+ {
+ GCX_COOP_THREAD_EXISTS(pThread);
+
+ AppDomainIterator ai(/*bOnlyActive:*/ TRUE);
+ while (ai.Next())
+ PauseOneAppDomain(&ai);
+ }
+ }
+#endif
+
+ if (SUCCEEDED(hr))
+ {
+ g_IsPaused = TRUE;
+
+ hr = SuspendEEForPause();
+
+ // Even though this is named with TickCount, it returns milliseconds
+ m_pauseStartTime = (INT64)CLRGetTickCount64();
+ }
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+
+HRESULT CorExecutionManager::Resume(DWORD dwAppDomainId)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ ENTRY_POINT; // This is called by a host.
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+#ifndef FEATURE_CORECLR
+ if (!IsRuntimeActive())
+ return HOST_E_CLRNOTAVAILABLE;
+#endif
+
+ if(!g_IsPaused)
+ return E_FAIL;
+
+#ifdef FEATURE_CORECLR
+    // The thread that did the Pause is recorded as the suspension thread; Resume must happen on that same thread.
+ Thread *pThread = GetThread();
+ if(pThread != ThreadSuspend::GetSuspensionThread())
+ {
+ _ASSERTE(!"HOST BUG: The same thread that did Pause should do the Resume");
+ return E_FAIL;
+ }
+#endif
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ // Even though this is named with TickCount, it returns milliseconds
+ INT64 currTime = (INT64)CLRGetTickCount64();
+ _ASSERTE(currTime >= m_pauseStartTime);
+ _ASSERTE(m_pauseStartTime != 0);
+
+ g_PauseTime += (currTime - m_pauseStartTime);
+ g_IsPaused = FALSE;
+
+ hr = RestartEEFromPauseAndSetResumeEvent();
+
+#ifndef FEATURE_CORECLR
+ if (SUCCEEDED(hr))
+ {
+ if ((m_dwFlags & PAUSE_APP_DOMAINS) != 0)
+ {
+ Thread* pThread = SetupThreadNoThrow(&hr);
+ if (pThread != NULL)
+ {
+ GCX_COOP_THREAD_EXISTS(pThread);
+
+ AppDomainIterator ai(/*bOnlyActive:*/ TRUE);
+ while (ai.Next())
+ ResumeOneAppDomain(&ai);
+ }
+ }
+ }
+#endif
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
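+// Illustrative sketch, not part of the original sources: the Pause/Resume
+// protocol implemented above. "pExecMgr" is an assumed ICLRExecutionManager*.
+// On CoreCLR both calls must come from the same thread (see the suspension-
+// thread check in Resume), and the paused interval is folded into g_PauseTime.
+#if 0
+    hr = pExecMgr->Pause(0 /* dwAppDomainId: unused in the paths above */, 0 /* dwFlags */);
+    // ... the EE is suspended here ...
+    hr = pExecMgr->Resume(0);
+#endif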
+
+
+#endif //!DACCESS_COMPILE
+
+#ifdef FEATURE_CORECLR
+#ifndef DACCESS_COMPILE
+SVAL_IMPL(STARTUP_FLAGS, CorHost2, m_dwStartupFlags = STARTUP_CONCURRENT_GC);
+#else
+SVAL_IMPL(STARTUP_FLAGS, CorHost2, m_dwStartupFlags);
+#endif
+
+STARTUP_FLAGS CorHost2::GetStartupFlags()
+{
+ return m_dwStartupFlags;
+}
+#endif //FEATURE_CORECLR
+
+#ifndef DACCESS_COMPILE
+
+#if defined(FEATURE_HOSTED_BINDER) && !defined(FEATURE_CORECLR)
+/*************************************************************************************
+ ** ICLRPrivRuntime Methods
+ *************************************************************************************/
+
+HRESULT CorHost2::GetInterface(
+ REFCLSID rclsid,
+ REFIID riid,
+ LPVOID * ppUnk)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ if (rclsid == __uuidof(CLRPrivAppXBinder))
+ {
+ CLRPrivBinderAppX * pBinder = CLRPrivBinderAppX::GetOrCreateBinder();
+ hr = pBinder->QueryInterface(riid, ppUnk);
+ }
+ else
+ {
+ hr = E_NOINTERFACE;
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+HRESULT CorHost2::CreateAppDomain(
+ LPCWSTR pwzFriendlyName,
+ ICLRPrivBinder * pBinder,
+ LPDWORD pdwAppDomainId)
+{
+ return _CreateAppDomain(
+ pwzFriendlyName,
+ 0 /* default security */,
+ nullptr, /* domain manager */
+ nullptr, /* domain manager */
+ 0, /* property count */
+ nullptr, /* property names */
+ nullptr, /* property values */
+ pBinder,
+ pdwAppDomainId);
+}
+
+HRESULT CorHost2::CreateDelegate(
+ DWORD appDomainID,
+ LPCWSTR wszAssemblyName,
+ LPCWSTR wszClassName,
+ LPCWSTR wszMethodName,
+ LPVOID * ppvDelegate)
+{
+ return _CreateDelegate(appDomainID, wszAssemblyName, wszClassName,
+ wszMethodName, reinterpret_cast<INT_PTR*>(ppvDelegate));
+}
+
+// Flag indicating if the EE was started up by a managed exe. Defined in ceemain.cpp.
+extern BOOL g_fEEManagedEXEStartup;
+
+HRESULT CorHost2::ExecuteMain(
+ ICLRPrivBinder * pBinder,
+ int * pRetVal)
+{
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ HRESULT hr = S_OK;
+
+ // If an exception passes through here, it will cause the
+ // "The application has generated an unhandled exception" dialog and offer to debug.
+ BEGIN_ENTRYPOINT_THROWS;
+
+ // Indicates that the EE was started up by a managed exe.
+ g_fEEManagedEXEStartup = TRUE;
+
+ IfFailGo(CorCommandLine::SetArgvW(WszGetCommandLine()));
+
+ IfFailGo(EnsureEEStarted(COINITEE_MAIN));
+
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ //
+ // Look for the [STAThread] or [MTAThread] attribute
+ // TODO delete this code when we move to the default AppDomain
+ //
+ HMODULE hMod = WszGetModuleHandle(NULL);
+
+ PEImageHolder pTempImage(PEImage::LoadImage(hMod));
+ PEFileHolder pTempFile(PEFile::Open(pTempImage.Extract()));
+
+ // Check for CustomAttributes - Set up the DefaultDomain and the main thread
+ // Note that this has to be done before ExplicitBind() as it
+ // affects the bind
+ mdToken tkEntryPoint = pTempFile->GetEntryPointToken();
+ // <TODO>@TODO: What if the entrypoint is in another file of the assembly?</TODO>
+ ReleaseHolder<IMDInternalImport> scope(pTempFile->GetMDImportWithRef());
+ // In theory, we should have a valid executable image and scope should never be NULL, but we've been
+ // getting Watson failures for AVs here due to ISVs modifying image headers and some new OS loader
+ // checks (see Dev10# 718530 and Windows 7# 615596)
+ if (scope == NULL)
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ Thread::ApartmentState state = Thread::AS_Unknown;
+
+ if((!IsNilToken(tkEntryPoint)) && (TypeFromToken(tkEntryPoint) == mdtMethodDef)) {
+ if (scope->IsValidToken(tkEntryPoint))
+ state = SystemDomain::GetEntryPointThreadAptState(scope, tkEntryPoint);
+ else
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ BOOL fSetGlobalSharePolicyUsingAttribute = FALSE;
+
+ if((!IsNilToken(tkEntryPoint)) && (TypeFromToken(tkEntryPoint) == mdtMethodDef))
+ {
+ // The global share policy needs to be set before initializing default domain
+ // so that it is in place for loading of appdomain manager.
+ fSetGlobalSharePolicyUsingAttribute = SystemDomain::SetGlobalSharePolicyUsingAttribute(scope, tkEntryPoint);
+ }
+
+ // If the entry point has an explicit thread apartment state, set it
+ // before running the AppDomainManager initialization code.
+ if (state == Thread::AS_InSTA || state == Thread::AS_InMTA)
+ SystemDomain::SetThreadAptState(scope, state);
+
+ // This can potentially run managed code.
+ SystemDomain::InitializeDefaultDomain(FALSE, pBinder);
+
+ // If we haven't set an explicit thread apartment state, set it after the
+ // AppDomainManager has got a chance to go set it in InitializeNewDomain.
+ if (state != Thread::AS_InSTA && state != Thread::AS_InMTA)
+ SystemDomain::SetThreadAptState(scope, state);
+
+ if (fSetGlobalSharePolicyUsingAttribute)
+ SystemDomain::System()->DefaultDomain()->SetupLoaderOptimization(g_dwGlobalSharePolicy);
+
+ ADID adId(DefaultADID);
+
+ GCX_COOP();
+
+ ENTER_DOMAIN_ID(adId)
+ TESTHOOKCALL(EnteredAppDomain(adId.m_dwId));
+ {
+ GCX_PREEMP();
+
+ AppDomain *pDomain = GetAppDomain();
+ _ASSERTE(pDomain);
+
+ WCHAR wzExeFileName[_MAX_PATH];
+ DWORD cchExeFileName = _MAX_PATH;
+ cchExeFileName = WszGetModuleFileName(nullptr, wzExeFileName, cchExeFileName);
+ if (cchExeFileName == _MAX_PATH)
+ IfFailThrow(E_UNEXPECTED);
+
+ LPWSTR wzExeSimpleFileName = nullptr;
+ size_t cchExeSimpleFileName = 0;
+ SplitPathInterior(
+ wzExeFileName,
+ nullptr, nullptr, // drive
+ nullptr, nullptr, // dir
+ (LPCWSTR*)&wzExeSimpleFileName, &cchExeSimpleFileName, // filename
+ nullptr, nullptr); // ext
+
+ // Remove the extension
+ wzExeSimpleFileName[cchExeSimpleFileName] = W('\0');
+
+ ReleaseHolder<IAssemblyName> pAssemblyName;
+ IfFailThrow(CreateAssemblyNameObject(
+ &pAssemblyName, // Returned IAssemblyName
+ wzExeSimpleFileName, // Name of assembly
+ CANOF_PARSE_DISPLAY_NAME, // Parse as display name
+ nullptr)); // Reserved
+
+ AssemblySpec specExe;
+ specExe.InitializeSpec(pAssemblyName, nullptr, false);
+
+ PEAssemblyHolder pPEAssembly = pDomain->BindAssemblySpec(&specExe, TRUE, FALSE);
+
+ pDomain->SetRootAssembly(pDomain->LoadAssembly(NULL, pPEAssembly, FILE_ACTIVE));
+
+ LOG((LF_CLASSLOADER | LF_CORDB,
+ LL_INFO10,
+ "Created domain for an executable at %p\n",
+ (pDomain->GetRootAssembly()? pDomain->GetRootAssembly()->Parent() : NULL)));
+ TESTHOOKCALL(RuntimeStarted(RTS_CALLINGENTRYPOINT));
+
+ // Set the friendly name to indicate that this is an immersive domain.
+ pDomain->SetFriendlyName(W("Immersive Application Domain"), TRUE);
+
+ // Execute the main method
+ // NOTE: we call the entry point with our entry point exception filter active
+ // after the AppDomain transition which is a bit different from classic apps.
+ // this is so that we have the correct context when notifying the debugger
+ // or invoking WER on the main thread and mimics the behavior of classic apps.
+ // the assumption is that AppX entry points are always invoked post-AD transition.
+ ExecuteMainInner(pDomain->GetRootAssembly());
+
+ // Get the global latched exit code instead of the return value from ExecuteMainMethod
+ // because in the case of a "void Main" method the return code is always 0,
+ // while the latched exit code is set in either case.
+ *pRetVal = GetLatchedExitCode();
+ }
+ END_DOMAIN_TRANSITION;
+ TESTHOOKCALL(LeftAppDomain(adId.m_dwId));
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ErrExit:
+ END_ENTRYPOINT_THROWS;
+
+ return hr;
+}
+
+VOID CorHost2::ExecuteMainInner(Assembly* pRootAssembly)
+{
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ struct Param
+ {
+ Assembly* pRootAssembly;
+ } param;
+
+ param.pRootAssembly = pRootAssembly;
+
+ PAL_TRY(Param*, pParam, &param)
+ {
+ // since this is the thread 0 entry point for AppX apps we use
+ // the EntryPointFilter so that an unhandled exception here will
+ // trigger the same behavior as in classic apps.
+ pParam->pRootAssembly->ExecuteMainMethod(NULL);
+ }
+ PAL_EXCEPT_FILTER(EntryPointFilter)
+ {
+ LOG((LF_STARTUP, LL_INFO10, "EntryPointFilter returned EXCEPTION_EXECUTE_HANDLER!"));
+ }
+ PAL_ENDTRY
+}
+
+#endif // FEATURE_HOSTED_BINDER
+
+#ifndef FEATURE_CORECLR
+// static
+HRESULT CorHost2::SetFlagsAndHostConfig(STARTUP_FLAGS dwStartupFlags, LPCWSTR pwzHostConfigFile, BOOL fFinalize)
+{
+ WRAPPER_NO_CONTRACT;
+
+ HRESULT hr = E_INVALIDARG;
+
+ if (pwzHostConfigFile == NULL)
+ pwzHostConfigFile = W("");
+
+ DangerousNonHostedSpinLockHolder lockHolder(&m_FlagsLock);
+
+ if (m_dwFlagsFinalized)
+ {
+ // verify that flags and config file are the same
+ if (dwStartupFlags == m_dwStartupFlags &&
+ _wcsicmp(pwzHostConfigFile, m_wzHostConfigFile) == 0)
+ {
+ hr = S_OK;
+ }
+ }
+ else
+ {
+ // overwrite the flags and config with the incoming values
+ if (wcslen(pwzHostConfigFile) < COUNTOF(m_wzHostConfigFile))
+ {
+ VERIFY(wcscpy_s(m_wzHostConfigFile, COUNTOF(m_wzHostConfigFile), pwzHostConfigFile) == 0);
+
+ // If they asked for the server gc but only have one processor, deny that option.
+ // Keep this in sync with shim logic in ComputeStartupFlagsAndFlavor that also switches to
+ // the workstation GC on uniprocessor boxes.
+ if (g_SystemInfo.dwNumberOfProcessors == 1 && (dwStartupFlags & STARTUP_SERVER_GC))
+ dwStartupFlags = (STARTUP_FLAGS)(dwStartupFlags & ~(STARTUP_SERVER_GC | STARTUP_CONCURRENT_GC));
+
+ m_dwStartupFlags = dwStartupFlags;
+
+ if (fFinalize)
+ m_dwFlagsFinalized = TRUE;
+
+ hr = S_OK;
+ }
+ }
+
+ return hr;
+}
+
+// static
+STARTUP_FLAGS CorHost2::GetStartupFlags()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!m_dwFlagsFinalized) // make sure we return consistent results
+ {
+ DangerousNonHostedSpinLockHolder lockHolder(&m_FlagsLock);
+ m_dwFlagsFinalized = TRUE;
+ }
+
+ return m_dwStartupFlags;
+}
+
+// static
+LPCWSTR CorHost2::GetHostConfigFile()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!m_dwFlagsFinalized) // make sure we return consistent results
+ {
+ DangerousNonHostedSpinLockHolder lockHolder(&m_FlagsLock);
+ m_dwFlagsFinalized = TRUE;
+ }
+
+ return m_wzHostConfigFile;
+}
+
+// static
+void CorHost2::GetDefaultAppDomainProperties(StringArrayList **pPropertyNames, StringArrayList **pPropertyValues)
+{
+ LIMITED_METHOD_CONTRACT;
+
+    // We should only read these after the runtime has started, to ensure that the
+    // host isn't still modifying them.
+ _ASSERTE(g_fEEStarted || HasStarted());
+
+ *pPropertyNames = &s_defaultDomainPropertyNames;
+ *pPropertyValues = &s_defaultDomainPropertyValues;
+}
+
+#endif // !FEATURE_CORECLR
+
+#ifdef FEATURE_COMINTEROP
+
+// Enumerate currently existing domains.
+HRESULT CorRuntimeHostBase::EnumDomains(HDOMAINENUM *hEnum)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ WRAPPER(GC_TRIGGERS);
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ if(hEnum == NULL) return E_POINTER;
+
+ // Thread setup happens in BEGIN_EXTERNAL_ENTRYPOINT below.
+ // If the runtime has not started, we have nothing to do.
+ if (!g_fEEStarted)
+ {
+ return HOST_E_CLRNOTAVAILABLE;
+ }
+
+ HRESULT hr = E_OUTOFMEMORY;
+ *hEnum = NULL;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+
+ AppDomainIterator *pEnum = new (nothrow) AppDomainIterator(FALSE);
+ if(pEnum) {
+ *hEnum = (HDOMAINENUM) pEnum;
+ hr = S_OK;
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+#endif // FEATURE_COMINTEROP
+
+extern "C"
+HRESULT GetCLRRuntimeHost(REFIID riid, IUnknown **ppUnk)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return CorHost2::CreateObject(riid, (void**)ppUnk);
+}
+
+#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORECLR)
+
+HRESULT NextDomainWorker(AppDomainIterator *pEnum,
+ IUnknown** pAppDomain)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW); // nothrow contract's fs:0 handler gets called before the C++ EH fs:0 handler which is pushed in the prolog
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ Thread *pThread = GetThread();
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, return COR_E_STACKOVERFLOW);
+
+ EX_TRY
+ {
+ GCX_COOP_THREAD_EXISTS(pThread);
+
+ if (pEnum->Next())
+ {
+ AppDomain* pDomain = pEnum->GetDomain();
+ // Need to enter the AppDomain to synchronize access to the exposed
+ // object properly (can't just take the system domain mutex since we
+ // might need to run code that uses higher ranking crsts).
+ ENTER_DOMAIN_PTR(pDomain,ADV_ITERATOR)
+ {
+
+ hr = pDomain->GetComIPForExposedObject(pAppDomain);
+ }
+ END_DOMAIN_TRANSITION;
+ }
+ else
+ {
+ hr = S_FALSE;
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ END_SO_INTOLERANT_CODE;
+
+ return hr;
+}
+
+// Returns S_FALSE when there are no more domains. A domain
+// is passed out only when S_OK is returned.
+HRESULT CorRuntimeHostBase::NextDomain(HDOMAINENUM hEnum,
+ IUnknown** pAppDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ if(hEnum == NULL || pAppDomain == NULL)
+ return E_POINTER;
+
+ // If the runtime has not started, we have nothing to do.
+ if (!g_fEEStarted)
+ {
+ return HOST_E_CLRNOTAVAILABLE;
+ }
+
+ HRESULT hr;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ AppDomainIterator *pEnum = (AppDomainIterator *) hEnum;
+
+ do
+ {
+ hr = NextDomainWorker(pEnum, pAppDomain);
+ // Might need to look at the next appdomain if we were attempting to get at
+ // the exposed appdomain object and were chucked out as the result of an
+ // appdomain unload.
+ } while (hr == COR_E_APPDOMAINUNLOADED);
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+}
+
+// Creates a domain in the runtime. The optional setup and evidence parameters
+// are COM interface pointers to the managed AppDomainSetup and Evidence objects
+// for the new domain.
+HRESULT CorRuntimeHostBase::CreateDomainEx(LPCWSTR pwzFriendlyName,
+ IUnknown* pSetup, // Optional
+ IUnknown* pEvidence, // Optional
+ IUnknown ** pAppDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ if(!pwzFriendlyName) return E_POINTER;
+ if(pAppDomain == NULL) return E_POINTER;
+ if(!g_fEEStarted) return E_FAIL;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr);
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ struct _gc {
+ STRINGREF pName;
+ OBJECTREF pSetup;
+ OBJECTREF pEvidence;
+ APPDOMAINREF pDomain;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ if (FAILED(hr = EnsureComStartedNoThrow()))
+ goto lDone;
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.pName = StringObject::NewString(pwzFriendlyName);
+
+ if(pSetup)
+ GetObjectRefFromComIP(&gc.pSetup, pSetup);
+ if(pEvidence)
+ GetObjectRefFromComIP(&gc.pEvidence, pEvidence);
+
+ MethodDescCallSite createDomain(METHOD__APP_DOMAIN__CREATE_DOMAIN);
+
+ ARG_SLOT args[3] = {
+ ObjToArgSlot(gc.pName),
+ ObjToArgSlot(gc.pEvidence),
+ ObjToArgSlot(gc.pSetup),
+ };
+
+ gc.pDomain = (APPDOMAINREF) createDomain.Call_RetOBJECTREF(args);
+
+ *pAppDomain = GetComIPFromObjectRef((OBJECTREF*) &gc.pDomain);
+
+ GCPROTECT_END();
+
+lDone: ;
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+// Close the enumeration releasing resources
+HRESULT CorRuntimeHostBase::CloseEnum(HDOMAINENUM hEnum)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ if(hEnum) {
+ AppDomainIterator* pEnum = (AppDomainIterator*) hEnum;
+ delete pEnum;
+ }
+
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+}
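+// Illustrative sketch, not part of the original sources: the enumeration
+// protocol formed by EnumDomains/NextDomain/CloseEnum above. NextDomain
+// returns S_FALSE when the iterator is exhausted; the handle must be released
+// with CloseEnum. "pRuntimeHost" is an assumed ICorRuntimeHost*.
+#if 0
+    HDOMAINENUM hEnum = NULL;
+    if (SUCCEEDED(pRuntimeHost->EnumDomains(&hEnum)))
+    {
+        IUnknown *pUnkDomain = NULL;
+        while (pRuntimeHost->NextDomain(hEnum, &pUnkDomain) == S_OK)
+        {
+            // ... use pUnkDomain (e.g. QI for _AppDomain) ...
+            pUnkDomain->Release();
+        }
+        pRuntimeHost->CloseEnum(hEnum);
+    }
+#endif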
+
+
+HRESULT CorRuntimeHostBase::CreateDomainSetup(IUnknown **pAppDomainSetup)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (!pAppDomainSetup)
+ return E_POINTER;
+
+ // If the runtime has not started, we have nothing to do.
+ if (!g_fEEStarted)
+ {
+ return HOST_E_CLRNOTAVAILABLE;
+ }
+
+    // Create the AppDomainSetup object.
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr);
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ struct _gc {
+ OBJECTREF pSetup;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ MethodTable* pMT = NULL;
+
+ hr = EnsureComStartedNoThrow();
+ if (FAILED(hr))
+ goto lDone;
+
+ pMT = MscorlibBinder::GetClass(CLASS__APPDOMAIN_SETUP);
+
+ GCPROTECT_BEGIN(gc);
+ gc.pSetup = AllocateObject(pMT);
+ *pAppDomainSetup = GetComIPFromObjectRef((OBJECTREF*) &gc.pSetup);
+ GCPROTECT_END();
+
+lDone: ;
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+HRESULT CorRuntimeHostBase::CreateEvidence(IUnknown **pEvidence)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (!pEvidence)
+ return E_POINTER;
+
+#ifdef FEATURE_CAS_POLICY
+
+ // If the runtime has not started, we have nothing to do.
+ if (!g_fEEStarted)
+ {
+ return HOST_E_CLRNOTAVAILABLE;
+ }
+
+    // Create the Evidence object.
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr);
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ struct _gc {
+ OBJECTREF pEvidence;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ MethodTable* pMT = NULL;
+
+ hr = EnsureComStartedNoThrow();
+ if (FAILED(hr))
+ goto lDone;
+
+ pMT = MscorlibBinder::GetClass(CLASS__EVIDENCE);
+
+ GCPROTECT_BEGIN(gc);
+ gc.pEvidence = AllocateObject(pMT);
+ MethodDescCallSite ctor(METHOD__EVIDENCE__CTOR, &(gc.pEvidence));
+
+ // Call the Evidence class constructor.
+ ARG_SLOT CtorArgs[] =
+ {
+ ObjToArgSlot(gc.pEvidence)
+ };
+ ctor.Call(CtorArgs);
+
+ *pEvidence = GetComIPFromObjectRef((OBJECTREF*) &gc.pEvidence);
+ GCPROTECT_END();
+
+lDone: ;
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ END_ENTRYPOINT_NOTHROW;
+#else // !FEATURE_CAS_POLICY
+ // There is no Evidence class support without CAS policy.
+ return E_NOTIMPL;
+#endif // FEATURE_CAS_POLICY
+
+ return hr;
+}
+
+HRESULT CorRuntimeHostBase::UnloadDomain(IUnknown *pUnkDomain)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ FORBID_FAULT;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ if (!pUnkDomain)
+ return E_POINTER;
+
+ // If the runtime has not started, we have nothing to do.
+ if (!g_fEEStarted)
+ {
+ return HOST_E_CLRNOTAVAILABLE;
+ }
+
+ CONTRACT_VIOLATION(FaultViolation); // This entire function is full of OOM potential: must fix.
+
+ HRESULT hr = S_OK;
+ DWORD dwDomainId = 0;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ _ASSERTE (g_fComStarted);
+
+ {
+ SystemDomain::LockHolder lh;
+
+ ComCallWrapper* pWrap = GetCCWFromIUnknown(pUnkDomain, FALSE);
+ if (!pWrap)
+ {
+ hr = COR_E_APPDOMAINUNLOADED;
+ }
+ if (SUCCEEDED(hr))
+ {
+ dwDomainId = pWrap->GetDomainID().m_dwId;
+ }
+ }
+ if (SUCCEEDED(hr))
+ {
+ hr = UnloadAppDomain(dwDomainId, TRUE);
+ }
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+#endif // FEATURE_COMINTEROP && !FEATURE_CORECLR
+
+STDMETHODIMP CorHost2::UnloadAppDomain(DWORD dwDomainId, BOOL fWaitUntilDone)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#ifdef FEATURE_CORECLR
+ if (!m_fStarted)
+ return HOST_E_INVALIDOPERATION;
+
+ if(m_dwStartupFlags & STARTUP_SINGLE_APPDOMAIN)
+ {
+ if (!g_fEEStarted)
+ {
+ return HOST_E_CLRNOTAVAILABLE;
+ }
+
+ if(!m_fAppDomainCreated)
+ {
+ return HOST_E_INVALIDOPERATION;
+ }
+
+ HRESULT hr=S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ if (!m_fFirstToLoadCLR)
+ {
+ _ASSERTE(!"Not reachable");
+ hr = HOST_E_CLRNOTAVAILABLE;
+ }
+ else
+ {
+ LONG refCount = m_RefCount;
+ if (refCount == 0)
+ {
+ hr = HOST_E_CLRNOTAVAILABLE;
+ }
+            else if (1 == refCount)
+            {
+                // Stop CoreCLR on unload.
+                m_fStarted = FALSE;
+                EEShutDown(FALSE);
+            }
+            else
+            {
+                _ASSERTE(!"Not reachable");
+                hr = E_FAIL;
+            }
+ }
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+ }
+ else
+#endif // FEATURE_CORECLR
+
+ return CorRuntimeHostBase::UnloadAppDomain(dwDomainId, fWaitUntilDone);
+}
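+// Illustrative sketch, not part of the original sources: per the logic above,
+// in single-AppDomain mode a host's UnloadAppDomain on the one domain it
+// created also shuts CoreCLR down when this is the last outstanding Start.
+#if 0
+    hr = pHost2->UnloadAppDomain(dwDomainId, TRUE /* fWaitUntilDone */);
+#endif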
+
+HRESULT CorRuntimeHostBase::UnloadAppDomain(DWORD dwDomainId, BOOL fSync)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ FORBID_FAULT; // Unloading domains cannot fail due to OOM
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // No point going further if the runtime is not running...
+ {
+ // In IsRuntimeActive, we will call CanRunManagedCode that will
+ // check if the current thread has taken the loader lock or not,
+ // if MDA is supported. To do the check, MdaLoaderLock::ReportViolation
+ // will be invoked that will internally end up invoking
+ // MdaFactory<MdaXmlElement>::GetNext that will use the "new" operator
+ // that has the "FAULT" contract set, resulting in FAULT_VIOLATION since
+ // this method has the FORBID_FAULT contract set above.
+ //
+ // However, for a thread that holds the loader lock, unloading the appDomain is
+ // not a supported scenario. Thus, we should not be ending up in this code
+ // path for the FAULT violation.
+ //
+ // Hence, the CONTRACT_VIOLATION below for overriding the FORBID_FAULT
+ // for this scope only.
+ CONTRACT_VIOLATION(FaultViolation);
+ if (!IsRuntimeActive()
+ #ifdef FEATURE_CORECLR
+ || !m_fStarted
+ #endif // FEATURE_CORECLR
+ )
+ {
+ return HOST_E_CLRNOTAVAILABLE;
+ }
+ }
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+    // We do not use BEGIN_EXTERNAL_ENTRYPOINT here because we do not want to set up
+    // a Thread object. The process may be OOM, and we want Unload to work.
+ hr = AppDomain::UnloadById(ADID(dwDomainId), fSync);
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+//*****************************************************************************
+// Fiber Methods
+//*****************************************************************************
+#if !defined(FEATURE_CORECLR) // simple hosting
+HRESULT CorHost::CreateLogicalThreadState()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ DISABLED(GC_TRIGGERS);
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+ if (CorHost::GetHostVersion() != 1)
+ {
+ hr=HOST_E_INVALIDOPERATION;
+ }
+ else
+ {
+ _ASSERTE (GetThread() == 0 || GetThread()->HasRightCacheStackBase());
+ /* Thread *thread = */ SetupThreadNoThrow(&hr);
+
+ }
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+}
+
+
+HRESULT CorHost::DeleteLogicalThreadState()
+{
+ if (CorHost::GetHostVersion() != 1)
+ {
+ return HOST_E_INVALIDOPERATION;
+ }
+
+ Thread *pThread = GetThread();
+ if (!pThread)
+ return E_UNEXPECTED;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+ // We need to reset the TrapReturningThread count that was
+ // set when a thread is requested to be aborted. Otherwise
+ // every stub call is going to go through a slow path.
+ if (pThread->IsAbortRequested())
+ pThread->UnmarkThreadForAbort(Thread::TAR_ALL);
+
+ // see code:Thread::OnThreadTerminate#ReportDeadOnThreadTerminate
+ pThread->SetThreadState(Thread::TS_ReportDead);
+
+ pThread->OnThreadTerminate(FALSE);
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+}
+
+
+HRESULT CorHost::SwitchInLogicalThreadState(DWORD *pFiberCookie)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ if (CorHost::GetHostVersion() != 1)
+ {
+ return HOST_E_INVALIDOPERATION;
+ }
+
+ if (!pFiberCookie)
+ {
+ return E_POINTER;
+ }
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ hr = ((Thread*)pFiberCookie)->SwitchIn(::GetCurrentThread());
+
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+
+}
+
+HRESULT CorHost::SwitchOutLogicalThreadState(DWORD **pFiberCookie)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ if (CorHost::GetHostVersion() != 1)
+ {
+ return HOST_E_INVALIDOPERATION;
+ }
+
+ if (!pFiberCookie)
+ {
+ return E_POINTER;
+ }
+
+ Thread *pThread = GetThread();
+ if (!pThread)
+ {
+ return E_UNEXPECTED;
+ }
+
+ pThread->InternalSwitchOut();
+ *pFiberCookie = (DWORD*)pThread;
+
+ return S_OK;
+}
+#endif // !defined(FEATURE_CORECLR)
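+// Illustrative sketch, not part of the original sources: the fiber-mode
+// protocol implied by the legacy methods above. "pCorHost" is an assumed
+// ICorRuntimeHost* on the non-CoreCLR host. The cookie handed out on the way
+// out (the Thread*) is passed back when the fiber is rescheduled.
+#if 0
+    DWORD *pFiberCookie = NULL;
+    pCorHost->SwitchOutLogicalThreadState(&pFiberCookie);  // returns the Thread* as a cookie
+    // ... run another fiber on this OS thread ...
+    pCorHost->SwitchInLogicalThreadState(pFiberCookie);
+#endif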
+
+HRESULT CorRuntimeHostBase::LocksHeldByLogicalThread(DWORD *pCount)
+{
+ if (!pCount)
+ return E_POINTER;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ Thread* pThread = GetThread();
+ if (pThread == NULL)
+ *pCount = 0;
+ else
+ *pCount = pThread->m_dwLockCount;
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return S_OK;
+}
+
+//*****************************************************************************
+// ICorConfiguration
+//*****************************************************************************
+#if !defined(FEATURE_CORECLR)
+IGCThreadControl *CorConfiguration::m_CachedGCThreadControl = 0;
+IGCHostControl *CorConfiguration::m_CachedGCHostControl = 0;
+IDebuggerThreadControl *CorConfiguration::m_CachedDebuggerThreadControl = 0;
+DWORD *CorConfiguration::m_DSTArray = 0;
+DWORD CorConfiguration::m_DSTCount = 0;
+DWORD CorConfiguration::m_DSTArraySize = 0;
+
+// *** ICorConfiguration methods ***
+
+
+HRESULT CorConfiguration::SetGCThreadControl(IGCThreadControl *pGCThreadControl)
+{
+ if (!pGCThreadControl)
+ return E_POINTER;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ if (m_CachedGCThreadControl)
+ m_CachedGCThreadControl->Release();
+
+ m_CachedGCThreadControl = pGCThreadControl;
+
+ if (m_CachedGCThreadControl)
+ m_CachedGCThreadControl->AddRef();
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return S_OK;
+}
+
+HRESULT CorConfiguration::SetGCHostControl(IGCHostControl *pGCHostControl)
+{
+ if (!pGCHostControl)
+ return E_POINTER;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ if (m_CachedGCHostControl)
+ m_CachedGCHostControl->Release();
+
+ m_CachedGCHostControl = pGCHostControl;
+
+ if (m_CachedGCHostControl)
+ m_CachedGCHostControl->AddRef();
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return S_OK;
+}
+
+HRESULT CorConfiguration::SetDebuggerThreadControl(IDebuggerThreadControl *pDebuggerThreadControl)
+{
+ if (!pDebuggerThreadControl)
+ return E_POINTER;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+#ifdef DEBUGGING_SUPPORTED
+    // Can't change the debugger thread control object once it's been set.
+ if (m_CachedDebuggerThreadControl != NULL)
+ IfFailGo(E_INVALIDARG);
+
+ m_CachedDebuggerThreadControl = pDebuggerThreadControl;
+
+ // If debugging is already initialized then provide this interface pointer to it.
+ // It will also addref the new one and release the old one.
+ if (g_pDebugInterface)
+ g_pDebugInterface->SetIDbgThreadControl(pDebuggerThreadControl);
+
+ if (m_CachedDebuggerThreadControl)
+ m_CachedDebuggerThreadControl->AddRef();
+
+ hr = S_OK;
+#else // !DEBUGGING_SUPPORTED
+ hr = E_NOTIMPL;
+#endif // !DEBUGGING_SUPPORTED
+
+ErrExit:
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+
+}
+
+
+HRESULT CorConfiguration::AddDebuggerSpecialThread(DWORD dwSpecialThreadId)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT; // debugging not hardened for SO
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+
+#ifdef DEBUGGING_SUPPORTED
+ // If it's already in the list, don't add it again.
+ if (IsDebuggerSpecialThread(dwSpecialThreadId))
+ {
+ hr = S_OK;
+ goto ErrExit;
+ }
+ // Grow the array if necessary.
+ if (m_DSTCount >= m_DSTArraySize)
+ {
+ // There's probably only ever gonna be one or two of these
+ // things, so we'll start small.
+ DWORD newSize = (m_DSTArraySize == 0) ? 2 : m_DSTArraySize * 2;
+
+ DWORD *newArray = new (nothrow) DWORD[newSize];
+ IfNullGo(newArray);
+
+ // If we're growing instead of starting, then copy the old array.
+ if (m_DSTArray)
+ {
+ memcpy(newArray, m_DSTArray, m_DSTArraySize * sizeof(DWORD));
+ delete [] m_DSTArray;
+ }
+
+ // Update to the new array and size.
+ m_DSTArray = newArray;
+ m_DSTArraySize = newSize;
+ }
+
+ // Save the new thread ID.
+ m_DSTArray[m_DSTCount++] = dwSpecialThreadId;
+
+ hr = (RefreshDebuggerSpecialThreadList());
+#else // !DEBUGGING_SUPPORTED
+ hr = E_NOTIMPL;
+#endif // !DEBUGGING_SUPPORTED
+ErrExit:
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+
+}
+// Helper function to update the thread list in the debugger control block
+HRESULT CorConfiguration::RefreshDebuggerSpecialThreadList()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef DEBUGGING_SUPPORTED
+ HRESULT hr = S_OK;
+
+ if (g_pDebugInterface)
+ {
+ // Inform the debugger services that this list has changed
+ hr = g_pDebugInterface->UpdateSpecialThreadList(
+ m_DSTCount, m_DSTArray);
+
+ _ASSERTE(SUCCEEDED(hr));
+ }
+
+ return (hr);
+#else // !DEBUGGING_SUPPORTED
+ return E_NOTIMPL;
+#endif // !DEBUGGING_SUPPORTED
+}
+
+
+// Helper function that returns TRUE if the thread is in the debugger special thread list
+BOOL CorConfiguration::IsDebuggerSpecialThread(DWORD dwThreadId)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ for (DWORD i = 0; i < m_DSTCount; i++)
+ {
+ if (m_DSTArray[i] == dwThreadId)
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
+
+// Clean up any debugger thread control object we may be holding, called at shutdown.
+void CorConfiguration::CleanupDebuggerThreadControl()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (m_CachedDebuggerThreadControl != NULL)
+ {
+ // Note: we don't release the IDebuggerThreadControl object if we're cleaning up from
+ // our DllMain. The DLL that implements the object may already have been unloaded.
+ // Leaking the object is okay... the PDM doesn't care.
+ if (!IsAtProcessExit())
+ m_CachedDebuggerThreadControl->Release();
+
+ m_CachedDebuggerThreadControl = NULL;
+ }
+}
+#endif // !defined(FEATURE_CORECLR)
+
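+// Illustrative host-side sketch (desktop CLR only; not code from this file):
+// a host obtains ICorConfiguration via ICorRuntimeHost::GetConfiguration and
+// registers its callbacks before starting the runtime. pMyGCControl is a
+// hypothetical host-implemented IGCThreadControl.
+//
+//   ICorConfiguration *pConfig = NULL;
+//   if (SUCCEEDED(pRuntimeHost->GetConfiguration(&pConfig)))
+//   {
+//       pConfig->SetGCThreadControl(pMyGCControl);  // the CLR AddRefs it
+//       pConfig->Release();
+//   }
+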
+//*****************************************************************************
+// IUnknown
+//*****************************************************************************
+
+ULONG CorRuntimeHostBase::AddRef()
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+ return InterlockedIncrement(&m_cRef);
+}
+
+#if !defined(FEATURE_CORECLR) // simple hosting
+ULONG CorHost::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ ULONG cRef = InterlockedDecrement(&m_cRef);
+ if (!cRef) {
+ delete this;
+ }
+
+ return (cRef);
+}
+#endif // !defined(FEATURE_CORECLR)
+
+ULONG CorHost2::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ULONG cRef = InterlockedDecrement(&m_cRef);
+ if (!cRef) {
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ // CorHost2 is allocated before host memory interface is set up.
+ if (GetHostMemoryManager() == NULL)
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ delete this;
+ }
+
+ return (cRef);
+}
+
+#if !defined(FEATURE_CORECLR) // simple hosting
+HRESULT CorHost::QueryInterface(REFIID riid, void **ppUnk)
+{
+ if (!ppUnk)
+ return E_POINTER;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT; // no global state updates that need guarding.
+ }
+ CONTRACTL_END;
+
+ *ppUnk = 0;
+
+ // Deliberately do NOT hand out ICorConfiguration. They must explicitly call
+ // GetConfiguration to obtain that interface.
+ if (riid == IID_IUnknown)
+ *ppUnk = (IUnknown *) (ICorRuntimeHost *) this;
+ else if (riid == IID_ICorRuntimeHost)
+ {
+ ULONG version = 1;
+ if (m_Version == 0)
+ FastInterlockCompareExchange((LONG*)&m_Version, version, 0);
+
+ if (m_Version != version && (g_singleVersionHosting || !g_fEEStarted))
+ {
+ return HOST_E_INVALIDOPERATION;
+ }
+
+ *ppUnk = (ICorRuntimeHost *) this;
+ }
+ else if (riid == IID_ICorThreadpool)
+ *ppUnk = (ICorThreadpool *) this;
+ else if (riid == IID_IGCHost)
+ *ppUnk = (IGCHost *) this;
+ else if (riid == IID_IGCHost2)
+ *ppUnk = (IGCHost2 *) this;
+ else if (riid == IID_IValidator)
+ *ppUnk = (IValidator *) this;
+ else if (riid == IID_IDebuggerInfo)
+ *ppUnk = (IDebuggerInfo *) this;
+ else if (riid == IID_ICLRExecutionManager)
+ *ppUnk = (ICLRExecutionManager *) this;
+ else
+ return (E_NOINTERFACE);
+ AddRef();
+ return (S_OK);
+}
+#endif // !defined(FEATURE_CORECLR)
+
+
+HRESULT CorHost2::QueryInterface(REFIID riid, void **ppUnk)
+{
+ if (!ppUnk)
+ return E_POINTER;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT; // no global state updates that need guarding.
+ }
+ CONTRACTL_END;
+
+ *ppUnk = 0;
+
+ // Deliberately do NOT hand out ICorConfiguration. They must explicitly call
+ // GetConfiguration to obtain that interface.
+ if (riid == IID_IUnknown)
+ *ppUnk = static_cast<IUnknown *>(static_cast<ICLRRuntimeHost *>(this));
+#ifdef FEATURE_CORECLR // CoreCLR only supports IID_ICLRRuntimeHost2
+ else if (riid == IID_ICLRRuntimeHost2)
+ {
+ ULONG version = 2;
+ if (m_Version == 0)
+ FastInterlockCompareExchange((LONG*)&m_Version, version, 0);
+
+ *ppUnk = static_cast<ICLRRuntimeHost2 *>(this);
+ }
+#else // DesktopCLR only supports IID_ICLRRuntimeHost
+ else if (riid == IID_ICLRRuntimeHost)
+ {
+ ULONG version = 2;
+ if (m_Version == 0)
+ FastInterlockCompareExchange((LONG*)&m_Version, version, 0);
+
+ *ppUnk = static_cast<ICLRRuntimeHost *>(this);
+ }
+#endif // FEATURE_CORECLR
+ else if (riid == IID_ICLRExecutionManager)
+ {
+ ULONG version = 2;
+ if (m_Version == 0)
+ FastInterlockCompareExchange((LONG*)&m_Version, version, 0);
+
+ *ppUnk = static_cast<ICLRExecutionManager *>(this);
+ }
+#if defined(FEATURE_HOSTED_BINDER) && !defined(FEATURE_CORECLR)
+ else if (riid == __uuidof(ICLRPrivRuntime))
+ {
+ ULONG version = 2;
+ if (m_Version == 0)
+ FastInterlockCompareExchange((LONG*)&m_Version, version, 0);
+
+ *ppUnk = static_cast<ICLRPrivRuntime *>(this);
+ }
+#endif
+#ifndef FEATURE_PAL
+ else if (riid == IID_IPrivateManagedExceptionReporting)
+ {
+ *ppUnk = static_cast<IPrivateManagedExceptionReporting *>(this);
+ }
+#endif // !FEATURE_PAL
+#ifndef FEATURE_CORECLR
+ else if (riid == IID_ICorThreadpool)
+ *ppUnk = static_cast<ICorThreadpool *>(this);
+ // TODO: wwl Remove this after SQL uses new interface.
+ else if (riid == IID_IGCHost &&
+ GetHostVersion() == 3)
+ *ppUnk = static_cast<IGCHost *>(this);
+ else if (riid == IID_ICLRValidator)
+ *ppUnk = static_cast<ICLRValidator *>(this);
+ else if (riid == IID_IDebuggerInfo)
+ *ppUnk = static_cast<IDebuggerInfo *>(this);
+#ifdef FEATURE_TESTHOOKS
+ else if (riid == IID_ICLRTestHookManager)
+ {
+ *ppUnk=CLRTestHookManager::Start();
+ if(*ppUnk==NULL)
+ return E_OUTOFMEMORY;
+ }
+#endif // FEATURE_TESTHOOKS
+#endif // FEATURE_CORECLR
+ else
+ return (E_NOINTERFACE);
+ AddRef();
+ return (S_OK);
+}
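+
+// Illustrative sketch (not code from this file): since CoreCLR only hands out
+// ICLRRuntimeHost2 above, a host holding some IUnknown pUnk on this object
+// would typically query for exactly that interface:
+//
+//   ICLRRuntimeHost2 *pHost = NULL;
+//   HRESULT hr = pUnk->QueryInterface(IID_ICLRRuntimeHost2, (void **)&pHost);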
+
+#ifndef FEATURE_CORECLR // CorHost isn't exposed externally
+//*****************************************************************************
+// Called by the class factory template to create a new instance of this object.
+//*****************************************************************************
+HRESULT CorHost::CreateObject(REFIID riid, void **ppUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ CorHost *pCorHost = new (nothrow) CorHost();
+ if (!pCorHost)
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ else
+ {
+ hr = pCorHost->QueryInterface(riid, ppUnk);
+
+ if (FAILED(hr))
+ delete pCorHost;
+ }
+ return (hr);
+}
+#endif // FEATURE_CORECLR
+
+#ifndef FEATURE_PAL
+HRESULT CorHost2::GetBucketParametersForCurrentException(BucketParameters *pParams)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+    CONTRACTL_END;
+
+    // Validate the out parameter before writing to it.
+    if (pParams == NULL)
+        return E_POINTER;
+
+    HRESULT hr = S_OK;
+    BEGIN_ENTRYPOINT_NOTHROW;
+
+ // To avoid confusion, clear the buckets.
+ memset(pParams, 0, sizeof(BucketParameters));
+
+ // Defer to Watson helper.
+ hr = ::GetBucketParametersForCurrentException(pParams);
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+#endif // !FEATURE_PAL
+
+HRESULT CorHost2::CreateObject(REFIID riid, void **ppUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW; );
+ CorHost2 *pCorHost = new (nothrow) CorHost2();
+ if (!pCorHost)
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ else
+ {
+ hr = pCorHost->QueryInterface(riid, ppUnk);
+ if (FAILED(hr))
+ delete pCorHost;
+ }
+ END_SO_INTOLERANT_CODE;
+ return (hr);
+}
+
+
+//-----------------------------------------------------------------------------
+// MapFile - Maps a file into the runtime in a non-standard way
+//-----------------------------------------------------------------------------
+
+static PEImage *MapFileHelper(HANDLE hFile)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ HandleHolder hFileMap(WszCreateFileMapping(hFile, NULL, PAGE_READONLY, 0, 0, NULL));
+ if (hFileMap == NULL)
+ ThrowLastError();
+
+ CLRMapViewHolder base(CLRMapViewOfFile(hFileMap, FILE_MAP_READ, 0, 0, 0));
+ if (base == NULL)
+ ThrowLastError();
+
+ DWORD dwSize = SafeGetFileSize(hFile, NULL);
+ if (dwSize == 0xffffffff && GetLastError() != NOERROR)
+ {
+ ThrowLastError();
+ }
+ PEImageHolder pImage(PEImage::LoadFlat(base, dwSize));
+ return pImage.Extract();
+}
+
+HRESULT CorRuntimeHostBase::MapFile(HANDLE hFile, HMODULE* phHandle)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ *phHandle = (HMODULE) (MapFileHelper(hFile)->GetLoadedLayout()->GetBase());
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ END_ENTRYPOINT_NOTHROW;
+
+
+ return hr;
+}
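+
+// Illustrative host-side sketch (pRuntimeHost and hFile are hypothetical): the
+// mapped image's base address comes back as an HMODULE-shaped value.
+//
+//   HMODULE hMod = NULL;
+//   HRESULT hr = pRuntimeHost->MapFile(hFile, &hMod);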
+
+///////////////////////////////////////////////////////////////////////////////
+// IDebuggerInfo::IsDebuggerAttached
+#if !defined(FEATURE_CORECLR)
+HRESULT CorDebuggerInfo::IsDebuggerAttached(BOOL *pbAttached)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ if (pbAttached == NULL)
+ hr = E_INVALIDARG;
+ else
+#ifdef DEBUGGING_SUPPORTED
+ *pbAttached = (CORDebuggerAttached() != 0);
+#else
+ *pbAttached = FALSE;
+#endif
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+#endif // !defined(FEATURE_CORECLR)
+
+LONG CorHost2::m_RefCount = 0;
+
+IHostControl *CorHost2::m_HostControl = NULL;
+
+LPCWSTR CorHost2::s_wszAppDomainManagerAsm = NULL;
+LPCWSTR CorHost2::s_wszAppDomainManagerType = NULL;
+EInitializeNewDomainFlags CorHost2::s_dwDomainManagerInitFlags = eInitializeNewDomainFlags_None;
+
+#ifdef FEATURE_LEGACYNETCF_DBG_HOST_CONTROL
+IHostNetCFDebugControlManager *CorHost2::m_HostNetCFDebugControlManager = NULL;
+#endif
+
+#ifndef FEATURE_CORECLR // not supported
+
+StringArrayList CorHost2::s_defaultDomainPropertyNames;
+StringArrayList CorHost2::s_defaultDomainPropertyValues;
+
+IHostMemoryManager *CorHost2::m_HostMemoryManager = NULL;
+IHostMalloc *CorHost2::m_HostMalloc = NULL;
+IHostTaskManager *CorHost2::m_HostTaskManager = NULL;
+IHostThreadpoolManager *CorHost2::m_HostThreadpoolManager = NULL;
+IHostIoCompletionManager *CorHost2::m_HostIoCompletionManager = NULL;
+IHostSyncManager *CorHost2::m_HostSyncManager = NULL;
+IHostAssemblyManager *CorHost2::m_HostAssemblyManager = NULL;
+IHostGCManager *CorHost2::m_HostGCManager = NULL;
+IHostSecurityManager *CorHost2::m_HostSecurityManager = NULL;
+IHostPolicyManager *CorHost2::m_HostPolicyManager = NULL;
+int CorHost2::m_HostOverlappedExtensionSize = -1;
+
+STARTUP_FLAGS CorHost2::m_dwStartupFlags = STARTUP_CONCURRENT_GC;
+WCHAR CorHost2::m_wzHostConfigFile[_MAX_PATH] = { 0 };
+
+BOOL CorHost2::m_dwFlagsFinalized = FALSE;
+DangerousNonHostedSpinLock CorHost2::m_FlagsLock;
+
+class CCLRMemoryNotificationCallback: public ICLRMemoryNotificationCallback
+{
+public:
+ virtual HRESULT STDMETHODCALLTYPE OnMemoryNotification(EMemoryAvailable eMemoryAvailable) {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+        // The runtime has not started yet.
+ if (!g_fEEStarted)
+ return S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ switch (eMemoryAvailable)
+ {
+ case eMemoryAvailableLow:
+ STRESS_LOG0(LF_GC, LL_INFO100, "Host delivers memory notification: Low\n");
+ break;
+ case eMemoryAvailableNeutral:
+ STRESS_LOG0(LF_GC, LL_INFO100, "Host delivers memory notification: Neutral\n");
+ break;
+ case eMemoryAvailableHigh:
+ STRESS_LOG0(LF_GC, LL_INFO100, "Host delivers memory notification: High\n");
+ break;
+ }
+ static DWORD lastTime = (DWORD)-1;
+ if (eMemoryAvailable == eMemoryAvailableLow)
+ {
+ FastInterlockIncrement (&g_bLowMemoryFromHost);
+ DWORD curTime = GetTickCount();
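+            // Wake the finalizer at most once every 0x2000 (8192) ms; the
+            // curTime < lastTime test also handles GetTickCount() wraparound
+            // (and fires the first time through, since lastTime starts at -1).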
+ if (curTime < lastTime || curTime - lastTime >= 0x2000)
+ {
+ lastTime = curTime;
+ FinalizerThread::EnableFinalization();
+ }
+ }
+ else
+ {
+ FastInterlockExchange (&g_bLowMemoryFromHost, FALSE);
+ }
+ END_ENTRYPOINT_NOTHROW;
+
+ return S_OK;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE AddRef(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+ HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void **ppvObject)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (riid != IID_ICLRMemoryNotificationCallback && riid != IID_IUnknown)
+ return (E_NOINTERFACE);
+ *ppvObject = this;
+ return S_OK;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE Release(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+};
+
+static CCLRMemoryNotificationCallback s_MemoryNotification;
+
+class CLRTaskManager : public ICLRTaskManager
+{
+public:
+ virtual ULONG STDMETHODCALLTYPE AddRef(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void **ppvObject) {
+ LIMITED_METHOD_CONTRACT;
+ if (riid != IID_ICLRTaskManager && riid != IID_IUnknown)
+ return (E_NOINTERFACE);
+ *ppvObject = this;
+ return S_OK;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE Release(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE CreateTask(ICLRTask **pTask)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ DISABLED(GC_NOTRIGGER);
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+#ifdef _DEBUG
+ _ASSERTE (!CLRTaskHosted() || GetCurrentHostTask());
+#endif
+ _ASSERTE (GetThread() == NULL);
+ Thread *pThread = NULL;
+ pThread = SetupThreadNoThrow(&hr);
+ *pTask = pThread;
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE GetCurrentTask(ICLRTask **pTask)
+ {
+        // This function may be called due to SQL SwitchIn/Out. A contract may
+        // force a memory allocation, which is not allowed during a switch.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ *pTask = GetThread();
+ return S_OK;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE SetUILocale(LCID lcid)
+ {
+ Thread *pThread = GetThread();
+ if (pThread == NULL)
+ return HOST_E_INVALIDOPERATION;
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ //BEGIN_ENTRYPOINT_NOTHROW;
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ pThread->SetCultureId(lcid,TRUE);
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ //END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE SetLocale(LCID lcid)
+ {
+ Thread *pThread = GetThread();
+ if (pThread == NULL)
+ return HOST_E_INVALIDOPERATION;
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ //BEGIN_ENTRYPOINT_NOTHROW;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ pThread->SetCultureId(lcid,FALSE);
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ //END_ENTRYPOINT_NOTHROW;
+ return hr;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE GetCurrentTaskType(ETaskType *pTaskType)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+ *pTaskType = ::GetCurrentTaskType();
+ END_ENTRYPOINT_NOTHROW;
+
+ return S_OK;
+ }
+};
+
+static CLRTaskManager s_CLRTaskManager;
+
+class CLRSyncManager : public ICLRSyncManager
+{
+public:
+ virtual HRESULT STDMETHODCALLTYPE GetMonitorOwner(SIZE_T Cookie,
+ IHostTask **ppOwnerHostTask)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ GC_NOTRIGGER;
+            ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ // Cookie is the SyncBlock
+ // <TODO>TODO: Lifetime of Cookie?</TODO>
+ AwareLock* pAwareLock = (AwareLock*)Cookie;
+ IHostTask *pTask = NULL;
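+
+        // Take an unlocked peek at the owner first; if there is one, re-fetch
+        // it under the ThreadStore lock and walk the thread list to make sure
+        // the Thread object still exists (the lock may have been orphaned).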
+ Thread *pThread = pAwareLock->GetOwningThread();
+ if (pThread)
+ {
+ ThreadStoreLockHolder tsLock;
+ pThread = pAwareLock->GetOwningThread();
+ if (pThread)
+ {
+ // See if the lock is orphaned, and the Thread object has been deleted
+ Thread *pWalk = NULL;
+ while ((pWalk = ThreadStore::GetAllThreadList(pWalk, 0, 0)) != NULL)
+ {
+ if (pWalk == pThread)
+ {
+ pTask = pThread->GetHostTaskWithAddRef();
+ break;
+ }
+ }
+ }
+ }
+
+ *ppOwnerHostTask = pTask;
+
+ END_ENTRYPOINT_NOTHROW;
+
+
+ return S_OK;
+ }
+ virtual HRESULT STDMETHODCALLTYPE CreateRWLockOwnerIterator(SIZE_T Cookie,
+ SIZE_T *pIterator) {
+ Thread *pThread = GetThread();
+
+ // We may open a window for GC here.
+ // A host should not hijack a coop thread to do deadlock detection.
+ if (pThread && pThread->PreemptiveGCDisabled())
+ return HOST_E_INVALIDOPERATION;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+
+#ifdef FEATURE_RWLOCK
+ BEGIN_ENTRYPOINT_NOTHROW;
+ ThreadStoreLockHolder tsLock;
+ // Cookie is a weak handle. We need to make sure that the object is not moving.
+ CRWLock *pRWLock = *(CRWLock **) Cookie;
+ *pIterator = NULL;
+ if (pRWLock == NULL)
+ {
+ hr = S_OK;
+ }
+ else
+ {
+ hr = pRWLock->CreateOwnerIterator(pIterator);
+ }
+ END_ENTRYPOINT_NOTHROW;
+#endif // FEATURE_RWLOCK
+
+ return hr;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE GetRWLockOwnerNext(SIZE_T Iterator,
+ IHostTask **ppOwnerHostTask)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_RWLOCK
+ BEGIN_ENTRYPOINT_NOTHROW;
+ CRWLock::GetNextOwner(Iterator,ppOwnerHostTask);
+ END_ENTRYPOINT_NOTHROW;
+#endif // FEATURE_RWLOCK
+
+ return S_OK;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE DeleteRWLockOwnerIterator(SIZE_T Iterator)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_RWLOCK
+ BEGIN_ENTRYPOINT_NOTHROW;
+ CRWLock::DeleteOwnerIterator(Iterator);
+ END_ENTRYPOINT_NOTHROW;
+#endif // FEATURE_RWLOCK
+
+ return S_OK;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE AddRef(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+ HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void **ppvObject)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (riid != IID_ICLRSyncManager && riid != IID_IUnknown)
+ return (E_NOINTERFACE);
+ *ppvObject = this;
+ return S_OK;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE Release(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+};
+
+static CLRSyncManager s_CLRSyncManager;
+
+extern void HostIOCompletionCallback(DWORD ErrorCode,
+ DWORD numBytesTransferred,
+ LPOVERLAPPED lpOverlapped);
+class CCLRIoCompletionManager :public ICLRIoCompletionManager
+{
+public:
+ virtual HRESULT STDMETHODCALLTYPE OnComplete(DWORD dwErrorCode,
+ DWORD NumberOfBytesTransferred,
+ void* pvOverlapped)
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ if (pvOverlapped)
+ {
+ BEGIN_ENTRYPOINT_NOTHROW;
+ HostIOCompletionCallback (dwErrorCode, NumberOfBytesTransferred, (LPOVERLAPPED)pvOverlapped);
+ END_ENTRYPOINT_NOTHROW;
+ }
+
+ return S_OK;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE AddRef(void)
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE Release(void)
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+ BEGIN_INTERFACE HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void **ppvObject)
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ LIMITED_METHOD_CONTRACT;
+ if (riid != IID_ICLRIoCompletionManager && riid != IID_IUnknown)
+ return (E_NOINTERFACE);
+ *ppvObject = this;
+ return S_OK;
+ }
+};
+
+static CCLRIoCompletionManager s_CLRIoCompletionManager;
+#endif // FEATURE_CORECLR
+
+#ifdef _DEBUG
+extern void ValidateHostInterface();
+#endif
+
+// fusion's global copy of host assembly manager stuff
+BOOL g_bFusionHosted = FALSE;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ICLRAssemblyReferenceList *g_pHostAsmList = NULL;
+IHostAssemblyStore *g_pHostAssemblyStore = NULL;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+/*static*/ BOOL CorHost2::IsLoadFromBlocked() // LoadFrom, LoadFile and Load(byte[]) are blocked in certain hosting scenarios
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ return (g_bFusionHosted && (g_pHostAsmList != NULL));
+#else // !FEATURE_INCLUDE_ALL_INTERFACES
+    return FALSE; // g_pHostAsmList is not defined for CoreCLR, so the expression above would always be FALSE.
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+
+static Volatile<BOOL> fOneOnly = 0;
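+// fOneOnly is a simple spin gate: SetHostControl exchanges it to 1 on entry
+// and resets it to 0 at ErrExit, so only one caller at a time can install
+// host managers.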
+
+///////////////////////////////////////////////////////////////////////////////
+// ICLRRuntimeHost::SetHostControl
+///////////////////////////////////////////////////////////////////////////////
+HRESULT CorHost2::SetHostControl(IHostControl* pHostControl)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+ if (m_Version < 2)
+        // The CLR is hosted via the v1 hosting interface; parts of the v2 hosting API are disabled.
+ return HOST_E_INVALIDOPERATION;
+
+ if (pHostControl == 0)
+ return E_INVALIDARG;
+
+    // Once the runtime has started, do not allow installing host managers.
+ if (g_fEEStarted)
+ return E_ACCESSDENIED;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ DWORD dwSwitchCount = 0;
+
+ while (FastInterlockExchange((LONG*)&fOneOnly, 1) == 1)
+ {
+ #ifndef FEATURE_CORECLR
+ if (m_HostTaskManager != NULL)
+ {
+ m_HostTaskManager->SwitchToTask(0);
+ }
+ else
+ {
+ IHostTaskManager *pHostTaskManager = NULL;
+ if (pHostControl->GetHostManager(IID_IHostTaskManager, (void**)&pHostTaskManager) == S_OK &&
+ pHostTaskManager != NULL)
+ {
+ pHostTaskManager->SwitchToTask(0);
+ pHostTaskManager->Release();
+ }
+ else
+ {
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+ }
+ #else
+ __SwitchToThread(0, ++dwSwitchCount);
+ #endif // FEATURE_CORECLR
+ }
+
+#ifndef FEATURE_CORECLR
+
+#ifdef _DEBUG
+ ValidateHostInterface();
+#endif
+
+#ifdef _DEBUG
+ DWORD dbg_HostManagerConfig = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HostManagerConfig);
+#endif
+
+ IHostMemoryManager *memoryManager = NULL;
+ IHostTaskManager *taskManager = NULL;
+ IHostThreadpoolManager *threadpoolManager = NULL;
+ IHostIoCompletionManager *ioCompletionManager = NULL;
+ IHostSyncManager *syncManager = NULL;
+ IHostAssemblyManager *assemblyManager = NULL;
+ IHostGCManager *gcManager = NULL;
+ IHostSecurityManager *securityManager = NULL;
+ IHostPolicyManager *policyManager = NULL;
+
+ if (m_HostMemoryManager == NULL &&
+#ifdef _DEBUG
+ (dbg_HostManagerConfig & CLRMEMORYHOSTED) &&
+#endif
+ pHostControl->GetHostManager(IID_IHostMemoryManager,(void**)&memoryManager) == S_OK &&
+ memoryManager != NULL) {
+ if (m_HostMalloc == NULL)
+ {
+ hr = memoryManager->CreateMalloc (MALLOC_THREADSAFE, &m_HostMalloc);
+ if (hr == S_OK)
+ {
+ memoryManager->RegisterMemoryNotificationCallback(&s_MemoryNotification);
+ }
+ else
+ {
+ memoryManager->Release();
+ IfFailGo(E_UNEXPECTED);
+ }
+ }
+ m_HostMemoryManager = memoryManager;
+ g_fHostConfig |= CLRMEMORYHOSTED;
+ }
+
+ if (m_HostTaskManager == NULL &&
+#ifdef _DEBUG
+ (dbg_HostManagerConfig & CLRTASKHOSTED) &&
+#endif
+ pHostControl->GetHostManager(IID_IHostTaskManager,(void**)&taskManager) == S_OK &&
+ taskManager != NULL) {
+#ifdef _TARGET_ARM_ // @ARMTODO: re-enable once we support hosted p/invokes.
+ IfFailGo(E_NOTIMPL);
+#endif
+ m_HostTaskManager = taskManager;
+ m_HostTaskManager->SetCLRTaskManager(&s_CLRTaskManager);
+ g_fHostConfig |= CLRTASKHOSTED;
+ }
+
+ if (m_HostThreadpoolManager == NULL &&
+#ifdef _DEBUG
+ (dbg_HostManagerConfig & CLRTHREADPOOLHOSTED) &&
+#endif
+ pHostControl->GetHostManager(IID_IHostThreadpoolManager,(void**)&threadpoolManager) == S_OK &&
+ threadpoolManager != NULL) {
+ m_HostThreadpoolManager = threadpoolManager;
+ g_fHostConfig |= CLRTHREADPOOLHOSTED;
+ }
+
+ if (m_HostIoCompletionManager == NULL &&
+#ifdef _DEBUG
+ (dbg_HostManagerConfig & CLRIOCOMPLETIONHOSTED) &&
+#endif
+ pHostControl->GetHostManager(IID_IHostIoCompletionManager,(void**)&ioCompletionManager) == S_OK &&
+ ioCompletionManager != NULL) {
+ DWORD hostSize;
+ hr = ioCompletionManager->GetHostOverlappedSize(&hostSize);
+ if (FAILED(hr))
+ {
+ ioCompletionManager->Release();
+ IfFailGo(E_UNEXPECTED);
+ }
+ m_HostOverlappedExtensionSize = (int)hostSize;
+ m_HostIoCompletionManager = ioCompletionManager;
+ m_HostIoCompletionManager->SetCLRIoCompletionManager(&s_CLRIoCompletionManager);
+ g_fHostConfig |= CLRIOCOMPLETIONHOSTED;
+ }
+
+ if (m_HostSyncManager == NULL &&
+#ifdef _DEBUG
+ (dbg_HostManagerConfig & CLRSYNCHOSTED) &&
+#endif
+ pHostControl->GetHostManager(IID_IHostSyncManager,(void**)&syncManager) == S_OK &&
+ syncManager != NULL) {
+ m_HostSyncManager = syncManager;
+ m_HostSyncManager->SetCLRSyncManager(&s_CLRSyncManager);
+ g_fHostConfig |= CLRSYNCHOSTED;
+ }
+
+ if (m_HostAssemblyManager == NULL &&
+#ifdef _DEBUG
+ (dbg_HostManagerConfig & CLRASSEMBLYHOSTED) &&
+#endif
+ pHostControl->GetHostManager(IID_IHostAssemblyManager,(void**)&assemblyManager) == S_OK &&
+ assemblyManager != NULL) {
+
+ assemblyManager->GetAssemblyStore(&g_pHostAssemblyStore);
+
+ hr = assemblyManager->GetNonHostStoreAssemblies(&g_pHostAsmList);
+ if (FAILED(hr))
+ {
+ assemblyManager->Release();
+ IfFailGo(hr);
+ }
+
+ if (g_pHostAssemblyStore || g_pHostAsmList)
+ g_bFusionHosted = TRUE;
+ m_HostAssemblyManager = assemblyManager;
+ g_fHostConfig |= CLRASSEMBLYHOSTED;
+ }
+
+ if (m_HostGCManager == NULL &&
+#ifdef _DEBUG
+ (dbg_HostManagerConfig & CLRGCHOSTED) &&
+#endif
+ pHostControl->GetHostManager(IID_IHostGCManager,
+ (void**)&gcManager) == S_OK &&
+ gcManager != NULL) {
+ m_HostGCManager = gcManager;
+ g_fHostConfig |= CLRGCHOSTED;
+ }
+
+ if (m_HostSecurityManager == NULL &&
+#ifdef _DEBUG
+ (dbg_HostManagerConfig & CLRSECURITYHOSTED) &&
+#endif
+ pHostControl->GetHostManager(IID_IHostSecurityManager,
+ (void**)&securityManager) == S_OK &&
+ securityManager != NULL) {
+ g_fHostConfig |= CLRSECURITYHOSTED;
+ m_HostSecurityManager = securityManager;
+#ifdef FEATURE_CAS_POLICY
+ HostExecutionContextManager::InitializeRestrictedContext();
+#endif // #ifdef FEATURE_CAS_POLICY
+ }
+
+ if (m_HostPolicyManager == NULL &&
+ pHostControl->GetHostManager(IID_IHostPolicyManager,
+ (void**)&policyManager) == S_OK &&
+ policyManager != NULL) {
+ m_HostPolicyManager = policyManager;
+ }
+#endif //!FEATURE_CORECLR
+
+#ifdef FEATURE_LEGACYNETCF_DBG_HOST_CONTROL
+ IHostNetCFDebugControlManager *hostNetCFDebugControlManager = NULL;
+ if (m_HostNetCFDebugControlManager == NULL &&
+ pHostControl->GetHostManager(__uuidof(IHostNetCFDebugControlManager),
+ (void**)&hostNetCFDebugControlManager) == S_OK &&
+ hostNetCFDebugControlManager != NULL) {
+ m_HostNetCFDebugControlManager = hostNetCFDebugControlManager;
+ }
+#endif
+
+ if (m_HostControl == NULL)
+ {
+ m_HostControl = pHostControl;
+ m_HostControl->AddRef();
+ }
+
+ goto ErrExit;
+
+ErrExit:
+ fOneOnly = 0;
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+class CCLRPolicyManager: public ICLRPolicyManager
+{
+public:
+ virtual HRESULT STDMETHODCALLTYPE SetDefaultAction(EClrOperation operation,
+ EPolicyAction action)
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifndef FEATURE_CORECLR
+ STATIC_CONTRACT_ENTRY_POINT;
+ HRESULT hr;
+ BEGIN_ENTRYPOINT_NOTHROW;
+ hr = GetEEPolicy()->SetDefaultAction(operation, action);
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+#else // FEATURE_CORECLR
+ return E_NOTIMPL;
+#endif // !FEATURE_CORECLR
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE SetTimeout(EClrOperation operation,
+ DWORD dwMilliseconds)
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifndef FEATURE_CORECLR
+ STATIC_CONTRACT_ENTRY_POINT;
+ HRESULT hr;
+ BEGIN_ENTRYPOINT_NOTHROW;
+ hr = GetEEPolicy()->SetTimeout(operation,dwMilliseconds);
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+#else // FEATURE_CORECLR
+ return E_NOTIMPL;
+#endif // !FEATURE_CORECLR
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE SetActionOnTimeout(EClrOperation operation,
+ EPolicyAction action)
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifndef FEATURE_CORECLR
+ STATIC_CONTRACT_ENTRY_POINT;
+ HRESULT hr;
+ BEGIN_ENTRYPOINT_NOTHROW;
+ hr = GetEEPolicy()->SetActionOnTimeout(operation,action);
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+#else // FEATURE_CORECLR
+ return E_NOTIMPL;
+#endif // !FEATURE_CORECLR
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE SetTimeoutAndAction(EClrOperation operation, DWORD dwMilliseconds,
+ EPolicyAction action)
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifndef FEATURE_CORECLR
+ STATIC_CONTRACT_SO_TOLERANT;
+ HRESULT hr;
+ BEGIN_ENTRYPOINT_NOTHROW;
+ hr = GetEEPolicy()->SetTimeoutAndAction(operation,dwMilliseconds,action);
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+#else // FEATURE_CORECLR
+ return E_NOTIMPL;
+#endif // !FEATURE_CORECLR
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE SetActionOnFailure(EClrFailure failure,
+ EPolicyAction action)
+ {
+        // This is enabled for CoreCLR since a host can use it to specify
+        // the action taken when an access violation (AV) occurs.
+ STATIC_CONTRACT_ENTRY_POINT;
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr;
+#ifdef FEATURE_CORECLR
+ // For CoreCLR, this method just supports FAIL_AccessViolation as a valid
+ // failure input arg. The validation of the specified action for the failure
+ // will be done in EEPolicy::IsValidActionForFailure.
+ if (failure != FAIL_AccessViolation)
+ {
+ return E_INVALIDARG;
+ }
+#endif // FEATURE_CORECLR
+ BEGIN_ENTRYPOINT_NOTHROW;
+ hr = GetEEPolicy()->SetActionOnFailure(failure,action);
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE SetUnhandledExceptionPolicy(EClrUnhandledException policy)
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifndef FEATURE_CORECLR
+ STATIC_CONTRACT_ENTRY_POINT;
+ HRESULT hr;
+ BEGIN_ENTRYPOINT_NOTHROW;
+ hr = GetEEPolicy()->SetUnhandledExceptionPolicy(policy);
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+#else // FEATURE_CORECLR
+ return E_NOTIMPL;
+#endif // !FEATURE_CORECLR
+ }
+
+ virtual ULONG STDMETHODCALLTYPE AddRef(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return 1;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE Release(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return 1;
+ }
+
+ BEGIN_INTERFACE HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid,
+ void **ppvObject)
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ if (riid != IID_ICLRPolicyManager && riid != IID_IUnknown)
+ return (E_NOINTERFACE);
+
+ // Ensure that the out going pointer is not null
+ if (ppvObject == NULL)
+ return E_POINTER;
+
+ *ppvObject = this;
+ return S_OK;
+ }
+};
+
+static CCLRPolicyManager s_PolicyManager;
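+
+// Illustrative host-side sketch (pControl is a hypothetical ICLRControl): on
+// CoreCLR the policy manager is chiefly useful for choosing the action taken
+// on an access violation; EEPolicy::IsValidActionForFailure has the final say
+// on whether the chosen EPolicyAction (eThrowException here) is accepted.
+//
+//   ICLRPolicyManager *pPolicy = NULL;
+//   if (SUCCEEDED(pControl->GetCLRManager(IID_ICLRPolicyManager, (void **)&pPolicy)))
+//       pPolicy->SetActionOnFailure(FAIL_AccessViolation, eThrowException);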
+
+#ifndef FEATURE_CORECLR // not supported
+class CCLROnEventManager: public ICLROnEventManager
+{
+public:
+ virtual HRESULT STDMETHODCALLTYPE RegisterActionOnEvent(EClrEvent event,
+ IActionOnCLREvent *pAction)
+ {
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ ENTRY_POINT;
+
+ // This function is always called from outside the Runtime. So, we assert that we either don't have a
+ // managed thread, or if we do, that we're in preemptive GC mode.
+ PRECONDITION((GetThread() == NULL) || !GetThread()->PreemptiveGCDisabled());
+ }
+ CONTRACTL_END;
+
+ if (event >= MaxClrEvent || pAction == NULL || event < (EClrEvent)0)
+ return E_INVALIDARG;
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+        // Note: it's only safe to use a straight ReleaseHolder from within the VM directory when we know we're
+        // called from outside the Runtime. We assert that above, just to be sure.
+ ReleaseHolder<IActionOnCLREvent> actionHolder(pAction);
+ pAction->AddRef();
+
+ CrstHolderWithState ch(m_pLock);
+
+ DWORD dwSwitchCount = 0;
+ while (m_ProcessEvent != 0)
+ {
+ ch.Release();
+ __SwitchToThread(0, ++dwSwitchCount);
+ ch.Acquire();
+ }
+
+ if (m_pAction[event] == NULL)
+ {
+ m_pAction[event] = new (nothrow)ActionNode;
+ if (m_pAction[event] == NULL)
+ hr = E_OUTOFMEMORY;
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ ActionNode *walk = m_pAction[event];
+ while (TRUE)
+ {
+ int n = 0;
+ for ( ; n < ActionNode::ActionArraySize; n ++)
+ {
+ if (walk->pAction[n] == NULL)
+ {
+ walk->pAction[n] = pAction;
+ actionHolder.SuppressRelease();
+ hr = S_OK;
+ break;
+ }
+ }
+ if (n < ActionNode::ActionArraySize)
+ {
+ break;
+ }
+ if (walk->pNext == NULL)
+ {
+ walk->pNext = new (nothrow) ActionNode;
+ if (walk->pNext == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ break;
+ }
+ }
+ walk = walk->pNext;
+ }
+ }
+
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE UnregisterActionOnEvent(EClrEvent event,
+ IActionOnCLREvent *pAction)
+ {
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ if (event == Event_StackOverflow)
+ {
+            // We don't want to take a lock when we process the StackOverflow event,
+            // because we may not have enough stack to do it. So we do not release
+            // our cached callback, in order to avoid a race.
+ return HOST_E_INVALIDOPERATION;
+ }
+
+ HRESULT hr = S_OK;
+
+ ActionNode *walk = NULL;
+ ActionNode *prev = NULL;
+
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ CrstHolderWithState ch(m_pLock);
+
+ DWORD dwSwitchCount = 0;
+ while (m_ProcessEvent != 0)
+ {
+ ch.Release();
+ __SwitchToThread(0, ++dwSwitchCount);
+ ch.Acquire();
+ }
+
+ if (m_pAction[event] == NULL)
+ IfFailGo(HOST_E_INVALIDOPERATION);
+
+ walk = m_pAction[event];
+ while (walk)
+ {
+ BOOL fInUse = FALSE;
+ for (int n = 0; n < ActionNode::ActionArraySize; n ++)
+ {
+ if (prev && !fInUse && walk->pAction[n])
+ fInUse = TRUE;
+ if (walk->pAction[n] == pAction)
+ {
+ walk->pAction[n] = NULL;
+ ch.Release();
+ pAction->Release();
+ hr = S_OK;
+ goto ErrExit;
+ }
+ }
+ if (prev && !fInUse)
+ {
+ prev->pNext = walk->pNext;
+ delete walk;
+ walk = prev;
+ }
+ prev = walk;
+ walk = walk->pNext;
+ }
+ hr = HOST_E_INVALIDOPERATION;
+ErrExit:
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE AddRef(void)
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void **ppUnk)
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ LIMITED_METHOD_CONTRACT;
+ if (riid != IID_ICLROnEventManager && riid != IID_IUnknown)
+ return (E_NOINTERFACE);
+ *ppUnk = this;
+ return S_OK;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE Release(void)
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+    // This function works around an issue in scan.exe:
+    // scan.exe cannot correctly handle an if () {} else {} construct here.
+ void ProcessSOEvent(void *data)
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pLock == NULL)
+ return;
+
+ ActionNode *walk = m_pAction[Event_StackOverflow];
+
+ while (walk)
+ {
+ for (int n = 0; n < ActionNode::ActionArraySize; n ++)
+ {
+ if (walk->pAction[n])
+ {
+ walk->pAction[n]->OnEvent(Event_StackOverflow,data);
+ }
+ }
+ walk = walk->pNext;
+ }
+ }
+
+ void ProcessEvent(EClrEvent event, void *data)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pLock == NULL)
+ {
+ return;
+ }
+
+ _ASSERTE (event != Event_StackOverflow);
+
+ {
+ CrstHolder ch(m_pLock);
+
+ if (event == Event_ClrDisabled)
+ {
+ if (m_CLRDisabled)
+ {
+ return;
+ }
+ m_CLRDisabled = TRUE;
+ }
+ m_ProcessEvent ++;
+
+ // Release the lock around the call into the host. Is this correct?
+ // It seems that we need to hold the lock except for the actual callback itself.
+ }
+
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ {
+ ActionNode *walk = m_pAction[event];
+ while (walk)
+ {
+ for (int n = 0; n < ActionNode::ActionArraySize; n ++)
+ {
+ if (walk->pAction[n])
+ {
+ walk->pAction[n]->OnEvent(event,data);
+ }
+ }
+ walk = walk->pNext;
+ }
+ }
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ {
+ CrstHolder ch(m_pLock);
+ m_ProcessEvent --;
+ }
+ }
+
+ BOOL IsActionRegisteredForEvent(EClrEvent event)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // Check to see if the event manager has been set up.
+ if (m_pLock == NULL)
+ return FALSE;
+
+ CrstHolder ch(m_pLock);
+
+ ActionNode *walk = m_pAction[event];
+ while (walk)
+ {
+ for (int n = 0; n < ActionNode::ActionArraySize; n ++)
+ {
+ if (walk->pAction[n] != NULL)
+ {
+ // We found an action registered for this event.
+ return TRUE;
+ }
+ }
+ walk = walk->pNext;
+ }
+
+ // There weren't any actions registered.
+ return FALSE;
+ }
+
+ HRESULT Init()
+ {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ HRESULT hr = S_OK;
+ if (m_pLock == NULL)
+ {
+ EX_TRY
+ {
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ {
+ InitHelper();
+ }
+ END_SO_INTOLERANT_CODE;
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ return hr;
+ }
+
+#if 0
+ // We do not need this one. We have one instance of this class
+ // and it is static.
+ CCLROnEventManager()
+ {
+ LIMITED_METHOD_CONTRACT;
+ for (int n = 0; n < MaxClrEvent; n ++)
+ m_pAction[n] = NULL;
+ }
+#endif
+
+private:
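+    // ActionNode forms an unrolled linked list: each node carries a small
+    // fixed array of callback pointers and chains to a new node only when
+    // the array fills up.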
+ struct ActionNode
+ {
+ static const int ActionArraySize = 8;
+
+ IActionOnCLREvent *pAction[ActionArraySize];
+ ActionNode *pNext;
+
+ ActionNode ()
+ : pNext(NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ for (int n = 0; n < ActionArraySize; n ++)
+ pAction[n] = 0;
+ }
+ };
+ ActionNode *m_pAction[MaxClrEvent];
+
+ Crst* m_pLock;
+
+ BOOL m_CLRDisabled;
+
+    // We cannot call out into the host while holding the lock, yet we still
+    // need to keep our data consistent. Therefore, m_ProcessEvent is a marker
+    // that forbids Register and Unregister from touching the data structure.
+ DWORD m_ProcessEvent;
+
+ void InitHelper()
+ {
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_ProcessEvent = 0;
+
+ Crst* tmp = new Crst(CrstOnEventManager, CrstFlags(CRST_DEFAULT | CRST_DEBUGGER_THREAD));
+ if (FastInterlockCompareExchangePointer(&m_pLock, tmp, NULL) != NULL)
+ delete tmp;
+ }
+};
+
+static CCLROnEventManager s_OnEventManager;
+#endif // FEATURE_CORECLR
+
+
+void ProcessEventForHost(EClrEvent event, void *data)
+{
+#ifndef FEATURE_CORECLR
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE (event != Event_StackOverflow);
+
+ GCX_PREEMP();
+
+ s_OnEventManager.ProcessEvent(event,data);
+#endif // FEATURE_CORECLR
+}
+
+// We do not call ProcessEventForHost for stack overflow, since we have limited stack
+// and should avoid calling GCX_PREEMP.
+void ProcessSOEventForHost(EXCEPTION_POINTERS *pExceptionInfo, BOOL fInSoTolerant)
+{
+#ifndef FEATURE_CORECLR
+ WRAPPER_NO_CONTRACT;
+
+ StackOverflowInfo soInfo;
+ if (fInSoTolerant)
+ {
+ soInfo.soType = SO_Managed;
+ }
+ else if (pExceptionInfo == NULL || IsIPInModule(g_pMSCorEE, GetIP(pExceptionInfo->ContextRecord)))
+ {
+ soInfo.soType = SO_ClrEngine;
+ }
+ else
+ {
+ soInfo.soType = SO_Other;
+ }
+
+ soInfo.pExceptionInfo = pExceptionInfo;
+ s_OnEventManager.ProcessSOEvent(&soInfo);
+#endif // FEATURE_CORECLR
+}
+
+BOOL IsHostRegisteredForEvent(EClrEvent event)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_CORECLR
+ return FALSE;
+#else // FEATURE_CORECLR
+ return s_OnEventManager.IsActionRegisteredForEvent(event);
+#endif // FEATURE_CORECLR
+}
+
+inline size_t SizeInKBytes(size_t cbSize)
+{
+ LIMITED_METHOD_CONTRACT;
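+    // Round up: any non-zero remainder counts as one more KB.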
+ size_t cb = (cbSize % 1024) ? 1 : 0;
+ return ((cbSize / 1024) + cb);
+}
+
+SIZE_T Host_SegmentSize = 0;
+SIZE_T Host_MaxGen0Size = 0;
+BOOL Host_fSegmentSizeSet = FALSE;
+BOOL Host_fMaxGen0SizeSet = FALSE;
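+
+// A host may set these limits before the EE configuration object exists, so
+// the values are cached in the globals above and applied to g_pConfig later
+// by UpdateGCSettingFromHost.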
+
+void UpdateGCSettingFromHost ()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE (g_pConfig);
+ if (Host_fSegmentSizeSet)
+ {
+ g_pConfig->SetSegmentSize(Host_SegmentSize);
+ }
+ if (Host_fMaxGen0SizeSet)
+ {
+ g_pConfig->SetGCgen0size(Host_MaxGen0Size);
+ }
+}
+
+#if !defined(FEATURE_CORECLR) || defined(FEATURE_WINDOWSPHONE)
+class CCLRGCManager: public ICLRGCManager2
+{
+public:
+ virtual HRESULT STDMETHODCALLTYPE Collect(LONG Generation)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (Generation > (int) GCHeap::GetGCHeap()->GetMaxGeneration())
+ hr = E_INVALIDARG;
+
+ if (SUCCEEDED(hr))
+ {
+ // Set up a Thread object if this is called on a native thread.
+ Thread *pThread;
+ pThread = GetThread();
+ if (pThread == NULL)
+ pThread = SetupThreadNoThrow(&hr);
+ if (pThread != NULL)
+ {
+ BEGIN_ENTRYPOINT_NOTHROW_WITH_THREAD(pThread);
+ GCX_COOP();
+
+ EX_TRY
+ {
+ STRESS_LOG0(LF_GC, LL_INFO100, "Host triggers GC\n");
+ hr = GCHeap::GetGCHeap()->GarbageCollect(Generation);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_ENTRYPOINT_NOTHROW_WITH_THREAD;
+ }
+ }
+
+ return (hr);
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE GetStats(COR_GC_STATS *pStats)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ #if defined(ENABLE_PERF_COUNTERS)
+
+ Perf_GC *pgc = &GetPerfCounters().m_GC;
+
+ if (!pStats)
+ IfFailGo(E_INVALIDARG);
+
+ if (pStats->Flags & COR_GC_COUNTS)
+ {
+ pStats->ExplicitGCCount = pgc->cInducedGCs;
+
+ for (int idx=0; idx<3; idx++)
+ pStats->GenCollectionsTaken[idx] = pgc->cGenCollections[idx];
+ }
+
+ if (pStats->Flags & COR_GC_MEMORYUSAGE)
+ {
+ pStats->CommittedKBytes = SizeInKBytes(pgc->cTotalCommittedBytes);
+ pStats->ReservedKBytes = SizeInKBytes(pgc->cTotalReservedBytes);
+ pStats->Gen0HeapSizeKBytes = SizeInKBytes(pgc->cGenHeapSize[0]);
+ pStats->Gen1HeapSizeKBytes = SizeInKBytes(pgc->cGenHeapSize[1]);
+ pStats->Gen2HeapSizeKBytes = SizeInKBytes(pgc->cGenHeapSize[2]);
+ pStats->LargeObjectHeapSizeKBytes = SizeInKBytes(pgc->cLrgObjSize);
+ pStats->KBytesPromotedFromGen0 = SizeInKBytes(pgc->cbPromotedMem[0]);
+ pStats->KBytesPromotedFromGen1 = SizeInKBytes(pgc->cbPromotedMem[1]);
+ }
+ hr = S_OK;
+ErrExit:
+ #else
+ hr = E_NOTIMPL;
+ #endif // ENABLE_PERF_COUNTERS
+
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+ }
+ virtual HRESULT STDMETHODCALLTYPE SetGCStartupLimits(
+ DWORD SegmentSize,
+ DWORD MaxGen0Size)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ // Set default overrides if specified by caller.
+ if (SegmentSize != (DWORD) ~0 && SegmentSize > 0)
+ {
+ hr = _SetGCSegmentSize(SegmentSize);
+ }
+
+ if (SUCCEEDED(hr) && MaxGen0Size != (DWORD) ~0 && MaxGen0Size > 0)
+ {
+ hr = _SetGCMaxGen0Size(MaxGen0Size);
+ }
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return (hr);
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE SetGCStartupLimitsEx(
+ SIZE_T SegmentSize,
+ SIZE_T MaxGen0Size)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ // Set default overrides if specified by caller.
+ if (SegmentSize != (SIZE_T) ~0 && SegmentSize > 0)
+ {
+ hr = _SetGCSegmentSize(SegmentSize);
+ }
+
+ if (SUCCEEDED(hr) && MaxGen0Size != (SIZE_T) ~0 && MaxGen0Size > 0)
+ {
+ hr = _SetGCMaxGen0Size(MaxGen0Size);
+ }
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return (hr);
+ }
+
+ virtual ULONG STDMETHODCALLTYPE AddRef(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, OUT PVOID *ppUnk)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (riid != IID_ICLRGCManager && riid != IID_ICLRGCManager2 && riid != IID_IUnknown)
+ return (E_NOINTERFACE);
+ *ppUnk = this;
+ return S_OK;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE Release(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+private:
+ HRESULT _SetGCSegmentSize(SIZE_T SegmentSize);
+ HRESULT _SetGCMaxGen0Size(SIZE_T MaxGen0Size);
+};
+
+
+HRESULT CCLRGCManager::_SetGCSegmentSize(SIZE_T SegmentSize)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Sanity check the value, it must be a power of two and big enough.
+ if (!GCHeap::IsValidSegmentSize(SegmentSize))
+ {
+ hr = E_INVALIDARG;
+ }
+ else
+ {
+ Host_SegmentSize = SegmentSize;
+ Host_fSegmentSizeSet = TRUE;
+ }
+
+ return (hr);
+}
+
+HRESULT CCLRGCManager::_SetGCMaxGen0Size(SIZE_T MaxGen0Size)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Sanity check the value is at least large enough.
+ if (!GCHeap::IsValidGen0MaxSize(MaxGen0Size))
+ {
+ hr = E_INVALIDARG;
+ }
+ else
+ {
+ Host_MaxGen0Size = MaxGen0Size;
+ Host_fMaxGen0SizeSet = TRUE;
+ }
+
+ return (hr);
+}
+
+static CCLRGCManager s_GCManager;
+#endif // !FEATURE_CORECLR || FEATURE_WINDOWSPHONE
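+
+// Illustrative host-side sketch (pControl is a hypothetical ICLRControl): per
+// the public ICLRGCManager documentation, a Generation argument of -1 requests
+// a collection of all generations.
+//
+//   ICLRGCManager *pGC = NULL;
+//   if (SUCCEEDED(pControl->GetCLRManager(IID_ICLRGCManager, (void **)&pGC)))
+//       pGC->Collect(-1);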
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+class CCLRAppDomainResourceMonitor : public ICLRAppDomainResourceMonitor
+{
+public:
+ virtual HRESULT STDMETHODCALLTYPE GetCurrentAllocated(DWORD dwAppDomainId,
+ ULONGLONG* pBytesAllocated)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ SystemDomain::LockHolder lh;
+ AppDomainFromIDHolder pAppDomain((ADID)dwAppDomainId, TRUE, AppDomainFromIDHolder::SyncType_ADLock);
+
+ if (!pAppDomain.IsUnloaded())
+ {
+ if (pBytesAllocated)
+ {
+ *pBytesAllocated = pAppDomain->GetAllocBytes();
+ }
+ }
+ else
+ {
+ hr = COR_E_APPDOMAINUNLOADED;
+ }
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return (hr);
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE GetCurrentSurvived(DWORD dwAppDomainId,
+ ULONGLONG* pAppDomainBytesSurvived,
+ ULONGLONG* pTotalBytesSurvived)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ SystemDomain::LockHolder lh;
+ AppDomainFromIDHolder pAppDomain((ADID)dwAppDomainId, TRUE, AppDomainFromIDHolder::SyncType_ADLock);
+
+ if (pAppDomain.IsUnloaded())
+ {
+ hr = COR_E_APPDOMAINUNLOADED;
+ }
+ else
+ {
+ if (pAppDomainBytesSurvived)
+ {
+ *pAppDomainBytesSurvived = pAppDomain->GetSurvivedBytes();
+ }
+ if (pTotalBytesSurvived)
+ {
+ *pTotalBytesSurvived = SystemDomain::GetTotalSurvivedBytes();
+ }
+ }
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return (hr);
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE GetCurrentCpuTime(DWORD dwAppDomainId,
+ ULONGLONG* pMilliseconds)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ {
+ SystemDomain::LockHolder lh;
+
+ {
+ AppDomainFromIDHolder pAppDomain((ADID)dwAppDomainId, TRUE, AppDomainFromIDHolder::SyncType_ADLock);
+
+ if (!pAppDomain.IsUnloaded())
+ {
+ if (pMilliseconds)
+ {
+ *pMilliseconds = pAppDomain->QueryProcessorUsage() / 10000;
+ }
+ }
+ else
+ {
+ hr = COR_E_APPDOMAINUNLOADED;
+ }
+ }
+ }
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE AddRef(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, OUT PVOID *ppUnk)
+ {
+ LIMITED_METHOD_CONTRACT;
+ *ppUnk = NULL;
+ if (riid == IID_IUnknown)
+ *ppUnk = (IUnknown*)this;
+ else if (riid == IID_ICLRAppDomainResourceMonitor)
+ *ppUnk = (ICLRAppDomainResourceMonitor*)this;
+ else
+ return E_NOINTERFACE;
+ return S_OK;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE Release(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+};
+static CCLRAppDomainResourceMonitor s_Arm;
+#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
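+
+// Illustrative sketch (pArm and dwAdId are hypothetical): once ARM is enabled,
+// a host can poll per-appdomain figures such as allocated bytes.
+//
+//   ULONGLONG cbAllocated = 0;
+//   HRESULT hr = pArm->GetCurrentAllocated(dwAdId, &cbAllocated);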
+
+#ifdef FEATURE_APTCA
+class CLRDomainManager : public ICLRDomainManager
+{
+public:
+ virtual HRESULT STDMETHODCALLTYPE SetAppDomainManagerType(__in LPCWSTR wszAppDomainManagerAssembly,
+ __in LPCWSTR wszAppDomainManagerType,
+ EInitializeNewDomainFlags dwInitializeDomainFlags)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ hr = CorHost2::SetAppDomainManagerType(wszAppDomainManagerAssembly,
+ wszAppDomainManagerType,
+ dwInitializeDomainFlags);
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE SetPropertiesForDefaultAppDomain(DWORD nProperties,
+ __in_ecount(nProperties) LPCWSTR *pwszPropertyNames,
+ __in_ecount(nProperties) LPCWSTR *pwszPropertyValues)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ hr = CorHost2::SetPropertiesForDefaultAppDomain(nProperties, pwszPropertyNames, pwszPropertyValues);
+
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE AddRef()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE Release()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(__in REFIID riid, __out LPVOID *ppvObject)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (ppvObject == NULL)
+ return E_POINTER;
+
+ *ppvObject = NULL;
+
+ if (riid == IID_ICLRDomainManager)
+ {
+ *ppvObject = this;
+ }
+ else if (riid == IID_IUnknown)
+ {
+ *ppvObject = static_cast<IUnknown *>(this);
+ }
+
+ if (*ppvObject == NULL)
+ return E_NOINTERFACE;
+
+ AddRef();
+ return S_OK;
+ }
+};
+
+static CLRDomainManager s_CLRDomainManager;
+#endif // FEATURE_APTCA
+
+BYTE g_CorHostProtectionManagerInstance[sizeof(CorHostProtectionManager)];
+
+void InitHostProtectionManager()
+{
+ WRAPPER_NO_CONTRACT;
+ new (g_CorHostProtectionManagerInstance) CorHostProtectionManager();
+}
+
+BOOL g_CLRPolicyRequested = FALSE;
+
+class CCorCLRControl: public ICLRControl
+{
+public:
+ virtual HRESULT STDMETHODCALLTYPE GetCLRManager(REFIID riid, void **ppObject)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT; // no global state updates
+ }
+ CONTRACTL_END;
+
+ // Sanity check.
+ if (ppObject == NULL)
+ return E_INVALIDARG;
+
+#ifndef FEATURE_CORECLR
+    // ErrorReportingManager is allowed even if the runtime has started, so
+    // make this check first.
+    // The host must call Release on CLRErrorReportingManager after this call.
+ if (riid == IID_ICLRErrorReportingManager)
+ {
+ *ppObject = &g_CLRErrorReportingManager;
+ return S_OK;
+ }
+ else
+#elif defined(FEATURE_WINDOWSPHONE)
+ if (riid == IID_ICLRErrorReportingManager2)
+ {
+ *ppObject = &g_CLRErrorReportingManager;
+ return S_OK;
+ }
+ else
+#endif // !FEATURE_CORECLR || defined(FEATURE_WINDOWSPHONE)
+ if (g_fEEStarted && !m_fFullAccess)
+ {
+ // If runtime has been started, do not allow user to obtain CLR managers.
+ return HOST_E_INVALIDOPERATION;
+ }
+#ifndef FEATURE_CORECLR
+ else if (riid == IID_ICLRTaskManager) {
+ *ppObject = &s_CLRTaskManager;
+ return S_OK;
+ }
+#endif // !FEATURE_CORECLR
+
+ // CoreCLR supports ICLRPolicyManager since it allows the host
+ // to specify the policy for AccessViolation.
+ else if (riid == IID_ICLRPolicyManager) {
+ *ppObject = &s_PolicyManager;
+ FastInterlockExchange((LONG*)&g_CLRPolicyRequested, TRUE);
+ return S_OK;
+ }
+#ifndef FEATURE_CORECLR
+ else if (riid == IID_ICLRHostProtectionManager) {
+ *ppObject = GetHostProtectionManager();
+ return S_OK;
+ }
+
+    // The host must call Release on CLRDebugManager after this call.
+ else if (riid == IID_ICLRDebugManager)
+ {
+ *ppObject = &s_CLRDebugManager;
+ return S_OK;
+ }
+
+ else if (riid == IID_ICLROnEventManager)
+ {
+ HRESULT hr = s_OnEventManager.Init();
+ if (FAILED(hr))
+ return hr;
+ *ppObject = &s_OnEventManager;
+ return S_OK;
+ }
+#endif // !FEATURE_CORECLR
+
+#if !defined(FEATURE_CORECLR) || defined(FEATURE_WINDOWSPHONE)
+ else if ((riid == IID_ICLRGCManager) || (riid == IID_ICLRGCManager2))
+ {
+ *ppObject = &s_GCManager;
+ return S_OK;
+ }
+#endif // !FEATURE_CORECLR || FEATURE_WINDOWSPHONE
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ else if (riid == IID_ICLRAppDomainResourceMonitor)
+ {
+ EnableARM();
+ *ppObject = &s_Arm;
+ return S_OK;
+ }
+#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
+#ifdef FEATURE_APTCA
+ else if (riid == IID_ICLRDomainManager)
+ {
+ *ppObject = &s_CLRDomainManager;
+ return S_OK;
+ }
+#endif // FEATURE_APTCA
+ else
+ return (E_NOINTERFACE);
+ }
+
+ virtual HRESULT STDMETHODCALLTYPE SetAppDomainManagerType(
+ LPCWSTR pwzAppDomainManagerAssembly,
+ LPCWSTR pwzAppDomainManagerType)
+ {
+#ifndef FEATURE_CORECLR
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT; // no global state updates
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ hr = CorHost2::SetAppDomainManagerType(pwzAppDomainManagerAssembly,
+ pwzAppDomainManagerType,
+ eInitializeNewDomainFlags_None);
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+#else // FEATURE_CORECLR
+
+ // CoreCLR does not support this method
+ return E_NOTIMPL;
+#endif // !FEATURE_CORECLR
+ }
+
+ virtual ULONG STDMETHODCALLTYPE AddRef(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+ virtual ULONG STDMETHODCALLTYPE Release(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+ BEGIN_INTERFACE HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid,
+ void **ppvObject)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (riid != IID_ICLRControl && riid != IID_IUnknown)
+ return (E_NOINTERFACE);
+
+ // Ensure that the out going pointer is not null
+ if (ppvObject == NULL)
+ return E_POINTER;
+
+ *ppvObject = this;
+ return S_OK;
+ }
+
+    // This avoids having a constructor. We have static objects, and it is
+    // difficult to support constructors on certain platforms.
+ void SetAccess(BOOL fFullAccess)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_fFullAccess = fFullAccess;
+ }
+private:
+ BOOL m_fFullAccess;
+};
+
+// Before CLR starts, we give out s_CorCLRControl which has full access to all managers.
+// After CLR starts, we give out s_CorCLRControlLimited which allows limited access to managers.
+static CCorCLRControl s_CorCLRControl;
+
+#ifndef FEATURE_CORECLR
+static CCorCLRControl s_CorCLRControlLimited;
+#endif // FEATURE_CORECLR
+
+///////////////////////////////////////////////////////////////////////////////
+// ICLRRuntimeHost::GetCLRControl
+HRESULT CorHost2::GetCLRControl(ICLRControl** pCLRControl)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Ensure that we have a valid pointer
+ if (pCLRControl == NULL)
+ {
+ return E_POINTER;
+ }
+
+ HRESULT hr = S_OK;
+
+ STATIC_CONTRACT_ENTRY_POINT;
+ BEGIN_ENTRYPOINT_NOTHROW;
+ if (!g_fEEStarted && m_Version >= 2)
+ {
+ s_CorCLRControl.SetAccess(TRUE);
+ *pCLRControl = &s_CorCLRControl;
+ }
+ else
+ {
+#ifndef FEATURE_CORECLR
+        // Even if the CLR is hosted via the v1 hosting interface, we still allow parts of CLRControl, like IID_ICLRErrorReportingManager.
+ s_CorCLRControlLimited.SetAccess(FALSE);
+ *pCLRControl = &s_CorCLRControlLimited;
+#else // FEATURE_CORECLR
+        // If:
+        //   1) the request is for an interface other than ICLRControl*, OR
+        //   2) the runtime has already started, OR
+        //   3) the version is not 2,
+        //
+        // we return failure and set the out pointer to NULL.
+ *pCLRControl = NULL;
+ if (g_fEEStarted)
+ {
+ // Return HOST_E_INVALIDOPERATION as per MSDN if runtime has already started
+ hr = HOST_E_INVALIDOPERATION;
+ }
+ else
+ {
+ hr = E_NOTIMPL;
+ }
+#endif // !FEATURE_CORECLR
+ }
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
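+
+// Illustrative host-side sketch (pHost is a hypothetical ICLRRuntimeHost2):
+// GetCLRControl only succeeds before the runtime starts, and the individual
+// managers are then fetched through ICLRControl::GetCLRManager (see
+// CCorCLRControl::GetCLRManager above).
+//
+//   ICLRControl *pControl = NULL;
+//   if (SUCCEEDED(pHost->GetCLRControl(&pControl)))
+//   {
+//       // e.g. pControl->GetCLRManager(IID_ICLRPolicyManager, ...);
+//   }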
+
+#ifndef FEATURE_CORECLR
+
+// static
+HRESULT CorHost2::SetPropertiesForDefaultAppDomain(DWORD nProperties,
+ __in_ecount(nProperties) LPCWSTR *pwszPropertyNames,
+ __in_ecount(nProperties) LPCWSTR *pwszPropertyValues)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Default domain properties can only be set before the CLR has started
+ if (g_fEEStarted || HasStarted())
+ {
+ return HOST_E_INVALIDOPERATION;
+ }
+
+ // If the host is specifying properties, they should be there
+ if (nProperties > 0 && (pwszPropertyNames == NULL || pwszPropertyValues == NULL))
+ {
+ return E_POINTER;
+ }
+
+ // v4 - since this property is being added late in the cycle to address a specific scenario, we
+    // reject any attempt to set anything but a single well-known property name. This restriction
+ // can be removed in the future.
+ for (DWORD iProperty = 0; iProperty < nProperties; ++iProperty)
+ {
+ if (pwszPropertyNames[iProperty] == NULL)
+ {
+ return E_POINTER;
+ }
+ if (pwszPropertyValues[iProperty] == NULL)
+ {
+ return E_POINTER;
+ }
+ if (wcscmp(PARTIAL_TRUST_VISIBLE_ASSEMBLIES_PROPERTY, pwszPropertyNames[iProperty]) != 0)
+ {
+ return HRESULT_FROM_WIN32(ERROR_UNKNOWN_PROPERTY);
+ }
+ }
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ for (DWORD iProperty = 0; iProperty < nProperties; ++iProperty)
+ {
+ SString propertyName(pwszPropertyNames[iProperty]);
+ s_defaultDomainPropertyNames.Append(propertyName);
+
+ SString propertyValue(pwszPropertyValues[iProperty]);
+ s_defaultDomainPropertyValues.Append(propertyValue);
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+// static
+HRESULT CorHost2::SetAppDomainManagerType(LPCWSTR wszAppDomainManagerAssembly,
+ LPCWSTR wszAppDomainManagerType,
+ EInitializeNewDomainFlags dwInitializeDomainFlags)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // The AppDomainManager can only be set by the host before the CLR has started
+ if (g_fEEStarted || HasStarted())
+ {
+ return HOST_E_INVALIDOPERATION;
+ }
+
+ // Both the type and assembly must be specified
+ if (wszAppDomainManagerAssembly == NULL || wszAppDomainManagerType == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ // Make sure we understand the incoming flags
+ const EInitializeNewDomainFlags knownFlags = eInitializeNewDomainFlags_NoSecurityChanges;
+ if ((dwInitializeDomainFlags & (~knownFlags)) != eInitializeNewDomainFlags_None)
+ {
+ return E_INVALIDARG;
+ }
+
+ // Get a copy of the AppDomainManager assembly
+ size_t cchAsm = wcslen(wszAppDomainManagerAssembly) + 1;
+ NewArrayHolder<WCHAR> wszAppDomainManagerAssemblyCopy(new (nothrow) WCHAR[cchAsm]);
+ if (wszAppDomainManagerAssemblyCopy == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ wcsncpy_s(wszAppDomainManagerAssemblyCopy, cchAsm, wszAppDomainManagerAssembly, cchAsm - 1);
+
+ // And of the AppDomainManagerType
+ size_t cchType = wcslen(wszAppDomainManagerType) + 1;
+ NewArrayHolder<WCHAR> wszAppDomainManagerTypeCopy(new (nothrow) WCHAR[cchType]);
+ if (wszAppDomainManagerTypeCopy == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ wcsncpy_s(wszAppDomainManagerTypeCopy, cchType, wszAppDomainManagerType, cchType - 1);
+
+ LPCWSTR wszOldAsmValue = FastInterlockCompareExchangePointer(&s_wszAppDomainManagerAsm,
+ static_cast<LPCWSTR>(wszAppDomainManagerAssemblyCopy.GetValue()),
+ NULL);
+ if (wszOldAsmValue != NULL)
+ {
+ // We've tried to setup an AppDomainManager twice ... that's not allowed
+ return HOST_E_INVALIDOPERATION;
+ }
+
+ s_wszAppDomainManagerType = wszAppDomainManagerTypeCopy;
+ s_dwDomainManagerInitFlags = dwInitializeDomainFlags;
+
+ wszAppDomainManagerAssemblyCopy.SuppressRelease();
+ wszAppDomainManagerTypeCopy.SuppressRelease();
+ return S_OK;
+}
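+
+// Illustrative (hypothetical) call, made before the runtime starts; the assembly
+// and type names below are made up:
+//
+//     HRESULT hr = CorHost2::SetAppDomainManagerType(
+//         W("MyHost"),                          // AppDomainManager assembly
+//         W("MyHost.MyAppDomainManager"),       // AppDomainManager type
+//         eInitializeNewDomainFlags_None);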
+#endif // !FEATURE_CORECLR
+
+LPCWSTR CorHost2::GetAppDomainManagerAsm()
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_CORECLR
+ return NULL;
+#else // FEATURE_CORECLR
+ _ASSERTE (g_fEEStarted);
+ return s_wszAppDomainManagerAsm;
+#endif // FEATURE_CORECLR
+}
+
+LPCWSTR CorHost2::GetAppDomainManagerType()
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_CORECLR
+ return NULL;
+#else // FEATURE_CORECLR
+ _ASSERTE (g_fEEStarted);
+ return s_wszAppDomainManagerType;
+#endif // FEATURE_CORECLR
+}
+
+// static
+EInitializeNewDomainFlags CorHost2::GetAppDomainManagerInitializeNewDomainFlags()
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_CORECLR
+ return eInitializeNewDomainFlags_None;
+#else // FEATURE_CORECLR
+ _ASSERTE (g_fEEStarted);
+ return s_dwDomainManagerInitFlags;
+#endif // FEATURE_CORECLR
+}
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+// We do not implement Release since our host does not control the lifetime of this object
+ULONG CCLRDebugManager::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (1);
+}
+
+HRESULT CCLRDebugManager::QueryInterface(REFIID riid, void **ppUnk)
+{
+ if (!ppUnk)
+ return E_POINTER;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ *ppUnk = 0;
+
+ // Deliberately do NOT hand out ICorConfiguration. They must explicitly call
+ // GetConfiguration to obtain that interface.
+ if (riid == IID_IUnknown)
+ {
+ *ppUnk = (IUnknown *) this;
+ }
+ else if (riid == IID_ICLRDebugManager)
+ {
+ *ppUnk = (ICLRDebugManager *) this;
+ }
+ else
+ {
+ hr = E_NOINTERFACE;
+ }
+
+ return hr;
+
+}
+
+/*
+*
+* Called once at process start-up to initialize the lock for the connection name hash table.
+*
+*/
+void CCLRDebugManager::ProcessInit()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ m_lockConnectionNameTable.Init(CrstConnectionNameTable, (CrstFlags) (CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD));
+}
+
+/*
+* Called once at process shutdown to destroy the lock for the connection name hash table.
+*
+*/
+void CCLRDebugManager::ProcessCleanup()
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ m_lockConnectionNameTable.Destroy();
+}
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#endif // !DAC
+
+
+#ifdef DACCESS_COMPILE
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+
+//---------------------------------------------------------------------------------------
+// Begin iterating over connections for the debugger
+//
+// Arguments:
+// pHashfind - out: initializes a cookie to pass to future calls to code:CCLRDebugManager.FindNext
+//
+// Returns:
+// NULL if iteration is done. Else a ConnectionNameHashEntry representing the connection.
+//
+ConnectionNameHashEntry * CCLRDebugManager::FindFirst(HASHFIND * pHashfind)
+{
+ SUPPORTS_DAC;
+ if (m_pConnectionNameHash == NULL)
+ {
+ return NULL;
+ }
+
+ ConnectionNameHashEntry * pConnection = dac_cast<PTR_ConnectionNameHashEntry>(m_pConnectionNameHash->FindFirstEntry(pHashfind));
+ return pConnection;
+}
+
+//---------------------------------------------------------------------------------------
+// Continue iterating over connections for the debugger
+//
+// Arguments:
+// pHashfind - in/out: iterator cookie to pass to future calls to code:CCLRDebugManager.FindNext
+//
+// Returns:
+// NULL if iteration is done. Else a ConnectionNameHashEntry representing the connection.
+//
+ConnectionNameHashEntry * CCLRDebugManager::FindNext(HASHFIND * pHashfind)
+{
+ SUPPORTS_DAC;
+ ConnectionNameHashEntry * pConnection = dac_cast<PTR_ConnectionNameHashEntry>(m_pConnectionNameHash->FindNextEntry(pHashfind));
+ return pConnection;
+}
+
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#endif //DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+HRESULT CCLRDebugManager::IsDebuggerAttached(BOOL *pbAttached)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ if (pbAttached == NULL)
+ return E_INVALIDARG;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+#ifdef DEBUGGING_SUPPORTED
+ *pbAttached = (CORDebuggerAttached() != 0);
+#else
+ *pbAttached = FALSE;
+#endif
+
+ END_ENTRYPOINT_NOTHROW;
+
+
+ return S_OK;
+}
+
+// By default, we permit symbols to be read for full-trust assemblies only
+ESymbolReadingSetBy CCLRDebugManager::m_symbolReadingSetBy = eSymbolReadingSetByDefault;
+ESymbolReadingPolicy CCLRDebugManager::m_symbolReadingPolicy = eSymbolReadingFullTrustOnly;
+
+HRESULT CCLRDebugManager::SetSymbolReadingPolicy(ESymbolReadingPolicy policy)
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ if( policy > eSymbolReadingFullTrustOnly )
+ {
+ return E_INVALIDARG;
+ }
+
+ SetSymbolReadingPolicy( policy, eSymbolReadingSetByHost );
+
+ return S_OK;
+}
+
+void CCLRDebugManager::SetSymbolReadingPolicy( ESymbolReadingPolicy policy, ESymbolReadingSetBy setBy )
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE( policy <= eSymbolReadingFullTrustOnly ); // there is no _COUNT value because that's not the convention for mscoree.idl enums
+ _ASSERTE( setBy < eSymbolReadingSetBy_COUNT );
+
+ // if the setter meets or exceeds the precedence of the existing setting then override the setting
+ if( setBy >= m_symbolReadingSetBy )
+ {
+ m_symbolReadingSetBy = setBy;
+ m_symbolReadingPolicy = policy;
+ }
+}
+
+
+/*
+* Called by the host to set the name of a connection and begin a connection.
+*
+*/
+HRESULT CCLRDebugManager::BeginConnection(
+ CONNID dwConnectionId,
+ __in_z wchar_t *wzConnectionName) // We should review this in the future. This API is
+ // public and callable by a host. This SAL annotation
+ // is the best we can do now.
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS; // Neither GC_TRIGGERS nor GC_NOTRIGGER is quite right here: the debugger
+ // callback event needs to enable preemptive GC.
+ ENTRY_POINT;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ ConnectionNameHashEntry *pEntry = NULL;
+
+ // check input parameter
+ if (dwConnectionId == INVALID_CONNECTION_ID || wzConnectionName == NULL || wzConnectionName[0] == W('\0'))
+ IfFailGo(E_INVALIDARG);
+
+ if (wcslen(wzConnectionName) >= MAX_CONNECTION_NAME)
+ IfFailGo(E_INVALIDARG);
+
+ {
+ CrstHolder ch(&m_lockConnectionNameTable);
+
+ if (m_pConnectionNameHash == NULL)
+ {
+ m_pConnectionNameHash = new (nothrow) ConnectionNameTable(50);
+ IfNullGo(m_pConnectionNameHash);
+ IfFailGo(m_pConnectionNameHash->NewInit(50, sizeof(ConnectionNameHashEntry), USHRT_MAX));
+ }
+
+ // error: Should not have an existing connection id already
+ if (m_pConnectionNameHash->FindConnection(dwConnectionId))
+ IfFailGo(E_INVALIDARG);
+
+ // Our hashtable implementation cannot throw an out-of-memory exception; it returns NULL instead
+ pEntry = m_pConnectionNameHash->AddConnection(dwConnectionId, wzConnectionName);
+ IfNullGo(pEntry);
+ }
+
+ // send notification to debugger
+ if (CORDebuggerAttached())
+ {
+ g_pDebugInterface->CreateConnection(dwConnectionId, wzConnectionName);
+ }
+
+ErrExit:
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+/*
+* Called by the host to end a connection.
+*/
+HRESULT CCLRDebugManager::EndConnection(CONNID dwConnectionId)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ UINT CLRTaskCount = 0;
+ ICLRTask **ppCLRTaskArray = NULL;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ if (dwConnectionId == INVALID_CONNECTION_ID)
+ IfFailGo(E_INVALIDARG);
+
+ // No connections exist at all
+ if (m_pConnectionNameHash == NULL)
+ IfFailGo(E_FAIL);
+
+ {
+ CrstHolder ch(&m_lockConnectionNameTable);
+ ConnectionNameHashEntry *pEntry = NULL;
+
+ if ((pEntry = m_pConnectionNameHash->FindConnection(dwConnectionId)) == NULL)
+ IfFailGo(E_INVALIDARG);
+
+ // Note that the Release on a CLRTask could take the ThreadStoreLock, so we need to finish our
+ // business with ConnectionNameHash beforehand and release the name hash lock
+ //
+ CLRTaskCount = pEntry->m_CLRTaskCount;
+ ppCLRTaskArray = pEntry->m_ppCLRTaskArray;
+ pEntry->m_ppCLRTaskArray = NULL;
+ pEntry->m_CLRTaskCount = 0;
+ m_pConnectionNameHash->DeleteConnection(dwConnectionId);
+ }
+
+ if (CLRTaskCount != 0)
+ {
+ _ASSERTE(ppCLRTaskArray != NULL);
+ for (UINT i = 0; i < CLRTaskCount; i++)
+ {
+ ((Thread *)ppCLRTaskArray[i])->SetConnectionId(INVALID_CONNECTION_ID);
+ ppCLRTaskArray[i]->Release();
+ }
+ delete [] ppCLRTaskArray;
+ }
+
+ // send notification to debugger
+ if (CORDebuggerAttached())
+ g_pDebugInterface->DestroyConnection(dwConnectionId);
+
+ErrExit:
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+/*
+* Called by the host to associate a set of tasks with a connection.
+*
+*/
+HRESULT CCLRDebugManager::SetConnectionTasks(
+ DWORD id,
+ DWORD dwCount,
+ ICLRTask **ppCLRTask)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ ICLRTask **ppCLRTaskArrayNew = NULL;
+ UINT CLRTaskCountPrevious = 0;
+ ICLRTask **ppCLRTaskArrayPrevious = NULL;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ DWORD index;
+ Thread *pThread;
+ ConnectionNameHashEntry *pEntry = NULL;
+
+ if (id == INVALID_CONNECTION_ID || dwCount == 0 || ppCLRTask == NULL)
+ IfFailGo(E_INVALIDARG);
+
+ {
+ CrstHolder ch(&m_lockConnectionNameTable);
+
+ // check that BeginConnection has been called.
+ if (m_pConnectionNameHash == NULL)
+ // No connections exist
+ IfFailGo(E_INVALIDARG);
+
+ // The host forgot to call BeginConnection before calling SetConnectionTasks!
+ if ((pEntry = m_pConnectionNameHash->FindConnection(id)) == NULL)
+ IfFailGo(E_INVALIDARG);
+
+ for (index = 0; index < dwCount; index++)
+ {
+ // Check on input parameter
+ pThread = (Thread *) ppCLRTask[index];
+ if (pThread == NULL)
+ {
+ // _ASSERTE(!"Host passed in NULL ICLRTask pointer");
+ IfFailGo(E_INVALIDARG);
+ }
+
+ // Check for Finalizer thread
+ if (GCHeap::IsGCHeapInitialized() && (pThread == FinalizerThread::GetFinalizerThread()))
+ {
+ // _ASSERTE(!"Host should not try to schedule user code on our Finalizer Thread");
+ IfFailGo(E_INVALIDARG);
+
+ }
+ }
+
+ ppCLRTaskArrayNew = new (nothrow) ICLRTask*[dwCount];
+ IfNullGo(ppCLRTaskArrayNew);
+
+ CLRTaskCountPrevious = pEntry->m_CLRTaskCount;
+ ppCLRTaskArrayPrevious = pEntry->m_ppCLRTaskArray;
+ pEntry->m_ppCLRTaskArray = NULL;
+ pEntry->m_CLRTaskCount = 0;
+
+ if (CLRTaskCountPrevious != 0)
+ {
+ // Clear the old connection set
+ _ASSERTE(ppCLRTaskArrayPrevious != NULL);
+ for (UINT i = 0; i < CLRTaskCountPrevious; i++)
+ ((Thread *)ppCLRTaskArrayPrevious[i])->SetConnectionId(INVALID_CONNECTION_ID);
+ }
+
+ // now remember the new set
+ pEntry->m_ppCLRTaskArray = ppCLRTaskArrayNew;
+
+ for (index = 0; index < dwCount; index++)
+ {
+ pThread = (Thread *) ppCLRTask[index];
+ pThread->SetConnectionId( id );
+ pEntry->m_ppCLRTaskArray[index] = ppCLRTask[index];
+ }
+ pEntry->m_CLRTaskCount = dwCount;
+
+ // AddRef and Release on a Thread object can take the ThreadStoreLock, so we
+ // release our lock first.
+ }
+
+ // Does the addref on the new set
+ for (index = 0; index < dwCount; index++)
+ ppCLRTaskArrayNew[index]->AddRef();
+
+ // Does the release on the old set
+ if (CLRTaskCountPrevious != 0)
+ {
+ _ASSERTE(ppCLRTaskArrayPrevious != NULL);
+ for (UINT i = 0; i < CLRTaskCountPrevious; i++)
+ ppCLRTaskArrayPrevious[i]->Release();
+ delete [] ppCLRTaskArrayPrevious;
+ }
+
+ // send notification to debugger
+ if (CORDebuggerAttached())
+ {
+ g_pDebugInterface->ChangeConnection(id);
+ }
+
+ErrExit:
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+}
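+
+// Illustrative (hypothetical) host-side connection lifecycle, tying together the
+// three entry points above; connId, wszName, cTasks, and rgpTasks are made-up names:
+//
+//     pDebugManager->BeginConnection(connId, wszName);   // wszName: non-empty wchar_t*
+//     pDebugManager->SetConnectionTasks(connId, cTasks, rgpTasks);
+//     // ... run work on the tasks ...
+//     pDebugManager->EndConnection(connId);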
+
+HRESULT CCLRDebugManager::SetDacl(PACL pacl)
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_ENTRY_POINT;
+ HRESULT hr;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ hr = E_NOTIMPL;
+
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+} // SetDacl
+
+
+HRESULT CCLRDebugManager::GetDacl(PACL *pacl)
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_ENTRY_POINT;
+ HRESULT hr;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ hr = E_NOTIMPL;
+
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+} // GetDacl
+
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#if defined(FEATURE_INCLUDE_ALL_INTERFACES) || defined(FEATURE_WINDOWSPHONE)
+
+HRESULT CCLRErrorReportingManager::QueryInterface(REFIID riid, void** ppUnk)
+{
+ if (!ppUnk)
+ return E_POINTER;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ *ppUnk = 0;
+
+ // Deliberately do NOT hand out ICorConfiguration. They must explicitly call
+ // GetConfiguration to obtain that interface.
+ if (riid == IID_IUnknown)
+ {
+ *ppUnk = (IUnknown *) this;
+ }
+ else if (riid == IID_ICLRErrorReportingManager)
+ {
+ *ppUnk = (ICLRErrorReportingManager *) this;
+ }
+#ifdef FEATURE_WINDOWSPHONE
+ else if (riid == IID_ICLRErrorReportingManager2)
+ {
+ *ppUnk = (ICLRErrorReportingManager2 *) this;
+ }
+#endif // FEATURE_WINDOWSPHONE
+ else
+ {
+ hr = E_NOINTERFACE;
+ }
+
+ return hr;
+
+} // HRESULT CCLRErrorReportingManager::QueryInterface()
+
+ULONG CCLRErrorReportingManager::AddRef()
+{
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+} // ULONG CCLRErrorReportingManager::AddRef()
+
+ULONG CCLRErrorReportingManager::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+} // ULONG CCLRErrorReportingManager::Release()
+
+// Get Watson bucket parameters for "current" exception (on calling thread).
+HRESULT CCLRErrorReportingManager::GetBucketParametersForCurrentException(
+ BucketParameters *pParams)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ // To avoid confusion, clear the buckets.
+ memset(pParams, 0, sizeof(BucketParameters));
+
+#ifndef FEATURE_PAL
+ // Defer to Watson helper.
+ hr = ::GetBucketParametersForCurrentException(pParams);
+#else
+ // Watson doesn't exist on non-Windows platforms
+ hr = E_NOTIMPL;
+#endif // !FEATURE_PAL
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+
+} // HRESULT CCLRErrorReportingManager::GetBucketParametersForCurrentException()
+
+//
+// The BeginCustomDump function configures the custom dump support
+//
+// Parameters -
+// dwFlavor - The flavor of the dump
+// dwNumItems - The number of items in the CustomDumpItem array.
+// Should always be zero today, since no custom items are defined
+// items - Array of CustomDumpItem structs specifying items to be added to the dump.
+// Should always be NULL today, since no custom items are defined.
+// dwReserved - reserved for future use. Must be zero today
+//
+HRESULT CCLRErrorReportingManager::BeginCustomDump( ECustomDumpFlavor dwFlavor,
+ DWORD dwNumItems,
+ CustomDumpItem items[],
+ DWORD dwReserved)
+{
+ STATIC_CONTRACT_ENTRY_POINT;
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ if (dwNumItems != 0 || items != NULL || dwReserved != 0)
+ {
+ IfFailGo(E_INVALIDARG);
+ }
+ if (g_ECustomDumpFlavor != DUMP_FLAVOR_Default)
+ {
+ // BeginCustomDump is called without matching EndCustomDump
+ IfFailGo(E_INVALIDARG);
+ }
+ g_ECustomDumpFlavor = dwFlavor;
+
+ErrExit:
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+//
+// EndCustomDump clears the custom dump configuration
+//
+HRESULT CCLRErrorReportingManager::EndCustomDump()
+{
+ STATIC_CONTRACT_ENTRY_POINT;
+ // NOT IMPLEMENTED YET
+ BEGIN_ENTRYPOINT_NOTHROW;
+ g_ECustomDumpFlavor = DUMP_FLAVOR_Default;
+ END_ENTRYPOINT_NOTHROW;
+
+ return S_OK;
+}
+
+#ifdef FEATURE_WINDOWSPHONE
+HRESULT CopyStringWorker(WCHAR** pTarget, WCHAR const* pSource)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (pTarget == NULL || pSource == NULL)
+ return E_INVALIDARG;
+
+ if (*pTarget)
+ delete[] (*pTarget);
+
+ // allocate space for the data plus one WCHAR for the NULL terminator
+ size_t sourceLen = wcslen(pSource);
+ *pTarget = new (nothrow) WCHAR[sourceLen + 1];
+
+ if (!(*pTarget))
+ return E_OUTOFMEMORY;
+
+ errno_t result = wcsncpy_s(*pTarget, sourceLen + 1, pSource, sourceLen);
+ _ASSERTE(result == 0);
+
+ if (result != 0)
+ {
+ delete[] (*pTarget);
+ *pTarget = NULL;
+
+ return E_FAIL;
+ }
+
+ return S_OK;
+}
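+
+// Illustrative usage (hypothetical): any previously cached string is freed and
+// replaced; the caller owns the result and frees it with delete[].
+//
+//     WCHAR *pCached = NULL;
+//     if (SUCCEEDED(CopyStringWorker(&pCached, W("some value"))))
+//     {
+//         // use pCached, then: delete[] pCached;
+//     }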
+
+CCLRErrorReportingManager::BucketParamsCache::BucketParamsCache(DWORD maxNumParams) : m_pParams(NULL), m_cMaxParams(maxNumParams)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+CCLRErrorReportingManager::BucketParamsCache::~BucketParamsCache()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pParams)
+ {
+ for (DWORD i = 0; i < m_cMaxParams; ++i)
+ if (m_pParams[i]) delete[] m_pParams[i];
+ }
+}
+
+WCHAR const* CCLRErrorReportingManager::BucketParamsCache::GetAt(BucketParameterIndex index)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (index >= InvalidBucketParamIndex)
+ {
+ _ASSERTE(!"bad bucket parameter index");
+ return NULL;
+ }
+
+ if (!m_pParams)
+ return NULL;
+
+ return m_pParams[index];
+}
+
+HRESULT CCLRErrorReportingManager::BucketParamsCache::SetAt(BucketParameterIndex index, WCHAR const* val)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (index >= InvalidBucketParamIndex)
+ {
+ _ASSERTE(!"bad bucket parameter index");
+ return E_INVALIDARG;
+ }
+
+ if (!val)
+ return E_INVALIDARG;
+
+ if (!m_pParams)
+ {
+ m_pParams = new (nothrow) WCHAR*[m_cMaxParams];
+ if (!m_pParams)
+ return E_OUTOFMEMORY;
+
+ for (DWORD i = 0; i < m_cMaxParams; ++i)
+ m_pParams[i] = NULL;
+ }
+
+ return CopyStringWorker(&m_pParams[index], val);
+}
+
+HRESULT CCLRErrorReportingManager::CopyToDataCache(WCHAR** pTarget, WCHAR const* pSource)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return CopyStringWorker(pTarget, pSource);
+}
+
+HRESULT CCLRErrorReportingManager::SetApplicationData(ApplicationDataKey key, WCHAR const* pValue)
+{
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ if(g_fEEStarted)
+ return HOST_E_INVALIDOPERATION;
+
+ if (pValue == NULL || wcslen(pValue) > MAX_PATH)
+ return E_INVALIDARG;
+
+ HRESULT hr = S_OK;
+
+ switch (key)
+ {
+ case ApplicationID:
+ hr = CopyToDataCache(&m_pApplicationId, pValue);
+ break;
+
+ case InstanceID:
+ hr = CopyToDataCache(&m_pInstanceId, pValue);
+ break;
+
+ default:
+ hr = E_INVALIDARG;
+ }
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+HRESULT CCLRErrorReportingManager::SetBucketParametersForUnhandledException(BucketParameters const* pBucketParams, DWORD* pCountParams)
+{
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ if(g_fEEStarted)
+ return HOST_E_INVALIDOPERATION;
+
+ if (pBucketParams == NULL || pCountParams == NULL || pBucketParams->fInited != TRUE)
+ return E_INVALIDARG;
+
+ *pCountParams = 0;
+
+ if (!m_pBucketParamsCache)
+ {
+ m_pBucketParamsCache = new (nothrow) BucketParamsCache(InvalidBucketParamIndex);
+ if (!m_pBucketParamsCache)
+ return E_OUTOFMEMORY;
+ }
+
+ HRESULT hr = S_OK;
+ bool hasOverride = false;
+
+ for (DWORD i = 0; i < InvalidBucketParamIndex; ++i)
+ {
+ if (pBucketParams->pszParams[i][0] != W('\0'))
+ {
+ hasOverride = true;
+ hr = m_pBucketParamsCache->SetAt(static_cast<BucketParameterIndex>(i), pBucketParams->pszParams[i]);
+ if (SUCCEEDED(hr))
+ *pCountParams += 1;
+ else
+ break;
+ }
+ }
+
+ if (!hasOverride)
+ return E_INVALIDARG;
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+WCHAR const* CCLRErrorReportingManager::GetApplicationData(ApplicationDataKey key)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ WCHAR* pValue = NULL;
+
+ switch (key)
+ {
+ case ApplicationID:
+ pValue = m_pApplicationId;
+ break;
+
+ case InstanceID:
+ pValue = m_pInstanceId;
+ break;
+
+ default:
+ _ASSERTE(!"invalid key specified");
+ }
+
+ return pValue;
+}
+
+WCHAR const* CCLRErrorReportingManager::GetBucketParamOverride(BucketParameterIndex bucketParamId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!m_pBucketParamsCache)
+ return NULL;
+
+ return m_pBucketParamsCache->GetAt(bucketParamId);
+}
+
+#endif // FEATURE_WINDOWSPHONE
+
+CCLRErrorReportingManager::CCLRErrorReportingManager()
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_WINDOWSPHONE
+ m_pApplicationId = NULL;
+ m_pInstanceId = NULL;
+ m_pBucketParamsCache = NULL;
+#endif
+}
+
+CCLRErrorReportingManager::~CCLRErrorReportingManager()
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_WINDOWSPHONE
+ if (m_pApplicationId)
+ delete[] m_pApplicationId;
+
+ if (m_pInstanceId)
+ delete[] m_pInstanceId;
+
+ if (m_pBucketParamsCache)
+ delete m_pBucketParamsCache;
+#endif
+}
+
+#endif // defined(FEATURE_INCLUDE_ALL_INTERFACES) || defined(FEATURE_WINDOWSPHONE)
+
+#ifdef FEATURE_IPCMAN
+
+CrstStatic CCLRSecurityAttributeManager::m_hostSAMutex;
+PACL CCLRSecurityAttributeManager::m_pACL;
+
+SECURITY_ATTRIBUTES CCLRSecurityAttributeManager::m_hostSA;
+SECURITY_DESCRIPTOR CCLRSecurityAttributeManager::m_hostSD;
+
+/*
+* Process-wide initialization; serves as the constructor for this all-static class.
+*
+*/
+void CCLRSecurityAttributeManager::ProcessInit()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ m_hostSAMutex.Init(CrstReDacl, CRST_UNSAFE_ANYMODE);
+ m_pACL = NULL;
+}
+
+/*
+* Process-wide cleanup; serves as the destructor.
+*
+*/
+void CCLRSecurityAttributeManager::ProcessCleanUp()
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ m_hostSAMutex.Destroy();
+ if (m_pACL)
+ CoTaskMemFree(m_pACL);
+}
+
+// Set private block and events to the new ACL.
+HRESULT CCLRSecurityAttributeManager::SetDACL(PACL pacl)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DWORD dwError;
+ PACL pNewACL = NULL;
+ HANDLE hProc = NULL;
+ DWORD pid = 0;
+
+ // @todo: How can we make sure that debugger attach will not attempt to happen during this time???
+ //
+ CrstHolder ch(&m_hostSAMutex);
+
+ // make sure our host passed us a valid ACL
+ if (!IsValidAcl(pacl))
+ {
+ dwError = GetLastError();
+ hr = HRESULT_FROM_WIN32(dwError);
+ goto ErrExit;
+ }
+
+ // Cannot set the DACL while a debugger is attached, because the events are already all hooked up
+ // between the LS and RS.
+ if (CORDebuggerAttached())
+ return CORDBG_E_DEBUGGER_ALREADY_ATTACHED;
+
+ // make a copy of the new ACL; check the allocation and capture any failure code
+ pNewACL = (PACL) CoTaskMemAlloc(pacl->AclSize);
+ IfNullGo(pNewACL);
+ hr = CopyACL(pacl, pNewACL);
+ if (FAILED(hr))
+ goto ErrExit;
+
+ _ASSERTE (SECURITY_DESCRIPTOR_MIN_LENGTH == sizeof(SECURITY_DESCRIPTOR));
+
+ if (!InitializeSecurityDescriptor(&m_hostSD, SECURITY_DESCRIPTOR_REVISION))
+ {
+ hr = HRESULT_FROM_GetLastError();
+ goto ErrExit;
+ }
+
+ if (!SetSecurityDescriptorDacl(&m_hostSD, TRUE, pNewACL, FALSE))
+ {
+ hr = HRESULT_FROM_GetLastError();
+ goto ErrExit;
+ }
+
+ // Now free the old m_pACL and cache pNewACL in its place.
+ if (m_pACL)
+ CoTaskMemFree(m_pACL);
+
+ m_pACL = pNewACL;
+ pNewACL = NULL;
+
+ m_hostSA.nLength = sizeof(SECURITY_ATTRIBUTES);
+ m_hostSA.lpSecurityDescriptor = &m_hostSD;
+ m_hostSA.bInheritHandle = FALSE;
+
+ // First of all, try to re-DACL the process object itself.
+ pid = GetCurrentProcessId();
+ hProc = OpenProcess(WRITE_DAC, FALSE, pid);
+ if (hProc == NULL)
+ {
+ hr = HRESULT_FROM_GetLastError();
+ goto ErrExit;
+ }
+ if (SetKernelObjectSecurity(hProc, DACL_SECURITY_INFORMATION, &m_hostSD) == 0)
+ {
+ // failed!
+ hr = HRESULT_FROM_GetLastError();
+ goto ErrExit;
+ }
+
+
+ // Now reset the DACLs of the kernel objects.
+ // This will reDACL the global shared section
+ hr = g_pIPCManagerInterface->ReDaclLegacyPrivateBlock(&m_hostSD);
+ if (FAILED(hr))
+ goto ErrExit;
+
+ // This will reDacl on debugger events.
+ if (g_pDebugInterface)
+ {
+ g_pDebugInterface->ReDaclEvents(&m_hostSD);
+ }
+
+ErrExit:
+ if (pNewACL)
+ CoTaskMemFree(pNewACL);
+ if (hProc != NULL)
+ CloseHandle(hProc);
+
+ return hr;
+}
+
+// ppacl - receives a copy of the current DACL (either the host-supplied one or the
+// default policy's). The copy is allocated with CoTaskMemAlloc, so the caller is
+// responsible for freeing it.
+//
+HRESULT CCLRSecurityAttributeManager::GetDACL(PACL *ppacl)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ PACL pNewACL = NULL;
+ PACL pDefaultACL = NULL;
+ SECURITY_ATTRIBUTES *pSA = NULL;
+
+ // output parameter cannot be NULL
+ if (ppacl == NULL)
+ return E_INVALIDARG;
+
+ *ppacl = NULL;
+
+ CrstHolder ch(&m_hostSAMutex);
+
+ // we want to return the ACL of our default policy
+ if (m_pACL == NULL)
+ {
+ hr = g_pIPCManagerInterface->CreateWinNTDescriptor(GetCurrentProcessId(), &pSA, eDescriptor_Private);
+ if (FAILED(hr))
+ {
+ goto ErrExit;
+ }
+ EX_TRY
+ {
+ BOOL bDaclPresent;
+ BOOL bDaclDefault;
+
+ LeaveRuntimeHolder holder((size_t)(::GetSecurityDescriptorDacl));
+ ::GetSecurityDescriptorDacl(pSA->lpSecurityDescriptor, &bDaclPresent, &pDefaultACL, &bDaclDefault);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ if (FAILED(hr) || pDefaultACL == NULL || pDefaultACL->AclSize == 0)
+ {
+ goto ErrExit;
+ }
+ }
+ else
+ {
+ pDefaultACL = m_pACL;
+ }
+
+ pNewACL = (PACL) CoTaskMemAlloc(pDefaultACL->AclSize);
+ if (pNewACL == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ goto ErrExit;
+ }
+
+ // make a copy of ACL
+ hr = CCLRSecurityAttributeManager::CopyACL(pDefaultACL, pNewACL);
+ if (SUCCEEDED(hr))
+ *ppacl = pNewACL;
+
+ErrExit:
+ if (FAILED(hr))
+ {
+ if (pNewACL)
+ {
+ CoTaskMemFree(pNewACL);
+ }
+ }
+ if (pSA != NULL)
+ {
+ g_pIPCManagerInterface->DestroySecurityAttributes(pSA);
+ }
+ return hr;
+}
+
+
+// This API duplicates pAclOriginal into the caller-provided pNewACL buffer
+HRESULT CCLRSecurityAttributeManager::CopyACL(PACL pAclOriginal, PACL pNewACL)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = NO_ERROR;
+ DWORD dwError = GetLastError();
+ int i;
+ ACE_HEADER *pDACLAce;
+
+ _ASSERTE(pNewACL && pAclOriginal);
+
+ // initialize the target ACL buffer
+ if (!InitializeAcl(pNewACL, pAclOriginal->AclSize, ACL_REVISION))
+ {
+ dwError = GetLastError();
+ hr = HRESULT_FROM_WIN32(dwError);
+ goto ErrExit;
+ }
+
+ // loop through each existing ace and copy it over
+ for (i = 0; i < pAclOriginal->AceCount; i++)
+ {
+ if (!GetAce(pAclOriginal, i, (LPVOID *) &pDACLAce))
+ {
+ dwError = GetLastError();
+ hr = HRESULT_FROM_WIN32(dwError);
+ goto ErrExit;
+ }
+
+ if (!AddAce(pNewACL, ACL_REVISION, i, pDACLAce, pDACLAce->AceSize))
+ {
+ dwError = GetLastError();
+ hr = HRESULT_FROM_WIN32(dwError);
+ goto ErrExit;
+ }
+ }
+
+ // make sure everything went well with the new ACL
+ if (!IsValidAcl(pNewACL))
+ {
+ dwError = GetLastError();
+ hr = HRESULT_FROM_WIN32(dwError);
+ goto ErrExit;
+ }
+
+ErrExit:
+ return hr;
+}
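+
+// Illustrative usage (hypothetical names): the destination buffer must be at
+// least pSource->AclSize bytes, as the callers above ensure:
+//
+//     PACL pCopy = (PACL) CoTaskMemAlloc(pSource->AclSize);
+//     if (pCopy != NULL && SUCCEEDED(CopyACL(pSource, pCopy)))   // called from within the class
+//     {
+//         // use pCopy, then CoTaskMemFree(pCopy);
+//     }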
+
+
+HRESULT CCLRSecurityAttributeManager::GetHostSecurityAttributes(SECURITY_ATTRIBUTES **ppSA)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if(!ppSA)
+ return E_POINTER;
+
+ HRESULT hr = S_OK;
+
+ *ppSA = NULL;
+
+ // host has specified ACL
+ if (m_pACL != NULL)
+ *ppSA = &(m_hostSA);
+
+ else
+ hr = g_pIPCManagerInterface->CreateWinNTDescriptor(GetCurrentProcessId(), ppSA, eDescriptor_Private);
+
+ return hr;
+}
+
+void CCLRSecurityAttributeManager::DestroyHostSecurityAttributes(SECURITY_ATTRIBUTES *pSA)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // no pSA to cleanup
+ if (pSA == NULL)
+ return;
+
+ // it is our current host SA.
+ if (&(m_hostSA) == pSA)
+ return;
+
+ g_pIPCManagerInterface->DestroySecurityAttributes(pSA);
+}
+#endif // FEATURE_IPCMAN
+
+void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ pMSEX->dwLength = sizeof(MEMORYSTATUSEX);
+ BOOL fRet = GlobalMemoryStatusEx(pMSEX);
+ _ASSERTE (fRet);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ // CoreCLR cannot be memory hosted
+ if (CLRMemoryHosted())
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ DWORD memoryLoad;
+ SIZE_T availableBytes;
+ HRESULT hr = CorHost2::GetHostMemoryManager()->GetMemoryLoad(&memoryLoad, &availableBytes);
+ if (hr == S_OK) {
+ pMSEX->dwMemoryLoad = memoryLoad;
+ pMSEX->ullAvailPhys = availableBytes;
+ }
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ // If the machine has more RAM than the virtual address limit, cap the value:
+ // our GC can never use more than the virtual address limit.
+ if (pMSEX->ullAvailPhys > pMSEX->ullTotalVirtual)
+ {
+ pMSEX->ullAvailPhys = pMSEX->ullAvailVirtual;
+ }
+}
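+
+// Illustrative caller-side sketch (hypothetical):
+//
+//     MEMORYSTATUSEX msex;
+//     GetProcessMemoryLoad(&msex);
+//     // msex.dwMemoryLoad is the percentage of memory in use, and
+//     // msex.ullAvailPhys has been capped so it never exceeds the virtual address limit.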
+
+// This is the instance that exposes interfaces out to all the other DLLs of the CLR
+// so they can use our services for TLS, synchronization, memory allocation, etc.
+static BYTE g_CEEInstance[sizeof(CExecutionEngine)];
+static Volatile<IExecutionEngine*> g_pCEE = NULL;
+
+PTLS_CALLBACK_FUNCTION CExecutionEngine::Callbacks[MAX_PREDEFINED_TLS_SLOT];
+
+extern "C" IExecutionEngine * __stdcall IEE()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Unfortunately, we can't probe here. The probing system requires the
+ // use of TLS, and in order to initialize TLS we need to call IEE.
+
+ //BEGIN_ENTRYPOINT_VOIDRET;
+
+
+ // The following code does NOT contain a race condition. The following code is BY DESIGN.
+ // The issue is that we can have two separate threads inside this if statement, both of which are
+ // initializing the g_CEEInstance variable (and subsequently updating g_pCEE). This works fine,
+ // and will not cause an inconsistent state due to the fact that CExecutionEngine has no
+ // local variables. If multiple threads make it inside this if statement, it will copy the same
+ // bytes over g_CEEInstance and there will not be a time when there is an inconsistent state.
+ if ( !g_pCEE )
+ {
+ // Create a local copy on the stack and then copy it over to the static instance.
+ // This avoids race conditions caused by multiple initializations of the vtable in the constructor
+ CExecutionEngine local;
+ memcpy(&g_CEEInstance, &local, sizeof(CExecutionEngine));
+
+ g_pCEE = (IExecutionEngine*)(CExecutionEngine*)&g_CEEInstance;
+ }
+ //END_ENTRYPOINT_VOIDRET;
+
+ return g_pCEE;
+}
+
+
+HRESULT STDMETHODCALLTYPE CExecutionEngine::QueryInterface(REFIID id, void **pInterface)
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ if (!pInterface)
+ return E_POINTER;
+
+ *pInterface = NULL;
+
+ //CANNOTTHROWCOMPLUSEXCEPTION();
+ if (id == IID_IExecutionEngine)
+ *pInterface = (IExecutionEngine *)this;
+ else if (id == IID_IEEMemoryManager)
+ *pInterface = (IEEMemoryManager *)this;
+ else if (id == IID_IUnknown)
+ *pInterface = (IUnknown *)(IExecutionEngine *)this;
+ else
+ return E_NOINTERFACE;
+
+ AddRef();
+ return S_OK;
+} // HRESULT STDMETHODCALLTYPE CExecutionEngine::QueryInterface()
+
+
+ULONG STDMETHODCALLTYPE CExecutionEngine::AddRef()
+{
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+}
+
+ULONG STDMETHODCALLTYPE CExecutionEngine::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+}
+
+struct ClrTlsInfo
+{
+ void* data[MAX_PREDEFINED_TLS_SLOT];
+ // When hosted, we may not be able to delete memory in DLL_THREAD_DETACH.
+ // We chain such blocks into a side list and free them on the finalizer thread.
+ ClrTlsInfo *next;
+};
+
+#define DataToClrTlsInfo(a) (a)?(ClrTlsInfo*)((BYTE*)a - offsetof(ClrTlsInfo, data)):NULL
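+
+// The macro above is a container_of-style conversion: given a pointer to the
+// embedded 'data' array, it recovers the enclosing ClrTlsInfo. An equivalent
+// inline sketch (illustrative only; the name DataToClrTlsInfoFn is hypothetical):
+//
+//     inline ClrTlsInfo* DataToClrTlsInfoFn(void** a)
+//     {
+//         return a ? (ClrTlsInfo*)((BYTE*)a - offsetof(ClrTlsInfo, data))
+//                  : NULL;
+//     }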
+
+#if !defined(FEATURE_CORECLR)
+#define HAS_FLS_SUPPORT 1
+#endif
+
+#ifdef HAS_FLS_SUPPORT
+
+static BOOL fHasFlsSupport = FALSE;
+
+typedef DWORD (*Func_FlsAlloc)(PFLS_CALLBACK_FUNCTION lpCallback);
+typedef BOOL (*Func_FlsFree)(DWORD dwFlsIndex);
+typedef BOOL (*Func_FlsSetValue)(DWORD dwFlsIndex,PVOID lpFlsData);
+typedef PVOID (*Func_FlsGetValue)(DWORD dwFlsIndex);
+
+static DWORD FlsIndex = FLS_OUT_OF_INDEXES;
+static Func_FlsAlloc pFlsAlloc;
+static Func_FlsSetValue pFlsSetValue;
+static Func_FlsFree pFlsFree;
+static Func_FlsGetValue pFlsGetValue;
+static Volatile<BOOL> fFlsSetupDone = FALSE;
+
+VOID WINAPI FlsCallback(
+ PVOID lpFlsData
+)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE (pFlsGetValue);
+ if (pFlsGetValue(FlsIndex) != lpFlsData)
+ {
+ // The current running fiber is being destroyed. We can not destroy the memory yet,
+ // because our DllMain function may still need the memory.
+ CExecutionEngine::ThreadDetaching((void **)lpFlsData);
+ }
+ else
+ {
+ // The thread is being wound down.
+ // In hosting scenarios the host will have already called ICLRTask::ExitTask, which
+ // ends up calling CExecutionEngine::SwitchOut, which will have reset the TLS at TlsIndex.
+ //
+ // Unfortunately different OSes have different ordering of destroying FLS data and sending
+ // the DLL_THREAD_DETACH notification (pre-Vista FlsCallback is called after DllMain, while
+ // in Vista and up, FlsCallback is called before DllMain). Additionally, starting with
+ // Vista SP1 and Win2k8, the OS will set the FLS slot to 0 after the call to FlsCallback,
+ // effectively removing our last reference to this data. Since in EEDllMain we need to be
+ // able to access the FLS data, we save lpFlsData in the TLS slot at TlsIndex, if needed.
+ if (CExecutionEngine::GetTlsData() == NULL)
+ {
+ CExecutionEngine::SetTlsData((void **)lpFlsData);
+ }
+ }
+}
+
+#endif // HAS_FLS_SUPPORT
+
+
+#ifdef FEATURE_IMPLICIT_TLS
+void** CExecutionEngine::GetTlsData()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return gCurrentThreadInfo.m_EETlsData;
+}
+
+BOOL CExecutionEngine::SetTlsData (void** ppTlsInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ gCurrentThreadInfo.m_EETlsData = ppTlsInfo;
+ return TRUE;
+}
+#else
+void** CExecutionEngine::GetTlsData()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (TlsIndex == TLS_OUT_OF_INDEXES)
+ return NULL;
+
+ void **ppTlsData = (void **)UnsafeTlsGetValue(TlsIndex);
+ return ppTlsData;
+}
+BOOL CExecutionEngine::SetTlsData (void** ppTlsInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (TlsIndex == TLS_OUT_OF_INDEXES)
+ return FALSE;
+
+ return UnsafeTlsSetValue(TlsIndex, ppTlsInfo);
+}
+
+#endif // FEATURE_IMPLICIT_TLS
+
+static VolatilePtr<ClrTlsInfo> g_pDetachedTlsInfo;
+
+BOOL CExecutionEngine::HasDetachedTlsInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return g_pDetachedTlsInfo.Load() != NULL;
+}
+
+void CExecutionEngine::CleanupDetachedTlsInfo()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (g_pDetachedTlsInfo.Load() == NULL)
+ {
+ return;
+ }
+ ClrTlsInfo *head = FastInterlockExchangePointer(g_pDetachedTlsInfo.GetPointer(), NULL);
+
+ while (head)
+ {
+ ClrTlsInfo *node = head;
+ head = head->next;
+ DeleteTLS(node->data);
+ }
+}
+
+void CExecutionEngine::DetachTlsInfo(void **pTlsData)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (pTlsData == NULL)
+ {
+ return;
+ }
+
+ if (CExecutionEngine::GetTlsData() == pTlsData)
+ {
+ CExecutionEngine::SetTlsData(0);
+ }
+
+#ifdef HAS_FLS_SUPPORT
+ if (fHasFlsSupport && pFlsGetValue(FlsIndex) == pTlsData)
+ {
+ pFlsSetValue(FlsIndex, NULL);
+ }
+#endif
+
+ ClrTlsInfo *pTlsInfo = DataToClrTlsInfo(pTlsData);
+ // PREFIX_ASSUME needs TLS. If we use it here, we may do memory allocation.
+#if defined(_PREFAST_) || defined(_PREFIX_)
+ if (pTlsInfo == NULL) __UNREACHABLE();
+#else
+ _ASSERTE(pTlsInfo != NULL);
+#endif // _PREFAST_ || _PREFIX_
+
+ if (pTlsInfo->data[TlsIdx_StressLog])
+ {
+#ifdef STRESS_LOG
+ CantAllocHolder caHolder;
+ StressLog::ThreadDetach ((ThreadStressLog *)pTlsInfo->data[TlsIdx_StressLog]);
+ pTlsInfo->data[TlsIdx_StressLog] = NULL;
+#else
+ _ASSERTE (!"Shouldn't have stress log!");
+#endif
+ }
+
+ while (TRUE)
+ {
+ ClrTlsInfo *head = g_pDetachedTlsInfo.Load();
+ pTlsInfo->next = head;
+ if (FastInterlockCompareExchangePointer(g_pDetachedTlsInfo.GetPointer(), pTlsInfo, head) == head)
+ {
+ return;
+ }
+ }
+}
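+
+// Note: the CAS push loop above pairs with CleanupDetachedTlsInfo earlier in this
+// file, which detaches the whole list with a single FastInterlockExchangePointer
+// and then frees each node; together they form a simple lock-free (Treiber) stack.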
+
+//---------------------------------------------------------------------------------------
+//
+// Returns the current logical thread's data block (ClrTlsInfo::data).
+//
+// Arguments:
+// slot - Index of the slot that is about to be requested
+// force - If the data block does not exist yet, create it as a side-effect
+//
+// Return Value:
+// NULL, if the data block did not exist yet for the current thread and force was FALSE.
+// A pointer to the data block, otherwise.
+//
+// Notes:
+// If the underlying OS does not support fiber mode, the data block is stored in TLS.
+// If the underlying OS does support fiber mode, it is primarily stored in FLS,
+// and cached in TLS so that we can use our generated optimized TLS accessors.
+//
+// TLS support for the other DLLs of the CLR operates quite differently in hosted
+// and unhosted scenarios.
+
+void **CExecutionEngine::CheckThreadState(DWORD slot, BOOL force)
+{
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ // !!! This function is called during Thread::SwitchIn and SwitchOut
+ // !!! It is extremely important that while executing this function, we will not
+ // !!! cause fiber switch. This means we can not allocate memory, lock, etc...
+
+ //<TODO> @TODO: Decide on an exception strategy for all the DLLs of the CLR, and then
+ // enable all the exceptions out of this method.</TODO>
+
+ // Treat as a runtime assertion, since the invariant spans many DLLs.
+ _ASSERTE(slot < MAX_PREDEFINED_TLS_SLOT);
+// if (slot >= MAX_PREDEFINED_TLS_SLOT)
+// COMPlusThrow(kArgumentOutOfRangeException);
+
+#ifdef HAS_FLS_SUPPORT
+ if (!fFlsSetupDone)
+ {
+ // Contract depends on Fls support. Don't use contract here.
+ HMODULE hmod = GetModuleHandleA(WINDOWS_KERNEL32_DLLNAME_A);
+ if (hmod)
+ {
+ pFlsSetValue = (Func_FlsSetValue) GetProcAddress(hmod, "FlsSetValue");
+ pFlsGetValue = (Func_FlsGetValue) GetProcAddress(hmod, "FlsGetValue");
+ pFlsAlloc = (Func_FlsAlloc) GetProcAddress(hmod, "FlsAlloc");
+ pFlsFree = (Func_FlsFree) GetProcAddress(hmod, "FlsFree");
+
+ if (pFlsSetValue && pFlsGetValue && pFlsAlloc && pFlsFree )
+ {
+ fHasFlsSupport = TRUE;
+ }
+ else
+ {
+ // Since we didn't find them all, we shouldn't have found any
+ _ASSERTE( pFlsSetValue == NULL && pFlsGetValue == NULL && pFlsAlloc == NULL && pFlsFree == NULL);
+ }
+ fFlsSetupDone = TRUE;
+ }
+ }
+
+ if (fHasFlsSupport && FlsIndex == FLS_OUT_OF_INDEXES)
+ {
+ // PREFIX_ASSUME needs TLS. If we use it here, we will loop forever
+#if defined(_PREFAST_) || defined(_PREFIX_)
+ if (pFlsAlloc == NULL) __UNREACHABLE();
+#else
+ _ASSERTE(pFlsAlloc != NULL);
+#endif // _PREFAST_ || _PREFIX_
+
+ DWORD tryFlsIndex = pFlsAlloc(FlsCallback);
+ if (tryFlsIndex != FLS_OUT_OF_INDEXES)
+ {
+ if (FastInterlockCompareExchange((LONG*)&FlsIndex, tryFlsIndex, FLS_OUT_OF_INDEXES) != FLS_OUT_OF_INDEXES)
+ {
+ pFlsFree(tryFlsIndex);
+ }
+ }
+ if (FlsIndex == FLS_OUT_OF_INDEXES)
+ {
+ COMPlusThrowOM();
+ }
+ }
+#endif // HAS_FLS_SUPPORT
+
+#ifndef FEATURE_IMPLICIT_TLS
+ // Ensure we have a TLS Index
+ if (TlsIndex == TLS_OUT_OF_INDEXES)
+ {
+ DWORD tryTlsIndex = UnsafeTlsAlloc();
+ if (tryTlsIndex != TLS_OUT_OF_INDEXES)
+ {
+ if (FastInterlockCompareExchange((LONG*)&TlsIndex, tryTlsIndex, TLS_OUT_OF_INDEXES) != (LONG)TLS_OUT_OF_INDEXES)
+ {
+ UnsafeTlsFree(tryTlsIndex);
+ }
+ }
+ if (TlsIndex == TLS_OUT_OF_INDEXES)
+ {
+ COMPlusThrowOM();
+ }
+ }
+#endif // FEATURE_IMPLICIT_TLS
+
+ void** pTlsData = CExecutionEngine::GetTlsData();
+ BOOL fInTls = (pTlsData != NULL);
+
+#ifdef HAS_FLS_SUPPORT
+ if (fHasFlsSupport)
+ {
+ if (pTlsData == NULL)
+ {
+ pTlsData = (void **)pFlsGetValue(FlsIndex);
+ }
+ }
+#endif
+
+ ClrTlsInfo *pTlsInfo = DataToClrTlsInfo(pTlsData);
+ if (pTlsInfo == 0 && force)
+ {
+#undef HeapAlloc
+#undef GetProcessHeap
+ // !!! Contract uses our TLS support. Contract may be used before our host support is set up.
+ // !!! To better support contract, we call into OS for memory allocation.
+ pTlsInfo = (ClrTlsInfo*) ::HeapAlloc(GetProcessHeap(),0,sizeof(ClrTlsInfo));
+#define GetProcessHeap() Dont_Use_GetProcessHeap()
+#define HeapAlloc(hHeap, dwFlags, dwBytes) Dont_Use_HeapAlloc(hHeap, dwFlags, dwBytes)
+ if (pTlsInfo == NULL)
+ {
+ goto LError;
+ }
+ memset (pTlsInfo, 0, sizeof(ClrTlsInfo));
+#ifdef HAS_FLS_SUPPORT
+ if (fHasFlsSupport && !pFlsSetValue(FlsIndex, pTlsInfo))
+ {
+ goto LError;
+ }
+#endif
+ // We save the last intolerant marker on stack in this slot.
+ // -1 is the largest unsigned number, and therefore our marker is always smaller than it.
+ pTlsInfo->data[TlsIdx_SOIntolerantTransitionHandler] = (void*)(-1);
+ }
+
+ if (!fInTls && pTlsInfo)
+ {
+#ifdef HAS_FLS_SUPPORT
+ // If we have a thread object or are on a non-fiber thread, we are safe for fiber switching.
+ if (!fHasFlsSupport ||
+ GetThread() ||
+ ((g_fEEStarted || g_fEEInit) && !CLRTaskHosted()) ||
+ (((size_t)pTlsInfo->data[TlsIdx_ThreadType]) & (ThreadType_GC | ThreadType_Gate | ThreadType_Timer | ThreadType_DbgHelper)))
+ {
+#ifdef _DEBUG
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_Lifetime);
+ }
+#endif
+ if (!CExecutionEngine::SetTlsData(pTlsInfo->data) && !fHasFlsSupport)
+ {
+ goto LError;
+ }
+ }
+#else
+ if (!CExecutionEngine::SetTlsData(pTlsInfo->data))
+ {
+ goto LError;
+ }
+#endif
+ }
+
+ return pTlsInfo?pTlsInfo->data:NULL;
+
+LError:
+ if (pTlsInfo)
+ {
+#undef HeapFree
+#undef GetProcessHeap
+ ::HeapFree(GetProcessHeap(), 0, pTlsInfo);
+#define GetProcessHeap() Dont_Use_GetProcessHeap()
+#define HeapFree(hHeap, dwFlags, lpMem) Dont_Use_HeapFree(hHeap, dwFlags, lpMem)
+ }
+ // If this is for the stack probe, and we failed to allocate memory for it, we won't
+ // put in a guard page.
+ if (slot == TlsIdx_ClrDebugState || slot == TlsIdx_StackProbe)
+ return NULL;
+
+ ThrowOutOfMemory();
+}
+
+
+void **CExecutionEngine::CheckThreadStateNoCreate(DWORD slot
+#ifdef _DEBUG
+ , BOOL fForDestruction
+#endif // _DEBUG
+ )
+{
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ // !!! This function is called during Thread::SwitchIn and SwitchOut
+ // !!! It is extremely important that while executing this function, we will not
+ // !!! cause fiber switch. This means we can not allocate memory, lock, etc...
+
+
+ // Treat as a runtime assertion, since the invariant spans many DLLs.
+ _ASSERTE(slot < MAX_PREDEFINED_TLS_SLOT);
+
+ void **pTlsData = CExecutionEngine::GetTlsData();
+
+#ifdef HAS_FLS_SUPPORT
+ if (fHasFlsSupport)
+ {
+ if (pTlsData == NULL)
+ {
+ pTlsData = (void **)pFlsGetValue(FlsIndex);
+ }
+ }
+#endif
+
+ ClrTlsInfo *pTlsInfo = DataToClrTlsInfo(pTlsData);
+
+ return pTlsInfo?pTlsInfo->data:NULL;
+}
+
+// Note: Sampling profilers also use this function to initialize TLS for an unmanaged
+// sampling thread so that initialization can be done in advance to avoid deadlocks.
+// See ProfToEEInterfaceImpl::InitializeCurrentThread for more details.
+void CExecutionEngine::SetupTLSForThread(Thread *pThread)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_MODE_ANY;
+
+#ifdef _DEBUG
+ if (pThread)
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_Lifetime);
+#endif
+#ifdef STRESS_LOG
+ if (StressLog::StressLogOn(~0ul, 0))
+ {
+ StressLog::CreateThreadStressLog();
+ }
+#endif
+ void **pTlsData;
+ pTlsData = CheckThreadState(0);
+
+ PREFIX_ASSUME(pTlsData != NULL);
+
+#ifdef ENABLE_CONTRACTS
+ // Profilers need the side effect of GetClrDebugState() to perform initialization
+ // in advance to avoid deadlocks. Refer to ProfToEEInterfaceImpl::InitializeCurrentThread
+ ClrDebugState *pDebugState = ::GetClrDebugState();
+
+ if (pThread)
+ pThread->m_pClrDebugState = pDebugState;
+#endif
+}
+
+void CExecutionEngine::SwitchIn()
+{
+ // No real contracts here. This function is called by Thread::SwitchIn.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ // @TODO - doesn't look like we can probe here....
+
+#ifdef HAS_FLS_SUPPORT
+ if (fHasFlsSupport)
+ {
+ void **pTlsData = (void **)pFlsGetValue(FlsIndex);
+
+ BOOL fResult = CExecutionEngine::SetTlsData(pTlsData);
+ if (fResult)
+ {
+#ifdef STRESS_LOG
+ // We are in task transition period. We can not call into host to create stress log.
+ if (ClrTlsGetValue(TlsIdx_StressLog) != NULL)
+ {
+ STRESS_LOG1(LF_SYNC, LL_INFO100, ThreadStressLog::TaskSwitchMsg(), ::GetCurrentThreadId());
+ }
+#endif
+ }
+ // It is OK for UnsafeTlsSetValue to fail here, since we can always go back to Fls to get value.
+ }
+#endif
+}
+
+void CExecutionEngine::SwitchOut()
+{
+ // No real contracts here. This function is called by Thread::SwitchOut
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+#ifdef HAS_FLS_SUPPORT
+ // @TODO - doesn't look like we can probe here.
+ if (fHasFlsSupport && pFlsGetValue != NULL && (void **)pFlsGetValue(FlsIndex) != NULL)
+ {
+ // Clear out TLS unless we're in the process of ThreadDetach
+ // We establish that we're in ThreadDetach because fHasFlsSupport will
+ // be TRUE, but the FLS will not exist.
+ CExecutionEngine::SetTlsData(NULL);
+ }
+#endif // HAS_FLS_SUPPORT
+}
+
+static void ThreadDetachingHelper(PTLS_CALLBACK_FUNCTION callback, void* pData)
+{
+ // Do not use contract. We are freeing TLS blocks.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+
+ callback(pData);
+}
+
+// Called here from a thread detach or from destruction of a Thread object. In
+// the detach case, we get our info from TLS. In the destruct case, it comes from
+// the object we are destructing.
+void CExecutionEngine::ThreadDetaching(void ** pTlsData)
+{
+ // Can not cause memory allocation during thread detach, so no real contracts.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+
+ // This function may be called twice:
+ // 1. When a physical thread dies, our DLL_THREAD_DETACH calls this function with pTlsData = NULL
+ // 2. When a fiber is destroyed, or OS calls FlsCallback after DLL_THREAD_DETACH process.
+ // We will null the FLS and TLS entry if it matches the deleted one.
+
+ if (pTlsData)
+ {
+ DeleteTLS (pTlsData);
+ }
+}
+
+void CExecutionEngine::DeleteTLS(void ** pTlsData)
+{
+ // Can not cause memory allocation during thread detach, so no real contracts.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+
+ if (CExecutionEngine::GetTlsData() == NULL)
+ {
+ // We have not allocated TlsData yet.
+ return;
+ }
+
+ PREFIX_ASSUME(pTlsData != NULL);
+
+ ClrTlsInfo *pTlsInfo = DataToClrTlsInfo(pTlsData);
+ BOOL fNeed;
+ do
+ {
+ fNeed = FALSE;
+ for (int i=0; i<MAX_PREDEFINED_TLS_SLOT; i++)
+ {
+ if (i == TlsIdx_ClrDebugState ||
+ i == TlsIdx_StressLog)
+ {
+ // StressLog and DebugState may be needed during callback.
+ continue;
+ }
+ // If we have some data and a callback, issue it.
+ if (Callbacks[i] != 0 && pTlsInfo->data[i] != 0)
+ {
+ void* pData = pTlsInfo->data[i];
+ pTlsInfo->data[i] = 0;
+ ThreadDetachingHelper(Callbacks[i], pData);
+ fNeed = TRUE;
+ }
+ }
+ } while (fNeed);
+
+ if (pTlsInfo->data[TlsIdx_StressLog] != 0)
+ {
+#ifdef STRESS_LOG
+ StressLog::ThreadDetach((ThreadStressLog *)pTlsInfo->data[TlsIdx_StressLog]);
+#else
+ _ASSERTE (!"should not have StressLog");
+#endif
+ }
+
+ if (Callbacks[TlsIdx_ClrDebugState] != 0 && pTlsInfo->data[TlsIdx_ClrDebugState] != 0)
+ {
+ void* pData = pTlsInfo->data[TlsIdx_ClrDebugState];
+ pTlsInfo->data[TlsIdx_ClrDebugState] = 0;
+ ThreadDetachingHelper(Callbacks[TlsIdx_ClrDebugState], pData);
+ }
+
+#ifdef _DEBUG
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_Lifetime);
+ }
+#endif
+
+ // NULL the TLS and FLS entries so that we don't double-free.
+ // We may get two callbacks here on thread death:
+ // 1. From EEDllMain
+ // 2. From OS callback on FLS destruction
+ if (CExecutionEngine::GetTlsData() == pTlsData)
+ {
+ CExecutionEngine::SetTlsData(0);
+ }
+
+#ifdef HAS_FLS_SUPPORT
+ if (fHasFlsSupport && pFlsGetValue(FlsIndex) == pTlsData)
+ {
+ pFlsSetValue(FlsIndex, NULL);
+ }
+#endif
+
+#undef HeapFree
+#undef GetProcessHeap
+ ::HeapFree (GetProcessHeap(),0,pTlsInfo);
+#define HeapFree(hHeap, dwFlags, lpMem) Dont_Use_HeapFree(hHeap, dwFlags, lpMem)
+#define GetProcessHeap() Dont_Use_GetProcessHeap()
+
+}
+
+#ifdef ENABLE_CONTRACTS_IMPL
+// Fls callback to deallocate ClrDebugState when our FLS block goes away.
+void FreeClrDebugState(LPVOID pTlsData);
+#endif
+
+VOID STDMETHODCALLTYPE CExecutionEngine::TLS_AssociateCallback(DWORD slot, PTLS_CALLBACK_FUNCTION callback)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ CheckThreadState(slot);
+
+ // They can toggle between a callback and no callback. But anything else looks like
+ // confusion on their part.
+ //
+ // (TlsIdx_ClrDebugState associates its callback from utilcode.lib - which can be replicated. But
+ // all the callbacks are equally good.)
+ _ASSERTE(slot == TlsIdx_ClrDebugState || Callbacks[slot] == 0 || Callbacks[slot] == callback || callback == 0);
+ if (slot == TlsIdx_ClrDebugState)
+ {
+#ifdef ENABLE_CONTRACTS_IMPL
+ // ClrDebugState is shared among many dlls. Some dll, like perfcounter.dll, may be unloaded.
+ // We force the callback function to be in mscorwks.dll.
+ Callbacks[slot] = FreeClrDebugState;
+#else
+ _ASSERTE (!"should not get here");
+#endif
+ }
+ else
+ Callbacks[slot] = callback;
+}
+
+LPVOID* STDMETHODCALLTYPE CExecutionEngine::TLS_GetDataBlock()
+{
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ return CExecutionEngine::GetTlsData();
+}
+
+LPVOID STDMETHODCALLTYPE CExecutionEngine::TLS_GetValue(DWORD slot)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return EETlsGetValue(slot);
+}
+
+BOOL STDMETHODCALLTYPE CExecutionEngine::TLS_CheckValue(DWORD slot, LPVOID * pValue)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return EETlsCheckValue(slot, pValue);
+}
+
+VOID STDMETHODCALLTYPE CExecutionEngine::TLS_SetValue(DWORD slot, LPVOID pData)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ EETlsSetValue(slot,pData);
+}
+
+
+VOID STDMETHODCALLTYPE CExecutionEngine::TLS_ThreadDetaching()
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ CExecutionEngine::ThreadDetaching(NULL);
+}
+
+
+CRITSEC_COOKIE STDMETHODCALLTYPE CExecutionEngine::CreateLock(LPCSTR szTag, LPCSTR level, CrstFlags flags)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ CRITSEC_COOKIE cookie = NULL;
+ BEGIN_ENTRYPOINT_VOIDRET;
+ cookie = ::EECreateCriticalSection(*(CrstType*)&level, flags);
+ END_ENTRYPOINT_VOIDRET;
+ return cookie;
+}
+
+void STDMETHODCALLTYPE CExecutionEngine::DestroyLock(CRITSEC_COOKIE cookie)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ ::EEDeleteCriticalSection(cookie);
+}
+
+void STDMETHODCALLTYPE CExecutionEngine::AcquireLock(CRITSEC_COOKIE cookie)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ ::EEEnterCriticalSection(cookie);
+ END_SO_INTOLERANT_CODE;
+}
+
+void STDMETHODCALLTYPE CExecutionEngine::ReleaseLock(CRITSEC_COOKIE cookie)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ ::EELeaveCriticalSection(cookie);
+ END_SO_INTOLERANT_CODE;
+}
+
+// Locking routines supplied by the EE to the other DLLs of the CLR. In a _DEBUG
+// build of the EE, we poison the Crst as a poor man's attempt to do some argument
+// validation.
+#define POISON_BITS 3
+
+static inline EVENT_COOKIE CLREventToCookie(CLREvent * pEvent)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE((((uintptr_t) pEvent) & POISON_BITS) == 0);
+#ifdef _DEBUG
+ pEvent = (CLREvent *) (((uintptr_t) pEvent) | POISON_BITS);
+#endif
+ return (EVENT_COOKIE) pEvent;
+}
+
+static inline CLREvent *CookieToCLREvent(EVENT_COOKIE cookie)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE((((uintptr_t) cookie) & POISON_BITS) == POISON_BITS);
+#ifdef _DEBUG
+ if (cookie)
+ {
+ cookie = (EVENT_COOKIE) (((uintptr_t) cookie) & ~POISON_BITS);
+ }
+#endif
+ return (CLREvent *) cookie;
+}
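+
+// Illustrative round trip: an aligned CLREvent* (low bits clear) is tagged with
+// POISON_BITS on the way out and untagged on the way back in (_DEBUG only), so a
+// caller-fabricated cookie trips the _ASSERTE in CookieToCLREvent:
+//
+//     CLREvent *p = ...;                      // pointer-aligned, low bits 0
+//     EVENT_COOKIE c = CLREventToCookie(p);   // _DEBUG: c == p | POISON_BITS
+//     CLREvent *q = CookieToCLREvent(c);      // q == p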
+
+
+EVENT_COOKIE STDMETHODCALLTYPE CExecutionEngine::CreateAutoEvent(BOOL bInitialState)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ EVENT_COOKIE event = NULL;
+ BEGIN_ENTRYPOINT_THROWS;
+ NewHolder<CLREvent> pEvent(new CLREvent());
+ pEvent->CreateAutoEvent(bInitialState);
+ event = CLREventToCookie(pEvent);
+ pEvent.SuppressRelease();
+ END_ENTRYPOINT_THROWS;
+
+ return event;
+}
+
+EVENT_COOKIE STDMETHODCALLTYPE CExecutionEngine::CreateManualEvent(BOOL bInitialState)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ EVENT_COOKIE event = NULL;
+ BEGIN_ENTRYPOINT_THROWS;
+
+ NewHolder<CLREvent> pEvent(new CLREvent());
+ pEvent->CreateManualEvent(bInitialState);
+ event = CLREventToCookie(pEvent);
+ pEvent.SuppressRelease();
+
+ END_ENTRYPOINT_THROWS;
+
+ return event;
+}
+
+void STDMETHODCALLTYPE CExecutionEngine::CloseEvent(EVENT_COOKIE event)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ if (event) {
+ CLREvent *pEvent = CookieToCLREvent(event);
+ pEvent->CloseEvent();
+ delete pEvent;
+ }
+}
+
+BOOL STDMETHODCALLTYPE CExecutionEngine::ClrSetEvent(EVENT_COOKIE event)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ if (event) {
+ CLREvent *pEvent = CookieToCLREvent(event);
+ return pEvent->Set();
+ }
+ return FALSE;
+}
+
+BOOL STDMETHODCALLTYPE CExecutionEngine::ClrResetEvent(EVENT_COOKIE event)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ if (event) {
+ CLREvent *pEvent = CookieToCLREvent(event);
+ return pEvent->Reset();
+ }
+ return FALSE;
+}
+
+DWORD STDMETHODCALLTYPE CExecutionEngine::WaitForEvent(EVENT_COOKIE event,
+ DWORD dwMilliseconds,
+ BOOL bAlertable)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ if (event) {
+ CLREvent *pEvent = CookieToCLREvent(event);
+ return pEvent->Wait(dwMilliseconds,bAlertable);
+ }
+
+ if (GetThread() && bAlertable)
+ ThrowHR(E_INVALIDARG);
+ return WAIT_FAILED;
+}
+
+DWORD STDMETHODCALLTYPE CExecutionEngine::WaitForSingleObject(HANDLE handle,
+ DWORD dwMilliseconds)
+{
+ STATIC_CONTRACT_WRAPPER;
+ STATIC_CONTRACT_SO_TOLERANT;
+ LeaveRuntimeHolder holder((size_t)(::WaitForSingleObject));
+ return ::WaitForSingleObject(handle,dwMilliseconds);
+}
+
+static inline SEMAPHORE_COOKIE CLRSemaphoreToCookie(CLRSemaphore * pSemaphore)
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ _ASSERTE((((uintptr_t) pSemaphore) & POISON_BITS) == 0);
+#ifdef _DEBUG
+ pSemaphore = (CLRSemaphore *) (((uintptr_t) pSemaphore) | POISON_BITS);
+#endif
+ return (SEMAPHORE_COOKIE) pSemaphore;
+}
+
+static inline CLRSemaphore *CookieToCLRSemaphore(SEMAPHORE_COOKIE cookie)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE((((uintptr_t) cookie) & POISON_BITS) == POISON_BITS);
+#ifdef _DEBUG
+ if (cookie)
+ {
+ cookie = (SEMAPHORE_COOKIE) (((uintptr_t) cookie) & ~POISON_BITS);
+ }
+#endif
+ return (CLRSemaphore *) cookie;
+}
+
+
+SEMAPHORE_COOKIE STDMETHODCALLTYPE CExecutionEngine::ClrCreateSemaphore(DWORD dwInitial,
+ DWORD dwMax)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ NewHolder<CLRSemaphore> pSemaphore(new CLRSemaphore());
+ pSemaphore->Create(dwInitial, dwMax);
+ SEMAPHORE_COOKIE ret = CLRSemaphoreToCookie(pSemaphore);
+ pSemaphore.SuppressRelease();
+ return ret;
+}
+
+void STDMETHODCALLTYPE CExecutionEngine::ClrCloseSemaphore(SEMAPHORE_COOKIE semaphore)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ CLRSemaphore *pSemaphore = CookieToCLRSemaphore(semaphore);
+ pSemaphore->Close();
+ delete pSemaphore;
+}
+
+BOOL STDMETHODCALLTYPE CExecutionEngine::ClrReleaseSemaphore(SEMAPHORE_COOKIE semaphore,
+ LONG lReleaseCount,
+ LONG *lpPreviousCount)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ CLRSemaphore *pSemaphore = CookieToCLRSemaphore(semaphore);
+ return pSemaphore->Release(lReleaseCount,lpPreviousCount);
+}
+
+DWORD STDMETHODCALLTYPE CExecutionEngine::ClrWaitForSemaphore(SEMAPHORE_COOKIE semaphore,
+ DWORD dwMilliseconds,
+ BOOL bAlertable)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ CLRSemaphore *pSemaphore = CookieToCLRSemaphore(semaphore);
+ return pSemaphore->Wait(dwMilliseconds,bAlertable);
+}
+
+static inline MUTEX_COOKIE CLRMutexToCookie(CLRMutex * pMutex)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE((((uintptr_t) pMutex) & POISON_BITS) == 0);
+#ifdef _DEBUG
+ pMutex = (CLRMutex *) (((uintptr_t) pMutex) | POISON_BITS);
+#endif
+ return (MUTEX_COOKIE) pMutex;
+}
+
+static inline CLRMutex *CookieToCLRMutex(MUTEX_COOKIE cookie)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE((((uintptr_t) cookie) & POISON_BITS) == POISON_BITS);
+#ifdef _DEBUG
+ if (cookie)
+ {
+ cookie = (MUTEX_COOKIE) (((uintptr_t) cookie) & ~POISON_BITS);
+ }
+#endif
+ return (CLRMutex *) cookie;
+}
+
+
+MUTEX_COOKIE STDMETHODCALLTYPE CExecutionEngine::ClrCreateMutex(LPSECURITY_ATTRIBUTES lpMutexAttributes,
+ BOOL bInitialOwner,
+ LPCTSTR lpName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+        SO_TOLERANT; // we catch any errors and free the allocated memory
+ }
+ CONTRACTL_END;
+
+
+ MUTEX_COOKIE mutex = 0;
+ CLRMutex *pMutex = new (nothrow) CLRMutex();
+ if (pMutex)
+ {
+ EX_TRY
+ {
+ pMutex->Create(lpMutexAttributes, bInitialOwner, lpName);
+ mutex = CLRMutexToCookie(pMutex);
+ }
+ EX_CATCH
+ {
+ delete pMutex;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+ return mutex;
+}
+
+void STDMETHODCALLTYPE CExecutionEngine::ClrCloseMutex(MUTEX_COOKIE mutex)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ CLRMutex *pMutex = CookieToCLRMutex(mutex);
+ pMutex->Close();
+ delete pMutex;
+}
+
+BOOL STDMETHODCALLTYPE CExecutionEngine::ClrReleaseMutex(MUTEX_COOKIE mutex)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ CLRMutex *pMutex = CookieToCLRMutex(mutex);
+ return pMutex->Release();
+}
+
+DWORD STDMETHODCALLTYPE CExecutionEngine::ClrWaitForMutex(MUTEX_COOKIE mutex,
+ DWORD dwMilliseconds,
+ BOOL bAlertable)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ CLRMutex *pMutex = CookieToCLRMutex(mutex);
+ return pMutex->Wait(dwMilliseconds,bAlertable);
+}
+
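+// Note on the pattern used by the wrappers below: elsewhere in the runtime the
+// Clr* names are macros that send callers straight to the EE* implementations.
+// Each wrapper therefore #undefs the macro, defines the real CExecutionEngine
+// entry point as a thin wrapper over the EE* function, and then redefines the
+// macro so that code later in this file once again calls the EE* version
+// directly.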
+#undef ClrSleepEx
+DWORD STDMETHODCALLTYPE CExecutionEngine::ClrSleepEx(DWORD dwMilliseconds, BOOL bAlertable)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ return EESleepEx(dwMilliseconds,bAlertable);
+}
+#define ClrSleepEx EESleepEx
+
+#undef ClrAllocationDisallowed
+BOOL STDMETHODCALLTYPE CExecutionEngine::ClrAllocationDisallowed()
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return EEAllocationDisallowed();
+}
+#define ClrAllocationDisallowed EEAllocationDisallowed
+
+#undef ClrVirtualAlloc
+LPVOID STDMETHODCALLTYPE CExecutionEngine::ClrVirtualAlloc(LPVOID lpAddress,
+ SIZE_T dwSize,
+ DWORD flAllocationType,
+ DWORD flProtect)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return EEVirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
+}
+#define ClrVirtualAlloc EEVirtualAlloc
+
+#undef ClrVirtualFree
+BOOL STDMETHODCALLTYPE CExecutionEngine::ClrVirtualFree(LPVOID lpAddress,
+ SIZE_T dwSize,
+ DWORD dwFreeType)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return EEVirtualFree(lpAddress, dwSize, dwFreeType);
+}
+#define ClrVirtualFree EEVirtualFree
+
+#undef ClrVirtualQuery
+SIZE_T STDMETHODCALLTYPE CExecutionEngine::ClrVirtualQuery(LPCVOID lpAddress,
+ PMEMORY_BASIC_INFORMATION lpBuffer,
+ SIZE_T dwLength)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return EEVirtualQuery(lpAddress, lpBuffer, dwLength);
+}
+#define ClrVirtualQuery EEVirtualQuery
+
+#if defined(_DEBUG) && defined(FEATURE_CORECLR) && !defined(FEATURE_PAL)
+static VolatilePtr<BYTE> s_pStartOfUEFSection = NULL;
+static VolatilePtr<BYTE> s_pEndOfUEFSectionBoundary = NULL;
+static Volatile<DWORD> s_dwProtection = 0;
+#endif // _DEBUG && FEATURE_CORECLR && !FEATURE_PAL
+
+#undef ClrVirtualProtect
+
+BOOL STDMETHODCALLTYPE CExecutionEngine::ClrVirtualProtect(LPVOID lpAddress,
+ SIZE_T dwSize,
+ DWORD flNewProtect,
+ PDWORD lpflOldProtect)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ // Get the UEF installation details - we will use these to validate
+ // that the calls to ClrVirtualProtect are not going to affect the UEF.
+ //
+    // The OS UEF invocation mechanism was updated. When a UEF is set up, the OS captures
+ // the following details about it:
+ // 1) Protection of the pages in which the UEF lives
+ // 2) The size of the region in which the UEF lives
+ // 3) The region's Allocation Base
+ //
+ // The OS verifies details surrounding the UEF before invocation. For security reasons
+ // the page protection cannot change between SetUnhandledExceptionFilter and invocation.
+ //
+    // Prior to this change, the UEF lived in a common section of code_Seg, along with
+    // JIT_PatchedCode. Thus, their pages had the same protection and lived in the
+    // same region (and thus the region size was the same).
+ //
+ // In EEStartupHelper, when we setup the UEF and then invoke InitJitHelpers1 and InitJitHelpers2,
+ // they perform some optimizations that result in the memory page protection being changed. When
+ // the UEF is to be invoked, the OS does the check on the UEF's cached details against the current
+    // memory pages. This check used to fail on 64-bit retail builds when JIT_PatchedCode was
+    // aligned after the UEF with a different memory page protection (after the optimizations by
+    // InitJitHelpers). Thus, the UEF was never invoked.
+ //
+ // To circumvent this, we put the UEF in its own section in the code segment so that any modifications
+ // to memory pages will not affect the UEF details that the OS cached. This is done in Excep.cpp
+ // using the "#pragma code_seg" directives.
+ //
+ // Below, we double check that:
+ //
+    // 1) the address being protected does not lie in the region of the UEF, and
+    // 2) the section after the UEF does not have the same memory protection as the UEF section.
+ //
+    // We assert that both of the conditions above hold.
+
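+    // Worked example of the check below, assuming 4KB pages: if the UEF
+    // section starts at 0x10000 with VirtualSize 0x1200, it occupies
+    // ceil(0x1200 / 0x1000) = 2 pages, the following section starts at
+    // 0x12000, and the cached UEF boundary is 0x11FFF. Any ClrVirtualProtect
+    // call that changes the protection of a range overlapping
+    // [0x10000, 0x11FFF] trips the assert.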
+#if defined(_DEBUG) && defined(FEATURE_CORECLR) && !defined(FEATURE_PAL)
+ // We do this check in debug/checked builds only
+
+ // Do we have the UEF details?
+ if (s_pEndOfUEFSectionBoundary.Load() == NULL)
+ {
+ // Get reference to MSCORWKS image in memory...
+ PEDecoder pe(g_pMSCorEE);
+
+ // Find the UEF section from the image
+ IMAGE_SECTION_HEADER* pUEFSection = pe.FindSection(CLR_UEF_SECTION_NAME);
+ _ASSERTE(pUEFSection != NULL);
+ if (pUEFSection)
+ {
+ // We got our section - get the start of the section
+ BYTE* pStartOfUEFSection = static_cast<BYTE*>(pe.GetBase())+pUEFSection->VirtualAddress;
+ s_pStartOfUEFSection = pStartOfUEFSection;
+
+ // Now we need the protection attributes for the memory region in which the
+ // UEF section is...
+ MEMORY_BASIC_INFORMATION uefInfo;
+ if (ClrVirtualQuery(pStartOfUEFSection, &uefInfo, sizeof(uefInfo)) != 0)
+ {
+                // Calculate how many pages the UEF section takes to get to the start of the
+                // next section. We don't calculate this as
+ //
+ // pStartOfUEFSection + uefInfo.RegionSize
+ //
+ // because the section following UEF will also be included in the region size
+ // if it has the same protection as the UEF section.
+ DWORD dwUEFSectionPageCount = ((pUEFSection->Misc.VirtualSize + OS_PAGE_SIZE - 1)/OS_PAGE_SIZE);
+
+ BYTE* pAddressOfFollowingSection = pStartOfUEFSection + (OS_PAGE_SIZE * dwUEFSectionPageCount);
+
+                // Ensure that the section following us has a different memory protection
+ MEMORY_BASIC_INFORMATION nextSectionInfo;
+ _ASSERTE(ClrVirtualQuery(pAddressOfFollowingSection, &nextSectionInfo, sizeof(nextSectionInfo)) != 0);
+ _ASSERTE(nextSectionInfo.Protect != uefInfo.Protect);
+
+ // save the memory protection details
+ s_dwProtection = uefInfo.Protect;
+
+ // Get the end of the UEF section
+ BYTE* pEndOfUEFSectionBoundary = pAddressOfFollowingSection - 1;
+
+ // Set the end of UEF section boundary
+ FastInterlockExchangePointer(s_pEndOfUEFSectionBoundary.GetPointer(), pEndOfUEFSectionBoundary);
+ }
+ else
+ {
+ _ASSERTE(!"Unable to get UEF Details!");
+ }
+ }
+ }
+
+ if (s_pEndOfUEFSectionBoundary.Load() != NULL)
+ {
+ // Is the protection being changed?
+ if (flNewProtect != s_dwProtection)
+ {
+            // Is the target address NOT affecting the UEF? Possible cases:
+            // 1) Starts and ends before the UEF start.
+            // 2) Starts after the UEF end.
+
+ void* pEndOfRangeAddr = static_cast<BYTE*>(lpAddress)+dwSize-1;
+
+ _ASSERTE_MSG(((pEndOfRangeAddr < s_pStartOfUEFSection.Load()) || (lpAddress > s_pEndOfUEFSectionBoundary.Load())),
+ "Do not virtual protect the section in which UEF lives!");
+ }
+ }
+#endif // _DEBUG && FEATURE_CORECLR && !FEATURE_PAL
+
+ return EEVirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect);
+}
+#define ClrVirtualProtect EEVirtualProtect
+
+#undef ClrGetProcessHeap
+HANDLE STDMETHODCALLTYPE CExecutionEngine::ClrGetProcessHeap()
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return EEGetProcessHeap();
+}
+#define ClrGetProcessHeap EEGetProcessHeap
+
+#undef ClrGetProcessExecutableHeap
+HANDLE STDMETHODCALLTYPE CExecutionEngine::ClrGetProcessExecutableHeap()
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return EEGetProcessExecutableHeap();
+}
+#define ClrGetProcessExecutableHeap EEGetProcessExecutableHeap
+
+
+#undef ClrHeapCreate
+HANDLE STDMETHODCALLTYPE CExecutionEngine::ClrHeapCreate(DWORD flOptions,
+ SIZE_T dwInitialSize,
+ SIZE_T dwMaximumSize)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return EEHeapCreate(flOptions, dwInitialSize, dwMaximumSize);
+}
+#define ClrHeapCreate EEHeapCreate
+
+#undef ClrHeapDestroy
+BOOL STDMETHODCALLTYPE CExecutionEngine::ClrHeapDestroy(HANDLE hHeap)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return EEHeapDestroy(hHeap);
+}
+#define ClrHeapDestroy EEHeapDestroy
+
+#undef ClrHeapAlloc
+LPVOID STDMETHODCALLTYPE CExecutionEngine::ClrHeapAlloc(HANDLE hHeap,
+ DWORD dwFlags,
+ SIZE_T dwBytes)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+    // We need to guarantee very small stack consumption while allocating, and we can't allow
+    // an SO to happen while calling into the host. This forces a hard SO, which is OK because
+    // we should never get this close inside the EE in SO-intolerant code, so this should
+    // only fail if we call in directly from outside the EE, such as from the JIT.
+ MINIMAL_STACK_PROBE_CHECK_THREAD(GetThread());
+
+ return EEHeapAlloc(hHeap, dwFlags, dwBytes);
+}
+#define ClrHeapAlloc EEHeapAlloc
+
+#undef ClrHeapFree
+BOOL STDMETHODCALLTYPE CExecutionEngine::ClrHeapFree(HANDLE hHeap,
+ DWORD dwFlags,
+ LPVOID lpMem)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return EEHeapFree(hHeap, dwFlags, lpMem);
+}
+#define ClrHeapFree EEHeapFree
+
+#undef ClrHeapValidate
+BOOL STDMETHODCALLTYPE CExecutionEngine::ClrHeapValidate(HANDLE hHeap,
+ DWORD dwFlags,
+ LPCVOID lpMem)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return EEHeapValidate(hHeap, dwFlags, lpMem);
+}
+#define ClrHeapValidate EEHeapValidate
+
+//------------------------------------------------------------------------------
+// Helper function to get an exception object from outside the exception. In
+// the CLR, it may come from the Thread object. Non-CLR users have no Thread
+// object, in which case this does nothing.
+
+void CExecutionEngine::GetLastThrownObjectExceptionFromThread(void **ppvException)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ // Cast to our real type.
+ Exception **ppException = reinterpret_cast<Exception**>(ppvException);
+
+ // Try to get a better message.
+ GetLastThrownObjectExceptionFromThread_Internal(ppException);
+
+} // void CExecutionEngine::GetLastThrownObjectExceptionFromThread()
+
+
+#ifdef FEATURE_VERSIONING
+LocaleID RuntimeGetFileSystemLocale()
+{
+ return PEImage::GetFileSystemLocale();
+}
+#endif
+
+#ifdef FEATURE_CORECLR
+HRESULT CorHost2::DllGetActivationFactory(DWORD appDomainID, LPCWSTR wszTypeName, IActivationFactory ** factory)
+{
+#ifdef FEATURE_COMINTEROP_WINRT_MANAGED_ACTIVATION
+    // WinRT activation is currently supported in the default domain only
+ if (appDomainID != DefaultADID)
+ return HOST_E_INVALIDOPERATION;
+
+ HRESULT hr = S_OK;
+
+ Thread *pThread = GetThread();
+ if (pThread == NULL)
+ {
+ pThread = SetupThreadNoThrow(&hr);
+ if (pThread == NULL)
+ {
+ return hr;
+ }
+ }
+
+ if(SystemDomain::GetCurrentDomain()->GetId().m_dwId != DefaultADID)
+ {
+ return HOST_E_INVALIDOPERATION;
+ }
+
+ return DllGetActivationFactoryImpl(NULL, wszTypeName, NULL, factory);
+#else
+ return E_NOTIMPL;
+#endif
+}
+#endif
+
+
+#ifdef FEATURE_COMINTEROP_WINRT_MANAGED_ACTIVATION
+
+HRESULT STDMETHODCALLTYPE DllGetActivationFactoryImpl(LPCWSTR wszAssemblyName,
+ LPCWSTR wszTypeName,
+ LPCWSTR wszCodeBase,
+ IActivationFactory ** factory)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ AppDomain* pDomain = SystemDomain::System()->DefaultDomain();
+ _ASSERTE(pDomain);
+#ifndef FEATURE_CORECLR // CoreCLR uses the WinRT binder, which does not allow redirects
+ {
+ BaseDomain::LockHolder lh(pDomain);
+#ifdef FEATURE_HOSTED_BINDER
+ if (!SystemDomain::System()->DefaultDomain()->HasLoadContextHostBinder())
+#endif
+ {
+ // don't allow redirects
+ SystemDomain::InitializeDefaultDomain(FALSE);
+ }
+ }
+#endif
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr);
+ {
+ GCX_COOP();
+
+ bool bIsPrimitive;
+ TypeHandle typeHandle = WinRTTypeNameConverter::GetManagedTypeFromWinRTTypeName(wszTypeName, &bIsPrimitive);
+ if (!bIsPrimitive && !typeHandle.IsNull() && !typeHandle.IsTypeDesc() && typeHandle.AsMethodTable()->IsExportedToWinRT())
+ {
+ struct _gc {
+ OBJECTREF type;
+ } gc;
+ memset(&gc, 0, sizeof(gc));
+
+#if defined(FEATURE_MULTICOREJIT) && defined(FEATURE_APPX_BINDER)
+        // For AppX, multicore JIT is only needed when the root assembly does not have an NI image.
+        // When it has an NI image, we can't generate a profile and do not need to play one back.
+ if (AppX::IsAppXProcess() && ! typeHandle.IsZapped())
+ {
+ GCX_PREEMP();
+
+ pDomain->GetMulticoreJitManager().AutoStartProfileAppx(pDomain);
+ }
+#endif
+
+ IActivationFactory* activationFactory;
+ GCPROTECT_BEGIN(gc);
+
+ gc.type = typeHandle.GetManagedClassObject();
+
+ MethodDescCallSite mdcs(METHOD__WINDOWSRUNTIMEMARSHAL__GET_ACTIVATION_FACTORY_FOR_TYPE);
+ ARG_SLOT args[1] = {
+ ObjToArgSlot(gc.type)
+ };
+ activationFactory = (IActivationFactory*)mdcs.Call_RetLPVOID(args);
+
+ *factory = activationFactory;
+
+ GCPROTECT_END();
+ }
+ else
+ {
+ hr = COR_E_TYPELOAD;
+ }
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+#endif // FEATURE_COMINTEROP_WINRT_MANAGED_ACTIVATION
+
+
+#ifdef FEATURE_COMINTEROP_WINRT_DESKTOP_HOST
+
+HRESULT STDMETHODCALLTYPE GetClassActivatorForApplicationImpl(HSTRING appPath, IWinRTClassActivator** ppActivator)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr);
+ {
+ if (GetAppDomain()->GetWinRtBinder()->SetLocalWinMDPath(appPath))
+ {
+ GCX_COOP();
+
+ struct
+ {
+ STRINGREF appbase;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ UINT32 appPathLength = 0;
+ PCWSTR wszAppPath = WindowsGetStringRawBuffer(appPath, &appPathLength);
+
+ gc.appbase = StringObject::NewString(wszAppPath, appPathLength);
+
+ MethodDescCallSite getClassActivator(METHOD__WINDOWSRUNTIMEMARSHAL__GET_CLASS_ACTIVATOR_FOR_APPLICATION);
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(gc.appbase)
+ };
+
+ IWinRTClassActivator* pActivator = reinterpret_cast<IWinRTClassActivator *>(getClassActivator.Call_RetLPVOID(args));
+ *ppActivator = pActivator;
+
+ GCPROTECT_END();
+ }
+ else
+ {
+ hr = CO_E_BAD_PATH;
+ }
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+#endif // FEATURE_COMINTEROP_WINRT_DESKTOP_HOST
+
+
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/coverage.cpp b/src/vm/coverage.cpp
new file mode 100644
index 0000000000..fb0384a7d8
--- /dev/null
+++ b/src/vm/coverage.cpp
@@ -0,0 +1,56 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+
+#include "coverage.h"
+
+
+//
+// This is part of the runtime test team's code coverage tools. Due to the special nature of MSCORLIB.dll,
+// we have to work around several issues (like the initialization of the Security Manager) to be able to get
+// code coverage on mscorlib.dll.
+//
+
+FCIMPL1(unsigned __int64, COMCoverage::nativeCoverBlock, INT32 id)
+{
+ FCALL_CONTRACT;
+
+ unsigned __int64 retVal = 0;
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ HMODULE ilcovnat = 0;
+ if (id == 1)
+ {
+ ilcovnat = CLRLoadLibrary(W("Ilcovnat.dll"));
+
+ if (ilcovnat)
+ {
+ retVal = (unsigned __int64)GetProcAddress(ilcovnat, "CoverBlockNative");
+ }
+ }
+ else if (id == 2)
+ {
+ ilcovnat = CLRLoadLibrary(W("coverage.dll"));
+
+ if (ilcovnat)
+ {
+ retVal = (unsigned __int64)GetProcAddress(ilcovnat, "CoverageRegisterBinaryWithStruct");
+ }
+ }
+ else if (id == 3)
+ {
+ ilcovnat = CLRLoadLibrary(W("Ilcovnat.dll"));
+ if (ilcovnat)
+ {
+ retVal = (unsigned __int64)GetProcAddress(ilcovnat, "CoverMonRegisterMscorlib");
+ }
+ }
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
diff --git a/src/vm/coverage.h b/src/vm/coverage.h
new file mode 100644
index 0000000000..d45847f5b2
--- /dev/null
+++ b/src/vm/coverage.h
@@ -0,0 +1,20 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _COVERAGE_H_
+#define _COVERAGE_H_
+
+// Please see coverage.cpp for info on this file
+class COMCoverage
+{
+public:
+ //typedef struct
+ //{
+ // DECLARE_ECALL_I4_ARG(INT32, id);
+ //} _CoverageArgs;
+ static FCDECL1(unsigned __int64, nativeCoverBlock, INT32 id);
+};
+#endif // _COVERAGE_H_
diff --git a/src/vm/crossdomaincalls.cpp b/src/vm/crossdomaincalls.cpp
new file mode 100644
index 0000000000..fa04b57faa
--- /dev/null
+++ b/src/vm/crossdomaincalls.cpp
@@ -0,0 +1,2590 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: CrossDomainCalls.cpp
+//
+
+//
+// The CrossDomainCall class provides a fast path of execution for qualifying
+// cross-domain calls. Asynchronous calls, one-way calls, calls on context-bound
+// objects, etc. don't qualify.
+//
+
+
+#include "common.h"
+
+#ifdef FEATURE_REMOTING
+
+#include "crossdomaincalls.h"
+#include "callhelpers.h"
+#include "remoting.h"
+#include "objectclone.h"
+#include "dbginterface.h"
+#include "stackprobe.h"
+#include "virtualcallstub.h"
+#include "typeparse.h"
+#include "typestring.h"
+#include "appdomain.inl"
+#include "callingconvention.h"
+
+// See explanation of flags in crossdomaincalls.h
+RemotableMethodInfo::XADOptimizationType
+RemotableMethodInfo::IsCrossAppDomainOptimizable(MethodDesc *pMeth, DWORD *pNumStackSlotsToCopy)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // This method table might be representative, but that's OK for the kinds of analysis we're about to do.
+ MethodTable *pMT = pMeth->GetMethodTable()->GetCanonicalMethodTable();
+
+ _ASSERTE(pMT->HasRemotableMethodInfo());
+ _ASSERTE(pMT->GetRemotableMethodInfo());
+
+ if (pMT->IsContextful())
+ return XAD_NOT_OPTIMIZABLE;
+
+ DWORD flags;
+
+    // If this method is generic then we can't use cached analysis data stored on the method table and keyed by slot -- the same
+ // slot is shared by methods with very different characteristics (such as whether the return type is a GC ref etc.).
+ if (pMeth->GetNumGenericMethodArgs() > 0)
+ {
+ flags = DoStaticAnalysis(pMeth);
+ }
+ else
+ {
+ _ASSERTE(pMeth->GetSlot() < pMeth->GetMethodTable()->GetNumVtableSlots());
+ RemotableMethodInfo *pRMI = &(pMT->GetRemotableMethodInfo()->GetRemotableMethodInfo()[pMeth->GetSlot()]);
+ flags = pRMI->m_OptFlags;
+
+ if (!(flags & XAD_FLAGS_INITIALIZED))
+ {
+ flags = DoStaticAnalysis(pMeth);
+ pRMI->m_OptFlags = flags;
+ }
+ }
+
+ *pNumStackSlotsToCopy = flags & XAD_ARG_COUNT_MASK;
+
+ return (XADOptimizationType) (flags & XAD_FLAG_MASK);
+}
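+// Layout of the cached flags word (masks are defined in crossdomaincalls.h):
+// the low bits (XAD_ARG_COUNT_MASK) hold the number of stack slots occupied by
+// the arguments, and the remaining bits (XAD_FLAG_MASK) hold the optimization
+// flags, including XAD_FLAGS_INITIALIZED, which marks a per-slot cache entry
+// as computed.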
+
+// This method is not synchronized because the operation is idempotent
+DWORD
+RemotableMethodInfo::DoStaticAnalysis(MethodDesc *pMeth)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ PRECONDITION(CheckPointer(pMeth));
+ }
+ CONTRACTL_END
+
+ BOOL bCallArgsBlittable = TRUE;
+ BOOL bRetArgBlittable = TRUE;
+ BOOL bOptimizable = TRUE;
+ BOOL bMethodIsVirtual = FALSE;
+ BOOL bArgsHaveAFloat = FALSE;
+
+ DWORD numStackBytes = 0;
+ DWORD numStackSlots = 0;
+ DWORD returnTypeFlags = 0;
+
+ if (pMeth->ContainsGenericVariables())
+ {
+ bOptimizable = FALSE;
+ }
+ else
+ {
+ MetaSig mSig(pMeth);
+ ArgIterator argit(&mSig);
+
+ SigPointer spRet;
+ CorElementType retElem;
+
+ IMDInternalImport *pMDImport = pMeth->GetModule()->GetMDImport();
+ MDEnumHolder ePm(pMDImport); // For enumerating params.
+ mdParamDef tkPm; // A param token.
+ DWORD dwFlags; // Param flags.
+ USHORT usSeq; // Sequence of a parameter.
+
+ if (pMeth->IsOneWay())
+ {
+ bOptimizable = FALSE;
+ goto SetFlags;
+ }
+
+ if (pMeth->IsVirtual())
+ {
+ bMethodIsVirtual = TRUE;
+ }
+
+ numStackBytes = argit.SizeOfFrameArgumentArray();
+
+ _ASSERTE(numStackBytes % sizeof(SIZE_T) == 0);
+ numStackSlots = numStackBytes / sizeof(SIZE_T);
+
+ if (numStackSlots > XAD_ARG_COUNT_MASK)
+ {
+ bOptimizable = FALSE;
+ goto SetFlags;
+ }
+
+ // Check if there are any [Out] args. If there are, skip the fast path
+ IfFailThrow(pMDImport->EnumInit(mdtParamDef, pMeth->GetMemberDef(), &ePm));
+
+ // Enumerate through the params and check the flags.
+ while (pMDImport->EnumNext(&ePm, &tkPm))
+ {
+ LPCSTR szParamName_Ignore;
+ IfFailThrow(pMDImport->GetParamDefProps(tkPm, &usSeq, &dwFlags, &szParamName_Ignore));
+ if (usSeq == 0) // Skip return type flags.
+ continue;
+ // If the param has Out attribute, do not use fast path for this method
+ if (IsPdOut(dwFlags))
+ {
+ bOptimizable = FALSE;
+ goto SetFlags;
+ }
+ }
+
+ // We're getting SigPointer first because this way we can differentiate E_T_STRING and E_T_CLASS
+ spRet = mSig.GetReturnProps();
+ IfFailThrow(spRet.GetElemType(&retElem));
+ if (retElem > ELEMENT_TYPE_PTR &&
+ retElem != ELEMENT_TYPE_I &&
+ retElem != ELEMENT_TYPE_U &&
+ retElem != ELEMENT_TYPE_FNPTR)
+ {
+ bRetArgBlittable = FALSE;
+ }
+
+ // Now we can normalize the return type so as to get rid of any generic type variables and the like.
+ retElem = mSig.GetReturnType();
+
+ if (retElem == ELEMENT_TYPE_VALUETYPE)
+ {
+            // Make a note that we have a struct in the signature. This way we won't blit the contents
+            // and end up in a situation where we have data, but the type isn't loaded yet.
+ bCallArgsBlittable = FALSE;
+
+ // Do some further inspection
+ TypeHandle retTypeHandle = mSig.GetRetTypeHandleThrowing();
+
+ // Currently we don't handle the special unbox handling for ret values of Nullable<T> in MarshalAndCall
+ if (Nullable::IsNullableType(retTypeHandle))
+ {
+ bOptimizable = FALSE;
+ }
+
+ retElem = retTypeHandle.GetInternalCorElementType();
+ if ((retElem <= ELEMENT_TYPE_PTR || retElem == ELEMENT_TYPE_I || retElem == ELEMENT_TYPE_U) &&
+ !retTypeHandle.AsMethodTable()->CannotBeBlittedByObjectCloner())
+ bRetArgBlittable = TRUE;
+ }
+
+ // Check if the return type is a GC ref
+ if (gElementTypeInfo[retElem].m_gc == TYPE_GC_REF)
+ {
+ returnTypeFlags = XAD_RET_GC_REF;
+ }
+ else
+ {
+ returnTypeFlags = GetRetTypeFlagsFromFPReturnSize(argit.GetFPReturnSize());
+ }
+
+ CorElementType currType;
+ while ((currType = mSig.NextArg()) != ELEMENT_TYPE_END)
+ {
+ SigPointer sp = mSig.GetArgProps();
+ CorElementType retTy;
+ IfFailThrow(sp.GetElemType(&retTy));
+ if (retTy > ELEMENT_TYPE_PTR &&
+ retTy != ELEMENT_TYPE_VALUETYPE &&
+ retTy != ELEMENT_TYPE_I &&
+ retTy != ELEMENT_TYPE_U &&
+ retTy != ELEMENT_TYPE_FNPTR)
+ {
+ bCallArgsBlittable = FALSE;
+ }
+
+ // Currently we don't handle the special unbox handling for Nullable<T> for byrefs in MarshalAndCall
+ if (currType == ELEMENT_TYPE_BYREF)
+ {
+ TypeHandle refType;
+ if (mSig.GetByRefType(&refType) == ELEMENT_TYPE_VALUETYPE)
+ if (Nullable::IsNullableType(refType))
+ {
+ bOptimizable = FALSE;
+ }
+ }
+ else if (currType == ELEMENT_TYPE_VALUETYPE)
+ {
+#if ENREGISTERED_PARAMTYPE_MAXSIZE
+                // Since we also do implicit ByRef in some cases, we have to prohibit the optimization there too.
+ TypeHandle argType = mSig.GetLastTypeHandleThrowing();
+ if (Nullable::IsNullableType(argType))
+ {
+ if (ArgIterator::IsArgPassedByRef(argType))
+ bOptimizable = FALSE;
+ }
+#endif
+ bCallArgsBlittable = FALSE;
+ }
+ else if (currType == ELEMENT_TYPE_R4 || currType == ELEMENT_TYPE_R8)
+ {
+ bArgsHaveAFloat = TRUE;
+ }
+ }
+ }
+
+SetFlags:
+ DWORD optimizationFlags = 0;
+ if (!bOptimizable)
+ {
+ optimizationFlags |= XAD_NOT_OPTIMIZABLE;
+ }
+ else
+ {
+ optimizationFlags |= returnTypeFlags;
+
+ if (bCallArgsBlittable)
+ {
+ optimizationFlags |= XAD_BLITTABLE_ARGS;
+ }
+ if (bRetArgBlittable)
+ {
+ optimizationFlags |= XAD_BLITTABLE_RET;
+ }
+ if (bMethodIsVirtual)
+ {
+ optimizationFlags |= XAD_METHOD_IS_VIRTUAL;
+ }
+ if (bArgsHaveAFloat)
+ {
+ optimizationFlags |= XAD_ARGS_HAVE_A_FLOAT;
+ }
+ }
+ optimizationFlags |= numStackSlots;
+ optimizationFlags |= XAD_FLAGS_INITIALIZED;
+
+ return optimizationFlags;
+}
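+// To summarize the analysis above: a method is marked XAD_NOT_OPTIMIZABLE if it
+// contains generic variables, is one-way, needs more stack slots than
+// XAD_ARG_COUNT_MASK can encode, has any [Out] parameters, or involves
+// Nullable<T> in a return, byref, or implicit-byref position that
+// MarshalAndCall can't handle.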
+
+#ifndef CROSSGEN_COMPILE
+
+BOOL RemotableMethodInfo::TypeIsConduciveToBlitting(MethodTable *pFromMT, MethodTable *pToMT)
+{
+ LIMITED_METHOD_CONTRACT;
+    // Presence of GC fields or certain attributes rules out blittability.
+ if (pFromMT->CannotBeBlittedByObjectCloner() ||
+ pToMT->CannotBeBlittedByObjectCloner())
+ return FALSE;
+
+ // Shared types are okay to blit
+ if (pFromMT == pToMT)
+ return TRUE;
+
+ if (pFromMT->IsEnum() && pToMT->IsEnum()
+ && pFromMT->GetBaseSize() == pToMT->GetBaseSize())
+ return TRUE;
+
+ return FALSE;
+}
+
+PtrHashMap *CrossDomainTypeMap::s_crossDomainMTMap = NULL;
+SimpleRWLock *CrossDomainTypeMap::s_MTMapLock = NULL;
+
+BOOL CrossDomainTypeMap::CompareMTMapEntry (UPTR val1, UPTR val2)
+{
+ CONTRACTL {
+ MODE_ANY;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ CrossDomainTypeMap::MTMapEntry *entry1 = (CrossDomainTypeMap::MTMapEntry *)(val1 << 1);
+ CrossDomainTypeMap::MTMapEntry *entry2 = (CrossDomainTypeMap::MTMapEntry *)val2;
+
+ if (entry1->m_pFromMT == entry2->m_pFromMT &&
+ entry1->m_dwFromDomain == entry2->m_dwFromDomain &&
+ entry1->m_dwToDomain == entry2->m_dwToDomain)
+ return TRUE;
+
+ return FALSE;
+}
+
+CrossDomainTypeMap::MTMapEntry::MTMapEntry(AppDomain *pFromDomain, MethodTable *pFromMT, AppDomain *pToDomain, MethodTable *pToMT)
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_dwFromDomain = pFromDomain->GetId();
+ m_dwToDomain = pToDomain->GetId();
+ m_pFromMT = pFromMT;
+ m_pToMT = pToMT;
+}
+
+static BOOL IsOwnerOfRWLock(LPVOID lock)
+{
+ // @TODO - SimpleRWLock does not have knowledge of which thread gets the writer
+ // lock, so no way to verify
+ return TRUE;
+}
+
+/*static*/
+PtrHashMap *CrossDomainTypeMap::GetTypeMap()
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+
+ if (s_MTMapLock == NULL)
+ {
+ void *tempLockSpace = SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(SimpleRWLock)));
+ SimpleRWLock *tempLock = new (tempLockSpace) SimpleRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT);
+
+ if (FastInterlockCompareExchangePointer(&s_MTMapLock,
+ tempLock,
+ NULL) != NULL)
+ {
+ // We lost the race
+ SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->BackoutMem(tempLockSpace, sizeof(SimpleRWLock));
+ }
+ }
+
+    // Now we have a lock we can use to synchronize the remainder of the init.
+ if (s_crossDomainMTMap == NULL)
+ {
+ SimpleWriteLockHolder swlh(s_MTMapLock);
+
+ if (s_crossDomainMTMap == NULL)
+ {
+ PtrHashMap *map = new (SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()) PtrHashMap ();
+ LockOwner lock = {s_MTMapLock, IsOwnerOfRWLock};
+ map->Init (32, CompareMTMapEntry, TRUE, &lock);
+ s_crossDomainMTMap = map;
+ }
+ }
+
+ return s_crossDomainMTMap;
+}
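+// Initialization sketch for the statics above: the first thread through
+// placement-news a SimpleRWLock on the global loader heap and publishes it
+// with an interlocked compare-exchange; a losing thread backs out its
+// allocation. The map itself is then created under a classic double-checked
+// test, taking the writer lock for the second check.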
+
+MethodTable *
+CrossDomainTypeMap::GetMethodTableForDomain(MethodTable *pMT, AppDomain *pFromDomain, AppDomain *pToDomain)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+ PtrHashMap *map = GetTypeMap();
+
+ MTMapEntry admt(pFromDomain, pMT, pToDomain, NULL);
+
+ SimpleReadLockHolder srlh(s_MTMapLock);
+ MTMapEntry *pFound = (MTMapEntry *) map->LookupValue(admt.GetHash(), (LPVOID) &admt);
+ if ((MTMapEntry *)INVALIDENTRY == pFound)
+ return NULL;
+
+ return pFound->m_pToMT;
+}
+
+void
+CrossDomainTypeMap::SetMethodTableForDomain(MethodTable *pFromMT, AppDomain *pFromDomain, MethodTable *pToMT, AppDomain *pToDomain)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+ PtrHashMap *map = GetTypeMap();
+
+ NewHolder<MTMapEntry> admt(new MTMapEntry(pFromDomain, pFromMT, pToDomain, pToMT));
+ PREFIX_ASSUME(admt != NULL);
+
+ SimpleWriteLockHolder swlh(s_MTMapLock);
+
+ UPTR key = admt->GetHash();
+
+ MTMapEntry *pFound = (MTMapEntry *) map->LookupValue(key, (LPVOID) admt);
+ if ((MTMapEntry *)INVALIDENTRY == pFound)
+ {
+ map->InsertValue(key, (LPVOID) admt);
+ admt.SuppressRelease();
+ }
+}
+
+// Remove any entries in the table that refer to an appdomain that is no longer live.
+void CrossDomainTypeMap::FlushStaleEntries()
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ NOTHROW;
+ }
+ CONTRACTL_END
+
+ if (s_MTMapLock == NULL || s_crossDomainMTMap == NULL)
+ return;
+
+ SimpleWriteLockHolder swlh(s_MTMapLock);
+
+ bool fDeletedEntry = false;
+ PtrHashMap::PtrIterator iter = s_crossDomainMTMap->begin();
+ while (!iter.end())
+ {
+ MTMapEntry *pEntry = (MTMapEntry *)iter.GetValue();
+ AppDomainFromIDHolder adFrom(pEntry->m_dwFromDomain, TRUE);
+ AppDomainFromIDHolder adTo(pEntry->m_dwToDomain, TRUE);
+ if (adFrom.IsUnloaded() ||
+ adTo.IsUnloaded())
+ {
+#ifdef _DEBUG
+ LPVOID pDeletedEntry =
+#endif
+ s_crossDomainMTMap->DeleteValue(pEntry->GetHash(), pEntry);
+ _ASSERTE(pDeletedEntry == pEntry);
+ delete pEntry;
+ fDeletedEntry = true;
+ }
+ ++iter;
+ }
+
+ if (fDeletedEntry)
+ s_crossDomainMTMap->Compact();
+}
+
+
+
+// Before a cross-appdomain call, we read the principal on the thread and set it aside, so that it can
+// be restored when the call returns.
+// In addition, we let the principal flow through to the called appdomain if the principal is serializable.
+OBJECTREF CrossDomainChannel::ReadPrincipal()
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+
+ THREADBASEREF ref = (THREADBASEREF) GetThread()->GetExposedObjectRaw();
+ _ASSERTE(ref != NULL);
+
+ EXECUTIONCONTEXTREF refExecCtx = (EXECUTIONCONTEXTREF )ref->GetExecutionContext();
+ if (refExecCtx == NULL)
+ return NULL;
+
+ LOGICALCALLCONTEXTREF refCallContext = refExecCtx->GetLogicalCallContext();
+ if (refCallContext == NULL)
+ return NULL;
+
+ CCSECURITYDATAREF refSecurityData = refCallContext->GetSecurityData();
+ if (refSecurityData == NULL)
+ return NULL;
+
+ OBJECTREF refPrincipal = refSecurityData->GetPrincipal();
+ if (refPrincipal != NULL)
+ {
+ MethodTable *pPrincipalType = refPrincipal->GetMethodTable();
+ if (!pPrincipalType->IsSerializable())
+ {
+ refSecurityData->SetPrincipal(NULL);
+ }
+ }
+
+ return refPrincipal;
+}
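+// The traversal above is Thread -> managed Thread object -> ExecutionContext
+// -> LogicalCallContext -> SecurityData -> Principal; a NULL anywhere along
+// the chain means there is no principal to capture.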
+
+// The principal never flows from the called appdomain back to the caller domain.
+VOID CrossDomainChannel::ResetPrincipal()
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+
+ THREADBASEREF ref = (THREADBASEREF) GetThread()->GetExposedObjectRaw();
+ _ASSERTE(ref != NULL);
+
+ EXECUTIONCONTEXTREF refExecCtx = (EXECUTIONCONTEXTREF )ref->GetExecutionContext();
+ if (refExecCtx == NULL)
+ return;
+
+ LOGICALCALLCONTEXTREF refCallContext = refExecCtx->GetLogicalCallContext();
+ if (refCallContext == NULL)
+ return;
+
+ CCSECURITYDATAREF refSecurityData = refCallContext->GetSecurityData();
+ if (refSecurityData == NULL)
+ return;
+
+ refSecurityData->SetPrincipal(NULL);
+
+}
+
+// At the end of a cross-appdomain call, we restore whatever principal was on the thread at the beginning of the call
+VOID CrossDomainChannel::RestorePrincipal(OBJECTREF *prefPrincipal)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+
+ THREADBASEREF ref = (THREADBASEREF) GetThread()->GetExposedObjectRaw();
+ if (ref == NULL)
+ return;
+
+ EXECUTIONCONTEXTREF refExecCtx = (EXECUTIONCONTEXTREF )ref->GetExecutionContext();
+ _ASSERTE(*prefPrincipal == NULL || refExecCtx != NULL);
+
+ if (refExecCtx == NULL)
+ return;
+
+ LOGICALCALLCONTEXTREF refCallContext = refExecCtx->GetLogicalCallContext();
+ if (refCallContext == NULL)
+ return;
+
+ CCSECURITYDATAREF refSecurityData = refCallContext->GetSecurityData();
+ _ASSERTE(*prefPrincipal == NULL || refSecurityData != NULL);
+
+ if (refSecurityData == NULL)
+ return;
+
+ refSecurityData->SetPrincipal(*prefPrincipal);
+
+}
+
+// This method mimics the Lease renewal mechanism of the managed CrossDomainChannel
+// The lease object for the server can be accessed via its ServerIdentity.
+VOID CrossDomainChannel::RenewLease()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+ // Check if lease needs to be renewed
+ OBJECTREF refSrvIdentity = ObjectFromHandle(m_refSrvIdentity);
+ if (refSrvIdentity == NULL)
+ return;
+
+ OBJECTREF refLease = ObjectToOBJECTREF((Object *)refSrvIdentity->GetPtrOffset(CRemotingServices::GetOffsetOfLeaseInIdentity()));
+ if (refLease != NULL)
+ {
+ GCPROTECT_BEGIN(refLease);
+ MethodDesc *pLeaseMeth = CRemotingServices::MDofRenewLeaseOnCall();
+ PCODE pCode = (PCODE)pLeaseMeth->GetCallTarget(&refLease);
+
+ PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(pCode);
+
+ DECLARE_ARGHOLDER_ARRAY(args, 2);
+
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(refLease);
+ args[ARGNUM_1] = NULL;
+
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD_NORET(args);
+
+ GCPROTECT_END();
+ }
+}
+
+// Given a client side instantiated method desc and a server side generic definition method desc extract the instantiation,
+// translate all the types involved into the server domain and return the fully instantiated server method desc. Note that the
+// client and server method descs might not represent the same type -- the client method might be from an interface, whereas the
+// server method will always be on the real class.
+MethodDesc *InstantiateMethod(MethodDesc *pClientMD, MethodDesc *pServerMD, MethodTable *pServerMT)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ PRECONDITION(CheckPointer(pClientMD));
+ PRECONDITION(pClientMD->HasMethodInstantiation());
+ PRECONDITION(CheckPointer(pServerMD));
+ PRECONDITION(pServerMD->HasMethodInstantiation());
+ PRECONDITION(pClientMD->GetNumGenericMethodArgs() == pServerMD->GetNumGenericMethodArgs());
+ }
+ CONTRACTL_END;
+
+ Instantiation clientInst = pClientMD->GetMethodInstantiation();
+
+ DWORD dwAllocaSize;
+ if (!ClrSafeInt<DWORD>::multiply(clientInst.GetNumArgs(), sizeof(TypeHandle), dwAllocaSize))
+ COMPlusThrowHR(COR_E_OVERFLOW);
+
+ CQuickBytes qbServerInst;
+ TypeHandle *pServerInst = reinterpret_cast<TypeHandle*>(qbServerInst.AllocThrows(dwAllocaSize));
+
+ for (DWORD dwArgNum = 0; dwArgNum < clientInst.GetNumArgs(); dwArgNum++)
+ {
+ SString thName;
+ TypeString::AppendType(thName, clientInst[dwArgNum], TypeString::FormatNamespace|TypeString::FormatFullInst|TypeString::FormatAssembly);
+
+ pServerInst[dwArgNum] = TypeName::GetTypeFromAsmQualifiedName(thName.GetUnicode(), pClientMD->IsIntrospectionOnly());
+
+ _ASSERTE(!pServerInst[dwArgNum].IsNull());
+
+ // Check that the type is actually visible on the server side. This prevents a malicious client from luring a trusted server
+ // into manipulating types that would be normally invisible to it.
+ if (!pServerInst[dwArgNum].IsExternallyVisible())
+ COMPlusThrow(kRemotingException, W("Remoting_NonPublicOrStaticCantBeCalledRemotely"));
+ }
+
+    // Find or create the method with the full instantiation.
+ return MethodDesc::FindOrCreateAssociatedMethodDesc(pServerMD,
+ pServerMT,
+ FALSE,
+ Instantiation(pServerInst, clientInst.GetNumArgs()),
+ FALSE);
+}
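+// Translation sketch: each client-side type argument is rendered to its fully
+// instantiated, assembly-qualified name via TypeString::AppendType and then
+// re-resolved in the current (server) domain via
+// TypeName::GetTypeFromAsmQualifiedName; this name round-trip is what carries
+// the instantiation across the domain boundary.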
+
+BOOL CrossDomainChannel::GetGenericMethodAddress(MethodTable *pServerType)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ m_pSrvMD = InstantiateMethod(m_pCliMD, pServerType->GetMethodDescForSlot(m_pCliMD->GetSlot()), pServerType);
+
+ OBJECTREF thisObj = GetServerObject();
+ m_pTargetAddress = m_pSrvMD->GetCallTarget(&thisObj);
+
+ return TRUE;
+}
+
+// We use this method to find the target address when we are convinced that the most derived type the proxy is cast
+// to on the client side is equivalent to the corresponding type on the server side, in the sense that the method
+// table layouts are similar. This fact can be used to look up method addresses faster.
+BOOL CrossDomainChannel::GetTargetAddressFast(DWORD optFlags, MethodTable *pSrvMT, BOOL bFindServerMD)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+ _ASSERTE(m_pCliMD);
+ _ASSERTE(m_pSrvDomain == SystemDomain::GetCurrentDomain()->GetId());
+
+ MethodTable *pCliMT = m_pCliMD->GetMethodTable();
+ _ASSERTE(!pCliMT->IsInterface());
+
+ m_pSrvMD = NULL;
+
+ DWORD dwMethodSlot = m_pCliMD->GetSlot();
+ if (!RemotableMethodInfo::IsMethodVirtual(m_xret))
+ {
+ // This is a non-virtual method. Find the matching MT on the
+ // server side, dereference the slot and get the method address
+
+ MethodTable *pSrvSideMT = pSrvMT;
+
+ // We now need to walk the server type hierarchy till we find the type that
+ // declared the method we're going to call
+
+        // First find how far the type declaring the called method is from System.Object.
+ DWORD cliDepth = 0;
+ MethodTable *pCurrLevel = pCliMT;
+ while (pCurrLevel != NULL)
+ {
+ _ASSERTE(pCurrLevel);
+ pCurrLevel = pCurrLevel->GetParentMethodTable();
+ cliDepth++;
+        }
+
+        // Next find how deep the server type is from System.Object.
+ DWORD srvDepth = 0;
+ pCurrLevel = pSrvMT;
+ while (pCurrLevel != NULL)
+ {
+ _ASSERTE(pCurrLevel);
+ pCurrLevel = pCurrLevel->GetParentMethodTable();
+ srvDepth++;
+        }
+
+ _ASSERTE(srvDepth >= cliDepth);
+
+ while (srvDepth > cliDepth)
+ {
+ _ASSERTE(pSrvSideMT);
+ _ASSERTE(srvDepth != 0);
+ pSrvSideMT = pSrvSideMT->GetParentMethodTable();
+ srvDepth--;
+        }
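+        // Example: if the client method is declared on B in the chain
+        // Object -> A -> B and the server object is a D in the chain
+        // Object -> A -> B -> D, then srvDepth exceeds cliDepth by one and the
+        // loop above walks D up to the server-side B, whose vtable layout
+        // matches the client's, so the client's slot index is valid on it.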
+
+ if (m_pCliMD->HasMethodInstantiation())
+ {
+ GetGenericMethodAddress(pSrvSideMT);
+ }
+ else
+ {
+ m_pTargetAddress = pSrvSideMT->GetRestoredSlot(dwMethodSlot);
+
+#ifndef _DEBUG
+ if (bFindServerMD)
+#endif
+ m_pSrvMD = pSrvSideMT->GetMethodDescForSlot(dwMethodSlot);
+ }
+ }
+ else
+ {
+ if (m_pCliMD->HasMethodInstantiation())
+ GetGenericMethodAddress(pSrvMT);
+ else
+ {
+ m_pTargetAddress = pSrvMT->GetRestoredSlot(dwMethodSlot);
+
+#ifndef _DEBUG
+ if (bFindServerMD)
+#endif
+ m_pSrvMD = pSrvMT->GetMethodDescForSlot(dwMethodSlot);
+ }
+ }
+
+ _ASSERTE(m_pTargetAddress);
+#ifdef _DEBUG
+#ifndef MDIL // Triton binder doesn't have method names available
+ _ASSERTE(!strcmp(m_pSrvMD->GetName(), m_pCliMD->GetName()));
+#endif
+ DefineFullyQualifiedNameForClass();
+ LPCUTF8 szSrvTypeName = GetFullyQualifiedNameForClassNestedAware(pSrvMT);
+ LPCUTF8 pszMethodName = m_pCliMD->GetName();
+ LOG((LF_REMOTING, LL_INFO100, "GetTargetAddressFast. Address of %s::%s is 0x%x\n", &szSrvTypeName[0], pszMethodName, m_pTargetAddress));
+#endif // _DEBUG
+ return TRUE;
+}
+
+BOOL
+CrossDomainChannel::GetInterfaceMethodAddressFast(DWORD optFlags, MethodTable *pSrvMT, BOOL bFindServerMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pCliMD);
+
+ MethodTable *pCliItfMT = m_pCliMD->GetMethodTable();
+ _ASSERTE(pCliItfMT->IsInterface());
+
+    // Only use the fast path if the interface is shared. If the interface isn't shared, then we'll have to search the
+    // interface map on the server type using the name as key and then deref the slot # etc. I think shared interfaces
+    // will be the common pattern. If not, they should be.
+    // Note that it's not enough to check that the client interface is shared; it must also be loaded in the server
+    // appdomain (since it's now possible to have more than one instance of a shared assembly in a process).
+ _ASSERTE(pCliItfMT->IsDomainNeutral());
+ AppDomain* ad = SystemDomain::GetAppDomainFromId(m_pSrvDomain,ADV_RUNNINGIN);
+ if (ad->FindDomainAssembly(pCliItfMT->GetAssembly()) == NULL)
+ return FALSE;
+
+ m_pSrvMD = NULL;
+
+ OBJECTREF thisObj = GetServerObject();
+
+#ifdef FEATURE_COMINTEROP
+ // Check for a COM interop server.
+ if (thisObj->GetMethodTable()->IsComObjectType())
+ {
+#if 0
+ // We don't have all the logic in place to deal with COM interop yet. The following code is taken from the regular remoting
+ // invocation path in CStackBuilderSink::PrivateProcessMessage, but I think we still need some work on the actual invocation
+ // itself (leastways we didn't end up invoking the method correctly when I tried it).
+ // For now we'll just bail back to the regular remoting path for COM interop.
+ m_pSrvMD = thisObj->GetMethodTable()->GetMethodDescForComInterfaceMethod(m_pCliMD, false);
+ if (m_pSrvMD == NULL)
+ return FALSE;
+#endif
+ return FALSE;
+ }
+#endif // FEATURE_COMINTEROP
+
+ GCPROTECT_BEGIN(thisObj);
+
+ DispatchSlot impl(pSrvMT->FindDispatchSlotForInterfaceMD(m_pCliMD));
+ CONSISTENCY_CHECK(!impl.IsNull());
+ m_pSrvMD = impl.GetMethodDesc();
+
+ _ASSERTE(m_pSrvMD);
+
+ // If the method called has a generic instantiation then the server method desc we've just received doesn't contain that
+ // information (generic method slots are filled with generic definition method descs). We need to build the exact method desc by
+ // copying the instantiation from the client method (translating each type into the new domain of course).
+ if (m_pSrvMD->HasMethodInstantiation())
+ m_pSrvMD = InstantiateMethod(m_pCliMD, m_pSrvMD, pSrvMT);
+
+ m_pTargetAddress = m_pSrvMD->GetCallTarget(&thisObj);
+
+ GCPROTECT_END();
+
+ return TRUE;
+}
+
+
+// Here we check whether the remote call is a cross-domain call and, if so, whether it qualifies.
+BOOL
+CrossDomainChannel::CheckCrossDomainCall(TPMethodFrame *pFrame)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ m_pFrame = pFrame;
+ m_pCliMD = m_pFrame->GetFunction();
+
+ MethodTable *pCliMT = m_pCliMD->GetMethodTable();
+
+ // Check if this is an async delegate call
+ if (pCliMT->IsDelegate())
+ return FALSE;
+
+    // Only use the fast path if the interface is shared. If the interface isn't shared, then we'll have to search the
+    // interface map on the server type using the name as key and then deref the slot # etc. I think shared interfaces
+    // will be the common pattern.
+ if (pCliMT->IsInterface() && !pCliMT->IsDomainNeutral())
+ return FALSE;
+
+ OBJECTREF refTP = pFrame->GetThis();
+ REALPROXYREF refRP = CTPMethodTable::GetRP(refTP);
+
+ // Check if this is a x-domain call
+ DWORD domainID = refRP->GetDomainID();
+ if (domainID == 0)
+        return FALSE; // Not a cross-appdomain call, or proxy not initialized for optimization
+
+ // Check if we are in a context different from default. If so, we may need to run context
+ // policies etc. Use regular path.
+ if (GetThread()->GetContext() != SystemDomain::GetCurrentDomain()->GetDefaultContext())
+ return FALSE;
+
+ // Check if method call args can be blitted (or not optimizable at all)
+ m_xret = RemotableMethodInfo::IsCrossAppDomainOptimizable(m_pCliMD, &m_numStackSlotsToCopy);
+ if (RemotableMethodInfo::IsCallNotOptimizable(m_xret))
+ {
+#ifdef _DEBUG
+ DefineFullyQualifiedNameForClass();
+ LPCUTF8 szSrvTypeName = GetFullyQualifiedNameForClassNestedAware(m_pCliMD->GetMethodTable());
+ LOG((LF_REMOTING, LL_INFO100, "CheckCrossDomainCall. Call to %s::%s is not optimizable\n",
+ &szSrvTypeName[0], m_pCliMD->GetName()));
+#endif // _DEBUG
+ return FALSE;
+ }
+
+ m_pCliDomain = SystemDomain::GetCurrentDomain();
+ m_pSrvDomain = ADID(domainID);
+
+ return ExecuteCrossDomainCall();
+}
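+// To summarize the checks above: the fast path is rejected for delegate
+// (async) calls, non-domain-neutral interfaces, proxies that are not marked as
+// cross-appdomain, calls made from a non-default context, and signatures that
+// the static analysis deems not optimizable; everything else proceeds to
+// ExecuteCrossDomainCall.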
+
+// Dereference the server identity handle to reach the server object.
+// If the handle is null, someone either called Disconnect on the server
+// or the server's lease ran out.
+OBJECTREF CrossDomainChannel::GetServerObject()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(m_pSrvDomain == SystemDomain::GetCurrentDomain()->GetId());
+
+ OBJECTREF refSrvIdentity = ObjectFromHandle(m_refSrvIdentity);
+ if (refSrvIdentity == NULL)
+ {
+ OBJECTREF refTP = m_pFrame->GetThis();
+ REALPROXYREF refRP = CTPMethodTable::GetRP(refTP);
+ OBJECTREF refIdentity = ObjectToOBJECTREF((Object *)refRP->GetPtrOffset(CRemotingServices::GetOffsetOfCliIdentityInRP()));
+ STRINGREF refURI = (STRINGREF)ObjectToOBJECTREF((Object *)refIdentity->GetPtrOffset(CRemotingServices::GetOffsetOfURIInIdentity()));
+ SString sURI;
+ refURI->GetSString(sURI);
+
+ COMPlusThrow(kRemotingException,
+ IDS_REMOTING_SERVER_DISCONNECTED,
+ sURI.GetUnicode());
+ }
+ OBJECTREF srvObject = ObjectToOBJECTREF((Object *)refSrvIdentity->GetPtrOffset(CRemotingServices::GetOffsetOfTPOrObjInIdentity()));
+ return srvObject;
+}
+
+// Here we find the target method address and decide whether to execute the call
+// locally; if it is remote, whether to blit the arguments or to marshal them.
+BOOL CrossDomainChannel::ExecuteCrossDomainCall()
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ BOOL bOptimizable = TRUE;
+
+ {
+ ProfilerRemotingClientCallbackHolder profilerHolder;
+
+ // Short circuit calls to Object::GetType and run them locally
+ if (m_pCliMD == CRemotingServices::MDofObjectGetType())
+ {
+ LOG((LF_REMOTING, LL_INFO100, "ExecuteCrossDomainCall. Short circuiting call to Object::GetType and running it locally.\n"));
+ OBJECTREF refTP = m_pFrame->GetThis();
+ OBJECTREF refType = CRemotingServices::GetClass(refTP);
+ LPVOID pReturnValuePtr = m_pFrame->GetReturnValuePtr();
+ *(Object **)pReturnValuePtr = OBJECTREFToObject(refType);
+ }
+ else if (RemotableMethodInfo::AreArgsBlittable(m_xret))
+ {
+ bOptimizable = BlitAndCall();
+ }
+ else
+ {
+ bOptimizable = MarshalAndCall();
+ }
+ }
+
+ if (!bOptimizable)
+ return FALSE;
+
+ // Check for the need to trip thread
+ if (GetThread()->CatchAtSafePointOpportunistic())
+ {
+ // There is no need to GC protect the return object as
+ // TPFrame is GC protecting it
+ CommonTripThread();
+ }
+
+#ifdef _TARGET_X86_
+ // Set the number of bytes to pop for x86
+ m_pFrame->SetCbStackPop(m_numStackSlotsToCopy * sizeof(SIZE_T));
+#endif // _TARGET_X86_
+
+ return TRUE;
+}
+
+BOOL
+CrossDomainChannel::InitServerInfo()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(m_pFrame);
+ _ASSERTE(m_pSrvDomain == SystemDomain::GetCurrentDomain()->GetId());
+
+ // Get the server object
+ OBJECTREF refTP = m_pFrame->GetThis();
+ REALPROXYREF refRP = CTPMethodTable::GetRP(refTP);
+ m_refSrvIdentity = (OBJECTHANDLE)refRP->GetPtrOffset(CRemotingServices::GetOffsetOfSrvIdentityInRP());
+ OBJECTREF srvObject = GetServerObject();
+
+ MethodTable *pSrvMT = srvObject->GetMethodTable();
+
+ // Find the target address
+ DWORD optFlag = refRP->GetOptFlags();
+
+    // If we are cloning some arguments to the server domain, we want to do a type check
+    // on the cloned objects against the expected type. To find the expected type, we need to
+    // know the method signature on the server side, which in turn is obtainable if we know
+    // the server MethodDesc. Finding a MethodDesc from a slot isn't cheap, so we do it only if we need it.
+ BOOL bFindServerMD = (RemotableMethodInfo::AreArgsBlittable(m_xret)) ? FALSE : TRUE;
+ BOOL bResultOfAddressLookup = FALSE;
+
+ if (m_pCliMD->GetMethodTable()->IsInterface())
+ {
+ bResultOfAddressLookup = GetInterfaceMethodAddressFast(optFlag, pSrvMT, bFindServerMD);
+ }
+ else if ((optFlag & OPTIMIZATION_FLAG_INITTED) && (optFlag & OPTIMIZATION_FLAG_PROXY_EQUIVALENT))
+ {
+ bResultOfAddressLookup = GetTargetAddressFast(optFlag, pSrvMT, bFindServerMD);
+ }
+ else
+ {
+ // If the proxy is not cast to an equivalent type, do not perform any optimizations
+ bResultOfAddressLookup = FALSE;
+ }
+
+#ifdef _DEBUG
+ if (!bResultOfAddressLookup)
+ LOG((LF_REMOTING, LL_INFO100, "InitServerInfo. Skipping fast path because failed to find target address.\n"));
+#endif // _DEBUG
+
+ _ASSERTE(!bResultOfAddressLookup || !bFindServerMD || m_pSrvMD);
+ return bResultOfAddressLookup;
+}
+
+// A macro used to help calculate the exact declaring type of a method (this may not be as simple as calling GetMethodTable on the
+// method desc if the type is generic and not an interface). We get the additional information from the instance (which provides an
+// exact method table, though not necessarily the one the method is actually _declared_ on). We don't compute the instance or the
+// method table from that instance in this macro since the logic varies greatly from client to server (the client has to adjust for
+// the fact that the instance is a TP).
+// We assume a variable called thDeclaringType has already been declared in the current scope.
+#define CDC_DETERMINE_DECLARING_TYPE(_pMD, _thInst) \
+ if (!(_pMD)->HasClassInstantiation() || (_pMD)->IsInterface()) \
+ { \
+ thDeclaringType = TypeHandle((_pMD)->GetMethodTable()); \
+ } \
+ else \
+ { \
+ Instantiation inst = (_pMD)->GetExactClassInstantiation(_thInst); \
+ MethodTable *pApproxDeclaringMT = (_pMD)->GetMethodTable(); \
+ thDeclaringType = ClassLoader::LoadGenericInstantiationThrowing(pApproxDeclaringMT->GetModule(), \
+ pApproxDeclaringMT->GetCl(), \
+ inst); \
+ }
+
+// We have decided the arguments are blittable. We may still need to marshal
+// the call context, if any.
+BOOL
+CrossDomainChannel::BlitAndCall()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+
+ SIZE_T *pRegArgs = NULL;
+ SIZE_T *pStackArgs = NULL;
+#ifdef CALLDESCR_ARGREGS
+ ArgumentRegisters RegArgs = {0};
+ pRegArgs = (SIZE_T*)&RegArgs;
+#endif
+#ifdef CALLDESCR_FPARGREGS
+ FloatArgumentRegisters *pFloatArgumentRegisters = NULL;
+#endif
+
+ BOOL bOptimizable = TRUE;
+ BOOL bHasObjRefReturnVal = FALSE;
+
+ Thread *pCurThread = GetThread();
+
+#ifdef _DEBUG
+ LPCUTF8 pszMethodName;
+ pszMethodName = m_pCliMD->GetName();
+ LOG((LF_REMOTING, LL_INFO100, "BlitAndCall. Blitting arguments to method %s\n", pszMethodName));
+#endif // _DEBUG
+
+ // Collect all client domain GC references together in a single GC frame.
+ // refReturnValue contains the returned object.
+ // It can also contain a boxed object when the return is a value type and needs marshalling
+ struct _gc {
+ OBJECTREF refReturnValue;
+ OBJECTREF refException;
+ OBJECTREF refExecutionContext;
+ OBJECTREF refPrincipal;
+ } ClientGC;
+ ZeroMemory(&ClientGC, sizeof(ClientGC));
+ GCPROTECT_BEGIN(ClientGC);
+
+ _ASSERTE(RemotableMethodInfo::IsReturnBlittable(m_xret));
+
+ // Check for any logical call context that contains data
+ BOOL bMarshalCallContext = FALSE;
+ BOOL bMarshalReturnCallContext = FALSE;
+ if (pCurThread->IsExposedObjectSet())
+ {
+ THREADBASEREF ref = (THREADBASEREF) pCurThread->GetExposedObjectRaw();
+ _ASSERTE(ref != NULL);
+
+ EXECUTIONCONTEXTREF refExecCtx = (EXECUTIONCONTEXTREF) ref->GetExecutionContext();
+ if (refExecCtx != NULL)
+ {
+ ClientGC.refExecutionContext = refExecCtx;
+ ClientGC.refPrincipal = ReadPrincipal();
+
+ LOGICALCALLCONTEXTREF refLogCallCtx = refExecCtx->GetLogicalCallContext();
+ if (refLogCallCtx != NULL)
+ {
+ if (refLogCallCtx->ContainsDataForSerialization())
+ {
+ bMarshalCallContext = TRUE;
+ }
+ }
+ }
+ }
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Assume that exception at server was NotCorrupting
+ CorruptionSeverity severity = NotCorrupting;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ // Push the frame
+ ENTER_DOMAIN_ID(m_pSrvDomain);
+
+ // Now create a server domain GC frame for all server side GC references.
+ struct _gc {
+ OBJECTREF refReturnValue;
+ OBJECTREF refException;
+ OBJECTREF refExecutionContext;
+ } ServerGC;
+ ZeroMemory(&ServerGC, sizeof(ServerGC));
+ GCPROTECT_BEGIN(ServerGC);
+
+ // Initialize server side info, such as method address etc
+ bOptimizable = InitServerInfo();
+
+ if (!bOptimizable)
+ goto LeaveDomain;
+
+ RenewLease();
+
+ if (bMarshalCallContext)
+ {
+        LOG((LF_REMOTING, LL_INFO100, "BlitAndCall. Marshalling call context for method %s\n", pszMethodName));
+ CrossAppDomainClonerCallback cadcc;
+ ObjectClone Marshaller(&cadcc, CrossAppDomain, FALSE);
+ ServerGC.refExecutionContext = Marshaller.Clone(ClientGC.refExecutionContext,
+ m_pCliDomain,
+ GetAppDomain(),
+ ClientGC.refExecutionContext);
+ THREADBASEREF ref = (THREADBASEREF) pCurThread->GetExposedObjectRaw();
+ _ASSERTE(ref != NULL);
+
+ ref->SetExecutionContext(ServerGC.refExecutionContext);
+
+ Marshaller.RemoveGCFrames();
+ }
+ else if (pCurThread->IsExposedObjectSet())
+ {
+ THREADBASEREF ref = (THREADBASEREF) pCurThread->GetExposedObjectRaw();
+ _ASSERTE(ref != NULL);
+
+ ref->SetExecutionContext(NULL);
+ }
+
+#ifdef PROFILING_SUPPORTED
+ // If we're profiling, notify the profiler that we're about to invoke the remoting target
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackRemoting());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RemotingServerInvocationStarted();
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ {
+ GCX_COOP();
+ UINT64 uRegTypeMap = 0;
+ pStackArgs = (SIZE_T *)(m_pFrame->GetTransitionBlock() + TransitionBlock::GetOffsetOfArgs());
+
+ // Get the 'this' object
+ OBJECTREF srvObject = GetServerObject();
+
+#if defined(_TARGET_X86_)
+ pRegArgs[0] = *((SIZE_T*)(m_pFrame->GetTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters()));
+ pRegArgs[1] = (SIZE_T) OBJECTREFToObject(srvObject);
+#elif defined(CALLDESCR_ARGREGS)
+ // Have to buffer argument registers since we're going to overwrite the this object with the server
+ // version and the frame owning the registers is in the wrong domain to report that object.
+ pRegArgs[0] = (SIZE_T) OBJECTREFToObject(srvObject);
+ memcpy(pRegArgs + 1,
+ (SIZE_T*)(m_pFrame->GetTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters()) + 1,
+ sizeof(ArgumentRegisters) - sizeof(SIZE_T));
+
+#ifdef CALLDESCR_FPARGREGS
+        // Only provide a pointer to the floating point area of the stack frame if there are any floating
+ // point arguments (passing NULL optimizes the CallDescr thunk by omitting the initialization of
+ // floating point argument registers).
+ if (RemotableMethodInfo::DoArgsContainAFloat(m_xret))
+ pFloatArgumentRegisters = (FloatArgumentRegisters*)(m_pFrame->GetTransitionBlock() + TransitionBlock::GetOffsetOfFloatArgumentRegisters());
+#endif // CALLDESCR_FPARGREGS
+
+#else // CALLDESCR_ARGREGS
+
+ // It's a pity that we have to allocate a buffer for the arguments on the stack even in BlitAndCall().
+ // The problem is we can't use the portion of the stack protected by m_pFrame to store the srvObject,
+ // since the srvObject is in the server domain and the TPMethodFrame m_pFrame is in the client domain.
+        // I don't think we need to protect the srvObject in this case, since it's reachable from the transparent
+        // proxy, which is protected by the TPMethodFrame.
+ SIZE_T* pTmpStackArgs = (SIZE_T*)_alloca(m_numStackSlotsToCopy * sizeof(SIZE_T));
+ memcpy(pTmpStackArgs, pStackArgs, m_numStackSlotsToCopy * sizeof(SIZE_T));
+ pStackArgs = pTmpStackArgs;
+
+ pStackArgs[0] = (SIZE_T)OBJECTREFToObject(srvObject);
+#endif // CALLDESCR_ARGREGS
+
+#if defined(CALLDESCR_REGTYPEMAP) || defined(COM_STUBS_SEPARATE_FP_LOCATIONS)
+ // We have to copy the floating point registers from a different stack location to the portion of
+ // the stack used to save the general registers. Since this is expensive, we only do this if
+ // we have some floating point arguments.
+ if (RemotableMethodInfo::DoArgsContainAFloat(m_xret))
+ {
+ // When computing the method signature we need to take special care if the call is on a non-interface class with a
+ // generic instantiation (since in that case we may have a representative method with a non-concrete signature).
+ TypeHandle thDeclaringType;
+ CDC_DETERMINE_DECLARING_TYPE(m_pCliMD, TypeHandle(CTPMethodTable::GetMethodTableBeingProxied(m_pFrame->GetThis())));
+ MetaSig mSig(m_pCliMD, thDeclaringType);
+ ArgIterator argit(&mSig);
+
+ int offset;
+ while (TransitionBlock::InvalidOffset != (offset = argit.GetNextOffset()))
+ {
+ int regArgNum = TransitionBlock::GetArgumentIndexFromOffset(offset);
+
+ if (regArgNum >= NUM_ARGUMENT_REGISTERS)
+ break;
+
+ CorElementType argTyp = argit.GetArgType();
+
+#ifdef CALLDESCR_REGTYPEMAP
+ FillInRegTypeMap(offset, argTyp, (BYTE*)&uRegTypeMap);
+#endif
+
+#ifdef COM_STUBS_SEPARATE_FP_LOCATIONS
+ if (argTyp == ELEMENT_TYPE_R4 || argTyp == ELEMENT_TYPE_R8)
+ {
+ PVOID pSrc = (PVOID)(m_pFrame->GetTransitionBlock() + m_pFrame->GetFPArgOffset(regArgNum));
+
+ ARG_SLOT val;
+ if (argTyp == ELEMENT_TYPE_R4)
+ val = FPSpillToR4(pSrc);
+ else
+ val = FPSpillToR8(pSrc);
+
+ *(ARG_SLOT*)(&pStackArgs[regArgNum]) = val;
+ }
+#endif
+ }
+ }
+#endif // CALLDESCR_REGTYPEMAP || COM_STUBS_SEPARATE_FP_LOCATIONS
+
+ CallDescrData callDescrData;
+
+ callDescrData.pSrc = pStackArgs;
+ callDescrData.numStackSlots = m_numStackSlotsToCopy;
+#ifdef CALLDESCR_ARGREGS
+ callDescrData.pArgumentRegisters = (ArgumentRegisters *)pRegArgs;
+#endif
+#ifdef CALLDESCR_FPARGREGS
+ callDescrData.pFloatArgumentRegisters = pFloatArgumentRegisters;
+#endif
+#ifdef CALLDESCR_REGTYPEMAP
+ callDescrData.dwRegTypeMap = uRegTypeMap;
+#endif
+ callDescrData.fpReturnSize = GetFPReturnSize();
+ callDescrData.pTarget = m_pTargetAddress;
+
+ DispatchCall(
+ &callDescrData,
+ &ServerGC.refException,
+ GET_CTX_TRANSITION_FRAME()
+ COMMA_CORRUPTING_EXCEPTIONS_ONLY(&severity)
+ );
+
+ // If the return value is a GC ref, store it in a protected place
+ if (ServerGC.refException != NULL)
+ {
+ // Return value is invalid if there was exception thrown
+ }
+ else
+ if (RemotableMethodInfo::IsReturnGCRef(m_xret))
+ {
+ ServerGC.refReturnValue = ObjectToOBJECTREF(*(Object **)(&callDescrData.returnValue));
+ bHasObjRefReturnVal = TRUE;
+ }
+ else
+ {
+ memcpyNoGCRefs(m_pFrame->GetReturnValuePtr(), &callDescrData.returnValue, sizeof(callDescrData.returnValue));
+ }
+ }
+
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackRemoting());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RemotingServerInvocationReturned();
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ // Propagate any logical call context changes except those to the principal
+ if (pCurThread->IsExposedObjectSet())
+ {
+ THREADBASEREF ref = (THREADBASEREF) pCurThread->GetExposedObjectRaw();
+ _ASSERTE(ref != NULL);
+
+ EXECUTIONCONTEXTREF refExecCtx = (EXECUTIONCONTEXTREF) ref->GetExecutionContext();
+ if (refExecCtx != NULL)
+ {
+ LOGICALCALLCONTEXTREF refLogCallCtx = refExecCtx->GetLogicalCallContext();
+ if (bMarshalCallContext ||
+ (refLogCallCtx != NULL && refLogCallCtx->ContainsNonSecurityDataForSerialization()))
+ {
+ ServerGC.refExecutionContext = ref->GetExecutionContext();
+ bMarshalReturnCallContext = TRUE;
+ LOG((LF_REMOTING, LL_INFO100, "BlitAndCall. Marshalling return call context\n", pszMethodName));
+ CrossAppDomainClonerCallback cadcc;
+ ObjectClone Marshaller(&cadcc, CrossAppDomain, FALSE);
+
+ ResetPrincipal();
+
+ EXECUTIONCONTEXTREF ecref = (EXECUTIONCONTEXTREF)Marshaller.Clone(ServerGC.refExecutionContext,
+ GetAppDomain(),
+ m_pCliDomain,
+ ServerGC.refExecutionContext);
+ if (ClientGC.refExecutionContext != NULL)
+ ((EXECUTIONCONTEXTREF)ClientGC.refExecutionContext)->SetLogicalCallContext(ecref->GetLogicalCallContext());
+ else
+ ClientGC.refExecutionContext = (OBJECTREF)ecref;
+
+ Marshaller.RemoveGCFrames();
+ }
+ }
+ }
+
+ if (ServerGC.refException != NULL)
+ {
+ LOG((LF_REMOTING, LL_INFO100, "BlitAndCall. Exception thrown ! Marshalling exception. \n", pszMethodName));
+
+ // Save Watson buckets before the exception object is changed
+ if (GetThread() != NULL)
+ {
+ // Ensure that we have the buckets for the exception in question.
+ // For preallocated exceptions, we capture the buckets in the
+ // UE WatsonBucket Tracker in AppDomainTransitionExceptionFilter.
+ //
+ // When the exception is reraised in the returning AppDomain,
+ // StackTraceInfo::AppendElement will copy over the buckets
+ // to the EHtracker corresponding to the raised exception.
+ if (!CLRException::IsPreallocatedExceptionObject(ServerGC.refException))
+ {
+ // For non-preallocated exception objects, the throwable
+ // is expected to have the buckets in it, assuming that
+ // CLR's personality routine for managed code was notified
+ // of the exception before returning from the AD transition.
+ //
+ // There are scenarios where a few managed frames, post AD transition,
+ // may get optimized away by the JIT. If a managed exception is
+ // thrown from within the VM at a later time, the personality routine
+ // for managed code will not be invoked if there are no managed frames present
+ // between the AD Transition frame and the frame where the VM raised the managed
+ // exception.
+ //
+ // When such an exception comes back to the calling AppDomain, we will
+ // come here and look for watson buckets and may not find them. In such
+ // a case, simply log it.
+ if(!((EXCEPTIONREF)ServerGC.refException)->AreWatsonBucketsPresent())
+ {
+ LOG((LF_EH, LL_INFO100, "CrossDomainChannel::BlitAndCall: Watson buckets not found in regular exception object. Exception likely raised in native code.\n"));
+ }
+ }
+ }
+
+ CrossAppDomainClonerCallback cadcc;
+ ObjectClone Marshaller(&cadcc, CrossAppDomain, FALSE);
+ ClientGC.refException = Marshaller.Clone(ServerGC.refException, GetAppDomain(), m_pCliDomain, ServerGC.refExecutionContext);
+
+ Marshaller.RemoveGCFrames();
+
+ // We have to be in the right domain before we throw the exception
+ goto LeaveDomain;
+ }
+
+ if (bHasObjRefReturnVal)
+ {
+ // Must be a domain agile GC ref. We can just copy the reference into the client GC frame.
+ ClientGC.refReturnValue = ServerGC.refReturnValue;
+ }
+
+LeaveDomain: ;
+
+ GCPROTECT_END(); // ServerGC
+
+ END_DOMAIN_TRANSITION;
+
+ if (ClientGC.refException != NULL)
+ {
+ RestorePrincipal(&ClientGC.refPrincipal);
+ COMPlusThrow(ClientGC.refException
+ COMMA_CORRUPTING_EXCEPTIONS_ONLY(severity)
+ );
+ }
+
+ if (pCurThread->IsExposedObjectSet())
+ {
+ THREADBASEREF ref = (THREADBASEREF) pCurThread->GetExposedObjectRaw();
+ _ASSERTE(ref != NULL);
+
+ ref->SetExecutionContext(ClientGC.refExecutionContext);
+ }
+
+ RestorePrincipal(&ClientGC.refPrincipal);
+
+ // If the return type is an object, take it out of the protected ref
+ if (bHasObjRefReturnVal)
+ {
+ *(Object **)m_pFrame->GetReturnValuePtr() = OBJECTREFToObject(ClientGC.refReturnValue);
+ }
+
+ GCPROTECT_END(); // ClientGC
+
+ return bOptimizable;
+}
+
+// Argument attributes
+#define ARG_NEEDS_UNBOX 0x80000000
+#define ARG_GOES_IN_EDX 0x40000000
+#define ARG_IS_BYREF 0x20000000
+#define ARG_OFFSET_MASK 0x00FFFFFF
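+// For illustration (hypothetical values): a by-ref value type argument whose slot
+// lives at stack offset 0x10 would be described by the attribute word
+// (ARG_NEEDS_UNBOX | ARG_IS_BYREF | 0x10); the dispatch code below recovers the
+// offset with (attribs & ARG_OFFSET_MASK) and the disposition from the high bits.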
+
+// Structure to hold arguments for MarshalAndCall
+struct MarshalAndCallArgs : public CtxTransitionBaseArgs
+{
+ MarshalAndCallArgs() : Marshaller(&cadcc, CrossAppDomain, FALSE)
+ {
+ STATIC_CONTRACT_SO_INTOLERANT;
+ }
+
+ CrossDomainChannel * pThis;
+
+ struct _gc {
+ OBJECTREF refReturnValue;
+ OBJECTREF refException;
+ OBJECTREF refExecutionContext;
+ OBJECTREF refPrincipal;
+ } ClientGC;
+
+ BOOL bOptimizable;
+
+ ObjectClone Marshaller;
+ CrossAppDomainClonerCallback cadcc;
+
+ MetaSig *mSig;
+ ArgIterator *argit;
+
+ DWORD dwNumArgs;
+#ifdef CALLDESCR_ARGREGS
+ SIZE_T *pRegArgs;
+#endif
+#ifdef CALLDESCR_FPARGREGS
+ FloatArgumentRegisters *pFloatArgumentRegisters;
+#endif
+ SIZE_T *pStackArgs;
+ DWORD *pArgAttribs;
+
+ DWORD dwNumObjectsMarshalled;
+ BOOL *bMarshalledArgs;
+ OBJECTREF *pClientArgArray;
+
+ BOOL bHasByRefArgsToMarshal;
+ int *pByRefArgAttribs;
+ TypeHandle *pThByRefs;
+
+ TypeHandle retTh;
+ BOOL bHasObjRefReturnVal;
+ BOOL bHasRetBuffArg;
+ BOOL bHasValueTypeReturnValToMarshal;
+
+ BOOL bMarshalCallContext;
+ BOOL bMarshalReturnCallContext;
+
+#ifdef CALLDESCR_REGTYPEMAP
+ UINT64 uRegTypeMap;
+#endif
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ CorruptionSeverity severity;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+};
+
+// Simple wrapper to go from C to C++.
+void MarshalAndCall_Wrapper2(MarshalAndCallArgs * pArgs)
+{
+ WRAPPER_NO_CONTRACT;
+
+ pArgs->pThis->MarshalAndCall_Wrapper(pArgs);
+}
+
+void CrossDomainChannel::MarshalAndCall_Wrapper(MarshalAndCallArgs * pArgs)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ // Set up a rip-cord that will immediately stop us reporting GC references we're keeping alive in the Marshaller that was passed
+ // to us in the event that this appdomain is unloaded underneath our feet. This avoids us keeping any server objects alive after
+ // their domain has unloaded.
+ ReportClonerRefsHolder sHolder(&pArgs->Marshaller);
+
+ Thread* pCurThread = GetThread();
+ AppDomain* pCurAppDomain = GetAppDomain();
+
+ // Now create a server domain GC frame for all non-arg server side GC references.
+ struct _gc {
+ OBJECTREF refReturnValue;
+ OBJECTREF refException;
+ OBJECTREF refExecutionContext;
+ } ServerGC;
+ ZeroMemory(&ServerGC, sizeof(ServerGC));
+ GCPROTECT_BEGIN(ServerGC);
+
+ // And a variable-sized array and frame of marshalled arg GC references.
+ OBJECTREF *pServerArgArray = NULL;
+ pServerArgArray = (OBJECTREF *) _alloca(pArgs->dwNumObjectsMarshalled * sizeof(OBJECTREF));
+ ZeroMemory(pServerArgArray, sizeof(OBJECTREF) * pArgs->dwNumObjectsMarshalled);
+
+ TypeHandle* pServerArgTH = (TypeHandle *) _alloca(pArgs->dwNumObjectsMarshalled * sizeof(TypeHandle));
+ GCPROTECT_ARRAY_BEGIN(pServerArgArray[0], pArgs->dwNumObjectsMarshalled);
+
+ // Initialize server side info, such as method address etc
+ pArgs->bOptimizable = InitServerInfo();
+
+ if (!pArgs->bOptimizable)
+ goto LeaveDomain;
+
+ RenewLease();
+
+ // First clone objref arguments into the called domain
+ if (!RemotableMethodInfo::AreArgsBlittable(m_xret))
+ {
+ // When computing the method signature we need to take special care if the call is on a non-interface class with a
+ // generic instantiation (since in that case we may have a representative method with a non-concrete signature).
+ TypeHandle thDeclaringType;
+ CDC_DETERMINE_DECLARING_TYPE(m_pSrvMD, TypeHandle(GetServerObject()->GetTypeHandle()));
+ MetaSig mSrvSideSig(m_pSrvMD, thDeclaringType);
+ DWORD dwMarshalledArg = 0;
+ for (DWORD i = 0; i < pArgs->dwNumArgs; i++)
+ {
+ CorElementType cType = mSrvSideSig.NextArg();
+ if (pArgs->bMarshalledArgs[i] != TRUE)
+ {
+ // Make sure argument type is loaded
+ if (cType == ELEMENT_TYPE_VALUETYPE)
+ {
+ mSrvSideSig.GetLastTypeHandleThrowing();
+ }
+ continue;
+ }
+
+ TypeHandle argTh;
+ if (cType == ELEMENT_TYPE_BYREF)
+ mSrvSideSig.GetByRefType(&argTh);
+ else
+ argTh = mSrvSideSig.GetLastTypeHandleThrowing();
+
+ pServerArgTH[dwMarshalledArg] = argTh;
+ pServerArgArray[dwMarshalledArg] = pArgs->Marshaller.Clone(pArgs->pClientArgArray[dwMarshalledArg],
+ argTh,
+ m_pCliDomain,
+ pCurAppDomain,
+ pArgs->ClientGC.refExecutionContext);
+ dwMarshalledArg++;
+ }
+
+ // Make sure return type is loaded
+ TypeHandle thReturn = mSrvSideSig.GetRetTypeHandleThrowing();
+ _ASSERTE(!thReturn.IsNull());
+
+ if (pArgs->bHasValueTypeReturnValToMarshal)
+ {
+ // The return is a value type which could have GC ref fields. Allocate a boxed value type so that the
+ // return value goes into that. On return from the call we'll clone it and copy it onto the stack
+ ServerGC.refReturnValue = thReturn.AsMethodTable()->Allocate();
+ }
+ }
+
+ // Then clone the call context if any
+ if (pArgs->bMarshalCallContext)
+ {
+#ifdef _DEBUG
+ LOG((LF_REMOTING, LL_INFO100, "MarshalAndCall. Marshalling call context\n"));
+#endif
+ ServerGC.refExecutionContext = pArgs->Marshaller.Clone(pArgs->ClientGC.refExecutionContext,
+ m_pCliDomain,
+ pCurAppDomain,
+ pArgs->ClientGC.refExecutionContext);
+ THREADBASEREF ref = (THREADBASEREF) pCurThread->GetExposedObjectRaw();
+ _ASSERTE(ref != NULL);
+
+ ref->SetExecutionContext(ServerGC.refExecutionContext);
+ }
+ else if (pCurThread->IsExposedObjectSet())
+ {
+ THREADBASEREF ref = (THREADBASEREF) pCurThread->GetExposedObjectRaw();
+ _ASSERTE(ref != NULL);
+
+ ref->SetExecutionContext(NULL);
+ }
+
+#ifdef PROFILING_SUPPORTED
+ // If we're profiling, notify the profiler that we're about to invoke the remoting target
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackRemoting());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RemotingServerInvocationStarted();
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ {
+ GCX_COOP();
+ if (!RemotableMethodInfo::AreArgsBlittable(m_xret))
+ {
+ // Next place arguments into the destination array
+ // No GC should occur between now and call dispatch
+ for (DWORD i = 0 ; i < pArgs->dwNumObjectsMarshalled; i++)
+ {
+ BOOL bNeedUnbox = pArgs->pArgAttribs[i] & ARG_NEEDS_UNBOX;
+ BOOL bGoesInEDX = pArgs->pArgAttribs[i] & ARG_GOES_IN_EDX;
+ BOOL bIsByRef = pArgs->pArgAttribs[i] & ARG_IS_BYREF;
+ DWORD dwOffset = pArgs->pArgAttribs[i] & ARG_OFFSET_MASK;
+
+ SIZE_T *pDest = NULL;
+
+#if defined(_TARGET_X86_)
+ if (bGoesInEDX)
+ {
+ // This has to be EDX for this platform.
+ pDest = pArgs->pRegArgs;
+ }
+ else
+ {
+ pDest = (SIZE_T *)((BYTE *)(pArgs->pStackArgs) + dwOffset);
+ }
+#elif defined(CALLDESCR_ARGREGS)
+ // To help deal with the fact that a single argument can span both registers and stack
+ // we've ensured that the register and stack buffers are contiguous and encoded all offsets
+ // from the beginning of the register buffer.
+ pDest = (SIZE_T *)((BYTE *)(pArgs->pRegArgs) + dwOffset);
+#else
+ pDest = (SIZE_T *)((BYTE *)(pArgs->pStackArgs) + dwOffset);
+#endif
+
+ if (bNeedUnbox && !bIsByRef)
+ {
+ pServerArgTH[i].GetMethodTable()->UnBoxIntoUnchecked(pDest, pServerArgArray[i]);
+ }
+ else if (bIsByRef)
+ {
+ if (bNeedUnbox)
+ {
+ // We don't use the fast path for byref nullables, so UnBox() can be used
+ *pDest = (SIZE_T)pServerArgArray[i]->UnBox();
+ }
+ else
+ {
+ // Point to the OBJECTREF
+ *pDest = (SIZE_T)&pServerArgArray[i];
+ }
+ }
+ else
+ {
+ *pDest = (SIZE_T)OBJECTREFToObject(pServerArgArray[i]);
+ }
+ }
+ }
+
+ // Get the 'this' object
+ OBJECTREF srvObject = GetServerObject();
+ LPVOID pvRetBuff = NULL;
+
+ FrameWithCookie<ProtectValueClassFrame>* pProtectValueClassFrame = NULL;
+ ValueClassInfo* pValueClasses = NULL;
+
+ if (pArgs->bHasRetBuffArg)
+ {
+ // Need some sort of check here that retTh has been initialized?
+ MethodTable* pMT = pArgs->retTh.GetMethodTable();
+ _ASSERTE_MSG(pMT != NULL, "GetRetType failed?");
+ if (pMT->IsStructRequiringStackAllocRetBuf())
+ {
+ SIZE_T sz = pMT->GetNumInstanceFieldBytes();
+ pvRetBuff = _alloca(sz);
+ memset(pvRetBuff, 0, sz);
+ pValueClasses = new (_alloca(sizeof(ValueClassInfo))) ValueClassInfo(pvRetBuff, pMT, pValueClasses);
+ }
+ else
+ {
+ // We don't use the fast path for values that return nullables, so UnBox() can be used
+ pvRetBuff = (PVOID)ServerGC.refReturnValue->UnBox();
+ }
+ }
+#if defined(_TARGET_X86_)
+ // Check if EDX should point to a return buffer (either stack- or heap-allocated).
+ if (pArgs->bHasValueTypeReturnValToMarshal && pArgs->bHasRetBuffArg)
+ {
+ *(pArgs->pRegArgs) = (SIZE_T)pvRetBuff;
+ }
+ (pArgs->pRegArgs)[1] = (SIZE_T)OBJECTREFToObject(srvObject);
+#elif defined(CALLDESCR_ARGREGS)
+ // On ARM the this pointer goes in r0 and any return buffer argument pointer in r1.
+ pArgs->pRegArgs[0] = (SIZE_T)OBJECTREFToObject(srvObject);
+ if (pArgs->bHasRetBuffArg)
+ {
+ pArgs->pRegArgs[1] = (SIZE_T)pvRetBuff;
+ }
+#else // CALLDESCR_ARGREGS
+
+ if (pArgs->bHasRetBuffArg)
+ {
+ (pArgs->pStackArgs)[0] = (SIZE_T)OBJECTREFToObject(srvObject);
+ (pArgs->pStackArgs)[1] = (SIZE_T)pvRetBuff;
+ }
+ else
+ {
+ (pArgs->pStackArgs)[0] = (SIZE_T)OBJECTREFToObject(srvObject);
+ }
+
+#endif // CALLDESCR_ARGREGS
+
+ CallDescrData callDescrData;
+
+ callDescrData.pSrc = pArgs->pStackArgs;
+ callDescrData.numStackSlots = m_numStackSlotsToCopy;
+#ifdef CALLDESCR_ARGREGS
+ callDescrData.pArgumentRegisters = (ArgumentRegisters *)pArgs->pRegArgs;
+#endif
+#ifdef CALLDESCR_FPARGREGS
+ callDescrData.pFloatArgumentRegisters = pArgs->pFloatArgumentRegisters;
+#endif
+#ifdef CALLDESCR_REGTYPEMAP
+ callDescrData.dwRegTypeMap = pArgs->uRegTypeMap;
+#endif
+ callDescrData.fpReturnSize = GetFPReturnSize();
+ callDescrData.pTarget = m_pTargetAddress;
+
+ if (pValueClasses != NULL)
+ {
+ pProtectValueClassFrame = new (_alloca (sizeof (FrameWithCookie<ProtectValueClassFrame>)))
+ FrameWithCookie<ProtectValueClassFrame>(pCurThread, pValueClasses);
+ }
+
+ DispatchCall(&callDescrData,
+ &ServerGC.refException,
+ pArgs->GetCtxTransitionFrame()
+ COMMA_CORRUPTING_EXCEPTIONS_ONLY(&(pArgs->severity))
+ );
+
+ // If the return value is a GC ref, store it in a protected place
+ if (ServerGC.refException != NULL)
+ {
+ // Return value is invalid if there was exception thrown
+ }
+ else
+ if (RemotableMethodInfo::IsReturnGCRef(m_xret))
+ {
+ ServerGC.refReturnValue = ObjectToOBJECTREF(*(Object **)(&callDescrData.returnValue));
+ pArgs->bHasObjRefReturnVal = TRUE;
+ }
+ else
+ if (pArgs->bHasValueTypeReturnValToMarshal)
+ {
+ if (!pArgs->bHasRetBuffArg)
+ {
+ //
+ // The value type return value is returned by value in this case.
+ // We have to copy it back into our server object.
+ //
+ // We don't use the fast path for values that return nullables, so UnBox() can be used
+ //
+ CopyValueClass(ServerGC.refReturnValue->UnBox(), &callDescrData.returnValue, ServerGC.refReturnValue->GetMethodTable(), pCurAppDomain);
+ }
+ else if (pValueClasses != NULL)
+ {
+ // We passed a stack allocated ret buff. Copy back into the allocated server object.
+ // We don't use the fast path for values that return nullables, so UnBox() can be used
+ CopyValueClass(ServerGC.refReturnValue->UnBox(), pvRetBuff, ServerGC.refReturnValue->GetMethodTable(), pCurAppDomain);
+ }
+ // In all other cases, the return value should be in the server object already.
+ }
+ else
+ {
+ memcpyNoGCRefs(m_pFrame->GetReturnValuePtr(), &callDescrData.returnValue, sizeof(callDescrData.returnValue));
+ }
+
+ if (pProtectValueClassFrame != NULL)
+ pProtectValueClassFrame->Pop(pCurThread);
+ }
+
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackRemoting());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RemotingServerInvocationReturned();
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ if (pCurThread->IsExposedObjectSet())
+ {
+ THREADBASEREF ref = (THREADBASEREF) pCurThread->GetExposedObjectRaw();
+ _ASSERTE(ref != NULL);
+
+ EXECUTIONCONTEXTREF refExecCtx = (EXECUTIONCONTEXTREF) ref->GetExecutionContext();
+ if (refExecCtx != NULL)
+ {
+ LOGICALCALLCONTEXTREF refLogCallCtx = refExecCtx->GetLogicalCallContext();
+ if (pArgs->bMarshalCallContext ||
+ (refLogCallCtx != NULL && refLogCallCtx->ContainsNonSecurityDataForSerialization()))
+ {
+ ServerGC.refExecutionContext = ref->GetExecutionContext();
+ pArgs->bMarshalReturnCallContext = TRUE;
+#ifdef _DEBUG
+ LOG((LF_REMOTING, LL_INFO100, "MarshalAndCall. Marshalling return call context\n"));
+#endif
+ ResetPrincipal();
+ EXECUTIONCONTEXTREF ecref = (EXECUTIONCONTEXTREF)pArgs->Marshaller.Clone(ServerGC.refExecutionContext,
+ pCurAppDomain,
+ m_pCliDomain,
+ ServerGC.refExecutionContext);
+ if (pArgs->ClientGC.refExecutionContext != NULL)
+ {
+ ((EXECUTIONCONTEXTREF)pArgs->ClientGC.refExecutionContext)->SetLogicalCallContext(ecref->GetLogicalCallContext());
+ }
+ else
+ {
+ pArgs->ClientGC.refExecutionContext = (OBJECTREF)ecref;
+ }
+ }
+ }
+ }
+
+
+ if (ServerGC.refException != NULL)
+ {
+#ifdef _DEBUG
+ LOG((LF_REMOTING, LL_INFO100, "MarshalAndCall. Exception thrown ! Marshalling exception. \n"));
+#endif
+
+ // Save Watson buckets before the exception object is changed
+ if (GetThread() != NULL)
+ {
+ // Ensure that we have the buckets for the exception in question.
+ // For preallocated exceptions, we capture the buckets in the
+ // UE WatsonBucket Tracker in AppDomainTransitionExceptionFilter.
+ //
+ // When the exception is reraised in the returning AppDomain,
+ // StackTraceInfo::AppendElement will copy over the buckets
+ // to the EHtracker corresponding to the raised exception.
+ if (!CLRException::IsPreallocatedExceptionObject(ServerGC.refException))
+ {
+ // For non-preallocated exception objects, the throwable
+ // should already have the buckets in it, unless it was raised in VM native code
+ // and reached here before CLR's managed code exception handler could see it.
+ if(!((EXCEPTIONREF)ServerGC.refException)->AreWatsonBucketsPresent())
+ {
+ LOG((LF_EH, LL_INFO1000, "MarshalAndCall - Regular exception object received (%p) does not contain watson buckets.\n",
+ OBJECTREFToObject(ServerGC.refException)));
+ }
+ }
+ }
+
+ pArgs->ClientGC.refException = pArgs->Marshaller.Clone(ServerGC.refException,
+ pCurAppDomain,
+ m_pCliDomain,
+ ServerGC.refExecutionContext);
+ goto LeaveDomain;
+ }
+
+ if (!RemotableMethodInfo::IsReturnBlittable(m_xret))
+ {
+ LOG((LF_REMOTING, LL_INFO100, "MarshalAndCall. Marshalling return object\n"));
+ // Need to marshal the return object
+
+ pArgs->ClientGC.refReturnValue = pArgs->Marshaller.Clone(ServerGC.refReturnValue,
+ pArgs->retTh,
+ pCurAppDomain,
+ m_pCliDomain,
+ ServerGC.refExecutionContext);
+
+ if (pArgs->bHasValueTypeReturnValToMarshal)
+ {
+ // Need to copy contents from temp return buffer to the original return buffer
+ void *pDest;
+ if (!pArgs->bHasRetBuffArg)
+ {
+ pDest = m_pFrame->GetReturnValuePtr();
+ }
+ else
+ {
+ pDest = *(void **)(m_pFrame->GetTransitionBlock() + pArgs->argit->GetRetBuffArgOffset());
+ }
+ // We don't use the fast path for values that return nullables, so UnBox() can be used
+ CopyValueClass(pDest, pArgs->ClientGC.refReturnValue->UnBox(), pArgs->ClientGC.refReturnValue->GetMethodTable(), m_pCliDomain);
+ }
+ }
+ else if (pArgs->bHasObjRefReturnVal)
+ {
+ // Must be a domain agile GC ref. We can just copy the reference into the client GC frame.
+ pArgs->ClientGC.refReturnValue = ServerGC.refReturnValue;
+ }
+
+ // Marshal any by-ref args into calling domain
+ if (pArgs->bHasByRefArgsToMarshal)
+ {
+#ifdef _DEBUG
+ LOG((LF_REMOTING, LL_INFO100, "MarshalAndCall. Marshalling by-ref args\n"));
+#endif
+ int iMarshalledArg = -1;
+ // Look for by ref args
+ for (DWORD i = 0; i < pArgs->dwNumArgs; i++)
+ {
+ if (pArgs->bMarshalledArgs[i] != TRUE)
+ continue;
+
+ iMarshalledArg++;
+
+ BOOL bNeedUnbox = pArgs->pArgAttribs[iMarshalledArg] & ARG_NEEDS_UNBOX;
+ BOOL bIsByRef = pArgs->pArgAttribs[iMarshalledArg] & ARG_IS_BYREF;
+
+ if (!bIsByRef)
+ continue;
+
+ TypeHandle argTh = pArgs->pThByRefs[iMarshalledArg];
+ int offset = pArgs->pByRefArgAttribs[iMarshalledArg];
+ OBJECTREF refReturn = pServerArgArray[iMarshalledArg];
+ GCPROTECT_BEGIN(refReturn);
+
+ refReturn = pArgs->Marshaller.Clone(refReturn,
+ argTh,
+ pCurAppDomain,
+ m_pCliDomain,
+ ServerGC.refExecutionContext);
+ if (bNeedUnbox)
+ {
+ // We don't use the fast path for byref nullables, so UnBox() can be used
+ BYTE *pTargetAddress = *((BYTE **)(m_pFrame->GetTransitionBlock() + offset));
+ CopyValueClass(pTargetAddress, refReturn->UnBox(), refReturn->GetMethodTable(), m_pCliDomain);
+ }
+ else
+ {
+ SetObjectReference(*((OBJECTREF **)(m_pFrame->GetTransitionBlock() + offset)), refReturn, m_pCliDomain);
+ }
+ GCPROTECT_END();
+ }
+ }
+
+ LeaveDomain:;
+
+ GCPROTECT_END(); // pServerArgArray
+ GCPROTECT_END(); // ServerGC
+}
+
+
+// Arguments need to be marshalled before dispatch. We walk through each argument,
+// inspect its type, make a list of objects that need to be marshalled, cross over to the new domain,
+// marshal the objects and dispatch the call. Upon return, we marshal the return object, if any, and
+// any by-ref objects. The call context flows both ways.
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+BOOL
+CrossDomainChannel::MarshalAndCall()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+
+ MarshalAndCallArgs args;
+
+ args.bHasByRefArgsToMarshal = FALSE;
+
+ args.bHasObjRefReturnVal = FALSE;
+ args.bHasRetBuffArg = FALSE;
+ args.bHasValueTypeReturnValToMarshal = FALSE;
+
+ DWORD dwNumArgs = 0;
+ DWORD dwNumObjectsMarshalled = 0;
+
+ DWORD *pArgAttribs = NULL;
+ BOOL *bMarshalledArgs = NULL;
+ int *pByRefArgAttribs = NULL;
+ TypeHandle *pThByRefs = NULL;
+
+ Thread *pCurThread = GetThread();
+
+#ifdef _DEBUG
+ LPCUTF8 pszMethodName;
+ pszMethodName = m_pCliMD->GetName();
+ LOG((LF_REMOTING, LL_INFO100, "MarshalAndCall. Marshalling arguments to method %s\n", pszMethodName));
+#endif // _DEBUG
+
+ // Collect all client domain GC references together in a single GC frame.
+ // refReturnValue contains the returned object when it's a value type and needs marshalling
+ ZeroMemory(&args.ClientGC, sizeof(args.ClientGC));
+ GCPROTECT_BEGIN(args.ClientGC);
+
+ // When computing the method signature we need to take special care if the call is on a non-interface class with a
+ // generic instantiation (since in that case we may have a representative method with a non-concrete signature).
+ TypeHandle thDeclaringType;
+ CDC_DETERMINE_DECLARING_TYPE(m_pCliMD, TypeHandle(CTPMethodTable::GetMethodTableBeingProxied(m_pFrame->GetThis())));
+ MetaSig mSig(m_pCliMD, thDeclaringType);
+ ArgIterator argit(&mSig);
+ int ofs;
+
+ // NumFixedArgs() doesn't count the "this" object, but SizeOfFrameArgumentArray() does.
+ dwNumArgs = mSig.NumFixedArgs();
+ m_numStackSlotsToCopy = argit.SizeOfFrameArgumentArray() / sizeof(SIZE_T);
+
+ // Ensure none of the following _alloca's are subject to integer overflow problems.
+ DWORD dwMaxEntries = dwNumArgs > m_numStackSlotsToCopy ? dwNumArgs : m_numStackSlotsToCopy;
+ DWORD dwResult;
+ if (!ClrSafeInt<DWORD>::multiply(dwMaxEntries, sizeof(SIZE_T), dwResult))
+ COMPlusThrowOM();
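+ // dwResult is used only for this umbrella overflow check: every _alloca size
+ // computed below (dwNumArgs * sizeof(DWORD), m_numStackSlotsToCopy * sizeof(SIZE_T),
+ // etc.) is bounded, up to a small constant, by dwMaxEntries * sizeof(SIZE_T), so
+ // one ClrSafeInt multiply covers them all (hence the PREFast suppression below).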
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:26000) // "Suppress PREFast warning about integer overflow (we're doing an umbrella check)"
+#endif
+
+ args.bHasRetBuffArg = argit.HasRetBuffArg();
+
+#ifdef _TARGET_X86_
+ BOOL bArgumentRegisterUsed = FALSE;
+ if (args.bHasRetBuffArg)
+ {
+ bArgumentRegisterUsed = TRUE;
+ }
+#endif // _TARGET_X86_
+
+ // pArgAttribs tell where the marshalled objects should go, where they need unboxing etc
+ pArgAttribs = (DWORD*) _alloca(dwNumArgs * sizeof(DWORD));
+ ZeroMemory(pArgAttribs, sizeof(DWORD) * dwNumArgs);
+ // pThByRefs has the typehandles of the by-ref args
+ pThByRefs = (TypeHandle *)_alloca(dwNumArgs * sizeof(TypeHandle));
+ ZeroMemory(pThByRefs, sizeof(TypeHandle) *dwNumArgs);
+ // pByRefArgAttribs tell where the by-ref args should go, after the call
+ pByRefArgAttribs = (int*) _alloca(dwNumArgs * sizeof(int));
+ ZeroMemory(pByRefArgAttribs, sizeof(int) * dwNumArgs);
+ // bMarshalledArgs is a bunch of flags that tell which args were marshalled
+ bMarshalledArgs = (BOOL*) _alloca(dwNumArgs * sizeof(BOOL));
+ ZeroMemory(bMarshalledArgs, sizeof(BOOL) * dwNumArgs);
+
+ // pArgArray contains marshalled objects on the client side
+ OBJECTREF *pClientArgArray = NULL;
+ pClientArgArray = (OBJECTREF *) _alloca(dwNumArgs * sizeof(OBJECTREF));
+ ZeroMemory(pClientArgArray, sizeof(OBJECTREF) * dwNumArgs);
+ GCPROTECT_ARRAY_BEGIN(pClientArgArray[0], dwNumArgs);
+
+ // pStackArgs will finally contain the arguments that'll be fed to DispatchCall. The marshalled objects
+ // are not placed directly into pStackArgs because it's not possible to GCPROTECT an array that can contain
+ // both GC refs and primitives.
+ DWORD cbStackArgs = m_numStackSlotsToCopy * sizeof (SIZE_T);
+#ifdef CALLDESCR_ARGREGS
+ // Allocate enough space to put ArgumentRegisters at the front of the buffer so we can ensure
+ // register and stack arguments are stored contiguously and simplify the case of unboxing a value type that
+ // spans registers and the stack.
+ cbStackArgs += sizeof(ArgumentRegisters);
+#endif
+ SIZE_T *pStackArgs = (SIZE_T*)_alloca(cbStackArgs);
+ ZeroMemory(pStackArgs, cbStackArgs);
+#ifdef CALLDESCR_ARGREGS
+ SIZE_T *pRegArgs = pStackArgs;
+ pStackArgs += sizeof(ArgumentRegisters) / sizeof(SIZE_T);
+#endif
+#ifdef CALLDESCR_FPARGREGS
+ FloatArgumentRegisters *pFloatArgumentRegisters = NULL;
+#endif
+
+#if defined(CALLDESCR_REGTYPEMAP)
+ UINT64 uRegTypeMap = 0;
+ BYTE* pMap = (BYTE*)&uRegTypeMap;
+#endif
+
+ TADDR pTransitionBlock = m_pFrame->GetTransitionBlock();
+
+ for (int argNum = 0;
+ TransitionBlock::InvalidOffset != (ofs = argit.GetNextOffset());
+ argNum++
+ )
+ {
+ DWORD dwOffsetOfArg = 0;
+
+#if defined(CALLDESCR_REGTYPEMAP)
+ int regArgNum = TransitionBlock::GetArgumentIndexFromOffset(ofs);
+
+ FillInRegTypeMap(ofs, argit.GetArgType(), pMap);
+#endif // defined(CALLDESCR_REGTYPEMAP)
+
+ SIZE_T *pDestToCopy = NULL;
+
+#if defined(_TARGET_ARM_)
+
+ // On ARM there are ranges of offset that can be returned from ArgIterator::GetNextOffset() (where R
+ // == TransitionBlock::GetOffsetOfArgumentRegisters() and S == sizeof(TransitionBlock)):
+ //
+ // * ofs < 0 : arg is in a floating point register
+ // * ofs >= R && ofs < S : arg is in a general register
+ // * ofs >= S : arg is on the stack at offset (ofs - S)
+ //
+ // Arguments can be split between general registers and the stack on ARM and as a result both
+ // FramedMethodFrame and this method ensure the storage for register and stack locations is
+ // contiguous.
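+ // Worked example with hypothetical values: if R == 8 and S == 40, then ofs == -16
+ // names a floating point register, ofs == 16 a general register, and ofs == 48 a
+ // stack slot at offset 48 - 40 == 8 from the first stack argument.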
+ int iInitialRegOffset = TransitionBlock::GetOffsetOfArgumentRegisters();
+ int iInitialStackOffset = sizeof(TransitionBlock);
+ _ASSERTE(iInitialStackOffset == (iInitialRegOffset + sizeof(ArgumentRegisters)));
+ if (ofs < 0)
+ {
+ // Floating point register case. Since these registers can never hold a GC reference we can just
+ // pass through a pointer to the spilled FP reg area in the frame. But we don't do this unless we
+ // see at least one FP arg: passing NULL for pFloatArgumentRegisters enables an optimization in
+ // the call thunk.
+ if (pFloatArgumentRegisters == NULL)
+ pFloatArgumentRegisters = (FloatArgumentRegisters*) (pTransitionBlock + TransitionBlock::GetOffsetOfFloatArgumentRegisters());
+
+ // No arg to copy in this case.
+ continue;
+ }
+
+ _ASSERTE(ofs >= iInitialRegOffset);
+
+ // We've ensured our registers and stack locations are contiguous so treat both types of arguments
+ // identically (i.e. compute a destination offset from the base of the register save area and it will
+ // work for arguments that span from registers to stack or live entirely on the stack).
+ dwOffsetOfArg = ofs - TransitionBlock::GetOffsetOfArgumentRegisters();
+ pDestToCopy = (SIZE_T*)((BYTE *)pRegArgs + dwOffsetOfArg);
+
+#else // _TARGET_ARM_
+
+ dwOffsetOfArg = ofs - TransitionBlock::GetOffsetOfArgs();
+
+#ifdef _TARGET_X86_
+ if (!bArgumentRegisterUsed && gElementTypeInfo[argit.GetArgType()].m_enregister)
+ {
+ pDestToCopy = pRegArgs;
+ bArgumentRegisterUsed = TRUE;
+ }
+ else
+#endif // _TARGET_X86_
+ {
+ _ASSERTE(dwOffsetOfArg < (m_numStackSlotsToCopy * sizeof(SIZE_T)));
+ pDestToCopy = (SIZE_T*)((BYTE *)pStackArgs + dwOffsetOfArg);
+ }
+
+#endif // _TARGET_ARM_
+
+ CorElementType origTyp = argit.GetArgType();
+
+ // Get the signature type of the argument (For ex. enum will be E_T_VT, not E_T_I4 etc)
+ SigPointer sp = mSig.GetArgProps();
+ CorElementType typ;
+ IfFailThrow(sp.GetElemType(&typ));
+
+ if (typ == ELEMENT_TYPE_VAR ||
+ typ == ELEMENT_TYPE_MVAR ||
+ typ == ELEMENT_TYPE_GENERICINST)
+ {
+ typ = origTyp;
+ }
+
+ switch (typ)
+ {
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+#if !defined(COM_STUBS_SEPARATE_FP_LOCATIONS)
+ case ELEMENT_TYPE_R4:
+#endif
+
+#if defined(_TARGET_X86_) || defined(_TARGET_ARM_)
+ *(pDestToCopy) = *((SIZE_T*) (pTransitionBlock + ofs));
+#elif defined(_WIN64)
+ switch (GetSizeForCorElementType((CorElementType)typ))
+ {
+ case 1:
+ *(BYTE*)(pDestToCopy) = *(BYTE*)(pTransitionBlock + ofs);
+ break;
+
+ case 2:
+ *(USHORT*)(pDestToCopy) = *(USHORT*)(pTransitionBlock + ofs);
+ break;
+
+ case 4:
+ *(UINT*)(pDestToCopy) = *(UINT*)(pTransitionBlock + ofs);
+ break;
+
+ case 8:
+ *(SIZE_T*)(pDestToCopy) = *(SIZE_T*)(pTransitionBlock + ofs);
+ break;
+
+ default:
+ _ASSERTE(!"MarshalAndCall() - unexpected size");
+ }
+#else // !defined(_WIN64)
+ PORTABILITY_ASSERT("MarshalAndCall() - NYI on this platform");
+#endif // !defined(_WIN64)
+ break;
+
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_FNPTR:
+
+ *((SIZE_T*)((BYTE *)pDestToCopy)) = *((SIZE_T*)(pTransitionBlock + ofs));
+ break;
+
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+#if !defined(COM_STUBS_SEPARATE_FP_LOCATIONS)
+ case ELEMENT_TYPE_R8:
+#endif
+
+ *((INT64*)((BYTE *)pDestToCopy)) = *((INT64 *)(pTransitionBlock + ofs));
+ break;
+
+#if defined(COM_STUBS_SEPARATE_FP_LOCATIONS)
+ case ELEMENT_TYPE_R4:
+
+ if (regArgNum < NUM_ARGUMENT_REGISTERS)
+ {
+ *(ARG_SLOT*)pDestToCopy = FPSpillToR4( (LPVOID)(pTransitionBlock + m_pFrame->GetFPArgOffset(regArgNum)) );
+ }
+ else
+ {
+ *(UINT*)(pDestToCopy) = *(UINT*)(pTransitionBlock + ofs);
+ }
+ break;
+
+ case ELEMENT_TYPE_R8:
+
+ if (regArgNum < NUM_ARGUMENT_REGISTERS)
+ {
+ *(ARG_SLOT*)pDestToCopy = FPSpillToR8( (LPVOID)(pTransitionBlock + m_pFrame->GetFPArgOffset(regArgNum)) );
+ }
+ else
+ {
+ *(SIZE_T*)(pDestToCopy) = *(SIZE_T*)(pTransitionBlock + ofs);
+ }
+ break;
+#endif // defined(COM_STUBS_SEPARATE_FP_LOCATIONS)
+
+ case ELEMENT_TYPE_BYREF:
+ {
+ // Check if this is a by-ref primitive
+ OBJECTREF refTmpBox = NULL;
+ TypeHandle ty = TypeHandle();
+ CorElementType brType = mSig.GetByRefType(&ty);
+ if (CorIsPrimitiveType(brType) || ty.IsValueType())
+ {
+
+ // Needs marshalling
+ MethodTable *pMT = NULL;
+ if (CorIsPrimitiveType(brType))
+ pMT = MscorlibBinder::GetElementType(brType);
+ else
+ pMT = ty.GetMethodTable();
+ refTmpBox = pMT->Box(*((SIZE_T**)(pTransitionBlock + ofs)));
+ pArgAttribs[dwNumObjectsMarshalled] |= ARG_NEEDS_UNBOX;
+ }
+ else
+ {
+ OBJECTREF *refRefObj = *((OBJECTREF **)(pTransitionBlock + ofs));
+ refTmpBox = (refRefObj == NULL ? NULL : *refRefObj);
+ }
+
+ pByRefArgAttribs[dwNumObjectsMarshalled] = ofs;
+ pThByRefs[dwNumObjectsMarshalled] = ty;
+
+ // we should have stopped nullables before we got here in DoStaticAnalysis
+ _ASSERTE(ty.IsNull() || !Nullable::IsNullableType(ty));
+ pArgAttribs[dwNumObjectsMarshalled] |= ARG_IS_BYREF;
+
+ args.bHasByRefArgsToMarshal = TRUE;
+
+ pClientArgArray[dwNumObjectsMarshalled] = refTmpBox;
+ bMarshalledArgs[argNum] = TRUE;
+
+#if defined(_TARGET_X86_)
+ if (pDestToCopy == pRegArgs)
+ {
+ pArgAttribs[dwNumObjectsMarshalled] |= ARG_GOES_IN_EDX; // Indicate that this goes in EDX
+ }
+ else
+#endif // _TARGET_X86_
+ {
+ // @TODO - Use QWORD for attribs
+ _ASSERTE(dwOffsetOfArg < ARG_OFFSET_MASK);
+ pArgAttribs[dwNumObjectsMarshalled] |= dwOffsetOfArg;
+ }
+ dwNumObjectsMarshalled++;
+ }
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+#if defined(COM_STUBS_SEPARATE_FP_LOCATIONS)
+ if (regArgNum < NUM_ARGUMENT_REGISTERS)
+ {
+
+ // We have to copy the floating point registers from a different stack location to the portion of
+ // the stack used to save the general registers.
+ if (origTyp == ELEMENT_TYPE_R4)
+ {
+ LPVOID pDest = (LPVOID)(pTransitionBlock + ofs);
+ *(ARG_SLOT*)pDest = FPSpillToR4( (LPVOID)(pTransitionBlock + m_pFrame->GetFPArgOffset(regArgNum)) );
+ }
+ else if (origTyp == ELEMENT_TYPE_R8)
+ {
+ LPVOID pDest = (LPVOID)(pTransitionBlock + ofs);
+ *(ARG_SLOT*)pDest = FPSpillToR8( (LPVOID)(pTransitionBlock + m_pFrame->GetFPArgOffset(regArgNum)) );
+ }
+ }
+#endif // defined(COM_STUBS_SEPARATE_FP_LOCATIONS)
+
+ TypeHandle th = mSig.GetLastTypeHandleThrowing();
+
+#ifdef _DEBUG
+ {
+ DefineFullyQualifiedNameForClass()
+ LPCUTF8 szTypeName = GetFullyQualifiedNameForClassNestedAware(th.GetMethodTable());
+ LOG((LF_REMOTING, LL_INFO100, "MarshalAndCall. Boxing a value type argument of type %s.\n", &szTypeName[0]));
+ }
+#endif // _DEBUG
+
+ OBJECTREF refTmpBox;
+#if defined(ENREGISTERED_PARAMTYPE_MAXSIZE)
+ if (argit.IsArgPassedByRef())
+ {
+ refTmpBox = th.GetMethodTable()->Box(*(LPVOID*)(pTransitionBlock + ofs));
+
+ // we should have stopped nullables before we got here in DoStaticAnalysis
+ _ASSERTE(!Nullable::IsNullableType(th));
+ pArgAttribs[dwNumObjectsMarshalled] |= ARG_IS_BYREF;
+
+ pByRefArgAttribs[dwNumObjectsMarshalled] = ofs;
+ pThByRefs[dwNumObjectsMarshalled] = th;
+ }
+ else
+#endif // defined(ENREGISTERED_PARAMTYPE_MAXSIZE)
+ {
+ refTmpBox = th.GetMethodTable()->Box((void *)(pTransitionBlock + ofs));
+ }
+ pClientArgArray[dwNumObjectsMarshalled] = refTmpBox;
+ bMarshalledArgs[argNum] = TRUE;
+
+#if defined(_TARGET_X86_)
+ if (pDestToCopy == pRegArgs)
+ {
+ pArgAttribs[dwNumObjectsMarshalled] |= ARG_GOES_IN_EDX; // Indicate that this goes in EDX
+ }
+ else
+#endif // _TARGET_X86_
+ {
+ // @TODO - Use QWORD for attribs
+ _ASSERTE(dwOffsetOfArg < ARG_OFFSET_MASK);
+ pArgAttribs[dwNumObjectsMarshalled] |= dwOffsetOfArg;
+ }
+ pArgAttribs[dwNumObjectsMarshalled] |= ARG_NEEDS_UNBOX; // Indicate that an unboxing is required
+ dwNumObjectsMarshalled++;
+ }
+ break;
+
+ case ELEMENT_TYPE_SZARRAY: // Single Dim
+ case ELEMENT_TYPE_ARRAY: // General Array
+ case ELEMENT_TYPE_CLASS: // Class
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_STRING: // System.String
+ case ELEMENT_TYPE_VAR:
+ {
+ OBJECTREF *refRefObj = (OBJECTREF *)(pTransitionBlock + ofs);
+ // The frame does protect this object, so mark it as such to avoid asserts
+ INDEBUG(Thread::ObjectRefNew(refRefObj);)
+ INDEBUG(Thread::ObjectRefProtected(refRefObj);)
+
+ pClientArgArray[dwNumObjectsMarshalled] = *refRefObj;
+ bMarshalledArgs[argNum] = TRUE;
+
+#ifdef _TARGET_X86_
+ if (pDestToCopy == pRegArgs)
+ {
+ pArgAttribs[dwNumObjectsMarshalled] |= ARG_GOES_IN_EDX; // Indicate that this goes in EDX
+ }
+ else
+#endif // _TARGET_X86_
+ {
+ // @TODO - Use QWORD for attribs
+ _ASSERTE(dwOffsetOfArg < ARG_OFFSET_MASK);
+ pArgAttribs[dwNumObjectsMarshalled] |= dwOffsetOfArg;
+ }
+ dwNumObjectsMarshalled++;
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Unknown Element type in MarshalAndCall" );
+ }
+ }
+
+ if (!RemotableMethodInfo::IsReturnBlittable(m_xret))
+ {
+ CorElementType retType = mSig.GetReturnType();
+ if (retType == ELEMENT_TYPE_VALUETYPE)
+ {
+ args.retTh = mSig.GetRetTypeHandleThrowing();
+ args.bHasValueTypeReturnValToMarshal = TRUE;
+ }
+ else
+ {
+ args.retTh = mSig.GetRetTypeHandleThrowing();
+ }
+ }
+
+ // Check for any call context
+ BOOL bMarshalCallContext = FALSE;
+ args.bMarshalReturnCallContext = FALSE;
+ if (pCurThread->IsExposedObjectSet())
+ {
+ THREADBASEREF ref = (THREADBASEREF) pCurThread->GetExposedObjectRaw();
+ _ASSERTE(ref != NULL);
+
+ EXECUTIONCONTEXTREF refExecCtx = (EXECUTIONCONTEXTREF) ref->GetExecutionContext();
+ if (refExecCtx != NULL)
+ {
+ args.ClientGC.refExecutionContext = refExecCtx;
+ args.ClientGC.refPrincipal = ReadPrincipal();
+
+ LOGICALCALLCONTEXTREF refLogCallCtx = refExecCtx->GetLogicalCallContext();
+ if (refLogCallCtx != NULL)
+ {
+ if (refLogCallCtx->ContainsDataForSerialization())
+ {
+ bMarshalCallContext = TRUE;
+ }
+ }
+ }
+ }
+
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+ // Make the Cross-AppDomain call
+ {
+ args.pThis = this;
+
+ args.bOptimizable = TRUE;
+
+ args.mSig = &mSig;
+ args.argit = &argit;
+
+ args.dwNumArgs = dwNumArgs;
+ args.pStackArgs = pStackArgs;
+#ifdef CALLDESCR_ARGREGS
+ args.pRegArgs = pRegArgs;
+#endif
+#ifdef CALLDESCR_FPARGREGS
+ args.pFloatArgumentRegisters = pFloatArgumentRegisters;
+#endif
+ args.pArgAttribs = pArgAttribs;
+
+ args.dwNumObjectsMarshalled = dwNumObjectsMarshalled;
+ args.bMarshalledArgs = bMarshalledArgs;
+ args.pClientArgArray = pClientArgArray;
+
+ args.pByRefArgAttribs = pByRefArgAttribs;
+ args.pThByRefs = pThByRefs;
+
+ args.bMarshalCallContext = bMarshalCallContext;
+
+#ifdef CALLDESCR_REGTYPEMAP
+ args.uRegTypeMap = *(UINT64*)pMap;
+#endif
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // By default assume that exception thrown across the cross-AD call is NotCorrupting.
+ args.severity = NotCorrupting;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ MakeCallWithPossibleAppDomainTransition(m_pSrvDomain, (FPAPPDOMAINCALLBACK) MarshalAndCall_Wrapper2, &args);
+ }
+
+ if (args.ClientGC.refException != NULL)
+ {
+ RestorePrincipal(&args.ClientGC.refPrincipal);
+ COMPlusThrow(args.ClientGC.refException
+ COMMA_CORRUPTING_EXCEPTIONS_ONLY(args.severity)
+ );
+ }
+
+ if (pCurThread->IsExposedObjectSet())
+ {
+ THREADBASEREF ref = (THREADBASEREF) pCurThread->GetExposedObjectRaw();
+ _ASSERTE(ref != NULL);
+
+ ref->SetExecutionContext(args.ClientGC.refExecutionContext);
+ }
+
+ RestorePrincipal(&args.ClientGC.refPrincipal);
+
+ // If the return type is an object, take it out of the protected ref
+ if (args.bHasObjRefReturnVal)
+ {
+ *(Object **)m_pFrame->GetReturnValuePtr() = OBJECTREFToObject(args.ClientGC.refReturnValue);
+ }
+
+ GCPROTECT_END(); // pClientArgArray
+ GCPROTECT_END(); // args.ClientGC
+
+ args.Marshaller.RemoveGCFrames();
+
+ return args.bOptimizable;
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+#endif // CROSSGEN_COMPILE
+
+#endif // FEATURE_REMOTING
diff --git a/src/vm/crossdomaincalls.h b/src/vm/crossdomaincalls.h
new file mode 100644
index 0000000000..47361ee8a1
--- /dev/null
+++ b/src/vm/crossdomaincalls.h
@@ -0,0 +1,273 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: CrossDomainCalls.h
+//
+
+//
+// Purpose: Provides a fast path for cross domain calls.
+//
+
+
+#ifndef __CROSSDOMAINCALLS_H__
+#define __CROSSDOMAINCALLS_H__
+
+#ifndef FEATURE_REMOTING
+#error FEATURE_REMOTING is not set, please do not include crossdomaincalls.h
+#endif
+
+#include "methodtable.h"
+
+class SimpleRWLock;
+
+// These are flags set inside the real proxy. They indicate what kind of type the proxy is cast to,
+// whether its method table layout is equivalent to the server type's, etc.
+#define OPTIMIZATION_FLAG_INITTED 0x01000000
+#define OPTIMIZATION_FLAG_PROXY_EQUIVALENT 0x02000000
+#define OPTIMIZATION_FLAG_PROXY_SHARED_TYPE 0x04000000
+#define OPTIMIZATION_FLAG_DEPTH_MASK 0x00FFFFFF
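+// Illustrative reading (the depth semantics live in the proxy code, not in this
+// header): the low 24 bits (OPTIMIZATION_FLAG_DEPTH_MASK) carry a count, so a flag
+// word such as (OPTIMIZATION_FLAG_INITTED | OPTIMIZATION_FLAG_PROXY_EQUIVALENT | 2)
+// would mark an initialized, layout-equivalent proxy with a depth of 2.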
+
+// This struct has info about methods on MBR objects and Interfaces
+struct RemotableMethodInfo
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+ /*
+ if XAD_BLITTABLE_ARGS is set, (m_OptFlags & XAD_ARG_COUNT_MASK) contains the number of stack dwords to copy
+ */
+ enum XADOptimizationType
+ {
+ XAD_FLAGS_INITIALIZED = 0x01000000,
+ XAD_NOT_OPTIMIZABLE = 0x02000000, // Method call has to go through managed remoting path
+ XAD_BLITTABLE_ARGS = 0x04000000, // Arguments blittable across domains. Could be scalars or agile gc refs
+ XAD_BLITTABLE_RET = 0x08000000, // Return Value blittable across domains. Could be scalars or agile gc refs
+ XAD_BLITTABLE_ALL = XAD_BLITTABLE_ARGS | XAD_BLITTABLE_RET,
+
+ XAD_RET_FLOAT = 0x10000000,
+ XAD_RET_DOUBLE = 0x20000000,
+#ifdef FEATURE_HFA
+ XAD_RET_HFA_TYPE = 0x40000000,
+#endif
+ XAD_RET_GC_REF = 0x70000000, // To differentiate agile objects like string which can be blitted across domains, but are gc refs
+ XAD_RET_TYPE_MASK = 0x70000000,
+
+ XAD_METHOD_IS_VIRTUAL = 0x80000000, // MethodDesc::IsVirtual is slow. Should consider fixing IsVirtual rather than having a flag here
+ XAD_ARGS_HAVE_A_FLOAT = 0x00800000,
+
+ XAD_FLAG_MASK = 0xFF800000,
+ XAD_ARG_COUNT_MASK = 0x007FFFFF
+ } ;
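+ // Example reading of a flag word (illustrative values): m_OptFlags ==
+ // (XAD_FLAGS_INITIALIZED | XAD_BLITTABLE_ALL | 3) describes an optimizable call
+ // whose arguments and return value blit across domains and whose arguments occupy
+ // 3 stack dwords, i.e. (m_OptFlags & XAD_ARG_COUNT_MASK) == 3.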
+
+ static XADOptimizationType IsCrossAppDomainOptimizable(MethodDesc *pMeth, DWORD *pNumDwordsToCopy);
+
+ static BOOL TypeIsConduciveToBlitting(MethodTable *pFromMT, MethodTable *pToMT);
+
+ static BOOL AreArgsBlittable(XADOptimizationType enumVal)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (enumVal & XAD_BLITTABLE_ARGS) && IsReturnBlittable(enumVal);
+ }
+ static BOOL IsReturnBlittable(XADOptimizationType enumVal)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return enumVal & XAD_BLITTABLE_RET;
+ }
+ static BOOL IsReturnGCRef(XADOptimizationType enumVal)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return XAD_RET_GC_REF == (enumVal & XAD_RET_TYPE_MASK);
+ }
+
+ static UINT GetFPReturnSize(XADOptimizationType enumVal)
+ {
+ WRAPPER_NO_CONTRACT;
+ switch (enumVal & XAD_RET_TYPE_MASK)
+ {
+ case XAD_RET_FLOAT:
+ return sizeof(float);
+
+ case XAD_RET_DOUBLE:
+ return sizeof(double);
+
+#ifdef FEATURE_HFA
+ case XAD_RET_FLOAT | XAD_RET_HFA_TYPE:
+ return 4 * sizeof(float);
+
+ case XAD_RET_DOUBLE | XAD_RET_HFA_TYPE:
+ return 4 * sizeof(double);
+#endif
+
+ default:
+ return 0;
+ }
+ }
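+ // For example, with FEATURE_HFA a method returning a homogeneous aggregate of four
+ // floats reports 4 * sizeof(float) == 16 bytes of floating point return state;
+ // GetRetTypeFlagsFromFPReturnSize() below is the inverse mapping, and asserts the
+ // round trip.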
+
+ static DWORD GetRetTypeFlagsFromFPReturnSize(UINT fpRetSize)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD flags = 0;
+ switch (fpRetSize)
+ {
+ case 0:
+ break;
+
+ case sizeof(float):
+ flags = XAD_RET_FLOAT;
+ break;
+
+ case sizeof(double):
+ flags = XAD_RET_DOUBLE;
+ break;
+
+#ifdef FEATURE_HFA
+ case 4 * sizeof(float):
+ flags = XAD_RET_FLOAT | XAD_RET_HFA_TYPE;
+ break;
+
+ case 4 * sizeof(double):
+ flags = XAD_RET_DOUBLE | XAD_RET_HFA_TYPE;
+ break;
+#endif
+ default:
+ _ASSERTE(false);
+ break;
+ }
+
+ _ASSERTE(fpRetSize == GetFPReturnSize((XADOptimizationType)flags));
+
+ return flags;
+ }
+
+ static BOOL DoArgsContainAFloat(XADOptimizationType enumVal)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return enumVal & XAD_ARGS_HAVE_A_FLOAT;
+ }
+
+ static BOOL IsCallNotOptimizable(XADOptimizationType enumVal)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return enumVal & XAD_NOT_OPTIMIZABLE;
+ }
+ static BOOL IsMethodVirtual(XADOptimizationType enumVal)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return enumVal & XAD_METHOD_IS_VIRTUAL;
+ }
+
+ private:
+
+ static DWORD DoStaticAnalysis(MethodDesc *pMeth);
+
+ DWORD m_OptFlags;
+
+} ;
+
+class CrossDomainOptimizationInfo
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+ RemotableMethodInfo m_rmi[0];
+
+ public:
+
+ static SIZE_T SizeOf(MethodTable *pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ return SizeOf(pMT->GetNumVtableSlots());
+ }
+
+ static SIZE_T SizeOf(DWORD dwNumVtableSlots)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(CrossDomainOptimizationInfo, m_rmi) + (sizeof(RemotableMethodInfo) * dwNumVtableSlots);
+ }
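+ // e.g. a MethodTable with 10 vtable slots needs
+ // offsetof(CrossDomainOptimizationInfo, m_rmi) + 10 * sizeof(RemotableMethodInfo)
+ // bytes: one allocation with the RemotableMethodInfo array trailing the header
+ // (the m_rmi[0] trailing-array idiom above).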
+
+ RemotableMethodInfo *GetRemotableMethodInfo()
+ {
+ return &(m_rmi[0]);
+ }
+};
+#ifndef BINDER
+class CrossDomainTypeMap
+{
+ class MTMapEntry
+ {
+ public:
+ MTMapEntry(AppDomain *pFromDomain, MethodTable *pFromMT, AppDomain *pToDomain, MethodTable *pToMT);
+ UPTR GetHash()
+ {
+ LIMITED_METHOD_CONTRACT;
+ DWORD hash = _rotl((UINT)(SIZE_T)m_pFromMT, 1) + m_dwFromDomain.m_dwId;
+ hash = _rotl(hash, 1) + m_dwToDomain.m_dwId;
+ return (UPTR)hash;
+ }
+ ADID m_dwFromDomain;
+ ADID m_dwToDomain;
+ MethodTable *m_pFromMT;
+ MethodTable *m_pToMT;
+ };
+
+ static BOOL CompareMTMapEntry (UPTR val1, UPTR val2);
+ static PtrHashMap *s_crossDomainMTMap; // Maps a MT to corresponding MT in another domain
+ static SimpleRWLock *s_MTMapLock;
+ static PtrHashMap * GetTypeMap();
+
+public:
+ static MethodTable *GetMethodTableForDomain(MethodTable *pFrom, AppDomain *pFromDomain, AppDomain *pToDomain);
+ static void SetMethodTableForDomain(MethodTable *pFromMT, AppDomain *pFromDomain, MethodTable *pToMT, AppDomain *pToDomain);
+ static void FlushStaleEntries();
+};
+
+struct MarshalAndCallArgs;
+void MarshalAndCall_Wrapper2(MarshalAndCallArgs * pArgs);
+
+class CrossDomainChannel
+{
+private:
+ friend void MarshalAndCall_Wrapper2(MarshalAndCallArgs * pArgs);
+
+
+ BOOL GetTargetAddressFast(DWORD optFlags, MethodTable *pSrvMT, BOOL bFindServerMD);
+ BOOL GetGenericMethodAddress(MethodTable *pSrvMT);
+ BOOL GetInterfaceMethodAddressFast(DWORD optFlags, MethodTable *pSrvMT, BOOL bFindServerMD);
+ BOOL BlitAndCall();
+ BOOL MarshalAndCall();
+ void MarshalAndCall_Wrapper(MarshalAndCallArgs * pArgs);
+ BOOL ExecuteCrossDomainCall();
+ VOID RenewLease();
+ OBJECTREF GetServerObject();
+ BOOL InitServerInfo();
+ OBJECTREF ReadPrincipal();
+ VOID RestorePrincipal(OBJECTREF *prefPrincipal);
+ VOID ResetPrincipal();
+
+public:
+
+ UINT GetFPReturnSize()
+ {
+ WRAPPER_NO_CONTRACT;
+ return RemotableMethodInfo::GetFPReturnSize(m_xret);
+ }
+
+ BOOL CheckCrossDomainCall(TPMethodFrame *pFrame);
+
+private:
+ MethodDesc *m_pCliMD;
+ MethodDesc *m_pSrvMD;
+ RemotableMethodInfo::XADOptimizationType m_xret;
+ DWORD m_numStackSlotsToCopy;
+ OBJECTHANDLE m_refSrvIdentity;
+ AppDomain *m_pCliDomain;
+ ADID m_pSrvDomain;
+ PCODE m_pTargetAddress;
+ TPMethodFrame *m_pFrame;
+};
+
+#endif // !BINDER
+#endif // __CROSSDOMAINCALLS_H__
diff --git a/src/vm/crossgen/.gitmirror b/src/vm/crossgen/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/vm/crossgen/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror.
\ No newline at end of file
diff --git a/src/vm/crossgen/wks_crossgen.nativeproj b/src/vm/crossgen/wks_crossgen.nativeproj
new file mode 100644
index 0000000000..a688b49304
--- /dev/null
+++ b/src/vm/crossgen/wks_crossgen.nativeproj
@@ -0,0 +1,163 @@
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003" ToolsVersion="dogfood">
+
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\xplat\SetCrossGen.props" />
+ <PropertyGroup>
+ <BuildSysBinaries>true</BuildSysBinaries>
+ <OutputName>cee_crossgen</OutputName>
+ </PropertyGroup>
+
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\src\vm\vm.settings" />
+
+ <PropertyGroup>
+ <UserIncludes>
+ $(UserIncludes);
+ $(Clrbase)\src\binder\inc
+ </UserIncludes>
+ <ClAdditionalOptions>$(ClAdditionalOptions) -wd4702</ClAdditionalOptions>
+ </PropertyGroup>
+
+ <ItemGroup>
+ <CppCompile Include="$(VmSourcesDir)\class.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\AppDomain.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\aptca.cpp" Condition="'$(FeatureAptca)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\array.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Assembly.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\AssemblySpec.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\binder.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ceeload.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ceemain.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\classhash.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\clrex.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CLRPrivBinderUtil.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CLRPrivBinderWinRT.cpp" Condition="'$(FeatureCominterop)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\CLRPrivTypeCacheWinRT.cpp" Condition="'$(FeatureCominterop)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\clsload.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\comdelegate.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\codeman.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CompactLayoutWriter.cpp" Condition="'$(MDILGenerator)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\compile.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ConstrainedExecutionRegion.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CustomMarshalerInfo.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Domainfile.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\BaseAssemblySpec.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\corebindresult.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\coreassemblyspec.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\crossdomaincalls.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\dataimage.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\decodeMD.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\DebugInfoStore.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ecall.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\eeconfig.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\eehash.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\eetwain.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\excep.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Field.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Fieldmarshaler.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\formattype.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\TypeEquivalenceHash.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\GCDecode.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\genericdict.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\generics.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\genmeth.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\hash.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ILMarshalers.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ILStubCache.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ILStubResolver.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\instmethhash.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\interoputil.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\invokeutil.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\inlinetracking.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\contractImpl.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\JITInterface.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\LoaderAllocator.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ListLock.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\memberload.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Method.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\MethodImpl.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\MethodTable.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\methodtablebuilder.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\mscorlib.cpp">
+ <DisablePrecompiledHeaders>true</DisablePrecompiledHeaders>
+ </CppCompile>
+ <CppCompile Include="$(VmSourcesDir)\stubcache.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\mlinfo.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\DllImport.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\DllImportCallback.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\PEFile.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\PEFingerprint.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\PEImage.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\PEImageLayout.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\pendingload.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Precode.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\olevariant.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ReadyToRunInfo.cpp" Condition="'$(FeatureReadyToRun)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\security.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\securitypolicy.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\securityAttributes.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityDeclarative.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityDeclarativeCache.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityDescriptor.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityDescriptorAppdomain.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityDescriptorAssembly.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\securitymeta.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityTransparentAssembly.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\siginfo.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SigFormat.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SimpleRWLock.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\spinlock.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\StackingAllocator.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\stubgen.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\stublink.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\typectxt.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\typedesc.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\typehandle.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\typehash.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\typeparse.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\typestring.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\util.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\vars.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\zapsig.cpp" />
+ </ItemGroup>
+
+ <ItemGroup Condition="'$(FeatureCominterop)' == 'true'">
+ <CppCompile Include="$(VmSourcesDir)\classcompat.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMtoCLRCall.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CLRtoCOMCall.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\RuntimeCallableWrapper.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\WinRTHelpers.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\WinRTTypeNameConverter.cpp" />
+ </ItemGroup>
+
+ <!-- SOURCES_NONPAL -->
+ <ItemGroup>
+ <CppCompile Include="$(VmSourcesDir)\DbgGcInfoDecoder.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\GcInfoDecoder.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Crypto\SHA1.cpp" Condition="'$(FeatureCoreclr)' != 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\SHA1.cpp" Condition="'$(FeatureCoreclr)' == 'true'"/>
+ </ItemGroup>
+
+ <ItemGroup Condition="'$(TargetArch)' == 'i386'">
+ <CppCompile Include="$(I386SourcesDir)\stublinkerx86.cpp" />
+ </ItemGroup>
+
+ <ItemGroup Condition="'$(TargetArch)' == 'amd64'">
+ <CppCompile Include="$(Amd64SourcesDir)\StubLinkerAMD64.cpp" />
+ </ItemGroup>
+
+ <ItemGroup Condition="'$(TargetArch)' == 'arm'">
+ <CppCompile Include="$(ArmSourcesDir)\stubs.cpp" />
+ </ItemGroup>
+
+ <ItemGroup Condition="'$(TargetArch)' == 'arm64'">
+ <CppCompile Include="$(Arm64SourcesDir)\stubs.cpp" />
+ </ItemGroup>
+
+ <ItemGroup>
+ <CppCompile Include="$(VmSourcesDir)\crossgencompile.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CrossgenRoParseTypeName.cpp" Condition="'$(FeatureCominterop)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\CrossgenRoResolveNamespace.cpp" Condition="'$(FeatureCominterop)' == 'true'"/>
+ </ItemGroup>
+
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\src\vm\vm.targets" />
+
+</Project>
diff --git a/src/vm/crossgen_mscorlib/.gitmirror b/src/vm/crossgen_mscorlib/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/vm/crossgen_mscorlib/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror. \ No newline at end of file
diff --git a/src/vm/crossgen_mscorlib/mscorlib_crossgen.nativeproj b/src/vm/crossgen_mscorlib/mscorlib_crossgen.nativeproj
new file mode 100644
index 0000000000..be65670654
--- /dev/null
+++ b/src/vm/crossgen_mscorlib/mscorlib_crossgen.nativeproj
@@ -0,0 +1,27 @@
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003" ToolsVersion="dogfood">
+
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\xplat\SetCrossGen.props" />
+
+ <PropertyGroup>
+ <!-- Pretend that we are not crossgen compiling to pick up defines matching the target -->
+ <CrossGenCompile>false</CrossGenCompile>
+
+ <CDefines>$(CDefines);CROSSGEN_MSCORLIB</CDefines>
+ </PropertyGroup>
+
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\clr.props" />
+
+ <PropertyGroup>
+ <BuildSysBinaries>true</BuildSysBinaries>
+ <OutputName>mscorlib_crossgen</OutputName>
+ <OutputPath>$(ClrLibDest)</OutputPath>
+ <TargetType>LIBRARY</TargetType>
+ </PropertyGroup>
+
+ <ItemGroup>
+ <CppCompile Include="..\Mscorlib.cpp" />
+ </ItemGroup>
+
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\clr.targets" />
+
+</Project> \ No newline at end of file
diff --git a/src/vm/crossgencompile.cpp b/src/vm/crossgencompile.cpp
new file mode 100644
index 0000000000..99a43f197d
--- /dev/null
+++ b/src/vm/crossgencompile.cpp
@@ -0,0 +1,464 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: crossgencompile.cpp
+//
+
+// ===========================================================================
+// This file contains stubbed out implementations for cross-platform NGen.
+//
+// The stubbed out implementations are concentrated in this file to reduce the number
+// of ifdefs that have to be sprinkled through the code.
+// ===========================================================================
+
+#include "common.h"
+
+#include "comdelegate.h"
+#include "compile.h"
+#include "constrainedexecutionregion.h"
+#include "security.h"
+#include "invokeutil.h"
+#include "comcallablewrapper.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+// Pull in some implementation files from other places in the tree
+//
+
+#include "..\..\dlls\mscoree\mscoree.cpp"
+
+//---------------------------------------------------------------------------------------
+//
+// Helper function for features unsupported under crossgen
+//
+
+#undef ExitProcess
+
+void CrossGenNotSupported(char * message)
+{
+ _ASSERTE(!"CrossGenNotSupported");
+ fprintf(stderr, "Fatal error: %s\n", message);
+ ExitProcess(CORSECATTR_E_BAD_ACTION);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// There is always only one thread and one appdomain in crossgen.
+//
+
+extern CompilationDomain * theDomain;
+
+AppDomain * GetAppDomain()
+{
+ return theDomain;
+}
+
+Thread theThread;
+
+Thread * GetThread()
+{
+ return (Thread*)&theThread;
+}
+
+Thread * GetThreadNULLOk()
+{
+ return GetThread();
+}
+
+#ifdef _DEBUG
+BOOL Debug_IsLockedViaThreadSuspension()
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+#endif // _DEBUG
+
+#if defined(FEATURE_MERGE_JIT_AND_ENGINE) && defined(FEATURE_IMPLICIT_TLS)
+Compiler* theTlsCompiler;
+
+Compiler* GetTlsCompiler()
+{
+ LIMITED_METHOD_CONTRACT
+
+ return theTlsCompiler;
+}
+void SetTlsCompiler(Compiler* c)
+{
+ LIMITED_METHOD_CONTRACT
+ theTlsCompiler = c;
+}
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+// All locks are nops because there is always only one thread.
+//
+
+void CrstBase::InitWorker(INDEBUG_COMMA(CrstType crstType) CrstFlags flags)
+{
+ m_dwFlags = flags;
+}
+
+void CrstBase::Destroy()
+{
+}
+
+void CrstBase::Enter(INDEBUG(enum CrstBase::NoLevelCheckFlag))
+{
+}
+
+void CrstBase::Leave()
+{
+}
+
+BOOL __SwitchToThread(DWORD, DWORD)
+{
+ return TRUE;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Globals and misc other
+//
+
+GPTR_IMPL(GCHeap,g_pGCHeap);
+
+BOOL g_fEEOtherStartup=FALSE;
+BOOL g_fEEComActivatedStartup=FALSE;
+
+GVAL_IMPL_INIT(DWORD, g_fHostConfig, 0);
+
+#ifdef FEATURE_SVR_GC
+SVAL_IMPL_INIT(DWORD,GCHeap,gcHeapType,GCHeap::GC_HEAP_WKS);
+#endif
+
+void UpdateGCSettingFromHost()
+{
+}
+
+HRESULT GetExceptionHResult(OBJECTREF throwable)
+{
+ return E_FAIL;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Dynamically unreachable implementations of profiler callbacks. Note that we can't just
+// disable PROFILING_SUPPORTED for crossgen because it affects data layout and FCall tables.
+//
+
+UINT_PTR EEToProfInterfaceImpl::EEFunctionIDMapper(FunctionID funcId, BOOL * pbHookFunction)
+{
+ UNREACHABLE();
+}
+
+HRESULT EEToProfInterfaceImpl::JITInlining(
+ /* [in] */ FunctionID callerId,
+ /* [in] */ FunctionID calleeId,
+ /* [out] */ BOOL * pfShouldInline)
+{
+ UNREACHABLE();
+}
+
+HRESULT EEToProfInterfaceImpl::ModuleLoadStarted(ModuleID moduleId)
+{
+ UNREACHABLE();
+}
+
+HRESULT EEToProfInterfaceImpl::ModuleLoadFinished(
+ ModuleID moduleId,
+ HRESULT hrStatus)
+{
+ UNREACHABLE();
+}
+
+HRESULT EEToProfInterfaceImpl::ModuleUnloadStarted(
+ ModuleID moduleId)
+{
+ UNREACHABLE();
+}
+
+HRESULT EEToProfInterfaceImpl::ModuleUnloadFinished(
+ ModuleID moduleId,
+ HRESULT hrStatus)
+{
+ UNREACHABLE();
+}
+
+HRESULT EEToProfInterfaceImpl::ModuleAttachedToAssembly(
+ ModuleID moduleId,
+ AssemblyID AssemblyId)
+{
+ UNREACHABLE();
+}
+
+HRESULT EEToProfInterfaceImpl::ClassLoadStarted(
+ ClassID classId)
+{
+ UNREACHABLE();
+}
+
+HRESULT EEToProfInterfaceImpl::ClassLoadFinished(
+ ClassID classId,
+ HRESULT hrStatus)
+{
+ UNREACHABLE();
+}
+
+HRESULT EEToProfInterfaceImpl::AppDomainCreationFinished(
+ AppDomainID appDomainId,
+ HRESULT hrStatus)
+{
+ UNREACHABLE();
+}
+
+HRESULT EEToProfInterfaceImpl::AppDomainCreationStarted(
+ AppDomainID appDomainId)
+{
+ UNREACHABLE();
+}
+
+HRESULT EEToProfInterfaceImpl::AppDomainShutdownFinished(
+ AppDomainID appDomainId,
+ HRESULT hrStatus)
+{
+ UNREACHABLE();
+}
+
+HRESULT EEToProfInterfaceImpl::AppDomainShutdownStarted(
+ AppDomainID appDomainId)
+{
+ UNREACHABLE();
+}
+
+HRESULT EEToProfInterfaceImpl::AssemblyLoadStarted(
+ AssemblyID assemblyId)
+{
+ UNREACHABLE();
+}
+
+HRESULT EEToProfInterfaceImpl::AssemblyLoadFinished(
+ AssemblyID assemblyId,
+ HRESULT hrStatus)
+{
+ UNREACHABLE();
+}
+
+ClassID TypeHandleToClassID(TypeHandle th)
+{
+ UNREACHABLE();
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Stubbed-out implementations of functions that are only useful when we are actually running managed code
+//
+
+MethodTable *Object::GetTrueMethodTable()
+{
+ UNREACHABLE();
+}
+
+FuncPtrStubs::FuncPtrStubs()
+ : m_hashTableCrst(CrstFuncPtrStubs, CRST_UNSAFE_ANYMODE)
+{
+}
+
+PCODE MethodDesc::GetMultiCallableAddrOfCode(CORINFO_ACCESS_FLAGS accessFlags)
+{
+ return 0x321;
+}
+
+PCODE MethodDesc::TryGetMultiCallableAddrOfCode(CORINFO_ACCESS_FLAGS accessFlags)
+{
+ return 0x321;
+}
+
+#ifdef _TARGET_X86_
+BOOL Runtime_Test_For_SSE2()
+{
+ return TRUE;
+}
+#endif
+
+#ifdef _TARGET_AMD64_
+INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMethod, LoaderAllocator *pLoaderAllocator /* = NULL */)
+{
+ // crossgen does not have jump stubs
+ return 0;
+}
+#endif
+
+#if defined(FEATURE_REMOTING) && !defined(HAS_REMOTING_PRECODE)
+void CRemotingServices::DestroyThunk(MethodDesc* pMD)
+{
+ UNREACHABLE();
+}
+#endif
+
+CORINFO_GENERIC_HANDLE JIT_GenericHandleWorker(MethodDesc * pMD, MethodTable * pMT, LPVOID signature)
+{
+ UNREACHABLE();
+}
+
+void CrawlFrame::GetExactGenericInstantiations(Instantiation *pClassInst, Instantiation *pMethodInst)
+{
+ UNREACHABLE();
+}
+
+OBJECTREF AppDomain::GetExposedObject()
+{
+ UNREACHABLE();
+}
+
+BOOL Object::SupportsInterface(OBJECTREF pObj, MethodTable* pInterfaceMT)
+{
+ UNREACHABLE();
+}
+
+GCFrame::GCFrame(OBJECTREF *pObjRefs, UINT numObjRefs, BOOL maybeInterior)
+{
+}
+
+void GCFrame::GcScanRoots(promote_func *fn, ScanContext* sc)
+{
+ UNREACHABLE();
+}
+
+VOID GCFrame::Pop()
+{
+}
+
+void Frame::Push()
+{
+}
+
+void Frame::Pop()
+{
+}
+
+PCODE COMDelegate::GetSecureInvoke(MethodDesc* pMD)
+{
+ return (PCODE)(0x12345);
+}
+
+Assembly * SystemDomain::GetCallersAssembly(StackCrawlMark * stackMark, AppDomain ** ppAppDomain)
+{
+ return NULL;
+}
+
+void EnableStressHeapHelper()
+{
+ UNREACHABLE();
+}
+
+void ReflectionModule::CaptureModuleMetaDataToMemory()
+{
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Empty implementations of shutdown-related functions. We don't do any cleanup for shutdown during crossgen.
+//
+
+Assembly::~Assembly()
+{
+}
+
+void Assembly::StartUnload()
+{
+}
+
+void Module::StartUnload()
+{
+}
+
+void DynamicMethodTable::Destroy()
+{
+}
+
+void SyncClean::AddEEHashTable(EEHashEntry** entry)
+{
+}
+
+void SyncClean::AddHashMap(Bucket *bucket)
+{
+}
+
+#ifdef FEATURE_COMINTEROP
+LONG ComCallWrapperTemplate::Release()
+{
+ UNREACHABLE();
+}
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+// Security-related functions. They are reachable in theory via legacy security attributes. The legacy security
+// attributes should not be used in code running on CoreCLR. We fail fast in a number of these just in case somebody
+// tries to use the legacy security attributes anyway.
+//
+
+void SecurityDeclarative::FullTrustInheritanceDemand(Assembly *pTargetAssembly)
+{
+ CrossGenNotSupported("FullTrustInheritanceDemand");
+}
+
+void SecurityDeclarative::InheritanceLinkDemandCheck(Assembly *pTargetAssembly, MethodDesc * pMDLinkDemand)
+{
+ CrossGenNotSupported("InheritanceLinkDemandCheck");
+}
+
+void ApplicationSecurityDescriptor::PreResolve(BOOL *pfIsFullyTrusted, BOOL *pfIsHomogeneous)
+{
+ // virtual method unreachable in crossgen
+ UNREACHABLE();
+}
+
+extern "C" UINT_PTR STDCALL GetCurrentIP()
+{
+ return 0;
+}
+
+void EEPolicy::HandleFatalError(UINT exitCode, UINT_PTR address, LPCWSTR pszMessage, PEXCEPTION_POINTERS pExceptionInfo)
+{
+ fprintf(stderr, "Fatal error: %08x\n", exitCode);
+ ExitProcess(exitCode);
+}
+
+//---------------------------------------------------------------------------------------
+
+Assembly * AppDomain::RaiseAssemblyResolveEvent(AssemblySpec * pSpec, BOOL fIntrospection, BOOL fPreBind)
+{
+ return NULL;
+}
+
+Assembly * AppDomain::RaiseResourceResolveEvent(DomainAssembly* pAssembly, LPCSTR szName)
+{
+ return NULL;
+}
+
+DomainAssembly * AppDomain::RaiseTypeResolveEventThrowing(DomainAssembly* pAssembly, LPCSTR szName, ASSEMBLYREF *pResultingAssemblyRef)
+{
+ return NULL;
+}
+
+void AppDomain::RaiseLoadingAssemblyEvent(DomainAssembly *pAssembly)
+{
+}
+
+#ifdef FEATURE_CORECLR
+BOOL AppDomain::BindingByManifestFile()
+{
+ return FALSE;
+}
+#endif
+
+ReJitManager::ReJitManager()
+{
+}
diff --git a/src/vm/crossgenroparsetypename.cpp b/src/vm/crossgenroparsetypename.cpp
new file mode 100644
index 0000000000..5f2f55815f
--- /dev/null
+++ b/src/vm/crossgenroparsetypename.cpp
@@ -0,0 +1,495 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//+----------------------------------------------------------------------------
+//
+
+//
+// Purpose: Enable parsing of parameterized and non-parameterized typenames
+//
+// Adapted from Windows sources. Modified to run on Windows version < Win8, so
+// that we can use this in CrossGen.
+//
+
+//
+//-----------------------------------------------------------------------------
+
+#include "common.h" // precompiled header
+
+static const UINT32 g_uiMaxTypeName = 512;
+
+// Type name grammar:
+//
+// expression -> param
+//
+// pinterface_instance -> pinterface "<" params ">"
+// {
+// if (count(pinterface.params) != num) { error }
+// }
+//
+// pinterface -> identifier "`" num
+//
+// params -> params "," param | param
+//
+// param -> identifier | pinterface_instance
+//
+// identifier -> all characters are allowed, except for white space, back tick, comma and left/right angle brackets.
+//
+// num -> [0-9]+
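+//
+// As a hedged illustration (the type names below are hypothetical), the input
+//
+//     Windows.Foundation.Collections.IMap`2<Foo.IBar, Windows.Foundation.Uri>
+//
+// tokenizes, in order, into the pinterface token
+// "Windows.Foundation.Collections.IMap`2" followed by its two parameters
+// "Foo.IBar" and "Windows.Foundation.Uri". A nested pinterface contributes its
+// own token followed by its parameters:
+//
+//     IVector`1<IMap`2<K, V>>  ->  "IVector`1", "IMap`2", "K", "V"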
+
+typedef enum
+{
+ TTT_PINTERFACE,
+ TTT_IDENTIFIER,
+ TTT_INVALID
+} TYPENAME_TOKEN_TYPE;
+
+class TypeNameTokenizer
+{
+public:
+ _When_(return == S_OK, _At_(pphstrTypeNameParts, __deref_out_ecount(*pdwPartsCount)))
+ _When_(return != S_OK, _At_(pphstrTypeNameParts, __deref_out))
+ HRESULT TokenizeType(__in PCWSTR pszTypeName, __out DWORD *pdwPartsCount, SString **pphstrTypeNameParts);
+
+ ~TypeNameTokenizer()
+ {
+ if (_sphstrTypeNameParts != nullptr)
+ delete[] _sphstrTypeNameParts;
+ }
+
+private:
+ HRESULT ParseNonParameterizedType();
+ HRESULT ParseParameterizedType();
+
+ int CountTokens();
+ TYPENAME_TOKEN_TYPE ReadNextToken();
+ void SkipWhitespace();
+ bool IsWhitespace(WCHAR ch);
+ bool TrimThenFetchAndCompareNextCharIfAny(__in WCHAR chExpectedSymbol);
+ bool TrimThenPeekAndCompareNextCharIfAny(__in WCHAR chExpectedSymbol);
+ HRESULT VerifyTrailingCloseBrackets(__in DWORD dwExpectedTrailingCloseBrackets);
+
+ SString* _sphstrTypeNameParts;
+ WCHAR _pszTypeName[g_uiMaxTypeName];
+ WCHAR *_pchTypeNamePtr;
+ WCHAR _pszCurrentToken[g_uiMaxTypeName];
+ DWORD _cCurrentTokenParameters;
+ DWORD _cTokens;
+};
+
+_When_(return == S_OK, _At_(typeNameParts, __deref_out_ecount(*partsCount)))
+_When_(return != S_OK, _At_(typeNameParts, __deref_out))
+__checkReturn extern "C" HRESULT WINAPI CrossgenRoParseTypeName(
+ __in SString* typeName,
+ __out DWORD *partsCount,
+ SString **typeNameParts)
+{
+ HRESULT hr = S_OK;
+
+ // Clear output parameters.
+ *typeNameParts = nullptr;
+ *partsCount = 0;
+
+ if (typeName->IsEmpty() /*|| typeName.HasEmbeddedNull() */)
+ {
+ hr = E_INVALIDARG;
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ TypeNameTokenizer typeNameTokenizer;
+ hr = typeNameTokenizer.TokenizeType(
+ typeName->GetUnicode(),
+ partsCount,
+ typeNameParts);
+ }
+
+ return hr;
+}
+
+_When_(return == S_OK, _At_(pphstrTypeNameParts, __deref_out_ecount(*pdwPartsCount)))
+_When_(return != S_OK, _At_(pphstrTypeNameParts, __deref_out))
+HRESULT TypeNameTokenizer::TokenizeType(__in PCWSTR pszTypeName, __out DWORD *pdwPartsCount, SString **pphstrTypeNameParts)
+{
+ _ASSERTE(pphstrTypeNameParts != nullptr);
+ _ASSERTE(pdwPartsCount != nullptr);
+ HRESULT hr = S_OK;
+
+ _cTokens = 0;
+ hr = StringCchCopy(_pszTypeName, ARRAYSIZE(_pszTypeName), pszTypeName);
+ _pchTypeNamePtr = _pszTypeName;
+
+ if (hr == STRSAFE_E_INSUFFICIENT_BUFFER)
+ {
+ hr = RO_E_METADATA_NAME_NOT_FOUND;
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ *pdwPartsCount = CountTokens();
+
+ _sphstrTypeNameParts = new(nothrow) SString[*pdwPartsCount];
+ if (_sphstrTypeNameParts == nullptr)
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ TYPENAME_TOKEN_TYPE tokenType = ReadNextToken();
+
+ if (tokenType == TTT_IDENTIFIER)
+ {
+ hr = ParseNonParameterizedType();
+ }
+ else if (tokenType == TTT_PINTERFACE)
+ {
+ hr = ParseParameterizedType();
+ }
+ else
+ {
+ hr = RO_E_METADATA_INVALID_TYPE_FORMAT;
+ }
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ *pphstrTypeNameParts = _sphstrTypeNameParts;
+ _sphstrTypeNameParts = nullptr;
+ }
+ else
+ {
+ *pdwPartsCount = 0;
+ *pphstrTypeNameParts = nullptr;
+ }
+
+ return hr;
+}
+
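+// Counts the number of parts the typename will split into: one more than the
+// number of ',' and '<' separators. As a hedged illustration (hypothetical
+// input), "IMap`2<K, V>" contains one '<' and one ',', so it counts
+// 3 tokens: "IMap`2", "K" and "V".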
+int TypeNameTokenizer::CountTokens()
+{
+ const size_t cTypeNameLength = wcslen(_pszTypeName);
+ int nCount = 1;
+ WCHAR ch;
+
+ _ASSERTE(cTypeNameLength != 0);
+
+ for (UINT32 nIndex = 0; nIndex < cTypeNameLength; nIndex++)
+ {
+ ch = _pszTypeName[nIndex];
+
+ if ((ch == W(',')) || (ch == W('<')))
+ {
+ nCount++;
+ }
+ }
+
+ return nCount;
+}
+
+TYPENAME_TOKEN_TYPE TypeNameTokenizer::ReadNextToken()
+{
+ TYPENAME_TOKEN_TYPE tokenType = TTT_IDENTIFIER;
+ int nTokenIndex = 0;
+ WCHAR ch = *_pchTypeNamePtr;
+
+ while ((ch != W('\0')) &&
+ (ch != W('<')) &&
+ (ch != W('>')) &&
+ (ch != W(',')) &&
+ (!IsWhitespace(ch)))
+ {
+ _pszCurrentToken[nTokenIndex++] = ch;
+
+ if (ch == W('`'))
+ {
+ if (nTokenIndex > 1)
+ {
+ tokenType = TTT_PINTERFACE;
+
+ // Store the pinterface's parameters count (limited to a single digit).
+ _pchTypeNamePtr++;
+ ch = *_pchTypeNamePtr;
+
+ if (isdigit(ch))
+ {
+ _pszCurrentToken[nTokenIndex++] = ch;
+ _cCurrentTokenParameters = ch - W('0');
+ _pchTypeNamePtr++;
+ }
+ else
+ {
+ tokenType = TTT_INVALID;
+ }
+ }
+ else
+ {
+ // The back tick (`) was the first character in the token.
+ tokenType = TTT_INVALID;
+ }
+
+ break;
+ }
+
+ _pchTypeNamePtr++;
+ ch = *_pchTypeNamePtr;
+ }
+
+ // Empty token is invalid.
+ if (nTokenIndex == 0)
+ {
+ tokenType = TTT_INVALID;
+ }
+
+
+ if ((tokenType == TTT_PINTERFACE) && (_cCurrentTokenParameters == 0))
+ {
+ tokenType = TTT_INVALID;
+ }
+
+ _pszCurrentToken[nTokenIndex] = W('\0');
+
+ return tokenType;
+}
+
+bool TypeNameTokenizer::TrimThenPeekAndCompareNextCharIfAny(__in WCHAR chExpectedSymbol)
+{
+ // Trim leading spaces.
+ SkipWhitespace();
+
+ return (*_pchTypeNamePtr == chExpectedSymbol);
+}
+
+bool TypeNameTokenizer::TrimThenFetchAndCompareNextCharIfAny(__in WCHAR chExpectedSymbol)
+{
+ bool fSymbolsMatch;
+
+ // Trim leading spaces.
+ SkipWhitespace();
+
+ WCHAR ch = *_pchTypeNamePtr;
+
+ // Do not move the typename pointer past the end of the typename string.
+ if (ch != W('\0'))
+ {
+ _pchTypeNamePtr++;
+ }
+
+ fSymbolsMatch = (ch == chExpectedSymbol);
+
+ // Trim trailing spaces.
+ SkipWhitespace();
+
+ return fSymbolsMatch;
+}
+
+HRESULT TypeNameTokenizer::ParseNonParameterizedType()
+{
+ HRESULT hr = S_OK;
+
+ // There should be no trailing symbols or spaces after a non-parameterized type.
+ if (!TrimThenFetchAndCompareNextCharIfAny(W('\0')))
+ {
+ hr = RO_E_METADATA_INVALID_TYPE_FORMAT;
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ _sphstrTypeNameParts[_cTokens++].Set(_pszCurrentToken);
+ //hr = WindowsCreateString(_pszCurrentToken, static_cast<UINT32>(wcslen(_pszCurrentToken)), &_sphstrTypeNameParts[_cTokens++]);
+
+ //if (FAILED(hr))
+ //{
+ // _cTokens--;
+ //}
+ }
+
+ //_ASSERTE(SUCCEEDED(hr) ? _cTokens == 1 : _cTokens == 0);
+
+ return hr;
+}
+
+HRESULT TypeNameTokenizer::ParseParameterizedType()
+{
+ HRESULT hr = S_OK;
+
+ // For every pinterface in the typename (base and nested), there will be a corresponding entry in the
+ // anRemainingParameters array to hold the number of parameters that need to be matched for that pinterface.
+    // The count of parameters for a given pinterface is decremented after parsing each parameter and, when the
+ // count reaches zero, the corresponding pinterface is considered completely parsed.
+ int nInnermostPinterfaceIndex = -1;
+ SArray<int> anRemainingParameters;
+ DWORD dwExpectedTrailingCloseBrackets = 0;
+ TYPENAME_TOKEN_TYPE tokenType = TTT_PINTERFACE;
+
+ do
+ {
+ switch (tokenType)
+ {
+ case TTT_PINTERFACE:
+ {
+ if (++nInnermostPinterfaceIndex > 0)
+ {
+ // This was a nested pinterface (i.e. a parameter of another pinterface), so we
+ // need to decrement the parameters count of its parent pinterface.
+ anRemainingParameters[nInnermostPinterfaceIndex - 1]--;
+ if (anRemainingParameters[nInnermostPinterfaceIndex - 1] == 0)
+ {
+ nInnermostPinterfaceIndex--;
+ }
+ }
+
+ // Store pinterface's parameters count.
+ if (nInnermostPinterfaceIndex < (int)anRemainingParameters.GetCount())
+ {
+ anRemainingParameters[nInnermostPinterfaceIndex] = _cCurrentTokenParameters;
+ }
+ else
+ {
+ anRemainingParameters.Append(_cCurrentTokenParameters);
+ }
+
+ if (!TrimThenFetchAndCompareNextCharIfAny(W('<')))
+ {
+ hr = RO_E_METADATA_INVALID_TYPE_FORMAT;
+ }
+
+ dwExpectedTrailingCloseBrackets++;
+ }
+ break;
+
+ case TTT_IDENTIFIER:
+ {
+ _ASSERTE(nInnermostPinterfaceIndex != -1);
+ _ASSERTE(anRemainingParameters[nInnermostPinterfaceIndex] != 0);
+
+ anRemainingParameters[nInnermostPinterfaceIndex]--;
+
+ if (anRemainingParameters[nInnermostPinterfaceIndex] == 0)
+ {
+ // This was the last parameter for the given pinterface.
+ nInnermostPinterfaceIndex--;
+ hr = VerifyTrailingCloseBrackets(1);
+
+ if (SUCCEEDED(hr))
+ {
+ dwExpectedTrailingCloseBrackets--;
+
+ if (nInnermostPinterfaceIndex == -1)
+ {
+ // No more unparsed pinterfaces
+ hr = VerifyTrailingCloseBrackets(dwExpectedTrailingCloseBrackets);
+
+ if (SUCCEEDED(hr))
+ {
+ dwExpectedTrailingCloseBrackets = 0;
+
+ if (!TrimThenFetchAndCompareNextCharIfAny(W('\0')))
+ {
+ hr = RO_E_METADATA_INVALID_TYPE_FORMAT;
+ }
+ }
+ }
+ else
+ {
+ while (TrimThenPeekAndCompareNextCharIfAny(W('>')))
+ {
+ if (dwExpectedTrailingCloseBrackets > 0)
+ {
+ TrimThenFetchAndCompareNextCharIfAny(W('>'));
+ dwExpectedTrailingCloseBrackets--;
+ }
+ else
+ {
+ hr = RO_E_METADATA_INVALID_TYPE_FORMAT;
+ break;
+ }
+ }
+
+ // There are more parameters, so we expect a comma-separated list.
+ if (!TrimThenFetchAndCompareNextCharIfAny(W(',')))
+ {
+ hr = RO_E_METADATA_INVALID_TYPE_FORMAT;
+ }
+ }
+ }
+ }
+ else
+ {
+ // There are more parameters, so we expect a comma-separated list.
+ if (!TrimThenFetchAndCompareNextCharIfAny(W(',')))
+ {
+ hr = RO_E_METADATA_INVALID_TYPE_FORMAT;
+ }
+ }
+ }
+ break;
+
+ default:
+ {
+ hr = RO_E_METADATA_INVALID_TYPE_FORMAT;
+ }
+ }
+
+ // Store current token.
+ if (SUCCEEDED(hr))
+ {
+ _sphstrTypeNameParts[_cTokens++].Set(_pszCurrentToken);
+ //hr = WindowsCreateString(_pszCurrentToken, static_cast<UINT32>(wcslen(_pszCurrentToken)), &_sphstrTypeNameParts[_cTokens++]);
+
+ //if (FAILED(hr))
+ //{
+ // _cTokens--;
+ //}
+ }
+
+ tokenType = ReadNextToken();
+
+ } while (SUCCEEDED(hr) && (nInnermostPinterfaceIndex != -1));
+
+ return hr;
+}
+
+HRESULT TypeNameTokenizer::VerifyTrailingCloseBrackets(__in DWORD dwExpectedTrailingCloseBrackets)
+{
+ HRESULT hr = S_OK;
+
+ for (DWORD dwClosingBracket = 0; dwClosingBracket < dwExpectedTrailingCloseBrackets; dwClosingBracket++)
+ {
+ if (!TrimThenFetchAndCompareNextCharIfAny(W('>')))
+ {
+ hr = RO_E_METADATA_INVALID_TYPE_FORMAT;
+ }
+ }
+
+ return hr;
+}
+
+void TypeNameTokenizer::SkipWhitespace()
+{
+ while (IsWhitespace(*_pchTypeNamePtr))
+ {
+ _pchTypeNamePtr++;
+ }
+}
+
+bool TypeNameTokenizer::IsWhitespace(WCHAR ch)
+{
+ bool fIsWhitespace = false;
+
+ switch (ch)
+ {
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\f':
+ case '\n':
+ fIsWhitespace = true;
+ break;
+ }
+
+ return fIsWhitespace;
+}
diff --git a/src/vm/crossgenroresolvenamespace.cpp b/src/vm/crossgenroresolvenamespace.cpp
new file mode 100644
index 0000000000..e31adaff69
--- /dev/null
+++ b/src/vm/crossgenroresolvenamespace.cpp
@@ -0,0 +1,195 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//+----------------------------------------------------------------------------
+//
+
+//
+// Adapted from Windows sources. Modified to run on Windows version < Win8, so
+// that we can use this in CrossGen.
+//
+
+//
+//-----------------------------------------------------------------------------
+
+#include "common.h"
+#include "crossgenroresolvenamespace.h"
+#include "stringarraylist.h"
+
+namespace Crossgen
+{
+
+#define WINDOWS_NAMESPACE W("Windows")
+#define WINDOWS_NAMESPACE_PREFIX WINDOWS_NAMESPACE W(".")
+#define WINMD_FILE_EXTENSION_L W(".winmd")
+
+StringArrayList* g_wszWindowsNamespaceDirectories;
+StringArrayList* g_wszUserNamespaceDirectories;
+
+BOOL
+IsWindowsNamespace(const WCHAR * wszNamespace)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (wcsncmp(wszNamespace, WINDOWS_NAMESPACE_PREFIX, (_countof(WINDOWS_NAMESPACE_PREFIX) - 1)) == 0)
+ {
+ return TRUE;
+ }
+ else if (wcscmp(wszNamespace, WINDOWS_NAMESPACE) == 0)
+ {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+BOOL
+DoesFileExist(
+ const WCHAR * wszFileName)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ BOOL fFileExists = TRUE;
+ DWORD dwFileAttributes;
+ dwFileAttributes = GetFileAttributesW(wszFileName);
+
+ if ((dwFileAttributes == INVALID_FILE_ATTRIBUTES) ||
+ (dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY))
+ {
+ fFileExists = FALSE;
+ }
+
+ return fFileExists;
+}
+
+
+HRESULT
+FindNamespaceFileInDirectory(
+ const WCHAR * wszNamespace,
+ const WCHAR * wszDirectory,
+ DWORD * pcMetadataFiles,
+ SString ** ppMetadataFiles)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (wszDirectory == nullptr)
+ return ERROR_NOT_SUPPORTED;
+
+ WCHAR wszFilePath[MAX_PATH + 1];
+ wcscpy_s(
+ wszFilePath,
+ _countof(wszFilePath),
+ wszDirectory);
+
+ WCHAR * wszFirstFileNameChar = wszFilePath + wcslen(wszFilePath);
+
+ // If there's no backslash, add one.
+ if (*(wszFirstFileNameChar - 1) != '\\')
+ *wszFirstFileNameChar++ = '\\';
+
+ WCHAR wszRemainingNamespace[MAX_PATH +1];
+ wcscpy_s(
+ wszRemainingNamespace,
+ _countof(wszRemainingNamespace),
+ wszNamespace);
+
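+    // Probe from the most specific file name down to the least specific one by
+    // stripping trailing namespace segments. As a hedged illustration (the
+    // directory below is hypothetical), resolving "Windows.Foundation.Collections"
+    // against "C:\WinMD\" probes
+    //     C:\WinMD\Windows.Foundation.Collections.winmd
+    //     C:\WinMD\Windows.Foundation.winmd
+    //     C:\WinMD\Windows.winmd
+    // and returns S_FALSE if none of these files exists.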
+ do
+ {
+ *wszFirstFileNameChar = W('\0');
+ wcscat_s(
+ wszFilePath,
+ _countof(wszFilePath),
+ wszRemainingNamespace);
+ wcscat_s(
+ wszFilePath,
+ _countof(wszFilePath),
+ WINMD_FILE_EXTENSION_L);
+
+ if (DoesFileExist(wszFilePath))
+ {
+ *ppMetadataFiles = new SString(wszFilePath);
+ *pcMetadataFiles = 1;
+ return S_OK;
+ }
+
+ WCHAR * wszLastDotChar = wcsrchr(wszRemainingNamespace, W('.'));
+ if (wszLastDotChar == nullptr)
+ {
+ *ppMetadataFiles = nullptr;
+ *pcMetadataFiles = 0;
+ return S_FALSE;
+ }
+ *wszLastDotChar = W('\0');
+ } while (true);
+}
+
+
+__checkReturn
+HRESULT WINAPI CrossgenRoResolveNamespace(
+ const LPCWSTR wszNamespace,
+ DWORD * pcMetadataFiles,
+ SString ** ppMetadataFiles)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+
+ if (IsWindowsNamespace(wszNamespace))
+ {
+ DWORD cAppPaths = g_wszWindowsNamespaceDirectories->GetCount();
+
+ for (DWORD i = 0; i < cAppPaths; i++)
+ {
+            // Returns S_FALSE on file not found so that we continue probing the app directory graph
+ IfFailRet(FindNamespaceFileInDirectory(
+ wszNamespace,
+ g_wszWindowsNamespaceDirectories->Get(i).GetUnicode(),
+ pcMetadataFiles,
+ ppMetadataFiles));
+
+ if (hr == S_OK)
+ {
+ return hr;
+ }
+ }
+ }
+ else
+ {
+ DWORD cAppPaths = g_wszUserNamespaceDirectories->GetCount();
+
+ for (DWORD i = 0; i < cAppPaths; i++)
+ {
+            // Returns S_FALSE on file not found so that we continue probing the app directory graph
+ IfFailRet(FindNamespaceFileInDirectory(
+ wszNamespace,
+ g_wszUserNamespaceDirectories->Get(i).GetUnicode(),
+ pcMetadataFiles,
+ ppMetadataFiles));
+
+ if (hr == S_OK)
+ {
+ return hr;
+ }
+ }
+ }
+
+ return hr;
+} // RoResolveNamespace
+
+void SetFirstPartyWinMDPaths(StringArrayList* saAppPaths)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ g_wszWindowsNamespaceDirectories = saAppPaths;
+}
+
+void SetAppPaths(StringArrayList* saAppPaths)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ g_wszUserNamespaceDirectories = saAppPaths;
+}
+
+}// Namespace Crossgen
diff --git a/src/vm/crossgenroresolvenamespace.h b/src/vm/crossgenroresolvenamespace.h
new file mode 100644
index 0000000000..3e3fb3f8a9
--- /dev/null
+++ b/src/vm/crossgenroresolvenamespace.h
@@ -0,0 +1,28 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//+----------------------------------------------------------------------------
+//
+
+//
+
+//
+//-----------------------------------------------------------------------------
+
+#ifndef __CROSSGENRORESOLVENAMESPACE_H
+#define __CROSSGENRORESOLVENAMESPACE_H
+
+namespace Crossgen
+{
+ HRESULT WINAPI CrossgenRoResolveNamespace(
+ const LPCWSTR wszNamespace,
+ DWORD * pcMetadataFiles,
+ SString ** ppMetadataFiles);
+
+ void SetFirstPartyWinMDPaths(StringArrayList* saAppPaths);
+ void SetAppPaths(StringArrayList* saAppPaths);
+}
+
+#endif
diff --git a/src/vm/crst.cpp b/src/vm/crst.cpp
new file mode 100644
index 0000000000..72665493a4
--- /dev/null
+++ b/src/vm/crst.cpp
@@ -0,0 +1,995 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// CRST.CPP
+//
+
+//
+
+
+#include "common.h"
+
+#include "crst.h"
+#include "log.h"
+#include "corhost.h"
+
+// We need to know if we're on the helper thread. We need this header for g_pDebugInterface.
+#include "dbginterface.h"
+#include "threadsuspend.h"
+
+#define __IN_CRST_CPP
+#include <crsttypes.h>
+#undef __IN_CRST_CPP
+
+#ifndef DACCESS_COMPILE
+Volatile<LONG> g_ShutdownCrstUsageCount = 0;
+
+//-----------------------------------------------------------------
+// Initialize critical section
+//-----------------------------------------------------------------
+VOID CrstBase::InitWorker(INDEBUG_COMMA(CrstType crstType) CrstFlags flags)
+{
+ CONTRACTL {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ } CONTRACTL_END;
+
+ // Disallow creation of Crst before EE starts. But only complain if we end up
+ // being hosted, since such Crsts have escaped the hosting net and will cause
+ // AVs on next use.
+#ifdef _DEBUG
+ static bool fEarlyInit; // = false
+
+ if (!g_fEEStarted)
+ {
+ if (!CLRSyncHosted())
+ fEarlyInit = true;
+ }
+
+ // If we are now hosted, we better not have *ever* created some Crsts that are
+ // not known to our host.
+ _ASSERTE(!fEarlyInit || !CLRSyncHosted());
+
+#endif
+
+ _ASSERTE((flags & CRST_INITIALIZED) == 0);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSyncManager *pSyncManager = CorHost2::GetHostSyncManager();
+ if (pSyncManager) {
+ ResetOSCritSec ();
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ SetOSCritSec ();
+ }
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (!IsOSCritSec())
+ {
+ IHostCrst *pHostCrst;
+ PREFIX_ASSUME(pSyncManager != NULL);
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pSyncManager->CreateCrst(&pHostCrst);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (hr != S_OK) {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ ThrowOutOfMemory();
+ }
+ m_pHostCrst = pHostCrst;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ UnsafeInitializeCriticalSection(&m_criticalsection);
+ }
+
+ SetFlags(flags);
+ SetCrstInitialized();
+
+#ifdef _DEBUG
+ DebugInit(crstType, flags);
+#endif
+}
+
+//-----------------------------------------------------------------
+// Clean up critical section
+//-----------------------------------------------------------------
+void CrstBase::Destroy()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // nothing to do if not initialized
+ if (!IsCrstInitialized())
+ return;
+
+ // If this assert fired, a crst got deleted while some thread
+ // still owned it. This can happen if the process detaches from
+ // our DLL.
+#ifdef _DEBUG
+ EEThreadId holderthreadid = m_holderthreadid;
+ _ASSERTE(holderthreadid.IsUnknown() || IsAtProcessExit() || g_fEEShutDown);
+#endif
+
+ // If a lock is host breakable, a host is required to block the release call until
+ // deadlock detection is finished.
+ GCPreemp __gcHolder((m_dwFlags & CRST_HOST_BREAKABLE) == CRST_HOST_BREAKABLE);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (!IsOSCritSec())
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ m_pHostCrst->Release();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ UnsafeDeleteCriticalSection(&m_criticalsection);
+ }
+
+ LOG((LF_SYNC, INFO3, "Deleting 0x%x\n", this));
+#ifdef _DEBUG
+ DebugDestroy();
+#endif
+
+ ResetFlags();
+}
+
+extern void WaitForEndOfShutdown();
+
+//-----------------------------------------------------------------
+// If we're in shutdown (as determined by caller since each lock needs its
+// own shutdown flag) and this is a non-special thread (not helper/finalizer/shutdown),
+// then release the crst and block forever.
+// See the prototype for more details.
+//-----------------------------------------------------------------
+void CrstBase::ReleaseAndBlockForShutdownIfNotSpecialThread()
+{
+ CONTRACTL {
+ NOTHROW;
+
+ // We're almost always MODE_PREEMPTIVE, but if it's a thread suspending for GC,
+ // then we might be MODE_COOPERATIVE. Fortunately in that case, we don't block on shutdown.
+ // We assert this below.
+ MODE_ANY;
+ GC_NOTRIGGER;
+
+ PRECONDITION(this->OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ if (
+ (((size_t)ClrFlsGetValue (TlsIdx_ThreadType)) & (ThreadType_Finalizer|ThreadType_DbgHelper|ThreadType_Shutdown|ThreadType_GC)) == 0)
+ {
+ // The process is shutting down. Release the lock and just block forever.
+ this->Leave();
+
+ // is this safe to use here since we never return?
+ GCX_ASSERT_PREEMP();
+
+ WaitForEndOfShutdown();
+ __SwitchToThread(INFINITE, CALLER_LIMITS_SPINNING);
+ _ASSERTE (!"Can not reach here");
+ }
+}
+
+#endif // DACCESS_COMPILE
+
+
+//-----------------------------------------------------------------
+// Acquire the lock.
+//-----------------------------------------------------------------
+#ifdef DACCESS_COMPILE
+// In DAC builds, we will not actually take the lock. Instead, we just need to determine
+// whether the LS holds the lock. If it does, we assume the locked data is in an inconsistent
+// state and throw, rather than using erroneous values.
+// Argument:
+// input: noLevelCheckFlag - indicates whether to check the crst level
+// Note: Throws
+void CrstBase::Enter(INDEBUG(NoLevelCheckFlag noLevelCheckFlag/* = CRST_LEVEL_CHECK*/))
+{
+#ifdef _DEBUG
+ if (m_entercount != 0)
+ {
+ ThrowHR(CORDBG_E_PROCESS_NOT_SYNCHRONIZED);
+ }
+#endif
+}
+#else // !DACCESS_COMPILE
+
+
+#if !defined(FEATURE_CORECLR)
+// Slower spin enter path taken after the first attempt failed
+void CrstBase::SpinEnter()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+    // We only reach this routine when the first attempt failed, so it is time to fire the ETW event (fyuan)
+
+ // Fire an ETW event to mark the beginning of native contention
+ FireEtwContentionStart_V1(ETW::ContentionLog::ContentionStructs::NativeContention, GetClrInstanceId());
+
+ // Try spinning and yielding before eventually blocking.
+ // The limit of dwRepetitions = 10 is largely arbitrary - feel free to tune if you have evidence
+ // you're making things better.
+
+ for (DWORD iter = 0; iter < g_SpinConstants.dwRepetitions; iter++)
+ {
+ DWORD i = g_SpinConstants.dwInitialDuration;
+
+ do
+ {
+ if ( (m_criticalsection.LockCount == -1 ||
+ (size_t)m_criticalsection.OwningThread == (size_t) GetCurrentThreadId())
+ && UnsafeTryEnterCriticalSection(&m_criticalsection))
+ {
+ return;
+ }
+
+ if (g_SystemInfo.dwNumberOfProcessors <= 1)
+ {
+ break;
+ }
+
+ // Delay by approximately 2*i clock cycles (Pentium III).
+ // This is brittle code - future processors may of course execute this
+ // faster or slower, and future code generators may eliminate the loop altogether.
+            // The precise value of the delay is not critical, however, and we can't
+            // think of a better way that isn't machine-dependent.
+
+ for (int delayCount = i; --delayCount; )
+ {
+                YieldProcessor(); // indicate to the processor that we are spinning
+ }
+
+ // exponential backoff: wait a factor longer in the next iteration
+ i *= g_SpinConstants.dwBackoffFactor;
+ } while (i < g_SpinConstants.dwMaximumDuration);
+
+ __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ }
+
+ UnsafeEnterCriticalSection(& m_criticalsection);
+}
+#endif // FEATURE_CORECLR
+
+
+void CrstBase::Enter(INDEBUG(NoLevelCheckFlag noLevelCheckFlag/* = CRST_LEVEL_CHECK*/))
+{
+ //-------------------------------------------------------------------------------------------
+ // What, no CONTRACT?
+ //
+ // We can't put an actual CONTRACT here as PostEnter() makes unscoped changes to the GC_NoTrigger
+ // counter. But we do perform the equivalent checks manually.
+ //
+ // What's worse, the implied contract differs for different flavors of crst.
+ //
+ // THROWS/FAULT
+ //
+    // A crst can be HOST_BREAKABLE or not. A HOST_BREAKABLE crst can throw on an attempt to enter
+    // (due to deadlock breaking by the host). A non-breakable crst will never
+ // throw or OOM or fail an enter.
+ //
+ //
+ //
+ //
+ // GC/MODE
+ // Orthogonally, a crst can be one of the following flavors. We only want to see the
+ // "normal" type used in new code. Other types, kept for legacy reasons, are listed in
+ // order from least objectionable to most objectionable.
+ //
+ // normal - This is the preferred type of crst. Enter() will force-switch your thread
+ // into preemptive mode if it isn't already. Thus, the effective contract is:
+ //
+ // MODE_ANY
+ // GC_TRIGGERS
+ //
+ //
+ //
+ // CRST_UNSAFE_COOPGC - You can only attempt to acquire this crst if you're already
+ // in coop mode. It is guaranteed no GC will occur while waiting to acquire the lock.
+ // While you hold the lock, your thread is in a GCFORBID state.
+ //
+ // MODE_COOP
+ // GC_NOTRIGGER
+ //
+ //
+ //
+ // CRST_UNSAFE_ANYMODE - You can attempt to acquire this in either mode. Entering the
+ // crst will not change your thread mode but it will increment the GCNoTrigger count.
+ //
+ // MODE_ANY
+ // GC_NOTRIGGER
+ //------------------------------------------------------------------------------------------------
+
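+    // A minimal sketch of the "normal" flavor at a call site (the names below
+    // are illustrative, not taken from these sources). Entering from cooperative
+    // mode is legal because Enter() toggles the thread to preemptive only while
+    // it blocks, then restores the original mode:
+    //
+    //     {
+    //         GCX_COOP();                   // thread runs in cooperative mode
+    //         CrstHolder ch(&m_SomeCrst);   // preemptive while acquiring, then
+    //                                       // cooperative again while holding
+    //     }                                 // holder leaves the crst on scope exit
+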
+#ifdef ENABLE_CONTRACTS_IMPL
+ ClrDebugState *pClrDebugState = CheckClrDebugState();
+ if (pClrDebugState)
+ {
+ if (m_dwFlags & CRST_HOST_BREAKABLE)
+ {
+ if (pClrDebugState->IsFaultForbid() &&
+ !(pClrDebugState->ViolationMask() & (FaultViolation|FaultNotFatal|BadDebugState)))
+ {
+ CONTRACT_ASSERT("You cannot enter a HOST_BREAKABLE lock in a FAULTFORBID region.",
+ Contract::FAULT_Forbid,
+ Contract::FAULT_Mask,
+ __FUNCTION__,
+ __FILE__,
+ __LINE__);
+ }
+
+ if (!(pClrDebugState->CheckOkayToThrowNoAssert()))
+ {
+ CONTRACT_ASSERT("You cannot enter a HOST_BREAKABLE lock in a NOTHROW region.",
+ Contract::THROWS_No,
+ Contract::THROWS_Mask,
+ __FUNCTION__,
+ __FILE__,
+ __LINE__);
+ }
+ }
+
+ // If we might want to toggle the GC mode, then we better not be in a GC_NOTRIGGERS region
+ if (!(m_dwFlags & (CRST_UNSAFE_COOPGC | CRST_UNSAFE_ANYMODE | CRST_GC_NOTRIGGER_WHEN_TAKEN)))
+ {
+ if (pClrDebugState->GetGCNoTriggerCount())
+ {
+ // If we have no thread object, we won't be toggling the GC. This is the case,
+ // for example, on the debugger helper thread which is always GC_NOTRIGGERS.
+ if (GetThreadNULLOk() != NULL)
+ {
+ // Will we really need to change GC mode COOPERATIVE to PREEMPTIVE?
+ if (GetThreadNULLOk()->PreemptiveGCDisabled())
+ {
+ if (!((GCViolation | BadDebugState) & pClrDebugState->ViolationMask()))
+ {
+ CONTRACT_ASSERT("You cannot enter a lock in a GC_NOTRIGGER + MODE_COOPERATIVE region.",
+ Contract::GC_NoTrigger,
+ Contract::GC_Mask,
+ __FUNCTION__,
+ __FILE__,
+ __LINE__);
+ }
+ }
+ }
+ }
+ }
+
+ // The mode checks and enforcement of GC_NOTRIGGER during the lock are done in CrstBase::PostEnter().
+
+ }
+#endif //ENABLE_CONTRACTS_IMPL
+
+
+
+ SCAN_IGNORE_THROW;
+ SCAN_IGNORE_FAULT;
+ SCAN_IGNORE_TRIGGER;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+ _ASSERTE(IsCrstInitialized());
+
+ // Is Critical Section entered?
+ // We could have perhaps used m_criticalsection.LockCount, but
+ // while spinning, we want to fire the ETW event only once
+ BOOL fIsCriticalSectionEnteredAfterFailingOnce = FALSE;
+
+ Thread * pThread;
+ BOOL fToggle;
+
+ BEGIN_GETTHREAD_ALLOWED;
+ pThread = GetThread();
+ fToggle = ((m_dwFlags & (CRST_UNSAFE_ANYMODE | CRST_UNSAFE_COOPGC | CRST_GC_NOTRIGGER_WHEN_TAKEN)) == 0) // condition normally false
+ && pThread && pThread->PreemptiveGCDisabled();
+
+ if (fToggle) {
+ pThread->EnablePreemptiveGC();
+ }
+ END_GETTHREAD_ALLOWED;
+
+#ifdef _DEBUG
+ PreEnter ();
+#endif
+
+ _ASSERTE(noLevelCheckFlag == CRST_NO_LEVEL_CHECK || IsSafeToTake() || g_fEEShutDown);
+
+ // Check for both rare case using one if-check
+ if (m_dwFlags & (CRST_TAKEN_DURING_SHUTDOWN | CRST_DEBUGGER_THREAD))
+ {
+ if (m_dwFlags & CRST_TAKEN_DURING_SHUTDOWN)
+ {
+ // increment the usage count of locks that can be taken during shutdown
+ FastInterlockIncrement(&g_ShutdownCrstUsageCount);
+ }
+
+ // If this is a debugger lock, bump up the "Can't-Stop" count.
+ // We'll bump it down when we release the lock.
+ if (m_dwFlags & CRST_DEBUGGER_THREAD)
+ {
+ IncCantStopCount();
+ }
+ }
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (!IsOSCritSec())
+ {
+ DWORD option;
+ if (m_dwFlags & CRST_HOST_BREAKABLE)
+ {
+ option = 0;
+ }
+ else
+ {
+ option = WAIT_NOTINDEADLOCK;
+ }
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(pThread);
+
+ // Try entering the critical section once, if we fail we contend
+ // and fire the contention start ETW event
+ hr = m_pHostCrst->TryEnter(option, &fIsCriticalSectionEnteredAfterFailingOnce);
+
+ if (! fIsCriticalSectionEnteredAfterFailingOnce)
+ {
+#ifndef FEATURE_CORECLR
+ // Fire an ETW event to mark the beginning of native contention
+ FireEtwContentionStart_V1(ETW::ContentionLog::ContentionStructs::NativeContention, GetClrInstanceId());
+#endif // !FEATURE_CORECLR
+ fIsCriticalSectionEnteredAfterFailingOnce = TRUE;
+
+ hr = m_pHostCrst->Enter(option);
+ }
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ PREFIX_ASSUME (hr == S_OK || ((m_dwFlags & CRST_HOST_BREAKABLE) && hr == HOST_E_DEADLOCK));
+
+ if (hr == HOST_E_DEADLOCK)
+ {
+ RaiseDeadLockException();
+ }
+
+ INCTHREADLOCKCOUNTTHREAD(pThread);
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ if (CLRTaskHosted())
+ {
+ Thread::BeginThreadAffinity();
+ }
+
+#ifdef FEATURE_CORECLR
+ UnsafeEnterCriticalSection(&m_criticalsection);
+#else
+ // Try entering the critical section once, if we fail we contend
+ // and fire the contention start ETW event
+ if ((m_criticalsection.LockCount == -1 || (size_t)m_criticalsection.OwningThread == (size_t) GetCurrentThreadId())
+ && UnsafeTryEnterCriticalSection(& m_criticalsection))
+ {
+ }
+ else
+ {
+ SpinEnter();
+
+ fIsCriticalSectionEnteredAfterFailingOnce = TRUE;
+ }
+#endif
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ INCTHREADLOCKCOUNTTHREAD(pThread);
+#endif
+ }
+
+#ifndef FEATURE_CORECLR
+ // Fire an ETW event to mark the end of native contention
+ // This we do only when we have fired a contention start event before
+ if (fIsCriticalSectionEnteredAfterFailingOnce)
+ {
+ FireEtwContentionStop(ETW::ContentionLog::ContentionStructs::NativeContention, GetClrInstanceId());
+ }
+#endif // !FEATURE_CORECLR
+
+#ifdef _DEBUG
+ PostEnter();
+#endif
+
+ if (fToggle)
+ {
+ BEGIN_GETTHREAD_ALLOWED;
+ pThread->DisablePreemptiveGC();
+ END_GETTHREAD_ALLOWED;
+ }
+}
+
+//-----------------------------------------------------------------
+// Release the lock.
+//-----------------------------------------------------------------
+void CrstBase::Leave()
+{
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ _ASSERTE(IsCrstInitialized());
+
+#ifdef _DEBUG
+ PreLeave ();
+#endif //_DEBUG
+
+#if defined(FEATURE_INCLUDE_ALL_INTERFACES) || defined(_DEBUG)
+ Thread * pThread = GetThread();
+#endif
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (!IsOSCritSec()) {
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(pThread);
+ hr = m_pHostCrst->Leave();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ _ASSERTE (hr == S_OK);
+ DECTHREADLOCKCOUNT ();
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ UnsafeLeaveCriticalSection(&m_criticalsection);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ DECTHREADLOCKCOUNTTHREAD(pThread);
+#endif
+
+ if (CLRTaskHosted()) {
+ Thread::EndThreadAffinity();
+ }
+ }
+
+ // Check for both rare case using one if-check
+ if (m_dwFlags & (CRST_TAKEN_DURING_SHUTDOWN | CRST_DEBUGGER_THREAD))
+ {
+ // If this is a debugger lock, restore the "Can't-Stop" count.
+ // We bumped it up when we Entered the lock.
+ if (m_dwFlags & CRST_DEBUGGER_THREAD)
+ {
+ DecCantStopCount();
+ }
+
+ if (m_dwFlags & CRST_TAKEN_DURING_SHUTDOWN)
+ {
+ // decrement the usage count of locks that can be taken during shutdown
+ _ASSERTE_MSG(g_ShutdownCrstUsageCount.Load() > 0, "Attempting to leave a lock that was never taken!");
+ FastInterlockDecrement(&g_ShutdownCrstUsageCount);
+ }
+ }
+
+#ifdef _DEBUG
+ //_ASSERTE(m_cannotLeave==0 || OwnedByCurrentThread());
+
+ if ((pThread != NULL) &&
+ (m_dwFlags & CRST_DEBUG_ONLY_CHECK_FORBID_SUSPEND_THREAD))
+ { // The lock requires ForbidSuspendRegion while it is taken
+ CONSISTENCY_CHECK_MSGF(pThread->IsInForbidSuspendRegion(), ("ForbidSuspend region was released before the lock:'%s'", m_tag));
+ }
+#endif //_DEBUG
+} // CrstBase::Leave
+
+
+#ifdef _DEBUG
+void CrstBase::PreEnter()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ // Are we in the shutdown sequence and in phase 2 of it?
+ if (g_fProcessDetach && (g_fEEShutDown & ShutDown_Phase2))
+ {
+ // Ensure that this lock has been flagged to be taken during shutdown
+ _ASSERTE_MSG(CanBeTakenDuringShutdown(), "Attempting to take a lock at shutdown that is not CRST_TAKEN_DURING_SHUTDOWN");
+ }
+
+ Thread * pThread = GetThreadNULLOk();
+
+ if (pThread)
+ {
+ // If the thread has SpinLock, it can not take Crst.
+ _ASSERTE ((pThread->m_StateNC & Thread::TSNC_OwnsSpinLock) == 0);
+ }
+
+ // If we're on the debugger helper thread, we can only take helper thread locks.
+ bool fIsHelperThread = (g_pDebugInterface == NULL) ? false : g_pDebugInterface->ThisIsHelperThread();
+ bool fIsDebuggerLock = (m_dwFlags & CRST_DEBUGGER_THREAD) != 0;
+
+ // don't enforce this check during regular process exit or fail fast
+ if (fIsHelperThread && !fIsDebuggerLock && !IsAtProcessExit() && !g_fFastExitProcess)
+ {
+ CONSISTENCY_CHECK_MSGF(false, ("Helper thread taking non-helper lock:'%s'", m_tag));
+ }
+
+ // If a thread suspends another thread, it cannot acquire locks.
+ if ((pThread != NULL) &&
+ (pThread->Debug_GetUnsafeSuspendeeCount() != 0))
+ {
+ CONSISTENCY_CHECK_MSGF(false, ("Suspender thread taking non-suspender lock:'%s'", m_tag));
+ }
+
+ if (ThreadStore::s_pThreadStore->IsCrstForThreadStore(this))
+ return;
+
+ if (m_dwFlags & CRST_UNSAFE_COOPGC)
+ {
+ CONSISTENCY_CHECK (IsGCThread ()
+ || (pThread != NULL && pThread->PreemptiveGCDisabled())
+ // If GC heap has not been initialized yet, there is no need to synchronize with GC.
+ // This check is mainly for code called from EEStartup.
+ || (pThread == NULL && !GCHeap::IsGCHeapInitialized()) );
+ }
+
+ if ((pThread != NULL) &&
+ (m_dwFlags & CRST_DEBUG_ONLY_CHECK_FORBID_SUSPEND_THREAD))
+ {
+ CONSISTENCY_CHECK_MSGF(pThread->IsInForbidSuspendRegion(), ("The lock '%s' can be taken only in ForbidSuspend region.", m_tag));
+ }
+}
+
+void CrstBase::PostEnter()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ if ((m_dwFlags & CRST_HOST_BREAKABLE) != 0)
+ {
+ HOST_BREAKABLE_CRST_TAKEN(this);
+ }
+ else
+ {
+ EE_LOCK_TAKEN(this);
+ }
+
+ _ASSERTE((m_entercount == 0 && m_holderthreadid.IsUnknown()) ||
+ m_holderthreadid.IsSameThread() ||
+ IsAtProcessExit());
+ m_holderthreadid.SetThreadId();
+ m_entercount++;
+
+ if (m_entercount == 1)
+ {
+ _ASSERTE((m_next == NULL) && (m_prev == NULL));
+
+ // Link this Crst into the Thread's chain of OwnedCrsts
+ CrstBase *pcrst = GetThreadsOwnedCrsts();
+ if (pcrst == NULL)
+ {
+ SetThreadsOwnedCrsts (this);
+ }
+ else
+ {
+ while (pcrst->m_next != NULL)
+ pcrst = pcrst->m_next;
+ pcrst->m_next = this;
+ m_prev = pcrst;
+ }
+ }
+
+ Thread * pThread = GetThreadNULLOk();
+ if ((m_dwFlags & CRST_HOST_BREAKABLE) == 0)
+ {
+ if (pThread)
+ {
+ pThread->IncUnbreakableLockCount();
+ }
+ }
+
+ if (ThreadStore::s_pThreadStore->IsCrstForThreadStore(this))
+ return;
+
+ if (m_dwFlags & (CRST_UNSAFE_ANYMODE | CRST_UNSAFE_COOPGC | CRST_GC_NOTRIGGER_WHEN_TAKEN))
+ {
+ if (pThread == NULL)
+ {
+ // Cannot set NoTrigger. This could conceivably turn into
+            // a GC hole if the thread is created and then a GC rendezvous happens
+ // while the lock is still held.
+ }
+ else
+ {
+ // Keep a count, since the thread may change from NULL to non-NULL and
+ // we don't want to have unbalanced NoTrigger calls
+ m_countNoTriggerGC++;
+ INCONTRACT(pThread->BeginNoTriggerGC(__FILE__, __LINE__));
+ }
+ }
+}
+
+void CrstBase::PreLeave()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(OwnedByCurrentThread());
+ _ASSERTE(m_entercount > 0);
+ m_entercount--;
+ if (!m_entercount) {
+ m_holderthreadid.ResetThreadId();
+
+ // Delink it from the Thread's chain of OwnedChain
+ if (m_prev)
+ m_prev->m_next = m_next;
+ else
+ SetThreadsOwnedCrsts(m_next);
+
+ if (m_next)
+ m_next->m_prev = m_prev;
+
+ m_next = NULL;
+ m_prev = NULL;
+ }
+
+ Thread * pThread = GetThreadNULLOk();
+
+ if ((m_dwFlags & CRST_HOST_BREAKABLE) == 0)
+ {
+ if (pThread)
+ {
+ pThread->DecUnbreakableLockCount();
+ }
+ }
+
+ if (m_countNoTriggerGC > 0 && !ThreadStore::s_pThreadStore->IsCrstForThreadStore(this))
+ {
+ m_countNoTriggerGC--;
+ if (pThread != NULL)
+ {
+ INCONTRACT(pThread->EndNoTriggerGC());
+ }
+ }
+
+ if ((m_dwFlags & CRST_HOST_BREAKABLE) != 0)
+ {
+ HOST_BREAKABLE_CRST_RELEASED(this);
+ }
+ else
+ {
+ EE_LOCK_RELEASED(this);
+ }
+
+ // Are we in the shutdown sequence and in phase 2 of it?
+ if (g_fProcessDetach && (g_fEEShutDown & ShutDown_Phase2))
+ {
+ // Ensure that this lock has been flagged to be taken during shutdown
+ _ASSERTE_MSG(CanBeTakenDuringShutdown(), "Attempting to leave a lock at shutdown that is not CRST_TAKEN_DURING_SHUTDOWN");
+ }
+
+}
+
+// We have seen several times that a Crst is not destroyed before its memory is freed. This corrupts
+// our chain and also causes a memory leak. The following structure tracks which Crsts exist.
+// If our chain is broken, find out which Crst causes the problem, then look it up in this array. The
+// problematic Crst can be identified by its crstType.
+struct CrstDebugInfo
+{
+ CrstBase *pAddress;
+ CrstType crstType;
+};
+const int crstDebugInfoCount = 4000;
+CrstDebugInfo crstDebugInfo[crstDebugInfoCount];
+
+CrstBase *CrstBase::GetThreadsOwnedCrsts()
+{
+ return (CrstBase*)ClrFlsGetValue(TlsIdx_OwnedCrstsChain);
+}
+void CrstBase::SetThreadsOwnedCrsts(CrstBase *pCrst)
+{
+ WRAPPER_NO_CONTRACT;
+ ClrFlsSetValue(TlsIdx_OwnedCrstsChain, (LPVOID) (pCrst));
+}
+
+void CrstBase::DebugInit(CrstType crstType, CrstFlags flags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_crstType = crstType;
+ m_tag = GetCrstName(crstType);
+ m_crstlevel = GetCrstLevel(crstType);
+ m_holderthreadid.ResetThreadId();
+ m_entercount = 0;
+ m_next = NULL;
+ m_prev = NULL;
+ m_cannotLeave=0;
+
+ _ASSERTE((m_dwFlags & ~(CRST_REENTRANCY |
+ CRST_UNSAFE_SAMELEVEL |
+ CRST_UNSAFE_COOPGC |
+ CRST_UNSAFE_ANYMODE |
+ CRST_DEBUGGER_THREAD |
+ CRST_HOST_BREAKABLE |
+ CRST_OS_CRIT_SEC |
+ CRST_INITIALIZED |
+ CRST_TAKEN_DURING_SHUTDOWN |
+ CRST_GC_NOTRIGGER_WHEN_TAKEN |
+ CRST_DEBUG_ONLY_CHECK_FORBID_SUSPEND_THREAD)) == 0);
+
+ // @todo - Any Crst w/ CRST_DEBUGGER_THREAD must be on a special blessed list. Check that here.
+
+ LOG((LF_SYNC, INFO3, "ConstructCrst with this:0x%x\n", this));
+
+ for (int i = 0; i < crstDebugInfoCount; i++)
+ {
+ if (crstDebugInfo[i].pAddress == NULL)
+ {
+ crstDebugInfo[i].pAddress = this;
+ crstDebugInfo[i].crstType = crstType;
+ break;
+ }
+ }
+
+ m_countNoTriggerGC = 0;
+}
+
+void CrstBase::DebugDestroy()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Ideally, when we destroy the crst, it wouldn't be held.
+ // This is violated if a thread holds a lock and is asynchronously killed
+ // (such as what happens on ExitProcess).
+ // Delink it from the Thread's chain of OwnedChain
+ if (IsAtProcessExit())
+ {
+ // In shutdown scenario, crst may or may not be held.
+ if (m_prev == NULL)
+ {
+ if (!m_holderthreadid.IsUnknown()) // Crst taken!
+ {
+ if (m_next)
+ m_next->m_prev = NULL; // workaround: break up the chain
+ SetThreadsOwnedCrsts(NULL);
+ }
+ }
+ else
+ {
+ m_prev->m_next = m_next;
+ if (m_next)
+ m_next->m_prev = m_prev;
+ }
+ }
+ else
+ {
+ // Crst is destroyed while being held.
+ CONSISTENCY_CHECK_MSGF(
+ ((m_prev == NULL) && (m_next == NULL) && m_holderthreadid.IsUnknown()),
+ ("CRST '%s' is destroyed while being held in non-shutdown scenario.\n"
+ "this=0x%p, m_prev=0x%p. m_next=0x%p", m_tag, this, this->m_prev, this->m_next));
+ }
+
+ FillMemory(&m_criticalsection, sizeof(m_criticalsection), 0xcc);
+ m_holderthreadid.ResetThreadId();
+ m_entercount = 0xcccccccc;
+
+ m_next = (CrstBase*)POISONC;
+ m_prev = (CrstBase*)POISONC;
+
+ for (int i = 0; i < crstDebugInfoCount; i++)
+ {
+ if (crstDebugInfo[i].pAddress == this)
+ {
+ crstDebugInfo[i].pAddress = NULL;
+ crstDebugInfo[i].crstType = kNumberOfCrstTypes;
+ break;
+ }
+ }
+}
+
+//-----------------------------------------------------------------
+// Check if attempting to take the lock would violate level order.
+//-----------------------------------------------------------------
+BOOL CrstBase::IsSafeToTake()
+{
+ CONTRACTL {
+ DEBUG_ONLY;
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ } CONTRACTL_END;
+
+ // If mscoree.dll is being detached
+ if (IsAtProcessExit())
+ return TRUE;
+
+ // Cannot take a Crst in cooperative mode unless CRST_UNSAFE_COOPGC is set, in
+ // which case it must always be taken in this mode.
+ // If there is no thread object, we ignore the check since this thread isn't
+ // coordinated with the GC.
+ Thread * pThread;
+ BEGIN_GETTHREAD_ALLOWED;
+ pThread = GetThread();
+
+ _ASSERTE(pThread == NULL ||
+ (pThread->PreemptiveGCDisabled() == ((m_dwFlags & CRST_UNSAFE_COOPGC) != 0)) ||
+ ((m_dwFlags & (CRST_UNSAFE_ANYMODE | CRST_GC_NOTRIGGER_WHEN_TAKEN)) != 0) ||
+ (GCHeap::IsGCInProgress() && pThread == ThreadSuspend::GetSuspensionThread()));
+ END_GETTHREAD_ALLOWED;
+
+ if (m_holderthreadid.IsSameThread())
+ {
+ // If we already hold it, we can't violate level order.
+ // Check if client wanted to allow reentrancy.
+ if ((m_dwFlags & CRST_REENTRANCY) == 0)
+ {
+ LOG((LF_SYNC, INFO3, "Crst Reentrancy violation on %s\n", m_tag));
+ // So that we can debug here.
+ _ASSERTE (g_fEEShutDown || !"Crst Reentrancy violation");
+ }
+ return ((m_dwFlags & CRST_REENTRANCY) != 0);
+ }
+
+ // Is the current Crst exempt from the Crst ranking enforcement?
+ if (m_crstlevel == CRSTUNORDERED
+ // when the thread is doing a stressing GC, some Crst violations could be ignored
+ // also, we want to keep an explicit list of Crst's that we may take during GC stress
+ || (pThread && pThread->GetGCStressing ()
+ && (m_crstType == CrstThreadStore || m_crstType == CrstHandleTable
+ || m_crstType == CrstSyncBlockCache || m_crstType == CrstIbcProfile
+ || m_crstType == CrstAvailableParamTypes || m_crstType == CrstSystemDomainDelayedUnloadList
+ || m_crstType == CrstAssemblyList || m_crstType == CrstJumpStubCache
+ || m_crstType == CrstSingleUseLock)
+ )
+ || (pThread && pThread->GetUniqueStacking ())
+ )
+ {
+ return TRUE;
+ }
+
+ // See if the current thread already owns a lower or sibling lock.
+ BOOL fSafe = TRUE;
+ for (CrstBase *pcrst = GetThreadsOwnedCrsts(); pcrst != NULL; pcrst = pcrst->m_next)
+ {
+ fSafe =
+ !pcrst->m_holderthreadid.IsSameThread()
+ || (pcrst->m_crstlevel == CRSTUNORDERED)
+ || (pcrst->m_crstlevel > m_crstlevel)
+ || (pcrst->m_crstlevel == m_crstlevel && (m_dwFlags & CRST_UNSAFE_SAMELEVEL) != 0);
+ if (!fSafe)
+ {
+ LOG((LF_SYNC, INFO3, "Crst Level violation: Can't take level %lu lock %s because you already holding level %lu lock %s\n",
+ (ULONG)m_crstlevel, m_tag, (ULONG)(pcrst->m_crstlevel), pcrst->m_tag));
+ // So that we can debug here.
+ if (!g_fEEShutDown)
+ {
+                CONSISTENCY_CHECK_MSGF(false, ("Crst Level violation: Can't take level %lu lock %s because you are already holding level %lu lock %s\n",
+ (ULONG)m_crstlevel,
+ m_tag,
+ (ULONG)(pcrst->m_crstlevel),
+ pcrst->m_tag));
+ }
+ break;
+ }
+ }
+ return fSafe;
+}
+
+#endif // _DEBUG
+
+#endif // !DACCESS_COMPILE
+
+#ifdef TEST_DATA_CONSISTENCY
+// used for test purposes. Determines if a crst is held.
+// Arguments:
+// input: pLock - the lock to test
+// Note: Throws if the lock is held
+
+void DebugTryCrst(CrstBase * pLock)
+{
+ SUPPORTS_DAC;
+
+ if (g_pConfig && g_pConfig->TestDataConsistency())
+ {
+ CrstHolder crstHolder (pLock);
+ }
+}
+#endif
+
diff --git a/src/vm/crst.h b/src/vm/crst.h
new file mode 100644
index 0000000000..da50066ccc
--- /dev/null
+++ b/src/vm/crst.h
@@ -0,0 +1,566 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// CRST.H
+//
+
+//
+// Debug-instrumented hierarchical critical sections.
+//
+//
+// The hierarchy:
+// --------------
+//    The EE divides critical sections into numbered groups or "levels."
+//    Crsts that guard the lowest-level data structures (those that don't
+//    use other services) are grouped into the lowest-numbered levels.
+//    The higher-numbered levels are reserved for high-level crsts
+//    that guard broad swaths of code. Multiple groups can share the
+//    same number to indicate that they're disjoint (their locks will never
+//    nest).
+//
+//    The fundamental rule of the hierarchy is that a thread may only request
+//    a crst whose level is lower than the level of every crst it currently holds.
+//    E.g. if a thread currently holds a level-3 crst, it can try to enter
+//    a level-2 crst, but not a level-4 crst, nor a different level-3
+//    crst. This prevents the cyclic dependencies that lead to deadlock.
+//
+// For debugging purposes Crsts are all also grouped by a type (e.g.
+// CrstRemoting, the type of Crst used to synchronize certain remoting
+// operations). Each type maps to one level (though a level may map to
+// multiple types). The idea here is for the programmer to express Crst types
+// and their dependencies (e.g. a CrstClassInit instance may be acquired
+// while a CrstRemoting instance is already held) in a high level manner
+// while an external script handles the mechanical process of assigning
+// numerical levels to each type. See file:..\inc\CrstTypes.def for these high level
+// type definitions.
+//
+//
+// To create a crst:
+//
+// Crst *pcrst = new Crst(type);
+//
+// where "type" is one of the enums created in the auto-generated
+// file:..\inc\CrstTypes.h header file (matching the definition in
+// file:..\inc\CrstTypes.def).
+//
+// By default, crsts don't support nested enters by the same thread. If
+// you need reentrancy, use the alternate form:
+//
+// Crst *pcrst = new Crst(type, TRUE);
+//
+// Since reentrancies never block the caller, they're allowed to
+// "violate" the level ordering rule.
+//
+//
+// To enter/leave a crst:
+// ----------------------
+//
+// Enter() and Leave() are deliberately private; ordinary callers acquire and
+// release the lock through the holder classes (see CrstHolder below), which
+// call Enter()/Leave() for you and guarantee that the lock is released even
+// when an exception unwinds the stack.
+//
+// An assertion will fire on entry if a thread attempts to take locks
+// in the wrong order.
+//
+// Finally, a few DEBUG-only methods:
+//
+// To assert taking a crst won't violate level order:
+// --------------------------------------------------
+//
+// _ASSERTE(pcrst->IsSafeToTake());
+//
+// This is a good line to put at the start of any function that
+// enters a crst in some circumstances but not others. If it
+// always enters the crst, it's not necessary to call IsSafeToTake()
+// since Enter() does this for you.
+//
+// To assert that the current thread owns a crst:
+// --------------------------------------------------
+//
+// _ASSERTE(pcrst->OwnedByCurrentThread());
+
+
+
+#ifndef __crst_h__
+#define __crst_h__
+
+#include "util.hpp"
+#include "debugmacros.h"
+#include "log.h"
+
+#define ShutDown_Start 0x00000001
+#define ShutDown_Finalize1 0x00000002
+#define ShutDown_Finalize2 0x00000004
+#define ShutDown_Profiler 0x00000008
+#define ShutDown_COM 0x00000010
+#define ShutDown_SyncBlock 0x00000020
+#define ShutDown_IUnknown 0x00000040
+#define ShutDown_Phase2 0x00000080
+
+#ifndef DACCESS_COMPILE
+extern bool g_fProcessDetach;
+extern DWORD g_fEEShutDown;
+#endif
+// Total count of Crst locks of the shutdown type that are currently in use
+extern Volatile<LONG> g_ShutdownCrstUsageCount;
+extern Volatile<LONG> g_fForbidEnterEE;
+extern bool g_fFinalizerRunOnShutDown;
+
+// The CRST.
+class CrstBase
+{
+#ifndef CLR_STANDALONE_BINDER
+
+// The following classes and methods violate the requirement that Crst usage be
+// exception-safe, or they satisfy that requirement using techniques other than
+// Holder objects:
+friend class Thread;
+friend class ThreadStore;
+friend class ThreadSuspend;
+friend class ListLock;
+friend class ListLockEntry;
+//friend class CExecutionEngine;
+friend struct SavedExceptionInfo;
+friend void EEEnterCriticalSection(CRITSEC_COOKIE cookie);
+friend void EELeaveCriticalSection(CRITSEC_COOKIE cookie);
+friend class ReJitPublishMethodHolder;
+friend class ReJitPublishMethodTableHolder;
+
+friend class Debugger;
+friend class Crst;
+
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ // The debugger transport code uses a holder for its Crst, but it needs to share the holder implementation
+ // with its right side code as well (which can't see the Crst implementation and actually uses a
+ // CRITICAL_SECTION as the base lock). So make DbgTransportSession a friend here so we can use Enter() and
+ // Leave() in order to build a shared holder class.
+ friend class DbgTransportLock;
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+ // PendingTypeLoadEntry acquires the lock during construction before anybody has a chance to see it to avoid
+ // level violations.
+ friend class PendingTypeLoadEntry;
+
+public:
+#ifdef _DEBUG
+ enum NoLevelCheckFlag
+ {
+ CRST_NO_LEVEL_CHECK = 1,
+ CRST_LEVEL_CHECK = 0,
+ };
+#endif
+
+private:
+    // Some Crsts have a "shutdown" mode.
+    // A Crst in shutdown mode can only be taken / released by the special
+    // (helper / finalizer / shutdown) threads. Any other thread that tries to take
+    // a "shutdown" crst will immediately release the Crst and instead just block forever.
+    //
+    // This prevents random threads from blocking the special threads from doing finalization on shutdown.
+    //
+    // Unfortunately, each Crst needs its own "shutdown" flag because we can't convert all the locks
+    // into shutdown locks at once. E.g., the TSL needs to suspend the runtime before
+    // converting to a shutdown lock, but it can't suspend the runtime while holding
+    // an UNSAFE_ANYMODE lock (such as the debugger-lock). So at least the debugger-lock
+    // and the TSL need to be converted separately.
+    //
+    // So for such Crsts, it's the caller's responsibility to detect whether the crst is in
+    // shutdown mode and, if so, to call this function after Enter.
+ void ReleaseAndBlockForShutdownIfNotSpecialThread();
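+
+    // A minimal sketch of the expected call pattern (CrstIsInShutdownMode is an
+    // illustrative stand-in for however the caller detects shutdown mode; it is
+    // not a CrstBase method):
+    //
+    //      pCrst->Enter();
+    //      if (CrstIsInShutdownMode(pCrst))
+    //          pCrst->ReleaseAndBlockForShutdownIfNotSpecialThread();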
+
+ // Enter & Leave are deliberately private to force callers to use the
+ // Holder class. If you bypass the Holder class and access these members
+ // directly, your lock is not exception-safe.
+ //
+    // The noLevelCheckFlag parameter lets you disable the crst level checking. This is
+    // very dangerous, so it is only used when the constructor is the one performing
+    // the Enter (that attempt cannot possibly block since the current thread is
+    // the only one with a pointer to the crst).
+ //
+ // For obvious reasons, this parameter must never be made public.
+ void Enter(INDEBUG(NoLevelCheckFlag noLevelCheckFlag = CRST_LEVEL_CHECK));
+ void Leave();
+
+ void SpinEnter();
+
+#ifndef DACCESS_COMPILE
+ DEBUG_NOINLINE static void AcquireLock(CrstBase *c) PUB {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ c->Enter();
+ }
+
+ DEBUG_NOINLINE static void ReleaseLock(CrstBase *c) PUB {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ c->Leave();
+ }
+
+#else // DACCESS_COMPILE
+
+    // In DAC builds, we don't actually acquire the lock; we just determine whether the left side (LS)
+    // already holds it. If so, we assume the data is inconsistent and throw an exception.
+ // Argument:
+ // input: c - the lock to be checked.
+ // Note: Throws
+ static void AcquireLock(CrstBase * c) PUB
+ {
+ SUPPORTS_DAC;
+ if (c->GetEnterCount() != 0)
+ {
+ ThrowHR(CORDBG_E_PROCESS_NOT_SYNCHRONIZED);
+ }
+ };
+
+ static void ReleaseLock(CrstBase *c) PUB
+ {
+ SUPPORTS_DAC;
+ };
+#endif // DACCESS_COMPILE
+
+public:
+ //-----------------------------------------------------------------
+ // Clean up critical section
+    // Safe to call multiple times or on a non-initialized critical section
+ //-----------------------------------------------------------------
+ void Destroy();
+
+#ifdef _DEBUG
+ //-----------------------------------------------------------------
+ // Check if attempting to take the lock would violate level order.
+ //-----------------------------------------------------------------
+ BOOL IsSafeToTake();
+ // Checks that the lock can be taken
+ BOOL Debug_CanTake()
+ {
+ WRAPPER_NO_CONTRACT;
+        // Actually take the lock and release it immediately; that will do all the necessary checks.
+ Enter();
+ Leave();
+ return TRUE;
+ }
+ void SetCantLeave(BOOL bSet)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (bSet)
+ FastInterlockIncrement(&m_cannotLeave);
+ else
+ {
+ _ASSERTE(m_cannotLeave);
+ FastInterlockDecrement(&m_cannotLeave);
+ }
+ };
+ //-----------------------------------------------------------------
+ // Is the current thread the owner?
+ //-----------------------------------------------------------------
+ BOOL OwnedByCurrentThread()
+ {
+ WRAPPER_NO_CONTRACT;
+#ifdef CROSSGEN_COMPILE
+ return TRUE;
+#else
+ return m_holderthreadid.IsSameThread();
+#endif
+ }
+
+ CrstBase *GetThreadsOwnedCrsts();
+ void SetThreadsOwnedCrsts(CrstBase *pCrst);
+
+ __declspec(noinline) EEThreadId GetHolderThreadId()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_holderthreadid;
+ }
+
+#endif //_DEBUG
+
+ //-----------------------------------------------------------------
+ // For clients who want to assert whether they are in or out of the
+ // region.
+ //-----------------------------------------------------------------
+ UINT GetEnterCount()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifdef _DEBUG
+ return m_entercount;
+#else
+ return 0;
+#endif //_DEBUG
+ }
+
+protected:
+
+ VOID InitWorker(INDEBUG_COMMA(CrstType crstType) CrstFlags flags);
+
+#ifdef _DEBUG
+ void DebugInit(CrstType crstType, CrstFlags flags);
+ void DebugDestroy();
+#endif
+
+#endif // CLR_STANDALONE_BINDER
+
+ union {
+ CRITICAL_SECTION m_criticalsection;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostCrst *m_pHostCrst;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ };
+
+ typedef enum
+ {
+ // Mask to indicate reserved flags
+ CRST_RESERVED_FLAGS_MASK = 0xC0000000,
+ // private flag to indicate initialized Crsts
+ CRST_INITIALIZED = 0x80000000,
+ // private flag to indicate Crst is OS Critical Section
+ CRST_OS_CRIT_SEC = 0x40000000,
+ // rest of the flags are CrstFlags
+ } CrstReservedFlags;
+ DWORD m_dwFlags; // Re-entrancy and same level
+#ifdef _DEBUG
+ UINT m_entercount; // # of unmatched Enters.
+ CrstType m_crstType; // Type enum (should have a descriptive name for debugging)
+ const char *m_tag; // Stringized form of the tag for easy debugging
+ int m_crstlevel; // what level is the crst in?
+ EEThreadId m_holderthreadid; // current holder (or NULL)
+ CrstBase *m_next; // link for global linked list
+ CrstBase *m_prev; // link for global linked list
+ Volatile<LONG> m_cannotLeave;
+
+    // Check for deadlock situations.
+ ULONG m_countNoTriggerGC;
+
+ void PostEnter ();
+ void PreEnter ();
+ void PreLeave ();
+#endif //_DEBUG
+
+#ifndef CLR_STANDALONE_BINDER
+
+private:
+
+ void SetOSCritSec ()
+ {
+ m_dwFlags |= CRST_OS_CRIT_SEC;
+ }
+ void ResetOSCritSec ()
+ {
+ m_dwFlags &= ~CRST_OS_CRIT_SEC;
+ }
+ BOOL IsOSCritSec ()
+ {
+ return m_dwFlags & CRST_OS_CRIT_SEC;
+ }
+ void SetCrstInitialized()
+ {
+ m_dwFlags |= CRST_INITIALIZED;
+ }
+
+ BOOL IsCrstInitialized()
+ {
+ return m_dwFlags & CRST_INITIALIZED;
+ }
+
+ BOOL CanBeTakenDuringShutdown()
+ {
+ return m_dwFlags & CRST_TAKEN_DURING_SHUTDOWN;
+ }
+
+ void SetFlags(CrstFlags f)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(((CrstFlags)(f & ~CRST_RESERVED_FLAGS_MASK)) == f);
+ m_dwFlags = (f & ~CRST_RESERVED_FLAGS_MASK) | (m_dwFlags & CRST_RESERVED_FLAGS_MASK);
+ }
+
+ void ResetFlags() // resets the reserved and the CrstFlags
+ {
+ m_dwFlags = 0;
+ }
+ // ------------------------------- Holders ------------------------------
+ public:
+ //
+    // CrstHolder is optimized for the common case of taking the lock in the constructor
+    // and releasing it in the destructor. Users that require all Holder features
+ // can use CrstHolderWithState.
+ //
+ class CrstHolder
+ {
+ CrstBase * m_pCrst;
+
+ public:
+ inline CrstHolder(CrstBase * pCrst)
+ : m_pCrst(pCrst)
+ {
+ WRAPPER_NO_CONTRACT;
+ AcquireLock(pCrst);
+ }
+
+ inline ~CrstHolder()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ VALIDATE_HOLDER_STACK_CONSUMPTION_FOR_TYPE(HSV_ValidateMinimumStackReq);
+ ReleaseLock(m_pCrst);
+ }
+ };
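+
+    // A minimal usage sketch (the names are illustrative): the holder acquires
+    // the lock in its constructor and releases it in its destructor, so the
+    // lock is dropped on every exit path, including exception unwind.
+    //
+    //      void AddWidget(Widget * pWidget)
+    //      {
+    //          CrstHolder ch(&m_WidgetCrst);       // AcquireLock -> Enter()
+    //          m_WidgetList.InsertHead(pWidget);
+    //      }                                       // ReleaseLock -> Leave()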
+
+ // Note that the holders for CRSTs are used in extremely low stack conditions. Because of this, they
+ // aren't allowed to use more than HOLDER_CODE_MINIMUM_STACK_LIMIT pages of stack.
+ typedef DacHolder<CrstBase *, CrstBase::AcquireLock, CrstBase::ReleaseLock, 0, CompareDefault, HSV_ValidateMinimumStackReq> CrstHolderWithState;
+
+ // We have some situations where we're already holding a lock, and we need to release and reacquire the lock across a window.
+ // This is a dangerous construct because the backout code can block.
+ // Generally, it's better to use a regular CrstHolder, and then use the Release() / Acquire() methods on it.
+ // This just exists to convert legacy OS Critical Section patterns over to holders.
+ typedef DacHolder<CrstBase *, CrstBase::ReleaseLock, CrstBase::AcquireLock, 0, CompareDefault, HSV_ValidateMinimumStackReq> UnsafeCrstInverseHolder;
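+
+    // A hedged sketch of the inverse pattern (names are illustrative; assumes
+    // the lock is already held on entry): the inverse holder releases the lock
+    // in its constructor and reacquires it in its destructor, opening a window
+    // in which other threads can take the lock.
+    //
+    //      {
+    //          UnsafeCrstInverseHolder inverse(&m_Crst);   // Leave()
+    //          WaitForSomethingSlow();                     // lock not held here
+    //      }                                               // Enter() on exit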
+
+#endif // CLR_STANDALONE_BINDER
+};
+
+#ifndef CLR_STANDALONE_BINDER
+typedef CrstBase::CrstHolder CrstHolder;
+typedef CrstBase::CrstHolderWithState CrstHolderWithState;
+#endif // CLR_STANDALONE_BINDER
+
+
+// The CRST.
+class Crst : public CrstBase
+{
+#ifndef CLR_STANDALONE_BINDER
+public:
+ void *operator new(size_t size)
+ {
+ WRAPPER_NO_CONTRACT;
+ return new BYTE[size];
+ }
+
+private:
+    // Do not use the in-place operator new on Crst. The wrong destructor would be called if the constructor fails.
+    // Use CrstStatic or CrstExplicitInit instead of the in-place operator new.
+ void *operator new(size_t size, void *pInPlace);
+
+public:
+
+#ifndef DACCESS_COMPILE
+
+ //-----------------------------------------------------------------
+ // Constructor.
+ //-----------------------------------------------------------------
+ Crst(CrstType crstType, CrstFlags flags = CRST_DEFAULT)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // throw away the debug-only parameter in retail
+ InitWorker(INDEBUG_COMMA(crstType) flags);
+ }
+
+ //-----------------------------------------------------------------
+ // Destructor.
+ //-----------------------------------------------------------------
+ ~Crst()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ Destroy();
+ };
+
+#else
+
+ Crst(CrstType crstType, CrstFlags flags = CRST_DEFAULT) {
+ LIMITED_METHOD_CONTRACT;
+ };
+
+ Crst() {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+#endif
+#endif // CLR_STANDALONE_BINDER
+};
+
+typedef DPTR(Crst) PTR_Crst;
+
+/* to be used as a static variable - no constructor/destructor, assumes
+   zero-initialized memory */
+class CrstStatic : public CrstBase
+{
+#ifndef CLR_STANDALONE_BINDER
+public:
+ VOID Init(CrstType crstType, CrstFlags flags = CRST_DEFAULT)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE((flags & CRST_INITIALIZED) == 0);
+
+ // throw away the debug-only parameter in retail
+ InitWorker(INDEBUG_COMMA(crstType) flags);
+ }
+
+ bool InitNoThrow(CrstType crstType, CrstFlags flags = CRST_DEFAULT)
+ {
+ CONTRACTL {
+ NOTHROW;
+ } CONTRACTL_END;
+
+ _ASSERTE((flags & CRST_INITIALIZED) == 0);
+
+ bool fSuccess = false;
+
+ EX_TRY
+ {
+ // throw away the debug-only parameter in retail
+ InitWorker(INDEBUG_COMMA(crstType) flags);
+ fSuccess = true;
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return fSuccess;
+ }
+#endif // CLR_STANDALONE_BINDER
+};
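+
+// A minimal sketch of the intended CrstStatic pattern (names are illustrative):
+// the instance lives in zero-initialized static storage and is initialized
+// explicitly once during startup.
+//
+//      static CrstStatic s_WidgetCrst;             // zero-initialized storage
+//
+//      void InitWidgetSubsystem()
+//      {
+//          s_WidgetCrst.Init(CrstSingleUseLock);   // one-time explicit init
+//      }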
+
+/* to be used as a regular variable when an explicit call to the Init method is needed */
+class CrstExplicitInit : public CrstStatic
+{
+#ifndef CLR_STANDALONE_BINDER
+public:
+ CrstExplicitInit() {
+ m_dwFlags = 0;
+ }
+ ~CrstExplicitInit() {
+#ifndef DACCESS_COMPILE
+ Destroy();
+#endif
+ }
+#endif // CLR_STANDALONE_BINDER
+};
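+
+// A minimal sketch of the intended CrstExplicitInit pattern (names are
+// illustrative): unlike CrstStatic, the destructor calls Destroy(), so this
+// form suits members of dynamically allocated objects that are initialized
+// explicitly after construction.
+//
+//      class WidgetCache
+//      {
+//          CrstExplicitInit m_crst;
+//      public:
+//          void Init() { m_crst.Init(CrstSingleUseLock); }
+//      };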
+
+#ifndef CLR_STANDALONE_BINDER
+__inline BOOL IsOwnerOfCrst(LPVOID lock)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef _DEBUG
+ return ((Crst*)lock)->OwnedByCurrentThread();
+#else
+    // This function should not be called in a free build.
+ DebugBreak();
+ return TRUE;
+#endif
+}
+
+#endif // CLR_STANDALONE_BINDER
+
+#ifdef TEST_DATA_CONSISTENCY
+// used for test purposes. Determines if a crst is held.
+void DebugTryCrst(CrstBase * pLock);
+#endif
+#endif // __crst_h__
+
+
diff --git a/src/vm/ctxtcall.h b/src/vm/ctxtcall.h
new file mode 100644
index 0000000000..1113ee145c
--- /dev/null
+++ b/src/vm/ctxtcall.h
@@ -0,0 +1,411 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#pragma warning( disable: 4049 ) /* more than 64k source lines */
+
+/* this ALWAYS GENERATED file contains the definitions for the interfaces */
+
+
+/* File created by MIDL compiler version 5.03.0279 */
+/* at Wed Dec 06 11:12:56 2000
+ */
+/* Compiler settings for ctxtcall.idl:
+ Oicf (OptLev=i2), W1, Zp8, env=Win32 (32b run), ms_ext, c_ext, robust
+ error checks: allocation ref bounds_check enum stub_data
+ VC __declspec() decoration level:
+ __declspec(uuid()), __declspec(selectany), __declspec(novtable)
+ DECLSPEC_UUID(), MIDL_INTERFACE()
+*/
+//@@MIDL_FILE_HEADING( )
+
+
+/* verify that the <rpcndr.h> version is high enough to compile this file*/
+#ifndef __REQUIRED_RPCNDR_H_VERSION__
+#define __REQUIRED_RPCNDR_H_VERSION__ 475
+#endif
+
+#include "rpc.h"
+#include "rpcndr.h"
+
+#ifndef __RPCNDR_H_VERSION__
+#error this stub requires an updated version of <rpcndr.h>
+#endif // __RPCNDR_H_VERSION__
+
+#ifndef COM_NO_WINDOWS_H
+#include "windows.h"
+#include "ole2.h"
+#endif /*COM_NO_WINDOWS_H*/
+
+#ifndef __ctxtcall_h__
+#define __ctxtcall_h__
+
+/* Forward Declarations */
+
+#ifndef __IContextCallback_FWD_DEFINED__
+#define __IContextCallback_FWD_DEFINED__
+typedef interface IContextCallback IContextCallback;
+#endif /* __IContextCallback_FWD_DEFINED__ */
+
+
+#ifndef __ITeardownNotification_FWD_DEFINED__
+#define __ITeardownNotification_FWD_DEFINED__
+typedef interface ITeardownNotification ITeardownNotification;
+#endif /* __ITeardownNotification_FWD_DEFINED__ */
+
+
+#ifndef __IComApartmentState_FWD_DEFINED__
+#define __IComApartmentState_FWD_DEFINED__
+typedef interface IComApartmentState IComApartmentState;
+#endif /* __IComApartmentState_FWD_DEFINED__ */
+
+
+/* header files for imported files */
+#include "wtypes.h"
+#include "objidl.h"
+
+#ifdef __cplusplus
+extern "C"{
+#endif
+
+void __RPC_FAR * __RPC_USER MIDL_user_allocate(size_t);
+void __RPC_USER MIDL_user_free( void __RPC_FAR * );
+
+/* interface __MIDL_itf_ctxtcall_0000 */
+/* [local] */
+
+typedef struct tagComCallData
+ {
+ DWORD dwDispid;
+ DWORD dwReserved;
+ void __RPC_FAR *pUserDefined;
+ } ComCallData;
+
+
+
+extern RPC_IF_HANDLE __MIDL_itf_ctxtcall_0000_v0_0_c_ifspec;
+extern RPC_IF_HANDLE __MIDL_itf_ctxtcall_0000_v0_0_s_ifspec;
+
+#ifndef __IContextCallback_INTERFACE_DEFINED__
+#define __IContextCallback_INTERFACE_DEFINED__
+
+/* interface IContextCallback */
+/* [unique][uuid][object][local] */
+
+typedef /* [ref] */ HRESULT ( __stdcall __RPC_FAR *PFNCONTEXTCALL )(
+ ComCallData __RPC_FAR *pParam);
+
+
+EXTERN_C const IID IID_IContextCallback;
+
+#if defined(__cplusplus) && !defined(CINTERFACE)
+
+ MIDL_INTERFACE("000001da-0000-0000-C000-000000000046")
+ IContextCallback : public IUnknown
+ {
+ public:
+ virtual HRESULT STDMETHODCALLTYPE ContextCallback(
+ /* [in] */ PFNCONTEXTCALL pfnCallback,
+ /* [in] */ ComCallData __RPC_FAR *pParam,
+ /* [in] */ REFIID riid,
+ /* [in] */ int iMethod,
+ /* [in] */ IUnknown __RPC_FAR *pUnk) = 0;
+
+ };
+
+#else /* C style interface */
+
+ typedef struct IContextCallbackVtbl
+ {
+ BEGIN_INTERFACE
+
+ HRESULT ( STDMETHODCALLTYPE __RPC_FAR *QueryInterface )(
+ IContextCallback __RPC_FAR * This,
+ /* [in] */ REFIID riid,
+ /* [iid_is][out] */ void __RPC_FAR *__RPC_FAR *ppvObject);
+
+ ULONG ( STDMETHODCALLTYPE __RPC_FAR *AddRef )(
+ IContextCallback __RPC_FAR * This);
+
+ ULONG ( STDMETHODCALLTYPE __RPC_FAR *Release )(
+ IContextCallback __RPC_FAR * This);
+
+ HRESULT ( STDMETHODCALLTYPE __RPC_FAR *ContextCallback )(
+ IContextCallback __RPC_FAR * This,
+ /* [in] */ PFNCONTEXTCALL pfnCallback,
+ /* [in] */ ComCallData __RPC_FAR *pParam,
+ /* [in] */ REFIID riid,
+ /* [in] */ int iMethod,
+ /* [in] */ IUnknown __RPC_FAR *pUnk);
+
+ END_INTERFACE
+ } IContextCallbackVtbl;
+
+ interface IContextCallback
+ {
+ CONST_VTBL struct IContextCallbackVtbl __RPC_FAR *lpVtbl;
+ };
+
+
+
+#ifdef COBJMACROS
+
+
+#define IContextCallback_QueryInterface(This,riid,ppvObject) \
+ (This)->lpVtbl -> QueryInterface(This,riid,ppvObject)
+
+#define IContextCallback_AddRef(This) \
+ (This)->lpVtbl -> AddRef(This)
+
+#define IContextCallback_Release(This) \
+ (This)->lpVtbl -> Release(This)
+
+
+#define IContextCallback_ContextCallback(This,pfnCallback,pParam,riid,iMethod,pUnk) \
+ (This)->lpVtbl -> ContextCallback(This,pfnCallback,pParam,riid,iMethod,pUnk)
+
+#endif /* COBJMACROS */
+
+
+#endif /* C style interface */
+
+
+
+HRESULT STDMETHODCALLTYPE IContextCallback_ContextCallback_Proxy(
+ IContextCallback __RPC_FAR * This,
+ /* [in] */ PFNCONTEXTCALL pfnCallback,
+ /* [in] */ ComCallData __RPC_FAR *pParam,
+ /* [in] */ REFIID riid,
+ /* [in] */ int iMethod,
+ /* [in] */ IUnknown __RPC_FAR *pUnk);
+
+
+void __RPC_STUB IContextCallback_ContextCallback_Stub(
+ IRpcStubBuffer *This,
+ IRpcChannelBuffer *_pRpcChannelBuffer,
+ PRPC_MESSAGE _pRpcMessage,
+ DWORD *_pdwStubPhase);
+
+
+
+#endif /* __IContextCallback_INTERFACE_DEFINED__ */
+
+
+#ifndef __ITeardownNotification_INTERFACE_DEFINED__
+#define __ITeardownNotification_INTERFACE_DEFINED__
+
+/* interface ITeardownNotification */
+/* [unique][object][local][uuid] */
+
+
+EXTERN_C const IID IID_ITeardownNotification;
+
+#if defined(__cplusplus) && !defined(CINTERFACE)
+
+ MIDL_INTERFACE("a85e0fb6-8bf4-4614-b164-7b43ef43f5be")
+ ITeardownNotification : public IUnknown
+ {
+ public:
+ virtual HRESULT STDMETHODCALLTYPE TeardownHint( void) = 0;
+
+ };
+
+#else /* C style interface */
+
+ typedef struct ITeardownNotificationVtbl
+ {
+ BEGIN_INTERFACE
+
+ HRESULT ( STDMETHODCALLTYPE __RPC_FAR *QueryInterface )(
+ ITeardownNotification __RPC_FAR * This,
+ /* [in] */ REFIID riid,
+ /* [iid_is][out] */ void __RPC_FAR *__RPC_FAR *ppvObject);
+
+ ULONG ( STDMETHODCALLTYPE __RPC_FAR *AddRef )(
+ ITeardownNotification __RPC_FAR * This);
+
+ ULONG ( STDMETHODCALLTYPE __RPC_FAR *Release )(
+ ITeardownNotification __RPC_FAR * This);
+
+ HRESULT ( STDMETHODCALLTYPE __RPC_FAR *TeardownHint )(
+ ITeardownNotification __RPC_FAR * This);
+
+ END_INTERFACE
+ } ITeardownNotificationVtbl;
+
+ interface ITeardownNotification
+ {
+ CONST_VTBL struct ITeardownNotificationVtbl __RPC_FAR *lpVtbl;
+ };
+
+
+
+#ifdef COBJMACROS
+
+
+#define ITeardownNotification_QueryInterface(This,riid,ppvObject) \
+ (This)->lpVtbl -> QueryInterface(This,riid,ppvObject)
+
+#define ITeardownNotification_AddRef(This) \
+ (This)->lpVtbl -> AddRef(This)
+
+#define ITeardownNotification_Release(This) \
+ (This)->lpVtbl -> Release(This)
+
+
+#define ITeardownNotification_TeardownHint(This) \
+ (This)->lpVtbl -> TeardownHint(This)
+
+#endif /* COBJMACROS */
+
+
+#endif /* C style interface */
+
+
+
+HRESULT STDMETHODCALLTYPE ITeardownNotification_TeardownHint_Proxy(
+ ITeardownNotification __RPC_FAR * This);
+
+
+void __RPC_STUB ITeardownNotification_TeardownHint_Stub(
+ IRpcStubBuffer *This,
+ IRpcChannelBuffer *_pRpcChannelBuffer,
+ PRPC_MESSAGE _pRpcMessage,
+ DWORD *_pdwStubPhase);
+
+
+
+#endif /* __ITeardownNotification_INTERFACE_DEFINED__ */
+
+
+#ifndef __IComApartmentState_INTERFACE_DEFINED__
+#define __IComApartmentState_INTERFACE_DEFINED__
+
+/* interface IComApartmentState */
+/* [object][local][uuid] */
+
+
+EXTERN_C const IID IID_IComApartmentState;
+
+#if defined(__cplusplus) && !defined(CINTERFACE)
+
+ MIDL_INTERFACE("7e220139-8dde-47ef-b181-08be603efd75")
+ IComApartmentState : public IUnknown
+ {
+ public:
+ virtual HRESULT STDMETHODCALLTYPE RegisterForTeardownHint(
+ /* [in] */ ITeardownNotification __RPC_FAR *pT,
+ /* [in] */ DWORD dwFlags,
+ /* [out] */ ULONG_PTR __RPC_FAR *pCookie) = 0;
+
+ virtual HRESULT STDMETHODCALLTYPE UnregisterForTeardownHint(
+ /* [in] */ ULONG_PTR cookie) = 0;
+
+ };
+
+#else /* C style interface */
+
+ typedef struct IComApartmentStateVtbl
+ {
+ BEGIN_INTERFACE
+
+ HRESULT ( STDMETHODCALLTYPE __RPC_FAR *QueryInterface )(
+ IComApartmentState __RPC_FAR * This,
+ /* [in] */ REFIID riid,
+ /* [iid_is][out] */ void __RPC_FAR *__RPC_FAR *ppvObject);
+
+ ULONG ( STDMETHODCALLTYPE __RPC_FAR *AddRef )(
+ IComApartmentState __RPC_FAR * This);
+
+ ULONG ( STDMETHODCALLTYPE __RPC_FAR *Release )(
+ IComApartmentState __RPC_FAR * This);
+
+ HRESULT ( STDMETHODCALLTYPE __RPC_FAR *RegisterForTeardownHint )(
+ IComApartmentState __RPC_FAR * This,
+ /* [in] */ ITeardownNotification __RPC_FAR *pT,
+ /* [in] */ DWORD dwFlags,
+ /* [out] */ ULONG_PTR __RPC_FAR *pCookie);
+
+ HRESULT ( STDMETHODCALLTYPE __RPC_FAR *UnregisterForTeardownHint )(
+ IComApartmentState __RPC_FAR * This,
+ /* [in] */ ULONG_PTR cookie);
+
+ END_INTERFACE
+ } IComApartmentStateVtbl;
+
+ interface IComApartmentState
+ {
+ CONST_VTBL struct IComApartmentStateVtbl __RPC_FAR *lpVtbl;
+ };
+
+
+
+#ifdef COBJMACROS
+
+
+#define IComApartmentState_QueryInterface(This,riid,ppvObject) \
+ (This)->lpVtbl -> QueryInterface(This,riid,ppvObject)
+
+#define IComApartmentState_AddRef(This) \
+ (This)->lpVtbl -> AddRef(This)
+
+#define IComApartmentState_Release(This) \
+ (This)->lpVtbl -> Release(This)
+
+
+#define IComApartmentState_RegisterForTeardownHint(This,pT,dwFlags,pCookie) \
+ (This)->lpVtbl -> RegisterForTeardownHint(This,pT,dwFlags,pCookie)
+
+#define IComApartmentState_UnregisterForTeardownHint(This,cookie) \
+ (This)->lpVtbl -> UnregisterForTeardownHint(This,cookie)
+
+#endif /* COBJMACROS */
+
+
+#endif /* C style interface */
+
+
+
+HRESULT STDMETHODCALLTYPE IComApartmentState_RegisterForTeardownHint_Proxy(
+ IComApartmentState __RPC_FAR * This,
+ /* [in] */ ITeardownNotification __RPC_FAR *pT,
+ /* [in] */ DWORD dwFlags,
+ /* [out] */ ULONG_PTR __RPC_FAR *pCookie);
+
+
+void __RPC_STUB IComApartmentState_RegisterForTeardownHint_Stub(
+ IRpcStubBuffer *This,
+ IRpcChannelBuffer *_pRpcChannelBuffer,
+ PRPC_MESSAGE _pRpcMessage,
+ DWORD *_pdwStubPhase);
+
+
+HRESULT STDMETHODCALLTYPE IComApartmentState_UnregisterForTeardownHint_Proxy(
+ IComApartmentState __RPC_FAR * This,
+ /* [in] */ ULONG_PTR cookie);
+
+
+void __RPC_STUB IComApartmentState_UnregisterForTeardownHint_Stub(
+ IRpcStubBuffer *This,
+ IRpcChannelBuffer *_pRpcChannelBuffer,
+ PRPC_MESSAGE _pRpcMessage,
+ DWORD *_pdwStubPhase);
+
+
+
+#endif /* __IComApartmentState_INTERFACE_DEFINED__ */
+
+
+/* Additional Prototypes for ALL interfaces */
+
+/* end of Additional Prototypes */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+
diff --git a/src/vm/customattribute.cpp b/src/vm/customattribute.cpp
new file mode 100644
index 0000000000..fba6d34847
--- /dev/null
+++ b/src/vm/customattribute.cpp
@@ -0,0 +1,1694 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#include "customattribute.h"
+#include "invokeutil.h"
+#include "method.hpp"
+#include "threads.h"
+#include "excep.h"
+#include "corerror.h"
+#include "security.h"
+#include "classnames.h"
+#include "fcall.h"
+#include "assemblynative.hpp"
+#include "typeparse.h"
+#include "securityattributes.h"
+#include "reflectioninvocation.h"
+#include "runtimehandles.h"
+
+typedef InlineFactory<InlineSString<64>, 16> SStringFactory;
+
+/*static*/
+TypeHandle Attribute::GetTypeForEnum(LPCUTF8 szEnumName, COUNT_T cbEnumName, DomainAssembly* pDomainAssembly)
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckPointer(pDomainAssembly));
+ PRECONDITION(CheckPointer(szEnumName));
+ PRECONDITION(cbEnumName);
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackScratchBuffer buff;
+ StackSString sszEnumName(SString::Utf8, szEnumName, cbEnumName);
+ return TypeName::GetTypeUsingCASearchRules(sszEnumName.GetUTF8(buff), pDomainAssembly->GetAssembly());
+}
+
+/*static*/
+HRESULT Attribute::ParseCaType(
+ CustomAttributeParser &ca,
+ CaType* pCaType,
+ DomainAssembly* pDomainAssembly,
+ StackSString* ss)
+{
+ WRAPPER_NO_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ IfFailGo(::ParseEncodedType(ca, pCaType));
+
+ if (pCaType->tag == SERIALIZATION_TYPE_ENUM ||
+ (pCaType->tag == SERIALIZATION_TYPE_SZARRAY && pCaType->arrayType == SERIALIZATION_TYPE_ENUM ))
+ {
+ TypeHandle th = Attribute::GetTypeForEnum(pCaType->szEnumName, pCaType->cEnumName, pDomainAssembly);
+
+ if (!th.IsNull() && th.IsEnum())
+ {
+ pCaType->enumType = (CorSerializationType)th.GetVerifierCorElementType();
+
+ // The assembly qualified name of th might not equal pCaType->szEnumName.
+ // e.g. th could be "MyEnum, MyAssembly, Version=4.0.0.0" while
+ // pCaType->szEnumName is "MyEnum, MyAssembly, Version=3.0.0.0"
+ if (ss)
+ {
+ DWORD format = TypeString::FormatNamespace | TypeString::FormatFullInst | TypeString::FormatAssembly;
+ TypeString::AppendType(*ss, th, format);
+ }
+ }
+ else
+ {
+ MAKE_WIDEPTR_FROMUTF8N(pWideStr, pCaType->szEnumName, pCaType->cEnumName)
+ IfFailGo(PostError(META_E_CA_UNEXPECTED_TYPE, wcslen(pWideStr), pWideStr));
+ }
+ }
+
+ErrExit:
+ return hr;
+}
+
+/*static*/
+void Attribute::SetBlittableCaValue(CustomAttributeValue* pVal, CaValue* pCaVal, BOOL* pbAllBlittableCa)
+{
+ WRAPPER_NO_CONTRACT;
+
+ CorSerializationType type = pCaVal->type.tag;
+
+ pVal->m_type.m_tag = pCaVal->type.tag;
+ pVal->m_type.m_arrayType = pCaVal->type.arrayType;
+ pVal->m_type.m_enumType = pCaVal->type.enumType;
+ pVal->m_rawValue = 0;
+
+ if (type == SERIALIZATION_TYPE_STRING ||
+ type == SERIALIZATION_TYPE_SZARRAY ||
+ type == SERIALIZATION_TYPE_TYPE)
+ {
+ *pbAllBlittableCa = FALSE;
+ }
+ else
+ {
+ // Enum arg -> Object param
+ if (type == SERIALIZATION_TYPE_ENUM && pCaVal->type.cEnumName)
+ *pbAllBlittableCa = FALSE;
+
+ pVal->m_rawValue = pCaVal->i8;
+ }
+}
+
+/*static*/
+void Attribute::SetManagedValue(CustomAttributeManagedValues gc, CustomAttributeValue* pValue)
+{
+ WRAPPER_NO_CONTRACT;
+
+ CorSerializationType type = pValue->m_type.m_tag;
+
+ if (type == SERIALIZATION_TYPE_TYPE || type == SERIALIZATION_TYPE_STRING)
+ {
+ SetObjectReference((OBJECTREF*)&pValue->m_enumOrTypeName, gc.string, GetAppDomain());
+ }
+ else if (type == SERIALIZATION_TYPE_ENUM)
+ {
+ SetObjectReference((OBJECTREF*)&pValue->m_type.m_enumName, gc.string, GetAppDomain());
+ }
+ else if (type == SERIALIZATION_TYPE_SZARRAY)
+ {
+ SetObjectReference((OBJECTREF*)&pValue->m_value, gc.array, GetAppDomain());
+
+ if (pValue->m_type.m_arrayType == SERIALIZATION_TYPE_ENUM)
+ SetObjectReference((OBJECTREF*)&pValue->m_type.m_enumName, gc.string, GetAppDomain());
+ }
+}
+
+/*static*/
+CustomAttributeManagedValues Attribute::GetManagedCaValue(CaValue* pCaVal)
+{
+ WRAPPER_NO_CONTRACT;
+
+ CustomAttributeManagedValues gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ CorSerializationType type = pCaVal->type.tag;
+
+ if (type == SERIALIZATION_TYPE_ENUM)
+ {
+ gc.string = StringObject::NewString(pCaVal->type.szEnumName, pCaVal->type.cEnumName);
+ }
+ else if (type == SERIALIZATION_TYPE_STRING)
+ {
+ gc.string = NULL;
+
+ if (pCaVal->str.pStr)
+ gc.string = StringObject::NewString(pCaVal->str.pStr, pCaVal->str.cbStr);
+ }
+ else if (type == SERIALIZATION_TYPE_TYPE)
+ {
+ gc.string = StringObject::NewString(pCaVal->str.pStr, pCaVal->str.cbStr);
+ }
+ else if (type == SERIALIZATION_TYPE_SZARRAY)
+ {
+ CorSerializationType arrayType = pCaVal->type.arrayType;
+ ULONG length = pCaVal->arr.length;
+ BOOL bAllBlittableCa = arrayType != SERIALIZATION_TYPE_ENUM;
+
+ if (length == (ULONG)-1)
+ return gc;
+
+ gc.array = (CaValueArrayREF)AllocateValueSzArray(MscorlibBinder::GetClass(CLASS__CUSTOM_ATTRIBUTE_ENCODED_ARGUMENT), length);
+ CustomAttributeValue* pValues = gc.array->GetDirectPointerToNonObjectElements();
+
+ for (COUNT_T i = 0; i < length; i ++)
+ Attribute::SetBlittableCaValue(&pValues[i], &pCaVal->arr[i], &bAllBlittableCa);
+
+ if (!bAllBlittableCa)
+ {
+ GCPROTECT_BEGIN(gc)
+ {
+ if (arrayType == SERIALIZATION_TYPE_ENUM)
+ gc.string = StringObject::NewString(pCaVal->type.szEnumName, pCaVal->type.cEnumName);
+
+ for (COUNT_T i = 0; i < length; i ++)
+ {
+ CustomAttributeManagedValues managedCaValue = Attribute::GetManagedCaValue(&pCaVal->arr[i]);
+ Attribute::SetManagedValue(
+ managedCaValue,
+ &gc.array->GetDirectPointerToNonObjectElements()[i]);
+ }
+ }
+ GCPROTECT_END();
+ }
+ }
+
+ return gc;
+}
+
+/*static*/
+HRESULT Attribute::ParseAttributeArgumentValues(
+ void* pCa,
+ INT32 cCa,
+ CaValueArrayFactory* pCaValueArrayFactory,
+ CaArg* pCaArgs,
+ COUNT_T cArgs,
+ CaNamedArg* pCaNamedArgs,
+ COUNT_T cNamedArgs,
+ DomainAssembly* pDomainAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+
+ HRESULT hr = S_OK;
+ CustomAttributeParser cap(pCa, cCa);
+
+ IfFailGo(Attribute::ParseCaCtorArgs(cap, pCaArgs, cArgs, pCaValueArrayFactory, pDomainAssembly));
+ IfFailGo(Attribute::ParseCaNamedArgs(cap, pCaNamedArgs, cNamedArgs, pCaValueArrayFactory, pDomainAssembly));
+
+ErrExit:
+ return hr;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Helper to parse the values for the ctor argument list and the named argument list.
+//
+
+HRESULT Attribute::ParseCaValue(
+ CustomAttributeParser &ca,
+ CaValue* pCaArg,
+ CaType* pCaParam,
+ CaValueArrayFactory* pCaValueArrayFactory,
+ DomainAssembly* pDomainAssembly)
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckPointer(pCaArg));
+ PRECONDITION(CheckPointer(pCaParam));
+ PRECONDITION(CheckPointer(pCaValueArrayFactory));
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ CorSerializationType underlyingType;
+ CaType elementType;
+
+ if (pCaParam->tag == SERIALIZATION_TYPE_TAGGED_OBJECT)
+ IfFailGo(Attribute::ParseCaType(ca, &pCaArg->type, pDomainAssembly));
+ else
+ pCaArg->type = *pCaParam;
+
+ underlyingType = pCaArg->type.tag == SERIALIZATION_TYPE_ENUM ? pCaArg->type.enumType : pCaArg->type.tag;
+
+ // Grab the value.
+ switch (underlyingType)
+ {
+ case SERIALIZATION_TYPE_BOOLEAN:
+ case SERIALIZATION_TYPE_I1:
+ case SERIALIZATION_TYPE_U1:
+ IfFailGo(ca.GetU1(&pCaArg->u1));
+ break;
+
+ case SERIALIZATION_TYPE_CHAR:
+ case SERIALIZATION_TYPE_I2:
+ case SERIALIZATION_TYPE_U2:
+ IfFailGo(ca.GetU2(&pCaArg->u2));
+ break;
+
+ case SERIALIZATION_TYPE_I4:
+ case SERIALIZATION_TYPE_U4:
+ IfFailGo(ca.GetU4(&pCaArg->u4));
+ break;
+
+ case SERIALIZATION_TYPE_I8:
+ case SERIALIZATION_TYPE_U8:
+ IfFailGo(ca.GetU8(&pCaArg->u8));
+ break;
+
+ case SERIALIZATION_TYPE_R4:
+ IfFailGo(ca.GetR4(&pCaArg->r4));
+ break;
+
+ case SERIALIZATION_TYPE_R8:
+ IfFailGo(ca.GetR8(&pCaArg->r8));
+ break;
+
+ case SERIALIZATION_TYPE_STRING:
+ case SERIALIZATION_TYPE_TYPE:
+ IfFailGo(ca.GetString(&pCaArg->str.pStr, &pCaArg->str.cbStr));
+ break;
+
+ case SERIALIZATION_TYPE_SZARRAY:
+ UINT32 len;
+ IfFailGo(ca.GetU4(&len));
+ pCaArg->arr.length = len;
+ pCaArg->arr.pSArray = NULL;
+ if (pCaArg->arr.length == (ULONG)-1)
+ break;
+
+ IfNullGo(pCaArg->arr.pSArray = pCaValueArrayFactory->Create());
+ elementType.Init(pCaArg->type.arrayType, SERIALIZATION_TYPE_UNDEFINED,
+ pCaArg->type.enumType, pCaArg->type.szEnumName, pCaArg->type.cEnumName);
+ for (ULONG i = 0; i < pCaArg->arr.length; i++)
+ IfFailGo(Attribute::ParseCaValue(ca, &*pCaArg->arr.pSArray->Append(), &elementType, pCaValueArrayFactory, pDomainAssembly));
+
+ break;
+
+ default:
+ // The format of the custom attribute record is invalid.
+ hr = E_FAIL;
+ break;
+ } // End switch
+
+ErrExit:
+ return hr;
+}
+
+/*static*/
+HRESULT Attribute::ParseCaCtorArgs(
+ CustomAttributeParser &ca,
+ CaArg* pArgs,
+ ULONG cArgs,
+ CaValueArrayFactory* pCaValueArrayFactory,
+ DomainAssembly* pDomainAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+
+ HRESULT hr = S_OK; // A result.
+ ULONG ix; // Loop control.
+
+ // If there is a blob, check the prolog.
+ if (FAILED(ca.ValidateProlog()))
+ {
+ IfFailGo(PostError(META_E_CA_INVALID_BLOB));
+ }
+
+ // For each expected arg...
+ for (ix=0; ix<cArgs; ++ix)
+ {
+ CaArg* pArg = &pArgs[ix];
+ IfFailGo(Attribute::ParseCaValue(ca, &pArg->val, &pArg->type, pCaValueArrayFactory, pDomainAssembly));
+ }
+
+ErrExit:
+ return hr;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Because the metadata (MD) implementation, ParseKnownCaNamedArgs, cannot have a VM dependency, we have our own implementation here:
+// 1. It needs to load the assemblies that contain the enum types for the named arguments,
+// 2. It compares the enum type name with that of the loaded enum type, not the one in the CA record.
+//
+
+/*static*/
+HRESULT Attribute::ParseCaNamedArgs(
+ CustomAttributeParser &ca,
+ CaNamedArg *pNamedParams,
+ ULONG cNamedParams,
+ CaValueArrayFactory* pCaValueArrayFactory,
+ DomainAssembly* pDomainAssembly)
+{
+ CONTRACTL {
+ PRECONDITION(CheckPointer(pCaValueArrayFactory));
+ PRECONDITION(CheckPointer(pDomainAssembly));
+ THROWS;
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ ULONG ixParam;
+ INT32 ixArg;
+ INT16 cActualArgs;
+ CaNamedArgCtor namedArg;
+ CaNamedArg* pNamedParam;
+
+ // Get actual count of named arguments.
+ if (FAILED(ca.GetI2(&cActualArgs)))
+ cActualArgs = 0; // Everett behavior
+
+ for (ixParam = 0; ixParam < cNamedParams; ixParam++)
+ pNamedParams[ixParam].val.type.tag = SERIALIZATION_TYPE_UNDEFINED;
+
+ // For each named argument...
+ for (ixArg = 0; ixArg < cActualArgs; ixArg++)
+ {
+ // Field or property?
+ IfFailGo(ca.GetTag(&namedArg.propertyOrField));
+ if (namedArg.propertyOrField != SERIALIZATION_TYPE_FIELD && namedArg.propertyOrField != SERIALIZATION_TYPE_PROPERTY)
+ IfFailGo(PostError(META_E_CA_INVALID_ARGTYPE));
+
+ // Get argument type information
+ CaType* pNamedArgType = &namedArg.type;
+ StackSString ss;
+ IfFailGo(Attribute::ParseCaType(ca, pNamedArgType, pDomainAssembly, &ss));
+
+ LPCSTR szLoadedEnumName = NULL;
+ StackScratchBuffer buff;
+
+ if (pNamedArgType->tag == SERIALIZATION_TYPE_ENUM ||
+ (pNamedArgType->tag == SERIALIZATION_TYPE_SZARRAY && pNamedArgType->arrayType == SERIALIZATION_TYPE_ENUM ))
+ {
+ szLoadedEnumName = ss.GetUTF8(buff);
+ }
+
+ // Get name of Arg.
+ if (FAILED(ca.GetNonEmptyString(&namedArg.szName, &namedArg.cName)))
+ IfFailGo(PostError(META_E_CA_INVALID_BLOB));
+
+ // Match arg by name and type
+ for (ixParam = 0; ixParam < cNamedParams; ixParam++)
+ {
+ pNamedParam = &pNamedParams[ixParam];
+
+ // Match type
+ if (pNamedParam->type.tag != SERIALIZATION_TYPE_TAGGED_OBJECT)
+ {
+ if (namedArg.type.tag != pNamedParam->type.tag)
+ continue;
+
+ // Match array type
+ if (namedArg.type.tag == SERIALIZATION_TYPE_SZARRAY &&
+ pNamedParam->type.arrayType != SERIALIZATION_TYPE_TAGGED_OBJECT &&
+ namedArg.type.arrayType != pNamedParam->type.arrayType)
+ continue;
+ }
+
+ // Match name (and its length to avoid substring matching)
+ if ((pNamedParam->cName != namedArg.cName) ||
+ (strncmp(pNamedParam->szName, namedArg.szName, namedArg.cName) != 0))
+ {
+ continue;
+ }
+
+ // If enum, match enum name.
+ if (pNamedParam->type.tag == SERIALIZATION_TYPE_ENUM ||
+ (pNamedParam->type.tag == SERIALIZATION_TYPE_SZARRAY && pNamedParam->type.arrayType == SERIALIZATION_TYPE_ENUM ))
+ {
+ // pNamedParam->type.szEnumName: module->CA record->ctor token->loaded type->field/property->field/property type->field/property type name
+ // namedArg.type.szEnumName: module->CA record->named arg->enum type name
+ // szLoadedEnumName: module->CA record->named arg->enum type name->loaded enum type->loaded enum type name
+
+ // Comparing pNamedParam->type.szEnumName against namedArg.type.szEnumName could fail if we loaded a different version
+ // of the enum type than the one specified in the CA record. So we are comparing it against szLoadedEnumName instead.
+ if (strncmp(pNamedParam->type.szEnumName, szLoadedEnumName, pNamedParam->type.cEnumName) != 0)
+ continue;
+
+ if (namedArg.type.enumType != pNamedParam->type.enumType)
+ {
+ MAKE_WIDEPTR_FROMUTF8N(pWideStr, pNamedParam->type.szEnumName, pNamedParam->type.cEnumName)
+ IfFailGo(PostError(META_E_CA_UNEXPECTED_TYPE, wcslen(pWideStr), pWideStr));
+ }
+
+                // TODO: For now assume the property/field array size is correct - later we should verify this
+ }
+
+ // Found a match.
+ break;
+ }
+
+ // Better have found an argument.
+ if (ixParam == cNamedParams)
+ {
+ MAKE_WIDEPTR_FROMUTF8N(pWideStr, namedArg.szName, namedArg.cName)
+ IfFailGo(PostError(META_E_CA_UNKNOWN_ARGUMENT, wcslen(pWideStr), pWideStr));
+ }
+
+ // Argument had better not have been seen already.
+ if (pNamedParams[ixParam].val.type.tag != SERIALIZATION_TYPE_UNDEFINED)
+ {
+ MAKE_WIDEPTR_FROMUTF8N(pWideStr, namedArg.szName, namedArg.cName)
+ IfFailGo(PostError(META_E_CA_REPEATED_ARG, wcslen(pWideStr), pWideStr));
+ }
+
+ IfFailGo(Attribute::ParseCaValue(ca, &pNamedParams[ixParam].val, &namedArg.type, pCaValueArrayFactory, pDomainAssembly));
+ }
+
+ErrExit:
+ return hr;
+}
+
+/*static*/
+HRESULT Attribute::InitCaType(CustomAttributeType* pType, Factory<SString>* pSstringFactory, Factory<StackScratchBuffer>* pStackScratchBufferFactory, CaType* pCaType)
+{
+ CONTRACTL {
+ THROWS;
+ PRECONDITION(CheckPointer(pType));
+ PRECONDITION(CheckPointer(pSstringFactory));
+ PRECONDITION(CheckPointer(pStackScratchBufferFactory));
+ PRECONDITION(CheckPointer(pCaType));
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ SString* psszName = NULL;
+ StackScratchBuffer* scratchBuffer = NULL;
+
+ IfNullGo(psszName = pSstringFactory->Create());
+ IfNullGo(scratchBuffer = pStackScratchBufferFactory->Create());
+
+ psszName->Set(pType->m_enumName == NULL ? NULL : pType->m_enumName->GetBuffer());
+
+ pCaType->Init(
+ pType->m_tag,
+ pType->m_arrayType,
+ pType->m_enumType,
+ psszName->GetUTF8(*scratchBuffer),
+ (ULONG)psszName->GetCount());
+
+ErrExit:
+ return hr;
+}
+
+FCIMPL5(VOID, Attribute::ParseAttributeArguments, void* pCa, INT32 cCa,
+ CaArgArrayREF* ppCustomAttributeArguments,
+ CaNamedArgArrayREF* ppCustomAttributeNamedArguments,
+ AssemblyBaseObject* pAssemblyUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ ASSEMBLYREF refAssembly = (ASSEMBLYREF)ObjectToOBJECTREF(pAssemblyUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_1(refAssembly)
+ {
+ DomainAssembly *pDomainAssembly = refAssembly->GetDomainAssembly();
+
+ struct
+ {
+ CustomAttributeArgument* pArgs;
+ CustomAttributeNamedArgument* pNamedArgs;
+ } gc;
+
+ gc.pArgs = NULL;
+ gc.pNamedArgs = NULL;
+
+ HRESULT hr = S_OK;
+
+ GCPROTECT_BEGININTERIOR(gc);
+
+ BOOL bAllBlittableCa = TRUE;
+ COUNT_T cArgs = 0;
+ COUNT_T cNamedArgs = 0;
+ CaArg* pCaArgs = NULL;
+ CaNamedArg* pCaNamedArgs = NULL;
+#ifdef __GNUC__
+ // When compiling under GCC we have to use the -fstack-check option to ensure we always spot stack
+ // overflow. But this option is intolerant of locals growing too large, so we have to cut back a bit
+ // on what we can allocate inline here. Leave the Windows versions alone to retain the perf benefits
+ // since we don't have the same constraints.
+ NewHolder<CaValueArrayFactory> pCaValueArrayFactory = new InlineFactory<SArray<CaValue>, 4>();
+ InlineFactory<StackScratchBuffer, 4> stackScratchBufferFactory;
+ InlineFactory<SString, 4> sstringFactory;
+#else // __GNUC__
+
+ // Preallocate 4 elements in each of the following factories for optimal performance.
+    // 4 is enough for 4 typed args or 2 named args, which is enough for 99% of the cases.
+
+    // SArray<CaValue> is only needed if an argument is an array; don't preallocate any memory, as arrays are rare.
+
+ // Need one per (ctor or named) arg + one per array element
+ InlineFactory<SArray<CaValue>, 4> caValueArrayFactory;
+ InlineFactory<SArray<CaValue>, 4> *pCaValueArrayFactory = &caValueArrayFactory;
+
+ // Need one StackScratchBuffer per ctor arg and two per named arg
+ InlineFactory<StackScratchBuffer, 4> stackScratchBufferFactory;
+
+ // Need one SString per ctor arg and two per named arg
+ InlineFactory<SString, 4> sstringFactory;
+#endif // __GNUC__
+
+ cArgs = (*ppCustomAttributeArguments)->GetNumComponents();
+
+ if (cArgs)
+ {
+ gc.pArgs = (*ppCustomAttributeArguments)->GetDirectPointerToNonObjectElements();
+
+ size_t size = sizeof(CaArg) * cArgs;
+ if ((size / sizeof(CaArg)) != cArgs) // uint over/underflow
+ IfFailGo(E_INVALIDARG);
+ pCaArgs = (CaArg*)_alloca(size);
+
+ for (COUNT_T i = 0; i < cArgs; i ++)
+ {
+ CaType caType;
+ IfFailGo(Attribute::InitCaType(&gc.pArgs[i].m_type, &sstringFactory, &stackScratchBufferFactory, &caType));
+
+ pCaArgs[i].Init(caType);
+ }
+ }
+
+ cNamedArgs = (*ppCustomAttributeNamedArguments)->GetNumComponents();
+
+ if (cNamedArgs)
+ {
+ gc.pNamedArgs = (*ppCustomAttributeNamedArguments)->GetDirectPointerToNonObjectElements();
+
+ size_t size = sizeof(CaNamedArg) * cNamedArgs;
+ if ((size / sizeof(CaNamedArg)) != cNamedArgs) // uint over/underflow
+ IfFailGo(E_INVALIDARG);
+ pCaNamedArgs = (CaNamedArg*)_alloca(size);
+
+ for (COUNT_T i = 0; i < cNamedArgs; i ++)
+ {
+ CustomAttributeNamedArgument* pNamedArg = &gc.pNamedArgs[i];
+
+ CaType caType;
+ IfFailGo(Attribute::InitCaType(&pNamedArg->m_type, &sstringFactory, &stackScratchBufferFactory, &caType));
+
+ SString* psszName = NULL;
+ IfNullGo(psszName = sstringFactory.Create());
+
+ psszName->Set(pNamedArg->m_argumentName->GetBuffer());
+
+ StackScratchBuffer* scratchBuffer = NULL;
+ IfNullGo(scratchBuffer = stackScratchBufferFactory.Create());
+
+ pCaNamedArgs[i].Init(
+ psszName->GetUTF8(*scratchBuffer),
+ pNamedArg->m_propertyOrField,
+ caType);
+ }
+ }
+
+        // This call maps the named parameters (fields and properties) and ctor parameters to the arguments in the CA record
+        // and retrieves their values.
+ IfFailGo(Attribute::ParseAttributeArgumentValues(pCa, cCa, pCaValueArrayFactory, pCaArgs, cArgs, pCaNamedArgs, cNamedArgs, pDomainAssembly));
+
+ for (COUNT_T i = 0; i < cArgs; i ++)
+ Attribute::SetBlittableCaValue(&gc.pArgs[i].m_value, &pCaArgs[i].val, &bAllBlittableCa);
+
+ for (COUNT_T i = 0; i < cNamedArgs; i ++)
+ Attribute::SetBlittableCaValue(&gc.pNamedArgs[i].m_value, &pCaNamedArgs[i].val, &bAllBlittableCa);
+
+ if (!bAllBlittableCa)
+ {
+ for (COUNT_T i = 0; i < cArgs; i ++)
+ {
+ CustomAttributeManagedValues managedCaValue = Attribute::GetManagedCaValue(&pCaArgs[i].val);
+ Attribute::SetManagedValue(managedCaValue, &(gc.pArgs[i].m_value));
+ }
+
+ for (COUNT_T i = 0; i < cNamedArgs; i++)
+ {
+ CustomAttributeManagedValues managedCaValue = Attribute::GetManagedCaValue(&pCaNamedArgs[i].val);
+ Attribute::SetManagedValue(managedCaValue, &(gc.pNamedArgs[i].m_value));
+ }
+ }
+
+ ErrExit:
+
+ ; // Need empty statement to get GCPROTECT_END below to work.
+
+ GCPROTECT_END();
+
+
+ if (hr != S_OK)
+ {
+ if ((hr == E_OUTOFMEMORY) || (hr == NTE_NO_MEMORY))
+ {
+ COMPlusThrow(kOutOfMemoryException);
+ }
+ else
+ {
+ COMPlusThrow(kCustomAttributeFormatException);
+ }
+ }
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+FCIMPL2(Object*, RuntimeTypeHandle::CreateCaInstance, ReflectClassBaseObject* pCaTypeUNSAFE, ReflectMethodObject* pCtorUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pCaTypeUNSAFE));
+ PRECONDITION(!pCaTypeUNSAFE->GetType().IsGenericVariable());
+ PRECONDITION(pCaTypeUNSAFE->GetType().IsValueType() || CheckPointer(pCtorUNSAFE));
+ }
+ CONTRACTL_END;
+
+ struct _gc
+ {
+ REFLECTCLASSBASEREF refCaType;
+ OBJECTREF o;
+ REFLECTMETHODREF refCtor;
+ } gc;
+
+ gc.refCaType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pCaTypeUNSAFE);
+ MethodTable* pCaMT = gc.refCaType->GetType().GetMethodTable();
+
+ gc.o = NULL;
+ gc.refCtor = (REFLECTMETHODREF)ObjectToOBJECTREF(pCtorUNSAFE);
+ MethodDesc *pCtor = gc.refCtor != NULL ? gc.refCtor->GetMethod() : NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+ {
+ PRECONDITION(
+ (!pCtor && gc.refCaType->GetType().IsValueType() && !gc.refCaType->GetType().GetMethodTable()->HasDefaultConstructor()) ||
+ (pCtor == gc.refCaType->GetType().GetMethodTable()->GetDefaultConstructor()));
+
+        // If we relax this, we need to ensure custom attributes construct properly for Nullable<T>
+ if (gc.refCaType->GetType().HasInstantiation())
+ COMPlusThrow(kNotSupportedException, W("Argument_GenericsInvalid"));
+
+ gc.o = pCaMT->Allocate();
+
+ if (pCtor)
+ {
+
+ ARG_SLOT args;
+
+ if (pCaMT->IsValueType())
+ {
+ MethodDescCallSite ctor(pCtor, &gc.o);
+ args = PtrToArgSlot(gc.o->UnBox());
+ ctor.CallWithValueTypes(&args);
+ }
+ else
+ {
+
+ PREPARE_NONVIRTUAL_CALLSITE_USING_METHODDESC(pCtor);
+ DECLARE_ARGHOLDER_ARRAY(CtorArgs, 1);
+ CtorArgs[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.o);
+
+ // Call the ctor...
+ CALL_MANAGED_METHOD_NORET(CtorArgs);
+ }
+
+ }
+ }
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(gc.o);
+}
+FCIMPLEND
+
+FCIMPL5(LPVOID, COMCustomAttribute::CreateCaObject, ReflectModuleBaseObject* pAttributedModuleUNSAFE, ReflectMethodObject *pMethodUNSAFE, BYTE** ppBlob, BYTE* pEndBlob, INT32* pcNamedArgs)
+{
+ FCALL_CONTRACT;
+
+ struct
+ {
+ OBJECTREF ca;
+ REFLECTMETHODREF refCtor;
+ REFLECTMODULEBASEREF refAttributedModule;
+ } gc;
+ gc.ca = NULL;
+ gc.refCtor = (REFLECTMETHODREF)ObjectToOBJECTREF(pMethodUNSAFE);
+ gc.refAttributedModule = (REFLECTMODULEBASEREF)ObjectToOBJECTREF(pAttributedModuleUNSAFE);
+
+ if(gc.refAttributedModule == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ MethodDesc* pCtorMD = gc.refCtor->GetMethod();
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+ {
+ MethodDescCallSite ctorCallSite(pCtorMD);
+ MetaSig* pSig = ctorCallSite.GetMetaSig();
+ BYTE* pBlob = *ppBlob;
+
+ // get the number of arguments and allocate an array for the args
+ ARG_SLOT *args = NULL;
+ UINT cArgs = pSig->NumFixedArgs() + 1; // make room for the this pointer
+        UINT i = 1;                             // used to flag that we actually got the right number of args from the blob
+
+ args = (ARG_SLOT*)_alloca(cArgs * sizeof(ARG_SLOT));
+ memset((void*)args, 0, cArgs * sizeof(ARG_SLOT));
+
+ OBJECTREF *argToProtect = (OBJECTREF*)_alloca(cArgs * sizeof(OBJECTREF));
+ memset((void*)argToProtect, 0, cArgs * sizeof(OBJECTREF));
+
+        // If we relax this, we need to ensure custom attributes construct properly for Nullable<T>
+ if (pCtorMD->GetMethodTable()->HasInstantiation())
+ COMPlusThrow(kNotSupportedException, W("Argument_GenericsInvalid"));
+
+ // load the this pointer
+ argToProtect[0] = pCtorMD->GetMethodTable()->Allocate(); // this is the value to return after the ctor invocation
+
+ if (pBlob)
+ {
+ if (pBlob < pEndBlob)
+ {
+ if (pBlob + 2 > pEndBlob)
+ {
+ COMPlusThrow(kCustomAttributeFormatException);
+ }
+ INT16 prolog = GET_UNALIGNED_VAL16(pBlob);
+ if (prolog != 1)
+ COMPlusThrow(kCustomAttributeFormatException);
+ pBlob += 2;
+ }
+
+ if (cArgs > 1)
+ {
+ GCPROTECT_ARRAY_BEGIN(*argToProtect, cArgs);
+ {
+ // loop through the args
+ for (i = 1; i < cArgs; i++) {
+ CorElementType type = pSig->NextArg();
+ if (type == ELEMENT_TYPE_END)
+ break;
+ BOOL bObjectCreated = FALSE;
+ TypeHandle th = pSig->GetLastTypeHandleThrowing();
+ if (th.IsArray())
+ // get the array element
+ th = th.AsArray()->GetArrayElementTypeHandle();
+ ARG_SLOT data = GetDataFromBlob(pCtorMD->GetAssembly(), (CorSerializationType)type, th, &pBlob, pEndBlob, gc.refAttributedModule->GetModule(), &bObjectCreated);
+ if (bObjectCreated)
+ argToProtect[i] = ArgSlotToObj(data);
+ else
+ args[i] = data;
+ }
+ }
+ GCPROTECT_END();
+
+                // We have borrowed the signature from MethodDescCallSite. We have to put it back into its initial position
+                // because that's where MethodDescCallSite expects to find it below.
+ pSig->Reset();
+
+ for (i = 1; i < cArgs; i++)
+ {
+ if (argToProtect[i] != NULL)
+ {
+ _ASSERTE(args[i] == NULL);
+ args[i] = ObjToArgSlot(argToProtect[i]);
+ }
+ }
+ }
+ }
+ args[0] = ObjToArgSlot(argToProtect[0]);
+
+ if (i != cArgs)
+ COMPlusThrow(kCustomAttributeFormatException);
+
+            // Check if there are any named properties to invoke;
+            // if so, set the by-ref int passed in to point
+            // to the blob position where the named properties start.
+ *pcNamedArgs = 0;
+
+ if (pBlob && pBlob != pEndBlob)
+ {
+ if (pBlob + 2 > pEndBlob)
+ COMPlusThrow(kCustomAttributeFormatException);
+
+ *pcNamedArgs = GET_UNALIGNED_VAL16(pBlob);
+
+ pBlob += 2;
+ }
+
+ *ppBlob = pBlob;
+
+ if (*pcNamedArgs == 0 && pBlob != pEndBlob)
+ COMPlusThrow(kCustomAttributeFormatException);
+
+ // make the invocation to the ctor
+ gc.ca = ArgSlotToObj(args[0]);
+ if (pCtorMD->GetMethodTable()->IsValueType())
+ args[0] = PtrToArgSlot(OBJECTREFToObject(gc.ca)->UnBox());
+
+ ctorCallSite.CallWithValueTypes(args);
+ }
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(gc.ca);
+}
+FCIMPLEND
+
+FCIMPL5(VOID, COMCustomAttribute::ParseAttributeUsageAttribute, PVOID pData, ULONG cData, ULONG* pTargets, CLR_BOOL* pInherited, CLR_BOOL* pAllowMultiple)
+{
+ FCALL_CONTRACT;
+
+ int inherited = 0;
+ int allowMultiple = 1;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ {
+ CustomAttributeParser ca(pData, cData);
+
+ CaArg args[1];
+ args[0].InitEnum(SERIALIZATION_TYPE_I4, 0);
+ if (FAILED(::ParseKnownCaArgs(ca, args, lengthof(args))))
+ {
+ HELPER_METHOD_FRAME_BEGIN_0();
+ COMPlusThrow(kCustomAttributeFormatException);
+ HELPER_METHOD_FRAME_END();
+ }
+
+ *pTargets = args[0].val.u4;
+
+ CaNamedArg namedArgs[2];
+ CaType namedArgTypes[2];
+ namedArgTypes[inherited].Init(SERIALIZATION_TYPE_BOOLEAN);
+ namedArgTypes[allowMultiple].Init(SERIALIZATION_TYPE_BOOLEAN);
+ namedArgs[inherited].Init("Inherited", SERIALIZATION_TYPE_PROPERTY, namedArgTypes[inherited], TRUE);
+ namedArgs[allowMultiple].Init("AllowMultiple", SERIALIZATION_TYPE_PROPERTY, namedArgTypes[allowMultiple], FALSE);
+ if (FAILED(::ParseKnownCaNamedArgs(ca, namedArgs, lengthof(namedArgs))))
+ {
+ HELPER_METHOD_FRAME_BEGIN_0();
+ COMPlusThrow(kCustomAttributeFormatException);
+ HELPER_METHOD_FRAME_END();
+ }
+
+ *pInherited = namedArgs[inherited].val.boolean == TRUE;
+ *pAllowMultiple = namedArgs[allowMultiple].val.boolean == TRUE;
+ }
+ END_SO_INTOLERANT_CODE;
+}
+FCIMPLEND
+
+FCIMPL4(VOID, COMCustomAttribute::GetSecurityAttributes, ReflectModuleBaseObject *pModuleUNSAFE, DWORD tkToken, CLR_BOOL fAssembly, PTRARRAYREF* ppArray)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF throwable = NULL;
+ REFLECTMODULEBASEREF refModule = (REFLECTMODULEBASEREF)ObjectToOBJECTREF(pModuleUNSAFE);
+
+ if(refModule == NULL)
+ FCThrowResVoid(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ Module *pModule = refModule->GetModule();
+
+ HELPER_METHOD_FRAME_BEGIN_2(throwable, refModule);
+ {
+ IMDInternalImport* pScope = pModule->GetMDImport();
+
+ DWORD action;
+
+ CORSEC_ATTRSET_ARRAY aAttrset;
+ DWORD dwCount = 0;
+ for(action = 1; action <= dclMaximumValue; action++)
+ {
+ // We cannot use IsAssemblyDclAction(action) != fAssembly because CLR_BOOL is defined
+            // as BYTE in the PAL, so it might contain a value other than 0 or 1.
+ if (IsNGenOnlyDclAction(action) || IsAssemblyDclAction(action) == !fAssembly)
+ continue;
+
+ HENUMInternalHolder hEnum(pScope);
+ if (!hEnum.EnumPermissionSetsInit(tkToken, (CorDeclSecurity)action))
+ continue;
+
+ mdPermission tkPerm;
+ BYTE* pbBlob;
+ ULONG cbBlob;
+ DWORD dwAction;
+
+ while (pScope->EnumNext(&hEnum, &tkPerm))
+ {
+ IfFailThrow(pScope->GetPermissionSetProps(
+ tkPerm,
+ &dwAction,
+ (void const **)&pbBlob,
+ &cbBlob));
+
+ CORSEC_ATTRSET* pAttrSet = &*aAttrset.Append();
+ IfFailThrow(BlobToAttributeSet(pbBlob, cbBlob, pAttrSet, dwAction));
+
+ dwCount += pAttrSet->dwAttrCount;
+ }
+ }
+
+ *ppArray = (PTRARRAYREF)AllocateObjectArray(dwCount, g_pObjectClass);
+
+ CQuickBytes qb;
+
+ COUNT_T c = 0;
+ for (COUNT_T i = 0; i < aAttrset.GetCount(); i ++)
+ {
+ CORSEC_ATTRSET& attrset = aAttrset[i];
+ OBJECTREF* attrArray = (OBJECTREF*)qb.AllocThrows(attrset.dwAttrCount * sizeof(OBJECTREF));
+ memset(attrArray, 0, attrset.dwAttrCount * sizeof(OBJECTREF));
+ {
+ // Convert to a managed array of attribute objects
+ DWORD dwErrorIndex;
+ HRESULT hr = E_FAIL;
+ GCPROTECT_ARRAY_BEGIN(*attrArray, attrset.dwAttrCount);
+                    // This is very tricky.
+                    // We have a GCFrame local here. The local goes out of scope beyond the for loop. The stack location of the local
+                    // is then reused by other variables, and the content of the GCFrame may be changed. But the Frame is still chained
+                    // on our Thread object.
+                    // If an exception is thrown before we pop our frame chain, we will have a corrupted frame chain.
+ hr = SecurityAttributes::AttributeSetToManaged(attrArray, &attrset, &throwable, &dwErrorIndex, true);
+ GCPROTECT_END();
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+
+ for (COUNT_T j = 0; j < attrset.dwAttrCount; j ++)
+ (*ppArray)->SetAt(c++, attrArray[j]);
+ }
+
+ }
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL7(void, COMCustomAttribute::GetPropertyOrFieldData, ReflectModuleBaseObject *pModuleUNSAFE, BYTE** ppBlobStart, BYTE* pBlobEnd, STRINGREF* pName, CLR_BOOL* pbIsProperty, OBJECTREF* pType, OBJECTREF* value)
+{
+ FCALL_CONTRACT;
+
+ BYTE* pBlob = *ppBlobStart;
+ *pType = NULL;
+
+ REFLECTMODULEBASEREF refModule = (REFLECTMODULEBASEREF)ObjectToOBJECTREF(pModuleUNSAFE);
+
+ if(refModule == NULL)
+ FCThrowResVoid(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ Module *pModule = refModule->GetModule();
+
+ HELPER_METHOD_FRAME_BEGIN_1(refModule);
+ {
+ Assembly *pCtorAssembly = NULL;
+
+ MethodTable *pMTValue = NULL;
+ CorSerializationType arrayType = SERIALIZATION_TYPE_BOOLEAN;
+ BOOL bObjectCreated = FALSE;
+ TypeHandle nullTH;
+
+ if (pBlob + 2 > pBlobEnd)
+ COMPlusThrow(kCustomAttributeFormatException);
+
+ // get whether it is a field or a property
+ CorSerializationType propOrField = (CorSerializationType)*pBlob;
+ pBlob++;
+ if (propOrField == SERIALIZATION_TYPE_FIELD)
+ *pbIsProperty = FALSE;
+ else if (propOrField == SERIALIZATION_TYPE_PROPERTY)
+ *pbIsProperty = TRUE;
+ else
+ COMPlusThrow(kCustomAttributeFormatException);
+
+ // get the type of the field
+ CorSerializationType fieldType = (CorSerializationType)*pBlob;
+ pBlob++;
+ if (fieldType == SERIALIZATION_TYPE_SZARRAY)
+ {
+ // Check the bound before dereferencing: the two-byte check above only
+ // covered the prop/field byte and the field type byte.
+ if (pBlob + 1 > pBlobEnd)
+ COMPlusThrow(kCustomAttributeFormatException);
+
+ arrayType = (CorSerializationType)*pBlob;
+
+ pBlob++;
+ }
+ if (fieldType == SERIALIZATION_TYPE_ENUM || arrayType == SERIALIZATION_TYPE_ENUM)
+ {
+ // get the enum type
+ ReflectClassBaseObject *pEnum =
+ (ReflectClassBaseObject*)OBJECTREFToObject(ArgSlotToObj(GetDataFromBlob(
+ pCtorAssembly, SERIALIZATION_TYPE_TYPE, nullTH, &pBlob, pBlobEnd, pModule, &bObjectCreated)));
+
+ if (pEnum == NULL)
+ COMPlusThrow(kCustomAttributeFormatException);
+
+ _ASSERTE(bObjectCreated);
+
+ TypeHandle th = pEnum->GetType();
+ _ASSERTE(th.IsEnum());
+
+ pMTValue = th.AsMethodTable();
+ if (fieldType == SERIALIZATION_TYPE_ENUM)
+ // load the enum type to pass it back
+ *pType = th.GetManagedClassObject();
+ else
+ nullTH = th;
+ }
+
+ //
+ // get the string representing the field/property name
+ *pName = ArgSlotToString(GetDataFromBlob(
+ pCtorAssembly, SERIALIZATION_TYPE_STRING, nullTH, &pBlob, pBlobEnd, pModule, &bObjectCreated));
+ _ASSERTE(bObjectCreated || *pName == NULL);
+
+ // create the object and return it
+ switch (fieldType)
+ {
+ case SERIALIZATION_TYPE_TAGGED_OBJECT:
+ *pType = g_pObjectClass->GetManagedClassObject();
+ // Intentional fall-through: the tagged value itself is decoded below.
+ case SERIALIZATION_TYPE_TYPE:
+ case SERIALIZATION_TYPE_STRING:
+ *value = ArgSlotToObj(GetDataFromBlob(
+ pCtorAssembly, fieldType, nullTH, &pBlob, pBlobEnd, pModule, &bObjectCreated));
+ _ASSERTE(bObjectCreated || *value == NULL);
+
+ if (*value == NULL)
+ {
+ // load the proper type so that code in managed knows which property to load
+ if (fieldType == SERIALIZATION_TYPE_STRING)
+ *pType = MscorlibBinder::GetElementType(ELEMENT_TYPE_STRING)->GetManagedClassObject();
+ else if (fieldType == SERIALIZATION_TYPE_TYPE)
+ *pType = MscorlibBinder::GetClass(CLASS__TYPE)->GetManagedClassObject();
+ }
+ break;
+ case SERIALIZATION_TYPE_SZARRAY:
+ {
+ int arraySize = (int)GetDataFromBlob(pCtorAssembly, SERIALIZATION_TYPE_I4, nullTH, &pBlob, pBlobEnd, pModule, &bObjectCreated);
+
+ if (arraySize != -1)
+ {
+ _ASSERTE(!bObjectCreated);
+ if (arrayType == SERIALIZATION_TYPE_STRING)
+ nullTH = TypeHandle(MscorlibBinder::GetElementType(ELEMENT_TYPE_STRING));
+ else if (arrayType == SERIALIZATION_TYPE_TYPE)
+ nullTH = TypeHandle(MscorlibBinder::GetClass(CLASS__TYPE));
+ else if (arrayType == SERIALIZATION_TYPE_TAGGED_OBJECT)
+ nullTH = TypeHandle(g_pObjectClass);
+ ReadArray(pCtorAssembly, arrayType, arraySize, nullTH, &pBlob, pBlobEnd, pModule, (BASEARRAYREF*)value);
+ }
+ if (*value == NULL)
+ {
+ TypeHandle arrayTH;
+ switch (arrayType)
+ {
+ case SERIALIZATION_TYPE_STRING:
+ arrayTH = TypeHandle(MscorlibBinder::GetElementType(ELEMENT_TYPE_STRING));
+ break;
+ case SERIALIZATION_TYPE_TYPE:
+ arrayTH = TypeHandle(MscorlibBinder::GetClass(CLASS__TYPE));
+ break;
+ case SERIALIZATION_TYPE_TAGGED_OBJECT:
+ arrayTH = TypeHandle(g_pObjectClass);
+ break;
+ default:
+ if (SERIALIZATION_TYPE_BOOLEAN <= arrayType && arrayType <= SERIALIZATION_TYPE_R8)
+ arrayTH = TypeHandle(MscorlibBinder::GetElementType((CorElementType)arrayType));
+ }
+ if (!arrayTH.IsNull())
+ {
+ arrayTH = ClassLoader::LoadArrayTypeThrowing(arrayTH);
+ *pType = arrayTH.GetManagedClassObject();
+ }
+ }
+ break;
+ }
+ default:
+ if (SERIALIZATION_TYPE_BOOLEAN <= fieldType && fieldType <= SERIALIZATION_TYPE_R8)
+ pMTValue = MscorlibBinder::GetElementType((CorElementType)fieldType);
+ else if(fieldType == SERIALIZATION_TYPE_ENUM)
+ fieldType = (CorSerializationType)pMTValue->GetInternalCorElementType();
+ else
+ COMPlusThrow(kCustomAttributeFormatException);
+
+ ARG_SLOT val = GetDataFromBlob(pCtorAssembly, fieldType, nullTH, &pBlob, pBlobEnd, pModule, &bObjectCreated);
+ _ASSERTE(!bObjectCreated);
+
+ *value = pMTValue->Box((void*)ArgSlotEndianessFixup(&val, pMTValue->GetNumInstanceFieldBytes()));
+ }
+
+ *ppBlobStart = pBlob;
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
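+
+// Illustrative example (hypothetical names): one named-argument entry as consumed
+// above, for a property "Flags" of type MyNs.MyEnum[]:
+//
+//   54                          SERIALIZATION_TYPE_PROPERTY
+//   1D                          SERIALIZATION_TYPE_SZARRAY   (fieldType above)
+//   55                          SERIALIZATION_TYPE_ENUM      (arrayType above)
+//   <packed len> "MyNs.MyEnum"  enum type name, read via GetDataFromBlob(TYPE)
+//   <packed len> "Flags"        property name
+//   03 00 00 00                 element count, then three enum values whose size
+//                               comes from the enum's underlying type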
+
+/*static*/
+TypeHandle COMCustomAttribute::GetTypeHandleFromBlob(Assembly *pCtorAssembly,
+ CorSerializationType objType,
+ BYTE **pBlob,
+ const BYTE *endBlob,
+ Module *pModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ // we must box which means we must get the method table, switch again on the element type
+ MethodTable *pMTType = NULL;
+ TypeHandle nullTH;
+ TypeHandle RtnTypeHnd;
+
+ switch (objType) {
+ case SERIALIZATION_TYPE_BOOLEAN:
+ case SERIALIZATION_TYPE_I1:
+ case SERIALIZATION_TYPE_U1:
+ case SERIALIZATION_TYPE_CHAR:
+ case SERIALIZATION_TYPE_I2:
+ case SERIALIZATION_TYPE_U2:
+ case SERIALIZATION_TYPE_I4:
+ case SERIALIZATION_TYPE_U4:
+ case SERIALIZATION_TYPE_R4:
+ case SERIALIZATION_TYPE_I8:
+ case SERIALIZATION_TYPE_U8:
+ case SERIALIZATION_TYPE_R8:
+ case SERIALIZATION_TYPE_STRING:
+ pMTType = MscorlibBinder::GetElementType((CorElementType)objType);
+ RtnTypeHnd = TypeHandle(pMTType);
+ break;
+
+ case ELEMENT_TYPE_CLASS:
+ pMTType = MscorlibBinder::GetClass(CLASS__TYPE);
+ RtnTypeHnd = TypeHandle(pMTType);
+ break;
+
+ case SERIALIZATION_TYPE_TAGGED_OBJECT:
+ pMTType = g_pObjectClass;
+ RtnTypeHnd = TypeHandle(pMTType);
+ break;
+
+ case SERIALIZATION_TYPE_TYPE:
+ {
+ int size = GetStringSize(pBlob, endBlob);
+ if (size == -1)
+ return nullTH;
+
+ if ((size+1 <= 1) || (size > endBlob - *pBlob))
+ COMPlusThrow(kCustomAttributeFormatException);
+
+ LPUTF8 szName = (LPUTF8)_alloca(size + 1);
+ memcpy(szName, *pBlob, size);
+ *pBlob += size;
+ szName[size] = 0;
+
+ RtnTypeHnd = TypeName::GetTypeUsingCASearchRules(szName, pModule->GetAssembly(), NULL, FALSE);
+ break;
+ }
+
+ case SERIALIZATION_TYPE_ENUM:
+ {
+ // get the enum type
+ BOOL isObject = FALSE;
+ ReflectClassBaseObject *pType = (ReflectClassBaseObject*)OBJECTREFToObject(ArgSlotToObj(GetDataFromBlob(pCtorAssembly,
+ SERIALIZATION_TYPE_TYPE,
+ nullTH,
+ pBlob,
+ endBlob,
+ pModule,
+ &isObject)));
+ if (pType != NULL)
+ {
+ _ASSERTE(isObject);
+ RtnTypeHnd = pType->GetType();
+ _ASSERTE((objType == SERIALIZATION_TYPE_ENUM) ? RtnTypeHnd.GetMethodTable()->IsEnum() : TRUE);
+ }
+ else
+ {
+ RtnTypeHnd = TypeHandle();
+ }
+ break;
+ }
+
+ default:
+ COMPlusThrow(kCustomAttributeFormatException);
+ }
+
+ return RtnTypeHnd;
+}
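+
+// Illustrative example: for SERIALIZATION_TYPE_TYPE the blob stores a counted
+// UTF-8 type name that is resolved with the custom attribute search rules above,
+// e.g.
+//
+//   10 "System.DayOfWeek"       packed length (0x10 = 16) followed by the name
+//
+// The name may also be assembly qualified ("MyType, MyAssembly"); 0xFF in place
+// of the length encodes a null Type, for which a null handle is returned.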
+
+// retrieve the string size in a CA blob. Advance the blob pointer to point to
+// the beginning of the string immediately following the size
+/*static*/
+int COMCustomAttribute::GetStringSize(BYTE **pBlob, const BYTE *endBlob)
+{
+ CONTRACTL
+ {
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ if (*pBlob >= endBlob )
+ { // No buffer at all, or buffer overrun
+ COMPlusThrow(kCustomAttributeFormatException);
+ }
+
+ if (**pBlob == 0xFF)
+ { // Special case null string.
+ ++(*pBlob);
+ return -1;
+ }
+
+ ULONG ulSize;
+ if (FAILED(CPackedLen::SafeGetData((BYTE const *)*pBlob, (BYTE const *)endBlob, (ULONG *)&ulSize, (BYTE const **)pBlob)))
+ {
+ COMPlusThrow(kCustomAttributeFormatException);
+ }
+
+ return (int)ulSize;
+}
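+
+// Illustrative note: the packed length decoded by CPackedLen above is the
+// ECMA-335 compressed unsigned integer:
+//
+//   0x00..0x7F        one byte:   value = b0
+//   0x80..0x3FFF      two bytes:  value = ((b0 & 0x3F) << 8) | b1
+//   up to 0x1FFFFFFF  four bytes: value = ((b0 & 0x1F) << 24) | (b1 << 16) | (b2 << 8) | b3
+//
+// so "Inherited" is stored as 09 'I' 'n' ... and a 200-byte string as 80 C8 ...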
+
+// copy the values of an array of integers from a CA blob
+// (i.e., always stored in little-endian, and need not be aligned).
+// Returns TRUE on success, FALSE if the blob was not big enough.
+// Advances *pBlob by the amount copied.
+/*static*/
+template < typename T >
+BOOL COMCustomAttribute::CopyArrayVAL(BASEARRAYREF pArray, int nElements, BYTE **pBlob, const BYTE *endBlob)
+{
+ int sizeData; // = nElements * sizeof(T), computed with an integer overflow check
+ if (!ClrSafeInt<int>::multiply(nElements, sizeof(T), sizeData))
+ return FALSE;
+ if (*pBlob + sizeData < *pBlob) // integer overflow check
+ return FALSE;
+ if (*pBlob + sizeData > endBlob)
+ return FALSE;
+#if BIGENDIAN
+ T *ptDest = reinterpret_cast<T *>(pArray->GetDataPtr());
+ for (int iElement = 0; iElement < nElements; iElement++)
+ {
+ T tValue;
+ BYTE *pbSrc = *pBlob + iElement * sizeof(T);
+ BYTE *pbDest = reinterpret_cast<BYTE *>(&tValue);
+ for (size_t iByte = 0; iByte < sizeof(T); iByte++)
+ {
+ pbDest[sizeof(T) - 1 - iByte] = pbSrc[iByte];
+ }
+ ptDest[iElement] = tValue;
+ }
+#else // BIGENDIAN
+ memcpyNoGCRefs(pArray->GetDataPtr(), *pBlob, sizeData);
+#endif // BIGENDIAN
+ *pBlob += sizeData;
+ return TRUE;
+}
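+
+// Illustrative example: copying nElements = 3 with T = UINT16 from the blob bytes
+// 01 00 02 00 03 00 produces the managed array { 1, 2, 3 } and advances *pBlob by
+// six bytes. On big-endian targets each element is byte-swapped into place; on
+// little-endian targets the whole chunk is copied at once with memcpyNoGCRefs.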
+
+// read the whole array as a chunk
+/*static*/
+void COMCustomAttribute::ReadArray(Assembly *pCtorAssembly,
+ CorSerializationType arrayType,
+ int size,
+ TypeHandle th,
+ BYTE **pBlob,
+ const BYTE *endBlob,
+ Module *pModule,
+ BASEARRAYREF *pArray)
+{
+ CONTRACTL
+ {
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ ARG_SLOT element = 0;
+
+ switch (arrayType) {
+ case SERIALIZATION_TYPE_BOOLEAN:
+ case SERIALIZATION_TYPE_I1:
+ case SERIALIZATION_TYPE_U1:
+ *pArray = (BASEARRAYREF)AllocatePrimitiveArray((CorElementType)arrayType, size);
+ if (!CopyArrayVAL<BYTE>(*pArray, size, pBlob, endBlob))
+ goto badBlob;
+ break;
+
+ case SERIALIZATION_TYPE_CHAR:
+ case SERIALIZATION_TYPE_I2:
+ case SERIALIZATION_TYPE_U2:
+ {
+ *pArray = (BASEARRAYREF)AllocatePrimitiveArray((CorElementType)arrayType, size);
+ if (!CopyArrayVAL<UINT16>(*pArray, size, pBlob, endBlob))
+ goto badBlob;
+ break;
+ }
+ case SERIALIZATION_TYPE_I4:
+ case SERIALIZATION_TYPE_U4:
+ case SERIALIZATION_TYPE_R4:
+ {
+ *pArray = (BASEARRAYREF)AllocatePrimitiveArray((CorElementType)arrayType, size);
+ if (!CopyArrayVAL<UINT32>(*pArray, size, pBlob, endBlob))
+ goto badBlob;
+ break;
+ }
+ case SERIALIZATION_TYPE_I8:
+ case SERIALIZATION_TYPE_U8:
+ case SERIALIZATION_TYPE_R8:
+ {
+ *pArray = (BASEARRAYREF)AllocatePrimitiveArray((CorElementType)arrayType, size);
+ if (!CopyArrayVAL<UINT64>(*pArray, size, pBlob, endBlob))
+ goto badBlob;
+ break;
+ }
+ case ELEMENT_TYPE_CLASS:
+ case SERIALIZATION_TYPE_TYPE:
+ case SERIALIZATION_TYPE_STRING:
+ case SERIALIZATION_TYPE_SZARRAY:
+ case SERIALIZATION_TYPE_TAGGED_OBJECT:
+ {
+ BOOL isObject;
+
+ // If we haven't figured out the type of the array, throw bad blob exception
+ if (th.IsNull())
+ goto badBlob;
+
+ *pArray = (BASEARRAYREF)AllocateObjectArray(size, th);
+ if (arrayType == SERIALIZATION_TYPE_SZARRAY)
+ // switch the th to be the proper one
+ th = th.AsArray()->GetArrayElementTypeHandle();
+ for (int i = 0; i < size; i++) {
+ element = GetDataFromBlob(pCtorAssembly, arrayType, th, pBlob, endBlob, pModule, &isObject);
+ _ASSERTE(isObject || element == NULL);
+ ((PTRARRAYREF)(*pArray))->SetAt(i, ArgSlotToObj(element));
+ }
+ break;
+ }
+
+ case SERIALIZATION_TYPE_ENUM:
+ {
+ INT32 bounds = size;
+
+ // If we haven't figured out the type of the array, throw bad blob exception
+ if (th.IsNull())
+ goto badBlob;
+
+ unsigned elementSize = th.GetSize();
+ TypeHandle arrayHandle = ClassLoader::LoadArrayTypeThrowing(th);
+ if (arrayHandle.IsNull())
+ goto badBlob;
+ *pArray = (BASEARRAYREF)AllocateArrayEx(arrayHandle, &bounds, 1);
+ BOOL fSuccess;
+ switch (elementSize)
+ {
+ case 1:
+ fSuccess = CopyArrayVAL<BYTE>(*pArray, size, pBlob, endBlob);
+ break;
+ case 2:
+ fSuccess = CopyArrayVAL<UINT16>(*pArray, size, pBlob, endBlob);
+ break;
+ case 4:
+ fSuccess = CopyArrayVAL<UINT32>(*pArray, size, pBlob, endBlob);
+ break;
+ case 8:
+ fSuccess = CopyArrayVAL<UINT64>(*pArray, size, pBlob, endBlob);
+ break;
+ default:
+ fSuccess = FALSE;
+ }
+ if (!fSuccess)
+ goto badBlob;
+ break;
+ }
+
+ default:
+ badBlob:
+ COMPlusThrow(kCustomAttributeFormatException);
+ }
+
+}
+
+// get data out of the blob according to a CorElementType
+/*static*/
+ARG_SLOT COMCustomAttribute::GetDataFromBlob(Assembly *pCtorAssembly,
+ CorSerializationType type,
+ TypeHandle th,
+ BYTE **pBlob,
+ const BYTE *endBlob,
+ Module *pModule,
+ BOOL *bObjectCreated)
+{
+ CONTRACTL
+ {
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ ARG_SLOT retValue = 0;
+ *bObjectCreated = FALSE;
+ TypeHandle nullTH;
+ TypeHandle typeHnd;
+
+ switch (type) {
+
+ case SERIALIZATION_TYPE_BOOLEAN:
+ case SERIALIZATION_TYPE_I1:
+ case SERIALIZATION_TYPE_U1:
+ if (*pBlob + 1 <= endBlob) {
+ retValue = (ARG_SLOT)**pBlob;
+ *pBlob += 1;
+ break;
+ }
+ goto badBlob;
+
+ case SERIALIZATION_TYPE_CHAR:
+ case SERIALIZATION_TYPE_I2:
+ case SERIALIZATION_TYPE_U2:
+ if (*pBlob + 2 <= endBlob) {
+ retValue = (ARG_SLOT)GET_UNALIGNED_VAL16(*pBlob);
+ *pBlob += 2;
+ break;
+ }
+ goto badBlob;
+
+ case SERIALIZATION_TYPE_I4:
+ case SERIALIZATION_TYPE_U4:
+ case SERIALIZATION_TYPE_R4:
+ if (*pBlob + 4 <= endBlob) {
+ retValue = (ARG_SLOT)GET_UNALIGNED_VAL32(*pBlob);
+ *pBlob += 4;
+ break;
+ }
+ goto badBlob;
+
+ case SERIALIZATION_TYPE_I8:
+ case SERIALIZATION_TYPE_U8:
+ case SERIALIZATION_TYPE_R8:
+ if (*pBlob + 8 <= endBlob) {
+ retValue = (ARG_SLOT)GET_UNALIGNED_VAL64(*pBlob);
+ *pBlob += 8;
+ break;
+ }
+ goto badBlob;
+
+ case SERIALIZATION_TYPE_STRING:
+ stringType:
+ {
+ int size = GetStringSize(pBlob, endBlob);
+ *bObjectCreated = TRUE;
+ if (size > 0) {
+ if (*pBlob + size < *pBlob) // integer overflow check
+ goto badBlob;
+ if (*pBlob + size > endBlob)
+ goto badBlob;
+ retValue = ObjToArgSlot(StringObject::NewString((LPCUTF8)*pBlob, size));
+ *pBlob += size;
+ }
+ else if (size == 0)
+ retValue = ObjToArgSlot(StringObject::NewString(0));
+ else
+ *bObjectCreated = FALSE;
+
+ break;
+ }
+
+ // this is coming back from sig but it's not a serialization type,
+ // essentially the type in the blob and the type in the sig don't match
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ if (!th.IsEnum())
+ goto badBlob;
+ CorSerializationType enumType = (CorSerializationType)th.GetInternalCorElementType();
+ BOOL cannotBeObject = FALSE;
+ retValue = GetDataFromBlob(pCtorAssembly, enumType, nullTH, pBlob, endBlob, pModule, &cannotBeObject);
+ _ASSERTE(!cannotBeObject);
+ break;
+ }
+
+ // this is coming back from sig but it's not a serialization type,
+ // essentially the type in the blob and the type in the sig don't match
+ case ELEMENT_TYPE_CLASS:
+ if (th.IsArray())
+ goto typeArray;
+ else {
+ MethodTable *pMT = th.AsMethodTable();
+ if (pMT == g_pStringClass)
+ goto stringType;
+ else if (pMT == g_pObjectClass)
+ goto typeObject;
+ else if (MscorlibBinder::IsClass(pMT, CLASS__TYPE))
+ goto typeType;
+ }
+
+ goto badBlob;
+
+ case SERIALIZATION_TYPE_TYPE:
+ typeType:
+ {
+ typeHnd = GetTypeHandleFromBlob(pCtorAssembly, SERIALIZATION_TYPE_TYPE, pBlob, endBlob, pModule);
+ if (!typeHnd.IsNull())
+ retValue = ObjToArgSlot(typeHnd.GetManagedClassObject());
+ *bObjectCreated = TRUE;
+ break;
+ }
+
+ // this is coming back from sig but it's not a serialization type,
+ // essentially the type in the blob and the type in the sig don't match
+ case ELEMENT_TYPE_OBJECT:
+ case SERIALIZATION_TYPE_TAGGED_OBJECT:
+ typeObject:
+ {
+ // get the byte representing the real type and call GetDataFromBlob again
+ if (*pBlob + 1 > endBlob)
+ goto badBlob;
+ CorSerializationType objType = (CorSerializationType)**pBlob;
+ *pBlob += 1;
+ switch (objType) {
+ case SERIALIZATION_TYPE_SZARRAY:
+ {
+ if (*pBlob + 1 > endBlob)
+ goto badBlob;
+ CorSerializationType arrayType = (CorSerializationType)**pBlob;
+ *pBlob += 1;
+ if (arrayType == SERIALIZATION_TYPE_TYPE)
+ arrayType = (CorSerializationType)ELEMENT_TYPE_CLASS;
+ // grab the array type and make a type handle for it
+ nullTH = GetTypeHandleFromBlob(pCtorAssembly, arrayType, pBlob, endBlob, pModule);
+ }
+ case SERIALIZATION_TYPE_TYPE:
+ case SERIALIZATION_TYPE_STRING:
+ // notice that the nullTH is actually not null in the array case (see case above)
+ retValue = GetDataFromBlob(pCtorAssembly, objType, nullTH, pBlob, endBlob, pModule, bObjectCreated);
+ _ASSERTE(*bObjectCreated || retValue == 0);
+ break;
+ case SERIALIZATION_TYPE_ENUM:
+ {
+ //
+ // get the enum type
+ typeHnd = GetTypeHandleFromBlob(pCtorAssembly, SERIALIZATION_TYPE_ENUM, pBlob, endBlob, pModule);
+ _ASSERTE(typeHnd.IsTypeDesc() == false);
+
+ // ok we have the class, now we go and read the data
+ MethodTable *pMT = typeHnd.AsMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+ CorSerializationType objNormType = (CorSerializationType)pMT->GetInternalCorElementType();
+ BOOL isObject = FALSE;
+ retValue = GetDataFromBlob(pCtorAssembly, objNormType, nullTH, pBlob, endBlob, pModule, &isObject);
+ _ASSERTE(!isObject);
+ retValue = ObjToArgSlot(pMT->Box((void*)&retValue));
+ *bObjectCreated = TRUE;
+ break;
+ }
+ default:
+ {
+ // the common primitive type case. We need to box the primitive
+ typeHnd = GetTypeHandleFromBlob(pCtorAssembly, objType, pBlob, endBlob, pModule);
+ _ASSERTE(typeHnd.IsTypeDesc() == false);
+ retValue = GetDataFromBlob(pCtorAssembly, objType, nullTH, pBlob, endBlob, pModule, bObjectCreated);
+ _ASSERTE(!*bObjectCreated);
+ retValue = ObjToArgSlot(typeHnd.AsMethodTable()->Box((void*)&retValue));
+ *bObjectCreated = TRUE;
+ break;
+ }
+ }
+ break;
+ }
+
+ case SERIALIZATION_TYPE_SZARRAY:
+ typeArray:
+ {
+ // read size
+ BOOL isObject = FALSE;
+ int size = (int)GetDataFromBlob(pCtorAssembly, SERIALIZATION_TYPE_I4, nullTH, pBlob, endBlob, pModule, &isObject);
+ _ASSERTE(!isObject);
+
+ if (size != -1) {
+ CorSerializationType arrayType;
+ if (th.IsEnum())
+ arrayType = SERIALIZATION_TYPE_ENUM;
+ else
+ arrayType = (CorSerializationType)th.GetInternalCorElementType();
+
+ BASEARRAYREF array = NULL;
+ GCPROTECT_BEGIN(array);
+ ReadArray(pCtorAssembly, arrayType, size, th, pBlob, endBlob, pModule, &array);
+ retValue = ObjToArgSlot(array);
+ GCPROTECT_END();
+ }
+ *bObjectCreated = TRUE;
+ break;
+ }
+
+ default:
+ badBlob:
+ //<TODO> generate a reasonable text string ("invalid blob or constructor")</TODO>
+ COMPlusThrow(kCustomAttributeFormatException);
+ }
+
+ return retValue;
+}
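+
+// Illustrative example: decoding a tagged object (a ctor parameter typed as
+// System.Object). The blob stores the runtime type byte in front of the value,
+// so a boxed Int32 with value 42 is encoded as
+//
+//   08 2A 00 00 00              SERIALIZATION_TYPE_I4, then the little-endian value
+//
+// GetDataFromBlob reads the 0x08, recurses to read the I4, and boxes the result
+// using the MethodTable obtained from GetTypeHandleFromBlob.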
+
+FCIMPL2(VOID, COMCustomAttribute::PushSecurityContextFrame, SecurityContextFrame *pFrame, AssemblyBaseObject *pAssemblyObjectUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+
+ // Adjust frame pointer for the presence of the GSCookie at a negative
+ // offset (it's hard for us to express neginfo in the managed definition of
+ // the frame).
+ pFrame = (SecurityContextFrame*)((BYTE*)pFrame + sizeof(GSCookie));
+
+ *((TADDR*)pFrame) = SecurityContextFrame::GetMethodFrameVPtr();
+ pFrame->SetAssembly(pAssemblyObjectUNSAFE->GetAssembly());
+ *pFrame->GetGSCookiePtr() = GetProcessGSCookie();
+ pFrame->Push();
+
+ END_SO_INTOLERANT_CODE;
+}
+FCIMPLEND
+
+FCIMPL1(VOID, COMCustomAttribute::PopSecurityContextFrame, SecurityContextFrame *pFrame)
+{
+ FCALL_CONTRACT;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+
+ // Adjust frame pointer for the presence of the GSCookie at a negative
+ // offset (it's hard for us to express neginfo in the managed definition of
+ // the frame).
+ pFrame = (SecurityContextFrame*)((BYTE*)pFrame + sizeof(GSCookie));
+
+ pFrame->Pop();
+
+ END_SO_INTOLERANT_CODE;
+}
+FCIMPLEND
diff --git a/src/vm/customattribute.h b/src/vm/customattribute.h
new file mode 100644
index 0000000000..052a9c0931
--- /dev/null
+++ b/src/vm/customattribute.h
@@ -0,0 +1,240 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _CUSTOMATTRIBUTE_H_
+#define _CUSTOMATTRIBUTE_H_
+
+#include "fcall.h"
+#include "../md/compiler/custattr.h"
+
+struct CustomAttributeType;
+struct CustomAttributeValue;
+struct CustomAttributeArgument;
+struct CustomAttributeNamedArgument;
+
+typedef Array<CustomAttributeArgument> CaArgArray;
+typedef Array<CustomAttributeNamedArgument> CaNamedArgArray;
+typedef Array<CustomAttributeValue> CaValueArray;
+
+typedef DPTR(CaArgArray) PTR_CaArgArray;
+typedef DPTR(CaNamedArgArray) PTR_CaNamedArgArray;
+typedef DPTR(CaValueArray) PTR_CaValueArray;
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<CaArgArray> CaArgArrayREF;
+typedef REF<CaNamedArgArray> CaNamedArgArrayREF;
+typedef REF<CaValueArray> CaValueArrayREF;
+#else
+typedef PTR_CaArgArray CaArgArrayREF;
+typedef PTR_CaNamedArgArray CaNamedArgArrayREF;
+typedef PTR_CaValueArray CaValueArrayREF;
+#endif
+
+
+#include <pshpack1.h>
+struct CustomAttributeType
+{
+ STRINGREF m_enumName;
+ CorSerializationType m_tag;
+ CorSerializationType m_enumType;
+ CorSerializationType m_arrayType;
+ CorSerializationType m_padding;
+};
+
+struct CustomAttributeValue
+{
+#ifdef _WIN64
+ // refs come before longs on win64
+ CaValueArrayREF m_value;
+ STRINGREF m_enumOrTypeName;
+ INT64 m_rawValue;
+#else
+ // longs come before refs on x86
+ INT64 m_rawValue;
+ CaValueArrayREF m_value;
+ STRINGREF m_enumOrTypeName;
+#endif
+ CustomAttributeType m_type;
+#if defined(FEATURE_64BIT_ALIGNMENT)
+ DWORD m_padding;
+#endif
+};
+
+struct CustomAttributeArgument
+{
+ CustomAttributeType m_type;
+#if (!defined(_WIN64) && (DATA_ALIGNMENT > 4)) || defined(FEATURE_64BIT_ALIGNMENT)
+ DWORD m_padding;
+#endif
+ CustomAttributeValue m_value;
+};
+
+struct CustomAttributeNamedArgument
+{
+ STRINGREF m_argumentName;
+ CorSerializationType m_propertyOrField;
+ CorSerializationType m_padding;
+#if !defined(_WIN64) && (DATA_ALIGNMENT > 4)
+ DWORD m_padding2;
+#endif
+ CustomAttributeType m_type;
+#if !defined(_WIN64) && (DATA_ALIGNMENT > 4)
+ DWORD m_padding3;
+#endif
+ CustomAttributeValue m_value;
+};
+#include <poppack.h>
+
+
+typedef struct {
+ STRINGREF string;
+ CaValueArrayREF array;
+} CustomAttributeManagedValues;
+
+typedef Factory< SArray<CaValue> > CaValueArrayFactory;
+
+class Attribute
+{
+public:
+ static FCDECL5(VOID, ParseAttributeArguments,
+ void* pCa,
+ INT32 cCa,
+ CaArgArrayREF* ppCustomAttributeArguments,
+ CaNamedArgArrayREF* ppCustomAttributeNamedArguments,
+ AssemblyBaseObject* pAssemblyUNSAFE);
+
+private:
+ static HRESULT ParseAttributeArgumentValues(
+ void* pCa,
+ INT32 cCa,
+ CaValueArrayFactory* pCaValueArrayFactory,
+ CaArg* pCaArgs,
+ COUNT_T cArgs,
+ CaNamedArg* pCaNamedArgs,
+ COUNT_T cNamedArgs,
+ DomainAssembly* pDomainAssembly);
+
+ static HRESULT ParseCaValue(
+ CustomAttributeParser &ca,
+ CaValue* pCaArg,
+ CaType* pCaParam,
+ CaValueArrayFactory* pCaValueArrayFactory,
+ DomainAssembly* pDomainAssembly);
+
+ static HRESULT ParseCaCtorArgs(
+ CustomAttributeParser &ca,
+ CaArg* pArgs,
+ ULONG cArgs,
+ CaValueArrayFactory* pCaValueArrayFactory,
+ DomainAssembly* pDomainAssembly);
+
+ static HRESULT ParseCaNamedArgs(
+ CustomAttributeParser &ca, // The Custom Attribute blob.
+ CaNamedArg *pNamedParams, // Array of argument descriptors.
+ ULONG cNamedParams,
+ CaValueArrayFactory* pCaValueArrayFactory,
+ DomainAssembly* pDomainAssembly);
+
+ static HRESULT InitCaType(
+ CustomAttributeType* pType,
+ Factory<SString>* pSstringFactory,
+ Factory<StackScratchBuffer>* pStackScratchBufferFactory,
+ CaType* pCaType);
+
+ static HRESULT ParseCaType(
+ CustomAttributeParser &ca,
+ CaType* pCaType,
+ DomainAssembly* pDomainAssembly,
+ StackSString* ss = NULL);
+
+ static TypeHandle GetTypeForEnum(
+ LPCUTF8 szEnumName,
+ COUNT_T cbEnumName,
+ DomainAssembly* pDomainAssembly);
+
+ static void SetBlittableCaValue(
+ CustomAttributeValue* pVal,
+ CaValue* pCaVal,
+ BOOL* pbAllBlittableCa);
+
+ static void SetManagedValue(
+ CustomAttributeManagedValues gc,
+ CustomAttributeValue* pValue);
+
+ static CustomAttributeManagedValues GetManagedCaValue(CaValue* pCaVal);
+};
+
+class CORSEC_ATTRSET_ARRAY: public StackSArray<CORSEC_ATTRSET>
+{
+public:
+ CORSEC_ATTRSET_ARRAY()
+ {
+ }
+ ~CORSEC_ATTRSET_ARRAY()
+ {
+ WRAPPER_NO_CONTRACT;
+ for (COUNT_T i = 0; i < GetCount(); i++)
+ {
+ (*this)[i].CORSEC_ATTRSET::~CORSEC_ATTRSET();
+ }
+
+ }
+};
+
+class COMCustomAttribute
+{
+public:
+
+ // custom attributes utility functions
+ static FCDECL5(VOID, ParseAttributeUsageAttribute, PVOID pData, ULONG cData, ULONG* pTargets, CLR_BOOL* pInherited, CLR_BOOL* pAllowMultiple);
+ static FCDECL5(LPVOID, CreateCaObject, ReflectModuleBaseObject* pAttributedModuleUNSAFE, ReflectMethodObject *pMethodUNSAFE, BYTE** ppBlob, BYTE* pEndBlob, INT32* pcNamedArgs);
+ static FCDECL7(void, GetPropertyOrFieldData, ReflectModuleBaseObject *pModuleUNSAFE, BYTE** ppBlobStart, BYTE* pBlobEnd, STRINGREF* pName, CLR_BOOL* pbIsProperty, OBJECTREF* pType, OBJECTREF* value);
+ static FCDECL4(VOID, GetSecurityAttributes, ReflectModuleBaseObject *pModuleUNSAFE, DWORD tkToken, CLR_BOOL fAssembly, PTRARRAYREF* ppArray);
+ static FCDECL2(VOID, PushSecurityContextFrame, SecurityContextFrame *pFrame, AssemblyBaseObject *pAssemblyObjectUNSAFE);
+ static FCDECL1(VOID, PopSecurityContextFrame, SecurityContextFrame *pFrame);
+
+private:
+
+ static TypeHandle GetTypeHandleFromBlob(
+ Assembly *pCtorAssembly,
+ CorSerializationType objType,
+ BYTE **pBlob,
+ const BYTE *endBlob,
+ Module *pModule);
+
+ static ARG_SLOT GetDataFromBlob(
+ Assembly *pCtorAssembly,
+ CorSerializationType type,
+ TypeHandle th,
+ BYTE **pBlob,
+ const BYTE *endBlob,
+ Module *pModule,
+ BOOL *bObjectCreated);
+
+ static void ReadArray(
+ Assembly *pCtorAssembly,
+ CorSerializationType arrayType,
+ int size,
+ TypeHandle th,
+ BYTE **pBlob,
+ const BYTE *endBlob,
+ Module *pModule,
+ BASEARRAYREF *pArray);
+
+ static int GetStringSize(
+ BYTE **pBlob,
+ const BYTE *endBlob);
+
+ template < typename T >
+ static BOOL CopyArrayVAL(
+ BASEARRAYREF pArray,
+ int nElements,
+ BYTE **pBlob,
+ const BYTE *endBlob);
+};
+
+#endif // _CUSTOMATTRIBUTE_H_
+
diff --git a/src/vm/custommarshalerinfo.cpp b/src/vm/custommarshalerinfo.cpp
new file mode 100644
index 0000000000..561b919023
--- /dev/null
+++ b/src/vm/custommarshalerinfo.cpp
@@ -0,0 +1,642 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: CustomMarshalerInfo.cpp
+//
+
+//
+// Custom marshaler information used when marshaling
+// a parameter with a custom marshaler.
+//
+
+
+#include "common.h"
+
+
+#include "custommarshalerinfo.h"
+#include "mlinfo.h"
+#include "mdaassistants.h"
+#include "sigbuilder.h"
+
+//==========================================================================
+// Implementation of the custom marshaler info class.
+//==========================================================================
+
+CustomMarshalerInfo::CustomMarshalerInfo(BaseDomain *pDomain, TypeHandle hndCustomMarshalerType, TypeHandle hndManagedType, LPCUTF8 strCookie, DWORD cCookieStrBytes)
+: m_NativeSize(0)
+, m_hndManagedType(hndManagedType)
+, m_hndCustomMarshaler(NULL)
+, m_pMarshalNativeToManagedMD(NULL)
+, m_pMarshalManagedToNativeMD(NULL)
+, m_pCleanUpNativeDataMD(NULL)
+, m_pCleanUpManagedDataMD(NULL)
+, m_bDataIsByValue(FALSE)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDomain));
+ }
+ CONTRACTL_END;
+
+
+ // Make sure the custom marshaler implements ICustomMarshaler.
+ if (!hndCustomMarshalerType.GetMethodTable()->CanCastToNonVariantInterface(MscorlibBinder::GetClass(CLASS__ICUSTOM_MARSHALER)))
+ {
+ DefineFullyQualifiedNameForClassW()
+ COMPlusThrow(kApplicationException,
+ IDS_EE_ICUSTOMMARSHALERNOTIMPL,
+ GetFullyQualifiedNameForClassW(hndCustomMarshalerType.GetMethodTable()));
+ }
+
+ // Determine if this type is a value class.
+ m_bDataIsByValue = m_hndManagedType.GetMethodTable()->IsValueType();
+
+ // Custom marshaling of value classes is not currently supported.
+ if (m_bDataIsByValue)
+ COMPlusThrow(kNotSupportedException, W("NotSupported_ValueClassCM"));
+
+#ifndef CROSSGEN_COMPILE
+ // Run the <clinit> on the marshaler since it might not have run yet.
+ hndCustomMarshalerType.GetMethodTable()->EnsureInstanceActive();
+ hndCustomMarshalerType.GetMethodTable()->CheckRunClassInitThrowing();
+
+ // Create a COM+ string that will contain the string cookie.
+ STRINGREF CookieStringObj = StringObject::NewString(strCookie, cCookieStrBytes);
+ GCPROTECT_BEGIN(CookieStringObj);
+#endif
+
+ // Load the method desc's for all the methods in the ICustomMarshaler interface.
+ m_pMarshalNativeToManagedMD = GetCustomMarshalerMD(CustomMarshalerMethods_MarshalNativeToManaged, hndCustomMarshalerType);
+ m_pMarshalManagedToNativeMD = GetCustomMarshalerMD(CustomMarshalerMethods_MarshalManagedToNative, hndCustomMarshalerType);
+ m_pCleanUpNativeDataMD = GetCustomMarshalerMD(CustomMarshalerMethods_CleanUpNativeData, hndCustomMarshalerType);
+ m_pCleanUpManagedDataMD = GetCustomMarshalerMD(CustomMarshalerMethods_CleanUpManagedData, hndCustomMarshalerType);
+
+ // Load the method desc for the static method to retrieve the instance.
+ MethodDesc *pGetCustomMarshalerMD = GetCustomMarshalerMD(CustomMarshalerMethods_GetInstance, hndCustomMarshalerType);
+
+ // If the GetInstance method is generic, get an instantiating stub for it -
+ // the CallDescr infrastructure doesn't know how to pass secret generic arguments.
+ if (pGetCustomMarshalerMD->RequiresInstMethodTableArg())
+ {
+ pGetCustomMarshalerMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pGetCustomMarshalerMD,
+ hndCustomMarshalerType.GetMethodTable(),
+ FALSE, // forceBoxedEntryPoint
+ Instantiation(), // methodInst
+ FALSE, // allowInstParam
+ FALSE); // forceRemotableMethod
+
+ _ASSERTE(!pGetCustomMarshalerMD->RequiresInstMethodTableArg());
+ }
+
+#ifndef CROSSGEN_COMPILE
+ MethodDescCallSite getCustomMarshaler(pGetCustomMarshalerMD, (OBJECTREF*)&CookieStringObj);
+
+ pGetCustomMarshalerMD->EnsureActive();
+
+ // Prepare the arguments that will be passed to GetCustomMarshaler.
+ ARG_SLOT GetCustomMarshalerArgs[] = {
+ ObjToArgSlot(CookieStringObj)
+ };
+
+ // Call the GetCustomMarshaler method to retrieve the custom marshaler to use.
+ OBJECTREF CustomMarshalerObj = getCustomMarshaler.Call_RetOBJECTREF(GetCustomMarshalerArgs);
+ if (!CustomMarshalerObj)
+ {
+ DefineFullyQualifiedNameForClassW()
+ COMPlusThrow(kApplicationException,
+ IDS_EE_NOCUSTOMMARSHALER,
+ GetFullyQualifiedNameForClassW(hndCustomMarshalerType.GetMethodTable()));
+ }
+ m_hndCustomMarshaler = pDomain->CreateHandle(CustomMarshalerObj);
+
+ // Retrieve the size of the native data.
+ if (m_bDataIsByValue)
+ {
+ // <TODO>@TODO(DM): Call GetNativeDataSize() to retrieve the size of the native data.</TODO>
+ _ASSERTE(!"Value classes are not yet supported by the custom marshaler!");
+ }
+ else
+ {
+ m_NativeSize = sizeof(void *);
+ }
+
+ GCPROTECT_END();
+#endif
+}
+
+
+CustomMarshalerInfo::~CustomMarshalerInfo()
+{
+ WRAPPER_NO_CONTRACT;
+#ifndef CROSSGEN_COMPILE
+ if (m_hndCustomMarshaler)
+ {
+ DestroyHandle(m_hndCustomMarshaler);
+ m_hndCustomMarshaler = NULL;
+ }
+#endif
+}
+
+
+void *CustomMarshalerInfo::operator new(size_t size, LoaderHeap *pHeap)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pHeap));
+ }
+ CONTRACTL_END;
+
+ return pHeap->AllocMem(S_SIZE_T(sizeof(CustomMarshalerInfo)));
+}
+
+
+void CustomMarshalerInfo::operator delete(void *pMem)
+{
+ // Instances of this class are always allocated on the loader heap so
+ // the delete operator has nothing to do.
+ LIMITED_METHOD_CONTRACT;
+}
+
+#ifndef CROSSGEN_COMPILE
+OBJECTREF CustomMarshalerInfo::InvokeMarshalNativeToManagedMeth(void *pNative)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNative, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (!pNative)
+ return NULL;
+
+ MethodDescCallSite marshalNativeToManaged(m_pMarshalNativeToManagedMD, m_hndCustomMarshaler);
+
+ ARG_SLOT Args[] = {
+ ObjToArgSlot(ObjectFromHandle(m_hndCustomMarshaler)),
+ PtrToArgSlot(pNative)
+ };
+
+ return marshalNativeToManaged.Call_RetOBJECTREF(Args);
+}
+
+
+void *CustomMarshalerInfo::InvokeMarshalManagedToNativeMeth(OBJECTREF MngObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ void *RetVal = NULL;
+
+ if (!MngObj)
+ return NULL;
+
+ GCPROTECT_BEGIN (MngObj);
+ MethodDescCallSite marshalManagedToNative(m_pMarshalManagedToNativeMD, m_hndCustomMarshaler);
+
+ ARG_SLOT Args[] = {
+ ObjToArgSlot(ObjectFromHandle(m_hndCustomMarshaler)),
+ ObjToArgSlot(MngObj)
+ };
+
+ RetVal = marshalManagedToNative.Call_RetLPVOID(Args);
+ GCPROTECT_END ();
+
+ return RetVal;
+}
+
+
+void CustomMarshalerInfo::InvokeCleanUpNativeMeth(void *pNative)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNative, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (!pNative)
+ return;
+
+ MethodDescCallSite cleanUpNativeData(m_pCleanUpNativeDataMD, m_hndCustomMarshaler);
+
+ ARG_SLOT Args[] = {
+ ObjToArgSlot(ObjectFromHandle(m_hndCustomMarshaler)),
+ PtrToArgSlot(pNative)
+ };
+
+ cleanUpNativeData.Call(Args);
+}
+
+
+void CustomMarshalerInfo::InvokeCleanUpManagedMeth(OBJECTREF MngObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (!MngObj)
+ return;
+
+ GCPROTECT_BEGIN (MngObj);
+ MethodDescCallSite cleanUpManagedData(m_pCleanUpManagedDataMD, m_hndCustomMarshaler);
+
+ ARG_SLOT Args[] = {
+ ObjToArgSlot(ObjectFromHandle(m_hndCustomMarshaler)),
+ ObjToArgSlot(MngObj)
+ };
+
+ cleanUpManagedData.Call(Args);
+ GCPROTECT_END ();
+}
+
+#endif // CROSSGEN_COMPILE
+MethodDesc *CustomMarshalerInfo::GetCustomMarshalerMD(EnumCustomMarshalerMethods Method, TypeHandle hndCustomMarshalertype)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+
+ MethodTable *pMT = hndCustomMarshalertype.AsMethodTable();
+
+ _ASSERTE(pMT->CanCastToNonVariantInterface(MscorlibBinder::GetClass(CLASS__ICUSTOM_MARSHALER)));
+
+ MethodDesc *pMD = NULL;
+
+ switch (Method)
+ {
+ case CustomMarshalerMethods_MarshalNativeToManaged:
+ pMD = pMT->GetMethodDescForInterfaceMethod(
+ MscorlibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__MARSHAL_NATIVE_TO_MANAGED));
+ break;
+ case CustomMarshalerMethods_MarshalManagedToNative:
+ pMD = pMT->GetMethodDescForInterfaceMethod(
+ MscorlibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__MARSHAL_MANAGED_TO_NATIVE));
+ break;
+ case CustomMarshalerMethods_CleanUpNativeData:
+ pMD = pMT->GetMethodDescForInterfaceMethod(
+ MscorlibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__CLEANUP_NATIVE_DATA));
+ break;
+
+ case CustomMarshalerMethods_CleanUpManagedData:
+ pMD = pMT->GetMethodDescForInterfaceMethod(
+ MscorlibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__CLEANUP_MANAGED_DATA));
+ break;
+ case CustomMarshalerMethods_GetNativeDataSize:
+ pMD = pMT->GetMethodDescForInterfaceMethod(
+ MscorlibBinder::GetMethod(METHOD__ICUSTOM_MARSHALER__GET_NATIVE_DATA_SIZE));
+ break;
+ case CustomMarshalerMethods_GetInstance:
+ // Must look this up by name since it's static
+ pMD = MemberLoader::FindMethod(pMT, "GetInstance", &gsig_SM_Str_RetICustomMarshaler);
+ if (!pMD)
+ {
+ DefineFullyQualifiedNameForClassW()
+ COMPlusThrow(kApplicationException,
+ IDS_EE_GETINSTANCENOTIMPL,
+ GetFullyQualifiedNameForClassW(pMT));
+ }
+ break;
+ default:
+ _ASSERTE(!"Unknown custom marshaler method");
+ }
+
+ _ASSERTE(pMD && "Unable to find specified CustomMarshaler method");
+
+ // Ensure that the value types in the signature are loaded.
+ MetaSig::EnsureSigValueTypesLoaded(pMD);
+
+ // Return the specified method desc.
+ return pMD;
+}
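+
+// Note: gsig_SM_Str_RetICustomMarshaler above describes a static method taking a
+// string and returning ICustomMarshaler, i.e. the expected managed shape is
+// roughly
+//
+//   static ICustomMarshaler GetInstance(string cookie)
+//
+// Being static, it is not part of the interface vtable, which is why it is looked
+// up by name rather than through GetMethodDescForInterfaceMethod.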
+
+#ifndef CROSSGEN_COMPILE
+
+//==========================================================================
+// Implementation of the custom marshaler hashtable helper.
+//==========================================================================
+
+EEHashEntry_t * EECMHelperHashtableHelper::AllocateEntry(EECMHelperHashtableKey *pKey, BOOL bDeepCopy, void* pHeap)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(return NULL;);
+ }
+ CONTRACTL_END;
+
+ EEHashEntry_t *pEntry;
+
+ if (bDeepCopy)
+ {
+ S_SIZE_T cbEntry = S_SIZE_T(sizeof(EEHashEntry) - 1 + sizeof(EECMHelperHashtableKey));
+ cbEntry += S_SIZE_T(pKey->GetMarshalerTypeNameByteCount());
+ cbEntry += S_SIZE_T(pKey->GetCookieStringByteCount());
+ cbEntry += S_SIZE_T(pKey->GetMarshalerInstantiation().GetNumArgs()) * S_SIZE_T(sizeof(LPVOID));
+
+ if (cbEntry.IsOverflow())
+ return NULL;
+
+ pEntry = (EEHashEntry_t *) new (nothrow) BYTE[cbEntry.Value()];
+ if (!pEntry)
+ return NULL;
+
+ EECMHelperHashtableKey *pEntryKey = (EECMHelperHashtableKey *) pEntry->Key;
+ pEntryKey->m_cMarshalerTypeNameBytes = pKey->GetMarshalerTypeNameByteCount();
+ pEntryKey->m_strMarshalerTypeName = (LPSTR) pEntry->Key + sizeof(EECMHelperHashtableKey);
+ pEntryKey->m_cCookieStrBytes = pKey->GetCookieStringByteCount();
+ pEntryKey->m_strCookie = (LPSTR) pEntry->Key + sizeof(EECMHelperHashtableKey) + pEntryKey->m_cMarshalerTypeNameBytes;
+ pEntryKey->m_Instantiation = Instantiation(
+ (TypeHandle *) (pEntryKey->m_strCookie + pEntryKey->m_cCookieStrBytes),
+ pKey->GetMarshalerInstantiation().GetNumArgs());
+ pEntryKey->m_bSharedHelper = pKey->IsSharedHelper();
+ memcpy((void*)pEntryKey->m_strMarshalerTypeName, pKey->GetMarshalerTypeName(), pKey->GetMarshalerTypeNameByteCount());
+ memcpy((void*)pEntryKey->m_strCookie, pKey->GetCookieString(), pKey->GetCookieStringByteCount());
+ memcpy((void*)pEntryKey->m_Instantiation.GetRawArgs(), pKey->GetMarshalerInstantiation().GetRawArgs(),
+ pEntryKey->m_Instantiation.GetNumArgs() * sizeof(LPVOID));
+ }
+ else
+ {
+ pEntry = (EEHashEntry_t *)
+ new (nothrow) BYTE[sizeof(EEHashEntry) - 1 + sizeof(EECMHelperHashtableKey)];
+ if (!pEntry)
+ return NULL;
+
+ EECMHelperHashtableKey *pEntryKey = (EECMHelperHashtableKey *) pEntry->Key;
+ pEntryKey->m_cMarshalerTypeNameBytes = pKey->GetMarshalerTypeNameByteCount();
+ pEntryKey->m_strMarshalerTypeName = pKey->GetMarshalerTypeName();
+ pEntryKey->m_cCookieStrBytes = pKey->GetCookieStringByteCount();
+ pEntryKey->m_strCookie = pKey->GetCookieString();
+ pEntryKey->m_Instantiation = Instantiation(pKey->GetMarshalerInstantiation());
+ pEntryKey->m_bSharedHelper = pKey->IsSharedHelper();
+ }
+
+ return pEntry;
+}
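+
+// Illustrative note: in the deep-copy case the single allocation above is one
+// contiguous block, with the variable-length key data packed after the fixed-size
+// structures:
+//
+//   [ EEHashEntry header | EECMHelperHashtableKey | marshaler type name bytes |
+//     cookie string bytes | instantiation TypeHandle array ]
+//
+// The embedded key's string and instantiation pointers all point into this block,
+// so DeleteEntry can release everything with a single delete[].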
+
+
+void EECMHelperHashtableHelper::DeleteEntry(EEHashEntry_t *pEntry, void* pHeap)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pEntry));
+ }
+ CONTRACTL_END;
+
+ delete[] (BYTE*)pEntry;
+}
+
+
+BOOL EECMHelperHashtableHelper::CompareKeys(EEHashEntry_t *pEntry, EECMHelperHashtableKey *pKey)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pEntry));
+ PRECONDITION(CheckPointer(pKey));
+ }
+ CONTRACTL_END;
+
+ EECMHelperHashtableKey *pEntryKey = (EECMHelperHashtableKey *) pEntry->Key;
+
+ if (pEntryKey->IsSharedHelper() != pKey->IsSharedHelper())
+ return FALSE;
+
+ if (pEntryKey->GetMarshalerTypeNameByteCount() != pKey->GetMarshalerTypeNameByteCount())
+ return FALSE;
+
+ if (memcmp(pEntryKey->GetMarshalerTypeName(), pKey->GetMarshalerTypeName(), pEntryKey->GetMarshalerTypeNameByteCount()) != 0)
+ return FALSE;
+
+ if (pEntryKey->GetCookieStringByteCount() != pKey->GetCookieStringByteCount())
+ return FALSE;
+
+ if (memcmp(pEntryKey->GetCookieString(), pKey->GetCookieString(), pEntryKey->GetCookieStringByteCount()) != 0)
+ return FALSE;
+
+ DWORD dwNumTypeArgs = pEntryKey->GetMarshalerInstantiation().GetNumArgs();
+ if (dwNumTypeArgs != pKey->GetMarshalerInstantiation().GetNumArgs())
+ return FALSE;
+
+ for (DWORD i = 0; i < dwNumTypeArgs; i++)
+ {
+ if (pEntryKey->GetMarshalerInstantiation()[i] != pKey->GetMarshalerInstantiation()[i])
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+DWORD EECMHelperHashtableHelper::Hash(EECMHelperHashtableKey *pKey)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return (DWORD)
+ (HashBytes((const BYTE *) pKey->GetMarshalerTypeName(), pKey->GetMarshalerTypeNameByteCount()) +
+ HashBytes((const BYTE *) pKey->GetCookieString(), pKey->GetCookieStringByteCount()) +
+ HashBytes((const BYTE *) pKey->GetMarshalerInstantiation().GetRawArgs(), pKey->GetMarshalerInstantiation().GetNumArgs() * sizeof(LPVOID)) +
+ (pKey->IsSharedHelper() ? 1 : 0));
+}
+
+
+OBJECTREF CustomMarshalerHelper::InvokeMarshalNativeToManagedMeth(void *pNative)
+{
+ WRAPPER_NO_CONTRACT;
+ return GetCustomMarshalerInfo()->InvokeMarshalNativeToManagedMeth(pNative);
+}
+
+
+void *CustomMarshalerHelper::InvokeMarshalManagedToNativeMeth(OBJECTREF MngObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ void *RetVal = NULL;
+
+ GCPROTECT_BEGIN(MngObj)
+ {
+ CustomMarshalerInfo *pCMInfo = GetCustomMarshalerInfo();
+ RetVal = pCMInfo->InvokeMarshalManagedToNativeMeth(MngObj);
+ }
+ GCPROTECT_END();
+
+ return RetVal;
+}
+
+
+void CustomMarshalerHelper::InvokeCleanUpNativeMeth(void *pNative)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF ExceptionObj = NULL;
+ GCPROTECT_BEGIN(ExceptionObj)
+ {
+ EX_TRY
+ {
+ GetCustomMarshalerInfo()->InvokeCleanUpNativeMeth(pNative);
+ }
+ EX_CATCH
+ {
+ ExceptionObj = GET_THROWABLE();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+#ifdef MDA_SUPPORTED
+ if (ExceptionObj != NULL)
+ {
+ TypeHandle typeCustomMarshaler = GetCustomMarshalerInfo()->GetCustomMarshalerType();
+ MDA_TRIGGER_ASSISTANT(MarshalCleanupError, ReportErrorCustomMarshalerCleanup(typeCustomMarshaler, &ExceptionObj));
+ }
+#endif
+ }
+ GCPROTECT_END();
+}
+
+
+void CustomMarshalerHelper::InvokeCleanUpManagedMeth(OBJECTREF MngObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ GCPROTECT_BEGIN(MngObj)
+ {
+ CustomMarshalerInfo *pCMInfo = GetCustomMarshalerInfo();
+ pCMInfo->InvokeCleanUpManagedMeth(MngObj);
+ }
+ GCPROTECT_END();
+}
+
+
+void *NonSharedCustomMarshalerHelper::operator new(size_t size, LoaderHeap *pHeap)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pHeap));
+ }
+ CONTRACTL_END;
+
+ return pHeap->AllocMem(S_SIZE_T(sizeof(NonSharedCustomMarshalerHelper)));
+}
+
+
+void NonSharedCustomMarshalerHelper::operator delete(void *pMem)
+{
+ // Instances of this class are always allocated on the loader heap so
+ // the delete operator has nothing to do.
+ LIMITED_METHOD_CONTRACT;
+}
+
+
+SharedCustomMarshalerHelper::SharedCustomMarshalerHelper(Assembly *pAssembly, TypeHandle hndManagedType, LPCUTF8 strMarshalerTypeName, DWORD cMarshalerTypeNameBytes, LPCUTF8 strCookie, DWORD cCookieStrBytes)
+: m_pAssembly(pAssembly)
+, m_hndManagedType(hndManagedType)
+, m_cMarshalerTypeNameBytes(cMarshalerTypeNameBytes)
+, m_strMarshalerTypeName(strMarshalerTypeName)
+, m_cCookieStrBytes(cCookieStrBytes)
+, m_strCookie(strCookie)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+
+void *SharedCustomMarshalerHelper::operator new(size_t size, LoaderHeap *pHeap)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pHeap));
+ }
+ CONTRACTL_END;
+
+ return pHeap->AllocMem(S_SIZE_T(sizeof(SharedCustomMarshalerHelper)));
+}
+
+
+void SharedCustomMarshalerHelper::operator delete(void *pMem)
+{
+ // Instances of this class are always allocated on the loader heap so
+ // the delete operator has nothing to do.
+ LIMITED_METHOD_CONTRACT;
+}
+
+
+CustomMarshalerInfo *SharedCustomMarshalerHelper::GetCustomMarshalerInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Retrieve the marshalling data for the current app domain.
+ EEMarshalingData *pMarshalingData = GetThread()->GetDomain()->GetMarshalingData();
+
+ // Retrieve the custom marshaling information for the current shared custom
+ // marshaling helper.
+ return pMarshalingData->GetCustomMarshalerInfo(this);
+}
+
+
+#endif // CROSSGEN_COMPILE
+
diff --git a/src/vm/custommarshalerinfo.h b/src/vm/custommarshalerinfo.h
new file mode 100644
index 0000000000..58e7b796ae
--- /dev/null
+++ b/src/vm/custommarshalerinfo.h
@@ -0,0 +1,321 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: CustomMarshalerInfo.h
+//
+
+//
+// Custom marshaler information used when marshaling
+// a parameter with a custom marshaler.
+//
+
+
+#ifndef _CUSTOMMARSHALERINFO_H_
+#define _CUSTOMMARSHALERINFO_H_
+
+
+#include "vars.hpp"
+#include "slist.h"
+
+
+// This enumeration is used to retrieve a method desc from CustomMarshalerInfo::GetCustomMarshalerMD().
+enum EnumCustomMarshalerMethods
+{
+ CustomMarshalerMethods_MarshalNativeToManaged = 0,
+ CustomMarshalerMethods_MarshalManagedToNative,
+ CustomMarshalerMethods_CleanUpNativeData,
+ CustomMarshalerMethods_CleanUpManagedData,
+ CustomMarshalerMethods_GetNativeDataSize,
+ CustomMarshalerMethods_GetInstance,
+ CustomMarshalerMethods_LastMember
+};
+
+
+class CustomMarshalerInfo
+{
+public:
+ // Constructor and destructor.
+ CustomMarshalerInfo(BaseDomain* pDomain, TypeHandle hndCustomMarshalerType, TypeHandle hndManagedType, LPCUTF8 strCookie, DWORD cCookieStrBytes);
+ ~CustomMarshalerInfo();
+
+ // CustomMarshalerInfo objects are always allocated on the loader heap, so we redefine
+ // the new and delete operators to ensure this.
+ void* operator new(size_t size, LoaderHeap* pHeap);
+ void operator delete(void* pMem);
+
+ // Helpers used to invoke the different methods in the ICustomMarshaler interface.
+ OBJECTREF InvokeMarshalNativeToManagedMeth(void* pNative);
+ void* InvokeMarshalManagedToNativeMeth(OBJECTREF MngObj);
+ void InvokeCleanUpNativeMeth(void* pNative);
+ void InvokeCleanUpManagedMeth(OBJECTREF MngObj);
+
+ // Accessors.
+ int GetNativeSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_NativeSize;
+ }
+
+ int GetManagedSize()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_hndManagedType.GetSize();
+ }
+
+ TypeHandle GetManagedType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_hndManagedType;
+ }
+
+ BOOL IsDataByValue()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_bDataIsByValue;
+ }
+
+ OBJECTHANDLE GetCustomMarshaler()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_hndCustomMarshaler;
+ }
+
+ TypeHandle GetCustomMarshalerType()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ return ObjectFromHandle(m_hndCustomMarshaler)->GetTypeHandle();
+ }
+
+ // Helper function to retrieve a custom marshaler method desc.
+ static MethodDesc* GetCustomMarshalerMD(EnumCustomMarshalerMethods Method, TypeHandle hndCustomMarshalertype);
+
+ // Link used to contain this CM info in a linked list.
+ SLink m_Link;
+
+private:
+ int m_NativeSize;
+ TypeHandle m_hndManagedType;
+ OBJECTHANDLE m_hndCustomMarshaler;
+ MethodDesc* m_pMarshalNativeToManagedMD;
+ MethodDesc* m_pMarshalManagedToNativeMD;
+ MethodDesc* m_pCleanUpNativeDataMD;
+ MethodDesc* m_pCleanUpManagedDataMD;
+ BOOL m_bDataIsByValue;
+};
+
+
+typedef SList<CustomMarshalerInfo, true> CMINFOLIST;
+
+
+class EECMHelperHashtableKey
+{
+public:
+ EECMHelperHashtableKey(DWORD cMarshalerTypeNameBytes, LPCSTR strMarshalerTypeName, DWORD cCookieStrBytes, LPCSTR strCookie, Instantiation instantiation, BOOL bSharedHelper)
+ : m_cMarshalerTypeNameBytes(cMarshalerTypeNameBytes)
+ , m_strMarshalerTypeName(strMarshalerTypeName)
+ , m_cCookieStrBytes(cCookieStrBytes)
+ , m_strCookie(strCookie)
+ , m_Instantiation(instantiation)
+ , m_bSharedHelper(bSharedHelper)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ inline DWORD GetMarshalerTypeNameByteCount() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_cMarshalerTypeNameBytes;
+ }
+ inline LPCSTR GetMarshalerTypeName() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_strMarshalerTypeName;
+ }
+ inline LPCSTR GetCookieString() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_strCookie;
+ }
+ inline ULONG GetCookieStringByteCount() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_cCookieStrBytes;
+ }
+ inline Instantiation GetMarshalerInstantiation() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Instantiation;
+ }
+ inline BOOL IsSharedHelper() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_bSharedHelper;
+ }
+
+
+ DWORD m_cMarshalerTypeNameBytes;
+ LPCSTR m_strMarshalerTypeName;
+ DWORD m_cCookieStrBytes;
+ LPCSTR m_strCookie;
+ Instantiation m_Instantiation;
+ BOOL m_bSharedHelper;
+};
+
+
+class EECMHelperHashtableHelper
+{
+public:
+ static EEHashEntry_t* AllocateEntry(EECMHelperHashtableKey* pKey, BOOL bDeepCopy, AllocationHeap Heap);
+ static void DeleteEntry(EEHashEntry_t* pEntry, AllocationHeap Heap);
+ static BOOL CompareKeys(EEHashEntry_t* pEntry, EECMHelperHashtableKey* pKey);
+ static DWORD Hash(EECMHelperHashtableKey* pKey);
+};
+
+
+typedef EEHashTable<EECMHelperHashtableKey*, EECMHelperHashtableHelper, TRUE> EECMHelperHashTable;
+
+
+class CustomMarshalerHelper
+{
+public:
+ // Helpers used to invoke the different methods in the ICustomMarshaler interface.
+ OBJECTREF InvokeMarshalNativeToManagedMeth(void* pNative);
+ void* InvokeMarshalManagedToNativeMeth(OBJECTREF MngObj);
+ void InvokeCleanUpNativeMeth(void* pNative);
+ void InvokeCleanUpManagedMeth(OBJECTREF MngObj);
+
+ // Accessors.
+ int GetNativeSize()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetCustomMarshalerInfo()->GetNativeSize();
+ }
+
+ int GetManagedSize()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetCustomMarshalerInfo()->GetManagedSize();
+ }
+
+ TypeHandle GetManagedType()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetCustomMarshalerInfo()->GetManagedType();
+ }
+
+ BOOL IsDataByValue()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetCustomMarshalerInfo()->IsDataByValue();
+ }
+
+ // Helper function to retrieve the custom marshaler object.
+ virtual CustomMarshalerInfo* GetCustomMarshalerInfo() = 0;
+
+protected:
+ ~CustomMarshalerHelper()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+};
+
+
+class NonSharedCustomMarshalerHelper : public CustomMarshalerHelper
+{
+public:
+ // Constructor.
+ NonSharedCustomMarshalerHelper(CustomMarshalerInfo* pCMInfo) : m_pCMInfo(pCMInfo)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ // CustomMarshalerHelpers are always allocated on the loader heap, so we redefine
+ // the new and delete operators to ensure this.
+ void *operator new(size_t size, LoaderHeap *pHeap);
+ void operator delete(void* pMem);
+
+protected:
+ // Helper function to retrieve the custom marshaler object.
+ virtual CustomMarshalerInfo* GetCustomMarshalerInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCMInfo;
+ }
+
+private:
+ CustomMarshalerInfo* m_pCMInfo;
+};
+
+
+class SharedCustomMarshalerHelper : public CustomMarshalerHelper
+{
+public:
+ // Constructor.
+ SharedCustomMarshalerHelper(Assembly* pAssembly, TypeHandle hndManagedType, LPCUTF8 strMarshalerTypeName, DWORD cMarshalerTypeNameBytes, LPCUTF8 strCookie, DWORD cCookieStrBytes);
+
+ // CustomMarshalerHelpers are always allocated on the loader heap, so we redefine
+ // the new and delete operators to ensure this.
+ void* operator new(size_t size, LoaderHeap* pHeap);
+ void operator delete(void* pMem);
+
+ // Accessors.
+ inline Assembly* GetAssembly()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pAssembly;
+ }
+
+ inline TypeHandle GetManagedType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_hndManagedType;
+ }
+
+ inline DWORD GetMarshalerTypeNameByteCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_cMarshalerTypeNameBytes;
+ }
+
+ inline LPCSTR GetMarshalerTypeName()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_strMarshalerTypeName;
+ }
+
+ inline LPCSTR GetCookieString()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_strCookie;
+ }
+
+ inline ULONG GetCookieStringByteCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_cCookieStrBytes;
+ }
+
+protected:
+ // Helper function to retrieve the custom marshaler object.
+ virtual CustomMarshalerInfo* GetCustomMarshalerInfo();
+
+private:
+ Assembly* m_pAssembly;
+ TypeHandle m_hndManagedType;
+ DWORD m_cMarshalerTypeNameBytes;
+ LPCUTF8 m_strMarshalerTypeName;
+ DWORD m_cCookieStrBytes;
+ LPCUTF8 m_strCookie;
+};
+
+
+#endif // _CUSTOMMARSHALERINFO_H_
diff --git a/src/vm/dac/.gitmirror b/src/vm/dac/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/vm/dac/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror. \ No newline at end of file
diff --git a/src/vm/dac/CMakeLists.txt b/src/vm/dac/CMakeLists.txt
new file mode 100644
index 0000000000..987b3f5695
--- /dev/null
+++ b/src/vm/dac/CMakeLists.txt
@@ -0,0 +1,4 @@
+
+include(${CLR_DIR}/dac.cmake)
+
+add_library(cee_dac ${VM_SOURCES_DAC}) \ No newline at end of file
diff --git a/src/vm/dac/dacwks.targets b/src/vm/dac/dacwks.targets
new file mode 100644
index 0000000000..fc4883923f
--- /dev/null
+++ b/src/vm/dac/dacwks.targets
@@ -0,0 +1,166 @@
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+
+ <PropertyGroup>
+ <OutputName Condition="'$(OutputName)' == ''">cee_wks_dac</OutputName>
+ <AsmDefines>$(AsmDefines) $(CDefines)</AsmDefines>
+ </PropertyGroup>
+
+ <ItemGroup>
+ <CppCompile Include="$(ClrSrcDirectory)\vm\AppDomain.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\array.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\assembly.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\binder.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\ceeload.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\certificatecache.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\class.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\classhash.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\clsload.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\codeman.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\COMDelegate.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\contexts.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\contractimpl.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\corhost.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\crst.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\decodeMD.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\debugdebugger.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\debugHelp.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\debuginfostore.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\DllImport.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\domainfile.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\dynamicmethod.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\ecall.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\EEDbgInterfaceImpl.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\eehash.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\EETwain.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\excep.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\ExState.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\field.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\formattype.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\fptrstubs.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\frames.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\GCDecode.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\genericdict.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\generics.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\hash.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\ILStubCache.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\ILStubResolver.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\instmethhash.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\inlinetracking.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\JITinterface.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\LoaderAllocator.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\memberload.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\method.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\methodimpl.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\methoditer.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\methodtable.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\object.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\pefile.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\peimage.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\peimagelayout.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\prestub.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\precode.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\rejit.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\ReadyToRunInfo.cpp" Condition="'$(FeatureReadyToRun)' == 'true'" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\remoting.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\SecurityDescriptor.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\SecurityDescriptorAssembly.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\siginfo.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\sigformat.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\stackwalk.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\stublink.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\stubmgr.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\syncblk.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\threads.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\ThreadDebugBlockingInfo.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\typedesc.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\typectxt.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\typehandle.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\typehash.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\typeString.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\util.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\vars.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\VirtualCallStub.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\ThreadPoolRequest.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\ThreadStatics.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\win32threadpool.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\hillclimbing.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\zapsig.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\BaseAssemblySpec.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\corebindresult.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\coreassemblyspec.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\CLRPrivBinderAppX.cpp" Condition="'$(FeatureAppXBinder)' == 'true'" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\CLRPrivBinderWinRT.cpp" Condition="'$(FeatureCominterop)' == 'true'" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\CLRPrivTypeCacheWinRT.cpp" Condition="'$(FeatureCominterop)' == 'true'" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\umthunkhash.cpp" Condition="'$(FeatureMixedMode)' == 'true'" />
+ </ItemGroup>
+
+ <ItemGroup>
+ <CppCompile Include="$(ClrSrcDirectory)\gc\gccommon.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\gceesvr.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\gceewks.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\gcsvr.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\gcwks.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\gcscan.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\handletable.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\handletablecore.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\handletablescan.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\objecthandle.cpp" />
+ </ItemGroup>
+
+ <ItemGroup Condition="'$(FeatureCoreclr)' != 'true'" >
+ <CppCompile Include="$(ClrSrcDirectory)\usagelog\AssemblyUsageLogger.cpp" />
+ </ItemGroup>
+
+ <ItemGroup>
+ <CppCompile Include="$(ClrSrcDirectory)\vm\EnCEE.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\mdadac.cpp" Condition="'$(FeatureMdaSupported)' == 'true'"/>
+ </ItemGroup>
+
+ <ItemGroup Condition="'$(FeatureCominterop)' == 'true'">
+ <CppCompile Include="$(ClrSrcDirectory)\vm\COMtoCLRCall.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\CLRtoCOMCall.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\rcwwalker.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\WinRTTypeNameConverter.cpp" />
+ </ItemGroup>
+
+ <ItemGroup Condition="'$(TargetArch)' == 'amd64'" >
+ <CppCompile Include="$(ClrSrcDirectory)\vm\GcInfoDecoder.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\DbgGcInfoDecoder.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\amd64\cGenAMD64.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\amd64\ExcepAMD64.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\amd64\gmsAMD64.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\amd64\stublinkerAMD64.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\exceptionhandling.cpp" />
+ </ItemGroup>
+
+ <ItemGroup Condition="'$(TargetArch)' == 'i386'" >
+ <CppCompile Include="$(ClrSrcDirectory)\vm\i386\cgenx86.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\i386\ExcepX86.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\i386\gmsX86.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\i386\stublinkerx86.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\exinfo.cpp" />
+ </ItemGroup>
+
+ <ItemGroup Condition="'$(TargetArch)' == 'arm'" >
+ <CppCompile Include="$(ClrSrcDirectory)\vm\GcInfoDecoder.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\arm\stubs.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\arm\armsinglestepper.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\exceptionhandling.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\arm\ExcepARM.cpp" />
+ </ItemGroup>
+
+ <ItemGroup Condition="'$(TargetArch)' == 'arm64'" >
+ <CppCompile Include="$(ClrSrcDirectory)\vm\arm64\stubs.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\exceptionhandling.cpp" />
+ </ItemGroup>
+
+ <ItemGroup>
+ <RotorX86Sources Include="$(ClrSrcDirectory)\vm\rotor_x86\cgenx86.cpp" />
+ <RotorX86Sources Include="$(ClrSrcDirectory)\vm\rotor_x86\gmsx86.cpp" />
+ <RotorX86Sources Include="$(ClrSrcDirectory)\vm\rotor_x86\stublinkerx86.cpp" />
+ <RotorX86Sources Include="$(ClrSrcDirectory)\vm\exinfo.cpp" />
+ </ItemGroup>
+
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\src\vm\vm.targets" />
+
+</Project>
diff --git a/src/vm/dac/dirs.proj b/src/vm/dac/dirs.proj
new file mode 100644
index 0000000000..244881aabe
--- /dev/null
+++ b/src/vm/dac/dirs.proj
@@ -0,0 +1,18 @@
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <!--Import the settings-->
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\clr.props" />
+
+ <PropertyGroup>
+ <BuildInPhase1>true</BuildInPhase1>
+ <BuildInPhaseDefault>false</BuildInPhaseDefault>
+ <BuildCoreBinaries>true</BuildCoreBinaries>
+ <BuildSysBinaries>true</BuildSysBinaries>
+ </PropertyGroup>
+
+ <!--The following projects will build during PHASE 1-->
+ <ItemGroup Condition="'$(BuildExePhase)' == '1'">
+ <ProjectFile Include="HostLocal\dacwks.nativeproj" />
+ </ItemGroup>
+
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\tools\Microsoft.DevDiv.Traversal.targets" />
+</Project>
diff --git a/src/vm/dangerousapis.h b/src/vm/dangerousapis.h
new file mode 100644
index 0000000000..e428e28f62
--- /dev/null
+++ b/src/vm/dangerousapis.h
@@ -0,0 +1,72 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+////////////////////////////////////////////////////////////////////////////////
+// This header file defines the list of dangerous APIs and
+// is used by InvokeUtil::IsDangerousMethod.
+// Dangerous APIs are the APIs that make security decisions based on the result
+// of a stack walk. When these APIs are invoked through reflection or a
+// delegate, the stack walker can easily be confused, resulting in security holes.
+////////////////////////////////////////////////////////////////////////////////
+
+#ifndef API_NAMES
+#define API_NAMES(...) __VA_ARGS__
+#endif // !API_NAMES
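+
+// How this list is typically consumed (a hypothetical sketch of the X-macro
+// pattern; the table name and entry type below are illustrative only, not the
+// actual consumer in InvokeUtil):
+//
+//   struct DangerousApiEntry { const char * szClassId; const char * rgszApis[16]; };
+//   #define DEFINE_DANGEROUS_API(classId, apiNames) { #classId, { apiNames } },
+//   static const DangerousApiEntry g_dangerousApis[] = {
+//   #include "dangerousapis.h"
+//   };
+//   #undef DEFINE_DANGEROUS_API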
+
+// ToString is never dangerous but we include it on the Runtime*Info types because of a JScript.Net compat issue.
+// JScript.Net tries to invoke these ToString APIs when a Runtime*Info object is compared to another object of a different type (e.g. a string).
+// This used to cause a SecurityException in partial trust (which JScript catches) because the API was considered dangerous.
+// Now this causes a MethodAccessException in partial trust because the API is inaccessible. So we add them back to the "dangerous" API
+// list to maintain compatibility. See Devdiv bug 419443 for details.
+DEFINE_DANGEROUS_API(APP_DOMAIN, API_NAMES("CreateInstance", "CreateComInstanceFrom", "CreateInstanceAndUnwrap", "CreateInstanceFrom", "CreateInstanceFromAndUnwrap", "DefineDynamicAssembly", "Load"))
+DEFINE_DANGEROUS_API(ASSEMBLYBASE, API_NAMES("CreateInstance", "Load"))
+DEFINE_DANGEROUS_API(ASSEMBLY, API_NAMES("CreateInstance", "Load"))
+DEFINE_DANGEROUS_API(ASSEMBLY_BUILDER, API_NAMES("CreateInstance", "DefineDynamicAssembly", "DefineDynamicModule"))
+DEFINE_DANGEROUS_API(INTERNAL_ASSEMBLY_BUILDER, API_NAMES("CreateInstance"))
+DEFINE_DANGEROUS_API(METHOD_BASE, API_NAMES("Invoke"))
+DEFINE_DANGEROUS_API(CONSTRUCTOR_INFO, API_NAMES("Invoke", \
+ "System.Runtime.InteropServices._ConstructorInfo.Invoke_2", \
+ "System.Runtime.InteropServices._ConstructorInfo.Invoke_3", \
+ "System.Runtime.InteropServices._ConstructorInfo.Invoke_4", \
+ "System.Runtime.InteropServices._ConstructorInfo.Invoke_5"))
+DEFINE_DANGEROUS_API(CONSTRUCTOR, API_NAMES("Invoke", "ToString"))
+DEFINE_DANGEROUS_API(METHOD_INFO, API_NAMES("CreateDelegate", "Invoke"))
+DEFINE_DANGEROUS_API(METHOD, API_NAMES("CreateDelegate", "Invoke", "ToString"))
+DEFINE_DANGEROUS_API(DYNAMICMETHOD, API_NAMES("CreateDelegate", "Invoke", ".ctor"))
+DEFINE_DANGEROUS_API(TYPE, API_NAMES("InvokeMember"))
+DEFINE_DANGEROUS_API(CLASS, API_NAMES("InvokeMember", "ToString"))
+DEFINE_DANGEROUS_API(TYPE_DELEGATOR, API_NAMES("InvokeMember"))
+DEFINE_DANGEROUS_API(RT_FIELD_INFO, API_NAMES("GetValue", "SetValue", "ToString"))
+DEFINE_DANGEROUS_API(FIELD_INFO, API_NAMES("GetValue", "SetValue"))
+DEFINE_DANGEROUS_API(FIELD, API_NAMES("GetValue", "SetValue", "ToString"))
+DEFINE_DANGEROUS_API(PROPERTY_INFO, API_NAMES("GetValue", "SetValue"))
+DEFINE_DANGEROUS_API(PROPERTY, API_NAMES("GetValue", "SetValue", "ToString"))
+DEFINE_DANGEROUS_API(EVENT_INFO, API_NAMES("AddEventHandler", "RemoveEventHandler"))
+DEFINE_DANGEROUS_API(EVENT, API_NAMES("AddEventHandler", "RemoveEventHandler", "ToString"))
+DEFINE_DANGEROUS_API(RESOURCE_MANAGER, API_NAMES("GetResourceSet", "InternalGetResourceSet", ".ctor"))
+
+#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORECLR)
+// The COM interfaces implemented by the reflection types.
+// The IDispatch Invoke methods are not included here because they are not implemented in mscorlib.
+DEFINE_DANGEROUS_API(ITYPE, API_NAMES("InvokeMember"))
+DEFINE_DANGEROUS_API(IASSEMBLY, API_NAMES("CreateInstance"))
+DEFINE_DANGEROUS_API(IMETHODBASE, API_NAMES("Invoke"))
+DEFINE_DANGEROUS_API(IMETHODINFO, API_NAMES("Invoke"))
+DEFINE_DANGEROUS_API(ICONSTRUCTORINFO, API_NAMES("Invoke", "Invoke_2", "Invoke_3", "Invoke_4", "Invoke_5"))
+DEFINE_DANGEROUS_API(IFIELDINFO, API_NAMES("GetValue", "SetValue"))
+DEFINE_DANGEROUS_API(IPROPERTYINFO, API_NAMES("GetValue", "SetValue"))
+DEFINE_DANGEROUS_API(IEVENTINFO, API_NAMES("AddEventHandler", "RemoveEventHandler"))
+DEFINE_DANGEROUS_API(IAPPDOMAIN, API_NAMES("CreateInstance", "CreateInstanceFrom", "DefineDynamicAssembly", "Load"))
+DEFINE_DANGEROUS_API(IREFLECT, API_NAMES("InvokeMember"))
+#endif // FEATURE_COMINTEROP && !FEATURE_CORECLR
+
+
+
+
+
+
+
diff --git a/src/vm/dataimage.cpp b/src/vm/dataimage.cpp
new file mode 100644
index 0000000000..946ffaf91e
--- /dev/null
+++ b/src/vm/dataimage.cpp
@@ -0,0 +1,2576 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#include "common.h"
+
+#ifdef FEATURE_PREJIT
+
+#include "dataimage.h"
+#ifdef BINDER
+#include "mdilmodule.h"
+#else // BINDER
+#include "compile.h"
+
+#include "field.h"
+#include "constrainedexecutionregion.h"
+#endif // BINDER
+//
+// Include Zapper infrastructure here
+//
+// dataimage.cpp is the only place where Zapper infrastructure should be used directly in the VM.
+// The rest of the VM should never use Zapper infrastructure directly, for the sake of good layering.
+// The long-term goal is to move all NGen-specific parts, such as the Save and Fixup methods, out of
+// the VM and remove dataimage.cpp completely.
+//
+#include "zapper.h"
+#ifdef BINDER
+#include "zapwriter.h"
+#include "zapimage.h"
+#include "zapimport.h"
+#else
+#include "../zap/zapwriter.h"
+#include "../zap/zapimage.h"
+#include "../zap/zapimport.h"
+#endif // BINDER
+#include "inlinetracking.h"
+
+#define NodeTypeForItemKind(kind) ((ZapNodeType)(ZapNodeType_StoredStructure + kind))
+
+class ZapStoredStructure : public ZapNode
+{
+ DWORD m_dwSize;
+ BYTE m_kind;
+ BYTE m_align;
+
+public:
+ ZapStoredStructure(DWORD dwSize, BYTE kind, BYTE align)
+ : m_dwSize(dwSize), m_kind(kind), m_align(align)
+ {
+ }
+
+ void * GetData()
+ {
+ return this + 1;
+ }
+
+ DataImage::ItemKind GetKind()
+ {
+ return (DataImage::ItemKind)m_kind;
+ }
+
+ virtual DWORD GetSize()
+ {
+ return m_dwSize;
+ }
+
+ virtual UINT GetAlignment()
+ {
+ return m_align;
+ }
+
+ virtual ZapNodeType GetType()
+ {
+ return NodeTypeForItemKind(m_kind);
+ }
+
+ virtual void Save(ZapWriter * pZapWriter);
+};
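+
+// Layout note: the payload bytes are allocated immediately after the
+// ZapStoredStructure header itself, which is why GetData() returns this + 1.
+// A minimal sketch of the allocation pattern (this mirrors the real code in
+// StoreStructureHelper below; the heap comes from the owning ZapImage):
+//
+//   void * pMemory = new (GetHeap()) BYTE[sizeof(ZapStoredStructure) + size];
+//   ZapStoredStructure * pStructure =
+//       new (pMemory) ZapStoredStructure((DWORD)size, kind, align);
+//   CopyMemory(pStructure->GetData(), data, size); // lands just past the header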
+
+inline ZapStoredStructure * AsStoredStructure(ZapNode * pNode)
+{
+ // Verify that it is one of the StoredStructure subtypes
+ _ASSERTE(pNode->GetType() >= ZapNodeType_StoredStructure);
+ return (ZapStoredStructure *)pNode;
+}
+
+struct InternedStructureKey
+{
+ InternedStructureKey(const void * data, DWORD dwSize, DataImage::ItemKind kind)
+ : m_data(data), m_dwSize(dwSize), m_kind(kind)
+ {
+ }
+
+ const void *m_data;
+ DWORD m_dwSize;
+ DataImage::ItemKind m_kind;
+};
+
+class InternedStructureTraits : public NoRemoveSHashTraits< DefaultSHashTraits<ZapStoredStructure *> >
+{
+public:
+ typedef InternedStructureKey key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return InternedStructureKey(e->GetData(), e->GetSize(), e->GetKind());
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (k1.m_dwSize == k2.m_dwSize) &&
+ (k1.m_kind == k2.m_kind) &&
+ memcmp(k1.m_data, k2.m_data, k1.m_dwSize) == 0;
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t)k.m_dwSize ^ (count_t)k.m_kind ^ HashBytes((BYTE *)k.m_data, k.m_dwSize);
+ }
+
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; return NULL; }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e == NULL; }
+};
+
+#ifdef BINDER
+DataImage::DataImage(Module *module, ZapImage *pZapImage)
+ : m_module(module),
+#else // BINDER
+DataImage::DataImage(Module *module, CEEPreloader *preloader)
+ : m_module(module),
+ m_preloader(preloader),
+#endif
+ m_iCurrentFixup(0), // Dev11 bug 181494 instrumentation
+ m_pInternedStructures(NULL),
+ m_pCurrentAssociatedMethodTable(NULL)
+{
+#ifdef BINDER
+ m_pZapImage = pZapImage;
+#else // BINDER
+ m_pZapImage = m_preloader->GetDataStore()->GetZapImage();
+#endif // BINDER
+ m_pZapImage->m_pDataImage = this;
+
+ m_pInternedStructures = new InternedStructureHashTable();
+
+#ifdef FEATURE_CORECLR
+ m_inlineTrackingMap = NULL;
+#else
+ m_inlineTrackingMap = new InlineTrackingMap();
+#endif
+}
+
+DataImage::~DataImage()
+{
+ delete m_pInternedStructures;
+ delete m_inlineTrackingMap;
+}
+
+void DataImage::PreSave()
+{
+#ifndef ZAP_HASHTABLE_TUNING
+ Preallocate();
+#endif
+}
+
+void DataImage::PostSave()
+{
+#ifdef ZAP_HASHTABLE_TUNING
+    // If ZAP_HASHTABLE_TUNING is defined, Preallocate is overloaded to print the tuning constants
+ Preallocate();
+#endif
+}
+
+#ifndef BINDER
+DWORD DataImage::GetMethodProfilingFlags(MethodDesc * pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ // We are not differentiating unboxing stubs vs. normal method descs in IBC data yet
+ if (pMD->IsUnboxingStub())
+ pMD = pMD->GetWrappedMethodDesc();
+
+ const MethodProfilingData * pData = m_methodProfilingData.LookupPtr(pMD);
+ return (pData != NULL) ? pData->flags : 0;
+}
+#endif
+
+void DataImage::SetMethodProfilingFlags(MethodDesc * pMD, DWORD flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ const MethodProfilingData * pData = m_methodProfilingData.LookupPtr(pMD);
+ if (pData != NULL)
+ {
+ const_cast<MethodProfilingData *>(pData)->flags |= flags;
+ return;
+ }
+
+ MethodProfilingData data;
+ data.pMD = pMD;
+ data.flags = flags;
+ m_methodProfilingData.Add(data);
+}
+
+void DataImage::Preallocate()
+{
+ STANDARD_VM_CONTRACT;
+
+#ifndef BINDER
+ // TODO: Move to ZapImage
+
+ PEDecoder pe((void *)m_module->GetFile()->GetManagedFileContents());
+
+ COUNT_T cbILImage = pe.GetSize();
+
+    // Curb the estimate to handle corner cases gracefully
+ cbILImage = min(cbILImage, 50000000);
+
+ PREALLOCATE_HASHTABLE(DataImage::m_structures, 0.019, cbILImage);
+ PREALLOCATE_ARRAY(DataImage::m_structuresInOrder, 0.0088, cbILImage);
+ PREALLOCATE_ARRAY(DataImage::m_Fixups, 0.046, cbILImage);
+ PREALLOCATE_HASHTABLE(DataImage::m_surrogates, 0.0025, cbILImage);
+ PREALLOCATE_HASHTABLE((*DataImage::m_pInternedStructures), 0.0007, cbILImage);
+#endif
+}
+
+ZapHeap * DataImage::GetHeap()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pZapImage->GetHeap();
+}
+
+void DataImage::AddStructureInOrder(ZapNode *pNode, BOOL fMaintainSaveOrder /*=FALSE*/)
+{
+ WRAPPER_NO_CONTRACT;
+
+ SavedNodeEntry entry;
+ entry.pNode = pNode;
+ entry.dwAssociatedOrder = 0;
+
+ if (fMaintainSaveOrder)
+ {
+ entry.dwAssociatedOrder = MAINTAIN_SAVE_ORDER;
+ }
+ else if (m_pCurrentAssociatedMethodTable)
+ {
+ TypeHandle th = TypeHandle(m_pCurrentAssociatedMethodTable);
+ entry.dwAssociatedOrder = m_pZapImage->LookupClassLayoutOrder(CORINFO_CLASS_HANDLE(th.AsPtr()));
+ }
+
+ m_structuresInOrder.Append(entry);
+}
+
+ZapStoredStructure * DataImage::StoreStructureHelper(const void *data, SIZE_T size,
+ DataImage::ItemKind kind,
+ int align,
+ BOOL fMaintainSaveOrder)
+{
+ STANDARD_VM_CONTRACT;
+
+ S_SIZE_T cbAllocSize = S_SIZE_T(sizeof(ZapStoredStructure)) + S_SIZE_T(size);
+ if(cbAllocSize.IsOverflow())
+ ThrowHR(COR_E_OVERFLOW);
+
+ void * pMemory = new (GetHeap()) BYTE[cbAllocSize.Value()];
+
+ // PE files cannot be larger than 4 GB
+ if (DWORD(size) != size)
+ ThrowHR(E_UNEXPECTED);
+
+ ZapStoredStructure * pStructure = new (pMemory) ZapStoredStructure((DWORD)size, static_cast<BYTE>(kind), static_cast<BYTE>(align));
+
+ if (data != NULL)
+ {
+ CopyMemory(pStructure->GetData(), data, size);
+ BindPointer(data, pStructure, 0);
+ }
+
+ m_pLastLookup = NULL;
+
+ AddStructureInOrder(pStructure, fMaintainSaveOrder);
+
+ return pStructure;
+}
+
+// Bind a pointer to a relative offset within a ZapNode
+void DataImage::BindPointer(const void *p, ZapNode * pNode, SSIZE_T offset)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(m_structures.LookupPtr(p) == NULL);
+
+ StructureEntry e;
+ e.ptr = p;
+ e.pNode = pNode;
+ e.offset = offset;
+ m_structures.Add(e);
+
+ m_pLastLookup = NULL;
+}
+
+void DataImage::CopyData(ZapStoredStructure * pNode, const void * p, ULONG size)
+{
+ memcpy(pNode->GetData(), p, size);
+}
+
+void DataImage::CopyDataToOffset(ZapStoredStructure * pNode, ULONG offset, const void * p, ULONG size)
+{
+ SIZE_T target = (SIZE_T) (pNode->GetData());
+ target += offset;
+
+ memcpy((void *) target, p, size);
+}
+
+void DataImage::PlaceStructureForAddress(const void * data, CorCompileSection section)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (data == NULL)
+ return;
+
+ const StructureEntry * pEntry = m_structures.LookupPtr(data);
+ if (pEntry == NULL)
+ return;
+
+ ZapNode * pNode = pEntry->pNode;
+ if (!pNode->IsPlaced())
+ {
+ ZapVirtualSection * pSection = m_pZapImage->GetSection(section);
+ pSection->Place(pNode);
+ }
+}
+
+void DataImage::PlaceInternedStructureForAddress(const void * data, CorCompileSection sectionIfReused, CorCompileSection sectionIfSingleton)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (data == NULL)
+ return;
+
+ const StructureEntry * pEntry = m_structures.LookupPtr(data);
+ if (pEntry == NULL)
+ return;
+
+ ZapNode * pNode = pEntry->pNode;
+ if (!pNode->IsPlaced())
+ {
+ CorCompileSection section = m_reusedStructures.Contains(pNode) ? sectionIfReused : sectionIfSingleton;
+ ZapVirtualSection * pSection = m_pZapImage->GetSection(section);
+ pSection->Place(pNode);
+ }
+}
+
+void DataImage::FixupPointerField(PVOID p, SSIZE_T offset)
+{
+ STANDARD_VM_CONTRACT;
+
+ PVOID pTarget = *(PVOID UNALIGNED *)((BYTE *)p + offset);
+
+ if (pTarget == NULL)
+ {
+ ZeroPointerField(p, offset);
+ return;
+ }
+
+ FixupField(p, offset, pTarget);
+}
+
+void DataImage::FixupRelativePointerField(PVOID p, SSIZE_T offset)
+{
+ STANDARD_VM_CONTRACT;
+
+ PVOID pTarget = RelativePointer<PTR_VOID>::GetValueMaybeNullAtPtr((TADDR)p + offset);
+
+ if (pTarget == NULL)
+ {
+ ZeroPointerField(p, offset);
+ return;
+ }
+
+ FixupField(p, offset, pTarget, 0, IMAGE_REL_BASED_RELPTR);
+}
+
+static void EncodeTargetOffset(PVOID pLocation, SSIZE_T targetOffset, ZapRelocationType type)
+{
+ // Store the targetOffset into the location of the reloc temporarily
+ switch (type)
+ {
+#ifdef BINDER
+ case IMAGE_REL_BASED_MD_METHODENTRY:
+#endif
+ case IMAGE_REL_BASED_PTR:
+ case IMAGE_REL_BASED_RELPTR:
+ *(UNALIGNED TADDR *)pLocation = (TADDR)targetOffset;
+ break;
+
+ case IMAGE_REL_BASED_ABSOLUTE:
+ *(UNALIGNED DWORD *)pLocation = (DWORD)targetOffset;
+ break;
+
+ case IMAGE_REL_BASED_ABSOLUTE_TAGGED:
+ _ASSERTE(targetOffset == 0);
+ *(UNALIGNED TADDR *)pLocation = 0;
+ break;
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ case IMAGE_REL_BASED_REL32:
+ *(UNALIGNED INT32 *)pLocation = (INT32)targetOffset;
+ break;
+#endif // _TARGET_X86_ || _TARGET_AMD64_
+
+ default:
+ _ASSERTE(0);
+ }
+}
+
+static SSIZE_T DecodeTargetOffset(PVOID pLocation, ZapRelocationType type)
+{
+    // Retrieve the targetOffset that was temporarily stored in the location of the reloc
+ switch (type)
+ {
+#ifdef BINDER
+ case IMAGE_REL_BASED_MD_METHODENTRY:
+#endif
+ case IMAGE_REL_BASED_PTR:
+ case IMAGE_REL_BASED_RELPTR:
+ return (SSIZE_T)*(UNALIGNED TADDR *)pLocation;
+
+ case IMAGE_REL_BASED_ABSOLUTE:
+ return *(UNALIGNED DWORD *)pLocation;
+
+ case IMAGE_REL_BASED_ABSOLUTE_TAGGED:
+ _ASSERTE(*(UNALIGNED TADDR *)pLocation == 0);
+ return 0;
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ case IMAGE_REL_BASED_REL32:
+ return *(UNALIGNED INT32 *)pLocation;
+#endif // _TARGET_X86_ || _TARGET_AMD64_
+
+ default:
+ _ASSERTE(0);
+ return 0;
+ }
+}
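+
+// EncodeTargetOffset and DecodeTargetOffset form a round trip: FixupField and
+// FixupFieldToNode stash the target offset directly in the bytes that will
+// eventually hold the relocated value, and ZapStoredStructure::Save later
+// decodes it and emits the real relocation via WriteReloc. The image bytes
+// themselves carry the offset until save time, so no side table is needed.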
+
+void DataImage::FixupField(PVOID p, SSIZE_T offset, PVOID pTarget, SSIZE_T targetOffset, ZapRelocationType type)
+{
+ STANDARD_VM_CONTRACT;
+
+ m_iCurrentFixup++; // Dev11 bug 181494 instrumentation
+
+ const StructureEntry * pEntry = m_pLastLookup;
+ if (pEntry == NULL || pEntry->ptr != p)
+ {
+ pEntry = m_structures.LookupPtr(p);
+ _ASSERTE(pEntry != NULL &&
+            "StoreStructure or BindPointer must be called on all saved data.");
+ m_pLastLookup = pEntry;
+ }
+ offset += pEntry->offset;
+ _ASSERTE(0 <= offset && (DWORD)offset < pEntry->pNode->GetSize());
+
+ const StructureEntry * pTargetEntry = m_pLastLookup;
+ if (pTargetEntry == NULL || pTargetEntry->ptr != pTarget)
+ {
+ pTargetEntry = m_structures.LookupPtr(pTarget);
+
+ _ASSERTE(pTargetEntry != NULL &&
+ "The target of the fixup is not saved into the image");
+ }
+ targetOffset += pTargetEntry->offset;
+ _ASSERTE(0 <= targetOffset && (DWORD)targetOffset <= pTargetEntry->pNode->GetSize());
+
+ FixupEntry entry;
+ entry.m_type = type;
+ entry.m_offset = (DWORD)offset;
+ entry.m_pLocation = AsStoredStructure(pEntry->pNode);
+ entry.m_pTargetNode = pTargetEntry->pNode;
+ AppendFixup(entry);
+
+ EncodeTargetOffset((BYTE *)AsStoredStructure(pEntry->pNode)->GetData() + offset, targetOffset, type);
+}
+
+void DataImage::FixupFieldToNode(PVOID p, SSIZE_T offset, ZapNode * pTarget, SSIZE_T targetOffset, ZapRelocationType type)
+{
+ STANDARD_VM_CONTRACT;
+
+ m_iCurrentFixup++; // Dev11 bug 181494 instrumentation
+
+ const StructureEntry * pEntry = m_pLastLookup;
+ if (pEntry == NULL || pEntry->ptr != p)
+ {
+ pEntry = m_structures.LookupPtr(p);
+ _ASSERTE(pEntry != NULL &&
+            "StoreStructure or BindPointer must be called on all saved data.");
+ m_pLastLookup = pEntry;
+ }
+ offset += pEntry->offset;
+ _ASSERTE(0 <= offset && (DWORD)offset < pEntry->pNode->GetSize());
+
+ _ASSERTE(pTarget != NULL);
+
+ FixupEntry entry;
+ entry.m_type = type;
+ entry.m_offset = (DWORD)offset;
+ entry.m_pLocation = AsStoredStructure(pEntry->pNode);
+ entry.m_pTargetNode = pTarget;
+ AppendFixup(entry);
+
+ EncodeTargetOffset((BYTE *)AsStoredStructure(pEntry->pNode)->GetData() + offset, targetOffset, type);
+}
+
+DWORD DataImage::GetRVA(const void *data)
+{
+ STANDARD_VM_CONTRACT;
+
+ const StructureEntry * pEntry = m_structures.LookupPtr(data);
+ _ASSERTE(pEntry != NULL);
+
+ return pEntry->pNode->GetRVA() + (DWORD)pEntry->offset;
+}
+
+void DataImage::ZeroField(PVOID p, SSIZE_T offset, SIZE_T size)
+{
+ STANDARD_VM_CONTRACT;
+
+ ZeroMemory(GetImagePointer(p, offset), size);
+}
+
+void * DataImage::GetImagePointer(ZapStoredStructure * pNode)
+{
+ return pNode->GetData();
+}
+
+void * DataImage::GetImagePointer(PVOID p, SSIZE_T offset)
+{
+ STANDARD_VM_CONTRACT;
+
+ const StructureEntry * pEntry = m_pLastLookup;
+ if (pEntry == NULL || pEntry->ptr != p)
+ {
+ pEntry = m_structures.LookupPtr(p);
+ _ASSERTE(pEntry != NULL &&
+            "StoreStructure or BindPointer must be called on all saved data.");
+ m_pLastLookup = pEntry;
+ }
+ offset += pEntry->offset;
+ _ASSERTE(0 <= offset && (DWORD)offset < pEntry->pNode->GetSize());
+
+ return (BYTE *)AsStoredStructure(pEntry->pNode)->GetData() + offset;
+}
+
+ZapNode * DataImage::GetNodeForStructure(PVOID p, SSIZE_T * pOffset)
+{
+ const StructureEntry * pEntry = m_pLastLookup;
+ if (pEntry == NULL || pEntry->ptr != p)
+ {
+ pEntry = m_structures.LookupPtr(p);
+ _ASSERTE(pEntry != NULL &&
+            "StoreStructure or BindPointer must be called on all saved data.");
+ }
+ *pOffset = pEntry->offset;
+ return pEntry->pNode;
+}
+
+ZapStoredStructure * DataImage::StoreInternedStructure(const void *data, ULONG size,
+ DataImage::ItemKind kind,
+ int align)
+{
+ STANDARD_VM_CONTRACT;
+
+ ZapStoredStructure * pStructure = m_pInternedStructures->Lookup(InternedStructureKey(data, size, kind));
+
+ if (pStructure != NULL)
+ {
+        // Just add a new mapping for this data to the existing interned structure
+ BindPointer(data, pStructure, 0);
+
+ // Track that this structure has been successfully reused by interning
+ NoteReusedStructure(data);
+ }
+ else
+ {
+ // We have not seen this structure yet. Create a new one.
+ pStructure = StoreStructure(data, size, kind);
+ m_pInternedStructures->Add(pStructure);
+ }
+
+ return pStructure;
+}
+
+void DataImage::NoteReusedStructure(const void *data)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(IsStored(data));
+
+ const StructureEntry * pEntry = m_structures.LookupPtr(data);
+
+ if (!m_reusedStructures.Contains(pEntry->pNode))
+ {
+ m_reusedStructures.Add(pEntry->pNode);
+ }
+}
+
+#ifndef BINDER
+// Save the info of an RVA into m_rvaInfoVector.
+void DataImage::StoreRvaInfo(FieldDesc * pFD,
+ DWORD rva,
+ UINT size,
+ UINT align)
+{
+ RvaInfoStructure rvaInfo;
+
+ _ASSERTE(m_module == pFD->GetModule());
+ _ASSERTE(m_module == pFD->GetLoaderModule());
+
+ rvaInfo.pFD = pFD;
+ rvaInfo.rva = rva;
+ rvaInfo.size = size;
+ rvaInfo.align = align;
+
+ m_rvaInfoVector.Append(rvaInfo);
+}
+#endif
+
+// qsort compare function.
+// Primary key: rva (ascending order). Secondary key: size (descending order).
+int __cdecl DataImage::rvaInfoVectorEntryCmp(const void* a_, const void* b_)
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ DataImage::RvaInfoStructure *a = (DataImage::RvaInfoStructure *)a_;
+ DataImage::RvaInfoStructure *b = (DataImage::RvaInfoStructure *)b_;
+ int rvaComparisonResult = (int)(a->rva - b->rva);
+ if (rvaComparisonResult!=0)
+ return rvaComparisonResult; // Ascending order on rva
+ return (int)(b->size - a->size); // Descending order on size
+}
+
+#ifndef BINDER
+// Sort the list of RVA statics in an ascending order wrt the RVA and save them.
+// For RVA structures with the same RVA, we will only store the one with the largest size.
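+// For example (hypothetical values): given entries (rva=0x100, size=8),
+// (rva=0x100, size=4) and (rva=0x200, size=16), the sort yields exactly that
+// order and only the 0x100/8 and 0x200/16 entries are stored; the smaller
+// duplicate at 0x100 is skipped because the first entry already covers it.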
+void DataImage::SaveRvaStructure()
+{
+ if (m_rvaInfoVector.IsEmpty())
+ return; // No RVA static to save
+
+ // Use qsort to sort the m_rvaInfoVector
+ qsort (&m_rvaInfoVector[0], // start of array
+ m_rvaInfoVector.GetCount(), // array size in elements
+ sizeof(RvaInfoStructure), // element size in bytes
+           rvaInfoVectorEntryCmp);        // compare function
+
+ RvaInfoStructure * previousRvaInfo = NULL;
+
+ for (COUNT_T i=0; i<m_rvaInfoVector.GetCount(); i++) {
+
+ RvaInfoStructure * rvaInfo = &(m_rvaInfoVector[i]);
+
+        // Verify that the rvaInfo->rva values are actually monotonically increasing, and that
+        // the rvaInfo->size values are monotonically decreasing when the RVAs are equal.
+ _ASSERTE(previousRvaInfo==NULL ||
+ previousRvaInfo->rva < rvaInfo->rva ||
+                 (previousRvaInfo->rva == rvaInfo->rva && previousRvaInfo->size >= rvaInfo->size)
+ );
+
+ if (previousRvaInfo==NULL || previousRvaInfo->rva != rvaInfo->rva) {
+ void * pRVAData = rvaInfo->pFD->GetStaticAddressHandle(NULL);
+
+ // Note that we force the structures to be laid out in the order we save them
+ StoreStructureInOrder(pRVAData, rvaInfo->size,
+ DataImage::ITEM_RVA_STATICS,
+ rvaInfo->align);
+ }
+
+ previousRvaInfo = rvaInfo;
+ }
+}
+#endif // !BINDER
+
+void DataImage::RegisterSurrogate(PVOID ptr, PVOID surrogate)
+{
+ STANDARD_VM_CONTRACT;
+
+ m_surrogates.Add(ptr, surrogate);
+}
+
+PVOID DataImage::LookupSurrogate(PVOID ptr)
+{
+ STANDARD_VM_CONTRACT;
+
+ const KeyValuePair<PVOID, PVOID> * pEntry = m_surrogates.LookupPtr(ptr);
+ if (pEntry == NULL)
+ return NULL;
+ return pEntry->Value();
+}
+
+// Please read comments in corcompile.h for ZapVirtualSectionType before
+// putting data items into sections.
+FORCEINLINE static CorCompileSection GetSectionForNodeType(ZapNodeType type)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch (type)
+ {
+ // SECTION_MODULE
+ case NodeTypeForItemKind(DataImage::ITEM_MODULE):
+ return CORCOMPILE_SECTION_MODULE;
+
+ // CORCOMPILE_SECTION_WRITE (Hot Writeable)
+    // Things only go in here if they are:
+    //    (a) explicitly identified by profiling data, or
+    //    (b) not covered by profiling data but known to be frequently written to
+ case NodeTypeForItemKind(DataImage::ITEM_FILEREF_MAP):
+ case NodeTypeForItemKind(DataImage::ITEM_ASSEMREF_MAP):
+ case NodeTypeForItemKind(DataImage::ITEM_DYNAMIC_STATICS_INFO_TABLE):
+ case NodeTypeForItemKind(DataImage::ITEM_DYNAMIC_STATICS_INFO_ENTRY):
+ case NodeTypeForItemKind(DataImage::ITEM_CER_RESTORE_FLAGS):
+ return CORCOMPILE_SECTION_WRITE;
+
+ // CORCOMPILE_SECTION_WRITEABLE (Cold Writeable)
+ case NodeTypeForItemKind(DataImage::ITEM_METHOD_TABLE_SPECIAL_WRITEABLE):
+ case NodeTypeForItemKind(DataImage::ITEM_METHOD_TABLE_DATA_COLD_WRITEABLE):
+ case NodeTypeForItemKind(DataImage::ITEM_DICTIONARY_WRITEABLE):
+ case NodeTypeForItemKind(DataImage::ITEM_FROZEN_OBJECTS): // sometimes the objhdr is modified
+ return CORCOMPILE_SECTION_WRITEABLE;
+
+ // SECTION_HOT
+ // Other things go in here if
+    //    (a) identified as reads by the profiling runs, or
+    //    (b) not covered by profiling data but identified as typically being read
+ case NodeTypeForItemKind(DataImage::ITEM_CER_ROOT_TABLE):
+ case NodeTypeForItemKind(DataImage::ITEM_RID_MAP_HOT):
+ case NodeTypeForItemKind(DataImage::ITEM_BINDER):
+ case NodeTypeForItemKind(DataImage::ITEM_MODULE_SECDESC):
+ case NodeTypeForItemKind(DataImage::ITEM_METHOD_DESC_HOT):
+ return CORCOMPILE_SECTION_HOT;
+
+    case NodeTypeForItemKind(DataImage::ITEM_BINDER_ITEMS): // these items are guaranteed to be hot
+ return CORCOMPILE_SECTION_READONLY_SHARED_HOT;
+
+ // SECTION_READONLY_HOT
+    case NodeTypeForItemKind(DataImage::ITEM_GC_STATIC_HANDLES_HOT): // this is assumed to be hot; it is not written to
+ case NodeTypeForItemKind(DataImage::ITEM_MODULE_CCTOR_INFO_HOT):
+ case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_BUCKETLIST_HOT):
+ case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_ENTRIES_RO_HOT):
+ return CORCOMPILE_SECTION_READONLY_HOT;
+
+ // SECTION_HOT_WRITEABLE
+ case NodeTypeForItemKind(DataImage::ITEM_METHOD_DESC_HOT_WRITEABLE):
+ case NodeTypeForItemKind(DataImage::ITEM_METHOD_TABLE_DATA_HOT_WRITEABLE):
+ case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_HOT):
+ case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_ENTRIES_HOT):
+ return CORCOMPILE_SECTION_HOT_WRITEABLE;
+
+ case NodeTypeForItemKind(DataImage::ITEM_METHOD_PRECODE_HOT_WRITEABLE):
+ return CORCOMPILE_SECTION_METHOD_PRECODE_WRITE;
+
+ case NodeTypeForItemKind(DataImage::ITEM_METHOD_PRECODE_HOT):
+ return CORCOMPILE_SECTION_METHOD_PRECODE_HOT;
+
+ // SECTION_RVA_STATICS
+ case NodeTypeForItemKind(DataImage::ITEM_RVA_STATICS):
+ return CORCOMPILE_SECTION_RVA_STATICS_COLD; // This MUST go in this section
+
+ // SECTION_WARM
+ case NodeTypeForItemKind(DataImage::ITEM_GUID_INFO):
+ case NodeTypeForItemKind(DataImage::ITEM_DICTIONARY_LAYOUT):
+ case NodeTypeForItemKind(DataImage::ITEM_EECLASS_WARM):
+ return CORCOMPILE_SECTION_WARM;
+
+ // SECTION_READONLY_WARM
+ case NodeTypeForItemKind(DataImage::ITEM_METHOD_TABLE):
+ case NodeTypeForItemKind(DataImage::ITEM_VTABLE_CHUNK):
+ case NodeTypeForItemKind(DataImage::ITEM_INTERFACE_MAP):
+ case NodeTypeForItemKind(DataImage::ITEM_DICTIONARY):
+ case NodeTypeForItemKind(DataImage::ITEM_DISPATCH_MAP):
+ case NodeTypeForItemKind(DataImage::ITEM_GENERICS_STATIC_FIELDDESCS):
+ case NodeTypeForItemKind(DataImage::ITEM_GC_STATIC_HANDLES_COLD):
+ case NodeTypeForItemKind(DataImage::ITEM_MODULE_CCTOR_INFO_COLD):
+ case NodeTypeForItemKind(DataImage::ITEM_STORED_METHOD_NAME):
+ case NodeTypeForItemKind(DataImage::ITEM_PROPERTY_NAME_SET):
+ case NodeTypeForItemKind(DataImage::ITEM_STORED_METHOD_SIG_READONLY_WARM):
+ return CORCOMPILE_SECTION_READONLY_WARM;
+
+ // SECTION_CLASS_COLD
+ case NodeTypeForItemKind(DataImage::ITEM_PARAM_TYPEDESC):
+ case NodeTypeForItemKind(DataImage::ITEM_ARRAY_TYPEDESC):
+ case NodeTypeForItemKind(DataImage::ITEM_EECLASS):
+ case NodeTypeForItemKind(DataImage::ITEM_FIELD_MARSHALERS):
+ case NodeTypeForItemKind(DataImage::ITEM_FPTR_TYPEDESC):
+#ifdef FEATURE_COMINTEROP
+ case NodeTypeForItemKind(DataImage::ITEM_SPARSE_VTABLE_MAP_TABLE):
+#endif // FEATURE_COMINTEROP
+ return CORCOMPILE_SECTION_CLASS_COLD;
+
+ //SECTION_READONLY_COLD
+ case NodeTypeForItemKind(DataImage::ITEM_FIELD_DESC_LIST):
+ case NodeTypeForItemKind(DataImage::ITEM_ENUM_VALUES):
+ case NodeTypeForItemKind(DataImage::ITEM_ENUM_NAME_POINTERS):
+ case NodeTypeForItemKind(DataImage::ITEM_ENUM_NAME):
+ case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_BUCKETLIST_COLD):
+ case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_ENTRIES_RO_COLD):
+ case NodeTypeForItemKind(DataImage::ITEM_STORED_METHOD_SIG_READONLY):
+#ifdef FEATURE_COMINTEROP
+ case NodeTypeForItemKind(DataImage::ITEM_SPARSE_VTABLE_MAP_ENTRIES):
+#endif // FEATURE_COMINTEROP
+ case NodeTypeForItemKind(DataImage::ITEM_CLASS_VARIANCE_INFO):
+ return CORCOMPILE_SECTION_READONLY_COLD;
+
+ // SECTION_CROSS_DOMAIN_INFO
+ case NodeTypeForItemKind(DataImage::ITEM_CROSS_DOMAIN_INFO):
+ case NodeTypeForItemKind(DataImage::ITEM_VTS_INFO):
+ return CORCOMPILE_SECTION_CROSS_DOMAIN_INFO;
+
+ // SECTION_METHOD_DESC_COLD
+ case NodeTypeForItemKind(DataImage::ITEM_METHOD_DESC_COLD):
+ return CORCOMPILE_SECTION_METHOD_DESC_COLD;
+
+ case NodeTypeForItemKind(DataImage::ITEM_METHOD_DESC_COLD_WRITEABLE):
+ case NodeTypeForItemKind(DataImage::ITEM_STORED_METHOD_SIG):
+ return CORCOMPILE_SECTION_METHOD_DESC_COLD_WRITEABLE;
+
+ case NodeTypeForItemKind(DataImage::ITEM_METHOD_PRECODE_COLD):
+ return CORCOMPILE_SECTION_METHOD_PRECODE_COLD;
+
+ case NodeTypeForItemKind(DataImage::ITEM_METHOD_PRECODE_COLD_WRITEABLE):
+ return CORCOMPILE_SECTION_METHOD_PRECODE_COLD_WRITEABLE;
+
+ // SECTION_MODULE_COLD
+ case NodeTypeForItemKind(DataImage::ITEM_TYPEDEF_MAP):
+ case NodeTypeForItemKind(DataImage::ITEM_TYPEREF_MAP):
+ case NodeTypeForItemKind(DataImage::ITEM_METHODDEF_MAP):
+ case NodeTypeForItemKind(DataImage::ITEM_FIELDDEF_MAP):
+ case NodeTypeForItemKind(DataImage::ITEM_MEMBERREF_MAP):
+ case NodeTypeForItemKind(DataImage::ITEM_GENERICPARAM_MAP):
+ case NodeTypeForItemKind(DataImage::ITEM_GENERICTYPEDEF_MAP):
+ case NodeTypeForItemKind(DataImage::ITEM_PROPERTYINFO_MAP):
+ case NodeTypeForItemKind(DataImage::ITEM_TYVAR_TYPEDESC):
+ case NodeTypeForItemKind(DataImage::ITEM_EECLASS_COLD):
+ case NodeTypeForItemKind(DataImage::ITEM_CER_METHOD_LIST):
+ case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_COLD):
+ case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_ENTRIES_COLD):
+ return CORCOMPILE_SECTION_MODULE_COLD;
+
+ // SECTION_DEBUG_COLD
+ case NodeTypeForItemKind(DataImage::ITEM_DEBUG):
+ case NodeTypeForItemKind(DataImage::ITEM_INLINING_DATA):
+ return CORCOMPILE_SECTION_DEBUG_COLD;
+
+ // SECTION_COMPRESSED_MAPS
+ case NodeTypeForItemKind(DataImage::ITEM_COMPRESSED_MAP):
+ return CORCOMPILE_SECTION_COMPRESSED_MAPS;
+
+ default:
+ _ASSERTE(!"Missing mapping between type and section");
+ return CORCOMPILE_SECTION_MODULE_COLD;
+ }
+}
+
+static int __cdecl LayoutOrderCmp(const void* a_, const void* b_)
+{
+ DWORD a = ((DataImage::SavedNodeEntry*)a_)->dwAssociatedOrder;
+ DWORD b = ((DataImage::SavedNodeEntry*)b_)->dwAssociatedOrder;
+
+ if (a > b)
+ {
+ return 1;
+ }
+ else
+ {
+ return (a < b) ? -1 : 0;
+ }
+}
+
+void DataImage::PlaceRemainingStructures()
+{
+ if (m_pZapImage->HasClassLayoutOrder())
+ {
+ // The structures are currently in save order; since we are going to change
+ // that to class layout order, first place any that require us to maintain save order.
+ // Note that this is necessary because qsort is not stable.
+ for (COUNT_T iStructure = 0; iStructure < m_structuresInOrder.GetCount(); iStructure++)
+ {
+ if (m_structuresInOrder[iStructure].dwAssociatedOrder == MAINTAIN_SAVE_ORDER)
+ {
+ ZapNode * pStructure = m_structuresInOrder[iStructure].pNode;
+ if (!pStructure->IsPlaced())
+ {
+ ZapVirtualSection * pSection = m_pZapImage->GetSection(GetSectionForNodeType(pStructure->GetType()));
+ pSection->Place(pStructure);
+ }
+ }
+ }
+
+ qsort(&m_structuresInOrder[0], m_structuresInOrder.GetCount(), sizeof(SavedNodeEntry), LayoutOrderCmp);
+ }
+
+ // Place the unplaced structures, which may have been re-sorted according to class-layout order
+ for (COUNT_T iStructure = 0; iStructure < m_structuresInOrder.GetCount(); iStructure++)
+ {
+ ZapNode * pStructure = m_structuresInOrder[iStructure].pNode;
+ if (!pStructure->IsPlaced())
+ {
+ ZapVirtualSection * pSection = m_pZapImage->GetSection(GetSectionForNodeType(pStructure->GetType()));
+ pSection->Place(pStructure);
+ }
+ }
+}
+
+int __cdecl DataImage::fixupEntryCmp(const void* a_, const void* b_)
+{
+ LIMITED_METHOD_CONTRACT;
+ FixupEntry *a = (FixupEntry *)a_;
+ FixupEntry *b = (FixupEntry *)b_;
+ return (a->m_pLocation->GetRVA() + a->m_offset) - (b->m_pLocation->GetRVA() + b->m_offset);
+}
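+
+// The fixups are sorted by the final RVA of the location they patch so that
+// ZapStoredStructure::Save, which runs over the stored nodes in increasing RVA
+// order, can consume them with a single monotonically advancing cursor
+// (m_iCurrentFixup) instead of a per-node lookup.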
+
+void DataImage::FixupRVAs()
+{
+ STANDARD_VM_CONTRACT;
+
+ FixupModuleRVAs();
+#ifndef BINDER
+ FixupRvaStructure();
+
+ if (m_module->m_pCerNgenRootTable != NULL)
+ m_module->m_pCerNgenRootTable->FixupRVAs(this);
+
+ // Dev11 bug 181494 instrumentation
+ if (m_Fixups.GetCount() != m_iCurrentFixup) EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+
+#endif // !BINDER
+
+ qsort(&m_Fixups[0], m_Fixups.GetCount(), sizeof(FixupEntry), fixupEntryCmp);
+
+    // Sentinel entry: lets ZapStoredStructure::Save walk the sorted fixup list without a bounds check
+ FixupEntry entry;
+
+ entry.m_type = 0;
+ entry.m_offset = 0;
+ entry.m_pLocation = NULL;
+ entry.m_pTargetNode = NULL;
+
+ m_Fixups.Append(entry);
+
+#ifndef BINDER
+ // Dev11 bug 181494 instrumentation
+ if (m_Fixups.GetCount() -1 != m_iCurrentFixup) EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+#endif
+ m_iCurrentFixup = 0;
+}
+
+#ifndef BINDER
+void DataImage::SetRVAsForFields(IMetaDataEmit * pEmit)
+{
+ for (COUNT_T i=0; i<m_rvaInfoVector.GetCount(); i++) {
+
+ RvaInfoStructure * rvaInfo = &(m_rvaInfoVector[i]);
+
+ void * pRVAData = rvaInfo->pFD->GetStaticAddressHandle(NULL);
+
+ DWORD dwOffset = GetRVA(pRVAData);
+
+ pEmit->SetRVA(rvaInfo->pFD->GetMemberDef(), dwOffset);
+ }
+}
+#endif // !BINDER
+
+void ZapStoredStructure::Save(ZapWriter * pWriter)
+{
+ DataImage * image = ZapImage::GetImage(pWriter)->m_pDataImage;
+
+ DataImage::FixupEntry * pPrevFixupEntry = NULL;
+
+ for (;;)
+ {
+ DataImage::FixupEntry * pFixupEntry = &(image->m_Fixups[image->m_iCurrentFixup]);
+
+ if (pFixupEntry->m_pLocation != this)
+ {
+ _ASSERTE(pFixupEntry->m_pLocation == NULL ||
+ GetRVA() + GetSize() <= pFixupEntry->m_pLocation->GetRVA());
+ break;
+ }
+
+ PVOID pLocation = (BYTE *)GetData() + pFixupEntry->m_offset;
+
+ if (pPrevFixupEntry == NULL || pPrevFixupEntry->m_offset != pFixupEntry->m_offset)
+ {
+ SSIZE_T targetOffset = DecodeTargetOffset(pLocation, pFixupEntry->m_type);
+
+#ifdef _DEBUG
+            // All pointers in EE data structures should be aligned. This is important to
+            // avoid straddling relocations, which cause issues with ASLR.
+ if (pFixupEntry->m_type == IMAGE_REL_BASED_PTR)
+ {
+ _ASSERTE(IS_ALIGNED(pWriter->GetCurrentRVA() + pFixupEntry->m_offset, sizeof(TADDR)));
+ }
+#endif
+
+ ZapImage::GetImage(pWriter)->WriteReloc(
+ GetData(),
+ pFixupEntry->m_offset,
+ pFixupEntry->m_pTargetNode,
+ (int)targetOffset,
+ pFixupEntry->m_type);
+ }
+ else
+ {
+ // It's fine to have duplicate fixup entries, but they must target the same data.
+ // If this assert fires, Fixup* was called twice on the same field in an NGen'd
+ // structure with different targets, which likely indicates the current structure
+ // was illegally interned or shared.
+ _ASSERTE(pPrevFixupEntry->m_type == pFixupEntry->m_type);
+            _ASSERTE(pPrevFixupEntry->m_pTargetNode == pFixupEntry->m_pTargetNode);
+ }
+
+ pPrevFixupEntry = pFixupEntry;
+ image->m_iCurrentFixup++;
+ }
+
+ pWriter->Write(GetData(), m_dwSize);
+}
+
+void DataImage::FixupSectionRange(SIZE_T offset, ZapNode * pNode)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pNode->GetSize() != 0)
+ {
+ FixupFieldToNode(m_module->m_pNGenLayoutInfo, offset, pNode);
+
+ SIZE_T * pSize = (SIZE_T *)((BYTE *)GetImagePointer(m_module->m_pNGenLayoutInfo) + offset + sizeof(TADDR));
+ *pSize = pNode->GetSize();
+ }
+}
+
+void DataImage::FixupSectionPtr(SIZE_T offset, ZapNode * pNode)
+{
+ if (pNode->GetSize() != 0)
+ FixupFieldToNode(m_module->m_pNGenLayoutInfo, offset, pNode);
+}
+
+void DataImage::FixupJumpStubPtr(SIZE_T offset, CorInfoHelpFunc ftnNum)
+{
+ ZapNode * pNode = m_pZapImage->GetHelperThunkIfExists(ftnNum);
+ if (pNode != NULL)
+ FixupFieldToNode(m_module->m_pNGenLayoutInfo, offset, pNode);
+}
+
+void DataImage::FixupModuleRVAs()
+{
+ STANDARD_VM_CONTRACT;
+
+ FixupSectionRange(offsetof(NGenLayoutInfo, m_CodeSections[0]), m_pZapImage->m_pHotCodeSection);
+ FixupSectionRange(offsetof(NGenLayoutInfo, m_CodeSections[1]), m_pZapImage->m_pCodeSection);
+ FixupSectionRange(offsetof(NGenLayoutInfo, m_CodeSections[2]), m_pZapImage->m_pColdCodeSection);
+
+ NGenLayoutInfo * pSavedNGenLayoutInfo = (NGenLayoutInfo *)GetImagePointer(m_module->m_pNGenLayoutInfo);
+
+ COUNT_T nHotRuntimeFunctions = m_pZapImage->m_pHotRuntimeFunctionSection->GetNodeCount();
+ if (nHotRuntimeFunctions != 0)
+ {
+ pSavedNGenLayoutInfo->m_nRuntimeFunctions[0] = nHotRuntimeFunctions;
+
+ FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_UnwindInfoLookupTable[0]), m_pZapImage->m_pHotRuntimeFunctionLookupSection);
+ pSavedNGenLayoutInfo->m_UnwindInfoLookupTableEntryCount[0] = m_pZapImage->m_pHotRuntimeFunctionLookupSection->GetSize() / sizeof(DWORD) - 1;
+
+ FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_MethodDescs[0]), m_pZapImage->m_pHotCodeMethodDescsSection);
+
+ FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_pRuntimeFunctions[0]), m_pZapImage->m_pHotRuntimeFunctionSection);
+ }
+
+ COUNT_T nRuntimeFunctions = m_pZapImage->m_pRuntimeFunctionSection->GetNodeCount();
+ if (nRuntimeFunctions != 0)
+ {
+ pSavedNGenLayoutInfo->m_nRuntimeFunctions[1] = nRuntimeFunctions;
+
+ FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_UnwindInfoLookupTable[1]), m_pZapImage->m_pRuntimeFunctionLookupSection);
+ pSavedNGenLayoutInfo->m_UnwindInfoLookupTableEntryCount[1] = m_pZapImage->m_pRuntimeFunctionLookupSection->GetSize() / sizeof(DWORD) - 1;
+
+ FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_MethodDescs[1]), m_pZapImage->m_pCodeMethodDescsSection);
+
+ FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_pRuntimeFunctions[1]), m_pZapImage->m_pRuntimeFunctionSection);
+ }
+
+ COUNT_T nColdRuntimeFunctions = m_pZapImage->m_pColdRuntimeFunctionSection->GetNodeCount();
+ if (nColdRuntimeFunctions != 0)
+ {
+ pSavedNGenLayoutInfo->m_nRuntimeFunctions[2] = nColdRuntimeFunctions;
+
+ FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_pRuntimeFunctions[2]), m_pZapImage->m_pColdRuntimeFunctionSection);
+ }
+
+ if (m_pZapImage->m_pColdCodeMapSection->GetNodeCount() != 0)
+ {
+ FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_ColdCodeMap), m_pZapImage->m_pColdCodeMapSection);
+ }
+
+ FixupSectionRange(offsetof(NGenLayoutInfo, m_Precodes[0]), m_pZapImage->GetSection(CORCOMPILE_SECTION_METHOD_PRECODE_HOT));
+ FixupSectionRange(offsetof(NGenLayoutInfo, m_Precodes[1]), m_pZapImage->GetSection(CORCOMPILE_SECTION_METHOD_PRECODE_COLD));
+ FixupSectionRange(offsetof(NGenLayoutInfo, m_Precodes[2]), m_pZapImage->GetSection(CORCOMPILE_SECTION_METHOD_PRECODE_WRITE));
+ FixupSectionRange(offsetof(NGenLayoutInfo, m_Precodes[3]), m_pZapImage->GetSection(CORCOMPILE_SECTION_METHOD_PRECODE_COLD_WRITEABLE));
+
+ FixupSectionRange(offsetof(NGenLayoutInfo, m_JumpStubs), m_pZapImage->m_pHelperTableSection);
+ FixupSectionRange(offsetof(NGenLayoutInfo, m_StubLinkStubs), m_pZapImage->m_pStubsSection);
+ FixupSectionRange(offsetof(NGenLayoutInfo, m_VirtualMethodThunks), m_pZapImage->m_pVirtualImportThunkSection);
+ FixupSectionRange(offsetof(NGenLayoutInfo, m_ExternalMethodThunks), m_pZapImage->m_pExternalMethodThunkSection);
+
+ if (m_pZapImage->m_pExceptionInfoLookupTable->GetSize() != 0)
+ FixupSectionRange(offsetof(NGenLayoutInfo, m_ExceptionInfoLookupTable), m_pZapImage->m_pExceptionInfoLookupTable);
+
+ FixupJumpStubPtr(offsetof(NGenLayoutInfo, m_pPrestubJumpStub), CORINFO_HELP_EE_PRESTUB);
+#ifdef HAS_FIXUP_PRECODE
+ FixupJumpStubPtr(offsetof(NGenLayoutInfo, m_pPrecodeFixupJumpStub), CORINFO_HELP_EE_PRECODE_FIXUP);
+#endif
+ FixupJumpStubPtr(offsetof(NGenLayoutInfo, m_pVirtualImportFixupJumpStub), CORINFO_HELP_EE_VTABLE_FIXUP);
+ FixupJumpStubPtr(offsetof(NGenLayoutInfo, m_pExternalMethodFixupJumpStub), CORINFO_HELP_EE_EXTERNAL_FIXUP);
+
+ ZapNode * pFilterPersonalityRoutine = m_pZapImage->GetHelperThunkIfExists(CORINFO_HELP_EE_PERSONALITY_ROUTINE_FILTER_FUNCLET);
+ if (pFilterPersonalityRoutine != NULL)
+ FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_rvaFilterPersonalityRoutine), pFilterPersonalityRoutine, 0, IMAGE_REL_BASED_ABSOLUTE);
+}
+
+#ifndef BINDER
+
+void DataImage::FixupRvaStructure()
+{
+ STANDARD_VM_CONTRACT;
+
+ for (COUNT_T i=0; i<m_rvaInfoVector.GetCount(); i++) {
+
+ RvaInfoStructure * rvaInfo = &(m_rvaInfoVector[i]);
+
+ void * pRVAData = rvaInfo->pFD->GetStaticAddressHandle(NULL);
+
+ DWORD dwOffset = GetRVA(pRVAData);
+
+ FieldDesc * pNewFD = (FieldDesc *)GetImagePointer(rvaInfo->pFD);
+ pNewFD->SetOffset(dwOffset);
+ }
+}
+
+ZapNode * DataImage::GetCodeAddress(MethodDesc * method)
+{
+ ZapMethodHeader * pMethod = m_pZapImage->GetCompiledMethod((CORINFO_METHOD_HANDLE)method);
+ return (pMethod != NULL) ? pMethod->GetCode() : NULL;
+}
+
+BOOL DataImage::CanDirectCall(MethodDesc * method, CORINFO_ACCESS_FLAGS accessFlags)
+{
+ return m_pZapImage->canIntraModuleDirectCall(NULL, (CORINFO_METHOD_HANDLE)method, NULL, accessFlags);
+}
+
+ZapNode * DataImage::GetFixupList(MethodDesc * method)
+{
+ ZapMethodHeader * pMethod = m_pZapImage->GetCompiledMethod((CORINFO_METHOD_HANDLE)method);
+ return (pMethod != NULL) ? pMethod->GetFixupList() : NULL;
+}
+
+ZapNode * DataImage::GetHelperThunk(CorInfoHelpFunc ftnNum)
+{
+ return m_pZapImage->GetHelperThunk(ftnNum);
+}
+
+ZapNode * DataImage::GetTypeHandleImport(TypeHandle th, PVOID pUniqueId)
+{
+ ZapImport * pImport = m_pZapImage->GetImportTable()->GetClassHandleImport(CORINFO_CLASS_HANDLE(th.AsPtr()), pUniqueId);
+ if (!pImport->IsPlaced())
+ m_pZapImage->GetImportTable()->PlaceImport(pImport);
+ return pImport;
+}
+
+ZapNode * DataImage::GetMethodHandleImport(MethodDesc * pMD)
+{
+ ZapImport * pImport = m_pZapImage->GetImportTable()->GetMethodHandleImport(CORINFO_METHOD_HANDLE(pMD));
+ if (!pImport->IsPlaced())
+ m_pZapImage->GetImportTable()->PlaceImport(pImport);
+ return pImport;
+}
+
+ZapNode * DataImage::GetFieldHandleImport(FieldDesc * pMD)
+{
+ ZapImport * pImport = m_pZapImage->GetImportTable()->GetFieldHandleImport(CORINFO_FIELD_HANDLE(pMD));
+ if (!pImport->IsPlaced())
+ m_pZapImage->GetImportTable()->PlaceImport(pImport);
+ return pImport;
+}
+
+ZapNode * DataImage::GetModuleHandleImport(Module * pModule)
+{
+ ZapImport * pImport = m_pZapImage->GetImportTable()->GetModuleHandleImport(CORINFO_MODULE_HANDLE(pModule));
+ if (!pImport->IsPlaced())
+ m_pZapImage->GetImportTable()->PlaceImport(pImport);
+ return pImport;
+}
+
+DWORD DataImage::GetModuleImportIndex(Module * pModule)
+{
+ return m_pZapImage->GetImportTable()->GetIndexOfModule((CORINFO_MODULE_HANDLE)pModule);
+}
+
+ZapNode * DataImage::GetExistingTypeHandleImport(TypeHandle th)
+{
+ ZapImport * pImport = m_pZapImage->GetImportTable()->GetExistingClassHandleImport(CORINFO_CLASS_HANDLE(th.AsPtr()));
+ return (pImport != NULL && pImport->IsPlaced()) ? pImport : NULL;
+}
+
+ZapNode * DataImage::GetExistingMethodHandleImport(MethodDesc * pMD)
+{
+ ZapImport * pImport = m_pZapImage->GetImportTable()->GetExistingMethodHandleImport(CORINFO_METHOD_HANDLE(pMD));
+ return (pImport != NULL && pImport->IsPlaced()) ? pImport : NULL;
+}
+
+ZapNode * DataImage::GetExistingFieldHandleImport(FieldDesc * pFD)
+{
+ ZapImport * pImport = m_pZapImage->GetImportTable()->GetExistingFieldHandleImport(CORINFO_FIELD_HANDLE(pFD));
+ return (pImport != NULL && pImport->IsPlaced()) ? pImport : NULL;
+}
+
+ZapNode * DataImage::GetVirtualImportThunk(MethodTable * pMT, MethodDesc * pMD, int slotNumber)
+{
+ _ASSERTE(pMD == pMT->GetMethodDescForSlot(slotNumber));
+ _ASSERTE(!pMD->IsGenericMethodDefinition());
+
+ ZapImport * pImport = m_pZapImage->GetImportTable()->GetVirtualImportThunk(CORINFO_METHOD_HANDLE(pMD), slotNumber);
+ if (!pImport->IsPlaced())
+ m_pZapImage->GetImportTable()->PlaceVirtualImportThunk(pImport);
+ return pImport;
+}
+
+ZapNode * DataImage::GetGenericSignature(PVOID signature, BOOL fMethod)
+{
+ ZapGenericSignature * pGenericSignature = m_pZapImage->GetImportTable()->GetGenericSignature(signature, fMethod);
+ if (!pGenericSignature->IsPlaced())
+ m_pZapImage->GetImportTable()->PlaceBlob(pGenericSignature);
+ return pGenericSignature;
+}
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
+class ZapStubPrecode : public ZapNode
+{
+protected:
+ MethodDesc * m_pMD;
+ DataImage::ItemKind m_kind;
+
+public:
+ ZapStubPrecode(MethodDesc * pMethod, DataImage::ItemKind kind)
+ : m_pMD(pMethod), m_kind(kind)
+ {
+ }
+
+ virtual DWORD GetSize()
+ {
+ return sizeof(StubPrecode);
+ }
+
+ virtual UINT GetAlignment()
+ {
+ return PRECODE_ALIGNMENT;
+ }
+
+ virtual ZapNodeType GetType()
+ {
+ return NodeTypeForItemKind(m_kind);
+ }
+
+ virtual DWORD ComputeRVA(ZapWriter * pZapWriter, DWORD dwPos)
+ {
+ dwPos = AlignUp(dwPos, GetAlignment());
+
+        // Alignment for straddlers: if the m_pMethodDesc pointer slot would straddle a
+        // relocation page boundary, pad by one alignment unit so the base reloc stays within
+        // a single page. The cast helps gcc choose between AlignmentTrim(UINT,UINT) and
+        // AlignmentTrim(UINT64,UINT).
+ if (AlignmentTrim(static_cast<UINT>(dwPos + offsetof(StubPrecode, m_pMethodDesc)), RELOCATION_PAGE_SIZE) > RELOCATION_PAGE_SIZE - sizeof(TADDR))
+ dwPos += GetAlignment();
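+
+        // Worked example (hypothetical numbers): with RELOCATION_PAGE_SIZE = 0x1000 and an
+        // 8-byte TADDR, a pointer slot landing at page offset 0xFFA would span two pages
+        // (bytes 0xFFA..0x1001), so dwPos is bumped by one alignment unit; a slot at 0xFF8
+        // still fits entirely within the page and is left alone.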
+
+ SetRVA(dwPos);
+
+ dwPos += GetSize();
+
+ return dwPos;
+ }
+
+ virtual void Save(ZapWriter * pZapWriter)
+ {
+ ZapImage * pImage = ZapImage::GetImage(pZapWriter);
+
+ StubPrecode precode;
+
+ precode.Init(m_pMD);
+
+ SSIZE_T offset;
+ ZapNode * pNode = pImage->m_pDataImage->GetNodeForStructure(m_pMD, &offset);
+ pImage->WriteReloc(&precode, offsetof(StubPrecode, m_pMethodDesc),
+ pNode, (int)offset, IMAGE_REL_BASED_PTR);
+
+ pImage->WriteReloc(&precode, offsetof(StubPrecode, m_rel32),
+ pImage->GetHelperThunk(CORINFO_HELP_EE_PRESTUB), 0, IMAGE_REL_BASED_REL32);
+
+ pZapWriter->Write(&precode, sizeof(precode));
+ }
+};
+
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+class ZapNDirectImportPrecode : public ZapStubPrecode
+{
+public:
+ ZapNDirectImportPrecode(MethodDesc * pMD, DataImage::ItemKind kind)
+ : ZapStubPrecode(pMD, kind)
+ {
+ }
+
+ virtual void Save(ZapWriter * pZapWriter)
+ {
+ ZapImage * pImage = ZapImage::GetImage(pZapWriter);
+
+ StubPrecode precode;
+
+ precode.Init(m_pMD);
+
+ SSIZE_T offset;
+ ZapNode * pNode = pImage->m_pDataImage->GetNodeForStructure(m_pMD, &offset);
+ pImage->WriteReloc(&precode, offsetof(StubPrecode, m_pMethodDesc),
+ pNode, (int)offset, IMAGE_REL_BASED_PTR);
+
+ pImage->WriteReloc(&precode, offsetof(StubPrecode, m_rel32),
+ pImage->GetHelperThunk(CORINFO_HELP_EE_PINVOKE_FIXUP), 0, IMAGE_REL_BASED_REL32);
+
+ pZapWriter->Write(&precode, sizeof(precode));
+ }
+};
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+
+#ifdef HAS_REMOTING_PRECODE
+class ZapRemotingPrecode : public ZapNode
+{
+ MethodDesc * m_pMD;
+ DataImage::ItemKind m_kind;
+ BOOL m_fIsPrebound;
+
+public:
+ ZapRemotingPrecode(MethodDesc * pMethod, DataImage::ItemKind kind, BOOL fIsPrebound)
+ : m_pMD(pMethod), m_kind(kind), m_fIsPrebound(fIsPrebound)
+ {
+ }
+
+ virtual DWORD GetSize()
+ {
+ return sizeof(RemotingPrecode);
+ }
+
+ virtual UINT GetAlignment()
+ {
+ return PRECODE_ALIGNMENT;
+ }
+
+ virtual ZapNodeType GetType()
+ {
+ return NodeTypeForItemKind(m_kind);
+ }
+
+ virtual DWORD ComputeRVA(ZapWriter * pZapWriter, DWORD dwPos)
+ {
+ dwPos = AlignUp(dwPos, GetAlignment());
+
+ // Alignment for straddlers
+ if (AlignmentTrim(dwPos + offsetof(RemotingPrecode, m_pMethodDesc), RELOCATION_PAGE_SIZE) > RELOCATION_PAGE_SIZE - sizeof(TADDR))
+ dwPos += GetAlignment();
+
+ SetRVA(dwPos);
+
+ dwPos += GetSize();
+
+ return dwPos;
+ }
+
+ virtual void Save(ZapWriter * pZapWriter)
+ {
+ ZapImage * pImage = ZapImage::GetImage(pZapWriter);
+
+ RemotingPrecode precode;
+
+ precode.Init(m_pMD);
+
+ SSIZE_T offset;
+ ZapNode * pNode = pImage->m_pDataImage->GetNodeForStructure(m_pMD, &offset);
+ pImage->WriteReloc(&precode, offsetof(RemotingPrecode, m_pMethodDesc),
+ pNode, offset, IMAGE_REL_BASED_PTR);
+
+ pImage->WriteReloc(&precode, offsetof(RemotingPrecode, m_callRel32),
+ pImage->GetHelperThunk(CORINFO_HELP_EE_REMOTING_THUNK), 0, IMAGE_REL_BASED_REL32);
+
+ if (m_fIsPrebound)
+ {
+ pImage->WriteReloc(&precode, offsetof(RemotingPrecode, m_rel32),
+ pImage->m_pDataImage->GetCodeAddress(m_pMD), 0, IMAGE_REL_BASED_REL32);
+ }
+ else
+ {
+ pImage->WriteReloc(&precode, offsetof(RemotingPrecode, m_rel32),
+ pImage->GetHelperThunk(CORINFO_HELP_EE_PRESTUB), 0, IMAGE_REL_BASED_REL32);
+ }
+
+ pZapWriter->Write(&precode, sizeof(precode));
+ }
+
+ BOOL IsPrebound(ZapImage * pImage)
+ {
+        // This makes sure that when IBC logging is on, the precode goes through the prestub.
+ if (GetAppDomain()->ToCompilationDomain()->m_fForceInstrument)
+ return FALSE;
+
+ // Prebind the remoting precode if possible
+ return pImage->m_pDataImage->CanDirectCall(m_pMD, CORINFO_ACCESS_THIS);
+ }
+
+};
+#endif // HAS_REMOTING_PRECODE
+
+void DataImage::SavePrecode(PVOID ptr, MethodDesc * pMD, PrecodeType t, ItemKind kind, BOOL fIsPrebound)
+{
+ ZapNode * pNode = NULL;
+
+ switch (t) {
+ case PRECODE_STUB:
+ pNode = new (GetHeap()) ZapStubPrecode(pMD, kind);
+ GetHelperThunk(CORINFO_HELP_EE_PRESTUB);
+ break;
+
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ case PRECODE_NDIRECT_IMPORT:
+ pNode = new (GetHeap()) ZapNDirectImportPrecode(pMD, kind);
+ GetHelperThunk(CORINFO_HELP_EE_PINVOKE_FIXUP);
+ break;
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+
+#ifdef HAS_REMOTING_PRECODE
+ case PRECODE_REMOTING:
+ pNode = new (GetHeap()) ZapRemotingPrecode(pMD, kind, fIsPrebound);
+
+ GetHelperThunk(CORINFO_HELP_EE_REMOTING_THUNK);
+
+ if (!fIsPrebound)
+ {
+ GetHelperThunk(CORINFO_HELP_EE_PRESTUB);
+ }
+ break;
+#endif // HAS_REMOTING_PRECODE
+
+ default:
+ _ASSERTE(!"Unexpected precode type");
+ break;
+ }
+
+ BindPointer(ptr, pNode, 0);
+
+ AddStructureInOrder(pNode);
+}
+
+#endif // _TARGET_X86_ || _TARGET_AMD64_
+
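+// The Fixup*Pointer helpers below all follow the same pattern: if the target can
+// be eagerly bound (CanEagerBindTo* and CanHardBindToZapModule both succeed), a
+// direct pointer fixup is emitted; otherwise the reference is routed through an
+// import cell resolved at bind time, with FIXUP_POINTER_INDIRECTION marking the
+// stored value as an indirection.
+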
+void DataImage::FixupModulePointer(Module * pModule, PVOID p, SSIZE_T offset, ZapRelocationType type)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pModule != NULL)
+ {
+ if (CanEagerBindToModule(pModule) && CanHardBindToZapModule(pModule))
+ {
+ FixupField(p, offset, pModule, 0, type);
+ }
+ else
+ {
+ ZapNode * pImport = GetModuleHandleImport(pModule);
+ FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type);
+ }
+ }
+}
+
+void DataImage::FixupMethodTablePointer(MethodTable * pMT, PVOID p, SSIZE_T offset, ZapRelocationType type)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pMT != NULL)
+ {
+ if (CanEagerBindToMethodTable(pMT) && CanHardBindToZapModule(pMT->GetLoaderModule()))
+ {
+ FixupField(p, offset, pMT, 0, type);
+ }
+ else
+ {
+ ZapNode * pImport = GetTypeHandleImport(pMT);
+ FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type);
+ }
+ }
+}
+
+void DataImage::FixupTypeHandlePointer(TypeHandle th, PVOID p, SSIZE_T offset, ZapRelocationType type)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!th.IsNull())
+ {
+ if (th.IsTypeDesc())
+ {
+ if (CanEagerBindToTypeHandle(th) && CanHardBindToZapModule(th.GetLoaderModule()))
+ {
+ FixupField(p, offset, th.AsTypeDesc(), 2, type);
+ }
+ else
+ {
+ ZapNode * pImport = GetTypeHandleImport(th);
+ FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type);
+ }
+ }
+ else
+ {
+ MethodTable * pMT = th.AsMethodTable();
+ FixupMethodTablePointer(pMT, p, offset, type);
+ }
+ }
+}
+
+void DataImage::FixupMethodDescPointer(MethodDesc * pMD, PVOID p, SSIZE_T offset, ZapRelocationType type /*=IMAGE_REL_BASED_PTR*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pMD != NULL)
+ {
+ if (CanEagerBindToMethodDesc(pMD) && CanHardBindToZapModule(pMD->GetLoaderModule()))
+ {
+ FixupField(p, offset, pMD, 0, type);
+ }
+ else
+ {
+ ZapNode * pImport = GetMethodHandleImport(pMD);
+ FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type);
+ }
+ }
+}
+
+void DataImage::FixupFieldDescPointer(FieldDesc * pFD, PVOID p, SSIZE_T offset, ZapRelocationType type /*=IMAGE_REL_BASED_PTR*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pFD != NULL)
+ {
+ if (CanEagerBindToFieldDesc(pFD) && CanHardBindToZapModule(pFD->GetLoaderModule()))
+ {
+ FixupField(p, offset, pFD, 0, type);
+ }
+ else
+ {
+ ZapNode * pImport = GetFieldHandleImport(pFD);
+ FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type);
+ }
+ }
+}
+
+void DataImage::FixupMethodTablePointer(PVOID p, FixupPointer<PTR_MethodTable> * ppMT)
+{
+ FixupMethodTablePointer(ppMT->GetValue(), p, (BYTE *)ppMT - (BYTE *)p, IMAGE_REL_BASED_PTR);
+}
+void DataImage::FixupTypeHandlePointer(PVOID p, FixupPointer<TypeHandle> * pth)
+{
+ FixupTypeHandlePointer(pth->GetValue(), p, (BYTE *)pth - (BYTE *)p, IMAGE_REL_BASED_PTR);
+}
+void DataImage::FixupMethodDescPointer(PVOID p, FixupPointer<PTR_MethodDesc> * ppMD)
+{
+ FixupMethodDescPointer(ppMD->GetValue(), p, (BYTE *)ppMD - (BYTE *)p, IMAGE_REL_BASED_PTR);
+}
+void DataImage::FixupFieldDescPointer(PVOID p, FixupPointer<PTR_FieldDesc> * ppFD)
+{
+ FixupFieldDescPointer(ppFD->GetValue(), p, (BYTE *)ppFD - (BYTE *)p, IMAGE_REL_BASED_PTR);
+}
+
+void DataImage::FixupModulePointer(PVOID p, RelativeFixupPointer<PTR_Module> * ppModule)
+{
+ FixupModulePointer(ppModule->GetValueMaybeNull(), p, (BYTE *)ppModule - (BYTE *)p, IMAGE_REL_BASED_RELPTR);
+}
+void DataImage::FixupMethodTablePointer(PVOID p, RelativeFixupPointer<PTR_MethodTable> * ppMT)
+{
+ FixupMethodTablePointer(ppMT->GetValueMaybeNull(), p, (BYTE *)ppMT - (BYTE *)p, IMAGE_REL_BASED_RELPTR);
+}
+void DataImage::FixupTypeHandlePointer(PVOID p, RelativeFixupPointer<TypeHandle> * pth)
+{
+ FixupTypeHandlePointer(pth->GetValueMaybeNull(), p, (BYTE *)pth - (BYTE *)p, IMAGE_REL_BASED_RELPTR);
+}
+void DataImage::FixupMethodDescPointer(PVOID p, RelativeFixupPointer<PTR_MethodDesc> * ppMD)
+{
+ FixupMethodDescPointer(ppMD->GetValueMaybeNull(), p, (BYTE *)ppMD - (BYTE *)p, IMAGE_REL_BASED_RELPTR);
+}
+void DataImage::FixupFieldDescPointer(PVOID p, RelativeFixupPointer<PTR_FieldDesc> * ppFD)
+{
+ FixupFieldDescPointer(ppFD->GetValueMaybeNull(), p, (BYTE *)ppFD - (BYTE *)p, IMAGE_REL_BASED_RELPTR);
+}
+
+BOOL DataImage::CanHardBindToZapModule(Module *targetModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(targetModule == m_module || targetModule->HasNativeImage());
+ return targetModule == m_module;
+}
+
+BOOL DataImage::CanEagerBindToTypeHandle(TypeHandle th, BOOL fRequirePrerestore, TypeHandleList *pVisited)
+{
+ STANDARD_VM_CONTRACT;
+
+ Module * pLoaderModule = th.GetLoaderModule();
+
+ BOOL fCanEagerBind;
+
+ if (th.IsTypeDesc())
+ {
+ fCanEagerBind = CanEagerBindTo(pLoaderModule, Module::GetPreferredZapModuleForTypeDesc(th.AsTypeDesc()), th.AsTypeDesc());
+ }
+ else
+ {
+ fCanEagerBind = CanEagerBindTo(pLoaderModule, Module::GetPreferredZapModuleForMethodTable(th.AsMethodTable()), th.AsMethodTable());
+ }
+
+ if (GetModule() != th.GetLoaderModule())
+ {
+ if (th.IsTypeDesc())
+ {
+ return FALSE;
+ }
+
+ // As a performance optimization, don't eager bind to arrays. They are currently very expensive to
+ // fixup so we want to do it lazily.
+
+ if (th.AsMethodTable()->IsArray())
+ {
+ return FALSE;
+ }
+
+ // For correctness in the face of targeted patching, do not eager bind to any instantiation
+ // in the target module that might go away.
+ if (!th.IsTypicalTypeDefinition() &&
+ !Module::IsAlwaysSavedInPreferredZapModule(th.GetInstantiation(),
+ Instantiation()))
+ {
+ return FALSE;
+ }
+
+ // #DoNotEagerBindToTypesThatNeedRestore
+ //
+ // It is important to avoid eager binding to structures that require restore. The code here stops
+ // this from happening for cross-module fixups. For intra-module cases, eager fixups are allowed to
+ // (and often do) target types that require restore, even though this is generally prone to all of
+ // the same problems described below. Correctness is preserved only because intra-module eager
+ // fixups are ignored in Module::RunEagerFixups (so their semantics are very close to normal
+ // non-eager fixups).
+ //
+ // For performance, this is the most costly type of eager fixup (and may require otherwise-unneeded
+ // assemblies to be loaded) and has the lowest benefit, since it does not avoid the need for the
+ // referencing type to require restore.
+ //
+ // More importantly, this kind of fixup can compromise correctness by causing type loads to occur
+ // during eager fixup resolution. The system is not designed to cope with this and a variety of
+ // subtle failures can occur when it happens. As an example, consider a scenario involving the
+ // following assemblies and types:
+ // o A1: softbinds to A2, contains "class A1!Level2 extends A2!Level1"
+ // o A2: hardbinds to A3, contains "class A2!Level1 extends Object", contains methods that use A3!Level3.
+ // o A3: softbinds to A1, contains "class A3!Level3 extends A1!Level2"
+ //
+ // If eager fixups are allowed to target types that need restore, then it's possible for A2 to end
+ // up with an eager fixup targeting A3!Level3, setting up this sequence:
+ // 1 Type load starts for A1!Level2.
+ // 2 Loading base class A2!Level1 triggers assembly load for A2.
+ // 3 Loading A2 involves synchronously resolving its eager fixups, including the fixup to A3!Level3.
+ // 4 A3!Level3 needs restore, so type load starts for A3!Level3.
+ // 5 Loading A3!Level3 requires loading base class A1!Level2.
+ // 6 A1!Level2 is already being loaded on this thread (in #1 above), so type load fails.
+ // 7 Since eager fixup resolution failed, FileLoadException is thrown for A2.
+ fRequirePrerestore = TRUE;
+ }
+
+ if (fCanEagerBind && fRequirePrerestore)
+ {
+ fCanEagerBind = !th.ComputeNeedsRestore(this, pVisited);
+ }
+
+ return fCanEagerBind;
+}
+
+BOOL DataImage::CanEagerBindToMethodTable(MethodTable *pMT, BOOL fRequirePrerestore, TypeHandleList *pVisited)
+{
+ WRAPPER_NO_CONTRACT;
+
+ TypeHandle th = TypeHandle(pMT);
+ return DataImage::CanEagerBindToTypeHandle(th, fRequirePrerestore, pVisited);
+}
+
+BOOL DataImage::CanEagerBindToMethodDesc(MethodDesc *pMD, BOOL fRequirePrerestore, TypeHandleList *pVisited)
+{
+ STANDARD_VM_CONTRACT;
+
+ BOOL fCanEagerBind = CanEagerBindTo(pMD->GetLoaderModule(), Module::GetPreferredZapModuleForMethodDesc(pMD), pMD);
+
+ // Performance optimization -- see comment in CanEagerBindToTypeHandle
+ if (GetModule() != pMD->GetLoaderModule())
+ {
+ // For correctness in the face of targeted patching, do not eager bind to any instantiation
+ // in the target module that might go away.
+ if (!pMD->IsTypicalMethodDefinition() &&
+ !Module::IsAlwaysSavedInPreferredZapModule(pMD->GetClassInstantiation(),
+ pMD->GetMethodInstantiation()))
+ {
+ return FALSE;
+ }
+
+ fRequirePrerestore = TRUE;
+ }
+
+ if (fCanEagerBind && fRequirePrerestore)
+ {
+ fCanEagerBind = !pMD->ComputeNeedsRestore(this, pVisited);
+ }
+
+ return fCanEagerBind;
+}
+
+BOOL DataImage::CanEagerBindToFieldDesc(FieldDesc *pFD, BOOL fRequirePrerestore, TypeHandleList *pVisited)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!CanEagerBindTo(pFD->GetLoaderModule(), Module::GetPreferredZapModuleForFieldDesc(pFD), pFD))
+ return FALSE;
+
+ MethodTable * pMT = pFD->GetApproxEnclosingMethodTable();
+
+ return CanEagerBindToMethodTable(pMT, fRequirePrerestore, pVisited);
+}
+
+BOOL DataImage::CanEagerBindToModule(Module *pModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ return GetAppDomain()->ToCompilationDomain()->CanEagerBindToZapFile(pModule);
+}
+
+// "address" is a data-structure belonging to pTargetModule.
+// This function returns whether the Module currently being ngenned can
+// hardbind to "address".
+/* static */
+BOOL DataImage::CanEagerBindTo(Module *pTargetModule, Module *pPreferredZapModule, void *address)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pTargetModule != pPreferredZapModule)
+ return FALSE;
+
+ if (GetModule() == pTargetModule)
+ return TRUE;
+
+ BOOL eagerBindToZap = GetAppDomain()->ToCompilationDomain()->CanEagerBindToZapFile(pTargetModule);
+ BOOL isPersisted = pTargetModule->IsPersistedObject(address);
+
+ return eagerBindToZap && isPersisted;
+}
+
+BOOL DataImage::CanPrerestoreEagerBindToTypeHandle(TypeHandle th, TypeHandleList *pVisited)
+{
+ WRAPPER_NO_CONTRACT;
+ return CanEagerBindToTypeHandle(th, TRUE, pVisited);
+}
+
+BOOL DataImage::CanPrerestoreEagerBindToMethodTable(MethodTable *pMT, TypeHandleList *pVisited)
+{
+ WRAPPER_NO_CONTRACT;
+ return CanEagerBindToMethodTable(pMT, TRUE, pVisited);
+}
+
+BOOL DataImage::CanPrerestoreEagerBindToMethodDesc(MethodDesc *pMD, TypeHandleList *pVisited)
+{
+ WRAPPER_NO_CONTRACT;
+ return CanEagerBindToMethodDesc(pMD, TRUE, pVisited);
+}
+
+
+void DataImage::HardBindTypeHandlePointer(PVOID p, SSIZE_T offset)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CanEagerBindToTypeHandle(*(TypeHandle UNALIGNED*)((BYTE *)p + offset)));
+ }
+ CONTRACTL_END;
+
+ TypeHandle thCopy = *(TypeHandle UNALIGNED*)((BYTE *)p + offset);
+
+ if (!thCopy.IsNull())
+ {
+ if (thCopy.IsTypeDesc())
+ {
+ FixupField(p, offset, thCopy.AsTypeDesc(), 2);
+ }
+ else
+ {
+ FixupField(p, offset, thCopy.AsMethodTable());
+ }
+ }
+}
+
+
+ // This is an obsolete in-place fixup that we should get rid of. For now, it is used for:
+ // - FnPtrTypeDescs. These should not be stored in NGen images at all.
+ // - stubs-as-il signatures. These should use tokens when stored in an NGen image.
+ //
+void DataImage::FixupTypeHandlePointerInPlace(PVOID p, SSIZE_T offset, BOOL fForceFixup /*=FALSE*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ TypeHandle thCopy = *(TypeHandle UNALIGNED*)((BYTE *)p + offset);
+
+ if (!thCopy.IsNull())
+ {
+ if (!fForceFixup &&
+ CanEagerBindToTypeHandle(thCopy) &&
+ CanHardBindToZapModule(thCopy.GetLoaderModule()))
+ {
+ HardBindTypeHandlePointer(p, offset);
+ }
+ else
+ {
+ ZapImport * pImport = m_pZapImage->GetImportTable()->GetClassHandleImport((CORINFO_CLASS_HANDLE)thCopy.AsPtr());
+
+ ZapNode * pBlob = m_pZapImage->GetImportTable()->PlaceImportBlob(pImport);
+ FixupFieldToNode(p, offset, pBlob, 0, IMAGE_REL_BASED_ABSOLUTE_TAGGED);
+ }
+ }
+}
+
+void DataImage::BeginRegion(CorInfoRegionKind regionKind)
+{
+ STANDARD_VM_CONTRACT;
+
+ m_pZapImage->BeginRegion(regionKind);
+}
+
+void DataImage::EndRegion(CorInfoRegionKind regionKind)
+{
+ STANDARD_VM_CONTRACT;
+
+ m_pZapImage->EndRegion(regionKind);
+}
+
+void DataImage::ReportInlining(CORINFO_METHOD_HANDLE inliner, CORINFO_METHOD_HANDLE inlinee)
+{
+ STANDARD_VM_CONTRACT;
+ _ASSERTE(m_inlineTrackingMap);
+ m_inlineTrackingMap->AddInlining(GetMethod(inliner), GetMethod(inlinee));
+}
+
+InlineTrackingMap * DataImage::GetInlineTrackingMap()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_inlineTrackingMap;
+}
+
+//
+// Compressed LookupMap Support
+//
+// See the large comment near the top of ceeload.h for a much more detailed discussion of this.
+//
+// Basically we support a specialized node, ZapCompressedLookupMap, which knows how to compress the array of
+// intra-module pointers present in certain types of LookupMap.
+//
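+// As a rough sketch of the on-disk format (illustrative only; the authoritative
+// constants such as kLookupMapLengthBits and kLookupMapIndexStride are defined
+// alongside LookupMapBase), each compressed table entry is a variable-length
+// bit record:
+//
+//   <index into encoding lengths : kLookupMapLengthBits bits>
+//   <delta sign (1 = increasing) : 1 bit>
+//   <delta magnitude             : rgEncodingLengths[index] bits>
+//
+// For example, assuming kLookupMapLengthBits == 2 and encoding lengths of
+// {4, 8, 16, 32}, a delta of +5 would be written as index 00, sign 1 and
+// magnitude 0101: seven bits in total.
+//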
+
+// A simple class to write a sequential sequence of variable sized bit-fields into a pre-allocated buffer. I
+// was going to use the version defined by GcInfoEncoder (the reader side in ceeload.cpp uses GcInfoDecoder's
+// BitStreamReader) but unfortunately the code is not currently factored to make this easy and the resources
+// were not available to perform a non-trivial refactoring of the code. In any event the writer is fairly
+// trivial and doesn't represent a huge duplication of effort.
+// The class requires that the input buffer is DWORD-aligned and sized (it uses a DWORD cache and always
+// writes data to the buffer in DWORD-sized chunks).
+class BitStreamWriter
+{
+public:
+ // Initialize a writer and point it at the start of a pre-allocated buffer (large enough to accommodate all
+ // future writes). The buffer must be DWORD-aligned (we use this for some performance optimization).
+ BitStreamWriter(DWORD *pStart)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Buffer must be DWORD-aligned.
+ _ASSERTE(((TADDR)pStart & 0x3) == 0);
+
+ m_pNext = pStart; // Point at the start of the buffer
+ m_dwCurrent = 0; // We don't have any cached data waiting to write
+ m_cCurrentBits = 0; // Ditto
+ m_cBitsWritten = 0; // We haven't written any bits
+ }
+
+ // Write the low-order cBits of dwData to the stream.
+ void Write(DWORD dwData, DWORD cBits)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // We can only write between 1 and 32 bits of data at a time.
+ _ASSERTE(cBits > 0 && cBits <= kBitsPerDWORD);
+
+ // Check that none of the unused high-order bits of dwData have stale data in them (we can use this to
+ // optimize paths below). Use two conditions here because << of 32-bits or more (on x86) doesn't
+ // do what you might expect (the RHS is modulo 32 so "<< 32" is a no-op rather than zero-ing the
+ // result).
+ _ASSERTE((cBits == kBitsPerDWORD) || ((dwData & ((1U << cBits) - 1)) == dwData));
+
+ // Record the input bits as written (we can't fail and we have multiple exit paths below so it's
+ // convenient to update our counter here).
+ m_cBitsWritten += cBits;
+
+ // We cache up to a DWORD of data to be written to the stream and only write back to the buffer when
+ // we have a full DWORD. Calculate how many bits of the input we're going to write first (either the
+ // rest of the input or the remaining bits of space in the current DWORD cache, whichever is smaller).
+ DWORD cInitialBits = min(cBits, kBitsPerDWORD - m_cCurrentBits);
+ if (cInitialBits == kBitsPerDWORD)
+ {
+ // Deal with this special case (we're writing all the input, an entire DWORD all at once) since it
+ // ensures that none of the << operations below have to deal with a LHS that == 32 (see the <<
+ // comment in one of the asserts above for why this matters).
+
+ // Because of the calculations above we should only come here if our DWORD cache was empty and the
+ // caller is trying to write a full DWORD (which simplifies many things).
+ _ASSERTE(m_dwCurrent == 0 && m_cCurrentBits == 0 && cBits == kBitsPerDWORD);
+
+ *m_pNext++ = dwData; // Write a full DWORD directly from the input
+
+ // That's it, there's no more data to write and the only state update to the writer was advancing
+ // the buffer pointer (cache DWORD is already in the correct state, see asserts above).
+ return;
+ }
+
+ // Calculate a mask of the low-order bits we're going to extract from the input data.
+ DWORD dwInitialMask = (1U << cInitialBits) - 1;
+
+ // OR those bits into the cache (properly shifted to fit above the data already there).
+ m_dwCurrent |= (dwData & dwInitialMask) << m_cCurrentBits;
+
+ // Update the cache bit counter for the new data.
+ m_cCurrentBits += cInitialBits;
+ if (m_cCurrentBits == kBitsPerDWORD)
+ {
+ // The cache filled up. Write the DWORD to the buffer and reset the cache state to empty.
+ *m_pNext++ = m_dwCurrent;
+ m_dwCurrent = 0;
+ m_cCurrentBits = 0;
+ }
+
+ // If the bits we just inserted comprised all the input bits we're done.
+ if (cInitialBits == cBits)
+ return;
+
+ // There's more data to write. But we can only get here if we just flushed the cache. So there is a
+ // whole DWORD free in the cache and we're guaranteed to have less than a DWORD of data left to write.
+ // As a result we can simply populate the low-order bits of the cache with our remaining data (simply
+ // shift down by the number of bits we've already written) and we're done.
+ _ASSERTE(m_dwCurrent == 0 && m_cCurrentBits == 0);
+ m_dwCurrent = dwData >>= cInitialBits;
+ m_cCurrentBits = cBits - cInitialBits;
+ }
+
+ // Because we cache a DWORD of data before writing it, it's possible that there are still unwritten bits
+ // left in the cache once you've finished writing data. Call this operation after all Writes() are
+ // completed to flush any such data to memory. It's not legal to call Write() again after a Flush().
+ void Flush()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Nothing to do if the cache is empty.
+ if (m_cCurrentBits == 0)
+ return;
+
+ // Write what we have to memory (unused high-order bits will be zero).
+ *m_pNext = m_dwCurrent;
+
+ // Catch any attempt to make a further Write() call.
+ m_pNext = NULL;
+ }
+
+ // Get the count of bits written so far (logically, this number does not take caching into account).
+ DWORD GetBitsWritten()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_cBitsWritten;
+ }
+
+private:
+ enum { kBitsPerDWORD = sizeof(DWORD) * 8 };
+
+ DWORD *m_pNext; // Pointer to the next DWORD that will be written in the buffer
+ DWORD m_dwCurrent; // We cache up to a DWORD of data before writing it to the buffer
+ DWORD m_cCurrentBits; // Count of valid (low-order) bits in the buffer above
+ DWORD m_cBitsWritten; // Count of bits given to Write() (ignores caching)
+};
+
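+// A minimal usage sketch for BitStreamWriter (illustrative only, not part of
+// the build; the buffer size and values are made up):
+//
+//   DWORD rgBuffer[2] = { 0, 0 };   // DWORD-aligned, DWORD-sized buffer
+//   BitStreamWriter sStream(rgBuffer);
+//   sStream.Write(0x5, 3);          // append the three low-order bits of 5
+//   sStream.Write(0x1, 1);          // append a single set bit
+//   sStream.Flush();                // push the cached partial DWORD to memory
+//   _ASSERTE(sStream.GetBitsWritten() == 4);
+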
+// A specialized node used to write the compressed portions of a LookupMap to an ngen image. This is
+// (optionally) allocated by a call to DataImage::StoreCompressedLayoutMap from LookupMapBase::Save() and
+// handles allocation and initialization of the compressed table and an index used to navigate the table
+// efficiently. The allocation of the map itself and any hot item list is still handled externally but this
+// node will perform any fixups in the base map required to refer to the new compressed data.
+//
+// Since the compression algorithm used depends on the precise values of the RVAs referenced by the LookupMap
+// the compression doesn't happen until ComputeRVA is called (don't call GetSize() until after ComputeRVA()
+// returns). Additionally we must ensure that this node's ComputeRVA() is not called until after that of every
+// node on those RVA it depends. Currently this is ensured by placing this node near the end of the .text
+// section (after pointers to any read-only data structures referenced by LookupMaps and after the .data
+// section containing writeable structures).
+class ZapCompressedLookupMap : public ZapNode
+{
+ DataImage *m_pImage; // Back pointer to the allocating DataImage
+ LookupMapBase *m_pMap; // Back pointer to the LookupMap we're compressing
+ BYTE *m_pTable; // ComputeRVA allocates a compressed table here
+ BYTE *m_pIndex; // ComputeRVA allocates a table index here
+ DWORD m_cbTable; // Size (in bytes) of the table above (after ComputeRVA)
+ DWORD m_cbIndex; // Size (in bytes) of the index above (after ComputeRVA)
+ DWORD m_cBitsPerIndexEntry; // Number of bits in each index entry
+ DWORD m_rgHistogram[kBitsPerRVA]; // Table of frequencies of different delta lengths
+ BYTE m_rgEncodingLengths[kLookupMapLengthEntries]; // Table of different bit lengths value deltas can take
+ BYTE m_eKind; // Item kind (DataImage::ITEM_COMPRESSED_MAP currently)
+
+public:
+ ZapCompressedLookupMap(DataImage *pImage, LookupMapBase *pMap, BYTE eKind)
+ : m_pImage(pImage), m_pMap(pMap), m_eKind(eKind)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ DataImage::ItemKind GetKind()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (DataImage::ItemKind)m_eKind;
+ }
+
+ virtual DWORD GetSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (!ShouldCompressedMapBeSaved())
+ return 0;
+
+ // This isn't legal until ComputeRVA() is called. Check this by seeing if the compressed version of
+ // the table is allocated yet.
+ _ASSERTE(m_pTable != NULL);
+ return m_cbIndex + m_cbTable;
+ }
+
+ virtual UINT GetAlignment()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (!ShouldCompressedMapBeSaved())
+ return 1;
+
+ // The table and index have no pointers but do require DWORD alignment.
+ return sizeof(DWORD);
+ }
+
+ virtual ZapNodeType GetType()
+ {
+ STANDARD_VM_CONTRACT;
+
+ return NodeTypeForItemKind(m_eKind);
+ }
+
+ virtual DWORD ComputeRVA(ZapWriter *pZapWriter, DWORD dwPos)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (ShouldCompressedMapBeSaved())
+ {
+
+ // This is the earliest opportunity at which all data is available in order to compress the table. In
+ // particular all values in the table (currently MethodTable* or MethodDesc*) point to structures
+ // which have been assigned final RVAs in the image. We can thus compute a compressed table value that
+ // relies on the relationship between these RVAs.
+
+ // Phase 1: Look through all the entries in the table. Look at the deltas between RVAs for adjacent
+ // items and build a histogram of how many entries require a specific number of bits to encode their
+ // delta (using a scheme where we discard non-significant low and high-order zero bits). This call will
+ // initialize m_rgHistogram so that entry 0 contains the number of entries that require 1 bit to
+ // encode their delta, entry 1 the count of those that require 2 bits etc. up to the last entry (how
+ // many entries require the full 32 bits). Note that even on 64-bit platforms we only currently
+ // support 32-bit RVAs.
+ DWORD cRids = AnalyzeTable();
+
+ // Phase 2: Given the histogram above, calculate the set of delta lengths for the encoding table
+ // (m_rgEncodingLengths) that will result in optimal table size. We have a fixed size encoding length
+ // so we don't have to embed a large fixed-size length field for every compressed entry but we can
+ // still cope with the relatively rare but ever-present worst case entries which require many bits of
+ // delta entry.
+ OptimizeEncodingLengths();
+
+ // Phase 3: We now have enough data to allocate the final data structures (the compressed table itself
+ // and an index that bookmarks every kLookupMapIndexStride'th entry). Both structures must start
+ // DWORD-aligned and have a DWORD-aligned size (requirements of BitStreamWriter).
+
+ // PredictCompressedSize() returns its result in bits so we must convert (rounding up) to bytes before
+ // DWORD aligning.
+ m_cbTable = AlignUp((PredictCompressedSize(m_rgEncodingLengths) + 7) / 8, sizeof(DWORD));
+
+ // Each index entry contains a bit offset into the compressed stream (so we must size for the worst
+ // case of an offset at the end of the stream) plus an RVA.
+ m_cBitsPerIndexEntry = BitsRequired(m_cbTable * 8) + kBitsPerRVA;
+ _ASSERTE(m_cBitsPerIndexEntry > 0);
+
+ // Our first index entry is for entry 0 (rather than entry kLookupMapIndexStride) so we must be
+ // sure to round up the number of index entries we need in order to cover the table.
+ DWORD cIndexEntries = (cRids + (kLookupMapIndexStride - 1)) / kLookupMapIndexStride;
+
+ // Since we calculate the index size in bits we need to round up to bytes before DWORD aligning.
+ m_cbIndex = AlignUp(((m_cBitsPerIndexEntry * cIndexEntries) + 7) / 8, sizeof(DWORD));
+
+ // Allocate both table and index from a single chunk of memory.
+ BYTE *pMemory = new BYTE[m_cbIndex + m_cbTable];
+ m_pTable = pMemory;
+ m_pIndex = pMemory + m_cbTable;
+
+ // Phase 4: We've now calculated all the input data we need and allocated memory for the output so we
+ // can go ahead and fill in the compressed table and index.
+ InitializeTableAndIndex();
+
+ // Phase 5: Go back and update the saved version of the LookupMap (redirect the table pointer to the
+ // compressed table and fill in the other fields which aren't valid until the table is compressed).
+ LookupMapBase *pSaveMap = (LookupMapBase*)m_pImage->GetImagePointer(m_pMap);
+ pSaveMap->pTable = (TADDR*)m_pTable;
+ pSaveMap->pIndex = m_pIndex;
+ pSaveMap->cIndexEntryBits = m_cBitsPerIndexEntry;
+ pSaveMap->cbTable = m_cbTable;
+ pSaveMap->cbIndex = m_cbIndex;
+ memcpy(pSaveMap->rgEncodingLengths, m_rgEncodingLengths, sizeof(m_rgEncodingLengths));
+
+ // Schedule fixups for the map pointers to the compressed table and index.
+ m_pImage->FixupFieldToNode(m_pMap, offsetof(LookupMapBase, pTable), this, 0);
+ m_pImage->FixupFieldToNode(m_pMap, offsetof(LookupMapBase, pIndex), this, m_cbTable);
+ }
+
+ // We're done with generating the compressed table. Now we need to do the work ComputeRVA() is meant
+ // to do:
+ dwPos = AlignUp(dwPos, GetAlignment()); // Satisfy our alignment requirements
+ SetRVA(dwPos); // Set the RVA of the node (both table and index)
+ dwPos += GetSize(); // Advance the RVA past our node
+
+ return dwPos;
+ }
+
+ virtual void Save(ZapWriter *pZapWriter)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (!ShouldCompressedMapBeSaved())
+ return;
+
+ // Save both the table and index.
+ pZapWriter->Write(m_pTable, m_cbTable);
+ pZapWriter->Write(m_pIndex, m_cbIndex);
+ }
+
+private:
+
+ // It's possible that our node has been created and only later the decision is made to store the full
+ // uncompressed table. In this case, we want to early out of our work and make saving our node a no-op.
+ BOOL ShouldCompressedMapBeSaved()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // To identify whether compression is desired, use the flag from LookupMapBase::Save
+ return (m_pMap->cIndexEntryBits > 0);
+ }
+
+ // Phase 1: Look through all the entries in the table. Look at the deltas between RVAs for adjacent items
+ // and build a histogram of how many entries require a specific number of bits to encode their delta (using
+ // a scheme where we discard non-significant low and high-order zero bits). This call will initialize
+ // m_rgHistogram so that entry 0 contains the number of entries that require 1 bit to encode their delta,
+ // entry 1 the count of those that require 2 bits etc. up to the last entry (how many entries require the
+ // full 32 bits). Note that even on 64-bit platforms we only currently support 32-bit RVAs.
+ DWORD AnalyzeTable()
+ {
+ STANDARD_VM_CONTRACT;
+
+ LookupMapBase *pMap = m_pMap;
+ DWORD dwLastValue = 0;
+ DWORD cRids = 0;
+
+ // Initialize the histogram to all zeroes.
+ memset(m_rgHistogram, 0, sizeof(m_rgHistogram));
+
+ // Walk each node in the map.
+ while (pMap)
+ {
+ // Walk each entry in this node.
+ for (DWORD i = 0; i < pMap->dwCount; i++)
+ {
+ DWORD dwCurrentValue = ComputeElementRVA(pMap, i);
+
+ // Calculate the delta from the last entry. We split the delta into two-components: a bool
+ // indicating whether the RVA was higher or lower and an absolute (non-negative) size. Sort of
+ // like a ones-complement signed number.
+ bool fIncreasingDelta = dwCurrentValue > dwLastValue;
+ DWORD dwDelta = fIncreasingDelta ? (dwCurrentValue - dwLastValue) : (dwLastValue - dwCurrentValue);
+
+ // Determine the minimum number of bits required to represent the delta (by stripping
+ // non-significant leading zeros) and update the count in the histogram of the number of
+ // deltas that required this many bits. We never encode anything with zero bits (only the
+ // value zero would be eligible and it's not a common value) so the first histogram entry
+ // records the number of deltas encodable with one bit and so on.
+ m_rgHistogram[BitsRequired(dwDelta) - 1]++;
+
+ dwLastValue = dwCurrentValue;
+ cRids++;
+ }
+
+ pMap = pMap->pNext;
+ }
+
+ return cRids;
+ }
+
+ // Phase 2: Given the histogram above, calculate the set of delta lengths for the encoding table
+ // (m_rgEncodingLengths) that will result in optimal table size. We have a fixed size encoding length so
+ // we don't have to embed a large fixed-size length field for every compressed entry but we can still cope
+ // with the relatively rare but ever-present worst case entries which require many bits of delta entry.
+ void OptimizeEncodingLengths()
+ {
+ STANDARD_VM_CONTRACT;
+
+ // Find the longest delta (search from the large end of the histogram down for the first non-zero
+ // entry).
+ BYTE bMaxBits = 0;
+#ifdef _MSC_VER
+#pragma warning(suppress:6293) // Prefast doesn't understand the unsigned modulo-8 arithmetic below.
+#endif
+ for (BYTE i = kBitsPerRVA - 1; i < 0xff; i--)
+ if (m_rgHistogram[i] > 0)
+ {
+ bMaxBits = i + 1; // +1 because we never encode anything with zero bits.
+ break;
+ }
+ _ASSERTE(bMaxBits >= 1);
+
+ // Now find the smallest delta in a similar fashion.
+ BYTE bMinBits = bMaxBits;
+ for (BYTE i = 0; i < kBitsPerRVA; i++)
+ if (m_rgHistogram[i] > 0)
+ {
+ bMinBits = i + 1; // +1 because we never encode anything with zero bits.
+ break;
+ }
+ _ASSERTE(bMinBits <= bMaxBits);
+
+ // The encoding lengths table is a sorted list of bit field lengths we can use to encode any
+ // entry-to-entry delta in the compressed table. We go through a table so we can use a small number of
+ // bits in the compressed stream (the table index) to express a very flexible range of deltas. The one
+ // entry we know in advance is the largest (the last). That's because we know we have to be able to
+ // encode the largest delta we found in the table or else we couldn't be functionally correct.
+ m_rgEncodingLengths[kLookupMapLengthEntries - 1] = bMaxBits;
+
+ // Now find optimal values for the other entries one by one. It doesn't really matter which order we
+ // do them in. For each entry we'll loop through all the possible encoding lengths, dwMinBits <=
+ // length < dwMaxBits, setting all the uninitialized entries to the candidate value and calculating
+ // the resulting compressed size of the table. We don't enforce that the candidate sizes get smaller
+ // for each entry, so if the best use of an extra table entry is to add a larger length rather
+ // than a smaller one, we'll take that. The downside is that we have to sort the table before
+ // calculating the table size (the sizing algorithm is only fast for a sorted table). Luckily our
+ // table is very small (currently 4 entries) and we don't have to sort one of the entries (the last is
+ // always largest) so this isn't such a huge deal.
+ for (DWORD i = 0; i < kLookupMapLengthEntries - 1; i++)
+ {
+ DWORD dwBestSize = 0xffffffff; // Best overall table size so far
+ BYTE bBestLength = bMaxBits; // The candidate value that led to the above
+
+ // Iterate over all the values that could generate a good result (no point trying values smaller
+ // than the smallest delta we have or as large as the maximum table entry we've already fixed).
+ for (BYTE j = bMinBits; j < bMaxBits; j++)
+ {
+ // Build a temporary (unsorted) encoding table.
+ BYTE rgTempBuckets[kLookupMapLengthEntries];
+
+ // Entries before the current one are set to the values we've already determined in previous
+ // iterations.
+ for (DWORD k = 0; k < i; k++)
+ rgTempBuckets[k] = m_rgEncodingLengths[k];
+
+ // The current entry and the remaining uninitialized entries are all set to the current
+ // candidate value (this is logically the equivalent of removing the non-current uninitialized
+ // entries from the table altogether).
+ for (DWORD k = i; k < kLookupMapLengthEntries - 1; k++)
+ rgTempBuckets[k] = j;
+
+ // The last entry is always the maximum bit length.
+ rgTempBuckets[kLookupMapLengthEntries - 1] = bMaxBits;
+
+ // Sort the temporary table so that the call to PredictCompressedSize() below behaves
+ // correctly (and fast).
+ SortLengthBuckets(rgTempBuckets);
+
+ // See what size of table this would generate.
+ DWORD dwTestSize = PredictCompressedSize(rgTempBuckets);
+ if (dwTestSize < dwBestSize)
+ {
+ // The result is better than our current best, remember it.
+ dwBestSize = dwTestSize;
+ bBestLength = j;
+ }
+ }
+
+ // Set the current entry to the best length we found.
+ m_rgEncodingLengths[i] = bBestLength;
+ }
+
+ // We've picked optimal values for all entries, but the result is unsorted. Fix that now.
+ SortLengthBuckets(m_rgEncodingLengths);
+ }
+
+ // Phase 4: We've now calculated all the input data we need and allocated memory for the output so we can
+ // go ahead and fill in the compressed table and index.
+ void InitializeTableAndIndex()
+ {
+ STANDARD_VM_CONTRACT;
+
+ // Initialize bit stream writers to the start of the compressed table and index.
+ BitStreamWriter sTableStream((DWORD*)m_pTable);
+ BitStreamWriter sIndexStream((DWORD*)m_pIndex);
+
+ DWORD dwRid = 0;
+ DWORD dwLastValue = 0;
+ LookupMapBase *pMap = m_pMap;
+
+ // Walk each node in the map.
+ while (pMap)
+ {
+ // Walk each entry in this node.
+ for (DWORD i = 0; i < pMap->dwCount; i++)
+ {
+ DWORD dwCurrentValue = ComputeElementRVA(pMap, i);
+
+ // Calculate the delta from the last entry. We split the delta into two-components: a bool
+ // indicating whether the RVA was higher or lower and an absolute (non-negative) size. Sort of
+ // like a ones-complement signed number.
+ bool fIncreasingDelta = dwCurrentValue > dwLastValue;
+ DWORD dwDelta = fIncreasingDelta ? (dwCurrentValue - dwLastValue) : (dwLastValue - dwCurrentValue);
+
+ // As a trade-off we can't store deltas with their most efficient length (because just
+ // encoding the length can dominate the space requirement when we have to cope with worst-case
+ // deltas). Instead we encode a relatively short index into the table of encoding lengths we
+ // calculated back in phase 2. So some deltas will encode in more bits than necessary but
+ // overall we'll win due to lowered prefix bit requirements.
+ // Look through all the table entries and choose the first that's large enough to accommodate
+ // our delta.
+ DWORD dwDeltaBitLength = BitsRequired(dwDelta);
+ DWORD j;
+ for (j = 0; j < kLookupMapLengthEntries; j++)
+ {
+ if (m_rgEncodingLengths[j] >= dwDeltaBitLength)
+ {
+ dwDeltaBitLength = m_rgEncodingLengths[j];
+ break;
+ }
+ }
+ _ASSERTE(j < kLookupMapLengthEntries);
+
+ // Write the entry into the compressed table.
+ sTableStream.Write(j, kLookupMapLengthBits); // The index for the delta length
+ sTableStream.Write(fIncreasingDelta ? 1 : 0, 1); // The +/- delta indicator
+ sTableStream.Write(dwDelta, dwDeltaBitLength); // The delta itself
+
+ // Is this entry one that requires a corresponding index entry?
+ if ((dwRid % kLookupMapIndexStride) == 0)
+ {
+ // Write an index entry:
+ // * The current (map-relative) RVA.
+ // * The position in the table bit stream of the next entry.
+ sIndexStream.Write(dwCurrentValue, kBitsPerRVA);
+ sIndexStream.Write(sTableStream.GetBitsWritten(), m_cBitsPerIndexEntry - kBitsPerRVA);
+ }
+
+ dwRid++;
+
+ dwLastValue = dwCurrentValue;
+ }
+
+ pMap = pMap->pNext;
+ }
+
+ // Flush any remaining bits in the caches of the table and index stream writers.
+ sTableStream.Flush();
+ sIndexStream.Flush();
+
+ // Make sure what we wrote fitted in what we allocated.
+ _ASSERTE((sTableStream.GetBitsWritten() / 8) <= m_cbTable);
+ _ASSERTE((sIndexStream.GetBitsWritten() / 8) <= m_cbIndex);
+
+ // Also check that we didn't have more than 31 bits of excess space allocated either (we should have
+ // allocated DWORD aligned lengths).
+ _ASSERTE(((m_cbTable * 8) - sTableStream.GetBitsWritten()) < 32);
+ _ASSERTE(((m_cbIndex * 8) - sIndexStream.GetBitsWritten()) < 32);
+ }
+
+ // Determine the final, map-relative RVA of the element at a specified index
+ DWORD ComputeElementRVA(LookupMapBase *pMap, DWORD index)
+ {
+ STANDARD_VM_CONTRACT;
+
+ // We base our RVAs on the RVA of the map (rather than the module). This is purely because individual
+ // maps don't store back pointers to their owning module so it's easier to recover pointer values at
+ // runtime using the map address instead.
+ DWORD rvaBase = m_pImage->GetRVA(m_pMap);
+
+ // Retrieve the pointer value in the specified entry. This is tricky since the pointer is
+ // encoded as a RelativePointer.
+ DWORD dwFinalRVA;
+ TADDR entry = RelativePointer<TADDR>::GetValueMaybeNullAtPtr((TADDR)&pMap->pTable[index]);
+ if (entry == 0)
+ {
+ // The pointer was null. We encode this as a zero RVA (RVA pointing to the map itself,
+ // which should never happen otherwise).
+ dwFinalRVA = 0;
+ }
+ else
+ {
+ // Non-null pointer, go get the RVA it's been mapped to. Transform this RVA into our
+ // special map-relative variant by subtracting the map base.
+
+ // Some of the pointer alignment bits may have been used as flags; preserve them.
+ DWORD flags = entry & ((1 << kFlagBits) - 1);
+ entry -= flags;
+
+ // We only support compressing maps of pointers to saved objects (e.g. no indirected FixupPointers)
+ // so there is guaranteed to be a valid RVA at this point. If this does not hold, GetRVA will assert.
+ DWORD rvaEntry = m_pImage->GetRVA((void*)entry);
+
+ dwFinalRVA = rvaEntry - rvaBase + flags;
+ }
+
+ return dwFinalRVA;
+ }
+
+ // Determine the number of bits required to represent the significant portion of a value (i.e. the value
+ // without any leading 0s). Always return 1 as a minimum (we do not encode 0 in 0 bits).
+ DWORD BitsRequired(DWORD dwValue)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#if (defined(_TARGET_X86_) || defined(_TARGET_AMD64_)) && defined(_MSC_VER)
+
+ // Since this operation could impact the performance of ngen (we call this a *lot*), we'll try to
+ // optimize this where we can. x86 and amd64 actually have instructions to find the least and most
+ // significant bits in a DWORD and MSVC exposes this as a builtin.
+ DWORD dwHighBit;
+ if (_BitScanReverse(&dwHighBit, dwValue))
+ return dwHighBit + 1;
+ else
+ return 1;
+
+#else // (_TARGET_X86_ || _TARGET_AMD64_) && _MSC_VER
+
+ // Otherwise we'll calculate this the slow way. Pick off the 32-bit case first to avoid the
+ // usual << problem (x << 32 == x, not 0).
+ if (dwValue > 0x7fffffff)
+ return 32;
+
+ DWORD cBits = 1;
+ while (dwValue > ((1U << cBits) - 1))
+ cBits++;
+
+ return cBits;
+
+#endif // (_TARGET_X86_ || _TARGET_AMD64_) && _MSC_VER
+ }
+
+ // Sort the given input array (of kLookupMapLengthEntries entries, where the last entry is already sorted)
+ // from lowest to highest value.
+ void SortLengthBuckets(BYTE rgBuckets[])
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // This simplistic selection sort is probably the fastest approach for small values of
+ // kLookupMapLengthEntries.
+ _ASSERTE(kLookupMapLengthEntries < 10);
+
+ // Iterate over every entry apart from the last two, moving the correct sorted value into each in
+ // turn. Don't do the last value because it's already sorted and the second last because it'll be
+ // sorted by the time we've done all the rest.
+ for (DWORD i = 0; i < (kLookupMapLengthEntries - 2); i++)
+ {
+ BYTE bLowValue = rgBuckets[i]; // The lowest value we've seen so far
+ DWORD dwLowIndex = i; // The index which held that value
+
+ // Look through the unsorted entries for the smallest.
+ for (DWORD j = i + 1; j < (kLookupMapLengthEntries - 1); j++)
+ {
+ if (rgBuckets[j] < bLowValue)
+ {
+ // Got a better candidate for smallest.
+ bLowValue = rgBuckets[j];
+ dwLowIndex = j;
+ }
+ }
+
+ // If the original value at the current index wasn't the smallest, swap it with the one that was.
+ if (dwLowIndex != i)
+ {
+ rgBuckets[dwLowIndex] = rgBuckets[i];
+ rgBuckets[i] = bLowValue;
+ }
+ }
+
+#ifdef _DEBUG
+ // Check the table really is sorted.
+ for (DWORD i = 1; i < kLookupMapLengthEntries; i++)
+ _ASSERTE(rgBuckets[i] >= rgBuckets[i - 1]);
+#endif // _DEBUG
+ }
+
+ // Given the histogram of the delta lengths and a prospective table of the subset of those lengths that
+ // we'd utilize to encode the table, return the size (in bits) of the compressed table we'd get as a
+ // result. The algorithm requires that the encoding length table is sorted (smallest to largest length).
+ DWORD PredictCompressedSize(BYTE rgBuckets[])
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD cTotalBits = 0;
+
+ // Iterate over each entry in the histogram (first entry is the number of deltas that can be encoded
+ // in 1 bit, the second is the number of entries encodable in 2 bits etc.).
+ for (DWORD i = 0; i < kBitsPerRVA; i++)
+ {
+ // Start by assuming that we can encode entries in this bucket with their exact length.
+ DWORD cBits = i + 1;
+
+ // Look through the encoding table to find the first (lowest) encoding length that can encode the
+ // values for this bucket.
+ for (DWORD j = 0; j < kLookupMapLengthEntries; j++)
+ {
+ if (cBits <= rgBuckets[j])
+ {
+ // This is the best encoding we can do. Remember the real cost of all entries in this
+ // histogram bucket.
+ cBits = rgBuckets[j];
+ break;
+ }
+ }
+
+ // Each entry for this histogram bucket costs a fixed size index into the encoding length table
+ // (kLookupMapLengthBits), a single bit of delta sign plus the number of bits of delta magnitude
+ // that we calculated above.
+ cTotalBits += (kLookupMapLengthBits + 1 + cBits) * m_rgHistogram[i];
+ }
+
+ return cTotalBits;
+ }
+};
+
+// Allocate a special zap node that will compress the cold rid map associated with the given LookupMap.
+void DataImage::StoreCompressedLayoutMap(LookupMapBase *pMap, ItemKind kind)
+{
+ STANDARD_VM_CONTRACT;
+
+ ZapNode *pNode = new (GetHeap()) ZapCompressedLookupMap(this, pMap, static_cast<BYTE>(kind));
+
+ AddStructureInOrder(pNode);
+}
+
+#endif // !BINDER
+#endif // FEATURE_PREJIT
diff --git a/src/vm/dataimage.h b/src/vm/dataimage.h
new file mode 100644
index 0000000000..1921a268f4
--- /dev/null
+++ b/src/vm/dataimage.h
@@ -0,0 +1,464 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef _DATAIMAGE_H_
+#define _DATAIMAGE_H_
+
+#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
+
+// All we really need is to pre-declare the PrecodeType enum, but g++ doesn't
+// support enum pre-declaration, so we need to include the declaration itself.
+/*#include "cgensys.h" // needed to include precode.h*/
+#include "precode.h"
+
+typedef BYTE ZapRelocationType; // IMAGE_REL_XXX enum
+
+// IMAGE_REL_BASED_PTR is architecture specific reloc of virtual address
+#ifdef _WIN64
+#define IMAGE_REL_BASED_PTR IMAGE_REL_BASED_DIR64
+#else
+#define IMAGE_REL_BASED_PTR IMAGE_REL_BASED_HIGHLOW
+#endif
+
+// Special NGEN-specific relocation type for relative pointer (used to make NGen relocation section smaller)
+#define IMAGE_REL_BASED_RELPTR 0x7D
+
+class CEEPreloader;
+
+class ZapImage;
+class TypeHandleList;
+
+class ZapNode;
+class ZapStoredStructure;
+
+class ZapHeap;
+void *operator new(size_t size, ZapHeap * pZapHeap);
+void *operator new[](size_t size, ZapHeap * pZapHeap);
+
+class InternedStructureTraits;
+typedef SHash<InternedStructureTraits> InternedStructureHashTable;
+
+struct LookupMapBase;
+class InlineTrackingMap;
+
+class DataImage
+{
+public:
+ //
+ // As items are recorded for saving we note some information about the item
+ // to help guide later heuristics.
+ //
+ enum ItemKind
+ {
+ #define DEFINE_ITEM_KIND(id) id,
+ #include "dataimagesection.h"
+
+ ITEM_COUNT,
+ };
+
+ Module *m_module;
+ CEEPreloader *m_preloader;
+ ZapImage * m_pZapImage;
+
+ struct StructureEntry
+ {
+ const void * ptr;
+ ZapNode * pNode;
+ SSIZE_T offset;
+ };
+
+ class StructureTraits : public NoRemoveSHashTraits< DefaultSHashTraits<StructureEntry> >
+ {
+ public:
+ typedef const void * key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e.ptr;
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (k1 == k2);
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t)(size_t)k;
+ }
+
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; StructureEntry e; e.ptr = NULL; return e; }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e.ptr == NULL; }
+ };
+ typedef SHash<StructureTraits> StructureHashTable;
+
+ StructureHashTable m_structures;
+ const StructureEntry * m_pLastLookup; // Cached result of last lookup
+
+ #define MAINTAIN_SAVE_ORDER (0xFFFFFFFF)
+
+ struct SavedNodeEntry
+ {
+ ZapNode * pNode;
+ DWORD dwAssociatedOrder;
+ };
+
+ // These are added in save order; however, after PlaceRemainingStructures they may have been
+ // rearranged based on the class layout order stored in the dwAssociatedOrder field.
+ SArray<SavedNodeEntry> m_structuresInOrder;
+
+ void AddStructureInOrder(ZapNode *pNode, BOOL fMaintainSaveOrder = FALSE);
+
+ struct FixupEntry
+ {
+ ZapRelocationType m_type;
+ DWORD m_offset;
+#ifdef _DEBUG
+ DWORD m_ordinal;
+#endif // _DEBUG
+
+ ZapStoredStructure * m_pLocation;
+ ZapNode * m_pTargetNode;
+ };
+
+ SArray<FixupEntry> m_Fixups;
+ COUNT_T m_iCurrentFixup;
+
+ void AppendFixup(FixupEntry entry)
+ {
+#ifdef _DEBUG
+ static DWORD s_ordinal = 1;
+ entry.m_ordinal = s_ordinal++;
+#endif // _DEBUG
+ m_Fixups.Append(entry);
+ }
+
+ static int __cdecl fixupEntryCmp(const void* a_, const void* b_);
+
+ void FixupSectionRange(SIZE_T offset, ZapNode * pNode);
+ void FixupSectionPtr(SIZE_T offset, ZapNode * pNode);
+ void FixupJumpStubPtr(SIZE_T offset, CorInfoHelpFunc ftnNum);
+
+ void FixupModuleRVAs();
+
+ InternedStructureHashTable * m_pInternedStructures;
+ SetSHash<ZapNode *> m_reusedStructures;
+
+ struct RvaInfoStructure
+ {
+ FieldDesc * pFD;
+ DWORD rva;
+ UINT size;
+ UINT align;
+ };
+
+ SArray<RvaInfoStructure> m_rvaInfoVector;
+
+ static int __cdecl rvaInfoVectorEntryCmp(const void* a_, const void* b_);
+
+ MapSHash<PVOID,PVOID> m_surrogates;
+
+ // Often set while a class is being saved in order to associate
+ // stored structures with the class, and therefore its layout order.
+ // Note that it is a best guess and not always set.
+ MethodTable * m_pCurrentAssociatedMethodTable;
+
+ struct MethodProfilingData
+ {
+ MethodDesc *pMD;
+ DWORD flags;
+ };
+
+ class MethodProfilingDataTraits : public NoRemoveSHashTraits< DefaultSHashTraits<MethodProfilingData> >
+ {
+ public:
+ typedef const MethodDesc * key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e.pMD;
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (k1 == k2);
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t)(size_t)k;
+ }
+
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; MethodProfilingData e; e.pMD = NULL; e.flags = 0; return e; }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e.pMD == NULL; }
+ };
+ typedef SHash<MethodProfilingDataTraits> MethodProfilingDataHashTable;
+
+ MethodProfilingDataHashTable m_methodProfilingData;
+
+ // This is a hashmap from inlinee method to an array of inliner methods
+ // so it can answer the question: "Where did this method get inlined?"
+ InlineTrackingMap *m_inlineTrackingMap;
+
+ public:
+#ifndef CLR_STANDALONE_BINDER
+ DataImage(Module *module, CEEPreloader *preloader);
+#else
+ DataImage(Module *module, ZapImage *pZapImage);
+#endif
+ ~DataImage();
+
+ void Preallocate();
+
+ void PreSave();
+ void PostSave();
+
+ Module *GetModule() { LIMITED_METHOD_CONTRACT; return m_module; }
+
+ DWORD GetMethodProfilingFlags(MethodDesc * pMD);
+ void SetMethodProfilingFlags(MethodDesc * pMD, DWORD flags);
+
+ CEEPreloader *GetPreloader() { LIMITED_METHOD_CONTRACT; return m_preloader; }
+
+ ZapHeap * GetHeap();
+
+ //
+ // Data is stored in the image store in three phases.
+ //
+
+ //
+ // In the first phase, all objects are assigned locations in the
+ // data store. This is done by calling StoreStructure on all
+ // structures which are being stored into the image.
+ //
+ // This would typically be done by methods on the objects themselves,
+ // each of which stores itself and any objects it references.
+ // Reference loops must be explicitly tested for using IsStored.
+ // (Each structure can be stored only once.)
+ //
+ // Note that StoreStructure makes no guarantees about layout order.
+ // If you want structures of a particular kind to be laid out in
+ // the order they are saved, use StoreStructureInOrder.
+ //
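+ // A typical save routine therefore looks roughly like this (an illustrative
+ // sketch only; MyStruct and m_pOther are hypothetical):
+ //
+ //   void MyStruct::Save(DataImage *image)
+ //   {
+ //       image->StoreStructure(this, sizeof(MyStruct), DataImage::ITEM_MODULE);
+ //
+ //       // Guard against reference loops: each structure is stored only once.
+ //       if (m_pOther != NULL && !image->IsStored(m_pOther))
+ //           m_pOther->Save(image);
+ //   }
+ //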
+
+ inline ZapStoredStructure * StoreStructure(const void *data, SIZE_T size,
+ ItemKind kind,
+ int align = sizeof(TADDR))
+ {
+ return StoreStructureHelper(data, size, kind, align, FALSE);
+ }
+
+ inline ZapStoredStructure * StoreStructureInOrder(const void *data, SIZE_T size,
+ ItemKind kind,
+ int align = sizeof(TADDR))
+ {
+ return StoreStructureHelper(data, size, kind, align, TRUE);
+ }
+
+ ZapStoredStructure * StoreStructureHelper(const void *data, SIZE_T size,
+ ItemKind kind,
+ int align,
+ BOOL fMaintainSaveOrder);
+
+ // Often set while a class is being saved in order to associate
+ // stored structures with the class, and therefore its layout order.
+ // Note that it is a best guess and not always set.
+ inline void BeginAssociatingStoredObjectsWithMethodTable(MethodTable *pMT)
+ {
+ m_pCurrentAssociatedMethodTable = pMT;
+ }
+
+ inline void EndAssociatingStoredObjectsWithMethodTable()
+ {
+ m_pCurrentAssociatedMethodTable = NULL;
+ }
+
+ // Bind pointer to the relative offset in ZapNode
+ void BindPointer(const void *p, ZapNode * pNode, SSIZE_T offset);
+
+ void BindPointer(const void *p, ZapStoredStructure * pNode, SSIZE_T offset)
+ {
+ BindPointer(p, (ZapNode *)pNode, offset);
+ }
+
+ void CopyData(ZapStoredStructure * pNode, const void * p, ULONG size);
+ void CopyDataToOffset(ZapStoredStructure * pNode, ULONG offset, const void * p, ULONG size);
+
+ //
+ // In the second phase, data is arranged in the image by successive calls
+ // to PlaceMappedRange. Items are arranged using pointers to data structures in the
+ // original heap, or by giving a StoredStructure along with the original
+ // mapping.
+ //
+
+ // Concrete mapped ranges are the ones that actually correspond to allocations
+ // of new space within the image. They should be placed first. We do not
+ // necessarily populate the space in the image (i.e. copy the data to the image)
+ // from the concrete range: for example the space associated with a
+ // combo structure gets filled by copying the data from the individual items
+ // that make up the parts of the combo structure.
+ //
+ // These can tolerate placing the same item multiple times.
+ // PlaceInternedStructureForAddress allows a different section to be used depending on
+ // whether an interned structure actually had duplicates in this image.
+ //
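+ // For example, a saved structure might be placed with (an illustrative sketch
+ // only; the section enumerator below is a placeholder, not a known value):
+ //
+ //   image->PlaceStructureForAddress(pMT, CORCOMPILE_SECTION_SOME_SECTION);
+ //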
+ void PlaceStructureForAddress(const void * data, CorCompileSection section);
+ void PlaceInternedStructureForAddress(const void * data, CorCompileSection sectionIfReused, CorCompileSection sectionIfSingleton);
+
+ void FixupPointerField(PVOID p, SSIZE_T offset);
+ void FixupRelativePointerField(PVOID p, SSIZE_T offset);
+
+ void FixupField(PVOID p, SSIZE_T offset, PVOID pTarget, SSIZE_T targetOffset = 0, ZapRelocationType type = IMAGE_REL_BASED_PTR);
+
+ void FixupFieldToNode(PVOID p, SSIZE_T offset, ZapNode * pTarget, SSIZE_T targetOffset = 0, ZapRelocationType type = IMAGE_REL_BASED_PTR);
+
+ void FixupFieldToNode(PVOID p, SSIZE_T offset, ZapStoredStructure * pTarget, SSIZE_T targetOffset = 0, ZapRelocationType type = IMAGE_REL_BASED_PTR)
+ {
+ return FixupFieldToNode(p, offset, (ZapNode *)pTarget, targetOffset, type);
+ }
+
+ BOOL IsStored(const void *data)
+ { WRAPPER_NO_CONTRACT; return m_structures.LookupPtr(data) != NULL; }
+
+ DWORD GetRVA(const void *data);
+
+ void ZeroField(PVOID p, SSIZE_T offset, SIZE_T size);
+ void *GetImagePointer(ZapStoredStructure * pNode);
+ void *GetImagePointer(PVOID p, SSIZE_T offset = 0);
+ ZapNode * GetNodeForStructure(PVOID p, SSIZE_T * pOffset);
+
+ void ZeroPointerField(PVOID p, SSIZE_T offset)
+ { WRAPPER_NO_CONTRACT; ZeroField(p, offset, sizeof(void*)); }
+
+
+ ZapStoredStructure * StoreInternedStructure(const void *data, ULONG size,
+ ItemKind kind,
+ int align = sizeof(TADDR));
+
+ void NoteReusedStructure(const void *data);
+
+ void StoreRvaInfo(FieldDesc * pFD,
+ DWORD rva,
+ UINT size,
+ UINT align);
+
+ void SaveRvaStructure();
+ void FixupRvaStructure();
+
+ // Surrogates are used to reorganize the data before they are saved. RegisterSurrogate and LookupSurrogate
+ // maintain the mapping from the original data to the reorganized data.
+ void RegisterSurrogate(PVOID ptr, PVOID surrogate);
+ PVOID LookupSurrogate(PVOID ptr);
+
+ void PlaceRemainingStructures();
+
+ void FixupRVAs();
+
+ void SetRVAsForFields(IMetaDataEmit * pEmit);
+
+ // Called when data contains a function address. The data store
+ // can return a fixed compiled code address if it is compiling
+ // code for the module.
+ ZapNode * GetCodeAddress(MethodDesc * method);
+
+ // Returns TRUE if the method can be called directly without going through the prestub
+ BOOL CanDirectCall(MethodDesc * method, CORINFO_ACCESS_FLAGS accessFlags = CORINFO_ACCESS_ANY);
+
+ // Returns the method fixup info if it has one, NULL if method has no fixup info
+ ZapNode * GetFixupList(MethodDesc * method);
+
+ ZapNode * GetHelperThunk(CorInfoHelpFunc ftnNum);
+
+ // pUniqueId is used to allocate unique cells for cases where we cannot use the shared cell.
+ ZapNode * GetTypeHandleImport(TypeHandle th, PVOID pUniqueId = NULL);
+ ZapNode * GetMethodHandleImport(MethodDesc * pMD);
+ ZapNode * GetFieldHandleImport(FieldDesc * pFD);
+ ZapNode * GetModuleHandleImport(Module * pModule);
+ DWORD GetModuleImportIndex(Module * pModule);
+
+ ZapNode * GetExistingTypeHandleImport(TypeHandle th);
+ ZapNode * GetExistingMethodHandleImport(MethodDesc * pMD);
+ ZapNode * GetExistingFieldHandleImport(FieldDesc * pFD);
+
+ ZapNode * GetVirtualImportThunk(MethodTable * pMT, MethodDesc * pMD, int slotNumber);
+
+ ZapNode * GetGenericSignature(PVOID signature, BOOL fMethod);
+
+ void SavePrecode(PVOID ptr, MethodDesc * pMD, PrecodeType t, ItemKind kind, BOOL fIsPrebound = FALSE);
+
+ void StoreCompressedLayoutMap(LookupMapBase *pMap, ItemKind kind);
+
+ // "Fixup" here means "save the pointer either as a pointer or as an indirection"
+ void FixupModulePointer(Module * pModule, PVOID p, SSIZE_T offset, ZapRelocationType type);
+ void FixupMethodTablePointer(MethodTable * pMT, PVOID p, SSIZE_T offset, ZapRelocationType type);
+ void FixupTypeHandlePointer(TypeHandle th, PVOID p, SSIZE_T offset, ZapRelocationType type);
+ void FixupMethodDescPointer(MethodDesc * pMD, PVOID p, SSIZE_T offset, ZapRelocationType type);
+ void FixupFieldDescPointer(FieldDesc * pFD, PVOID p, SSIZE_T offset, ZapRelocationType type);
+
+#ifndef CLR_STANDALONE_BINDER
+ void FixupModulePointer(PVOID p, FixupPointer<PTR_Module> * ppModule);
+#else
+ void FixupModulePointer(PVOID p, FixupPointer<PTR_ClrModule> * ppModule);
+#endif
+ void FixupMethodTablePointer(PVOID p, FixupPointer<PTR_MethodTable> * ppMT);
+ void FixupTypeHandlePointer(PVOID p, FixupPointer<TypeHandle> * pth);
+ void FixupMethodDescPointer(PVOID p, FixupPointer<PTR_MethodDesc> * ppMD);
+ void FixupFieldDescPointer(PVOID p, FixupPointer<PTR_FieldDesc> * ppFD);
+
+#ifndef CLR_STANDALONE_BINDER
+ void FixupModulePointer(PVOID p, RelativeFixupPointer<PTR_Module> * ppModule);
+#else
+ void FixupModulePointer(PVOID p, RelativeFixupPointer<PTR_ClrModule> * ppModule);
+#endif
+ void FixupMethodTablePointer(PVOID p, RelativeFixupPointer<PTR_MethodTable> * ppMT);
+ void FixupTypeHandlePointer(PVOID p, RelativeFixupPointer<TypeHandle> * pth);
+ void FixupMethodDescPointer(PVOID p, RelativeFixupPointer<PTR_MethodDesc> * ppMD);
+ void FixupFieldDescPointer(PVOID p, RelativeFixupPointer<PTR_FieldDesc> * ppFD);
+
+ // "HardBind" here means "save a reference using a (relocatable) pointer,
+ // where the object we're referring to lives either in an external hard-bound DLL
+ // or in the image currently being saved"
+ //
+ BOOL CanHardBindToZapModule(Module *targetModule);
+
+ void ReportInlining(CORINFO_METHOD_HANDLE inliner, CORINFO_METHOD_HANDLE inlinee);
+ InlineTrackingMap *GetInlineTrackingMap();
+
+private:
+ BOOL CanEagerBindTo(Module *targetModule, Module *pPreferredZapModule, void *address);
+
+public:
+ // "EagerBind" here means "save a reference using a pointer in the image currently being saved,
+ // or an indirection cell referring to an external DLL"
+ BOOL CanEagerBindToTypeHandle(TypeHandle th, BOOL fRequirePrerestore = FALSE, TypeHandleList *pVisited = NULL);
+ BOOL CanEagerBindToMethodTable(MethodTable *pMT, BOOL fRequirePrerestore = FALSE, TypeHandleList *pVisited = NULL);
+ BOOL CanEagerBindToMethodDesc(MethodDesc *pMD, BOOL fRequirePrerestore = FALSE, TypeHandleList *pVisited = NULL);
+ BOOL CanEagerBindToFieldDesc(FieldDesc *pFD, BOOL fRequirePrerestore = FALSE, TypeHandleList *pVisited = NULL);
+ BOOL CanEagerBindToModule(Module *pModule);
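+
+    // A hedged sketch of how the queries above are meant to combine
+    // (hypothetical caller): eager binding stores the handle up front via the
+    // import accessors declared earlier, otherwise a load-time fixup is
+    // recorded:
+    //
+    //   if (image->CanEagerBindToMethodDesc(pMD))
+    //       ; // reference GetMethodHandleImport(pMD) directly
+    //   else
+    //       ; // record a fixup to resolve the method at load time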
+
+ // These also check that the target object doesn't need a restore action
+ // upon reload.
+ BOOL CanPrerestoreEagerBindToTypeHandle(TypeHandle th, TypeHandleList *pVisited);
+ BOOL CanPrerestoreEagerBindToMethodTable(MethodTable *pMT, TypeHandleList *pVisited);
+ BOOL CanPrerestoreEagerBindToMethodDesc(MethodDesc *pMD, TypeHandleList *pVisited);
+
+ void HardBindTypeHandlePointer(PVOID p, SSIZE_T offset);
+
+    // This is an obsolete in-place fixup that we should get rid of. For now, it is used for:
+    // - FnPtrTypeDescs. These should not be stored in NGen images at all.
+    // - stubs-as-il signatures. These should use tokens when stored in the NGen image.
+ void FixupTypeHandlePointerInPlace(PVOID p, SSIZE_T offset, BOOL fForceFixup = FALSE);
+
+ void BeginRegion(CorInfoRegionKind regionKind);
+ void EndRegion(CorInfoRegionKind regionKind);
+};
+
+#endif // FEATURE_PREJIT && !DACCESS_COMPILE
+
+#endif // _DATAIMAGE_H_
diff --git a/src/vm/dataimagesection.h b/src/vm/dataimagesection.h
new file mode 100644
index 0000000000..8bcb3570e7
--- /dev/null
+++ b/src/vm/dataimagesection.h
@@ -0,0 +1,105 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef FEATURE_PREJIT
+#error FEATURE_PREJIT is required for this file
+#endif
+
+
+#ifndef DEFINE_ITEM_KIND
+#define DEFINE_ITEM_KIND(id)
+#endif
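+
+// This file is an X-macro list: a consumer defines DEFINE_ITEM_KIND before
+// including it, and each entry below expands through that definition. A
+// minimal hypothetical consumer (illustrative, not a real call site):
+//
+//   #define DEFINE_ITEM_KIND(id) id,
+//   enum ItemKind {
+//   #include "dataimagesection.h"
+//       ITEM_COUNT
+//   };
+//
+// The #undef at the bottom of this file resets the macro for the next include.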
+
+//-----------------------------------------------------------------------------
+// Items
+//-----------------------------------------------------------------------------
+
+DEFINE_ITEM_KIND(ITEM_MODULE)
+DEFINE_ITEM_KIND(ITEM_FILEREF_MAP)
+DEFINE_ITEM_KIND(ITEM_ASSEMREF_MAP)
+DEFINE_ITEM_KIND(ITEM_GC_STATIC_HANDLES_HOT)
+DEFINE_ITEM_KIND(ITEM_DYNAMIC_STATICS_INFO_TABLE)
+DEFINE_ITEM_KIND(ITEM_DYNAMIC_STATICS_INFO_ENTRY)
+DEFINE_ITEM_KIND(ITEM_RID_MAP_HOT)
+DEFINE_ITEM_KIND(ITEM_TYPEDEF_MAP)
+DEFINE_ITEM_KIND(ITEM_MODULE_CCTOR_INFO_HOT)
+DEFINE_ITEM_KIND(ITEM_MODULE_CCTOR_INFO_COLD)
+DEFINE_ITEM_KIND(ITEM_STORED_METHOD_SIG)
+DEFINE_ITEM_KIND(ITEM_STORED_METHOD_SIG_READONLY)
+DEFINE_ITEM_KIND(ITEM_STORED_METHOD_SIG_READONLY_WARM)
+DEFINE_ITEM_KIND(ITEM_STORED_METHOD_NAME)
+DEFINE_ITEM_KIND(ITEM_PROPERTY_NAME_SET)
+DEFINE_ITEM_KIND(ITEM_BINDER)
+DEFINE_ITEM_KIND(ITEM_BINDER_ITEMS)
+DEFINE_ITEM_KIND(ITEM_TYPEREF_MAP)
+DEFINE_ITEM_KIND(ITEM_METHODDEF_MAP)
+DEFINE_ITEM_KIND(ITEM_FIELDDEF_MAP)
+DEFINE_ITEM_KIND(ITEM_MEMBERREF_MAP)
+DEFINE_ITEM_KIND(ITEM_GENERICPARAM_MAP)
+DEFINE_ITEM_KIND(ITEM_GENERICTYPEDEF_MAP)
+DEFINE_ITEM_KIND(ITEM_PROPERTYINFO_MAP)
+DEFINE_ITEM_KIND(ITEM_DISPATCH_MAP)
+DEFINE_ITEM_KIND(ITEM_PARAM_TYPEDESC)
+DEFINE_ITEM_KIND(ITEM_ARRAY_TYPEDESC)
+#ifdef FEATURE_COMINTEROP
+DEFINE_ITEM_KIND(ITEM_SPARSE_VTABLE_MAP_TABLE)
+DEFINE_ITEM_KIND(ITEM_SPARSE_VTABLE_MAP_ENTRIES)
+#endif // FEATURE_COMINTEROP
+DEFINE_ITEM_KIND(ITEM_EECLASS)
+DEFINE_ITEM_KIND(ITEM_EECLASS_COLD)
+DEFINE_ITEM_KIND(ITEM_EECLASS_WARM)
+DEFINE_ITEM_KIND(ITEM_CLASS_VARIANCE_INFO)
+DEFINE_ITEM_KIND(ITEM_FIELD_DESC_LIST)
+DEFINE_ITEM_KIND(ITEM_FIELD_MARSHALERS)
+DEFINE_ITEM_KIND(ITEM_ENUM_VALUES)
+DEFINE_ITEM_KIND(ITEM_ENUM_NAME_POINTERS)
+DEFINE_ITEM_KIND(ITEM_ENUM_NAME)
+DEFINE_ITEM_KIND(ITEM_DICTIONARY_LAYOUT)
+DEFINE_ITEM_KIND(ITEM_TYVAR_TYPEDESC)
+DEFINE_ITEM_KIND(ITEM_FPTR_TYPEDESC)
+DEFINE_ITEM_KIND(ITEM_DICTIONARY)
+DEFINE_ITEM_KIND(ITEM_DICTIONARY_WRITEABLE)
+DEFINE_ITEM_KIND(ITEM_METHOD_TABLE)
+DEFINE_ITEM_KIND(ITEM_METHOD_TABLE_SPECIAL_WRITEABLE)
+DEFINE_ITEM_KIND(ITEM_METHOD_TABLE_DATA_HOT_WRITEABLE)
+DEFINE_ITEM_KIND(ITEM_METHOD_TABLE_DATA_COLD_WRITEABLE)
+DEFINE_ITEM_KIND(ITEM_INTERFACE_MAP)
+DEFINE_ITEM_KIND(ITEM_VTABLE_CHUNK)
+DEFINE_ITEM_KIND(ITEM_GUID_INFO)
+DEFINE_ITEM_KIND(ITEM_GENERICS_STATIC_FIELDDESCS)
+DEFINE_ITEM_KIND(ITEM_RVA_STATICS)
+DEFINE_ITEM_KIND(ITEM_DEBUG)
+DEFINE_ITEM_KIND(ITEM_GC_STATIC_HANDLES_COLD)
+DEFINE_ITEM_KIND(ITEM_METHOD_PRECODE_COLD_WRITEABLE)
+DEFINE_ITEM_KIND(ITEM_METHOD_PRECODE_COLD)
+DEFINE_ITEM_KIND(ITEM_METHOD_PRECODE_HOT_WRITEABLE)
+DEFINE_ITEM_KIND(ITEM_METHOD_PRECODE_HOT)
+DEFINE_ITEM_KIND(ITEM_METHOD_DESC_COLD_WRITEABLE)
+DEFINE_ITEM_KIND(ITEM_METHOD_DESC_COLD)
+DEFINE_ITEM_KIND(ITEM_METHOD_DESC_HOT_WRITEABLE)
+DEFINE_ITEM_KIND(ITEM_METHOD_DESC_HOT)
+DEFINE_ITEM_KIND(ITEM_CROSS_DOMAIN_INFO)
+DEFINE_ITEM_KIND(ITEM_CER_ROOT_TABLE)
+DEFINE_ITEM_KIND(ITEM_CER_METHOD_LIST)
+DEFINE_ITEM_KIND(ITEM_CER_RESTORE_FLAGS)
+DEFINE_ITEM_KIND(ITEM_VTS_INFO)
+DEFINE_ITEM_KIND(ITEM_MODULE_SECDESC)
+DEFINE_ITEM_KIND(ITEM_FROZEN_OBJECTS)
+DEFINE_ITEM_KIND(ITEM_NGEN_HASH_HOT)
+DEFINE_ITEM_KIND(ITEM_NGEN_HASH_COLD)
+DEFINE_ITEM_KIND(ITEM_NGEN_HASH_BUCKETLIST_HOT)
+DEFINE_ITEM_KIND(ITEM_NGEN_HASH_BUCKETLIST_COLD)
+DEFINE_ITEM_KIND(ITEM_NGEN_HASH_ENTRIES_HOT)
+DEFINE_ITEM_KIND(ITEM_NGEN_HASH_ENTRIES_COLD)
+DEFINE_ITEM_KIND(ITEM_NGEN_HASH_ENTRIES_RO_HOT)
+DEFINE_ITEM_KIND(ITEM_NGEN_HASH_ENTRIES_RO_COLD)
+DEFINE_ITEM_KIND(ITEM_COMPRESSED_MAP)
+DEFINE_ITEM_KIND(ITEM_INLINING_DATA)
+
+#ifdef DEFINE_ITEM_KIND
+#undef DEFINE_ITEM_KIND
+#endif
diff --git a/src/vm/dbggcinfodecoder.cpp b/src/vm/dbggcinfodecoder.cpp
new file mode 100644
index 0000000000..fd59cc075b
--- /dev/null
+++ b/src/vm/dbggcinfodecoder.cpp
@@ -0,0 +1,933 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+
+#include "common.h"
+#include "gcinfodecoder.h"
+
+#ifdef VERIFY_GCINFO
+#ifdef USE_GC_INFO_DECODER
+
+#include "dbggcinfodecoder.h"
+
+#ifndef GCINFODECODER_CONTRACT
+#define GCINFODECODER_CONTRACT(contract) contract
+#endif // !GCINFODECODER_CONTRACT
+
+#ifndef GET_CALLER_SP
+#define GET_CALLER_SP(pREGDISPLAY) EECodeManager::GetCallerSp(pREGDISPLAY)
+#endif // !GET_CALLER_SP
+
+#ifndef VALIDATE_OBJECTREF
+#ifdef DACCESS_COMPILE
+#define VALIDATE_OBJECTREF(objref, fDeep)
+#else // DACCESS_COMPILE
+#define VALIDATE_OBJECTREF(objref, fDeep) OBJECTREFToObject(objref)->Validate(fDeep)
+#endif // DACCESS_COMPILE
+#endif // !VALIDATE_OBJECTREF
+
+#ifndef VALIDATE_ROOT
+#define VALIDATE_ROOT(isInterior, hCallBack, pObjRef) \
+ do { \
+ /* Only call Object::Validate() with bDeep == TRUE if we are in the promote phase. */ \
+ /* We should call Validate() with bDeep == FALSE if we are in the relocation phase. */ \
+ \
+ GCCONTEXT* pGCCtx = (GCCONTEXT*)(hCallBack); \
+ \
+ if (!(isInterior) && !(m_Flags & DECODE_NO_VALIDATION)) \
+ VALIDATE_OBJECTREF(*(pObjRef), pGCCtx->sc->promotion == TRUE); \
+ } while (0)
+#endif // !VALIDATE_ROOT
+
+
+
+namespace DbgGcInfo {
+
+
+//static
+bool GcInfoDecoder::SetIsInterruptibleCB (UINT32 startOffset, UINT32 stopOffset, LPVOID hCallback)
+{
+ GcInfoDecoder *pThis = (GcInfoDecoder*)hCallback;
+
+ bool fStop = pThis->m_InstructionOffset >= startOffset && pThis->m_InstructionOffset < stopOffset;
+
+ if (fStop)
+ pThis->m_IsInterruptible = true;
+
+ return fStop;
+}
+
+
+GcInfoDecoder::GcInfoDecoder(
+ const BYTE* gcInfoAddr,
+ GcInfoDecoderFlags flags,
+ UINT32 breakOffset
+ )
+ : m_Reader( gcInfoAddr )
+ , m_InstructionOffset( breakOffset )
+ , m_IsInterruptible( false )
+ , m_pLiveRegisters( NULL )
+ , m_pLiveStackSlots( NULL )
+ , m_NumLiveRegisters(0)
+ , m_NumLiveStackSlots(0)
+#ifdef _DEBUG
+ , m_Flags( flags )
+#endif
+{
+#ifdef _TARGET_ARM_
+ _ASSERTE(!"JIT32 is not generating GCInfo in the correct format yet!");
+#endif
+
+ _ASSERTE( (flags & (DECODE_INTERRUPTIBILITY | DECODE_GC_LIFETIMES)) || (0 == breakOffset) );
+
+ // The current implementation doesn't support the two flags together
+ _ASSERTE(
+ ((flags & (DECODE_INTERRUPTIBILITY | DECODE_GC_LIFETIMES)) != (DECODE_INTERRUPTIBILITY | DECODE_GC_LIFETIMES))
+ );
+
+
+ //--------------------------------------------
+ // Pre-decode information
+ //--------------------------------------------
+
+ m_IsVarArg = (m_Reader.Read(1)) ? true : false;
+
+ size_t hasSecurityObject = m_Reader.Read(1);
+ if(hasSecurityObject)
+ m_SecurityObjectStackSlot = (INT32) DENORMALIZE_STACK_SLOT(m_Reader.DecodeVarLengthSigned(SECURITY_OBJECT_STACK_SLOT_ENCBASE));
+ else
+ m_SecurityObjectStackSlot = NO_SECURITY_OBJECT;
+
+ size_t hasPSPSym = m_Reader.Read(1);
+ if(hasPSPSym)
+ {
+ m_PSPSymStackSlot = (INT32) DENORMALIZE_STACK_SLOT(m_Reader.DecodeVarLengthSigned(PSP_SYM_STACK_SLOT_ENCBASE));
+ }
+ else
+ {
+ m_PSPSymStackSlot = NO_PSP_SYM;
+ }
+
+ size_t hasGenericsInstContext = m_Reader.Read(1);
+ if(hasGenericsInstContext)
+ {
+ m_GenericsInstContextStackSlot = (INT32) DENORMALIZE_STACK_SLOT(m_Reader.DecodeVarLengthSigned(GENERICS_INST_CONTEXT_STACK_SLOT_ENCBASE));
+ }
+ else
+ {
+ m_GenericsInstContextStackSlot = NO_GENERICS_INST_CONTEXT;
+ }
+
+ m_CodeLength = (UINT32) DENORMALIZE_CODE_LENGTH(m_Reader.DecodeVarLengthUnsigned(CODE_LENGTH_ENCBASE));
+
+ size_t hasStackBaseRegister = m_Reader.Read(1);
+ if(hasStackBaseRegister)
+ m_StackBaseRegister = (UINT32) DENORMALIZE_STACK_BASE_REGISTER(m_Reader.DecodeVarLengthUnsigned(STACK_BASE_REGISTER_ENCBASE));
+ else
+ m_StackBaseRegister = NO_STACK_BASE_REGISTER;
+
+ size_t hasSizeOfEditAndContinuePreservedArea = m_Reader.Read(1);
+ if(hasSizeOfEditAndContinuePreservedArea)
+ m_SizeOfEditAndContinuePreservedArea = (UINT32) m_Reader.DecodeVarLengthUnsigned(SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA_ENCBASE);
+ else
+ m_SizeOfEditAndContinuePreservedArea = NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA;
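+
+    // The optional header fields above share one encoding shape (inferred from
+    // the reads performed here, illustrative rather than normative): a single
+    // presence bit, followed when set by a variable-length payload, e.g.
+    //
+    //   1 vls(slot)   -> security object stack slot present at 'slot'
+    //   0             -> no PSP sym
+    //
+    // where vls/vlu denote DecodeVarLengthSigned/Unsigned with the ENCBASE
+    // constants used above.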
+
+#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
+ m_SizeOfStackOutgoingAndScratchArea = (UINT32)DENORMALIZE_SIZE_OF_STACK_AREA(m_Reader.DecodeVarLengthUnsigned(SIZE_OF_STACK_AREA_ENCBASE));
+#endif // FIXED_STACK_PARAMETER_SCRATCH_AREA
+
+ m_NumInterruptibleRanges = (UINT32) DENORMALIZE_NUM_INTERRUPTIBLE_RANGES(m_Reader.DecodeVarLengthUnsigned(NUM_INTERRUPTIBLE_RANGES_ENCBASE));
+
+ if( flags & DECODE_INTERRUPTIBILITY )
+ {
+ EnumerateInterruptibleRanges(&SetIsInterruptibleCB, this);
+ }
+}
+
+
+bool GcInfoDecoder::IsInterruptible()
+{
+ _ASSERTE( m_Flags & DECODE_INTERRUPTIBILITY );
+ return m_IsInterruptible;
+}
+
+
+void GcInfoDecoder::EnumerateInterruptibleRanges (
+ EnumerateInterruptibleRangesCallback *pCallback,
+ LPVOID hCallback)
+{
+#if 0
+#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+
+ //------------------------------------------------------------------------------
+ // Try partially interruptible first
+ //------------------------------------------------------------------------------
+
+ UINT32 numCallSites = (UINT32)m_Reader.Read( sizeof( numCallSites ) * 8 );
+ UINT32 callSiteIdx = 0;
+
+ if( numCallSites > 0 )
+ {
+ UINT32 numSlotMappings = (UINT32)m_Reader.Read( sizeof( numSlotMappings ) * 8 );
+
+ // Align the reader to the next byte to continue decoding
+ m_Reader.Skip( ( 8 - ( m_Reader.GetCurrentPos() % 8 ) ) % 8 );
+
+ for( callSiteIdx=0; callSiteIdx<numCallSites; callSiteIdx++ )
+ {
+ UINT32 instructionOffset = (UINT32)m_Reader.Read( 32 );
+
+ bool fStop = pCallback(instructionOffset, instructionOffset+1, hCallback);
+ if (fStop)
+ return;
+
+ m_Reader.Skip( numSlotMappings );
+ }
+
+ // Call site not found. Skip the slot mapping table in preparation for reading the fully-interruptible information
+ m_Reader.Skip( numSlotMappings * sizeof( GcSlotDesc ) * 8 );
+ }
+
+#endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+#endif
+
+
+    // If no info is found for the call site, we default to fully-interruptible
+ LOG((LF_GCROOTS, LL_INFO1000000, "No GC info found for call site at offset %x. Defaulting to fully-interruptible information.\n", (int) m_InstructionOffset));
+
+ // Align the reader to the next byte to continue decoding
+ m_Reader.Skip( ( 8 - ( m_Reader.GetCurrentPos() % 8 ) ) % 8 );
+
+ UINT32 lastInterruptibleRangeStopOffsetNormalized = 0;
+
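+    // A worked illustration of the delta decoding below (illustrative values,
+    // normalized offsets): encoded pairs (5,2) then (2,3) expand to
+    //   range 1: [0 + 5, 5 + (2+1))   = [5, 8)
+    //   range 2: [8 + 2, 10 + (3+1))  = [10, 14)
+    // Each start is relative to the previous stop, and the +1 bias on the
+    // stop delta makes empty ranges unrepresentable.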
+ for(UINT32 i=0; i<m_NumInterruptibleRanges; i++)
+ {
+ UINT32 normStartDelta = (UINT32) m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA_ENCBASE );
+ UINT32 normStopDelta = (UINT32) m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA_ENCBASE ) + 1;
+
+ UINT32 rangeStartOffsetNormalized = lastInterruptibleRangeStopOffsetNormalized + normStartDelta;
+ UINT32 rangeStopOffsetNormalized = rangeStartOffsetNormalized + normStopDelta;
+
+ UINT32 rangeStartOffset = DENORMALIZE_CODE_OFFSET(rangeStartOffsetNormalized);
+ UINT32 rangeStopOffset = DENORMALIZE_CODE_OFFSET(rangeStopOffsetNormalized);
+
+ bool fStop = pCallback(rangeStartOffset, rangeStopOffset, hCallback);
+ if (fStop)
+ return;
+
+ lastInterruptibleRangeStopOffsetNormalized = rangeStopOffsetNormalized;
+ }
+}
+
+
+INT32 GcInfoDecoder::GetSecurityObjectStackSlot()
+{
+ _ASSERTE( m_Flags & DECODE_SECURITY_OBJECT );
+ return m_SecurityObjectStackSlot;
+}
+
+INT32 GcInfoDecoder::GetGenericsInstContextStackSlot()
+{
+ _ASSERTE( m_Flags & DECODE_GENERICS_INST_CONTEXT );
+ return m_GenericsInstContextStackSlot;
+}
+
+INT32 GcInfoDecoder::GetPSPSymStackSlot()
+{
+ _ASSERTE( m_Flags & DECODE_PSP_SYM);
+ return m_PSPSymStackSlot;
+}
+
+bool GcInfoDecoder::GetIsVarArg()
+{
+ _ASSERTE( m_Flags & DECODE_VARARG );
+ return m_IsVarArg;
+}
+
+UINT32 GcInfoDecoder::GetCodeLength()
+{
+ _ASSERTE( m_Flags & DECODE_CODE_LENGTH );
+ return m_CodeLength;
+}
+
+UINT32 GcInfoDecoder::GetStackBaseRegister()
+{
+ return m_StackBaseRegister;
+}
+
+UINT32 GcInfoDecoder::GetSizeOfEditAndContinuePreservedArea()
+{
+ _ASSERTE( m_Flags & DECODE_EDIT_AND_CONTINUE );
+ return m_SizeOfEditAndContinuePreservedArea;
+}
+
+
+#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
+
+UINT32 GcInfoDecoder::GetSizeOfStackParameterArea()
+{
+ return m_SizeOfStackOutgoingAndScratchArea;
+}
+
+#endif // FIXED_STACK_PARAMETER_SCRATCH_AREA
+
+
+bool GcInfoDecoder::EnumerateLiveSlots(
+ PREGDISPLAY pRD,
+ bool reportScratchSlots,
+ unsigned flags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack
+ )
+{
+ _ASSERTE( m_Flags & DECODE_GC_LIFETIMES );
+
+#if 0
+#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+
+ //------------------------------------------------------------------------------
+ // Try partially interruptible first
+ //------------------------------------------------------------------------------
+
+ UINT32 numCallSites = (UINT32)m_Reader.Read( sizeof( numCallSites ) * 8 );
+ UINT32 callSiteIdx = 0;
+
+ if( numCallSites > 0 )
+ {
+ UINT32 numSlotMappings = (UINT32)m_Reader.Read( sizeof( numSlotMappings ) * 8 );
+
+ // Align the reader to the next byte to continue decoding
+ m_Reader.Skip( ( 8 - ( m_Reader.GetCurrentPos() % 8 ) ) % 8 );
+
+ for( callSiteIdx=0; callSiteIdx<numCallSites; callSiteIdx++ )
+ {
+ UINT32 instructionOffset = (UINT32)m_Reader.Read( 32 );
+ if( instructionOffset == m_InstructionOffset )
+ {
+ m_IsInterruptible = true;
+
+ BYTE* callSiteLiveSet = (BYTE*) _alloca( ( numSlotMappings + 7 ) / 8 );
+
+ UINT32 i;
+ for( i=0; i<numSlotMappings/8; i++ )
+ callSiteLiveSet[ i ] = (BYTE)m_Reader.Read( 8 );
+
+ callSiteLiveSet[ i ] = (BYTE)m_Reader.Read( numSlotMappings % 8 );
+
+ m_Reader.Skip( ( numCallSites - callSiteIdx - 1 ) * ( 32 + numSlotMappings ) );
+
+ //---------------------------------------------------------------------------
+ // Read slot mappings
+ //---------------------------------------------------------------------------
+
+ GcSlotDesc* slotMappings = (GcSlotDesc*) _alloca( numSlotMappings * sizeof( GcSlotDesc ) );
+ // Assert that we can read a GcSlotDesc with a single call to m_Reader.Read()
+ _ASSERTE( sizeof( GcSlotDesc ) <= sizeof ( size_t ) );
+ for( UINT32 i=0; i<numSlotMappings; i++ )
+ {
+ size_t data = m_Reader.Read( sizeof( GcSlotDesc ) * 8 );
+ slotMappings[ i ] = *( (GcSlotDesc*) &data );
+ }
+
+ //---------------------------------------------------------------------------
+ // Report live slots
+ //---------------------------------------------------------------------------
+
+ for( UINT32 i=0; i<numSlotMappings; i++ )
+ {
+ BYTE isLive = callSiteLiveSet[ i / 8 ] & ( 1 << ( i % 8 ) );
+ if( isLive )
+ {
+ GcSlotDesc slotDesc = slotMappings[ i ];
+ if( slotDesc.IsRegister )
+ {
+ if( reportScratchSlots || !IsScratchRegister( slotDesc.Slot.RegisterNumber, pRD ) )
+ {
+ ReportRegisterToGC(
+ slotDesc.Slot.RegisterNumber,
+ slotDesc.IsInterior,
+ slotDesc.IsPinned,
+ pRD,
+ flags,
+ pCallBack,
+ hCallBack
+ );
+ }
+ else
+ {
+ LOG((LF_GCROOTS, LL_INFO1000, "\"Live\" scratch register " FMT_REG " not reported\n", slotDesc.Slot.RegisterNumber));
+ }
+ }
+ else
+ {
+ GcStackSlotBase spBase = (GcStackSlotBase) (slotDesc.Slot.SpOffset & 0x3);
+ INT32 realSpOffset = slotDesc.Slot.SpOffset ^ (int) spBase;
+
+ if( reportScratchSlots || !IsScratchStackSlot(realSpOffset, spBase, pRD) )
+ {
+ ReportStackSlotToGC(
+ realSpOffset,
+ spBase,
+ slotDesc.IsInterior,
+ slotDesc.IsPinned,
+ pRD,
+ flags,
+ pCallBack,
+ hCallBack
+ );
+ }
+ else
+ {
+ LOG((LF_GCROOTS, LL_INFO1000, "\"Live\" scratch stack slot " FMT_STK " not reported\n", DBG_STK(realSpOffset)));
+ }
+ }
+ }
+ }
+
+ return true;
+ }
+
+ m_Reader.Skip( numSlotMappings );
+ }
+
+ // Call site not found. Skip the slot mapping table in preparation for reading the fully-interruptible information
+ m_Reader.Skip( numSlotMappings * sizeof( GcSlotDesc ) * 8 );
+ }
+
+#endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+#endif
+
+
+    // If no info is found for the call site, we default to fully-interruptible
+ LOG((LF_GCROOTS, LL_INFO1000000, "No GC info found for call site at offset %x. Defaulting to fully-interruptible information.\n", (int) m_InstructionOffset));
+
+ // Align the reader to the next byte to continue decoding
+ m_Reader.Skip( ( 8 - ( m_Reader.GetCurrentPos() % 8 ) ) % 8 );
+
+ // Skip interruptibility information
+ for(UINT32 i=0; i<m_NumInterruptibleRanges; i++)
+ {
+ m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA_ENCBASE );
+ m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA_ENCBASE );
+ }
+
+ //
+ // If this is a non-leaf frame and we are executing a call, the unwinder has given us the PC
+ // of the call instruction. We should adjust it to the PC of the instruction after the call in order to
+ // obtain transition information for scratch slots. However, we always assume scratch slots to be
+ // dead for non-leaf frames (except for ResumableFrames), so we don't need to adjust the PC.
+ // If this is a non-leaf frame and we are not executing a call (i.e.: a fault occurred in the function),
+    // then it would be incorrect to adjust the PC
+ //
+
+ int lifetimeTransitionsCount = 0;
+
+ //--------------------------------------------------------------------
+ // Decode registers
+ //--------------------------------------------------------------------
+
+ size_t numRegisters = m_Reader.DecodeVarLengthUnsigned(NUM_REGISTERS_ENCBASE);
+
+ {
+#ifdef ENABLE_CONTRACTS_IMPL
+ CONTRACT_VIOLATION(FaultViolation | FaultNotFatal);
+#endif
+ m_pLiveRegisters = (GcSlotDesc*) qbSlots1.AllocNoThrow(sizeof(GcSlotDesc)*numRegisters);
+ }
+ if (m_pLiveRegisters == NULL)
+ {
+ return false;
+ }
+
+
+ _ASSERTE(m_pLiveRegisters);
+
+ int lastNormRegNum = 0;
+
+ for(int i=0; i<numRegisters; i++)
+ {
+ if( i==0 )
+ {
+ lastNormRegNum = (int) m_Reader.DecodeVarLengthUnsigned(REGISTER_ENCBASE);
+ }
+ else
+ {
+ int normRegDelta = (int) m_Reader.DecodeVarLengthUnsigned(REGISTER_DELTA_ENCBASE) + 1;
+ lastNormRegNum += normRegDelta;
+ }
+ int regNum = DENORMALIZE_REGISTER(lastNormRegNum);
+
+ BOOL isInterior = FALSE;
+ BOOL isPinned = FALSE;
+ BOOL isLive = FALSE;
+
+ size_t normCodeOffset = (size_t)(SSIZE_T)(-1);
+ BOOL becomesLive = TRUE;
+ for(;;)
+ {
+ size_t normCodeOffsetDelta = m_Reader.DecodeVarLengthUnsigned(NORM_CODE_OFFSET_DELTA_ENCBASE);
+ if(normCodeOffsetDelta == 0) // terminator
+ break;
+
+ if(normCodeOffset != (size_t)(SSIZE_T)(-1))
+ becomesLive = (BOOL) m_Reader.Read(1);
+
+ normCodeOffset += normCodeOffsetDelta;
+
+ UINT32 instructionOffset = DENORMALIZE_CODE_OFFSET((UINT32)normCodeOffset);
+
+ BOOL becomesInterior = FALSE;
+ BOOL becomesPinned = FALSE;
+
+ if(becomesLive)
+ {
+ if(m_Reader.Read(1))
+ {
+ size_t flagEnc = m_Reader.Read( 2 );
+ becomesInterior = (BOOL)(flagEnc & 0x1);
+ becomesPinned = (BOOL)(flagEnc & 0x2);
+ }
+ }
+
+ lifetimeTransitionsCount++;
+
+ LOG((LF_GCROOTS, LL_INFO1000000,
+ "Transition " FMT_PIPTR "in " FMT_REG "going %s at offset %04x.\n",
+ DBG_PIN_NAME(becomesPinned), DBG_IPTR_NAME(becomesInterior), regNum,
+ becomesLive ? "live" : "dead",
+ (int) instructionOffset ));
+
+ if( instructionOffset > m_InstructionOffset )
+ continue;
+
+ isLive = becomesLive;
+ isInterior = becomesInterior;
+ isPinned = becomesPinned;
+ }
+
+ if( isLive )
+ {
+ if( reportScratchSlots || !IsScratchRegister( regNum, pRD ) )
+ {
+ m_pLiveRegisters[m_NumLiveRegisters].Slot.RegisterNumber = regNum;
+ GcSlotFlags flags = GC_SLOT_BASE;
+ if(isInterior)
+ flags = (GcSlotFlags) (flags | GC_SLOT_INTERIOR);
+ if(isPinned)
+ flags = (GcSlotFlags) (flags | GC_SLOT_PINNED);
+
+ m_pLiveRegisters[m_NumLiveRegisters].Flags = flags;
+ m_NumLiveRegisters++;
+ }
+ else
+ {
+ LOG((LF_GCROOTS, LL_INFO1000, "\"Live\" scratch register " FMT_REG " not reported\n", regNum));
+ }
+ }
+ }
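+
+    // A worked illustration of the transition stream decoded above
+    // (illustrative values, normalized offsets) for a single register; the
+    // stack-slot loop below decodes the same stream shape:
+    //   vlu -> 4           first transition: becomes live at -1 + 4 = 3
+    //   bit -> 0           no interior/pinned flags follow
+    //   vlu -> 6, bit -> 0 becomes dead at 3 + 6 = 9
+    //   vlu -> 0           terminator
+    // The register is live on [3, 9); the state used for m_InstructionOffset
+    // is that of the last transition at or before it.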
+
+ //--------------------------------------------------------------------
+ // Decode stack slots
+ //--------------------------------------------------------------------
+
+ size_t numStackSlots = m_Reader.DecodeVarLengthUnsigned(NUM_STACK_SLOTS_ENCBASE);
+ {
+#ifdef ENABLE_CONTRACTS_IMPL
+ CONTRACT_VIOLATION(FaultViolation | FaultNotFatal);
+#endif
+ m_pLiveStackSlots = (GcSlotDesc*) qbSlots2.AllocNoThrow(sizeof(GcSlotDesc)*numStackSlots);
+ }
+ if (m_pLiveStackSlots == NULL)
+ {
+ return false;
+ }
+ _ASSERTE(m_pLiveStackSlots);
+
+ INT32 lastNormStackSlot = 0;
+
+ for(int i=0; i<numStackSlots; i++)
+ {
+ if( i==0 )
+ {
+ lastNormStackSlot = (INT32) m_Reader.DecodeVarLengthSigned(STACK_SLOT_ENCBASE);
+ }
+ else
+ {
+ INT32 normStackSlotDelta = (INT32) m_Reader.DecodeVarLengthUnsigned(STACK_SLOT_DELTA_ENCBASE);
+ lastNormStackSlot += normStackSlotDelta;
+ }
+ INT32 spOffset = DENORMALIZE_STACK_SLOT(lastNormStackSlot);
+ GcStackSlotBase spBase = (GcStackSlotBase) m_Reader.Read(2);
+
+ BOOL isInterior = FALSE;
+ BOOL isPinned = FALSE;
+ BOOL isLive = FALSE;
+
+ size_t normCodeOffset = (size_t)(SSIZE_T)(-1);
+ BOOL becomesLive = TRUE;
+ for(;;)
+ {
+ size_t normCodeOffsetDelta = m_Reader.DecodeVarLengthUnsigned(NORM_CODE_OFFSET_DELTA_ENCBASE);
+ if(normCodeOffsetDelta == 0) // terminator
+ break;
+
+ if(normCodeOffset != (size_t)(SSIZE_T)(-1))
+ becomesLive = (BOOL) m_Reader.Read(1);
+
+ normCodeOffset += normCodeOffsetDelta;
+
+ UINT32 instructionOffset = DENORMALIZE_CODE_OFFSET((UINT32)normCodeOffset);
+
+ BOOL becomesInterior = FALSE;
+ BOOL becomesPinned = FALSE;
+
+ if(becomesLive)
+ {
+ if(m_Reader.Read(1))
+ {
+ size_t flagEnc = m_Reader.Read( 2 );
+ becomesInterior = (BOOL)(flagEnc & 0x1);
+ becomesPinned = (BOOL)(flagEnc & 0x2);
+ }
+ }
+
+ lifetimeTransitionsCount++;
+
+ LOG((LF_GCROOTS, LL_INFO1000000,
+ "Transition " FMT_PIPTR "in " FMT_STK "going %s at offset %04x.\n",
+ DBG_PIN_NAME(becomesPinned), DBG_IPTR_NAME(becomesInterior), DBG_STK(spOffset),
+ becomesLive ? "live" : "dead",
+ (int) instructionOffset ));
+
+ if( instructionOffset > m_InstructionOffset )
+ continue;
+
+ isLive = becomesLive;
+ isInterior = becomesInterior;
+ isPinned = becomesPinned;
+ }
+
+ if( isLive )
+ {
+ if( reportScratchSlots || !IsScratchStackSlot(spOffset, spBase, pRD) )
+ {
+ m_pLiveStackSlots[m_NumLiveStackSlots].Slot.Stack.SpOffset = spOffset;
+ m_pLiveStackSlots[m_NumLiveStackSlots].Slot.Stack.Base = spBase;
+ GcSlotFlags flags = GC_SLOT_BASE;
+ if(isInterior)
+ flags = (GcSlotFlags) (flags | GC_SLOT_INTERIOR);
+ if(isPinned)
+ flags = (GcSlotFlags) (flags | GC_SLOT_PINNED);
+
+ m_pLiveStackSlots[m_NumLiveStackSlots].Flags = flags;
+ m_NumLiveStackSlots++;
+ }
+ else
+ {
+ LOG((LF_GCROOTS, LL_INFO1000, "\"Live\" scratch stack slot " FMT_STK " not reported\n", DBG_STK(spOffset)));
+ }
+ }
+ }
+
+
+ LOG((LF_GCROOTS, LL_INFO1000000, "Decoded %d lifetime transitions.\n", (int) lifetimeTransitionsCount ));
+
+ return true;
+}
+
+void GcInfoDecoder::VerifyLiveRegister(
+ UINT32 regNum,
+ GcSlotFlags flags
+ )
+{
+ _ASSERTE(m_pLiveRegisters);
+
+ // If this assert fails, the slot being passed was not found to be live in this decoder
+ _ASSERTE(m_NumLiveRegisters > 0);
+
+ int pos;
+ for(pos = 0; pos < m_NumLiveRegisters; pos++)
+ {
+ if(regNum == m_pLiveRegisters[pos].Slot.RegisterNumber &&
+ flags == m_pLiveRegisters[pos].Flags)
+ {
+ break;
+ }
+ }
+
+ // If this assert fails, the slot being passed was not found to be live in this decoder
+ _ASSERTE(pos < m_NumLiveRegisters);
+
+ m_pLiveRegisters[pos] = m_pLiveRegisters[--m_NumLiveRegisters];
+}
+
+void GcInfoDecoder::VerifyLiveStackSlot(
+ INT32 spOffset,
+ GcStackSlotBase spBase,
+ GcSlotFlags flags
+ )
+{
+ _ASSERTE(m_pLiveStackSlots);
+
+ // If this assert fails, the slot being passed was not found to be live in this decoder
+ _ASSERTE(m_NumLiveStackSlots > 0);
+
+ int pos;
+ for(pos = 0; pos < m_NumLiveStackSlots; pos++)
+ {
+ if(spOffset == m_pLiveStackSlots[pos].Slot.Stack.SpOffset &&
+ spBase == m_pLiveStackSlots[pos].Slot.Stack.Base &&
+ flags == m_pLiveStackSlots[pos].Flags)
+ {
+ break;
+ }
+ }
+
+ // If this assert fails, the slot being passed was not found to be live in this decoder
+ _ASSERTE(pos < m_NumLiveStackSlots);
+
+ m_pLiveStackSlots[pos] = m_pLiveStackSlots[--m_NumLiveStackSlots];
+}
+
+void GcInfoDecoder::DoFinalVerification()
+{
+ // If this assert fails, the m_NumLiveRegisters slots remaining in m_pLiveRegisters
+ // were not reported by the calling decoder
+ _ASSERTE(m_NumLiveRegisters == 0);
+
+ // If this assert fails, the m_NumLiveStackSlots slots remaining in m_pLiveStackSlots
+ // were not reported by the calling decoder
+ _ASSERTE(m_NumLiveStackSlots == 0);
+
+}
+
+//-----------------------------------------------------------------------------
+// Platform-specific methods
+//-----------------------------------------------------------------------------
+
+#if defined(_TARGET_AMD64_)
+
+
+OBJECTREF* GcInfoDecoder::GetRegisterSlot(
+ int regNum,
+ PREGDISPLAY pRD
+ )
+{
+ _ASSERTE(regNum >= 0 && regNum <= 16);
+ _ASSERTE(regNum != 4); // rsp
+
+ // The fields of KNONVOLATILE_CONTEXT_POINTERS are in the same order as
+ // the processor encoding numbers.
+
+ ULONGLONG **ppRax;
+#ifdef _NTAMD64_
+ ppRax = &pRD->pCurrentContextPointers->Rax;
+#else
+ ppRax = &pRD->pCurrentContextPointers->Integer.Register.Rax;
+#endif
+
+ return (OBJECTREF*)*(ppRax + regNum);
+}
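+
+// An illustration of the indexing above (it relies on the field order noted
+// in the comment; the register number is illustrative): with regNum == 3
+// (rbx), ppRax + 3 is the Rbx member, so the call yields the saved rbx value:
+//
+//   OBJECTREF* pRbx = GetRegisterSlot(3 /* rbx */, pRD);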
+
+
+bool GcInfoDecoder::IsScratchRegister(int regNum, PREGDISPLAY pRD)
+{
+ _ASSERTE(regNum >= 0 && regNum <= 16);
+ _ASSERTE(regNum != 4); // rsp
+
+ UINT16 PreservedRegMask =
+ (1 << 3) // rbx
+ | (1 << 5) // rbp
+ | (1 << 6) // rsi
+ | (1 << 7) // rdi
+ | (1 << 12) // r12
+ | (1 << 13) // r13
+ | (1 << 14) // r14
+ | (1 << 15); // r15
+
+ return !(PreservedRegMask & (1 << regNum));
+}
+
+
+bool GcInfoDecoder::IsScratchStackSlot(INT32 spOffset, GcStackSlotBase spBase, PREGDISPLAY pRD)
+{
+#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
+ _ASSERTE( m_Flags & DECODE_GC_LIFETIMES );
+
+ ULONGLONG pSlot = (ULONGLONG) GetStackSlot(spOffset, spBase, pRD);
+ _ASSERTE(pSlot >= pRD->SP);
+
+ return (pSlot < pRD->SP + m_SizeOfStackOutgoingAndScratchArea);
+#else
+ return FALSE;
+#endif
+}
+
+
+void GcInfoDecoder::ReportRegisterToGC( // AMD64
+ int regNum,
+ BOOL isInterior,
+ BOOL isPinned,
+ PREGDISPLAY pRD,
+ unsigned flags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack)
+{
+ GCINFODECODER_CONTRACT(CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END);
+
+ _ASSERTE(regNum >= 0 && regNum <= 16);
+ _ASSERTE(regNum != 4); // rsp
+
+ LOG((LF_GCROOTS, LL_INFO1000, "Reporting " FMT_REG, regNum ));
+
+ OBJECTREF* pObjRef = GetRegisterSlot( regNum, pRD );
+
+#ifdef _DEBUG
+ if(IsScratchRegister(regNum, pRD))
+ {
+ // Scratch registers cannot be reported for non-leaf frames
+ _ASSERTE(flags & ActiveStackFrame);
+ }
+
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part Two */
+ "at" FMT_ADDR "as ", DBG_ADDR(pObjRef) ));
+
+ VALIDATE_ROOT(isInterior, hCallBack, pObjRef);
+
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part Three */
+ LOG_PIPTR_OBJECT_CLASS(OBJECTREF_TO_UNCHECKED_OBJECTREF(*pObjRef), isPinned, isInterior)));
+#endif //_DEBUG
+
+ DWORD gcFlags = CHECK_APP_DOMAIN;
+
+ if (isInterior)
+ gcFlags |= GC_CALL_INTERIOR;
+
+ if (isPinned)
+ gcFlags |= GC_CALL_PINNED;
+
+ pCallBack(hCallBack, pObjRef, gcFlags);
+}
+
+#else // Unknown platform
+
+OBJECTREF* GcInfoDecoder::GetRegisterSlot(
+ int regNum,
+ PREGDISPLAY pRD
+ )
+{
+ PORTABILITY_ASSERT("GcInfoDecoder::GetRegisterSlot");
+ return NULL;
+}
+
+bool GcInfoDecoder::IsScratchRegister(int regNum, PREGDISPLAY pRD)
+{
+ PORTABILITY_ASSERT("GcInfoDecoder::IsScratchRegister");
+ return false;
+}
+
+bool GcInfoDecoder::IsScratchStackSlot(INT32 spOffset, GcStackSlotBase spBase, PREGDISPLAY pRD)
+{
+ _ASSERTE( !"NYI" );
+ return false;
+}
+
+void GcInfoDecoder::ReportRegisterToGC(
+ int regNum,
+ BOOL isInterior,
+ BOOL isPinned,
+ PREGDISPLAY pRD,
+ unsigned flags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack)
+{
+ _ASSERTE( !"NYI" );
+}
+
+#endif // Unknown platform
+
+
+OBJECTREF* GcInfoDecoder::GetStackSlot(
+ INT32 spOffset,
+ GcStackSlotBase spBase,
+ PREGDISPLAY pRD
+ )
+{
+ OBJECTREF* pObjRef;
+
+ if( GC_SP_REL == spBase )
+ {
+ pObjRef = (OBJECTREF*) ((SIZE_T)GetRegdisplaySP(pRD) + spOffset);
+ }
+ else if( GC_CALLER_SP_REL == spBase )
+ {
+ pObjRef = (OBJECTREF*) (GET_CALLER_SP(pRD) + spOffset);
+ }
+ else
+ {
+ _ASSERTE( GC_FRAMEREG_REL == spBase );
+ _ASSERTE( NO_STACK_BASE_REGISTER != m_StackBaseRegister );
+
+ pObjRef = (OBJECTREF*)((*((INT64*)(GetRegisterSlot( m_StackBaseRegister, pRD )))) + spOffset);
+ }
+
+ return pObjRef;
+}
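+
+// How the three bases above resolve, for an illustrative spOffset of 0x20:
+//   GC_SP_REL        -> current SP                + 0x20
+//   GC_CALLER_SP_REL -> caller's SP               + 0x20
+//   GC_FRAMEREG_REL  -> saved m_StackBaseRegister + 0x20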
+
+void GcInfoDecoder::ReportStackSlotToGC(
+ INT32 spOffset,
+ GcStackSlotBase spBase,
+ BOOL isInterior,
+ BOOL isPinned,
+ PREGDISPLAY pRD,
+ unsigned flags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack)
+{
+ GCINFODECODER_CONTRACT(CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END);
+
+ OBJECTREF* pObjRef = GetStackSlot(spOffset, spBase, pRD);
+ _ASSERTE( IS_ALIGNED( pObjRef, sizeof( Object* ) ) );
+
+#ifdef _DEBUG
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part One */
+ "Reporting %s" FMT_STK,
+ ( (GC_SP_REL == spBase) ? "" :
+ ((GC_CALLER_SP_REL == spBase) ? "caller's " :
+ ((GC_FRAMEREG_REL == spBase) ? "frame " : "<unrecognized GcStackSlotBase> "))),
+ DBG_STK(spOffset) ));
+
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part Two */
+ "at" FMT_ADDR "as ", DBG_ADDR(pObjRef) ));
+
+ VALIDATE_ROOT(isInterior, hCallBack, pObjRef);
+
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part Three */
+ LOG_PIPTR_OBJECT_CLASS(OBJECTREF_TO_UNCHECKED_OBJECTREF(*pObjRef), isPinned, isInterior)));
+#endif
+
+ DWORD gcFlags = CHECK_APP_DOMAIN;
+
+ if (isInterior)
+ gcFlags |= GC_CALL_INTERIOR;
+
+ if (isPinned)
+ gcFlags |= GC_CALL_PINNED;
+
+ pCallBack(hCallBack, pObjRef, gcFlags);
+}
+
+} // namespace DbgGcInfo
+
+#endif // USE_GC_INFO_DECODER
+#endif // VERIFY_GCINFO
diff --git a/src/vm/dbginterface.h b/src/vm/dbginterface.h
new file mode 100644
index 0000000000..fb3e167374
--- /dev/null
+++ b/src/vm/dbginterface.h
@@ -0,0 +1,420 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// COM+99 Debug Interface Header
+//
+
+
+
+#ifndef _dbgInterface_h_
+#define _dbgInterface_h_
+
+#include "common.h"
+#include "eedbginterface.h"
+#include "corjit.h"
+#include "../debug/inc/dbgipcevents.h"
+#include "primitives.h"
+
+typedef DPTR(struct ICorDebugInfo::NativeVarInfo) PTR_NativeVarInfo;
+
+typedef void (*FAVORCALLBACK)(void *);
+
+//
+// The purpose of this object is to serve as an entry point to the
+// debugger, which used to reside in a separate DLL.
+//
+
+class DebugInterface
+{
+ VPTR_BASE_VTABLE_CLASS(DebugInterface);
+
+public:
+
+ //
+ // Functions exported from the debugger to the EE.
+ //
+
+#ifndef DACCESS_COMPILE
+
+ virtual HRESULT Startup(void) = 0;
+
+ virtual HRESULT StartupPhase2(Thread * pThread) = 0;
+
+ // Some callers into the debugger (e.g., ETW rundown) know they will need the lazy
+ // data initialized but cannot afford to have it initialized unpredictably or inside a
+ // lock. They can use this function to force the data to be initialized at a
+ // controlled point in time
+ virtual void InitializeLazyDataIfNecessary() = 0;
+
+ virtual void SetEEInterface(EEDebugInterface* i) = 0;
+
+ virtual void StopDebugger(void) = 0;
+
+ virtual BOOL IsStopped(void) = 0;
+
+ virtual void ThreadCreated(Thread* pRuntimeThread) = 0;
+
+ virtual void ThreadStarted(Thread* pRuntimeThread) = 0;
+
+ virtual void DetachThread(Thread *pRuntimeThread) = 0;
+
+ // Called when a module is being loaded into an AppDomain.
+ // This includes when a domain neutral module is loaded into a new AppDomain.
+ // This is called only when a debugger is attached, and will occur after the
+ // related LoadAssembly and AddAppDomainToIPCBlock calls and before any
+ // LoadClass calls for this module.
+ virtual void LoadModule(Module * pRuntimeModule, // the module being loaded
+ LPCWSTR psModuleName, // module file name
+                            DWORD dwModuleName, // number of characters in file name excluding null
+ Assembly * pAssembly, // the assembly the module belongs to
+ AppDomain * pAppDomain, // the AppDomain the module is being loaded into
+ DomainFile * pDomainFile,
+ BOOL fAttaching) = 0; // true if this notification is due to a debugger
+ // being attached to the process
+
+ // Called AFTER LoadModule, and after the module has reached FILE_LOADED. This lets
+ // dbgapi do any processing that needs to wait until the FILE_LOADED stage (e.g.,
+ // binding breakpoints in NGENd generics).
+ virtual void LoadModuleFinished(Module * pModule, AppDomain * pAppDomain) = 0;
+
+ // Called for all modules in an AppDomain when the AppDomain is unloaded.
+ // This includes domain neutral modules that are also loaded into other domains.
+ // This is called only when a debugger is attached, and will occur after all UnloadClass
+    // calls and before any UnloadAssembly or RemoveAppDomainFromIPCBlock calls related
+    // to this module. On CLR shutdown, we are not guaranteed to get UnloadModule calls for
+ // all outstanding loaded modules.
+ virtual void UnloadModule(Module* pRuntimeModule, AppDomain *pAppDomain) = 0;
+
+ // Called when a Module* is being destroyed.
+    // Specifically, the Module has completed unloading (which may have been done asynchronously), all resources
+ // associated are being freed, and the Module* is about to become invalid. The debugger should remove all
+ // references to this Module*.
+ // NOTE: This is called REGARDLESS of whether a debugger is attached or not, and will occur after any other
+ // notifications about this module (including any RemoveAppDomainFromIPCBlock call indicating the module's
+ // domain has been unloaded).
+ virtual void DestructModule(Module *pModule) = 0;
+
+ virtual BOOL LoadClass(TypeHandle th,
+ mdTypeDef classMetadataToken,
+ Module *classModule,
+ AppDomain *pAppDomain) = 0;
+
+ virtual void UnloadClass(mdTypeDef classMetadataToken,
+ Module *classModule,
+ AppDomain *pAppDomain) = 0;
+
+ // Filter we call in 1st-pass to dispatch a CHF callback.
+ // pCatchStackAddress really should be a Frame* onto the stack. That way the CHF stack address
+ // and the debugger's stacktrace Frames will match up.
+ // This is only called by stubs.
+ virtual LONG NotifyOfCHFFilter(EXCEPTION_POINTERS* pExceptionPointers, PVOID pCatchStackAddr) = 0;
+
+
+ virtual bool FirstChanceNativeException(EXCEPTION_RECORD *exception,
+ CONTEXT *context,
+ DWORD code,
+ Thread *thread) = 0;
+
+ // pThread is thread that exception is on.
+ // currentSP is stack frame of the throw site.
+ // currentIP is ip of the throw site.
+    // pStubFrame = NULL if the currentSp is for a non-stub frame (i.e., a regular JITed frame).
+ // For stub-based throws, pStubFrame is the EE Frame of the stub.
+ virtual bool FirstChanceManagedException(Thread *pThread, SIZE_T currentIP, SIZE_T currentSP) = 0;
+
+ virtual void FirstChanceManagedExceptionCatcherFound(Thread *pThread,
+ MethodDesc *pMD, TADDR pMethodAddr,
+ BYTE *currentSP,
+ EE_ILEXCEPTION_CLAUSE *pEHClause) = 0;
+
+ virtual LONG LastChanceManagedException(EXCEPTION_POINTERS * pExceptionInfo,
+ Thread *thread,
+ BOOL jitAttachRequested) = 0;
+
+ virtual void ManagedExceptionUnwindBegin(Thread *pThread) = 0;
+
+ virtual void DeleteInterceptContext(void *pContext) = 0;
+
+ virtual void ExceptionFilter(MethodDesc *fd, TADDR pMethodAddr,
+ SIZE_T offset,
+ BYTE *pStack) = 0;
+
+ virtual void ExceptionHandle(MethodDesc *fd, TADDR pMethodAddr,
+ SIZE_T offset,
+ BYTE *pStack) = 0;
+
+ virtual void SendUserBreakpoint(Thread *thread) = 0;
+
+ // Send an UpdateModuleSyms event, and block waiting for the debugger to continue it.
+ virtual void SendUpdateModuleSymsEventAndBlock(Module *pRuntimeModule,
+ AppDomain *pAppDomain) = 0;
+
+ //
+ // RequestFavor gets the debugger helper thread to call a function. It's
+ // typically called when the current thread can't call the function directly,
+ // e.g, there isn't enough stack space.
+ //
+ // RequestFavor ensures that the helper thread has been initialized to
+    // execute favors and then calls Debugger::DoFavor. It blocks until the
+ // favor callback completes.
+ //
+ // Parameters:
+    // fp - Favor callback function
+ // pData - the parameter passed to the favor callback function.
+ //
+ // Return values:
+ // S_OK if the function succeeds, else a failure HRESULT
+ //
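+    // A hypothetical caller sketch (names here are illustrative only):
+    //
+    //   static void MyFavor(void * pData) { /* runs on the helper thread */ }
+    //   ...
+    //   HRESULT hr = g_pDebugInterface->RequestFavor(MyFavor, &myData);
+    //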
+ virtual HRESULT RequestFavor(FAVORCALLBACK fp, void * pData) = 0;
+
+#endif // #ifndef DACCESS_COMPILE
+
+ // JITComplete() is called after a method is jit-compiled, successfully or not
+
+#ifndef DACCESS_COMPILE
+
+ virtual void JITComplete(MethodDesc* fd, TADDR newAddress) = 0;
+
+ //
+ // EnC functions
+ //
+#ifdef EnC_SUPPORTED
+ // Notify that an existing method has been edited in a loaded type
+ virtual HRESULT UpdateFunction(MethodDesc* md, SIZE_T enCVersion) = 0;
+
+ // Notify that a new method has been added to a loaded type
+ virtual HRESULT AddFunction(MethodDesc* md, SIZE_T enCVersion) = 0;
+
+ virtual HRESULT UpdateNotYetLoadedFunction(mdMethodDef token, Module * pModule, SIZE_T enCVersion) = 0;
+
+ // Notify that a field has been added
+ virtual HRESULT AddField(FieldDesc* fd, SIZE_T enCVersion) = 0;
+
+ // Notify that the EE has completed the remap and is about to resume execution
+ virtual HRESULT RemapComplete(MethodDesc *pMd, TADDR addr, SIZE_T nativeOffset) = 0;
+
+ // Used by the codemanager FixContextForEnC() to update
+ virtual HRESULT MapILInfoToCurrentNative(MethodDesc *pMD,
+ SIZE_T ilOffset,
+ TADDR nativeFnxStart,
+ SIZE_T *nativeOffset) = 0;
+#endif // EnC_SUPPORTED
+
+ // Get debugger variable information for a specific version of a method
+ virtual void GetVarInfo(MethodDesc * fd, // [IN] method of interest
+ void *DebuggerVersionToken, // [IN] which edit version
+ SIZE_T * cVars, // [OUT] size of 'vars'
+ const ICorDebugInfo::NativeVarInfo **vars // [OUT] map telling where local vars are stored
+ ) = 0;
+
+ virtual void getBoundaries(MethodDesc * ftn,
+ unsigned int *cILOffsets, DWORD **pILOffsets,
+                               ICorDebugInfo::BoundaryTypes* implicitBoundaries) = 0;
+
+ virtual void getVars(MethodDesc * ftn,
+ ULONG32 *cVars, ICorDebugInfo::ILVarInfo **vars,
+ bool *extendOthers) = 0;
+
+ virtual BOOL CheckGetPatchedOpcode(CORDB_ADDRESS_TYPE *address, /*OUT*/ PRD_TYPE *pOpcode) = 0;
+
+ virtual PRD_TYPE GetPatchedOpcode(CORDB_ADDRESS_TYPE *ip) = 0;
+
+ virtual void TraceCall(const BYTE *target) = 0;
+
+ virtual bool ThreadsAtUnsafePlaces(void) = 0;
+
+ virtual HRESULT LaunchDebuggerForUser(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo, BOOL sendManagedEvent, BOOL explicitUserRequest) = 0;
+
+ // Launches a debugger and waits for it to attach
+ virtual void JitAttach(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo, BOOL willSendManagedEvent, BOOL explicitUserRequest) = 0;
+
+ // Prepares for a jit attach and decides which of several potentially
+ // racing threads get to launch the debugger
+ virtual BOOL PreJitAttach(BOOL willSendManagedEvent, BOOL willLaunchDebugger, BOOL explicitUserRequest) = 0;
+
+ // Waits for a jit attach to complete
+ virtual void WaitForDebuggerAttach() = 0;
+
+ // Completes the jit attach, unblocking all threads waiting for attach,
+ // regardless of whether or not the debugger actually attached
+ virtual void PostJitAttach() = 0;
+
+ virtual void SendUserBreakpointAndSynchronize(Thread * pThread) = 0;
+
+ virtual void SendLogMessage(int iLevel,
+ SString * pSwitchName,
+ SString * pMessage) = 0;
+
+ // send a custom notification from the target to the RS. This will become an ICorDebugThread and
+ // ICorDebugAppDomain on the RS.
+ virtual void SendCustomDebuggerNotification(Thread * pThread, DomainFile * pDomainFile, mdTypeDef classToken) = 0;
+
+ // Send an MDA notification. This ultimately translates to an ICorDebugMDA object on the Right-Side.
+ virtual void SendMDANotification(
+ Thread * pThread, // may be NULL. Lets us send on behalf of other threads.
+ SString * szName,
+ SString * szDescription,
+ SString * szXML,
+ CorDebugMDAFlags flags,
+ BOOL bAttach
+ ) = 0;
+
+ virtual bool IsJMCMethod(Module* pModule, mdMethodDef tkMethod) = 0;
+
+    // Given a method, gets its EnC version number. Returns 1 if the method is not EnCed.
+ // Note that MethodDescs are reused between versions so this will give us
+ // the most recent EnC number.
+ virtual int GetMethodEncNumber(MethodDesc * pMethod) = 0;
+
+ virtual void SendLogSwitchSetting (int iLevel,
+ int iReason,
+ __in_z LPCWSTR pLogSwitchName,
+ __in_z LPCWSTR pParentSwitchName) = 0;
+
+ virtual bool IsLoggingEnabled (void) = 0;
+
+ virtual bool GetILOffsetFromNative (MethodDesc *PFD,
+ const BYTE *pbAddr,
+ DWORD nativeOffset,
+ DWORD *ilOffset) = 0;
+
+ virtual HRESULT GetILToNativeMapping(MethodDesc *pMD,
+ ULONG32 cMap,
+ ULONG32 *pcMap,
+ COR_DEBUG_IL_TO_NATIVE_MAP map[]) = 0;
+
+ virtual HRESULT GetILToNativeMappingIntoArrays(
+ MethodDesc * pMD,
+ USHORT cMapMax,
+ USHORT * pcMap,
+ UINT ** prguiILOffset,
+ UINT ** prguiNativeOffset) = 0;
+
+ virtual DWORD GetHelperThreadID(void ) = 0;
+
+ // Called whenever a new AppDomain is created, regardless of whether a debugger is attached.
+ // This will be called before any LoadAssembly calls for assemblies in this domain.
+ virtual HRESULT AddAppDomainToIPC (AppDomain *pAppDomain) = 0;
+
+ // Called whenever an AppDomain is unloaded, regardless of whether a Debugger is attached
+ // This will occur after any UnloadAssembly and UnloadModule callbacks for this domain (if any).
+ virtual HRESULT RemoveAppDomainFromIPC (AppDomain *pAppDomain) = 0;
+
+ virtual HRESULT UpdateAppDomainEntryInIPC (AppDomain *pAppDomain) = 0;
+
+ // Called when an assembly is being loaded into an AppDomain.
+ // This includes when a domain neutral assembly is loaded into a new AppDomain.
+ // This is called only when a debugger is attached, and will occur after the
+ // related AddAppDomainToIPCBlock call and before any LoadModule or
+ // LoadClass calls for this assembly.
+ virtual void LoadAssembly(DomainAssembly * pDomainAssembly) = 0; // the assembly being loaded
+
+
+ // Called for all assemblies in an AppDomain when the AppDomain is unloaded.
+ // This includes domain neutral assemblies that are also loaded into other domains.
+ // This is called only when a debugger is attached, and will occur after all UnloadClass
+    // and UnloadModule calls and before any RemoveAppDomainFromIPCBlock calls related
+    // to this assembly. On CLR shutdown, we are not guaranteed to get UnloadAssembly calls for
+ // all outstanding loaded assemblies.
+ virtual void UnloadAssembly(DomainAssembly * pDomainAssembly) = 0;
+
+ virtual HRESULT SetILInstrumentedCodeMap(MethodDesc *fd,
+ BOOL fStartJit,
+ ULONG32 cILMapEntries,
+ COR_IL_MAP rgILMapEntries[]) = 0;
+
+ virtual void EarlyHelperThreadDeath(void) = 0;
+
+ virtual void ShutdownBegun(void) = 0;
+
+ virtual void LockDebuggerForShutdown(void) = 0;
+
+ virtual void DisableDebugger(void) = 0;
+
+ virtual HRESULT NameChangeEvent(AppDomain *pAppDomain,
+ Thread *pThread) = 0;
+
+ // send an event to the RS indicating that there's a Ctrl-C or Ctrl-Break
+ virtual BOOL SendCtrlCToDebugger(DWORD dwCtrlType) = 0;
+
+ // Allows the debugger to keep an up to date list of special threads
+ virtual HRESULT UpdateSpecialThreadList(DWORD cThreadArrayLength,
+ DWORD *rgdwThreadIDArray) = 0;
+
+ // Updates the pointer for the debugger services
+ virtual void SetIDbgThreadControl(IDebuggerThreadControl *pIDbgThreadControl) = 0;
+
+ virtual DWORD GetRCThreadId(void) = 0;
+
+ virtual HRESULT GetVariablesFromOffset(MethodDesc *pMD,
+ UINT varNativeInfoCount,
+ ICorDebugInfo::NativeVarInfo *varNativeInfo,
+ SIZE_T offsetFrom,
+ CONTEXT *pCtx,
+ SIZE_T *rgVal1,
+ SIZE_T *rgVal2,
+ UINT uRgValSize,
+ BYTE ***rgpVCs) = 0;
+
+ virtual HRESULT SetVariablesAtOffset(MethodDesc *pMD,
+ UINT varNativeInfoCount,
+ ICorDebugInfo::NativeVarInfo *varNativeInfo,
+ SIZE_T offsetTo,
+ CONTEXT *pCtx,
+ SIZE_T *rgVal1,
+ SIZE_T *rgVal2,
+ BYTE **rgpVCs) = 0;
+
+ virtual BOOL IsThreadContextInvalid(Thread *pThread) = 0;
+
+ // For Just-My-Code (aka Just-User-Code).
+    // The jit inserts probes that look like:
+ // if (*pAddr != 0) call g_pDebugInterface->OnMethodEnter()
+
+ // Invoked when we enter a user method.
+ // pIP is an ip within the method, right after the prolog.
+ virtual void OnMethodEnter(void * pIP) = 0;
+
+    // Given a module, the debugger provides the address of the flag.
+    // This allows the debugger to store the flag wherever it wants
+    // and with whatever granularity (per-module, per-class, per-function, etc).
+ virtual DWORD* GetJMCFlagAddr(Module * pModule) = 0;
+
+ // notification for SQL fiber debugging support
+ virtual void CreateConnection(CONNID dwConnectionId, __in_z WCHAR *wzName) = 0;
+ virtual void DestroyConnection(CONNID dwConnectionId) = 0;
+ virtual void ChangeConnection(CONNID dwConnectionId) = 0;
+
+ //
+ // This function is used to identify the helper thread.
+ //
+ virtual bool ThisIsHelperThread(void) = 0;
+
+ virtual HRESULT ReDaclEvents(PSECURITY_DESCRIPTOR securityDescriptor) = 0;
+
+ virtual BOOL ShouldAutoAttach() = 0;
+ virtual BOOL FallbackJITAttachPrompt() = 0;
+ virtual HRESULT SetFiberMode(bool isFiberMode) = 0;
+
+#ifdef FEATURE_INTEROP_DEBUGGING
+ virtual LONG FirstChanceSuspendHijackWorker(PCONTEXT pContext, PEXCEPTION_RECORD pExceptionRecord) = 0;
+#endif
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) = 0;
+ virtual void EnumMemoryRegionsIfFuncEvalFrame(CLRDataEnumMemoryFlags flags, Frame * pFrame) = 0;
+#endif
+};
+
+#ifndef DACCESS_COMPILE
+// Helper to make GCC compile. GCC can't handle putting a virtual call in a filter.
+struct NotifyOfCHFFilterWrapperParam { void *pFrame; };
+LONG NotifyOfCHFFilterWrapper(EXCEPTION_POINTERS *pExceptionInfo, PVOID pNotifyOfCHFFilterWrapperParam);
+#endif
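+
+// A hedged sketch of how the wrapper might be used from an SEH filter (the
+// actual call sites are elsewhere; names below are illustrative):
+//
+//   NotifyOfCHFFilterWrapperParam param = { pFrame };
+//   __try { /* dispatch */ }
+//   __except(NotifyOfCHFFilterWrapper(GetExceptionInformation(), &param)) { }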
+
+
+#endif // _dbgInterface_h_
diff --git a/src/vm/debugdebugger.cpp b/src/vm/debugdebugger.cpp
new file mode 100644
index 0000000000..3a620fae9f
--- /dev/null
+++ b/src/vm/debugdebugger.cpp
@@ -0,0 +1,1733 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** File: DebugDebugger.cpp
+**
+** Purpose: Native methods on System.Diagnostics.Debugger
+**
+**
+
+===========================================================*/
+
+#include "common.h"
+
+#include <object.h>
+#include "ceeload.h"
+#include "corpermp.h"
+
+#include "excep.h"
+#include "frames.h"
+#include "vars.hpp"
+#include "field.h"
+#include "gc.h"
+#include "jitinterface.h"
+#include "debugdebugger.h"
+#include "dbginterface.h"
+#include "cordebug.h"
+#include "corsym.h"
+#include "generics.h"
+#include "eemessagebox.h"
+#include "stackwalk.h"
+
+LogHashTable g_sLogHashTable;
+
+#ifndef DACCESS_COMPILE
+//----------------------------------------------------------------------------
+//
+// FindMostRecentUserCodeOnStack - find out the most recent user managed code on stack
+//
+//
+// Arguments:
+// pContext - [optional] pointer to the context to be restored the user code's context if found
+//
+// Return Value:
+// The most recent user managed code or NULL if not found.
+//
+// Note:
+//    This is a heuristic approach to get the address of the user managed code that calls into
+//    the BCL (e.g. System.Diagnostics.Debugger.Break), assuming that we can find the original
+//    user code caller by stack walking.
+//
+//    DoWatsonForUserBreak has the address returned from the helper frame that points to an
+//    internal BCL helper function doing a permission check. From a bucketing perspective it is
+//    preferable to report the user managed code that invokes Debugger.Break instead.
+//
+//    User managed code is managed code in a non-system assembly. Currently, only mscorlib.dll
+//    is marked as a system assembly.
+//
+//----------------------------------------------------------------------------
+UINT_PTR FindMostRecentUserCodeOnStack(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ Thread * pThread = GetThread();
+ _ASSERTE(pThread != NULL);
+
+ UINT_PTR address = NULL;
+
+ CONTEXT ctx;
+ REGDISPLAY rd;
+ SetUpRegdisplayForStackWalk(pThread, &ctx, &rd);
+
+ StackFrameIterator frameIter;
+ frameIter.Init(pThread, pThread->GetFrame(), &rd, FUNCTIONSONLY | LIGHTUNWIND);
+
+ while (frameIter.IsValid())
+ {
+ MethodDesc * pMD = frameIter.m_crawl.GetFunction();
+
+        // Is it not a system assembly? User managed code will not be in a system assembly.
+ if ((pMD != NULL) && (!pMD->GetAssembly()->IsSystem()))
+ {
+ CrawlFrame * pCF = &(frameIter.m_crawl);
+ address = (UINT_PTR)GetControlPC(pCF->GetRegisterSet());
+ break;
+ }
+
+ if (frameIter.Next() != SWA_CONTINUE)
+ {
+ break;
+ }
+ }
+
+ return address;
+}
+
+#ifndef FEATURE_CORECLR
+// Call into the unhandled-exception processing code to launch Watson.
+//
+// Arguments:
+// address - address to distinguish callsite of break.
+//
+// Notes:
+//    Invokes a watson dialog in response to a user break (Debugger.Break).
+// Assumes that caller has already enforced any policy it cares about related to whether a debugger is attached.
+void DoWatsonForUserBreak(UINT_PTR address)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ THROWS;
+ PRECONDITION(address != NULL);
+ }
+ CONTRACTL_END;
+
+ CONTEXT context;
+ EXCEPTION_RECORD exceptionRecord;
+ EXCEPTION_POINTERS exceptionPointers;
+
+ ZeroMemory(&context, sizeof(context));
+ ZeroMemory(&exceptionRecord, sizeof(exceptionRecord));
+ ZeroMemory(&exceptionPointers, sizeof(exceptionPointers));
+
+ // Try to locate the user managed code invoking System.Diagnostics.Debugger.Break
+ UINT_PTR userCodeAddress = FindMostRecentUserCodeOnStack();
+ if (userCodeAddress != NULL)
+ {
+ address = userCodeAddress;
+ }
+
+    LOG((LF_EH, LL_INFO10, "DoWatsonForUserBreak: break at %0p\n", address));
+
+ exceptionRecord.ExceptionAddress = reinterpret_cast< PVOID >(address);
+ exceptionPointers.ExceptionRecord = &exceptionRecord;
+ exceptionPointers.ContextRecord = &context;
+
+ Thread *pThread = GetThread();
+ PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = pThread->GetExceptionState()->GetUEWatsonBucketTracker();
+ _ASSERTE(pUEWatsonBucketTracker != NULL);
+ pUEWatsonBucketTracker->SaveIpForWatsonBucket(address);
+ pUEWatsonBucketTracker->CaptureUnhandledInfoForWatson(TypeOfReportedError::UserBreakpoint, pThread, NULL);
+ if (pUEWatsonBucketTracker->RetrieveWatsonBuckets() == NULL)
+ {
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ }
+
+ WatsonLastChance(GetThread(), &exceptionPointers, TypeOfReportedError::UserBreakpoint);
+
+} // void DoWatsonForUserBreak()
+#endif // !FEATURE_CORECLR
+
+// This does a user break, triggered by System.Diagnostics.Debugger.Break, or the IL opcode for break.
+//
+// Notes:
+// If a managed debugger is attached, this should send the managed UserBreak event.
+// Else if a native debugger is attached, this should send a native break event (kernel32!DebugBreak)
+// Else, this should invoke Watson.
+//
+// Historical trivia:
+// - In Whidbey, this would still invoke Watson if a native-only debugger is attached.
+// - In Arrowhead, the managed debugging pipeline switched to be built on the native pipeline.
+FCIMPL0(void, DebugDebugger::Break)
+{
+ FCALL_CONTRACT;
+
+#ifdef DEBUGGING_SUPPORTED
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+#ifdef _DEBUG
+ {
+ static int fBreakOnDebugBreak = -1;
+ if (fBreakOnDebugBreak == -1)
+ fBreakOnDebugBreak = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnDebugBreak);
+ _ASSERTE(fBreakOnDebugBreak == 0 && "BreakOnDebugBreak");
+ }
+
+ static BOOL fDbgInjectFEE = -1;
+ if (fDbgInjectFEE == -1)
+ fDbgInjectFEE = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgInjectFEE);
+#endif
+
+ // WatsonLastChance has its own complex (and changing) policy of how to behave if a debugger is attached.
+ // So caller should explicitly enforce any debugger-related policy before handing off to watson.
+ // Check managed-only first, since managed debugging may be built on native-debugging.
+ if (CORDebuggerAttached() INDEBUG(|| fDbgInjectFEE))
+ {
+ // A managed debugger is already attached -- let it handle the event.
+ g_pDebugInterface->SendUserBreakpoint(GetThread());
+ }
+ else if (IsDebuggerPresent())
+ {
+        // No managed debugger, but a native debugger is attached. Explicitly fire a native user breakpoint.
+ // Don't rely on Watson support since that may have a different policy.
+
+ // Toggle to preemptive before firing the debug event. This allows the debugger to suspend this
+ // thread at the debug event.
+ GCX_PREEMP();
+
+ // This becomes an unmanaged breakpoint, such as int 3.
+ DebugBreak();
+ }
+ else
+ {
+#ifndef FEATURE_CORECLR
+ // No debugger attached -- Watson up.
+
+ // The HelperMethodFrame knows how to get the return address.
+ DoWatsonForUserBreak(HELPER_METHOD_FRAME_GET_RETURN_ADDRESS());
+#endif //FEATURE_CORECLR
+ }
+
+ HELPER_METHOD_FRAME_END();
+#endif // DEBUGGING_SUPPORTED
+}
+FCIMPLEND
+
+FCIMPL0(FC_BOOL_RET, DebugDebugger::Launch)
+{
+ FCALL_CONTRACT;
+
+#ifdef DEBUGGING_SUPPORTED
+ if (CORDebuggerAttached())
+ {
+ FC_RETURN_BOOL(TRUE);
+ }
+ else if (g_pDebugInterface != NULL)
+ {
+ HRESULT hr = S_OK;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ hr = g_pDebugInterface->LaunchDebuggerForUser(GetThread(), NULL, TRUE, TRUE);
+
+ HELPER_METHOD_FRAME_END();
+
+ if (SUCCEEDED (hr))
+ {
+ FC_RETURN_BOOL(TRUE);
+ }
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ FC_RETURN_BOOL(FALSE);
+}
+FCIMPLEND
+
+
+FCIMPL0(FC_BOOL_RET, DebugDebugger::IsDebuggerAttached)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_RET();
+
+#ifdef DEBUGGING_SUPPORTED
+ FC_RETURN_BOOL(CORDebuggerAttached());
+#else // DEBUGGING_SUPPORTED
+ FC_RETURN_BOOL(FALSE);
+#endif
+}
+FCIMPLEND
+
+
+/*static*/ BOOL DebugDebugger::IsLoggingHelper()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef DEBUGGING_SUPPORTED
+ if (CORDebuggerAttached())
+ {
+ return (g_pDebugInterface->IsLoggingEnabled());
+ }
+#endif // DEBUGGING_SUPPORTED
+ return FALSE;
+}
+
+
+// Log to managed debugger.
+// It will send a managed log event, which will faithfully send the two string parameters here without
+// appending a newline to anything.
+// It will also call OutputDebugString() which will send a native debug event. The message
+// string there will be a composite of the two managed string parameters and may include a newline.
+FCIMPL3(void, DebugDebugger::Log,
+ INT32 Level,
+ StringObject* strModuleUNSAFE,
+ StringObject* strMessageUNSAFE
+ )
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(strModuleUNSAFE, NULL_OK));
+ PRECONDITION(CheckPointer(strMessageUNSAFE, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ STRINGREF strModule = (STRINGREF)ObjectToOBJECTREF(strModuleUNSAFE);
+ STRINGREF strMessage = (STRINGREF)ObjectToOBJECTREF(strMessageUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_2(strModule, strMessage);
+
+ // OutputDebugString will log to native/interop debugger.
+ if (strModule != NULL)
+ {
+ WszOutputDebugString(strModule->GetBuffer());
+ WszOutputDebugString(W(" : "));
+ }
+
+ if (strMessage != NULL)
+ {
+ WszOutputDebugString(strMessage->GetBuffer());
+ }
+
+ // If we're not logging a module prefix, then don't log the newline either.
+ // Thus if somebody is just logging messages, there won't be any extra newlines in there.
+ // If somebody is also logging category / module information, then this call to OutputDebugString is
+ // already prepending that to the message, so we append a newline for readability.
+ if (strModule != NULL)
+ {
+ WszOutputDebugString(W("\n"));
+ }
+
+
+#ifdef DEBUGGING_SUPPORTED
+
+ // Send message for logging only if the
+ // debugger is attached and logging is enabled
+ // for the given category
+ if (CORDebuggerAttached())
+ {
+ if (IsLoggingHelper() )
+ {
+ // Copy log message and category into our own SString to protect against GC
+ // Managed strings may contain embedded nulls, but the downstream code expects
+ // null-terminated strings, so truncate at the first null (via wcslen) now.
+ StackSString switchName;
+ if( strModule != NULL )
+ {
+ // truncate if necessary
+ COUNT_T iLen = (COUNT_T) wcslen(strModule->GetBuffer());
+ if (iLen > MAX_LOG_SWITCH_NAME_LEN)
+ {
+ iLen = MAX_LOG_SWITCH_NAME_LEN;
+ }
+ switchName.Set(strModule->GetBuffer(), iLen);
+ }
+
+ SString message;
+ if( strMessage != NULL )
+ {
+ message.Set(strMessage->GetBuffer(), (COUNT_T) wcslen(strMessage->GetBuffer()));
+ }
+
+ g_pDebugInterface->SendLogMessage (Level, &switchName, &message);
+ }
+ }
+
+#endif // DEBUGGING_SUPPORTED
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+FCIMPL0(FC_BOOL_RET, DebugDebugger::IsLogging)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_RET();
+
+ FC_RETURN_BOOL(IsLoggingHelper());
+}
+FCIMPLEND
+
+
+FCIMPL3(void, DebugStackTrace::GetStackFramesInternal,
+ StackFrameHelper* pStackFrameHelperUNSAFE,
+ INT32 iSkip,
+ Object* pExceptionUNSAFE
+ )
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pStackFrameHelperUNSAFE));
+ PRECONDITION(CheckPointer(pExceptionUNSAFE, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ STACKFRAMEHELPERREF pStackFrameHelper = (STACKFRAMEHELPERREF)ObjectToOBJECTREF(pStackFrameHelperUNSAFE);
+ OBJECTREF pException = ObjectToOBJECTREF(pExceptionUNSAFE);
+ PTRARRAYREF dynamicMethodArrayOrig = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_2(pStackFrameHelper, pException);
+
+ GCPROTECT_BEGIN(dynamicMethodArrayOrig);
+
+ ASSERT(iSkip >= 0);
+
+ GetStackFramesData data;
+
+ data.pDomain = GetAppDomain();
+
+ data.skip = iSkip;
+
+ data.NumFramesRequested = pStackFrameHelper->iFrameCount;
+
+ if (pException == NULL)
+ {
+ // Thread is NULL if it's the current thread.
+ data.TargetThread = pStackFrameHelper->TargetThread;
+ GetStackFrames(NULL, (void*)-1, &data);
+ }
+ else
+ {
+ // We also fetch the dynamic method array into a GC-protected local to ensure
+ // that the resolver objects, if any, are kept alive in case the exception object
+ // is thrown again (which resets the dynamic method array reference in the object);
+ // otherwise the resolver objects could be collected before the code below
+ // can reach them.
+ GetStackFramesFromException(&pException, &data, &dynamicMethodArrayOrig);
+ }
+
+ if (data.cElements != 0)
+ {
+#ifdef FEATURE_COMINTEROP
+ if (pStackFrameHelper->fNeedFileInfo)
+ {
+ // Calls to COM up ahead.
+ EnsureComStarted();
+ }
+#endif // FEATURE_COMINTEROP
+
+ MethodTable *pMT = MscorlibBinder::GetClass(CLASS__METHOD_HANDLE);
+ TypeHandle arrayHandle = ClassLoader::LoadArrayTypeThrowing(TypeHandle(pMT), ELEMENT_TYPE_SZARRAY);
+
+ // Allocate memory for the MethodInfo objects
+ BASEARRAYREF MethodInfoArray = (BASEARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_I, data.cElements);
+ //printf("\nmethod table = %X\n", pMT);
+
+ SetObjectReference( (OBJECTREF *)&(pStackFrameHelper->rgMethodHandle), (OBJECTREF)MethodInfoArray,
+ pStackFrameHelper->GetAppDomain());
+
+ // Allocate memory for the Offsets
+ OBJECTREF Offsets = AllocatePrimitiveArray(ELEMENT_TYPE_I4, data.cElements);
+
+ SetObjectReference( (OBJECTREF *)&(pStackFrameHelper->rgiOffset), (OBJECTREF)Offsets,
+ pStackFrameHelper->GetAppDomain());
+
+ // Allocate memory for the ILOffsets
+ OBJECTREF ILOffsets = AllocatePrimitiveArray(ELEMENT_TYPE_I4, data.cElements);
+
+ SetObjectReference( (OBJECTREF *)&(pStackFrameHelper->rgiILOffset), (OBJECTREF)ILOffsets,
+ pStackFrameHelper->GetAppDomain());
+
+ // If we need the file name, line number, etc., then allocate memory for them below.
+ // Allocate memory for the Filename string objects
+ PTRARRAYREF FilenameArray = (PTRARRAYREF) AllocateObjectArray(data.cElements, g_pStringClass);
+
+ SetObjectReference( (OBJECTREF *)&(pStackFrameHelper->rgFilename), (OBJECTREF)FilenameArray,
+ pStackFrameHelper->GetAppDomain());
+
+ // Allocate memory for the line numbers
+ OBJECTREF LineNumbers = AllocatePrimitiveArray(ELEMENT_TYPE_I4, data.cElements);
+
+ SetObjectReference( (OBJECTREF *)&(pStackFrameHelper->rgiLineNumber), (OBJECTREF)LineNumbers,
+ pStackFrameHelper->GetAppDomain());
+
+ // Allocate memory for the column numbers
+ OBJECTREF ColumnNumbers = AllocatePrimitiveArray(ELEMENT_TYPE_I4, data.cElements);
+
+ SetObjectReference( (OBJECTREF *)&(pStackFrameHelper->rgiColumnNumber), (OBJECTREF)ColumnNumbers,
+ pStackFrameHelper->GetAppDomain());
+
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ // Allocate memory for the flag indicating if this frame represents the last one from a foreign
+ // exception stack trace provided we have any such frames. Otherwise, set it to null.
+ // When StackFrameHelper.IsLastFrameFromForeignExceptionStackTrace is invoked in managed code,
+ // it will return false for the null case.
+ //
+ // This is an optimization for us to not allocate the BOOL array if we do not have any frames
+ // from a foreign stack trace.
+ OBJECTREF IsLastFrameFromForeignStackTraceFlags = NULL;
+ if (data.fDoWeHaveAnyFramesFromForeignStackTrace)
+ {
+ IsLastFrameFromForeignStackTraceFlags = AllocatePrimitiveArray(ELEMENT_TYPE_BOOLEAN, data.cElements);
+
+ SetObjectReference( (OBJECTREF *)&(pStackFrameHelper->rgiLastFrameFromForeignExceptionStackTrace), (OBJECTREF)IsLastFrameFromForeignStackTraceFlags,
+ pStackFrameHelper->GetAppDomain());
+ }
+ else
+ {
+ SetObjectReference( (OBJECTREF *)&(pStackFrameHelper->rgiLastFrameFromForeignExceptionStackTrace), NULL,
+ pStackFrameHelper->GetAppDomain());
+ }
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+ // Determine if there are any dynamic methods in the stack trace. If there are,
+ // allocate an ObjectArray large enough to hold an ObjRef to each one.
+ unsigned iNumDynamics = 0;
+ unsigned iCurDynamic = 0;
+ for (int iElement=0; iElement < data.cElements; iElement++)
+ {
+ MethodDesc *pMethod = data.pElements[iElement].pFunc;
+ if (pMethod->IsLCGMethod())
+ {
+ iNumDynamics++;
+ }
+ else
+ if (pMethod->GetMethodTable()->Collectible())
+ {
+ iNumDynamics++;
+ }
+ }
+
+ if (iNumDynamics)
+ {
+ PTRARRAYREF DynamicDataArray = (PTRARRAYREF) AllocateObjectArray(iNumDynamics, g_pObjectClass);
+
+ SetObjectReference( (OBJECTREF *)&(pStackFrameHelper->dynamicMethods), (OBJECTREF)DynamicDataArray,
+ pStackFrameHelper->GetAppDomain());
+ }
+
+ int iNumValidFrames = 0;
+ for (int i=0; i<data.cElements; i++)
+ {
+ size_t *pElem = (size_t*)pStackFrameHelper->rgMethodHandle->GetDataPtr();
+
+ // The managed stack trace classes always return the typical method definition, so we don't need to bother providing the exact instantiation.
+ // Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation(data.pElements[i].pFunc, data.pElements[i].pExactGenericArgsToken, &pExactMethod, &thExactType);
+ MethodDesc* pFunc = data.pElements[i].pFunc;
+
+ // Strip the instantiation to make sure that the reflection never gets a bad method desc back.
+ if (pFunc->HasMethodInstantiation())
+ pFunc = pFunc->StripMethodInstantiation();
+ _ASSERTE(pFunc->IsRuntimeMethodHandle());
+
+ pElem[iNumValidFrames] = (size_t)pFunc;
+
+ // native offset
+ I4 *pI4 = (I4 *)((I4ARRAYREF)pStackFrameHelper->rgiOffset)
+ ->GetDirectPointerToNonObjectElements();
+ pI4 [iNumValidFrames] = data.pElements[i].dwOffset;
+
+ // IL offset
+ I4 *pILI4 = (I4 *)((I4ARRAYREF)pStackFrameHelper->rgiILOffset)
+ ->GetDirectPointerToNonObjectElements();
+ pILI4 [iNumValidFrames] = data.pElements[i].dwILOffset;
+
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ if (data.fDoWeHaveAnyFramesFromForeignStackTrace)
+ {
+ // Set the BOOL indicating if the frame represents the last frame from a foreign exception stack trace.
+ U1 *pIsLastFrameFromForeignExceptionStackTraceU1 = (U1 *)((BOOLARRAYREF)pStackFrameHelper->rgiLastFrameFromForeignExceptionStackTrace)
+ ->GetDirectPointerToNonObjectElements();
+ pIsLastFrameFromForeignExceptionStackTraceU1 [iNumValidFrames] = (U1) data.pElements[i].fIsLastFrameFromForeignStackTrace;
+ }
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+ BOOL fFileInfoSet = FALSE;
+ MethodDesc *pMethod = data.pElements[i].pFunc;
+
+ // If there are any dynamic methods, and this one is one of them, store
+ // a reference to its managed resolver to keep it alive.
+ if (iNumDynamics)
+ {
+ if (pMethod->IsLCGMethod())
+ {
+ DynamicMethodDesc *pDMD = pMethod->AsDynamicMethodDesc();
+ OBJECTREF pResolver = pDMD->GetLCGMethodResolver()->GetManagedResolver();
+ _ASSERTE(pResolver != NULL);
+
+ ((PTRARRAYREF)pStackFrameHelper->dynamicMethods)->SetAt (iCurDynamic++, pResolver);
+ }
+ else
+ if (pMethod->GetMethodTable()->Collectible())
+ {
+ OBJECTREF pLoaderAllocator = pMethod->GetMethodTable()->GetLoaderAllocator()->GetExposedObject();
+ _ASSERTE(pLoaderAllocator != NULL);
+ ((PTRARRAYREF)pStackFrameHelper->dynamicMethods)->SetAt (iCurDynamic++, pLoaderAllocator);
+ }
+ }
+
+#ifdef FEATURE_ISYM_READER
+ Module *pModule = pMethod->GetModule();
+
+ // If it's an EnC method, then don't give back any line info, b/c the PDB is out of date.
+ // (We're using the stale PDB, not one w/ Edits applied).
+ // Since the MethodDesc is always the most recent, v1 instances of EnC methods on the stack
+ // will appear to be EnC. This means we err on the side of not showing line numbers for EnC methods.
+ // If any method in the file was changed, then our line numbers could be wrong. Since we don't
+ // have updated PDBs from EnC, we can at best look at the module's version number as a rough
+ // guess as to whether this file has been updated.
+ bool fIsEnc = false;
+#ifdef EnC_SUPPORTED
+ if (pModule->IsEditAndContinueEnabled())
+ {
+ EditAndContinueModule *eacm = (EditAndContinueModule *) pModule;
+ if (eacm->GetApplyChangesCount() != 1)
+ {
+ fIsEnc = true;
+ }
+ }
+#endif
+
+ // check if the user wants the file name / line number info...
+ if (!fIsEnc && pStackFrameHelper->fNeedFileInfo)
+ {
+ // Use the MethodDesc...
+ ULONG32 sourceLine = 0;
+ ULONG32 sourceColumn = 0;
+ WCHAR wszFileName[MAX_PATH];
+ ULONG32 fileNameLength = 0;
+
+ {
+ // Note: we need to enable preemptive GC when accessing the unmanaged symbol store.
+ GCX_PREEMP();
+
+ // Note: we use the NoThrow version of GetISymUnmanagedReader. If getting the unmanaged
+ // reader fails, then just leave the pointer NULL and leave any symbol info off of the
+ // stack trace.
+ ReleaseHolder<ISymUnmanagedReader> pISymUnmanagedReader(
+ pModule->GetISymUnmanagedReaderNoThrow());
+
+ if (pISymUnmanagedReader != NULL)
+ {
+ ReleaseHolder<ISymUnmanagedMethod> pISymUnmanagedMethod;
+ HRESULT hr = pISymUnmanagedReader->GetMethod(pMethod->GetMemberDef(),
+ &pISymUnmanagedMethod);
+
+ if (SUCCEEDED(hr))
+ {
+ // get all the sequence points and the documents
+ // associated with those sequence points.
+ // from the document get the filename using GetURL()
+ ULONG32 SeqPointCount = 0;
+ ULONG32 RealSeqPointCount = 0;
+
+ hr = pISymUnmanagedMethod->GetSequencePointCount(&SeqPointCount);
+ _ASSERTE (SUCCEEDED(hr) || (hr == E_OUTOFMEMORY) );
+
+ if (SUCCEEDED(hr) && SeqPointCount > 0)
+ {
+ // allocate memory for the objects to be fetched
+ NewArrayHolder<ULONG32> offsets (new (nothrow) ULONG32 [SeqPointCount]);
+ NewArrayHolder<ULONG32> lines (new (nothrow) ULONG32 [SeqPointCount]);
+ NewArrayHolder<ULONG32> columns (new (nothrow) ULONG32 [SeqPointCount]);
+ NewArrayHolder<ULONG32> endlines (new (nothrow) ULONG32 [SeqPointCount]);
+ NewArrayHolder<ULONG32> endcolumns (new (nothrow) ULONG32 [SeqPointCount]);
+
+ // we free the array automatically, but we have to manually call release
+ // on each element in the array when we're done with it.
+ NewArrayHolder<ISymUnmanagedDocument*> documents (
+ (ISymUnmanagedDocument **)new PVOID [SeqPointCount]);
+
+ if ((offsets && lines && columns && documents && endlines && endcolumns))
+ {
+ hr = pISymUnmanagedMethod->GetSequencePoints (
+ SeqPointCount,
+ &RealSeqPointCount,
+ offsets,
+ (ISymUnmanagedDocument **)documents,
+ lines,
+ columns,
+ endlines,
+ endcolumns);
+
+ _ASSERTE(SUCCEEDED(hr) || (hr == E_OUTOFMEMORY) );
+
+ if (SUCCEEDED(hr))
+ {
+ _ASSERTE(RealSeqPointCount == SeqPointCount);
+
+#ifdef _DEBUG
+ {
+ // This is just some debugging code to help ensure that the array
+ // returned contains valid interface pointers.
+ for (ULONG32 i = 0; i < RealSeqPointCount; i++)
+ {
+ _ASSERTE(documents[i] != NULL);
+ documents[i]->AddRef();
+ documents[i]->Release();
+ }
+ }
+#endif
+
+ // This is the IL offset of the current frame
+ DWORD dwCurILOffset = data.pElements[i].dwILOffset;
+
+ // search for the correct IL offset
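+ // For illustration (a hypothetical layout): if the sequence-point offsets are
+ // {0, 8, 20} and dwCurILOffset is 12, the loop below stops at offsets[2] == 20,
+ // sees it overshoots, and steps back to offsets[1] == 8 -- the sequence point
+ // that contains IL offset 12.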
+ DWORD j;
+ for (j=0; j<RealSeqPointCount; j++)
+ {
+ // look for the entry matching the one we're looking for
+ if (offsets[j] >= dwCurILOffset)
+ {
+ // if this offset is > what we're looking for, adjust the index
+ if (offsets[j] > dwCurILOffset && j > 0)
+ {
+ j--;
+ }
+
+ break;
+ }
+ }
+
+ // If we didn't find a match, default to the last sequence point
+ if (j == RealSeqPointCount)
+ {
+ j--;
+ }
+
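+ // 0x00feefee is the sentinel line number used for "hidden" sequence
+ // points (code with no corresponding source line); walk backwards
+ // past any such entries to the nearest real line.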
+ while (lines[j] == 0x00feefee && j > 0)
+ {
+ j--;
+ }
+
+#ifdef DEBUGGING_SUPPORTED
+ if (lines[j] != 0x00feefee)
+ {
+ sourceLine = lines [j];
+ sourceColumn = columns [j];
+ }
+ else
+#endif // DEBUGGING_SUPPORTED
+ {
+ sourceLine = 0;
+ sourceColumn = 0;
+ }
+
+ // Also get the filename from the document...
+ _ASSERTE (documents [j] != NULL);
+
+ hr = documents [j]->GetURL (MAX_PATH, &fileNameLength, wszFileName);
+ _ASSERTE ( SUCCEEDED(hr) || (hr == E_OUTOFMEMORY) || (hr == HRESULT_FROM_WIN32(ERROR_NOT_ENOUGH_MEMORY)) );
+
+
+ // indicate that the requisite information has been set!
+ fFileInfoSet = TRUE;
+
+ // release the documents set by GetSequencePoints
+ for (DWORD x=0; x<RealSeqPointCount; x++)
+ {
+ documents [x]->Release();
+ }
+ } // if got sequence points
+
+ } // if all memory allocations succeeded
+
+ // holders will now delete the arrays.
+ }
+ }
+ // Holder will release pISymUnmanagedMethod
+ }
+
+ } // GCX_PREEMP()
+
+ if (fFileInfoSet == TRUE)
+ {
+ // Set the line and column numbers
+ I4 *pI4Line = (I4 *)((I4ARRAYREF)pStackFrameHelper->rgiLineNumber)
+ ->GetDirectPointerToNonObjectElements();
+ I4 *pI4Column = (I4 *)((I4ARRAYREF)pStackFrameHelper->rgiColumnNumber)
+ ->GetDirectPointerToNonObjectElements();
+
+ pI4Line [iNumValidFrames] = sourceLine;
+ pI4Column [iNumValidFrames] = sourceColumn;
+
+ // Set the file name
+ OBJECTREF obj = (OBJECTREF) StringObject::NewString(wszFileName);
+ pStackFrameHelper->rgFilename->SetAt(iNumValidFrames, obj);
+ }
+ }
+#endif // FEATURE_ISYM_READER
+
+ if (fFileInfoSet == FALSE)
+ {
+ I4 *pI4Line = (I4 *)((I4ARRAYREF)pStackFrameHelper->rgiLineNumber)
+ ->GetDirectPointerToNonObjectElements();
+ I4 *pI4Column = (I4 *)((I4ARRAYREF)pStackFrameHelper->rgiColumnNumber)
+ ->GetDirectPointerToNonObjectElements();
+ pI4Line [iNumValidFrames] = 0;
+ pI4Column [iNumValidFrames] = 0;
+
+ pStackFrameHelper->rgFilename->SetAt(iNumValidFrames, NULL);
+
+ }
+
+ iNumValidFrames++;
+ }
+
+ pStackFrameHelper->iFrameCount = iNumValidFrames;
+
+ /*
+ int *pArray = (int*)OBJECTREFToObject(pStackFrameHelper->rgMethodHandle);
+ printf("array { MT - %X, size = %d", pArray[0], pArray[1]);
+ for (int i=0; i<pArray[1]; i++)
+ {
+ printf(", method desc in array[%d] = %X", i, pArray[i + 2]);
+ }
+ printf("}\n");
+ */
+
+ }
+ else
+ {
+ pStackFrameHelper->iFrameCount = 0;
+ }
+
+ GCPROTECT_END();
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FORCEINLINE void HolderDestroyStrongHandle(OBJECTHANDLE h) { if (h != NULL) DestroyStrongHandle(h); }
+typedef Wrapper<OBJECTHANDLE, DoNothing<OBJECTHANDLE>, HolderDestroyStrongHandle, NULL> StrongHandleHolder;
+
+// receives a custom notification object from the target and sends it to the RS via
+// code:Debugger::SendCustomDebuggerNotification
+// Argument: dataUNSAFE - a pointer to the custom notification object being sent
+FCIMPL1(void, DebugDebugger::CustomNotification, Object * dataUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pData = ObjectToOBJECTREF(dataUNSAFE);
+
+#ifdef DEBUGGING_SUPPORTED
+ // Send notification only if the debugger is attached
+ if (CORDebuggerAttached() )
+ {
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(pData);
+
+ Thread * pThread = GetThread();
+ AppDomain * pAppDomain = pThread->GetDomain();
+
+ StrongHandleHolder objHandle = pAppDomain->CreateStrongHandle(pData);
+ MethodTable * pMT = pData->GetGCSafeMethodTable();
+ Module * pModule = pMT->GetModule();
+ DomainFile * pDomainFile = pModule->GetDomainFile(pAppDomain);
+ mdTypeDef classToken = pMT->GetCl();
+
+ pThread->SetThreadCurrNotification(objHandle);
+ g_pDebugInterface->SendCustomDebuggerNotification(pThread, pDomainFile, classToken);
+ pThread->ClearThreadCurrNotification();
+
+ TESTHOOKCALL(AppDomainCanBeUnloaded(pThread->GetDomain()->GetId().m_dwId, FALSE));
+ if (pThread->IsAbortRequested())
+ {
+ pThread->HandleThreadAbort();
+ }
+
+ HELPER_METHOD_FRAME_END();
+ }
+
+#endif // DEBUGGING_SUPPORTED
+
+}
+FCIMPLEND
+
+
+void DebugStackTrace::GetStackFramesHelper(Frame *pStartFrame,
+ void* pStopStack,
+ GetStackFramesData *pData
+ )
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ ASSERT (pData != NULL);
+
+ pData->cElements = 0;
+
+ // If the caller requested fewer than 20 frames, then allocate
+ // only that many.
+ if ((pData->NumFramesRequested > 0) && (pData->NumFramesRequested < 20))
+ {
+ pData->cElementsAllocated = pData->NumFramesRequested;
+ }
+ else
+ {
+ pData->cElementsAllocated = 20;
+ }
+
+ // Allocate memory for the initial 'n' frames
+ pData->pElements = new DebugStackTraceElement[pData->cElementsAllocated];
+
+ if (pData->TargetThread == NULL ||
+ pData->TargetThread->GetInternal() == GetThread())
+ {
+ // Null target thread specifies current thread.
+ GetThread()->StackWalkFrames(GetStackFramesCallback, pData, FUNCTIONSONLY, pStartFrame);
+ }
+ else
+ {
+ Thread *pThread = pData->TargetThread->GetInternal();
+ _ASSERTE (pThread != NULL);
+
+ // Here's the timeline for the TS_UserSuspendPending and TS_SyncSuspended bits.
+ // 0) Neither TS_UserSuspendPending nor TS_SyncSuspended set.
+ // 1) The suspending thread grabs the thread store lock
+ // then sets TS_UserSuspendPending
+ // then puts in place trip wires for the suspendee (if it is in managed code)
+ // and releases the thread store lock.
+ // 2) The suspending thread waits for the "SafeEvent".
+ // 3) The suspendee continues execution until it tries to enter preemptive mode.
+ // If it trips over the wires put in place by the suspending thread,
+ // it will try to enter preemptive mode.
+ // 4) The suspendee sets TS_SyncSuspended and the "SafeEvent".
+ // Then it waits for m_UserSuspendEvent.
+ // 5) AT THIS POINT, IT IS SAFE TO WALK THE SUSPENDEE'S STACK.
+ // 6) Now, some thread wants to resume the suspendee.
+ // The resuming thread takes the thread store lock
+ // then clears the TS_UserSuspendPending flag
+ // then sets m_UserSuspendEvent
+ // and releases the thread store lock.
+ // 7) The suspendee clears the TS_SyncSuspended flag.
+ //
+ // In other words, it is safe to trace the thread's stack IF we're holding the
+ // thread store lock AND TS_UserSuspendPending is set AND TS_SyncSuspended is set.
+ //
+ // This is because:
+ // - If we were not holding the thread store lock, the thread could be resumed
+ // underneath us.
+ // - As long as only TS_UserSuspendPending is set (and the thread is in cooperative
+ // mode), the thread can still be executing managed code until it trips.
+ // - When only TS_SyncSuspended is set, we race against it resuming execution.
+
+ ThreadStoreLockHolder tsl;
+
+ // We erect a barrier so that if the thread tries to disable preemptive GC,
+ // it will look at the TS_UserSuspendPending flag. Otherwise, it could resume
+ // execution of managed code during our stack walk.
+ TSSuspendHolder shTrap;
+
+ Thread::ThreadState state = pThread->GetSnapshotState();
+ if (state & (Thread::TS_Unstarted|Thread::TS_Dead|Thread::TS_Detached))
+ {
+ goto LSafeToTrace;
+ }
+
+ if (state & Thread::TS_UserSuspendPending)
+ {
+ if (state & Thread::TS_SyncSuspended)
+ {
+ goto LSafeToTrace;
+ }
+
+#ifndef DISABLE_THREADSUSPEND
+ // On Mac don't perform the optimization below, but rather wait for
+ // the suspendee to set the TS_SyncSuspended flag
+
+ // The target thread is not actually suspended yet, but if it is
+ // in preemptive mode, then it is still safe to trace. Before we
+ // can look at another thread's GC mode, we have to suspend it:
+ // The target thread updates its GC mode flag with non-interlocked
+ // operations, and Thread::SuspendThread drains the CPU's store
+ // buffer (by virtue of calling GetThreadContext).
+ switch (pThread->SuspendThread())
+ {
+ case Thread::STR_Success:
+ if (!pThread->PreemptiveGCDisabledOther())
+ {
+ pThread->ResumeThread();
+ goto LSafeToTrace;
+ }
+
+ // Refuse to trace the stack.
+ //
+ // Note that there is a pretty large window in-between when the
+ // target thread sets the GC mode to cooperative, and when it
+ // actually sets the TS_SyncSuspended bit. In this window, we
+ // will refuse to take a stack trace even though it would be
+ // safe to do so.
+ pThread->ResumeThread();
+ break;
+ case Thread::STR_Failure:
+ case Thread::STR_NoStressLog:
+ break;
+ case Thread::STR_UnstartedOrDead:
+ // We know the thread is not unstarted, because we checked for
+ // TS_Unstarted above.
+ _ASSERTE(!(state & Thread::TS_Unstarted));
+
+ // Since the thread is dead, it is safe to trace.
+ goto LSafeToTrace;
+ case Thread::STR_SwitchedOut:
+ if (!pThread->PreemptiveGCDisabledOther())
+ {
+ goto LSafeToTrace;
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+#endif // DISABLE_THREADSUSPEND
+ }
+
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_BAD_STATE);
+
+ LSafeToTrace:
+ pThread->StackWalkFrames(GetStackFramesCallback,
+ pData,
+ FUNCTIONSONLY|ALLOW_ASYNC_STACK_WALK,
+ pStartFrame);
+ }
+
+ // Do a 2nd pass outside of any locks.
+ // This will compute IL offsets.
+ for(INT32 i = 0; i < pData->cElements; i++)
+ {
+ pData->pElements[i].InitPass2();
+ }
+
+}
+
+
+void DebugStackTrace::GetStackFrames(Frame *pStartFrame,
+ void* pStopStack,
+ GetStackFramesData *pData
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ GetStackFramesHelper(pStartFrame, pStopStack, pData);
+}
+
+
+StackWalkAction DebugStackTrace::GetStackFramesCallback(CrawlFrame* pCf, VOID* data)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ GetStackFramesData* pData = (GetStackFramesData*)data;
+
+ if (pData->pDomain != pCf->GetAppDomain())
+ {
+ return SWA_CONTINUE;
+ }
+
+ if (pData->skip > 0)
+ {
+ pData->skip--;
+ return SWA_CONTINUE;
+ }
+
+ // <REVISIT_TODO>@todo: How do we know what kind of frame we have?</REVISIT_TODO>
+ // Can we always assume FramedMethodFrame?
+ // NOT AT ALL!!!, but we can assume it's a function
+ // because we asked the stackwalker for it!
+ MethodDesc* pFunc = pCf->GetFunction();
+
+ if (pData->cElements >= pData->cElementsAllocated)
+ {
+
+ DebugStackTraceElement* pTemp = new (nothrow) DebugStackTraceElement[2*pData->cElementsAllocated];
+
+ if (!pTemp)
+ {
+ return SWA_ABORT;
+ }
+
+ memcpy(pTemp, pData->pElements, pData->cElementsAllocated * sizeof(DebugStackTraceElement));
+
+ delete [] pData->pElements;
+
+ pData->pElements = pTemp;
+ pData->cElementsAllocated *= 2;
+ }
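+
+ // Doubling keeps appends amortized O(1); from the initial allocation of at
+ // most 20 elements, the capacity grows 20 -> 40 -> 80 and so on as needed.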
+
+ PCODE ip;
+ DWORD dwNativeOffset;
+
+ if (pCf->IsFrameless())
+ {
+ // Real method with jitted code.
+ dwNativeOffset = pCf->GetRelOffset();
+ ip = GetControlPC(pCf->GetRegisterSet());
+ }
+ else
+ {
+ ip = NULL;
+ dwNativeOffset = 0;
+ }
+
+ pData->pElements[pData->cElements].InitPass1(
+ dwNativeOffset,
+ pFunc,
+ ip);
+
+ // We'll init the IL offsets outside the TSL lock.
+
+
+ ++pData->cElements;
+
+ // Since we may be asynchronously walking another thread's stack,
+ // check (frequently) for stack-buffer-overrun corruptions after
+ // any long operation
+ pCf->CheckGSCookies();
+
+ // check if we already have the number of frames that the user had asked for
+ if ((pData->NumFramesRequested != 0) && (pData->NumFramesRequested <= pData->cElements))
+ {
+ return SWA_ABORT;
+ }
+
+ return SWA_CONTINUE;
+}
+#endif // !DACCESS_COMPILE
+
+void DebugStackTrace::GetStackFramesFromException(OBJECTREF * e,
+ GetStackFramesData *pData,
+ PTRARRAYREF * pDynamicMethodArray /*= NULL*/
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION (IsProtectedByGCFrame (e));
+ PRECONDITION ((pDynamicMethodArray == NULL) || IsProtectedByGCFrame (pDynamicMethodArray));
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ ASSERT (pData != NULL);
+
+ // Reasonable default, will indicate error on failure
+ pData->cElements = 0;
+
+#ifndef DACCESS_COMPILE
+ // for DAC builds this has already been validated
+ // Get the class for the exception
+ MethodTable *pExcepClass = (*e)->GetMethodTable();
+
+ _ASSERTE(IsException(pExcepClass)); // what is the pathway for this?
+ if (!IsException(pExcepClass))
+ {
+ return;
+ }
+#endif // DACCESS_COMPILE
+
+ // Now get the _stackTrace reference
+ StackTraceArray traceData;
+ EXCEPTIONREF(*e)->GetStackTrace(traceData, pDynamicMethodArray);
+
+ GCPROTECT_BEGIN(traceData);
+ // The number of frame info elements in the stack trace info
+ pData->cElements = static_cast<int>(traceData.Size());
+
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ // By default, assume that we have no frames from foreign exception stack trace.
+ pData->fDoWeHaveAnyFramesFromForeignStackTrace = FALSE;
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+ // Now we know the size, allocate the information for the data struct
+ if (pData->cElements != 0)
+ {
+ // Allocate the memory to contain the data
+ pData->pElements = new DebugStackTraceElement[pData->cElements];
+
+ // Fill in the data
+ for (unsigned i = 0; i < (unsigned)pData->cElements; i++)
+ {
+ StackTraceElement const & cur = traceData[i];
+
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ // If we come across any frame representing foreign exception stack trace,
+ // then set the flag indicating so. This will be used to allocate the
+ // corresponding array in StackFrameHelper.
+ if (cur.fIsLastFrameFromForeignStackTrace)
+ {
+ pData->fDoWeHaveAnyFramesFromForeignStackTrace = TRUE;
+ }
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+ // Fill out the MethodDesc*
+ MethodDesc *pMD = cur.pFunc;
+ _ASSERTE(pMD);
+
+ // Calculate the native offset
+ // This doesn't work for framed methods, since internal calls won't
+ // push frames and the method body is therefore non-contiguous.
+ // Currently such methods always return an IP of 0, so they're easy
+ // to spot.
+ DWORD dwNativeOffset;
+
+ if (cur.ip)
+ {
+ dwNativeOffset = (DWORD)(cur.ip - (UINT_PTR)pMD->GetNativeCode());
+ }
+ else
+ {
+ dwNativeOffset = 0;
+ }
+
+ pData->pElements[i].InitPass1(dwNativeOffset, pMD, (PCODE) cur.ip
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ , cur.fIsLastFrameFromForeignStackTrace
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ );
+#ifndef DACCESS_COMPILE
+ pData->pElements[i].InitPass2();
+#endif
+ }
+ }
+ else
+ {
+ pData->pElements = NULL;
+ }
+ GCPROTECT_END();
+
+ return;
+}
+
+// Init a stack-trace element.
+// Initialization done potentially under the TSL.
+void DebugStackTrace::DebugStackTraceElement::InitPass1(
+ DWORD dwNativeOffset,
+ MethodDesc *pFunc,
+ PCODE ip
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ , BOOL fIsLastFrameFromForeignStackTrace /*= FALSE*/
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pFunc != NULL);
+
+ // May have a null IP for ecall frames. If IP is null, then dwNativeOffset should be 0 too.
+ _ASSERTE ( (ip != NULL) || (dwNativeOffset == 0) );
+
+ this->pFunc = pFunc;
+ this->dwOffset = dwNativeOffset;
+ this->ip = ip;
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ this->fIsLastFrameFromForeignStackTrace = fIsLastFrameFromForeignStackTrace;
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+}
+
+#ifndef DACCESS_COMPILE
+
+// Initialization done outside the TSL.
+// This may need to call locking operations that aren't safe under the TSL.
+void DebugStackTrace::DebugStackTraceElement::InitPass2()
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!ThreadStore::HoldingThreadStore());
+
+ bool bRes = false;
+
+#ifdef DEBUGGING_SUPPORTED
+ // Calculate the IL offset using the debugging services
+ if ((this->ip != NULL) && g_pDebugInterface)
+ {
+ bRes = g_pDebugInterface->GetILOffsetFromNative(
+ pFunc, (LPCBYTE) this->ip, this->dwOffset, &this->dwILOffset);
+ }
+
+#endif // DEBUGGING_SUPPORTED
+
+ // If there was no mapping information, then set to an invalid value
+ if (!bRes)
+ {
+ this->dwILOffset = (DWORD)-1;
+ }
+}
+
+FCIMPL4(INT32, DebuggerAssert::ShowDefaultAssertDialog,
+ StringObject* strConditionUNSAFE,
+ StringObject* strMessageUNSAFE,
+ StringObject* strStackTraceUNSAFE,
+ StringObject* strWindowTitleUNSAFE
+ )
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(strConditionUNSAFE, NULL_OK));
+ PRECONDITION(CheckPointer(strMessageUNSAFE, NULL_OK));
+ PRECONDITION(CheckPointer(strStackTraceUNSAFE, NULL_OK));
+ PRECONDITION(CheckPointer(strWindowTitleUNSAFE, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ int result = IDRETRY;
+
+ struct _gc {
+ STRINGREF strCondition;
+ STRINGREF strMessage;
+ STRINGREF strStackTrace;
+ STRINGREF strWindowTitle;
+ } gc;
+
+ gc.strCondition = (STRINGREF) ObjectToOBJECTREF(strConditionUNSAFE);
+ gc.strMessage = (STRINGREF) ObjectToOBJECTREF(strMessageUNSAFE);
+ gc.strStackTrace = (STRINGREF) ObjectToOBJECTREF(strStackTraceUNSAFE);
+ gc.strWindowTitle = (STRINGREF) ObjectToOBJECTREF(strWindowTitleUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ StackSString condition;
+ StackSString message;
+ StackSString stackTrace;
+ StackSString windowTitle;
+
+ if (gc.strCondition != NULL)
+ gc.strCondition->GetSString(condition);
+ if (gc.strMessage != NULL)
+ gc.strMessage->GetSString(message);
+ if (gc.strStackTrace != NULL)
+ gc.strStackTrace->GetSString(stackTrace);
+ if (gc.strWindowTitle != NULL)
+ gc.strWindowTitle->GetSString(windowTitle);
+
+ StackSString msgText;
+ if (gc.strCondition != NULL) {
+ msgText.Append(W("Expression: "));
+ msgText.Append(condition);
+ msgText.Append(W("\n"));
+ }
+ msgText.Append(W("Description: "));
+ msgText.Append(message);
+
+ StackSString stackTraceText;
+ if (gc.strStackTrace != NULL) {
+ stackTraceText.Append(W("Stack Trace:\n"));
+ stackTraceText.Append(stackTrace);
+ }
+
+ if (gc.strWindowTitle == NULL) {
+ windowTitle.Set(W("Assert Failure"));
+ }
+
+ // We're taking a string from managed code, and we can't be sure it doesn't have stuff like %s or \n in it.
+ // So, pass a format string of %s and pass the text as a vararg to our message box method.
+ // Also, varargs and StackSString don't mix. Convert to string first.
+ const WCHAR* msgTextAsUnicode = msgText.GetUnicode();
+ result = EEMessageBoxNonLocalizedNonFatal(W("%s"), windowTitle, stackTraceText, MB_ABORTRETRYIGNORE | MB_ICONEXCLAMATION, msgTextAsUnicode);
+
+ // map the user's choice to the values recognized by
+ // the System.Diagnostics.Assert package
+ if (result == IDRETRY)
+ {
+ result = FailDebug;
+ }
+ else if (result == IDIGNORE)
+ {
+ result = FailIgnore;
+ }
+ else
+ {
+ result = FailTerminate;
+ }
+
+ HELPER_METHOD_FRAME_END();
+ return result;
+}
+FCIMPLEND
+
+
+FCIMPL1( void, Log::AddLogSwitch,
+ LogSwitchObject* logSwitchUNSAFE
+ )
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(logSwitchUNSAFE));
+ }
+ CONTRACTL_END;
+
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+
+ HRESULT hresult = S_OK;
+
+ struct _gc {
+ LOGSWITCHREF m_LogSwitch;
+ STRINGREF Name;
+ OBJECTREF tempObj;
+ STRINGREF strrefParentName;
+ } gc;
+
+ ZeroMemory(&gc, sizeof(gc));
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ gc.m_LogSwitch = (LOGSWITCHREF)ObjectToOBJECTREF(logSwitchUNSAFE);
+
+ // From the given args, extract the LogSwitch name
+ gc.Name = ((LogSwitchObject*) OBJECTREFToObject(gc.m_LogSwitch))->GetName();
+
+ _ASSERTE( gc.Name != NULL );
+ WCHAR *pstrCategoryName = NULL;
+ int iCategoryLength = 0;
+ WCHAR wszParentName [MAX_LOG_SWITCH_NAME_LEN+1];
+ WCHAR wszSwitchName [MAX_LOG_SWITCH_NAME_LEN+1];
+ wszParentName [0] = W('\0');
+ wszSwitchName [0] = W('\0');
+
+ // extract the (WCHAR) name from the STRINGREF object
+ gc.Name->RefInterpretGetStringValuesDangerousForGC(&pstrCategoryName, &iCategoryLength);
+
+ _ASSERTE (iCategoryLength > 0);
+ wcsncpy_s(wszSwitchName, COUNTOF(wszSwitchName), pstrCategoryName, _TRUNCATE);
+
+ // check if an entry with this name already exists in the hash table.
+ // Duplicates are not allowed.
+ // <REVISIT_TODO>: access to the hashtable is not synchronized!</REVISIT_TODO>
+ if(g_sLogHashTable.GetEntryFromHashTable(pstrCategoryName) != NULL)
+ {
+ hresult = TYPE_E_DUPLICATEID;
+ }
+ else
+ {
+ // Create a strong reference handle to the LogSwitch object
+ OBJECTHANDLE ObjHandle = pThread->GetDomain()->CreateStrongHandle(NULL);
+ StoreObjectInHandle(ObjHandle, ObjectToOBJECTREF(gc.m_LogSwitch));
+ // Use ObjectFromHandle(ObjHandle) to get back the object.
+
+ hresult = g_sLogHashTable.AddEntryToHashTable(pstrCategoryName, ObjHandle);
+
+ // If we failed to insert this into the hash table, destroy the handle so
+ // that we don't leak it.
+ if (FAILED(hresult))
+ {
+ ::DestroyStrongHandle(ObjHandle);
+ }
+
+#ifdef DEBUGGING_SUPPORTED
+ if (hresult == S_OK)
+ {
+ // tell the attached debugger about this switch
+ if (CORDebuggerAttached())
+ {
+ int iLevel = gc.m_LogSwitch->GetLevel();
+ WCHAR *pstrParentName = NULL;
+ int iParentNameLength = 0;
+
+ gc.tempObj = gc.m_LogSwitch->GetParent();
+
+ LogSwitchObject* pParent = (LogSwitchObject*) OBJECTREFToObject( gc.tempObj );
+
+ if (pParent != NULL)
+ {
+ // From the given args, extract the ParentLogSwitch's name
+ gc.strrefParentName = pParent->GetName();
+
+ // extract the (WCHAR) name from the STRINGREF object
+ gc.strrefParentName->RefInterpretGetStringValuesDangerousForGC(&pstrParentName, &iParentNameLength );
+
+ if (iParentNameLength > MAX_LOG_SWITCH_NAME_LEN)
+ {
+ wcsncpy_s (wszParentName, COUNTOF(wszParentName), pstrParentName, _TRUNCATE);
+ }
+ else
+ {
+ wcscpy_s (wszParentName, COUNTOF(wszParentName), pstrParentName);
+ }
+ }
+
+ g_pDebugInterface->SendLogSwitchSetting (iLevel, SWITCH_CREATE, wszSwitchName, wszParentName );
+ }
+ }
+#endif // DEBUGGING_SUPPORTED
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+FCIMPL3(void, Log::ModifyLogSwitch,
+ INT32 Level,
+ StringObject* strLogSwitchNameUNSAFE,
+ StringObject* strParentNameUNSAFE
+ )
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(strLogSwitchNameUNSAFE));
+ PRECONDITION(CheckPointer(strParentNameUNSAFE));
+ }
+ CONTRACTL_END;
+
+ STRINGREF strLogSwitchName = (STRINGREF) ObjectToOBJECTREF(strLogSwitchNameUNSAFE);
+ STRINGREF strParentName = (STRINGREF) ObjectToOBJECTREF(strParentNameUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_2(strLogSwitchName, strParentName);
+
+ _ASSERTE (strLogSwitchName != NULL);
+
+ WCHAR *pstrLogSwitchName = NULL;
+ WCHAR *pstrParentName = NULL;
+ int iSwitchNameLength = 0;
+ int iParentNameLength = 0;
+ WCHAR wszParentName [MAX_LOG_SWITCH_NAME_LEN+1];
+ WCHAR wszSwitchName [MAX_LOG_SWITCH_NAME_LEN+1];
+ wszParentName [0] = W('\0');
+ wszSwitchName [0] = W('\0');
+
+ // extract the (WCHAR) name from the STRINGREF object
+ strLogSwitchName->RefInterpretGetStringValuesDangerousForGC (
+ &pstrLogSwitchName,
+ &iSwitchNameLength);
+
+ if (iSwitchNameLength > MAX_LOG_SWITCH_NAME_LEN)
+ {
+ wcsncpy_s (wszSwitchName, COUNTOF(wszSwitchName), pstrLogSwitchName, _TRUNCATE);
+ }
+ else
+ {
+ wcscpy_s (wszSwitchName, COUNTOF(wszSwitchName), pstrLogSwitchName);
+ }
+
+ // extract the (WCHAR) name from the STRINGREF object
+ strParentName->RefInterpretGetStringValuesDangerousForGC (
+ &pstrParentName,
+ &iParentNameLength);
+
+ if (iParentNameLength > MAX_LOG_SWITCH_NAME_LEN)
+ {
+ wcsncpy_s (wszParentName, COUNTOF(wszParentName), pstrParentName, _TRUNCATE);
+ }
+ else
+ {
+ wcscpy_s (wszParentName, COUNTOF(wszParentName), pstrParentName);
+ }
+
+#ifdef DEBUGGING_SUPPORTED
+ if (g_pDebugInterface)
+ {
+ g_pDebugInterface->SendLogSwitchSetting (Level,
+ SWITCH_MODIFY,
+ wszSwitchName,
+ wszParentName
+ );
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+void Log::DebuggerModifyingLogSwitch (int iNewLevel,
+ const WCHAR *pLogSwitchName
+ )
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // check if an entry with this name exists in the hash table.
+ OBJECTHANDLE ObjHandle = g_sLogHashTable.GetEntryFromHashTable (pLogSwitchName);
+ if ( ObjHandle != NULL)
+ {
+ OBJECTREF obj = ObjectFromHandle (ObjHandle);
+ LogSwitchObject *pLogSwitch = (LogSwitchObject *)(OBJECTREFToObject (obj));
+
+ pLogSwitch->SetLevel (iNewLevel);
+ }
+}
+
+
+// Note: Caller should ensure that it's not adding a duplicate
+// entry by calling GetEntryFromHashTable before calling this
+// function.
+HRESULT LogHashTable::AddEntryToHashTable (const WCHAR *pKey,
+ OBJECTHANDLE pData
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ HashElement *pElement;
+
+ // check that the key is non-null
+ if (pKey == NULL)
+ {
+ return (E_INVALIDARG);
+ }
+
+ int iHashKey = 0;
+ int iLength = (int)wcslen (pKey);
+
+ for (int i= 0; i<iLength; i++)
+ {
+ iHashKey += pKey [i];
+ }
+
+ iHashKey = iHashKey % MAX_HASH_BUCKETS;
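+
+ // For example, with MAX_HASH_BUCKETS == 20, the key W("GC") hashes to
+ // ('G' + 'C') % 20 == (71 + 67) % 20 == 18. Collisions are chained below.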
+
+ // Create a new HashElement. This throws on oom, nothing to cleanup.
+ pElement = new HashElement;
+
+ pElement->SetData (pData, pKey);
+
+ if (m_Buckets [iHashKey] == NULL)
+ {
+ m_Buckets [iHashKey] = pElement;
+ }
+ else
+ {
+ pElement->SetNext (m_Buckets [iHashKey]);
+ m_Buckets [iHashKey] = pElement;
+ }
+
+ return S_OK;
+}
+
+
+OBJECTHANDLE LogHashTable::GetEntryFromHashTable (const WCHAR *pKey)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (pKey == NULL)
+ {
+ return NULL;
+ }
+
+ int iHashKey = 0;
+ int iLength = (int)wcslen (pKey);
+
+ // Calculate the hash value of the given key
+ for (int i= 0; i<iLength; i++)
+ {
+ iHashKey += pKey [i];
+ }
+
+ iHashKey = iHashKey % MAX_HASH_BUCKETS;
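+
+ // Note: this hash computation must stay in sync with AddEntryToHashTable above.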
+
+ HashElement *pElement = m_Buckets [iHashKey];
+
+ // Find and return the data
+ while (pElement != NULL)
+ {
+ if (wcscmp(pElement->GetKey(), pKey) == 0)
+ {
+ return (pElement->GetData());
+ }
+
+ pElement = pElement->GetNext();
+ }
+
+ return NULL;
+}
+
+//
+// Returns a textual representation of the current stack trace. The format of the stack
+// trace is the same as returned by StackTrace.ToString.
+//
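+// A minimal usage sketch (the caller must tolerate a THROWS / GC_TRIGGERS path):
+//
+//     SString trace;
+//     GetManagedStackTraceString(TRUE /* fNeedFileInfo */, trace);
+//     // 'trace' now holds the same text StackTrace.ToString() would produce.
+//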
+void GetManagedStackTraceString(BOOL fNeedFileInfo, SString &result)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Switch to cooperative GC mode before we call into managed code.
+ GCX_COOP();
+
+ MethodDescCallSite managedHelper(METHOD__STACK_TRACE__GET_MANAGED_STACK_TRACE_HELPER);
+ ARG_SLOT args[] =
+ {
+ BoolToArgSlot(fNeedFileInfo)
+ };
+
+ STRINGREF resultStringRef = (STRINGREF) managedHelper.Call_RetOBJECTREF(args);
+ resultStringRef->GetSString(result);
+}
+
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/debugdebugger.h b/src/vm/debugdebugger.h
new file mode 100644
index 0000000000..56c3cf13fe
--- /dev/null
+++ b/src/vm/debugdebugger.h
@@ -0,0 +1,385 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: DebugDebugger.h
+**
+** Purpose: Native methods on System.Diagnostics.Debugger
+**
+**
+
+===========================================================*/
+
+#ifndef __DEBUG_DEBUGGER_h__
+#define __DEBUG_DEBUGGER_h__
+#include <object.h>
+
+
+// ! WARNING !
+// The following constants mirror the constants
+// declared in the class LoggingLevelEnum in the
+// System.Diagnostic package. Any changes here will also
+// need to be made there.
+#define TraceLevel0 0
+#define TraceLevel1 1
+#define TraceLevel2 2
+#define TraceLevel3 3
+#define TraceLevel4 4
+#define StatusLevel0 20
+#define StatusLevel1 21
+#define StatusLevel2 22
+#define StatusLevel3 23
+#define StatusLevel4 24
+#define WarningLevel 40
+#define ErrorLevel 50
+#define PanicLevel 100
+
+// ! WARNING !
+// The following constants mirror the constants
+// declared in the class AssertLevelEnum in the
+// System.Diagnostic package. Any changes here will also
+// need to be made there.
+#define FailDebug 0
+#define FailIgnore 1
+#define FailTerminate 2
+#define FailContinueFilter 3
+
+#define MAX_LOG_SWITCH_NAME_LEN 256
+
+class DebugDebugger
+{
+public:
+ static FCDECL0(void, Break);
+ static FCDECL0(FC_BOOL_RET, Launch);
+ static FCDECL0(FC_BOOL_RET, IsDebuggerAttached);
+ static FCDECL3(void, Log, INT32 Level, StringObject* strModule, StringObject* strMessage);
+
+ // receives a custom notification object from the target and sends it to the RS via
+ // code:Debugger::SendCustomDebuggerNotification
+ static FCDECL1(void, CustomNotification, Object * dataUNSAFE);
+
+ static FCDECL0(FC_BOOL_RET, IsLogging);
+
+protected:
+ static BOOL IsLoggingHelper();
+};
+
+
+
+
+class StackFrameHelper:public Object
+{
+ // READ ME:
+ // Modifying the order or fields of this object may require other changes to the
+ // classlib definition of the StackFrameHelper class.
+public:
+ THREADBASEREF TargetThread;
+ I4ARRAYREF rgiOffset;
+ I4ARRAYREF rgiILOffset;
+ BASEARRAYREF rgMethodBase;
+ PTRARRAYREF dynamicMethods;
+ BASEARRAYREF rgMethodHandle;
+ PTRARRAYREF rgFilename;
+ I4ARRAYREF rgiLineNumber;
+ I4ARRAYREF rgiColumnNumber;
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ BOOLARRAYREF rgiLastFrameFromForeignExceptionStackTrace;
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ int iFrameCount;
+ CLR_BOOL fNeedFileInfo;
+
+protected:
+ StackFrameHelper() {}
+ ~StackFrameHelper() {}
+
+public:
+ void SetFrameCount (int iCount)
+ {
+ iFrameCount = iCount;
+ }
+
+ int GetFrameCount (void)
+ {
+ return iFrameCount;
+ }
+
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF <StackFrameHelper> STACKFRAMEHELPERREF;
+#else
+typedef StackFrameHelper* STACKFRAMEHELPERREF;
+#endif
+
+
+class DebugStackTrace
+{
+public:
+
+#ifndef DACCESS_COMPILE
+// the DAC directly uses the GetStackFramesData and DebugStackTraceElement types
+private:
+#endif // DACCESS_COMPILE
+ struct DebugStackTraceElement {
+ DWORD dwOffset; // native offset
+ DWORD dwILOffset;
+ MethodDesc *pFunc;
+ PCODE ip;
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ // TRUE if this element represents the last frame of the foreign
+ // exception stack trace.
+ BOOL fIsLastFrameFromForeignStackTrace;
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+ // Initialization done under TSL.
+ // This is used when first collecting the stack frame data.
+ void InitPass1(
+ DWORD dwNativeOffset,
+ MethodDesc *pFunc,
+ PCODE ip
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ , BOOL fIsLastFrameFromForeignStackTrace = FALSE
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ );
+
+ // Initialization done outside the TSL.
+ // This will init the dwILOffset field (and potentially anything else
+ // that can't be done under the TSL).
+ void InitPass2();
+ };
+
+ struct GetStackFramesData {
+
+ // Used for the integer-skip version
+ INT32 skip;
+ INT32 NumFramesRequested;
+ INT32 cElementsAllocated;
+ INT32 cElements;
+ DebugStackTraceElement* pElements;
+ THREADBASEREF TargetThread;
+ AppDomain *pDomain;
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ BOOL fDoWeHaveAnyFramesFromForeignStackTrace;
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+
+ GetStackFramesData() : skip(0),
+ NumFramesRequested (0),
+ cElementsAllocated(0),
+ cElements(0),
+ pElements(NULL),
+ TargetThread((THREADBASEREF)(TADDR)NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ fDoWeHaveAnyFramesFromForeignStackTrace = FALSE;
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+ }
+
+ ~GetStackFramesData()
+ {
+ delete [] pElements;
+ }
+ };
+
+
+public:
+
+ static FCDECL3(void,
+ GetStackFramesInternal,
+ StackFrameHelper* pStackFrameHelper,
+ INT32 iSkip,
+ Object* pException
+ );
+
+#ifndef DACCESS_COMPILE
+// the DAC directly calls GetStackFramesFromException
+private:
+#endif
+
+ static void GetStackFramesHelper(Frame *pStartFrame, void* pStopStack, GetStackFramesData *pData);
+
+ static void GetStackFrames(Frame *pStartFrame, void* pStopStack, GetStackFramesData *pData);
+
+ static void GetStackFramesFromException(OBJECTREF * e, GetStackFramesData *pData, PTRARRAYREF * pDynamicMethodArray = NULL);
+
+ static StackWalkAction GetStackFramesCallback(CrawlFrame* pCf, VOID* data);
+
+};
+
+class DebuggerAssert
+{
+private:
+
+public:
+
+ static FCDECL4(INT32,
+ ShowDefaultAssertDialog,
+ StringObject* strConditionUNSAFE,
+ StringObject* strMessageUNSAFE,
+ StringObject* strStackTraceUNSAFE,
+ StringObject* strWindowTitleUNSAFE
+ );
+
+};
+
+
+// The following code is taken from object.h and modified to suit
+// LogSwitchBaseObject
+//
+class LogSwitchObject : public Object
+{
+ protected:
+ // README:
+ // Modifying the order or fields of this object may require other changes to the
+ // classlib class definition of the LogSwitch object.
+
+ STRINGREF m_strName;
+ STRINGREF strDescription;
+ OBJECTREF m_ParentSwitch;
+ INT32 m_iLevel;
+ INT32 m_iOldLevel;
+
+ protected:
+ LogSwitchObject() {}
+ ~LogSwitchObject() {}
+
+ public:
+ // check for classes that wrap Ole classes
+
+ void SetLevel(INT32 iLevel)
+ {
+ m_iLevel = iLevel;
+ }
+
+ INT32 GetLevel(void)
+ {
+ return m_iLevel;
+ }
+
+ OBJECTREF GetParent (void)
+ {
+ return m_ParentSwitch;
+ }
+
+ STRINGREF GetName (void)
+ {
+ return m_strName;
+ }
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF <LogSwitchObject> LOGSWITCHREF;
+#else
+typedef LogSwitchObject* LOGSWITCHREF;
+#endif
+
+
+#define MAX_KEY_LENGTH 64
+#define MAX_HASH_BUCKETS 20
+
+class HashElement
+{
+private:
+
+ OBJECTHANDLE m_pData;
+ SString m_strKey;
+ HashElement *m_pNext;
+
+public:
+
+ HashElement ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pData = NULL;
+ m_pNext = NULL;
+ }
+
+ ~HashElement()
+ {
+ if (m_pNext!= NULL)
+ {
+ delete m_pNext;
+ }
+
+ m_pNext=NULL;
+
+ }// ~HashElement
+
+ void SetData (OBJECTHANDLE pData, const WCHAR *pKey)
+ {
+ m_pData = pData;
+ m_strKey.Set(pKey);
+ }
+
+ OBJECTHANDLE GetData (void)
+ {
+ return m_pData;
+ }
+
+ const WCHAR *GetKey (void)
+ {
+ return m_strKey.GetUnicode();
+ }
+
+ void SetNext (HashElement *pNext)
+ {
+ m_pNext = pNext;
+ }
+
+ HashElement *GetNext (void)
+ {
+ return m_pNext;
+ }
+
+};
+
+class LogHashTable
+{
+private:
+
+ HashElement *m_Buckets [MAX_HASH_BUCKETS];
+
+public:
+ // static global object, no constructors/destructors, assumes zero initialized memory
+
+ HRESULT AddEntryToHashTable (const WCHAR *pKey, OBJECTHANDLE pData);
+
+ OBJECTHANDLE GetEntryFromHashTable (const WCHAR *pKey);
+
+};
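+
+// Typical use (a sketch, mirroring Log::AddLogSwitch in debugdebugger.cpp): probe with
+// GetEntryFromHashTable(name) to reject duplicates, then AddEntryToHashTable(name, handle).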
+
+extern LogHashTable g_sLogHashTable;
+
+
+class Log
+{
+private:
+
+public:
+ static FCDECL1(void, AddLogSwitch, LogSwitchObject * m_LogSwitch);
+
+ static FCDECL3(void,
+ ModifyLogSwitch,
+ INT32 Level,
+ StringObject* strLogSwitchNameUNSAFE,
+ StringObject* strParentNameUNSAFE
+ );
+
+ // The following method is called when the level of a log switch is modified
+ // from the debugger. It is not an ecall.
+ static void DebuggerModifyingLogSwitch (int iNewLevel, const WCHAR *pLogSwitchName);
+
+};
+
+//
+// Returns a textual representation of the current stack trace. The format of the stack
+// trace is the same as returned by StackTrace.ToString.
+//
+void GetManagedStackTraceString(BOOL fNeedFileInfo, SString &result);
+
+#endif // __DEBUG_DEBUGGER_h__
diff --git a/src/vm/debughelp.cpp b/src/vm/debughelp.cpp
new file mode 100644
index 0000000000..a800bf0440
--- /dev/null
+++ b/src/vm/debughelp.cpp
@@ -0,0 +1,1246 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+
+/*******************************************************************/
+/* The following routines used to exist in all builds so they could be called from the
+ * debugger before we had strike.
+ * Now most of them are only included in debug builds for diagnostic purposes.
+*/
+/*******************************************************************/
+
+#include "stdlib.h"
+
+BOOL isMemoryReadable(const TADDR start, unsigned len)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ //
+ // To accomplish this in a no-throw way, we have to touch each and every page
+ // and see if it is in memory or not.
+ //
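+ // For example, a 10000-byte range with a 4096-byte page size is probed at the
+ // first byte, the last byte, and then once per PAGE_SIZE step in between.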
+
+ //
+ // Touch the first and last bytes.
+ //
+ char buff;
+
+#ifdef DACCESS_COMPILE
+ if (DacReadAll(start, &buff, 1, false) != S_OK)
+ {
+ return 0;
+ }
+#else
+ if (ReadProcessMemory(GetCurrentProcess(), (PVOID)start, &buff, 1, 0) == 0)
+ {
+ return 0;
+ }
+#endif
+
+ TADDR location;
+
+ location = start + (len - 1);
+
+#ifdef DACCESS_COMPILE
+ if (DacReadAll(location, &buff, 1, false) != S_OK)
+ {
+ return 0;
+ }
+#else
+ if (ReadProcessMemory(GetCurrentProcess(), (PVOID)location,
+ &buff, 1, 0) == 0)
+ {
+ return 0;
+ }
+#endif
+
+ //
+ // Now we have to loop thru each and every page in between and touch them.
+ //
+ location = start;
+ while (len > PAGE_SIZE)
+ {
+ location += PAGE_SIZE;
+ len -= PAGE_SIZE;
+
+#ifdef DACCESS_COMPILE
+ if (DacReadAll(location, &buff, 1, false) != S_OK)
+ {
+ return 0;
+ }
+#else
+ if (ReadProcessMemory(GetCurrentProcess(), (PVOID)location,
+ &buff, 1, 0) == 0)
+ {
+ return 0;
+ }
+#endif
+ }
+
+ return 1;
+}
+
+
+/*******************************************************************/
+/* Check to see if 'retAddr' is a valid return address (it points to
+   someplace that has a 'call' right before it). If possible, it
+   returns the address that was called in whereCalled. */
+
+bool isRetAddr(TADDR retAddr, TADDR* whereCalled)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // don't waste time on values clearly out of range
+ if (retAddr < (TADDR)BOT_MEMORY || retAddr > (TADDR)TOP_MEMORY)
+ {
+ return false;
+ }
+
+ PTR_BYTE spot = PTR_BYTE(retAddr);
+ if (!isMemoryReadable(dac_cast<TADDR>(spot) - 7, 7))
+ {
+ return(false);
+ }
+
+ // Note this is possible to be spoofed, but pretty unlikely
+ *whereCalled = 0;
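+
+ // The checks below decode the x86 near-call encodings. For the E8 (call rel32)
+ // form the target is relative to the return address: e.g., if retAddr is
+ // 0x00401005 and the stored rel32 is 0x000000FB, the call target is 0x00401100.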
+ // call XXXXXXXX
+ if (spot[-5] == 0xE8)
+ {
+ *whereCalled = *(PTR_DWORD(retAddr - 4)) + retAddr;
+ return(true);
+ }
+
+ // call [XXXXXXXX]
+ if (spot[-6] == 0xFF && (spot[-5] == 025))
+ {
+ if (isMemoryReadable(*(PTR_TADDR(retAddr - 4)), 4))
+ {
+ *whereCalled = *(PTR_TADDR(*(PTR_TADDR(retAddr - 4))));
+ return(true);
+ }
+ }
+
+ // call [REG+XX]
+ if (spot[-3] == 0xFF && (spot[-2] & ~7) == 0120 && (spot[-2] & 7) != 4)
+ {
+ return(true);
+ }
+
+ if (spot[-4] == 0xFF && spot[-3] == 0124) // call [ESP+XX]
+ {
+ return(true);
+ }
+
+ // call [REG+XXXX]
+ if (spot[-6] == 0xFF && (spot[-5] & ~7) == 0220 && (spot[-5] & 7) != 4)
+ {
+ return(true);
+ }
+
+ if (spot[-7] == 0xFF && spot[-6] == 0224) // call [ESP+XXXX]
+ {
+ return(true);
+ }
+
+ // call [REG]
+ if (spot[-2] == 0xFF && (spot[-1] & ~7) == 0020 && (spot[-1] & 7) != 4 && (spot[-1] & 7) != 5)
+ {
+ return(true);
+ }
+
+ // call REG
+ if (spot[-2] == 0xFF && (spot[-1] & ~7) == 0320 && (spot[-1] & 7) != 4)
+ {
+ return(true);
+ }
+
+ // There are other cases, but I don't believe they are used.
+ return(false);
+}
+
+/*
+ * The remaining methods are included in debug builds only
+ */
+#ifdef _DEBUG
+
+#ifndef DACCESS_COMPILE
+void *DumpEnvironmentBlock(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LPTSTR lpszVariable;
+ lpszVariable = (LPTSTR)WszGetEnvironmentStrings();
+
+ while (*lpszVariable)
+ {
+ fprintf(stderr, "%c", *lpszVariable++);
+ }
+
+ fprintf(stderr, "\n");
+
+ return WszGetEnvironmentStrings();
+}
+
+#if defined(_TARGET_X86_)
+/*******************************************************************/
+// Dump the SEH chain to stderr
+void PrintSEHChain(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ EXCEPTION_REGISTRATION_RECORD* pEHR = GetCurrentSEHRecord();
+
+ while (pEHR != NULL && pEHR != EXCEPTION_CHAIN_END)
+ {
+ fprintf(stderr, "pEHR:0x%x Handler:0x%x\n", (size_t)pEHR, (size_t)pEHR->Handler);
+ pEHR = pEHR->Next;
+ }
+}
+#endif // _TARGET_X86_
+
+/*******************************************************************/
+MethodDesc* IP2MD(ULONG_PTR IP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return ExecutionManager::GetCodeMethodDesc((PCODE)IP);
+}
+
+/*******************************************************************/
+/* if addr is a valid method table, return a pointer to it */
+MethodTable* AsMethodTable(size_t addr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ MethodTable* pValidMT = NULL;
+
+ EX_TRY
+ {
+ MethodTable* pMT = (MethodTable*) addr;
+
+ if (isMemoryReadable((TADDR)pMT, sizeof(MethodTable)))
+ {
+ EEClass* cls = pMT->GetClass_NoLogging();
+
+ if (isMemoryReadable((TADDR)cls, sizeof(EEClass)) &&
+ (cls->GetMethodTable() == pMT))
+ {
+ pValidMT = pMT;
+ }
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return(pValidMT);
+}
+
+/*******************************************************************/
+/* if addr is a valid method desc, return a pointer to it */
+MethodDesc* AsMethodDesc(size_t addr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ if (!IS_ALIGNED(addr, sizeof(void*)))
+ return(0);
+
+ MethodDesc* pValidMD = NULL;
+
+ // We try to avoid most AVs with explicit calls to isMemoryReadable below, but rare cases can still get through
+ // if we are unlucky.
+ AVInRuntimeImplOkayHolder AVOkay;
+
+ EX_TRY
+ {
+ MethodDesc* pMD = (MethodDesc*) addr;
+
+ if (isMemoryReadable((TADDR)pMD, sizeof(MethodDesc)))
+ {
+ MethodDescChunk *chunk = pMD->GetMethodDescChunk();
+
+ if (isMemoryReadable((TADDR)chunk, sizeof(MethodDescChunk)))
+ {
+ RelativeFixupPointer<PTR_MethodTable> * ppMT = chunk->GetMethodTablePtr();
+
+ // The MethodTable is stored as a RelativeFixupPointer which does an
+ // extra indirection if the address is tagged (the low bit is set).
+ // That could AV if we don't check it first.
+
+ if (!ppMT->IsTagged((TADDR)ppMT) || isMemoryReadable((TADDR)ppMT->GetValuePtr((TADDR)ppMT), sizeof(MethodTable*)))
+ {
+ if (AsMethodTable((size_t)RelativeFixupPointer<PTR_MethodTable>::GetValueAtPtr((TADDR)ppMT)) != 0)
+ {
+ pValidMD = pMD;
+ }
+ }
+ }
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+
+ return(pValidMD);
+}
+
+
+// This function will return NULL if the buffer is not large enough.
+/*******************************************************************/
+
+wchar_t* formatMethodTable(MethodTable* pMT,
+ __out_z __inout_ecount(bufSize) wchar_t* buff,
+ DWORD bufSize)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if(bufSize == 0)
+ {
+ return NULL;
+ }
+
+ buff[ bufSize - 1] = W('\0');
+
+ DefineFullyQualifiedNameForClass();
+
+ LPCUTF8 clsName = GetFullyQualifiedNameForClass(pMT);
+
+ if (clsName != 0)
+ {
+ if(_snwprintf_s(buff, bufSize - 1, _TRUNCATE, W("%S"), clsName) < 0)
+ {
+ return NULL;
+ }
+
+ buff[ bufSize - 1] = W('\0');
+
+ }
+ return(buff);
+}
+
+/*******************************************************************/
+// This function will return NULL if the buffer is not large enough, otherwise it will
+// return the buffer position for next write.
+/*******************************************************************/
+
+wchar_t* formatMethodDesc(MethodDesc* pMD,
+ __out_z __inout_ecount(bufSize) wchar_t* buff,
+ DWORD bufSize)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if(bufSize == 0)
+ {
+ return NULL;
+ }
+
+ buff = formatMethodTable(pMD->GetMethodTable(), buff, bufSize);
+ if(buff == NULL)
+ {
+ return NULL;
+ }
+
+ buff[bufSize - 1] = W('\0'); // this will guarantee the buffer is also NULL-terminated
+ if(_snwprintf_s( &buff[lstrlenW(buff)] , bufSize -lstrlenW(buff) - 1, _TRUNCATE, W("::%S"), pMD->GetName()) < 0)
+ {
+ return NULL;
+ }
+
+#ifdef _DEBUG
+ if (pMD->m_pszDebugMethodSignature)
+ {
+ if(_snwprintf_s(&buff[lstrlenW(buff)],
+ bufSize -lstrlenW(buff) - 1,
+ _TRUNCATE,
+ W(" %S"),
+ pMD->m_pszDebugMethodSignature) < 0)
+ {
+ return NULL;
+ }
+
+ }
+#endif
+
+ if(_snwprintf_s(&buff[lstrlenW(buff)], bufSize -lstrlenW(buff) - 1, _TRUNCATE, W("(%x)"), (size_t)pMD) < 0)
+ {
+ return NULL;
+ }
+
+ return(buff);
+}
+
+
+
+
+/*******************************************************************/
+/* dump the stack, pretty printing IL methods if possible. This
+ routine is very robust. It will never cause an access violation
+ and it always finds return addresses if they are on the stack
+ (it may find some spurious ones, however). */
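+/* An illustrative (hypothetical) output line:
+     STK[0012FF5C] = 00401020 RETADDR  MyNs.MyClass::Main(4a3bc0)
+   i.e. the stack slot address, its contents, how the slot was classified,
+   and, when recoverable, the pretty-printed method. */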
+
+int dumpStack(BYTE* topOfStack, unsigned len)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ size_t* top = (size_t*) topOfStack;
+ size_t* end = (size_t*) &topOfStack[len];
+
+ size_t* ptr = (size_t*) (((size_t) top) & ~3); // make certain dword aligned.
+ TADDR whereCalled;
+
+ WszOutputDebugString(W("***************************************************\n"));
+
+ CQuickBytes qb;
+
+ int nLen = MAX_CLASSNAME_LENGTH * 4 + 400; // this should be enough
+
+ wchar_t *buff = (wchar_t *) qb.AllocThrows(nLen * sizeof(wchar_t));
+
+ while (ptr < end)
+ {
+ buff[nLen - 1] = W('\0');
+
+ wchar_t* buffPtr = buff;
+
+ // stop if we hit unmapped pages
+ if (!isMemoryReadable((TADDR)ptr, sizeof(TADDR)))
+ {
+ break;
+ }
+
+ if (isRetAddr((TADDR)*ptr, &whereCalled))
+ {
+ if (_snwprintf_s(buffPtr, buff + nLen - buffPtr - 1, _TRUNCATE, W("STK[%08X] = %08X "), (size_t)ptr, *ptr) < 0)
+ {
+ return(0);
+ }
+
+ buffPtr += lstrlenW(buffPtr);
+
+ const wchar_t* kind = W("RETADDR ");
+
+ // Is this a stub (i.e., is the return address a MethodDesc)?
+ MethodDesc* ftn = AsMethodDesc(*ptr);
+
+ if (ftn != 0)
+ {
+
+ kind = W(" MD PARAM");
+
+ // If another true return address is not directly before it, it is just
+ // a methodDesc param.
+ TADDR prevRetAddr = ptr[1];
+
+ if (isRetAddr(prevRetAddr, &whereCalled) && AsMethodDesc(prevRetAddr) == 0)
+ {
+ kind = W("STUBCALL");
+ }
+ else
+ {
+ // Is it the magic sequence used by CallDescr?
+ if (isMemoryReadable(prevRetAddr - sizeof(short),
+ sizeof(short)) &&
+ ((short*) prevRetAddr)[-1] == 0x5A59) // pop ecx / pop edx
+ {
+ kind = W("STUBCALL");
+ }
+
+ }
+
+ }
+ else // Is it some other code the EE knows about?
+ {
+ ftn = ExecutionManager::GetCodeMethodDesc((PCODE)(*ptr));
+ }
+
+ if(_snwprintf_s(buffPtr, buff+ nLen -buffPtr-1, _TRUNCATE, W("%s "), kind) < 0)
+ {
+ return(0);
+ }
+
+ buffPtr += lstrlenW(buffPtr);
+
+ if (ftn != 0)
+ {
+ // buffer is not large enough
+ if( formatMethodDesc(ftn, buffPtr, static_cast<DWORD>(buff+ nLen -buffPtr-1)) == NULL)
+ {
+ return(0);
+ }
+
+ buffPtr += lstrlenW(buffPtr);
+ }
+ else
+ {
+ wcsncpy_s(buffPtr, nLen - (buffPtr - buff), W("<UNKNOWN FTN>"), _TRUNCATE);
+ buffPtr += lstrlenW(buffPtr);
+ }
+
+ if (whereCalled != 0)
+ {
+ if(_snwprintf_s(buffPtr, buff+ nLen -buffPtr-1, _TRUNCATE, W(" Caller called Entry %X"), whereCalled) <0)
+ {
+ return(0);
+ }
+
+ buffPtr += lstrlenW(buffPtr);
+ }
+
+ wcsncpy_s(buffPtr, nLen - (buffPtr - buff), W("\n"), _TRUNCATE);
+ buffPtr += lstrlenW(buffPtr);
+ WszOutputDebugString(buff);
+ }
+
+ MethodTable* pMT = AsMethodTable(*ptr);
+ if (pMT != 0)
+ {
+ buffPtr = buff;
+ if( _snwprintf_s(buffPtr, buff+ nLen -buffPtr-1, _TRUNCATE, W("STK[%08X] = %08X MT PARAM "), (size_t)ptr, *ptr ) <0)
+ {
+ return(0);
+ }
+
+ buffPtr += lstrlenW(buffPtr);
+
+ if( formatMethodTable(pMT, buffPtr, static_cast<DWORD>(buff+ nLen -buffPtr-1)) == NULL)
+ {
+ return(0);
+ }
+
+ buffPtr += lstrlenW(buffPtr);
+
+ wcsncpy_s(buffPtr, nLen - (buffPtr - buff), W("\n"), _TRUNCATE);
+ WszOutputDebugString(buff);
+
+ }
+
+ ptr++;
+
+ } // while
+
+ return(0);
+}
+
+/*******************************************************************/
+/* dump the stack from the current ESP. Stop when we reach a 64K
+ boundary */
+int DumpCurrentStack()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef _TARGET_X86_
+ BYTE* top = (BYTE *)GetCurrentSP();
+
+ // go back at most 64K, it will stop if we go off the
+ // top to unmapped memory
+ return(dumpStack(top, 0xFFFF));
+#else
+ _ASSERTE(!"@NYI - DumpCurrentStack(DebugHelp.cpp)");
+ return 0;
+#endif // _TARGET_X86_
+}
+
+/*******************************************************************/
+WCHAR* StringVal(STRINGREF objref)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return(objref->GetBuffer());
+}
+
+LPCUTF8 NameForMethodTable(UINT_PTR pMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ DefineFullyQualifiedNameForClass();
+ LPCUTF8 clsName = GetFullyQualifiedNameForClass(((MethodTable*)pMT));
+ // Note we're returning local stack space - this should be OK for use in the debugger, though
+ return clsName;
+}
+
+LPCUTF8 ClassNameForObject(UINT_PTR obj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return(NameForMethodTable((UINT_PTR)(((Object*)obj)->GetMethodTable())));
+}
+
+LPCUTF8 ClassNameForOBJECTREF(OBJECTREF obj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return(ClassNameForObject((UINT_PTR)(OBJECTREFToObject(obj))));
+}
+
+LPCUTF8 NameForMethodDesc(UINT_PTR pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return(((MethodDesc*)pMD)->GetName());
+}
+
+LPCUTF8 ClassNameForMethodDesc(UINT_PTR pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ DefineFullyQualifiedNameForClass ();
+ return GetFullyQualifiedNameForClass(((MethodDesc*)pMD)->GetMethodTable());
+}
+
+PCCOR_SIGNATURE RawSigForMethodDesc(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return(pMD->GetSig());
+}
+
+Thread * CurrentThreadInfo ()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return GetThread ();
+}
+
+AppDomain *GetAppDomainForObject(UINT_PTR obj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return ((Object*)obj)->GetAppDomain();
+}
+
+ADIndex GetAppDomainIndexForObject(UINT_PTR obj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return ((Object*)obj)->GetHeader()->GetAppDomainIndex();
+}
+
+AppDomain *GetAppDomainForObjectHeader(UINT_PTR hdr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ ADIndex indx = ((ObjHeader*)hdr)->GetAppDomainIndex();
+ if (!indx.m_dwIndex)
+ {
+ return NULL;
+ }
+
+ return SystemDomain::GetAppDomainAtIndex(indx);
+}
+
+ADIndex GetAppDomainIndexForObjectHeader(UINT_PTR hdr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return ((ObjHeader*)hdr)->GetAppDomainIndex();
+}
+
+SyncBlock *GetSyncBlockForObject(UINT_PTR obj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return ((Object*)obj)->GetHeader()->PassiveGetSyncBlock();
+}
+
+/*******************************************************************/
+void PrintMethodTable(UINT_PTR pMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ MethodTable * p = (MethodTable *)pMT;
+
+ DefineFullyQualifiedNameForClass();
+ LPCUTF8 name = GetFullyQualifiedNameForClass(p);
+ p->DebugDumpVtable(name, true);
+ p->DebugDumpFieldLayout(name, true);
+ p->DebugDumpGCDesc(name, true);
+}
+
+void PrintTableForMethodDesc(UINT_PTR pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ PrintMethodTable((UINT_PTR) ((MethodDesc *)pMD)->GetMethodTable() );
+}
+
+void PrintException(OBJECTREF pObjectRef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+
+ if(pObjectRef == NULL)
+ {
+ return;
+ }
+
+ GCPROTECT_BEGIN(pObjectRef);
+
+ if (!IsException(pObjectRef->GetMethodTable()))
+ {
+ printf("Specified object is not an exception object.\n");
+ }
+ else
+ {
+ MethodDescCallSite internalToString(METHOD__EXCEPTION__INTERNAL_TO_STRING, &pObjectRef);
+
+ ARG_SLOT arg[1] = {
+ ObjToArgSlot(pObjectRef)
+ };
+
+ STRINGREF str = internalToString.Call_RetSTRINGREF(arg);
+
+ if(str->GetBuffer() != NULL)
+ {
+ WszOutputDebugString(str->GetBuffer());
+ }
+ }
+
+ GCPROTECT_END();
+}
+
+void PrintException(UINT_PTR pObject)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pObjectRef = NULL;
+ GCPROTECT_BEGIN(pObjectRef);
+ GCPROTECT_END();
+}
+
+/*******************************************************************/
+/* sends the current stack trace to the debug window */
+
+const char* FormatSig(MethodDesc* pMD, AppDomain *pDomain, AllocMemTracker *pamTracker);
+
+struct PrintCallbackData {
+ BOOL toStdout;
+ BOOL withAppDomain;
+#ifdef _DEBUG
+ BOOL toLOG;
+#endif
+};
+
+StackWalkAction PrintStackTraceCallback(CrawlFrame* pCF, VOID* pData)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ DISABLED(GC_TRIGGERS);
+ }
+ CONTRACTL_END;
+
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ MethodDesc* pMD = pCF->GetFunction();
+ const int nLen = 2048 - 1; // keep one character for "\n"
+ wchar_t *buff = (wchar_t*)alloca((nLen + 1) * sizeof(wchar_t));
+ buff[0] = 0;
+ buff[nLen-1] = W('\0'); // make sure the buffer is always NULL-terminated
+
+ PrintCallbackData *pCBD = (PrintCallbackData *)pData;
+
+ if (pMD != 0)
+ {
+ MethodTable * pMT = pMD->GetMethodTable();
+
+ if (pCBD->withAppDomain)
+ {
+ if(_snwprintf_s(&buff[lstrlenW(buff)],
+ nLen -lstrlenW(buff) - 1,
+ _TRUNCATE,
+ W("{[%3.3x] %s} "),
+ pCF->GetAppDomain()->GetId().m_dwId,
+ pCF->GetAppDomain()->GetFriendlyName(FALSE)) < 0)
+ {
+ return SWA_CONTINUE;
+ }
+ }
+
+ DefineFullyQualifiedNameForClass();
+
+ LPCUTF8 clsName = GetFullyQualifiedNameForClass(pMT);
+
+ if (clsName != 0)
+ {
+ if(_snwprintf_s(&buff[lstrlenW(buff)], nLen -lstrlenW(buff) - 1, _TRUNCATE, W("%S::"), clsName) < 0)
+ {
+ return SWA_CONTINUE;
+ }
+ }
+
+ // This prematurely SuppressRelease'd AmTracker will leak any memory allocated by FormatSig.
+ // But this routine is a diagnostic aid, not customer-reachable, so we won't bother to plug the leak.
+ AllocMemTracker dummyAmTracker;
+
+ int buffLen = _snwprintf_s(&buff[lstrlenW(buff)],
+ nLen -lstrlenW(buff) - 1,
+ _TRUNCATE,
+ W("%S %S "),
+ pMD->GetName(),
+ FormatSig(pMD, pCF->GetAppDomain(), &dummyAmTracker));
+
+ dummyAmTracker.SuppressRelease();
+ if (buffLen < 0 )
+ {
+ return SWA_CONTINUE;
+ }
+
+
+ if (pCF->IsFrameless() && pCF->GetJitManager() != 0) {
+
+ PREGDISPLAY regs = pCF->GetRegisterSet();
+
+ DWORD offset = pCF->GetRelOffset();
+
+ TADDR start = pCF->GetCodeInfo()->GetStartAddress();
+
+ if(_snwprintf_s(&buff[lstrlenW(buff)],
+ nLen -lstrlenW(buff) - 1,
+ _TRUNCATE,
+ W("JIT ESP:%X MethStart:%X EIP:%X(rel %X)"),
+ (size_t)GetRegdisplaySP(regs),
+ (size_t)start,
+ (size_t)GetControlPC(regs),
+ offset) < 0)
+ {
+ return SWA_CONTINUE;
+ }
+
+ }
+ else
+ {
+
+ if(_snwprintf_s(&buff[lstrlenW(buff)], nLen -lstrlenW(buff) - 1, _TRUNCATE, W("EE implemented")) < 0)
+ {
+ return SWA_CONTINUE;
+ }
+ }
+
+ }
+ else
+ {
+ Frame* frame = pCF->GetFrame();
+
+ if(_snwprintf_s(&buff[lstrlenW(buff)],
+ nLen -lstrlenW(buff) - 1,
+ _TRUNCATE,
+ W("EE Frame is") LFMT_ADDR,
+ (size_t)DBG_ADDR(frame)) < 0)
+ {
+ return SWA_CONTINUE;
+ }
+ }
+
+ if (pCBD->toStdout)
+ {
+ wcscat_s(buff, nLen + 1, W("\n"));
+ PrintToStdOutW(buff);
+ }
+#ifdef _DEBUG
+ else if (pCBD->toLOG)
+ {
+ MAKE_ANSIPTR_FROMWIDE(sbuff, buff);
+ // For LogSpewAlways to work right, the "\n" (newline)
+ // must be in the fmt string, not part of the args
+ LogSpewAlways(" %s\n", sbuff);
+ }
+#endif
+ else
+ {
+ wcscat_s(buff, nLen + 1, W("\n"));
+ WszOutputDebugString(buff);
+ }
+
+ return SWA_CONTINUE;
+}
+
+void PrintStackTrace()
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ DISABLED(GC_TRIGGERS);
+ }
+ CONTRACTL_END;
+
+ WszOutputDebugString(W("***************************************************\n"));
+ PrintCallbackData cbd = {0, 0};
+ GetThread()->StackWalkFrames(PrintStackTraceCallback, &cbd, ALLOW_ASYNC_STACK_WALK, 0);
+}
+
+void PrintStackTraceToStdout()
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ DISABLED(GC_TRIGGERS);
+ }
+ CONTRACTL_END;
+
+ PrintCallbackData cbd = {1, 0};
+ GetThread()->StackWalkFrames(PrintStackTraceCallback, &cbd, ALLOW_ASYNC_STACK_WALK, 0);
+}
+
+#ifdef _DEBUG
+void PrintStackTraceToLog()
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ DISABLED(GC_TRIGGERS);
+ }
+ CONTRACTL_END;
+
+ PrintCallbackData cbd = {0, 0, 1};
+ GetThread()->StackWalkFrames(PrintStackTraceCallback, &cbd, ALLOW_ASYNC_STACK_WALK, 0);
+}
+#endif
+
+void PrintStackTraceWithAD()
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ DISABLED(GC_TRIGGERS);
+ }
+ CONTRACTL_END;
+
+ WszOutputDebugString(W("***************************************************\n"));
+ PrintCallbackData cbd = {0, 1};
+ GetThread()->StackWalkFrames(PrintStackTraceCallback, &cbd, ALLOW_ASYNC_STACK_WALK, 0);
+}
+
+void PrintStackTraceWithADToStdout()
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ DISABLED(GC_TRIGGERS);
+ }
+ CONTRACTL_END;
+
+ PrintCallbackData cbd = {1, 1};
+ GetThread()->StackWalkFrames(PrintStackTraceCallback, &cbd, ALLOW_ASYNC_STACK_WALK, 0);
+}
+
+#ifdef _DEBUG
+void PrintStackTraceWithADToLog()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ PrintCallbackData cbd = {0, 1, 1};
+ GetThread()->StackWalkFrames(PrintStackTraceCallback, &cbd, ALLOW_ASYNC_STACK_WALK, 0);
+}
+
+void PrintStackTraceWithADToLog(Thread *pThread)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ DISABLED(GC_TRIGGERS);
+ }
+ CONTRACTL_END;
+
+ PrintCallbackData cbd = {0, 1, 1};
+ pThread->StackWalkFrames(PrintStackTraceCallback, &cbd, ALLOW_ASYNC_STACK_WALK, 0);
+}
+#endif
+
+/*******************************************************************/
+// Get the system or current domain from the thread.
+BaseDomain* GetSystemDomain()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return SystemDomain::System();
+}
+
+AppDomain* GetCurrentDomain()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return SystemDomain::GetCurrentDomain();
+}
+
+void PrintDomainName(size_t ob)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ AppDomain* dm = (AppDomain*) ob;
+ LPCWSTR st = dm->GetFriendlyName(FALSE);
+
+ if(st != NULL)
+ {
+ WszOutputDebugString(st);
+ }
+ else
+ {
+ WszOutputDebugString(W("<Domain with no Name>"));
+ }
+}
+
+#if defined(_TARGET_X86_)
+
+#include "gcdump.h"
+
+#include "../gcdump/i386/gcdumpx86.cpp"
+
+#include "../gcdump/gcdump.cpp"
+
+/*********************************************************************/
+void printfToDbgOut(const char* fmt, ...)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ va_list args;
+ va_start(args, fmt);
+
+ char buffer[4096];
+ _vsnprintf_s(buffer, COUNTOF(buffer), _TRUNCATE, fmt, args);
+
+ OutputDebugStringA( buffer );
+}
+
+void DumpGCInfo(MethodDesc* method)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ PCODE methodStart = method->GetNativeCode();
+
+ if (methodStart == 0)
+ {
+ return;
+ }
+
+ EECodeInfo codeInfo(methodStart);
+ _ASSERTE(codeInfo.GetRelOffset() == 0);
+
+ ICodeManager* codeMan = codeInfo.GetCodeManager();
+ BYTE* table = (BYTE*) codeInfo.GetGCInfo();
+
+ unsigned methodSize = (unsigned)codeMan->GetFunctionSize(table);
+
+ GCDump gcDump;
+
+ gcDump.gcPrintf = printfToDbgOut;
+
+ InfoHdr header;
+
+ printfToDbgOut ("Method info block:\n");
+
+ table += gcDump.DumpInfoHdr(table, &header, &methodSize, 0);
+
+ printfToDbgOut ("\n");
+ printfToDbgOut ("Pointer table:\n");
+
+ table += gcDump.DumpGCTable(table, header, methodSize, 0);
+}
+
+void DumpGCInfoMD(size_t method)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ DumpGCInfo((MethodDesc*) method);
+}
+#endif
+
+
+#ifdef LOGGING
+void LogStackTrace()
+{
+ WRAPPER_NO_CONTRACT;
+
+ PrintCallbackData cbd = {0, 0, 1};
+ GetThread()->StackWalkFrames(PrintStackTraceCallback, &cbd, ALLOW_ASYNC_STACK_WALK, 0);
+}
+#endif
+
+#endif // #ifndef DACCESS_COMPILE
+#endif //_DEBUG
diff --git a/src/vm/debuginfostore.cpp b/src/vm/debuginfostore.cpp
new file mode 100644
index 0000000000..2ec708095b
--- /dev/null
+++ b/src/vm/debuginfostore.cpp
@@ -0,0 +1,750 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// DebugInfoStore
+
+
+
+#include "common.h"
+#include "debuginfostore.h"
+#include "nibblestream.h"
+
+
+#ifdef _DEBUG
+// For debug builds only.
+static bool Dbg_ShouldUseCookies()
+{
+ SUPPORTS_DAC;
+
+ // Normally we want this to be false because it would bloat the image,
+ // but keep a hook to enable it in case we need it.
+ return false;
+}
+#endif
+
+//-----------------------------------------------------------------------------
+// We have "Transfer" objects that sit on top of the streams.
+// The objects look identical, but one serializes and the other deserializes.
+// This lets the compression + restoration routines share all their compression
+// logic and just swap out Transfer objects.
+//
+// It's not ideal that we have a lot of redundancy maintaining both Transfer
+// objects, but at least the compiler can enforce that the Reader & Writer are
+// in sync. It can't enforce that two separate routines for Compression &
+// restoration are in sync.
+//
+// We could have the TransferReader + Writer be polymorphic off a base class,
+// but the virtual function calls will be extra overhead. May as well use
+// templates and let the compiler resolve it all statically at compile time.
+//-----------------------------------------------------------------------------
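+
+// As a minimal illustration of the pattern (hypothetical routine, not part
+// of this file), one template body serves both directions:
+//
+//   template <class T>
+//   void DoPoint(T trans, DWORD & x, DWORD & y)
+//   {
+//       trans.DoEncodedU32(x); // writes x with a TransferWriter,
+//       trans.DoEncodedU32(y); // fills x and y in with a TransferReader
+//   }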
+
+
+//-----------------------------------------------------------------------------
+// Serialize to a NibbleWriter stream.
+//-----------------------------------------------------------------------------
+class TransferWriter
+{
+public:
+ TransferWriter(NibbleWriter & w) : m_w(w)
+ {
+ }
+
+ // Write a raw U32 in nibble-encoded form.
+ void DoEncodedU32(DWORD dw) { m_w.WriteEncodedU32(dw); }
+
+ // Use to encode a monotonically increasing delta.
+ void DoEncodedDeltaU32(DWORD & dw, DWORD dwLast)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ _ASSERTE(dw >= dwLast);
+ DWORD dwDelta = dw - dwLast;
+ m_w.WriteEncodedU32(dwDelta);
+ }
+
+
+ // Some U32 values may have a few sentinel negative values.
+ // We adjust them to be real U32s and then encode those.
+ // dwAdjust should be the lower bound on the enum.
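+ // For example, DoBounds below passes (DWORD)ICorDebugInfo::MAX_MAPPING_VALUE
+ // as dwAdjust, so both the sentinels and real IL offsets map onto small
+ // non-negative values that encode compactly.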
+ void DoEncodedAdjustedU32(DWORD dw, DWORD dwAdjust)
+ {
+ //_ASSERTE(dwAdjust < 0); // some negative lower bound.
+ m_w.WriteEncodedU32(dw - dwAdjust);
+ }
+
+ // Typesafe versions of EncodeU32.
+ void DoEncodedSourceType(ICorDebugInfo::SourceTypes & dw) { m_w.WriteEncodedU32(dw); }
+ void DoEncodedVarLocType(ICorDebugInfo::VarLocType & dw) { m_w.WriteEncodedU32(dw); }
+ void DoEncodedUnsigned(unsigned & dw) { m_w.WriteEncodedU32(dw); }
+
+ // Stack offsets are aligned on a DWORD boundary, so that lets us shave off 2 bits.
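+ // For example, on x86 an offset of 12 travels as 3 (12 / sizeof(DWORD));
+ // TransferReader::DoEncodedStackOffset multiplies it back out.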
+ void DoEncodedStackOffset(signed & dwOffset)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+#ifdef _TARGET_X86_
+ _ASSERTE(dwOffset % sizeof(DWORD) == 0); // should be dword aligned. That'll save us 2 bits.
+ m_w.WriteEncodedI32(dwOffset / sizeof(DWORD));
+#else
+ // Non x86 platforms don't need it to be dword aligned.
+ m_w.WriteEncodedI32(dwOffset);
+#endif
+ }
+
+ void DoEncodedRegIdx(ICorDebugInfo::RegNum & reg) { m_w.WriteEncodedU32(reg); }
+
+ // For debugging purposes, inject cookies into the Compression.
+ void DoCookie(BYTE b) {
+#ifdef _DEBUG
+ if (Dbg_ShouldUseCookies())
+ {
+ m_w.WriteNibble(b);
+ }
+#endif
+ }
+
+protected:
+ NibbleWriter & m_w;
+
+};
+
+//-----------------------------------------------------------------------------
+// Deserializer that sits on top of a NibbleReader
+// This class interface matches TransferWriter exactly. See that for details.
+//-----------------------------------------------------------------------------
+class TransferReader
+{
+public:
+ TransferReader(NibbleReader & r) : m_r(r)
+ {
+ SUPPORTS_DAC;
+ }
+
+ void DoEncodedU32(DWORD & dw)
+ {
+ SUPPORTS_DAC;
+ dw = m_r.ReadEncodedU32();
+ }
+
+ // Use to decode a monotonically increasing delta.
+ // dwLast was the last value; we update it to the current value on output.
+ void DoEncodedDeltaU32(DWORD & dw, DWORD dwLast)
+ {
+ SUPPORTS_DAC;
+ DWORD dwDelta = m_r.ReadEncodedU32();
+ dw = dwLast + dwDelta;
+ }
+
+ void DoEncodedAdjustedU32(DWORD & dw, DWORD dwAdjust)
+ {
+ SUPPORTS_DAC;
+ //_ASSERTE(dwAdjust < 0);
+ dw = m_r.ReadEncodedU32() + dwAdjust;
+ }
+
+ void DoEncodedSourceType(ICorDebugInfo::SourceTypes & dw)
+ {
+ SUPPORTS_DAC;
+ dw = (ICorDebugInfo::SourceTypes) m_r.ReadEncodedU32();
+ }
+
+ void DoEncodedVarLocType(ICorDebugInfo::VarLocType & dw)
+ {
+ SUPPORTS_DAC;
+ dw = (ICorDebugInfo::VarLocType) m_r.ReadEncodedU32();
+ }
+
+ void DoEncodedUnsigned(unsigned & dw)
+ {
+ SUPPORTS_DAC;
+ dw = (unsigned) m_r.ReadEncodedU32();
+ }
+
+
+ // Stack offsets are aligned on a DWORD boundary, so that lets us shave off 2 bits.
+ void DoEncodedStackOffset(signed & dwOffset)
+ {
+ SUPPORTS_DAC;
+#ifdef _TARGET_X86_
+ dwOffset = m_r.ReadEncodedI32() * sizeof(DWORD);
+#else
+ // Non x86 platforms don't need it to be dword aligned.
+ dwOffset = m_r.ReadEncodedI32();
+#endif
+ }
+
+ void DoEncodedRegIdx(ICorDebugInfo::RegNum & reg)
+ {
+ SUPPORTS_DAC;
+ reg = (ICorDebugInfo::RegNum) m_r.ReadEncodedU32();
+ }
+
+ // For debugging purposes, inject cookies into the Compression.
+ void DoCookie(BYTE b)
+ {
+ SUPPORTS_DAC;
+
+#ifdef _DEBUG
+ if (Dbg_ShouldUseCookies())
+ {
+ BYTE b2 = m_r.ReadNibble();
+ _ASSERTE(b == b2);
+ }
+#endif
+ }
+
+
+protected:
+ NibbleReader & m_r;
+};
+
+
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+// Perf tracking
+static int g_CDI_TotalMethods = 0;
+static int g_CDI_bMethodTotalUncompress = 0;
+static int g_CDI_bMethodTotalCompress = 0;
+
+static int g_CDI_bVarsTotalUncompress = 0;
+static int g_CDI_bVarsTotalCompress = 0;
+#endif
+
+//-----------------------------------------------------------------------------
+// Serialize Bounds info.
+//-----------------------------------------------------------------------------
+template <class T>
+void DoBounds(
+ T trans, // transfer object.
+ ULONG32 cMap,
+ ICorDebugInfo::OffsetMapping *pMap
+)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+
+ // Bounds info contains (Native Offset, IL Offset, flags):
+ // - Sorted by native offset (so use a delta encoding for that).
+ // - IL offsets aren't sorted, but they should be close to each other (so a signed delta encoding).
+ //   They may also include a sentinel value from MappingTypes.
+ // - flags is 3 independent bits.
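+ // For example, native offsets 0x10, 0x18, 0x30 travel as the deltas
+ // 0x10, 0x08, 0x18, which nibble-encode more compactly than the raw offsets.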
+
+ // Loop through and transfer each Entry in the Mapping.
+ DWORD dwLastNativeOffset = 0;
+ for(DWORD i = 0; i < cMap; i++)
+ {
+ ICorDebugInfo::OffsetMapping * pBound = &pMap[i];
+
+ trans.DoEncodedDeltaU32(pBound->nativeOffset, dwLastNativeOffset);
+ dwLastNativeOffset = pBound->nativeOffset;
+
+
+ trans.DoEncodedAdjustedU32(pBound->ilOffset, (DWORD) ICorDebugInfo::MAX_MAPPING_VALUE);
+
+ trans.DoEncodedSourceType(pBound->source);
+
+ trans.DoCookie(0xA);
+ }
+}
+
+
+
+// Helper to write a compressed Native Var Info
+template<class T>
+void DoNativeVarInfo(
+ T trans,
+ ICorDebugInfo::NativeVarInfo * pVar
+)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+
+ // Each VarInfo has:
+ // - a native start + end offset. We can use a delta for the end offset.
+ // - an IL variable number. These are usually small.
+ // - VarLoc information. This is a tagged variant.
+ // The entries aren't sorted in any particular order.
+ trans.DoCookie(0xB);
+ trans.DoEncodedU32(pVar->startOffset);
+
+
+ trans.DoEncodedDeltaU32(pVar->endOffset, pVar->startOffset);
+
+ // record var number.
+ trans.DoEncodedAdjustedU32(pVar->varNumber, (DWORD) ICorDebugInfo::MAX_ILNUM);
+
+
+ // Now write the VarLoc... This is a variant-like structure, so we'll get
+ // different compression depending on what we've got.
+ trans.DoEncodedVarLocType(pVar->loc.vlType);
+
+ switch(pVar->loc.vlType)
+ {
+ case ICorDebugInfo::VLT_REG:
+ case ICorDebugInfo::VLT_REG_FP: // fall through
+ case ICorDebugInfo::VLT_REG_BYREF: // fall through
+ trans.DoEncodedRegIdx(pVar->loc.vlReg.vlrReg);
+ break;
+
+ case ICorDebugInfo::VLT_STK:
+ case ICorDebugInfo::VLT_STK_BYREF: // fall through
+ trans.DoEncodedRegIdx(pVar->loc.vlStk.vlsBaseReg);
+ trans.DoEncodedStackOffset(pVar->loc.vlStk.vlsOffset);
+ break;
+
+#ifdef MDIL
+ case ICorDebugInfo::VLT_STK | ICorDebugInfo::VLT_MDIL_SYMBOLIC:
+ case ICorDebugInfo::VLT_STK_BYREF | ICorDebugInfo::VLT_MDIL_SYMBOLIC: // fall through
+ _ASSERTE(pVar->loc.vlStk.vlsOffset >= 0);
+ trans.DoEncodedRegIdx(pVar->loc.vlStk.vlsBaseReg);
+ trans.DoEncodedU32((DWORD&)pVar->loc.vlStk.vlsOffset);
+ break;
+#endif // MDIL
+
+ case ICorDebugInfo::VLT_REG_REG:
+ trans.DoEncodedRegIdx(pVar->loc.vlRegReg.vlrrReg1);
+ trans.DoEncodedRegIdx(pVar->loc.vlRegReg.vlrrReg2);
+ break;
+
+ case ICorDebugInfo::VLT_REG_STK:
+ trans.DoEncodedRegIdx(pVar->loc.vlRegStk.vlrsReg);
+ trans.DoEncodedRegIdx(pVar->loc.vlRegStk.vlrsStk.vlrssBaseReg);
+ trans.DoEncodedStackOffset(pVar->loc.vlRegStk.vlrsStk.vlrssOffset);
+ break;
+
+#ifdef MDIL
+ case ICorDebugInfo::VLT_REG_STK | ICorDebugInfo::VLT_MDIL_SYMBOLIC:
+ _ASSERTE(pVar->loc.vlRegStk.vlrsStk.vlrssOffset >= 0);
+ trans.DoEncodedRegIdx(pVar->loc.vlRegStk.vlrsReg);
+ trans.DoEncodedRegIdx(pVar->loc.vlRegStk.vlrsStk.vlrssBaseReg);
+ trans.DoEncodedU32((DWORD&)pVar->loc.vlRegStk.vlrsStk.vlrssOffset);
+ break;
+#endif // MDIL
+
+ case ICorDebugInfo::VLT_STK_REG:
+ trans.DoEncodedStackOffset(pVar->loc.vlStkReg.vlsrStk.vlsrsOffset);
+ trans.DoEncodedRegIdx(pVar->loc.vlStkReg.vlsrStk.vlsrsBaseReg);
+ trans.DoEncodedRegIdx(pVar->loc.vlStkReg.vlsrReg);
+ break;
+
+#ifdef MDIL
+ case ICorDebugInfo::VLT_STK_REG | ICorDebugInfo::VLT_MDIL_SYMBOLIC:
+ _ASSERTE(pVar->loc.vlStkReg.vlsrStk.vlsrsOffset >= 0);
+ trans.DoEncodedU32((DWORD&)pVar->loc.vlStkReg.vlsrStk.vlsrsOffset);
+ trans.DoEncodedRegIdx(pVar->loc.vlStkReg.vlsrStk.vlsrsBaseReg);
+ trans.DoEncodedRegIdx(pVar->loc.vlStkReg.vlsrReg);
+ break;
+#endif // MDIL
+
+ case ICorDebugInfo::VLT_STK2:
+ trans.DoEncodedRegIdx(pVar->loc.vlStk2.vls2BaseReg);
+ trans.DoEncodedStackOffset(pVar->loc.vlStk2.vls2Offset);
+ break;
+
+#ifdef MDIL
+ case ICorDebugInfo::VLT_STK2 | ICorDebugInfo::VLT_MDIL_SYMBOLIC:
+ _ASSERTE(pVar->loc.vlStk2.vls2Offset >= 0);
+ trans.DoEncodedRegIdx(pVar->loc.vlStk2.vls2BaseReg);
+ trans.DoEncodedU32((DWORD&)pVar->loc.vlStk2.vls2Offset);
+ break;
+#endif // MDIL
+
+ case ICorDebugInfo::VLT_FPSTK:
+ trans.DoEncodedUnsigned(pVar->loc.vlFPstk.vlfReg);
+ break;
+
+ case ICorDebugInfo::VLT_FIXED_VA:
+ trans.DoEncodedUnsigned(pVar->loc.vlFixedVarArg.vlfvOffset);
+ break;
+
+ default:
+ _ASSERTE(!"Unknown varloc type!");
+ break;
+ }
+
+
+ trans.DoCookie(0xC);
+}
+
+
+#ifndef DACCESS_COMPILE
+
+void CompressDebugInfo::CompressBoundaries(
+ IN ULONG32 cMap,
+ IN ICorDebugInfo::OffsetMapping *pMap,
+ IN OUT NibbleWriter *pWriter
+)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pWriter != NULL);
+ _ASSERTE((pMap == NULL) == (cMap == 0));
+
+ if (cMap != 0)
+ {
+ pWriter->WriteEncodedU32(cMap);
+
+ TransferWriter t(*pWriter);
+ DoBounds(t, cMap, pMap);
+
+ pWriter->Flush();
+ }
+
+#ifdef _DEBUG
+ DWORD cbBlob;
+ PVOID pBlob = pWriter->GetBlob(&cbBlob);
+
+ // Track perf #s for compression...
+ g_CDI_TotalMethods++;
+ g_CDI_bMethodTotalUncompress += sizeof(ICorDebugInfo::OffsetMapping) * cMap;
+ g_CDI_bMethodTotalCompress += (int) cbBlob;
+#endif // _DEBUG
+}
+
+
+void CompressDebugInfo::CompressVars(
+ IN ULONG32 cVars,
+ IN ICorDebugInfo::NativeVarInfo *vars,
+ IN OUT NibbleWriter *pWriter
+)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pWriter != NULL);
+ _ASSERTE((cVars == 0) == (vars == NULL));
+
+ if (cVars != 0)
+ {
+ pWriter->WriteEncodedU32(cVars);
+
+ TransferWriter t(*pWriter);
+ for(ULONG32 i = 0; i < cVars; i ++)
+ {
+ DoNativeVarInfo(t, &vars[i]);
+ }
+
+ pWriter->Flush();
+ }
+
+#ifdef _DEBUG
+ DWORD cbBlob;
+ PVOID pBlob = pWriter->GetBlob(&cbBlob);
+
+ g_CDI_bVarsTotalUncompress += cVars * sizeof(ICorDebugInfo::NativeVarInfo);
+ g_CDI_bVarsTotalCompress += (int) cbBlob;
+#endif
+}
+
+PTR_BYTE CompressDebugInfo::CompressBoundariesAndVars(
+ IN ICorDebugInfo::OffsetMapping * pOffsetMapping,
+ IN ULONG iOffsetMapping,
+ IN ICorDebugInfo::NativeVarInfo * pNativeVarInfo,
+ IN ULONG iNativeVarInfo,
+ IN OUT SBuffer * pDebugInfoBuffer,
+ IN LoaderHeap * pLoaderHeap
+ )
+{
+ CONTRACTL {
+ THROWS; // compression routines throw
+ PRECONDITION((iOffsetMapping == 0) == (pOffsetMapping == NULL));
+ PRECONDITION((iNativeVarInfo == 0) == (pNativeVarInfo == NULL));
+ PRECONDITION((pDebugInfoBuffer != NULL) ^ (pLoaderHeap != NULL));
+ } CONTRACTL_END;
+
+ // Actually do the compression. These will throw on oom.
+ NibbleWriter boundsBuffer;
+ DWORD cbBounds = 0;
+ PVOID pBounds = NULL;
+ if (iOffsetMapping > 0)
+ {
+ CompressDebugInfo::CompressBoundaries(iOffsetMapping, pOffsetMapping, &boundsBuffer);
+ pBounds = boundsBuffer.GetBlob(&cbBounds);
+ }
+
+ NibbleWriter varsBuffer;
+ DWORD cbVars = 0;
+ PVOID pVars = NULL;
+ if (iNativeVarInfo > 0)
+ {
+ CompressDebugInfo::CompressVars(iNativeVarInfo, pNativeVarInfo, &varsBuffer);
+ pVars = varsBuffer.GetBlob(&cbVars);
+ }
+
+ // Now write it all out to the buffer in a compact fashion.
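+ // The resulting blob layout, which RestoreBoundariesAndVars and
+ // EnumMemoryRegions below depend on, is:
+ //   [encoded cbBounds][encoded cbVars][bounds blob][vars blob]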
+ NibbleWriter w;
+ w.WriteEncodedU32(cbBounds);
+ w.WriteEncodedU32(cbVars);
+ w.Flush();
+
+ DWORD cbHeader;
+ PVOID pHeader = w.GetBlob(&cbHeader);
+
+ S_UINT32 cbFinalSize = S_UINT32(cbHeader) + S_UINT32(cbBounds) + S_UINT32(cbVars);
+ if (cbFinalSize.IsOverflow())
+ ThrowHR(COR_E_OVERFLOW);
+
+ BYTE *ptrStart = NULL;
+ if (pLoaderHeap != NULL)
+ {
+ ptrStart = (BYTE *)(void *)pLoaderHeap->AllocMem(S_SIZE_T(cbFinalSize.Value()));
+ }
+ else
+ {
+ // Open a raw buffer in the SBuffer just large enough to hold all the data.
+ ptrStart = pDebugInfoBuffer->OpenRawBuffer(cbFinalSize.Value());
+ }
+ _ASSERTE(ptrStart != NULL); // throws on oom.
+
+ BYTE *ptr = ptrStart;
+
+ memcpy(ptr, pHeader, cbHeader);
+ ptr += cbHeader;
+
+ memcpy(ptr, pBounds, cbBounds);
+ ptr += cbBounds;
+
+ memcpy(ptr, pVars, cbVars);
+ ptr += cbVars;
+
+ if (pLoaderHeap != NULL)
+ {
+ return ptrStart;
+ }
+ else
+ {
+ pDebugInfoBuffer->CloseRawBuffer(cbFinalSize.Value());
+ return NULL;
+ }
+}
+
+#endif // DACCESS_COMPILE
+
+//-----------------------------------------------------------------------------
+// Compression routines
+// DAC only needs to run the uncompression routines.
+//-----------------------------------------------------------------------------
+
+//-----------------------------------------------------------------------------
+// Uncompression (restore) routines
+//-----------------------------------------------------------------------------
+
+// Uncompress data supplied by Compress functions.
+void CompressDebugInfo::RestoreBoundariesAndVars(
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ IN PTR_BYTE pDebugInfo,
+ OUT ULONG32 * pcMap, // number of entries in ppMap
+ OUT ICorDebugInfo::OffsetMapping **ppMap, // pointer to newly allocated array
+ OUT ULONG32 *pcVars,
+ OUT ICorDebugInfo::NativeVarInfo **ppVars
+ )
+{
+ CONTRACTL
+ {
+ THROWS; // reading from nibble stream may throw on invalid data.
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (pcMap != NULL) *pcMap = 0;
+ if (ppMap != NULL) *ppMap = NULL;
+ if (pcVars != NULL) *pcVars = 0;
+ if (ppVars != NULL) *ppVars = NULL;
+
+ NibbleReader r(pDebugInfo, 12 /* maximum size of 2 compressed UINT32s */);
+
+ ULONG cbBounds = r.ReadEncodedU32();
+ ULONG cbVars = r.ReadEncodedU32();
+
+ PTR_BYTE addrBounds = pDebugInfo + r.GetNextByteIndex();
+ PTR_BYTE addrVars = addrBounds + cbBounds;
+
+ if ((pcMap != NULL || ppMap != NULL) && (cbBounds != 0))
+ {
+ NibbleReader r(addrBounds, cbBounds);
+ TransferReader t(r);
+
+ UINT32 cNumEntries = r.ReadEncodedU32();
+ _ASSERTE(cNumEntries > 0);
+
+ if (pcMap != NULL)
+ *pcMap = cNumEntries;
+
+ if (ppMap != NULL)
+ {
+ ICorDebugInfo::OffsetMapping * pMap = reinterpret_cast<ICorDebugInfo::OffsetMapping *>
+ (fpNew(pNewData, cNumEntries * sizeof(ICorDebugInfo::OffsetMapping)));
+ if (pMap == NULL)
+ {
+ ThrowOutOfMemory();
+ }
+ *ppMap = pMap;
+
+ // Main decompression routine.
+ DoBounds(t, cNumEntries, pMap);
+ }
+ }
+
+ if ((pcVars != NULL || ppVars != NULL) && (cbVars != 0))
+ {
+ NibbleReader r(addrVars, cbVars);
+ TransferReader t(r);
+
+ UINT32 cNumEntries = r.ReadEncodedU32();
+ _ASSERTE(cNumEntries > 0);
+
+ if (pcVars != NULL)
+ *pcVars = cNumEntries;
+
+ if (ppVars != NULL)
+ {
+ ICorDebugInfo::NativeVarInfo * pVars = reinterpret_cast<ICorDebugInfo::NativeVarInfo *>
+ (fpNew(pNewData, cNumEntries * sizeof(ICorDebugInfo::NativeVarInfo)));
+ if (pVars == NULL)
+ {
+ ThrowOutOfMemory();
+ }
+ *ppVars = pVars;
+
+ for(UINT32 i = 0; i < cNumEntries; i++)
+ {
+ DoNativeVarInfo(t, &pVars[i]);
+ }
+ }
+ }
+}
+
+#ifdef DACCESS_COMPILE
+void CompressDebugInfo::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ NibbleReader r(pDebugInfo, 12 /* maximum size of 2 compressed UINT32s */);
+
+ ULONG cbBounds = r.ReadEncodedU32();
+ ULONG cbVars = r.ReadEncodedU32();
+
+ DacEnumMemoryRegion(dac_cast<TADDR>(pDebugInfo), r.GetNextByteIndex() + cbBounds + cbVars);
+}
+#endif // DACCESS_COMPILE
+
+#ifndef BINDER
+// Init given a starting address from the start of code.
+void DebugInfoRequest::InitFromStartingAddr(MethodDesc * pMD, PCODE addrCode)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pMD != NULL);
+ _ASSERTE(addrCode != NULL);
+
+ this->m_pMD = pMD;
+ this->m_addrStart = addrCode;
+}
+
+
+//-----------------------------------------------------------------------------
+// Impl for DebugInfoManager's IDebugInfoStore
+//-----------------------------------------------------------------------------
+BOOL DebugInfoManager::GetBoundariesAndVars(
+ const DebugInfoRequest & request,
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ OUT ULONG32 * pcMap,
+ OUT ICorDebugInfo::OffsetMapping ** ppMap,
+ OUT ULONG32 * pcVars,
+ OUT ICorDebugInfo::NativeVarInfo ** ppVars)
+{
+ CONTRACTL
+ {
+ THROWS;
+ WRAPPER(GC_TRIGGERS); // depends on fpNew
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ IJitManager* pJitMan = ExecutionManager::FindJitMan(request.GetStartAddress());
+ if (pJitMan == NULL)
+ {
+ return FALSE; // no info available.
+ }
+
+ return pJitMan->GetBoundariesAndVars(request, fpNew, pNewData, pcMap, ppMap, pcVars, ppVars);
+}
+
+#ifdef DACCESS_COMPILE
+void DebugInfoManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ PCODE addrCode = pMD->GetNativeCode();
+ if (addrCode == NULL)
+ {
+ return;
+ }
+
+ IJitManager* pJitMan = ExecutionManager::FindJitMan(addrCode);
+ if (pJitMan == NULL)
+ {
+ return; // no info available.
+ }
+
+ pJitMan->EnumMemoryRegionsForMethodDebugInfo(flags, pMD);
+}
+#endif
+
+#endif // BINDER
diff --git a/src/vm/debuginfostore.h b/src/vm/debuginfostore.h
new file mode 100644
index 0000000000..d150a26f8d
--- /dev/null
+++ b/src/vm/debuginfostore.h
@@ -0,0 +1,130 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// DebugInfoStore
+
+
+
+
+#ifndef __DebugInfoStore_H_
+#define __DebugInfoStore_H_
+
+// Debugging information is described in CorInfo.h
+#include "corinfo.h"
+
+#include "nibblestream.h"
+
+//-----------------------------------------------------------------------------
+// Information to request Debug info.
+//-----------------------------------------------------------------------------
+class DebugInfoRequest
+{
+public:
+#ifdef _DEBUG
+ // Must initialize via an Init*() function, not just a ctor.
+ // In debug, ctor sets fields to values that will cause asserts if not initialized.
+ DebugInfoRequest()
+ {
+ SUPPORTS_DAC;
+ m_pMD = NULL;
+ m_addrStart = NULL;
+ }
+#endif
+ // Eventually we may have many ways to initialize a request.
+
+ // Init given a method desc and starting address for a native code blob.
+ void InitFromStartingAddr(MethodDesc * pDesc, PCODE addrCode);
+
+
+ MethodDesc * GetMD() const { LIMITED_METHOD_DAC_CONTRACT; return m_pMD; }
+ PCODE GetStartAddress() const { LIMITED_METHOD_DAC_CONTRACT; return m_addrStart; }
+
+protected:
+ MethodDesc * m_pMD;
+ PCODE m_addrStart;
+
+};
+
+//-----------------------------------------------------------------------------
+// A Debug-Info Store abstracts the storage of debugging information
+//-----------------------------------------------------------------------------
+
+
+// We pass the IDS an allocator, which it uses to hand the data back.
+// pData is opaque context the allocator may use for 'new'.
+// E.g., perhaps we have multiple heaps (e.g., loader-heaps per appdomain).
+typedef BYTE* (*FP_IDS_NEW)(void * pData, size_t cBytes);
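+
+// For illustration only (hypothetical, not part of this header), the
+// simplest conforming allocator ignores pData and news from the C++ heap:
+//
+//   BYTE* SimpleIDSNew(void * /* pData */, size_t cBytes)
+//   {
+//       return new (nothrow) BYTE[cBytes]; // callers treat NULL as OOM
+//   }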
+
+
+//-----------------------------------------------------------------------------
+// Utility routines used for compression
+// Note that the compression is just an implementation detail of the stores,
+// and so these are just utility routines exposed to the stores.
+//-----------------------------------------------------------------------------
+class CompressDebugInfo
+{
+public:
+ // Compress incoming data and write it to the provided NibbleWriter.
+ static void CompressBoundaries(
+ IN ULONG32 cMap,
+ IN ICorDebugInfo::OffsetMapping *pMap,
+ IN OUT NibbleWriter * pWriter
+ );
+
+ static void CompressVars(
+ IN ULONG32 cVars,
+ IN ICorDebugInfo::NativeVarInfo *vars,
+ IN OUT NibbleWriter * pBuffer
+ );
+
+ // Stores the result into SBuffer (used by NGen), or in LoaderHeap (used by JIT)
+ static PTR_BYTE CompressBoundariesAndVars(
+ IN ICorDebugInfo::OffsetMapping * pOffsetMapping,
+ IN ULONG iOffsetMapping,
+ IN ICorDebugInfo::NativeVarInfo * pNativeVarInfo,
+ IN ULONG iNativeVarInfo,
+ IN OUT SBuffer * pDebugInfoBuffer,
+ IN LoaderHeap * pLoaderHeap
+ );
+
+public:
+ // Uncompress data supplied by Compress functions.
+ static void RestoreBoundariesAndVars(
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ IN PTR_BYTE pDebugInfo,
+ OUT ULONG32 * pcMap, // number of entries in ppMap
+ OUT ICorDebugInfo::OffsetMapping **ppMap, // pointer to newly allocated array
+ OUT ULONG32 *pcVars,
+ OUT ICorDebugInfo::NativeVarInfo **ppVars
+ );
+
+#ifdef DACCESS_COMPILE
+ static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo);
+#endif
+};
+
+//-----------------------------------------------------------------------------
+// Debug-info manager. This is a process-wide store;
+// there should be only one instance of it, and it
+// delegates to sub-stores as needed.
+//-----------------------------------------------------------------------------
+class DebugInfoManager
+{
+public:
+ static BOOL GetBoundariesAndVars(
+ const DebugInfoRequest & request,
+ IN FP_IDS_NEW fpNew, IN void * pNewData,
+ OUT ULONG32 * pcMap,
+ OUT ICorDebugInfo::OffsetMapping ** ppMap,
+ OUT ULONG32 * pcVars,
+ OUT ICorDebugInfo::NativeVarInfo ** ppVars);
+
+#ifdef DACCESS_COMPILE
+ static void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD);
+#endif
+};
+
+
+
+#endif // __DebugInfoStore_H_
diff --git a/src/vm/decodemd.cpp b/src/vm/decodemd.cpp
new file mode 100644
index 0000000000..91dab49087
--- /dev/null
+++ b/src/vm/decodemd.cpp
@@ -0,0 +1,518 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#include "common.h"
+#include "decodemd.h"
+
+/*
+encoding patterns:
+ 0 10x 110xxx 1110xxxxxxx 11110xxxxxxxxxxxxxxx 111110xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ 0 1,2 3-10 11-138 139-32905 32906-big
+*/
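+
+/*
+ For example, the value 5 falls in the 3-10 band, so it is encoded as the
+ header 110 followed by the 3-bit payload 010 (5 - 3 = 2), giving the bit
+ string 110010.
+*/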
+
+#define MAX_HEADER 5
+#define BASE_0 0
+#define BASE_1 1
+#define BASE_2 (0x2+0x1)
+#define BASE_3 (0x8+0x2+0x1)
+#define BASE_4 (0x80+0x8+0x2+0x1)
+#define BASE_5 (0x8000+0x80+0x8+0x2+0x1)
+#define BASE_6 (0x80000000+0x8000+0x80+0x8+0x2+0x1)
+const unsigned decode_base[MAX_HEADER+2] = {BASE_0, BASE_1, BASE_2, BASE_3, BASE_4, BASE_5, BASE_6};
+#define BIT_LENGTH_0 0
+#define BIT_LENGTH_1 1
+#define BIT_LENGTH_2 3
+#define BIT_LENGTH_3 7
+#define BIT_LENGTH_4 15
+#define BIT_LENGTH_5 31
+#define BIT_LENGTH_6 63
+const unsigned decode_bitlength[MAX_HEADER+2] =
+ {
+ BIT_LENGTH_0,
+ BIT_LENGTH_1,
+ BIT_LENGTH_2,
+ BIT_LENGTH_3,
+ BIT_LENGTH_4,
+ BIT_LENGTH_5,
+ BIT_LENGTH_6
+ };
+
+#define END_DECODED BASE_3
+const BYTE decoded_end[1] = {END_DECODED };
+const BYTE decoded_0_0_0_0[5] = {0,0,0,0,END_DECODED };
+const BYTE decoded_0_1[3] = {0,1,END_DECODED };
+const BYTE decoded_0_2[3] = {0,2,END_DECODED };
+const BYTE decoded_1_0[3] = {1,0,END_DECODED };
+const BYTE decoded_2_0[3] = {2,0,END_DECODED };
+const BYTE decoded_1_0_0[4] = {1,0,0,END_DECODED };
+const BYTE decoded_2_0_0[4] = {2,0,0,END_DECODED };
+#define decoded_0 &decoded_0_0_0_0[3]
+#define decoded_0_0 &decoded_0_0_0_0[2]
+#define decoded_0_0_0 &decoded_0_0_0_0[1]
+#define decoded_1 &decoded_0_1[1]
+#define decoded_2 &decoded_0_2[1]
+const BYTE decoded_3[2] = {3, END_DECODED };
+const BYTE decoded_4[2] = {4, END_DECODED };
+const BYTE decoded_5[2] = {5, END_DECODED };
+const BYTE decoded_6[2] = {6, END_DECODED };
+const BYTE decoded_7[2] = {7, END_DECODED };
+const BYTE decoded_8[2] = {8, END_DECODED };
+const BYTE decoded_9[2] = {9, END_DECODED };
+const BYTE decoded_10[2] = {10, END_DECODED };
+
+#define INBITS(s) (s > MAX_HEADER)
+#define INHEADER(s) (s <= MAX_HEADER)
+#define PARTIALBITS(s) ((s>>8)&0xFF)
+#define NUMBERGOTTEN(s) (((s)>>16)&0xFF)
+#define HEADER(s) ((s>>24)&0xFF)
+#define DECODING_HEADER(n) n
+#define DOING_BITS (MAX_HEADER+1)
+#define DECODING_BITS(partial, got, header) (DOING_BITS+(partial<<8)+(got<<16)+(header<<24))
+#define DECODING_ERROR ((unsigned) -1)
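+// A DECODING_BITS state packs everything into one unsigned: DOING_BITS in
+// the low byte, the partial bits gathered so far in byte 1, the count of
+// payload bits those represent in byte 2, and the header index in byte 3,
+// matching the PARTIALBITS/NUMBERGOTTEN/HEADER accessors above.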
+#define MASK(len) (~((~0)<<len))
+#define MASK64(len) ((~((~((unsigned __int64)0))<<len)))
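+// For example, MASK(3) == 0x7 and MASK64(36) == 0xFFFFFFFFF.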
+#define BITS_PER_BYTE (sizeof(BYTE)*8)
+
+const Decoder::Decode emptyDecode = {decoded_end, DECODING_HEADER(0)};
+
+const Decoder::Decode transition[6][16] =
+{
+ //header(0)
+ {
+ { decoded_0_0_0_0, DECODING_HEADER(0) }, // 0000
+ { decoded_0_0_0, DECODING_HEADER(1) }, // 0001
+ { decoded_0_0, DECODING_BITS(0,0,1) }, // 0010
+ { decoded_0_0, DECODING_HEADER(2) }, // 0011
+ { decoded_0_1, DECODING_HEADER(0) }, // 0100
+ { decoded_0_2, DECODING_HEADER(0) }, // 0101
+ { decoded_0, DECODING_BITS(0,0,2) }, // 0110
+ { decoded_0, DECODING_HEADER(3) }, // 0111
+ { decoded_1_0, DECODING_HEADER(0) }, // 1000
+ { decoded_1, DECODING_HEADER(1) }, // 1001
+ { decoded_2_0, DECODING_HEADER(0) }, // 1010
+ { decoded_2, DECODING_HEADER(1) }, // 1011
+ { decoded_end, DECODING_BITS(0,1,2) }, // 1100
+ { decoded_end, DECODING_BITS(1,1,2) }, // 1101
+ { decoded_end, DECODING_BITS(0,0,3) }, // 1110
+ { decoded_end, DECODING_HEADER(4) }, // 1111
+ },
+ //header(1)
+ {
+ { decoded_1_0_0, DECODING_HEADER(0) }, // 1 0000
+ { decoded_1_0, DECODING_HEADER(1) }, // 1 0001
+ { decoded_1, DECODING_BITS(0,0,1) }, // 1 0010
+ { decoded_1, DECODING_HEADER(2) }, // 1 0011
+ { decoded_2_0_0, DECODING_HEADER(0) }, // 1 0100
+ { decoded_2_0, DECODING_HEADER(1) }, // 1 0101
+ { decoded_2, DECODING_BITS(0,0,1) }, // 1 0110
+ { decoded_2, DECODING_HEADER(2) }, // 1 0111
+ { decoded_end, DECODING_BITS(0,2,2) }, // 1 1000
+ { decoded_end, DECODING_BITS(1,2,2) }, // 1 1001
+ { decoded_end, DECODING_BITS(2,2,2) }, // 1 1010
+ { decoded_end, DECODING_BITS(3,2,2) }, // 1 1011
+ { decoded_end, DECODING_BITS(0,1,3) }, // 1 1100
+ { decoded_end, DECODING_BITS(1,1,3) }, // 1 1101
+ { decoded_end, DECODING_BITS(0,0,4) }, // 1 1110
+ { decoded_end, DECODING_HEADER(5) }, // 1 1111
+ },
+ //header(2)
+ {
+ { decoded_3, DECODING_HEADER(0) }, // 11 0000
+ { decoded_4, DECODING_HEADER(0) }, // 11 0001
+ { decoded_5, DECODING_HEADER(0) }, // 11 0010
+ { decoded_6, DECODING_HEADER(0) }, // 11 0011
+ { decoded_7, DECODING_HEADER(0) }, // 11 0100
+ { decoded_8, DECODING_HEADER(0) }, // 11 0101
+ { decoded_9, DECODING_HEADER(0) }, // 11 0110
+ { decoded_10, DECODING_HEADER(0) }, // 11 0111
+ { decoded_end, DECODING_BITS(0,2,3) }, // 11 1000
+ { decoded_end, DECODING_BITS(1,2,3) }, // 11 1001
+ { decoded_end, DECODING_BITS(2,2,3) }, // 11 1010
+ { decoded_end, DECODING_BITS(3,2,3) }, // 11 1011
+ { decoded_end, DECODING_BITS(0,1,4) }, // 11 1100
+ { decoded_end, DECODING_BITS(1,1,4) }, // 11 1101
+ { decoded_end, DECODING_BITS(0,0,5) }, // 11 1110
+ { decoded_end, DECODING_ERROR }, // 11 1111
+ },
+ //header(3)
+ {
+ { decoded_end, DECODING_BITS(0,3,3) },
+ { decoded_end, DECODING_BITS(1,3,3) },
+ { decoded_end, DECODING_BITS(2,3,3) },
+ { decoded_end, DECODING_BITS(3,3,3) },
+ { decoded_end, DECODING_BITS(4,3,3) },
+ { decoded_end, DECODING_BITS(5,3,3) },
+ { decoded_end, DECODING_BITS(6,3,3) },
+ { decoded_end, DECODING_BITS(7,3,3) },
+ { decoded_end, DECODING_BITS(0,2,4) },
+ { decoded_end, DECODING_BITS(1,2,4) },
+ { decoded_end, DECODING_BITS(2,2,4) },
+ { decoded_end, DECODING_BITS(3,2,4) },
+ { decoded_end, DECODING_BITS(0,1,5) },
+ { decoded_end, DECODING_BITS(1,1,5) },
+ { decoded_end, DECODING_ERROR },
+ { decoded_end, DECODING_ERROR },
+ },
+ //header(4)
+ {
+ { decoded_end, DECODING_BITS(0,3,4) },
+ { decoded_end, DECODING_BITS(1,3,4) },
+ { decoded_end, DECODING_BITS(2,3,4) },
+ { decoded_end, DECODING_BITS(3,3,4) },
+ { decoded_end, DECODING_BITS(4,3,4) },
+ { decoded_end, DECODING_BITS(5,3,4) },
+ { decoded_end, DECODING_BITS(6,3,4) },
+ { decoded_end, DECODING_BITS(7,3,4) },
+ { decoded_end, DECODING_BITS(0,2,5) },
+ { decoded_end, DECODING_BITS(1,2,5) },
+ { decoded_end, DECODING_BITS(2,2,5) },
+ { decoded_end, DECODING_BITS(3,2,5) },
+ { decoded_end, DECODING_ERROR },
+ { decoded_end, DECODING_ERROR },
+ { decoded_end, DECODING_ERROR },
+ { decoded_end, DECODING_ERROR },
+ },
+ //header(5)
+ {
+ { decoded_end, DECODING_BITS(0,3,5) },
+ { decoded_end, DECODING_BITS(1,3,5) },
+ { decoded_end, DECODING_BITS(2,3,5) },
+ { decoded_end, DECODING_BITS(3,3,5) },
+ { decoded_end, DECODING_BITS(4,3,5) },
+ { decoded_end, DECODING_BITS(5,3,5) },
+ { decoded_end, DECODING_BITS(6,3,5) },
+ { decoded_end, DECODING_BITS(7,3,5) },
+ { decoded_end, DECODING_ERROR },
+ { decoded_end, DECODING_ERROR },
+ { decoded_end, DECODING_ERROR },
+ { decoded_end, DECODING_ERROR },
+ { decoded_end, DECODING_ERROR },
+ { decoded_end, DECODING_ERROR },
+ { decoded_end, DECODING_ERROR },
+ { decoded_end, DECODING_ERROR },
+ }
+};
+
+// --------------------------------------------------------
+void Decoder::Nibbles::SetContents( PTR_BYTE bytes)
+{
+ STATIC_CONTRACT_LEAF;
+
+ next = 2;
+ data = bytes;
+}
+
+// --------------------------------------------------------
+BYTE Decoder::Nibbles::Next()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+ BYTE result = Read();
+ next++;
+ return result;
+}
+
+// --------------------------------------------------------
+BYTE Decoder::Nibbles::Read()
+{
+ STATIC_CONTRACT_LEAF;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+ if (next >= 2)
+ {
+ BYTE d = *data++;
+ next = 0;
+ nibbles[1] = d & 0xF;
+ nibbles[0] = d>>4;
+ }
+ return nibbles[next];
+}
+
+// --------------------------------------------------------
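+// Consume the next 'number' bits from the stream: whole nibbles first, then
+// the high-order bits of the current nibble, masking off the bits consumed
+// so a later Read() sees only what remains.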
+unsigned Decoder::Nibbles::Bits(unsigned number)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+ unsigned n = number;
+ unsigned result = 0;
+ while (n >= 4 )
+ {
+ result = (result<<4) | Next();
+ n -= 4;
+ }
+ if (n > 0)
+ {
+ BYTE last = Read();
+ result = (result<<n) | (last>>(4-n));
+ nibbles[next] &= (0xF>>n);
+ }
+ return result;
+}
+
+// --------------------------------------------------------
+void Decoder::Init(PTR_BYTE bytes)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY;
+
+ state = emptyDecode;
+ data.SetContents(bytes);
+// signedNumbers = FALSE;
+}
+
+// --------------------------------------------------------
+Decoder::Decoder(PTR_BYTE bytes)
+{
+ STATIC_CONTRACT_WRAPPER;
+ Init(bytes);
+}
+
+// --------------------------------------------------------
+Decoder::Decoder()
+{
+ STATIC_CONTRACT_LEAF;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+}
+
+// --------------------------------------------------------
+unsigned Decoder::Next()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+tryagain:
+ unsigned result = *state.decoded;
+ if (result != END_DECODED)
+ {
+ state.decoded++;
+ return result;
+ }
+ if (INHEADER(state.next))
+ {
+ state = transition[state.next][data.Next()];
+ goto tryagain;
+ }
+ //must be getting bits
+ _ASSERTE(INBITS(state.next));
+ unsigned index = HEADER(state.next);
+ unsigned bitsNeeded = decode_bitlength[index]-NUMBERGOTTEN(state.next);
+ result = (PARTIALBITS(state.next)<<bitsNeeded)+data.Bits(bitsNeeded)+decode_base[index];
+ state = emptyDecode;
+ unsigned skip = bitsNeeded % 4; // this works since we are always 4-bit aligned
+ if (skip > 0)
+ {
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:26000) // "Suppress PREFast warning about index overflow"
+#endif
+ // state.next is always 0, because we did "state = emptyDecode;" above
+ state = transition[state.next][data.Next()];
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+ state.decoded += skip;
+ }
+ return result;
+}
+
+// --------------------------------------------------------
+signed Decoder::NextSigned()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+ signed v = (signed) Next();
+ return (v & 1) ? (v+1)>>1 : -(v>>1);
+}
+
+// --------------------------------------------------------
+PTR_BYTE Decoder::End()
+{
+ STATIC_CONTRACT_LEAF;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+ return data.data;
+}
+
+// --------------------------------------------------------
+Encoder::Encoder(BYTE *buffer) : encoding(0), unusedBits(BITS_PER_BYTE),
+ done(FALSE), signedNumbers(FALSE), index(0)
+{
+ STATIC_CONTRACT_LEAF;
+
+ this->buffer = buffer;
+}
+
+// --------------------------------------------------------
+void Encoder::ContainsNegatives(BOOL b)
+{
+ STATIC_CONTRACT_LEAF;
+
+ signedNumbers = b;
+}
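+
+// --------------------------------------------------------
+// Zigzag-style mapping: 0 -> 0, 1 -> 1, -1 -> 2, 2 -> 3, -2 -> 4, ...
+// Decoder::NextSigned is the exact inverse.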
+void Encoder::EncodeSigned(signed value)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+
+ if (!signedNumbers)
+ {
+ _ASSERTE(value>=0);
+ Encode(value);
+ return;
+ }
+ unsigned v = (value <= 0 ) ? (-value)<<1 : (value<<1)-1;
+ Encode(v);
+}
+
+// --------------------------------------------------------
+void Encoder::Encode(unsigned value)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+
+ if (value < BASE_1)
+ {
+ Add(0, 1);
+ return;
+ }
+ if (value < BASE_2)
+ {
+ Add((0x1<<(1+BIT_LENGTH_1))+(value-BASE_1), 2+BIT_LENGTH_1);
+ return;
+ }
+ if (value < BASE_3)
+ {
+ Add((0x3<<(1+BIT_LENGTH_2))+(value-BASE_2), 3+BIT_LENGTH_2);
+ return;
+ }
+ if (value < BASE_4)
+ {
+ Add((0x7<<(1+BIT_LENGTH_3))+(value-BASE_3), 4+BIT_LENGTH_3);
+ return;
+ }
+ if (value < BASE_5)
+ {
+ Add((0xf<<(1+BIT_LENGTH_4))+(value-BASE_4), 5+BIT_LENGTH_4);
+ return;
+ }
+ if (value < BASE_6)
+ {
+ unsigned __int64 value64 = (unsigned __int64) value;
+ Add64((((unsigned __int64)0x1f)<<(1+BIT_LENGTH_5))+(value64-BASE_5), 6+BIT_LENGTH_5);
+ return;
+ }
+ _ASSERTE(!"Too big");
+}
+
+// --------------------------------------------------------
+void Encoder::Encode(signed value, BOOL isSigned)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ if (isSigned)
+ EncodeSigned(value);
+ else
+ {
+ _ASSERTE(((signed)((unsigned) value)) == value);
+ Encode((unsigned) value);
+ }
+}
+
+// --------------------------------------------------------
+void Encoder::Add(unsigned value, unsigned length)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ _ASSERTE(!done);
+ while (length >= unusedBits)
+ {
+ length -= unusedBits;
+ encoding = (encoding<<unusedBits)+static_cast<BYTE>(value>>(length));
+ value = (value & MASK(length));
+ if (buffer) buffer[index++] = encoding;
+ else index++;
+ encoding = 0;
+ unusedBits = BITS_PER_BYTE;
+ }
+ encoding = (encoding<<length)+static_cast<BYTE>(value);
+ unusedBits -= length;
+}
+
+// --------------------------------------------------------
+void Encoder::Add64(unsigned __int64 value, unsigned length)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ _ASSERTE(!done);
+ while (length >= unusedBits)
+ {
+ length -= unusedBits;
+ encoding = (encoding<<unusedBits)+((BYTE)(value>>(length)));
+ value = (value & MASK64(length));
+ if (buffer) buffer[index++] = encoding;
+ else index++;
+ encoding = 0;
+ unusedBits = BITS_PER_BYTE;
+ }
+ encoding = (encoding<<length)+(BYTE)value;
+ unusedBits -= length;
+}
+
+// --------------------------------------------------------
+void Encoder::Done()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ done = TRUE;
+ if (unusedBits == BITS_PER_BYTE) return;
+ encoding = (encoding<<unusedBits);
+ if (buffer) buffer[index++] = encoding;
+ else index++;
+}
+
+// --------------------------------------------------------
+unsigned Encoder::Contents(BYTE** contents)
+{
+ STATIC_CONTRACT_LEAF;
+
+ _ASSERTE(done && buffer && contents);
+ *contents = buffer;
+ return index;
+}
+
+// --------------------------------------------------------
+unsigned Encoder::Length()
+{
+ STATIC_CONTRACT_LEAF;
+
+ _ASSERTE(done);
+ return index;
+}
+
diff --git a/src/vm/decodemd.h b/src/vm/decodemd.h
new file mode 100644
index 0000000000..c76bdddb53
--- /dev/null
+++ b/src/vm/decodemd.h
@@ -0,0 +1,80 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef __DECODEMD_H__
+#define __DECODEMD_H__
+
+// --------------------------------------------------------
+// This is used to decode a bitstream encoding
+
+class Decoder
+{
+public:
+ Decoder();
+ Decoder(PTR_BYTE bytes);
+ void Init(PTR_BYTE bytes);
+ unsigned Next();
+ signed NextSigned();
+ PTR_BYTE End();
+
+ // --------------------------------------------------------
+ // This structure contains the state of the FSM
+
+ struct Decode
+ {
+ const BYTE* decoded; //the already decoded values
+ unsigned next; //what to do when no more decoded values
+ };
+
+private:
+ // --------------------------------------------------------
+ // This is used to access nibbles from a byte stream.
+
+ class Nibbles
+ {
+ friend class Decoder;
+ public:
+ void SetContents(PTR_BYTE bytes);
+ BYTE Next();
+ BYTE Read();
+ unsigned Bits(unsigned number);
+ private:
+ PTR_BYTE data;
+ BYTE nibbles[2];
+ unsigned next;
+ };
+
+ Decode state;
+ Nibbles data;
+};
+
+// --------------------------------------------------------
+// This is used to encode a bitstream encoding
+#ifndef BINDER
+class Encoder
+{
+public:
+ Encoder(BYTE *buffer);
+ void ContainsNegatives(BOOL b);
+ void EncodeSigned(signed value);
+ void Encode(unsigned value);
+ void Encode(signed value, BOOL isSigned);
+ void Add(unsigned value, unsigned length);
+ void Add64(unsigned __int64 value, unsigned length);
+ void Done();
+ unsigned Contents(BYTE** contents);
+ unsigned Length();
+private:
+ BYTE* buffer;
+ BYTE encoding;
+ unsigned unusedBits;
+ BOOL done;
+ BOOL signedNumbers;
+ unsigned index;
+};
+#endif // !BINDER
+#endif // __DECODEMD_H__
diff --git a/src/vm/delegateinfo.h b/src/vm/delegateinfo.h
new file mode 100644
index 0000000000..d176767955
--- /dev/null
+++ b/src/vm/delegateinfo.h
@@ -0,0 +1,86 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: DelegateInfo.h
+**
+**
+** Purpose: Native methods on System.ThreadPool
+** and its inner classes
+**
+
+**
+===========================================================*/
+#ifndef DELEGATE_INFO
+#define DELEGATE_INFO
+
+struct DelegateInfo;
+typedef DelegateInfo* DelegateInfoPtr;
+
+struct DelegateInfo
+{
+ ADID m_appDomainId;
+ OBJECTHANDLE m_stateHandle;
+ OBJECTHANDLE m_eventHandle;
+ OBJECTHANDLE m_registeredWaitHandle;
+ DWORD m_overridesCount;
+ BOOL m_hasSecurityInfo;
+
+ void SetThreadSecurityInfo( Thread* thread, StackCrawlMark* stackMark )
+ {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+
+ }
+
+#ifndef DACCESS_COMPILE
+ void Release()
+ {
+ CONTRACTL {
+ // m_compressedStack->Release() can actually throw today because it has a call
+ // to new down the stack. However, that is recent, and the semantics of that API
+ // are such that it should not throw. I expect cleanup of that function to take
+ // care of this, so I am adding this comment to make sure the issue is documented.
+ // Remove this comment once that work is done.
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+
+ AppDomainFromIDHolder ad(m_appDomainId, FALSE);
+ if (!ad.IsUnloaded())
+ {
+ if (m_stateHandle)
+ DestroyHandle(m_stateHandle);
+ if (m_eventHandle)
+ DestroyHandle(m_eventHandle);
+ if (m_registeredWaitHandle)
+ DestroyHandle(m_registeredWaitHandle);
+ }
+
+ }
+#endif
+
+ static DelegateInfo *MakeDelegateInfo(AppDomain *pAppDomain,
+ OBJECTREF *state,
+ OBJECTREF *waitEvent,
+ OBJECTREF *registeredWaitObject);
+};
+
+
+
+
+
+#endif // DELEGATE_INFO
diff --git a/src/vm/dirs.proj b/src/vm/dirs.proj
new file mode 100644
index 0000000000..a931bcac57
--- /dev/null
+++ b/src/vm/dirs.proj
@@ -0,0 +1,20 @@
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <!--Import the settings-->
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\clr.props" />
+
+ <PropertyGroup>
+ <BuildInPhase1>true</BuildInPhase1>
+ <BuildInPhaseDefault>false</BuildInPhaseDefault>
+ <BuildCoreBinaries>true</BuildCoreBinaries>
+ <BuildSysBinaries>true</BuildSysBinaries>
+ </PropertyGroup>
+
+ <!--The following projects will build during PHASE 1-->
+ <ItemGroup Condition="'$(BuildExePhase)' == '1'">
+ <ProjectFile Include="wks\wks.nativeproj" />
+ <ProjectFile Include="dac\dirs.proj" />
+ </ItemGroup>
+
+ <!--Import the targets-->
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\tools\Microsoft.DevDiv.Traversal.targets" />
+</Project>
diff --git a/src/vm/dispatchinfo.cpp b/src/vm/dispatchinfo.cpp
new file mode 100644
index 0000000000..7aa95cf6d0
--- /dev/null
+++ b/src/vm/dispatchinfo.cpp
@@ -0,0 +1,3772 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: DispatchInfo.cpp
+//
+
+//
+// Implementation of helpers used to expose IDispatch
+// and IDispatchEx to COM.
+//
+
+
+#include "common.h"
+
+#include "dispatchinfo.h"
+#include "dispex.h"
+#include "object.h"
+#include "field.h"
+#include "method.hpp"
+#include "class.h"
+#include "comcallablewrapper.h"
+#include "threads.h"
+#include "excep.h"
+#include "objecthandle.h"
+#include "comutilnative.h"
+#include "eeconfig.h"
+#include "interoputil.h"
+#include "olevariant.h"
+#include "commtmemberinfomap.h"
+#include "dispparammarshaler.h"
+#include "security.h"
+#include "reflectioninvocation.h"
+#include "dbginterface.h"
+
+#define EXCEPTION_INNER_PROP "InnerException"
+
+// The names of the properties accessed on the managed member infos.
+#define MEMBER_INFO_NAME_PROP "Name"
+
+// The initial size of the DISPID to member map.
+#define DISPID_TO_MEMBER_MAP_INITIAL_SIZE 37
+
+// The names of the properties that are accessed on the managed member infos.
+#define MEMBERINFO_TYPE_PROP "MemberType"
+
+// The names of the properties that are accessed on managed ParameterInfo.
+#define PARAMETERINFO_NAME_PROP "Name"
+
+
+OBJECTHANDLE DispatchInfo::m_hndOleAutBinder = NULL;
+
+MethodTable* DispatchMemberInfo::s_pMemberTypes[NUM_MEMBER_TYPES] = {NULL};
+EnumMemberTypes DispatchMemberInfo::s_memberTypes[NUM_MEMBER_TYPES] = {Uninitted};
+int DispatchMemberInfo::s_iNumMemberTypesKnown = 0;
+
+// Helper function to convert between a DISPID and a hashkey.
+inline UPTR DispID2HashKey(DISPID DispID)
+{
+ LIMITED_METHOD_CONTRACT;
+
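+ // The +2 bias keeps DISPID_UNKNOWN (-1) and -2 away from key values the
+ // hashtable cannot handle; FindMember rejects those two DISPIDs up front.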
+ return DispID + 2;
+}
+
+// Typedef for string comparison functions.
+typedef int (__cdecl *UnicodeStringCompareFuncPtr)(const wchar_t *, const wchar_t *);
+
+//--------------------------------------------------------------------------------
+// The DispatchMemberInfo class implementation.
+
+DispatchMemberInfo::DispatchMemberInfo(DispatchInfo *pDispInfo, DISPID DispID, SString& strName, OBJECTREF MemberInfoObj)
+: m_DispID(DispID)
+, m_hndMemberInfo(NULL)
+, m_apParamMarshaler(NULL)
+, m_pParamInOnly(NULL)
+, m_strName(strName)
+, m_pNext(NULL)
+, m_enumType (Uninitted)
+, m_iNumParams(-1)
+, m_CultureAwareState(Unknown)
+, m_bRequiresManagedCleanup(FALSE)
+, m_bInitialized(FALSE)
+, m_bNeutered(FALSE)
+, m_pDispInfo(pDispInfo)
+, m_bLastParamOleVarArg(FALSE)
+{
+ WRAPPER_NO_CONTRACT; // Calls to CreateHandle, above, means not a leaf contract
+}
+
+void DispatchMemberInfo::Neuter()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_apParamMarshaler)
+ {
+ // Need to delete all individual members?
+ // Can't calculate the exact number here.
+ delete [] m_apParamMarshaler;
+ m_apParamMarshaler = NULL;
+ }
+
+ if (m_pParamInOnly)
+ {
+ delete [] m_pParamInOnly;
+ m_pParamInOnly = NULL;
+ }
+
+ //m_pNext = NULL;
+ m_enumType = Uninitted;
+ m_iNumParams = -1;
+ m_CultureAwareState = Unknown;
+ m_bNeutered = TRUE;
+}
+
+DispatchMemberInfo::~DispatchMemberInfo()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Delete the parameter marshalers and then delete the array of parameter
+ // marshalers itself.
+ if (m_apParamMarshaler)
+ {
+ EnumMemberTypes MemberType = GetMemberType();
+ int NumParamMarshalers = GetNumParameters() + ((MemberType == Property) ? 2 : 1);
+ for (int i = 0; i < NumParamMarshalers; i++)
+ {
+ if (m_apParamMarshaler[i])
+ delete m_apParamMarshaler[i];
+ }
+ delete []m_apParamMarshaler;
+ }
+
+ if (m_pParamInOnly)
+ delete [] m_pParamInOnly;
+
+ // Destroy the member info object.
+ if (m_hndMemberInfo)
+ DestroyHandle(m_hndMemberInfo);
+
+ // Clear the name of the member.
+ m_strName.Clear();
+}
+
+void DispatchMemberInfo::EnsureInitialized()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Initialize the entry if it hasn't been initialized yet. This must be synchronized.
+ if (!m_bInitialized)
+ {
+ DispatchInfo::LockHolder lh(m_pDispInfo);
+
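+ // Double-checked initialization: re-test under the DispatchInfo lock in case
+ // another thread got there first.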
+ if (!m_bInitialized)
+ Init();
+ }
+}
+
+void DispatchMemberInfo::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ // Determine the type of the member.
+ DetermineMemberType();
+
+ // Determine the parameter count.
+ DetermineParamCount();
+
+ // Determine the culture awareness of the member.
+ DetermineCultureAwareness();
+
+ // Set up the parameter marshaler info.
+ SetUpParamMarshalerInfo();
+
+ // Mark the dispatch member info as having been initialized.
+ m_bInitialized = TRUE;
+ }
+ EX_CATCH
+ {
+ // If we do throw an exception, then the status of the object
+ // is in limbo - just neuter it.
+ Neuter();
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+}
+
+HRESULT DispatchMemberInfo::GetIDsOfParameters(__in_ecount(NumNames) WCHAR **astrNames, int NumNames, DISPID *aDispIds, BOOL bCaseSensitive)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+
+ // The member info must have been initialized before this is called.
+ PRECONDITION(TRUE == m_bInitialized);
+ PRECONDITION(CheckPointer(astrNames));
+ PRECONDITION(CheckPointer(aDispIds));
+ }
+ CONTRACTL_END;
+
+ int NumNamesMapped = 0;
+ PTRARRAYREF ParamArray = NULL;
+ int cNames = 0;
+
+ // Initialize all the IDs to DISPID_UNKNOWN.
+ for (cNames = 0; cNames < NumNames; cNames++)
+ aDispIds[cNames] = DISPID_UNKNOWN;
+
+ // Retrieve the appropriate string comparison function.
+ UnicodeStringCompareFuncPtr StrCompFunc = bCaseSensitive ? wcscmp : SString::_wcsicmp;
+
+ GCPROTECT_BEGIN(ParamArray)
+ {
+ // Retrieve the member parameters.
+ ParamArray = GetParameters();
+
+ // If we managed to retrieve a non-empty array of parameters, then go through it and
+ // map the specified names to IDs.
+ if ((ParamArray != NULL) && (ParamArray->GetNumComponents() > 0))
+ {
+ int NumParams = ParamArray->GetNumComponents();
+ int cParams = 0;
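+ // Nested holders: the outer array and each per-parameter name buffer are
+ // freed automatically if anything below throws.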
+ NewArrayHolder< NewArrayHolder<WCHAR> > astrParamNames = new NewArrayHolder<WCHAR>[NumParams];
+
+ // Go through and retrieve the names of all the components.
+ for (cParams = 0; cParams < NumParams; cParams++)
+ {
+ OBJECTREF ParamInfoObj = ParamArray->GetAt(cParams);
+ GCPROTECT_BEGIN(ParamInfoObj)
+ {
+ // Retrieve the MD to use to retrieve the name of the parameter.
+ MethodDesc *pGetParamNameMD = MemberLoader::FindPropertyMethod(ParamInfoObj->GetMethodTable(), PARAMETERINFO_NAME_PROP, PropertyGet);
+ _ASSERTE(pGetParamNameMD && "Unable to find getter method for property ParameterInfo::Name");
+ MethodDescCallSite getParamName(pGetParamNameMD, &ParamInfoObj);
+
+ // Retrieve the name of the parameter.
+ ARG_SLOT GetNameArgs[] =
+ {
+ ObjToArgSlot(ParamInfoObj)
+ };
+ STRINGREF MemberNameObj = getParamName.Call_RetSTRINGREF(GetNameArgs);
+
+ // If we got a valid name back then store that in the array of names.
+ if (MemberNameObj != NULL)
+ {
+ astrParamNames[cParams] = new WCHAR[MemberNameObj->GetStringLength() + 1];
+ wcscpy_s(astrParamNames[cParams], MemberNameObj->GetStringLength() + 1, MemberNameObj->GetBuffer());
+ }
+ }
+ GCPROTECT_END();
+ }
+
+ // Now go through the list of specified names and map them to IDs.
+ for (cNames = 0; cNames < NumNames; cNames++)
+ {
+ for (cParams = 0; cParams < NumParams; cParams++)
+ {
+ if (astrParamNames[cParams] && (StrCompFunc(astrNames[cNames], astrParamNames[cParams]) == 0))
+ {
+ aDispIds[cNames] = cParams;
+ NumNamesMapped++;
+ break;
+ }
+ }
+ }
+ }
+ }
+ GCPROTECT_END();
+
+ return (NumNamesMapped == NumNames) ? S_OK : DISP_E_UNKNOWNNAME;
+}
+
+PTRARRAYREF DispatchMemberInfo::GetParameters()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ PTRARRAYREF ParamArray = NULL;
+ MethodDesc *pGetParamsMD = NULL;
+
+ // Retrieve the method to use to retrieve the array of parameters.
+ switch (GetMemberType())
+ {
+ case Method:
+ {
+ pGetParamsMD = DispatchInfo::GetMethodInfoMD(METHOD__METHOD__GET_PARAMETERS, ObjectFromHandle(m_hndMemberInfo)->GetTypeHandle());
+ _ASSERTE(pGetParamsMD && "Unable to find method MemberBase::GetParameters");
+ break;
+ }
+
+ case Property:
+ {
+ pGetParamsMD = DispatchInfo::GetPropertyInfoMD(METHOD__PROPERTY__GET_INDEX_PARAMETERS, ObjectFromHandle(m_hndMemberInfo)->GetTypeHandle());
+ _ASSERTE(pGetParamsMD && "Unable to find method PropertyInfo::GetIndexParameters");
+ break;
+ }
+ }
+
+ // If the member has parameters then retrieve the array of parameters.
+ if (pGetParamsMD != NULL)
+ {
+ MethodDescCallSite getParams(pGetParamsMD, m_hndMemberInfo);
+
+ ARG_SLOT GetParamsArgs[] =
+ {
+ ObjToArgSlot(ObjectFromHandle(m_hndMemberInfo))
+ };
+
+ ParamArray = (PTRARRAYREF) getParams.Call_RetOBJECTREF(GetParamsArgs);
+ }
+
+ return ParamArray;
+}
+
+void DispatchMemberInfo::MarshalParamNativeToManaged(int iParam, VARIANT *pSrcVar, OBJECTREF *pDestObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pSrcVar));
+ PRECONDITION(pDestObj != NULL);
+ PRECONDITION(TRUE == m_bInitialized);
+ }
+ CONTRACTL_END;
+
+ if (m_apParamMarshaler && m_apParamMarshaler[iParam + 1])
+ m_apParamMarshaler[iParam + 1]->MarshalNativeToManaged(pSrcVar, pDestObj);
+ else
+ OleVariant::MarshalObjectForOleVariant(pSrcVar, pDestObj);
+}
+
+void DispatchMemberInfo::MarshalParamManagedToNativeRef(int iParam, OBJECTREF *pSrcObj, VARIANT *pRefVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pRefVar));
+ PRECONDITION(TRUE == m_bInitialized);
+ PRECONDITION(pSrcObj != NULL);
+ }
+ CONTRACTL_END;
+
+ if (m_apParamMarshaler && m_apParamMarshaler[iParam + 1])
+ m_apParamMarshaler[iParam + 1]->MarshalManagedToNativeRef(pSrcObj, pRefVar);
+ else
+ OleVariant::MarshalOleRefVariantForObject(pSrcObj, pRefVar);
+}
+
+void DispatchMemberInfo::CleanUpParamManaged(int iParam, OBJECTREF *pObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pObj != NULL);
+ PRECONDITION(TRUE == m_bInitialized);
+ }
+ CONTRACTL_END;
+
+ if (m_apParamMarshaler && m_apParamMarshaler[iParam + 1])
+ m_apParamMarshaler[iParam + 1]->CleanUpManaged(pObj);
+}
+
+void DispatchMemberInfo::MarshalReturnValueManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDestVar));
+ PRECONDITION(pSrcObj != NULL);
+ PRECONDITION(TRUE == m_bInitialized);
+ }
+ CONTRACTL_END;
+
+ if (m_apParamMarshaler && m_apParamMarshaler[0])
+ m_apParamMarshaler[0]->MarshalManagedToNative(pSrcObj, pDestVar);
+ else
+ OleVariant::MarshalOleVariantForObject(pSrcObj, pDestVar);
+}
+
+ComMTMethodProps * DispatchMemberInfo::GetMemberProps(OBJECTREF MemberInfoObj, ComMTMemberInfoMap *pMemberMap)
+{
+ CONTRACT (ComMTMethodProps*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(MemberInfoObj != NULL);
+ PRECONDITION(CheckPointer(pMemberMap, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ DISPID DispId = DISPID_UNKNOWN;
+ ComMTMethodProps *pMemberProps = NULL;
+
+ // If we don't have a member map then we cannot retrieve properties for the member.
+ if (!pMemberMap)
+ RETURN NULL;
+
+ // Get the member's properties.
+ GCPROTECT_BEGIN(MemberInfoObj);
+ {
+ MethodTable *pMemberInfoClass = MemberInfoObj->GetMethodTable();
+ if (MscorlibBinder::IsClass(pMemberInfoClass, CLASS__METHOD))
+ {
+ // Retrieve the MethodDesc from the MethodInfo.
+ MethodDescCallSite getMethodHandle(METHOD__METHOD_BASE__GET_METHODDESC, &MemberInfoObj);
+ ARG_SLOT GetMethodHandleArg = ObjToArgSlot(MemberInfoObj);
+ MethodDesc* pMeth = (MethodDesc*) getMethodHandle.Call_RetLPVOID(&GetMethodHandleArg);
+ if (pMeth)
+ pMemberProps = pMemberMap->GetMethodProps(pMeth->GetMemberDef(), pMeth->GetModule());
+ }
+ else if (MscorlibBinder::IsClass(pMemberInfoClass, CLASS__RT_FIELD_INFO))
+ {
+ MethodDescCallSite getFieldHandle(METHOD__RTFIELD__GET_FIELDHANDLE, &MemberInfoObj);
+ ARG_SLOT arg = ObjToArgSlot(MemberInfoObj);
+ FieldDesc* pFld = (FieldDesc*) getFieldHandle.Call_RetLPVOID(&arg);
+ if (pFld)
+ pMemberProps = pMemberMap->GetMethodProps(pFld->GetMemberDef(), pFld->GetModule());
+ }
+ else if (MscorlibBinder::IsClass(pMemberInfoClass, CLASS__PROPERTY))
+ {
+ MethodDescCallSite getToken(METHOD__PROPERTY__GET_TOKEN, &MemberInfoObj);
+ ARG_SLOT arg = ObjToArgSlot(MemberInfoObj);
+ mdToken propTok = (mdToken) getToken.Call_RetArgSlot(&arg);
+ MethodDescCallSite getModule(METHOD__PROPERTY__GET_MODULE, &MemberInfoObj);
+ ARG_SLOT arg1 = ObjToArgSlot(MemberInfoObj);
+ REFLECTMODULEBASEREF module = (REFLECTMODULEBASEREF) getModule.Call_RetOBJECTREF(&arg1);
+ Module* pModule = module->GetModule();
+ pMemberProps = pMemberMap->GetMethodProps(propTok, pModule);
+ }
+ }
+ GCPROTECT_END();
+
+ RETURN pMemberProps;
+}
+
+DISPID DispatchMemberInfo::GetMemberDispId(OBJECTREF MemberInfoObj, ComMTMemberInfoMap *pMemberMap)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMemberMap, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ _ASSERT(MemberInfoObj);
+
+ DISPID DispId = DISPID_UNKNOWN;
+
+ // Get the member's properties.
+ ComMTMethodProps *pMemberProps = GetMemberProps(MemberInfoObj, pMemberMap);
+
+ // If we managed to get the properties of the member then extract the DISPID.
+ if (pMemberProps)
+ DispId = pMemberProps->dispid;
+
+ return DispId;
+}
+
+LPWSTR DispatchMemberInfo::GetMemberName(OBJECTREF MemberInfoObj, ComMTMemberInfoMap *pMemberMap)
+{
+ CONTRACT (LPWSTR)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(MemberInfoObj != NULL);
+ PRECONDITION(CheckPointer(pMemberMap, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ NewArrayHolder<WCHAR> strMemberName = NULL;
+ ComMTMethodProps *pMemberProps = NULL;
+
+ GCPROTECT_BEGIN(MemberInfoObj);
+ {
+ // Get the member's properties.
+ pMemberProps = GetMemberProps(MemberInfoObj, pMemberMap);
+
+ // If we managed to get the member's properties then extract the name.
+ if (pMemberProps)
+ {
+ int MemberNameLen = (INT)wcslen(pMemberProps->pName);
+ strMemberName = new WCHAR[MemberNameLen + 1];
+
+ memcpy(strMemberName, pMemberProps->pName, (MemberNameLen + 1) * sizeof(WCHAR));
+ }
+ else
+ {
+ // Retrieve the Get method for the Name property.
+ MethodDesc *pMD = MemberLoader::FindPropertyMethod(MemberInfoObj->GetMethodTable(), MEMBER_INFO_NAME_PROP, PropertyGet);
+ _ASSERTE(pMD && "Unable to find getter method for property MemberInfo::Name");
+ MethodDescCallSite propGet(pMD, &MemberInfoObj);
+
+ // Prepare the arguments.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(MemberInfoObj)
+ };
+
+ // Retrieve the value of the Name property.
+ STRINGREF strObj = propGet.Call_RetSTRINGREF(Args);
+ _ASSERTE(strObj != NULL);
+
+ // Copy the name into the buffer we will return.
+ int MemberNameLen = strObj->GetStringLength();
+ strMemberName = new WCHAR[strObj->GetStringLength() + 1];
+ memcpy(strMemberName, strObj->GetBuffer(), MemberNameLen * sizeof(WCHAR));
+ strMemberName[MemberNameLen] = 0;
+ }
+ }
+ GCPROTECT_END();
+
+ strMemberName.SuppressRelease();
+ RETURN strMemberName;
+}
+
+void DispatchMemberInfo::DetermineMemberType()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+
+ // This should not be called more than once.
+ PRECONDITION(m_enumType == Uninitted);
+ }
+ CONTRACTL_END;
+
+ OBJECTREF MemberInfoObj = ObjectFromHandle(m_hndMemberInfo);
+
+ // Check to see if the member info is of a type we have already seen.
+ TypeHandle pMemberInfoClass = MemberInfoObj->GetTypeHandle();
+ for (int i = 0 ; i < s_iNumMemberTypesKnown ; i++)
+ {
+ if (pMemberInfoClass.GetMethodTable() == s_pMemberTypes[i])
+ {
+ m_enumType = s_memberTypes[i];
+ return;
+ }
+ }
+
+ GCPROTECT_BEGIN(MemberInfoObj);
+ {
+ // Retrieve the method descriptor for the type property accessor.
+ MethodDesc *pMD = MemberLoader::FindPropertyMethod(MemberInfoObj->GetMethodTable(), MEMBERINFO_TYPE_PROP, PropertyGet);
+ _ASSERTE(pMD && "Unable to find getter method for property MemberInfo::Type");
+ MethodDescCallSite propGet(pMD, &MemberInfoObj);
+
+ // Prepare the arguments that will be used to retrieve the value of all the properties.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(MemberInfoObj)
+ };
+
+ // Retrieve the actual type of the member info.
+ m_enumType = (EnumMemberTypes)propGet.Call_RetArgSlot(Args);
+ }
+ GCPROTECT_END();
+
+ if (s_iNumMemberTypesKnown < NUM_MEMBER_TYPES)
+ {
+ s_pMemberTypes[s_iNumMemberTypesKnown] = MemberInfoObj->GetMethodTable();
+ s_memberTypes[s_iNumMemberTypesKnown++] = m_enumType;
+ }
+}
+
+void DispatchMemberInfo::DetermineParamCount()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ // This should not be called more than once.
+ PRECONDITION(m_iNumParams == -1);
+ }
+ CONTRACTL_END;
+
+ MethodDesc *pGetParamsMD = NULL;
+
+ OBJECTREF MemberInfoObj = ObjectFromHandle(m_hndMemberInfo);
+ GCPROTECT_BEGIN(MemberInfoObj);
+ {
+ // Retrieve the method to use to retrieve the array of parameters.
+ switch (GetMemberType())
+ {
+ case Method:
+ {
+ pGetParamsMD = DispatchInfo::GetMethodInfoMD(METHOD__METHOD__GET_PARAMETERS, ObjectFromHandle(m_hndMemberInfo)->GetTypeHandle());
+ _ASSERTE(pGetParamsMD && "Unable to find method MemberBase::GetParameters");
+ break;
+ }
+
+ case Property:
+ {
+ pGetParamsMD = DispatchInfo::GetPropertyInfoMD(METHOD__PROPERTY__GET_INDEX_PARAMETERS, ObjectFromHandle(m_hndMemberInfo)->GetTypeHandle());
+ _ASSERTE(pGetParamsMD && "Unable to find method PropertyInfo::GetIndexParameters");
+ break;
+ }
+ }
+
+ // If the member has parameters then get their count.
+ if (pGetParamsMD != NULL)
+ {
+ MethodDescCallSite getParams(pGetParamsMD, &MemberInfoObj);
+
+ ARG_SLOT GetParamsArgs[] =
+ {
+ ObjToArgSlot(ObjectFromHandle(m_hndMemberInfo))
+ };
+
+ PTRARRAYREF ParamArray = (PTRARRAYREF) getParams.Call_RetOBJECTREF(GetParamsArgs);
+ if (ParamArray != NULL)
+ m_iNumParams = ParamArray->GetNumComponents();
+ }
+ else
+ {
+ m_iNumParams = 0;
+ }
+ }
+ GCPROTECT_END();
+}
+
+void DispatchMemberInfo::DetermineCultureAwareness()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ // This should not be called more than once.
+ PRECONDITION(m_CultureAwareState == Unknown);
+ }
+ CONTRACTL_END;
+
+ // Load the LCIDConversionAttribute type.
+ MethodTable * pLcIdConvAttrClass = MscorlibBinder::GetClass(CLASS__LCID_CONVERSION_TYPE);
+
+ // Check to see if the attribute is set.
+ OBJECTREF MemberInfoObj = ObjectFromHandle(m_hndMemberInfo);
+ GCPROTECT_BEGIN(MemberInfoObj);
+ {
+ // Retrieve the method to use to determine if the DispIdAttribute custom attribute is set.
+ MethodDesc *pGetCustomAttributesMD = DispatchInfo::GetCustomAttrProviderMD(MemberInfoObj->GetTypeHandle());
+ MethodDescCallSite getCustomAttributes(pGetCustomAttributesMD, &MemberInfoObj);
+
+ // Prepare the arguments.
+ ARG_SLOT GetCustomAttributesArgs[] =
+ {
+ 0,
+ ObjToArgSlot(pLcIdConvAttrClass->GetManagedClassObject()),
+ 0,
+ };
+
+ // Now that we have potentially triggered a GC in the GetManagedClassObject
+ // call above, it is safe to set the 'this' using our properly protected
+ // MemberInfoObj value.
+ GetCustomAttributesArgs[0] = ObjToArgSlot(MemberInfoObj);
+
+ // Retrieve the custom attributes of type LCIDConversionAttribute.
+ PTRARRAYREF CustomAttrArray = NULL;
+ EX_TRY
+ {
+ CustomAttrArray = (PTRARRAYREF) getCustomAttributes.Call_RetOBJECTREF(GetCustomAttributesArgs);
+ }
+ EX_CATCH
+ {
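+ // Swallow any exception: if attribute retrieval fails, CustomAttrArray stays
+ // NULL and the member is treated as non-culture-aware below.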
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ GCPROTECT_BEGIN(CustomAttrArray)
+ {
+ if ((CustomAttrArray != NULL) && (CustomAttrArray->GetNumComponents() > 0))
+ m_CultureAwareState = Aware;
+ else
+ m_CultureAwareState = NonAware;
+ }
+ GCPROTECT_END();
+ }
+ GCPROTECT_END();
+}
+
+void DispatchMemberInfo::SetUpParamMarshalerInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ BOOL bSetUpReturnValueOnly = FALSE;
+ OBJECTREF SetterObj = NULL;
+ OBJECTREF GetterObj = NULL;
+ OBJECTREF MemberInfoObj = ObjectFromHandle(m_hndMemberInfo);
+
+ GCPROTECT_BEGIN(SetterObj);
+ GCPROTECT_BEGIN(GetterObj);
+ GCPROTECT_BEGIN(MemberInfoObj);
+ {
+ MethodTable *pMemberInfoMT = MemberInfoObj->GetMethodTable();
+
+ if (MscorlibBinder::IsClass(pMemberInfoMT, CLASS__METHOD))
+ {
+ MethodDescCallSite getMethodHandle(METHOD__METHOD_BASE__GET_METHODDESC, &MemberInfoObj);
+ ARG_SLOT arg = ObjToArgSlot(MemberInfoObj);
+ MethodDesc* pMeth = (MethodDesc*) getMethodHandle.Call_RetLPVOID(&arg);
+ if (pMeth)
+ SetUpMethodMarshalerInfo(pMeth, FALSE);
+ }
+ else if (MscorlibBinder::IsClass(pMemberInfoMT, CLASS__FIELD))
+ {
+ MethodDescCallSite getFieldHandle(METHOD__RTFIELD__GET_FIELDHANDLE, &MemberInfoObj);
+ ARG_SLOT arg = ObjToArgSlot(MemberInfoObj);
+ FieldDesc* pFld = (FieldDesc*) getFieldHandle.Call_RetLPVOID(&arg);
+ if (pFld)
+ SetUpFieldMarshalerInfo(pFld);
+ }
+ else if (MscorlibBinder::IsClass(pMemberInfoMT, CLASS__PROPERTY))
+ {
+ BOOL isGetter = FALSE;
+ MethodDescCallSite getSetter(METHOD__PROPERTY__GET_SETTER, &MemberInfoObj);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(MemberInfoObj),
+ BoolToArgSlot(false)
+ };
+ SetterObj = getSetter.Call_RetOBJECTREF(args);
+
+ if (SetterObj != NULL)
+ {
+ MethodDescCallSite getMethodHandle(METHOD__METHOD_BASE__GET_METHODDESC, &SetterObj);
+ ARG_SLOT arg = ObjToArgSlot(SetterObj);
+ MethodDesc* pMeth = (MethodDesc*) getMethodHandle.Call_RetLPVOID(&arg);
+ if (pMeth)
+ {
+ bSetUpReturnValueOnly = TRUE;
+ SetUpMethodMarshalerInfo(pMeth, FALSE);
+ }
+ }
+
+ MethodDescCallSite getGetter(METHOD__PROPERTY__GET_GETTER, &MemberInfoObj);
+ ARG_SLOT args1[] =
+ {
+ ObjToArgSlot(MemberInfoObj),
+ BoolToArgSlot(false)
+ };
+ GetterObj = getGetter.Call_RetOBJECTREF(args1);
+
+ if (GetterObj != NULL)
+ {
+ MethodDescCallSite getMethodHandle(METHOD__METHOD_BASE__GET_METHODDESC, &GetterObj);
+ ARG_SLOT arg = ObjToArgSlot(GetterObj);
+ MethodDesc* pMeth = (MethodDesc*) getMethodHandle.Call_RetLPVOID(&arg);
+ if (pMeth)
+ {
+ // Only set up the marshalling information for the parameters if we
+ // haven't done it already for the setter.
+ SetUpMethodMarshalerInfo(pMeth, bSetUpReturnValueOnly);
+ }
+ }
+ }
+ else
+ {
+ // @FUTURE: Add support for user defined derived classes for
+ // MethodInfo, PropertyInfo and FieldInfo.
+ }
+ }
+ GCPROTECT_END();
+ GCPROTECT_END();
+ GCPROTECT_END();
+}
+
+void DispatchMemberInfo::SetUpMethodMarshalerInfo(MethodDesc *pMD, BOOL bReturnValueOnly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ MetaSig msig(pMD);
+ LPCSTR szName;
+ USHORT usSequence;
+ DWORD dwAttr;
+ mdParamDef returnParamDef = mdParamDefNil;
+ mdParamDef currParamDef = mdParamDefNil;
+
+ int numArgs = msig.NumFixedArgs();
+
+ IMDInternalImport *pInternalImport = msig.GetModule()->GetMDImport();
+
+ HENUMInternalHolder hEnumParams(pInternalImport);
+
+ //
+ // Initialize the parameter definition enum.
+ //
+ hEnumParams.EnumInit(mdtParamDef, pMD->GetMemberDef());
+
+ //
+ // Retrieve the paramdef for the return type and determine which is the next
+ // parameter that has parameter information.
+ //
+ do
+ {
+ if (pInternalImport->EnumNext(&hEnumParams, &currParamDef))
+ {
+ IfFailThrow(pInternalImport->GetParamDefProps(currParamDef, &usSequence, &dwAttr, &szName));
+
+ if (usSequence == 0)
+ {
+ // The first parameter, if it has sequence 0, actually describes the return type.
+ returnParamDef = currParamDef;
+ }
+ }
+ else
+ {
+ usSequence = (USHORT)-1;
+ }
+ }
+ while (usSequence == 0);
+
+ // Look up the best fit mapping info via Assembly & Interface level attributes
+ BOOL BestFit = TRUE;
+ BOOL ThrowOnUnmappableChar = FALSE;
+ ReadBestFitCustomAttribute(pMD, &BestFit, &ThrowOnUnmappableChar);
+
+ //
+ // Unless the bReturnValueOnly flag is set, set up the marshaling info for the parameters.
+ //
+ if (!bReturnValueOnly)
+ {
+ int iParam = 1;
+ CorElementType mtype;
+ while (ELEMENT_TYPE_END != (mtype = msig.NextArg()))
+ {
+ //
+ // Get the parameter token if the current parameter has one.
+ //
+ mdParamDef paramDef = mdParamDefNil;
+ if (usSequence == iParam)
+ {
+ paramDef = currParamDef;
+
+ if (pInternalImport->EnumNext(&hEnumParams, &currParamDef))
+ {
+ IfFailThrow(pInternalImport->GetParamDefProps(currParamDef, &usSequence, &dwAttr, &szName));
+
+ // Validate that the param def tokens are in order.
+ _ASSERTE((usSequence > iParam) && "Param def tokens are not in order");
+ }
+ else
+ {
+ usSequence = (USHORT)-1;
+ }
+ }
+
+
+ //
+ // Set up the marshaling info for the parameter.
+ //
+
+ MarshalInfo Info(msig.GetModule(), msig.GetArgProps(), msig.GetSigTypeContext(), paramDef, MarshalInfo::MARSHAL_SCENARIO_COMINTEROP,
+ (CorNativeLinkType)0, (CorNativeLinkFlags)0,
+ TRUE, iParam, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, pMD, TRUE
+ #ifdef _DEBUG
+ , pMD->m_pszDebugMethodName, pMD->m_pszDebugClassName, iParam
+ #endif
+ );
+
+
+ //
+ // Based on the MarshalInfo, set up a DispParamMarshaler for the parameter.
+ //
+ SetUpDispParamMarshalerForMarshalInfo(iParam, &Info);
+
+ //
+ // Get the in/out/ref attributes.
+ //
+ SetUpDispParamAttributes(iParam, &Info);
+
+ m_bLastParamOleVarArg |= Info.IsOleVarArgCandidate();
+
+ //
+ // Increase the argument index.
+ //
+ iParam++;
+ }
+
+ // Make sure that there are not more param def tokens than there are COM+ arguments.
+ _ASSERTE( usSequence == (USHORT)-1 && "There are more parameter information tokens than there are COM+ arguments" );
+ }
+
+ //
+ // Set up the marshaling info for the return value.
+ //
+
+ if (!msig.IsReturnTypeVoid())
+ {
+ MarshalInfo Info(msig.GetModule(), msig.GetReturnProps(), msig.GetSigTypeContext(), returnParamDef, MarshalInfo::MARSHAL_SCENARIO_COMINTEROP,
+ (CorNativeLinkType)0, (CorNativeLinkFlags)0,
+ FALSE, 0, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, pMD, TRUE
+#ifdef _DEBUG
+ , pMD->m_pszDebugMethodName, pMD->m_pszDebugClassName, 0
+#endif
+ );
+
+ SetUpDispParamMarshalerForMarshalInfo(0, &Info);
+ }
+}
+
+void DispatchMemberInfo::SetUpFieldMarshalerInfo(FieldDesc *pField)
+{
+ // @TODO(DM): Implement this.
+ LIMITED_METHOD_CONTRACT;
+}
+
+void DispatchMemberInfo::SetUpDispParamMarshalerForMarshalInfo(int iParam, MarshalInfo *pInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pInfo));
+ }
+ CONTRACTL_END;
+
+ DispParamMarshaler *pDispParamMarshaler = pInfo->GenerateDispParamMarshaler();
+ if (pDispParamMarshaler)
+ {
+ // If the array of marshalers hasn't been allocated yet, then allocate it.
+ if (!m_apParamMarshaler)
+ {
+ // The array needs to be one more than the number of parameters for
+ // normal methods and fields, and two more for properties.
+ EnumMemberTypes MemberType = GetMemberType();
+ int NumParamMarshalers = GetNumParameters() + ((MemberType == Property) ? 2 : 1);
+ m_apParamMarshaler = new DispParamMarshaler*[NumParamMarshalers];
+ memset(m_apParamMarshaler, 0, sizeof(DispParamMarshaler*) * NumParamMarshalers);
+ }
+
+ // Set the DispParamMarshaler in the array.
+ m_apParamMarshaler[iParam] = pDispParamMarshaler;
+
+ // If the disp param marshaler requires managed cleanup, then set
+ // m_bRequiresManagedCleanup to TRUE to indicate the method requires
+ // managed cleanup.
+ if (pDispParamMarshaler->RequiresManagedCleanup())
+ m_bRequiresManagedCleanup = TRUE;
+ }
+}
+
+
+void DispatchMemberInfo::SetUpDispParamAttributes(int iParam, MarshalInfo* Info)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(Info));
+ }
+ CONTRACTL_END;
+
+ // If the array of in-only parameter indicators hasn't been allocated yet, then allocate it.
+ if (!m_pParamInOnly)
+ {
+ // The array needs to be one more than the number of parameters for
+ // normal methods and fields, and two more for properties.
+ EnumMemberTypes MemberType = GetMemberType();
+ int NumInOnlyFlags = GetNumParameters() + ((MemberType == Property) ? 2 : 1);
+ m_pParamInOnly = new BOOL[NumInOnlyFlags];
+ memset(m_pParamInOnly, 0, sizeof(BOOL) * NumInOnlyFlags);
+ }
+
+ m_pParamInOnly[iParam] = ( Info->IsIn() && !Info->IsOut() );
+}
+
+
+//--------------------------------------------------------------------------------
+// The DispatchInfo class implementation.
+
+DispatchInfo::DispatchInfo(MethodTable *pMT)
+: m_pMT(pMT)
+, m_pFirstMemberInfo(NULL)
+, m_lock(CrstInterop, (CrstFlags)(CRST_HOST_BREAKABLE | CRST_REENTRANCY))
+, m_CurrentDispID(0x10000)
+, m_bInvokeUsingInvokeMember(FALSE)
+, m_bAllowMembersNotInComMTMemberMap(FALSE)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ // Init the hashtable.
+ m_DispIDToMemberInfoMap.Init(DISPID_TO_MEMBER_MAP_INITIAL_SIZE, NULL);
+}
+
+DispatchInfo::~DispatchInfo()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DispatchMemberInfo* pCurrMember = m_pFirstMemberInfo;
+ while (pCurrMember)
+ {
+ // Retrieve the next member.
+ DispatchMemberInfo* pNextMember = pCurrMember->m_pNext;
+
+ // Delete the current member.
+ delete pCurrMember;
+
+ // Process the next member.
+ pCurrMember = pNextMember;
+ }
+}
+
+DispatchMemberInfo* DispatchInfo::FindMember(DISPID DispID)
+{
+ CONTRACT (DispatchMemberInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // We need to special-case DISPID_UNKNOWN and -2 because the hashtable cannot handle them.
+ // This is OK since these are invalid DISPIDs.
+ if ((DispID == DISPID_UNKNOWN) || (DispID == -2))
+ RETURN NULL;
+
+ // Look up the member with the specified DISPID in the hashtable. Note: this hash is unsynchronized, but Gethash
+ // doesn't require synchronization.
+ UPTR Data = (UPTR)m_DispIDToMemberInfoMap.Gethash(DispID2HashKey(DispID));
+ if (Data != -1)
+ {
+ // We have found the member, so ensure it is initialized and return it.
+ DispatchMemberInfo *pMemberInfo = (DispatchMemberInfo*)Data;
+
+ pMemberInfo->EnsureInitialized();
+
+ RETURN pMemberInfo;
+ }
+ else
+ {
+ RETURN NULL;
+ }
+}
+
+DispatchMemberInfo* DispatchInfo::FindMember(SString& strName, BOOL bCaseSensitive)
+{
+ CONTRACT (DispatchMemberInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ BOOL fFound = FALSE;
+
+ // Go through the list of DispatchMemberInfo's to try and find one with the
+ // specified name.
+ DispatchMemberInfo *pCurrMemberInfo = m_pFirstMemberInfo;
+ while (pCurrMemberInfo)
+ {
+ if (ObjectFromHandle(pCurrMemberInfo->m_hndMemberInfo) != NULL)
+ {
+ // Compare the 2 strings.
+ if (bCaseSensitive ?
+ pCurrMemberInfo->m_strName.Equals(strName) :
+ pCurrMemberInfo->m_strName.EqualsCaseInsensitive(strName))
+ {
+ // We have found the member, so ensure it is initialized and return it.
+ pCurrMemberInfo->EnsureInitialized();
+
+ RETURN pCurrMemberInfo;
+ }
+ }
+
+ // Process the next member.
+ pCurrMemberInfo = pCurrMemberInfo->m_pNext;
+ }
+
+ // No member has been found with the corresponding name.
+ RETURN NULL;
+}
+
+// Helper method used to create DispatchMemberInfos. This is only here because
+// we can't call new inside a method that has an EX_TRY statement.
+DispatchMemberInfo* DispatchInfo::CreateDispatchMemberInfoInstance(DISPID DispID, SString& strMemberName, OBJECTREF MemberInfoObj)
+{
+ CONTRACT (DispatchMemberInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ DispatchMemberInfo* pInfo = new DispatchMemberInfo(this, DispID, strMemberName, MemberInfoObj);
+ pInfo->SetHandle(MemberInfoObj->GetMethodTable()->GetDomain()->CreateHandle(MemberInfoObj));
+
+ RETURN pInfo;
+}
+
+// Used for cleanup of managed objects via custom marshalers. This class is stack-allocated
+// in code:DispatchInfo.InvokeMemberWorker to guarantee cleanup in the face of an exception.
+class ManagedParamCleanupHolder
+{
+ DispatchMemberInfo *m_pDispMemberInfo;
+ InvokeObjects *m_pObjs;
+ int m_CleanUpArrayArraySize;
+
+public:
+ ManagedParamCleanupHolder(DispatchMemberInfo *pDispMemberInfo, InvokeObjects *pObjs)
+ : m_pDispMemberInfo(pDispMemberInfo),
+ m_pObjs(pObjs),
+ m_CleanUpArrayArraySize(-1)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pObjs->CleanUpArray = NULL;
+ }
+
+ void SetData(PTRARRAYREF pCleanUpArray, int iCleanUpArrayArraySize)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_CleanUpArrayArraySize = iCleanUpArrayArraySize;
+ m_pObjs->CleanUpArray = pCleanUpArray;
+ }
+
+ ~ManagedParamCleanupHolder()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // If the member info requires managed object cleanup, then do it now.
+ if (m_pObjs->CleanUpArray != NULL && m_pDispMemberInfo->RequiresManagedObjCleanup())
+ {
+ GCX_COOP();
+ _ASSERTE(m_CleanUpArrayArraySize != -1);
+
+ for (int i = 0; i < m_CleanUpArrayArraySize; i++)
+ {
+ // Clean up all the managed parameters that were generated.
+ m_pObjs->TmpObj = m_pObjs->CleanUpArray->GetAt(i);
+ m_pDispMemberInfo->CleanUpParamManaged(i, &m_pObjs->TmpObj);
+ }
+ }
+ }
+};
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+void DispatchInfo::InvokeMemberWorker(DispatchMemberInfo* pDispMemberInfo,
+ InvokeObjects* pObjs,
+ int NumParams,
+ int NumArgs,
+ int NumNamedArgs,
+ int& NumByrefArgs,
+ int& iSrcArg,
+ DISPID id,
+ DISPPARAMS* pdp,
+ VARIANT* pVarRes,
+ WORD wFlags,
+ LCID lcid,
+ DISPID* pSrcArgNames,
+ VARIANT* pSrcArgs,
+ OBJECTHANDLE* aByrefStaticArrayBackupObjHandle,
+ int* pManagedMethodParamIndexMap,
+ VARIANT** aByrefArgOleVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ // There are too many fields in pObjs; here we assume that once one of them is
+ // protected, the whole structure is protected.
+ PRECONDITION(IsProtectedByGCFrame(&pObjs->MemberInfo));
+ }
+ CONTRACTL_END;
+
+ int iDestArg;
+ ManagedParamCleanupHolder cleanupHolder(pDispMemberInfo, pObjs);
+ BOOL bPropValIsByref = FALSE;
+ EnumMemberTypes MemberType;
+
+ Thread* pThread = GetThread();
+ AppDomain* pAppDomain = pThread->GetDomain();
+
+ SafeArrayHolder pSA(NULL);
+ VARIANT safeArrayVar;
+ HRESULT hr;
+
+ // Allocate the array of used flags.
+ BYTE *aArgUsedFlags = (BYTE*)_alloca(NumParams * sizeof(BYTE));
+ memset(aArgUsedFlags, 0, NumParams * sizeof(BYTE));
+
+ size_t cbByrefArgMngVariantIndex;
+ if (!ClrSafeInt<size_t>::multiply(sizeof(DWORD), NumArgs, cbByrefArgMngVariantIndex))
+ ThrowHR(COR_E_OVERFLOW);
+
+ DWORD *aByrefArgMngVariantIndex = (DWORD *)_alloca(cbByrefArgMngVariantIndex);
+
+
+ //
+ // Retrieve information required for the invoke call.
+ //
+
+ pObjs->OleAutBinder = DispatchInfo::GetOleAutBinder();
+
+
+ //
+ // Allocate the array of arguments
+ //
+
+ // Allocate the array that will contain the converted variants in the right order.
+ // If the invoke is for a PROPUT or a PROPPUTREF and we are going to call through
+ // invoke member then allocate the array one bigger to allow space for the property
+ // value.
+ int ArraySize = NumParams;
+ if (m_bInvokeUsingInvokeMember && (wFlags & (DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF)))
+ {
+ if (!ClrSafeInt<int>::addition(ArraySize, 1, ArraySize))
+ ThrowHR(COR_E_OVERFLOW);
+ }
+
+ pObjs->ParamArray = (PTRARRAYREF)AllocateObjectArray(ArraySize, g_pObjectClass);
+
+
+ //
+ // Convert the property set argument if the invoke is a PROPERTYPUT OR PROPERTYPUTREF.
+ //
+
+ if (wFlags & (DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF))
+ {
+ // Convert the variant.
+ VARIANT *pSrcOleVariant = RetrieveSrcVariant(&pdp->rgvarg[0]);
+ MarshalParamNativeToManaged(pDispMemberInfo, NumArgs, pSrcOleVariant, &pObjs->PropVal);
+
+ // Remember if the property value is byref or not.
+ bPropValIsByref = V_VT(pSrcOleVariant) & VT_BYREF;
+
+ // If the variant is a byref static array, then remember the property value.
+ if (IsVariantByrefStaticArray(pSrcOleVariant))
+ SetObjectReference(&pObjs->ByrefStaticArrayBackupPropVal, pObjs->PropVal, pAppDomain);
+ }
+
+
+ //
+ // Convert the named arguments.
+ //
+
+ if (!m_bInvokeUsingInvokeMember)
+ {
+ for (iSrcArg = 0; iSrcArg < NumNamedArgs; iSrcArg++)
+ {
+ // Determine the destination index.
+ iDestArg = pSrcArgNames[iSrcArg];
+
+ // Check for duplicate param DISPID's.
+ if (aArgUsedFlags[iDestArg] != 0)
+ COMPlusThrowHR(DISP_E_PARAMNOTFOUND);
+
+ // Convert the variant.
+ VARIANT *pSrcOleVariant = RetrieveSrcVariant(&pSrcArgs[iSrcArg]);
+ MarshalParamNativeToManaged(pDispMemberInfo, iDestArg, pSrcOleVariant, &pObjs->TmpObj);
+ pObjs->ParamArray->SetAt(iDestArg, pObjs->TmpObj);
+
+ // If the argument is byref then add it to the array of byref arguments.
+ if (V_VT(pSrcOleVariant) & VT_BYREF)
+ {
+ // Remember what arg this really is.
+ pManagedMethodParamIndexMap[NumByrefArgs] = iDestArg;
+
+ aByrefArgOleVariant[NumByrefArgs] = pSrcOleVariant;
+ aByrefArgMngVariantIndex[NumByrefArgs] = iDestArg;
+
+ // If the variant is a byref static array, then remember the objectref we
+ // converted the variant to.
+ if (IsVariantByrefStaticArray(pSrcOleVariant))
+ aByrefStaticArrayBackupObjHandle[NumByrefArgs] = pAppDomain->CreateHandle(pObjs->TmpObj);
+
+ NumByrefArgs++;
+ }
+
+ // Mark the slot the argument is in as occupied.
+ aArgUsedFlags[iDestArg] = 1;
+ }
+ }
+ else
+ {
+ for (iSrcArg = 0, iDestArg = 0; iSrcArg < NumNamedArgs; iSrcArg++, iDestArg++)
+ {
+ // Check for duplicate param DISPID's.
+ if (aArgUsedFlags[iDestArg] != 0)
+ COMPlusThrowHR(DISP_E_PARAMNOTFOUND);
+
+ // Convert the variant.
+ VARIANT *pSrcOleVariant = RetrieveSrcVariant(&pSrcArgs[iSrcArg]);
+ MarshalParamNativeToManaged(pDispMemberInfo, iDestArg, pSrcOleVariant, &pObjs->TmpObj);
+ pObjs->ParamArray->SetAt(iDestArg, pObjs->TmpObj);
+
+ // If the argument is byref then add it to the array of byref arguments.
+ if (V_VT(pSrcOleVariant) & VT_BYREF)
+ {
+ // Remember what arg this really is.
+ pManagedMethodParamIndexMap[NumByrefArgs] = iDestArg;
+
+ aByrefArgOleVariant[NumByrefArgs] = pSrcOleVariant;
+ aByrefArgMngVariantIndex[NumByrefArgs] = iDestArg;
+
+ // If the variant is a byref static array, then remember the objectref we
+ // converted the variant to.
+ if (IsVariantByrefStaticArray(pSrcOleVariant))
+ aByrefStaticArrayBackupObjHandle[NumByrefArgs] = pAppDomain->CreateHandle(pObjs->TmpObj);
+
+ NumByrefArgs++;
+ }
+
+ // Mark the slot the argument is in as occupied.
+ aArgUsedFlags[iDestArg] = 1;
+ }
+ }
+
+
+ //
+ // Fill in the positional arguments. These are copied in reverse order and we also
+ // need to skip the arguments already filled in by named arguments.
+ //
+ BOOL bLastParamOleVarArg = pDispMemberInfo && pDispMemberInfo->IsLastParamOleVarArg();
+ BOOL bByRefArg;
+
+ // We support VarArg by aligning with the behavior of params arrays in C#.
+ // Here is what we do depending on the arguments the caller passes:
+ // a) NumArgs == NumParams - 1:
+ // We generate a SAFEARRAY with 0 elements and pass the VARIANT
+ // wrapping it to the callee.
+ // b) NumArgs == NumParams && the first argument is NOT a safearray:
+ // Note that arguments are passed from right to left, so the first argument
+ // passed by the caller maps to the last parameter of the callee.
+ // We generate a SAFEARRAY to wrap the argument and pass the VARIANT
+ // wrapping the SAFEARRAY to the callee.
+ // c) NumArgs == NumParams && the first argument is a safearray:
+ // We pass it directly to the callee. To stay compatible with v2 behavior, we
+ // loosen the check by also accepting a variant whose VT is exactly VT_ARRAY.
+ // d) NumArgs > NumParams:
+ // We generate a SAFEARRAY to wrap them and pass the VARIANT wrapping
+ // the SAFEARRAY to the callee.
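+ // Illustration: with a vararg callee, a lone trailing argument that is already a
+ // safearray flows through unchanged (case c); otherwise the trailing arguments,
+ // possibly none, are gathered into a fresh SAFEARRAY (cases a, b, and d).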
+ for (iSrcArg = NumArgs - 1, iDestArg = 0;
+ iSrcArg >= NumNamedArgs || (iDestArg == NumParams - 1 && bLastParamOleVarArg)/* for vararg case a) */;
+ iSrcArg--, iDestArg++)
+ {
+ // Skip the arguments already filled in by named args.
+ for (; aArgUsedFlags[iDestArg] != 0; iDestArg++);
+ _ASSERTE(iDestArg < NumParams);
+
+ // Convert the variant.
+ VARIANT *pSrcOleVariant = NULL;
+ VARIANT *pFrstVarargOleVariant = NULL;
+ BOOL bSrcOleVariantCached = FALSE;
+ bByRefArg = FALSE;
+ if (iDestArg == NumParams-1 && bLastParamOleVarArg)
+ {
+ // VarArg scenario
+ BOOL bSrcArgIsSafeArray = FALSE;
+ if (iSrcArg == NumNamedArgs)
+ {
+ pSrcOleVariant = RetrieveSrcVariant(&pSrcArgs[iSrcArg]);
+ bSrcOleVariantCached = TRUE;
+ if ((V_VT(pSrcOleVariant) == (VT_ARRAY | VT_VARIANT)) ||
+ (V_VT(pSrcOleVariant) == VT_ARRAY) // see the comments in case c) above
+ )
+ {
+ // vararg case c)
+ bSrcArgIsSafeArray = TRUE;
+ bByRefArg = V_VT(pSrcOleVariant) & VT_BYREF;
+ }
+ }
+
+ if (!bSrcArgIsSafeArray)
+ {
+ // vararg case a), b) and d)
+ // 1. Construct a safearray
+ LONG lSafeArrayArg = 0;
+ bByRefArg = FALSE;
+ pSA = SafeArrayCreateVector(VT_VARIANT, 0, iSrcArg - NumNamedArgs + 1);
+ if (pSA.GetValue() == NULL)
+ COMPlusThrowHR(E_OUTOFMEMORY);
+ V_VT(&safeArrayVar) = VT_VARIANT | VT_ARRAY;
+ V_ARRAY(&safeArrayVar) = pSA;
+
+ // 2. Put the remaining srcArg into the safearray
+ for (; iSrcArg >= NumNamedArgs; iSrcArg--, lSafeArrayArg++)
+ {
+ if (!bSrcOleVariantCached)
+ pSrcOleVariant = RetrieveSrcVariant(&pSrcArgs[iSrcArg]);
+ else
+ bSrcOleVariantCached = FALSE;
+ if (FAILED(hr = SafeArrayPutElement(pSA, &lSafeArrayArg, pSrcOleVariant)))
+ COMPlusThrowHR(hr);
+ // Handle the unmarshal scenario: remember the first vararg variant for write-back.
+ if (lSafeArrayArg == 0)
+ pFrstVarargOleVariant = pSrcOleVariant;
+ // If any of the VARIANTs put into the safearray is BYREF, we need to marshal it back.
+ bByRefArg |= V_VT(pSrcOleVariant) & VT_BYREF;
+ }
+
+ // 3. Adjust the pSrcOleVariant in order to marshal to the params array in managed side
+ pSrcOleVariant = &safeArrayVar;
+ }
+ }
+ else
+ {
+ pSrcOleVariant = RetrieveSrcVariant(&pSrcArgs[iSrcArg]);
+ bByRefArg = V_VT(pSrcOleVariant) & VT_BYREF;
+ }
+
+
+ MarshalParamNativeToManaged(pDispMemberInfo, iDestArg, pSrcOleVariant, &pObjs->TmpObj);
+ pObjs->ParamArray->SetAt(iDestArg, pObjs->TmpObj);
+
+ // If the argument is byref then add it to the array of byref arguments.
+ if (bByRefArg)
+ {
+ // Remember what arg this really is.
+ pManagedMethodParamIndexMap[NumByrefArgs] = iDestArg;
+
+ // Remember the original variant so that we can unmarshal it back.
+ // Note that when pSA is set, pSrcOleVariant has been rewritten, so we use the
+ // first vararg argument instead.
+ if (pSA != NULL)
+ aByrefArgOleVariant[NumByrefArgs] = pFrstVarargOleVariant;
+ else
+ aByrefArgOleVariant[NumByrefArgs] = pSrcOleVariant;
+
+ aByrefArgMngVariantIndex[NumByrefArgs] = iDestArg;
+
+ // If the variant is a byref static array, then remember the objectref we
+ // converted the variant to.
+ if (IsVariantByrefStaticArray(pSrcOleVariant))
+ aByrefStaticArrayBackupObjHandle[NumByrefArgs] = pAppDomain->CreateHandle(pObjs->TmpObj);
+
+ NumByrefArgs++;
+ }
+ }
+
+ // Set the source arg back to -1 to indicate we are finished converting args.
+ iSrcArg = -1;
+
+
+ //
+ // Fill in all the remaining arguments with Missing.Value.
+ //
+
+ for (; iDestArg < NumParams; iDestArg++)
+ {
+ if (aArgUsedFlags[iDestArg] == 0)
+ {
+ pObjs->ParamArray->SetAt(iDestArg, pAppDomain->GetMissingObject());
+ }
+ }
+
+
+ //
+ // Set up the binding flags to pass to reflection.
+ //
+
+ int BindingFlags = ConvertInvokeFlagsToBindingFlags(wFlags) | BINDER_OptionalParamBinding;
+
+
+ //
+ // Do the actual invocation on the member info.
+ //
+
+ if (!m_bInvokeUsingInvokeMember)
+ {
+ PREFIX_ASSUME(pDispMemberInfo != NULL);
+
+ if (pDispMemberInfo->IsCultureAware())
+ {
+ // If the method is culture aware, then set the specified culture on the thread.
+ GetCultureInfoForLCID(lcid, &pObjs->CultureInfo);
+ pObjs->OldCultureInfo = pThread->GetCulture(FALSE);
+ pThread->SetCultureId(lcid, FALSE);
+ }
+
+ // If the method has custom marshalers then we will need to call
+ // the clean up method on the objects. So we need to make a copy of the
+ // ParamArray since it might be changed by reflection if any of the
+ // parameters are byref.
+ if (pDispMemberInfo->RequiresManagedObjCleanup())
+ {
+ // Allocate the clean up array.
+ int CleanUpArrayArraySize = NumParams;
+ if (wFlags & (DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF))
+ {
+ if (!ClrSafeInt<int>::addition(CleanUpArrayArraySize, 1, CleanUpArrayArraySize))
+ ThrowHR(COR_E_OVERFLOW);
+ }
+ cleanupHolder.SetData((PTRARRAYREF)AllocateObjectArray(CleanUpArrayArraySize, g_pObjectClass), CleanUpArrayArraySize);
+ _ASSERTE(pObjs->CleanUpArray != NULL);
+
+ // Copy the parameters into the clean up array.
+ for (int i = 0; i < ArraySize; i++)
+ pObjs->CleanUpArray->SetAt(i, pObjs->ParamArray->GetAt(i));
+
+ // If this invoke is for a PROPUT or PROPPUTREF, then add the property object to
+ // the end of the clean up array.
+ if (wFlags & (DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF))
+ pObjs->CleanUpArray->SetAt(NumParams, pObjs->PropVal);
+ }
+
+ // Retrieve the member info object and the type of the member.
+ pObjs->MemberInfo = ObjectFromHandle(pDispMemberInfo->m_hndMemberInfo);
+ MemberType = pDispMemberInfo->GetMemberType();
+
+ // Determine whether the member has a link time security check. If so we
+ // need to emulate this (since the caller is obviously not jitted in this
+ // case). Only methods and properties can have a link time check.
+ MethodDesc *pMDforSecurity = NULL;
+
+ if (MemberType == Method)
+ {
+ MethodDescCallSite getMethodHandle(METHOD__METHOD_BASE__GET_METHODDESC, &pObjs->MemberInfo);
+ ARG_SLOT arg = ObjToArgSlot(pObjs->MemberInfo);
+ pMDforSecurity = (MethodDesc*) getMethodHandle.Call_RetLPVOID(&arg);
+ }
+ else if (MemberType == Property)
+ {
+ MethodDescCallSite getSetter(METHOD__PROPERTY__GET_SETTER, &pObjs->MemberInfo);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(pObjs->MemberInfo),
+ BoolToArgSlot(false)
+ };
+ OBJECTREF method = getSetter.Call_RetOBJECTREF(args);
+ if (method == NULL)
+ {
+ MethodDescCallSite getGetter(METHOD__PROPERTY__GET_GETTER, &pObjs->MemberInfo);
+ ARG_SLOT args1[] =
+ {
+ ObjToArgSlot(pObjs->MemberInfo),
+ BoolToArgSlot(false)
+ };
+ method = getGetter.Call_RetOBJECTREF(args1);
+ }
+
+ if (method != NULL)
+ {
+ GCPROTECT_BEGIN(method)
+ MethodDescCallSite getMethodHandle(METHOD__METHOD_BASE__GET_METHODDESC, &method);
+ ARG_SLOT arg = ObjToArgSlot(method);
+ pMDforSecurity = (MethodDesc*) getMethodHandle.Call_RetLPVOID(&arg);
+ GCPROTECT_END();
+ }
+ }
+
+ if (pMDforSecurity)
+ Security::CheckLinkDemandAgainstAppDomain(pMDforSecurity);
+
+ switch (MemberType)
+ {
+ case Field:
+ {
+ // Make sure this invoke is actually for a property put or get.
+ if (wFlags & (DISPATCH_METHOD | DISPATCH_PROPERTYGET))
+ {
+ // Do some more validation now that we know the type of the invocation.
+ if (NumNamedArgs != 0)
+ COMPlusThrowHR(DISP_E_NONAMEDARGS);
+ if (NumArgs != 0)
+ COMPlusThrowHR(DISP_E_BADPARAMCOUNT);
+
+ // Retrieve the method descriptor that will be called on.
+ MethodDesc *pMD = GetFieldInfoMD(METHOD__FIELD__GET_VALUE, pObjs->MemberInfo->GetTypeHandle());
+ MethodDescCallSite getValue(pMD, &pObjs->MemberInfo);
+
+ // Prepare the arguments that will be passed to Invoke.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(pObjs->MemberInfo),
+ ObjToArgSlot(pObjs->Target),
+ };
+
+ // Do the actual method invocation.
+ pObjs->RetVal = getValue.Call_RetOBJECTREF(Args);
+ }
+ else if (wFlags & (DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF))
+ {
+ // Do some more validation now that we know the type of the invocation.
+ if (NumArgs != 0)
+ COMPlusThrowHR(DISP_E_BADPARAMCOUNT);
+ if (NumNamedArgs != 0)
+ COMPlusThrowHR(DISP_E_NONAMEDARGS);
+
+ // Retrieve the method descriptor that will be called on.
+ MethodDesc *pMD = GetFieldInfoMD(METHOD__FIELD__SET_VALUE, pObjs->MemberInfo->GetTypeHandle());
+ MethodDescCallSite setValue(pMD, &pObjs->MemberInfo);
+
+ // Prepare the arguments that will be passed to Invoke.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(pObjs->MemberInfo),
+ ObjToArgSlot(pObjs->Target),
+ ObjToArgSlot(pObjs->PropVal),
+ (ARG_SLOT) BindingFlags,
+ ObjToArgSlot(pObjs->OleAutBinder),
+ ObjToArgSlot(pObjs->CultureInfo),
+ };
+
+ // Do the actual method invocation.
+ setValue.Call(Args);
+ }
+ else
+ {
+ COMPlusThrowHR(DISP_E_MEMBERNOTFOUND);
+ }
+
+ break;
+ }
+
+ case Property:
+ {
+ // Make sure this invoke is actually for a property put or get.
+ if (wFlags & (DISPATCH_METHOD | DISPATCH_PROPERTYGET))
+ {
+ if (!IsPropertyAccessorVisible(false, &pObjs->MemberInfo))
+ COMPlusThrowHR(DISP_E_MEMBERNOTFOUND);
+
+ // Retrieve the method descriptor that will be called on.
+ MethodDesc *pMD = GetPropertyInfoMD(METHOD__PROPERTY__GET_VALUE, pObjs->MemberInfo->GetTypeHandle());
+ MethodDescCallSite getValue(pMD, &pObjs->MemberInfo);
+
+ // Prepare the arguments that will be passed to GetValue().
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(pObjs->MemberInfo),
+ ObjToArgSlot(pObjs->Target),
+ (ARG_SLOT) BindingFlags,
+ ObjToArgSlot(pObjs->OleAutBinder),
+ ObjToArgSlot(pObjs->ParamArray),
+ ObjToArgSlot(pObjs->CultureInfo),
+ };
+
+ // Do the actual method invocation.
+ pObjs->RetVal = getValue.Call_RetOBJECTREF(Args);
+ }
+ else if (wFlags & (DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF))
+ {
+ if (!IsPropertyAccessorVisible(true, &pObjs->MemberInfo))
+ COMPlusThrowHR(DISP_E_MEMBERNOTFOUND);
+
+ // Retrieve the method descriptor that will be called on.
+ MethodDesc *pMD = GetPropertyInfoMD(METHOD__PROPERTY__SET_VALUE, pObjs->MemberInfo->GetTypeHandle());
+ MethodDescCallSite setValue(pMD, &pObjs->MemberInfo);
+
+ // Prepare the arguments that will be passed to SetValue().
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(pObjs->MemberInfo),
+ ObjToArgSlot(pObjs->Target),
+ ObjToArgSlot(pObjs->PropVal),
+ (ARG_SLOT) BindingFlags,
+ ObjToArgSlot(pObjs->OleAutBinder),
+ ObjToArgSlot(pObjs->ParamArray),
+ ObjToArgSlot(pObjs->CultureInfo),
+ };
+
+ // Do the actual method invocation.
+ setValue.Call(Args);
+ }
+ else
+ {
+ COMPlusThrowHR(DISP_E_MEMBERNOTFOUND);
+ }
+
+ break;
+ }
+
+ case Method:
+ {
+ // Make sure this invoke is actually for a method. We also allow
+ // prop gets since it is harmless and it allows the user a bit
+ // more freedom.
+ if (!(wFlags & (DISPATCH_METHOD | DISPATCH_PROPERTYGET)))
+ COMPlusThrowHR(DISP_E_MEMBERNOTFOUND);
+
+ // Retrieve the method descriptor that will be called on.
+ MethodDesc *pMD = GetMethodInfoMD(METHOD__METHOD__INVOKE, pObjs->MemberInfo->GetTypeHandle());
+ MethodDescCallSite invoke(pMD, &pObjs->MemberInfo);
+
+ // Prepare the arguments that will be passed to Invoke.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(pObjs->MemberInfo),
+ ObjToArgSlot(pObjs->Target),
+ (ARG_SLOT) BindingFlags,
+ ObjToArgSlot(pObjs->OleAutBinder),
+ ObjToArgSlot(pObjs->ParamArray),
+ ObjToArgSlot(pObjs->CultureInfo),
+ };
+
+ // Do the actual method invocation.
+ pObjs->RetVal = invoke.Call_RetOBJECTREF(Args);
+ break;
+ }
+
+ default:
+ {
+ COMPlusThrowHR(E_UNEXPECTED);
+ }
+ }
+ }
+ else
+ {
+ // Convert the LCID into a CultureInfo.
+ GetCultureInfoForLCID(lcid, &pObjs->CultureInfo);
+
+ pObjs->ReflectionObj = GetReflectionObject();
+
+ // Retrieve the method descriptor that will be called on.
+ MethodDesc *pMD = GetInvokeMemberMD();
+ MethodDescCallSite invokeMember(pMD, &pObjs->ReflectionObj);
+
+ // Allocate the string that will contain the name of the member.
+ if (!pDispMemberInfo)
+ {
+ WCHAR strTmp[64];
+ _snwprintf_s(strTmp, NumItems(strTmp), _TRUNCATE, DISPID_NAME_FORMAT_STRING, id);
+ pObjs->MemberName = (OBJECTREF)StringObject::NewString(strTmp);
+ }
+ else
+ {
+ pObjs->MemberName = (OBJECTREF)StringObject::NewString(pDispMemberInfo->m_strName.GetUnicode());
+ }
+
+ // If there are named arguments, then set up the array of named arguments
+ // to pass to InvokeMember.
+ if (NumNamedArgs > 0)
+ SetUpNamedParamArray(pDispMemberInfo, pSrcArgNames, NumNamedArgs, &pObjs->NamedArgArray);
+
+ // If this is a PROPUT or a PROPPUTREF then we need to add the value
+ // being set as the last argument in the argument array.
+ if (wFlags & (DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF))
+ pObjs->ParamArray->SetAt(NumParams, pObjs->PropVal);
+
+ // Prepare the arguments that will be passed to Invoke.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(pObjs->ReflectionObj),
+ ObjToArgSlot(pObjs->MemberName),
+ (ARG_SLOT) BindingFlags,
+ ObjToArgSlot(pObjs->OleAutBinder),
+ ObjToArgSlot(pObjs->Target),
+ ObjToArgSlot(pObjs->ParamArray),
+ ObjToArgSlot(NULL), // @TODO(DM): Look into setting the byref modifiers.
+ ObjToArgSlot(pObjs->CultureInfo),
+ ObjToArgSlot(pObjs->NamedArgArray),
+ };
+
+ // Do the actual method invocation.
+ pObjs->RetVal = invokeMember.Call_RetOBJECTREF(Args);
+ }
+
+
+ //
+ // Convert the return value and the byref arguments.
+ //
+
+ // If the property value is byref then convert it back.
+ if (bPropValIsByref)
+ MarshalParamManagedToNativeRef(pDispMemberInfo, NumArgs, &pObjs->PropVal, &pObjs->ByrefStaticArrayBackupPropVal, &pdp->rgvarg[0]);
+
+ // Convert all the ByRef arguments back.
+ for (int i = 0; i < NumByrefArgs; i++)
+ {
+ // Get the real parameter index for this arg.
+ int iParamIndex = pManagedMethodParamIndexMap[i];
+
+ if (!pDispMemberInfo || m_bInvokeUsingInvokeMember || !pDispMemberInfo->IsParamInOnly(iParamIndex))
+ {
+ pObjs->TmpObj = pObjs->ParamArray->GetAt(aByrefArgMngVariantIndex[i]);
+ if (pSA != NULL && iParamIndex == NumParams -1)
+ {
+ // VarArg scenario
+ // Here we only unmarshal the object whose corresponding VARIANT is the vararg.
+ OleVariant::MarshalVariantArrayComToOle((BASEARRAYREF*)&pObjs->TmpObj, (void *)(aByrefArgOleVariant[i]), NULL, TRUE, FALSE, TRUE, TRUE, -1);
+ }
+ else
+ {
+ MarshalParamManagedToNativeRef(pDispMemberInfo, iParamIndex, &pObjs->TmpObj, (OBJECTREF*)aByrefStaticArrayBackupObjHandle[i], aByrefArgOleVariant[i]);
+ }
+ }
+
+ if (aByrefStaticArrayBackupObjHandle[i])
+ {
+ DestroyHandle(aByrefStaticArrayBackupObjHandle[i]);
+ aByrefStaticArrayBackupObjHandle[i] = NULL;
+ }
+ }
+
+ // Convert the return COM+ object to an OLE variant.
+ if (pVarRes)
+ MarshalReturnValueManagedToNative(pDispMemberInfo, &pObjs->RetVal, pVarRes);
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+void DispatchInfo::InvokeMemberDebuggerWrapper(
+ DispatchMemberInfo* pDispMemberInfo,
+ InvokeObjects* pObjs,
+ int NumParams,
+ int NumArgs,
+ int NumNamedArgs,
+ int& NumByrefArgs,
+ int& iSrcArg,
+ DISPID id,
+ DISPPARAMS* pdp,
+ VARIANT* pVarRes,
+ WORD wFlags,
+ LCID lcid,
+ DISPID* pSrcArgNames,
+ VARIANT* pSrcArgs,
+ OBJECTHANDLE* aByrefStaticArrayBackupObjHandle,
+ int* pManagedMethodParamIndexMap,
+ VARIANT** aByrefArgOleVariant,
+    Frame * pFrame)
+{
+ // Use static contracts b/c we have SEH.
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ // @todo - we have a PAL_TRY/PAL_EXCEPT here as a general (cross-platform) way to get a 1st-pass
+ // filter. If that's bad perf, we could inline an FS:0 handler for x86-only; and then inline
+ // both this wrapper and the main body.
+
+ struct Param : public NotifyOfCHFFilterWrapperParam
+ {
+ DispatchInfo* pThis;
+ DispatchMemberInfo* pDispMemberInfo;
+ InvokeObjects* pObjs;
+ int NumParams;
+ int NumArgs;
+ int NumNamedArgs;
+ int& NumByrefArgs;
+ int& iSrcArg;
+ DISPID id;
+ DISPPARAMS* pdp;
+ VARIANT* pVarRes;
+ WORD wFlags;
+ LCID lcid;
+ DISPID* pSrcArgNames;
+ VARIANT* pSrcArgs;
+ OBJECTHANDLE* aByrefStaticArrayBackupObjHandle;
+ int* pManagedMethodParamIndexMap;
+ VARIANT** aByrefArgOleVariant;
+
+ Param(int& _NumByrefArgs, int& _iSrcArg)
+ : NumByrefArgs(_NumByrefArgs), iSrcArg(_iSrcArg)
+ {}
+ } param(NumByrefArgs, iSrcArg);
+
+ param.pFrame = GetThread()->GetFrame(); // Inherited from NotifyOfCHFFilterWrapperParam
+ param.pThis = this;
+ param.pDispMemberInfo = pDispMemberInfo;
+ param.pObjs = pObjs;
+ param.NumParams = NumParams;
+ param.NumArgs = NumArgs;
+ param.NumNamedArgs = NumNamedArgs;
+    // NumByrefArgs and iSrcArg are reference members bound in the Param constructor above.
+ param.id = id;
+ param.pdp = pdp;
+ param.pVarRes = pVarRes;
+ param.wFlags = wFlags;
+ param.lcid = lcid;
+ param.pSrcArgNames = pSrcArgNames;
+ param.pSrcArgs = pSrcArgs;
+ param.aByrefStaticArrayBackupObjHandle = aByrefStaticArrayBackupObjHandle;
+ param.pManagedMethodParamIndexMap = pManagedMethodParamIndexMap;
+ param.aByrefArgOleVariant = aByrefArgOleVariant;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ pParam->pThis->InvokeMemberWorker(pParam->pDispMemberInfo,
+ pParam->pObjs,
+ pParam->NumParams,
+ pParam->NumArgs,
+ pParam->NumNamedArgs,
+ pParam->NumByrefArgs,
+ pParam->iSrcArg,
+ pParam->id,
+ pParam->pdp,
+ pParam->pVarRes,
+ pParam->wFlags,
+ pParam->lcid,
+ pParam->pSrcArgNames,
+ pParam->pSrcArgs,
+ pParam->aByrefStaticArrayBackupObjHandle,
+ pParam->pManagedMethodParamIndexMap,
+ pParam->aByrefArgOleVariant);
+ }
+ PAL_EXCEPT_FILTER(NotifyOfCHFFilterWrapper)
+ {
+ // Should never reach here b/c handler should always continue search.
+ _ASSERTE(false);
+ }
+ PAL_ENDTRY
+}
+
+// Helper method that invokes the member with the specified DISPID.
+HRESULT DispatchInfo::InvokeMember(SimpleComCallWrapper *pSimpleWrap, DISPID id, LCID lcid, WORD wFlags, DISPPARAMS *pdp, VARIANT *pVarRes, EXCEPINFO *pei, IServiceProvider *pspCaller, unsigned int *puArgErr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pSimpleWrap));
+ PRECONDITION(CheckPointer(pdp, NULL_OK));
+ PRECONDITION(CheckPointer(pVarRes, NULL_OK));
+ PRECONDITION(CheckPointer(pei, NULL_OK));
+ PRECONDITION(CheckPointer(pspCaller, NULL_OK));
+ PRECONDITION(CheckPointer(puArgErr, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ int iSrcArg = -1;
+ int iBaseErrorArg = 0;
+ int NumArgs;
+ int NumNamedArgs;
+ int NumParams;
+ InvokeObjects Objs;
+ DISPID *pSrcArgNames = NULL;
+ VARIANT *pSrcArgs = NULL;
+ ULONG_PTR ulActCtxCookie = 0;
+ Thread *pThread = GetThread();
+
+ //
+ // Validate the arguments.
+ //
+
+ if (!pdp)
+ return E_POINTER;
+ if (!pdp->rgvarg && (pdp->cArgs > 0))
+ return E_INVALIDARG;
+ if (!pdp->rgdispidNamedArgs && (pdp->cNamedArgs > 0))
+ return E_INVALIDARG;
+ if (pdp->cNamedArgs > pdp->cArgs)
+ return E_INVALIDARG;
+ if ((int)pdp->cArgs < 0 || (int)pdp->cNamedArgs < 0)
+ return E_INVALIDARG;
+
+
+ //
+ // Clear the out arguments before we start.
+ //
+
+ if (pVarRes)
+ SafeVariantClear(pVarRes);
+ if (puArgErr)
+ *puArgErr = -1;
+
+
+ //
+ // Convert the default LCID's to actual LCID's.
+ //
+
+ if(lcid == LOCALE_SYSTEM_DEFAULT || lcid == 0)
+ lcid = GetSystemDefaultLCID();
+
+ if(lcid == LOCALE_USER_DEFAULT)
+ lcid = GetUserDefaultLCID();
+
+ //
+ // Set the value of the variables we use internally.
+ //
+
+ NumArgs = pdp->cArgs;
+ NumNamedArgs = pdp->cNamedArgs;
+ memset(&Objs, 0, sizeof(InvokeObjects));
+
+ if (wFlags & (DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF))
+ {
+ // Since this invoke is for a property put or put ref we need to add 1 to
+ // the iSrcArg to get the argument that is in error.
+ iBaseErrorArg = 1;
+
+ if (NumArgs < 1)
+ {
+ return DISP_E_BADPARAMCOUNT;
+ }
+ else
+ {
+ NumArgs--;
+ pSrcArgs = &pdp->rgvarg[1];
+ }
+
+ if (NumNamedArgs < 1)
+ {
+ if (NumNamedArgs < 0)
+ return DISP_E_BADPARAMCOUNT;
+
+ // Verify if we really want to do this or return E_INVALIDARG instead.
+ _ASSERTE(NumNamedArgs == 0);
+ _ASSERTE(pSrcArgNames == NULL);
+ }
+ else
+ {
+ NumNamedArgs--;
+ pSrcArgNames = &pdp->rgdispidNamedArgs[1];
+ }
+ }
+ else
+ {
+ pSrcArgs = pdp->rgvarg;
+ pSrcArgNames = pdp->rgdispidNamedArgs;
+ }
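+
+    // Illustrative sketch (an assumption for clarity, not part of the original logic):
+    // for a late-bound property put such as "obj.Prop = 42", OLE Automation packs the
+    // DISPPARAMS as follows:
+    //
+    //     VARIANT val;
+    //     V_VT(&val) = VT_I4;
+    //     V_I4(&val) = 42;
+    //     DISPID putid = DISPID_PROPERTYPUT;
+    //     DISPPARAMS dp = { &val, &putid, /* cArgs */ 1, /* cNamedArgs */ 1 };
+    //
+    // which is why the code above skips rgvarg[0] and rgdispidNamedArgs[0] for
+    // PROPERTYPUT/PROPERTYPUTREF and sets iBaseErrorArg to 1 when reporting errors.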
+
+ //
+ // Do a lookup in the hashtable to find the DispatchMemberInfo for the DISPID.
+ //
+
+ DispatchMemberInfo *pDispMemberInfo = FindMember(id);
+ if (!pDispMemberInfo || !(*((Object **)pDispMemberInfo->m_hndMemberInfo)))
+ {
+ pDispMemberInfo = NULL;
+ }
+ else if (pDispMemberInfo->IsNeutered())
+ {
+ COMPlusThrow(kInvalidOperationException);
+ }
+
+ //
+ // If the member is not known then make sure that the DispatchInfo we have
+ // supports unknown members.
+ //
+
+ if (m_bInvokeUsingInvokeMember)
+ {
+        // Since we do not have any information regarding the member, we must
+        // assume the number of formal parameters matches the number of args.
+ NumParams = NumArgs;
+ }
+ else
+ {
+ // If we haven't found the member then fail the invoke call.
+ if (!pDispMemberInfo)
+ return DISP_E_MEMBERNOTFOUND;
+
+ // DISPATCH_CONSTRUCT only works when calling InvokeMember.
+ if (wFlags & DISPATCH_CONSTRUCT)
+ return E_INVALIDARG;
+
+ // We have the member so retrieve the number of formal parameters.
+ NumParams = pDispMemberInfo->GetNumParameters();
+
+ if (pDispMemberInfo->IsLastParamOleVarArg())
+ {
+            // Named args aren't allowed in a vararg function, unless it's a lone
+            // DISPID_PROPERTYPUT (note that we have already decremented NumNamedArgs
+            // for DISPID_PROPERTYPUT, so no special check is needed here for it).
+            // The logic is borrowed from OLEAUT32!CTypeInfo2::Invoke.
+ if (NumNamedArgs > 0)
+ return DISP_E_NONAMEDARGS;
+ }
+ else
+ {
+ // Make sure the number of arguments does not exceed the number of parameters.
+ if(NumArgs > NumParams)
+ return DISP_E_BADPARAMCOUNT;
+ }
+
+ // Validate that all the named arguments are known.
+ for (iSrcArg = 0; iSrcArg < NumNamedArgs; iSrcArg++)
+ {
+ // There are some members we do not know about so we will call InvokeMember()
+ // passing in the DISPID's directly so the caller can try to handle them.
+ if (pSrcArgNames[iSrcArg] < 0 || pSrcArgNames[iSrcArg] >= NumParams)
+ return DISP_E_MEMBERNOTFOUND;
+ }
+ }
+
+ OBJECTREF pThrowable = NULL;
+
+ //
+ // The member is present so we need to convert the arguments and then do the
+ // actual invocation.
+ //
+ GCPROTECT_BEGIN(pThrowable);
+ GCPROTECT_BEGIN(Objs);
+ {
+ //
+ // Allocate information used by the method.
+ //
+
+ int NumByrefArgs = 0;
+
+ // Allocate the array of backup byref static array objects.
+ size_t cbStaticArrayBackupObjHandle;
+ if (!ClrSafeInt<size_t>::multiply(sizeof(OBJECTHANDLE *), NumArgs, cbStaticArrayBackupObjHandle))
+ ThrowHR(COR_E_OVERFLOW);
+
+ OBJECTHANDLE *aByrefStaticArrayBackupObjHandle = (OBJECTHANDLE *)_alloca(cbStaticArrayBackupObjHandle);
+ memset(aByrefStaticArrayBackupObjHandle, 0, cbStaticArrayBackupObjHandle);
+
+ // Allocate the array that maps method params to their indices.
+ size_t cbManagedMethodParamIndexMap;
+ if (!ClrSafeInt<size_t>::multiply(sizeof(int), NumArgs, cbManagedMethodParamIndexMap))
+ ThrowHR(COR_E_OVERFLOW);
+
+ int *pManagedMethodParamIndexMap = (int *)_alloca(cbManagedMethodParamIndexMap);
+
+ // Allocate the array of byref objects.
+ size_t cbByrefArgOleVariant;
+ if (!ClrSafeInt<size_t>::multiply(sizeof(VARIANT *), NumArgs, cbByrefArgOleVariant))
+ ThrowHR(COR_E_OVERFLOW);
+
+ VARIANT **aByrefArgOleVariant = (VARIANT **)_alloca(cbByrefArgOleVariant);
+
+ Objs.Target = pSimpleWrap->GetObjectRef();
+
+ //
+ // Invoke the method.
+ //
+
+ // The sole purpose of having this frame is to tell the debugger that we have a catch handler here
+ // which may swallow managed exceptions. The debugger needs this in order to send a
+ // CatchHandlerFound (CHF) notification.
+ FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame;
+ EX_TRY
+ {
+ InvokeMemberDebuggerWrapper(pDispMemberInfo,
+ &Objs,
+ NumParams,
+ NumArgs,
+ NumNamedArgs,
+ NumByrefArgs,
+ iSrcArg,
+ id,
+ pdp,
+ pVarRes,
+ wFlags,
+ lcid,
+ pSrcArgNames,
+ pSrcArgs,
+ aByrefStaticArrayBackupObjHandle,
+ pManagedMethodParamIndexMap,
+ aByrefArgOleVariant,
+ &catchFrame);
+ }
+ EX_CATCH
+ {
+ pThrowable = GET_THROWABLE();
+
+ // RethrowCorruptingExceptionsEx, in EX_END_CATCH below, will ensure that CEs are rethrown.
+ }
+ EX_END_CATCH(RethrowCorruptingExceptionsEx(!CEHelper::CanIDispatchTargetHandleException()))
+ catchFrame.Pop();
+
+ if (pThrowable != NULL)
+ {
+ // Do cleanup - make sure that return value and outgoing arguments are cleared
+ if (pVarRes != NULL)
+ SafeVariantClear(pVarRes);
+
+ for (int i = 0; i < NumByrefArgs; i++)
+ {
+ if (!pDispMemberInfo || m_bInvokeUsingInvokeMember || !pDispMemberInfo->IsParamInOnly(i))
+ {
+ // Out and in/out byref arguments are outgoing and should be cleared
+ CleanUpNativeParam(pDispMemberInfo, pManagedMethodParamIndexMap[i], (OBJECTREF *)aByrefStaticArrayBackupObjHandle[i], aByrefArgOleVariant[i]);
+ }
+
+ // Destroy all the handles we allocated for the byref static safe array's.
+ if (aByrefStaticArrayBackupObjHandle[i] != NULL)
+ {
+ DestroyHandle(aByrefStaticArrayBackupObjHandle[i]);
+ aByrefStaticArrayBackupObjHandle[i] = NULL;
+ }
+ }
+
+ // Do HR conversion.
+ hr = SetupErrorInfo(pThrowable);
+ if (hr == COR_E_TARGETINVOCATION)
+ {
+ hr = DISP_E_EXCEPTION;
+ if (pei)
+ {
+                // Retrieve the exception information.
+ GetExcepInfoForInvocationExcep(pThrowable, pei);
+
+                // Clear the IErrorInfo on the current thread since it contains
+                // information on the TargetInvocationException that conflicts with
+                // the information in the returned EXCEPINFO.
+ IErrorInfo *pErrInfo = NULL;
+ HRESULT hr2 = SafeGetErrorInfo(&pErrInfo);
+ _ASSERTE(hr2 == S_OK);
+ SafeRelease(pErrInfo);
+ }
+ }
+ else if (hr == COR_E_OVERFLOW)
+ {
+ hr = DISP_E_OVERFLOW;
+ if (iSrcArg != -1)
+ {
+ if (puArgErr)
+ *puArgErr = iSrcArg + iBaseErrorArg;
+ }
+ }
+ else if (hr == COR_E_INVALIDOLEVARIANTTYPE)
+ {
+ hr = DISP_E_BADVARTYPE;
+ if (iSrcArg != -1)
+ {
+ if (puArgErr)
+ *puArgErr = iSrcArg + iBaseErrorArg;
+ }
+ }
+ else if (hr == COR_E_ARGUMENT)
+ {
+ hr = E_INVALIDARG;
+ if (iSrcArg != -1)
+ {
+ if (puArgErr)
+ *puArgErr = iSrcArg + iBaseErrorArg;
+ }
+ }
+ else if (hr == COR_E_SAFEARRAYTYPEMISMATCH)
+ {
+ hr = DISP_E_TYPEMISMATCH;
+ if (iSrcArg != -1)
+ {
+ if (puArgErr)
+ *puArgErr = iSrcArg + iBaseErrorArg;
+ }
+ }
+ else if (hr == COR_E_MISSINGMEMBER || hr == COR_E_MISSINGMETHOD)
+ {
+ hr = DISP_E_MEMBERNOTFOUND;
+
+ // This exception should never occur while we are marshaling arguments.
+ _ASSERTE(iSrcArg == -1);
+ }
+ }
+
+ // If the culture was changed then restore it to the old culture.
+ if (Objs.OldCultureInfo != NULL)
+ pThread->SetCulture(&Objs.OldCultureInfo, FALSE);
+ }
+ GCPROTECT_END();
+ GCPROTECT_END();
+ return hr;
+}
+
+void DispatchInfo::DestroyMemberInfoHandles()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DispatchMemberInfo* pCurrMember = m_pFirstMemberInfo;
+ while (pCurrMember)
+ {
+ // Destroy the handle
+ DestroyHandle(pCurrMember->m_hndMemberInfo);
+ pCurrMember->m_hndMemberInfo = NULL;
+ // Process the next member.
+ pCurrMember = pCurrMember->m_pNext;
+ }
+}
+
+// Parameter marshaling helpers.
+void DispatchInfo::MarshalParamNativeToManaged(DispatchMemberInfo *pMemberInfo, int iParam, VARIANT *pSrcVar, OBJECTREF *pDestObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (pMemberInfo && !m_bInvokeUsingInvokeMember)
+ pMemberInfo->MarshalParamNativeToManaged(iParam, pSrcVar, pDestObj);
+ else
+ OleVariant::MarshalObjectForOleVariant(pSrcVar, pDestObj);
+}
+
+void DispatchInfo::MarshalParamManagedToNativeRef(DispatchMemberInfo *pMemberInfo, int iParam, OBJECTREF *pSrcObj, OBJECTREF *pBackupStaticArray, VARIANT *pRefVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMemberInfo, NULL_OK));
+ PRECONDITION(pSrcObj != NULL);
+ PRECONDITION(CheckPointer(pRefVar));
+ }
+ CONTRACTL_END;
+
+ if (pBackupStaticArray && (*pBackupStaticArray != NULL))
+ {
+ // The contents of a static array can change, but not the array itself. If
+ // the array has changed, then throw an exception.
+ if (*pSrcObj != *pBackupStaticArray)
+ COMPlusThrow(kInvalidOperationException, IDS_INVALID_REDIM);
+
+ // Retrieve the element VARTYPE and method table.
+ VARTYPE ElementVt = V_VT(pRefVar) & ~(VT_BYREF | VT_ARRAY);
+ MethodTable *pElementMT = (*(BASEARRAYREF *)pSrcObj)->GetArrayElementTypeHandle().GetMethodTable();
+
+ // Convert the contents of the managed array into the original SAFEARRAY.
+ OleVariant::MarshalSafeArrayForArrayRef((BASEARRAYREF *)pSrcObj, *V_ARRAYREF(pRefVar), ElementVt, pElementMT);
+ }
+    else
+    {
+        if (pMemberInfo && !m_bInvokeUsingInvokeMember)
+            pMemberInfo->MarshalParamManagedToNativeRef(iParam, pSrcObj, pRefVar);
+        else
+            OleVariant::MarshalOleRefVariantForObject(pSrcObj, pRefVar);
+    }
+}
+
+void DispatchInfo::MarshalReturnValueManagedToNative(DispatchMemberInfo *pMemberInfo, OBJECTREF *pSrcObj, VARIANT *pDestVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (pMemberInfo && !m_bInvokeUsingInvokeMember)
+ pMemberInfo->MarshalReturnValueManagedToNative(pSrcObj, pDestVar);
+ else
+ OleVariant::MarshalOleVariantForObject(pSrcObj, pDestVar);
+}
+
+void DispatchInfo::CleanUpNativeParam(DispatchMemberInfo *pDispMemberInfo, int iParamIndex, OBJECTREF *pBackupStaticArray, VARIANT *pArgVariant)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pArgVariant != NULL);
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ switch (V_VT(pArgVariant) & ~VT_BYREF)
+ {
+ case VT_I1: case VT_I2: case VT_I4: case VT_I8:
+ case VT_UI1: case VT_UI2: case VT_UI4: case VT_UI8:
+ case VT_INT: case VT_UINT: case VT_PTR:
+ case VT_R4: case VT_R8: case VT_BOOL:
+ case VT_CY: case VT_DATE:
+ case VT_ERROR: case VT_HRESULT:
+ case VT_DECIMAL:
+ {
+ // the argument type is a value type - overwrite it with zeros
+ UINT uSize = OleVariant::GetElementSizeForVarType(V_VT(pArgVariant) & ~VT_BYREF, NULL);
+ FillMemory(V_BYREF(pArgVariant), uSize, 0);
+ break;
+ }
+
+ default:
+ {
+ // marshal managed null into the VARIANT which works for reference types
+ OBJECTREF Null = NULL;
+
+ GCPROTECT_BEGIN(Null); // the local stays NULL, this is just to satisfy contracts
+ MarshalParamManagedToNativeRef(pDispMemberInfo, iParamIndex, &Null, pBackupStaticArray, pArgVariant);
+ GCPROTECT_END();
+ }
+ }
+ }
+ EX_CATCH
+ {
+ // if the argument was totally corrupted and cleanup failed, just swallow it and continue
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
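+
+// Illustrative example (a sketch under assumed caller state, not part of the original
+// change): for an argument passed as VT_BYREF | VT_I4, the value-type case above simply
+// zeros the 4 bytes the VARIANT points at:
+//
+//     LONG value = 42;
+//     VARIANT arg;
+//     V_VT(&arg) = VT_I4 | VT_BYREF;
+//     V_I4REF(&arg) = &value;
+//     // after CleanUpNativeParam: value == 0
+//
+// A VT_BYREF | VT_DISPATCH argument instead falls into the default case, which marshals
+// a managed null back into the referenced VARIANT.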
+
+void DispatchInfo::SetUpNamedParamArray(DispatchMemberInfo *pMemberInfo, DISPID *pSrcArgNames, int NumNamedArgs, PTRARRAYREF *pNamedParamArray)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pMemberInfo, NULL_OK));
+ PRECONDITION(CheckPointer(pSrcArgNames));
+ PRECONDITION(pNamedParamArray != NULL);
+ }
+ CONTRACTL_END;
+
+ PTRARRAYREF ParamArray = NULL;
+ int NumParams = pMemberInfo ? pMemberInfo->GetNumParameters() : 0;
+ int iSrcArg;
+ int iDestArg;
+ BOOL bGotParams = FALSE;
+
+ GCPROTECT_BEGIN(ParamArray)
+ {
+ // Allocate the array of named parameters.
+ *pNamedParamArray = (PTRARRAYREF)AllocateObjectArray(NumNamedArgs, g_pObjectClass);
+ ParamArray = pMemberInfo ? pMemberInfo->GetParameters() : NULL;
+ int numArrayComponents = (pMemberInfo && ParamArray != NULL)? (int)ParamArray->GetNumComponents() : 0;
+
+ // Convert all the named parameters from DISPID's to string.
+ for (iSrcArg = 0, iDestArg = 0; iSrcArg < NumNamedArgs; iSrcArg++, iDestArg++)
+ {
+ BOOL bParamNameSet = FALSE;
+
+ // Check to see if the DISPID is one that we can map to a parameter name.
+ if (pMemberInfo && pSrcArgNames[iSrcArg] >= 0 && pSrcArgNames[iSrcArg] < numArrayComponents)
+ {
+ // The DISPID is one that we assigned, map it back to its name.
+
+ // If we managed to get the parameters and if the current ID maps
+ // to an entry in the array.
+ if (ParamArray != NULL && numArrayComponents > pSrcArgNames[iSrcArg])
+ {
+ OBJECTREF ParamInfoObj = ParamArray->GetAt(pSrcArgNames[iSrcArg]);
+ GCPROTECT_BEGIN(ParamInfoObj)
+ {
+ // Retrieve the MD to use to retrieve the name of the parameter.
+ MethodDesc *pGetParamNameMD = MemberLoader::FindPropertyMethod(ParamInfoObj->GetMethodTable(), PARAMETERINFO_NAME_PROP, PropertyGet);
+ _ASSERTE(pGetParamNameMD && "Unable to find getter method for property ParameterInfo::Name");
+ MethodDescCallSite getParamName(pGetParamNameMD, &ParamInfoObj);
+
+ // Retrieve the name of the parameter.
+ ARG_SLOT GetNameArgs[] =
+ {
+ ObjToArgSlot(ParamInfoObj)
+ };
+
+ STRINGREF MemberNameObj = getParamName.Call_RetSTRINGREF(GetNameArgs);
+
+ // If we got a valid name back then use it as the named parameter.
+ if (MemberNameObj != NULL)
+ {
+ (*pNamedParamArray)->SetAt(iDestArg, (OBJECTREF)MemberNameObj);
+ bParamNameSet = TRUE;
+ }
+ }
+ GCPROTECT_END();
+ }
+ }
+
+ // If we haven't set the param name yet, then set it to [DISP=XXXX].
+ if (!bParamNameSet)
+ {
+ WCHAR wszTmp[64];
+
+ _snwprintf_s(wszTmp, NumItems(wszTmp), _TRUNCATE, DISPID_NAME_FORMAT_STRING, pSrcArgNames[iSrcArg]);
+ STRINGREF strTmp = StringObject::NewString(wszTmp);
+ (*pNamedParamArray)->SetAt(iDestArg, (OBJECTREF)strTmp);
+ }
+ }
+ }
+ GCPROTECT_END();
+}
+
+VARIANT *DispatchInfo::RetrieveSrcVariant(VARIANT *pDispParamsVariant)
+{
+ CONTRACT (VARIANT*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDispParamsVariant));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // For VB6 compatibility reasons, if the VARIANT is a VT_BYREF | VT_VARIANT that
+ // contains another VARIANT with VT_BYREF | VT_VARIANT, then we need to extract the
+ // inner VARIANT and use it instead of the outer one. Note that if the inner VARIANT
+ // is VT_BYREF | VT_VARIANT | VT_ARRAY, it will pass the below test too.
+ if (V_VT(pDispParamsVariant) == (VT_VARIANT | VT_BYREF) &&
+ (V_VT(V_VARIANTREF(pDispParamsVariant)) & (VT_TYPEMASK | VT_BYREF)) == (VT_VARIANT | VT_BYREF))
+ {
+ RETURN (V_VARIANTREF(pDispParamsVariant));
+ }
+ else
+ {
+ RETURN pDispParamsVariant;
+ }
+}
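+
+// Minimal sketch of the VB6 scenario handled above (assumed caller behavior, shown for
+// illustration only):
+//
+//     VARIANT actual;                        // the caller's real argument
+//     VARIANT inner;
+//     V_VT(&inner) = VT_VARIANT | VT_BYREF;
+//     V_VARIANTREF(&inner) = &actual;
+//     VARIANT outer;
+//     V_VT(&outer) = VT_VARIANT | VT_BYREF;
+//     V_VARIANTREF(&outer) = &inner;
+//
+//     // RetrieveSrcVariant(&outer) returns &inner, so marshaling operates on the
+//     // VARIANT that actually references the caller's data.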
+
+
+bool DispatchInfo::IsPropertyAccessorVisible(bool fIsSetter, OBJECTREF* pMemberInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pMemberInfo != NULL);
+ PRECONDITION (IsProtectedByGCFrame (pMemberInfo));
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMemberInfoClass = (*pMemberInfo)->GetMethodTable();
+
+ if (MscorlibBinder::IsClass(pMemberInfoClass, CLASS__PROPERTY))
+ {
+ // Get the property's MethodDesc
+ MethodDesc* pMDForProperty = NULL;
+ OBJECTREF method = NULL;
+ GCPROTECT_BEGIN(method)
+ {
+ // Get the property method token
+ BinderMethodID methodID;
+
+ if (fIsSetter)
+ {
+ methodID = METHOD__PROPERTY__GET_SETTER;
+ }
+ else
+ {
+ methodID = METHOD__PROPERTY__GET_GETTER;
+ }
+
+ MethodDescCallSite getMethod(methodID, pMemberInfo);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(*pMemberInfo),
+ BoolToArgSlot(true)
+ };
+ method = getMethod.Call_RetOBJECTREF(args);
+
+ if (method != NULL)
+ {
+ MethodDescCallSite getMethodHandle(METHOD__METHOD_BASE__GET_METHODDESC, &method);
+ ARG_SLOT arg = ObjToArgSlot(method);
+ pMDForProperty = (MethodDesc*) getMethodHandle.Call_RetLPVOID(&arg);
+ }
+ }
+ GCPROTECT_END();
+
+ if (pMDForProperty == NULL)
+ return false;
+
+ // Check to see if the new method is a property accessor.
+ mdToken tkMember = mdTokenNil;
+ MethodTable *pDeclaringMT = pMDForProperty->GetMethodTable();
+ if (pMDForProperty->GetModule()->GetPropertyInfoForMethodDef(pMDForProperty->GetMemberDef(), &tkMember, NULL, NULL) == S_OK)
+ {
+ if (IsMemberVisibleFromCom(pDeclaringMT, tkMember, pMDForProperty->GetMemberDef()))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+MethodDesc* DispatchInfo::GetFieldInfoMD(BinderMethodID Method, TypeHandle hndFieldInfoType)
+{
+ CONTRACT (MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END
+
+ MethodDesc *pMD;
+
+ // If the current class is the standard implementation then return the cached method desc
+ if (MscorlibBinder::IsClass(hndFieldInfoType.GetMethodTable(), CLASS__FIELD))
+ {
+ pMD = MscorlibBinder::GetMethod(Method);
+ }
+ else
+ {
+ pMD = MemberLoader::FindMethod(hndFieldInfoType.GetMethodTable(),
+ MscorlibBinder::GetMethodName(Method), MscorlibBinder::GetMethodSig(Method));
+ }
+ _ASSERTE(pMD && "Unable to find specified FieldInfo method");
+
+ // Return the specified method desc.
+ RETURN pMD;
+}
+
+MethodDesc* DispatchInfo::GetPropertyInfoMD(BinderMethodID Method, TypeHandle hndPropInfoType)
+{
+ CONTRACT (MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END
+
+ MethodDesc *pMD;
+
+ // If the current class is the standard implementation then return the cached method desc if present.
+ if (MscorlibBinder::IsClass(hndPropInfoType.GetMethodTable(), CLASS__PROPERTY))
+ {
+ pMD = MscorlibBinder::GetMethod(Method);
+ }
+ else
+ {
+ pMD = MemberLoader::FindMethod(hndPropInfoType.GetMethodTable(),
+ MscorlibBinder::GetMethodName(Method), MscorlibBinder::GetMethodSig(Method));
+ }
+ _ASSERTE(pMD && "Unable to find specified PropertyInfo method");
+
+ // Return the specified method desc.
+ RETURN pMD;
+}
+
+MethodDesc* DispatchInfo::GetMethodInfoMD(BinderMethodID Method, TypeHandle hndMethodInfoType)
+{
+ CONTRACT (MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END
+
+ MethodDesc *pMD;
+
+ // If the current class is the standard implementation then return the cached method desc.
+ if (MscorlibBinder::IsClass(hndMethodInfoType.GetMethodTable(), CLASS__METHOD))
+ {
+ pMD = MscorlibBinder::GetMethod(Method);
+ }
+ else
+ {
+ pMD = MemberLoader::FindMethod(hndMethodInfoType.GetMethodTable(),
+ MscorlibBinder::GetMethodName(Method), MscorlibBinder::GetMethodSig(Method));
+ }
+ _ASSERTE(pMD && "Unable to find specified MethodInfo method");
+
+ // Return the specified method desc.
+ RETURN pMD;
+}
+
+MethodDesc* DispatchInfo::GetCustomAttrProviderMD(TypeHandle hndCustomAttrProvider)
+{
+ CONTRACT (MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ MethodTable *pMT = hndCustomAttrProvider.AsMethodTable();
+ MethodDesc *pMD = pMT->GetMethodDescForInterfaceMethod(MscorlibBinder::GetMethod(METHOD__ICUSTOM_ATTR_PROVIDER__GET_CUSTOM_ATTRIBUTES));
+
+ // Return the specified method desc.
+ RETURN pMD;
+}
+
+// This method synchronizes the DispatchInfo's members with the ones in the method table's type.
+// The return value will be set to TRUE if the object was out of synch and members were
+// added, and it will be set to FALSE otherwise.
+BOOL DispatchInfo::SynchWithManagedView()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ NewArrayHolder<WCHAR> strMemberName = NULL;
+ NewHolder<ComMTMemberInfoMap> pMemberMap = NULL;
+
+ // This represents the new member to add and it is also used to determine if members have
+ // been added or not.
+ NewHolder<DispatchMemberInfo> pMemberToAdd = NULL;
+
+ Thread* pThread = SetupThreadNoThrow();
+ if (pThread == NULL)
+ return FALSE;
+
+ // Determine if this is the first time we synch.
+ BOOL bFirstSynch = (m_pFirstMemberInfo == NULL);
+
+    // This method needs to be synchronized to make sure two threads don't try to
+    // add members at the same time.
+ CrstHolder ch(&m_lock);
+ {
+ // Make sure we switch to cooperative mode before we start.
+ GCX_COOP();
+
+ // Go through the list of member info's and find the end.
+ DispatchMemberInfo **ppNextMember = &m_pFirstMemberInfo;
+ while (*ppNextMember)
+ ppNextMember = &((*ppNextMember)->m_pNext);
+
+ // Retrieve the member info map.
+ pMemberMap = GetMemberInfoMap();
+
+ for (int cPhase = 0; cPhase < 3; cPhase++)
+ {
+ PTRARRAYREF MemberArrayObj = NULL;
+ GCPROTECT_BEGIN(MemberArrayObj);
+
+ // Retrieve the appropriate array of members for the current phase.
+ switch (cPhase)
+ {
+ case 0:
+ // Retrieve the array of properties.
+ MemberArrayObj = RetrievePropList();
+ break;
+
+ case 1:
+ // Retrieve the array of fields.
+ MemberArrayObj = RetrieveFieldList();
+ break;
+
+ case 2:
+ // Retrieve the array of methods.
+ MemberArrayObj = RetrieveMethList();
+ break;
+ }
+
+ // Retrieve the number of components in the member array.
+ UINT NumComponents = 0;
+ if (MemberArrayObj != NULL)
+ NumComponents = MemberArrayObj->GetNumComponents();
+
+ // Go through all the member info's in the array and see if they are already
+ // in the DispatchExInfo.
+ for (UINT i = 0; i < NumComponents; i++)
+ {
+ BOOL bMatch = FALSE;
+
+ OBJECTREF CurrMemberInfoObj = MemberArrayObj->GetAt(i);
+ GCPROTECT_BEGIN(CurrMemberInfoObj)
+ {
+ DispatchMemberInfo *pCurrMemberInfo = m_pFirstMemberInfo;
+ while (pCurrMemberInfo)
+ {
+ // We can simply compare the OBJECTREF's.
+ if (CurrMemberInfoObj == ObjectFromHandle(pCurrMemberInfo->m_hndMemberInfo))
+ {
+ // We have found a match.
+ bMatch = TRUE;
+ break;
+ }
+
+ // Check the next member.
+ pCurrMemberInfo = pCurrMemberInfo->m_pNext;
+ }
+
+ // If we have not found a match then we need to add the member info to the
+ // list of member info's that will be added to the DispatchExInfo.
+ if (!bMatch)
+ {
+ DISPID MemberID = DISPID_UNKNOWN;
+ BOOL bAddMember = FALSE;
+
+
+ //
+ // Attempt to retrieve the properties of the member.
+ //
+
+ ComMTMethodProps *pMemberProps = DispatchMemberInfo::GetMemberProps(CurrMemberInfoObj, pMemberMap);
+
+ //
+ // Determine if we are to add this member or not.
+ //
+
+ if (pMemberProps)
+ bAddMember = pMemberProps->bMemberVisible;
+ else
+ bAddMember = m_bAllowMembersNotInComMTMemberMap;
+
+ if (bAddMember)
+ {
+ //
+ // Retrieve the DISPID of the member.
+ //
+ MemberID = DispatchMemberInfo::GetMemberDispId(CurrMemberInfoObj, pMemberMap);
+
+ //
+ // If the member does not have an explicit DISPID or if the specified DISPID
+ // is already in use then we need to generate a dynamic DISPID for the member.
+ //
+
+ if ((MemberID == DISPID_UNKNOWN) || (FindMember(MemberID) != NULL))
+ MemberID = GenerateDispID();
+
+ //
+ // Retrieve the name of the member.
+ //
+
+ strMemberName = DispatchMemberInfo::GetMemberName(CurrMemberInfoObj, pMemberMap);
+
+ //
+                            // Create a DispatchMemberInfo that will represent the member.
+ //
+
+ SString sName(strMemberName);
+ pMemberToAdd = CreateDispatchMemberInfoInstance(MemberID, sName, CurrMemberInfoObj);
+
+ //
+ // Add the member to the end of the list.
+ //
+
+ *ppNextMember = pMemberToAdd;
+
+ // Update ppNextMember to be ready for the next new member.
+ ppNextMember = &((*ppNextMember)->m_pNext);
+
+ // Add the member to the map. Note, the hash is unsynchronized, but we already have our lock
+ // so we're okay.
+ m_DispIDToMemberInfoMap.InsertValue(DispID2HashKey(MemberID), pMemberToAdd);
+ pMemberToAdd.SuppressRelease();
+ }
+ }
+ }
+ GCPROTECT_END();
+ }
+
+ GCPROTECT_END();
+ }
+ // GC mode toggles back here
+ }
+ // Check to see if any new members were added to the expando object.
+ return pMemberToAdd ? TRUE : FALSE;
+
+ // locks released and memory cleaned up here
+}
+
+// This method retrieves the OleAutBinder type.
+OBJECTREF DispatchInfo::GetOleAutBinder()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+    // If we have already created the instance of the OleAutBinder then simply return it.
+ if (m_hndOleAutBinder)
+ return ObjectFromHandle(m_hndOleAutBinder);
+
+ MethodTable *pOleAutBinderClass = MscorlibBinder::GetClass(CLASS__OLE_AUT_BINDER);
+
+ // Allocate an instance of the OleAutBinder class.
+ OBJECTREF OleAutBinder = AllocateObject(pOleAutBinderClass);
+
+ // Keep a handle to the OleAutBinder instance.
+ m_hndOleAutBinder = CreateGlobalHandle(OleAutBinder);
+
+ return OleAutBinder;
+}
+
+BOOL DispatchInfo::VariantIsMissing(VARIANT *pOle)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (V_VT(pOle) == VT_ERROR) && (V_ERROR(pOle) == DISP_E_PARAMNOTFOUND);
+}
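+
+// Usage note (illustrative; this is the standard OLE Automation convention rather than
+// anything specific to this change): callers pass omitted optional arguments as
+//
+//     VARIANT missing;
+//     V_VT(&missing) = VT_ERROR;
+//     V_ERROR(&missing) = DISP_E_PARAMNOTFOUND;
+//     // DispatchInfo::VariantIsMissing(&missing) == TRUE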
+
+PTRARRAYREF DispatchInfo::RetrievePropList()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // return value
+ PTRARRAYREF orRetVal;
+
+ // Retrieve the exposed class object.
+ OBJECTREF TargetObj = GetReflectionObject();
+
+ GCPROTECT_BEGIN(TargetObj);
+ MethodDescCallSite getProperties(METHOD__CLASS__GET_PROPERTIES, &TargetObj);
+
+ // Prepare the arguments that will be passed to the method.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(TargetObj),
+ (ARG_SLOT)BINDER_DefaultLookup
+ };
+
+ // Retrieve the array of members from the type object.
+ orRetVal = (PTRARRAYREF) getProperties.Call_RetOBJECTREF(Args);
+
+ GCPROTECT_END();
+
+ return orRetVal;
+}
+
+PTRARRAYREF DispatchInfo::RetrieveFieldList()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // return value
+ PTRARRAYREF orRetVal;
+
+ // Retrieve the exposed class object.
+ OBJECTREF TargetObj = GetReflectionObject();
+
+ GCPROTECT_BEGIN(TargetObj);
+ MethodDescCallSite getFields(METHOD__CLASS__GET_FIELDS, &TargetObj);
+
+ // Prepare the arguments that will be passed to the method.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(TargetObj),
+ (ARG_SLOT)BINDER_DefaultLookup
+ };
+
+ // Retrieve the array of members from the type object.
+ orRetVal = (PTRARRAYREF) getFields.Call_RetOBJECTREF(Args);
+
+ GCPROTECT_END();
+
+ return orRetVal;
+}
+
+PTRARRAYREF DispatchInfo::RetrieveMethList()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // return value
+ PTRARRAYREF orRetVal;
+
+ // Retrieve the exposed class object.
+ OBJECTREF TargetObj = GetReflectionObject();
+
+ GCPROTECT_BEGIN(TargetObj);
+ MethodDescCallSite getMethods(METHOD__CLASS__GET_METHODS, &TargetObj);
+
+ // Prepare the arguments that will be passed to the method.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(TargetObj),
+ (ARG_SLOT)BINDER_DefaultLookup
+ };
+
+ // Retrieve the array of members from the type object.
+ orRetVal = (PTRARRAYREF) getMethods.Call_RetOBJECTREF(Args);
+
+ GCPROTECT_END();
+
+ return orRetVal;
+}
+
+// Virtual method to retrieve the InvokeMember method desc.
+MethodDesc* DispatchInfo::GetInvokeMemberMD()
+{
+ CONTRACT (MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN MscorlibBinder::GetMethod(METHOD__CLASS__INVOKE_MEMBER);
+}
+
+// Virtual method to retrieve the object associated with this DispatchInfo that
+// implements IReflect.
+OBJECTREF DispatchInfo::GetReflectionObject()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ return m_pMT->GetManagedClassObject();
+}
+
+// Virtual method to retrieve the member info map.
+ComMTMemberInfoMap *DispatchInfo::GetMemberInfoMap()
+{
+ CONTRACT (ComMTMemberInfoMap*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+
+ // Create the member info map.
+ NewHolder<ComMTMemberInfoMap> pMemberInfoMap (new ComMTMemberInfoMap(m_pMT));
+
+ // Initialize it.
+ pMemberInfoMap->Init(sizeof(void*));
+
+ pMemberInfoMap.SuppressRelease();
+ RETURN pMemberInfoMap;
+}
+
+// Helper function to fill in an EXCEPINFO for an InvocationException.
+void DispatchInfo::GetExcepInfoForInvocationExcep(OBJECTREF objException, EXCEPINFO *pei)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(objException != NULL);
+ PRECONDITION(CheckPointer(pei));
+ }
+ CONTRACTL_END;
+
+ MethodDesc *pMD;
+ ExceptionData ED;
+ OBJECTREF InnerExcep = NULL;
+
+ // Initialize the EXCEPINFO.
+ memset(pei, 0, sizeof(EXCEPINFO));
+ pei->scode = E_FAIL;
+
+ GCPROTECT_BEGIN(InnerExcep)
+ GCPROTECT_BEGIN(objException)
+ {
+ // Retrieve the method desc to access the InnerException property.
+ pMD = MemberLoader::FindPropertyMethod(objException->GetMethodTable(), EXCEPTION_INNER_PROP, PropertyGet);
+ _ASSERTE(pMD && "Unable to find get method for proprety Exception.InnerException");
+ MethodDescCallSite propGet(pMD, &objException);
+
+ // Retrieve the value of the InnerException property.
+ ARG_SLOT GetInnerExceptionArgs[] = { ObjToArgSlot(objException) };
+ InnerExcep = propGet.Call_RetOBJECTREF(GetInnerExceptionArgs);
+
+ // If the inner exception object is null then we can't get any info.
+ if (InnerExcep != NULL)
+ {
+ // Retrieve the exception data for the inner exception.
+ ExceptionNative::GetExceptionData(InnerExcep, &ED);
+ pei->bstrSource = ED.bstrSource;
+ pei->bstrDescription = ED.bstrDescription;
+ pei->bstrHelpFile = ED.bstrHelpFile;
+ pei->dwHelpContext = ED.dwHelpContext;
+ pei->scode = ED.hr;
+ }
+ }
+ GCPROTECT_END();
+ GCPROTECT_END();
+}
+
+int DispatchInfo::ConvertInvokeFlagsToBindingFlags(int InvokeFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ int BindingFlags = 0;
+
+ // Check to see if DISPATCH_CONSTRUCT is set.
+ if (InvokeFlags & DISPATCH_CONSTRUCT)
+ BindingFlags |= BINDER_CreateInstance;
+
+ // Check to see if DISPATCH_METHOD is set.
+ if (InvokeFlags & DISPATCH_METHOD)
+ BindingFlags |= BINDER_InvokeMethod;
+
+ if (InvokeFlags & (DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF))
+ {
+ // We are dealing with a PROPPUT or PROPPUTREF or both.
+ if ((InvokeFlags & (DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF)) == (DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF))
+ {
+ BindingFlags |= BINDER_SetProperty;
+ }
+ else if (InvokeFlags & DISPATCH_PROPERTYPUT)
+ {
+ BindingFlags |= BINDER_PutDispProperty;
+ }
+ else
+ {
+ BindingFlags |= BINDER_PutRefDispProperty;
+ }
+ }
+ else
+ {
+ // We are dealing with a PROPGET.
+ if (InvokeFlags & DISPATCH_PROPERTYGET)
+ BindingFlags |= BINDER_GetProperty;
+ }
+
+ return BindingFlags;
+}
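+
+// Worked examples of the mapping above (illustrative only):
+//
+//     ConvertInvokeFlagsToBindingFlags(DISPATCH_METHOD | DISPATCH_PROPERTYGET)
+//         == BINDER_InvokeMethod | BINDER_GetProperty    // typical "obj.Foo(x)" call
+//     ConvertInvokeFlagsToBindingFlags(DISPATCH_PROPERTYPUT)
+//         == BINDER_PutDispProperty                      // "obj.Prop = x"
+//     ConvertInvokeFlagsToBindingFlags(DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF)
+//         == BINDER_SetProperty                          // either put flavor accepted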
+
+BOOL DispatchInfo::IsVariantByrefStaticArray(VARIANT *pOle)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pOle));
+ }
+ CONTRACTL_END;
+
+ if (V_VT(pOle) & VT_BYREF && V_VT(pOle) & VT_ARRAY)
+ {
+ if ((*V_ARRAYREF(pOle))->fFeatures & FADF_STATIC)
+ return TRUE;
+ }
+
+ return FALSE;
+}
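+
+// Illustrative example (an assumption for clarity): a fixed-size array passed ByRef
+// from VB6 arrives as a VT_ARRAY | VT_BYREF VARIANT whose SAFEARRAY descriptor has
+// FADF_STATIC set, meaning it cannot be destroyed or redimensioned:
+//
+//     SAFEARRAY *psa = ...;                  // hypothetical descriptor from the caller
+//     psa->fFeatures |= FADF_STATIC;
+//     VARIANT v;
+//     V_VT(&v) = VT_ARRAY | VT_I4 | VT_BYREF;
+//     V_ARRAYREF(&v) = &psa;
+//     // DispatchInfo::IsVariantByrefStaticArray(&v) == TRUE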
+
+DISPID DispatchInfo::GenerateDispID()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ // Find the next unused DISPID. Note, the hash is unsynchronized, but Gethash doesn't require synchronization.
+ for (; (UPTR)m_DispIDToMemberInfoMap.Gethash(DispID2HashKey(m_CurrentDispID)) != -1; m_CurrentDispID++);
+ return m_CurrentDispID++;
+}
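+
+// Illustrative walk-through (values assumed for the example): if DISPIDs 0x10002 and
+// 0x10003 are already in the map and m_CurrentDispID == 0x10002, then:
+//
+//     GenerateDispID();   // probes past 0x10002 and 0x10003, returns 0x10004
+//     GenerateDispID();   // 0x10005 is unused, returns 0x10005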
+
+//--------------------------------------------------------------------------------
+// The DispatchExInfo class implementation.
+
+DispatchExInfo::DispatchExInfo(SimpleComCallWrapper *pSimpleWrapper, MethodTable *pMT, BOOL bSupportsExpando)
+: DispatchInfo(pMT)
+, m_pSimpleWrapperOwner(pSimpleWrapper)
+, m_bSupportsExpando(bSupportsExpando)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pSimpleWrapper));
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ // Set the flags to specify the behavior of the base DispatchInfo class.
+ m_bAllowMembersNotInComMTMemberMap = TRUE;
+ m_bInvokeUsingInvokeMember = TRUE;
+}
+
+DispatchExInfo::~DispatchExInfo()
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+BOOL DispatchExInfo::SupportsExpando()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_bSupportsExpando;
+}
+
+// Methods to lookup members. These methods synch with the managed view if they fail to
+// find the method.
+DispatchMemberInfo* DispatchExInfo::SynchFindMember(DISPID DispID)
+{
+ CONTRACT (DispatchMemberInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ DispatchMemberInfo *pMemberInfo = FindMember(DispID);
+
+ if (!pMemberInfo && SynchWithManagedView())
+ pMemberInfo = FindMember(DispID);
+
+ RETURN pMemberInfo;
+}
+
+DispatchMemberInfo* DispatchExInfo::SynchFindMember(SString& strName, BOOL bCaseSensitive)
+{
+ CONTRACT (DispatchMemberInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ DispatchMemberInfo *pMemberInfo = FindMember(strName, bCaseSensitive);
+
+ if (!pMemberInfo && SynchWithManagedView())
+ pMemberInfo = FindMember(strName, bCaseSensitive);
+
+ RETURN pMemberInfo;
+}
+
+// Helper method that invokes the member with the specified DISPID. These methods synch
+// with the managed view if they fail to find the method.
+HRESULT DispatchExInfo::SynchInvokeMember(SimpleComCallWrapper *pSimpleWrap, DISPID id, LCID lcid, WORD wFlags, DISPPARAMS *pdp, VARIANT *pVarRes, EXCEPINFO *pei, IServiceProvider *pspCaller, unsigned int *puArgErr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Invoke the member.
+ HRESULT hr = InvokeMember(pSimpleWrap, id, lcid, wFlags, pdp, pVarRes, pei, pspCaller, puArgErr);
+
+ // If the member was not found then we need to synch and try again if the managed view has changed.
+ if ((hr == DISP_E_MEMBERNOTFOUND) && SynchWithManagedView())
+ hr = InvokeMember(pSimpleWrap, id, lcid, wFlags, pdp, pVarRes, pei, pspCaller, puArgErr);
+
+ return hr;
+}
+
+// Helper method used to create DispatchMemberInfo's. This is only here because
+// we can't call new inside a method that has an EX_TRY statement.
+DispatchMemberInfo* DispatchExInfo::CreateDispatchMemberInfoInstance(DISPID DispID, SString& strMemberName, OBJECTREF MemberInfoObj)
+{
+ CONTRACT (DispatchMemberInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ DispatchMemberInfo* pInfo = new DispatchMemberInfo(this, DispID, strMemberName, MemberInfoObj);
+
+ AppDomainFromIDHolder pDomain(m_pSimpleWrapperOwner->GetDomainID(), FALSE);
+ pDomain.ThrowIfUnloaded();
+
+ pInfo->SetHandle(pDomain->CreateHandle(MemberInfoObj));
+
+ RETURN pInfo;
+}
+
+
+DispatchMemberInfo* DispatchExInfo::GetFirstMember()
+{
+ CONTRACT (DispatchMemberInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // Start with the first member.
+ DispatchMemberInfo **ppNextMemberInfo = &m_pFirstMemberInfo;
+
+    // If the next member is not set we need to sync up with the expando object
+ // itself to make sure that this member is really the last member and that
+ // other members have not been added without us knowing.
+ if (!(*ppNextMemberInfo))
+ {
+ if (SynchWithManagedView())
+ {
+            // New members have been added to the list, and since they must be added
+            // at the end, the next pointer of the previous end of the list must
+            // have been updated.
+ _ASSERTE(*ppNextMemberInfo);
+ }
+ }
+
+ // Now we need to make sure we skip any members that are deleted.
+ while ((*ppNextMemberInfo) && !ObjectFromHandle((*ppNextMemberInfo)->m_hndMemberInfo))
+ ppNextMemberInfo = &((*ppNextMemberInfo)->m_pNext);
+
+ RETURN *ppNextMemberInfo;
+}
+
+DispatchMemberInfo* DispatchExInfo::GetNextMember(DISPID CurrMemberDispID)
+{
+ CONTRACT (DispatchMemberInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // Do a lookup in the hashtable to find the DispatchMemberInfo for the DISPID.
+ DispatchMemberInfo *pDispMemberInfo = FindMember(CurrMemberDispID);
+ if (!pDispMemberInfo)
+ RETURN NULL;
+
+ // Start from the next member.
+ DispatchMemberInfo **ppNextMemberInfo = &pDispMemberInfo->m_pNext;
+
+    // If the next member is not set we need to sync up with the expando object
+ // itself to make sure that this member is really the last member and that
+ // other members have not been added without us knowing.
+ if (!(*ppNextMemberInfo))
+ {
+ if (SynchWithManagedView())
+ {
+            // New members have been added to the list, and since they must be added
+            // at the end, the next pointer of the previous end of the list must
+            // have been updated.
+ _ASSERTE(*ppNextMemberInfo);
+ }
+ }
+
+ // Now we need to make sure we skip any members that are deleted.
+ while ((*ppNextMemberInfo) && !ObjectFromHandle((*ppNextMemberInfo)->m_hndMemberInfo))
+ ppNextMemberInfo = &((*ppNextMemberInfo)->m_pNext);
+
+ RETURN *ppNextMemberInfo;
+}
+
+DispatchMemberInfo* DispatchExInfo::AddMember(SString& strName, BOOL bCaseSensitive)
+{
+ CONTRACT (DispatchMemberInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(m_bSupportsExpando == TRUE);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ DispatchMemberInfo *pDispMemberInfo = NULL;
+
+ // Attempt to find the member in the DispatchEx information.
+ pDispMemberInfo = SynchFindMember(strName, bCaseSensitive);
+
+ // If we haven't found the member, then we need to add it.
+ if (!pDispMemberInfo)
+ {
+ // Take a lock before we check again to see if the member has been added by another thread.
+ CrstHolder ch(&m_lock);
+
+ // Now that we are inside the lock, check without synching.
+ pDispMemberInfo = FindMember(strName, bCaseSensitive);
+ if (!pDispMemberInfo)
+ {
+ struct _gc {
+ STRINGREF strObj;
+ OBJECTREF TargetObj;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ // Retrieve the MethodDesc for AddField()
+ MethodDesc *pMD = GetIExpandoMD(METHOD__IEXPANDO__ADD_FIELD);
+
+ // Allocate the string object that will be passed to the AddField method.
+ gc.strObj = StringObject::NewString(strName.GetUnicode());
+
+ // Retrieve the COM+ object that is being exposed to COM.
+ gc.TargetObj = GetReflectionObject();
+
+ MethodDescCallSite addField(pMD, &gc.TargetObj);
+
+ // Prepare the arguments that will be passed to AddField.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(gc.TargetObj),
+ ObjToArgSlot(gc.strObj)
+ };
+
+ // Add the field to the target expando.
+ OBJECTREF pMemberInfo = addField.Call_RetOBJECTREF(Args);
+
+ // Generate the DISPID for this member.
+ DISPID DispID = GenerateDispID();
+
+ // Create a new DispatchMemberInfo that will represent this member.
+ pDispMemberInfo = CreateDispatchMemberInfoInstance(DispID, strName, pMemberInfo);
+
+ // Go through the list of member info's and find the end.
+ DispatchMemberInfo **ppNextMember = &m_pFirstMemberInfo;
+ while (*ppNextMember)
+ ppNextMember = &((*ppNextMember)->m_pNext);
+
+ // Add the new member info to the end of the list.
+ *ppNextMember = pDispMemberInfo;
+
+ // Add the member to the hashtable. Note, the hash is unsynchronized, but we already have our lock so
+ // we're okay.
+ m_DispIDToMemberInfoMap.InsertValue(DispID2HashKey(DispID), pDispMemberInfo);
+
+ GCPROTECT_END();
+ }
+ }
+
+ RETURN pDispMemberInfo;
+}
+
+void DispatchExInfo::DeleteMember(DISPID DispID)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_bSupportsExpando);
+ }
+ CONTRACTL_END
+
+ // Do a lookup in the hashtable to find the DispatchMemberInfo for the DISPID.
+ // This needs to be done outside of the lock because SyncFindMember will acquire
+ // the lock as well.
+ DispatchMemberInfo *pDispMemberInfo = SynchFindMember(DispID);
+
+ // Take a lock before we check that the member has not already been deleted.
+ {
+ CrstHolder ch(&m_lock);
+
+        // If the member does not exist, is static, or has been deleted, then we have nothing more to do.
+ if (pDispMemberInfo && (ObjectFromHandle(pDispMemberInfo->m_hndMemberInfo) != NULL))
+ {
+ OBJECTREF TargetObj = GetReflectionObject();
+ GCPROTECT_BEGIN(TargetObj);
+
+            // Retrieve the RemoveMember MethodDesc.
+ MethodDesc *pMD = GetIExpandoMD(METHOD__IEXPANDO__REMOVE_MEMBER);
+ MethodDescCallSite removeMember(pMD, &TargetObj);
+
+ OBJECTREF MemberInfoObj = ObjectFromHandle(pDispMemberInfo->m_hndMemberInfo);
+
+ // Prepare the arguments that will be passed to RemoveMember.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(TargetObj),
+ ObjToArgSlot(MemberInfoObj)
+ };
+
+            // Call the RemoveMember method.
+ removeMember.Call(Args);
+
+ // Set the handle to point to NULL to indicate the member has been removed.
+ StoreObjectInHandle(pDispMemberInfo->m_hndMemberInfo, NULL);
+
+ GCPROTECT_END();
+ }
+ }
+}
+
+MethodDesc* DispatchExInfo::GetIReflectMD(BinderMethodID Method)
+{
+ CONTRACT (MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ MethodTable *pMT = m_pSimpleWrapperOwner->GetMethodTable();
+ MethodDesc *pMD = pMT->GetMethodDescForInterfaceMethod(MscorlibBinder::GetMethod(Method));
+
+ // Return the specified method desc.
+ RETURN pMD;
+}
+
+
+MethodDesc* DispatchExInfo::GetIExpandoMD(BinderMethodID Method)
+{
+ CONTRACT (MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(SupportsExpando());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ MethodTable *pMT = m_pSimpleWrapperOwner->GetMethodTable();
+ MethodDesc *pMD = pMT->GetMethodDescForInterfaceMethod(MscorlibBinder::GetMethod(Method));
+
+ // Return the specified method desc.
+ RETURN pMD;
+}
+
+PTRARRAYREF DispatchExInfo::RetrievePropList()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ PTRARRAYREF oPropList;
+
+ // Retrieve the expando OBJECTREF.
+ OBJECTREF TargetObj = GetReflectionObject();
+ GCPROTECT_BEGIN(TargetObj);
+
+    // Retrieve the GetProperties MethodDesc.
+ MethodDesc *pMD = GetIReflectMD(METHOD__IREFLECT__GET_PROPERTIES);
+ MethodDescCallSite getProperties(pMD, &TargetObj);
+
+ // Prepare the arguments that will be passed to the method.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(TargetObj),
+ (ARG_SLOT)BINDER_DefaultLookup
+ };
+
+ // Retrieve the array of members from the expando object
+ oPropList = (PTRARRAYREF) getProperties.Call_RetOBJECTREF(Args);
+
+ GCPROTECT_END();
+
+ return oPropList;
+}
+
+PTRARRAYREF DispatchExInfo::RetrieveFieldList()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ PTRARRAYREF oFieldList;
+
+ // Retrieve the expando OBJECTREF.
+ OBJECTREF TargetObj = GetReflectionObject();
+ GCPROTECT_BEGIN(TargetObj);
+
+    // Retrieve the GetFields MethodDesc.
+ MethodDesc *pMD = GetIReflectMD(METHOD__IREFLECT__GET_FIELDS);
+ MethodDescCallSite getFields(pMD, &TargetObj);
+
+ // Prepare the arguments that will be passed to the method.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(TargetObj),
+ (ARG_SLOT)BINDER_DefaultLookup
+ };
+
+ // Retrieve the array of members from the expando object
+ oFieldList = (PTRARRAYREF) getFields.Call_RetOBJECTREF(Args);
+
+ GCPROTECT_END();
+
+ return oFieldList;
+}
+
+PTRARRAYREF DispatchExInfo::RetrieveMethList()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ PTRARRAYREF oMethList;
+
+ // Retrieve the expando OBJECTREF.
+ OBJECTREF TargetObj = GetReflectionObject();
+ GCPROTECT_BEGIN(TargetObj);
+
+    // Retrieve the GetMethods MethodDesc.
+ MethodDesc *pMD = GetIReflectMD(METHOD__IREFLECT__GET_METHODS);
+ MethodDescCallSite getMethods(pMD, &TargetObj);
+
+ // Prepare the arguments that will be passed to the method.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(TargetObj),
+ (ARG_SLOT)BINDER_DefaultLookup
+ };
+
+ // Retrieve the array of members from the expando object
+ oMethList = (PTRARRAYREF) getMethods.Call_RetOBJECTREF(Args);
+
+ GCPROTECT_END();
+
+ return oMethList;
+}
+
+// Virtual method to retrieve the InvokeMember method desc.
+MethodDesc* DispatchExInfo::GetInvokeMemberMD()
+{
+ CONTRACT(MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN GetIReflectMD(METHOD__IREFLECT__INVOKE_MEMBER);
+}
+
+// Virtual method to retrieve the object associated with this DispatchInfo that
+// implements IReflect.
+OBJECTREF DispatchExInfo::GetReflectionObject()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+    // Runtime type is very special. Because of how it is implemented, calling methods
+    // through IDispatch on a runtime type object doesn't work the way it does for other
+    // IReflect implementors. To be able to invoke methods on the runtime type, we need
+    // to invoke them on the runtime type that represents the runtime type itself. This
+    // is why for runtime type we get the exposed class object and not the actual
+    // OBJECTREF contained in the wrapper.
+
+ if (m_pMT == g_pRuntimeTypeClass ||
+ MscorlibBinder::IsClass(m_pMT, CLASS__CLASS_INTROSPECTION_ONLY))
+ return m_pMT->GetManagedClassObject();
+ else
+ return m_pSimpleWrapperOwner->GetObjectRef();
+}
+
+// Virtual method to retrieve the member info map.
+ComMTMemberInfoMap *DispatchExInfo::GetMemberInfoMap()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // There is no member info map for IExpando objects.
+ return NULL;
+}
diff --git a/src/vm/dispatchinfo.h b/src/vm/dispatchinfo.h
new file mode 100644
index 0000000000..0afcb7bb76
--- /dev/null
+++ b/src/vm/dispatchinfo.h
@@ -0,0 +1,411 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: DispatchInfo.h
+//
+
+//
+// Definition of helpers used to expose IDispatch
+// and IDispatchEx to COM.
+//
+
+
+#ifndef _DISPATCHINFO_H
+#define _DISPATCHINFO_H
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+#include "vars.hpp"
+#include "mlinfo.h"
+
+// Forward declarations.
+struct ComMethodTable;
+struct SimpleComCallWrapper;
+class ComMTMemberInfoMap;
+struct ComMTMethodProps;
+class DispParamMarshaler;
+class MarshalInfo;
+class DispatchInfo;
+enum BinderMethodID;
+
+// An enumeration of the types of managed MemberInfo's. This must stay in synch with
+// the ones defined in MemberInfo.cs.
+enum EnumMemberTypes
+{
+ Uninitted = 0x00,
+ Constructor = 0x01,
+ Event = 0x02,
+ Field = 0x04,
+ Method = 0x08,
+ Property = 0x10
+};
+
+enum {NUM_MEMBER_TYPES = 5};
+
+enum CultureAwareStates
+{
+ Aware,
+ NonAware,
+ Unknown
+};
+
+// This structure represents a dispatch member.
+struct DispatchMemberInfo
+{
+ DispatchMemberInfo(DispatchInfo *pDispInfo, DISPID DispID, SString& strName, OBJECTREF MemberInfoObj);
+ ~DispatchMemberInfo();
+
+ // Helper method to ensure the entry is initialized.
+ void EnsureInitialized();
+
+ BOOL IsNeutered()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_bNeutered) ? TRUE : FALSE;
+ }
+
+ // This method retrieves the ID's of the specified names.
+ HRESULT GetIDsOfParameters(__in_ecount(NumNames) WCHAR **astrNames, int NumNames, DISPID *aDispIds, BOOL bCaseSensitive);
+
+ // Accessors.
+ PTRARRAYREF GetParameters();
+
+ BOOL IsParamInOnly(int iIndex)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(m_pParamInOnly));
+ }
+ CONTRACTL_END;
+
+ // Add one for the return type.
+ return m_pParamInOnly[iIndex + 1];
+ }
+
+ // Inline accessors.
+ BOOL IsCultureAware()
+ {
+ CONTRACT (BOOL)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(Unknown != m_CultureAwareState);
+ }
+ CONTRACT_END;
+
+ RETURN (Aware == m_CultureAwareState);
+ }
+
+ EnumMemberTypes GetMemberType()
+ {
+ CONTRACT (EnumMemberTypes)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(Uninitted != m_enumType);
+ }
+ CONTRACT_END;
+
+ RETURN m_enumType;
+ }
+
+ int GetNumParameters()
+ {
+ CONTRACT (int)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_iNumParams != -1);
+ }
+ CONTRACT_END;
+
+ RETURN m_iNumParams;
+ }
+
+ BOOL IsLastParamOleVarArg()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_bLastParamOleVarArg;
+ }
+
+ void SetHandle(OBJECTHANDLE objhnd)
+ {
+ m_hndMemberInfo = objhnd;
+ }
+
+ BOOL RequiresManagedObjCleanup()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_bRequiresManagedCleanup;
+ }
+
+ // Parameter marshaling methods.
+ void MarshalParamNativeToManaged(int iParam, VARIANT *pSrcVar, OBJECTREF *pDestObj);
+ void MarshalParamManagedToNativeRef(int iParam, OBJECTREF *pSrcObj, VARIANT *pRefVar);
+ void CleanUpParamManaged(int iParam, OBJECTREF *pObj);
+ void MarshalReturnValueManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar);
+
+ // Static helper methods.
+ static ComMTMethodProps *GetMemberProps(OBJECTREF MemberInfoObj, ComMTMemberInfoMap *pMemberMap);
+ static DISPID GetMemberDispId(OBJECTREF MemberInfoObj, ComMTMemberInfoMap *pMemberMap);
+ static LPWSTR GetMemberName(OBJECTREF MemberInfoObj, ComMTMemberInfoMap *pMemberMap);
+
+private:
+ // Private helpers.
+ void Neuter();
+ void Init();
+ void DetermineMemberType();
+ void DetermineParamCount();
+ void DetermineCultureAwareness();
+ void SetUpParamMarshalerInfo();
+ void SetUpMethodMarshalerInfo(MethodDesc *pMeth, BOOL bReturnValueOnly);
+ void SetUpFieldMarshalerInfo(FieldDesc *pField);
+ void SetUpDispParamMarshalerForMarshalInfo(int iParam, MarshalInfo *pInfo);
+ void SetUpDispParamAttributes(int iParam, MarshalInfo* Info);
+public:
+ DISPID m_DispID;
+ OBJECTHANDLE m_hndMemberInfo;
+ DispParamMarshaler** m_apParamMarshaler;
+ BOOL* m_pParamInOnly;
+ DispatchMemberInfo* m_pNext;
+ SString m_strName;
+ EnumMemberTypes m_enumType;
+ int m_iNumParams;
+ CultureAwareStates m_CultureAwareState;
+ BOOL m_bRequiresManagedCleanup;
+ BOOL m_bInitialized;
+ BOOL m_bNeutered;
+ DispatchInfo* m_pDispInfo;
+ BOOL m_bLastParamOleVarArg;
+
+private:
+ static MethodTable* s_pMemberTypes[NUM_MEMBER_TYPES];
+ static EnumMemberTypes s_memberTypes[NUM_MEMBER_TYPES];
+ static int s_iNumMemberTypesKnown;
+};
+
+
+struct InvokeObjects
+{
+ PTRARRAYREF ParamArray;
+ PTRARRAYREF CleanUpArray;
+ OBJECTREF MemberInfo;
+ OBJECTREF OleAutBinder;
+ OBJECTREF Target;
+ OBJECTREF PropVal;
+ OBJECTREF ByrefStaticArrayBackupPropVal;
+ OBJECTREF RetVal;
+ OBJECTREF TmpObj;
+ OBJECTREF MemberName;
+ OBJECTREF CultureInfo;
+ OBJECTREF OldCultureInfo;
+ PTRARRAYREF NamedArgArray;
+ OBJECTREF ReflectionObj;
+};
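+
+// Note (an observation, not normative documentation): InvokeObjects is kept as a flat
+// struct of OBJECTREF/PTRARRAYREF fields so that a single GCPROTECT_BEGIN(Objs) in
+// DispatchInfo::InvokeMember can report the whole block to the GC at once; adding a
+// non-OBJECTREF field here would silently break that protection.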
+
+class DispatchInfo
+{
+public:
+ // Encapsulate a CrstHolder, so that clients of our lock don't have to know
+ // the details of our implementation.
+ class LockHolder : public CrstHolder
+ {
+ public:
+ LockHolder(DispatchInfo *pDI)
+ : CrstHolder(&pDI->m_lock)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ };
+
+ // Constructor and destructor.
+ DispatchInfo(MethodTable *pComMTOwner);
+ virtual ~DispatchInfo();
+
+ // Methods to lookup members.
+ DispatchMemberInfo* FindMember(DISPID DispID);
+ DispatchMemberInfo* FindMember(SString& strName, BOOL bCaseSensitive);
+
+ // Helper method that invokes the member with the specified DISPID.
+ HRESULT InvokeMember(SimpleComCallWrapper *pSimpleWrap, DISPID id, LCID lcid, WORD wFlags, DISPPARAMS *pdp, VARIANT *pVarRes, EXCEPINFO *pei, IServiceProvider *pspCaller, unsigned int *puArgErr);
+
+ void InvokeMemberDebuggerWrapper(DispatchMemberInfo* pDispMemberInfo,
+ InvokeObjects* pObjs,
+ int NumParams,
+ int NumArgs,
+ int NumNamedArgs,
+ int& NumByrefArgs,
+ int& iSrcArg,
+ DISPID id,
+ DISPPARAMS* pdp,
+ VARIANT* pVarRes,
+ WORD wFlags,
+ LCID lcid,
+ DISPID* pSrcArgNames,
+ VARIANT* pSrcArgs,
+ OBJECTHANDLE* aByrefStaticArrayBackupObjHandle,
+ int* pManagedMethodParamIndexMap,
+ VARIANT** aByrefArgOleVariant,
+ Frame * pFrame);
+
+ void InvokeMemberWorker(DispatchMemberInfo* pDispMemberInfo,
+ InvokeObjects* pObjs,
+ int NumParams,
+ int NumArgs,
+ int NumNamedArgs,
+ int& NumByrefArgs,
+ int& iSrcArg,
+ DISPID id,
+ DISPPARAMS* pdp,
+ VARIANT* pVarRes,
+ WORD wFlags,
+ LCID lcid,
+ DISPID* pSrcArgNames,
+ VARIANT* pSrcArgs,
+ OBJECTHANDLE* aByrefStaticArrayBackupObjHandle,
+ int* pManagedMethodParamIndexMap,
+ VARIANT** aByrefArgOleVariant);
+
+ // Method to NULL the handles inside DispatchMemberInfo
+ void DestroyMemberInfoHandles();
+
+ // Methods to retrieve the cached MD's
+ static MethodDesc* GetFieldInfoMD(BinderMethodID Method, TypeHandle hndFieldInfoType);
+ static MethodDesc* GetPropertyInfoMD(BinderMethodID Method, TypeHandle hndPropInfoType);
+ static MethodDesc* GetMethodInfoMD(BinderMethodID Method, TypeHandle hndMethodInfoType);
+ static MethodDesc* GetCustomAttrProviderMD(TypeHandle hndCustomAttrProvider);
+
+    // This method synchronizes the DispatchInfo's members with the ones in the managed world.
+    // The return value is set to TRUE if the object was out of synch and members were
+    // added, and to FALSE otherwise.
+ BOOL SynchWithManagedView();
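+    // Hedged usage sketch: callers typically retry a failed lookup once after an
+    // out-of-synch result, e.g.
+    //     DispatchMemberInfo *pMember = FindMember(DispID);
+    //     if (!pMember && SynchWithManagedView())
+    //         pMember = FindMember(DispID); // new members may have been added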
+
+ // This method retrieves the OleAutBinder type.
+ static OBJECTREF GetOleAutBinder();
+
+ // Returns TRUE if the argument is "Missing"
+ static BOOL VariantIsMissing(VARIANT *pOle);
+
+protected:
+ // Parameter marshaling helpers.
+ void MarshalParamNativeToManaged(DispatchMemberInfo *pMemberInfo, int iParam, VARIANT *pSrcVar, OBJECTREF *pDestObj);
+ void MarshalParamManagedToNativeRef(DispatchMemberInfo *pMemberInfo, int iParam, OBJECTREF *pSrcObj, OBJECTREF *pBackupStaticArray, VARIANT *pRefVar);
+ void MarshalReturnValueManagedToNative(DispatchMemberInfo *pMemberInfo, OBJECTREF *pSrcObj, VARIANT *pDestVar);
+ void CleanUpNativeParam(DispatchMemberInfo *pDispMemberInfo, int iParam, OBJECTREF *pBackupStaticArray, VARIANT *pArgVariant);
+
+    // DISPID to named argument conversion helper.
+ void SetUpNamedParamArray(DispatchMemberInfo *pMemberInfo, DISPID *pSrcArgNames, int NumNamedArgs, PTRARRAYREF *pNamedParamArray);
+
+ // Helper method to retrieve the source VARIANT from the VARIANT contained in the disp params.
+ VARIANT* RetrieveSrcVariant(VARIANT *pDispParamsVariant);
+
+    // Helper method to determine if a member is publicly accessible.
+ bool IsPropertyAccessorVisible(bool fIsSetter, OBJECTREF* pMemberInfo);
+
+ // Helper methods called from SynchWithManagedView() to retrieve the lists of members.
+ virtual PTRARRAYREF RetrievePropList();
+ virtual PTRARRAYREF RetrieveFieldList();
+ virtual PTRARRAYREF RetrieveMethList();
+
+ // Virtual method to retrieve the InvokeMember method desc.
+ virtual MethodDesc* GetInvokeMemberMD();
+
+ // Virtual method to retrieve the reflection object associated with the DispatchInfo.
+ virtual OBJECTREF GetReflectionObject();
+
+ // Virtual method to retrieve the member info map.
+ virtual ComMTMemberInfoMap* GetMemberInfoMap();
+
+ // This method generates a DISPID for a new member.
+ DISPID GenerateDispID();
+
+ // Helper method to create an instance of a DispatchMemberInfo.
+ virtual DispatchMemberInfo* CreateDispatchMemberInfoInstance(DISPID DispID, SString& strMemberName, OBJECTREF MemberInfoObj);
+
+ // Helper function to fill in an EXCEPINFO for an InvocationException.
+ static void GetExcepInfoForInvocationExcep(OBJECTREF objException, EXCEPINFO *pei);
+
+ // This helper method converts the IDispatch::Invoke flags to BindingFlags.
+ static int ConvertInvokeFlagsToBindingFlags(int InvokeFlags);
+
+ // Helper function to determine if a VARIANT is a byref static safe array.
+ static BOOL IsVariantByrefStaticArray(VARIANT *pOle);
+
+ MethodTable* m_pMT;
+ PtrHashMap m_DispIDToMemberInfoMap;
+ DispatchMemberInfo* m_pFirstMemberInfo;
+ Crst m_lock;
+ int m_CurrentDispID;
+ BOOL m_bAllowMembersNotInComMTMemberMap;
+ BOOL m_bInvokeUsingInvokeMember;
+
+ static OBJECTHANDLE m_hndOleAutBinder;
+};
+
+
+
+class DispatchExInfo : public DispatchInfo
+{
+public:
+ // Constructor and destructor.
+ DispatchExInfo(SimpleComCallWrapper *pSimpleWrapper, MethodTable *pMT, BOOL bSupportsExpando);
+ virtual ~DispatchExInfo();
+
+ // Returns true if this DispatchExInfo supports expando operations.
+ BOOL SupportsExpando();
+
+    // Methods to look up members. These methods synch with the managed view if they fail to
+    // find the member.
+ DispatchMemberInfo* SynchFindMember(DISPID DispID);
+ DispatchMemberInfo* SynchFindMember(SString& strName, BOOL bCaseSensitive);
+
+    // Helper method that invokes the member with the specified DISPID. It synchs
+    // with the managed view if it fails to find the member.
+ HRESULT SynchInvokeMember(SimpleComCallWrapper *pSimpleWrap, DISPID id, LCID lcid, WORD wFlags, DISPPARAMS *pdp, VARIANT *pVarRes, EXCEPINFO *pei, IServiceProvider *pspCaller, unsigned int *puArgErr);
+
+ // Helper method to create an instance of a DispatchMemberInfo.
+ virtual DispatchMemberInfo* CreateDispatchMemberInfoInstance(DISPID DispID, SString& strMemberName, OBJECTREF MemberInfoObj);
+
+    // These methods return the first and next non-deleted members.
+ DispatchMemberInfo* GetFirstMember();
+ DispatchMemberInfo* GetNextMember(DISPID CurrMemberDispID);
+
+ // Methods to add and delete members.
+ DispatchMemberInfo* AddMember(SString& strName, BOOL bCaseSensitive);
+ void DeleteMember(DISPID DispID);
+
+ // Methods to retrieve the cached MD's
+ MethodDesc* GetIReflectMD(BinderMethodID Method);
+ MethodDesc* GetIExpandoMD(BinderMethodID Method);
+
+private:
+ // Helper methods called from SynchWithManagedView() to retrieve the lists of members.
+ virtual PTRARRAYREF RetrievePropList();
+ virtual PTRARRAYREF RetrieveFieldList();
+ virtual PTRARRAYREF RetrieveMethList();
+
+ // Virtual method to retrieve the InvokeMember method desc.
+ virtual MethodDesc* GetInvokeMemberMD();
+
+ // Virtual method to retrieve the reflection object associated with the DispatchInfo.
+ virtual OBJECTREF GetReflectionObject();
+
+ // Virtual method to retrieve the member info map.
+ virtual ComMTMemberInfoMap* GetMemberInfoMap();
+
+ SimpleComCallWrapper* m_pSimpleWrapperOwner;
+ BOOL m_bSupportsExpando;
+};
+
+#endif // _DISPATCHINFO_H
diff --git a/src/vm/dispparammarshaler.cpp b/src/vm/dispparammarshaler.cpp
new file mode 100644
index 0000000000..77d8d8b1a0
--- /dev/null
+++ b/src/vm/dispparammarshaler.cpp
@@ -0,0 +1,646 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: DispParamMarshaler.cpp
+//
+
+//
+// Implementation of dispatch parameter marshalers.
+//
+
+
+#include "common.h"
+
+#include "dispparammarshaler.h"
+#include "olevariant.h"
+#include "dispatchinfo.h"
+#include "fieldmarshaler.h"
+#include "comdelegate.h"
+
+BOOL DispParamMarshaler::RequiresManagedCleanup()
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+
+void DispParamMarshaler::MarshalNativeToManaged(VARIANT *pSrcVar, OBJECTREF *pDestObj)
+{
+ WRAPPER_NO_CONTRACT;
+ OleVariant::MarshalObjectForOleVariant(pSrcVar, pDestObj);
+}
+
+void DispParamMarshaler::MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar)
+{
+ WRAPPER_NO_CONTRACT;
+ OleVariant::MarshalOleVariantForObject(pSrcObj, pDestVar);
+}
+
+void DispParamMarshaler::MarshalManagedToNativeRef(OBJECTREF *pSrcObj, VARIANT *pRefVar)
+{
+ WRAPPER_NO_CONTRACT;
+ OleVariant::MarshalOleRefVariantForObject(pSrcObj, pRefVar);
+}
+
+void DispParamMarshaler::CleanUpManaged(OBJECTREF *pObj)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+void DispParamCurrencyMarshaler::MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDestVar));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Convert the managed decimal to a VARIANT containing a decimal.
+ OleVariant::MarshalOleVariantForObject(pSrcObj, pDestVar);
+ _ASSERTE(pDestVar->vt == VT_DECIMAL);
+
+ // Coerce the decimal to a currency.
+ IfFailThrow(SafeVariantChangeType(pDestVar, pDestVar, 0, VT_CY));
+}
+
+void DispParamOleColorMarshaler::MarshalNativeToManaged(VARIANT *pSrcVar, OBJECTREF *pDestObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pSrcVar));
+ }
+ CONTRACTL_END;
+
+ BOOL bByref = FALSE;
+ VARTYPE vt = V_VT(pSrcVar);
+
+ // Handle byref VARIANTS
+ if (vt & VT_BYREF)
+ {
+ vt = vt & ~VT_BYREF;
+ bByref = TRUE;
+ }
+
+ // Validate the OLE variant type.
+ if (vt != VT_I4 && vt != VT_UI4)
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_OLE_VARIANT);
+
+ // Retrieve the OLECOLOR.
+ OLE_COLOR OleColor = bByref ? *V_UI4REF(pSrcVar) : V_UI4(pSrcVar);
+
+ // Convert the OLECOLOR to a System.Drawing.Color.
+ SYSTEMCOLOR MngColor;
+ ConvertOleColorToSystemColor(OleColor, &MngColor);
+
+ // Box the System.Drawing.Color value class and give back the boxed object.
+ TypeHandle hndColorType =
+ GetThread()->GetDomain()->GetMarshalingData()->GetOleColorMarshalingInfo()->GetColorType();
+
+ *pDestObj = hndColorType.GetMethodTable()->Box(&MngColor);
+}
+
+void DispParamOleColorMarshaler::MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDestVar));
+ }
+ CONTRACTL_END;
+
+ // Clear the destination VARIANT.
+ SafeVariantClear(pDestVar);
+
+ // Convert the System.Drawing.Color to an OLECOLOR.
+ V_VT(pDestVar) = VT_UI4;
+ V_UI4(pDestVar) = ConvertSystemColorToOleColor(pSrcObj);
+}
+
+void DispParamOleColorMarshaler::MarshalManagedToNativeRef(OBJECTREF *pSrcObj, VARIANT *pRefVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pRefVar));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(V_VT(pRefVar) == (VT_I4 | VT_BYREF) || V_VT(pRefVar) == (VT_UI4 | VT_BYREF));
+
+ // Convert the System.Drawing.Color to an OLECOLOR.
+ *V_UI4REF(pRefVar) = ConvertSystemColorToOleColor(pSrcObj);
+}
+
+void DispParamErrorMarshaler::MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDestVar));
+ }
+ CONTRACTL_END;
+
+ // Convert the managed decimal to a VARIANT containing a VT_I4 or VT_UI4.
+ OleVariant::MarshalOleVariantForObject(pSrcObj, pDestVar);
+ _ASSERTE(V_VT(pDestVar) == VT_I4 || V_VT(pDestVar) == VT_UI4);
+
+    // Since VariantChangeType refuses to coerce an I4 or a UI4 to VT_ERROR, just
+    // set the variant type directly.
+ V_VT(pDestVar) = VT_ERROR;
+}
+
+void DispParamInterfaceMarshaler::MarshalNativeToManaged(VARIANT *pSrcVar, OBJECTREF *pDestObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pSrcVar));
+ PRECONDITION(IsProtectedByGCFrame(pDestObj));
+ }
+ CONTRACTL_END;
+
+ BOOL bByref = FALSE;
+ VARTYPE vt = V_VT(pSrcVar);
+
+ // Handle byref VARIANTS
+ if (vt & VT_BYREF)
+ {
+ vt = vt & ~VT_BYREF;
+ bByref = TRUE;
+ }
+
+ // Validate the OLE variant type.
+ if (vt != VT_UNKNOWN && vt != VT_DISPATCH)
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_OLE_VARIANT);
+
+ // Retrieve the IP.
+ IUnknown *pUnk = bByref ? *V_UNKNOWNREF(pSrcVar) : V_UNKNOWN(pSrcVar);
+
+ // Convert the IP to an OBJECTREF.
+ GetObjectRefFromComIP(pDestObj, pUnk, m_pClassMT, /* pItfMT = */ NULL, m_bClassIsHint ? ObjFromComIP::CLASS_IS_HINT : ObjFromComIP::NONE);
+}
+
+void DispParamInterfaceMarshaler::MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDestVar));
+ }
+ CONTRACTL_END;
+
+ SafeVariantClear(pDestVar);
+ if (m_pIntfMT != NULL)
+ {
+ V_UNKNOWN(pDestVar) = GetComIPFromObjectRef(pSrcObj, m_pIntfMT);
+ }
+ else
+ {
+ V_UNKNOWN(pDestVar) = GetComIPFromObjectRef(pSrcObj, m_bDispatch ? ComIpType_Dispatch : ComIpType_Unknown, NULL);
+ }
+
+ V_VT(pDestVar) = static_cast<VARTYPE>(m_bDispatch ? VT_DISPATCH : VT_UNKNOWN);
+}
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+void DispParamArrayMarshaler::MarshalNativeToManaged(VARIANT *pSrcVar, OBJECTREF *pDestObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pSrcVar));
+ }
+ CONTRACTL_END;
+
+ VARTYPE vt = m_ElementVT;
+ MethodTable *pElemMT = m_pElementMT;
+
+ // Validate the OLE variant type.
+ if ((V_VT(pSrcVar) & VT_ARRAY) == 0)
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_OLE_VARIANT);
+
+ // Retrieve the SAFEARRAY pointer.
+ SAFEARRAY *pSafeArray = V_VT(pSrcVar) & VT_BYREF ? *V_ARRAYREF(pSrcVar) : V_ARRAY(pSrcVar);
+
+ if (pSafeArray)
+ {
+ // Retrieve the variant type if it is not specified for the parameter.
+ if (vt == VT_EMPTY)
+            vt = V_VT(pSrcVar) & ~(VT_ARRAY | VT_BYREF);
+
+ if (!pElemMT && vt == VT_RECORD)
+ pElemMT = OleVariant::GetElementTypeForRecordSafeArray(pSafeArray).GetMethodTable();
+
+ // Create an array from the SAFEARRAY.
+ *(BASEARRAYREF*)pDestObj = OleVariant::CreateArrayRefForSafeArray(pSafeArray, vt, pElemMT);
+
+ // Convert the contents of the SAFEARRAY.
+ OleVariant::MarshalArrayRefForSafeArray(pSafeArray, (BASEARRAYREF*)pDestObj, vt, pElemMT);
+ }
+ else
+ {
+ *pDestObj = NULL;
+ }
+}
+
+void DispParamArrayMarshaler::MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDestVar));
+ }
+ CONTRACTL_END;
+
+ SafeArrayPtrHolder pSafeArray = NULL;
+ VARTYPE vt = m_ElementVT;
+ MethodTable *pElemMT = m_pElementMT;
+
+ // Clear the destination VARIANT.
+ SafeVariantClear(pDestVar);
+
+ if (*pSrcObj != NULL)
+ {
+ // Retrieve the VARTYPE if it is not specified for the parameter.
+ if (vt == VT_EMPTY)
+ vt = OleVariant::GetElementVarTypeForArrayRef(*((BASEARRAYREF*)pSrcObj));
+
+ // Retrieve the element method table if it is not specified for the parameter.
+ if (!pElemMT)
+ {
+ TypeHandle tempHandle = OleVariant::GetArrayElementTypeWrapperAware((BASEARRAYREF*)pSrcObj);
+ pElemMT = tempHandle.GetMethodTable();
+ }
+
+ // Allocate the safe array based on the source object and the destination VT.
+ pSafeArray = OleVariant::CreateSafeArrayForArrayRef((BASEARRAYREF*)pSrcObj, vt, pElemMT);
+ _ASSERTE(pSafeArray);
+
+ // Marshal the contents of the SAFEARRAY.
+ OleVariant::MarshalSafeArrayForArrayRef((BASEARRAYREF*)pSrcObj, pSafeArray, vt, pElemMT);
+ }
+
+ // Store the resulting SAFEARRAY in the destination VARIANT.
+ V_ARRAY(pDestVar) = pSafeArray;
+ V_VT(pDestVar) = VT_ARRAY | vt;
+
+ // Don't destroy the safearray.
+ pSafeArray.SuppressRelease();
+}
+
+void DispParamArrayMarshaler::MarshalManagedToNativeRef(OBJECTREF *pSrcObj, VARIANT *pRefVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pRefVar));
+ }
+ CONTRACTL_END;
+
+ VARIANT vtmp;
+ VARTYPE RefVt = V_VT(pRefVar) & ~VT_BYREF;
+
+ // Clear the contents of the original variant.
+ OleVariant::ExtractContentsFromByrefVariant(pRefVar, &vtmp);
+ SafeVariantClear(&vtmp);
+
+ // Marshal the array to a temp VARIANT.
+ memset(&vtmp, 0, sizeof(VARIANT));
+ MarshalManagedToNative(pSrcObj, &vtmp);
+
+ // Verify that the type of the temp VARIANT and the destination byref VARIANT
+ // are the same.
+ if (V_VT(&vtmp) != RefVt)
+ {
+ SafeVariantClear(&vtmp);
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOT_COERCE_BYREF_VARIANT);
+ }
+
+ // Copy the converted variant back into the byref variant.
+ OleVariant::InsertContentsIntoByrefVariant(&vtmp, pRefVar);
+}
+#endif // FEATURE_CLASSIC_COMINTEROP
+
+void DispParamRecordMarshaler::MarshalNativeToManaged(VARIANT *pSrcVar, OBJECTREF *pDestObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pSrcVar));
+ }
+ CONTRACTL_END;
+
+ GUID argGuid;
+ GUID paramGuid;
+ HRESULT hr = S_OK;
+ VARTYPE vt = V_VT(pSrcVar);
+
+ // Handle byref VARIANTS
+ if (vt & VT_BYREF)
+ vt = vt & ~VT_BYREF;
+
+ // Validate the OLE variant type.
+ if (vt != VT_RECORD)
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_OLE_VARIANT);
+
+ // Make sure an IRecordInfo is specified.
+ IRecordInfo *pRecInfo = pSrcVar->pRecInfo;
+ if (!pRecInfo)
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_OLE_VARIANT);
+
+ // Make sure the GUID of the IRecordInfo matches the guid of the
+ // parameter type.
+ {
+ GCX_PREEMP();
+ IfFailThrow(pRecInfo->GetGuid(&argGuid));
+ }
+ if (argGuid != GUID_NULL)
+ {
+ m_pRecordMT->GetGuid(&paramGuid, TRUE);
+ if (paramGuid != argGuid)
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_OLE_VARIANT);
+ }
+
+ OBJECTREF BoxedValueClass = NULL;
+ GCPROTECT_BEGIN(BoxedValueClass)
+ {
+ LPVOID pvRecord = pSrcVar->pvRecord;
+ if (pvRecord)
+ {
+ // Allocate an instance of the boxed value class and copy the contents
+ // of the record into it.
+ BoxedValueClass = m_pRecordMT->Allocate();
+ FmtClassUpdateCLR(&BoxedValueClass, (BYTE*)pvRecord);
+ }
+
+ *pDestObj = BoxedValueClass;
+ }
+ GCPROTECT_END();
+}
+
+void DispParamRecordMarshaler::MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDestVar));
+ }
+ CONTRACTL_END;
+
+ // Clear the destination VARIANT.
+ SafeVariantClear(pDestVar);
+
+ // Convert the value class to a VT_RECORD.
+ OleVariant::ConvertValueClassToVariant(pSrcObj, pDestVar);
+
+ // Set the VT in the VARIANT.
+ V_VT(pDestVar) = VT_RECORD;
+}
+
+void DispParamDelegateMarshaler::MarshalNativeToManaged(VARIANT *pSrcVar, OBJECTREF *pDestObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pSrcVar));
+ }
+ CONTRACTL_END;
+
+ void *pDelegate = NULL;
+
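+    // Delegates travel over IDispatch as pointer-sized integers: VT_I8/VT_UI8 on
+    // 64-bit platforms, VT_I4/VT_UI4 on 32-bit. Any other VARTYPE is rejected below.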
+ switch(V_VT(pSrcVar))
+ {
+#ifdef _WIN64
+ case VT_I8:
+ pDelegate = reinterpret_cast<void*>(static_cast<INT_PTR>(V_I8(pSrcVar)));
+ break;
+
+ case VT_UI8:
+ pDelegate = reinterpret_cast<void*>(static_cast<UINT_PTR>(V_UI8(pSrcVar)));
+ break;
+#else
+ case VT_I4:
+ pDelegate = reinterpret_cast<void*>(static_cast<INT_PTR>(V_I4(pSrcVar)));
+ break;
+
+ case VT_UI4:
+ pDelegate = reinterpret_cast<void*>(static_cast<UINT_PTR>(V_UI4(pSrcVar)));
+ break;
+#endif
+ default :
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_OLE_VARIANT);
+
+ }
+
+ if (pDelegate == NULL)
+ SetObjectReference(pDestObj, NULL, GetAppDomain());
+ else
+ SetObjectReference(pDestObj, COMDelegate::ConvertToDelegate(pDelegate, m_pDelegateMT), GetAppDomain());
+}
+
+void DispParamDelegateMarshaler::MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDestVar));
+ }
+ CONTRACTL_END;
+
+ // Clear the destination VARIANT.
+ SafeVariantClear(pDestVar);
+
+ // Convert to VARIANT
+#ifdef _WIN64
+ V_VT(pDestVar) = VT_I8;
+#else
+ V_VT(pDestVar) = VT_I4;
+#endif
+
+ // ConvertToCallback automatically takes care of the pSrcObj == NULL case
+ void *pDelegate = (void*) COMDelegate::ConvertToCallback(*pSrcObj);
+
+#ifdef _WIN64
+ V_I8(pDestVar) = static_cast<INT64>(reinterpret_cast<INT_PTR>(pDelegate));
+#else
+ V_I4(pDestVar) = static_cast<INT32>(reinterpret_cast<INT_PTR>(pDelegate));
+#endif
+}
+
+BOOL DispParamCustomMarshaler::RequiresManagedCleanup()
+{
+ LIMITED_METHOD_CONTRACT;
+ return TRUE;
+}
+
+void DispParamCustomMarshaler::MarshalNativeToManaged(VARIANT *pSrcVar, OBJECTREF *pDestObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pSrcVar));
+ }
+ CONTRACTL_END;
+
+ BOOL bByref = FALSE;
+ VARTYPE vt = V_VT(pSrcVar);
+
+ // Handle byref VARIANTS
+ if (vt & VT_BYREF)
+ {
+ vt = vt & ~VT_BYREF;
+ bByref = TRUE;
+ }
+
+ // Make sure the source VARIANT is of a valid type.
+ if (vt != VT_I4 && vt != VT_UI4 && vt != VT_UNKNOWN && vt != VT_DISPATCH)
+ COMPlusThrow(kInvalidCastException, IDS_EE_INVALID_VT_FOR_CUSTOM_MARHALER);
+
+ // Retrieve the IUnknown pointer.
+ IUnknown *pUnk = bByref ? *V_UNKNOWNREF(pSrcVar) : V_UNKNOWN(pSrcVar);
+
+ // Marshal the contents of the VARIANT using the custom marshaler.
+ *pDestObj = m_pCMHelper->InvokeMarshalNativeToManagedMeth(pUnk);
+}
+
+void DispParamCustomMarshaler::MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDestVar));
+ }
+ CONTRACTL_END;
+
+ SafeComHolder<IUnknown> pUnk = NULL;
+ SafeComHolder<IDispatch> pDisp = NULL;
+
+ // Convert the object using the custom marshaler.
+ SafeVariantClear(pDestVar);
+
+ // Invoke the MarshalManagedToNative method.
+ pUnk = (IUnknown*)m_pCMHelper->InvokeMarshalManagedToNativeMeth(*pSrcObj);
+ if (!pUnk)
+ {
+        // Put a null IDispatch pointer in the VARIANT.
+ V_VT(pDestVar) = VT_DISPATCH;
+ V_DISPATCH(pDestVar) = NULL;
+ }
+ else
+ {
+ // QI the object for IDispatch.
+ HRESULT hr = SafeQueryInterface(pUnk, IID_IDispatch, (IUnknown **)&pDisp);
+ LogInteropQI(pUnk, IID_IDispatch, hr, "DispParamCustomMarshaler::MarshalManagedToNative");
+ if (SUCCEEDED(hr))
+ {
+ // Release the IUnknown pointer since we will put the IDispatch pointer in
+ // the VARIANT.
+ ULONG cbRef = SafeRelease(pUnk);
+ pUnk.SuppressRelease();
+ LogInteropRelease(pUnk, cbRef, "Release IUnknown");
+
+ // Put the IDispatch pointer into the VARIANT.
+ V_VT(pDestVar) = VT_DISPATCH;
+ V_DISPATCH(pDestVar) = pDisp;
+ pDisp.SuppressRelease();
+ }
+ else
+ {
+ // Put the IUnknown pointer into the VARIANT.
+ V_VT(pDestVar) = VT_UNKNOWN;
+ V_UNKNOWN(pDestVar) = pUnk;
+ pUnk.SuppressRelease();
+ }
+ }
+}
+
+void DispParamCustomMarshaler::MarshalManagedToNativeRef(OBJECTREF *pSrcObj, VARIANT *pRefVar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pRefVar));
+ }
+ CONTRACTL_END;
+
+ VARTYPE RefVt = V_VT(pRefVar) & ~VT_BYREF;
+ VARIANT vtmp;
+
+ // Clear the contents of the original variant.
+ OleVariant::ExtractContentsFromByrefVariant(pRefVar, &vtmp);
+ SafeVariantClear(&vtmp);
+
+ // Convert the object using the custom marshaler.
+ V_UNKNOWN(&vtmp) = (IUnknown*)m_pCMHelper->InvokeMarshalManagedToNativeMeth(*pSrcObj);
+ V_VT(&vtmp) = m_vt;
+
+ // Call VariantChangeType if required.
+ if (V_VT(&vtmp) != RefVt)
+ {
+ HRESULT hr = SafeVariantChangeType(&vtmp, &vtmp, 0, RefVt);
+ if (FAILED(hr))
+ {
+ SafeVariantClear(&vtmp);
+ if (hr == DISP_E_TYPEMISMATCH)
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOT_COERCE_BYREF_VARIANT);
+ else
+ COMPlusThrowHR(hr);
+ }
+ }
+
+ // Copy the converted variant back into the byref variant.
+ OleVariant::InsertContentsIntoByrefVariant(&vtmp, pRefVar);
+}
+
+void DispParamCustomMarshaler::CleanUpManaged(OBJECTREF *pObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ m_pCMHelper->InvokeCleanUpManagedMeth(*pObj);
+}
diff --git a/src/vm/dispparammarshaler.h b/src/vm/dispparammarshaler.h
new file mode 100644
index 0000000000..908734070e
--- /dev/null
+++ b/src/vm/dispparammarshaler.h
@@ -0,0 +1,228 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: DispParamMarshaler.h
+//
+
+//
+// Definition of dispatch parameter marshalers.
+//
+
+
+#ifndef _DISPPARAMMARSHALER_H
+#define _DISPPARAMMARSHALER_H
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+#ifndef CROSSGEN_COMPILE
+
+#include "vars.hpp"
+#include "mlinfo.h"
+
+class DispParamMarshaler
+{
+public:
+ DispParamMarshaler()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual ~DispParamMarshaler()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual BOOL RequiresManagedCleanup();
+ virtual void MarshalNativeToManaged(VARIANT *pSrcVar, OBJECTREF *pDestObj);
+ virtual void MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar);
+ virtual void MarshalManagedToNativeRef(OBJECTREF *pSrcObj, VARIANT *pRefVar);
+ virtual void CleanUpManaged(OBJECTREF *pObj);
+};
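+// The base class above performs generic VARIANT <-> OBJECTREF conversion via
+// OleVariant. The subclasses below override individual conversions for types
+// that need special handling: currency, OLE_COLOR, HRESULT errors, interfaces,
+// SAFEARRAYs, records, delegates and custom marshalers.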
+
+
+
+class DispParamCurrencyMarshaler : public DispParamMarshaler
+{
+public:
+ DispParamCurrencyMarshaler()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual ~DispParamCurrencyMarshaler()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual void MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar);
+};
+
+
+class DispParamOleColorMarshaler : public DispParamMarshaler
+{
+public:
+ DispParamOleColorMarshaler()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual ~DispParamOleColorMarshaler()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual void MarshalNativeToManaged(VARIANT *pSrcVar, OBJECTREF *pDestObj);
+ virtual void MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar);
+ virtual void MarshalManagedToNativeRef(OBJECTREF *pSrcObj, VARIANT *pRefVar);
+};
+
+
+class DispParamErrorMarshaler : public DispParamMarshaler
+{
+public:
+ DispParamErrorMarshaler()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual ~DispParamErrorMarshaler()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual void MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar);
+};
+
+
+
+class DispParamInterfaceMarshaler : public DispParamMarshaler
+{
+public:
+ DispParamInterfaceMarshaler(BOOL bDispatch, MethodTable* pIntfMT, MethodTable *pClassMT, BOOL bClassIsHint) :
+ m_bDispatch(bDispatch),
+ m_pIntfMT(pIntfMT),
+ m_pClassMT(pClassMT),
+ m_bClassIsHint(bClassIsHint)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual ~DispParamInterfaceMarshaler()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual void MarshalNativeToManaged(VARIANT *pSrcVar, OBJECTREF *pDestObj);
+ virtual void MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar);
+
+private:
+    // If the return type is an interface, the method table of the interface is cached here.
+    // We need to cache it and use it when we call GetComIPFromObjectRef.
+ MethodTable* m_pIntfMT;
+ MethodTable* m_pClassMT;
+ BOOL m_bDispatch;
+ BOOL m_bClassIsHint;
+};
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+class DispParamArrayMarshaler : public DispParamMarshaler
+{
+public:
+ DispParamArrayMarshaler(VARTYPE ElementVT, MethodTable *pElementMT) :
+ m_ElementVT(ElementVT),
+ m_pElementMT(pElementMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual ~DispParamArrayMarshaler()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual void MarshalNativeToManaged(VARIANT *pSrcVar, OBJECTREF *pDestObj);
+ virtual void MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar);
+    virtual void MarshalManagedToNativeRef(OBJECTREF *pSrcObj, VARIANT *pRefVar);
+
+private:
+ VARTYPE m_ElementVT;
+ MethodTable* m_pElementMT;
+};
+#endif // FEATURE_CLASSIC_COMINTEROP
+
+
+class DispParamRecordMarshaler : public DispParamMarshaler
+{
+public:
+ DispParamRecordMarshaler(MethodTable *pRecordMT) :
+ m_pRecordMT(pRecordMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual ~DispParamRecordMarshaler()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual void MarshalNativeToManaged(VARIANT *pSrcVar, OBJECTREF *pDestObj);
+ virtual void MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar);
+
+private:
+ MethodTable* m_pRecordMT;
+};
+
+class DispParamDelegateMarshaler : public DispParamMarshaler
+{
+public:
+ DispParamDelegateMarshaler(MethodTable *pDelegateMT) :
+ m_pDelegateMT(pDelegateMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual ~DispParamDelegateMarshaler()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual void MarshalNativeToManaged(VARIANT *pSrcVar, OBJECTREF *pDestObj);
+ virtual void MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar);
+
+private:
+ MethodTable* m_pDelegateMT;
+};
+
+
+class DispParamCustomMarshaler : public DispParamMarshaler
+{
+public:
+ DispParamCustomMarshaler(CustomMarshalerHelper *pCMHelper, VARTYPE vt) :
+ m_pCMHelper(pCMHelper),
+ m_vt(vt)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual ~DispParamCustomMarshaler()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ virtual BOOL RequiresManagedCleanup();
+ virtual void MarshalNativeToManaged(VARIANT *pSrcVar, OBJECTREF *pDestObj);
+ virtual void MarshalManagedToNative(OBJECTREF *pSrcObj, VARIANT *pDestVar);
+ virtual void MarshalManagedToNativeRef(OBJECTREF *pSrcObj, VARIANT *pRefVar);
+ virtual void CleanUpManaged(OBJECTREF *pObj);
+
+private:
+ CustomMarshalerHelper* m_pCMHelper;
+ VARTYPE m_vt;
+};
+
+#endif //#ifndef CROSSGEN_COMPILE
+#endif // _DISPPARAMMARSHALER_H
diff --git a/src/vm/dllimport.cpp b/src/vm/dllimport.cpp
new file mode 100644
index 0000000000..6a116c9533
--- /dev/null
+++ b/src/vm/dllimport.cpp
@@ -0,0 +1,7559 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: DllImport.cpp
+//
+
+//
+// P/Invoke support.
+//
+
+
+#include "common.h"
+
+#include "vars.hpp"
+#include "stublink.h"
+#include "threads.h"
+#include "excep.h"
+#include "dllimport.h"
+#include "method.hpp"
+#include "siginfo.hpp"
+#include "security.h"
+#include "comdelegate.h"
+#include "ceeload.h"
+#include "mlinfo.h"
+#include "eeconfig.h"
+#include "comutilnative.h"
+#include "corhost.h"
+#include "asmconstants.h"
+#include "mdaassistants.h"
+#include "customattribute.h"
+#include "ilstubcache.h"
+#include "typeparse.h"
+#include "sigbuilder.h"
+#include "sigformat.h"
+#include "strongnameholders.h"
+#include "ecall.h"
+
+#include <formattype.h>
+#include "../md/compiler/custattr.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "runtimecallablewrapper.h"
+#include "clrtocomcall.h"
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif // FEATURE_PREJIT
+
+#include "eventtrace.h"
+
+#ifndef FEATURE_CORECLR
+#define NEEDDATA
+#include "fxretarget.h"
+#endif
+
+// remove when we get an updated SDK
+#define LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR 0x00000100
+#define LOAD_LIBRARY_SEARCH_DEFAULT_DIRS 0x00001000
+
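+// Append a single EH clause to the stub's fat-format EH section; clauses of
+// kind kNone are skipped.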
+void AppendEHClause(int nClauses, COR_ILMETHOD_SECT_EH * pEHSect, ILStubEHClause * pClause, int * pCurIdx)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (pClause->kind == ILStubEHClause::kNone)
+ return;
+
+ int idx = *pCurIdx;
+ *pCurIdx = idx + 1;
+
+ CorExceptionFlag flags;
+ switch (pClause->kind)
+ {
+ case ILStubEHClause::kFinally: flags = COR_ILEXCEPTION_CLAUSE_FINALLY; break;
+ case ILStubEHClause::kTypedCatch: flags = COR_ILEXCEPTION_CLAUSE_NONE; break;
+ default:
+ UNREACHABLE_MSG("unexpected ILStubEHClause kind");
+ }
+ _ASSERTE(idx < nClauses);
+ pEHSect->Fat.Clauses[idx].Flags = flags;
+ pEHSect->Fat.Clauses[idx].TryOffset = pClause->dwTryBeginOffset;
+ pEHSect->Fat.Clauses[idx].TryLength = pClause->cbTryLength;
+ pEHSect->Fat.Clauses[idx].HandlerOffset = pClause->dwHandlerBeginOffset;
+ pEHSect->Fat.Clauses[idx].HandlerLength = pClause->cbHandlerLength;
+ pEHSect->Fat.Clauses[idx].ClassToken = pClause->dwTypeToken;
+}
+
+VOID PopulateEHSect(COR_ILMETHOD_SECT_EH * pEHSect, int nClauses, ILStubEHClause * pOne, ILStubEHClause * pTwo)
+{
+ LIMITED_METHOD_CONTRACT;
+ pEHSect->Fat.Kind = (CorILMethod_Sect_EHTable | CorILMethod_Sect_FatFormat);
+ pEHSect->Fat.DataSize = COR_ILMETHOD_SECT_EH_FAT::Size(nClauses);
+
+ int curIdx = 0;
+ AppendEHClause(nClauses, pEHSect, pOne, &curIdx);
+ AppendEHClause(nClauses, pEHSect, pTwo, &curIdx);
+}
+
+StubSigDesc::StubSigDesc(MethodDesc *pMD, PInvokeStaticSigInfo* pSigInfo /*= NULL*/)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ m_pMD = pMD;
+ if (pSigInfo != NULL)
+ {
+ m_sig = pSigInfo->GetSignature();
+ m_pModule = pSigInfo->GetModule();
+ }
+ else
+ {
+ _ASSERTE(pMD != NULL);
+ m_sig = pMD->GetSignature();
+ m_pModule = pMD->GetModule(); // Used for token resolution.
+ }
+
+ if (pMD != NULL)
+ {
+ m_tkMethodDef = pMD->GetMemberDef();
+ SigTypeContext::InitTypeContext(pMD, &m_typeContext);
+ m_pLoaderModule = pMD->GetLoaderModule(); // Used for ILStubCache selection and MethodTable creation.
+ }
+ else
+ {
+ m_tkMethodDef = mdMethodDefNil;
+ m_pLoaderModule = m_pModule;
+ }
+
+ INDEBUG(InitDebugNames());
+}
+
+StubSigDesc::StubSigDesc(MethodDesc *pMD, Signature sig, Module *pModule)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ PRECONDITION(!sig.IsEmpty());
+ PRECONDITION(pModule != NULL);
+ }
+ CONTRACTL_END
+
+ m_pMD = pMD;
+ m_sig = sig;
+ m_pModule = pModule;
+
+ if (pMD != NULL)
+ {
+ m_tkMethodDef = pMD->GetMemberDef();
+ SigTypeContext::InitTypeContext(pMD, &m_typeContext);
+ m_pLoaderModule = pMD->GetLoaderModule(); // Used for ILStubCache selection and MethodTable creation.
+ }
+ else
+ {
+ m_tkMethodDef = mdMethodDefNil;
+ m_pLoaderModule = m_pModule;
+ }
+
+ INDEBUG(InitDebugNames());
+}
+
+#ifndef DACCESS_COMPILE
+
+class StubState
+{
+public:
+ virtual void SetLastError(BOOL fSetLastError) = 0;
+ virtual void BeginEmit(DWORD dwStubFlags) = 0;
+ virtual void MarshalReturn(MarshalInfo* pInfo, int argOffset) = 0;
+ virtual void MarshalArgument(MarshalInfo* pInfo, int argOffset, UINT nativeStackOffset) = 0;
+ virtual void MarshalLCID(int argIdx) = 0;
+
+#ifdef FEATURE_COMINTEROP
+ virtual void MarshalHiddenLengthArgument(MarshalInfo *pInfo, BOOL isForReturnArray) = 0;
+ virtual void MarshalFactoryReturn() = 0;
+#endif // FEATURE_COMINTEROP
+
+ virtual void EmitInvokeTarget(MethodDesc *pStubMD) = 0;
+
+ virtual void FinishEmit(MethodDesc* pMD) = 0;
+};
+
+class ILStubState : public StubState
+{
+protected:
+
+ ILStubState(
+ Module* pStubModule,
+ const Signature &signature,
+ SigTypeContext* pTypeContext,
+ BOOL fTargetHasThis,
+ BOOL fStubHasThis,
+ DWORD dwStubFlags,
+ int iLCIDParamIdx,
+ MethodDesc* pTargetMD)
+ : m_slIL(dwStubFlags, pStubModule, signature, pTypeContext, pTargetMD, iLCIDParamIdx, fTargetHasThis, fStubHasThis)
+ {
+ STANDARD_VM_CONTRACT;
+
+ m_fSetLastError = 0;
+ }
+
+public:
+ void SetLastError(BOOL fSetLastError)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_fSetLastError = fSetLastError;
+ }
+
+ // We use three stub linkers to generate IL stubs. The pre linker is the main one. It does all the marshaling and
+ // then calls the target method. The post return linker is only used to unmarshal the return value after we return
+    // from the target method. The post linker handles all the unmarshaling for byref arguments and clean-up. It
+    // also checks whether we should throw an exception, etc.
+ //
+ // Currently, we have two "emittable" ILCodeLabel's. The first one is at the beginning of the pre linker. This
+ // label is used to emit code to declare and initialize clean-up flags. Each argument which requires clean-up
+ // emits one flag. This flag is set only after the marshaling is done, and it is checked before we do any clean-up
+ // in the finally.
+ //
+ // The second "emittable" ILCodeLabel is at the beginning of the post linker. It is used to emit code which is
+    // not safe to run in the case of an exception. The rest of the post linker is wrapped in a finally, and it
+    // contains the necessary clean-up which should be executed in both normal and exception cases.
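+    //
+    // Rough shape of the emitted stub, for orientation (illustrative only):
+    //
+    //   setup:     declare locals, zero-initialize clean-up flags
+    //   marshal:   convert managed arguments to native
+    //   dispatch:  invoke the native target
+    //   unmarshal: convert the return value and byref arguments back
+    //   cleanup:   finally block that releases native resources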
+ void BeginEmit(DWORD dwStubFlags)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_slIL.Begin(dwStubFlags);
+ m_dwStubFlags = dwStubFlags;
+ }
+
+ void MarshalReturn(MarshalInfo* pInfo, int argOffset)
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pInfo));
+ }
+ CONTRACTL_END;
+
+ pInfo->GenerateReturnIL(&m_slIL, argOffset,
+ SF_IsForwardStub(m_dwStubFlags),
+ SF_IsFieldGetterStub(m_dwStubFlags),
+ SF_IsHRESULTSwapping(m_dwStubFlags));
+ }
+
+ void MarshalArgument(MarshalInfo* pInfo, int argOffset, UINT nativeStackOffset)
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pInfo));
+ }
+ CONTRACTL_END;
+
+ pInfo->GenerateArgumentIL(&m_slIL, argOffset, nativeStackOffset, SF_IsForwardStub(m_dwStubFlags));
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // Marshal the hidden length parameter for the managed parameter in pInfo
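+    // (Illustrative) For a WinRT array parameter, the native signature carries an
+    // extra length argument that has no managed counterpart, e.g.
+    //     managed:  void M(int[] arr)
+    //     native:   HRESULT M(UINT32 arrSize, INT32* arr)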
+ virtual void MarshalHiddenLengthArgument(MarshalInfo *pInfo, BOOL isForReturnArray)
+ {
+ STANDARD_VM_CONTRACT;
+
+ pInfo->MarshalHiddenLengthArgument(&m_slIL, SF_IsForwardStub(m_dwStubFlags), isForReturnArray);
+
+ if (SF_IsReverseStub(m_dwStubFlags))
+ {
+            // Hidden length arguments appear explicitly in the native signature;
+            // however, they are not in the managed signature.
+ m_slIL.AdjustTargetStackDeltaForExtraParam();
+ }
+ }
+
+ void MarshalFactoryReturn()
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(SF_IsCOMStub(m_dwStubFlags));
+ PRECONDITION(SF_IsWinRTCtorStub(m_dwStubFlags));
+ }
+ CONTRACTL_END;
+
+ ILCodeStream *pcsSetup = m_slIL.GetSetupCodeStream();
+ ILCodeStream *pcsDispatch = m_slIL.GetDispatchCodeStream();
+ ILCodeStream *pcsUnmarshal = m_slIL.GetReturnUnmarshalCodeStream();
+ ILCodeStream *pcsCleanup = m_slIL.GetCleanupCodeStream();
+
+ /*
+ * SETUP
+ */
+
+ // create a local to hold the returned pUnk and initialize to 0 in case the factory fails
+ // and we try to release it during cleanup
+ LocalDesc locDescFactoryRetVal(ELEMENT_TYPE_I);
+ DWORD dwFactoryRetValLocalNum = pcsSetup->NewLocal(locDescFactoryRetVal);
+ pcsSetup->EmitLoadNullPtr();
+ pcsSetup->EmitSTLOC(dwFactoryRetValLocalNum);
+
+ DWORD dwInnerIInspectableLocalNum = -1;
+ DWORD dwOuterIInspectableLocalNum = -1;
+ if (SF_IsWinRTCompositionStub(m_dwStubFlags))
+ {
+ // Create locals to store the outer and inner IInspectable values and initialize to null
+ // Note that we do this in the setup stream so that we're guaranteed to have a null-initialized
+ // value in the cleanup stream
+ LocalDesc locDescOuterIInspectable(ELEMENT_TYPE_I);
+ dwOuterIInspectableLocalNum = pcsSetup->NewLocal(locDescOuterIInspectable);
+ pcsSetup->EmitLoadNullPtr();
+ pcsSetup->EmitSTLOC(dwOuterIInspectableLocalNum);
+ LocalDesc locDescInnerIInspectable(ELEMENT_TYPE_I);
+ dwInnerIInspectableLocalNum = pcsSetup->NewLocal(locDescInnerIInspectable);
+ pcsSetup->EmitLoadNullPtr();
+ pcsSetup->EmitSTLOC(dwInnerIInspectableLocalNum);
+ }
+
+ /*
+ * DISPATCH
+ */
+
+ // For composition factories, add the two extra params
+ if (SF_IsWinRTCompositionStub(m_dwStubFlags))
+ {
+ // Get outer IInspectable. The helper will return NULL if this is the "top-level" constructor,
+ // and the appropriate outer pointer otherwise.
+ pcsDispatch->EmitLoadThis();
+ m_slIL.EmitLoadStubContext(pcsDispatch, m_dwStubFlags);
+ pcsDispatch->EmitCALL(METHOD__STUBHELPERS__GET_OUTER_INSPECTABLE, 2, 1);
+ pcsDispatch->EmitSTLOC(dwOuterIInspectableLocalNum);
+
+ // load the outer IInspectable (3rd last argument)
+ pcsDispatch->SetStubTargetArgType(ELEMENT_TYPE_I, false);
+ pcsDispatch->EmitLDLOC(dwOuterIInspectableLocalNum);
+
+ // pass pointer to where inner non-delegating IInspectable should be stored (2nd last argument)
+ LocalDesc locDescInnerPtr(ELEMENT_TYPE_I);
+ locDescInnerPtr.MakeByRef();
+ pcsDispatch->SetStubTargetArgType(&locDescInnerPtr, false);
+ pcsDispatch->EmitLDLOCA(dwInnerIInspectableLocalNum);
+ }
+
+ // pass pointer to the local to the factory method (last argument)
+ locDescFactoryRetVal.MakeByRef();
+ pcsDispatch->SetStubTargetArgType(&locDescFactoryRetVal, false);
+ pcsDispatch->EmitLDLOCA(dwFactoryRetValLocalNum);
+
+ /*
+ * UNMARSHAL
+ */
+
+        // Mark that the factory method has successfully returned and so cleanup will be necessary after
+ // this point.
+ m_slIL.EmitSetArgMarshalIndex(pcsUnmarshal, NDirectStubLinker::CLEANUP_INDEX_RETVAL_UNMARSHAL);
+
+ // associate the 'this' RCW with one of the returned interface pointers
+ pcsUnmarshal->EmitLoadThis();
+
+ // now we need to find the right interface pointer to load
+ if (dwInnerIInspectableLocalNum != -1)
+ {
+ // We may have a composition scenario
+ ILCodeLabel* pNonCompositionLabel = pcsUnmarshal->NewCodeLabel();
+ ILCodeLabel* pLoadedLabel = pcsUnmarshal->NewCodeLabel();
+
+ // Did we pass an outer IInspectable?
+ pcsUnmarshal->EmitLDLOC(dwOuterIInspectableLocalNum);
+ pcsUnmarshal->EmitBRFALSE(pNonCompositionLabel);
+
+ // yes, this is a composition scenario
+ {
+ // ignore the delegating interface pointer (will be released in cleanup below) - we can
+ // re-create it by QI'ing the non-delegating one.
+                // Note that keeping it could be useful in the future (it would avoid an extra QueryInterface call).
+ // Just load the non-delegating interface pointer
+ pcsUnmarshal->EmitLDLOCA(dwInnerIInspectableLocalNum);
+ pcsUnmarshal->EmitBR(pLoadedLabel);
+ }
+            // else: no, this is a non-composition scenario
+ {
+ pcsUnmarshal->EmitLabel(pNonCompositionLabel);
+
+                // ignore the non-delegating interface pointer (which should be null, but will get
+                // cleaned up below regardless, in case the factory doesn't follow the pattern properly).
+ // Just load the regular delegating interface pointer
+ pcsUnmarshal->EmitLDLOCA(dwFactoryRetValLocalNum);
+ }
+
+ pcsUnmarshal->EmitLabel(pLoadedLabel);
+ }
+ else
+ {
+ // Definitely can't be a composition scenario - use the only pointer we have
+ pcsUnmarshal->EmitLDLOCA(dwFactoryRetValLocalNum);
+ }
+
+ pcsUnmarshal->EmitCALL(METHOD__MARSHAL__INITIALIZE_WRAPPER_FOR_WINRT, 2, 0);
+
+ /*
+ * CLEANUP
+ */
+
+ // release the returned interface pointer in the finally block
+ m_slIL.SetCleanupNeeded();
+
+ ILCodeLabel *pSkipCleanupLabel = pcsCleanup->NewCodeLabel();
+
+ m_slIL.EmitCheckForArgCleanup(pcsCleanup,
+ NDirectStubLinker::CLEANUP_INDEX_RETVAL_UNMARSHAL,
+ NDirectStubLinker::BranchIfNotMarshaled,
+ pSkipCleanupLabel);
+
+ EmitInterfaceClearNative(pcsCleanup, dwFactoryRetValLocalNum);
+
+ // Note that it's a no-op to pass NULL to Clear_Native, so we call it even though we don't
+ // know if we assigned to the inner/outer IInspectable
+ if (dwInnerIInspectableLocalNum != -1)
+ {
+ EmitInterfaceClearNative(pcsCleanup, dwInnerIInspectableLocalNum);
+ }
+ if (dwOuterIInspectableLocalNum != -1)
+ {
+ EmitInterfaceClearNative(pcsCleanup, dwOuterIInspectableLocalNum);
+ }
+
+ pcsCleanup->EmitLabel(pSkipCleanupLabel);
+ }
+
+ static void EmitInterfaceClearNative(ILCodeStream* pcsEmit, DWORD dwLocalNum)
+ {
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel *pSkipClearNativeLabel = pcsEmit->NewCodeLabel();
+ pcsEmit->EmitLDLOC(dwLocalNum);
+ pcsEmit->EmitBRFALSE(pSkipClearNativeLabel);
+ pcsEmit->EmitLDLOC(dwLocalNum);
+ pcsEmit->EmitCALL(METHOD__INTERFACEMARSHALER__CLEAR_NATIVE, 1, 0);
+ pcsEmit->EmitLabel(pSkipClearNativeLabel);
+ }
+
+#endif // FEATURE_COMINTEROP
+
+ void MarshalLCID(int argIdx)
+ {
+ STANDARD_VM_CONTRACT;
+
+ ILCodeStream* pcs = m_slIL.GetDispatchCodeStream();
+
+#ifdef FEATURE_USE_LCID
+ if (SF_IsReverseStub(m_dwStubFlags))
+ {
+ if ((m_slIL.GetStubTargetCallingConv() & IMAGE_CEE_CS_CALLCONV_HASTHIS) == IMAGE_CEE_CS_CALLCONV_HASTHIS)
+ {
+ // the arg number will be incremented by LDARG if we are in an instance method
+ _ASSERTE(argIdx > 0);
+ argIdx--;
+ }
+
+ LocalDesc locDescThread(MscorlibBinder::GetClass(CLASS__THREAD));
+ DWORD dwThreadLocalNum = pcs->NewLocal(locDescThread);
+
+ // call Thread.get_CurrentThread()
+ pcs->EmitCALL(METHOD__THREAD__GET_CURRENT_THREAD, 0, 1);
+ pcs->EmitDUP();
+ pcs->EmitSTLOC(dwThreadLocalNum);
+
+ // call current_thread.get_CurrentCulture()
+ pcs->EmitCALL(METHOD__THREAD__GET_CULTURE, 1, 1);
+
+ // save the current culture
+ LocalDesc locDescCulture(MscorlibBinder::GetClass(CLASS__CULTURE_INFO));
+ DWORD dwCultureLocalNum = pcs->NewLocal(locDescCulture);
+
+ pcs->EmitSTLOC(dwCultureLocalNum);
+
+ // set a new one based on the LCID passed from unmanaged
+ pcs->EmitLDLOC(dwThreadLocalNum);
+ pcs->EmitLDARG(argIdx);
+
+ // call CultureInfo..ctor(lcid)
+ // call current_thread.set_CurrentCulture(culture)
+ pcs->EmitNEWOBJ(METHOD__CULTURE_INFO__INT_CTOR, 1);
+ pcs->EmitCALL(METHOD__THREAD__SET_CULTURE, 2, 0);
+
+ // and restore the current one after the call
+ m_slIL.SetCleanupNeeded();
+ ILCodeStream *pcsCleanup = m_slIL.GetCleanupCodeStream();
+
+ // call current_thread.set_CurrentCulture(original_culture)
+ pcsCleanup->EmitLDLOC(dwThreadLocalNum);
+ pcsCleanup->EmitLDLOC(dwCultureLocalNum);
+ pcsCleanup->EmitCALL(METHOD__THREAD__SET_CULTURE, 1, 1);
+
+ }
+ else
+ {
+ if (SF_IsCOMStub(m_dwStubFlags))
+ {
+ // We used to get LCID from current thread's culture here. The code
+ // was replaced by the hardcoded LCID_ENGLISH_US as requested by VSTO.
+ pcs->EmitLDC(0x0409); // LCID_ENGLISH_US
+ }
+ else
+ {
+ // call Thread.get_CurrentThread()
+ // call current_thread.get_CurrentCulture()
+ pcs->EmitCALL(METHOD__THREAD__GET_CURRENT_THREAD, 0, 1);
+ pcs->EmitCALL(METHOD__THREAD__GET_CULTURE, 1, 1);
+
+ //call CultureInfo.get_LCID(this)
+ pcs->EmitCALL(METHOD__CULTURE_INFO__GET_ID, 1, 1);
+ }
+ }
+#else // FEATURE_USE_LCID
+ if (SF_IsForwardStub(m_dwStubFlags))
+ {
+ pcs->EmitLDC(0x0409); // LCID_ENGLISH_US
+ }
+#endif // FEATURE_USE_LCID
+
+ // add the extra arg to the unmanaged signature
+ LocalDesc locDescNative(ELEMENT_TYPE_I4);
+ pcs->SetStubTargetArgType(&locDescNative, false);
+
+ if (SF_IsReverseStub(m_dwStubFlags))
+ {
+ // reverse the effect of SetStubTargetArgType on the stack delta
+ // (the LCID argument is explicitly passed from unmanaged but does not
+ // show up in the managed signature in any way)
+ m_slIL.AdjustTargetStackDeltaForExtraParam();
+ }
+
+ }
+
+ void SwapStubSignatures(MethodDesc* pStubMD)
+ {
+ STANDARD_VM_CONTRACT;
+
+ //
+ // Since the stub handles native-to-managed transitions, we have to swap the
+ // stub-state-calculated stub target sig with the stub sig itself. This is
+ // because the stub target sig represents the native signature and the stub
+ // sig represents the managed signature.
+ //
+ // The first step is to convert the managed signature to a module-independent
+ // signature and then pass it off to SetStubTargetMethodSig. Note that the
+ // ILStubResolver will copy the sig, so we only need to make a temporary copy
+ // of it.
+ //
+ SigBuilder sigBuilder;
+
+ {
+ SigPointer sigPtr(pStubMD->GetSig());
+ sigPtr.ConvertToInternalSignature(pStubMD->GetModule(), NULL, &sigBuilder);
+ }
+
+ //
+ // The second step is to reset the sig on the stub MethodDesc to be the
+ // stub-state-calculated stub target sig.
+ //
+ {
+ //
+ // make a domain-local copy of the sig so that this state can outlive the
+ // compile time state.
+ //
+ DWORD cbNewSig;
+ PCCOR_SIGNATURE pNewSig;
+
+ cbNewSig = GetStubTargetMethodSigLength();
+ pNewSig = (PCCOR_SIGNATURE)(void *)pStubMD->GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(cbNewSig));
+
+ memcpyNoGCRefs((void *)pNewSig, GetStubTargetMethodSig(), cbNewSig);
+
+ pStubMD->AsDynamicMethodDesc()->SetStoredMethodSig(pNewSig, cbNewSig);
+
+ SigPointer sigPtr(pNewSig, cbNewSig);
+ ULONG callConvInfo;
+ IfFailThrow(sigPtr.GetCallingConvInfo(&callConvInfo));
+
+ if (callConvInfo & CORINFO_CALLCONV_HASTHIS)
+ {
+ ((PTR_DynamicMethodDesc)pStubMD)->m_dwExtendedFlags &= ~mdStatic;
+ pStubMD->ClearStatic();
+ }
+ else
+ {
+ ((PTR_DynamicMethodDesc)pStubMD)->m_dwExtendedFlags |= mdStatic;
+ pStubMD->SetStatic();
+ }
+
+#ifndef _TARGET_X86_
+ // we store the real managed argument stack size in the stub MethodDesc on non-X86
+ UINT stackSize = pStubMD->SizeOfArgStack();
+
+ if (!FitsInU2(stackSize))
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIGTOOCOMPLEX);
+
+ pStubMD->AsDynamicMethodDesc()->SetNativeStackArgSize(static_cast<WORD>(stackSize));
+#endif // _TARGET_X86_
+ }
+
+ DWORD cbTempModuleIndependentSigLength;
+ BYTE * pTempModuleIndependentSig = (BYTE *)sigBuilder.GetSignature(&cbTempModuleIndependentSigLength);
+
+ // Finish it
+ SetStubTargetMethodSig(pTempModuleIndependentSig,
+ cbTempModuleIndependentSigLength);
+ }
+
+ void EmitInvokeTarget(MethodDesc *pStubMD)
+ {
+ STANDARD_VM_CONTRACT;
+
+ m_slIL.DoNDirect(m_slIL.GetDispatchCodeStream(), m_dwStubFlags, pStubMD);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ void EmitExceptionHandler(LocalDesc* pNativeReturnType, LocalDesc* pManagedReturnType,
+ ILCodeLabel** ppTryEndAndCatchBeginLabel, ILCodeLabel ** ppCatchEndAndReturnLabel)
+ {
+ STANDARD_VM_CONTRACT;
+
+ ILCodeStream* pcsExceptionHandler = m_slIL.NewCodeStream(ILStubLinker::kExceptionHandler);
+ *ppTryEndAndCatchBeginLabel = pcsExceptionHandler->NewCodeLabel();
+ *ppCatchEndAndReturnLabel = pcsExceptionHandler->NewCodeLabel();
+
+ pcsExceptionHandler->EmitLEAVE(*ppCatchEndAndReturnLabel);
+ pcsExceptionHandler->EmitLabel(*ppTryEndAndCatchBeginLabel);
+
+ BYTE nativeReturnElemType = pNativeReturnType->ElementType[0]; // return type of the stub
+        BYTE managedReturnElemType = pManagedReturnType->ElementType[0]; // return type of the managed target
+
+ bool returnTheHRESULT = SF_IsHRESULTSwapping(m_dwStubFlags) ||
+ (managedReturnElemType == ELEMENT_TYPE_I4) ||
+ (managedReturnElemType == ELEMENT_TYPE_U4);
+
+#ifdef MDA_SUPPORTED
+ if (!returnTheHRESULT)
+ {
+ MdaExceptionSwallowedOnCallFromCom* mda = MDA_GET_ASSISTANT(ExceptionSwallowedOnCallFromCom);
+ if (mda)
+ {
+ // on the stack: exception object, but the stub linker doesn't know it
+ pcsExceptionHandler->EmitCALL(METHOD__STUBHELPERS__GET_STUB_CONTEXT, 0, 1);
+ pcsExceptionHandler->EmitCALL(METHOD__STUBHELPERS__TRIGGER_EXCEPTION_SWALLOWED_MDA,
+ 1, // WARNING: This method takes 2 input args, the exception object and the stub context.
+ // But the ILStubLinker has no knowledge that the exception object is on the
+ // stack (because it is unaware that we've just entered a catch block), so we
+ // lie and claim that we only take one input argument.
+ 1); // returns the exception object back
+ }
+ }
+#endif // MDA_SUPPORTED
+
+ DWORD retvalLocalNum = m_slIL.GetReturnValueLocalNum();
+ BinderMethodID getHRForException;
+ if (SF_IsWinRTStub(m_dwStubFlags))
+ {
+ getHRForException = METHOD__MARSHAL__GET_HR_FOR_EXCEPTION_WINRT;
+ }
+ else
+ {
+ getHRForException = METHOD__MARSHAL__GET_HR_FOR_EXCEPTION;
+ }
+
+ pcsExceptionHandler->EmitCALL(getHRForException,
+ 0, // WARNING: This method takes 1 input arg, the exception object. But the ILStubLinker
+ // has no knowledge that the exception object is on the stack (because it is
+ // unaware that we've just entered a catch block), so we lie and claim that we
+ // don't take any input arguments.
+ 1);
+
+ switch (nativeReturnElemType)
+ {
+ default:
+ UNREACHABLE_MSG("Unexpected element type found on native return type.");
+ break;
+ case ELEMENT_TYPE_VOID:
+ _ASSERTE(retvalLocalNum == (DWORD)-1);
+ pcsExceptionHandler->EmitPOP();
+ break;
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ {
+ if (!returnTheHRESULT)
+ {
+ pcsExceptionHandler->EmitPOP();
+ pcsExceptionHandler->EmitLDC(0);
+ pcsExceptionHandler->EmitCONV_T((CorElementType)nativeReturnElemType);
+ }
+ _ASSERTE(retvalLocalNum != (DWORD)-1);
+ pcsExceptionHandler->EmitSTLOC(retvalLocalNum);
+ }
+ break;
+ case ELEMENT_TYPE_R4:
+ pcsExceptionHandler->EmitPOP();
+ pcsExceptionHandler->EmitLDC_R4(CLR_NAN_32);
+ _ASSERTE(retvalLocalNum != (DWORD)-1);
+ pcsExceptionHandler->EmitSTLOC(retvalLocalNum);
+ break;
+ case ELEMENT_TYPE_R8:
+ pcsExceptionHandler->EmitPOP();
+ pcsExceptionHandler->EmitLDC_R8(CLR_NAN_64);
+ _ASSERTE(retvalLocalNum != (DWORD)-1);
+ pcsExceptionHandler->EmitSTLOC(retvalLocalNum);
+ break;
+ case ELEMENT_TYPE_INTERNAL:
+ {
+ TypeHandle returnTypeHnd = pNativeReturnType->InternalToken;
+ CONSISTENCY_CHECK(returnTypeHnd.IsValueType());
+ _ASSERTE(retvalLocalNum != (DWORD)-1);
+ pcsExceptionHandler->EmitLDLOCA(retvalLocalNum);
+ pcsExceptionHandler->EmitINITOBJ(m_slIL.GetDispatchCodeStream()->GetToken(returnTypeHnd));
+ }
+ break;
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ pcsExceptionHandler->EmitPOP();
+ pcsExceptionHandler->EmitLDC(0);
+ pcsExceptionHandler->EmitCONV_T((CorElementType)nativeReturnElemType);
+ _ASSERTE(retvalLocalNum != (DWORD)-1);
+ pcsExceptionHandler->EmitSTLOC(retvalLocalNum);
+ break;
+ }
+
+ pcsExceptionHandler->EmitLEAVE(*ppCatchEndAndReturnLabel);
+ pcsExceptionHandler->EmitLabel(*ppCatchEndAndReturnLabel);
+ if (nativeReturnElemType != ELEMENT_TYPE_VOID)
+ {
+ _ASSERTE(retvalLocalNum != (DWORD)-1);
+ pcsExceptionHandler->EmitLDLOC(retvalLocalNum);
+ }
+ pcsExceptionHandler->EmitRET();
+ }
+#endif // FEATURE_COMINTEROP
+
+ void FinishEmit(MethodDesc* pStubMD)
+ {
+ STANDARD_VM_CONTRACT;
+
+ ILCodeStream* pcsMarshal = m_slIL.GetMarshalCodeStream();
+ ILCodeStream* pcsUnmarshal = m_slIL.GetUnmarshalCodeStream();
+ ILCodeStream* pcsDispatch = m_slIL.GetDispatchCodeStream();
+
+ if (SF_IsHRESULTSwapping(m_dwStubFlags) && m_slIL.StubHasVoidReturnType())
+ {
+ // if the return type is void, but we're doing HRESULT swapping, we
+ // need to set the return type here. Otherwise, the return value
+ // marshaler will do this.
+ pcsMarshal->SetStubTargetReturnType(ELEMENT_TYPE_I4); // HRESULT
+
+ if (SF_IsReverseStub(m_dwStubFlags))
+ {
+ // reverse interop needs to seed the return value if the
+ // managed function returns void but we're doing hresult
+ // swapping.
+ pcsUnmarshal->EmitLDC(S_OK);
+ }
+ }
+
+ LocalDesc nativeReturnType;
+ LocalDesc managedReturnType;
+ bool hasTryCatchForHRESULT = SF_IsReverseCOMStub(m_dwStubFlags)
+ && !SF_IsFieldGetterStub(m_dwStubFlags)
+ && !SF_IsFieldSetterStub(m_dwStubFlags);
+
+#ifdef FEATURE_COMINTEROP
+ if (hasTryCatchForHRESULT)
+ {
+ m_slIL.GetStubTargetReturnType(&nativeReturnType);
+ m_slIL.GetStubReturnType(&managedReturnType);
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (SF_IsHRESULTSwapping(m_dwStubFlags) && SF_IsReverseStub(m_dwStubFlags))
+ {
+ m_slIL.AdjustTargetStackDeltaForReverseInteropHRESULTSwapping();
+ }
+
+ if (SF_IsForwardCOMStub(m_dwStubFlags))
+ {
+ // Compensate for the 'this' parameter.
+ m_slIL.AdjustTargetStackDeltaForExtraParam();
+ }
+
+#if defined(_TARGET_X86_)
+ // unmanaged CALLI will get an extra arg with the real target address if host hook is enabled
+ if (SF_IsCALLIStub(m_dwStubFlags) && NDirect::IsHostHookEnabled())
+ {
+ pcsMarshal->SetStubTargetArgType(ELEMENT_TYPE_I, false);
+ }
+#endif // _TARGET_X86_
+
+        // Don't touch target signatures from this point on; otherwise it messes up the
+        // cache in ILStubState::GetStubTargetMethodSig.
+
+#ifdef _DEBUG
+ {
+ // The native and local signatures should not have any tokens.
+ // All token references should have been converted to
+ // ELEMENT_TYPE_INTERNAL.
+ //
+ // Note that MetaSig::GetReturnType and NextArg will normalize
+ // ELEMENT_TYPE_INTERNAL back to CLASS or VALUETYPE.
+ //
+ // <TODO> need to recursively check ELEMENT_TYPE_FNPTR signatures </TODO>
+
+ SigTypeContext typeContext; // this is an empty type context: COM calls are guaranteed to not be generics.
+ MetaSig nsig(
+ GetStubTargetMethodSig(),
+ GetStubTargetMethodSigLength(),
+ MscorlibBinder::GetModule(),
+ &typeContext);
+
+ CorElementType type;
+ IfFailThrow(nsig.GetReturnProps().PeekElemType(&type));
+ CONSISTENCY_CHECK(ELEMENT_TYPE_CLASS != type && ELEMENT_TYPE_VALUETYPE != type);
+
+ while (ELEMENT_TYPE_END != (type = nsig.NextArg()))
+ {
+ IfFailThrow(nsig.GetArgProps().PeekElemType(&type));
+ CONSISTENCY_CHECK(ELEMENT_TYPE_CLASS != type && ELEMENT_TYPE_VALUETYPE != type);
+ }
+ }
+#endif // _DEBUG
+
+#ifdef FEATURE_COMINTEROP
+ if (SF_IsForwardCOMStub(m_dwStubFlags))
+ {
+#if defined(MDA_SUPPORTED)
+ // We won't use this NGEN'ed stub if RaceOnRCWCleanup is enabled at run-time
+ if (!SF_IsNGENedStub(m_dwStubFlags))
+ {
+ // This code may change the type of the frame we use, so it has to be run before the code below where we
+ // retrieve the stack arg size based on the frame type.
+ MdaRaceOnRCWCleanup* mda = MDA_GET_ASSISTANT(RaceOnRCWCleanup);
+ if (mda)
+ {
+ // Here we have to register the RCW of the "this" object to the RCWStack and schedule the clean-up for it.
+ // Emit a call to StubHelpers::StubRegisterRCW() and StubHelpers::StubUnregisterRCW() to do this.
+ m_slIL.EmitLoadRCWThis(pcsMarshal, m_dwStubFlags);
+ pcsMarshal->EmitCALL(METHOD__STUBHELPERS__STUB_REGISTER_RCW, 1, 0);
+
+ // We use an extra local to track whether we need to unregister the RCW on cleanup
+ ILCodeStream *pcsSetup = m_slIL.GetSetupCodeStream();
+ DWORD dwRCWRegisteredLocalNum = pcsSetup->NewLocal(ELEMENT_TYPE_BOOLEAN);
+ pcsSetup->EmitLDC(0);
+ pcsSetup->EmitSTLOC(dwRCWRegisteredLocalNum);
+
+ pcsMarshal->EmitLDC(1);
+ pcsMarshal->EmitSTLOC(dwRCWRegisteredLocalNum);
+
+ ILCodeStream *pcsCleanup = m_slIL.GetCleanupCodeStream();
+ ILCodeLabel *pSkipCleanupLabel = pcsCleanup->NewCodeLabel();
+
+ m_slIL.SetCleanupNeeded();
+ pcsCleanup->EmitLDLOC(dwRCWRegisteredLocalNum);
+ pcsCleanup->EmitBRFALSE(pSkipCleanupLabel);
+
+ m_slIL.EmitLoadRCWThis(pcsCleanup, m_dwStubFlags);
+ pcsCleanup->EmitCALL(METHOD__STUBHELPERS__STUB_UNREGISTER_RCW, 1, 0);
+
+ pcsCleanup->EmitLabel(pSkipCleanupLabel);
+ }
+ }
+#endif // MDA_SUPPORTED
+ }
+#endif // FEATURE_COMINTEROP
+
+ // <NOTE>
+ // The profiler helpers below must be called immediately before and after the call to the target.
+ // The debugger trace call helpers are invoked from StubRareDisableWorker
+ // </NOTE>
+
+#if defined(PROFILING_SUPPORTED)
+ DWORD dwMethodDescLocalNum = -1;
+
+ // Notify the profiler of call out of the runtime
+ if (!SF_IsReverseCOMStub(m_dwStubFlags) && (CORProfilerTrackTransitions() || SF_IsNGENedStubForProfiling(m_dwStubFlags)))
+ {
+ dwMethodDescLocalNum = m_slIL.EmitProfilerBeginTransitionCallback(pcsDispatch, m_dwStubFlags);
+ _ASSERTE(dwMethodDescLocalNum != -1);
+ }
+#endif // PROFILING_SUPPORTED
+
+#ifdef MDA_SUPPORTED
+ if (SF_IsForwardStub(m_dwStubFlags) && !SF_IsNGENedStub(m_dwStubFlags) &&
+ MDA_GET_ASSISTANT(GcManagedToUnmanaged))
+ {
+ m_slIL.EmitCallGcCollectForMDA(pcsDispatch, m_dwStubFlags);
+ }
+#endif // MDA_SUPPORTED
+
+ // Invoke the target (calli, call method, call delegate, get/set field, etc.)
+ EmitInvokeTarget(pStubMD);
+
+ // Saving last error must be the first thing we do after returning from the target
+ if (m_fSetLastError && SF_IsForwardStub(m_dwStubFlags))
+ {
+ m_slIL.EmitSetLastError(pcsDispatch);
+ }
+
+#if defined(_TARGET_X86_)
+ if (SF_IsForwardDelegateStub(m_dwStubFlags))
+ {
+ // the delegate may have an intercept stub attached to its sync block so we should
+ // prevent it from being garbage collected when the call is in progress
+ pcsDispatch->EmitLoadThis();
+ pcsDispatch->EmitCALL(METHOD__GC__KEEP_ALIVE, 1, 0);
+ }
+#endif // defined(_TARGET_X86_)
+
+#ifdef MDA_SUPPORTED
+ if (SF_IsForwardStub(m_dwStubFlags) && !SF_IsNGENedStub(m_dwStubFlags) &&
+ MDA_GET_ASSISTANT(GcUnmanagedToManaged))
+ {
+ m_slIL.EmitCallGcCollectForMDA(pcsDispatch, m_dwStubFlags);
+ }
+#endif // MDA_SUPPORTED
+
+#ifdef VERIFY_HEAP
+ if (SF_IsForwardStub(m_dwStubFlags) && g_pConfig->InteropValidatePinnedObjects())
+ {
+ // call StubHelpers.ValidateObject/StubHelpers.ValidateByref on pinned locals
+ m_slIL.EmitObjectValidation(pcsDispatch, m_dwStubFlags);
+ }
+#endif // VERIFY_HEAP
+
+#if defined(PROFILING_SUPPORTED)
+ // Notify the profiler of return back into the runtime
+ if (dwMethodDescLocalNum != (DWORD)-1)
+ {
+ m_slIL.EmitProfilerEndTransitionCallback(pcsDispatch, m_dwStubFlags, dwMethodDescLocalNum);
+ }
+#endif // PROFILING_SUPPORTED
+
+#ifdef FEATURE_COMINTEROP
+ if (SF_IsForwardCOMStub(m_dwStubFlags))
+ {
+ // Make sure that the RCW stays alive for the duration of the call. Note that if we do HRESULT
+ // swapping, we'll pass 'this' to GetCOMHRExceptionObject after returning from the target so
+ // GC.KeepAlive is not necessary.
+ if (!SF_IsHRESULTSwapping(m_dwStubFlags))
+ {
+ m_slIL.EmitLoadRCWThis(pcsDispatch, m_dwStubFlags);
+ pcsDispatch->EmitCALL(METHOD__GC__KEEP_ALIVE, 1, 0);
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (SF_IsHRESULTSwapping(m_dwStubFlags))
+ {
+ if (SF_IsForwardStub(m_dwStubFlags))
+ {
+ ILCodeLabel* pSkipThrowLabel = pcsDispatch->NewCodeLabel();
+
+ pcsDispatch->EmitDUP();
+ pcsDispatch->EmitLDC(0);
+ pcsDispatch->EmitBGE(pSkipThrowLabel);
+
+#ifdef FEATURE_COMINTEROP
+ if (SF_IsCOMStub(m_dwStubFlags))
+ {
+ m_slIL.EmitLoadStubContext(pcsDispatch, m_dwStubFlags);
+ m_slIL.EmitLoadRCWThis(pcsDispatch, m_dwStubFlags);
+
+ if (SF_IsWinRTStub(m_dwStubFlags))
+ {
+ pcsDispatch->EmitCALL(METHOD__STUBHELPERS__GET_COM_HR_EXCEPTION_OBJECT_WINRT, 3, 1);
+ }
+ else
+ {
+ pcsDispatch->EmitCALL(METHOD__STUBHELPERS__GET_COM_HR_EXCEPTION_OBJECT, 3, 1);
+ }
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ pcsDispatch->EmitCALL(METHOD__STUBHELPERS__GET_HR_EXCEPTION_OBJECT, 1, 1);
+ }
+
+ pcsDispatch->EmitTHROW();
+ pcsDispatch->EmitLDC(0); // keep the IL stack balanced across the branch and the fall-through
+ pcsDispatch->EmitLabel(pSkipThrowLabel);
+ pcsDispatch->EmitPOP();
+ }
+ }
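+
+ // For reference, the forward HRESULT-swapping sequence emitted above corresponds
+ // roughly to the following IL (a sketch; the label name is illustrative):
+ //
+ //   dup                      // duplicate the returned HRESULT
+ //   ldc.i4.0
+ //   bge    SKIP_THROW        // non-negative HRESULTs are success codes
+ //   <load helper arguments>
+ //   call   StubHelpers::GetHRExceptionObject (or the COM/WinRT variant)
+ //   throw
+ //   ldc.i4.0                 // dead code: keeps the IL stack balanced across the branch
+ // SKIP_THROW:
+ //   pop                      // discard the HRESULT on the success path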
+
+ m_slIL.End(m_dwStubFlags);
+ if (!hasTryCatchForHRESULT) // we will 'leave' the try scope and then 'ret' from outside
+ {
+ pcsUnmarshal->EmitRET();
+ }
+
+ DWORD dwJitFlags = CORJIT_FLG_IL_STUB;
+
+ if (m_slIL.HasInteropParamExceptionInfo())
+ {
+ // This code will not use the secret parameter, so we do not
+ // tell the JIT to bother with it.
+ m_slIL.ClearCode();
+ m_slIL.GenerateInteropParamException(pcsMarshal);
+ }
+ else if (SF_IsFieldGetterStub(m_dwStubFlags) || SF_IsFieldSetterStub(m_dwStubFlags))
+ {
+ // Field access stubs are not shared and do not use the secret parameter.
+ }
+#ifndef _WIN64
+ else if (SF_IsForwardDelegateStub(m_dwStubFlags) ||
+ (SF_IsForwardCOMStub(m_dwStubFlags) && SF_IsWinRTDelegateStub(m_dwStubFlags)))
+ {
+ // Forward delegate stubs get all the context they need in 'this' so they
+ // don't use the secret parameter, except on AMD64 where we use the secret
+ // argument to pass the real target to the stub-for-host.
+ }
+#endif // !_WIN64
+ else
+ {
+ // All other IL stubs will need to use the secret parameter.
+ dwJitFlags |= CORJIT_FLG_PUBLISH_SECRET_PARAM;
+ }
+
+ if (SF_IsReverseStub(m_dwStubFlags))
+ {
+ SwapStubSignatures(pStubMD);
+ }
+
+ ILCodeLabel* pTryEndAndCatchBeginLabel = NULL; // try ends at the same place the catch begins
+ ILCodeLabel* pCatchEndAndReturnLabel = NULL; // catch ends at the same place we resume afterwards
+#ifdef FEATURE_COMINTEROP
+ if (hasTryCatchForHRESULT)
+ {
+ EmitExceptionHandler(&nativeReturnType, &managedReturnType, &pTryEndAndCatchBeginLabel, &pCatchEndAndReturnLabel);
+ }
+#endif // FEATURE_COMINTEROP
+
+ UINT maxStack;
+ size_t cbCode;
+ DWORD cbSig;
+ BYTE * pbBuffer;
+ BYTE * pbLocalSig;
+
+ cbCode = m_slIL.Link(&maxStack);
+ cbSig = m_slIL.GetLocalSigSize();
+
+ ILStubResolver * pResolver = pStubMD->AsDynamicMethodDesc()->GetILStubResolver();
+ COR_ILMETHOD_DECODER * pILHeader = pResolver->AllocGeneratedIL(cbCode, cbSig, maxStack);
+ pbBuffer = (BYTE *)pILHeader->Code;
+ pbLocalSig = (BYTE *)pILHeader->LocalVarSig;
+ _ASSERTE(cbSig == pILHeader->cbLocalVarSig);
+
+ ILStubEHClause cleanupTryFinally = { 0 };
+ ILStubEHClause convertToHRTryCatch = { 0 };
+ m_slIL.GetCleanupFinallyOffsets(&cleanupTryFinally);
+
+#ifdef FEATURE_COMINTEROP
+ if (hasTryCatchForHRESULT)
+ {
+ convertToHRTryCatch.kind = ILStubEHClause::kTypedCatch;
+ convertToHRTryCatch.dwTryBeginOffset = 0;
+ convertToHRTryCatch.dwHandlerBeginOffset = ((DWORD)pTryEndAndCatchBeginLabel->GetCodeOffset());
+ convertToHRTryCatch.cbTryLength = convertToHRTryCatch.dwHandlerBeginOffset - convertToHRTryCatch.dwTryBeginOffset;
+ convertToHRTryCatch.cbHandlerLength = ((DWORD)pCatchEndAndReturnLabel->GetCodeOffset()) - convertToHRTryCatch.dwHandlerBeginOffset;
+ convertToHRTryCatch.dwTypeToken = pcsDispatch->GetToken(g_pObjectClass);
+ }
+#endif // FEATURE_COMINTEROP
+
+ int nEHClauses = 0;
+
+ if (convertToHRTryCatch.cbHandlerLength != 0)
+ nEHClauses++;
+
+ if (cleanupTryFinally.cbHandlerLength != 0)
+ nEHClauses++;
+
+ if (nEHClauses > 0)
+ {
+ COR_ILMETHOD_SECT_EH* pEHSect = pResolver->AllocEHSect(nEHClauses);
+ PopulateEHSect(pEHSect, nEHClauses, &cleanupTryFinally, &convertToHRTryCatch);
+ }
+
+ m_slIL.GenerateCode(pbBuffer, cbCode);
+ m_slIL.GetLocalSig(pbLocalSig, cbSig);
+
+ pResolver->SetJitFlags(dwJitFlags);
+
+#ifdef LOGGING
+ LOG((LF_STUBS, LL_INFO1000, "---------------------------------------------------------------------\n"));
+ LOG((LF_STUBS, LL_INFO1000, "NDirect IL stub dump: %s::%s\n", pStubMD->m_pszDebugClassName, pStubMD->m_pszDebugMethodName));
+ if (LoggingEnabled() && LoggingOn(LF_STUBS, LL_INFO1000))
+ {
+ CQuickBytes qbManaged;
+ CQuickBytes qbLocal;
+
+ PCCOR_SIGNATURE pManagedSig;
+ ULONG cManagedSig;
+
+ IMDInternalImport* pIMDI = MscorlibBinder::GetModule()->GetMDImport();
+
+ pStubMD->GetSig(&pManagedSig, &cManagedSig);
+
+ PrettyPrintSig(pManagedSig, cManagedSig, "*", &qbManaged, pStubMD->GetMDImport(), NULL);
+ PrettyPrintSig(pbLocalSig, cbSig, NULL, &qbLocal, pIMDI, NULL);
+
+ LOG((LF_STUBS, LL_INFO1000, "incoming managed sig: %p: %s\n", pManagedSig, qbManaged.Ptr()));
+ LOG((LF_STUBS, LL_INFO1000, "locals sig: %p: %s\n", pbLocalSig+1, qbLocal.Ptr()));
+
+ if (cleanupTryFinally.cbHandlerLength != 0)
+ {
+ LOG((LF_STUBS, LL_INFO1000, "try_begin: 0x%04x try_end: 0x%04x finally_begin: 0x%04x finally_end: 0x%04x \n",
+ cleanupTryFinally.dwTryBeginOffset, cleanupTryFinally.dwTryBeginOffset + cleanupTryFinally.cbTryLength,
+ cleanupTryFinally.dwHandlerBeginOffset, cleanupTryFinally.dwHandlerBeginOffset + cleanupTryFinally.cbHandlerLength));
+ }
+ if (convertToHRTryCatch.cbHandlerLength != 0)
+ {
+ LOG((LF_STUBS, LL_INFO1000, "try_begin: 0x%04x try_end: 0x%04x catch_begin: 0x%04x catch_end: 0x%04x type_token: 0x%08x\n",
+ convertToHRTryCatch.dwTryBeginOffset, convertToHRTryCatch.dwTryBeginOffset + convertToHRTryCatch.cbTryLength,
+ convertToHRTryCatch.dwHandlerBeginOffset, convertToHRTryCatch.dwHandlerBeginOffset + convertToHRTryCatch.cbHandlerLength,
+ convertToHRTryCatch.dwTypeToken));
+ }
+
+ LogILStubFlags(LF_STUBS, LL_INFO1000, m_dwStubFlags);
+
+ m_slIL.LogILStub(dwJitFlags);
+ }
+ LOG((LF_STUBS, LL_INFO1000, "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n"));
+#endif // LOGGING
+
+#ifndef FEATURE_CORECLR
+ //
+ // Publish ETW events for IL stubs
+ //
+
+ // If the category and the event are enabled...
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ILStubGenerated))
+ {
+ EtwOnILStubGenerated(
+ pStubMD,
+ pbLocalSig,
+ cbSig,
+ dwJitFlags,
+ &convertToHRTryCatch,
+ &cleanupTryFinally,
+ maxStack,
+ (DWORD)cbCode
+ );
+ }
+#endif // !FEATURE_CORECLR
+ }
+
+#ifndef FEATURE_CORECLR
+ //---------------------------------------------------------------------------------------
+ //
+ void
+ EtwOnILStubGenerated(
+ MethodDesc * pStubMD,
+ PCCOR_SIGNATURE pbLocalSig,
+ DWORD cbSig,
+ DWORD dwJitFlags,
+ ILStubEHClause * pConvertToHRTryCatchBounds,
+ ILStubEHClause * pCleanupTryFinallyBounds,
+ DWORD maxStack,
+ DWORD cbCode)
+ {
+ STANDARD_VM_CONTRACT;
+
+ //
+ // Interop Method Information
+ //
+ MethodDesc *pTargetMD = m_slIL.GetTargetMD();
+ SString strNamespaceOrClassName, strMethodName, strMethodSignature;
+ UINT64 uModuleId = 0;
+
+ if (pTargetMD)
+ {
+ pTargetMD->GetMethodInfoWithNewSig(strNamespaceOrClassName, strMethodName, strMethodSignature);
+ uModuleId = (UINT64)pTargetMD->GetModule()->GetAddrModuleID();
+ }
+
+ //
+ // Stub Method Signature
+ //
+ SString stubNamespaceOrClassName, stubMethodName, stubMethodSignature;
+ pStubMD->GetMethodInfoWithNewSig(stubNamespaceOrClassName, stubMethodName, stubMethodSignature);
+
+ IMDInternalImport *pStubImport = pStubMD->GetModule()->GetMDImport();
+
+ CQuickBytes qbLocal;
+ PrettyPrintSig(pbLocalSig, (DWORD)cbSig, NULL, &qbLocal, pStubImport, NULL);
+
+ SString strLocalSig(SString::Utf8, (LPCUTF8)qbLocal.Ptr());
+
+ //
+ // Native Signature
+ //
+ SString strNativeSignature(SString::Utf8);
+ if (m_dwStubFlags & NDIRECTSTUB_FL_REVERSE_INTEROP)
+ {
+ // Reverse interop. Use StubSignature
+ strNativeSignature = stubMethodSignature;
+ }
+ else
+ {
+ // Forward interop. Use the StubTarget signature
+ PCCOR_SIGNATURE pCallTargetSig = GetStubTargetMethodSig();
+ DWORD cCallTargetSig = GetStubTargetMethodSigLength();
+
+ CQuickBytes qbCallTargetSig;
+
+ PrettyPrintSig(pCallTargetSig, cCallTargetSig, "", &qbCallTargetSig, pStubImport, NULL);
+
+ strNativeSignature.SetUTF8((LPCUTF8)qbCallTargetSig.Ptr());
+ }
+
+ //
+ // Dump IL stub code
+ //
+ SString strILStubCode;
+ strILStubCode.Preallocate(4096); // Preallocate 4K bytes to avoid unnecessary growth
+
+ SString codeSizeFormat;
+ codeSizeFormat.LoadResource(CCompRC::Optional, IDS_EE_INTEROP_CODE_SIZE_COMMENT);
+ strILStubCode.AppendPrintf(W("// %s\t%d (0x%04x)\n"), codeSizeFormat.GetUnicode(), cbCode, cbCode);
+ strILStubCode.AppendPrintf(W(".maxstack %d \n"), maxStack);
+ strILStubCode.AppendPrintf(W(".locals %s\n"), strLocalSig.GetUnicode());
+
+ m_slIL.LogILStub(dwJitFlags, &strILStubCode);
+
+ if (pConvertToHRTryCatchBounds->cbTryLength != 0 && pConvertToHRTryCatchBounds->cbHandlerLength != 0)
+ {
+ strILStubCode.AppendPrintf(
+ W(".try IL_%04x to IL_%04x catch handler IL_%04x to IL_%04x\n"),
+ pConvertToHRTryCatchBounds->dwTryBeginOffset,
+ pConvertToHRTryCatchBounds->dwTryBeginOffset + pConvertToHRTryCatchBounds->cbTryLength,
+ pConvertToHRTryCatchBounds->dwHandlerBeginOffset,
+ pConvertToHRTryCatchBounds->dwHandlerBeginOffset + pConvertToHRTryCatchBounds->cbHandlerLength);
+ }
+
+ if (pCleanupTryFinallyBounds->cbTryLength != 0 && pCleanupTryFinallyBounds->cbHandlerLength != 0)
+ {
+ strILStubCode.AppendPrintf(
+ W(".try IL_%04x to IL_%04x finally handler IL_%04x to IL_%04x\n"),
+ pCleanupTryFinallyBounds->dwTryBeginOffset,
+ pCleanupTryFinallyBounds->dwTryBeginOffset + pCleanupTryFinallyBounds->cbTryLength,
+ pCleanupTryFinallyBounds->dwHandlerBeginOffset,
+ pCleanupTryFinallyBounds->dwHandlerBeginOffset + pCleanupTryFinallyBounds->cbHandlerLength);
+ }
+
+ //
+ // Fire the event
+ //
+ DWORD dwFlags = 0;
+ if (m_dwStubFlags & NDIRECTSTUB_FL_REVERSE_INTEROP)
+ dwFlags |= ETW_IL_STUB_FLAGS_REVERSE_INTEROP;
+#ifdef FEATURE_COMINTEROP
+ if (m_dwStubFlags & NDIRECTSTUB_FL_COM)
+ dwFlags |= ETW_IL_STUB_FLAGS_COM_INTEROP;
+#endif // FEATURE_COMINTEROP
+ if (m_dwStubFlags & NDIRECTSTUB_FL_NGENEDSTUB)
+ dwFlags |= ETW_IL_STUB_FLAGS_NGENED_STUB;
+ if (m_dwStubFlags & NDIRECTSTUB_FL_DELEGATE)
+ dwFlags |= ETW_IL_STUB_FLAGS_DELEGATE;
+ if (m_dwStubFlags & NDIRECTSTUB_FL_CONVSIGASVARARG)
+ dwFlags |= ETW_IL_STUB_FLAGS_VARARG;
+ if (m_dwStubFlags & NDIRECTSTUB_FL_UNMANAGED_CALLI)
+ dwFlags |= ETW_IL_STUB_FLAGS_UNMANAGED_CALLI;
+
+ DWORD dwToken = 0;
+ if (pTargetMD)
+ dwToken = pTargetMD->GetMemberDef();
+
+
+ //
+ // Truncate string fields. Make sure the whole event is less than 64KB
+ //
+ TruncateUnicodeString(strNamespaceOrClassName, ETW_IL_STUB_EVENT_STRING_FIELD_MAXSIZE);
+ TruncateUnicodeString(strMethodName, ETW_IL_STUB_EVENT_STRING_FIELD_MAXSIZE);
+ TruncateUnicodeString(strMethodSignature, ETW_IL_STUB_EVENT_STRING_FIELD_MAXSIZE);
+ TruncateUnicodeString(strNativeSignature, ETW_IL_STUB_EVENT_STRING_FIELD_MAXSIZE);
+ TruncateUnicodeString(stubMethodSignature, ETW_IL_STUB_EVENT_STRING_FIELD_MAXSIZE);
+ TruncateUnicodeString(strILStubCode, ETW_IL_STUB_EVENT_CODE_STRING_FIELD_MAXSIZE);
+
+ //
+ // Fire ETW event
+ //
+ FireEtwILStubGenerated(
+ GetClrInstanceId(), // ClrInstanceId
+ uModuleId, // ModuleIdentifier
+ (UINT64)pStubMD, // StubMethodIdentifier
+ dwFlags, // StubFlags
+ dwToken, // ManagedInteropMethodToken
+ strNamespaceOrClassName.GetUnicode(), // ManagedInteropMethodNamespace
+ strMethodName.GetUnicode(), // ManagedInteropMethodName
+ strMethodSignature.GetUnicode(), // ManagedInteropMethodSignature
+ strNativeSignature.GetUnicode(), // NativeSignature
+ stubMethodSignature.GetUnicode(), // StubMethodSignature
+ strILStubCode.GetUnicode() // StubMethodILCode
+ );
+ } // EtwOnILStubGenerated
+#endif // !FEATURE_CORECLR
+
+#ifdef LOGGING
+ //---------------------------------------------------------------------------------------
+ //
+ static inline void LogOneFlag(DWORD flags, DWORD flag, LPCSTR str, DWORD facility, DWORD level)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (flags & flag)
+ {
+ LOG((facility, level, str));
+ }
+ }
+
+ static void LogILStubFlags(DWORD facility, DWORD level, DWORD dwStubFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ LOG((facility, level, "dwStubFlags: 0x%08x\n", dwStubFlags));
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_CONVSIGASVARARG, " NDIRECTSTUB_FL_CONVSIGASVARARG\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_BESTFIT, " NDIRECTSTUB_FL_BESTFIT\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_THROWONUNMAPPABLECHAR, " NDIRECTSTUB_FL_THROWONUNMAPPABLECHAR\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_NGENEDSTUB, " NDIRECTSTUB_FL_NGENEDSTUB\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_DELEGATE, " NDIRECTSTUB_FL_DELEGATE\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_DOHRESULTSWAPPING, " NDIRECTSTUB_FL_DOHRESULTSWAPPING\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_REVERSE_INTEROP, " NDIRECTSTUB_FL_REVERSE_INTEROP\n", facility, level);
+#ifdef FEATURE_COMINTEROP
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_COM, " NDIRECTSTUB_FL_COM\n", facility, level);
+#endif // FEATURE_COMINTEROP
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_NGENEDSTUBFORPROFILING, " NDIRECTSTUB_FL_NGENEDSTUBFORPROFILING\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL, " NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_HASDECLARATIVESECURITY, " NDIRECTSTUB_FL_HASDECLARATIVESECURITY\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_UNMANAGED_CALLI, " NDIRECTSTUB_FL_UNMANAGED_CALLI\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_TRIGGERCCTOR, " NDIRECTSTUB_FL_TRIGGERCCTOR\n", facility, level);
+#ifdef FEATURE_COMINTEROP
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_FIELDGETTER, " NDIRECTSTUB_FL_FIELDGETTER\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_FIELDSETTER, " NDIRECTSTUB_FL_FIELDSETTER\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_WINRT, " NDIRECTSTUB_FL_WINRT\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_WINRTDELEGATE, " NDIRECTSTUB_FL_WINRTDELEGATE\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_WINRTSHAREDGENERIC, " NDIRECTSTUB_FL_WINRTSHAREDGENERIC\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_WINRTCTOR, " NDIRECTSTUB_FL_WINRTCTOR\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_WINRTCOMPOSITION, " NDIRECTSTUB_FL_WINRTCOMPOSITION\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_WINRTSTATIC, " NDIRECTSTUB_FL_WINRTSTATIC\n", facility, level);
+ LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_WINRTHASREDIRECTION, " NDIRECTSTUB_FL_WINRTHASREDIRECTION\n", facility, level);
+#endif // FEATURE_COMINTEROP
+
+ //
+ // no need to log the internal flags, let's just assert what we expect to see...
+ //
+ CONSISTENCY_CHECK(!SF_IsCOMLateBoundStub(dwStubFlags));
+ CONSISTENCY_CHECK(!SF_IsCOMEventCallStub(dwStubFlags));
+
+ DWORD dwKnownMask =
+ NDIRECTSTUB_FL_CONVSIGASVARARG |
+ NDIRECTSTUB_FL_BESTFIT |
+ NDIRECTSTUB_FL_THROWONUNMAPPABLECHAR |
+ NDIRECTSTUB_FL_NGENEDSTUB |
+ NDIRECTSTUB_FL_DELEGATE |
+ NDIRECTSTUB_FL_DOHRESULTSWAPPING |
+ NDIRECTSTUB_FL_REVERSE_INTEROP |
+ NDIRECTSTUB_FL_NGENEDSTUBFORPROFILING |
+ NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL |
+ NDIRECTSTUB_FL_HASDECLARATIVESECURITY |
+ NDIRECTSTUB_FL_UNMANAGED_CALLI |
+ NDIRECTSTUB_FL_TRIGGERCCTOR |
+#ifdef FEATURE_COMINTEROP
+ NDIRECTSTUB_FL_COM |
+ NDIRECTSTUB_FL_COMLATEBOUND | // internal
+ NDIRECTSTUB_FL_COMEVENTCALL | // internal
+ NDIRECTSTUB_FL_FIELDGETTER |
+ NDIRECTSTUB_FL_FIELDSETTER |
+ NDIRECTSTUB_FL_WINRT |
+ NDIRECTSTUB_FL_WINRTDELEGATE |
+ NDIRECTSTUB_FL_WINRTCTOR |
+ NDIRECTSTUB_FL_WINRTCOMPOSITION |
+ NDIRECTSTUB_FL_WINRTSTATIC |
+ NDIRECTSTUB_FL_WINRTHASREDIRECTION |
+#endif // FEATURE_COMINTEROP
+ NULL;
+
+ DWORD dwUnknownFlags = dwStubFlags & ~dwKnownMask;
+ if (0 != dwUnknownFlags)
+ {
+ LOG((facility, level, "UNKNOWN FLAGS: 0x%08x\n", dwUnknownFlags));
+ }
+ }
+#endif // LOGGING
+
+ PCCOR_SIGNATURE GetStubTargetMethodSig()
+ {
+ CONTRACT(PCCOR_SIGNATURE)
+ {
+ STANDARD_VM_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_NOT_OK));
+ }
+ CONTRACT_END;
+
+ BYTE *pb;
+
+ if (!m_qbNativeFnSigBuffer.Size())
+ {
+ DWORD cb = m_slIL.GetStubTargetMethodSigSize();
+ pb = (BYTE *)m_qbNativeFnSigBuffer.AllocThrows(cb);
+
+ m_slIL.GetStubTargetMethodSig(pb, cb);
+ }
+ else
+ {
+ pb = (BYTE*)m_qbNativeFnSigBuffer.Ptr();
+ }
+
+ RETURN pb;
+ }
+
+ DWORD
+ GetStubTargetMethodSigLength()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return m_slIL.GetStubTargetMethodSigSize();
+ }
+
+ void SetStubTargetMethodSig(PCCOR_SIGNATURE pSig, DWORD cSig)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ m_slIL.SetStubTargetMethodSig(pSig, cSig);
+ m_qbNativeFnSigBuffer.Shrink(0);
+ }
+
+ TokenLookupMap* GetTokenLookupMap() { WRAPPER_NO_CONTRACT; return m_slIL.GetTokenLookupMap(); }
+
+protected:
+ CQuickBytes m_qbNativeFnSigBuffer;
+ NDirectStubLinker m_slIL;
+ BOOL m_fSetLastError;
+ DWORD m_dwStubFlags;
+};
+
+
+class PInvoke_ILStubState : public ILStubState
+{
+public:
+
+ PInvoke_ILStubState(Module* pStubModule, const Signature &signature, SigTypeContext *pTypeContext, DWORD dwStubFlags,
+ CorPinvokeMap unmgdCallConv, int iLCIDParamIdx, MethodDesc* pTargetMD)
+ : ILStubState(
+ pStubModule,
+ signature,
+ pTypeContext,
+ TargetHasThis(dwStubFlags),
+ StubHasThis(dwStubFlags),
+ dwStubFlags,
+ iLCIDParamIdx,
+ pTargetMD)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (SF_IsForwardStub(dwStubFlags))
+ {
+ m_slIL.SetCallingConvention(unmgdCallConv, SF_IsVarArgStub(dwStubFlags));
+ }
+ }
+
+private:
+ static BOOL TargetHasThis(DWORD dwStubFlags)
+ {
+ //
+ // in reverse pinvoke on delegate, the managed target will
+ // have a 'this' pointer, but the unmanaged signature does
+ // not.
+ //
+ return SF_IsReverseDelegateStub(dwStubFlags);
+ }
+
+ static BOOL StubHasThis(DWORD dwStubFlags)
+ {
+ //
+ // in forward pinvoke on a delegate, the stub will have a
+ // 'this' pointer, but the unmanaged target will not.
+ //
+ return SF_IsForwardDelegateStub(dwStubFlags);
+ }
+};
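+
+// To summarize the two helpers above (illustrative):
+//
+//   forward P/Invoke on a delegate: the stub has 'this' (the delegate), the target does not
+//   reverse P/Invoke on a delegate: the stub has no 'this', the managed target does
+//   ordinary P/Invoke:              neither the stub nor the target has 'this'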
+
+#ifdef FEATURE_COMINTEROP
+class CLRToCOM_ILStubState : public ILStubState
+{
+public:
+
+ CLRToCOM_ILStubState(Module* pStubModule, const Signature &signature, SigTypeContext *pTypeContext, DWORD dwStubFlags,
+ int iLCIDParamIdx, MethodDesc* pTargetMD)
+ : ILStubState(
+ pStubModule,
+ signature,
+ pTypeContext,
+ TRUE,
+ !SF_IsWinRTStaticStub(dwStubFlags), // fStubHasThis
+ dwStubFlags,
+ iLCIDParamIdx,
+ pTargetMD)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (SF_IsForwardStub(dwStubFlags))
+ {
+ m_slIL.SetCallingConvention(pmCallConvStdcall, SF_IsVarArgStub(dwStubFlags));
+ }
+ }
+
+ void BeginEmit(DWORD dwStubFlags) // CLR to COM IL
+ {
+ STANDARD_VM_CONTRACT;
+
+ ILStubState::BeginEmit(dwStubFlags);
+
+ ILCodeStream *pcsDispatch = m_slIL.GetDispatchCodeStream();
+
+ // add the 'this' COM IP parameter to the target CALLI
+ m_slIL.GetMarshalCodeStream()->SetStubTargetArgType(ELEMENT_TYPE_I, false);
+
+ // convert 'this' to COM IP and the target method entry point
+ m_slIL.EmitLoadRCWThis(pcsDispatch, m_dwStubFlags);
+
+#ifdef _WIN64
+ if (SF_IsWinRTDelegateStub(m_dwStubFlags))
+ {
+ // write the stub context (EEImplMethodDesc representing the Invoke)
+ // into the secret arg so it shows up in the InlinedCallFrame and can
+ // be used by stub for host
+
+ pcsDispatch->EmitCALL(METHOD__STUBHELPERS__GET_STUB_CONTEXT_ADDR, 0, 1);
+ m_slIL.EmitLoadStubContext(pcsDispatch, dwStubFlags);
+ pcsDispatch->EmitSTIND_I();
+ pcsDispatch->EmitCALL(METHOD__STUBHELPERS__GET_STUB_CONTEXT, 0, 1);
+ }
+ else
+#endif // _WIN64
+ {
+ m_slIL.EmitLoadStubContext(pcsDispatch, dwStubFlags);
+ }
+
+ pcsDispatch->EmitLDLOCA(m_slIL.GetTargetEntryPointLocalNum());
+
+ BinderMethodID getCOMIPMethod;
+ bool fDoPostCallIPCleanup = true;
+
+ if (!SF_IsNGENedStub(dwStubFlags) && NDirect::IsHostHookEnabled())
+ {
+ // always use the non-optimized helper if we are hosted
+ getCOMIPMethod = METHOD__STUBHELPERS__GET_COM_IP_FROM_RCW;
+ }
+ else if (SF_IsWinRTStub(dwStubFlags))
+ {
+ // WinRT uses optimized helpers
+ if (SF_IsWinRTSharedGenericStub(dwStubFlags))
+ getCOMIPMethod = METHOD__STUBHELPERS__GET_COM_IP_FROM_RCW_WINRT_SHARED_GENERIC;
+ else if (SF_IsWinRTDelegateStub(dwStubFlags))
+ getCOMIPMethod = METHOD__STUBHELPERS__GET_COM_IP_FROM_RCW_WINRT_DELEGATE;
+ else
+ getCOMIPMethod = METHOD__STUBHELPERS__GET_COM_IP_FROM_RCW_WINRT;
+
+ // GetCOMIPFromRCW_WinRT, GetCOMIPFromRCW_WinRTSharedGeneric, and GetCOMIPFromRCW_WinRTDelegate
+ // always cache the COM interface pointer so no post-call cleanup is needed
+ fDoPostCallIPCleanup = false;
+ }
+ else
+ {
+ // classic COM interop uses the non-optimized helper
+ getCOMIPMethod = METHOD__STUBHELPERS__GET_COM_IP_FROM_RCW;
+ }
+
+ DWORD dwIPRequiresCleanupLocalNum = (DWORD)-1;
+ if (fDoPostCallIPCleanup)
+ {
+ dwIPRequiresCleanupLocalNum = pcsDispatch->NewLocal(ELEMENT_TYPE_BOOLEAN);
+ pcsDispatch->EmitLDLOCA(dwIPRequiresCleanupLocalNum);
+
+ // StubHelpers.GetCOMIPFromRCW(object objSrc, IntPtr pCPCMD, out IntPtr ppTarget, out bool pfNeedsRelease)
+ pcsDispatch->EmitCALL(getCOMIPMethod, 4, 1);
+ }
+ else
+ {
+ // StubHelpers.GetCOMIPFromRCW_WinRT*(object objSrc, IntPtr pCPCMD, out IntPtr ppTarget)
+ pcsDispatch->EmitCALL(getCOMIPMethod, 3, 1);
+ }
+
+
+ // save it because we'll need it to compute the CALLI target and release it
+ pcsDispatch->EmitDUP();
+ pcsDispatch->EmitSTLOC(m_slIL.GetTargetInterfacePointerLocalNum());
+
+ if (fDoPostCallIPCleanup)
+ {
+ // make sure it's Release()'ed after the call
+ m_slIL.SetCleanupNeeded();
+ ILCodeStream *pcsCleanup = m_slIL.GetCleanupCodeStream();
+
+ ILCodeLabel *pSkipThisCleanup = pcsCleanup->NewCodeLabel();
+
+ // and if it requires cleanup (i.e. it's not taken from the RCW cache)
+ pcsCleanup->EmitLDLOC(dwIPRequiresCleanupLocalNum);
+ pcsCleanup->EmitBRFALSE(pSkipThisCleanup);
+
+ pcsCleanup->EmitLDLOC(m_slIL.GetTargetInterfacePointerLocalNum());
+ pcsCleanup->EmitCALL(METHOD__INTERFACEMARSHALER__CLEAR_NATIVE, 1, 0);
+ pcsCleanup->EmitLabel(pSkipThisCleanup);
+ }
+ }
+};
+
+class COMToCLR_ILStubState : public ILStubState
+{
+public:
+
+ COMToCLR_ILStubState(Module* pStubModule, const Signature &signature, SigTypeContext *pTypeContext, DWORD dwStubFlags,
+ int iLCIDParamIdx, MethodDesc* pTargetMD)
+ : ILStubState(
+ pStubModule,
+ signature,
+ pTypeContext,
+ TRUE,
+ TRUE,
+ dwStubFlags,
+ iLCIDParamIdx,
+ pTargetMD)
+ {
+ STANDARD_VM_CONTRACT;
+ }
+
+ void BeginEmit(DWORD dwStubFlags) // COM to CLR IL
+ {
+ STANDARD_VM_CONTRACT;
+
+ ILStubState::BeginEmit(dwStubFlags);
+
+ if (SF_IsWinRTStaticStub(dwStubFlags))
+ {
+ // we are not loading 'this' because the target is static
+ m_slIL.AdjustTargetStackDeltaForExtraParam();
+ }
+ else
+ {
+ // load this
+ m_slIL.GetDispatchCodeStream()->EmitLoadThis();
+ }
+ }
+
+ void MarshalFactoryReturn()
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(SF_IsWinRTCtorStub(m_dwStubFlags));
+ }
+ CONTRACTL_END;
+
+ ILCodeStream *pcsSetup = m_slIL.GetSetupCodeStream();
+ ILCodeStream *pcsDispatch = m_slIL.GetDispatchCodeStream();
+ ILCodeStream *pcsUnmarshal = m_slIL.GetReturnUnmarshalCodeStream();
+ ILCodeStream *pcsExCleanup = m_slIL.GetExceptionCleanupCodeStream();
+
+ LocalDesc locDescFactoryRetVal(ELEMENT_TYPE_I);
+ DWORD dwFactoryRetValLocalNum = pcsSetup->NewLocal(locDescFactoryRetVal);
+ pcsSetup->EmitLoadNullPtr();
+ pcsSetup->EmitSTLOC(dwFactoryRetValLocalNum);
+
+ locDescFactoryRetVal.MakeByRef();
+
+ // expect one additional argument - pointer to a location that receives the created instance
+ DWORD dwRetValArgNum = pcsDispatch->SetStubTargetArgType(&locDescFactoryRetVal, false);
+ m_slIL.AdjustTargetStackDeltaForExtraParam();
+
+ // convert 'this' to an interface pointer corresponding to the default interface of this class
+ pcsUnmarshal->EmitLoadThis();
+ pcsUnmarshal->EmitCALL(METHOD__STUBHELPERS__GET_STUB_CONTEXT, 0, 1);
+ pcsUnmarshal->EmitCALL(METHOD__STUBHELPERS__GET_WINRT_FACTORY_RETURN_VALUE, 2, 1);
+ pcsUnmarshal->EmitSTLOC(dwFactoryRetValLocalNum);
+
+ // assign it to the location pointed to by the argument
+ pcsUnmarshal->EmitLDARG(dwRetValArgNum);
+ pcsUnmarshal->EmitLDLOC(dwFactoryRetValLocalNum);
+ pcsUnmarshal->EmitSTIND_I();
+
+ // on exception, we want to release the IInspectable's and assign NULL to output locations
+ m_slIL.SetExceptionCleanupNeeded();
+
+ EmitInterfaceClearNative(pcsExCleanup, dwFactoryRetValLocalNum);
+
+ // *retVal = NULL
+ pcsExCleanup->EmitLDARG(dwRetValArgNum);
+ pcsExCleanup->EmitLoadNullPtr();
+ pcsExCleanup->EmitSTIND_I();
+
+ }
+};
+
+class COMToCLRFieldAccess_ILStubState : public COMToCLR_ILStubState
+{
+public:
+
+ COMToCLRFieldAccess_ILStubState(Module* pStubModule, const Signature &signature, SigTypeContext *pTypeContext,
+ DWORD dwStubFlags, FieldDesc* pFD)
+ : COMToCLR_ILStubState(
+ pStubModule,
+ signature,
+ pTypeContext,
+ dwStubFlags,
+ -1,
+ NULL)
+ {
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(pFD != NULL);
+ m_pFD = pFD;
+ }
+
+ void EmitInvokeTarget(MethodDesc *pStubMD)
+ {
+ STANDARD_VM_CONTRACT;
+
+ ILCodeStream* pcsDispatch = m_slIL.GetDispatchCodeStream();
+
+ if (SF_IsFieldGetterStub(m_dwStubFlags))
+ {
+ pcsDispatch->EmitLDFLD(pcsDispatch->GetToken(m_pFD));
+ }
+ else
+ {
+ CONSISTENCY_CHECK(SF_IsFieldSetterStub(m_dwStubFlags));
+ pcsDispatch->EmitSTFLD(pcsDispatch->GetToken(m_pFD));
+ }
+ }
+
+protected:
+ FieldDesc *m_pFD;
+};
+#endif // FEATURE_COMINTEROP
+
+
+NDirectStubLinker::NDirectStubLinker(
+ DWORD dwStubFlags,
+ Module* pModule,
+ const Signature &signature,
+ SigTypeContext *pTypeContext,
+ MethodDesc* pTargetMD,
+ int iLCIDParamIdx,
+ BOOL fTargetHasThis,
+ BOOL fStubHasThis)
+ : ILStubLinker(pModule, signature, pTypeContext, pTargetMD, fTargetHasThis, fStubHasThis, !SF_IsCOMStub(dwStubFlags)),
+ m_pCleanupFinallyBeginLabel(NULL),
+ m_pCleanupFinallyEndLabel(NULL),
+ m_pSkipExceptionCleanupLabel(NULL),
+#ifdef FEATURE_COMINTEROP
+ m_dwWinRTFactoryObjectLocalNum(-1),
+#endif // FEATURE_COMINTEROP
+ m_fHasCleanupCode(FALSE),
+ m_fHasExceptionCleanupCode(FALSE),
+ m_fCleanupWorkListIsSetup(FALSE),
+ m_dwThreadLocalNum(-1),
+ m_dwCleanupWorkListLocalNum(-1),
+ m_dwRetValLocalNum(-1),
+#if defined(_TARGET_X86_) && !defined(FEATURE_CORECLR)
+ m_dwFirstCopyCtorCookieLocalNum(-1),
+ m_dwLastCopyCtorCookieLocalNum(-1),
+#endif // _TARGET_X86_ && !FEATURE_CORECLR
+ m_ErrorResID(-1),
+ m_ErrorParamIdx(-1),
+ m_iLCIDParamIdx(iLCIDParamIdx),
+ m_dwStubFlags(dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+
+ m_pcsSetup = NewCodeStream(ILStubLinker::kSetup); // do any one-time setup work
+ m_pcsMarshal = NewCodeStream(ILStubLinker::kMarshal); // marshals arguments
+ m_pcsDispatch = NewCodeStream(ILStubLinker::kDispatch); // sets up arguments and makes call
+ m_pcsRetUnmarshal = NewCodeStream(ILStubLinker::kReturnUnmarshal); // unmarshals return value
+ m_pcsUnmarshal = NewCodeStream(ILStubLinker::kUnmarshal); // unmarshals arguments
+ m_pcsExceptionCleanup = NewCodeStream(ILStubLinker::kExceptionCleanup); // MAY NOT THROW: goes in a finally and does exception-only cleanup
+ m_pcsCleanup = NewCodeStream(ILStubLinker::kCleanup); // MAY NOT THROW: goes in a finally and does unconditional cleanup
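+
+ // Taken together, the streams created above give a typical generated forward stub
+ // roughly the following shape (an illustrative sketch, not literal emitted code):
+ //
+ //   setup:               one-time initialization (locals, factory object, ...)
+ //   try {
+ //     marshal:           managed -> native argument conversion
+ //     dispatch:          load the target and CALLI
+ //     return-unmarshal:  native -> managed return value conversion
+ //     unmarshal:         back-propagation of byref arguments
+ //   } finally {
+ //     exception-cleanup: runs only when an exception was thrown (may not throw)
+ //     cleanup:           unconditional cleanup (may not throw)
+ //   }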
+
+ //
+ // Add locals
+ m_dwArgMarshalIndexLocalNum = NewLocal(ELEMENT_TYPE_I4);
+ m_pcsMarshal->EmitLDC(0);
+ m_pcsMarshal->EmitSTLOC(m_dwArgMarshalIndexLocalNum);
+
+#ifdef FEATURE_COMINTEROP
+ //
+ // Forward COM interop needs a local to hold target interface pointer
+ //
+ if (SF_IsForwardCOMStub(m_dwStubFlags))
+ {
+ m_dwTargetEntryPointLocalNum = NewLocal(ELEMENT_TYPE_I);
+ m_dwTargetInterfacePointerLocalNum = NewLocal(ELEMENT_TYPE_I);
+ m_pcsSetup->EmitLoadNullPtr();
+ m_pcsSetup->EmitSTLOC(m_dwTargetInterfacePointerLocalNum);
+ }
+#endif // FEATURE_COMINTEROP
+}
+
+void NDirectStubLinker::SetCallingConvention(CorPinvokeMap unmngCallConv, BOOL fIsVarArg)
+{
+ LIMITED_METHOD_CONTRACT;
+ ULONG uNativeCallingConv = 0;
+
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
+ if (fIsVarArg)
+ {
+ // The JIT has to use a different calling convention for unmanaged vararg targets on 64-bit and ARM:
+ // any float values must be duplicated in the corresponding general-purpose registers.
+ uNativeCallingConv = CORINFO_CALLCONV_NATIVEVARARG;
+ }
+ else
+#endif // _TARGET_AMD64_ || _TARGET_ARM_
+ {
+ switch (unmngCallConv)
+ {
+ case pmCallConvCdecl:
+ uNativeCallingConv = CORINFO_CALLCONV_C;
+ break;
+ case pmCallConvStdcall:
+ uNativeCallingConv = CORINFO_CALLCONV_STDCALL;
+ break;
+ case pmCallConvThiscall:
+ uNativeCallingConv = CORINFO_CALLCONV_THISCALL;
+ break;
+ default:
+ _ASSERTE(!"Invalid calling convention.");
+ uNativeCallingConv = CORINFO_CALLCONV_STDCALL;
+ break;
+ }
+ }
+
+ SetStubTargetCallingConv((CorCallingConvention)uNativeCallingConv);
+}
+
+void NDirectStubLinker::EmitSetArgMarshalIndex(ILCodeStream* pcsEmit, UINT uArgIdx)
+{
+ WRAPPER_NO_CONTRACT;
+
+ //
+ // This sets our state local variable that tracks the progress of the stub execution.
+ // In the finally block we test this variable to see what cleanup we need to do. The
+ // variable starts with the value of 0 and is assigned the following values as the
+ // stub executes:
+ //
+ // CLEANUP_INDEX_ARG0_MARSHAL + 1 - 1st argument marshaled
+ // CLEANUP_INDEX_ARG0_MARSHAL + 2 - 2nd argument marshaled
+ // ...
+ // CLEANUP_INDEX_ARG0_MARSHAL + n - nth argument marshaled
+ // CLEANUP_INDEX_RETVAL_UNMARSHAL + 1 - return value unmarshaled
+ // CLEANUP_INDEX_ARG0_UNMARSHAL + 1 - 1st argument unmarshaled
+ // CLEANUP_INDEX_ARG0_UNMARSHAL + 2 - 2nd argument unmarshaled
+ // ...
+ // CLEANUP_INDEX_ARG0_UNMARSHAL + n - nth argument unmarshaled
+ // CLEANUP_INDEX_ALL_DONE + 1 - ran to completion, no exception thrown
+ //
+ // Note: There may be gaps, i.e. if say 2nd argument does not need cleanup, the
+ // state variable will never be assigned the corresponding value. However, the
+ // value must always monotonically increase so we can use <=, >, etc.
+ //
+
+ pcsEmit->EmitLDC(uArgIdx + 1);
+ pcsEmit->EmitSTLOC(m_dwArgMarshalIndexLocalNum);
+}
+
+void NDirectStubLinker::EmitCheckForArgCleanup(ILCodeStream* pcsEmit, UINT uArgIdx, ArgCleanupBranchKind branchKind, ILCodeLabel* pSkipCleanupLabel)
+{
+ STANDARD_VM_CONTRACT;
+
+ SetCleanupNeeded();
+
+ // See EmitSetArgMarshalIndex.
+ pcsEmit->EmitLDLOC(m_dwArgMarshalIndexLocalNum);
+ pcsEmit->EmitLDC(uArgIdx);
+
+ switch (branchKind)
+ {
+ case BranchIfMarshaled:
+ {
+ // we branch to the label if the argument has been marshaled
+ pcsEmit->EmitBGT(pSkipCleanupLabel);
+ break;
+ }
+
+ case BranchIfNotMarshaled:
+ {
+ // we branch to the label if the argument has not been marshaled
+ pcsEmit->EmitBLE(pSkipCleanupLabel);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
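+
+// As a sketch (not literal source), a cleanup-stream call such as
+// EmitCheckForArgCleanup(pcs, uArgIdx, BranchIfNotMarshaled, pSkip) emits IL equivalent to:
+//
+//   ldloc  <m_dwArgMarshalIndexLocalNum>
+//   ldc.i4 <uArgIdx>
+//   ble    <pSkip>   // state <= uArgIdx means the argument was never marshaled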
+
+int NDirectStubLinker::GetLCIDParamIdx()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_iLCIDParamIdx;
+}
+
+ILCodeStream* NDirectStubLinker::GetSetupCodeStream()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pcsSetup;
+}
+
+ILCodeStream* NDirectStubLinker::GetMarshalCodeStream()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pcsMarshal;
+}
+
+ILCodeStream* NDirectStubLinker::GetUnmarshalCodeStream()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pcsUnmarshal;
+}
+
+ILCodeStream* NDirectStubLinker::GetReturnUnmarshalCodeStream()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pcsRetUnmarshal;
+}
+
+ILCodeStream* NDirectStubLinker::GetDispatchCodeStream()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pcsDispatch;
+}
+
+ILCodeStream* NDirectStubLinker::GetCleanupCodeStream()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pcsCleanup;
+}
+
+ILCodeStream* NDirectStubLinker::GetExceptionCleanupCodeStream()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pcsExceptionCleanup;
+}
+
+void NDirectStubLinker::AdjustTargetStackDeltaForExtraParam()
+{
+ LIMITED_METHOD_CONTRACT;
+ //
+ // Compensate for the extra parameter.
+ //
+ m_iTargetStackDelta++;
+}
+
+void NDirectStubLinker::AdjustTargetStackDeltaForReverseInteropHRESULTSwapping()
+{
+ WRAPPER_NO_CONTRACT;
+ //
+ // In the case of reverse pinvoke, we build up the 'target'
+ // signature as if it were normal forward pinvoke and then
+ // switch that signature (representing the native sig) with
+ // the stub's sig (representing the managed sig). However,
+ // as a side-effect, our calculated target stack delta is
+ // wrong.
+ //
+ // The only way that we support a different stack delta is
+ // through hresult swapping. So this code "undoes" the
+ // deltas that would have been applied in that case.
+ //
+
+ if (StubHasVoidReturnType())
+ {
+ //
+ // If the managed return type is void, undo the HRESULT
+ // return type added to our target sig for HRESULT swapping.
+ // No extra argument will have been added because it makes
+ // no sense to add an extra byref void argument.
+ //
+ m_iTargetStackDelta--;
+ }
+ else
+ {
+ //
+ // no longer pop the extra byref argument from the stack
+ //
+ m_iTargetStackDelta++;
+ }
+}
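+
+// Illustrative example (not taken from the source): with HRESULT swapping, a managed
+// signature 'int Foo(int x)' corresponds to a native signature roughly like
+//
+//   HRESULT Foo(int x, /* retval */ int* pRetVal);
+//
+// while a managed 'void Bar()' becomes just 'HRESULT Bar()' with no extra byref
+// argument, which is why the two branches above adjust the delta in opposite directions.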
+
+void NDirectStubLinker::SetInteropParamExceptionInfo(UINT resID, UINT paramIdx)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // only keep the first one
+ if (HasInteropParamExceptionInfo())
+ {
+ return;
+ }
+
+ m_ErrorResID = resID;
+ m_ErrorParamIdx = paramIdx;
+}
+
+bool NDirectStubLinker::HasInteropParamExceptionInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return !(((DWORD)-1 == m_ErrorResID) && ((DWORD)-1 == m_ErrorParamIdx));
+}
+
+void NDirectStubLinker::GenerateInteropParamException(ILCodeStream* pcsEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ pcsEmit->EmitLDC(m_ErrorResID);
+ pcsEmit->EmitLDC(m_ErrorParamIdx);
+ pcsEmit->EmitCALL(METHOD__STUBHELPERS__THROW_INTEROP_PARAM_EXCEPTION, 2, 0);
+
+ pcsEmit->EmitLDNULL();
+ pcsEmit->EmitTHROW();
+}
+
+#ifdef FEATURE_COMINTEROP
+DWORD NDirectStubLinker::GetTargetInterfacePointerLocalNum()
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(m_dwTargetInterfacePointerLocalNum != (DWORD)-1);
+ return m_dwTargetInterfacePointerLocalNum;
+}
+DWORD NDirectStubLinker::GetTargetEntryPointLocalNum()
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(m_dwTargetEntryPointLocalNum != (DWORD)-1);
+ return m_dwTargetEntryPointLocalNum;
+}
+
+void NDirectStubLinker::EmitLoadRCWThis(ILCodeStream *pcsEmit, DWORD dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (SF_IsForwardStub(dwStubFlags) &&
+ (SF_IsWinRTCtorStub(dwStubFlags) || SF_IsWinRTStaticStub(dwStubFlags)))
+ {
+ // WinRT ctor/static stubs make the call on the factory object instead of 'this'
+ if (m_dwWinRTFactoryObjectLocalNum == (DWORD)-1)
+ {
+ m_dwWinRTFactoryObjectLocalNum = NewLocal(ELEMENT_TYPE_OBJECT);
+
+ // get the factory object
+ EmitLoadStubContext(m_pcsSetup, dwStubFlags);
+ m_pcsSetup->EmitCALL(METHOD__STUBHELPERS__GET_WINRT_FACTORY_OBJECT, 1, 1);
+ m_pcsSetup->EmitSTLOC(m_dwWinRTFactoryObjectLocalNum);
+ }
+
+ pcsEmit->EmitLDLOC(m_dwWinRTFactoryObjectLocalNum);
+ }
+ else
+ {
+ pcsEmit->EmitLoadThis();
+ }
+}
+#endif // FEATURE_COMINTEROP
+
+DWORD NDirectStubLinker::GetCleanupWorkListLocalNum()
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(m_dwCleanupWorkListLocalNum != (DWORD)-1);
+ return m_dwCleanupWorkListLocalNum;
+}
+
+DWORD NDirectStubLinker::GetThreadLocalNum()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (m_dwThreadLocalNum == (DWORD)-1)
+ {
+ // The local is created and initialized lazily when first asked.
+ m_dwThreadLocalNum = NewLocal(ELEMENT_TYPE_I);
+ m_pcsSetup->EmitCALL(METHOD__THREAD__INTERNAL_GET_CURRENT_THREAD, 0, 1);
+ m_pcsSetup->EmitSTLOC(m_dwThreadLocalNum);
+ }
+
+ return m_dwThreadLocalNum;
+}
+
+DWORD NDirectStubLinker::GetReturnValueLocalNum()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_dwRetValLocalNum;
+}
+
+BOOL NDirectStubLinker::IsCleanupNeeded()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_fHasCleanupCode || IsCleanupWorkListSetup());
+}
+
+BOOL NDirectStubLinker::IsExceptionCleanupNeeded()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_fHasExceptionCleanupCode;
+}
+
+void NDirectStubLinker::InitCleanupCode()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(NULL == m_pCleanupFinallyBeginLabel);
+ }
+ CONTRACTL_END;
+
+ m_pCleanupFinallyBeginLabel = NewCodeLabel();
+ m_pcsExceptionCleanup->EmitLabel(m_pCleanupFinallyBeginLabel);
+}
+
+void NDirectStubLinker::InitExceptionCleanupCode()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(NULL == m_pSkipExceptionCleanupLabel);
+ }
+ CONTRACTL_END;
+
+ SetCleanupNeeded();
+
+ // we want to skip the entire exception cleanup if no exception has been thrown
+ m_pSkipExceptionCleanupLabel = NewCodeLabel();
+ EmitCheckForArgCleanup(m_pcsExceptionCleanup, CLEANUP_INDEX_ALL_DONE, BranchIfMarshaled, m_pSkipExceptionCleanupLabel);
+}
+
+void NDirectStubLinker::SetCleanupNeeded()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!m_fHasCleanupCode)
+ {
+ m_fHasCleanupCode = TRUE;
+ InitCleanupCode();
+ }
+}
+
+void NDirectStubLinker::SetExceptionCleanupNeeded()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!m_fHasExceptionCleanupCode)
+ {
+ m_fHasExceptionCleanupCode = TRUE;
+ InitExceptionCleanupCode();
+ }
+}
+
+void NDirectStubLinker::NeedsCleanupList()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!IsCleanupWorkListSetup())
+ {
+ m_fCleanupWorkListIsSetup = TRUE;
+ SetCleanupNeeded();
+
+ // we set up a new local that will hold the cleanup work list
+ LocalDesc desc(MscorlibBinder::GetClass(CLASS__CLEANUP_WORK_LIST));
+ m_dwCleanupWorkListLocalNum = NewLocal(desc);
+ }
+}
+
+
+BOOL NDirectStubLinker::IsCleanupWorkListSetup()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_fCleanupWorkListIsSetup;
+}
+
+
+void NDirectStubLinker::LoadCleanupWorkList(ILCodeStream* pcsEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ NeedsCleanupList();
+ pcsEmit->EmitLDLOCA(GetCleanupWorkListLocalNum());
+}
+
+#if defined(_TARGET_X86_) && !defined(FEATURE_CORECLR)
+
+BOOL NDirectStubLinker::IsCopyCtorStubNeeded()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_dwFirstCopyCtorCookieLocalNum != (DWORD)-1);
+}
+
+DWORD NDirectStubLinker::CreateCopyCtorCookie(ILCodeStream* pcsEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodTable *pCookieMT = MscorlibBinder::GetClass(CLASS__COPYCTORSTUBCOOKIE);
+
+ LocalDesc desc(pCookieMT);
+ DWORD dwCookieLocalNum = pcsEmit->NewLocal(desc);
+
+ // <dwCookieLocalNum> = new CopyCtorStubCookie()
+ pcsEmit->EmitLDLOCA(dwCookieLocalNum);
+ pcsEmit->EmitINITOBJ(pcsEmit->GetToken(pCookieMT));
+
+ if (m_dwLastCopyCtorCookieLocalNum == (DWORD)-1)
+ {
+ // this is the first cookie in this stub
+ m_dwFirstCopyCtorCookieLocalNum = dwCookieLocalNum;
+ }
+ else
+ {
+ // this is not the first cookie - build a linked list
+ // <m_dwLastCopyCtorCookieLocalNum>.SetNext(&<dwCookieLocalNum>)
+ pcsEmit->EmitLDLOCA(m_dwLastCopyCtorCookieLocalNum);
+ pcsEmit->EmitLDLOCA(dwCookieLocalNum);
+ pcsEmit->EmitCALL(METHOD__COPYCTORSTUBCOOKIE__SET_NEXT, 2, 0);
+ }
+
+ m_dwLastCopyCtorCookieLocalNum = dwCookieLocalNum;
+ return dwCookieLocalNum;
+}
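+
+// Sketch: three successive CreateCopyCtorCookie calls produce cookie locals c0, c1, c2
+// linked as c0 -> c1 -> c2, with m_dwFirstCopyCtorCookieLocalNum = c0 (the chain head
+// later passed to StubHelpers.SetCopyCtorCookieChain) and
+// m_dwLastCopyCtorCookieLocalNum = c2.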
+
+#endif // _TARGET_X86_ && !FEATURE_CORECLR
+
+void NDirectStubLinker::Begin(DWORD dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_COMINTEROP
+ if (SF_IsWinRTHasRedirection(dwStubFlags))
+ {
+ _ASSERTE(SF_IsForwardCOMStub(dwStubFlags));
+
+ // The very first thing we need to do is check whether the call should be routed to
+ // the marshaling stub for the corresponding projected WinRT interface. If so, we
+ // tail-call there.
+ m_pcsSetup->EmitLoadThis();
+ EmitLoadStubContext(m_pcsSetup, dwStubFlags);
+ m_pcsSetup->EmitCALL(METHOD__STUBHELPERS__SHOULD_CALL_WINRT_INTERFACE, 2, 1);
+
+ ILCodeLabel *pNoRedirection = m_pcsSetup->NewCodeLabel();
+ m_pcsSetup->EmitBRFALSE(pNoRedirection);
+
+ MethodDesc *pAdapterMD = WinRTInterfaceRedirector::GetStubMethodForRedirectedInterfaceMethod(
+ GetTargetMD(),
+ TypeHandle::Interop_ManagedToNative);
+
+ CONSISTENCY_CHECK(pAdapterMD != NULL && !pAdapterMD->HasMethodInstantiation());
+
+ m_pcsSetup->EmitJMP(m_pcsSetup->GetToken(pAdapterMD));
+
+ m_pcsSetup->EmitLabel(pNoRedirection);
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (SF_IsForwardStub(dwStubFlags))
+ {
+#ifndef FEATURE_CORECLR // CAS
+ // we may need to demand security permission
+ if (SF_IsStubWithDemand(dwStubFlags))
+ {
+ if (SF_IsCOMStub(dwStubFlags) || SF_IsDelegateStub(dwStubFlags))
+ {
+ // pass NULL NDirectMethodDesc for COM and delegate P/Invoke
+ m_pcsSetup->EmitLoadNullPtr();
+ }
+ else
+ {
+ // pass the real MD for direct P/Invoke
+ EmitLoadStubContext(m_pcsSetup, dwStubFlags);
+ }
+ m_pcsSetup->EmitCALL(METHOD__STUBHELPERS__DEMAND_PERMISSION, 1, 0);
+ }
+#endif // !FEATURE_CORECLR
+
+ if (SF_IsStubWithCctorTrigger(dwStubFlags))
+ {
+ EmitLoadStubContext(m_pcsSetup, dwStubFlags);
+ m_pcsSetup->EmitCALL(METHOD__STUBHELPERS__INIT_DECLARING_TYPE, 1, 0);
+ }
+ }
+ else
+ {
+#ifdef MDA_SUPPORTED
+ if (!SF_IsNGENedStub(dwStubFlags) && MDA_GET_ASSISTANT(GcUnmanagedToManaged))
+ {
+ EmitCallGcCollectForMDA(m_pcsSetup, dwStubFlags);
+ }
+#endif // MDA_SUPPORTED
+
+ if (SF_IsDelegateStub(dwStubFlags))
+ {
+#if defined(MDA_SUPPORTED) || (defined(CROSSGEN_COMPILE) && !defined(FEATURE_CORECLR))
+ // GC was induced (gcUnmanagedToManagedMDA), arguments have been marshaled, and we are about
+ // to touch the UMEntryThunk and extract the delegate target from it so this is the right time
+ // to do the collected delegate MDA check.
+
+ // The call to CheckCollectedDelegateMDA is emitted regardless of whether the MDA is on at the
+ // moment. This is to avoid having to ignore NGENed stubs without the call just as we do for
+ // the GC MDA (the callbackOnCollectedDelegate MDA is turned on under the managed debugger by default
+ // so the impact would be substantial). The helper bails out fast if the MDA is not enabled.
+ EmitLoadStubContext(m_pcsDispatch, dwStubFlags);
+ m_pcsDispatch->EmitCALL(METHOD__STUBHELPERS__CHECK_COLLECTED_DELEGATE_MDA, 1, 0);
+#endif // MDA_SUPPORTED
+
+ //
+ // recover delegate object from UMEntryThunk
+
+ EmitLoadStubContext(m_pcsDispatch, dwStubFlags); // load UMEntryThunk*
+
+ m_pcsDispatch->EmitLDC(offsetof(UMEntryThunk, m_pObjectHandle));
+ m_pcsDispatch->EmitADD();
+ m_pcsDispatch->EmitLDIND_I(); // get OBJECTHANDLE
+ m_pcsDispatch->EmitLDIND_REF(); // get Delegate object
+ m_pcsDispatch->EmitLDFLD(GetToken(MscorlibBinder::GetField(FIELD__DELEGATE__TARGET)));
+ }
+ }
+
+ m_pCleanupTryBeginLabel = NewCodeLabel();
+ m_pcsMarshal->EmitLabel(m_pCleanupTryBeginLabel);
+}
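+
+// The reverse-delegate prolog emitted above is roughly equivalent to the C++ expression
+//
+//   ObjectFromHandle(pUMEntryThunk->m_pObjectHandle)->_target
+//
+// i.e. it dereferences the object handle stored in the UMEntryThunk and then loads the
+// delegate's target field (a sketch; the actual field layout is defined elsewhere).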
+
+void NDirectStubLinker::End(DWORD dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeStream* pcs = m_pcsUnmarshal;
+
+ bool hasTryCatchForHRESULT = SF_IsReverseCOMStub(dwStubFlags)
+ && !SF_IsFieldGetterStub(dwStubFlags)
+ && !SF_IsFieldSetterStub(dwStubFlags);
+
+ //
+ // Create a local for the return value and store the return value in it.
+ //
+ if (IsCleanupNeeded() || hasTryCatchForHRESULT)
+ {
+ // Save the return value if necessary, since the IL stack will be emptied when we leave a try block.
+ LocalDesc locDescRetVal;
+ if (SF_IsForwardStub(dwStubFlags))
+ {
+ GetStubReturnType(&locDescRetVal);
+ }
+ else
+ {
+ GetStubTargetReturnType(&locDescRetVal);
+ }
+
+ if (!( (locDescRetVal.cbType == 1) && (locDescRetVal.ElementType[0] == ELEMENT_TYPE_VOID) ))
+ {
+ m_dwRetValLocalNum = m_pcsRetUnmarshal->NewLocal(locDescRetVal);
+ if (SF_IsReverseStub(dwStubFlags) && StubHasVoidReturnType())
+ {
+ // if the target returns void and we are doing HRESULT swapping, S_OK is loaded
+ // in the unmarshal stream
+ m_pcsUnmarshal->EmitSTLOC(m_dwRetValLocalNum);
+ }
+ else
+ {
+ // otherwise the return value is loaded in the return unmarshal stream
+ m_pcsRetUnmarshal->EmitSTLOC(m_dwRetValLocalNum);
+ }
+ }
+ else if (hasTryCatchForHRESULT && (locDescRetVal.ElementType[0] != ELEMENT_TYPE_VOID))
+ {
+ m_dwRetValLocalNum = m_pcsRetUnmarshal->NewLocal(locDescRetVal);
+ }
+ }
+
+ //
+ // Emit end-of-try and end-of-finally code for the try/finally
+ //
+ if (IsCleanupNeeded())
+ {
+ m_pCleanupFinallyEndLabel = NewCodeLabel();
+ m_pCleanupTryEndLabel = NewCodeLabel();
+
+ if (IsExceptionCleanupNeeded())
+ {
+ // if we made it here, no exception has been thrown
+ EmitSetArgMarshalIndex(m_pcsUnmarshal, CLEANUP_INDEX_ALL_DONE);
+ }
+
+ // Emit a leave at the end of the try block. If we have an outer try/catch, we need
+ // to leave to the beginning of the ExceptionHandler code stream, which follows the
+ // Cleanup code stream. If we don't, we can just leave to the tail end of the
+ // Unmarshal code stream where we'll emit our RET.
+
+ ILCodeLabel* pLeaveTarget = m_pCleanupTryEndLabel;
+ if (hasTryCatchForHRESULT)
+ {
+ pLeaveTarget = m_pCleanupFinallyEndLabel;
+ }
+
+ m_pcsUnmarshal->EmitLEAVE(pLeaveTarget);
+ m_pcsUnmarshal->EmitLabel(m_pCleanupTryEndLabel);
+
+ // Emit a call to destroy the clean-up list if needed.
+ if (IsCleanupWorkListSetup())
+ {
+ LoadCleanupWorkList(m_pcsCleanup);
+ m_pcsCleanup->EmitCALL(METHOD__STUBHELPERS__DESTROY_CLEANUP_LIST, 1, 0);
+ }
+
+ // Emit the endfinally.
+ m_pcsCleanup->EmitENDFINALLY();
+ m_pcsCleanup->EmitLabel(m_pCleanupFinallyEndLabel);
+ }
+
+#ifdef MDA_SUPPORTED
+ if (SF_IsReverseStub(dwStubFlags) && !SF_IsNGENedStub(dwStubFlags) &&
+ MDA_GET_ASSISTANT(GcManagedToUnmanaged))
+ {
+ EmitCallGcCollectForMDA(pcs, dwStubFlags);
+ }
+#endif // MDA_SUPPORTED
+
+ if (IsExceptionCleanupNeeded())
+ {
+ m_pcsExceptionCleanup->EmitLabel(m_pSkipExceptionCleanupLabel);
+ }
+
+ // Reload the return value
+ if ((m_dwRetValLocalNum != (DWORD)-1) && !hasTryCatchForHRESULT)
+ {
+ pcs->EmitLDLOC(m_dwRetValLocalNum);
+ }
+}
+
+
+void NDirectStubLinker::EmitSetLastError(ILCodeStream* pcsEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ pcsEmit->EmitCALL(METHOD__STUBHELPERS__SET_LAST_ERROR, 0, 0);
+}
+
+void NDirectStubLinker::DoNDirect(ILCodeStream *pcsEmit, DWORD dwStubFlags, MethodDesc * pStubMD)
+{
+ STANDARD_VM_CONTRACT;
+ if (SF_IsForwardStub(dwStubFlags)) // managed-to-native
+ {
+#if defined(_TARGET_X86_) && !defined(FEATURE_CORECLR)
+ // set the copy ctor cookie chain if needed
+ if (IsCopyCtorStubNeeded())
+ {
+ // StubHelpers.SetCopyCtorCookieChain(pStubArg, pUnmngThis, dwStubFlags, &<m_dwFirstCopyCtorCookieLocalNum>)
+ if (SF_IsDelegateStub(dwStubFlags))
+ {
+ // for forward delegate P/Invoke load the target from 'this'
+ pcsEmit->EmitLoadThis();
+ pcsEmit->EmitLDFLD(pcsEmit->GetToken(MscorlibBinder::GetField(FIELD__DELEGATE__METHOD_PTR_AUX)));
+ }
+ else
+ {
+ // otherwise load the secret argument
+ EmitLoadStubContext(pcsEmit, dwStubFlags);
+ }
+
+ if (SF_IsCOMStub(dwStubFlags))
+ {
+ // for forward COM load the unmanaged interface pointer
+ pcsEmit->EmitLDLOC(m_dwTargetInterfacePointerLocalNum);
+ }
+ else
+ {
+ // otherwise load 0
+ pcsEmit->EmitLoadNullPtr();
+ }
+ pcsEmit->EmitLDC(dwStubFlags);
+ pcsEmit->EmitLDLOCA(m_dwFirstCopyCtorCookieLocalNum);
+ pcsEmit->EmitCALL(METHOD__STUBHELPERS__SET_COPY_CTOR_COOKIE_CHAIN, 4, 0);
+ }
+#endif // _TARGET_X86_ && !FEATURE_CORECLR
+
+ if (SF_IsDelegateStub(dwStubFlags)) // delegate invocation
+ {
+ // get the delegate unmanaged target - we call a helper instead of just grabbing
+ // the _methodPtrAux field because we may need to intercept the call for host, MDA, etc.
+ pcsEmit->EmitLoadThis();
+#ifdef _WIN64
+ // on AMD64, GetDelegateTarget will return the address of the generic stub for host when we are hosted
+ // and update the secret argument with real target - the secret arg will be embedded in the
+ // InlinedCallFrame by the JIT and fetched via TLS->Thread->Frame->Datum by the stub for host
+ pcsEmit->EmitCALL(METHOD__STUBHELPERS__GET_STUB_CONTEXT_ADDR, 0, 1);
+#else // _WIN64
+ // we don't need to do this on x86 because stub for host is generated dynamically per target
+ pcsEmit->EmitLDNULL();
+#endif // _WIN64
+ pcsEmit->EmitCALL(METHOD__STUBHELPERS__GET_DELEGATE_TARGET, 2, 1);
+ }
+ else // direct invocation
+ {
+ if (SF_IsCALLIStub(dwStubFlags)) // unmanaged CALLI
+ {
+ // if we ever NGEN CALLI stubs, this would have to be done differently
+ _ASSERTE(!SF_IsNGENedStub(dwStubFlags));
+
+#ifndef CROSSGEN_COMPILE
+
+#ifdef _TARGET_X86_
+
+#ifndef FEATURE_CORECLR
+ if (IsCopyCtorStubNeeded())
+ {
+ // if we need to call copy ctor(s), we go to the copy ctor stub
+ Stub *pCopyCtorStub = NDirect::GetStubForCopyCtor();
+ pcsEmit->EmitLDC((DWORD_PTR)pCopyCtorStub->GetEntryPoint());
+ }
+ else
+#endif // !FEATURE_CORECLR
+ {
+ // for managed-to-unmanaged CALLI that requires marshaling, the target is passed
+ // as the secret argument to the stub by GenericPInvokeCalliHelper (asmhelpers.asm)
+ EmitLoadStubContext(pcsEmit, dwStubFlags);
+ }
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (NDirect::IsHostHookEnabled())
+ {
+ // we need to call to the host hook, real target is passed as the last argument
+ Stub *pHostStub = NDirect::GenerateStubForHost(
+ GetStubSigModule(),
+ (CorUnmanagedCallingConvention)(GetStubTargetCallingConv() & IMAGE_CEE_CS_CALLCONV_MASK),
+ pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize());
+
+ pcsEmit->EmitLDC((DWORD_PTR)pHostStub->GetEntryPoint());
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#else // _TARGET_X86_
+
+ if (NDirect::IsHostHookEnabled())
+ {
+ // the stub for host will get the original target from the secret arg
+ pcsEmit->EmitLDC((DWORD_PTR)GetEEFuncEntryPoint(PInvokeStubForHost));
+ }
+ else
+ {
+ // the secret arg has been shifted to the left and ORed with 1 (see code:GenericPInvokeCalliHelper)
+ EmitLoadStubContext(pcsEmit, dwStubFlags);
+#ifndef _TARGET_ARM_
+ pcsEmit->EmitLDC(1);
+ pcsEmit->EmitSHR_UN();
+#endif
+ }
+
+#endif // _TARGET_X86_
+
+#endif // CROSSGEN_COMPILE
+ }
+ else
+#ifdef FEATURE_COMINTEROP
+ if (!SF_IsCOMStub(dwStubFlags)) // forward P/Invoke
+#endif // FEATURE_COMINTEROP
+ {
+ EmitLoadStubContext(pcsEmit, dwStubFlags);
+
+#ifdef MDIL
+ if (GetAppDomain()->IsMDILCompilationDomain())
+ {
+ // GetNDirectTarget is understood by the compiler and generates the CALL_PINVOKE instruction
+ pcsEmit->EmitCALL(pcsEmit->GetToken(MscorlibBinder::GetMethod(METHOD__STUBHELPERS__GET_NDIRECT_TARGET)), 1, 1);
+ }
+ else
+#endif // MDIL
+ {
+ // Perf: inline the helper for now
+ //pcsEmit->EmitCALL(METHOD__STUBHELPERS__GET_NDIRECT_TARGET, 1, 1);
+ pcsEmit->EmitLDC(offsetof(NDirectMethodDesc, ndirect.m_pWriteableData));
+ pcsEmit->EmitADD();
+ pcsEmit->EmitLDIND_I();
+ pcsEmit->EmitLDIND_I();
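+ // Taken together, the four instructions above compute, in effect,
+ // pMD->ndirect.m_pWriteableData->m_pNDirectTarget (this assumes the target
+ // pointer is the first field of the writeable data), i.e. the inlined body
+ // of the GetNDirectTarget helper commented out above.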
+ }
+ }
+#ifdef FEATURE_COMINTEROP
+ else
+ {
+ // this is a CLR -> COM call
+ // the target has been computed by StubHelpers::GetCOMIPFromRCW
+ pcsEmit->EmitLDLOC(m_dwTargetEntryPointLocalNum);
+ }
+#endif // FEATURE_COMINTEROP
+ }
+ }
+ else // native-to-managed
+ {
+ if (SF_IsDelegateStub(dwStubFlags)) // reverse P/Invoke via delegate
+ {
+ int tokDelegate_methodPtr = pcsEmit->GetToken(MscorlibBinder::GetField(FIELD__DELEGATE__METHOD_PTR));
+
+ EmitLoadStubContext(pcsEmit, dwStubFlags);
+ pcsEmit->EmitLDC(offsetof(UMEntryThunk, m_pObjectHandle));
+ pcsEmit->EmitADD();
+ pcsEmit->EmitLDIND_I(); // Get OBJECTHANDLE
+ pcsEmit->EmitLDIND_REF(); // Get Delegate object
+ pcsEmit->EmitLDFLD(tokDelegate_methodPtr); // get _methodPtr
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (SF_IsCOMStub(dwStubFlags)) // COM -> CLR call
+ {
+ // managed target is passed directly in the secret argument
+ EmitLoadStubContext(pcsEmit, dwStubFlags);
+ }
+#endif // FEATURE_COMINTEROP
+ else // direct reverse P/Invoke (CoreCLR hosting)
+ {
+ EmitLoadStubContext(pcsEmit, dwStubFlags);
+ CONSISTENCY_CHECK(0 == offsetof(UMEntryThunk, m_pManagedTarget)); // if this changes, just add back the EmitLDC/EmitADD below
+ // pcsEmit->EmitLDC(offsetof(UMEntryThunk, m_pManagedTarget));
+ // pcsEmit->EmitADD();
+ pcsEmit->EmitLDIND_I(); // Get UMEntryThunk::m_pManagedTarget
+ }
+ }
+
+ // For managed-to-native calls, the rest of the work is done by the JIT. It will
+ // erect InlinedCallFrame, flip GC mode, and use the specified calling convention
+ // to call the target. For native-to-managed calls, this is an ordinary managed
+ // CALLI and nothing special happens.
+ pcsEmit->EmitCALLI(TOKEN_ILSTUB_TARGET_SIG, 0, m_iTargetStackDelta);
+}
+
+void NDirectStubLinker::EmitLogNativeArgument(ILCodeStream* pslILEmit, DWORD dwPinnedLocal)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (SF_IsForwardPInvokeStub(m_dwStubFlags) && !SF_IsForwardDelegateStub(m_dwStubFlags))
+ {
+ // get the secret argument via intrinsic
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__GET_STUB_CONTEXT, 0, 1);
+ }
+ else
+ {
+ // no secret argument
+ pslILEmit->EmitLoadNullPtr();
+ }
+
+ pslILEmit->EmitLDLOC(dwPinnedLocal);
+
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__LOG_PINNED_ARGUMENT, 2, 0);
+}
+
+void NDirectStubLinker::GetCleanupFinallyOffsets(ILStubEHClause * pClause)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pClause));
+ }
+ CONTRACTL_END;
+
+ if (m_pCleanupFinallyEndLabel)
+ {
+ _ASSERTE(m_pCleanupFinallyBeginLabel);
+ _ASSERTE(m_pCleanupTryBeginLabel);
+ _ASSERTE(m_pCleanupTryEndLabel);
+
+ pClause->kind = ILStubEHClause::kFinally;
+ pClause->dwTryBeginOffset = (DWORD)m_pCleanupTryBeginLabel->GetCodeOffset();
+ pClause->cbTryLength = (DWORD)m_pCleanupTryEndLabel->GetCodeOffset() - pClause->dwTryBeginOffset;
+ pClause->dwHandlerBeginOffset = (DWORD)m_pCleanupFinallyBeginLabel->GetCodeOffset();
+ pClause->cbHandlerLength = (DWORD)m_pCleanupFinallyEndLabel->GetCodeOffset() - pClause->dwHandlerBeginOffset;
+ }
+}
+
+void NDirectStubLinker::ClearCode()
+{
+ WRAPPER_NO_CONTRACT;
+ ILStubLinker::ClearCode();
+
+ m_pCleanupTryBeginLabel = 0;
+ m_pCleanupTryEndLabel = 0;
+ m_pCleanupFinallyBeginLabel = 0;
+ m_pCleanupFinallyEndLabel = 0;
+}
+
+#ifdef PROFILING_SUPPORTED
+DWORD NDirectStubLinker::EmitProfilerBeginTransitionCallback(ILCodeStream* pcsEmit, DWORD dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (SF_IsForwardDelegateStub(dwStubFlags) || SF_IsCALLIStub(dwStubFlags))
+ {
+ // secret argument does not contain MD nor UMEntryThunk
+ pcsEmit->EmitLoadNullPtr();
+ }
+ else
+ {
+ EmitLoadStubContext(pcsEmit, dwStubFlags);
+ }
+
+ if (SF_IsForwardStub(dwStubFlags))
+ {
+ pcsEmit->EmitLDLOC(GetThreadLocalNum());
+ }
+ else
+ {
+ // we use a null pThread to indicate reverse interop
+ pcsEmit->EmitLDC(NULL);
+ }
+
+ // In the unmanaged delegate case, we need the "this" object to retrieve the MD
+ // in StubHelpers::ProfilerEnterCallback().
+ if (SF_IsDelegateStub(dwStubFlags))
+ {
+ if (SF_IsForwardStub(dwStubFlags))
+ {
+ pcsEmit->EmitLoadThis();
+ }
+ else
+ {
+ EmitLoadStubContext(pcsEmit, dwStubFlags); // load UMEntryThunk*
+ pcsEmit->EmitLDC(offsetof(UMEntryThunk, m_pObjectHandle));
+ pcsEmit->EmitADD();
+ pcsEmit->EmitLDIND_I(); // get OBJECTHANDLE
+ pcsEmit->EmitLDIND_REF(); // get Delegate object
+ }
+ }
+ else
+ {
+ pcsEmit->EmitLDC(NULL);
+ }
+ pcsEmit->EmitCALL(METHOD__STUBHELPERS__PROFILER_BEGIN_TRANSITION_CALLBACK, 3, 1);
+
+ // Store the MD for StubHelpers::ProfilerLeaveCallback().
+ DWORD dwMethodDescLocalNum = pcsEmit->NewLocal(ELEMENT_TYPE_I);
+ pcsEmit->EmitSTLOC(dwMethodDescLocalNum);
+ return dwMethodDescLocalNum;
+}
+
+void NDirectStubLinker::EmitProfilerEndTransitionCallback(ILCodeStream* pcsEmit, DWORD dwStubFlags, DWORD dwMethodDescLocalNum)
+{
+ STANDARD_VM_CONTRACT;
+
+ pcsEmit->EmitLDLOC(dwMethodDescLocalNum);
+ if (SF_IsReverseStub(dwStubFlags))
+ {
+ // we use a null pThread to indicate reverse interop
+ pcsEmit->EmitLDC(NULL);
+ }
+ else
+ {
+ pcsEmit->EmitLDLOC(GetThreadLocalNum());
+ }
+ pcsEmit->EmitCALL(METHOD__STUBHELPERS__PROFILER_END_TRANSITION_CALLBACK, 2, 0);
+}
+#endif // PROFILING_SUPPORTED
+
+#ifdef VERIFY_HEAP
+void NDirectStubLinker::EmitValidateLocal(ILCodeStream* pcsEmit, DWORD dwLocalNum, bool fIsByref, DWORD dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ pcsEmit->EmitLDLOC(dwLocalNum);
+
+ if (SF_IsDelegateStub(dwStubFlags))
+ {
+ pcsEmit->EmitLoadNullPtr();
+ pcsEmit->EmitLoadThis();
+ }
+ else if (SF_IsCALLIStub(dwStubFlags))
+ {
+ pcsEmit->EmitLoadNullPtr();
+ pcsEmit->EmitLDNULL();
+ }
+ else
+ {
+ // P/Invoke, CLR->COM
+ EmitLoadStubContext(pcsEmit, dwStubFlags);
+ pcsEmit->EmitLDNULL();
+ }
+
+ if (fIsByref)
+ {
+ // StubHelpers.ValidateByref(byref, pMD, pThis)
+ pcsEmit->EmitCALL(METHOD__STUBHELPERS__VALIDATE_BYREF, 3, 0);
+ }
+ else
+ {
+ // StubHelpers.ValidateObject(obj, pMD, pThis)
+ pcsEmit->EmitCALL(METHOD__STUBHELPERS__VALIDATE_OBJECT, 3, 0);
+ }
+}
+
+void NDirectStubLinker::EmitObjectValidation(ILCodeStream* pcsEmit, DWORD dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ // generate validation callouts for pinned locals
+ CQuickBytes qbLocalSig;
+ DWORD cbSig = GetLocalSigSize();
+
+ qbLocalSig.AllocThrows(cbSig);
+ PCOR_SIGNATURE pSig = (PCOR_SIGNATURE)qbLocalSig.Ptr();
+
+ GetLocalSig(pSig, cbSig);
+ SigPointer ptr(pSig, cbSig);
+
+ IfFailThrow(ptr.GetData(NULL)); // IMAGE_CEE_CS_CALLCONV_LOCAL_SIG
+
+ ULONG numLocals;
+ IfFailThrow(ptr.GetData(&numLocals));
+
+ for (ULONG i = 0; i < numLocals; i++)
+ {
+ BYTE modifier;
+ IfFailThrow(ptr.PeekByte(&modifier));
+ if (modifier == ELEMENT_TYPE_PINNED)
+ {
+ IfFailThrow(ptr.GetByte(NULL));
+ IfFailThrow(ptr.PeekByte(&modifier));
+ EmitValidateLocal(pcsEmit, i, (modifier == ELEMENT_TYPE_BYREF), dwStubFlags);
+ }
+
+ IfFailThrow(ptr.SkipExactlyOne());
+ }
+}
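+
+// For reference, the local signature walked above has the ECMA-335 (II.23.2.6)
+// shape:
+//
+//     IMAGE_CEE_CS_CALLCONV_LOCAL_SIG  Count  ( [ELEMENT_TYPE_PINNED] Type )*
+//
+// so a pinned byref local is encoded as ELEMENT_TYPE_PINNED ELEMENT_TYPE_BYREF
+// <type>, which is exactly the pattern the PeekByte/GetByte sequence detects.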
+#endif // VERIFY_HEAP
+
+// Loads the 'secret argument' passed to the stub.
+void NDirectStubLinker::EmitLoadStubContext(ILCodeStream* pcsEmit, DWORD dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ CONSISTENCY_CHECK(!SF_IsForwardDelegateStub(dwStubFlags));
+ CONSISTENCY_CHECK(!SF_IsFieldGetterStub(dwStubFlags) && !SF_IsFieldSetterStub(dwStubFlags));
+
+#ifdef FEATURE_COMINTEROP
+ if (SF_IsWinRTDelegateStub(dwStubFlags) && SF_IsForwardStub(dwStubFlags))
+ {
+ // we have the delegate 'this' but we need the EEImpl/Instantiated 'Invoke' MD pointer
+ // (Delegate.GetInvokeMethod does not return exact instantiated MD so we call our own helper)
+ pcsEmit->EmitLoadThis();
+ pcsEmit->EmitCALL(METHOD__STUBHELPERS__GET_DELEGATE_INVOKE_METHOD, 1, 1);
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ // get the secret argument via intrinsic
+ pcsEmit->EmitCALL(METHOD__STUBHELPERS__GET_STUB_CONTEXT, 0, 1);
+ }
+}
+
+#ifdef MDA_SUPPORTED
+void NDirectStubLinker::EmitCallGcCollectForMDA(ILCodeStream *pcsEmit, DWORD dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel *pSkipGcLabel = NULL;
+
+ if (SF_IsForwardPInvokeStub(dwStubFlags) &&
+ !SF_IsDelegateStub(dwStubFlags) &&
+ !SF_IsCALLIStub(dwStubFlags))
+ {
+ // don't call GC if this is a QCall
+ EmitLoadStubContext(pcsEmit, dwStubFlags);
+ pcsEmit->EmitCALL(METHOD__STUBHELPERS__IS_QCALL, 1, 1);
+
+ pSkipGcLabel = pcsEmit->NewCodeLabel();
+ pcsEmit->EmitBRTRUE(pSkipGcLabel);
+ }
+
+ pcsEmit->EmitCALL(METHOD__STUBHELPERS__TRIGGER_GC_FOR_MDA, 0, 0);
+
+ if (pSkipGcLabel != NULL)
+ {
+ pcsEmit->EmitLabel(pSkipGcLabel);
+ }
+}
+#endif // MDA_SUPPORTED
+
+#ifdef FEATURE_COMINTEROP
+
+class DispatchStubState : public StubState // For CLR-to-COM late-bound/eventing calls
+{
+public:
+ DispatchStubState()
+ : m_dwStubFlags(0),
+ m_lateBoundFlags(0)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ void SetLastError(BOOL fSetLastError)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ CONSISTENCY_CHECK(!fSetLastError);
+ }
+
+ void BeginEmit(DWORD dwStubFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ CONSISTENCY_CHECK(SF_IsCOMStub(dwStubFlags));
+ m_dwStubFlags = dwStubFlags;
+ }
+
+ void MarshalReturn(MarshalInfo* pInfo, int argOffset)
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pInfo));
+ }
+ CONTRACTL_END;
+ }
+
+ void MarshalArgument(MarshalInfo* pInfo, int argOffset, UINT nativeStackOffset)
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pInfo));
+ }
+ CONTRACTL_END;
+
+ if (SF_IsCOMLateBoundStub(m_dwStubFlags) && pInfo->GetDispWrapperType() != 0)
+ {
+ m_lateBoundFlags |= ComPlusCallInfo::kRequiresArgumentWrapping;
+ }
+ }
+
+ void MarshalLCID(int argIdx)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ void MarshalHiddenLengthArgument(MarshalInfo *, BOOL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+ void MarshalFactoryReturn()
+ {
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE();
+ }
+#endif // FEATURE_COMINTEROP
+
+ void EmitInvokeTarget(MethodDesc *pStubMD)
+ {
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE_MSG("Should never come to DispatchStubState::EmitInvokeTarget");
+ }
+
+ void FinishEmit(MethodDesc *pMD)
+ {
+ STANDARD_VM_CONTRACT;
+
+ // set flags directly on the interop MD
+ _ASSERTE(pMD->IsComPlusCall());
+
+ ((ComPlusCallMethodDesc *)pMD)->SetLateBoundFlags(m_lateBoundFlags);
+ }
+
+protected:
+ DWORD m_dwStubFlags;
+ BYTE m_lateBoundFlags; // ComPlusCallMethodDesc::Flags
+};
+
+#endif // FEATURE_COMINTEROP
+
+
+void PInvokeStaticSigInfo::PreInit(Module* pModule, MethodTable * pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // initialize data members
+ m_wFlags = 0;
+ m_pModule = pModule;
+ m_callConv = (CorPinvokeMap)0;
+ SetBestFitMapping (TRUE);
+ SetThrowOnUnmappableChar (FALSE);
+ SetLinkFlags (nlfNone);
+ SetCharSet (nltAnsi);
+ m_error = 0;
+
+ // assembly/type level m_bestFit & m_bThrowOnUnmappableChar
+ BOOL bBestFit;
+ BOOL bThrowOnUnmappableChar;
+
+ if (pMT != NULL)
+ {
+ EEClass::GetBestFitMapping(pMT, &bBestFit, &bThrowOnUnmappableChar);
+ }
+ else
+ {
+ ReadBestFitCustomAttribute(m_pModule->GetMDImport(), mdTypeDefNil, &bBestFit, &bThrowOnUnmappableChar);
+ }
+
+ SetBestFitMapping (bBestFit);
+ SetThrowOnUnmappableChar (bThrowOnUnmappableChar);
+}
+
+void PInvokeStaticSigInfo::PreInit(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PreInit(pMD->GetModule(), pMD->GetMethodTable());
+ SetIsStatic (pMD->IsStatic());
+ m_sig = pMD->GetSignature();
+ if (pMD->IsEEImpl())
+ {
+ CONSISTENCY_CHECK(pMD->GetMethodTable()->IsDelegate());
+ SetIsDelegateInterop(TRUE);
+ }
+}
+
+PInvokeStaticSigInfo::PInvokeStaticSigInfo(
+ MethodDesc* pMD, LPCUTF8 *pLibName, LPCUTF8 *pEntryPointName, ThrowOnError throwOnError)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DllImportInit(pMD, pLibName, pEntryPointName);
+
+ if (throwOnError)
+ ReportErrors();
+}
+
+PInvokeStaticSigInfo::PInvokeStaticSigInfo(MethodDesc* pMD, ThrowOnError throwOnError)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ MethodTable * pMT = pMD->GetMethodTable();
+
+ if (!pMT->IsDelegate())
+ {
+ DllImportInit(pMD, NULL, NULL);
+ return;
+ }
+
+ // initialize data members to defaults
+ PreInit(pMD);
+
+ // System.Runtime.InteropServices.UnmanagedFunctionPointerAttribute
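+ // e.g. (illustrative C# declaration that carries this attribute; names are made up):
+ //
+ //   [UnmanagedFunctionPointer(CallingConvention.Cdecl, CharSet = CharSet.Unicode, SetLastError = true)]
+ //   delegate int NativeCallback(IntPtr arg);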
+ BYTE* pData = NULL;
+ LONG cData = 0;
+ CorPinvokeMap callConv = (CorPinvokeMap)0;
+
+ HRESULT hRESULT = pMT->GetMDImport()->GetCustomAttributeByName(
+ pMT->GetCl(), g_UnmanagedFunctionPointerAttribute, (const VOID **)(&pData), (ULONG *)&cData);
+ IfFailThrow(hRESULT);
+ if(cData != 0)
+ {
+ CustomAttributeParser ca(pData, cData);
+
+ CaArg args[1];
+ args[0].InitEnum(SERIALIZATION_TYPE_I4, (ULONG)m_callConv);
+
+ IfFailGo(ParseKnownCaArgs(ca, args, lengthof(args)));
+
+ enum UnmanagedFunctionPointerNamedArgs
+ {
+ MDA_CharSet,
+ MDA_BestFitMapping,
+ MDA_ThrowOnUnmappableChar,
+ MDA_SetLastError,
+ MDA_PreserveSig,
+ MDA_Last,
+ };
+
+ CaNamedArg namedArgs[MDA_Last];
+ namedArgs[MDA_CharSet].InitI4FieldEnum("CharSet", "System.Runtime.InteropServices.CharSet", (ULONG)GetCharSet());
+ namedArgs[MDA_BestFitMapping].InitBoolField("BestFitMapping", (ULONG)GetBestFitMapping());
+ namedArgs[MDA_ThrowOnUnmappableChar].InitBoolField("ThrowOnUnmappableChar", (ULONG)GetThrowOnUnmappableChar());
+ namedArgs[MDA_SetLastError].InitBoolField("SetLastError", 0);
+ namedArgs[MDA_PreserveSig].InitBoolField("PreserveSig", 0);
+
+ IfFailGo(ParseKnownCaNamedArgs(ca, namedArgs, lengthof(namedArgs)));
+
+ callConv = (CorPinvokeMap)(args[0].val.u4 << 8);
+ CorNativeLinkType nlt = (CorNativeLinkType)0;
+
+ // XXX Tue 07/19/2005
+ // Keep in sync with the handling of CorNativeLinkType in
+ // PInvokeStaticSigInfo::DllImportInit.
+ switch( namedArgs[MDA_CharSet].val.u4 )
+ {
+ case 0:
+ case nltAnsi:
+ nlt = nltAnsi; break;
+ case nltUnicode:
+ case nltAuto: // Since Win9x isn't supported anymore, nltAuto always represents unicode strings.
+ nlt = nltUnicode; break;
+ default:
+ hr = E_FAIL; goto ErrExit;
+ }
+ SetCharSet ( nlt );
+ SetBestFitMapping (namedArgs[MDA_BestFitMapping].val.u1);
+ SetThrowOnUnmappableChar (namedArgs[MDA_ThrowOnUnmappableChar].val.u1);
+ if (namedArgs[MDA_SetLastError].val.u1)
+ SetLinkFlags ((CorNativeLinkFlags)(nlfLastError | GetLinkFlags()));
+ if (namedArgs[MDA_PreserveSig].val.u1)
+ SetLinkFlags ((CorNativeLinkFlags)(nlfNoMangle | GetLinkFlags()));
+ }
+
+
+ErrExit:
+ if (hr != S_OK)
+ SetError(IDS_EE_NDIRECT_BADNATL);
+
+ InitCallConv(callConv, pMD->IsVarArg());
+
+ if (throwOnError)
+ ReportErrors();
+}
+
+PInvokeStaticSigInfo::PInvokeStaticSigInfo(
+ Signature sig, Module* pModule, ThrowOnError throwOnError)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ PreInit(pModule, NULL);
+ m_sig = sig;
+ SetIsStatic (!(MetaSig::GetCallingConvention(pModule, sig) & IMAGE_CEE_CS_CALLCONV_HASTHIS));
+ InitCallConv((CorPinvokeMap)0, FALSE);
+
+ if (throwOnError)
+ ReportErrors();
+}
+
+void PInvokeStaticSigInfo::DllImportInit(MethodDesc* pMD, LPCUTF8 *ppLibName, LPCUTF8 *ppEntryPointName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pMD));
+
+ // These preconditions exist to prevent a multithreaded regression where
+ // pMD->ndirect.m_szLibName was passed in directly, cleared by this API,
+ // and then accessed on another thread before being reset here.
+ PRECONDITION(CheckPointer(ppLibName, NULL_OK) && (!ppLibName || *ppLibName == NULL));
+ PRECONDITION(CheckPointer(ppEntryPointName, NULL_OK) && (!ppEntryPointName || *ppEntryPointName == NULL));
+ }
+ CONTRACTL_END;
+
+ // initialize data members to defaults
+ PreInit(pMD);
+
+ // System.Runtime.InteropServices.DllImportAttribute
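+ // e.g. (illustrative C# declaration; names are made up):
+ //
+ //   [DllImport("user32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
+ //   private static extern int MessageBoxW(IntPtr hWnd, string text, string caption, uint type);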
+ IMDInternalImport *pInternalImport = pMD->GetMDImport();
+ CorPinvokeMap mappingFlags = pmMaxValue;
+ mdModuleRef modref = mdModuleRefNil;
+ if (FAILED(pInternalImport->GetPinvokeMap(pMD->GetMemberDef(), (DWORD*)&mappingFlags, ppEntryPointName, &modref)))
+ {
+#if defined(FEATURE_MIXEDMODE) && !defined(CROSSGEN_COMPILE) // IJW
+ // The guessing heuristic has been broken with NGen for a long time since we stopped loading
+ // images at NGen time using full LoadLibrary. The DLL references are not resolved correctly
+ // without full LoadLibrary.
+ //
+ // Disable the heuristic consistently during NGen so that it does not kick in by accident.
+ if (!IsCompilationProcess())
+ BestGuessNDirectDefaults(pMD);
+#endif
+ InitCallConv((CorPinvokeMap)0, pMD->IsVarArg());
+ return;
+ }
+
+ // out parameter pEntryPointName
+ if (ppEntryPointName && *ppEntryPointName == NULL)
+ *ppEntryPointName = pMD->GetName();
+
+ // out parameter pLibName
+ if (ppLibName != NULL)
+ {
+ if (FAILED(pInternalImport->GetModuleRefProps(modref, ppLibName)))
+ {
+ SetError(IDS_CLASSLOAD_BADFORMAT);
+ return;
+ }
+ }
+
+ // m_callConv
+ InitCallConv((CorPinvokeMap)(mappingFlags & pmCallConvMask), pMD->IsVarArg());
+
+ // m_bestFit
+ CorPinvokeMap bestFitMask = (CorPinvokeMap)(mappingFlags & pmBestFitMask);
+ if (bestFitMask == pmBestFitEnabled)
+ SetBestFitMapping (TRUE);
+ else if (bestFitMask == pmBestFitDisabled)
+ SetBestFitMapping (FALSE);
+
+ // m_bThrowOnUnmappableChar
+ CorPinvokeMap unmappableMask = (CorPinvokeMap)(mappingFlags & pmThrowOnUnmappableCharMask);
+ if (unmappableMask == pmThrowOnUnmappableCharEnabled)
+ SetThrowOnUnmappableChar (TRUE);
+ else if (unmappableMask == pmThrowOnUnmappableCharDisabled)
+ SetThrowOnUnmappableChar (FALSE);
+
+ // linkFlags : CorPinvoke -> CorNativeLinkFlags
+ if (mappingFlags & pmSupportsLastError)
+ SetLinkFlags ((CorNativeLinkFlags)(GetLinkFlags() | nlfLastError));
+ if (mappingFlags & pmNoMangle)
+ SetLinkFlags ((CorNativeLinkFlags)(GetLinkFlags() | nlfNoMangle));
+
+ // XXX Tue 07/19/2005
+ // Keep in sync with the handling of CorNativeLinkType in
+ // PInvokeStaticSigInfo::PInvokeStaticSigInfo.
+
+ // charset : CorPinvoke -> CorNativeLinkType
+ CorPinvokeMap charSetMask = (CorPinvokeMap)(mappingFlags & (pmCharSetNotSpec | pmCharSetAnsi | pmCharSetUnicode | pmCharSetAuto));
+ if (charSetMask == pmCharSetNotSpec || charSetMask == pmCharSetAnsi)
+ {
+ SetCharSet (nltAnsi);
+ }
+ else if (charSetMask == pmCharSetUnicode || charSetMask == pmCharSetAuto)
+ {
+ // Since Win9x isn't supported anymore, pmCharSetAuto always represents unicode strings.
+ SetCharSet (nltUnicode);
+ }
+ else
+ {
+ SetError(IDS_EE_NDIRECT_BADNATL);
+ }
+}
+
+
+#if defined(FEATURE_MIXEDMODE) && !defined(CROSSGEN_COMPILE) // IJW
+
+// This attempts to guess whether a target is an API call that uses SetLastError to communicate errors.
+static BOOL HeuristicDoesThisLooksLikeAnApiCallHelper(LPBYTE pTarget)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // This code is not that useful anymore since this functionality is already embedded in the VC linker.
+ // The linker will emit the lasterror flag by default for functions residing in modules that are
+ // a superset of the list below.
+ // Look for bug VSWhidbey 241895.
+
+ static struct SysDllInfo
+ {
+ LPCWSTR pName;
+ LPBYTE pImageBase;
+ DWORD dwImageSize;
+ } gSysDllInfo[] = {{WINDOWS_KERNEL32_DLLNAME_W, 0, 0},
+ {W("GDI32"), 0, 0},
+ {W("USER32"), 0, 0},
+ {W("ADVAPI32"), 0, 0}
+ };
+
+
+ for (int i = 0; i < sizeof(gSysDllInfo)/sizeof(*gSysDllInfo); i++)
+ {
+ if (gSysDllInfo[i].pImageBase == 0)
+ {
+ IMAGE_DOS_HEADER *pDos = (IMAGE_DOS_HEADER*)CLRGetModuleHandle(gSysDllInfo[i].pName);
+ if (pDos)
+ {
+ if (pDos->e_magic == VAL16(IMAGE_DOS_SIGNATURE))
+ {
+ IMAGE_NT_HEADERS *pNT = (IMAGE_NT_HEADERS*) (((LPBYTE)pDos) + VAL32(pDos->e_lfanew));
+ if (pNT->Signature == VAL32(IMAGE_NT_SIGNATURE) &&
+ pNT->FileHeader.SizeOfOptionalHeader ==
+#ifdef _WIN64
+ VAL16(sizeof(IMAGE_OPTIONAL_HEADER64))
+#else
+ VAL16(sizeof(IMAGE_OPTIONAL_HEADER32))
+#endif
+ && pNT->OptionalHeader.Magic == VAL16(IMAGE_NT_OPTIONAL_HDR_MAGIC))
+ {
+ gSysDllInfo[i].dwImageSize = VAL32(pNT->OptionalHeader.SizeOfImage);
+ }
+ }
+
+ gSysDllInfo[i].pImageBase = (LPBYTE)pDos;
+ }
+ }
+ if (gSysDllInfo[i].pImageBase != 0 &&
+ pTarget >= gSysDllInfo[i].pImageBase &&
+ pTarget < gSysDllInfo[i].pImageBase + gSysDllInfo[i].dwImageSize)
+ {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+LPBYTE FollowIndirect(LPBYTE pTarget)
+{
+ CONTRACT (LPBYTE)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ LPBYTE pRet = NULL;
+
+ EX_TRY
+ {
+ AVInRuntimeImplOkayHolder AVOkay;
+
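+ // Opcode bytes 0xFF 0x25 encode an indirect jmp. On x86 the operand is an
+ // absolute [addr32] cell holding the target; on AMD64 it is RIP-relative, so
+ // the cell lives at pTarget + 6 + disp32. Both forms are the typical shape of
+ // an import (IAT) thunk.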
+#ifdef _TARGET_X86_
+ if (pTarget != NULL && !(pTarget[0] != 0xff || pTarget[1] != 0x25))
+ {
+ pRet = **(LPBYTE**)(pTarget + 2);
+ }
+#elif defined(_TARGET_AMD64_)
+ if (pTarget != NULL && !(pTarget[0] != 0xff || pTarget[1] != 0x25))
+ {
+ INT64 rva = *(INT32*)(pTarget + 2);
+ pRet = *(LPBYTE*)(pTarget + 6 + rva);
+ }
+#endif
+ }
+ EX_CATCH
+ {
+ // Catch AVs here.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ RETURN pRet;
+}
+
+// This attempts to guess whether a target is an API call that uses SetLastError to communicate errors.
+BOOL HeuristicDoesThisLooksLikeAnApiCall(LPBYTE pTarget)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pTarget == NULL)
+ return FALSE;
+
+ if (HeuristicDoesThisLooksLikeAnApiCallHelper(pTarget))
+ return TRUE;
+
+ LPBYTE pTarget2 = FollowIndirect(pTarget);
+ if (pTarget2)
+ {
+ // jmp [xxxx] - could be an import thunk
+ return HeuristicDoesThisLooksLikeAnApiCallHelper( pTarget2 );
+ }
+
+ return FALSE;
+}
+
+BOOL HeuristicDoesThisLookLikeAGetLastErrorCall(LPBYTE pTarget)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ static LPBYTE pGetLastError = NULL;
+ if (!pGetLastError)
+ {
+ // No need to use a holder here, since no cleanup is necessary.
+ HMODULE hMod = CLRGetModuleHandle(WINDOWS_KERNEL32_DLLNAME_W);
+ if (hMod)
+ {
+ pGetLastError = (LPBYTE)GetProcAddress(hMod, "GetLastError");
+ if (!pGetLastError)
+ {
+ // This should never happen but better to be cautious.
+ pGetLastError = (LPBYTE)-1;
+ }
+ }
+ else
+ {
+ // We failed to get the module handle for kernel32.dll. This is almost
+ // impossible; however, it's better to err on the side of caution.
+ pGetLastError = (LPBYTE)-1;
+ }
+ }
+
+ if (pTarget == pGetLastError)
+ return TRUE;
+
+ if (pTarget == NULL)
+ return FALSE;
+
+ LPBYTE pTarget2 = FollowIndirect(pTarget);
+ if (pTarget2)
+ {
+ // jmp [xxxx] - could be an import thunk
+ return pTarget2 == pGetLastError;
+ }
+
+ return FALSE;
+}
+
+DWORD __stdcall FalseGetLastError()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetThread()->m_dwLastError;
+}
+
+void PInvokeStaticSigInfo::BestGuessNDirectDefaults(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!pMD->IsNDirect())
+ return;
+
+ NDirectMethodDesc* pMDD = (NDirectMethodDesc*)pMD;
+
+ if (!pMDD->IsEarlyBound())
+ return;
+
+ LPVOID pTarget = NULL;
+
+ // NOTE: If we get inside this block, and this is a call to GetLastError,
+ // then InitEarlyBoundNDirectTarget has not been run yet.
+ if (pMDD->NDirectTargetIsImportThunk())
+ {
+ // Get the unmanaged callsite.
+ pTarget = (LPVOID)pMDD->GetModule()->GetInternalPInvokeTarget(pMDD->GetRVA());
+
+ // If this is a call to GetLastError, then we haven't overwritten m_pNativeNDirectTarget yet.
+ if (HeuristicDoesThisLookLikeAGetLastErrorCall((LPBYTE)pTarget))
+ pTarget = (BYTE*) FalseGetLastError;
+ }
+ else
+ {
+ pTarget = pMDD->GetNativeNDirectTarget();
+ }
+
+ if (HeuristicDoesThisLooksLikeAnApiCall((LPBYTE) pTarget))
+ SetLinkFlags ((CorNativeLinkFlags)(GetLinkFlags() | nlfLastError));
+}
+
+#endif // FEATURE_MIXEDMODE && !CROSSGEN_COMPILE
+
+
+void PInvokeStaticSigInfo::InitCallConv(CorPinvokeMap callConv, BOOL bIsVarArg)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ // Convert WinAPI methods to either StdCall or CDecl based on whether they are varargs.
+ if (callConv == pmCallConvWinapi)
+ callConv = bIsVarArg ? pmCallConvCdecl : pmCallConvStdcall;
+
+ CorPinvokeMap sigCallConv = (CorPinvokeMap)0;
+ BOOL fSuccess = MetaSig::GetUnmanagedCallingConvention(m_pModule, m_sig.GetRawSig(), m_sig.GetRawSigLen(), &sigCallConv);
+
+ if (!fSuccess)
+ {
+ SetError(IDS_EE_NDIRECT_BADNATL); //Bad metadata format
+ }
+
+ // Do the same WinAPI to StdCall or CDecl for the signature calling convention as well. We need
+ // to do this before we check to make sure the PInvoke map calling convention and the
+ // signature calling convention match for compatibility reasons.
+ if (sigCallConv == pmCallConvWinapi)
+ sigCallConv = bIsVarArg ? pmCallConvCdecl : pmCallConvStdcall;
+
+ if (callConv != 0 && sigCallConv != 0 && callConv != sigCallConv)
+ SetError(IDS_EE_NDIRECT_BADNATL_CALLCONV);
+
+ if (callConv == 0 && sigCallConv == 0)
+ m_callConv = bIsVarArg ? pmCallConvCdecl : pmCallConvStdcall;
+ else if (callConv != 0)
+ m_callConv = callConv;
+ else
+ m_callConv = sigCallConv;
+
+ if (bIsVarArg && m_callConv != pmCallConvCdecl)
+ SetError(IDS_EE_NDIRECT_BADNATL_VARARGS_CALLCONV);
+}
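+
+// Resolution summary for the code above (a sketch derived from the checks; both
+// sides first normalize pmCallConvWinapi to cdecl for varargs, stdcall otherwise):
+//
+//     callConv   sigCallConv    m_callConv
+//     --------   -----------    ----------------------------------------
+//     0          0              cdecl if vararg, else stdcall
+//     X          0              X
+//     0          Y              Y
+//     X          Y, X == Y      X
+//     X          Y, X != Y      X, with IDS_EE_NDIRECT_BADNATL_CALLCONV set
+//
+// A vararg result other than cdecl additionally records
+// IDS_EE_NDIRECT_BADNATL_VARARGS_CALLCONV.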
+
+void PInvokeStaticSigInfo::ReportErrors()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_error != 0)
+ COMPlusThrow(kTypeLoadException, m_error);
+}
+
+
+//---------------------------------------------------------
+// Does a class or method have a NAT_L CustomAttribute?
+//
+// S_OK = yes
+// S_FALSE = no
+// FAILED = unknown because something failed.
+//---------------------------------------------------------
+/*static*/
+HRESULT NDirect::HasNAT_LAttribute(IMDInternalImport *pInternalImport, mdToken token, DWORD dwMemberAttrs)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pInternalImport));
+ PRECONDITION(TypeFromToken(token) == mdtMethodDef);
+ }
+ CONTRACTL_END;
+
+ // Check method flags first before trying to find the custom value
+ if (!IsReallyMdPinvokeImpl(dwMemberAttrs))
+ return S_FALSE;
+
+ DWORD mappingFlags;
+ LPCSTR pszImportName;
+ mdModuleRef modref;
+
+ if (SUCCEEDED(pInternalImport->GetPinvokeMap(token, &mappingFlags, &pszImportName, &modref)))
+ return S_OK;
+
+ return S_FALSE;
+}
+
+
+// Either MD or signature & module must be given.
+/*static*/
+BOOL NDirect::MarshalingRequired(MethodDesc *pMD, PCCOR_SIGNATURE pSig /*= NULL*/, Module *pModule /*= NULL*/)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(pMD != NULL || (pSig != NULL && pModule != NULL));
+ }
+ CONTRACTL_END;
+
+ // As a by-product, when returning FALSE we will also set the native stack size on the MD if it's
+ // an NDirectMethodDesc. This number is needed to link the P/Invoke (it determines the @n entry
+ // point name suffix and affects alignment thunk generation on the Mac). If this method returns
+ // TRUE, the stack size will be set when building the marshaling IL stub.
+ DWORD dwStackSize = 0;
+ CorPinvokeMap callConv = (CorPinvokeMap)0;
+
+ if (pMD != NULL)
+ {
+ if (pMD->IsNDirect() || pMD->IsComPlusCall())
+ {
+ // HRESULT swapping is handled by stub
+ if ((pMD->GetImplAttrs() & miPreserveSig) == 0)
+ return TRUE;
+ }
+
+ // SetLastError is handled by stub
+ PInvokeStaticSigInfo sigInfo(pMD);
+ if (sigInfo.GetLinkFlags() & nlfLastError)
+ return TRUE;
+
+ // LCID argument is handled by stub
+ if (GetLCIDParameterIndex(pMD) != -1)
+ return TRUE;
+
+ // making sure that cctor has run may be handled by stub
+ if (pMD->IsNDirect() && ((NDirectMethodDesc *)pMD)->IsClassConstructorTriggeredByILStub())
+ return TRUE;
+
+ callConv = sigInfo.GetCallConv();
+ }
+
+ if (pSig == NULL)
+ {
+ PREFIX_ASSUME(pMD != NULL);
+
+ pSig = pMD->GetSig();
+ pModule = pMD->GetModule();
+ }
+
+ // Check to make certain that the signature only contains types that marshal trivially
+ SigPointer ptr(pSig);
+ IfFailThrow(ptr.GetCallingConvInfo(NULL));
+ ULONG numArgs;
+ IfFailThrow(ptr.GetData(&numArgs));
+ numArgs++; // +1 for return type
+
+ // We'll need to parse parameter native types
+ mdParamDef *pParamTokenArray = (mdParamDef *)_alloca(numArgs * sizeof(mdParamDef));
+ IMDInternalImport *pMDImport = pModule->GetMDImport();
+
+ SigTypeContext emptyTypeContext;
+
+ mdMethodDef methodToken = mdMethodDefNil;
+ if (pMD != NULL)
+ {
+ methodToken = pMD->GetMemberDef();
+ }
+ CollateParamTokens(pMDImport, methodToken, numArgs - 1, pParamTokenArray);
+
+ for (ULONG i = 0; i < numArgs; i++)
+ {
+ SigPointer arg = ptr;
+ CorElementType type;
+ IfFailThrow(arg.PeekElemType(&type));
+
+ switch (type)
+ {
+ case ELEMENT_TYPE_PTR:
+ {
+ IfFailThrow(arg.GetElemType(NULL)); // skip ELEMENT_TYPE_PTR
+ IfFailThrow(arg.PeekElemType(&type));
+
+ if (type == ELEMENT_TYPE_VALUETYPE)
+ {
+ if ((arg.HasCustomModifier(pModule,
+ "Microsoft.VisualC.NeedsCopyConstructorModifier",
+ ELEMENT_TYPE_CMOD_REQD)) ||
+ (arg.HasCustomModifier(pModule,
+ "System.Runtime.CompilerServices.IsCopyConstructed",
+ ELEMENT_TYPE_CMOD_REQD)))
+ {
+ return TRUE;
+ }
+ }
+ if (i > 0) dwStackSize += sizeof(SLOT);
+ break;
+ }
+
+ case ELEMENT_TYPE_INTERNAL:
+
+ // this check is not functional in DAC and provides no security against a malicious dump
+ // the DAC is prepared to receive an invalid type handle
+#ifndef DACCESS_COMPILE
+ if (pModule->IsSigInIL(arg.GetPtr()))
+ THROW_BAD_FORMAT(BFA_BAD_SIGNATURE, (Module*)pModule);
+#endif
+
+ /* Fall thru */
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ TypeHandle hndArgType = arg.GetTypeHandleThrowing(pModule, &emptyTypeContext);
+
+ // JIT can handle internal blittable value types
+ if (!hndArgType.IsBlittable() && !hndArgType.IsEnum())
+ {
+ return TRUE;
+ }
+
+ // return value is fine as long as it can be normalized to an integer
+ if (i == 0)
+ {
+ CorElementType normalizedType = hndArgType.GetInternalCorElementType();
+ if (normalizedType == ELEMENT_TYPE_VALUETYPE)
+ {
+ // it is a structure even after normalization
+ return TRUE;
+ }
+ }
+ else
+ {
+ dwStackSize += StackElemSize(hndArgType.GetSize());
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_CHAR:
+ {
+ // Bool requires marshaling
+ // Char may require marshaling (MARSHAL_TYPE_ANSICHAR)
+ return TRUE;
+ }
+
+ default:
+ {
+ if (CorTypeInfo::IsPrimitiveType(type) || type == ELEMENT_TYPE_FNPTR)
+ {
+ if (i > 0) dwStackSize += StackElemSize(CorTypeInfo::Size(type));
+ }
+ else
+ {
+ // other non-primitive type - requires marshaling
+ return TRUE;
+ }
+ }
+ }
+
+ // check for explicit MarshalAs
+ NativeTypeParamInfo paramInfo;
+
+ if (pParamTokenArray[i] != mdParamDefNil)
+ {
+ if (!ParseNativeTypeInfo(pParamTokenArray[i], pMDImport, &paramInfo) ||
+ paramInfo.m_NativeType != NATIVE_TYPE_DEFAULT)
+ {
+ // Presence of MarshalAs does not necessitate marshaling (it could as well be the default
+ // for the type), but it's a good enough heuristic. We definitely don't want to duplicate
+ // the logic from code:MarshalInfo.MarshalInfo here.
+ return TRUE;
+ }
+ }
+
+ IfFailThrow(ptr.SkipExactlyOne());
+ }
+
+ if (!FitsInU2(dwStackSize))
+ return TRUE;
+
+ // do not set the stack size for varargs - the number is call site specific
+ if (pMD != NULL && !pMD->IsVarArg())
+ {
+ if (pMD->IsNDirect())
+ {
+ ((NDirectMethodDesc *)pMD)->SetStackArgumentSize(static_cast<WORD>(dwStackSize), callConv);
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (pMD->IsComPlusCall())
+ {
+ // calling convention is always stdcall
+ ((ComPlusCallMethodDesc *)pMD)->SetStackArgumentSize(static_cast<WORD>(dwStackSize));
+ }
+#endif // FEATURE_COMINTEROP
+ }
+
+ return FALSE;
+}
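+
+// Example of the trivial case (illustrative): "static extern int Add(int a, int b)"
+// with PreserveSig and no [MarshalAs] returns FALSE here (stack size 8 on x86),
+// whereas bool/char parameters, non-blittable structs, SetLastError=true, an LCID
+// argument, or HRESULT swapping all force a marshaling IL stub.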
+
+
+// factorization of CreateNDirectStubWorker
+static MarshalInfo::MarshalType DoMarshalReturnValue(MetaSig& msig,
+ mdParamDef* params,
+ CorNativeLinkType nlType,
+ CorNativeLinkFlags nlFlags,
+ UINT argidx, // this is used for reverse pinvoke hresult swapping
+ StubState* pss,
+ BOOL fThisCall,
+ int argOffset,
+ DWORD dwStubFlags,
+ MethodDesc *pMD,
+ UINT& nativeStackOffset,
+ bool& fStubNeedsCOM,
+ int nativeArgIndex
+ DEBUG_ARG(LPCUTF8 pDebugName)
+ DEBUG_ARG(LPCUTF8 pDebugClassName)
+ )
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(params));
+ PRECONDITION(CheckPointer(pss));
+ PRECONDITION(CheckPointer(pMD, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ MarshalInfo::MarshalType marshalType = (MarshalInfo::MarshalType) 0xcccccccc;
+
+ MarshalInfo::MarshalScenario ms;
+#ifdef FEATURE_COMINTEROP
+ if (SF_IsCOMStub(dwStubFlags))
+ {
+ if (SF_IsWinRTStub(dwStubFlags))
+ ms = MarshalInfo::MARSHAL_SCENARIO_WINRT;
+ else
+ ms = MarshalInfo::MARSHAL_SCENARIO_COMINTEROP;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ ms = MarshalInfo::MARSHAL_SCENARIO_NDIRECT;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (SF_IsWinRTCtorStub(dwStubFlags))
+ {
+ _ASSERTE(msig.GetReturnType() == ELEMENT_TYPE_VOID);
+ _ASSERTE(SF_IsHRESULTSwapping(dwStubFlags));
+
+ pss->MarshalFactoryReturn();
+ nativeStackOffset += sizeof(LPVOID);
+ if (SF_IsWinRTCompositionStub(dwStubFlags))
+ {
+ nativeStackOffset += 2 * sizeof(LPVOID);
+ }
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ if (msig.GetReturnType() != ELEMENT_TYPE_VOID)
+ {
+ MarshalInfo returnInfo(msig.GetModule(),
+ msig.GetReturnProps(),
+ msig.GetSigTypeContext(),
+ params[0],
+ ms,
+ nlType,
+ nlFlags,
+ FALSE,
+ argidx,
+ msig.NumFixedArgs(),
+ SF_IsBestFit(dwStubFlags),
+ SF_IsThrowOnUnmappableChar(dwStubFlags),
+ TRUE,
+ pMD,
+ TRUE
+ DEBUG_ARG(pDebugName)
+ DEBUG_ARG(pDebugClassName)
+ DEBUG_ARG(0)
+ );
+
+ marshalType = returnInfo.GetMarshalType();
+
+ fStubNeedsCOM |= returnInfo.MarshalerRequiresCOM();
+
+#ifdef FEATURE_COMINTEROP
+ if (marshalType == MarshalInfo::MARSHAL_TYPE_HIDDENLENGTHARRAY)
+ {
+ // Hidden length arrays are only valid with HRESULT swapping
+ if (!SF_IsHRESULTSwapping(dwStubFlags))
+ {
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_COM_UNSUPPORTED_SIG);
+ }
+
+ // We should be safe to cast here - giant signatures will fail to marshal later with IDS_EE_SIGTOOCOMPLEX
+ returnInfo.SetHiddenLengthParamIndex(static_cast<UINT16>(nativeArgIndex));
+
+ // Inject the hidden argument so that it winds up at the end of the method signature
+ pss->MarshalHiddenLengthArgument(&returnInfo, TRUE);
+ nativeStackOffset += returnInfo.GetHiddenLengthParamStackSize();
+
+ if (SF_IsReverseStub(dwStubFlags))
+ {
+ ++argOffset;
+ }
+ }
+
+ if (SF_IsCOMStub(dwStubFlags))
+ {
+ if (marshalType == MarshalInfo::MARSHAL_TYPE_VALUECLASS ||
+ marshalType == MarshalInfo::MARSHAL_TYPE_BLITTABLEVALUECLASS ||
+ marshalType == MarshalInfo::MARSHAL_TYPE_GUID ||
+ marshalType == MarshalInfo::MARSHAL_TYPE_DECIMAL)
+ {
+#ifndef _TARGET_X86_
+ // We cannot optimize marshalType to MARSHAL_TYPE_GENERIC_* because the JIT works with exact types
+ // and would refuse to compile the stub if it implicitly converted between scalars and value types (also see
+ // code:MarshalInfo.MarshalInfo where we do the optimization on x86). We want to throw only if the structure
+ // is too big to be returned in registers.
+ if (marshalType != MarshalInfo::MARSHAL_TYPE_BLITTABLEVALUECLASS ||
+ IsUnmanagedValueTypeReturnedByRef(returnInfo.GetNativeArgSize()))
+#endif // _TARGET_X86_
+ {
+ if (!SF_IsHRESULTSwapping(dwStubFlags) && !SF_IsCOMLateBoundStub(dwStubFlags))
+ {
+ // Note that this limitation is very likely not needed anymore and could be lifted if we care.
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_COM_UNSUPPORTED_SIG);
+ }
+ }
+
+ pss->MarshalReturn(&returnInfo, argOffset);
+ }
+ else
+ {
+ // We don't support native methods that return VARIANTs directly.
+ if (marshalType == MarshalInfo::MARSHAL_TYPE_OBJECT)
+ {
+ if (!SF_IsHRESULTSwapping(dwStubFlags) && !SF_IsCOMLateBoundStub(dwStubFlags))
+ {
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_COM_UNSUPPORTED_SIG);
+ }
+ }
+
+ pss->MarshalReturn(&returnInfo, argOffset);
+ }
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ if (marshalType > MarshalInfo::MARSHAL_TYPE_DOUBLE && IsUnsupportedValueTypeReturn(msig))
+ {
+ if (marshalType == MarshalInfo::MARSHAL_TYPE_BLITTABLEVALUECLASS
+ || marshalType == MarshalInfo::MARSHAL_TYPE_GUID
+ || marshalType == MarshalInfo::MARSHAL_TYPE_DECIMAL
+#ifdef FEATURE_COMINTEROP
+ || marshalType == MarshalInfo::MARSHAL_TYPE_DATETIME
+#endif // FEATURE_COMINTEROP
+ )
+ {
+ if (SF_IsHRESULTSwapping(dwStubFlags))
+ {
+ // V1 restriction: we could implement this but it's late in the game to do so.
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_NDIRECT_UNSUPPORTED_SIG);
+ }
+ }
+ else if (marshalType == MarshalInfo::MARSHAL_TYPE_HANDLEREF)
+ {
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_BADMARSHAL_HANDLEREFRESTRICTION);
+ }
+ else
+ {
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_NDIRECT_UNSUPPORTED_SIG);
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (marshalType == MarshalInfo::MARSHAL_TYPE_OBJECT && !SF_IsHRESULTSwapping(dwStubFlags))
+ {
+ // No support for returning variants. This is a V1 restriction, due to the late date,
+ // don't want to add the special-case code to support this in light of low demand.
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_NOVARIANTRETURN);
+ }
+#endif // FEATURE_COMINTEROP
+
+ pss->MarshalReturn(&returnInfo, argOffset);
+ }
+ }
+
+ return marshalType;
+}
+
+static inline UINT GetStackOffsetFromStackSize(UINT stackSize, bool fThisCall)
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef _TARGET_X86_
+ if (fThisCall)
+ {
+ // -1 means that the argument is not on the stack
+ return (stackSize >= sizeof(SLOT) ? (stackSize - sizeof(SLOT)) : (UINT)-1);
+ }
+#endif // _TARGET_X86_
+ return stackSize;
+}
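+
+// Example (x86 thiscall): 'this' travels in ECX, so the running stack size is
+// shifted down by one SLOT; an incoming size of 0 maps to (UINT)-1 ("not on the
+// stack"), 4 maps to offset 0, 8 to offset 4, and so on.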
+
+#ifdef FEATURE_COMINTEROP
+
+struct HiddenParameterInfo
+{
+ MarshalInfo *pManagedParam; // Managed parameter which required the hidden parameter
+ int nativeIndex; // 0 based index into the native method signature where the hidden parameter should be injected
+};
+
+// Get the indexes of any hidden length parameters to be marshaled for the method
+//
+// At return, each value in the ppParamIndexes array is a 0 based index into the native method signature where
+// the length parameter for a hidden length array should be passed. The MarshalInfo objects will also be
+// updated such that they all have explicit marshaling information.
+//
+// The caller is responsible for freeing the memory pointed to by ppParamIndexes
+void CheckForHiddenParameters(DWORD cParamMarshalInfo,
+ __in_ecount(cParamMarshalInfo) MarshalInfo *pParamMarshalInfo,
+ __out DWORD *pcHiddenNativeParameters,
+ __out_ecount(*pcHiddenNativeParameters) HiddenParameterInfo **ppHiddenNativeParameters)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pParamMarshalInfo));
+ PRECONDITION(CheckPointer(pcHiddenNativeParameters));
+ PRECONDITION(CheckPointer(ppHiddenNativeParameters));
+ }
+ CONTRACTL_END;
+
+ NewArrayHolder<HiddenParameterInfo> hiddenParamInfo(new HiddenParameterInfo[cParamMarshalInfo]);
+ DWORD foundInfoCount = 0;
+
+ for (DWORD iParam = 0; iParam < cParamMarshalInfo; ++iParam)
+ {
+ // Look for hidden length arrays, which all require additional parameters to be added
+ if (pParamMarshalInfo[iParam].GetMarshalType() == MarshalInfo::MARSHAL_TYPE_HIDDENLENGTHARRAY)
+ {
+ DWORD currentNativeIndex = iParam + foundInfoCount;
+
+ // The location of the length parameter is implicitly just before the array pointer.
+ // We'll give it our current index, and bumping the found count will push us back a slot.
+
+ // We should be safe to cast here - giant signatures will fail to marshal later with IDS_EE_SIGTOOCOMPLEX
+ pParamMarshalInfo[iParam].SetHiddenLengthParamIndex(static_cast<UINT16>(currentNativeIndex));
+
+ hiddenParamInfo[foundInfoCount].nativeIndex = pParamMarshalInfo[iParam].HiddenLengthParamIndex();
+ hiddenParamInfo[foundInfoCount].pManagedParam = &(pParamMarshalInfo[iParam]);
+ ++foundInfoCount;
+ }
+ }
+
+ *pcHiddenNativeParameters = foundInfoCount;
+ *ppHiddenNativeParameters = hiddenParamInfo.Extract();
+}
+
+bool IsHiddenParameter(int nativeArgIndex,
+ DWORD cHiddenParameters,
+ __in_ecount(cHiddenParameters) HiddenParameterInfo *pHiddenParameters,
+ __out HiddenParameterInfo **ppHiddenParameterInfo)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(cHiddenParameters == 0 || CheckPointer(pHiddenParameters));
+ PRECONDITION(CheckPointer(ppHiddenParameterInfo));
+ }
+ CONTRACTL_END;
+
+ *ppHiddenParameterInfo = NULL;
+
+ for (DWORD i = 0; i < cHiddenParameters; ++i)
+ {
+ _ASSERTE(pHiddenParameters[i].nativeIndex != -1);
+ if (pHiddenParameters[i].nativeIndex == nativeArgIndex)
+ {
+ *ppHiddenParameterInfo = &(pHiddenParameters[i]);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+#endif // FEATURE_COMINTEROP
+
+//---------------------------------------------------------
+// Creates a new stub for an N/Direct call. Return refcount is 1.
+// Note that this function may now throw if it fails to create
+// a stub.
+//---------------------------------------------------------
+static void CreateNDirectStubWorker(StubState* pss,
+ StubSigDesc* pSigDesc,
+ CorNativeLinkType nlType,
+ CorNativeLinkFlags nlFlags,
+ CorPinvokeMap unmgdCallConv,
+ DWORD dwStubFlags,
+ MethodDesc *pMD,
+ mdParamDef* pParamTokenArray,
+ int iLCIDArg
+ )
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pss));
+ PRECONDITION(CheckPointer(pSigDesc));
+ PRECONDITION(CheckPointer(pMD, NULL_OK));
+ PRECONDITION(!pMD || pMD->IsILStub() || (0 != pMD->GetMethodTable()->IsDelegate()) == SF_IsDelegateStub(dwStubFlags));
+ }
+ CONTRACTL_END;
+
+ SF_ConsistencyCheck(dwStubFlags);
+
+#ifdef _DEBUG
+ if (g_pConfig->ShouldBreakOnInteropStubSetup(pSigDesc->m_pDebugName))
+ CONSISTENCY_CHECK_MSGF(false, ("BreakOnInteropStubSetup: '%s' ", pSigDesc->m_pDebugName));
+#endif // _DEBUG
+
+ Stub* pstub = NULL;
+
+ if (SF_IsCOMStub(dwStubFlags))
+ {
+ _ASSERTE(0 == nlType);
+ _ASSERTE(0 == nlFlags);
+ _ASSERTE(0 == unmgdCallConv);
+ }
+ else
+ {
+ _ASSERTE(nlType == nltAnsi || nlType == nltUnicode);
+ }
+ Module *pModule = pSigDesc->m_pModule;
+
+ //
+ // Set up signature walking objects.
+ //
+
+ MetaSig msig(pSigDesc->m_sig,
+ pModule,
+ &pSigDesc->m_typeContext);
+
+ if (SF_IsVarArgStub(dwStubFlags))
+ msig.SetTreatAsVarArg();
+
+ bool fThisCall = (unmgdCallConv == pmCallConvThiscall);
+
+ pss->SetLastError(nlFlags & nlfLastError);
+
+ // This has been in the product since forward P/Invoke via delegates was
+ // introduced. It's wrong, but please keep it for backward compatibility.
+ if (SF_IsDelegateStub(dwStubFlags))
+ pss->SetLastError(TRUE);
+
+ pss->BeginEmit(dwStubFlags);
+
+ if (-1 != iLCIDArg)
+ {
+ // LCID is not supported on WinRT
+ _ASSERTE(!SF_IsWinRTStub(dwStubFlags));
+
+ // The code to handle the LCID will call MarshalLCID before calling MarshalArgument
+ // on the argument the LCID should go after. So we just bump up the index here.
+ iLCIDArg++;
+ }
+
+ int numArgs = msig.NumFixedArgs();
+
+ // thiscall must have at least one parameter (the "this")
+ if (fThisCall && numArgs == 0)
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_NDIRECT_BADNATL_THISCALL);
+
+ //
+ // Now, emit the IL.
+ //
+
+ int argOffset = 0;
+
+ MarshalInfo::MarshalType marshalType = (MarshalInfo::MarshalType) 0xcccccccc;
+
+ //
+ // Marshal the return value.
+ //
+
+ UINT nativeStackSize = (SF_IsCOMStub(dwStubFlags) ? sizeof(SLOT) : 0);
+ bool fHasCopyCtorArgs = false;
+ bool fStubNeedsCOM = SF_IsCOMStub(dwStubFlags);
+
+ // The return-structure secret arg comes first; however, a by-value return is
+ // processed at the end because it could be the HRESULT-swapped argument, which
+ // always comes last.
+ bool fMarshalReturnValueFirst = !SF_IsHRESULTSwapping(dwStubFlags) && HasRetBuffArg(&msig);
+
+ if (fMarshalReturnValueFirst)
+ {
+ marshalType = DoMarshalReturnValue(msig,
+ pParamTokenArray,
+ nlType,
+ nlFlags,
+ 0,
+ pss,
+ fThisCall,
+ argOffset,
+ dwStubFlags,
+ pMD,
+ nativeStackSize,
+ fStubNeedsCOM,
+ 0
+ DEBUG_ARG(pSigDesc->m_pDebugName)
+ DEBUG_ARG(pSigDesc->m_pDebugClassName)
+ );
+
+ if (marshalType == MarshalInfo::MARSHAL_TYPE_DATE ||
+ marshalType == MarshalInfo::MARSHAL_TYPE_CURRENCY ||
+ marshalType == MarshalInfo::MARSHAL_TYPE_ARRAYWITHOFFSET ||
+ marshalType == MarshalInfo::MARSHAL_TYPE_HANDLEREF ||
+ marshalType == MarshalInfo::MARSHAL_TYPE_ARGITERATOR
+#ifdef FEATURE_COMINTEROP
+ || marshalType == MarshalInfo::MARSHAL_TYPE_OLECOLOR
+#endif // FEATURE_COMINTEROP
+ )
+ {
+ // These are special non-blittable types returned by-ref in managed,
+ // but marshaled as primitive values returned by-value in unmanaged.
+ }
+ else
+ {
+ // This is an ordinary value type - see if it is returned by-ref.
+ MethodTable *pRetMT = msig.GetRetTypeHandleThrowing().AsMethodTable();
+ if (IsUnmanagedValueTypeReturnedByRef(pRetMT->GetNativeSize()))
+ {
+ nativeStackSize += sizeof(LPVOID);
+ }
+ }
+ }
+
+ //
+ // Marshal the arguments
+ //
+ MarshalInfo::MarshalScenario ms;
+#ifdef FEATURE_COMINTEROP
+ if (SF_IsCOMStub(dwStubFlags))
+ {
+ if (SF_IsWinRTStub(dwStubFlags))
+ ms = MarshalInfo::MARSHAL_SCENARIO_WINRT;
+ else
+ ms = MarshalInfo::MARSHAL_SCENARIO_COMINTEROP;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ ms = MarshalInfo::MARSHAL_SCENARIO_NDIRECT;
+ }
+
+ // Build up marshaling information for each of the method's parameters
+ SIZE_T cbParamMarshalInfo;
+ if (!ClrSafeInt<SIZE_T>::multiply(sizeof(MarshalInfo), numArgs, cbParamMarshalInfo))
+ {
+ COMPlusThrowHR(COR_E_OVERFLOW);
+ }
+
+ NewArrayHolder<BYTE> pbParamMarshalInfo(new BYTE[cbParamMarshalInfo]);
+ MarshalInfo *pParamMarshalInfo = reinterpret_cast<MarshalInfo *>(pbParamMarshalInfo.GetValue());
+
+ MetaSig paramInfoMSig(msig);
+ for (int i = 0; i < numArgs; ++i)
+ {
+ paramInfoMSig.NextArg();
+ new(&(pParamMarshalInfo[i])) MarshalInfo(paramInfoMSig.GetModule(),
+ paramInfoMSig.GetArgProps(),
+ paramInfoMSig.GetSigTypeContext(),
+ pParamTokenArray[i + 1],
+ ms,
+ nlType,
+ nlFlags,
+ TRUE,
+ i + 1,
+ numArgs,
+ SF_IsBestFit(dwStubFlags),
+ SF_IsThrowOnUnmappableChar(dwStubFlags),
+ TRUE,
+ pMD,
+ TRUE
+ DEBUG_ARG(pSigDesc->m_pDebugName)
+ DEBUG_ARG(pSigDesc->m_pDebugClassName)
+ DEBUG_ARG(i + 1));
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // Check to see if we need to inject any additional hidden parameters
+ DWORD cHiddenNativeParameters;
+ NewArrayHolder<HiddenParameterInfo> pHiddenNativeParameters;
+ CheckForHiddenParameters(numArgs, pParamMarshalInfo, &cHiddenNativeParameters, &pHiddenNativeParameters);
+
+ // Hidden parameters and LCID do not mix
+ _ASSERTE(!(cHiddenNativeParameters > 0 && iLCIDArg != -1));
+#endif // FEATURE_COMINTEROP
+
+ // Marshal the parameters
+ int argidx = 1;
+ int nativeArgIndex = 0;
+ while (argidx <= numArgs)
+ {
+#ifdef FEATURE_COMINTEROP
+ HiddenParameterInfo *pHiddenParameter;
+ // Check to see if we need to inject a hidden parameter
+ if (IsHiddenParameter(nativeArgIndex, cHiddenNativeParameters, pHiddenNativeParameters, &pHiddenParameter))
+ {
+ pss->MarshalHiddenLengthArgument(pHiddenParameter->pManagedParam, FALSE);
+ nativeStackSize += pHiddenParameter->pManagedParam->GetHiddenLengthParamStackSize();
+
+ if (SF_IsReverseStub(dwStubFlags))
+ {
+ ++argOffset;
+ }
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ //
+ // Check to see if this is the parameter after which we need to insert the LCID.
+ //
+ if (argidx == iLCIDArg)
+ {
+ pss->MarshalLCID(argidx);
+ nativeStackSize += sizeof(LPVOID);
+
+ if (SF_IsReverseStub(dwStubFlags))
+ argOffset++;
+ }
+
+ msig.NextArg();
+
+ MarshalInfo &info = pParamMarshalInfo[argidx - 1];
+
+#ifdef FEATURE_COMINTEROP
+ // For the hidden-length array, length parameters must occur before the parameter containing the array pointer
+ _ASSERTE(info.GetMarshalType() != MarshalInfo::MARSHAL_TYPE_HIDDENLENGTHARRAY || nativeArgIndex > info.HiddenLengthParamIndex());
+#endif // FEATURE_COMINTEROP
+
+ pss->MarshalArgument(&info, argOffset, GetStackOffsetFromStackSize(nativeStackSize, fThisCall));
+ nativeStackSize += info.GetNativeArgSize();
+
+ fStubNeedsCOM |= info.MarshalerRequiresCOM();
+
+ if (fThisCall && argidx == 1)
+ {
+ // make sure that the first parameter is enregisterable
+ if (info.GetNativeArgSize() > sizeof(SLOT))
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_NDIRECT_BADNATL_THISCALL);
+ }
+
+#ifndef FEATURE_CORECLR
+ if (info.GetMarshalType() == MarshalInfo::MARSHAL_TYPE_BLITTABLEVALUECLASSWITHCOPYCTOR)
+ {
+ fHasCopyCtorArgs = true;
+ }
+#endif // !FEATURE_CORECLR
+
+ argidx++;
+ }
+
+ ++nativeArgIndex;
+ }
+
+ // Check to see if this is the parameter after which we need to insert the LCID.
+ if (argidx == iLCIDArg)
+ {
+ pss->MarshalLCID(argidx);
+ nativeStackSize += sizeof(LPVOID);
+
+ if (SF_IsReverseStub(dwStubFlags))
+ argOffset++;
+ }
+
+ if (!fMarshalReturnValueFirst)
+ {
+ // This could be a HRESULT-swapped argument so it must come last.
+ marshalType = DoMarshalReturnValue(msig,
+ pParamTokenArray,
+ nlType,
+ nlFlags,
+ argidx,
+ pss,
+ fThisCall,
+ argOffset,
+ dwStubFlags,
+ pMD,
+ nativeStackSize,
+ fStubNeedsCOM,
+ nativeArgIndex
+ DEBUG_ARG(pSigDesc->m_pDebugName)
+ DEBUG_ARG(pSigDesc->m_pDebugClassName)
+ );
+
+ // If the return value is a SafeHandle or CriticalHandle, mark the stub method.
+ // Interop methods that use this stub will have an implicit reliability contract
+ // (see code:TAStackCrawlCallBack).
+ if (!SF_IsHRESULTSwapping(dwStubFlags))
+ {
+ if (marshalType == MarshalInfo::MARSHAL_TYPE_SAFEHANDLE ||
+ marshalType == MarshalInfo::MARSHAL_TYPE_CRITICALHANDLE)
+ {
+ if (pMD->IsDynamicMethod())
+ pMD->AsDynamicMethodDesc()->SetUnbreakable(true);
+ }
+ }
+ }
+
+ if (SF_IsHRESULTSwapping(dwStubFlags))
+ {
+ if (msig.GetReturnType() != ELEMENT_TYPE_VOID)
+ nativeStackSize += sizeof(LPVOID);
+ }
+
+ if (pMD->IsDynamicMethod())
+ {
+ // Set the native stack size to the IL stub MD. It is needed for alignment
+ // thunk generation on the Mac and stdcall name decoration on Windows.
+ // We do not store it directly in the interop MethodDesc here because due
+ // to sharing we come here only for the first call with given signature and
+ // the target MD may even be NULL.
+
+#ifdef _TARGET_X86_
+ if (fThisCall)
+ {
+ _ASSERTE(nativeStackSize >= sizeof(SLOT));
+ nativeStackSize -= sizeof(SLOT);
+ }
+#else // _TARGET_X86_
+ //
+ // The algorithm to compute nativeStackSize on the fly is x86-specific.
+ // Recompute the correct size for other platforms from the stub signature.
+ //
+ if (SF_IsForwardStub(dwStubFlags))
+ {
+ // It would be nice to compute the correct value for forward stubs too.
+ // The value is only used in MarshalNative::NumParamBytes right now,
+ // and changing what MarshalNative::NumParamBytes returns is
+ // a potential breaking change.
+ }
+ else
+ {
+ // native stack size is updated in code:ILStubState.SwapStubSignatures
+ }
+#endif // _TARGET_X86_
+
+ if (!FitsInU2(nativeStackSize))
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIGTOOCOMPLEX);
+
+ DynamicMethodDesc *pDMD = pMD->AsDynamicMethodDesc();
+
+ pDMD->SetNativeStackArgSize(static_cast<WORD>(nativeStackSize));
+ pDMD->SetHasCopyCtorArgs(fHasCopyCtorArgs);
+ pDMD->SetStubNeedsCOMStarted(fStubNeedsCOM);
+ }
+
+ // FinishEmit needs to know the native stack arg size so we call it after the number
+ // has been set in the stub MD (code:DynamicMethodDesc.SetNativeStackArgSize)
+ pss->FinishEmit(pMD);
+}
+
+class NDirectStubHashBlob : public ILStubHashBlobBase
+{
+public:
+ Module* m_pModule;
+
+ WORD m_unmgdCallConv;
+ BYTE m_nlType; // C_ASSERTS are in NDirect::CreateHashBlob
+ BYTE m_nlFlags;
+
+ DWORD m_StubFlags;
+
+ INT32 m_iLCIDArg;
+ INT32 m_nParams;
+ BYTE m_rgbSigAndParamData[1];
+ // (dwParamAttr, cbNativeType) // length: number of parameters
+ // NativeTypeBlob // length: number of parameters
+ // BYTE m_rgbSigData[]; // length: determined by sig walk
+};
+
+// For better performance and less memory fragmentation,
+// I'm using a structure here to avoid allocating 3 different arrays.
+struct ParamInfo
+{
+ DWORD dwParamAttr;
+ ULONG cbNativeType;
+ PCCOR_SIGNATURE pvNativeType;
+};
+
+ILStubHashBlob* NDirect::CreateHashBlob(NDirectStubParameters* pParams)
+{
+ STANDARD_VM_CONTRACT;
+
+ NDirectStubHashBlob* pBlob;
+
+ IMDInternalImport* pInternalImport = pParams->m_pModule->GetMDImport();
+
+ CQuickBytes paramInfoBytes;
+ paramInfoBytes.AllocThrows(sizeof(ParamInfo)*pParams->m_nParamTokens);
+ ParamInfo *paramInfos = (ParamInfo *)paramInfoBytes.Ptr();
+ ::ZeroMemory(paramInfos, sizeof(ParamInfo) * pParams->m_nParamTokens);
+
+ size_t cbNativeTypeTotal = 0;
+
+ //
+ // Collect information for function parameters
+ //
+ for (int idx = 0; idx < pParams->m_nParamTokens; idx++)
+ {
+ mdParamDef token = pParams->m_pParamTokenArray[idx];
+ if (TypeFromToken(token) == mdtParamDef && mdParamDefNil != token)
+ {
+ USHORT usSequence_Ignore; // We don't need usSequence in the hash as the param array is already sorted
+ LPCSTR szParamName_Ignore;
+ IfFailThrow(pInternalImport->GetParamDefProps(token, &usSequence_Ignore, &paramInfos[idx].dwParamAttr, &szParamName_Ignore));
+
+ if (paramInfos[idx].dwParamAttr & pdHasFieldMarshal)
+ {
+ IfFailThrow(pInternalImport->GetFieldMarshal(token, &paramInfos[idx].pvNativeType, &paramInfos[idx].cbNativeType));
+ cbNativeTypeTotal += paramInfos[idx].cbNativeType;
+ }
+ }
+ }
+
+ SigPointer sigPtr = pParams->m_sig.CreateSigPointer();
+
+ // note that ConvertToInternalSignature also resolves generics so different instantiations will get different
+ // hash blobs for methods that have generic parameters in their signature
+ SigBuilder sigBuilder;
+ sigPtr.ConvertToInternalSignature(pParams->m_pModule, pParams->m_pTypeContext, &sigBuilder, /* bSkipCustomModifier = */ FALSE);
+
+ DWORD cbSig;
+ PVOID pSig = sigBuilder.GetSignature(&cbSig);
+
+ //
+ // Build hash blob for IL stub sharing
+ //
+ S_SIZE_T cbSizeOfBlob = S_SIZE_T(offsetof(NDirectStubHashBlob, m_rgbSigAndParamData)) +
+ S_SIZE_T(sizeof(ULONG)) * S_SIZE_T(pParams->m_nParamTokens) + // Parameter attributes
+ S_SIZE_T(sizeof(DWORD)) * S_SIZE_T(pParams->m_nParamTokens) + // Native type blob size
+ S_SIZE_T(cbNativeTypeTotal) + // Native type blob data
+ S_SIZE_T(cbSig); // Signature
+
+ if (cbSizeOfBlob.IsOverflow())
+ COMPlusThrowHR(COR_E_OVERFLOW);
+
+ static_assert_no_msg(nltMaxValue <= 0xFF);
+ static_assert_no_msg(nlfMaxValue <= 0xFF);
+ static_assert_no_msg(pmMaxValue <= 0xFFFF);
+
+ NewArrayHolder<BYTE> pBytes = new BYTE[cbSizeOfBlob.Value()];
+ // zero out the hash bytes to ensure all bit fields are deterministically set
+ ZeroMemory(pBytes, cbSizeOfBlob.Value());
+ pBlob = (NDirectStubHashBlob*)(BYTE*)pBytes;
+
+ pBlob->m_pModule = NULL;
+
+ if (SF_IsNGENedStub(pParams->m_dwStubFlags))
+ {
+ // don't share across modules if we are ngening the stub
+ pBlob->m_pModule = pParams->m_pModule;
+ }
+
+ pBlob->m_cbSizeOfBlob = cbSizeOfBlob.Value();
+ pBlob->m_unmgdCallConv = static_cast<WORD>(pParams->m_unmgdCallConv);
+ pBlob->m_nlType = static_cast<BYTE>(pParams->m_nlType);
+ pBlob->m_nlFlags = static_cast<BYTE>(pParams->m_nlFlags & ~nlfNoMangle); // this flag does not affect the stub
+ pBlob->m_iLCIDArg = pParams->m_iLCIDArg;
+
+ pBlob->m_StubFlags = pParams->m_dwStubFlags;
+ pBlob->m_nParams = pParams->m_nParamTokens;
+
+ BYTE* pBlobParams = &pBlob->m_rgbSigAndParamData[0];
+
+ //
+ // Write (dwParamAttr, cbNativeType) for parameters
+ //
+ // Note that these need to be aligned, which is why they are written before the byte blobs.
+ // The asserts below fire even on non-IA64 platforms so that bugs are caught everywhere.
+ //
+ _ASSERTE((DWORD_PTR)pBlobParams % sizeof(DWORD) == 0);
+ _ASSERTE(sizeof(DWORD) == sizeof(ULONG));
+
+ for (int i = 0; i < pParams->m_nParamTokens; ++i)
+ {
+ // We only care about In/Out/HasFieldMarshal
+ // Other attr are about optional/default values which are not used in marshalling,
+ // but only used in compilers
+ *((DWORD *)pBlobParams) = paramInfos[i].dwParamAttr & (pdIn | pdOut | pdHasFieldMarshal);
+ pBlobParams += sizeof(DWORD);
+
+ *((ULONG *)pBlobParams) = paramInfos[i].cbNativeType;
+ pBlobParams += sizeof(ULONG);
+ }
+
+ //
+ // Write native type blob for parameters
+ //
+ for (int i = 0; i < pParams->m_nParamTokens; ++i)
+ {
+ memcpy(pBlobParams, paramInfos[i].pvNativeType, paramInfos[i].cbNativeType);
+ pBlobParams += paramInfos[i].cbNativeType;
+ }
+
+ //
+ // Copy signature
+ //
+ memcpy(pBlobParams, pSig, cbSig);
+
+ // Verify that we indeed have reached the end
+ _ASSERTE(pBlobParams + cbSig == (BYTE *)pBlob + cbSizeOfBlob.Value());
+
+ pBytes.SuppressRelease();
+ return (ILStubHashBlob*)pBlob;
+}
+
+// static inline
+ILStubCache* NDirect::GetILStubCache(NDirectStubParameters* pParams)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Use the m_pLoaderModule instead of m_pModule
+ // They could be different for methods on generic types.
+ return pParams->m_pLoaderModule->GetILStubCache();
+}
+
+// static
+MethodDesc* NDirect::GetStubMethodDesc(
+ MethodDesc *pTargetMD,
+ NDirectStubParameters* pParams,
+ ILStubHashBlob* pHashParams,
+ AllocMemTracker* pamTracker,
+ bool& bILStubCreator,
+ MethodDesc* pLastMD)
+{
+ CONTRACT(MethodDesc*)
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pParams));
+ PRECONDITION(!pParams->m_sig.IsEmpty());
+ PRECONDITION(CheckPointer(pParams->m_pModule));
+ PRECONDITION(CheckPointer(pTargetMD, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ MethodDesc* pMD;
+
+ ILStubCache* pCache = NDirect::GetILStubCache(pParams);
+
+ pMD = pCache->GetStubMethodDesc(pTargetMD,
+ pHashParams,
+ pParams->m_dwStubFlags,
+ pParams->m_pModule,
+ pParams->m_sig.GetRawSig(),
+ pParams->m_sig.GetRawSigLen(),
+ pamTracker,
+ bILStubCreator,
+ pLastMD);
+
+ RETURN pMD;
+}
+
+
+// static
+void NDirect::RemoveILStubCacheEntry(NDirectStubParameters* pParams, ILStubHashBlob* pHashParams)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pParams));
+ PRECONDITION(CheckPointer(pHashParams));
+ PRECONDITION(!pParams->m_sig.IsEmpty());
+ PRECONDITION(CheckPointer(pParams->m_pModule));
+ }
+ CONTRACTL_END;
+
+ LOG((LF_STUBS, LL_INFO1000, "Exception happened when generating IL of stub clr!CreateInteropILStub StubMD: %p, HashBlob: %p \n", pParams, pHashParams));
+
+ ILStubCache* pCache = NDirect::GetILStubCache(pParams);
+
+ pCache->DeleteEntry(pHashParams);
+}
+
+// static
+void NDirect::AddMethodDescChunkWithLockTaken(NDirectStubParameters* pParams, MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pParams));
+ PRECONDITION(!pParams->m_sig.IsEmpty());
+ PRECONDITION(CheckPointer(pParams->m_pModule));
+ }
+ CONTRACTL_END;
+
+ ILStubCache* pCache = NDirect::GetILStubCache(pParams);
+
+ pCache->AddMethodDescChunkWithLockTaken(pMD);
+}
+
+//
+// Additional factorization of CreateNDirectStub. This hoists all the metadata accesses
+// into one location so that we can leave CreateNDirectStubWorker to just generate the
+// IL. This allows us to cache a stub based on the inputs to CreateNDirectStubWorker
+// instead of having to generate the IL first before doing the caching.
+//
+void CreateNDirectStubAccessMetadata(StubSigDesc* pSigDesc, // IN
+ CorPinvokeMap unmgdCallConv, // IN
+ DWORD* pdwStubFlags, // IN/OUT
+ int* piLCIDArg, // OUT
+ int* pNumArgs // OUT
+ )
+{
+ STANDARD_VM_CONTRACT;
+
+ if (SF_IsCOMStub(*pdwStubFlags))
+ {
+ _ASSERTE(0 == unmgdCallConv);
+ }
+ else
+ {
+ if (unmgdCallConv != pmCallConvStdcall &&
+ unmgdCallConv != pmCallConvCdecl &&
+ unmgdCallConv != pmCallConvThiscall)
+ {
+ COMPlusThrow(kTypeLoadException, IDS_INVALID_PINVOKE_CALLCONV);
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (SF_IsDelegateStub(*pdwStubFlags))
+ {
+ _ASSERTE(!SF_IsWinRTStub(*pdwStubFlags));
+ if (pSigDesc->m_pMD->GetMethodTable()->IsProjectedFromWinRT())
+ {
+ // We do not allow P/Invoking via WinRT delegates to better segregate WinRT
+ // from classic interop scenarios.
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_DELEGATEPINVOKE_WINRT);
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ MetaSig msig(pSigDesc->m_sig,
+ pSigDesc->m_pModule,
+ &pSigDesc->m_typeContext);
+
+ if (SF_IsVarArgStub(*pdwStubFlags))
+ msig.SetTreatAsVarArg();
+
+ (*pNumArgs) = msig.NumFixedArgs();
+
+ IMDInternalImport* pInternalImport = pSigDesc->m_pModule->GetMDImport();
+
+ _ASSERTE(!SF_IsHRESULTSwapping(*pdwStubFlags));
+
+ mdMethodDef md = pSigDesc->m_tkMethodDef;
+ if (md != mdMethodDefNil)
+ {
+ DWORD dwDescrOffset;
+ DWORD dwImplFlags;
+ IfFailThrow(pInternalImport->GetMethodImplProps(
+ md,
+ &dwDescrOffset,
+ &dwImplFlags));
+
+#ifdef FEATURE_COMINTEROP
+ if (SF_IsWinRTStub(*pdwStubFlags))
+ {
+ // All WinRT methods do HRESULT swapping
+ if (IsMiPreserveSig(dwImplFlags))
+ {
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_PRESERVESIG_WINRT);
+ }
+
+ (*pdwStubFlags) |= NDIRECTSTUB_FL_DOHRESULTSWAPPING;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ if (SF_IsReverseStub(*pdwStubFlags))
+ {
+ // only COM-to-CLR call supports hresult swapping in the reverse direction
+ if (SF_IsCOMStub(*pdwStubFlags) && !IsMiPreserveSig(dwImplFlags))
+ {
+ (*pdwStubFlags) |= NDIRECTSTUB_FL_DOHRESULTSWAPPING;
+ }
+ }
+ else
+ {
+ // fwd pinvoke, fwd com interop support hresult swapping.
+ // delegate to an unmanaged method does not.
+ if (!IsMiPreserveSig(dwImplFlags) && !SF_IsDelegateStub(*pdwStubFlags))
+ {
+ (*pdwStubFlags) |= NDIRECTSTUB_FL_DOHRESULTSWAPPING;
+ }
+ }
+ }
+
+ if (pSigDesc->m_pMD != NULL)
+ {
+ (*piLCIDArg) = GetLCIDParameterIndex(pSigDesc->m_pMD);
+ }
+ else
+ {
+ (*piLCIDArg) = -1;
+ }
+
+ // Check to see if we need to do LCID conversion.
+ if ((*piLCIDArg) != -1 && (*piLCIDArg) > (*pNumArgs))
+ {
+ COMPlusThrow(kIndexOutOfRangeException, IDS_EE_INVALIDLCIDPARAM);
+ }
+
+ if (SF_IsCOMStub(*pdwStubFlags) && !SF_IsWinRTStaticStub(*pdwStubFlags))
+ {
+ CONSISTENCY_CHECK(msig.HasThis());
+ }
+ else
+ {
+ if (msig.HasThis() && !SF_IsDelegateStub(*pdwStubFlags))
+ {
+ COMPlusThrow(kInvalidProgramException, VLDTR_E_FMD_PINVOKENOTSTATIC);
+ }
+ }
+}
+
+void NDirect::PopulateNDirectMethodDesc(NDirectMethodDesc* pNMD, PInvokeStaticSigInfo* pSigInfo, BOOL throwOnError /*= TRUE*/)
+{
+ if (pNMD->IsSynchronized() && throwOnError)
+ COMPlusThrow(kTypeLoadException, IDS_EE_NOSYNCHRONIZED);
+
+ WORD ndirectflags = 0;
+ if (pNMD->MethodDesc::IsVarArg())
+ ndirectflags |= NDirectMethodDesc::kVarArgs;
+
+ LPCUTF8 szLibName = NULL, szEntryPointName = NULL;
+ new (pSigInfo) PInvokeStaticSigInfo(pNMD, &szLibName, &szEntryPointName,
+ (throwOnError ? PInvokeStaticSigInfo::THROW_ON_ERROR : PInvokeStaticSigInfo::NO_THROW_ON_ERROR));
+
+ if (pSigInfo->GetCharSet() == nltAnsi)
+ ndirectflags |= NDirectMethodDesc::kNativeAnsi;
+
+ CorNativeLinkFlags linkflags = pSigInfo->GetLinkFlags();
+ if (linkflags & nlfLastError)
+ ndirectflags |= NDirectMethodDesc::kLastError;
+ if (linkflags & nlfNoMangle)
+ ndirectflags |= NDirectMethodDesc::kNativeNoMangle;
+
+ CorPinvokeMap callConv = pSigInfo->GetCallConv();
+ if (callConv == pmCallConvStdcall)
+ ndirectflags |= NDirectMethodDesc::kStdCall;
+ if (callConv == pmCallConvThiscall)
+ ndirectflags |= NDirectMethodDesc::kThisCall;
+
+ if (pNMD->GetLoaderModule()->IsSystem() && strcmp(szLibName, "QCall") == 0)
+ {
+ ndirectflags |= NDirectMethodDesc::kIsQCall;
+ }
+ else
+ {
+ EnsureWritablePages(&pNMD->ndirect);
+ pNMD->ndirect.m_pszLibName = szLibName;
+ pNMD->ndirect.m_pszEntrypointName = szEntryPointName;
+ }
+
+#ifdef _TARGET_X86_
+ if (ndirectflags & NDirectMethodDesc::kStdCall)
+ {
+ // Compute the kStdCallWithRetBuf flag which is needed at link time for entry point mangling.
+ MetaSig msig(pNMD);
+ ArgIterator argit(&msig);
+ if (argit.HasRetBuffArg())
+ {
+ MethodTable *pRetMT = msig.GetRetTypeHandleThrowing().AsMethodTable();
+ if (IsUnmanagedValueTypeReturnedByRef(pRetMT->GetNativeSize()))
+ {
+ ndirectflags |= NDirectMethodDesc::kStdCallWithRetBuf;
+ }
+ }
+ }
+#endif // _TARGET_X86_
+
+ // Call this exactly ONCE per thread. Do not publish incomplete prestub flags
+ // or you will introduce a race condition.
+ pNMD->InterlockedSetNDirectFlags(ndirectflags);
+}
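+
+// Illustrative example (an assumption for context, not part of this change): for a
+// managed declaration like
+//
+//   [DllImport("user32.dll", CharSet = CharSet.Ansi, SetLastError = true)]
+//   static extern int MessageBoxA(IntPtr hWnd, string text, string caption, uint type);
+//
+// the code above would compute ndirectflags = kNativeAnsi | kLastError (plus kStdCall,
+// since DllImport defaults to the stdcall-mapped Winapi convention on Windows) and then
+// publish the flags atomically via InterlockedSetNDirectFlags.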
+
+#ifdef FEATURE_COMINTEROP
+// Find the MethodDesc of the predefined IL stub method by either
+// 1) looking at redirected adapter interfaces, OR
+// 2) looking at special attributes for the specific interop scenario (specified by dwStubFlags).
+//    Currently only ManagedToNativeComInteropStubAttribute is supported.
+// It returns NULL if no such attribute(s) can be found.
+// If the attribute is found but is invalid, or if something goes wrong during the lookup,
+// an exception is thrown. If everything goes well, you get the MethodDesc of the stub method.
+HRESULT FindPredefinedILStubMethod(MethodDesc *pTargetMD, DWORD dwStubFlags, MethodDesc **ppRetStubMD)
+{
+ CONTRACT(HRESULT)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTargetMD));
+ PRECONDITION(CheckPointer(ppRetStubMD));
+ PRECONDITION(*ppRetStubMD == NULL);
+ }
+ CONTRACT_END;
+
+ HRESULT hr;
+
+ MethodTable *pTargetMT = pTargetMD->GetMethodTable();
+
+ // Check if this is a redirected interface - we have static stubs in mscorlib for those.
+ if (SF_IsForwardCOMStub(dwStubFlags) && pTargetMT->IsInterface())
+ {
+
+ // Redirect generic redirected interfaces to the corresponding adapter methods in mscorlib
+ if (pTargetMT->HasInstantiation())
+ {
+ MethodDesc *pAdapterMD = WinRTInterfaceRedirector::GetStubMethodForRedirectedInterfaceMethod(pTargetMD, TypeHandle::Interop_ManagedToNative);
+ if (pAdapterMD != NULL)
+ {
+ *ppRetStubMD = pAdapterMD;
+ return S_OK;
+ }
+ }
+ }
+
+ //
+ // Find out if we have the attribute
+ //
+ const void *pBytes;
+ ULONG cbBytes;
+
+ // Support v-table forward classic COM interop calls only
+ if (SF_IsCOMStub(dwStubFlags) && SF_IsForwardStub(dwStubFlags) && !SF_IsWinRTStub(dwStubFlags))
+ {
+ if (pTargetMT->HasInstantiation())
+ {
+ // ManagedToNativeComInteropStubAttribute is not supported with generics
+ return E_FAIL;
+ }
+
+ if (pTargetMD->IsFCall())
+ {
+ // ManagedToNativeComInteropStubAttribute is not supported on FCalls (i.e. methods on legacy
+ // interfaces forwarded to CustomMarshalers.dll such as IEnumerable::GetEnumerator)
+ return E_FAIL;
+ }
+ _ASSERTE(pTargetMD->IsComPlusCall());
+
+ if (pTargetMD->IsInterface())
+ {
+ _ASSERTE(!pTargetMD->GetAssembly()->IsWinMD());
+ hr = pTargetMD->GetMDImport()->GetCustomAttributeByName(
+ pTargetMD->GetMemberDef(),
+ FORWARD_INTEROP_STUB_METHOD_TYPE,
+ &pBytes,
+ &cbBytes);
+
+ if (FAILED(hr))
+ RETURN hr;
+            // GetCustomAttributeByName returns S_FALSE when it cannot find the attribute,
+            // but nothing actually fails - translate that to E_FAIL.
+ else if (hr == S_FALSE)
+ RETURN E_FAIL;
+ }
+ else
+ {
+            // We are dealing with the class, so use the interface MD instead.
+            // We don't need to check the class MD: the stubs can be thought of as
+            // part of the public interface, and if the interface is public,
+            // the stubs should also be accessible.
+ MethodDesc *pInterfaceMD = pTargetMD->GetInterfaceMD();
+ if (pInterfaceMD)
+ {
+ hr = FindPredefinedILStubMethod(pInterfaceMD, dwStubFlags, ppRetStubMD);
+ RETURN hr;
+ }
+ else
+ RETURN E_FAIL;
+ }
+ }
+ else
+ RETURN E_FAIL;
+
+ //
+ // Parse the attribute
+ //
+ CustomAttributeParser parser(pBytes, cbBytes);
+ IfFailRet(parser.SkipProlog());
+
+ LPCUTF8 pTypeName;
+ ULONG cbTypeName;
+ IfFailRet(parser.GetNonEmptyString(&pTypeName, &cbTypeName));
+
+ LPCUTF8 pMethodName;
+ ULONG cbMethodName;
+ IfFailRet(parser.GetNonEmptyString(&pMethodName, &cbMethodName));
+
+ StackSString typeName(SString::Utf8, pTypeName, cbTypeName);
+ StackSString methodName(SString::Utf8, pMethodName, cbMethodName);
+
+ //
+ // Retrieve the type
+ //
+ TypeHandle stubClassType;
+ stubClassType = TypeName::GetTypeUsingCASearchRules(typeName.GetUnicode(), pTargetMT->GetAssembly());
+
+ MethodTable *pStubClassMT = stubClassType.AsMethodTable();
+
+ StackSString stubClassName;
+ pStubClassMT->_GetFullyQualifiedNameForClassNestedAware(stubClassName);
+
+ StackSString targetInterfaceName;
+ pTargetMT->_GetFullyQualifiedNameForClassNestedAware(targetInterfaceName);
+
+    // Restrict to the same assembly only, to reduce testing cost.
+ if (stubClassType.GetAssembly() != pTargetMT->GetAssembly())
+ {
+ COMPlusThrow(
+ kArgumentException,
+ IDS_EE_INTEROP_STUB_CA_MUST_BE_WITHIN_SAME_ASSEMBLY,
+ stubClassName.GetUnicode(),
+ targetInterfaceName.GetUnicode()
+ );
+ }
+
+ if (stubClassType.HasInstantiation())
+ {
+ COMPlusThrow(
+ kArgumentException,
+ IDS_EE_INTEROP_STUB_CA_STUB_CLASS_MUST_NOT_BE_GENERIC,
+ stubClassName.GetUnicode()
+ );
+ }
+
+ if (stubClassType.IsInterface())
+ {
+ COMPlusThrow(
+ kArgumentException,
+ IDS_EE_INTEROP_STUB_CA_STUB_CLASS_MUST_NOT_BE_INTERFACE,
+ stubClassName.GetUnicode()
+ );
+ }
+
+ //
+ // Locate the MethodDesc for the stub method
+ //
+ MethodDesc *pStubMD = NULL;
+
+ {
+ PCCOR_SIGNATURE pTargetSig = NULL;
+ DWORD pcTargetSig = 0;
+
+ SigTypeContext typeContext; // NO generics supported
+
+ pTargetMD->GetSig(&pTargetSig, &pcTargetSig);
+
+ MetaSig msig(pTargetSig,
+ pcTargetSig,
+ pTargetMD->GetModule(),
+ &typeContext);
+ _ASSERTE(msig.HasThis());
+
+ SigBuilder stubSigBuilder;
+
+ //
+        // Append the calling convention and the argument count (NumFixedArgs + 1 for the explicit 'this')
+ //
+ stubSigBuilder.AppendByte(msig.GetCallingConvention() & ~IMAGE_CEE_CS_CALLCONV_HASTHIS);
+ stubSigBuilder.AppendData(msig.NumFixedArgs() + 1);
+
+ //
+ // Append return type
+ //
+ SigPointer pReturn = msig.GetReturnProps();
+ LPBYTE pReturnTypeBegin = (LPBYTE)pReturn.GetPtr();
+ IfFailThrow(pReturn.SkipExactlyOne());
+ LPBYTE pReturnTypeEnd = (LPBYTE)pReturn.GetPtr();
+
+ stubSigBuilder.AppendBlob(pReturnTypeBegin, pReturnTypeEnd - pReturnTypeBegin);
+
+ //
+ // Append 'this'
+ //
+ stubSigBuilder.AppendElementType(ELEMENT_TYPE_CLASS);
+ stubSigBuilder.AppendToken(pTargetMT->GetCl());
+
+ //
+ // Copy rest of the arguments
+ //
+ if (msig.NextArg() != ELEMENT_TYPE_END)
+ {
+ SigPointer pFirstArg = msig.GetArgProps();
+ LPBYTE pArgBegin = (LPBYTE) pFirstArg.GetPtr();
+ LPBYTE pArgEnd = (LPBYTE) pTargetSig + pcTargetSig;
+
+ stubSigBuilder.AppendBlob(pArgBegin, pArgEnd - pArgBegin);
+ }
+
+ //
+ // Allocate new memory and copy over
+ //
+ DWORD pcStubSig = 0;
+ PCCOR_SIGNATURE pStubSig = (PCCOR_SIGNATURE) stubSigBuilder.GetSignature(&pcStubSig);
+
+ //
+ // Find method using name + signature
+ //
+ StackScratchBuffer buffer;
+ LPCUTF8 szMethodNameUTF8 = methodName.GetUTF8(buffer);
+ pStubMD = MemberLoader::FindMethod(stubClassType.GetMethodTable(),
+ szMethodNameUTF8,
+ pStubSig,
+ pcStubSig,
+ pTargetMT->GetModule());
+
+ if (pStubMD == NULL)
+ {
+ CQuickBytes qbSig;
+
+ PrettyPrintSig(
+ pStubSig,
+ pcStubSig,
+ szMethodNameUTF8,
+ &qbSig,
+ pTargetMD->GetMDImport(),
+ NULL);
+
+            // Unfortunately PrettyPrintSig doesn't print 'static' when the function is static,
+            // so we prepend 'static' here. No need to localize.
+ SString signature(SString::Utf8, (LPCUTF8)"static ");
+ signature.AppendUTF8((LPCUTF8) qbSig.Ptr());
+
+ COMPlusThrow(
+ kMissingMethodException,
+ IDS_EE_INTEROP_STUB_CA_STUB_METHOD_MISSING,
+ signature.GetUnicode(),
+ stubClassName.GetUnicode()
+ );
+
+ }
+ }
+
+ //
+ // Check the Stub MD
+ //
+
+ // Verify that the target interop method can call the stub method
+
+ _ASSERTE(pTargetMD != NULL);
+
+ StaticAccessCheckContext accessContext(pTargetMD, pTargetMT);
+
+ if (!ClassLoader::CanAccess(
+ &accessContext,
+ pStubClassMT,
+ stubClassType.GetAssembly(),
+ pStubMD->GetAttrs(),
+ pStubMD,
+ NULL))
+ {
+ StackSString interopMethodName(SString::Utf8, pTargetMD->GetName());
+
+ COMPlusThrow(
+ kMethodAccessException,
+ IDS_EE_INTEROP_STUB_CA_NO_ACCESS_TO_STUB_METHOD,
+ interopMethodName.GetUnicode(),
+ methodName.GetUnicode()
+ );
+ }
+
+    // The FindMethod call ensures the method is static by matching the signature,
+    // so there is no need to check and throw here.
+ _ASSERTE(pStubMD->IsStatic());
+
+ *ppRetStubMD = pStubMD;
+
+ RETURN S_OK;
+}
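+
+// Illustrative managed usage of ManagedToNativeComInteropStubAttribute as handled above
+// (the type and method names here are hypothetical):
+//
+//   [ManagedToNativeComInteropStub(typeof(MyComStubs), "ForwardDoWork")]
+//   public interface IMyComInterface { void DoWork(int x); }
+//
+//   public static class MyComStubs
+//   {
+//       // Must be static, in the same assembly, and match the interface method's
+//       // signature with an explicit 'this' parameter prepended.
+//       public static void ForwardDoWork(IMyComInterface thisObj, int x) { /* ... */ }
+//   }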
+#endif // FEATURE_COMINTEROP
+
+MethodDesc* CreateInteropILStub(
+ ILStubState* pss,
+ StubSigDesc* pSigDesc,
+ CorNativeLinkType nlType,
+ CorNativeLinkFlags nlFlags,
+ CorPinvokeMap unmgdCallConv,
+ DWORD dwStubFlags, // NDirectStubFlags
+ int nParamTokens,
+ mdParamDef* pParamTokenArray,
+ int iLCIDArg
+ )
+{
+ CONTRACT(MethodDesc*)
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pSigDesc));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+
+ ///////////////////////////////
+ //
+ // MethodDesc creation
+ //
+ ///////////////////////////////
+
+ MethodDesc* pStubMD = NULL;
+
+ Module* pModule = pSigDesc->m_pModule;
+ Module* pLoaderModule = pSigDesc->m_pLoaderModule;
+ MethodDesc* pTargetMD = pSigDesc->m_pMD;
+ //
+ // pTargetMD may be null in the case of calli pinvoke
+ // and vararg pinvoke.
+ //
+
+#ifdef FEATURE_COMINTEROP
+    //
+    // Try to locate a predefined IL stub, either defined in user code or hardcoded in the CLR.
+    // If there is one, use that method as the stub.
+    // The pTargetMD == NULL case is skipped, which covers reverse interop calls.
+    //
+ if (pTargetMD && SUCCEEDED(FindPredefinedILStubMethod(pTargetMD, dwStubFlags, &pStubMD)))
+ {
+#ifndef CROSSGEN_COMPILE
+        // We are about to execute the method in pStubMD, which could be in another module;
+        // call EnsureActive before making the call.
+        // This cannot be done during NGEN/PEVerify (in PASSIVE_DOMAIN), so it is done here instead.
+ pStubMD->EnsureActive();
+
+ if (pStubMD->IsPreImplemented())
+ RestoreNGENedStub(pStubMD);
+#endif
+
+ RETURN pStubMD;
+ }
+#endif // FEATURE_COMINTEROP
+
+ // Otherwise, fall back to generating IL stub on-the-fly
+ NDirectStubParameters params(pSigDesc->m_sig,
+ &pSigDesc->m_typeContext,
+ pModule,
+ pLoaderModule,
+ nlType,
+ nlFlags,
+ unmgdCallConv,
+ dwStubFlags,
+ nParamTokens,
+ pParamTokenArray,
+ iLCIDArg
+ );
+
+    // The following two ILStubCreatorHelperHolders are used to recover the state when an
+    // exception happens during the generation of the IL stubs. We need to free the
+    // memory allocated and restore the ILStubCache.
+    //
+    // The following block is logically divided into two phases. The first phase is the
+    // CreateOrGet IL stub phase, during which we take a domain-level lock. The second phase
+    // is the IL generation phase, during which we take a MethodDesc-level lock. Taking two
+    // locks is mainly designed for performance.
+    //
+    // ilStubCreatorHelper contains an instance of AllocMemTracker which tracks the
+    // memory allocated during the creation of the MethodDesc so that we are able to remove
+    // it when releasing the ILStubCreatorHelperHolder or destructing ILStubCreatorHelper.
+    //
+    // When removing an IL stub from the cache, we have a constraint that only the thread which
+    // created the stub can remove it. Otherwise, any thread that hits the cache and gets the
+    // stub could remove it from the cache if OOM occurs.
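+    //
+    // A rough sketch of the two-phase scheme described above (illustrative pseudocode only,
+    // not the exact control flow that follows):
+    //
+    //   lock (domain->GetILStubGenLock())          // phase 1: create-or-get the stub MD
+    //       pStubMD = ilStubCache.GetStubMD(...);
+    //   lock (entryFor(pStubMD))                   // phase 2: per-MethodDesc IL generation
+    //       if (!resolver->IsILGenerated())
+    //           CreateNDirectStubWorker(...);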
+
+ {
+ ILStubCreatorHelper ilStubCreatorHelper(pTargetMD, &params);
+
+ // take the domain level lock
+ ListLockHolder pILStubLock(pLoaderModule->GetDomain()->GetILStubGenLock());
+
+ {
+            // The holder will free the allocated MethodDesc and restore the ILStubCache
+            // if an exception happens.
+ ILStubCreatorHelperHolder pCreateOrGetStubHolder(&ilStubCreatorHelper);
+ pStubMD = pCreateOrGetStubHolder->GetStubMD();
+
+ ///////////////////////////////
+ //
+ // IL generation
+ //
+ ///////////////////////////////
+
+ {
+                // take the MethodDesc-level lock
+ ListLockEntryHolder pEntry(ListLockEntry::Find(pILStubLock, pStubMD, "il stub gen lock"));
+
+ ListLockEntryLockHolder pEntryLock(pEntry, FALSE);
+
+ // We can release the holder for the first phase now
+ pCreateOrGetStubHolder.SuppressRelease();
+
+ {
+                    // The holder will free the allocated MethodDesc and restore the ILStubCache
+                    // if an exception happens. The reason to get the holder again is to re-arm
+                    // that cleanup for the IL generation phase, now that the first holder has
+                    // been suppressed.
+                    ILStubCreatorHelperHolder pGenILHolder(&ilStubCreatorHelper);
+
+ if (!pEntryLock.DeadlockAwareAcquire())
+ {
+ // the IL generation is not recursive!
+ UNREACHABLE_MSG("unexpected deadlock in IL stub generation!");
+ }
+
+ if (SF_IsSharedStub(params.m_dwStubFlags))
+ {
+                        // Ensure that the pStubMD we have now has not been destroyed by another thread
+ pGenILHolder->GetStubMethodDesc();
+
+ while (pStubMD != pGenILHolder->GetStubMD())
+ {
+ pStubMD = pGenILHolder->GetStubMD();
+
+ pEntry.Assign(ListLockEntry::Find(pILStubLock, pStubMD, "il stub gen lock"));
+ pEntryLock.Assign(pEntry, FALSE);
+
+ if (!pEntryLock.DeadlockAwareAcquire())
+ {
+ // the IL generation is not recursive!
+ UNREACHABLE_MSG("unexpected deadlock in IL stub generation!");
+ }
+
+ pGenILHolder->GetStubMethodDesc();
+ }
+ }
+
+ for (;;)
+ {
+ // We have the entry lock now, we can release the global lock
+ pILStubLock.Release();
+
+ if (pEntry->m_hrResultCode != S_FALSE)
+ {
+                            // We came in to generate the IL but someone beat us to it,
+                            // so there's nothing to do.
+ break;
+ }
+
+ ILStubResolver* pResolver = pStubMD->AsDynamicMethodDesc()->GetILStubResolver();
+
+ CONSISTENCY_CHECK((NULL == pResolver->GetStubMethodDesc()) || (pStubMD == pResolver->GetStubMethodDesc()));
+
+ if (pResolver->IsILGenerated())
+ {
+ // this stub already has its IL generated
+ break;
+ }
+
+ //
+ // Check that the stub signature and MethodDesc are compatible. The JIT
+ // interface functions depend on this.
+ //
+
+ {
+ SigPointer ptr = pSigDesc->m_sig.CreateSigPointer();
+
+ ULONG callConvInfo;
+ IfFailThrow(ptr.GetCallingConvInfo(&callConvInfo));
+
+ BOOL fSigIsStatic = !(callConvInfo & IMAGE_CEE_CS_CALLCONV_HASTHIS);
+
+ // CreateNDirectStubWorker will throw an exception for these cases.
+ BOOL fCanHaveThis = SF_IsDelegateStub(dwStubFlags) || SF_IsCOMStub(dwStubFlags);
+
+ if (fSigIsStatic || fCanHaveThis)
+ {
+ CONSISTENCY_CHECK(pStubMD->IsStatic() == (DWORD)fSigIsStatic);
+ }
+ }
+
+ {
+ ILStubGenHolder sgh(pResolver);
+
+ pResolver->SetStubMethodDesc(pStubMD);
+ pResolver->SetStubTargetMethodDesc(pTargetMD);
+
+ CreateNDirectStubWorker(pss,
+ pSigDesc,
+ nlType,
+ nlFlags,
+ unmgdCallConv,
+ dwStubFlags,
+ pStubMD,
+ pParamTokenArray,
+ iLCIDArg);
+
+ pResolver->SetTokenLookupMap(pss->GetTokenLookupMap());
+
+ pResolver->SetStubTargetMethodSig(
+ pss->GetStubTargetMethodSig(),
+ pss->GetStubTargetMethodSigLength());
+
+ // we successfully generated the IL stub
+ sgh.SuppressRelease();
+ }
+
+ pEntry->m_hrResultCode = S_OK;
+ break;
+ }
+
+ // Link the MethodDesc onto the method table with the lock taken
+ NDirect::AddMethodDescChunkWithLockTaken(&params, pStubMD);
+
+ pGenILHolder.SuppressRelease();
+ }
+ }
+ }
+ ilStubCreatorHelper.SuppressRelease();
+ }
+
+#if defined(_TARGET_X86_)
+ if (SF_IsForwardStub(dwStubFlags) && pTargetMD != NULL && !pTargetMD->IsVarArg())
+ {
+        // Copy the stack arg byte count from the stub MD to the target MD - this number is
+        // computed during stub generation and is copied to all target MDs that share the stub
+        // (we don't set it for varargs - the number is call-site specific).
+        // Also copy the "takes parameters with copy constructors" flag, which is needed to
+        // generate the appropriate intercept stub.
+
+ WORD cbStackArgSize = pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize();
+ BOOL fHasCopyCtorArgs = pStubMD->AsDynamicMethodDesc()->HasCopyCtorArgs();
+
+ if (pTargetMD->IsNDirect())
+ {
+ NDirectMethodDesc *pTargetNMD = (NDirectMethodDesc *)pTargetMD;
+
+ pTargetNMD->SetStackArgumentSize(cbStackArgSize, (CorPinvokeMap)0);
+ pTargetNMD->SetHasCopyCtorArgs(fHasCopyCtorArgs);
+ }
+#ifdef FEATURE_COMINTEROP
+ else
+ {
+ if (SF_IsCOMStub(dwStubFlags))
+ {
+ ComPlusCallInfo *pComInfo = ComPlusCallInfo::FromMethodDesc(pTargetMD);
+
+ if (pComInfo != NULL)
+ {
+ pComInfo->SetStackArgumentSize(cbStackArgSize);
+ pComInfo->SetHasCopyCtorArgs(fHasCopyCtorArgs);
+ }
+ }
+ }
+#endif // FEATURE_COMINTEROP
+ }
+#endif // defined(_TARGET_X86_)
+
+ RETURN pStubMD;
+}
+
+MethodDesc* NDirect::CreateCLRToNativeILStub(
+ StubSigDesc* pSigDesc,
+ CorNativeLinkType nlType,
+ CorNativeLinkFlags nlFlags,
+ CorPinvokeMap unmgdCallConv,
+ DWORD dwStubFlags) // NDirectStubFlags
+{
+ CONTRACT(MethodDesc*)
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pSigDesc));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ int iLCIDArg = 0;
+ int numArgs = 0;
+ int numParamTokens = 0;
+ mdParamDef* pParamTokenArray = NULL;
+
+ CreateNDirectStubAccessMetadata(pSigDesc,
+ unmgdCallConv,
+ &dwStubFlags,
+ &iLCIDArg,
+ &numArgs);
+
+ Module *pModule = pSigDesc->m_pModule;
+ numParamTokens = numArgs + 1;
+ pParamTokenArray = (mdParamDef*)_alloca(numParamTokens * sizeof(mdParamDef));
+ CollateParamTokens(pModule->GetMDImport(), pSigDesc->m_tkMethodDef, numArgs, pParamTokenArray);
+
+    // For interop vectors that have declarative security, we need
+    // to update the stub flags to ensure a unique stub hash
+    // is generated based on the marshalling signature AND
+    // any declarative security.
+    // IMPORTANT: This will only inject the security callouts for
+    // interop functionality which has a non-null target MethodDesc.
+    // Currently, this is known to exclude things like native
+    // function ptrs. It is assumed that if the target is not
+    // attributable for metadata, then it cannot have declarative
+    // security - and that the target is not attributable if it was
+    // not passed to this function.
+ MethodDesc *pMD = pSigDesc->m_pMD;
+ if (pMD != NULL && SF_IsForwardStub(dwStubFlags))
+ {
+ // In an AppX process there is only one fully trusted AppDomain, so there is never any need to insert
+ // a security callout on the stubs.
+ if (!AppX::IsAppXProcess())
+ {
+#ifdef FEATURE_COMINTEROP
+ if (pMD->IsComPlusCall() || pMD->IsGenericComPlusCall())
+ {
+ // To preserve Whidbey behavior, we only enforce the implicit demand for
+ // unmanaged code permission.
+ MethodTable* pMT = ComPlusCallInfo::FromMethodDesc(pMD)->m_pInterfaceMT;
+ if (pMT->ClassRequiresUnmanagedCodeCheck() &&
+ !pMD->HasSuppressUnmanagedCodeAccessAttr())
+ {
+ dwStubFlags |= NDIRECTSTUB_FL_HASDECLARATIVESECURITY;
+ }
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ if (pMD->IsInterceptedForDeclSecurity())
+ {
+ dwStubFlags |= NDIRECTSTUB_FL_HASDECLARATIVESECURITY;
+ }
+ }
+ }
+
+ NewHolder<ILStubState> pStubState;
+
+#ifdef FEATURE_COMINTEROP
+ if (SF_IsCOMStub(dwStubFlags))
+ {
+ if (SF_IsReverseStub(dwStubFlags))
+ {
+ pStubState = new COMToCLR_ILStubState(pModule, pSigDesc->m_sig, &pSigDesc->m_typeContext, dwStubFlags, iLCIDArg, pMD);
+ }
+ else
+ {
+ pStubState = new CLRToCOM_ILStubState(pModule, pSigDesc->m_sig, &pSigDesc->m_typeContext, dwStubFlags, iLCIDArg, pMD);
+ }
+ }
+ else
+#endif
+ {
+ pStubState = new PInvoke_ILStubState(pModule, pSigDesc->m_sig, &pSigDesc->m_typeContext, dwStubFlags, unmgdCallConv, iLCIDArg, pMD);
+ }
+
+ MethodDesc* pStubMD;
+ pStubMD = CreateInteropILStub(
+ pStubState,
+ pSigDesc,
+ nlType,
+ nlFlags,
+ unmgdCallConv,
+ dwStubFlags,
+ numParamTokens,
+ pParamTokenArray,
+ iLCIDArg);
+
+ RETURN pStubMD;
+}
+
+#ifdef FEATURE_COMINTEROP
+MethodDesc* NDirect::CreateFieldAccessILStub(
+ PCCOR_SIGNATURE szMetaSig,
+ DWORD cbMetaSigSize,
+ Module* pModule,
+ mdFieldDef fd,
+ DWORD dwStubFlags, // NDirectStubFlags
+ FieldDesc* pFD)
+{
+ CONTRACT(MethodDesc*)
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(szMetaSig));
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(pFD, NULL_OK));
+ PRECONDITION(SF_IsFieldGetterStub(dwStubFlags) || SF_IsFieldSetterStub(dwStubFlags));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ int numArgs = (SF_IsFieldSetterStub(dwStubFlags) ? 1 : 0);
+ int numParamTokens = numArgs + 1;
+
+ // make sure we capture marshaling metadata
+ mdParamDef* pParamTokenArray = (mdParamDef *)_alloca(numParamTokens * sizeof(mdParamDef));
+ pParamTokenArray[0] = mdParamDefNil;
+ pParamTokenArray[numArgs] = (mdParamDef)fd;
+
+ // fields are never preserve-sig
+ dwStubFlags |= NDIRECTSTUB_FL_DOHRESULTSWAPPING;
+
+ // convert field signature to getter/setter signature
+ SigBuilder sigBuilder;
+
+ sigBuilder.AppendData(IMAGE_CEE_CS_CALLCONV_DEFAULT | IMAGE_CEE_CS_CALLCONV_HASTHIS);
+ sigBuilder.AppendData(numArgs);
+
+ if (SF_IsFieldSetterStub(dwStubFlags))
+ {
+ // managed setter returns void
+ sigBuilder.AppendElementType(ELEMENT_TYPE_VOID);
+ }
+
+ CONSISTENCY_CHECK(*szMetaSig == IMAGE_CEE_CS_CALLCONV_FIELD);
+
+ sigBuilder.AppendBlob((const PVOID)(szMetaSig + 1), cbMetaSigSize - 1);
+ szMetaSig = (PCCOR_SIGNATURE)sigBuilder.GetSignature(&cbMetaSigSize);
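+
+    // Worked example (illustrative): for an int32 field, the field signature
+    // "FIELD int32" becomes the getter signature "HASTHIS DEFAULT () int32",
+    // while the setter case above produces "HASTHIS DEFAULT (int32) void" -
+    // the field type moves from the return position to the single argument.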
+
+ StubSigDesc sigDesc(NULL, Signature(szMetaSig, cbMetaSigSize), pModule);
+
+#ifdef _DEBUG
+ sigDesc.m_pDebugName = pFD->GetDebugName();
+ sigDesc.m_pDebugClassName = pFD->GetEnclosingMethodTable()->GetDebugClassName();
+#endif // _DEBUG
+
+ Signature signature(szMetaSig, cbMetaSigSize);
+ NewHolder<ILStubState> pStubState = new COMToCLRFieldAccess_ILStubState(pModule, signature, &sigDesc.m_typeContext, dwStubFlags, pFD);
+
+ MethodDesc* pStubMD;
+ pStubMD = CreateInteropILStub(
+ pStubState,
+ &sigDesc,
+ (CorNativeLinkType)0,
+ (CorNativeLinkFlags)0,
+ (CorPinvokeMap)0,
+ dwStubFlags,
+ numParamTokens,
+ pParamTokenArray,
+ -1);
+
+ RETURN pStubMD;
+}
+#endif // FEATURE_COMINTEROP
+
+MethodDesc* NDirect::CreateCLRToNativeILStub(PInvokeStaticSigInfo* pSigInfo,
+ DWORD dwStubFlags,
+ MethodDesc* pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ StubSigDesc sigDesc(pMD, pSigInfo);
+
+ if (SF_IsWinRTDelegateStub(dwStubFlags))
+ {
+ _ASSERTE(pMD->IsEEImpl());
+
+ return CreateCLRToNativeILStub(&sigDesc,
+ (CorNativeLinkType)0,
+ (CorNativeLinkFlags)0,
+ (CorPinvokeMap)0,
+ (pSigInfo->GetStubFlags() | dwStubFlags) & ~NDIRECTSTUB_FL_DELEGATE);
+ }
+ else
+ {
+ return CreateCLRToNativeILStub(&sigDesc,
+ pSigInfo->GetCharSet(),
+ pSigInfo->GetLinkFlags(),
+ pSigInfo->GetCallConv(),
+ pSigInfo->GetStubFlags() | dwStubFlags);
+ }
+}
+
+MethodDesc* NDirect::GetILStubMethodDesc(NDirectMethodDesc* pNMD, PInvokeStaticSigInfo* pSigInfo, DWORD dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc* pStubMD = NULL;
+
+ if (!pNMD->IsVarArgs() || SF_IsForNumParamBytes(dwStubFlags))
+ {
+ if (pNMD->IsClassConstructorTriggeredByILStub())
+ {
+ dwStubFlags |= NDIRECTSTUB_FL_TRIGGERCCTOR;
+ }
+
+ pStubMD = CreateCLRToNativeILStub(
+ pSigInfo,
+ dwStubFlags & ~NDIRECTSTUB_FL_FOR_NUMPARAMBYTES,
+ pNMD);
+ }
+
+ return pStubMD;
+}
+
+MethodDesc* GetStubMethodDescFromInteropMethodDesc(MethodDesc* pMD, DWORD dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ BOOL fGcMdaEnabled = FALSE;
+#ifdef MDA_SUPPORTED
+ if (MDA_GET_ASSISTANT(GcManagedToUnmanaged) || MDA_GET_ASSISTANT(GcUnmanagedToManaged))
+ {
+        // We never generate checks for these MDAs into NGEN'ed stubs, so if they are
+        // enabled, a new stub must be generated (the perf impact is huge anyway).
+ fGcMdaEnabled = TRUE;
+ }
+#endif // MDA_SUPPORTED
+
+#ifdef FEATURE_COMINTEROP
+ if (SF_IsReverseCOMStub(dwStubFlags))
+ {
+ if (fGcMdaEnabled)
+ return NULL;
+
+ // reverse COM stubs live in a hash table
+ StubMethodHashTable *pHash = pMD->GetLoaderModule()->GetStubMethodHashTable();
+ return (pHash == NULL ? NULL : pHash->FindMethodDesc(pMD));
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ if (pMD->IsNDirect())
+ {
+ NDirectMethodDesc* pNMD = (NDirectMethodDesc*)pMD;
+ return ((fGcMdaEnabled && !pNMD->IsQCall()) ? NULL : pNMD->ndirect.m_pStubMD.GetValueMaybeNull());
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (pMD->IsComPlusCall() || pMD->IsGenericComPlusCall())
+ {
+#ifdef MDA_SUPPORTED
+ if (MDA_GET_ASSISTANT(RaceOnRCWCleanup))
+ {
+ // we never generate this callout to NGEN'ed stubs
+ return NULL;
+ }
+#endif // MDA_SUPPORTED
+
+ if (NDirect::IsHostHookEnabled())
+ {
+ MethodTable *pMT = pMD->GetMethodTable();
+ if (pMT->IsProjectedFromWinRT() || pMT->IsWinRTRedirectedInterface(TypeHandle::Interop_ManagedToNative))
+ {
+ // WinRT NGENed stubs are optimized for the non-hosted scenario and
+ // must be rejected if we are hosted.
+ return NULL;
+ }
+ }
+
+ if (fGcMdaEnabled)
+ return NULL;
+
+ ComPlusCallInfo *pComInfo = ComPlusCallInfo::FromMethodDesc(pMD);
+ return (pComInfo == NULL ? NULL : pComInfo->m_pStubMD.GetValueMaybeNull());
+ }
+#endif // FEATURE_COMINTEROP
+ else if (pMD->IsEEImpl())
+ {
+ if (fGcMdaEnabled)
+ return NULL;
+
+ DelegateEEClass *pClass = (DelegateEEClass *)pMD->GetClass();
+ if (SF_IsReverseStub(dwStubFlags))
+ {
+ return pClass->m_pReverseStubMD;
+ }
+ else
+ {
+#ifdef FEATURE_COMINTEROP
+ if (SF_IsWinRTDelegateStub(dwStubFlags))
+ {
+ if (NDirect::IsHostHookEnabled() && pMD->GetMethodTable()->IsProjectedFromWinRT())
+ {
+ // WinRT NGENed stubs are optimized for the non-hosted scenario and
+ // must be rejected if we are hosted.
+ return NULL;
+ }
+
+ return pClass->m_pComPlusCallInfo->m_pStubMD.GetValueMaybeNull();
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ return pClass->m_pForwardStubMD;
+ }
+ }
+ }
+ else if (pMD->IsIL())
+ {
+ // these are currently only created at runtime, not at NGEN time
+ return NULL;
+ }
+ else
+ {
+ UNREACHABLE_MSG("unexpected type of MethodDesc");
+ }
+}
+
+#ifndef CROSSGEN_COMPILE
+
+PCODE NDirect::GetStubForILStub(MethodDesc* pManagedMD, MethodDesc** ppStubMD, DWORD dwStubFlags)
+{
+ CONTRACT(PCODE)
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pManagedMD));
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+ // pStubMD, if provided, must be preimplemented.
+ CONSISTENCY_CHECK( (*ppStubMD == NULL) || (*ppStubMD)->IsPreImplemented() );
+
+ if (NULL == *ppStubMD)
+ {
+ PInvokeStaticSigInfo sigInfo(pManagedMD);
+ *ppStubMD = NDirect::CreateCLRToNativeILStub(&sigInfo, dwStubFlags, pManagedMD);
+ }
+
+ RETURN JitILStub(*ppStubMD);
+}
+
+PCODE NDirect::GetStubForILStub(NDirectMethodDesc* pNMD, MethodDesc** ppStubMD, DWORD dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pStub = NULL;
+
+ // pStubMD, if provided, must be preimplemented.
+ CONSISTENCY_CHECK( (*ppStubMD == NULL) || (*ppStubMD)->IsPreImplemented() );
+
+ if (NULL == *ppStubMD)
+ {
+ PInvokeStaticSigInfo sigInfo;
+ NDirect::PopulateNDirectMethodDesc(pNMD, &sigInfo, /* throwOnError = */ !SF_IsForNumParamBytes(dwStubFlags));
+
+ *ppStubMD = NDirect::GetILStubMethodDesc(pNMD, &sigInfo, dwStubFlags);
+ }
+
+ if (SF_IsForNumParamBytes(dwStubFlags))
+ return NULL;
+
+ if (*ppStubMD)
+ {
+ pStub = JitILStub(*ppStubMD);
+ }
+ else
+ {
+ CONSISTENCY_CHECK(pNMD->IsVarArgs());
+
+        //
+        // Varargs go through the vararg NDirect stub
+        //
+ pStub = TheVarargNDirectStub(pNMD->HasRetBuffArg());
+ }
+
+#ifdef FEATURE_MIXEDMODE // IJW
+ if (pNMD->IsEarlyBound())
+ {
+ pNMD->InitEarlyBoundNDirectTarget();
+ }
+ else
+#endif
+ {
+ NDirectLink(pNMD);
+ }
+
+ //
+    // NOTE: there is a race in updating this MethodDesc. We depend on all
+    // threads getting back the same DynamicMethodDesc for a particular
+    // NDirectMethodDesc; in that case, the locking around the actual JIT
+    // operation will prevent the code from being jitted more than once.
+    // By the time we get here, all threads get the same address of code
+    // back from the JIT operation and they all just fill in the same value
+    // here.
+    //
+    // In the NGEN case, all threads will get the same preimplemented code
+    // address much like the JIT case.
+    //
+
+ return pStub;
+}
+
+PCODE JitILStub(MethodDesc* pStubMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pCode = pStubMD->GetNativeCode();
+
+ if (pCode == NULL)
+ {
+ ///////////////////////////////
+ //
+ // Code generation
+ //
+ ///////////////////////////////
+
+
+ if (pStubMD->IsDynamicMethod())
+ {
+ //
+ // A dynamically generated IL stub
+ //
+
+ DWORD dwFlags = pStubMD->AsDynamicMethodDesc()->GetILStubResolver()->GetJitFlags();
+ pCode = pStubMD->MakeJitWorker(NULL, dwFlags, 0);
+
+ _ASSERTE(pCode == pStubMD->GetNativeCode());
+ }
+ else
+ {
+ //
+            // A static IL stub that points to a static method in a user assembly.
+ // Compile it and return the native code
+ //
+
+ // This returns the stable entry point
+ pCode = pStubMD->DoPrestub(NULL);
+
+ _ASSERTE(pCode == pStubMD->GetStableEntryPoint());
+ }
+ }
+
+ if (!pStubMD->IsDynamicMethod())
+ {
+ // We need an entry point that can be called multiple times
+ pCode = pStubMD->GetMultiCallableAddrOfCode();
+ }
+
+ return pCode;
+}
+
+MethodDesc* RestoreNGENedStub(MethodDesc* pStubMD)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pStubMD));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_PREJIT
+ pStubMD->CheckRestore();
+
+ PCODE pCode = pStubMD->GetPreImplementedCode();
+ if (pCode != NULL)
+ {
+ TADDR pFixupList = pStubMD->GetFixupList();
+ if (pFixupList != NULL)
+ {
+ Module* pZapModule = pStubMD->GetZapModule();
+ _ASSERTE(pZapModule != NULL);
+ if (!pZapModule->FixupDelayList(pFixupList))
+ {
+ _ASSERTE(!"FixupDelayList failed");
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ }
+
+#if defined(HAVE_GCCOVER)
+ if (GCStress<cfg_instr_ngen>::IsEnabled())
+ SetupGcCoverage(pStubMD, (BYTE*) pCode);
+#endif // HAVE_GCCOVER
+
+ }
+ else
+ {
+        // We only pass a non-NULL pStubMD to GetStubForILStub() below if pStubMD is preimplemented.
+ pStubMD = NULL;
+ }
+#endif // FEATURE_PREJIT
+
+ return pStubMD;
+}
+
+PCODE GetStubForInteropMethod(MethodDesc* pMD, DWORD dwStubFlags, MethodDesc **ppStubMD)
+{
+ CONTRACT(PCODE)
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(pMD->IsNDirect() || pMD->IsComPlusCall() || pMD->IsGenericComPlusCall() || pMD->IsEEImpl() || pMD->IsIL());
+ }
+ CONTRACT_END;
+
+ PCODE pStub = NULL;
+ MethodDesc* pStubMD = NULL;
+
+ pStubMD = GetStubMethodDescFromInteropMethodDesc(pMD, dwStubFlags);
+ if (pStubMD != NULL)
+ {
+ pStubMD = RestoreNGENedStub(pStubMD);
+ }
+
+ if ((NULL == pStubMD) && (SF_IsNGENedStub(dwStubFlags)))
+ {
+ // Return NULL -- the caller asked only for an ngened stub and
+ // one does not exist, so don't do any more work.
+ CONSISTENCY_CHECK(pStub == NULL);
+ }
+ else
+ if (pMD->IsNDirect())
+ {
+ NDirectMethodDesc* pNMD = (NDirectMethodDesc*)pMD;
+ pStub = NDirect::GetStubForILStub(pNMD, &pStubMD, dwStubFlags);
+ }
+#ifdef FEATURE_COMINTEROP
+ else
+ if (pMD->IsComPlusCall() || pMD->IsGenericComPlusCall())
+ {
+ pStub = ComPlusCall::GetStubForILStub(pMD, &pStubMD);
+ }
+#endif // FEATURE_COMINTEROP
+ else
+ if (pMD->IsEEImpl())
+ {
+ CONSISTENCY_CHECK(pMD->GetMethodTable()->IsDelegate());
+ EEImplMethodDesc* pDelegateMD = (EEImplMethodDesc*)pMD;
+ pStub = COMDelegate::GetStubForILStub(pDelegateMD, &pStubMD, dwStubFlags);
+ }
+ else
+ if (pMD->IsIL())
+ {
+ CONSISTENCY_CHECK(SF_IsReverseStub(dwStubFlags));
+ pStub = NDirect::GetStubForILStub(pMD, &pStubMD, dwStubFlags);
+ }
+ else
+ {
+ UNREACHABLE_MSG("unexpected MethodDesc type");
+ }
+
+ if (pStubMD != NULL && pStubMD->IsILStub() && pStubMD->AsDynamicMethodDesc()->IsStubNeedsCOMStarted())
+ {
+ // the stub uses COM so make sure that it is started
+ EnsureComStarted();
+ }
+
+ if (ppStubMD != NULL)
+ *EnsureWritablePages(ppStubMD) = pStubMD;
+
+ RETURN pStub;
+}
+
+#ifdef FEATURE_COMINTEROP
+void CreateCLRToDispatchCOMStub(
+ MethodDesc * pMD,
+ DWORD dwStubFlags) // NDirectStubFlags
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(SF_IsCOMLateBoundStub(dwStubFlags) || SF_IsCOMEventCallStub(dwStubFlags));
+
+ // If we are dealing with a COM event call, then we need to initialize the
+ // COM event call information.
+ if (SF_IsCOMEventCallStub(dwStubFlags))
+ {
+ _ASSERTE(pMD->IsComPlusCall()); // no generic COM eventing
+ ((ComPlusCallMethodDesc *)pMD)->InitComEventCallInfo();
+ }
+
+ // Get the call signature information
+ StubSigDesc sigDesc(pMD);
+
+ int iLCIDArg = 0;
+ int numArgs = 0;
+ int numParamTokens = 0;
+ mdParamDef* pParamTokenArray = NULL;
+
+ CreateNDirectStubAccessMetadata(&sigDesc,
+ (CorPinvokeMap)0,
+ &dwStubFlags,
+ &iLCIDArg,
+ &numArgs);
+
+ numParamTokens = numArgs + 1;
+ pParamTokenArray = (mdParamDef*)_alloca(numParamTokens * sizeof(mdParamDef));
+ CollateParamTokens(sigDesc.m_pModule->GetMDImport(), sigDesc.m_tkMethodDef, numArgs, pParamTokenArray);
+
+ DispatchStubState MyStubState;
+
+ CreateNDirectStubWorker(&MyStubState,
+ &sigDesc,
+ (CorNativeLinkType)0,
+ (CorNativeLinkFlags)0,
+ (CorPinvokeMap)0,
+ dwStubFlags | NDIRECTSTUB_FL_COM,
+ pMD,
+ pParamTokenArray,
+ iLCIDArg);
+
+ _ASSERTE(pMD->IsComPlusCall()); // no generic disp-calls
+ ((ComPlusCallMethodDesc *)pMD)->InitRetThunk();
+}
+
+
+#endif // FEATURE_COMINTEROP
+
+/*static*/
+LPVOID NDirect::NDirectGetEntryPoint(NDirectMethodDesc *pMD, HINSTANCE hMod)
+{
+ // GetProcAddress cannot be called while preemptive GC is disabled.
+ // It requires the OS to take the loader lock.
+ CONTRACT(LPVOID)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ g_IBCLogger.LogNDirectCodeAccess(pMD);
+
+#ifdef MDA_SUPPORTED
+ MDA_TRIGGER_ASSISTANT(PInvokeLog, LogPInvoke(pMD, hMod));
+#endif
+
+ RETURN pMD->FindEntryPoint(hMod);
+}
+
+static BOOL AbsolutePath(LPCWSTR wszLibName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(wszLibName));
+ }
+ CONTRACTL_END;
+
+ // check for UNC or a drive
+ WCHAR* ptr = (WCHAR*) wszLibName;
+ WCHAR* start = ptr;
+
+ // Check for UNC path
+ while(*ptr)
+ {
+ if(*ptr != W('\\'))
+ break;
+ ptr++;
+ }
+
+ if((ptr - wszLibName) == 2)
+ return TRUE;
+ else
+ {
+        // Check to see if there is a colon indicating a drive or protocol
+ for(ptr = start; *ptr; ptr++)
+ {
+ if(*ptr == W(':'))
+ break;
+ }
+        if (*ptr != W('\0'))
+ return TRUE;
+ }
+
+ // We did not find a UNC/drive/protocol path
+ return FALSE;
+}
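+
+// Examples of what the check above classifies as absolute (illustrative):
+//   \\server\share\foo.dll -> TRUE  (exactly two leading backslashes: UNC)
+//   C:\temp\foo.dll        -> TRUE  (a colon indicates a drive)
+//   file://foo.dll         -> TRUE  (a colon indicates a protocol)
+//   foo.dll                -> FALSE (relative name)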
+
+VOID NDirectMethodDesc::SetNDirectTarget(LPVOID pTarget)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ PRECONDITION(IsNDirect());
+ PRECONDITION(pTarget != NULL);
+ }
+ CONTRACTL_END;
+
+ Stub *pInterceptStub = NULL;
+
+ BOOL fHook = FALSE;
+
+ // Host hooks are not supported for Mac CoreCLR.
+ if (NDirect::IsHostHookEnabled())
+ {
+#ifdef _WIN64
+ // we will call CallNeedsHostHook on every invocation for back compat
+ fHook = TRUE;
+#else // _WIN64
+ fHook = CallNeedsHostHook((size_t)pTarget);
+#endif // _WIN64
+
+#ifdef _DEBUG
+ if (g_pConfig->ShouldGenerateStubForHost())
+ {
+ fHook = TRUE;
+ }
+#endif
+ }
+
+#ifdef _TARGET_X86_
+
+#ifndef FEATURE_CORECLR
+ if (HasCopyCtorArgs())
+ {
+ _ASSERTE(pInterceptStub == NULL);
+
+ // static stub that gets its arguments in a thread-static field
+ pInterceptStub = NDirect::GetStubForCopyCtor();
+ }
+#endif // !FEATURE_CORECLR
+
+#ifdef MDA_SUPPORTED
+ if (!IsQCall() && MDA_GET_ASSISTANT(PInvokeStackImbalance))
+ {
+ pInterceptStub = GenerateStubForMDA(pTarget, pInterceptStub, fHook);
+ }
+#endif // MDA_SUPPORTED
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (fHook)
+ {
+ pInterceptStub = GenerateStubForHost(pTarget, pInterceptStub);
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#endif // _TARGET_X86_
+
+
+ NDirectWriteableData* pWriteableData = GetWriteableData();
+ EnsureWritablePages(pWriteableData);
+ g_IBCLogger.LogNDirectCodeAccess(this);
+
+ if (pInterceptStub != NULL WIN64_ONLY(|| fHook))
+ {
+ ndirect.m_pNativeNDirectTarget = pTarget;
+
+#if defined(_TARGET_X86_)
+ pTarget = (PVOID)pInterceptStub->GetEntryPoint();
+
+ LPVOID oldTarget = GetNDirectImportThunkGlue()->GetEntrypoint();
+ if (FastInterlockCompareExchangePointer(&pWriteableData->m_pNDirectTarget, pTarget,
+ oldTarget) != oldTarget)
+ {
+ pInterceptStub->DecRef();
+ }
+#else
+        _ASSERTE(pInterceptStub == NULL); // we don't intercept for anything other than the host on non-x86 targets
+ pWriteableData->m_pNDirectTarget = (LPVOID)GetEEFuncEntryPoint(PInvokeStubForHost);
+#endif
+ }
+ else
+ {
+ pWriteableData->m_pNDirectTarget = pTarget;
+ }
+}
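+
+// Net effect of SetNDirectTarget above (illustrative): on x86 the published
+// m_pNDirectTarget may point at a chain of intercept stubs (copy-ctor stub,
+// MDA stack-imbalance stub, host-hook stub) that ultimately reach the real
+// target, which is preserved separately in m_pNativeNDirectTarget.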
+
+#if defined(_TARGET_X86_) && !defined(FEATURE_CORECLR)
+
+// Returns a small stub whose purpose is to record current ESP and call code:CopyCtorCallStubWorker
+// to invoke copy constructors and destructors as appropriate. This stub operates on arguments
+// already pushed to the stack by JITted IL stub and must not create a new frame, i.e. it must
+// tail call to the target for it to see the arguments that copy ctors have been called on.
+//
+// As a consequence, the stub doesn't take any extra secret arguments and the description of the
+// ctors/dtors to call is passed "out-of-band" in a thread static field. The worker returns
+// address of the real target (also passed out of band) which enables us to have only one static
+// stub in i386\asmhelpers.asm.
+
+// static
+Stub *NDirect::GetStubForCopyCtor()
+{
+ STANDARD_VM_CONTRACT;
+
+ static Stub *s_pStub = NULL;
+
+ if (s_pStub == NULL)
+ {
+ Stub *pStub = Stub::NewStub(GetEEFuncEntryPoint(CopyCtorCallStub));
+ if (InterlockedCompareExchangeT(&s_pStub, pStub, NULL) != NULL)
+ {
+ pStub->DecRef();
+ }
+ }
+
+ s_pStub->IncRef();
+ return s_pStub;
+}
+
+#endif // _TARGET_X86_ && !FEATURE_CORECLR
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+BOOL NDirect::IsHostHookEnabled()
+{
+ WRAPPER_NO_CONTRACT;
+ //
+ // WARNING: The non-debug portion of this logic is inlined into UMThunkStubAMD64!
+ //
+ return CLRTaskHosted() INDEBUG(|| g_pConfig->ShouldGenerateStubForHost());
+}
+
+EXTERN_C BOOL CallNeedsHostHook(size_t target)
+{
+ BOOL fHook = FALSE;
+ IHostTaskManager *pManager = CorHost2::GetHostTaskManager();
+ if (pManager)
+ {
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pManager->CallNeedsHostHook(target,&fHook);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ _ASSERTE (hr == S_OK);
+ }
+#ifdef _DEBUG
+ else
+ {
+ if (g_pConfig->ShouldGenerateStubForHost())
+ {
+ fHook = TRUE;
+ }
+ }
+#endif
+ return fHook;
+}
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#if defined(_TARGET_X86_) && defined(MDA_SUPPORTED)
+EXTERN_C VOID __stdcall PInvokeStackImbalanceWorker(StackImbalanceCookie *pSICookie, DWORD dwPostESP)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_PREEMPTIVE; // we've already switched to preemptive
+
+ // make sure we restore the original Win32 last error before leaving this function - we are
+ // called right after returning from the P/Invoke target and the error has not been saved yet
+ BEGIN_PRESERVE_LAST_ERROR;
+
+ MdaPInvokeStackImbalance* pProbe = MDA_GET_ASSISTANT(PInvokeStackImbalance);
+
+ // This MDA must be active if we generated a call to PInvokeStackImbalanceHelper
+ _ASSERTE(pProbe);
+
+ pProbe->CheckStack(pSICookie, dwPostESP);
+
+ END_PRESERVE_LAST_ERROR;
+}
+#endif // _TARGET_X86_ && MDA_SUPPORTED
+
+#if defined(_TARGET_X86_) && !defined(FEATURE_CORECLR)
+struct CopyCtorStubCookie // same layout as StubHelpers.CopyCtorStubCookie
+{
+ LPVOID m_srcInstancePtr;
+ DWORD m_dstStackOffset;
+ LPVOID m_ctorPtr; // managed method ptr
+ LPVOID m_dtorPtr; // managed method ptr
+
+ CopyCtorStubCookie *m_pNext;
+};
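+
+// Cookies are chained through m_pNext into a singly-linked list describing every
+// by-value argument that needs a copy constructor and/or destructor invocation;
+// CopyCtorCallStubWorker below walks this list once per call.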
+
+struct CopyCtorStubDesc // same layout as StubHelpers.CopyCtorStubDesc
+{
+ CopyCtorStubCookie *m_pCookie;
+ LPVOID m_pTarget;
+};
+
+// Called by CopyCtorCallStub after we have already transitioned to unmanaged. Invokes copy ctor(s)
+// and dtor(s) using reverse P/Invoke which has some perf impact but provides all the debugging and
+// profiling support. An alternative solution would be CallDescr or some optimized variant of it
+// which would probably result in confusing call stacks.
+EXTERN_C LPVOID __stdcall CopyCtorCallStubWorker(BYTE *pESP)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_PREEMPTIVE; // we've already switched to preemptive
+
+ CopyCtorStubCookie *pCookie;
+ LPVOID pTarget;
+ {
+ GCX_COOP();
+ // get address of the thread-static field
+ FieldDesc *pFD = MscorlibBinder::GetField(FIELD__STUBHELPERS__COPY_CTOR_STUB_DESC);
+
+ CopyCtorStubDesc *pStubDesc = (CopyCtorStubDesc *)Thread::GetStaticFieldAddress(pFD);
+
+ // read the fields in cooperative mode
+ pCookie = pStubDesc->m_pCookie;
+ pTarget = pStubDesc->m_pTarget;
+
+ _ASSERTE(pCookie != NULL && pTarget != NULL);
+
+ // make sure we ASSERT/AV reliably if we are called by mistake
+ pStubDesc->m_pCookie = NULL;
+ pStubDesc->m_pTarget = NULL;
+ }
+
+ while (pCookie != NULL)
+ {
+ if (pCookie->m_ctorPtr != NULL)
+ {
+ // get reverse P/Invoke to the copy ctor (cache on AD)
+ MethodDesc *pMD = Entry2MethodDesc((PCODE)pCookie->m_ctorPtr, NULL);
+ UMEntryThunk *pUMEntryThunk = GetAppDomain()->GetUMEntryThunkCache()->GetUMEntryThunk(pMD);
+
+ // GetUMEntryThunk always returns stdcall-able function pointers for ordinary managed methods
+ // but the ctor can be a P/Invoke (pre-Whidbey MC++ only)
+ typedef void (__stdcall *CtorFnPtr_StdCall) (LPVOID dst, LPVOID src);
+ typedef void (__thiscall *CtorFnPtr_ThisCall)(LPVOID dst, LPVOID src);
+ typedef void (__cdecl *CtorFnPtr_Cdecl) (LPVOID dst, LPVOID src);
+
+ // call the copy ctor using the right calling convention
+ UMThunkMarshInfo *pMarshInfo = pUMEntryThunk->GetUMThunkMarshInfo();
+ pMarshInfo->RunTimeInit();
+
+ switch (pMarshInfo->GetCallingConvention() & pmCallConvMask)
+ {
+ case pmCallConvStdcall:
+ case pmCallConvWinapi:
+ {
+ CtorFnPtr_StdCall fnPtr = (CtorFnPtr_StdCall)pUMEntryThunk->GetCode();
+ fnPtr(pESP + pCookie->m_dstStackOffset, pCookie->m_srcInstancePtr);
+ break;
+ }
+
+ case pmCallConvThiscall:
+ {
+ CtorFnPtr_ThisCall fnPtr = (CtorFnPtr_ThisCall)pUMEntryThunk->GetCode();
+ fnPtr(pESP + pCookie->m_dstStackOffset, pCookie->m_srcInstancePtr);
+ break;
+ }
+
+ default:
+ {
+ _ASSERTE((pMarshInfo->GetCallingConvention() & pmCallConvMask) == pmCallConvCdecl);
+
+ CtorFnPtr_Cdecl fnPtr = (CtorFnPtr_Cdecl)pUMEntryThunk->GetCode();
+ fnPtr(pESP + pCookie->m_dstStackOffset, pCookie->m_srcInstancePtr);
+ break;
+ }
+ }
+ }
+ if (pCookie->m_dtorPtr != NULL)
+ {
+ // get reverse P/Invoke to the dtor (cache on AD)
+ MethodDesc *pMD = Entry2MethodDesc((PCODE)pCookie->m_dtorPtr, NULL);
+ UMEntryThunk *pUMEntryThunk = GetAppDomain()->GetUMEntryThunkCache()->GetUMEntryThunk(pMD);
+
+ // GetUMEntryThunk always returns stdcall-able function pointers for ordinary managed methods
+ // but the dtor can be a P/Invoke (pre-Whidbey MC++ only)
+ typedef void (__stdcall *DtorFnPtr_StdCall) (LPVOID src);
+ typedef void (__thiscall *DtorFnPtr_ThisCall)(LPVOID src);
+ typedef void (__cdecl *DtorFnPtr_Cdecl) (LPVOID src);
+
+ // call the dtor using the right calling convention
+ UMThunkMarshInfo *pMarshInfo = pUMEntryThunk->GetUMThunkMarshInfo();
+ pMarshInfo->RunTimeInit();
+
+ switch (pMarshInfo->GetCallingConvention() & pmCallConvMask)
+ {
+ case pmCallConvStdcall:
+ case pmCallConvWinapi:
+ {
+ DtorFnPtr_StdCall fnPtr = (DtorFnPtr_StdCall)pUMEntryThunk->GetCode();
+ fnPtr(pCookie->m_srcInstancePtr);
+ break;
+ }
+
+ case pmCallConvThiscall:
+ {
+ DtorFnPtr_ThisCall fnPtr = (DtorFnPtr_ThisCall)pUMEntryThunk->GetCode();
+ fnPtr(pCookie->m_srcInstancePtr);
+ break;
+ }
+
+ default:
+ {
+ _ASSERTE((pMarshInfo->GetCallingConvention() & pmCallConvMask) == pmCallConvCdecl);
+
+ DtorFnPtr_Cdecl fnPtr = (DtorFnPtr_Cdecl)pUMEntryThunk->GetCode();
+ fnPtr(pCookie->m_srcInstancePtr);
+ break;
+ }
+ }
+ }
+ pCookie = pCookie->m_pNext;
+ }
+
+ return pTarget;
+}
+#endif // _TARGET_X86_ && !FEATURE_CORECLR
+
+// Preserving good error info from DllImport-driven LoadLibrary is tricky because we keep loading from different places
+// if earlier loads fail and those later loads obliterate error codes.
+//
+// This tracker object will keep track of the error code in accordance to priority:
+//
+// low-priority: unknown error code (should never happen)
+// medium-priority: dll not found
+// high-priority: dll found but error during loading
+//
+// We will overwrite the previous load's error code only if the new error code is higher priority.
+//
+
+class LoadLibErrorTracker
+{
+private:
+ static const DWORD const_priorityNotFound = 10;
+ static const DWORD const_priorityAccessDenied = 20;
+ static const DWORD const_priorityCouldNotLoad = 99999;
+public:
+ LoadLibErrorTracker()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_hr = E_FAIL;
+ m_priorityOfLastError = 0;
+ }
+
+ VOID TrackErrorCode(DWORD dwLastError)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD priority;
+
+ switch (dwLastError)
+ {
+ case ERROR_FILE_NOT_FOUND:
+ case ERROR_PATH_NOT_FOUND:
+ case ERROR_MOD_NOT_FOUND:
+ case ERROR_DLL_NOT_FOUND:
+ priority = const_priorityNotFound;
+ break;
+
+ // If we can't access a location, we can't know if the dll's there or if it's good.
+ // Still, this is probably more unusual (and thus of more interest) than a dll-not-found
+ // so give it an intermediate priority.
+            case ERROR_ACCESS_DENIED:
+                priority = const_priorityAccessDenied;
+                break; // do not fall through; that would bump the priority to CouldNotLoad
+
+ // Assume all others are "dll found but couldn't load."
+ default:
+ priority = const_priorityCouldNotLoad;
+ break;
+ }
+
+ UpdateHR(priority, HRESULT_FROM_WIN32(dwLastError));
+ }
+
+ // Sets the error code to HRESULT as could not load DLL
+    // Records the given HRESULT with "could not load the DLL" priority.
+ {
+ UpdateHR(const_priorityCouldNotLoad, hr);
+ }
+
+ HRESULT GetHR()
+ {
+ return m_hr;
+ }
+
+private:
+ void UpdateHR(DWORD priority, HRESULT hr)
+ {
+ if (priority > m_priorityOfLastError)
+ {
+ m_hr = hr;
+ m_priorityOfLastError = priority;
+ }
+ }
+
+ HRESULT m_hr;
+ DWORD m_priorityOfLastError;
+}; // class LoadLibErrorTracker
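+
+// Minimal usage sketch (illustrative): the tracker keeps the highest-priority
+// error seen across a sequence of load attempts from different locations.
+//
+//   LoadLibErrorTracker tracker;
+//   tracker.TrackErrorCode(ERROR_MOD_NOT_FOUND);  // priority 10 recorded
+//   tracker.TrackErrorCode(ERROR_ACCESS_DENIED);  // priority 20 overwrites it
+//   tracker.TrackErrorCode(ERROR_FILE_NOT_FOUND); // priority 10 ignored
+//   HRESULT hr = tracker.GetHR();                 // HRESULT_FROM_WIN32(ERROR_ACCESS_DENIED)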
+
+
+// Local helper function for the LoadLibraryModule below
+static HMODULE LocalLoadLibraryHelper( LPCWSTR name, DWORD flags, LoadLibErrorTracker *pErrorTracker )
+{
+ STANDARD_VM_CONTRACT;
+
+ HMODULE hmod = NULL;
+
+#ifndef FEATURE_PAL
+
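+    // The low byte of 'flags' holds legacy LoadLibraryEx flags (e.g.
+    // LOAD_WITH_ALTERED_SEARCH_PATH == 0x08), while the LOAD_LIBRARY_SEARCH_*
+    // flags all live at 0x100 and above and are only honored by OS loaders that
+    // support them - hence the 0xFFFFFF00 / 0xFF split below.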
+ if ((flags & 0xFFFFFF00) != 0
+#ifndef FEATURE_CORESYSTEM
+ && NDirect::SecureLoadLibrarySupported()
+#endif // !FEATURE_CORESYSTEM
+ )
+ {
+ hmod = CLRLoadLibraryEx( name, NULL, flags & 0xFFFFFF00);
+ if(hmod != NULL)
+ {
+ return hmod;
+ }
+
+ DWORD dwLastError = GetLastError();
+ if (dwLastError != ERROR_INVALID_PARAMETER)
+ {
+ pErrorTracker->TrackErrorCode(dwLastError);
+ return hmod;
+ }
+ }
+
+ hmod = CLRLoadLibraryEx(name, NULL, flags & 0xFF);
+
+#else // !FEATURE_PAL
+ hmod = CLRLoadLibrary(name);
+#endif // !FEATURE_PAL
+
+ if (hmod == NULL)
+ {
+ pErrorTracker->TrackErrorCode(GetLastError());
+ }
+
+ return hmod;
+}
+
+
+#if !defined(FEATURE_CORESYSTEM)
+
+#define NATIVE_DLL(d) L#d, L#d W(".dll")
+
+const LPCWSTR wellKnownModules[] =
+{
+ NATIVE_DLL(advapi32),
+ NATIVE_DLL(gdi32),
+ NATIVE_DLL(gdiplus),
+ NATIVE_DLL(kernel32),
+ NATIVE_DLL(mscoree),
+ NATIVE_DLL(ole32),
+ NATIVE_DLL(shfolder),
+ NATIVE_DLL(user32),
+ NATIVE_DLL(version)
+};
+
+BOOL CompareLibNames (UPTR val1, UPTR val2)
+{
+ CONTRACTL {
+ MODE_ANY;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LPCWSTR wszStr1 = (LPCWSTR)(val1 << 1);
+ LPCWSTR wszStr2 = (LPCWSTR)val2;
+
+ if (SString::_wcsicmp(wszStr1, wszStr2) == 0)
+ return TRUE;
+
+ return FALSE;
+}
+
+PtrHashMap * NDirect::s_pWellKnownNativeModules = NULL;
+bool NDirect::s_fSecureLoadLibrarySupported = false;
+
+HINSTANCE NDirect::CheckForWellKnownModules(LPCWSTR wszLibName, LoadLibErrorTracker *pErrorTracker)
+{
+ STANDARD_VM_CONTRACT;
+
+ ModuleHandleHolder hMod;
+ ULONG hash = HashiString(wszLibName);
+ LPCWSTR wszName = NULL;
+ wszName = (LPCWSTR) s_pWellKnownNativeModules->LookupValue((UPTR) hash, (LPVOID)wszLibName);
+
+ if (wszName != (LPCWSTR)INVALIDENTRY)
+ {
+ hMod = LocalLoadLibraryHelper(wszLibName, 0, pErrorTracker);
+ }
+
+ return hMod.Extract();
+}
+
+#endif // !FEATURE_CORESYSTEM
+
+#define TOLOWER(a) (((a) >= W('A') && (a) <= W('Z')) ? (W('a') + (a - W('A'))) : (a))
+#define TOHEX(a) ((a)>=10 ? W('a')+(a)-10 : W('0')+(a))
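+
+// For example (illustrative): TOHEX(0xA) yields W('a'), TOHEX(3) yields W('3'),
+// and TOLOWER(W('F')) yields W('f'). These are used below to compare a computed
+// public key token byte-by-byte against its lowercase hex string form.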
+
+#ifndef FEATURE_CORECLR
+/*static*/
+VOID NDirect::CheckUnificationList(NDirectMethodDesc * pMD, DWORD * pDllImportSearchPathFlag, BOOL * pSearchAssemblyDirectory)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+    // If neither the assembly nor the method has the attribute, check the unification list.
+ Assembly *pAssembly = pMD->GetAssembly();
+
+ if (!pAssembly->IsStrongNamed())
+ return;
+
+ const char * simpleName = pAssembly->GetSimpleName();
+
+ StringHashIterator(it, g_arFxPolicy, simpleName);
+
+ int pos;
+
+ while ((pos = it.GetNext()) >= 0)
+ {
+ const FrameworkConfig & config = g_arFxPolicy[pos];
+
+ FixedSizeString<char> asmName;
+
+ config.GetFxAssemblyName(asmName);
+
+ if (_stricmp(asmName, simpleName) == 0)
+ {
+ DWORD cbPublicKey = 0;
+ const void *pbPublicKey = NULL;
+ pbPublicKey = pAssembly->GetPublicKey(&cbPublicKey);
+
+            //
+            // StrongNameTokenFromPublicKey is a potentially expensive operation, so do it
+            // only once we have a match on the simple name.
+            //
+ StrongNameBufferHolder<BYTE> pbStrongNameToken;
+ DWORD cbStrongNameToken;
+
+ if (StrongNameTokenFromPublicKey((BYTE*) pbPublicKey,cbPublicKey,&pbStrongNameToken,&cbStrongNameToken))
+ {
+ BOOL pktIsEqual = TRUE;
+
+ LPCWSTR pwzPKT = config.GetPKT();
+
+ for (UINT j = 0; j < cbStrongNameToken; j++)
+ {
+ WCHAR firstChar = TOHEX(pbStrongNameToken[j] / 16);
+ WCHAR secondChar = TOHEX(pbStrongNameToken[j] % 16);
+
+ if (firstChar != TOLOWER(pwzPKT[j*2]) || secondChar != TOLOWER(pwzPKT[j*2+1]))
+ {
+ pktIsEqual = FALSE;
+ break;
+ }
+ }
+
+ if (pktIsEqual)
+ {
+ *pDllImportSearchPathFlag = LOAD_LIBRARY_SEARCH_DEFAULT_DIRS;
+ *pSearchAssemblyDirectory = TRUE;
+ break;
+ }
+ }
+ }
+ }
+}
+#endif // !FEATURE_CORECLR
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+/* static */
+HMODULE NDirect::LoadLibraryModuleViaHost(NDirectMethodDesc * pMD, AppDomain* pDomain, const wchar_t* wszLibName)
+{
+ STANDARD_VM_CONTRACT;
+    // Dynamic P/Invoke support:
+    // check whether we need to give the host a chance to provide the unmanaged dll.
+
+    // Prevent overriding of Windows API sets.
+    // This replicates the quick check from the OS implementation of API sets.
+ if (SString::_wcsnicmp(wszLibName, W("api-"), 4) == 0 || SString::_wcsnicmp(wszLibName, W("ext-"), 4) == 0)
+ {
+ return NULL;
+ }
+
+ LPVOID hmod = NULL;
+ CLRPrivBinderCoreCLR *pTPABinder = pDomain->GetTPABinderContext();
+ Assembly* pAssembly = pMD->GetMethodTable()->GetAssembly();
+
+ PTR_ICLRPrivBinder pBindingContext = pAssembly->GetManifestFile()->GetBindingContext();
+
+    // Step 0: Check if the assembly was bound using TPA.
+    //         The binding context can be null or an overridden TPA context.
+ if (pBindingContext == NULL)
+ {
+ return NULL;
+ }
+
+ UINT_PTR assemblyBinderID = 0;
+ IfFailThrow(pBindingContext->GetBinderID(&assemblyBinderID));
+
+ ICLRPrivBinder *pCurrentBinder = reinterpret_cast<ICLRPrivBinder *>(assemblyBinderID);
+
+ if (AreSameBinderInstance(pCurrentBinder, pTPABinder))
+ {
+ return NULL;
+ }
+
+    // Step 1: If the assembly was not bound using TPA,
+    //         call System.Runtime.Loader.AssemblyLoadContext.ResolveUnmanagedDll to give
+    //         the custom assembly load context a chance to load the unmanaged dll.
+
+ GCX_COOP();
+
+ STRINGREF pUnmanagedDllName;
+ pUnmanagedDllName = StringObject::NewString(wszLibName);
+
+ GCPROTECT_BEGIN(pUnmanagedDllName);
+
+ // Get the pointer to the managed assembly load context
+ INT_PTR ptrManagedAssemblyLoadContext = ((CLRPrivBinderAssemblyLoadContext *)pCurrentBinder)->GetManagedAssemblyLoadContext();
+
+    // Prepare to invoke the System.Runtime.Loader.AssemblyLoadContext.ResolveUnmanagedDll method.
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__ASSEMBLYLOADCONTEXT__RESOLVEUNMANAGEDDLL);
+ DECLARE_ARGHOLDER_ARRAY(args, 2);
+ args[ARGNUM_0] = STRINGREF_TO_ARGHOLDER(pUnmanagedDllName);
+ args[ARGNUM_1] = PTR_TO_ARGHOLDER(ptrManagedAssemblyLoadContext);
+
+ // Make the call
+ CALL_MANAGED_METHOD(hmod,LPVOID,args);
+
+ GCPROTECT_END();
+
+ return (HMODULE)hmod;
+}
+#endif //defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+/* static */
+HINSTANCE NDirect::LoadLibraryModule( NDirectMethodDesc * pMD, LoadLibErrorTracker * pErrorTracker)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION( CheckPointer( pMD ) );
+ }
+ CONTRACTL_END;
+
+ LPCUTF8 name = pMD->GetLibName();
+ if ( !name || !*name )
+ return NULL;
+
+ ModuleHandleHolder hmod;
+
+ DWORD loadWithAlteredPathFlags = GetLoadWithAlteredSearchPathFlag();
+
+ PREFIX_ASSUME( name != NULL );
+ MAKE_WIDEPTR_FROMUTF8( wszLibName, name );
+
+ AppDomain* pDomain = GetAppDomain();
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+ hmod = LoadLibraryModuleViaHost(pMD, pDomain, wszLibName);
+#endif //FEATURE_HOST_ASSEMBLY_RESOLVER
+
+ if(hmod == NULL)
+ {
+ hmod = pDomain->FindUnmanagedImageInCache(wszLibName);
+ }
+
+ if(hmod != NULL)
+ {
+ return hmod.Extract();
+ }
+
+#if !defined(FEATURE_CORESYSTEM)
+ hmod = CheckForWellKnownModules(wszLibName, pErrorTracker);
+#endif
+
+#ifndef FEATURE_CORECLR
+ // Since fusion.dll has been incorporated into mscorwks.dll, we need to redirect
+ // any PInvokes for fusion.dll over to this runtime module. In order to avoid picking
+ // up invalid versions of fusion.dll, we perform this redirection first. Also redirect
+ // PInvokes to mscorwks.dll and clr.dll to this runtime module (module rename back
+ // compat and in-proc SxS correctness).
+ if (hmod == NULL)
+ {
+ static LPCWSTR const rwszAliases[] =
+ {
+ W("fusion.dll"), W("mscorwks.dll"), W("clr.dll"),
+ W("fusion"), W("mscorwks"), W("clr")
+ };
+
+ for (int i = 0; i < COUNTOF(rwszAliases); i++)
+ {
+ if (SString::_wcsicmp(wszLibName, rwszAliases[i]) == 0)
+ {
+ hmod = GetCLRModule();
+ break;
+ }
+ }
+ }
+    // Some CLR DLLs cannot be directly PInvoked. They need in-proc SxS initialization - the shim
+    // (mscoreei.dll) takes care of that. Load such DLLs via the shim.
+ //
+ // Note that we do not support PInvoking into the newly renamed SxS versions of DLLs directly.
+ // For example mscorpe.dll functionality was moved to mscorpehost.dll in 4.0. When asked for
+ // loading mscorpe.dll, shim will load mscorpehost.dll and will call its InitializeSxS function
+ // first. However shim will not call InitializeSxS when asked for mscorpehost.dll directly.
+ // As a result users cannot use mscorpehost.dll directly for PInvokes (by design), they can only
+ // use the old mscorpe.dll name.
+ if (hmod == NULL)
+ {
+ static LPCWSTR const rgSxSAwareDlls[] =
+ {
+ W("mscorpe.dll"), W("mscorpe")
+ };
+
+ for (int i = 0; i < COUNTOF(rgSxSAwareDlls); i++)
+ {
+ if (SString::_wcsicmp(wszLibName, rgSxSAwareDlls[i]) == 0)
+ {
+ // Load the DLL using shim (shim takes care of the DLL SxS initialization)
+ HRESULT hr = g_pCLRRuntime->LoadLibrary(rgSxSAwareDlls[i], &hmod);
+ if (FAILED(hr))
+ { // We failed to load CLR DLL (probably corrupted installation)
+ pErrorTracker->TrackHR_CouldNotLoad(hr);
+ hmod = NULL;
+ }
+ break;
+ }
+ }
+ }
+#endif //!FEATURE_CORECLR
+
+#ifdef FEATURE_PAL
+ // In the PAL version of CoreCLR, the CLR module itself exports the functionality
+ // that the Windows version obtains from kernel32 and friends. In order to avoid
+ // picking up the wrong instance, we perform this redirection first.
+ // This is also true for CoreSystem builds, where mscorlib p/invokes are forwarded through coreclr
+ // itself so we can control CoreSystem library/API name re-mapping from one central location.
+ if (SString::_wcsicmp(wszLibName, MAIN_CLR_MODULE_NAME_W) == 0)
+ hmod = GetCLRModule();
+#endif // FEATURE_PAL
+
+#if defined(FEATURE_CORESYSTEM) && !defined(FEATURE_PAL)
+ if (hmod == NULL)
+ {
+        // Try to go straight to System32 for Windows API sets. This replicates the
+        // quick check from the OS implementation of API sets.
+ if (SString::_wcsnicmp(wszLibName, W("api-"), 4) == 0 || SString::_wcsnicmp(wszLibName, W("ext-"), 4) == 0)
+ {
+ hmod = LocalLoadLibraryHelper(wszLibName, LOAD_LIBRARY_SEARCH_SYSTEM32, pErrorTracker);
+ }
+ }
+#endif // FEATURE_CORESYSTEM && !FEATURE_PAL
+
+ DWORD dllImportSearchPathFlag = 0;
+ BOOL searchAssemblyDirectory = TRUE;
+ if (hmod == NULL)
+ {
+#ifndef FEATURE_CORECLR
+        // First check whether the method has the DefaultDllImportSearchPathsAttribute.
+        // If it does, dllImportSearchPathFlag is set to the attribute's value.
+        // Otherwise, check whether the assembly has the attribute;
+        // if so, the flag is set to the assembly-level value.
+ BOOL attributeIsFound = FALSE;
+
+ if (pMD->HasDefaultDllImportSearchPathsAttribute())
+ {
+ dllImportSearchPathFlag = pMD->DefaultDllImportSearchPathsAttributeCachedValue();
+ searchAssemblyDirectory = pMD->DllImportSearchAssemblyDirectory();
+ attributeIsFound = TRUE;
+ }
+ else
+ {
+ Module * pModule = pMD->GetModule();
+
+ if(pModule->HasDefaultDllImportSearchPathsAttribute())
+ {
+ dllImportSearchPathFlag = pModule->DefaultDllImportSearchPathsAttributeCachedValue();
+ searchAssemblyDirectory = pModule->DllImportSearchAssemblyDirectory();
+ attributeIsFound = TRUE;
+ }
+ }
+
+ if (!attributeIsFound)
+ {
+ CheckUnificationList(pMD, &dllImportSearchPathFlag, &searchAssemblyDirectory);
+ }
+#endif // !FEATURE_CORECLR
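+
+        // Illustrative managed declaration (hypothetical names) that would flow through
+        // the lookup above; the attribute value becomes dllImportSearchPathFlag, and
+        // DllImportSearchPath.AssemblyDirectory controls searchAssemblyDirectory:
+        //
+        //   [DefaultDllImportSearchPaths(DllImportSearchPath.System32 |
+        //                                DllImportSearchPath.AssemblyDirectory)]
+        //   [DllImport("mylib.dll")]
+        //   static extern int MyFunc();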
+
+ if (AbsolutePath(wszLibName))
+ {
+ DWORD flags = loadWithAlteredPathFlags;
+ if ((dllImportSearchPathFlag & LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR) != 0)
+ {
+ // LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR is the only flag affecting absolute path. Don't OR the flags
+ // unconditionally as all absolute path P/Invokes could then lose LOAD_WITH_ALTERED_SEARCH_PATH.
+ flags |= dllImportSearchPathFlag;
+ }
+
+ hmod = LocalLoadLibraryHelper(wszLibName, flags, pErrorTracker);
+ }
+ else if (searchAssemblyDirectory)
+ {
+ // Try to load the DLL alongside the assembly where the PInvoke was
+ // declared using the path of the assembly.
+ Assembly* pAssembly = pMD->GetMethodTable()->GetAssembly();
+
+ SString path = pAssembly->GetManifestFile()->GetPath();
+ SString::Iterator i = path.End();
+
+ if (PEAssembly::FindLastPathSeparator(path, i))
+ {
+ i++;
+ path.Truncate(i);
+
+ path.Append(wszLibName);
+
+ hmod = LocalLoadLibraryHelper(path, loadWithAlteredPathFlags | dllImportSearchPathFlag, pErrorTracker);
+ }
+
+#ifndef FEATURE_CORECLR
+ if (hmod == NULL)
+ {
+ // Try to load the DLL alongside the assembly where the PInvoke was
+ // declared using the codebase of the assembly. This is required for download
+ // and shadow copy scenarios.
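+                // Worked example (hypothetical codebase): for
+                //   codebase = "file:///C:/app/bin/MyAssembly.dll"
+                // the loops below skip "file:" and the leading slashes, then copy
+                // up to the last separator while rewriting '/' to '\', yielding
+                //   pathFromCodebase = "C:\app\bin\" + wszLibName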
+ const WCHAR* ptr;
+ SString codebase;
+ pAssembly->GetCodeBase(codebase);
+ DWORD dwCodebaseLength = codebase.GetCount();
+
+ // Strip off the protocol
+ for (ptr = codebase.GetUnicode(); *ptr && *ptr != W(':'); ptr++);
+
+ // If we have a code base then prepend it to the library name
+ if (*ptr)
+ {
+ SString pathFromCodebase;
+
+ // After finding the colon move forward until no more forward slashes
+ for(ptr++; *ptr && *ptr == W('/'); ptr++);
+ if (*ptr)
+ {
+ // Calculate the number of characters we are interested in
+ if (dwCodebaseLength > (DWORD)(ptr - codebase.GetUnicode()) )
+ {
+ // Back up to the last slash (forward or backwards)
+ const WCHAR* tail;
+
+ for (tail = codebase.GetUnicode() + (dwCodebaseLength - 1); tail > ptr && *tail != W('/') && *tail != W('\\'); tail--);
+
+ if (tail > ptr)
+ {
+ for(;ptr <= tail; ptr++)
+ {
+ if(*ptr == W('/'))
+ pathFromCodebase.Append(W('\\'));
+ else
+ pathFromCodebase.Append(*ptr);
+ }
+ }
+ }
+ }
+
+ pathFromCodebase.Append(wszLibName);
+
+ if (!pathFromCodebase.EqualsCaseInsensitive(path, PEImage::GetFileSystemLocale()))
+ {
+ hmod = LocalLoadLibraryHelper(pathFromCodebase, loadWithAlteredPathFlags | dllImportSearchPathFlag, pErrorTracker);
+ }
+ }
+ }
+#endif // !FEATURE_CORECLR
+ }
+ }
+
+#ifdef FEATURE_CORECLR
+ if (hmod == NULL && pDomain->HasNativeDllSearchDirectories())
+ {
+ AppDomain::PathIterator i = pDomain->IterateNativeDllSearchDirectories();
+ while (hmod == NULL && i.Next())
+ {
+ SString qualifiedPath(*(i.GetPath()));
+ qualifiedPath.Append(wszLibName);
+ if (AbsolutePath(qualifiedPath))
+ {
+ hmod = LocalLoadLibraryHelper(qualifiedPath, loadWithAlteredPathFlags, pErrorTracker);
+ }
+ }
+ }
+#endif // FEATURE_CORECLR
+
+    // Do we really need to do this? This call searches the application directory
+    // instead of the location of the library.
+ if(hmod == NULL)
+ {
+ hmod = LocalLoadLibraryHelper(wszLibName, dllImportSearchPathFlag, pErrorTracker);
+ }
+
+ // This may be an assembly name
+ if (!hmod)
+ {
+ // Format is "fileName, assemblyDisplayName"
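+        // e.g. a hypothetical "MyModule.dll, MyAssembly, Version=1.0.0.0, Culture=neutral"
+        // splits into szLibName = "MyModule.dll" and an AssemblySpec initialized from
+        // the display name after the comma.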
+ MAKE_UTF8PTR_FROMWIDE(szLibName, wszLibName);
+ char *szComma = strchr(szLibName, ',');
+ if (szComma)
+ {
+ *szComma = '\0';
+ while (COMCharacter::nativeIsWhiteSpace(*(++szComma)));
+
+ AssemblySpec spec;
+ if (SUCCEEDED(spec.Init(szComma)))
+ {
+ // Need to perform case insensitive hashing.
+ CQuickBytes qbLC;
+ {
+ UTF8_TO_LOWER_CASE(szLibName, qbLC);
+ szLibName = (LPUTF8) qbLC.Ptr();
+ }
+
+ Assembly *pAssembly = spec.LoadAssembly(FILE_LOADED);
+ Module *pModule = pAssembly->FindModuleByName(szLibName);
+
+ hmod = LocalLoadLibraryHelper(pModule->GetPath(), loadWithAlteredPathFlags | dllImportSearchPathFlag, pErrorTracker);
+ }
+ }
+ }
+
+    // After all this, if we have a handle, add it to the cache.
+ if (hmod)
+ {
+ pDomain->AddUnmanagedImageToCache(wszLibName, hmod);
+ }
+
+ return hmod.Extract();
+}
+
+
+//---------------------------------------------------------
+// Loads the DLL and finds the proc address for an N/Direct call.
+//---------------------------------------------------------
+/* static */
+VOID NDirect::NDirectLink(NDirectMethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ //
+ // On the phone, we only allow platform assemblies to define pinvokes
+ // unless the host has asked us otherwise.
+ //
+#ifdef FEATURE_WINDOWSPHONE
+ if (!GetAppDomain()->EnablePInvokeAndClassicComInterop())
+ {
+ if (!pMD->GetModule()->GetFile()->GetAssembly()->IsProfileAssembly())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_UserDllImport"));
+ }
+#endif //FEATURE_WINDOWSPHONE
+
+
+ if (pMD->IsClassConstructorTriggeredAtLinkTime())
+ {
+ pMD->GetMethodTable()->CheckRunClassInitThrowing();
+ }
+
+ if (pMD->IsQCall())
+ {
+ LPVOID pvTarget = pMD->ndirect.m_pNativeNDirectTarget;
+
+ // Do not repeat the lookup if the QCall was hardbound during ngen
+ if (pvTarget == NULL)
+ {
+ pvTarget = ECall::GetQCallImpl(pMD);
+ }
+ else
+ {
+ _ASSERTE(pvTarget == ECall::GetQCallImpl(pMD));
+ }
+
+ pMD->SetNDirectTarget(pvTarget);
+ return;
+ }
+
+ // Loading unmanaged dlls can trigger dllmains which certainly count as code execution!
+ pMD->EnsureActive();
+
+ LoadLibErrorTracker errorTracker;
+
+ BOOL fSuccess = FALSE;
+ HINSTANCE hmod = LoadLibraryModule( pMD, &errorTracker );
+ if ( hmod )
+ {
+ LPVOID pvTarget = NDirectGetEntryPoint(pMD, hmod);
+ if (pvTarget)
+ {
+
+#ifdef MDA_SUPPORTED
+ MdaInvalidOverlappedToPinvoke *pOverlapCheck = MDA_GET_ASSISTANT(InvalidOverlappedToPinvoke);
+ if (pOverlapCheck && pOverlapCheck->ShouldHook(pMD))
+ {
+ LPVOID pNewTarget = pOverlapCheck->Register(hmod,pvTarget);
+ if (pNewTarget)
+ {
+ pvTarget = pNewTarget;
+ }
+ }
+#endif
+ pMD->SetNDirectTarget(pvTarget);
+ fSuccess = TRUE;
+ }
+ }
+
+ if (!fSuccess)
+ {
+ if (pMD->GetLibName() == NULL)
+ COMPlusThrow(kEntryPointNotFoundException, IDS_EE_NDIRECT_GETPROCADDRESS_NONAME);
+
+ StackSString ssLibName(SString::Utf8, pMD->GetLibName());
+
+ if (!hmod)
+ {
+ HRESULT theHRESULT = errorTracker.GetHR();
+ if (theHRESULT == HRESULT_FROM_WIN32(ERROR_BAD_EXE_FORMAT))
+ {
+ COMPlusThrow(kBadImageFormatException);
+ }
+ else
+ {
+ SString hrString;
+ GetHRMsg(theHRESULT, hrString);
+ COMPlusThrow(kDllNotFoundException, IDS_EE_NDIRECT_LOADLIB, ssLibName.GetUnicode(), hrString);
+ }
+ }
+
+ WCHAR wszEPName[50];
+ if(WszMultiByteToWideChar(CP_UTF8, 0, (LPCSTR)pMD->GetEntrypointName(), -1, wszEPName, sizeof(wszEPName)/sizeof(WCHAR)) == 0)
+ {
+ wszEPName[0] = W('?');
+ wszEPName[1] = W('\0');
+ }
+
+ COMPlusThrow(kEntryPointNotFoundException, IDS_EE_NDIRECT_GETPROCADDRESS, ssLibName.GetUnicode(), wszEPName);
+ }
+}
+
+
+//---------------------------------------------------------
+// One-time init
+//---------------------------------------------------------
+/*static*/ void NDirect::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+#if !defined(FEATURE_CORECLR)
+ // Generate a table of some well known native dlls
+ s_pWellKnownNativeModules = ::new PtrHashMap();
+ s_pWellKnownNativeModules->Init(sizeof(wellKnownModules)/sizeof(LPCWSTR), CompareLibNames, TRUE, NULL);
+ for (int index = 0; index < sizeof(wellKnownModules)/sizeof(LPCWSTR); index++)
+ {
+ s_pWellKnownNativeModules->InsertValue((UPTR) HashiString(wellKnownModules[index]), (LPVOID)wellKnownModules[index]);
+ }
+
+ // Check if the OS supports the new secure LoadLibraryEx flags introduced in KB2533623
+ HMODULE hMod = CLRGetModuleHandle(WINDOWS_KERNEL32_DLLNAME_W);
+ _ASSERTE(hMod != NULL);
+
+ if (GetProcAddress(hMod, "AddDllDirectory") != NULL)
+ {
+ // The AddDllDirectory export was added in KB2533623 together with the new flag support
+ s_fSecureLoadLibrarySupported = true;
+ }
+#endif // !FEATURE_CORECLR
+}
+
+
+//==========================================================================
+// This function is reached only via NDirectImportThunk. Its purpose
+// is to ensure that the target DLL is fully loaded and ready to run.
+//
+// FUN FACTS: Though this function is actually entered in unmanaged mode,
+// it can reenter managed mode and throw a COM+ exception if the DLL linking
+// fails.
+//==========================================================================
+
+
+EXTERN_C LPVOID STDCALL NDirectImportWorker(NDirectMethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ LPVOID ret = NULL;
+
+    // This function is called by CLR-to-native assembly stubs, which are called by
+    // managed code. As a result, we need an unwind-and-continue handler to translate
+    // any of our internal exceptions into managed exceptions.
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+#ifdef FEATURE_MIXEDMODE // IJW
+ if (pMD->IsEarlyBound())
+ {
+ if (!pMD->IsZapped())
+ {
+ // we need the MD to be populated in case we decide to build an intercept
+ // stub to wrap the target in InitEarlyBoundNDirectTarget
+ PInvokeStaticSigInfo sigInfo;
+ NDirect::PopulateNDirectMethodDesc(pMD, &sigInfo);
+ }
+
+ pMD->InitEarlyBoundNDirectTarget();
+ }
+ else
+#endif // FEATURE_MIXEDMODE
+ {
+ //
+        // Otherwise we're in a late-bound inlined-pinvoke MD
+ //
+ INDEBUG(Thread *pThread = GetThread());
+ {
+ _ASSERTE(pThread->GetFrame()->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr());
+
+ CONSISTENCY_CHECK(pMD->IsNDirect());
+ //
+ // With IL stubs, we don't have to do anything but ensure the DLL is loaded.
+ //
+
+ if (!pMD->GetModule()->GetSecurityDescriptor()->CanCallUnmanagedCode())
+ Security::ThrowSecurityException(g_SecurityPermissionClassName, SPFLAGSUNMANAGEDCODE);
+
+ if (!pMD->IsZapped())
+ {
+ PInvokeStaticSigInfo sigInfo;
+ NDirect::PopulateNDirectMethodDesc(pMD, &sigInfo);
+ }
+ else
+ {
+ // must have been populated at NGEN time
+ _ASSERTE(pMD->GetLibName() != NULL);
+ }
+
+ pMD->CheckRestore();
+
+ NDirect::NDirectLink(pMD);
+ }
+ }
+
+ ret = pMD->GetNDirectTarget();
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ return ret;
+}
+
+//===========================================================================
+// Support for Pinvoke Calli instruction
+//
+//===========================================================================
+
+EXTERN_C void STDCALL VarargPInvokeStubWorker(TransitionBlock * pTransitionBlock, VASigCookie *pVASigCookie, MethodDesc *pMD)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ MAKE_CURRENT_THREAD_AVAILABLE();
+
+#ifdef _DEBUG
+ Thread::ObjectRefFlush(CURRENT_THREAD);
+#endif
+
+ FrameWithCookie<PrestubMethodFrame> frame(pTransitionBlock, pMD);
+ PrestubMethodFrame * pFrame = &frame;
+
+ pFrame->Push(CURRENT_THREAD);
+
+ _ASSERTE(pVASigCookie == pFrame->GetVASigCookie());
+ _ASSERTE(pMD == pFrame->GetFunction());
+
+ GetILStubForCalli(pVASigCookie, pMD);
+
+ pFrame->Pop(CURRENT_THREAD);
+}
+
+EXTERN_C void STDCALL GenericPInvokeCalliStubWorker(TransitionBlock * pTransitionBlock, VASigCookie * pVASigCookie, PCODE pUnmanagedTarget)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ MAKE_CURRENT_THREAD_AVAILABLE();
+
+#ifdef _DEBUG
+ Thread::ObjectRefFlush(CURRENT_THREAD);
+#endif
+
+ FrameWithCookie<PInvokeCalliFrame> frame(pTransitionBlock, pVASigCookie, pUnmanagedTarget);
+ PInvokeCalliFrame * pFrame = &frame;
+
+ pFrame->Push(CURRENT_THREAD);
+
+ _ASSERTE(pVASigCookie == pFrame->GetVASigCookie());
+
+ GetILStubForCalli(pVASigCookie, NULL);
+
+ pFrame->Pop(CURRENT_THREAD);
+}
+
+PCODE GetILStubForCalli(VASigCookie *pVASigCookie, MethodDesc *pMD)
+{
+ CONTRACT(PCODE)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pVASigCookie));
+ PRECONDITION(CheckPointer(pMD, NULL_OK));
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+ PCODE pTempILStub = NULL;
+
+    // This function is called by CLR-to-native assembly stubs, which are called by
+    // managed code. As a result, we need an unwind-and-continue handler to translate
+    // any of our internal exceptions into managed exceptions.
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ // Force a GC if the stress level is high enough
+ GCStress<cfg_any>::MaybeTrigger();
+
+ GCX_PREEMP();
+
+ Signature signature = pVASigCookie->signature;
+ CorPinvokeMap unmgdCallConv = pmNoMangle;
+
+ DWORD dwStubFlags = NDIRECTSTUB_FL_BESTFIT;
+
+ // The MethodDesc pointer may in fact be the unmanaged target, see PInvokeStubs.asm.
+ if (pMD == NULL || (UINT_PTR)pMD & 0x1)
+ {
+ pMD = NULL;
+ dwStubFlags |= NDIRECTSTUB_FL_UNMANAGED_CALLI;
+
+        // need to convert the CALLI signature to a stub signature with the managed calling convention
+ switch (MetaSig::GetCallingConvention(pVASigCookie->pModule, pVASigCookie->signature))
+ {
+ case IMAGE_CEE_CS_CALLCONV_C:
+ unmgdCallConv = pmCallConvCdecl;
+ break;
+ case IMAGE_CEE_CS_CALLCONV_STDCALL:
+ unmgdCallConv = pmCallConvStdcall;
+ break;
+ case IMAGE_CEE_CS_CALLCONV_THISCALL:
+ unmgdCallConv = pmCallConvThiscall;
+ break;
+ case IMAGE_CEE_CS_CALLCONV_FASTCALL:
+ unmgdCallConv = pmCallConvFastcall;
+ break;
+ default:
+ COMPlusThrow(kTypeLoadException, IDS_INVALID_PINVOKE_CALLCONV);
+ }
+
+ LoaderHeap *pHeap = pVASigCookie->pModule->GetLoaderAllocator()->GetHighFrequencyHeap();
+ PCOR_SIGNATURE new_sig = (PCOR_SIGNATURE)(void *)pHeap->AllocMem(S_SIZE_T(signature.GetRawSigLen()));
+ CopyMemory(new_sig, signature.GetRawSig(), signature.GetRawSigLen());
+
+ // make the stub IMAGE_CEE_CS_CALLCONV_DEFAULT
+ *new_sig &= ~IMAGE_CEE_CS_CALLCONV_MASK;
+ *new_sig |= IMAGE_CEE_CS_CALLCONV_DEFAULT;
+
+ signature = Signature(new_sig, signature.GetRawSigLen());
+ }
+ else
+ {
+ _ASSERTE(pMD->IsNDirect());
+ dwStubFlags |= NDIRECTSTUB_FL_CONVSIGASVARARG;
+
+ // vararg P/Invoke must be cdecl
+ unmgdCallConv = pmCallConvCdecl;
+
+ if (((NDirectMethodDesc *)pMD)->IsClassConstructorTriggeredByILStub())
+ {
+ dwStubFlags |= NDIRECTSTUB_FL_TRIGGERCCTOR;
+ }
+ }
+
+ mdMethodDef md;
+ CorNativeLinkFlags nlFlags;
+ CorNativeLinkType nlType;
+
+ if (pMD != NULL)
+ {
+ PInvokeStaticSigInfo sigInfo(pMD);
+
+ md = pMD->GetMemberDef();
+ nlFlags = sigInfo.GetLinkFlags();
+ nlType = sigInfo.GetCharSet();
+ }
+ else
+ {
+ md = mdMethodDefNil;
+ nlFlags = nlfNone;
+ nlType = nltAnsi;
+ }
+
+ StubSigDesc sigDesc(pMD, signature, pVASigCookie->pModule);
+
+ MethodDesc* pStubMD = NDirect::CreateCLRToNativeILStub(&sigDesc,
+ nlType,
+ nlFlags,
+ unmgdCallConv,
+ dwStubFlags);
+
+ pTempILStub = JitILStub(pStubMD);
+
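+    // Publish the stub with a compare-exchange: if another thread raced ahead and
+    // already set pNDirectILStub, our freshly JITted stub is simply abandoned and
+    // the winner is returned below, so all callers agree on one stub per cookie.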
+ InterlockedCompareExchangeT<PCODE>(&pVASigCookie->pNDirectILStub,
+ pTempILStub,
+ NULL);
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ RETURN pVASigCookie->pNDirectILStub;
+}
+
+#endif // CROSSGEN_COMPILE
+
+#endif // #ifndef DACCESS_COMPILE
+
+//
+// Truncates an SString by first converting it to Unicode and truncating it
+// if it is larger than bufSize. "..." is appended if it is truncated.
+//
+void TruncateUnicodeString(SString &string, COUNT_T bufSize)
+{
+ string.Normalize();
+ if ((string.GetCount() + 1) * sizeof(WCHAR) > bufSize)
+ {
+ _ASSERTE(bufSize / sizeof(WCHAR) > 4);
+ string.Truncate(string.Begin() + bufSize / sizeof(WCHAR) - 4);
+ string.Append(W("..."));
+ }
+}
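+
+// Worked example (illustrative): with bufSize = 16 bytes (room for 8 WCHARs
+// including the terminator), a 10-character string is truncated to
+// 16/sizeof(WCHAR) - 4 = 4 characters and "..." is appended; the resulting
+// 7 characters plus terminator occupy exactly 16 bytes.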
diff --git a/src/vm/dllimport.h b/src/vm/dllimport.h
new file mode 100644
index 0000000000..a4279c7323
--- /dev/null
+++ b/src/vm/dllimport.h
@@ -0,0 +1,785 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: DllImport.h
+//
+
+//
+
+
+#ifndef __dllimport_h__
+#define __dllimport_h__
+
+#include "util.hpp"
+
+class ILStubHashBlob;
+class NDirectStubParameters;
+struct PInvokeStaticSigInfo;
+class LoadLibErrorTracker;
+
+// This structure groups together data that describe the signature for which a marshaling stub is being generated.
+struct StubSigDesc
+{
+public:
+ StubSigDesc(MethodDesc *pMD, PInvokeStaticSigInfo* pSigInfo = NULL);
+ StubSigDesc(MethodDesc *pMD, Signature sig, Module *m_pModule);
+
+ MethodDesc *m_pMD;
+ Signature m_sig;
+ Module *m_pModule;
+ Module *m_pLoaderModule;
+ mdMethodDef m_tkMethodDef;
+ SigTypeContext m_typeContext;
+
+#ifdef _DEBUG
+ LPCUTF8 m_pDebugName;
+ LPCUTF8 m_pDebugClassName;
+
+ void InitDebugNames()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pMD != NULL)
+ {
+ m_pDebugName = m_pMD->m_pszDebugMethodName;
+ m_pDebugClassName = m_pMD->m_pszDebugClassName;
+ }
+ else
+ {
+ m_pDebugName = NULL;
+ m_pDebugClassName = NULL;
+ }
+ }
+#endif // _DEBUG
+};
+
+//=======================================================================
+// Collects code and data pertaining to the NDirect interface.
+//=======================================================================
+class NDirect
+{
+ friend class NDirectMethodDesc;
+
+public:
+ //---------------------------------------------------------
+ // One-time init
+ //---------------------------------------------------------
+ static void Init();
+
+ //---------------------------------------------------------
+ // Does a class or method have a NAT_L CustomAttribute?
+ //
+ // S_OK = yes
+ // S_FALSE = no
+ // FAILED = unknown because something failed.
+ //---------------------------------------------------------
+ static HRESULT HasNAT_LAttribute(IMDInternalImport *pInternalImport, mdToken token, DWORD dwMemberAttrs);
+
+ static LPVOID NDirectGetEntryPoint(NDirectMethodDesc *pMD, HINSTANCE hMod);
+ static HINSTANCE LoadLibraryModule( NDirectMethodDesc * pMD, LoadLibErrorTracker *pErrorTracker);
+
+#ifndef FEATURE_CORECLR
+ static VOID CheckUnificationList(NDirectMethodDesc * pMD, DWORD * pDllImportSearchPathFlag, BOOL * pSearchAssemblyDirectory);
+#endif // !FEATURE_CORECLR
+
+ static VOID NDirectLink(NDirectMethodDesc *pMD);
+
+ // Either MD or signature & module must be given.
+ static BOOL MarshalingRequired(MethodDesc *pMD, PCCOR_SIGNATURE pSig = NULL, Module *pModule = NULL);
+ static void PopulateNDirectMethodDesc(NDirectMethodDesc* pNMD, PInvokeStaticSigInfo* pSigInfo, BOOL throwOnError = TRUE);
+
+ static MethodDesc* CreateCLRToNativeILStub(
+ StubSigDesc* pSigDesc,
+ CorNativeLinkType nlType,
+ CorNativeLinkFlags nlFlags,
+ CorPinvokeMap unmgdCallConv,
+ DWORD dwStubFlags); // NDirectStubFlags
+
+#ifdef FEATURE_COMINTEROP
+ static MethodDesc* CreateFieldAccessILStub(
+ PCCOR_SIGNATURE szMetaSig,
+ DWORD cbMetaSigSize,
+ Module* pModule,
+ mdFieldDef fd,
+ DWORD dwStubFlags, // NDirectStubFlags
+ FieldDesc* pFD);
+#endif // FEATURE_COMINTEROP
+
+ static MethodDesc* CreateCLRToNativeILStub(PInvokeStaticSigInfo* pSigInfo,
+ DWORD dwStubFlags,
+ MethodDesc* pMD);
+
+ static MethodDesc* GetILStubMethodDesc(NDirectMethodDesc* pNMD, PInvokeStaticSigInfo* pSigInfo, DWORD dwNGenStubFlags);
+ static MethodDesc* GetStubMethodDesc(MethodDesc *pTargetMD, NDirectStubParameters* pParams, ILStubHashBlob* pHashParams, AllocMemTracker* pamTracker, bool& bILStubCreator, MethodDesc* pLastMD);
+ static void AddMethodDescChunkWithLockTaken(NDirectStubParameters* pParams, MethodDesc *pMD);
+ static void RemoveILStubCacheEntry(NDirectStubParameters* pParams, ILStubHashBlob* pHashParams);
+ static ILStubHashBlob* CreateHashBlob(NDirectStubParameters* pParams);
+ static PCODE GetStubForILStub(NDirectMethodDesc* pNMD, MethodDesc** ppStubMD, DWORD dwStubFlags);
+ static PCODE GetStubForILStub(MethodDesc* pMD, MethodDesc** ppStubMD, DWORD dwStubFlags);
+
+ inline static ILStubCache* GetILStubCache(NDirectStubParameters* pParams);
+
+#if defined(_TARGET_X86_) && !defined(FEATURE_CORECLR)
+ static Stub* GetStubForCopyCtor();
+#endif // _TARGET_X86_ && !FEATURE_CORECLR
+
+ static BOOL IsHostHookEnabled();
+
+ static Stub *GenerateStubForHost(Module *pModule, CorUnmanagedCallingConvention callConv, WORD wArgSize);
+
+private:
+ NDirect() {LIMITED_METHOD_CONTRACT;}; // prevent "new"'s on this class
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+ static HMODULE LoadLibraryModuleViaHost(NDirectMethodDesc * pMD, AppDomain* pDomain, const wchar_t* wszLibName);
+#endif //defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+#if !defined(FEATURE_CORESYSTEM)
+ static HINSTANCE CheckForWellKnownModules(LPCWSTR wszLibName, LoadLibErrorTracker *pErrorTracker);
+ static PtrHashMap *s_pWellKnownNativeModules;
+
+ // Indicates if the OS supports the new secure LoadLibraryEx flags introduced in KB2533623
+ static bool s_fSecureLoadLibrarySupported;
+
+public:
+ static bool SecureLoadLibrarySupported()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return s_fSecureLoadLibrarySupported;
+ }
+#endif // !FEATURE_CORESYSTEM
+};
+
+//----------------------------------------------------------------
+// Flags passed to CreateNDirectStub that control stub generation
+//----------------------------------------------------------------
+enum NDirectStubFlags
+{
+ NDIRECTSTUB_FL_CONVSIGASVARARG = 0x00000001,
+ NDIRECTSTUB_FL_BESTFIT = 0x00000002,
+ NDIRECTSTUB_FL_THROWONUNMAPPABLECHAR = 0x00000004,
+ NDIRECTSTUB_FL_NGENEDSTUB = 0x00000008,
+ NDIRECTSTUB_FL_DELEGATE = 0x00000010,
+ NDIRECTSTUB_FL_DOHRESULTSWAPPING = 0x00000020,
+ NDIRECTSTUB_FL_REVERSE_INTEROP = 0x00000040,
+#ifdef FEATURE_COMINTEROP
+ NDIRECTSTUB_FL_COM = 0x00000080,
+#endif // FEATURE_COMINTEROP
+ NDIRECTSTUB_FL_NGENEDSTUBFORPROFILING = 0x00000100,
+ NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL = 0x00000200,
+ NDIRECTSTUB_FL_HASDECLARATIVESECURITY = 0x00000400,
+ NDIRECTSTUB_FL_UNMANAGED_CALLI = 0x00000800,
+ NDIRECTSTUB_FL_TRIGGERCCTOR = 0x00001000,
+#ifdef FEATURE_COMINTEROP
+ NDIRECTSTUB_FL_FIELDGETTER = 0x00002000, // COM->CLR field getter
+ NDIRECTSTUB_FL_FIELDSETTER = 0x00004000, // COM->CLR field setter
+ NDIRECTSTUB_FL_WINRT = 0x00008000,
+ NDIRECTSTUB_FL_WINRTDELEGATE = 0x00010000,
+ NDIRECTSTUB_FL_WINRTSHAREDGENERIC = 0x00020000, // stub for methods on shared generic interfaces (only used in the forward direction)
+ NDIRECTSTUB_FL_WINRTCTOR = 0x00080000,
+ NDIRECTSTUB_FL_WINRTCOMPOSITION = 0x00100000, // set along with WINRTCTOR
+ NDIRECTSTUB_FL_WINRTSTATIC = 0x00200000,
+
+ NDIRECTSTUB_FL_WINRTHASREDIRECTION = 0x00800000, // the stub may tail-call to a static stub in mscorlib, not shareable
+#endif // FEATURE_COMINTEROP
+
+ // internal flags -- these won't ever show up in an NDirectStubHashBlob
+ NDIRECTSTUB_FL_FOR_NUMPARAMBYTES = 0x10000000, // do just enough to return the right value from Marshal.NumParamBytes
+
+#ifdef FEATURE_COMINTEROP
+ NDIRECTSTUB_FL_COMLATEBOUND = 0x20000000, // we use a generic stub for late bound calls
+ NDIRECTSTUB_FL_COMEVENTCALL = 0x40000000, // we use a generic stub for event calls
+#endif // FEATURE_COMINTEROP
+
+ // Note: The upper half of the range is reserved for ILStubTypes enum
+ NDIRECTSTUB_FL_MASK = 0x7FFFFFFF,
+ NDIRECTSTUB_FL_INVALID = 0x80000000,
+};
+
+enum ILStubTypes
+{
+ ILSTUB_INVALID = 0x80000000,
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+ ILSTUB_ARRAYOP_GET = 0x80000001,
+ ILSTUB_ARRAYOP_SET = 0x80000002,
+ ILSTUB_ARRAYOP_ADDRESS = 0x80000004,
+#endif
+#ifdef FEATURE_STUBS_AS_IL
+ ILSTUB_MULTICASTDELEGATE_INVOKE = 0x80000010,
+ ILSTUB_UNBOXINGILSTUB = 0x80000020,
+ ILSTUB_INSTANTIATINGSTUB = 0x80000040,
+#endif
+};
+
+#ifdef FEATURE_COMINTEROP
+#define COM_ONLY(x) (x)
+#else // FEATURE_COMINTEROP
+#define COM_ONLY(x) false
+#endif // FEATURE_COMINTEROP
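+
+// Note: dwStubFlags may hold either a combination of NDirectStubFlags or a single
+// ILStubTypes value (which has the high bit set). The
+// "dwStubFlags < NDIRECTSTUB_FL_INVALID" guard in the accessors below keeps the
+// bit tests from misfiring when an ILStubTypes value is passed in.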
+
+inline bool SF_IsVarArgStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_CONVSIGASVARARG)); }
+inline bool SF_IsBestFit (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_BESTFIT)); }
+inline bool SF_IsThrowOnUnmappableChar (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_THROWONUNMAPPABLECHAR)); }
+inline bool SF_IsNGENedStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_NGENEDSTUB)); }
+inline bool SF_IsDelegateStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_DELEGATE)); }
+inline bool SF_IsHRESULTSwapping (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_DOHRESULTSWAPPING)); }
+inline bool SF_IsReverseStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_REVERSE_INTEROP)); }
+inline bool SF_IsNGENedStubForProfiling(DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_NGENEDSTUBFORPROFILING)); }
+inline bool SF_IsDebuggableStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL)); }
+inline bool SF_IsStubWithDemand (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_HASDECLARATIVESECURITY)); }
+inline bool SF_IsCALLIStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_UNMANAGED_CALLI)); }
+inline bool SF_IsStubWithCctorTrigger (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_TRIGGERCCTOR)); }
+inline bool SF_IsForNumParamBytes (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_FOR_NUMPARAMBYTES)); }
+
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+inline bool SF_IsArrayOpStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return ((dwStubFlags == ILSTUB_ARRAYOP_GET) ||
+ (dwStubFlags == ILSTUB_ARRAYOP_SET) ||
+ (dwStubFlags == ILSTUB_ARRAYOP_ADDRESS)); }
+#endif
+
+#ifdef FEATURE_STUBS_AS_IL
+inline bool SF_IsMulticastDelegateStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags == ILSTUB_MULTICASTDELEGATE_INVOKE); }
+inline bool SF_IsUnboxingILStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags == ILSTUB_UNBOXINGILSTUB); }
+inline bool SF_IsInstantiatingStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags == ILSTUB_INSTANTIATINGSTUB); }
+#endif
+
+inline bool SF_IsCOMStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_COM)); }
+inline bool SF_IsWinRTStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_WINRT)); }
+inline bool SF_IsCOMLateBoundStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_COMLATEBOUND)); }
+inline bool SF_IsCOMEventCallStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_COMEVENTCALL)); }
+inline bool SF_IsFieldGetterStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_FIELDGETTER)); }
+inline bool SF_IsFieldSetterStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_FIELDSETTER)); }
+inline bool SF_IsWinRTDelegateStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_WINRTDELEGATE)); }
+inline bool SF_IsWinRTCtorStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_WINRTCTOR)); }
+inline bool SF_IsWinRTCompositionStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_WINRTCOMPOSITION)); }
+inline bool SF_IsWinRTStaticStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_WINRTSTATIC)); }
+inline bool SF_IsWinRTSharedGenericStub(DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_WINRTSHAREDGENERIC)); }
+inline bool SF_IsWinRTHasRedirection (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_WINRTHASREDIRECTION)); }
+
+inline bool SF_IsSharedStub(DWORD dwStubFlags)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (SF_IsWinRTHasRedirection(dwStubFlags))
+ {
+ // tail-call to a target-specific mscorlib routine is burned into the stub
+ return false;
+ }
+
+ return !SF_IsFieldGetterStub(dwStubFlags) && !SF_IsFieldSetterStub(dwStubFlags);
+}
+
+inline bool SF_IsForwardStub (DWORD dwStubFlags) { WRAPPER_NO_CONTRACT; return !SF_IsReverseStub(dwStubFlags); }
+
+inline bool SF_IsForwardPInvokeStub (DWORD dwStubFlags) { WRAPPER_NO_CONTRACT; return (!SF_IsCOMStub(dwStubFlags) && SF_IsForwardStub(dwStubFlags)); }
+inline bool SF_IsReversePInvokeStub (DWORD dwStubFlags) { WRAPPER_NO_CONTRACT; return (!SF_IsCOMStub(dwStubFlags) && SF_IsReverseStub(dwStubFlags)); }
+
+inline bool SF_IsForwardCOMStub (DWORD dwStubFlags) { WRAPPER_NO_CONTRACT; return (SF_IsCOMStub(dwStubFlags) && SF_IsForwardStub(dwStubFlags)); }
+inline bool SF_IsReverseCOMStub (DWORD dwStubFlags) { WRAPPER_NO_CONTRACT; return (SF_IsCOMStub(dwStubFlags) && SF_IsReverseStub(dwStubFlags)); }
+
+inline bool SF_IsForwardDelegateStub (DWORD dwStubFlags) { WRAPPER_NO_CONTRACT; return (SF_IsDelegateStub(dwStubFlags) && SF_IsForwardStub(dwStubFlags)); }
+inline bool SF_IsReverseDelegateStub (DWORD dwStubFlags) { WRAPPER_NO_CONTRACT; return (SF_IsDelegateStub(dwStubFlags) && SF_IsReverseStub(dwStubFlags)); }
+
+#undef COM_ONLY
+
+inline void SF_ConsistencyCheck(DWORD dwStubFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Late bound and event calls imply COM
+ CONSISTENCY_CHECK(!(SF_IsCOMLateBoundStub(dwStubFlags) && !SF_IsCOMStub(dwStubFlags)));
+ CONSISTENCY_CHECK(!(SF_IsCOMEventCallStub(dwStubFlags) && !SF_IsCOMStub(dwStubFlags)));
+
+ // Field accessors imply reverse COM
+ CONSISTENCY_CHECK(!(SF_IsFieldGetterStub(dwStubFlags) && !SF_IsReverseCOMStub(dwStubFlags)));
+ CONSISTENCY_CHECK(!(SF_IsFieldSetterStub(dwStubFlags) && !SF_IsReverseCOMStub(dwStubFlags)));
+
+ // Field accessors are always HRESULT swapping
+ CONSISTENCY_CHECK(!(SF_IsFieldGetterStub(dwStubFlags) && !SF_IsHRESULTSwapping(dwStubFlags)));
+ CONSISTENCY_CHECK(!(SF_IsFieldSetterStub(dwStubFlags) && !SF_IsHRESULTSwapping(dwStubFlags)));
+
+ // Reverse and CALLI stubs don't have demands
+ CONSISTENCY_CHECK(!(SF_IsReverseStub(dwStubFlags) && SF_IsStubWithDemand(dwStubFlags)));
+ CONSISTENCY_CHECK(!(SF_IsCALLIStub(dwStubFlags) && SF_IsStubWithDemand(dwStubFlags)));
+
+ // Delegate stubs are not COM
+ CONSISTENCY_CHECK(!(SF_IsDelegateStub(dwStubFlags) && SF_IsCOMStub(dwStubFlags)));
+}
+
+enum ETW_IL_STUB_FLAGS
+{
+ ETW_IL_STUB_FLAGS_REVERSE_INTEROP = 0x00000001,
+ ETW_IL_STUB_FLAGS_COM_INTEROP = 0x00000002,
+ ETW_IL_STUB_FLAGS_NGENED_STUB = 0x00000004,
+ ETW_IL_STUB_FLAGS_DELEGATE = 0x00000008,
+ ETW_IL_STUB_FLAGS_VARARG = 0x00000010,
+ ETW_IL_STUB_FLAGS_UNMANAGED_CALLI = 0x00000020
+};
+
+//---------------------------------------------------------
+// PInvoke has three flavors: DllImport M->U, Delegate M->U and Delegate U->M
+// Each flavor uses roughly the same mechanism to marshal and place the call, and so
+// each flavor supports roughly the same switches. Those switches which can be
+// statically determined via CAs (DllImport, UnmanagedFunctionPointer,
+// BestFitMappingAttribute, etc) or via MetaSig are parsed and unified by this
+// class. There are two flavors of constructor, one for NDirectMethodDescs and one
+// for Delegates.
+//---------------------------------------------------------
+struct PInvokeStaticSigInfo
+{
+public:
+ enum ThrowOnError { THROW_ON_ERROR = TRUE, NO_THROW_ON_ERROR = FALSE };
+
+public:
+ PInvokeStaticSigInfo() { LIMITED_METHOD_CONTRACT; }
+
+ PInvokeStaticSigInfo(Signature sig, Module* pModule, ThrowOnError throwOnError = THROW_ON_ERROR);
+
+ PInvokeStaticSigInfo(MethodDesc* pMdDelegate, ThrowOnError throwOnError = THROW_ON_ERROR);
+
+ PInvokeStaticSigInfo(MethodDesc* pMD, LPCUTF8 *pLibName, LPCUTF8 *pEntryPointName, ThrowOnError throwOnError = THROW_ON_ERROR);
+
+public:
+ void ReportErrors();
+
+private:
+ void InitCallConv(CorPinvokeMap callConv, BOOL bIsVarArg);
+ void DllImportInit(MethodDesc* pMD, LPCUTF8 *pLibName, LPCUTF8 *pEntryPointName);
+ void PreInit(Module* pModule, MethodTable *pClass);
+ void PreInit(MethodDesc* pMD);
+ void SetError(WORD error) { if (!m_error) m_error = error; }
+#ifdef FEATURE_MIXEDMODE
+ void BestGuessNDirectDefaults(MethodDesc* pMD);
+#endif
+
+public:
+ DWORD GetStubFlags()
+ {
+ WRAPPER_NO_CONTRACT;
+ return (GetThrowOnUnmappableChar() ? NDIRECTSTUB_FL_THROWONUNMAPPABLECHAR : 0) |
+ (GetBestFitMapping() ? NDIRECTSTUB_FL_BESTFIT : 0) |
+ (IsDelegateInterop() ? NDIRECTSTUB_FL_DELEGATE : 0);
+ }
+ Module* GetModule() { LIMITED_METHOD_CONTRACT; return m_pModule; }
+ BOOL IsStatic() { LIMITED_METHOD_CONTRACT; return m_wFlags & PINVOKE_STATIC_SIGINFO_IS_STATIC; }
+ void SetIsStatic (BOOL isStatic)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (isStatic)
+ m_wFlags |= PINVOKE_STATIC_SIGINFO_IS_STATIC;
+ else
+ m_wFlags &= ~PINVOKE_STATIC_SIGINFO_IS_STATIC;
+ }
+ BOOL GetThrowOnUnmappableChar() { LIMITED_METHOD_CONTRACT; return m_wFlags & PINVOKE_STATIC_SIGINFO_THROW_ON_UNMAPPABLE_CHAR; }
+ void SetThrowOnUnmappableChar (BOOL throwOnUnmappableChar)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (throwOnUnmappableChar)
+ m_wFlags |= PINVOKE_STATIC_SIGINFO_THROW_ON_UNMAPPABLE_CHAR;
+ else
+ m_wFlags &= ~PINVOKE_STATIC_SIGINFO_THROW_ON_UNMAPPABLE_CHAR;
+ }
+ BOOL GetBestFitMapping() { LIMITED_METHOD_CONTRACT; return m_wFlags & PINVOKE_STATIC_SIGINFO_BEST_FIT; }
+ void SetBestFitMapping (BOOL bestFit)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (bestFit)
+ m_wFlags |= PINVOKE_STATIC_SIGINFO_BEST_FIT;
+ else
+ m_wFlags &= ~PINVOKE_STATIC_SIGINFO_BEST_FIT;
+ }
+ BOOL IsDelegateInterop() { LIMITED_METHOD_CONTRACT; return m_wFlags & PINVOKE_STATIC_SIGINFO_IS_DELEGATE_INTEROP; }
+ void SetIsDelegateInterop (BOOL delegateInterop)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (delegateInterop)
+ m_wFlags |= PINVOKE_STATIC_SIGINFO_IS_DELEGATE_INTEROP;
+ else
+ m_wFlags &= ~PINVOKE_STATIC_SIGINFO_IS_DELEGATE_INTEROP;
+ }
+ CorPinvokeMap GetCallConv() { LIMITED_METHOD_CONTRACT; return m_callConv; }
+ Signature GetSignature() { LIMITED_METHOD_CONTRACT; return m_sig; }
+
+private:
+ Module* m_pModule;
+ Signature m_sig;
+ CorPinvokeMap m_callConv;
+ WORD m_error;
+
+ enum
+ {
+ PINVOKE_STATIC_SIGINFO_IS_STATIC = 0x0001,
+ PINVOKE_STATIC_SIGINFO_THROW_ON_UNMAPPABLE_CHAR = 0x0002,
+ PINVOKE_STATIC_SIGINFO_BEST_FIT = 0x0004,
+
+        COR_NATIVE_LINK_TYPE_MASK = 0x0038,  // 0000 0000 0011 1000 <--- these 3 bits make up the link type mask
+
+        COR_NATIVE_LINK_FLAGS_MASK = 0x00C0, // 0000 0000 1100 0000 <--- these 2 bits make up the link flags mask
+
+ PINVOKE_STATIC_SIGINFO_IS_DELEGATE_INTEROP = 0x0100,
+
+ };
+ #define COR_NATIVE_LINK_TYPE_SHIFT 3 // Keep in synch with above mask
+ #define COR_NATIVE_LINK_FLAGS_SHIFT 6 // Keep in synch with above mask
+ WORD m_wFlags;
+
+ public:
+ CorNativeLinkType GetCharSet() { LIMITED_METHOD_CONTRACT; return (CorNativeLinkType)((m_wFlags & COR_NATIVE_LINK_TYPE_MASK) >> COR_NATIVE_LINK_TYPE_SHIFT); }
+ CorNativeLinkFlags GetLinkFlags() { LIMITED_METHOD_CONTRACT; return (CorNativeLinkFlags)((m_wFlags & COR_NATIVE_LINK_FLAGS_MASK) >> COR_NATIVE_LINK_FLAGS_SHIFT); }
+ void SetCharSet(CorNativeLinkType linktype)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE( linktype == (linktype & (COR_NATIVE_LINK_TYPE_MASK >> COR_NATIVE_LINK_TYPE_SHIFT)));
+ // Clear out the old value first
+ m_wFlags &= (~COR_NATIVE_LINK_TYPE_MASK);
+ // Then set the given value
+ m_wFlags |= (linktype << COR_NATIVE_LINK_TYPE_SHIFT);
+ }
+ void SetLinkFlags(CorNativeLinkFlags linkflags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE( linkflags == (linkflags & (COR_NATIVE_LINK_FLAGS_MASK >> COR_NATIVE_LINK_FLAGS_SHIFT)));
+ // Clear out the old value first
+ m_wFlags &= (~COR_NATIVE_LINK_FLAGS_MASK);
+ // Then set the given value
+ m_wFlags |= (linkflags << COR_NATIVE_LINK_FLAGS_SHIFT);
+ }
+};
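+
+// Bit-packing sketch (illustrative): SetCharSet(linktype) clears
+// COR_NATIVE_LINK_TYPE_MASK and stores (linktype << COR_NATIVE_LINK_TYPE_SHIFT)
+// into bits 3-5 of m_wFlags; GetCharSet() reverses the shift, so the round trip
+// is lossless for any value that fits the 3-bit mask (the _ASSERTEs above
+// enforce this). GetLinkFlags/SetLinkFlags work the same way on bits 6-7.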
+
+
+#include "stubgen.h"
+
+class NDirectStubLinker : public ILStubLinker
+{
+public:
+ NDirectStubLinker(
+ DWORD dwStubFlags,
+ Module* pModule,
+ const Signature &signature,
+ SigTypeContext *pTypeContext,
+ MethodDesc* pTargetMD,
+ int iLCIDParamIdx,
+ BOOL fTargetHasThis,
+ BOOL fStubHasThis);
+
+ void SetCallingConvention(CorPinvokeMap unmngCallConv, BOOL fIsVarArg);
+
+ void Begin(DWORD dwStubFlags);
+ void End(DWORD dwStubFlags);
+ void EmitSetLastError(ILCodeStream* pcsEmit);
+ void DoNDirect(ILCodeStream *pcsEmit, DWORD dwStubFlags, MethodDesc * pStubMD);
+ void EmitLogNativeArgument(ILCodeStream* pslILEmit, DWORD dwPinnedLocal);
+ void LoadCleanupWorkList(ILCodeStream* pcsEmit);
+#ifdef PROFILING_SUPPORTED
+ DWORD EmitProfilerBeginTransitionCallback(ILCodeStream* pcsEmit, DWORD dwStubFlags);
+ void EmitProfilerEndTransitionCallback(ILCodeStream* pcsEmit, DWORD dwStubFlags, DWORD dwMethodDescLocalNum);
+#endif
+#ifdef VERIFY_HEAP
+ void EmitValidateLocal(ILCodeStream* pcsEmit, DWORD dwLocalNum, bool fIsByref, DWORD dwStubFlags);
+ void EmitObjectValidation(ILCodeStream* pcsEmit, DWORD dwStubFlags);
+#endif // VERIFY_HEAP
+ void EmitLoadStubContext(ILCodeStream* pcsEmit, DWORD dwStubFlags);
+#ifdef MDA_SUPPORTED
+ void EmitCallGcCollectForMDA(ILCodeStream *pcsEmit, DWORD dwStubFlags);
+#endif // MDA_SUPPORTED
+ void GenerateInteropParamException(ILCodeStream* pcsEmit);
+ void NeedsCleanupList();
+
+#ifdef FEATURE_COMINTEROP
+ DWORD GetTargetInterfacePointerLocalNum();
+ DWORD GetTargetEntryPointLocalNum();
+ void EmitLoadRCWThis(ILCodeStream *pcsEmit, DWORD dwStubFlags);
+#endif // FEATURE_COMINTEROP
+ DWORD GetCleanupWorkListLocalNum();
+ DWORD GetThreadLocalNum();
+ DWORD GetReturnValueLocalNum();
+ void SetCleanupNeeded();
+ void SetExceptionCleanupNeeded();
+ BOOL IsCleanupWorkListSetup();
+ void GetCleanupFinallyOffsets(ILStubEHClause * pClause);
+ void AdjustTargetStackDeltaForReverseInteropHRESULTSwapping();
+ void AdjustTargetStackDeltaForExtraParam();
+#if defined(_TARGET_X86_) && !defined(FEATURE_CORECLR)
+ DWORD CreateCopyCtorCookie(ILCodeStream* pcsEmit);
+#endif // _TARGET_X86_ && !FEATURE_CORECLR
+
+ void SetInteropParamExceptionInfo(UINT resID, UINT paramIdx);
+ bool HasInteropParamExceptionInfo();
+
+ void ClearCode();
+
+ enum
+ {
+ CLEANUP_INDEX_ARG0_MARSHAL = 0x00000000, // cleanup index of the first argument (marshal and retval unmarshal stream)
+ CLEANUP_INDEX_RETVAL_UNMARSHAL = 0x3fffffff, // cleanup index of the return value (retval unmarshal stream)
+ CLEANUP_INDEX_ARG0_UNMARSHAL = 0x40000000, // cleanup index of the first argument (unmarshal stream)
+ CLEANUP_INDEX_ALL_DONE = 0x7ffffffe // everything was successfully marshaled and unmarshaled, no exception thrown
+ };
+
+ enum ArgCleanupBranchKind
+ {
+ BranchIfMarshaled,
+ BranchIfNotMarshaled
+ };
+
+ void EmitSetArgMarshalIndex(ILCodeStream* pcsEmit, UINT uArgIdx);
+ void EmitCheckForArgCleanup(ILCodeStream* pcsEmit, UINT uArgIdx, ArgCleanupBranchKind branchKind, ILCodeLabel* pSkipCleanupLabel);
+
+ int GetLCIDParamIdx();
+
+ ILCodeStream* GetSetupCodeStream();
+ ILCodeStream* GetMarshalCodeStream();
+ ILCodeStream* GetUnmarshalCodeStream();
+ ILCodeStream* GetReturnUnmarshalCodeStream();
+ ILCodeStream* GetDispatchCodeStream();
+ ILCodeStream* GetCleanupCodeStream();
+ ILCodeStream* GetExceptionCleanupCodeStream();
+
+protected:
+ BOOL IsCleanupNeeded();
+ BOOL IsExceptionCleanupNeeded();
+ void InitCleanupCode();
+ void InitExceptionCleanupCode();
+
+#if defined(_TARGET_X86_) && !defined(FEATURE_CORECLR)
+ BOOL IsCopyCtorStubNeeded();
+#endif // _TARGET_X86_ && !FEATURE_CORECLR
+
+
+ ILCodeStream* m_pcsSetup;
+ ILCodeStream* m_pcsMarshal;
+ ILCodeStream* m_pcsDispatch;
+ ILCodeStream* m_pcsRetUnmarshal;
+ ILCodeStream* m_pcsUnmarshal;
+ ILCodeStream* m_pcsExceptionCleanup;
+ ILCodeStream* m_pcsCleanup;
+
+
+ ILCodeLabel* m_pCleanupTryBeginLabel;
+ ILCodeLabel* m_pCleanupTryEndLabel;
+ ILCodeLabel* m_pCleanupFinallyBeginLabel;
+ ILCodeLabel* m_pCleanupFinallyEndLabel;
+ ILCodeLabel* m_pSkipExceptionCleanupLabel;
+
+#ifdef FEATURE_COMINTEROP
+ DWORD m_dwTargetInterfacePointerLocalNum;
+ DWORD m_dwTargetEntryPointLocalNum;
+ DWORD m_dwWinRTFactoryObjectLocalNum;
+#endif // FEATURE_COMINTEROP
+
+ BOOL m_fHasCleanupCode;
+ BOOL m_fHasExceptionCleanupCode;
+ BOOL m_fCleanupWorkListIsSetup;
+ DWORD m_dwThreadLocalNum; // managed-to-native only
+ DWORD m_dwArgMarshalIndexLocalNum;
+ DWORD m_dwCleanupWorkListLocalNum;
+ DWORD m_dwRetValLocalNum;
+
+#if defined(_TARGET_X86_) && !defined(FEATURE_CORECLR)
+ DWORD m_dwFirstCopyCtorCookieLocalNum; // list head passed to SetCopyCtorCookieChain
+ DWORD m_dwLastCopyCtorCookieLocalNum; // used for chaining the cookies into a linked list
+#endif // _TARGET_X86_ && !FEATURE_CORECLR
+
+ UINT m_ErrorResID;
+ UINT m_ErrorParamIdx;
+ int m_iLCIDParamIdx;
+
+ DWORD m_dwStubFlags;
+};
+
+#ifndef _TARGET_X86_
+// The one static host stub used on !_TARGET_X86_
+EXTERN_C void PInvokeStubForHost(void);
+#endif
+
+#ifdef FEATURE_MIXEDMODE // IJW
+// This attempts to guess whether a target is an API call that uses SetLastError to communicate errors.
+BOOL HeuristicDoesThisLooksLikeAnApiCall(LPBYTE pTarget);
+BOOL HeuristicDoesThisLookLikeAGetLastErrorCall(LPBYTE pTarget);
+DWORD __stdcall FalseGetLastError();
+#endif // FEATURE_MIXEDMODE
+
+class NDirectStubParameters
+{
+public:
+
+ NDirectStubParameters(Signature sig,
+ SigTypeContext* pTypeContext,
+ Module* pModule,
+ Module* pLoaderModule,
+ CorNativeLinkType nlType,
+ CorNativeLinkFlags nlFlags,
+ CorPinvokeMap unmgdCallConv,
+ DWORD dwStubFlags, // NDirectStubFlags
+ int nParamTokens,
+ mdParamDef* pParamTokenArray,
+ int iLCIDArg
+ ) :
+ m_sig(sig),
+ m_pTypeContext(pTypeContext),
+ m_pModule(pModule),
+ m_pLoaderModule(pLoaderModule),
+ m_pParamTokenArray(pParamTokenArray),
+ m_unmgdCallConv(unmgdCallConv),
+ m_nlType(nlType),
+ m_nlFlags(nlFlags),
+ m_dwStubFlags(dwStubFlags),
+ m_iLCIDArg(iLCIDArg),
+ m_nParamTokens(nParamTokens)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ Signature m_sig;
+ SigTypeContext* m_pTypeContext;
+ Module* m_pModule;
+ Module* m_pLoaderModule;
+ mdParamDef* m_pParamTokenArray;
+ CorPinvokeMap m_unmgdCallConv;
+ CorNativeLinkType m_nlType;
+ CorNativeLinkFlags m_nlFlags;
+ DWORD m_dwStubFlags;
+ int m_iLCIDArg;
+ int m_nParamTokens;
+};
+
+PCODE GetILStubForCalli(VASigCookie *pVASigCookie, MethodDesc *pMD);
+
+MethodDesc *GetStubMethodDescFromInteropMethodDesc(MethodDesc* pMD, DWORD dwStubFlags);
+PCODE JitILStub(MethodDesc* pStubMD);
+MethodDesc *RestoreNGENedStub(MethodDesc* pStubMD);
+PCODE GetStubForInteropMethod(MethodDesc* pMD, DWORD dwStubFlags = 0, MethodDesc **ppStubMD = NULL);
+
+#ifdef FEATURE_COMINTEROP
+// Resolve and return the predefined IL stub method
+HRESULT FindPredefinedILStubMethod(MethodDesc *pTargetMD, DWORD dwStubFlags, MethodDesc **ppRetStubMD);
+#endif // FEATURE_COMINTEROP
+
+EXTERN_C BOOL CallNeedsHostHook(size_t target);
+
+#ifndef FEATURE_INCLUDE_ALL_INTERFACES
+//
+// The inlinable implementation allows the compiler to strip all code related to the host hook
+//
+inline BOOL NDirect::IsHostHookEnabled()
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+
+inline BOOL CallNeedsHostHook(size_t target)
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+#endif
+
+//
+// Limit the length of string fields in IL stub ETW events so that a whole
+// IL stub ETW event won't exceed 64KB
+//
+#define ETW_IL_STUB_EVENT_STRING_FIELD_MAXSIZE (1024)
+#define ETW_IL_STUB_EVENT_CODE_STRING_FIELD_MAXSIZE (1024*32)
+
+class SString;
+
+//
+// Truncates an SString by first converting it to Unicode and truncating it
+// if it is larger than bufSize. "..." is appended if it is truncated.
+//
+void TruncateUnicodeString(SString &string, COUNT_T bufSize);
+
+//=======================================================================
+// ILStubCreatorHelper
+// This class is used as a helper in CreateInteropILStub. It mainly wraps the
+// two methods NDirect::GetStubMethodDesc and NDirect::RemoveILStubCacheEntry
+// in a holder. See CreateInteropILStub for more information.
+//=======================================================================
+class ILStubCreatorHelper
+{
+public:
+ ILStubCreatorHelper(MethodDesc *pTargetMD,
+ NDirectStubParameters* pParams
+ ) :
+ m_pTargetMD(pTargetMD),
+ m_pParams(pParams),
+ m_pStubMD(NULL),
+ m_bILStubCreator(false)
+ {
+ STANDARD_VM_CONTRACT;
+ m_pHashParams = NDirect::CreateHashBlob(m_pParams);
+ }
+
+ ~ILStubCreatorHelper()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ RemoveILStubCacheEntry();
+ }
+
+ inline void GetStubMethodDesc()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ m_pStubMD = NDirect::GetStubMethodDesc(m_pTargetMD, m_pParams, m_pHashParams, &m_amTracker, m_bILStubCreator, m_pStubMD);
+ }
+
+ inline void RemoveILStubCacheEntry()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (m_bILStubCreator)
+ {
+ NDirect::RemoveILStubCacheEntry(m_pParams, m_pHashParams);
+ m_bILStubCreator = false;
+ }
+ }
+
+ inline MethodDesc* GetStubMD()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pStubMD;
+ }
+
+ inline void SuppressRelease()
+ {
+ WRAPPER_NO_CONTRACT;
+ m_bILStubCreator = false;
+ m_amTracker.SuppressRelease();
+ }
+
+ DEBUG_NOINLINE static void HolderEnter(ILStubCreatorHelper *pThis)
+ {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ pThis->GetStubMethodDesc();
+ }
+
+ DEBUG_NOINLINE static void HolderLeave(ILStubCreatorHelper *pThis)
+ {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ pThis->RemoveILStubCacheEntry();
+ }
+
+private:
+ MethodDesc* m_pTargetMD;
+ NDirectStubParameters* m_pParams;
+ NewArrayHolder<ILStubHashBlob> m_pHashParams;
+ AllocMemTracker* m_pAmTracker;
+ MethodDesc* m_pStubMD;
+ AllocMemTracker m_amTracker;
+ bool m_bILStubCreator; // Only the creator can remove the ILStub from the Cache
+}; //ILStubCreatorHelper
+
+typedef Wrapper<ILStubCreatorHelper*, ILStubCreatorHelper::HolderEnter, ILStubCreatorHelper::HolderLeave> ILStubCreatorHelperHolder;
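+
+// Minimal usage sketch (illustrative; the real call sites live in dllimport.cpp):
+// the holder invokes GetStubMethodDesc on acquisition and RemoveILStubCacheEntry
+// on release, so a failed stub creation cleans up its own cache entry:
+//
+//   ILStubCreatorHelper helper(pTargetMD, pParams);
+//   ILStubCreatorHelperHolder holder(&helper);  // enter: look up or create the entry
+//   /* ... JIT and publish the stub; may throw ... */
+//   helper.SuppressRelease();                   // success: keep the cache entry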
+
+#endif // __dllimport_h__
diff --git a/src/vm/dllimportcallback.cpp b/src/vm/dllimportcallback.cpp
new file mode 100644
index 0000000000..e3570ef129
--- /dev/null
+++ b/src/vm/dllimportcallback.cpp
@@ -0,0 +1,1522 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: DllImportCallback.cpp
+//
+
+//
+
+
+#include "common.h"
+
+#include "threads.h"
+#include "excep.h"
+#include "object.h"
+#include "dllimportcallback.h"
+#include "mlinfo.h"
+#include "comdelegate.h"
+#include "ceeload.h"
+#include "eeconfig.h"
+#include "dbginterface.h"
+#include "stubgen.h"
+#include "mdaassistants.h"
+#include "appdomain.inl"
+
+#ifndef CROSSGEN_COMPILE
+
+struct UM2MThunk_Args
+{
+ UMEntryThunk *pEntryThunk;
+ void *pAddr;
+ void *pThunkArgs;
+ int argLen;
+};
+
+EXTERN_C void STDCALL UM2MThunk_WrapperHelper(void *pThunkArgs,
+ int argLen,
+ void *pAddr,
+ UMEntryThunk *pEntryThunk,
+ Thread *pThread);
+
+EXTERN_C void __fastcall ReverseEnterRuntimeHelper(Thread *pThread)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // ReverseEnterRuntimeThrowComplus probes.
+ //BEGIN_ENTRYPOINT_THROWS;
+
+ _ASSERTE (pThread == GetThread());
+
+#ifdef FEATURE_STACK_PROBE
+ // The thread is calling into managed code. If we have the following sequence on stack
+ // Managed code 1 -> Unmanaged code -> Managed code 2,
+ // and we hit SO in managed code 2, in order to unwind stack for managed code 1, we need
+ // to make sure the thread is in cooperative gc mode. Due to unmanaged code in between,
+ // when we reach managed code 1, the thread is in preemptive GC mode. In order to switch
+ // to cooperative, we need to have enough stack. This means that we need to reclaim stack
+ // for managed code 2. Therefore we require that we have some amount of stack before entering
+ // managed code 2.
+ RetailStackProbe(static_cast<UINT>(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT)),pThread);
+#endif
+ pThread->ReverseEnterRuntimeThrowComplus();
+ //END_ENTRYPOINT_THROWS
+}
+
+EXTERN_C void __fastcall ReverseLeaveRuntimeHelper(Thread *pThread)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE (pThread == GetThread());
+ pThread->ReverseLeaveRuntime();
+}
+
+#ifdef MDA_SUPPORTED
+EXTERN_C void __fastcall CallbackOnCollectedDelegateHelper(UMEntryThunk *pEntryThunk)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pEntryThunk));
+ }
+ CONTRACTL_END;
+
+ MdaCallbackOnCollectedDelegate* pProbe = MDA_GET_ASSISTANT(CallbackOnCollectedDelegate);
+
+ // This MDA must be active if we generated a call to CallbackOnCollectedDelegateHelper
+ _ASSERTE(pProbe);
+
+ if (pEntryThunk->IsCollected())
+ {
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ pProbe->ReportViolation(pEntryThunk->GetMethod());
+ COMPlusThrow(kNullReferenceException);
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ }
+}
+#endif // MDA_SUPPORTED
+
+// This is used as the target of the callback from DoADCallBack. It sets up the
+// environment and effectively calls back into the thunk that needed to switch ADs.
+void UM2MThunk_Wrapper(LPVOID ptr) // UM2MThunk_Args
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ UM2MThunk_Args *pArgs = (UM2MThunk_Args *) ptr;
+ Thread* pThread = GetThread();
+
+ BEGIN_CALL_TO_MANAGED();
+
+ // return value is saved to pArgs->pThunkArgs
+ UM2MThunk_WrapperHelper(pArgs->pThunkArgs,
+ pArgs->argLen,
+ pArgs->pAddr,
+ pArgs->pEntryThunk,
+ pThread);
+
+ END_CALL_TO_MANAGED();
+}
+
+EXTERN_C void STDCALL UM2MDoADCallBack(UMEntryThunk *pEntryThunk,
+ void *pAddr,
+ void *pArgs,
+ int argLen)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ ENTRY_POINT;
+ PRECONDITION(CheckPointer(pEntryThunk));
+ PRECONDITION(CheckPointer(pArgs));
+ }
+ CONTRACTL_END;
+
+ UM2MThunk_Args args = { pEntryThunk, pAddr, pArgs, argLen };
+
+
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ {
+ AppDomainFromIDHolder domain(pEntryThunk->GetDomainId(),FALSE);
+ domain.ThrowIfUnloaded();
+ if(!domain->CanReversePInvokeEnter())
+ COMPlusThrow(kNotSupportedException);
+ }
+
+ GetThread()->DoADCallBack(pEntryThunk->GetDomainId(), UM2MThunk_Wrapper, &args);
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+}
+
+#ifdef _TARGET_X86_
+
+EXTERN_C VOID __cdecl UMThunkStubRareDisable();
+EXTERN_C Thread* __stdcall CreateThreadBlockThrow();
+
+// argument stack offsets are multiples of sizeof(SLOT) so we can tag them by OR'ing with 1
+static_assert_no_msg((sizeof(SLOT) & 1) == 0);
+#define MAKE_BYVAL_STACK_OFFSET(x) (x)
+#define MAKE_BYREF_STACK_OFFSET(x) ((x) | 1)
+#define IS_BYREF_STACK_OFFSET(x) ((x) & 1)
+#define GET_STACK_OFFSET(x) ((x) & ~1)
+
+// -1 means not used
+#define UNUSED_STACK_OFFSET (UINT)-1
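+
+// Worked example: MAKE_BYREF_STACK_OFFSET(8) == 9, IS_BYREF_STACK_OFFSET(9) is
+// nonzero, and GET_STACK_OFFSET(9) == 8. Byval offsets stay even (multiples of
+// sizeof(SLOT)), so the low bit is free to carry the byref tag.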
+
+// static
+VOID UMEntryThunk::CompileUMThunkWorker(UMThunkStubInfo *pInfo,
+ CPUSTUBLINKER *pcpusl,
+ UINT *psrcofsregs, // NUM_ARGUMENT_REGISTERS elements
+ UINT *psrcofs, // pInfo->m_cbDstStack/STACK_ELEM_SIZE elements
+ UINT retbufofs) // the large structure return buffer ptr arg offset (if any)
+{
+ STANDARD_VM_CONTRACT;
+
+ CodeLabel* pSetupThreadLabel = pcpusl->NewCodeLabel();
+ CodeLabel* pRejoinThreadLabel = pcpusl->NewCodeLabel();
+ CodeLabel* pDisableGCLabel = pcpusl->NewCodeLabel();
+ CodeLabel* pRejoinGCLabel = pcpusl->NewCodeLabel();
+ CodeLabel* pDoADCallBackLabel = pcpusl->NewCodeLabel();
+ CodeLabel* pDoneADCallBackLabel = pcpusl->NewCodeLabel();
+ CodeLabel* pADCallBackEpilog = pcpusl->NewCodeLabel();
+ CodeLabel* pDoADCallBackStartLabel = pcpusl->NewAbsoluteCodeLabel();
+
+ // We come into this code with UMEntryThunk in EAX
+ const X86Reg kEAXentryThunk = kEAX;
+
+ // For ThisCall, we make it look like a normal stdcall so that
+ // the rest of the code (like repushing the arguments) does not
+ // have to worry about it.
+
+ if (pInfo->m_wFlags & umtmlThisCall)
+ {
+ // pop off the return address into EDX
+ pcpusl->X86EmitPopReg(kEDX);
+
+ if (pInfo->m_wFlags & umtmlThisCallHiddenArg)
+ {
+            // exchange ecx ("this") with the hidden structure return buffer
+ // xchg ecx, [esp]
+ pcpusl->X86EmitOp(0x87, kECX, (X86Reg)4 /*ESP*/);
+ }
+
+        // jam ecx (the "this" param) onto the stack. Now it looks like a normal stdcall.
+ pcpusl->X86EmitPushReg(kECX);
+
+ // push edx - repush the return address
+ pcpusl->X86EmitPushReg(kEDX);
+ }
+
+ // Setup the EBP frame
+ pcpusl->X86EmitPushEBPframe();
+
+ // Save EBX
+ pcpusl->X86EmitPushReg(kEBX);
+
+ // Make space for return value - instead of repeatedly doing push eax edx <trash regs> pop edx eax
+ // we will save the return value once and restore it just before returning.
+ pcpusl->X86EmitSubEsp(sizeof(PCONTEXT(NULL)->Eax) + sizeof(PCONTEXT(NULL)->Edx));
+
+ // Load thread descriptor into ECX
+ const X86Reg kECXthread = kECX;
+
+ // save UMEntryThunk
+ pcpusl->X86EmitPushReg(kEAXentryThunk);
+
+ pcpusl->EmitSetup(pSetupThreadLabel);
+
+ pcpusl->X86EmitMovRegReg(kECX, kEBX);
+
+ pcpusl->EmitLabel(pRejoinThreadLabel);
+
+ // restore UMEntryThunk
+ pcpusl->X86EmitPopReg(kEAXentryThunk);
+
+#ifdef _DEBUG
+ // Save incoming registers
+ pcpusl->X86EmitPushReg(kEAXentryThunk); // UMEntryThunk
+ pcpusl->X86EmitPushReg(kECXthread); // thread descriptor
+
+ pcpusl->X86EmitPushReg(kEAXentryThunk);
+ pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID) LogUMTransition), 4);
+
+ // Restore registers
+ pcpusl->X86EmitPopReg(kECXthread);
+ pcpusl->X86EmitPopReg(kEAXentryThunk);
+#endif
+
+#ifdef PROFILING_SUPPORTED
+ // Notify profiler of transition into runtime, before we disable preemptive GC
+ if (CORProfilerTrackTransitions())
+ {
+ // Load the methoddesc into EBX (UMEntryThunk->m_pMD)
+ pcpusl->X86EmitIndexRegLoad(kEBX, kEAXentryThunk, UMEntryThunk::GetOffsetOfMethodDesc());
+
+ // Save registers
+ pcpusl->X86EmitPushReg(kEAXentryThunk); // UMEntryThunk
+ pcpusl->X86EmitPushReg(kECXthread); // pCurThread
+
+ // Push arguments and notify profiler
+ pcpusl->X86EmitPushImm32(COR_PRF_TRANSITION_CALL); // Reason
+ pcpusl->X86EmitPushReg(kEBX); // MethodDesc*
+ pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID)ProfilerUnmanagedToManagedTransitionMD), 8);
+
+ // Restore registers
+ pcpusl->X86EmitPopReg(kECXthread);
+ pcpusl->X86EmitPopReg(kEAXentryThunk);
+
+ // Push the MethodDesc* (in EBX) for use by the transition on the way out.
+ pcpusl->X86EmitPushReg(kEBX);
+ }
+#endif // PROFILING_SUPPORTED
+
+ pcpusl->EmitDisable(pDisableGCLabel, TRUE, kECXthread);
+
+ pcpusl->EmitLabel(pRejoinGCLabel);
+
+ // construct a FrameHandlerExRecord
+
+ // push [ECX]Thread.m_pFrame - corresponding to FrameHandlerExRecord::m_pEntryFrame
+ pcpusl->X86EmitIndexPush(kECXthread, offsetof(Thread, m_pFrame));
+
+ // push offset FastNExportExceptHandler
+ pcpusl->X86EmitPushImm32((INT32)(size_t)FastNExportExceptHandler);
+
+ // push fs:[0]
+ const static BYTE codeSEH1[] = { 0x64, 0xFF, 0x35, 0x0, 0x0, 0x0, 0x0};
+ pcpusl->EmitBytes(codeSEH1, sizeof(codeSEH1));
+
+ // link in the exception frame
+ // mov dword ptr fs:[0], esp
+ const static BYTE codeSEH2[] = { 0x64, 0x89, 0x25, 0x0, 0x0, 0x0, 0x0};
+ pcpusl->EmitBytes(codeSEH2, sizeof(codeSEH2));
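+
+ // Together the two raw byte sequences above complete a classic x86 SEH prolog:
+ // fs:[0] holds the head of the per-thread exception registration chain, so
+ // pushing [fs:0] saves the old head and storing ESP back into fs:[0] links the
+ // FrameHandlerExRecord just built on the stack as the new head.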
+
+ // EBX will hold address of start of arguments. Calculate here so the AD switch case can access
+ // the arguments at their original location rather than re-copying them to the inner frame.
+ // lea ebx, [ebp + 8]
+ pcpusl->X86EmitIndexLea(kEBX, kEBP, 8);
+
+ // Load pThread->m_pDomain into edx
+ // mov edx,[ecx + offsetof(Thread, m_pAppDomain)]
+ pcpusl->X86EmitIndexRegLoad(kEDX, kECXthread, Thread::GetOffsetOfAppDomain());
+
+ // Load pThread->m_pAppDomain->m_dwId into edx
+ // mov edx,[edx + offsetof(AppDomain, m_dwId)]
+ pcpusl->X86EmitIndexRegLoad(kEDX, kEDX, AppDomain::GetOffsetOfId());
+
+ // check if the app domain of the thread matches that of delegate
+ // cmp edx,[eax + offsetof(UMEntryThunk, m_dwDomainId))]
+ pcpusl->X86EmitOffsetModRM(0x3b, kEDX, kEAXentryThunk, offsetof(UMEntryThunk, m_dwDomainId));
+
+ // jne pDoADCallBackLabel ; mismatch. This will call back into the stub with the
+ // correct AppDomain through DoADCallBack
+ pcpusl->X86EmitCondJump(pDoADCallBackLabel, X86CondCode::kJNE);
+
+ //
+ // ----------------------------------------------------------------------------------------------
+ //
+ // From this point on (until noted) we might be executing as the result of calling into the
+ // runtime in order to switch AppDomain. In order for the following code to function in both
+ // scenarios it must be careful when making assumptions about the current stack layout (in the AD
+ // switch case a new inner frame has been pushed which is not identical to the original outer
+ // frame).
+ //
+ // Our guaranteed state at this point is as follows:
+ // EAX: Pointer to UMEntryThunk
+ // EBX: Pointer to start of caller's arguments
+ // ECX: Pointer to current Thread
+ // EBP: Equals EBX - 8 (no AD switch) or unspecified (AD switch)
+ //
+ // Stack:
+ //
+ // +-------------------------+
+ // ESP + 0 | |
+ //
+ // | Varies |
+ //
+ // | |
+ // +-------------------------+
+ // EBX - 20 | Saved Result: EDX/ST(0) |
+ // +- - - - - - - - - - - - -+
+ // EBX - 16 | Saved Result: EAX/ST(0) |
+ // +-------------------------+
+ // EBX - 12 | Caller's EBX |
+ // +-------------------------+
+ // EBX - 8 | Caller's EBP |
+ // +-------------------------+
+ // EBX - 4 | Return address |
+ // +-------------------------+
+ // EBX + 0 | |
+ //
+ // | Caller's arguments |
+ //
+ // | |
+ // +-------------------------+
+ //
+
+ // It's important that the "restart" after an AppDomain switch will skip
+ // the check for g_TrapReturningThreads. That's because, during shutdown,
+ // we can only go through the UMThunkStubRareDisable pathway if we have
+ // not yet pushed a frame. (Once pushed, the frame cannot be popped
+ // without coordinating with the GC. During shutdown, such coordination
+ // would deadlock).
+ pcpusl->EmitLabel(pDoADCallBackStartLabel);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (NDirect::IsHostHookEnabled())
+ {
+ // We call ReverseEnterRuntimeHelper before we link a frame.
+ // So we know that when an exception unwinds through our ReverseEnterRuntimeFrame,
+ // we need to call ReverseLeaveRuntime.
+
+ // save registers
+ pcpusl->X86EmitPushReg(kEAXentryThunk);
+ pcpusl->X86EmitPushReg(kECXthread);
+
+ // ecx still has Thread
+ // ReverseEnterRuntimeHelper is a fast call
+ pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID)ReverseEnterRuntimeHelper), 0);
+
+ // restore registers
+ pcpusl->X86EmitPopReg(kECXthread);
+ pcpusl->X86EmitPopReg(kEAXentryThunk);
+
+ // push reg; leave room for m_next
+ pcpusl->X86EmitPushReg(kDummyPushReg);
+
+ // push IMM32 ; push Frame vptr
+ pcpusl->X86EmitPushImm32((UINT32)(size_t)ReverseEnterRuntimeFrame::GetMethodFrameVPtr());
+
+ // mov edx, esp ;; set EDX -> new frame
+ pcpusl->X86EmitMovRegSP(kEDX);
+
+ // push IMM32 ; push gsCookie
+ pcpusl->X86EmitPushImmPtr((LPVOID)GetProcessGSCookie());
+
+ // save UMEntryThunk
+ pcpusl->X86EmitPushReg(kEAXentryThunk);
+
+ // mov eax,[ecx + Thread.GetFrame()] ;; get previous frame
+ pcpusl->X86EmitIndexRegLoad(kEAXentryThunk, kECXthread, Thread::GetOffsetOfCurrentFrame());
+
+ // mov [edx + Frame.m_next], eax
+ pcpusl->X86EmitIndexRegStore(kEDX, Frame::GetOffsetOfNextLink(), kEAX);
+
+ // mov [ecx + Thread.GetFrame()], edx
+ pcpusl->X86EmitIndexRegStore(kECXthread, Thread::GetOffsetOfCurrentFrame(), kEDX);
+
+ // restore EAX
+ pcpusl->X86EmitPopReg(kEAXentryThunk);
+ }
+#endif
+
+#ifdef MDA_SUPPORTED
+ if ((pInfo->m_wFlags & umtmlSkipStub) && !(pInfo->m_wFlags & umtmlIsStatic) &&
+ MDA_GET_ASSISTANT(CallbackOnCollectedDelegate))
+ {
+ // save registers
+ pcpusl->X86EmitPushReg(kEAXentryThunk);
+ pcpusl->X86EmitPushReg(kECXthread);
+
+ // CallbackOnCollectedDelegateHelper is a fast call
+ pcpusl->X86EmitMovRegReg(kECX, kEAXentryThunk);
+ pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID)CallbackOnCollectedDelegateHelper), 0);
+
+ // restore registers
+ pcpusl->X86EmitPopReg(kECXthread);
+ pcpusl->X86EmitPopReg(kEAXentryThunk);
+ }
+#endif
+
+ // save the thread pointer
+ pcpusl->X86EmitPushReg(kECXthread);
+
+ // reserve the space for call slot
+ pcpusl->X86EmitSubEsp(4);
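+ // (The managed target's address will be written into this slot later and invoked
+ // via `call [esp+ofs]`; keeping the target in a stack slot avoids reserving a
+ // register for it while ECX/EDX are being loaded with arguments.)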
+
+ // remember stack size for offset computations
+ INT iStackSizeAtCallSlot = pcpusl->GetStackSize();
+
+ if (!(pInfo->m_wFlags & umtmlSkipStub))
+ {
+ // save EDI (it's used by the IL stub invocation code)
+ pcpusl->X86EmitPushReg(kEDI);
+ }
+
+ // repush any stack arguments
+ int arg = pInfo->m_cbDstStack/STACK_ELEM_SIZE;
+
+ while (arg--)
+ {
+ if (IS_BYREF_STACK_OFFSET(psrcofs[arg]))
+ {
+ // lea ecx, [ebx + ofs]
+ pcpusl->X86EmitIndexLea(kECX, kEBX, GET_STACK_OFFSET(psrcofs[arg]));
+
+ // push ecx
+ pcpusl->X86EmitPushReg(kECX);
+ }
+ else
+ {
+ // push dword ptr [ebx + ofs]
+ pcpusl->X86EmitIndexPush(kEBX, GET_STACK_OFFSET(psrcofs[arg]));
+ }
+ }
+
+ // load register arguments
+ int regidx = 0;
+
+#define ARGUMENT_REGISTER(regname) \
+ if (psrcofsregs[regidx] != UNUSED_STACK_OFFSET) \
+ { \
+ if (IS_BYREF_STACK_OFFSET(psrcofsregs[regidx])) \
+ { \
+ /* lea reg, [ebx + ofs] */ \
+ pcpusl->X86EmitIndexLea(k##regname, kEBX, GET_STACK_OFFSET(psrcofsregs[regidx])); \
+ } \
+ else \
+ { \
+ /* mov reg, [ebx + ofs] */ \
+ pcpusl->X86EmitIndexRegLoad(k##regname, kEBX, GET_STACK_OFFSET(psrcofsregs[regidx])); \
+ } \
+ } \
+ regidx++;
+
+ ENUM_ARGUMENT_REGISTERS_BACKWARD();
+
+#undef ARGUMENT_REGISTER
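+
+ // On x86, ENUM_ARGUMENT_REGISTERS_BACKWARD expands ARGUMENT_REGISTER once per
+ // argument register (EDX, then ECX), so each register is either loaded with the
+ // incoming stack slot, loaded with a pointer to it (the byref case), or left
+ // untouched when the signature does not use it.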
+
+ if (!(pInfo->m_wFlags & umtmlSkipStub))
+ {
+ //
+ // Call the IL stub which will:
+ // 1) marshal
+ // 2) call the managed method
+ // 3) unmarshal
+ //
+
+ // the delegate object is extracted by the stub from UMEntryThunk
+ _ASSERTE(pInfo->m_wFlags & umtmlIsStatic);
+
+ // mov EDI, [EAX + UMEntryThunk.m_pUMThunkMarshInfo]
+ pcpusl->X86EmitIndexRegLoad(kEDI, kEAXentryThunk, offsetof(UMEntryThunk, m_pUMThunkMarshInfo));
+
+ // mov EDI, [EDI + UMThunkMarshInfo.m_pILStub]
+ pcpusl->X86EmitIndexRegLoad(kEDI, kEDI, UMThunkMarshInfo::GetOffsetOfStub());
+
+ // EAX still contains the UMEntryThunk pointer, so we cannot really use SCRATCHREG;
+ // we can use EDI, though
+
+ INT iCallSlotOffset = pcpusl->GetStackSize() - iStackSizeAtCallSlot;
+
+ // mov [ESP+iCallSlotOffset], EDI
+ pcpusl->X86EmitIndexRegStore((X86Reg)kESP_Unsafe, iCallSlotOffset, kEDI);
+
+ // call [ESP+iCallSlotOffset]
+ pcpusl->X86EmitOp(0xff, (X86Reg)2, (X86Reg)kESP_Unsafe, iCallSlotOffset);
+
+ // Emit a NOP so we know that we can call managed code
+ INDEBUG(pcpusl->Emit8(X86_INSTR_NOP));
+
+ // restore EDI
+ pcpusl->X86EmitPopReg(kEDI);
+ }
+ else if (!(pInfo->m_wFlags & umtmlIsStatic))
+ {
+ //
+ // This is call on delegate
+ //
+
+ // mov THIS, [EAX + UMEntryThunk.m_pObjectHandle]
+ pcpusl->X86EmitOp(0x8b, THIS_kREG, kEAXentryThunk, offsetof(UMEntryThunk, m_pObjectHandle));
+
+ // mov THIS, [THIS]
+ pcpusl->X86EmitOp(0x8b, THIS_kREG, THIS_kREG);
+
+ //
+ // Inline Delegate.Invoke for perf
+ //
+
+ // mov SCRATCHREG, [THISREG + Delegate.FP] ; Save target stub in register
+ pcpusl->X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfMethodPtr());
+
+ // mov THISREG, [THISREG + Delegate.OR] ; replace "this" pointer
+ pcpusl->X86EmitIndexRegLoad(THIS_kREG, THIS_kREG, DelegateObject::GetOffsetOfTarget());
+
+ INT iCallSlotOffset = pcpusl->GetStackSize() - iStackSizeAtCallSlot;
+
+ // mov [ESP+iCallSlotOffset], SCRATCHREG
+ pcpusl->X86EmitIndexRegStore((X86Reg)kESP_Unsafe,iCallSlotOffset,SCRATCH_REGISTER_X86REG);
+
+ // call [ESP+iCallSlotOffset]
+ pcpusl->X86EmitOp(0xff, (X86Reg)2, (X86Reg)kESP_Unsafe, iCallSlotOffset);
+
+ INDEBUG(pcpusl->Emit8(X86_INSTR_NOP)); // Emit a NOP so we know that we can call managed code
+ }
+ else
+ {
+ //
+ // Call the managed method
+ //
+
+ INT iCallSlotOffset = pcpusl->GetStackSize() - iStackSizeAtCallSlot;
+
+ // mov SCRATCH, [SCRATCH + offsetof(UMEntryThunk.m_pManagedTarget)]
+ pcpusl->X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, SCRATCH_REGISTER_X86REG, offsetof(UMEntryThunk, m_pManagedTarget));
+
+ // mov [ESP+iCallSlotOffset], SCRATCHREG
+ pcpusl->X86EmitIndexRegStore((X86Reg)kESP_Unsafe, iCallSlotOffset, SCRATCH_REGISTER_X86REG);
+
+ // call [ESP+iCallSlotOffset]
+ pcpusl->X86EmitOp(0xff, (X86Reg)2, (X86Reg)kESP_Unsafe, iCallSlotOffset);
+
+ INDEBUG(pcpusl->Emit8(X86_INSTR_NOP)); // Emit a NOP so we know that we can call managed code
+ }
+
+ // skip the call slot
+ pcpusl->X86EmitAddEsp(4);
+
+ // Save the return value to the outer frame
+ if (pInfo->m_wFlags & umtmlFpu)
+ {
+ // save FP return value
+
+ // fstp qword ptr [ebx - 0x8 - 0xc]
+ pcpusl->X86EmitOffsetModRM(0xdd, (X86Reg)3, kEBX, -0x8 /* to outer EBP */ -0xc /* skip saved EBP, EBX */);
+ }
+ else
+ {
+ // save EDX:EAX
+ if (retbufofs == UNUSED_STACK_OFFSET)
+ {
+ pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0x8 /* skip saved EBP, EBX */, kEAX);
+ pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0xc /* skip saved EBP, EBX, EAX */, kEDX);
+ }
+ else
+ {
+ // pretend that the method returned the ret buf hidden argument
+ // (the structure ptr); C++ compiler seems to rely on this
+
+ // mov dword ptr eax, [ebx + retbufofs]
+ pcpusl->X86EmitIndexRegLoad(kEAX, kEBX, retbufofs);
+
+ // save it as the return value
+ pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0x8 /* skip saved EBP, EBX */, kEAX);
+ }
+ }
+
+ // restore the thread pointer
+ pcpusl->X86EmitPopReg(kECXthread);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (NDirect::IsHostHookEnabled())
+ {
+#ifdef _DEBUG
+ // lea edx, [esp + sizeof(GSCookie)] ; edx <- current Frame
+ pcpusl->X86EmitEspOffset(0x8d, kEDX, sizeof(GSCookie));
+ pcpusl->EmitCheckGSCookie(kEDX, ReverseEnterRuntimeFrame::GetOffsetOfGSCookie());
+#endif
+
+ // Remove our frame
+ // Get the previous frame into EDX
+ // mov edx, [esp + GSCookie + Frame.m_next]
+ static const BYTE initArg1[] = { 0x8b, 0x54, 0x24, 0x08 }; // mov edx, [esp+8]
+ _ASSERTE(ReverseEnterRuntimeFrame::GetNegSpaceSize() + Frame::GetOffsetOfNextLink() == 0x8);
+ pcpusl->EmitBytes(initArg1, sizeof(initArg1));
+
+ // mov [ecx + Thread.GetFrame()], edx
+ pcpusl->X86EmitIndexRegStore(kECXthread, Thread::GetOffsetOfCurrentFrame(), kEDX);
+
+ // pop off stack
+ // add esp, 8
+ pcpusl->X86EmitAddEsp(sizeof(GSCookie) + sizeof(ReverseEnterRuntimeFrame));
+
+ // Save pThread
+ pcpusl->X86EmitPushReg(kECXthread);
+
+ // ReverseEnterRuntimeHelper is a fast call
+ pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID)ReverseLeaveRuntimeHelper), 0);
+
+ // Restore pThread
+ pcpusl->X86EmitPopReg(kECXthread);
+ }
+#endif
+
+ // Check whether we got here via the switch AD case. We can tell this by looking at whether the
+ // caller's arguments immediately precede our EBP frame (they will for the non-switch case but
+ // otherwise we will have pushed several frames in the interim). If we did switch now is the time
+ // to jump to our inner epilog which will clean up the inner stack frame and return to the runtime
+ // AD switching code.
+
+ // Does EBX (argument pointer) == EBP + 8?
+ // sub ebx, 8
+ pcpusl->X86EmitSubReg(kEBX, 8);
+
+ // cmp ebx, ebp
+ pcpusl->X86EmitR2ROp(0x3B, kEBX, kEBP);
+
+ // jne pADCallBackEpilog
+ pcpusl->X86EmitCondJump(pADCallBackEpilog, X86CondCode::kJNE);
+
+ //
+ // Once we reach this point in the code we're back to a single scenario: the outer frame of the
+ // reverse p/invoke. Either we never had to switch AppDomains or the AD switch code has already
+ // unwound and returned here to pop off the outer frame.
+ //
+ // ----------------------------------------------------------------------------------------------
+ //
+
+ pcpusl->EmitLabel(pDoneADCallBackLabel);
+
+ // mov byte ptr [ecx + Thread.m_fPreemptiveGCDisabled],0
+ pcpusl->X86EmitOffsetModRM(0xc6, (X86Reg)0, kECXthread, Thread::GetOffsetOfGCFlag());
+ pcpusl->Emit8(0);
+
+ CodeLabel *pRareEnable, *pEnableRejoin;
+ pRareEnable = pcpusl->NewCodeLabel();
+ pEnableRejoin = pcpusl->NewCodeLabel();
+
+ // test byte ptr [ecx + Thread.m_State], TS_CatchAtSafePoint
+ pcpusl->X86EmitOffsetModRM(0xf6, (X86Reg)0, kECXthread, Thread::GetOffsetOfState());
+ pcpusl->Emit8(Thread::TS_CatchAtSafePoint);
+
+ pcpusl->X86EmitCondJump(pRareEnable,X86CondCode::kJNZ);
+
+ pcpusl->EmitLabel(pEnableRejoin);
+
+ // *** unhook SEH frame
+
+ // mov edx,[esp] ;;pointer to the next exception record
+ pcpusl->X86EmitEspOffset(0x8B, kEDX, 0);
+
+ // mov dword ptr fs:[0], edx
+ static const BYTE codeSEH[] = { 0x64, 0x89, 0x15, 0x0, 0x0, 0x0, 0x0 };
+ pcpusl->EmitBytes(codeSEH, sizeof(codeSEH));
+
+ // deallocate SEH frame
+ pcpusl->X86EmitAddEsp(sizeof(FrameHandlerExRecord));
+
+#ifdef PROFILING_SUPPORTED
+ if (CORProfilerTrackTransitions())
+ {
+ // Load the MethodDesc* we pushed on the entry transition into EBX.
+ pcpusl->X86EmitPopReg(kEBX);
+
+ // Save registers
+ pcpusl->X86EmitPushReg(kECX);
+
+ // Push arguments and notify profiler
+ pcpusl->X86EmitPushImm32(COR_PRF_TRANSITION_RETURN); // Reason
+ pcpusl->X86EmitPushReg(kEBX); // MethodDesc*
+ pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID)ProfilerManagedToUnmanagedTransitionMD), 8);
+
+ // Restore registers
+ pcpusl->X86EmitPopReg(kECX);
+ }
+#endif // PROFILING_SUPPORTED
+
+ // Load the saved return value
+ if (pInfo->m_wFlags & umtmlFpu)
+ {
+ // fld qword ptr [esp]
+ pcpusl->Emit8(0xdd);
+ pcpusl->Emit16(0x2404);
+
+ pcpusl->X86EmitAddEsp(8);
+ }
+ else
+ {
+ pcpusl->X86EmitPopReg(kEDX);
+ pcpusl->X86EmitPopReg(kEAX);
+ }
+
+ // Restore EBX, which was saved in prolog
+ pcpusl->X86EmitPopReg(kEBX);
+
+ pcpusl->X86EmitPopReg(kEBP);
+
+ //retn n
+ pcpusl->X86EmitReturn(pInfo->m_cbRetPop);
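+
+ // (`ret n` pops the return address and then n more bytes of arguments, giving the
+ // callee-pop stdcall/thiscall semantics; m_cbRetPop is 0 for cdecl, where the
+ // unmanaged caller cleans up the stack itself.)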
+
+ //-------------------------------------------------------------
+ // coming here if the thread is not set up yet
+ //
+
+ pcpusl->EmitLabel(pSetupThreadLabel);
+
+ // call CreateThreadBlockThrow
+ pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID) CreateThreadBlockThrow), 0);
+
+ // mov ecx,eax
+ pcpusl->Emit16(0xc189);
+
+ // jump back into the main code path
+ pcpusl->X86EmitNearJump(pRejoinThreadLabel);
+
+ //-------------------------------------------------------------
+ // coming here if g_TrapReturningThreads was true
+ //
+
+ pcpusl->EmitLabel(pDisableGCLabel);
+
+ // call UMThunkStubRareDisable. This may throw if we are not allowed
+ // to enter. Note that we have not set up our SEH yet (deliberately).
+ // This is important to handle the case where we cannot enter the CLR
+ // during shutdown and cannot coordinate with the GC because of
+ // deadlocks.
+ pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID) UMThunkStubRareDisable), 0);
+
+ // jump back into the main code path
+ pcpusl->X86EmitNearJump(pRejoinGCLabel);
+
+ //-------------------------------------------------------------
+ // coming here if appdomain didn't match
+ //
+
+ pcpusl->EmitLabel(pDoADCallBackLabel);
+
+ // We will call DoADCallBack, which calls into managed code to switch ADs and then calls us
+ // back. So when we come in the second time, the ADs will match and we just keep processing.
+ // We need to set up the parameters to pass to DoADCallBack, one of which is an address inside
+ // the stub that will branch back to the top of the stub to start again. The parameters must
+ // also be set up so that when we return from the second call, we pop things properly.
+
+ // save thread pointer
+ pcpusl->X86EmitPushReg(kECXthread);
+
+ // push values for UM2MThunk_Args
+
+ // Move address of args (EBX) into EDX since some paths below use EBX.
+ pcpusl->X86EmitMovRegReg(kEDX, kEBX);
+
+ // size of args
+ pcpusl->X86EmitPushImm32(pInfo->m_cbSrcStack);
+
+ // address of args
+ pcpusl->X86EmitPushReg(kEDX);
+
+ // addr to call
+ pcpusl->X86EmitPushImm32(*pDoADCallBackStartLabel);
+
+ // UMEntryThunk
+ pcpusl->X86EmitPushReg(kEAXentryThunk);
+
+ // call UM2MDoADCallBack
+ pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID) UM2MDoADCallBack), 8);
+
+ // We need to clear the thread pointer off the top of the stack and place it in ECX; a single pop does both.
+ pcpusl->X86EmitPopReg(kECX);
+
+ // Re-join the original stub to perform the last parts of the epilog.
+ pcpusl->X86EmitNearJump(pDoneADCallBackLabel);
+
+ //-------------------------------------------------------------
+ // Coming here for rare case when enabling GC pre-emptive mode
+ //
+
+ pcpusl->EmitLabel(pRareEnable);
+
+ // Thread object is expected to be in EBX. So first save caller's EBX
+ pcpusl->X86EmitPushReg(kEBX);
+ // mov ebx, ecx
+ pcpusl->X86EmitMovRegReg(kEBX, kECXthread);
+
+ pcpusl->EmitRareEnable(NULL);
+
+ // restore ebx
+ pcpusl->X86EmitPopReg(kEBX);
+
+ // return to mainline of function
+ pcpusl->X86EmitNearJump(pEnableRejoin);
+
+ //-------------------------------------------------------------
+ // Coming here when we switched AppDomain and have successfully called the target. We must return
+ // into the runtime code (which will eventually unwind the AD transition and return us to the
+ // mainline stub in order to run the outer epilog).
+ //
+
+ pcpusl->EmitLabel(pADCallBackEpilog);
+ pcpusl->X86EmitReturn(0);
+}
+
+// Compiles an unmanaged to managed thunk for the given signature.
+Stub *UMThunkMarshInfo::CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStaticSigInfo* pSigInfo, MetaSig *pMetaSig, BOOL fNoStub)
+{
+ STANDARD_VM_CONTRACT;
+
+ // stub is always static
+ BOOL fIsStatic = (fNoStub ? pSigInfo->IsStatic() : TRUE);
+
+ ArgIterator argit(pMetaSig);
+
+ UINT nStackBytes = argit.SizeOfArgStack();
+ _ASSERTE((nStackBytes % STACK_ELEM_SIZE) == 0);
+
+ // size of stack passed to us from unmanaged, may be bigger than nStackBytes if there are
+ // parameters with copy constructors where we perform value-to-reference transformation
+ UINT nStackBytesIncoming = nStackBytes;
+
+ UINT *psrcofs = (UINT *)_alloca((nStackBytes / STACK_ELEM_SIZE) * sizeof(UINT));
+ UINT psrcofsregs[NUM_ARGUMENT_REGISTERS];
+ UINT retbufofs = UNUSED_STACK_OFFSET;
+
+ for (int i = 0; i < NUM_ARGUMENT_REGISTERS; i++)
+ psrcofsregs[i] = UNUSED_STACK_OFFSET;
+
+ UINT nNumArgs = pMetaSig->NumFixedArgs();
+
+ UINT nOffset = 0;
+ int numRegistersUsed = 0;
+ int numStackSlotsIndex = nStackBytes / STACK_ELEM_SIZE;
+
+ // process this
+ if (!fIsStatic)
+ {
+ // just reserve ECX, instance target is special-cased in the thunk compiler
+ numRegistersUsed++;
+ }
+
+ // process the return buffer parameter
+ if (argit.HasRetBuffArg())
+ {
+ numRegistersUsed++;
+ _ASSERTE(numRegistersUsed - 1 < NUM_ARGUMENT_REGISTERS);
+ psrcofsregs[NUM_ARGUMENT_REGISTERS - numRegistersUsed] = nOffset;
+ retbufofs = nOffset;
+
+ nOffset += StackElemSize(sizeof(LPVOID));
+ }
+
+ // process ordinary parameters
+ for (DWORD i = nNumArgs; i > 0; i--)
+ {
+ TypeHandle thValueType;
+ CorElementType type = pMetaSig->NextArgNormalized(&thValueType);
+
+ UINT cbSize = MetaSig::GetElemSize(type, thValueType);
+
+ BOOL fPassPointer = FALSE;
+ if (!fNoStub && type == ELEMENT_TYPE_PTR)
+ {
+ // this is a copy-constructed argument - get its size
+ TypeHandle thPtr = pMetaSig->GetLastTypeHandleThrowing();
+
+ _ASSERTE(thPtr.IsPointer());
+ cbSize = thPtr.AsTypeDesc()->GetTypeParam().GetSize();
+
+ // the incoming stack may be bigger than the outgoing (IL stub) stack
+ nStackBytesIncoming += (StackElemSize(cbSize) - StackElemSize(sizeof(LPVOID)));
+ fPassPointer = TRUE;
+ }
+
+ if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type))
+ {
+ _ASSERTE(numRegistersUsed - 1 < NUM_ARGUMENT_REGISTERS);
+ psrcofsregs[NUM_ARGUMENT_REGISTERS - numRegistersUsed] =
+ (fPassPointer ?
+ MAKE_BYREF_STACK_OFFSET(nOffset) : // the register will get pointer to the incoming stack slot
+ MAKE_BYVAL_STACK_OFFSET(nOffset)); // the register will get the incoming stack slot
+ }
+ else if (fPassPointer)
+ {
+ // the stack slot will get pointer to the incoming stack slot
+ psrcofs[--numStackSlotsIndex] = MAKE_BYREF_STACK_OFFSET(nOffset);
+ }
+ else
+ {
+ // stack slots will get incoming stack slots (we may need more stack slots for larger parameters)
+ for (UINT nSlotOfs = StackElemSize(cbSize); nSlotOfs > 0; nSlotOfs -= STACK_ELEM_SIZE)
+ {
+ // note the reverse order here which is necessary to maintain
+ // the original layout of the structure (it'll be reversed once
+ // more when repushing)
+ psrcofs[--numStackSlotsIndex] = MAKE_BYVAL_STACK_OFFSET(nOffset + nSlotOfs - STACK_ELEM_SIZE);
+ }
+ }
+
+ nOffset += StackElemSize(cbSize);
+ }
+ _ASSERTE(numStackSlotsIndex == 0);
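+
+ // Illustrative example: for `void f(int a, __int64 b)` with no this/retbuf, `a`
+ // (offset 0) is enregistered via psrcofsregs, while the two 4-byte slots of `b`
+ // are recorded in psrcofs in reverse order; the repush loop in CompileUMThunkWorker
+ // also walks backward, so the pieces land back in their original layout.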
+
+ UINT cbActualArgSize = nStackBytesIncoming + (numRegistersUsed * STACK_ELEM_SIZE);
+
+ if (!fIsStatic)
+ {
+ // do not count THIS
+ cbActualArgSize -= StackElemSize(sizeof(LPVOID));
+ }
+
+ m_cbActualArgSize = cbActualArgSize;
+
+ m_callConv = static_cast<UINT16>(pSigInfo->GetCallConv());
+
+ UMThunkStubInfo stubInfo;
+ memset(&stubInfo, 0, sizeof(stubInfo));
+
+ if (!FitsInU2(m_cbActualArgSize))
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIGTOOCOMPLEX);
+
+ stubInfo.m_cbSrcStack = static_cast<UINT16>(m_cbActualArgSize);
+ stubInfo.m_cbDstStack = nStackBytes;
+
+ if (pSigInfo->GetCallConv() == pmCallConvCdecl)
+ {
+ // caller pop
+ m_cbRetPop = 0;
+ }
+ else
+ {
+ // callee pop
+ m_cbRetPop = static_cast<UINT16>(m_cbActualArgSize);
+
+ if (pSigInfo->GetCallConv() == pmCallConvThiscall)
+ {
+ stubInfo.m_wFlags |= umtmlThisCall;
+ if (argit.HasRetBuffArg())
+ {
+ stubInfo.m_wFlags |= umtmlThisCallHiddenArg;
+ }
+ }
+ }
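+
+ // Illustrative example: a stdcall target taking two ints has m_cbActualArgSize == 8,
+ // so m_cbRetPop == 8 and the thunk ends in `ret 8`; the same signature as cdecl
+ // keeps m_cbRetPop == 0 and the thunk ends in a plain `ret`, leaving cleanup to
+ // the unmanaged caller.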
+ stubInfo.m_cbRetPop = m_cbRetPop;
+
+ if (fIsStatic) stubInfo.m_wFlags |= umtmlIsStatic;
+ if (fNoStub) stubInfo.m_wFlags |= umtmlSkipStub;
+
+ if (pMetaSig->HasFPReturn()) stubInfo.m_wFlags |= umtmlFpu;
+
+ CPUSTUBLINKER cpusl;
+ CPUSTUBLINKER *pcpusl = &cpusl;
+
+ // call the worker to emit the actual thunk
+ UMEntryThunk::CompileUMThunkWorker(&stubInfo, pcpusl, psrcofsregs, psrcofs, retbufofs);
+
+ return pcpusl->Link(pLoaderHeap);
+}
+
+#else // _TARGET_X86_
+
+PCODE UMThunkMarshInfo::GetExecStubEntryPoint()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return GetEEFuncEntryPoint(UMThunkStub);
+}
+
+#endif // _TARGET_X86_
+
+UMEntryThunkCache::UMEntryThunkCache(AppDomain *pDomain) :
+ m_crst(CrstUMEntryThunkCache),
+ m_pDomain(pDomain)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(pDomain != NULL);
+}
+
+UMEntryThunkCache::~UMEntryThunkCache()
+{
+ WRAPPER_NO_CONTRACT;
+
+ for (SHash<ThunkSHashTraits>::Iterator i = m_hash.Begin(); i != m_hash.End(); i++)
+ {
+ // UMEntryThunks in this cache own UMThunkMarshInfo in 1-1 fashion
+ DestroyMarshInfo(i->m_pThunk->GetUMThunkMarshInfo());
+ UMEntryThunk::FreeUMEntryThunk(i->m_pThunk);
+ }
+}
+
+UMEntryThunk *UMEntryThunkCache::GetUMEntryThunk(MethodDesc *pMD)
+{
+ CONTRACT (UMEntryThunk *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ UMEntryThunk *pThunk;
+
+ CrstHolder ch(&m_crst);
+
+ const CacheElement *pElement = m_hash.LookupPtr(pMD);
+ if (pElement != NULL)
+ {
+ pThunk = pElement->m_pThunk;
+ }
+ else
+ {
+ // cache miss -> create a new thunk
+ pThunk = UMEntryThunk::CreateUMEntryThunk();
+ Holder<UMEntryThunk *, DoNothing, UMEntryThunk::FreeUMEntryThunk> umHolder;
+ umHolder.Assign(pThunk);
+
+ UMThunkMarshInfo *pMarshInfo = (UMThunkMarshInfo *)(void *)(m_pDomain->GetStubHeap()->AllocMem(S_SIZE_T(sizeof(UMThunkMarshInfo))));
+ Holder<UMThunkMarshInfo *, DoNothing, UMEntryThunkCache::DestroyMarshInfo> miHolder;
+ miHolder.Assign(pMarshInfo);
+
+ pMarshInfo->LoadTimeInit(pMD);
+ pThunk->LoadTimeInit(NULL, NULL, pMarshInfo, pMD, m_pDomain->GetId());
+
+ // add it to the cache
+ CacheElement element;
+ element.m_pMD = pMD;
+ element.m_pThunk = pThunk;
+ m_hash.Add(element);
+
+ miHolder.SuppressRelease();
+ umHolder.SuppressRelease();
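+
+ // The holders make the block above exception-safe: if anything throws between
+ // Assign and this point, the thunk and marshal info are destroyed automatically;
+ // SuppressRelease transfers ownership to the cache on success.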
+ }
+
+ RETURN pThunk;
+}
+
+// Disable from a place that is calling into managed code via a UMEntryThunk.
+extern "C" VOID STDCALL UMThunkStubRareDisableWorker(Thread *pThread, UMEntryThunk *pUMEntryThunk)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ // Do not add a CONTRACT here. We haven't set up SEH. We rely
+ // on HandleThreadAbort and COMPlusThrowBoot dealing with this situation properly.
+
+ // WARNING!!!!
+ // When we start executing here, we are actually in cooperative mode. But we
+ // haven't synchronized with the barrier to reentry yet. So we are in a highly
+ // dangerous mode. If we call managed code, we will potentially be active in
+ // the GC heap, even as GCs are occurring!
+
+ // Check for ShutDown scenario. This happens only when we have initiated shutdown
+ // and someone is trying to call in after the CLR is suspended. In that case, we
+ // must either raise an unmanaged exception or return an HRESULT, depending on the
+ // expectations of our caller.
+ if (!CanRunManagedCode())
+ {
+ // DO NOT IMPROVE THIS EXCEPTION! It cannot be a managed exception. It
+ // cannot be a real exception object because we cannot execute any managed
+ // code here.
+ pThread->m_fPreemptiveGCDisabled = 0;
+ COMPlusThrowBoot(E_PROCESS_SHUTDOWN_REENTRY);
+ }
+
+ // We must do the following in this order, because otherwise we would be constructing
+ // the exception for the abort without synchronizing with the GC. Also, we have no
+ // CLR SEH set up, despite the fact that we may throw a ThreadAbortException.
+ pThread->RareDisablePreemptiveGC();
+ pThread->HandleThreadAbort();
+
+#ifdef DEBUGGING_SUPPORTED
+ // If the debugger is attached, we use this opportunity to see if
+ // we're disabling preemptive GC on the way into the runtime from
+ // unmanaged code. We end up here because
+ // Increment/DecrementTraceCallCount() will bump
+ // g_TrapReturningThreads for us.
+ if (CORDebuggerTraceCall())
+ g_pDebugInterface->TraceCall((const BYTE *)pUMEntryThunk->GetManagedTarget());
+#endif // DEBUGGING_SUPPORTED
+}
+
+PCODE TheUMEntryPrestubWorker(UMEntryThunk * pUMEntryThunk)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+
+ if (!CanRunManagedCode())
+ COMPlusThrowBoot(E_PROCESS_SHUTDOWN_REENTRY);
+
+ Thread * pThread = GetThreadNULLOk();
+ if (pThread == NULL)
+ pThread = CreateThreadBlockThrow();
+
+ GCX_COOP_THREAD_EXISTS(pThread);
+
+ if (pThread->IsAbortRequested())
+ pThread->HandleThreadAbort();
+
+ UMEntryThunk::DoRunTimeInit(pUMEntryThunk);
+
+ return (PCODE)pUMEntryThunk->GetCode();
+}
+
+void RunTimeInit_Wrapper(LPVOID /* UMThunkMarshInfo * */ ptr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ UMEntryThunk::DoRunTimeInit((UMEntryThunk*)ptr);
+}
+
+
+// asm entrypoint
+void STDCALL UMEntryThunk::DoRunTimeInit(UMEntryThunk* pUMEntryThunk)
+{
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ ENTRY_POINT;
+ PRECONDITION(CheckPointer(pUMEntryThunk));
+ }
+ CONTRACTL_END;
+
+ // this method is called by stubs which are called by managed code,
+ // so we need an unwind and continue handler so that our internal
+ // exceptions don't leak out into managed code.
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ // The thread object is guaranteed to have been set up at this point.
+ Thread *pThread = GetThread();
+
+ if (pThread->GetDomain()->GetId() != pUMEntryThunk->GetDomainId())
+ {
+ // call ourselves again through DoCallBack with a domain transition
+ pThread->DoADCallBack(pUMEntryThunk->GetDomainId(), RunTimeInit_Wrapper, pUMEntryThunk);
+ }
+ else
+ {
+ GCX_PREEMP();
+ pUMEntryThunk->RunTimeInit();
+ }
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+}
+
+UMEntryThunk* UMEntryThunk::CreateUMEntryThunk()
+{
+ CONTRACT (UMEntryThunk*)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ UMEntryThunk * p;
+
+#ifdef FEATURE_WINDOWSPHONE
+ // On the phone, use loader heap to save memory commit of regular executable heap
+ p = (UMEntryThunk *)(void *)SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->AllocMem(S_SIZE_T(sizeof(UMEntryThunk)));
+#else
+ p = new (executable) UMEntryThunk;
+ memset (p, 0, sizeof(*p));
+#endif
+
+ RETURN p;
+}
+
+void UMEntryThunk::Terminate()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_WINDOWSPHONE
+ SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->BackoutMem(this, sizeof(UMEntryThunk));
+#else
+ DeleteExecutable(this);
+#endif
+}
+
+VOID UMEntryThunk::FreeUMEntryThunk(UMEntryThunk* p)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(p));
+ }
+ CONTRACTL_END;
+
+#ifdef MDA_SUPPORTED
+ MdaCallbackOnCollectedDelegate* pProbe = MDA_GET_ASSISTANT(CallbackOnCollectedDelegate);
+ if (pProbe)
+ {
+ if (p->GetObjectHandle())
+ {
+ DestroyLongWeakHandle(p->GetObjectHandle());
+ p->m_pObjectHandle = NULL;
+
+ // We are intentionally not resetting m_pManagedTarget here so that
+ // it is available for diagnosing call-on-collected-delegate crashes.
+ }
+ else
+ {
+ p->m_pManagedTarget = NULL;
+ }
+
+ // Add this to the array of delegates to be cleaned up.
+ pProbe->AddToList(p);
+
+ return;
+ }
+#endif
+
+ p->Terminate();
+}
+
+#endif // CROSSGEN_COMPILE
+
+UMThunkMarshInfo::~UMThunkMarshInfo()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef _TARGET_X86_
+ if (m_pExecStub)
+ m_pExecStub->DecRef();
+#endif
+
+#ifdef _DEBUG
+ FillMemory(this, sizeof(*this), 0xcc);
+#endif
+}
+
+MethodDesc* UMThunkMarshInfo::GetILStubMethodDesc(MethodDesc* pInvokeMD, PInvokeStaticSigInfo* pSigInfo, DWORD dwStubFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc* pStubMD = NULL;
+ dwStubFlags |= NDIRECTSTUB_FL_REVERSE_INTEROP; // could be either delegate interop or not--that info is passed in from the caller
+
+#if defined(DEBUGGING_SUPPORTED)
+ if (GetDebuggerCompileFlags(pSigInfo->GetModule(), 0) & CORJIT_FLG_DEBUG_CODE)
+ {
+ dwStubFlags |= NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL;
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ pStubMD = NDirect::CreateCLRToNativeILStub(
+ pSigInfo,
+ dwStubFlags,
+ pInvokeMD // may be NULL
+ );
+
+ return pStubMD;
+}
+
+//----------------------------------------------------------
+// This initializer is called during load time.
+// It does not do any stub initialization or sigparsing.
+// The RunTimeInit() must be called subsequently to fully
+// initialize the UMThunkMarshInfo.
+//----------------------------------------------------------
+VOID UMThunkMarshInfo::LoadTimeInit(MethodDesc* pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(pMD != NULL);
+
+ LoadTimeInit(pMD->GetSignature(), pMD->GetModule(), pMD);
+}
+
+VOID UMThunkMarshInfo::LoadTimeInit(Signature sig, Module * pModule, MethodDesc * pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ FillMemory(this, sizeof(UMThunkMarshInfo), 0); // Prevent problems with partial deletes
+
+ // This will be overwritten by the actual code pointer (or NULL) at the end of UMThunkMarshInfo::RunTimeInit()
+ m_pILStub = (PCODE)1;
+
+ m_pMD = pMD;
+ m_pModule = pModule;
+ m_sig = sig;
+
+#ifdef _TARGET_X86_
+ INDEBUG(m_cbRetPop = 0xcccc;)
+#endif
+}
+
+#ifndef CROSSGEN_COMPILE
+//----------------------------------------------------------
+// This initializer finishes the init started by LoadTimeInit.
+// It does stub creation and can throw an exception.
+//
+// It can safely be called multiple times and by concurrent
+// threads.
+//----------------------------------------------------------
+VOID UMThunkMarshInfo::RunTimeInit()
+{
+ STANDARD_VM_CONTRACT;
+
+ // Nothing to do if already inited
+ if (IsCompletelyInited())
+ return;
+
+ PCODE pFinalILStub = NULL;
+ MethodDesc* pStubMD = NULL;
+
+ MethodDesc * pMD = GetMethod();
+
+ // Look up an NGen'ed stub - currently we only support NGen of reverse delegate invoke interop stubs
+ if (pMD != NULL && pMD->IsEEImpl())
+ {
+ DWORD dwStubFlags = NDIRECTSTUB_FL_NGENEDSTUB | NDIRECTSTUB_FL_REVERSE_INTEROP | NDIRECTSTUB_FL_DELEGATE;
+
+#if defined(DEBUGGING_SUPPORTED)
+ if (GetDebuggerCompileFlags(GetModule(), 0) & CORJIT_FLG_DEBUG_CODE)
+ {
+ dwStubFlags |= NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL;
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ pFinalILStub = GetStubForInteropMethod(pMD, dwStubFlags, &pStubMD);
+ }
+
+#ifdef _TARGET_X86_
+ PInvokeStaticSigInfo sigInfo;
+
+ if (pMD != NULL)
+ new (&sigInfo) PInvokeStaticSigInfo(pMD);
+ else
+ new (&sigInfo) PInvokeStaticSigInfo(GetSignature(), GetModule());
+
+ Stub *pFinalExecStub = NULL;
+
+ // we will always emit the argument-shuffling thunk, m_cbActualArgSize is set inside
+ LoaderHeap *pHeap = (pMD == NULL ? NULL : pMD->GetLoaderAllocator()->GetStubHeap());
+
+ if (pFinalILStub != NULL ||
+#ifdef MDA_SUPPORTED
+ // GC.Collect calls are emitted to IL stubs
+ MDA_GET_ASSISTANT(GcManagedToUnmanaged) || MDA_GET_ASSISTANT(GcUnmanagedToManaged) ||
+#endif // MDA_SUPPORTED
+ NDirect::MarshalingRequired(pMD, GetSignature().GetRawSig(), GetModule()))
+ {
+ if (pFinalILStub == NULL)
+ {
+ DWORD dwStubFlags = 0;
+
+ if (sigInfo.IsDelegateInterop())
+ dwStubFlags |= NDIRECTSTUB_FL_DELEGATE;
+
+ pStubMD = GetILStubMethodDesc(pMD, &sigInfo, dwStubFlags);
+ pFinalILStub = JitILStub(pStubMD);
+ }
+
+ MetaSig msig(pStubMD);
+ pFinalExecStub = CompileNExportThunk(pHeap, &sigInfo, &msig, FALSE);
+ }
+ else
+ {
+ MetaSig msig(GetSignature(), GetModule(), NULL);
+ pFinalExecStub = CompileNExportThunk(pHeap, &sigInfo, &msig, TRUE);
+ }
+
+ if (FastInterlockCompareExchangePointer(&m_pExecStub,
+ pFinalExecStub,
+ NULL) != NULL)
+ {
+
+ // Some thread swooped in and set us. Our stub is now a
+ // duplicate, so throw it away.
+ if (pFinalExecStub)
+ pFinalExecStub->DecRef();
+ }
+
+#else // _TARGET_X86_
+
+ if (pFinalILStub == NULL)
+ {
+ if (pMD != NULL && !pMD->IsEEImpl() &&
+#ifdef MDA_SUPPORTED
+ // GC.Collect calls are emitted to IL stubs
+ !MDA_GET_ASSISTANT(GcManagedToUnmanaged) && !MDA_GET_ASSISTANT(GcUnmanagedToManaged) &&
+#endif // MDA_SUPPORTED
+ !NDirect::MarshalingRequired(pMD, GetSignature().GetRawSig(), GetModule()))
+ {
+ // Call the method directly in no-delegate case if possible. This is important to avoid JITing
+ // for stubs created via code:ICLRRuntimeHost2::CreateDelegate during coreclr startup.
+ pFinalILStub = pMD->GetMultiCallableAddrOfCode();
+ }
+ else
+ {
+ // For perf, it is important to avoid the expensive initialization of
+ // PInvokeStaticSigInfo if we have an NGen'ed stub.
+ PInvokeStaticSigInfo sigInfo;
+
+ if (pMD != NULL)
+ new (&sigInfo) PInvokeStaticSigInfo(pMD);
+ else
+ new (&sigInfo) PInvokeStaticSigInfo(GetSignature(), GetModule());
+
+ DWORD dwStubFlags = 0;
+
+ if (sigInfo.IsDelegateInterop())
+ dwStubFlags |= NDIRECTSTUB_FL_DELEGATE;
+
+ pStubMD = GetILStubMethodDesc(pMD, &sigInfo, dwStubFlags);
+ pFinalILStub = JitILStub(pStubMD);
+ }
+ }
+
+ //
+ // m_cbActualArgSize gets the number of arg bytes for the NATIVE signature
+ //
+ m_cbActualArgSize = (pStubMD != NULL) ? pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize() : pMD->SizeOfArgStack();
+
+#endif // _TARGET_X86_
+
+ // Must be the last thing we set!
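+ // m_pILStub still holds the (PCODE)1 sentinel written by LoadTimeInit; the
+ // interlocked compare-exchange publishes the final stub exactly once even if
+ // several threads raced through RunTimeInit.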
+ InterlockedCompareExchangeT<PCODE>(&m_pILStub, pFinalILStub, (PCODE)1);
+}
+
+#ifdef _DEBUG
+void STDCALL LogUMTransition(UMEntryThunk* thunk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ DEBUG_ONLY;
+ GC_NOTRIGGER;
+ ENTRY_POINT;
+ if (GetThread()) MODE_PREEMPTIVE; else MODE_ANY;
+ PRECONDITION(CheckPointer(thunk));
+ PRECONDITION((GetThread() != NULL) ? (!GetThread()->PreemptiveGCDisabled()) : TRUE);
+ }
+ CONTRACTL_END;
+
+ BEGIN_ENTRYPOINT_VOIDRET;
+
+ void** retESP = ((void**) &thunk) + 4;
+
+ MethodDesc* method = thunk->GetMethod();
+ if (method)
+ {
+ LOG((LF_STUBS, LL_INFO1000000, "UNMANAGED -> MANAGED Stub To Method = %s::%s SIG %s Ret Address ESP = 0x%x ret = 0x%x\n",
+ method->m_pszDebugClassName,
+ method->m_pszDebugMethodName,
+ method->m_pszDebugMethodSignature, retESP, *retESP));
+ }
+
+ END_ENTRYPOINT_VOIDRET;
+
+ }
+#endif
+
+#endif // CROSSGEN_COMPILE
diff --git a/src/vm/dllimportcallback.h b/src/vm/dllimportcallback.h
new file mode 100644
index 0000000000..bc36056ccb
--- /dev/null
+++ b/src/vm/dllimportcallback.h
@@ -0,0 +1,595 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: DllImportCallback.h
+//
+
+//
+
+
+#ifndef __dllimportcallback_h__
+#define __dllimportcallback_h__
+
+#include "object.h"
+#include "stublink.h"
+#include "ceeload.h"
+#include "class.h"
+#include "dllimport.h"
+#include "mdaassistants.h"
+
+enum UMThunkStubFlags
+{
+ umtmlIsStatic = 0x0001,
+ umtmlThisCall = 0x0002,
+ umtmlThisCallHiddenArg = 0x0004,
+ umtmlFpu = 0x0008,
+#ifdef _TARGET_X86_
+ // the signature is trivial so stub need not be generated and the target can be called directly
+ umtmlSkipStub = 0x0080,
+#endif // _TARGET_X86_
+};
+
+#include <pshpack1.h>
+//--------------------------------------------------------------------------
+// This structure captures basic info needed to build an UMThunk.
+//--------------------------------------------------------------------------
+struct UMThunkStubInfo
+{
+ UINT32 m_cbDstStack; //# of bytes of stack portion of managed args
+ UINT16 m_cbSrcStack; //# of bytes of stack portion of unmanaged args
+ UINT16 m_cbRetPop; //# of bytes to pop on return to unmanaged
+ UINT16 m_wFlags; // UMThunkStubFlags enum
+};
+#include <poppack.h>
+
+//----------------------------------------------------------------------
+// This structure collects all information needed to marshal an
+// unmanaged->managed thunk. The only information missing is the
+// managed target and the "this" object (if any.) Those two pieces
+// are broken out into a small UMEntryThunk.
+//
+// The idea is to share UMThunkMarshInfo's between multiple thunks
+// that have the same signature while the UMEntryThunk contains the
+// minimal info needed to distinguish between actual function pointers.
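+//
+// Sketch of the intended sharing (illustrative):
+//
+//   UMEntryThunk A (target 1) --+
+//                               +--> UMThunkMarshInfo (one per signature)
+//   UMEntryThunk B (target 2) --+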
+//----------------------------------------------------------------------
+
+class UMThunkMarshInfo
+{
+ friend class CheckAsmOffsets;
+
+private:
+ enum
+ {
+ kLoadTimeInited = 0x4c55544d, //'LUTM'
+ kRunTimeInited = 0x5255544d, //'RUTM'
+ };
+
+public:
+ //----------------------------------------------------------
+ // This initializer can be called during load time.
+ // It does not do any ML stub initialization or sigparsing.
+ // RunTimeInit() must be called before this
+ // can safely be used.
+ //----------------------------------------------------------
+ VOID LoadTimeInit(MethodDesc* pMD);
+ VOID LoadTimeInit(Signature sig, Module * pModule, MethodDesc * pMD = NULL);
+
+ //----------------------------------------------------------
+ // This initializer finishes the init started by LoadTimeInit.
+ // It does all the ML stub creation, and can throw a COM+
+ // exception.
+ //
+ // It can safely be called multiple times and by concurrent
+ // threads.
+ //----------------------------------------------------------
+ VOID RunTimeInit();
+
+ //----------------------------------------------------------
+ // Destructor.
+ //----------------------------------------------------------
+ ~UMThunkMarshInfo();
+
+ //----------------------------------------------------------
+ // Accessor functions
+ //----------------------------------------------------------
+ Signature GetSignature()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_sig;
+ }
+
+ Module* GetModule()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pModule;
+ }
+
+ MethodDesc * GetMethod()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pMD;
+ }
+
+#ifdef _TARGET_X86_
+ PCODE GetExecStubEntryPoint()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetExecStub()->GetEntryPoint();
+ }
+
+ Stub* GetExecStub()
+ {
+ CONTRACT (Stub*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsCompletelyInited());
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN m_pExecStub;
+ }
+
+ UINT16 GetCbRetPop()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(IsCompletelyInited());
+ }
+ CONTRACTL_END;
+
+ return m_cbRetPop;
+ }
+
+ CorPinvokeMap GetCallingConvention()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(IsCompletelyInited());
+ }
+ CONTRACTL_END;
+
+ return (CorPinvokeMap)m_callConv;
+ }
+#else
+ PCODE GetExecStubEntryPoint();
+#endif
+
+ UINT32 GetCbActualArgSize()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsCompletelyInited());
+ }
+ CONTRACTL_END;
+
+ return m_cbActualArgSize;
+ }
+
+ BOOL IsCompletelyInited()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_pILStub != (PCODE)1);
+ }
+
+ static MethodDesc* GetILStubMethodDesc(MethodDesc* pInvokeMD, PInvokeStaticSigInfo* pSigInfo, DWORD dwStubFlags);
+
+ static UINT32 GetOffsetOfStub()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (UINT32)offsetof(UMThunkMarshInfo, m_pILStub);
+ }
+
+#ifdef _TARGET_X86_
+ // Compiles an unmanaged to managed thunk for the given signature. The thunk
+ // will call the stub or, if fNoStub == TRUE, directly the managed target.
+ Stub *CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStaticSigInfo* pSigInfo, MetaSig *pMetaSig, BOOL fNoStub);
+#endif // _TARGET_X86_
+
+private:
+ PCODE m_pILStub; // IL stub for marshaling
+ // On x86, NULL for no-marshal signatures
+ // On non-x86, the managed entrypoint for no-delegate no-marshal signatures
+ UINT32 m_cbActualArgSize; // caches m_pSig.SizeOfFrameArgumentArray()
+#ifdef _TARGET_X86_
+ Stub* m_pExecStub; // UMEntryThunk jumps directly here
+ UINT16 m_cbRetPop; // stack bytes popped by callee (for UpdateRegDisplay)
+ UINT16 m_callConv; // unmanaged calling convention and flags (CorPinvokeMap)
+#endif
+ MethodDesc * m_pMD; // maybe null
+ Module * m_pModule;
+ Signature m_sig;
+};
+
+
+//----------------------------------------------------------------------
+// This structure contains the minimal information required to
+// distinguish one function pointer from another, with the rest
+// being stored in a shared UMThunkMarshInfo.
+//
+// This structure also contains the actual code bytes that form the
+// front end of the thunk. A pointer to the m_code[] byte array is
+// what is actually handed to unmanaged client code.
+//----------------------------------------------------------------------
+class UMEntryThunk
+{
+ friend class CheckAsmOffsets;
+ friend class NDirectStubLinker;
+
+private:
+#ifdef _DEBUG
+ enum
+ {
+ kLoadTimeInited = 0x4c554554, //'LUET'
+ kRunTimeInited = 0x52554554, //'RUET'
+ };
+#endif
+
+public:
+ static UMEntryThunk* CreateUMEntryThunk();
+ static VOID FreeUMEntryThunk(UMEntryThunk* p);
+
+#ifdef _TARGET_X86_
+ // Compiles an unmanaged to managed thunk with the given calling convention adaptation.
+ // - psrcofsregs are stack offsets that should be loaded to argument registers (ECX, EDX)
+ // - psrcofs are stack offsets that should be repushed for the managed target
+ // - retbufofs is the offset of the hidden byref structure argument when returning large
+ // structures; -1 means there is none
+ // Special values recognized by psrcofsregs and psrcofs are -1 which means not present
+ // and 1 which means that this register/stack slot should get the UMEntryThunk pointer.
+ // This method is used for all reverse P/Invoke calls on x86 (the umtmlSkipStub
+ // flag determines whether the managed target is the stub or the actual target method).
+ static VOID CompileUMThunkWorker(UMThunkStubInfo *pInfo,
+ CPUSTUBLINKER *pcpusl,
+ UINT *psrcofsregs,
+ UINT *psrcofs,
+ UINT retbufofs);
+#endif // _TARGET_X86_
+
+#ifndef DACCESS_COMPILE
+ VOID LoadTimeInit(PCODE pManagedTarget,
+ OBJECTHANDLE pObjectHandle,
+ UMThunkMarshInfo *pUMThunkMarshInfo,
+ MethodDesc *pMD,
+ ADID dwDomainId)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pUMThunkMarshInfo));
+ PRECONDITION(pMD != NULL);
+ }
+ CONTRACTL_END;
+
+ m_pManagedTarget = pManagedTarget;
+ m_pObjectHandle = pObjectHandle;
+ m_pUMThunkMarshInfo = pUMThunkMarshInfo;
+ m_dwDomainId = dwDomainId;
+
+ m_pMD = pMD; // For debugging and profiling, so they can identify the target
+
+ m_code.Encode((BYTE*)TheUMThunkPreStub(), this);
+
+#ifdef _DEBUG
+ m_state = kLoadTimeInited;
+#endif
+ }
+
+ ~UMEntryThunk()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (GetObjectHandle())
+ {
+ DestroyLongWeakHandle(GetObjectHandle());
+ }
+
+#ifdef _DEBUG
+ FillMemory(this, sizeof(*this), 0xcc);
+#endif
+ }
+
+ void Terminate();
+
+ void OnADUnload();
+
+ VOID RunTimeInit()
+ {
+ STANDARD_VM_CONTRACT;
+
+ // Ensure the method's module is activated in the app domain
+ m_pMD->EnsureActive();
+
+ m_pUMThunkMarshInfo->RunTimeInit();
+
+ // Ensure that we have either the managed target or the delegate.
+ if (m_pObjectHandle == NULL && m_pManagedTarget == NULL)
+ m_pManagedTarget = m_pMD->GetMultiCallableAddrOfCode();
+
+ m_code.Encode((BYTE*)m_pUMThunkMarshInfo->GetExecStubEntryPoint(), this);
+
+#ifdef _DEBUG
+ m_state = kRunTimeInited;
+#endif
+ }
+
+ // asm entrypoint
+ static VOID STDCALL DoRunTimeInit(UMEntryThunk* pThis);
+
+ PCODE GetManagedTarget() const
+ {
+ CONTRACT (PCODE)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_state == kRunTimeInited || m_state == kLoadTimeInited);
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+ OBJECTHANDLE hndDelegate = GetObjectHandle();
+ if (hndDelegate != NULL)
+ {
+ GCX_COOP();
+
+ DELEGATEREF orDelegate = (DELEGATEREF)ObjectFromHandle(hndDelegate);
+ _ASSERTE(orDelegate != NULL);
+ _ASSERTE(m_pMD->IsEEImpl());
+
+ // We have optimizations that skip the Invoke method and call the delegate's
+ // target method directly. We need to return the target in that case,
+ // otherwise the debugger would fail to step in.
+ RETURN orDelegate->GetMethodPtr();
+ }
+ else
+ {
+ if (m_pManagedTarget != NULL)
+ {
+ RETURN m_pManagedTarget;
+ }
+ else
+ {
+ RETURN m_pMD->GetMultiCallableAddrOfCode();
+ }
+ }
+ }
+#endif // !DACCESS_COMPILE
+
+ OBJECTHANDLE GetObjectHandle() const
+ {
+ CONTRACT (OBJECTHANDLE)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ // If we OOM after we create the holder but
+ // before we set the m_state we can have
+ // m_state == 0 and m_pObjectHandle == NULL
+ PRECONDITION(m_state == kRunTimeInited ||
+ m_state == kLoadTimeInited ||
+ m_pObjectHandle == NULL);
+ }
+ CONTRACT_END;
+
+ RETURN m_pObjectHandle;
+ }
+
+ UMThunkMarshInfo* GetUMThunkMarshInfo() const
+ {
+ CONTRACT (UMThunkMarshInfo*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(m_state == kRunTimeInited || m_state == kLoadTimeInited);
+#ifdef MDA_SUPPORTED
+ // We can return NULL here if the CollectedDelegate probe is on because
+ // a collected delegate will have set this field to NULL.
+ POSTCONDITION(g_pDebugInterface->ThisIsHelperThread() || MDA_GET_ASSISTANT(CallbackOnCollectedDelegate) || CheckPointer(RETVAL));
+#else
+ POSTCONDITION(CheckPointer(RETVAL));
+#endif
+ }
+ CONTRACT_END;
+
+ RETURN m_pUMThunkMarshInfo;
+ }
+
+
+ const BYTE* GetCode() const
+ {
+ CONTRACT (const BYTE*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_state == kRunTimeInited || m_state == kLoadTimeInited);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN m_code.GetEntryPoint();
+ }
+
+ static UMEntryThunk* RecoverUMEntryThunk(const VOID* pCode)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (UMEntryThunk*)( ((LPBYTE)pCode) - offsetof(UMEntryThunk, m_code) );
+ }
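+
+ // Illustrative use: unmanaged callers hold a pointer to m_code (the value returned
+ // by GetCode()), so subtracting offsetof(UMEntryThunk, m_code) from that pointer
+ // recovers the owning UMEntryThunk.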
+
+
+ MethodDesc* GetMethod() const
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_state == kRunTimeInited || m_state == kLoadTimeInited);
+ POSTCONDITION(CheckPointer(RETVAL,NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN m_pMD;
+ }
+
+ ADID GetDomainId() const
+ {
+ CONTRACT (ADID)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_state == kRunTimeInited || m_state == kLoadTimeInited);
+ }
+ CONTRACT_END;
+
+ RETURN m_dwDomainId;
+ }
+
+ static DWORD GetOffsetOfMethodDesc()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(class UMEntryThunk, m_pMD);
+ }
+
+ static DWORD GetCodeOffset()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(UMEntryThunk, m_code);
+ }
+
+ static UMEntryThunk* Decode(LPVOID pCallback);
+
+#ifdef MDA_SUPPORTED
+ BOOL IsCollected() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pMD != NULL && m_pMD->IsEEImpl());
+ return m_pObjectHandle == NULL;
+ }
+#endif
+
+private:
+ // The start of the managed code.
+ // If m_pObjectHandle is non-NULL, this field is still set to help with diagnosing
+ // call-on-collected-delegate crashes, but it may not have the correct value.
+ PCODE m_pManagedTarget;
+
+ // This is used for profiling.
+ PTR_MethodDesc m_pMD;
+
+ // Object handle holding "this" reference. May be a strong or weak handle.
+ // Field is NULL for a static method.
+ OBJECTHANDLE m_pObjectHandle;
+
+ // Pointer to the shared structure containing everything else
+ PTR_UMThunkMarshInfo m_pUMThunkMarshInfo;
+
+ ADID m_dwDomainId; // appdomain of module (cached for fast access)
+#ifdef _DEBUG
+ DWORD m_state; // the initialization state
+#endif
+
+ UMEntryThunkCode m_code;
+};
+
+// Cache to hold UMEntryThunk/UMThunkMarshInfo instances associated with MethodDescs.
+// All UMEntryThunk/UMThunkMarshInfo instances are destroyed when the cache goes away.
+class UMEntryThunkCache
+{
+public:
+ UMEntryThunkCache(AppDomain *pDomain);
+ ~UMEntryThunkCache();
+
+ UMEntryThunk *GetUMEntryThunk(MethodDesc *pMD);
+
+private:
+ struct CacheElement
+ {
+ CacheElement()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pMD = NULL;
+ m_pThunk = NULL;
+ }
+
+ MethodDesc *m_pMD;
+ UMEntryThunk *m_pThunk;
+ };
+
+ class ThunkSHashTraits : public NoRemoveSHashTraits< DefaultSHashTraits<CacheElement> >
+ {
+ public:
+ typedef MethodDesc *key_t;
+ static key_t GetKey(element_t e) { LIMITED_METHOD_CONTRACT; return e.m_pMD; }
+ static BOOL Equals(key_t k1, key_t k2) { LIMITED_METHOD_CONTRACT; return (k1 == k2); }
+ static count_t Hash(key_t k) { LIMITED_METHOD_CONTRACT; return (count_t)(size_t)k; }
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; return CacheElement(); }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return (e.m_pMD == NULL); }
+ };
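+
+ // With these traits, m_hash acts as a MethodDesc* -> UMEntryThunk* map: pointer
+ // identity is the equality relation and the (truncated) pointer value itself
+ // serves as the hash, which is cheap and adequate for pointer keys.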
+
+ static void DestroyMarshInfo(UMThunkMarshInfo *pMarshInfo)
+ {
+ WRAPPER_NO_CONTRACT;
+ pMarshInfo->~UMThunkMarshInfo();
+ }
+
+ SHash<ThunkSHashTraits> m_hash;
+ Crst m_crst;
+ AppDomain *m_pDomain;
+};
+
+#ifdef _TARGET_X86_
+//-------------------------------------------------------------------------
+// One-time creation of special prestub to initialize UMEntryThunks.
+//-------------------------------------------------------------------------
+Stub *GenerateUMThunkPrestub();
+#endif
+
+//-------------------------------------------------------------------------
+// NExport stub
+//-------------------------------------------------------------------------
+#if !defined(_WIN64) && !defined(DACCESS_COMPILE) && !defined(CROSS_COMPILE)
+EXCEPTION_HANDLER_DECL(FastNExportExceptHandler);
+EXCEPTION_HANDLER_DECL(UMThunkPrestubHandler);
+#endif // !_WIN64 && !DACCESS_COMPILE && !CROSS_COMPILE
+
+extern "C" void TheUMEntryPrestub(void);
+extern "C" PCODE TheUMEntryPrestubWorker(UMEntryThunk * pUMEntryThunk);
+
+EXTERN_C void UMThunkStub(void);
+
+#ifdef _DEBUG
+void STDCALL LogUMTransition(UMEntryThunk* thunk);
+#endif
+
+#ifdef MDA_SUPPORTED
+EXTERN_C void __fastcall CallbackOnCollectedDelegateHelper(UMEntryThunk *pEntryThunk);
+#endif // MDA_SUPPORTED
+
+#endif //__dllimportcallback_h__
diff --git a/src/vm/domainfile.cpp b/src/vm/domainfile.cpp
new file mode 100644
index 0000000000..22fec1f982
--- /dev/null
+++ b/src/vm/domainfile.cpp
@@ -0,0 +1,4484 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// --------------------------------------------------------------------------------
+// DomainFile.cpp
+//
+
+// --------------------------------------------------------------------------------
+
+
+#include "common.h"
+
+// --------------------------------------------------------------------------------
+// Headers
+// --------------------------------------------------------------------------------
+
+#include <shlwapi.h>
+
+#include "security.h"
+#include "securitymeta.h"
+#include "invokeutil.h"
+#include "eeconfig.h"
+#include "dynamicmethod.h"
+#include "field.h"
+#include "dbginterface.h"
+#include "eventtrace.h"
+
+#ifdef FEATURE_PREJIT
+#include <corcompile.h>
+#include "compile.h"
+#endif // FEATURE_PREJIT
+
+#include "umthunkhash.h"
+#include "peimagelayout.inl"
+
+#if !defined(FEATURE_CORECLR) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+#include "policy.h" // for fusion::util::isanyframeworkassembly
+#endif
+#include "winrthelpers.h"
+
+BOOL DomainAssembly::IsUnloading()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ BOOL fIsUnloading = FALSE;
+
+ fIsUnloading = this->GetAppDomain()->IsUnloading();
+
+ if (!fIsUnloading)
+ {
+ fIsUnloading = m_fDebuggerUnloadStarted;
+ }
+
+ return fIsUnloading;
+}
+
+
+#ifndef DACCESS_COMPILE
+DomainFile::DomainFile(AppDomain *pDomain, PEFile *pFile)
+ : m_pDomain(pDomain),
+ m_pFile(pFile),
+ m_pOriginalFile(NULL),
+ m_pModule(NULL),
+ m_level(FILE_LOAD_CREATE),
+ m_pError(NULL),
+ m_notifyflags(NOT_NOTIFIED),
+ m_loading(TRUE),
+ m_pDynamicMethodTable(NULL),
+ m_pUMThunkHash(NULL),
+ m_bDisableActivationCheck(FALSE),
+ m_dwReasonForRejectingNativeImage(0)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ THROWS; // From CreateHandle
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ m_hExposedModuleObject = NULL;
+ pFile->AddRef();
+}
+
+DomainFile::~DomainFile()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pFile->Release();
+ if(m_pOriginalFile)
+ m_pOriginalFile->Release();
+ if (m_pDynamicMethodTable)
+ m_pDynamicMethodTable->Destroy();
+#if defined(FEATURE_MIXEDMODE) && !defined(CROSSGEN_COMPILE)
+ if (m_pUMThunkHash)
+ delete m_pUMThunkHash;
+#endif
+ delete m_pError;
+}
+
+#endif //!DACCESS_COMPILE
+
+LoaderAllocator * DomainFile::GetLoaderAllocator()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ Assembly *pAssembly = GetDomainAssembly()->GetAssembly();
+ if ((pAssembly != NULL) && (pAssembly->IsCollectible()))
+ {
+ return pAssembly->GetLoaderAllocator();
+ }
+ else
+ {
+ return this->GetAppDomain()->GetLoaderAllocator();
+ }
+}
+
+#ifndef DACCESS_COMPILE
+
+void DomainFile::ReleaseFiles()
+{
+ WRAPPER_NO_CONTRACT;
+ Module* pModule=GetCurrentModule();
+ if(pModule)
+ pModule->StartUnload();
+
+ if (m_pFile)
+ m_pFile->ReleaseIL();
+ if(m_pOriginalFile)
+ m_pOriginalFile->ReleaseIL();
+
+ if(pModule)
+ pModule->ReleaseILData();
+}
+
+BOOL DomainFile::TryEnsureActive()
+{
+ CONTRACT(BOOL)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACT_END;
+
+ BOOL success = TRUE;
+
+ EX_TRY
+ {
+ EnsureActive();
+ }
+ EX_CATCH
+ {
+ success = FALSE;
+ }
+ EX_END_CATCH(RethrowTransientExceptions);
+
+ RETURN success;
+}
+
+// Optimization intended for EnsureLoadLevel only
+#include <optsmallperfcritical.h>
+void DomainFile::EnsureLoadLevel(FileLoadLevel targetLevel)
+{
+ CONTRACT_VOID
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACT_END;
+
+ TRIGGERSGC ();
+ if (IsLoading())
+ {
+ this->GetAppDomain()->LoadDomainFile(this, targetLevel);
+
+ // Enforce the loading requirement. Note that we may have hit a deadlock, in which case
+ // we may be off by one, which is OK. (At this point, if we are short of targetLevel, we
+ // know it is because of reentrancy constraints.)
+
+ RequireLoadLevel((FileLoadLevel)(targetLevel-1));
+ }
+ else
+ ThrowIfError(targetLevel);
+
+ RETURN;
+}
+#include <optdefault.h>
+
+void DomainFile::AttemptLoadLevel(FileLoadLevel targetLevel)
+{
+ CONTRACT_VOID
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACT_END;
+
+ if (IsLoading())
+ this->GetAppDomain()->LoadDomainFile(this, targetLevel);
+ else
+ ThrowIfError(targetLevel);
+
+ RETURN;
+}
+
+
+CHECK DomainFile::CheckLoadLevel(FileLoadLevel requiredLevel, BOOL deadlockOK)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (deadlockOK)
+ {
+#ifndef CROSSGEN_COMPILE
+ // CheckLoading requires waiting on a host-breakable lock.
+ // Since this is only a checked-build assert and we've been
+ // living with it for a while, I'll leave it as is.
+ //@TODO: CHECK statements are *NOT* debug-only!!!
+ CONTRACT_VIOLATION(ThrowsViolation|GCViolation|TakesLockViolation);
+ CHECK(this->GetAppDomain()->CheckLoading(this, requiredLevel));
+#endif
+ }
+ else
+ {
+ CHECK_MSG(m_level >= requiredLevel,
+ "File not sufficiently loaded");
+ }
+
+ CHECK_OK;
+}
+
+
+
+void DomainFile::RequireLoadLevel(FileLoadLevel targetLevel)
+{
+ CONTRACT_VOID
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACT_END;
+
+ if (GetLoadLevel() < targetLevel)
+ {
+ ThrowIfError(targetLevel);
+ ThrowHR(MSEE_E_ASSEMBLYLOADINPROGRESS); // @todo: better exception
+ }
+
+ RETURN;
+}
+
+
+void DomainFile::SetError(Exception *ex)
+{
+ CONTRACT_VOID
+ {
+ PRECONDITION(!IsError());
+ PRECONDITION(ex != NULL);
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ POSTCONDITION(IsError());
+ }
+ CONTRACT_END;
+
+ m_pError = new ExInfo(ex->DomainBoundClone());
+
+ GetCurrentModule()->NotifyEtwLoadFinished(ex->GetHR());
+
+ if (!IsProfilerNotified())
+ {
+ SetProfilerNotified();
+
+#ifdef PROFILING_SUPPORTED
+ if (GetCurrentModule() != NULL
+ && !GetCurrentModule()->GetAssembly()->IsDomainNeutral())
+ {
+ // Only send errors for non-shared assemblies; other assemblies might be successfully completed
+ // in another app domain later.
+ GetCurrentModule()->NotifyProfilerLoadFinished(ex->GetHR());
+ }
+#endif
+ }
+
+ RETURN;
+}
+
+void DomainFile::ThrowIfError(FileLoadLevel targetLevel)
+{
+ CONTRACT_VOID
+ {
+ INSTANCE_CHECK;
+ MODE_ANY;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACT_END;
+
+ if (m_level < targetLevel)
+ {
+ if (m_pError)
+ m_pError->Throw();
+ }
+
+ RETURN;
+}
+
+CHECK DomainFile::CheckNoError(FileLoadLevel targetLevel)
+{
+ LIMITED_METHOD_CONTRACT;
+ CHECK(m_level >= targetLevel
+ || !IsError());
+
+ CHECK_OK;
+}
+
+CHECK DomainFile::CheckLoaded()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CHECK_MSG(CheckNoError(FILE_LOADED), "DomainFile load resulted in an error");
+
+ if (IsLoaded())
+ CHECK_OK;
+
+ // Mscorlib is allowed to run managed code much earlier than other
+ // assemblies for bootstrapping purposes. This is because it has no
+ // dependencies or security checks, and doesn't rely on loader notifications.
+
+ if (GetFile()->IsSystem())
+ CHECK_OK;
+
+ CHECK_MSG(GetFile()->CheckLoaded(), "PEFile has not been loaded");
+
+ CHECK_OK;
+}
+
+CHECK DomainFile::CheckActivated()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CHECK_MSG(CheckNoError(FILE_ACTIVE), "DomainFile load resulted in an error");
+
+ if (IsActive())
+ CHECK_OK;
+
+ // Mscorlib is allowed to run managed code much earlier than other
+ // assemblies for bootstrapping purposes. This is because it has no
+ // dependencies or security checks, and doesn't rely on loader notifications.
+
+ if (GetFile()->IsSystem())
+ CHECK_OK;
+
+ CHECK_MSG(GetFile()->CheckLoaded(), "PEFile has not been loaded");
+ CHECK_MSG(IsLoaded(), "DomainFile has not been fully loaded");
+ CHECK_MSG(m_bDisableActivationCheck || CheckLoadLevel(FILE_ACTIVE), "File has not had execution verified");
+
+ CHECK_OK;
+}
+
+#endif //!DACCESS_COMPILE
+
+DomainAssembly *DomainFile::GetDomainAssembly()
+{
+ CONTRACTL
+ {
+ SUPPORTS_DAC;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ if (IsAssembly())
+ {
+ return dac_cast<PTR_DomainAssembly>(this);
+ }
+ else
+ {
+ return dac_cast<PTR_DomainModule>(this)->GetDomainAssembly();
+ }
+#else
+ _ASSERTE(IsAssembly());
+ return (DomainAssembly *) this;
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+}
+
+BOOL DomainFile::IsIntrospectionOnly()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetFile()->IsIntrospectionOnly();
+}
+
+// Return true iff the debugger should get notifications about this assembly.
+//
+// Notes:
+// The debuggee may be stopped while a DomainAssembly is being initialized. In this time window,
+// GetAssembly() may be NULL. If that's the case, this function has to return FALSE. Later on, when
+// the DomainAssembly is fully initialized, this function will return TRUE. This is the only scenario
+// where this function is mutable. In other words, a DomainAssembly can only change from being invisible
+// to visible, but NOT vice versa. Once a DomainAssembly is fully initialized, this function should be
+// immutable for an instance of a module. That ensures that the debugger gets consistent
+// notifications about it. If this value mutates, then the debugger may miss relevant notifications.
+BOOL DomainAssembly::IsVisibleToDebugger()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ // If you can't run an assembly, then don't send notifications to the debugger.
+ // This check includes IsIntrospectionOnly().
+ return ((GetAssembly() != NULL) ? GetAssembly()->HasRunAccess() : FALSE);
+}
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_PREJIT
+void DomainFile::ExternalLog(DWORD level, const WCHAR *fmt, ...)
+{
+ WRAPPER_NO_CONTRACT;
+
+ va_list args;
+ va_start(args, fmt);
+
+ GetOriginalFile()->ExternalVLog(LF_ZAP, level, fmt, args);
+
+ va_end(args);
+}
+
+void DomainFile::ExternalLog(DWORD level, const char *msg)
+{
+ WRAPPER_NO_CONTRACT;
+
+ GetOriginalFile()->ExternalLog(level, msg);
+}
+#endif
+
+#ifndef CROSSGEN_COMPILE
+//---------------------------------------------------------------------------------------
+//
+// Returns managed representation of the module (Module or ModuleBuilder).
+// Returns NULL if the managed scout was already collected (see code:LoaderAllocator#AssemblyPhases).
+//
+OBJECTREF DomainFile::GetExposedModuleObject()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ LoaderAllocator * pLoaderAllocator = GetLoaderAllocator();
+
+ if (m_hExposedModuleObject == NULL)
+ {
+ // Atomically create a handle
+ LOADERHANDLE handle = pLoaderAllocator->AllocateHandle(NULL);
+
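+ // Publish the handle with a CAS so that racing threads agree on a single handle;
+ // a losing thread's freshly allocated handle is simply abandoned and is reclaimed
+ // when the LoaderAllocator itself is destroyed.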
+ FastInterlockCompareExchangePointer(&m_hExposedModuleObject, handle, static_cast<LOADERHANDLE>(NULL));
+ }
+
+ if (pLoaderAllocator->GetHandleValue(m_hExposedModuleObject) == NULL)
+ {
+ REFLECTMODULEBASEREF refClass = NULL;
+
+ // Will be TRUE only if LoaderAllocator managed object was already collected and therefore we should
+ // return NULL
+ BOOL fIsLoaderAllocatorCollected = FALSE;
+
+ GCPROTECT_BEGIN(refClass);
+
+ if (GetFile()->IsDynamic())
+ {
+ refClass = (REFLECTMODULEBASEREF) AllocateObject(MscorlibBinder::GetClass(CLASS__MODULE_BUILDER));
+ }
+ else
+ {
+ refClass = (REFLECTMODULEBASEREF) AllocateObject(MscorlibBinder::GetClass(CLASS__MODULE));
+ }
+ refClass->SetModule(m_pModule);
+
+ // Attach the reference to the assembly to keep the LoaderAllocator for this collectible type
+ // alive as long as a reference to the module is kept alive.
+ if (GetModule()->GetAssembly() != NULL)
+ {
+ OBJECTREF refAssembly = GetModule()->GetAssembly()->GetExposedObject();
+ if ((refAssembly == NULL) && GetModule()->GetAssembly()->IsCollectible())
+ {
+ fIsLoaderAllocatorCollected = TRUE;
+ }
+ refClass->SetAssembly(refAssembly);
+ }
+
+ pLoaderAllocator->CompareExchangeValueInHandle(m_hExposedModuleObject, (OBJECTREF)refClass, NULL);
+ GCPROTECT_END();
+
+ if (fIsLoaderAllocatorCollected)
+ { // The LoaderAllocator managed object was already collected, we cannot re-create it
+ // Note: We did not publish the allocated Module/ModuleBuilder object, it will get collected
+ // by GC
+ return NULL;
+ }
+ }
+
+ return pLoaderAllocator->GetHandleValue(m_hExposedModuleObject);
+} // DomainFile::GetExposedModuleObject
+#endif // CROSSGEN_COMPILE
+
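+// Advances this file by exactly one load level. The loader state machine
+// (AppDomain::LoadDomainFile) calls this repeatedly, committing each completed
+// level, until the requested target level is reached.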
+BOOL DomainFile::DoIncrementalLoad(FileLoadLevel level)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsError())
+ return FALSE;
+
+ Thread *pThread;
+ pThread = GetThread();
+ _ASSERTE(pThread);
+ INTERIOR_STACK_PROBE_FOR(pThread, 8);
+
+ switch (level)
+ {
+ case FILE_LOAD_BEGIN:
+ Begin();
+ break;
+
+ case FILE_LOAD_FIND_NATIVE_IMAGE:
+#ifdef FEATURE_PREJIT
+ FindNativeImage();
+#endif
+ break;
+
+ case FILE_LOAD_VERIFY_NATIVE_IMAGE_DEPENDENCIES:
+#ifdef FEATURE_PREJIT
+ VerifyNativeImageDependencies();
+#endif
+ break;
+
+ case FILE_LOAD_ALLOCATE:
+ Allocate();
+ break;
+
+ case FILE_LOAD_ADD_DEPENDENCIES:
+ AddDependencies();
+ break;
+
+ case FILE_LOAD_PRE_LOADLIBRARY:
+ PreLoadLibrary();
+ break;
+
+ case FILE_LOAD_LOADLIBRARY:
+ LoadLibrary();
+ break;
+
+ case FILE_LOAD_POST_LOADLIBRARY:
+ PostLoadLibrary();
+ break;
+
+ case FILE_LOAD_EAGER_FIXUPS:
+ EagerFixups();
+ break;
+
+ case FILE_LOAD_VTABLE_FIXUPS:
+ VtableFixups();
+ break;
+
+ case FILE_LOAD_DELIVER_EVENTS:
+ DeliverSyncEvents();
+ break;
+
+ case FILE_LOADED:
+ FinishLoad();
+ break;
+
+ case FILE_LOAD_VERIFY_EXECUTION:
+ VerifyExecution();
+ break;
+
+ case FILE_ACTIVE:
+ Activate();
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ END_INTERIOR_STACK_PROBE;
+
+#ifdef FEATURE_MULTICOREJIT
+ {
+ Module * pModule = GetModule();
+
+ if (pModule != NULL) // Should not trigger the assert when the module is NULL
+ {
+ this->GetAppDomain()->GetMulticoreJitManager().RecordModuleLoad(pModule, level);
+ }
+ }
+#endif
+
+ return TRUE;
+}
+
+#ifdef FEATURE_PREJIT
+
+void DomainFile::VerifyNativeImageDependencies(bool verifyOnly)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ PRECONDITION(verifyOnly || (m_pDomain->GetDomainFileLoadLevel(this) ==
+ FILE_LOAD_FIND_NATIVE_IMAGE));
+ }
+ CONTRACTL_END;
+
+ // This function gets called multiple times. The first call is the real work.
+ // Subsequent calls are only to verify that everything still looks OK.
+ if (!verifyOnly)
+ ClearNativeImageStress();
+
+ if (!m_pFile->HasNativeImage())
+ {
+ CheckZapRequired();
+ return;
+ }
+
+ {
+ // Go through native dependencies & make sure they still have their prejit images after
+ // the security check.
+ // NOTE: we could theoretically do this without loading the dependencies, if we cache the
+ // COR_TRUST structures from the dependencies in the version information.
+ //
+ // Verify that all of our hard dependencies are loaded at the right base address.
+ // If not, abandon prejit image (or fix ours up)
+ // Also, if there are any hard dependencies, then our native image also needs to be
+ // loaded at the right base address
+
+ // Note: we will go through all of our dependencies, call Load on them, and check the base
+ // addresses & identity.
+ // It is important to note that all of those dependencies are also going to do the
+ // same thing, so we might conceivably check a base address as OK, and then have that image
+ // abandoned by that assembly during its VerifyNativeImageDependencies phase.
+ // However, we avoid this problem since the hard dependencies stored are a closure of the
+ // hard dependencies of an image. This effectively means that our check here is a superset
+ // of the check that the dependencies will perform. Even if we hit a dependency loop, we
+ // will still guarantee that we've examined all of our dependencies.
+
+ ReleaseHolder<PEImage> pNativeImage = m_pFile->GetNativeImageWithRef();
+ if(pNativeImage==NULL)
+ {
+ CheckZapRequired();
+ return;
+ }
+
+ PEImageLayout* pNativeLayout = pNativeImage->GetLoadedLayout();
+
+ // reuse same codepath for both manifest and non-manifest modules
+ ReleaseHolder<PEImage> pManifestNativeImage(NULL);
+
+ PEFile* pManifestFile = m_pFile;
+ PEImageLayout* pManifestNativeLayout = pNativeLayout;
+
+ if (!IsAssembly())
+ {
+ pManifestFile = GetDomainAssembly()->GetCurrentAssembly()
+ ->GetManifestModule()->GetFile();
+
+ pManifestNativeImage = pManifestFile->GetNativeImageWithRef();
+
+ if (pManifestNativeImage == NULL)
+ {
+ ExternalLog(LL_ERROR, "Rejecting native image because there is no "
+ "ngen image for manifest module. Check why the manifest module "
+ "does not have an ngen image");
+ m_dwReasonForRejectingNativeImage = ReasonForRejectingNativeImage_NoNiForManifestModule;
+ STRESS_LOG3(LF_ZAP,LL_INFO100,"Rejecting native file %p, because its manifest module %p has no NI - reason 0x%x\n",pNativeImage.GetValue(),pManifestFile,m_dwReasonForRejectingNativeImage);
+ goto NativeImageRejected;
+ }
+
+ return;
+ }
+
+ COUNT_T cDependencies;
+ CORCOMPILE_DEPENDENCY *pDependencies = pManifestNativeLayout->GetNativeDependencies(&cDependencies);
+
+ LOG((LF_ZAP, LL_INFO100, "ZAP: Checking native image dependencies for %S.\n",
+ pNativeImage->GetPath().GetUnicode()));
+
+ for (COUNT_T iDependency = 0; iDependency < cDependencies; iDependency++)
+ {
+ CORCOMPILE_DEPENDENCY *pDependency = &(pDependencies[iDependency]);
+
+ // Later, for domain neutral assemblies, we will also want to verify security policy
+ // in such cases, the prejit image should store the publisher info for the dependencies
+ // for us.
+
+ // If this is not a hard-bound dependency, then skip to the next dependency
+ if (pDependency->signNativeImage == INVALID_NGEN_SIGNATURE)
+ continue;
+
+#ifdef FEATURE_CORECLR // hardbinding
+
+ //
+ // CoreCLR hard binds to mscorlib.dll only. Avoid going through the full load.
+ //
+
+#ifdef _DEBUG
+ AssemblySpec name;
+ name.InitializeSpec(pDependency->dwAssemblyRef,
+ ((pManifestNativeImage != NULL) ? pManifestNativeImage : pNativeImage)->GetNativeMDImport(),
+ GetDomainAssembly());
+ _ASSERTE(name.IsMscorlib());
+#endif
+
+ PEAssembly * pDependencyFile = SystemDomain::SystemFile();
+
+#else // FEATURE_CORECLR
+
+ //
+ // Load the manifest file for the given name assembly spec.
+ //
+
+ AssemblySpec name;
+ name.InitializeSpec(pDependency->dwAssemblyRef,
+ ((pManifestNativeImage != NULL) ? pManifestNativeImage : pNativeImage)->GetNativeMDImport(),
+ GetDomainAssembly());
+
+ if (this->GetAppDomain()->IsCompilationDomain())
+ {
+ //
+ // Allow transitive closure of hardbound dependencies to be loaded during ngen.
+ //
+
+ DomainAssembly * pDependencyAssembly = name.LoadDomainAssembly(FILE_LOAD_FIND_NATIVE_IMAGE);
+ pDependencyAssembly->GetFile()->SetSafeToHardBindTo();
+ }
+
+ DomainAssembly * pDependencyAssembly = NULL;
+ {
+ // We are about to validate the hard-bound dependencies of the assembly being loaded. The invariant of being hard-bound states
+ // that each hard-bound dependency must have its NI image to be valid and available for loading and this is done recursively for each
+ // hard-bound dependency.
+ //
+ // The validity (and presence) of the NI image is checked in the FILE_LOAD_ALLOCATE stage of assembly load, which is the next stage in assembly loading,
+ // and not the current stage (FILE_LOAD_VERIFY_NATIVE_IMAGE_DEPENDENCIES). In FILE_LOAD_ALLOCATE, we do sharing checks, closure validation, redirection policy application, etc.,
+ // before computing if a NI is available and, if it is, whether it is valid or not.
+ //
+ // However, we need to know about the validity of the NI in the current (and earlier) stage. As a result, we will temporarily set the assembly load limit (defined as the maximum
+ // load level till which recursive assembly load can execute) to be FILE_LOAD_ALLOCATE if we have been invoked to validate the NI dependencies for the first time.
+ //
+ // A valid concern at this point is that we would allow a dependency to be loaded at a load stage higher than its dependent assembly, as this could create cycles. This concern is
+ // alleviated since we are doing this override (of the load stage) only for hard-bound dependencies, and NGEN is responsible for ensuring that there are no cycles.
+ //
+ // As a result, once the dependency load returns, we will know for sure if the dependency has a valid NI or not.
+ OVERRIDE_LOAD_LEVEL_LIMIT(verifyOnly ? FILE_LOADED : FILE_LOAD_ALLOCATE);
+ pDependencyAssembly = name.LoadDomainAssembly(FILE_LOADED);
+ }
+
+ PEAssembly * pDependencyFile = pDependencyAssembly->GetFile();
+
+#endif // FEATURE_CORECLR
+
+ ReleaseHolder<PEImage> pDependencyNativeImage = pDependencyFile->GetNativeImageWithRef();
+ if (pDependencyNativeImage == NULL)
+ {
+ ExternalLog(LL_ERROR, W("Rejecting native image because dependency %s is not native"),
+ pDependencyFile->GetPath().GetUnicode());
+ m_dwReasonForRejectingNativeImage = ReasonForRejectingNativeImage_DependencyNotNative;
+ STRESS_LOG3(LF_ZAP,LL_INFO100,"Rejecting native file %p, because dependency %p is not NI - reason 0x%x\n",pNativeImage.GetValue(),pDependencyFile,m_dwReasonForRejectingNativeImage);
+ goto NativeImageRejected;
+ }
+
+#ifndef FEATURE_FUSION // Fusion does this verification at native binding time.
+ PTR_PEImageLayout pDependencyNativeLayout = pDependencyNativeImage->GetLoadedLayout();
+ // Assert that the native image signature is as expected
+ // Fusion will ensure this
+ CORCOMPILE_VERSION_INFO * pDependencyNativeVersion =
+ pDependencyNativeLayout->GetNativeVersionInfo();
+
+ LoggablePEAssembly logAsm(pDependencyFile);
+ if (!RuntimeVerifyNativeImageDependency(pDependency, pDependencyNativeVersion, &logAsm))
+ goto NativeImageRejected;
+#endif
+ }
+ LOG((LF_ZAP, LL_INFO100, "ZAP: Native image dependencies for %S OK.\n",
+ pNativeImage->GetPath().GetUnicode()));
+
+ return;
+}
+
+NativeImageRejected:
+ m_pFile->ClearNativeImage();
+ m_pFile->SetCannotUseNativeImage();
+
+ CheckZapRequired();
+
+ return;
+}
+
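+// Returns TRUE if configuration (the ZapRequire setting) demands that this file
+// run from a native image; CheckZapRequired below turns a violation into a failure.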
+BOOL DomainFile::IsZapRequired()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (!m_pFile->HasMetadata() || !g_pConfig->RequireZap(GetSimpleName()))
+ return FALSE;
+
+#if defined(_DEBUG) && defined(FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS)
+ // If we're intentionally treating NIs as if they were MSIL assemblies, and the test
+ // is flexible enough to accept that (e.g., complus_zaprequired=2), then zaps are not
+ // required (i.e., it's ok for m_pFile->m_nativeImage to be NULL), but only if we
+ // loaded an actual NI to be treated as an IL assembly
+ if (PEFile::ShouldTreatNIAsMSIL())
+ {
+ // Since the RequireZap() call above returned true, we know that some level of
+ // zap requiredness was configured
+ _ASSERTE(g_pConfig->RequireZaps() != EEConfig::REQUIRE_ZAPS_NONE);
+
+ // If config uses this special value (2), zaps are not required, so long as
+ // we're using an actual NI as IL
+ if ((g_pConfig->RequireZaps() == EEConfig::REQUIRE_ZAPS_ALL_JIT_OK) &&
+ m_pFile->HasOpenedILimage() &&
+ m_pFile->GetOpenedILimage()->HasNativeHeader())
+ {
+ return FALSE;
+ }
+ }
+#endif // defined(_DEBUG) && defined(FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS)
+
+ // Does this look like a resource-only assembly? We assume an assembly is resource-only
+ // if it contains no TypeDef (other than the <Module> TypeDef) and no MethodDef.
+ // Note that pMD->GetCountWithTokenKind(mdtTypeDef) doesn't count the <Module> type.
+ IMDInternalImportHolder pMD = m_pFile->GetMDImport();
+ if (pMD->GetCountWithTokenKind(mdtTypeDef) == 0 && pMD->GetCountWithTokenKind(mdtMethodDef) == 0)
+ return FALSE;
+
+ DomainAssembly * pDomainAssembly = GetDomainAssembly();
+
+ // If the manifest module does not have an ngen image, the non-manifest
+ // modules cannot have one either.
+ if (m_pFile->IsModule() && !pDomainAssembly->GetFile()->CanUseNativeImage())
+ m_pFile->SetCannotUseNativeImage();
+
+ // Some cases are not supported by design. They can never have a native image.
+ // So ignore such cases
+
+ if (!m_pFile->CanUseNativeImage() &&
+ g_pConfig->RequireZaps() == EEConfig::REQUIRE_ZAPS_SUPPORTED)
+ return FALSE;
+
+ if (IsCompilationProcess())
+ {
+ // Ignore the assembly being ngened.
+
+ bool fileIsBeingNGened = false;
+
+ if (this->GetAppDomain()->IsCompilationDomain())
+ {
+ Assembly * assemblyBeingNGened = this->GetAppDomain()->ToCompilationDomain()->GetTargetAssembly();
+ if (assemblyBeingNGened == NULL || assemblyBeingNGened == pDomainAssembly->GetCurrentAssembly())
+ fileIsBeingNGened = true;
+ }
+ else if (IsSystem())
+ {
+ // mscorlib gets loaded before the CompilationDomain gets created.
+ // However, we may be ngening mscorlib itself
+ fileIsBeingNGened = true;
+ }
+
+ if (fileIsBeingNGened)
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+void DomainFile::CheckZapRequired()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (m_pFile->HasNativeImage() || !IsZapRequired())
+ return;
+
+ // Flush any log messages
+ GetFile()->FlushExternalLog();
+
+ StackSString ss;
+ ss.Printf("ZapRequire: Could not get native image for %s.\n"
+ "Use FusLogVw.exe to check the reason.",
+ GetSimpleName());
+
+#if defined(_DEBUG)
+ // Assert, as some tests may not check their error codes well, so throwing an
+ // exception may not cause a test failure (as it should).
+ StackScratchBuffer scratch;
+ DbgAssertDialog(__FILE__, __LINE__, (char*)ss.GetUTF8(scratch));
+#endif // defined(_DEBUG)
+
+ COMPlusThrowNonLocalized(kFileNotFoundException, ss.GetUnicode());
+}
+
+// Discarding an ngen image can cause problems. For more coverage,
+// this stress-mode discards ngen images even if not needed.
+
+void DomainFile::ClearNativeImageStress()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef _DEBUG
+ static ConfigDWORD clearNativeImageStress;
+ DWORD stressPercentage = clearNativeImageStress.val(CLRConfig::INTERNAL_clearNativeImageStress);
+ _ASSERTE(stressPercentage <= 100);
+ if (stressPercentage == 0 || !GetFile()->HasNativeImage())
+ return;
+
+ // Note that discarding a native image can affect dependencies. So it's not enough
+ // to only check DomainFile::IsZapRequired() here.
+ if (g_pConfig->RequireZaps() != EEConfig::REQUIRE_ZAPS_NONE)
+ return;
+
+ // It's OK to ClearNativeImage even for a shared assembly, as the current PEFile will
+ // be discarded if we decide to share the assembly. However, we always use the same
+ // PEFile for the system assembly. So discarding the native image in the current
+ // AppDomain will actually affect the system assembly in the shared domain, and other
+ // appdomains may have already committed to using its ngen image.
+ if (GetFile()->IsSystem() && !this->GetAppDomain()->IsDefaultDomain())
+ return;
+
+ if (g_IBCLogger.InstrEnabled())
+ return;
+
+ ULONG hash = HashStringA(GetSimpleName());
+
+ // Hash in the FileLoadLevel so that we make a different decision for every level.
+ FileLoadLevel fileLoadLevel = m_pDomain->GetDomainFileLoadLevel(this);
+ hash ^= ULONG(fileLoadLevel);
+ // We do not discard native images after this level
+ _ASSERTE(fileLoadLevel < FILE_LOAD_VERIFY_NATIVE_IMAGE_DEPENDENCIES);
+
+ // Different app-domains should make different decisions
+ hash ^= HashString(this->GetAppDomain()->GetFriendlyName());
+
+ // Since DbgRandomOnHashAndExe() is not so random under ngen.exe, also
+ // factor in the module being compiled
+ if (this->GetAppDomain()->IsCompilationDomain())
+ {
+ Module * module = this->GetAppDomain()->ToCompilationDomain()->GetTargetModule();
+ // Has the target module been set yet?
+ if (module)
+ hash ^= HashStringA(module->GetSimpleName());
+ }
+
+ if (DbgRandomOnHashAndExe(hash, float(stressPercentage)/100))
+ {
+ GetFile()->SetCannotUseNativeImage();
+ GetFile()->ClearNativeImage();
+ ExternalLog(LL_ERROR, "Rejecting native image for **clearNativeImageStress**");
+ }
+#endif
+}
+
+#endif // FEATURE_PREJIT
+
+void DomainFile::PreLoadLibrary()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ // Check skip verification for loading if required
+ if (!GetFile()->CanLoadLibrary())
+ {
+ DomainAssembly* pDomainAssembly = GetDomainAssembly();
+ if (pDomainAssembly->GetSecurityDescriptor()->IsResolved())
+ {
+ if (Security::CanSkipVerification(pDomainAssembly))
+ GetFile()->SetSkipVerification();
+ }
+ else
+ {
+ AppDomain *pAppDomain = this->GetAppDomain();
+ PEFile *pFile = GetFile();
+ _ASSERTE(pFile != NULL);
+ PEImage *pImage = pFile->GetILimage();
+ _ASSERTE(pImage != NULL);
+ _ASSERTE(!pImage->IsFile());
+ if (pImage->HasV1Metadata())
+ {
+ // In V1 case, try to derive SkipVerification status from parents
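+ // Walk up the chain of creator PEAssemblies; if any ancestor has resolved
+ // security and is allowed to skip verification, inherit that status here.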
+ do
+ {
+ PEAssembly * pAssembly = pFile->GetAssembly();
+ if (pAssembly == NULL)
+ break;
+ pFile = pAssembly->GetCreator();
+ if (pFile != NULL)
+ {
+ pAssembly = pFile->GetAssembly();
+ // Find the matching DomainAssembly for the given PEAssembly
+ // Perf: This does not scale
+ AssemblyIterationFlags flags =
+ (AssemblyIterationFlags) (kIncludeLoaded | kIncludeLoading | kIncludeExecution);
+ AppDomain::AssemblyIterator i = pAppDomain->IterateAssembliesEx(flags);
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (i.Next(pDomainAssembly.This()))
+ {
+ if ((pDomainAssembly != NULL) && (pDomainAssembly->GetFile() == pAssembly))
+ {
+ break;
+ }
+ }
+ if (pDomainAssembly != NULL)
+ {
+ if (pDomainAssembly->GetSecurityDescriptor()->IsResolved())
+ {
+ if (Security::CanSkipVerification(pDomainAssembly))
+ {
+ GetFile()->SetSkipVerification();
+ break;
+ }
+ }
+ }
+ else
+ {
+ // Potential bug: unable to find a DomainAssembly for the given PEAssembly.
+ // In a retail build, gracefully exit the loop.
+ _ASSERTE(pDomainAssembly != NULL);
+ break;
+ }
+ }
+ }
+ while (pFile != NULL);
+ }
+ }
+ }
+} // DomainFile::PreLoadLibrary
+
+// Note that this is the sole loading function which must be called OUTSIDE THE LOCK, since
+// it will potentially involve the OS loader lock.
+void DomainFile::LoadLibrary()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ Thread::LoadingFileHolder holder(GetThread());
+ GetThread()->SetLoadingFile(this);
+ GetFile()->LoadLibrary();
+}
+
+void DomainFile::PostLoadLibrary()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ // Note that GetFile()->LoadLibrary must be called before this OUTSIDE OF THE LOCKS
+ PRECONDITION(GetFile()->CheckLoaded());
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_PREJIT
+ if (GetFile()->HasNativeImage())
+ {
+ InsertIntoDomainFileWithNativeImageList();
+ }
+#endif
+#ifdef PROFILING_SUPPORTED
+ // After this point, it is possible to load types.
+ // We need to notify the profiler now because the profiler may need to inject methods into
+ // the module, and to do so reliably, it must have the chance to do so before
+ // any types are loaded from the module.
+ //
+ // In the past we only allowed injecting types/methods on non-NGEN images, so notifying here
+ // worked OK, but for NGEN images this is pretty ugly. ReJIT often occurs in this callback,
+ // but then during fixup the results of the LoadedMethodDesc iterator would change and we would
+ // need to re-iterate everything. Aside from ReJIT, other code often wasn't designed to handle
+ // running before fixup. A concrete example VS recently hit: calling GetClassLayout on
+ // a MethodTable which doesn't need restore, but whose parent pointer isn't fixed up yet.
+ // We've already set the rules so that profilers can't modify the member list of types in NGEN images
+ // so it doesn't matter if types are pre-loaded. We only need the guarantee that code for the
+ // loaded types won't execute yet. For NGEN images we deliver the load notification in
+ // FILE_LOAD_DELIVER_EVENTS.
+ if (!GetFile()->HasNativeImage())
+ {
+ if (!IsProfilerNotified())
+ {
+ SetProfilerNotified();
+ GetCurrentModule()->NotifyProfilerLoadFinished(S_OK);
+ }
+ }
+
+#endif
+}
+
+void DomainFile::AddDependencies()
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_PREJIT
+
+#ifdef FEATURE_CORECLR // hardbinding
+ //
+ // CoreCLR hard binds to mscorlib.dll only. No need to track hardbound dependencies.
+ //
+#else
+ // Add hard bindings as unconditional dependencies
+ if (GetFile()->HasNativeImage() && GetCurrentModule()->HasNativeImage() && IsAssembly())
+ {
+ PEImage *pNativeImage = GetFile()->GetPersistentNativeImage();
+ PEImageLayout *pNativeLayout = pNativeImage->GetLoadedLayout();
+
+ COUNT_T cDependencies;
+ CORCOMPILE_DEPENDENCY *pDependencies = pNativeLayout->GetNativeDependencies(&cDependencies);
+ CORCOMPILE_DEPENDENCY *pDependenciesEnd = pDependencies + cDependencies;
+
+ while (pDependencies < pDependenciesEnd)
+ {
+ if (pDependencies->signNativeImage != INVALID_NGEN_SIGNATURE)
+ {
+
+ //
+ // Load the manifest file for the given name assembly spec.
+ //
+
+ AssemblySpec name;
+ name.InitializeSpec(pDependencies->dwAssemblyRef,
+ pNativeImage->GetNativeMDImport(),
+ GetDomainAssembly());
+
+ DomainAssembly *pDependency = name.LoadDomainAssembly(FILE_LOADED);
+
+ // Right now we only support hard binding to other manifest modules so we don't
+ // need to consider the other module cases
+ Module *pModule = pDependency->GetModule();
+
+ // Add hard binding as an unconditional active dependency
+ STRESS_LOG4(LF_CODESHARING,LL_INFO100,"unconditional dependency %p %p %i %i\n",
+ GetFile(),GetCurrentModule(),GetFile()->HasNativeImage(),GetCurrentModule()->HasNativeImage());
+ if(!pModule->IsSystem())
+ GetCurrentModule()->AddActiveDependency(pModule, TRUE);
+ }
+
+ pDependencies++;
+ }
+ }
+#endif // FEATURE_CORECLR
+
+#endif // FEATURE_PREJIT
+}
+
+void DomainFile::EagerFixups()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_PREJIT
+ if (IsIntrospectionOnly())
+ return;
+
+ if (GetCurrentModule()->HasNativeImage())
+ {
+ GetCurrentModule()->RunEagerFixups();
+ }
+#ifdef FEATURE_READYTORUN
+ else
+ if (GetCurrentModule()->IsReadyToRun())
+ {
+#ifndef CROSSGEN_COMPILE
+ GetCurrentModule()->RunEagerFixups();
+#endif
+
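+ // Register the whole ReadyToRun image with the ExecutionManager, so that
+ // instruction pointers within it can be mapped back to this module.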
+ PEImageLayout * pLayout = GetCurrentModule()->GetReadyToRunInfo()->GetImage();
+
+ TADDR base = dac_cast<TADDR>(pLayout->GetBase());
+
+ ExecutionManager::AddCodeRange(base, base + (TADDR)pLayout->GetVirtualSize(),
+ ExecutionManager::GetReadyToRunJitManager(),
+ RangeSection::RANGE_SECTION_READYTORUN,
+ GetCurrentModule() /* (void *)pLayout */);
+ }
+#endif // FEATURE_READYTORUN
+
+#endif // FEATURE_PREJIT
+}
+
+void DomainFile::VtableFixups()
+{
+ WRAPPER_NO_CONTRACT;
+
+#if defined(FEATURE_MIXEDMODE) && !defined(CROSSGEN_COMPILE)
+ if (!GetCurrentModule()->IsResource())
+ GetCurrentModule()->FixupVTables();
+#endif
+}
+
+void DomainFile::FinishLoad()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_PREJIT
+
+ if (m_pFile->HasNativeImage())
+ {
+#ifdef FEATURE_FUSION
+ // <REVISIT_TODO>Because of bug 112034, we may commit to a native image even though
+ // we should not have.</REVISIT_TODO>
+
+// #ifdef _DEBUG
+
+ // Verify that the native image dependencies are still valid
+ // Since we had already committed to using a native image, they cannot
+ // be invalidated
+ VerifyNativeImageDependencies(true);
+ _ASSERTE(m_pFile->HasNativeImage());
+
+ if (!m_pFile->HasNativeImage())
+ {
+ STRESS_LOG1(LF_CODESHARING, LL_FATALERROR, "Incorrectly committed to using native image for %S",
+ m_pFile->GetPath().GetUnicode());
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+// #endif
+
+#endif // FEATURE_FUSION
+
+ LOG((LF_ZAP, LL_INFO10, "Using native image %S.\n", m_pFile->GetPersistentNativeImage()->GetPath().GetUnicode()));
+ ExternalLog(LL_INFO10, "Native image successfully used.");
+
+ // Inform metadata that it has been loaded from a native image
+ // (and so there was an opportunity to check for or fix inconsistencies in the original IL metadata)
+ m_pFile->GetMDImport()->SetVerifiedByTrustedSource(TRUE);
+ }
+
+ // Are we absolutely required to use a native image?
+ CheckZapRequired();
+
+#if defined(FEATURE_CORECLR) && defined(FEATURE_COMINTEROP)
+ // If this is a winmd file, ensure that the ngen reference namespace is loadable.
+ // This is necessary as on the phone we don't check ngen image dependencies, and thus we can get in a situation
+ // where a winmd is loaded as a dependency of an ngen image, but the type used to build cross module references
+ // in winmd files isn't loaded.
+ if (GetFile()->AsAssembly()->IsWindowsRuntime() && GetFile()->HasHostAssembly())
+ {
+ IMDInternalImport *pImport = GetFile()->GetPersistentMDImport();
+ LPCSTR szNameSpace;
+ LPCSTR szTypeName;
+ // It does not make sense to pass the file name to receive a fake type name for empty WinMDs, because we would use the name
+ // for binding in the next call to BindAssemblySpec, which would fail for a fake WinRT type name.
+ // We will throw/return the error instead, and the caller will recognize it and react by not creating the ngen image -
+ // see code:Zapper::ComputeDependenciesInCurrentDomain
+ if (SUCCEEDED(::GetFirstWinRTTypeDef(pImport, &szNameSpace, &szTypeName, NULL, NULL)))
+ {
+ // Build assembly spec to describe binding to that WinRT type.
+ AssemblySpec spec;
+ IfFailThrow(spec.Init("WindowsRuntimeAssemblyName, ContentType=WindowsRuntime"));
+ spec.SetWindowsRuntimeType(szNameSpace, szTypeName);
+
+ // Bind to the assembly using the CLRPriv binder infrastructure. (All WinRT loads are done through CLRPriv binders.)
+ ReleaseHolder<IAssemblyName> pAssemblyName;
+ IfFailThrow(spec.CreateFusionName(&pAssemblyName, FALSE, TRUE));
+ ReleaseHolder<ICLRPrivAssembly> pPrivAssembly;
+ IfFailThrow(GetFile()->GetHostAssembly()->BindAssemblyByName(pAssemblyName, &pPrivAssembly));
+
+ // Verify that we found this. If this invariant doesn't hold, then the ngen images that reference this winmd are invalid.
+ // ALSO, this winmd file is invalid, as it doesn't follow the spec about how it is distributed.
+ if (GetAppDomain()->FindAssembly(pPrivAssembly) != this)
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ }
+ }
+#endif // defined(FEATURE_CORECLR) && defined(FEATURE_COMINTEROP)
+#endif // FEATURE_PREJIT
+
+ // Flush any log messages
+#ifdef FEATURE_PREJIT
+ GetFile()->FlushExternalLog();
+#endif
+ // Must set this a bit prematurely for the DAC stuff to work
+ m_level = FILE_LOADED;
+
+ // Now the DAC can find this module by enumerating assemblies in a domain.
+ DACNotify::DoModuleLoadNotification(m_pModule);
+
+#if defined(DEBUGGING_SUPPORTED) && !defined(DACCESS_COMPILE)
+ if (IsDebuggerNotified() && (g_pDebugInterface != NULL))
+ {
+ // We already notified dbgapi that this module was loading (via LoadModule()).
+ // Now let the dbgapi know the module has reached FILE_LOADED, so it can do any
+ // processing that needs to wait until this stage (e.g., binding breakpoints in
+ // NGENd generics).
+ g_pDebugInterface->LoadModuleFinished(m_pModule, m_pDomain);
+ }
+#endif // defined(DEBUGGING_SUPPORTED) && !defined(DACCESS_COMPILE)
+
+ // Set a bit to indicate that the module has been loaded in some domain, and therefore
+ // typeloads can involve types from this module. (Used for candidate instantiations.)
+ GetModule()->SetIsReadyForTypeLoad();
+}
+
+void DomainFile::VerifyExecution()
+{
+ CONTRACT_VOID
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(IsLoaded());
+ STANDARD_VM_CHECK;
+ }
+ CONTRACT_END;
+
+ if (GetModule()->IsIntrospectionOnly())
+ {
+ // Throw an exception
+ COMPlusThrow(kInvalidOperationException, IDS_EE_CODEEXECUTION_IN_INTROSPECTIVE_ASSEMBLY);
+ }
+
+ if (GetModule()->GetAssembly()->IsSIMDVectorAssembly() &&
+ !GetModule()->GetAssembly()->GetSecurityDescriptor()->IsFullyTrusted())
+ {
+ COMPlusThrow(kFileLoadException, IDS_EE_SIMD_PARTIAL_TRUST_DISALLOWED);
+ }
+
+ if(GetFile()->PassiveDomainOnly())
+ {
+ // Remove path - location must be hidden for security purposes
+ LPCWSTR path=GetFile()->GetPath();
+ LPCWSTR pStart = wcsrchr(path, '\\');
+ if (pStart != NULL)
+ pStart++;
+ else
+ pStart = path;
+ COMPlusThrow(kInvalidOperationException, IDS_EE_CODEEXECUTION_ASSEMBLY_FOR_PASSIVE_DOMAIN_ONLY,pStart);
+ }
+
+ RETURN;
+}
+
+void DomainFile::Activate()
+{
+ CONTRACT_VOID
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(IsLoaded());
+ STANDARD_VM_CHECK;
+ }
+ CONTRACT_END;
+
+ // If we are a module, ensure we've activated the assembly first.
+
+ if (!IsAssembly())
+ {
+ GetDomainAssembly()->EnsureActive();
+ }
+ else
+ {
+ // We cannot execute any code in this assembly until we know what exception plan it is on.
+ // At the point of an exception's stack-crawl it is too late because we cannot tolerate a GC.
+ // See PossiblyUnwrapThrowable and its callers.
+ _ASSERTE(GetLoadedModule() == GetDomainAssembly()->GetLoadedAssembly()->GetManifestModule());
+ GetLoadedModule()->IsRuntimeWrapExceptions();
+ }
+
+ // Now activate any dependencies.
+ // This will typically cause reentrancy of course.
+
+ if (!IsSingleAppDomain())
+ {
+ // increment the counter (see the comment in Module::AddActiveDependency)
+ GetModule()->IncrementNumberOfActivations();
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ AppDomain *pDomain = this->GetAppDomain();
+ Module::DependencyIterator i = GetCurrentModule()->IterateActiveDependencies();
+ STRESS_LOG2(LF_LOADER, LL_INFO100,"Activating module %p in AD %i",GetCurrentModule(),pDomain->GetId().m_dwId);
+
+ while (i.Next())
+ {
+ Module *pModule = i.GetDependency();
+ DomainFile *pDomainFile = pModule->FindDomainFile(pDomain);
+ if (pDomainFile == NULL)
+ pDomainFile = pDomain->LoadDomainNeutralModuleDependency(pModule, FILE_LOADED);
+
+ STRESS_LOG3(LF_LOADER, LL_INFO100,"Activating dependency %p -> %p, unconditional=%i",GetCurrentModule(),pModule,i.IsUnconditional());
+
+ if (i.IsUnconditional())
+ {
+ // Let any failures propagate
+ pDomainFile->EnsureActive();
+ }
+ else
+ {
+ // Enable triggers if we fail here
+ if (!pDomainFile->TryEnsureActive())
+ GetCurrentModule()->EnableModuleFailureTriggers(pModule, this->GetAppDomain());
+ }
+ STRESS_LOG3(LF_LOADER, LL_INFO100,"Activated dependency %p -> %p, unconditional=%i",GetCurrentModule(),pModule,i.IsUnconditional());
+ }
+#endif
+ }
+
+#ifndef CROSSGEN_COMPILE
+ if (m_pModule->CanExecuteCode())
+ {
+ //
+ // Now call the module constructor. Note that this might cause reentrancy;
+ // this is fine and will be handled by the class cctor mechanism.
+ //
+
+ MethodTable *pMT = m_pModule->GetGlobalMethodTable();
+ if (pMT != NULL)
+ {
+ pMT->CheckRestore();
+ m_bDisableActivationCheck=TRUE;
+ pMT->CheckRunClassInitThrowing();
+ }
+#ifdef FEATURE_CORECLR
+ if (g_pConfig->VerifyModulesOnLoad())
+ {
+ m_pModule->VerifyAllMethods();
+ }
+#endif //FEATURE_CORECLR
+#ifdef _DEBUG
+ if (g_pConfig->ExpandModulesOnLoad())
+ {
+ m_pModule->ExpandAll();
+ }
+#endif //_DEBUG
+ }
+ else
+ {
+ // This exception does not need to be localized as it can only happen in
+ // NGen and PEVerify, and we are not localizing those tools.
+ _ASSERTE(this->GetAppDomain()->IsPassiveDomain());
+ // This assert will fire if we attempt to run non-mscorlib code from within ngen
+ // Current audits of the system indicate that this will never occur, but if it does
+ // the exception below will prevent actual non-mscorlib code execution.
+ _ASSERTE(!this->GetAppDomain()->IsCompilationDomain());
+
+ LPCWSTR message = W("You may be trying to evaluate a permission from an assembly ")
+ W("without FullTrust, or which cannot execute code for other reasons.");
+ COMPlusThrowNonLocalized(kFileLoadException, message);
+ }
+#endif // CROSSGEN_COMPILE
+
+ RETURN;
+}
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+BOOL DomainFile::PropagateActivationInAppDomain(Module *pModuleFrom, Module *pModuleTo, AppDomain* pDomain)
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckPointer(pModuleFrom));
+ PRECONDITION(CheckPointer(pModuleTo));
+ THROWS; // should only throw transient failures
+ DISABLED(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_MULTICOREJIT
+ // Reset the flag to allow managed code to be called in multicore JIT background thread from this routine
+ ThreadStateNCStackHolder holder(-1, Thread::TSNC_CallingManagedCodeDisabled);
+#endif
+
+ BOOL completed = TRUE;
+ EX_TRY
+ {
+ GCX_COOP();
+
+ ENTER_DOMAIN_PTR(pDomain,ADV_ITERATOR); //iterator
+ DomainFile *pDomainFileFrom = pModuleFrom->FindDomainFile(pDomain);
+ if (pDomainFileFrom != NULL && pDomain->IsLoading(pDomainFileFrom, FILE_ACTIVE))
+ {
+ STRESS_LOG3(LF_LOADER, LL_INFO100,"Found DomainFile %p for module %p in AppDomain %i\n",pDomainFileFrom,pModuleFrom,pDomain->GetId().m_dwId);
+ DomainFile *pDomainFileTo = pModuleTo->FindDomainFile(pDomain);
+ if (pDomainFileTo == NULL)
+ pDomainFileTo = pDomain->LoadDomainNeutralModuleDependency(pModuleTo, FILE_LOADED);
+
+ if (!pDomainFileTo->TryEnsureActive())
+ pModuleFrom->EnableModuleFailureTriggers(pModuleTo, pDomain);
+ else if (!pDomainFileTo->IsActive())
+ {
+ // We are in a reentrant case
+ completed = FALSE;
+ }
+ }
+ END_DOMAIN_TRANSITION;
+ }
+ EX_CATCH
+ {
+ if (!IsExceptionOfType(kAppDomainUnloadedException, GET_EXCEPTION()))
+ EX_RETHROW;
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ return completed;
+}
+#endif
+
+// Returns TRUE if activation is completed for all app domains
+// static
+BOOL DomainFile::PropagateNewActivation(Module *pModuleFrom, Module *pModuleTo)
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckPointer(pModuleFrom));
+ PRECONDITION(CheckPointer(pModuleTo));
+ THROWS; // should only throw transient failures
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BOOL completed = TRUE;
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ if (pModuleFrom->GetAssembly()->IsDomainNeutral())
+ {
+ AppDomainIterator ai(TRUE);
+ Thread *pThread = GetThread();
+
+ while (ai.Next())
+ {
+ STRESS_LOG3(LF_LOADER, LL_INFO100,"Attempting to propagate domain-neutral conditional module dependency %p -> %p to AppDomain %i\n",pModuleFrom,pModuleTo,ai.GetDomain()->GetId().m_dwId);
+ // This is to minimize the chances of trying to run code in an appdomain that's shutting down.
+ if (ai.GetDomain()->CanThreadEnter(pThread))
+ {
+ completed &= PropagateActivationInAppDomain(pModuleFrom,pModuleTo,ai.GetDomain());
+ }
+ }
+ }
+ else
+#endif
+ {
+ AppDomain *pDomain = pModuleFrom->GetDomain()->AsAppDomain();
+ DomainFile *pDomainFileFrom = pModuleFrom->GetDomainFile(pDomain);
+ if (pDomain->IsLoading(pDomainFileFrom, FILE_ACTIVE))
+ {
+ // The dependency should already be loaded
+ DomainFile *pDomainFileTo = pModuleTo->GetDomainFile(pDomain);
+ if (!pDomainFileTo->TryEnsureActive())
+ pModuleFrom->EnableModuleFailureTriggers(pModuleTo, pDomain);
+ else if (!pDomainFileTo->IsActive())
+ {
+ // Reentrant case
+ completed = FALSE;
+ }
+ }
+ }
+
+ return completed;
+}
+
+// Checks that module has not been activated in any domain
+CHECK DomainFile::CheckUnactivatedInAllDomains(Module *pModule)
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckPointer(pModule));
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pModule->GetAssembly()->IsDomainNeutral())
+ {
+ AppDomainIterator ai(TRUE);
+
+ while (ai.Next())
+ {
+ AppDomain *pDomain = ai.GetDomain();
+ DomainFile *pDomainFile = pModule->FindDomainFile(pDomain);
+ if (pDomainFile != NULL)
+ CHECK(!pDomainFile->IsActive());
+ }
+ }
+ else
+ {
+ DomainFile *pDomainFile = pModule->FindDomainFile(pModule->GetDomain()->AsAppDomain());
+ if (pDomainFile != NULL)
+ CHECK(!pDomainFile->IsActive());
+ }
+
+ CHECK_OK;
+}
+
+#ifdef FEATURE_PREJIT
+DomainFile *DomainFile::FindNextDomainFileWithNativeImage()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pNextDomainFileWithNativeImage;
+}
+
+void DomainFile::InsertIntoDomainFileWithNativeImageList()
+{
+ LIMITED_METHOD_CONTRACT;
+
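+ // Lock-free list push: snapshot the current head, link ourselves in front of
+ // it, and publish with a CAS; retry if another thread moved the head first.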
+ while (true)
+ {
+ DomainFile *pLastDomainFileFoundWithNativeImage = m_pDomain->m_pDomainFileWithNativeImageList;
+ m_pNextDomainFileWithNativeImage = pLastDomainFileFoundWithNativeImage;
+ if (pLastDomainFileFoundWithNativeImage == InterlockedCompareExchangeT(&m_pDomain->m_pDomainFileWithNativeImageList, this, pLastDomainFileFoundWithNativeImage))
+ break;
+ }
+}
+#endif
+
+//--------------------------------------------------------------------------------
+// DomainAssembly
+//--------------------------------------------------------------------------------
+
+DomainAssembly::DomainAssembly(AppDomain *pDomain, PEFile *pFile, AssemblyLoadSecurity *pLoadSecurity, LoaderAllocator *pLoaderAllocator)
+ : DomainFile(pDomain, pFile),
+ m_pAssembly(NULL),
+ m_debuggerFlags(DACF_NONE),
+#ifdef FEATURE_FUSION
+ m_pAssemblyBindingClosure(NULL),
+#endif
+ m_MissingDependenciesCheckStatus(CMD_Unknown),
+ m_fSkipPolicyResolution(pLoadSecurity != NULL && !pLoadSecurity->ShouldResolvePolicy()),
+ m_fDebuggerUnloadStarted(FALSE),
+ m_fCollectible(pLoaderAllocator->IsCollectible()),
+ m_fHostAssemblyPublished(false),
+ m_fCalculatedShouldLoadDomainNeutral(false),
+ m_fShouldLoadDomainNeutral(false)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ STANDARD_VM_CHECK;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ pFile->ValidateForExecution();
+
+#ifndef CROSSGEN_COMPILE
+ if (m_fCollectible)
+ {
+ ((AssemblyLoaderAllocator *)pLoaderAllocator)->SetDomainAssembly(this);
+ }
+#endif
+
+ // !!! backout
+
+ m_hExposedAssemblyObject = NULL;
+
+ NewHolder<IAssemblySecurityDescriptor> pSecurityDescriptorHolder(Security::CreateAssemblySecurityDescriptor(pDomain, this, pLoaderAllocator));
+
+ if (pLoadSecurity != NULL)
+ {
+#ifdef FEATURE_CAS_POLICY
+ // If this assembly had a file name specified, we aren't allowed to load from remote sources, and
+ // we aren't in CAS policy mode (which sandboxes remote assemblies automatically), then we need to
+ // do a check on this assembly's zone of origin when creating it.
+ if (pLoadSecurity->m_fCheckLoadFromRemoteSource &&
+ !pLoadSecurity->m_fSuppressSecurityChecks &&
+ !m_pDomain->GetSecurityDescriptor()->AllowsLoadsFromRemoteSources() &&
+ !pFile->IsIntrospectionOnly())
+ {
+ SString strCodeBase;
+ BYTE pbUniqueID[MAX_SIZE_SECURITY_ID];
+ DWORD cbUniqueID = COUNTOF(pbUniqueID);
+ SecZone dwZone = NoZone;
+
+ GetSecurityIdentity(strCodeBase,
+ &dwZone,
+ 0,
+ pbUniqueID,
+ &cbUniqueID);
+
+ // Since loads from remote sources are not enabled for this assembly, we only want to allow the
+ // load if any of the following conditions apply:
+ //
+ // * The load is coming off the local machine
+ // * The load is coming from the intranet or a trusted site, and the code base is UNC (i.e.,
+ // don't allow HTTP loads off the local intranet)
+
+ bool safeLoad = false;
+ if (dwZone == LocalMachine)
+ {
+ safeLoad = true;
+ }
+ else if (dwZone == Intranet || dwZone == Trusted)
+ {
+ if (UrlIsFileUrl(strCodeBase.GetUnicode()))
+ {
+ safeLoad = true;
+ }
+ else if (PathIsUNC(strCodeBase.GetUnicode()))
+ {
+ safeLoad = true;
+ }
+ }
+
+ if (!safeLoad)
+ {
+ // We've tried to load an assembly from a location where it would have been sandboxed in legacy
+ // CAS situations, but the application hasn't indicated that this is a safe thing to do. In
+ // order to prevent accidental security holes by silently loading assemblies in full trust that
+ // an application expected to be sandboxed, we'll throw an exception instead.
+ //
+ // Since this exception can commonly occur if the file is physically located on the
+ // hard drive but has the mark of the web on it, we'll also try to detect this mark and
+ // provide a customized error message if we find it. We do that by re-evaluating the
+ // assembly's zone with the NOSAVEDFILECHECK flag, which ignores the mark of the web, and if
+ // that comes back as MyComputer we flag the assembly as having the mark of the web on it.
+ SecZone dwNoMotwZone = NoZone;
+ GetSecurityIdentity(strCodeBase, &dwNoMotwZone, MUTZ_NOSAVEDFILECHECK, pbUniqueID, &cbUniqueID);
+
+ if (dwNoMotwZone == LocalMachine)
+ {
+ COMPlusThrow(kNotSupportedException, IDS_E_LOADFROM_REMOTE_SOURCE_MOTW);
+ }
+ else
+ {
+ COMPlusThrow(kNotSupportedException, IDS_E_LOADFROM_REMOTE_SOURCE);
+ }
+ }
+ }
+#endif // FEATURE_CAS_POLICY
+
+ if (GetFile()->IsSourceGAC())
+ {
+ // Assemblies in the GAC are not allowed to
+ // specify additional evidence. They must always follow default machine policy rules.
+
+ // So, we just ignore the evidence. (Ideally we would throw an error, but it would introduce app
+ // compat issues.)
+ }
+ else
+ {
+#ifdef FEATURE_FUSION
+ // We do not support sharing behavior of ALWAYS when using evidence to load assemblies
+ if (pDomain->GetSharePolicy() == AppDomain::SHARE_POLICY_ALWAYS
+ && ShouldLoadDomainNeutral())
+ {
+ // Just because we have information about the loaded assembly's security doesn't mean that
+ // we're trying to override evidence, make sure we're not just trying to push a grant set
+ if (((pLoadSecurity->m_pEvidence != NULL) && (*pLoadSecurity->m_pEvidence != NULL)) ||
+ ((pLoadSecurity->m_pAdditionalEvidence != NULL) && (*pLoadSecurity->m_pAdditionalEvidence != NULL)))
+ {
+ // We may not be able to reduce sharing policy at this point, if we have already loaded
+ // some non-GAC assemblies as domain neutral. For this case we must regrettably fail
+ // the whole operation.
+ if (!pDomain->ReduceSharePolicyFromAlways())
+ {
+ ThrowHR(COR_E_CANNOT_SPECIFY_EVIDENCE);
+ }
+ }
+ }
+#endif
+ {
+ GCX_COOP();
+
+#ifdef FEATURE_CAS_POLICY
+ if (pLoadSecurity->m_pAdditionalEvidence != NULL)
+ {
+ if(*pLoadSecurity->m_pAdditionalEvidence != NULL)
+ {
+ pSecurityDescriptorHolder->SetAdditionalEvidence(*pLoadSecurity->m_pAdditionalEvidence);
+ }
+ }
+ else if (pLoadSecurity->m_pEvidence != NULL)
+ {
+ if (*pLoadSecurity->m_pEvidence != NULL)
+ {
+ pSecurityDescriptorHolder->SetEvidence(*pLoadSecurity->m_pEvidence);
+ }
+ }
+#endif // FEATURE_CAS_POLICY
+
+ // If the assembly being loaded already knows its grant set (for instance, it's being pushed
+ // from the loading assembly), then we can set that up now as well
+ if (!pLoadSecurity->ShouldResolvePolicy())
+ {
+ _ASSERTE(pLoadSecurity->m_pGrantSet != NULL);
+
+#ifdef FEATURE_CAS_POLICY
+ // The permissions from an anonymously hosted dynamic method are fulltrust/transparent,
+ // so ensure we have full trust to pass that on to the new assembly
+ if(pLoadSecurity->m_fPropagatingAnonymouslyHostedDynamicMethodGrant &&
+ !CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_Security_DisableAnonymouslyHostedDynamicMethodCreatorSecurityCheck))
+ {
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_FULL_TRUST);
+ }
+#endif // FEATURE_CAS_POLICY
+
+ pSecurityDescriptorHolder->PropagatePermissionSet(
+ *pLoadSecurity->m_pGrantSet,
+ pLoadSecurity->m_pRefusedSet == NULL ? NULL : *pLoadSecurity->m_pRefusedSet,
+ pLoadSecurity->m_dwSpecialFlags);
+ }
+ }
+ }
+ }
+
+ SetupDebuggingConfig();
+
+ // Add a Module iterator entry for this assembly.
+ IfFailThrow(m_Modules.Append(this));
+
+ m_pSecurityDescriptor = pSecurityDescriptorHolder.Extract();
+}
+
+DomainAssembly::~DomainAssembly()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ #ifdef FEATURE_HOSTED_BINDER
+ if (m_fHostAssemblyPublished)
+ {
+ // Remove association first.
+ GetAppDomain()->UnPublishHostedAssembly(this);
+ }
+ #endif
+
+ ModuleIterator i = IterateModules(kModIterIncludeLoading);
+ while (i.Next())
+ {
+ if (i.GetDomainFile() != this)
+ delete i.GetDomainFile();
+ }
+
+ if (m_pAssembly != NULL && !m_pAssembly->IsDomainNeutral())
+ {
+ delete m_pAssembly;
+ }
+
+ delete m_pSecurityDescriptor;
+}
+
+void DomainAssembly::ReleaseFiles()
+{
+ STANDARD_VM_CONTRACT;
+
+ if(m_pAssembly)
+ m_pAssembly->StartUnload();
+#ifdef FEATURE_FUSION
+ // release the old closure from the holder
+ m_pAssemblyBindingClosure=NULL;
+#endif
+ ModuleIterator i = IterateModules(kModIterIncludeLoading);
+ while (i.Next())
+ {
+ if (i.GetDomainFile() != this)
+ i.GetDomainFile()->ReleaseFiles();
+ }
+
+ DomainFile::ReleaseFiles();
+}
+
+void DomainAssembly::SetAssembly(Assembly* pAssembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ UpdatePEFile(pAssembly->GetManifestFile());
+ _ASSERTE(pAssembly->GetManifestModule()->GetFile()==m_pFile);
+ m_pAssembly = pAssembly;
+ m_pModule = pAssembly->GetManifestModule();
+
+ pAssembly->SetDomainAssembly(this);
+}
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+void DomainAssembly::AddModule(DomainModule *pModule)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DWORD index = RidFromToken(pModule->GetToken());
+
+ while (index >= m_Modules.GetCount())
+ IfFailThrow(m_Modules.Append(NULL));
+
+ m_Modules.Set(index, pModule);
+}
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+#ifndef CROSSGEN_COMPILE
+//---------------------------------------------------------------------------------------
+//
+// Returns managed representation of the assembly (Assembly or AssemblyBuilder).
+// Returns NULL if the managed scout was already collected (see code:LoaderAllocator#AssemblyPhases).
+//
+OBJECTREF DomainAssembly::GetExposedAssemblyObject()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ LoaderAllocator * pLoaderAllocator = GetLoaderAllocator();
+
+ if (!pLoaderAllocator->IsManagedScoutAlive())
+ { // We already collected the managed scout, so we cannot re-create any managed objects
+ // Note: This is an optimization, as the managed scout can be collected right after this check
+ return NULL;
+ }
+
+ if (m_hExposedAssemblyObject == NULL)
+ {
+ // Atomically create a handle
+
+ LOADERHANDLE handle = pLoaderAllocator->AllocateHandle(NULL);
+
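+ // Same racy-publish pattern as in GetExposedModuleObject: only one thread's
+ // handle survives the compare-exchange.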
+ FastInterlockCompareExchangePointer(&m_hExposedAssemblyObject, handle, static_cast<LOADERHANDLE>(NULL));
+ }
+
+ if (pLoaderAllocator->GetHandleValue(m_hExposedAssemblyObject) == NULL)
+ {
+ ASSEMBLYREF assemblyObj = NULL;
+ MethodTable * pMT;
+ if (GetFile()->IsDynamic())
+ {
+ // This is unnecessary because the managed InternalAssemblyBuilder object
+ // should have already been created at the time of DefineDynamicAssembly
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+ pMT = MscorlibBinder::GetClass(CLASS__INTERNAL_ASSEMBLY_BUILDER);
+ }
+ else
+ {
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+ pMT = MscorlibBinder::GetClass(CLASS__ASSEMBLY);
+ }
+
+ // Will be TRUE only if LoaderAllocator managed object was already collected and therefore we should
+ // return NULL
+ BOOL fIsLoaderAllocatorCollected = FALSE;
+
+ // Create the assembly object
+ GCPROTECT_BEGIN(assemblyObj);
+ assemblyObj = (ASSEMBLYREF)AllocateObject(pMT);
+
+ assemblyObj->SetAssembly(this);
+
+ // Attach the reference to the assembly to keep the LoaderAllocator for this collectible type
+ // alive as long as a reference to the assembly is kept alive.
+ // Currently we overload the sync root field of the assembly to do so, but the overload is not necessary.
+ if (GetAssembly() != NULL)
+ {
+ OBJECTREF refLA = GetAssembly()->GetLoaderAllocator()->GetExposedObject();
+ if ((refLA == NULL) && GetAssembly()->GetLoaderAllocator()->IsCollectible())
+ { // The managed LoaderAllocator object was collected
+ fIsLoaderAllocatorCollected = TRUE;
+ }
+ assemblyObj->SetSyncRoot(refLA);
+ }
+
+ if (!fIsLoaderAllocatorCollected)
+ { // We should not expose this value in case the LoaderAllocator managed object was already
+ // collected
+ pLoaderAllocator->CompareExchangeValueInHandle(m_hExposedAssemblyObject, (OBJECTREF)assemblyObj, NULL);
+ }
+ GCPROTECT_END();
+
+ if (fIsLoaderAllocatorCollected)
+ { // The LoaderAllocator managed object was already collected, we cannot re-create it
+ // Note: We did not publish the allocated Assembly/AssemblyBuilder object; it will get collected
+ // by GC
+ return NULL;
+ }
+ }
+
+ return pLoaderAllocator->GetHandleValue(m_hExposedAssemblyObject);
+} // DomainAssembly::GetExposedAssemblyObject
+#endif // CROSSGEN_COMPILE
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+
+#ifdef FEATURE_FUSION
+// This inner method exists to avoid EX_TRY calling _alloca repeatedly in the for loop below.
+DomainAssembly::CMDI_Result DomainAssembly::CheckMissingDependencyInner(IAssemblyBindingClosure* pClosure, DWORD idx)
+{
+ CONTRACTL {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ SafeComHolder<IAssemblyName> pAssemblyName;
+ HRESULT hrBindFailure = S_OK;
+ HRESULT hr = pClosure->GetNextFailureAssembly(idx, &pAssemblyName, &hrBindFailure);
+ if (hr == HRESULT_FROM_WIN32(ERROR_NO_MORE_ITEMS))
+ {
+ return CMDI_End;
+ }
+
+ IfFailThrow(hr);
+
+ CMDI_Result ret = CMDI_AssemblyResolveFailed;
+ AssemblySpec spec;
+ PEAssemblyHolder result;
+
+ EX_TRY
+ {
+ spec.InitializeSpec(pAssemblyName, this, FALSE);
+ result = this->GetAppDomain()->TryResolveAssembly(&spec,FALSE);
+
+ if (result && result->CanUseWithBindingCache())
+ {
+ this->GetAppDomain()->AddFileToCache(&spec, result);
+ ret = CMDI_AssemblyResolveSucceeded;
+ }
+ else
+ {
+ _ASSERTE(FAILED(hrBindFailure));
+
+ StackSString name;
+ spec.GetFileOrDisplayName(0, name);
+ NewHolder<EEFileLoadException> pEx(new EEFileLoadException(name, hrBindFailure));
+ this->GetAppDomain()->AddExceptionToCache(&spec, pEx);
+ }
+ }
+ EX_CATCH
+ {
+ // For compat reasons, we don't want to throw right now but make sure that we
+ // cache the exception so that it can be thrown if/when we try to load the assembly
+ // further down the road. See VSW 528532 for more details.
+ }
+ EX_END_CATCH(RethrowTransientExceptions);
+
+ return ret;
+}
+
+
+// CheckMissingDependencies returns FALSE if any missing dependency would
+// successfully bind with an AssemblyResolve event. When this is the case, we
+// want to avoid sharing this assembly, since AssemblyResolve events are not
+// under our control, and therefore not predictable.
+CMD_State DomainAssembly::CheckMissingDependencies()
+{
+ CONTRACTL {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ if (MissingDependenciesCheckDone())
+ return m_MissingDependenciesCheckStatus;
+
+ if (this->GetAppDomain()->IsCompilationDomain())
+ {
+ // Compilation domains will never have resolve events. Plus, this path
+ // will sidestep the compilation domain's bind override, which will make
+ // us skip over some dependencies.
+ m_MissingDependenciesCheckStatus = CMD_NotNeeded;
+ return m_MissingDependenciesCheckStatus;
+ }
+
+ if (IsSystem())
+ {
+ m_MissingDependenciesCheckStatus = CMD_NotNeeded;
+ return m_MissingDependenciesCheckStatus;
+ }
+
+ GCX_PREEMP();
+ IAssemblyBindingClosure * pClosure = GetAssemblyBindingClosure(LEVEL_COMPLETE);
+
+ if(pClosure == NULL)
+ {
+ // If there is no closure, there is nothing to iterate.
+ m_MissingDependenciesCheckStatus = CMD_NotNeeded;
+ return m_MissingDependenciesCheckStatus;
+ }
+
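+ // Walk the closure's bind-failure list one entry at a time; the loop exits through the
+ // returns in the switch below once the inner check reports CMDI_End or a successful resolve.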
+ for (DWORD idx = 0;;idx++)
+ {
+ switch (CheckMissingDependencyInner(pClosure, idx))
+ {
+ case CMDI_AssemblyResolveSucceeded:
+ {
+ STRESS_LOG1(LF_CODESHARING,LL_INFO100,"Missing dependencies check FAILED, DomainAssembly=%p",this);
+ m_MissingDependenciesCheckStatus = CMD_Resolved;
+ return m_MissingDependenciesCheckStatus;
+ break;
+ }
+
+ case CMDI_End:
+ {
+ STRESS_LOG1(LF_CODESHARING,LL_INFO100,"Missing dependencies check SUCCESSFUL, DomainAssembly=%p",this);
+ m_MissingDependenciesCheckStatus = CMD_IndeedMissing;
+ return m_MissingDependenciesCheckStatus;
+ break;
+ }
+
+ case CMDI_AssemblyResolveFailed:
+ {
+ // Don't take any action, just continue the loop.
+ break;
+ }
+ }
+ }
+}
+#endif // FEATURE_FUSION
+
+BOOL DomainAssembly::MissingDependenciesCheckDone()
+{
+ return m_MissingDependenciesCheckStatus != CMD_Unknown;
+}
+
+#ifdef FEATURE_CORECLR
+CMD_State DomainAssembly::CheckMissingDependencies()
+{
+ //CoreCLR simply doesn't share if dependencies are missing
+ return CMD_NotNeeded;
+}
+#endif // FEATURE_CORECLR
+
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+DomainFile* DomainAssembly::FindModule(PEFile *pFile, BOOL includeLoading)
+{
+ CONTRACT (DomainFile*)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ ModuleIterator i = IterateModules(includeLoading ? kModIterIncludeLoading : kModIterIncludeLoaded);
+ while (i.Next())
+ {
+ if (i.GetDomainFile()->Equals(pFile))
+ RETURN i.GetDomainFile();
+ }
+ RETURN NULL;
+}
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+DomainFile* DomainAssembly::FindIJWModule(HMODULE hMod)
+{
+ CONTRACT (DomainFile*)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ ModuleIterator i = IterateModules(kModIterIncludeLoaded);
+ while (i.Next())
+ {
+ PEFile *pFile = i.GetDomainFile()->GetFile();
+
+ if ( !pFile->IsResource()
+ && !pFile->IsDynamic()
+ && !pFile->IsILOnly()
+ && pFile->GetIJWBase() == hMod)
+ {
+ RETURN i.GetDomainFile();
+ }
+ }
+ RETURN NULL;
+}
+
+
+void DomainAssembly::Begin()
+{
+ STANDARD_VM_CONTRACT;
+
+ {
+ AppDomain::LoadLockHolder lock(m_pDomain);
+ m_pDomain->AddAssembly(this);
+ }
+#ifdef FEATURE_HOSTED_BINDER
+ // Make it possible to find this DomainAssembly object from associated ICLRPrivAssembly.
+ GetAppDomain()->PublishHostedAssembly(this);
+ m_fHostAssemblyPublished = true;
+#endif
+}
+
+#ifdef FEATURE_PREJIT
+void DomainAssembly::FindNativeImage()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ // For non-Apollo builds (i.e., when FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS is
+ // NOT defined), this is how we avoid use of NGEN when diagnostics requests it: By
+ // clearing it out and forcing a load of the MSIL assembly. For Apollo builds
+ // (FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS), though, this doesn't work, as we
+ // don't have MSIL assemblies handy (particularly for Fx Assemblies), so we need to
+ // keep the NGENd image loaded, but to treat it as if it were an MSIL assembly. See
+ // code:PEFile::SetNativeImage.
+#ifndef FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS
+ if (!NGENImagesAllowed())
+ {
+ GetFile()->SetCannotUseNativeImage();
+
+ if (GetFile()->HasNativeImage())
+ GetFile()->ClearNativeImage();
+
+ return;
+ }
+#endif // FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS
+
+#ifndef FEATURE_CORECLR // hardbinding
+ // The IsSafeToHardBindTo() check is only for use during the ngen compilation phase. It discards ngen images for
+ // assemblies that aren't hard-bound to (as this would cause all such assemblies to be loaded eagerly).
+ if (!IsSystem() && this->GetAppDomain()->IsCompilationDomain() && !GetFile()->IsSafeToHardBindTo())
+ {
+ if (!this->GetAppDomain()->ToCompilationDomain()->IsSafeToHardBindTo(GetFile()))
+ {
+ GetFile()->SetCannotUseNativeImage();
+
+ if (GetFile()->HasNativeImage())
+ GetFile()->ClearNativeImage();
+
+ return;
+ }
+
+ GetFile()->SetSafeToHardBindTo();
+ }
+#endif
+
+#ifdef FEATURE_FUSION
+ DomainAssembly * pDomainAssembly = GetDomainAssembly();
+ if (pDomainAssembly->GetSecurityDescriptor()->HasAdditionalEvidence() ||
+ !(pDomainAssembly->GetFile()->IsContextLoad()
+#ifdef FEATURE_HOSTED_BINDER
+ || pDomainAssembly->GetFile()->HasHostAssembly()
+#endif
+ ))
+ {
+ m_pFile->SetCannotUseNativeImage();
+ }
+#endif //FEATURE_FUSION
+
+ ClearNativeImageStress();
+
+ // We already have an image - we just need to do a few more checks
+
+ if (GetFile()->HasNativeImage())
+ {
+#if defined(_DEBUG) && defined(FEATURE_CORECLR)
+ if (g_pConfig->ForbidZap(GetSimpleName()))
+ {
+ SString sbuf;
+ StackScratchBuffer scratch;
+ sbuf.Printf("COMPLUS_NgenBind_ZapForbid violation: %s.", GetSimpleName());
+ DbgAssertDialog(__FILE__, __LINE__, sbuf.GetUTF8(scratch));
+ }
+#endif
+
+ ReleaseHolder<PEImage> pNativeImage = GetFile()->GetNativeImageWithRef();
+
+ if(!IsSystem() && !SystemDomain::System()->SystemFile()->HasNativeImage() && !CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NgenAllowMscorlibSoftbind))
+ {
+ m_dwReasonForRejectingNativeImage = ReasonForRejectingNativeImage_MscorlibNotNative;
+ STRESS_LOG2(LF_ZAP,LL_INFO100,"Rejecting native file %p, because mscolib has not NI - reason 0x%x\n",pNativeImage.GetValue(),m_dwReasonForRejectingNativeImage);
+ ExternalLog(LL_ERROR, "Rejecting native image because mscorlib does not have native image");
+#ifdef FEATURE_FUSION
+ if(GetFile())
+ GetFile()->ETWTraceLogMessage(ETW::BinderLog::BinderStructs::NGEN_BIND_SYSTEM_ASSEMBLY_NATIVEIMAGE_NOT_AVAILABLE, NULL);
+#endif
+ GetFile()->ClearNativeImage();
+
+#ifdef FEATURE_WINDOWSPHONE
+ // On Phone, always throw an exception when we throw the NI out
+ ThrowHR(CLR_E_BIND_SYS_ASM_NI_MISSING);
+#endif
+ }
+ else if (!CheckZapSecurity(pNativeImage))
+ {
+ m_dwReasonForRejectingNativeImage = ReasonForRejectingNativeImage_FailedSecurityCheck;
+ STRESS_LOG2(LF_ZAP,LL_INFO100,"Rejecting native file %p, because security check failed - reason 0x%x\n",pNativeImage.GetValue(),m_dwReasonForRejectingNativeImage);
+ ExternalLog(LL_ERROR, "Rejecting native image because it failed the security check. "
+ "The assembly's permissions must have changed since the time it was ngenned, "
+ "or it is running with a different security context.");
+
+#ifdef FEATURE_FUSION
+ if(GetFile())
+ GetFile()->ETWTraceLogMessage(ETW::BinderLog::BinderStructs::NGEN_BIND_ASSEMBLY_HAS_DIFFERENT_GRANT, NULL);
+#endif
+ GetFile()->ClearNativeImage();
+
+#ifdef FEATURE_WINDOWSPHONE
+ // On Phone, always throw an exception when we throw the NI out
+ ThrowHR(CLR_E_BIND_NI_SECURITY_FAILURE);
+#endif
+
+ }
+ else if (!CheckZapDependencyIdentities(pNativeImage))
+ {
+ m_dwReasonForRejectingNativeImage = ReasonForRejectingNativeImage_DependencyIdentityMismatch;
+ STRESS_LOG2(LF_ZAP,LL_INFO100,"Rejecting native file %p, because dependency identity mismatch - reason 0x%x\n",pNativeImage.GetValue(),m_dwReasonForRejectingNativeImage);
+ ExternalLog(LL_ERROR, "Rejecting native image because of identity mismatch "
+ "with one or more of its assembly dependencies. The assembly needs "
+ "to be ngenned again");
+
+#ifdef FEATURE_FUSION
+ if(GetFile())
+ GetFile()->ETWTraceLogMessage(ETW::BinderLog::BinderStructs::NGEN_BIND_DEPENDENCY_HAS_DIFFERENT_IDENTITY, NULL);
+#endif
+ GetFile()->ClearNativeImage();
+
+#ifdef FEATURE_WINDOWSPHONE
+ // On Phone, always throw an exception when we throw the NI out
+ ThrowHR(CLR_E_BIND_NI_DEP_IDENTITY_MISMATCH);
+#endif
+
+ }
+ else
+ {
+ // We can only use a native image for a single Module. If this is a domain-bound
+ // load, we know that this means only a single load will use this image, so we can just
+ // flag it as in use.
+
+ // If, on the other hand, we are going to be domain neutral, we may have many loads use
+ // the same native image. Still, we only want to allow the native image to be used
+ // by loads which are going to end up with the same Module. So, we have to compute
+ // eagerly, now, whether that will be the case. To enable this computation,
+ // we store the binding closure in the image.
+
+ Module * pNativeModule = pNativeImage->GetLoadedLayout()->GetPersistedModuleImage();
+ EnsureWritablePages(pNativeModule);
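+ // The PEFile* slot embedded in the persisted Module image acts as the ownership marker
+ // for the native image: whichever PEFile gets stored there first (via the
+ // compare-exchange below) is the one allowed to use the image.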
+ PEFile ** ppNativeFile = (PEFile **) (PBYTE(pNativeModule) + Module::GetFileOffset());
+ BOOL bExpectedToBeShared = ShouldLoadDomainNeutral();
+ if (!bExpectedToBeShared)
+ {
+ GetFile()->SetNativeImageUsedExclusively();
+ }
+#ifdef FEATURE_FUSION
+ else
+ {
+ if (!IsSystem())
+ {
+ GetFile()->SetNativeImageClosure(GetAssemblyBindingClosure(LEVEL_STARTING));
+ }
+ }
+#endif //FEATURE_FUSION
+
+ PEAssembly * pFile = (PEAssembly *)FastInterlockCompareExchangePointer((void **)ppNativeFile, (void *)GetFile(), (void *)NULL);
+ STRESS_LOG3(LF_ZAP,LL_INFO100,"Attempted to set new native file %p, old file was %p, location in the image=%p\n",GetFile(),pFile,ppNativeFile);
+ if (pFile!=NULL && !IsSystem() &&
+
+ ( !bExpectedToBeShared ||
+ pFile == PEFile::Dummy() ||
+ pFile->IsNativeImageUsedExclusively() ||
+#ifdef FEATURE_FUSION
+ !pFile->HasEqualNativeClosure(this) ||
+#endif //FEATURE_FUSION
+ !(GetFile()->GetPath().Equals(pFile->GetPath())))
+
+ )
+ {
+ // The non-shareable native image has already been used in this process by another Module.
+ // We have to abandon the native image. (Note that it isn't enough to
+ // just abandon the preload image, since the code in the file will
+ // reference the image directly).
+ m_dwReasonForRejectingNativeImage = ReasonForRejectingNativeImage_CannotShareNiAssemblyNotDomainNeutral;
+ STRESS_LOG3(LF_ZAP,LL_INFO100,"Rejecting native file %p, because it is already used by file %p - reason 0x%x\n",GetFile(),pFile,m_dwReasonForRejectingNativeImage);
+
+ ExternalLog(LL_WARNING, "ZAP: An ngen image of an assembly which "
+ "is not loaded as domain-neutral cannot be used in multiple appdomains "
+ "- abandoning ngen image. The assembly will be JIT-compiled in "
+ "the second appdomain. See System.LoaderOptimization.MultiDomain "
+ "for information about domain-neutral loading.");
+#ifdef FEATURE_FUSION
+ if(GetFile())
+ GetFile()->ETWTraceLogMessage(ETW::BinderLog::BinderStructs::NGEN_BIND_ASSEMBLY_NOT_DOMAIN_NEUTRAL, NULL);
+#endif
+ GetFile()->ClearNativeImage();
+
+ // We only support a (non-shared) native image being used from a single
+ // AppDomain. It's not obvious whether this is an implementation restriction,
+ // or whether this should fail DomainFile::CheckZapRequired().
+ // We err on the side of conservativeness, so that multi-domain tests
+ // do not blow up in CheckZapRequired()
+ GetFile()->SetCannotUseNativeImage();
+ }
+ else
+ {
+ //If we are the first and others can reuse us, we cannot go away
+ if ((pFile == NULL) && (!GetFile()->IsNativeImageUsedExclusively()))
+ GetFile()->AddRef();
+
+ LOG((LF_ZAP, LL_INFO100, "ZAP: Found a candidate native image for %s\n", GetSimpleName()));
+ }
+ }
+ }
+
+#if defined(FEATURE_CORECLR)
+ if (!GetFile()->HasNativeImage())
+ {
+ //
+ // Verify that the IL image is consistent with the NGen images loaded into appdomain
+ //
+
+ AssemblySpec spec;
+ spec.InitializeSpec(GetFile());
+
+ GUID mvid;
+ GetFile()->GetMVID(&mvid);
+
+ GetAppDomain()->CheckForMismatchedNativeImages(&spec, &mvid);
+ }
+#endif
+
+ CheckZapRequired();
+}
+#endif // FEATURE_PREJIT
+
+BOOL DomainAssembly::ShouldLoadDomainNeutral()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (m_fCalculatedShouldLoadDomainNeutral)
+ return m_fShouldLoadDomainNeutral;
+
+ m_fShouldLoadDomainNeutral = !!ShouldLoadDomainNeutralHelper();
+ m_fCalculatedShouldLoadDomainNeutral = true;
+
+ return m_fShouldLoadDomainNeutral;
+}
+
+BOOL DomainAssembly::ShouldLoadDomainNeutralHelper()
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+
+#ifndef FEATURE_CORECLR
+
+ BOOL fIsShareableHostAssembly = FALSE;
+ if (GetFile()->HasHostAssembly())
+ {
+ IfFailThrow(GetFile()->GetHostAssembly()->IsShareable(&fIsShareableHostAssembly));
+ }
+
+#ifdef FEATURE_FUSION
+ // Only use domain neutral code for normal assembly loads
+ if ((GetFile()->GetFusionAssembly() == NULL) && !fIsShareableHostAssembly)
+ {
+ return FALSE;
+ }
+#endif
+
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+ // Introspection only does not use domain neutral code
+ if (IsIntrospectionOnly())
+ return FALSE;
+#endif
+
+#ifdef FEATURE_FUSION
+ // use domain neutral code only for Load context, as the
+ // required eager binding interferes with LoadFrom binding semantics
+ if (!GetFile()->IsContextLoad() && !fIsShareableHostAssembly)
+ return FALSE;
+#endif
+
+ // Check app domain policy...
+ if (this->GetAppDomain()->ApplySharePolicy(this))
+ {
+ if (IsSystem())
+ return TRUE;
+
+ // if not the default AD, ensure that the closure is filled in
+ if (this->GetAppDomain() != SystemDomain::System()->DefaultDomain())
+ GetAssemblyBindingClosure(LEVEL_COMPLETE);
+
+
+ // Can be domain neutral only if we aren't binding any missing dependencies with
+ // the assembly resolve event
+ if ((this->GetAppDomain() != SystemDomain::System()->DefaultDomain()) &&
+ (CheckMissingDependencies() == CMD_Resolved))
+ {
+ return FALSE;
+ }
+
+ // Ensure that all security conditions are met for code sharing
+ if (!Security::CanShareAssembly(this))
+ {
+ return FALSE;
+ }
+
+ return TRUE;
+ }
+ return FALSE;
+
+#else // FEATURE_CORECLR
+
+ if (IsSystem())
+ return TRUE;
+
+ if (IsSingleAppDomain())
+ return FALSE;
+
+ if (GetFile()->IsDynamic())
+ return FALSE;
+
+#ifdef FEATURE_COMINTEROP
+ if (GetFile()->IsWindowsRuntime())
+ return FALSE;
+#endif
+
+ switch(this->GetAppDomain()->GetSharePolicy()) {
+ case AppDomain::SHARE_POLICY_ALWAYS:
+ return TRUE;
+
+ case AppDomain::SHARE_POLICY_GAC:
+ return IsSystem();
+
+ case AppDomain::SHARE_POLICY_NEVER:
+ return FALSE;
+ }
+
+ return FALSE; // There is no point in doing a costly closure walk for CoreCLR.
+
+#endif // FEATURE_CORECLR
+
+#else // FEATURE_LOADER_OPTIMIZATION
+ return IsSystem();
+#endif // FEATURE_LOADER_OPTIMIZATION
+}
+
+BOOL DomainAssembly::ShouldSkipPolicyResolution()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fSkipPolicyResolution;
+}
+
+
+#if defined(FEATURE_LOADER_OPTIMIZATION) && defined(FEATURE_FUSION)
+//
+// Returns TRUE if the attempt to steal ownership of the native image succeeded, or if there are other
+// reasons for retrying load of the native image in the current appdomain.
+//
+// Returns FALSE if the native image should be rejected in the current appdomain.
+//
+static BOOL TryToStealSharedNativeImageOwnership(PEFile ** ppNativeImage, PEFile * pNativeFile, PEFile * pFile)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pNativeFile == PEFile::Dummy())
+ {
+ // Nothing to steal anymore. Loading of the native image failed elsewhere.
+ return FALSE;
+ }
+
+ _ASSERTE(!pNativeFile->IsNativeImageUsedExclusively());
+ _ASSERTE(!pFile->IsNativeImageUsedExclusively());
+
+ SharedDomain * pSharedDomain = SharedDomain::GetDomain();
+
+ // Take the lock so that nobody steals or creates Assembly object for this native image while we are stealing it
+ SharedFileLockHolder pNativeFileLock(pSharedDomain, pNativeFile, TRUE);
+
+ if (pNativeFile != VolatileLoad(ppNativeImage))
+ {
+ // The ownership changed before we got a chance. Retry.
+ return TRUE;
+ }
+
+ SharedAssemblyLocator locator(pNativeFile->AsAssembly(), SharedAssemblyLocator::PEASSEMBLYEXACT);
+ if (pSharedDomain->FindShareableAssembly(&locator))
+ {
+ // Another shared assembly (with different binding closure) uses this image, therefore we cannot use it
+ return FALSE;
+ }
+
+ BOOL success = InterlockedCompareExchangeT(ppNativeImage, pFile, pNativeFile) == pNativeFile;
+
+ // If others can reuse us, we cannot go away
+ if (success)
+ pFile->AddRef();
+
+ STRESS_LOG3(LF_ZAP,LL_INFO100,"Attempt to steal ownership from native file %p by %p success %d\n", pNativeFile, pFile, success);
+
+ return TRUE;
+}
+#endif // FEATURE_LOADER_OPTIMIZATION && FEATURE_FUSION
+
+// This is where the decision whether an assembly is DomainNeutral (shared) or not is made.
+void DomainAssembly::Allocate()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Make sure the security system is happy with this assembly being loaded into the domain
+ GetSecurityDescriptor()->CheckAllowAssemblyLoad();
+
+ AllocMemTracker amTracker;
+ AllocMemTracker * pamTracker = &amTracker;
+
+ Assembly * pAssembly = m_pAssembly;
+
+ if (pAssembly==NULL)
+ {
+ //! If you decide to remove "if" do not remove this brace: order is important here - in the case of an exception,
+ //! the Assembly holder must destruct before the AllocMemTracker declared above.
+
+ NewHolder<Assembly> assemblyHolder(NULL);
+
+ // Determine whether we are supposed to load the assembly as a shared
+ // assembly or into the app domain.
+ if (ShouldLoadDomainNeutral())
+ {
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+
+#ifdef FEATURE_FUSION
+Retry:
+#endif
+
+ // Try to find an existing shared version of the assembly which
+ // is compatible with our domain.
+
+ SharedDomain * pSharedDomain = SharedDomain::GetDomain();
+
+ SIZE_T nInitialShareableAssemblyCount = pSharedDomain->GetShareableAssemblyCount();
+ DWORD dwSwitchCount = 0;
+
+ SharedFileLockHolder pFileLock(pSharedDomain, GetFile(), FALSE);
+
+ if (IsSystem())
+ {
+ pAssembly=SystemDomain::SystemAssembly();
+ }
+ else
+ {
+ SharedAssemblyLocator locator(this);
+ pAssembly = pSharedDomain->FindShareableAssembly(&locator);
+
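+ // The lock-free lookup missed; retry under the shared file lock to close the race with
+ // another thread concurrently publishing the same shareable assembly.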
+ if (pAssembly == NULL)
+ {
+ pFileLock.Acquire();
+ pAssembly = pSharedDomain->FindShareableAssembly(&locator);
+ }
+ }
+
+ if (pAssembly == NULL)
+ {
+#ifdef FEATURE_FUSION
+ // Final verification that we can use the ngen image.
+ //
+ // code:DomainAssembly::FindNativeImage checks the binding closures before declaring the native image as shareable candidate,
+ // but the ultimate decisions about sharing happens inside code:Assembly::CanBeShared called from FindShareableAssembly above.
+ // code:Assembly::CanBeShared checks more conditions than just binding closures. In particular, it also checks whether AssemblyResolve
+ // event resolves any missing dependencies found in the binding closure - the assembly cannot be shared if it is the case.
+ // The end result is that the same ngen image can get here in multiple domains in parallel, but it may not be shareable between all of them.
+ //
+ // We reconcile this conflict by checking whether there is somebody else conflicting with us. If there is, we will try to steal
+ // the ownership of the native image from the other guy and retry. The retry logic is required to prevent a perfectly valid
+ // native image from being dropped on the floor just because multiple appdomains raced to load it.
+ {
+ ReleaseHolder<PEImage> pNativeImage = GetFile()->GetNativeImageWithRef();
+ if ((pNativeImage != NULL) && (pNativeImage->GetLoadedLayout() != NULL))
+ {
+ Module * pNativeModule = pNativeImage->GetLoadedLayout()->GetPersistedModuleImage();
+ if (pNativeModule != NULL)
+ {
+ // The owner of the native module was set thread-safely in code:DomainAssembly::FindNativeImage.
+ // However, the final decision on whether we can share the native image is made in this function (see usage of code:FindShareableAssembly above).
+ PEFile ** ppNativeFile = (PEFile **) (PBYTE(pNativeModule) + Module::GetFileOffset());
+ PEFile * pNativeFile = VolatileLoad(ppNativeFile);
+ if (pNativeFile != GetFile())
+ {
+ pFileLock.Release();
+
+ // Ensures that multiple threads won't fight with each other indefinitely
+ __SwitchToThread(0, ++dwSwitchCount);
+
+ if (!TryToStealSharedNativeImageOwnership(ppNativeFile, pNativeFile, GetFile()))
+ {
+ // If a shared assembly got loaded in the meantime, retry all lookups again
+ if (pSharedDomain->GetShareableAssemblyCount() != nInitialShareableAssemblyCount)
+ goto Retry;
+
+ m_dwReasonForRejectingNativeImage = ReasonForRejectingNativeImage_NiAlreadyUsedInAnotherSharedAssembly;
+ STRESS_LOG3(LF_ZAP,LL_INFO100,"Rejecting native file %p, because it is already used by shared file %p - reason 0x%x\n",GetFile(),pNativeFile,m_dwReasonForRejectingNativeImage);
+ GetFile()->ClearNativeImage();
+ GetFile()->SetCannotUseNativeImage();
+ }
+
+ goto Retry;
+ }
+ }
+ }
+ }
+#endif // FEATURE_FUSION
+
+ // We can now rely on the fact that our MDImport will not change so we can stop refcounting it.
+ GetFile()->MakeMDImportPersistent();
+
+ // Go ahead and create new shared version of the assembly if possible
+ // <TODO> We will need to pass a valid OBJECREF* here in the future when we implement SCU </TODO>
+ assemblyHolder = pAssembly = Assembly::Create(pSharedDomain, GetFile(), GetDebuggerInfoBits(), FALSE, pamTracker, NULL);
+
+ if (MissingDependenciesCheckDone())
+ pAssembly->SetMissingDependenciesCheckDone();
+
+ // Compute the closure assembly dependencies
+ // of the code & layout of given assembly.
+ //
+ // An assembly has direct dependencies listed in its manifest.
+ //
+ // We do not in general also have all of those dependencies' dependencies in the manifest.
+ // After all, we may be only using a small portion of the assembly.
+ //
+ // However, since all dependent assemblies must also be shared (so that
+ // the shared data in this assembly can refer to it), we are in
+ // effect forced to behave as though we do have all of their dependencies.
+ // This is because the resulting shared assembly that we will depend on
+ // DOES have those dependencies, but we won't be able to validly share that
+ // assembly unless we match all of ITS dependencies, too.
+#ifdef FEATURE_FUSION
+ if ((this->GetAppDomain()->GetFusionContext() != NULL) && !IsSystem())
+ {
+ IAssemblyBindingClosure* pClosure = GetAssemblyBindingClosure(LEVEL_STARTING);
+ pAssembly->SetBindingClosure(pClosure);
+ }
+#endif // FEATURE_FUSION
+ // Sets the tenured bit atomically with the hash insert.
+ pSharedDomain->AddShareableAssembly(pAssembly);
+ }
+#else // FEATURE_LOADER_OPTIMIZATION
+ _ASSERTE(IsSystem());
+ if (SystemDomain::SystemAssembly())
+ {
+ pAssembly = SystemDomain::SystemAssembly();
+ }
+ else
+ {
+ // We can now rely on the fact that our MDImport will not change so we can stop refcounting it.
+ GetFile()->MakeMDImportPersistent();
+
+ // <TODO> We will need to pass a valid OBJECTREF* here in the future when we implement SCU </TODO>
+ SharedDomain * pSharedDomain = SharedDomain::GetDomain();
+ assemblyHolder = pAssembly = Assembly::Create(pSharedDomain, GetFile(), GetDebuggerInfoBits(), FALSE, pamTracker, NULL);
+ pAssembly->SetIsTenured();
+ }
+#endif // FEATURE_LOADER_OPTIMIZATION
+ }
+ else
+ {
+ // We can now rely on the fact that our MDImport will not change so we can stop refcounting it.
+ GetFile()->MakeMDImportPersistent();
+
+ // <TODO> We will need to pass a valid OBJECTREF* here in the future when we implement SCU </TODO>
+ assemblyHolder = pAssembly = Assembly::Create(m_pDomain, GetFile(), GetDebuggerInfoBits(), FALSE, pamTracker, NULL);
+ assemblyHolder->SetIsTenured();
+ }
+
+
+ //@todo! This is too early to be calling SuppressRelease. The right place to call it is below after
+ // the CANNOTTHROWCOMPLUSEXCEPTION. Right now, we have to do this to unblock OOM injection testing quickly
+ // as doing the right thing is nontrivial.
+ pamTracker->SuppressRelease();
+ assemblyHolder.SuppressRelease();
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // If we are in an AppX process we should prevent loading of PIAs into the AppDomain.
+ // This ensures that we do not run into compatibility issues in case a type has both a COM coclass and a WinRT class.
+ if (AppX::IsAppXProcess() && pAssembly->IsPIA())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_PIAInAppxProcess"));
+ }
+#endif
+
+ SetAssembly(pAssembly);
+
+#ifdef FEATURE_PREJIT
+ BOOL fInsertIntoAssemblySpecBindingCache = TRUE;
+
+ // Insert AssemblyDef details into AssemblySpecBindingCache if appropriate
+
+#ifdef FEATURE_FUSION
+ fInsertIntoAssemblySpecBindingCache = GetFile()->GetLoadContext() == LOADCTX_TYPE_DEFAULT;
+#endif
+
+#if defined(FEATURE_HOSTED_BINDER)
+#if defined(FEATURE_APPX_BINDER)
+ fInsertIntoAssemblySpecBindingCache = fInsertIntoAssemblySpecBindingCache && !GetFile()->HasHostAssembly();
+#else
+ fInsertIntoAssemblySpecBindingCache = fInsertIntoAssemblySpecBindingCache && GetFile()->CanUseWithBindingCache();
+#endif
+#endif
+
+ if (fInsertIntoAssemblySpecBindingCache)
+ {
+ AssemblySpec specAssemblyDef;
+ specAssemblyDef.InitializeSpec(GetFile());
+ if (specAssemblyDef.IsStrongNamed() && specAssemblyDef.HasPublicKey())
+ {
+ specAssemblyDef.ConvertPublicKeyToToken();
+ }
+ m_pDomain->AddAssemblyToCache(&specAssemblyDef, this);
+ }
+#endif
+} // DomainAssembly::Allocate
+
+void DomainAssembly::DeliverAsyncEvents()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ OVERRIDE_LOAD_LEVEL_LIMIT(FILE_ACTIVE);
+ m_pDomain->RaiseLoadingAssemblyEvent(this);
+
+}
+
+
+void DomainAssembly::DeliverSyncEvents()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ GetCurrentModule()->NotifyEtwLoadFinished(S_OK);
+
+ // We may be notified from inside the loader lock if we are delivering IJW events, so keep track.
+#ifdef PROFILING_SUPPORTED
+ if (!IsProfilerNotified())
+ {
+ SetProfilerNotified();
+ GetCurrentModule()->NotifyProfilerLoadFinished(S_OK);
+ }
+
+#endif
+#ifdef DEBUGGING_SUPPORTED
+ GCX_COOP();
+ if (!IsDebuggerNotified())
+ {
+ SetShouldNotifyDebugger();
+
+ if (m_pDomain->IsDebuggerAttached())
+ {
+ // If this is the first assembly in the AppDomain, it may be possible to get a better name than the
+ // default.
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+ m_pDomain->m_Assemblies.Get(m_pDomain, 0, pDomainAssembly.This());
+ if ((pDomainAssembly == this) && !m_pDomain->IsUserCreatedDomain())
+ m_pDomain->ResetFriendlyName();
+ }
+
+ // Still work to do even if no debugger is attached.
+ NotifyDebuggerLoad(ATTACH_ASSEMBLY_LOAD, FALSE);
+
+ }
+#endif // DEBUGGING_SUPPORTED
+} // DomainAssembly::DeliverSyncEvents
+
+/*
+ // The enum for dwLocation from managed code:
+ public enum ResourceLocation
+ {
+ Embedded = 1,
+ ContainedInAnotherAssembly = 2,
+ ContainedInManifestFile = 4
+ }
+*/
+
+BOOL DomainAssembly::GetResource(LPCSTR szName, DWORD *cbResource,
+ PBYTE *pbInMemoryResource, DomainAssembly** pAssemblyRef,
+ LPCSTR *szFileName, DWORD *dwLocation,
+ StackCrawlMark *pStackMark, BOOL fSkipSecurityCheck,
+ BOOL fSkipRaiseResolveEvent)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ return GetFile()->GetResource( szName,
+ cbResource,
+ pbInMemoryResource,
+ pAssemblyRef,
+ szFileName,
+ dwLocation,
+ pStackMark,
+ fSkipSecurityCheck,
+ fSkipRaiseResolveEvent,
+ this,
+ this->m_pDomain );
+}
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+BOOL DomainAssembly::GetModuleResource(mdFile mdResFile, LPCSTR szResName,
+ DWORD *cbResource, PBYTE *pbInMemoryResource,
+ LPCSTR *szFileName, DWORD *dwLocation,
+ BOOL fIsPublic, StackCrawlMark *pStackMark,
+ BOOL fSkipSecurityCheck)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ const char *szName;
+ DWORD dwFlags;
+ DomainFile *pModule = NULL;
+ DWORD dwOffset = 0;
+
+ if (! ((TypeFromToken(mdResFile) == mdtFile) &&
+ GetMDImport()->IsValidToken(mdResFile)) )
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_FILE_TOKEN);
+ }
+
+ IfFailThrow(GetMDImport()->GetFileProps(
+ mdResFile,
+ &szName,
+ NULL,
+ NULL,
+ &dwFlags));
+
+ if (IsFfContainsMetaData(dwFlags))
+ {
+ // The resource is embedded in a manifest-containing file.
+ mdManifestResource mdResource;
+ mdToken mdLinkRef;
+ DWORD dwResourceFlags;
+
+ Module *pContainerModule = GetCurrentModule();
+ // Use the real assembly with a rid map if possible
+ if (pContainerModule != NULL)
+ pModule = pContainerModule->LoadModule(m_pDomain, mdResFile, FALSE);
+ else
+ {
+ PEModuleHolder pFile(GetAssembly()->LoadModule_AddRef(mdResFile, FALSE));
+ pModule = m_pDomain->LoadDomainModule(this, pFile, FILE_LOADED);
+ }
+
+ if (FAILED(pModule->GetMDImport()->FindManifestResourceByName(szResName,
+ &mdResource)))
+ return FALSE;
+
+ IfFailThrow(pModule->GetMDImport()->GetManifestResourceProps(
+ mdResource,
+ NULL, //&szName,
+ &mdLinkRef,
+ &dwOffset,
+ &dwResourceFlags));
+
+ if (mdLinkRef != mdFileNil)
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_CANT_GET_LINKREF);
+ }
+ fIsPublic = IsMrPublic(dwResourceFlags);
+ }
+
+#ifndef CROSSGEN_COMPILE
+ if (!fIsPublic && pStackMark && !fSkipSecurityCheck)
+ {
+ Assembly *pCallersAssembly = SystemDomain::GetCallersAssembly(pStackMark);
+ if (pCallersAssembly && // full trust for interop
+ (!pCallersAssembly->GetManifestFile()->Equals(GetFile())))
+ {
+ RefSecContext sCtx(AccessCheckOptions::kMemberAccess);
+
+ AccessCheckOptions accessCheckOptions(
+ AccessCheckOptions::kMemberAccess, /*accessCheckType*/
+ NULL, /*pAccessContext*/
+ FALSE, /*throwIfTargetIsInaccessible*/
+ (MethodTable *) NULL /*pTargetMT*/
+ );
+
+ // SL: return TRUE only if the caller is critical
+ // Desktop: return TRUE only if demanding MemberAccess succeeds
+ if (!accessCheckOptions.DemandMemberAccessOrFail(&sCtx, NULL, TRUE /*visibilityCheck*/))
+ return FALSE;
+ }
+ }
+#endif // CROSSGEN_COMPILE
+
+ if (IsFfContainsMetaData(dwFlags)) {
+ if (dwLocation) {
+ *dwLocation = *dwLocation | 1; // ResourceLocation.Embedded
+ *szFileName = szName;
+ return TRUE;
+ }
+
+ pModule->GetFile()->GetEmbeddedResource(dwOffset, cbResource,
+ pbInMemoryResource);
+
+ return TRUE;
+ }
+
+ // The resource is linked (it's in its own file)
+ if (szFileName) {
+ *szFileName = szName;
+ return TRUE;
+ }
+
+ Module *pContainerModule = GetCurrentModule();
+
+ // Use the real assembly with a rid map if possible
+ if (pContainerModule != NULL)
+ pModule = pContainerModule->LoadModule(m_pDomain, mdResFile);
+ else
+ {
+ PEModuleHolder pFile(GetAssembly()->LoadModule_AddRef(mdResFile, TRUE));
+ pModule = m_pDomain->LoadDomainModule(this, pFile, FILE_LOADED);
+ }
+
+ COUNT_T size;
+ const void *contents = pModule->GetFile()->GetManagedFileContents(&size);
+
+ *pbInMemoryResource = (BYTE *) contents;
+ *cbResource = size;
+
+ return TRUE;
+}
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+#ifdef FEATURE_PREJIT
+
+// --------------------------------------------------------------------------------
+// Remember the timestamp of the CLR DLLs used to compile the ngen image.
+// These will be checked at runtime by PEFile::CheckNativeImageTimeStamp().
+//
+
+void GetTimeStampsForNativeImage(CORCOMPILE_VERSION_INFO * pNativeVersionInfo)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(::GetAppDomain()->IsCompilationDomain());
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_CORECLR
+ // Do not store runtime timestamps into NGen image for cross-platform NGen determinism
+#else
+ // fill in pRuntimeDllInfo
+ CORCOMPILE_RUNTIME_DLL_INFO *pRuntimeDllInfo = pNativeVersionInfo->runtimeDllInfo;
+
+ for (DWORD index = 0; index < NUM_RUNTIME_DLLS; index++)
+ {
+#ifdef CROSSGEN_COMPILE
+ SString sFileName(SString::Utf8, CorCompileGetRuntimeDllName((CorCompileRuntimeDlls)index));
+
+ PEImageHolder pImage;
+ if (!GetAppDomain()->ToCompilationDomain()->FindImage(sFileName, MDInternalImport_NoCache, &pImage))
+ {
+ EEFileLoadException::Throw(sFileName, COR_E_FILENOTFOUND);
+ }
+
+ PEImageLayoutHolder pLayout(pImage->GetLayout(PEImageLayout::LAYOUT_FLAT,PEImage::LAYOUT_CREATEIFNEEDED));
+ pRuntimeDllInfo[index].timeStamp = pLayout->GetTimeDateStamp();
+ pRuntimeDllInfo[index].virtualSize = pLayout->GetVirtualSize();
+
+#else // CROSSGEN_COMPILE
+
+ HMODULE hMod = CorCompileGetRuntimeDll((CorCompileRuntimeDlls)index);
+
+ if (hMod == NULL)
+ {
+ _ASSERTE((CorCompileRuntimeDlls)index == NGEN_COMPILER_INFO);
+
+ LPCWSTR wszDllName = CorCompileGetRuntimeDllName((CorCompileRuntimeDlls)index);
+ if (FAILED(g_pCLRRuntime->LoadLibrary(wszDllName, &hMod)))
+ {
+ EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Unable to load CLR DLL during ngen"));
+ }
+ }
+
+ _ASSERTE(hMod != NULL);
+
+ PEDecoder pe(hMod);
+
+ pRuntimeDllInfo[index].timeStamp = pe.GetTimeDateStamp();
+ pRuntimeDllInfo[index].virtualSize = pe.GetVirtualSize();
+#endif // CROSSGEN_COMPILE
+
+ }
+#endif // FEATURE_CORECLR
+}
+
+//
+// Which processor should ngen target?
+// This is needed when ngen wants to target for "reach", i.e. when the ngen images will be
+// used on other machines (the Operating System or the OEM build lab can do this).
+// It can also be used to reduce the testing matrix
+//
+void GetNGenCpuInfo(CORINFO_CPU * cpuInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _TARGET_X86_
+
+#ifdef FEATURE_CORECLR
+ static CORINFO_CPU ngenCpuInfo =
+ {
+ (CPU_X86_PENTIUM_PRO << 8), // dwCPUType
+ 0x00000000, // dwFeatures
+ 0 // dwExtendedFeatures
+ };
+
+ // We always generate P3-compatible code on CoreCLR
+ *cpuInfo = ngenCpuInfo;
+#else // FEATURE_CORECLR
+ static CORINFO_CPU ngenCpuInfo =
+ {
+ (CPU_X86_PENTIUM_4 << 8), // dwCPUType
+ 0x00008001, // dwFeatures
+ 0 // dwExtendedFeatures
+ };
+
+#ifndef CROSSGEN_COMPILE
+ GetSpecificCpuInfo(cpuInfo);
+ if (!IsCompatibleCpuInfo(cpuInfo, &ngenCpuInfo))
+ {
+ // Use the actual cpuInfo if the platform is not compatible
+ // with the "recommended" processor. We expect most platforms to be compatible
+ return;
+ }
+#endif
+
+ *cpuInfo = ngenCpuInfo;
+#endif // FEATURE_CORECLR
+
+#else // _TARGET_X86_
+ cpuInfo->dwCPUType = 0;
+ cpuInfo->dwFeatures = 0;
+ cpuInfo->dwExtendedFeatures = 0;
+#endif // _TARGET_X86_
+}
+
+// --------------------------------------------------------------------------------
+
+void DomainAssembly::GetCurrentVersionInfo(CORCOMPILE_VERSION_INFO *pNativeVersionInfo)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ // Clear memory so that we won't write random data into the zapped file
+ ZeroMemory(pNativeVersionInfo, sizeof(CORCOMPILE_VERSION_INFO));
+
+ // Pick up any compilation directives for code flavor
+
+ BOOL fForceDebug, fForceProfiling, fForceInstrument;
+ SystemDomain::GetCompilationOverrides(&fForceDebug,
+ &fForceProfiling,
+ &fForceInstrument);
+
+ OSVERSIONINFOW osInfo;
+ osInfo.dwOSVersionInfoSize = sizeof(osInfo);
+ if (!GetOSVersion(&osInfo))
+ _ASSERTE(!"GetOSVersion failed");
+
+ _ASSERTE(osInfo.dwMajorVersion < 999);
+ _ASSERTE(osInfo.dwMinorVersion < 999);
+ pNativeVersionInfo->wOSPlatformID = (WORD) osInfo.dwPlatformId;
+
+ // The native images should be OS-version agnostic. Do not store the actual OS version for determinism.
+ // pNativeVersionInfo->wOSMajorVersion = (WORD) osInfo.dwMajorVersion;
+ pNativeVersionInfo->wOSMajorVersion = 4;
+
+ pNativeVersionInfo->wMachine = IMAGE_FILE_MACHINE_NATIVE;
+
+ pNativeVersionInfo->wVersionMajor = VER_MAJORVERSION;
+ pNativeVersionInfo->wVersionMinor = VER_MINORVERSION;
+ pNativeVersionInfo->wVersionBuildNumber = VER_PRODUCTBUILD;
+ pNativeVersionInfo->wVersionPrivateBuildNumber = VER_PRODUCTBUILD_QFE;
+
+ GetNGenCpuInfo(&pNativeVersionInfo->cpuInfo);
+
+#if _DEBUG
+ pNativeVersionInfo->wBuild = CORCOMPILE_BUILD_CHECKED;
+#else
+ pNativeVersionInfo->wBuild = CORCOMPILE_BUILD_FREE;
+#endif
+
+#ifdef DEBUGGING_SUPPORTED
+ if (fForceDebug || !CORDebuggerAllowJITOpts(GetDebuggerInfoBits()))
+ {
+ pNativeVersionInfo->wCodegenFlags |= CORCOMPILE_CODEGEN_DEBUGGING;
+ pNativeVersionInfo->wConfigFlags |= CORCOMPILE_CONFIG_DEBUG;
+ }
+ else
+#endif // DEBUGGING_SUPPORTED
+ {
+ pNativeVersionInfo->wConfigFlags |= CORCOMPILE_CONFIG_DEBUG_NONE;
+ }
+
+#if defined (PROFILING_SUPPORTED_DATA) || defined(PROFILING_SUPPORTED)
+ if (fForceProfiling || CORProfilerUseProfileImages())
+ {
+ pNativeVersionInfo->wCodegenFlags |= CORCOMPILE_CODEGEN_PROFILING;
+ pNativeVersionInfo->wConfigFlags |= CORCOMPILE_CONFIG_PROFILING;
+#ifdef DEBUGGING_SUPPORTED
+ // Note that we have hardwired profiling to also imply optimized debugging
+ // info. This cuts down on one permutation of prejit files.
+ pNativeVersionInfo->wCodegenFlags &= ~CORCOMPILE_CODEGEN_DEBUGGING;
+ pNativeVersionInfo->wConfigFlags &= ~(CORCOMPILE_CONFIG_DEBUG|
+ CORCOMPILE_CONFIG_DEBUG_DEFAULT);
+ pNativeVersionInfo->wConfigFlags |= CORCOMPILE_CONFIG_DEBUG_NONE;
+#endif // DEBUGGING_SUPPORTED
+ }
+ else
+#endif // PROFILING_SUPPORTED_DATA || PROFILING_SUPPORTED
+ {
+ pNativeVersionInfo->wConfigFlags |= CORCOMPILE_CONFIG_PROFILING_NONE;
+ }
+
+#ifdef DEBUGGING_SUPPORTED
+
+ // Note the default assembly flags (from the custom attributes & INI file), so we can
+ // determine whether or not the current settings
+ // match the "default" settings.
+
+ // Note that the INI file settings are considered a part of the
+ // assembly, even though they could theoretically change between
+ // ngen time and runtime. It is just too expensive and awkward to
+ // look up the INI file before binding to the native image at
+ // runtime, so we effectively snapshot it at ngen time.
+
+ DWORD defaultFlags = ComputeDebuggingConfig();
+
+ if (CORDebuggerAllowJITOpts(defaultFlags))
+ {
+ // Default is optimized code
+ if ((pNativeVersionInfo->wCodegenFlags & CORCOMPILE_CODEGEN_DEBUGGING) == 0)
+ pNativeVersionInfo->wConfigFlags |= CORCOMPILE_CONFIG_DEBUG_DEFAULT;
+ }
+ else
+ {
+ // Default is non-optimized debuggable code
+ if ((pNativeVersionInfo->wCodegenFlags & CORCOMPILE_CODEGEN_DEBUGGING) != 0)
+ pNativeVersionInfo->wConfigFlags |= CORCOMPILE_CONFIG_DEBUG_DEFAULT;
+ }
+
+#endif // DEBUGGING_SUPPORTED
+
+ if (fForceInstrument || GetAssembly()->IsInstrumented())
+ {
+ pNativeVersionInfo->wCodegenFlags |= CORCOMPILE_CODEGEN_PROF_INSTRUMENTING;
+ pNativeVersionInfo->wConfigFlags |= CORCOMPILE_CONFIG_INSTRUMENTATION;
+ }
+ else
+ {
+ pNativeVersionInfo->wConfigFlags |= CORCOMPILE_CONFIG_INSTRUMENTATION_NONE;
+ }
+
+ GetTimeStampsForNativeImage(pNativeVersionInfo);
+
+ // Store signature of source assembly.
+ GetOptimizedIdentitySignature(&pNativeVersionInfo->sourceAssembly);
+
+ // The signature is a hash of the whole file. It is written by the zapper.
+ // IfFailThrow(CoCreateGuid(&pNativeVersionInfo->signature));
+}
+
+void DomainAssembly::GetOptimizedIdentitySignature(CORCOMPILE_ASSEMBLY_SIGNATURE *pSignature)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ //
+ // Write the MVID into the version header.
+ //
+
+ //
+ // If this assembly has skip verification permission, then we store its
+ // mvid. If at load time the assembly still has skip verification
+ // permission, then we can base the matches purely on mvid values and
+ // skip the perf-heavy hashing of the file.
+ //
+
+ //
+ // The reason that we tell IsFullyTrusted to do a quick check
+ // only is because that allows us make a determination for the most
+ // common full trust scenarios (local machine) without actually
+ // resolving policy and bringing in a whole list of assembly
+ // dependencies.
+ //
+ ReleaseHolder<IMDInternalImport> scope (GetFile()->GetMDImportWithRef());
+ IfFailThrow(scope->GetScopeProps(NULL, &pSignature->mvid));
+
+ // Use the NGen image if possible. The IL image does not even have to be present on CoreCLR.
+ if (GetFile()->HasNativeImage())
+ {
+ PEImageHolder pNativeImage(GetFile()->GetNativeImageWithRef());
+
+ CORCOMPILE_VERSION_INFO* pVersionInfo = pNativeImage->GetLoadedLayout()->GetNativeVersionInfo();
+ pSignature->timeStamp = pVersionInfo->sourceAssembly.timeStamp;
+ pSignature->ilImageSize = pVersionInfo->sourceAssembly.ilImageSize;
+
+ return;
+ }
+
+ // Write the time stamp
+ PEImageLayoutHolder ilLayout(GetFile()->GetAnyILWithRef());
+ pSignature->timeStamp = ilLayout->GetTimeDateStamp();
+ pSignature->ilImageSize = ilLayout->GetVirtualSize();
+#ifdef MDIL
+ if (g_fIsNGenEmbedILProcess)
+ {
+ PEImageHolder pILImage(GetFile()->GetILimage());
+ DWORD dwActualILSize;
+ if (pILImage->GetLoadedLayout()->GetILSizeFromMDILCLRCtlData(&dwActualILSize))
+ {
+ // Use actual source IL size instead of MDIL size
+ pSignature->ilImageSize = dwActualILSize;
+ }
+ }
+#endif // MDIL
+}
+
+BOOL DomainAssembly::CheckZapDependencyIdentities(PEImage *pNativeImage)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_CORECLR
+ AssemblySpec spec;
+ spec.InitializeSpec(this->GetFile());
+
+ // The assembly spec should have the binding context associated with it
+ _ASSERTE(spec.GetBindingContext() || spec.IsAssemblySpecForMscorlib());
+
+ CORCOMPILE_VERSION_INFO *pVersionInfo = pNativeImage->GetLoadedLayout()->GetNativeVersionInfo();
+
+ // Check our own assembly first
+ GetAppDomain()->CheckForMismatchedNativeImages(&spec, &pVersionInfo->sourceAssembly.mvid);
+
+ // Check MVID in metadata against MVID in CORCOMPILE_VERSION_INFO - important when metadata is loaded from IL instead of NI
+ ReleaseHolder<IMDInternalImport> pImport(this->GetFile()->GetMDImportWithRef());
+ GUID mvid;
+ IfFailThrow(pImport->GetScopeProps(NULL, &mvid));
+ GetAppDomain()->CheckForMismatchedNativeImages(&spec, &mvid);
+
+ // Now Check dependencies
+ COUNT_T cDependencies;
+ CORCOMPILE_DEPENDENCY *pDependencies = pNativeImage->GetLoadedLayout()->GetNativeDependencies(&cDependencies);
+ CORCOMPILE_DEPENDENCY *pDependenciesEnd = pDependencies + cDependencies;
+
+ while (pDependencies < pDependenciesEnd)
+ {
+ if (pDependencies->dwAssemblyDef != mdAssemblyRefNil)
+ {
+ AssemblySpec name;
+ name.InitializeSpec(pDependencies->dwAssemblyDef, pNativeImage->GetNativeMDImport(), this);
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+ if (!name.IsAssemblySpecForMscorlib())
+ {
+ // We just initialized the assembly spec for the NI dependency. This will not have a binding context
+ // associated with it, so set it from that of the parent.
+ _ASSERTE(!name.GetBindingContext());
+ ICLRPrivBinder *pParentAssemblyBindingContext = name.GetBindingContextFromParentAssembly(name.GetAppDomain());
+ _ASSERTE(pParentAssemblyBindingContext);
+ name.SetBindingContext(pParentAssemblyBindingContext);
+ }
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+ GetAppDomain()->CheckForMismatchedNativeImages(&name, &pDependencies->signAssemblyDef.mvid);
+ }
+
+ pDependencies++;
+ }
+#endif
+
+ return TRUE;
+}
+
+BOOL DomainAssembly::CheckZapSecurity(PEImage *pNativeImage)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ //
+ // System libraries are a special case, the security info's always OK.
+ //
+
+ if (IsSystem())
+ return TRUE;
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ //
+ // If we're just loading files as part of PDB generation, we're not executing code,
+ // so no need to do security checks
+ //
+
+ if (IsNgenPDBCompilationProcess())
+ return TRUE;
+#endif
+
+#if defined(FEATURE_CORECLR)
+ // Lets first check whether the assembly is going to receive full trust
+ BOOL fAssemblyIsFullyTrusted = this->GetAppDomain()->IsImageFullyTrusted(pNativeImage);
+
+ // Check if the assembly was ngen as platform
+ Module * pNativeModule = pNativeImage->GetLoadedLayout()->GetPersistedModuleImage();
+ BOOL fImageAndDependenciesAreFullTrust = pNativeModule->m_pModuleSecurityDescriptor->IsMicrosoftPlatform();
+
+ // Return TRUE only if the image was ngen'd at the same trust level as the current trust level:
+ // images ngen'd as full trust can only be loaded in full trust, and a
+ // non-trusted transparent assembly's ngen image can only be loaded in partial trust
+ // (only transparent assemblies can be ngen'd as partial trust; if an assembly has critical code, ngen will error out)
+ return (fAssemblyIsFullyTrusted == fImageAndDependenciesAreFullTrust);
+
+#else // FEATURE_CORECLR
+ ETWOnStartup (SecurityCatchCall_V1, SecurityCatchCallEnd_V1);
+
+#ifdef CROSSGEN_COMPILE
+ return TRUE;
+#else
+
+#ifdef FEATURE_APTCA
+ if (!Security::NativeImageHasValidAptcaDependencies(pNativeImage, this))
+ {
+ return FALSE;
+ }
+#endif // !FEATURE_APTCA
+
+ GCX_COOP();
+
+ BOOL fHostProtectionOK = FALSE;
+ BOOL fImageAndDependenciesAreFullTrust = FALSE;
+
+ EX_TRY
+ {
+ // Check the HostProtection settings.
+ EApiCategories eRequestedProtectedCategories = GetHostProtectionManager()->GetProtectedCategories();
+ if (eRequestedProtectedCategories == eNoChecks)
+ fHostProtectionOK = TRUE;
+
+ // Due to native code generated for one IL image being more aggressively put into another
+ // assembly's native image, we're disabling partial trust NGEN images. If the current
+ // domain can only have fully trusted assemblies, then we can load this image, or if the current
+ // assembly and its closure are all in the GAC we can also use it. Otherwise, we'll conservatively
+ // disable the use of this image.
+ IApplicationSecurityDescriptor *pAppDomainSecurity = this->GetAppDomain()->GetSecurityDescriptor();
+ if (pAppDomainSecurity->IsFullyTrusted() && pAppDomainSecurity->IsHomogeneous())
+ {
+ // A fully trusted homogenous domain can only have full trust assemblies, therefore this assembly
+ // and all its dependencies must be full trust
+ fImageAndDependenciesAreFullTrust = TRUE;
+ }
+ else if (IsClosedInGAC())
+ {
+ // The domain allows partial trust assemblies to be loaded into it. However, this assembly and
+ // all of its dependencies came from the GAC, so we know that they must all be trusted even if
+ // other code in this domain is not.
+ fImageAndDependenciesAreFullTrust = TRUE;
+ }
+ else
+ {
+ // The domain allows partial trust assemblies and we cannot prove that the closure of
+ // dependencies of this assembly will all be fully trusted. Conservatively throw away this NGEN
+ // image.
+ fImageAndDependenciesAreFullTrust = FALSE;
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return fHostProtectionOK && fImageAndDependenciesAreFullTrust;
+#endif // CROSSGEN_COMPILE
+
+#endif // FEATURE_CORECLR
+}
+#endif // FEATURE_PREJIT
+
+#ifdef FEATURE_CAS_POLICY
+void DomainAssembly::InitializeSecurityManager()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ GetFile()->InitializeSecurityManager();
+}
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_CAS_POLICY
+// Returns security information for the assembly based on the codebase
+void DomainAssembly::GetSecurityIdentity(SString &codebase,
+ SecZone *pdwZone,
+ DWORD dwFlags,
+ BYTE *pbUniqueID,
+ DWORD *pcbUniqueID)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pdwZone));
+ PRECONDITION(CheckPointer(pbUniqueID));
+ PRECONDITION(CheckPointer(pcbUniqueID));
+ }
+ CONTRACTL_END;
+
+ GetFile()->GetSecurityIdentity(codebase, pdwZone, dwFlags, pbUniqueID, pcbUniqueID);
+}
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_FUSION
+IAssemblyBindingClosure* DomainAssembly::GetAssemblyBindingClosure(WALK_LEVEL level)
+{
+ CONTRACT(IAssemblyBindingClosure *)
+ {
+ INSTANCE_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL,NULL_OK));
+ // We could return NULL instead of asserting, but hitting code paths that call this for mscorlib is just a waste of cycles anyhow
+ PRECONDITION(!IsSystem());
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ if (m_pAssemblyBindingClosure == NULL || m_pAssemblyBindingClosure->HasBeenWalked(level) == S_FALSE)
+ {
+ SafeComHolder<IAssemblyBindingClosure> pClosure;
+ if (this->GetAppDomain()->GetFusionContext() == NULL)
+ {
+ _ASSERTE(IsSystem());
+ RETURN NULL;
+ }
+
+ GCX_PREEMP();
+
+ ReleaseHolder<IBindResult> pWinRTBindResult;
+ IUnknown * pUnk;
+
+ if (GetFile()->IsIStream())
+ {
+ pUnk = GetFile()->GetIHostAssembly();
+ }
+ else if (GetFile()->IsWindowsRuntime())
+ { // It is .winmd file (WinRT assembly)
+ IfFailThrow(CLRPrivAssemblyWinRT::GetIBindResult(GetFile()->GetHostAssembly(), &pWinRTBindResult));
+ pUnk = pWinRTBindResult;
+ }
+ else
+ {
+ pUnk = GetFile()->GetFusionAssembly();
+ }
+
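+ // Publish the closure at most once; a thread that loses the compare-exchange below keeps
+ // its holder armed, so the duplicate closure it created is released automatically.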
+ if (m_pAssemblyBindingClosure == NULL)
+ {
+ IfFailThrow(this->GetAppDomain()->GetFusionContext()->GetAssemblyBindingClosure(pUnk, NULL, &pClosure));
+ if (FastInterlockCompareExchangePointer<IAssemblyBindingClosure*>(&m_pAssemblyBindingClosure, pClosure.GetValue(), NULL) == NULL)
+ {
+ pClosure.SuppressRelease();
+ }
+ }
+ IfFailThrow(m_pAssemblyBindingClosure->EnsureWalked(pUnk, this->GetAppDomain()->GetFusionContext(), level));
+ }
+ RETURN m_pAssemblyBindingClosure;
+}
+
+// This is used to determine if the binding closure of the assembly in question is in the GAC. Amongst other uses,
+// this comes into play in the MULTI_DOMAIN_HOST scenario.
+BOOL DomainAssembly::IsClosedInGAC()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (IsSystem())
+ return TRUE;
+
+ BOOL fIsWindowsRuntime = GetFile()->IsWindowsRuntime();
+
+ if (!GetFile()->IsSourceGAC() && !fIsWindowsRuntime)
+ return FALSE;
+
+ // Do a binding closure that will help us determine if all the dependencies are in the GAC or not.
+ IAssemblyBindingClosure * pClosure = GetAssemblyBindingClosure(LEVEL_GACCHECK);
+ if (pClosure == NULL)
+ return FALSE;
+
+ // Once the closure is complete, determine if the dependencies are closed in the GAC (or not).
+ HRESULT hr = pClosure->IsAllAssembliesInGAC();
+ IfFailThrow(hr);
+
+ return (hr == S_OK);
+}
+
+BOOL DomainAssembly::MayHaveUnknownDependencies()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (IsSystem())
+ return FALSE;
+
+ // Perform the binding closure walk to initialize state that will help us
+ // determine if we have dependencies that could prevent code-sharing.
+ IAssemblyBindingClosure * pClosure = GetAssemblyBindingClosure(LEVEL_WINRTCHECK);
+ if (pClosure == NULL)
+ return FALSE;
+
+ HRESULT hr = pClosure->MayHaveUnknownDependencies();
+ IfFailThrow(hr);
+
+ return (hr == S_OK);
+}
+
+#endif // FEATURE_FUSION
+
+
+// <TODO>@todo Find a better place for these</TODO>
+#define DE_CUSTOM_VALUE_NAMESPACE "System.Diagnostics"
+#define DE_DEBUGGABLE_ATTRIBUTE_NAME "DebuggableAttribute"
+
+// <TODO>@todo .INI file is a temporary workaround for Beta 1</TODO>
+#define DE_INI_FILE_SECTION_NAME W(".NET Framework Debugging Control")
+#define DE_INI_FILE_KEY_TRACK_INFO W("GenerateTrackingInfo")
+#define DE_INI_FILE_KEY_ALLOW_JIT_OPTS W("AllowOptimize")
+
+DWORD DomainAssembly::ComputeDebuggingConfig()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+#ifdef DEBUGGING_SUPPORTED
+ DWORD dacfFlags = DACF_ALLOW_JIT_OPTS;
+
+ if (GetDebuggingOverrides(&dacfFlags))
+ {
+ dacfFlags |= DACF_USER_OVERRIDE;
+ }
+#ifdef FEATURE_LEGACYNETCF
+ else if (GetAppDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ {
+ // NetCF did not respect the DebuggableAttribute
+ }
+#endif
+ else
+ {
+ IfFailThrow(GetDebuggingCustomAttributes(&dacfFlags));
+ }
+
+ return dacfFlags;
+#else // !DEBUGGING_SUPPORTED
+ return 0;
+#endif // DEBUGGING_SUPPORTED
+}
+
+void DomainAssembly::SetupDebuggingConfig(void)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+#ifdef DEBUGGING_SUPPORTED
+ DWORD dacfFlags = ComputeDebuggingConfig();
+
+ SetDebuggerInfoBits((DebuggerAssemblyControlFlags)dacfFlags);
+
+ LOG((LF_CORDB, LL_INFO10, "Assembly %S: bits=0x%x\n", GetDebugName(), GetDebuggerInfoBits()));
+#endif // DEBUGGING_SUPPORTED
+}
+
+// The format for the (temporary) .INI file is:
+
+// [.NET Framework Debugging Control]
+// GenerateTrackingInfo=<n> where n is 0 or 1
+// AllowOptimize=<n> where n is 0 or 1
+
+// A value read from the .INI file is honored only when it does not equal INVALID_INI_INT:
+#define INVALID_INI_INT (0xFFFF)
+
+bool DomainAssembly::GetDebuggingOverrides(DWORD *pdwFlags)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+#if defined(DEBUGGING_SUPPORTED) && !defined(FEATURE_CORESYSTEM)
+ // TODO FIX in V5.0
+ // Any touch of the file system is relatively expensive even in the warm case.
+ //
+ // Ideally we remove the .INI feature completely (if we need something put it in the .exe.config file)
+ //
+ // However, because of compatibility concerns, we won't do this until the next side-by-side release.
+ // In the meantime, don't check in the case where we have already loaded the NGEN image, as the
+ // JIT overrides don't mean anything in that case since we won't be jitting anyway.
+ // This avoids doing these probes for framework DLLs right away.
+ if (GetFile()->HasNativeImage())
+ return false;
+
+ _ASSERTE(pdwFlags);
+
+ bool fHasBits = false;
+ WCHAR *pFileName = NULL;
+ HRESULT hr = S_OK;
+ UINT cbExtOrValue = 4;
+ WCHAR *pTail = NULL;
+ size_t len = 0;
+ WCHAR *lpFileName = NULL;
+
+ const WCHAR *wszFileName = GetFile()->GetPath();
+
+ if (wszFileName == NULL)
+ {
+ return false;
+ }
+
+ // lpFileName is a copy of the original, and will be edited.
+ CQuickBytes qb;
+ len = wcslen(wszFileName);
+ size_t cchlpFileName = (len + 1);
+ lpFileName = (WCHAR*)qb.AllocThrows(cchlpFileName * sizeof(WCHAR));
+ wcscpy_s(lpFileName, cchlpFileName, wszFileName);
+
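+ // Locate the file-name component of the path so that its extension can be replaced with
+ // ".INI" below (e.g. "app.exe" becomes "app.INI").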
+ pFileName = wcsrchr(lpFileName, W('\\'));
+
+ if (pFileName == NULL)
+ {
+ pFileName = lpFileName;
+ }
+
+ if (*pFileName == W('\\'))
+ {
+ pFileName++; //move the pointer past the last '\'
+ }
+
+ _ASSERTE(wcslen(W(".INI")) == cbExtOrValue);
+
+ if (pFileName == NULL || (pTail=wcsrchr(pFileName, W('.'))) == NULL || (wcslen(pTail)<cbExtOrValue))
+ {
+ return false;
+ }
+
+ wcscpy_s(pTail, cchlpFileName - (pTail - lpFileName), W(".INI"));
+
+ // Win2K has a problem if multiple processes call GetPrivateProfile* on the same
+ // non-existent .INI file simultaneously. The OS livelocks in the kernel (i.e.
+ // outside of user space) and remains there at full CPU for several minutes. Then
+ // it breaks out. Here is our work-around, while we pursue a fix in a future
+ // version of the OS.
+ if (WszGetFileAttributes(lpFileName) == INVALID_FILE_ATTRIBUTES)
+ return false;
+
+ // Having modified the filename, we use the full path
+ // to actually get the file.
+ if ((cbExtOrValue=WszGetPrivateProfileInt(DE_INI_FILE_SECTION_NAME,
+ DE_INI_FILE_KEY_TRACK_INFO,
+ INVALID_INI_INT,
+ lpFileName)) != INVALID_INI_INT)
+ {
+ if (cbExtOrValue != 0)
+ {
+ *pdwFlags |= DACF_OBSOLETE_TRACK_JIT_INFO;
+ }
+ else
+ {
+ *pdwFlags &= (~DACF_OBSOLETE_TRACK_JIT_INFO);
+ }
+
+ fHasBits = true;
+ }
+
+ if ((cbExtOrValue=WszGetPrivateProfileInt(DE_INI_FILE_SECTION_NAME,
+ DE_INI_FILE_KEY_ALLOW_JIT_OPTS,
+ INVALID_INI_INT,
+ lpFileName)) != INVALID_INI_INT)
+ {
+ if (cbExtOrValue != 0)
+ {
+ *pdwFlags |= DACF_ALLOW_JIT_OPTS;
+ }
+ else
+ {
+ *pdwFlags &= (~DACF_ALLOW_JIT_OPTS);
+ }
+
+ fHasBits = true;
+ }
+
+ return fHasBits;
+
+#else // DEBUGGING_SUPPORTED && !FEATURE_CORESYSTEM
+ return false;
+#endif // DEBUGGING_SUPPORTED && !FEATURE_CORESYSTEM
+}
+
+
+// For right now, we only check to see if the DebuggableAttribute is present - later may add fields/properties to the
+// attributes.
+HRESULT DomainAssembly::GetDebuggingCustomAttributes(DWORD *pdwFlags)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(pdwFlags));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+#ifdef FEATURE_PREJIT
+ ReleaseHolder<PEImage> pNativeImage=GetFile()->GetNativeImageWithRef();
+ if (pNativeImage)
+ {
+ CORCOMPILE_VERSION_INFO * pVersion = pNativeImage->GetLoadedLayout()->GetNativeVersionInfo();
+ PREFIX_ASSUME(pVersion != NULL);
+
+ WORD codegen = pVersion->wCodegenFlags;
+
+ if (codegen & CORCOMPILE_CODEGEN_DEBUGGING)
+ {
+ *pdwFlags &= (~DACF_ALLOW_JIT_OPTS);
+ }
+ else
+ {
+ *pdwFlags |= DACF_ALLOW_JIT_OPTS;
+ }
+
+ }
+ else
+#endif // FEATURE_PREJIT
+ {
+ ULONG size;
+ BYTE *blob;
+ mdModule mdMod;
+ ReleaseHolder<IMDInternalImport> mdImport(GetFile()->GetMDImportWithRef());
+ mdMod = mdImport->GetModuleFromScope();
+ mdAssembly asTK = TokenFromRid(mdtAssembly, 1);
+
+ hr = mdImport->GetCustomAttributeByName(asTK,
+ DE_CUSTOM_VALUE_NAMESPACE
+ NAMESPACE_SEPARATOR_STR
+ DE_DEBUGGABLE_ATTRIBUTE_NAME,
+ (const void**)&blob,
+ &size);
+
+ // If the custom attribute is not present, there is nothing to parse.
+ if (!(FAILED(hr) || hr == S_FALSE))
+ {
+ // We're expecting a 6 or 8 byte blob:
+ //
+ // 1, 0, enable tracking, disable opts, 0, 0
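+ //
+ // For example (illustrative, not exhaustive): compiling with
+ // [Debuggable(DebuggingModes.Default | DebuggingModes.DisableOptimizations)]
+ // yields the 8-byte blob 01 00 01 01 00 00 00 00 -- the 0x0001 prolog, the
+ // 4-byte DebuggingModes value 0x00000101 in little-endian order, and a zero
+ // named-argument count -- so blob[2] holds the tracking bits and blob[3] the
+ // disable-opts flag.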
+ if ((size == 6) || (size == 8))
+ {
+ if (!((blob[0] == 1) && (blob[1] == 0)))
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"Invalid blob format for custom attribute");
+ return COR_E_BADIMAGEFORMAT;
+ }
+
+ if (blob[2] & 0x1)
+ {
+ *pdwFlags |= DACF_OBSOLETE_TRACK_JIT_INFO;
+ }
+ else
+ {
+ *pdwFlags &= (~DACF_OBSOLETE_TRACK_JIT_INFO);
+ }
+
+ if (blob[2] & 0x2)
+ {
+ *pdwFlags |= DACF_IGNORE_PDBS;
+ }
+ else
+ {
+ *pdwFlags &= (~DACF_IGNORE_PDBS);
+ }
+
+
+ // For compatibility, we enable optimizations if the tracking byte is zero,
+ // even if disable opts is nonzero
+ if (((blob[2] & 0x1) == 0) || (blob[3] == 0))
+ {
+ *pdwFlags |= DACF_ALLOW_JIT_OPTS;
+ }
+ else
+ {
+ *pdwFlags &= (~DACF_ALLOW_JIT_OPTS);
+ }
+
+ LOG((LF_CORDB, LL_INFO10, "Assembly %S: has %s=%d,%d bits = 0x%x\n", GetDebugName(),
+ DE_DEBUGGABLE_ATTRIBUTE_NAME,
+ blob[2], blob[3], *pdwFlags));
+ }
+ }
+ }
+
+ return hr;
+}
+
+BOOL DomainAssembly::NotifyDebuggerLoad(int flags, BOOL attaching)
+{
+ WRAPPER_NO_CONTRACT;
+
+ BOOL result = FALSE;
+
+ if (!IsVisibleToDebugger())
+ return FALSE;
+
+ // Debugger Attach is done totally out-of-process. Does not call code in-proc.
+ _ASSERTE(!attaching);
+
+ // Make sure the debugger has been initialized. See code:Debugger::Startup.
+ if (g_pDebugInterface == NULL)
+ {
+ _ASSERTE(!CORDebuggerAttached());
+ return FALSE;
+ }
+
+ // There is still work we need to do even when no debugger is attached.
+
+ if (flags & ATTACH_ASSEMBLY_LOAD)
+ {
+ if (ShouldNotifyDebugger())
+ {
+ g_pDebugInterface->LoadAssembly(this);
+ }
+ result = TRUE;
+ }
+
+ DomainModuleIterator i = IterateModules(kModIterIncludeLoading);
+ while (i.Next())
+ {
+ DomainFile * pDomainFile = i.GetDomainFile();
+ if(pDomainFile->ShouldNotifyDebugger())
+ {
+ result = result ||
+ pDomainFile->GetModule()->NotifyDebuggerLoad(this->GetAppDomain(), pDomainFile, flags, attaching);
+ }
+ }
+ if( ShouldNotifyDebugger())
+ {
+ result|=m_pModule->NotifyDebuggerLoad(m_pDomain, this, ATTACH_MODULE_LOAD, attaching);
+ SetDebuggerNotified();
+ }
+
+
+
+ return result;
+}
+
+void DomainAssembly::NotifyDebuggerUnload()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!IsVisibleToDebugger())
+ return;
+
+ if (!this->GetAppDomain()->IsDebuggerAttached())
+ return;
+
+ m_fDebuggerUnloadStarted = TRUE;
+
+ // Dispatch module unloads for all modules. The debugger is resilient in case we haven't dispatched
+ // a previous load event (such as if the debugger attached after the module was loaded).
+ DomainModuleIterator i = IterateModules(kModIterIncludeLoading);
+ while (i.Next())
+ {
+ i.GetDomainFile()->GetModule()->NotifyDebuggerUnload(this->GetAppDomain());
+ }
+
+ g_pDebugInterface->UnloadAssembly(this);
+
+}
+
+// This will enumerate for static GC refs (but not thread static GC refs)
+
+void DomainAssembly::EnumStaticGCRefs(promote_func* fn, ScanContext* sc)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACT_END;
+
+ _ASSERTE(GCHeap::IsGCInProgress() &&
+ GCHeap::IsServerHeap() &&
+ IsGCSpecialThread());
+
+ DomainModuleIterator i = IterateModules(kModIterIncludeLoaded);
+ while (i.Next())
+ {
+ DomainFile* pDomainFile = i.GetDomainFile();
+
+ if (pDomainFile->IsActive())
+ {
+ // We guarantee that at this point the module has its DomainLocalModule set up,
+ // as we create it while we load the module.
+ _ASSERTE(pDomainFile->GetLoadedModule()->GetDomainLocalModule(this->GetAppDomain()));
+ pDomainFile->GetLoadedModule()->EnumRegularStaticGCRefs(this->GetAppDomain(), fn, sc);
+
+ // We currently do not iterate over the ThreadLocalModules that correspond
+ // to this Module. The GC discovers thread statics through the handle table.
+ }
+ }
+
+ RETURN;
+}
+
+
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+//--------------------------------------------------------------------------------
+// DomainModule
+//--------------------------------------------------------------------------------
+
+DomainModule::DomainModule(AppDomain *pDomain, DomainAssembly *pAssembly, PEFile *pFile)
+ : DomainFile(pDomain, pFile),
+ m_pDomainAssembly(pAssembly)
+{
+ STANDARD_VM_CONTRACT;
+}
+
+DomainModule::~DomainModule()
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+void DomainModule::SetModule(Module* pModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ UpdatePEFile(pModule->GetFile());
+ pModule->SetDomainFile(this);
+ // SetDomainFile can throw and will unwind to DomainModule::Allocate, at which
+ // point pModule->Destruct will be called in the catch handler. If we set
+ // m_pModule = pModule before the call to SetDomainFile, then we can end up with
+ // a bad m_pModule pointer when SetDomainFile throws. So we set m_pModule only if
+ // the call to SetDomainFile succeeds.
+ m_pModule = pModule;
+}
+
+void DomainModule::Begin()
+{
+ STANDARD_VM_CONTRACT;
+ m_pDomainAssembly->AddModule(this);
+}
+
+#ifdef FEATURE_PREJIT
+
+void DomainModule::FindNativeImage()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Resource files are never prejitted.
+}
+
+#endif // FEATURE_PREJIT
+
+
+void DomainModule::Allocate()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ // We can now rely on the fact that our MDImport will not change so we can stop refcounting it.
+ GetFile()->MakeMDImportPersistent();
+
+ AllocMemTracker amTracker;
+ AllocMemTracker *pamTracker = &amTracker;
+
+ Assembly *pAssembly = m_pDomainAssembly->GetCurrentAssembly();
+ Module *pModule = NULL;
+
+ if (pAssembly->IsDomainNeutral())
+ {
+ // For shared assemblies, the module may be already in the assembly list, even
+ // though we haven't loaded it here yet.
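+ // Double-checked pattern: probe once without the lock; if not found, take the
+ // shared file lock below and probe again before creating a new Module.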
+
+ pModule = pAssembly->GetManifestModule()->GetModuleIfLoaded(GetToken(),FALSE, TRUE);
+ if (pModule != NULL)
+ {
+ SetModule(pModule);
+ return;
+ }
+ else
+ {
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ SharedDomain *pSharedDomain = SharedDomain::GetDomain();
+ SharedFileLockHolder pFileLock(pSharedDomain, GetFile());
+#else // FEATURE_LOADER_OPTIMIZATION
+ _ASSERTE(IsSystem());
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+ pModule = pAssembly->GetManifestModule()->GetModuleIfLoaded(GetToken(), FALSE, TRUE);
+ if (pModule != NULL)
+ {
+ SetModule(pModule);
+ return;
+ }
+ else
+ {
+ pModule = Module::Create(pAssembly, GetToken(), m_pFile, pamTracker);
+
+ EX_TRY
+ {
+ pAssembly->PrepareModuleForAssembly(pModule, pamTracker);
+ SetModule(pModule); //@todo: This innocent-looking call looks like a mixture of allocations and publishing code - it probably needs to be split.
+ }
+ EX_HOOK
+ {
+ //! It's critical we destruct the manifest Module prior to the AllocMemTracker used to initialize it.
+ //! Otherwise, we will leave dangling pointers inside the Module that Module::Destruct will attempt
+ //! to dereference.
+ pModule->Destruct();
+ }
+ EX_END_HOOK
+
+ {
+ CANNOTTHROWCOMPLUSEXCEPTION();
+ FAULT_FORBID();
+
+ //Cannot fail after this point.
+ pamTracker->SuppressRelease();
+ pModule->SetIsTenured();
+
+ pAssembly->PublishModuleIntoAssembly(pModule);
+
+
+
+ return; // Explicit return to let you know you are NOT welcome to add code after the CANNOTTHROW/FAULT_FORBID expires
+ }
+
+
+
+ }
+ }
+
+ }
+ else
+ {
+ pModule = Module::Create(pAssembly, GetToken(), m_pFile, pamTracker);
+ EX_TRY
+ {
+ pAssembly->PrepareModuleForAssembly(pModule, pamTracker);
+ SetModule(pModule); //@todo: This innocent-looking call looks like a mixture of allocations and publishing code - it probably needs to be split.
+ }
+ EX_HOOK
+ {
+ //! It's critical we destruct the manifest Module prior to the AllocMemTracker used to initialize it.
+ //! Otherwise, we will leave dangling pointers inside the Module that Module::Destruct will attempt
+ //! to dereference.
+ pModule->Destruct();
+ }
+ EX_END_HOOK
+
+
+ {
+ CANNOTTHROWCOMPLUSEXCEPTION();
+ FAULT_FORBID();
+
+ //Cannot fail after this point.
+ pamTracker->SuppressRelease();
+ pModule->SetIsTenured();
+ pAssembly->PublishModuleIntoAssembly(pModule);
+
+
+ return; // Explicit return to let you know you are NOT welcome to add code after the CANNOTTHROW/FAULT_FORBID expires
+ }
+
+ }
+
+
+}
+
+
+
+void DomainModule::DeliverAsyncEvents()
+{
+ LIMITED_METHOD_CONTRACT;
+ return;
+}
+
+void DomainModule::DeliverSyncEvents()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ GetCurrentModule()->NotifyEtwLoadFinished(S_OK);
+
+#ifdef PROFILING_SUPPORTED
+ if (!IsProfilerNotified())
+ {
+ SetProfilerNotified();
+ GetCurrentModule()->NotifyProfilerLoadFinished(S_OK);
+ }
+#endif
+
+#ifdef DEBUGGING_SUPPORTED
+ GCX_COOP();
+ if(!IsDebuggerNotified())
+ {
+ SetShouldNotifyDebugger();
+ {
+ // Always give the module a chance to notify the debugger. If no debugger is attached, the
+ // module can skip out on the notification.
+ m_pModule->NotifyDebuggerLoad(m_pDomain, this, ATTACH_MODULE_LOAD, FALSE);
+ SetDebuggerNotified();
+ }
+ }
+#endif
+}
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void
+DomainFile::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ //sizeof(DomainFile) == 0x60
+ DAC_ENUM_VTHIS();
+
+ // Modules are needed for all minidumps, but they are enumerated elsewhere
+ // so we don't need to duplicate effort; thus we do nothing with m_pModule.
+
+ // For MiniDumpNormal, we only want the file name.
+ if (m_pFile.IsValid())
+ {
+ m_pFile->EnumMemoryRegions(flags);
+ }
+
+ if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE
+ && m_pDomain.IsValid())
+ {
+ m_pDomain->EnumMemoryRegions(flags, true);
+ }
+}
+
+void
+DomainAssembly::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ //sizeof(DomainAssembly) == 0xe0
+ DAC_ENUM_VTHIS();
+ DomainFile::EnumMemoryRegions(flags);
+
+ // For minidumps without full memory, we need to always be able to iterate over m_Modules.
+ m_Modules.EnumMemoryRegions(flags);
+
+ if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
+ {
+ if (m_pAssembly.IsValid())
+ {
+ m_pAssembly->EnumMemoryRegions(flags);
+ }
+ }
+}
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+void
+DomainModule::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ DomainFile::EnumMemoryRegions(flags);
+ if (m_pDomainAssembly.IsValid())
+ {
+ m_pDomainAssembly->EnumMemoryRegions(flags);
+ }
+}
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+#endif // #ifdef DACCESS_COMPILE
+
+#if defined(FEATURE_MIXEDMODE) && !defined(CROSSGEN_COMPILE)
+LPVOID DomainFile::GetUMThunk(LPVOID pManagedIp, PCCOR_SIGNATURE pSig, ULONG cSig)
+{
+ CONTRACT (LPVOID)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END
+
+
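+ // Lazily create and publish the hash with a lock-free compare-exchange: the
+ // loser of any race deletes its table and falls through to use the winner's.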
+ if (m_pUMThunkHash == NULL)
+ {
+ UMThunkHash *pUMThunkHash = new UMThunkHash(GetModule(), this->GetAppDomain());
+ if (FastInterlockCompareExchangePointer(&m_pUMThunkHash, pUMThunkHash, NULL) != NULL)
+ {
+ delete pUMThunkHash;
+ }
+ }
+ RETURN m_pUMThunkHash->GetUMThunk(pManagedIp, pSig, cSig);
+}
+#endif // FEATURE_MIXEDMODE && !CROSSGEN_COMPILE
diff --git a/src/vm/domainfile.h b/src/vm/domainfile.h
new file mode 100644
index 0000000000..f55f9fa009
--- /dev/null
+++ b/src/vm/domainfile.h
@@ -0,0 +1,947 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// --------------------------------------------------------------------------------
+// DomainFile.h
+//
+
+// --------------------------------------------------------------------------------
+
+
+#ifndef _DOMAINFILE_H_
+#define _DOMAINFILE_H_
+
+// --------------------------------------------------------------------------------
+// Required headers
+// --------------------------------------------------------------------------------
+
+// --------------------------------------------------------------------------------
+// Forward class declarations
+// --------------------------------------------------------------------------------
+class AppDomain;
+class DomainAssembly;
+class DomainModule;
+class Assembly;
+class Module;
+class DynamicMethodTable;
+struct AssemblyLoadSecurity;
+
+typedef VPTR(class IAssemblySecurityDescriptor) PTR_IAssemblySecurityDescriptor;
+
+enum FileLoadLevel
+{
+ // These states are tracked by FileLoadLock
+
+ // Note: This enum must match the static array fileLoadLevelName[]
+ // which contains the printable names of the enum values
+
+ // Note that the semantics here are that each value describes the LAST step
+ // done, not what is currently being done.
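+ // For example, m_level == FILE_LOAD_ALLOCATE means Allocate() has completed
+ // but the ADD_DEPENDENCIES step has not yet run.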
+
+ FILE_LOAD_CREATE,
+ FILE_LOAD_BEGIN,
+ FILE_LOAD_FIND_NATIVE_IMAGE,
+ FILE_LOAD_VERIFY_NATIVE_IMAGE_DEPENDENCIES,
+ FILE_LOAD_ALLOCATE,
+ FILE_LOAD_ADD_DEPENDENCIES,
+ FILE_LOAD_PRE_LOADLIBRARY,
+ FILE_LOAD_LOADLIBRARY,
+ FILE_LOAD_POST_LOADLIBRARY,
+ FILE_LOAD_EAGER_FIXUPS,
+ FILE_LOAD_VTABLE_FIXUPS,
+ FILE_LOAD_DELIVER_EVENTS,
+ FILE_LOADED, // Loaded but not yet active
+ FILE_LOAD_VERIFY_EXECUTION,
+ FILE_ACTIVE // Fully active (constructors run & security checked)
+};
+
+
+enum NotificationStatus
+{
+ NOT_NOTIFIED=0,
+ PROFILER_NOTIFIED=1,
+ DEBUGGER_NEEDNOTIFICATION=2,
+ DEBUGGER_NOTIFIED=4
+};
+
+// --------------------------------------------------------------------------------
+// DomainFile represents a file loaded (or being loaded) into an app domain. It
+// is guaranteed to be unique per file per app domain.
+// --------------------------------------------------------------------------------
+
+class DomainFile
+{
+ VPTR_BASE_VTABLE_CLASS(DomainFile);
+
+ public:
+
+ // ------------------------------------------------------------
+ // Public API
+ // ------------------------------------------------------------
+
+#ifndef DACCESS_COMPILE
+ virtual ~DomainFile();
+ DomainFile() {LIMITED_METHOD_CONTRACT;};
+#endif
+
+ LoaderAllocator *GetLoaderAllocator();
+
+ PTR_AppDomain GetAppDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_pDomain;
+ }
+
+ PEFile *GetFile()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pFile;
+ }
+
+ PEFile *GetOriginalFile()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pOriginalFile!= NULL ? m_pOriginalFile : m_pFile;
+ }
+
+
+ IMDInternalImport *GetMDImport()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_pFile->GetPersistentMDImport();
+ }
+
+ OBJECTREF GetExposedModuleObjectIfExists()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ OBJECTREF objRet = NULL;
+ GET_LOADERHANDLE_VALUE_FAST(GetLoaderAllocator(), m_hExposedModuleObject, &objRet);
+ return objRet;
+ }
+
+ OBJECTREF GetExposedModuleObject();
+
+ BOOL IsSystem()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetFile()->IsSystem();
+ }
+
+ LPCUTF8 GetSimpleName()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetFile()->GetSimpleName();
+ }
+
+#ifdef LOGGING
+ LPCWSTR GetDebugName()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetFile()->GetDebugName();
+ }
+#endif
+
+#ifdef FEATURE_MIXEDMODE
+ LPVOID GetUMThunk(LPVOID pManagedIp, PCCOR_SIGNATURE pSig, ULONG cSig);
+#endif
+
+ void ReleaseFiles() DAC_EMPTY();
+
+ virtual BOOL IsAssembly() = 0;
+
+ DomainAssembly *GetDomainAssembly();
+
+ // ------------------------------------------------------------
+ // Loading state checks
+ // ------------------------------------------------------------
+
+ // Return the File's load level. Note that this is the last level actually successfully completed.
+ // Note that this is subtly different from the FileLoadLock's level, which is the last level
+ // that was triggered (but potentially skipped on error or when inappropriate.)
+ FileLoadLevel GetLoadLevel() { LIMITED_METHOD_DAC_CONTRACT; return m_level; }
+
+ // Error means that a permanent x-appdomain load error has occurred.
+ BOOL IsError()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ DACCOP_IGNORE(FieldAccess, "No marshalling required");
+ return m_pError != NULL;
+ }
+
+ // Loading means that the load is still being tracked by a FileLoadLock.
+ BOOL IsLoading() { LIMITED_METHOD_CONTRACT; return m_loading; }
+
+ // Loaded means that the file can be used passively. This includes loading types, reflection, and
+ // jitting.
+ BOOL IsLoaded() { LIMITED_METHOD_DAC_CONTRACT; return m_level >= FILE_LOAD_DELIVER_EVENTS; }
+
+ // Active means that the file can be used actively in the current app domain. Note that a shared file
+ // may conditionally not be able to be made active on a per app domain basis.
+ BOOL IsActive() { LIMITED_METHOD_CONTRACT; return m_level >= FILE_ACTIVE; }
+
+ // Checks if the load has reached the point where profilers may be notified
+ // about the file. It's important that IF a profiler is notified, THEN this returns
+ // TRUE, otherwise there can be profiler-attach races where the profiler doesn't see
+ // the file via either enumeration or notification. As a result, this begins
+ // returning TRUE just before the profiler is actually notified. See
+ // code:ProfilerFunctionEnum::Init#ProfilerEnumAssemblies
+ BOOL IsAvailableToProfilers()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return IsProfilerNotified(); // despite the name, this function returns TRUE just before we notify the profiler
+ }
+
+ // CheckLoaded is appropriate for asserts that the assembly can be passively used.
+ CHECK CheckLoaded();
+
+ // CheckActivated is appropriate for asserts that the assembly can be actively used. Note that
+ // it is slightly different from IsActive in that it deals with reentrancy cases properly.
+ CHECK CheckActivated();
+
+ // Ensure that an assembly has reached at least the IsLoaded state. Throw if not.
+ void EnsureLoaded()
+ {
+ WRAPPER_NO_CONTRACT;
+ return EnsureLoadLevel(FILE_LOADED);
+ }
+
+ // Ensure that an assembly has reached at least the IsActive state. Throw if not.
+ void EnsureActive()
+ {
+ WRAPPER_NO_CONTRACT;
+ return EnsureLoadLevel(FILE_ACTIVE);
+ }
+
+ // Ensure that an assembly has reached at least the Allocated state. Throw if not.
+ void EnsureAllocated()
+ {
+ WRAPPER_NO_CONTRACT;
+ return EnsureLoadLevel(FILE_LOAD_ALLOCATE);
+ }
+
+
+ void EnsureLibraryLoaded()
+ {
+ WRAPPER_NO_CONTRACT;
+ return EnsureLoadLevel(FILE_LOAD_LOADLIBRARY);
+ }
+
+ // This wraps EnsureActive, suppressing non-transient exceptions
+ BOOL TryEnsureActive();
+
+ // EnsureLoadLevel is a generic routine used to ensure that the file is not in a delay loaded
+ // state (unless it needs to be.) This should be used when a particular level of loading
+ // is required for an operation. Note that deadlocks are tolerated, so the resulting level
+ // may be one short of the requested level.
+ void EnsureLoadLevel(FileLoadLevel targetLevel) DAC_EMPTY();
+
+ // AttemptLoadLevel is a generic routine used to try to further load the file to a given level.
+ // No guarantee is made about the load level resulting however.
+ void AttemptLoadLevel(FileLoadLevel targetLevel) DAC_EMPTY();
+
+ // CheckLoadLevel is an assert predicate used to verify the load level of an assembly.
+ // deadlockOK indicates that the level is allowed to be one short if we are restricted
+ // by loader reentrancy.
+ CHECK CheckLoadLevel(FileLoadLevel requiredLevel, BOOL deadlockOK = TRUE) DAC_EMPTY_RET(CHECK::OK());
+
+ // RequireLoadLevel throws an exception if the domain file isn't loaded enough. Note
+ // that this is intolerant of deadlock related failures so is only really appropriate for
+ // checks inside the main loading loop.
+ void RequireLoadLevel(FileLoadLevel targetLevel) DAC_EMPTY();
+
+ // Throws if a load error has occurred
+ void ThrowIfError(FileLoadLevel targetLevel) DAC_EMPTY();
+
+ // Checks that a load error has not occurred before the given level
+ CHECK CheckNoError(FileLoadLevel targetLevel) DAC_EMPTY_RET(CHECK::OK());
+
+ // IsNotified means that the profiler API notification has been delivered
+ BOOL IsProfilerNotified() { LIMITED_METHOD_CONTRACT; return m_notifyflags & PROFILER_NOTIFIED; }
+ BOOL IsDebuggerNotified() { LIMITED_METHOD_CONTRACT; return m_notifyflags & DEBUGGER_NOTIFIED; }
+ BOOL ShouldNotifyDebugger() { LIMITED_METHOD_CONTRACT; return m_notifyflags & DEBUGGER_NEEDNOTIFICATION; }
+
+
+ // ------------------------------------------------------------
+ // Other public APIs
+ // ------------------------------------------------------------
+
+ BOOL IsIntrospectionOnly();
+
+#ifndef DACCESS_COMPILE
+ BOOL Equals(DomainFile *pFile) { WRAPPER_NO_CONTRACT; return GetFile()->Equals(pFile->GetFile()); }
+ BOOL Equals(PEFile *pFile) { WRAPPER_NO_CONTRACT; return GetFile()->Equals(pFile); }
+#endif // DACCESS_COMPILE
+
+ Module* GetCurrentModule();
+ Module* GetLoadedModule();
+ Module* GetModule();
+
+#ifdef FEATURE_PREJIT
+ BOOL IsZapRequired(); // Are we absolutely required to use a native image?
+#endif
+ // The format string is intentionally unicode to avoid globalization bugs
+#ifdef FEATURE_PREJIT
+ void ExternalLog(DWORD level, const WCHAR *fmt, ...);
+ void ExternalLog(DWORD level, const char *msg);
+#endif
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+#ifndef DACCESS_COMPILE
+ // light code gen. Keep the list of MethodTables needed for creating dynamic methods
+ DynamicMethodTable* GetDynamicMethodTable();
+#endif
+
+ protected:
+ // ------------------------------------------------------------
+ // Loader API
+ // ------------------------------------------------------------
+
+ friend class AppDomain;
+ friend class Assembly;
+ friend class Module;
+ friend class FileLoadLock;
+
+ DomainFile(AppDomain *pDomain, PEFile *pFile);
+
+ BOOL DoIncrementalLoad(FileLoadLevel targetLevel);
+ void ClearLoading() { LIMITED_METHOD_CONTRACT; m_loading = FALSE; }
+ void SetLoadLevel(FileLoadLevel level) { LIMITED_METHOD_CONTRACT; m_level = level; }
+
+#ifndef DACCESS_COMPILE
+ virtual void Begin() = 0;
+ virtual void Allocate() = 0;
+ void AddDependencies();
+ void PreLoadLibrary();
+ void LoadLibrary();
+ void PostLoadLibrary();
+ void EagerFixups();
+ void VtableFixups();
+ virtual void DeliverSyncEvents() = 0;
+ virtual void DeliverAsyncEvents() = 0;
+ void FinishLoad();
+ void VerifyExecution();
+ void Activate();
+#endif
+
+ // This is called when a new active dependency is added.
+ static BOOL PropagateNewActivation(Module *pModuleFrom, Module *pModuleTo);
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ static BOOL PropagateActivationInAppDomain(Module *pModuleFrom, Module *pModuleTo, AppDomain* pDomain);
+#endif
+ // This can be used to verify that no propagation is needed
+ static CHECK CheckUnactivatedInAllDomains(Module *pModule);
+
+ // This should be used to permanently set the load to fail. Do not use with transient conditions
+ void SetError(Exception *ex);
+
+#ifdef FEATURE_PREJIT
+
+#ifndef DACCESS_COMPILE
+ virtual void FindNativeImage() = 0;
+#endif
+ void VerifyNativeImageDependencies(bool verifyOnly = FALSE);
+
+ // Are we absolutely required to use a native image?
+ void CheckZapRequired();
+
+ void ClearNativeImageStress();
+
+#endif // FEATURE_PREJIT
+
+ void SetProfilerNotified() { LIMITED_METHOD_CONTRACT; m_notifyflags|= PROFILER_NOTIFIED; }
+ void SetDebuggerNotified() { LIMITED_METHOD_CONTRACT; m_notifyflags|=DEBUGGER_NOTIFIED; }
+ void SetShouldNotifyDebugger() { LIMITED_METHOD_CONTRACT; m_notifyflags|=DEBUGGER_NEEDNOTIFICATION; }
+#ifndef DACCESS_COMPILE
+ void UpdatePEFileWorker(PTR_PEFile pFile);
+#endif
+
+ // ------------------------------------------------------------
+ // Instance data
+ // ------------------------------------------------------------
+
+ PTR_AppDomain m_pDomain;
+ PTR_PEFile m_pFile;
+ PTR_PEFile m_pOriginalFile; // Keep the file alive just in case someone is still using it. If this is not NULL then m_pFile contains the reused file from the shared assembly.
+ PTR_Module m_pModule;
+ FileLoadLevel m_level;
+ LOADERHANDLE m_hExposedModuleObject;
+
+ class ExInfo
+ {
+ enum
+ {
+ ExType_ClrEx,
+ ExType_HR
+ }
+ m_type;
+ union
+ {
+ Exception *m_pEx;
+ HRESULT m_hr;
+ };
+
+ public:
+ void Throw()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ if (m_type==ExType_ClrEx)
+ {
+ PAL_CPP_THROW(Exception *, m_pEx->DomainBoundClone());
+ }
+ if (m_type==ExType_HR)
+ ThrowHR(m_hr);
+ _ASSERTE(!"Bad exception type");
+ ThrowHR(E_UNEXPECTED);
+ };
+ ExInfo(Exception* pEx)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_type=ExType_ClrEx;
+ m_pEx=pEx;
+ };
+
+ void ConvertToHResult()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if(m_type==ExType_HR)
+ return;
+ _ASSERTE(m_type==ExType_ClrEx);
+ HRESULT hr=m_pEx->GetHR();
+ delete m_pEx;
+ m_hr=hr;
+ m_type=ExType_HR;
+ };
+ ~ExInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (m_type==ExType_ClrEx)
+ delete m_pEx;
+ }
+ }* m_pError;
+
+ void ReleaseManagedData()
+ {
+ if (m_pError)
+ m_pError->ConvertToHResult();
+ };
+
+#ifdef FEATURE_PREJIT
+ // Lock-free enumeration of DomainFiles in an AppDomain.
+public:
+ DomainFile *FindNextDomainFileWithNativeImage();
+private:
+ void InsertIntoDomainFileWithNativeImageList();
+#endif // FEATURE_PREJIT
+
+ DWORD m_notifyflags;
+ BOOL m_loading;
+ // m_pDynamicMethodTable is used by the light code generation to allow method
+ // generation on the fly. It is lazily created when/if a dynamic method is requested
+ // for this specific module.
+ DynamicMethodTable *m_pDynamicMethodTable;
+ class UMThunkHash *m_pUMThunkHash;
+ BOOL m_bDisableActivationCheck;
+
+ // This value is to make it easier to diagnose Assembly Loader "rejected native image" crashes.
+ // See Dev11 bug 358184 for more details
+public:
+ DWORD m_dwReasonForRejectingNativeImage; // See code:g_dwLoaderReasonForNotSharing in Assembly.cpp for a similar variable.
+private:
+
+#ifdef FEATURE_PREJIT
+ // This value is to allow lock-free enumeration of all native images in an AppDomain
+ Volatile<DomainFile *> m_pNextDomainFileWithNativeImage;
+#endif
+};
+
+// These will sometimes result in a crash with error code 0x80131506 COR_E_EXECUTIONENGINE
+// "An internal error happened in the Common Language Runtime's Execution Engine"
+// Cause: Incorrectly committed to using native image for <path to assembly>
+enum ReasonForRejectingNativeImage
+{
+ ReasonForRejectingNativeImage_NoNiForManifestModule = 0x101,
+ ReasonForRejectingNativeImage_DependencyNotNative = 0x102,
+ ReasonForRejectingNativeImage_MscorlibNotNative = 0x103,
+ ReasonForRejectingNativeImage_FailedSecurityCheck = 0x104,
+ ReasonForRejectingNativeImage_DependencyIdentityMismatch = 0x105,
+ ReasonForRejectingNativeImage_CannotShareNiAssemblyNotDomainNeutral = 0x106,
+ ReasonForRejectingNativeImage_NiAlreadyUsedInAnotherSharedAssembly = 0x107,
+};
+
+//---------------------------------------------------------------------------------------
+// One of these values is specified when requesting a module iterator to customize which
+// modules should appear in the enumeration
+enum ModuleIterationOption
+{
+ // include only modules that are already loaded (m_level >= FILE_LOAD_DELIVER_EVENTS)
+ kModIterIncludeLoaded = 1,
+
+ // include all modules, even those that are still in the process of loading (all m_level values)
+ kModIterIncludeLoading = 2,
+
+ // include only modules loaded just enough that profilers are notified of them.
+ // (m_level >= FILE_LOAD_LOADLIBRARY). See comment at code:DomainFile::IsAvailableToProfilers
+ kModIterIncludeAvailableToProfilers = 3,
+};
+
+
+enum CMD_State
+{
+ CMD_Unknown,
+ CMD_NotNeeded,
+ CMD_IndeedMissing,
+ CMD_Resolved
+};
+
+// --------------------------------------------------------------------------------
+// DomainAssembly is a subclass of DomainFile which specifically represents an assembly.
+// --------------------------------------------------------------------------------
+
+class DomainAssembly : public DomainFile
+{
+ VPTR_VTABLE_CLASS(DomainAssembly, DomainFile);
+
+public:
+ // ------------------------------------------------------------
+ // Public API
+ // ------------------------------------------------------------
+
+ PEAssembly *GetFile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return PTR_PEAssembly(m_pFile);
+ }
+
+#ifdef FEATURE_FUSION
+ IAssemblyBindingClosure* GetAssemblyBindingClosure(WALK_LEVEL level);
+ BOOL IsClosedInGAC();
+ BOOL MayHaveUnknownDependencies();
+#endif
+
+ // Returns security information for the assembly based on the codebase
+ void GetSecurityIdentity(SString &codebase, SecZone *pdwZone, DWORD dwFlags, BYTE *pbUniqueID, DWORD *pcbUniqueID);
+
+ IAssemblySecurityDescriptor* GetSecurityDescriptor()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<IAssemblySecurityDescriptor*>(m_pSecurityDescriptor);
+ }
+#ifdef FEATURE_LOADER_OPTIMIZATION
+
+#ifdef FEATURE_FUSION
+private:
+ enum CMDI_Result
+ {
+ CMDI_End,
+ CMDI_AssemblyResolveSucceeded,
+ CMDI_AssemblyResolveFailed
+ };
+
+ CMDI_Result CheckMissingDependencyInner(IAssemblyBindingClosure* pClosure, DWORD idx);
+
+
+#endif
+public:
+ CMD_State CheckMissingDependencies();
+ BOOL MissingDependenciesCheckDone();
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ DomainFile *FindModule(PEFile *pFile, BOOL includeLoading = FALSE);
+ DomainModule *FindModule(PEModule *pFile, BOOL includeLoading = FALSE)
+ {
+ WRAPPER_NO_CONTRACT;
+ return (DomainModule *) FindModule((PEFile *) pFile, includeLoading);
+ }
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+ void ReleaseFiles();
+#endif // DACCESS_COMPILE
+
+ // Finds only loaded hmods
+ DomainFile *FindIJWModule(HMODULE hMod);
+
+ void SetAssembly(Assembly* pAssembly);
+
+ BOOL IsAssembly()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TRUE;
+ }
+
+ OBJECTREF GetExposedAssemblyObjectIfExists()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ OBJECTREF objRet = NULL;
+ GET_LOADERHANDLE_VALUE_FAST(GetLoaderAllocator(), m_hExposedAssemblyObject, &objRet);
+ return objRet;
+ }
+
+ // Returns managed representation of the assembly (Assembly or AssemblyBuilder).
+ // Returns NULL if the managed scout was already collected (see code:LoaderAllocator#AssemblyPhases).
+ OBJECTREF GetExposedAssemblyObject();
+
+ Assembly* GetCurrentAssembly();
+ Assembly* GetLoadedAssembly();
+ Assembly* GetAssembly();
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ // ------------------------------------------------------------
+ // Modules
+ // ------------------------------------------------------------
+ class ModuleIterator
+ {
+ ArrayList::Iterator m_i;
+ ModuleIterationOption m_moduleIterationOption;
+
+ public:
+ BOOL Next()
+ {
+ WRAPPER_NO_CONTRACT;
+ while (m_i.Next())
+ {
+ if (m_i.GetElement() == NULL)
+ {
+ continue;
+ }
+ if (GetDomainFile()->IsError())
+ {
+ continue;
+ }
+ if (m_moduleIterationOption == kModIterIncludeLoading)
+ return TRUE;
+ if ((m_moduleIterationOption == kModIterIncludeLoaded) &&
+ GetDomainFile()->IsLoaded())
+ return TRUE;
+ if ((m_moduleIterationOption == kModIterIncludeAvailableToProfilers) &&
+ GetDomainFile()->IsAvailableToProfilers())
+ return TRUE;
+ }
+ return FALSE;
+ }
+ Module *GetModule()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetDomainFile()->GetModule();
+ }
+ Module *GetLoadedModule()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetDomainFile()->GetLoadedModule();
+ }
+ DomainFile *GetDomainFile()
+ {
+ WRAPPER_NO_CONTRACT;
+ return dac_cast<PTR_DomainFile>(m_i.GetElement());
+ }
+ SIZE_T GetIndex()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_i.GetIndex();
+ }
+
+ private:
+ friend class DomainAssembly;
+ // Cannot have a constructor so this iterator can be used inside a union
+ static ModuleIterator Create(DomainAssembly * pDomainAssembly, ModuleIterationOption moduleIterationOption)
+ {
+ WRAPPER_NO_CONTRACT;
+ ModuleIterator i;
+
+ i.m_i = pDomainAssembly->m_Modules.Iterate();
+ i.m_moduleIterationOption = moduleIterationOption;
+
+ return i;
+ }
+ };
+ friend class ModuleIterator;
+
+ ModuleIterator IterateModules(ModuleIterationOption moduleIterationOption)
+ {
+ WRAPPER_NO_CONTRACT;
+ return ModuleIterator::Create(this, moduleIterationOption);
+ }
+
+ DomainFile *LookupDomainFile(DWORD index)
+ {
+ WRAPPER_NO_CONTRACT;
+ if (index >= m_Modules.GetCount())
+ return NULL;
+ else
+ return dac_cast<PTR_DomainFile>(m_Modules.Get(index));
+ }
+
+ Module *LookupModule(DWORD index)
+ {
+ WRAPPER_NO_CONTRACT;
+ DomainFile *pModule = LookupDomainFile(index);
+ if (pModule == NULL)
+ return NULL;
+ else
+ return pModule->GetModule();
+ }
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ void AddModule(DomainModule *pModule);
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+ // ------------------------------------------------------------
+ // Resource access
+ // ------------------------------------------------------------
+
+ BOOL GetResource(LPCSTR szName, DWORD *cbResource,
+ PBYTE *pbInMemoryResource, DomainAssembly** pAssemblyRef,
+ LPCSTR *szFileName, DWORD *dwLocation,
+ StackCrawlMark *pStackMark, BOOL fSkipSecurityCheck,
+ BOOL fSkipRaiseResolveEvent);
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ BOOL GetModuleResource(mdFile mdResFile, LPCSTR szResName,
+ DWORD *cbResource, PBYTE *pbInMemoryResource,
+ LPCSTR *szFileName, DWORD *dwLocation,
+ BOOL fIsPublic, StackCrawlMark *pStackMark,
+ BOOL fSkipSecurityCheck);
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+#ifdef FEATURE_PREJIT
+ // ------------------------------------------------------------
+ // Prejitting API
+ // ------------------------------------------------------------
+
+ void GetCurrentVersionInfo(CORCOMPILE_VERSION_INFO *pZapVersionInfo);
+
+ void GetOptimizedIdentitySignature(CORCOMPILE_ASSEMBLY_SIGNATURE *pSignature);
+ BOOL CheckZapDependencyIdentities(PEImage *pNativeImage);
+ BOOL CheckZapSecurity(PEImage *pNativeImage);
+
+ BOOL CheckFileSystemTimeStamps(PEFile *pZapManifest);
+
+#endif // FEATURE_PREJIT
+
+ // ------------------------------------------------------------
+ // Debugger control API
+ // ------------------------------------------------------------
+
+ DebuggerAssemblyControlFlags GetDebuggerInfoBits(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_debuggerFlags;
+ }
+
+ void SetDebuggerInfoBits(DebuggerAssemblyControlFlags newBits)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_debuggerFlags = newBits;
+ }
+
+ void SetupDebuggingConfig(void);
+ DWORD ComputeDebuggingConfig(void);
+
+ bool GetDebuggingOverrides(DWORD *pdwFlags);
+
+ HRESULT GetDebuggingCustomAttributes(DWORD *pdwFlags);
+
+ BOOL IsVisibleToDebugger();
+ BOOL NotifyDebuggerLoad(int flags, BOOL attaching);
+ void NotifyDebuggerUnload();
+ BOOL IsUnloading();
+
+ inline BOOL IsCollectible();
+ //
+ // GC API
+ //
+ void EnumStaticGCRefs(promote_func* fn, ScanContext* sc);
+
+
+ private:
+
+ // ------------------------------------------------------------
+ // Loader API
+ // ------------------------------------------------------------
+
+ friend class AppDomain;
+ friend class Assembly;
+ friend class AssemblyNameNative;
+
+#ifndef DACCESS_COMPILE
+public:
+ ~DomainAssembly();
+private:
+ DomainAssembly(AppDomain *pDomain, PEFile *pFile, AssemblyLoadSecurity *pLoadSecurity, LoaderAllocator *pLoaderAllocator);
+#endif
+
+ // ------------------------------------------------------------
+ // Internal routines
+ // ------------------------------------------------------------
+
+ void SetSecurityError(Exception *ex);
+
+#ifndef DACCESS_COMPILE
+ void Begin();
+ void Allocate();
+ void LoadSharers();
+ void DeliverSyncEvents();
+ void DeliverAsyncEvents();
+#endif
+
+ void UpdatePEFile(PTR_PEFile pFile);
+
+#ifdef FEATURE_PREJIT
+#ifndef DACCESS_COMPILE
+ void FindNativeImage();
+#endif
+#endif // FEATURE_PREJIT
+
+ BOOL IsInstrumented();
+
+ public:
+ ULONG HashIdentity();
+
+ private:
+#ifdef FEATURE_CAS_POLICY
+ // Pulls in URLMON's security manager. It is used to translate a codebase
+ // into a zone and site
+ void InitializeSecurityManager();
+#endif // FEATURE_CAS_POLICY
+
+ BOOL ShouldLoadDomainNeutral();
+ BOOL ShouldLoadDomainNeutralHelper();
+ BOOL ShouldSkipPolicyResolution();
+
+ // ------------------------------------------------------------
+ // Instance data
+ // ------------------------------------------------------------
+
+ private:
+ LOADERHANDLE m_hExposedAssemblyObject;
+ PTR_IAssemblySecurityDescriptor m_pSecurityDescriptor;
+ PTR_Assembly m_pAssembly;
+ DebuggerAssemblyControlFlags m_debuggerFlags;
+#ifdef FEATURE_FUSION
+ ReleaseHolder<IAssemblyBindingClosure> m_pAssemblyBindingClosure;
+#endif
+ CMD_State m_MissingDependenciesCheckStatus;
+ ArrayList m_Modules;
+ BOOL m_fSkipPolicyResolution;
+ BOOL m_fDebuggerUnloadStarted;
+ BOOL m_fCollectible;
+ Volatile<bool> m_fHostAssemblyPublished;
+ Volatile<bool> m_fCalculatedShouldLoadDomainNeutral;
+ Volatile<bool> m_fShouldLoadDomainNeutral;
+
+ public:
+ // Indicates if the assembly can be cached in a binding cache such as AssemblySpecBindingCache.
+ inline bool CanUseWithBindingCache()
+ { STATIC_CONTRACT_WRAPPER; return GetFile()->CanUseWithBindingCache(); }
+};
+
+typedef DomainAssembly::ModuleIterator DomainModuleIterator;
+
+// --------------------------------------------------------------------------------
+// DomainModule is a subclass of DomainFile which specifically represents a module.
+// --------------------------------------------------------------------------------
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+
+class DomainModule : public DomainFile
+{
+ VPTR_VTABLE_CLASS(DomainModule, DomainFile);
+
+ private:
+ PTR_DomainAssembly m_pDomainAssembly;
+
+ void UpdatePEFile(PTR_PEFile pFile);
+
+ public:
+
+ // ------------------------------------------------------------
+ // Public API
+ // ------------------------------------------------------------
+
+ DomainAssembly *GetDomainAssembly()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_pDomainAssembly;
+ }
+
+ Module *GetModule()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pModule;
+ }
+
+ LPCSTR GetName()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetFile()->GetSimpleName();
+ }
+
+ mdFile GetToken()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetFile()->GetToken();
+ }
+
+ PEModule *GetFile()
+ {
+ WRAPPER_NO_CONTRACT;
+ return PTR_PEModule(m_pFile);
+ }
+
+ BOOL IsAssembly()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return FALSE;
+ }
+
+ void SetModule(Module *pModule);
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ // ------------------------------------------------------------
+ // Loader API
+ // ------------------------------------------------------------
+
+ friend class COMModule;
+
+#ifndef DACCESS_COMPILE
+ DomainModule(AppDomain *pDomain, DomainAssembly *pAssembly, PEFile *pFile);
+ ~DomainModule();
+#endif
+
+ // ------------------------------------------------------------
+ // Internal routines
+ // ------------------------------------------------------------
+
+#ifndef DACCESS_COMPILE
+ void Begin();
+ void Allocate();
+ void LoadSharers();
+ void DeliverSyncEvents();
+ void DeliverAsyncEvents();
+#endif
+
+#ifdef FEATURE_PREJIT
+#ifndef DACCESS_COMPILE
+ void FindNativeImage();
+#endif
+#endif // FEATURE_PREJIT
+};
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+#endif // _DOMAINFILE_H_
diff --git a/src/vm/domainfile.inl b/src/vm/domainfile.inl
new file mode 100644
index 0000000000..1b9a057fe7
--- /dev/null
+++ b/src/vm/domainfile.inl
@@ -0,0 +1,137 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+
+inline Module* DomainFile::GetCurrentModule()
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ SUPPORTS_DAC;
+
+ return m_pModule;
+}
+
+inline Module* DomainFile::GetLoadedModule()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ {
+ // CheckLoaded() eventually calls PEFile::GetNativeImageWithRef(), which
+ // takes a reader lock on the file's m_pMetadataLock. However, this is
+ // only done in debug for the consistency check, so we can accept the lock violation.
+ CONTRACT_VIOLATION(TakesLockViolation);
+ CONSISTENCY_CHECK(CheckLoaded());
+ }
+
+ return m_pModule;
+}
+
+inline Module* DomainFile::GetModule()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ {
+ // While executing the consistency check, we will take a lock.
+ // But since this is debug-only, we'll allow the lock violation so that
+ // CANNOT_TAKE_LOCK callers aren't penalized
+ CONTRACT_VIOLATION(TakesLockViolation);
+ CONSISTENCY_CHECK(CheckLoadLevel(FILE_LOAD_ALLOCATE));
+ }
+
+ return m_pModule;
+}
+
+inline Assembly* DomainAssembly::GetCurrentAssembly()
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ return m_pAssembly;
+}
+
+inline Assembly* DomainAssembly::GetLoadedAssembly()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ {
+ // CheckLoaded() eventually calls PEFile::GetNativeImageWithRef(), which
+ // takes a reader lock on the file's m_pMetadataLock. However, this is
+ // only done in debug for the consistency check, so we can accept the lock violation.
+ CONTRACT_VIOLATION(TakesLockViolation);
+ CONSISTENCY_CHECK(CheckLoaded());
+ }
+
+ return m_pAssembly;
+}
+
+inline Assembly* DomainAssembly::GetAssembly()
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ {
+ // CheckLoadLevel() is SO_INTOLERANT. However, this is only done in
+ // debug for the consistency check, so we can accept the SO violation.
+ CONTRACT_VIOLATION(SOToleranceViolation);
+ CONSISTENCY_CHECK(CheckLoadLevel(FILE_LOAD_ALLOCATE));
+ }
+ return m_pAssembly;
+}
+
+#ifndef DACCESS_COMPILE
+inline void DomainFile::UpdatePEFileWorker(PTR_PEFile pFile)
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(CheckPointer(pFile));
+ if (pFile==m_pFile)
+ return;
+ _ASSERTE(m_pOriginalFile==NULL);
+ m_pOriginalFile=m_pFile;
+ pFile->AddRef();
+ m_pFile=pFile;
+}
+
+inline void DomainAssembly::UpdatePEFile(PTR_PEFile pFile)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_HOSTED_BINDER
+ GetAppDomain()->UpdatePublishHostedAssembly(this, pFile);
+#else
+ this->UpdatePEFileWorker(pFile);
+#endif
+}
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+inline void DomainModule::UpdatePEFile(PTR_PEFile pFile)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ this->UpdatePEFileWorker(pFile);
+}
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+#endif // DACCESS_COMPILE
+
+inline ULONG DomainAssembly::HashIdentity()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetFile()->HashIdentity();
+}
+
+inline BOOL DomainAssembly::IsCollectible()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fCollectible;
+}
+
diff --git a/src/vm/dwbucketmanager.hpp b/src/vm/dwbucketmanager.hpp
new file mode 100644
index 0000000000..cd727f7b82
--- /dev/null
+++ b/src/vm/dwbucketmanager.hpp
@@ -0,0 +1,1495 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// FILE: dwbucketmanager.hpp
+//
+// This file contains the manager types for the different types of Watson buckets
+// and various helper types.
+//
+
+//
+
+//
+// ============================================================================
+
+#ifndef DWBUCKETMANAGER_HPP
+#define DWBUCKETMANAGER_HPP
+
+#ifdef FEATURE_WINDOWSPHONE
+#include "corhost.h"
+#endif
+
+// this will be used as an index into g_WerEventTraits
+enum WatsonBucketType
+{
+ CLR20r3 = 0,
+ MoCrash,
+#ifdef FEATURE_WINDOWSPHONE
+ WinPhoneCrash,
+#endif
+ // insert new types above this line
+ EndOfWerBucketTypes
+};
+
+const DWORD kInvalidParamsCount = 0xffffffff;
+
+struct WerEventTypeTraits
+{
+ const LPCWSTR EventName;
+ const DWORD CountParams;
+ INDEBUG(const WatsonBucketType BucketType);
+
+ WerEventTypeTraits(LPCWSTR name, DWORD params DEBUG_ARG(WatsonBucketType type))
+ : EventName(name), CountParams(params) DEBUG_ARG(BucketType(type))
+ {
+ _ASSERTE(params < kInvalidParamsCount);
+ }
+};
+
+const WerEventTypeTraits g_WerEventTraits[] =
+{
+ WerEventTypeTraits(W("CLR20r3"), 9 DEBUG_ARG(CLR20r3)),
+ WerEventTypeTraits(W("MoAppCrash"), 9 DEBUG_ARG(MoCrash))
+#ifdef FEATURE_WINDOWSPHONE
+ // unfortunately Apollo uses the same event name
+ ,WerEventTypeTraits(W("CLR20r3"), 9 DEBUG_ARG(WinPhoneCrash))
+#endif
+};
+
+DWORD GetCountBucketParamsForEvent(LPCWSTR wzEventName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (wzEventName == NULL)
+ {
+ _ASSERTE(!"missing event name when retrieving bucket params count");
+ return 10;
+ }
+
+ DWORD countParams = kInvalidParamsCount;
+ for (int index = 0; index < EndOfWerBucketTypes; ++index)
+ {
+ if (wcscmp(wzEventName, g_WerEventTraits[index].EventName) == 0)
+ {
+ _ASSERTE(index == g_WerEventTraits[index].BucketType);
+ countParams = g_WerEventTraits[index].CountParams;
+ break;
+ }
+ }
+
+ if (countParams == kInvalidParamsCount)
+ {
+ _ASSERTE(!"unknown event name when retrieving bucket params count");
+ countParams = 10;
+ }
+
+ return countParams;
+}
+
+#ifndef DACCESS_COMPILE
+
+#include "dwreport.h"
+#include <msodwwrap.h>
+#include "dbginterface.h"
+#include <sha1.h>
+
+#ifdef FEATURE_APPX
+#include "appxutil.h"
+#endif
+
+//------------------------------------------------------------------------------
+// Description
+// Converts an array of bytes to a string of base32 encoded characters.
+//
+// Constructor
+// pData -- The bytes to be converted.
+// nData -- Count of bytes to be converted.
+//
+// Convert
+// pOut -- Put converted bytes here.
+// nOut -- Max number of characters to put
+//
+// returns -- Number of characters put.
+//
+// Notes
+// Five bytes of input produces 8 characters of output.
+//------------------------------------------------------------------------------
+class BytesToBase32
+{
+private:
+ // Five doesn't go into 8 very well, so we will wind up with 8 characters per
+ // five bytes of input. Specifically, a block of 5 bytes will be formatted
+ // like this:
+ // 7 6 5 4 3 2 1 0 <-- bit #
+ // 0 1 1 1 1 1 2 2 2
+ // 1 2 2 3 3 3 3 3 4 <-- which character does the bit go to?
+ // 2 4 4 4 4 5 5 5 5
+ // 3 5 6 6 6 6 6 7 7
+ // 4 7 7 7 8 8 8 8 8
+ // This structure defines 2 masks and 3 shift values per 5-bit value.
+ // The first mask is the mask from the first byte. The first two
+ // shifts are a left- OR a right- shift for the bits obtained via that mask.
+ // If there is a second mask, that is to get bits from the next byte,
+ // shifted right by the second shift value. Finally, there is a bit to
+ // indicate that the scanner should advance to the next byte.
+ // Referring to the table above, the decoder values for the first 5-bit
+ // value will be:
+ // m1 : 0xf8 - mask
+ // l1 : 0 - no left shift
+ // r1 : 3 - right shift 3 bits
+ // m2 : 0 - no second mask
+ // r2 : 0 - no second right shift
+ // skip : 0 - don't skip to next byte (still 3 more bits for the second 5-bit value).
+ struct decoder_
+ {
+ unsigned int m1 : 8; // Mask 1
+ unsigned int l1 : 4; // Left shift 1
+ unsigned int r1 : 4; // Right shift 1
+ unsigned int m2 : 8; // Mask 2
+ unsigned int r2 : 4; // Right shift 2
+ unsigned int skip:4; // Skip to next input byte
+ };
+
+ static const decoder_ decoder[8]; // Array of decoder specs.
+ static const WCHAR base32[33]; // Array of 33 characters: A-Z, 0-5, =
+
+ BYTE *pData; // Pointer to data.
+ int nData; // Total bytes of data.
+
+ BYTE *pEnd;
+
+ int nWhich; // Where in the sequence of 8 5-bit datums?
+
+public:
+ BytesToBase32(BYTE *p, int n) : pData(p), nData(n), nWhich(0) { LIMITED_METHOD_CONTRACT; pEnd = pData + nData; }
+
+ WCHAR GetNextChar();
+ BOOL MoreChars() { LIMITED_METHOD_CONTRACT; return pData < pEnd; }
+
+ int Convert(__inout_ecount(nOut) LPWSTR pOut, int nOut);
+};
+
+// This table tells how to pick out 5-bits at a time (8 times) from 5-bytes of data.
+const BytesToBase32::decoder_ BytesToBase32::decoder[8] =
+{ // m1 l1 r1 m2 r2 skip
+ {0xf8, 0, 3, 0x00, 0, 0},
+ {0x07, 2, 0, 0xc0, 6, 1},
+ {0x3e, 0, 1, 0x00, 0, 0},
+ {0x01, 4, 0, 0xf0, 4, 1},
+ {0x0f, 1, 0, 0x80, 7, 1},
+ {0x7c, 0, 2, 0x00, 0, 0},
+ {0x03, 3, 0, 0xe0, 5, 1},
+ {0x1f, 0, 0, 0x00, 0, 1},
+};
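+
+// Worked example (following the bit table above): for decoder[1] = {0x07, 2, 0, 0xc0, 6, 1},
+// the second output character packs (byte0 & 0x07) << 2 together with (byte1 & 0xc0) >> 6,
+// i.e. the low 3 bits of the first byte followed by the top 2 bits of the second.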
+
+// Array of characters with which to encode.
+const WCHAR BytesToBase32::base32[33] = {'A','B','C','D','E','F','G','H','I','J','K','L', 'M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','0','1','2','3','4','5','6'};
+
+//------------------------------------------------------------------------------
+// Description
+// Converts 5-bits to a character; fundamental base32 encoding.
+//
+// Parameters
+// none
+//
+// Returns
+// The next 5-bits, converted to a character. Also advances the
+// character pointer. When no characters remain to be converted,
+// returns W('6')
+//
+//------------------------------------------------------------------------------
+WCHAR BytesToBase32::GetNextChar()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ unsigned int result = 0;
+
+ _ASSERTE(pData <= pEnd);
+ _ASSERTE(nWhich >= 0 && nWhich < lengthof(decoder));
+
+ // If out of data, return signal value, > any valid char.
+ if (pData == pEnd)
+ return base32[lengthof(base32)-1];
+
+#if defined(_DEBUG)
+ if (decoder[nWhich].l1)
+ { // There is a l1 shift.
+ _ASSERTE(decoder[nWhich].m1); // There should be a m1 mask
+ _ASSERTE(decoder[nWhich].r1 == 0); // There should not be a r1 shift
+ _ASSERTE(decoder[nWhich].m2); // There should be an m2 mask to fill in the rest of the bits.
+ _ASSERTE(decoder[nWhich].r2); // m2 bits never start in the right place; there must be a shift
+ // The masks, shifted, and or'd together should equal 0x1f, 5-bits.
+ _ASSERTE( ( (decoder[nWhich].m1 << decoder[nWhich].l1) | (decoder[nWhich].m2 >> decoder[nWhich].r2)) == 0x1f);
+ }
+ else
+ { // There is no l1 shift.
+ _ASSERTE(decoder[nWhich].m2 == 0); // There should not be any m2 bits
+ _ASSERTE( (decoder[nWhich].m1 >> decoder[nWhich].r1) == 0x1f); // The m1 bits, shifted should be 0x1f, 5-bits.
+ }
+#endif
+
+ // Mask off the bits.
+ result = *pData & decoder[nWhich].m1;
+
+ // Shift left or right as needed.
+ if (decoder[nWhich].l1)
+ { // Shift up to make space for low-order bits from next byte.
+ result = result << decoder[nWhich].l1;
+ }
+ else
+ if (decoder[nWhich].r1)
+ { // Shift down into position. There should be no more bits from next byte.
+ result = result >> decoder[nWhich].r1;
+ }
+
+ // Skip to next byte if appropriate.
+ if (decoder[nWhich].skip)
+ ++pData;
+
+ // Grab more bits if specified, and more are available.
+ if (pData < pEnd && decoder[nWhich].m2)
+ { // All second-byte data are shifted right, so just mask and shift.
+ result |= ( (*pData & decoder[nWhich].m2) >> decoder[nWhich].r2);
+ }
+
+ // Advance the 'state machine' -- which of the eight 5-bit datums within a 5-byte block.
+ if (++nWhich == lengthof(decoder))
+ nWhich = 0;
+
+ // Sanity check on value.
+ _ASSERTE(result < lengthof(base32));
+
+ return base32[result];
+} // WCHAR BytesToBase32::GetNextChar()
+
+//------------------------------------------------------------------------------
+// Description
+// Performs the conversion of a buffer to base32.
+//
+// Parameters
+// pOut -- Buffer to receive the characters.
+// nOut -- Maximum characters to write to the buffer.
+//
+// Returns
+// the number of characters copied to the output buffer.
+//
+//------------------------------------------------------------------------------
+int BytesToBase32::Convert(
+ __inout_ecount(nOut) LPWSTR pOut,
+ int nOut)
+{
+ WRAPPER_NO_CONTRACT;
+
+ int nWritten = 0; // Count of characters written to output.
+
+ // Stop when the buffer is full, or the bytes are fully converted.
+ while (nOut > 0 && MoreChars())
+ {
+ *pOut = GetNextChar();
+ ++pOut;
+ --nOut;
+ ++nWritten;
+ }
+
+ return nWritten;
+} // int BytesToBase32::Convert()
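+
+// Illustrative usage (hypothetical caller, not code from this file): encoding a
+// 20-byte SHA-1 hash for a bucket parameter. 20 bytes yield 20*8/5 = 32
+// characters, so a 33-WCHAR buffer leaves room for a terminator:
+//
+//     BYTE hash[20]; // ... filled in by the hashing code ...
+//     WCHAR encoded[33];
+//     BytesToBase32 convert(hash, sizeof(hash));
+//     int cch = convert.Convert(encoded, lengthof(encoded) - 1);
+//     encoded[cch] = W('\0');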
+
+// This abstract class provides base functionality for populating a bucket parameter in the GMB with some data.
+// The actual mapping of ordinal parameter to data type (e.g. parameter 1 is the app name) is handled in subclasses
+// of this type. See GetBucketParamsManager() for retrieving a bucket params manager.
+class BaseBucketParamsManager
+{
+private:
+ GenericModeBlock* m_pGmb;
+ TypeOfReportedError m_tore;
+ Thread* m_pThread;
+ OBJECTREF* m_pException;
+ INDEBUG(size_t m_countParamsLogged);
+ MethodDesc* m_pFaultingMD;
+ PCODE m_faultingPc;
+
+ // misc helper functions
+ DWORD GetILOffset();
+ bool GetFileVersionInfoForModule(Module* pModule, USHORT& major, USHORT& minor, USHORT& build, USHORT& revision);
+ bool IsCodeContractsFrame(MethodDesc* pMD);
+ void FindFaultingMethodInfo();
+ OBJECTREF GetRealExceptionObject();
+ WCHAR* GetParamBufferForIndex(BucketParameterIndex paramIndex);
+ int CopyStringToBucket(__out_ecount(targetMaxLength) LPWSTR pTargetParam, int targetMaxLength, __in_z LPCWSTR pSource, bool cannonicalize = false);
+ void LogParam(__in_z LPCWSTR paramValue, BucketParameterIndex paramIndex);
+
+protected:
+ ~BaseBucketParamsManager();
+
+ typedef void (BaseBucketParamsManager::*DataPopulatorFunction)(__out_ecount(maxLength) WCHAR* targetParam, int maxLength);
+ void PopulateBucketParameter(BucketParameterIndex paramIndex, DataPopulatorFunction pFnDataPopulator, int maxLength);
+
+ void PopulateEventName(LPCWSTR eventTypeName);
+ // functions for retrieving data to go into various bucket parameters
+ void GetAppName(__out_ecount(maxLength) WCHAR* targetParam, int maxLength);
+ void GetAppVersion(__out_ecount(maxLength) WCHAR* targetParam, int maxLength);
+ void GetAppTimeStamp(__out_ecount(maxLength) WCHAR* targetParam, int maxLength);
+ void GetModuleName(__out_ecount(maxLength) WCHAR* targetParam, int maxLength);
+ void GetModuleVersion(__out_ecount(maxLength) WCHAR* targetParam, int maxLength);
+ void GetModuleTimeStamp(__out_ecount(maxLength) WCHAR* targetParam, int maxLength);
+ void GetMethodDef(__out_ecount(maxLength) WCHAR* targetParam, int maxLength);
+ void GetIlOffset(__out_ecount(maxLength) WCHAR* targetParam, int maxLength);
+ void GetExceptionName(__out_ecount(maxLength) WCHAR* targetParam, int maxLength);
+ void GetPackageMoniker(__out_ecount(maxLength) WCHAR* targetParam, int maxLength);
+ void GetPRAID(__out_ecount(maxLength) WCHAR* targetParam, int maxLength);
+ void GetIlRva(__out_ecount(maxLength) WCHAR* targetParam, int maxLength);
+
+public:
+ BaseBucketParamsManager(GenericModeBlock* pGenericModeBlock, TypeOfReportedError typeOfError, PCODE initialFaultingPc, Thread* pFaultingThread, OBJECTREF* pThrownException);
+
+ // function that consumers should call to populate the GMB
+ virtual void PopulateBucketParameters() = 0;
+};
+
+BaseBucketParamsManager::BaseBucketParamsManager(GenericModeBlock* pGenericModeBlock, TypeOfReportedError typeOfError, PCODE initialFaultingPc, Thread* pFaultingThread, OBJECTREF* pThrownException)
+ : m_pFaultingMD(NULL), m_faultingPc(initialFaultingPc), m_pGmb(pGenericModeBlock), m_tore(typeOfError), m_pThread(pFaultingThread), m_pException(pThrownException)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pGmb);
+ INDEBUG(m_countParamsLogged = 0);
+
+ ZeroMemory(pGenericModeBlock, sizeof(GenericModeBlock));
+
+ EECodeInfo codeInfo(initialFaultingPc);
+ if (codeInfo.IsValid())
+ {
+ m_pFaultingMD = codeInfo.GetMethodDesc();
+
+ if (m_pFaultingMD)
+ FindFaultingMethodInfo();
+ }
+}
+
+BaseBucketParamsManager::~BaseBucketParamsManager()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(m_countParamsLogged == GetCountBucketParamsForEvent(m_pGmb->wzEventTypeName));
+}
+
+void BaseBucketParamsManager::PopulateEventName(LPCWSTR eventTypeName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ wcsncpy_s(m_pGmb->wzEventTypeName, DW_MAX_BUCKETPARAM_CWC, eventTypeName, _TRUNCATE);
+
+ _ASSERTE(GetCountBucketParamsForEvent(eventTypeName));
+ LOG((LF_EH, LL_INFO10, "Event : %S\n", m_pGmb->wzEventTypeName));
+}
+
+WCHAR* BaseBucketParamsManager::GetParamBufferForIndex(BucketParameterIndex paramIndex)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(paramIndex < InvalidBucketParamIndex);
+ switch (paramIndex)
+ {
+ case Parameter1:
+ return m_pGmb->wzP1;
+ case Parameter2:
+ return m_pGmb->wzP2;
+ case Parameter3:
+ return m_pGmb->wzP3;
+ case Parameter4:
+ return m_pGmb->wzP4;
+ case Parameter5:
+ return m_pGmb->wzP5;
+ case Parameter6:
+ return m_pGmb->wzP6;
+ case Parameter7:
+ return m_pGmb->wzP7;
+ case Parameter8:
+ return m_pGmb->wzP8;
+ case Parameter9:
+ return m_pGmb->wzP9;
+ default:
+ {
+ _ASSERTE(!"bad paramIndex");
+ // This is a back-stop to prevent returning NULL and having to have
+ // callers check for it; we should never get here anyway.
+ return m_pGmb->wzP10;
+ }
+ }
+}
+
+void BaseBucketParamsManager::PopulateBucketParameter(BucketParameterIndex paramIndex, DataPopulatorFunction pFnDataPopulator, int maxLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(paramIndex < InvalidBucketParamIndex);
+ WCHAR* targetParam = GetParamBufferForIndex(paramIndex);
+
+ // verify that we haven't already written data to this param
+ _ASSERTE(targetParam && targetParam[0] == W('\0'));
+#ifdef FEATURE_WINDOWSPHONE
+ WCHAR const* overrideParam = g_CLRErrorReportingManager.GetBucketParamOverride(paramIndex);
+ if (overrideParam != NULL)
+ {
+ CopyStringToBucket(targetParam, maxLength, overrideParam, false);
+ }
+ else
+#endif // FEATURE_WINDOWSPHONE
+ {
+ (this->*pFnDataPopulator)(targetParam, maxLength);
+ }
+
+ LogParam(targetParam, paramIndex);
+}
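+
+// A populator is invoked through the pointer-to-member above; for example
+// (illustrative call, mirroring the schema code later in this file):
+//     PopulateBucketParameter(Parameter1, &BaseBucketParamsManager::GetAppName, 32);
+// dispatches to this->GetAppName(targetParam, 32) on the concrete manager.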
+
+void BaseBucketParamsManager::GetAppName(__out_ecount(maxLength) WCHAR* targetParam, int maxLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ WCHAR appPath[MAX_PATH];
+ DWORD cchAppPath = NumItems(appPath);
+
+ if (GetCurrentModuleFileName(appPath, &cchAppPath) == S_OK)
+ {
+ CopyStringToBucket(targetParam, maxLength, appPath);
+ }
+ else
+ {
+ wcsncpy_s(targetParam, maxLength, W("missing"), _TRUNCATE);
+ }
+}
+
+void BaseBucketParamsManager::GetAppVersion(__out_ecount(maxLength) WCHAR* targetParam, int maxLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ WCHAR appPath[MAX_PATH];
+ DWORD cchAppPath = NumItems(appPath);
+
+ WCHAR verBuf[23];
+ USHORT major, minor, build, revision;
+
+ if ((GetCurrentModuleFileName(appPath, &cchAppPath) == S_OK) && SUCCEEDED(DwGetFileVersionInfo(appPath, major, minor, build, revision)))
+ {
+ _snwprintf_s(targetParam,
+ maxLength,
+ _TRUNCATE,
+ W("%d.%d.%d.%d"),
+ major, minor, build, revision);
+ }
+ else if (DwGetAssemblyVersion(appPath, verBuf, NumItems(verBuf)) != 0)
+ {
+ wcscpy_s(targetParam, maxLength, verBuf);
+ }
+ else
+ {
+ wcsncpy_s(targetParam, maxLength, W("missing"), _TRUNCATE);
+ }
+}
+
+void BaseBucketParamsManager::GetAppTimeStamp(__out_ecount(maxLength) WCHAR* targetParam, int maxLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ CONTRACT_VIOLATION(GCViolation);
+
+ HMODULE hModule = WszGetModuleHandle(NULL);
+ PEDecoder pe(hModule);
+
+ ULONG ulTimeStamp = pe.GetTimeDateStamp();
+
+ _snwprintf_s(targetParam,
+ maxLength,
+ _TRUNCATE,
+ W("%x"),
+ ulTimeStamp);
+ }
+ EX_CATCH
+ {
+ wcsncpy_s(targetParam, maxLength, W("missing"), _TRUNCATE);
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+void BaseBucketParamsManager::GetModuleName(__out_ecount(maxLength) WCHAR* targetParam, int maxLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Module* pModule = NULL;
+
+ if (m_pFaultingMD != NULL)
+ pModule = m_pFaultingMD->GetModule();
+
+ bool failed = false;
+
+ if (pModule)
+ {
+ // Get the assembly name, and determine its length, including terminating NULL.
+ Assembly* pAssembly = pModule->GetAssembly();
+ LPCUTF8 utf8AssemblyName = pAssembly->GetSimpleName();
+ const int assemblyNameLength = WszMultiByteToWideChar(CP_UTF8, 0, utf8AssemblyName, -1, NULL, 0);
+
+        // Full name and length; initially assume a single-module assembly.
+ WCHAR *fullName = NULL;
+ int fullNameLength = assemblyNameLength;
+
+ if (pModule->IsManifest())
+ {
+ // Single-module assembly; allocate a buffer and convert assembly name.
+ fullName = reinterpret_cast< WCHAR* >(_alloca(sizeof(WCHAR)*(fullNameLength)));
+ WszMultiByteToWideChar(CP_UTF8, 0, utf8AssemblyName, -1, fullName, fullNameLength);
+ }
+ else
+ { // This is a non-manifest module, which means it is a multi-module assembly.
+ // Construct a name like 'assembly+module'.
+
+ // Get the module name, and determine its length, including terminating NULL.
+ LPCUTF8 utf8ModuleName = pModule->GetSimpleName();
+ const int moduleNameLength = WszMultiByteToWideChar(CP_UTF8, 0, utf8ModuleName, -1, NULL, 0);
+
+ // Full name length is assembly name length + module name length + 1 char for '+'.
+ // However, both assemblyNameLength and moduleNameLength include space for terminating NULL,
+ // but of course only one NULL is needed, so the final length is just the sum of the two lengths.
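+            // Worked example (hypothetical names): assemblyNameLength = 6 ("MyAsm" plus its
+            // terminating NULL) and moduleNameLength = 15 ("mod1.netmodule" plus its NULL)
+            // give fullNameLength = 21 -- exactly room for "MyAsm+mod1.netmodule" and one NULL.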
+ if (!ClrSafeInt<int>::addition(assemblyNameLength, moduleNameLength, fullNameLength))
+ {
+ failed = true;
+ }
+ else
+ {
+ // Allocate a buffer with proper prefast checks.
+ int AllocLen;
+ if (!ClrSafeInt<int>::multiply(sizeof(WCHAR), fullNameLength, AllocLen))
+ {
+ failed = true;
+ }
+ else
+ {
+ fullName = reinterpret_cast< WCHAR* >(_alloca(AllocLen));
+
+ // Convert the assembly name.
+ WszMultiByteToWideChar(CP_UTF8, 0, utf8AssemblyName, -1, fullName, assemblyNameLength);
+
+ // replace NULL with '+'
+ _ASSERTE(fullName[assemblyNameLength-1] == 0);
+ fullName[assemblyNameLength-1] = W('+');
+
+ // Convert the module name after the '+'
+ WszMultiByteToWideChar(CP_UTF8, 0, utf8ModuleName,-1, &fullName[assemblyNameLength], moduleNameLength);
+ }
+ }
+ }
+
+ if (!failed)
+ {
+ // Make sure NULL termination is right.
+ _ASSERTE(fullName[fullNameLength - 1] == 0);
+
+ // Copy name in, with possible truncation or hashing.
+ CopyStringToBucket(targetParam, maxLength, fullName);
+ }
+ }
+
+ if (!pModule || failed)
+ {
+ wcsncpy_s(targetParam, maxLength, W("missing"), _TRUNCATE);
+ }
+}
+
+void BaseBucketParamsManager::GetModuleVersion(__out_ecount(maxLength) WCHAR* targetParam, int maxLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Module* pModule = NULL;
+
+ if (m_pFaultingMD != NULL)
+ pModule = m_pFaultingMD->GetModule();
+
+ bool failed = false;
+
+    // @TODO: what if it is an in-memory module? It can have version info,
+    // but we will not retrieve it correctly.
+ if (pModule)
+ {
+ USHORT major = 0, minor = 0, build = 0, revision = 0;
+
+ bool gotFileVersion = GetFileVersionInfoForModule(pModule, major, minor, build, revision);
+
+ // if we failed to get a version and this isn't the manifest module then try that
+ if (!gotFileVersion && !pModule->IsManifest())
+ {
+ pModule = pModule->GetAssembly()->GetManifestModule();
+ if (pModule)
+ gotFileVersion = GetFileVersionInfoForModule(pModule, major, minor, build, revision);
+ }
+
+ if (!gotFileVersion)
+ {
+ // if we didn't get a file version then fall back to assembly version (typical for in-memory modules)
+ if (FAILED(pModule->GetAssembly()->GetVersion(&major, &minor, &build, &revision)))
+ failed = true;
+ }
+
+ if (!failed)
+ {
+ _snwprintf_s(targetParam,
+ maxLength,
+ _TRUNCATE,
+ W("%d.%d.%d.%d"),
+ major, minor, build, revision);
+ }
+ }
+
+ if (!pModule || failed)
+ {
+ wcsncpy_s(targetParam, maxLength, W("missing"), _TRUNCATE);
+ }
+}
+
+void BaseBucketParamsManager::GetModuleTimeStamp(__out_ecount(maxLength) WCHAR* targetParam, int maxLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Module* pModule = NULL;
+
+ if (m_pFaultingMD != NULL)
+ pModule = m_pFaultingMD->GetModule();
+
+ bool failed = false;
+
+ if (pModule)
+ {
+ EX_TRY
+ {
+ // We only store the IL timestamp in the native image for the
+ // manifest module. We should consider fixing this for Orcas.
+ PTR_PEFile pFile = pModule->GetAssembly()->GetManifestModule()->GetFile();
+
+ // for dynamic modules use 0 as the time stamp
+ ULONG ulTimeStamp = 0;
+
+ if (!pFile->IsDynamic())
+ {
+ ulTimeStamp = pFile->GetILImageTimeDateStamp();
+ _ASSERTE(ulTimeStamp != 0);
+ }
+
+ _snwprintf_s(targetParam,
+ maxLength,
+ _TRUNCATE,
+ W("%x"),
+ ulTimeStamp);
+ }
+ EX_CATCH
+ {
+ failed = true;
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+
+ if (!pModule || failed)
+ {
+ wcsncpy_s(targetParam, maxLength, W("missing"), _TRUNCATE);
+ }
+}
+
+void BaseBucketParamsManager::GetMethodDef(__out_ecount(maxLength) WCHAR* targetParam, int maxLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pFaultingMD)
+ {
+ mdMethodDef methodDef = m_pFaultingMD->GetMemberDef();
+ _snwprintf_s(targetParam,
+ maxLength,
+ _TRUNCATE,
+ W("%x"),
+ RidFromToken(methodDef));
+ }
+ else
+ {
+ wcsncpy_s(targetParam, maxLength, W("missing"), _TRUNCATE);
+ }
+}
+
+void BaseBucketParamsManager::GetIlOffset(__out_ecount(maxLength) WCHAR* targetParam, int maxLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DWORD ilOffset = GetILOffset();
+
+ _snwprintf_s(targetParam,
+ maxLength,
+ _TRUNCATE,
+ W("%x"),
+ ilOffset);
+}
+
+void BaseBucketParamsManager::GetExceptionName(__out_ecount(maxLength) WCHAR* targetParam, int maxLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_tore.GetType() != TypeOfReportedError::StackOverflowException)
+ {
+ // At this point we have to switch to cooperative mode, because we need an OBJECTREF.
+ GCX_COOP();
+
+ OBJECTREF throwable = GetRealExceptionObject();
+
+ LPCWSTR pExceptionName = NULL;
+
+ if (throwable == NULL)
+ {
+ // Don't have an exception object. Make up something reasonable.
+ switch (m_tore.GetType())
+ {
+ case TypeOfReportedError::NativeThreadUnhandledException:
+ case TypeOfReportedError::UnhandledException:
+ pExceptionName = W("Exception");
+ break;
+ case TypeOfReportedError::FatalError:
+ pExceptionName = W("FatalError");
+ break;
+ case TypeOfReportedError::UserBreakpoint:
+ pExceptionName = W("Debugger.Break");
+ break;
+ case TypeOfReportedError::NativeBreakpoint:
+ pExceptionName = W("Breakpoint");
+ break;
+ default:
+ _ASSERTE(!"Unexpected TypeOfReportedError");
+ break;
+ }
+ }
+ else
+ {
+ MethodTable* pMT = OBJECTREFToObject(throwable)->GetMethodTable();
+ DefineFullyQualifiedNameForClassWOnStack();
+
+ EX_TRY
+ {
+ pExceptionName = GetFullyQualifiedNameForClassNestedAwareW(pMT);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ _ASSERTE(pExceptionName);
+
+ // Copy name in, with possible truncation or hashing.
+ CopyStringToBucket(targetParam, maxLength, pExceptionName);
+ }
+ else // StackOverflowException
+ {
+        // During StackOverflowException processing we may be holding the ThreadStore lock and cannot spawn a managed thread (doing so could deadlock).
+        // So we avoid using any managed heap objects and avoid switching to GC_COOP.
+ CopyStringToBucket(targetParam, maxLength, W("System.StackOverflowException"));
+ }
+}
+
+void BaseBucketParamsManager::GetPackageMoniker(__out_ecount(maxLength) WCHAR* targetParam, int maxLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_CORECLR
+ bool success = false;
+ EX_TRY
+ {
+ wcsncpy_s(targetParam, maxLength, AppX::GetHeadPackageMoniker(), _TRUNCATE);
+ success = true;
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (!success)
+ {
+        // this is not expected to fail legitimately
+ _ASSERTE(!"failed to get package moniker for watson");
+ wcsncpy_s(targetParam, maxLength, W("missing"), _TRUNCATE);
+ }
+#else
+ _ASSERTE(!"AppX support NYI for CoreCLR");
+#endif // FEATURE_CORECLR
+}
+
+void BaseBucketParamsManager::GetPRAID(__out_ecount(maxLength) WCHAR* targetParam, int maxLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_CORECLR
+ LPCWSTR pPraid = NULL;
+ if (SUCCEEDED(AppX::GetApplicationId(pPraid)))
+ {
+ _snwprintf_s(targetParam,
+ maxLength,
+ _TRUNCATE,
+ W("praid:%s"),
+ pPraid);
+ }
+ else
+ {
+        // this is not expected to fail legitimately
+ _ASSERTE(!"failed to get PRAID for watson");
+ wcsncpy_s(targetParam, maxLength, W("missing"), _TRUNCATE);
+ }
+#else
+ _ASSERTE(!"PRAID support NYI for CoreCLR");
+#endif
+}
+
+void BaseBucketParamsManager::GetIlRva(__out_ecount(maxLength) WCHAR* targetParam, int maxLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DWORD ilOffset = GetILOffset();
+
+ if (ilOffset == MAXDWORD)
+ ilOffset = 0;
+
+ if (m_pFaultingMD)
+ ilOffset += m_pFaultingMD->GetRVA();
+
+ _snwprintf_s(targetParam,
+ maxLength,
+ _TRUNCATE,
+ W("%x"),
+ ilOffset);
+}
+
+// helper functions
+
+DWORD BaseBucketParamsManager::GetILOffset()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DWORD nativeOffset = 0;
+ DWORD ilOffset = MAXDWORD;
+
+ EECodeInfo codeInfo(m_faultingPc);
+ if (codeInfo.IsValid())
+ {
+ nativeOffset = codeInfo.GetRelOffset();
+ _ASSERTE(m_pFaultingMD == codeInfo.GetMethodDesc());
+ }
+
+ if (m_pFaultingMD)
+ {
+ EX_TRY
+ {
+ CONTRACT_VIOLATION(GCViolation);
+ _ASSERTE(g_pDebugInterface != NULL);
+ g_pDebugInterface->GetILOffsetFromNative(
+ m_pFaultingMD,
+ (const BYTE *)m_faultingPc,
+ nativeOffset,
+ &ilOffset);
+ }
+ EX_CATCH
+ {
+ // Swallow the exception, and just use MAXDWORD.
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+
+ return ilOffset;
+}
+
+// attempts to get file version information for the specified module.
+// returns true on success and all out params will contain data.
+// on failure the out params are not touched.
+// assumes that pModule is not NULL!!
+bool BaseBucketParamsManager::GetFileVersionInfoForModule(Module* pModule, USHORT& major, USHORT& minor, USHORT& build, USHORT& revision)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pModule != NULL);
+ }
+ CONTRACTL_END;
+
+ bool succeeded = false;
+
+ PEFile* pFile = pModule->GetFile();
+ if (pFile)
+ {
+ // if we have a native imaged loaded for this module then get the version information from that.
+ if (pFile->IsNativeLoaded())
+ {
+ PEImage* pNativeImage = pFile->GetPersistentNativeImage();
+
+ if (pNativeImage)
+ {
+ LPCWSTR niPath = pNativeImage->GetPath().GetUnicode();
+ if (niPath != NULL && niPath != SString::Empty() && SUCCEEDED(DwGetFileVersionInfo(niPath, major, minor, build, revision)))
+ {
+ succeeded = true;
+ }
+ }
+ }
+
+ // if we failed to get the version info from the native image then fall back to the IL image.
+ if (!succeeded)
+ {
+ LPCWSTR modulePath = pFile->GetPath().GetUnicode();
+ if (modulePath != NULL && modulePath != SString::Empty() && SUCCEEDED(DwGetFileVersionInfo(modulePath, major, minor, build, revision)))
+ {
+ succeeded = true;
+ }
+ }
+ }
+
+ return succeeded;
+}
+
+// attempts to determine if the specified MethodDesc is one of the code contracts methods.
+// this is defined as any method on the System.Diagnostics.Contracts.__ContractsRuntime type.
+bool BaseBucketParamsManager::IsCodeContractsFrame(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pMD != NULL);
+ }
+ CONTRACTL_END;
+
+ if (!pMD)
+ return false;
+
+ MethodTable* pMT = pMD->GetMethodTable_NoLogging();
+ LPCUTF8 pszNamespace = NULL;
+ LPCUTF8 pszName = NULL;
+ pszName = pMT->GetFullyQualifiedNameInfo(&pszNamespace);
+
+ if (!pszName || !pszNamespace)
+ return false;
+
+ LPCUTF8 pszContractsNamespace = "System.Diagnostics.Contracts";
+ LPCUTF8 pszContractsRuntimeType = "__ContractsRuntime";
+
+ if (strcmp(pszNamespace, pszContractsNamespace) == 0 &&
+ strcmp(pszName, pszContractsRuntimeType) == 0)
+ return true;
+
+ return false;
+}
+
+// code contract failures will have several frames on the stack which are part of the code contracts infrastructure.
+// as such we don't want to blame any of these frames since they're just propagating the fault from the user's code.
+// the purpose of this function is to identify if the current faulting frame is part of the code contract infrastructure
+// and if it is to traverse the stack trace in the exception object until the first frame which isn't code contracts stuff.
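+//
+// An illustrative sketch (hypothetical frames): for a stack trace of
+//     [0] __ContractsRuntime.ReportFailure
+//     [1] __ContractsRuntime.Requires
+//     [2] UserCode.Foo                    <- first non-contracts frame
+//     [3] UserCode.Bar                    <- Foo's caller
+// frame 2 is blamed; for CONTRACT_FAILURE_PRECONDITION the blame moves to
+// frame 3, since a failed precondition is the fault of the caller.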
+void BaseBucketParamsManager::FindFaultingMethodInfo()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_pFaultingMD != NULL);
+ }
+ CONTRACTL_END;
+
+ // check if this frame is part of the code contracts infrastructure
+ if (IsCodeContractsFrame(m_pFaultingMD))
+ {
+ // it is so we need to do more searching to find the correct faulting MethodDesc.
+ // iterate over each frame in the stack trace object until we find the first
+ // frame that isn't part of the code contracts goop.
+ GCX_COOP();
+
+ OBJECTREF throwable = GetRealExceptionObject();
+
+ if (throwable != NULL)
+ {
+ StackTraceArray traceData;
+ EXCEPTIONREF(throwable)->GetStackTrace(traceData);
+
+ GCPROTECT_BEGIN(traceData);
+
+ size_t numElements = traceData.Size();
+
+ ContractFailureKind kind = GetContractFailureKind(throwable);
+
+ // skip frame 0 since we already know it's part of code contracts
+ for (size_t index = 1; index < numElements; ++index)
+ {
+ StackTraceElement const& cur = traceData[index];
+
+ MethodDesc* pMD = cur.pFunc;
+ _ASSERTE(pMD);
+
+ if (!IsCodeContractsFrame(pMD))
+ {
+                    // for preconditions we want the next frame; however, if we don't have it
+                    // for some reason then just use this frame (better than defaulting to the code contracts goop)
+ if ((kind == CONTRACT_FAILURE_PRECONDITION) && (index + 1 < numElements))
+ {
+ _ASSERTE(!IsCodeContractsFrame(traceData[index + 1].pFunc));
+ continue;
+ }
+
+ m_pFaultingMD = pMD;
+ m_faultingPc = cur.ip;
+ break;
+ }
+ }
+
+ GCPROTECT_END();
+ }
+ }
+}
+
+// gets the "real" exception object. it might be m_pException or the exception object on the thread
+OBJECTREF BaseBucketParamsManager::GetRealExceptionObject()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF throwable = NULL;
+
+ if (m_pException != NULL)
+ {
+ _ASSERTE(IsProtectedByGCFrame(m_pException));
+ throwable = *m_pException;
+ }
+ else if (m_tore.IsException())
+ {
+ // If it is an exception, see if there is a Throwable object.
+ if (m_pThread != NULL)
+ {
+ throwable = m_pThread->GetThrowable();
+
+ // If the "Throwable" is null, try the "LastThrownObject"
+ if (throwable == NULL)
+ throwable = m_pThread->LastThrownObject();
+ }
+ }
+
+ return throwable;
+}
+
+//------------------------------------------------------------------------------
+// Description
+// Copies a string to a Watson bucket parameter. If the offered string is
+// longer than targetMaxLength, the string will be shortened.
+//
+// Parameters
+// pTargetParam -- the destination buffer.
+// targetMaxLength -- the max length of the parameter.
+// pSource -- the input string.
+// cannonicalize -- if true, canonicalize the filename (lowercase it)
+//
+// Returns
+// the number of characters copied to the output buffer. zero indicates an
+// error.
+//
+// Notes
+// The truncation algorithm is this:
+// - if the value contains non-ascii characters, divide targetMaxLength by 4,
+// due to restrictions in Watson bucketing rules
+// - if the value fits, just copy it as-is
+// - if the value doesn't fit, strip any trailing ".dll", ".exe", ".netmodule",
+// or "Exception"
+// - if the value still doesn't fit, take a SHA-1 hash of the source, and
+// encode it in base32.
+// - if the value may require hashing, targetMaxLength should be at least 32,
+// because that is what a SHA-1 hash encoded in base32 will require.
+// - targetMaxLength does not include the terminating nul.
+//------------------------------------------------------------------------------
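+// Worked example (hypothetical values): with targetMaxLength = 10, the ASCII
+// source W("WidgetsException") (16 chars) does not fit, so the "Exception"
+// suffix is stripped and the remaining 7 chars, "Widgets", are copied as-is.
+// With targetMaxLength >= 32, a long value with no recognized suffix is
+// replaced by the 32-char base32 encoding of its SHA-1 hash; smaller limits
+// keep only the leading characters of that encoding.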
+int BaseBucketParamsManager::CopyStringToBucket(__out_ecount(targetMaxLength) LPWSTR pTargetParam, int targetMaxLength, __in_z LPCWSTR pSource, bool cannonicalize)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Array of suffixes to truncate if necessary.
+ static const LPCWSTR truncations[] =
+ {
+ W("Exception"),
+ W(".dll"),
+ W(".exe"),
+ W(".netmodule"),
+ 0
+ };
+
+ int srcLen = static_cast<int>(wcslen(pSource));
+
+ // If the source contains unicode characters, they'll be encoded at 4 chars per char.
+ int targLen = ContainsUnicodeChars(pSource) ? targetMaxLength / 4 : targetMaxLength;
+
+ // If the string is too long, see if there is a suffix that can be trimmed.
+ if (srcLen > targLen)
+ {
+ for (int i = 0; truncations[i]; ++i)
+ {
+ // how long is this suffix?
+ int slen = static_cast<int>(wcslen(truncations[i]));
+
+ // Could the string have this suffix?
+ if (slen < srcLen)
+ {
+ // maybe -- check.
+ if (SString::_wcsicmp(&pSource[srcLen - slen], truncations[i]) == 0)
+ {
+ // yes, the string does have this suffix. drop it.
+ srcLen -= slen;
+ break;
+ }
+ }
+ }
+ }
+
+ // If the (possibly truncated) value fits, copy it and return.
+ if (srcLen <= targLen)
+ {
+ wcsncpy_s(pTargetParam, DW_MAX_BUCKETPARAM_CWC, pSource, srcLen);
+
+ if (cannonicalize)
+ {
+            // canonicalize filenames so that the same exceptions map to the same buckets.
+ _wcslwr_s(pTargetParam, DW_MAX_BUCKETPARAM_CWC);
+ }
+ return srcLen;
+ }
+
+ // String didn't fit, so hash it.
+ SHA1Hash hash;
+ hash.AddData(reinterpret_cast<BYTE*>(const_cast<LPWSTR>(pSource)), (static_cast<int>(wcslen(pSource))) * sizeof(WCHAR));
+
+ // Encode in base32. The hash is a fixed size; we'll accept up to maxLen characters of the encoding.
+ BytesToBase32 b32(hash.GetHash(), SHA1_HASH_SIZE);
+ targLen = b32.Convert(pTargetParam, targetMaxLength);
+ pTargetParam[targLen] = W('\0');
+
+ return targLen;
+}
+
+void BaseBucketParamsManager::LogParam(__in_z LPCWSTR paramValue, BucketParameterIndex paramIndex)
+{
+#ifdef _DEBUG
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(paramIndex < InvalidBucketParamIndex);
+ // the BucketParameterIndex enum starts at 0 however we refer to Watson
+ // bucket params with 1-based indices so we add one to paramIndex.
+ LOG((LF_EH, LL_INFO10, " p %d: %S\n", paramIndex + 1, paramValue));
+ ++m_countParamsLogged;
+#endif
+}
+
+// specific manager classes for the various watson bucket types that the CLR reports.
+// each type is responsible for populating the GMB according to the event type schema.
+// to add support for a new schema simply inherit from the BaseBucketParamsManager and
+// in the PopulateBucketParameters() function fill out the GMB as required. then update
+// function GetBucketParamsManager() (and a few dependent functions) to return the new
+// type as required.
+
+class CLR20r3BucketParamsManager : public BaseBucketParamsManager
+{
+public:
+ CLR20r3BucketParamsManager(GenericModeBlock* pGenericModeBlock, TypeOfReportedError typeOfError, PCODE faultingPC, Thread* pFaultingThread, OBJECTREF* pThrownException);
+ ~CLR20r3BucketParamsManager();
+
+ virtual void PopulateBucketParameters();
+};
+
+CLR20r3BucketParamsManager::CLR20r3BucketParamsManager(GenericModeBlock* pGenericModeBlock, TypeOfReportedError typeOfError, PCODE faultingPC, Thread* pFaultingThread, OBJECTREF* pThrownException)
+ : BaseBucketParamsManager(pGenericModeBlock, typeOfError, faultingPC, pFaultingThread, pThrownException)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+}
+
+CLR20r3BucketParamsManager::~CLR20r3BucketParamsManager()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+void CLR20r3BucketParamsManager::PopulateBucketParameters()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PopulateEventName(g_WerEventTraits[CLR20r3].EventName);
+
+ // the "+ 1" is to explicitly indicate which fields need to specify space for NULL
+ PopulateBucketParameter(Parameter1, &CLR20r3BucketParamsManager::GetAppName, 32);
+ PopulateBucketParameter(Parameter2, &CLR20r3BucketParamsManager::GetAppVersion, 23 + 1);
+ PopulateBucketParameter(Parameter3, &CLR20r3BucketParamsManager::GetAppTimeStamp, 8 + 1);
+ PopulateBucketParameter(Parameter4, &CLR20r3BucketParamsManager::GetModuleName, 64);
+ PopulateBucketParameter(Parameter5, &CLR20r3BucketParamsManager::GetModuleVersion, 23 + 1);
+ PopulateBucketParameter(Parameter6, &CLR20r3BucketParamsManager::GetModuleTimeStamp, 8 + 1);
+ PopulateBucketParameter(Parameter7, &CLR20r3BucketParamsManager::GetMethodDef, 6 + 1);
+ PopulateBucketParameter(Parameter8, &CLR20r3BucketParamsManager::GetIlOffset, 8 + 1);
+ PopulateBucketParameter(Parameter9, &CLR20r3BucketParamsManager::GetExceptionName, 32);
+}
+
+class MoCrashBucketParamsManager : public BaseBucketParamsManager
+{
+public:
+ MoCrashBucketParamsManager(GenericModeBlock* pGenericModeBlock, TypeOfReportedError typeOfError, PCODE faultingPC, Thread* pFaultingThread, OBJECTREF* pThrownException);
+ ~MoCrashBucketParamsManager();
+
+ virtual void PopulateBucketParameters();
+};
+
+MoCrashBucketParamsManager::MoCrashBucketParamsManager(GenericModeBlock* pGenericModeBlock, TypeOfReportedError typeOfError, PCODE faultingPC, Thread* pFaultingThread, OBJECTREF* pThrownException)
+ : BaseBucketParamsManager(pGenericModeBlock, typeOfError, faultingPC, pFaultingThread, pThrownException)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+}
+
+MoCrashBucketParamsManager::~MoCrashBucketParamsManager()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+void MoCrashBucketParamsManager::PopulateBucketParameters()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PopulateEventName(g_WerEventTraits[MoCrash].EventName);
+
+ // DW_MAX_BUCKETPARAM_CWC - 1 to ensure space for NULL
+ PopulateBucketParameter(Parameter1, &MoCrashBucketParamsManager::GetPackageMoniker, DW_MAX_BUCKETPARAM_CWC - 1);
+ PopulateBucketParameter(Parameter2, &MoCrashBucketParamsManager::GetPRAID, DW_MAX_BUCKETPARAM_CWC - 1);
+ PopulateBucketParameter(Parameter3, &MoCrashBucketParamsManager::GetAppVersion, DW_MAX_BUCKETPARAM_CWC - 1);
+ PopulateBucketParameter(Parameter4, &MoCrashBucketParamsManager::GetAppTimeStamp, DW_MAX_BUCKETPARAM_CWC - 1);
+ PopulateBucketParameter(Parameter5, &MoCrashBucketParamsManager::GetModuleName, DW_MAX_BUCKETPARAM_CWC - 1);
+ PopulateBucketParameter(Parameter6, &MoCrashBucketParamsManager::GetModuleVersion, DW_MAX_BUCKETPARAM_CWC - 1);
+ PopulateBucketParameter(Parameter7, &MoCrashBucketParamsManager::GetModuleTimeStamp, DW_MAX_BUCKETPARAM_CWC - 1);
+ PopulateBucketParameter(Parameter8, &MoCrashBucketParamsManager::GetExceptionName, DW_MAX_BUCKETPARAM_CWC - 1);
+ PopulateBucketParameter(Parameter9, &MoCrashBucketParamsManager::GetIlRva, DW_MAX_BUCKETPARAM_CWC - 1);
+}
+
+#ifdef FEATURE_WINDOWSPHONE
+class WinPhoneBucketParamsManager : public BaseBucketParamsManager
+{
+public:
+ WinPhoneBucketParamsManager(GenericModeBlock* pGenericModeBlock, TypeOfReportedError typeOfError, PCODE faultingPC, Thread* pFaultingThread, OBJECTREF* pThrownException);
+ ~WinPhoneBucketParamsManager();
+
+ virtual void PopulateBucketParameters();
+};
+
+WinPhoneBucketParamsManager::WinPhoneBucketParamsManager(GenericModeBlock* pGenericModeBlock, TypeOfReportedError typeOfError, PCODE faultingPC, Thread* pFaultingThread, OBJECTREF* pThrownException)
+ : BaseBucketParamsManager(pGenericModeBlock, typeOfError, faultingPC, pFaultingThread, pThrownException)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+}
+
+WinPhoneBucketParamsManager::~WinPhoneBucketParamsManager()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+void WinPhoneBucketParamsManager::PopulateBucketParameters()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PopulateEventName(g_WerEventTraits[WinPhoneCrash].EventName);
+
+ // the "+ 1" is to explicitly indicate which fields need to specify space for NULL
+ PopulateBucketParameter(Parameter1, &WinPhoneBucketParamsManager::GetAppName, DW_MAX_BUCKETPARAM_CWC - 1);
+ PopulateBucketParameter(Parameter2, &WinPhoneBucketParamsManager::GetAppVersion, 23 + 1);
+ PopulateBucketParameter(Parameter3, &WinPhoneBucketParamsManager::GetAppTimeStamp, 8 + 1);
+ PopulateBucketParameter(Parameter4, &WinPhoneBucketParamsManager::GetModuleName, DW_MAX_BUCKETPARAM_CWC - 1);
+ PopulateBucketParameter(Parameter5, &WinPhoneBucketParamsManager::GetModuleVersion, 23 + 1);
+ PopulateBucketParameter(Parameter6, &WinPhoneBucketParamsManager::GetModuleTimeStamp, 8 + 1);
+ PopulateBucketParameter(Parameter7, &WinPhoneBucketParamsManager::GetMethodDef, 6 + 1);
+ PopulateBucketParameter(Parameter8, &WinPhoneBucketParamsManager::GetIlOffset, 8 + 1);
+ PopulateBucketParameter(Parameter9, &WinPhoneBucketParamsManager::GetExceptionName, DW_MAX_BUCKETPARAM_CWC - 1);
+}
+#endif // FEATURE_WINDOWSPHONE
+
+WatsonBucketType GetWatsonBucketType()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+#if defined(FEATURE_APPX) && !defined(FEATURE_CORECLR)
+ if (AppX::IsAppXProcess() && !AppX::IsAppXNGen())
+ return MoCrash;
+ else
+#endif // FEATURE_APPX
+
+#ifdef FEATURE_WINDOWSPHONE
+ return WinPhoneCrash;
+#else
+ return CLR20r3;
+#endif // FEATURE_WINDOWSPHONE
+}
+
+#endif // DACCESS_COMPILE
+
+#endif // DWBUCKETMANAGER_HPP
diff --git a/src/vm/dwreport.cpp b/src/vm/dwreport.cpp
new file mode 100644
index 0000000000..210e08240c
--- /dev/null
+++ b/src/vm/dwreport.cpp
@@ -0,0 +1,3285 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// FILE: dwreport.cpp
+//
+
+//
+
+//
+// ============================================================================
+
+#include "common.h"
+
+#include "dwreport.h"
+#include "dwbucketmanager.hpp"
+#include <cordbpriv.h>
+#include "field.h"
+#include <msodwwrap.h>
+#include <shlobj.h>
+#include "dbginterface.h"
+#include <sha1.h>
+#include <winver.h>
+#include "dlwrap.h"
+#include "eemessagebox.h"
+#include "eventreporter.h"
+#include "utilcode.h"
+#include "../dlls/mscorrc/resource.h" // for resource ids
+
+#include "imagehlp.h"
+
+#ifdef FEATURE_UEF_CHAINMANAGER
+// This is required to register our UEF callback with the UEF chain manager
+#include <mscoruefwrapper.h>
+#endif // FEATURE_UEF_CHAINMANAGER
+
+EFaultRepRetVal DoReportFault(EXCEPTION_POINTERS * pExceptionInfo);
+
+// Should the CLR use Watson to report fatal errors and unhandled exceptions?
+static BOOL g_watsonErrorReportingEnabled = FALSE;
+
+// Variables to control launching Watson only once, but making all threads wait for that single launch to finish.
+LONG g_watsonAlreadyLaunched = 0; // Used to note that another thread has done Watson.
+
+#if !defined(FEATURE_UEF_CHAINMANAGER)
+HandleHolder g_hWatsonCompletionEvent = NULL; // Used to signal that Watson has finished.
+#endif // FEATURE_UEF_CHAINMANAGER
+
+const WCHAR kErrorReportingPoliciesKey[] = W("SOFTWARE\\Policies\\Microsoft\\PCHealth\\ErrorReporting");
+const WCHAR kErrorReportingKey[] = W("SOFTWARE\\Microsoft\\PCHealth\\ErrorReporting");
+
+const WCHAR kShowUIValue[] = W("ShowUI");
+const WCHAR kForceQueueModeValue[] = W("ForceQueueMode");
+const WCHAR kDoReportValue[] = W("DoReport");
+const WCHAR kAllOrNoneValue[] = W("AllOrNone");
+const WCHAR kIncludeMSAppsValue[] = W("IncludeMicrosoftApps");
+const WCHAR kIncludeWindowsAppsValue[] = W("IncludeWindowsApps");
+const WCHAR kExclusionListKey[] = W("SOFTWARE\\Microsoft\\PCHealth\\ErrorReporting\\ExclusionList");
+const WCHAR kInclusionListKey[] = W("SOFTWARE\\Microsoft\\PCHealth\\ErrorReporting\\InclusionList");
+const WCHAR kExclusionListSubKey[] = W("\\ExclusionList");
+const WCHAR kInclusionListSubKey[] = W("\\InclusionList");
+
+
+// Default values for various registry keys
+const DWORD kDefaultShowUIValue = 1;
+const DWORD kDefaultForceQueueModeValue = 0;
+const DWORD kDefaultDoReportValue = 1;
+const DWORD kDefaultAllOrNoneValue = 1;
+const DWORD kDefaultExclusionValue = 0;
+const DWORD kDefaultInclusionValue = 0;
+const DWORD kDefaultIncludeMSAppsValue = 1;
+const DWORD kDefaultIncludeWindowsAppsValue = 1;
+
+// Default value for the default debugger and auto debugger attach settings.
+const BOOL kDefaultDebuggerIsWatson = FALSE;
+const BOOL kDefaultAutoValue = FALSE;
+
+// When debugging the watson process itself, the faulting process will spin
+// waiting for Watson to signal various events. If these waits time out, the
+// faulting process will go ahead and exit, which is sub-optimal if you need to
+// inspect the faulting process with the debugger at the same time. In debug
+// builds, use a longer wait time, since watson may be stopped under the
+// debugger for a while.
+
+#ifdef _DEBUG
+const DWORD kDwWaitTime = DW_TIMEOUT_VALUE * 1000;
+#else
+const DWORD kDwWaitTime = DW_TIMEOUT_VALUE;
+#endif
+
+#ifdef _TARGET_X86_
+ const DWORD kWatsonRegKeyOptions = 0;
+#else
+ const DWORD kWatsonRegKeyOptions = KEY_WOW64_32KEY;
+#endif
+
+const WCHAR kWatsonPath[] = WATSON_INSTALLED_REG_SUBPATH;
+#if defined(_TARGET_X86_)
+const WCHAR kWatsonValue[] = WATSON_INSTALLED_REG_VAL;
+#else
+const WCHAR kWatsonValue[] = WATSON_INSTALLED_REG_VAL_IA64;
+#endif
+const WCHAR* kWatsonImageNameOnLonghorn = W("\\dw20.exe");
+
+typedef HMODULE (*AcquireLibraryHandleFn)(LPCWSTR);
+
+template <AcquireLibraryHandleFn AcquireLibraryHandleFnPtr, bool RequiresFree>
+class SimpleModuleHolder
+{
+private:
+ HMODULE hModule;
+
+public:
+ SimpleModuleHolder(LPCWSTR moduleName)
+ {
+ hModule = AcquireLibraryHandleFnPtr(moduleName);
+ }
+
+ ~SimpleModuleHolder()
+ {
+ if (RequiresFree && hModule)
+ {
+ CLRFreeLibrary(hModule);
+ }
+ }
+
+ operator HMODULE() { return hModule; }
+};
+
+#ifndef FEATURE_CORESYSTEM
+#define WER_MODULE_NAME_W WINDOWS_KERNEL32_DLLNAME_W
+typedef SimpleModuleHolder<WszGetModuleHandle, false> WerModuleHolder;
+#else
+#define WER_MODULE_NAME_W W("api-ms-win-core-windowserrorreporting-l1-1-0.dll")
+typedef SimpleModuleHolder<CLRLoadLibrary, true> WerModuleHolder;
+#endif
+
+//------------------------------------------------------------------------------
+// Description
+// Indicate if Watson is enabled
+//
+// Parameters
+// None
+//
+// Returns
+// TRUE -- Yes, Watson is enabled.
+// FALSE -- No, it's not.
+//------------------------------------------------------------------------------
+BOOL IsWatsonEnabled()
+{
+ LIMITED_METHOD_CONTRACT;
+ return g_watsonErrorReportingEnabled;
+}
+
+//------------------------------------------------------------------------------
+// Description
+// Initializes watson global critsec and event. Records whether run via
+// managed .exe.
+//
+// Parameters
+// fFlags -- the COINITIEE flags used to start the runtime.
+//
+// Returns
+// TRUE -- always
+//------------------------------------------------------------------------------
+BOOL InitializeWatson(COINITIEE fFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Watson is enabled for all SKUs
+ g_watsonErrorReportingEnabled = TRUE;
+
+ LOG((LF_EH, LL_INFO10, "InitializeWatson: %s\n", g_watsonErrorReportingEnabled ? "enabled" : "disabled"));
+
+ if (!IsWatsonEnabled())
+ {
+ return TRUE;
+ }
+
+#if defined(FEATURE_UEF_CHAINMANAGER)
+ return TRUE;
+#else
+ // Create the event that all-but-the-first threads will wait on (the first thread
+    // will set the event when Watson is done).
+ g_hWatsonCompletionEvent = WszCreateEvent(NULL, TRUE /*manual reset*/, FALSE /*initial state*/, NULL);
+ return (g_hWatsonCompletionEvent != NULL);
+#endif // FEATURE_UEF_CHAINMANAGER
+
+} // BOOL InitializeWatson()
+
+//------------------------------------------------------------------------------
+// Description
+// Register out-of-process Watson callbacks provided in DAC dll for WIN7 or later
+//
+// Parameters
+// None
+//
+// Returns
+// None
+//
+// Note: In Windows 7, the OS will take over the job of error reporting, and so most
+// of our Watson code should not be used. In such cases, we will, however, still need
+// to provide some services to Windows Error Reporting, such as computing bucket
+// parameters for a managed unhandled exception.
+//------------------------------------------------------------------------------
+BOOL RegisterOutOfProcessWatsonCallbacks()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(RunningOnWin7());
+ }
+ CONTRACTL_END;
+
+ WCHAR wszDACName[] = MAIN_DAC_MODULE_NAME_W W(".dll");
+ WCHAR wszDACPath[MAX_PATH];
+ DWORD dwSize = 0;
+
+ if ((FAILED(::GetCORSystemDirectoryInternal(wszDACPath, NumItems(wszDACPath), &dwSize))) ||
+ (wcscat_s(wszDACPath, _countof(wszDACPath), wszDACName) != 0))
+ {
+ return FALSE;
+ }
+
+ WerModuleHolder hWerModule(WER_MODULE_NAME_W);
+
+#ifdef FEATURE_CORESYSTEM
+ if ((hWerModule == NULL) && !RunningOnWin8())
+ {
+ // If we are built for CoreSystemServer, but are running on Windows 7, we need to look elsewhere
+ hWerModule = WerModuleHolder(W("Kernel32.dll"));
+ }
+#endif
+
+ if (hWerModule == NULL)
+ {
+ _ASSERTE(!"failed to get WER module handle");
+ return FALSE;
+ }
+
+ typedef HRESULT (WINAPI * WerRegisterRuntimeExceptionModuleFnPtr)(PCWSTR, PDWORD);
+ WerRegisterRuntimeExceptionModuleFnPtr pFnWerRegisterRuntimeExceptionModule;
+
+ pFnWerRegisterRuntimeExceptionModule = (WerRegisterRuntimeExceptionModuleFnPtr)
+ GetProcAddress(hWerModule, "WerRegisterRuntimeExceptionModule");
+
+ _ASSERTE(pFnWerRegisterRuntimeExceptionModule != NULL);
+ if (pFnWerRegisterRuntimeExceptionModule == NULL)
+ {
+ return FALSE;
+ }
+
+ HRESULT hr = (*pFnWerRegisterRuntimeExceptionModule)(wszDACPath, (PDWORD)g_pMSCorEE);
+ if (FAILED(hr))
+ {
+ STRESS_LOG0(LF_STARTUP,
+ LL_ERROR,
+ "WATSON support: failed to register DAC dll with WerRegisterRuntimeExceptionModule");
+
+#ifdef FEATURE_CORESYSTEM
+ // For CoreSys we *could* be running on a platform that doesn't have Watson proper
+ // (the APIs might exist but they just fail).
+ // WerRegisterRuntimeExceptionModule may return E_NOIMPL.
+ return TRUE;
+#else // FEATURE_CORESYSTEM
+ _ASSERTE(! "WATSON support: failed to register DAC dll with WerRegisterRuntimeExceptionModule");
+ return FALSE;
+#endif // FEATURE_CORESYSTEM
+ }
+
+ STRESS_LOG0(LF_STARTUP,
+ LL_INFO100,
+ "WATSON support: registered DAC dll with WerRegisterRuntimeExceptionModule");
+ return TRUE;
+}
+
+//------------------------------------------------------------------------------
+// CreateWatsonSharedMemory
+//
+// Description
+//
+// Creates a shared memory block for communication with Watson
+//
+// Parameters
+// hWatsonSharedMemory -- [out] The handle to the watson shared memory.
+// ppWatsonSharedMemory -- [out] A pointer to the Watson shared memory.
+// Returns
+// S_OK -- if the function completes normally.
+// an error HRESULT -- otherwise
+// Exceptions
+// None
+//------------------------------------------------------------------------------
+HRESULT CreateWatsonSharedMemory(HANDLE* hWatsonSharedMemory,
+ DWSharedMem** ppWatsonSharedMemory);
+
+//------------------------------------------------------------------------------
+// Description
+// Alerts the host that the thread is leaving the runtime, and sleeps
+// waiting for an object to be signalled
+//
+// Parameters
+// handle -- the handle to wait on
+// timeout -- the length of time to wait
+//
+// Returns
+// DWORD -- The return value from WaitForSingleObject
+//
+// Exceptions
+// None
+//
+// Notes
+// winwrap.h prevents us from using SetEvent by including
+// #define SetEvent Dont_Use_SetEvent
+// This is because using SetEvent within the runtime will result in poor
+// interaction with any sort of host process (e.g. SQL). We can use the
+// SetEvent/WaitForSingleObject primitives as long as we do some other work to
+// make sure the host understands.
+//------------------------------------------------------------------------------
+#undef SetEvent
+DWORD ClrWaitForSingleObject(HANDLE handle, DWORD timeout)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ LeaveRuntimeHolder holder(reinterpret_cast< size_t >(WaitForSingleObject));
+ return WaitForSingleObject(handle, timeout);
+} // DWORD ClrWaitForSingleObject()
+
+//------------------------------------------------------------------------------
+// Helper class to set an event in destructor -- allows setting an event on the
+// way out of a function.
+//
+// Used to synchronize multiple threads with unhandled exceptions -- only the
+// first will run Watson, and all the rest will wait on the first one to be
+// done.
+//------------------------------------------------------------------------------
+class SettingEventHolder
+{
+public:
+ SettingEventHolder(HANDLE &event) : m_event(event), m_bSetIt(FALSE) { LIMITED_METHOD_CONTRACT; }
+ ~SettingEventHolder() { LIMITED_METHOD_CONTRACT; if (m_bSetIt && m_event) SetEvent(m_event); }
+ void EnableSetting() { LIMITED_METHOD_CONTRACT; m_bSetIt = TRUE; }
+ DWORD DoWait(DWORD timeout=INFINITE_TIMEOUT) { WRAPPER_NO_CONTRACT; return m_event ? ClrWaitForSingleObject(m_event, timeout) : 0; }
+
+private:
+ HANDLE m_event; // The event to set
+ BOOL m_bSetIt; // If true, set event in destructor.
+};
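+
+// Typical use of SettingEventHolder (an illustrative sketch, not a call site
+// from this file; the condition name is hypothetical):
+//
+//     SettingEventHolder eventHolder(g_hWatsonCompletionEvent);
+//     if (fIsFirstFaultingThread)
+//     {
+//         eventHolder.EnableSetting();     // signal waiters when we leave scope
+//         // ... launch Watson ...
+//     }
+//     else
+//     {
+//         eventHolder.DoWait(kDwWaitTime); // block until the first thread is done
+//     }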
+
+HRESULT DwGetFileVersionInfo(
+ __in_z LPCWSTR wszFilePath,
+ USHORT& major,
+ USHORT& minor,
+ USHORT& build,
+ USHORT& revision)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ major = minor = build = revision = 0;
+ ULARGE_INTEGER appVersion = { 0, 0 };
+
+ HRESULT result = GetFileVersion(wszFilePath, &appVersion);
+ if (SUCCEEDED(result))
+ {
+ major = (appVersion.HighPart & 0xFFFF0000) >> 16;
+ minor = appVersion.HighPart & 0x0000FFFF;
+ build = (appVersion.LowPart & 0xFFFF0000) >> 16;
+ revision = appVersion.LowPart & 0x0000FFFF;
+ }
+
+ return result;
+}
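+
+// Worked example (hypothetical version): a file version of 4.0.30319.18000 is
+// packed as appVersion.HighPart == 0x00040000 and appVersion.LowPart == 0x766F4650;
+// the shifts and masks above unpack it to major = 4, minor = 0,
+// build = 30319 (0x766F), and revision = 18000 (0x4650).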
+
+enum MicrosoftAppTypes
+{
+ MicrosoftAppTypesNone = 0,
+ MicrosoftAppTypesWindows = 0x1,
+ MicrosoftAppTypesOther = 0x2
+};
+
+inline void SetMSFTApp(DWORD &AppType) { LIMITED_METHOD_CONTRACT; AppType |= MicrosoftAppTypesOther; }
+inline void SetMSFTWindowsApp(DWORD &AppType) { LIMITED_METHOD_CONTRACT; AppType |= MicrosoftAppTypesWindows; }
+
+inline BOOL IsMSFTApp(DWORD AppType) { LIMITED_METHOD_CONTRACT; return (AppType & MicrosoftAppTypesOther) ? TRUE : FALSE; }
+inline BOOL IsMSFTWindowsApp(DWORD AppType) { LIMITED_METHOD_CONTRACT; return (AppType & MicrosoftAppTypesWindows) ? TRUE : FALSE; }
+
+
+//------------------------------------------------------------------------------
+// Description
+// Determine if the application is a Microsoft application.
+//
+// Parameters
+// wszFilePath Path to a file to extract the information from
+// pAppTypes [out] Put MicrosoftAppTypes here.
+//
+// Returns
+// S_OK If the function succeeds
+// E_XXXX Failure result.
+//
+// Exceptions
+// None
+//------------------------------------------------------------------------------
+HRESULT DwCheckCompany( // S_OK or error.
+ __in_z LPWSTR wszFilePath, // Path to the executable.
+ DWORD* pAppTypes) // Non-microsoft, microsoft, microsoft windows.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //
+ // Note that this code is equivalent to FusionGetFileVersionInfo, found in fusion\asmcache\asmcache.cpp
+ //
+
+ HRESULT hr = S_OK; // result of some operation
+ DWORD dwHandle = 0;
+ DWORD bufSize = 0; // Size of allocation for VersionInfo.
+ DWORD ret;
+
+    // Initialize the out parameter to avoid confusion.
+ *pAppTypes = MicrosoftAppTypesNone;
+
+ // Find the buffer size for the version info structure we need to create
+ EX_TRY
+ {
+ bufSize = GetFileVersionInfoSizeW(wszFilePath, &dwHandle);
+ if (!bufSize)
+ {
+ hr = HRESULT_FROM_GetLastErrorNA();
+ }
+ }
+ EX_CATCH
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ if (!bufSize)
+ {
+ return hr;
+ }
+
+ // Allocate the buffer for the version info structure
+ // _alloca() can't return NULL -- raises STATUS_STACK_OVERFLOW.
+ BYTE* pVersionInfoBuffer = reinterpret_cast< BYTE* >(_alloca(bufSize));
+
+ // Extract the version information blob. The version information
+ // contains much more than the actual item of interest.
+ {
+        // If the previous GetFileVersionInfoSizeW succeeds, version.dll has been loaded
+ // in the process, and delay load of GetFileVersionInfoW will not throw.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ ret = GetFileVersionInfoW(wszFilePath, dwHandle, bufSize, pVersionInfoBuffer);
+
+ if (!ret)
+ {
+ return HRESULT_FROM_GetLastErrorNA();
+ }
+ }
+
+ // Extract the actual CompanyName and compare it to "Microsoft" and
+ // "MicrosoftWindows"
+
+ // Get the language and codepage for the version info.
+ UINT size = 0;
+ struct
+ {
+ WORD language;
+ WORD codePage;
+ }* translation;
+
+ {
+        // If the previous GetFileVersionInfoSizeW succeeds, version.dll has been loaded
+        // in the process, and delay load of VerQueryValueW will not throw.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ ret = VerQueryValueW(pVersionInfoBuffer, W("\\VarFileInfo\\Translation"),
+ reinterpret_cast< void **>(&translation), &size);
+
+ if (!ret || size == 0)
+ {
+ return HRESULT_FROM_GetLastErrorNA();
+ }
+ }
+
+ // Build the query key for the language-specific company name resource.
+ WCHAR buf[64]; //----+----1----+----2----+----3----+----4
+ _snwprintf_s(buf, NumItems(buf), _TRUNCATE, W("\\StringFileInfo\\%04x%04x\\CompanyName"),
+ translation->language, translation->codePage);
+
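+    // For example (hypothetical translation data): language 0x0409 with code
+    // page 0x04B0 yields the key W("\\StringFileInfo\\040904b0\\CompanyName").
+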
+ // Get the company name.
+ WCHAR *name;
+ {
+        // If the previous GetFileVersionInfoSizeW succeeds, version.dll has been loaded
+        // in the process, and delay load of VerQueryValueW will not throw.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ ret = VerQueryValueW(pVersionInfoBuffer, buf,
+ reinterpret_cast< void** >(&name), &size);
+ }
+
+ // If there is company name info, check it.
+ if (ret != 0 && size != 0 && wcsstr(name, W("Microsoft")))
+ {
+ SetMSFTApp(*pAppTypes);
+ }
+
+
+ // Now build the query key for the language-specific product name resource.
+ _snwprintf_s(buf, NumItems(buf), _TRUNCATE, W("\\StringFileInfo\\%04x%04x\\ProductName"),
+ translation->language, translation->codePage);
+
+ // Get the product name.
+ {
+        // If the previous GetFileVersionInfoSizeW succeeds, version.dll has been loaded
+        // in the process, and delay load of VerQueryValueW will not throw.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ ret = VerQueryValueW(pVersionInfoBuffer, buf,
+ reinterpret_cast< void** >(&name), &size);
+ }
+
+ // If there is product name info, check it.
+ if (ret != 0 && size != 0 && wcsstr(name, W("Microsoft\x0ae Windows\x0ae")))
+ {
+ SetMSFTWindowsApp(*pAppTypes);
+ }
+
+ return S_OK;
+
+} // HRESULT DwCheckCompany()
+
+
+//------------------------------------------------------------------------------
+// Description
+// Read the description from the resource section.
+//
+// Parameters
+// wszFilePath Path to a file from which to extract the description
+// pBuf [out] Put description here.
+// cchBuf [in] Size of buf, wide chars.
+//
+// Returns
+// The number of characters stored. Zero if error or no description.
+//
+// Exceptions
+// None
+//------------------------------------------------------------------------------
+int DwGetAppDescription( // Number of characters written.
+ __in_z LPWSTR wszFilePath, // Path to the executable.
+ __inout_ecount(cchBuf) WCHAR *pBuf, // Put description here.
+ int cchBuf) // Size of buf, wide chars.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DWORD dwHandle = 0;
+ DWORD bufSize = 0; // Size of allocation for VersionInfo.
+ DWORD ret;
+
+ // Find the buffer size for the version info structure we need to create
+ EX_TRY
+ {
+ bufSize = GetFileVersionInfoSizeW(wszFilePath, &dwHandle);
+ }
+ EX_CATCH
+ {
+ bufSize = 0;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (!bufSize)
+ {
+ return 0;
+ }
+
+ // Allocate the buffer for the version info structure
+ // _alloca() can't return NULL -- raises STATUS_STACK_OVERFLOW.
+ BYTE* pVersionInfoBuffer = reinterpret_cast< BYTE* >(_alloca(bufSize));
+
+ // Extract the version information blob. The version information
+ // contains much more than the actual item of interest.
+ {
+        // If the previous GetFileVersionInfoSizeW succeeds, version.dll has been loaded
+ // in the process, and delay load of GetFileVersionInfoW will not throw.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ ret = GetFileVersionInfoW(wszFilePath, dwHandle, bufSize, pVersionInfoBuffer);
+ }
+
+ if (!ret)
+ {
+ return 0;
+ }
+
+ // Extract the description.
+
+ // Get the language and codepage for the version info.
+ UINT size = 0;
+ struct
+ {
+ WORD language;
+ WORD codePage;
+ }* translation;
+
+ {
+        // If the previous GetFileVersionInfoSizeW succeeds, version.dll has been loaded
+        // in the process, and delay load of VerQueryValueW will not throw.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ ret = VerQueryValueW(pVersionInfoBuffer, W("\\VarFileInfo\\Translation"),
+ reinterpret_cast< void **>(&translation), &size);
+ }
+
+ if (!ret || size == 0)
+ {
+ return 0;
+ }
+
+ // Build the query key for the language-specific file description resource.
+ WCHAR buf[64]; //----+----1----+----2----+----3----+----4----+
+ _snwprintf_s(buf, NumItems(buf), _TRUNCATE, W("\\StringFileInfo\\%04x%04x\\FileDescription"),
+ translation->language, translation->codePage);
+
+ // Get the file description.
+ WCHAR* fileDescription;
+ {
+        // If the previous GetFileVersionInfoSizeW succeeds, version.dll has been loaded
+        // in the process, and delay load of VerQueryValueW will not throw.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ ret = VerQueryValueW(pVersionInfoBuffer, buf,
+ reinterpret_cast< void** >(&fileDescription), &size);
+ }
+
+ // If the call failed, or there is no file description, done.
+ if (!ret || size == 0)
+ {
+ return 0;
+ }
+
+ // If the description is a single space, ignore it.
+ if (wcscmp(fileDescription, W(" ")) == 0)
+ {
+ return 0;
+ }
+
+ // Copy back the description.
+ size = (int)size > cchBuf-1 ? cchBuf-1 : size;
+ wcsncpy_s(pBuf, cchBuf, fileDescription, size);
+
+ return size;
+} // int DwGetAppDescription()
+
+//------------------------------------------------------------------------------
+// Description
+// Extract the assembly version from an executable.
+//
+// Parameters
+// wszFilePath Path to a file to extract the version information from
+// pBuf [out] Put version here.
+// cchBuf Size of pBuf, in wide chars.
+//
+// Returns
+// Count of characters stored.
+//
+// Exceptions
+// None
+//------------------------------------------------------------------------------
+int DwGetAssemblyVersion( // Number of characters written.
+ __in_z LPWSTR wszFilePath, // Path to the executable.
+ __inout_ecount(cchBuf) WCHAR *pBuf, // Put description here.
+ int cchBuf) // Size of buf, wide chars.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DWORD dwHandle = 0;
+    DWORD bufSize = 0; // Size of allocation for VersionInfo.
+ DWORD ret;
+
+ // Find the buffer size for the version info structure we need to create
+ EX_TRY
+ {
+ bufSize = GetFileVersionInfoSizeW(wszFilePath, &dwHandle);
+ }
+ EX_CATCH
+ {
+ bufSize = 0;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (!bufSize)
+ {
+ return 0;
+ }
+
+ // Allocate the buffer for the version info structure
+ // _alloca() can't return NULL -- raises STATUS_STACK_OVERFLOW.
+ BYTE* pVersionInfoBuffer = reinterpret_cast< BYTE* >(_alloca(bufSize));
+
+ // Extract the version information blob. The version information
+ // contains much more than the actual item of interest.
+ {
+        // If the previous GetFileVersionInfoSizeW succeeds, version.dll has been loaded
+ // in the process, and delay load of GetFileVersionInfoW will not throw.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ ret = GetFileVersionInfoW(wszFilePath, dwHandle, bufSize, pVersionInfoBuffer);
+ }
+
+ if (!ret)
+ {
+ return 0;
+ }
+
+ // Extract the description.
+
+ // Get the language and codepage for the version info.
+ UINT size = 0;
+ struct
+ {
+ WORD language;
+ WORD codePage;
+ }* translation;
+
+ {
+        // If the previous GetFileVersionInfoSizeW succeeds, version.dll has been loaded
+ // in the process, and delay load of VerQueryValueW will not throw.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ ret = VerQueryValueW(pVersionInfoBuffer, W("\\VarFileInfo\\Translation"),
+ reinterpret_cast< void **>(&translation), &size);
+ }
+
+ if (ret == 0 || size == 0)
+ {
+ return 0;
+ }
+
+ // Build the query key for the language-specific assembly version resource.
+ WCHAR buf[64]; //----+----1----+----2----+----3----+----4----+
+ _snwprintf_s(buf, NumItems(buf), _TRUNCATE, W("\\StringFileInfo\\%04x%04x\\Assembly Version"),
+ translation->language, translation->codePage);
+
+ // Get the assembly version.
+ WCHAR* assemblyVersion;
+ {
+        // If the previous GetFileVersionInfoSizeW succeeds, version.dll has been loaded
+ // in the process, and delay load of VerQueryValueW will not throw.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ ret = VerQueryValueW(pVersionInfoBuffer, buf,
+ reinterpret_cast< void** >(&assemblyVersion), &size);
+ }
+
+ // If the call failed, or there is no assembly version, done.
+ if (ret == 0 || size == 0)
+ {
+ return 0;
+ }
+
+ // If the assembly version is a single space, ignore it.
+ if (wcscmp(assemblyVersion, W(" ")) == 0)
+ {
+ return 0;
+ }
+
+ // Copy back the assembly version.
+ size = (int)size > cchBuf-1 ? cchBuf-1 : size;
+ wcsncpy_s(pBuf, cchBuf, assemblyVersion, size);
+
+ return size;
+} // int DwGetAssemblyVersion()
+
+
+
+//------------------------------------------------------------------------------
+// CLRWatsonHelper class
+//
+// Certain registry keys affect the behavior of Watson. In particular, they
+// control
+// o whether or not a Watson report should result in UI popups
+// o which debugger should be used to JIT attach to the faulting process
+// o whether error reports should be sent at all.
+// This class is a holder for static functions that access these registry keys
+// to determine the proper settings.
+//
+//------------------------------------------------------------------------------
+class CLRWatsonHelper
+{
+public:
+ enum WHDebugAction
+ {
+ WHDebug_InvalidValue,
+ WHDebug_AutoLaunch,
+ WHDebug_AskToLaunch,
+ WHDebug_DontLaunch
+ } m_debugAction;
+
+ enum WHReportAction
+ {
+ WHReport_InvalidValue,
+ WHReport_AutoQueue,
+ WHReport_AskToSend,
+ WHReport_DontSend
+ } m_reportAction;
+
+ enum WHDialogAction
+ {
+ WHDialog_InvalidValue,
+ WHDialog_OkToPopup,
+ WHDialog_DontPopup
+ } m_dialogAction;
+
+ CLRWatsonHelper()
+ : m_debugAction(WHDebug_InvalidValue),
+ m_reportAction(WHReport_InvalidValue),
+ m_dialogAction(WHDialog_InvalidValue)
+ { LIMITED_METHOD_CONTRACT; }
+
+ void Init(BOOL bIsManagedFault, TypeOfReportedError tore);
+
+ // Does the current interactive USER have sufficient permissions to
+ // launch Watson or a debugger against this PROCESS?
+ BOOL CurrentUserHasSufficientPermissions();
+
+ // Should a debugger automatically, or should the user be queried for a debugger?
+ BOOL ShouldDebug();
+
+ // Should a managed debugger be launched, without even asking?
+ BOOL ShouldAutoAttach();
+
+ // Should Watson include a "Debug" button?
+ BOOL ShouldOfferDebug();
+
+ // Should a Watson report be generated?
+ BOOL ShouldReport();
+
+ // Should there be a popup? Possibly with only "quit"?
+ BOOL ShouldShowUI();
+
+ // If a Watson report is generated, should it be auto-queued?
+ // (vs asking the user what to do about it)
+ BOOL ShouldQueueReport();
+
+private:
+ // Looks in HKCU/Software/Policies/Microsoft/PCHealth/ErrorReporting
+ // then in HKLM/ " " " " "
+    // then in HKCU/Software/Microsoft/PCHealth/ErrorReporting
+ // then in HKLM/ " " " "
+ static int GetPCHealthConfigLong( // Return value from registry or default.
+ LPCWSTR szName, // Name of value to get.
+ int iDefault); // Default value to return if not found.
+
+ // Like above, but searches for a subkey with the given value.
+ static BOOL GetPCHealthConfigSubKeyLong(// Return value from registry or default.
+ LPCWSTR szSubKey, // Name of the subkey.
+ LPCWSTR szName, // Name of value to get.
+ int iDefault, // Default value to return if not found.
+ DWORD *pValue); // Put value here.
+
+ void AssertValid()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_debugAction != WHDebug_InvalidValue);
+ _ASSERTE(m_reportAction != WHReport_InvalidValue);
+ _ASSERTE(m_dialogAction != WHDialog_InvalidValue);
+ }
+
+}; // class CLRWatsonHelper
+
+//------------------------------------------------------------------------------
+// Description
+// Initialization for watson helper class.
+//
+// Parameters
+// bIsManagedFault - true if EXCEPTION_COMPLUS or fault from jitted code.
+// - false otherwise
+//
+//
+// Notes:
+// - Launches and popups always happen in the same session in which the
+// process is running.
+// - This function computes what actions should happen, but doesn't do any.
+//
+// This routine returns which actions should be taken given the current registry
+// settings and environment. It implements the following matrix:
+//
+// <<-- AutoLaunch -->>
+// TRUE FALSE
+// Interactive process A3 B2
+// Non-interactive process A3 C1
+//
+// Action codes:
+// A - Auto attach debugger
+// B - Ask to attach debugger
+// C - Don't attach debugger
+//
+// 1 - Auto Queue Watson report
+// 2 - Ask to Send Watson report
+// 3 - Don't send Watson report
+//
+//
+// CLRWatsonHelper::Init
+//------------------------------------------------------------------------------
+void CLRWatsonHelper::Init(
+ BOOL bIsManagedFault, // Is the fault in question from managed code?
+ TypeOfReportedError tore) // What sort of error is this?
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Initialize returned values
+ WHDebugAction tmpDebugAction = WHDebug_InvalidValue;
+ WHReportAction tmpReportAction = WHReport_InvalidValue;
+ WHDialogAction tmpDialogAction = WHDialog_InvalidValue;
+
+    // First run the matrix, then later provide the overrides
+ BOOL fRunningInteractive = RunningInteractive();
+
+ if (fRunningInteractive)
+ {
+ // Interactive services and interactive apps running as LocalSystem are considered non-interactive
+ // so that we don't display any UI for them. Note that we should check the process token (and not the
+ // thread token if the thread is impersonating a user) to determine if the app is running as LocalSystem.
+ // This is because Watson displays UI for us and Watson is run by calling CreateProcess. CreateProcess
+ // always creates child processes using the process token.
+
+ BOOL fLocalSystemOrService;
+ if (RunningAsLocalSystemOrService(fLocalSystemOrService) != ERROR_SUCCESS)
+ {
+ // Err on the side of caution; treat the app as non-interactive
+ fRunningInteractive = FALSE;
+ }
+ else if (fLocalSystemOrService)
+ {
+ fRunningInteractive = FALSE;
+ }
+ }
+
+ BOOL bAutoLaunch = FALSE;
+ SString ssDummy;
+
+ GetDebuggerSettingInfo(ssDummy, &bAutoLaunch);
+
+ if (bAutoLaunch)
+ {
+ tmpDebugAction = WHDebug_AutoLaunch;
+ tmpReportAction = WHReport_DontSend;
+ tmpDialogAction = WHDialog_DontPopup;
+ }
+ else
+ {
+ if (fRunningInteractive)
+ {
+ tmpDebugAction = WHDebug_AskToLaunch;
+ tmpReportAction = WHReport_AskToSend;
+ tmpDialogAction = WHDialog_OkToPopup;
+ }
+ else
+ {
+ // Non-interactive process
+ tmpDebugAction = WHDebug_DontLaunch;
+ tmpReportAction = WHReport_AutoQueue;
+ tmpDialogAction = WHDialog_DontPopup;
+ }
+ }
+
+ // If this is a breakpoint, never send a report.
+ if (tore.IsBreakpoint())
+ tmpReportAction = WHReport_DontSend;
+
+ // Store off the results.
+ m_debugAction = tmpDebugAction;
+ m_reportAction = tmpReportAction;
+ m_dialogAction = tmpDialogAction;
+
+ // Done. Log some stuff in debug mode.
+ #if defined(_DEBUG)
+ {
+ char *(rda[]) = {"InvalidValue", "AutoDebug", "AskToDebug", "DontDebug"};
+ char *(rwa[]) = {"InvalidValue", "AutoQueue", "AskToSend", "DontSend"};
+ char *(rdlga[]) = {"InvalidValue", "OkToPopup", "DontPopup"};
+ LOG((LF_EH, LL_INFO100, "CLR Watson: debug action: %s\n", rda[m_debugAction]));
+ LOG((LF_EH, LL_INFO100, "CLR Watson: report action: %s\n", rwa[m_reportAction]));
+ LOG((LF_EH, LL_INFO100, "CLR Watson: dialog action: %s\n", rdlga[m_dialogAction]));
+ #define LB(expr) LOG((LF_EH, LL_INFO100, "CLR Watson: " #expr ": %s\n", ((expr) ? "true" : "false") ))
+ LB(CurrentUserHasSufficientPermissions());
+ LB(ShouldDebug());
+ LB(ShouldAutoAttach());
+ LB(ShouldOfferDebug());
+ LB(ShouldReport());
+ LB(ShouldQueueReport());
+ #undef LB
+ }
+ #endif
+
+} // void CLRWatsonHelper::Init()
+
+
+//------------------------------------------------------------------------------
+// CurrentUserHasSufficientPermissions
+//
+// Determines if the user logged in has the correct permissions to launch Watson.
+//
+// Parameters:
+// None.
+//
+// Returns:
+// TRUE if the user has sufficient permissions, else FALSE
+//------------------------------------------------------------------------------
+BOOL CLRWatsonHelper::CurrentUserHasSufficientPermissions()
+{
+ // TODO! Implement!
+ return TRUE;
+} // BOOL CLRWatsonHelper::CurrentUserHasSufficientPermissions()
+
+
+
+//------------------------------------------------------------------------------
+// Description
+// Determines whether we will show Watson at all.
+//
+// Parameters
+// none
+//
+// Returns
+// TRUE -- If Watson should show UI.
+// FALSE -- Otherwise
+//------------------------------------------------------------------------------
+BOOL CLRWatsonHelper::ShouldShowUI()
+{
+ WRAPPER_NO_CONTRACT;
+
+ AssertValid();
+
+ return (m_dialogAction == WHDialog_OkToPopup);
+} // BOOL CLRWatsonHelper::ShouldShowUI()
+
+//------------------------------------------------------------------------------
+// Description
+// Determines whether a debugger will (or may be) launched. True if there
+// is an auto-launch debugger, or if we will ask the user.
+//
+// Parameters
+// none
+//
+// Returns
+// TRUE -- If a debugger might be attached.
+// FALSE -- Otherwise
+//------------------------------------------------------------------------------
+BOOL CLRWatsonHelper::ShouldDebug()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return ShouldOfferDebug() || ShouldAutoAttach();
+} // BOOL CLRWatsonHelper::ShouldDebug()
+
+//------------------------------------------------------------------------------
+// Description
+// Determines whether or not the Debug button should be present in the
+// Watson dialog
+//
+// Parameters
+// none
+//
+// Returns
+// TRUE -- if the Debug button should be displayed
+// FALSE -- otherwise
+//
+// Notes
+// This means "is there an appropriate debugger registered for auto attach?"
+//------------------------------------------------------------------------------
+BOOL CLRWatsonHelper::ShouldOfferDebug()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ AssertValid();
+
+ // Permission check.
+ if (!CurrentUserHasSufficientPermissions())
+ {
+ return FALSE;
+ }
+
+ // Check based on DbgJitDebugLaunchSetting & interactivity.
+ if (m_debugAction != WHDebug_AskToLaunch)
+ {
+ // Don't ask the user about debugging. Do or don't debug; but don't ask.
+ return FALSE;
+ }
+
+ SString ssDebuggerString;
+ GetDebuggerSettingInfo(ssDebuggerString, NULL);
+
+ // If there is no debugger installed, don't offer to debug, since we can't.
+ if (ssDebuggerString.IsEmpty())
+ {
+ return FALSE;
+ }
+
+ return TRUE;
+
+} // BOOL CLRWatsonHelper::ShouldOfferDebug()
+
+//------------------------------------------------------------------------------
+//
+// ShouldAutoAttach
+//
+// Description
+// Determines whether or not a debugger should be launched
+// automatically, without prompting the user.
+//
+// Parameters
+// None.
+//
+// Returns
+// TRUE -- If a debugger should be auto-attached.
+// FALSE -- Otherwise
+//
+//------------------------------------------------------------------------------
+BOOL CLRWatsonHelper::ShouldAutoAttach()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ AssertValid();
+
+ // Permissions check.
+ if (!CurrentUserHasSufficientPermissions())
+ {
+ return FALSE;
+ }
+
+ return (m_debugAction == WHDebug_AutoLaunch);
+} // BOOL CLRWatsonHelper::ShouldAutoAttach()
+
+
+//------------------------------------------------------------------------------
+// Description
+// Returns whether a Watson report should be generated.
+//
+// Parameters
+// none
+//
+// Returns
+// TRUE - a Watson report should be generated (with a minidump).
+// FALSE - don't generate a report.
+//
+//------------------------------------------------------------------------------
+BOOL CLRWatsonHelper::ShouldReport()
+{
+ WRAPPER_NO_CONTRACT;
+
+ AssertValid();
+
+ // If we queue or ask, we should generate.
+ return (m_reportAction == WHReport_AutoQueue) || (m_reportAction == WHReport_AskToSend);
+
+} // BOOL CLRWatsonHelper::ShouldReport()
+
+
+//------------------------------------------------------------------------------
+// Description
+// If a Watson report is generated, returns whether it should be auto-queued.
+// (vs asking the user what to do about it)
+//
+// Parameters
+// none
+//
+// Returns
+//      TRUE - any Watson report should be queued.
+//      FALSE - any Watson report is presented to the user for "send" or "don't send".
+//
+//------------------------------------------------------------------------------
+BOOL CLRWatsonHelper::ShouldQueueReport()
+{
+ WRAPPER_NO_CONTRACT;
+
+ AssertValid();
+
+    // Queue only when the report action is auto-queue.
+ return (m_reportAction == WHReport_AutoQueue);
+
+} // BOOL CLRWatsonHelper::ShouldQueueReport()
+
+//------------------------------------------------------------------------------
+// Description
+// Reads a PCHealth configuration LONG value from the registry.
+//
+// Parameters
+// szName -- name of the value
+// iDefault -- default value, if not found
+//
+// Returns
+// The value read, or default if no value found.
+//
+// Exceptions
+// None
+//
+// Notes:
+// Looks in HKCU/Software/Policies/Microsoft/PCHealth/ErrorReporting
+// then in HKLM/ " " " " "
+//   then in HKCU/Software/Microsoft/PCHealth/ErrorReporting
+// then in HKLM/ " " " "
+//------------------------------------------------------------------------------
+int CLRWatsonHelper::GetPCHealthConfigLong( // Return value from registry or default.
+    LPCWSTR szName,                     // Name of value to get.
+ int iDefault) // Default value to return if not found.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ long iValue; // Actual value from registry.
+
+    // Try HKCU policy key
+ if (GetRegistryLongValue(HKEY_CURRENT_USER, kErrorReportingPoliciesKey, szName, &iValue, FALSE))
+ return iValue;
+
+ // Try HKLM policy key
+ if (GetRegistryLongValue(HKEY_LOCAL_MACHINE, kErrorReportingPoliciesKey, szName, &iValue, FALSE))
+ return iValue;
+
+    // Try HKCU key
+ if (GetRegistryLongValue(HKEY_CURRENT_USER, kErrorReportingKey, szName, &iValue, FALSE))
+ return iValue;
+
+ // Try HKLM key
+ if (GetRegistryLongValue(HKEY_LOCAL_MACHINE, kErrorReportingKey, szName, &iValue, FALSE))
+ return iValue;
+
+ // None of them had value -- return default.
+ return iDefault;
+} // int CLRWatsonHelper::GetPCHealthConfigLong()
+
+//------------------------------------------------------------------------------
+// Description
+// Reads a PCHealth configuration LONG value from the registry, from a
+// given subkey.
+//
+// Parameters
+// szSubKey -- name of the subkey.
+// szName -- name of the value
+// iDefault -- default value, if not found
+// pValue -- put value here.
+//
+// Returns
+// TRUE - a value was found in the registry
+// FALSE - no value found.
+//
+// Exceptions
+// None
+//
+// Notes:
+// Looks in HKCU/Software/Policies/Microsoft/PCHealth/ErrorReporting
+// then in HKLM/ " " " " "
+//   then in HKCU/Software/Microsoft/PCHealth/ErrorReporting
+// then in HKLM/ " " " "
+//------------------------------------------------------------------------------
+BOOL CLRWatsonHelper::GetPCHealthConfigSubKeyLong( // TRUE if a value was found in the registry.
+ LPCWSTR szSubKey, // Name of the subkey.
+ LPCWSTR szName, // Name of value to get.
+ int iDefault, // Default value to return if not found.
+ DWORD *pValue) // Put the value (registry or default) here.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ long iValue; // Actual value from registry.
+
+ // Only one thread will *ever* enter this function, so it is safe to use a static
+    //  buffer.  We know the longest strings we will want to concatenate.  Size
+    //  the buffer appropriately, and we're set.
+ static WCHAR rcBuf[lengthof(kErrorReportingPoliciesKey) + lengthof(kInclusionListSubKey) + 3];
+
+ _ASSERT( (wcslen(kErrorReportingPoliciesKey) + wcslen(szSubKey) + 1) < lengthof(rcBuf));
+
+    // Try HKCU policy key
+ wcscpy_s(rcBuf, COUNTOF(rcBuf), kErrorReportingPoliciesKey);
+ wcsncat_s(rcBuf, COUNTOF(rcBuf), szSubKey, lengthof(rcBuf)-wcslen(rcBuf)-1);
+
+ if (GetRegistryLongValue(HKEY_CURRENT_USER, rcBuf, szName, &iValue, FALSE))
+ {
+ *pValue = iValue;
+ return TRUE;
+ }
+
+ // Try the HKLM policy key
+ if (GetRegistryLongValue(HKEY_LOCAL_MACHINE, rcBuf, szName, &iValue, FALSE))
+ {
+ *pValue = iValue;
+ return TRUE;
+ }
+
+    // Try HKCU key
+ wcscpy_s(rcBuf, COUNTOF(rcBuf), kErrorReportingKey);
+ wcsncat_s(rcBuf, COUNTOF(rcBuf), szSubKey, lengthof(rcBuf)-wcslen(rcBuf)-1);
+
+ if (GetRegistryLongValue(HKEY_CURRENT_USER, rcBuf, szName, &iValue, FALSE))
+ {
+ *pValue = iValue;
+ return TRUE;
+ }
+
+ // Try HKLM key
+ if (GetRegistryLongValue(HKEY_LOCAL_MACHINE, rcBuf, szName, &iValue, FALSE))
+ {
+ *pValue = iValue;
+ return TRUE;
+ }
+
+ // None of them had value -- return default.
+ *pValue = iDefault;
+ return FALSE;
+} // BOOL CLRWatsonHelper::GetPCHealthConfigSubKeyLong()
+
+
+//------------------------------------------------------------------------------
+//------------------------------------------------------------------------------
+HRESULT CreateWatsonSharedMemory(
+ HANDLE *hWatsonSharedMemory,
+ DWSharedMem **ppWatsonSharedMemory)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Watson needs to inherit the shared memory block, so we have to set up
+    // security attributes to make that happen.
+ SECURITY_ATTRIBUTES securityAttributes;
+ memset(&securityAttributes, 0, sizeof(securityAttributes));
+ securityAttributes.nLength = sizeof(securityAttributes);
+ securityAttributes.bInheritHandle = TRUE;
+
+ _ASSERTE(NULL != hWatsonSharedMemory);
+ _ASSERTE(NULL != ppWatsonSharedMemory);
+
+ *hWatsonSharedMemory = NULL;
+ *ppWatsonSharedMemory = NULL;
+
+    // In cases where we have to return from this function with a failure, we
+ // need to clean up the handle. Use a holder to take care of that for us.
+ HandleHolder hTemp =
+ WszCreateFileMapping(INVALID_HANDLE_VALUE,
+ &securityAttributes,
+ PAGE_READWRITE,
+ 0,
+ sizeof(DWSharedMem),
+ NULL);
+
+ if (hTemp == NULL)
+ {
+ return HRESULT_FROM_GetLastErrorNA();
+ }
+
+ DWSharedMem* pTemp =
+ static_cast< DWSharedMem* >(CLRMapViewOfFile(hTemp,
+ FILE_MAP_ALL_ACCESS,
+ 0,
+ 0,
+ sizeof(DWSharedMem)));
+
+ if (NULL == pTemp)
+ {
+ return HRESULT_FROM_GetLastErrorNA();
+ }
+
+ memset(pTemp, 0, sizeof(DWSharedMem));
+ *hWatsonSharedMemory = hTemp;
+ *ppWatsonSharedMemory = pTemp;
+
+ // We're ready to exit normally and pass the IPC block's handle back to our
+ // caller, so we don't want to close it.
+ hTemp.SuppressRelease();
+
+ return S_OK;
+} // HRESULT CreateWatsonSharedMemory()
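+
+// Illustrative call pattern (a sketch with error handling elided; this mirrors
+// the use in DoFaultReportWorker below):
+//
+//      HANDLE hMem;
+//      DWSharedMem *pMem;
+//      if (SUCCEEDED(CreateWatsonSharedMemory(&hMem, &pMem)))
+//      {
+//          // fill in *pMem, then hand hMem to the Watson child process
+//          //  (RunWatson passes it on the dw20.exe command line)
+//      }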
+
+
+
+const WCHAR* kWatsonImageNameOnVista = W("\\dw20.exe");
+
+//------------------------------------------------------------------------------
+// Description
+// A helper function to launch the Watson process and wait for it to
+// complete
+// Parameters
+// hWatsonSharedMemory
+// Handle to the shared memory block to pass to Watson. This handle
+// must be inheritable.
+// hEventAlive
+// hEventDone
+// hMutex
+// Returns
+// true - If watson executed normally
+// false - if watson was unable to launch, reported an error, or
+// appeared to hang/crash
+//------------------------------------------------------------------------------
+BOOL RunWatson(
+ HANDLE hWatsonSharedMemory,
+ HANDLE hEventAlive,
+ HANDLE hEventDone,
+ HANDLE hMutex)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(!RunningOnWin7());
+ }
+ CONTRACTL_END;
+
+ // Since we're doing our own error reporting, we don't want to pop up the
+    // OS Watson dialog/GPF Dialog.  Suppress it now.
+
+ PROCESS_INFORMATION processInformation;
+ STARTUPINFOW startupInfo;
+ memset(&startupInfo, 0, sizeof(STARTUPINFOW));
+ startupInfo.cb = sizeof(STARTUPINFOW);
+
+
+ WCHAR watsonAppName[MAX_PATH];
+ WCHAR watsonCommandLine[MAX_PATH+1];
+
+ {
+#if !defined(FEATURE_CORECLR)
+ // Use the version of DW20.exe that lives in the system directory.
+ DWORD ret;
+
+ if (FAILED(GetCORSystemDirectoryInternal(watsonAppName, NumItems(watsonAppName), &ret)))
+ {
+ return false;
+ }
+ if (wcsncat_s(watsonAppName, NumItems(watsonAppName), kWatsonImageNameOnVista, _TRUNCATE) != 0)
+ {
+ return false;
+ }
+#else // FEATURE_CORECLR
+ HKEYHolder hKey;
+ // Look for key \\HKLM\Software\Microsoft\PCHealth\ErrorReporting\DW\Installed"
+ DWORD ret = WszRegOpenKeyEx(HKEY_LOCAL_MACHINE,
+ kWatsonPath,
+ 0,
+ KEY_READ | kWatsonRegKeyOptions,
+ &hKey);
+
+ if (ERROR_SUCCESS != ret)
+ {
+ return false;
+ }
+
+
+ // Look in ...\DW\Installed for dw0200 (dw0201 on ia64). This will be
+ // the full path to the executable.
+ DWORD size = NumItems(watsonAppName);
+ ret = WszRegQueryValueEx(hKey,
+ kWatsonValue,
+ NULL,
+ NULL,
+ reinterpret_cast< LPBYTE >(watsonAppName),
+ &size);
+
+
+ if (ERROR_SUCCESS != ret)
+ {
+ return false;
+ }
+#endif // ! FEATURE_CORECLR
+
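+        // Build the dw20.exe command line.  "-s %lu" passes the inheritable
+        // shared memory handle (see the PtrToUlong argument below); "-x" is
+        // assumed here to be dw20's exception-reporting switch -- its switches
+        // aren't documented in this file.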
+ _snwprintf_s(watsonCommandLine,
+ NumItems(watsonCommandLine)-1,
+ _TRUNCATE,
+ W("dw20.exe -x -s %lu"),
+ PtrToUlong(hWatsonSharedMemory));
+ watsonCommandLine[NumItems(watsonCommandLine) - 1] = W('\0');
+ }
+
+
+ {
+ BOOL ret = WszCreateProcess(watsonAppName,
+ watsonCommandLine,
+ NULL,
+ NULL,
+ TRUE,
+ NULL,
+ NULL,
+ NULL,
+ &startupInfo,
+ &processInformation);
+
+ if (FALSE == ret)
+ {
+ //
+ // Watson failed to start up.
+ //
+ // This can happen if e.g. Watson wasn't installed on the machine.
+ //
+ HRESULT hr = HRESULT_FROM_GetLastErrorNA();
+ return false;
+ }
+
+ }
+
+ // Wait for watson to finish.
+ //
+ // This code was more-or-less pasted directly out of the test app for
+ // watson, found at
+ //
+ // \\redist\redist\Watson\dw20_latest\neutral\retail\0\testcrash.cpp
+
+ // These handles need to live until we're done waiting for the watson
+ // process to finish execution.
+ HandleHolder hProcess(processInformation.hProcess);
+ HandleHolder hThread(processInformation.hThread);
+
+
+ BOOL watsonSignalledCompletion = FALSE, bDWRunning = TRUE;
+
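+    // The handshake with the Watson child, in brief: while healthy, Watson
+    // periodically signals hEventAlive; when finished, it signals hEventDone.
+    // If the pings stop, we take hMutex and decide whether Watson is hung or
+    // has crashed, and give up by signalling hEventDone ourselves.
+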
+ while (bDWRunning)
+ {
+ if (WAIT_OBJECT_0 == ClrWaitForSingleObject(hEventAlive,
+ kDwWaitTime))
+ {
+ // Okay, Watson's still pinging us; see if it's finished.
+ if (WAIT_OBJECT_0 == ClrWaitForSingleObject(hEventDone, 1))
+ {
+ bDWRunning = FALSE;
+ watsonSignalledCompletion = TRUE;
+ }
+
+ // If watson is finished (i.e. has signaled hEventDone),
+ // bDWRunning is false and we'll fall out of the loop. If
+ // watson isn't finished, we'll go back to waiting for the
+ // next ping on hEventAlive
+ continue;
+ }
+
+ Thread::BeginThreadAffinity();
+ // we timed-out waiting for DW to respond.
+ DWORD dw = WaitForSingleObject(hMutex, DW_TIMEOUT_VALUE);
+
+ if (WAIT_TIMEOUT == dw)
+ {
+            // Either DW is hung or has crashed; we must carry on.  Let Watson
+            // know that we're giving up on it, in case it comes back
+            // from the hang.
+ SetEvent(hEventDone);
+ bDWRunning = FALSE;
+ }
+ else if (WAIT_ABANDONED == dw)
+ {
+ // The mutex was abandoned, which means Watson crashed on
+ // us.
+ bDWRunning = FALSE;
+
+ ReleaseMutex(hMutex);
+ }
+ else
+ {
+ // Check one last time to see if Watson has woken up.
+ if (WAIT_OBJECT_0 != ClrWaitForSingleObject(hEventAlive, 1))
+ {
+ // Nope. hasn't woken up. Give up on Watson
+ SetEvent(hEventDone);
+ bDWRunning = FALSE;
+ }
+ else
+ {
+ // Oh, it HAS woken up! See if it's finished as well.
+ if (WAIT_OBJECT_0 == ClrWaitForSingleObject(hEventDone, 1))
+ {
+ bDWRunning = FALSE;
+ watsonSignalledCompletion = TRUE;
+ }
+ }
+
+ ReleaseMutex(hMutex);
+ }
+ Thread::EndThreadAffinity();
+ }
+
+ // Go ahead and bail if Watson didn't exit for some reason.
+ if (!watsonSignalledCompletion)
+ {
+ return FALSE;
+ }
+
+ // We're now done with hProcess and hThread, it's safe to let the
+ // HandleHolders destroy them now.
+ //
+ // We don't need to wait for the Watson process to exit; once it's signalled
+ // "hEventDone" it's safe to assume that Watson will not try communicating
+ // with us anymore and we have succeeded.
+ return true;
+} // BOOL RunWatson()
+
+
+//
+// Constants used to control various aspects of Watson's behavior.
+//
+
+
+// Flags controlling the minidump Watson creates.
+const DWORD kMiniDumpType = MiniDumpNormal;
+const DWORD kThreadWriteFlags = ThreadWriteThread | ThreadWriteContext | ThreadWriteStack;
+const DWORD kModuleWriteFlags = ModuleWriteModule; // | ModuleWriteDataSeg ?
+
+
+
+// Reporting. The defaults are fine here
+const DWORD kReportingFlags = 0;
+
+//
+// Enable these flags if the report should be queued (i.e., if no UI should be
+// shown, but a report should still be sent).
+//
+
+// Enable these flags in the bfDWRFlags field
+const DWORD kQueuingReportingFlags = fDwrForceToAdminQueue | fDwrIgnoreHKCU;
+
+// Enable these flags in the bfDWUFlags field
+const DWORD kQueuingUIFlags = fDwuNoEventUI;
+
+//
+// No reporting flags. Enable these flags if an error report should not be sent.
+//
+
+// Enable these flags in bfDWRFlags if a report is not to be sent.
+const DWORD kNoReportFlags = fDwrNeverUpload;
+
+
+// UI Flags
+//
+// We need to use the light plea, since we may be reporting faults for
+// Non-Microsoft software (if some random 3rd party app throws an exception, we
+// can't really promise that their error report will be used to fix the
+// problem).
+//
+const DWORD kUIFlags = fDwuDenySuspend | fDwuShowFeedbackLink;
+
+// Exception mode flags. By default, the "restart" and "recover" buttons are
+// checked. We need to turn that behavior off. We also need to use the
+// minidump API to gather the heap dump, in order to get a managed-aware
+// minidump. Finally, release the dumping thread before doing the cabbing
+// for performance reasons.
+const DWORD kExceptionModeFlags = fDweDefaultQuit | fDweGatherHeapAsMdmp | fDweReleaseBeforeCabbing;
+
+// "Miscellaneous" flags. These flags are only used by Office.
+const DWORD kMiscFlags = 0;
+
+// Flags to control which buttons are available on the Watson dialog.
+//
+// We will only make the "Send Error Report" and "Don't Send" buttons
+// available -- we're not going to enable the "restart" or "recover"
+// checkboxes by default.
+const DWORD kOfferFlags = msoctdsQuit;
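+
+// These defaults get combined in DoFaultReportWorker below; roughly (a sketch
+// of the logic there, not a separate API):
+//
+//      DWORD reporting = kReportingFlags;
+//      if (policy.ShouldQueueReport())     reporting |= kQueuingReportingFlags;
+//      else if (!policy.ShouldReport())    reporting |= kNoReportFlags;
+//      DWORD offer = kOfferFlags | (policy.ShouldOfferDebug() ? msoctdsDebug : 0);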
+
+//------------------------------------------------------------------------------
+// Description
+// Returns the IP of the instruction that caused the exception to occur.
+//      For managed exceptions this may not match the ExceptionAddress contained
+//      in the exception record.
+//
+// Parameters
+// pExceptionRecord -- the SEH exception for the current exception
+// pThread -- Pointer to Thread object of faulting thread
+//
+// Returns
+// The IP that caused the exception to occur
+//
+// Exceptions
+// None
+//------------------------------------------------------------------------------
+UINT_PTR GetIPOfThrowSite(
+ EXCEPTION_RECORD* pExceptionRecord,
+ Thread *pThread)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // If we can't determine a better value, use the exception record's exception address.
+ UINT_PTR rslt = reinterpret_cast< UINT_PTR >(pExceptionRecord->ExceptionAddress);
+
+ // If it is not a managed exception, use the IP from the exception.
+ if (!IsComPlusException(pExceptionRecord))
+ return rslt;
+
+ // Get the thread object, from which we'll try to get the managed exception object.
+ if (NULL == pThread)
+ { // If there's no managed thread, use the IP from the exception.
+ return rslt;
+ }
+
+ // Retrieve any stack trace from the managed exception. If there is a stack
+ // trace, it will start with the topmost (lowest address, newest) managed
+ // code, which is what we want.
+ GCX_COOP();
+ OBJECTREF throwable = pThread->GetThrowable();
+
+ // If there was no managed code on the stack and we are on 64-bit, then we won't have propagated
+ // the LastThrownObject into the Throwable yet.
+ if (throwable == NULL)
+ throwable = pThread->LastThrownObject();
+
+ _ASSERTE(throwable != NULL);
+ _ASSERTE(IsException(throwable->GetMethodTable()));
+
+ // If the last thrown object is of type Exception, get the stack trace.
+ if (throwable != NULL)
+ {
+ // Get the BYTE[] containing the stack trace.
+ StackTraceArray traceData;
+ ((EXCEPTIONREF)throwable)->GetStackTrace(traceData);
+
+ GCPROTECT_BEGIN(traceData);
+ // Grab the first non-zero, if there is one.
+ for (size_t ix = 0; ix < traceData.Size(); ++ix)
+ {
+ if (traceData[ix].ip)
+ {
+ rslt = traceData[ix].ip;
+ break;
+ }
+ }
+ GCPROTECT_END();
+ }
+
+ return rslt;
+} // UINT_PTR GetIPOfThrowSite()
+
+//------------------------------------------------------------------------------
+// Description
+//      Given a wide-character string, returns TRUE if any of the individual
+//      characters are non-ASCII; else returns FALSE (which implies the string
+//      could be losslessly converted to ASCII).
+//
+// Input
+// wsz -- The string to check.
+//
+// Returns
+// true -- if the string contained any non-ascii characters,
+// false -- otherwise.
+//
+//------------------------------------------------------------------------------
+BOOL ContainsUnicodeChars(__in_z LPCWSTR wsz)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(wsz != NULL);
+
+ while (NULL != *wsz)
+ {
+ if (!iswascii(*wsz))
+ {
+ return TRUE;
+ }
+ ++wsz;
+ }
+ return FALSE;
+} // BOOL ContainsUnicodeChars()
+
+//------------------------------------------------------------------------------
+// Description
+// Builds the GenericMode bucket parameters for a managed Watson dump.
+//
+// Parameters
+// tore -- type of error being reported
+// pThread -- Pointer to Thread object of faulting thread
+// ip -- Where the exception was thrown.
+// pGenericModeBlock -- Where to build the buckets
+// exception -- the throwable
+//
+// Returns
+//      S_OK if there is a valid managed exception to report on and
+// Watson buckets were initialized successfully
+// S_FALSE if there is no managed exception to report on
+// E_OUTOFMEMORY if we ran out of memory while filling out the buckets
+//
+// Notes
+//      (pGenericModeBlock->fInited == TRUE) <=> (result == S_OK)
+// The original contract of this method required that both of these conditions
+// had to be checked independently and it has caused us some grief.
+// See Dev10 bug 833350.
+//------------------------------------------------------------------------------
+HRESULT GetManagedBucketParametersForIp(
+ TypeOfReportedError tore,
+ Thread * pThread,
+ UINT_PTR ip,
+ GenericModeBlock * pGenericModeBlock,
+ OBJECTREF * pThrowable)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Avoid confusion and stale data.
+ memset(pGenericModeBlock, 0, sizeof(GenericModeBlock));
+
+ // If the exception is not from managed code, then return S_FALSE. There is
+ // no more bucket data we can fill out.
+ if (ip == 0)
+ {
+ LOG((LF_EH, LL_INFO1000, "GetManagedBucketParametersForIP: ip == 0, returning\n"));
+ return S_FALSE;
+ }
+
+ PCODE currentPC = PCODE(ip);
+
+ if (!ExecutionManager::IsManagedCode(currentPC))
+ {
+ // If there's no code manager for the location of the exception, then we
+ // should just treat this exception like an unmanaged exception. We are
+ // probably inside of mscorwks
+ //
+ // Note that while there may be an actual managed exception that
+        // occurred, we can live without the managed bucket parameters.  For
+ // exceptions coming from within mscorwks.dll, the native bucket
+ // parameters will do just fine.
+
+ LOG((LF_EH, LL_INFO1000, "GetManagedBucketParametersForIP: IsManagedCode(%p) == FALSE, returning\n", currentPC));
+ return S_FALSE;
+ }
+
+ WatsonBucketType bucketType = GetWatsonBucketType();
+#ifndef FEATURE_CORECLR
+ if (bucketType == MoCrash)
+ {
+ MoCrashBucketParamsManager moCrashManager(pGenericModeBlock, tore, currentPC, pThread, pThrowable);
+ moCrashManager.PopulateBucketParameters();
+ }
+ else
+#endif // !FEATURE_CORECLR
+ {
+#ifdef FEATURE_WINDOWSPHONE
+ _ASSERTE(bucketType == WinPhoneCrash);
+ WinPhoneBucketParamsManager winphoneManager(pGenericModeBlock, tore, currentPC, pThread, pThrowable);
+ winphoneManager.PopulateBucketParameters();
+#else
+ // if we default to CLR20r3 then let's assert that the bucketType is correct
+ _ASSERTE(bucketType == CLR20r3);
+ CLR20r3BucketParamsManager clr20r3Manager(pGenericModeBlock, tore, currentPC, pThread, pThrowable);
+ clr20r3Manager.PopulateBucketParameters();
+#endif // FEATURE_WINDOWSPHONE
+ }
+
+ // At this point we have a valid managed exception, so the GMB should get
+ // filled out. If we set this to TRUE and there isn't a managed exception,
+ // Watson will get confused and not report the full unmanaged data.
+ pGenericModeBlock->fInited = TRUE;
+
+ return S_OK;
+} // HRESULT GetManagedBucketParametersForIp()
+
+//------------------------------------------------------------------------------
+// Description
+// Builds the GenericMode bucket parameters for a managed Watson dump.
+//
+// Parameters
+// ip -- the managed ip where the fault occurred.
+//     tore -- the type of reported error
+//     pThread -- the Thread object with the exception
+//
+// Returns
+// Allocated GenericModeBlock or null.
+//
+// Notes
+// This will attempt to allocate a new GenericModeBlock, and, if
+// successful, will fill it with the GenericMode parameters for
+// a managed Watson dump. This is intended to be used in places where
+// the information is about to be lost.
+//
+// This function is called from elsewhere in the runtime.
+//------------------------------------------------------------------------------
+void* GetBucketParametersForManagedException(UINT_PTR ip, TypeOfReportedError tore, Thread * pThread, OBJECTREF * pThrowable)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!IsWatsonEnabled())
+ {
+ return NULL;
+ }
+
+ // Set up an empty GenericModeBlock to hold the bucket parameters.
+ GenericModeBlock *pgmb = new (nothrow) GenericModeBlock;
+ if (pgmb == NULL)
+ return NULL;
+
+ // Try to get BucketParameters.
+ HRESULT hr = GetManagedBucketParametersForIp(tore, pThread, ip, pgmb, pThrowable);
+
+ // If it didn't succeed, delete the GenericModeBlock. Note that hr could be S_FALSE, and that still
+ // means the buckets aren't initialized.
+ if (hr != S_OK)
+ {
+ delete pgmb;
+ pgmb = NULL;
+ }
+
+ return pgmb;
+} // void* GetBucketParametersForManagedException()
+
+//------------------------------------------------------------------------------
+// Description
+// Frees the GenericModeBlock allocated by GetBucketParametersForManagedException.
+//
+// Parameters
+// pgmb -- the allocated GenericModeBlock.
+//
+// Returns
+// nothing.
+//------------------------------------------------------------------------------
+void FreeBucketParametersForManagedException(void *pgmb)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!IsWatsonEnabled())
+ {
+ _ASSERTE(pgmb == NULL);
+ return;
+ }
+
+ if (pgmb)
+ delete pgmb;
+} // void FreeBucketParametersForManagedException()
+
+
+//------------------------------------------------------------------------------
+// Description
+// Retrieves or builds the GenericMode bucket parameters for a managed
+// Watson dump.
+//
+// Parameters
+// pExceptionRecord -- Information regarding the exception
+// pGenericModeBlock -- Where to build the buckets
+// tore -- type of error being reported
+// pThread -- Pointer to Thread object of faulting thread
+//
+// Returns
+// S_OK or error code.
+//
+// Notes
+//      If there is a saved GenericModeBlock on the thread object's ExceptionState,
+//      that will be used.  Otherwise, a new block is created.
+//------------------------------------------------------------------------------
+HRESULT RetrieveManagedBucketParameters(
+ EXCEPTION_RECORD *pExceptionRecord,
+ GenericModeBlock *pGenericModeBlock,
+ TypeOfReportedError tore,
+ Thread *pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+#if defined(PRESERVE_WATSON_ACROSS_CONTEXTS)
+ GenericModeBlock *pBuckets = NULL;
+
+#ifdef FEATURE_CORECLR
+ // On CoreCLR, Watson may not be enabled. Thus, we should
+ // skip this.
+ if (IsWatsonEnabled())
+#endif // FEATURE_CORECLR
+ {
+ if (pThread != NULL)
+ {
+ // Try to get the buckets from the UE Watson Bucket Tracker
+ pBuckets = reinterpret_cast<GenericModeBlock*>(pThread->GetExceptionState()->GetUEWatsonBucketTracker()->RetrieveWatsonBuckets());
+ if ((pBuckets == NULL) && (pThread->GetExceptionState()->GetCurrentExceptionTracker() != NULL))
+ {
+            // If we didn't find the buckets in the UE Watson bucket tracker, then
+ // try to look them up in the current exception's watson tracker if
+ // an exception tracker exists.
+ pBuckets = reinterpret_cast<GenericModeBlock*>(pThread->GetExceptionState()->GetCurrentExceptionTracker()->GetWatsonBucketTracker()->RetrieveWatsonBuckets());
+ }
+ }
+ }
+
+ // See if the thread has some managed bucket parameters stashed away...
+ if (pBuckets != NULL)
+ { // Yes it does, so copy them to the output buffer.
+ LOG((LF_EH, LL_INFO100, "Watson: RetrieveManagedBucketParameters returning stashed parameters (%p)\n", pBuckets));
+ *pGenericModeBlock = *pBuckets;
+
+#if defined(_DEBUG)
+ LOG((LF_EH, LL_INFO100, "Watson b 1: %S\n", pGenericModeBlock->wzP1));
+ LOG((LF_EH, LL_INFO100, " b 2: %S\n", pGenericModeBlock->wzP2));
+ LOG((LF_EH, LL_INFO100, " b 3: %S\n", pGenericModeBlock->wzP3));
+ LOG((LF_EH, LL_INFO100, " b 4: %S\n", pGenericModeBlock->wzP4));
+ LOG((LF_EH, LL_INFO100, " b 5: %S\n", pGenericModeBlock->wzP5));
+ LOG((LF_EH, LL_INFO100, " b 6: %S\n", pGenericModeBlock->wzP6));
+ LOG((LF_EH, LL_INFO100, " b 7: %S\n", pGenericModeBlock->wzP7));
+ LOG((LF_EH, LL_INFO100, " b 8: %S\n", pGenericModeBlock->wzP8));
+ LOG((LF_EH, LL_INFO100, " b 9: %S\n", pGenericModeBlock->wzP9));
+#endif
+ }
+ else
+#endif
+ { // No stashed bucket parameters, so get them from the exception.
+ UINT_PTR ip = 0;
+ if (pExceptionRecord != NULL)
+ {
+ // This function is called from functions that have NOTHROW/GC_NOTRIGGER
+ // contracts (in particular EEPolicy::HandleFatalError). Because that
+ // function always passes pExceptionInfo as NULL, we will never actually
+ // reach the potentially throwing code.
+ //
+ CONTRACT_VIOLATION(ThrowsViolation | GCViolation);
+
+ ip = GetIPOfThrowSite(pExceptionRecord, pThread);
+ }
+
+ hr = GetManagedBucketParametersForIp(tore, pThread, ip, pGenericModeBlock, NULL);
+ }
+
+ return hr;
+} // HRESULT RetrieveManagedBucketParameters()
+
+//------------------------------------------------------------------------------
+// Description
+// Helper to get Watson bucket parameters, for the DebugManager interface.
+//
+// Parameters
+// pParams -- Fill the parameters here.
+//
+// Returns
+// S_OK -- Parameters filled in.
+// S_FALSE -- No current exception.
+// error -- Some error occurred.
+//
+// Note:
+// This function is exposed via the hosting interface.
+//------------------------------------------------------------------------------
+HRESULT GetBucketParametersForCurrentException(
+ BucketParameters *pParams)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ GenericModeBlock gmb;
+
+ // Make sure this is (or at least has been) a managed thread.
+ Thread *pThread = GetThread();
+ if (pThread == NULL)
+    {   // Not the greatest error, but we don't expect to be called on an unmanaged thread.
+ return E_UNEXPECTED;
+ }
+
+ if (!IsWatsonEnabled())
+ {
+ return E_NOTIMPL;
+ }
+
+ // And make sure there is a current exception.
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+ if (!pExState->IsExceptionInProgress())
+ return S_FALSE;
+
+ // Make sure we're not in the second pass.
+ if (pExState->GetFlags()->UnwindHasStarted())
+ { // unwind indicates the second pass, so quit
+ return S_FALSE;
+ }
+
+ EXCEPTION_RECORD *pExceptionRecord = pExState->GetExceptionRecord();
+
+ // Try to get the parameters...
+ hr = RetrieveManagedBucketParameters(pExceptionRecord, &gmb, TypeOfReportedError::UnhandledException, pThread);
+
+    // ... and if successful, copy to the output block.  If the return value is
+    // S_FALSE then it wasn't a managed exception and we should not copy the data.
+    // S_OK is the only success value that indicates the data was initialized.
+ if (hr == S_OK)
+ {
+ // Event type name.
+ wcsncpy_s(pParams->pszEventTypeName, COUNTOF(pParams->pszEventTypeName), gmb.wzEventTypeName, _TRUNCATE);
+
+ // Buckets. Mind the 1-based vs 0-based.
+ wcsncpy_s(pParams->pszParams[0], COUNTOF(pParams->pszParams[0]), gmb.wzP1, _TRUNCATE);
+ wcsncpy_s(pParams->pszParams[1], COUNTOF(pParams->pszParams[1]), gmb.wzP2, _TRUNCATE);
+ wcsncpy_s(pParams->pszParams[2], COUNTOF(pParams->pszParams[2]), gmb.wzP3, _TRUNCATE);
+ wcsncpy_s(pParams->pszParams[3], COUNTOF(pParams->pszParams[3]), gmb.wzP4, _TRUNCATE);
+ wcsncpy_s(pParams->pszParams[4], COUNTOF(pParams->pszParams[4]), gmb.wzP5, _TRUNCATE);
+ wcsncpy_s(pParams->pszParams[5], COUNTOF(pParams->pszParams[5]), gmb.wzP6, _TRUNCATE);
+ wcsncpy_s(pParams->pszParams[6], COUNTOF(pParams->pszParams[6]), gmb.wzP7, _TRUNCATE);
+ wcsncpy_s(pParams->pszParams[7], COUNTOF(pParams->pszParams[7]), gmb.wzP8, _TRUNCATE);
+ wcsncpy_s(pParams->pszParams[8], COUNTOF(pParams->pszParams[8]), gmb.wzP9, _TRUNCATE);
+ wcsncpy_s(pParams->pszParams[9], COUNTOF(pParams->pszParams[9]), gmb.wzP10, _TRUNCATE);
+
+ // All good.
+ pParams->fInited = TRUE;
+ }
+
+ return hr;
+} // HRESULT GetBucketParametersForCurrentException()
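+
+// Illustrative use from a host (a sketch; BucketParameters is the structure
+// this function fills above, and error handling is elided):
+//
+//      BucketParameters params = { 0 };
+//      HRESULT hr = GetBucketParametersForCurrentException(&params);
+//      if (hr == S_OK && params.fInited)
+//          ;   // params.pszEventTypeName and params.pszParams[0..9] are valid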
+
+
+//------------------------------------------------------------------------------
+// Description
+//      Reports the given error to Watson (dw20.exe), applying the policy
+//      computed by CLRWatsonHelper to decide about debugging, reporting, and UI.
+// Parameters
+// pExceptionInfo -- information about the exception that caused the error.
+// If the error is not the result of an exception, pass NULL for this
+// parameter
+// tore -- Information about the fault
+// pThread -- Thread object for faulting thread, could be NULL
+// dwThreadID -- OS Thread ID for faulting thread
+//
+// Returns
+// FaultReportResult -- enumeration indicating the
+// FaultReportResultAbort -- if Watson could not execute normally
+// FaultReportResultDebug -- if Watson executed normally, and the user
+// chose to debug the process
+// FaultReportResultQuit -- if Watson executed normally, and the user
+// chose to end the process (e.g. pressed "Send Error Report" or
+// "Don't Send").
+//
+// Exceptions
+// None.
+//------------------------------------------------------------------------------
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+FaultReportResult DoFaultReportWorker( // Was Watson attempted, successful? Run debugger?
+ EXCEPTION_POINTERS *pExceptionInfo, // Information about the fault.
+ TypeOfReportedError tore, // What sort of error is this?
+ Thread *pThread, // Thread object for faulting thread, could be NULL
+ DWORD dwThreadID) // OS Thread ID for faulting thread
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(!RunningOnWin7());
+
+ LOG((LF_EH, LL_INFO100, "DoFaultReportWorker: at sp %p ...\n", GetCurrentSP()));
+
+ if (!IsWatsonEnabled())
+ {
+ return FaultReportResultQuit;
+ }
+
+#if !defined(FEATURE_UEF_CHAINMANAGER)
+ // If we've already tried to report a Watson crash once, we don't really
+ // want to pester the user about this exception. This can occur in certain
+ // pathological programs.
+ // For events other than user breakpoint, we only want to report once.
+ // For user breakpoints, report whenever the thread wants to.
+ if (!tore.IsUserBreakpoint())
+ {
+ // If Watson already launched (say, on another thread)...
+ if (FastInterlockCompareExchange(&g_watsonAlreadyLaunched, 1, 0) != 0)
+ {
+ // wait until Watson process is completed
+ ClrWaitForSingleObject(g_hWatsonCompletionEvent, INFINITE_TIMEOUT);
+ return FaultReportResultQuit;
+ }
+ }
+#endif // FEATURE_UEF_CHAINMANAGER
+
+ // Assume an unmanaged fault until we determine otherwise.
+ BOOL bIsManagedFault = FALSE;
+
+    // If we don't have an ExceptionInfo, treat the fault as unmanaged.
+ if (pExceptionInfo)
+ {
+ if (IsExceptionFromManagedCode(pExceptionInfo->ExceptionRecord))
+ {
+ // This is a managed fault.
+ bIsManagedFault = TRUE;
+ }
+ }
+
+ // Figure out what we should do.
+ CLRWatsonHelper policy;
+ policy.Init(bIsManagedFault, tore);
+
+ if (policy.ShouldAutoAttach())
+ {
+ return FaultReportResultDebug;
+ }
+
+ // Is there anything for Watson to do? (Either report, or ask about debugging?)
+ if ((!policy.ShouldReport()) && (!policy.ShouldOfferDebug()) && (!policy.ShouldShowUI()))
+ {
+ // Hmm ... we're not supposed to report anything or pop up a dialog. In
+ // this case, we can stop right now.
+ return FaultReportResultQuit;
+ }
+
+ HANDLE hWatsonSharedMemory;
+ DWSharedMem *pWatsonSharedMemory;
+ {
+ HRESULT hr = CreateWatsonSharedMemory(&hWatsonSharedMemory,
+ &pWatsonSharedMemory);
+ if (FAILED(hr))
+ {
+ return FaultReportResultAbort;
+ }
+ }
+
+ // Some basic bookkeeping for Watson
+ pWatsonSharedMemory->dwSize = sizeof(DWSharedMem);
+ pWatsonSharedMemory->dwVersion = DW_CURRENT_VERSION;
+ pWatsonSharedMemory->pid = GetCurrentProcessId();
+ pWatsonSharedMemory->tid = dwThreadID;
+ _snwprintf_s(pWatsonSharedMemory->wzEventLogSource,
+ NumItems(pWatsonSharedMemory->wzEventLogSource),
+ _TRUNCATE,
+ W(".NET Runtime %0d.%0d Error Reporting"),
+ VER_MAJORVERSION,
+ VER_MINORVERSION);
+ pWatsonSharedMemory->eip = (pExceptionInfo) ? reinterpret_cast< DWORD_PTR >(pExceptionInfo->ExceptionRecord->ExceptionAddress) : NULL;
+
+    // If we set exception pointers, the debugger will automatically do a .ecxr on them.  So,
+    // don't set the pointers unless it really is an exception and we have
+    // a good context record.
+ if (tore.IsException() ||
+ (tore.IsFatalError() && pExceptionInfo && pExceptionInfo->ContextRecord &&
+ (pExceptionInfo->ContextRecord->ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
+ )
+ {
+ pWatsonSharedMemory->pep = pExceptionInfo;
+ }
+ else
+ {
+ pWatsonSharedMemory->pep = NULL;
+ }
+
+ // Handles to kernel objects that Watson uses.
+ //
+ // We're expecting these handles to be valid until the Watson child process
+ // has run to completion. Make sure these holders stay in scope until after
+ // the call to RunWatson
+
+ HandleHolder hEventDone(NULL),
+ hEventNotifyDone(NULL),
+ hEventAlive(NULL),
+ hMutex(NULL),
+ hProc(NULL),
+ sharedMemoryHolder(hWatsonSharedMemory);
+ {
+ // SECURITY_ATTRIBUTES so the handles can be inherited (by Watson).
+ SECURITY_ATTRIBUTES securityAttributes =
+ { sizeof(SECURITY_ATTRIBUTES), NULL, true };
+
+ hEventDone = WszCreateEvent(&securityAttributes, FALSE, FALSE, NULL);
+ if (hEventDone == NULL)
+ {
+ LOG((LF_EH, LL_INFO100, "CLR Watson: WszCreateEvent returned error, GetLastError(): %#x\n", GetLastError()));
+ return FaultReportResultAbort;
+ }
+ pWatsonSharedMemory->hEventDone = hEventDone;
+
+
+ hEventNotifyDone = WszCreateEvent(&securityAttributes, FALSE, FALSE, NULL);
+ if (hEventNotifyDone == NULL)
+ {
+ LOG((LF_EH, LL_INFO100, "CLR Watson: WszCreateEvent returned error, GetLastError(): %#x\n", GetLastError()));
+ return FaultReportResultAbort;
+ }
+ pWatsonSharedMemory->hEventNotifyDone = hEventNotifyDone;
+
+
+ hEventAlive = WszCreateEvent(&securityAttributes, FALSE, FALSE, NULL);
+ if (hEventAlive == NULL)
+ {
+ LOG((LF_EH, LL_INFO100, "CLR Watson: WszCreateEvent returned error, GetLastError(): %#x\n", GetLastError()));
+ return FaultReportResultAbort;
+ }
+ pWatsonSharedMemory->hEventAlive = hEventAlive;
+
+
+ hMutex = WszCreateMutex(&securityAttributes, FALSE, NULL);
+ if (hMutex == NULL)
+ {
+            LOG((LF_EH, LL_INFO100, "CLR Watson: WszCreateMutex returned error, GetLastError(): %#x\n", GetLastError()));
+ return FaultReportResultAbort;
+ }
+ pWatsonSharedMemory->hMutex = hMutex;
+ }
+
+ // During error reporting we need to do dump collection, freeze threads inside the process, read memory blocks
+ // (if you register memory), read stuff from the PEB, create remote threads for recovery. So it needs quite a
+ // lot of permissions; we end up with PROCESS_ALL_ACCESS to satisfy all required permissions.
+ hProc = OpenProcess(PROCESS_ALL_ACCESS,
+ TRUE,
+ pWatsonSharedMemory->pid);
+ if (hProc == NULL)
+ {
+ LOG((LF_EH, LL_INFO100, "CLR Watson: OpenProcess returned error, GetLastError(): %#x\n", GetLastError()));
+ return FaultReportResultAbort;
+ }
+
+ pWatsonSharedMemory->hProc = hProc;
+
+
+ // Flags to control reporting, queuing, etc.
+ DWORD reportingFlags = kReportingFlags; // 0
+ DWORD uiFlags = kUIFlags; // fDwuDenySuspend | fDwuShowFeedbackLink
+    DWORD dwEflags  = kExceptionModeFlags;   // fDweDefaultQuit | fDweGatherHeapAsMdmp | fDweReleaseBeforeCabbing
+
+ // Reporting flags...
+ if (policy.ShouldQueueReport())
+ { // If we should queue a report,
+        //  turn on kQueuingReportingFlags, which is fDwrForceToAdminQueue | fDwrIgnoreHKCU
+ reportingFlags |= kQueuingReportingFlags;
+ }
+ else
+ if (!policy.ShouldReport())
+ { // We shouldn't report at all,
+ // turn on kNoReportFlags, which is fDwrNeverUpload, which means "don't report"
+ reportingFlags |= kNoReportFlags;
+ }
+ else
+ {
+ // Ask to report.
+ }
+
+ // Offer flags...
+ DWORD offerFlags = kOfferFlags; // msoctdsQuit
+ if (policy.ShouldOfferDebug())
+ { // Turn on msoctdsDebug, which adds "Debug" button.
+ offerFlags |= msoctdsDebug;
+ }
+ else
+ { // No debug, so ignore aeDebug
+ dwEflags |= fDweIgnoreAeDebug;
+ }
+
+ // UI flags...
+ if (policy.ShouldQueueReport() && !policy.ShouldOfferDebug())
+    {   // Queue report headlessly.  Turn on kQueuingUIFlags, which is fDwuNoEventUI.
+ uiFlags |= kQueuingUIFlags;
+ }
+
+ pWatsonSharedMemory->bfmsoctdsOffer = offerFlags; // From above
+ pWatsonSharedMemory->bfDWRFlags = reportingFlags; // From above
+ pWatsonSharedMemory->bfDWUFlags = uiFlags; // From above
+ pWatsonSharedMemory->bfDWEFlags = dwEflags; // From above
+ pWatsonSharedMemory->bfDWMFlags = kMiscFlags; // 0
+
+ // We're going to rely on Watson's default localization behavior.
+ pWatsonSharedMemory->lcidUI = 0;
+
+ // By default, Watson will terminate the process after snapping a
+ // minidump. Notify & LetRun flags disable that.
+ pWatsonSharedMemory->bfmsoctdsNotify = msoctdsNull;
+ pWatsonSharedMemory->bfmsoctdsLetRun = offerFlags;
+
+ {
+ DWORD dwRet = WszGetModuleFileName(NULL,
+ pWatsonSharedMemory->wzModuleFileName,
+ NumItems(pWatsonSharedMemory->wzModuleFileName));
+ _ASSERTE(0 != dwRet);
+ if (0 == dwRet)
+ {
+ LOG((LF_EH, LL_INFO100, "CLR Watson: WszGetModuleFileName returned error, GetLastError(): %#x\n", GetLastError()));
+ return FaultReportResultAbort;
+ }
+ }
+
+    // We're going to capture the same minidump information for all modules, so set wzDotDataDlls to "*"
+ if (sizeof(DW_ALLMODULES) <= sizeof(pWatsonSharedMemory->wzDotDataDlls))
+ {
+ memcpy(pWatsonSharedMemory->wzDotDataDlls, DW_ALLMODULES, sizeof(DW_ALLMODULES));
+ }
+ else
+ {
+ // Assert, but go on
+ _ASSERTE(sizeof(DW_ALLMODULES) <= sizeof(pWatsonSharedMemory->wzDotDataDlls));
+ pWatsonSharedMemory->wzDotDataDlls[0] = 0;
+ }
+
+ // UI Customization
+ //
+ // The only UI customization we perform is to set the App Name. Currently we
+ // do this just by using the executable name.
+ //
+ {
+ WCHAR buf[_MAX_PATH]; // Buffer for path for description.
+ WCHAR *pName = buf; // Pointer to filename or description.
+ int size; // Size of description.
+ HMODULE hModule; // Handle to module.
+ DWORD result; // Return code
+
+ // Get module name.
+ hModule = WszGetModuleHandle(NULL);
+ result = WszGetModuleFileName(hModule, buf, NumItems(buf));
+
+ if (result == 0)
+ { // Couldn't get module name. This should never happen.
+ wcscpy_s(buf, COUNTOF(buf), W("<<unknown>>"));
+ }
+ else
+ { // re-use the buf for pathname and description.
+ size = DwGetAppDescription(buf, buf, NumItems(buf));
+
+            // If the returned size was zero, buf wasn't changed and still contains the path.
+            //  Find just the filename part.
+ if (size == 0)
+ { // Look for final '\'
+ pName = wcsrchr(buf, W('\\'));
+ // If found, skip it; if not, point to full name.
+ pName = pName ? pName+1 : buf;
+ }
+ }
+
+ wcsncpy_s(pWatsonSharedMemory->uib.wzGeneral_AppName,
+ COUNTOF(pWatsonSharedMemory->uib.wzGeneral_AppName),
+ pName,
+ _TRUNCATE);
+
+ // For breakpoint, need to customize the "We're sorry..." message
+ if (tore.IsBreakpoint())
+ {
+ LCID lcid = 0;
+ // Get the message.
+ StackSString sszMain_Intro_Bold;
+ StackSString sszMain_Intro_Reg;
+ EX_TRY
+ {
+ sszMain_Intro_Bold.LoadResource(CCompRC::Debugging, IDS_WATSON_DEBUG_BREAK_INTRO_BOLD);
+ sszMain_Intro_Reg.LoadResource(CCompRC::Debugging, IDS_WATSON_DEBUG_BREAK_INTRO_REG);
+ // Try to determine the language used for the above resources
+ // At the moment this OS call is a heuristic which should match most of the time. But the
+ // CLR is starting to support languages that don't even have LCIDs, so this may not always
+ // be correct (and there may be NO LCID we can pass to watson). Long term, the correct fix
+ // here is to get out of the game of making watson policy / UI decisions. This is happening
+ // for Windows 7.
+ lcid = GetThreadLocale();
+ }
+ EX_CATCH
+ {
+ // Just don't customize.
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ // If we were able to get a string, set it.
+ if (sszMain_Intro_Reg.GetCount() > 0)
+ {
+                // Instead of "<app.exe> has encountered an error and needs to close...", say
+ // "<app.exe> has encountered a user-defined breakpoint."
+ wcsncpy_s(pWatsonSharedMemory->uib.wzMain_Intro_Bold, COUNTOF(pWatsonSharedMemory->uib.wzMain_Intro_Bold), sszMain_Intro_Bold, _TRUNCATE);
+ // Instead of "If you were in the middle of something...", say
+ // "A breakpoint in an application indicates a program error..."
+ wcsncpy_s(pWatsonSharedMemory->uib.wzMain_Intro_Reg, COUNTOF(pWatsonSharedMemory->uib.wzMain_Intro_Reg), sszMain_Intro_Reg, _TRUNCATE);
+
+ pWatsonSharedMemory->bfDWUFlags = fDwuDenySuspend;
+
+ pWatsonSharedMemory->lcidUI = lcid;
+ }
+ }
+
+ }
+
+ // Get the bucket parameters.
+ switch (tore.GetType())
+ {
+ case TypeOfReportedError::NativeThreadUnhandledException:
+ // Let Watson provide the buckets for a native thread.
+ break;
+ case TypeOfReportedError::UnhandledException:
+ case TypeOfReportedError::FatalError:
+ case TypeOfReportedError::UserBreakpoint:
+ case TypeOfReportedError::NativeBreakpoint:
+ // For managed exception or exceptions that come from managed code, we get the managed bucket parameters,
+ // which will be displayed in the "details" section on any UI.
+ //
+ // Otherwise, use the unmanaged IP to bucket.
+ if (bIsManagedFault)
+ {
+ RetrieveManagedBucketParameters(pExceptionInfo?pExceptionInfo->ExceptionRecord:NULL, &pWatsonSharedMemory->gmb, tore, pThread);
+ }
+ break;
+ default:
+ _ASSERTE(!"Unexpected TypeOfReportedException");
+ break;
+ }
+
+ // dwThisThreadExFlags and dwOtherThreadExFlags are only used on IA64.
+ CustomMinidumpBlock cmb =
+ {
+ TRUE, // fCustomMinidump
+ kMiniDumpType, // dwMinidumpType : MiniDumpNormal
+ FALSE, // fOnlyThisThread
+ kThreadWriteFlags, // dwThisThreadFlags : ThreadWriteThread | ThreadWriteContext | ThreadWriteStack
+ kThreadWriteFlags, // dwOtherThreadFlags
+ 0, // dwThisThreadExFlags
+ 0, // dwOtherThreadExFlags
+ kModuleWriteFlags, // dwPreferredModuleFlags
+ kModuleWriteFlags // dwOtherModuleFlags.
+ };
+
+ pWatsonSharedMemory->cmb = cmb;
+
+ // At this point, the IPC block is all ready to go
+ BOOL result = false;
+    // We only want the call to RunWatson below to execute when
+    // secondInvocation is true.
+ BOOL secondInvocation = true;
+
+
+ EX_TRY
+ {
+ bool fRunWatson = false;
+#if defined(_TARGET_X86_)
+ bool fGuardPagePresent = false;
+
+ // There is an unfortunate side effect of calling ReadProcessMemory() out-of-process on IA64 WOW.
+ // On all platforms (IA64 native & WOW64, AMD64 native & WOW64, and x86 native), if we call
+ // ReadProcessMemory() out-of-process on a page with PAGE_GUARD protection, the read operation
+ // fails as expected. However, on IA64 WOW64 only, the PAGE_GUARD protection is removed after
+ // the read operation. Even IA64 native preserves the PAGE_GUARD protection.
+ // See VSW 451447 for more information.
+ if ((pThread != NULL) && pThread->DetermineIfGuardPagePresent())
+ {
+ fGuardPagePresent = true;
+ }
+#endif // _TARGET_X86_
+
+ if (secondInvocation)
+ {
+ fRunWatson = true;
+ result = RunWatson(hWatsonSharedMemory,
+ pWatsonSharedMemory->hEventAlive,
+ pWatsonSharedMemory->hEventDone,
+ pWatsonSharedMemory->hMutex);
+ }
+
+#if defined(_TARGET_X86_)
+ if (fRunWatson && fGuardPagePresent)
+ {
+ // This shouldn't cause a problem because guard pages are present in the first place.
+ _ASSERTE(pThread != NULL);
+ pThread->RestoreGuardPage();
+ }
+#endif // _TARGET_X86_
+ }
+ EX_CATCH
+ {
+ // We couldn't wait around for watson to execute for some reason.
+ result = false;
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ // It's now safe to close all the synchronization and process handles.
+
+ if (!result)
+ {
+ // Hmmm ... watson couldn't execute correctly.
+ return FaultReportResultAbort;
+ }
+
+ LOG((LF_EH, LL_INFO100, "CLR Watson: returned 0x%x\n", pWatsonSharedMemory->msoctdsResult));
+
+ // If user clicked "Debug"
+ if (msoctdsDebug == pWatsonSharedMemory->msoctdsResult)
+ {
+ return FaultReportResultDebug;
+ }
+
+ // No debugging, successful completion.
+ return FaultReportResultQuit;
+} // FaultReportResult DoFaultReportWorker()
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+class WatsonThreadData {
+ public:
+
+ WatsonThreadData(EXCEPTION_POINTERS *pExc, TypeOfReportedError t, Thread* pThr, DWORD dwID, FaultReportResult res)
+ : pExceptionInfo(pExc)
+ , tore(t)
+ , pThread(pThr)
+ , dwThreadID(dwID)
+ , result(res)
+ {
+ }
+
+ EXCEPTION_POINTERS *pExceptionInfo; // Information about the exception, NULL if the error is not caused by an exception
+ TypeOfReportedError tore; // Information about the fault
+ Thread* pThread; // Thread object for faulting thread, could be NULL
+ DWORD dwThreadID; // OS Thread ID for faulting thread
+ FaultReportResult result; // Result of invoking Watson
+};
+
+class WatsonSOExceptionAddress {
+ public:
+
+ WatsonSOExceptionAddress()
+ {
+ m_SystemMethod = NULL;
+ m_UserMethod = NULL;
+ }
+
+ SLOT m_SystemMethod; // IP in the first method on the stack which is in a system module
+ SLOT m_UserMethod; // IP in the first method on the stack which is in a non-system module
+};
+
+//------------------------------------------------------------------------------
+// Description
+// This function is the stack walk callback for a thread that hit a soft SO (i.e., a SO caused by a
+// failed stack probe).
+//
+// Parameters
+// pCf -- A pointer to the current CrawlFrame
+// data - A pointer to WatsonSOExceptionAddress instance
+//
+// Returns:
+// SWA_ABORT to stop the stack crawl
+// SWA_CONTINUE to continue crawling the stack
+//
+// Exceptions
+// None.
+//------------------------------------------------------------------------------
+StackWalkAction WatsonSOStackCrawlCallback(CrawlFrame* pCf, void* pParam)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(pParam != NULL);
+ WatsonSOExceptionAddress *pData = (WatsonSOExceptionAddress *) pParam;
+
+ SLOT ip;
+
+ if (pCf->IsFrameless())
+ {
+ ip = (PBYTE)GetControlPC(pCf->GetRegisterSet());
+ }
+ else
+ {
+ ip = (SLOT) pCf->GetFrame()->GetIP();
+ }
+
+ MethodDesc *pMD = pCf->GetFunction();
+
+ if (pMD != NULL)
+ {
+ if (pMD->GetModule()->IsSystem())
+ {
+ if (pData->m_SystemMethod == NULL)
+ {
+ pData->m_SystemMethod = ip;
+ }
+ return SWA_CONTINUE;
+ }
+ else
+ {
+ _ASSERTE(pData->m_UserMethod == NULL);
+ pData->m_UserMethod = ip;
+ return SWA_ABORT;
+ }
+ }
+ else
+ {
+ return SWA_CONTINUE;
+ }
+
+} // StackWalkAction WatsonSOStackCrawlCallback()
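+
+// Note: StackWalkFrames visits frames newest-first, so m_UserMethod ends up
+// holding the IP of the topmost non-system managed frame -- exactly what the
+// soft-SO path in DoFaultReportWorkerCallback below wants for ExceptionAddress.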
+
+//------------------------------------------------------------------------------
+// Description
+// Wrapper function for DoFaultReport. This function is called for SOs.
+// It sets up the ExceptionInfo appropriately for soft SOs (caused by
+//      failed stack probes) before calling DoFaultReport.
+//
+// Parameters
+// pParam -- A pointer to a WatsonThreadData instance
+//
+// Exceptions
+// None.
+//------------------------------------------------------------------------------
+DWORD WINAPI DoFaultReportWorkerCallback(LPVOID pParam)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(pParam != NULL);
+
+ WatsonThreadData* pData = (WatsonThreadData*) pParam;
+
+ EXCEPTION_POINTERS ExceptionInfo;
+ EXCEPTION_RECORD ExceptionRecord;
+ PEXCEPTION_POINTERS pExceptionInfo = pData->pExceptionInfo;
+
+ if (IsSOExceptionCode(pExceptionInfo->ExceptionRecord->ExceptionCode))
+ {
+ EX_TRY
+ {
+ if (ShouldLogInEventLog())
+ {
+ EventReporter reporter(EventReporter::ERT_StackOverflow);
+ reporter.Report();
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ SetupThread();
+
+ GCX_COOP();
+
+ if (pData->pThread != NULL && pExceptionInfo != NULL &&
+ pExceptionInfo->ContextRecord == NULL &&
+ pExceptionInfo->ExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW &&
+ pExceptionInfo->ExceptionRecord->ExceptionAddress == 0)
+ {
+ // In the case of a soft SO on a managed thread, we set the ExceptionAddress to one of the following
+ //
+ // 1. The first method on the stack that is in a non-system module.
+ // 2. Failing that, the first method on the stack that is in a system module
+
+ CONTEXT ContextRecord;
+ memset(&ContextRecord, 0, sizeof(CONTEXT));
+
+ ExceptionInfo.ContextRecord = &ContextRecord; // To display the "Send" button, dw20 wants a non-NULL pointer
+ ExceptionRecord = *(pExceptionInfo->ExceptionRecord);
+ ExceptionInfo.ExceptionRecord = &ExceptionRecord;
+ pExceptionInfo = &ExceptionInfo;
+
+ WatsonSOExceptionAddress WatsonExceptionAddresses;
+
+ pData->pThread->StackWalkFrames(
+ WatsonSOStackCrawlCallback,
+ &WatsonExceptionAddresses,
+ FUNCTIONSONLY|ALLOW_ASYNC_STACK_WALK);
+
+ if (WatsonExceptionAddresses.m_UserMethod != NULL)
+ {
+ pExceptionInfo->ExceptionRecord->ExceptionAddress = WatsonExceptionAddresses.m_UserMethod;
+ }
+ else if (WatsonExceptionAddresses.m_SystemMethod != NULL)
+ {
+ pExceptionInfo->ExceptionRecord->ExceptionAddress = WatsonExceptionAddresses.m_SystemMethod;
+ }
+
+ }
+
+ pData->result = DoFaultReportWorker(
+ pExceptionInfo,
+ pData->tore,
+ pData->pThread,
+ pData->dwThreadID);
+
+
+ return 0;
+
+} // DoFaultReportWorkerCallback()
+
+DWORD WINAPI ResetWatsonBucketsCallbackForStackOverflow(LPVOID pParam)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(IsWatsonEnabled());
+ PRECONDITION(RunningOnWin7());
+ PRECONDITION(pParam != NULL);
+ }
+ CONTRACTL_END;
+
+ // The ThreadStore lock could already be taken (SO during GC), so we skip creating a managed thread and use a hardcoded exception name.
+ // If we wanted to get the exception name from OBJECTREF we would have to switch to GC_COOP mode and be on a managed thread.
+
+ ResetWatsonBucketsParams * pRWBP = reinterpret_cast<ResetWatsonBucketsParams *>(pParam);
+ Thread * pThread = pRWBP->m_pThread;
+ PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = pThread->GetExceptionState()->GetUEWatsonBucketTracker();
+ _ASSERTE(pUEWatsonBucketTracker != NULL);
+
+ UINT_PTR ip = reinterpret_cast<UINT_PTR>(pRWBP->pExceptionRecord->ExceptionAddress);
+ pUEWatsonBucketTracker->SaveIpForWatsonBucket(ip);
+ pUEWatsonBucketTracker->CaptureUnhandledInfoForWatson(TypeOfReportedError::StackOverflowException, pThread, NULL);
+ if (pUEWatsonBucketTracker->RetrieveWatsonBuckets() == NULL)
+ {
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ }
+
+ return 0;
+}
+
+//------------------------------------------------------------------------------
+// Description
+// This function is called by the Debugger thread in response to a favor
+// posted to it by the faulting thread. The faulting thread uses the
+// Debugger thread to reset Watson buckets in the case of stack overflows.
+// Since the debugger thread doesn't have a managed Thread object,
+// it cannot perform the reset directly.
+// Instead, this function spawns a worker thread and waits for it to complete.
+//
+// Parameters
+// pParam -- A pointer to a ResetWatsonBucketsParams instance
+//
+// Exceptions
+// None.
+//------------------------------------------------------------------------------
+void ResetWatsonBucketsFavorWorker(void * pParam)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(IsWatsonEnabled());
+ PRECONDITION(RunningOnWin7());
+ PRECONDITION(pParam != NULL);
+ }
+ CONTRACTL_END;
+
+ HANDLE hThread = NULL;
+ DWORD dwThreadId;
+
+ hThread = ::CreateThread(NULL, 0, ResetWatsonBucketsCallbackForStackOverflow, pParam, 0, &dwThreadId);
+ if (hThread != NULL)
+ {
+ WaitForSingleObject(hThread, INFINITE);
+ CloseHandle(hThread);
+ }
+
+ return;
+}
+
+
+//------------------------------------------------------------------------------
+// Description
+// This function is called by the Debugger thread in response to a favor
+// posted to it by the faulting thread. The faulting thread uses the
+// Debugger thread to invoke Watson in the case of stack overflows.
+// Since the debugger thread doesn't have a managed Thread object,
+// it cannot be directly used to call DoFaultReport. Instead, this function
+// spawns a worker thread and waits for it to complete.
+//
+// Parameters
+// pParam -- A pointer to a WatsonThreadData instance
+//
+// Exceptions
+// None.
+//------------------------------------------------------------------------------
+void DoFaultReportFavorWorker(void* pParam)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(pParam != NULL);
+
+ HANDLE hThread = NULL;
+ DWORD dwThreadId;
+
+ hThread = ::CreateThread(NULL, 0, DoFaultReportWorkerCallback, pParam, 0, &dwThreadId);
+ if (hThread != NULL)
+ {
+ WaitForSingleObject(hThread, INFINITE);
+ CloseHandle(hThread);
+ }
+
+ return;
+
+} // void DoFaultReportFavorWorker()
+
+//----------------------------------------------------------------------------
+// CreateThread() callback to invoke native Watson or put up our fake Watson
+// dialog, depending on the m_fDoReportFault value.
+//
+// The output is a FaultReport* value communicated by setting
+// pFaultReportInfo->m_result. The DWORD function return value
+// is unused.
+//----------------------------------------------------------------------------
+static DWORD WINAPI DoFaultReportCreateThreadCallback(LPVOID pFaultReportInfoAsVoid)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+
+ // We are allowed to ignore OOMs here as FaultReport() is merely a notification of
+ // an unhandled exception. If we can't do the report, that's just too bad.
+ FAULT_NOT_FATAL();
+
+ LOG((LF_EH, LL_INFO100, "DoFaultReport: at sp %p ...\n", GetCurrentSP()));
+
+ FaultReportInfo *pFaultReportInfo = (FaultReportInfo*)pFaultReportInfoAsVoid;
+ EXCEPTION_POINTERS *pExceptionInfo = pFaultReportInfo->m_pExceptionInfo;
+
+ if (pFaultReportInfo->m_fDoReportFault)
+ {
+ pFaultReportInfo->m_faultRepRetValResult = DoReportFault(pExceptionInfo);
+ }
+ else
+ {
+ int res = EEMessageBoxCatastrophicWithCustomizedStyle(
+ IDS_DEBUG_UNHANDLEDEXCEPTION,
+ IDS_DEBUG_SERVICE_CAPTION,
+ MB_OKCANCEL | MB_ICONEXCLAMATION,
+ TRUE,
+ GetCurrentProcessId(),
+ GetCurrentProcessId(),
+ pFaultReportInfo->m_threadid,
+ pFaultReportInfo->m_threadid
+ );
+ if (res == IDOK)
+ {
+ pFaultReportInfo->m_faultReportResult = FaultReportResultQuit;
+ }
+ else
+ {
+ pFaultReportInfo->m_faultReportResult = FaultReportResultDebug;
+ }
+ }
+
+ return 0;
+}
+
+
+//----------------------------------------------------------------------------
+// Favor callback for the debugger thread.
+//----------------------------------------------------------------------------
+VOID WINAPI DoFaultReportDoFavorCallback(LPVOID pFaultReportInfoAsVoid)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+
+ // Since the debugger thread doesn't allow the ordinary operator new allocations that our code
+ // indirectly performs, it cannot be used to call DoFaultReport directly. Instead, this function
+ // spawns a worker thread and waits for it to complete.
+
+ HANDLE hThread = NULL;
+ DWORD dwThreadId;
+
+ hThread = ::CreateThread(NULL, 0, DoFaultReportCreateThreadCallback, pFaultReportInfoAsVoid, 0, &dwThreadId);
+ if (hThread != NULL)
+ {
+ WaitForSingleObject(hThread, INFINITE);
+ CloseHandle(hThread);
+ }
+}
+
+
+
+//------------------------------------------------------------------------------
+// Description
+// Invokes Watson to report the error. When the faulting thread cannot be used
+// directly (stack overflow, or the thread cannot open a handle to its own
+// process), the work is routed through the debugger helper thread.
+//
+// Parameters
+// pExceptionInfo -- information about the exception that caused the error.
+// If the error is not the result of an exception, pass NULL for this
+// parameter
+// tore -- Information about the fault
+// Returns
+// FaultReportResult -- enumeration indicating the outcome:
+// FaultReportResultAbort -- if Watson could not execute normally
+// FaultReportResultDebug -- if Watson executed normally, and the user
+// chose to debug the process
+// FaultReportResultQuit -- if Watson executed normally, and the user
+// chose to end the process (e.g. pressed "Send Error Report" or
+// "Don't Send").
+//
+// Exceptions
+// None.
+//------------------------------------------------------------------------------
+FaultReportResult DoFaultReport( // Was Watson attempted, successful? Run debugger?
+ EXCEPTION_POINTERS *pExceptionInfo, // Information about the fault.
+ TypeOfReportedError tore) // What sort of error is this?
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(!RunningOnWin7());
+
+ LOG((LF_EH, LL_INFO100, "DoFaultReport: at sp %p ...\n", GetCurrentSP()));
+
+ Thread *pThread = GetThread();
+
+#ifdef FEATURE_CORECLR
+ // If Watson isn't available (e.g. in Silverlight), then use a simple dialog box instead
+ if (!IsWatsonEnabled())
+ {
+ if (!pThread)
+ {
+ return FaultReportResultAbort;
+ }
+
+ // Since the StackOverflow handler also calls us, we must keep our stack budget
+ // to a minimum. Thus, we will launch a thread to do the actual work.
+ FaultReportInfo fri;
+ fri.m_fDoReportFault = FALSE;
+ fri.m_pExceptionInfo = pExceptionInfo;
+ fri.m_threadid = GetCurrentThreadId();
+ // DoFaultReportCreateThreadCallback will overwrite this - if it doesn't, we'll assume it failed.
+ fri.m_faultReportResult = FaultReportResultAbort;
+
+ GCX_PREEMP();
+
+
+ if (pExceptionInfo->ExceptionRecord->ExceptionCode != STATUS_STACK_OVERFLOW)
+ {
+ DoFaultReportCreateThreadCallback(&fri);
+ }
+ else
+ {
+ // Stack overflow case - we don't have enough stack on our own thread so let the debugger
+ // helper thread do the work.
+ if (!g_pDebugInterface || FAILED(g_pDebugInterface->RequestFavor(DoFaultReportDoFavorCallback, &fri)))
+ {
+ // If we can't initialize the debugger helper thread or we are running on the debugger helper
+ // thread, give it up. We don't have enough stack space.
+
+ }
+ }
+
+ return fri.m_faultReportResult;
+ }
+#endif // FEATURE_CORECLR
+
+#ifdef FEATURE_UEF_CHAINMANAGER
+ if (g_pUEFManager && !tore.IsUserBreakpoint())
+ {
+ IWatsonSxSManager * pWatsonSxSManager = g_pUEFManager->GetWastonSxSManagerInstance();
+
+ // Has Watson report been triggered?
+ if (pWatsonSxSManager->HasWatsonBeenTriggered())
+ {
+ LOG((LF_EH, LL_INFO100, "DoFaultReport: Watson has been triggered."));
+ LeaveRuntimeHolderNoThrow holder(reinterpret_cast< size_t >(WaitForSingleObject));
+ pWatsonSxSManager->WaitForWatsonSxSCompletionEvent();
+ return FaultReportResultQuit;
+ }
+ // The unhandled exception is thrown by the current runtime.
+ else if (IsExceptionFromManagedCode(pExceptionInfo->ExceptionRecord))
+ {
+ // Is the current runtime allowed to report Watson?
+ if (!pWatsonSxSManager->IsCurrentRuntimeAllowedToReportWatson())
+ {
+ LOG((LF_EH, LL_INFO100, "DoFaultReport: Watson is reported by another runtime."));
+ LeaveRuntimeHolderNoThrow holder(reinterpret_cast< size_t >(WaitForSingleObject));
+ pWatsonSxSManager->WaitForWatsonSxSCompletionEvent();
+ return FaultReportResultQuit;
+ }
+ }
+ // The unhandled exception is thrown by another runtime in the process.
+ else if (pWatsonSxSManager->IsExceptionClaimed(pExceptionInfo->ExceptionRecord))
+ {
+ LOG((LF_EH, LL_INFO100, "DoFaultReport: Watson will be reported by another runtime.\n"));
+ return FaultReportResultQuit;
+ }
+ // The unhandled exception is thrown by native code.
+ else
+ {
+ // Is the current runtime allowed to report Watson?
+ if (!pWatsonSxSManager->IsCurrentRuntimeAllowedToReportWatson())
+ {
+ LOG((LF_EH, LL_INFO100, "DoFaultReport: Watson is reported by another runtime."));
+ LeaveRuntimeHolderNoThrow holder(reinterpret_cast< size_t >(WaitForSingleObject));
+ pWatsonSxSManager->WaitForWatsonSxSCompletionEvent();
+ return FaultReportResultQuit;
+ }
+ }
+ }
+#endif // FEATURE_UEF_CHAINMANAGER
+
+ // Check if the current thread has permission to open a process handle for the current process.
+ // If not, the current thread may have been impersonated, and we have to launch Watson from a new thread, as in the SO case.
+ BOOL fOpenProcessFailed = FALSE;
+ if (pExceptionInfo->ExceptionRecord->ExceptionCode != STATUS_STACK_OVERFLOW)
+ {
+ HandleHolder hProcess = OpenProcess(PROCESS_ALL_ACCESS, TRUE, GetCurrentProcessId());
+ fOpenProcessFailed = hProcess == NULL;
+ }
+
+ if ((pExceptionInfo->ExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW) || fOpenProcessFailed)
+ {
+
+ WatsonThreadData* pData = new(nothrow) WatsonThreadData(
+ pExceptionInfo,
+ tore,
+ pThread,
+ GetCurrentThreadId(),
+ FaultReportResultAbort); // default result
+
+ if (pData == NULL)
+ {
+ return FaultReportResultAbort;
+ }
+
+ GCX_PREEMP();
+
+ if (!g_pDebugInterface ||
+ FAILED(g_pDebugInterface->RequestFavor(DoFaultReportFavorWorker, pData)))
+ {
+ // If we can't initialize the debugger helper thread or we are running on the debugger helper
+ // thread, return without invoking Watson. We don't have enough stack space.
+
+ delete pData;
+ return FaultReportResultAbort;
+ }
+
+ FaultReportResult ret = pData->result;
+ delete pData;
+ return ret;
+ }
+
+ return DoFaultReportWorker(pExceptionInfo, tore, GetThread(), GetCurrentThreadId());
+} // FaultReportResult DoFaultReport()
+
+// look at the type of the contract failure. if it's a precondition then we want to blame the caller
+// of the method that originated the ContractException, not just the first non-contract runtime frame.
+// if this isn't a ContractException then we default to Invariant, which won't skip the extra frame.
+ContractFailureKind GetContractFailureKind(OBJECTREF obj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(obj != NULL);
+ }
+ CONTRACTL_END;
+
+ PTR_MethodTable pMT = obj->GetMethodTable();
+
+ if (MscorlibBinder::IsException(pMT, kContractException))
+ return CONTRACTEXCEPTIONREF(obj)->GetContractFailureKind();
+
+ // there are cases where the code contracts rewriter will use a ContractException
+ // type that's compiled into the user's assembly. if we get here then this is
+// one of those cases. we will make a best guess as to whether this is a ContractException
+ // so that we can return the value in the _Kind field.
+
+ // NOTE: this really isn't meant to be a general-purpose solution for identifying ContractException types.
+ // we're making a few assumptions here since we're being called in context of WER bucket parameter generation.
+
+ // just return anything that isn't precondition so that an extra frame won't be skipped.
+ ContractFailureKind result = CONTRACT_FAILURE_INVARIANT;
+
+ // first compare the exception name.
+ PTR_MethodTable pContractExceptionMT = MscorlibBinder::GetClassIfExist(CLASS__CONTRACTEXCEPTION);
+ _ASSERTE(pContractExceptionMT);
+
+ if (pContractExceptionMT)
+ {
+ LPCUTF8 contractExceptionNamespace = NULL;
+ LPCUTF8 contractExceptionName = pContractExceptionMT->GetFullyQualifiedNameInfo(&contractExceptionNamespace);
+ _ASSERTE(contractExceptionName);
+
+ LPCUTF8 incomingExceptionNamespace = NULL;
+ LPCUTF8 incomingExceptionName = pMT->GetFullyQualifiedNameInfo(&incomingExceptionNamespace);
+ _ASSERTE(incomingExceptionName);
+
+ // NOTE: we can't compare the namespaces since sometimes it comes back as an empty string
+ if (contractExceptionName && incomingExceptionName && strcmp(incomingExceptionName, contractExceptionName) == 0)
+ {
+ WORD requiredNumFields = pContractExceptionMT->GetNumInstanceFields();
+ WORD numFields = pMT->GetNumInstanceFields();
+
+ // now see if this exception object has the required number of fields
+ if (numFields == requiredNumFields)
+ {
+ // getting closer, now look for all three fields on ContractException
+ const int requiredFieldMatches = 3;
+
+ PTR_EEClass pEEClass = pMT->GetClass_NoLogging();
+
+ PTR_FieldDesc pFD = pEEClass->GetFieldDescList();
+ PTR_FieldDesc pFDEnd = pFD + numFields;
+ PTR_FieldDesc pKindFD = NULL;
+
+ int numMatchedFields = 0;
+ while ((pFD < pFDEnd) && (numMatchedFields != requiredFieldMatches))
+ {
+ CorElementType fieldType = pFD->GetFieldType();
+ if (fieldType == ELEMENT_TYPE_I4)
+ {
+ // found the _Kind field
+ LPCUTF8 name = NULL;
+ HRESULT hr = pFD->GetName_NoThrow(&name);
+ if (SUCCEEDED(hr) && name && (strcmp(name, "_Kind") == 0))
+ {
+ // found the _Kind field, remember this FieldDesc in case we have a match
+ pKindFD = pFD;
+ ++numMatchedFields;
+ }
+ }
+ else if (fieldType == ELEMENT_TYPE_CLASS)
+ {
+ LPCUTF8 name = NULL;
+ HRESULT hr = pFD->GetName_NoThrow(&name);
+ if (SUCCEEDED(hr) && name && ((strcmp(name, "_UserMessage") == 0) || (strcmp(name, "_Condition") == 0)))
+ {
+ // found another matching field
+ ++numMatchedFields;
+ }
+ }
+
+ ++pFD;
+ }
+
+ if (numMatchedFields == requiredFieldMatches)
+ {
+ _ASSERTE(pKindFD != NULL);
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+ pKindFD->GetInstanceField(obj, reinterpret_cast<void*>(&result));
+ }
+ }
+ }
+ }
+
+ return result;
+}
diff --git a/src/vm/dwreport.h b/src/vm/dwreport.h
new file mode 100644
index 0000000000..37f65ac084
--- /dev/null
+++ b/src/vm/dwreport.h
@@ -0,0 +1,106 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// FILE: dwreport.h
+//
+// This file contains declarations for functions used to report errors occurring
+// in a process running managed code.
+//
+
+//
+
+//
+// ============================================================================
+
+#ifndef __DWREPORT_H__
+#define __DWREPORT_H__
+
+// return values for DoFaultReport
+enum FaultReportResult
+{
+ FaultReportResultAbort,
+ FaultReportResultDebug,
+ FaultReportResultQuit
+};
+
+void* GetBucketParametersForManagedException(UINT_PTR ip, TypeOfReportedError tore, Thread * pThread, OBJECTREF * exception);
+void FreeBucketParametersForManagedException(void *pgmb);
+
+HRESULT GetBucketParametersForCurrentException(BucketParameters *pParams);
+
+//------------------------------------------------------------------------------
+// DoFaultReport
+//
+// Description
+// Invokes Watson to report an error in the process; see dwreport.cpp for the
+// full behavior.
+//
+// Parameters
+// pExceptionInfo -- information about the exception that caused the error.
+// If the error is not the result of an exception, pass NULL for this
+// parameter
+// Returns
+// FaultReportResult -- enumeration indicating the outcome:
+// FaultReportResultAbort -- if Watson could not execute normally
+// FaultReportResultDebug -- if Watson executed normally, and the user
+// chose to debug the process
+// FaultReportResultQuit -- if Watson executed normally, and the user
+// chose to end the process (e.g. pressed "Send Error Report" or
+// "Don't Send").
+//
+//------------------------------------------------------------------------------
+FaultReportResult DoFaultReport( // Was Watson attempted, successful? Run debugger?
+ EXCEPTION_POINTERS *pExceptionInfo, // Information about the fault.
+ TypeOfReportedError tore); // What sort of error is reported.
+
+BOOL InitializeWatson(COINITIEE fFlags);
+BOOL InitializeWatsonVersionInfo(LPCSTR pVer);
+BOOL IsWatsonEnabled();
+BOOL RegisterOutOfProcessWatsonCallbacks();
+
+int DwGetAssemblyVersion( // Number of characters written.
+ __in_z LPWSTR wszFilePath, // Path to the executable.
+ __inout_ecount(cchBuf) WCHAR *pBuf, // Put description here.
+ int cchBuf);
+
+HRESULT DwGetFileVersionInfo( // S_OK or error
+ __in_z LPCWSTR wszFilePath, // Path to the executable.
+ USHORT& major,
+ USHORT& minor,
+ USHORT& build,
+ USHORT& revision);
+
+BOOL ContainsUnicodeChars(__in_z LPCWSTR wsz);
+
+// Proxy parameters for Resetting Watson buckets
+struct ResetWatsonBucketsParams
+{
+ Thread * m_pThread;
+ EXCEPTION_RECORD * pExceptionRecord;
+};
+void ResetWatsonBucketsFavorWorker(void * pParam);
+
+extern LONG g_watsonAlreadyLaunched;
+
+#if !defined(FEATURE_UEF_CHAINMANAGER)
+extern HandleHolder g_hWatsonCompletionEvent;
+#endif // FEATURE_UEF_CHAINMANAGER
+
+//----------------------------------------------------------------------------
+// Passes data between DoFaultReport and DoFaultReportCallback
+//----------------------------------------------------------------------------
+typedef enum tagEFaultRepRetVal EFaultRepRetVal;
+struct FaultReportInfo
+{
+ BOOL /*in*/ m_fDoReportFault;
+ EXCEPTION_POINTERS /*in*/ *m_pExceptionInfo;
+ DWORD /*in*/ m_threadid;
+ FaultReportResult /*out*/ m_faultReportResult;
+ EFaultRepRetVal /*out*/ m_faultRepRetValResult;
+};
+
+VOID WINAPI DoFaultReportDoFavorCallback(LPVOID pFaultReportInfoAsVoid);
+
+ContractFailureKind GetContractFailureKind(OBJECTREF obj);
+
+#endif // __DWREPORT_H__
diff --git a/src/vm/dynamicmethod.cpp b/src/vm/dynamicmethod.cpp
new file mode 100644
index 0000000000..4ee41faecd
--- /dev/null
+++ b/src/vm/dynamicmethod.cpp
@@ -0,0 +1,1590 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#include "common.h"
+#include "dynamicmethod.h"
+#include "object.h"
+#include "method.hpp"
+#include "comdelegate.h"
+#include "security.h"
+#include "field.h"
+#include "contractimpl.h"
+#include "nibblemapmacros.h"
+#include "stringliteralmap.h"
+#include "virtualcallstub.h"
+
+
+#ifndef DACCESS_COMPILE
+
+// get the method table for dynamic methods
+DynamicMethodTable* DomainFile::GetDynamicMethodTable()
+{
+ CONTRACT (DynamicMethodTable*)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(m_pDynamicMethodTable));
+ }
+ CONTRACT_END;
+
+ if (!m_pDynamicMethodTable)
+ DynamicMethodTable::CreateDynamicMethodTable(&m_pDynamicMethodTable, GetModule(), GetAppDomain());
+
+
+ RETURN m_pDynamicMethodTable;
+}
+
+void ReleaseDynamicMethodTable(DynamicMethodTable *pDynMT)
+{
+ WRAPPER_NO_CONTRACT;
+ if (pDynMT)
+ {
+ pDynMT->Destroy();
+ }
+}
+
+void DynamicMethodTable::CreateDynamicMethodTable(DynamicMethodTable **ppLocation, Module *pModule, AppDomain *pDomain)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(ppLocation));
+ PRECONDITION(CheckPointer(pModule));
+ POSTCONDITION(CheckPointer(*ppLocation));
+ }
+ CONTRACT_END;
+
+ AllocMemTracker amt;
+
+ LoaderHeap* pHeap = pDomain->GetHighFrequencyHeap();
+ _ASSERTE(pHeap);
+
+ if (*ppLocation) RETURN;
+
+ DynamicMethodTable* pDynMT = (DynamicMethodTable*)
+ amt.Track(pHeap->AllocMem(S_SIZE_T(sizeof(DynamicMethodTable))));
+
+ // Note: Memory allocated on loader heap is zero filled
+ // memset((void*)pDynMT, 0, sizeof(DynamicMethodTable));
+
+ if (*ppLocation) RETURN;
+
+ LOG((LF_BCL, LL_INFO100, "Level2 - Creating DynamicMethodTable {0x%p}...\n", pDynMT));
+
+ Holder<DynamicMethodTable*, DoNothing, ReleaseDynamicMethodTable> dynMTHolder(pDynMT);
+ pDynMT->m_Crst.Init(CrstDynamicMT);
+ pDynMT->m_Module = pModule;
+ pDynMT->m_pDomain = pDomain;
+ pDynMT->MakeMethodTable(&amt);
+
+ if (*ppLocation) RETURN;
+
+ if (FastInterlockCompareExchangePointer(ppLocation, pDynMT, NULL) != NULL)
+ {
+ LOG((LF_BCL, LL_INFO100, "Level2 - Another thread got here first - deleting DynamicMethodTable {0x%p}...\n", pDynMT));
+ RETURN;
+ }
+
+ dynMTHolder.SuppressRelease();
+
+ amt.SuppressRelease();
+ LOG((LF_BCL, LL_INFO10, "Level1 - DynamicMethodTable created {0x%p}...\n", pDynMT));
+ RETURN;
+}
+
+void DynamicMethodTable::MakeMethodTable(AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ m_pMethodTable = CreateMinimalMethodTable(m_Module, m_pDomain->GetHighFrequencyHeap(), pamTracker);
+}
+
+void DynamicMethodTable::Destroy()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Go over all DynamicMethodDescs and make sure that they are destroyed
+
+ if (m_pMethodTable != NULL)
+ {
+ MethodTable::IntroducedMethodIterator it(m_pMethodTable);
+ for (; it.IsValid(); it.Next())
+ {
+ DynamicMethodDesc *pMD = (DynamicMethodDesc*)it.GetMethodDesc();
+ pMD->Destroy(TRUE /* fDomainUnload */);
+ }
+ }
+
+ m_Crst.Destroy();
+ LOG((LF_BCL, LL_INFO10, "Level1 - DynamicMethodTable destroyed {0x%p}\n", this));
+}
+
+void DynamicMethodTable::AddMethodsToList()
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACT_END;
+
+ AllocMemTracker amt;
+
+ LoaderHeap* pHeap = m_pDomain->GetHighFrequencyHeap();
+ _ASSERTE(pHeap);
+
+ //
+ // allocate as many chunks as needed to hold the methods
+ //
+ MethodDescChunk* pChunk = MethodDescChunk::CreateChunk(pHeap, 0 /* one chunk of maximum size */,
+ mcDynamic, TRUE /* fNonVtableSlot */, TRUE /* fNativeCodeSlot */, FALSE /* fComPlusCallInfo */, m_pMethodTable, &amt);
+ if (m_DynamicMethodList) RETURN;
+
+ int methodCount = pChunk->GetCount();
+
+ BYTE* pResolvers = (BYTE*)amt.Track(pHeap->AllocMem(S_SIZE_T(sizeof(LCGMethodResolver)) * S_SIZE_T(methodCount)));
+ if (m_DynamicMethodList) RETURN;
+
+ DynamicMethodDesc *pNewMD = (DynamicMethodDesc *)pChunk->GetFirstMethodDesc();
+ DynamicMethodDesc *pPrevMD = NULL;
+ // now go through all the methods in the chunk and link them
+ for(int i = 0; i < methodCount; i++)
+ {
+ _ASSERTE(pNewMD->GetClassification() == mcDynamic);
+
+ pNewMD->SetMemberDef(0);
+ pNewMD->SetSlot(MethodTable::NO_SLOT); // we can't ever use the slot for dynamic methods
+ pNewMD->SetStatic();
+
+ pNewMD->m_dwExtendedFlags = mdPublic | mdStatic | DynamicMethodDesc::nomdLCGMethod;
+
+ LCGMethodResolver* pResolver = new (pResolvers) LCGMethodResolver();
+ pResolver->m_pDynamicMethod = pNewMD;
+ pResolver->m_DynamicMethodTable = this;
+ pNewMD->m_pResolver = pResolver;
+
+ pNewMD->SetTemporaryEntryPoint(m_pDomain->GetLoaderAllocator(), &amt);
+
+#ifdef _DEBUG
+ pNewMD->m_pDebugMethodTable.SetValue(m_pMethodTable);
+#endif
+
+ if (pPrevMD)
+ {
+ pPrevMD->GetLCGMethodResolver()->m_next = pNewMD;
+ }
+ pPrevMD = pNewMD;
+ pNewMD = (DynamicMethodDesc *)(dac_cast<TADDR>(pNewMD) + pNewMD->SizeOf());
+
+ pResolvers += sizeof(LCGMethodResolver);
+ }
+
+ if (m_DynamicMethodList) RETURN;
+
+ {
+ // publish method list and method table
+ LockHolder lh(this);
+ if (m_DynamicMethodList) RETURN;
+
+ // publish the new method descs on the method table
+ m_pMethodTable->GetClass()->AddChunk(pChunk);
+ m_DynamicMethodList = (DynamicMethodDesc*)pChunk->GetFirstMethodDesc();
+ }
+
+ amt.SuppressRelease();
+}
+
+DynamicMethodDesc* DynamicMethodTable::GetDynamicMethod(BYTE *psig, DWORD sigSize, PTR_CUTF8 name)
+{
+ CONTRACT (DynamicMethodDesc*)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(psig));
+ PRECONDITION(sigSize > 0);
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ LOG((LF_BCL, LL_INFO10000, "Level4 - Getting DynamicMethod\n"));
+
+ DynamicMethodDesc *pNewMD = NULL;
+
+ for (;;)
+ {
+ {
+ LockHolder lh(this);
+ pNewMD = m_DynamicMethodList;
+ if (pNewMD)
+ {
+ m_DynamicMethodList = pNewMD->GetLCGMethodResolver()->m_next;
+#ifdef _DEBUG
+ m_Used++;
+#endif
+ break;
+ }
+ }
+
+ LOG((LF_BCL, LL_INFO1000, "Level4 - DynamicMethod unavailable\n"));
+
+ // need to create more methoddescs
+ AddMethodsToList();
+ }
+ _ASSERTE(pNewMD != NULL);
+
+ // Reset the method desc into pristine state
+
+ // Note: Reset has THROWS contract since it may allocate jump stub. It will never throw here
+ // since it will always reuse the existing jump stub.
+ pNewMD->Reset();
+
+ LOG((LF_BCL, LL_INFO1000, "Level3 - DynamicMethod obtained {0x%p} (used %d)\n", pNewMD, m_Used));
+
+ // the stored sig part of the method desc
+ pNewMD->SetStoredMethodSig((PCCOR_SIGNATURE)psig, sigSize);
+ // the dynamic part of the method desc
+ pNewMD->m_pszMethodName = name;
+
+ pNewMD->m_dwExtendedFlags = mdPublic | mdStatic | DynamicMethodDesc::nomdLCGMethod;
+
+#ifdef _DEBUG
+ pNewMD->m_pszDebugMethodName = name;
+ pNewMD->m_pszDebugClassName = (LPUTF8)"dynamicclass";
+ pNewMD->m_pszDebugMethodSignature = "DynamicMethod Signature not available";
+#endif // _DEBUG
+ pNewMD->SetNotInline(TRUE);
+ pNewMD->GetLCGMethodResolver()->Reset();
+
+ RETURN pNewMD;
+}
+
+void DynamicMethodTable::LinkMethod(DynamicMethodDesc *pMethod)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMethod));
+ }
+ CONTRACT_END;
+
+ LOG((LF_BCL, LL_INFO10000, "Level4 - Returning DynamicMethod to free list {0x%p} (used %d)\n", pMethod, m_Used));
+ {
+ LockHolder lh(this);
+ pMethod->GetLCGMethodResolver()->m_next = m_DynamicMethodList;
+ m_DynamicMethodList = pMethod;
+#ifdef _DEBUG
+ m_Used--;
+#endif
+ }
+
+ RETURN;
+}
+
+
+//
+// CodeHeap implementation
+//
+HeapList* HostCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, EEJitManager *pJitManager)
+{
+ CONTRACT (HeapList*)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ size_t MaxCodeHeapSize = pInfo->getRequestSize();
+ size_t ReserveBlockSize = MaxCodeHeapSize + sizeof(HeapList);
+
+ ReserveBlockSize += sizeof(TrackAllocation) + PAGE_SIZE; // make sure we have enough for the allocation
+ // take a conservative size for the nibble map, we may change that later if appropriate
+ size_t nibbleMapSize = ROUND_UP_TO_PAGE(HEAP2MAPSIZE(ROUND_UP_TO_PAGE(ALIGN_UP(ReserveBlockSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY))));
+ size_t heapListSize = (sizeof(HeapList) + CODE_SIZE_ALIGN - 1) & (~(CODE_SIZE_ALIGN - 1));
+ size_t otherData = heapListSize;
+ // make conservative estimate of the memory needed for otherData
+ size_t reservedData = (otherData + HOST_CODEHEAP_SIZE_ALIGN - 1) & (~(HOST_CODEHEAP_SIZE_ALIGN - 1));
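+ // Worked example with illustrative values (not the actual constants): if
+ // sizeof(HeapList) were 0xE0, CODE_SIZE_ALIGN 16, and HOST_CODEHEAP_SIZE_ALIGN 64,
+ // heapListSize would stay 0xE0 (already 16-aligned) and reservedData would round
+ // up to 0x100, so the private data always ends on a HOST_CODEHEAP_SIZE_ALIGN boundary.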
+
+ NewHolder<HostCodeHeap> pCodeHeap(new HostCodeHeap(ReserveBlockSize + nibbleMapSize + reservedData, pJitManager, pInfo));
+ LOG((LF_BCL, LL_INFO10, "Level2 - CodeHeap creation {0x%p} - requested 0x%p, size available 0x%p, private data 0x%p, nibble map 0x%p\n",
+ (HostCodeHeap*)pCodeHeap, ReserveBlockSize, pCodeHeap->m_TotalBytesAvailable, reservedData, nibbleMapSize));
+
+ BYTE *pBuffer = pCodeHeap->InitCodeHeapPrivateData(ReserveBlockSize, reservedData, nibbleMapSize);
+ _ASSERTE(((size_t)pBuffer & PAGE_MASK) == 0);
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap creation {0x%p} - base addr 0x%p, size available 0x%p, nibble map ptr 0x%p\n",
+ (HostCodeHeap*)pCodeHeap, pCodeHeap->m_pBaseAddr, pCodeHeap->m_TotalBytesAvailable, pBuffer));
+
+ void* pHdrMap = pBuffer;
+
+ HeapList *pHp = (HeapList*)pCodeHeap->AllocMemory(otherData, 0);
+ pHp->pHeap = (PTR_CodeHeap)pCodeHeap;
+ // wire it back
+ pCodeHeap->m_pHeapList = (PTR_HeapList)pHp;
+ // assign beginning of nibble map
+ pHp->pHdrMap = (PTR_DWORD)(DWORD*)pHdrMap;
+
+ TrackAllocation *pTracker = *((TrackAllocation**)pHp - 1);
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap creation {0x%p} - size available 0x%p, private data ptr [0x%p, 0x%p]\n",
+ (HostCodeHeap*)pCodeHeap, pCodeHeap->m_TotalBytesAvailable, pTracker, pTracker->size));
+
+ // need to update the reserved data
+ pCodeHeap->m_ReservedData += pTracker->size;
+
+ pHp->startAddress = dac_cast<TADDR>(pCodeHeap->m_pBaseAddr) + pTracker->size;
+ pHp->mapBase = ROUND_DOWN_TO_PAGE(pHp->startAddress); // round down to the next page boundary
+ pHp->endAddress = pHp->startAddress;
+
+ pHp->maxCodeHeapSize = pCodeHeap->m_TotalBytesAvailable - pTracker->size;
+ _ASSERTE(pHp->maxCodeHeapSize >= MaxCodeHeapSize);
+
+ // We do not need to memset this memory, since ClrVirtualAlloc() guarantees that the memory is zero.
+ // Furthermore, if we avoid writing to it, these pages don't come into our working set
+
+ pHp->bFull = FALSE;
+ pHp->cBlocks = 0;
+#ifdef _WIN64
+ emitJump(pHp->CLRPersonalityRoutine, (void *)ProcessCLRException);
+#endif
+
+ // zero the ref count; the real counting starts now
+ pCodeHeap->m_AllocationCount = 0;
+
+ pCodeHeap.SuppressRelease();
+
+ LOG((LF_BCL, LL_INFO10, "Level1 - CodeHeap created {0x%p}\n", (HostCodeHeap*)pCodeHeap));
+ RETURN pHp;
+}
+
+HostCodeHeap::HostCodeHeap(size_t ReserveBlockSize, EEJitManager *pJitManager, CodeHeapRequestInfo *pInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ // reserve the memory, rounding ReserveBlockSize up to VIRTUAL_ALLOC_RESERVE_GRANULARITY
+ ReserveBlockSize = ALIGN_UP(ReserveBlockSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY);
+
+ if (pInfo->m_loAddr != NULL || pInfo->m_hiAddr != NULL)
+ {
+ m_pBaseAddr = ClrVirtualAllocWithinRange(pInfo->m_loAddr, pInfo->m_hiAddr,
+ ReserveBlockSize, MEM_RESERVE, PAGE_NOACCESS);
+ if (!m_pBaseAddr)
+ ThrowOutOfMemoryWithinRange();
+ }
+ else
+ {
+ m_pBaseAddr = ClrVirtualAllocExecutable(ReserveBlockSize, MEM_RESERVE, PAGE_NOACCESS);
+ if (!m_pBaseAddr)
+ ThrowOutOfMemory();
+ }
+
+ m_pLastAvailableCommittedAddr = m_pBaseAddr;
+ m_TotalBytesAvailable = ReserveBlockSize;
+ m_AllocationCount = 0;
+ m_ReservedData = 0;
+ m_pJitManager = (PTR_EEJitManager)pJitManager;
+ m_pFreeList = NULL;
+ m_pAllocator = pInfo->m_pAllocator;
+ m_pNextHeapToRelease = NULL;
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap creation {0x%p, vt(0x%x)} - base addr 0x%p, total size 0x%p\n",
+ this, *(size_t*)this, m_pBaseAddr, m_TotalBytesAvailable));
+}
+
+HostCodeHeap::~HostCodeHeap()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pBaseAddr)
+ ClrVirtualFree(m_pBaseAddr, 0, MEM_RELEASE);
+ LOG((LF_BCL, LL_INFO10, "Level1 - CodeHeap destroyed {0x%p}\n", this));
+}
+
+BYTE* HostCodeHeap::InitCodeHeapPrivateData(size_t ReserveBlockSize, size_t otherData, size_t nibbleMapSize)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ size_t nibbleNewSize = ROUND_UP_TO_PAGE(HEAP2MAPSIZE(ROUND_UP_TO_PAGE(m_TotalBytesAvailable)));
+ if (m_TotalBytesAvailable - nibbleNewSize < ReserveBlockSize + otherData)
+ {
+ // the new allocation for the nibble map would not leave enough room for the requested memory; bail out
+ nibbleNewSize = nibbleMapSize;
+ }
+
+ BYTE *pAddress = (BYTE*)ROUND_DOWN_TO_PAGE(dac_cast<TADDR>(m_pLastAvailableCommittedAddr) +
+ m_TotalBytesAvailable - nibbleNewSize);
+ _ASSERTE(m_pLastAvailableCommittedAddr + m_TotalBytesAvailable >= pAddress + nibbleNewSize);
+ if (NULL == ClrVirtualAlloc(pAddress, nibbleNewSize, MEM_COMMIT, PAGE_EXECUTE_READWRITE))
+ ThrowOutOfMemory();
+ m_TotalBytesAvailable = pAddress - m_pLastAvailableCommittedAddr;
+ _ASSERTE(m_TotalBytesAvailable >= ReserveBlockSize + otherData);
+ return pAddress;
+}
+
+// used to flag a block that is too small
+#define UNUSABLE_BLOCK ((size_t)-1)
+
+size_t HostCodeHeap::GetPadding(TrackAllocation *pCurrent, size_t size, DWORD alignment)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (pCurrent->size < size)
+ return UNUSABLE_BLOCK;
+ size_t padding = 0;
+ if (alignment)
+ {
+ size_t pointer = (size_t)((BYTE*)pCurrent + sizeof(TrackAllocation));
+ padding = ((pointer + (size_t)alignment - 1) & (~((size_t)alignment - 1))) - pointer;
+ }
+ if (pCurrent->size < size + padding)
+ return UNUSABLE_BLOCK;
+ return padding;
+}
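+// Worked example for GetPadding, with hypothetical numbers: if
+// sizeof(TrackAllocation) == 16, the free block sits at pCurrent == 0x1008 and
+// alignment == 16, the payload would start at 0x1018; rounding up to 0x1020
+// gives padding == 8, and the block is usable only if pCurrent->size >= size + 8.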
+
+void* HostCodeHeap::AllocFromFreeList(size_t size, DWORD alignment)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pFreeList)
+ {
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Alloc size corrected 0x%X for free list\n", this, size));
+ // walk the list looking for a block with enough capacity
+ TrackAllocation *pCurrent = m_pFreeList;
+ TrackAllocation *pPrevious = NULL;
+ while (pCurrent)
+ {
+ // GetPadding will return UNUSABLE_BLOCK if the current block is not big enough
+ size_t padding = GetPadding(pCurrent, size, alignment);
+ if (UNUSABLE_BLOCK != padding)
+ {
+ // found a block
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Block found, size 0x%X\n", this, pCurrent->size));
+ size_t realSize = size + padding;
+ BYTE *pPointer = (BYTE*)pCurrent + sizeof(TrackAllocation) + padding;
+ _ASSERTE((size_t)(pPointer - (BYTE*)pCurrent) >= sizeof(TrackAllocation));
+
+ // The space left is not big enough for a new block; let's just
+ // update the TrackAllocation record for the current block
+ if (pCurrent->size - realSize <= sizeof(TrackAllocation))
+ {
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Item removed %p, size 0x%X\n", this, pCurrent, pCurrent->size));
+ // remove current
+ if (pPrevious)
+ {
+ pPrevious->pNext = pCurrent->pNext;
+ }
+ else
+ {
+ m_pFreeList = pCurrent->pNext;
+ }
+ }
+ else
+ {
+ // create a new TrackAllocation after the memory we just allocated and insert it into the free list
+ TrackAllocation *pNewCurrent = (TrackAllocation*)((BYTE*)pCurrent + realSize);
+ pNewCurrent->pNext = pCurrent->pNext;
+ pNewCurrent->size = pCurrent->size - realSize;
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Item changed %p, new size 0x%X\n", this, pNewCurrent, pNewCurrent->size));
+ if (pPrevious)
+ {
+ pPrevious->pNext = pNewCurrent;
+ }
+ else
+ {
+ m_pFreeList = pNewCurrent;
+ }
+
+ // We only need to update the size of the current block if we are creating a new block
+ pCurrent->size = realSize;
+ }
+
+ // now fill all the padding data correctly
+ pCurrent->pHeap = this;
+ // store the location of the TrackAllocation record right before pPointer
+ *((void**)pPointer - 1) = pCurrent;
+
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Allocation returned %p, size 0x%X - data -> %p\n", this, pCurrent, pCurrent->size, pPointer));
+ return pPointer;
+ }
+ pPrevious = pCurrent;
+ pCurrent = pCurrent->pNext;
+ }
+ }
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - No block in free list for size 0x%X\n", this, size));
+ return NULL;
+}
+
+void HostCodeHeap::AddToFreeList(TrackAllocation *pBlockToInsert)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Add to FreeList [%p, 0x%X]\n", this, pBlockToInsert, pBlockToInsert->size));
+
+ // append to the list in the proper position and coalesce if needed
+ if (m_pFreeList)
+ {
+ TrackAllocation *pCurrent = m_pFreeList;
+ TrackAllocation *pPrevious = NULL;
+ while (pCurrent)
+ {
+ if (pCurrent > pBlockToInsert)
+ {
+ // found the point of insertion
+ pBlockToInsert->pNext = pCurrent;
+ if (pPrevious)
+ {
+ pPrevious->pNext = pBlockToInsert;
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] -> [%p, 0x%X] -> [%p, 0x%X]\n", this,
+ pPrevious, pPrevious->size,
+ pBlockToInsert, pBlockToInsert->size,
+ pCurrent, pCurrent->size));
+ }
+ else
+ {
+ m_pFreeList = pBlockToInsert;
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] to head\n", this, pBlockToInsert, pBlockToInsert->size));
+ }
+
+ // check for coalescing
+ if ((BYTE*)pBlockToInsert + pBlockToInsert->size == (BYTE*)pCurrent)
+ {
+ // coalesce with next
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Coalesce block [%p, 0x%X] with [%p, 0x%X] - new size 0x%X\n", this,
+ pBlockToInsert, pBlockToInsert->size,
+ pCurrent, pCurrent->size,
+ pCurrent->size + pBlockToInsert->size));
+ pBlockToInsert->pNext = pCurrent->pNext;
+ pBlockToInsert->size += pCurrent->size;
+ }
+
+ if (pPrevious && (BYTE*)pPrevious + pPrevious->size == (BYTE*)pBlockToInsert)
+ {
+ // coalesce with previous
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Coalesce block [%p, 0x%X] with [%p, 0x%X] - new size 0x%X\n", this,
+ pPrevious, pPrevious->size,
+ pBlockToInsert, pBlockToInsert->size,
+ pPrevious->size + pBlockToInsert->size));
+ pPrevious->pNext = pBlockToInsert->pNext;
+ pPrevious->size += pBlockToInsert->size;
+ }
+
+ return;
+ }
+ pPrevious = pCurrent;
+ pCurrent = pCurrent->pNext;
+ }
+ _ASSERTE(pPrevious && pCurrent == NULL);
+ pBlockToInsert->pNext = NULL;
+ // last in the list
+ if ((BYTE*)pPrevious + pPrevious->size == (BYTE*)pBlockToInsert)
+ {
+ // coalesce with previous
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Coalesce block [%p, 0x%X] with [%p, 0x%X] - new size 0x%X\n", this,
+ pPrevious, pPrevious->size,
+ pBlockToInsert, pBlockToInsert->size,
+ pPrevious->size + pBlockToInsert->size));
+ pPrevious->size += pBlockToInsert->size;
+ }
+ else
+ {
+ pPrevious->pNext = pBlockToInsert;
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] to end after [%p, 0x%X]\n", this,
+ pBlockToInsert, pBlockToInsert->size,
+ pPrevious, pPrevious->size));
+ }
+
+ return;
+
+ }
+ // first in the list
+ pBlockToInsert->pNext = m_pFreeList;
+ m_pFreeList = pBlockToInsert;
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Insert block [%p, 0x%X] to head\n", this,
+ m_pFreeList, m_pFreeList->size));
+}
+
+void* HostCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(header == sizeof(CodeHeader));
+
+ // The code allocator has to guarantee that there is only one entrypoint per nibble map entry.
+ // It is guaranteed because the HostCodeHeap allocator always aligns the size up to HOST_CODEHEAP_SIZE_ALIGN,
+ // and because the size of nibble map entries (BYTES_PER_BUCKET) is smaller than HOST_CODEHEAP_SIZE_ALIGN.
+ // Assert the latter fact here.
+ _ASSERTE(HOST_CODEHEAP_SIZE_ALIGN >= BYTES_PER_BUCKET);
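+ // For example (illustrative constants only): if HOST_CODEHEAP_SIZE_ALIGN were 64
+ // and BYTES_PER_BUCKET were 32, any two entrypoints would be at least 64 bytes
+ // apart and so could never fall into the same 32-byte bucket.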
+
+ BYTE * pMem = (BYTE *)AllocMemory_NoThrow(size + sizeof(CodeHeader) + (alignment - 1), sizeof(void *));
+ if (pMem == NULL)
+ return NULL;
+
+ BYTE * pCode = (BYTE *)ALIGN_UP(pMem + sizeof(CodeHeader), alignment);
+
+ // Update tracker to account for the alignment we have just added
+ TrackAllocation *pTracker = *((TrackAllocation **)pMem - 1);
+
+ CodeHeader * pHdr = dac_cast<PTR_CodeHeader>(pCode) - 1;
+ *((TrackAllocation **)(pHdr) - 1) = pTracker;
+
+ return pCode;
+}
+
+void* HostCodeHeap::AllocMemory(size_t size, DWORD alignment)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ void *pAllocation = AllocMemory_NoThrow(size, alignment);
+ if (!pAllocation)
+ ThrowOutOfMemory();
+ return pAllocation;
+}
+
+void* HostCodeHeap::AllocMemory_NoThrow(size_t size, DWORD alignment)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ if (g_pConfig->ShouldInjectFault(INJECTFAULT_DYNAMICCODEHEAP))
+ {
+ char *a = new (nothrow) char;
+ if (a == NULL)
+ return NULL;
+ delete a;
+ }
+#endif // _DEBUG
+
+ // honor alignment (should assert the value is proper)
+ if (alignment)
+ size = (size + (size_t)alignment - 1) & (~((size_t)alignment - 1));
+ // align size to HOST_CODEHEAP_SIZE_ALIGN always
+ size = (size + HOST_CODEHEAP_SIZE_ALIGN - 1) & (~(HOST_CODEHEAP_SIZE_ALIGN - 1));
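+ // e.g. (illustrative constants): with alignment == 8 and HOST_CODEHEAP_SIZE_ALIGN == 64,
+ // a 70-byte request becomes 72 after the alignment round-up and 128 after the
+ // HOST_CODEHEAP_SIZE_ALIGN round-up, before sizeof(TrackAllocation) is added.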
+
+ size += sizeof(TrackAllocation);
+
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Allocation requested 0x%X\n", this, size));
+
+ void *pAddr = AllocFromFreeList(size, alignment);
+ if (!pAddr)
+ {
+ // walk free list to end to find available space
+ size_t availableInFreeList = 0;
+ TrackAllocation *pCurrentBlock = m_pFreeList;
+ TrackAllocation *pLastBlock = NULL;
+ while (pCurrentBlock)
+ {
+ pLastBlock = pCurrentBlock;
+ pCurrentBlock = pCurrentBlock->pNext;
+ }
+ if (pLastBlock && (BYTE*)pLastBlock + pLastBlock->size == m_pLastAvailableCommittedAddr)
+ {
+ availableInFreeList = pLastBlock->size;
+ }
+ _ASSERTE(size > availableInFreeList);
+ size_t sizeToCommit = size - availableInFreeList;
+ sizeToCommit = (sizeToCommit + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)); // round up to page
+
+ if (m_pLastAvailableCommittedAddr + sizeToCommit <= m_pBaseAddr + m_TotalBytesAvailable)
+ {
+ if (NULL == ClrVirtualAlloc(m_pLastAvailableCommittedAddr, sizeToCommit, MEM_COMMIT, PAGE_EXECUTE_READWRITE))
+ {
+ LOG((LF_BCL, LL_ERROR, "CodeHeap [0x%p] - VirtualAlloc failed\n", this));
+ return NULL;
+ }
+
+ TrackAllocation *pBlockToInsert = (TrackAllocation*)(void*)m_pLastAvailableCommittedAddr;
+ pBlockToInsert->pNext = NULL;
+ pBlockToInsert->size = sizeToCommit;
+ m_pLastAvailableCommittedAddr += sizeToCommit;
+ AddToFreeList(pBlockToInsert);
+ pAddr = AllocFromFreeList(size, alignment);
+ }
+ else
+ {
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - allocation failed:\n\tm_pLastAvailableCommittedAddr: 0x%X\n\tsizeToCommit: 0x%X\n\tm_pBaseAddr: 0x%X\n\tm_TotalBytesAvailable: 0x%X\n", this, m_pLastAvailableCommittedAddr, sizeToCommit, m_pBaseAddr, m_TotalBytesAvailable));
+ return NULL;
+ }
+ }
+
+ _ASSERTE(pAddr);
+ // ref count the whole heap
+ m_AllocationCount++;
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - ref count %d\n", this, m_AllocationCount));
+ return pAddr;
+}
+
+#endif //!DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+void HostCodeHeap::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ WRAPPER_NO_CONTRACT;
+
+ DAC_ENUM_DTHIS();
+
+ TADDR addr = dac_cast<TADDR>(m_pBaseAddr);
+ size_t size = dac_cast<TADDR>(m_pLastAvailableCommittedAddr) - addr;
+
+#if (_DEBUG)
+ // Test hook: when testing on debug builds, we want an easy way to verify that the while
+ // loop below terminates correctly in the face of ridiculous stuff from the target.
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DumpGeneration_IntentionallyCorruptDataFromTarget) == 1)
+ {
+ // Pretend the object is very large.
+ size |= 0xefefefef << 28;
+ }
+#endif // (_DEBUG)
+
+ while (size)
+ {
+ ULONG32 enumSize;
+
+ if (size > 0x80000000)
+ {
+ enumSize = 0x80000000;
+ }
+ else
+ {
+ enumSize = (ULONG32)size;
+ }
+
+ // If we can't read the target memory, stop immediately so we don't work
+ // with broken data.
+ if (!DacEnumMemoryRegion(addr, enumSize))
+ break;
+
+ addr += enumSize;
+ size -= enumSize;
+ }
+}
+#endif // DACCESS_COMPILE
+
+// static
+struct HostCodeHeap::TrackAllocation * HostCodeHeap::GetTrackAllocation(TADDR codeStart)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ CodeHeader * pHdr = dac_cast<PTR_CodeHeader>(codeStart) - 1;
+
+ // Pointer to the TrackAllocation record is stored just before the code header
+ return *((TrackAllocation **)(pHdr) - 1);
+}
+
+HostCodeHeap* HostCodeHeap::GetCodeHeap(TADDR codeStart)
+{
+ WRAPPER_NO_CONTRACT;
+ return HostCodeHeap::GetTrackAllocation(codeStart)->pHeap;
+}
+
+
+#ifndef DACCESS_COMPILE
+
+void HostCodeHeap::FreeMemForCode(void * codeStart)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ TrackAllocation *pTracker = HostCodeHeap::GetTrackAllocation((TADDR)codeStart);
+ AddToFreeList(pTracker);
+
+ m_AllocationCount--;
+ LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap released [0x%p, vt(0x%x)] - ref count %d\n", this, *(size_t*)this, m_AllocationCount));
+
+ if (m_AllocationCount == 0)
+ {
+ m_pJitManager->AddToCleanupList(this);
+ }
+}
+
+//
+// Implementation for DynamicMethodDesc declared in method.hpp
+//
+void DynamicMethodDesc::Destroy(BOOL fDomainUnload)
+{
+ CONTRACTL
+ {
+ if (fDomainUnload) NOTHROW; else THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsDynamicMethod());
+ LoaderAllocator *pLoaderAllocator = GetLoaderAllocatorForCode();
+
+ LOG((LF_BCL, LL_INFO1000, "Level3 - Destroying DynamicMethod {0x%p}\n", this));
+ if (m_pSig)
+ {
+ delete[] (BYTE*)m_pSig;
+ m_pSig = NULL;
+ }
+ m_cSig = 0;
+ if (m_pszMethodName)
+ {
+ delete[] m_pszMethodName;
+ m_pszMethodName = NULL;
+ }
+
+ GetLCGMethodResolver()->Destroy(fDomainUnload);
+
+ if (pLoaderAllocator->IsCollectible() && !fDomainUnload)
+ {
+ if (pLoaderAllocator->Release())
+ {
+ GCX_PREEMP();
+ LoaderAllocator::GCLoaderAllocators(pLoaderAllocator->GetDomain()->AsAppDomain());
+ }
+ }
+}
+
+//
+// The resolver object is reused when the method is destroyed,
+// this will reset its state for the next use.
+//
+void LCGMethodResolver::Reset()
+{
+ m_DynamicStringLiterals = NULL;
+ m_recordCodePointer = NULL;
+ m_UsedIndCellList = NULL;
+ m_jumpStubBlock = NULL;
+ m_next = NULL;
+ m_Code = NULL;
+}
+
+//
+// Recycle all the indcells in m_UsedIndCellList by adding them to the free list
+//
+void LCGMethodResolver::RecycleIndCells()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ // Append the list of indirection cells used by this dynamic method to the free list
+ IndCellList * list = m_UsedIndCellList;
+ if (list)
+ {
+ BYTE * cellhead = list->indcell;
+ BYTE * cellprev = NULL;
+ BYTE * cellcurr = NULL;
+
+ // Build a linked list of indirection cells from m_UsedIndCellList.
+ // No need to lock the new list because this method is only called during the finalization of
+ // DynamicResolver.DestroyScout and at that time no one else should be modifying m_UsedIndCellList.
+ while (list)
+ {
+ cellcurr = list->indcell;
+ _ASSERTE(cellcurr != NULL);
+
+ if (cellprev)
+ *((BYTE**)cellprev) = cellcurr;
+
+ list = list->pNext;
+ cellprev = cellcurr;
+ }
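+
+ // For example, three tracked cells A, B and C end up chained as A -> B -> C
+ // (each cell's first pointer-sized slot pointing at the next), with
+ // cellhead == A and cellcurr == C; that whole chain is handed to the stub
+ // manager below.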
+
+ // Insert the linked list to the free list of the VirtualCallStubManager of the current domain.
+ // We should use GetLoaderAllocatorForCode because that is where the ind cell was allocated.
+ LoaderAllocator *pLoaderAllocator = GetDynamicMethod()->GetLoaderAllocatorForCode();
+ VirtualCallStubManager *pMgr = pLoaderAllocator->GetVirtualCallStubManager();
+ pMgr->InsertIntoRecycledIndCellList_Locked(cellhead, cellcurr);
+ m_UsedIndCellList = NULL;
+ }
+}
+
+void LCGMethodResolver::Destroy(BOOL fDomainUnload)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ LOG((LF_BCL, LL_INFO100, "Level2 - Resolver - Destroying Resolver {0x%p}\n", this));
+ if (m_Code)
+ {
+ delete[] m_Code;
+ m_Code = NULL;
+ }
+ m_CodeSize = 0;
+ if (!m_LocalSig.IsNull())
+ {
+ delete[] m_LocalSig.GetPtr();
+ m_LocalSig = SigPointer();
+ }
+
+ // Get the global string literal interning map
+ GlobalStringLiteralMap* pStringLiteralMap = SystemDomain::GetGlobalStringLiteralMapNoCreate();
+
+ // release references to all the string literals used in this Dynamic Method
+ if (pStringLiteralMap != NULL)
+ {
+ // lock the global string literal interning map
+ // we cannot use GetGlobalStringLiteralMap() here because it might throw
+ CrstHolder gch(pStringLiteralMap->GetHashTableCrstGlobal());
+
+ // Access to m_DynamicStringLiterals doesn't need to be synchronized because
+ // this can be run on only one thread: the finalizer thread.
+ while (m_DynamicStringLiterals != NULL)
+ {
+ m_DynamicStringLiterals->m_pEntry->Release();
+ m_DynamicStringLiterals = m_DynamicStringLiterals->m_pNext;
+ }
+ }
+
+
+ if (!fDomainUnload)
+ {
+ // No need to recycle if the domain is unloading.
+ // Note that we need to do this before m_jitTempData is deleted
+ RecycleIndCells();
+ }
+
+ m_jitMetaHeap.Delete();
+ m_jitTempData.Delete();
+
+
+ // Per-appdomain resources have already been reclaimed if the appdomain is being unloaded. Do not try to
+ // release them again.
+ if (!fDomainUnload)
+ {
+ if (m_recordCodePointer)
+ {
+#if defined(_TARGET_AMD64_)
+ // Remove the unwind information (if applicable)
+ UnwindInfoTable::UnpublishUnwindInfoForMethod((TADDR)m_recordCodePointer);
+#endif // defined(_TARGET_AMD64_)
+
+ HostCodeHeap *pHeap = HostCodeHeap::GetCodeHeap((TADDR)m_recordCodePointer);
+ LOG((LF_BCL, LL_INFO1000, "Level3 - Resolver {0x%p} - Release reference to heap {%p, vt(0x%x)} \n", this, pHeap, *(size_t*)pHeap));
+ pHeap->m_pJitManager->FreeCodeMemory(pHeap, m_recordCodePointer);
+
+ m_recordCodePointer = NULL;
+ }
+
+ JumpStubBlockHeader* current = m_jumpStubBlock;
+ JumpStubBlockHeader* next;
+ while (current)
+ {
+ next = current->m_next;
+
+ HostCodeHeap *pHeap = current->GetHostCodeHeap();
+ LOG((LF_BCL, LL_INFO1000, "Level3 - Resolver {0x%p} - Release reference to heap {%p, vt(0x%x)} \n", current, pHeap, *(size_t*)pHeap));
+ pHeap->m_pJitManager->FreeCodeMemory(pHeap, current);
+
+ current = next;
+ }
+ m_jumpStubBlock = NULL;
+
+ if (m_managedResolver)
+ {
+ ::DestroyLongWeakHandle(m_managedResolver);
+ m_managedResolver = NULL;
+ }
+
+ m_DynamicMethodTable->LinkMethod(m_pDynamicMethod);
+ }
+}
+
+void LCGMethodResolver::FreeCompileTimeState()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ //m_jitTempData.Delete();
+}
+
+
+
+void LCGMethodResolver::GetJitContext(SecurityControlFlags * securityControlFlags,
+ TypeHandle *typeOwner)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(securityControlFlags));
+ PRECONDITION(CheckPointer(typeOwner));
+ } CONTRACTL_END;
+
+ GCX_COOP();
+ GetJitContextCoop(securityControlFlags, typeOwner);
+}
+
+void LCGMethodResolver::GetJitContextCoop(SecurityControlFlags * securityControlFlags,
+ TypeHandle *typeOwner)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(securityControlFlags));
+ PRECONDITION(CheckPointer(typeOwner));
+ } CONTRACTL_END;
+
+ MethodDescCallSite getJitContext(METHOD__RESOLVER__GET_JIT_CONTEXT, m_managedResolver);
+
+ OBJECTREF resolver = ObjectFromHandle(m_managedResolver);
+ _ASSERTE(resolver); // gc root must be up the stack
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(resolver),
+ PtrToArgSlot(securityControlFlags),
+ };
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)getJitContext.Call_RetOBJECTREF(args);
+ *typeOwner = refType != NULL ? refType->GetType() : TypeHandle();
+
+}
+
+ChunkAllocator* LCGMethodResolver::GetJitMetaHeap()
+{
+ LIMITED_METHOD_CONTRACT;
+ return &m_jitMetaHeap;
+}
+
+BYTE* LCGMethodResolver::GetCodeInfo(unsigned *pCodeSize, unsigned *pStackSize, CorInfoOptions *pOptions, unsigned *pEHSize)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(pCodeSize);
+
+ if (!m_Code)
+ {
+ GCX_COOP();
+
+ LOG((LF_BCL, LL_INFO100000, "Level5 - DM-JIT: Getting CodeInfo on resolver 0x%p...\n", this));
+ // get the code - Byte[] Resolver.GetCodeInfo(ref ushort stackSize, ref int EHCount)
+ MethodDescCallSite getCodeInfo(METHOD__RESOLVER__GET_CODE_INFO, m_managedResolver);
+
+ OBJECTREF resolver = ObjectFromHandle(m_managedResolver);
+ VALIDATEOBJECTREF(resolver); // gc root must be up the stack
+
+ DWORD initLocals = 0, EHSize = 0;
+ unsigned short stackSize = 0;
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(resolver),
+ PtrToArgSlot(&stackSize),
+ PtrToArgSlot(&initLocals),
+ PtrToArgSlot(&EHSize),
+ };
+ U1ARRAYREF dataArray = (U1ARRAYREF) getCodeInfo.Call_RetOBJECTREF(args);
+ DWORD codeSize = dataArray->GetNumComponents();
+ NewHolder<BYTE> code(new BYTE[codeSize]);
+ memcpy(code, dataArray->GetDataPtr(), codeSize);
+ m_CodeSize = codeSize;
+ _ASSERTE(FitsIn<unsigned short>(stackSize));
+ m_StackSize = static_cast<unsigned short>(stackSize);
+ m_Options = (initLocals) ? CORINFO_OPT_INIT_LOCALS : (CorInfoOptions)0;
+ _ASSERTE(FitsIn<unsigned short>(EHSize));
+ m_EHSize = static_cast<unsigned short>(EHSize);
+ m_Code = (BYTE*)code;
+ code.SuppressRelease();
+ LOG((LF_BCL, LL_INFO100000, "Level5 - DM-JIT: CodeInfo {0x%p} on resolver %p\n", m_Code, this));
+ }
+
+ *pCodeSize = m_CodeSize;
+ if (pStackSize)
+ *pStackSize = m_StackSize;
+ if (pOptions)
+ *pOptions = m_Options;
+ if (pEHSize)
+ *pEHSize = m_EHSize;
+ return m_Code;
+
+}
+
+//---------------------------------------------------------------------------------------
+//
+SigPointer
+LCGMethodResolver::GetLocalSig()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (m_LocalSig.IsNull())
+ {
+ GCX_COOP();
+
+ LOG((LF_BCL, LL_INFO100000, "Level5 - DM-JIT: Getting LocalSig on resolver 0x%p...\n", this));
+
+ MethodDescCallSite getLocalsSignature(METHOD__RESOLVER__GET_LOCALS_SIGNATURE, m_managedResolver);
+
+ OBJECTREF resolver = ObjectFromHandle(m_managedResolver);
+ VALIDATEOBJECTREF(resolver); // gc root must be up the stack
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(resolver)
+ };
+ U1ARRAYREF dataArray = (U1ARRAYREF) getLocalsSignature.Call_RetOBJECTREF(args);
+ DWORD localSigSize = dataArray->GetNumComponents();
+ NewHolder<COR_SIGNATURE> localSig(new COR_SIGNATURE[localSigSize]);
+ memcpy((void *)localSig, dataArray->GetDataPtr(), localSigSize);
+
+ m_LocalSig = SigPointer((PCCOR_SIGNATURE)localSig, localSigSize);
+ localSig.SuppressRelease();
+ LOG((LF_BCL, LL_INFO100000, "Level5 - DM-JIT: LocalSig {0x%p} on resolver %p\n", m_LocalSig.GetPtr(), this));
+ }
+
+ return m_LocalSig;
+} // LCGMethodResolver::GetLocalSig
+
+//---------------------------------------------------------------------------------------
+//
+OBJECTHANDLE
+LCGMethodResolver::ConstructStringLiteral(mdToken metaTok)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ OBJECTHANDLE string = NULL;
+ STRINGREF strRef = GetStringLiteral(metaTok);
+
+ GCPROTECT_BEGIN(strRef);
+
+ if (strRef != NULL)
+ {
+ // Instead of storing the string literal in the appdomain-specific string literal map,
+ // we store it in the dynamic-method-specific string literal list.
+ // This way we can release it when the dynamic method is collected.
+ string = (OBJECTHANDLE)GetOrInternString(&strRef);
+ }
+
+ GCPROTECT_END();
+
+ return string;
+}
+
+//---------------------------------------------------------------------------------------
+//
+BOOL
+LCGMethodResolver::IsValidStringRef(mdToken metaTok)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ return GetStringLiteral(metaTok) != NULL;
+}
+
+//---------------------------------------------------------------------------------------
+//
+STRINGREF
+LCGMethodResolver::GetStringLiteral(
+ mdToken token)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ MethodDescCallSite getStringLiteral(METHOD__RESOLVER__GET_STRING_LITERAL, m_managedResolver);
+
+ OBJECTREF resolver = ObjectFromHandle(m_managedResolver);
+ VALIDATEOBJECTREF(resolver); // gc root must be up the stack
+
+ ARG_SLOT args[] = {
+ ObjToArgSlot(resolver),
+ token,
+ };
+ return getStringLiteral.Call_RetSTRINGREF(args);
+}
+
+// This method will get the interned string by calling GetInternedString on the
+// global string literal interning map. It will also store the returned entry
+// in m_DynamicStringLiterals.
+STRINGREF* LCGMethodResolver::GetOrInternString(STRINGREF *pProtectedStringRef)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pProtectedStringRef));
+ } CONTRACTL_END;
+
+ // Get the global string literal interning map
+ GlobalStringLiteralMap* pStringLiteralMap = SystemDomain::GetGlobalStringLiteralMap();
+
+ // Calculating the hash: EEUnicodeHashTableHelper::GetHash
+ EEStringData StringData = EEStringData((*pProtectedStringRef)->GetStringLength(), (*pProtectedStringRef)->GetBuffer());
+ DWORD dwHash = pStringLiteralMap->GetHash(&StringData);
+
+ // lock the global string literal interning map
+ CrstHolder gch(pStringLiteralMap->GetHashTableCrstGlobal());
+
+ StringLiteralEntryHolder pEntry(pStringLiteralMap->GetInternedString(pProtectedStringRef, dwHash, /* bAddIfNotFound */ TRUE));
+
+ DynamicStringLiteral* pStringLiteral = (DynamicStringLiteral*)m_jitTempData.New(sizeof(DynamicStringLiteral));
+ pStringLiteral->m_pEntry = pEntry.Extract();
+
+ // Add to m_DynamicStringLiterals:
+ // we don't need to check for duplicates because the string literal entries in
+ // the global string literal map are ref counted.
+ pStringLiteral->m_pNext = m_DynamicStringLiterals;
+ m_DynamicStringLiterals = pStringLiteral;
+
+ return pStringLiteral->m_pEntry->GetStringObject();
+
+}
+
+// AddToUsedIndCellList adds an IndCellList link to the beginning of m_UsedIndCellList. It is called by
+// code:CEEInfo::getCallInfo when an indirection cell is allocated for m_pDynamicMethod.
+// All the indirection cells used by m_pDynamicMethod will be recycled when this resolver
+// is finalized; see code:LCGMethodResolver::RecycleIndCells.
+void LCGMethodResolver::AddToUsedIndCellList(BYTE * indcell)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(indcell));
+ } CONTRACTL_END;
+
+ IndCellList * link = (IndCellList *)m_jitTempData.New(sizeof(IndCellList));
+ link->indcell = indcell;
+
+ // Insert into m_UsedIndCellList
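+ // (lock-free push: the compare-exchange is retried until the head we read is
+ // still the current head at the time of the exchange)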
+ while (true)
+ {
+ link->pNext = m_UsedIndCellList;
+ if (InterlockedCompareExchangeT(&m_UsedIndCellList, link, link->pNext) == link->pNext)
+ break;
+ }
+
+}
+
+void LCGMethodResolver::ResolveToken(mdToken token, TypeHandle * pTH, MethodDesc ** ppMD, FieldDesc ** ppFD)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__RESOLVE_TOKEN, ObjectFromHandle(m_managedResolver));
+
+ DECLARE_ARGHOLDER_ARRAY(args, 5);
+
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver));
+ args[ARGNUM_1] = DWORD_TO_ARGHOLDER(token);
+ args[ARGNUM_2] = pTH;
+ args[ARGNUM_3] = ppMD;
+ args[ARGNUM_4] = ppFD;
+
+ CALL_MANAGED_METHOD_NORET(args);
+
+ _ASSERTE(*ppMD == NULL || *ppFD == NULL);
+
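+ // If the managed resolver did not return a type handle directly, derive it
+ // from the MethodDesc or FieldDesc it returned instead.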
+ if (pTH->IsNull())
+ {
+ if (*ppMD != NULL) *pTH = (*ppMD)->GetMethodTable();
+ else
+ if (*ppFD != NULL) *pTH = (*ppFD)->GetEnclosingMethodTable();
+ }
+
+ _ASSERTE(!pTH->IsNull());
+}
+
+//---------------------------------------------------------------------------------------
+//
+SigPointer
+LCGMethodResolver::ResolveSignature(
+ mdToken token)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ U1ARRAYREF dataArray = NULL;
+
+ PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__RESOLVE_SIGNATURE, ObjectFromHandle(m_managedResolver));
+
+ DECLARE_ARGHOLDER_ARRAY(args, 3);
+
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver));
+ args[ARGNUM_1] = DWORD_TO_ARGHOLDER(token);
+ args[ARGNUM_2] = DWORD_TO_ARGHOLDER(0);
+
+ CALL_MANAGED_METHOD_RETREF(dataArray, U1ARRAYREF, args);
+
+ if (dataArray == NULL)
+ COMPlusThrow(kInvalidProgramException);
+
+ DWORD cbSig = dataArray->GetNumComponents();
+ PCCOR_SIGNATURE pSig = (PCCOR_SIGNATURE)m_jitTempData.New(cbSig);
+ memcpy((void *)pSig, dataArray->GetDataPtr(), cbSig);
+ return SigPointer(pSig, cbSig);
+} // LCGMethodResolver::ResolveSignature
+
+//---------------------------------------------------------------------------------------
+//
+SigPointer
+LCGMethodResolver::ResolveSignatureForVarArg(
+ mdToken token)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ U1ARRAYREF dataArray = NULL;
+
+ PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__RESOLVE_SIGNATURE, ObjectFromHandle(m_managedResolver));
+
+ DECLARE_ARGHOLDER_ARRAY(args, 3);
+
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver));
+ args[ARGNUM_1] = DWORD_TO_ARGHOLDER(token);
+ args[ARGNUM_2] = DWORD_TO_ARGHOLDER(1);
+
+ CALL_MANAGED_METHOD_RETREF(dataArray, U1ARRAYREF, args);
+
+ if (dataArray == NULL)
+ COMPlusThrow(kInvalidProgramException);
+
+ DWORD cbSig = dataArray->GetNumComponents();
+ PCCOR_SIGNATURE pSig = (PCCOR_SIGNATURE)m_jitTempData.New(cbSig);
+ memcpy((void *)pSig, dataArray->GetDataPtr(), cbSig);
+ return SigPointer(pSig, cbSig);
+} // LCGMethodResolver::ResolveSignatureForVarArg
+
+//---------------------------------------------------------------------------------------
+//
+void LCGMethodResolver::GetEHInfo(unsigned EHnumber, CORINFO_EH_CLAUSE* clause)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ // attempt to get the raw EHInfo first
+ {
+ U1ARRAYREF dataArray;
+
+ PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__GET_RAW_EH_INFO, ObjectFromHandle(m_managedResolver));
+
+ DECLARE_ARGHOLDER_ARRAY(args, 1);
+
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver));
+
+ CALL_MANAGED_METHOD_RETREF(dataArray, U1ARRAYREF, args);
+
+ if (dataArray != NULL)
+ {
+ COR_ILMETHOD_SECT_EH* pEH = (COR_ILMETHOD_SECT_EH*)dataArray->GetDataPtr();
+
+ COR_ILMETHOD_SECT_EH_CLAUSE_FAT ehClause;
+ const COR_ILMETHOD_SECT_EH_CLAUSE_FAT* ehInfo;
+ ehInfo = (COR_ILMETHOD_SECT_EH_CLAUSE_FAT*)pEH->EHClause(EHnumber, &ehClause);
+
+ clause->Flags = (CORINFO_EH_CLAUSE_FLAGS)ehInfo->GetFlags();
+ clause->TryOffset = ehInfo->GetTryOffset();
+ clause->TryLength = ehInfo->GetTryLength();
+ clause->HandlerOffset = ehInfo->GetHandlerOffset();
+ clause->HandlerLength = ehInfo->GetHandlerLength();
+ clause->ClassToken = ehInfo->GetClassToken();
+ clause->FilterOffset = ehInfo->GetFilterOffset();
+ return;
+ }
+ }
+
+ // failed; get the info from the ILGenerator instead
+ {
+ PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__RESOLVER__GET_EH_INFO, ObjectFromHandle(m_managedResolver));
+
+ DECLARE_ARGHOLDER_ARRAY(args, 3);
+
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ObjectFromHandle(m_managedResolver));
+ args[ARGNUM_1] = DWORD_TO_ARGHOLDER(EHnumber);
+ args[ARGNUM_2] = PTR_TO_ARGHOLDER(clause);
+
+ CALL_MANAGED_METHOD_NORET(args);
+ }
+}
+
+#endif // !DACCESS_COMPILE
+
+
+// Get the associated managed resolver. This method will be called during a GC so it should not throw, trigger a GC or cause the
+// object in question to be validated.
+OBJECTREF LCGMethodResolver::GetManagedResolver()
+{
+ LIMITED_METHOD_CONTRACT;
+ return ObjectFromHandle(m_managedResolver);
+}
+
+
+//
+// ChunkAllocator implementation
+//
+ChunkAllocator::~ChunkAllocator()
+{
+ LIMITED_METHOD_CONTRACT;
+ Delete();
+}
+
+void ChunkAllocator::Delete()
+{
+ LIMITED_METHOD_CONTRACT;
+ BYTE *next = NULL;
+ LOG((LF_BCL, LL_INFO10, "Level1 - DM - Allocator [0x%p] - deleting...\n", this));
+ while (m_pData)
+ {
+ LOG((LF_BCL, LL_INFO10, "Level1 - DM - Allocator [0x%p] - delete block {0x%p}\n", this, m_pData));
+ next = ((BYTE**)m_pData)[0];
+ delete[] m_pData;
+ m_pData = next;
+ }
+}
+
+void* ChunkAllocator::New(size_t size)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ // We need to align it, otherwise we might get DataMisalignedException on IA64
+ size = ALIGN_UP(size, sizeof(void *));
+
+ BYTE *pNewBlock = NULL;
+ LOG((LF_BCL, LL_INFO100, "Level2 - DM - Allocator [0x%p] - allocation requested 0x%X, available 0x%X\n", this, size, (m_pData) ? ((size_t*)m_pData)[1] : 0));
+ if (m_pData)
+ {
+ // we may have room available
+ size_t available = ((size_t*)m_pData)[1];
+ if (size <= available)
+ {
+ LOG((LF_BCL, LL_INFO100, "Level2 - DM - Allocator [0x%p] - reusing block {0x%p}\n", this, m_pData));
+ ((size_t*)m_pData)[1] = available - size;
+ pNewBlock = (m_pData + CHUNK_SIZE - available);
+ LOG((LF_BCL, LL_INFO100, "Level2 - DM - Allocator [0x%p] - ptr -> 0x%p, available 0x%X\n", this, pNewBlock, ((size_t*)m_pData)[1]));
+ return pNewBlock;
+ }
+ }
+
+ // no space available - need to allocate a new buffer
+ if (size + (sizeof(void*) * 2) < CHUNK_SIZE)
+ {
+ // make the allocation
+ NewHolder<BYTE> newBlock(new BYTE[CHUNK_SIZE]);
+ pNewBlock = (BYTE*)newBlock;
+ ((size_t*)pNewBlock)[1] = CHUNK_SIZE - size - (sizeof(void*) * 2);
+ LOG((LF_BCL, LL_INFO10, "Level1 - DM - Allocator [0x%p] - new block {0x%p}\n", this, pNewBlock));
+ newBlock.SuppressRelease();
+ }
+ else
+ {
+ // request bigger than the default size; this is going to be a single block
+ NewHolder<BYTE> newBlock(new BYTE[size + (sizeof(void*) * 2)]);
+ pNewBlock = (BYTE*)newBlock;
+ ((size_t*)pNewBlock)[1] = 0; // no available bytes left
+ LOG((LF_BCL, LL_INFO10, "Level1 - DM - Allocator [0x%p] - new BIG block {0x%p}\n", this, pNewBlock));
+ newBlock.SuppressRelease();
+ }
+
+ // all we have left to do is to link the block.
+ // We keep the block with the most bytes available at the top of the list
+ if (m_pData)
+ {
+ if (((size_t*)pNewBlock)[1] > ((size_t*)m_pData)[1])
+ {
+ ((BYTE**)pNewBlock)[0] = m_pData;
+ m_pData = pNewBlock;
+ }
+ else
+ {
+ ((BYTE**)pNewBlock)[0] = ((BYTE**)m_pData)[0];
+ ((BYTE**)m_pData)[0] = pNewBlock;
+ }
+ }
+ else
+ {
+ // this is the first allocation
+ m_pData = pNewBlock;
+ ((BYTE**)m_pData)[0] = NULL;
+ }
+
+ pNewBlock += (sizeof(void*) * 2);
+ LOG((LF_BCL, LL_INFO100, "Level2 - DM - Allocator [0x%p] - ptr -> 0x%p, available 0x%X\n", this, pNewBlock, ((size_t*)m_pData)[1]));
+ return pNewBlock;
+}
+
diff --git a/src/vm/dynamicmethod.h b/src/vm/dynamicmethod.h
new file mode 100644
index 0000000000..772b422093
--- /dev/null
+++ b/src/vm/dynamicmethod.h
@@ -0,0 +1,381 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef _DYNAMICMETHOD_H_
+#define _DYNAMICMETHOD_H_
+
+#include "jitinterface.h"
+#include "methodtable.h"
+#include <daccess.h>
+
+//---------------------------------------------------------------------------------------
+//
+// This links together a set of allocations (news) and releases them in one operation.
+// The idea is to have a predefined size allocated up front and used by different calls to new.
+// All the allocations will be released at the same time by releasing an instance of this class.
+// Here is how each chunk is laid out:
+// | ptr_to_next_chunk | size_left_in_chunk | data | ... | data |
+// This is not a particularly efficient allocator, but it works well for the small number of
+// allocations needed while jitting a method.
+//
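+// For illustration (a worked example assuming a 64-bit build, so sizeof(void*) == 8):
+// with CHUNK_SIZE == 64 the two-pointer header takes 16 bytes, leaving 48 bytes of data.
+// New(20) rounds the request up to 24 and returns chunk+16, leaving 24 bytes available;
+// a following New(10) rounds up to 16 and returns chunk+40, leaving 8 bytes.
+//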
+class ChunkAllocator
+{
+private:
+ #define CHUNK_SIZE 64
+
+ BYTE *m_pData;
+
+public:
+ ChunkAllocator() : m_pData(NULL) {}
+
+ ~ChunkAllocator();
+ void* New(size_t size);
+ void Delete();
+};
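+
+// A minimal usage sketch (the allocation sizes here are illustrative): every pointer
+// handed out by New() stays valid until Delete() releases all the chunks at once,
+// which is how the resolver's m_jitTempData and m_jitMetaHeap members below are used.
+//
+//     ChunkAllocator temp;
+//     void * p1 = temp.New(24);   // carved out of a 64-byte chunk
+//     void * p2 = temp.New(200);  // larger than a chunk; gets a dedicated block
+//     temp.Delete();              // frees every block in one shot
+//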
+
+//---------------------------------------------------------------------------------------
+//
+class DynamicResolver
+{
+public:
+ // Keep in sync with dynamicIlGenerator.cs
+ enum SecurityControlFlags
+ {
+ Default = 0,
+ SkipVisibilityChecks = 0x1,
+ RestrictedSkipVisibilityChecks = 0x2,
+ HasCreationContext = 0x4,
+ CanSkipCSEvaluation = 0x8,
+ };
+
+
+ // set up and clean up for jitting
+ virtual void FreeCompileTimeState() = 0;
+ virtual void GetJitContext(SecurityControlFlags * securityControlFlags,
+ TypeHandle *typeOwner) = 0;
+ virtual ChunkAllocator* GetJitMetaHeap() = 0;
+
+ //
+ // code info data
+ virtual BYTE * GetCodeInfo(
+ unsigned * pCodeSize,
+ unsigned * pStackSize,
+ CorInfoOptions * pOptions,
+ unsigned * pEHSize) = 0;
+ virtual SigPointer GetLocalSig() = 0;
+
+ //
+ // jit interface api
+ virtual OBJECTHANDLE ConstructStringLiteral(mdToken metaTok) = 0;
+ virtual BOOL IsValidStringRef(mdToken metaTok) = 0;
+ virtual void ResolveToken(mdToken token, TypeHandle * pTH, MethodDesc ** ppMD, FieldDesc ** ppFD) = 0;
+ virtual SigPointer ResolveSignature(mdToken token) = 0;
+ virtual SigPointer ResolveSignatureForVarArg(mdToken token) = 0;
+ virtual void GetEHInfo(unsigned EHnumber, CORINFO_EH_CLAUSE* clause) = 0;
+
+ virtual MethodDesc * GetDynamicMethod() = 0;
+}; // class DynamicResolver
+
+//---------------------------------------------------------------------------------------
+//
+class StringLiteralEntry;
+
+//---------------------------------------------------------------------------------------
+//
+struct DynamicStringLiteral
+{
+ DynamicStringLiteral * m_pNext;
+ StringLiteralEntry * m_pEntry;
+};
+
+//---------------------------------------------------------------------------------------
+//
+// LCGMethodResolver
+//
+// a jit resolver for managed dynamic methods
+//
+class LCGMethodResolver : public DynamicResolver
+{
+ friend class DynamicMethodDesc;
+ friend class DynamicMethodTable;
+ // review this to see whether the EEJitManager is the only thing to worry about
+ friend class ExecutionManager;
+ friend class EEJitManager;
+ friend class HostCodeHeap;
+
+public:
+ void Destroy(BOOL fDomainUnload = FALSE);
+
+ void FreeCompileTimeState();
+ void GetJitContext(SecurityControlFlags * securityControlFlags,
+ TypeHandle * typeOwner);
+ void GetJitContextCoop(SecurityControlFlags * securityControlFlags,
+ TypeHandle * typeOwner);
+ ChunkAllocator* GetJitMetaHeap();
+
+ BYTE* GetCodeInfo(unsigned *pCodeSize, unsigned *pStackSize, CorInfoOptions *pOptions, unsigned* pEHSize);
+ SigPointer GetLocalSig();
+
+ OBJECTHANDLE ConstructStringLiteral(mdToken metaTok);
+ BOOL IsValidStringRef(mdToken metaTok);
+ void ResolveToken(mdToken token, TypeHandle * pTH, MethodDesc ** ppMD, FieldDesc ** ppFD);
+ SigPointer ResolveSignature(mdToken token);
+ SigPointer ResolveSignatureForVarArg(mdToken token);
+ void GetEHInfo(unsigned EHnumber, CORINFO_EH_CLAUSE* clause);
+
+ MethodDesc* GetDynamicMethod() { LIMITED_METHOD_CONTRACT; return m_pDynamicMethod; }
+ OBJECTREF GetManagedResolver();
+ void SetManagedResolver(OBJECTHANDLE obj) { LIMITED_METHOD_CONTRACT; m_managedResolver = obj; }
+ void * GetRecordCodePointer() { LIMITED_METHOD_CONTRACT; return m_recordCodePointer; }
+
+ STRINGREF GetStringLiteral(mdToken token);
+ STRINGREF * GetOrInternString(STRINGREF *pString);
+ void AddToUsedIndCellList(BYTE * indcell);
+
+private:
+ void RecycleIndCells();
+ void Reset();
+
+ struct IndCellList
+ {
+ BYTE * indcell;
+ IndCellList * pNext;
+ };
+
+ DynamicMethodDesc* m_pDynamicMethod;
+ OBJECTHANDLE m_managedResolver;
+ BYTE *m_Code;
+ DWORD m_CodeSize;
+ SigPointer m_LocalSig;
+ unsigned short m_StackSize;
+ CorInfoOptions m_Options;
+ unsigned m_EHSize;
+ DynamicMethodTable *m_DynamicMethodTable;
+ DynamicMethodDesc *m_next;
+ void *m_recordCodePointer;
+ ChunkAllocator m_jitMetaHeap;
+ ChunkAllocator m_jitTempData;
+ DynamicStringLiteral* m_DynamicStringLiterals;
+ IndCellList * m_UsedIndCellList; // list to keep track of all the indirection cells used by the jitted code
+ JumpStubBlockHeader* m_jumpStubBlock;
+}; // class LCGMethodResolver
+
+//---------------------------------------------------------------------------------------
+//
+// A DynamicMethodTable is used by lightweight code generation (LCG) to lazily allocate methods.
+// The methods in this MethodTable are not known up front, and their signatures are defined
+// at runtime.
+//
+class DynamicMethodTable
+{
+public:
+#ifndef DACCESS_COMPILE
+ static void CreateDynamicMethodTable(DynamicMethodTable **ppLocation, Module *pModule, AppDomain *pDomain);
+#endif
+
+private:
+ CrstExplicitInit m_Crst;
+ DynamicMethodDesc *m_DynamicMethodList;
+ MethodTable *m_pMethodTable;
+ Module *m_Module;
+ AppDomain *m_pDomain;
+
+ DynamicMethodTable() {WRAPPER_NO_CONTRACT;}
+
+ class LockHolder : public CrstHolder
+ {
+ public:
+ LockHolder(DynamicMethodTable *pDynMT)
+ : CrstHolder(&pDynMT->m_Crst)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ };
+ friend class LockHolder;
+
+#ifndef DACCESS_COMPILE
+ void MakeMethodTable(AllocMemTracker *pamTracker);
+ void AddMethodsToList();
+
+public:
+ void Destroy();
+ DynamicMethodDesc* GetDynamicMethod(BYTE *psig, DWORD sigSize, PTR_CUTF8 name);
+ void LinkMethod(DynamicMethodDesc *pMethod);
+
+#endif
+
+#ifdef _DEBUG
+public:
+ DWORD m_Used;
+#endif
+
+}; // class DynamicMethodTable
+
+
+//---------------------------------------------------------------------------------------
+//
+#define HOST_CODEHEAP_SIZE_ALIGN 64
+
+//---------------------------------------------------------------------------------------
+//
+// Implementation of the CodeHeap for DynamicMethods.
+// This CodeHeap uses the host interface VirtualAlloc/Free and allows
+// for reclamation of generated code.
+// (Check the base class - CodeHeap in codeman.h - for comments on the functions.)
+//
+class HostCodeHeap : CodeHeap
+{
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+#else
+ friend class EEJitManager;
+#endif
+
+ VPTR_VTABLE_CLASS(HostCodeHeap, CodeHeap)
+
+private:
+ // pointer back to jit manager info
+ PTR_HeapList m_pHeapList;
+ PTR_EEJitManager m_pJitManager;
+ // basic allocation data
+ PTR_BYTE m_pBaseAddr;
+ PTR_BYTE m_pLastAvailableCommittedAddr;
+ size_t m_TotalBytesAvailable;
+ size_t m_ReservedData;
+ // Heap ref count
+ DWORD m_AllocationCount;
+
+ // data to track the free list and pointers into this heap
+ // - on a used block this struct holds a pointer back to the CodeHeap, the size, and the start of the aligned allocation
+ // - on an unused (free) block it tracks the size of the block and the pointer to the next non-contiguous free block
+ struct TrackAllocation {
+ union {
+ HostCodeHeap *pHeap;
+ TrackAllocation *pNext;
+ };
+ size_t size;
+
+ // the location of this TrackAllocation record will be stored right before the start of the allocated memory
+ // if there is padding between them it will be stored in that padding, otherwise it will be stored in this pad field
+ void *pad;
+ };
+ TrackAllocation *m_pFreeList;
+
+ // used for cleanup. Keep track of the next potential heap to release. Normally NULL
+ HostCodeHeap *m_pNextHeapToRelease;
+ LoaderAllocator *m_pAllocator;
+
+public:
+ static HeapList* CreateCodeHeap(CodeHeapRequestInfo *pInfo, EEJitManager *pJitManager);
+
+private:
+ HostCodeHeap(size_t ReserveBlockSize, EEJitManager *pJitManager, CodeHeapRequestInfo *pInfo);
+ BYTE* InitCodeHeapPrivateData(size_t ReserveBlockSize, size_t otherData, size_t nibbleMapSize);
+ void* AllocFromFreeList(size_t size, DWORD alignment);
+ void AddToFreeList(TrackAllocation *pBlockToInsert);
+ static size_t GetPadding(TrackAllocation *pCurrent, size_t size, DWORD alignment);
+
+ void* AllocMemory(size_t size, DWORD alignment);
+ void* AllocMemory_NoThrow(size_t size, DWORD alignment);
+
+public:
+ // Space for header is reserved immediately before. It is not included in size.
+ virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment) DAC_EMPTY_RET(NULL);
+
+ virtual ~HostCodeHeap() DAC_EMPTY();
+
+ LoaderAllocator* GetAllocator() { return m_pAllocator; }
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ static TrackAllocation * GetTrackAllocation(TADDR codeStart);
+ static HostCodeHeap* GetCodeHeap(TADDR codeStart);
+
+ void DestroyCodeHeap();
+
+protected:
+ friend class DynamicMethodDesc;
+ friend class LCGMethodResolver;
+
+ void FreeMemForCode(void * codeStart);
+
+}; // class HostCodeHeap
+
+//---------------------------------------------------------------------------------------
+//
+#include "ilstubresolver.h"
+
+inline MethodDesc* GetMethod(CORINFO_METHOD_HANDLE methodHandle)
+{
+ LIMITED_METHOD_CONTRACT;
+ return (MethodDesc*) methodHandle;
+}
+
+#ifndef DACCESS_COMPILE
+
+#define CORINFO_MODULE_HANDLE_TYPE_MASK 1
+
+enum CORINFO_MODULE_HANDLE_TYPES
+{
+ CORINFO_NORMAL_MODULE = 0,
+ CORINFO_DYNAMIC_MODULE,
+};
+
+inline bool IsDynamicScope(CORINFO_MODULE_HANDLE module)
+{
+ LIMITED_METHOD_CONTRACT;
+ return (CORINFO_DYNAMIC_MODULE == (((size_t)module) & CORINFO_MODULE_HANDLE_TYPE_MASK));
+}
+
+inline CORINFO_MODULE_HANDLE MakeDynamicScope(DynamicResolver* pResolver)
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(0 == (((size_t)pResolver) & CORINFO_MODULE_HANDLE_TYPE_MASK));
+ return (CORINFO_MODULE_HANDLE)(((size_t)pResolver) | CORINFO_DYNAMIC_MODULE);
+}
+
+inline DynamicResolver* GetDynamicResolver(CORINFO_MODULE_HANDLE module)
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(IsDynamicScope(module));
+ return (DynamicResolver*)(((size_t)module) & ~((size_t)CORINFO_MODULE_HANDLE_TYPE_MASK));
+}
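+
+// For illustration: a dynamic scope is just a DynamicResolver pointer with its low bit
+// set (the CONSISTENCY_CHECK in MakeDynamicScope relies on resolver pointers being at
+// least 2-byte aligned, so that bit is free):
+//
+//     DynamicResolver * pResolver = ...;                          // low bit clear
+//     CORINFO_MODULE_HANDLE hScope = MakeDynamicScope(pResolver); // low bit set
+//     _ASSERTE(IsDynamicScope(hScope));
+//     _ASSERTE(GetDynamicResolver(hScope) == pResolver);
+//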
+
+inline Module* GetModule(CORINFO_MODULE_HANDLE scope)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (IsDynamicScope(scope))
+ {
+ return GetDynamicResolver(scope)->GetDynamicMethod()->GetModule();
+ }
+ else
+ {
+ return((Module*)scope);
+ }
+}
+
+inline CORINFO_MODULE_HANDLE GetScopeHandle(Module* module)
+{
+ LIMITED_METHOD_CONTRACT;
+ return(CORINFO_MODULE_HANDLE(module));
+}
+
+inline bool IsDynamicMethodHandle(CORINFO_METHOD_HANDLE method)
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(NULL != GetMethod(method));
+ return GetMethod(method)->IsDynamicMethod();
+}
+
+#endif // DACCESS_COMPILE
+
+#endif // _DYNAMICMETHOD_H_
diff --git a/src/vm/ecall.cpp b/src/vm/ecall.cpp
new file mode 100644
index 0000000000..da81993d13
--- /dev/null
+++ b/src/vm/ecall.cpp
@@ -0,0 +1,792 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ECALL.CPP -
+//
+// Handles our private native calling interface.
+//
+
+
+
+#include "common.h"
+
+#include "ecall.h"
+
+#include "comdelegate.h"
+
+#ifndef DACCESS_COMPILE
+
+#ifdef CROSSGEN_COMPILE
+namespace CrossGenMscorlib
+{
+ extern const ECClass c_rgECClasses[];
+ extern const int c_nECClasses;
+};
+using namespace CrossGenMscorlib;
+#else // CROSSGEN_COMPILE
+extern const ECClass c_rgECClasses[];
+extern const int c_nECClasses;
+#endif // CROSSGEN_COMPILE
+
+
+// METHOD__STRING__CTORF_XXX has to be in same order as ECall::CtorCharXxx
+#define METHOD__STRING__CTORF_FIRST METHOD__STRING__CTORF_CHARARRAY
+static_assert_no_msg(METHOD__STRING__CTORF_FIRST + 0 == METHOD__STRING__CTORF_CHARARRAY);
+static_assert_no_msg(METHOD__STRING__CTORF_FIRST + 1 == METHOD__STRING__CTORF_CHARARRAY_START_LEN);
+static_assert_no_msg(METHOD__STRING__CTORF_FIRST + 2 == METHOD__STRING__CTORF_CHAR_COUNT);
+static_assert_no_msg(METHOD__STRING__CTORF_FIRST + 3 == METHOD__STRING__CTORF_CHARPTR);
+static_assert_no_msg(METHOD__STRING__CTORF_FIRST + 4 == METHOD__STRING__CTORF_CHARPTR_START_LEN);
+
+// ECall::CtorCharXxx has to be in same order as METHOD__STRING__CTORF_XXX
+#define ECallCtor_First ECall::CtorCharArrayManaged
+static_assert_no_msg(ECallCtor_First + 0 == ECall::CtorCharArrayManaged);
+static_assert_no_msg(ECallCtor_First + 1 == ECall::CtorCharArrayStartLengthManaged);
+static_assert_no_msg(ECallCtor_First + 2 == ECall::CtorCharCountManaged);
+static_assert_no_msg(ECallCtor_First + 3 == ECall::CtorCharPtrManaged);
+static_assert_no_msg(ECallCtor_First + 4 == ECall::CtorCharPtrStartLengthManaged);
+
+#define NumberOfStringConstructors 5
+
+void ECall::PopulateManagedStringConstructors()
+{
+ STANDARD_VM_CONTRACT;
+
+ INDEBUG(static bool fInitialized = false);
+ _ASSERTE(!fInitialized); // assume this method is only called once
+ _ASSERTE(g_pStringClass != NULL);
+
+ for (int i = 0; i < NumberOfStringConstructors; i++)
+ {
+ MethodDesc* pMD = MscorlibBinder::GetMethod((BinderMethodID)(METHOD__STRING__CTORF_FIRST + i));
+ _ASSERTE(pMD != NULL);
+
+ PCODE pDest = pMD->GetMultiCallableAddrOfCode();
+
+ ECall::DynamicallyAssignFCallImpl(pDest, ECallCtor_First + i);
+ }
+ INDEBUG(fInitialized = true);
+}
+
+static CrstStatic gFCallLock;
+
+// This variable is used to force the compiler not to tailcall a function.
+int FC_NO_TAILCALL;
+
+#endif // !DACCESS_COMPILE
+
+// To provide a quick check, this is the lowest and highest
+// addresses of any FCALL starting address
+GVAL_IMPL_INIT(TADDR, gLowestFCall, (TADDR)-1);
+GVAL_IMPL(TADDR, gHighestFCall);
+
+GARY_IMPL(PTR_ECHash, gFCallMethods, FCALL_HASH_SIZE);
+
+inline unsigned FCallHash(PCODE pTarg) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return pTarg % FCALL_HASH_SIZE;
+}
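+
+// (The hash is simply the entrypoint address modulo the table size; see the
+// FCALL_HASH_SIZE comment in ecall.h, which asks for a prime table size.)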
+
+#ifdef DACCESS_COMPILE
+
+GARY_IMPL(PCODE, g_FCDynamicallyAssignedImplementations,
+ ECall::NUM_DYNAMICALLY_ASSIGNED_FCALL_IMPLEMENTATIONS);
+
+#else // !DACCESS_COMPILE
+
+PCODE g_FCDynamicallyAssignedImplementations[ECall::NUM_DYNAMICALLY_ASSIGNED_FCALL_IMPLEMENTATIONS] = {
+ #undef DYNAMICALLY_ASSIGNED_FCALL_IMPL
+ #define DYNAMICALLY_ASSIGNED_FCALL_IMPL(id,defaultimpl) GetEEFuncEntryPoint(defaultimpl),
+ DYNAMICALLY_ASSIGNED_FCALLS()
+};
+
+void ECall::DynamicallyAssignFCallImpl(PCODE impl, DWORD index)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(index < NUM_DYNAMICALLY_ASSIGNED_FCALL_IMPLEMENTATIONS);
+ g_FCDynamicallyAssignedImplementations[index] = impl;
+}
+
+/*******************************************************************************/
+static INT FindImplsIndexForClass(MethodTable* pMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LPCUTF8 pszNamespace = 0;
+ LPCUTF8 pszName = pMT->GetFullyQualifiedNameInfo(&pszNamespace);
+
+ // Array classes get null from the above routine, but they have no ecalls.
+ if (pszName == NULL)
+ return (-1);
+
+ unsigned low = 0;
+ unsigned high = c_nECClasses;
+
+#ifdef _DEBUG
+ static bool checkedSort = false;
+ if (!checkedSort) {
+ checkedSort = true;
+ for (unsigned i = 1; i < high; i++) {
+ // Make certain list is sorted!
+ int cmp = strcmp(c_rgECClasses[i].m_szClassName, c_rgECClasses[i-1].m_szClassName);
+ if (cmp == 0)
+ cmp = strcmp(c_rgECClasses[i].m_szNameSpace, c_rgECClasses[i-1].m_szNameSpace);
+ _ASSERTE(cmp > 0 && W("You forgot to keep ECall class names sorted")); // Hey, you forgot to sort the new class
+ }
+ }
+#endif // _DEBUG
+ while (high > low) {
+ unsigned mid = (high + low) / 2;
+ int cmp = strcmp(pszName, c_rgECClasses[mid].m_szClassName);
+ if (cmp == 0)
+ cmp = strcmp(pszNamespace, c_rgECClasses[mid].m_szNameSpace);
+
+ if (cmp == 0) {
+ return(mid);
+ }
+ if (cmp > 0)
+ low = mid+1;
+ else
+ high = mid;
+ }
+
+ return (-1);
+}
+
+/*******************************************************************************/
+/* Finds the implementation for the given method desc. */
+
+static INT FindECIndexForMethod(MethodDesc *pMD, const LPVOID* impls)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LPCUTF8 szMethodName = pMD->GetName();
+ PCCOR_SIGNATURE pMethodSig;
+ ULONG cbMethodSigLen;
+
+ pMD->GetSig(&pMethodSig, &cbMethodSigLen);
+ Module* pModule = pMD->GetModule();
+
+ for (ECFunc* cur = (ECFunc*)impls; !cur->IsEndOfArray(); cur = cur->NextInArray())
+ {
+ if (strcmp(cur->m_szMethodName, szMethodName) != 0)
+ continue;
+
+ if (cur->HasSignature())
+ {
+ Signature sig = MscorlibBinder::GetTargetSignature(cur->m_pMethodSig);
+
+ //@GENERICS: none of these methods belong to generic classes so there is no instantiation info to pass in
+ if (!MetaSig::CompareMethodSigs(pMethodSig, cbMethodSigLen, pModule, NULL,
+ sig.GetRawSig(), sig.GetRawSigLen(), MscorlibBinder::GetModule(), NULL))
+ {
+ continue;
+ }
+ }
+
+ // We have found a match!
+ return static_cast<INT>((LPVOID*)cur - impls);
+ }
+
+ return -1;
+}
+
+/*******************************************************************************/
+/* ID is formed of 2 USHORTs - class index in high word, method index in low word. */
+/* The method index is stored biased by 1, so id == 0 means no implementation. */
+
+DWORD ECall::GetIDForMethod(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef MDIL
+ // We should not go here for NGened methods
+ // However, we will come here for MDIL binder generated images because the
+ // IDs for FCALLS are not exernally known (and certainly not constant).
+ _ASSERTE(!pMD->IsZapped());
+#endif
+
+ INT ImplsIndex = FindImplsIndexForClass(pMD->GetMethodTable());
+ if (ImplsIndex < 0)
+ return 0;
+ INT ECIndex = FindECIndexForMethod(pMD, c_rgECClasses[ImplsIndex].m_pECFunc);
+ if (ECIndex < 0)
+ return 0;
+
+ return (ImplsIndex<<16) | (ECIndex + 1);
+}
+
+static ECFunc *FindECFuncForID(DWORD id)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (id == 0)
+ return NULL;
+
+ INT ImplsIndex = (id >> 16);
+ INT ECIndex = (id & 0xffff) - 1;
+
+ return (ECFunc*)(c_rgECClasses[ImplsIndex].m_pECFunc + ECIndex);
+}
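+
+// For example, a method with ImplsIndex == 3 and ECIndex == 0 is encoded as
+// id == 0x00030001, and FindECFuncForID(0x00030001) recovers ImplsIndex == 3
+// and ECIndex == 0.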
+
+static ECFunc* FindECFuncForMethod(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(pMD->IsFCall());
+ }
+ CONTRACTL_END;
+
+ DWORD id = ((FCallMethodDesc *)pMD)->GetECallID();
+ if (id == 0)
+ {
+ id = ECall::GetIDForMethod(pMD);
+
+ CONSISTENCY_CHECK_MSGF(0 != id,
+ ("No method entry found for %s::%s.\n",
+ pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+
+ // Cache the id
+ ((FCallMethodDesc *)pMD)->SetECallID(id);
+ }
+
+ return FindECFuncForID(id);
+}
+
+/*******************************************************************************
+* Returns 0 if it is an ECALL,
+* Otherwise returns the native entry point (FCALL)
+*/
+PCODE ECall::GetFCallImpl(MethodDesc * pMD, BOOL * pfSharedOrDynamicFCallImpl /*=NULL*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(pMD->IsFCall());
+ }
+ CONTRACTL_END;
+
+ MethodTable * pMT = pMD->GetMethodTable();
+
+ //
+ // Delegate constructors are FCalls for which the entrypoint points to the target of the delegate
+ // We have to intercept these and set the call target to the helper COMDelegate::DelegateConstruct
+ //
+ if (pMT->IsDelegate())
+ {
+ if (pfSharedOrDynamicFCallImpl)
+ *pfSharedOrDynamicFCallImpl = TRUE;
+
+ // COMDelegate::DelegateConstruct is the only fcall used by user delegates.
+ // All the other gDelegateFuncs are only used by System.Delegate
+ _ASSERTE(pMD->IsCtor());
+
+ // We need to set up the ECFunc properly. We don't want to use the pMD passed in,
+ // since it may disappear. Instead, use the stable one on Delegate. Remember
+ // that this is 1:M between the FCall and the pMDs.
+ return GetFCallImpl(MscorlibBinder::GetMethod(METHOD__DELEGATE__CONSTRUCT_DELEGATE));
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // COM imported classes have special constructors
+ if (pMT->IsComObjectType() && pMT != g_pBaseCOMObject && pMT != g_pBaseRuntimeClass)
+ {
+ if (pfSharedOrDynamicFCallImpl)
+ *pfSharedOrDynamicFCallImpl = TRUE;
+
+ // This has to be a tlbimp constructor
+ _ASSERTE(pMD->IsCtor());
+ _ASSERTE(!pMT->IsProjectedFromWinRT());
+
+ // FCComCtor does not need to be in the fcall hashtable since it does not erect a frame.
+ return GetEEFuncEntryPoint(FCComCtor);
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (!pMD->GetModule()->IsSystem())
+ COMPlusThrow(kSecurityException, BFA_ECALLS_MUST_BE_IN_SYS_MOD);
+
+ ECFunc* ret = FindECFuncForMethod(pMD);
+
+ // ECall is a set of tables to call functions within the EE from the classlibs.
+ // First we use the class name & namespace to find an array of function pointers for
+ // a class, then use the function name (& sometimes signature) to find the correct
+ // function pointer for your method. Methods in the BCL will be marked as
+ // [MethodImplAttribute(MethodImplOptions.InternalCall)] and extern.
+ //
+ // You'll see this assert in several situations, almost all being the fault of whoever
+ // last touched a particular ecall or fcall method, either here or in the classlibs.
+ // However, you must also ensure you don't have stray copies of mscorlib.dll on your machine.
+ // 1) You forgot to add your class to c_rgECClasses, the list of classes w/ ecall & fcall methods.
+ // 2) You forgot to add your particular method to the ECFunc array for your class.
+ // 3) You misspelled the name of your function and/or classname.
+ // 4) The signature of the managed function doesn't match the hardcoded metadata signature
+ // listed in your ECFunc array. The hardcoded metadata sig is only necessary to disambiguate
+ // overloaded ecall functions - usually you can leave it set to NULL.
+ // 5) Your copy of mscorlib.dll & mscoree.dll are out of sync - rebuild both.
+ // 6) You've loaded the wrong copy of mscorlib.dll. In msdev's debug menu,
+ // select the "Modules..." dialog. Verify the path for mscorlib is right.
+ // 7) Someone mucked around with how the signatures in metasig.h are parsed, changing the
+ // interpretation of a part of the signature (this is very rare & extremely unlikely,
+ // but has happened at least once).
+
+ CONSISTENCY_CHECK_MSGF(ret != NULL,
+ ("Could not find an ECALL entry for %s::%s.\n"
+ "Read comment above this assert in vm/ecall.cpp\n",
+ pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+
+ CONSISTENCY_CHECK_MSGF(!ret->IsQCall(),
+ ("%s::%s is not registered using FCFuncElement macro in ecall.cpp",
+ pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+
+#ifdef CROSSGEN_COMPILE
+
+ // Use the ECFunc address as a unique fake entrypoint to make the entrypoint<->MethodDesc mapping work
+ PCODE pImplementation = (PCODE)ret;
+#ifdef _TARGET_ARM_
+ pImplementation |= THUMB_CODE;
+#endif
+
+#else // CROSSGEN_COMPILE
+
+ PCODE pImplementation = (PCODE)ret->m_pImplementation;
+
+ int iDynamicID = ret->DynamicID();
+ if (iDynamicID != InvalidDynamicFCallId)
+ {
+ if (pfSharedOrDynamicFCallImpl)
+ *pfSharedOrDynamicFCallImpl = TRUE;
+
+ pImplementation = g_FCDynamicallyAssignedImplementations[iDynamicID];
+ _ASSERTE(pImplementation != NULL);
+ return pImplementation;
+ }
+
+#endif // CROSSGEN_COMPILE
+
+ // Insert the implementation into hash table if it is not there already.
+
+ CrstHolder holder(&gFCallLock);
+
+ MethodDesc * pMDinTable = ECall::MapTargetBackToMethod(pImplementation, &pImplementation);
+
+ if (pMDinTable != NULL)
+ {
+ if (pMDinTable != pMD)
+ {
+ // The fcall entrypoints have to be at unique addresses. If you get a failure here, use the following steps
+ // to fix it:
+ // 1. Consider merging the offending fcalls into one fcall. Do they really do different things?
+ // 2. If it does not make sense to merge the offending fcalls into one,
+ // add FCUnique(<a random unique number here>); to one of the offending fcalls.
+
+ _ASSERTE(!"Duplicate pImplementation entries found in reverse fcall table");
+ ThrowHR(E_FAIL);
+ }
+ }
+ else
+ {
+ ECHash * pEntry = (ECHash *)(PVOID)SystemDomain::GetGlobalLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(ECHash)));
+
+ pEntry->m_pImplementation = pImplementation;
+ pEntry->m_pMD = pMD;
+
+ if(gLowestFCall > pImplementation)
+ gLowestFCall = pImplementation;
+ if(gHighestFCall < pImplementation)
+ gHighestFCall = pImplementation;
+
+ // add to hash table
+ ECHash** spot = &gFCallMethods[FCallHash(pImplementation)];
+ for(;;) {
+ if (*spot == 0) { // found end of list
+ *spot = pEntry;
+ break;
+ }
+ spot = &(*spot)->m_pNext;
+ }
+ }
+
+ if (pfSharedOrDynamicFCallImpl)
+ *pfSharedOrDynamicFCallImpl = FALSE;
+
+ _ASSERTE(pImplementation != NULL);
+ return pImplementation;
+}
+
+BOOL ECall::IsSharedFCallImpl(PCODE pImpl)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PCODE pNativeCode = pImpl;
+
+ return
+#ifdef FEATURE_COMINTEROP
+ (pNativeCode == GetEEFuncEntryPoint(FCComCtor)) ||
+#endif
+ (pNativeCode == GetEEFuncEntryPoint(COMDelegate::DelegateConstruct));
+}
+
+BOOL ECall::CheckUnusedECalls(SetSHash<DWORD>& usedIDs)
+{
+ STANDARD_VM_CONTRACT;
+
+ BOOL fUnusedFCallsFound = FALSE;
+
+ INT num = c_nECClasses;
+ for (INT ImplsIndex=0; ImplsIndex < num; ImplsIndex++)
+ {
+ const ECClass * pECClass = c_rgECClasses + ImplsIndex;
+
+ BOOL fUnreferencedType = TRUE;
+ for (ECFunc* ptr = (ECFunc*)pECClass->m_pECFunc; !ptr->IsEndOfArray(); ptr = ptr->NextInArray())
+ {
+ if (ptr->DynamicID() == InvalidDynamicFCallId && !ptr->IsUnreferenced())
+ {
+ INT ECIndex = static_cast<INT>((LPVOID*)ptr - pECClass->m_pECFunc);
+
+ DWORD id = (ImplsIndex<<16) | (ECIndex + 1);
+
+ if (!usedIDs.Contains(id))
+ {
+ printf("CheckMscorlibExtended: Unused ecall found: %s.%s::%s\n", pECClass->m_szNameSpace, c_rgECClasses[ImplsIndex].m_szClassName, ptr->m_szMethodName);
+ fUnusedFCallsFound = TRUE;
+ continue;
+ }
+ }
+ fUnreferencedType = FALSE;
+ }
+
+ if (fUnreferencedType)
+ {
+ printf("CheckMscorlibExtended: Unused type found: %s.%s\n", c_rgECClasses[ImplsIndex].m_szNameSpace, c_rgECClasses[ImplsIndex].m_szClassName);
+ fUnusedFCallsFound = TRUE;
+ continue;
+ }
+ }
+
+ return !fUnusedFCallsFound;
+}
+
+
+#if defined(FEATURE_COMINTEROP) && !defined(CROSSGEN_COMPILE)
+FCIMPL1(VOID, FCComCtor, LPVOID pV)
+{
+ FCALL_CONTRACT;
+
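+ // FCUnique gives this otherwise empty fcall a distinct body, so its entrypoint
+ // address cannot collide with another fcall's in the reverse fcall table (see the
+ // comment about duplicate pImplementation entries in ECall::GetFCallImpl).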
+ FCUnique(0x34);
+}
+FCIMPLEND
+#endif // FEATURE_COMINTEROP && !CROSSGEN_COMPILE
+
+
+
+/* static */
+void ECall::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ gFCallLock.Init(CrstFCall);
+
+ // It is important to do an explicit increment here instead of just in-place initialization
+ // so that the global optimizer cannot figure out the value and remove the side-effect that
+ // we depend on in FC_INNER_RETURN macros and other places
+ FC_NO_TAILCALL++;
+}
+
+LPVOID ECall::GetQCallImpl(MethodDesc * pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(pMD->IsNDirect());
+ }
+ CONTRACTL_END;
+
+ DWORD id = ((NDirectMethodDesc *)pMD)->GetECallID();
+ if (id == 0)
+ {
+ id = ECall::GetIDForMethod(pMD);
+ _ASSERTE(id != 0);
+
+ // Cache the id
+ ((NDirectMethodDesc *)pMD)->SetECallID(id);
+ }
+
+ ECFunc * cur = FindECFuncForID(id);
+
+#ifdef _DEBUG
+ CONSISTENCY_CHECK_MSGF(cur != NULL,
+ ("%s::%s is not registered in ecall.cpp",
+ pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+
+ CONSISTENCY_CHECK_MSGF(cur->IsQCall(),
+ ("%s::%s is not registered using QCFuncElement macro in ecall.cpp",
+ pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+
+ CONSISTENCY_CHECK_MSGF(pMD->HasSuppressUnmanagedCodeAccessAttr(),
+ ("%s::%s is not marked with SuppressUnmanagedCodeSecurityAttribute()",
+ pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+
+ DWORD dwAttrs = pMD->GetAttrs();
+ BOOL fPublicOrProtected = IsMdPublic(dwAttrs) || IsMdFamily(dwAttrs) || IsMdFamORAssem(dwAttrs);
+
+ // SuppressUnmanagedCodeSecurityAttribute on QCalls suppresses a full demand, but there's still a link demand
+ // for unmanaged code permission. All QCalls should be private or internal and wrapped in a managed method
+ // to suppress this link demand.
+ CONSISTENCY_CHECK_MSGF(!fPublicOrProtected,
+ ("%s::%s has to be private or internal.",
+ pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+#endif
+
+ return cur->m_pImplementation;
+}
+
+#endif // !DACCESS_COMPILE
+
+MethodDesc* ECall::MapTargetBackToMethod(PCODE pTarg, PCODE * ppAdjustedEntryPoint /*=NULL*/)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ HOST_NOCALLS;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ // Searching all of the entries is expensive,
+ // and we are often called with pTarg == NULL, so
+ // check for this value and exit early.
+
+ if (!pTarg)
+ return NULL;
+
+ // Could this possibly be an FCall?
+ if ((pTarg < gLowestFCall) || (pTarg > gHighestFCall))
+ return NULL;
+
+ ECHash * pECHash = gFCallMethods[FCallHash(pTarg)];
+ while (pECHash != NULL)
+ {
+ if (pECHash->m_pImplementation == pTarg)
+ {
+ return pECHash->m_pMD;
+ }
+ pECHash = pECHash->m_pNext;
+ }
+ return NULL;
+}
+
+#ifndef DACCESS_COMPILE
+
+/* static */
+CorInfoIntrinsics ECall::GetIntrinsicID(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(pMD->IsFCall());
+ }
+ CONTRACTL_END;
+
+ MethodTable * pMT = pMD->GetMethodTable();
+
+#ifdef FEATURE_COMINTEROP
+ // COM imported classes have special constructors
+ if (pMT->IsComObjectType())
+ {
+ // This has to be a tlbimp constructor
+ return(CORINFO_INTRINSIC_Illegal);
+ }
+#endif // FEATURE_COMINTEROP
+
+ //
+ // Delegate constructors are FCalls for which the entrypoint points to the target of the delegate
+ // We have to intercept these and set the call target to the helper COMDelegate::DelegateConstruct
+ //
+ if (pMT->IsDelegate())
+ {
+ // COMDelegate::DelegateConstruct is the only fcall used by user delegates.
+ // All the other gDelegateFuncs are only used by System.Delegate
+ _ASSERTE(pMD->IsCtor());
+
+ return(CORINFO_INTRINSIC_Illegal);
+ }
+
+ // All intrinsics live in mscorlib.dll (FindECFuncForMethod does not work for non-mscorlib intrinsics)
+ if (!pMD->GetModule()->IsSystem())
+ {
+ return(CORINFO_INTRINSIC_Illegal);
+ }
+
+ ECFunc* info = FindECFuncForMethod(pMD);
+
+ if (info == NULL)
+ return(CORINFO_INTRINSIC_Illegal);
+
+ return info->IntrinsicID();
+}
+
+#ifdef _DEBUG
+
+void FCallAssert(void*& cache, void* target)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_DEBUG_ONLY;
+
+ if (cache != 0)
+ {
+ return;
+ }
+
+ //
+ // Special case fcalls with 1:N mapping between implementation and methoddesc
+ //
+ if (ECall::IsSharedFCallImpl((PCODE)target))
+ {
+ cache = (void*)1;
+ return;
+ }
+
+ MethodDesc* pMD = ECall::MapTargetBackToMethod((PCODE)target);
+ if (pMD != 0)
+ {
+ return;
+ }
+
+ // Slow but only for debugging. This is needed because in some places
+ // we call FCALLs directly from EE code.
+
+ unsigned num = c_nECClasses;
+ for (unsigned i=0; i < num; i++)
+ {
+ for (ECFunc* ptr = (ECFunc*)c_rgECClasses[i].m_pECFunc; !ptr->IsEndOfArray(); ptr = ptr->NextInArray())
+ {
+ if (ptr->m_pImplementation == target)
+ {
+ cache = target;
+ return;
+ }
+ }
+ }
+
+ // Now check the dynamically assigned table too.
+ for (unsigned i=0; i<ECall::NUM_DYNAMICALLY_ASSIGNED_FCALL_IMPLEMENTATIONS; i++)
+ {
+ if (g_FCDynamicallyAssignedImplementations[i] == (PCODE)target)
+ {
+ cache = target;
+ return;
+ }
+ }
+
+ _ASSERTE(!"Could not find FCall implementation in ECall.cpp");
+}
+
+void HCallAssert(void*& cache, void* target)
+{
+ CONTRACTL
+ {
+ SO_TOLERANT; // STATIC_CONTRACT_DEBUG_ONLY
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ if (cache != 0)
+ cache = ECall::MapTargetBackToMethod((PCODE)target);
+ _ASSERTE(cache == 0 || "Use FCIMPL for fcalls");
+}
+
+#endif // _DEBUG
+
+#endif // !DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void ECall::EnumFCallMethods()
+{
+ SUPPORTS_DAC;
+ gLowestFCall.EnumMem();
+ gHighestFCall.EnumMem();
+ gFCallMethods.EnumMem();
+
+ // save all ECFunc for stackwalks.
+ // TODO: we could be smarter and only save buckets referenced during stackwalks. But we
+ // need that entire bucket so that traversals such as MethodDesc* ECall::MapTargetBackToMethod will work.
+ for (UINT i=0;i<FCALL_HASH_SIZE;i++)
+ {
+ ECHash *ecHash = gFCallMethods[i];
+ while (ecHash)
+ {
+ // If we can't read the target memory, stop immediately so we don't work
+ // with broken data.
+ if (!DacEnumMemoryRegion(dac_cast<TADDR>(ecHash), sizeof(ECHash)))
+ break;
+ ecHash = ecHash->m_pNext;
+
+#if defined (_DEBUG)
+ // Test hook: when testing on debug builds, we want an easy way to verify that the while
+ // loop correctly terminates in the face of ridiculous stuff from the target.
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DumpGeneration_IntentionallyCorruptDataFromTarget) == 1)
+ {
+ // Force us to struggle on with something bad.
+ if (!ecHash)
+ {
+ ecHash = (ECHash *)(((unsigned char *)&gFCallMethods[i])+1);
+ }
+ }
+#endif // defined (_DEBUG)
+
+ }
+ }
+}
+
+#endif // DACCESS_COMPILE
diff --git a/src/vm/ecall.h b/src/vm/ecall.h
new file mode 100644
index 0000000000..7d0b3c08d5
--- /dev/null
+++ b/src/vm/ecall.h
@@ -0,0 +1,143 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ECALL.H -
+//
+// Handles our private native calling interface.
+//
+
+
+
+
+#ifndef _ECALL_H_
+#define _ECALL_H_
+
+#include "fcall.h"
+
+class MethodDesc;
+
+#ifndef FEATURE_CORECLR
+// Every program tends to use only a subset of ~1000 FCalls. Even big apps like
+// VS do not usually hit more than 300. Pick a size of the hashtable that's sufficient
+// for the typical case. It is ok to have some collisions in the rare case. Note that
+// the size of the table should be prime.
+#define FCALL_HASH_SIZE 257
+#else
+// CoreCLR defines fewer FCalls so make the hashtable even smaller.
+#define FCALL_HASH_SIZE 127
+#endif
+
+typedef DPTR(struct ECHash) PTR_ECHash;
+
+struct ECHash
+{
+ PTR_ECHash m_pNext;
+ PCODE m_pImplementation;
+ PTR_MethodDesc m_pMD; // for reverse mapping
+};
+
+#ifdef DACCESS_COMPILE
+GVAL_DECL(TADDR, gLowestFCall);
+GVAL_DECL(TADDR, gHighestFCall);
+GARY_DECL(PTR_ECHash, gFCallMethods, FCALL_HASH_SIZE);
+#endif
+
+enum {
+ FCFuncFlag_EndOfArray = 0x01,
+ FCFuncFlag_HasSignature = 0x02,
+ FCFuncFlag_Unreferenced = 0x04, // Suppress unused fcall check
+ FCFuncFlag_QCall = 0x08, // QCall - mscorlib.dll to mscorwks.dll transition implemented as PInvoke
+};
+
+struct ECFunc {
+ UINT_PTR m_dwFlags;
+
+ LPVOID m_pImplementation;
+
+ LPCSTR m_szMethodName;
+ LPHARDCODEDMETASIG m_pMethodSig; // Optional field. It is valid only if HasSignature() is set.
+
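+ // Layout of m_dwFlags, as implied by the accessors below:
+ //   bits  7..0  - the FCFuncFlag_* bits above
+ //   bits 23..16 - CorInfoIntrinsics intrinsic ID, stored as a signed byte
+ //   bits 31..24 - dynamic FCall ID, stored as a signed byte (0xFF means InvalidDynamicFCallId)
+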
+ bool IsEndOfArray() { LIMITED_METHOD_CONTRACT; return !!(m_dwFlags & FCFuncFlag_EndOfArray); }
+ bool HasSignature() { LIMITED_METHOD_CONTRACT; return !!(m_dwFlags & FCFuncFlag_HasSignature); }
+ bool IsUnreferenced(){ LIMITED_METHOD_CONTRACT; return !!(m_dwFlags & FCFuncFlag_Unreferenced); }
+ bool IsQCall() { LIMITED_METHOD_CONTRACT; return !!(m_dwFlags & FCFuncFlag_QCall); }
+ CorInfoIntrinsics IntrinsicID() { LIMITED_METHOD_CONTRACT; return (CorInfoIntrinsics)((INT8)(m_dwFlags >> 16)); }
+ int DynamicID() { LIMITED_METHOD_CONTRACT; return (int) ((INT8)(m_dwFlags >> 24)); }
+
+ ECFunc* NextInArray()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (ECFunc*)((BYTE*)this +
+ (HasSignature() ? sizeof(ECFunc) : offsetof(ECFunc, m_pMethodSig)));
+ }
+};
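+
+// Note that ECFunc entries are variable-length: an entry without a hardcoded signature
+// omits the trailing m_pMethodSig field, which is why the tables are walked with
+// NextInArray() rather than plain array indexing.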
+
+struct ECClass
+{
+ LPCSTR m_szClassName;
+ LPCSTR m_szNameSpace;
+ const LPVOID * m_pECFunc;
+};
+
+//=======================================================================
+// Collects code and data pertaining to the ECall interface.
+//=======================================================================
+class ECall
+{
+ public:
+ //---------------------------------------------------------
+ // One-time init
+ //---------------------------------------------------------
+ static void Init();
+
+ static PCODE GetFCallImpl(MethodDesc* pMD, BOOL * pfSharedOrDynamicFCallImpl = NULL);
+ static MethodDesc* MapTargetBackToMethod(PCODE pTarg, PCODE * ppAdjustedEntryPoint = NULL);
+ static DWORD GetIDForMethod(MethodDesc *pMD);
+ static CorInfoIntrinsics GetIntrinsicID(MethodDesc *pMD);
+
+ // Some fcalls (delegate ctors and tlbimp ctors) share one implementation.
+ // We should never patch the vtable for these since there is a 1:N mapping between
+ // the implementation and the MethodDescs.
+ static BOOL IsSharedFCallImpl(PCODE pImpl);
+
+ static BOOL CheckUnusedECalls(SetSHash<DWORD>& usedIDs);
+
+ static void DynamicallyAssignFCallImpl(PCODE impl, DWORD index);
+
+ static void PopulateManagedStringConstructors();
+#ifdef DACCESS_COMPILE
+ // Enumerates all gFCallMethods for minidumps.
+ static void EnumFCallMethods();
+#endif // DACCESS_COMPILE
+
+#define DYNAMICALLY_ASSIGNED_FCALLS() \
+ DYNAMICALLY_ASSIGNED_FCALL_IMPL(FastAllocateString, FramedAllocateString) \
+ DYNAMICALLY_ASSIGNED_FCALL_IMPL(CtorCharArrayManaged, NULL) \
+ DYNAMICALLY_ASSIGNED_FCALL_IMPL(CtorCharArrayStartLengthManaged, NULL) \
+ DYNAMICALLY_ASSIGNED_FCALL_IMPL(CtorCharCountManaged, NULL) \
+ DYNAMICALLY_ASSIGNED_FCALL_IMPL(CtorCharPtrManaged, NULL) \
+ DYNAMICALLY_ASSIGNED_FCALL_IMPL(CtorCharPtrStartLengthManaged, NULL) \
+ DYNAMICALLY_ASSIGNED_FCALL_IMPL(InternalGetCurrentThread, NULL) \
+
+ enum
+ {
+ #undef DYNAMICALLY_ASSIGNED_FCALL_IMPL
+ #define DYNAMICALLY_ASSIGNED_FCALL_IMPL(id,defaultimpl) id,
+
+ DYNAMICALLY_ASSIGNED_FCALLS()
+
+ NUM_DYNAMICALLY_ASSIGNED_FCALL_IMPLEMENTATIONS,
+ InvalidDynamicFCallId = -1
+ };
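+
+ // With the id-only expansion above this comes out as FastAllocateString == 0,
+ // CtorCharArrayManaged == 1, ..., InternalGetCurrentThread == 6, and
+ // NUM_DYNAMICALLY_ASSIGNED_FCALL_IMPLEMENTATIONS == 7.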
+
+
+ static LPVOID GetQCallImpl(MethodDesc * pMD);
+};
+
+#ifdef FEATURE_COMINTEROP
+extern "C" FCDECL1(VOID, FCComCtor, LPVOID pV);
+#endif
+
+#endif // _ECALL_H_
diff --git a/src/vm/ecalllist.h b/src/vm/ecalllist.h
new file mode 100644
index 0000000000..8ebec6487a
--- /dev/null
+++ b/src/vm/ecalllist.h
@@ -0,0 +1,2478 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ECallList.H
+//
+// This file contains definitions of FCall entrypoints
+//
+
+
+
+
+#ifndef FCFuncElement
+#define FCFuncElement(name, impl)
+#endif
+
+#ifndef FCFuncElementSig
+#define FCFuncElementSig(name,sig,impl)
+#endif
+
+#ifndef FCIntrinsic
+#define FCIntrinsic(name,impl,intrinsicID)
+#endif
+
+#ifndef FCIntrinsicSig
+#define FCIntrinsicSig(name,sig,impl,intrinsicID)
+#endif
+
+#ifndef QCFuncElement
+#define QCFuncElement(name,impl)
+#endif
+
+#ifndef FCDynamic
+#define FCDynamic(name,intrinsicID,dynamicID)
+#endif
+
+#ifndef FCDynamicSig
+#define FCDynamicSig(name,sig,intrinsicID,dynamicID)
+#endif
+
+#ifndef FCUnreferenced
+#define FCUnreferenced
+#endif
+
+#ifndef FCFuncStart
+#define FCFuncStart(name)
+#endif
+
+#ifndef FCFuncEnd
+#define FCFuncEnd()
+#endif
+
+#ifndef FCClassElement
+#define FCClassElement(name,namespace,funcs)
+#endif
+
+//
+//
+// Entrypoint definitions
+//
+//
+
+#ifdef FEATURE_REMOTING
+FCFuncStart(gMarshalByRefFuncs)
+ FCFuncElement("GetComIUnknown", RemotingNative::GetComIUnknown)
+FCFuncEnd()
+
+FCFuncStart(gRemotingFuncs)
+ FCFuncElement("IsTransparentProxy", RemotingNative::FCIsTransparentProxy)
+ FCFuncElement("GetRealProxy", RemotingNative::FCGetRealProxy)
+ FCFuncElement("Unwrap", RemotingNative::FCUnwrap)
+ FCFuncElement("AlwaysUnwrap", RemotingNative::FCAlwaysUnwrap)
+ FCFuncElement("CheckCast", RemotingNative::NativeCheckCast)
+ FCFuncElement("nSetRemoteActivationConfigured", RemotingNative::SetRemotingConfiguredFlag)
+
+ FCFuncElement("CORProfilerTrackRemoting", ProfilingFCallHelper::FC_TrackRemoting)
+ FCFuncElement("CORProfilerTrackRemotingCookie", ProfilingFCallHelper::FC_TrackRemotingCookie)
+ FCFuncElement("CORProfilerTrackRemotingAsync", ProfilingFCallHelper::FC_TrackRemotingAsync)
+ FCFuncElement("CORProfilerRemotingClientSendingMessage", ProfilingFCallHelper::FC_RemotingClientSendingMessage)
+ FCFuncElement("CORProfilerRemotingClientReceivingReply", ProfilingFCallHelper::FC_RemotingClientReceivingReply)
+ FCFuncElement("CORProfilerRemotingServerReceivingMessage", ProfilingFCallHelper::FC_RemotingServerReceivingMessage)
+ FCFuncElement("CORProfilerRemotingServerSendingReply", ProfilingFCallHelper::FC_RemotingServerSendingReply)
+
+ FCFuncElement("CreateTransparentProxy", RemotingNative::CreateTransparentProxy)
+ FCFuncElement("AllocateUninitializedObject", RemotingNative::AllocateUninitializedObject)
+ FCFuncElement("CallDefaultCtor", RemotingNative::CallDefaultCtor)
+ FCFuncElement("AllocateInitializedObject", RemotingNative::AllocateInitializedObject)
+ FCFuncElement("ResetInterfaceCache", RemotingNative::ResetInterfaceCache)
+FCFuncEnd()
+
+FCFuncStart(gRealProxyFuncs)
+ FCFuncElement("SetStubData", CRealProxy::SetStubData)
+ FCFuncElement("GetStubData", CRealProxy::GetStubData)
+ FCFuncElement("GetStub", CRealProxy::GetStub)
+ FCFuncElement("GetDefaultStub", CRealProxy::GetDefaultStub)
+ FCFuncElement("GetProxiedType", CRealProxy::GetProxiedType)
+FCFuncEnd()
+
+FCFuncStart(gContextFuncs)
+ FCFuncElement("SetupInternalContext", Context::SetupInternalContext)
+ FCFuncElement("CleanupInternalContext", Context::CleanupInternalContext)
+ FCFuncElement("ExecuteCallBackInEE", Context::ExecuteCallBack)
+FCFuncEnd()
+#endif
+
+
+FCFuncStart(gDependentHandleFuncs)
+ FCFuncElement("nInitialize", DependentHandle::nInitialize)
+ FCFuncElement("nGetPrimary", DependentHandle::nGetPrimary)
+ FCFuncElement("nGetPrimaryAndSecondary", DependentHandle::nGetPrimaryAndSecondary)
+ FCFuncElement("nFree", DependentHandle::nFree)
+FCFuncEnd()
+
+#ifndef FEATURE_CORECLR
+FCFuncStart(gSizedRefHandleFuncs)
+ FCFuncElement("CreateSizedRef", SizedRefHandle::Initialize)
+ FCFuncElement("FreeSizedRef", SizedRefHandle::Free)
+ FCFuncElement("GetTargetOfSizedRef", SizedRefHandle::GetTarget)
+ FCFuncElement("GetApproximateSizeOfSizedRef", SizedRefHandle::GetApproximateSize)
+FCFuncEnd()
+#endif // !FEATURE_CORECLR
+
+#ifdef FEATURE_RWLOCK
+FCFuncStart(gRWLockFuncs)
+ FCFuncElement("AcquireReaderLockInternal", CRWLock::StaticAcquireReaderLockPublic)
+ FCFuncElement("AcquireWriterLockInternal", CRWLock::StaticAcquireWriterLockPublic)
+ FCFuncElement("ReleaseReaderLockInternal", CRWLock::StaticReleaseReaderLockPublic)
+ FCFuncElement("ReleaseWriterLockInternal", CRWLock::StaticReleaseWriterLockPublic)
+ FCFuncElement("FCallUpgradeToWriterLock", CRWLock::StaticDoUpgradeToWriterLockPublic)
+ FCFuncElement("DowngradeFromWriterLockInternal", CRWLock::StaticDowngradeFromWriterLock)
+ FCFuncElement("FCallReleaseLock", CRWLock::StaticDoReleaseLock)
+ FCFuncElement("RestoreLockInternal", CRWLock::StaticRestoreLockPublic)
+ FCFuncElement("PrivateGetIsReaderLockHeld", CRWLock::StaticIsReaderLockHeld)
+ FCFuncElement("PrivateGetIsWriterLockHeld", CRWLock::StaticIsWriterLockHeld)
+ FCFuncElement("PrivateGetWriterSeqNum", CRWLock::StaticGetWriterSeqNum)
+ FCFuncElement("AnyWritersSince", CRWLock::StaticAnyWritersSince)
+ FCFuncElement("PrivateInitialize", CRWLock::StaticPrivateInitialize)
+ FCFuncElement("PrivateDestruct", CRWLock::StaticPrivateDestruct)
+FCFuncEnd()
+#endif // FEATURE_RWLOCK
+
+#ifdef FEATURE_REMOTING
+FCFuncStart(gMessageFuncs)
+ FCFuncElement("InternalGetArgCount", CMessage::GetArgCount)
+ FCFuncElement("InternalHasVarArgs", CMessage::HasVarArgs)
+ FCFuncElement("InternalGetArg", CMessage::GetArg)
+ FCFuncElement("InternalGetArgs", CMessage::GetArgs)
+ FCFuncElement("PropagateOutParameters", CMessage::PropagateOutParameters)
+ FCFuncElement("GetReturnValue", CMessage::GetReturnValue)
+ FCFuncElement("GetAsyncBeginInfo", CMessage::GetAsyncBeginInfo)
+ FCFuncElement("GetAsyncResult", CMessage::GetAsyncResult)
+ FCFuncElement("GetThisPtr", CMessage::GetAsyncObject)
+ FCFuncElement("OutToUnmanagedDebugger", CMessage::DebugOut)
+ FCFuncElement("Dispatch", CMessage::Dispatch)
+FCFuncEnd()
+#endif //FEATURE_REMOTING
+
+#ifdef FEATURE_REMOTING
+FCFuncStart(gChannelServicesFuncs)
+ FCFuncElement("GetPrivateContextsPerfCounters", GetPrivateContextsPerfCountersEx)
+FCFuncEnd()
+#endif // FEATURE_REMOTING
+
+FCFuncStart(gEnumFuncs)
+ FCFuncElement("InternalGetUnderlyingType", ReflectionEnum::InternalGetEnumUnderlyingType)
+ FCFuncElement("InternalGetCorElementType", ReflectionEnum::InternalGetCorElementType)
+ QCFuncElement("GetEnumValuesAndNames", ReflectionEnum::GetEnumValuesAndNames)
+ FCFuncElement("InternalBoxEnum", ReflectionEnum::InternalBoxEnum)
+ FCFuncElement("Equals", ReflectionEnum::InternalEquals)
+ FCFuncElement("InternalCompareTo", ReflectionEnum::InternalCompareTo)
+ FCFuncElement("InternalHasFlag", ReflectionEnum::InternalHasFlag)
+FCFuncEnd()
+
+#ifdef FEATURE_REMOTING
+FCFuncStart(gStackBuilderSinkFuncs)
+ FCFuncElement("_PrivateProcessMessage", CStackBuilderSink::PrivateProcessMessage)
+FCFuncEnd()
+#endif
+
+#ifdef FEATURE_CORECLR
+FCFuncStart(gSymWrapperCodePunkSafeHandleFuncs)
+ FCFuncElement("nGetDReleaseTarget", COMPunkSafeHandle::nGetDReleaseTarget)
+FCFuncEnd()
+#endif //FEATURE_CORECLR
+
+FCFuncStart(gParseNumbersFuncs)
+ FCFuncElement("IntToString", ParseNumbers::IntToString)
+ FCFuncElement("LongToString", ParseNumbers::LongToString)
+ FCFuncElement("StringToInt", ParseNumbers::StringToInt)
+ FCFuncElement("StringToLong", ParseNumbers::StringToLong)
+FCFuncEnd()
+
+#ifndef FEATURE_CORECLR // FCalls used by System.TimeSpan
+FCFuncStart(gTimeSpanFuncs)
+ FCFuncElement("LegacyFormatMode", SystemNative::LegacyFormatMode)
+FCFuncEnd()
+#endif // !FEATURE_CORECLR
+
+#ifndef FEATURE_CORECLR // FCalls used by System.TimeZone
+FCFuncStart(gTimeZoneFuncs)
+ FCFuncElement("nativeGetTimeZoneMinuteOffset", COMNlsInfo::nativeGetTimeZoneMinuteOffset)
+ FCFuncElement("nativeGetStandardName", COMNlsInfo::nativeGetStandardName)
+ FCFuncElement("nativeGetDaylightName", COMNlsInfo::nativeGetDaylightName)
+ FCFuncElement("nativeGetDaylightChanges", COMNlsInfo::nativeGetDaylightChanges)
+FCFuncEnd()
+#endif // !FEATURE_CORECLR
+
+FCFuncStart(gObjectFuncs)
+ FCIntrinsic("GetType", ObjectNative::GetClass, CORINFO_INTRINSIC_Object_GetType)
+ FCFuncElement("MemberwiseClone", ObjectNative::Clone)
+FCFuncEnd()
+
+FCFuncStart(gStringFuncs)
+ FCDynamic("FastAllocateString", CORINFO_INTRINSIC_Illegal, ECall::FastAllocateString)
+ FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_ArrChar_RetVoid, CORINFO_INTRINSIC_Illegal, ECall::CtorCharArrayManaged)
+ FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_ArrChar_Int_Int_RetVoid, CORINFO_INTRINSIC_Illegal, ECall::CtorCharArrayStartLengthManaged)
+ FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrChar_RetVoid, CORINFO_INTRINSIC_Illegal, ECall::CtorCharPtrManaged)
+ FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrChar_Int_Int_RetVoid, CORINFO_INTRINSIC_Illegal, ECall::CtorCharPtrStartLengthManaged)
+ FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_Char_Int_RetVoid, CORINFO_INTRINSIC_Illegal, ECall::CtorCharCountManaged)
+ FCFuncElementSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrSByt_RetVoid, COMString::StringInitCharPtr)
+ FCFuncElementSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrSByt_Int_Int_RetVoid, COMString::StringInitCharPtrPartial)
+#ifndef FEATURE_CORECLR
+ FCFuncElementSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrSByt_Int_Int_Encoding_RetVoid, COMString::StringInitSBytPtrPartialEx)
+ FCFuncElement("IsFastSort", COMString::IsFastSort)
+#endif // !FEATURE_CORECLR
+ FCFuncElement("nativeCompareOrdinalIgnoreCaseWC", COMString::FCCompareOrdinalIgnoreCaseWC)
+ FCIntrinsic("get_Length", COMString::Length, CORINFO_INTRINSIC_StringLength)
+ FCIntrinsic("get_Chars", COMString::GetCharAt, CORINFO_INTRINSIC_StringGetChar)
+ FCFuncElement("IsAscii", COMString::IsAscii)
+ FCFuncElement("nativeCompareOrdinalEx", COMString::CompareOrdinalEx)
+ FCFuncElement("IndexOf", COMString::IndexOfChar)
+ FCFuncElement("IndexOfAny", COMString::IndexOfCharArray)
+ FCFuncElement("LastIndexOf", COMString::LastIndexOfChar)
+ FCFuncElement("LastIndexOfAny", COMString::LastIndexOfCharArray)
+ FCFuncElementSig("ReplaceInternal", &gsig_IM_Char_Char_RetStr, COMString::Replace)
+ FCFuncElementSig("ReplaceInternal", &gsig_IM_Str_Str_RetStr, COMString::ReplaceString)
+ FCFuncElement("PadHelper", COMString::PadHelper)
+#ifdef FEATURE_COMINTEROP
+ FCFuncElement("SetTrailByte", COMString::FCSetTrailByte)
+ FCFuncElement("TryGetTrailByte", COMString::FCTryGetTrailByte)
+#endif // FEATURE_COMINTEROP
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+ FCFuncElement("InternalMarvin32HashString", COMString::Marvin32HashString)
+ QCFuncElement("InternalUseRandomizedHashing", COMString::UseRandomizedHashing)
+#endif // FEATURE_RANDOMIZED_STRING_HASHING
+FCFuncEnd()
+
+FCFuncStart(gStringBufferFuncs)
+ FCFuncElement("ReplaceBufferInternal", COMStringBuffer::ReplaceBufferInternal)
+ FCFuncElement("ReplaceBufferAnsiInternal", COMStringBuffer::ReplaceBufferAnsiInternal)
+FCFuncEnd()
+
+FCFuncStart(gValueTypeFuncs)
+ FCFuncElement("CanCompareBits", ValueTypeHelper::CanCompareBits)
+ FCFuncElement("FastEqualsCheck", ValueTypeHelper::FastEqualsCheck)
+ FCFuncElement("GetHashCode", ValueTypeHelper::GetHashCode)
+ FCFuncElement("GetHashCodeOfPtr", ValueTypeHelper::GetHashCodeOfPtr)
+FCFuncEnd()
+
+FCFuncStart(gDiagnosticsDebugger)
+ FCFuncElement("BreakInternal", DebugDebugger::Break)
+ FCFuncElement("LaunchInternal", DebugDebugger::Launch)
+ FCFuncElement("get_IsAttached", DebugDebugger::IsDebuggerAttached)
+ FCFuncElement("Log", DebugDebugger::Log)
+ FCFuncElement("IsLogging", DebugDebugger::IsLogging)
+ FCFuncElement("CustomNotification", DebugDebugger::CustomNotification)
+FCFuncEnd()
+
+FCFuncStart(gDiagnosticsStackTrace)
+ FCFuncElement("GetStackFramesInternal", DebugStackTrace::GetStackFramesInternal)
+FCFuncEnd()
+
+FCFuncStart(gDiagnosticsLog)
+ FCFuncElement("AddLogSwitch", Log::AddLogSwitch)
+ FCFuncElement("ModifyLogSwitch", Log::ModifyLogSwitch)
+FCFuncEnd()
+
+FCFuncStart(gDiagnosticsAssert)
+ FCFuncElement("ShowDefaultAssertDialog", DebuggerAssert::ShowDefaultAssertDialog)
+FCFuncEnd()
+
+FCFuncStart(gDateTimeFuncs)
+ FCFuncElement("GetSystemTimeAsFileTime", SystemNative::__GetSystemTimeAsFileTime)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("LegacyParseMode", SystemNative::LegacyDateTimeParseMode)
+ QCFuncElement("EnableAmPmParseAdjustment", SystemNative::EnableAmPmParseAdjustment)
+#endif
+FCFuncEnd()
+
+FCFuncStart(gEnvironmentFuncs)
+ FCFuncElement("GetVersion", SystemNative::GetOSVersion)
+ FCFuncElement("GetVersionEx", SystemNative::GetOSVersionEx)
+ FCFuncElement("get_TickCount", SystemNative::GetTickCount)
+ QCFuncElement("_Exit", SystemNative::Exit)
+ FCFuncElement("set_ExitCode", SystemNative::SetExitCode)
+ FCFuncElement("get_ExitCode", SystemNative::GetExitCode)
+ FCFuncElement("get_HasShutdownStarted", SystemNative::HasShutdownStarted)
+ QCFuncElement("GetProcessorCount", SystemNative::GetProcessorCount)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("GetWorkingSet", SystemNative::GetWorkingSet)
+ FCFuncElement("nativeGetEnvironmentVariable", SystemNative::_GetEnvironmentVariable)
+ FCFuncElement("GetCompatibilityFlag", SystemNative::_GetCompatibilityFlag)
+ QCFuncElement("GetCommandLine", SystemNative::_GetCommandLine)
+ FCFuncElement("GetCommandLineArgsNative", SystemNative::GetCommandLineArgs)
+ FCFuncElement("GetResourceFromDefault", GetResourceFromDefault)
+#endif // !FEATURE_CORECLR
+
+#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORESYSTEM)
+ QCFuncElement("WinRTSupported", SystemNative::WinRTSupported)
+#endif // FEATURE_COMINTEROP && !FEATURE_CORESYSTEM
+ FCFuncElementSig("FailFast", &gsig_SM_Str_RetVoid, SystemNative::FailFast)
+#ifndef FEATURE_CORECLR
+ FCFuncElementSig("FailFast", &gsig_SM_Str_Uint_RetVoid, SystemNative::FailFastWithExitCode)
+#endif
+ FCFuncElementSig("FailFast", &gsig_SM_Str_Exception_RetVoid, SystemNative::FailFastWithException)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("GetIsCLRHosted", SystemNative::IsCLRHosted)
+ QCFuncElement("TriggerCodeContractFailure", SystemNative::TriggerCodeContractFailure)
+#endif // !FEATURE_CORECLR
+FCFuncEnd()
+
+FCFuncStart(gRuntimeEnvironmentFuncs)
+ FCFuncElement("GetModuleFileName", SystemNative::_GetModuleFileName)
+ FCFuncElement("GetRuntimeDirectoryImpl", SystemNative::GetRuntimeDirectory)
+#ifdef FEATURE_FUSION
+ FCFuncElement("GetDeveloperPath", SystemNative::GetDeveloperPath)
+ FCFuncElement("GetHostBindingFile", SystemNative::GetHostBindingFile)
+#endif // FEATURE_FUSION
+#ifndef FEATURE_CORECLR
+ QCFuncElement("_GetSystemVersion", SystemNative::_GetSystemVersion)
+#endif
+#if defined(FEATURE_CLASSIC_COMINTEROP) && !defined(FEATURE_CORECLR)
+ QCFuncElement("GetRuntimeInterfaceImpl", SystemNative::GetRuntimeInterfaceImpl)
+#endif
+FCFuncEnd()
+
+FCFuncStart(gSerializationFuncs)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("GetEnableUnsafeTypeForwarders", ReflectionSerialization::GetEnableUnsafeTypeForwarders)
+ FCFuncElement("nativeGetSafeUninitializedObject", ReflectionSerialization::GetSafeUninitializedObject)
+#endif
+ FCFuncElement("nativeGetUninitializedObject", ReflectionSerialization::GetUninitializedObject)
+FCFuncEnd()
+
+FCFuncStart(gExceptionFuncs)
+ FCFuncElement("IsImmutableAgileException", ExceptionNative::IsImmutableAgileException)
+ FCFuncElement("nIsTransient", ExceptionNative::IsTransient)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("GetMethodFromStackTrace", SystemNative::GetMethodFromStackTrace)
+ FCFuncElement("StripFileInfo", ExceptionNative::StripFileInfo)
+#endif
+ QCFuncElement("GetMessageFromNativeResources", ExceptionNative::GetMessageFromNativeResources)
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ FCFuncElement("PrepareForForeignExceptionRaise", ExceptionNative::PrepareForForeignExceptionRaise)
+ FCFuncElement("CopyStackTrace", ExceptionNative::CopyStackTrace)
+ FCFuncElement("CopyDynamicMethods", ExceptionNative::CopyDynamicMethods)
+ FCFuncElement("GetStackTracesDeepCopy", ExceptionNative::GetStackTracesDeepCopy)
+ FCFuncElement("SaveStackTracesFromDeepCopy", ExceptionNative::SaveStackTracesFromDeepCopy)
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+FCFuncEnd()
+
+FCFuncStart(gSafeHandleFuncs)
+ FCFuncElement("InternalDispose", SafeHandle::DisposeNative)
+ FCFuncElement("InternalFinalize", SafeHandle::Finalize)
+ FCFuncElement("SetHandleAsInvalid", SafeHandle::SetHandleAsInvalid)
+ FCFuncElement("DangerousAddRef", SafeHandle::DangerousAddRef)
+ FCFuncElement("DangerousRelease", SafeHandle::DangerousRelease)
+FCFuncEnd()
+
+FCFuncStart(gCriticalHandleFuncs)
+ FCFuncElement("FireCustomerDebugProbe", CriticalHandle::FireCustomerDebugProbe)
+FCFuncEnd()
+
+FCFuncStart(gSafeBufferFuncs)
+ FCFuncElement("PtrToStructureNative", SafeBuffer::PtrToStructure)
+ FCFuncElement("StructureToPtrNative", SafeBuffer::StructureToPtr)
+FCFuncEnd()
+
+#ifndef FEATURE_CORECLR
+FCFuncStart(gNormalizationFuncs)
+ FCFuncElement("nativeNormalizationIsNormalizedString", COMNlsInfo::nativeNormalizationIsNormalizedString)
+ FCFuncElement("nativeNormalizationNormalizeString", COMNlsInfo::nativeNormalizationNormalizeString)
+ QCFuncElement("nativeNormalizationInitNormalization", COMNlsInfo::nativeNormalizationInitNormalization)
+FCFuncEnd()
+#endif // !FEATURE_CORECLR
+
+FCFuncStart(gTypedReferenceFuncs)
+ FCFuncElement("InternalToObject", ReflectionInvocation::TypedReferenceToObject)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("InternalSetTypedReference", ReflectionInvocation::SetTypedReference)
+ FCFuncElement("InternalMakeTypedReference", ReflectionInvocation::MakeTypedReference)
+#endif
+FCFuncEnd()
+
+FCFuncStart(gSystem_Type)
+ FCIntrinsic("GetTypeFromHandle", RuntimeTypeHandle::GetTypeFromHandle, CORINFO_INTRINSIC_GetTypeFromHandle)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("GetTypeFromHandleUnsafe", RuntimeTypeHandle::GetRuntimeType)
+ FCIntrinsic("op_Equality", RuntimeTypeHandle::TypeEQ, CORINFO_INTRINSIC_TypeEQ)
+ FCIntrinsic("op_Inequality", RuntimeTypeHandle::TypeNEQ, CORINFO_INTRINSIC_TypeNEQ)
+#endif // !FEATURE_CORECLR
+FCFuncEnd()
+
+FCFuncStart(gSystem_RuntimeType)
+ FCFuncElement("GetGUID", ReflectionInvocation::GetGUID)
+ FCFuncElement("_CreateEnum", ReflectionInvocation::CreateEnum)
+ FCFuncElement("CanValueSpecialCast", ReflectionInvocation::CanValueSpecialCast)
+ FCFuncElement("AllocateValueType", ReflectionInvocation::AllocateValueType)
+#if defined(FEATURE_COMINTEROP)
+ FCFuncElement("GetTypeFromCLSIDImpl", ReflectionInvocation::GetClassFromCLSID)
+#if !defined(FEATURE_CORECLR)
+ FCFuncElement("GetTypeFromProgIDImpl", ReflectionInvocation::GetClassFromProgID)
+ FCFuncElement("InvokeDispMethod", ReflectionInvocation::InvokeDispMethod)
+#endif
+#ifdef FEATURE_COMINTEROP_WINRT_MANAGED_ACTIVATION
+ FCFuncElement("IsTypeExportedToWindowsRuntime", RuntimeTypeHandle::IsTypeExportedToWindowsRuntime)
+#endif
+ FCFuncElement("IsWindowsRuntimeObjectType", RuntimeTypeHandle::IsWindowsRuntimeObjectType)
+#endif // defined(FEATURE_COMINTEROP)
+FCFuncEnd()
+
+FCFuncStart(gJitHelpers)
+ FCFuncElement("UnsafeSetArrayElement", JitHelpers::UnsafeSetArrayElement)
+#ifdef _DEBUG
+ FCFuncElement("IsAddressInStack", ReflectionInvocation::IsAddressInStack)
+#endif
+FCFuncEnd()
+
+FCFuncStart(gCOMTypeHandleFuncs)
+ FCFuncElement("CreateInstance", RuntimeTypeHandle::CreateInstance)
+ FCFuncElement("CreateCaInstance", RuntimeTypeHandle::CreateCaInstance)
+ FCFuncElement("CreateInstanceForAnotherGenericParameter", RuntimeTypeHandle::CreateInstanceForGenericType)
+ QCFuncElement("GetGCHandle", RuntimeTypeHandle::GetGCHandle)
+
+ FCFuncElement("IsInstanceOfType", RuntimeTypeHandle::IsInstanceOfType)
+ FCFuncElement("GetDeclaringMethod", RuntimeTypeHandle::GetDeclaringMethod)
+ FCFuncElement("GetDeclaringType", RuntimeTypeHandle::GetDeclaringType)
+ QCFuncElement("GetDefaultConstructor", RuntimeTypeHandle::GetDefaultConstructor)
+ QCFuncElement("MakePointer", RuntimeTypeHandle::MakePointer)
+ QCFuncElement("MakeByRef", RuntimeTypeHandle::MakeByRef)
+ QCFuncElement("MakeSZArray", RuntimeTypeHandle::MakeSZArray)
+ QCFuncElement("MakeArray", RuntimeTypeHandle::MakeArray)
+ QCFuncElement("IsCollectible", RuntimeTypeHandle::IsCollectible)
+ FCFuncElement("GetFirstIntroducedMethod", RuntimeTypeHandle::GetFirstIntroducedMethod)
+ FCFuncElement("GetNextIntroducedMethod", RuntimeTypeHandle::GetNextIntroducedMethod)
+ FCFuncElement("GetCorElementType", RuntimeTypeHandle::GetCorElementType)
+ FCFuncElement("GetAssembly", RuntimeTypeHandle::GetAssembly)
+ FCFuncElement("GetModule", RuntimeTypeHandle::GetModule)
+ FCFuncElement("GetBaseType", RuntimeTypeHandle::GetBaseType)
+ FCFuncElement("GetElementType", RuntimeTypeHandle::GetElementType)
+ FCFuncElement("GetArrayRank", RuntimeTypeHandle::GetArrayRank)
+ FCFuncElement("GetToken", RuntimeTypeHandle::GetToken)
+ FCFuncElement("_GetUtf8Name", RuntimeTypeHandle::GetUtf8Name)
+ FCFuncElement("GetMethodAt", RuntimeTypeHandle::GetMethodAt)
+ FCFuncElement("GetFields", RuntimeTypeHandle::GetFields)
+ FCFuncElement("GetInterfaces", RuntimeTypeHandle::GetInterfaces)
+ QCFuncElement("GetConstraints", RuntimeTypeHandle::GetConstraints)
+ FCFuncElement("GetAttributes", RuntimeTypeHandle::GetAttributes)
+ FCFuncElement("_GetMetadataImport", RuntimeTypeHandle::GetMetadataImport)
+ FCFuncElement("GetNumVirtuals", RuntimeTypeHandle::GetNumVirtuals)
+ QCFuncElement("VerifyInterfaceIsImplemented", RuntimeTypeHandle::VerifyInterfaceIsImplemented)
+ QCFuncElement("GetInterfaceMethodImplementationSlot", RuntimeTypeHandle::GetInterfaceMethodImplementationSlot)
+ FCFuncElement("IsComObject", RuntimeTypeHandle::IsComObject)
+#ifdef FEATURE_REMOTING
+ FCFuncElement("HasProxyAttribute", RuntimeTypeHandle::HasProxyAttribute)
+ FCFuncElement("IsContextful", RuntimeTypeHandle::IsContextful)
+#endif
+ FCFuncElement("IsValueType", RuntimeTypeHandle::IsValueType)
+ FCFuncElement("IsInterface", RuntimeTypeHandle::IsInterface)
+ QCFuncElement("IsSecurityCritical", RuntimeTypeHandle::IsSecurityCritical)
+ QCFuncElement("IsSecuritySafeCritical", RuntimeTypeHandle::IsSecuritySafeCritical)
+ QCFuncElement("IsSecurityTransparent", RuntimeTypeHandle::IsSecurityTransparent)
+ QCFuncElement("_IsVisible", RuntimeTypeHandle::IsVisible)
+ QCFuncElement("ConstructName", RuntimeTypeHandle::ConstructName)
+ FCFuncElement("CanCastTo", RuntimeTypeHandle::CanCastTo)
+ QCFuncElement("GetTypeByName", RuntimeTypeHandle::GetTypeByName)
+ QCFuncElement("GetTypeByNameUsingCARules", RuntimeTypeHandle::GetTypeByNameUsingCARules)
+ QCFuncElement("GetInstantiation", RuntimeTypeHandle::GetInstantiation)
+ QCFuncElement("Instantiate", RuntimeTypeHandle::Instantiate)
+ QCFuncElement("GetGenericTypeDefinition", RuntimeTypeHandle::GetGenericTypeDefinition)
+ FCFuncElement("HasInstantiation", RuntimeTypeHandle::HasInstantiation)
+ FCFuncElement("GetGenericVariableIndex", RuntimeTypeHandle::GetGenericVariableIndex)
+ FCFuncElement("IsGenericVariable", RuntimeTypeHandle::IsGenericVariable)
+ FCFuncElement("IsGenericTypeDefinition", RuntimeTypeHandle::IsGenericTypeDefinition)
+ FCFuncElement("ContainsGenericVariables", RuntimeTypeHandle::ContainsGenericVariables)
+ FCFuncElement("SatisfiesConstraints", RuntimeTypeHandle::SatisfiesConstraints)
+ FCFuncElement("Allocate", RuntimeTypeHandle::Allocate) // for Activator.CreateInstance
+ FCFuncElement("CompareCanonicalHandles", RuntimeTypeHandle::CompareCanonicalHandles)
+ FCIntrinsic("GetValueInternal", RuntimeTypeHandle::GetValueInternal, CORINFO_INTRINSIC_RTH_GetValueInternal)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("IsEquivalentTo", RuntimeTypeHandle::IsEquivalentTo)
+ FCFuncElement("IsEquivalentType", RuntimeTypeHandle::IsEquivalentType)
+#endif // !FEATURE_CORECLR
+FCFuncEnd()
+
+FCFuncStart(gMetaDataImport)
+ FCFuncElement("_GetDefaultValue", MetaDataImport::GetDefaultValue)
+ FCFuncElement("_GetName", MetaDataImport::GetName)
+ FCFuncElement("_GetUserString", MetaDataImport::GetUserString)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("_GetScopeProps", MetaDataImport::GetScopeProps)
+ FCFuncElement("_GetClassLayout", MetaDataImport::GetClassLayout)
+ FCFuncElement("_GetSignatureFromToken", MetaDataImport::GetSignatureFromToken)
+#endif // !FEATURE_CORECLR
+ FCFuncElement("_GetNamespace", MetaDataImport::GetNamespace)
+ FCFuncElement("_GetEventProps", MetaDataImport::GetEventProps)
+ FCFuncElement("_GetFieldDefProps", MetaDataImport::GetFieldDefProps)
+ FCFuncElement("_GetPropertyProps", MetaDataImport::GetPropertyProps)
+ FCFuncElement("_GetParentToken", MetaDataImport::GetParentToken)
+ FCFuncElement("_GetParamDefProps", MetaDataImport::GetParamDefProps)
+ FCFuncElement("_GetGenericParamProps", MetaDataImport::GetGenericParamProps)
+
+ FCFuncElement("_Enum", MetaDataImport::Enum)
+ FCFuncElement("_GetMemberRefProps", MetaDataImport::GetMemberRefProps)
+ FCFuncElement("_GetCustomAttributeProps", MetaDataImport::GetCustomAttributeProps)
+ FCFuncElement("_GetFieldOffset", MetaDataImport::GetFieldOffset)
+
+ FCFuncElement("_GetSigOfFieldDef", MetaDataImport::GetSigOfFieldDef)
+ FCFuncElement("_GetSigOfMethodDef", MetaDataImport::GetSigOfMethodDef)
+ FCFuncElement("_GetFieldMarshal", MetaDataImport::GetFieldMarshal)
+ FCFuncElement("_GetPInvokeMap", MetaDataImport::GetPinvokeMap)
+ FCFuncElement("_IsValidToken", MetaDataImport::IsValidToken)
+ FCFuncElement("_GetMarshalAs", MetaDataImport::GetMarshalAs)
+FCFuncEnd()
+
+FCFuncStart(gRuntimeFieldInfoFuncs)
+ FCFuncElement("PerformVisibilityCheckOnField", ReflectionInvocation::PerformVisibilityCheckOnField)
+FCFuncEnd()
+
+FCFuncStart(gSignatureNative)
+ FCFuncElement("GetSignature", SignatureNative::GetSignature)
+ FCFuncElement("GetCustomModifiers", SignatureNative::GetCustomModifiers)
+ FCFuncElement("CompareSig", SignatureNative::CompareSig)
+#if FEATURE_LEGACYNETCF
+ FCFuncElement("CompareSigForAppCompat", SignatureNative::CompareSigForAppCompat)
+#endif
+FCFuncEnd()
+
+FCFuncStart(gRuntimeMethodHandle)
+ QCFuncElement("ConstructInstantiation", RuntimeMethodHandle::ConstructInstantiation)
+ FCFuncElement("_GetCurrentMethod", RuntimeMethodHandle::GetCurrentMethod)
+#ifdef FEATURE_SERIALIZATION
+ FCFuncElement("SerializationInvoke", RuntimeMethodHandle::SerializationInvoke)
+#endif // FEATURE_SERIALIZATION
+ FCFuncElement("InvokeMethod", RuntimeMethodHandle::InvokeMethod)
+ QCFuncElement("GetFunctionPointer", RuntimeMethodHandle::GetFunctionPointer)
+ FCFuncElement("GetImplAttributes", RuntimeMethodHandle::GetImplAttributes)
+ FCFuncElement("GetAttributes", RuntimeMethodHandle::GetAttributes)
+ FCFuncElement("GetDeclaringType", RuntimeMethodHandle::GetDeclaringType)
+ FCFuncElement("GetSlot", RuntimeMethodHandle::GetSlot)
+ FCFuncElement("GetMethodDef", RuntimeMethodHandle::GetMethodDef)
+ FCFuncElement("GetName", RuntimeMethodHandle::GetName)
+ FCFuncElement("_GetUtf8Name", RuntimeMethodHandle::GetUtf8Name)
+ FCFuncElement("MatchesNameHash", RuntimeMethodHandle::MatchesNameHash)
+ QCFuncElement("GetMethodInstantiation", RuntimeMethodHandle::GetMethodInstantiation)
+ FCFuncElement("HasMethodInstantiation", RuntimeMethodHandle::HasMethodInstantiation)
+ FCFuncElement("IsGenericMethodDefinition", RuntimeMethodHandle::IsGenericMethodDefinition)
+ FCFuncElement("IsTypicalMethodDefinition", RuntimeMethodHandle::IsTypicalMethodDefinition)
+ QCFuncElement("GetTypicalMethodDefinition", RuntimeMethodHandle::GetTypicalMethodDefinition)
+ QCFuncElement("StripMethodInstantiation", RuntimeMethodHandle::StripMethodInstantiation)
+ FCFuncElement("GetStubIfNeeded", RuntimeMethodHandle::GetStubIfNeeded)
+ FCFuncElement("GetMethodFromCanonical", RuntimeMethodHandle::GetMethodFromCanonical)
+ FCFuncElement("IsDynamicMethod", RuntimeMethodHandle::IsDynamicMethod)
+ FCFuncElement("GetMethodBody", RuntimeMethodHandle::GetMethodBody)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("_IsTokenSecurityTransparent", RuntimeMethodHandle::IsTokenSecurityTransparent)
+ QCFuncElement("_IsSecurityCritical", RuntimeMethodHandle::IsSecurityCritical)
+ QCFuncElement("_IsSecuritySafeCritical", RuntimeMethodHandle::IsSecuritySafeCritical)
+#endif // !FEATURE_CORECLR
+ QCFuncElement("_IsSecurityTransparent", RuntimeMethodHandle::IsSecurityTransparent)
+ FCFuncElement("CheckLinktimeDemands", RuntimeMethodHandle::CheckLinktimeDemands)
+ QCFuncElement("IsCAVisibleFromDecoratedType", RuntimeMethodHandle::IsCAVisibleFromDecoratedType)
+ FCFuncElement("IsConstructor", RuntimeMethodHandle::IsConstructor)
+ QCFuncElement("Destroy", RuntimeMethodHandle::Destroy)
+ FCFuncElement("GetResolver", RuntimeMethodHandle::GetResolver)
+ QCFuncElement("GetCallerType", RuntimeMethodHandle::GetCallerType)
+ FCFuncElement("GetLoaderAllocator", RuntimeMethodHandle::GetLoaderAllocator)
+ FCFuncElement("GetSpecialSecurityFlags", ReflectionInvocation::GetSpecialSecurityFlags)
+ FCFuncElement("PerformSecurityCheck", ReflectionInvocation::PerformSecurityCheck)
+FCFuncEnd()
+
+FCFuncStart(gCOMDefaultBinderFuncs)
+ FCFuncElement("CanConvertPrimitive", ReflectionBinder::DBCanConvertPrimitive)
+ FCFuncElement("CanConvertPrimitiveObjectToType", ReflectionBinder::DBCanConvertObjectPrimitive)
+FCFuncEnd()
+
+
+FCFuncStart(gCOMFieldHandleNewFuncs)
+ FCFuncElement("GetValue", RuntimeFieldHandle::GetValue)
+ FCFuncElement("SetValue", RuntimeFieldHandle::SetValue)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("GetValueDirect", RuntimeFieldHandle::GetValueDirect)
+#endif
+#ifdef FEATURE_SERIALIZATION
+ FCFuncElement("SetValueDirect", RuntimeFieldHandle::SetValueDirect)
+#endif
+ FCFuncElement("GetName", RuntimeFieldHandle::GetName)
+ FCFuncElement("_GetUtf8Name", RuntimeFieldHandle::GetUtf8Name)
+ FCFuncElement("MatchesNameHash", RuntimeFieldHandle::MatchesNameHash)
+ FCFuncElement("GetAttributes", RuntimeFieldHandle::GetAttributes)
+ FCFuncElement("GetApproxDeclaringType", RuntimeFieldHandle::GetApproxDeclaringType)
+ FCFuncElement("GetToken", RuntimeFieldHandle::GetToken)
+ FCFuncElement("GetStaticFieldForGenericType", RuntimeFieldHandle::GetStaticFieldForGenericType)
+ QCFuncElement("IsSecurityCritical", RuntimeFieldHandle::IsSecurityCritical)
+ QCFuncElement("IsSecuritySafeCritical", RuntimeFieldHandle::IsSecuritySafeCritical)
+ QCFuncElement("IsSecurityTransparent", RuntimeFieldHandle::IsSecurityTransparent)
+ FCFuncElement("AcquiresContextFromThis", RuntimeFieldHandle::AcquiresContextFromThis)
+ QCFuncElement("CheckAttributeAccess", RuntimeFieldHandle::CheckAttributeAccess)
+FCFuncEnd()
+
+
+FCFuncStart(gCOMModuleFuncs)
+ QCFuncElement("GetType", COMModule::GetType)
+ QCFuncElement("GetScopeName", COMModule::GetScopeName)
+ FCFuncElement("GetTypes", COMModule::GetTypes)
+ QCFuncElement("GetFullyQualifiedName", COMModule::GetFullyQualifiedName)
+ QCFuncElement("nIsTransientInternal", COMModule::IsTransient)
+ FCFuncElement("IsResource", COMModule::IsResource)
+#if defined(FEATURE_X509) && defined(FEATURE_CAS_POLICY)
+ QCFuncElement("GetSignerCertificate", COMModule::GetSignerCertificate)
+#endif // defined(FEATURE_X509) && defined(FEATURE_CAS_POLICY)
+FCFuncEnd()
+
+FCFuncStart(gCOMModuleBuilderFuncs)
+ FCFuncElement("nCreateISymWriterForDynamicModule", COMModule::nCreateISymWriterForDynamicModule)
+ QCFuncElement("GetStringConstant", COMModule::GetStringConstant)
+ QCFuncElement("GetTypeRef", COMModule::GetTypeRef)
+ QCFuncElement("GetTokenFromTypeSpec", COMModule::GetTokenFromTypeSpec)
+ QCFuncElement("GetMemberRef", COMModule::GetMemberRef)
+ QCFuncElement("GetMemberRefOfMethodInfo", COMModule::GetMemberRefOfMethodInfo)
+ QCFuncElement("GetMemberRefOfFieldInfo", COMModule::GetMemberRefOfFieldInfo)
+ QCFuncElement("GetMemberRefFromSignature", COMModule::GetMemberRefFromSignature)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("SetModuleName", COMModule::SetModuleName)
+ QCFuncElement("PreSavePEFile", COMDynamicWrite::PreSavePEFile)
+ QCFuncElement("SavePEFile", COMDynamicWrite::SavePEFile)
+ QCFuncElement("AddResource", COMDynamicWrite::AddResource)
+#endif
+ QCFuncElement("GetArrayMethodToken", COMModule::GetArrayMethodToken)
+ QCFuncElement("SetFieldRVAContent", COMModule::SetFieldRVAContent)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("DefineNativeResourceFile", COMDynamicWrite::DefineNativeResourceFile)
+ QCFuncElement("DefineNativeResourceBytes", COMDynamicWrite::DefineNativeResourceBytes)
+#endif // !FEATURE_CORECLR
+FCFuncEnd()
+
+FCFuncStart(gCOMModuleHandleFuncs)
+ FCFuncElement("GetToken", ModuleHandle::GetToken)
+ QCFuncElement("GetModuleType", ModuleHandle::GetModuleType)
+ FCFuncElement("GetDynamicMethod", ModuleHandle::GetDynamicMethod)
+ FCFuncElement("_GetMetadataImport", ModuleHandle::GetMetadataImport)
+ QCFuncElement("ResolveType", ModuleHandle::ResolveType)
+ QCFuncElement("ResolveMethod", ModuleHandle::ResolveMethod)
+ QCFuncElement("_ContainsPropertyMatchingHash", ModuleHandle::ContainsPropertyMatchingHash)
+ QCFuncElement("ResolveField", ModuleHandle::ResolveField)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("GetAssembly", ModuleHandle::GetAssembly)
+#endif // !FEATURE_CORECLR
+ QCFuncElement("GetPEKind", ModuleHandle::GetPEKind)
+ FCFuncElement("GetMDStreamVersion", ModuleHandle::GetMDStreamVersion)
+FCFuncEnd()
+
+FCFuncStart(gCustomAttributeEncodedArgument)
+ FCFuncElement("ParseAttributeArguments", Attribute::ParseAttributeArguments)
+FCFuncEnd()
+
+FCFuncStart(gPseudoCustomAttribute)
+ FCFuncElement("_GetSecurityAttributes", COMCustomAttribute::GetSecurityAttributes)
+FCFuncEnd()
+
+FCFuncStart(gCOMCustomAttributeFuncs)
+ FCFuncElement("_ParseAttributeUsageAttribute", COMCustomAttribute::ParseAttributeUsageAttribute)
+ FCFuncElement("_CreateCaObject", COMCustomAttribute::CreateCaObject)
+ FCFuncElement("_GetPropertyOrFieldData", COMCustomAttribute::GetPropertyOrFieldData)
+FCFuncEnd()
+
+FCFuncStart(gSecurityContextFrameFuncs)
+ FCFuncElement("Push", COMCustomAttribute::PushSecurityContextFrame)
+ FCFuncElement("Pop", COMCustomAttribute::PopSecurityContextFrame)
+FCFuncEnd()
+
+FCFuncStart(gCOMClassWriter)
+ QCFuncElement("DefineGenericParam", COMDynamicWrite::DefineGenericParam)
+ QCFuncElement("DefineType", COMDynamicWrite::DefineType)
+ QCFuncElement("SetParentType", COMDynamicWrite::SetParentType)
+ QCFuncElement("AddInterfaceImpl", COMDynamicWrite::AddInterfaceImpl)
+ QCFuncElement("DefineMethod", COMDynamicWrite::DefineMethod)
+ QCFuncElement("DefineMethodSpec", COMDynamicWrite::DefineMethodSpec)
+ QCFuncElement("SetMethodIL", COMDynamicWrite::SetMethodIL)
+ QCFuncElement("TermCreateClass", COMDynamicWrite::TermCreateClass)
+ QCFuncElement("DefineField", COMDynamicWrite::DefineField)
+ QCFuncElement("SetPInvokeData", COMDynamicWrite::SetPInvokeData)
+ QCFuncElement("DefineProperty", COMDynamicWrite::DefineProperty)
+ QCFuncElement("DefineEvent", COMDynamicWrite::DefineEvent)
+ QCFuncElement("DefineMethodSemantics", COMDynamicWrite::DefineMethodSemantics)
+ QCFuncElement("SetMethodImpl", COMDynamicWrite::SetMethodImpl)
+ QCFuncElement("DefineMethodImpl", COMDynamicWrite::DefineMethodImpl)
+ QCFuncElement("GetTokenFromSig", COMDynamicWrite::GetTokenFromSig)
+ QCFuncElement("SetFieldLayoutOffset", COMDynamicWrite::SetFieldLayoutOffset)
+ QCFuncElement("SetClassLayout", COMDynamicWrite::SetClassLayout)
+ QCFuncElement("SetParamInfo", COMDynamicWrite::SetParamInfo)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("SetFieldMarshal", COMDynamicWrite::SetFieldMarshal)
+#endif // !FEATURE_CORECLR
+ QCFuncElement("SetConstantValue", COMDynamicWrite::SetConstantValue)
+ QCFuncElement("DefineCustomAttribute", COMDynamicWrite::DefineCustomAttribute)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("AddDeclarativeSecurity", COMDynamicWrite::AddDeclarativeSecurity)
+#endif // !FEATURE_CORECLR
+FCFuncEnd()
+
+#ifdef FEATURE_METHOD_RENTAL
+FCFuncStart(gCOMMethodRental)
+ QCFuncElement("SwapMethodBody", COMMethodRental::SwapMethodBody)
+FCFuncEnd()
+#endif // FEATURE_METHOD_RENTAL
+
+#ifdef FEATURE_CAS_POLICY
+FCFuncStart(gFrameSecurityDescriptorFuncs)
+ FCFuncElement("IncrementOverridesCount", SecurityPolicy::IncrementOverridesCount)
+ FCFuncElement("DecrementOverridesCount", SecurityPolicy::DecrementOverridesCount)
+ FCFuncElement("IncrementAssertCount", SecurityPolicy::IncrementAssertCount)
+ FCFuncElement("DecrementAssertCount", SecurityPolicy::DecrementAssertCount)
+FCFuncEnd()
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_CAS_POLICY
+FCFuncStart(gCodeAccessSecurityEngineFuncs)
+ FCFuncElement("SpecialDemand", SecurityStackWalk::FcallSpecialDemand)
+ FCFuncElement("Check", SecurityStackWalk::Check)
+ FCFuncElement("CheckNReturnSO", SecurityStackWalk::CheckNReturnSO)
+ FCFuncElement("GetZoneAndOriginInternal", SecurityStackWalk::GetZoneAndOrigin)
+#ifdef FEATURE_COMPRESSEDSTACK
+ FCFuncElement("QuickCheckForAllDemands", SecurityStackWalk::FCallQuickCheckForAllDemands)
+ FCFuncElement("AllDomainsHomogeneousWithNoStackModifiers", SecurityStackWalk::FCallAllDomainsHomogeneousWithNoStackModifiers)
+#endif
+FCFuncEnd()
+#endif // FEATURE_CAS_POLICY
+
+FCFuncStart(gCompatibilitySwitchFuncs)
+ FCFuncElement("IsEnabledInternalCall", CompatibilitySwitch::IsEnabled)
+ FCFuncElement("GetValueInternalCall", CompatibilitySwitch::GetValue)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("GetAppContextOverridesInternalCall", CompatibilitySwitch::GetAppContextOverrides)
+#endif
+FCFuncEnd()
+
+
+#ifdef FEATURE_COMPRESSEDSTACK
+FCFuncStart(gCompressedStackFuncs)
+ FCFuncElement("DestroyDelayedCompressedStack", SecurityStackWalk::FcallDestroyDelayedCompressedStack)
+ FCFuncElement("DestroyDCSList", NewCompressedStack::DestroyDCSList)
+ FCFuncElement("GetDelayedCompressedStack", SecurityStackWalk::EcallGetDelayedCompressedStack)
+ FCFuncElement("GetDCSCount", NewCompressedStack::FCallGetDCSCount)
+ FCFuncElement("GetDomainCompressedStack", NewCompressedStack::GetDomainCompressedStack)
+ FCFuncElement("GetHomogeneousPLS", NewCompressedStack::FCallGetHomogeneousPLS)
+ FCFuncElement("IsImmediateCompletionCandidate", NewCompressedStack::FCallIsImmediateCompletionCandidate)
+FCFuncEnd()
+
+FCFuncStart(gDomainCompressedStackFuncs)
+ FCFuncElement("GetDescCount", DomainCompressedStack::GetDescCount)
+ FCFuncElement("GetDomainPermissionSets", DomainCompressedStack::GetDomainPermissionSets)
+ FCFuncElement("GetDescriptorInfo", DomainCompressedStack::GetDescriptorInfo)
+ FCFuncElement("IgnoreDomain", DomainCompressedStack::IgnoreDomain)
+FCFuncEnd()
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+
+FCFuncStart(gCOMSecurityManagerFuncs)
+ QCFuncElement("IsSameType", SecurityPolicy::IsSameType)
+ FCFuncElement("_SetThreadSecurity", SecurityPolicy::SetThreadSecurity)
+#ifdef FEATURE_CAS_POLICY
+ QCFuncElement("GetGrantedPermissions", SecurityPolicy::GetGrantedPermissions)
+#endif
+FCFuncEnd()
+
+FCFuncStart(gCOMSecurityContextFuncs)
+#ifdef FEATURE_IMPERSONATION
+ FCFuncElement("GetImpersonationFlowMode", SecurityPolicy::GetImpersonationFlowMode)
+#endif
+#ifdef FEATURE_COMPRESSEDSTACK
+ FCFuncElement("IsDefaultThreadSecurityInfo", SecurityPolicy::IsDefaultThreadSecurityInfo)
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+FCFuncEnd()
+
+#ifdef FEATURE_CAS_POLICY
+FCFuncStart(gCOMSecurityZone)
+ QCFuncElement("_CreateFromUrl", SecurityPolicy::CreateFromUrl)
+FCFuncEnd()
+#endif // FEATURE_CAS_POLICY
+
+FCFuncStart(gCOMFileIOAccessFuncs)
+ QCFuncElement("IsLocalDrive", SecurityPolicy::IsLocalDrive)
+FCFuncEnd()
+
+FCFuncStart(gCOMStringExpressionSetFuncs)
+ QCFuncElement("GetLongPathName", SecurityPolicy::_GetLongPathName)
+FCFuncEnd()
+
+
+FCFuncStart(gCOMUrlStringFuncs)
+ QCFuncElement("GetDeviceName", SecurityPolicy::GetDeviceName)
+FCFuncEnd()
+
+#ifdef FEATURE_CAS_POLICY
+FCFuncStart(gCOMSecurityRuntimeFuncs)
+ FCFuncElement("GetSecurityObjectForFrame", SecurityRuntime::GetSecurityObjectForFrame)
+FCFuncEnd()
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_X509
+
+FCFuncStart(gX509CertificateFuncs)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("_AddCertificateToStore", COMX509Store::AddCertificate)
+#endif // !FEATURE_CORECLR
+ FCFuncElement("_DuplicateCertContext", COMX509Certificate::DuplicateCertContext)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("_ExportCertificatesToBlob", COMX509Store::ExportCertificatesToBlob)
+#endif // !FEATURE_CORECLR
+ FCFuncElement("_GetCertRawData", COMX509Certificate::GetCertRawData)
+ FCFuncElement("_GetDateNotAfter", COMX509Certificate::GetDateNotAfter)
+ FCFuncElement("_GetDateNotBefore", COMX509Certificate::GetDateNotBefore)
+ FCFuncElement("_GetIssuerName", COMX509Certificate::GetIssuerName)
+ FCFuncElement("_GetPublicKeyOid", COMX509Certificate::GetPublicKeyOid)
+ FCFuncElement("_GetPublicKeyParameters", COMX509Certificate::GetPublicKeyParameters)
+ FCFuncElement("_GetPublicKeyValue", COMX509Certificate::GetPublicKeyValue)
+ FCFuncElement("_GetSerialNumber", COMX509Certificate::GetSerialNumber)
+ FCFuncElement("_GetSubjectInfo", COMX509Certificate::GetSubjectInfo)
+ FCFuncElement("_GetThumbprint", COMX509Certificate::GetThumbprint)
+ FCFuncElement("_LoadCertFromBlob", COMX509Certificate::LoadCertFromBlob)
+ FCFuncElement("_LoadCertFromFile", COMX509Certificate::LoadCertFromFile)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("_OpenX509Store", COMX509Store::OpenX509Store)
+#endif // !FEATURE_CORECLR
+ FCFuncElement("_QueryCertBlobType", COMX509Certificate::QueryCertBlobType)
+ FCFuncElement("_QueryCertFileType", COMX509Certificate::QueryCertFileType)
+FCFuncEnd()
+
+FCFuncStart(gX509SafeCertContextHandleFuncs)
+ FCFuncElement("_FreePCertContext", COMX509Certificate::FreePCertContext)
+FCFuncEnd()
+
+#ifndef FEATURE_CORECLR
+FCFuncStart(gX509SafeCertStoreHandleFuncs)
+ FCFuncElement("_FreeCertStoreContext", COMX509Store::FreeCertStoreContext)
+FCFuncEnd()
+#endif
+
+#endif // FEATURE_X509
+
+FCFuncStart(gBCLDebugFuncs)
+ FCFuncElement("GetRegistryLoggingValues", ManagedLoggingHelper::GetRegistryLoggingValues)
+FCFuncEnd()
+
+#if defined(FEATURE_CRYPTO) || defined(FEATURE_LEGACYNETCFCRYPTO)
+FCFuncStart(gCryptographyUtilsFuncs)
+ FCFuncElement("_AcquireCSP", COMCryptography::_AcquireCSP)
+ FCFuncElement("_CreateCSP", COMCryptography::_CreateCSP)
+ FCFuncElement("_ExportKey", COMCryptography::_ExportKey)
+ FCFuncElement("_GenerateKey", COMCryptography::_GenerateKey)
+ FCFuncElement("_GetKeyParameter", COMCryptography::_GetKeyParameter)
+ FCFuncElement("_GetUserKey", COMCryptography::_GetUserKey)
+ FCFuncElement("_ImportKey", COMCryptography::_ImportKey)
+ FCFuncElement("_ImportCspBlob", COMCryptography::_ImportCspBlob)
+ FCFuncElement("_OpenCSP", COMCryptography::_OpenCSP)
+ QCFuncElement("ExportCspBlob", COMCryptography::ExportCspBlob)
+ QCFuncElement("GetPersistKeyInCsp", COMCryptography::GetPersistKeyInCsp)
+ QCFuncElement("SetPersistKeyInCsp", COMCryptography::SetPersistKeyInCsp)
+ QCFuncElement("SignValue", COMCryptography::SignValue)
+ QCFuncElement("VerifySign", COMCryptography::VerifySign)
+ FCFuncElement("_GetProviderParameter", COMCryptography::_GetProviderParameter)
+ FCFuncElement("_ProduceLegacyHmacValues", COMCryptography::_ProduceLegacyHMACValues)
+ QCFuncElement("CreateHash", COMCryptography::CreateHash)
+ QCFuncElement("EndHash", COMCryptography::EndHash)
+ QCFuncElement("HashData", COMCryptography::HashData)
+ QCFuncElement("SetProviderParameter", COMCryptography::SetProviderParameter)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("_DecryptData", COMCryptography::_DecryptData)
+ FCFuncElement("_EncryptData", COMCryptography::_EncryptData)
+ FCFuncElement("_GetEnforceFipsPolicySetting", COMCryptography::_GetEnforceFipsPolicySetting)
+ FCFuncElement("_ImportBulkKey", COMCryptography::_ImportBulkKey)
+ FCFuncElement("_GetKeySetSecurityInfo", COMCryptography::_GetKeySetSecurityInfo)
+ QCFuncElement("SearchForAlgorithm", COMCryptography::SearchForAlgorithm)
+ QCFuncElement("SetKeyParamDw", COMCryptography::SetKeyParamDw)
+ QCFuncElement("SetKeyParamRgb", COMCryptography::SetKeyParamRgb)
+ QCFuncElement("SetKeySetSecurityInfo", COMCryptography::SetKeySetSecurityInfo)
+#endif // !FEATURE_CORECLR
+FCFuncEnd()
+
+FCFuncStart(gSafeHashHandleFuncs)
+ QCFuncElement("FreeHash", COMCryptography::FreeHash)
+FCFuncEnd()
+
+FCFuncStart(gSafeKeyHandleFuncs)
+ QCFuncElement("FreeKey", COMCryptography::FreeKey)
+FCFuncEnd()
+
+FCFuncStart(gSafeProvHandleFuncs)
+ QCFuncElement("FreeCsp", COMCryptography::FreeCsp)
+FCFuncEnd()
+
+#ifndef FEATURE_CORECLR
+FCFuncStart(gPasswordDeriveBytesFuncs)
+ QCFuncElement("DeriveKey", COMCryptography::DeriveKey)
+FCFuncEnd()
+#endif
+
+#if defined(FEATURE_CRYPTO)
+FCFuncStart(gRfc2898DeriveBytesFuncs)
+ QCFuncElement("DeriveKey", COMCryptography::DeriveKey)
+FCFuncEnd()
+#endif
+
+#ifndef FEATURE_CORECLR
+FCFuncStart(gRNGCryptoServiceProviderFuncs)
+ QCFuncElement("GetBytes", COMCryptography::GetBytes)
+ QCFuncElement("GetNonZeroBytes", COMCryptography::GetNonZeroBytes)
+FCFuncEnd()
+#endif // !FEATURE_CORECLR
+
+FCFuncStart(gRSACryptoServiceProviderFuncs)
+ QCFuncElement("DecryptKey", COMCryptography::DecryptKey)
+ QCFuncElement("EncryptKey", COMCryptography::EncryptKey)
+FCFuncEnd()
+#endif // FEATURE_CRYPTO || FEATURE_LEGACYNETCFCRYPTO
+
+FCFuncStart(gAppDomainManagerFuncs)
+ QCFuncElement("GetEntryAssembly", AssemblyNative::GetEntryAssembly)
+#ifdef FEATURE_APPDOMAINMANAGER_INITOPTIONS
+ FCFuncElement("HasHost", AppDomainNative::HasHost)
+ QCFuncElement("RegisterWithHost", AppDomainNative::RegisterWithHost)
+#endif
+FCFuncEnd()
+
+#ifdef FEATURE_FUSION
+FCFuncStart(gAppDomainSetupFuncs)
+ FCFuncElement("UpdateContextProperty", AppDomainNative::UpdateContextProperty)
+FCFuncEnd()
+#endif // FEATURE_FUSION
+
+#ifndef FEATURE_CORECLR
+FCFuncStart(gWindowsRuntimeContextFuncs)
+ QCFuncElement("CreateDesignerContext", AppDomainNative::CreateDesignerContext)
+ QCFuncElement("SetCurrentContext", AppDomainNative::SetCurrentDesignerContext)
+FCFuncEnd()
+#endif // !FEATURE_CORECLR
+
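+// Heavily #ifdef'ed tables such as gAppDomainFuncs must stay in sync with the
+// matching mscorlib build: InternalCall methods are resolved against these entries
+// by name, so a managed declaration whose entry is compiled out here will fail to
+// resolve when the method is first invoked.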
+FCFuncStart(gAppDomainFuncs)
+#ifdef FEATURE_REMOTING
+ FCFuncElement("GetDefaultDomain", AppDomainNative::GetDefaultDomain)
+#endif
+#ifdef FEATURE_FUSION
+ FCFuncElement("GetFusionContext", AppDomainNative::GetFusionContext)
+#endif // FEATURE_FUSION
+ FCFuncElement("IsStringInterned", AppDomainNative::IsStringInterned)
+ FCFuncElement("IsUnloadingForcedFinalize", AppDomainNative::IsUnloadingForcedFinalize)
+#ifdef FEATURE_REMOTING
+ FCFuncElement("nCreateDomain", AppDomainNative::CreateDomain)
+ FCFuncElement("nCreateInstance", AppDomainNative::CreateInstance)
+#endif
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ FCFuncElement("UpdateLoaderOptimization", AppDomainNative::UpdateLoaderOptimization)
+#endif // FEATURE_LOADER_OPTIMIZATION
+ QCFuncElement("DisableFusionUpdatesFromADManager", AppDomainNative::DisableFusionUpdatesFromADManager)
+
+#ifdef FEATURE_APPX
+ QCFuncElement("nGetAppXFlags", AppDomainNative::GetAppXFlags)
+#endif
+ QCFuncElement("GetAppDomainManagerType", AppDomainNative::GetAppDomainManagerType)
+ QCFuncElement("SetAppDomainManagerType", AppDomainNative::SetAppDomainManagerType)
+ FCFuncElement("nGetFriendlyName", AppDomainNative::GetFriendlyName)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("GetSecurityDescriptor", AppDomainNative::GetSecurityDescriptor)
+ FCFuncElement("nIsDefaultAppDomainForEvidence", AppDomainNative::IsDefaultAppDomainForEvidence)
+#endif
+#if defined(FEATURE_CORESYSTEM) || !defined(FEATURE_CORECLR)
+ FCFuncElement("nGetAssemblies", AppDomainNative::GetAssemblies)
+#endif
+#ifdef FEATURE_CAS_POLICY
+ FCFuncElement("nSetHostSecurityManagerFlags", AppDomainNative::SetHostSecurityManagerFlags)
+ QCFuncElement("SetLegacyCasPolicyEnabled", AppDomainNative::SetLegacyCasPolicyEnabled)
+#endif // FEATURE_CAS_POLICY
+#ifdef FEATURE_APTCA
+ QCFuncElement("SetCanonicalConditionalAptcaList", AppDomainNative::SetCanonicalConditionalAptcaList)
+#endif // FEATURE_APTCA
+ QCFuncElement("SetSecurityHomogeneousFlag", AppDomainNative::SetSecurityHomogeneousFlag)
+ QCFuncElement("SetupDomainSecurity", AppDomainNative::SetupDomainSecurity)
+ FCFuncElement("nSetupFriendlyName", AppDomainNative::SetupFriendlyName)
+#if FEATURE_COMINTEROP
+ FCFuncElement("nSetDisableInterfaceCache", AppDomainNative::SetDisableInterfaceCache)
+#endif // FEATURE_COMINTEROP
+ FCFuncElement("_nExecuteAssembly", AppDomainNative::ExecuteAssembly)
+#ifdef FEATURE_VERSIONING
+ FCFuncElement("nCreateContext", AppDomainNative::CreateContext)
+#endif // FEATURE_VERSIONING
+#ifdef FEATURE_REMOTING
+ FCFuncElement("nUnload", AppDomainNative::Unload)
+#endif // FEATURE_REMOTING
+ FCFuncElement("GetId", AppDomainNative::GetId)
+ FCFuncElement("GetOrInternString", AppDomainNative::GetOrInternString)
+ QCFuncElement("GetGrantSet", AppDomainNative::GetGrantSet)
+#ifdef FEATURE_REMOTING
+ FCFuncElement("GetDynamicDir", AppDomainNative::GetDynamicDir)
+#ifdef FEATURE_CAS_POLICY
+ QCFuncElement("GetIsLegacyCasPolicyEnabled", AppDomainNative::IsLegacyCasPolicyEnabled)
+#endif // FEATURE_CAS_POLICY
+ FCFuncElement("nChangeSecurityPolicy", AppDomainNative::ChangeSecurityPolicy)
+ FCFuncElement("IsDomainIdValid", AppDomainNative::IsDomainIdValid)
+ FCFuncElement("nApplyPolicy", AppDomainNative::nApplyPolicy)
+#endif // FEATURE_REMOTING
+#ifdef FEATURE_CORECLR
+ QCFuncElement("nSetupBindingPaths", AppDomainNative::SetupBindingPaths)
+ QCFuncElement("nSetNativeDllSearchDirectories", AppDomainNative::SetNativeDllSearchDirectories)
+#endif
+ FCFuncElement("IsFinalizingForUnload", AppDomainNative::IsFinalizingForUnload)
+ FCFuncElement("PublishAnonymouslyHostedDynamicMethodsAssembly", AppDomainNative::PublishAnonymouslyHostedDynamicMethodsAssembly)
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ FCFuncElement("nEnableMonitoring", AppDomainNative::EnableMonitoring)
+ FCFuncElement("nMonitoringIsEnabled", AppDomainNative::MonitoringIsEnabled)
+ FCFuncElement("nGetTotalProcessorTime", AppDomainNative::GetTotalProcessorTime)
+ FCFuncElement("nGetTotalAllocatedMemorySize", AppDomainNative::GetTotalAllocatedMemorySize)
+ FCFuncElement("nGetLastSurvivedMemorySize", AppDomainNative::GetLastSurvivedMemorySize)
+ FCFuncElement("nGetLastSurvivedProcessMemorySize", AppDomainNative::GetLastSurvivedProcessMemorySize)
+#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+FCFuncEnd()
+
+#if defined(FEATURE_MULTICOREJIT) && !defined(FEATURE_CORECLR)
+FCFuncStart(gProfileOptimizationFuncs)
+ QCFuncElement("InternalSetProfileRoot", MultiCoreJITNative::InternalSetProfileRoot)
+ QCFuncElement("InternalStartProfile", MultiCoreJITNative::InternalStartProfile)
+FCFuncEnd()
+#endif // defined(FEATURE_MULTICOREJIT) && !defined(FEATURE_CORECLR)
+
+FCFuncStart(gUtf8String)
+ FCFuncElement("EqualsCaseSensitive", Utf8String::EqualsCaseSensitive)
+ QCFuncElement("EqualsCaseInsensitive", Utf8String::EqualsCaseInsensitive)
+ QCFuncElement("HashCaseInsensitive", Utf8String::HashCaseInsensitive)
+FCFuncEnd()
+
+FCFuncStart(gTypeNameBuilder)
+ QCFuncElement("CreateTypeNameBuilder", TypeNameBuilder::_CreateTypeNameBuilder)
+ QCFuncElement("ReleaseTypeNameBuilder", TypeNameBuilder::_ReleaseTypeNameBuilder)
+ QCFuncElement("OpenGenericArguments", TypeNameBuilder::_OpenGenericArguments)
+ QCFuncElement("CloseGenericArguments", TypeNameBuilder::_CloseGenericArguments)
+ QCFuncElement("OpenGenericArgument", TypeNameBuilder::_OpenGenericArgument)
+ QCFuncElement("CloseGenericArgument", TypeNameBuilder::_CloseGenericArgument)
+ QCFuncElement("AddName", TypeNameBuilder::_AddName)
+ QCFuncElement("AddPointer", TypeNameBuilder::_AddPointer)
+ QCFuncElement("AddByRef", TypeNameBuilder::_AddByRef)
+ QCFuncElement("AddSzArray", TypeNameBuilder::_AddSzArray)
+ QCFuncElement("AddArray", TypeNameBuilder::_AddArray)
+ QCFuncElement("AddAssemblySpec", TypeNameBuilder::_AddAssemblySpec)
+ QCFuncElement("ToString", TypeNameBuilder::_ToString)
+ QCFuncElement("Clear", TypeNameBuilder::_Clear)
+FCFuncEnd()
+
+#ifndef FEATURE_CORECLR
+FCFuncStart(gSafeTypeNameParserHandle)
+ QCFuncElement("_ReleaseTypeNameParser", TypeName::QReleaseTypeNameParser)
+FCFuncEnd()
+
+FCFuncStart(gTypeNameParser)
+ QCFuncElement("_CreateTypeNameParser", TypeName::QCreateTypeNameParser)
+ QCFuncElement("_GetNames", TypeName::QGetNames)
+ QCFuncElement("_GetTypeArguments", TypeName::QGetTypeArguments)
+ QCFuncElement("_GetModifiers", TypeName::QGetModifiers)
+ QCFuncElement("_GetAssemblyName", TypeName::QGetAssemblyName)
+FCFuncEnd()
+#endif // !FEATURE_CORECLR
+
+#ifdef FEATURE_CAS_POLICY
+FCFuncStart(gPEFileFuncs)
+ QCFuncElement("ReleaseSafePEFileHandle", AssemblyNative::ReleaseSafePEFileHandle)
+FCFuncEnd()
+#endif // FEATURE_CAS_POLICY
+
+FCFuncStart(gManifestBasedResourceGrovelerFuncs)
+ QCFuncElement("GetNeutralResourcesLanguageAttribute", AssemblyNative::GetNeutralResourcesLanguageAttribute)
+FCFuncEnd()
+
+FCFuncStart(gAssemblyFuncs)
+ QCFuncElement("GetFullName", AssemblyNative::GetFullName)
+ QCFuncElement("GetLocation", AssemblyNative::GetLocation)
+ QCFuncElement("GetResource", AssemblyNative::GetResource)
+ QCFuncElement("GetCodeBase", AssemblyNative::GetCodeBase)
+ QCFuncElement("GetExecutingAssembly", AssemblyNative::GetExecutingAssembly)
+ QCFuncElement("GetFlags", AssemblyNative::GetFlags)
+ QCFuncElement("GetHashAlgorithm", AssemblyNative::GetHashAlgorithm)
+ QCFuncElement("GetLocale", AssemblyNative::GetLocale)
+ QCFuncElement("GetPublicKey", AssemblyNative::GetPublicKey)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("GetSecurityRuleSet", AssemblyNative::GetSecurityRuleSet)
+#endif // !FEATURE_CORECLR
+ QCFuncElement("GetSimpleName", AssemblyNative::GetSimpleName)
+ QCFuncElement("GetVersion", AssemblyNative::GetVersion)
+ FCFuncElement("FCallIsDynamic", AssemblyNative::IsDynamic)
+ FCFuncElement("_nLoad", AssemblyNative::Load)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("IsFrameworkAssembly", AssemblyNative::IsFrameworkAssembly)
+ FCFuncElement("IsNewPortableAssembly", AssemblyNative::IsNewPortableAssembly)
+#endif
+ FCFuncElement("nLoadImage", AssemblyNative::LoadImage)
+#ifdef FEATURE_CORECLR
+ QCFuncElement("nLoadFromUnmanagedArray", AssemblyNative::LoadFromUnmanagedArray)
+#endif
+ FCFuncElement("nLoadFile", AssemblyNative::LoadFile)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("LoadModule", AssemblyNative::LoadModule)
+#endif // !FEATURE_CORECLR
+ QCFuncElement("GetType", AssemblyNative::GetType)
+ QCFuncElement("GetManifestResourceInfo", AssemblyNative::GetManifestResourceInfo)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("UseRelativeBindForSatellites", AssemblyNative::UseRelativeBindForSatellites)
+#endif
+ QCFuncElement("GetModules", AssemblyNative::GetModules)
+ QCFuncElement("GetModule", AssemblyNative::GetModule)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("GetReferencedAssemblies", AssemblyNative::GetReferencedAssemblies)
+ QCFuncElement("GetForwardedTypes", AssemblyNative::GetForwardedTypes)
+#endif // !FEATURE_CORECLR
+ QCFuncElement("GetExportedTypes", AssemblyNative::GetExportedTypes)
+ FCFuncElement("GetManifestResourceNames", AssemblyNative::GetManifestResourceNames)
+ QCFuncElement("GetEntryPoint", AssemblyNative::GetEntryPoint)
+#ifdef FEATURE_LEGACYNETCF
+ QCFuncElement("GetIsProfileAssembly", AssemblyNative::GetIsProfileAssembly)
+#endif // FEATURE_LEGACYNETCF
+ QCFuncElement("IsAllSecurityTransparent", AssemblyNative::IsAllSecurityTransparent)
+ QCFuncElement("IsAllSecurityCritical", AssemblyNative::IsAllSecurityCritical)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("IsAllSecuritySafeCritical", AssemblyNative::IsAllSecuritySafeCritical)
+ QCFuncElement("IsAllPublicAreaSecuritySafeCritical", AssemblyNative::IsAllPublicAreaSecuritySafeCritical)
+ QCFuncElement("GetGrantSet", AssemblyNative::GetGrantSet)
+ FCFuncElement("IsGlobalAssemblyCache", AssemblyNative::IsGlobalAssemblyCache)
+#endif // !FEATURE_CORECLR
+#ifdef FEATURE_CAS_POLICY
+ QCFuncElement("GetEvidence", SecurityPolicy::GetEvidence)
+#endif // FEATURE_CAS_POLICY
+ QCFuncElement("GetImageRuntimeVersion", AssemblyNative::GetImageRuntimeVersion)
+ FCFuncElement("IsReflectionOnly", AssemblyNative::IsReflectionOnly)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("GetHostContext", AssemblyNative::GetHostContext)
+#endif
+#ifdef FEATURE_CAS_POLICY
+ QCFuncElement("GetIsStrongNameVerified", AssemblyNative::IsStrongNameVerified)
+ QCFuncElement("GetRawBytes", AssemblyNative::GetRawBytes)
+#endif // FEATURE_CAS_POLICY
+ FCFuncElement("GetManifestModule", AssemblyHandle::GetManifestModule)
+ FCFuncElement("GetToken", AssemblyHandle::GetToken)
+#ifdef FEATURE_APTCA
+ FCFuncElement("AptcaCheck", AssemblyHandle::AptcaCheck)
+#endif // FEATURE_APTCA
+#ifdef FEATURE_APPX
+ QCFuncElement("nIsDesignerBindingContext", AssemblyNative::IsDesignerBindingContext)
+#endif
+
+FCFuncEnd()
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+FCFuncStart(gAssemblyLoadContextFuncs)
+ QCFuncElement("InitializeAssemblyLoadContext", AssemblyNative::InitializeAssemblyLoadContext)
+ QCFuncElement("LoadFromPath", AssemblyNative::LoadFromPath)
+ QCFuncElement("OverrideDefaultAssemblyLoadContextForCurrentDomain", AssemblyNative::OverrideDefaultAssemblyLoadContextForCurrentDomain)
+ QCFuncElement("CanUseAppPathAssemblyLoadContextInCurrentDomain", AssemblyNative::CanUseAppPathAssemblyLoadContextInCurrentDomain)
+ QCFuncElement("LoadFromStream", AssemblyNative::LoadFromStream)
+ FCFuncElement("nGetFileInformation", AssemblyNameNative::GetFileInformation)
+ QCFuncElement("GetLoadContextForAssembly", AssemblyNative::GetLoadContextForAssembly)
+#if defined(FEATURE_MULTICOREJIT)
+ QCFuncElement("InternalSetProfileRoot", MultiCoreJITNative::InternalSetProfileRoot)
+ QCFuncElement("InternalStartProfile", MultiCoreJITNative::InternalStartProfile)
+#endif // defined(FEATURE_MULTICOREJIT)
+FCFuncEnd()
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+FCFuncStart(gAssemblyNameFuncs)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("nGetFileInformation", AssemblyNameNative::GetFileInformation)
+#endif // !FEATURE_CORECLR
+ FCFuncElement("nToString", AssemblyNameNative::ToString)
+ FCFuncElement("nGetPublicKeyToken", AssemblyNameNative::GetPublicKeyToken)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("EscapeCodeBase", AssemblyNameNative::EscapeCodeBase)
+#endif // !FEATURE_CORECLR
+ FCFuncElement("nInit", AssemblyNameNative::Init)
+ FCFuncElement("ReferenceMatchesDefinitionInternal", AssemblyNameNative::ReferenceMatchesDefinition)
+FCFuncEnd()
+
+FCFuncStart(gLoaderAllocatorFuncs)
+ QCFuncElement("Destroy", LoaderAllocator::Destroy)
+FCFuncEnd()
+
+FCFuncStart(gAssemblyBuilderFuncs)
+ FCFuncElement("nCreateDynamicAssembly", AppDomainNative::CreateDynamicAssembly)
+ FCFuncElement("GetInMemoryAssemblyModule", AssemblyNative::GetInMemoryAssemblyModule)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("GetOnDiskAssemblyModule", AssemblyNative::GetOnDiskAssemblyModule)
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ QCFuncElement("DefineDynamicModule", COMModule::DefineDynamicModule)
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+ QCFuncElement("PrepareForSavingManifestToDisk", AssemblyNative::PrepareForSavingManifestToDisk)
+ QCFuncElement("SaveManifestToDisk", AssemblyNative::SaveManifestToDisk)
+ QCFuncElement("AddFile", AssemblyNative::AddFile)
+ QCFuncElement("SetFileHashValue", AssemblyNative::SetFileHashValue)
+ QCFuncElement("AddStandAloneResource", AssemblyNative::AddStandAloneResource)
+ QCFuncElement("AddExportedTypeOnDisk", AssemblyNative::AddExportedTypeOnDisk)
+ QCFuncElement("AddExportedTypeInMemory", AssemblyNative::AddExportedTypeInMemory)
+ QCFuncElement("AddDeclarativeSecurity", AssemblyNative::AddDeclarativeSecurity)
+ QCFuncElement("CreateVersionInfoResource", AssemblyNative::CreateVersionInfoResource)
+#endif // !FEATURE_CORECLR
+FCFuncEnd()
+
+#ifdef MDA_SUPPORTED
+FCFuncStart(gMda)
+ FCFuncElement("MemberInfoCacheCreation", MdaManagedSupport::MemberInfoCacheCreation)
+ FCFuncElement("DateTimeInvalidLocalFormat", MdaManagedSupport::DateTimeInvalidLocalFormat)
+ FCFuncElement("IsStreamWriterBufferedDataLostEnabled", MdaManagedSupport::IsStreamWriterBufferedDataLostEnabled)
+ FCFuncElement("IsStreamWriterBufferedDataLostCaptureAllocatedCallStack", MdaManagedSupport::IsStreamWriterBufferedDataLostCaptureAllocatedCallStack)
+ FCFuncElement("ReportStreamWriterBufferedDataLost", MdaManagedSupport::ReportStreamWriterBufferedDataLost)
+ FCFuncElement("IsInvalidGCHandleCookieProbeEnabled", MdaManagedSupport::IsInvalidGCHandleCookieProbeEnabled)
+ FCFuncElement("FireInvalidGCHandleCookieProbe", MdaManagedSupport::FireInvalidGCHandleCookieProbe)
+ FCFuncElement("ReportErrorSafeHandleRelease", MdaManagedSupport::ReportErrorSafeHandleRelease)
+FCFuncEnd()
+#endif // MDA_SUPPORTED
+
+FCFuncStart(gDelegateFuncs)
+ FCFuncElement("BindToMethodName", COMDelegate::BindToMethodName)
+ FCFuncElement("BindToMethodInfo", COMDelegate::BindToMethodInfo)
+ FCFuncElement("GetMulticastInvoke", COMDelegate::GetMulticastInvoke)
+ FCFuncElement("GetInvokeMethod", COMDelegate::GetInvokeMethod)
+ FCFuncElement("InternalAlloc", COMDelegate::InternalAlloc)
+ FCFuncElement("InternalAllocLike", COMDelegate::InternalAllocLike)
+ FCFuncElement("InternalEqualTypes", COMDelegate::InternalEqualTypes)
+ FCFuncElement("InternalEqualMethodHandles", COMDelegate::InternalEqualMethodHandles)
+ FCFuncElement("FindMethodHandle", COMDelegate::FindMethodHandle)
+ FCFuncElement("AdjustTarget", COMDelegate::AdjustTarget)
+ FCFuncElement("GetCallStub", COMDelegate::GetCallStub)
+ FCFuncElement("CompareUnmanagedFunctionPtrs", COMDelegate::CompareUnmanagedFunctionPtrs)
+
+ // The FCall mechanism knows how to wire multiple different constructor calls into a
+ // single entrypoint even without the following entry; the entry is still required,
+ // however, to satisfy frame creation within the body:
+ FCFuncElement("DelegateConstruct", COMDelegate::DelegateConstruct)
+FCFuncEnd()
+
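+// FCIntrinsic and FCIntrinsicSig pair the native fallback with a CORINFO_INTRINSIC_*
+// id: the JIT is free to expand such a call inline (Math.Sin, say, down to a single
+// hardware instruction) and only calls the listed C++ implementation when it
+// declines to do so.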
+FCFuncStart(gMathFuncs)
+ FCIntrinsic("Sin", COMDouble::Sin, CORINFO_INTRINSIC_Sin)
+ FCIntrinsic("Cos", COMDouble::Cos, CORINFO_INTRINSIC_Cos)
+ FCIntrinsic("Sqrt", COMDouble::Sqrt, CORINFO_INTRINSIC_Sqrt)
+ FCIntrinsic("Round", COMDouble::Round, CORINFO_INTRINSIC_Round)
+ FCIntrinsicSig("Abs", &gsig_SM_Flt_RetFlt, COMDouble::AbsFlt, CORINFO_INTRINSIC_Abs)
+ FCIntrinsicSig("Abs", &gsig_SM_Dbl_RetDbl, COMDouble::AbsDbl, CORINFO_INTRINSIC_Abs)
+ FCFuncElement("Exp", COMDouble::Exp)
+ FCFuncElement("Pow", COMDouble::Pow)
+#if defined(_TARGET_X86_)
+ FCUnreferenced FCFuncElement("PowHelperSimple", COMDouble::PowHelperSimple)
+ FCUnreferenced FCFuncElement("PowHelper", COMDouble::PowHelper)
+#endif
+ FCFuncElement("Tan", COMDouble::Tan)
+ FCFuncElement("Floor", COMDouble::Floor)
+ FCFuncElement("Log", COMDouble::Log)
+ FCFuncElement("Sinh", COMDouble::Sinh)
+ FCFuncElement("Cosh", COMDouble::Cosh)
+ FCFuncElement("Tanh", COMDouble::Tanh)
+ FCFuncElement("Acos", COMDouble::Acos)
+ FCFuncElement("Asin", COMDouble::Asin)
+ FCFuncElement("Atan", COMDouble::Atan)
+ FCFuncElement("Atan2", COMDouble::Atan2)
+ FCFuncElement("Log10", COMDouble::Log10)
+ FCFuncElement("Ceiling", COMDouble::Ceil)
+ FCFuncElement("SplitFractionDouble", COMDouble::ModFDouble)
+FCFuncEnd()
+
+FCFuncStart(gThreadFuncs)
+ FCDynamic("InternalGetCurrentThread", CORINFO_INTRINSIC_Illegal, ECall::InternalGetCurrentThread)
+ FCFuncElement("StartInternal", ThreadNative::Start)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("SuspendInternal", ThreadNative::Suspend)
+ FCFuncElement("ResumeInternal", ThreadNative::Resume)
+ FCFuncElement("InterruptInternal", ThreadNative::Interrupt)
+#endif
+ FCFuncElement("get_IsAlive", ThreadNative::IsAlive)
+ FCFuncElement("GetThreadStateNative", ThreadNative::GetThreadState)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("GetPriorityNative", ThreadNative::GetPriority)
+ FCFuncElement("SetPriorityNative", ThreadNative::SetPriority)
+#endif
+#ifdef FEATURE_LEAK_CULTURE_INFO
+ FCFuncElement("nativeGetSafeCulture", ThreadNative::nativeGetSafeCulture)
+#else
+ QCFuncElement("nativeInitCultureAccessors", ThreadNative::nativeInitCultureAccessors)
+#endif
+ FCFuncElement("JoinInternal", ThreadNative::Join)
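+// The VM headers redefine Sleep to Dont_Use_Sleep to discourage direct use of the
+// Win32 API; lift that guard just long enough to name ThreadNative::Sleep, then
+// restore it.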
+#undef Sleep
+ FCFuncElement("SleepInternal", ThreadNative::Sleep)
+#define Sleep(a) Dont_Use_Sleep(a)
+ FCFuncElement("SetStart", ThreadNative::SetStart)
+ FCFuncElement("SetBackgroundNative", ThreadNative::SetBackground)
+ FCFuncElement("IsBackgroundNative", ThreadNative::IsBackground)
+#ifdef FEATURE_REMOTING
+ FCFuncElement("GetContextInternal", ThreadNative::GetContextFromContextID)
+#endif
+ FCFuncElement("GetDomainInternal", ThreadNative::GetDomain)
+ FCFuncElement("GetFastDomainInternal", ThreadNative::FastGetDomain)
+#ifdef FEATURE_COMPRESSEDSTACK
+ FCFuncElement("SetAppDomainStack", ThreadNative::SetAppDomainStack)
+ FCFuncElement("RestoreAppDomainStack", ThreadNative::RestoreAppDomainStack)
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+#ifdef FEATURE_REMOTING
+ FCFuncElement("InternalCrossContextCallback", ThreadNative::InternalCrossContextCallback)
+#endif
+ QCFuncElement("InformThreadNameChange", ThreadNative::InformThreadNameChange)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("GetProcessDefaultStackSize", ThreadNative::GetProcessDefaultStackSize)
+ FCFuncElement("BeginCriticalRegion", ThreadNative::BeginCriticalRegion)
+ FCFuncElement("EndCriticalRegion", ThreadNative::EndCriticalRegion)
+ FCFuncElement("BeginThreadAffinity", ThreadNative::BeginThreadAffinity)
+ FCFuncElement("EndThreadAffinity", ThreadNative::EndThreadAffinity)
+#endif // !FEATURE_CORECLR
+ FCFuncElement("AbortInternal", ThreadNative::Abort)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("ResetAbortNative", ThreadNative::ResetAbort)
+#endif // !FEATURE_CORECLR
+ FCFuncElement("get_IsThreadPoolThread", ThreadNative::IsThreadpoolThread)
+ FCFuncElement("SpinWaitInternal", ThreadNative::SpinWait)
+ QCFuncElement("YieldInternal", ThreadNative::YieldThread)
+ FCIntrinsic("GetCurrentThreadNative", ThreadNative::GetCurrentThread, CORINFO_INTRINSIC_GetCurrentManagedThread)
+ FCIntrinsic("get_ManagedThreadId", ThreadNative::GetManagedThreadId, CORINFO_INTRINSIC_GetManagedThreadId)
+ FCFuncElement("InternalFinalize", ThreadNative::Finalize)
+#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORECLR)
+ FCFuncElement("DisableComObjectEagerCleanup", ThreadNative::DisableComObjectEagerCleanup)
+#endif // defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORECLR)
+#ifdef FEATURE_LEAK_CULTURE_INFO
+ FCFuncElement("nativeSetThreadUILocale", ThreadNative::SetThreadUILocale)
+#endif
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+#ifndef FEATURE_CORECLR
+ FCFuncElement("SetApartmentStateNative", ThreadNative::SetApartmentState)
+ FCFuncElement("GetApartmentStateNative", ThreadNative::GetApartmentState)
+#endif // FEATURE_CORECLR
+ FCFuncElement("StartupSetApartmentStateInternal", ThreadNative::StartupSetApartmentState)
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ FCIntrinsic("MemoryBarrier", ThreadNative::FCMemoryBarrier, CORINFO_INTRINSIC_MemoryBarrier)
+#ifndef FEATURE_CORECLR // CoreCLR does not support abort reason
+ FCFuncElement("SetAbortReason", ThreadNative::SetAbortReason)
+ FCFuncElement("GetAbortReason", ThreadNative::GetAbortReason)
+ FCFuncElement("ClearAbortReason", ThreadNative::ClearAbortReason)
+#endif
+FCFuncEnd()
+
+FCFuncStart(gThreadPoolFuncs)
+ FCFuncElement("PostQueuedCompletionStatus", ThreadPoolNative::CorPostQueuedCompletionStatus)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("GetAvailableThreadsNative", ThreadPoolNative::CorGetAvailableThreads)
+#endif // !FEATURE_CORECLR
+ FCFuncElement("SetMinThreadsNative", ThreadPoolNative::CorSetMinThreads)
+ FCFuncElement("GetMinThreadsNative", ThreadPoolNative::CorGetMinThreads)
+ FCFuncElement("RegisterWaitForSingleObjectNative", ThreadPoolNative::CorRegisterWaitForSingleObject)
+ FCFuncElement("BindIOCompletionCallbackNative", ThreadPoolNative::CorBindIoCompletionCallback)
+ FCFuncElement("SetMaxThreadsNative", ThreadPoolNative::CorSetMaxThreads)
+ FCFuncElement("GetMaxThreadsNative", ThreadPoolNative::CorGetMaxThreads)
+ FCFuncElement("NotifyWorkItemComplete", ThreadPoolNative::NotifyRequestComplete)
+ FCFuncElement("NotifyWorkItemProgressNative", ThreadPoolNative::NotifyRequestProgress)
+ FCFuncElement("IsThreadPoolHosted", ThreadPoolNative::IsThreadPoolHosted)
+ QCFuncElement("InitializeVMTp", ThreadPoolNative::InitializeVMTp)
+ FCFuncElement("ReportThreadStatus", ThreadPoolNative::ReportThreadStatus)
+ QCFuncElement("RequestWorkerThread", ThreadPoolNative::RequestWorkerThread)
+FCFuncEnd()
+
+FCFuncStart(gTimerFuncs)
+ QCFuncElement("CreateAppDomainTimer", AppDomainTimerNative::CreateAppDomainTimer)
+ QCFuncElement("ChangeAppDomainTimer", AppDomainTimerNative::ChangeAppDomainTimer)
+ QCFuncElement("DeleteAppDomainTimer", AppDomainTimerNative::DeleteAppDomainTimer)
+FCFuncEnd()
+
+
+FCFuncStart(gRegisteredWaitHandleFuncs)
+ FCFuncElement("UnregisterWaitNative", ThreadPoolNative::CorUnregisterWait)
+ FCFuncElement("WaitHandleCleanupNative", ThreadPoolNative::CorWaitHandleCleanupNative)
+FCFuncEnd()
+
+FCFuncStart(gWaitHandleFuncs)
+ FCFuncElement("WaitOneNative", WaitHandleNative::CorWaitOneNative)
+ FCFuncElement("WaitMultiple", WaitHandleNative::CorWaitMultipleNative)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("SignalAndWaitOne", WaitHandleNative::CorSignalAndWaitOneNative)
+#endif // !FEATURE_CORECLR
+FCFuncEnd()
+
+FCFuncStart(gNumberFuncs)
+ FCFuncElement("FormatDecimal", COMNumber::FormatDecimal)
+ FCFuncElement("FormatDouble", COMNumber::FormatDouble)
+ FCFuncElement("FormatInt32", COMNumber::FormatInt32)
+ FCFuncElement("FormatUInt32", COMNumber::FormatUInt32)
+ FCFuncElement("FormatInt64", COMNumber::FormatInt64)
+ FCFuncElement("FormatUInt64", COMNumber::FormatUInt64)
+ FCFuncElement("FormatSingle", COMNumber::FormatSingle)
+#if !defined(FEATURE_CORECLR)
+ FCFuncElement("FormatNumberBuffer", COMNumber::FormatNumberBuffer)
+#endif // !FEATURE_CORECLR
+ FCFuncElement("NumberBufferToDecimal", COMNumber::NumberBufferToDecimal)
+ FCFuncElement("NumberBufferToDouble", COMNumber::NumberBufferToDouble)
+FCFuncEnd()
+
+#ifdef FEATURE_COMINTEROP
+FCFuncStart(gVariantFuncs)
+ FCFuncElement("SetFieldsObject", COMVariant::SetFieldsObject)
+ FCFuncElement("SetFieldsR4", COMVariant::SetFieldsR4)
+ FCFuncElement("SetFieldsR8", COMVariant::SetFieldsR8)
+ FCFuncElement("GetR4FromVar", COMVariant::GetR4FromVar)
+ FCFuncElement("GetR8FromVar", COMVariant::GetR8FromVar)
+ FCFuncElement("BoxEnum", COMVariant::BoxEnum)
+FCFuncEnd()
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP
+FCFuncStart(gOAVariantFuncs)
+ FCFuncElement("ChangeTypeEx", COMOAVariant::ChangeTypeEx)
+FCFuncEnd()
+#endif // FEATURE_COMINTEROP
+
+FCFuncStart(gDecimalFuncs)
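+    // COR_CTOR_METHOD_NAME is the metadata constructor name ".ctor"; the explicit
+    // signatures distinguish the float- and double-taking constructor overloads.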
+ FCFuncElementSig(COR_CTOR_METHOD_NAME, &gsig_IM_Flt_RetVoid, COMDecimal::InitSingle)
+ FCFuncElementSig(COR_CTOR_METHOD_NAME, &gsig_IM_Dbl_RetVoid, COMDecimal::InitDouble)
+ FCFuncElement("FCallAddSub", COMDecimal::DoAddSubThrow)
+ FCFuncElement("FCallMultiply", COMDecimal::DoMultiplyThrow)
+ FCFuncElement("FCallDivide", COMDecimal::DoDivideThrow)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("FCallAddSubOverflowed", COMDecimal::DoAddSub)
+ FCFuncElement("FCallMultiplyOverflowed", COMDecimal::DoMultiply)
+ FCFuncElement("FCallDivideOverflowed", COMDecimal::DoDivide)
+#endif // !FEATURE_CORECLR
+ FCFuncElement("FCallCompare", COMDecimal::DoCompare)
+ FCFuncElement("FCallFloor", COMDecimal::DoFloor)
+ FCFuncElement("GetHashCode", COMDecimal::GetHashCode)
+ FCFuncElement("FCallRound", COMDecimal::DoRound)
+ FCFuncElement("FCallToCurrency", COMDecimal::DoToCurrency)
+ FCFuncElement("FCallToInt32", COMDecimal::ToInt32)
+ FCFuncElement("ToDouble", COMDecimal::ToDouble)
+ FCFuncElement("ToSingle", COMDecimal::ToSingle)
+ FCFuncElement("FCallTruncate", COMDecimal::DoTruncate)
+FCFuncEnd()
+
+FCFuncStart(gCurrencyFuncs)
+ FCFuncElement("FCallToDecimal", COMCurrency::DoToDecimal)
+FCFuncEnd()
+
+#ifndef FEATURE_CORECLR
+FCFuncStart(gCLRConfigFuncs)
+ FCFuncElement("CheckLegacyManagedDeflateStream", SystemNative::CheckLegacyManagedDeflateStream)
+ FCFuncElement("CheckThrowUnobservedTaskExceptions", SystemNative::CheckThrowUnobservedTaskExceptions)
+FCFuncEnd()
+#endif // ifndef FEATURE_CORECLR
+
+#if defined(FEATURE_LEGACYSURFACE) && !defined(FEATURE_COREFX_GLOBALIZATION)
+FCFuncStart(gCompareInfoFuncs)
+ QCFuncElement("InternalGetGlobalizedHashCode", COMNlsInfo::InternalGetGlobalizedHashCode)
+ QCFuncElement("InternalCompareString", COMNlsInfo::InternalCompareString)
+ QCFuncElement("InternalFindNLSStringEx", COMNlsInfo::InternalFindNLSStringEx)
+ QCFuncElement("NativeInternalInitSortHandle", COMNlsInfo::InternalInitSortHandle)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("InternalIsSortable", COMNlsInfo::InternalIsSortable)
+ QCFuncElement("InternalGetSortKey", COMNlsInfo::InternalGetSortKey)
+ QCFuncElement("InternalGetSortVersion", COMNlsInfo::InternalGetSortVersion)
+ QCFuncElement("InternalGetNlsVersionEx", COMNlsInfo::InternalGetNlsVersionEx)
+#endif
+FCFuncEnd()
+
+FCFuncStart(gEncodingTableFuncs)
+ FCFuncElement("GetNumEncodingItems", COMNlsInfo::nativeGetNumEncodingItems)
+ FCFuncElement("GetEncodingData", COMNlsInfo::nativeGetEncodingTableDataPointer)
+ FCFuncElement("GetCodePageData", COMNlsInfo::nativeGetCodePageTableDataPointer)
+#if FEATURE_CODEPAGES_FILE
+ FCFuncElement("nativeCreateOpenFileMapping", COMNlsInfo::nativeCreateOpenFileMapping)
+#endif
+FCFuncEnd()
+
+FCFuncStart(gCalendarDataFuncs)
+ FCFuncElement("nativeGetTwoDigitYearMax", CalendarData::nativeGetTwoDigitYearMax)
+ FCFuncElement("nativeGetCalendarData", CalendarData::nativeGetCalendarData)
+ FCFuncElement("nativeGetCalendars", CalendarData::nativeGetCalendars)
+FCFuncEnd()
+
+FCFuncStart(gCultureDataFuncs)
+ FCFuncElement("nativeInitCultureData", COMNlsInfo::nativeInitCultureData)
+ FCFuncElement("nativeGetNumberFormatInfoValues", COMNlsInfo::nativeGetNumberFormatInfoValues)
+ FCFuncElement("nativeEnumTimeFormats", CalendarData::nativeEnumTimeFormats)
+#ifdef FEATURE_USE_LCID
+ FCFuncElement("LCIDToLocaleName", COMNlsInfo::LCIDToLocaleName)
+ FCFuncElement("LocaleNameToLCID", COMNlsInfo::LocaleNameToLCID)
+#endif // FEATURE_USE_LCID
+
+ QCFuncElement("nativeEnumCultureNames", COMNlsInfo::nativeEnumCultureNames)
+
+FCFuncEnd()
+
+FCFuncStart(gCultureInfoFuncs)
+ QCFuncElement("InternalGetDefaultLocaleName", COMNlsInfo::InternalGetDefaultLocaleName)
+ FCFuncElement("nativeGetLocaleInfoEx", COMNlsInfo::nativeGetLocaleInfoEx)
+ FCFuncElement("nativeGetLocaleInfoExInt", COMNlsInfo::nativeGetLocaleInfoExInt)
+
+#ifndef FEATURE_CORECLR
+ FCFuncElement("nativeSetThreadLocale", COMNlsInfo::nativeSetThreadLocale)
+#endif
+ QCFuncElement("InternalGetUserDefaultUILanguage", COMNlsInfo::InternalGetUserDefaultUILanguage)
+ QCFuncElement("InternalGetSystemDefaultUILanguage", COMNlsInfo::InternalGetSystemDefaultUILanguage)
+// Added in .NET 4.0 but disabled on desktop; it stayed disabled in .NET 4.5
+#ifdef FEATURE_CORECLR
+ FCFuncElement("nativeGetResourceFallbackArray", COMNlsInfo::nativeGetResourceFallbackArray)
+#endif
+FCFuncEnd()
+
+FCFuncStart(gTextInfoFuncs)
+ FCFuncElement("InternalChangeCaseChar", COMNlsInfo::InternalChangeCaseChar)
+ FCFuncElement("InternalChangeCaseString", COMNlsInfo::InternalChangeCaseString)
+ FCFuncElement("InternalGetCaseInsHash", COMNlsInfo::InternalGetCaseInsHash)
+ QCFuncElement("InternalCompareStringOrdinalIgnoreCase", COMNlsInfo::InternalCompareStringOrdinalIgnoreCase)
+ QCFuncElement("InternalTryFindStringOrdinalIgnoreCase", COMNlsInfo::InternalTryFindStringOrdinalIgnoreCase)
+FCFuncEnd()
+#endif // defined(FEATURE_LEGACYSURFACE) && !defined(FEATURE_COREFX_GLOBALIZATION)
+
+FCFuncStart(gArrayFuncs)
+ FCFuncElement("get_Rank", ArrayNative::GetRank)
+ FCFuncElement("GetLowerBound", ArrayNative::GetLowerBound)
+ FCFuncElement("GetUpperBound", ArrayNative::GetUpperBound)
+ FCIntrinsicSig("GetLength", &gsig_IM_Int_RetInt, ArrayNative::GetLength, CORINFO_INTRINSIC_Array_GetDimLength)
+ FCFuncElement("get_Length", ArrayNative::GetLengthNoRank)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("get_LongLength", ArrayNative::GetLongLengthNoRank)
+#endif
+ FCFuncElement("GetDataPtrOffsetInternal", ArrayNative::GetDataPtrOffsetInternal)
+ FCFuncElement("Initialize", ArrayNative::Initialize)
+ FCFuncElement("Copy", ArrayNative::ArrayCopy)
+ FCFuncElement("Clear", ArrayNative::ArrayClear)
+ FCFuncElement("InternalCreate", ArrayNative::CreateInstance)
+ FCFuncElement("InternalGetReference", ArrayNative::GetReference)
+ FCFuncElement("InternalSetValue", ArrayNative::SetValue)
+ FCFuncElement("TrySZIndexOf", ArrayHelper::TrySZIndexOf)
+ FCFuncElement("TrySZLastIndexOf", ArrayHelper::TrySZLastIndexOf)
+ FCFuncElement("TrySZBinarySearch", ArrayHelper::TrySZBinarySearch)
+ FCFuncElement("TrySZSort", ArrayHelper::TrySZSort)
+ FCFuncElement("TrySZReverse", ArrayHelper::TrySZReverse)
+FCFuncEnd()
+
+FCFuncStart(gBufferFuncs)
+ FCFuncElement("BlockCopy", Buffer::BlockCopy)
+ FCFuncElement("InternalBlockCopy", Buffer::InternalBlockCopy)
+ FCFuncElement("_GetByte", Buffer::GetByte)
+ FCFuncElement("_SetByte", Buffer::SetByte)
+ FCFuncElement("IsPrimitiveTypeArray", Buffer::IsPrimitiveTypeArray)
+ FCFuncElement("_ByteLength", Buffer::ByteLength)
+#ifdef _TARGET_ARM_
+ FCFuncElement("Memcpy", FCallMemcpy)
+#endif
+ QCFuncElement("__Memmove", Buffer::MemMove)
+FCFuncEnd()
+
+FCFuncStart(gGCInterfaceFuncs)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("GetGenerationWR", GCInterface::GetGenerationWR)
+ FCFuncElement("_RegisterForFullGCNotification", GCInterface::RegisterForFullGCNotification)
+ FCFuncElement("_CancelFullGCNotification", GCInterface::CancelFullGCNotification)
+ FCFuncElement("_WaitForFullGCApproach", GCInterface::WaitForFullGCApproach)
+ FCFuncElement("_WaitForFullGCComplete", GCInterface::WaitForFullGCComplete)
+#endif
+ FCFuncElement("_CollectionCount", GCInterface::CollectionCount)
+ FCFuncElement("GetGCLatencyMode", GCInterface::GetGcLatencyMode)
+ FCFuncElement("SetGCLatencyMode", GCInterface::SetGcLatencyMode)
+ FCFuncElement("GetLOHCompactionMode", GCInterface::GetLOHCompactionMode)
+ FCFuncElement("SetLOHCompactionMode", GCInterface::SetLOHCompactionMode)
+ FCFuncElement("IsServerGC", SystemNative::IsServerGC)
+ QCFuncElement("_AddMemoryPressure", GCInterface::_AddMemoryPressure)
+ QCFuncElement("_RemoveMemoryPressure", GCInterface::_RemoveMemoryPressure)
+ FCFuncElement("GetGeneration", GCInterface::GetGeneration)
+ QCFuncElement("GetTotalMemory", GCInterface::GetTotalMemory)
+ QCFuncElement("_Collect", GCInterface::Collect)
+ FCFuncElement("GetMaxGeneration", GCInterface::GetMaxGeneration)
+ QCFuncElement("_WaitForPendingFinalizers", GCInterface::WaitForPendingFinalizers)
+
+ FCFuncElement("_SuppressFinalize", GCInterface::SuppressFinalize)
+ FCFuncElement("_ReRegisterForFinalize", GCInterface::ReRegisterForFinalize)
+
+FCFuncEnd()
+
+#ifndef FEATURE_CORECLR
+FCFuncStart(gMemoryFailPointFuncs)
+ FCFuncElement("GetMemorySettings", COMMemoryFailPoint::GetMemorySettings)
+FCFuncEnd()
+#endif // !FEATURE_CORECLR
+
+FCFuncStart(gInteropMarshalFuncs)
+ FCFuncElement("GetLastWin32Error", MarshalNative::GetLastWin32Error)
+ FCFuncElement("SetLastWin32Error", MarshalNative::SetLastWin32Error)
+ FCFuncElement("SizeOfHelper", MarshalNative::SizeOfClass)
+ FCFuncElement("GetSystemMaxDBCSCharSize", MarshalNative::GetSystemMaxDBCSCharSize)
+ FCFuncElement("PtrToStructureHelper", MarshalNative::PtrToStructureHelper)
+ FCFuncElement("DestroyStructure", MarshalNative::DestroyStructure)
+ FCFuncElement("UnsafeAddrOfPinnedArrayElement", MarshalNative::FCUnsafeAddrOfPinnedArrayElement)
+ FCFuncElement("GetExceptionCode", ExceptionNative::GetExceptionCode)
+#ifndef FEATURE_CORECLR
+ QCFuncElement("InternalNumParamBytes", MarshalNative::NumParamBytes)
+ FCFuncElement("GetExceptionPointers", ExceptionNative::GetExceptionPointers)
+ QCFuncElement("GetHINSTANCE", COMModule::GetHINSTANCE)
+ FCFuncElement("GetUnmanagedThunkForManagedMethodPtr", MarshalNative::GetUnmanagedThunkForManagedMethodPtr)
+ FCFuncElement("GetManagedThunkForUnmanagedMethodPtr", MarshalNative::GetManagedThunkForUnmanagedMethodPtr)
+ FCFuncElement("InternalGetThreadFromFiberCookie", MarshalNative::GetThreadFromFiberCookie)
+#endif
+
+ FCFuncElement("OffsetOfHelper", MarshalNative::OffsetOfHelper)
+ FCFuncElement("SizeOfType", SafeBuffer::SizeOfType)
+ FCFuncElement("AlignedSizeOfType", SafeBuffer::AlignedSizeOfType)
+
+ QCFuncElement("InternalPrelink", MarshalNative::Prelink)
+ FCFuncElement("CopyToNative", MarshalNative::CopyToNative)
+ FCFuncElement("CopyToManaged", MarshalNative::CopyToManaged)
+ FCFuncElement("StructureToPtr", MarshalNative::StructureToPtr)
+ FCFuncElement("ThrowExceptionForHRInternal", MarshalNative::ThrowExceptionForHR)
+ FCFuncElement("GetExceptionForHRInternal", MarshalNative::GetExceptionForHR)
+ FCFuncElement("GetHRForException", MarshalNative::GetHRForException)
+ FCFuncElement("GetHRForException_WinRT", MarshalNative::GetHRForException_WinRT)
+ FCFuncElement("GetDelegateForFunctionPointerInternal", MarshalNative::GetDelegateForFunctionPointerInternal)
+ FCFuncElement("GetFunctionPointerForDelegateInternal", MarshalNative::GetFunctionPointerForDelegateInternal)
+#ifdef FEATURE_COMINTEROP
+ FCFuncElement("GetRawIUnknownForComObjectNoAddRef", MarshalNative::GetRawIUnknownForComObjectNoAddRef)
+ FCFuncElement("IsComObject", MarshalNative::IsComObject)
+ FCFuncElement("GetObjectForIUnknown", MarshalNative::GetObjectForIUnknown)
+ FCFuncElement("GetUniqueObjectForIUnknown", MarshalNative::GetUniqueObjectForIUnknown)
+ FCFuncElement("AddRef", MarshalNative::AddRef)
+ FCFuncElement("GetNativeVariantForObject", MarshalNative::GetNativeVariantForObject)
+ FCFuncElement("GetObjectForNativeVariant", MarshalNative::GetObjectForNativeVariant)
+ FCFuncElement("InternalFinalReleaseComObject", MarshalNative::FinalReleaseComObject)
+ FCFuncElement("QueryInterface", MarshalNative::QueryInterface)
+ FCFuncElement("CreateAggregatedObject", MarshalNative::CreateAggregatedObject)
+ FCFuncElement("AreComObjectsAvailableForCleanup", MarshalNative::AreComObjectsAvailableForCleanup)
+ FCFuncElement("InternalCreateWrapperOfType", MarshalNative::InternalCreateWrapperOfType)
+ FCFuncElement("GetObjectsForNativeVariants", MarshalNative::GetObjectsForNativeVariants)
+ FCFuncElement("GetStartComSlot", MarshalNative::GetStartComSlot)
+
+ FCFuncElement("InitializeManagedWinRTFactoryObject", MarshalNative::InitializeManagedWinRTFactoryObject)
+
+ FCFuncElement("GetNativeActivationFactory", MarshalNative::GetNativeActivationFactory)
+ FCFuncElement("GetIUnknownForObjectNative", MarshalNative::GetIUnknownForObjectNative)
+ FCFuncElement("GetIDispatchForObjectNative", MarshalNative::GetIDispatchForObjectNative)
+ FCFuncElement("GetComInterfaceForObjectNative", MarshalNative::GetComInterfaceForObjectNative)
+ FCFuncElement("InternalReleaseComObject", MarshalNative::ReleaseComObject)
+ FCFuncElement("Release", MarshalNative::Release)
+ FCFuncElement("InitializeWrapperForWinRT", MarshalNative::InitializeWrapperForWinRT)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("GetLoadedTypeForGUID", MarshalNative::GetLoadedTypeForGUID)
+ FCFuncElement("GetITypeInfoForType", MarshalNative::GetITypeInfoForType)
+ FCFuncElement("GetTypedObjectForIUnknown", MarshalNative::GetTypedObjectForIUnknown)
+ FCFuncElement("CleanupUnusedObjectsInCurrentContext", MarshalNative::CleanupUnusedObjectsInCurrentContext)
+ FCFuncElement("IsTypeVisibleFromCom", MarshalNative::IsTypeVisibleFromCom)
+ FCFuncElement("FCallGenerateGuidForType", MarshalNative::DoGenerateGuidForType)
+ FCFuncElement("FCallGetTypeLibGuid", MarshalNative::DoGetTypeLibGuid)
+ FCFuncElement("GetTypeLibLcid", MarshalNative::GetTypeLibLcid)
+ FCFuncElement("GetTypeLibVersion", MarshalNative::GetTypeLibVersion)
+ FCFuncElement("FCallGetTypeInfoGuid", MarshalNative::DoGetTypeInfoGuid)
+ FCFuncElement("FCallGetTypeLibGuidForAssembly", MarshalNative::DoGetTypeLibGuidForAssembly)
+ FCFuncElement("_GetTypeLibVersionForAssembly", MarshalNative::GetTypeLibVersionForAssembly)
+ FCFuncElement("GetEndComSlot", MarshalNative::GetEndComSlot)
+ FCFuncElement("GetMethodInfoForComSlot", MarshalNative::GetMethodInfoForComSlot)
+ FCFuncElement("InternalGetComSlotForMethodInfo", MarshalNative::GetComSlotForMethodInfo)
+ FCFuncElement("InternalSwitchCCW", MarshalNative::SwitchCCW)
+ FCFuncElement("InternalWrapIUnknownWithComObject", MarshalNative::WrapIUnknownWithComObject)
+ FCFuncElement("ChangeWrapperHandleStrength", MarshalNative::ChangeWrapperHandleStrength)
+ QCFuncElement("_GetInspectableIids", MarshalNative::GetInspectableIIDs)
+ QCFuncElement("_GetCachedWinRTTypes", MarshalNative::GetCachedWinRTTypes)
+ QCFuncElement("_GetCachedWinRTTypeByIid", MarshalNative::GetCachedWinRTTypeByIID)
+#endif // !FEATURE_CORECLR
+#endif // FEATURE_COMINTEROP
+FCFuncEnd()
+
+FCFuncStart(gArrayWithOffsetFuncs)
+ FCFuncElement("CalculateCount", MarshalNative::CalculateCount)
+FCFuncEnd()
+
+#ifdef FEATURE_COMINTEROP
+
+#ifndef FEATURE_CORECLR
+FCFuncStart(gExtensibleClassFactoryFuncs)
+ FCFuncElement("RegisterObjectCreationCallback", RegisterObjectCreationCallback)
+FCFuncEnd()
+#endif
+
+
+#ifdef FEATURE_COMINTEROP_TLB_SUPPORT
+FCFuncStart(gTypeLibConverterFuncs)
+ FCFuncElement("nConvertAssemblyToTypeLib", COMTypeLibConverter::ConvertAssemblyToTypeLib)
+ FCFuncElement("nConvertTypeLibToMetadata", COMTypeLibConverter::ConvertTypeLibToMetadata)
+ QCFuncElement("LoadInMemoryTypeByName", COMModule::LoadInMemoryTypeByName)
+FCFuncEnd()
+#endif // FEATURE_COMINTEROP_TLB_SUPPORT
+
+#ifdef FEATURE_COMINTEROP_MANAGED_ACTIVATION
+FCFuncStart(gRegistrationFuncs)
+ FCFuncElement("RegisterTypeForComClientsNative", RegisterTypeForComClientsNative)
+ FCFuncElement("RegisterTypeForComClientsExNative", RegisterTypeForComClientsExNative)
+FCFuncEnd()
+#endif // FEATURE_COMINTEROP_MANAGED_ACTIVATION
+
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_CAS_POLICY
+FCFuncStart(gPolicyManagerFuncs)
+#ifdef _DEBUG
+ QCFuncElement("DebugOut", SecurityConfig::DebugOut)
+#endif
+FCFuncEnd()
+
+FCFuncStart(gPolicyConfigFuncs)
+ QCFuncElement("ResetCacheData", SecurityConfig::ResetCacheData)
+ QCFuncElement("SaveDataByte", SecurityConfig::SaveDataByte)
+ QCFuncElement("RecoverData", SecurityConfig::RecoverData)
+ QCFuncElement("SetQuickCache", SecurityConfig::SetQuickCache)
+ QCFuncElement("GetCacheEntry", SecurityConfig::GetCacheEntry)
+ QCFuncElement("AddCacheEntry", SecurityConfig::AddCacheEntry)
+ QCFuncElement("GetMachineDirectory", SecurityConfig::_GetMachineDirectory)
+ QCFuncElement("GetUserDirectory", SecurityConfig::_GetUserDirectory)
+ QCFuncElement("WriteToEventLog", SecurityConfig::WriteToEventLog)
+FCFuncEnd()
+#endif // FEATURE_CAS_POLICY
+
+#ifndef FEATURE_CORECLR
+FCFuncStart(gPrincipalFuncs)
+ FCFuncElement("OpenThreadToken", COMPrincipal::OpenThreadToken)
+ QCFuncElement("ImpersonateLoggedOnUser", COMPrincipal::ImpersonateLoggedOnUser)
+ QCFuncElement("RevertToSelf", COMPrincipal::RevertToSelf)
+ QCFuncElement("SetThreadToken", COMPrincipal::SetThreadToken)
+FCFuncEnd()
+#endif // !FEATURE_CORECLR
+
+#ifdef FEATURE_CAS_POLICY
+FCFuncStart(gEvidenceFuncs)
+FCFuncEnd()
+
+FCFuncStart(gAssemblyEvidenceFactoryFuncs)
+ QCFuncElement("GetAssemblyPermissionRequests", SecurityPolicy::GetAssemblyPermissionRequests)
+ QCFuncElement("GetStrongNameInformation", SecurityPolicy::GetStrongNameInformation)
+FCFuncEnd()
+
+FCFuncStart(gPEFileEvidenceFactoryFuncs)
+ QCFuncElement("GetAssemblySuppliedEvidence", SecurityPolicy::GetAssemblySuppliedEvidence)
+ QCFuncElement("GetLocationEvidence", SecurityPolicy::GetLocationEvidence)
+ QCFuncElement("GetPublisherCertificate", SecurityPolicy::GetPublisherCertificate)
+ QCFuncElement("FireEvidenceGeneratedEvent", SecurityPolicy::FireEvidenceGeneratedEvent)
+FCFuncEnd()
+
+FCFuncStart(gHostExecutionContextManagerFuncs)
+ FCFuncElement("ReleaseHostSecurityContext", HostExecutionContextManager::ReleaseSecurityContext)
+ FCFuncElement("CloneHostSecurityContext", HostExecutionContextManager::CloneSecurityContext)
+ FCFuncElement("CaptureHostSecurityContext", HostExecutionContextManager::CaptureSecurityContext)
+ FCFuncElement("SetHostSecurityContext", HostExecutionContextManager::SetSecurityContext)
+ FCFuncElement("HostSecurityManagerPresent", HostExecutionContextManager::HostPresent)
+FCFuncEnd()
+#endif // FEATURE_CAS_POLICY
+
+#if defined(FEATURE_ISOSTORE) && !defined(FEATURE_ISOSTORE_LIGHT)
+FCFuncStart(gIsolatedStorage)
+ QCFuncElement("GetCaller", COMIsolatedStorage::GetCaller)
+FCFuncEnd()
+
+FCFuncStart(gIsolatedStorageFile)
+ QCFuncElement("GetRootDir", COMIsolatedStorageFile::GetRootDir)
+ QCFuncElement("GetQuota", COMIsolatedStorageFile::GetQuota)
+ QCFuncElement("SetQuota", COMIsolatedStorageFile::SetQuota)
+ QCFuncElement("Reserve", COMIsolatedStorageFile::Reserve)
+ QCFuncElement("GetUsage", COMIsolatedStorageFile::GetUsage)
+ QCFuncElement("Open", COMIsolatedStorageFile::Open)
+ QCFuncElement("Lock", COMIsolatedStorageFile::Lock)
+ QCFuncElement("CreateDirectoryWithDacl", COMIsolatedStorageFile::CreateDirectoryWithDacl)
+FCFuncEnd()
+
+FCFuncStart(gIsolatedStorageFileHandle)
+ QCFuncElement("Close", COMIsolatedStorageFile::Close)
+FCFuncEnd()
+#endif // FEATURE_ISOSTORE && !FEATURE_ISOSTORE_LIGHT
+
+FCFuncStart(gTypeLoadExceptionFuncs)
+ QCFuncElement("GetTypeLoadExceptionMessage", GetTypeLoadExceptionMessage)
+FCFuncEnd()
+
+FCFuncStart(gFileLoadExceptionFuncs)
+ QCFuncElement("GetFileLoadExceptionMessage", GetFileLoadExceptionMessage)
+ QCFuncElement("GetMessageForHR", FileLoadException_GetMessageForHR)
+FCFuncEnd()
+
+FCFuncStart(gMissingMemberExceptionFuncs)
+ FCFuncElement("FormatSignature", MissingMemberException_FormatSignature)
+FCFuncEnd()
+
+FCFuncStart(gInterlockedFuncs)
+ FCIntrinsicSig("Exchange", &gsig_SM_RefInt_Int_RetInt, COMInterlocked::Exchange, CORINFO_INTRINSIC_InterlockedXchg32)
+ FCIntrinsicSig("Exchange", &gsig_SM_RefLong_Long_RetLong, COMInterlocked::Exchange64, CORINFO_INTRINSIC_InterlockedXchg64)
+ FCFuncElementSig("Exchange", &gsig_SM_RefDbl_Dbl_RetDbl, COMInterlocked::ExchangeDouble)
+ FCFuncElementSig("Exchange", &gsig_SM_RefFlt_Flt_RetFlt, COMInterlocked::ExchangeFloat)
+ FCFuncElementSig("Exchange", &gsig_SM_RefObj_Obj_RetObj, COMInterlocked::ExchangeObject)
+ FCFuncElementSig("Exchange", &gsig_SM_RefIntPtr_IntPtr_RetIntPtr, COMInterlocked::ExchangePointer)
+ FCIntrinsicSig("CompareExchange", &gsig_SM_RefInt_Int_Int_RetInt, COMInterlocked::CompareExchange, CORINFO_INTRINSIC_InterlockedCmpXchg32)
+ FCIntrinsicSig("CompareExchange", &gsig_SM_RefLong_Long_Long_RetLong, COMInterlocked::CompareExchange64, CORINFO_INTRINSIC_InterlockedCmpXchg64)
+ FCFuncElementSig("CompareExchange", &gsig_SM_RefDbl_Dbl_Dbl_RetDbl, COMInterlocked::CompareExchangeDouble)
+ FCFuncElementSig("CompareExchange", &gsig_SM_RefFlt_Flt_Flt_RetFlt, COMInterlocked::CompareExchangeFloat)
+ FCFuncElementSig("CompareExchange", &gsig_SM_RefObj_Obj_Obj_RetObj, COMInterlocked::CompareExchangeObject)
+ FCFuncElementSig("CompareExchange", &gsig_SM_RefInt_Int_Int_RefBool_RetInt, COMInterlocked::CompareExchangeReliableResult)
+ FCFuncElementSig("CompareExchange", &gsig_SM_RefIntPtr_IntPtr_IntPtr_RetIntPtr, COMInterlocked::CompareExchangePointer)
+ FCIntrinsicSig("ExchangeAdd", &gsig_SM_RefInt_Int_RetInt, COMInterlocked::ExchangeAdd32, CORINFO_INTRINSIC_InterlockedXAdd32)
+ FCIntrinsicSig("ExchangeAdd", &gsig_SM_RefLong_Long_RetLong, COMInterlocked::ExchangeAdd64, CORINFO_INTRINSIC_InterlockedXAdd64)
+
+ FCFuncElement("_Exchange", COMInterlocked::ExchangeGeneric)
+ FCFuncElement("_CompareExchange", COMInterlocked::CompareExchangeGeneric)
+
+FCFuncEnd()
+
+FCFuncStart(gVarArgFuncs)
+ FCFuncElementSig(COR_CTOR_METHOD_NAME, &gsig_IM_IntPtr_PtrVoid_RetVoid, VarArgsNative::Init2)
+#ifndef FEATURE_CORECLR
+ FCFuncElementSig(COR_CTOR_METHOD_NAME, &gsig_IM_IntPtr_RetVoid, VarArgsNative::Init)
+ FCFuncElement("GetRemainingCount", VarArgsNative::GetRemainingCount)
+ FCFuncElement("_GetNextArgType", VarArgsNative::GetNextArgType)
+ FCFuncElement("FCallGetNextArg", VarArgsNative::DoGetNextArg)
+ FCFuncElement("InternalGetNextArg", VarArgsNative::GetNextArg2)
+#endif // !FEATURE_CORECLR
+FCFuncEnd()
+
+FCFuncStart(gMonitorFuncs)
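+    // The Enter/Exit fast paths bind straight to the runtime's JIT_Mon* monitor helpers
+    // rather than to dedicated FCalls, so managed Monitor.Enter/Exit and the helper
+    // paths the JIT uses (e.g. for synchronized methods) share one implementation.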
+ FCFuncElement("Enter", JIT_MonEnter)
+ FCFuncElement("ReliableEnter", JIT_MonReliableEnter)
+ FCFuncElement("ReliableEnterTimeout", JIT_MonTryEnter)
+ FCFuncElement("Exit", JIT_MonExit)
+ FCFuncElement("ObjWait", ObjectNative::WaitTimeout)
+ FCFuncElement("ObjPulse", ObjectNative::Pulse)
+ FCFuncElement("ObjPulseAll", ObjectNative::PulseAll)
+ FCFuncElement("IsEnteredNative", ObjectNative::IsLockHeld)
+FCFuncEnd()
+
+FCFuncStart(gOverlappedFuncs)
+ FCFuncElement("AllocateNativeOverlapped", AllocateNativeOverlapped)
+ FCFuncElement("FreeNativeOverlapped", FreeNativeOverlapped)
+ FCFuncElement("CheckVMForIOPacket", CheckVMForIOPacket)
+ FCFuncElement("GetOverlappedFromNative", GetOverlappedFromNative)
+FCFuncEnd()
+
+FCFuncStart(gCompilerFuncs)
+ FCFuncElement("GetObjectValue", ObjectNative::GetObjectValue)
+ FCIntrinsic("InitializeArray", ArrayNative::InitializeArray, CORINFO_INTRINSIC_InitializeArray)
+ FCFuncElement("_RunClassConstructor", ReflectionInvocation::RunClassConstructor)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("_RunModuleConstructor", ReflectionInvocation::RunModuleConstructor)
+ FCFuncElement("_PrepareMethod", ReflectionInvocation::PrepareMethod)
+#endif // !FEATURE_CORECLR
+ QCFuncElement("_CompileMethod", ReflectionInvocation::CompileMethod)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("PrepareDelegate", ReflectionInvocation::PrepareDelegate)
+#endif // !FEATURE_CORECLR
+ FCFuncElement("PrepareContractedDelegate", ReflectionInvocation::PrepareContractedDelegate)
+ FCFuncElement("ProbeForSufficientStack", ReflectionInvocation::ProbeForSufficientStack)
+ FCFuncElement("ExecuteCodeWithGuaranteedCleanup", ReflectionInvocation::ExecuteCodeWithGuaranteedCleanup)
+ FCFuncElement("GetHashCode", ObjectNative::GetHashCode)
+ FCFuncElement("Equals", ObjectNative::Equals)
+ FCFuncElement("EnsureSufficientExecutionStack", ReflectionInvocation::EnsureSufficientExecutionStack)
+#ifdef FEATURE_CORECLR
+ FCFuncElement("TryEnsureSufficientExecutionStack", ReflectionInvocation::TryEnsureSufficientExecutionStack)
+#endif // FEATURE_CORECLR
+FCFuncEnd()
+
+FCFuncStart(gContextSynchronizationFuncs)
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+ FCFuncElement("WaitHelper", SynchronizationContextNative::WaitHelper)
+#endif // #ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+#ifdef FEATURE_APPX
+ QCFuncElement("GetWinRTDispatcherForCurrentThread", SynchronizationContextNative::GetWinRTDispatcherForCurrentThread)
+#endif
+FCFuncEnd()
+
+FCFuncStart(gDateMarshalerFuncs)
+ FCFuncElement("ConvertToNative", StubHelpers::DateMarshaler__ConvertToNative)
+ FCFuncElement("ConvertToManaged", StubHelpers::DateMarshaler__ConvertToManaged)
+FCFuncEnd()
+
+FCFuncStart(gValueClassMarshalerFuncs)
+ FCFuncElement("ConvertToNative", StubHelpers::ValueClassMarshaler__ConvertToNative)
+ FCFuncElement("ConvertToManaged", StubHelpers::ValueClassMarshaler__ConvertToManaged)
+ FCFuncElement("ClearNative", StubHelpers::ValueClassMarshaler__ClearNative)
+FCFuncEnd()
+
+FCFuncStart(gMngdNativeArrayMarshalerFuncs)
+ FCFuncElement("CreateMarshaler", MngdNativeArrayMarshaler::CreateMarshaler)
+ FCFuncElement("ConvertSpaceToNative", MngdNativeArrayMarshaler::ConvertSpaceToNative)
+ FCFuncElement("ConvertContentsToNative", MngdNativeArrayMarshaler::ConvertContentsToNative)
+ FCFuncElement("ConvertSpaceToManaged", MngdNativeArrayMarshaler::ConvertSpaceToManaged)
+ FCFuncElement("ConvertContentsToManaged", MngdNativeArrayMarshaler::ConvertContentsToManaged)
+ FCFuncElement("ClearNative", MngdNativeArrayMarshaler::ClearNative)
+ FCFuncElement("ClearNativeContents", MngdNativeArrayMarshaler::ClearNativeContents)
+FCFuncEnd()
+
+#ifdef FEATURE_COMINTEROP
+FCFuncStart(gObjectMarshalerFuncs)
+ FCFuncElement("ConvertToNative", StubHelpers::ObjectMarshaler__ConvertToNative)
+ FCFuncElement("ConvertToManaged", StubHelpers::ObjectMarshaler__ConvertToManaged)
+ FCFuncElement("ClearNative", StubHelpers::ObjectMarshaler__ClearNative)
+FCFuncEnd()
+
+FCFuncStart(gInterfaceMarshalerFuncs)
+ FCFuncElement("ConvertToNative", StubHelpers::InterfaceMarshaler__ConvertToNative)
+ FCFuncElement("ConvertToManaged", StubHelpers::InterfaceMarshaler__ConvertToManaged)
+ QCFuncElement("ClearNative", StubHelpers::InterfaceMarshaler__ClearNative)
+ FCFuncElement("ConvertToManagedWithoutUnboxing", StubHelpers::InterfaceMarshaler__ConvertToManagedWithoutUnboxing)
+FCFuncEnd()
+
+FCFuncStart(gUriMarshalerFuncs)
+ FCFuncElement("GetRawUriFromNative", StubHelpers::UriMarshaler__GetRawUriFromNative)
+ FCFuncElement("CreateNativeUriInstanceHelper", StubHelpers::UriMarshaler__CreateNativeUriInstance)
+FCFuncEnd()
+
+FCFuncStart(gEventArgsMarshalerFuncs)
+ QCFuncElement("CreateNativeNCCEventArgsInstanceHelper", StubHelpers::EventArgsMarshaler__CreateNativeNCCEventArgsInstance)
+ QCFuncElement("CreateNativePCEventArgsInstance", StubHelpers::EventArgsMarshaler__CreateNativePCEventArgsInstance)
+FCFuncEnd()
+
+FCFuncStart(gMngdSafeArrayMarshalerFuncs)
+ FCFuncElement("CreateMarshaler", MngdSafeArrayMarshaler::CreateMarshaler)
+ FCFuncElement("ConvertSpaceToNative", MngdSafeArrayMarshaler::ConvertSpaceToNative)
+ FCFuncElement("ConvertContentsToNative", MngdSafeArrayMarshaler::ConvertContentsToNative)
+ FCFuncElement("ConvertSpaceToManaged", MngdSafeArrayMarshaler::ConvertSpaceToManaged)
+ FCFuncElement("ConvertContentsToManaged", MngdSafeArrayMarshaler::ConvertContentsToManaged)
+ FCFuncElement("ClearNative", MngdSafeArrayMarshaler::ClearNative)
+FCFuncEnd()
+
+FCFuncStart(gMngdHiddenLengthArrayMarshalerFuncs)
+ FCFuncElement("CreateMarshaler", MngdHiddenLengthArrayMarshaler::CreateMarshaler)
+ FCFuncElement("ConvertSpaceToNative", MngdHiddenLengthArrayMarshaler::ConvertSpaceToNative)
+ FCFuncElement("ConvertContentsToNative", MngdHiddenLengthArrayMarshaler::ConvertContentsToNative)
+ FCFuncElement("ConvertSpaceToManaged", MngdHiddenLengthArrayMarshaler::ConvertSpaceToManaged)
+ FCFuncElement("ConvertContentsToManaged", MngdHiddenLengthArrayMarshaler::ConvertContentsToManaged)
+ FCFuncElement("ClearNativeContents", MngdHiddenLengthArrayMarshaler::ClearNativeContents)
+FCFuncEnd()
+
+FCFuncStart(gWinRTTypeNameConverterFuncs)
+ FCFuncElement("ConvertToWinRTTypeName", StubHelpers::WinRTTypeNameConverter__ConvertToWinRTTypeName)
+ FCFuncElement("GetTypeFromWinRTTypeName", StubHelpers::WinRTTypeNameConverter__GetTypeFromWinRTTypeName)
+FCFuncEnd()
+
+#endif // FEATURE_COMINTEROP
+
+FCFuncStart(gMngdRefCustomMarshalerFuncs)
+ FCFuncElement("CreateMarshaler", MngdRefCustomMarshaler::CreateMarshaler)
+ FCFuncElement("ConvertContentsToNative", MngdRefCustomMarshaler::ConvertContentsToNative)
+ FCFuncElement("ConvertContentsToManaged", MngdRefCustomMarshaler::ConvertContentsToManaged)
+ FCFuncElement("ClearNative", MngdRefCustomMarshaler::ClearNative)
+ FCFuncElement("ClearManaged", MngdRefCustomMarshaler::ClearManaged)
+FCFuncEnd()
+
+FCFuncStart(gStubHelperFuncs)
+#ifndef FEATURE_CORECLR
+#ifndef _WIN64
+ FCFuncElement("GetFinalStubTarget", StubHelpers::GetFinalStubTarget)
+#endif // !_WIN64
+ FCFuncElement("DemandPermission", StubHelpers::DemandPermission)
+#endif // !FEATURE_CORECLR
+ FCFuncElement("IsQCall", StubHelpers::IsQCall)
+ FCFuncElement("InitDeclaringType", StubHelpers::InitDeclaringType)
+ FCIntrinsic("GetNDirectTarget", StubHelpers::GetNDirectTarget, CORINFO_INTRINSIC_StubHelpers_GetNDirectTarget)
+ FCFuncElement("GetDelegateTarget", StubHelpers::GetDelegateTarget)
+ FCFuncElement("SetLastError", StubHelpers::SetLastError)
+ FCFuncElement("ThrowInteropParamException", StubHelpers::ThrowInteropParamException)
+ FCFuncElement("InternalGetHRExceptionObject", StubHelpers::GetHRExceptionObject)
+#ifdef FEATURE_COMINTEROP
+ FCFuncElement("InternalGetCOMHRExceptionObject", StubHelpers::GetCOMHRExceptionObject)
+ FCFuncElement("GetCOMIPFromRCW", StubHelpers::GetCOMIPFromRCW)
+ FCFuncElement("GetCOMIPFromRCW_WinRT", StubHelpers::GetCOMIPFromRCW_WinRT)
+ FCFuncElement("GetCOMIPFromRCW_WinRTSharedGeneric", StubHelpers::GetCOMIPFromRCW_WinRTSharedGeneric)
+ FCFuncElement("GetCOMIPFromRCW_WinRTDelegate", StubHelpers::GetCOMIPFromRCW_WinRTDelegate)
+ FCFuncElement("ShouldCallWinRTInterface", StubHelpers::ShouldCallWinRTInterface)
+ FCFuncElement("GetTargetForAmbiguousVariantCall", StubHelpers::GetTargetForAmbiguousVariantCall)
+ FCFuncElement("StubRegisterRCW", StubHelpers::StubRegisterRCW)
+ FCFuncElement("StubUnregisterRCW", StubHelpers::StubUnregisterRCW)
+ FCFuncElement("GetDelegateInvokeMethod", StubHelpers::GetDelegateInvokeMethod)
+ FCFuncElement("GetWinRTFactoryObject", StubHelpers::GetWinRTFactoryObject)
+ FCFuncElement("GetWinRTFactoryReturnValue", StubHelpers::GetWinRTFactoryReturnValue)
+ FCFuncElement("GetOuterInspectable", StubHelpers::GetOuterInspectable)
+#ifdef MDA_SUPPORTED
+ FCFuncElement("TriggerExceptionSwallowedMDA", StubHelpers::TriggerExceptionSwallowedMDA)
+#endif
+#endif // FEATURE_COMINTEROP
+#ifdef MDA_SUPPORTED
+ FCFuncElement("CheckCollectedDelegateMDA", StubHelpers::CheckCollectedDelegateMDA)
+#endif // MDA_SUPPORTED
+#ifdef PROFILING_SUPPORTED
+ FCFuncElement("ProfilerBeginTransitionCallback", StubHelpers::ProfilerBeginTransitionCallback)
+ FCFuncElement("ProfilerEndTransitionCallback", StubHelpers::ProfilerEndTransitionCallback)
+#endif
+ FCFuncElement("CreateCustomMarshalerHelper", StubHelpers::CreateCustomMarshalerHelper)
+ FCFuncElement("DecimalCanonicalizeInternal", StubHelpers::DecimalCanonicalizeInternal)
+ FCFuncElement("FmtClassUpdateNativeInternal", StubHelpers::FmtClassUpdateNativeInternal)
+ FCFuncElement("FmtClassUpdateCLRInternal", StubHelpers::FmtClassUpdateCLRInternal)
+ FCFuncElement("LayoutDestroyNativeInternal", StubHelpers::LayoutDestroyNativeInternal)
+ FCFuncElement("AllocateInternal", StubHelpers::AllocateInternal)
+ FCFuncElement("strlen", StubHelpers::AnsiStrlen)
+ FCFuncElement("MarshalToUnmanagedVaListInternal", StubHelpers::MarshalToUnmanagedVaListInternal)
+ FCFuncElement("MarshalToManagedVaListInternal", StubHelpers::MarshalToManagedVaListInternal)
+ FCFuncElement("CalcVaListSize", StubHelpers::CalcVaListSize)
+ FCFuncElement("ValidateObject", StubHelpers::ValidateObject)
+ FCFuncElement("ValidateByref", StubHelpers::ValidateByref)
+ FCFuncElement("LogPinnedArgument", StubHelpers::LogPinnedArgument)
+ FCIntrinsic("GetStubContext", StubHelpers::GetStubContext, CORINFO_INTRINSIC_StubHelpers_GetStubContext)
+#ifdef _WIN64
+ FCIntrinsic("GetStubContextAddr", StubHelpers::GetStubContextAddr, CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
+#endif // _WIN64
+#ifdef MDA_SUPPORTED
+ FCFuncElement("TriggerGCForMDA", StubHelpers::TriggerGCForMDA)
+#endif // MDA_SUPPORTED
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+ FCFuncElement("ArrayTypeCheck", StubHelpers::ArrayTypeCheck)
+#endif //FEATURE_ARRAYSTUB_AS_IL
+#ifdef FEATURE_STUBS_AS_IL
+ FCFuncElement("MulticastDebuggerTraceHelper", StubHelpers::MulticastDebuggerTraceHelper)
+#endif //FEATURE_STUBS_AS_IL
+FCFuncEnd()
+
+FCFuncStart(gCoverageFuncs)
+ FCUnreferenced FCFuncElement("nativeCoverBlock", COMCoverage::nativeCoverBlock)
+FCFuncEnd()
+
+FCFuncStart(gGCHandleFuncs)
+ FCFuncElement("InternalAlloc", MarshalNative::GCHandleInternalAlloc)
+ FCFuncElement("InternalFree", MarshalNative::GCHandleInternalFree)
+ FCFuncElement("InternalGet", MarshalNative::GCHandleInternalGet)
+ FCFuncElement("InternalSet", MarshalNative::GCHandleInternalSet)
+ FCFuncElement("InternalCompareExchange", MarshalNative::GCHandleInternalCompareExchange)
+ FCFuncElement("InternalAddrOfPinnedObject", MarshalNative::GCHandleInternalAddrOfPinnedObject)
+ FCFuncElement("InternalCheckDomain", MarshalNative::GCHandleInternalCheckDomain)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("InternalGetHandleType", MarshalNative::GCHandleInternalGetHandleType)
+#endif
+FCFuncEnd()
+
+#ifndef FEATURE_CORECLR
+FCFuncStart(gConfigHelper)
+ FCFuncElement("RunParser", ConfigNative::RunParser)
+FCFuncEnd()
+#endif // !FEATURE_CORECLR
+
+#ifndef FEATURE_CORECLR
+FCFuncStart(gConsoleFuncs)
+ QCFuncElement("GetTitleNative", ConsoleNative::GetTitle)
+FCFuncEnd()
+#endif // ifndef FEATURE_CORECLR
+
+FCFuncStart(gVersioningHelperFuncs)
+ FCFuncElement("GetRuntimeId", GetRuntimeId_Wrapper)
+FCFuncEnd()
+
+#ifndef FEATURE_CORECLR
+FCFuncStart(gConsoleStreamFuncs)
+ FCFuncElement("WaitForAvailableConsoleInput", ConsoleStreamHelper::WaitForAvailableConsoleInput)
+FCFuncEnd()
+#endif
+
+#if defined(FEATURE_COMINTEROP) && defined(FEATURE_REFLECTION_ONLY_LOAD)
+FCFuncStart(gWindowsRuntimeMetadata)
+ QCFuncElement("nResolveNamespace", CLRPrivTypeCacheReflectionOnlyWinRT::ResolveNamespace)
+FCFuncEnd()
+#endif //FEATURE_COMINTEROP && FEATURE_REFLECTION_ONLY_LOAD
+
+#ifdef FEATURE_COMINTEROP
+FCFuncStart(gWindowsRuntimeBufferHelperFuncs)
+ QCFuncElement("StoreOverlappedPtrInCCW", WindowsRuntimeBufferHelper::StoreOverlappedPtrInCCW)
+ //QCFuncElement("ReleaseOverlapped", WindowsRuntimeBufferHelper::ReleaseOverlapped)
+FCFuncEnd()
+#endif // ifdef FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP
+FCFuncStart(gRuntimeClassFuncs)
+ FCFuncElement("GetRedirectedGetHashCodeMD", ComObject::GetRedirectedGetHashCodeMD)
+ FCFuncElement("RedirectGetHashCode", ComObject::RedirectGetHashCode)
+ FCFuncElement("GetRedirectedToStringMD", ComObject::GetRedirectedToStringMD)
+ FCFuncElement("RedirectToString", ComObject::RedirectToString)
+ FCFuncElement("GetRedirectedEqualsMD", ComObject::GetRedirectedEqualsMD)
+ FCFuncElement("RedirectEquals", ComObject::RedirectEquals)
+FCFuncEnd()
+#endif // ifdef FEATURE_COMINTEROP
+
+FCFuncStart(gWeakReferenceFuncs)
+ FCFuncElement("Create", WeakReferenceNative::Create)
+ FCFuncElement("Finalize", WeakReferenceNative::Finalize)
+ FCFuncElement("get_Target", WeakReferenceNative::GetTarget)
+ FCFuncElement("set_Target", WeakReferenceNative::SetTarget)
+ FCFuncElement("get_IsAlive", WeakReferenceNative::IsAlive)
+ FCFuncElement("IsTrackResurrection", WeakReferenceNative::IsTrackResurrection)
+FCFuncEnd()
+
+FCFuncStart(gWeakReferenceOfTFuncs)
+ FCFuncElement("Create", WeakReferenceOfTNative::Create)
+ FCFuncElement("Finalize", WeakReferenceOfTNative::Finalize)
+ FCFuncElement("get_Target", WeakReferenceOfTNative::GetTarget)
+ FCFuncElement("set_Target", WeakReferenceOfTNative::SetTarget)
+#ifndef FEATURE_CORECLR
+ FCFuncElement("IsTrackResurrection", WeakReferenceOfTNative::IsTrackResurrection)
+#endif
+FCFuncEnd()
+
+#ifdef FEATURE_COMINTEROP
+
+//
+// ECall helpers for the standard managed interfaces.
+//
+
+#define MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID, bCanCastOnNativeItfQI) \
+FCFuncStart(g##FriendlyName##Funcs)
+
+#define MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, FCallMethName, MethName, MethSig, FcallDecl) \
+ FCUnreferenced FCFuncElementSig(#MethName, MethSig, FriendlyName::FCallMethName)
+
+#define MNGSTDITF_END_INTERFACE(FriendlyName) \
+FCFuncEnd()
+
+#include "mngstditflist.h"
+
+#undef MNGSTDITF_BEGIN_INTERFACE
+#undef MNGSTDITF_DEFINE_METH_IMPL
+#undef MNGSTDITF_END_INTERFACE
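+
+// Illustrative expansion (the method entry below is hypothetical, shown for shape
+// only): an interface registered in mngstditflist.h as
+//
+//   MNGSTDITF_BEGIN_INTERFACE(StdMngIEnumerable, ...)
+//       MNGSTDITF_DEFINE_METH_IMPL(StdMngIEnumerable, GetEnumerator, GetEnumerator, &gsig_..., ...)
+//   MNGSTDITF_END_INTERFACE(StdMngIEnumerable)
+//
+// comes out of the preprocessor as
+//
+//   FCFuncStart(gStdMngIEnumerableFuncs)
+//       FCUnreferenced FCFuncElementSig("GetEnumerator", &gsig_..., StdMngIEnumerable::GetEnumerator)
+//   FCFuncEnd()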
+
+#endif // FEATURE_COMINTEROP
+
+
+//
+//
+// Class definitions
+//
+//
+
+// Note: these entries must remain sorted by name:namespace pair (an Assert will whack you if they aren't).
+// The sorting is case-sensitive.
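+// (Keeping the table sorted presumably lets the runtime binary-search it by
+// name:namespace at lookup time; the Assert above enforces the invariant.)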
+
+FCClassElement("AppDomain", "System", gAppDomainFuncs)
+FCClassElement("AppDomainManager", "System", gAppDomainManagerFuncs)
+#ifdef FEATURE_FUSION
+FCClassElement("AppDomainSetup", "System", gAppDomainSetupFuncs)
+#endif // FEATURE_FUSION
+FCClassElement("ArgIterator", "System", gVarArgFuncs)
+FCClassElement("Array", "System", gArrayFuncs)
+FCClassElement("ArrayWithOffset", "System.Runtime.InteropServices", gArrayWithOffsetFuncs)
+FCClassElement("AssemblyBuilder", "System.Reflection.Emit", gAssemblyBuilderFuncs)
+#ifdef FEATURE_CAS_POLICY
+FCClassElement("AssemblyEvidenceFactory", "System.Security.Policy", gAssemblyEvidenceFactoryFuncs)
+#endif // FEATURE_CAS_POLICY
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+FCClassElement("AssemblyLoadContext", "System.Runtime.Loader", gAssemblyLoadContextFuncs)
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+FCClassElement("AssemblyName", "System.Reflection", gAssemblyNameFuncs)
+FCClassElement("Assert", "System.Diagnostics", gDiagnosticsAssert)
+FCClassElement("BCLDebug", "System", gBCLDebugFuncs)
+#ifndef FEATURE_CORECLR
+FCClassElement("BaseConfigHandler", "System", gConfigHelper)
+#endif // !FEATURE_CORECLR
+FCClassElement("Buffer", "System", gBufferFuncs)
+#ifndef FEATURE_CORECLR
+// Since the second letter of the class name is uppercase, it must sort before all class
+// names that start with Cx where x is any lowercase letter (strcmp is used for verification).
+FCClassElement("CLRConfig", "System", gCLRConfigFuncs)
+#endif // FEATURE_CORECLR
+#if defined(FEATURE_LEGACYSURFACE) && !defined(FEATURE_COREFX_GLOBALIZATION)
+FCClassElement("CalendarData", "System.Globalization", gCalendarDataFuncs)
+#endif // defined(FEATURE_LEGACYSURFACE) && !defined(FEATURE_COREFX_GLOBALIZATION)
+#ifndef FEATURE_CORECLR
+FCClassElement("ChannelServices", "System.Runtime.Remoting.Channels", gChannelServicesFuncs)
+#endif // !FEATURE_CORECLR
+#ifdef FEATURE_CAS_POLICY
+FCClassElement("CodeAccessSecurityEngine", "System.Security", gCodeAccessSecurityEngineFuncs)
+#endif
+#if defined(FEATURE_LEGACYSURFACE) && !defined(FEATURE_COREFX_GLOBALIZATION)
+FCClassElement("CompareInfo", "System.Globalization", gCompareInfoFuncs)
+#endif // defined(FEATURE_LEGACYSURFACE) && !defined(FEATURE_COREFX_GLOBALIZATION)
+FCClassElement("CompatibilitySwitch", "System.Runtime.Versioning", gCompatibilitySwitchFuncs)
+#ifdef FEATURE_COMPRESSEDSTACK
+FCClassElement("CompressedStack", "System.Threading", gCompressedStackFuncs)
+#endif // FEATURE_COMPRESSEDSTACK
+#ifdef FEATURE_CAS_POLICY
+FCClassElement("Config", "System.Security.Util", gPolicyConfigFuncs)
+#endif // FEATURE_CAS_POLICY
+#ifndef FEATURE_CORECLR
+FCClassElement("Console", "System", gConsoleFuncs)
+#endif // ifndef FEATURE_CORECLR
+#ifdef FEATURE_REMOTING
+FCClassElement("Context", "System.Runtime.Remoting.Contexts", gContextFuncs)
+#endif
+FCClassElement("CriticalHandle", "System.Runtime.InteropServices", gCriticalHandleFuncs)
+#if defined(FEATURE_LEGACYSURFACE) && !defined(FEATURE_COREFX_GLOBALIZATION)
+FCClassElement("CultureData", "System.Globalization", gCultureDataFuncs)
+FCClassElement("CultureInfo", "System.Globalization", gCultureInfoFuncs)
+#endif
+FCClassElement("Currency", "System", gCurrencyFuncs)
+#ifndef FEATURE_CORECLR
+FCClassElement("CurrentSystemTimeZone", "System", gTimeZoneFuncs)
+#endif // !FEATURE_CORECLR
+FCClassElement("CustomAttribute", "System.Reflection", gCOMCustomAttributeFuncs)
+FCClassElement("CustomAttributeEncodedArgument", "System.Reflection", gCustomAttributeEncodedArgument)
+FCClassElement("DateMarshaler", "System.StubHelpers", gDateMarshalerFuncs)
+FCClassElement("DateTime", "System", gDateTimeFuncs)
+FCClassElement("Debugger", "System.Diagnostics", gDiagnosticsDebugger)
+FCClassElement("Decimal", "System", gDecimalFuncs)
+FCClassElement("DefaultBinder", "System", gCOMDefaultBinderFuncs)
+FCClassElement("Delegate", "System", gDelegateFuncs)
+FCClassElement("DependentHandle", "System.Runtime.CompilerServices", gDependentHandleFuncs)
+#ifdef FEATURE_COMPRESSEDSTACK
+FCClassElement("DomainCompressedStack", "System.Threading", gDomainCompressedStackFuncs)
+#endif // FEATURE_COMPRESSEDSTACK
+#if defined(FEATURE_LEGACYSURFACE) && !defined(FEATURE_COREFX_GLOBALIZATION)
+FCClassElement("EncodingTable", "System.Globalization", gEncodingTableFuncs)
+#endif // defined(FEATURE_LEGACYSURFACE) && !defined(FEATURE_COREFX_GLOBALIZATION)
+FCClassElement("Enum", "System", gEnumFuncs)
+FCClassElement("Environment", "System", gEnvironmentFuncs)
+#ifdef FEATURE_COMINTEROP
+FCClassElement("EventArgsMarshaler", "System.StubHelpers", gEventArgsMarshalerFuncs)
+#endif // FEATURE_COMINTEROP
+FCClassElement("Exception", "System", gExceptionFuncs)
+#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORECLR)
+FCClassElement("ExtensibleClassFactory", "System.Runtime.InteropServices", gExtensibleClassFactoryFuncs)
+#endif
+FCClassElement("FileIOAccess", "System.Security.Permissions", gCOMFileIOAccessFuncs)
+FCClassElement("FileLoadException", "System.IO", gFileLoadExceptionFuncs)
+FCClassElement("FormatterServices", "System.Runtime.Serialization", gSerializationFuncs)
+#ifdef FEATURE_CAS_POLICY
+FCClassElement("FrameSecurityDescriptor", "System.Security", gFrameSecurityDescriptorFuncs)
+#endif
+FCClassElement("GC", "System", gGCInterfaceFuncs)
+FCClassElement("GCHandle", "System.Runtime.InteropServices", gGCHandleFuncs)
+#ifdef FEATURE_CAS_POLICY
+FCClassElement("HostExecutionContextManager", "System.Threading", gHostExecutionContextManagerFuncs)
+#endif // FEATURE_CAS_POLICY
+#ifdef FEATURE_COMINTEROP
+FCClassElement("IEnumerable", "System.Collections", gStdMngIEnumerableFuncs)
+FCClassElement("IEnumerator", "System.Collections", gStdMngIEnumeratorFuncs)
+FCClassElement("IExpando", "System.Runtime.InteropServices.Expando", gStdMngIExpandoFuncs)
+#endif // FEATURE_COMINTEROP
+FCClassElement("ILCover", "System.Coverage", gCoverageFuncs)
+#ifdef FEATURE_COMINTEROP
+FCClassElement("IReflect", "System.Reflection", gStdMngIReflectFuncs)
+#endif
+#ifdef FEATURE_COMINTEROP
+FCClassElement("InterfaceMarshaler", "System.StubHelpers", gInterfaceMarshalerFuncs)
+#endif
+FCClassElement("Interlocked", "System.Threading", gInterlockedFuncs)
+#if defined(FEATURE_ISOSTORE) && !defined(FEATURE_ISOSTORE_LIGHT)
+FCClassElement("IsolatedStorage", "System.IO.IsolatedStorage", gIsolatedStorage)
+FCClassElement("IsolatedStorageFile", "System.IO.IsolatedStorage", gIsolatedStorageFile)
+#endif // FEATURE_ISOSTORE && !FEATURE_ISOSTORE_LIGHT
+FCClassElement("JitHelpers", "System.Runtime.CompilerServices", gJitHelpers)
+FCClassElement("LoaderAllocatorScout", "System.Reflection", gLoaderAllocatorFuncs)
+FCClassElement("Log", "System.Diagnostics", gDiagnosticsLog)
+FCClassElement("ManifestBasedResourceGroveler", "System.Resources", gManifestBasedResourceGrovelerFuncs)
+FCClassElement("Marshal", "System.Runtime.InteropServices", gInteropMarshalFuncs)
+#ifdef FEATURE_REMOTING
+FCClassElement("MarshalByRefObject", "System", gMarshalByRefFuncs)
+#endif
+FCClassElement("Math", "System", gMathFuncs)
+#ifdef MDA_SUPPORTED
+FCClassElement("Mda", "System", gMda)
+#endif
+#ifndef FEATURE_CORECLR
+FCClassElement("MemoryFailPoint", "System.Runtime", gMemoryFailPointFuncs)
+#endif // !FEATURE_CORECLR
+#ifdef FEATURE_REMOTING
+FCClassElement("Message", "System.Runtime.Remoting.Messaging", gMessageFuncs)
+#endif
+FCClassElement("MetadataImport", "System.Reflection", gMetaDataImport)
+#ifdef FEATURE_METHOD_RENTAL
+FCClassElement("MethodRental", "System.Reflection.Emit", gCOMMethodRental)
+#endif // FEATURE_METHOD_RENTAL
+FCClassElement("MissingMemberException", "System", gMissingMemberExceptionFuncs)
+#ifdef FEATURE_COMINTEROP
+FCClassElement("MngdHiddenLengthArrayMarshaler", "System.StubHelpers", gMngdHiddenLengthArrayMarshalerFuncs)
+#endif // FEATURE_COMINTEROP
+FCClassElement("MngdNativeArrayMarshaler", "System.StubHelpers", gMngdNativeArrayMarshalerFuncs)
+FCClassElement("MngdRefCustomMarshaler", "System.StubHelpers", gMngdRefCustomMarshalerFuncs)
+#ifdef FEATURE_COMINTEROP
+FCClassElement("MngdSafeArrayMarshaler", "System.StubHelpers", gMngdSafeArrayMarshalerFuncs)
+#endif // FEATURE_COMINTEROP
+FCClassElement("ModuleBuilder", "System.Reflection.Emit", gCOMModuleBuilderFuncs)
+FCClassElement("ModuleHandle", "System", gCOMModuleHandleFuncs)
+FCClassElement("Monitor", "System.Threading", gMonitorFuncs)
+#ifndef FEATURE_CORECLR
+FCClassElement("Normalization", "System.Text", gNormalizationFuncs)
+#endif // !FEATURE_CORECLR
+FCClassElement("Number", "System", gNumberFuncs)
+#ifdef FEATURE_COMINTEROP
+FCClassElement("OAVariantLib", "Microsoft.Win32", gOAVariantFuncs)
+#endif
+FCClassElement("Object", "System", gObjectFuncs)
+#ifdef FEATURE_COMINTEROP
+FCClassElement("ObjectMarshaler", "System.StubHelpers", gObjectMarshalerFuncs)
+#endif
+FCClassElement("OverlappedData", "System.Threading", gOverlappedFuncs)
+#ifdef FEATURE_CAS_POLICY
+FCClassElement("PEFileEvidenceFactory", "System.Security.Policy", gPEFileEvidenceFactoryFuncs)
+#endif // FEATURE_CAS_POLICY
+FCClassElement("ParseNumbers", "System", gParseNumbersFuncs)
+#ifndef FEATURE_CORECLR
+FCClassElement("PasswordDeriveBytes", "System.Security.Cryptography", gPasswordDeriveBytesFuncs)
+#endif
+#ifdef FEATURE_CAS_POLICY
+FCClassElement("PolicyManager", "System.Security", gPolicyManagerFuncs)
+#endif
+
+#if defined(FEATURE_MULTICOREJIT) && !defined(FEATURE_CORECLR)
+FCClassElement("ProfileOptimization", "System.Runtime", gProfileOptimizationFuncs)
+#endif // defined(FEATURE_MULTICOREJIT) && !defined(FEATURE_CORECLR)
+
+FCClassElement("PseudoCustomAttribute", "System.Reflection", gPseudoCustomAttribute)
+#ifdef FEATURE_CORECLR
+FCClassElement("PunkSafeHandle", "System.Reflection.Emit", gSymWrapperCodePunkSafeHandleFuncs)
+#endif
+#ifndef FEATURE_CORECLR
+FCClassElement("RNGCryptoServiceProvider", "System.Security.Cryptography", gRNGCryptoServiceProviderFuncs)
+#endif
+#if defined(FEATURE_CRYPTO) || defined(FEATURE_LEGACYNETCFCRYPTO)
+FCClassElement("RSACryptoServiceProvider", "System.Security.Cryptography", gRSACryptoServiceProviderFuncs)
+#endif
+#ifdef FEATURE_RWLOCK
+FCClassElement("ReaderWriterLock", "System.Threading", gRWLockFuncs)
+#endif // FEATURE_RWLOCK
+#ifdef FEATURE_REMOTING
+FCClassElement("RealProxy", "System.Runtime.Remoting.Proxies", gRealProxyFuncs)
+#endif
+FCClassElement("RegisteredWaitHandleSafe", "System.Threading", gRegisteredWaitHandleFuncs)
+#ifdef FEATURE_COMINTEROP
+#ifdef FEATURE_COMINTEROP_MANAGED_ACTIVATION
+FCClassElement("RegistrationServices", "System.Runtime.InteropServices", gRegistrationFuncs)
+#endif // FEATURE_COMINTEROP_MANAGED_ACTIVATION
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_REMOTING
+FCClassElement("RemotingServices", "System.Runtime.Remoting", gRemotingFuncs)
+#endif
+#if defined(FEATURE_CRYPTO)
+FCClassElement("Rfc2898DeriveBytes", "System.Security.Cryptography", gRfc2898DeriveBytesFuncs)
+#endif
+FCClassElement("RtFieldInfo", "System.Reflection", gRuntimeFieldInfoFuncs)
+FCClassElement("RuntimeAssembly", "System.Reflection", gAssemblyFuncs)
+#ifdef FEATURE_COMINTEROP
+FCClassElement("RuntimeClass", "System.Runtime.InteropServices.WindowsRuntime", gRuntimeClassFuncs)
+#endif // FEATURE_COMINTEROP
+FCClassElement("RuntimeEnvironment", "System.Runtime.InteropServices", gRuntimeEnvironmentFuncs)
+FCClassElement("RuntimeFieldHandle", "System", gCOMFieldHandleNewFuncs)
+FCClassElement("RuntimeHelpers", "System.Runtime.CompilerServices", gCompilerFuncs)
+FCClassElement("RuntimeMethodHandle", "System", gRuntimeMethodHandle)
+FCClassElement("RuntimeModule", "System.Reflection", gCOMModuleFuncs)
+FCClassElement("RuntimeType", "System", gSystem_RuntimeType)
+FCClassElement("RuntimeTypeHandle", "System", gCOMTypeHandleFuncs)
+FCClassElement("SafeBuffer", "System.Runtime.InteropServices", gSafeBufferFuncs)
+#ifdef FEATURE_X509
+FCClassElement("SafeCertContextHandle", "System.Security.Cryptography.X509Certificates", gX509SafeCertContextHandleFuncs)
+#ifndef FEATURE_CORECLR
+FCClassElement("SafeCertStoreHandle", "System.Security.Cryptography.X509Certificates", gX509SafeCertStoreHandleFuncs)
+#endif // !FEATURE_CORECLR
+#endif // FEATURE_X509
+FCClassElement("SafeHandle", "System.Runtime.InteropServices", gSafeHandleFuncs)
+#if defined(FEATURE_CRYPTO) || defined(FEATURE_LEGACYNETCFCRYPTO)
+FCClassElement("SafeHashHandle", "System.Security.Cryptography", gSafeHashHandleFuncs)
+#endif // FEATURE_CRYPTO || FEATURE_LEGACYNETCFCRYPTO
+#if defined(FEATURE_ISOSTORE) && !defined(FEATURE_ISOSTORE_LIGHT)
+FCClassElement("SafeIsolatedStorageFileHandle", "System.IO.IsolatedStorage", gIsolatedStorageFileHandle)
+#endif // FEATURE_ISOSTORE && !FEATURE_ISOSTORE_LIGHT
+#if defined(FEATURE_CRYPTO) || defined(FEATURE_LEGACYNETCFCRYPTO)
+FCClassElement("SafeKeyHandle", "System.Security.Cryptography", gSafeKeyHandleFuncs)
+#endif
+#ifdef FEATURE_CAS_POLICY
+FCClassElement("SafePEFileHandle", "Microsoft.Win32.SafeHandles", gPEFileFuncs)
+#endif // FEATURE_CAS_POLICY
+#if defined(FEATURE_CRYPTO) || defined(FEATURE_LEGACYNETCFCRYPTO)
+FCClassElement("SafeProvHandle", "System.Security.Cryptography", gSafeProvHandleFuncs)
+#endif
+#ifndef FEATURE_CORECLR
+FCClassElement("SafeTypeNameParserHandle", "System", gSafeTypeNameParserHandle)
+#endif //!FEATURE_CORECLR
+#if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+FCClassElement("SecurityContext", "System.Security", gCOMSecurityContextFuncs)
+#endif // defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+FCClassElement("SecurityContextFrame", "System.Reflection", gSecurityContextFrameFuncs)
+FCClassElement("SecurityManager", "System.Security", gCOMSecurityManagerFuncs)
+#ifdef FEATURE_CAS_POLICY
+FCClassElement("SecurityRuntime", "System.Security", gCOMSecurityRuntimeFuncs)
+#endif
+FCClassElement("Signature", "System", gSignatureNative)
+#ifndef FEATURE_CORECLR
+FCClassElement("SizedReference", "System", gSizedRefHandleFuncs)
+#endif // !FEATURE_CORECLR
+#ifdef FEATURE_REMOTING
+FCClassElement("StackBuilderSink", "System.Runtime.Remoting.Messaging", gStackBuilderSinkFuncs)
+#endif
+FCClassElement("StackTrace", "System.Diagnostics", gDiagnosticsStackTrace)
+FCClassElement("String", "System", gStringFuncs)
+FCClassElement("StringBuilder", "System.Text", gStringBufferFuncs)
+FCClassElement("StringExpressionSet", "System.Security.Util", gCOMStringExpressionSetFuncs)
+FCClassElement("StubHelpers", "System.StubHelpers", gStubHelperFuncs)
+#if defined(FEATURE_SYNCHRONIZATIONCONTEXT_WAIT) || defined(FEATURE_APPX)
+FCClassElement("SynchronizationContext", "System.Threading", gContextSynchronizationFuncs)
+#endif // FEATURE_SYNCHRONIZATIONCONTEXT_WAIT || FEATURE_APPX
+#if defined(FEATURE_LEGACYSURFACE) && !defined(FEATURE_COREFX_GLOBALIZATION)
+FCClassElement("TextInfo", "System.Globalization", gTextInfoFuncs)
+#endif // defined(FEATURE_LEGACYSURFACE) && !defined(FEATURE_COREFX_GLOBALIZATION)
+FCClassElement("Thread", "System.Threading", gThreadFuncs)
+FCClassElement("ThreadPool", "System.Threading", gThreadPoolFuncs)
+#ifndef FEATURE_CORECLR
+FCClassElement("TimeSpan", "System", gTimeSpanFuncs)
+#endif // !FEATURE_CORECLR
+FCClassElement("TimerQueue", "System.Threading", gTimerFuncs)
+FCClassElement("Type", "System", gSystem_Type)
+FCClassElement("TypeBuilder", "System.Reflection.Emit", gCOMClassWriter)
+#ifdef FEATURE_COMINTEROP_TLB_SUPPORT
+FCClassElement("TypeLibConverter", "System.Runtime.InteropServices", gTypeLibConverterFuncs)
+#endif
+FCClassElement("TypeLoadException", "System", gTypeLoadExceptionFuncs)
+FCClassElement("TypeNameBuilder", "System.Reflection.Emit", gTypeNameBuilder)
+#ifndef FEATURE_CORECLR
+FCClassElement("TypeNameParser", "System", gTypeNameParser)
+#endif //!FEATURE_CORECLR
+FCClassElement("TypedReference", "System", gTypedReferenceFuncs)
+FCClassElement("URLString", "System.Security.Util", gCOMUrlStringFuncs)
+#ifdef FEATURE_COMINTEROP
+FCClassElement("UriMarshaler", "System.StubHelpers", gUriMarshalerFuncs)
+#endif
+FCClassElement("Utf8String", "System", gUtf8String)
+#if defined(FEATURE_CRYPTO) || defined(FEATURE_LEGACYNETCFCRYPTO)
+FCClassElement("Utils", "System.Security.Cryptography", gCryptographyUtilsFuncs)
+#endif
+FCClassElement("ValueClassMarshaler", "System.StubHelpers", gValueClassMarshalerFuncs)
+FCClassElement("ValueType", "System", gValueTypeFuncs)
+#ifdef FEATURE_COMINTEROP
+FCClassElement("Variant", "System", gVariantFuncs)
+#endif
+FCClassElement("VersioningHelper", "System.Runtime.Versioning", gVersioningHelperFuncs)
+FCClassElement("WaitHandle", "System.Threading", gWaitHandleFuncs)
+FCClassElement("WeakReference", "System", gWeakReferenceFuncs)
+FCClassElement("WeakReference`1", "System", gWeakReferenceOfTFuncs)
+
+#ifndef FEATURE_CORECLR
+FCClassElement("Win32", "System.Security.Principal", gPrincipalFuncs)
+#endif
+
+#ifdef FEATURE_COMINTEROP
+FCClassElement("WinRTTypeNameConverter", "System.StubHelpers", gWinRTTypeNameConverterFuncs)
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP
+FCClassElement("WindowsRuntimeBufferHelper", "System.Runtime.InteropServices.WindowsRuntime", gWindowsRuntimeBufferHelperFuncs)
+#endif
+
+#ifndef FEATURE_CORECLR
+FCClassElement("WindowsRuntimeDesignerContext", "System.Runtime.DesignerServices", gWindowsRuntimeContextFuncs)
+#endif
+
+#if defined(FEATURE_COMINTEROP) && defined(FEATURE_REFLECTION_ONLY_LOAD)
+FCClassElement("WindowsRuntimeMetadata", "System.Runtime.InteropServices.WindowsRuntime", gWindowsRuntimeMetadata)
+#endif
+
+#ifdef FEATURE_X509
+FCClassElement("X509Utils", "System.Security.Cryptography.X509Certificates", gX509CertificateFuncs)
+#endif // FEATURE_X509
+#ifdef FEATURE_CAS_POLICY
+FCClassElement("Zone", "System.Security.Policy", gCOMSecurityZone)
+#endif // FEATURE_CAS_POLICY
+#ifndef FEATURE_CORECLR
+FCClassElement("__ConsoleStream", "System.IO", gConsoleStreamFuncs)
+#endif
+
+
+#undef FCFuncElement
+#undef FCFuncElementSig
+#undef FCIntrinsic
+#undef FCIntrinsicSig
+#undef QCFuncElement
+#undef FCDynamic
+#undef FCDynamicSig
+#undef FCUnreferenced
+#undef FCFuncStart
+#undef FCFuncEnd
+#undef FCClassElement
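+
+// A note on the pattern above, for readers new to it: this list is an
+// X-macro table. Each includer defines FCClassElement (and the other
+// FC*/QC* macros) to the expansion it needs, includes the list, and then
+// #undefs everything, as done here. A minimal sketch of one hypothetical
+// consumer (names below are illustrative, not the actual ones):
+//
+//   struct ECClass { LPCSTR szName; LPCSTR szNamespace; const void** rgFuncs; };
+//   #define FCClassElement(name, ns, funcs) { name, ns, funcs },
+//   static const ECClass g_rgECClasses[] = {
+//   #include "ecalllist.h"
+//   };
+//   #undef FCClassElement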
diff --git a/src/vm/eeconfig.cpp b/src/vm/eeconfig.cpp
new file mode 100644
index 0000000000..ebebebdf0f
--- /dev/null
+++ b/src/vm/eeconfig.cpp
@@ -0,0 +1,2186 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// EEConfig.CPP
+//
+
+//
+// Fetches configuration data from the registry (should we JIT, run GC checks, ...)
+//
+//
+
+
+#include "common.h"
+#ifdef FEATURE_COMINTEROP
+#include <appxutil.h>
+#endif
+#include "eeconfig.h"
+#include "method.hpp"
+#ifndef FEATURE_CORECLR
+#include <xmlparser.h>
+#include <mscorcfg.h>
+#include "eeconfigfactory.h"
+#endif
+#ifdef FEATURE_FUSION
+#include "fusionsetup.h"
+#endif
+#include "eventtrace.h"
+#include "eehash.h"
+#include "eemessagebox.h"
+#include "corhost.h"
+#include "regex_util.h"
+#include "clr/fs/path.h"
+#ifdef FEATURE_WIN_DB_APPCOMPAT
+#include "QuirksApi.h"
+#endif
+
+using namespace clr;
+
+#define DEFAULT_ZAP_SET W("")
+
+#define DEFAULT_APP_DOMAIN_LEAKS 0
+
+
+#ifdef STRESS_HEAP
+// Global counter to disable GCStress. This is needed so we can inhibit
+// GC stress collections without resetting the global GCStressLevel, which
+// is relied on by the EH code and the JIT code (for handling patched
+// managed code and GC stress exceptions) after GC stress is dynamically
+// turned off.
+Volatile<DWORD> GCStressPolicy::InhibitHolder::s_nGcStressDisabled = 0;
+#endif // STRESS_HEAP
+
+
+ConfigSource::ConfigSource()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ } CONTRACTL_END;
+
+ m_pNext = this;
+ m_pPrev = this;
+}// ConfigSource::ConfigSource
+
+ConfigSource::~ConfigSource()
+{
+ CONTRACTL {
+ NOTHROW;
+ FORBID_FAULT;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ for(ConfigStringHashtable::Iterator iter = m_Table.Begin(), end = m_Table.End(); iter != end; iter++)
+ {
+ ConfigStringKeyValuePair * pair = *(iter);
+ delete[] pair->key;
+ delete[] pair->value;
+ delete pair;
+ }
+}// ConfigSource::~ConfigSource
+
+ConfigStringHashtable * ConfigSource::Table()
+{
+ LIMITED_METHOD_CONTRACT;
+ return &(m_Table);
+}// ConfigSource::Table
+
+void ConfigSource::Add(ConfigSource* prev)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(prev));
+ PRECONDITION(CheckPointer(prev->m_pNext));
+ } CONTRACTL_END;
+
+ m_pPrev = prev;
+ m_pNext = prev->m_pNext;
+
+ m_pNext->m_pPrev = this;
+ prev->m_pNext = this;
+}// ConfigSource::Add
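+
+// ConfigSource nodes form a circular doubly-linked list: the constructor
+// points a fresh node at itself, so a lone node is a one-element ring, and
+// Add() splices 'this' in immediately after 'prev'. Illustrative usage
+// (sketch only; the sentinel here is hypothetical):
+//
+//   ConfigSource head;                              // head.m_pNext == &head
+//   ConfigSource* pNew = new (nothrow) ConfigSource();
+//   if (pNew != NULL)
+//       pNew->Add(&head);                           // head <-> pNew <-> head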
+
+
+
+/**************************************************************/
+// Poor man's narrow
+LPUTF8 NarrowWideChar(__inout_z LPWSTR str)
+{
+ CONTRACT (LPUTF8)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(str, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ } CONTRACT_END;
+
+ if (str != 0) {
+ LPWSTR fromPtr = str;
+ LPUTF8 toPtr = (LPUTF8) str;
+ LPUTF8 result = toPtr;
+ while(*fromPtr != 0)
+ *toPtr++ = (char) *fromPtr++;
+ *toPtr = 0;
+ RETURN result;
+ }
+ RETURN NULL;
+}
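+
+// Note that NarrowWideChar narrows in place: the returned LPUTF8 aliases the
+// input buffer (each (char)*fromPtr++ just drops the high bits), so the
+// conversion is only faithful for characters below 0x80. The caller keeps
+// ownership of the single allocation, e.g.:
+//
+//   LPWSTR wide = ...;                      // heap string with ASCII content
+//   LPUTF8 narrow = NarrowWideChar(wide);   // narrow points into 'wide'
+//   ...
+//   delete [] wide;                         // frees both views at once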
+
+extern void UpdateGCSettingFromHost ();
+
+HRESULT EEConfig::Setup()
+{
+ STANDARD_VM_CONTRACT;
+
+ ETWOnStartup (EEConfigSetup_V1,EEConfigSetupEnd_V1);
+
+ // This 'new' uses EEConfig's overloaded new, which uses a static memory buffer and will
+ // not fail
+ EEConfig *pConfig = new EEConfig();
+
+ HRESULT hr = pConfig->Init();
+
+ if (FAILED(hr))
+ return hr;
+
+ EEConfig *pConfigOld = NULL;
+ pConfigOld = InterlockedCompareExchangeT(&g_pConfig, pConfig, NULL);
+
+ _ASSERTE(pConfigOld == NULL && "EEConfig::Setup called multiple times!");
+
+ UpdateGCSettingFromHost();
+
+ return S_OK;
+}
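+
+// Publication note for Setup(): InterlockedCompareExchangeT installs pConfig
+// into g_pConfig only if g_pConfig is still NULL, so the global is published
+// exactly once even if two threads raced here. Morally equivalent to the
+// following, performed as one atomic step:
+//
+//   if (g_pConfig == NULL)
+//       g_pConfig = pConfig;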
+
+/**************************************************************/
+// For in-place constructor
+BYTE g_EEConfigMemory[sizeof(EEConfig)];
+
+void *EEConfig::operator new(size_t size)
+{
+ CONTRACT(void*) {
+ FORBID_FAULT;
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ } CONTRACT_END;
+
+ RETURN g_EEConfigMemory;
+}
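+
+// Because operator new above returns the static g_EEConfigMemory buffer,
+// 'new EEConfig()' behaves like placement new into a fixed slot and cannot
+// fail with OOM (which is why the comment in Setup() says the allocation
+// will not fail):
+//
+//   EEConfig* p = new EEConfig();    // constructed inside g_EEConfigMemory
+//   _ASSERTE((void*)p == (void*)g_EEConfigMemory);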
+
+#ifdef FEATURE_WIN_DB_APPCOMPAT
+void InitWinAppCompatDBApis()
+{
+ STANDARD_VM_CONTRACT;
+
+ HMODULE hMod = WszLoadLibraryEx(QUIRKSAPI_DLL, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
+
+ PFN_CptQuirkIsEnabled3 pfnIsQuirkEnabled = NULL;
+ PFN_CptQuirkGetData2 pfnQuirkGetData = NULL;
+
+ if(hMod != NULL)
+ {
+ pfnIsQuirkEnabled = (PFN_CptQuirkIsEnabled3)GetProcAddress(hMod, "QuirkIsEnabled3");
+ pfnQuirkGetData = (PFN_CptQuirkGetData2)GetProcAddress(hMod, "QuirkGetData2");
+ }
+
+ if(pfnIsQuirkEnabled != NULL && pfnQuirkGetData != NULL)
+ {
+ CLRConfig::RegisterWinDbQuirkApis(pfnIsQuirkEnabled,pfnQuirkGetData);
+ }
+}
+#endif // FEATURE_WIN_DB_APPCOMPAT
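+
+// InitWinAppCompatDBApis above is the classic optional-API probe: load the
+// DLL from system32 only, resolve each export, and register the callbacks
+// only if every pointer resolved. The same pattern in miniature (DLL and
+// export names here are hypothetical):
+//
+//   HMODULE h = WszLoadLibraryEx(W("optional.dll"), NULL,
+//                                LOAD_LIBRARY_SEARCH_SYSTEM32);
+//   if (h != NULL)
+//   {
+//       FARPROC p = GetProcAddress(h, "OptionalExport");
+//       if (p != NULL)
+//       {
+//           // Safe to call through p (after casting to the right type).
+//       }
+//   }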
+
+/**************************************************************/
+HRESULT EEConfig::Init()
+{
+ STANDARD_VM_CONTRACT;
+
+ fInited = false;
+
+#ifdef VERIFY_HEAP
+ iGCHeapVerify = 0; // Heap Verification OFF by default
+#endif
+
+#ifdef _DEBUG // TRACE_GC
+ iGCtraceStart = INT_MAX; // Set to huge value so GCtrace is off by default
+ iGCtraceEnd = INT_MAX;
+ iGCtraceFac = 0;
+ iGCprnLvl = DEFAULT_GC_PRN_LVL;
+
+#endif
+
+#if defined(STRESS_HEAP) || defined(_DEBUG)
+ iGCStress = 0;
+#endif
+
+#ifdef STRESS_HEAP
+ iGCStressMix = 0;
+ iGCStressStep = 1;
+#endif
+
+ fGCBreakOnOOM = false;
+ iGCgen0size = 0;
+ iGCSegmentSize = 0;
+ iGCconcurrent = 0;
+#ifdef _DEBUG
+ iGCLatencyMode = -1;
+#endif //_DEBUG
+ iGCForceCompact = 0;
+ iGCHoardVM = 0;
+ iGCLOHCompactionMode = 0;
+
+#ifdef GCTRIMCOMMIT
+ iGCTrimCommit = 0;
+#endif
+
+ m_fFreepZapSet = false;
+
+ dwSpinInitialDuration = 0x32;
+ dwSpinBackoffFactor = 0x3;
+ dwSpinLimitProcCap = 0xFFFFFFFF;
+ dwSpinLimitProcFactor = 0x4E20;
+ dwSpinLimitConstant = 0x0;
+ dwSpinRetryCount = 0xA;
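+    // (In decimal: initial duration 50, backoff factor 3, proc cap disabled
+    // (0xFFFFFFFF), proc factor 20000, constant 0, retry count 10.)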
+
+ iJitOptimizeType = OPT_DEFAULT;
+ fJitFramed = false;
+ fJitAlignLoops = false;
+ fAddRejitNops = false;
+ fJitMinOpts = false;
+ fPInvokeRestoreEsp = (DWORD)-1;
+
+ fLegacyNullReferenceExceptionPolicy = false;
+ fLegacyUnhandledExceptionPolicy = false;
+ fLegacyApartmentInitPolicy = false;
+ fLegacyComHierarchyVisibility = false;
+ fLegacyComVTableLayout = false;
+ fLegacyVirtualMethodCallVerification = false;
+ fNewComVTableLayout = false;
+ iImpersonationPolicy = IMP_DEFAULT;
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+    // By default, there is no pre-V4 CSE policy
+ fLegacyCorruptedStateExceptionsPolicy = false;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+#ifdef _DEBUG
+ fLogTransparencyErrors = false;
+#endif // _DEBUG
+ fLegacyLoadMscorsnOnStartup = false;
+ fBypassStrongNameVerification = true;
+ fGeneratePublisherEvidence = true;
+ fEnforceFIPSPolicy = true;
+ fLegacyHMACMode = false;
+ fNgenBindOptimizeNonGac = false;
+ fStressLog = false;
+ fCacheBindingFailures = true;
+ fDisableFusionUpdatesFromADManager = false;
+ fDisableCommitThreadStack = false;
+ fProbeForStackOverflow = true;
+
+ INDEBUG(fStressLog = true;)
+
+#ifdef FEATURE_CORECLR
+ fVerifyAllOnLoad = false;
+#endif
+#ifdef _DEBUG
+ fExpandAllOnLoad = false;
+ fDebuggable = false;
+ fStressOn = false;
+ apiThreadStressCount = 0;
+ pPrestubHalt = 0;
+ pPrestubGC = 0;
+ pszBreakOnClassLoad = 0;
+ pszBreakOnClassBuild = 0;
+ pszBreakOnMethodName = 0;
+ pszDumpOnClassLoad = 0;
+ pszBreakOnInteropStubSetup = 0;
+ pszBreakOnComToClrNativeInfoInit = 0;
+ pszBreakOnStructMarshalSetup = 0;
+ fJitVerificationDisable= false;
+ fVerifierOff = false;
+
+ fDoAllowUntrustedCallerChecks = true;
+#ifdef ENABLE_STARTUP_DELAY
+ iStartupDelayMS = 0;
+#endif
+ iPerfNumAllocsThreshold = 0;
+ iPerfAllocsSizeThreshold = 0;
+ pPerfTypesToLog = NULL;
+ iFastGCStress = 0;
+ iInjectFatalError = 0;
+ fSaveThreadInfo = FALSE;
+ dwSaveThreadInfoMask = (DWORD)-1;
+#ifdef TEST_DATA_CONSISTENCY
+    // Indicates whether to run the self test that verifies we detect when a lock is held by the
+    // LS in DAC builds. Initialized via the environment variable TestDataConsistency.
+ fTestDataConsistency = false;
+#endif
+
+ // TlbImp Stuff
+ fTlbImpSkipLoading = false;
+
+ // In Thread::SuspendThread(), default the timeout to 2 seconds. If the suspension
+ // takes longer, assert (but keep trying).
+ m_SuspendThreadDeadlockTimeoutMs = 2000;
+
+    // For now, give our suspension attempts 40 seconds to succeed before trapping to
+    // the debugger. Note that we should probably lower this when the JIT is run in
+    // preemptive mode, as we really should not be starving the GC for tens of seconds
+ m_SuspendDeadlockTimeout = 40000;
+#endif // _DEBUG
+
+#ifdef FEATURE_COMINTEROP
+ bLogCCWRefCountChange = false;
+ pszLogCCWRefCountChange = NULL;
+#endif // FEATURE_COMINTEROP
+
+#ifdef _DEBUG
+ m_fAssertOnBadImageFormat = false;
+ m_fAssertOnFailFast = true;
+
+ fSuppressChecks = false;
+ fConditionalContracts = false;
+ fEnableFullDebug = false;
+#endif
+
+#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
+ DoubleArrayToLargeObjectHeapThreshold = 1000;
+#endif
+
+ iRequireZaps = REQUIRE_ZAPS_NONE;
+
+#ifdef _TARGET_AMD64_
+ pDisableNativeImageLoadList = NULL;
+#endif
+
+ // new loader behavior switches
+
+ m_fDeveloperInstallation = false;
+
+ pZapSet = DEFAULT_ZAP_SET;
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ dwSharePolicy = AppDomain::SHARE_POLICY_UNSPECIFIED;
+#endif
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ dwDisableStackwalkCache = 0;
+#else // _TARGET_X86_
+ dwDisableStackwalkCache = 1;
+#endif // _TARGET_X86_
+
+ fUseNewCrossDomainRemoting = 1;
+
+ szZapBBInstr = NULL;
+ szZapBBInstrDir = NULL;
+
+ fAppDomainUnload = true;
+ dwADURetryCount=1000;
+
+#ifdef _DEBUG
+ fAppDomainLeaks = DEFAULT_APP_DOMAIN_LEAKS;
+
+ // interop logging
+ m_pTraceIUnknown = NULL;
+ m_TraceWrapper = 0;
+#endif
+
+ iNgenHardBind = NGEN_HARD_BIND_DEFAULT;
+#ifdef _DEBUG
+ dwNgenForceFailureMask = 0;
+ dwNgenForceFailureCount = 0;
+ dwNgenForceFailureKind = 0;
+#endif
+
+ iGCPollType = GCPOLL_TYPE_DEFAULT;
+
+#ifdef _DEBUG
+ fGenerateStubForHost = FALSE;
+ fShouldInjectFault = 0;
+ testThreadAbort = 0;
+ testADUnload = 0;
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ m_fComInsteadOfManagedRemoting = false;
+#endif
+ m_fInteropValidatePinnedObjects = false;
+ m_fInteropLogArguments = false;
+
+#if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO)
+ fStubLinkerUnwindInfoVerificationOn = FALSE;
+#endif
+
+#if defined(_DEBUG) && defined(WIN64EXCEPTIONS)
+ fSuppressLockViolationsOnReentryFromOS = false;
+#endif
+
+#if defined(_DEBUG) && defined(_TARGET_AMD64_)
+ // For determining if we should force generation of long jump dispatch stubs.
+ m_cGenerateLongJumpDispatchStubRatio = (size_t)(-1);
+ m_cDispatchStubsGenerated = 0;
+#endif
+
+#if defined(_DEBUG)
+ bDiagnosticSuspend = false;
+#endif
+
+ // After initialization, register the code:#GetConfigValueCallback method with code:CLRConfig to let
+ // CLRConfig access config files. This is needed because CLRConfig lives outside the VM and can't
+ // statically link to EEConfig.
+ CLRConfig::RegisterGetConfigValueCallback(&GetConfigValueCallback);
+
+#ifdef FEATURE_WIN_DB_APPCOMPAT
+ InitWinAppCompatDBApis();
+#endif // FEATURE_WIN_DB_APPCOMPAT
+
+ return S_OK;
+}
+
+#ifdef _DEBUG
+static int DumpConfigTable(ConfigStringHashtable* table, __in_z LPCSTR label, int count)
+{
+ LIMITED_METHOD_CONTRACT;
+    LOG((LF_ALWAYS, LL_ALWAYS, label, count++));
+    LOG((LF_ALWAYS, LL_ALWAYS, "*********************************\n"));
+ for(ConfigStringHashtable::Iterator iter = table->Begin(), end = table->End(); iter != end; iter++)
+ {
+ ConfigStringKeyValuePair * pair = *(iter);
+ LPCWSTR keyString = pair->key;
+ LPCWSTR data = pair->value;
+ LOG((LF_ALWAYS, LL_ALWAYS, "%S = %S\n", keyString, data));
+ }
+ LOG((LF_ALWAYS, LL_ALWAYS, "\n"));
+ return count;
+}
+#endif
+
+/**************************************************************/
+HRESULT EEConfig::Cleanup()
+{
+ CONTRACTL {
+ FORBID_FAULT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+#ifdef _DEBUG
+ if (g_pConfig) {
+ // TODO: Do we even need this? CLRConfig::GetConfigValue has FORBID_FAULT in its contract.
+ FAULT_NOT_FATAL(); // If GetConfigValue fails the alloc, that's ok.
+
+ DWORD setting = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DumpConfiguration);
+ if (setting != 0)
+ {
+ ConfigList::ConfigIter iter(&m_Configuration);
+ int count = 0;
+ for(ConfigStringHashtable* table = iter.Next();table; table = iter.Next())
+ {
+ count = DumpConfigTable(table, "\nSystem Configuration Table: %d\n", count);
+ }
+ ConfigList::ConfigIter iter2(&m_Configuration);
+ count = 0;
+ for (ConfigStringHashtable* table = iter2.Previous();table; table = iter2.Previous())
+ {
+ count = DumpConfigTable(table, "\nApplication Configuration Table: %d\n", count);
+ }
+ }
+ }
+#endif
+
+ if (m_fFreepZapSet)
+ delete[] pZapSet;
+ delete[] szZapBBInstr;
+
+ if (pRequireZapsList)
+ delete pRequireZapsList;
+
+ if (pRequireZapsExcludeList)
+ delete pRequireZapsExcludeList;
+
+#ifdef _DEBUG
+ if (pForbidZapsList)
+ delete pForbidZapsList;
+
+ if (pForbidZapsExcludeList)
+ delete pForbidZapsExcludeList;
+#endif
+
+#ifdef _TARGET_AMD64_
+ if (pDisableNativeImageLoadList)
+ delete pDisableNativeImageLoadList;
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ if (pszLogCCWRefCountChange)
+ delete [] pszLogCCWRefCountChange;
+#endif // FEATURE_COMINTEROP
+
+#ifdef _DEBUG
+ if (pPrestubHalt)
+ {
+ DestroyMethList(pPrestubHalt);
+ pPrestubHalt = NULL;
+ }
+ if (pPrestubGC)
+ {
+ DestroyMethList(pPrestubGC);
+ pPrestubGC = NULL;
+ }
+ if (pSkipGCCoverageList)
+ {
+ delete pSkipGCCoverageList;
+ pSkipGCCoverageList = NULL;
+ }
+
+ delete [] pszBreakOnClassLoad;
+ delete [] pszBreakOnClassBuild;
+ delete [] pszBreakOnInstantiation;
+ delete [] pszBreakOnMethodName;
+ delete [] pszDumpOnClassLoad;
+ delete [] pszBreakOnInteropStubSetup;
+ delete [] pszBreakOnComToClrNativeInfoInit;
+ delete [] pszBreakOnStructMarshalSetup;
+ delete [] pszGcCoverageOnMethod;
+#endif
+#ifdef _DEBUG
+ if (pPerfTypesToLog)
+ {
+ DestroyTypeList(pPerfTypesToLog);
+ pPerfTypesToLog = NULL;
+ }
+#endif
+
+ return S_OK;
+}
+
+
+//
+// NOTE: This function is deprecated; use the CLRConfig class instead.
+// To use the CLRConfig class, add an entry in file:../inc/CLRConfigValues.h.
+//
+HRESULT EEConfig::GetConfigString_DontUse_(__in_z LPCWSTR name, __deref_out_z LPWSTR *outVal, BOOL fPrependCOMPLUS, ConfigSearch direction)
+{
+ CONTRACT(HRESULT) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT (CONTRACT_RETURN E_OUTOFMEMORY);
+ PRECONDITION(CheckPointer(name));
+ POSTCONDITION(CheckPointer(outVal, NULL_OK));
+ } CONTRACT_END;
+
+ LPWSTR pvalue = REGUTIL::GetConfigString_DontUse_(name, fPrependCOMPLUS);
+ if(pvalue == NULL && g_pConfig != NULL)
+ {
+ LPCWSTR pResult;
+ if(SUCCEEDED(g_pConfig->GetConfiguration_DontUse_(name, direction, &pResult)) && pResult != NULL)
+ {
+ size_t len = wcslen(pResult) + 1;
+ pvalue = new (nothrow) WCHAR[len];
+ if (pvalue == NULL)
+ {
+ RETURN E_OUTOFMEMORY;
+ }
+
+ wcscpy_s(pvalue,len,pResult);
+ }
+ }
+
+ *outVal = pvalue;
+
+ RETURN S_OK;
+}
+
+
+//
+// NOTE: This function is deprecated; use the CLRConfig class instead.
+// To use the CLRConfig class, add an entry in file:../inc/CLRConfigValues.h.
+//
+DWORD EEConfig::GetConfigDWORD_DontUse_(__in_z LPCWSTR name, DWORD defValue, DWORD level, BOOL fPrependCOMPLUS, ConfigSearch direction)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(name));
+ } CONTRACTL_END;
+
+    // <TODO>@TODO: After everyone has moved off the registry key, remove the following line in golden</TODO>
+ DWORD result = REGUTIL::GetConfigDWORD_DontUse_(name, defValue, (REGUTIL::CORConfigLevel)level, fPrependCOMPLUS);
+ if(result == defValue && g_pConfig != NULL)
+ {
+ LPCWSTR pvalue;
+ if(SUCCEEDED(g_pConfig->GetConfiguration_DontUse_(name, direction, &pvalue)) && pvalue != NULL)
+ {
+ WCHAR *end;
+ errno = 0;
+ result = wcstoul(pvalue, &end, 0);
+ // errno is ERANGE if the number is out of range, and end is set to pvalue if
+ // no valid conversion exists.
+ if (errno == ERANGE || end == pvalue)
+ {
+ result = defValue;
+ }
+ }
+ }
+
+ return result;
+}
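+
+// The wcstoul error handling above is the standard C idiom: overflow sets
+// errno to ERANGE, and "no digits consumed" leaves 'end' equal to the input
+// pointer; either way we fall back to defValue. Self-contained sketch:
+//
+//   const WCHAR* input = W("0x20");
+//   WCHAR* end = NULL;
+//   errno = 0;
+//   ULONG v = wcstoul(input, &end, 0);             // base 0 accepts 0x-prefixed hex
+//   bool ok = (errno != ERANGE) && (end != input); // v == 32 when ok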
+
+//
+// NOTE: This function is deprecated; use the CLRConfig class instead.
+// To use the CLRConfig class, add an entry in file:../inc/CLRConfigValues.h.
+//
+// Note for PAL: right now PAL does not have a _wcstoui64 API, so I am temporarily reading in all numbers as
+// 32-bit numbers. When we have the _wcstoui64 API on Mac we will use that instead of wcstoul.
+ULONGLONG EEConfig::GetConfigULONGLONG_DontUse_(__in_z LPCWSTR name, ULONGLONG defValue, DWORD level, BOOL fPrependCOMPLUS, ConfigSearch direction)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(name));
+ } CONTRACTL_END;
+
+    // <TODO>@TODO: After everyone has moved off the registry key, remove the following line in golden</TODO>
+ ULONGLONG result = REGUTIL::GetConfigULONGLONG_DontUse_(name, defValue, (REGUTIL::CORConfigLevel)level, fPrependCOMPLUS);
+ if(result == defValue && g_pConfig != NULL)
+ {
+ LPCWSTR pvalue;
+ if(SUCCEEDED(g_pConfig->GetConfiguration_DontUse_(name, direction, &pvalue)) && pvalue != NULL)
+ {
+ WCHAR *end;
+ errno = 0;
+ result = _wcstoui64(pvalue, &end, 0);
+ // errno is ERANGE if the number is out of range, and end is set to pvalue if
+ // no valid conversion exists.
+ if (errno == ERANGE || end == pvalue)
+ {
+ result = defValue;
+ }
+ }
+ }
+
+ return result;
+}
+
+//
+// NOTE: This function is deprecated; use the CLRConfig class instead.
+// To use the CLRConfig class, add an entry in file:../inc/CLRConfigValues.h.
+//
+// This is very similar to GetConfigDWORD, except that it favors the settings in config files over those in the
+// registry. This is the Shim's policy with configuration flags, and there are a few flags in EEConfig that adhere
+// to this policy.
+//
+DWORD EEConfig::GetConfigDWORDFavoringConfigFile_DontUse_(__in_z LPCWSTR name,
+ DWORD defValue,
+ DWORD level,
+ BOOL fPrependCOMPLUS,
+ ConfigSearch direction)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(name));
+ } CONTRACTL_END;
+
+ DWORD result = defValue;
+
+ if (g_pConfig != NULL)
+ {
+ LPCWSTR pvalue;
+ if (SUCCEEDED(g_pConfig->GetConfiguration_DontUse_(name, direction, &pvalue)) && pvalue != NULL)
+ {
+ WCHAR *end = NULL;
+ errno = 0;
+ result = wcstoul(pvalue, &end, 0);
+ // errno is ERANGE if the number is out of range, and end is set to pvalue if
+ // no valid conversion exists.
+ if (errno == ERANGE || end == pvalue)
+ {
+ result = defValue;
+ }
+ }
+ else
+ {
+ result = REGUTIL::GetConfigDWORD_DontUse_(name, defValue, (REGUTIL::CORConfigLevel)level, fPrependCOMPLUS);
+ }
+ }
+
+ return result;
+}
+
+//
+// NOTE: This function is deprecated; use the CLRConfig class instead.
+// To use the CLRConfig class, add an entry in file:../inc/CLRConfigValues.h.
+//
+DWORD EEConfig::GetConfigDWORDInternal_DontUse_(__in_z LPCWSTR name, DWORD defValue, DWORD level, BOOL fPrependCOMPLUS, ConfigSearch direction)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(name));
+ } CONTRACTL_END;
+
+    // <TODO>@TODO: After everyone has moved off the registry key, remove the following line in golden</TODO>
+ DWORD result = REGUTIL::GetConfigDWORD_DontUse_(name, defValue, (REGUTIL::CORConfigLevel)level, fPrependCOMPLUS);
+ if(result == defValue)
+ {
+ LPCWSTR pvalue;
+ if(SUCCEEDED(GetConfiguration_DontUse_(name, direction, &pvalue)) && pvalue != NULL)
+ {
+ WCHAR *end = NULL;
+ errno = 0;
+ result = wcstoul(pvalue, &end, 0);
+ // errno is ERANGE if the number is out of range, and end is set to pvalue if
+ // no valid conversion exists.
+ if (errno == ERANGE || end == pvalue)
+ {
+ result = defValue;
+ }
+ }
+ }
+ return result;
+}
+
+/**************************************************************/
+
+HRESULT EEConfig::sync()
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT (return E_OUTOFMEMORY);
+ } CONTRACTL_END;
+
+ ETWOnStartup (EEConfigSync_V1, EEConfigSyncEnd_V1);
+
+ HRESULT hr = S_OK;
+
+ // Note the global variable is not updated directly by the GetRegKey function
+ // so we only update it once (to avoid reentrancy windows)
+
+#ifdef _DEBUG
+ iFastGCStress = GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_FastGCStress, iFastGCStress);
+
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GcCoverage, (LPWSTR*)&pszGcCoverageOnMethod));
+ pszGcCoverageOnMethod = NarrowWideChar((LPWSTR)pszGcCoverageOnMethod);
+ iGCLatencyMode = GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_GCLatencyMode, iGCLatencyMode);
+#endif
+
+ if (CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_ARMEnabled))
+ {
+ g_fEnableARM = TRUE;
+ }
+
+ int forceGCconcurrent = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_gcConcurrent);
+ if ((forceGCconcurrent > 0) || (forceGCconcurrent == -1 && g_IGCconcurrent))
+ iGCconcurrent = TRUE;
+
+    // Disable concurrent GC during ngen for the rare case a GC gets triggered, as that can cause problems
+ if (IsCompilationProcess())
+ iGCconcurrent = FALSE;
+
+#ifdef _DEBUG
+ fAppDomainLeaks = GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_AppDomainAgilityChecked, DEFAULT_APP_DOMAIN_LEAKS) == 1;
+#endif
+
+#if defined(STRESS_HEAP) || defined(_DEBUG)
+ iGCStress = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_GCStress);
+#endif
+
+#ifdef STRESS_HEAP
+ BOOL bGCStressAndHeapVerifyAllowed = true;
+ iGCStressMix = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressMix);
+ iGCStressStep = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressStep);
+
+ // For GC stress mix mode ensure reasonable defaults
+ if (iGCStressMix != 0)
+ {
+ if (iGCStress == 0)
+ iGCStress |= int(GCSTRESS_ALLOC) | int(GCSTRESS_TRANSITION);
+ if (iGCStressStep == 0 || iGCStressStep == 1)
+ iGCStressStep = 0x10;
+ }
+
+ if (iGCStress)
+ {
+ LPWSTR pszGCStressExe = NULL;
+
+#ifdef _DEBUG
+ // If GCStress is turned on, then perform AppDomain agility checks in debug builds
+ fAppDomainLeaks = 1;
+#endif
+
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_RestrictedGCStressExe, &pszGCStressExe));
+ if (pszGCStressExe != NULL)
+ {
+ if (*pszGCStressExe != W('\0'))
+ {
+ bGCStressAndHeapVerifyAllowed = false;
+
+ WCHAR wszFileName[_MAX_PATH];
+ if (WszGetModuleFileName(NULL, wszFileName, _MAX_PATH) != 0)
+ {
+ // just keep the name
+ LPWSTR pwszName = wcsrchr(wszFileName, W('\\'));
+ pwszName = (pwszName == NULL) ? wszFileName : (pwszName + 1);
+
+ if (SString::_wcsicmp(pwszName,pszGCStressExe) == 0)
+ {
+ bGCStressAndHeapVerifyAllowed = true;
+ }
+ }
+ }
+ delete [] pszGCStressExe;
+ }
+
+ if (bGCStressAndHeapVerifyAllowed)
+ {
+ if (forceGCconcurrent > 0)
+ {
+#ifdef _DEBUG
+ iFastGCStress = 0;
+#endif
+ iGCStress |= int(GCSTRESS_ALLOC) | int(GCSTRESS_TRANSITION);
+ }
+ else
+ {
+            // If GCStress was enabled, and
+            // If GcConcurrent was NOT explicitly specified in the environment, and
+            // If GCStressMix was NOT specified
+            // Then let's turn off concurrent GC since it makes objects move less
+ if (iGCStressMix == 0)
+ {
+ iGCconcurrent =
+ g_IGCconcurrent = 0;
+ }
+ }
+ }
+ else
+ {
+ iGCStress = 0;
+ iGCStressMix = 0;
+ iGCStressStep = 1;
+ }
+ }
+
+#ifdef VERIFY_HEAP
+ if (bGCStressAndHeapVerifyAllowed)
+ {
+ iGCHeapVerify = GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_HeapVerify, iGCHeapVerify);
+ }
+#endif
+
+#endif //STRESS_HEAP
+
+#ifdef _WIN64
+ if (!iGCSegmentSize) iGCSegmentSize = GetConfigULONGLONG_DontUse_(CLRConfig::UNSUPPORTED_GCSegmentSize, iGCSegmentSize);
+ if (!iGCgen0size) iGCgen0size = GetConfigULONGLONG_DontUse_(CLRConfig::UNSUPPORTED_GCgen0size, iGCgen0size);
+#else
+ if (!iGCSegmentSize) iGCSegmentSize = GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_GCSegmentSize, iGCSegmentSize);
+ if (!iGCgen0size) iGCgen0size = GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_GCgen0size, iGCgen0size);
+#endif //_WIN64
+
+ if (g_IGCHoardVM)
+ iGCHoardVM = g_IGCHoardVM;
+ else
+ iGCHoardVM = GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_GCRetainVM, iGCHoardVM);
+
+ if (!iGCLOHCompactionMode) iGCLOHCompactionMode = GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_GCLOHCompact, iGCLOHCompactionMode);
+
+#ifdef GCTRIMCOMMIT
+ if (g_IGCTrimCommit)
+ iGCTrimCommit = g_IGCTrimCommit;
+ else
+ iGCTrimCommit = GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_gcTrimCommitOnLowMemory, iGCTrimCommit);
+#endif
+
+#ifdef FEATURE_CONSERVATIVE_GC
+ iGCConservative = (CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_gcConservative) != 0);
+#endif // FEATURE_CONSERVATIVE_GC
+
+#ifdef _WIN64
+ iGCAllowVeryLargeObjects = (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_gcAllowVeryLargeObjects) != 0);
+#endif
+
+ fGCBreakOnOOM = (GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_GCBreakOnOOM, fGCBreakOnOOM) != 0);
+
+#ifdef TRACE_GC
+ iGCtraceStart = GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_GCtraceStart, iGCtraceStart);
+ iGCtraceEnd = GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_GCtraceEnd, iGCtraceEnd);
+ iGCtraceFac = GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_GCtraceFacility, iGCtraceFac);
+ iGCprnLvl = GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_GCprnLvl, iGCprnLvl);
+#endif
+
+#ifdef _DEBUG
+ iInjectFatalError = GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_InjectFatalError, iInjectFatalError);
+
+ fSaveThreadInfo = (GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SaveThreadInfo, fSaveThreadInfo) != 0);
+
+ dwSaveThreadInfoMask = GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SaveThreadInfoMask, dwSaveThreadInfoMask);
+
+ {
+ LPWSTR wszSkipGCCoverageList = NULL;
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SkipGCCoverage, &wszSkipGCCoverageList));
+
+ EX_TRY
+ {
+ if (wszSkipGCCoverageList)
+ pSkipGCCoverageList = new AssemblyNamesList(wszSkipGCCoverageList);
+ }
+ EX_CATCH_HRESULT(hr);
+ IfFailRet(hr);
+ }
+#endif
+
+ iGCForceCompact = GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_gcForceCompact, iGCForceCompact);
+
+ fStressLog = GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_StressLog, fStressLog) != 0;
+ fForceEnc = GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_ForceEnc, fForceEnc) != 0;
+
+#ifdef STRESS_THREAD
+ dwStressThreadCount = GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_StressThreadCount, dwStressThreadCount);
+#endif
+
+ iRequireZaps = RequireZapsType(GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_ZapRequire, iRequireZaps));
+ if (IsCompilationProcess() || iRequireZaps >= REQUIRE_ZAPS_COUNT)
+ iRequireZaps = REQUIRE_ZAPS_NONE;
+
+ if (iRequireZaps != REQUIRE_ZAPS_NONE)
+ {
+ {
+ NewArrayHolder<WCHAR> wszZapRequireList;
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ZapRequireList, &wszZapRequireList));
+ if (wszZapRequireList)
+ pRequireZapsList = new AssemblyNamesList(wszZapRequireList);
+ }
+
+ {
+ NewArrayHolder<WCHAR> wszZapRequireExcludeList;
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ZapRequireExcludeList, &wszZapRequireExcludeList));
+ if (wszZapRequireExcludeList)
+ pRequireZapsExcludeList = new AssemblyNamesList(wszZapRequireExcludeList);
+ }
+ }
+
+#ifdef _DEBUG
+ iForbidZaps = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NgenBind_ZapForbid) != 0;
+ if (iForbidZaps != 0)
+ {
+ {
+ NewArrayHolder<WCHAR> wszZapForbidList;
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NgenBind_ZapForbidList, &wszZapForbidList));
+ if (wszZapForbidList)
+ pForbidZapsList = new AssemblyNamesList(wszZapForbidList);
+ }
+
+ {
+ NewArrayHolder<WCHAR> wszZapForbidExcludeList;
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NgenBind_ZapForbidExcludeList, &wszZapForbidExcludeList));
+ if (wszZapForbidExcludeList)
+ pForbidZapsExcludeList = new AssemblyNamesList(wszZapForbidExcludeList);
+ }
+ }
+#endif
+
+#ifdef _TARGET_AMD64_
+ if (!IsCompilationProcess())
+ {
+ NewArrayHolder<WCHAR> wszDisableNativeImageLoadList;
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_DisableNativeImageLoadList, &wszDisableNativeImageLoadList));
+ if (wszDisableNativeImageLoadList)
+ pDisableNativeImageLoadList = new AssemblyNamesList(wszDisableNativeImageLoadList);
+ }
+#endif
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ dwSharePolicy = GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_LoaderOptimization, dwSharePolicy);
+#endif
+
+#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
+ DoubleArrayToLargeObjectHeapThreshold = GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_DoubleArrayToLargeObjectHeap, DoubleArrayToLargeObjectHeapThreshold);
+#endif
+
+#ifdef FEATURE_PREJIT
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ZapBBInstr, (LPWSTR*)&szZapBBInstr));
+ if (szZapBBInstr)
+ {
+ szZapBBInstr = NarrowWideChar((LPWSTR)szZapBBInstr);
+
+ // If szZapBBInstr only contains white space, then there's nothing to instrument (this
+ // is the case with some test cases, and it's easier to fix all of them here).
+ LPWSTR pStr = (LPWSTR) szZapBBInstr;
+ while (*pStr == W(' ')) pStr++;
+ if (*pStr == 0)
+ szZapBBInstr = NULL;
+ }
+
+ if (szZapBBInstr != NULL)
+ {
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ZapBBInstrDir, &szZapBBInstrDir));
+ g_IBCLogger.EnableAllInstr();
+ }
+ else
+ g_IBCLogger.DisableAllInstr();
+#endif
+
+#ifdef FEATURE_FUSION
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ZapSet, (LPWSTR*)&pZapSet));
+
+ m_fFreepZapSet = true;
+
+ if (pZapSet == NULL)
+ {
+ m_fFreepZapSet = false;
+ pZapSet = W("");
+ }
+ if (wcslen(pZapSet) > 3)
+ {
+ _ASSERTE(!"Zap Set String must be less than 3 chars");
+ delete[] pZapSet;
+ m_fFreepZapSet = false;
+ pZapSet = W("");
+ }
+
+ fNgenBindOptimizeNonGac = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_NgenBind_OptimizeNonGac) != 0;
+#endif
+
+ dwDisableStackwalkCache = GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_DisableStackwalkCache, dwDisableStackwalkCache);
+
+#ifdef FEATURE_REMOTING
+ fUseNewCrossDomainRemoting = GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_UseNewCrossDomainRemoting, fUseNewCrossDomainRemoting);
+#endif
+
+#ifdef _DEBUG
+ IfFailRet (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnClassLoad, (LPWSTR*) &pszBreakOnClassLoad));
+ pszBreakOnClassLoad = NarrowWideChar((LPWSTR)pszBreakOnClassLoad);
+#endif
+
+ dwSpinInitialDuration = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_SpinInitialDuration);
+ dwSpinBackoffFactor = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_SpinBackoffFactor);
+ dwSpinLimitProcCap = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_SpinLimitProcCap);
+ dwSpinLimitProcFactor = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_SpinLimitProcFactor);
+ dwSpinLimitConstant = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_SpinLimitConstant);
+ dwSpinRetryCount = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_SpinRetryCount);
+
+ fJitFramed = (GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_JitFramed, fJitFramed) != 0);
+ fJitAlignLoops = (GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_JitAlignLoops, fJitAlignLoops) != 0);
+ fJitMinOpts = (GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_JITMinOpts, fJitMinOpts) == 1);
+ iJitOptimizeType = GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_JitOptimizeType, iJitOptimizeType);
+ if (iJitOptimizeType > OPT_RANDOM) iJitOptimizeType = OPT_DEFAULT;
+
+#ifdef FEATURE_REJIT
+ fAddRejitNops = (GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_AddRejitNops, fAddRejitNops) != 0);
+#endif
+
+#ifdef _TARGET_X86_
+ fPInvokeRestoreEsp = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_Jit_NetFx40PInvokeStackResilience);
+#endif
+
+#ifndef FEATURE_CORECLR
+ // These two values respect the Shim's policy of favoring config files over registry settings.
+ fLegacyNullReferenceExceptionPolicy = (GetConfigDWORDFavoringConfigFile_DontUse_(CLRConfig::UNSUPPORTED_legacyNullReferenceExceptionPolicy,
+ fLegacyNullReferenceExceptionPolicy) != 0);
+ fLegacyUnhandledExceptionPolicy = (GetConfigDWORDFavoringConfigFile_DontUse_(CLRConfig::UNSUPPORTED_legacyUnhandledExceptionPolicy,
+ fLegacyUnhandledExceptionPolicy) != 0);
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+    // Check if the user has overridden how Corrupted State Exceptions (CSE) will be handled. If the
+ // <runtime> section of app.exe.config has "legacyCorruptedStateExceptionsPolicy" set to 1, then
+ // V4 runtime will treat CSE in the same fashion as V2.
+ fLegacyCorruptedStateExceptionsPolicy = (CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_legacyCorruptedStateExceptionsPolicy) != 0);
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ fLegacyVirtualMethodCallVerification = (GetConfigDWORDFavoringConfigFile_DontUse_(CLRConfig::EXTERNAL_legacyVirtualMethodCallVerification,
+ fLegacyVirtualMethodCallVerification,
+ REGUTIL::COR_CONFIG_ALL, TRUE,
+ CONFIG_SYSTEMONLY) != 0);
+
+ fLegacyApartmentInitPolicy = (GetConfigDWORDFavoringConfigFile_DontUse_(CLRConfig::EXTERNAL_legacyApartmentInitPolicy,
+ fLegacyApartmentInitPolicy) != 0);
+
+ fLegacyComHierarchyVisibility = (GetConfigDWORDFavoringConfigFile_DontUse_(CLRConfig::EXTERNAL_legacyComHierarchyVisibility,
+ fLegacyComHierarchyVisibility) != 0);
+
+ fLegacyComVTableLayout = (GetConfigDWORDFavoringConfigFile_DontUse_(CLRConfig::EXTERNAL_legacyComVTableLayout,
+ fLegacyComVTableLayout) != 0);
+ fNewComVTableLayout = (GetConfigDWORDFavoringConfigFile_DontUse_(CLRConfig::EXTERNAL_newComVTableLayout,
+ fNewComVTableLayout) != 0);
+
+ if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_legacyImpersonationPolicy) != 0)
+ iImpersonationPolicy = IMP_NOFLOW;
+ else if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_alwaysFlowImpersonationPolicy) != 0)
+ iImpersonationPolicy = IMP_ALWAYSFLOW;
+
+ fLegacyLoadMscorsnOnStartup = (GetConfigDWORDFavoringConfigFile_DontUse_(CLRConfig::UNSUPPORTED_legacyLoadMscorsnOnStartup,
+ fLegacyLoadMscorsnOnStartup) != 0);
+ fBypassStrongNameVerification = (GetConfigDWORDFavoringConfigFile_DontUse_(W("bypassTrustedAppStrongNames"), fBypassStrongNameVerification) != 0) && // App opted in
+ (GetConfigDWORD_DontUse_(SN_CONFIG_BYPASS_POLICY_W, TRUE, REGUTIL::COR_CONFIG_MACHINE) != 0); // And the machine policy allows for bypass
+ fGeneratePublisherEvidence = (GetConfigDWORDFavoringConfigFile_DontUse_(CLRConfig::EXTERNAL_generatePublisherEvidence, fGeneratePublisherEvidence) != 0);
+ fEnforceFIPSPolicy = (GetConfigDWORDFavoringConfigFile_DontUse_(CLRConfig::EXTERNAL_enforceFIPSPolicy, fEnforceFIPSPolicy) != 0);
+ fLegacyHMACMode = (GetConfigDWORDFavoringConfigFile_DontUse_(CLRConfig::EXTERNAL_legacyHMACMode, fLegacyHMACMode) != 0);
+
+ fCacheBindingFailures = !(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_disableCachingBindingFailures));
+ fUseLegacyIdentityFormat =
+#ifdef FEATURE_APPX
+ AppX::IsAppXProcess() ||
+#endif
+ (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_useLegacyIdentityFormat) != 0);
+ fDisableFusionUpdatesFromADManager = (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_disableFusionUpdatesFromADManager) != 0);
+ fDisableCommitThreadStack = (GetConfigDWORDFavoringConfigFile_DontUse_(CLRConfig::EXTERNAL_disableCommitThreadStack, fDisableCommitThreadStack) != 0);
+ fProbeForStackOverflow = !(CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_disableStackOverflowProbing));
+#endif // FEATURE_CORECLR
+
+#ifdef _DEBUG
+ fDebuggable = (GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_JitDebuggable, fDebuggable) != 0);
+ fStressOn = (GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_StressOn, fStressOn) != 0);
+ apiThreadStressCount = GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_APIThreadStress, apiThreadStressCount);
+
+ LPWSTR wszPreStubStuff = NULL;
+
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_PrestubHalt, &wszPreStubStuff));
+ IfFailRet(ParseMethList(wszPreStubStuff, &pPrestubHalt));
+
+ LPWSTR wszInvokeStuff = NULL;
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_InvokeHalt, &wszInvokeStuff));
+ IfFailRet(ParseMethList(wszInvokeStuff, &pInvokeHalt));
+
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_PrestubGC, &wszPreStubStuff));
+ IfFailRet(ParseMethList(wszPreStubStuff, &pPrestubGC));
+
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnClassBuild, (LPWSTR*)&pszBreakOnClassBuild));
+ pszBreakOnClassBuild = NarrowWideChar((LPWSTR)pszBreakOnClassBuild);
+
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnInstantiation, (LPWSTR*)&pszBreakOnInstantiation));
+ pszBreakOnInstantiation = NarrowWideChar((LPWSTR)pszBreakOnInstantiation);
+
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnMethodName, (LPWSTR*)&pszBreakOnMethodName));
+ pszBreakOnMethodName = NarrowWideChar((LPWSTR)pszBreakOnMethodName);
+
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DumpOnClassLoad, (LPWSTR*)&pszDumpOnClassLoad));
+ pszDumpOnClassLoad = NarrowWideChar((LPWSTR)pszDumpOnClassLoad);
+
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnInteropStubSetup, (LPWSTR*)&pszBreakOnInteropStubSetup));
+ pszBreakOnInteropStubSetup = NarrowWideChar((LPWSTR)pszBreakOnInteropStubSetup);
+
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnComToClrNativeInfoInit, (LPWSTR*)&pszBreakOnComToClrNativeInfoInit));
+ pszBreakOnComToClrNativeInfoInit = NarrowWideChar((LPWSTR)pszBreakOnComToClrNativeInfoInit);
+
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnStructMarshalSetup, (LPWSTR*)&pszBreakOnStructMarshalSetup));
+ pszBreakOnStructMarshalSetup = NarrowWideChar((LPWSTR)pszBreakOnStructMarshalSetup);
+
+ m_fAssertOnBadImageFormat = (GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_AssertOnBadImageFormat, m_fAssertOnBadImageFormat) != 0);
+ m_fAssertOnFailFast = (GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_AssertOnFailFast, m_fAssertOnFailFast) != 0);
+
+ fSuppressChecks = (GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SuppressChecks, fSuppressChecks) != 0);
+ CHECK::SetAssertEnforcement(!fSuppressChecks);
+
+ fConditionalContracts = (GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_ConditionalContracts, fConditionalContracts) != 0);
+
+#ifdef ENABLE_CONTRACTS_IMPL
+ Contract::SetUnconditionalContractEnforcement(!fConditionalContracts);
+#endif
+
+ fEnableFullDebug = (GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_EnableFullDebug, fEnableFullDebug) != 0);
+
+ fVerifierOff = (GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_VerifierOff, fVerifierOff) != 0);
+
+ fJitVerificationDisable = (GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_JitVerificationDisable, fJitVerificationDisable) != 0);
+
+ fLogTransparencyErrors = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_Security_LogTransparencyErrors) != 0;
+
+ // TlbImp stuff
+ fTlbImpSkipLoading = (GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_TlbImpSkipLoading, fTlbImpSkipLoading) != 0);
+
+ iExposeExceptionsInCOM = GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_ExposeExceptionsInCOM, iExposeExceptionsInCOM);
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_LogCCWRefCountChange, (LPWSTR*)&pszLogCCWRefCountChange));
+ pszLogCCWRefCountChange = NarrowWideChar((LPWSTR)pszLogCCWRefCountChange);
+ if (pszLogCCWRefCountChange != NULL)
+ bLogCCWRefCountChange = true;
+
+ fEnableRCWCleanupOnSTAShutdown = (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EnableRCWCleanupOnSTAShutdown) != 0);
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_CORECLR
+    // Eager verification of all assemblies.
+ fVerifyAllOnLoad = (GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_VerifyAllOnLoad, fVerifyAllOnLoad) != 0);
+#endif //FEATURE_CORECLR
+
+#ifdef _DEBUG
+ fExpandAllOnLoad = (GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_ExpandAllOnLoad, fExpandAllOnLoad) != 0);
+#endif //_DEBUG
+
+#ifdef FEATURE_FUSION
+ if(g_pConfig) {
+ LPCWSTR result = NULL;
+ if(SUCCEEDED(g_pConfig->GetConfiguration_DontUse_(CLRConfig::EXTERNAL_developerInstallation, CONFIG_SYSTEM, &result)) && result)
+ {
+            // <TODO> CTS, add additional checks to ensure this is an SDK installation </TODO>
+ if(SString::_wcsicmp(result, W("true")) == 0)
+ m_fDeveloperInstallation = true;
+ }
+ }
+#endif
+
+#ifdef AD_NO_UNLOAD
+ fAppDomainUnload = (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_AppDomainNoUnload) == 0);
+#endif
+ dwADURetryCount=GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_ADURetryCount, dwADURetryCount);
+ if (dwADURetryCount==(DWORD)-1)
+ {
+ _ASSERTE(!"Reserved value");
+ dwADURetryCount=(DWORD)-2;
+ }
+
+#ifdef ENABLE_STARTUP_DELAY
+ {
+        // We want this string parsed in decimal
+ WCHAR * end;
+ WCHAR * str;
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_StartupDelayMS, &str));
+ if( str )
+ {
+ errno = 0;
+ iStartupDelayMS = wcstoul(str, &end, 10);
+ if (errno == ERANGE || end == str)
+ iStartupDelayMS = 0;
+ }
+ }
+#endif
+
+#ifdef _DEBUG
+
+#ifdef TEST_DATA_CONSISTENCY
+ fTestDataConsistency = (CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_TestDataConsistency) !=0);
+#endif
+
+ fDoAllowUntrustedCallerChecks =
+ (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SupressAllowUntrustedCallerChecks) != 1);
+
+
+ m_SuspendThreadDeadlockTimeoutMs = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SuspendThreadDeadlockTimeoutMs);
+ m_SuspendDeadlockTimeout = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SuspendDeadlockTimeout);
+#endif // _DEBUG
+ fInited = true;
+
+#ifdef _DEBUG
+ m_pTraceIUnknown = (IUnknown*)(DWORD_PTR)(GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_TraceIUnknown, (DWORD)(DWORD_PTR)(m_pTraceIUnknown))); // <TODO> WIN64 - conversion from DWORD to IUnknown* of greater size</TODO>
+ m_TraceWrapper = GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_TraceWrap, m_TraceWrapper);
+
+ // can't have both
+ if (m_pTraceIUnknown != 0)
+ {
+ m_TraceWrapper = 0;
+ }
+ else
+ if (m_TraceWrapper != 0)
+ {
+ m_pTraceIUnknown = (IUnknown*)-1;
+ }
+#endif
+
+#ifdef _DEBUG
+
+ LPWSTR wszPerfTypes = NULL;
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_PerfTypesToLog, &wszPerfTypes));
+ IfFailRet(ParseTypeList(wszPerfTypes, &pPerfTypesToLog));
+
+ iPerfNumAllocsThreshold = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_PerfNumAllocsThreshold);
+ iPerfAllocsSizeThreshold = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_PerfAllocsSizeThreshold);
+
+ fGenerateStubForHost = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GenerateStubForHost);
+
+ fShouldInjectFault = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_InjectFault);
+
+ testThreadAbort = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HostTestThreadAbort);
+ testADUnload = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HostTestADUnload);
+
+#endif //_DEBUG
+
+#ifdef FEATURE_COMINTEROP
+ m_fComInsteadOfManagedRemoting = (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ComInsteadOfManagedRemoting) != 0);
+#endif // FEATURE_COMINTEROP
+ m_fInteropValidatePinnedObjects = (CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_InteropValidatePinnedObjects) != 0);
+ m_fInteropLogArguments = (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_InteropLogArguments) != 0);
+
+#ifdef FEATURE_PREJIT
+#ifndef FEATURE_CORECLR
+ DWORD iNgenHardBindOverride = GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_HardPrejitEnabled, iNgenHardBind);
+ _ASSERTE(iNgenHardBindOverride < NGEN_HARD_BIND_COUNT);
+ if (iNgenHardBindOverride < NGEN_HARD_BIND_COUNT)
+ iNgenHardBind = NgenHardBindType(iNgenHardBindOverride);
+#endif
+#ifdef _DEBUG
+ dwNgenForceFailureMask = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NgenForceFailureMask);
+ dwNgenForceFailureCount = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NgenForceFailureCount);
+ dwNgenForceFailureKind = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NgenForceFailureKind);
+#endif
+#endif // FEATURE_PREJIT
+
+ DWORD iGCPollTypeOverride = GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_GCPollType, iGCPollType);
+
+#ifndef FEATURE_HIJACK
+ // Platforms that do not support hijacking MUST support GC polling.
+ // Reject attempts by the user to configure the GC polling type as
+ // GCPOLL_TYPE_HIJACK.
+ _ASSERTE(EEConfig::GCPOLL_TYPE_HIJACK != iGCPollTypeOverride);
+ if (EEConfig::GCPOLL_TYPE_HIJACK == iGCPollTypeOverride)
+ iGCPollTypeOverride = EEConfig::GCPOLL_TYPE_DEFAULT;
+#endif
+
+ _ASSERTE(iGCPollTypeOverride < GCPOLL_TYPE_COUNT);
+ if (iGCPollTypeOverride < GCPOLL_TYPE_COUNT)
+ iGCPollType = GCPollType(iGCPollTypeOverride);
+
+#if defined(_DEBUG) && defined(WIN64EXCEPTIONS)
+ fSuppressLockViolationsOnReentryFromOS = (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SuppressLockViolationsOnReentryFromOS) != 0);
+#endif
+
+#if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO)
+ fStubLinkerUnwindInfoVerificationOn = (GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_StubLinkerUnwindInfoVerificationOn, fStubLinkerUnwindInfoVerificationOn) != 0);
+#endif
+
+ if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_UseMethodDataCache) != 0) {
+ MethodTable::AllowMethodDataCaching();
+ }
+
+ if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_UseParentMethodData) != 0) {
+ MethodTable::AllowParentMethodDataCopy();
+ }
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ // Get the symbol reading policy setting which is maintained by the hosting API (since it can be overridden there)
+ const DWORD notSetToken = 0xFFFFFFFF;
+ DWORD iSymbolReadingConfig = GetConfigDWORDFavoringConfigFile_DontUse_(CLRConfig::EXTERNAL_SymbolReadingPolicy, notSetToken );
+ if( iSymbolReadingConfig != notSetToken &&
+ iSymbolReadingConfig <= eSymbolReadingFullTrustOnly )
+ {
+ ESymbolReadingPolicy policy = ESymbolReadingPolicy(iSymbolReadingConfig);
+ CCLRDebugManager::SetSymbolReadingPolicy( policy, eSymbolReadingSetByConfig );
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#if defined(_DEBUG) && defined(_TARGET_AMD64_)
+ m_cGenerateLongJumpDispatchStubRatio = GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_GenerateLongJumpDispatchStubRatio,
+ static_cast<DWORD>(m_cGenerateLongJumpDispatchStubRatio));
+#endif
+
+#if defined(_DEBUG)
+ bDiagnosticSuspend = (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DiagnosticSuspend) != 0);
+#endif
+
+ dwSleepOnExit = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_SleepOnExit);
+
+#ifdef FEATURE_APPX
+ dwWindows8ProfileAPICheckFlag = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Windows8ProfileAPICheckFlag);
+#endif
+
+ return hr;
+}
+
+//
+// #GetConfigValueCallback
+// Provides a way for code:CLRConfig to access configuration file values.
+//
+// static
+HRESULT EEConfig::GetConfigValueCallback(__in_z LPCWSTR pKey, __deref_out_opt LPCWSTR* pValue, BOOL systemOnly, BOOL applicationFirst)
+{
+ CONTRACT (HRESULT) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pValue));
+ PRECONDITION(CheckPointer(pKey));
+ } CONTRACT_END;
+
+ // Ensure that both options aren't set.
+ _ASSERTE(!(systemOnly && applicationFirst));
+
+ if(g_pConfig != NULL)
+ {
+ ConfigSearch direction = CONFIG_SYSTEM;
+ if(systemOnly)
+ {
+ direction = CONFIG_SYSTEMONLY;
+ }
+ else if(applicationFirst)
+ {
+ direction = CONFIG_APPLICATION;
+ }
+
+ RETURN g_pConfig->GetConfiguration_DontUse_(pKey, direction, pValue);
+ }
+ else
+ {
+ RETURN E_FAIL;
+ }
+}
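+
+// The two BOOL flags select among the three search directions (both set at
+// once is asserted against above):
+//
+//   systemOnly  applicationFirst  ->  direction
+//   TRUE        FALSE                 CONFIG_SYSTEMONLY   (first table only)
+//   FALSE       TRUE                  CONFIG_APPLICATION  (app config first)
+//   FALSE       FALSE                 CONFIG_SYSTEM       (system-first walk)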
+
+HRESULT EEConfig::GetConfiguration_DontUse_(__in_z LPCWSTR pKey, ConfigSearch direction, __deref_out_opt LPCWSTR* pValue)
+{
+ CONTRACT (HRESULT) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT; // TODO: Verify this does not do anything that would make it so_intolerant
+ PRECONDITION(CheckPointer(pValue));
+ PRECONDITION(CheckPointer(pKey));
+ } CONTRACT_END;
+
+ Thread *pThread = GetThread();
+ ConfigStringKeyValuePair * pair = NULL;
+
+ *pValue = NULL;
+ ConfigList::ConfigIter iter(&m_Configuration);
+
+ switch(direction) {
+ case CONFIG_SYSTEMONLY:
+ {
+ // for things that only admin should be able to set
+ ConfigStringHashtable* table = iter.Next();
+ if(table != NULL)
+ {
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, RETURN E_FAIL;)
+ pair = table->Lookup(pKey);
+ END_SO_INTOLERANT_CODE
+ if(pair != NULL)
+ {
+ *pValue = pair->value;
+ RETURN S_OK;
+ }
+ }
+ RETURN E_FAIL;
+ }
+ case CONFIG_SYSTEM:
+ {
+ for(ConfigStringHashtable* table = iter.Next();
+ table != NULL;
+ table = iter.Next())
+ {
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, RETURN E_FAIL;)
+ pair = table->Lookup(pKey);
+ END_SO_INTOLERANT_CODE
+ if(pair != NULL)
+ {
+ *pValue = pair->value;
+ RETURN S_OK;
+ }
+ }
+ RETURN E_FAIL;
+ }
+ case CONFIG_APPLICATION: {
+ for(ConfigStringHashtable* table = iter.Previous();
+ table != NULL;
+ table = iter.Previous())
+ {
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, RETURN E_FAIL;)
+ pair = table->Lookup(pKey);
+ END_SO_INTOLERANT_CODE
+ if(pair != NULL)
+ {
+ *pValue = pair->value;
+ RETURN S_OK;
+ }
+ }
+ RETURN E_FAIL;
+ }
+ default:
+ RETURN E_FAIL;
+ }
+}
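+
+// Illustrative lookup (the key name below is hypothetical). The returned
+// string points into the config table, so callers must not free it:
+//
+//   LPCWSTR pVal = NULL;
+//   if (SUCCEEDED(g_pConfig->GetConfiguration_DontUse_(W("someSwitch"),
+//                                                      CONFIG_SYSTEM, &pVal))
+//       && pVal != NULL)
+//   {
+//       // use pVal; do not delete it
+//   }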
+
+LPCWSTR EEConfig::GetProcessBindingFile()
+{
+ LIMITED_METHOD_CONTRACT;
+ return g_pszHostConfigFile;
+}
+
+SIZE_T EEConfig::GetSizeOfProcessBindingFile()
+{
+ LIMITED_METHOD_CONTRACT;
+ return g_dwHostConfigFile;
+}
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE) // unimpactful install --> no config files
+
+/**************************************************************/
+static void MessageBoxParseError(HRESULT hr, __in_z LPCWSTR wszFile);
+
+#define IfFailParseError(FILE, ISAPPCONFIG, ...) \
+ do \
+ { \
+ /* On error, always show an error dialog and return an error result when process is immersive; */ \
+ /* otherwise show dialog (conditionally for App config) and swallow error. */ \
+ if (FAILED(hr = (__VA_ARGS__)) && (!(ISAPPCONFIG) || AppX::IsAppXProcess() || GetConfigDWORDInternal_DontUse_(CLRConfig::EXTERNAL_NotifyBadAppCfg,false))) \
+ { \
+ MessageBoxParseError(hr, FILE); \
+ if (AppX::IsAppXProcess()) \
+ { /* Fail on bad config in AppX process. */ \
+ return hr; \
+ } \
+ else \
+ { \
+ hr = S_FALSE; \
+ } \
+ } \
+ } while (false)
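+
+// Usage note for IfFailParseError: on a parse failure it shows the error
+// dialog (for app config files only when the process is AppX or
+// NotifyBadAppCfg is set), then either propagates hr (AppX, where bad config
+// is fatal) or downgrades it to S_FALSE so startup continues. Typical call
+// (arguments schematic):
+//
+//   IfFailParseError(wzFile, true, AppendConfigurationFile(wzFile, version));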
+
+/**************************************************************/
+HRESULT EEConfig::SetupConfiguration()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ WCHAR version[_MAX_PATH];
+ DWORD dwVersion = _MAX_PATH;
+
+ HRESULT hr = S_OK;
+ // Get the version location
+ IfFailRet(GetCORVersionInternal(version, _MAX_PATH, & dwVersion));
+
+ // See if the environment has specified an XML file
+ NewArrayHolder<WCHAR> file(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CONFIG));
+ if(file != NULL)
+ {
+ IfFailParseError(file, false, AppendConfigurationFile(file, version));
+ }
+
+    // We need to read configuration information from 3 sources: the app config file, the
+    // host supplied config file, and the machine.config file. The order in which we
+    // read them is very important. If different config sources specify the same config
+    // setting, we will use the setting of the first one read.
+ //
+ // In the pecking order, machine.config should always have the final say. The host supplied config
+ // file should follow, and lastly, the app config file.
+ //
+    // Note: the order we read them in is not the order they are published. We need to read
+    // the AppConfig first so that we can decide whether to import machine.config (yes in
+    // Classic, typically no in AppX). We still publish in the order required as described above.
+
+ enum
+ {
+ MachineConfig = 0,
+ HostConfig = 1,
+ AppConfig = 2,
+ NumConfig = 3,
+ };
+
+ ConfigSource * rgpSources[NumConfig] = { nullptr };
+
+ // Create ConfigSource objects for all config files.
+ for (size_t i = 0; i < NumConfig; ++i)
+ {
+ rgpSources[i] = new (nothrow) ConfigSource();
+ if (rgpSources[i] == NULL)
+ {
+ while (i != 0)
+ {
+ --i;
+ delete rgpSources[i];
+ rgpSources[i] = nullptr;
+ }
+ return E_OUTOFMEMORY;
+ }
+ }
+
+ // Publish ConfigSource objects in required order. It's ok that the file contents are imported below,
+ // since we're in EEStartup and this data cannot be accessed by any other threads yet.
+ for (size_t i = 0; i < NumConfig; ++i)
+ {
+ m_Configuration.Append(rgpSources[i]);
+ }
+
+ // ----------------------------------------------------
+ // Import the app.config file, or in the case of an
+ // AppX process check to make sure no app.config file
+ // exists unless launched with AO_DESIGNMODE.
+ // ----------------------------------------------------
+ {
+ WCHAR wzProcExe[_MAX_PATH];
+ size_t cchProcExe = COUNTOF(wzProcExe);
+
+ // Get name of file used to create process
+ if (g_pCachedModuleFileName)
+ {
+ IfFailRet(StringCchCopy(wzProcExe, COUNTOF(wzProcExe), g_pCachedModuleFileName));
+ IfFailRet(StringCchLength(wzProcExe, COUNTOF(wzProcExe), &cchProcExe));
+ }
+ else
+ {
+ cchProcExe = WszGetModuleFileName(NULL, wzProcExe, COUNTOF(wzProcExe));
+
+ if (cchProcExe == 0)
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+ }
+
+ if (cchProcExe != 0)
+ {
+ IfFailRet(StringCchCat(wzProcExe, COUNTOF(wzProcExe), CONFIGURATION_EXTENSION));
+
+ if (AppX::IsAppXProcess() && !AppX::IsAppXDesignMode())
+ {
+ if (clr::fs::Path::Exists(wzProcExe))
+ {
+ return CLR_E_APP_CONFIG_NOT_ALLOWED_IN_APPX_PROCESS;
+ }
+ }
+
+ IfFailParseError(wzProcExe, true, AppendConfigurationFile(wzProcExe, version));
+
+ // We really should return a failure hresult if the app config file is bad, but that
+ // would be a breaking change. Not sure if it's worth it yet.
+ hr = S_OK;
+ }
+ }
+
+ // ----------------------------------------------------
+ // Import machine.config, if needed.
+ // ----------------------------------------------------
+ if (!AppX::IsAppXProcess() || AppX::IsAppXDesignMode())
+ {
+ WCHAR wzSystemDir[_MAX_PATH];
+ DWORD cchSystemDir = COUNTOF(wzSystemDir);
+ IfFailRet(GetInternalSystemDirectory(wzSystemDir, &cchSystemDir));
+
+ // cchSystemDir already includes the NULL
+ if(cchSystemDir + StrLen(MACHINE_CONFIGURATION_FILE) <= _MAX_PATH)
+ {
+ IfFailRet(StringCchCat(wzSystemDir, COUNTOF(wzSystemDir), MACHINE_CONFIGURATION_FILE));
+
+ // CLR_STARTUP_OPT:
+ // The machine.config file can be very large. We cannot afford
+ // to parse all of it at CLR startup time.
+ //
+ // Accordingly, we instruct the XML parser to stop parsing the
+ // machine.config file when it sees the end of the
+ // <runtime>...</runtime> section that holds our data (if any).
+ //
+ // By construction, this section is now placed near the top
+ // of machine.config.
+ //
+ IfFailParseError(wzSystemDir, false, ImportConfigurationFile(
+ rgpSources[MachineConfig]->Table(), wzSystemDir, version, stopAfterRuntimeSection));
+
+ if (hr == S_FALSE) // means that we couldn't find machine.config
+ hr = S_OK;
+ }
+ }
+
+ // ----------------------------------------------------
+ // Import the host supplied config file, if needed.
+ // ----------------------------------------------------
+ // Cannot host an AppX managed process, so no need to check devModeEnabled.
+ if (!AppX::IsAppXProcess())
+ {
+ if (GetProcessBindingFile() != NULL && GetSizeOfProcessBindingFile() > 0)
+ {
+ IfFailRet(ImportConfigurationFile(
+ rgpSources[HostConfig]->Table(), GetProcessBindingFile(), version));
+ }
+ }
+
+ return hr;
+}
+
+//
+// There was an error 'hr' parsing the file 'wszFile'.
+// Pop up a MessageBox reporting the error, unless the config setting
+// 'NoGuiFromShim' is in effect.
+//
+static void MessageBoxParseError(HRESULT hr, __in_z LPCWSTR wszFile)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(FAILED(hr));
+ } CONTRACTL_END;
+
+ if (!REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::UNSUPPORTED_NoGuiFromShim, FALSE))
+ {
+ EEMessageBoxCatastrophic(IDS_EE_CONFIGPARSER_ERROR, IDS_EE_CONFIGPARSER_ERROR_CAPTION, wszFile, hr);
+ }
+}
+
+/**************************************************************/
+
+STDAPI GetXMLObjectEx(IXMLParser **ppv);
+
+HRESULT EEConfig::ImportConfigurationFile(
+ ConfigStringHashtable* pTable,
+ LPCWSTR pszFileName,
+ LPCWSTR version,
+ ParseCtl parseCtl)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTable));
+ PRECONDITION(CheckPointer(pszFileName));
+ PRECONDITION(CheckPointer(version));
+ INJECT_FAULT(return E_OUTOFMEMORY);
+ } CONTRACTL_END;
+
+ NonVMComHolder<IXMLParser> pIXMLParser(NULL);
+ NonVMComHolder<IStream> pFile(NULL);
+ NonVMComHolder<EEConfigFactory> factory(NULL);
+
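+    // Flow: open the file as a stream, obtain the IXMLParser, attach an
+    // EEConfigFactory as its node factory, then Run(-1) to parse to the end
+    // (or until the factory aborts after the <runtime> section).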
+ HRESULT hr = CreateConfigStreamHelper(pszFileName, &pFile);
+ if(FAILED(hr)) goto Exit;
+
+ hr = GetXMLObjectEx(&pIXMLParser);
+ if(FAILED(hr)) goto Exit;
+
+ factory = new (nothrow) EEConfigFactory(pTable, version, parseCtl);
+
+ if ( ! factory) {
+ hr = E_OUTOFMEMORY;
+ goto Exit;
+ }
+ factory->AddRef(); // RefCount = 1
+
+ hr = pIXMLParser->SetInput(pFile); // filestream's RefCount=2
+ if ( ! SUCCEEDED(hr))
+ goto Exit;
+
+ hr = pIXMLParser->SetFactory(factory); // factory's RefCount=2
+ if ( ! SUCCEEDED(hr))
+ goto Exit;
+
+ {
+ CONTRACT_VIOLATION(ThrowsViolation); // @todo: Run() throws!
+ hr = pIXMLParser->Run(-1);
+ }
+
+Exit:
+ if (hr == (HRESULT) XML_E_MISSINGROOT)
+ hr = S_OK;
+ else if (Assembly::FileNotFound(hr))
+ hr = S_FALSE;
+
+ return hr;
+}
+
+HRESULT EEConfig::AppendConfigurationFile(
+ LPCWSTR pszFileName,
+ LPCWSTR version,
+ ParseCtl parseCtl)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+
+ ConfigStringHashtable* pTable = m_Configuration.Append();
+ IfNullRet(pTable);
+
+ return ImportConfigurationFile(pTable, pszFileName, version, parseCtl);
+}
+
+
+#endif // FEATURE_CORECLR && !CROSSGEN_COMPILE
+
+bool EEConfig::RequireZap(LPCUTF8 assemblyName) const
+{
+ LIMITED_METHOD_CONTRACT;
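+
+    // Precedence: the exclude list always wins; when no include list is
+    // configured, every assembly is required to have a native image.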
+ if (iRequireZaps == REQUIRE_ZAPS_NONE)
+ return false;
+
+ if (pRequireZapsExcludeList != NULL && pRequireZapsExcludeList->IsInList(assemblyName))
+ return false;
+
+ if (pRequireZapsList == NULL || pRequireZapsList->IsInList(assemblyName))
+ return true;
+
+ return false;
+}
+
+#ifdef _DEBUG
+bool EEConfig::ForbidZap(LPCUTF8 assemblyName) const
+{
+ LIMITED_METHOD_CONTRACT;
+ if (iForbidZaps == 0)
+ return false;
+
+ if (pForbidZapsExcludeList != NULL && pForbidZapsExcludeList->IsInList(assemblyName))
+ return false;
+
+ if (pForbidZapsList == NULL || pForbidZapsList->IsInList(assemblyName))
+ return true;
+
+ return false;
+}
+#endif
+
+#ifdef _TARGET_AMD64_
+bool EEConfig::DisableNativeImageLoad(LPCUTF8 assemblyName) const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (pDisableNativeImageLoadList != NULL && pDisableNativeImageLoadList->IsInList(assemblyName))
+ return true;
+
+ return false;
+}
+#endif
+
+/**************************************************************/
+#ifdef _DEBUG
+/**************************************************************/
+
+// Ownership of the string buffer passes to ParseMethList
+
+/* static */
+HRESULT EEConfig::ParseMethList(__in_z LPWSTR str, MethodNamesList** out) {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(return E_OUTOFMEMORY);
+ PRECONDITION(CheckPointer(str, NULL_OK));
+ PRECONDITION(CheckPointer(out));
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ *out = NULL;
+
+ // we are now done with the string passed in
+ if (str == NULL)
+ {
+ return S_OK;
+ }
+
+ EX_TRY
+ {
+ *out = new MethodNamesList(str);
+ } EX_CATCH_HRESULT(hr);
+
+ delete [] str;
+
+ return hr;
+}
+
+/**************************************************************/
+/* static */
+void EEConfig::DestroyMethList(MethodNamesList* list) {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(list));
+ } CONTRACTL_END;
+
+ if (list == 0)
+ return;
+ delete list;
+}
+
+/**************************************************************/
+/* static */
+bool EEConfig::IsInMethList(MethodNamesList* list, MethodDesc* pMD)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(list, NULL_OK));
+ PRECONDITION(CheckPointer(pMD));
+ } CONTRACTL_END;
+
+ if (list == 0)
+ return(false);
+ else
+ {
+ DefineFullyQualifiedNameForClass();
+
+ LPCUTF8 name = pMD->GetName();
+ if (name == NULL)
+ {
+ return false;
+ }
+ LPCUTF8 className = GetFullyQualifiedNameForClass(pMD->GetMethodTable());
+ if (className == NULL)
+ {
+ return false;
+ }
+ PCCOR_SIGNATURE sig = pMD->GetSig();
+
+ return list->IsInList(name, className, sig);
+ }
+}
+
+// Ownership of the string buffer passes to ParseTypeList
+/* static */
+HRESULT EEConfig::ParseTypeList(__in_z LPWSTR str, TypeNamesList** out)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(out));
+ PRECONDITION(CheckPointer(str, NULL_OK));
+ INJECT_FAULT(return E_OUTOFMEMORY);
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ *out = NULL;
+
+ if (str == NULL)
+ return S_OK;
+
+    NewHolder<TypeNamesList> newTypeNameList(new (nothrow) TypeNamesList());
+    if (newTypeNameList != NULL && FAILED(hr = newTypeNameList->Init(str)))
+    {
+        delete [] str; // we own the buffer (see above), so free it on failure too
+        return hr;
+    }
+
+ delete [] str;
+
+ newTypeNameList.SuppressRelease();
+ *out = newTypeNameList;
+
+ return (*out != NULL)?S_OK:E_OUTOFMEMORY;
+}
+
+void EEConfig::DestroyTypeList(TypeNamesList* list) {
+
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(list));
+ } CONTRACTL_END;
+
+ if (list == 0)
+ return;
+ delete list;
+}
+
+TypeNamesList::TypeNamesList()
+{
+ LIMITED_METHOD_CONTRACT;
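+    // pNames is initialized in Init(); callers must invoke Init() before use.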
+}
+
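+// Matches 'input' against 'regex'. A value written as /pattern/ (optionally
+// /pattern/i for case-insensitive matching) is treated as a regular
+// expression; any other value must compare equal under strcmp.
+//
+// Illustrative calls (hypothetical inputs):
+//   RegexOrExactMatch("/System\\..*/i", "system.string")  // regex, true
+//   RegexOrExactMatch("MyClass",        "MyClass")        // exact, true
+//   RegexOrExactMatch("MyClass",        "MyClassEx")      // exact, false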
+bool EEConfig::RegexOrExactMatch(LPCUTF8 regex, LPCUTF8 input)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (regex == NULL || input == NULL)
+ return false;
+
+ if (*regex == '/')
+ {
+ // Debug only, so we can live with it.
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ regex::STRRegEx::GroupingContainer groups;
+ if (regex::STRRegEx::Match("^/(.*)/(i?)$", regex, groups))
+ {
+ regex::STRRegEx::MatchFlags flags = regex::STRRegEx::DefaultMatchFlags;
+ if (groups[2].Length() != 0)
+ flags = (regex::STRRegEx::MatchFlags)(flags | regex::STRRegEx::MF_CASE_INSENSITIVE);
+
+ return regex::STRRegEx::Matches(groups[1].Begin(), groups[1].End(),
+ input, input + strlen(input), flags);
+ }
+ }
+ return strcmp(regex, input) == 0;
+}
+
+HRESULT TypeNamesList::Init(__in_z LPCWSTR str)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(str));
+ INJECT_FAULT(return E_OUTOFMEMORY);
+ } CONTRACTL_END;
+
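+    // 'str' is a space-delimited list of type names, e.g. (hypothetical)
+    // W("System.String MyNs.MyType"); each name is converted to UTF-8 and
+    // prepended to the pNames linked list.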
+ pNames = NULL;
+
+ LPCWSTR currentType = str;
+ int length = 0;
+ bool typeFound = false;
+
+ for (; *str != '\0'; str++)
+ {
+ switch(*str)
+ {
+ case ' ':
+ {
+ if (!typeFound)
+ break;
+
+ NewHolder<TypeName> tn(new (nothrow) TypeName());
+ if (tn == NULL)
+ return E_OUTOFMEMORY;
+
+ tn->typeName = new (nothrow) char[length + 1];
+ if (tn->typeName == NULL)
+ return E_OUTOFMEMORY;
+
+ tn.SuppressRelease();
+ MAKE_UTF8PTR_FROMWIDE_NOTHROW(temp, currentType);
+ if (temp == NULL)
+ return E_OUTOFMEMORY;
+
+ memcpy(tn->typeName, temp, length * sizeof(char));
+ tn->typeName[length] = '\0';
+
+ tn->next = pNames;
+ pNames = tn;
+
+ typeFound = false;
+ length = 0;
+
+ break;
+ }
+
+ default:
+ if (!typeFound)
+ currentType = str;
+
+ typeFound = true;
+ length++;
+ break;
+ }
+ }
+
+ if (typeFound)
+ {
+ NewHolder<TypeName> tn(new (nothrow) TypeName());
+ if (tn == NULL)
+ return E_OUTOFMEMORY;
+
+ tn->typeName = new (nothrow) char[length + 1];
+
+ if (tn->typeName == NULL)
+ return E_OUTOFMEMORY;
+
+ tn.SuppressRelease();
+ MAKE_UTF8PTR_FROMWIDE_NOTHROW(temp, currentType);
+ if (temp == NULL)
+ return E_OUTOFMEMORY;
+
+ memcpy(tn->typeName, temp, length * sizeof(char));
+ tn->typeName[length] = '\0';
+
+ tn->next = pNames;
+ pNames = tn;
+ }
+ return S_OK;
+}
+
+TypeNamesList::~TypeNamesList()
+{
+ CONTRACTL {
+ NOTHROW;
+ FORBID_FAULT;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+
+ while (pNames)
+ {
+ delete [] pNames->typeName;
+
+ TypeName *tmp = pNames;
+ pNames = pNames->next;
+
+ delete tmp;
+ }
+}
+
+bool TypeNamesList::IsInList(LPCUTF8 typeName)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ PRECONDITION(CheckPointer(typeName));
+ } CONTRACTL_END;
+
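+    // strstr(typeName, entry) == typeName is a prefix test: a list entry
+    // matches any type name that starts with it.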
+ TypeName *tnTemp = pNames;
+ while (tnTemp)
+ {
+ if (strstr(typeName, tnTemp->typeName) != typeName)
+ tnTemp = tnTemp->next;
+ else
+ return true;
+ }
+
+ return false;
+}
+#endif // _DEBUG
+
+#ifdef FEATURE_COMINTEROP
+void EEConfig::SetLogCCWRefCountChangeEnabled(bool newVal)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // logically we want pszLogCCWRefCountChange != NULL to force bLogCCWRefCountChange to be true
+ bLogCCWRefCountChange = (newVal || pszLogCCWRefCountChange != NULL);
+}
+
+bool EEConfig::ShouldLogCCWRefCountChange(LPCUTF8 pszClassName, LPCUTF8 pszNamespace) const
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END
+
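+    // Examples (hypothetical settings): "*" logs every class; "String" logs
+    // any class named String; "System.String" logs only String in namespace
+    // System.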
+ if (pszLogCCWRefCountChange == NULL)
+ return false;
+
+ // check simple class name
+ if (strcmp(pszLogCCWRefCountChange, "*") == 0 ||
+ strcmp(pszLogCCWRefCountChange, pszClassName) == 0)
+ return true;
+
+ // check namespace DOT class name
+ LPCUTF8 dot = strrchr(pszLogCCWRefCountChange, '.');
+ if (dot != NULL)
+ {
+ if (strncmp(pszLogCCWRefCountChange, pszNamespace, dot - pszLogCCWRefCountChange) == 0 &&
+ strcmp(dot + 1, pszClassName) == 0)
+ return true;
+ }
+ return false;
+}
+#endif // FEATURE_COMINTEROP
diff --git a/src/vm/eeconfig.h b/src/vm/eeconfig.h
new file mode 100644
index 0000000000..fd19e7dbc3
--- /dev/null
+++ b/src/vm/eeconfig.h
@@ -0,0 +1,1360 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// EEConfig.H
+//
+
+//
+// Fetches configuration data from the registry (should we JIT, run GC checks, ...)
+//
+//
+
+
+
+#ifndef EECONFIG_H
+#define EECONFIG_H
+
+class MethodDesc;
+
+#include "shash.h"
+#include "corhost.h"
+
+#ifdef _DEBUG
+class TypeNamesList
+{
+ class TypeName
+ {
+ LPUTF8 typeName;
+ TypeName *next; // Next name
+
+ friend class TypeNamesList;
+ };
+
+ TypeName *pNames; // List of names
+
+public:
+ TypeNamesList();
+ ~TypeNamesList();
+
+ HRESULT Init(__in_z LPCWSTR str);
+ bool IsInList(LPCUTF8 typeName);
+};
+#endif
+
+typedef struct _ConfigStringKeyValuePair
+{
+ WCHAR * key;
+ WCHAR * value;
+
+ _ConfigStringKeyValuePair()
+ {
+ key = NULL;
+ value = NULL;
+ }
+
+ WCHAR * GetKey()
+ {
+ return key;
+ }
+} ConfigStringKeyValuePair;
+
+typedef WStringSHash<ConfigStringKeyValuePair> ConfigStringHashtable;
+
+class ConfigList;
+
+//
+// Holds a pointer to a hashtable that is populated with data from config files.
+// Also acts as a node for a circular doubly-linked list.
+//
+class ConfigSource
+{
+ friend class ConfigList;
+public:
+ ConfigSource();
+ ~ConfigSource();
+
+ ConfigStringHashtable* Table();
+
+ //
+ // Connect this node into the list that prev is in.
+ //
+ void Add(ConfigSource* prev);
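+    //
+    // The definition lives elsewhere in this change; a minimal sketch of the
+    // expected splice (an assumption, for illustration only):
+    //   m_pPrev = prev;            m_pNext = prev->m_pNext;
+    //   m_pNext->m_pPrev = this;   prev->m_pNext = this;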
+
+ ConfigSource* Next()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pNext;
+ }
+
+ ConfigSource* Previous()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pPrev;
+ }
+
+
+private:
+ ConfigStringHashtable m_Table;
+ ConfigSource *m_pNext;
+ ConfigSource *m_pPrev;
+};
+
+//
+// Wrapper around the ConfigSource circular doubly-linked list.
+//
+class ConfigList
+{
+public:
+ //
+ // Iterator for traversing through a ConfigList.
+ //
+ class ConfigIter
+ {
+ public:
+ ConfigIter(ConfigList* pList)
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ pEnd = &(pList->m_pElement);
+ pCurrent = pEnd;
+ }
+
+ //
+ // TODO: Check if iterating through the list once skips an element.
+ // Returns the next node. If the next node is the head, returns null.
+ // Note that iteration can be resumed by calling next again.
+ //
+ ConfigStringHashtable* Next()
+ {
+ CONTRACT (ConfigStringHashtable*) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ } CONTRACT_END;
+
+            pCurrent = pCurrent->Next();
+ if(pCurrent == pEnd)
+ RETURN NULL;
+ else
+ RETURN pCurrent->Table();
+ }
+
+ ConfigStringHashtable* Previous()
+ {
+ CONTRACT (ConfigStringHashtable*) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ // MODE_ANY;
+ SO_TOLERANT;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ } CONTRACT_END;
+
+ pCurrent = pCurrent->Previous();
+ if(pCurrent == pEnd)
+ RETURN NULL;
+ else
+ RETURN pCurrent->Table();
+ }
+
+ private:
+ ConfigSource* pEnd;
+ ConfigSource* pCurrent;
+ };
+
+ ConfigStringHashtable* Add()
+ {
+ CONTRACT (ConfigStringHashtable*) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ } CONTRACT_END;
+
+ ConfigSource* pEntry = new (nothrow) ConfigSource();
+
+ if (pEntry == NULL)
+ RETURN NULL;
+
+ pEntry->Add(&m_pElement);
+ RETURN pEntry->Table();
+ }
+
+ ConfigStringHashtable* Append()
+ {
+ CONTRACT (ConfigStringHashtable*) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ } CONTRACT_END;
+
+ ConfigSource* pEntry = new (nothrow) ConfigSource();
+ if (pEntry == NULL)
+ RETURN NULL;
+
+ pEntry->Add(m_pElement.Previous());
+ RETURN pEntry->Table();
+ }
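+
+    // Assuming ConfigSource::Add(prev) splices the new node in after 'prev',
+    // Add() inserts at the front of the list (just after the m_pElement
+    // sentinel) and Append() inserts at the back.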
+
+ void Append(ConfigSource * pEntry)
+ {
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(CheckPointer(pEntry));
+
+ pEntry->Add(m_pElement.Previous());
+ }
+
+ ~ConfigList()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ FORBID_FAULT;
+ } CONTRACTL_END;
+
+ ConfigSource* pNext = m_pElement.Next();
+ while(pNext != &m_pElement) {
+ ConfigSource *last = pNext;
+ pNext = pNext->m_pNext;
+ delete last;
+ }
+ }
+
+friend class ConfigIter;
+
+private:
+ ConfigSource m_pElement;
+};
+
+enum { OPT_BLENDED,
+ OPT_SIZE,
+ OPT_SPEED,
+ OPT_RANDOM,
+ OPT_DEFAULT = OPT_BLENDED };
+
+/* Control of impersonation flow:
+ FASTFLOW means that impersonation is flowed only if it has been achieved through managed means. This is the default and avoids a kernel call.
+ NOFLOW is the Everett default where we don't flow the impersonation at all
+ ALWAYSFLOW is the (potentially) slow mode where we will always flow the impersonation, regardless of how it was achieved (managed or p/invoke). Includes
+ a kernel call.
+ Keep in sync with values in SecurityContext.cs
+ */
+enum {
+ IMP_FASTFLOW = 0,
+ IMP_NOFLOW = 1,
+ IMP_ALWAYSFLOW = 2,
+ IMP_DEFAULT = IMP_FASTFLOW };
+
+enum ParseCtl {
+ parseAll, // parse entire config file
+ stopAfterRuntimeSection // stop after <runtime>...</runtime> section
+};
+
+extern CorHostProtectionManager s_CorHostProtectionManager;
+
+class EEConfig
+{
+public:
+ typedef enum {
+ CONFIG_SYSTEM,
+ CONFIG_APPLICATION,
+ CONFIG_SYSTEMONLY
+ } ConfigSearch;
+
+ static HRESULT Setup();
+
+ void *operator new(size_t size);
+
+ HRESULT Init();
+ HRESULT Cleanup();
+
+ // Spinning heuristics
+
+ DWORD SpinInitialDuration(void) const {LIMITED_METHOD_CONTRACT; return dwSpinInitialDuration; }
+ DWORD SpinBackoffFactor(void) const {LIMITED_METHOD_CONTRACT; return dwSpinBackoffFactor; }
+ DWORD SpinLimitProcCap(void) const {LIMITED_METHOD_CONTRACT; return dwSpinLimitProcCap; }
+ DWORD SpinLimitProcFactor(void) const {LIMITED_METHOD_CONTRACT; return dwSpinLimitProcFactor; }
+ DWORD SpinLimitConstant(void) const {LIMITED_METHOD_CONTRACT; return dwSpinLimitConstant; }
+ DWORD SpinRetryCount(void) const {LIMITED_METHOD_CONTRACT; return dwSpinRetryCount; }
+
+ // Jit-config
+
+ unsigned int GenOptimizeType(void) const {LIMITED_METHOD_CONTRACT; return iJitOptimizeType; }
+ bool JitFramed(void) const {LIMITED_METHOD_CONTRACT; return fJitFramed; }
+ bool JitAlignLoops(void) const {LIMITED_METHOD_CONTRACT; return fJitAlignLoops; }
+ bool AddRejitNops(void) const {LIMITED_METHOD_DAC_CONTRACT; return fAddRejitNops; }
+ bool JitMinOpts(void) const {LIMITED_METHOD_CONTRACT; return fJitMinOpts; }
+
+ BOOL PInvokeRestoreEsp(BOOL fDefault) const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ switch (fPInvokeRestoreEsp)
+ {
+ case (unsigned)-1: return fDefault;
+ case 0: return FALSE;
+ default : return TRUE;
+ }
+ }
+
+ bool LegacyNullReferenceExceptionPolicy(void) const {LIMITED_METHOD_CONTRACT; return fLegacyNullReferenceExceptionPolicy; }
+ bool LegacyUnhandledExceptionPolicy(void) const {LIMITED_METHOD_CONTRACT; return fLegacyUnhandledExceptionPolicy; }
+ bool LegacyVirtualMethodCallVerification(void) const {LIMITED_METHOD_CONTRACT; return fLegacyVirtualMethodCallVerification; }
+
+ bool LegacyApartmentInitPolicy(void) const {LIMITED_METHOD_CONTRACT; return fLegacyApartmentInitPolicy; }
+ bool LegacyComHierarchyVisibility(void) const {LIMITED_METHOD_CONTRACT; return fLegacyComHierarchyVisibility; }
+ bool LegacyComVTableLayout(void) const {LIMITED_METHOD_CONTRACT; return fLegacyComVTableLayout; }
+ bool NewComVTableLayout(void) const {LIMITED_METHOD_CONTRACT; return fNewComVTableLayout; }
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Returns a bool to indicate if the legacy CSE (pre-v4) behaviour is enabled or not
+ bool LegacyCorruptedStateExceptionsPolicy(void) const {LIMITED_METHOD_CONTRACT; return fLegacyCorruptedStateExceptionsPolicy; }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ // SECURITY
+ unsigned ImpersonationMode(void) const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+ return iImpersonationPolicy ;
+ }
+ void SetLegacyImpersonationPolicy() { LIMITED_METHOD_CONTRACT; iImpersonationPolicy = IMP_NOFLOW; }
+ void SetAlwaysFlowImpersonationPolicy() { LIMITED_METHOD_CONTRACT; iImpersonationPolicy = IMP_ALWAYSFLOW; }
+
+#ifdef _DEBUG
+ bool LogTransparencyErrors() const { LIMITED_METHOD_CONTRACT; return fLogTransparencyErrors; }
+ bool DisableTransparencyEnforcement() const { LIMITED_METHOD_CONTRACT; return fLogTransparencyErrors; }
+#endif // _DEBUG
+
+ void SetLegacyLoadMscorsnOnStartup(bool val) { LIMITED_METHOD_CONTRACT; fLegacyLoadMscorsnOnStartup = val; }
+ bool LegacyLoadMscorsnOnStartup(void) const { LIMITED_METHOD_CONTRACT; return fLegacyLoadMscorsnOnStartup; }
+ bool BypassTrustedAppStrongNames() const { LIMITED_METHOD_CONTRACT; return fBypassStrongNameVerification; } // See code:AssemblySecurityDescriptor::ResolveWorker#StrongNameBypass
+ bool GeneratePublisherEvidence(void) const { LIMITED_METHOD_CONTRACT; return fGeneratePublisherEvidence; }
+ bool EnforceFIPSPolicy() const { LIMITED_METHOD_CONTRACT; return fEnforceFIPSPolicy; }
+ bool LegacyHMACMode() const { LIMITED_METHOD_CONTRACT; return fLegacyHMACMode; }
+
+#ifdef FEATURE_COMINTEROP
+ bool ComInsteadOfManagedRemoting() const {LIMITED_METHOD_CONTRACT; return m_fComInsteadOfManagedRemoting; }
+#endif //FEATURE_COMINTEROP
+ bool InteropValidatePinnedObjects() const { LIMITED_METHOD_CONTRACT; return m_fInteropValidatePinnedObjects; }
+ bool InteropLogArguments() const { LIMITED_METHOD_CONTRACT; return m_fInteropLogArguments; }
+
+#ifdef _DEBUG
+ bool GenDebuggableCode(void) const {LIMITED_METHOD_CONTRACT; return fDebuggable; }
+ bool IsStressOn(void) const {LIMITED_METHOD_CONTRACT; return fStressOn; }
+ int GetAPIThreadStressCount(void) const {LIMITED_METHOD_CONTRACT; return apiThreadStressCount; }
+ bool TlbImpSkipLoading() const {LIMITED_METHOD_CONTRACT; return fTlbImpSkipLoading; }
+
+ bool ShouldExposeExceptionsInCOMToConsole() const {LIMITED_METHOD_CONTRACT; return (iExposeExceptionsInCOM & 1) != 0; }
+ bool ShouldExposeExceptionsInCOMToMsgBox() const {LIMITED_METHOD_CONTRACT; return (iExposeExceptionsInCOM & 2) != 0; }
+
+ static bool RegexOrExactMatch(LPCUTF8 regex, LPCUTF8 input);
+
+ inline bool ShouldPrestubHalt(MethodDesc* pMethodInfo) const
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsInMethList(pPrestubHalt, pMethodInfo);
+ }
+
+ inline bool ShouldInvokeHalt(MethodDesc* pMethodInfo) const
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsInMethList(pInvokeHalt, pMethodInfo);
+ }
+
+
+ inline bool ShouldPrestubGC(MethodDesc* pMethodInfo) const
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsInMethList(pPrestubGC, pMethodInfo);
+ }
+ inline bool ShouldBreakOnClassLoad(LPCUTF8 className) const
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ PRECONDITION(CheckPointer(className, NULL_OK));
+ } CONTRACTL_END
+ return RegexOrExactMatch(pszBreakOnClassLoad, className);
+ }
+ inline bool ShouldBreakOnClassBuild(LPCUTF8 className) const
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ PRECONDITION(CheckPointer(className, NULL_OK));
+ } CONTRACTL_END
+ return RegexOrExactMatch(pszBreakOnClassBuild, className);
+ }
+ inline bool BreakOnInstantiationEnabled() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return pszBreakOnInstantiation != NULL;
+ }
+ inline bool ShouldBreakOnInstantiation(LPCUTF8 className) const
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ PRECONDITION(CheckPointer(className, NULL_OK));
+ } CONTRACTL_END
+ return RegexOrExactMatch(pszBreakOnInstantiation, className);
+ }
+ inline bool ShouldBreakOnMethod(LPCUTF8 methodName) const
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ PRECONDITION(CheckPointer(methodName, NULL_OK));
+ } CONTRACTL_END
+ return RegexOrExactMatch(pszBreakOnMethodName, methodName);
+ }
+ inline bool ShouldDumpOnClassLoad(LPCUTF8 className) const
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ PRECONDITION(CheckPointer(className, NULL_OK));
+ } CONTRACTL_END
+ return RegexOrExactMatch(pszDumpOnClassLoad, className);
+ }
+ inline bool ShouldBreakOnInteropStubSetup(LPCUTF8 methodName) const
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ PRECONDITION(CheckPointer(methodName, NULL_OK));
+ } CONTRACTL_END
+ return RegexOrExactMatch(pszBreakOnInteropStubSetup, methodName);
+ }
+ inline bool ShouldBreakOnComToClrNativeInfoInit(LPCUTF8 methodName) const
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ PRECONDITION(CheckPointer(methodName, NULL_OK));
+ } CONTRACTL_END
+ return RegexOrExactMatch(pszBreakOnComToClrNativeInfoInit, methodName);
+ }
+ inline bool ShouldBreakOnStructMarshalSetup(LPCUTF8 className) const
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ PRECONDITION(CheckPointer(className, NULL_OK));
+ } CONTRACTL_END
+ return RegexOrExactMatch(pszBreakOnStructMarshalSetup, className);
+ }
+ static HRESULT ParseTypeList(__in_z LPWSTR str, TypeNamesList** out);
+ static void DestroyTypeList(TypeNamesList* list);
+
+ inline bool ShouldGcCoverageOnMethod(LPCUTF8 methodName) const
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // MODE_ANY;
+ PRECONDITION(CheckPointer(methodName, NULL_OK));
+ } CONTRACTL_END
+ return (pszGcCoverageOnMethod == 0 || methodName == 0 || RegexOrExactMatch(pszGcCoverageOnMethod, methodName));
+ }
+
+ bool IsJitVerificationDisabled(void) const {LIMITED_METHOD_CONTRACT; return fJitVerificationDisable; }
+
+#ifdef WIN64EXCEPTIONS
+ bool SuppressLockViolationsOnReentryFromOS() const {LIMITED_METHOD_CONTRACT; return fSuppressLockViolationsOnReentryFromOS; }
+#endif
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ bool IsStubLinkerUnwindInfoVerificationOn() const { LIMITED_METHOD_CONTRACT; return fStubLinkerUnwindInfoVerificationOn; }
+#endif
+
+#endif // _DEBUG
+
+#ifdef FEATURE_COMINTEROP
+ inline bool LogCCWRefCountChangeEnabled()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return bLogCCWRefCountChange;
+ }
+
+ void SetLogCCWRefCountChangeEnabled(bool newVal);
+ bool ShouldLogCCWRefCountChange(LPCUTF8 pszClassName, LPCUTF8 pszNamespace) const;
+
+ inline bool EnableRCWCleanupOnSTAShutdown()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return fEnableRCWCleanupOnSTAShutdown;
+ }
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_CORECLR
+ bool VerifyModulesOnLoad(void) const { LIMITED_METHOD_CONTRACT; return fVerifyAllOnLoad; }
+#endif
+#ifdef _DEBUG
+ bool ExpandModulesOnLoad(void) const { LIMITED_METHOD_CONTRACT; return fExpandAllOnLoad; }
+#endif //_DEBUG
+
+#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
+ // Because the large object heap is 8 byte aligned, we want to put
+    // arrays of doubles there more aggressively than normal objects.
+ // This is the threshold for this. It is the number of doubles,
+ // not the number of bytes in the array.
+ unsigned int GetDoubleArrayToLargeObjectHeapThreshold() const { LIMITED_METHOD_CONTRACT; return DoubleArrayToLargeObjectHeapThreshold; }
+#endif
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ inline DWORD DefaultSharePolicy() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return dwSharePolicy;
+ }
+#endif
+
+ inline bool CacheBindingFailures() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return fCacheBindingFailures;
+ }
+
+ inline bool UseLegacyIdentityFormat() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return fUseLegacyIdentityFormat;
+ }
+
+ inline bool DisableFusionUpdatesFromADManager() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return fDisableFusionUpdatesFromADManager;
+ }
+
+ inline void SetDisableCommitThreadStack(bool val)
+ {
+ LIMITED_METHOD_CONTRACT;
+ fDisableCommitThreadStack = val;
+ }
+
+ inline bool GetDisableCommitThreadStack() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return fDisableCommitThreadStack;
+ }
+
+ inline bool ProbeForStackOverflow() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return fProbeForStackOverflow;
+ }
+
+ inline bool AppDomainUnload() const
+ {LIMITED_METHOD_CONTRACT; return fAppDomainUnload; }
+
+ inline DWORD AppDomainUnloadRetryCount() const
+ {LIMITED_METHOD_CONTRACT; return dwADURetryCount; }
+
+
+#ifdef _DEBUG
+ inline bool AppDomainLeaks() const
+ {LIMITED_METHOD_DAC_CONTRACT; return fAppDomainLeaks; }
+#endif
+
+ inline bool DeveloperInstallation() const
+ {LIMITED_METHOD_CONTRACT; return m_fDeveloperInstallation; }
+
+#ifdef TEST_DATA_CONSISTENCY
+ // get the value of fTestDataConsistency, which controls whether we test that we can correctly detect
+ // held locks in DAC builds. This is determined by an environment variable.
+ inline bool TestDataConsistency() const { LIMITED_METHOD_DAC_CONTRACT; return fTestDataConsistency; }
+#endif
+
+#ifdef _DEBUG
+
+ unsigned SuspendThreadDeadlockTimeoutMs() const
+ {LIMITED_METHOD_CONTRACT; return m_SuspendThreadDeadlockTimeoutMs; }
+
+ unsigned SuspendDeadlockTimeout() const
+ {LIMITED_METHOD_CONTRACT; return m_SuspendDeadlockTimeout; }
+
+ // Verifier
+ bool IsVerifierOff() const {LIMITED_METHOD_CONTRACT; return fVerifierOff; }
+
+ inline bool fAssertOnBadImageFormat() const
+ {LIMITED_METHOD_CONTRACT; return m_fAssertOnBadImageFormat; }
+
+ inline bool fAssertOnFailFast() const
+ {LIMITED_METHOD_CONTRACT; return m_fAssertOnFailFast; }
+
+ inline bool SuppressChecks() const
+ {LIMITED_METHOD_CONTRACT; return fSuppressChecks; }
+
+ inline bool Do_AllowUntrustedCaller_Checks()
+ {LIMITED_METHOD_CONTRACT; return fDoAllowUntrustedCallerChecks; }
+
+ inline bool EnableFullDebug() const
+ {LIMITED_METHOD_CONTRACT; return fEnableFullDebug; }
+
+#endif
+#ifdef ENABLE_STARTUP_DELAY
+ inline int StartupDelayMS()
+ { LIMITED_METHOD_CONTRACT; return iStartupDelayMS; }
+#endif
+
+#ifdef VERIFY_HEAP
+ // GC config
+ enum HeapVerifyFlags {
+ HEAPVERIFY_NONE = 0,
+ HEAPVERIFY_GC = 1, // Verify the heap at beginning and end of GC
+ HEAPVERIFY_BARRIERCHECK = 2, // Verify the brick table
+ HEAPVERIFY_SYNCBLK = 4, // Verify sync block scanning
+
+        // The following options can be used to mitigate some of the overhead introduced
+        // by heap verification. Some options might cause heap verification to be less
+        // effective depending on the scenario.
+
+ HEAPVERIFY_NO_RANGE_CHECKS = 0x10, // Excludes checking if an OBJECTREF is within the bounds of the managed heap
+ HEAPVERIFY_NO_MEM_FILL = 0x20, // Excludes filling unused segment portions with fill pattern
+ HEAPVERIFY_POST_GC_ONLY = 0x40, // Performs heap verification post-GCs only (instead of before and after each GC)
+        HEAPVERIFY_DEEP_ON_COMPACT = 0x80 // Performs deep object verification only on compacting GCs.
+ };
+
+ int GetHeapVerifyLevel() {LIMITED_METHOD_CONTRACT; return iGCHeapVerify; }
+
+ bool IsHeapVerifyEnabled() const {LIMITED_METHOD_CONTRACT; return iGCHeapVerify != 0; }
+#endif
+
+#if defined(STRESS_HEAP) || defined(_DEBUG)
+ void SetGCStressLevel(int val) {LIMITED_METHOD_CONTRACT; iGCStress = val; }
+
+ enum GCStressFlags {
+ GCSTRESS_NONE = 0,
+ GCSTRESS_ALLOC = 1, // GC on all allocs and 'easy' places
+        GCSTRESS_TRANSITION = 2, // GC on transitions to preemptive GC
+ GCSTRESS_INSTR_JIT = 4, // GC on every allowable JITed instr
+ GCSTRESS_INSTR_NGEN = 8, // GC on every allowable NGEN instr
+ GCSTRESS_UNIQUE = 16, // GC only on a unique stack trace
+ };
+
+ GCStressFlags GetGCStressLevel() const { WRAPPER_NO_CONTRACT; SUPPORTS_DAC; return GCStressFlags(iGCStress); }
+#endif
+
+#ifdef _DEBUG // TRACE_GC
+
+ int GetGCtraceStart() const {LIMITED_METHOD_CONTRACT; return iGCtraceStart; }
+ int GetGCtraceEnd () const {LIMITED_METHOD_CONTRACT; return iGCtraceEnd; }
+ int GetGCtraceFac () const {LIMITED_METHOD_CONTRACT; return iGCtraceFac; }
+ int GetGCprnLvl () const {LIMITED_METHOD_CONTRACT; return iGCprnLvl; }
+
+#endif
+
+#ifdef STRESS_HEAP
+
+ bool IsGCStressMix () const {LIMITED_METHOD_CONTRACT; return iGCStressMix != 0;}
+ int GetGCStressStep() const {LIMITED_METHOD_CONTRACT; return iGCStressStep; }
+#endif
+
+ bool IsGCBreakOnOOMEnabled() const {LIMITED_METHOD_CONTRACT; return fGCBreakOnOOM; }
+
+ size_t GetGCgen0size () const {LIMITED_METHOD_CONTRACT; return iGCgen0size; }
+ void SetGCgen0size (size_t iSize) {LIMITED_METHOD_CONTRACT; iGCgen0size = iSize; }
+ size_t GetSegmentSize () const {LIMITED_METHOD_CONTRACT; return iGCSegmentSize; }
+ void SetSegmentSize (size_t iSize) {LIMITED_METHOD_CONTRACT; iGCSegmentSize = iSize; }
+
+ int GetGCconcurrent() const {LIMITED_METHOD_CONTRACT; return iGCconcurrent; }
+ void SetGCconcurrent(int val) {LIMITED_METHOD_CONTRACT; iGCconcurrent = val; }
+#ifdef _DEBUG
+ int GetGCLatencyMode() const {LIMITED_METHOD_CONTRACT; return iGCLatencyMode; }
+#endif //_DEBUG
+ int GetGCForceCompact() const {LIMITED_METHOD_CONTRACT; return iGCForceCompact; }
+ int GetGCRetainVM () const {LIMITED_METHOD_CONTRACT; return iGCHoardVM;}
+ int GetGCLOHCompactionMode() const {LIMITED_METHOD_CONTRACT; return iGCLOHCompactionMode;}
+
+#ifdef GCTRIMCOMMIT
+
+ int GetGCTrimCommit() const {LIMITED_METHOD_CONTRACT; return iGCTrimCommit;}
+
+#endif
+
+#ifdef FEATURE_CONSERVATIVE_GC
+ bool GetGCConservative() const {LIMITED_METHOD_CONTRACT; return iGCConservative;}
+#endif
+#ifdef _WIN64
+ bool GetGCAllowVeryLargeObjects() const {LIMITED_METHOD_CONTRACT; return iGCAllowVeryLargeObjects;}
+#endif
+#ifdef _DEBUG
+ bool SkipGCCoverage(LPCUTF8 assemblyName) const {WRAPPER_NO_CONTRACT; return (pSkipGCCoverageList != NULL
+ && pSkipGCCoverageList->IsInList(assemblyName));}
+#endif
+
+
+ // thread stress: number of threads to run
+#ifdef STRESS_THREAD
+ DWORD GetStressThreadCount () const {LIMITED_METHOD_CONTRACT; return dwStressThreadCount;}
+#endif
+
+#ifdef _DEBUG
+ inline DWORD FastGCStressLevel() const
+ {LIMITED_METHOD_CONTRACT; return iFastGCStress;}
+
+ inline DWORD InjectFatalError() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return iInjectFatalError;
+ }
+
+ inline BOOL SaveThreadInfo() const
+ {
+ return fSaveThreadInfo;
+ }
+
+ inline DWORD SaveThreadInfoMask() const
+ {
+ return dwSaveThreadInfoMask;
+ }
+#endif
+
+
+#ifdef _DEBUG
+ // Interop config
+ IUnknown* GetTraceIUnknown() const {LIMITED_METHOD_CONTRACT; return m_pTraceIUnknown; }
+ int GetTraceWrapper() const {LIMITED_METHOD_CONTRACT; return m_TraceWrapper; }
+#endif
+
+ // Loader
+
+ enum RequireZapsType
+ {
+        REQUIRE_ZAPS_NONE, // Don't care if native image is used or not
+        REQUIRE_ZAPS_ALL, // All assemblies must have native images
+        REQUIRE_ZAPS_ALL_JIT_OK, // All assemblies must have native images, but it's OK if the JIT-compiler also gets used (if some function was not ngenned)
+        REQUIRE_ZAPS_SUPPORTED, // All assemblies must have native images, unless the loader does not support the scenario. It's OK if the JIT-compiler also gets used
+
+ REQUIRE_ZAPS_COUNT
+ };
+ RequireZapsType RequireZaps() const {LIMITED_METHOD_CONTRACT; return iRequireZaps; }
+ bool RequireZap(LPCUTF8 assemblyName) const;
+#ifdef _DEBUG
+ bool ForbidZap(LPCUTF8 assemblyName) const;
+#endif
+
+#ifdef _TARGET_AMD64_
+ bool DisableNativeImageLoad(LPCUTF8 assemblyName) const;
+ bool IsDisableNativeImageLoadListNonEmpty() const { LIMITED_METHOD_CONTRACT; return (pDisableNativeImageLoadList != NULL); }
+#endif
+
+ LPCWSTR ZapSet() const { LIMITED_METHOD_CONTRACT; return pZapSet; }
+
+ bool NgenBindOptimizeNonGac() const { LIMITED_METHOD_CONTRACT; return fNgenBindOptimizeNonGac; }
+
+ LPUTF8 GetZapBBInstr() const { LIMITED_METHOD_CONTRACT; return szZapBBInstr; }
+ LPWSTR GetZapBBInstrDir() const { LIMITED_METHOD_CONTRACT; return szZapBBInstrDir; }
+ DWORD DisableStackwalkCache() const {LIMITED_METHOD_CONTRACT; return dwDisableStackwalkCache; }
+ DWORD UseNewCrossDomainRemoting() const { LIMITED_METHOD_CONTRACT; return fUseNewCrossDomainRemoting; }
+
+ bool StressLog() const { LIMITED_METHOD_CONTRACT; return fStressLog; }
+ bool ForceEnc() const { LIMITED_METHOD_CONTRACT; return fForceEnc; }
+
+ // Optimizations to improve working set
+
+ HRESULT sync(); // check the registry again and update local state
+
+ // Helpers to read configuration
+
+ // This function exposes the config file data to CLRConfig. A pointer to this function is passed into CLRConfig on EEConfig::init.
+ // We are using BOOLs instead of ConfigSearch for direction since CLRConfig isn't always linked to EEConfig.
+ static HRESULT GetConfigValueCallback(__in_z LPCWSTR pKey, __deref_out_opt LPCWSTR* value, BOOL systemOnly, BOOL applicationFirst);
+
+ //
+ // NOTE: The following function is deprecated; use the CLRConfig class instead.
+ // To access a configuration value through CLRConfig, add an entry in file:../inc/CLRConfigValues.h.
+ //
+ static HRESULT GetConfigString_DontUse_(__in_z LPCWSTR name, __deref_out_z LPWSTR*out, BOOL fPrependCOMPLUS = TRUE,
+ ConfigSearch direction = CONFIG_SYSTEM); // Note that you own the returned string!
+
+ //
+ // NOTE: The following function is deprecated; use the CLRConfig class instead.
+ // To access a configuration value through CLRConfig, add an entry in file:../inc/CLRConfigValues.h.
+ //
+ static DWORD GetConfigDWORD_DontUse_(__in_z LPCWSTR name, DWORD defValue,
+ DWORD level=(DWORD) REGUTIL::COR_CONFIG_ALL,
+ BOOL fPrependCOMPLUS = TRUE,
+ ConfigSearch direction = CONFIG_SYSTEM);
+
+ //
+ // NOTE: The following function is deprecated; use the CLRConfig class instead.
+ // To access a configuration value through CLRConfig, add an entry in file:../inc/CLRConfigValues.h.
+ //
+ static ULONGLONG GetConfigULONGLONG_DontUse_(__in_z LPCWSTR name, ULONGLONG defValue,
+ DWORD level=(DWORD) REGUTIL::COR_CONFIG_ALL,
+ BOOL fPrependCOMPLUS = TRUE,
+ ConfigSearch direction = CONFIG_SYSTEM);
+ //
+ // NOTE: The following function is deprecated; use the CLRConfig class instead.
+ // To access a configuration value through CLRConfig, add an entry in file:../inc/CLRConfigValues.h.
+ //
+ static DWORD GetConfigDWORDFavoringConfigFile_DontUse_(__in_z LPCWSTR name, DWORD defValue,
+ DWORD level=(DWORD) REGUTIL::COR_CONFIG_ALL,
+ BOOL fPrependCOMPLUS = TRUE,
+ ConfigSearch direction = CONFIG_SYSTEM);
+
+ //
+ // NOTE: The following function is deprecated; use the CLRConfig class instead.
+ // To access a configuration value through CLRConfig, add an entry in file:../inc/CLRConfigValues.h.
+ //
+ static DWORD GetConfigFlag_DontUse_(__in_z LPCWSTR name, DWORD bitToSet, bool defValue = FALSE);
+
+#ifdef _DEBUG
+ // GC alloc logging
+ bool ShouldLogAlloc(const char *pClass) const { LIMITED_METHOD_CONTRACT; return pPerfTypesToLog && pPerfTypesToLog->IsInList(pClass);}
+ int AllocSizeThreshold() const {LIMITED_METHOD_CONTRACT; return iPerfAllocsSizeThreshold; }
+ int AllocNumThreshold() const { LIMITED_METHOD_CONTRACT; return iPerfNumAllocsThreshold; }
+
+#endif // _DEBUG
+
+ enum NgenHardBindType
+ {
+ NGEN_HARD_BIND_NONE, // Do not hardbind at all
+ NGEN_HARD_BIND_LIST, // Only hardbind to what is specified by CustomAttributes (and any default assemblies specified by the CLR)
+ NGEN_HARD_BIND_ALL, // Hardbind to any existing ngen images if possible
+ NGEN_HARD_BIND_COUNT,
+
+ NGEN_HARD_BIND_DEFAULT = NGEN_HARD_BIND_LIST,
+ };
+
+ NgenHardBindType NgenHardBind() { LIMITED_METHOD_CONTRACT; return iNgenHardBind; }
+
+#ifdef _DEBUG
+ DWORD NgenForceFailureMask() { LIMITED_METHOD_CONTRACT; return dwNgenForceFailureMask; }
+ DWORD NgenForceFailureCount() { LIMITED_METHOD_CONTRACT; return dwNgenForceFailureCount; }
+ DWORD NgenForceFailureKind() { LIMITED_METHOD_CONTRACT; return dwNgenForceFailureKind; }
+#endif
+ enum GCPollType
+ {
+ GCPOLL_TYPE_DEFAULT, // Use the default gc poll for the platform
+ GCPOLL_TYPE_HIJACK, // Depend on thread hijacking for gc suspension
+ GCPOLL_TYPE_POLL, // Emit function calls to a helper for GC Poll
+ GCPOLL_TYPE_INLINE, // Emit inlined tests to the helper for GC Poll
+ GCPOLL_TYPE_COUNT
+ };
+ GCPollType GetGCPollType() { LIMITED_METHOD_CONTRACT; return iGCPollType; }
+
+#ifdef _DEBUG
+ BOOL ShouldGenerateStubForHost() const {LIMITED_METHOD_CONTRACT; return fGenerateStubForHost;}
+ void DisableGenerateStubForHost() {LIMITED_METHOD_CONTRACT; fGenerateStubForHost = FALSE;}
+
+ DWORD GetHostTestADUnload() const {LIMITED_METHOD_CONTRACT; return testADUnload;}
+
+ DWORD GetHostTestThreadAbort() const {LIMITED_METHOD_CONTRACT; return testThreadAbort;}
+
+#define INJECTFAULT_LOADERHEAP 0x1
+#define INJECTFAULT_HANDLETABLE 0x1
+#define INJECTFAULT_GCHEAP 0x2
+#define INJECTFAULT_SO 0x4
+#define INJECTFAULT_GMHEAP 0x8
+#define INJECTFAULT_DYNAMICCODEHEAP 0x10
+#define INJECTFAULT_MAPVIEWOFFILE 0x20
+#define INJECTFAULT_JITHEAP 0x40
+
+ DWORD ShouldInjectFault(DWORD faultType) const {LIMITED_METHOD_CONTRACT; return fShouldInjectFault & faultType;}
+
+#endif
+
+private: //----------------------------------------------------------------
+
+ // @TODO - Fusion needs to be able to read this value, but they are unable to
+ // pull in all of the appropriate headers for all of the #defines found below.
+ // As long as this is defined at the top of the object, the "incorrect offsets" that
+ // will come as a result won't matter.
+ bool fCacheBindingFailures;
+ bool fUseLegacyIdentityFormat;
+ bool fDisableFusionUpdatesFromADManager;
+ bool fInited; // have we synced to the registry at least once?
+
+ // Jit-config
+
+ bool fJitFramed; // Enable/Disable EBP based frames
+ bool fJitAlignLoops; // Enable/Disable loop alignment
+ bool fAddRejitNops; // Enable/Disable nop padding for rejit. default is true
+ bool fJitMinOpts; // Enable MinOpts for all jitted methods
+
+ unsigned iJitOptimizeType; // 0=Blended,1=SmallCode,2=FastCode, default is 0=Blended
+
+ unsigned fPInvokeRestoreEsp; // -1=Default, 0=Never, Else=Always
+
+ bool fLegacyNullReferenceExceptionPolicy; // Old AV's as NullRef behavior
+ bool fLegacyUnhandledExceptionPolicy; // Old unhandled exception policy (many are swallowed)
+ bool fLegacyVirtualMethodCallVerification; // Old (pre-whidbey) policy for call (nonvirt) of virtual function
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ bool fLegacyCorruptedStateExceptionsPolicy;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ bool fLegacyApartmentInitPolicy; // Old nondeterministic COM apartment initialization switch
+ bool fLegacyComHierarchyVisibility; // Old behavior allowing QIs for classes with invisible parents
+ bool fLegacyComVTableLayout; // Old behavior passing out IClassX interface for IUnknown and IDispatch.
+ bool fNewComVTableLayout; // New behavior passing out Basic interface for IUnknown and IDispatch.
+
+ // SECURITY
+    unsigned iImpersonationPolicy; // control flow of impersonation in the SecurityContext. 0=FASTFLOW, 1=NOFLOW, 2=ALWAYSFLOW (see the IMP_* values above)
+#ifdef _DEBUG
+ bool fLogTransparencyErrors; // don't throw on transparency errors, instead log to the CLR log file
+#endif // _DEBUG
+ bool fLegacyLoadMscorsnOnStartup; // load mscorsn.dll when starting up the runtime.
+ bool fBypassStrongNameVerification; // bypass strong name verification of trusted app assemblies
+ bool fGeneratePublisherEvidence; // verify Authenticode signatures of assemblies during load, generating publisher evidence for them
+    bool fEnforceFIPSPolicy; // enforce that only FIPS certified crypto algorithms are created if the FIPS machine setting is enabled
+ bool fLegacyHMACMode; // HMACSHA384 and HMACSHA512 should default to the Whidbey block size
+
+ LPUTF8 pszBreakOnClassLoad; // Halt just before loading this class
+
+#ifdef TEST_DATA_CONSISTENCY
+ bool fTestDataConsistency; // true if we are testing locks for data consistency in the debugger--
+ // If a lock is held during inspection, we assume the data under the lock
+ // is inconsistent. We have a special code path for testing this
+ // which we will follow if this is set. The value is determined by
+ // the environment variable TestDataConsistency
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ bool m_fComInsteadOfManagedRemoting; // When communicating with a cross app domain CCW, use COM instead of managed remoting.
+#endif
+ bool m_fInteropValidatePinnedObjects; // After returning from a M->U interop call, validate GC heap around objects pinned by IL stubs.
+ bool m_fInteropLogArguments; // Log all pinned arguments passed to an interop call
+
+#ifdef _DEBUG
+ static HRESULT ParseMethList(__in_z LPWSTR str, MethodNamesList* * out);
+ static void DestroyMethList(MethodNamesList* list);
+ static bool IsInMethList(MethodNamesList* list, MethodDesc* pMD);
+
+ bool fDebuggable;
+ bool fStressOn;
+ int apiThreadStressCount;
+
+ MethodNamesList* pPrestubHalt; // list of methods on which to break when hit prestub
+ MethodNamesList* pPrestubGC; // list of methods on which to cause a GC when hit prestub
+ MethodNamesList* pInvokeHalt; // list of methods on which to break when hit prestub
+
+
+ LPUTF8 pszBreakOnClassBuild; // Halt just before loading this class
+ LPUTF8 pszBreakOnInstantiation; // Halt just before instantiating a non-canonical generic type
+ LPUTF8 pszBreakOnMethodName; // Halt when doing something with this method in the class defined in ClassBuild
+ LPUTF8 pszDumpOnClassLoad; // Dump the class to the log
+
+ LPUTF8 pszBreakOnInteropStubSetup; // Halt before we set up the interop stub for a method
+ LPUTF8 pszBreakOnComToClrNativeInfoInit; // Halt before we init the native info for a COM to CLR call
+ LPUTF8 pszBreakOnStructMarshalSetup; // Halt before the field marshallers are set up for a struct
+
+ bool fAppDomainLeaks; // Enable appdomain leak detection for object refs
+
+ bool m_fAssertOnBadImageFormat; // If false, don't assert on invalid IL (for testing)
+ bool m_fAssertOnFailFast; // If false, don't assert if we detect a stack corruption
+
+ bool fConditionalContracts; // Conditional contracts (off inside asserts)
+ bool fSuppressChecks; // Disable checks (including contracts)
+
+ DWORD iExposeExceptionsInCOM; // Should we exposed exceptions that will be transformed into HRs?
+
+ // Tlb Tools
+ bool fTlbImpSkipLoading;
+
+ unsigned m_SuspendThreadDeadlockTimeoutMs; // Used in Thread::SuspendThread()
+ unsigned m_SuspendDeadlockTimeout; // Used in Thread::SuspendRuntime.
+
+ bool fEnableFullDebug;
+#endif // _DEBUG
+
+#ifdef FEATURE_COMINTEROP
+ bool bLogCCWRefCountChange; // Is CCW logging on
+ LPCUTF8 pszLogCCWRefCountChange; // OutputDebugString when AddRef/Release is called on a CCW
+ // for the specified type(s)
+ bool fEnableRCWCleanupOnSTAShutdown; // Register our IInitializeSpy even in classic processes
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
+ unsigned int DoubleArrayToLargeObjectHeapThreshold; // double arrays of more than this number of elems go in large object heap
+#endif
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ DWORD dwSharePolicy; // Default policy for loading assemblies into the domain neutral area
+#endif
+
+ // Only developer machines are allowed to use DEVPATH. This value is set when there is an appropriate entry
+ // in the machine configuration file. This should not be sent out in the redist.
+ bool m_fDeveloperInstallation; // We are on a developers machine
+ bool fAppDomainUnload; // Enable appdomain unloading
+
+#ifdef FEATURE_CORECLR
+ bool fVerifyAllOnLoad; // True if we want to verify all methods in an assembly at load time.
+#endif //FEATURE_CORECLR
+
+ DWORD dwADURetryCount;
+
+#ifdef _DEBUG
+ bool fExpandAllOnLoad; // True if we want to load all types/jit all methods in an assembly
+ // at load time.
+ bool fJitVerificationDisable; // Turn off jit verification (for testing purposes only)
+
+
+ // Verifier
+ bool fVerifierOff;
+
+ bool fDoAllowUntrustedCallerChecks; // do AllowUntrustedCallerChecks
+
+#ifdef WIN64EXCEPTIONS
+ bool fSuppressLockViolationsOnReentryFromOS;
+#endif
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ bool fStubLinkerUnwindInfoVerificationOn;
+#endif
+#endif // _DEBUG
+#ifdef ENABLE_STARTUP_DELAY
+ int iStartupDelayMS; //Adds sleep to startup.
+#endif
+
+ // Spinning heuristics
+ DWORD dwSpinInitialDuration;
+ DWORD dwSpinBackoffFactor;
+ DWORD dwSpinLimitProcCap;
+ DWORD dwSpinLimitProcFactor;
+ DWORD dwSpinLimitConstant;
+ DWORD dwSpinRetryCount;
+
+#ifdef VERIFY_HEAP
+ int iGCHeapVerify;
+#endif
+
+#ifdef _DEBUG // TRACE_GC
+
+ int iGCtraceStart;
+ int iGCtraceEnd;
+ int iGCtraceFac;
+ int iGCprnLvl;
+
+#endif
+
+#if defined(STRESS_HEAP) || defined(_DEBUG)
+ int iGCStress;
+#endif
+
+#ifdef STRESS_HEAP
+ int iGCStressMix;
+ int iGCStressStep;
+#endif
+
+#define DEFAULT_GC_PRN_LVL 3
+ size_t iGCgen0size;
+ size_t iGCSegmentSize;
+ int iGCconcurrent;
+#ifdef _DEBUG
+ int iGCLatencyMode;
+#endif //_DEBUG
+ int iGCForceCompact;
+ int iGCHoardVM;
+ int iGCLOHCompactionMode;
+
+#ifdef GCTRIMCOMMIT
+
+ int iGCTrimCommit;
+
+#endif
+
+#ifdef FEATURE_CONSERVATIVE_GC
+ bool iGCConservative;
+#endif // FEATURE_CONSERVATIVE_GC
+#ifdef _WIN64
+ bool iGCAllowVeryLargeObjects;
+#endif // _WIN64
+
+ bool fGCBreakOnOOM;
+
+#ifdef STRESS_THREAD
+ DWORD dwStressThreadCount;
+#endif
+
+#ifdef _DEBUG
+ DWORD iFastGCStress;
+ LPUTF8 pszGcCoverageOnMethod;
+
+ DWORD iInjectFatalError;
+
+ BOOL fSaveThreadInfo;
+ DWORD dwSaveThreadInfoMask;
+
+ AssemblyNamesList *pSkipGCCoverageList;
+#endif
+
+ RequireZapsType iRequireZaps;
+ // Assemblies which need to have native images.
+ // This is only used if iRequireZaps!=REQUIRE_ZAPS_NONE
+ // This can be used to enforce that ngen images are used only selectively for some assemblies
+ AssemblyNamesList * pRequireZapsList;
+ // assemblies which need NOT have native images.
+ // This is only used if iRequireZaps!=REQUIRE_ZAPS_NONE
+ // This overrides pRequireZapsList.
+ AssemblyNamesList * pRequireZapsExcludeList;
+
+#ifdef _DEBUG
+ // Exact opposite of require zaps
+ BOOL iForbidZaps;
+ AssemblyNamesList * pForbidZapsList;
+ AssemblyNamesList * pForbidZapsExcludeList;
+#endif
+
+#ifdef _TARGET_AMD64_
+ // Assemblies for which we will not load a native image. This is from the COMPLUS_DisableNativeImageLoadList
+    // variable / reg key. It performs the same function as the config file key "<disableNativeImageLoad>" (except
+    // that it is just a list of assembly names, whereas the config file key can specify full assembly identities).
+ // This was added to support COMPLUS_UseLegacyJit, to support the rollout of RyuJIT to replace JIT64, where
+ // the user can cause the CLR to fall back to JIT64 for JITting but not for NGEN. This allows the user to
+ // force JITting for a specified list of NGEN assemblies.
+ AssemblyNamesList * pDisableNativeImageLoadList;
+#endif
+
+ LPCWSTR pZapSet;
+
+ bool fNgenBindOptimizeNonGac;
+
+ bool fStressLog;
+ bool fForceEnc;
+ bool fDisableCommitThreadStack;
+ bool fProbeForStackOverflow;
+
+ // Stackwalk optimization flag
+ DWORD dwDisableStackwalkCache;
+
+ // New cross domain remoting
+ DWORD fUseNewCrossDomainRemoting;
+
+ LPUTF8 szZapBBInstr;
+ LPWSTR szZapBBInstrDir;
+
+#ifdef _DEBUG
+ // interop logging
+ IUnknown* m_pTraceIUnknown;
+ int m_TraceWrapper;
+#endif
+
+ // Flag to keep track of memory
+ int m_fFreepZapSet;
+
+#ifdef _DEBUG
+ // GC Alloc perf flags
+ int iPerfNumAllocsThreshold; // Start logging after this many allocations are made
+ int iPerfAllocsSizeThreshold; // Log allocations of this size or above
+ TypeNamesList* pPerfTypesToLog; // List of types whose allocations are to be logged
+
+#endif // _DEBUG
+
+ // New configuration
+ ConfigList m_Configuration;
+
+ BOOL fEnableHardbinding;
+ NgenHardBindType iNgenHardBind;
+#ifdef _DEBUG
+ DWORD dwNgenForceFailureMask;
+ DWORD dwNgenForceFailureCount;
+ DWORD dwNgenForceFailureKind;
+#endif
+
+ GCPollType iGCPollType;
+
+#ifdef _DEBUG
+ BOOL fGenerateStubForHost;
+ DWORD fShouldInjectFault;
+ DWORD testADUnload;
+ DWORD testThreadAbort;
+#endif
+
+public:
+#ifndef FEATURE_CORECLR // unimpactful install --> no config files
+ HRESULT ImportConfigurationFile(
+ ConfigStringHashtable* pTable,
+ LPCWSTR pszFileName,
+ LPCWSTR version,
+ ParseCtl parseCtl = parseAll);
+
+ HRESULT AppendConfigurationFile(
+ LPCWSTR pszFileName,
+ LPCWSTR version,
+ ParseCtl parseCtl = parseAll);
+
+ HRESULT SetupConfiguration();
+#endif // FEATURE_CORECLR
+
+ HRESULT GetConfiguration_DontUse_(__in_z LPCWSTR pKey, ConfigSearch direction, __deref_out_opt LPCWSTR* value);
+ LPCWSTR GetProcessBindingFile(); // All flavors must support this method
+ SIZE_T GetSizeOfProcessBindingFile(); // All flavors must support this method
+
+ DWORD GetConfigDWORDInternal_DontUse_ (__in_z LPCWSTR name, DWORD defValue, //for getting data in the constructor of EEConfig
+ DWORD level=(DWORD) REGUTIL::COR_CONFIG_ALL,
+ BOOL fPrependCOMPLUS = TRUE,
+ ConfigSearch direction = CONFIG_SYSTEM);
+
+ enum BitForMask {
+ CallSite_1 = 0x0001,
+ CallSite_2 = 0x0002,
+ CallSite_3 = 0x0004,
+ CallSite_4 = 0x0008,
+ CallSite_5 = 0x0010,
+ CallSite_6 = 0x0020,
+ CallSite_7 = 0x0040,
+ CallSite_8 = 0x0080,
+ };
+
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ void DebugCheckAndForceIBCFailure(BitForMask bitForMask);
+#endif
+
+#if defined(_DEBUG)
+#if defined(_TARGET_AMD64_)
+private:
+
+    // Defaults to 0, which means we will not generate long jump dispatch stubs.
+    // If this is set to a positive integer x, then 1/x of the dispatch stubs we
+    // generate will be long jump stubs; so if x is 4, every fourth dispatch
+    // stub will be a long jump stub.
+ size_t m_cGenerateLongJumpDispatchStubRatio;
+
+ // Total count of stubs generated, used with above variable to determine if
+ // the next stub should be a long jump.
+ size_t m_cDispatchStubsGenerated;
+
+public:
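+    // Note (assumption): callers are expected to take this path only when
+    // m_cGenerateLongJumpDispatchStubRatio has been set to a positive value,
+    // since the modulo below is undefined when the ratio is its default of 0.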
+ BOOL ShouldGenerateLongJumpDispatchStub()
+ {
+ return (m_cDispatchStubsGenerated++ % m_cGenerateLongJumpDispatchStubRatio) == 0;
+ }
+#else
+public:
+ // Just return false when we're in DEBUG but not on AMD64
+ BOOL ShouldGenerateLongJumpDispatchStub()
+ {
+ return FALSE;
+ }
+#endif // _TARGET_AMD64_
+#endif // _DEBUG
+
+#if defined(_DEBUG)
+private:
+ bool bDiagnosticSuspend;
+
+public:
+ bool GetDiagnosticSuspend()
+ { return bDiagnosticSuspend; }
+#endif
+
+private:
+ DWORD dwSleepOnExit;
+
+public:
+ DWORD GetSleepOnExit()
+ { return dwSleepOnExit; }
+
+#if FEATURE_APPX
+private:
+ DWORD dwWindows8ProfileAPICheckFlag;
+
+public:
+ DWORD GetWindows8ProfileAPICheckFlag() { return dwWindows8ProfileAPICheckFlag; }
+#endif
+};
+
+
+
+#ifdef _DEBUG_IMPL
+
+ // We actually want our asserts for illegal IL, but testers need to test that
+ // we fail gracefully under those conditions. Thus we have to hide them for those runs.
+#define BAD_FORMAT_NOTHROW_ASSERT(str) \
+ do { \
+ if (g_pConfig->fAssertOnBadImageFormat()) { \
+ _ASSERTE(str); \
+ } \
+ else if (!(str)) { \
+ if (IsDebuggerPresent()) DebugBreak(); \
+ } \
+ } while(0)
+
+    // STRESS_ASSERT is meant for temporary additions to the code base that stop the
+    // runtime quickly when running stress
+#define STRESS_ASSERT(cond) do { if (!(cond) && g_pConfig->IsStressOn()) DebugBreak(); } while(0)
+
+#define FILE_FORMAT_CHECK_MSG(_condition, _message) \
+ do { \
+ if (g_pConfig != NULL && g_pConfig->fAssertOnBadImageFormat()) \
+ ASSERT_CHECK(_condition, _message, "Bad file format"); \
+ else if (!(_condition)) \
+ DebugBreak(); \
+ } while (0)
+
+#define FILE_FORMAT_CHECK(_condition) FILE_FORMAT_CHECK_MSG(_condition, "")
+
+#else
+
+#define STRESS_ASSERT(cond)
+#define BAD_FORMAT_NOTHROW_ASSERT(str)
+
+#define FILE_FORMAT_CHECK_MSG(_condition, _message)
+#define FILE_FORMAT_CHECK(_condition)
+
+#endif
+
+void InitHostProtectionManager();
+
+extern BYTE g_CorHostProtectionManagerInstance[];
+
+inline CorHostProtectionManager* GetHostProtectionManager()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+// MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ return (CorHostProtectionManager*)g_CorHostProtectionManagerInstance;
+}
+
+extern BOOL g_CLRPolicyRequested;
+
+// NGENImagesAllowed is the safe way to determine if NGEN Images are allowed to be loaded. (Defined as
+// a macro instead of an inlined function to avoid compilation errors due to dependent
+// definitions not being available to this header.)
+#ifdef PROFILING_SUPPORTED
+#define NGENImagesAllowed() \
+ (g_fAllowNativeImages && /* No one disabled use of native images */ \
+ !(CORProfilerDisableAllNGenImages())) /* Profiler didn't explicitly refuse NGEN images */
+#else
+#define NGENImagesAllowed() \
+ (g_fAllowNativeImages)
+#endif
+
+#endif // EECONFIG_H
diff --git a/src/vm/eeconfigfactory.cpp b/src/vm/eeconfigfactory.cpp
new file mode 100644
index 0000000000..6920ca4134
--- /dev/null
+++ b/src/vm/eeconfigfactory.cpp
@@ -0,0 +1,399 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// EEConfigFactory.cpp
+//
+
+//
+// Factory used with the XML parser to read configuration files
+//
+
+#include "common.h"
+#include "ngenoptout.h"
+#include "eeconfigfactory.h"
+
+
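+// Matches the horizontal-tab..carriage-return range (0x09..0x0D) plus space;
+// && binds tighter than ||, so this groups as ((ch >= 0x09 && ch <= 0x0D) || (ch) == 0x20).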
+#define ISWHITE(ch) ((ch) >= 0x09 && (ch) <= 0x0D || (ch) == 0x20)
+
+#define CONST_STRING_AND_LEN(str) str, NumItems(str)-1
+
+
+int EEXMLStringCompare(const WCHAR *pStr1,
+ DWORD cchStr1,
+ const WCHAR *pStr2,
+ DWORD cchStr2)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (cchStr1 != cchStr2)
+ return -1;
+
+ return wcsncmp(pStr1, pStr2, cchStr1);
+}// EEXMLStringCompare
+
+
+int EEXMLStringComparei(const WCHAR *pStr1,
+ DWORD cchStr1,
+ const WCHAR *pStr2,
+ DWORD cchStr2)
+{
+ WRAPPER_NO_CONTRACT;
+ if (cchStr1 != cchStr2)
+ return -1;
+
+ return SString::_wcsnicmp(pStr1, pStr2, cchStr1);
+}// EEXMLStringComparei
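+// Note: both comparers return -1 whenever the lengths differ, so they are
+// equality tests (zero / non-zero), not total orderings.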
+
+
+
+EEConfigFactory::EEConfigFactory(
+ ConfigStringHashtable* pTable,
+ LPCWSTR pString,
+ ParseCtl parseCtl)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pTable = pTable;
+ m_pVersion = pString;
+ m_dwDepth = 0;
+ m_fUnderRuntimeElement = FALSE;
+ m_fDeveloperSettings = FALSE;
+    m_fVersionedRuntime = FALSE;
+ m_fOnEnabledAttribute = FALSE;
+ m_fOnValueAttribute = FALSE;
+ m_pCurrentRuntimeElement = m_pBuffer;
+ m_dwCurrentRuntimeElement = 0;
+ m_dwSize = CONFIG_KEY_SIZE;
+ m_parseCtl = parseCtl;
+ m_pActiveFactory = NULL;
+}
+
+EEConfigFactory::~EEConfigFactory()
+{
+ LIMITED_METHOD_CONTRACT;
+ DeleteKey();
+}
+
+HRESULT STDMETHODCALLTYPE EEConfigFactory::NotifyEvent(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ XML_NODEFACTORY_EVENT iEvt)
+{
+ LIMITED_METHOD_CONTRACT;
+ if(iEvt == XMLNF_ENDDOCUMENT) {
+ // <TODO> add error handling.</TODO>
+ }
+ if(m_pActiveFactory != NULL)
+ return m_pActiveFactory->NotifyEvent(pSource, iEvt);
+
+ return S_OK;
+}
+//---------------------------------------------------------------------------
+HRESULT STDMETHODCALLTYPE EEConfigFactory::BeginChildren(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ XML_NODE_INFO __RPC_FAR *pNodeInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwDepth++;
+ if(m_pActiveFactory != NULL)
+ return m_pActiveFactory->BeginChildren(pSource, pNodeInfo);
+ return S_OK;
+
+}
+//---------------------------------------------------------------------------
+HRESULT STDMETHODCALLTYPE EEConfigFactory::EndChildren(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ BOOL fEmptyNode,
+ /* [in] */ XML_NODE_INFO __RPC_FAR *pNodeInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+ if ( fEmptyNode ) {
+ m_fDeveloperSettings = FALSE;
+ }
+ else {
+ m_dwDepth--;
+ }
+
+ if (m_pActiveFactory != NULL)
+ {
+ HRESULT hr = S_OK;
+ IfFailRet(m_pActiveFactory->EndChildren(pSource, fEmptyNode, pNodeInfo));
+
+
+ if(m_dwDepth == 2) // when generalizing: use the current active factory depth
+ {
+ m_pActiveFactory = NULL;
+ }
+
+ }
+
+ if (m_fUnderRuntimeElement && wcscmp(pNodeInfo->pwcText, W("runtime")) == 0) {
+ m_fUnderRuntimeElement = FALSE;
+ m_fVersionedRuntime = FALSE;
+ ClearKey();
+ // CLR_STARTUP_OPT:
+ // Early out if we only need to read <runtime> section.
+ //
+ if (m_parseCtl == stopAfterRuntimeSection)
+ pSource->Abort(NULL/*unused*/);
+ }
+
+ return S_OK;
+}
+//---------------------------------------------------------------------------
+HRESULT STDMETHODCALLTYPE EEConfigFactory::CreateNode(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ PVOID pNode,
+ /* [in] */ USHORT cNumRecs,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR * __RPC_FAR apNodeInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ if(m_pActiveFactory != NULL)
+ return m_pActiveFactory->CreateNode(pSource, pNode, cNumRecs, apNodeInfo);
+
+ if(m_dwDepth > 3)
+ {
+
+ return S_OK;
+ }
+
+ HRESULT hr = S_OK;
+ DWORD dwStringSize = 0;
+ WCHAR* pszString = NULL;
+ DWORD i;
+ BOOL fRuntimeKey = FALSE;
+ BOOL fVersion = FALSE;
+
+ for( i = 0; i < cNumRecs; i++) {
+ CONTRACT_VIOLATION(ThrowsViolation); // Lots of stuff in here throws!
+
+ if(apNodeInfo[i]->dwType == XML_ELEMENT ||
+ apNodeInfo[i]->dwType == XML_ATTRIBUTE ||
+ apNodeInfo[i]->dwType == XML_PCDATA) {
+
+ dwStringSize = apNodeInfo[i]->ulLen;
+ pszString = (WCHAR*) apNodeInfo[i]->pwcText;
+ // Trim the value
+
+            // we should never decrement dwStringSize if it's 0, because it's unsigned
+
+ for(;*pszString && ISWHITE(*pszString) && dwStringSize>0; pszString++, dwStringSize--);
+ while( dwStringSize > 0 && ISWHITE(pszString[dwStringSize-1]))
+ dwStringSize--;
+
+            // NOTE: pszString is not guaranteed to be null-terminated. Use EEXMLStringCompare to do
+            // string comparisons on it
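+            // Trim example (hypothetical values): for pwcText L"  gcServer  " with
+            // ulLen 12, the loops above leave pszString pointing at the 'g' and
+            // dwStringSize == 8; the underlying buffer itself is not modified.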
+
+ switch(apNodeInfo[i]->dwType) {
+ case XML_ELEMENT :
+ fRuntimeKey = FALSE;
+ ClearKey();
+
+ if (m_dwDepth == 1 && EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("runtime"))) == 0) {
+ m_fUnderRuntimeElement = TRUE;
+ fRuntimeKey = TRUE;
+ }
+
+ if(m_dwDepth == 2 && m_fUnderRuntimeElement) {
+
+ // Developer settings can look like
+ // <runtime>
+ // <developerSettings installationVersion="v2.0.40223.0" />
+ //
+ // or
+ //
+ // <developmentMode developerInstallation="true" />
+ //
+ // Neither one is your standard config setting.
+ if (!EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("developerSettings"))) ||
+ !EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("developmentMode"))))
+ {
+ m_fDeveloperSettings = TRUE;
+ }
+ else
+ // when generalizing: use map of (string, depth) -> class
+ if (!EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("disableNativeImageLoad"))))
+ {
+ m_pActiveFactory = new NativeImageOptOutConfigFactory();
+ m_pActiveFactory->AddRef();
+ }
+ else
+ {
+ // This is a standard element under the runtime node.... it could look like this
+ // <runtime>
+ // <pszString enabled="1" />
+
+ hr = CopyToKey(pszString, dwStringSize);
+ if(FAILED(hr)) return hr;
+ }
+ }
+                // If our depth isn't 2, or we're not under the runtime element...
+ else
+ ClearKey();
+
+ break ;
+
+ case XML_ATTRIBUTE :
+ if(fRuntimeKey && EEXMLStringCompare(pszString, dwStringSize, CONST_STRING_AND_LEN(W("version"))) == 0) {
+ fVersion = TRUE;
+ }
+ else
+ {
+ if (m_dwDepth == 2 && m_fUnderRuntimeElement)
+ {
+ if (!m_fDeveloperSettings)
+ {
+ _ASSERTE(m_dwCurrentRuntimeElement > 0);
+
+ // The standard model for runtime config settings is as follows
+ //
+ // <runtime>
+ // <m_pCurrentRuntimeElement enabled="true|false" />
+ // or
+ // <m_pCurrentRuntimeElement enabled="1|0" />
+ // or
+ // <m_pCurrentRuntimeElement value="string" />
+
+ m_fOnEnabledAttribute = (EEXMLStringComparei(pszString, dwStringSize, CONST_STRING_AND_LEN(W("enabled"))) == 0);
+ m_fOnValueAttribute = (EEXMLStringComparei(pszString, dwStringSize, CONST_STRING_AND_LEN(W("value"))) == 0);
+ }
+ else // We're looking at developer settings
+ {
+ // Developer settings look like
+ // <developerSettings installationVersion="v2.0.40223.0" />
+ //
+ // or
+ //
+ // <developmentMode developerInstallation="true" />
+ //
+
+ // The key name will actually be the attribute name
+
+ hr = CopyToKey(pszString, dwStringSize);
+ if(FAILED(hr)) return hr;
+ m_fOnEnabledAttribute = FALSE;
+ m_fOnValueAttribute = FALSE;
+ }
+ }
+ }
+ break;
+ case XML_PCDATA:
+ if(fVersion) {
+ // if this is not the right version
+ // then we are not interested
+ if(EEXMLStringCompare(pszString, dwStringSize, m_pVersion, (DWORD)wcslen(m_pVersion))) {
+ m_fUnderRuntimeElement = FALSE;
+ }
+ else {
+ // if it is the right version then overwrite
+ // all entries that exist in the hash table
+ m_fVersionedRuntime = TRUE;
+ }
+
+ fVersion = FALSE;
+ }
+ else if(fRuntimeKey) {
+ break; // Ignore all other attributes on <runtime>
+ }
+
+            // m_dwCurrentRuntimeElement is set by the call to CopyToKey in the XML_ELEMENT case
+            // section above.
+ else if(m_dwCurrentRuntimeElement > 0 && (m_fDeveloperSettings || m_fOnEnabledAttribute || m_fOnValueAttribute)) {
+
+ // This means that, either we are working on attribute values for the developer settings,
+ // or we've got what "enabled" is equal to, or we're reading a string for a value setting.
+ //
+ // <runtime>
+                //    <m_pCurrentRuntimeElement enabled|value=pszString />
+
+ if (m_fOnEnabledAttribute) {
+ // For the enabled settings, let's convert all trues to 1s and the falses to 0s
+ if (EEXMLStringComparei(pszString, dwStringSize, CONST_STRING_AND_LEN(W("false"))) == 0) {
+ pszString = W("0");
+ dwStringSize = 1;
+ }
+ else if (EEXMLStringComparei(pszString, dwStringSize, CONST_STRING_AND_LEN(W("true"))) == 0) {
+ pszString = W("1");
+ dwStringSize = 1;
+ }
+
+                    // <TODO> Right now, if pszString isn't 0 or 1, then the XML schema is bad.
+ // If we were to ever do schema validation, this would be a place to put it.
+ // </TODO>
+ }
+
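+                // Example flow (hypothetical config): <gcServer enabled="true" />
+                // reaches the call below with the key "gcServer" already captured and
+                // pszString rewritten above to W("1") with dwStringSize == 1.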
+ hr = AddKeyValuePair(pszString, dwStringSize, m_pCurrentRuntimeElement, m_dwCurrentRuntimeElement);
+ if(FAILED(hr)) { return hr; }
+ }
+
+ break ;
+ default:
+ ;
+ } // end of switch
+ }
+ }
+ return hr;
+}
+
+HRESULT STDMETHODCALLTYPE EEConfigFactory::AddKeyValuePair(
+ __in_ecount(dwStringSize) WCHAR * pszString,
+ /* [in] */ DWORD dwStringSize,
+ __in_ecount(m_dwCurrentRuntimeElement) WCHAR * m_pCurrentRuntimeElement,
+ /* [in] */ DWORD m_dwCurrentRuntimeElement
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+    // verify the size fields don't overflow
+ if (dwStringSize + 1 < dwStringSize) { return E_FAIL; }
+ if (m_dwCurrentRuntimeElement < m_dwCurrentRuntimeElement - 1) { return E_FAIL; }
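+    // Example of the guards above (hypothetical values): dwStringSize == MAXDWORD
+    // would make dwStringSize + 1 wrap to 0, and m_dwCurrentRuntimeElement == 0 would
+    // make m_dwCurrentRuntimeElement - 1 wrap to MAXDWORD; both are rejected.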
+
+ EX_TRY
+ {
+ // Allocate memory that can store this setting
+ NewArrayHolder<WCHAR> pStringToKeep(new WCHAR[dwStringSize+1]);
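+        // The holder frees this buffer automatically if anything below throws;
+        // SuppressRelease() is called only once ownership has passed to the table.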
+ wcsncpy_s(pStringToKeep, dwStringSize + 1, pszString, dwStringSize);
+
+ // See if we've already picked up a value for this setting
+ ConfigStringKeyValuePair * pair = m_pTable->Lookup(m_pCurrentRuntimeElement);
+ if(pair != NULL) {
+ // If this is a config section for this runtime version, then it's allowed to overwrite
+ // previous settings that we've picked up
+ if(m_fVersionedRuntime) {
+ delete[] pair->value;
+ pair->value = pStringToKeep;
+ pStringToKeep.SuppressRelease();
+ }
+ }
+ else {
+ // We're adding a new config item
+ NewArrayHolder<WCHAR> pKeyToKeep (new WCHAR[m_dwCurrentRuntimeElement]);
+ wcsncpy_s(pKeyToKeep, m_dwCurrentRuntimeElement, m_pCurrentRuntimeElement, m_dwCurrentRuntimeElement - 1);
+
+ ConfigStringKeyValuePair * newPair = new ConfigStringKeyValuePair();
+ newPair->key = pKeyToKeep;
+ newPair->value = pStringToKeep;
+ m_pTable->Add(newPair);
+ pKeyToKeep.SuppressRelease();
+ pStringToKeep.SuppressRelease();
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
diff --git a/src/vm/eeconfigfactory.h b/src/vm/eeconfigfactory.h
new file mode 100644
index 0000000000..f98c5b0e96
--- /dev/null
+++ b/src/vm/eeconfigfactory.h
@@ -0,0 +1,150 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// EEConfigFactory.h
+//
+
+//
+// Parses XML files and adds runtime entries to the EEConfig list
+//
+
+
+#ifndef EECONFIGFACTORY_H
+#define EECONFIGFACTORY_H
+
+#include <xmlparser.h>
+#include <objbase.h>
+#include "unknwn.h"
+#include "../xmlparser/_reference.h"
+#include "../xmlparser/_unknown.h"
+#include "eehash.h"
+#include "eeconfig.h"
+
+#define CONFIG_KEY_SIZE 128
+
+class EEConfigFactory : public _unknown<IXMLNodeFactory, &IID_IXMLNodeFactory>
+{
+
+public:
+ EEConfigFactory(
+ ConfigStringHashtable* pTable,
+ LPCWSTR,
+ ParseCtl parseCtl = parseAll);
+ ~EEConfigFactory();
+ HRESULT STDMETHODCALLTYPE NotifyEvent(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ XML_NODEFACTORY_EVENT iEvt);
+
+ HRESULT STDMETHODCALLTYPE BeginChildren(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR pNodeInfo);
+
+ HRESULT STDMETHODCALLTYPE EndChildren(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ BOOL fEmptyNode,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR pNodeInfo);
+
+ HRESULT STDMETHODCALLTYPE Error(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ HRESULT hrErrorCode,
+ /* [in] */ USHORT cNumRecs,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR * __RPC_FAR apNodeInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+ /*
+ UNUSED(pSource);
+ UNUSED(hrErrorCode);
+ UNUSED(cNumRecs);
+ UNUSED(apNodeInfo);
+ */
+ return hrErrorCode;
+ }
+
+ HRESULT STDMETHODCALLTYPE CreateNode(
+ /* [in] */ IXMLNodeSource __RPC_FAR *pSource,
+ /* [in] */ PVOID pNodeParent,
+ /* [in] */ USHORT cNumRecs,
+ /* [in] */ XML_NODE_INFO* __RPC_FAR * __RPC_FAR apNodeInfo);
+
+private:
+
+ HRESULT GrowKey(DWORD dwSize)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return E_OUTOFMEMORY);
+ }
+ CONTRACTL_END;
+
+ if(dwSize > m_dwSize) {
+ DeleteKey();
+ m_pCurrentRuntimeElement = new(nothrow) WCHAR[dwSize];
+ if(m_pCurrentRuntimeElement == NULL) return E_OUTOFMEMORY;
+ m_dwSize = dwSize;
+ }
+ return S_OK;
+ }
+
+ void ClearKey()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ *m_pCurrentRuntimeElement = 0;
+ m_dwCurrentRuntimeElement = 0;
+ }
+
+ void DeleteKey()
+ {
+ WRAPPER_NO_CONTRACT;
+ if(m_pCurrentRuntimeElement != NULL && m_pCurrentRuntimeElement != m_pBuffer)
+ delete [] m_pCurrentRuntimeElement;
+ m_dwSize = 0;
+ m_dwCurrentRuntimeElement = 0;
+ }
+
+ HRESULT CopyToKey(__in_z LPCWSTR pString, DWORD dwString)
+ {
+ WRAPPER_NO_CONTRACT;
+ dwString++; // add in the null
+ HRESULT hr = GrowKey(dwString);
+ if(FAILED(hr)) return hr;
+ wcsncpy_s(m_pCurrentRuntimeElement, m_dwSize, pString, dwString-1);
+
+ m_dwCurrentRuntimeElement = dwString;
+ return S_OK;
+ }
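+    // Illustrative call (hypothetical input): CopyToKey(W("gcServer"), 8) ensures the
+    // key buffer holds at least 9 WCHARs, copies the 8 characters plus a null
+    // terminator, and leaves m_dwCurrentRuntimeElement == 9.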
+
+ HRESULT STDMETHODCALLTYPE AddKeyValuePair(
+ __in_ecount(dwStringSize) WCHAR * pszString,
+ /* [in] */ DWORD dwStringSize,
+ __in_ecount(m_dwCurrentRuntimeElement) WCHAR * m_pCurrentRuntimeElement,
+ /* [in] */ DWORD m_dwCurrentRuntimeElement);
+
+ HRESULT CopyVersion(LPCWSTR version, DWORD dwVersion);
+
+ ConfigStringHashtable* m_pTable;
+ BOOL m_fUnderRuntimeElement;
+ BOOL m_fOnEnabledAttribute;
+ BOOL m_fOnValueAttribute;
+ BOOL m_fVersionedRuntime;
+ BOOL m_fDeveloperSettings;
+
+ LPCWSTR m_pVersion;
+ LPWSTR m_pCurrentRuntimeElement;
+ DWORD m_dwCurrentRuntimeElement;
+
+ WCHAR m_pBuffer[CONFIG_KEY_SIZE];
+ DWORD m_dwSize;
+
+ DWORD m_dwDepth;
+
+ bool m_bSafeMode; // If true, will ignore any settings that may compromise security
+ ParseCtl m_parseCtl; // usually parseAll, sometimes stopAfterRuntimeSection
+
+ ReleaseHolder<IXMLNodeFactory> m_pActiveFactory; // hold a factory responsible for parsing subnode
+};
+
+#endif
diff --git a/src/vm/eecontract.cpp b/src/vm/eecontract.cpp
new file mode 100644
index 0000000000..557ba20c88
--- /dev/null
+++ b/src/vm/eecontract.cpp
@@ -0,0 +1,273 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// ---------------------------------------------------------------------------
+// EEContract.cpp
+//
+
+// ! I am the owner for issues in the contract *infrastructure*, not for every
+// ! CONTRACT_VIOLATION dialog that comes up. If you interrupt my work for a routine
+// ! CONTRACT_VIOLATION, you will become the new owner of this file.
+// ---------------------------------------------------------------------------
+
+
+#include "common.h"
+#include "dbginterface.h"
+
+
+#ifdef ENABLE_CONTRACTS
+
+void EEContract::Disable()
+{
+ BaseContract::Disable();
+}
+
+void EEContract::DoChecks(UINT testmask, __in_z const char *szFunction, __in_z char *szFile, int lineNum)
+{
+ SCAN_IGNORE_THROW; // Tell the static contract analyzer to ignore contract violations
+ SCAN_IGNORE_FAULT; // due to the contract checking logic itself.
+ SCAN_IGNORE_TRIGGER;
+ SCAN_IGNORE_LOCK;
+ SCAN_IGNORE_SO;
+
+ // Many of the checks below result in calls to GetThread()
+ // that work just fine if GetThread() returns NULL, so temporarily
+ // allow such calls.
+ BEGIN_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
+ m_pThread = GetThread();
+ if (m_pThread != NULL)
+ {
+ m_pClrDebugState = m_pThread->GetClrDebugState();
+ }
+
+ // Call our base DoChecks.
+ BaseContract::DoChecks(testmask, szFunction, szFile, lineNum);
+
+ m_testmask = testmask;
+ m_contractStackRecord.m_testmask = testmask;
+
+ // GC mode check
+ switch (testmask & MODE_Mask)
+ {
+ case MODE_Coop:
+ if (m_pThread == NULL || !m_pThread->PreemptiveGCDisabled())
+ {
+ //
+                // Check whether this is the debugger helper thread and it has the
+                // runtime stopped. If both of these things are true, then we do not care
+ // whether we are in COOP mode or not.
+ //
+ if ((g_pDebugInterface != NULL) &&
+ g_pDebugInterface->ThisIsHelperThread() &&
+ g_pDebugInterface->IsStopped())
+ {
+ break;
+ }
+
+ // Pretend that the threads doing GC are in cooperative mode so that code with
+ // MODE_COOPERATIVE contract works fine on them.
+ if (IsGCThread())
+ {
+ break;
+ }
+
+ if (!( (ModeViolation|BadDebugState) & m_pClrDebugState->ViolationMask()))
+ {
+ if (m_pThread == NULL)
+ {
+ CONTRACT_ASSERT("You must have called SetupThread in order to be in GC Cooperative mode.",
+ Contract::MODE_Preempt,
+ Contract::MODE_Mask,
+ m_contractStackRecord.m_szFunction,
+ m_contractStackRecord.m_szFile,
+ m_contractStackRecord.m_lineNum
+ );
+ }
+ else
+ {
+ CONTRACT_ASSERT("MODE_COOPERATIVE encountered while thread is in preemptive state.",
+ Contract::MODE_Preempt,
+ Contract::MODE_Mask,
+ m_contractStackRecord.m_szFunction,
+ m_contractStackRecord.m_szFile,
+ m_contractStackRecord.m_lineNum
+ );
+ }
+ }
+ }
+ break;
+
+ case MODE_Preempt:
+ // Unmanaged threads are considered permanently preemptive so a NULL thread amounts to a passing case here.
+ if (m_pThread != NULL && m_pThread->PreemptiveGCDisabled())
+ {
+ if (!( (ModeViolation|BadDebugState) & m_pClrDebugState->ViolationMask()))
+ {
+ CONTRACT_ASSERT("MODE_PREEMPTIVE encountered while thread is in cooperative state.",
+ Contract::MODE_Coop,
+ Contract::MODE_Mask,
+ m_contractStackRecord.m_szFunction,
+ m_contractStackRecord.m_szFile,
+ m_contractStackRecord.m_lineNum
+ );
+ }
+ }
+ break;
+
+ case MODE_Disabled:
+ // Nothing
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ // GC Trigger check
+ switch (testmask & GC_Mask)
+ {
+ case GC_Triggers:
+ // We don't want to do a full TRIGGERSGC here as this could corrupt
+ // OBJECTREF-typed arguments to the function.
+ {
+ if (m_pClrDebugState->GetGCNoTriggerCount())
+ {
+ if (!( (GCViolation|BadDebugState) & m_pClrDebugState->ViolationMask()))
+ {
+ CONTRACT_ASSERT("GC_TRIGGERS encountered in a GC_NOTRIGGER scope",
+ Contract::GC_NoTrigger,
+ Contract::GC_Mask,
+ m_contractStackRecord.m_szFunction,
+ m_contractStackRecord.m_szFile,
+ m_contractStackRecord.m_lineNum
+ );
+ }
+ }
+ }
+ break;
+
+ case GC_NoTrigger:
+ m_pClrDebugState->ViolationMaskReset( GCViolation );
+
+ // Inlined BeginNoTriggerGC
+ m_pClrDebugState->IncrementGCNoTriggerCount();
+ if (m_pThread && m_pThread->m_fPreemptiveGCDisabled)
+ {
+ m_pClrDebugState->IncrementGCForbidCount();
+ }
+
+ break;
+
+ case GC_Disabled:
+ // Nothing
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ // Host Triggers check
+ switch (testmask & HOST_Mask)
+ {
+ case HOST_Calls:
+ {
+ if (!m_pClrDebugState->IsHostCaller())
+ {
+ if (!( (HostViolation|BadDebugState) & m_pClrDebugState->ViolationMask()))
+ {
+ // Avoid infinite recursion by temporarily allowing HOST_CALLS
+ // violations so that we don't get contract asserts in anything
+ // called downstream of CONTRACT_ASSERT. If we unwind out of
+ // here, our dtor will reset our state to what it was on entry.
+ CONTRACT_VIOLATION(HostViolation);
+ CONTRACT_ASSERT("HOST_CALLS encountered in a HOST_NOCALLS scope",
+ Contract::HOST_NoCalls,
+ Contract::HOST_Mask,
+ m_contractStackRecord.m_szFunction,
+ m_contractStackRecord.m_szFile,
+ m_contractStackRecord.m_lineNum
+ );
+ }
+ }
+ }
+ break;
+
+ case HOST_NoCalls:
+ // m_pClrDebugState->ViolationMaskReset( HostViolation );
+ m_pClrDebugState->ResetHostCaller();
+ break;
+
+ case HOST_Disabled:
+ // Nothing
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ END_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
+
+ // EE Thread-required check
+ // NOTE: The following must NOT be inside BEGIN/END_GETTHREAD_ALLOWED,
+ // as the change to m_pClrDebugState->m_allowGetThread below would be
+ // overwritten by END_GETTHREAD_ALLOWED.
+ switch (testmask & EE_THREAD_Mask)
+ {
+ case EE_THREAD_Required:
+ if (!((EEThreadViolation|BadDebugState) & m_pClrDebugState->ViolationMask()))
+ {
+ if (m_pThread == NULL)
+ {
+ CONTRACT_ASSERT("EE_THREAD_REQUIRED encountered with no current EE Thread object in TLS.",
+ Contract::EE_THREAD_Required,
+ Contract::EE_THREAD_Mask,
+ m_contractStackRecord.m_szFunction,
+ m_contractStackRecord.m_szFile,
+ m_contractStackRecord.m_lineNum
+ );
+ }
+ else if (!m_pClrDebugState->IsGetThreadAllowed())
+ {
+ // In general, it's unsafe for an EE_THREAD_NOT_REQUIRED function to
+ // call an EE_THREAD_REQUIRED function. In cases where it is safe,
+ // you may wrap the call to the EE_THREAD_REQUIRED function inside a
+ // BEGIN/END_GETTHREAD_ALLOWED block, but you may only do so if the
+ // case where GetThread() == NULL is clearly handled in a way that
+ // prevents entry into the BEGIN/END_GETTHREAD_ALLOWED block.
+ CONTRACT_ASSERT("EE_THREAD_REQUIRED encountered in an EE_THREAD_NOT_REQUIRED scope, without an intervening BEGIN/END_GETTHREAD_ALLOWED block.",
+ Contract::EE_THREAD_Required,
+ Contract::EE_THREAD_Mask,
+ m_contractStackRecord.m_szFunction,
+ m_contractStackRecord.m_szFile,
+ m_contractStackRecord.m_lineNum
+ );
+ }
+ }
+ m_pClrDebugState->SetGetThreadAllowed();
+ break;
+
+ case EE_THREAD_Not_Required:
+ m_pClrDebugState->ResetGetThreadAllowed();
+ break;
+
+ case EE_THREAD_Disabled:
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+#endif // ENABLE_CONTRACTS
+
+
+BYTE* __stdcall GetAddrOfContractShutoffFlag()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Exposed entrypoint where we cannot probe or do anything TLS
+ // related
+ static BYTE gContractShutoffFlag = 0;
+
+ return &gContractShutoffFlag;
+}
+
diff --git a/src/vm/eecontract.h b/src/vm/eecontract.h
new file mode 100644
index 0000000000..37bfee728a
--- /dev/null
+++ b/src/vm/eecontract.h
@@ -0,0 +1,116 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// ---------------------------------------------------------------------------
+// EEContract.h
+//
+
+// ! I am the owner for issues in the contract *infrastructure*, not for every
+// ! CONTRACT_VIOLATION dialog that comes up. If you interrupt my work for a routine
+// ! CONTRACT_VIOLATION, you will become the new owner of this file.
+// ---------------------------------------------------------------------------
+
+
+#ifndef EECONTRACT_H_
+#define EECONTRACT_H_
+
+#include "contract.h"
+#include "stackprobe.h"
+
+// --------------------------------------------------------------------------------
+// EECONTRACT is an extension of the lower-level CONTRACT macros to include some
+// EE-specific stuff like GC mode checking. See check.h for more info on CONTRACT.
+// --------------------------------------------------------------------------------
+
+#undef GC_TRIGGERS
+#undef GC_NOTRIGGER
+
+#ifdef ENABLE_CONTRACTS_IMPL
+
+class EEContract : public BaseContract
+{
+ private:
+ Thread *m_pThread; // Current thread pointer
+ // Have to override this function in any derived class to indicate that a valid destructor is defined for this class
+ virtual void DestructorDefinedThatCallsRestore(){}
+
+ public:
+ __declspec(nothrow) ~EEContract()
+ {
+ Restore();
+ }
+
+ void Disable();
+ void DoChecks(UINT testmask, __in_z const char *szFunction, __in_z char *szFile, int lineNum);
+};
+
+
+
+#define MODE_COOPERATIVE do { STATIC_CONTRACT_MODE_COOPERATIVE; REQUEST_TEST(Contract::MODE_Coop, Contract::MODE_Disabled); } while(0)
+#define MODE_PREEMPTIVE do { STATIC_CONTRACT_MODE_PREEMPTIVE; REQUEST_TEST(Contract::MODE_Preempt, Contract::MODE_Disabled); } while(0)
+#define MODE_ANY do { STATIC_CONTRACT_MODE_ANY; REQUEST_TEST(Contract::MODE_Disabled, Contract::MODE_Disabled); } while(0)
+
+#define GC_TRIGGERS do { STATIC_CONTRACT_GC_TRIGGERS; REQUEST_TEST(Contract::GC_Triggers, Contract::GC_Disabled); } while(0)
+#define GC_NOTRIGGER do { STATIC_CONTRACT_GC_NOTRIGGER; REQUEST_TEST(Contract::GC_NoTrigger, Contract::GC_Disabled); } while(0)
+
+// Notice there's no static contract component to this. It's
+// perfectly reasonable to find EE_THREAD_REQUIRED inside the scope of
+// EE_THREAD_NOT_REQUIRED (e.g., an EE_THREAD_NOT_REQUIRED scope can have two
+// possible code paths--one with an EE Thread and one without). So we can't do
+// any meaningful testing statically. It's all gotta be done at runtime.
+#define EE_THREAD_NOT_REQUIRED \
+ do { REQUEST_TEST(Contract::EE_THREAD_Not_Required, Contract::EE_THREAD_Disabled); } while(0)
+
+#define EE_THREAD_REQUIRED do { REQUEST_TEST(Contract::EE_THREAD_Required, Contract::EE_THREAD_Disabled); } while(0)
+
+#define HOST_NOCALLS do { STATIC_CONTRACT_HOST_NOCALLS; REQUEST_TEST(Contract::HOST_NoCalls, Contract::HOST_Disabled); } while(0)
+#define HOST_CALLS do { STATIC_CONTRACT_HOST_CALLS; REQUEST_TEST(Contract::HOST_Calls, Contract::HOST_Disabled); } while(0)
+
+#else // ENABLE_CONTRACTS_IMPL
+
+#define MODE_COOPERATIVE
+#define MODE_PREEMPTIVE
+#define MODE_ANY
+#define GC_TRIGGERS
+#define GC_NOTRIGGER
+#define HOST_NOCALLS
+#define HOST_CALLS
+#define EE_THREAD_NOT_REQUIRED
+#define EE_THREAD_REQUIRED
+
+
+#endif // ENABLE_CONTRACTS_IMPL
+
+// Replace the CONTRACT macro with the EE version
+#undef CONTRACT
+#define CONTRACT(_returntype) CUSTOM_CONTRACT(EEContract, _returntype)
+
+#undef CONTRACT_VOID
+#define CONTRACT_VOID CUSTOM_CONTRACT_VOID(EEContract)
+
+#undef CONTRACTL
+#define CONTRACTL CUSTOM_CONTRACTL(EEContract)
+
+#undef LIMITED_METHOD_CONTRACT
+#define LIMITED_METHOD_CONTRACT CUSTOM_LIMITED_METHOD_CONTRACT(EEContract)
+
+#undef WRAPPER_NO_CONTRACT
+#define WRAPPER_NO_CONTRACT CUSTOM_WRAPPER_NO_CONTRACT(EEContract)
+
+//
+// The default contract is the recommended contract for ordinary EE code.
+// Ordinary EE code can throw or trigger GC at any time, does not operate
+// on raw object refs, etc.
+//
+
+#undef STANDARD_VM_CHECK
+#define STANDARD_VM_CHECK \
+ THROWS; \
+ GC_TRIGGERS; \
+ MODE_PREEMPTIVE; \
+ SO_INTOLERANT; \
+ INJECT_FAULT(COMPlusThrowOM();); \
+
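+// Typical usage (illustrative): a function adopting the default contract writes
+//
+//     CONTRACTL
+//     {
+//         STANDARD_VM_CHECK;
+//     }
+//     CONTRACTL_END;
+//
+// which expands to the five checks above.
+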
+#endif // EECONTRACT_H_
diff --git a/src/vm/eedbginterface.h b/src/vm/eedbginterface.h
new file mode 100644
index 0000000000..a39b7db6fc
--- /dev/null
+++ b/src/vm/eedbginterface.h
@@ -0,0 +1,380 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// COM+99 EE to Debugger Interface Header
+//
+
+
+
+#ifndef _eedbginterface_h_
+#define _eedbginterface_h_
+
+#include "common.h"
+#include "corpriv.h"
+#include "hash.h"
+#include "class.h"
+#include "excep.h"
+#include "threads.h"
+#include "field.h"
+#include "stackwalk.h"
+
+#ifdef EnC_SUPPORTED
+#include "encee.h"
+#endif
+
+#include "cordebug.h"
+#include "../debug/inc/common.h"
+
+class MethodDesc;
+class Frame;
+//
+// The purpose of this object is to provide EE functionality back to
+// the debugger. This represents the entire set of EE functions used
+// by the debugger.
+//
+// We will make this interface smaller over time to minimize the link
+// between the EE and the Debugger.
+//
+//
+typedef BOOL (*HashMapEnumCallback)(HashMap* h,
+ void* pData,
+ ULONG value);
+
+typedef enum AttachAppDomainEventsEnum
+{
+ ONLY_SEND_APP_DOMAIN_CREATE_EVENTS,
+ DONT_SEND_CLASS_EVENTS,
+ ONLY_SEND_CLASS_EVENTS
+} AttachAppDomainEventsEnum;
+
+typedef VPTR(class EEDebugInterface) PTR_EEDebugInterface;
+
+// Used for communicating EH Handler info between the LS and EE (DetermineIfOffsetsInFilterOrHandler)
+struct DebugOffsetToHandlerInfo
+{
+ // Native offset of interest, or -1 if this entry should be ignored
+ SIZE_T offset;
+
+ // Set to true by the EE if the specified native offset is in an EH filter or handler.
+ BOOL isInFilterOrHandler;
+};
+
+class EEDebugInterface
+{
+ VPTR_BASE_VTABLE_CLASS(EEDebugInterface);
+
+public:
+
+ //
+ // Functions exported from the EE to the debugger.
+ //
+
+ virtual Thread* GetThread(void) = 0;
+
+#ifndef DACCESS_COMPILE
+
+ virtual void SetEEThreadPtr(VOID* newPtr) = 0;
+
+ virtual StackWalkAction StackWalkFramesEx(Thread* pThread,
+ PREGDISPLAY pRD,
+ PSTACKWALKFRAMESCALLBACK pCallback,
+ VOID* pData,
+ unsigned int flags) = 0;
+
+ virtual Frame *GetFrame(CrawlFrame*) = 0;
+
+ virtual bool InitRegDisplay(Thread* pThread,
+ const PREGDISPLAY pRD,
+ const PT_CONTEXT pctx,
+ bool validContext) = 0;
+
+ virtual BOOL IsStringObject(Object* o) = 0;
+
+ virtual BOOL IsTypedReference(MethodTable* pMT) = 0;
+
+ virtual WCHAR* StringObjectGetBuffer(StringObject* so) = 0;
+
+ virtual DWORD StringObjectGetStringLength(StringObject* so) = 0;
+
+ virtual void *GetObjectFromHandle(OBJECTHANDLE handle) = 0;
+
+ virtual OBJECTHANDLE GetHandleFromObject(void *obj,
+ bool fStrongNewRef,
+ AppDomain *pAppDomain) = 0;
+
+ virtual void DbgDestroyHandle( OBJECTHANDLE oh, bool fStrongNewRef ) = 0;
+
+ virtual OBJECTHANDLE GetThreadException(Thread *pThread) = 0;
+
+ virtual bool IsThreadExceptionNull(Thread *pThread) = 0;
+
+ virtual void ClearThreadException(Thread *pThread) = 0;
+
+ virtual bool StartSuspendForDebug(AppDomain *pAppDomain,
+ BOOL fHoldingThreadStoreLock = FALSE) = 0;
+
+ virtual void ResumeFromDebug(AppDomain *pAppDomain)= 0;
+
+ virtual void MarkThreadForDebugSuspend(Thread* pRuntimeThread) = 0;
+
+ virtual void MarkThreadForDebugStepping(Thread* pRuntimeThread,
+ bool onOff) = 0;
+
+ virtual void SetThreadFilterContext(Thread *thread,
+ T_CONTEXT *context) = 0;
+
+ virtual T_CONTEXT *GetThreadFilterContext(Thread *thread) = 0;
+
+ virtual VOID *GetThreadDebuggerWord(Thread *thread) = 0;
+
+ virtual void SetThreadDebuggerWord(Thread *thread,
+ VOID *dw) = 0;
+
+ virtual BOOL IsManagedNativeCode(const BYTE *address) = 0;
+
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual MethodDesc *GetNativeCodeMethodDesc(const PCODE address) = 0;
+
+#ifndef DACCESS_COMPILE
+
+ virtual BOOL IsInPrologOrEpilog(const BYTE *address,
+ size_t* prologSize) = 0;
+
+ // Determine whether certain native offsets of the specified function are within
+ // an exception filter or handler.
+ virtual void DetermineIfOffsetsInFilterOrHandler(const BYTE *functionAddress,
+ DebugOffsetToHandlerInfo *pOffsetToHandlerInfo,
+ unsigned offsetToHandlerInfoLength) = 0;
+
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual void GetMethodRegionInfo(const PCODE pStart,
+ PCODE * pCold,
+ size_t * hotSize,
+ size_t * coldSize) = 0;
+
+#if defined(WIN64EXCEPTIONS)
+ virtual DWORD GetFuncletStartOffsets(const BYTE *pStart, DWORD* pStartOffsets, DWORD dwLength) = 0;
+ virtual StackFrame FindParentStackFrame(CrawlFrame* pCF) = 0;
+#endif // WIN64EXCEPTIONS
+
+ virtual size_t GetFunctionSize(MethodDesc *pFD) = 0;
+
+ virtual const PCODE GetFunctionAddress(MethodDesc *pFD) = 0;
+
+#ifndef DACCESS_COMPILE
+
+#ifdef EnC_SUPPORTED
+
+ // Apply an EnC edit
+ virtual HRESULT EnCApplyChanges(EditAndContinueModule *pModule,
+ DWORD cbMetadata,
+ BYTE *pMetadata,
+ DWORD cbIL,
+ BYTE *pIL) = 0;
+
+ // Perform an EnC remap to resume execution in the new version of a method (doesn't return)
+ virtual void ResumeInUpdatedFunction(EditAndContinueModule *pModule,
+ MethodDesc *pFD,
+ void *debuggerFuncHandle,
+ SIZE_T resumeIP,
+ CONTEXT *pContext) = 0;
+#endif //EnC_SUPPORTED
+
+ //
+ // New methods to support the new debugger.
+ //
+
+ virtual MethodDesc *FindLoadedMethodRefOrDef(Module* pModule,
+ mdMemberRef memberRef) = 0;
+
+ virtual MethodDesc *LoadMethodDef(Module* pModule,
+ mdMethodDef methodDef,
+ DWORD numGenericArgs = 0,
+ TypeHandle *pGenericArgs = NULL,
+ TypeHandle *pOwnerType = NULL) = 0;
+
+    // These will look up a type and, if it's not loaded, return the null TypeHandle
+ virtual TypeHandle FindLoadedClass(Module *pModule,
+ mdTypeDef classToken) = 0;
+
+ virtual TypeHandle FindLoadedElementType(CorElementType et) = 0;
+
+ virtual TypeHandle FindLoadedInstantiation(Module *pModule,
+ mdTypeDef typeDef,
+ DWORD ntypars,
+ TypeHandle *inst) = 0;
+
+ virtual TypeHandle FindLoadedFnptrType(TypeHandle *inst,
+ DWORD ntypars) = 0;
+
+ virtual TypeHandle FindLoadedPointerOrByrefType(CorElementType et,
+ TypeHandle elemtype) = 0;
+
+ virtual TypeHandle FindLoadedArrayType(CorElementType et,
+ TypeHandle elemtype,
+ unsigned rank) = 0;
+
+    // These will look up a type and, if it's not loaded, will load it and run
+    // the class init etc.
+ virtual TypeHandle LoadClass(Module *pModule,
+ mdTypeDef classToken) = 0;
+
+ virtual TypeHandle LoadElementType(CorElementType et) = 0;
+
+ virtual TypeHandle LoadInstantiation(Module *pModule,
+ mdTypeDef typeDef,
+ DWORD ntypars,
+ TypeHandle *inst) = 0;
+
+ virtual TypeHandle LoadFnptrType(TypeHandle *inst,
+ DWORD ntypars) = 0;
+
+ virtual TypeHandle LoadPointerOrByrefType(CorElementType et,
+ TypeHandle elemtype) = 0;
+
+ virtual TypeHandle LoadArrayType(CorElementType et,
+ TypeHandle elemtype,
+ unsigned rank) = 0;
+
+ __checkReturn
+ virtual HRESULT GetMethodImplProps(Module *pModule,
+ mdToken tk,
+ DWORD *pRVA,
+ DWORD *pImplFlags) = 0;
+
+ virtual HRESULT GetParentToken(Module *pModule,
+ mdToken tk,
+ mdToken *pParentToken) = 0;
+
+ virtual bool IsPreemptiveGCDisabled(void) = 0;
+
+ virtual void DisablePreemptiveGC(void) = 0;
+
+ virtual void EnablePreemptiveGC(void) = 0;
+
+ virtual DWORD MethodDescIsStatic(MethodDesc *pFD) = 0;
+
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual Module *MethodDescGetModule(MethodDesc *pFD) = 0;
+
+#ifndef DACCESS_COMPILE
+
+ virtual COR_ILMETHOD* MethodDescGetILHeader(MethodDesc *pFD) = 0;
+
+ virtual ULONG MethodDescGetRVA(MethodDesc *pFD) = 0;
+
+ virtual void MarkDebuggerAttached(void) = 0;
+
+ virtual void MarkDebuggerUnattached(void) = 0;
+
+ virtual bool CrawlFrameIsGcSafe(CrawlFrame *pCF) = 0;
+
+ virtual bool SweepThreadsForDebug(bool forceSync) = 0;
+
+ virtual void GetRuntimeOffsets(SIZE_T *pTLSIndex,
+ SIZE_T *pTLSIsSpecialIndex,
+ SIZE_T *pTLSCantStopIndex,
+ SIZE_T *pTLSIndexOfPredefs,
+ SIZE_T *pEEThreadStateOffset,
+ SIZE_T *pEEThreadStateNCOffset,
+ SIZE_T *pEEThreadPGCDisabledOffset,
+ DWORD *pEEThreadPGCDisabledValue,
+ SIZE_T *pEEThreadDebuggerWordOffset,
+ SIZE_T *pEEThreadFrameOffset,
+ SIZE_T *pEEThreadMaxNeededSize,
+ DWORD *pEEThreadSteppingStateMask,
+ DWORD *pEEMaxFrameValue,
+ SIZE_T *pEEThreadDebuggerFilterContextOffset,
+ SIZE_T *pEEThreadCantStopMask,
+ SIZE_T *pEEFrameNextOffset,
+ DWORD *pEEIsManagedExceptionStateMask) = 0;
+
+ virtual bool IsStub(const BYTE *ip) = 0;
+
+#endif // #ifndef DACCESS_COMPILE
+
+ virtual bool DetectHandleILStubs(Thread *thread) = 0;
+
+ virtual bool TraceStub(const BYTE *ip, TraceDestination *trace) = 0;
+
+#ifndef DACCESS_COMPILE
+
+ virtual bool FollowTrace(TraceDestination *trace) = 0;
+
+ virtual bool TraceFrame(Thread *thread,
+ Frame *frame,
+ BOOL fromPatch,
+ TraceDestination *trace,
+ REGDISPLAY *regs) = 0;
+
+ virtual bool TraceManager(Thread *thread,
+ StubManager *stubManager,
+ TraceDestination *trace,
+ T_CONTEXT *context,
+ BYTE **pRetAddr) = 0;
+
+ virtual void EnableTraceCall(Thread *thread) = 0;
+ virtual void DisableTraceCall(Thread *thread) = 0;
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+
+ virtual void DebuggerModifyingLogSwitch (int iNewLevel,
+ const WCHAR *pLogSwitchName) = 0;
+
+#if defined(_TARGET_X86_) || defined(_WIN64) || defined(_TARGET_ARM_)
+ virtual HRESULT SetIPFromSrcToDst(Thread *pThread,
+ SLOT addrStart,
+ DWORD offFrom,
+ DWORD offTo,
+ bool fCanSetIPOnly,
+ PREGDISPLAY pReg,
+ PT_CONTEXT pCtx,
+ void *pDji,
+ EHRangeTree *pEHRT) = 0;
+#endif // _TARGET_X86_ || _WIN64 || _TARGET_ARM_
+
+ virtual void SetDebugState(Thread *pThread,
+ CorDebugThreadState state) = 0;
+
+ virtual void SetAllDebugState(Thread *et,
+ CorDebugThreadState state) = 0;
+
+ virtual CorDebugUserState GetPartialUserState( Thread *pThread ) = 0;
+
+#ifdef FEATURE_PREJIT
+#ifndef DACCESS_COMPILE
+ virtual void SetNGENDebugFlags(BOOL fAllowOpt) = 0;
+
+ virtual void GetNGENDebugFlags(BOOL *fAllowOpt) = 0;
+#endif
+#endif // FEATURE_PREJIT
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) = 0;
+#endif
+
+ virtual unsigned GetSizeForCorElementType(CorElementType etyp) = 0;
+
+#ifndef DACCESS_COMPILE
+ virtual BOOL ObjIsInstanceOf(Object *pElement, TypeHandle toTypeHnd) = 0;
+#endif
+
+ virtual void ClearAllDebugInterfaceReferences(void) = 0;
+
+#ifndef DACCESS_COMPILE
+#ifdef _DEBUG
+ virtual void ObjectRefFlush(Thread *pThread) = 0;
+#endif
+#endif
+};
+
+#endif // _eedbginterface_h_
diff --git a/src/vm/eedbginterfaceimpl.cpp b/src/vm/eedbginterfaceimpl.cpp
new file mode 100644
index 0000000000..f2648a934e
--- /dev/null
+++ b/src/vm/eedbginterfaceimpl.cpp
@@ -0,0 +1,1688 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*
+ *
+ * COM+99 EE to Debugger Interface Implementation
+ *
+ */
+
+#include "common.h"
+#include "dbginterface.h"
+#include "eedbginterfaceimpl.h"
+#include "virtualcallstub.h"
+#include "contractimpl.h"
+
+#ifdef DEBUGGING_SUPPORTED
+
+#ifndef DACCESS_COMPILE
+
+//
+// Cleanup any global data used by this interface.
+//
+void EEDbgInterfaceImpl::Terminate(void)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (g_pEEDbgInterfaceImpl)
+ {
+ delete g_pEEDbgInterfaceImpl;
+ g_pEEDbgInterfaceImpl = NULL;
+ }
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+Thread* EEDbgInterfaceImpl::GetThread(void)
+{
+ LIMITED_METHOD_CONTRACT;
+// Since this may be called from a Debugger Interop Hijack, the EEThread may be bogus.
+// Thus we can't use contracts. If we do fix that, then the contract below would be nice...
+#if 0
+ CONTRACT(Thread *)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+#endif
+
+ return ::GetThread();
+}
+
+#ifndef DACCESS_COMPILE
+
+void EEDbgInterfaceImpl::SetEEThreadPtr(VOID* newPtr)
+{
+ // Since this may be called from a Debugger Interop Hijack, the EEThread may be bogus.
+ // Thus we can't use contracts. If we do fix that, then the contract below would be nice...
+#if 0
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ PRECONDITION(GetThread() == NULL); // shouldn't have an EE thread.
+ }
+ CONTRACTL_END;
+#endif
+ // This should only be called by interop-debugging when we don't have an EE thread
+ // object.
+
+ // Normally the LS & RS can communicate a pointer value using the EE thread's
+ // m_debuggerWord field. If we have no EE thread, then we can use the
+ // TLS slot that the EE thread would have been in.
+
+ SetThread((Thread*)newPtr);
+}
+
+StackWalkAction EEDbgInterfaceImpl::StackWalkFramesEx(Thread* pThread,
+ PREGDISPLAY pRD,
+ PSTACKWALKFRAMESCALLBACK pCallback,
+ VOID* pData,
+ unsigned int flags)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW); // FIX THIS when StackWalkFramesEx gets fixed.
+ DISABLED(GC_TRIGGERS); // We cannot predict if pCallback will trigger or not.
+ // Disabled is not a bug in this case.
+ PRECONDITION(CheckPointer(pThread));
+ }
+ CONTRACTL_END;
+
+ return pThread->StackWalkFramesEx(pRD, pCallback, pData, flags);
+}
+
+Frame *EEDbgInterfaceImpl::GetFrame(CrawlFrame *pCF)
+{
+ CONTRACT(Frame *)
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pCF));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN pCF->GetFrame();
+}
+
+bool EEDbgInterfaceImpl::InitRegDisplay(Thread* pThread,
+ const PREGDISPLAY pRD,
+ const PCONTEXT pctx,
+ bool validContext)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pThread));
+ PRECONDITION(CheckPointer(pRD));
+ if (validContext)
+ {
+ PRECONDITION(CheckPointer(pctx));
+ }
+ }
+ CONTRACTL_END;
+
+ return pThread->InitRegDisplay(pRD, pctx, validContext);
+}
+
+BOOL EEDbgInterfaceImpl::IsStringObject(Object* o)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(o));
+ }
+ CONTRACTL_END;
+
+ return o->GetMethodTable() == g_pStringClass;
+}
+
+BOOL EEDbgInterfaceImpl::IsTypedReference(MethodTable* pMT)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ return pMT == g_TypedReferenceMT;
+}
+
+WCHAR* EEDbgInterfaceImpl::StringObjectGetBuffer(StringObject* so)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(so));
+ }
+ CONTRACTL_END;
+
+ return so->GetBuffer();
+}
+
+DWORD EEDbgInterfaceImpl::StringObjectGetStringLength(StringObject* so)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(so));
+ }
+ CONTRACTL_END;
+
+ return so->GetStringLength();
+}
+
+void* EEDbgInterfaceImpl::GetObjectFromHandle(OBJECTHANDLE handle)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ void *v;
+
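+    // An OBJECTHANDLE points at an OBJECTREF slot; copying through a void* alias
+    // avoids constructing an OBJECTREF local, which is a checked wrapper type in
+    // _DEBUG builds.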
+ *((OBJECTREF *)&v) = *(OBJECTREF *)handle;
+
+ return v;
+}
+
+OBJECTHANDLE EEDbgInterfaceImpl::GetHandleFromObject(void *obj,
+ bool fStrongNewRef,
+ AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS; // From CreateHandle
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pAppDomain));
+ }
+ CONTRACTL_END;
+
+ OBJECTHANDLE oh;
+
+ if (fStrongNewRef)
+ {
+ oh = pAppDomain->CreateStrongHandle(ObjectToOBJECTREF((Object *)obj));
+
+ LOG((LF_CORDB, LL_INFO1000, "EEI::GHFO: Given objectref 0x%x,"
+ "created strong handle 0x%x!\n", obj, oh));
+ }
+ else
+ {
+ oh = pAppDomain->CreateLongWeakHandle( ObjectToOBJECTREF((Object *)obj));
+
+ LOG((LF_CORDB, LL_INFO1000, "EEI::GHFO: Given objectref 0x%x,"
+ "created long weak handle 0x%x!\n", obj, oh));
+ }
+
+ return oh;
+}
+
+void EEDbgInterfaceImpl::DbgDestroyHandle(OBJECTHANDLE oh,
+ bool fStrongNewRef)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+    LOG((LF_CORDB, LL_INFO1000, "EEI::DDH: Destroyed given handle 0x%x,"
+ "fStrong: 0x%x!\n", oh, fStrongNewRef));
+
+ if (fStrongNewRef)
+ {
+ DestroyStrongHandle(oh);
+ }
+ else
+ {
+ DestroyLongWeakHandle(oh);
+ }
+}
+
+
+OBJECTHANDLE EEDbgInterfaceImpl::GetThreadException(Thread *pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pThread));
+ }
+ CONTRACTL_END;
+
+ OBJECTHANDLE oh = pThread->GetThrowableAsHandle();
+
+ if (oh != NULL)
+ {
+ return oh;
+ }
+
+ // Return the last thrown object if there's no current throwable.
+ // This logic is similar to UpdateCurrentThrowable().
+ return pThread->m_LastThrownObjectHandle;
+}
+
+bool EEDbgInterfaceImpl::IsThreadExceptionNull(Thread *pThread)
+{
+ CONTRACTL
+ {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pThread));
+ }
+ CONTRACTL_END;
+
+ //
+ // We're assuming that the handle on the
+    // thread is a strong handle and we're going to check it for
+    // NULL. We're also assuming something about the
+    // implementation of the handle here.
+ //
+ OBJECTHANDLE h = pThread->GetThrowableAsHandle();
+ if (h == NULL)
+ {
+ return true;
+ }
+
+ void *pThrowable = *((void**)h);
+
+ return (pThrowable == NULL);
+}
+
+void EEDbgInterfaceImpl::ClearThreadException(Thread *pThread)
+{
+ //
+ // If one day there is a continuable exception, then this will have to be
+ // implemented properly.
+ //
+ //
+ LIMITED_METHOD_CONTRACT;
+}
+
+bool EEDbgInterfaceImpl::StartSuspendForDebug(AppDomain *pAppDomain,
+ BOOL fHoldingThreadStoreLock)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB,LL_INFO1000, "EEDbgII:SSFD: start suspend on AD:0x%x\n",
+ pAppDomain));
+
+ bool result = Thread::SysStartSuspendForDebug(pAppDomain);
+
+ return result;
+}
+
+bool EEDbgInterfaceImpl::SweepThreadsForDebug(bool forceSync)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ DISABLED(GC_TRIGGERS); // Called by unmanaged threads.
+ }
+ CONTRACTL_END;
+
+ return Thread::SysSweepThreadsForDebug(forceSync);
+}
+
+void EEDbgInterfaceImpl::ResumeFromDebug(AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Thread::SysResumeFromDebug(pAppDomain);
+}
+
+void EEDbgInterfaceImpl::MarkThreadForDebugSuspend(Thread* pRuntimeThread)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pRuntimeThread));
+ }
+ CONTRACTL_END;
+
+ pRuntimeThread->MarkForDebugSuspend();
+}
+
+void EEDbgInterfaceImpl::MarkThreadForDebugStepping(Thread* pRuntimeThread,
+ bool onOff)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pRuntimeThread));
+ }
+ CONTRACTL_END;
+
+ pRuntimeThread->MarkDebuggerIsStepping(onOff);
+}
+
+void EEDbgInterfaceImpl::SetThreadFilterContext(Thread *thread,
+ CONTEXT *context)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(thread));
+ }
+ CONTRACTL_END;
+
+ thread->SetFilterContext(context);
+}
+
+CONTEXT *EEDbgInterfaceImpl::GetThreadFilterContext(Thread *thread)
+{
+ CONTRACT(CONTEXT *)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(thread));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN thread->GetFilterContext();
+}
+
+VOID * EEDbgInterfaceImpl::GetThreadDebuggerWord(Thread *thread)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(thread));
+ }
+ CONTRACTL_END;
+
+ return thread->m_debuggerWord;
+}
+
+void EEDbgInterfaceImpl::SetThreadDebuggerWord(Thread *thread,
+ VOID *dw)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(thread));
+ }
+ CONTRACTL_END;
+
+ thread->m_debuggerWord = dw;
+}
+
+BOOL EEDbgInterfaceImpl::IsManagedNativeCode(const BYTE *address)
+{
+ WRAPPER_NO_CONTRACT;
+ return ExecutionManager::IsManagedCode((PCODE)address);
+}
+
+MethodDesc *EEDbgInterfaceImpl::GetNativeCodeMethodDesc(const PCODE address)
+{
+ CONTRACT(MethodDesc *)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(address != NULL);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN ExecutionManager::GetCodeMethodDesc(address);
+}
+
+// IsInPrologOrEpilog doesn't seem to be used for code that uses GC_INFO_DECODER
+BOOL EEDbgInterfaceImpl::IsInPrologOrEpilog(const BYTE *address,
+ size_t* prologSize)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ *prologSize = 0;
+
+ EECodeInfo codeInfo((PCODE)address);
+
+ if (codeInfo.IsValid())
+ {
+ LPVOID methodInfo = codeInfo.GetGCInfo();
+
+ if (codeInfo.GetCodeManager()->IsInPrologOrEpilog(codeInfo.GetRelOffset(), methodInfo, prologSize))
+ {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+//
+// Given a collection of native offsets of a certain function, determine if each falls
+// within an exception filter or handler.
+//
+void EEDbgInterfaceImpl::DetermineIfOffsetsInFilterOrHandler(const BYTE *functionAddress,
+ DebugOffsetToHandlerInfo *pOffsetToHandlerInfo,
+ unsigned offsetToHandlerInfoLength)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ EECodeInfo codeInfo((PCODE)functionAddress);
+
+ if (!codeInfo.IsValid())
+ {
+ return;
+ }
+
+ // Loop through all the exception handling clause information for the method
+ EH_CLAUSE_ENUMERATOR pEnumState;
+ unsigned EHCount = codeInfo.GetJitManager()->InitializeEHEnumeration(codeInfo.GetMethodToken(), &pEnumState);
+ if (EHCount == 0)
+ {
+ return;
+ }
+
+ for (ULONG i=0; i < EHCount; i++)
+ {
+ EE_ILEXCEPTION_CLAUSE EHClause;
+ codeInfo.GetJitManager()->GetNextEHClause(&pEnumState, &EHClause);
+
+ // Check each EH clause against each offset of interest.
+        // Note that this could be time-consuming for very long methods (O(n^2)).
+        // We could make this linear if we could guarantee that the two lists are sorted.
+ for (ULONG j=0; j < offsetToHandlerInfoLength; j++)
+ {
+ SIZE_T offs = pOffsetToHandlerInfo[j].offset;
+
+            // entries with an offset of -1 indicate slots to skip
+ if (offs == (SIZE_T) -1)
+ {
+ continue;
+ }
+ // For a filter, the handler comes directly after it so check from start of filter
+ // to end of handler
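+            // Illustrative layout (hypothetical offsets): with FilterOffset == 0x10,
+            // HandlerStartPC == 0x30 and HandlerEndPC == 0x50, both an offset of 0x20
+            // (in the filter) and 0x40 (in the handler) fall in the [0x10, 0x50) range.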
+ if (IsFilterHandler(&EHClause))
+ {
+ if (offs >= EHClause.FilterOffset && offs < EHClause.HandlerEndPC)
+ {
+ pOffsetToHandlerInfo[j].isInFilterOrHandler = TRUE;
+ }
+ }
+ // For anything else, only care about handler range
+ else if (offs >= EHClause.HandlerStartPC && offs < EHClause.HandlerEndPC)
+ {
+ pOffsetToHandlerInfo[j].isInFilterOrHandler = TRUE;
+ }
+ }
+ }
+}
+#endif // #ifndef DACCESS_COMPILE
+
+void EEDbgInterfaceImpl::GetMethodRegionInfo(const PCODE pStart,
+ PCODE * pCold,
+ size_t *hotSize,
+ size_t *coldSize)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pCold));
+ PRECONDITION(CheckPointer(hotSize));
+ PRECONDITION(CheckPointer(coldSize));
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ IJitManager::MethodRegionInfo methodRegionInfo = {NULL, 0, NULL, 0};
+
+ EECodeInfo codeInfo(pStart);
+
+    if (codeInfo.IsValid())
+ {
+ codeInfo.GetMethodRegionInfo(&methodRegionInfo);
+ }
+
+ *pCold = methodRegionInfo.coldStartAddress;
+ *hotSize = methodRegionInfo.hotSize;
+ *coldSize = methodRegionInfo.coldSize;
+}
+
+#if defined(WIN64EXCEPTIONS)
+DWORD EEDbgInterfaceImpl::GetFuncletStartOffsets(const BYTE *pStart, DWORD* pStartOffsets, DWORD dwLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pStart));
+ }
+ CONTRACTL_END;
+
+ EECodeInfo codeInfo((PCODE)pStart);
+ _ASSERTE(codeInfo.IsValid());
+
+ return codeInfo.GetJitManager()->GetFuncletStartOffsets(codeInfo.GetMethodToken(), pStartOffsets, dwLength);
+}
+
+StackFrame EEDbgInterfaceImpl::FindParentStackFrame(CrawlFrame* pCF)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pCF));
+ }
+ CONTRACTL_END;
+
+#if defined(DACCESS_COMPILE)
+ DacNotImpl();
+ return StackFrame();
+
+#else // !DACCESS_COMPILE
+ return ExceptionTracker::FindParentStackFrameForStackWalk(pCF);
+
+#endif // !DACCESS_COMPILE
+}
+#endif // WIN64EXCEPTIONS
+
+#ifndef DACCESS_COMPILE
+size_t EEDbgInterfaceImpl::GetFunctionSize(MethodDesc *pFD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pFD));
+ }
+ CONTRACTL_END;
+
+ PCODE methodStart = pFD->GetNativeCode();
+
+ if (methodStart == NULL)
+ return 0;
+
+ EECodeInfo codeInfo(methodStart);
+
+ PTR_VOID methodInfo = codeInfo.GetGCInfo();
+
+ return codeInfo.GetCodeManager()->GetFunctionSize(methodInfo);
+}
+#endif //!DACCESS_COMPILE
+
+const PCODE EEDbgInterfaceImpl::GetFunctionAddress(MethodDesc *pFD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pFD));
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ return pFD->GetNativeCode();
+}
+
+#ifndef DACCESS_COMPILE
+
+void EEDbgInterfaceImpl::DisablePreemptiveGC(void)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ DISABLED(GC_TRIGGERS); // Disabled because disabled in RareDisablePreemptiveGC()
+ }
+ CONTRACTL_END;
+
+ ::GetThread()->DisablePreemptiveGC();
+}
+
+void EEDbgInterfaceImpl::EnablePreemptiveGC(void)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ DISABLED(GC_TRIGGERS); // Disabled because disabled in RareEnablePreemptiveGC()
+ }
+ CONTRACTL_END;
+
+ ::GetThread()->EnablePreemptiveGC();
+}
+
+bool EEDbgInterfaceImpl::IsPreemptiveGCDisabled(void)
+{
+ CONTRACTL
+ {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return ::GetThread()->PreemptiveGCDisabled() != 0;
+}
+
+DWORD EEDbgInterfaceImpl::MethodDescIsStatic(MethodDesc *pFD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pFD));
+ }
+ CONTRACTL_END;
+
+ return pFD->IsStatic();
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+Module *EEDbgInterfaceImpl::MethodDescGetModule(MethodDesc *pFD)
+{
+ CONTRACT(Module *)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pFD));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN pFD->GetModule();
+}
+
+#ifndef DACCESS_COMPILE
+
+COR_ILMETHOD* EEDbgInterfaceImpl::MethodDescGetILHeader(MethodDesc *pFD)
+{
+ CONTRACT(COR_ILMETHOD *)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pFD));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ if (pFD->IsIL())
+ {
+ RETURN pFD->GetILHeader();
+ }
+
+ RETURN NULL;
+}
+
+ULONG EEDbgInterfaceImpl::MethodDescGetRVA(MethodDesc *pFD)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pFD));
+ }
+ CONTRACTL_END;
+
+ return pFD->GetRVA();
+}
+
+MethodDesc *EEDbgInterfaceImpl::FindLoadedMethodRefOrDef(Module* pModule,
+ mdToken memberRef)
+{
+ CONTRACT(MethodDesc *)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pModule));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // Must have a MemberRef or a MethodDef
+ mdToken tkType = TypeFromToken(memberRef);
+ _ASSERTE((tkType == mdtMemberRef) || (tkType == mdtMethodDef));
+
+ if (tkType == mdtMemberRef)
+ {
+ RETURN pModule->LookupMemberRefAsMethod(memberRef);
+ }
+
+ RETURN pModule->LookupMethodDef(memberRef);
+}
+
+MethodDesc *EEDbgInterfaceImpl::LoadMethodDef(Module* pModule,
+ mdMethodDef methodDef,
+ DWORD numGenericArgs,
+ TypeHandle *pGenericArgs,
+ TypeHandle *pOwnerType)
+{
+ CONTRACT(MethodDesc *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pModule));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ _ASSERTE(TypeFromToken(methodDef) == mdtMethodDef);
+
+ // The generic class and method args are sent as one array
+ // by the debugger. We now split this into two by finding out how
+ // many generic args are for the class and how many for the
+ // method. The actual final checks are done in MemberLoader::GetMethodDescFromMethodDef.
+
+ DWORD numGenericClassArgs = 0;
+ TypeHandle *pGenericClassArgs = NULL;
+ DWORD nGenericMethodArgs = 0;
+ TypeHandle *pGenericMethodArgs = NULL;
+ mdTypeDef typeDef = 0;
+
+ TypeHandle thOwner;
+
+ BOOL forceRemotable = FALSE;
+ if (numGenericArgs != 0)
+ {
+ HRESULT hr = pModule->GetMDImport()->GetParentToken(methodDef, &typeDef);
+ if (FAILED(hr))
+ COMPlusThrowHR(E_INVALIDARG);
+
+ TypeHandle thClass = LoadClass(pModule, typeDef);
+ _ASSERTE(!thClass.IsNull());
+
+ numGenericClassArgs = thClass.GetNumGenericArgs();
+ if (numGenericArgs < numGenericClassArgs)
+ {
+ COMPlusThrowHR(COR_E_TARGETPARAMCOUNT);
+ }
+ pGenericClassArgs = (numGenericClassArgs > 0) ? pGenericArgs : NULL;
+ nGenericMethodArgs = (numGenericArgs >= numGenericClassArgs) ? (numGenericArgs - numGenericClassArgs) : 0;
+ pGenericMethodArgs = (nGenericMethodArgs > 0) ? (pGenericArgs + numGenericClassArgs) : NULL;
+
+#ifdef FEATURE_COMINTEROP
+ if (numGenericClassArgs > 0)
+ {
+ thOwner = ClassLoader::LoadGenericInstantiationThrowing(pModule, typeDef, Instantiation(pGenericClassArgs, numGenericClassArgs));
+ // for classes supporting generic interop force remotable method descs
+ forceRemotable = thOwner.GetMethodTable()->SupportsGenericInterop(TypeHandle::Interop_ManagedToNative);
+ }
+#endif // FEATURE_COMINTEROP
+ }
+
+ MethodDesc *pRes = MemberLoader::GetMethodDescFromMethodDef(pModule,
+ methodDef,
+ Instantiation(pGenericClassArgs, numGenericClassArgs),
+ Instantiation(pGenericMethodArgs, nGenericMethodArgs),
+ forceRemotable);
+
+ // The ownerType is extra information that augments the specification of an interface MD.
+ // It is only needed if generics code sharing is supported, because otherwise MDs are
+ // fully self-describing.
+ if (pOwnerType != NULL)
+ {
+ if (numGenericClassArgs != 0)
+ {
+ if (thOwner.IsNull())
+ *pOwnerType = ClassLoader::LoadGenericInstantiationThrowing(pModule, typeDef, Instantiation(pGenericClassArgs, numGenericClassArgs));
+ else
+ *pOwnerType = thOwner;
+ }
+ else
+ {
+ *pOwnerType = TypeHandle(pRes->GetMethodTable());
+ }
+ }
+ RETURN (pRes);
+
+}
+
+
+TypeHandle EEDbgInterfaceImpl::FindLoadedClass(Module *pModule,
+ mdTypeDef classToken)
+{
+ CONTRACT(TypeHandle)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACT_END;
+
+ RETURN ClassLoader::LookupTypeDefOrRefInModule(pModule, classToken);
+
+}
+
+TypeHandle EEDbgInterfaceImpl::FindLoadedInstantiation(Module *pModule,
+ mdTypeDef typeDef,
+ DWORD ntypars,
+ TypeHandle *inst)
+{
+ // Lookup operations run the class loader in non-load mode.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+
+    // SCAN violation: this can be suppressed, since work is currently underway to
+    // DAC-ize all this code, at which point the issue will become moot.
+ CONTRACT_VIOLATION(FaultViolation);
+
+ return ClassLoader::LoadGenericInstantiationThrowing(pModule, typeDef, Instantiation(inst, ntypars),
+ ClassLoader::DontLoadTypes);
+}
+
+TypeHandle EEDbgInterfaceImpl::FindLoadedFnptrType(TypeHandle *inst,
+ DWORD ntypars)
+{
+ // Lookup operations run the class loader in non-load mode.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ //<TODO> : CALLCONV? </TODO>
+ return ClassLoader::LoadFnptrTypeThrowing(0, ntypars, inst,
+ // <TODO> should this be FailIfNotLoaded? - NO - although we may
+ // want to debug unrestored VCs, we can't do it because the debug API
+ // is not set up to handle them </TODO>
+ // == FailIfNotLoadedOrNotRestored
+ ClassLoader::DontLoadTypes);
+}
+
+TypeHandle EEDbgInterfaceImpl::FindLoadedPointerOrByrefType(CorElementType et,
+ TypeHandle elemtype)
+{
+ // Lookup operations run the class loader in non-load mode.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ return ClassLoader::LoadPointerOrByrefTypeThrowing(et, elemtype,
+ // <TODO> should this be FailIfNotLoaded? - NO - although we may
+ // want to debug unrestored VCs, we can't do it because the debug API
+ // is not set up to handle them </TODO>
+ // == FailIfNotLoadedOrNotRestored
+ ClassLoader::DontLoadTypes);
+}
+
+TypeHandle EEDbgInterfaceImpl::FindLoadedArrayType(CorElementType et,
+ TypeHandle elemtype,
+ unsigned rank)
+{
+ // Lookup operations run the class loader in non-load mode.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ if (elemtype.IsNull())
+ return TypeHandle();
+ else
+ return ClassLoader::LoadArrayTypeThrowing(elemtype, et, rank,
+ // <TODO> should this be FailIfNotLoaded? - NO - although we may
+ // want to debug unrestored VCs, we can't do it because the debug API
+ // is not set up to handle them </TODO>
+ // == FailIfNotLoadedOrNotRestored
+ ClassLoader::DontLoadTypes );
+}
+
+
+TypeHandle EEDbgInterfaceImpl::FindLoadedElementType(CorElementType et)
+{
+ // Lookup operations run the class loader in non-load mode.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ MethodTable *m = MscorlibBinder::GetElementType(et);
+
+ return TypeHandle(m);
+}
+
+TypeHandle EEDbgInterfaceImpl::LoadClass(Module *pModule,
+ mdTypeDef classToken)
+{
+ CONTRACT(TypeHandle)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACT_END;
+
+ RETURN ClassLoader::LoadTypeDefOrRefThrowing(pModule, classToken,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+
+}
+
+TypeHandle EEDbgInterfaceImpl::LoadInstantiation(Module *pModule,
+ mdTypeDef typeDef,
+ DWORD ntypars,
+ TypeHandle *inst)
+{
+ CONTRACT(TypeHandle)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACT_END;
+
+ RETURN ClassLoader::LoadGenericInstantiationThrowing(pModule, typeDef, Instantiation(inst, ntypars));
+}
+
+TypeHandle EEDbgInterfaceImpl::LoadArrayType(CorElementType et,
+ TypeHandle elemtype,
+ unsigned rank)
+{
+ CONTRACT(TypeHandle)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACT_END;
+
+ if (elemtype.IsNull())
+ RETURN TypeHandle();
+ else
+ RETURN ClassLoader::LoadArrayTypeThrowing(elemtype, et, rank);
+}
+
+TypeHandle EEDbgInterfaceImpl::LoadPointerOrByrefType(CorElementType et,
+ TypeHandle elemtype)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ return ClassLoader::LoadPointerOrByrefTypeThrowing(et, elemtype);
+}
+
+TypeHandle EEDbgInterfaceImpl::LoadFnptrType(TypeHandle *inst,
+ DWORD ntypars)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ /* @TODO : CALLCONV? */
+ return ClassLoader::LoadFnptrTypeThrowing(0, ntypars, inst);
+}
+
+TypeHandle EEDbgInterfaceImpl::LoadElementType(CorElementType et)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ MethodTable *m = MscorlibBinder::GetElementType(et);
+
+ if (m == NULL)
+ {
+ return TypeHandle();
+ }
+
+ return TypeHandle(m);
+}
+
+
+HRESULT EEDbgInterfaceImpl::GetMethodImplProps(Module *pModule,
+ mdToken tk,
+ DWORD *pRVA,
+ DWORD *pImplFlags)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ return pModule->GetMDImport()->GetMethodImplProps(tk, pRVA, pImplFlags);
+}
+
+HRESULT EEDbgInterfaceImpl::GetParentToken(Module *pModule,
+ mdToken tk,
+ mdToken *pParentToken)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ return pModule->GetMDImport()->GetParentToken(tk, pParentToken);
+}
+
+void EEDbgInterfaceImpl::MarkDebuggerAttached(void)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ g_CORDebuggerControlFlags |= DBCF_ATTACHED;
+ g_CORDebuggerControlFlags &= ~DBCF_PENDING_ATTACH;
+}
+
+void EEDbgInterfaceImpl::MarkDebuggerUnattached(void)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ g_CORDebuggerControlFlags &= ~DBCF_ATTACHED;
+}
+
+
+#ifdef EnC_SUPPORTED
+
+// Apply an EnC edit to the specified module
+HRESULT EEDbgInterfaceImpl::EnCApplyChanges(EditAndContinueModule *pModule,
+ DWORD cbMetadata,
+ BYTE *pMetadata,
+ DWORD cbIL,
+ BYTE *pIL)
+{
+ LOG((LF_ENC, LL_INFO100, "EncApplyChanges\n"));
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ DISABLED(THROWS);
+ DISABLED(GC_TRIGGERS);
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ return pModule->ApplyEditAndContinue(cbMetadata, pMetadata, cbIL, pIL);
+}
+
+// Remap execution to the latest version of an edited method
+// This function should never return.
+void EEDbgInterfaceImpl::ResumeInUpdatedFunction(EditAndContinueModule *pModule,
+ MethodDesc *pFD,
+ void *debuggerFuncHandle,
+ SIZE_T resumeIP,
+ CONTEXT *pContext)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ DISABLED(THROWS);
+ DISABLED(GC_TRIGGERS);
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ pModule->ResumeInUpdatedFunction(pFD,
+ debuggerFuncHandle,
+ resumeIP,
+ pContext);
+}
+
+#endif // EnC_SUPPORTED
+
+bool EEDbgInterfaceImpl::CrawlFrameIsGcSafe(CrawlFrame *pCF)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pCF));
+ }
+ CONTRACTL_END;
+
+ return pCF->IsGcSafe();
+}
+
+bool EEDbgInterfaceImpl::IsStub(const BYTE *ip)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // IsStub will catch any exceptions and return false.
+ return StubManager::IsStub((PCODE) ip) != FALSE;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+// static
+bool EEDbgInterfaceImpl::DetectHandleILStubs(Thread *thread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return thread->DetectHandleILStubsForDebugger();
+}
+
+bool EEDbgInterfaceImpl::TraceStub(const BYTE *ip,
+ TraceDestination *trace)
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return StubManager::TraceStub((PCODE) ip, trace) != FALSE;
+#else
+ DacNotImpl();
+ return false;
+#endif // #ifndef DACCESS_COMPILE
+}
+
+#ifndef DACCESS_COMPILE
+
+bool EEDbgInterfaceImpl::FollowTrace(TraceDestination *trace)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return StubManager::FollowTrace(trace) != FALSE;
+}
+
+bool EEDbgInterfaceImpl::TraceFrame(Thread *thread,
+ Frame *frame,
+ BOOL fromPatch,
+ TraceDestination *trace,
+ REGDISPLAY *regs)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ DISABLED(GC_TRIGGERS); // This is not a bug - the debugger can call this on an un-managed thread.
+ PRECONDITION(CheckPointer(frame));
+ }
+ CONTRACTL_END;
+
+ bool fResult = frame->TraceFrame(thread, fromPatch, trace, regs) != FALSE;
+
+#ifdef _DEBUG
+ StubManager::DbgWriteLog("Doing TraceFrame on frame=0x%p (fromPatch=%d), yeilds:\n", frame, fromPatch);
+ if (fResult)
+ {
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ FAULT_NOT_FATAL();
+ SString buffer;
+ StubManager::DbgWriteLog(" td=%S\n", trace->DbgToString(buffer));
+ }
+ else
+ {
+ StubManager::DbgWriteLog(" false (this frame does not expect to call managed code).\n");
+ }
+#endif
+ return fResult;
+}
+
+bool EEDbgInterfaceImpl::TraceManager(Thread *thread,
+ StubManager *stubManager,
+ TraceDestination *trace,
+ CONTEXT *context,
+ BYTE **pRetAddr)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(stubManager));
+ }
+ CONTRACTL_END;
+
+ bool fResult = false;
+
+ EX_TRY
+ {
+ fResult = stubManager->TraceManager(thread, trace, context, pRetAddr) != FALSE;
+ }
+ EX_CATCH
+ {
+ // We never expect TraceManager() to fail and throw an exception,
+ // so we should never hit this assertion.
+ _ASSERTE(!"Fail to trace a stub through TraceManager()");
+ fResult = false;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+#ifdef _DEBUG
+ StubManager::DbgWriteLog("Doing TraceManager on %s (0x%p) for IP=0x%p, yields:\n", stubManager->DbgGetName(), stubManager, GetIP(context));
+ if (fResult)
+ {
+ // Should never be on helper thread
+ FAULT_NOT_FATAL();
+ SString buffer;
+ StubManager::DbgWriteLog(" td=%S\n", trace->DbgToString(buffer));
+ }
+ else
+ {
+ StubManager::DbgWriteLog(" false (this stub does not expect to call managed code).\n");
+ }
+#endif
+ return fResult;
+}
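+
+// Illustrative sketch (not part of the original sources): the trace
+// primitives above compose when the debugger works out where a stub will
+// eventually land. 'pEEInterface' and 'ip' are assumed names for a caller
+// holding this interface and a stub address.
+//
+//   TraceDestination trace;
+//   if (pEEInterface->TraceStub(ip, &trace) &&
+//       pEEInterface->FollowTrace(&trace))
+//   {
+//       // 'trace' now describes the final destination, e.g. a managed
+//       // code address where the debugger can place a breakpoint patch.
+//   }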
+
+void EEDbgInterfaceImpl::EnableTraceCall(Thread *thread)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(thread));
+ }
+ CONTRACTL_END;
+
+ thread->IncrementTraceCallCount();
+}
+
+void EEDbgInterfaceImpl::DisableTraceCall(Thread *thread)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(thread));
+ }
+ CONTRACTL_END;
+
+ thread->DecrementTraceCallCount();
+}
+
+#ifdef FEATURE_IMPLICIT_TLS
+EXTERN_C UINT32 _tls_index;
+#endif
+
+void EEDbgInterfaceImpl::GetRuntimeOffsets(SIZE_T *pTLSIndex,
+ SIZE_T *pTLSIsSpecialIndex,
+ SIZE_T *pTLSCantStopIndex,
+ SIZE_T* pTLSIndexOfPredefs,
+ SIZE_T *pEEThreadStateOffset,
+ SIZE_T *pEEThreadStateNCOffset,
+ SIZE_T *pEEThreadPGCDisabledOffset,
+ DWORD *pEEThreadPGCDisabledValue,
+ SIZE_T *pEEThreadDebuggerWordOffset,
+ SIZE_T *pEEThreadFrameOffset,
+ SIZE_T *pEEThreadMaxNeededSize,
+ DWORD *pEEThreadSteppingStateMask,
+ DWORD *pEEMaxFrameValue,
+ SIZE_T *pEEThreadDebuggerFilterContextOffset,
+ SIZE_T *pEEThreadCantStopOffset,
+ SIZE_T *pEEFrameNextOffset,
+ DWORD *pEEIsManagedExceptionStateMask)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pTLSIndex));
+ PRECONDITION(CheckPointer(pTLSIsSpecialIndex));
+ PRECONDITION(CheckPointer(pEEThreadStateOffset));
+ PRECONDITION(CheckPointer(pTLSIndexOfPredefs));
+ PRECONDITION(CheckPointer(pEEThreadStateNCOffset));
+ PRECONDITION(CheckPointer(pEEThreadPGCDisabledOffset));
+ PRECONDITION(CheckPointer(pEEThreadPGCDisabledValue));
+ PRECONDITION(CheckPointer(pEEThreadDebuggerWordOffset));
+ PRECONDITION(CheckPointer(pEEThreadFrameOffset));
+ PRECONDITION(CheckPointer(pEEThreadMaxNeededSize));
+ PRECONDITION(CheckPointer(pEEThreadSteppingStateMask));
+ PRECONDITION(CheckPointer(pEEMaxFrameValue));
+ PRECONDITION(CheckPointer(pEEThreadDebuggerFilterContextOffset));
+ PRECONDITION(CheckPointer(pEEThreadCantStopOffset));
+ PRECONDITION(CheckPointer(pEEFrameNextOffset));
+ PRECONDITION(CheckPointer(pEEIsManagedExceptionStateMask));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_IMPLICIT_TLS
+ *pTLSIndex = _tls_index;
+#else
+ *pTLSIndex = GetThreadTLSIndex();
+#endif
+ *pTLSIsSpecialIndex = TlsIdx_ThreadType;
+ *pTLSCantStopIndex = TlsIdx_CantStopCount;
+ *pTLSIndexOfPredefs = CExecutionEngine::TlsIndex;
+ *pEEThreadStateOffset = Thread::GetOffsetOfState();
+ *pEEThreadStateNCOffset = Thread::GetOffsetOfStateNC();
+ *pEEThreadPGCDisabledOffset = Thread::GetOffsetOfGCFlag();
+ *pEEThreadPGCDisabledValue = 1; // A little obvious, but just in case...
+ *pEEThreadDebuggerWordOffset = Thread::GetOffsetOfDebuggerWord();
+ *pEEThreadFrameOffset = Thread::GetOffsetOfCurrentFrame();
+ *pEEThreadMaxNeededSize = sizeof(Thread);
+ *pEEThreadDebuggerFilterContextOffset = Thread::GetOffsetOfDebuggerFilterContext();
+ *pEEThreadCantStopOffset = Thread::GetOffsetOfCantStop();
+ *pEEThreadSteppingStateMask = Thread::TSNC_DebuggerIsStepping;
+ *pEEMaxFrameValue = (DWORD)(size_t)FRAME_TOP; // <TODO> should this be size_t for 64bit?</TODO>
+ *pEEFrameNextOffset = Frame::GetOffsetOfNextLink();
+ *pEEIsManagedExceptionStateMask = Thread::TSNC_DebuggerIsManagedException;
+}
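+
+// Consumption sketch (hypothetical, not from the original sources): with the
+// offsets above, the right side of the debugger can read EE thread state
+// out-of-process without sharing EE headers. 'offsets' and 'pEEThread' are
+// illustrative names only.
+//
+//   BYTE state;
+//   ReadProcessMemory(hProcess,
+//                     (BYTE*)pEEThread + offsets.m_EEThreadStateOffset,
+//                     &state, sizeof(state), NULL);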
+
+void EEDbgInterfaceImpl::DebuggerModifyingLogSwitch (int iNewLevel,
+ const WCHAR *pLogSwitchName)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Log::DebuggerModifyingLogSwitch (iNewLevel, pLogSwitchName);
+}
+
+
+HRESULT EEDbgInterfaceImpl::SetIPFromSrcToDst(Thread *pThread,
+ SLOT addrStart,
+ DWORD offFrom,
+ DWORD offTo,
+ bool fCanSetIPOnly,
+ PREGDISPLAY pReg,
+ PCONTEXT pCtx,
+ void *pDji,
+ EHRangeTree *pEHRT)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ return ::SetIPFromSrcToDst(pThread,
+ addrStart,
+ offFrom,
+ offTo,
+ fCanSetIPOnly,
+ pReg,
+ pCtx,
+ pDji,
+ pEHRT);
+
+}
+
+void EEDbgInterfaceImpl::SetDebugState(Thread *pThread,
+ CorDebugThreadState state)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pThread));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(state == THREAD_SUSPEND || state == THREAD_RUN);
+
+ LOG((LF_CORDB,LL_INFO10000,"EEDbg:Setting thread 0x%x (ID:0x%x) to 0x%x\n", pThread, pThread->GetThreadId(), state));
+
+ if (state == THREAD_SUSPEND)
+ {
+ pThread->SetThreadStateNC(Thread::TSNC_DebuggerUserSuspend);
+ }
+ else
+ {
+ pThread->ResetThreadStateNC(Thread::TSNC_DebuggerUserSuspend);
+ }
+}
+
+void EEDbgInterfaceImpl::SetAllDebugState(Thread *et,
+ CorDebugThreadState state)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Thread *pThread = NULL;
+
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ {
+ if (pThread != et)
+ {
+ SetDebugState(pThread, state);
+ }
+ }
+}
+
+// This is pretty much copied from VM\COMSynchronizable's
+// INT32 __stdcall ThreadNative::GetThreadState, so propagate changes
+// to both functions.
+// This just gets the user state from the EE's perspective (hence "partial").
+CorDebugUserState EEDbgInterfaceImpl::GetPartialUserState(Thread *pThread)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pThread));
+ }
+ CONTRACTL_END;
+
+ Thread::ThreadState ts = pThread->GetSnapshotState();
+ unsigned ret = 0;
+
+ if (ts & Thread::TS_Background)
+ {
+ ret |= (unsigned)USER_BACKGROUND;
+ }
+
+ if (ts & Thread::TS_Unstarted)
+ {
+ ret |= (unsigned)USER_UNSTARTED;
+ }
+
+ // Don't report a StopRequested if the thread has actually stopped.
+ if (ts & Thread::TS_Dead)
+ {
+ ret |= (unsigned)USER_STOPPED;
+ }
+
+ if (ts & Thread::TS_Interruptible)
+ {
+ ret |= (unsigned)USER_WAIT_SLEEP_JOIN;
+ }
+
+ // Don't report a SuspendRequested if the thread has actually suspended.
+ if ((ts & Thread::TS_UserSuspendPending) && (ts & Thread::TS_SyncSuspended))
+ {
+ ret |= (unsigned)USER_SUSPENDED;
+ }
+ else if (ts & Thread::TS_UserSuspendPending)
+ {
+ ret |= (unsigned)USER_SUSPEND_REQUESTED;
+ }
+
+ LOG((LF_CORDB,LL_INFO1000, "EEDbgII::GUS: thread 0x%x (id:0x%x)"
+ " userThreadState is 0x%x\n", pThread, pThread->GetThreadId(), ret));
+
+ return (CorDebugUserState)ret;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void
+EEDbgInterfaceImpl::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ DAC_ENUM_VTHIS();
+}
+
+#endif
+
+unsigned EEDbgInterfaceImpl::GetSizeForCorElementType(CorElementType etyp)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return (::GetSizeForCorElementType(etyp));
+}
+
+
+#ifndef DACCESS_COMPILE
+/*
+ * ObjIsInstanceOf
+ *
+ * Supplies the internal VM implementation of this type check to the
+ * debugger left-side.
+ *
+ */
+BOOL EEDbgInterfaceImpl::ObjIsInstanceOf(Object *pElement, TypeHandle toTypeHnd)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return (::ObjIsInstanceOf(pElement, toTypeHnd));
+}
+#endif
+
+/*
+ * ClearAllDebugInterfaceReferences
+ *
+ * This method is called by the debugging part of the runtime to notify
+ * that the debugger resources are no longer valid and that any internal
+ * references to them must be nulled out.
+ *
+ * Parameters:
+ * None.
+ *
+ * Returns:
+ * None.
+ *
+ */
+void EEDbgInterfaceImpl::ClearAllDebugInterfaceReferences()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ g_pDebugInterface = NULL;
+}
+
+#ifndef DACCESS_COMPILE
+#ifdef _DEBUG
+/*
+ * ObjectRefFlush
+ *
+ * Flushes all debug tracking information for object referencing.
+ *
+ * Parameters:
+ * pThread - The target thread to flush object references of.
+ *
+ * Returns:
+ * None.
+ *
+ */
+void EEDbgInterfaceImpl::ObjectRefFlush(Thread *pThread)
+{
+ WRAPPER_NO_CONTRACT;
+
+ Thread::ObjectRefFlush(pThread);
+}
+#endif
+#endif
+
+#endif // DEBUGGING_SUPPORTED
diff --git a/src/vm/eedbginterfaceimpl.h b/src/vm/eedbginterfaceimpl.h
new file mode 100644
index 0000000000..5b59f897d0
--- /dev/null
+++ b/src/vm/eedbginterfaceimpl.h
@@ -0,0 +1,348 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*
+ *
+ * COM+99 EE to Debugger Interface Implementation
+ *
+ */
+#ifndef _eedbginterfaceimpl_h_
+#define _eedbginterfaceimpl_h_
+
+#ifdef DEBUGGING_SUPPORTED
+
+#include "common.h"
+#include "corpriv.h"
+#include "hash.h"
+#include "class.h"
+#include "excep.h"
+#include "field.h"
+#include "eetwain.h"
+#include "jitinterface.h"
+#include "stubmgr.h"
+
+#include "eedbginterface.h"
+#include "debugdebugger.h"
+
+#ifdef FEATURE_PREJIT
+#include "corcompile.h"
+#endif // FEATURE_PREJIT
+
+#include "eeconfig.h"
+#include "pefile.h"
+
+class EEDbgInterfaceImpl : public EEDebugInterface
+{
+ VPTR_VTABLE_CLASS(EEDbgInterfaceImpl, EEDebugInterface);
+
+public:
+
+#ifndef DACCESS_COMPILE
+
+ //
+ // Setup and global data used by this interface.
+ //
+ static FORCEINLINE void Init(void)
+ {
+ g_pEEDbgInterfaceImpl = new EEDbgInterfaceImpl(); // new throws on failure
+ }
+
+ //
+ // Cleanup any global data used by this interface.
+ //
+ static void Terminate(void);
+
+#endif // #ifndef DACCESS_COMPILE
+
+ Thread* GetThread(void);
+
+ void SetEEThreadPtr(VOID* newPtr);
+
+ StackWalkAction StackWalkFramesEx(Thread* pThread,
+ PREGDISPLAY pRD,
+ PSTACKWALKFRAMESCALLBACK pCallback,
+ VOID* pData,
+ unsigned int flags);
+
+ Frame *GetFrame(CrawlFrame *pCF);
+
+ bool InitRegDisplay(Thread* pThread,
+ const PREGDISPLAY pRD,
+ const PT_CONTEXT pctx,
+ bool validContext);
+
+ BOOL IsStringObject(Object* o);
+
+ BOOL IsTypedReference(MethodTable* pMT);
+
+ WCHAR* StringObjectGetBuffer(StringObject* so);
+
+ DWORD StringObjectGetStringLength(StringObject* so);
+
+ void* GetObjectFromHandle(OBJECTHANDLE handle);
+
+ OBJECTHANDLE GetHandleFromObject(void *obj,
+ bool fStrongNewRef,
+ AppDomain *pAppDomain);
+
+ void DbgDestroyHandle(OBJECTHANDLE oh,
+ bool fStrongNewRef);
+
+ OBJECTHANDLE GetThreadException(Thread *pThread);
+
+ bool IsThreadExceptionNull(Thread *pThread);
+
+ void ClearThreadException(Thread *pThread);
+
+ bool StartSuspendForDebug(AppDomain *pAppDomain,
+ BOOL fHoldingThreadStoreLock);
+
+ bool SweepThreadsForDebug(bool forceSync);
+
+ void ResumeFromDebug(AppDomain *pAppDomain);
+
+ void MarkThreadForDebugSuspend(Thread* pRuntimeThread);
+
+ void MarkThreadForDebugStepping(Thread* pRuntimeThread,
+ bool onOff);
+
+ void SetThreadFilterContext(Thread *thread,
+ T_CONTEXT *context);
+
+ T_CONTEXT *GetThreadFilterContext(Thread *thread);
+
+ VOID *GetThreadDebuggerWord(Thread *thread);
+
+ void SetThreadDebuggerWord(Thread *thread,
+ VOID *dw);
+
+ BOOL IsManagedNativeCode(const BYTE *address);
+
+ MethodDesc *GetNativeCodeMethodDesc(const PCODE address) DAC_UNEXPECTED();
+
+ BOOL IsInPrologOrEpilog(const BYTE *address,
+ size_t* prologSize);
+
+ void DetermineIfOffsetsInFilterOrHandler(const BYTE *functionAddress,
+ DebugOffsetToHandlerInfo *pOffsetToHandlerInfo,
+ unsigned offsetToHandlerInfoLength);
+
+ void GetMethodRegionInfo(const PCODE pStart,
+ PCODE * pCold,
+ size_t *hotSize,
+ size_t *coldSize);
+
+#if defined(WIN64EXCEPTIONS)
+ DWORD GetFuncletStartOffsets(const BYTE *pStart, DWORD* pStartOffsets, DWORD dwLength);
+ StackFrame FindParentStackFrame(CrawlFrame* pCF);
+#endif // WIN64EXCEPTIONS
+
+ size_t GetFunctionSize(MethodDesc *pFD) DAC_UNEXPECTED();
+
+ const PCODE GetFunctionAddress(MethodDesc *pFD);
+
+ void DisablePreemptiveGC(void);
+
+ void EnablePreemptiveGC(void);
+
+ bool IsPreemptiveGCDisabled(void);
+
+ DWORD MethodDescIsStatic(MethodDesc *pFD);
+
+ Module *MethodDescGetModule(MethodDesc *pFD);
+
+ COR_ILMETHOD* MethodDescGetILHeader(MethodDesc *pFD);
+
+ ULONG MethodDescGetRVA(MethodDesc *pFD);
+
+ MethodDesc *FindLoadedMethodRefOrDef(Module* pModule,
+ mdToken memberRef);
+
+ MethodDesc *LoadMethodDef(Module* pModule,
+ mdMethodDef methodDef,
+ DWORD numGenericArgs = 0,
+ TypeHandle *pGenericArgs = NULL,
+ TypeHandle *pOwnerTypeRes = NULL);
+
+ TypeHandle FindLoadedClass(Module *pModule,
+ mdTypeDef classToken);
+
+ TypeHandle FindLoadedInstantiation(Module *pModule,
+ mdTypeDef typeDef,
+ DWORD numGenericArgs,
+ TypeHandle *pGenericArgs);
+
+ TypeHandle FindLoadedFnptrType(TypeHandle *inst,
+ DWORD ntypars);
+
+ TypeHandle FindLoadedPointerOrByrefType(CorElementType et,
+ TypeHandle elemtype);
+
+ TypeHandle FindLoadedArrayType(CorElementType et,
+ TypeHandle elemtype,
+ unsigned rank);
+
+ TypeHandle FindLoadedElementType(CorElementType et);
+
+ TypeHandle LoadClass(Module *pModule,
+ mdTypeDef classToken);
+
+ TypeHandle LoadInstantiation(Module *pModule,
+ mdTypeDef typeDef,
+ DWORD numGenericArgs,
+ TypeHandle *pGenericArgs);
+
+ TypeHandle LoadArrayType(CorElementType et,
+ TypeHandle elemtype,
+ unsigned rank);
+
+ TypeHandle LoadPointerOrByrefType(CorElementType et,
+ TypeHandle elemtype);
+
+ TypeHandle LoadFnptrType(TypeHandle *inst,
+ DWORD ntypars);
+
+ TypeHandle LoadElementType(CorElementType et);
+
+ __checkReturn
+ HRESULT GetMethodImplProps(Module *pModule,
+ mdToken tk,
+ DWORD *pRVA,
+ DWORD *pImplFlags);
+
+ HRESULT GetParentToken(Module *pModule,
+ mdToken tk,
+ mdToken *pParentToken);
+
+ void MarkDebuggerAttached(void);
+
+ void MarkDebuggerUnattached(void);
+
+#ifdef EnC_SUPPORTED
+
+ // Apply an EnC edit to the specified module
+ HRESULT EnCApplyChanges(EditAndContinueModule *pModule,
+ DWORD cbMetadata,
+ BYTE *pMetadata,
+ DWORD cbIL,
+ BYTE *pIL);
+
+ // Remap execution to the latest version of an edited method.
+ // This function should never return.
+ void ResumeInUpdatedFunction(EditAndContinueModule *pModule,
+ MethodDesc *pFD,
+ void *debuggerFuncHandle,
+ SIZE_T resumeIP,
+ T_CONTEXT *pContext);
+#endif // EnC_SUPPORTED
+
+ bool CrawlFrameIsGcSafe(CrawlFrame *pCF);
+
+ bool IsStub(const BYTE *ip);
+
+ bool DetectHandleILStubs(Thread *thread);
+
+ bool TraceStub(const BYTE *ip,
+ TraceDestination *trace);
+
+ bool FollowTrace(TraceDestination *trace);
+
+ bool TraceFrame(Thread *thread,
+ Frame *frame,
+ BOOL fromPatch,
+ TraceDestination *trace,
+ REGDISPLAY *regs);
+
+ bool TraceManager(Thread *thread,
+ StubManager *stubManager,
+ TraceDestination *trace,
+ T_CONTEXT *context,
+ BYTE **pRetAddr);
+
+ void EnableTraceCall(Thread *thread);
+
+ void DisableTraceCall(Thread *thread);
+
+ void GetRuntimeOffsets(SIZE_T *pTLSIndex,
+ SIZE_T *pTLSIsSpecialIndex,
+ SIZE_T *pTLSCantStopIndex,
+ SIZE_T *pTLSIndexOfPredefs,
+ SIZE_T *pEEThreadStateOffset,
+ SIZE_T *pEEThreadStateNCOffset,
+ SIZE_T *pEEThreadPGCDisabledOffset,
+ DWORD *pEEThreadPGCDisabledValue,
+ SIZE_T *pEEThreadDebuggerWordOffset,
+ SIZE_T *pEEThreadFrameOffset,
+ SIZE_T *pEEThreadMaxNeededSize,
+ DWORD *pEEThreadSteppingStateMask,
+ DWORD *pEEMaxFrameValue,
+ SIZE_T *pEEThreadDebuggerFilterContextOffset,
+ SIZE_T *pEEThreadCantStopOffset,
+ SIZE_T *pEEFrameNextOffset,
+ DWORD *pEEIsManagedExceptionStateMask);
+
+ void DebuggerModifyingLogSwitch (int iNewLevel,
+ const WCHAR *pLogSwitchName);
+
+ HRESULT SetIPFromSrcToDst(Thread *pThread,
+ SLOT addrStart,
+ DWORD offFrom,
+ DWORD offTo,
+ bool fCanSetIPOnly,
+ PREGDISPLAY pReg,
+ PT_CONTEXT pCtx,
+ void *pDji,
+ EHRangeTree *pEHRT);
+
+ void SetDebugState(Thread *pThread,
+ CorDebugThreadState state);
+
+ void SetAllDebugState(Thread *et,
+ CorDebugThreadState state);
+
+ // This is pretty much copied from VM\COMSynchronizable's
+ // INT32 __stdcall ThreadNative::GetThreadState, so propagate changes
+ // to both functions.
+ CorDebugUserState GetPartialUserState(Thread *pThread);
+
+#ifdef FEATURE_PREJIT
+#ifndef DACCESS_COMPILE
+ virtual void SetNGENDebugFlags(BOOL fAllowOpt)
+ {
+ LIMITED_METHOD_CONTRACT;
+ PEFile::SetNGENDebugFlags(fAllowOpt);
+ }
+
+ virtual void GetNGENDebugFlags(BOOL *fAllowOpt)
+ {
+ LIMITED_METHOD_CONTRACT;
+ PEFile::GetNGENDebugFlags(fAllowOpt);
+ }
+#endif
+#endif // FEATURE_PREJIT
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ virtual unsigned GetSizeForCorElementType(CorElementType etyp);
+
+#ifndef DACCESS_COMPILE
+ virtual BOOL ObjIsInstanceOf(Object *pElement, TypeHandle toTypeHnd);
+#endif
+
+ virtual void ClearAllDebugInterfaceReferences(void);
+
+#ifndef DACCESS_COMPILE
+#ifdef _DEBUG
+ virtual void ObjectRefFlush(Thread *pThread);
+#endif
+#endif
+};
+
+#endif // DEBUGGING_SUPPORTED
+
+#endif // _eedbginterfaceimpl_h_
diff --git a/src/vm/eedbginterfaceimpl.inl b/src/vm/eedbginterfaceimpl.inl
new file mode 100644
index 0000000000..6a469c8739
--- /dev/null
+++ b/src/vm/eedbginterfaceimpl.inl
@@ -0,0 +1,123 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//*****************************************************************************
+//*****************************************************************************
+
+#ifndef _EEDBGINTERFACEIMPL_INL_
+#define _EEDBGINTERFACEIMPL_INL_
+
+#include "common.h"
+
+
+// This class only serves as a wrapper for the debugger callbacks.
+// Using this class eliminates the need to check "#ifdef DEBUGGING_SUPPORTED"
+// and "CORDebuggerAttached()".
+class EEToDebuggerExceptionInterfaceWrapper
+{
+ public:
+
+#if defined(DEBUGGING_SUPPORTED) && !defined(DACCESS_COMPILE)
+ static inline bool FirstChanceManagedException(Thread* pThread, SIZE_T currentIP, SIZE_T currentSP)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+ pExState->GetDebuggerState()->SetDebuggerIndicatedFramePointer((LPVOID)currentSP);
+
+ if (CORDebuggerAttached())
+ {
+ // Notify the debugger that we are on the first pass for a managed exception.
+ // Note that this callback is made for every managed frame.
+ return g_pDebugInterface->FirstChanceManagedException(pThread, currentIP, currentSP);
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ static inline void FirstChanceManagedExceptionCatcherFound(Thread* pThread, MethodDesc* pMD, TADDR pMethodAddr, SIZE_T currentSP,
+ EE_ILEXCEPTION_CLAUSE* pEHClause)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+ pExState->GetDebuggerState()->SetDebuggerIndicatedFramePointer((LPVOID)currentSP);
+
+ if (CORDebuggerAttached())
+ {
+ g_pDebugInterface->FirstChanceManagedExceptionCatcherFound(pThread, pMD, pMethodAddr, (PBYTE)currentSP,
+ pEHClause);
+ }
+ }
+
+ static inline void NotifyOfCHFFilter(EXCEPTION_POINTERS * pExceptionInfo, Frame * pFrame)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (CORDebuggerAttached())
+ {
+ g_pDebugInterface->NotifyOfCHFFilter(pExceptionInfo, pFrame);
+ }
+ }
+
+ static inline void ManagedExceptionUnwindBegin(Thread* pThread)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (CORDebuggerAttached())
+ {
+ g_pDebugInterface->ManagedExceptionUnwindBegin(pThread);
+ }
+ }
+
+ static inline void ExceptionFilter(MethodDesc* pMD, TADDR pMethodAddr, SIZE_T offset, BYTE* pStack)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (CORDebuggerAttached())
+ {
+ g_pDebugInterface->ExceptionFilter(pMD, pMethodAddr, offset, pStack);
+ }
+ }
+
+ static inline void ExceptionHandle(MethodDesc* pMD, TADDR pMethodAddr, SIZE_T offset, BYTE* pStack)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (CORDebuggerAttached())
+ {
+ g_pDebugInterface->ExceptionHandle(pMD, pMethodAddr, offset, pStack);
+ }
+ }
+
+#else // !defined(DEBUGGING_SUPPORTED) || defined(DACCESS_COMPILE)
+ static inline bool FirstChanceManagedException(Thread* pThread, SIZE_T currentIP, SIZE_T currentSP) {LIMITED_METHOD_CONTRACT; return false;}
+ static inline void FirstChanceManagedExceptionCatcherFound(Thread* pThread, MethodDesc* pMD, TADDR pMethodAddr, BYTE* currentSP,
+ EE_ILEXCEPTION_CLAUSE* pEHClause) {LIMITED_METHOD_CONTRACT;}
+ static inline void ManagedExceptionUnwindBegin(Thread* pThread) {LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionFilter(MethodDesc* pMD, TADDR pMethodAddr, SIZE_T offset, BYTE* pStack) {LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionHandle(MethodDesc* pMD, TADDR pMethodAddr, SIZE_T offset, BYTE* pStack) {LIMITED_METHOD_CONTRACT;}
+#endif // !defined(DEBUGGING_SUPPORTED) || defined(DACCESS_COMPILE)
+};
+
+
+#endif // _EEDBGINTERFACEIMPL_INL_
diff --git a/src/vm/eehash.cpp b/src/vm/eehash.cpp
new file mode 100644
index 0000000000..c607eefa4c
--- /dev/null
+++ b/src/vm/eehash.cpp
@@ -0,0 +1,537 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: eehash.cpp
+//
+
+//
+
+
+#include "common.h"
+#include "excep.h"
+#include "eehash.h"
+#include "securityattributes.h"
+#include "securitydeclarativecache.h"
+#include "stringliteralmap.h"
+#include "clsload.hpp"
+#include "typectxt.h"
+#include "genericdict.h"
+
+// ============================================================================
+// UTF8 string hash table helper.
+// ============================================================================
+EEHashEntry_t * EEUtf8HashTableHelper::AllocateEntry(LPCUTF8 pKey, BOOL bDeepCopy, void *pHeap)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return NULL;);
+ }
+ CONTRACTL_END
+
+ EEHashEntry_t *pEntry;
+
+ if (bDeepCopy)
+ {
+ DWORD StringLen = (DWORD)strlen(pKey);
+ DWORD BufLen = 0;
+// Review conversion of size_t to DWORD.
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4267)
+#endif
+ if (!ClrSafeInt<DWORD>::addition(StringLen, SIZEOF_EEHASH_ENTRY + sizeof(LPUTF8) + 1, BufLen))
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+ return NULL;
+ pEntry = (EEHashEntry_t *) new (nothrow) BYTE[BufLen];
+ if (!pEntry)
+ return NULL;
+
+ memcpy(pEntry->Key + sizeof(LPUTF8), pKey, StringLen + 1);
+ *((LPUTF8*)pEntry->Key) = (LPUTF8)(pEntry->Key + sizeof(LPUTF8));
+ }
+ else
+ {
+ pEntry = (EEHashEntry_t *) new (nothrow)BYTE[SIZEOF_EEHASH_ENTRY + sizeof(LPUTF8)];
+ if (pEntry)
+ *((LPCUTF8*)pEntry->Key) = pKey;
+ }
+
+ return pEntry;
+}
+
+
+void EEUtf8HashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, void *pHeap)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ delete [] (BYTE*)pEntry;
+}
+
+
+BOOL EEUtf8HashTableHelper::CompareKeys(EEHashEntry_t *pEntry, LPCUTF8 pKey)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ LPCUTF8 pEntryKey = *((LPCUTF8*)pEntry->Key);
+ return (strcmp(pEntryKey, pKey) == 0) ? TRUE : FALSE;
+}
+
+
+DWORD EEUtf8HashTableHelper::Hash(LPCUTF8 pKey)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ DWORD dwHash = 0;
+
+ while (*pKey != 0)
+ {
+ dwHash = (dwHash << 5) + (dwHash >> 5) + (*pKey);
+ pKey++;
+ }
+
+ return dwHash;
+}
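+
+// Worked example (illustrative): hashing the string "ab" with the shift-add
+// scheme above:
+//
+//   h = 0
+//   h = (0 << 5)  + (0 >> 5)  + 'a'(97) = 97
+//   h = (97 << 5) + (97 >> 5) + 'b'(98) = 3104 + 3 + 98 = 3205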
+
+
+LPCUTF8 EEUtf8HashTableHelper::GetKey(EEHashEntry_t *pEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return *((LPCUTF8*)pEntry->Key);
+}
+
+#ifndef DACCESS_COMPILE
+
+// ============================================================================
+// Unicode string hash table helper.
+// ============================================================================
+EEHashEntry_t * EEUnicodeHashTableHelper::AllocateEntry(EEStringData *pKey, BOOL bDeepCopy, void *pHeap)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return NULL;);
+ }
+ CONTRACTL_END
+
+ EEHashEntry_t *pEntry;
+
+ if (bDeepCopy)
+ {
+ pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(EEStringData) + ((pKey->GetCharCount() + 1) * sizeof(WCHAR))];
+ if (pEntry) {
+ EEStringData *pEntryKey = (EEStringData *)(&pEntry->Key);
+ pEntryKey->SetIsOnlyLowChars (pKey->GetIsOnlyLowChars());
+ pEntryKey->SetCharCount (pKey->GetCharCount());
+ pEntryKey->SetStringBuffer ((LPWSTR) ((LPBYTE)pEntry->Key + sizeof(EEStringData)));
+ memcpy((LPWSTR)pEntryKey->GetStringBuffer(), pKey->GetStringBuffer(), pKey->GetCharCount() * sizeof(WCHAR));
+ }
+ }
+ else
+ {
+ pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(EEStringData)];
+ if (pEntry) {
+ EEStringData *pEntryKey = (EEStringData *) pEntry->Key;
+ pEntryKey->SetIsOnlyLowChars (pKey->GetIsOnlyLowChars());
+ pEntryKey->SetCharCount (pKey->GetCharCount());
+ pEntryKey->SetStringBuffer (pKey->GetStringBuffer());
+ }
+ }
+
+ return pEntry;
+}
+
+
+void EEUnicodeHashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, void *pHeap)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ delete [] (BYTE*)pEntry;
+}
+
+
+BOOL EEUnicodeHashTableHelper::CompareKeys(EEHashEntry_t *pEntry, EEStringData *pKey)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ EEStringData *pEntryKey = (EEStringData*) pEntry->Key;
+
+ // Same buffer, same string.
+ if (pEntryKey->GetStringBuffer() == pKey->GetStringBuffer())
+ return TRUE;
+
+ // Length not the same, never a match.
+ if (pEntryKey->GetCharCount() != pKey->GetCharCount())
+ return FALSE;
+
+ // Compare the entire thing.
+ // We'll deliberately ignore the bOnlyLowChars field since it is derived from the characters.
+ return !memcmp(pEntryKey->GetStringBuffer(), pKey->GetStringBuffer(), pEntryKey->GetCharCount() * sizeof(WCHAR));
+}
+
+
+DWORD EEUnicodeHashTableHelper::Hash(EEStringData *pKey)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (HashBytes((const BYTE *) pKey->GetStringBuffer(), pKey->GetCharCount()*sizeof(WCHAR)));
+}
+
+
+EEStringData *EEUnicodeHashTableHelper::GetKey(EEHashEntry_t *pEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (EEStringData*)pEntry->Key;
+}
+
+void EEUnicodeHashTableHelper::ReplaceKey(EEHashEntry_t *pEntry, EEStringData *pNewKey)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ((EEStringData*)pEntry->Key)->SetStringBuffer (pNewKey->GetStringBuffer());
+ ((EEStringData*)pEntry->Key)->SetCharCount (pNewKey->GetCharCount());
+ ((EEStringData*)pEntry->Key)->SetIsOnlyLowChars (pNewKey->GetIsOnlyLowChars());
+}
+
+// ============================================================================
+// Unicode stringliteral hash table helper.
+// ============================================================================
+EEHashEntry_t * EEUnicodeStringLiteralHashTableHelper::AllocateEntry(EEStringData *pKey, BOOL bDeepCopy, void *pHeap)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return NULL;);
+ }
+ CONTRACTL_END
+
+ // We assert here because we expect the heap to be non-null for the
+ // EEUnicodeStringLiteralHash table. If more uses of this kind of hash table
+ // are found, remove this assert. Also note that when the heap is null we
+ // fall back to new/delete, which is EXPENSIVE. For production code this
+ // might be acceptable: if memory is fragmented, smaller allocations stand a
+ // better chance of succeeding than full pages.
+ _ASSERTE (pHeap);
+
+ if (pHeap)
+ return (EEHashEntry_t *) ((MemoryPool*)pHeap)->AllocateElementNoThrow ();
+ else
+ return (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY];
+}
+
+
+void EEUnicodeStringLiteralHashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, void *pHeap)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ // We assert here because we expect the heap to be non-null for the
+ // EEUnicodeStringLiteralHash table. If more uses of this kind of hash table
+ // are found, remove this assert. Also note that when the heap is null we
+ // fall back to new/delete, which is EXPENSIVE. For production code this
+ // might be acceptable: if memory is fragmented, smaller allocations stand a
+ // better chance of succeeding than full pages.
+ _ASSERTE (pHeap);
+
+ if (pHeap)
+ ((MemoryPool*)pHeap)->FreeElement(pEntry);
+ else
+ delete [] (BYTE*)pEntry;
+}
+
+
+BOOL EEUnicodeStringLiteralHashTableHelper::CompareKeys(EEHashEntry_t *pEntry, EEStringData *pKey)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ GCX_COOP();
+
+ StringLiteralEntry *pHashData = (StringLiteralEntry *)pEntry->Data;
+
+ EEStringData pEntryKey;
+ pHashData->GetStringData(&pEntryKey);
+
+ // Length not the same, never a match.
+ if (pEntryKey.GetCharCount() != pKey->GetCharCount())
+ return FALSE;
+
+ // Compare the entire thing.
+ // We'll deliberately ignore the bOnlyLowChars field since it is derived from the characters.
+ return (!memcmp(pEntryKey.GetStringBuffer(), pKey->GetStringBuffer(), pEntryKey.GetCharCount() * sizeof(WCHAR)));
+}
+
+
+DWORD EEUnicodeStringLiteralHashTableHelper::Hash(EEStringData *pKey)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (HashBytes((const BYTE *) pKey->GetStringBuffer(), pKey->GetCharCount() * sizeof(WCHAR)));
+}
+
+// ============================================================================
+// Permission set hash table helper.
+// ============================================================================
+
+EEHashEntry_t * EEPsetHashTableHelper::AllocateEntry(PsetCacheKey *pKey, BOOL bDeepCopy, void *pHeap)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return NULL;);
+ }
+ CONTRACTL_END
+
+ _ASSERTE(!bDeepCopy);
+
+ EEHashEntry_t *pEntry;
+
+ if (pHeap) {
+
+ S_SIZE_T sizeEntry;
+ LoaderHeap *pLHeap;
+
+ sizeEntry = S_SIZE_T(sizeof (BYTE)) * (S_SIZE_T)SIZEOF_EEHASH_ENTRY +
+ (S_SIZE_T)sizeof (PPsetCacheKey);
+
+ pLHeap = (LoaderHeap*) pHeap;
+
+ pEntry = (EEHashEntry_t *)
+ ((void*) pLHeap->AllocMem_NoThrow (sizeEntry));
+
+ } else {
+ pEntry = (EEHashEntry_t *) new (nothrow)
+ BYTE [SIZEOF_EEHASH_ENTRY + sizeof(PPsetCacheKey)];
+ }
+
+ if (pEntry) {
+ *((PPsetCacheKey*)pEntry->Key) = pKey;
+ }
+
+ return pEntry;
+}
+
+void EEPsetHashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, void *pHeap)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ //
+ // If a heap is present, memory will be reclaimed as part of appdomain
+ // unload.
+ //
+
+ if (pHeap == NULL) {
+ delete [] (BYTE*)pEntry;
+ }
+
+}
+
+BOOL EEPsetHashTableHelper::CompareKeys(EEHashEntry_t *pEntry, PsetCacheKey *pKey)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PsetCacheKey *pThis = *((PPsetCacheKey*)pEntry->Key);
+ return pKey->IsEquiv(pThis);
+}
+
+DWORD EEPsetHashTableHelper::Hash(PsetCacheKey *pKey)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return pKey->Hash();
+}
+
+PsetCacheKey * EEPsetHashTableHelper::GetKey(EEHashEntry_t *pEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PsetCacheKey *pThis = *((PPsetCacheKey*)pEntry->Key);
+ return pThis;
+}
+
+
+// ============================================================================
+// Instantiation hash table helper.
+// ============================================================================
+
+EEHashEntry_t *EEInstantiationHashTableHelper::AllocateEntry(const SigTypeContext *pKey, BOOL bDeepCopy, AllocationHeap pHeap)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(SigTypeContext)];
+ if (!pEntry)
+ return NULL;
+ *((SigTypeContext*)pEntry->Key) = *pKey;
+
+ return pEntry;
+}
+
+void EEInstantiationHashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap pHeap)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ delete [] (BYTE*)pEntry;
+}
+
+BOOL EEInstantiationHashTableHelper::CompareKeys(EEHashEntry_t *pEntry, const SigTypeContext *pKey)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ SigTypeContext *pThis = (SigTypeContext*)&pEntry->Key;
+ return SigTypeContext::Equal(pThis, pKey);
+}
+
+DWORD EEInstantiationHashTableHelper::Hash(const SigTypeContext *pKey)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD dwHash = 5381;
+ DWORD i;
+
+ for (i = 0; i < pKey->m_classInst.GetNumArgs(); i++)
+ dwHash = ((dwHash << 5) + dwHash) ^ (unsigned int)(SIZE_T)pKey->m_classInst[i].AsPtr();
+
+ for (i = 0; i < pKey->m_methodInst.GetNumArgs(); i++)
+ dwHash = ((dwHash << 5) + dwHash) ^ (unsigned int)(SIZE_T)pKey->m_methodInst[i].AsPtr();
+
+ return dwHash;
+}
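+
+// Note (illustrative): this is the djb2-style "hash * 33 XOR value" scheme.
+// For a single class type argument p it computes:
+//
+//   dwHash = ((5381 << 5) + 5381) ^ (unsigned int)(SIZE_T)p.AsPtr();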
+
+const SigTypeContext *EEInstantiationHashTableHelper::GetKey(EEHashEntry_t *pEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (const SigTypeContext*)&pEntry->Key;
+}
+
+
+
+// ============================================================================
+// ComComponentInfo hash table helper.
+// ============================================================================
+
+EEHashEntry_t *EEClassFactoryInfoHashTableHelper::AllocateEntry(ClassFactoryInfo *pKey, BOOL bDeepCopy, void *pHeap)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return NULL;);
+ }
+ CONTRACTL_END
+
+ EEHashEntry_t *pEntry;
+ S_SIZE_T cbStringLen = S_SIZE_T(0);
+
+ _ASSERTE(bDeepCopy && "Non deep copy is not supported by the EEComCompInfoHashTableHelper");
+
+ if (pKey->m_strServerName)
+ cbStringLen = (S_SIZE_T(wcslen(pKey->m_strServerName)) + S_SIZE_T(1)) * S_SIZE_T(sizeof(WCHAR));
+
+ S_SIZE_T cbEntry = S_SIZE_T(SIZEOF_EEHASH_ENTRY + sizeof(ClassFactoryInfo)) + cbStringLen;
+
+ if (cbEntry.IsOverflow())
+ return NULL;
+
+ _ASSERTE(!cbStringLen.IsOverflow());
+
+ pEntry = (EEHashEntry_t *) new (nothrow) BYTE[cbEntry.Value()];
+ if (pEntry) {
+ memcpy(pEntry->Key + sizeof(ClassFactoryInfo), pKey->m_strServerName, cbStringLen.Value());
+ ((ClassFactoryInfo*)pEntry->Key)->m_strServerName = pKey->m_strServerName ? (WCHAR*)(pEntry->Key + sizeof(ClassFactoryInfo)) : NULL;
+ ((ClassFactoryInfo*)pEntry->Key)->m_clsid = pKey->m_clsid;
+ }
+
+ return pEntry;
+}
+
+void EEClassFactoryInfoHashTableHelper::DeleteEntry(EEHashEntry_t *pEntry, void *pHeap)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ delete [] (BYTE*) pEntry;
+}
+
+BOOL EEClassFactoryInfoHashTableHelper::CompareKeys(EEHashEntry_t *pEntry, ClassFactoryInfo *pKey)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // First check the GUIDs.
+ if (((ClassFactoryInfo*)pEntry->Key)->m_clsid != pKey->m_clsid)
+ return FALSE;
+
+ // Next do a trivial comparison on the server name pointer values.
+ if (((ClassFactoryInfo*)pEntry->Key)->m_strServerName == pKey->m_strServerName)
+ return TRUE;
+
+ // If the pointers differ and one of them is NULL, the server names are different.
+ if (!((ClassFactoryInfo*)pEntry->Key)->m_strServerName || !pKey->m_strServerName)
+ return FALSE;
+
+ // Finally do a string comparison of the server names.
+ return wcscmp(((ClassFactoryInfo*)pEntry->Key)->m_strServerName, pKey->m_strServerName) == 0;
+}
+
+DWORD EEClassFactoryInfoHashTableHelper::Hash(ClassFactoryInfo *pKey)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD dwHash = 0;
+ BYTE *pGuidData = (BYTE*)&pKey->m_clsid;
+
+ for (unsigned int i = 0; i < sizeof(GUID); i++)
+ {
+ dwHash = (dwHash << 5) + (dwHash >> 5) + (*pGuidData);
+ pGuidData++;
+ }
+
+ if (pKey->m_strServerName)
+ {
+ WCHAR *pSrvNameData = pKey->m_strServerName;
+
+ while (*pSrvNameData != 0)
+ {
+ dwHash = (dwHash << 5) + (dwHash >> 5) + (*pSrvNameData);
+ pSrvNameData++;
+ }
+ }
+
+ return dwHash;
+}
+
+ClassFactoryInfo *EEClassFactoryInfoHashTableHelper::GetKey(EEHashEntry_t *pEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (ClassFactoryInfo*)pEntry->Key;
+}
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/eehash.h b/src/vm/eehash.h
new file mode 100644
index 0000000000..34d72de7d8
--- /dev/null
+++ b/src/vm/eehash.h
@@ -0,0 +1,612 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// File: eehash.h
+//
+// Provides hash table functionality needed in the EE - intended to be replaced later with better
+// algorithms that keep the same interface.
+//
+// The requirements are:
+//
+// 1. Any number of threads can be reading the hash table while another thread is writing, without error.
+// 2. Only one thread can write at a time.
+// 3. When calling ReplaceValue(), a reader will get the old value, or the new value, but not something
+// in between.
+// 4. DeleteValue() is an unsafe operation - no other threads can be in the hash table when this happens.
+//
+
+#ifndef _EE_HASH_H
+#define _EE_HASH_H
+
+#include "exceptmacros.h"
+#include "syncclean.hpp"
+#ifdef FEATURE_PREJIT
+class DataImage;
+#endif
+
+#include "util.hpp"
+
+#ifdef FEATURE_PREJIT
+#include "corcompile.h"
+#endif
+
+class AllocMemTracker;
+class ClassLoader;
+struct LockOwner;
+class NameHandle;
+struct PsetCacheKey;
+class SigTypeContext;
+
+typedef PsetCacheKey* PPsetCacheKey;
+
+// The "blob" you get to store in the hash table
+
+typedef PTR_VOID HashDatum;
+
+// The heap that you want the allocation to be done in
+
+typedef void* AllocationHeap;
+
+
+// One of these is present for each element in the table.
+// Update the SIZEOF_EEHASH_ENTRY macro below if you change this
+// struct
+
+typedef struct EEHashEntry EEHashEntry_t;
+typedef DPTR(EEHashEntry_t) PTR_EEHashEntry_t;
+struct EEHashEntry
+{
+ PTR_EEHashEntry_t pNext;
+ DWORD dwHashValue;
+ HashDatum Data;
+ BYTE Key[1]; // The key is stored inline
+};
+
+// Key[1] is a placeholder for the inline key.
+// SIZEOF_EEHASH_ENTRY is the size of the struct up to (and not including) the key.
+#define SIZEOF_EEHASH_ENTRY (offsetof(EEHashEntry,Key[0]))
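+
+// Allocation sketch (this mirrors what the helpers in eehash.cpp do): one raw
+// buffer holds the fixed header followed by the key bytes, so the entry and
+// its inline key are allocated and freed together.
+//
+//   EEHashEntry_t *pEntry =
+//       (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + cbKey];
+//   // pEntry->Key is the first byte of the inline key storage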
+
+
+// Struct to hold a client's iteration state
+struct EEHashTableIteration;
+
+class GCHeap;
+
+// Generic hash table.
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+class EEHashTableBase
+{
+public:
+
+
+ BOOL Init(DWORD dwNumBuckets, LockOwner *pLock, AllocationHeap pHeap = 0,BOOL CheckThreadSafety = TRUE);
+
+ void InsertValue(KeyType pKey, HashDatum Data, BOOL bDeepCopyKey = bDefaultCopyIsDeep);
+ void InsertKeyAsValue(KeyType pKey, BOOL bDeepCopyKey = bDefaultCopyIsDeep);
+ BOOL DeleteValue(KeyType pKey);
+ BOOL ReplaceValue(KeyType pKey, HashDatum Data);
+ BOOL ReplaceKey(KeyType pOldKey, KeyType pNewKey);
+ void ClearHashTable();
+ void EmptyHashTable();
+ BOOL IsEmpty();
+ void Destroy();
+
+ // Reader functions. Please place any functions that can be called from the
+ // reader threads here.
+ BOOL GetValue(KeyType pKey, HashDatum *pData);
+ BOOL GetValue(KeyType pKey, HashDatum *pData, DWORD hashValue);
+
+
+ // A fast inlinable flavor of GetValue that can return false instead of the actual item
+ // if there is a race with an update of the hashtable. Callers of GetValueSpeculative
+ // should fall back to the slow GetValue if GetValueSpeculative returns false.
+ // Assumes that we are in cooperative mode already. For performance-sensitive codepaths.
+ BOOL GetValueSpeculative(KeyType pKey, HashDatum *pData);
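+
+ // Speculative-read pattern (sketch; assumes the caller has already entered
+ // cooperative GC mode, as required above):
+ //
+ //   HashDatum datum;
+ //   if (!table.GetValueSpeculative(pKey, &datum))
+ //   {
+ //       // Lost a race with a writer - retry on the safe, slower path.
+ //       fFound = table.GetValue(pKey, &datum);
+ //   }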
+
+ DWORD GetHash(KeyType Key);
+ DWORD GetCount();
+
+ // Walk through all the entries in the hash table, in meaningless order, without any
+ // synchronization.
+ //
+ // IterateStart()
+ // while (IterateNext())
+ // IterateGetKey();
+ //
+ // This is guaranteed to be DeleteValue-friendly if you advance the iterator before
+ // deleting, i.e. if used in the following pattern:
+ //
+ // IterateStart();
+ // BOOL keepGoing = IterateNext();
+ // while(keepGoing)
+ // {
+ // key = IterateGetKey();
+ // keepGoing = IterateNext();
+ // ...
+ // DeleteValue(key);
+ // ..
+ // }
+ void IterateStart(EEHashTableIteration *pIter);
+ BOOL IterateNext(EEHashTableIteration *pIter);
+ KeyType IterateGetKey(EEHashTableIteration *pIter);
+ HashDatum IterateGetValue(EEHashTableIteration *pIter);
+#ifdef _DEBUG
+ void SuppressSyncCheck()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_CheckThreadSafety=FALSE;
+ }
+#endif
+protected:
+ BOOL GrowHashTable();
+ EEHashEntry_t * FindItem(KeyType pKey);
+ EEHashEntry_t * FindItem(KeyType pKey, DWORD hashValue);
+
+ // A fast inlinable flavor of FindItem that can return null instead of the actual item
+ // if there is a race with an update of the hashtable. Callers of FindItemSpeculative
+ // should fall back to the slow FindItem if FindItemSpeculative returns null.
+ // Assumes that we are in cooperative mode already. For performance-sensitive codepaths.
+ EEHashEntry_t * FindItemSpeculative(KeyType pKey, DWORD hashValue);
+
+ // Double buffer to fix the race condition in GrowHashTable (the update
+ // of m_pBuckets and m_dwNumBuckets has to be atomic, so we double-buffer
+ // the structure and access it through a pointer, which can be updated
+ // atomically). The union exists so the SOS macros need not change.
+
+ struct BucketTable
+ {
+ DPTR(PTR_EEHashEntry_t) m_pBuckets; // Pointer to first entry for each bucket
+ DWORD m_dwNumBuckets;
+ } m_BucketTable[2];
+ typedef DPTR(BucketTable) PTR_BucketTable;
+
+ // In a function we MUST read this value only ONCE, as the writer thread can
+ // change it asynchronously. We make this member volatile so the compiler
+ // won't do copy-propagation optimizations that could make the read happen
+ // more than once. Note that we only need this property for the readers, as
+ // they are the ones that can see the variable changed underneath them (if
+ // the variable were enregistered we wouldn't have any problem).
+ // BE VERY CAREFUL WITH WHAT YOU DO WITH THIS VARIABLE, AS USING IT BADLY
+ // CAN CAUSE RACE CONDITIONS.
+ VolatilePtr<BucketTable, PTR_BucketTable> m_pVolatileBucketTable;
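+
+ // Publish pattern used by GrowHashTable (sketch of the idea the double
+ // buffer enables): the writer rehashes into the inactive BucketTable, then
+ // publishes it with a single atomic pointer store, so a reader always sees
+ // a fully consistent table.
+ //
+ //   BucketTable *pNew = // the buffer not currently published
+ //       &m_BucketTable[m_pVolatileBucketTable == &m_BucketTable[0] ? 1 : 0];
+ //   /* allocate buckets and rehash the existing entries into pNew */
+ //   m_pVolatileBucketTable = pNew; // atomic publish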
+
+
+ DWORD m_dwNumEntries;
+ AllocationHeap m_Heap;
+ Volatile<LONG> m_bGrowing;
+#ifdef _DEBUG
+ LPVOID m_lockData;
+ FnLockOwner m_pfnLockOwner;
+
+ EEThreadId m_writerThreadId;
+ BOOL m_CheckThreadSafety;
+
+#endif
+
+#ifdef _DEBUG_IMPL
+ // A thread must own a lock for a hash if it is a writer.
+ BOOL OwnLock();
+#endif // _DEBUG_IMPL
+};
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+class EEHashTable : public EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>
+{
+public:
+ EEHashTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ this->m_BucketTable[0].m_pBuckets = NULL;
+ this->m_BucketTable[0].m_dwNumBuckets = 0;
+ this->m_BucketTable[1].m_pBuckets = NULL;
+ this->m_BucketTable[1].m_dwNumBuckets = 0;
+#ifndef DACCESS_COMPILE
+ this->m_pVolatileBucketTable = NULL;
+#endif
+ this->m_dwNumEntries = 0;
+ this->m_bGrowing = 0;
+#ifdef _DEBUG
+ this->m_lockData = NULL;
+ this->m_pfnLockOwner = NULL;
+#endif
+ }
+
+ ~EEHashTable()
+ {
+ WRAPPER_NO_CONTRACT;
+ this->Destroy();
+ }
+};
+
+/* to be used as a static variable - no constructor/destructor; assumes
+   zero-initialized memory */
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+class EEHashTableStatic : public EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>
+{
+};
+
+class EEIntHashTableHelper
+{
+public:
+ static EEHashEntry_t *AllocateEntry(int iKey, BOOL bDeepCopy, AllocationHeap pHeap = 0)
+ {
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ INJECT_FAULT(return NULL;);
+ }
+ CONTRACTL_END
+
+ _ASSERTE(!bDeepCopy && "Deep copy is not supported by the EEPtrHashTableHelper");
+
+ EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(int)];
+ if (!pEntry)
+ return NULL;
+ *((int*) pEntry->Key) = iKey;
+
+ return pEntry;
+ }
+
+ static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap pHeap = 0)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Delete the entry.
+ delete [] (BYTE*) pEntry;
+ }
+
+ static BOOL CompareKeys(EEHashEntry_t *pEntry, int iKey)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return *((int*)pEntry->Key) == iKey;
+ }
+
+ static DWORD Hash(int iKey)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (DWORD)iKey;
+ }
+
+ static int GetKey(EEHashEntry_t *pEntry)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return *((int*) pEntry->Key);
+ }
+};
+typedef EEHashTable<int, EEIntHashTableHelper, FALSE> EEIntHashTable;
+
+typedef struct PtrPlusInt
+{
+ void* pValue;
+ int iValue;
+} *PPtrPlusInt;
+
+class EEPtrPlusIntHashTableHelper
+{
+public:
+ static EEHashEntry_t *AllocateEntry(PtrPlusInt ppiKey, BOOL bDeepCopy, AllocationHeap pHeap = 0)
+ {
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ INJECT_FAULT(return NULL;);
+ }
+ CONTRACTL_END
+
+ _ASSERTE(!bDeepCopy && "Deep copy is not supported by the EEPtrPlusIntHashTableHelper");
+
+ EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(PtrPlusInt)];
+ if (!pEntry)
+ return NULL;
+ *((PPtrPlusInt) pEntry->Key) = ppiKey;
+
+ return pEntry;
+ }
+
+ static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap pHeap = 0)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Delete the entry.
+ delete [] (BYTE*) pEntry;
+ }
+
+ static BOOL CompareKeys(EEHashEntry_t *pEntry, PtrPlusInt ppiKey)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (((PPtrPlusInt)pEntry->Key)->pValue == ppiKey.pValue) &&
+ (((PPtrPlusInt)pEntry->Key)->iValue == ppiKey.iValue);
+ }
+
+ static DWORD Hash(PtrPlusInt ppiKey)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (DWORD)ppiKey.iValue ^
+#ifdef _TARGET_X86_
+ (DWORD)(size_t) ppiKey.pValue;
+#else
+ // <TODO> IA64: Is this a good hashing mechanism on IA64?</TODO>
+ (DWORD)(((size_t) ppiKey.pValue) >> 3);
+#endif
+ }
+
+ static PtrPlusInt GetKey(EEHashEntry_t *pEntry)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return *((PPtrPlusInt) pEntry->Key);
+ }
+};
+
+typedef EEHashTable<PtrPlusInt, EEPtrPlusIntHashTableHelper, FALSE> EEPtrPlusIntHashTable;
+
+// UTF8 string hash table. The UTF8 strings are NULL terminated.
+
+class EEUtf8HashTableHelper
+{
+public:
+ static EEHashEntry_t * AllocateEntry(LPCUTF8 pKey, BOOL bDeepCopy, AllocationHeap Heap);
+ static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap);
+ static BOOL CompareKeys(EEHashEntry_t *pEntry, LPCUTF8 pKey);
+ static DWORD Hash(LPCUTF8 pKey);
+ static LPCUTF8 GetKey(EEHashEntry_t *pEntry);
+};
+
+typedef EEHashTable<LPCUTF8, EEUtf8HashTableHelper, TRUE> EEUtf8StringHashTable;
+typedef DPTR(EEUtf8StringHashTable) PTR_EEUtf8StringHashTable;
+
+// Unicode String hash table - the keys are UNICODE strings which may
+// contain embedded nulls. An EEStringData struct is used for the key
+// which contains the length of the item. Note that this string is
+// not necessarily null terminated and should never be treated as such.
+const DWORD ONLY_LOW_CHARS_MASK = 0x80000000;
+
+class EEStringData
+{
+private:
+ LPCWSTR szString; // The string data.
+ DWORD cch; // Characters in the string.
+#ifdef _DEBUG
+ BOOL bDebugOnlyLowChars; // Does the string contain only characters less than 0x80?
+ DWORD dwDebugCch;
+#endif // _DEBUG
+
+public:
+ // explicitly initialize cch to 0 because SetCharCount uses cch
+ EEStringData() : cch(0)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ SetStringBuffer(NULL);
+ SetCharCount(0);
+ SetIsOnlyLowChars(FALSE);
+ };
+ EEStringData(DWORD cchString, LPCWSTR str) : cch(0)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ SetStringBuffer(str);
+ SetCharCount(cchString);
+ SetIsOnlyLowChars(FALSE);
+ };
+ EEStringData(DWORD cchString, LPCWSTR str, BOOL onlyLow) : cch(0)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ SetStringBuffer(str);
+ SetCharCount(cchString);
+ SetIsOnlyLowChars(onlyLow);
+ };
+ inline ULONG GetCharCount() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE ((cch & ~ONLY_LOW_CHARS_MASK) == dwDebugCch);
+ return (cch & ~ONLY_LOW_CHARS_MASK);
+ }
+ inline void SetCharCount(ULONG _cch)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _DEBUG
+ dwDebugCch = _cch;
+#endif // _DEBUG
+ cch = ((DWORD)_cch) | (cch & ONLY_LOW_CHARS_MASK);
+ }
+ inline LPCWSTR GetStringBuffer() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (szString);
+ }
+ inline void SetStringBuffer(LPCWSTR _szString)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ szString = _szString;
+ }
+ inline BOOL GetIsOnlyLowChars() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(bDebugOnlyLowChars == ((cch & ONLY_LOW_CHARS_MASK) ? TRUE : FALSE));
+ return ((cch & ONLY_LOW_CHARS_MASK) ? TRUE : FALSE);
+ }
+ inline void SetIsOnlyLowChars(BOOL bIsOnlyLowChars)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _DEBUG
+ bDebugOnlyLowChars = bIsOnlyLowChars;
+#endif // _DEBUG
+ bIsOnlyLowChars ? (cch |= ONLY_LOW_CHARS_MASK) : (cch &= ~ONLY_LOW_CHARS_MASK);
+ }
+};
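+
+// Bit-packing example (illustrative): the character count and the "only low
+// chars" flag share the single DWORD 'cch'; the flag occupies the top bit
+// (ONLY_LOW_CHARS_MASK), so counts are limited to 31 bits.
+//
+//   EEStringData sd(5, W("hello"), TRUE);
+//   sd.GetCharCount();      // 5 - the mask bit is stripped off
+//   sd.GetIsOnlyLowChars(); // TRUE - (cch & 0x80000000) is set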
+
+class EEUnicodeHashTableHelper
+{
+public:
+ static EEHashEntry_t * AllocateEntry(EEStringData *pKey, BOOL bDeepCopy, AllocationHeap Heap);
+ static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap);
+ static BOOL CompareKeys(EEHashEntry_t *pEntry, EEStringData *pKey);
+ static DWORD Hash(EEStringData *pKey);
+ static EEStringData * GetKey(EEHashEntry_t *pEntry);
+ static void ReplaceKey(EEHashEntry_t *pEntry, EEStringData *pNewKey);
+};
+
+typedef EEHashTable<EEStringData *, EEUnicodeHashTableHelper, TRUE> EEUnicodeStringHashTable;
+
+
+class EEUnicodeStringLiteralHashTableHelper
+{
+public:
+ static EEHashEntry_t * AllocateEntry(EEStringData *pKey, BOOL bDeepCopy, AllocationHeap Heap);
+ static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap);
+ static BOOL CompareKeys(EEHashEntry_t *pEntry, EEStringData *pKey);
+ static DWORD Hash(EEStringData *pKey);
+ static void ReplaceKey(EEHashEntry_t *pEntry, EEStringData *pNewKey);
+};
+
+typedef EEHashTable<EEStringData *, EEUnicodeStringLiteralHashTableHelper, TRUE> EEUnicodeStringLiteralHashTable;
+
+// Permission set hash table.
+
+class EEPsetHashTableHelper
+{
+public:
+ static EEHashEntry_t * AllocateEntry(PsetCacheKey *pKey, BOOL bDeepCopy, AllocationHeap Heap);
+ static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap);
+ static BOOL CompareKeys(EEHashEntry_t *pEntry, PsetCacheKey *pKey);
+ static DWORD Hash(PsetCacheKey *pKey);
+ static PsetCacheKey *GetKey(EEHashEntry_t *pEntry);
+};
+
+typedef EEHashTable<PsetCacheKey *, EEPsetHashTableHelper, FALSE> EEPsetHashTable;
+
+
+// Generic pointer hash table helper.
+
+template <class KeyPointerType>
+class EEPtrHashTableHelper
+{
+public:
+ static EEHashEntry_t *AllocateEntry(KeyPointerType pKey, BOOL bDeepCopy, AllocationHeap Heap)
+ {
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ INJECT_FAULT(return FALSE;);
+ }
+ CONTRACTL_END
+
+ _ASSERTE(!bDeepCopy && "Deep copy is not supported by the EEPtrHashTableHelper");
+ _ASSERTE(sizeof(KeyPointerType) == sizeof(void *) && "KeyPointerType must be a pointer type");
+
+ EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(KeyPointerType)];
+ if (!pEntry)
+ return NULL;
+ *((KeyPointerType*)pEntry->Key) = pKey;
+
+ return pEntry;
+ }
+
+ static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Delete the entry.
+ delete [] (BYTE*) pEntry;
+ }
+
+ static BOOL CompareKeys(EEHashEntry_t *pEntry, KeyPointerType pKey)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ KeyPointerType pEntryKey = *((KeyPointerType*)pEntry->Key);
+ return pEntryKey == pKey;
+ }
+
+ static DWORD Hash(KeyPointerType pKey)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+#ifdef _TARGET_X86_
+ return (DWORD)(size_t) dac_cast<TADDR>(pKey);
+#else
+ // <TODO> IA64: Is this a good hashing mechanism on IA64?</TODO>
+ return (DWORD)(((size_t) dac_cast<TADDR>(pKey)) >> 3);
+#endif
+ }
+
+ static KeyPointerType GetKey(EEHashEntry_t *pEntry)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return *((KeyPointerType*)pEntry->Key);
+ }
+};
+
+#ifndef BINDER
+typedef EEHashTable<PTR_VOID, EEPtrHashTableHelper<PTR_VOID>, FALSE> EEPtrHashTable;
+typedef DPTR(EEPtrHashTable) PTR_EEPtrHashTable;
+#endif // !BINDER
+
+// Define a hash of generic instantiations (represented by a SigTypeContext).
+class EEInstantiationHashTableHelper
+{
+public:
+ static EEHashEntry_t *AllocateEntry(const SigTypeContext *pKey, BOOL bDeepCopy, AllocationHeap pHeap = 0);
+ static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap pHeap = 0);
+ static BOOL CompareKeys(EEHashEntry_t *pEntry, const SigTypeContext *pKey);
+ static DWORD Hash(const SigTypeContext *pKey);
+ static const SigTypeContext *GetKey(EEHashEntry_t *pEntry);
+};
+typedef EEHashTable<const SigTypeContext*, EEInstantiationHashTableHelper, FALSE> EEInstantiationHashTable;
+
+// ClassFactoryInfo hash table (COM class factory information).
+
+struct ClassFactoryInfo
+{
+ GUID m_clsid;
+ WCHAR *m_strServerName;
+};
+
+class EEClassFactoryInfoHashTableHelper
+{
+public:
+ static EEHashEntry_t *AllocateEntry(ClassFactoryInfo *pKey, BOOL bDeepCopy, AllocationHeap Heap);
+ static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap);
+ static BOOL CompareKeys(EEHashEntry_t *pEntry, ClassFactoryInfo *pKey);
+ static DWORD Hash(ClassFactoryInfo *pKey);
+ static ClassFactoryInfo *GetKey(EEHashEntry_t *pEntry);
+};
+
+typedef EEHashTable<ClassFactoryInfo *, EEClassFactoryInfoHashTableHelper, TRUE> EEClassFactoryInfoHashTable;
+
+// Struct to hold a client's iteration state.
+struct EEHashTableIteration
+{
+ DWORD m_dwBucket;
+ EEHashEntry_t *m_pEntry;
+
+#ifdef _DEBUG
+ void *m_pTable;
+#endif
+};
+
+#endif /* _EE_HASH_H */
diff --git a/src/vm/eehash.inl b/src/vm/eehash.inl
new file mode 100644
index 0000000000..98c6bfd86b
--- /dev/null
+++ b/src/vm/eehash.inl
@@ -0,0 +1,878 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+#ifndef _EE_HASH_INL
+#define _EE_HASH_INL
+
+#ifdef _DEBUG_IMPL
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::OwnLock()
+{
+ WRAPPER_NO_CONTRACT;
+ if (m_CheckThreadSafety == FALSE)
+ return TRUE;
+
+ if (m_pfnLockOwner == NULL) {
+ return m_writerThreadId.IsSameThread();
+ }
+ else {
+ BOOL ret = m_pfnLockOwner(m_lockData);
+ if (!ret) {
+ if (Debug_IsLockedViaThreadSuspension()) {
+ ret = TRUE;
+ }
+ }
+ return ret;
+ }
+}
+#endif // _DEBUG_IMPL
+
+#ifndef DACCESS_COMPILE
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::Destroy()
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ if (m_pVolatileBucketTable && m_pVolatileBucketTable->m_pBuckets != NULL)
+ {
+ DWORD i;
+
+ for (i = 0; i < m_pVolatileBucketTable->m_dwNumBuckets; i++)
+ {
+ EEHashEntry_t *pEntry, *pNext;
+
+ for (pEntry = m_pVolatileBucketTable->m_pBuckets[i]; pEntry != NULL; pEntry = pNext)
+ {
+ pNext = pEntry->pNext;
+ Helper::DeleteEntry(pEntry, m_Heap);
+ }
+ }
+
+ delete[] (m_pVolatileBucketTable->m_pBuckets-1);
+
+ m_pVolatileBucketTable = NULL;
+ }
+
+}
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::ClearHashTable()
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ //_ASSERTE (OwnLock());
+
+    // Transition to COOP mode. This is needed because EEHashTable is lock free and can be read
+    // from multiple threads without taking locks. On rehash, we want to get rid of the old copy
+    // of the table, but we can only do so once nobody is using it. That's a problem because
+    // there is no lock to tell us when the last reader has stopped using the old copy.
+    // The solution is to access the table in cooperative mode and to get rid of the old copy
+    // while we are suspended for GC: at that point we know that nobody is using it anymore.
+    // BROKEN: This is sometimes called from the CorMap hash before the EE is started up.
+ GCX_COOP_NO_THREAD_BROKEN();
+
+ if (m_pVolatileBucketTable->m_pBuckets != NULL)
+ {
+ DWORD i;
+
+ for (i = 0; i < m_pVolatileBucketTable->m_dwNumBuckets; i++)
+ {
+ EEHashEntry_t *pEntry, *pNext;
+
+ for (pEntry = m_pVolatileBucketTable->m_pBuckets[i]; pEntry != NULL; pEntry = pNext)
+ {
+ pNext = pEntry->pNext;
+ Helper::DeleteEntry(pEntry, m_Heap);
+ }
+ }
+
+ delete[] (m_pVolatileBucketTable->m_pBuckets-1);
+ m_pVolatileBucketTable->m_pBuckets = NULL;
+ }
+
+ m_pVolatileBucketTable->m_dwNumBuckets = 0;
+ m_dwNumEntries = 0;
+}
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::EmptyHashTable()
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ _ASSERTE (OwnLock());
+
+    // Transition to COOP mode. This is needed because EEHashTable is lock free and can be read
+    // from multiple threads without taking locks. On rehash, we want to get rid of the old copy
+    // of the table, but we can only do so once nobody is using it. That's a problem because
+    // there is no lock to tell us when the last reader has stopped using the old copy.
+    // The solution is to access the table in cooperative mode and to get rid of the old copy
+    // while we are suspended for GC: at that point we know that nobody is using it anymore.
+    // BROKEN: This is sometimes called from the CorMap hash before the EE is started up.
+ GCX_COOP_NO_THREAD_BROKEN();
+
+ if (m_pVolatileBucketTable->m_pBuckets != NULL)
+ {
+ DWORD i;
+
+ for (i = 0; i < m_pVolatileBucketTable->m_dwNumBuckets; i++)
+ {
+ EEHashEntry_t *pEntry, *pNext;
+
+ for (pEntry = m_pVolatileBucketTable->m_pBuckets[i]; pEntry != NULL; pEntry = pNext)
+ {
+ pNext = pEntry->pNext;
+ Helper::DeleteEntry(pEntry, m_Heap);
+ }
+
+ m_pVolatileBucketTable->m_pBuckets[i] = NULL;
+ }
+ }
+
+ m_dwNumEntries = 0;
+}
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::Init(DWORD dwNumBuckets, LockOwner *pLock, AllocationHeap pHeap, BOOL CheckThreadSafety)
+{
+ CONTRACTL
+ {
+ WRAPPER(NOTHROW);
+ WRAPPER(GC_NOTRIGGER);
+ INJECT_FAULT(return FALSE;);
+
+#ifndef DACCESS_COMPILE
+ PRECONDITION(m_pVolatileBucketTable.Load() == NULL && "EEHashTable::Init() called twice.");
+#endif
+
+ }
+ CONTRACTL_END
+
+ m_pVolatileBucketTable = &m_BucketTable[0];
+
+ DWORD dwNumBucketsPlusOne;
+
+    // PREfast overflow sanity check for the addition.
+ if (!ClrSafeInt<DWORD>::addition(dwNumBuckets, 1, dwNumBucketsPlusOne))
+ return FALSE;
+
+ S_SIZE_T safeSize(sizeof(EEHashEntry_t *));
+ safeSize *= dwNumBucketsPlusOne;
+ if (safeSize.IsOverflow())
+ ThrowHR(COR_E_OVERFLOW);
+ SIZE_T cbAlloc = safeSize.Value();
+
+ m_pVolatileBucketTable->m_pBuckets = (EEHashEntry_t **) new (nothrow) BYTE[cbAlloc];
+
+ if (m_pVolatileBucketTable->m_pBuckets == NULL)
+ return FALSE;
+
+ memset(m_pVolatileBucketTable->m_pBuckets, 0, cbAlloc);
+
+    // The first slot is reserved for linking retired bucket arrays (see GrowHashTable); skip past it.
+ m_pVolatileBucketTable->m_pBuckets++;
+
+ m_pVolatileBucketTable->m_dwNumBuckets = dwNumBuckets;
+
+ m_Heap = pHeap;
+
+#ifdef _DEBUG
+ if (pLock == NULL) {
+ m_lockData = NULL;
+ m_pfnLockOwner = NULL;
+ }
+ else {
+ m_lockData = pLock->lock;
+ m_pfnLockOwner = pLock->lockOwnerFunc;
+ }
+
+ if (m_pfnLockOwner == NULL) {
+ m_writerThreadId.SetThreadId();
+ }
+ m_CheckThreadSafety = CheckThreadSafety;
+#endif
+
+ return TRUE;
+}
+
+
+// Does not handle duplicates!
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::InsertValue(KeyType pKey, HashDatum Data, BOOL bDeepCopyKey)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ _ASSERTE (OwnLock());
+
+    // Transition to COOP mode. This is needed because EEHashTable is lock free and can be read
+    // from multiple threads without taking locks. On rehash, we want to get rid of the old copy
+    // of the table, but we can only do so once nobody is using it. That's a problem because
+    // there is no lock to tell us when the last reader has stopped using the old copy.
+    // The solution is to access the table in cooperative mode and to get rid of the old copy
+    // while we are suspended for GC: at that point we know that nobody is using it anymore.
+    // BROKEN: This is sometimes called from the CorMap hash before the EE is started up.
+ GCX_COOP_NO_THREAD_BROKEN();
+
+ _ASSERTE(m_pVolatileBucketTable->m_dwNumBuckets != 0);
+
+ if (m_dwNumEntries > m_pVolatileBucketTable->m_dwNumBuckets*2)
+ {
+ if (!GrowHashTable()) COMPlusThrowOM();
+ }
+
+ DWORD dwHash = (DWORD)Helper::Hash(pKey);
+ DWORD dwBucket = dwHash % m_pVolatileBucketTable->m_dwNumBuckets;
+ EEHashEntry_t * pNewEntry;
+
+ pNewEntry = Helper::AllocateEntry(pKey, bDeepCopyKey, m_Heap);
+ if (!pNewEntry)
+ {
+ COMPlusThrowOM();
+ }
+
+ // Fill in the information for the new entry.
+ pNewEntry->pNext = m_pVolatileBucketTable->m_pBuckets[dwBucket];
+ pNewEntry->Data = Data;
+ pNewEntry->dwHashValue = dwHash;
+
+    // Insert at the head of the bucket. A volatile write is needed to avoid
+    // write reordering problems on weakly ordered architectures (e.g. IA64).
+    VolatileStore(&m_pVolatileBucketTable->m_pBuckets[dwBucket], pNewEntry);
+
+ m_dwNumEntries++;
+}
+
+
+// Similar to the above, except that the HashDatum is a pointer to key.
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::InsertKeyAsValue(KeyType pKey, BOOL bDeepCopyKey)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ _ASSERTE (OwnLock());
+
+    // Transition to COOP mode. This is needed because EEHashTable is lock free and can be read
+    // from multiple threads without taking locks. On rehash, we want to get rid of the old copy
+    // of the table, but we can only do so once nobody is using it. That's a problem because
+    // there is no lock to tell us when the last reader has stopped using the old copy.
+    // The solution is to access the table in cooperative mode and to get rid of the old copy
+    // while we are suspended for GC: at that point we know that nobody is using it anymore.
+    // BROKEN: This is sometimes called from the CorMap hash before the EE is started up.
+ GCX_COOP_NO_THREAD_BROKEN();
+
+ _ASSERTE(m_pVolatileBucketTable->m_dwNumBuckets != 0);
+
+ if (m_dwNumEntries > m_pVolatileBucketTable->m_dwNumBuckets*2)
+ {
+ if (!GrowHashTable()) COMPlusThrowOM();
+ }
+
+ DWORD dwHash = Helper::Hash(pKey);
+ DWORD dwBucket = dwHash % m_pVolatileBucketTable->m_dwNumBuckets;
+ EEHashEntry_t * pNewEntry;
+
+ pNewEntry = Helper::AllocateEntry(pKey, bDeepCopyKey, m_Heap);
+ if (!pNewEntry)
+ {
+ COMPlusThrowOM();
+ }
+
+ // Fill in the information for the new entry.
+ pNewEntry->pNext = m_pVolatileBucketTable->m_pBuckets[dwBucket];
+ pNewEntry->dwHashValue = dwHash;
+ pNewEntry->Data = *((LPUTF8 *)pNewEntry->Key);
+
+    // Insert at the head of the bucket. A volatile write is needed to avoid
+    // write reordering problems on weakly ordered architectures (e.g. IA64).
+    VolatileStore(&m_pVolatileBucketTable->m_pBuckets[dwBucket], pNewEntry);
+
+ m_dwNumEntries++;
+}
+
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::DeleteValue(KeyType pKey)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ _ASSERTE (OwnLock());
+
+ Thread *pThread = GetThreadNULLOk();
+ GCX_MAYBE_COOP_NO_THREAD_BROKEN(pThread ? !(pThread->m_StateNC & Thread::TSNC_UnsafeSkipEnterCooperative) : FALSE);
+
+ _ASSERTE(m_pVolatileBucketTable->m_dwNumBuckets != 0);
+
+ DWORD dwHash = Helper::Hash(pKey);
+ DWORD dwBucket = dwHash % m_pVolatileBucketTable->m_dwNumBuckets;
+ EEHashEntry_t * pSearch;
+ EEHashEntry_t **ppPrev = &m_pVolatileBucketTable->m_pBuckets[dwBucket];
+
+ for (pSearch = m_pVolatileBucketTable->m_pBuckets[dwBucket]; pSearch; pSearch = pSearch->pNext)
+ {
+ if (pSearch->dwHashValue == dwHash && Helper::CompareKeys(pSearch, pKey))
+ {
+ *ppPrev = pSearch->pNext;
+ Helper::DeleteEntry(pSearch, m_Heap);
+
+ // Do we ever want to shrink?
+ m_dwNumEntries--;
+
+ return TRUE;
+ }
+
+ ppPrev = &pSearch->pNext;
+ }
+
+ return FALSE;
+}
+
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::ReplaceValue(KeyType pKey, HashDatum Data)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ _ASSERTE (OwnLock());
+
+ EEHashEntry_t *pItem = FindItem(pKey);
+
+ if (pItem != NULL)
+ {
+ // Required to be atomic
+ pItem->Data = Data;
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+}
+
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::ReplaceKey(KeyType pOldKey, KeyType pNewKey)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ _ASSERTE (OwnLock());
+
+ EEHashEntry_t *pItem = FindItem(pOldKey);
+
+ if (pItem != NULL)
+ {
+ Helper::ReplaceKey (pItem, pNewKey);
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+}
+#endif // !DACCESS_COMPILE
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+DWORD EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetHash(KeyType pKey)
+{
+ WRAPPER_NO_CONTRACT;
+ return Helper::Hash(pKey);
+}
+
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetValue(KeyType pKey, HashDatum *pData)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ EEHashEntry_t *pItem = FindItem(pKey);
+
+ if (pItem != NULL)
+ {
+ *pData = pItem->Data;
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+}
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetValue(KeyType pKey, HashDatum *pData, DWORD hashValue)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ EEHashEntry_t *pItem = FindItem(pKey, hashValue);
+
+ if (pItem != NULL)
+ {
+ *pData = pItem->Data;
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+}
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+FORCEINLINE BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetValueSpeculative(KeyType pKey, HashDatum *pData)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+#ifdef MODE_COOPERATIVE // This header file sees contract.h, not eecontract.h - what a kludge!
+ MODE_COOPERATIVE;
+#endif
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ EEHashEntry_t *pItem = FindItemSpeculative(pKey, Helper::Hash(pKey));
+
+ if (pItem != NULL)
+ {
+ *pData = pItem->Data;
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+}
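+// (Illustrative note: unlike GetValue, the speculative variant above does not retry
+// when the table is concurrently grown -- see the retry loop in FindItem -- so a
+// miss here is tentative and callers should fall back to a locked lookup.)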
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+EEHashEntry_t *EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::FindItem(KeyType pKey)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ return FindItem(pKey, Helper::Hash(pKey));
+}
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+EEHashEntry_t *EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::FindItem(KeyType pKey, DWORD dwHash)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+    // Transition to COOP mode. This is needed because EEHashTable is lock free and can be read
+    // from multiple threads without taking locks. On rehash, we want to get rid of the old copy
+    // of the table, but we can only do so once nobody is using it. That's a problem because
+    // there is no lock to tell us when the last reader has stopped using the old copy.
+    // The solution is to access the table in cooperative mode and to get rid of the old copy
+    // while we are suspended for GC: at that point we know that nobody is using it anymore.
+ //
+#ifndef DACCESS_COMPILE
+ GCX_COOP_NO_THREAD_BROKEN();
+#endif
+
+    // Read m_pVolatileBucketTable exactly once. At any other point in this method,
+    // or in ANY of its callees, m_pVolatileBucketTable must not be re-read, or a
+    // race with a concurrent table growth would occur.
+ DWORD dwOldNumBuckets;
+
+#ifndef DACCESS_COMPILE
+ DWORD nTry = 0;
+ DWORD dwSwitchCount = 0;
+#endif
+
+ do
+ {
+        BucketTable* pBucketTable = (BucketTable*)(PTR_BucketTable)m_pVolatileBucketTable.Load();
+ dwOldNumBuckets = pBucketTable->m_dwNumBuckets;
+
+ _ASSERTE(pBucketTable->m_dwNumBuckets != 0);
+
+ DWORD dwBucket = dwHash % pBucketTable->m_dwNumBuckets;
+ EEHashEntry_t * pSearch;
+
+ for (pSearch = pBucketTable->m_pBuckets[dwBucket]; pSearch; pSearch = pSearch->pNext)
+ {
+ if (pSearch->dwHashValue == dwHash && Helper::CompareKeys(pSearch, pKey))
+ return pSearch;
+ }
+
+        // There is a race in EEHashTable: when we grow the hash table, we retire the
+        // old bucket table. Readers that are still looking at the old table can fail
+        // to find an existing entry. The workaround is to retry the search if the
+        // table was grown while the search was in progress.
+#ifndef DACCESS_COMPILE
+ nTry ++;
+ if (nTry == 20) {
+ __SwitchToThread(0, ++dwSwitchCount);
+ nTry = 0;
+ }
+#endif // #ifndef DACCESS_COMPILE
+ }
+    while (m_bGrowing || dwOldNumBuckets != m_pVolatileBucketTable->m_dwNumBuckets);
+
+ return NULL;
+}
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+FORCEINLINE EEHashEntry_t *EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::FindItemSpeculative(KeyType pKey, DWORD dwHash)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+#ifdef MODE_COOPERATIVE // This header file sees contract.h, not eecontract.h - what a kludge!
+ MODE_COOPERATIVE;
+#endif
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+    // Read m_pVolatileBucketTable exactly once. At any other point in this method,
+    // or in ANY of its callees, m_pVolatileBucketTable must not be re-read, or a
+    // race with a concurrent table growth would occur.
+ DWORD dwOldNumBuckets;
+
+    BucketTable* pBucketTable = m_pVolatileBucketTable;
+ dwOldNumBuckets = pBucketTable->m_dwNumBuckets;
+
+ _ASSERTE(pBucketTable->m_dwNumBuckets != 0);
+
+ DWORD dwBucket = dwHash % pBucketTable->m_dwNumBuckets;
+ EEHashEntry_t * pSearch;
+
+ for (pSearch = pBucketTable->m_pBuckets[dwBucket]; pSearch; pSearch = pSearch->pNext)
+ {
+ if (pSearch->dwHashValue == dwHash && Helper::CompareKeys(pSearch, pKey))
+ return pSearch;
+ }
+
+ return NULL;
+}
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::IsEmpty()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_dwNumEntries == 0;
+}
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+DWORD EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetCount()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_dwNumEntries;
+}
+
+#ifndef DACCESS_COMPILE
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GrowHashTable()
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ INJECT_FAULT(return FALSE;);
+ }
+ CONTRACTL_END
+
+#if defined(_DEBUG) && !defined(CROSSGEN_COMPILE)
+ BEGIN_GETTHREAD_ALLOWED;
+ Thread * pThread = GetThread();
+ _ASSERTE(!g_fEEStarted || (pThread == NULL) || (pThread->PreemptiveGCDisabled()));
+ END_GETTHREAD_ALLOWED;
+#endif
+
+ // Make the new bucket table 4 times bigger
+ //
+ DWORD dwNewNumBuckets;
+ DWORD dwNewNumBucketsPlusOne;
+ {
+ S_UINT32 safeSize(m_pVolatileBucketTable->m_dwNumBuckets);
+
+ safeSize *= 4;
+
+ if (safeSize.IsOverflow())
+ return FALSE;
+
+ dwNewNumBuckets = safeSize.Value();
+
+ safeSize += 1; // Allocate one extra
+
+ if (safeSize.IsOverflow())
+ return FALSE;
+
+ dwNewNumBucketsPlusOne = safeSize.Value();
+ }
+
+    // On resizes, we still have the old array of bucket pointers to worry about.
+    // We can't free it immediately, because we may hit a race where another thread
+    // is still reading from the array while we are resizing. We therefore keep
+    // track of the old arrays: the extra slot allocated in front of each bucket
+    // array (at m_pBuckets[-1]) is used to "link" retired arrays together so that
+    // they can all be freed later, at a safe point.
+ //
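+    // Illustrative layout sketch (not in the original sources):
+    //
+    //      allocation:  [ link slot ][ bucket 0 ][ bucket 1 ] ... [ bucket N-1 ]
+    //                     ^               ^
+    //                     m_pBuckets[-1]  m_pBuckets points here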
+
+ SIZE_T cbAlloc;
+ {
+ S_SIZE_T safeSize(sizeof(EEHashEntry_t *));
+
+ safeSize *= dwNewNumBucketsPlusOne;
+
+ if (safeSize.IsOverflow())
+ return FALSE;
+
+ cbAlloc = safeSize.Value();
+ }
+
+ EEHashEntry_t **pNewBuckets = (EEHashEntry_t **) new (nothrow) BYTE[cbAlloc];
+
+ if (pNewBuckets == NULL)
+ return FALSE;
+
+ memset(pNewBuckets, 0, cbAlloc);
+
+    // The first slot is reserved for linking retired bucket arrays; skip past it.
+ pNewBuckets++;
+
+ // Run through the old table and transfer all the entries
+
+ // Be sure not to mess with the integrity of the old table while
+ // we are doing this, as there can be concurrent readers! Note that
+ // it is OK if the concurrent reader misses out on a match, though -
+ // they will have to acquire the lock on a miss & try again.
+ FastInterlockExchange( (LONG *) &m_bGrowing, 1);
+ for (DWORD i = 0; i < m_pVolatileBucketTable->m_dwNumBuckets; i++)
+ {
+ EEHashEntry_t * pEntry = m_pVolatileBucketTable->m_pBuckets[i];
+
+ // Try to lock out readers from scanning this bucket. This is
+ // obviously a race which may fail. However, note that it's OK
+ // if somebody is already in the list - it's OK if we mess
+ // with the bucket groups, as long as we don't destroy
+ // anything. The lookup function will still do appropriate
+ // comparison even if it wanders aimlessly amongst entries
+ // while we are rearranging things. If a lookup finds a match
+ // under those circumstances, great. If not, they will have
+ // to acquire the lock & try again anyway.
+
+ m_pVolatileBucketTable->m_pBuckets[i] = NULL;
+
+ while (pEntry != NULL)
+ {
+ DWORD dwNewBucket = pEntry->dwHashValue % dwNewNumBuckets;
+ EEHashEntry_t * pNextEntry = pEntry->pNext;
+
+ pEntry->pNext = pNewBuckets[dwNewBucket];
+ pNewBuckets[dwNewBucket] = pEntry;
+ pEntry = pNextEntry;
+ }
+ }
+
+
+ // Finally, store the new number of buckets and the new bucket table
+ BucketTable* pNewBucketTable = (m_pVolatileBucketTable == &m_BucketTable[0]) ?
+ &m_BucketTable[1]:
+ &m_BucketTable[0];
+
+ pNewBucketTable->m_pBuckets = pNewBuckets;
+ pNewBucketTable->m_dwNumBuckets = dwNewNumBuckets;
+
+    // Add the old table to the free list. Note that SyncClean will only
+    // delete the buckets at a safe point.
+    //
+ SyncClean::AddEEHashTable (m_pVolatileBucketTable->m_pBuckets);
+
+    // Note that SyncClean::AddEEHashTable performs at least one interlocked operation,
+    // so we do not need an interlocked operation to write m_pVolatileBucketTable.
+    // Swap the double buffer; the assignment itself is atomic.
+    //
+ m_pVolatileBucketTable = pNewBucketTable;
+
+ FastInterlockExchange( (LONG *) &m_bGrowing, 0);
+
+ return TRUE;
+}
+
+#endif // DACCESS_COMPILE
+
+// Walk through all the entries in the hash table, in no particular order and
+// without any synchronization.
+//
+// IterateStart()
+// while (IterateNext())
+// GetKey();
+//
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::
+ IterateStart(EEHashTableIteration *pIter)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ _ASSERTE_IMPL(OwnLock());
+ pIter->m_dwBucket = -1;
+ pIter->m_pEntry = NULL;
+
+#ifdef _DEBUG
+ pIter->m_pTable = this;
+#endif
+}
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::
+ IterateNext(EEHashTableIteration *pIter)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ _ASSERTE_IMPL(OwnLock());
+
+ Thread *pThread = GetThreadNULLOk();
+ GCX_MAYBE_COOP_NO_THREAD_BROKEN(pThread ? !(pThread->m_StateNC & Thread::TSNC_UnsafeSkipEnterCooperative) : FALSE);
+
+ _ASSERTE(pIter->m_pTable == (void *) this);
+
+ // If we haven't started iterating yet, or if we are at the end of a particular
+ // chain, advance to the next chain.
+ while (pIter->m_pEntry == NULL || pIter->m_pEntry->pNext == NULL)
+ {
+ if (++pIter->m_dwBucket >= m_pVolatileBucketTable->m_dwNumBuckets)
+ {
+ // advanced beyond the end of the table.
+ _ASSERTE(pIter->m_dwBucket == m_pVolatileBucketTable->m_dwNumBuckets); // client keeps asking?
+ return FALSE;
+ }
+ pIter->m_pEntry = m_pVolatileBucketTable->m_pBuckets[pIter->m_dwBucket];
+
+ // If this bucket has no chain, keep advancing. Otherwise we are done
+ if (pIter->m_pEntry)
+ return TRUE;
+ }
+
+ // We are within a chain. Advance to the next entry
+ pIter->m_pEntry = pIter->m_pEntry->pNext;
+
+ _ASSERTE(pIter->m_pEntry);
+ return TRUE;
+}
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+KeyType EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::
+ IterateGetKey(EEHashTableIteration *pIter)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_NOTRIGGER);
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(pIter->m_pTable == (void *) this);
+ _ASSERTE(pIter->m_dwBucket < m_pVolatileBucketTable->m_dwNumBuckets && pIter->m_pEntry);
+ return Helper::GetKey(pIter->m_pEntry);
+}
+
+template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
+HashDatum EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::
+ IterateGetValue(EEHashTableIteration *pIter)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pIter->m_pTable == (void *) this);
+ _ASSERTE(pIter->m_dwBucket < m_pVolatileBucketTable->m_dwNumBuckets && pIter->m_pEntry);
+ return pIter->m_pEntry->Data;
+}
+
+#endif /* _EE_HASH_INL */
diff --git a/src/vm/eemessagebox.cpp b/src/vm/eemessagebox.cpp
new file mode 100644
index 0000000000..69aff4640d
--- /dev/null
+++ b/src/vm/eemessagebox.cpp
@@ -0,0 +1,182 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// EEMessageBox.cpp
+//
+
+//
+// This module contains the implementation for the message box utility code for
+// use inside the Execution Engine. These APIs ensure the GC mode is properly
+// toggled to preemptive before the dialog is displayed.
+//
+//*****************************************************************************
+
+#include "common.h"
+#include "eemessagebox.h"
+
+// Undef these so we can call them from the EE versions.
+#undef UtilMessageBoxCatastrophicVA
+#undef UtilMessageBoxVA
+#undef UtilMessageBoxNonLocalizedVA
+
+int EEMessageBoxCatastrophicVA(
+ UINT uText, // Text for MessageBox
+ UINT uTitle, // Title for MessageBox
+ UINT uType, // Style of MessageBox
+ BOOL showFileNameInTitle, // Flag to show FileName in Caption
+ va_list insertionArgs) // Additional Arguments
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ return UtilMessageBoxCatastrophicVA(uText, uTitle, uType, showFileNameInTitle, insertionArgs);
+}
+
+int EEMessageBoxCatastrophic(
+ UINT uText, // Text for MessageBox
+ UINT uTitle, // Title for MessageBox
+ ...) // Additional Arguments
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ va_list marker;
+ va_start(marker, uTitle);
+
+ int result = EEMessageBoxCatastrophicVA(uText, uTitle, MB_OK | MB_ICONERROR, TRUE, marker);
+ va_end( marker );
+
+ return result;
+}
+
+int EEMessageBoxCatastrophicWithCustomizedStyle(
+ UINT uText, // Text for MessageBox
+ UINT uTitle, // Title for MessageBox
+ UINT uType, // Style of MessageBox
+ BOOL showFileNameInTitle, // Flag to show FileName in Caption
+ ...) // Additional Arguments
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ va_list marker;
+ va_start(marker, showFileNameInTitle);
+
+ int result = EEMessageBoxCatastrophicVA(uText, uTitle, uType, showFileNameInTitle, marker);
+ va_end( marker );
+
+ return result;
+}
+
+#ifdef _DEBUG
+
+int EEMessageBoxNonLocalizedDebugOnly(
+ LPCWSTR lpText, // Text message
+ LPCWSTR lpTitle, // Caption
+ UINT uType, // Style of MessageBox
+ ... ) // Additional Arguments
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ va_list marker;
+ va_start(marker, uType);
+
+ int result = UtilMessageBoxNonLocalizedVA(NULL, lpText, lpTitle, uType, FALSE, TRUE, NULL, marker);
+ va_end( marker );
+
+ return result;
+}
+
+#endif // _DEBUG
+
+// If we didn't display a dialog to the user, this method returns IDIGNORE, unlike the others that return IDABORT.
+int EEMessageBoxNonLocalizedNonFatal(
+ LPCWSTR lpText, // Text message
+ LPCWSTR lpTitle, // Caption
+ UINT uType, // Style of MessageBox
+ ... ) // Additional Arguments
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ va_list marker;
+ va_start(marker, uType);
+ BOOL inputFromUser = FALSE;
+
+ int result = UtilMessageBoxNonLocalizedVA(NULL, lpText, lpTitle, NULL, uType, FALSE, TRUE, &inputFromUser, marker);
+ va_end( marker );
+
+ if (inputFromUser == FALSE && result == IDABORT)
+ result = IDIGNORE;
+
+ return result;
+}
+
+// If we didn't display a dialog to the user, this method returns IDIGNORE, unlike the others that return IDABORT.
+int EEMessageBoxNonLocalizedNonFatal(
+ LPCWSTR lpText, // Text message
+ LPCWSTR lpTitle, // Caption
+ LPCWSTR lpDetails,// Detailed message like a stack trace
+ UINT uType, // Style of MessageBox
+ ... ) // Additional Arguments
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ va_list marker;
+ va_start(marker, uType);
+ BOOL inputFromUser = FALSE;
+
+ int result = UtilMessageBoxNonLocalizedVA(NULL, lpText, lpTitle, lpDetails, uType, FALSE, TRUE, &inputFromUser, marker);
+ va_end( marker );
+
+ if (inputFromUser == FALSE && result == IDABORT)
+ result = IDIGNORE;
+
+ return result;
+}
+
+// Redefine these to errors just in case code is added after this point in the file.
+#define UtilMessageBoxCatastrophicVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
+#define UtilMessageBoxVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
+#define UtilMessageBoxNonLocalizedVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
+
diff --git a/src/vm/eemessagebox.h b/src/vm/eemessagebox.h
new file mode 100644
index 0000000000..8550c88c00
--- /dev/null
+++ b/src/vm/eemessagebox.h
@@ -0,0 +1,71 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// EEMessageBox.h
+//
+
+//
+// This module contains the definition for the message box utility code for use
+// inside the Execution Engine. These APIs ensure the GC mode is properly
+// toggled to preemptive before the dialog is displayed.
+//
+//*****************************************************************************
+
+#ifndef _H_EEMESSAGEBOX
+#define _H_EEMESSAGEBOX
+
+//========================================================================
+// APIs to pop up message boxes. These should be used instead of the UtilXXX
+// versions since they ensure we properly switch to preemptive GC mode and
+// validate that the thread can tolerate GC transitions before calling
+// out.
+//========================================================================
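+// Illustrative usage sketch (not part of the original header; the resource IDs and
+// insertion arguments are placeholders following the pattern used elsewhere in the EE):
+//
+//      EEMessageBoxCatastrophic(IDS_EE_ERRORMESSAGETEMPLATE, IDS_EE_ERRORTITLE,
+//                               exitCode, W("additional detail"));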
+
+int EEMessageBoxCatastrophicVA(
+ UINT uText, // Text for MessageBox
+ UINT uTitle, // Title for MessageBox
+ UINT uType, // Style of MessageBox
+ BOOL showFileNameInTitle, // Flag to show FileName in Caption
+ va_list insertionArgs); // Additional Arguments
+
+int EEMessageBoxCatastrophic(
+    UINT uText,                 // Text for MessageBox
+    UINT uTitle,                // Title for MessageBox
+ ...); // Additional Arguments
+
+int EEMessageBoxCatastrophicWithCustomizedStyle(
+    UINT uText,                 // Text for MessageBox
+    UINT uTitle,                // Title for MessageBox
+ UINT uType, // Style of MessageBox
+ BOOL showFileNameInTitle, // Flag to show FileName in Caption
+ ...); // Additional Arguments
+
+#ifdef _DEBUG
+
+int EEMessageBoxNonLocalizedDebugOnly(
+ LPCWSTR lpText, // Text message
+    LPCWSTR lpTitle,            // Caption
+ UINT uType, // Style of MessageBox
+ ... ); // Additional Arguments
+
+#endif // _DEBUG
+
+// If we didn't display a dialog to the user, this method returns IDIGNORE, unlike the others that return IDABORT.
+int EEMessageBoxNonLocalizedNonFatal(
+ LPCWSTR lpText, // Text message
+ LPCWSTR lpTitle, // Caption
+ UINT uType, // Style of MessageBox
+ ... ); // Additional Arguments
+
+// If we didn't display a dialog to the user, this method returns IDIGNORE, unlike the others that return IDABORT.
+int EEMessageBoxNonLocalizedNonFatal(
+ LPCWSTR lpText, // Text message
+ LPCWSTR lpTitle, // Caption
+ LPCWSTR lpDetails,// Detailed message like a stack trace
+ UINT uType, // Style of MessageBox
+ ... ); // Additional Arguments
+
+#endif /* _H_EEMESSAGEBOX */
+
diff --git a/src/vm/eepolicy.cpp b/src/vm/eepolicy.cpp
new file mode 100644
index 0000000000..2e3b0aad5e
--- /dev/null
+++ b/src/vm/eepolicy.cpp
@@ -0,0 +1,1581 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// ---------------------------------------------------------------------------
+// EEPolicy.cpp
+// ---------------------------------------------------------------------------
+
+
+#include "common.h"
+#include "eepolicy.h"
+#include "corhost.h"
+#include "dbginterface.h"
+#include "eemessagebox.h"
+
+#include "eventreporter.h"
+#include "finalizerthread.h"
+#include "threadsuspend.h"
+
+#ifndef FEATURE_PAL
+#include "dwreport.h"
+#endif // !FEATURE_PAL
+
+#include "eventtrace.h"
+#undef ExitProcess
+
+BYTE g_EEPolicyInstance[sizeof(EEPolicy)];
+
+void InitEEPolicy()
+{
+ WRAPPER_NO_CONTRACT;
+ new (g_EEPolicyInstance) EEPolicy();
+}
+
+EEPolicy::EEPolicy ()
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ int n;
+ for (n = 0; n < MaxClrOperation; n++) {
+ m_Timeout[n] = INFINITE;
+ m_ActionOnTimeout[n] = eNoAction;
+ m_DefaultAction[n] = eNoAction;
+ }
+ m_Timeout[OPR_ProcessExit] = 40000;
+ m_ActionOnTimeout[OPR_ProcessExit] = eRudeExitProcess;
+ m_ActionOnTimeout[OPR_ThreadAbort] = eAbortThread;
+ m_ActionOnTimeout[OPR_ThreadRudeAbortInNonCriticalRegion] = eRudeAbortThread;
+ m_ActionOnTimeout[OPR_ThreadRudeAbortInCriticalRegion] = eRudeAbortThread;
+
+ m_DefaultAction[OPR_ThreadAbort] = eAbortThread;
+ m_DefaultAction[OPR_ThreadRudeAbortInNonCriticalRegion] = eRudeAbortThread;
+ m_DefaultAction[OPR_ThreadRudeAbortInCriticalRegion] = eRudeAbortThread;
+ m_DefaultAction[OPR_AppDomainUnload] = eUnloadAppDomain;
+ m_DefaultAction[OPR_AppDomainRudeUnload] = eRudeUnloadAppDomain;
+ m_DefaultAction[OPR_ProcessExit] = eExitProcess;
+ m_DefaultAction[OPR_FinalizerRun] = eNoAction;
+
+ for (n = 0; n < MaxClrFailure; n++) {
+ m_ActionOnFailure[n] = eNoAction;
+ }
+ m_ActionOnFailure[FAIL_CriticalResource] = eThrowException;
+ m_ActionOnFailure[FAIL_NonCriticalResource] = eThrowException;
+ m_ActionOnFailure[FAIL_OrphanedLock] = eNoAction;
+ m_ActionOnFailure[FAIL_FatalRuntime] = eRudeExitProcess;
+#ifdef FEATURE_CORECLR
+    // For CoreCLR, initialize the default action for AV processing to eNoAction,
+    // which allows all kinds of code to catch the AV exception. If the host wants,
+    // it can specify a different action for this.
+ m_ActionOnFailure[FAIL_AccessViolation] = eNoAction;
+#endif // FEATURE_CORECLR
+ m_ActionOnFailure[FAIL_StackOverflow] = eRudeExitProcess;
+ m_ActionOnFailure[FAIL_CodeContract] = eThrowException;
+ m_unhandledExceptionPolicy = eRuntimeDeterminedPolicy;
+}
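+// (Illustrative note, not in the original sources: a host can override these
+// defaults later, subject to the validation routines below, e.g.
+//      GetEEPolicy()->SetActionOnFailure(FAIL_AccessViolation, eRudeExitProcess);
+// which IsValidActionForFailure explicitly permits on CoreCLR.)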
+
+BOOL EEPolicy::IsValidActionForOperation(EClrOperation operation, EPolicyAction action)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ switch (operation) {
+ case OPR_ThreadAbort:
+ return action >= eAbortThread &&
+ action < MaxPolicyAction;
+ case OPR_ThreadRudeAbortInNonCriticalRegion:
+ case OPR_ThreadRudeAbortInCriticalRegion:
+ return action >= eRudeAbortThread && action != eUnloadAppDomain &&
+ action < MaxPolicyAction;
+ case OPR_AppDomainUnload:
+ return action >= eUnloadAppDomain &&
+ action < MaxPolicyAction;
+ case OPR_AppDomainRudeUnload:
+ return action >= eRudeUnloadAppDomain &&
+ action < MaxPolicyAction;
+ case OPR_ProcessExit:
+ return action >= eExitProcess &&
+ action < MaxPolicyAction;
+ case OPR_FinalizerRun:
+ return action == eNoAction ||
+ (action >= eAbortThread &&
+ action < MaxPolicyAction);
+ default:
+ _ASSERT (!"Do not know valid action for this operation");
+ break;
+ }
+ return FALSE;
+}
+
+BOOL EEPolicy::IsValidActionForTimeout(EClrOperation operation, EPolicyAction action)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ switch (operation) {
+ case OPR_ThreadAbort:
+ return action > eAbortThread &&
+ action < MaxPolicyAction;
+ case OPR_ThreadRudeAbortInNonCriticalRegion:
+ case OPR_ThreadRudeAbortInCriticalRegion:
+ return action > eRudeUnloadAppDomain &&
+ action < MaxPolicyAction;
+ case OPR_AppDomainUnload:
+ return action > eUnloadAppDomain &&
+ action < MaxPolicyAction;
+ case OPR_AppDomainRudeUnload:
+ return action > eRudeUnloadAppDomain &&
+ action < MaxPolicyAction;
+ case OPR_ProcessExit:
+ return action > eExitProcess &&
+ action < MaxPolicyAction;
+ case OPR_FinalizerRun:
+ return action == eNoAction ||
+ (action >= eAbortThread &&
+ action < MaxPolicyAction);
+ default:
+ _ASSERT (!"Do not know valid action for this operation");
+ break;
+ }
+ return FALSE;
+}
+
+BOOL EEPolicy::IsValidActionForFailure(EClrFailure failure, EPolicyAction action)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ switch (failure) {
+ case FAIL_NonCriticalResource:
+ return action >= eThrowException &&
+ action < MaxPolicyAction;
+ case FAIL_CriticalResource:
+ return action >= eThrowException &&
+ action < MaxPolicyAction;
+ case FAIL_FatalRuntime:
+ return action >= eRudeExitProcess &&
+ action < MaxPolicyAction;
+ case FAIL_OrphanedLock:
+ return action >= eUnloadAppDomain &&
+ action < MaxPolicyAction;
+ case FAIL_AccessViolation:
+#ifdef FEATURE_CORECLR
+ // Allowed actions on failure are:
+ //
+ // eNoAction or eRudeExitProcess.
+ return ((action == eNoAction) || (action == eRudeExitProcess));
+#else // !FEATURE_CORECLR
+ // FAIL_AccessViolation is defined for the desktop so that
+ // if any more definitions are added after it, their value
+ // should remain constant irrespective of whether its the
+ // desktop CLR or CoreCLR.
+ //
+ // That said, currently, Desktop CLR does not support
+ // FAIL_AccessViolation. Thus, any calls which use
+ // this failure are not allowed.
+ return FALSE;
+#endif // FEATURE_CORECLR
+ case FAIL_StackOverflow:
+ return action >= eRudeUnloadAppDomain &&
+ action < MaxPolicyAction;
+ case FAIL_CodeContract:
+ return action >= eThrowException &&
+ action <= eExitProcess;
+ default:
+ _ASSERTE (!"Do not know valid action for this failure");
+ break;
+ }
+
+ return FALSE;
+}
+
+HRESULT EEPolicy::SetTimeout(EClrOperation operation, DWORD timeout)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ if (static_cast<UINT>(operation) < MaxClrOperation)
+ {
+ m_Timeout[operation] = timeout;
+ if (operation == OPR_FinalizerRun &&
+ g_fEEStarted)
+ {
+ FastInterlockOr((DWORD*)&g_FinalizerWaiterStatus, FWS_WaitInterrupt);
+ FinalizerThread::SignalFinalizationDone(FALSE);
+ }
+ return S_OK;
+    }
+ else
+ {
+ return E_INVALIDARG;
+ }
+}
+
+HRESULT EEPolicy::SetActionOnTimeout(EClrOperation operation, EPolicyAction action)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ if (static_cast<UINT>(operation) < MaxClrOperation &&
+ IsValidActionForTimeout(operation, action))
+ {
+ m_ActionOnTimeout[operation] = action;
+ return S_OK;
+ }
+ else
+ {
+ return E_INVALIDARG;
+ }
+}
+
+EPolicyAction EEPolicy::GetFinalAction(EPolicyAction action, Thread *pThread)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(static_cast<UINT>(action) < MaxPolicyAction);
+
+ if (action < eAbortThread || action > eFastExitProcess)
+ {
+ return action;
+ }
+
+ while(TRUE)
+ {
+ // Look at default action. If the default action is more severe,
+ // use the default action instead.
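+        // For example (illustrative): if the host set m_DefaultAction[OPR_ThreadAbort]
+        // to eRudeAbortThread, an incoming eAbortThread is escalated to eRudeAbortThread
+        // here, and the loop then re-evaluates that action against the rude-abort
+        // defaults before settling on a final answer.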
+ EPolicyAction defaultAction = action;
+ switch (action)
+ {
+ case eAbortThread:
+ defaultAction = m_DefaultAction[OPR_ThreadAbort];
+ break;
+ case eRudeAbortThread:
+ if (pThread && !pThread->HasLockInCurrentDomain())
+ {
+ defaultAction = m_DefaultAction[OPR_ThreadRudeAbortInNonCriticalRegion];
+ }
+ else
+ {
+ defaultAction = m_DefaultAction[OPR_ThreadRudeAbortInCriticalRegion];
+ }
+ break;
+ case eUnloadAppDomain:
+ defaultAction = m_DefaultAction[OPR_AppDomainUnload];
+ break;
+ case eRudeUnloadAppDomain:
+ defaultAction = m_DefaultAction[OPR_AppDomainRudeUnload];
+ break;
+ case eExitProcess:
+ case eFastExitProcess:
+ defaultAction = m_DefaultAction[OPR_ProcessExit];
+ if (defaultAction < action)
+ {
+ defaultAction = action;
+ }
+ break;
+ default:
+ break;
+ }
+ _ASSERTE(static_cast<UINT>(defaultAction) < MaxPolicyAction);
+
+ if (defaultAction == action)
+ {
+ return action;
+ }
+
+ _ASSERTE(defaultAction > action);
+ action = defaultAction;
+ }
+}
+
+// Allow setting timeout and action in one call.
+// If we ever decide to make Policy updates atomic, we can take a lock here,
+// whereas SetTimeout and SetActionOnTimeout cannot.
+HRESULT EEPolicy::SetTimeoutAndAction(EClrOperation operation, DWORD timeout, EPolicyAction action)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ if (static_cast<UINT>(operation) < MaxClrOperation &&
+ IsValidActionForTimeout(operation, action))
+ {
+ m_ActionOnTimeout[operation] = action;
+ m_Timeout[operation] = timeout;
+ if (operation == OPR_FinalizerRun &&
+ g_fEEStarted)
+ {
+ FastInterlockOr((DWORD*)&g_FinalizerWaiterStatus, FWS_WaitInterrupt);
+ FinalizerThread::SignalFinalizationDone(FALSE);
+ }
+ return S_OK;
+ }
+ else
+ {
+ return E_INVALIDARG;
+ }
+}
+
+HRESULT EEPolicy::SetDefaultAction(EClrOperation operation, EPolicyAction action)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ if (static_cast<UINT>(operation) < MaxClrOperation &&
+ IsValidActionForOperation(operation, action))
+ {
+ m_DefaultAction[operation] = action;
+ return S_OK;
+ }
+ else
+ {
+ return E_INVALIDARG;
+ }
+}
+
+HRESULT EEPolicy::SetActionOnFailure(EClrFailure failure, EPolicyAction action)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ if (static_cast<UINT>(failure) < MaxClrFailure &&
+ IsValidActionForFailure(failure, action))
+ {
+ m_ActionOnFailure[failure] = action;
+ return S_OK;
+ }
+ else
+ {
+ return E_INVALIDARG;
+ }
+}
+
+EPolicyAction EEPolicy::GetActionOnFailureNoHostNotification(EClrFailure failure)
+{
+ CONTRACTL
+ {
+ SO_TOLERANT;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+    }
+    CONTRACTL_END;
+
+ _ASSERTE (failure < MaxClrFailure);
+ if (failure == FAIL_StackOverflow)
+ {
+ return m_ActionOnFailure[failure];
+ }
+
+ return GetFinalAction(m_ActionOnFailure[failure], GetThread());
+}
+
+EPolicyAction EEPolicy::GetActionOnFailure(EClrFailure failure)
+{
+ CONTRACTL
+ {
+ SO_TOLERANT;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+    }
+    CONTRACTL_END;
+
+ _ASSERTE(static_cast<UINT>(failure) < MaxClrFailure);
+ if (failure == FAIL_StackOverflow)
+ {
+ return m_ActionOnFailure[failure];
+ }
+
+ EPolicyAction finalAction = GetActionOnFailureNoHostNotification(failure);
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostPolicyManager *pHostPolicyManager = CorHost2::GetHostPolicyManager();
+ if (pHostPolicyManager)
+ {
+#ifdef _DEBUG
+ Thread* pThread = GetThread();
+ if (pThread)
+ {
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_Escalation);
+ }
+#endif
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ pHostPolicyManager->OnFailure(failure, finalAction);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ return finalAction;
+}
+
+
+void EEPolicy::NotifyHostOnTimeout(EClrOperation operation, EPolicyAction action)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostPolicyManager *pHostPolicyManager = CorHost2::GetHostPolicyManager();
+ if (pHostPolicyManager)
+ {
+#ifdef _DEBUG
+ Thread* pThread = GetThread();
+ if (pThread)
+ {
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_Escalation);
+ }
+#endif
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ pHostPolicyManager->OnTimeout(operation, action);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+
+
+void EEPolicy::NotifyHostOnDefaultAction(EClrOperation operation, EPolicyAction action)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostPolicyManager *pHostPolicyManager = CorHost2::GetHostPolicyManager();
+ if (pHostPolicyManager)
+ {
+#ifdef _DEBUG
+ Thread* pThread = GetThread();
+ if (pThread)
+ {
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_Escalation);
+ }
+#endif
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ pHostPolicyManager->OnDefaultAction(operation, action);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+
+void SafeExitProcess(UINT exitCode, BOOL fAbort = FALSE, ShutdownCompleteAction sca = SCA_ExitProcessWhenShutdownComplete)
+{
+ // The process is shutting down. No need to check SO contract.
+ SO_NOT_MAINLINE_FUNCTION;
+ STRESS_LOG2(LF_SYNC, LL_INFO10, "SafeExitProcess: exitCode = %d, fAbort = %d\n", exitCode, fAbort);
+ CONTRACTL
+ {
+ DISABLED(GC_TRIGGERS);
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ // The runtime must be in the appropriate thread mode when we exit, so that we
+ // aren't surprised by the thread mode when our DLL_PROCESS_DETACH occurs, or when
+ // other DLLs call Release() on us in their detach [dangerous!], etc.
+ GCX_PREEMP_NO_DTOR();
+
+ FastInterlockExchange((LONG*)&g_fForbidEnterEE, TRUE);
+
+ ProcessEventForHost(Event_ClrDisabled, NULL);
+
+ // Note that for free and retail builds StressLog must also be enabled
+ if (g_pConfig && g_pConfig->StressLog())
+ {
+ if (CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BreakOnBadExit))
+ {
+ // Workaround for aspnet
+ WCHAR wszFilename[_MAX_PATH];
+ bool bShouldAssert = true;
+ if (WszGetModuleFileName(NULL, wszFilename, _MAX_PATH))
+ {
+ _wcslwr_s(wszFilename, COUNTOF(wszFilename));
+
+ if (wcsstr(wszFilename, W("aspnet_compiler")))
+ {
+ bShouldAssert = false;
+ }
+ }
+
+ unsigned goodExit = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_SuccessExit);
+ if (bShouldAssert && exitCode != goodExit)
+ {
+ _ASSERTE(!"Bad Exit value");
+ FAULT_NOT_FATAL(); // if we OOM we can simply give up
+            SetErrorMode(0);            // Ensure that we actually cause the message box to pop.
+ EEMessageBoxCatastrophic(IDS_EE_ERRORMESSAGETEMPLATE, IDS_EE_ERRORTITLE, exitCode, W("BreakOnBadExit: returning bad exit code"));
+ }
+ }
+ }
+
+ // If we call ExitProcess, other threads will be torn down
+ // so we don't get to debug their state. Stop this!
+#ifdef _DEBUG
+ if (_DbgBreakCount)
+ _ASSERTE(!"In SafeExitProcess: An assert was hit on some other thread");
+#endif
+
+ // Turn off exception processing, because if some other random DLL has a
+ // fault in DLL_PROCESS_DETACH, we could get called for exception handling.
+ // Since we've turned off part of the runtime, we can't, for instance,
+ // properly execute the GC that handling an exception might trigger.
+ g_fNoExceptions = true;
+ LOG((LF_EH, LL_INFO10, "SafeExitProcess: turning off exceptions\n"));
+
+ if (sca == SCA_ExitProcessWhenShutdownComplete)
+ {
+ // disabled because if we fault in this code path we will trigger our
+ // Watson code via EntryPointFilter which is THROWS (see Dev11 317016)
+ CONTRACT_VIOLATION(ThrowsViolation);
+ EEPolicy::ExitProcessViaShim(exitCode);
+ }
+}
+
+// This is a helper to exit the process after coordinating with the shim. It is used by
+// SafeExitProcess above, as well as from CorHost2::ExitProcess when we know that we must
+// exit the process without doing further work to shutdown this runtime. This first attempts
+// to call back to the Shim to shutdown any other runtimes within the process.
+//
+// IMPORTANT NOTE: exercise extreme caution when adding new calls to this method. It is highly
+// likely that you want to call SafeExitProcess, or EEPolicy::HandleExitProcess instead of this.
+// This function only exists to factor some common code out of the methods mentioned above.
+
+//static
+void EEPolicy::ExitProcessViaShim(UINT exitCode)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // We must call back to the Shim in order to exit the process, as this may be just one
+ // runtime in a process with many. We need to give the other runtimes a chance to exit
+ // cleanly. If we can't make the call, or if the call fails for some reason, then we
+ // simply exit the process here, which is rude to the others, but the best we can do.
+#if !defined(FEATURE_CORECLR)
+ {
+ ReleaseHolder<ICLRRuntimeHostInternal> pRuntimeHostInternal;
+
+ HRESULT hr = g_pCLRRuntime->GetInterface(CLSID_CLRRuntimeHostInternal,
+ IID_ICLRRuntimeHostInternal,
+ &pRuntimeHostInternal);
+
+ if (SUCCEEDED(hr))
+ {
+ pRuntimeHostInternal->ShutdownAllRuntimesThenExit(exitCode);
+ LOG((LF_EH, LL_INFO10, "ExitProcessViaShim: shim returned... exiting now.\n"));
+ }
+ }
+#endif // !FEATURE_CORECLR
+
+ ExitProcess(exitCode);
+}
+
+
+//---------------------------------------------------------------------------------------
+// DisableRuntime disables this runtime, suspending all managed execution and preventing
+// threads from entering the runtime. This will cause the caller to block forever as well
+// unless sca is SCA_ReturnWhenShutdownComplete.
+//---------------------------------------------------------------------------------------
+void DisableRuntime(ShutdownCompleteAction sca)
+{
+ CONTRACTL
+ {
+ DISABLED(GC_TRIGGERS);
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ FastInterlockExchange((LONG*)&g_fForbidEnterEE, TRUE);
+
+ if (!g_fSuspendOnShutdown)
+ {
+ if (!IsGCThread())
+ {
+ if (ThreadStore::HoldingThreadStore(GetThread()))
+ {
+ ThreadSuspend::UnlockThreadStore();
+ }
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_SHUTDOWN);
+ }
+
+ if (!g_fSuspendOnShutdown)
+ {
+ ThreadStore::TrapReturningThreads(TRUE);
+ g_fSuspendOnShutdown = TRUE;
+ ClrFlsSetThreadType(ThreadType_Shutdown);
+ }
+
+ // Don't restart runtime. CLR is disabled.
+ }
+
+ GCX_PREEMP_NO_DTOR();
+
+ ProcessEventForHost(Event_ClrDisabled, NULL);
+ ClrFlsClearThreadType(ThreadType_Shutdown);
+
+ if (g_pDebugInterface != NULL)
+ {
+ g_pDebugInterface->DisableDebugger();
+ }
+
+ if (sca == SCA_ExitProcessWhenShutdownComplete)
+ {
+ __SwitchToThread(INFINITE, CALLER_LIMITS_SPINNING);
+ _ASSERTE (!"Should not reach here");
+ SafeExitProcess(0);
+ }
+}
+
+//---------------------------------------------------------------------------------------
+// HandleExitProcessHelper is used to shutdown the runtime as specified by the given
+// action, then to exit the process. Note, however, that the process will not exit if
+// sca is SCA_ReturnWhenShutdownComplete. In that case, this method will simply return after
+// performing the shutdown actions.
+//---------------------------------------------------------------------------------------
+
+// If g_fFastExitProcess is 0, do a normal shutdown.
+// If g_fFastExitProcess is 1, do a fast shutdown: only logging work is performed.
+// If g_fFastExitProcess is 2, do not run EEShutDown at all.
+DWORD g_fFastExitProcess = 0;
+
+extern void STDMETHODCALLTYPE EEShutDown(BOOL fIsDllUnloading);
+
+static void HandleExitProcessHelper(EPolicyAction action, UINT exitCode, ShutdownCompleteAction sca)
+{
+ WRAPPER_NO_CONTRACT;
+
+ switch (action) {
+ case eFastExitProcess:
+ g_fFastExitProcess = 1;
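+        // Intentional fall-through into the normal exit path.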
+ case eExitProcess:
+ if (g_fEEStarted)
+ {
+ EEShutDown(FALSE);
+ }
+ if (exitCode == 0)
+ {
+ exitCode = GetLatchedExitCode();
+ }
+ SafeExitProcess(exitCode, FALSE, sca);
+ break;
+ case eRudeExitProcess:
+ g_fFastExitProcess = 2;
+ SafeExitProcess(exitCode, TRUE, sca);
+ break;
+ case eDisableRuntime:
+ DisableRuntime(sca);
+ break;
+ default:
+ _ASSERTE (!"Invalid policy");
+ break;
+ }
+}
+
+
+EPolicyAction EEPolicy::DetermineResourceConstraintAction(Thread *pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EPolicyAction action;
+ if (pThread->HasLockInCurrentDomain()) {
+ action = GetEEPolicy()->GetActionOnFailure(FAIL_CriticalResource);
+ }
+ else
+ action = GetEEPolicy()->GetActionOnFailure(FAIL_NonCriticalResource);
+
+ AppDomain *pDomain = GetAppDomain();
+    // If it is the default domain, we cannot unload the appdomain.
+ if (pDomain == SystemDomain::System()->DefaultDomain() &&
+ (action == eUnloadAppDomain || action == eRudeUnloadAppDomain))
+ {
+ action = eThrowException;
+ }
+    // If the current thread is the AD unload helper thread, it should not block itself.
+ else if (pThread->HasThreadStateNC(Thread::TSNC_ADUnloadHelper) &&
+ action < eExitProcess)
+ {
+ action = eThrowException;
+ }
+ return action;
+}
+
+
+void EEPolicy::PerformADUnloadAction(EPolicyAction action, BOOL haveStack, BOOL forStackOverflow)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ STRESS_LOG0(LF_EH, LL_INFO100, "In EEPolicy::PerformADUnloadAction\n");
+
+ Thread *pThread = GetThread();
+
+ AppDomain *pDomain = GetAppDomain();
+
+ if (!IsFinalizerThread())
+ {
+ int count = 0;
+ Frame *pFrame = pThread->GetFirstTransitionInto(GetAppDomain(), &count);
+        pThread->SetUnloadBoundaryFrame(pFrame);
+ }
+
+ pDomain->EnableADUnloadWorker(action==eUnloadAppDomain? ADU_Safe : ADU_Rude);
+    // Can't perform a join when we are handling a true SO. We need to enable the unload worker
+    // but let the thread continue running through EH processing so that we can recover the
+    // stack and reset the guard page.
+ if (haveStack)
+ {
+ pThread->SetAbortRequest(action==eUnloadAppDomain? EEPolicy::TA_V1Compatible : EEPolicy::TA_Rude);
+ if (forStackOverflow)
+ {
+ OBJECTREF exceptObj = CLRException::GetPreallocatedRudeThreadAbortException();
+ pThread->SetAbortInitiated();
+ RaiseTheExceptionInternalOnly(exceptObj, FALSE, TRUE);
+ }
+
+ OBJECTREF exceptObj = CLRException::GetPreallocatedThreadAbortException();
+ pThread->SetAbortInitiated();
+ RaiseTheExceptionInternalOnly(exceptObj, FALSE, FALSE);
+ }
+}
+
+void EEPolicy::PerformResourceConstraintAction(Thread *pThread, EPolicyAction action, UINT exitCode, BOOL haveStack)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(GetAppDomain() != NULL);
+
+ switch (action) {
+ case eThrowException:
+ // Caller is going to rethrow.
+ return;
+ case eAbortThread:
+ pThread->UserAbort(Thread::TAR_Thread, TA_Safe, GetEEPolicy()->GetTimeout(OPR_ThreadAbort), Thread::UAC_Normal);
+ break;
+ case eRudeAbortThread:
+ pThread->UserAbort(Thread::TAR_Thread, TA_Rude, GetEEPolicy()->GetTimeout(OPR_ThreadAbort), Thread::UAC_Normal);
+ break;
+ case eUnloadAppDomain:
+ case eRudeUnloadAppDomain:
+ {
+ GCX_ASSERT_COOP();
+            PerformADUnloadAction(action, haveStack);
+ }
+ break;
+ case eExitProcess:
+ case eFastExitProcess:
+ case eRudeExitProcess:
+ case eDisableRuntime:
+ HandleExitProcessFromEscalation(action, exitCode);
+ break;
+ default:
+ _ASSERTE (!"Invalid policy");
+ break;
+ }
+}
+
+void EEPolicy::HandleOutOfMemory()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE (g_pOutOfMemoryExceptionClass);
+
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+
+ EPolicyAction action = DetermineResourceConstraintAction(pThread);
+
+ // Check if we are executing in the context of a Constrained Execution Region.
+ if (action != eThrowException && Thread::IsExecutingWithinCer())
+ {
+ // Hitting OOM in a CER region should throw the OOM without regard to the escalation policy
+ // since the CER author has declared they are hardened against such failures. That's
+ // the whole point of CERs, to denote regions where code knows exactly how to deal with
+ // failures in an attempt to minimize the need for rollback or recycling.
+ return;
+ }
+
+ PerformResourceConstraintAction(pThread, action, HOST_E_EXITPROCESS_OUTOFMEMORY, TRUE);
+}
+
+#ifdef FEATURE_STACK_PROBE
+//---------------------------------------------------------------------------------------
+//
+// IsSOTolerant - Is the current thread in SO Tolerant region?
+//
+// Arguments:
+// pLimitFrame: the limit of search for frames
+//
+// Return Value:
+// TRUE if in SO tolerant region.
+// FALSE if in SO intolerant region.
+//
+// Note:
+// We walk our frame chain to decide. If HelperMethodFrame is seen first, we are in tolerant
+// region. If EnterSOIntolerantCodeFrame is seen first, we are in intolerant region.
+//
+BOOL Thread::IsSOTolerant(void * pLimitFrame)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ Frame *pFrame = GetFrame();
+ void* pSOIntolerantMarker = ClrFlsGetValue(TlsIdx_SOIntolerantTransitionHandler);
+ if (pSOIntolerantMarker == FRAME_TOP)
+ {
+ // We have not set a marker for intolerant transition yet.
+ return TRUE;
+ }
+ while (pFrame != FRAME_TOP && pFrame < pLimitFrame)
+ {
+ Frame::ETransitionType type = pFrame->GetTransitionType();
+ if (pFrame > pSOIntolerantMarker)
+ {
+ return FALSE;
+ }
+ else if (type == Frame::TT_M2U || type == Frame::TT_InternalCall ||
+ // We cannot call HelperMethodFrame::GetFunction during an SO since the call
+ // may need to call into the host. This is why we check for TT_InternalCall first.
+ pFrame->GetFunction() != NULL)
+ {
+ return TRUE;
+ }
+ pFrame = pFrame->Next();
+ }
+
+ if (pFrame == FRAME_TOP)
+ // We walked to the end of the chain; the check above the while loop already told us
+ // this thread has an IntolerantMarker on its stack, so we are in the intolerant region.
+ return FALSE;
+ else
+ return TRUE;
+}
+
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+// EEPolicy::HandleStackOverflow - Handle stack overflow according to policy
+//
+// Arguments:
+// detector: which handler detected the stack overflow (see StackOverflowDetector)
+// pLimitFrame: the limit of the search for frames when deciding whether we are SO tolerant
+//
+// Return Value:
+// None.
+//
+// How is stack overflow handled?
+// If the stack overflows in the non-hosted case, we terminate the process.
+// For the hosted case with an escalation policy:
+// 1. If the stack overflows in managed code, or in the VM before switching to the SO intolerant region, and the GC mode is Cooperative,
+// the domain is rudely unloaded, or the process is terminated if the current domain is the default domain.
+// a. This action is done through BEGIN_SO_TOLERANT_CODE if there is one on the stack.
+// b. If that macro is not on the stack, we mark the domain as having an unload requested, and when the thread
+// dies or is recycled, we finish the AD unload.
+// 2. If the stack overflows in the SO tolerant region, but the GC mode is Preemptive, the process is killed in the vectored handler, or in our
+// managed exception handler (COMPlusFrameHandler or ProcessCLRException).
+// 3. If the stack overflows in the SO intolerant region, the process is killed as soon as the exception is seen by our vectored handler, or by
+// our managed exception handler.
+//
+// If the SO probing code is disabled (FEATURE_STACK_PROBE not defined), then the process
+// is terminated on any stack overflow, as all CLR code is then considered SO intolerant.
+void EEPolicy::HandleStackOverflow(StackOverflowDetector detector, void * pLimitFrame)
+{
+ WRAPPER_NO_CONTRACT;
+
+ STRESS_LOG0(LF_EH, LL_INFO100, "In EEPolicy::HandleStackOverflow\n");
+
+ Thread *pThread = GetThread();
+
+ if (pThread == NULL)
+ {
+ //_ASSERTE (detector != SOD_ManagedFrameHandler);
+ // ProcessSOEventForHost(NULL, FALSE);
+
+ // For security reasons, it is not safe to continue execution if a stack overflow happens,
+ // unless a host tells us to do something different.
+ // EEPolicy::HandleFatalStackOverflow(NULL);
+ return;
+ }
+
+#ifdef FEATURE_STACK_PROBE
+
+ // We only process SO once at
+ // 1. VectoredExceptionHandler if SO in mscorwks
+ // 2. managed exception handler
+ // 3. SO_Tolerant transition handler
+ if (pThread->HasThreadStateNC(Thread::TSNC_SOWorkNeeded) &&
+ detector != SOD_UnmanagedFrameHandler)
+ {
+ return;
+ }
+#endif
+
+#ifdef FEATURE_STACK_PROBE
+ BOOL fInSoTolerant = pThread->IsSOTolerant(pLimitFrame);
+#else
+ BOOL fInSoTolerant = FALSE;
+#endif
+
+ EXCEPTION_POINTERS exceptionInfo;
+ GetCurrentExceptionPointers(&exceptionInfo);
+
+ _ASSERTE(exceptionInfo.ExceptionRecord);
+
+#ifdef FEATURE_STACK_PROBE
+ DWORD exceptionCode = exceptionInfo.ExceptionRecord->ExceptionCode;
+
+ AppDomain *pCurrentDomain = ::GetAppDomain();
+ BOOL fInDefaultDomain = (pCurrentDomain == SystemDomain::System()->DefaultDomain());
+ BOOL fInCLR = IsIPInModule(g_pMSCorEE, (PCODE)GetIP(exceptionInfo.ContextRecord));
+
+ if (exceptionCode == EXCEPTION_SOFTSO)
+ {
+ // Our probe detected that the thread does not have enough stack, but we have not
+ // trashed the process state yet.
+ fInSoTolerant = TRUE;
+ }
+ else
+ {
+ _ASSERTE (exceptionCode == STATUS_STACK_OVERFLOW);
+
+ switch (detector)
+ {
+ case SOD_ManagedFrameHandler:
+ if (!pThread->PreemptiveGCDisabled() && !fInCLR && fInSoTolerant
+ &&
+ // Before we call managed code, we probe inside ReverseEnterRuntime for BACKOUT_CODE_STACK_LIMIT pages
+ // If we hit a hard SO here, we are still in our stub
+ (!CLRTaskHosted() || (UINT_PTR)pThread->m_pFrame - pThread->GetLastAllowableStackAddress() >=
+ ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT) * OS_PAGE_SIZE)
+ )
+ {
+ // Managed exception handler detects SO, but the thread is in preemptive GC mode,
+ // and the IP is outside CLR. This means we are inside a PINVOKE call.
+ fInSoTolerant = FALSE;
+ }
+ break;
+
+ case SOD_UnmanagedFrameHandler:
+ break;
+
+ case SOD_SOIntolerantTransitor:
+ fInSoTolerant = FALSE;
+ break;
+
+ case SOD_SOTolerantTransitor:
+ if (!fInCLR)
+ {
+ // If SO happens outside of CLR, and it is not detected by managed frame handler,
+ // it is fatal
+ fInSoTolerant = FALSE;
+ }
+ break;
+
+ default:
+ _ASSERTE(!"should not get here");
+ }
+
+ if (fInDefaultDomain)
+ {
+ // StackOverflow in default domain is fatal
+ fInSoTolerant = FALSE;
+ }
+ }
+
+#endif // FEATURE_STACK_PROBE
+
+ ProcessSOEventForHost(&exceptionInfo, fInSoTolerant);
+
+#ifdef FEATURE_STACK_PROBE
+ if (!CLRHosted() || GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) != eRudeUnloadAppDomain)
+ {
+ // For security reasons, it is not safe to continue execution if a stack overflow happens,
+ // unless a host tells us to do something different.
+ EEPolicy::HandleFatalStackOverflow(&exceptionInfo);
+ }
+#endif
+
+ if (!fInSoTolerant)
+ {
+ EEPolicy::HandleFatalStackOverflow(&exceptionInfo);
+ }
+#ifdef FEATURE_STACK_PROBE
+ else
+ {
+ // EnableADUnloadWorker is SO_Intolerant, but here we know that even with only one
+ // page of stack left, it will only update the state of the Domain.
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ // Mark the current domain requested for rude unload
+ if (!fInDefaultDomain)
+ {
+ pCurrentDomain->EnableADUnloadWorker(ADU_Rude, FALSE);
+ }
+
+ pThread->PrepareThreadForSOWork();
+
+ pThread->MarkThreadForAbort(
+ (Thread::ThreadAbortRequester)(Thread::TAR_Thread|Thread::TAR_StackOverflow),
+ EEPolicy::TA_Rude);
+
+ pThread->SetSOWorkNeeded();
+ }
+#endif
+}
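For context, the escalation policy consulted above is configured by a host through the CLR hosting API. A minimal sketch follows, assuming `pCLRControl` is an ICLRControl* the host already obtained from ICLRRuntimeHost::GetCLRControl; error handling is abbreviated.

    // Minimal sketch: a host opting stack overflow into rude AppDomain unload.
    ICLRPolicyManager *pPolicyManager = NULL;
    HRESULT hr = pCLRControl->GetCLRManager(IID_ICLRPolicyManager,
                                            (void **)&pPolicyManager);
    if (SUCCEEDED(hr))
    {
        // With this setting, HandleStackOverflow above takes the rude-unload
        // path for non-default domains instead of tearing down the process.
        hr = pPolicyManager->SetActionOnFailure(FAIL_StackOverflow,
                                                eRudeUnloadAppDomain);
        pPolicyManager->Release();
    }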
+
+
+// We provide WatsonLastChance with a SO exception record. The ExceptionAddress is set to 0
+// here. This ExceptionPointers struct is handed off to the debugger as is. A copy of this struct
+// is made before invoking Watson and the ExceptionAddress is set by inspecting the stack. Note
+// that the ExceptionContext member is unused and so it's ok to set it to NULL.
+static EXCEPTION_RECORD g_SOExceptionRecord = {
+ STATUS_STACK_OVERFLOW, // ExceptionCode
+ 0, // ExceptionFlags
+ NULL, // ExceptionRecord
+ 0, // ExceptionAddress
+ 0, // NumberOfParameters
+ {} }; // ExceptionInformation
+
+EXCEPTION_POINTERS g_SOExceptionPointers = {&g_SOExceptionRecord, NULL};
+
+#ifdef FEATURE_STACK_PROBE
+// This function may be called on a thread before debugger is notified of the thread, like in
+// ManagedThreadBase_DispatchMiddle. Currently we cannot notify the managed debugger, because
+// the debugger's right side (RS) requires that the thread notification be sent first.
+void EEPolicy::HandleSoftStackOverflow(BOOL fSkipDebugger)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // If we trigger a SO while handling the soft stack overflow,
+ // we'll rip the process
+ BEGIN_SO_INTOLERANT_CODE_NOPROBE;
+
+ AppDomain *pCurrentDomain = ::GetAppDomain();
+
+ if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) != eRudeUnloadAppDomain ||
+ pCurrentDomain == SystemDomain::System()->DefaultDomain())
+ {
+ // We may not be able to build a context on the stack
+ ProcessSOEventForHost(NULL, FALSE);
+
+
+ EEPolicy::HandleFatalStackOverflow(&g_SOExceptionPointers, fSkipDebugger);
+ }
+ //else if (pCurrentDomain == SystemDomain::System()->DefaultDomain())
+ //{
+ // We hit a soft SO in the default domain, but the default domain cannot be unloaded.
+ // Soft SO can happen in the default domain, e.g. in GetResourceString or EnsureGrantSetSerialized.
+ // So the caller is going to throw a managed exception.
+ // RaiseException(EXCEPTION_SOFTSO, 0, 0, NULL);
+ //}
+ else
+ {
+ Thread* pThread = GetThread();
+
+ if (pThread && pThread->PreemptiveGCDisabled())
+ {
+ // Mark the current domain requested for rude unload
+ GCX_ASSERT_COOP();
+ EEPolicy::PerformADUnloadAction(eRudeUnloadAppDomain, TRUE, TRUE);
+ }
+
+ // We are leaving the VM boundary, either entering managed code, or entering
+ // non-VM unmanaged code.
+ // We should not throw an internal C++ exception. Instead we raise an exception
+ // with the EXCEPTION_SOFTSO code.
+ RaiseException(EXCEPTION_SOFTSO, 0, 0, NULL);
+ }
+
+ END_SO_INTOLERANT_CODE_NOPROBE;
+
+}
+
+void EEPolicy::HandleStackOverflowAfterCatch()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef STACK_GUARDS_DEBUG
+ BaseStackGuard::RestoreCurrentGuard(FALSE);
+#endif
+ Thread *pThread = GetThread();
+ pThread->RestoreGuardPage();
+ pThread->FinishSOWork();
+}
+#endif
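Thread::RestoreGuardPage, called above, re-establishes the stack guard page that the OS consumed when it raised STATUS_STACK_OVERFLOW. Conceptually this boils down to a Win32 call along the following lines; this is a simplified sketch, and the real implementation computes the page address from the thread's stack bounds and handles failure.

    // Simplified sketch of re-arming a consumed stack guard page (Win32).
    // 'pGuardPage' is an assumed pointer to the page to re-protect.
    DWORD dwOldProtect;
    VirtualProtect(pGuardPage,
                   OS_PAGE_SIZE,
                   PAGE_READWRITE | PAGE_GUARD,
                   &dwOldProtect);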
+
+
+//---------------------------------------------------------------------------------------
+// HandleExitProcess is used to shutdown the runtime, based on policy previously set,
+// then to exit the process. Note, however, that the process will not exit if
+// sca is SCA_ReturnWhenShutdownComplete. In that case, this method will simply return after
+// performing the shutdown actions.
+//---------------------------------------------------------------------------------------
+void EEPolicy::HandleExitProcess(ShutdownCompleteAction sca)
+{
+ WRAPPER_NO_CONTRACT;
+
+ STRESS_LOG0(LF_EH, LL_INFO100, "In EEPolicy::HandleExitProcess\n");
+
+ EPolicyAction action = GetEEPolicy()->GetDefaultAction(OPR_ProcessExit, NULL);
+ GetEEPolicy()->NotifyHostOnDefaultAction(OPR_ProcessExit,action);
+ HandleExitProcessHelper(action, 0, sca);
+}
+
+//
+// Log an error to the event log if possible, then throw up a dialog box.
+//
+
+void EEPolicy::LogFatalError(UINT exitCode, UINT_PTR address, LPCWSTR pszMessage, PEXCEPTION_POINTERS pExceptionInfo)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ _ASSERTE(pExceptionInfo != NULL);
+
+ if(ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, FailFast))
+ {
+ // Fire an ETW FailFast event
+ FireEtwFailFast(pszMessage,
+ (const PVOID)address,
+ ((pExceptionInfo && pExceptionInfo->ExceptionRecord) ? pExceptionInfo->ExceptionRecord->ExceptionCode : 0),
+ exitCode,
+ GetClrInstanceId());
+ }
+
+#ifndef FEATURE_PAL
+ // Write an event log entry. We do allocate some resources here (spread between the stack and maybe the heap for longer
+ // messages), so it's possible for the event write to fail. If needs be we can use a more elaborate scheme here in the future
+ // (maybe trying multiple approaches and backing off on failure, falling back on a limited size static buffer as a last
+ // resort). In all likelihood the Win32 event reporting mechanism requires resources though, so it's not clear how much
+ // effort we should put into this without knowing the benefit we'd receive.
+ EX_TRY
+ {
+ if (ShouldLogInEventLog())
+ {
+ // If the exit code is COR_E_FAILFAST then the fatal error was raised by managed code and the address argument points to a
+ // unicode message buffer rather than a faulting EIP.
+ EventReporter::EventReporterType failureType = EventReporter::ERT_UnmanagedFailFast;
+ if (exitCode == (UINT)COR_E_FAILFAST)
+ failureType = EventReporter::ERT_ManagedFailFast;
+ else if (exitCode == (UINT)COR_E_CODECONTRACTFAILED)
+ failureType = EventReporter::ERT_CodeContractFailed;
+ EventReporter reporter(failureType);
+
+
+ if ((exitCode == (UINT)COR_E_FAILFAST) || (exitCode == (UINT)COR_E_CODECONTRACTFAILED) || (exitCode == (UINT)CLR_E_GC_OOM))
+ {
+ if (pszMessage)
+ {
+ reporter.AddDescription((WCHAR*)pszMessage);
+ }
+
+ if (exitCode != (UINT)CLR_E_GC_OOM)
+ LogCallstackForEventReporter(reporter);
+ }
+ else
+ {
+ // Fetch the localized Fatal Execution Engine Error text or fall back on a hardcoded variant if things get dire.
+ InlineSString<80> ssMessage;
+ InlineSString<80> ssErrorFormat;
+ if(!ssErrorFormat.LoadResource(CCompRC::Optional, IDS_ER_UNMANAGEDFAILFASTMSG ))
+ ssErrorFormat.Set(W("at IP %1 (%2) with exit code %3."));
+ SmallStackSString addressString;
+ addressString.Printf(W("%p"), pExceptionInfo? (UINT_PTR)pExceptionInfo->ExceptionRecord->ExceptionAddress : address);
+
+ // We should always have the reference to the runtime's instance
+ _ASSERTE(g_pMSCorEE != NULL);
+
+ // Set up the string to contain the runtime's base address. Thus, when customers report FEEE with just
+ // the event log entry containing this string, we can use the absolute and base addresses to determine
+ // where the fault happened inside the runtime.
+ SmallStackSString runtimeBaseAddressString;
+ runtimeBaseAddressString.Printf(W("%p"), g_pMSCorEE);
+
+ SmallStackSString exitCodeString;
+ exitCodeString.Printf(W("%x"), exitCode);
+
+ // Format the string
+ ssMessage.FormatMessage(FORMAT_MESSAGE_FROM_STRING, (LPCWSTR)ssErrorFormat, 0, 0, addressString, runtimeBaseAddressString,
+ exitCodeString);
+ reporter.AddDescription(ssMessage);
+ }
+
+ reporter.Report();
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+#endif // !FEATURE_PAL
+
+#ifdef _DEBUG
+ // If we're native-only (Win32) debugging this process, we'd love to break now.
+ // However, we should not do this because a managed debugger attached to a
+ // SxS runtime also appears to be a native debugger. Unfortunately, the managed
+ // debugger won't handle any native event from another runtime, which means this
+ // breakpoint would go unhandled and terminate the process. Instead, we will let
+ // the process continue so at least the fatal error is logged rather than abrupt
+ // termination.
+ //
+ // This behavior can still be overridden if the right config value is set.
+ if (IsDebuggerPresent())
+ {
+ bool fBreak = (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgOOBinFEEE) != 0);
+
+ if (fBreak)
+ {
+ DebugBreak();
+ }
+ }
+#endif // _DEBUG
+
+ // We're here logging a fatal error. If the policy is to then do anything other than
+ // disable the runtime (i.e., if the policy is to terminate the runtime), we should give
+ // Watson an opportunity to capture an error report.
+ // Presumably, hosts that are sophisticated enough to disable the runtime are also cognizant
+ // of how they want to handle fatal errors in the runtime, including whether they want
+ // to capture Watson information (for which they are responsible).
+ if (GetEEPolicy()->GetActionOnFailureNoHostNotification(FAIL_FatalRuntime) != eDisableRuntime)
+ {
+#ifdef DEBUGGING_SUPPORTED
+ //Give a managed debugger a chance if this fatal error is on a managed thread.
+ Thread *pThread = GetThread();
+
+ if (pThread)
+ {
+ GCX_COOP();
+
+ OBJECTHANDLE ohException = NULL;
+
+ if (exitCode == (UINT)COR_E_STACKOVERFLOW)
+ {
+ // If we're going down because of stack overflow, go ahead and use the preallocated SO exception.
+ ohException = CLRException::GetPreallocatedStackOverflowExceptionHandle();
+ }
+ else
+ {
+ // Though we would like to remove the usage of ExecutionEngineException altogether,
+ // we cannot. It's okay to use it in the case below, since the process is terminating
+ // and it will serve as the exception object for the debugger.
+ ohException = CLRException::GetPreallocatedExecutionEngineExceptionHandle();
+ }
+
+ // Preallocated exception handles can be null if FailFast is invoked before LoadBaseSystemClasses
+ // (in SystemDomain::Init) finished. See Dev10 Bug 677432 for the detail.
+ if (ohException != NULL)
+ {
+#ifdef FEATURE_WINDOWSPHONE
+ // for fail-fast, if there's a LTO available then use that as the inner exception object
+ // for the FEEE we'll be reporting. this can help the Watson back-end to generate better
+ // buckets for apps that call Environment.FailFast() and supply an exception object.
+ OBJECTREF lto = pThread->LastThrownObject();
+
+ if (exitCode == static_cast<UINT>(COR_E_FAILFAST) && lto != NULL)
+ {
+ EXCEPTIONREF curEx = (EXCEPTIONREF)ObjectFromHandle(ohException);
+ curEx->SetInnerException(lto);
+ }
+#endif // FEATURE_WINDOWSPHONE
+ pThread->SetLastThrownObject(ObjectFromHandle(ohException), TRUE);
+ }
+
+ // If a managed debugger is already attached, and if that debugger might be inclined to
+ // try to intercept this exception, then tell it that interception is not possible.
+ if (pThread->IsExceptionInProgress())
+ {
+ pThread->GetExceptionState()->GetFlags()->SetDebuggerInterceptNotPossible();
+ }
+ }
+
+ if (EXCEPTION_CONTINUE_EXECUTION == WatsonLastChance(pThread, pExceptionInfo, TypeOfReportedError::FatalError))
+ {
+ LOG((LF_EH, LL_INFO100, "EEPolicy::LogFatalError: debugger ==> EXCEPTION_CONTINUE_EXECUTION\n"));
+ _ASSERTE(!"Debugger should not have returned ContinueExecution");
+ }
+#endif // DEBUGGING_SUPPORTED
+ }
+}
+
+void DisplayStackOverflowException()
+{
+ LIMITED_METHOD_CONTRACT;
+ PrintToStdErrA("\n");
+
+ PrintToStdErrA("Process is terminated due to StackOverflowException.\n");
+}
+
+void DECLSPEC_NORETURN EEPolicy::HandleFatalStackOverflow(EXCEPTION_POINTERS *pExceptionInfo, BOOL fSkipDebugger)
+{
+ // This is a fatal error. We do not care about SO mode any more.
+ // All of the code from here on out is robust to any failures in any API's that are called.
+ CONTRACT_VIOLATION(GCViolation | ModeViolation | SOToleranceViolation | FaultNotFatal | TakesLockViolation);
+
+ WRAPPER_NO_CONTRACT;
+
+ STRESS_LOG0(LF_EH, LL_INFO100, "In EEPolicy::HandleFatalStackOverflow\n");
+
+ DisplayStackOverflowException();
+
+ if(ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, FailFast))
+ {
+ // Fire an ETW FailFast event
+ FireEtwFailFast(W("StackOverflowException"),
+ (const PVOID)((pExceptionInfo && pExceptionInfo->ContextRecord) ? GetIP(pExceptionInfo->ContextRecord) : 0),
+ ((pExceptionInfo && pExceptionInfo->ExceptionRecord) ? pExceptionInfo->ExceptionRecord->ExceptionCode : 0),
+ COR_E_STACKOVERFLOW,
+ GetClrInstanceId());
+ }
+
+ if (!fSkipDebugger)
+ {
+ Thread *pThread = GetThread();
+ BOOL fTreatAsNativeUnhandledException = FALSE;
+ if (pThread)
+ {
+ GCX_COOP();
+ // If we had an SO before the preallocated exception objects were initialized, we will AV here. This can happen
+ // during the initialization of SystemDomain during EEStartup. Thus, set up the SO throwable only if it's not
+ // NULL.
+ //
+ // When WatsonLastChance (WLC) is invoked below, it treats this case as UnhandledException. If there is no
+ // managed exception object available, we should treat this case as NativeUnhandledException. This aligns
+ // well with the fact that there cannot be a managed debugger attached at this point that will require
+ // LastChanceManagedException notification to be delivered. Also, this is the same as how
+ // we treat an unhandled exception as NativeUnhandled when throwable is not available.
+ OBJECTHANDLE ohSO = CLRException::GetPreallocatedStackOverflowExceptionHandle();
+ if (ohSO != NULL)
+ {
+ pThread->SafeSetThrowables(ObjectFromHandle(ohSO)
+ DEBUG_ARG(ThreadExceptionState::STEC_CurrentTrackerEqualNullOkHackForFatalStackOverflow),
+ TRUE);
+ }
+ else
+ {
+ // We dont have a throwable - treat this as native unhandled exception
+ fTreatAsNativeUnhandledException = TRUE;
+ }
+ }
+ FrameWithCookie<FaultingExceptionFrame> fef;
+#if defined(WIN64EXCEPTIONS)
+ *((&fef)->GetGSCookiePtr()) = GetProcessGSCookie();
+#endif // WIN64EXCEPTIONS
+ if (pExceptionInfo && pExceptionInfo->ContextRecord)
+ {
+ GCX_COOP();
+ fef.InitAndLink(pExceptionInfo->ContextRecord);
+ }
+
+#ifndef FEATURE_PAL
+ if (RunningOnWin7() && IsWatsonEnabled() && (g_pDebugInterface != NULL))
+ {
+ _ASSERTE(pExceptionInfo != NULL);
+
+ ResetWatsonBucketsParams param;
+ param.m_pThread = pThread;
+ param.pExceptionRecord = pExceptionInfo->ExceptionRecord;
+ g_pDebugInterface->RequestFavor(ResetWatsonBucketsFavorWorker, reinterpret_cast<void *>(&param));
+ }
+
+ WatsonLastChance(pThread, pExceptionInfo,
+ (fTreatAsNativeUnhandledException == FALSE)? TypeOfReportedError::UnhandledException: TypeOfReportedError::NativeThreadUnhandledException);
+#endif // !FEATURE_PAL
+ }
+
+ TerminateProcess(GetCurrentProcess(), COR_E_STACKOVERFLOW);
+ UNREACHABLE();
+}
+
+void DECLSPEC_NORETURN EEPolicy::HandleFatalError(UINT exitCode, UINT_PTR address, LPCWSTR pszMessage /* = NULL */, PEXCEPTION_POINTERS pExceptionInfo /* = NULL */)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // All of the code from here on out is robust to any failures in any API's that are called.
+ FAULT_NOT_FATAL();
+
+ EXCEPTION_RECORD exceptionRecord;
+ EXCEPTION_POINTERS exceptionPointers;
+ CONTEXT context;
+
+ if (pExceptionInfo == NULL)
+ {
+ ZeroMemory(&exceptionPointers, sizeof(exceptionPointers));
+ ZeroMemory(&exceptionRecord, sizeof(exceptionRecord));
+ ZeroMemory(&context, sizeof(context));
+
+ context.ContextFlags = CONTEXT_CONTROL;
+ ClrCaptureContext(&context);
+
+ exceptionRecord.ExceptionCode = exitCode;
+ exceptionRecord.ExceptionAddress = reinterpret_cast< PVOID >(address);
+
+ exceptionPointers.ExceptionRecord = &exceptionRecord;
+ exceptionPointers.ContextRecord = &context;
+ pExceptionInfo = &exceptionPointers;
+ }
+
+ // All of the code from here on out is allowed to trigger a GC, even if we're in a no-trigger region. We're
+ // ripping the process down due to a fatal error... our invariants are already gone.
+ {
+ // This is a fatal error. We do not care about SO mode any more.
+ // All of the code from here on out is robust to any failures in any API's that are called.
+ CONTRACT_VIOLATION(GCViolation | ModeViolation | SOToleranceViolation | FaultNotFatal | TakesLockViolation);
+
+ // The ThreadStore lock needs to be released before continuing with the fatal error handling,
+ // because the debugger is going to take CrstDebuggerMutex, whose lock level is higher than that of
+ // CrstThreadStore. It should be safe to release the lock since execution will not be resumed
+ // after fatal errors.
+ if (ThreadStore::HoldingThreadStore(GetThread()))
+ {
+ ThreadSuspend::UnlockThreadStore();
+ }
+
+ g_fFastExitProcess = 2;
+
+ STRESS_LOG0(LF_CORDB,LL_INFO100, "D::HFE: About to call LogFatalError\n");
+ switch (GetEEPolicy()->GetActionOnFailure(FAIL_FatalRuntime))
+ {
+ case eRudeExitProcess:
+ LogFatalError(exitCode, address, pszMessage, pExceptionInfo);
+ SafeExitProcess(exitCode, TRUE);
+ break;
+ case eDisableRuntime:
+ LogFatalError(exitCode, address, pszMessage, pExceptionInfo);
+ DisableRuntime(SCA_ExitProcessWhenShutdownComplete);
+ break;
+ default:
+ _ASSERTE(!"Invalid action for FAIL_FatalRuntime");
+ break;
+ }
+ }
+
+ UNREACHABLE();
+}
+
+void EEPolicy::HandleExitProcessFromEscalation(EPolicyAction action, UINT exitCode)
+{
+ WRAPPER_NO_CONTRACT;
+ CONTRACT_VIOLATION(GCViolation);
+
+ _ASSERTE (action >= eExitProcess);
+ // If the policy for ProcessExit is something other than the default action (eExitProcess), we will use it.
+ // Otherwise, override it with the action passed in as an argument.
+ EPolicyAction todo = GetEEPolicy()->GetDefaultAction(OPR_ProcessExit, NULL);
+ if (todo == eExitProcess)
+ {
+ todo = action;
+ }
+ GetEEPolicy()->NotifyHostOnDefaultAction(OPR_ProcessExit,todo);
+
+ HandleExitProcessHelper(todo, exitCode, SCA_ExitProcessWhenShutdownComplete);
+}
+
+void EEPolicy::HandleCodeContractFailure(LPCWSTR pMessage, LPCWSTR pCondition, LPCWSTR pInnerExceptionAsString)
+{
+ WRAPPER_NO_CONTRACT;
+
+ EEPolicy* pPolicy = GetEEPolicy();
+ // GetActionOnFailure will notify the host for us.
+ EPolicyAction action = pPolicy->GetActionOnFailure(FAIL_CodeContract);
+ Thread* pThread = GetThread();
+ AppDomain* pCurrentDomain = ::GetAppDomain();
+
+ switch(action) {
+ case eThrowException:
+ // Let managed code throw a ContractException (it's easier to pass the right parameters to the constructor).
+ break;
+ case eAbortThread:
+ pThread->UserAbort(Thread::TAR_Thread, TA_Safe, GetEEPolicy()->GetTimeout(OPR_ThreadAbort), Thread::UAC_Normal);
+ break;
+ case eRudeAbortThread:
+ pThread->UserAbort(Thread::TAR_Thread, TA_Rude, GetEEPolicy()->GetTimeout(OPR_ThreadAbort), Thread::UAC_Normal);
+ break;
+ case eUnloadAppDomain:
+ // Register an appdomain unload, which starts on a separate thread.
+ IfFailThrow(AppDomain::UnloadById(pCurrentDomain->GetId(), FALSE));
+ // Don't continue execution on this thread.
+ pThread->UserAbort(Thread::TAR_Thread, TA_Safe, GetEEPolicy()->GetTimeout(OPR_ThreadAbort), Thread::UAC_Normal);
+ break;
+ case eRudeUnloadAppDomain:
+ pCurrentDomain->SetRudeUnload();
+ // Register an appdomain unload, which starts on a separate thread.
+ IfFailThrow(AppDomain::UnloadById(pCurrentDomain->GetId(), FALSE));
+ // Don't continue execution on this thread.
+ pThread->UserAbort(Thread::TAR_Thread, TA_Rude, GetEEPolicy()->GetTimeout(OPR_ThreadAbort), Thread::UAC_Normal);
+ break;
+
+ case eExitProcess: // Merged w/ default case
+ default:
+ _ASSERTE(action == eExitProcess);
+ // Since we have no exception object, make sure
+ // UE tracker is clean so that RetrieveManagedBucketParameters
+ // does not take any bucket details.
+#ifndef FEATURE_PAL
+ pThread->GetExceptionState()->GetUEWatsonBucketTracker()->ClearWatsonBucketDetails();
+#endif // !FEATURE_PAL
+ pPolicy->HandleFatalError(COR_E_CODECONTRACTFAILED, NULL, pMessage);
+ break;
+ }
+}
+
diff --git a/src/vm/eepolicy.h b/src/vm/eepolicy.h
new file mode 100644
index 0000000000..5cbbe294dd
--- /dev/null
+++ b/src/vm/eepolicy.h
@@ -0,0 +1,192 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// ---------------------------------------------------------------------------
+// EEPolicy.h
+// ---------------------------------------------------------------------------
+
+
+#ifndef EEPOLICY_H_
+#define EEPOLICY_H_
+
+#include "vars.hpp"
+#include "corhost.h"
+#include "ceemain.h"
+
+extern "C" UINT_PTR STDCALL GetCurrentIP();
+
+enum StackOverflowDetector
+{
+ SOD_ManagedFrameHandler,
+ SOD_UnmanagedFrameHandler,
+ SOD_SOTolerantTransitor,
+ SOD_SOIntolerantTransitor,
+};
+
+// EEPolicy maintains actions for resource failure and timeout
+class EEPolicy
+{
+public:
+ enum ThreadAbortTypes
+ {
+ TA_None, // No Abort
+ // Abort at a safe spot: not having any lock, not inside finally, not inside catch
+ TA_Safe,
+ // Do we need this one?
+ TA_V1Compatible,
+ // Do not run user finally, no attention to lock count
+ TA_Rude
+ };
+
+ enum AppDomainUnloadTypes
+ {
+ ADU_Safe,
+ ADU_Rude
+ };
+
+ EEPolicy ();
+
+ HRESULT SetTimeout(EClrOperation operation, DWORD timeout);
+
+ DWORD GetTimeout(EClrOperation operation)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(static_cast<UINT>(operation) < MaxClrOperation);
+ return m_Timeout[operation];
+ }
+
+ HRESULT SetActionOnTimeout(EClrOperation operation, EPolicyAction action);
+ EPolicyAction GetActionOnTimeout(EClrOperation operation, Thread *pThread)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(static_cast<UINT>(operation) < MaxClrOperation);
+ return GetFinalAction(m_ActionOnTimeout[operation], pThread);
+ }
+
+ void NotifyHostOnTimeout(EClrOperation operation, EPolicyAction action);
+
+ HRESULT SetTimeoutAndAction(EClrOperation operation, DWORD timeout, EPolicyAction action);
+
+ HRESULT SetDefaultAction(EClrOperation operation, EPolicyAction action);
+ EPolicyAction GetDefaultAction(EClrOperation operation, Thread *pThread)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(static_cast<UINT>(operation) < MaxClrOperation);
+ return GetFinalAction(m_DefaultAction[operation], pThread);
+ }
+
+ void NotifyHostOnDefaultAction(EClrOperation operation, EPolicyAction action);
+
+ HRESULT SetActionOnFailure(EClrFailure failure, EPolicyAction action);
+
+ // Generally GetActionOnFailure should be used so that a host can get notification.
+ // But if we have already notified the host of the same failure and need to check escalation again,
+ // GetActionOnFailureNoHostNotification can be used.
+ EPolicyAction GetActionOnFailure(EClrFailure failure);
+ EPolicyAction GetActionOnFailureNoHostNotification(EClrFailure failure);
+
+ // get and set unhandled exception policy
+ HRESULT SetUnhandledExceptionPolicy(EClrUnhandledException policy)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (policy != eRuntimeDeterminedPolicy && policy != eHostDeterminedPolicy)
+ {
+ return E_INVALIDARG;
+ }
+ else
+ {
+ m_unhandledExceptionPolicy = policy;
+ return S_OK;
+ }
+ }
+ EClrUnhandledException GetUnhandledExceptionPolicy()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_unhandledExceptionPolicy;
+ }
+
+ static EPolicyAction DetermineResourceConstraintAction(Thread *pThread);
+
+ static void PerformResourceConstraintAction(Thread *pThread, EPolicyAction action, UINT exitCode, BOOL haveStack);
+
+ static void PerformADUnloadAction(EPolicyAction action, BOOL haveStack, BOOL forStackOverflow = FALSE);
+
+ static void HandleOutOfMemory();
+
+ static void HandleStackOverflow(StackOverflowDetector detector, void * pLimitFrame);
+
+ static void HandleSoftStackOverflow(BOOL fSkipDebugger = FALSE);
+
+ static void HandleStackOverflowAfterCatch();
+
+ static void HandleExitProcess(ShutdownCompleteAction sca = SCA_ExitProcessWhenShutdownComplete);
+
+ static void DECLSPEC_NORETURN HandleFatalError(UINT exitCode, UINT_PTR address, LPCWSTR pMessage=NULL, PEXCEPTION_POINTERS pExceptionInfo= NULL);
+
+ static void DECLSPEC_NORETURN HandleFatalStackOverflow(EXCEPTION_POINTERS *pException, BOOL fSkipDebugger = FALSE);
+
+ static void HandleExitProcessFromEscalation(EPolicyAction action, UINT exitCode);
+
+ static void HandleCodeContractFailure(LPCWSTR pMessage, LPCWSTR pCondition, LPCWSTR pInnerExceptionAsString);
+
+private:
+ DWORD m_Timeout[MaxClrOperation];
+ EPolicyAction m_ActionOnTimeout[MaxClrOperation];
+ EPolicyAction m_DefaultAction[MaxClrOperation];
+ EPolicyAction m_ActionOnFailure[MaxClrFailure];
+ EClrUnhandledException m_unhandledExceptionPolicy;
+
+ // TODO: Support multiple methods to set policy: hosting, config, managed api.
+
+ // Return TRUE if the action is acceptable for the operation.
+ BOOL IsValidActionForOperation(EClrOperation operation, EPolicyAction action);
+ BOOL IsValidActionForTimeout(EClrOperation operation, EPolicyAction action);
+ BOOL IsValidActionForFailure(EClrFailure failure, EPolicyAction action);
+ EPolicyAction GetFinalAction(EPolicyAction action, Thread *pThread);
+
+ static void LogFatalError(UINT exitCode, UINT_PTR address, LPCWSTR pMessage, PEXCEPTION_POINTERS pExceptionInfo);
+
+ // IMPORTANT NOTE: only the following two functions should be calling ExitProcessViaShim.
+ // - CorHost2::ExitProcess
+ // - SafeExitProcess
+ friend class CorHost2;
+ friend void SafeExitProcess(UINT , BOOL , ShutdownCompleteAction);
+
+ static void ExitProcessViaShim(UINT exitCode);
+};
+
+void InitEEPolicy();
+
+extern BYTE g_EEPolicyInstance[];
+
+inline EEPolicy* GetEEPolicy()
+{
+ return (EEPolicy*)&g_EEPolicyInstance;
+}
+
+extern void FinalizerThreadAbortOnTimeout();
+extern ULONGLONG GetObjFinalizeStartTime();
+
+//
+// Use EEPOLICY_HANDLE_FATAL_ERROR when you have a situation where the Runtime's internal state would be
+// inconsistent if execution were allowed to continue. This will apply the proper host's policy for fatal
+// errors. Note: this call will never return.
+//
+// NOTE: make sure to use the macro instead of calling EEPolicy::HandleFatalError directly. The macro grabs the IP
+// of where you are calling this from, so we can log it to help when debugging these failures later.
+//
+
+// FailFast with specific error code
+#define EEPOLICY_HANDLE_FATAL_ERROR(_exitcode) EEPolicy::HandleFatalError(_exitcode, GetCurrentIP());
+
+// FailFast with specific error code and message (LPWSTR)
+#define EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(_exitcode, _message) EEPolicy::HandleFatalError(_exitcode, GetCurrentIP(), _message);
+
+// FailFast with specific error code and exception details
+#define EEPOLICY_HANDLE_FATAL_ERROR_USING_EXCEPTION_INFO(_exitcode, _pExceptionInfo) EEPolicy::HandleFatalError(_exitcode, GetCurrentIP(), NULL, _pExceptionInfo);
+
+#endif // EEPOLICY_H_
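A usage sketch for the macros above (a hypothetical call site; the guard condition and `pMT` are illustrative):

    // Hypothetical VM call site: fail fast when an internal invariant is broken.
    if (pMT == NULL)
    {
        // Expands to EEPolicy::HandleFatalError(COR_E_EXECUTIONENGINE, GetCurrentIP()),
        // so the faulting IP is captured at the call site for later debugging.
        EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
    }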
diff --git a/src/vm/eeprofinterfaces.h b/src/vm/eeprofinterfaces.h
new file mode 100644
index 0000000000..0cdffd1482
--- /dev/null
+++ b/src/vm/eeprofinterfaces.h
@@ -0,0 +1,67 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// EEProfInterfaces.h
+//
+
+//
+// Common types used internally in the EE to support issuing profiling API callbacks
+//
+
+// ======================================================================================
+
+#ifndef _EEPROFINTERFACES_H_
+#define _EEPROFINTERFACES_H_
+
+#include <stddef.h>
+#include "corprof.h"
+#include "profilepriv.h"
+
+#define PROF_USER_MASK 0xFFFFFFFF
+
+class EEToProfInterfaceImpl;
+class ProfToEEInterfaceImpl;
+class Thread;
+class Frame;
+class MethodDesc;
+class Object;
+class Module;
+
+// This file defines the _internal_ interface between the EE and the
+// implementation of the COM profiling API. The _external_ API is defined
+// in inc/corprof.idl.
+//
+// Most IDs used by the _external_ API are just the pointer values
+// of the corresponding CLR data structure.
+//
+
+/*
+ * The following methods dispatch allocation tracking to the profiler, as
+ * well as to the method table reordering code (as appropriate).
+ */
+
+void __stdcall ProfilerObjectAllocatedCallback(OBJECTREF objref, ClassID classId);
+
+void __stdcall GarbageCollectionStartedCallback(int generation, BOOL induced);
+
+void __stdcall GarbageCollectionFinishedCallback();
+
+void __stdcall UpdateGenerationBounds();
+#include "eetoprofinterfaceimpl.h"
+
+
+enum PTR_TYPE
+{
+ PT_MODULE,
+ PT_ASSEMBLY,
+};
+
+void __stdcall ProfilerManagedToUnmanagedTransitionMD(MethodDesc * pMD,
+ COR_PRF_TRANSITION_REASON reason);
+
+void __stdcall ProfilerUnmanagedToManagedTransitionMD(MethodDesc * pMD,
+ COR_PRF_TRANSITION_REASON reason);
+
+#endif //_EEPROFINTERFACES_H_
diff --git a/src/vm/eeprofinterfaces.inl b/src/vm/eeprofinterfaces.inl
new file mode 100644
index 0000000000..d60617e365
--- /dev/null
+++ b/src/vm/eeprofinterfaces.inl
@@ -0,0 +1,28 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// EEProfInterfaces.inl
+//
+
+//
+// Inline function implementations for common types used internally in the EE to support
+// issuing profiling API callbacks
+//
+
+// ======================================================================================
+
+#ifndef DACCESS_COMPILE
+
+FORCEINLINE BOOL TrackAllocations()
+{
+#ifdef PROFILING_SUPPORTED
+ return CORProfilerTrackAllocations();
+#else
+ return FALSE;
+#endif // PROFILING_SUPPORTED
+}
+
+
+#endif
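A minimal usage sketch for TrackAllocations, assuming a GC-side call site that already has the object reference and its ClassID in hand (the variable names are illustrative):

    // Only pay for the callback dispatch when a profiler asked for allocation events.
    if (TrackAllocations())
    {
        ProfilerObjectAllocatedCallback(objref, classId);
    }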
diff --git a/src/vm/eetoprofinterfaceimpl.cpp b/src/vm/eetoprofinterfaceimpl.cpp
new file mode 100644
index 0000000000..bcb70f22f3
--- /dev/null
+++ b/src/vm/eetoprofinterfaceimpl.cpp
@@ -0,0 +1,6249 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// EEToProfInterfaceImpl.cpp
+//
+
+//
+// This module implements wrappers around calling the profiler's
+// ICorProfilerCallback* interfaces. When code in the EE needs to call the
+// profiler, it goes through EEToProfInterfaceImpl to do so.
+//
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE!
+//
+// PLEASE READ!
+//
+// There are strict rules for how to implement ICorProfilerCallback* wrappers. Please read
+// http://devdiv/sites/CLR/ProfilingAPI/Shared%20Documents/ImplementingProfilability.doc
+// to understand the rules and why they exist.
+//
+// As a reminder, here is a short summary of your responsibilities. Every PUBLIC
+// ENTRYPOINT (from EE to profiler) must have:
+//
+// - An entrypoint macro at the top. Your choices are:
+// CLR_TO_PROFILER_ENTRYPOINT (typical choice)
+// This is used for calling ICorProfilerCallback* methods that either have no
+// ThreadID parameters, or if they do have a ThreadID parameter, the parameter's
+// value is always the *current* ThreadID (i.e., param == GetThread()). This will
+// also force a mode switch to preemptive before calling the profiler.
+// CLR_TO_PROFILER_ENTRYPOINT_FOR_THREAD
+// Similar to above, except these are used for ICorProfilerCallback* methods that
+// specify a ThreadID parameter whose value may not always be the *current*
+// ThreadID. You must specify the ThreadID as the first parameter to these
+// macros. The macro will then use your ThreadID rather than that of the current
+// GetThread(), to assert that the callback is currently allowed for that
+// ThreadID (i.e., that we have not yet issued a ThreadDestroyed() for that
+// ThreadID).
+//
+// - A complete contract block with comments over every contract choice. Wherever
+// possible, use the preferred contracts (if not possible, you must comment why):
+// NOTHROW
+// All callbacks are really NOTHROW, but that's enforced partially by
+// the profiler, whose try/catch blocks aren't visible to the
+// contract system. So you'll need to put a scoped
+// PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout)
+// around the call to the profiler
+// GC_TRIGGERS
+// MODE_PREEMPTIVE (MODE_COOPERATIVE if passing an ObjectID)
+// If you use MODE_ANY, you must comment why you don't want an exact mode.
+// CAN_TAKE_LOCK
+// ASSERT_NO_EE_LOCKS_HELD()
+// SO_NOT_MAINLINE
+// Note that the preferred contracts in this file are DIFFERENT than the preferred
+// contracts for proftoeeinterfaceimpl.cpp.
+//
+// Private helper functions in this file do not have the same preferred contracts as
+// public entrypoints, and they should be contracted following the same guidelines
+// as per the rest of the EE.
+//
+// NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE!
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+//
+
+// ======================================================================================
+
+#include "common.h"
+
+#ifdef PROFILING_SUPPORTED
+
+
+#include "eetoprofinterfaceimpl.h"
+#include "eetoprofinterfaceimpl.inl"
+#include "contract.h"
+#include "proftoeeinterfaceimpl.h"
+#include "proftoeeinterfaceimpl.inl"
+#include "profilinghelper.inl"
+#include "profdetach.h"
+#include "simplerwlock.hpp"
+#include "eeconfig.h"
+
+//---------------------------------------------------------------------------------------
+// Helpers
+
+// Bitmask of flags that may be passed to the CLR_TO_PROFILER_ENTRYPOINT* macros
+// to constrain when the callback may be issued
+enum ClrToProfEntrypointFlags
+{
+ // Default
+ kEE2PNone = 0x00000000,
+
+ // Callback is allowable even for detaching profilers
+ kEE2PAllowableWhileDetaching = 0x00000001,
+
+ // Callback is allowable even for initializing profilers
+ kEE2PAllowableWhileInitializing = 0x00000002,
+
+ // Callback is made while in a GC_NOTRIGGER contract. Whereas contracts are
+ // debug-only, this flag is used in retail builds as well.
+ kEE2PNoTrigger = 0x00000004,
+};
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+#define ASSERT_EVAC_COUNTER_NONZERO() \
+ _ASSERTE((GetThreadNULLOk() == NULL) || \
+ (GetThreadNULLOk()->GetProfilerEvacuationCounter() != 0U))
+#else // FEATURE_PROFAPI_ATTACH_DETACH
+#define ASSERT_EVAC_COUNTER_NONZERO()
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+#define CHECK_PROFILER_STATUS(ee2pFlags) \
+ /* If one of these asserts fires, perhaps you forgot to use */ \
+ /* BEGIN/END_PIN_PROFILER */ \
+ ASSERT_EVAC_COUNTER_NONZERO(); \
+ _ASSERTE(g_profControlBlock.pProfInterface.Load() != NULL); \
+ _ASSERTE(g_profControlBlock.pProfInterface == this); \
+ /* Early abort if... */ \
+ if ( \
+ /* Profiler isn't active, */ \
+ !CORProfilerPresent() && \
+ \
+ /* and it's not the case that both a) this callback is allowed */ \
+ /* on a detaching profiler, and b) the profiler is detaching */ \
+ !( \
+ (((ee2pFlags) & kEE2PAllowableWhileDetaching) != 0) && \
+ (g_profControlBlock.curProfStatus.Get() == kProfStatusDetaching) \
+ ) && \
+ \
+ /* and it's not the case that both a) this callback is allowed */ \
+ /* on an initializing profiler, and b) the profiler is initializing */ \
+ !( \
+ (((ee2pFlags) & kEE2PAllowableWhileInitializing) != 0) && \
+ ( \
+ (g_profControlBlock.curProfStatus.Get() \
+ == kProfStatusInitializingForStartupLoad) || \
+ (g_profControlBlock.curProfStatus.Get() \
+ == kProfStatusInitializingForAttachLoad) \
+ ) \
+ ) \
+ ) \
+ { \
+ return S_OK; \
+ }
+
+// Least common denominator for the callback wrappers. Logs, removes stack
+// guard (REMOVE_STACK_GUARD_FOR_PROFILER_CALL), records in EE Thread object that
+// we're in a callback, and asserts that we're allowed to issue callbacks for the
+// specified ThreadID (i.e., no ThreadDestroyed callback has been issued for the
+// ThreadID).
+//
+#define CLR_TO_PROFILER_ENTRYPOINT_FOR_THREAD_EX(ee2pFlags, threadId, logParams) \
+ INCONTRACT(AssertTriggersContract(!((ee2pFlags) & kEE2PNoTrigger))); \
+ CHECK_PROFILER_STATUS(ee2pFlags); \
+ LOG(logParams); \
+ _ASSERTE(m_pCallback2 != NULL); \
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL; \
+ /* Normally, set COR_PRF_CALLBACKSTATE_INCALLBACK | */ \
+ /* COR_PRF_CALLBACKSTATE_IN_TRIGGERS_SCOPE in the callback state, but omit */ \
+ /* COR_PRF_CALLBACKSTATE_IN_TRIGGERS_SCOPE if we're in a GC_NOTRIGGERS callback */ \
+ SetCallbackStateFlagsHolder __csf( \
+ (((ee2pFlags) & kEE2PNoTrigger) != 0) ? \
+ COR_PRF_CALLBACKSTATE_INCALLBACK : \
+ COR_PRF_CALLBACKSTATE_INCALLBACK | COR_PRF_CALLBACKSTATE_IN_TRIGGERS_SCOPE \
+ ); \
+ _ASSERTE(ProfilerCallbacksAllowedForThread((Thread *) (threadId)))
+
+#define CLR_TO_PROFILER_ENTRYPOINT_EX(ee2pFlags, logParams) \
+ CLR_TO_PROFILER_ENTRYPOINT_FOR_THREAD_EX(ee2pFlags, GetThreadNULLOk(), logParams)
+
+// Typical entrypoint macro you'll use. Checks that we're allowed to issue
+// callbacks for the current thread (i.e., no ThreadDestroyed callback has been
+// issued for the current thread).
+#define CLR_TO_PROFILER_ENTRYPOINT(logParams) \
+ CLR_TO_PROFILER_ENTRYPOINT_EX(kEE2PNone, logParams)
+#define CLR_TO_PROFILER_ENTRYPOINT_FOR_THREAD(threadId, logParams) \
+ CLR_TO_PROFILER_ENTRYPOINT_FOR_THREAD_EX(kEE2PNone, threadId, logParams)
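Putting the rules from the header comment together, a typical public entrypoint wrapper in this file has the following shape. This is an abridged sketch, not a verbatim method from the file; the wrapper name is hypothetical, though ThreadCreated is a real ICorProfilerCallback method.

    // Abridged sketch of a typical ICorProfilerCallback wrapper.
    HRESULT EEToProfInterfaceImpl::SomeCallbackWrapper(ThreadID threadId)
    {
        CONTRACTL
        {
            NOTHROW;            // enforced via ThrowsViolation around the callout
            GC_TRIGGERS;
            MODE_PREEMPTIVE;    // no ObjectID parameters here
            CAN_TAKE_LOCK;
            SO_NOT_MAINLINE;
        }
        CONTRACTL_END;

        CLR_TO_PROFILER_ENTRYPOINT_FOR_THREAD(threadId,
            (LF_CORPROF, LL_INFO1000, "**PROF: SomeCallbackWrapper.\n"));

        {
            // Contracts can't see the profiler's try/catch, so suppress THROWS here.
            PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
            return m_pCallback2->ThreadCreated(threadId);
        }
    }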
+
+
+//---------------------------------------------------------------------------------------
+//
+// Wrapper around Thread::ProfilerCallbacksAllowed
+//
+// Arguments:
+// pThread - Thread on which we need to determine whether callbacks are allowed
+//
+// Return Value:
+// TRUE if profiler callbacks are currently allowed on this thread, else FALSE.
+//
+
+inline BOOL ProfilerCallbacksAllowedForThread(Thread * pThread)
+{
+ WRAPPER_NO_CONTRACT;
+ return ((pThread == NULL) || (pThread->ProfilerCallbacksAllowed()));
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Wrapper around Thread::SetProfilerCallbacksAllowed
+//
+// Arguments:
+// pThread - Thread on which we're setting whether callbacks shall be allowed
+// fValue - The value to store.
+//
+
+inline void SetProfilerCallbacksAllowedForThread(Thread * pThread, BOOL fValue)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(pThread != NULL);
+ pThread->SetProfilerCallbacksAllowed(fValue);
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Low-level function to find and CoCreateInstance the profiler's DLL. Called when
+// initializing via EEToProfInterfaceImpl::Init()
+//
+// Arguments:
+// * pClsid - [in] Profiler's CLSID
+// * wszClsid - [in] String form of CLSID or progid of profiler to load.
+// * wszProfileDLL - [in] Path to profiler DLL
+// * ppCallback - [out] Pointer to profiler's ICorProfilerCallback2 interface
+// * phmodProfilerDLL - [out] HMODULE of profiler's DLL.
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+// Notes:
+// * This function (or one of its callees) will log an error to the event log if
+// there is a failure
+
+static HRESULT CoCreateProfiler(
+ const CLSID * pClsid,
+ __in_z LPCWSTR wszClsid,
+ __in_z LPCWSTR wszProfileDLL,
+ ICorProfilerCallback2 ** ppCallback,
+ HMODULE * phmodProfilerDLL)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ // This causes events to be logged, which loads resource strings,
+ // which takes locks.
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ } CONTRACTL_END;
+
+ _ASSERTE(pClsid != NULL);
+ _ASSERTE(wszClsid != NULL);
+ _ASSERTE(ppCallback != NULL);
+ _ASSERTE(phmodProfilerDLL != NULL);
+
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Entered CoCreateProfiler.\n"));
+
+ HRESULT hr;
+ *phmodProfilerDLL = NULL;
+
+ // This is the ICorProfilerCallback2 ptr we get back from the profiler's class
+ // factory's CreateInstance()
+ ReleaseHolder<ICorProfilerCallback2> pCallback2FromCreateInstance;
+
+ // This is the ICorProfilerCallback2 ptr we get back from the profiler's QI (see its
+ // first use below for an explanation on why this is necessary).
+ ReleaseHolder<ICorProfilerCallback2> pCallback2FromQI;
+
+ // Create an instance of the profiler
+ hr = FakeCoCreateInstanceEx(*pClsid,
+ wszProfileDLL,
+ IID_ICorProfilerCallback2,
+ (LPVOID *) &pCallback2FromCreateInstance,
+ phmodProfilerDLL);
+
+ // (pCallback2FromCreateInstance == NULL) should be considered an error!
+ if ((pCallback2FromCreateInstance == NULL) && SUCCEEDED(hr))
+ {
+ hr = E_NOINTERFACE;
+ }
+
+ if (hr == E_NOINTERFACE)
+ {
+ // Helpful message for a potentially common problem
+ ProfilingAPIUtility::LogNoInterfaceError(IID_ICorProfilerCallback2, wszClsid);
+ }
+ else if (hr == CORPROF_E_PROFILER_CANCEL_ACTIVATION)
+ {
+ // Profiler didn't encounter a bad error, but is voluntarily choosing not to
+ // profile this runtime. Profilers that need to set system environment
+ // variables to be able to profile services may use this HRESULT to avoid
+ // profiling all the other managed apps on the box.
+ ProfilingAPIUtility::LogProfInfo(IDS_PROF_CANCEL_ACTIVATION, wszClsid);
+ }
+ else if (FAILED(hr))
+ {
+ // Catch-all error for other CoCreateInstance failures
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_CCI_FAILED, wszClsid, hr);
+ }
+
+ // Now that hr is normalized (set to error if pCallback2FromCreateInstance == NULL),
+ // LOG and abort if there was a problem.
+ if (FAILED(hr))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF: Unable to CoCreateInstance profiler class %S. hr=0x%x.\n",
+ wszClsid,
+ hr));
+ return hr;
+ }
+
+ // Redundantly QI for ICorProfilerCallback2. This keeps CLR behavior consistent
+ // with Whidbey, and works around the following bug in some profilers' class factory
+ // CreateInstance:
+ // * CreateInstance() ignores the IID it's given
+ // * CreateInstance() returns a pointer to the object it created, even though
+ // that object might not support the IID passed to CreateInstance().
+ // Whidbey CLR worked around this problem by redundantly QI'ing for the same IID
+ // again after CreateInstance() returned. In this redundant QI, the profiler code would
+ // finally realize it didn't support that IID, and return an error there. Without
+ // the redundant QI, the CLR would accept what it got from CreateInstance(), and
+ // start calling into it using the unsupported interface's vtable, which would
+ // cause an AV.
+ //
+ // There were many MSDN samples (for example
+ // http://msdn.microsoft.com/msdnmag/issues/03/01/NETProfilerAPI/) which
+ // unfortunately had this CreateInstance() bug, so many profilers might have been
+ // generated based on this code. Since it's easy & cheap to work around the
+ // problem, we do so here with the redundant QI.
+ hr = pCallback2FromCreateInstance->QueryInterface(
+ IID_ICorProfilerCallback2,
+ (LPVOID *) &pCallback2FromQI);
+
+ // (pCallback2FromQI == NULL) should be considered an error!
+ if ((pCallback2FromQI == NULL) && SUCCEEDED(hr))
+ {
+ hr = E_NOINTERFACE;
+ }
+
+ // Any error at this stage implies IID_ICorProfilerCallback2 is not supported
+ if (FAILED(hr))
+ {
+ // Helpful message for a potentially common problem
+ ProfilingAPIUtility::LogNoInterfaceError(IID_ICorProfilerCallback2, wszClsid);
+ return hr;
+ }
+
+ // Ok, safe to transfer ownership to caller's [out] param
+ *ppCallback = pCallback2FromQI.Extract();
+ pCallback2FromQI = NULL;
+
+ return S_OK;
+}
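To make the workaround above concrete, the class-factory bug looks roughly like this in an affected profiler. This is hypothetical profiler-side code, shown only to illustrate why the redundant QI is needed; BuggyClassFactory and MyProfiler are invented names.

    // Hypothetical buggy profiler class factory, per the pattern described above.
    HRESULT STDMETHODCALLTYPE BuggyClassFactory::CreateInstance(
        IUnknown *pUnkOuter, REFIID riid, void **ppvObject)
    {
        // BUG: riid is ignored; the raw object pointer is returned even when the
        // requested interface (e.g. IID_ICorProfilerCallback2) isn't implemented.
        // Without the CLR's redundant QI, calls through the wrong vtable would AV.
        *ppvObject = static_cast<ICorProfilerCallback *>(new MyProfiler());
        return S_OK;
    }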
+
+
+//---------------------------------------------------------------------------------------
+//
+// Implementation of CHashTableImpl functions. This class is a simple implementation of
+// CHashTable that provides a trivial implementation of the Cmp pure virtual function
+//
+
+EEToProfInterfaceImpl::CHashTableImpl::CHashTableImpl(ULONG iBuckets)
+ : CHashTable(iBuckets)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Comparison function for hash table of ClassIDs
+//
+// Arguments:
+// pc1 - hash key to compare
+// pc2 - hash value to compare
+//
+// Return Value:
+// FALSE if the key & value refer to the same ClassID (a match, per the CHashTable convention); otherwise TRUE
+//
+
+BOOL EEToProfInterfaceImpl::CHashTableImpl::Cmp(SIZE_T k1, const HASHENTRY * pc2)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ClassID key = (ClassID) k1;
+ ClassID val = ((CLASSHASHENTRY *)pc2)->m_clsId;
+
+ return (key != val);
+}
+
+
+//---------------------------------------------------------------------------------------
+// Private maintenance functions for initialization, cleanup, etc.
+
+EEToProfInterfaceImpl::AllocByClassData *EEToProfInterfaceImpl::m_pSavedAllocDataBlock = NULL;
+
+//---------------------------------------------------------------------------------------
+//
+// EEToProfInterfaceImpl ctor just sets initial values
+//
+
+EEToProfInterfaceImpl::EEToProfInterfaceImpl() :
+ m_pCallback2(NULL),
+ m_pCallback3(NULL),
+ m_pCallback4(NULL),
+ m_pCallback5(NULL),
+ m_pCallback6(NULL),
+ m_hmodProfilerDLL(NULL),
+ m_fLoadedViaAttach(FALSE),
+ m_pProfToEE(NULL),
+ m_pProfilersFuncIDMapper(NULL),
+ m_pProfilersFuncIDMapper2(NULL),
+ m_pProfilersFuncIDMapper2ClientData(NULL),
+ m_GUID(k_guidZero),
+ m_lGUIDCount(0),
+ m_pGCRefDataFreeList(NULL),
+ m_csGCRefDataFreeList(NULL),
+ m_pEnter(NULL),
+ m_pLeave(NULL),
+ m_pTailcall(NULL),
+ m_pEnter2(NULL),
+ m_pLeave2(NULL),
+ m_pTailcall2(NULL),
+ m_fIsClientIDToFunctionIDMappingEnabled(TRUE),
+ m_pEnter3(NULL),
+ m_pLeave3(NULL),
+ m_pTailcall3(NULL),
+ m_pEnter3WithInfo(NULL),
+ m_pLeave3WithInfo(NULL),
+ m_pTailcall3WithInfo(NULL),
+ m_fUnrevertiblyModifiedIL(FALSE),
+ m_pFunctionIDHashTable(NULL),
+ m_pFunctionIDHashTableRWLock(NULL),
+ m_dwConcurrentGCWaitTimeoutInMs(INFINITE),
+ m_bHasTimedOutWaitingForConcurrentGC(FALSE)
+{
+ // Also NULL out this static. (Note: consider making this a member variable.)
+ m_pSavedAllocDataBlock = NULL;
+ LIMITED_METHOD_CONTRACT;
+}
+
+//
+//---------------------------------------------------------------------------------------
+//
+// Post-constructor initialization of EEToProfInterfaceImpl. Sets everything up,
+// including creating the profiler.
+//
+// Parameters:
+// * pProfToEE - A newly-created ProfToEEInterfaceImpl instance that will be passed
+// to the profiler as the ICorProfilerInfo3 interface implementation.
+// * pClsid - Profiler's CLSID
+// * wszClsid - String form of CLSID or progid of profiler to load
+// * wszProfileDLL - Path to profiler DLL
+// * fLoadedViaAttach - TRUE iff the profiler is being attach-loaded (else
+// profiler is being startup-loaded)
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+// Notes:
+// This function (or one of its callees) will log an error to the event log if there
+// is a failure
+//
+
+
+HRESULT EEToProfInterfaceImpl::Init(
+ ProfToEEInterfaceImpl * pProfToEE,
+ const CLSID * pClsid,
+ __in_z LPCWSTR wszClsid,
+ __in_z LPCWSTR wszProfileDLL,
+ BOOL fLoadedViaAttach,
+ DWORD dwConcurrentGCWaitTimeoutInMs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ // This causes events to be logged, which loads resource strings,
+ // which takes locks.
+ CAN_TAKE_LOCK;
+
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+
+ _ASSERTE(pProfToEE != NULL);
+
+ m_fLoadedViaAttach = fLoadedViaAttach;
+ m_dwConcurrentGCWaitTimeoutInMs = dwConcurrentGCWaitTimeoutInMs;
+
+ // The rule sez your Crst should switch to preemptive when it's taken. We intentionally
+ // break this rule with CRST_UNSAFE_ANYMODE, because this Crst is taken DURING A GC
+ // (see AllocateMovedReferencesData(), called by MovedReference(), called by the GC),
+ // and we don't want to be switching modes in the middle of a GC! Indeed, on server there
+ // may not even be a mode in the first place.
+ CRITSEC_AllocationHolder csGCRefDataFreeList(ClrCreateCriticalSection(CrstProfilerGCRefDataFreeList, CRST_UNSAFE_ANYMODE));
+ if (csGCRefDataFreeList == NULL)
+ {
+ LOG((LF_CORPROF,
+ LL_ERROR,
+ "**PROF: Failed to create Crst during initialization.\n"));
+
+ // A specialized event log entry for this failure would be confusing and
+ // unhelpful. So just log a generic internal failure event
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, E_FAIL);
+ return E_FAIL;
+ }
+
+ // CEEInfo::GetProfilingHandle will be in PREEMPTIVE mode when trying to update
+ // m_pFunctionIDHashTable, while ProfileEnter, ProfileLeave, ProfileTailcall,
+ // and LookupClientIDFromCache will all be in COOPERATIVE mode when trying
+ // to read m_pFunctionIDHashTable, so pFunctionIDHashTableRWLock must be created
+ // with COOPERATIVE_OR_PREEMPTIVE. It is safe to do so because the FunctionIDHashTable,
+ // synchronized by m_pFunctionIDHashTableRWLock, runs only native code and uses
+ // only the native heap.
+ NewHolder<SimpleRWLock> pFunctionIDHashTableRWLock(new (nothrow) SimpleRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT));
+
+ NewHolder<FunctionIDHashTable> pFunctionIDHashTable(new (nothrow) FunctionIDHashTable());
+
+ if ((pFunctionIDHashTable == NULL) || (pFunctionIDHashTableRWLock == NULL))
+ {
+ LOG((LF_CORPROF,
+ LL_ERROR,
+ "**PROF: Failed to create FunctionIDHashTable or FunctionIDHashTableRWLock during initialization.\n"));
+
+ // A specialized event log entry for this failure would be confusing and
+ // unhelpful. So just log a generic internal failure event
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, E_OUTOFMEMORY);
+
+ return E_OUTOFMEMORY;
+ }
+
+ // This wraps the following profiler calls in a try / catch:
+ // * ClassFactory::CreateInstance
+ // * AddRef/Release/QueryInterface
+ // Although most profiler calls are not protected, these creation calls are
+ // protected here since it's cheap to do so (this is only done once per load of a
+ // profiler), and it would be nice to avoid tearing down the entire process when
+ // attaching a profiler that may pass back bogus vtables.
+ EX_TRY
+ {
+ // CoCreate the profiler (but don't call its Initialize() method yet)
+ hr = CreateProfiler(pClsid, wszClsid, wszProfileDLL);
+ }
+ EX_CATCH
+ {
+ hr = E_UNEXPECTED;
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_UNHANDLED_EXCEPTION_ON_LOAD, wszClsid);
+ }
+ // Intentionally swallowing all exceptions, as we don't want a poorly-written
+ // profiler that throws or AVs on attach to cause the entire process to go away.
+ EX_END_CATCH(SwallowAllExceptions);
+
+
+ if (FAILED(hr))
+ {
+ // CreateProfiler (or catch clause above) has already logged an event to the
+ // event log on failure
+ return hr;
+ }
+
+ m_pProfToEE = pProfToEE;
+
+ m_csGCRefDataFreeList = csGCRefDataFreeList.Extract();
+ csGCRefDataFreeList = NULL;
+
+ m_pFunctionIDHashTable = pFunctionIDHashTable.Extract();
+ pFunctionIDHashTable = NULL;
+
+ m_pFunctionIDHashTableRWLock = pFunctionIDHashTableRWLock.Extract();
+ pFunctionIDHashTableRWLock = NULL;
+
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// This is used by Init() to load the user-specified profiler (but not to call
+// its Initialize() method).
+//
+// Arguments:
+// pClsid - Profiler's CLSID
+// wszClsid - String form of CLSID or progid of profiler to load
+// wszProfileDLL - Path to profiler DLL
+//
+// Return Value:
+ // HRESULT indicating success / failure. If this is successful, m_pCallback2 will be
+ // set to the profiler's ICorProfilerCallback2 interface on return. m_pCallback3
+ // through m_pCallback6 will be set to the profiler's corresponding higher-version
+ // interfaces on return, for whichever of ICorProfilerCallback3-6 it supports.
+//
+// Assumptions:
+// Although the profiler has not yet been instantiated, it is assumed that the internal
+// profiling API structures have already been created
+//
+// Notes:
+// This function (or one of its callees) will log an error to the event log
+// if there is a failure
+
+HRESULT EEToProfInterfaceImpl::CreateProfiler(
+ const CLSID * pClsid,
+ __in_z LPCWSTR wszClsid,
+ __in_z LPCWSTR wszProfileDLL)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+
+ // This causes events to be logged, which loads resource strings,
+ // which takes locks.
+ CAN_TAKE_LOCK;
+
+ MODE_PREEMPTIVE;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // Always called before Thread created.
+ _ASSERTE(GetThreadNULLOk() == NULL);
+
+ // We'll be calling into the profiler to create its ICorProfilerCallback*
+ // implementation
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+
+ // Try and CoCreate the registered profiler
+ ReleaseHolder<ICorProfilerCallback2> pCallback2;
+ HModuleHolder hmodProfilerDLL;
+ HRESULT hr = CoCreateProfiler(
+ pClsid,
+ wszClsid,
+ wszProfileDLL,
+ &pCallback2,
+ &hmodProfilerDLL);
+ if (FAILED(hr))
+ {
+ // CoCreateProfiler logs events to the event log on failures
+ return hr;
+ }
+
+ // CoCreateProfiler ensures that if it succeeds, we get some valid pointers
+ _ASSERTE(pCallback2 != NULL);
+ _ASSERTE(hmodProfilerDLL != NULL);
+
+ // Save profiler pointers into this. The reference ownership now
+ // belongs to this class, so NULL out locals without allowing them to release
+ m_pCallback2 = pCallback2.Extract();
+ pCallback2 = NULL;
+ m_hmodProfilerDLL = hmodProfilerDLL.Extract();
+ hmodProfilerDLL = NULL;
+
+ // The profiler may optionally support ICorProfilerCallback3,4,5,6. Let's check.
+
+ ReleaseHolder<ICorProfilerCallback6> pCallback6;
+ hr = m_pCallback2->QueryInterface(
+ IID_ICorProfilerCallback6,
+ (LPVOID *) &pCallback6);
+ if (SUCCEEDED(hr) && (pCallback6 != NULL))
+ {
+ // Nifty. Transfer ownership to this class
+ _ASSERTE(m_pCallback6 == NULL);
+ m_pCallback6 = pCallback6.Extract();
+ pCallback6 = NULL;
+
+ // And while we're at it, we must now also have an ICorProfilerCallback3,4,5
+ // due to the inheritance relationship of the interfaces
+
+ _ASSERTE(m_pCallback5 == NULL);
+ m_pCallback5 = static_cast<ICorProfilerCallback5 *>(m_pCallback6);
+ m_pCallback5->AddRef();
+
+ _ASSERTE(m_pCallback4 == NULL);
+ m_pCallback4 = static_cast<ICorProfilerCallback4 *>(m_pCallback5);
+ m_pCallback4->AddRef();
+
+ _ASSERTE(m_pCallback3 == NULL);
+ m_pCallback3 = static_cast<ICorProfilerCallback3 *>(m_pCallback4);
+ m_pCallback3->AddRef();
+ }
+
+ if (m_pCallback5 == NULL)
+ {
+ ReleaseHolder<ICorProfilerCallback5> pCallback5;
+ hr = m_pCallback2->QueryInterface(
+ IID_ICorProfilerCallback5,
+ (LPVOID *) &pCallback5);
+ if (SUCCEEDED(hr) && (pCallback5 != NULL))
+ {
+ // Nifty. Transfer ownership to this class
+ _ASSERTE(m_pCallback5 == NULL);
+ m_pCallback5 = pCallback5.Extract();
+ pCallback5 = NULL;
+
+ // And while we're at it, we must now also have an ICorProfilerCallback3 and an
+ // ICorProfilerCallback4 due to the inheritance relationship of the interfaces
+ _ASSERTE(m_pCallback4 == NULL);
+ m_pCallback4 = static_cast<ICorProfilerCallback4 *>(m_pCallback5);
+ m_pCallback4->AddRef();
+
+ _ASSERTE(m_pCallback3 == NULL);
+ m_pCallback3 = static_cast<ICorProfilerCallback3 *>(m_pCallback4);
+ m_pCallback3->AddRef();
+ }
+ }
+
+ if (m_pCallback4 == NULL)
+ {
+ ReleaseHolder<ICorProfilerCallback4> pCallback4;
+ hr = m_pCallback2->QueryInterface(
+ IID_ICorProfilerCallback4,
+ (LPVOID *) &pCallback4);
+ if (SUCCEEDED(hr) && (pCallback4 != NULL))
+ {
+ // Nifty. Transfer ownership to this class
+ _ASSERTE(m_pCallback4 == NULL);
+ m_pCallback4 = pCallback4.Extract();
+ pCallback4 = NULL;
+
+ // And while we're at it, we must now also have an ICorProfilerCallback3
+ // due to the inheritance relationship of the interfaces
+ _ASSERTE(m_pCallback3 == NULL);
+ m_pCallback3 = static_cast<ICorProfilerCallback3 *>(m_pCallback4);
+ m_pCallback3->AddRef();
+ }
+ }
+
+ if (m_pCallback3 == NULL)
+ {
+ ReleaseHolder<ICorProfilerCallback3> pCallback3;
+ hr = m_pCallback2->QueryInterface(
+ IID_ICorProfilerCallback3,
+ (LPVOID *) &pCallback3);
+ if (SUCCEEDED(hr) && (pCallback3 != NULL))
+ {
+ // Nifty. Transfer ownership to this class
+ _ASSERTE(m_pCallback3 == NULL);
+ m_pCallback3 = pCallback3.Extract();
+ pCallback3 = NULL;
+ }
+ }
+
+ return S_OK;
+}
+
+
+
+
+//---------------------------------------------------------------------------------------
+//
+// Performs cleanup for EEToProfInterfaceImpl, including releasing the profiler's
+// callback interface. Called on termination of a profiler connection.
+//
+
+EEToProfInterfaceImpl::~EEToProfInterfaceImpl()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ // When we release the profiler's callback interface
+ // below, it may well perform cleanup that takes locks.
+ // Example: profiler may release a metadata interface, which
+ // causes it to take a reader lock
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // Make sure there's no pointer about to dangle once we disappear.
+ // FUTURE: For reattach-with-neutered-profilers feature crew, change this assert to
+ // scan through list of detaching profilers to make sure none of them give a
+ // GetEEToProfPtr() equal to this
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ _ASSERTE(ProfilingAPIDetach::GetEEToProfPtr() == NULL);
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+ // Release user-specified profiler DLL
+ // NOTE: If we're tearing down the process, then do nothing related
+ // to cleaning up the profiler DLL, as the DLL may no longer
+ // be present.
+ if (!IsAtProcessExit())
+ {
+ if (m_pCallback2 != NULL)
+ {
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ m_pCallback2->Release();
+ m_pCallback2 = NULL;
+ }
+
+ BOOL fIsV4Profiler = (m_pCallback3 != NULL);
+
+ if (fIsV4Profiler)
+ {
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ m_pCallback3->Release();
+ m_pCallback3 = NULL;
+ }
+
+ if (m_pCallback4 != NULL)
+ {
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ m_pCallback4->Release();
+ m_pCallback4 = NULL;
+ }
+
+ if (m_pCallback5 != NULL)
+ {
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ m_pCallback5->Release();
+ m_pCallback5 = NULL;
+ }
+
+ if (m_pCallback6 != NULL)
+ {
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ m_pCallback6->Release();
+ m_pCallback6 = NULL;
+ }
+
+ // Only unload the V4 profiler if this is not part of shutdown. This protects
+ // Whidbey profilers that aren't used to being FreeLibrary'd.
+ if (fIsV4Profiler && !g_fEEShutDown)
+ {
+ if (m_hmodProfilerDLL != NULL)
+ {
+ FreeLibrary(m_hmodProfilerDLL);
+ m_hmodProfilerDLL = NULL;
+ }
+
+ // Now that the profiler is destroyed, it is no longer referencing our
+ // ProfToEEInterfaceImpl, so it's safe to destroy that, too.
+ if (m_pProfToEE != NULL)
+ {
+ delete m_pProfToEE;
+ m_pProfToEE = NULL;
+ }
+ }
+ }
+
+ // Delete the structs associated with GC moved references
+ while (m_pGCRefDataFreeList)
+ {
+ GCReferencesData * pDel = m_pGCRefDataFreeList;
+ m_pGCRefDataFreeList = m_pGCRefDataFreeList->pNext;
+ delete pDel;
+ }
+
+ if (m_pSavedAllocDataBlock)
+ {
+#ifdef _WIN64
+ _ASSERTE((UINT_PTR)m_pSavedAllocDataBlock != 0xFFFFFFFFFFFFFFFF);
+#else
+ _ASSERTE((UINT_PTR)m_pSavedAllocDataBlock != 0xFFFFFFFF);
+#endif
+
+ _ASSERTE(m_pSavedAllocDataBlock->pHashTable != NULL);
+ // Get rid of the hash table
+ if (m_pSavedAllocDataBlock->pHashTable)
+ delete m_pSavedAllocDataBlock->pHashTable;
+
+ // Get rid of the two arrays used to hold class<->numinstance info
+ if (m_pSavedAllocDataBlock->cLength != 0)
+ {
+ _ASSERTE(m_pSavedAllocDataBlock->arrClsId != NULL);
+ _ASSERTE(m_pSavedAllocDataBlock->arrcObjects != NULL);
+
+ delete [] m_pSavedAllocDataBlock->arrClsId;
+ delete [] m_pSavedAllocDataBlock->arrcObjects;
+ }
+
+ // Get rid of the hash array used by the hash table
+ if (m_pSavedAllocDataBlock->arrHash)
+ {
+ delete [] m_pSavedAllocDataBlock->arrHash;
+ }
+
+ m_pSavedAllocDataBlock = NULL;
+ }
+
+ m_GUID = k_guidZero;
+
+ if (m_csGCRefDataFreeList != NULL)
+ {
+ ClrDeleteCriticalSection(m_csGCRefDataFreeList);
+ m_csGCRefDataFreeList = NULL;
+ }
+
+ if (m_pFunctionIDHashTable != NULL)
+ {
+ delete m_pFunctionIDHashTable;
+ m_pFunctionIDHashTable = NULL;
+ }
+
+ if (m_pFunctionIDHashTableRWLock != NULL)
+ {
+ delete m_pFunctionIDHashTableRWLock;
+ m_pFunctionIDHashTableRWLock = NULL;
+ }
+}
+
+
+
+//---------------------------------------------------------------------------------------
+//
+// Initialize the GUID used for the cookie in remoting callbacks. If already
+// initialized, this just does nothing and returns S_OK.
+//
+// Return Value:
+// HRESULT indicating success or failure. If the GUID was already initialized,
+// just returns S_OK
+//
+//
+
+HRESULT EEToProfInterfaceImpl::InitGUID()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ ASSERT_NO_EE_LOCKS_HELD();
+ }
+ CONTRACTL_END;
+
+ if (IsEqualGUID(m_GUID, k_guidZero))
+ {
+ return CoCreateGuid(&m_GUID);
+ }
+
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Returns a GUID suitable for use as a remoting callback cookie for this thread.
+// The GUID is based on the template GUID (m_GUID), the current thread, and
+// a counter.
+//
+// Arguments:
+// pGUID - [out] The GUID requested
+//
+
+void EEToProfInterfaceImpl::GetGUID(GUID * pGUID)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ ASSERT_NO_EE_LOCKS_HELD();
+ }
+ CONTRACTL_END;
+
+ // the member GUID and the argument should both be valid
+ _ASSERTE(!(IsEqualGUID(m_GUID, k_guidZero)));
+ _ASSERTE(pGUID);
+
+ // Copy the contents of the template GUID
+ memcpy(pGUID, &m_GUID, sizeof(GUID));
+
+ // Adjust the last two bytes
+ pGUID->Data4[6] = (BYTE) GetCurrentThreadId();
+ pGUID->Data4[7] = (BYTE) InterlockedIncrement((LPLONG)&m_lGUIDCount);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Wrapper around calling profiler's FunctionIDMapper hook. Called by JIT.
+//
+// Arguments:
+// funcId - FunctionID for profiler to map
+// pbHookFunction - [out] Specifies whether the profiler wants to hook (enter/leave)
+// this function
+//
+// Return Value:
+// The profiler-specified value that we should use to identify this function
+// in future hooks (enter/leave).
+// If the remapped ID returned by the profiler is NULL, we will replace it with
+// funcId. Thus, this function will never return NULL.
+//
+
+UINT_PTR EEToProfInterfaceImpl::EEFunctionIDMapper(FunctionID funcId, BOOL * pbHookFunction)
+{
+ // This isn't a public callback via ICorProfilerCallback*, but it's close (a
+ // public callback via a function pointer). So we'll aim to have the preferred
+ // contracts here.
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // ListLockEntry typically held during this callback (thanks to
+ // MethodTable::DoRunClassInitThrowing).
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // This is only called when CORProfilerFunctionIDMapperEnabled() is true, which
+ // means at least one of m_pProfilersFuncIDMapper and m_pProfilersFuncIDMapper2 is non-NULL
+ _ASSERTE((m_pProfilersFuncIDMapper != NULL) || (m_pProfilersFuncIDMapper2 != NULL));
+
+ UINT_PTR clientId = NULL;
+
+ if (m_pProfilersFuncIDMapper2 != NULL)
+ {
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO100,
+ "**PROF: Calling profiler's FunctionIDMapper2. funcId: 0x%p. clientData: 0x%p.\n",
+ funcId,
+ m_pProfilersFuncIDMapper2ClientData));
+
+ // The attached profiler may not want to hook this function, so ask it
+ clientId = m_pProfilersFuncIDMapper2(funcId, m_pProfilersFuncIDMapper2ClientData, pbHookFunction);
+
+ }
+ else
+ {
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO100,
+ "**PROF: Calling profiler's FunctionIDMapper. funcId: 0x%p.\n",
+ funcId));
+
+ // The attached profiler may not want to hook this function, so ask it
+ clientId = m_pProfilersFuncIDMapper(funcId, pbHookFunction);
+ }
+
+ static LONG s_lIsELT2Enabled = -1;
+ if (s_lIsELT2Enabled == -1)
+ {
+ LONG lEnabled = ((m_pEnter2 != NULL) ||
+ (m_pLeave2 != NULL) ||
+ (m_pTailcall2 != NULL));
+
+ InterlockedCompareExchange(&s_lIsELT2Enabled, lEnabled, -1);
+ }
+
+ // We need to keep track of the mapping between ClientID and FunctionID for ELT2
+ if (s_lIsELT2Enabled != 0)
+ {
+ FunctionIDAndClientID functionIDAndClientID;
+ functionIDAndClientID.functionID = funcId;
+ functionIDAndClientID.clientID = clientId;
+
+ // The ClientID hash table may throw an out-of-memory exception, which is not expected by the caller.
+ EX_TRY
+ {
+ SimpleWriteLockHolder writeLockHolder(m_pFunctionIDHashTableRWLock);
+ m_pFunctionIDHashTable->AddOrReplace(functionIDAndClientID);
+ }
+ EX_CATCH
+ {
+ // Running out of heap memory means we no longer can maintain the integrity of the mapping table.
+ // All ELT2 fast-path hooks are disabled since we cannot report correct FunctionID to the
+ // profiler at this moment.
+ m_fIsClientIDToFunctionIDMappingEnabled = FALSE;
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ // If ELT2 is in use, the FunctionID (rather than the clientID) will be returned to
+ // the JIT to be embedded into the ELT3 probes, because the profiler may map several
+ // FunctionIDs to one clientID to do things like code coverage analysis. FunctionID
+ // to clientID is a one-to-one relationship, while the reverse may not be. Therefore,
+ // FunctionID is used as the key to retrieve the corresponding clientID from the
+ // internal FunctionID hash table.
+ return funcId;
+ }
+
+ // For profilers that support ELT3, clientID will be embedded into the ELT3 probes
+ return clientId;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Private functions called by GC so we can cache data for later notification to
+// the profiler
+//
+
+//---------------------------------------------------------------------------------------
+//
+// Called lazily to allocate or use a recycled GCReferencesData.
+//
+// Return Value:
+// GCReferencesData * requested by caller.
+//
+// Notes:
+ // Uses m_csGCRefDataFreeList to find a recyclable GCReferencesData.
+// Called by GC callbacks that need to record GC references reported
+// to the callbacks by the GC as the GC walks the heap.
+//
+
+EEToProfInterfaceImpl::GCReferencesData * EEToProfInterfaceImpl::AllocateMovedReferencesData()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ if (GetThreadNULLOk()) { MODE_COOPERATIVE; }
+
+ // We directly take m_csGCRefDataFreeList around accessing the free list below
+ CAN_TAKE_LOCK;
+
+ // Thread store lock normally held during this call
+ }
+ CONTRACTL_END;
+
+ GCReferencesData *pData = NULL;
+
+ // SCOPE: Lock m_csGCRefDataFreeList for access to the free list
+ {
+ CRITSEC_Holder csh(m_csGCRefDataFreeList);
+
+ // Anything on the free list for us to grab?
+ if (m_pGCRefDataFreeList != NULL)
+ {
+ // Yup, get the first element from the free list
+ pData = m_pGCRefDataFreeList;
+ m_pGCRefDataFreeList = m_pGCRefDataFreeList->pNext;
+ }
+ }
+
+ if (pData == NULL)
+ {
+ // Still not set, so the free list must not have had anything
+ // available. Go ahead and allocate a struct directly.
+ pData = new (nothrow) GCReferencesData;
+ if (!pData)
+ {
+ return NULL;
+ }
+ }
+
+ // Now init the new block
+ _ASSERTE(pData != NULL);
+
+ // Set our index to the beginning
+ pData->curIdx = 0;
+ pData->compactingCount = 0;
+
+ return pData;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// After reporting references to the profiler, this recycles the GCReferencesData
+// that was used. See EEToProfInterfaceImpl::EndRootReferences2.
+//
+// Arguments:
+// pData - Pointer to GCReferencesData to recycle
+//
+
+void EEToProfInterfaceImpl::FreeMovedReferencesData(GCReferencesData * pData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ // We directly take m_csGCRefDataFreeList around accessing the free list below
+ CAN_TAKE_LOCK;
+
+ // Thread store lock normally held during this callback
+
+ }
+ CONTRACTL_END;
+
+ // SCOPE: Lock m_csGCRefDataFreeList for access to the free list
+ {
+ CRITSEC_Holder csh(m_csGCRefDataFreeList);
+ pData->pNext = m_pGCRefDataFreeList;
+ m_pGCRefDataFreeList = pData;
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Called by the GC to notify profapi of a moved reference. We cache the
+// info here so we can later notify the profiler of all moved references
+// in bulk.
+//
+// Arguments:
+// pbMemBlockStart - Start of moved block
+// pbMemBlockEnd - End of moved block
+ // cbRelocDistance - Distance the block moved; added to pbMemBlockStart
+ // to obtain the block's new location
+// pHeapId - GCReferencesData * used to record the block
+// fCompacting - Is this a compacting collection?
+//
+// Return Value:
+// HRESULT indicating success or failure
+//
+
+HRESULT EEToProfInterfaceImpl::MovedReference(BYTE * pbMemBlockStart,
+ BYTE * pbMemBlockEnd,
+ ptrdiff_t cbRelocDistance,
+ void * pHeapId,
+ BOOL fCompacting)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+
+ // Called during a GC
+ GC_NOTRIGGER;
+ if (GetThreadNULLOk()) { MODE_COOPERATIVE; }
+
+ // Thread store lock normally held during this callback
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pHeapId);
+ _ASSERTE(*((size_t *)pHeapId) != (size_t)(-1));
+
+ // Get a pointer to the data for this heap
+ GCReferencesData *pData = (GCReferencesData *)(*((size_t *)pHeapId));
+
+ // If this is the first notification of a moved reference for this heap
+ // in this particular gc activation, then we need to get a ref data block
+ // from the free list of blocks, or if that's empty then we need to
+ // allocate a new one.
+ if (pData == NULL)
+ {
+ pData = AllocateMovedReferencesData();
+ if (pData == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ // Set the cookie so that we will be provided it on subsequent
+ // callbacks
+ ((*((size_t *)pHeapId))) = (size_t)pData;
+ }
+
+ _ASSERTE(pData->curIdx >= 0 && pData->curIdx <= kcReferencesMax);
+
+ // If the struct has been filled, then we need to notify the profiler of
+ // these moved references and clear the struct for the next load of
+ // moved references
+ if (pData->curIdx == kcReferencesMax)
+ {
+ MovedReferences(pData);
+ pData->curIdx = 0;
+ pData->compactingCount = 0;
+ }
+
+ // Now save the information in the struct
+ pData->arrpbMemBlockStartOld[pData->curIdx] = pbMemBlockStart;
+ pData->arrpbMemBlockStartNew[pData->curIdx] = pbMemBlockStart + cbRelocDistance;
+ pData->arrMemBlockSize[pData->curIdx] = pbMemBlockEnd - pbMemBlockStart;
+
+ // Increment the index into the parallel arrays
+ pData->curIdx += 1;
+
+ // Keep track of whether this is a compacting collection
+ if (fCompacting)
+ {
+ pData->compactingCount += 1;
+ // The GC is supposed to make up its mind whether this is a compacting collection
+ // or not. Thus, if this one is compacting, everything so far had to say compacting.
+ _ASSERTE(pData->compactingCount == pData->curIdx);
+ }
+ else
+ {
+ // The GC is supposed to make up its mind whether this is a compacting collection or
+ // not. Thus, if this one is non-compacting, everything so far had to say non-compacting.
+ _ASSERTE(pData->compactingCount == 0 && cbRelocDistance == 0);
+ }
+ return (S_OK);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Called by the GC to indicate that the GC is finished calling
+// EEToProfInterfaceImpl::MovedReference for this collection. This function will
+// call into the profiler to notify it of all the moved references we've cached.
+//
+// Arguments:
+// pHeapId - Casted to a GCReferencesData * that contains the moved reference
+// data we've cached.
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+HRESULT EEToProfInterfaceImpl::EndMovedReferences(void * pHeapId)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+
+ // Called during a GC
+ GC_NOTRIGGER;
+ if (GetThreadNULLOk()) { MODE_COOPERATIVE; }
+
+ // We directly take m_csGCRefDataFreeList around accessing the free list below
+ CAN_TAKE_LOCK;
+
+ // Thread store lock normally held during this callback
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pHeapId);
+ _ASSERTE((*((size_t *)pHeapId)) != (size_t)(-1));
+
+ HRESULT hr = S_OK;
+
+ // Get a pointer to the data for this heap
+ GCReferencesData *pData = (GCReferencesData *)(*((size_t *)pHeapId));
+
+ // If there were no moved references, the profiler doesn't need to know
+ if (!pData)
+ return (S_OK);
+
+ // Communicate the moved references to the profiler
+ _ASSERTE(pData->curIdx > 0);
+ hr = MovedReferences(pData);
+
+ // Now we're done with the data block, we can shove it onto the free list
+ // SCOPE: Lock m_csGCRefDataFreeList for access to the free list
+ {
+ CRITSEC_Holder csh(m_csGCRefDataFreeList);
+ pData->pNext = m_pGCRefDataFreeList;
+ m_pGCRefDataFreeList = pData;
+ }
+
+#ifdef _DEBUG
+ // Set the cookie to an invalid number
+ (*((size_t *)pHeapId)) = (size_t)(-1);
+#endif // _DEBUG
+
+ return (hr);
+}
+
+
+#define HASH_ARRAY_SIZE_INITIAL 1024
+#define HASH_ARRAY_SIZE_INC 256
+#define HASH_NUM_BUCKETS 32
+#define HASH(x) ( (ULONG) ((SIZE_T)x) ) // A simple hash function
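+ // Note that HASH(x) just truncates the pointer-sized key to 32 bits; on 64-bit
+ // builds the high bits are discarded, which is acceptable for bucketing since
+ // Find() below is also given the full (SIZE_T) key for exact comparison. For
+ // example, HASH((void *)0x00007FFE00001234) == 0x00001234.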
+
+//---------------------------------------------------------------------------------------
+//
+// Callback used by the GC when walking the heap (via AllocByClassHelper in
+// ProfToEEInterfaceImpl.cpp).
+//
+// Arguments:
+// objId - Object reference encountered during heap walk
+// classId - ClassID for objID
+// pHeapId - heap walk context used by this function; it's interpreted
+// as an AllocByClassData * to keep track of objects on the
+// heap by class.
+//
+// Return Value:
+// HRESULT indicating whether to continue with the heap walk (i.e.,
+// success HRESULT) or abort it (i.e., failure HRESULT).
+//
+
+HRESULT EEToProfInterfaceImpl::AllocByClass(ObjectID objId, ClassID clsId, void * pHeapId)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ // This is a slight attempt to make sure that this is never called in a multi-threaded
+ // manner. This heap walk should be done by one thread at a time only.
+ static DWORD dwProcId = 0xFFFFFFFF;
+#endif
+
+ _ASSERTE(pHeapId != NULL);
+ _ASSERTE((*((size_t *)pHeapId)) != (size_t)(-1));
+
+ // The heapId they pass in is really an AllocByClassData struct ptr.
+ AllocByClassData *pData = (AllocByClassData *)(*((size_t *)pHeapId));
+
+ // If it's null, need to allocate one
+ if (pData == NULL)
+ {
+#ifdef _DEBUG
+ // This is a slight attempt to make sure that this is never called in a multi-threaded
+ // manner. This heap walk should be done by one thread at a time only.
+ dwProcId = GetCurrentProcessId();
+#endif
+
+ // See if we've saved a data block from a previous GC
+ if (m_pSavedAllocDataBlock != NULL)
+ pData = m_pSavedAllocDataBlock;
+
+ // This means we need to allocate all the memory to keep track of the info
+ else
+ {
+ // Get a new alloc data block
+ pData = new (nothrow) AllocByClassData;
+ if (pData == NULL)
+ return (E_OUTOFMEMORY);
+
+ // Create a new hash table
+ pData->pHashTable = new (nothrow) CHashTableImpl(HASH_NUM_BUCKETS);
+ if (!pData->pHashTable)
+ {
+ delete pData;
+ return (E_OUTOFMEMORY);
+ }
+
+ // Get the memory for the array that the hash table is going to use
+ pData->arrHash = new (nothrow) CLASSHASHENTRY[HASH_ARRAY_SIZE_INITIAL];
+ if (pData->arrHash == NULL)
+ {
+ delete pData->pHashTable;
+ delete pData;
+ return (E_OUTOFMEMORY);
+ }
+
+ // Save the number of elements in the array
+ pData->cHash = HASH_ARRAY_SIZE_INITIAL;
+
+ // Now initialize the hash table
+ HRESULT hr = pData->pHashTable->NewInit((BYTE *)pData->arrHash, sizeof(CLASSHASHENTRY));
+ if (hr == E_OUTOFMEMORY)
+ {
+ delete [] pData->arrHash;
+ delete pData->pHashTable;
+ delete pData;
+ return (E_OUTOFMEMORY);
+ }
+ _ASSERTE(pData->pHashTable->IsInited());
+
+ // Null some entries
+ pData->arrClsId = NULL;
+ pData->arrcObjects = NULL;
+ pData->cLength = 0;
+
+ // Hold on to the structure
+ m_pSavedAllocDataBlock = pData;
+ }
+
+ // Got some memory and hash table to store entries, yay!
+ *((size_t *)pHeapId) = (size_t)pData;
+
+ // Initialize the data
+ pData->iHash = 0;
+ pData->pHashTable->Clear();
+ }
+
+ _ASSERTE(pData->iHash <= pData->cHash);
+ _ASSERTE(dwProcId == GetCurrentProcessId());
+
+ // Lookup to see if this class already has an entry
+ CLASSHASHENTRY * pEntry =
+ reinterpret_cast<CLASSHASHENTRY *>(pData->pHashTable->Find(HASH(clsId), (SIZE_T)clsId));
+
+ // If this class has already been encountered, just increment the counter.
+ if (pEntry)
+ pEntry->m_count++;
+
+ // Otherwise, need to add this one as a new entry in the hash table
+ else
+ {
+ // If we're full, we need to realloc
+ if (pData->iHash == pData->cHash)
+ {
+ // Try to realloc the memory
+ CLASSHASHENTRY *tmp = new (nothrow) CLASSHASHENTRY[pData->cHash + HASH_ARRAY_SIZE_INC];
+ if (!tmp)
+ {
+ return (E_OUTOFMEMORY);
+ }
+
+ _ASSERTE(pData->arrHash);
+ memcpy (tmp, pData->arrHash, pData->cHash*sizeof(CLASSHASHENTRY));
+ delete [] pData->arrHash;
+ pData->arrHash = tmp;
+ // Tell the hash table that the memory location of the array has changed
+ pData->pHashTable->SetTable((BYTE *)pData->arrHash);
+
+ // Save the new size of the array
+ pData->cHash += HASH_ARRAY_SIZE_INC;
+ }
+
+ // Now add the new entry
+ CLASSHASHENTRY *pNewEntry = (CLASSHASHENTRY *) pData->pHashTable->Add(HASH(clsId), pData->iHash++);
+
+ pNewEntry->m_clsId = clsId;
+ pNewEntry->m_count = 1;
+ }
+
+ // Indicate success
+ return (S_OK);
+}
+
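+ //---------------------------------------------------------------------------------------
+ //
+ // Called by the GC to indicate that the heap walk driving AllocByClass is finished.
+ // Notifies the profiler (via NotifyAllocByClass) of the per-class counts we cached.
+ //
+ // Arguments:
+ //      pHeapId - Casted to an AllocByClassData * holding the cached counts
+ //
+ // Return Value:
+ //      HRESULT indicating success or failure.
+ //
+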
+HRESULT EEToProfInterfaceImpl::EndAllocByClass(void *pHeapId)
+{
+ _ASSERTE(pHeapId != NULL);
+ _ASSERTE((*((size_t *)pHeapId)) != (size_t)(-1));
+
+ HRESULT hr = S_OK;
+
+ AllocByClassData *pData = (AllocByClassData *)(*((size_t *)pHeapId));
+
+ // Notify the profiler if there are elements to notify it of
+ if (pData != NULL)
+ hr = NotifyAllocByClass(pData);
+
+#ifdef _DEBUG
+ (*((size_t *)pHeapId)) = (size_t)(-1);
+#endif // _DEBUG
+
+ return (hr);
+}
+
+//---------------------------------------------------------------------------------------
+//
+ // Convert ETW-style root flag bitmask to ProfAPI-style root flag bitmask
+//
+// Arguments:
+// dwEtwRootFlags - ETW-style root flag bitmask
+//
+// Return Value:
+ // The corresponding ProfAPI-style root flag bitmask
+//
+
+DWORD EtwRootFlagsToProfApiRootFlags(DWORD dwEtwRootFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // If a new ETW flag is added, adjust this assert, and add a case below.
+ _ASSERTE((dwEtwRootFlags &
+ ~(kEtwGCRootFlagsPinning | kEtwGCRootFlagsWeakRef | kEtwGCRootFlagsInterior | kEtwGCRootFlagsRefCounted))
+ == 0);
+
+ DWORD dwProfApiRootFlags = 0;
+
+ if ((dwEtwRootFlags & kEtwGCRootFlagsPinning) != 0)
+ {
+ dwProfApiRootFlags |= COR_PRF_GC_ROOT_PINNING;
+ }
+ if ((dwEtwRootFlags & kEtwGCRootFlagsWeakRef) != 0)
+ {
+ dwProfApiRootFlags |= COR_PRF_GC_ROOT_WEAKREF;
+ }
+ if ((dwEtwRootFlags & kEtwGCRootFlagsInterior) != 0)
+ {
+ dwProfApiRootFlags |= COR_PRF_GC_ROOT_INTERIOR;
+ }
+ if ((dwEtwRootFlags & kEtwGCRootFlagsRefCounted) != 0)
+ {
+ dwProfApiRootFlags |= COR_PRF_GC_ROOT_REFCOUNTED;
+ }
+ return dwProfApiRootFlags;
+}
+
+//---------------------------------------------------------------------------------------
+//
+ // Convert ETW-style root kind enum to ProfAPI-style root kind enum
+//
+// Arguments:
+// dwEtwRootKind - ETW-style root kind enum
+//
+// Return Value:
+ // Corresponding ProfAPI-style root kind enum
+//
+
+DWORD EtwRootKindToProfApiRootKind(EtwGCRootKind dwEtwRootKind)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch(dwEtwRootKind)
+ {
+ default:
+ // If a new ETW root kind is added, create a profapi root kind as well, and add
+ // the appropriate case below
+ _ASSERTE(!"Unrecognized ETW root kind");
+ // Deliberately fall through to kEtwGCRootKindOther
+
+ case kEtwGCRootKindOther:
+ return COR_PRF_GC_ROOT_OTHER;
+
+ case kEtwGCRootKindStack:
+ return COR_PRF_GC_ROOT_STACK;
+
+ case kEtwGCRootKindFinalizer:
+ return COR_PRF_GC_ROOT_FINALIZER;
+
+ case kEtwGCRootKindHandle:
+ return COR_PRF_GC_ROOT_HANDLE;
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Callback used by the GC when scanning the roots (via ScanRootsHelper in
+// ProfToEEInterfaceImpl.cpp).
+//
+// Arguments:
+// objectId - Root object reference encountered
+// dwEtwRootKind - ETW enum describing what kind of root objectId is
+// dwEtwRootFlags - ETW flags describing the root qualities of objectId
+// rootID - Root's methoddesc if dwEtwRootKind==kEtwGCRootKindStack, else NULL
+// pHeapId - Used as a GCReferencesData * to keep track of the GC references
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+HRESULT EEToProfInterfaceImpl::RootReference2(BYTE * objectId,
+ EtwGCRootKind dwEtwRootKind,
+ EtwGCRootFlags dwEtwRootFlags,
+ void * rootID,
+ void * pHeapId)
+{
+ _ASSERTE(pHeapId);
+ _ASSERTE(*((size_t *)pHeapId) != (size_t)(-1));
+
+ LOG((LF_CORPROF, LL_INFO100000, "**PROF: Root Reference. "
+ "ObjectID:0x%p dwEtwRootKind:0x%x dwEtwRootFlags:0x%x rootId:0x%p HeadId:0x%p\n",
+ objectId, dwEtwRootKind, dwEtwRootFlags, rootID, pHeapId));
+
+ DWORD dwProfApiRootFlags = EtwRootFlagsToProfApiRootFlags(dwEtwRootFlags);
+ DWORD dwProfApiRootKind = EtwRootKindToProfApiRootKind((EtwGCRootKind) dwEtwRootKind);
+
+ // Get a pointer to the data for this heap
+ GCReferencesData *pData = (GCReferencesData *)(*((size_t *)pHeapId));
+
+ // If this is the first notification of an extended root reference for this heap
+ // in this particular gc activation, then we need to get a ref data block
+ // from the free list of blocks, or if that's empty then we need to
+ // allocate a new one.
+ if (pData == NULL)
+ {
+ pData = AllocateMovedReferencesData();
+ if (pData == NULL)
+ return (E_OUTOFMEMORY);
+
+ // Set the cookie so that we will be provided it on subsequent
+ // callbacks
+ ((*((size_t *)pHeapId))) = (size_t)pData;
+ }
+
+ _ASSERTE(pData->curIdx >= 0 && pData->curIdx <= kcReferencesMax);
+
+ // If the struct has been filled, then we need to notify the profiler of
+ // these root references and clear the struct for the next load of
+ // root references
+ if (pData->curIdx == kcReferencesMax)
+ {
+ RootReferences2(pData);
+ pData->curIdx = 0;
+ }
+
+ // Now save the information in the struct
+ pData->arrpbMemBlockStartOld[pData->curIdx] = objectId;
+ pData->arrpbMemBlockStartNew[pData->curIdx] = (BYTE *)rootID;
+
+ // assert that dwProfApiRootKind and dwProfApiRootFlags both fit in 16 bits, so we can
+ // pack both into a 32-bit word
+ _ASSERTE((dwProfApiRootKind & 0xffff) == dwProfApiRootKind && (dwProfApiRootFlags & 0xffff) == dwProfApiRootFlags);
+
+ pData->arrULONG[pData->curIdx] = (dwProfApiRootKind << 16) | dwProfApiRootFlags;
+
+ // Increment the index into the parallel arrays
+ pData->curIdx += 1;
+
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Called by the GC to indicate that the GC is finished calling
+// EEToProfInterfaceImpl::RootReference2 for this collection. This function will
+// call into the profiler to notify it of all the root references we've cached.
+//
+// Arguments:
+// pHeapId - Casted to a GCReferencesData * that contains the root references
+// we've cached.
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+HRESULT EEToProfInterfaceImpl::EndRootReferences2(void * pHeapId)
+{
+ _ASSERTE(pHeapId);
+ _ASSERTE((*((size_t *)pHeapId)) != (size_t)(-1));
+
+ HRESULT hr = S_OK;
+
+ // Get a pointer to the data for this heap
+ GCReferencesData *pData = (GCReferencesData *)(*((size_t *)pHeapId));
+
+ // If there were no root references, the profiler doesn't need to know
+ if (!pData)
+ return (S_OK);
+
+ // Communicate the root references to the profiler
+ _ASSERTE(pData->curIdx > 0);
+ hr = RootReferences2(pData);
+
+ // Now we're done with the data block, we can shove it onto the free list
+ FreeMovedReferencesData(pData);
+
+#ifdef _DEBUG
+ // Set the cookie to an invalid number
+ (*((size_t *)pHeapId)) = (size_t)(-1);
+#endif // _DEBUG
+
+ return (hr);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Callback used by the GC when scanning the roots (via
+// Ref_ScanDependentHandlesForProfilerAndETW in ObjectHandle.cpp).
+//
+// Arguments:
+// primaryObjectId - Primary object reference in the DependentHandle
+// secondaryObjectId - Secondary object reference in the DependentHandle
+// rootID - The DependentHandle maintaining the dependency relationship
+// pHeapId - Used as a GCReferencesData * to keep track of the GC references
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+HRESULT EEToProfInterfaceImpl::ConditionalWeakTableElementReference(BYTE * primaryObjectId,
+ BYTE * secondaryObjectId,
+ void * rootID,
+ void * pHeapId)
+{
+ _ASSERTE(pHeapId);
+ _ASSERTE(*((size_t *)pHeapId) != (size_t)(-1));
+
+ // Callers must ensure the profiler asked to be notified about dependent handles,
+ // since this is only available for profilers implementing ICorProfilerCallback5 and
+ // greater.
+ _ASSERTE(CORProfilerTrackConditionalWeakTableElements());
+
+ LOG((LF_CORPROF, LL_INFO100000, "**PROF: Root Dependent Handle. "
+ "PrimaryObjectID:0x%p SecondaryObjectID:0x%p rootId:0x%p HeadId:0x%p\n",
+ primaryObjectId, secondaryObjectId, rootID, pHeapId));
+
+ // Get a pointer to the data for this heap
+ GCReferencesData *pData = (GCReferencesData *)(*((size_t *)pHeapId));
+
+ // If this is the first notification of a dependent handle reference in
+ // this particular gc activation, then we need to get a ref data block
+ // from the free list of blocks, or if that's empty then we need to
+ // allocate a new one.
+ if (pData == NULL)
+ {
+ pData = AllocateMovedReferencesData();
+ if (pData == NULL)
+ return (E_OUTOFMEMORY);
+
+ // Set the cookie so that we will be provided it on subsequent
+ // callbacks
+ ((*((size_t *)pHeapId))) = (size_t)pData;
+ }
+
+ _ASSERTE(pData->curIdx >= 0 && pData->curIdx <= kcReferencesMax);
+
+ // If the struct has been filled, then we need to notify the profiler of
+ // these dependent handle references and clear the struct for the next
+ // load of dependent handle references
+ if (pData->curIdx == kcReferencesMax)
+ {
+ ConditionalWeakTableElementReferences(pData);
+ pData->curIdx = 0;
+ }
+
+ // Now save the information in the struct
+ pData->arrpbMemBlockStartOld[pData->curIdx] = primaryObjectId;
+ pData->arrpbMemBlockStartNew[pData->curIdx] = secondaryObjectId;
+ pData->arrpbRootId[pData->curIdx] = (BYTE*) rootID;
+
+ // Increment the index into the parallel arrays
+ pData->curIdx += 1;
+
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Called by the GC to indicate that the GC is finished calling
+// EEToProfInterfaceImpl::ConditionalWeakTableElementReference for this collection. This
+// function will call into the profiler to notify it of all the DependentHandle references
+// we've cached.
+//
+// Arguments:
+// pHeapId - Casted to a GCReferencesData * that contains the dependent handle
+// references we've cached.
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+HRESULT EEToProfInterfaceImpl::EndConditionalWeakTableElementReferences(void * pHeapId)
+{
+ _ASSERTE(pHeapId);
+ _ASSERTE((*((size_t *)pHeapId)) != (size_t)(-1));
+
+ // Callers must ensure the profiler asked to be notified about dependent handles,
+ // since this is only available for profilers implementing ICorProfilerCallback5 and
+ // greater.
+ _ASSERTE(CORProfilerTrackConditionalWeakTableElements());
+
+ HRESULT hr = S_OK;
+
+ // Get a pointer to the data for this heap
+ GCReferencesData *pData = (GCReferencesData *)(*((size_t *)pHeapId));
+
+ // If there were no dependent handles, the profiler doesn't need to know
+ if (!pData)
+ return (S_OK);
+
+ // Communicate the dependent handle references to the profiler
+ _ASSERTE(pData->curIdx > 0);
+ hr = ConditionalWeakTableElementReferences(pData);
+
+ // Now we're done with the data block, we can shove it onto the free list
+ FreeMovedReferencesData(pData);
+
+#ifdef _DEBUG
+ // Set the cookie to an invalid number
+ (*((size_t *)pHeapId)) = (size_t)(-1);
+#endif // _DEBUG
+
+ return (hr);
+}
+
+
+
+//---------------------------------------------------------------------------------------
+//
+// Returns whether the profiler performed unrevertible acts, such as instrumenting
+// code or requesting ELT hooks. RequestProfilerDetach uses this function before
+// performing any sealing or evacuation checks to determine whether it's even possible
+// for the profiler ever to detach.
+//
+// Return Value:
+// * S_OK if it's safe to attempt a detach. Evacuation checks must still be performed
+// before actually unloading the profiler.
+// * else, an HRESULT error value indicating what the profiler did that made it
+// undetachable. This is a public HRESULT suitable for returning from the
+// RequestProfilerDetach API.
+//
+
+HRESULT EEToProfInterfaceImpl::EnsureProfilerDetachable()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if ((g_profControlBlock.dwEventMask & COR_PRF_MONITOR_IMMUTABLE) != 0)
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: Profiler may not detach because it set an immutable flag. Flags = 0x%x.\n",
+ g_profControlBlock.dwEventMask));
+
+ return CORPROF_E_IMMUTABLE_FLAGS_SET;
+ }
+
+ if ((m_pEnter != NULL) ||
+ (m_pLeave != NULL) ||
+ (m_pTailcall != NULL) ||
+ (m_pEnter2 != NULL) ||
+ (m_pLeave2 != NULL) ||
+ (m_pTailcall2 != NULL) ||
+ (m_pEnter3 != NULL) ||
+ (m_pEnter3WithInfo != NULL) ||
+ (m_pLeave3 != NULL) ||
+ (m_pLeave3WithInfo != NULL) ||
+ (m_pTailcall3 != NULL) ||
+ (m_pTailcall3WithInfo != NULL))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: Profiler may not detach because it set an ELT(2) hook.\n"));
+
+ return CORPROF_E_IRREVERSIBLE_INSTRUMENTATION_PRESENT;
+ }
+
+ if (m_fUnrevertiblyModifiedIL)
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: Profiler may not detach because it called SetILFunctionBody.\n"));
+
+ return CORPROF_E_IRREVERSIBLE_INSTRUMENTATION_PRESENT;
+ }
+
+ return S_OK;
+}
+
+// Declarations for asm wrappers of profiler callbacks
+EXTERN_C void __stdcall ProfileEnterNaked(FunctionIDOrClientID functionIDOrClientID);
+EXTERN_C void __stdcall ProfileLeaveNaked(FunctionIDOrClientID functionIDOrClientID);
+EXTERN_C void __stdcall ProfileTailcallNaked(FunctionIDOrClientID functionIDOrClientID);
+#define PROFILECALLBACK(name) name##Naked
+
+//---------------------------------------------------------------------------------------
+//
+// Determines the hooks (slow path vs. fast path) to which the JIT shall
+// insert calls, and then tells the JIT which ones we want
+//
+// Return Value:
+// HRESULT indicating success or failure
+//
+
+HRESULT EEToProfInterfaceImpl::DetermineAndSetEnterLeaveFunctionHooksForJit()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // We're doing all ELT3 hooks, all-Whidbey hooks or all-Everett hooks. No mixing and matching.
+ BOOL fCLRv4Hooks = (m_pEnter3 != NULL) ||
+ (m_pLeave3 != NULL) ||
+ (m_pTailcall3 != NULL) ||
+ (m_pEnter3WithInfo != NULL) ||
+ (m_pLeave3WithInfo != NULL) ||
+ (m_pTailcall3WithInfo != NULL);
+
+ BOOL fWhidbeyHooks = (m_pEnter2 != NULL) ||
+ (m_pLeave2 != NULL) ||
+ (m_pTailcall2 != NULL);
+
+ // If no hooks were set (e.g., SetEventMask called with COR_PRF_MONITOR_ENTERLEAVE,
+ // but SetEnterLeaveFunctionHooks(*) never called), then nothing to do
+ if (!fCLRv4Hooks &&
+ !fWhidbeyHooks &&
+ (m_pEnter == NULL) &&
+ (m_pLeave == NULL) &&
+ (m_pTailcall == NULL))
+ {
+ return S_OK;
+ }
+
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ if (fCLRv4Hooks)
+ {
+ // For each type of hook (enter/leave/tailcall) we must determine if we can use the
+ // happy lucky fast path (i.e., direct call from JITd code right into the profiler's
+ // hook or the JIT default stub (see below)), or the slow path (i.e., call into an
+ // intermediary FCALL which then calls the profiler's hook) with extra information
+ // about the current function.
+
+ // The casts below are to appease rotor. cl.exe doesn't need them.
+ hr = SetEnterLeaveFunctionHooksForJit(
+ (m_pEnter3WithInfo != NULL) ?
+ reinterpret_cast<FunctionEnter3 *>(PROFILECALLBACK(ProfileEnter)) :
+ m_pEnter3,
+ (m_pLeave3WithInfo != NULL) ?
+ reinterpret_cast<FunctionLeave3 *>(PROFILECALLBACK(ProfileLeave)) :
+ m_pLeave3,
+ (m_pTailcall3WithInfo != NULL) ?
+ reinterpret_cast<FunctionTailcall3 *>(PROFILECALLBACK(ProfileTailcall)) :
+ m_pTailcall3);
+ }
+ else
+ {
+ //
+ // Everett or Whidbey hooks.
+ //
+
+ // When using Everett or Whidbey hooks, the check looks like this:
+ //
+ // IF Hook exists
+ // THEN Use slow path
+ //
+ // Why?
+ //
+ // - If the profiler wants the old-style Whidbey or Everett hooks, we need a wrapper
+ // to convert from the ELT3 prototype the JIT expects to the Whidbey or Everett
+ // prototype the profiler expects. This applies to the Whidbey fast-path hooks as
+ // well, and due to the overhead of looking up the FunctionID from the cache and of
+ // using a lock to synchronize cache accesses, the so-called Whidbey fast-path hooks
+ // are much slower than they used to be. Whidbey and Everett hooks are supported to
+ // keep existing profilers running, but profiler writers are encouraged to use the
+ // ELT3 interface for the best performance.
+ //
+ // Implicit in the above logic is that if one of the hook types has no hook pointer
+ // specified, we pass NULL as the hook pointer to the JIT, in which case the JIT
+ // just generates a call to the default stub (a single ret) w/out invoking the
+ // slow-path wrapper. I call this the "fast path to nowhere".
+
+ BOOL fEnter = (m_pEnter != NULL) || (m_pEnter2 != NULL);
+ BOOL fLeave = (m_pLeave != NULL) || (m_pLeave2 != NULL);
+ BOOL fTailcall = (m_pTailcall != NULL) || (m_pTailcall2 != NULL);
+
+ // The casts below are to appease rotor. cl.exe doesn't need them.
+ hr = SetEnterLeaveFunctionHooksForJit(
+ fEnter ?
+ reinterpret_cast<FunctionEnter3 *>(PROFILECALLBACK(ProfileEnter)) :
+ NULL,
+ fLeave ?
+ reinterpret_cast<FunctionLeave3 *>(PROFILECALLBACK(ProfileLeave)) :
+ NULL,
+ fTailcall ?
+ reinterpret_cast<FunctionTailcall3 *>(PROFILECALLBACK(ProfileTailcall)) :
+ NULL);
+ }
+ }
+ EX_CATCH
+ {
+ hr = E_FAIL;
+ }
+ // We need to swallow all exceptions, because we would otherwise take locks (in
+ // addition to the IA64-only lock taken while allocating stub space!). For example,
+ // specifying RethrowTerminalExceptions forces us to test whether the caught
+ // exception is terminal, and Exception::IsTerminal() can lock if we get a handle
+ // table cache miss while getting a handle for the exception. It is good to minimize
+ // locks from profiler Info functions (and their callees), and this is a dumb lock
+ // to have, given that we can avoid it altogether by just having terminal exceptions
+ // be swallowed here, and returning the failure to the profiler. For those who don't
+ // like swallowing terminal exceptions, this is mitigated by the fact that,
+ // currently, an exception only gets thrown from SetEnterLeaveFunctionHooksForJit on
+ // IA64. But to keep consistent (and in case the world changes), we'll do this on
+ // all platforms.
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return hr;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// The Info method SetEventMask() simply defers to this function to do the real work.
+//
+// Arguments:
+// dwEventMask - Event mask specified by the profiler
+//
+// Return Value:
+// HRESULT indicating success / failure to return straight through to the profiler
+//
+
+HRESULT EEToProfInterfaceImpl::SetEventMask(DWORD dwEventMask, DWORD dwEventMaskHigh)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ EE_THREAD_NOT_REQUIRED;
+ CANNOT_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ static const DWORD kEventFlagsRequiringSlowPathEnterLeaveHooks =
+ COR_PRF_ENABLE_FUNCTION_ARGS |
+ COR_PRF_ENABLE_FUNCTION_RETVAL |
+ COR_PRF_ENABLE_FRAME_INFO
+ ;
+
+ static const DWORD kEventFlagsAffectingEnterLeaveHooks =
+ COR_PRF_MONITOR_ENTERLEAVE |
+ kEventFlagsRequiringSlowPathEnterLeaveHooks
+ ;
+
+ HRESULT hr;
+
+#ifdef _DEBUG
+ // Some tests need to enable immutable flags after startup, when a profiler is
+ // attached. These flags enable features that are used solely to verify the
+ // correctness of other, MUTABLE features. Examples: enable immutable ELT to create
+ // shadow stacks to verify stack walks (which can be done mutably via manual
+ // EBP-frame walking), or enable immutable DSS to gather IP addresses to verify the
+ // mutable GetFunctionFromIP.
+ //
+ // Similarly, test profilers may need to extend the set of flags allowable on attach
+ // to enable features that help verify other parts of the profapi that ARE allowed
+ // on attach.
+ //
+ // See code:#P2CLRRestrictionsOverview for more information
+ DWORD dwImmutableEventFlags = COR_PRF_MONITOR_IMMUTABLE;
+ DWORD dwAllowableAfterAttachEventFlags = COR_PRF_ALLOWABLE_AFTER_ATTACH;
+ DWORD dwTestOnlyAllowedEventMask = 0;
+ dwTestOnlyAllowedEventMask = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TestOnlyAllowedEventMask);
+ if (dwTestOnlyAllowedEventMask != 0)
+ {
+ // Remove from the immutable flag list those flags that a test-only profiler may
+ // need to set post-startup (specified via COMPLUS_TestOnlyAllowedEventMask)
+ dwImmutableEventFlags &= ~dwTestOnlyAllowedEventMask;
+
+ // And add to the "allowable after attach" list the same test-only flags.
+ dwAllowableAfterAttachEventFlags |= dwTestOnlyAllowedEventMask;
+
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: TestOnlyAllowedEventMask=0x%x. New immutable flags=0x%x. New AllowableAfterAttach flags=0x%x\n",
+ dwTestOnlyAllowedEventMask,
+ dwImmutableEventFlags,
+ dwAllowableAfterAttachEventFlags));
+ }
+#endif //_DEBUG
+
+ // If we're not in initialization or shutdown, make sure profiler is
+ // not trying to set an immutable attribute
+ // FUTURE: If we add immutable flags to the high event mask, this would be a good
+ // place to check for them as well.
+ if (g_profControlBlock.curProfStatus.Get() != kProfStatusInitializingForStartupLoad)
+ {
+#ifdef _DEBUG
+ if ((dwEventMask & dwImmutableEventFlags) !=
+ (g_profControlBlock.dwEventMask & dwImmutableEventFlags))
+#else //!_DEBUG
+ if ((dwEventMask & COR_PRF_MONITOR_IMMUTABLE) !=
+ (g_profControlBlock.dwEventMask & COR_PRF_MONITOR_IMMUTABLE))
+#endif //_DEBUG
+ {
+ // FUTURE: Should we have a dedicated HRESULT for setting immutable flag?
+ return E_FAIL;
+ }
+ }
+
+ // If this is an attaching profiler, make sure the profiler only sets flags
+ // allowable after an attach
+ if (m_fLoadedViaAttach &&
+#ifdef _DEBUG
+ ((dwEventMask & (~dwAllowableAfterAttachEventFlags)) != 0))
+#else //!_DEBUG
+ ((dwEventMask & (~COR_PRF_ALLOWABLE_AFTER_ATTACH)) != 0))
+#endif //_DEBUG
+ {
+ return CORPROF_E_UNSUPPORTED_FOR_ATTACHING_PROFILER;
+ }
+
+ // After fast path ELT hooks are set in the Initialize callback, the startup profiler
+ // is not allowed to change flags that require slow path ELT hooks, or to disable ELT hooks.
+ if ((g_profControlBlock.curProfStatus.Get() == kProfStatusInitializingForStartupLoad) &&
+ (
+ (m_pEnter3 != NULL) ||
+ (m_pLeave3 != NULL) ||
+ (m_pTailcall3 != NULL)
+ ) &&
+ (
+ ((dwEventMask & kEventFlagsRequiringSlowPathEnterLeaveHooks) != 0) ||
+ ((dwEventMask & COR_PRF_MONITOR_ENTERLEAVE) == 0)
+ )
+ )
+ {
+ _ASSERTE((g_profControlBlock.dwEventMask & kEventFlagsRequiringSlowPathEnterLeaveHooks) == 0);
+ return CORPROF_E_INCONSISTENT_WITH_FLAGS;
+ }
+
+ // After slow path ELT hooks are set in the Initialize callback, the startup profiler
+ // is not allowed to remove all flags that require slow path ELT hooks, or to change
+ // the flags to disable the ELT hooks.
+ if ((g_profControlBlock.curProfStatus.Get() == kProfStatusInitializingForStartupLoad) &&
+ (
+ (m_pEnter3WithInfo != NULL) ||
+ (m_pLeave3WithInfo != NULL) ||
+ (m_pTailcall3WithInfo != NULL)
+ ) &&
+ (
+ ((dwEventMask & kEventFlagsRequiringSlowPathEnterLeaveHooks) == 0) ||
+ ((dwEventMask & COR_PRF_MONITOR_ENTERLEAVE) == 0)
+ )
+ )
+ {
+ _ASSERTE((g_profControlBlock.dwEventMask & kEventFlagsRequiringSlowPathEnterLeaveHooks) != 0);
+ return CORPROF_E_INCONSISTENT_WITH_FLAGS;
+ }
+
+
+ // Note whether the caller is changing flags that affect enter leave hooks
+ BOOL fEnterLeaveHooksAffected =
+ // Did any of the relevant flags change?
+ (
+ (
+ // Old flags
+ ((g_profControlBlock.dwEventMask & kEventFlagsAffectingEnterLeaveHooks) ^
+ // XORed w/ the new flags
+ (dwEventMask & kEventFlagsAffectingEnterLeaveHooks))
+ ) != 0
+ ) &&
+ // And are any enter/leave hooks set?
+ (
+ (m_pEnter3 != NULL) ||
+ (m_pEnter3WithInfo != NULL) ||
+ (m_pEnter2 != NULL) ||
+ (m_pEnter != NULL) ||
+ (m_pLeave3 != NULL) ||
+ (m_pLeave3WithInfo != NULL) ||
+ (m_pLeave2 != NULL) ||
+ (m_pLeave != NULL) ||
+ (m_pTailcall3 != NULL) ||
+ (m_pTailcall3WithInfo != NULL) ||
+ (m_pTailcall2 != NULL) ||
+ (m_pTailcall != NULL)
+ );
+
+ BOOL fNeedToTurnOffConcurrentGC = FALSE;
+
+ if (((dwEventMask & COR_PRF_MONITOR_GC) != 0) &&
+ ((g_profControlBlock.dwEventMask & COR_PRF_MONITOR_GC) == 0))
+ {
+ // We don't need to worry about startup load as we'll turn off concurrent GC later
+ if (g_profControlBlock.curProfStatus.Get() != kProfStatusInitializingForStartupLoad)
+ {
+ // Since we're not an initializing startup profiler, the EE must be fully started up
+ // so we can check whether concurrent GC is on
+ if (!g_fEEStarted)
+ {
+ return CORPROF_E_RUNTIME_UNINITIALIZED;
+ }
+
+ // We don't want to change the flag before the GC is fully initialized,
+ // otherwise the concurrent GC setting would be overwritten.
+ // Make sure the GC is fully initialized before we proceed.
+ if (!IsGarbageCollectorFullyInitialized())
+ {
+ return CORPROF_E_NOT_YET_AVAILABLE;
+ }
+
+ // If we are attaching and we are turning on COR_PRF_MONITOR_GC, turn off concurrent GC later
+ // in this function
+ if (g_profControlBlock.curProfStatus.Get() == kProfStatusInitializingForAttachLoad)
+ {
+ if (GCHeap::GetGCHeap()->IsConcurrentGCEnabled())
+ {
+ // We only allow turning off concurrent GC in the profiler attach thread inside
+ // InitializeForAttach, otherwise we would be vulnerable to weird races such as
+ // SetEventMask running on a separate thread and trying to turn off concurrent GC.
+ // The best option here is to fail with CORPROF_E_CONCURRENT_GC_NOT_PROFILABLE.
+ // Existing Dev10 profilers should be prepared to handle such case.
+ if (IsProfilerAttachThread())
+ {
+ fNeedToTurnOffConcurrentGC = TRUE;
+ }
+ else
+ {
+ return CORPROF_E_CONCURRENT_GC_NOT_PROFILABLE;
+ }
+ }
+ }
+ else
+ {
+ // Fail if concurrent GC is enabled.
+ // This should only happen for attach profilers, if the user didn't turn on
+ // COR_PRF_MONITOR_GC at attach time.
+ if (GCHeap::GetGCHeap()->IsConcurrentGCEnabled())
+ {
+ return CORPROF_E_CONCURRENT_GC_NOT_PROFILABLE;
+ }
+ }
+ }
+ }
+
+ // Flags defined in COR_PRF_REQUIRE_PROFILE_IMAGE will force us to JIT mscorlib if
+ // the user does not ngen mscorlib with /profiler. Similarly, the
+ // COR_PRF_DISABLE_ALL_NGEN_IMAGES flag always forces us to JIT mscorlib. Using the
+ // jitted version of mscorlib with HPA (Host Protection Attributes) enabled will
+ // cause a stack overflow inside the JIT. See Dev 10 Bug 637987 for details.
+ if (((dwEventMask & (COR_PRF_REQUIRE_PROFILE_IMAGE | COR_PRF_DISABLE_ALL_NGEN_IMAGES)) != 0) &&
+ (GetHostProtectionManager() != NULL) &&
+ (GetHostProtectionManager()->GetProtectedCategories() != eNoChecks))
+ {
+ return CORPROF_E_INCONSISTENT_FLAGS_WITH_HOST_PROTECTION_SETTING;
+ }
+
+ // High event bits
+
+ if (((dwEventMaskHigh & COR_PRF_HIGH_ADD_ASSEMBLY_REFERENCES) != 0) &&
+ !IsCallback6Supported())
+ {
+ return CORPROF_E_CALLBACK6_REQUIRED;
+ }
+
+ // Now save the modified masks
+ g_profControlBlock.dwEventMask = dwEventMask;
+ g_profControlBlock.dwEventMaskHigh = dwEventMaskHigh;
+
+ if (fEnterLeaveHooksAffected)
+ {
+ hr = DetermineAndSetEnterLeaveFunctionHooksForJit();
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ }
+
+ if (g_profControlBlock.curProfStatus.Get() == kProfStatusInitializingForStartupLoad)
+ {
+ // If the profiler has requested remoting cookies so that it can
+ // track logical call stacks, then we must initialize the cookie
+ // template.
+ if ((g_profControlBlock.dwEventMask & COR_PRF_MONITOR_REMOTING_COOKIE)
+ == COR_PRF_MONITOR_REMOTING_COOKIE)
+ {
+ hr = InitGUID();
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ }
+ }
+
+ // Turn off concurrent GC as the last step so that we don't need to turn it back on if something
+ // else failed after that
+ if (fNeedToTurnOffConcurrentGC)
+ {
+ // Turn off concurrent GC if it is on so that user can walk the heap safely in GC callbacks
+ GCHeap * pGCHeap = GCHeap::GetGCHeap();
+
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Turning off concurrent GC at attach.\n"));
+
+ // First turn off concurrent GC
+ pGCHeap->TemporaryDisableConcurrentGC();
+
+ //
+ // Then wait for the concurrent GC to finish, if one is in progress. The user can
+ // set a timeout via an environment variable in case the GC turns out to take too
+ // long. The default value is INFINITE.
+ //
+ // NOTE:
+ // If we didn't do it in this order, a new concurrent GC might be started
+ // before we actually turn off concurrent GC.
+ //
+ hr = pGCHeap->WaitUntilConcurrentGCCompleteAsync(m_dwConcurrentGCWaitTimeoutInMs);
+ if (FAILED(hr))
+ {
+ if (hr == HRESULT_FROM_WIN32(ERROR_TIMEOUT))
+ {
+ // Convert it to a more specific HRESULT
+ hr = CORPROF_E_TIMEOUT_WAITING_FOR_CONCURRENT_GC;
+
+ // Since we cannot call LogProfEvent here due to contract violations, we'll need to
+ // remember the fact that we've failed, and report the failure later, after InitializeForAttach.
+ m_bHasTimedOutWaitingForConcurrentGC = TRUE;
+ }
+
+ pGCHeap->TemporaryEnableConcurrentGC();
+ return hr;
+ }
+
+ // Remember that we've turned off concurrent GC and we'll turn it back on in TerminateProfiling
+ g_profControlBlock.fConcurrentGCDisabledForAttach = TRUE;
+
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Concurrent GC has been turned off at attach.\n"));
+ }
+
+ // Return success
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// The Info method SetEnterLeaveFunctionHooks() simply defers to this function to do the
+// real work.
+//
+// Arguments:
+// (same as specified in the public API docs)
+//
+// Return Value:
+// HRESULT indicating success / failure to return straight through to the profiler
+//
+
+HRESULT EEToProfInterfaceImpl::SetEnterLeaveFunctionHooks(FunctionEnter * pFuncEnter,
+ FunctionLeave * pFuncLeave,
+ FunctionTailcall * pFuncTailcall)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ EE_THREAD_NOT_REQUIRED;
+ CANNOT_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // You have to be setting at least one hook
+ if ((pFuncEnter == NULL) && (pFuncLeave == NULL) && (pFuncTailcall == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ // ELT3 hooks beat Whidbey and Whidbey hooks beat Everett hooks. So if any ELT3 or
+ // Whidbey hooks were set (SetEnterLeaveFunctionHooks3(WithInfo) or SetEnterLeaveFunctionHooks2),
+ // this should be a noop
+ if ((m_pEnter3 != NULL) ||
+ (m_pEnter3WithInfo != NULL) ||
+ (m_pLeave3 != NULL) ||
+ (m_pLeave3WithInfo != NULL) ||
+ (m_pTailcall3 != NULL) ||
+ (m_pTailcall3WithInfo != NULL) ||
+ (m_pEnter2 != NULL) ||
+ (m_pLeave2 != NULL) ||
+ (m_pTailcall2 != NULL))
+ {
+ return S_OK;
+ }
+
+ // Always save the function pointers, since we won't know whether the profiler
+ // is going to track enter/leave until after it returns from Initialize
+ m_pEnter = pFuncEnter;
+ m_pLeave = pFuncLeave;
+ m_pTailcall = pFuncTailcall;
+
+ return DetermineAndSetEnterLeaveFunctionHooksForJit();
+}
+
+//---------------------------------------------------------------------------------------
+//
+// The Info method SetEnterLeaveFunctionHooks2() simply defers to this function to do the
+// real work.
+//
+// Arguments:
+// (same as specified in the public API docs)
+//
+// Return Value:
+// HRESULT indicating success / failure to return straight through to the profiler
+//
+
+HRESULT EEToProfInterfaceImpl::SetEnterLeaveFunctionHooks2(FunctionEnter2 * pFuncEnter,
+ FunctionLeave2 * pFuncLeave,
+ FunctionTailcall2 * pFuncTailcall)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ EE_THREAD_NOT_REQUIRED;
+ CANNOT_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // You have to be setting at least one hook
+ if ((pFuncEnter == NULL) && (pFuncLeave == NULL) && (pFuncTailcall == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ // ELT3 hooks beat Whidbey hooks. So if any ELT3 hooks were set (via
+ // SetEnterLeaveFunctionHooks3(WithInfo)), this should be a no-op
+ if ((m_pEnter3 != NULL) ||
+ (m_pEnter3WithInfo != NULL) ||
+ (m_pLeave3 != NULL) ||
+ (m_pLeave3WithInfo != NULL) ||
+ (m_pTailcall3 != NULL) ||
+ (m_pTailcall3WithInfo != NULL))
+ {
+ return S_OK;
+ }
+
+ // Always save off the function pointers, since we won't know if the profiler
+ // is going to track enter/leave until after it returns from Initialize
+ m_pEnter2 = pFuncEnter;
+ m_pLeave2 = pFuncLeave;
+ m_pTailcall2 = pFuncTailcall;
+
+ // Whidbey hooks override Everett hooks
+ m_pEnter = NULL;
+ m_pLeave = NULL;
+ m_pTailcall = NULL;
+
+ return DetermineAndSetEnterLeaveFunctionHooksForJit();
+}
+
+//---------------------------------------------------------------------------------------
+//
+// The Info method SetEnterLeaveFunctionHooks3() simply defers to this function to do the
+// real work.
+//
+// Arguments:
+// (same as specified in the public API docs)
+//
+// Return Value:
+// HRESULT indicating success / failure to return straight through to the profiler
+//
+
+HRESULT EEToProfInterfaceImpl::SetEnterLeaveFunctionHooks3(FunctionEnter3 * pFuncEnter3,
+ FunctionLeave3 * pFuncLeave3,
+ FunctionTailcall3 * pFuncTailcall3)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ EE_THREAD_NOT_REQUIRED;
+ CANNOT_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // You have to be setting at least one hook
+ if ((pFuncEnter3 == NULL) &&
+ (pFuncLeave3 == NULL) &&
+ (pFuncTailcall3 == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ if (CORProfilerELT3SlowPathEnabled())
+ {
+ return CORPROF_E_INCONSISTENT_WITH_FLAGS;
+ }
+
+ // Always save off the function pointers, since we won't know if the profiler
+ // is going to track enter/leave until after it returns from Initialize
+ m_pEnter3 = pFuncEnter3;
+ m_pLeave3 = pFuncLeave3;
+ m_pTailcall3 = pFuncTailcall3;
+ m_pEnter3WithInfo = NULL;
+ m_pLeave3WithInfo = NULL;
+ m_pTailcall3WithInfo = NULL;
+
+ // ELT3 hooks override Whidbey hooks and Everett hooks.
+ m_pEnter2 = NULL;
+ m_pLeave2 = NULL;
+ m_pTailcall2 = NULL;
+ m_pEnter = NULL;
+ m_pLeave = NULL;
+ m_pTailcall = NULL;
+
+ return DetermineAndSetEnterLeaveFunctionHooksForJit();
+}
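+
+// A minimal profiler-side sketch (hypothetical, for illustration only) of registering the
+// fast-path ELT3 hooks that land in the method above; s_pInfo3 is assumed to be an
+// ICorProfilerInfo3 obtained during Initialize():
+//
+//     EXTERN_C void STDMETHODCALLTYPE MyEnter3(FunctionIDOrClientID functionIDOrClientID)
+//     {
+//         // Fast path: no argument / return-value / frame information is available here
+//     }
+//     // ... MyLeave3 and MyTailcall3 have the same signature ...
+//
+//     HRESULT hr = s_pInfo3->SetEnterLeaveFunctionHooks3(&MyEnter3, &MyLeave3, &MyTailcall3);
+//     // Fails with CORPROF_E_INCONSISTENT_WITH_FLAGS if a slow-path flag
+//     // (COR_PRF_ENABLE_FUNCTION_ARGS / _RETVAL / _FRAME_INFO) was requested.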
+
+
+//---------------------------------------------------------------------------------------
+//
+ // The Info method SetEnterLeaveFunctionHooks3WithInfo() simply defers to this function to do the
+// real work.
+//
+// Arguments:
+// (same as specified in the public API docs)
+//
+// Return Value:
+// HRESULT indicating success / failure to return straight through to the profiler
+//
+
+HRESULT EEToProfInterfaceImpl::SetEnterLeaveFunctionHooks3WithInfo(FunctionEnter3WithInfo * pFuncEnter3WithInfo,
+ FunctionLeave3WithInfo * pFuncLeave3WithInfo,
+ FunctionTailcall3WithInfo * pFuncTailcall3WithInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ EE_THREAD_NOT_REQUIRED;
+ CANNOT_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // You have to be setting at least one hook
+ if ((pFuncEnter3WithInfo == NULL) &&
+ (pFuncLeave3WithInfo == NULL) &&
+ (pFuncTailcall3WithInfo == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ if (!CORProfilerELT3SlowPathEnabled())
+ {
+ return CORPROF_E_INCONSISTENT_WITH_FLAGS;
+ }
+
+ // Always save off the function pointers, since we won't know if the profiler
+ // is going to track enter/leave until after it returns from Initialize
+ m_pEnter3WithInfo = pFuncEnter3WithInfo;
+ m_pLeave3WithInfo = pFuncLeave3WithInfo;
+ m_pTailcall3WithInfo = pFuncTailcall3WithInfo;
+ m_pEnter3 = NULL;
+ m_pLeave3 = NULL;
+ m_pTailcall3 = NULL;
+
+ // ELT3 hooks override Whidbey hooks and Everett hooks.
+ m_pEnter2 = NULL;
+ m_pLeave2 = NULL;
+ m_pTailcall2 = NULL;
+ m_pEnter = NULL;
+ m_pLeave = NULL;
+ m_pTailcall = NULL;
+
+ return DetermineAndSetEnterLeaveFunctionHooksForJit();
+}
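+
+// A corresponding slow-path sketch (hypothetical profiler-side code). The WithInfo hooks
+// are only legal when at least one slow-path flag was requested via SetEventMask:
+//
+//     s_pInfo3->SetEventMask(COR_PRF_MONITOR_ENTERLEAVE | COR_PRF_ENABLE_FUNCTION_ARGS);
+//
+//     EXTERN_C void STDMETHODCALLTYPE MyEnter3WithInfo(FunctionIDOrClientID functionIDOrClientID,
+//                                                      COR_PRF_ELT_INFO eltInfo)
+//     {
+//         // eltInfo can be passed to ICorProfilerInfo3::GetFunctionEnter3Info to
+//         // retrieve argument and frame information
+//     }
+//
+//     HRESULT hr = s_pInfo3->SetEnterLeaveFunctionHooks3WithInfo(&MyEnter3WithInfo,
+//                                                                &MyLeave3WithInfo,
+//                                                                &MyTailcall3WithInfo);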
+
+
+
+//---------------------------------------------------------------------------------------
+//
+// ************************
+// Public callback wrappers
+// ************************
+//
+// NOTE: All public callback wrappers must follow the rules stated at the top
+// of this file!
+
+// See corprof.idl / MSDN for detailed comments about each of these public
+// functions, their parameters, return values, etc.
+
+
+
+//---------------------------------------------------------------------------------------
+// INITIALIZE CALLBACKS
+//
+
+HRESULT EEToProfInterfaceImpl::Initialize()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(kEE2PAllowableWhileInitializing,
+ (LF_CORPROF,
+ LL_INFO10,
+ "**PROF: Calling profiler's Initialize() method.\n"));
+
+ _ASSERTE(m_pProfToEE != NULL);
+
+ // Startup initialization occurs before an EEThread object is created for this
+ // thread.
+ _ASSERTE(GetThreadNULLOk() == NULL);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->Initialize(m_pProfToEE);
+ }
+}
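+
+// For reference, a bare-bones sketch (hypothetical, not part of the runtime) of what a
+// profiler's Initialize() typically does with the IUnknown passed above:
+//
+//     HRESULT STDMETHODCALLTYPE MyProfiler::Initialize(IUnknown * pICorProfilerInfoUnk)
+//     {
+//         HRESULT hr = pICorProfilerInfoUnk->QueryInterface(__uuidof(ICorProfilerInfo3),
+//                                                           (void **)&m_pInfo3);
+//         if (FAILED(hr))
+//             return hr;
+//
+//         // Most event-mask flags are immutable once Initialize returns
+//         return m_pInfo3->SetEventMask(COR_PRF_MONITOR_MODULE_LOADS |
+//                                       COR_PRF_MONITOR_JIT_COMPILATION);
+//     }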
+
+
+HRESULT EEToProfInterfaceImpl::InitializeForAttach(void * pvClientData, UINT cbClientData)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(kEE2PAllowableWhileInitializing,
+ (LF_CORPROF,
+ LL_INFO10,
+ "**PROF: Calling profiler's InitializeForAttach() method.\n"));
+
+ _ASSERTE(m_pProfToEE != NULL);
+
+ // Attach initialization occurs on the AttachThread, which does not have an EEThread
+ // object
+ _ASSERTE(GetThreadNULLOk() == NULL);
+
+ // Should only be called on profilers that support ICorProfilerCallback3
+ _ASSERTE(m_pCallback3 != NULL);
+
+ HRESULT hr = E_UNEXPECTED;
+
+ // This wraps the profiler's InitializeForAttach callback in a try / catch. Although
+ // most profiler calls are not protected, this initial callback IS, since it's cheap
+ // to do so (this is only called once per attach of a profiler), and it would be nice to
+ // avoid tearing down the entire process when attaching a profiler that may pass back
+ // bogus vtables.
+ EX_TRY
+ {
+ hr = m_pCallback3->InitializeForAttach(m_pProfToEE, pvClientData, cbClientData);
+ }
+ EX_CATCH
+ {
+ hr = E_UNEXPECTED;
+ }
+ // Intentionally swallowing all exceptions, as we don't want a poorly-written
+ // profiler that throws or AVs on attach to cause the entire process to go away.
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return hr;
+}
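+
+// For context, a sketch (hypothetical trigger-process code) of how a profiler ends up in
+// InitializeForAttach: a separate process asks the target runtime to attach the profiler,
+// passing the opaque blob that arrives here as pvClientData / cbClientData:
+//
+//     ICLRProfiling * pClrProfiling = NULL;  // from CLRCreateInstance(CLSID_CLRProfiling, ...)
+//     hr = pClrProfiling->AttachProfiler(dwTargetPid,
+//                                        10000,            // ms to wait for the attach
+//                                        &clsidProfiler,
+//                                        wszProfilerPath,
+//                                        pvClientData,
+//                                        cbClientData);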
+
+HRESULT EEToProfInterfaceImpl::ProfilerAttachComplete()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: Calling profiler's ProfilerAttachComplete() method.\n"));
+
+ // Attach initialization occurs on the AttachThread, which does not have an EEThread
+ // object
+ _ASSERTE(GetThreadNULLOk() == NULL);
+
+ // Should only be called on profilers that support ICorProfilerCallback3
+ _ASSERTE(m_pCallback3 != NULL);
+
+ HRESULT hr = E_UNEXPECTED;
+
+ // This wraps the profiler's ProfilerAttachComplete callback in a try / catch.
+ // Although most profiler calls are not protected, this early callback IS, since it's
+ // cheap to do so (this is only called once per attach of a profiler), and it would be
+ // nice to avoid tearing down the entire process when attaching a profiler that has
+ // serious troubles initializing itself (e.g., in this case, with processing catch-up
+ // information).
+ EX_TRY
+ {
+ hr = m_pCallback3->ProfilerAttachComplete();
+ }
+ EX_CATCH
+ {
+ hr = E_UNEXPECTED;
+ }
+ // Intentionally swallowing all exceptions, as we don't want a poorly-written
+ // profiler that throws or AVs on attach to cause the entire process to go away.
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return hr;
+}
+
+
+//---------------------------------------------------------------------------------------
+// THREAD EVENTS
+//
+
+
+HRESULT EEToProfInterfaceImpl::ThreadCreated(ThreadID threadId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Preemptive mode is particularly important here. See comment in
+ // EEToProfInterfaceImpl::ThreadDestroyed for more information.
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // Normally these callback wrappers ask IsGCSpecial() and return without calling the
+ // profiler if true. However, ThreadCreated() is the special case where no caller
+ // should even get this far for GC Special threads, since our callers need to know to
+ // avoid the GCX_PREEMP around the call to this function in the first place. See
+ // code:Thread::m_fGCSpecial
+ _ASSERTE(!reinterpret_cast<Thread *>(threadId)->IsGCSpecial());
+
+ CLR_TO_PROFILER_ENTRYPOINT_FOR_THREAD(threadId,
+ (LF_CORPROF,
+ LL_INFO100,
+ "**PROF: Notifying profiler of created thread. ThreadId: 0x%p.\n",
+ threadId));
+
+ // Notify the profiler of the newly created thread.
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ThreadCreated(threadId);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ThreadDestroyed(ThreadID threadId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // See comment below
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock is typically held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ if (reinterpret_cast<Thread *>(threadId)->IsGCSpecial())
+ return S_OK;
+
+ // In general, we like callbacks to switch to preemptive before calling into the
+ // profiler. And this is particularly important to do in the ThreadCreated &
+ // ThreadDestroyed callbacks.
+ //
+ // The profiler will typically block in the ThreadDestroyed callback, because
+ // it must coordinate the use of this threadid amongst all profiler
+ // threads. For instance, if a separate thread A is walking "this" (via DoStackSnapshot),
+ // then the profiler must block in ThreadDestroyed until A is finished. Otherwise,
+ // "this" will complete its destruction before A's walk is complete.
+ //
+ // Since the profiler will block indefinitely in ThreadDestroyed, we need
+ // to switch to preemptive mode. Otherwise, if another thread B needs to suspend
+ // the runtime (due to appdomain unload, GC, etc.), thread B will block
+ // waiting for "this" (assuming we allow "this" to remain in cooperative mode),
+ // while the profiler forces "this" to block on thread A from
+ // the example above. And thread A may need to block on thread B, since
+ // the stackwalking occasionally needs to switch to cooperative to access a
+ // hash map (thus DoStackSnapshot forces the switch to cooperative up-front, before
+ // the target thread to be walked gets suspended (yet another deadlock possibility)),
+ // and switching to cooperative requires a wait until an in-progress GC or
+ // EE suspension is complete. In other words, allowing "this" to remain
+ // in cooperative mode could lead to a 3-way deadlock:
+ // "this" waits on A
+ // A waits on B
+ // B waits on "this".
+ CLR_TO_PROFILER_ENTRYPOINT_FOR_THREAD(threadId,
+ (LF_CORPROF,
+ LL_INFO100,
+ "**PROF: Notifying profiler of destroyed thread. ThreadId: 0x%p.\n",
+ threadId));
+
+ // From now on, issue no more callbacks for this thread
+ SetProfilerCallbacksAllowedForThread((Thread *) threadId, FALSE);
+
+ // Notify the profiler of the destroyed thread
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ThreadDestroyed(threadId);
+ }
+}
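+
+// To make the blocking scenario above concrete, here is a hypothetical profiler-side
+// sketch of the coordination ThreadDestroyed typically performs (names are illustrative):
+//
+//     HRESULT STDMETHODCALLTYPE MyProfiler::ThreadDestroyed(ThreadID threadId)
+//     {
+//         // Block until no sampling thread is still walking threadId via
+//         // DoStackSnapshot; after this callback returns, threadId is invalid.
+//         m_threadTable.RetireAndWaitForWalkers(threadId);
+//         return S_OK;
+//     }
+//
+// Because the profiler may block here indefinitely, this wrapper switches to preemptive
+// mode first, as the contract and comment above explain.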
+
+HRESULT EEToProfInterfaceImpl::ThreadAssignedToOSThread(ThreadID managedThreadId,
+ DWORD osThreadId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Called by the notrigger Thread::DetachThread & CorHost::SwitchOutLogicalThreadState,
+ // which do look to be dangerous times to be triggering a GC
+ GC_NOTRIGGER;
+
+ // This is called in notrigger zones (see above), so it's not safe to switch to preemptive
+ MODE_ANY;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ if (reinterpret_cast<Thread *>(managedThreadId)->IsGCSpecial())
+ return S_OK;
+
+ CLR_TO_PROFILER_ENTRYPOINT_FOR_THREAD_EX(
+ kEE2PNoTrigger,
+ managedThreadId,
+ (LF_CORPROF,
+ LL_INFO100,
+ "**PROF: Notifying profiler of thread assignment. ThreadId: 0x%p, OSThreadId: 0x%08x\n",
+ managedThreadId,
+ osThreadId));
+
+ // Notify the profiler of the thread being assigned to the OS thread
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ThreadAssignedToOSThread(managedThreadId, osThreadId);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ThreadNameChanged(ThreadID managedThreadId,
+ ULONG cchName,
+ __in_ecount_opt(cchName) WCHAR name[])
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ if (reinterpret_cast<Thread *>(managedThreadId)->IsGCSpecial())
+ return S_OK;
+
+ CLR_TO_PROFILER_ENTRYPOINT_FOR_THREAD(managedThreadId,
+ (LF_CORPROF,
+ LL_INFO100,
+ "**PROF: Notifying profiler of thread name change.\n"));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ThreadNameChanged(managedThreadId, cchName, name);
+ }
+}
+
+//---------------------------------------------------------------------------------------
+// EE STARTUP/SHUTDOWN EVENTS
+//
+
+HRESULT EEToProfInterfaceImpl::Shutdown()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: Notifying profiler that shutdown is beginning.\n"));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->Shutdown();
+ }
+}
+
+//---------------------------------------------------------------------------------------
+// JIT/FUNCTION EVENTS
+//
+
+HRESULT EEToProfInterfaceImpl::FunctionUnloadStarted(FunctionID functionId)
+{
+ _ASSERTE(!"FunctionUnloadStarted() callback no longer issued");
+ return S_OK;
+}
+
+HRESULT EEToProfInterfaceImpl::JITCompilationFinished(FunctionID functionId,
+ HRESULT hrStatus,
+ BOOL fIsSafeToBlock)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // The JIT / MethodDesc code likely holds locks while this callback is made
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: JITCompilationFinished 0x%p, hr=0x%08x.\n",
+ functionId,
+ hrStatus));
+
+ _ASSERTE(functionId);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->JITCompilationFinished(functionId, hrStatus, fIsSafeToBlock);
+ }
+}
+
+
+HRESULT EEToProfInterfaceImpl::JITCompilationStarted(FunctionID functionId,
+ BOOL fIsSafeToBlock)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // The JIT / MethodDesc code likely holds locks while this callback is made
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: JITCompilationStarted 0x%p.\n",
+ functionId));
+
+ // Currently JITCompilationStarted is always called with fIsSafeToBlock==TRUE. If this ever changes,
+ // it's safe to remove this assert, but this should serve as a trigger to change our
+ // public documentation to state that this callback is no longer called in preemptive mode all the time.
+ _ASSERTE(fIsSafeToBlock);
+
+ _ASSERTE(functionId);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->JITCompilationStarted(functionId, fIsSafeToBlock);
+ }
+}
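+
+// A common use of this callback, sketched from the profiler's side (hypothetical code):
+// resolve the FunctionID to metadata and fetch the original IL before the JIT consumes it:
+//
+//     HRESULT STDMETHODCALLTYPE MyProfiler::JITCompilationStarted(FunctionID functionId,
+//                                                                 BOOL fIsSafeToBlock)
+//     {
+//         ClassID classId; ModuleID moduleId; mdToken token;
+//         HRESULT hr = m_pInfo3->GetFunctionInfo(functionId, &classId, &moduleId, &token);
+//         if (SUCCEEDED(hr))
+//         {
+//             LPCBYTE pbIL; ULONG cbIL;
+//             hr = m_pInfo3->GetILFunctionBody(moduleId, (mdMethodDef)token, &pbIL, &cbIL);
+//             // ... inspect, or rewrite via SetILFunctionBody, before JITting completes ...
+//         }
+//         return S_OK;
+//     }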
+
+HRESULT EEToProfInterfaceImpl::JITCachedFunctionSearchStarted(
+ /* [in] */ FunctionID functionId,
+ /* [out] */ BOOL *pbUseCachedFunction)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // The JIT / MethodDesc code likely holds locks while this callback is made
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: JITCachedFunctionSearchStarted 0x%p.\n",
+ functionId));
+ _ASSERTE(functionId);
+ _ASSERTE(pbUseCachedFunction != NULL);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->JITCachedFunctionSearchStarted(functionId, pbUseCachedFunction);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::JITCachedFunctionSearchFinished(
+ /* [in] */ FunctionID functionId,
+ /* [in] */ COR_PRF_JIT_CACHE result)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // The JIT / MethodDesc code likely holds locks while this callback is made
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: JITCachedFunctionSearchFinished 0x%p, %s.\n",
+ functionId,
+ (result == COR_PRF_CACHED_FUNCTION_FOUND ?
+ "Cached function found" :
+ "Cached function not found")));
+
+ _ASSERTE(functionId);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->JITCachedFunctionSearchFinished(functionId, result);
+ }
+}
+
+
+HRESULT EEToProfInterfaceImpl::JITFunctionPitched(FunctionID functionId)
+{
+ _ASSERTE(!"JITFunctionPitched() callback no longer issued");
+ return S_OK;
+}
+
+HRESULT EEToProfInterfaceImpl::JITInlining(
+ /* [in] */ FunctionID callerId,
+ /* [in] */ FunctionID calleeId,
+ /* [out] */ BOOL * pfShouldInline)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // The JIT / MethodDesc code likely holds locks while this callback is made
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: JITInlining caller: 0x%p, callee: 0x%p.\n",
+ callerId,
+ calleeId));
+
+ _ASSERTE(callerId);
+ _ASSERTE(calleeId);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->JITInlining(callerId, calleeId, pfShouldInline);
+ }
+}
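+
+// The out parameter gives the profiler veto power over each individual inline. A
+// hypothetical profiler-side sketch:
+//
+//     HRESULT STDMETHODCALLTYPE MyProfiler::JITInlining(FunctionID callerId,
+//                                                       FunctionID calleeId,
+//                                                       BOOL * pfShouldInline)
+//     {
+//         // Refuse inlining for methods we plan to instrument or ReJIT later, so their
+//         // enter/leave events and IL rewrites are not lost inside their callers.
+//         // IsMethodOfInterest is an illustrative helper, not a real API.
+//         *pfShouldInline = !IsMethodOfInterest(calleeId);
+//         return S_OK;
+//     }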
+
+HRESULT EEToProfInterfaceImpl::ReJITCompilationStarted(
+ /* [in] */ FunctionID functionId,
+ /* [in] */ ReJITID reJitId,
+ /* [in] */ BOOL fIsSafeToBlock)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // The JIT / MethodDesc code likely holds locks while this callback is made
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ReJITCompilationStarted 0x%p 0x%p.\n",
+ functionId, reJitId));
+
+ // Should only be called on profilers that support ICorProfilerCallback4
+ _ASSERTE(m_pCallback4 != NULL);
+
+ // Currently ReJITCompilationStarted is always called with fIsSafeToBlock==TRUE. If this ever changes,
+ // it's safe to remove this assert, but this should serve as a trigger to change our
+ // public documentation to state that this callback is no longer called in preemptive mode all the time.
+ _ASSERTE(fIsSafeToBlock);
+
+ _ASSERTE(functionId);
+ _ASSERTE(reJitId);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback4->ReJITCompilationStarted(functionId, reJitId, fIsSafeToBlock);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::GetReJITParameters(
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ mdMethodDef methodId,
+ /* [in] */ ICorProfilerFunctionControl *
+ pFunctionControl)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // The ReJIT code holds a lock while this callback is made
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetReJITParameters 0x%p 0x%p.\n",
+ moduleId, methodId));
+
+ // Should only be called on profilers that support ICorProfilerCallback4
+ _ASSERTE(m_pCallback4 != NULL);
+
+ _ASSERTE(moduleId);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback4->GetReJITParameters(moduleId, methodId, pFunctionControl);
+ }
+}
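+
+// From the profiler's side, GetReJITParameters is where replacement IL is supplied
+// through the ICorProfilerFunctionControl passed in. A hypothetical sketch
+// (BuildInstrumentedIL is an illustrative helper):
+//
+//     HRESULT STDMETHODCALLTYPE MyProfiler::GetReJITParameters(ModuleID moduleId,
+//                                                              mdMethodDef methodId,
+//                                                              ICorProfilerFunctionControl * pControl)
+//     {
+//         LPCBYTE pbNewIL; ULONG cbNewIL;
+//         BuildInstrumentedIL(moduleId, methodId, &pbNewIL, &cbNewIL);
+//         return pControl->SetILFunctionBody(cbNewIL, pbNewIL);
+//     }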
+
+HRESULT EEToProfInterfaceImpl::ReJITCompilationFinished(
+ /* [in] */ FunctionID functionId,
+ /* [in] */ ReJITID reJitId,
+ /* [in] */ HRESULT hrStatus,
+ /* [in] */ BOOL fIsSafeToBlock)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // ReJit holds a lock as well as possibly others...
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ReJITCompilationFinished 0x%p 0x%p hr=0x%x.\n",
+ functionId, reJitId, hrStatus));
+
+ // Should only be called on profilers that support ICorProfilerCallback4
+ _ASSERTE(m_pCallback4 != NULL);
+
+ _ASSERTE(functionId);
+ _ASSERTE(reJitId);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback4->ReJITCompilationFinished(functionId, reJitId, hrStatus, fIsSafeToBlock);
+ }
+}
+
+
+HRESULT EEToProfInterfaceImpl::ReJITError(
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ mdMethodDef methodId,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ HRESULT hrStatus)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ReJITError 0x%p 0x%x 0x%p 0x%x.\n",
+ moduleId, methodId, functionId, hrStatus));
+
+ // Should only be called on profilers that support ICorProfilerCallback4
+ _ASSERTE(m_pCallback4 != NULL);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback4->ReJITError(moduleId, methodId, functionId, hrStatus);
+ }
+}
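+
+// The ReJIT callbacks above are driven by the profiler calling ICorProfilerInfo4. A
+// sketch, with the one-element arrays being illustrative:
+//
+//     ModuleID    moduleIds[1] = { moduleId };
+//     mdMethodDef methodIds[1] = { methodId };
+//     HRESULT hr = m_pInfo4->RequestReJIT(1, moduleIds, methodIds);
+//     // On success the runtime later calls GetReJITParameters, then
+//     // ReJITCompilationStarted / ReJITCompilationFinished; per-method failures are
+//     // reported through ReJITError.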
+
+//---------------------------------------------------------------------------------------
+// MODULE EVENTS
+//
+
+HRESULT EEToProfInterfaceImpl::ModuleLoadStarted(ModuleID moduleId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // This has historically not run in preemptive, and is called from cooperative-mode
+ // functions. However, since we're triggers, it might actually be safe to consider
+ // letting this run in preemptive mode.
+ MODE_COOPERATIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: ModuleLoadStarted 0x%p.\n",
+ moduleId));
+
+ _ASSERTE(moduleId != 0);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ModuleLoadStarted(moduleId);
+ }
+}
+
+
+HRESULT EEToProfInterfaceImpl::ModuleLoadFinished(
+ ModuleID moduleId,
+ HRESULT hrStatus)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: ModuleLoadFinished 0x%p.\n",
+ moduleId));
+
+ _ASSERTE(moduleId != 0);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ModuleLoadFinished(moduleId, hrStatus);
+ }
+}
+
+
+
+HRESULT EEToProfInterfaceImpl::ModuleUnloadStarted(
+ ModuleID moduleId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: ModuleUnloadStarted 0x%p.\n",
+ moduleId));
+
+ _ASSERTE(moduleId != 0);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ModuleUnloadStarted(moduleId);
+ }
+}
+
+
+HRESULT EEToProfInterfaceImpl::ModuleUnloadFinished(
+ ModuleID moduleId,
+ HRESULT hrStatus)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: ModuleUnloadFinished 0x%p.\n",
+ moduleId));
+ _ASSERTE(moduleId != 0);
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ModuleUnloadFinished(moduleId, hrStatus);
+ }
+}
+
+
+HRESULT EEToProfInterfaceImpl::ModuleAttachedToAssembly(
+ ModuleID moduleId,
+ AssemblyID AssemblyId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: ModuleAttachedToAssembly 0x%p, 0x%p.\n",
+ moduleId,
+ AssemblyId));
+
+ _ASSERTE(moduleId != 0);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ModuleAttachedToAssembly(moduleId, AssemblyId);
+ }
+}
+
+//---------------------------------------------------------------------------------------
+// CLASS EVENTS
+//
+
+HRESULT EEToProfInterfaceImpl::ClassLoadStarted(
+ ClassID classId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // UnresolvedClassLock typically held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO100,
+ "**PROF: ClassLoadStarted 0x%p.\n",
+ classId));
+
+ _ASSERTE(classId);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ClassLoadStarted(classId);
+ }
+}
+
+
+HRESULT EEToProfInterfaceImpl::ClassLoadFinished(
+ ClassID classId,
+ HRESULT hrStatus)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // UnresolvedClassLock typically held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO100,
+ "**PROF: ClassLoadFinished 0x%p, 0x%08x.\n",
+ classId,
+ hrStatus));
+
+ _ASSERTE(classId);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ClassLoadFinished(classId, hrStatus);
+ }
+}
+
+
+HRESULT EEToProfInterfaceImpl::ClassUnloadStarted(
+ ClassID classId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Although not typical, it's possible for UnresolvedClassLock to be held
+ // during this callback. This can occur if, during the class load, an
+ // exception is thrown, and EEClass::Destruct is called from the catch clause
+ // inside ClassLoader::CreateTypeHandleForTypeDefThrowing.
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO100,
+ "**PROF: ClassUnloadStarted 0x%p.\n",
+ classId));
+
+ _ASSERTE(classId);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ClassUnloadStarted(classId);
+ }
+}
+
+
+HRESULT EEToProfInterfaceImpl::ClassUnloadFinished(
+ ClassID classId,
+ HRESULT hrStatus)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Locks can be held when this is called. See comment in ClassUnloadStarted
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO100,
+ "**PROF: ClassUnloadFinished 0x%p, 0x%08x.\n",
+ classId,
+ hrStatus));
+
+ _ASSERTE(classId);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ClassUnloadFinished(classId, hrStatus);
+ }
+}
+
+//---------------------------------------------------------------------------------------
+// APPDOMAIN EVENTS
+//
+
+HRESULT EEToProfInterfaceImpl::AppDomainCreationStarted(
+ AppDomainID appDomainId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: AppDomainCreationStarted 0x%p.\n",
+ appDomainId));
+
+ _ASSERTE(appDomainId != 0);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->AppDomainCreationStarted(appDomainId);
+ }
+}
+
+
+HRESULT EEToProfInterfaceImpl::AppDomainCreationFinished(
+ AppDomainID appDomainId,
+ HRESULT hrStatus)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: AppDomainCreationFinished 0x%p, 0x%08x.\n",
+ appDomainId,
+ hrStatus));
+
+ _ASSERTE(appDomainId != 0);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->AppDomainCreationFinished(appDomainId, hrStatus);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::AppDomainShutdownStarted(
+ AppDomainID appDomainId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: AppDomainShutdownStarted 0x%p.\n",
+ appDomainId));
+
+ _ASSERTE(appDomainId != 0);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->AppDomainShutdownStarted(appDomainId);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::AppDomainShutdownFinished(
+ AppDomainID appDomainId,
+ HRESULT hrStatus)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: AppDomainShutdownFinished 0x%p, 0x%08x.\n",
+ appDomainId,
+ hrStatus));
+
+ _ASSERTE(appDomainId != 0);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->AppDomainShutdownFinished(appDomainId, hrStatus);
+ }
+}
+
+//---------------------------------------------------------------------------------------
+// ASSEMBLY EVENTS
+//
+
+HRESULT EEToProfInterfaceImpl::AssemblyLoadStarted(
+ AssemblyID assemblyId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // This has historically not run in preemptive, and is called from cooperative-mode
+ // functions. However, since we're triggers, it might actually be safe to consider
+ // letting this run in preemptive mode.
+ MODE_COOPERATIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: AssemblyLoadStarted 0x%p.\n",
+ assemblyId));
+
+ _ASSERTE(assemblyId != 0);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->AssemblyLoadStarted(assemblyId);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::AssemblyLoadFinished(
+ AssemblyID assemblyId,
+ HRESULT hrStatus)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // This has historically not run in preemptive, and is called from cooperative-mode
+ // functions. However, since we're triggers, it might actually be safe to consider
+ // letting this run in preemptive mode.
+ MODE_COOPERATIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: AssemblyLoadFinished 0x%p, 0x%08x.\n",
+ assemblyId,
+ hrStatus));
+
+ _ASSERTE(assemblyId != 0);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->AssemblyLoadFinished(assemblyId, hrStatus);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::AssemblyUnloadStarted(
+ AssemblyID assemblyId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: AssemblyUnloadStarted 0x%p.\n",
+ assemblyId));
+
+ _ASSERTE(assemblyId != 0);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->AssemblyUnloadStarted(assemblyId);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::AssemblyUnloadFinished(
+ AssemblyID assemblyId,
+ HRESULT hrStatus)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: AssemblyUnloadFinished 0x%p, 0x%08x.\n",
+ assemblyId,
+ hrStatus));
+
+ _ASSERTE(assemblyId != 0);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->AssemblyUnloadFinished(assemblyId, hrStatus);
+ }
+}
+
+//---------------------------------------------------------------------------------------
+// TRANSITION EVENTS
+//
+
+HRESULT EEToProfInterfaceImpl::UnmanagedToManagedTransition(
+ FunctionID functionId,
+ COR_PRF_TRANSITION_REASON reason)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10000,
+ "**PROF: UnmanagedToManagedTransition 0x%p.\n",
+ functionId));
+
+ _ASSERTE(reason == COR_PRF_TRANSITION_CALL || reason == COR_PRF_TRANSITION_RETURN);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->UnmanagedToManagedTransition(functionId, reason);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ManagedToUnmanagedTransition(
+ FunctionID functionId,
+ COR_PRF_TRANSITION_REASON reason)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(reason == COR_PRF_TRANSITION_CALL || reason == COR_PRF_TRANSITION_RETURN);
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10000,
+ "**PROF: ManagedToUnmanagedTransition 0x%p.\n",
+ functionId));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ManagedToUnmanagedTransition(functionId, reason);
+ }
+}
+
+//---------------------------------------------------------------------------------------
+// EXCEPTION EVENTS
+//
+
+HRESULT EEToProfInterfaceImpl::ExceptionThrown(
+ ObjectID thrownObjectId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Preemptive mode would be bad, dude. There's an objectId in the param list!
+ MODE_COOPERATIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ExceptionThrown. ObjectID: 0x%p. ThreadID: 0x%p\n",
+ thrownObjectId,
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ExceptionThrown(thrownObjectId);
+ }
+}
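+
+// Note the MODE_COOPERATIVE contract: thrownObjectId is a raw object address that a GC
+// could move, so the profiler may only use it synchronously inside the callback. A
+// hypothetical sketch:
+//
+//     HRESULT STDMETHODCALLTYPE MyProfiler::ExceptionThrown(ObjectID thrownObjectId)
+//     {
+//         ClassID classId;
+//         if (SUCCEEDED(m_pInfo3->GetClassFromObject(thrownObjectId, &classId)))
+//         {
+//             // map classId to a type name via metadata, bump a per-type counter, etc.
+//         }
+//         return S_OK;   // do NOT cache thrownObjectId past this return
+//     }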
+
+HRESULT EEToProfInterfaceImpl::ExceptionSearchFunctionEnter(
+ FunctionID functionId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ExceptionSearchFunctionEnter. ThreadID: 0x%p, functionId: 0x%p\n",
+ GetThread(),
+ functionId));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ExceptionSearchFunctionEnter(functionId);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ExceptionSearchFunctionLeave()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ExceptionSearchFunctionLeave. ThreadID: 0x%p\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ExceptionSearchFunctionLeave();
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ExceptionSearchFilterEnter(FunctionID functionId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ExceptionSearchFilterEnter. ThreadID: 0x%p, functionId: 0x%p\n",
+ GetThread(),
+ functionId));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ExceptionSearchFilterEnter(functionId);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ExceptionSearchFilterLeave()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ExceptionFilterLeave. ThreadID: 0x%p\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ExceptionSearchFilterLeave();
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ExceptionSearchCatcherFound(FunctionID functionId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ExceptionSearchCatcherFound. ThreadID: 0x%p\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ExceptionSearchCatcherFound(functionId);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ExceptionOSHandlerEnter(FunctionID functionId)
+{
+ _ASSERTE(!"ExceptionOSHandlerEnter() callback no longer issued");
+ return S_OK;
+}
+
+HRESULT EEToProfInterfaceImpl::ExceptionOSHandlerLeave(FunctionID functionId)
+{
+ _ASSERTE(!"ExceptionOSHandlerLeave() callback no longer issued");
+ return S_OK;
+}
+
+HRESULT EEToProfInterfaceImpl::ExceptionUnwindFunctionEnter(FunctionID functionId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Called by COMPlusUnwindCallback, which is notrigger
+ GC_NOTRIGGER;
+
+ // Cannot enable preemptive GC here, since the stack may not be in a GC-friendly state.
+ // Thus, the profiler cannot block on this call.
+ MODE_ANY;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ExceptionUnwindFunctionEnter. ThreadID: 0x%p, functionId: 0x%p\n",
+ GetThread(),
+ functionId));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ExceptionUnwindFunctionEnter(functionId);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ExceptionUnwindFunctionLeave()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Called by COMPlusUnwindCallback, which is notrigger
+ GC_NOTRIGGER;
+
+ // Cannot enable preemptive GC here, since the stack may not be in a GC-friendly state.
+ // Thus, the profiler cannot block on this call.
+ MODE_ANY;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ExceptionUnwindFunctionLeave. ThreadID: 0x%p\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ExceptionUnwindFunctionLeave();
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ExceptionUnwindFinallyEnter(FunctionID functionId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Called by COMPlusUnwindCallback, which is notrigger
+ GC_NOTRIGGER;
+
+ // Cannot enable preemptive GC here, since the stack may not be in a GC-friendly state.
+ // Thus, the profiler cannot block on this call.
+ MODE_COOPERATIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ExceptionUnwindFinallyEnter. ThreadID: 0x%p, functionId: 0x%p\n",
+ GetThread(),
+ functionId));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ExceptionUnwindFinallyEnter(functionId);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ExceptionUnwindFinallyLeave()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Called by COMPlusUnwindCallback, which is notrigger
+ GC_NOTRIGGER;
+
+ // Cannot enable preemptive GC here, since the stack may not be in a GC-friendly state.
+ // Thus, the profiler cannot block on this call.
+ MODE_COOPERATIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ExceptionUnwindFinallyLeave. ThreadID: 0x%p\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ExceptionUnwindFinallyLeave();
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ExceptionCatcherEnter(FunctionID functionId, ObjectID objectId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Called by COMPlusUnwindCallback, which is notrigger
+ GC_NOTRIGGER;
+
+ // Cannot enable preemptive GC here, since the stack may not be in a GC-friendly state.
+ // Thus, the profiler cannot block on this call.
+ MODE_COOPERATIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO1000, "**PROF: ExceptionCatcherEnter. ThreadID: 0x%p, functionId: 0x%p\n",
+ GetThread(),
+ functionId));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ExceptionCatcherEnter(functionId, objectId);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ExceptionCatcherLeave()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Cannot enable preemptive GC here, since the stack may not be in a GC-friendly state.
+ // Thus, the profiler cannot block on this call.
+ MODE_COOPERATIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ExceptionCatcherLeave. ThreadID: 0x%p\n",
+ GetThread()));
+
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ExceptionCatcherLeave();
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+// COM Callable Wrapper EVENTS
+//
+
+HRESULT EEToProfInterfaceImpl::COMClassicVTableCreated(
+ /* [in] */ ClassID classId,
+ /* [in] */ REFGUID implementedIID,
+ /* [in] */ void *pVTable,
+ /* [in] */ ULONG cSlots)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO100,
+ "**PROF: COMClassicWrapperCreated %#x %#08x... %#x %d.\n",
+ classId,
+ implementedIID.Data1,
+ pVTable,
+ cSlots));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->COMClassicVTableCreated(classId, implementedIID, pVTable, cSlots);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::COMClassicVTableDestroyed(
+ /* [in] */ ClassID classId,
+ /* [in] */ REFGUID implementedIID,
+ /* [in] */ void *pVTable)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // NOTE: There is no problem with this code, and it is ready and willing
+ // to be called. However, this callback is intentionally not being
+ // issued currently. See comment in ComMethodTable::Cleanup() for more
+ // information.
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO100,
+ "**PROF: COMClassicWrapperDestroyed %#x %#08x... %#x.\n",
+ classId,
+ implementedIID.Data1,
+ pVTable));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->COMClassicVTableDestroyed(classId, implementedIID, pVTable);
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+// GC THREADING EVENTS
+//
+
+HRESULT EEToProfInterfaceImpl::RuntimeSuspendStarted(
+ COR_PRF_SUSPEND_REASON suspendReason)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Although the contract system doesn't yell if I mark this GC_TRIGGERS, it's safest
+ // not to allow a GC to occur while we're suspending / resuming the runtime, as this
+ // is the thread trying to do a GC. So if the profiler tries to trigger another GC from
+ // this thread at this time, we might see potential recursion or deadlock.
+ GC_NOTRIGGER;
+
+ MODE_ANY;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock is typically held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO100,
+ "**PROF: RuntimeSuspendStarted. ThreadID 0x%p.\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RuntimeSuspendStarted(suspendReason);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::RuntimeSuspendFinished()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Although the contract system doesn't yell if I mark this GC_TRIGGERS, it's safest
+ // not to allow a GC to occur while we're suspending / resuming the runtime, as this
+ // is the thread trying to do a GC. So if the profiler tries to trigger another GC from
+ // this thread at this time, we might see potential recursion or deadlock.
+ GC_NOTRIGGER;
+
+ MODE_ANY;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock is typically held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO100,
+ "**PROF: RuntimeSuspendFinished. ThreadID 0x%p.\n",
+ GetThread()));
+
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RuntimeSuspendFinished();
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::RuntimeSuspendAborted()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Although the contract system doesn't yell if I mark this GC_TRIGGERS, it's safest
+ // not to allow a GC to occur while we're suspending / resuming the runtime, as this
+ // is the thread trying to do a GC. So if the profiler tries to trigger another GC from
+ // this thread at this time, we might see potential recursion or deadlock.
+ GC_NOTRIGGER;
+
+ // NOTE: I have no empirical data for GC mode: none of the self-host BVTs call this.
+ // So for now, assume this is callable in any mode.
+ // This has historically not caused a mode change to preemptive, and is called from
+ // cooperative-mode functions. Also, switching to preemptive while we're suspending
+ // the runtime just seems like a bad idea.
+ MODE_ANY;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock is typically held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO100,
+ "**PROF: RuntimeSuspendAborted. ThreadID 0x%p.\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RuntimeSuspendAborted();
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::RuntimeResumeStarted()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // This has historically not caused a mode change to preemptive, and is called from
+ // cooperative-mode functions. Also, switching to preemptive while we're resuming
+ // the runtime just seems like a bad idea.
+ MODE_ANY;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock is typically held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO100,
+ "**PROF: RuntimeResumeStarted. ThreadID 0x%p.\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RuntimeResumeStarted();
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::RuntimeResumeFinished()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock is typically held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO100,
+ "**PROF: RuntimeResumeFinished. ThreadID 0x%p.\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RuntimeResumeFinished();
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::RuntimeThreadSuspended(ThreadID suspendedThreadId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Called by Thread::SuspendThread, which is notrigger.
+ GC_NOTRIGGER;
+
+ // Although I've verified we're called from both coop and preemp, we need to
+ // avoid switching to preemptive to satisfy our notrigger paths.
+ MODE_ANY;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock is typically held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ if (reinterpret_cast<Thread *>(suspendedThreadId)->IsGCSpecial())
+ return S_OK;
+
+ // NOTE: We cannot use the standard CLR_TO_PROFILER_ENTRYPOINT macro here because
+ // we might be called at a time when profiler callbacks have been disallowed for
+ // this thread. So we cannot simply ASSERT that callbacks are allowed (as this macro
+ // does). Instead, we must explicitly check for this condition and return gracefully
+    // if callbacks are disallowed. So the macro is unwrapped here manually.
+
+ CHECK_PROFILER_STATUS(kEE2PNone);
+
+ LOG((LF_CORPROF, LL_INFO1000, "**PROF: RuntimeThreadSuspended. ThreadID 0x%p.\n",
+ suspendedThreadId));
+
+ // NOTE: We're notrigger, so we cannot switch to preemptive mode.
+
+ // We may have already indicated to the profiler that this thread has died, but
+ // the runtime may continue to suspend this thread during the process of destroying
+ // the thread, so we do not want to indicate to the profiler these suspensions.
+ if (!ProfilerCallbacksAllowedForThread((Thread *) suspendedThreadId))
+ {
+ return S_OK;
+ }
+
+ // Remaining essentials from our entrypoint macros with kEE2PNoTrigger flag
+ SetCallbackStateFlagsHolder csf(COR_PRF_CALLBACKSTATE_INCALLBACK);
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ _ASSERTE(m_pCallback2 != NULL);
+
+ {
+ // SCOPE: ForbidSuspendThreadHolder
+
+ // The ForbidSuspendThreadHolder prevents deadlocks under the following scenario:
+ // 1) Thread A blocks waiting for the current GC to complete (this can happen if A is trying to
+ // switch to cooperative during a GC).
+ // 2) This causes us to send a RuntimeThreadSuspended callback to the profiler. (Although
+ // A isn't technically being "suspended", this blocking is considered suspension as far as the
+ // profapi is concerned.)
+ // 3) Profiler, in turn, may take one of its own private locks to synchronize this callback with
+ // the profiler's attempt to hijack thread A. Specifically, the profiler knows it's not allowed
+ // to hijack A if A is getting suspended by the runtime, because this suspension might be due to
+ // the GC trying to hijack A. And if the GC tries to hijack A at the same time as the profiler
+ // hijacking A and the profiler wins, then GC asserts because A is no longer at the IP that
+ // the GC thought (VsWhidbey 428477, 429741)
+ // 4) Meanwhile, thread B (GC thread) is suspending the runtime, and calls Thread::SuspendThread()
+ // on A. This is the bad thing we're trying to avoid, because when this happens, we call into
+ // the profiler AGAIN with RuntimeThreadSuspended for thread A, and the profiler again
+ // tries to grab the lock it acquired in step 3). Yes, at this point we now have two simultaneous
+ // calls into the profiler's RuntimeThreadSuspended() callback. One saying A is suspending A
+ // (3 above), and one saying B is suspending A (this step (4)). The problem is that A is now officially
+        //    hard suspended, OS-style, so the lock acquired in 3) will not be released until
+        //    A is resumed. But A won't be resumed until B resumes it. And B won't resume A until
+        //    the profiler returns from its RuntimeThreadSuspended callback. And the profiler
+        //    can't return from its RuntimeThreadSuspended callback until it acquires the lock it tried to
+        //    acquire in 4). And it can't acquire that lock until A is finally resumed so that the lock
+        //    acquired in 3) is released. Have we gone in a circle yet?
+        //    In order to avoid 4) we inc the ForbidSuspendThread count during 3) to prevent the hard suspension
+        //    (4) from occurring until 3) is completely done. It's sufficient to determine we're in 3) by noting
+        //    whether the callback is reporting that a thread is "suspending itself" (i.e., suspendedThreadId refers to the current thread).
+
+ ForbidSuspendThreadHolder forbidSuspendThread((Thread *) suspendedThreadId == GetThread());
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RuntimeThreadSuspended(suspendedThreadId);
+ }
+ }
+}
+
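+// Illustrative note (not part of the original change): ForbidSuspendThreadHolder is a
+// conditional state holder. Roughly speaking, when its constructor argument is TRUE it
+// behaves like the sketch below, which is how the hard OS-level suspension in step 4)
+// above is held off until the self-suspension callback from step 3) has returned:
+//
+//     Thread::IncForbidSuspendThread();    // on construction (condition was TRUE)
+//     hr = m_pCallback2->RuntimeThreadSuspended(suspendedThreadId);
+//     Thread::DecForbidSuspendThread();    // on destruction
+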
+HRESULT EEToProfInterfaceImpl::RuntimeThreadResumed(ThreadID resumedThreadId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+        // This gets called in response to another profapi function:
+        // ICorProfilerInfo2::DoStackSnapshot, which is called asynchronously and
+        // must therefore never cause a GC.
+        // Other reasons for notrigger: also called by the notrigger functions Thread::SysStartSuspendForDebug,
+        // CheckSuspended, Thread::IsRunningIn, Thread::IsExecutingWithinCer, and
+        // UnwindFrames.
+ GC_NOTRIGGER;
+
+        // Although we cannot trigger, we have verified empirically that this is called in both coop & preemp.
+ MODE_ANY;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock is typically held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ if (reinterpret_cast<Thread *>(resumedThreadId)->IsGCSpecial())
+ return S_OK;
+
+ // NOTE: We cannot use the standard CLR_TO_PROFILER_ENTRYPOINT macro here because
+ // we might be called at a time when profiler callbacks have been disallowed for
+ // this thread. So we cannot simply ASSERT that callbacks are allowed (as this macro
+ // does). Instead, we must explicitly check for this condition and return gracefully
+    // if callbacks are disallowed. So the macro is unwrapped here manually.
+
+ CHECK_PROFILER_STATUS(kEE2PNone);
+
+ LOG((LF_CORPROF, LL_INFO1000, "**PROF: RuntimeThreadResumed. ThreadID 0x%p.\n", resumedThreadId));
+
+ // NOTE: We're notrigger, so we cannot switch to preemptive mode.
+
+ // We may have already indicated to the profiler that this thread has died, but
+ // the runtime may resume this thread during the process of destroying
+ // the thread, so we do not want to indicate to the profiler these resumes.
+ if (!ProfilerCallbacksAllowedForThread((Thread *) resumedThreadId))
+ {
+ return S_OK;
+ }
+
+ // Remaining essentials from our entrypoint macros with kEE2PNoTrigger flag
+ SetCallbackStateFlagsHolder csf(COR_PRF_CALLBACKSTATE_INCALLBACK);
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ _ASSERTE(m_pCallback2 != NULL);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RuntimeThreadResumed(resumedThreadId);
+ }
+}
+
+//---------------------------------------------------------------------------------------
+// REMOTING
+//
+
+HRESULT EEToProfInterfaceImpl::RemotingClientInvocationStarted()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: RemotingClientInvocationStarted. ThreadID: 0x%p\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RemotingClientInvocationStarted();
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::RemotingClientSendingMessage(GUID *pCookie, BOOL fIsAsync)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: RemotingClientSendingMessage. ThreadID: 0x%p\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RemotingClientSendingMessage(pCookie, fIsAsync);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::RemotingClientReceivingReply(GUID * pCookie, BOOL fIsAsync)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: RemotingClientReceivingReply. ThreadID: 0x%p\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RemotingClientReceivingReply(pCookie, fIsAsync);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::RemotingClientInvocationFinished()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: RemotingClientInvocationFinished. ThreadID: 0x%p\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RemotingClientInvocationFinished();
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::RemotingServerReceivingMessage(GUID *pCookie, BOOL fIsAsync)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: RemotingServerReceivingMessage. ThreadID: 0x%p\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RemotingServerReceivingMessage(pCookie, fIsAsync);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::RemotingServerInvocationStarted()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: RemotingServerInvocationStarted. ThreadID: 0x%p\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RemotingServerInvocationStarted();
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::RemotingServerInvocationReturned()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: RemotingServerInvocationReturned. ThreadID: 0x%p\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RemotingServerInvocationReturned();
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::RemotingServerSendingReply(GUID *pCookie, BOOL fIsAsync)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: RemotingServerSendingReply. ThreadID: 0x%p\n",
+ GetThread()));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->RemotingServerSendingReply(pCookie, fIsAsync);
+ }
+}
+
+//---------------------------------------------------------------------------------------
+// GC EVENTS
+//
+
+HRESULT EEToProfInterfaceImpl::ObjectAllocated(
+ /* [in] */ ObjectID objectId,
+ /* [in] */ ClassID classId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+        // Preemptive mode would be bad: there's an objectId in the param list!
+ MODE_COOPERATIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // CrstAppDomainHandleTable can be held while this is called
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ObjectAllocated. ObjectID: 0x%p. ClassID: 0x%p\n",
+ objectId,
+ classId));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ObjectAllocated(objectId, classId);
+ }
+}
+
+
+HRESULT EEToProfInterfaceImpl::MovedReferences(GCReferencesData *pData)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // This is called by the thread doing a GC WHILE it does the GC
+ GC_NOTRIGGER;
+
+ // This is called by the thread doing a GC WHILE it does the GC
+ if (GetThreadNULLOk()) { MODE_COOPERATIVE; }
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock normally held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO10000,
+ "**PROF: MovedReferences.\n"));
+
+ _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+
+ if (pData->curIdx == 0)
+ {
+ return S_OK;
+ }
+
+ HRESULT hr = S_OK;
+
+ if (pData->compactingCount != 0)
+ {
+ _ASSERTE(pData->curIdx == pData->compactingCount);
+
+ if (m_pCallback4 != NULL)
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ hr = m_pCallback4->MovedReferences2((ULONG)pData->curIdx,
+ (ObjectID *)pData->arrpbMemBlockStartOld,
+ (ObjectID *)pData->arrpbMemBlockStartNew,
+ (SIZE_T *)pData->arrMemBlockSize);
+ if (FAILED(hr))
+ return hr;
+ }
+
+#ifdef _WIN64
+ // Recompute sizes as ULONGs for legacy callback
+ for (ULONG i = 0; i < pData->curIdx; i++)
+ pData->arrULONG[i] = (pData->arrMemBlockSize[i] > ULONG_MAX) ? ULONG_MAX : (ULONG)pData->arrMemBlockSize[i];
+#endif
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ hr = m_pCallback2->MovedReferences((ULONG)pData->curIdx,
+ (ObjectID *)pData->arrpbMemBlockStartOld,
+ (ObjectID *)pData->arrpbMemBlockStartNew,
+ pData->arrULONG);
+ }
+ }
+ else
+ {
+ if (m_pCallback4 != NULL)
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ hr = m_pCallback4->SurvivingReferences2((ULONG)pData->curIdx,
+ (ObjectID *)pData->arrpbMemBlockStartOld,
+ (SIZE_T *)pData->arrMemBlockSize);
+ if (FAILED(hr))
+ return hr;
+ }
+
+#ifdef _WIN64
+ // Recompute sizes as ULONGs for legacy callback
+ for (ULONG i = 0; i < pData->curIdx; i++)
+ pData->arrULONG[i] = (pData->arrMemBlockSize[i] > ULONG_MAX) ? ULONG_MAX : (ULONG)pData->arrMemBlockSize[i];
+#endif
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ hr = m_pCallback2->SurvivingReferences((ULONG)pData->curIdx,
+ (ObjectID *)pData->arrpbMemBlockStartOld,
+ pData->arrULONG);
+ }
+ }
+
+ return hr;
+}
+
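+// Illustrative note (not part of the original change): the parallel arrays handed to
+// MovedReferences2 above describe the moved block ranges, so a profiler can translate
+// a tracked ObjectID to its post-compaction address along these lines (names here are
+// hypothetical; an ID that falls in no range simply did not move):
+//
+//     ObjectID TranslateMovedObjectID(ObjectID id, ULONG cMoved,
+//                                     ObjectID * oldStart, ObjectID * newStart,
+//                                     SIZE_T * cbRange)
+//     {
+//         for (ULONG i = 0; i < cMoved; i++)
+//         {
+//             if ((id >= oldStart[i]) && (id < oldStart[i] + cbRange[i]))
+//                 return newStart[i] + (id - oldStart[i]);    // block moved; rebase
+//         }
+//         return id;    // not inside any moved block
+//     }
+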
+HRESULT EEToProfInterfaceImpl::NotifyAllocByClass(AllocByClassData *pData)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // This is called by the thread doing a GC WHILE it does the GC
+ GC_NOTRIGGER;
+
+ // This is called by the thread doing a GC WHILE it does the GC
+ if (GetThreadNULLOk()) { MODE_COOPERATIVE; }
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock normally held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO10000,
+ "**PROF: ObjectsAllocatedByClass.\n"));
+
+ _ASSERTE(pData != NULL);
+ _ASSERTE(pData->iHash > 0);
+
+ // If the arrays are not long enough, get rid of them.
+ if (pData->cLength != 0 && pData->iHash > pData->cLength)
+ {
+ _ASSERTE(pData->arrClsId != NULL && pData->arrcObjects != NULL);
+ delete [] pData->arrClsId;
+ delete [] pData->arrcObjects;
+ pData->cLength = 0;
+ }
+
+ // If there are no arrays, must allocate them.
+ if (pData->cLength == 0)
+ {
+ pData->arrClsId = new (nothrow) ClassID[pData->iHash];
+ if (pData->arrClsId == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ pData->arrcObjects = new (nothrow) ULONG[pData->iHash];
+ if (pData->arrcObjects == NULL)
+ {
+ delete [] pData->arrClsId;
+            pData->arrClsId = NULL;
+
+ return E_OUTOFMEMORY;
+ }
+
+ // Indicate that the memory was successfully allocated
+ pData->cLength = pData->iHash;
+ }
+
+ // Now copy all the data
+ HASHFIND hFind;
+ CLASSHASHENTRY * pCur = (CLASSHASHENTRY *) pData->pHashTable->FindFirstEntry(&hFind);
+ size_t iCur = 0; // current index for arrays
+
+ while (pCur != NULL)
+ {
+ _ASSERTE(iCur < pData->iHash);
+
+ pData->arrClsId[iCur] = pCur->m_clsId;
+ pData->arrcObjects[iCur] = (DWORD) pCur->m_count;
+
+ // Move to the next entry
+ iCur++;
+ pCur = (CLASSHASHENTRY *) pData->pHashTable->FindNextEntry(&hFind);
+ }
+
+ _ASSERTE(iCur == pData->iHash);
+
+ // Now communicate the results to the profiler
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ObjectsAllocatedByClass((ULONG)pData->iHash, pData->arrClsId, pData->arrcObjects);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ObjectReference(ObjectID objId,
+ ClassID classId,
+ ULONG cNumRefs,
+ ObjectID *arrObjRef)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // This is called by the thread doing a GC WHILE it does the GC
+ GC_NOTRIGGER;
+
+ // This is called by the thread doing a GC WHILE it does the GC
+ if (GetThreadNULLOk()) { MODE_COOPERATIVE; }
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock normally held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO100000,
+ "**PROF: ObjectReferences.\n"));
+
+ _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->ObjectReferences(objId, classId, cNumRefs, arrObjRef);
+ }
+}
+
+
+HRESULT EEToProfInterfaceImpl::FinalizeableObjectQueued(BOOL isCritical, ObjectID objectID)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Can't be in preemptive when we're dealing in objectIDs!
+ // However, it's possible we're on a non-EE Thread--that happens when this
+ // is a server-mode GC thread.
+ MODE_COOPERATIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock normally held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO100,
+ "**PROF: Notifying profiler of finalizeable object.\n"));
+
+ _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->FinalizeableObjectQueued(isCritical ? COR_PRF_FINALIZER_CRITICAL : 0, objectID);
+ }
+}
+
+
+HRESULT EEToProfInterfaceImpl::RootReferences2(GCReferencesData *pData)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // This is called by the thread doing a GC WHILE it does the GC
+ GC_NOTRIGGER;
+
+ // This is called by the thread doing a GC WHILE it does the GC
+ if (GetThreadNULLOk()) { MODE_COOPERATIVE; }
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock normally held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO10000,
+ "**PROF: RootReferences2.\n"));
+
+ _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+
+ HRESULT hr = S_OK;
+
+ COR_PRF_GC_ROOT_FLAGS flags[kcReferencesMax];
+
+ _ASSERTE(pData->curIdx <= kcReferencesMax);
+ for (ULONG i = 0; i < pData->curIdx; i++)
+ {
+ flags[i] = (COR_PRF_GC_ROOT_FLAGS)(pData->arrULONG[i] & 0xffff);
+ pData->arrULONG[i] >>= 16;
+ }
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ hr = m_pCallback2->RootReferences2((ULONG)pData->curIdx,
+ (ObjectID *)pData->arrpbMemBlockStartOld,
+ (COR_PRF_GC_ROOT_KIND *)pData->arrULONG,
+ flags,
+ (ObjectID *)pData->arrpbMemBlockStartNew);
+ if (FAILED(hr))
+ return hr;
+ }
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ hr = m_pCallback2->RootReferences((ULONG)pData->curIdx, (ObjectID *)pData->arrpbMemBlockStartOld);
+ }
+
+ return hr;
+}
+
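+// Illustrative note (not part of the original change): the unpacking loop in
+// RootReferences2 above assumes each arrULONG entry was packed by the root-reporting
+// producer with the root's kind in the high 16 bits and its flags in the low 16 bits,
+// roughly:
+//
+//     pData->arrULONG[i] = (((ULONG) kind) << 16) | (flags & 0xffff);
+//
+// which is why the loop masks off the low 16 bits to recover the flags and then shifts
+// right by 16 so only the kind remains for the callback's kinds array.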
+
+HRESULT EEToProfInterfaceImpl::ConditionalWeakTableElementReferences(GCReferencesData * pData)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // This is called by the thread doing a GC WHILE it does the GC
+ GC_NOTRIGGER;
+
+ // This is called by the thread doing a GC WHILE it does the GC
+ if (GetThreadNULLOk()) { MODE_COOPERATIVE; }
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock normally held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO10000,
+ "**PROF: ConditionalWeakTableElementReferences.\n"));
+
+ _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+
+ HRESULT hr = S_OK;
+
+ _ASSERTE(pData->curIdx <= kcReferencesMax);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ hr = m_pCallback5->ConditionalWeakTableElementReferences(
+ (ULONG)pData->curIdx,
+ (ObjectID *)pData->arrpbMemBlockStartOld,
+ (ObjectID *)pData->arrpbMemBlockStartNew,
+ (GCHandleID *)pData->arrpbRootId);
+ }
+
+ return hr;
+}
+
+HRESULT EEToProfInterfaceImpl::HandleCreated(UINT_PTR handleId, ObjectID initialObjectId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Called by HndCreateHandle which is notrigger
+ GC_NOTRIGGER;
+
+ // This can be called in preemptive mode if initialObjectId is NULL.
+ // Otherwise, this will be in cooperative mode. Note that, although this
+ // can be called in preemptive, when it's called in cooperative we must not
+ // switch to preemptive (as we normally do in callbacks) and must not trigger,
+        // as this would break some of our callers (as well as invalidate
+        // initialObjectId).
+ if (initialObjectId != NULL)
+ {
+ MODE_COOPERATIVE;
+ }
+ else
+ {
+ MODE_ANY;
+ }
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // CrstAppDomainHandleTable can be held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO10000,
+ "**PROF: HandleCreated.\n"));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->HandleCreated(handleId, initialObjectId);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::HandleDestroyed(UINT_PTR handleId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Called by HndDestroyHandle, which is notrigger. But HndDestroyHandle is also
+ // MODE_ANY, so perhaps we can change the whole call path to be triggers?
+ GC_NOTRIGGER;
+
+        // Although we're called from a notrigger function, we have verified empirically
+        // that this is called in both coop & preemp.
+ MODE_ANY;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock is typically held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO10000,
+ "**PROF: HandleDestroyed.\n"));
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->HandleDestroyed(handleId);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::GarbageCollectionStarted(int cGenerations, BOOL generationCollected[], COR_PRF_GC_REASON reason)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // This is called by the thread doing a GC WHILE it does the GC
+ GC_NOTRIGGER;
+
+ // This is called by the thread doing a GC WHILE it does the GC
+ if (GetThreadNULLOk()) { MODE_COOPERATIVE; }
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock normally held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO10000,
+ "**PROF: GarbageCollectionStarted.\n"));
+
+ _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->GarbageCollectionStarted(cGenerations, generationCollected, reason);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::GarbageCollectionFinished()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // This is called by the thread doing a GC WHILE it does the GC
+ GC_NOTRIGGER;
+
+ // This is called by the thread doing a GC WHILE it does the GC
+ if (GetThreadNULLOk()) { MODE_COOPERATIVE; }
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // Thread store lock normally held during this callback
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(
+ kEE2PNoTrigger,
+ (LF_CORPROF,
+ LL_INFO10000,
+ "**PROF: GarbageCollectionFinished.\n"));
+
+ _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback2->GarbageCollectionFinished();
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::ProfilerDetachSucceeded()
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ // ProfilingAPIUtility::s_csStatus is held while this callback is issued.
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT_EX(kEE2PAllowableWhileDetaching,
+ (LF_CORPROF,
+ LL_INFO10,
+ "**PROF: ProfilerDetachSucceeded.\n"));
+
+ // Should only be called on profilers that support ICorProfilerCallback3
+ _ASSERTE(m_pCallback3 != NULL);
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback3->ProfilerDetachSucceeded();
+ }
+}
+
+#ifdef FEATURE_FUSION
+
+// Minimal wrappers so that Fusion can call the GetAssemblyReferences profiler callback
+// without pulling in a large set of profapi includes.
+
+BOOL ShouldCallGetAssemblyReferencesProfilerCallback()
+{
+ return CORProfilerAddsAssemblyReferences();
+}
+
+void CallGetAssemblyReferencesProfilerCallbackIfNecessary(LPCWSTR wszAssemblyPath, IAssemblyBindingClosure * pClosure, AssemblyReferenceClosureWalkContextForProfAPI * pContext)
+{
+ BEGIN_PIN_PROFILER(CORProfilerAddsAssemblyReferences());
+ g_profControlBlock.pProfInterface->GetAssemblyReferences(wszAssemblyPath, pClosure, pContext);
+ END_PIN_PROFILER();
+}
+
+// Implementation of ICorProfilerAssemblyReferenceProvider, which is given to the profiler so
+// that it can call back into the CLR with extra assembly references that should be considered
+// while Fusion performs its assembly reference closure walk.
+class ProfilerAssemblyReferenceProvider : public ICorProfilerAssemblyReferenceProvider
+{
+public:
+ // IUnknown functions
+ virtual HRESULT __stdcall QueryInterface(REFIID id, void** pInterface)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (id == IID_IUnknown)
+ {
+ *pInterface = static_cast<IUnknown *>(this);
+ }
+ else if (id == IID_ICorProfilerAssemblyReferenceProvider)
+ {
+ *pInterface = static_cast<ICorProfilerAssemblyReferenceProvider *>(this);
+ }
+ else
+ {
+ *pInterface = NULL;
+ return E_NOINTERFACE;
+ }
+
+ AddRef();
+ return S_OK;
+ }
+
+ virtual ULONG __stdcall AddRef()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return InterlockedIncrement(&m_refCount);
+ }
+
+ virtual ULONG __stdcall Release()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ ULONG refCount = InterlockedDecrement(&m_refCount);
+
+ if (0 == refCount)
+ {
+ delete this;
+ }
+
+ return refCount;
+ }
+
+ // ICorProfilerAssemblyReferenceProvider functions
+
+ // This is what the profiler calls to tell us about an assembly reference we should include
+ // when Fusion performs its closure walk. When this is called, the walk is already underway,
+    // and is sitting on our stack.
+ virtual HRESULT __stdcall AddAssemblyReference(const COR_PRF_ASSEMBLY_REFERENCE_INFO * pAssemblyRefInfo)
+ {
+ _ASSERTE(m_pClosure != NULL);
+
+ return m_pClosure->AddProfilerAssemblyReference(
+ pAssemblyRefInfo->pbPublicKeyOrToken,
+ pAssemblyRefInfo->cbPublicKeyOrToken,
+ pAssemblyRefInfo->szName,
+ pAssemblyRefInfo->pMetaData,
+ pAssemblyRefInfo->pbHashValue,
+ pAssemblyRefInfo->cbHashValue,
+ pAssemblyRefInfo->dwAssemblyRefFlags,
+ m_pContext);
+ }
+
+ // Implementation
+ ProfilerAssemblyReferenceProvider(IAssemblyBindingClosure * pClosure, AssemblyReferenceClosureWalkContextForProfAPI * pContext) :
+ m_refCount(1),
+ m_pClosure(pClosure),
+ m_pContext(pContext)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pClosure->AddRef();
+ }
+
+protected:
+ Volatile<LONG> m_refCount;
+
+ // Our interface into Fusion's closure walk. We use this to inform Fusion about
+ // the assembly reference the profiler gave us.
+ ReleaseHolder<IAssemblyBindingClosure> m_pClosure;
+
+    // Extra context built up by Fusion's closure walk that we need to remember. The
+    // walk is already in action by the time we're called, and this structure remembers
+    // the lists that are getting built up by the walk.
+ AssemblyReferenceClosureWalkContextForProfAPI * m_pContext;
+};
+
+#endif // FEATURE_FUSION
+
+
+HRESULT EEToProfInterfaceImpl::GetAssemblyReferences(LPCWSTR wszAssemblyPath, IAssemblyBindingClosure * pClosure, AssemblyReferenceClosureWalkContextForProfAPI * pContext)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: AssemblyReferenceClosureWalkStarted. wszAssemblyPath: 0x%p.\n",
+ wszAssemblyPath
+ ));
+ HRESULT hr = S_OK;
+
+#ifdef FEATURE_FUSION
+
+ SString sPath;
+ _ASSERTE(IsCallback6Supported());
+
+    // Create an instance of the class implementing the interface we pass back to the profiler,
+    // feeding it the current context of Fusion's closure walk
+ ReleaseHolder<ProfilerAssemblyReferenceProvider> pReferenceProvider =
+ new (nothrow) ProfilerAssemblyReferenceProvider(pClosure, pContext);
+ if (pReferenceProvider == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ hr = m_pCallback6->GetAssemblyReferences(
+ wszAssemblyPath,
+ static_cast<ICorProfilerAssemblyReferenceProvider *>(pReferenceProvider));
+ }
+
+#endif // FEATURE_FUSION
+
+ return hr;
+}
+
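+// Illustrative note (not part of the original change): on the profiler side, an
+// ICorProfilerCallback6::GetAssemblyReferences implementation typically feeds its extra
+// references straight back through the provider it is handed, along these lines
+// (profiler-side names are hypothetical):
+//
+//     HRESULT MyProfiler::GetAssemblyReferences(
+//         const WCHAR * wszAssemblyPath,
+//         ICorProfilerAssemblyReferenceProvider * pAsmRefProvider)
+//     {
+//         COR_PRF_ASSEMBLY_REFERENCE_INFO info;
+//         // ... fill in the name, public key or token, metadata, and flags ...
+//         return pAsmRefProvider->AddAssemblyReference(&info);
+//     }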
+
+#endif // PROFILING_SUPPORTED
diff --git a/src/vm/eetoprofinterfaceimpl.h b/src/vm/eetoprofinterfaceimpl.h
new file mode 100644
index 0000000000..039bdf8ae2
--- /dev/null
+++ b/src/vm/eetoprofinterfaceimpl.h
@@ -0,0 +1,676 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// EEToProfInterfaceImpl.h
+//
+
+//
+// Declaration of class that wraps calling into the profiler's implementation
+// of ICorProfilerCallback*
+//
+
+// ======================================================================================
+
+
+#ifndef __EETOPROFINTERFACEIMPL_H__
+#define __EETOPROFINTERFACEIMPL_H__
+
+#include <stddef.h>
+#include "profilepriv.h"
+#include "eeprofinterfaces.h"
+#include "shash.h"
+#include "eventtracebase.h"
+
+class SimpleRWLock;
+
+class ProfToEEInterfaceImpl;
+
+interface IAssemblyBindingClosure;
+struct AssemblyReferenceClosureWalkContextForProfAPI;
+
+const GUID k_guidZero = {0};
+
+class EEToProfInterfaceImpl
+{
+public:
+
+ //
+ // Internal initialization / cleanup
+ //
+
+ EEToProfInterfaceImpl();
+ ~EEToProfInterfaceImpl();
+
+ HRESULT Init(
+ ProfToEEInterfaceImpl * pProfToEE,
+ const CLSID * pClsid,
+ __inout_z LPCWSTR wszClsid,
+ __in_z LPCWSTR wszProfileDLL,
+ BOOL fLoadedViaAttach,
+ DWORD dwConcurrentGCWaitTimeoutInMs);
+
+ BOOL IsCallback3Supported();
+ BOOL IsCallback4Supported();
+ BOOL IsCallback5Supported();
+ BOOL IsCallback6Supported();
+
+ HRESULT SetEventMask(DWORD dwEventMask, DWORD dwEventMaskHigh);
+
+    // Used in ProfToEEInterfaceImpl.cpp to set this to the profiler's hook
+    // function pointer (see SetFunctionIDMapper).
+ void SetFunctionIDMapper(FunctionIDMapper * pFunc);
+ void SetFunctionIDMapper2(FunctionIDMapper2 * pFunc, void * clientData);
+
+ FunctionIDMapper * GetFunctionIDMapper();
+ FunctionIDMapper2 * GetFunctionIDMapper2();
+ BOOL IsLoadedViaAttach();
+ HRESULT EnsureProfilerDetachable();
+ void SetUnrevertiblyModifiedILFlag();
+
+ FunctionEnter * GetEnterHook();
+ FunctionLeave * GetLeaveHook();
+ FunctionTailcall * GetTailcallHook();
+
+ FunctionEnter2 * GetEnter2Hook();
+ FunctionLeave2 * GetLeave2Hook();
+ FunctionTailcall2 * GetTailcall2Hook();
+
+ FunctionEnter3 * GetEnter3Hook();
+ FunctionLeave3 * GetLeave3Hook();
+ FunctionTailcall3 * GetTailcall3Hook();
+ FunctionEnter3WithInfo * GetEnter3WithInfoHook();
+ FunctionLeave3WithInfo * GetLeave3WithInfoHook();
+ FunctionTailcall3WithInfo * GetTailcall3WithInfoHook();
+
+ BOOL IsClientIDToFunctionIDMappingEnabled();
+
+ UINT_PTR LookupClientIDFromCache(FunctionID functionID);
+
+ HRESULT SetEnterLeaveFunctionHooks(
+ FunctionEnter * pFuncEnter,
+ FunctionLeave * pFuncLeave,
+ FunctionTailcall * pFuncTailcall);
+
+ HRESULT SetEnterLeaveFunctionHooks2(
+ FunctionEnter2 * pFuncEnter,
+ FunctionLeave2 * pFuncLeave,
+ FunctionTailcall2 * pFuncTailcall);
+
+ HRESULT SetEnterLeaveFunctionHooks3(
+ FunctionEnter3 * pFuncEnter3,
+ FunctionLeave3 * pFuncLeave3,
+ FunctionTailcall3 * pFuncTailcall3);
+
+ HRESULT SetEnterLeaveFunctionHooks3WithInfo(
+ FunctionEnter3WithInfo * pFuncEnter3WithInfo,
+ FunctionLeave3WithInfo * pFuncLeave3WithInfo,
+ FunctionTailcall3WithInfo * pFuncTailcall3WithInfo);
+
+ BOOL RequiresGenericsContextForEnterLeave();
+
+ UINT_PTR EEFunctionIDMapper(FunctionID funcId, BOOL * pbHookFunction);
+
+    // This fills in the non-call-specific portions of the cookie GUID.
+ // This should only be called once at startup if necessary.
+ HRESULT InitGUID();
+
+ // This will assign a mostly-unique GUID. If enough calls to GetGUID
+ // are made from the same thread, then the GUIDs will cycle.
+    // (Currently, it will cycle every 256 calls.)
+ void GetGUID(GUID * pGUID);
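+
+    // Illustrative note (not part of the original change): one way such a cookie
+    // scheme can work, consistent with the 256-call cycle noted above, is to copy
+    // the per-process template GUID and stamp its trailing bytes with the caller's
+    // thread id and a wrapping 8-bit counter, roughly:
+    //
+    //     memcpy(pGUID, &m_GUID, sizeof(GUID));
+    //     pGUID->Data4[6] = (BYTE) GetCurrentThreadId();
+    //     pGUID->Data4[7] = (BYTE) InterlockedIncrement(&m_lGUIDCount);
+    //
+    // The counter byte wraps at 256, which is what makes the GUIDs cycle.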
+
+ //
+ // Initialize callback
+ //
+
+ HRESULT Initialize();
+
+ HRESULT InitializeForAttach(void * pvClientData, UINT cbClientData);
+
+ HRESULT ProfilerAttachComplete();
+
+ //
+ // Thread Events
+ //
+
+ HRESULT ThreadCreated(
+ ThreadID threadID);
+
+ HRESULT ThreadDestroyed(
+ ThreadID threadID);
+
+ HRESULT ThreadAssignedToOSThread(ThreadID managedThreadId,
+ DWORD osThreadId);
+
+ HRESULT ThreadNameChanged(ThreadID managedThreadId,
+ ULONG cchName,
+ __in_ecount_opt(cchName) WCHAR name[]);
+
+ //
+ // Startup/Shutdown Events
+ //
+
+ HRESULT Shutdown();
+
+ //
+ // JIT/Function Events
+ //
+
+ HRESULT FunctionUnloadStarted(
+ FunctionID functionId);
+
+ HRESULT JITCompilationFinished(
+ FunctionID functionId,
+ HRESULT hrStatus,
+ BOOL fIsSafeToBlock);
+
+ HRESULT JITCompilationStarted(
+ FunctionID functionId,
+ BOOL fIsSafeToBlock);
+
+ HRESULT JITCachedFunctionSearchStarted(
+ /* [in] */ FunctionID functionId,
+ /* [out] */ BOOL * pbUseCachedFunction);
+
+ HRESULT JITCachedFunctionSearchFinished(
+ /* [in] */ FunctionID functionId,
+ /* [in] */ COR_PRF_JIT_CACHE result);
+
+ HRESULT JITFunctionPitched(FunctionID functionId);
+
+ HRESULT JITInlining(
+ /* [in] */ FunctionID callerId,
+ /* [in] */ FunctionID calleeId,
+ /* [out] */ BOOL * pfShouldInline);
+
+ HRESULT ReJITCompilationStarted(
+ /* [in] */ FunctionID functionId,
+ /* [in] */ ReJITID reJitId,
+ /* [in] */ BOOL fIsSafeToBlock);
+
+ HRESULT GetReJITParameters(
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ mdMethodDef methodId,
+ /* [in] */ ICorProfilerFunctionControl *
+ pFunctionControl);
+
+ HRESULT ReJITCompilationFinished(
+ /* [in] */ FunctionID functionId,
+ /* [in] */ ReJITID reJitId,
+ /* [in] */ HRESULT hrStatus,
+ /* [in] */ BOOL fIsSafeToBlock);
+
+ HRESULT ReJITError(
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ mdMethodDef methodId,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ HRESULT hrStatus);
+
+ //
+ // Module Events
+ //
+
+ HRESULT ModuleLoadStarted(
+ ModuleID moduleId);
+
+ HRESULT ModuleLoadFinished(
+ ModuleID moduleId,
+ HRESULT hrStatus);
+
+ HRESULT ModuleUnloadStarted(
+ ModuleID moduleId);
+
+ HRESULT ModuleUnloadFinished(
+ ModuleID moduleId,
+ HRESULT hrStatus);
+
+ HRESULT ModuleAttachedToAssembly(
+ ModuleID moduleId,
+ AssemblyID AssemblyId);
+
+ //
+ // Class Events
+ //
+
+ HRESULT ClassLoadStarted(
+ ClassID classId);
+
+ HRESULT ClassLoadFinished(
+ ClassID classId,
+ HRESULT hrStatus);
+
+ HRESULT ClassUnloadStarted(
+ ClassID classId);
+
+ HRESULT ClassUnloadFinished(
+ ClassID classId,
+ HRESULT hrStatus);
+
+ //
+ // AppDomain Events
+ //
+
+ HRESULT AppDomainCreationStarted(
+ AppDomainID appDomainId);
+
+ HRESULT AppDomainCreationFinished(
+ AppDomainID appDomainId,
+ HRESULT hrStatus);
+
+ HRESULT AppDomainShutdownStarted(
+ AppDomainID appDomainId);
+
+ HRESULT AppDomainShutdownFinished(
+ AppDomainID appDomainId,
+ HRESULT hrStatus);
+
+ //
+ // Assembly Events
+ //
+
+ HRESULT AssemblyLoadStarted(
+ AssemblyID assemblyId);
+
+ HRESULT AssemblyLoadFinished(
+ AssemblyID assemblyId,
+ HRESULT hrStatus);
+
+ HRESULT AssemblyUnloadStarted(
+ AssemblyID assemblyId);
+
+ HRESULT AssemblyUnloadFinished(
+ AssemblyID assemblyId,
+ HRESULT hrStatus);
+
+ //
+ // Transition Events
+ //
+
+ HRESULT UnmanagedToManagedTransition(
+ FunctionID functionId,
+ COR_PRF_TRANSITION_REASON reason);
+
+ HRESULT ManagedToUnmanagedTransition(
+ FunctionID functionId,
+ COR_PRF_TRANSITION_REASON reason);
+
+ //
+ // Exception Events
+ //
+
+ HRESULT ExceptionThrown(
+ ObjectID thrownObjectId);
+
+ HRESULT ExceptionSearchFunctionEnter(
+ FunctionID functionId);
+
+ HRESULT ExceptionSearchFunctionLeave();
+
+ HRESULT ExceptionSearchFilterEnter(
+ FunctionID funcId);
+
+ HRESULT ExceptionSearchFilterLeave();
+
+ HRESULT ExceptionSearchCatcherFound(
+ FunctionID functionId);
+
+ HRESULT ExceptionOSHandlerEnter(
+ FunctionID funcId);
+
+ HRESULT ExceptionOSHandlerLeave(
+ FunctionID funcId);
+
+ HRESULT ExceptionUnwindFunctionEnter(
+ FunctionID functionId);
+
+ HRESULT ExceptionUnwindFunctionLeave();
+
+ HRESULT ExceptionUnwindFinallyEnter(
+ FunctionID functionId);
+
+ HRESULT ExceptionUnwindFinallyLeave();
+
+ HRESULT ExceptionCatcherEnter(
+ FunctionID functionId,
+ ObjectID objectId);
+
+ HRESULT ExceptionCatcherLeave();
+
+ //
+ // CCW Events
+ //
+
+ HRESULT COMClassicVTableCreated(
+ /* [in] */ ClassID wrappedClassId,
+ /* [in] */ REFGUID implementedIID,
+ /* [in] */ void * pVTable,
+ /* [in] */ ULONG cSlots);
+
+ HRESULT COMClassicVTableDestroyed(
+ /* [in] */ ClassID wrappedClassId,
+ /* [in] */ REFGUID implementedIID,
+ /* [in] */ void * pVTable);
+
+ //
+ // Remoting Events
+ //
+
+ HRESULT RemotingClientInvocationStarted();
+
+ HRESULT RemotingClientSendingMessage(GUID * pCookie,
+ BOOL fIsAsync);
+
+ HRESULT RemotingClientReceivingReply(GUID * pCookie,
+ BOOL fIsAsync);
+
+ HRESULT RemotingClientInvocationFinished();
+
+ HRESULT RemotingServerReceivingMessage(GUID * pCookie,
+ BOOL fIsAsync);
+
+ HRESULT RemotingServerInvocationStarted();
+
+ HRESULT RemotingServerInvocationReturned();
+
+ HRESULT RemotingServerSendingReply(GUID * pCookie,
+ BOOL fIsAsync);
+
+
+ //
+ // GC Events
+ //
+
+ HRESULT RuntimeSuspendStarted(COR_PRF_SUSPEND_REASON suspendReason);
+
+ HRESULT RuntimeSuspendFinished();
+
+ HRESULT RuntimeSuspendAborted();
+
+ HRESULT RuntimeResumeStarted();
+
+ HRESULT RuntimeResumeFinished();
+
+ HRESULT RuntimeThreadSuspended(ThreadID suspendedThreadId);
+
+ HRESULT RuntimeThreadResumed(ThreadID resumedThreadId);
+
+ HRESULT ObjectAllocated(
+ /* [in] */ ObjectID objectId,
+ /* [in] */ ClassID classId);
+
+ HRESULT FinalizeableObjectQueued(BOOL isCritical, ObjectID objectID);
+
+ //
+ // GC Moved References and RootReferences2 Notification Stuff
+ //
+
+ HRESULT MovedReference(BYTE * pbMemBlockStart,
+ BYTE * pbMemBlockEnd,
+ ptrdiff_t cbRelocDistance,
+ void * pHeapId,
+ BOOL fCompacting);
+
+ HRESULT EndMovedReferences(void * pHeapId);
+
+ HRESULT RootReference2(BYTE * objectId,
+ EtwGCRootKind dwEtwRootKind,
+ EtwGCRootFlags dwEtwRootFlags,
+ void * rootID,
+ void * pHeapId);
+
+ HRESULT EndRootReferences2(void * pHeapId);
+
+ HRESULT ConditionalWeakTableElementReference(BYTE * primaryObjectId,
+ BYTE * secondaryObjectId,
+ void * rootID,
+ void * pHeapId);
+
+ HRESULT EndConditionalWeakTableElementReferences(void * pHeapId);
+
+ //
+    // Allocation-by-class notification stuff
+ //
+
+ HRESULT AllocByClass(ObjectID objId, ClassID classId, void* pHeapId);
+
+ HRESULT EndAllocByClass(void * pHeapId);
+
+ //
+ // Heap walk notification stuff
+ //
+ HRESULT ObjectReference(ObjectID objId,
+ ClassID classId,
+ ULONG cNumRefs,
+ ObjectID * arrObjRef);
+
+ //
+ // GC Handle creation / destruction notifications
+ //
+ HRESULT HandleCreated(UINT_PTR handleId, ObjectID initialObjectId);
+
+ HRESULT HandleDestroyed(UINT_PTR handleId);
+
+ HRESULT GarbageCollectionStarted(int cGenerations, BOOL generationCollected[], COR_PRF_GC_REASON reason);
+
+ HRESULT GarbageCollectionFinished();
+
+ //
+ // Detach
+ //
+ HRESULT ProfilerDetachSucceeded();
+
+ BOOL HasTimedOutWaitingForConcurrentGC();
+
+ HRESULT GetAssemblyReferences(LPCWSTR wszAssemblyPath, IAssemblyBindingClosure * pClosure, AssemblyReferenceClosureWalkContextForProfAPI * pContext);
+
+private:
+
+ //
+ // Generation 0 Allocation by Class notification stuff
+ //
+
+ // This is for a hashing of ClassID values
+ struct CLASSHASHENTRY : HASHENTRY
+ {
+ ClassID m_clsId; // The class ID (also the key)
+ size_t m_count; // How many of this class have been counted
+ };
+
+    // This is a minimal subclass of CHashTable whose only purpose is to provide an
+    // implementation of the Cmp pure virtual function
+ class CHashTableImpl : public CHashTable
+ {
+ public:
+ CHashTableImpl(ULONG iBuckets);
+
+ protected:
+ virtual BOOL Cmp(SIZE_T k1, const HASHENTRY * pc2);
+ };
+
+ // This contains the data for storing allocation information
+    // in terms of numbers of objects, counted per class.
+ struct AllocByClassData
+ {
+ CHashTableImpl * pHashTable; // The hash table
+ CLASSHASHENTRY * arrHash; // Array that the hashtable uses for linking
+ ULONG cHash; // The total number of elements in arrHash
+ ULONG iHash; // Next empty entry in the hash array
+ ClassID * arrClsId; // Array of ClassIDs for the call to ObjectsAllocatedByClass
+ ULONG * arrcObjects; // Array of counts for the call to ObjectsAllocatedByClass
+ size_t cLength; // Length of the above two parallel arrays
+ };
+
+ static const UINT kcReferencesMax = 512;
+
+ struct GCReferencesData
+ {
+ size_t curIdx;
+ size_t compactingCount;
+ BYTE * arrpbMemBlockStartOld[kcReferencesMax];
+ BYTE * arrpbMemBlockStartNew[kcReferencesMax];
+ union
+ {
+ size_t arrMemBlockSize[kcReferencesMax];
+ ULONG arrULONG[kcReferencesMax];
+ BYTE * arrpbRootId[kcReferencesMax];
+ };
+ GCReferencesData * pNext;
+ };
+
+    // Since this stuff can only be performed by one thread (right now), we don't need
+    // to make this thread-safe and can just have one block we reuse every time around.
+ static AllocByClassData * m_pSavedAllocDataBlock;
+
+ // Pointer to the profiler's implementation of the callback interface(s).
+ // Profilers MUST support ICorProfilerCallback2.
+    // Profilers MAY optionally support ICorProfilerCallback3,4,5,6
+ ICorProfilerCallback2 * m_pCallback2;
+ ICorProfilerCallback3 * m_pCallback3;
+ ICorProfilerCallback4 * m_pCallback4;
+ ICorProfilerCallback5 * m_pCallback5;
+ ICorProfilerCallback6 * m_pCallback6;
+ HMODULE m_hmodProfilerDLL;
+
+ BOOL m_fLoadedViaAttach;
+ ProfToEEInterfaceImpl * m_pProfToEE;
+
+ // Used in EEToProfInterfaceImpl.cpp to call into the profiler (see EEFunctionIDMapper)
+ FunctionIDMapper * m_pProfilersFuncIDMapper;
+ FunctionIDMapper2 * m_pProfilersFuncIDMapper2;
+ void * m_pProfilersFuncIDMapper2ClientData;
+
+ // This is used as a cookie template for remoting calls
+ GUID m_GUID;
+
+ // This is an incrementing counter for constructing unique GUIDS from
+ // m_GUID
+ LONG m_lGUIDCount;
+
+ // This will contain a list of free ref data structs, so they
+ // don't have to be re-allocated on every GC
+ GCReferencesData * m_pGCRefDataFreeList;
+
+ // This is for managing access to the free list above.
+ CRITSEC_COOKIE m_csGCRefDataFreeList;
+
+ FunctionEnter * m_pEnter;
+ FunctionLeave * m_pLeave;
+ FunctionTailcall * m_pTailcall;
+
+ FunctionEnter2 * m_pEnter2;
+ FunctionLeave2 * m_pLeave2;
+ FunctionTailcall2 * m_pTailcall2;
+
+ BOOL m_fIsClientIDToFunctionIDMappingEnabled;
+
+ FunctionEnter3 * m_pEnter3;
+ FunctionLeave3 * m_pLeave3;
+ FunctionTailcall3 * m_pTailcall3;
+
+ FunctionEnter3WithInfo * m_pEnter3WithInfo;
+ FunctionLeave3WithInfo * m_pLeave3WithInfo;
+ FunctionTailcall3WithInfo * m_pTailcall3WithInfo;
+
+
+ // Remembers whether the profiler used SetILFunctionBody() which modifies IL in a
+ // way that cannot be reverted. This prevents a detach from succeeding.
+ BOOL m_fUnrevertiblyModifiedIL;
+
+ GCReferencesData * AllocateMovedReferencesData();
+
+ void FreeMovedReferencesData(GCReferencesData * pData);
+
+ HRESULT MovedReferences(GCReferencesData * pData);
+
+ HRESULT RootReferences2(GCReferencesData * pData);
+
+ HRESULT ConditionalWeakTableElementReferences(GCReferencesData * pData);
+
+ HRESULT NotifyAllocByClass(AllocByClassData * pData);
+
+ HRESULT CreateProfiler(
+ const CLSID * pClsid,
+ __in_z LPCWSTR wszClsid,
+ __in_z LPCWSTR wszProfileDLL);
+
+ HRESULT DetermineAndSetEnterLeaveFunctionHooksForJit();
+
+ HRESULT STDMETHODCALLTYPE SetEnterLeaveFunctionHooksForJit(
+ FunctionEnter3 * pFuncEnter,
+ FunctionLeave3 * pFuncLeave,
+ FunctionTailcall3 * pFuncTailcall);
+
+ struct FunctionIDAndClientID
+ {
+ FunctionID functionID;
+ UINT_PTR clientID;
+ };
+
+ class FunctionIDHashTableTraits : public NoRemoveSHashTraits<DefaultSHashTraits<FunctionIDAndClientID> >
+ {
+ public:
+
+ static const COUNT_T s_minimum_allocation = 31;
+ typedef DefaultSHashTraits<FunctionIDAndClientID *>::count_t count_t;
+ typedef UINT_PTR key_t;
+
+ static key_t GetKey(FunctionIDAndClientID e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e.functionID;
+ }
+
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return k1 == k2;
+ }
+
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t)k;
+ }
+
+ static const FunctionIDAndClientID Null()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FunctionIDAndClientID functionIDAndClientID;
+ functionIDAndClientID.functionID = NULL;
+ functionIDAndClientID.clientID = NULL;
+ return functionIDAndClientID;
+ }
+
+ static bool IsNull(const FunctionIDAndClientID &functionIDAndClientID)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE((functionIDAndClientID.functionID != NULL) || (functionIDAndClientID.clientID == NULL));
+ return functionIDAndClientID.functionID == NULL;
+ }
+ };
+
+ typedef SHash<FunctionIDHashTableTraits> FunctionIDHashTable;
+
+    // ELT3 no longer keeps track of the FunctionID of the current managed method. Therefore, a hash table
+    // bookkeeping the mapping from FunctionID to clientID is needed to build up ELT2 on top of ELT3. When ELT2
+    // (slow-path or fast-path) is registered by the profiler and the profiler's FunctionIDMapper requests to hook
+    // the function being loaded, the clientID returned by FunctionIDMapper is saved as the value to be looked
+    // up by the corresponding FunctionID in the hash table. FunctionIDs can be recycled after an app domain
+    // that contains the function bodies is unloaded, so this hash table needs to replace the existing FunctionID
+    // with the new FunctionID if a duplicate is found in the hash table.
+ FunctionIDHashTable * m_pFunctionIDHashTable;
+
+    // Since the hash table can be read and written concurrently, a reader-writer lock is used to synchronize
+ // all accesses to the hash table.
+ SimpleRWLock * m_pFunctionIDHashTableRWLock;
+
+ // Timeout for wait operation on concurrent GC. Only used for attach scenario
+ DWORD m_dwConcurrentGCWaitTimeoutInMs;
+
+ // Remember the fact we've timed out when waiting for concurrent GC. Will report the error later
+ BOOL m_bHasTimedOutWaitingForConcurrentGC;
+};
+
+#endif // __EETOPROFINTERFACEIMPL_H__
diff --git a/src/vm/eetoprofinterfaceimpl.inl b/src/vm/eetoprofinterfaceimpl.inl
new file mode 100644
index 0000000000..c2c98f9a63
--- /dev/null
+++ b/src/vm/eetoprofinterfaceimpl.inl
@@ -0,0 +1,253 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// EEToProfInterfaceImpl.inl
+//
+
+//
+// Inline implementation of portions of the code that wraps calling into
+// the profiler's implementation of ICorProfilerCallback*
+//
+
+// ======================================================================================
+
+#ifndef __EETOPROFINTERFACEIMPL_INL__
+#define __EETOPROFINTERFACEIMPL_INL__
+
+#include "profilepriv.h"
+#include "profilepriv.inl"
+#include "simplerwlock.hpp"
+
+// ----------------------------------------------------------------------------
+// EEToProfInterfaceImpl::IsCallback3Supported
+//
+// Description:
+// Returns BOOL indicating whether the profiler implements
+// ICorProfilerCallback3.
+//
+
+inline BOOL EEToProfInterfaceImpl::IsCallback3Supported()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_pCallback3 != NULL);
+}
+
+// ----------------------------------------------------------------------------
+// EEToProfInterfaceImpl::IsCallback4Supported
+//
+// Description:
+// Returns BOOL indicating whether the profiler implements
+// ICorProfilerCallback4.
+//
+
+inline BOOL EEToProfInterfaceImpl::IsCallback4Supported()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_pCallback4 != NULL);
+}
+
+inline BOOL EEToProfInterfaceImpl::IsCallback5Supported()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_pCallback5 != NULL);
+}
+
+inline BOOL EEToProfInterfaceImpl::IsCallback6Supported()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_pCallback6 != NULL);
+}
+
+inline FunctionIDMapper * EEToProfInterfaceImpl::GetFunctionIDMapper()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pProfilersFuncIDMapper;
+}
+
+inline FunctionIDMapper2 * EEToProfInterfaceImpl::GetFunctionIDMapper2()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pProfilersFuncIDMapper2;
+}
+
+inline void EEToProfInterfaceImpl::SetFunctionIDMapper(FunctionIDMapper * pFunc)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pProfilersFuncIDMapper = pFunc;
+}
+
+inline void EEToProfInterfaceImpl::SetFunctionIDMapper2(FunctionIDMapper2 * pFunc, void * clientData)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pProfilersFuncIDMapper2 = pFunc;
+ m_pProfilersFuncIDMapper2ClientData = clientData;
+}
+
+inline BOOL EEToProfInterfaceImpl::IsLoadedViaAttach()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fLoadedViaAttach;
+}
+
+inline void EEToProfInterfaceImpl::SetUnrevertiblyModifiedILFlag()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fUnrevertiblyModifiedIL = TRUE;
+}
+
+inline FunctionEnter * EEToProfInterfaceImpl::GetEnterHook()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pEnter;
+}
+
+inline FunctionLeave * EEToProfInterfaceImpl::GetLeaveHook()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pLeave;
+}
+
+inline FunctionTailcall * EEToProfInterfaceImpl::GetTailcallHook()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pTailcall;
+}
+
+inline FunctionEnter2 * EEToProfInterfaceImpl::GetEnter2Hook()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pEnter2;
+}
+
+inline FunctionLeave2 * EEToProfInterfaceImpl::GetLeave2Hook()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pLeave2;
+}
+
+inline FunctionTailcall2 * EEToProfInterfaceImpl::GetTailcall2Hook()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pTailcall2;
+}
+
+inline FunctionEnter3 * EEToProfInterfaceImpl::GetEnter3Hook()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pEnter3;
+}
+
+inline FunctionLeave3 * EEToProfInterfaceImpl::GetLeave3Hook()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pLeave3;
+}
+
+inline FunctionTailcall3 * EEToProfInterfaceImpl::GetTailcall3Hook()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pTailcall3;
+}
+
+inline FunctionEnter3WithInfo * EEToProfInterfaceImpl::GetEnter3WithInfoHook()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pEnter3WithInfo;
+}
+
+inline FunctionLeave3WithInfo * EEToProfInterfaceImpl::GetLeave3WithInfoHook()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pLeave3WithInfo;
+}
+
+inline FunctionTailcall3WithInfo * EEToProfInterfaceImpl::GetTailcall3WithInfoHook()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pTailcall3WithInfo;
+}
+
+inline BOOL EEToProfInterfaceImpl::IsClientIDToFunctionIDMappingEnabled()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fIsClientIDToFunctionIDMappingEnabled;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Look up the clientID for a given functionID
+//
+// Arguments:
+// functionID
+//
+// Return Value:
+// If a match is found, returns the clientID; otherwise returns NULL.
+//
+inline UINT_PTR EEToProfInterfaceImpl::LookupClientIDFromCache(FunctionID functionID)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ _ASSERTE(functionID != NULL);
+
+ SimpleReadLockHolder readLockHolder(m_pFunctionIDHashTableRWLock);
+ const FunctionIDAndClientID * entry = m_pFunctionIDHashTable->LookupPtr(functionID);
+
+ // entry can be NULL if an earlier OOM prevented this functionID from being cached.
+ if (entry != NULL)
+ {
+ return entry->clientID;
+ }
+ else
+ {
+ return NULL;
+ }
+}
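+
+// Typical flow (a sketch of how this cache is used, inferred from the code
+// here rather than spelled out by it): the profiler's FunctionIDMapper2
+// callback returns a clientID for a functionID, the runtime caches the
+// (functionID, clientID) pair in m_pFunctionIDHashTable, and the fast-path
+// enter/leave hooks later call LookupClientIDFromCache so they can hand the
+// profiler back its own clientID instead of the raw FunctionID.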
+
+//---------------------------------------------------------------------------------------
+//
+// Returns whether the profiler chose options that require the JIT to compile with the
+// CORINFO_GENERICS_CTXT_KEEP_ALIVE flag.
+//
+// Return Value:
+// Nonzero iff the JIT should compile with CORINFO_GENERICS_CTXT_KEEP_ALIVE.
+//
+
+inline BOOL EEToProfInterfaceImpl::RequiresGenericsContextForEnterLeave()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return
+ CORProfilerPresent() &&
+ ((g_profControlBlock.dwEventMask & COR_PRF_ENABLE_FRAME_INFO) != 0) &&
+ (
+ (m_pEnter2 != NULL) ||
+ (m_pLeave2 != NULL) ||
+ (m_pTailcall2 != NULL) ||
+ (m_pEnter3WithInfo != NULL) ||
+ (m_pLeave3WithInfo != NULL) ||
+ (m_pTailcall3WithInfo != NULL)
+ );
+}
+
+inline BOOL EEToProfInterfaceImpl::HasTimedOutWaitingForConcurrentGC()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_bHasTimedOutWaitingForConcurrentGC;
+}
+
+#endif // __EETOPROFINTERFACEIMPL_INL__
+
diff --git a/src/vm/eetoprofinterfacewrapper.inl b/src/vm/eetoprofinterfacewrapper.inl
new file mode 100644
index 0000000000..e0e29b6f41
--- /dev/null
+++ b/src/vm/eetoprofinterfacewrapper.inl
@@ -0,0 +1,243 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// EEToProfInterfaceWrapper.inl
+//
+
+//
+// Inline implementation of wrappers around code that calls into some of the profiler's
+// callback methods.
+//
+
+// ======================================================================================
+
+#ifndef _EETOPROFEXCEPTIONINTERFACEWRAPPER_INL_
+#define _EETOPROFEXCEPTIONINTERFACEWRAPPER_INL_
+
+#include "common.h"
+
+
+// A wrapper around the profiler's callbacks: various events that signal
+// different phases of exception handling.
+class EEToProfilerExceptionInterfaceWrapper
+{
+ public:
+
+#if defined(PROFILING_SUPPORTED)
+ //
+ // Exception creation
+ //
+
+ static inline void ExceptionThrown(Thread * pThread)
+ {
+ WRAPPER_NO_CONTRACT;
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackExceptions());
+ _ASSERTE(pThread->PreemptiveGCDisabled());
+
+ // Get a reference to the object that won't move
+ OBJECTREF thrown = pThread->GetThrowable();
+
+ g_profControlBlock.pProfInterface->ExceptionThrown(
+ reinterpret_cast<ObjectID>((*(BYTE **)&thrown)));
+ END_PIN_PROFILER();
+ }
+ }
+
+ //
+ // Search phase
+ //
+
+ static inline void ExceptionSearchFunctionEnter(MethodDesc * pFunction)
+ {
+ WRAPPER_NO_CONTRACT;
+ // Notify the profiler of the function being searched for a handler.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackExceptions());
+ if (!pFunction->IsNoMetadata())
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->ExceptionSearchFunctionEnter(
+ (FunctionID) pFunction);
+ }
+ END_PIN_PROFILER();
+ }
+ }
+
+ static inline void ExceptionSearchFunctionLeave(MethodDesc * pFunction)
+ {
+ WRAPPER_NO_CONTRACT;
+ // Notify the profiler that the search of this function for a handler is complete.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackExceptions());
+ if (!pFunction->IsNoMetadata())
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->ExceptionSearchFunctionLeave();
+ }
+ END_PIN_PROFILER();
+ }
+ }
+
+ static inline void ExceptionSearchFilterEnter(MethodDesc * pFunc)
+ {
+ WRAPPER_NO_CONTRACT;
+ // Notify the profiler of the filter being entered.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackExceptions());
+ if (!pFunc->IsNoMetadata())
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->ExceptionSearchFilterEnter(
+ (FunctionID) pFunc);
+ }
+ END_PIN_PROFILER();
+ }
+ }
+
+ static inline void ExceptionSearchFilterLeave()
+ {
+ WRAPPER_NO_CONTRACT;
+ // Notify the profiler that the filter has been left.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackExceptions());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->ExceptionSearchFilterLeave();
+ END_PIN_PROFILER();
+ }
+ }
+
+ static inline void ExceptionSearchCatcherFound(MethodDesc * pFunc)
+ {
+ WRAPPER_NO_CONTRACT;
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackExceptions());
+ if (!pFunc->IsNoMetadata())
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->ExceptionSearchCatcherFound(
+ (FunctionID) pFunc);
+ }
+ END_PIN_PROFILER();
+ }
+ }
+
+ //
+ // Unwind phase
+ //
+ static inline void ExceptionUnwindFunctionEnter(MethodDesc * pFunc)
+ {
+ WRAPPER_NO_CONTRACT;
+ // Notify the profiler of the function being unwound.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackExceptions());
+ if (!pFunc->IsNoMetadata())
+ {
+ g_profControlBlock.pProfInterface->ExceptionUnwindFunctionEnter(
+ (FunctionID) pFunc);
+ }
+ END_PIN_PROFILER();
+ }
+ }
+
+ static inline void ExceptionUnwindFunctionLeave(MethodDesc * pFunction)
+ {
+ WRAPPER_NO_CONTRACT;
+ // Notify the profiler that unwinding of this function is complete.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackExceptions());
+ if (!pFunction->IsNoMetadata())
+ {
+ g_profControlBlock.pProfInterface->ExceptionUnwindFunctionLeave();
+ }
+ END_PIN_PROFILER();
+ }
+ }
+
+ static inline void ExceptionUnwindFinallyEnter(MethodDesc * pFunc)
+ {
+ WRAPPER_NO_CONTRACT;
+ // Notify the profiler of the finally block being entered.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackExceptions());
+ if (!pFunc->IsNoMetadata())
+ {
+ g_profControlBlock.pProfInterface->ExceptionUnwindFinallyEnter(
+ (FunctionID) pFunc);
+ }
+ END_PIN_PROFILER();
+ }
+ }
+
+ static inline void ExceptionUnwindFinallyLeave()
+ {
+ WRAPPER_NO_CONTRACT;
+ // Notify the profiler that the finally block has been left.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackExceptions());
+ g_profControlBlock.pProfInterface->ExceptionUnwindFinallyLeave();
+ END_PIN_PROFILER();
+ }
+ }
+
+ static inline void ExceptionCatcherEnter(Thread * pThread, MethodDesc * pFunc)
+ {
+ WRAPPER_NO_CONTRACT;
+ // Notify the profiler.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackExceptions());
+ if (!pFunc->IsNoMetadata())
+ {
+ // <TODO>Remove the thrown variable as well as the
+ // gcprotect; they are pointless.</TODO>
+
+ // Note that the callee must be aware that the ObjectID
+ // passed CAN change when gc happens.
+ OBJECTREF thrown = NULL;
+ GCPROTECT_BEGIN(thrown);
+ thrown = pThread->GetThrowable();
+ {
+ g_profControlBlock.pProfInterface->ExceptionCatcherEnter(
+ (FunctionID) pFunc,
+ reinterpret_cast<ObjectID>((*(BYTE **)&thrown)));
+ }
+ GCPROTECT_END();
+ }
+ END_PIN_PROFILER();
+ }
+ }
+
+ static inline void ExceptionCatcherLeave()
+ {
+ WRAPPER_NO_CONTRACT;
+ // Notify the profiler that the catch block has been left.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackExceptions());
+ g_profControlBlock.pProfInterface->ExceptionCatcherLeave();
+ END_PIN_PROFILER();
+ }
+ }
+
+
+#else // !PROFILING_SUPPORTED
+ static inline void ExceptionThrown(Thread * pThread) { LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionSearchFunctionEnter(MethodDesc * pFunction) { LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionSearchFunctionLeave(MethodDesc * pFunction) { LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionSearchFilterEnter(MethodDesc * pFunc) { LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionSearchFilterLeave() { LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionSearchCatcherFound(MethodDesc * pFunc) { LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionOSHandlerEnter(MethodDesc ** ppNotify, MethodDesc * pFunc) { LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionOSHandlerLeave(MethodDesc * pNotify) { LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionUnwindFunctionEnter(MethodDesc * pFunc) { LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionUnwindFunctionLeave(MethodDesc * pFunction) { LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionUnwindFinallyEnter(MethodDesc * pFunc) { LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionUnwindFinallyLeave() { LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionCatcherEnter(Thread * pThread, MethodDesc * pFunc) { LIMITED_METHOD_CONTRACT;}
+ static inline void ExceptionCatcherLeave() { LIMITED_METHOD_CONTRACT;}
+#endif // PROFILING_SUPPORTED
+};
+
+
+#endif // _EETOPROFEXCEPTIONINTERFACEWRAPPER_INL_
diff --git a/src/vm/eetwain.cpp b/src/vm/eetwain.cpp
new file mode 100644
index 0000000000..e27463c0af
--- /dev/null
+++ b/src/vm/eetwain.cpp
@@ -0,0 +1,5871 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+
+#include "eetwain.h"
+#include "dbginterface.h"
+#include "gcenv.h"
+
+#define RETURN_ADDR_OFFS 1 // in DWORDS
+
+#include "gcinfo.h"
+
+#ifdef USE_GC_INFO_DECODER
+#include "gcinfodecoder.h"
+#endif
+
+
+#define X86_INSTR_W_TEST_ESP 0x4485 // test [esp+N], eax
+#define X86_INSTR_TEST_ESP_SIB 0x24
+#define X86_INSTR_PUSH_0 0x6A // push 00, entire instruction is 0x6A00
+#define X86_INSTR_PUSH_IMM 0x68 // push NNNN,
+#define X86_INSTR_W_PUSH_IND_IMM 0x35FF // push [NNNN]
+#define X86_INSTR_CALL_REL32 0xE8 // call rel32
+#define X86_INSTR_W_CALL_IND_IMM 0x15FF // call [addr32]
+#define X86_INSTR_NOP 0x90 // nop
+#define X86_INSTR_NOP2 0x9090 // 2-byte nop
+#define X86_INSTR_NOP3_1 0x9090 // 1st word of 3-byte nop
+#define X86_INSTR_NOP3_3 0x90 // 3rd byte of 3-byte nop
+#define X86_INSTR_NOP4 0x90909090 // 4-byte nop
+#define X86_INSTR_NOP5_1 0x90909090 // 1st dword of 5-byte nop
+#define X86_INSTR_NOP5_5 0x90 // 5th byte of 5-byte nop
+#define X86_INSTR_INT3 0xCC // int3
+#define X86_INSTR_HLT 0xF4 // hlt
+#define X86_INSTR_PUSH_EBP 0x55 // push ebp
+#define X86_INSTR_W_MOV_EBP_ESP 0xEC8B // mov ebp, esp
+#define X86_INSTR_POP_ECX 0x59 // pop ecx
+#define X86_INSTR_RET 0xC2 // ret
+#define X86_INSTR_w_LEA_ESP_EBP_BYTE_OFFSET 0x658d // lea esp, [ebp-bOffset]
+#define X86_INSTR_w_LEA_ESP_EBP_DWORD_OFFSET 0xa58d // lea esp, [ebp-dwOffset]
+#define X86_INSTR_JMP_NEAR_REL32 0xE9 // near jmp rel32
+#define X86_INSTR_w_JMP_FAR_IND_IMM 0x25FF // far jmp [addr32]
+
+#ifndef USE_GC_INFO_DECODER
+
+
+#ifdef _DEBUG
+// For dumping of verbose info.
+#ifndef DACCESS_COMPILE
+static bool trFixContext = false;
+#endif
+static bool trEnumGCRefs = false;
+static bool dspPtr = false; // prints the live ptrs as reported
+#endif
+
+// NOTE: enabling compiler optimizations, even for debug builds.
+// Comment this out in order to be able to fully debug methods here.
+#if defined(_MSC_VER)
+#pragma optimize("tg", on)
+#endif
+
+__forceinline unsigned decodeUnsigned(PTR_CBYTE& src)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+#ifdef DACCESS_COMPILE
+ PTR_CBYTE begin = src;
+#endif
+
+ BYTE byte = *src++;
+ unsigned value = byte & 0x7f;
+ while (byte & 0x80)
+ {
+#ifdef DACCESS_COMPILE
+ // In DAC builds, the target data may be corrupt. Rather than return incorrect data
+ // and risk wasting time in a potentially long loop, we want to fail early and gracefully.
+ // The data is encoded with 7 value-bits per byte, and so we may need to read a maximum
+ // of 5 bytes (7*5=35) to read a full 32-bit integer.
+ if ((src - begin) > 5)
+ {
+ DacError(CORDBG_E_TARGET_INCONSISTENT);
+ }
+#endif
+
+ byte = *src++;
+ value <<= 7;
+ value += byte & 0x7f;
+ }
+ return value;
+}
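+
+// Worked example of the variable-length encoding decoded above (illustrative
+// bytes, not taken from any real GC info blob): the sequence { 0x82, 0x05 }
+// decodes as
+//     value = 0x82 & 0x7f = 2                  (0x80 set, so continue)
+//     value = (2 << 7) + (0x05 & 0x7f) = 261   (0x80 clear, so stop)
+// A single byte below 0x80 encodes itself, which is why the fastDecode*
+// macros below can skip the function call entirely in that common case.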
+
+__forceinline int decodeSigned(PTR_CBYTE& src)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+#ifdef DACCESS_COMPILE
+ PTR_CBYTE begin = src;
+#endif
+
+ BYTE byte = *src++;
+ BYTE first = byte;
+ int value = byte & 0x3f;
+ while (byte & 0x80)
+ {
+#ifdef DACCESS_COMPILE
+ // In DAC builds, the target data may be corrupt. Rather than return incorrect data
+ // and risk wasting time in a potentially long loop, we want to fail early and gracefully.
+ // The data is encoded with 7 value-bits per byte, and so we may need to read a maximum
+ // of 5 bytes (7*5=35) to read a full 32-bit integer.
+ if ((src - begin) > 5)
+ {
+ DacError(CORDBG_E_TARGET_INCONSISTENT);
+ }
+#endif
+
+ byte = *src++;
+ value <<= 7;
+ value += byte & 0x7f;
+ }
+ if (first & 0x40)
+ value = -value;
+ return value;
+}
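+
+// The signed variant reserves bit 0x40 of the *first* byte as the sign, so
+// the first byte contributes only 6 value bits. Worked example (illustrative
+// bytes): { 0xC2, 0x05 } gives value = ((0xC2 & 0x3f) << 7) + 0x05 = 261,
+// and since the first byte has 0x40 set, the result is -261.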
+
+// Fast versions of the above, with one iteration of the loop unrolled
+#define fastDecodeUnsigned(src) (((*(src) & 0x80) == 0) ? (unsigned) (*(src)++) : decodeUnsigned((src)))
+#define fastDecodeSigned(src) (((*(src) & 0xC0) == 0) ? (unsigned) (*(src)++) : decodeSigned((src)))
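+
+// Note that the signed fast path tests 0xC0 rather than 0x80: a first byte
+// with 0x40 set encodes a negative value even when the continuation bit
+// (0x80) is clear, so only bytes in [0x00, 0x3F] may bypass decodeSigned.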
+
+// Fast skipping past encoded integers
+#ifndef DACCESS_COMPILE
+#define fastSkipUnsigned(src) { while ((*(src)++) & 0x80) { } }
+#define fastSkipSigned(src) { while ((*(src)++) & 0x80) { } }
+#else
+// In DAC builds we want to trade off a little perf in the common case for reliability against corrupt data.
+#define fastSkipUnsigned(src) (decodeUnsigned(src))
+#define fastSkipSigned(src) (decodeSigned(src))
+#endif
+
+
+/*****************************************************************************
+ *
+ * Decodes the methodInfoPtr and returns the decoded information
+ * in the hdrInfo struct. The curOffset parameter is the native code
+ * offset of the PC within the active method.
+ */
+static size_t crackMethodInfoHdr(PTR_VOID methodInfoPtr,
+ unsigned curOffset,
+ hdrInfo * infoPtr)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ PTR_CBYTE table = PTR_CBYTE(methodInfoPtr);
+#if VERIFY_GC_TABLES
+ _ASSERTE(*castto(table, unsigned short *)++ == 0xFEEF);
+#endif
+
+ infoPtr->methodSize = fastDecodeUnsigned(table);
+
+ _ASSERTE(curOffset >= 0);
+ _ASSERTE(curOffset <= infoPtr->methodSize);
+
+ /* Decode the InfoHdr */
+
+ InfoHdr header;
+ table = decodeHeader(table, &header);
+
+ BOOL hasArgTabOffset = FALSE;
+ if (header.untrackedCnt == HAS_UNTRACKED)
+ {
+ hasArgTabOffset = TRUE;
+ header.untrackedCnt = fastDecodeUnsigned(table);
+ }
+
+ if (header.varPtrTableSize == HAS_VARPTR)
+ {
+ hasArgTabOffset = TRUE;
+ header.varPtrTableSize = fastDecodeUnsigned(table);
+ }
+
+ if (header.gsCookieOffset == HAS_GS_COOKIE_OFFSET)
+ {
+ header.gsCookieOffset = fastDecodeUnsigned(table);
+ }
+
+ if (header.syncStartOffset == HAS_SYNC_OFFSET)
+ {
+ header.syncStartOffset = decodeUnsigned(table);
+ header.syncEndOffset = decodeUnsigned(table);
+
+ _ASSERTE(header.syncStartOffset != INVALID_SYNC_OFFSET && header.syncEndOffset != INVALID_SYNC_OFFSET);
+ _ASSERTE(header.syncStartOffset < header.syncEndOffset);
+ }
+
+
+ /* Some sanity checks on header */
+
+ _ASSERTE( header.prologSize +
+ (size_t)(header.epilogCount*header.epilogSize) <= infoPtr->methodSize);
+ _ASSERTE( header.epilogCount == 1 || !header.epilogAtEnd);
+
+ _ASSERTE( header.untrackedCnt <= header.argCount+header.frameSize);
+
+ _ASSERTE( header.ebpSaved || !(header.ebpFrame || header.doubleAlign));
+ _ASSERTE(!header.ebpFrame || !header.doubleAlign );
+ _ASSERTE( header.ebpFrame || !header.security );
+ _ASSERTE( header.ebpFrame || !header.handlers );
+ _ASSERTE( header.ebpFrame || !header.localloc );
+ _ASSERTE( header.ebpFrame || !header.editNcontinue); // <TODO> : Esp frames NYI for EnC</TODO>
+
+ /* Initialize the infoPtr struct */
+
+ infoPtr->argSize = header.argCount * 4;
+ infoPtr->ebpFrame = header.ebpFrame;
+ infoPtr->interruptible = header.interruptible;
+
+ infoPtr->prologSize = header.prologSize;
+ infoPtr->epilogSize = header.epilogSize;
+ infoPtr->epilogCnt = header.epilogCount;
+ infoPtr->epilogEnd = header.epilogAtEnd;
+
+ infoPtr->untrackedCnt = header.untrackedCnt;
+ infoPtr->varPtrTableSize = header.varPtrTableSize;
+ infoPtr->gsCookieOffset = header.gsCookieOffset;
+
+ infoPtr->syncStartOffset = header.syncStartOffset;
+ infoPtr->syncEndOffset = header.syncEndOffset;
+
+ infoPtr->doubleAlign = header.doubleAlign;
+ infoPtr->securityCheck = header.security;
+ infoPtr->handlers = header.handlers;
+ infoPtr->localloc = header.localloc;
+ infoPtr->editNcontinue = header.editNcontinue;
+ infoPtr->varargs = header.varargs;
+ infoPtr->profCallbacks = header.profCallbacks;
+ infoPtr->genericsContext = header.genericsContext;
+ infoPtr->genericsContextIsMethodDesc = header.genericsContextIsMethodDesc;
+ infoPtr->isSpeculativeStackWalk = false;
+
+ /* Are we within the prolog of the method? */
+
+ if (curOffset < infoPtr->prologSize)
+ {
+ infoPtr->prologOffs = curOffset;
+ }
+ else
+ {
+ infoPtr->prologOffs = hdrInfo::NOT_IN_PROLOG;
+ }
+
+ /* Assume we're not in the epilog of the method */
+
+ infoPtr->epilogOffs = hdrInfo::NOT_IN_EPILOG;
+
+ /* Are we within an epilog of the method? */
+
+ if (infoPtr->epilogCnt)
+ {
+ unsigned epilogStart;
+
+ if (infoPtr->epilogCnt > 1 || !infoPtr->epilogEnd)
+ {
+#if VERIFY_GC_TABLES
+ _ASSERTE(*castto(table, unsigned short *)++ == 0xFACE);
+#endif
+ epilogStart = 0;
+ for (unsigned i = 0; i < infoPtr->epilogCnt; i++)
+ {
+ epilogStart += fastDecodeUnsigned(table);
+ if (curOffset > epilogStart &&
+ curOffset < epilogStart + infoPtr->epilogSize)
+ {
+ infoPtr->epilogOffs = curOffset - epilogStart;
+ }
+ }
+ }
+ else
+ {
+ epilogStart = infoPtr->methodSize - infoPtr->epilogSize;
+
+ if (curOffset > epilogStart &&
+ curOffset < epilogStart + infoPtr->epilogSize)
+ {
+ infoPtr->epilogOffs = curOffset - epilogStart;
+ }
+ }
+
+ infoPtr->syncEpilogStart = epilogStart;
+ }
+
+ unsigned argTabOffset = INVALID_ARGTAB_OFFSET;
+ if (hasArgTabOffset)
+ {
+ argTabOffset = fastDecodeUnsigned(table);
+ }
+ infoPtr->argTabOffset = argTabOffset;
+
+ size_t frameDwordCount = header.frameSize;
+
+ /* Set the rawStackSize to the number of bytes that it bumps ESP */
+
+ infoPtr->rawStkSize = (UINT)(frameDwordCount * sizeof(size_t));
+
+ /* Calculate the callee saves regMask and adjust stackSize to */
+ /* include the callee saves register spills */
+
+ unsigned savedRegs = RM_NONE;
+ unsigned savedRegsCount = 0;
+
+ if (header.ediSaved)
+ {
+ savedRegsCount++;
+ savedRegs |= RM_EDI;
+ }
+ if (header.esiSaved)
+ {
+ savedRegsCount++;
+ savedRegs |= RM_ESI;
+ }
+ if (header.ebxSaved)
+ {
+ savedRegsCount++;
+ savedRegs |= RM_EBX;
+ }
+ if (header.ebpSaved)
+ {
+ savedRegsCount++;
+ savedRegs |= RM_EBP;
+ }
+
+ infoPtr->savedRegMask = (RegMask)savedRegs;
+
+ infoPtr->savedRegsCountExclFP = savedRegsCount;
+ if (header.ebpFrame || header.doubleAlign)
+ {
+ _ASSERTE(header.ebpSaved);
+ infoPtr->savedRegsCountExclFP = savedRegsCount - 1;
+ }
+
+ frameDwordCount += savedRegsCount;
+
+ infoPtr->stackSize = (UINT)(frameDwordCount * sizeof(size_t));
+
+ _ASSERTE(infoPtr->gsCookieOffset == INVALID_GS_COOKIE_OFFSET ||
+ (infoPtr->gsCookieOffset < infoPtr->stackSize) &&
+ ((header.gsCookieOffset % sizeof(void*)) == 0));
+
+ return table - PTR_CBYTE(methodInfoPtr);
+}
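+
+// Usage sketch (the names gcInfoPtr and relOffset here are hypothetical, for
+// illustration only):
+//     hdrInfo info;
+//     size_t tableOffs = crackMethodInfoHdr(gcInfoPtr, relOffset, &info);
+// On return, 'info' describes the frame (sizes, flags, prolog/epilog
+// position) and 'tableOffs' is the offset within the blob at which the
+// pointer tables begin.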
+
+/*****************************************************************************/
+
+// We do a "pop eax; jmp eax" to return from a fault or finally handler
+const size_t END_FIN_POP_STACK = sizeof(TADDR);
+
+
+// The offset (in bytes) from EBP for the security object on the stack
+inline size_t GetSecurityObjectOffset(hdrInfo * info)
+{
+ SUPPORTS_DAC;
+
+ _ASSERTE(info->securityCheck && info->ebpFrame);
+
+ unsigned position = info->savedRegsCountExclFP + 1;
+ return position * sizeof(TADDR);
+}
+
+inline
+size_t GetLocallocSPOffset(hdrInfo * info)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(info->localloc && info->ebpFrame);
+
+ unsigned position = info->savedRegsCountExclFP +
+ info->securityCheck +
+ 1;
+ return position * sizeof(TADDR);
+}
+
+inline
+size_t GetParamTypeArgOffset(hdrInfo * info)
+{
+ SUPPORTS_DAC;
+
+ _ASSERTE((info->genericsContext || info->handlers) && info->ebpFrame);
+
+ unsigned position = info->savedRegsCountExclFP +
+ info->securityCheck +
+ info->localloc +
+ 1; // For CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG
+ return position * sizeof(TADDR);
+}
+
+inline size_t GetStartShadowSPSlotsOffset(hdrInfo * info)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(info->handlers && info->ebpFrame);
+
+ return GetParamTypeArgOffset(info) +
+ sizeof(TADDR); // Slot for end-of-last-executed-filter
+}
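+
+// Putting the offset helpers above together, the layout below EBP implied by
+// them (a sketch, with all optional slots present; higher addresses first) is:
+//
+//     saved EBP                           <- EBP
+//     callee-saved registers              (savedRegsCountExclFP slots)
+//     security object                     (if securityCheck)
+//     saved ESP for localloc              (if localloc)
+//     param type arg / generics context   (if genericsContext || handlers)
+//     end-of-last-executed-filter slot
+//     shadow SP slots                     (one per EH nesting level, zero-terminated)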
+
+/*****************************************************************************
+ * Returns the start of the hidden slots for the shadowSP for functions
+ * with exception handlers. There is one slot per nesting level; the slots
+ * start near Ebp and are zero-terminated after the active slots.
+ */
+
+inline
+PTR_TADDR GetFirstBaseSPslotPtr(TADDR ebp, hdrInfo * info)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(info->handlers && info->ebpFrame);
+
+ size_t offsetFromEBP = GetStartShadowSPSlotsOffset(info)
+ + sizeof(TADDR); // to get to the *start* of the next slot
+
+ return PTR_TADDR(ebp - offsetFromEBP);
+}
+
+inline size_t GetEndShadowSPSlotsOffset(hdrInfo * info, unsigned maxHandlerNestingLevel)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(info->handlers && info->ebpFrame);
+
+ unsigned numberOfShadowSPSlots = maxHandlerNestingLevel +
+ 1 + // For zero-termination
+ 1; // For a filter (which can be active at the same time as a catch/finally handler)
+
+ return GetStartShadowSPSlotsOffset(info) +
+ (numberOfShadowSPSlots * sizeof(TADDR));
+}
+
+/*****************************************************************************
+ * returns the base frame pointer corresponding to the target nesting level.
+ */
+
+inline
+TADDR GetOutermostBaseFP(TADDR ebp, hdrInfo * info)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // We are not taking double alignment into account here. This is
+ // safe because the JIT currently bails on double alignment if there
+ // are handlers or localloc.
+ _ASSERTE(!info->doubleAlign);
+ if (info->localloc)
+ {
+ // If the function uses localloc we will fetch the ESP from the localloc
+ // slot.
+ PTR_TADDR pLocalloc = PTR_TADDR(ebp - GetLocallocSPOffset(info));
+
+ return (*pLocalloc);
+ }
+ else
+ {
+ // Default, go back all the method's local stack size
+ return ebp - info->stackSize + sizeof(int);
+ }
+}
+
+/*****************************************************************************
+ *
+ * For functions with handlers, checks if it is currently in a handler.
+ * Exactly one of unwindESP and unwindLevel specifies the target nesting level.
+ * If unwindLevel is specified, info about the funclet at that nesting level
+ * will be returned. (Use if you are interested in a specific nesting level.)
+ * If unwindESP is specified, info for the nesting level invoked before the
+ * stack reached unwindESP will be returned. (Use if you have a specific ESP
+ * value during stack walking.)
+ *
+ * *pBaseSP is set to the base SP (base of the stack on entry to
+ * the current funclet) corresponding to the target nesting level.
+ * *pNestLevel is set to the nesting level of the target (useful if
+ * unwindESP != IGNORE_VAL).
+ * *pHasInnerFilter will be set to true (only when unwindESP != IGNORE_VAL) if a
+ * filter is currently active, but the target nesting level is an outer nesting level.
+ * *pHadInnerFilter is set to true if the last use of the frame was to execute
+ * a filter. This mainly affects GC lifetime reporting.
+ */
+
+enum FrameType
+{
+ FR_NORMAL, // Normal method frame - no exceptions currently active
+ FR_FILTER, // Frame-let of a filter
+ FR_HANDLER, // Frame-let of a callable catch/fault/finally
+
+ FR_INVALID, // Invalid frame (for speculative stackwalks)
+};
+
+enum { IGNORE_VAL = -1 };
+
+FrameType GetHandlerFrameInfo(hdrInfo * info,
+ TADDR frameEBP,
+ TADDR unwindESP,
+ DWORD unwindLevel,
+ TADDR * pBaseSP = NULL, /* OUT */
+ DWORD * pNestLevel = NULL, /* OUT */
+ bool * pHasInnerFilter = NULL, /* OUT */
+ bool * pHadInnerFilter = NULL) /* OUT */
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ _ASSERTE(info->ebpFrame && info->handlers);
+ // One and only one of them should be IGNORE_VAL
+ _ASSERTE((unwindESP == (TADDR) IGNORE_VAL) !=
+ (unwindLevel == (DWORD) IGNORE_VAL));
+ _ASSERTE(pHasInnerFilter == NULL || unwindESP != (TADDR) IGNORE_VAL);
+
+ // Many of the conditions that we'd like to assert cannot be asserted in the case that we're
+ // in the middle of a stackwalk seeded by a profiler, since such seeds can't be trusted
+ // (profilers are external, untrusted sources). So during profiler walks, we test the condition
+ // and throw an exception if it's not met. Otherwise, we just assert the condition.
+ #define FAIL_IF_SPECULATIVE_WALK(condition) \
+ if (info->isSpeculativeStackWalk) \
+ { \
+ if (!(condition)) \
+ { \
+ return FR_INVALID; \
+ } \
+ } \
+ else \
+ { \
+ _ASSERTE(condition); \
+ }
+
+ PTR_TADDR pFirstBaseSPslot = GetFirstBaseSPslotPtr(frameEBP, info);
+ TADDR baseSP = GetOutermostBaseFP(frameEBP, info);
+ bool nonLocalHandlers = false; // Are the funclets invoked by EE (instead of managed code itself)
+ bool hasInnerFilter = false;
+ bool hadInnerFilter = false;
+
+ /* Get the last non-zero slot >= unwindESP, or lvl<unwindLevel.
+ Also do some sanity checks */
+
+ // The shadow slots contain the SP of the nested EH clauses currently active on the stack.
+ // The slots grow towards lower addresses on the stack and are terminated by a NULL entry.
+ // Since each subsequent slot contains the SP of a more nested EH clause, the contents of the slots are
+ // expected to be in decreasing order.
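+ //
+ // For example (illustrative values): with an outermost baseSP of 0x1000 and
+ // shadow slots { 0x0F80, 0x0F00, 0 }, two nested handlers are active; the
+ // walk below stops at the zero terminator with baseSP = 0x0F00 and lvl = 2.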
+ size_t lvl;
+ PTR_TADDR pSlot;
+ for(lvl = 0, pSlot = pFirstBaseSPslot;
+ *pSlot && lvl < unwindLevel;
+ pSlot--, lvl++)
+ {
+ // Filters can't have inner funclets
+ FAIL_IF_SPECULATIVE_WALK(!(baseSP & ICodeManager::SHADOW_SP_IN_FILTER));
+
+ TADDR curSlotVal = *pSlot;
+
+ // Each shadowSP has to be less than the one before it unless the stack has been unwound.
+ FAIL_IF_SPECULATIVE_WALK(baseSP > curSlotVal ||
+ (baseSP == curSlotVal && pSlot == pFirstBaseSPslot));
+
+ if (curSlotVal == LCL_FINALLY_MARK)
+ {
+ // Locally called finally
+ baseSP -= sizeof(TADDR);
+ }
+ else
+ {
+ // Is this a funclet we unwound before (can only happen with filters) ?
+ // If unwindESP is specified, normally we expect it to be the last entry in the shadow slot array.
+ // Or, if there is a filter, we expect unwindESP to be the second last entry. However, this may
+ // not be the case in DAC builds. For example, the user can use .cxr in an EH clause to set a
+ // CONTEXT captured in the try clause. In this case, unwindESP will be the ESP of the parent
+ // function, but the shadow slot array will contain the SP of the EH clause, which is closer to
+ // the leaf than the parent method.
+
+ if (unwindESP != (TADDR) IGNORE_VAL &&
+ unwindESP > END_FIN_POP_STACK +
+ (curSlotVal & ~ICodeManager::SHADOW_SP_BITS))
+ {
+ // In non-DAC builds, the only time unwindESP is closer to the root than entries in the shadow
+ // slot array is when the last entry in the array is for a filter. Also, filters can't have
+ // nested handlers.
+ if ((pSlot[0] & ICodeManager::SHADOW_SP_IN_FILTER) &&
+ (pSlot[-1] == 0) &&
+ !(baseSP & ICodeManager::SHADOW_SP_IN_FILTER))
+ {
+ if (pSlot[0] & ICodeManager::SHADOW_SP_FILTER_DONE)
+ hadInnerFilter = true;
+ else
+ hasInnerFilter = true;
+ break;
+ }
+ else
+ {
+#if defined(DACCESS_COMPILE)
+ // In DAC builds, this could happen. We just need to bail out of this loop early.
+ break;
+#else // !DACCESS_COMPILE
+ // In non-DAC builds, this is an error.
+ FAIL_IF_SPECULATIVE_WALK(FALSE);
+#endif // DACCESS_COMPILE
+ }
+ }
+
+ nonLocalHandlers = true;
+ baseSP = curSlotVal;
+ }
+ }
+
+ if (unwindESP != (TADDR) IGNORE_VAL)
+ {
+ FAIL_IF_SPECULATIVE_WALK(baseSP >= unwindESP ||
+ baseSP == unwindESP - sizeof(TADDR)); // About to locally call a finally
+
+ if (baseSP < unwindESP) // About to locally call a finally
+ baseSP = unwindESP;
+ }
+ else
+ {
+ FAIL_IF_SPECULATIVE_WALK(lvl == unwindLevel); // unwindLevel must be currently active on stack
+ }
+
+ if (pBaseSP)
+ *pBaseSP = baseSP & ~ICodeManager::SHADOW_SP_BITS;
+
+ if (pNestLevel)
+ {
+ *pNestLevel = (DWORD)lvl;
+ }
+
+ if (pHasInnerFilter)
+ *pHasInnerFilter = hasInnerFilter;
+
+ if (pHadInnerFilter)
+ *pHadInnerFilter = hadInnerFilter;
+
+ if (baseSP & ICodeManager::SHADOW_SP_IN_FILTER)
+ {
+ FAIL_IF_SPECULATIVE_WALK(!hasInnerFilter); // nested filters not allowed
+ return FR_FILTER;
+ }
+ else if (nonLocalHandlers)
+ {
+ return FR_HANDLER;
+ }
+ else
+ {
+ return FR_NORMAL;
+ }
+
+ #undef FAIL_IF_SPECULATIVE_WALK
+}
+
+// Returns the number of bytes at the beginning of the stack frame that shouldn't be
+// modified by an EnC. This is everything except the space for locals and temporaries.
+inline size_t GetSizeOfFrameHeaderForEnC(hdrInfo * info)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // See comment above Compiler::lvaAssignFrameOffsets() in src\jit\il\lclVars.cpp
+ // for frame layout
+
+ // EnC supports increasing the maximum handler nesting level by always
+ // assuming that the max is MAX_EnC_HANDLER_NESTING_LEVEL. Methods with
+ // a higher max cannot be updated by EnC
+
+ // Take the offset (from EBP) of the last slot of the header, plus one for the EBP slot itself
+ // to get the total size of the header.
+ return sizeof(TADDR) +
+ GetEndShadowSPSlotsOffset(info, MAX_EnC_HANDLER_NESTING_LEVEL);
+}
+#endif // !USE_GC_INFO_DECODER
+
+#ifndef DACCESS_COMPILE
+
+/*****************************************************************************
+ *
+ * Setup context to enter an exception handler (a 'catch' block).
+ * This is the last chance for the runtime support to do fixups in
+ * the context before execution continues inside a filter, catch handler,
+ * or finally.
+ */
+void EECodeManager::FixContext( ContextType ctxType,
+ EHContext *ctx,
+ EECodeInfo *pCodeInfo,
+ DWORD dwRelOffset,
+ DWORD nestingLevel,
+ OBJECTREF thrownObject,
+ CodeManState *pState,
+ size_t ** ppShadowSP,
+ size_t ** ppEndRegion)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ _ASSERTE((ctxType == FINALLY_CONTEXT) == (thrownObject == NULL));
+
+#ifdef _TARGET_X86_
+
+ _ASSERTE(sizeof(CodeManStateBuf) <= sizeof(pState->stateBuf));
+ CodeManStateBuf * stateBuf = (CodeManStateBuf*)pState->stateBuf;
+
+ /* Extract the necessary information from the info block header */
+
+ stateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(pCodeInfo->GetGCInfo(),
+ dwRelOffset,
+ &stateBuf->hdrInfoBody);
+ pState->dwIsSet = 1;
+
+#ifdef _DEBUG
+ if (trFixContext) {
+ printf("FixContext [%s][%s] for %s.%s: ",
+ stateBuf->hdrInfoBody.ebpFrame?"ebp":" ",
+ stateBuf->hdrInfoBody.interruptible?"int":" ",
+ "UnknownClass","UnknownMethod");
+ fflush(stdout);
+ }
+#endif
+
+ /* make sure that we have an ebp stack frame */
+
+ _ASSERTE(stateBuf->hdrInfoBody.ebpFrame);
+ _ASSERTE(stateBuf->hdrInfoBody.handlers); // <TODO>@TODO : This will always be set. Remove it</TODO>
+
+ TADDR baseSP;
+ GetHandlerFrameInfo(&stateBuf->hdrInfoBody, ctx->Ebp,
+ ctxType == FILTER_CONTEXT ? ctx->Esp : IGNORE_VAL,
+ ctxType == FILTER_CONTEXT ? (DWORD) IGNORE_VAL : nestingLevel,
+ &baseSP,
+ &nestingLevel);
+
+ _ASSERTE((size_t)ctx->Ebp >= baseSP);
+ _ASSERTE(baseSP >= (size_t)ctx->Esp);
+
+ ctx->Esp = (DWORD)baseSP;
+
+ // EE will write Esp to **pShadowSP before jumping to handler
+
+ PTR_TADDR pBaseSPslots =
+ GetFirstBaseSPslotPtr(ctx->Ebp, &stateBuf->hdrInfoBody);
+ *ppShadowSP = (size_t *)&pBaseSPslots[-(int) nestingLevel ];
+ pBaseSPslots[-(int)(nestingLevel+1)] = 0; // Zero out the next slot
+
+ // EE will write the end offset of the filter
+ if (ctxType == FILTER_CONTEXT)
+ *ppEndRegion = (size_t *)pBaseSPslots + 1;
+
+ /* This is just a simple assignment of thrownObject to ctx->Eax;
+ just pretend the cast goo isn't there.
+ */
+
+ *((OBJECTREF*)&(ctx->Eax)) = thrownObject;
+
+#else // !_TARGET_X86_
+ _ASSERTE(!"@NYI - EECodeManager::FixContext (EETwain.cpp)");
+#endif // _TARGET_X86_
+}
+
+
+
+
+
+/*****************************************************************************/
+
+bool VarIsInReg(ICorDebugInfo::VarLoc varLoc)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch(varLoc.vlType)
+ {
+ case ICorDebugInfo::VLT_REG:
+ case ICorDebugInfo::VLT_REG_REG:
+ case ICorDebugInfo::VLT_REG_STK:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+#ifdef EnC_SUPPORTED
+/*****************************************************************************
+ * Last chance for the runtime support to do fixups in the context
+ * before execution continues inside an EnC updated function.
+ * It also adjusts ESP and munges on the stack, so the caller has to make
+ * sure that that stack region isn't needed (by doing a localloc).
+ * Also, if this returns EnC_FAIL, we should not have munged the
+ * context, i.e. this is a transacted commit.
+ * The plan of attack is:
+ * 1) Error checking up front. If we get through here, everything
+ * else should work
+ * 2) Get all the info about current variables, registers, etc
+ * 3) zero out the stack frame - this'll initialize _all_ variables
+ * 4) Put the variables from step 3 into their new locations.
+ *
+ * Note that while we use the ShuffleVariablesGet/Set methods, they don't
+ * have any info/logic that's internal to the runtime: another code manager
+ * could easily duplicate what they do, which is why we're calling into them.
+ */
+
+HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx,
+ EECodeInfo * pOldCodeInfo,
+ const ICorDebugInfo::NativeVarInfo * oldMethodVars,
+ SIZE_T oldMethodVarsCount,
+ EECodeInfo * pNewCodeInfo,
+ const ICorDebugInfo::NativeVarInfo * newMethodVars,
+ SIZE_T newMethodVarsCount)
+{
+ CONTRACTL {
+ DISABLED(NOTHROW);
+ DISABLED(GC_NOTRIGGER);
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Grab a copy of the context before the EnC update.
+ T_CONTEXT oldCtx = *pCtx;
+
+#if defined(_TARGET_X86_)
+ LOG((LF_CORDB, LL_INFO100, "EECM::FixContextForEnC\n"));
+
+ /* Extract the necessary information from the info block header */
+
+ hdrInfo oldInfo, newInfo;
+
+ crackMethodInfoHdr(pOldCodeInfo->GetGCInfo(),
+ pOldCodeInfo->GetRelOffset(),
+ &oldInfo);
+
+ crackMethodInfoHdr(pNewCodeInfo->GetGCInfo(),
+ pNewCodeInfo->GetRelOffset(),
+ &newInfo);
+
+ //1) Error checking up front. If we get through here, everything
+ // else should work
+
+ if (!oldInfo.editNcontinue || !newInfo.editNcontinue) {
+ LOG((LF_ENC, LL_INFO100, "**Error** EECM::FixContextForEnC EnC_INFOLESS_METHOD\n"));
+ return CORDBG_E_ENC_INFOLESS_METHOD;
+ }
+
+ if (!oldInfo.ebpFrame || !newInfo.ebpFrame) {
+ LOG((LF_ENC, LL_INFO100, "**Error** EECM::FixContextForEnC Esp frames NYI\n"));
+ return E_FAIL; // Esp frames NYI
+ }
+
+ if (pCtx->Esp != pCtx->Ebp - oldInfo.stackSize + sizeof(DWORD)) {
+ LOG((LF_ENC, LL_INFO100, "**Error** EECM::FixContextForEnC stack should be empty\n"));
+ return E_FAIL; // stack should be empty - <TODO> @TODO : Barring localloc</TODO>
+ }
+
+ if (oldInfo.handlers)
+ {
+ bool hasInnerFilter;
+ TADDR baseSP;
+ FrameType frameType = GetHandlerFrameInfo(&oldInfo, pCtx->Ebp,
+ pCtx->Esp, IGNORE_VAL,
+ &baseSP, NULL, &hasInnerFilter);
+ _ASSERTE(frameType != FR_INVALID);
+ _ASSERTE(!hasInnerFilter); // FixContextForEnC() is called for bottommost funclet
+
+ // If the method is in a funclet, and if the frame size grows, we are in trouble.
+
+ if (frameType != FR_NORMAL)
+ {
+ /* <TODO> @TODO : What if the new method offset is in a funclet,
+ and the old is not, or the nesting level changed, etc </TODO> */
+
+ if (oldInfo.stackSize != newInfo.stackSize) {
+ LOG((LF_ENC, LL_INFO100, "**Error** EECM::FixContextForEnC stack size mismatch\n"));
+ return CORDBG_E_ENC_IN_FUNCLET;
+ }
+ }
+ }
+
+ /* @TODO: Check if we have grown out of space for locals, in the face of localloc */
+ _ASSERTE(!oldInfo.localloc && !newInfo.localloc);
+
+ // Always reserve space for the securityCheck slot
+ _ASSERTE(oldInfo.securityCheck && newInfo.securityCheck);
+
+ // @TODO: If nesting level grows above the MAX_EnC_HANDLER_NESTING_LEVEL,
+ // we should return EnC_NESTED_HANDLERS
+ _ASSERTE(oldInfo.handlers && newInfo.handlers);
+
+ LOG((LF_ENC, LL_INFO100, "EECM::FixContextForEnC: Checks out\n"));
+
+#elif defined(_TARGET_AMD64_)
+
+ // Strategy for zeroing out the frame on x64:
+ //
+ // The stack frame looks like this (stack grows up)
+ //
+ // =======================================
+ // <--- RSP == RBP (invariant: localalloc disallowed before remap)
+ // Arguments for next call (if there is one)
+ // PSPSym (optional)
+ // JIT temporaries (if any)
+ // Security object (if any)
+ // Local variables (if any)
+ // ---------------------------------------
+ // Frame header (stuff we must preserve, such as bool for synchronized
+ // methods, saved RBP, etc.)
+ // Return address (also included in frame header)
+ // ---------------------------------------
+ // Arguments for this frame (that's getting remapped). Will naturally be preserved
+ // since fixed-frame size doesn't include this.
+ // =======================================
+ //
+ // Goal: Zero out everything AFTER (above) frame header.
+ //
+ // How do we find this stuff?
+ //
+ // EECodeInfo::GetFixedStackSize() gives us the full size from the top ("Arguments
+ // for next call") all the way down to and including Return Address.
+ //
+ // GetSizeOfEditAndContinuePreservedArea() gives us the size in bytes of the
+ // frame header at the bottom.
+ //
+ // So we start at RSP, and zero out:
+ // GetFixedStackSize() - GetSizeOfEditAndContinuePreservedArea() bytes.
+ //
+ // We'll need to restore PSPSym; location gotten from GCInfo.
+ // We'll need to copy security object; location gotten from GCInfo.
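+ //
+ // Numeric sketch (illustrative sizes only): if GetFixedStackSize() is 0x60
+ // and GetSizeOfEditAndContinuePreservedArea() is 0x18, we zero the 0x48
+ // bytes from RSP up to, but not including, the frame header.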
+
+ // GCInfo for old method
+ GcInfoDecoder oldGcDecoder(
+ dac_cast<PTR_CBYTE>(pOldCodeInfo->GetGCInfo()),
+ GcInfoDecoderFlags(DECODE_SECURITY_OBJECT | DECODE_PSP_SYM | DECODE_EDIT_AND_CONTINUE),
+ 0 // Instruction offset (not needed)
+ );
+
+ // GCInfo for new method
+ GcInfoDecoder newGcDecoder(
+ dac_cast<PTR_CBYTE>(pNewCodeInfo->GetGCInfo()),
+ GcInfoDecoderFlags(DECODE_SECURITY_OBJECT | DECODE_PSP_SYM | DECODE_EDIT_AND_CONTINUE),
+ 0 // Instruction offset (not needed)
+ );
+
+ UINT32 oldSizeOfPreservedArea = oldGcDecoder.GetSizeOfEditAndContinuePreservedArea();
+ UINT32 newSizeOfPreservedArea = newGcDecoder.GetSizeOfEditAndContinuePreservedArea();
+
+ // This ensures the JIT generated EnC compliant code.
+ if ((oldSizeOfPreservedArea == NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA) ||
+ (newSizeOfPreservedArea == NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA))
+ {
+ _ASSERTE(!"FixContextForEnC called on a non-EnC-compliant method frame");
+ return CORDBG_E_ENC_INFOLESS_METHOD;
+ }
+
+ // JIT is required to emit frame register for EnC-compliant code
+ _ASSERTE(pOldCodeInfo->HasFrameRegister());
+ _ASSERTE(pNewCodeInfo->HasFrameRegister());
+
+ TADDR oldStackBase = GetSP(&oldCtx);
+
+ // This verifies no localallocs were used in the old method. (RBP == RSP for
+ // EnC-compliant x64 code.)
+ if (oldStackBase != oldCtx.Rbp)
+ return E_FAIL;
+
+ // EnC remap inside handlers is not supported
+ if (pOldCodeInfo->IsFunclet() || pNewCodeInfo->IsFunclet())
+ return CORDBG_E_ENC_IN_FUNCLET;
+
+ if (oldSizeOfPreservedArea != newSizeOfPreservedArea)
+ {
+ _ASSERTE(!"FixContextForEnC called with method whose frame header size changed from old to new version.");
+ return E_FAIL;
+ }
+
+ // Note: we cannot assert anything about the relationship between oldFixedStackSize
+ // and newFixedStackSize. It's possible the edited frame grows (new locals) or
+ // shrinks (less temporaries).
+
+ DWORD oldFixedStackSize = pOldCodeInfo->GetFixedStackSize();
+ DWORD newFixedStackSize = pNewCodeInfo->GetFixedStackSize();
+
+ TADDR callerSP = oldStackBase + oldFixedStackSize;
+
+ // If the old code saved a security object, store the object's reference now.
+ OBJECTREF securityObject = NULL;
+ INT32 nOldSecurityObjectStackSlot = oldGcDecoder.GetSecurityObjectStackSlot();
+ if (nOldSecurityObjectStackSlot != NO_SECURITY_OBJECT)
+ {
+ securityObject = ObjectToOBJECTREF(*PTR_PTR_Object(callerSP + nOldSecurityObjectStackSlot));
+ }
+
+#ifdef _DEBUG
+ // If the old method has a PSPSym, then its value should == FP
+ INT32 nOldPspSymStackSlot = oldGcDecoder.GetPSPSymStackSlot();
+ if (nOldPspSymStackSlot != NO_PSP_SYM)
+ {
+ // Read the PSP.
+ TADDR oldPSP = *PTR_TADDR(oldStackBase + nOldPspSymStackSlot);
+
+ // Now we're set up to assert that PSPSym's value == FP
+ _ASSERTE(oldPSP == GetFP(&oldCtx));
+ }
+#endif // _DEBUG
+
+#else
+ PORTABILITY_ASSERT("Edit-and-continue not enabled on this platform.");
+#endif
+
+ // 2) Get all the info about current variables, registers, etc
+
+ const ICorDebugInfo::NativeVarInfo * pOldVar;
+
+ // sorted by varNumber
+ ICorDebugInfo::NativeVarInfo * oldMethodVarsSorted = NULL;
+ ICorDebugInfo::NativeVarInfo * oldMethodVarsSortedBase = NULL;
+ ICorDebugInfo::NativeVarInfo *newMethodVarsSorted = NULL;
+ ICorDebugInfo::NativeVarInfo *newMethodVarsSortedBase = NULL;
+
+ SIZE_T *rgVal1 = NULL;
+ SIZE_T *rgVal2 = NULL;
+
+ {
+ SIZE_T local;
+
+ // We'll need to sort the old native var info by variable number, since the
+ // order of them isn't necessarily the same. We'll use the number as the key.
+ // We will assume we may have hidden arguments (which have negative values as the index)
+
+ unsigned oldNumVars = unsigned(-ICorDebugInfo::UNKNOWN_ILNUM);
+ for (pOldVar = oldMethodVars, local = 0;
+ local < oldMethodVarsCount;
+ local++, pOldVar++)
+ {
+ DWORD varNumber = pOldVar->varNumber;
+ if (signed(varNumber) >= 0)
+ {
+ // This is an explicit (not special) var, so add its varNumber + 1 to our
+ // max count ("+1" because varNumber is zero-based).
+ oldNumVars = max(oldNumVars, unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1);
+ }
+ }
+
+ oldMethodVarsSortedBase = new (nothrow) ICorDebugInfo::NativeVarInfo[oldNumVars];
+ if (!oldMethodVarsSortedBase)
+ {
+ hr = E_FAIL;
+ goto ErrExit;
+ }
+ oldMethodVarsSorted = oldMethodVarsSortedBase + (-ICorDebugInfo::UNKNOWN_ILNUM);
+
+ memset((void *)oldMethodVarsSortedBase, 0, oldNumVars * sizeof(ICorDebugInfo::NativeVarInfo));
+
+ for (local = 0; local < oldNumVars;local++)
+ oldMethodVarsSortedBase[local].loc.vlType = ICorDebugInfo::VLT_INVALID;
+
+ BYTE **rgVCs = NULL;
+ DWORD oldMethodOffset = pOldCodeInfo->GetRelOffset();
+
+ for (pOldVar = oldMethodVars, local = 0;
+ local < oldMethodVarsCount;
+ local++, pOldVar++)
+ {
+ DWORD varNumber = pOldVar->varNumber;
+
+ _ASSERTE(varNumber + unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) < oldNumVars);
+
+ // Only care about old local variables alive at oldMethodOffset
+ if (pOldVar->startOffset <= oldMethodOffset &&
+ pOldVar->endOffset > oldMethodOffset)
+ {
+ oldMethodVarsSorted[varNumber] = *pOldVar;
+ }
+ }
+
+ // 3) Next sort the new var info by varNumber. We want to do this here, since
+ // we're allocating memory (which may fail) - do this before going to step 2
+
+ // First, count the new vars the same way we did the old vars above.
+
+ const ICorDebugInfo::NativeVarInfo * pNewVar;
+
+ unsigned newNumVars = unsigned(-ICorDebugInfo::UNKNOWN_ILNUM);
+ for (pNewVar = newMethodVars, local = 0;
+ local < newMethodVarsCount;
+ local++, pNewVar++)
+ {
+ DWORD varNumber = pNewVar->varNumber;
+ if (signed(varNumber) >= 0)
+ {
+ // This is an explicit (not special) var, so add its varNumber + 1 to our
+ // max count ("+1" because varNumber is zero-based).
+ newNumVars = max(newNumVars, unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1);
+ }
+ }
+
+ // sorted by varNumber
+ newMethodVarsSortedBase = new (nothrow) ICorDebugInfo::NativeVarInfo[newNumVars];
+ if (!newMethodVarsSortedBase)
+ {
+ hr = E_FAIL;
+ goto ErrExit;
+ }
+ newMethodVarsSorted = newMethodVarsSortedBase + (-ICorDebugInfo::UNKNOWN_ILNUM);
+
+ memset(newMethodVarsSortedBase, 0, newNumVars * sizeof(ICorDebugInfo::NativeVarInfo));
+ for (local = 0; local < newNumVars;local++)
+ newMethodVarsSortedBase[local].loc.vlType = ICorDebugInfo::VLT_INVALID;
+
+ DWORD newMethodOffset = pNewCodeInfo->GetRelOffset();
+
+ for (pNewVar = newMethodVars, local = 0;
+ local < newMethodVarsCount;
+ local++, pNewVar++)
+ {
+ DWORD varNumber = pNewVar->varNumber;
+
+ _ASSERTE(varNumber + unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) < newNumVars);
+
+ // Only care about new local variables alive at newMethodOffset
+ if (pNewVar->startOffset <= newMethodOffset &&
+ pNewVar->endOffset > newMethodOffset)
+ {
+ newMethodVarsSorted[varNumber] = *pNewVar;
+ }
+ }
+
+ _ASSERTE(newNumVars >= oldNumVars ||
+ !"Not allowed to reduce the number of locals between versions!");
+
+ LOG((LF_ENC, LL_INFO100, "EECM::FixContextForEnC: gathered info!\n"));
+
+ rgVal1 = new (nothrow) SIZE_T[newNumVars];
+ if (rgVal1 == NULL)
+ {
+ hr = E_FAIL;
+ goto ErrExit;
+ }
+
+ rgVal2 = new (nothrow) SIZE_T[newNumVars];
+ if (rgVal2 == NULL)
+ {
+ hr = E_FAIL;
+ goto ErrExit;
+ }
+
+ // 4) Next we'll zero them out, so any variables that aren't in scope
+ // in the old method, but are in scope in the new, will have the
+ // default, zero, value.
+
+ memset(rgVal1, 0, sizeof(SIZE_T) * newNumVars);
+ memset(rgVal2, 0, sizeof(SIZE_T) * newNumVars);
+
+ unsigned varsToGet = (oldNumVars > newNumVars) ? newNumVars
+ : oldNumVars;
+
+ // 2) Get all the info about current variables, registers, etc.
+
+ hr = g_pDebugInterface->GetVariablesFromOffset(pOldCodeInfo->GetMethodDesc(),
+ varsToGet,
+ oldMethodVarsSortedBase,
+ oldMethodOffset,
+ &oldCtx,
+ rgVal1,
+ rgVal2,
+ newNumVars,
+ &rgVCs);
+ if (FAILED(hr))
+ {
+ goto ErrExit;
+ }
+
+
+ LOG((LF_ENC, LL_INFO100, "EECM::FixContextForEnC: got vars!\n"));
+
+ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
+ * IMPORTANT : Once we start munging on the context, we cannot return
+ * EnC_FAIL, as this should be a transacted commit,
+ **=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+#if defined(_TARGET_X86_)
+ // Zero out all the registers as some may hold new variables.
+ pCtx->Eax = pCtx->Ecx = pCtx->Edx = pCtx->Ebx =
+ pCtx->Esi = pCtx->Edi = 0;
+
+ // 3) zero out the stack frame - this'll initialize _all_ variables
+
+ /*-------------------------------------------------------------------------
+ * Adjust the stack height
+ */
+ pCtx->Esp -= (newInfo.stackSize - oldInfo.stackSize);
+
+ // Zero-init the local and temporary section of the new stack frame, being careful to avoid
+ // touching anything in the frame header.
+ // This is necessary to ensure that any JIT temporaries in the old version can't be mistaken
+ // for ObjRefs now.
+ size_t frameHeaderSize = GetSizeOfFrameHeaderForEnC( &newInfo );
+ _ASSERTE( frameHeaderSize <= oldInfo.stackSize );
+ _ASSERTE( GetSizeOfFrameHeaderForEnC( &oldInfo ) == frameHeaderSize );
+
+#elif defined(_TARGET_AMD64_)
+
+ // Next few statements zero out all registers that may end up holding new variables.
+
+ // volatile int registers (JIT may use these to enregister variables)
+ pCtx->Rax = pCtx->Rcx = pCtx->Rdx = pCtx->R8 = pCtx->R9 = pCtx->R10 = pCtx->R11 = 0;
+
+ // volatile float registers
+ pCtx->Xmm1.High = pCtx->Xmm1.Low = 0;
+ pCtx->Xmm2.High = pCtx->Xmm2.Low = 0;
+ pCtx->Xmm3.High = pCtx->Xmm3.Low = 0;
+ pCtx->Xmm4.High = pCtx->Xmm4.Low = 0;
+ pCtx->Xmm5.High = pCtx->Xmm5.Low = 0;
+
+ // Any saved nonvolatile registers should also be zeroed out, but there are none
+ // in EnC-compliant x64 code. Yes, you read that right. Registers like RDI, RSI,
+ // RBX, etc., which are often saved in the prolog of non-EnC code are NOT saved in
+ // EnC code. EnC code instead just agrees never to use those registers so they
+ // remain pristine for the caller (except RBP, which is considered part of the frame
+ // header, and is thus not zeroed out by us).
+
+ // 3) zero out the stack frame - this'll initialize _all_ variables
+
+ /*-------------------------------------------------------------------------
+ * Adjust the stack height
+ */
+
+ TADDR newStackBase = callerSP - newFixedStackSize;
+
+ SetSP(pCtx, newStackBase);
+
+ // We want to zero-out everything pushed after the frame header. This way we'll zero
+ // out locals (both old & new) and temporaries. This is necessary to ensure that any
+ // JIT temporaries in the old version can't be mistaken for ObjRefs now. (I am told
+ // this last point is less of an issue on x64 than it is on x86, but zeroing out the
+ // temporaries is still the cleanest, most robust way to go.)
+ size_t frameHeaderSize = newSizeOfPreservedArea;
+ _ASSERTE(frameHeaderSize <= oldFixedStackSize);
+ _ASSERTE(frameHeaderSize <= newFixedStackSize);
+
+ // For EnC-compliant x64 code, Rbp == Rsp. Since Rsp changed above, update Rbp now
+ pCtx->Rbp = newStackBase;
+#else // !X86, !AMD64
+ PORTABILITY_ASSERT("Edit-and-continue not enabled on this platform.");
+#endif
+
+ // Perform some debug-only sanity checks on stack variables. Some checks are
+ // performed differently between X86/AMD64.
+
+#ifdef _DEBUG
+ for( unsigned i = 0; i < newNumVars; i++ )
+ {
+ // Make sure that stack variables existing in both old and new methods did not
+ // move. This matters if the address of a local is used in the remapped method.
+ // For example:
+ //
+ // static unsafe void Main(string[] args)
+ // {
+ // int x;
+ // int* p = &x;
+ // <- Edit made here - cannot move address of x
+ // *p = 5;
+ // }
+ //
+ if ((i + unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) < oldNumVars) && // Does variable exist in old method?
+ (oldMethodVarsSorted[i].loc.vlType == ICorDebugInfo::VLT_STK) && // Is the variable on the stack?
+ (newMethodVarsSorted[i].loc.vlType == ICorDebugInfo::VLT_STK))
+ {
+ SIZE_T * pOldVarStackLocation = NativeVarStackAddr(oldMethodVarsSorted[i].loc, &oldCtx);
+ SIZE_T * pNewVarStackLocation = NativeVarStackAddr(newMethodVarsSorted[i].loc, pCtx);
+ _ASSERTE(pOldVarStackLocation == pNewVarStackLocation);
+ }
+
+ // Sanity-check that the range we're clearing contains all of the stack variables
+
+#if defined(_TARGET_X86_)
+ const ICorDebugInfo::VarLoc &varLoc = newMethodVarsSortedBase[i].loc;
+ if( varLoc.vlType == ICorDebugInfo::VLT_STK )
+ {
+ // This is an EBP frame, all stack variables should be EBP relative
+ _ASSERTE( varLoc.vlStk.vlsBaseReg == ICorDebugInfo::REGNUM_EBP );
+ // Generic special args may show up as locals with positive offset from EBP, so skip them
+ if( varLoc.vlStk.vlsOffset <= 0 )
+ {
+ // Normal locals must occur after the header on the stack
+ _ASSERTE( unsigned(-varLoc.vlStk.vlsOffset) >= frameHeaderSize );
+ // Value must occur before the top of the stack
+ _ASSERTE( unsigned(-varLoc.vlStk.vlsOffset) < newInfo.stackSize );
+ }
+
+ // Ideally we'd like to verify that the stack locals (if any) start at exactly the end
+ // of the header. However, we can't easily determine the size of value classes here,
+ // and so (since the stack grows towards 0) can't easily determine where the end of
+ // the local lies.
+ }
+#elif defined (_TARGET_AMD64_)
+ switch(newMethodVarsSortedBase[i].loc.vlType)
+ {
+ default:
+ // No validation here for non-stack locals
+ break;
+
+ case ICorDebugInfo::VLT_STK_BYREF:
+ {
+ // For byrefs, verify that the ptr will be zeroed out
+
+ SIZE_T regOffs = GetRegOffsInCONTEXT(newMethodVarsSortedBase[i].loc.vlStk.vlsBaseReg);
+ TADDR baseReg = *(TADDR *)(regOffs + (BYTE*)pCtx);
+ TADDR addrOfPtr = baseReg + newMethodVarsSortedBase[i].loc.vlStk.vlsOffset;
+
+ _ASSERTE(
+ // The ref must exist in the portion we'll zero-out
+ (
+ (newStackBase <= addrOfPtr) &&
+ (addrOfPtr < newStackBase + (newFixedStackSize - frameHeaderSize))
+ ) ||
+ // OR in the caller's frame (for parameters)
+ (addrOfPtr >= newStackBase + newFixedStackSize));
+
+ // Deliberately fall through, so that we also verify that the value that the ptr
+ // points to will be zeroed out
+ // ...
+ }
+
+ case ICorDebugInfo::VLT_STK:
+ case ICorDebugInfo::VLT_STK2:
+ case ICorDebugInfo::VLT_REG_STK:
+ case ICorDebugInfo::VLT_STK_REG:
+ SIZE_T * pVarStackLocation = NativeVarStackAddr(newMethodVarsSortedBase[i].loc, pCtx);
+ _ASSERTE (pVarStackLocation != NULL);
+ _ASSERTE(
+ // The value must exist in the portion we'll zero-out
+ (
+ (newStackBase <= (TADDR) pVarStackLocation) &&
+ ((TADDR) pVarStackLocation < newStackBase + (newFixedStackSize - frameHeaderSize))
+ ) ||
+ // OR in the caller's frame (for parameters)
+ ((TADDR) pVarStackLocation >= newStackBase + newFixedStackSize));
+ break;
+ }
+#else // !X86, !X64
+ PORTABILITY_ASSERT("Edit-and-continue not enabled on this platform.");
+#endif
+ }
+
+#endif // _DEBUG
+
+ // Clear the local and temporary stack space
+
+#if defined (_TARGET_X86_)
+ memset((void*)(size_t)(pCtx->Esp), 0, newInfo.stackSize - frameHeaderSize );
+#elif defined (_TARGET_AMD64_)
+ memset((void*)newStackBase, 0, newFixedStackSize - frameHeaderSize);
+
+ // On AMD64, after zeroing out the stack, restore the security object and PSPSym...
+
+ // There is no relationship we can guarantee between the old code having a security
+ // object and the new code having a security object. If the new code does have a
+ // security object, then we copy over the old security object's reference if there
+ // was one (else we copy over NULL, which is fine). If the new code doesn't have a
+ // security object, we do nothing.
+ INT32 nNewSecurityObjectStackSlot = newGcDecoder.GetSecurityObjectStackSlot();
+ if (nNewSecurityObjectStackSlot != NO_SECURITY_OBJECT)
+ {
+ *PTR_PTR_Object(callerSP + nNewSecurityObjectStackSlot) = OBJECTREFToObject(securityObject);
+ }
+
+ // Restore PSPSym for the new function. Its value should be set to our new FP. But
+ // first, we have to find PSPSym's location on the stack.
+ INT32 nNewPspSymStackSlot = newGcDecoder.GetPSPSymStackSlot();
+ if (nNewPspSymStackSlot != NO_PSP_SYM)
+ {
+ *PTR_TADDR(newStackBase + nNewPspSymStackSlot) = GetFP(pCtx);
+ }
+#else // !X86, !X64
+ PORTABILITY_ASSERT("Edit-and-continue not enabled on this platform.");
+#endif
+
+ // 4) Put the variables from step 3 into their new locations.
+
+ LOG((LF_ENC, LL_INFO100, "EECM::FixContextForEnC: set vars!\n"));
+
+ // Move the old variables into their new places.
+
+ hr = g_pDebugInterface->SetVariablesAtOffset(pNewCodeInfo->GetMethodDesc(),
+ newNumVars,
+ newMethodVarsSortedBase,
+ newMethodOffset,
+ pCtx, // place them into the new context
+ rgVal1,
+ rgVal2,
+ rgVCs);
+
+ /*-----------------------------------------------------------------------*/
+ }
+ErrExit:
+ if (oldMethodVarsSortedBase)
+ delete[] oldMethodVarsSortedBase;
+ if (newMethodVarsSortedBase)
+ delete[] newMethodVarsSortedBase;
+ if (rgVal1 != NULL)
+ delete[] rgVal1;
+ if (rgVal2 != NULL)
+ delete[] rgVal2;
+
+ LOG((LF_ENC, LL_INFO100, "EECM::FixContextForEnC: exiting!\n"));
+
+ return hr;
+}
+#endif // EnC_SUPPORTED
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef USE_GC_INFO_DECODER
+/*****************************************************************************
+ *
+ * Is the function currently at a "GC safe point" ?
+ */
+bool EECodeManager::IsGcSafe( EECodeInfo *pCodeInfo,
+ DWORD dwRelOffset)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ GcInfoDecoder gcInfoDecoder(
+ dac_cast<PTR_CBYTE>(pCodeInfo->GetGCInfo()),
+ DECODE_INTERRUPTIBILITY,
+ dwRelOffset
+ );
+
+ return gcInfoDecoder.IsInterruptible();
+}
+
+
+#if defined(_TARGET_AMD64_) && defined(_DEBUG)
+
+struct FindEndOfLastInterruptibleRegionState
+{
+ unsigned curOffset;
+ unsigned endOffset;
+ unsigned lastRangeOffset;
+};
+
+bool FindEndOfLastInterruptibleRegionCB (
+ UINT32 startOffset,
+ UINT32 stopOffset,
+ LPVOID hCallback)
+{
+ FindEndOfLastInterruptibleRegionState *pState = (FindEndOfLastInterruptibleRegionState*)hCallback;
+
+ //
+ // If the current range doesn't overlap the given range, keep searching.
+ //
+ if ( startOffset >= pState->endOffset
+ || stopOffset < pState->curOffset)
+ {
+ return false;
+ }
+
+ //
+ // If the range overlaps the end, then the last point is the end.
+ //
+ if ( stopOffset > pState->endOffset
+ /*&& startOffset < pState->endOffset*/)
+ {
+ // The ranges should be sorted in increasing order.
+ CONSISTENCY_CHECK(startOffset >= pState->lastRangeOffset);
+
+ pState->lastRangeOffset = pState->endOffset;
+ return true;
+ }
+
+ //
+    // See if the end of this range is the closest to the end that we've found
+ // so far.
+ //
+ if (stopOffset > pState->lastRangeOffset)
+ pState->lastRangeOffset = stopOffset;
+
+ return false;
+}
+
+/*
+ Locates the end of the last interruptible region in the given code range.
+ Returns 0 if the entire range is uninterruptible. Returns the end point
+ if the entire range is interruptible.
+*/
+unsigned EECodeManager::FindEndOfLastInterruptibleRegion(unsigned curOffset,
+ unsigned endOffset,
+ PTR_VOID methodInfoPtr)
+{
+#ifndef DACCESS_COMPILE
+ BYTE* gcInfoAddr = (BYTE*) methodInfoPtr;
+
+ GcInfoDecoder gcInfoDecoder(
+ gcInfoAddr,
+ DECODE_FOR_RANGES_CALLBACK,
+ 0);
+
+ FindEndOfLastInterruptibleRegionState state;
+ state.curOffset = curOffset;
+ state.endOffset = endOffset;
+ state.lastRangeOffset = 0;
+
+ gcInfoDecoder.EnumerateInterruptibleRanges(&FindEndOfLastInterruptibleRegionCB, &state);
+
+ return state.lastRangeOffset;
+#else
+ DacNotImpl();
+    return 0;
+#endif // #ifndef DACCESS_COMPILE
+}
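+
+// For illustration: given interruptible ranges [0,10) and [20,30) and a query
+// over [curOffset=5, endOffset=25), range [0,10) overlaps the query and ends
+// before endOffset, so lastRangeOffset becomes 10; range [20,30) extends past
+// endOffset, so the callback clamps the result to endOffset and halts the
+// enumeration, and the function returns 25. If no range overlapped the query,
+// the function would return 0.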
+
+#endif // _TARGET_AMD64_ && _DEBUG
+
+
+#else // !USE_GC_INFO_DECODER
+
+/*****************************************************************************
+ *
+ * Is the function currently at a "GC safe point" ?
+ */
+bool EECodeManager::IsGcSafe( EECodeInfo *pCodeInfo,
+ DWORD dwRelOffset)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ hdrInfo info;
+ BYTE * table;
+
+ /* Extract the necessary information from the info block header */
+
+ table = (BYTE *)crackMethodInfoHdr(pCodeInfo->GetGCInfo(),
+ dwRelOffset,
+ &info);
+
+ /* workaround: prevent interruption within prolog/epilog */
+
+ if (info.prologOffs != hdrInfo::NOT_IN_PROLOG || info.epilogOffs != hdrInfo::NOT_IN_EPILOG)
+ return false;
+
+#if VERIFY_GC_TABLES
+ _ASSERTE(*castto(table, unsigned short *)++ == 0xBEEF);
+#endif
+
+ return (info.interruptible);
+}
+
+
+/*****************************************************************************/
+static
+PTR_CBYTE skipToArgReg(const hdrInfo& info, PTR_CBYTE table)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+#ifdef _DEBUG
+ PTR_CBYTE tableStart = table;
+#else
+ if (info.argTabOffset != INVALID_ARGTAB_OFFSET)
+ {
+ return table + info.argTabOffset;
+ }
+#endif
+
+ unsigned count;
+
+#if VERIFY_GC_TABLES
+ _ASSERTE(*castto(table, unsigned short *)++ == 0xBEEF);
+#endif
+
+ /* Skip over the untracked frame variable table */
+
+ count = info.untrackedCnt;
+ while (count-- > 0) {
+ fastSkipSigned(table);
+ }
+
+#if VERIFY_GC_TABLES
+ _ASSERTE(*castto(table, unsigned short *)++ == 0xCAFE);
+#endif
+
+ /* Skip over the frame variable lifetime table */
+
+ count = info.varPtrTableSize;
+ while (count-- > 0) {
+ fastSkipUnsigned(table); fastSkipUnsigned(table); fastSkipUnsigned(table);
+ }
+
+#if VERIFY_GC_TABLES
+ _ASSERTE(*castto(table, unsigned short *) == 0xBABE);
+#endif
+
+#ifdef _DEBUG
+ if (info.argTabOffset != INVALID_ARGTAB_OFFSET)
+ {
+ CONSISTENCY_CHECK_MSGF((info.argTabOffset == (unsigned) (table - tableStart)),
+ ("table = %p, tableStart = %p, info.argTabOffset = %d", table, tableStart, info.argTabOffset));
+ }
+#endif
+
+ return table;
+}
+
+/*****************************************************************************/
+
+#define regNumToMask(regNum) RegMask(1<<regNum)
+
+/*****************************************************************************
+ Helper for scanArgRegTable() and scanArgRegTableI() for regMasks
+ */
+
+void * getCalleeSavedReg(PREGDISPLAY pContext, regNum reg)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ switch (reg)
+ {
+ case REGI_EBP: return pContext->pEbp;
+ case REGI_EBX: return pContext->pEbx;
+ case REGI_ESI: return pContext->pEsi;
+ case REGI_EDI: return pContext->pEdi;
+
+ default: _ASSERTE(!"bad info.thisPtrResult"); return NULL;
+ }
+}
+
+/*****************************************************************************
+ These functions convert the bits in the GC encoding to a RegMask
+ */
+
+inline
+RegMask convertCalleeSavedRegsMask(unsigned inMask) // EBP,EBX,ESI,EDI
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE((inMask & 0x0F) == inMask);
+
+ unsigned outMask = RM_NONE;
+ if (inMask & 0x1) outMask |= RM_EDI;
+ if (inMask & 0x2) outMask |= RM_ESI;
+ if (inMask & 0x4) outMask |= RM_EBX;
+ if (inMask & 0x8) outMask |= RM_EBP;
+
+ return (RegMask) outMask;
+}
+
+inline
+RegMask convertAllRegsMask(unsigned inMask) // EAX,ECX,EDX,EBX, EBP,ESI,EDI
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE((inMask & 0xEF) == inMask);
+
+ unsigned outMask = RM_NONE;
+ if (inMask & 0x01) outMask |= RM_EAX;
+ if (inMask & 0x02) outMask |= RM_ECX;
+ if (inMask & 0x04) outMask |= RM_EDX;
+ if (inMask & 0x08) outMask |= RM_EBX;
+ if (inMask & 0x20) outMask |= RM_EBP;
+ if (inMask & 0x40) outMask |= RM_ESI;
+ if (inMask & 0x80) outMask |= RM_EDI;
+
+ return (RegMask)outMask;
+}
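+
+// For illustration: convertCalleeSavedRegsMask(0x9) yields RM_EDI|RM_EBP,
+// and convertAllRegsMask(0x41) yields RM_EAX|RM_ESI (bit 0x10 is unused,
+// which is why the assert above masks with 0xEF).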
+
+/*****************************************************************************
+ * Scan the register argument table for the not-fully-interruptible case.
+   This function is called to find all live objects (pushed arguments)
+   and to get the stack base for EBP-less methods.
+
+   NOTE: If info->argTabResult is NULL, info->argHnumResult indicates
+         how many bits in argMask are valid.
+         If info->argTabResult is non-NULL, then the argMask field does
+         not fit in 32 bits and the value in argMask is meaningless.
+         Instead, argHnum specifies the number of (variable-length) elements
+         in the array, and argTabBytes specifies the total byte size of the
+         array. [ Note this is an extremely rare case ]
+ */
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+static
+unsigned scanArgRegTable(PTR_CBYTE table,
+ unsigned curOffs,
+ hdrInfo * info)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ regNum thisPtrReg = REGI_NA;
+#ifdef _DEBUG
+ bool isCall = false;
+#endif
+ unsigned regMask = 0; // EBP,EBX,ESI,EDI
+ unsigned argMask = 0;
+ unsigned argHnum = 0;
+ PTR_CBYTE argTab = 0;
+ unsigned argTabBytes = 0;
+ unsigned stackDepth = 0;
+
+ unsigned iregMask = 0; // EBP,EBX,ESI,EDI
+ unsigned iargMask = 0;
+ unsigned iptrMask = 0;
+
+#if VERIFY_GC_TABLES
+ _ASSERTE(*castto(table, unsigned short *)++ == 0xBABE);
+#endif
+
+ unsigned scanOffs = 0;
+
+ _ASSERTE(scanOffs <= info->methodSize);
+
+ if (info->ebpFrame) {
+ /*
+            Encoding table for methods with an EBP frame that
+            are not fully interruptible
+
+ The encoding used is as follows:
+
+ this pointer encodings:
+
+ 01000000 this pointer in EBX
+ 00100000 this pointer in ESI
+ 00010000 this pointer in EDI
+
+ tiny encoding:
+
+ 0bsdDDDD
+ requires code delta < 16 (4-bits)
+ requires pushed argmask == 0
+
+ where DDDD is code delta
+ b indicates that register EBX is a live pointer
+ s indicates that register ESI is a live pointer
+ d indicates that register EDI is a live pointer
+
+ small encoding:
+
+ 1DDDDDDD bsdAAAAA
+
+ requires code delta < 120 (7-bits)
+ requires pushed argmask < 64 (5-bits)
+
+ where DDDDDDD is code delta
+ AAAAA is the pushed args mask
+ b indicates that register EBX is a live pointer
+ s indicates that register ESI is a live pointer
+ d indicates that register EDI is a live pointer
+
+ medium encoding
+
+ 0xFD aaaaaaaa AAAAdddd bseDDDDD
+
+                    requires code delta     < 0x200  (9-bits)
+                    requires pushed argmask < 0x1000 (12-bits)
+
+ where DDDDD is the upper 5-bits of the code delta
+ dddd is the low 4-bits of the code delta
+ AAAA is the upper 4-bits of the pushed arg mask
+ aaaaaaaa is the low 8-bits of the pushed arg mask
+ b indicates that register EBX is a live pointer
+ s indicates that register ESI is a live pointer
+ e indicates that register EDI is a live pointer
+
+ medium encoding with interior pointers
+
+                0xF9 DDDDDDDD bsdAAAAA iiiIIIII
+
+                    requires code delta     < 0x100 (8-bits)
+                    requires pushed argmask < 0x20  (5-bits)
+
+                where DDDDDDDD is the code delta
+                      b indicates that register EBX is a live pointer
+                      s indicates that register ESI is a live pointer
+                      d indicates that register EDI is a live pointer
+                      AAAAA is the pushed arg mask
+                      iii indicates which of EBX,ESI,EDI are interior pointers
+                      IIIII indicates which bits in the arg mask are interior
+                            pointers
+
+ large encoding
+
+ 0xFE [0BSD0bsd][32-bit code delta][32-bit argMask]
+
+ b indicates that register EBX is a live pointer
+ s indicates that register ESI is a live pointer
+ d indicates that register EDI is a live pointer
+ B indicates that register EBX is an interior pointer
+ S indicates that register ESI is an interior pointer
+ D indicates that register EDI is an interior pointer
+ requires pushed argmask < 32-bits
+
+ large encoding with interior pointers
+
+ 0xFA [0BSD0bsd][32-bit code delta][32-bit argMask][32-bit interior pointer mask]
+
+
+ b indicates that register EBX is a live pointer
+ s indicates that register ESI is a live pointer
+ d indicates that register EDI is a live pointer
+ B indicates that register EBX is an interior pointer
+ S indicates that register ESI is an interior pointer
+ D indicates that register EDI is an interior pointer
+ requires pushed argmask < 32-bits
+ requires pushed iArgmask < 32-bits
+
+ huge encoding This is the only encoding that supports
+ a pushed argmask which is greater than
+ 32-bits.
+
+ 0xFB [0BSD0bsd][32-bit code delta]
+ [32-bit table count][32-bit table size]
+ [pushed ptr offsets table...]
+
+ b indicates that register EBX is a live pointer
+ s indicates that register ESI is a live pointer
+ d indicates that register EDI is a live pointer
+ B indicates that register EBX is an interior pointer
+ S indicates that register ESI is an interior pointer
+ D indicates that register EDI is an interior pointer
+ the list count is the number of entries in the list
+                      the list size gives the byte-length of the list
+ the offsets in the list are variable-length
+ */
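+
+        // A worked decode, for illustration (hypothetical table bytes):
+        //   0x45        tiny entry:  code delta = 5, EBX holds a live pointer
+        //   0x85 0x21   small entry: code delta = 5, then bsdAAAAA = 00100001,
+        //               so EDI holds a live pointer and the pushed arg at
+        //               bit 0 of the arg mask is a pointer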
+ while (scanOffs < curOffs)
+ {
+ iregMask = 0;
+ iargMask = 0;
+ argTab = NULL;
+#ifdef _DEBUG
+ isCall = true;
+#endif
+
+ /* Get the next byte and check for a 'special' entry */
+
+ unsigned encType = *table++;
+#if defined(DACCESS_COMPILE)
+ // In this scenario, it is invalid to have a zero byte in the GC info encoding (refer to the
+ // comments above). At least one bit has to be set. For example, a byte can represent which
+ // register is the "this" pointer, and this byte has to be 0x10, 0x20, or 0x40. Having a zero
+ // byte indicates there is most likely some sort of DAC error, and it may lead to problems such as
+ // infinite loops. So we bail out early instead.
+ if (encType == 0)
+ {
+ DacError(CORDBG_E_TARGET_INCONSISTENT);
+ UNREACHABLE();
+ }
+#endif // DACCESS_COMPILE
+
+ switch (encType)
+ {
+ unsigned val, nxt;
+
+ default:
+
+ /* A tiny or small call entry */
+ val = encType;
+ if ((val & 0x80) == 0x00) {
+ if (val & 0x0F) {
+ /* A tiny call entry */
+ scanOffs += (val & 0x0F);
+ regMask = (val & 0x70) >> 4;
+ argMask = 0;
+ argHnum = 0;
+ }
+ else {
+ /* This pointer liveness encoding */
+ regMask = (val & 0x70) >> 4;
+ if (regMask == 0x1)
+ thisPtrReg = REGI_EDI;
+ else if (regMask == 0x2)
+ thisPtrReg = REGI_ESI;
+ else if (regMask == 0x4)
+ thisPtrReg = REGI_EBX;
+ else
+ _ASSERTE(!"illegal encoding for 'this' pointer liveness");
+ }
+ }
+ else {
+ /* A small call entry */
+ scanOffs += (val & 0x7F);
+ val = *table++;
+ regMask = val >> 5;
+ argMask = val & 0x1F;
+ argHnum = 5;
+ }
+ break;
+
+ case 0xFD: // medium encoding
+
+ argMask = *table++;
+ val = *table++;
+ argMask |= ((val & 0xF0) << 4);
+ argHnum = 12;
+ nxt = *table++;
+ scanOffs += (val & 0x0F) + ((nxt & 0x1F) << 4);
+ regMask = nxt >> 5; // EBX,ESI,EDI
+
+ break;
+
+ case 0xF9: // medium encoding with interior pointers
+
+ scanOffs += *table++;
+ val = *table++;
+ argMask = val & 0x1F;
+ argHnum = 5;
+ regMask = val >> 5;
+ val = *table++;
+ iargMask = val & 0x1F;
+ iregMask = val >> 5;
+
+ break;
+
+ case 0xFE: // large encoding
+ case 0xFA: // large encoding with interior pointers
+
+ val = *table++;
+ regMask = val & 0x7;
+ iregMask = val >> 4;
+ scanOffs += *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
+ argMask = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
+ argHnum = 31;
+ if (encType == 0xFA) // read iargMask
+ {
+ iargMask = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
+ }
+ break;
+
+ case 0xFB: // huge encoding This is the only partially interruptible
+ // encoding that supports a pushed ArgMask
+ // which is greater than 32-bits.
+ // The ArgMask is encoded using the argTab
+ val = *table++;
+ regMask = val & 0x7;
+ iregMask = val >> 4;
+ scanOffs += *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
+ argHnum = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
+ argTabBytes = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
+ argTab = table; table += argTabBytes;
+
+ argMask = 0;
+ break;
+
+ case 0xFF:
+ scanOffs = curOffs + 1;
+ break;
+
+ } // end case
+
+ // iregMask & iargMask are subsets of regMask & argMask respectively
+
+ _ASSERTE((iregMask & regMask) == iregMask);
+ _ASSERTE((iargMask & argMask) == iargMask);
+
+ } // end while
+
+ }
+ else {
+
+/*
+ * Encoding table for methods with an ESP frame and are not fully interruptible
+ * This encoding does not support a pushed ArgMask greater than 32
+ *
+ * The encoding used is as follows:
+ *
+ * push 000DDDDD ESP push one item with 5-bit delta
+ * push 00100000 [pushCount] ESP push multiple items
+ * reserved 0011xxxx
+ * skip 01000000 [Delta] Skip Delta, arbitrary sized delta
+ * skip 0100DDDD Skip small Delta, for call (DDDD != 0)
+ * pop 01CCDDDD ESP pop CC items with 4-bit delta (CC != 00)
+ * call 1PPPPPPP Call Pattern, P=[0..79]
+ * call 1101pbsd DDCCCMMM Call RegMask=pbsd,ArgCnt=CCC,
+ * ArgMask=MMM Delta=commonDelta[DD]
+ * call 1110pbsd [ArgCnt] [ArgMask] Call ArgCnt,RegMask=pbsd,[32-bit ArgMask]
+ * call 11111000 [PBSDpbsd][32-bit delta][32-bit ArgCnt]
+ * [32-bit PndCnt][32-bit PndSize][PndOffs...]
+ * iptr 11110000 [IPtrMask] Arbitrary 32-bit Interior Pointer Mask
+ * thisptr 111101RR This pointer is in Register RR
+ * 00=EDI,01=ESI,10=EBX,11=EBP
+ * reserved 111100xx xx != 00
+ * reserved 111110xx xx != 00
+ * reserved 11111xxx xxx != 000 && xxx != 111(EOT)
+ *
+ * The value 11111111 [0xFF] indicates the end of the table.
+ *
+ * An offset (at which stack-walking is performed) without an explicit encoding
+ * is assumed to be a trivial call-site (no GC registers, stack empty before and
+ * after) to avoid having to encode all trivial calls.
+ *
+ * Note on the encoding used for interior pointers
+ *
+ * The iptr encoding must immediately precede a call encoding. It is used to
+ * transform normal GC pointer addresses into interior pointers for GC purposes.
+ * The mask supplied to the iptr encoding is read from the least significant bit
+ * to the most significant bit. (i.e. the lowest bit is read first)
+ *
+ * p indicates that register EBP is a live pointer
+ * b indicates that register EBX is a live pointer
+ * s indicates that register ESI is a live pointer
+ * d indicates that register EDI is a live pointer
+ * P indicates that register EBP is an interior pointer
+ * B indicates that register EBX is an interior pointer
+ * S indicates that register ESI is an interior pointer
+ * D indicates that register EDI is an interior pointer
+ *
+ * As an example, the following sequence indicates that EDI, ESI, and the 2nd pushed pointer
+ * in ArgMask are really interior pointers. The pointers in EBP and the 1st pushed arg
+ * are normal pointers:
+ *
+ * iptr 11110000 00010011 => read Interior Ptr, Interior Ptr, Normal Ptr, Normal Ptr, Interior Ptr
+ * call 11011011 DDCCC011 RRRR=1011 => read EDI is a GC-pointer, ESI is a GC-pointer, EBP is a GC-pointer
+ *                                     MMM=011 => read two GC-pointer arguments on the stack (nested call)
+ *
+ * Since the call instruction mentions 5 GC-pointers we list them in the required order:
+ * EDI, ESI, EBP, 1st-pushed pointer, 2nd-pushed pointer
+ *
+ * Applying the Interior Pointer mask iiiii=10011 to the above five ordered GC-pointers,
+ * we learn that EDI and ESI are interior GC-pointers and that the 2nd pushed arg is an
+ * interior GC-pointer.
+ */
+
+#if defined(DACCESS_COMPILE)
+ DWORD cbZeroBytes = 0;
+#endif // DACCESS_COMPILE
+
+ while (scanOffs <= curOffs)
+ {
+ unsigned callArgCnt;
+ unsigned skip;
+ unsigned newRegMask, inewRegMask;
+ unsigned newArgMask, inewArgMask;
+ unsigned oldScanOffs = scanOffs;
+
+ if (iptrMask)
+ {
+ // We found this iptrMask in the previous iteration.
+ // This iteration must be for a call. Set these variables
+ // so that they are available at the end of the loop
+
+ inewRegMask = iptrMask & 0x0F; // EBP,EBX,ESI,EDI
+ inewArgMask = iptrMask >> 4;
+
+ iptrMask = 0;
+ }
+ else
+ {
+ // Zero out any stale values.
+
+ inewRegMask = 0;
+ inewArgMask = 0;
+ }
+
+ /* Get the next byte and decode it */
+
+ unsigned val = *table++;
+#if defined(DACCESS_COMPILE)
+ // In this scenario, a 0 means that there is a push at the current offset. For a struct with
+ // two double fields, the JIT may use two movq instructions to push the struct onto the stack, and
+ // the JIT will encode 4 pushes at the same code offset. This means that we can have up to 4
+ // consecutive bytes of 0 without changing the code offset. Having more than 4 consecutive bytes
+ // of zero indicates that there is most likely some sort of DAC error, and it may lead to problems
+ // such as infinite loops. So we bail out early instead.
+ if (val == 0)
+ {
+ cbZeroBytes += 1;
+ if (cbZeroBytes > 4)
+ {
+ DacError(CORDBG_E_TARGET_INCONSISTENT);
+ UNREACHABLE();
+ }
+ }
+ else
+ {
+ cbZeroBytes = 0;
+ }
+#endif // DACCESS_COMPILE
+
+#ifdef _DEBUG
+ if (scanOffs != curOffs)
+ isCall = false;
+#endif
+
+ /* Check pushes, pops, and skips */
+
+ if (!(val & 0x80)) {
+
+ // iptrMask can immediately precede only calls
+
+ _ASSERTE(inewRegMask == 0);
+ _ASSERTE(inewArgMask == 0);
+
+ if (!(val & 0x40)) {
+
+ unsigned pushCount;
+
+ if (!(val & 0x20))
+ {
+ //
+ // push 000DDDDD ESP push one item, 5-bit delta
+ //
+ pushCount = 1;
+ scanOffs += val & 0x1f;
+ }
+ else
+ {
+ //
+ // push 00100000 [pushCount] ESP push multiple items
+ //
+ _ASSERTE(val == 0x20);
+ pushCount = fastDecodeUnsigned(table);
+ }
+
+ if (scanOffs > curOffs)
+ {
+ scanOffs = oldScanOffs;
+ goto FINISHED;
+ }
+
+ stackDepth += pushCount;
+ }
+ else if ((val & 0x3f) != 0) {
+ //
+ // pop 01CCDDDD pop CC items, 4-bit delta
+ //
+ scanOffs += val & 0x0f;
+ if (scanOffs > curOffs)
+ {
+ scanOffs = oldScanOffs;
+ goto FINISHED;
+ }
+ stackDepth -= (val & 0x30) >> 4;
+
+ } else if (scanOffs < curOffs) {
+ //
+ // skip 01000000 [Delta] Skip arbitrary sized delta
+ //
+ skip = fastDecodeUnsigned(table);
+ scanOffs += skip;
+ }
+ else // don't process a skip if we are already at curOffs
+ goto FINISHED;
+
+ /* reset regs and args state since we advance past last call site */
+
+ regMask = 0;
+ iregMask = 0;
+ argMask = 0;
+ iargMask = 0;
+ argHnum = 0;
+
+ }
+ else /* It must be a call, thisptr, or iptr */
+ {
+ switch ((val & 0x70) >> 4) {
+ default: // case 0-4, 1000xxxx through 1100xxxx
+ //
+ // call 1PPPPPPP Call Pattern, P=[0..79]
+ //
+ decodeCallPattern((val & 0x7f), &callArgCnt,
+ &newRegMask, &newArgMask, &skip);
+ // If we've already reached curOffs and the skip amount
+ // is non-zero then we are done
+ if ((scanOffs == curOffs) && (skip > 0))
+ goto FINISHED;
+ // otherwise process this call pattern
+ scanOffs += skip;
+ if (scanOffs > curOffs)
+ goto FINISHED;
+#ifdef _DEBUG
+ isCall = true;
+#endif
+ regMask = newRegMask;
+ argMask = newArgMask; argTab = NULL;
+ iregMask = inewRegMask;
+ iargMask = inewArgMask;
+ stackDepth -= callArgCnt;
+ argHnum = 2; // argMask is known to be <= 3
+ break;
+
+ case 5:
+ //
+ // call 1101RRRR DDCCCMMM Call RegMask=RRRR,ArgCnt=CCC,
+ // ArgMask=MMM Delta=commonDelta[DD]
+ //
+ newRegMask = val & 0xf; // EBP,EBX,ESI,EDI
+ val = *table++; // read next byte
+ skip = callCommonDelta[val>>6];
+ // If we've already reached curOffs and the skip amount
+ // is non-zero then we are done
+ if ((scanOffs == curOffs) && (skip > 0))
+ goto FINISHED;
+ // otherwise process this call encoding
+ scanOffs += skip;
+ if (scanOffs > curOffs)
+ goto FINISHED;
+#ifdef _DEBUG
+ isCall = true;
+#endif
+ regMask = newRegMask;
+ iregMask = inewRegMask;
+ callArgCnt = (val >> 3) & 0x7;
+ stackDepth -= callArgCnt;
+ argMask = (val & 0x7); argTab = NULL;
+ iargMask = inewArgMask;
+ argHnum = 3;
+ break;
+
+ case 6:
+ //
+ // call 1110RRRR [ArgCnt] [ArgMask]
+ // Call ArgCnt,RegMask=RRR,ArgMask
+ //
+#ifdef _DEBUG
+ isCall = true;
+#endif
+ regMask = val & 0xf; // EBP,EBX,ESI,EDI
+ iregMask = inewRegMask;
+ callArgCnt = fastDecodeUnsigned(table);
+ stackDepth -= callArgCnt;
+ argMask = fastDecodeUnsigned(table); argTab = NULL;
+ iargMask = inewArgMask;
+ argHnum = sizeof(argMask) * 8; // The size of argMask in bits
+ break;
+
+ case 7:
+ switch (val & 0x0C)
+ {
+ case 0x00:
+ //
+ // 0xF0 iptr 11110000 [IPtrMask] Arbitrary Interior Pointer Mask
+ //
+ iptrMask = fastDecodeUnsigned(table);
+ break;
+
+ case 0x04:
+ //
+ // 0xF4 thisptr 111101RR This pointer is in Register RR
+ // 00=EDI,01=ESI,10=EBX,11=EBP
+ //
+ {
+ static const regNum calleeSavedRegs[] =
+ { REGI_EDI, REGI_ESI, REGI_EBX, REGI_EBP };
+ thisPtrReg = calleeSavedRegs[val&0x3];
+ }
+ break;
+
+ case 0x08:
+ //
+ // 0xF8 call 11111000 [PBSDpbsd][32-bit delta][32-bit ArgCnt]
+ // [32-bit PndCnt][32-bit PndSize][PndOffs...]
+ //
+ val = *table++;
+ skip = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
+// [VSUQFE 4670]
+ // If we've already reached curOffs and the skip amount
+ // is non-zero then we are done
+ if ((scanOffs == curOffs) && (skip > 0))
+ goto FINISHED;
+// [VSUQFE 4670]
+ scanOffs += skip;
+ if (scanOffs > curOffs)
+ goto FINISHED;
+#ifdef _DEBUG
+ isCall = true;
+#endif
+ regMask = val & 0xF;
+ iregMask = val >> 4;
+ callArgCnt = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
+ stackDepth -= callArgCnt;
+ argHnum = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
+ argTabBytes = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
+ argTab = table;
+ table += argTabBytes;
+ break;
+
+ case 0x0C:
+ //
+ // 0xFF end 11111111 End of table marker
+ //
+ _ASSERTE(val==0xff);
+ goto FINISHED;
+
+ default:
+ _ASSERTE(!"reserved GC encoding");
+ break;
+ }
+ break;
+
+ } // end switch
+
+ } // end else (!(val & 0x80))
+
+ // iregMask & iargMask are subsets of regMask & argMask respectively
+
+ _ASSERTE((iregMask & regMask) == iregMask);
+ _ASSERTE((iargMask & argMask) == iargMask);
+
+ } // end while
+
+ } // end else ebp-less frame
+
+FINISHED:
+
+ // iregMask & iargMask are subsets of regMask & argMask respectively
+
+ _ASSERTE((iregMask & regMask) == iregMask);
+ _ASSERTE((iargMask & argMask) == iargMask);
+
+ if (scanOffs != curOffs)
+ {
+ /* must have been a boring call */
+ info->regMaskResult = RM_NONE;
+ info->argMaskResult = ptrArgTP(0);
+ info->iregMaskResult = RM_NONE;
+ info->iargMaskResult = ptrArgTP(0);
+ info->argHnumResult = 0;
+ info->argTabResult = NULL;
+ info->argTabBytes = 0;
+ }
+ else
+ {
+ info->regMaskResult = convertCalleeSavedRegsMask(regMask);
+ info->argMaskResult = ptrArgTP(argMask);
+ info->argHnumResult = argHnum;
+ info->iregMaskResult = convertCalleeSavedRegsMask(iregMask);
+ info->iargMaskResult = ptrArgTP(iargMask);
+ info->argTabResult = argTab;
+ info->argTabBytes = argTabBytes;
+ }
+
+#ifdef _DEBUG
+ if (scanOffs != curOffs) {
+ isCall = false;
+ }
+ _ASSERTE(thisPtrReg == REGI_NA || (!isCall || (regNumToMask(thisPtrReg) & info->regMaskResult)));
+#endif
+ info->thisPtrResult = thisPtrReg;
+
+    _ASSERTE(int(stackDepth) >= 0); // check that it did not underflow
+ return (stackDepth * sizeof(unsigned));
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+
+/*****************************************************************************
+ * Scan the register argument table for the fully interruptible case.
+   This function is called to find all live objects (pushed arguments)
+   and to get the stack base for fully interruptible methods.
+   Returns the size of things pushed on the stack for ESP frames.
+ */
+
+static
+unsigned scanArgRegTableI(PTR_CBYTE table,
+ unsigned curOffs,
+ hdrInfo * info)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ regNum thisPtrReg = REGI_NA;
+ unsigned ptrRegs = 0;
+ unsigned iptrRegs = 0;
+ unsigned ptrOffs = 0;
+ unsigned argCnt = 0;
+
+ ptrArgTP ptrArgs(0);
+ ptrArgTP iptrArgs(0);
+ ptrArgTP argHigh(0);
+
+ bool isThis = false;
+ bool iptr = false;
+
+#if VERIFY_GC_TABLES
+ _ASSERTE(*castto(table, unsigned short *)++ == 0xBABE);
+#endif
+
+ /*
+ Encoding table for methods that are fully interruptible
+
+ The encoding used is as follows:
+
+ ptr reg dead 00RRRDDD [RRR != 100]
+ ptr reg live 01RRRDDD [RRR != 100]
+
+ non-ptr arg push 10110DDD [SSS == 110]
+ ptr arg push 10SSSDDD [SSS != 110] && [SSS != 111]
+ ptr arg pop 11CCCDDD [CCC != 000] && [CCC != 110] && [CCC != 111]
+ little delta skip 11000DDD [CCC == 000]
+ bigger delta skip 11110BBB [CCC == 110]
+
+ The values used in the encodings are as follows:
+
+ DDD code offset delta from previous entry (0-7)
+ BBB bigger delta 000=8,001=16,010=24,...,111=64
+ RRR register number (EAX=000,ECX=001,EDX=010,EBX=011,
+ EBP=101,ESI=110,EDI=111), ESP=100 is reserved
+ SSS argument offset from base of stack. This is
+ redundant for frameless methods as we can
+ infer it from the previous pushes+pops. However,
+ for EBP-methods, we only report GC pushes, and
+ so we need SSS
+ CCC argument count being popped (includes only ptrs for EBP methods)
+
+ The following are the 'large' versions:
+
+ large delta skip 10111000 [0xB8] , encodeUnsigned(delta)
+
+ large ptr arg push 11111000 [0xF8] , encodeUnsigned(pushCount)
+ large non-ptr arg push 11111001 [0xF9] , encodeUnsigned(pushCount)
+ large ptr arg pop 11111100 [0xFC] , encodeUnsigned(popCount)
+ large arg dead 11111101 [0xFD] , encodeUnsigned(popCount) for caller-pop args.
+ Any GC args go dead after the call,
+ but are still sitting on the stack
+
+ this pointer prefix 10111100 [0xBC] the next encoding is a ptr live
+ or a ptr arg push
+ and contains the this pointer
+
+ interior or by-ref 10111111 [0xBF] the next encoding is a ptr live
+ pointer prefix or a ptr arg push
+ and contains an interior
+ or by-ref pointer
+
+
+ The value 11111111 [0xFF] indicates the end of the table.
+ */
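+
+    // A worked decode, for illustration (hypothetical table bytes):
+    //   0x5B   01RRRDDD: after a code delta of 3, EBX (RRR=011) becomes live
+    //   0x1B   00RRRDDD: after a further delta of 3, EBX goes dead again
+    //   0xFF   end-of-table marker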
+
+#if defined(DACCESS_COMPILE)
+ bool fLastByteIsZero = false;
+#endif // DACCESS_COMPILE
+
+ /* Have we reached the instruction we're looking for? */
+
+ while (ptrOffs <= curOffs)
+ {
+ unsigned val;
+
+ int isPop;
+ unsigned argOfs;
+
+ unsigned regMask;
+
+ // iptrRegs & iptrArgs are subsets of ptrRegs & ptrArgs respectively
+
+ _ASSERTE((iptrRegs & ptrRegs) == iptrRegs);
+ _ASSERTE((iptrArgs & ptrArgs) == iptrArgs);
+
+ /* Now find the next 'life' transition */
+
+ val = *table++;
+#if defined(DACCESS_COMPILE)
+ // In this scenario, a zero byte means that EAX is going dead at the current offset. Since EAX
+ // can't go dead more than once at any given offset, it's invalid to have two consecutive bytes
+ // of zero. If this were to happen, then it means that there is most likely some sort of DAC
+ // error, and it may lead to problems such as infinite loops. So we bail out early instead.
+ if ((val == 0) && fLastByteIsZero)
+ {
+ DacError(CORDBG_E_TARGET_INCONSISTENT);
+ UNREACHABLE();
+ }
+ fLastByteIsZero = (val == 0);
+#endif // DACCESS_COMPILE
+
+ if (!(val & 0x80))
+ {
+ /* A small 'regPtr' encoding */
+
+ regNum reg;
+
+ ptrOffs += (val ) & 0x7;
+ if (ptrOffs > curOffs) {
+ iptr = isThis = false;
+ goto REPORT_REFS;
+ }
+
+ reg = (regNum)((val >> 3) & 0x7);
+ regMask = 1 << reg; // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
+
+#if 0
+ printf("regMask = %04X -> %04X\n", ptrRegs,
+ (val & 0x40) ? (ptrRegs | regMask)
+ : (ptrRegs & ~regMask));
+#endif
+
+ /* The register is becoming live/dead here */
+
+ if (val & 0x40)
+ {
+ /* Becomes Live */
+ _ASSERTE((ptrRegs & regMask) == 0);
+
+ ptrRegs |= regMask;
+
+ if (isThis)
+ {
+ thisPtrReg = reg;
+ }
+ if (iptr)
+ {
+ iptrRegs |= regMask;
+ }
+ }
+ else
+ {
+ /* Becomes Dead */
+ _ASSERTE((ptrRegs & regMask) != 0);
+
+ ptrRegs &= ~regMask;
+
+ if (reg == thisPtrReg)
+ {
+ thisPtrReg = REGI_NA;
+ }
+ if (iptrRegs & regMask)
+ {
+ iptrRegs &= ~regMask;
+ }
+ }
+ iptr = isThis = false;
+ continue;
+ }
+
+ /* This is probably an argument push/pop */
+
+ argOfs = (val & 0x38) >> 3;
+
+ /* 6 [110] and 7 [111] are reserved for other encodings */
+ if (argOfs < 6)
+ {
+
+ /* A small argument encoding */
+
+ ptrOffs += (val & 0x07);
+ if (ptrOffs > curOffs) {
+ iptr = isThis = false;
+ goto REPORT_REFS;
+ }
+ isPop = (val & 0x40);
+
+ ARG:
+
+ if (isPop)
+ {
+ if (argOfs == 0)
+ continue; // little skip encoding
+
+ /* We remove (pop) the top 'argOfs' entries */
+
+ _ASSERTE(argOfs || argOfs <= argCnt);
+
+ /* adjust # of arguments */
+
+ argCnt -= argOfs;
+ _ASSERTE(argCnt < MAX_PTRARG_OFS);
+
+// printf("[%04X] popping %u args: mask = %04X\n", ptrOffs, argOfs, (int)ptrArgs);
+
+ do
+ {
+ _ASSERTE(!isZero(argHigh));
+
+ /* Do we have an argument bit that's on? */
+
+ if (intersect(ptrArgs, argHigh))
+ {
+ /* Turn off the bit */
+
+ setDiff(ptrArgs, argHigh);
+ setDiff(iptrArgs, argHigh);
+
+ /* We've removed one more argument bit */
+
+ argOfs--;
+ }
+ else if (info->ebpFrame)
+ argCnt--;
+ else /* !ebpFrame && not a ref */
+ argOfs--;
+
+ /* Continue with the next lower bit */
+
+ argHigh >>= 1;
+ }
+ while (argOfs);
+
+ _ASSERTE((info->ebpFrame != 0) ||
+ isZero(argHigh) ||
+ (argHigh == CONSTRUCT_ptrArgTP(1, (argCnt-1))));
+
+ if (info->ebpFrame)
+ {
+ while (!intersect(argHigh, ptrArgs) && (!isZero(argHigh)))
+ argHigh >>= 1;
+ }
+
+ }
+ else
+ {
+ /* Add a new ptr arg entry at stack offset 'argOfs' */
+
+ if (argOfs >= MAX_PTRARG_OFS)
+ {
+ _ASSERTE_ALL_BUILDS("clr/src/VM/eetwain.cpp", !"scanArgRegTableI: args pushed 'too deep'");
+ }
+ else
+ {
+ /* For ESP-frames, all pushes are reported, and so
+ argOffs has to be consistent with argCnt */
+
+ _ASSERTE(info->ebpFrame || argCnt == argOfs);
+
+ /* store arg count */
+
+ argCnt = argOfs + 1;
+ _ASSERTE((argCnt < MAX_PTRARG_OFS));
+
+ /* Compute the appropriate argument offset bit */
+
+ ptrArgTP argMask = CONSTRUCT_ptrArgTP(1, argOfs);
+
+// printf("push arg at offset %02u --> mask = %04X\n", argOfs, (int)argMask);
+
+ /* We should never push twice at the same offset */
+
+ _ASSERTE(!intersect( ptrArgs, argMask));
+ _ASSERTE(!intersect(iptrArgs, argMask));
+
+ /* We should never push within the current highest offset */
+
+ // _ASSERTE(argHigh < argMask);
+
+ /* This is now the highest bit we've set */
+
+ argHigh = argMask;
+
+ /* Set the appropriate bit in the argument mask */
+
+ ptrArgs |= argMask;
+
+ if (iptr)
+ iptrArgs |= argMask;
+ }
+
+ iptr = isThis = false;
+ }
+ continue;
+ }
+ else if (argOfs == 6)
+ {
+ if (val & 0x40) {
+ /* Bigger delta 000=8,001=16,010=24,...,111=64 */
+ ptrOffs += (((val & 0x07) + 1) << 3);
+ }
+ else {
+ /* non-ptr arg push */
+ _ASSERTE(!(info->ebpFrame));
+ ptrOffs += (val & 0x07);
+ if (ptrOffs > curOffs) {
+ iptr = isThis = false;
+ goto REPORT_REFS;
+ }
+ argHigh = CONSTRUCT_ptrArgTP(1, argCnt);
+ argCnt++;
+ _ASSERTE(argCnt < MAX_PTRARG_OFS);
+ }
+ continue;
+ }
+
+ /* argOfs was 7 [111] which is reserved for the larger encodings */
+
+ _ASSERTE(argOfs==7);
+
+ switch (val)
+ {
+ case 0xFF:
+ iptr = isThis = false;
+ goto REPORT_REFS; // the method might loop !!!
+
+ case 0xB8:
+ val = fastDecodeUnsigned(table);
+ ptrOffs += val;
+ continue;
+
+ case 0xBC:
+ isThis = true;
+ break;
+
+ case 0xBF:
+ iptr = true;
+ break;
+
+ case 0xF8:
+ case 0xFC:
+ isPop = val & 0x04;
+ argOfs = fastDecodeUnsigned(table);
+ goto ARG;
+
+ case 0xFD: {
+ argOfs = fastDecodeUnsigned(table);
+ _ASSERTE(argOfs && argOfs <= argCnt);
+
+ // Kill the top "argOfs" pointers.
+
+ ptrArgTP argMask;
+ for(argMask = CONSTRUCT_ptrArgTP(1, argCnt); (argOfs != 0); argMask >>= 1)
+ {
+ _ASSERTE(!isZero(argMask) && !isZero(ptrArgs)); // there should be remaining pointers
+
+ if (intersect(ptrArgs, argMask))
+ {
+ setDiff(ptrArgs, argMask);
+ setDiff(iptrArgs, argMask);
+ argOfs--;
+ }
+ }
+
+            // For ebp-frames, need to find the next highest pointer for argHigh
+
+ if (info->ebpFrame)
+ {
+ for(argHigh = ptrArgTP(0); !isZero(argMask); argMask >>= 1)
+ {
+ if (intersect(ptrArgs, argMask)) {
+ argHigh = argMask;
+ break;
+ }
+ }
+ }
+ } break;
+
+ case 0xF9:
+ argOfs = fastDecodeUnsigned(table);
+ argCnt += argOfs;
+ break;
+
+ default:
+ _ASSERTE(!"Unexpected special code %04X");
+ }
+ }
+
+ /* Report all live pointer registers */
+REPORT_REFS:
+
+ _ASSERTE((iptrRegs & ptrRegs) == iptrRegs); // iptrRegs is a subset of ptrRegs
+ _ASSERTE((iptrArgs & ptrArgs) == iptrArgs); // iptrArgs is a subset of ptrArgs
+
+ /* Save the current live register, argument set, and argCnt */
+
+ info->regMaskResult = convertAllRegsMask(ptrRegs);
+ info->argMaskResult = ptrArgs;
+ info->argHnumResult = 0;
+ info->iregMaskResult = convertAllRegsMask(iptrRegs);
+ info->iargMaskResult = iptrArgs;
+
+ info->thisPtrResult = thisPtrReg;
+ _ASSERTE(thisPtrReg == REGI_NA || (regNumToMask(thisPtrReg) & info->regMaskResult));
+
+ if (info->ebpFrame)
+ {
+ return 0;
+ }
+ else
+ {
+        _ASSERTE(int(argCnt) >= 0); // check that it did not underflow
+ return (argCnt * sizeof(unsigned));
+ }
+}
+
+/*****************************************************************************/
+
+unsigned GetPushedArgSize(hdrInfo * info, PTR_CBYTE table, DWORD curOffs)
+{
+ SUPPORTS_DAC;
+
+ unsigned sz;
+
+ if (info->interruptible)
+ {
+ sz = scanArgRegTableI(skipToArgReg(*info, table),
+ curOffs,
+ info);
+ }
+ else
+ {
+ sz = scanArgRegTable(skipToArgReg(*info, table),
+ curOffs,
+ info);
+ }
+
+ return sz;
+}
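+
+// For illustration: for an ESP-frame method with two pointer-sized arguments
+// pushed at curOffs, this helper returns 8. For EBP-frame methods both scans
+// leave the tracked depth at zero (arguments are tracked relative to EBP), so
+// the helper returns 0.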
+
+/*****************************************************************************/
+
+inline
+void TRASH_CALLEE_UNSAVED_REGS(PREGDISPLAY pContext)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef _DEBUG
+ /* This is not completely correct as we lose the current value, but
+ it should not really be useful to anyone. */
+ static DWORD s_badData = 0xDEADBEEF;
+ pContext->pEax = pContext->pEcx = pContext->pEdx = &s_badData;
+#endif //_DEBUG
+}
+
+/*****************************************************************************
+ * Sizes of certain i386 instructions which are used in the prolog/epilog
+ */
+
+// Can we use sign-extended byte to encode the imm value, or do we need a dword
+#define CAN_COMPRESS(val) ((INT8)(val) == (INT32)(val))
+
+#define SZ_ADD_REG(val) ( 2 + (CAN_COMPRESS(val) ? 1 : 4))
+#define SZ_AND_REG(val) SZ_ADD_REG(val)
+#define SZ_POP_REG 1
+#define SZ_LEA(offset) SZ_ADD_REG(offset)
+#define SZ_MOV_REG_REG 2
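+
+// For illustration: "add esp, 4" encodes as 83 C4 04, so SZ_ADD_REG(4) == 3,
+// while "add esp, 0x1000" needs a DWORD immediate (81 C4 00 10 00 00), so
+// SZ_ADD_REG(0x1000) == 6.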
+
+bool IsMarkerInstr(BYTE val)
+{
+ SUPPORTS_DAC;
+#ifdef _DEBUG
+ return (val == X86_INSTR_INT3) || // Debugger might stomp with an int3
+ (val == X86_INSTR_HLT && GCStress<cfg_any>::IsEnabled()); // GcCover might stomp with a Hlt
+#else
+ return false;
+#endif
+}
+
+/* Check if the given instruction opcode is the one we expect.
+   This is a "necessary" but not "sufficient" check, as it also accepts the case
+   where the instruction has been stomped by one of our special markers
+   (for debugging and GcStress) */
+
+bool CheckInstrByte(BYTE val, BYTE expectedValue)
+{
+ SUPPORTS_DAC;
+ return ((val == expectedValue) || IsMarkerInstr(val));
+}
+
+/* Similar to CheckInstrByte(). Use this to check a masked opcode (ignoring
+ optional bits in the opcode encoding).
+ valPattern is the masked out value.
+ expectedPattern is the mask value we expect.
+ val is the actual instruction opcode
+ */
+bool CheckInstrBytePattern(BYTE valPattern, BYTE expectedPattern, BYTE val)
+{
+ SUPPORTS_DAC;
+
+ _ASSERTE((valPattern & val) == valPattern);
+
+ return ((valPattern == expectedPattern) || IsMarkerInstr(val));
+}
+
+/* Similar to CheckInstrByte() */
+
+bool CheckInstrWord(WORD val, WORD expectedValue)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return ((val == expectedValue) || IsMarkerInstr(val & 0xFF));
+}
+
+// Use this to check if the instruction at offset "walkOffset" has already
+// been executed
+// "actualHaltOffset" is the offset when the code was suspended
+// It is assumed that there is linear control flow from offset 0 to "actualHaltOffset".
+//
+// This has been factored out just so that the intent of the comparison
+// is clear (compared to the opposite intent)
+
+bool InstructionAlreadyExecuted(unsigned walkOffset, unsigned actualHaltOffset)
+{
+ SUPPORTS_DAC;
+ return (walkOffset < actualHaltOffset);
+}
+
+// skips past an "arith REG, IMM"
+inline unsigned SKIP_ARITH_REG(int val, PTR_CBYTE base, unsigned offset)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ unsigned delta = 0;
+ if (val != 0)
+ {
+#ifdef _DEBUG
+ // Confirm that arith instruction is at the correct place
+ _ASSERTE(CheckInstrBytePattern(base[offset ] & 0xFD, 0x81, base[offset]) &&
+ CheckInstrBytePattern(base[offset+1] & 0xC0, 0xC0, base[offset+1]));
+ // only use DWORD form if needed
+ _ASSERTE(((base[offset] & 2) != 0) == CAN_COMPRESS(val) ||
+ IsMarkerInstr(base[offset]));
+#endif
+ delta = 2 + (CAN_COMPRESS(val) ? 1 : 4);
+ }
+ return(offset + delta);
+}
+
+inline unsigned SKIP_PUSH_REG(PTR_CBYTE base, unsigned offset)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Confirm it is a push instruction
+ _ASSERTE(CheckInstrBytePattern(base[offset] & 0xF8, 0x50, base[offset]));
+ return(offset + 1);
+}
+
+inline unsigned SKIP_POP_REG(PTR_CBYTE base, unsigned offset)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Confirm it is a pop instruction
+ _ASSERTE(CheckInstrBytePattern(base[offset] & 0xF8, 0x58, base[offset]));
+ return(offset + 1);
+}
+
+inline unsigned SKIP_MOV_REG_REG(PTR_CBYTE base, unsigned offset)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Confirm it is a move instruction
+ // Note that only the first byte may have been stomped on by IsMarkerInstr()
+ // So we can check the second byte directly
+ _ASSERTE(CheckInstrBytePattern(base[offset] & 0xFD, 0x89, base[offset]) &&
+ (base[offset+1] & 0xC0) == 0xC0);
+ return(offset + 2);
+}
+
+inline unsigned SKIP_LEA_ESP_EBP(int val, PTR_CBYTE base, unsigned offset)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef _DEBUG
+ // Confirm it is the right instruction
+ // Note that only the first byte may have been stomped on by IsMarkerInstr()
+ // So we can check the second byte directly
+ WORD wOpcode = *(PTR_WORD)base;
+ _ASSERTE((CheckInstrWord(wOpcode, X86_INSTR_w_LEA_ESP_EBP_BYTE_OFFSET) &&
+ (val == *(PTR_SBYTE)(base+2)) &&
+ CAN_COMPRESS(val)) ||
+ (CheckInstrWord(wOpcode, X86_INSTR_w_LEA_ESP_EBP_DWORD_OFFSET) &&
+ (val == *(PTR_INT32)(base+2)) &&
+ !CAN_COMPRESS(val)));
+#endif
+
+ unsigned delta = 2 + (CAN_COMPRESS(val) ? 1 : 4);
+ return(offset + delta);
+}
+
+unsigned SKIP_ALLOC_FRAME(int size, PTR_CBYTE base, unsigned offset)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ _ASSERTE(size != 0);
+
+ if (size == sizeof(void*))
+ {
+ // We do "push eax" instead of "sub esp,4"
+ return (SKIP_PUSH_REG(base, offset));
+ }
+
+ if (size >= CORINFO_PAGE_SIZE)
+ {
+ if (size < (3 * CORINFO_PAGE_SIZE))
+ {
+ // add 7 bytes for one or two TEST EAX, [ESP+CORINFO_PAGE_SIZE]
+ offset += (size / CORINFO_PAGE_SIZE) * 7;
+ }
+ else
+ {
+ // xor eax, eax 2
+ // [nop] 0-3
+ // loop:
+ // test [esp + eax], eax 3
+ // sub eax, 0x1000 5
+ // cmp EAX, -size 5
+ // jge loop 2
+ offset += 2;
+
+ // NGEN images that support rejit may have extra nops we need to skip over
+ while (offset < 5)
+ {
+ if (CheckInstrByte(base[offset], X86_INSTR_NOP))
+ {
+ offset++;
+ }
+ else
+ {
+ break;
+ }
+ }
+ offset += 15;
+ }
+ }
+
+ // sub ESP, size
+ return (SKIP_ARITH_REG(size, base, offset));
+}
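+
+// For illustration: with CORINFO_PAGE_SIZE == 0x1000, a frame of size 0x2500
+// is preceded by two 7-byte "TEST EAX, [ESP+CORINFO_PAGE_SIZE]" probes, so the
+// code above advances offset by 2*7 = 14 and then skips the 6-byte
+// "sub esp, 0x2500" via SKIP_ARITH_REG.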
+
+
+#endif // !USE_GC_INFO_DECODER
+
+
+#if !defined(_TARGET_X86_) && !defined(CROSSGEN_COMPILE)
+
+void EECodeManager::EnsureCallerContextIsValid( PREGDISPLAY pRD, StackwalkCacheEntry* pCacheEntry, EECodeInfo * pCodeInfo /*= NULL*/ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if( !pRD->IsCallerContextValid )
+ {
+#if !defined(DACCESS_COMPILE)
+ if (pCacheEntry != NULL)
+ {
+ // lightened schema: take stack unwind info from stackwalk cache
+ QuickUnwindStackFrame(pRD, pCacheEntry, EnsureCallerStackFrameIsValid);
+ }
+ else
+#endif // !DACCESS_COMPILE
+ {
+ // We need to make a copy here (instead of switching the pointers), in order to preserve the current context
+ *(pRD->pCallerContext) = *(pRD->pCurrentContext);
+
+ NOT_X86(*(pRD->pCallerContextPointers) = *(pRD->pCurrentContextPointers));
+
+ T_KNONVOLATILE_CONTEXT_POINTERS *pCallerContextPointers = NULL;
+ NOT_X86(pCallerContextPointers = pRD->pCallerContextPointers);
+
+ Thread::VirtualUnwindCallFrame(pRD->pCallerContext, pCallerContextPointers, pCodeInfo);
+ }
+
+ pRD->IsCallerContextValid = TRUE;
+ }
+
+ _ASSERTE( pRD->IsCallerContextValid );
+}
+
+size_t EECodeManager::GetCallerSp( PREGDISPLAY pRD )
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ // Don't add usage of this field. This is only temporary.
+ // See ExceptionTracker::InitializeCrawlFrame() for more information.
+ if (!pRD->IsCallerSPValid)
+ {
+ EnsureCallerContextIsValid(pRD, NULL);
+ }
+ return (size_t) (GetSP(pRD->pCallerContext));
+}
+
+#endif // !defined(_TARGET_X86_) && !defined(CROSSGEN_COMPILE)
+
+/*
+ * Lightweight unwind of the current stack frame, using the provided cache entry.
+ * pPC, Esp, and pEbp of pContext are updated.
+ */
+
+// static
+void EECodeManager::QuickUnwindStackFrame(PREGDISPLAY pRD, StackwalkCacheEntry *pCacheEntry, QuickUnwindFlag flag)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ _ASSERTE(pCacheEntry);
+ _ASSERTE(GetControlPC(pRD) == (PCODE)(pCacheEntry->IP));
+
+#if defined(_TARGET_X86_)
+ _ASSERTE(flag == UnwindCurrentStackFrame);
+
+ _ASSERTE(!pCacheEntry->fUseEbp || pCacheEntry->fUseEbpAsFrameReg);
+
+ if (pCacheEntry->fUseEbpAsFrameReg)
+ {
+ _ASSERTE(pCacheEntry->fUseEbp);
+ // EBP frame, update ESP through EBP, since ESPOffset may vary
+ pRD->pEbp = PTR_DWORD((TADDR)*pRD->pEbp);
+ pRD->Esp = (TADDR)pRD->pEbp + sizeof(void*);
+ }
+ else
+ {
+ _ASSERTE(!pCacheEntry->fUseEbp);
+ // ESP frame, update up to retAddr using ESPOffset
+ pRD->Esp += pCacheEntry->ESPOffset;
+ }
+ pRD->PCTAddr = (TADDR)pRD->Esp;
+ pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr);
+ pRD->Esp += sizeof(void*) + pCacheEntry->argSize;
+
+#elif defined(_TARGET_AMD64_)
+ if (pRD->IsCallerContextValid)
+ {
+ pRD->pCurrentContext->Rbp = pRD->pCallerContext->Rbp;
+ pRD->pCurrentContext->Rsp = pRD->pCallerContext->Rsp;
+ pRD->pCurrentContext->Rip = pRD->pCallerContext->Rip;
+ }
+ else
+ {
+ PCONTEXT pSourceCtx = NULL;
+ PCONTEXT pTargetCtx = NULL;
+ if (flag == UnwindCurrentStackFrame)
+ {
+ pTargetCtx = pRD->pCurrentContext;
+ pSourceCtx = pRD->pCurrentContext;
+ }
+ else
+ {
+ pTargetCtx = pRD->pCallerContext;
+ pSourceCtx = pRD->pCurrentContext;
+ }
+
+ // Unwind RBP. The offset is relative to the current sp.
+ if (pCacheEntry->RBPOffset == 0)
+ {
+ pTargetCtx->Rbp = pSourceCtx->Rbp;
+ }
+ else
+ {
+ pTargetCtx->Rbp = *(UINT_PTR*)(pSourceCtx->Rsp + pCacheEntry->RBPOffset);
+ }
+
+ // Adjust the sp. From this pointer onwards pCurrentContext->Rsp is the caller sp.
+ pTargetCtx->Rsp = pSourceCtx->Rsp + pCacheEntry->RSPOffset;
+
+ // Retrieve the return address.
+ pTargetCtx->Rip = *(UINT_PTR*)((pTargetCtx->Rsp) - sizeof(UINT_PTR));
+ }
+
+ if (flag == UnwindCurrentStackFrame)
+ {
+ SyncRegDisplayToCurrentContext(pRD);
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+ }
+
+#else // !_TARGET_X86_ && !_TARGET_AMD64_
+ PORTABILITY_ASSERT("EECodeManager::QuickUnwindStackFrame is not implemented on this platform.");
+#endif // !_TARGET_X86_ && !_TARGET_AMD64_
+}
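+
+// For illustration, the x86 EBP-frame path above (hypothetical addresses):
+// if EBP holds 0x0012FF40 on entry, the caller's saved EBP lives at
+// 0x0012FF40, the return address at 0x0012FF44, and the caller's ESP after
+// the return-address and callee arguments are popped is 0x0012FF48 + argSize.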
+
+/*****************************************************************************/
+#ifdef _TARGET_X86_ // UnwindStackFrame
+/*****************************************************************************/
+
+const RegMask CALLEE_SAVED_REGISTERS_MASK[] =
+{
+ RM_EDI, // first register to be pushed
+ RM_ESI,
+ RM_EBX,
+ RM_EBP // last register to be pushed
+};
+
+const SIZE_T REGDISPLAY_OFFSET_OF_CALLEE_SAVED_REGISTERS[] =
+{
+ offsetof(REGDISPLAY, pEdi), // first register to be pushed
+ offsetof(REGDISPLAY, pEsi),
+ offsetof(REGDISPLAY, pEbx),
+ offsetof(REGDISPLAY, pEbp) // last register to be pushed
+};
+
+/*****************************************************************************/
+
+void UnwindEspFrameEpilog(
+ PREGDISPLAY pContext,
+ hdrInfo * info,
+ PTR_CBYTE epilogBase,
+ unsigned flags)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(info->epilogOffs != hdrInfo::NOT_IN_EPILOG);
+ _ASSERTE(!info->ebpFrame && !info->doubleAlign);
+ _ASSERTE(info->epilogOffs > 0);
+
+ int offset = 0;
+ unsigned ESP = pContext->Esp;
+
+ if (info->rawStkSize)
+ {
+ if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
+ {
+ /* We have NOT executed the "ADD ESP, FrameSize",
+ so manually adjust stack pointer */
+ ESP += info->rawStkSize;
+ }
+
+ // We have already popped off the frame (excluding the callee-saved registers)
+
+ if (epilogBase[0] == X86_INSTR_POP_ECX)
+ {
+ // We may use "POP ecx" for doing "ADD ESP, 4",
+ // or we may not (in the case of JMP epilogs)
+ _ASSERTE(info->rawStkSize == sizeof(void*));
+ offset = SKIP_POP_REG(epilogBase, offset);
+ }
+ else
+ {
+ // "add esp, rawStkSize"
+ offset = SKIP_ARITH_REG(info->rawStkSize, epilogBase, offset);
+ }
+ }
+
+ /* Remaining callee-saved regs are at ESP. Need to update
+ regsMask as well to exclude registers which have already been popped. */
+
+ const RegMask regsMask = info->savedRegMask;
+
+ /* Increment "offset" in steps to see which callee-saved
+ registers have already been popped */
+
+ for (unsigned i = NumItems(CALLEE_SAVED_REGISTERS_MASK); i > 0; i--)
+ {
+ RegMask regMask = CALLEE_SAVED_REGISTERS_MASK[i - 1];
+
+ if (!(regMask & regsMask))
+ continue;
+
+ if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
+ {
+ /* We have NOT yet popped off the register.
+ Get the value from the stack if needed */
+ if ((flags & UpdateAllRegs) || (regMask == RM_EBP))
+ {
+ SIZE_T offsetOfRegPtr = REGDISPLAY_OFFSET_OF_CALLEE_SAVED_REGISTERS[i - 1];
+ *(LPVOID*)(PBYTE(pContext) + offsetOfRegPtr) = PTR_DWORD((TADDR)ESP);
+ }
+
+ /* Adjust ESP */
+ ESP += sizeof(void*);
+ }
+
+ offset = SKIP_POP_REG(epilogBase, offset);
+ }
+
+ //CEE_JMP generates an epilog similar to a normal CEE_RET epilog except for the last instruction
+ _ASSERTE(CheckInstrBytePattern(epilogBase[offset] & X86_INSTR_RET, X86_INSTR_RET, epilogBase[offset]) //ret
+ || CheckInstrBytePattern(epilogBase[offset], X86_INSTR_JMP_NEAR_REL32, epilogBase[offset]) //jmp ret32
+ || CheckInstrWord(*PTR_WORD(epilogBase + offset), X86_INSTR_w_JMP_FAR_IND_IMM)); //jmp [addr32]
+
+ /* Finally we can set pPC */
+ pContext->PCTAddr = (TADDR)ESP;
+ pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
+
+ pContext->Esp = ESP;
+}
+
+/*****************************************************************************/
+
+void UnwindEbpDoubleAlignFrameEpilog(
+ PREGDISPLAY pContext,
+ hdrInfo * info,
+ PTR_CBYTE epilogBase,
+ unsigned flags)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(info->epilogOffs != hdrInfo::NOT_IN_EPILOG);
+ _ASSERTE(info->ebpFrame || info->doubleAlign);
+
+ _ASSERTE(info->argSize < 0x10000); // "ret" only has a 2 byte operand
+
+ /* See how many instructions we have executed in the
+ epilog to determine which callee-saved registers
+ have already been popped */
+ int offset = 0;
+
+ unsigned ESP = pContext->Esp;
+
+ bool needMovEspEbp = false;
+
+ if (info->doubleAlign)
+ {
+ // add esp, rawStkSize
+
+ if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
+ ESP += info->rawStkSize;
+ _ASSERTE(info->rawStkSize != 0);
+ offset = SKIP_ARITH_REG(info->rawStkSize, epilogBase, offset);
+
+ // We also need "mov esp, ebp" after popping the callee-saved registers
+ needMovEspEbp = true;
+ }
+ else
+ {
+ bool needLea = false;
+
+ if (info->localloc)
+ {
+ // ESP may be variable if a localloc was actually executed. We will reset it.
+ // lea esp, [ebp-calleeSavedRegs]
+
+ needLea = true;
+ }
+ else if (info->savedRegsCountExclFP == 0)
+ {
+ // We will just generate "mov esp, ebp" and be done with it.
+
+ if (info->rawStkSize != 0)
+ {
+ needMovEspEbp = true;
+ }
+ }
+ else if (info->rawStkSize == 0)
+ {
+ // do nothing before popping the callee-saved registers
+ }
+ else if (info->rawStkSize == sizeof(void*))
+ {
+ // "pop ecx" will make ESP point to the callee-saved registers
+ if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
+ ESP += sizeof(void*);
+ offset = SKIP_POP_REG(epilogBase, offset);
+ }
+ else
+ {
+ // We need to make ESP point to the callee-saved registers
+ // lea esp, [ebp-calleeSavedRegs]
+
+ needLea = true;
+ }
+
+ if (needLea)
+ {
+ // lea esp, [ebp-calleeSavedRegs]
+
+ unsigned calleeSavedRegsSize = info->savedRegsCountExclFP * sizeof(void*);
+
+ if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
+ ESP = (*pContext->pEbp) - calleeSavedRegsSize;
+
+ offset = SKIP_LEA_ESP_EBP(-int(calleeSavedRegsSize), epilogBase, offset);
+ }
+ }
+
+ for (unsigned i = NumItems(CALLEE_SAVED_REGISTERS_MASK) - 1; i > 0; i--)
+ {
+ RegMask regMask = CALLEE_SAVED_REGISTERS_MASK[i - 1];
+ _ASSERTE(regMask != RM_EBP);
+
+ if ((info->savedRegMask & regMask) == 0)
+ continue;
+
+ if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
+ {
+ if (flags & UpdateAllRegs)
+ {
+ SIZE_T offsetOfRegPtr = REGDISPLAY_OFFSET_OF_CALLEE_SAVED_REGISTERS[i - 1];
+ *(LPVOID*)(PBYTE(pContext) + offsetOfRegPtr) = PTR_DWORD((TADDR)ESP);
+ }
+ ESP += sizeof(void*);
+ }
+
+ offset = SKIP_POP_REG(epilogBase, offset);
+ }
+
+ if (needMovEspEbp)
+ {
+ if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
+ ESP = *pContext->pEbp;
+
+ offset = SKIP_MOV_REG_REG(epilogBase, offset);
+ }
+
+ // Have we executed the pop EBP?
+ if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
+ {
+ pContext->pEbp = PTR_DWORD(TADDR(ESP));
+ ESP += sizeof(void*);
+ }
+ offset = SKIP_POP_REG(epilogBase, offset);
+
+ pContext->PCTAddr = (TADDR)ESP;
+ pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
+
+ pContext->Esp = ESP;
+}
+
+//****************************************************************************
+// This is the value ESP is incremented by on doing a "return"
+
+inline SIZE_T ESPIncrOnReturn(hdrInfo * info)
+{
+ SUPPORTS_DAC;
+ return sizeof(void *) + // pop off the return address
+ // Note varargs is caller-popped
+ (info->varargs ? 0 : info->argSize);
+}
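+
+// For illustration: a non-varargs method with 12 bytes of stack arguments
+// returns with "ret 0Ch", so ESP grows by 4 (return address) + 12 = 16;
+// a varargs method returns with a plain "ret" and the caller pops the
+// arguments, so ESP grows by just 4.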
+
+/*****************************************************************************/
+
+void UnwindEpilog(
+ PREGDISPLAY pContext,
+ hdrInfo * info,
+ PTR_CBYTE epilogBase,
+ unsigned flags)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ _ASSERTE(info->epilogOffs != hdrInfo::NOT_IN_EPILOG);
+ // _ASSERTE(flags & ActiveStackFrame); // <TODO> Wont work for thread death</TODO>
+ _ASSERTE(info->epilogOffs > 0);
+
+ if (info->ebpFrame || info->doubleAlign)
+ {
+ UnwindEbpDoubleAlignFrameEpilog(pContext, info, epilogBase, flags);
+ }
+ else
+ {
+ UnwindEspFrameEpilog(pContext, info, epilogBase, flags);
+ }
+
+#ifdef _DEBUG
+ if (flags & UpdateAllRegs)
+ TRASH_CALLEE_UNSAVED_REGS(pContext);
+#endif
+
+ /* Now adjust stack pointer */
+
+ pContext->Esp += ESPIncrOnReturn(info);
+}
+
+/*****************************************************************************/
+
+void UnwindEspFrameProlog(
+ PREGDISPLAY pContext,
+ hdrInfo * info,
+ PTR_CBYTE methodStart,
+ unsigned flags)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ /* we are in the middle of the prolog */
+ _ASSERTE(info->prologOffs != hdrInfo::NOT_IN_PROLOG);
+ _ASSERTE(!info->ebpFrame && !info->doubleAlign);
+
+ unsigned offset = 0;
+
+#ifdef _DEBUG
+ // If the first two instructions are 'nop, int3', then we will
+ // assume that is from a JitHalt operation and skip past it
+ if (methodStart[0] == X86_INSTR_NOP && methodStart[1] == X86_INSTR_INT3)
+ {
+ offset += 2;
+ }
+#endif
+
+ const DWORD curOffs = info->prologOffs;
+ unsigned ESP = pContext->Esp;
+
+ // Find out how many callee-saved regs have already been pushed
+
+ unsigned regsMask = RM_NONE;
+ PTR_DWORD savedRegPtr = PTR_DWORD((TADDR)ESP);
+
+ for (unsigned i = 0; i < NumItems(CALLEE_SAVED_REGISTERS_MASK); i++)
+ {
+ RegMask regMask = CALLEE_SAVED_REGISTERS_MASK[i];
+
+ if (!(info->savedRegMask & regMask))
+ continue;
+
+ if (InstructionAlreadyExecuted(offset, curOffs))
+ {
+ ESP += sizeof(void*);
+ regsMask |= regMask;
+ }
+
+ offset = SKIP_PUSH_REG(methodStart, offset);
+ }
+
+ if (info->rawStkSize)
+ {
+ offset = SKIP_ALLOC_FRAME(info->rawStkSize, methodStart, offset);
+
+ // Note that this assumes that only the last instruction in SKIP_ALLOC_FRAME
+ // actually updates ESP
+ if (InstructionAlreadyExecuted(offset, curOffs + 1))
+ {
+ savedRegPtr += (info->rawStkSize / sizeof(DWORD));
+ ESP += info->rawStkSize;
+ }
+ }
+
+ //
+ // Stack probe checks here
+ //
+
+ // Poison the value, we don't set it properly at the end of the prolog
+ INDEBUG(offset = 0xCCCCCCCC);
+
+
+ // Always restore EBP
+ if (regsMask & RM_EBP)
+ pContext->pEbp = savedRegPtr++;
+
+ if (flags & UpdateAllRegs)
+ {
+ if (regsMask & RM_EBX)
+ pContext->pEbx = savedRegPtr++;
+ if (regsMask & RM_ESI)
+ pContext->pEsi = savedRegPtr++;
+ if (regsMask & RM_EDI)
+ pContext->pEdi = savedRegPtr++;
+
+ TRASH_CALLEE_UNSAVED_REGS(pContext);
+ }
+
+#if 0
+// NOTE:
+// THIS IS ONLY TRUE IF PROLOGSIZE DOES NOT INCLUDE REG-VAR INITIALIZATION !!!!
+//
+ /* there is (potentially) only one additional
+ instruction in the prolog, (push ebp)
+ but if we would have been passed that instruction,
+ info->prologOffs would be hdrInfo::NOT_IN_PROLOG!
+ */
+ _ASSERTE(offset == info->prologOffs);
+#endif
+
+ pContext->Esp = ESP;
+}
+
+/*****************************************************************************/
+
+void UnwindEspFrame(
+ PREGDISPLAY pContext,
+ hdrInfo * info,
+ PTR_CBYTE table,
+ PTR_CBYTE methodStart,
+ DWORD curOffs,
+ unsigned flags)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(!info->ebpFrame && !info->doubleAlign);
+ _ASSERTE(info->epilogOffs == hdrInfo::NOT_IN_EPILOG);
+
+ unsigned ESP = pContext->Esp;
+
+
+ if (info->prologOffs != hdrInfo::NOT_IN_PROLOG)
+ {
+ if (info->prologOffs != 0) // Do nothing for the very start of the method
+ {
+ UnwindEspFrameProlog(pContext, info, methodStart, flags);
+ ESP = pContext->Esp;
+ }
+ }
+ else
+ {
+ /* we are past the prolog, ESP has been set above */
+
+ // Are there any arguments pushed on the stack?
+
+ ESP += GetPushedArgSize(info, table, curOffs);
+
+ ESP += info->rawStkSize;
+
+ const RegMask regsMask = info->savedRegMask;
+
+ for (unsigned i = NumItems(CALLEE_SAVED_REGISTERS_MASK); i > 0; i--)
+ {
+ RegMask regMask = CALLEE_SAVED_REGISTERS_MASK[i - 1];
+
+ if ((regMask & regsMask) == 0)
+ continue;
+
+ SIZE_T offsetOfRegPtr = REGDISPLAY_OFFSET_OF_CALLEE_SAVED_REGISTERS[i - 1];
+ *(LPVOID*)(PBYTE(pContext) + offsetOfRegPtr) = PTR_DWORD((TADDR)ESP);
+
+ ESP += sizeof(unsigned);
+ }
+ }
+
+ /* we can now set the (address of the) return address */
+
+ pContext->PCTAddr = (TADDR)ESP;
+ pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
+
+ /* Now adjust stack pointer */
+
+ pContext->Esp = ESP + ESPIncrOnReturn(info);
+}
+
+
+/*****************************************************************************/
+
+void UnwindEbpDoubleAlignFrameProlog(
+ PREGDISPLAY pContext,
+ hdrInfo * info,
+ PTR_CBYTE methodStart,
+ unsigned flags)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(info->prologOffs != hdrInfo::NOT_IN_PROLOG);
+ _ASSERTE(info->ebpFrame || info->doubleAlign);
+
+ DWORD offset = 0;
+
+#ifdef _DEBUG
+ // If the first two instructions are 'nop, int3', then we will
+ // assume that is from a JitHalt operation and skip past it
+ if (methodStart[0] == X86_INSTR_NOP && methodStart[1] == X86_INSTR_INT3)
+ {
+ offset += 2;
+ }
+#endif
+
+ /* Check for the case where EBP has not been updated yet. */
+
+ const DWORD curOffs = info->prologOffs;
+
+    // If we have still not executed "push ebp; mov ebp, esp", then we need to
+ // report the frame relative to ESP
+
+ if (!InstructionAlreadyExecuted(offset + 1, curOffs))
+ {
+ _ASSERTE(CheckInstrByte(methodStart [offset], X86_INSTR_PUSH_EBP) ||
+ CheckInstrWord(*PTR_WORD(methodStart + offset), X86_INSTR_W_MOV_EBP_ESP) ||
+ CheckInstrByte(methodStart [offset], X86_INSTR_JMP_NEAR_REL32)); // a rejit jmp-stamp
+
+ /* If we're past the "push ebp", adjust ESP to pop EBP off */
+
+ if (curOffs == (offset + 1))
+ pContext->Esp += sizeof(TADDR);
+
+ /* Stack pointer points to return address */
+
+ pContext->PCTAddr = (TADDR)pContext->Esp;
+ pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
+
+ /* EBP and callee-saved registers still have the correct value */
+
+ return;
+ }
+
+    // We are at least past the "push ebp; mov ebp, esp"
+
+ offset = SKIP_MOV_REG_REG(methodStart,
+ SKIP_PUSH_REG(methodStart, offset));
+
+    /* At this point, EBP has been set up. The caller's ESP and the return address
+       can be determined using EBP. Since we are still in the prolog,
+       we need to know our exact location to determine the callee-saved registers */
+
+ const unsigned curEBP = *pContext->pEbp;
+
+ if (flags & UpdateAllRegs)
+ {
+ PTR_DWORD pSavedRegs = PTR_DWORD((TADDR)curEBP);
+
+ /* make sure that we align ESP just like the method's prolog did */
+ if (info->doubleAlign)
+ {
+ // "and esp,-8"
+ offset = SKIP_ARITH_REG(-8, methodStart, offset);
+ if (curEBP & 0x04)
+ {
+ pSavedRegs--;
+#ifdef _DEBUG
+ if (dspPtr) printf("EnumRef: dblalign ebp: %08X\n", curEBP);
+#endif
+ }
+ }
+
+ /* Increment "offset" in steps to see which callee-saved
+ registers have been pushed already */
+
+ for (unsigned i = 0; i < NumItems(CALLEE_SAVED_REGISTERS_MASK) - 1; i++)
+ {
+ RegMask regMask = CALLEE_SAVED_REGISTERS_MASK[i];
+ _ASSERTE(regMask != RM_EBP);
+
+ if ((info->savedRegMask & regMask) == 0)
+ continue;
+
+ if (InstructionAlreadyExecuted(offset, curOffs))
+ {
+ SIZE_T offsetOfRegPtr = REGDISPLAY_OFFSET_OF_CALLEE_SAVED_REGISTERS[i];
+ *(LPVOID*)(PBYTE(pContext) + offsetOfRegPtr) = --pSavedRegs;
+ }
+
+ // "push reg"
+ offset = SKIP_PUSH_REG(methodStart, offset) ;
+ }
+
+ TRASH_CALLEE_UNSAVED_REGS(pContext);
+ }
+
+ /* The caller's saved EBP is pointed to by our EBP */
+
+ pContext->pEbp = PTR_DWORD((TADDR)curEBP);
+ pContext->Esp = DWORD((TADDR)(curEBP + sizeof(void *)));
+
+ /* Stack pointer points to return address */
+
+ pContext->PCTAddr = (TADDR)pContext->Esp;
+ pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
+}
+
+/*****************************************************************************/
+
+bool UnwindEbpDoubleAlignFrame(
+ PREGDISPLAY pContext,
+ hdrInfo * info,
+ PTR_CBYTE methodStart,
+ unsigned flags,
+ StackwalkCacheUnwindInfo *pUnwindInfo) // out-only, perf improvement
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(info->ebpFrame || info->doubleAlign);
+
+ const unsigned curESP = pContext->Esp;
+ const unsigned curEBP = *pContext->pEbp;
+
+ /* First check if we are in a filter (which is obviously after the prolog) */
+
+ if (info->handlers && info->prologOffs == hdrInfo::NOT_IN_PROLOG)
+ {
+ TADDR baseSP;
+
+ FrameType frameType = GetHandlerFrameInfo(info, curEBP,
+ curESP, (DWORD) IGNORE_VAL,
+ &baseSP);
+
+ /* If we are in a filter, we only need to unwind the funclet stack.
+ For catches/finallies, the normal handling will
+ cause the frame to be unwound all the way up to ebp skipping
+ other frames above it. This is OK, as those frames will be
+ dead. Also, the EE will detect that this has happened and it
+ will handle any EE frames correctly.
+ */
+
+ if (frameType == FR_INVALID)
+ {
+ return false;
+ }
+
+ if (frameType == FR_FILTER)
+ {
+ pContext->PCTAddr = baseSP;
+ pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
+
+ pContext->Esp = (DWORD)(baseSP + sizeof(TADDR));
+
+ // pContext->pEbp = same as before;
+
+#ifdef _DEBUG
+        /* The filter has to be called by the VM. So we don't need to
+           update the callee-saved registers.
+         */
+
+ if (flags & UpdateAllRegs)
+ {
+ static DWORD s_badData = 0xDEADBEEF;
+
+ pContext->pEax = pContext->pEbx = pContext->pEcx =
+ pContext->pEdx = pContext->pEsi = pContext->pEdi = &s_badData;
+ }
+#endif
+
+ if (pUnwindInfo)
+ {
+ // The filter funclet is like an ESP-framed-method.
+ pUnwindInfo->fUseEbp = FALSE;
+ pUnwindInfo->fUseEbpAsFrameReg = FALSE;
+ }
+
+ return true;
+ }
+ }
+
+ //
+ // Prolog of an EBP method
+ //
+
+ if (info->prologOffs != hdrInfo::NOT_IN_PROLOG)
+ {
+ UnwindEbpDoubleAlignFrameProlog(pContext, info, methodStart, flags);
+
+ /* Now adjust stack pointer. */
+
+ pContext->Esp += ESPIncrOnReturn(info);
+ return true;
+ }
+
+ if (flags & UpdateAllRegs)
+ {
+ // Get to the first callee-saved register
+ PTR_DWORD pSavedRegs = PTR_DWORD((TADDR)curEBP);
+
+ if (info->doubleAlign && (curEBP & 0x04))
+ pSavedRegs--;
+
+ for (unsigned i = 0; i < NumItems(CALLEE_SAVED_REGISTERS_MASK) - 1; i++)
+ {
+ RegMask regMask = CALLEE_SAVED_REGISTERS_MASK[i];
+ if ((info->savedRegMask & regMask) == 0)
+ continue;
+
+ SIZE_T offsetOfRegPtr = REGDISPLAY_OFFSET_OF_CALLEE_SAVED_REGISTERS[i];
+ *(LPVOID*)(PBYTE(pContext) + offsetOfRegPtr) = --pSavedRegs;
+ }
+ }
+
+ /* The caller's ESP will be equal to EBP + retAddrSize + argSize. */
+
+ pContext->Esp = (DWORD)(curEBP + sizeof(curEBP) + ESPIncrOnReturn(info));
+
+ /* The caller's saved EIP is right after our EBP */
+
+ pContext->PCTAddr = (TADDR)curEBP + RETURN_ADDR_OFFS * sizeof(TADDR);
+ pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
+
+ /* The caller's saved EBP is pointed to by our EBP */
+
+ pContext->pEbp = PTR_DWORD((TADDR)curEBP);
+
+ return true;
+}
+
+/*****************************************************************************
+ *
+ * Unwind the current stack frame, i.e. update the virtual register
+ * set in pContext. This will be similar to the state after the function
+ * returns to its caller (IP points after the call, frame and stack
+ * pointers have been reset, callee-saved registers restored (if UpdateAllRegs),
+ * and callee-unsaved registers trashed).
+ * Returns success of operation.
+ */
+
+bool EECodeManager::UnwindStackFrame(PREGDISPLAY pContext,
+ EECodeInfo *pCodeInfo,
+ unsigned flags,
+ CodeManState *pState,
+ StackwalkCacheUnwindInfo *pUnwindInfo /* out-only, perf improvement */)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ // Address where the method has been interrupted
+ PCODE breakPC = pContext->ControlPC;
+ _ASSERTE(PCODEToPINSTR(breakPC) == pCodeInfo->GetCodeAddress());
+
+ PTR_CBYTE methodStart = PTR_CBYTE(pCodeInfo->GetSavedMethodCode());
+
+ PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+ DWORD curOffs = pCodeInfo->GetRelOffset();
+
+ _ASSERTE(sizeof(CodeManStateBuf) <= sizeof(pState->stateBuf));
+ CodeManStateBuf * stateBuf = (CodeManStateBuf*)pState->stateBuf;
+
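+    // Crack the info block header only if it has not already been cached
+    // in pState by an earlier code-manager call.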
+ if (pState->dwIsSet == 0)
+ {
+ /* Extract the necessary information from the info block header */
+
+ stateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(methodInfoPtr,
+ curOffs,
+ &stateBuf->hdrInfoBody);
+ }
+
+ PTR_CBYTE table = dac_cast<PTR_CBYTE>(methodInfoPtr) + stateBuf->hdrInfoSize;
+
+ hdrInfo * info = &stateBuf->hdrInfoBody;
+
+ info->isSpeculativeStackWalk = ((flags & SpeculativeStackwalk) != 0);
+
+ if (pUnwindInfo != NULL)
+ {
+ pUnwindInfo->securityObjectOffset = 0;
+ if (info->securityCheck)
+ {
+ _ASSERTE(info->ebpFrame);
+ SIZE_T securityObjectOffset = (GetSecurityObjectOffset(info) / sizeof(void*));
+ _ASSERTE(securityObjectOffset != 0);
+ pUnwindInfo->securityObjectOffset = DWORD(securityObjectOffset);
+ }
+
+ pUnwindInfo->fUseEbpAsFrameReg = info->ebpFrame;
+ pUnwindInfo->fUseEbp = ((info->savedRegMask & RM_EBP) != 0);
+ }
+
+ if (info->epilogOffs != hdrInfo::NOT_IN_EPILOG)
+ {
+ /*---------------------------------------------------------------------
+ * First, handle the epilog
+ */
+
+ PTR_CBYTE epilogBase = (PTR_CBYTE) (breakPC - info->epilogOffs);
+ UnwindEpilog(pContext, info, epilogBase, flags);
+ }
+ else if (!info->ebpFrame && !info->doubleAlign)
+ {
+ /*---------------------------------------------------------------------
+ * Now handle ESP frames
+ */
+
+ UnwindEspFrame(pContext, info, table, methodStart, curOffs, flags);
+ return true;
+ }
+ else
+ {
+ /*---------------------------------------------------------------------
+     * Now we know that we have an EBP frame
+ */
+
+ if (!UnwindEbpDoubleAlignFrame(pContext, info, methodStart, flags, pUnwindInfo))
+ return false;
+ }
+
+ // TODO [DAVBR]: For the full fix for VsWhidbey 450273, all the below
+ // may be uncommented once isLegalManagedCodeCaller works properly
+ // with non-return address inputs, and with non-DEBUG builds
+ /*
+ // Ensure isLegalManagedCodeCaller succeeds for speculative stackwalks.
+ // (We just assert this below for non-speculative stackwalks.)
+ //
+ FAIL_IF_SPECULATIVE_WALK(isLegalManagedCodeCaller(GetControlPC(pContext)));
+ */
+
+ return true;
+}
+
+/*****************************************************************************/
+#elif !defined(CROSSGEN_COMPILE) // _TARGET_X86_ - UnwindStackFrame
+/*****************************************************************************/
+
+bool EECodeManager::UnwindStackFrame(PREGDISPLAY pContext,
+ EECodeInfo *pCodeInfo,
+ unsigned flags,
+ CodeManState *pState,
+ StackwalkCacheUnwindInfo *pUnwindInfo /* out-only, perf improvement */)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+#if defined(_TARGET_AMD64_)
+ // To avoid unnecessary computation, we only crack the unwind info if pUnwindInfo is not NULL, which only happens
+ // if the LIGHTUNWIND flag is passed to StackWalkFramesEx().
+ if (pUnwindInfo != NULL)
+ {
+ pCodeInfo->GetOffsetsFromUnwindInfo(&(pUnwindInfo->RSPOffsetFromUnwindInfo),
+ &(pUnwindInfo->RBPOffset));
+ }
+#endif // _TARGET_AMD64_
+
+ _ASSERTE(pCodeInfo != NULL);
+ Thread::VirtualUnwindCallFrame(pContext, pCodeInfo);
+ return true;
+}
+
+/*****************************************************************************/
+#else // _TARGET_X86_ - UnwindStackFrame
+
+bool EECodeManager::UnwindStackFrame(PREGDISPLAY pContext,
+ EECodeInfo *pCodeInfo,
+ unsigned flags,
+ CodeManState *pState,
+ StackwalkCacheUnwindInfo *pUnwindInfo /* out-only, perf improvement */)
+{
+ _ASSERTE(!"EECodeManager::UnwindStackFrame not supported in this build configuration");
+ return true;
+}
+
+#endif // _TARGET_X86_ - UnwindStackFrame
+/*****************************************************************************/
+
+/* report args in 'msig' to the GC.
+ 'argsStart' is start of the stack-based arguments
+ 'varArgSig' describes the arguments
+ 'ctx' has the GC reporting info
+*/
+void promoteVarArgs(PTR_BYTE argsStart, PTR_VASigCookie varArgSig, GCCONTEXT* ctx)
+{
+ WRAPPER_NO_CONTRACT;
+
+ //Note: no instantiations needed for varargs
+ MetaSig msig(varArgSig->signature,
+ varArgSig->pModule,
+ NULL);
+
+ PTR_BYTE pFrameBase = argsStart - TransitionBlock::GetOffsetOfArgs();
+
+ ArgIterator argit(&msig);
+
+#ifdef _TARGET_X86_
+    // For the X86 target the JIT does not report any of the fixed args for a varargs method,
+    // so we report the fixed args via the scan loop below
+ bool skipFixedArgs = false;
+#else
+    // For other platforms the JITs do report the fixed args of a varargs method,
+    // so we must skip to the end of the fixed args before reporting
+ bool skipFixedArgs = true;
+#endif
+
+ bool inVarArgs = false;
+
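+    // Walk every argument slot; AtSentinel() marks the transition from the
+    // fixed arguments to the trailing varargs.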
+ int argOffset;
+ while ((argOffset = argit.GetNextOffset()) != TransitionBlock::InvalidOffset)
+ {
+ if (msig.GetArgProps().AtSentinel())
+ inVarArgs = true;
+
+        // If skipFixedArgs is false we report all arguments;
+        // otherwise we just report the varargs.
+ if (!skipFixedArgs || inVarArgs)
+ msig.GcScanRoots(pFrameBase + argOffset, ctx->f, ctx->sc);
+ }
+}
+
+INDEBUG(void* forceStack1;)
+
+#if defined(_TARGET_X86_)
+
+/*****************************************************************************
+ *
+ * Enumerate all live object references in that function using
+ * the virtual register set.
+ * Returns success of operation.
+ */
+
+bool EECodeManager::EnumGcRefs( PREGDISPLAY pContext,
+ EECodeInfo *pCodeInfo,
+ unsigned flags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+ unsigned curOffs = pCodeInfo->GetRelOffset();
+
+ unsigned EBP = *pContext->pEbp;
+ unsigned ESP = pContext->Esp;
+
+ unsigned ptrOffs;
+
+ unsigned count;
+
+ hdrInfo info;
+ PTR_CBYTE table = PTR_CBYTE(methodInfoPtr);
+#if 0
+ printf("EECodeManager::EnumGcRefs - EIP = %08x ESP = %08x offset = %x GC Info is at %08x\n", *pContext->pPC, ESP, curOffs, table);
+#endif
+
+
+ /* Extract the necessary information from the info block header */
+
+ table += crackMethodInfoHdr(methodInfoPtr,
+ curOffs,
+ &info);
+
+ _ASSERTE( curOffs <= info.methodSize);
+
+#ifdef _DEBUG
+// if ((methodInfoPtr == (void*)0x37760d0) && (curOffs == 0x264))
+// __asm int 3;
+
+ if (trEnumGCRefs) {
+ static unsigned lastESP = 0;
+ unsigned diffESP = ESP - lastESP;
+ if (diffESP > 0xFFFF) {
+ printf("------------------------------------------------------\n");
+ }
+ lastESP = ESP;
+ printf("EnumGCRefs [%s][%s] at %s.%s + 0x%03X:\n",
+ info.ebpFrame?"ebp":" ",
+ info.interruptible?"int":" ",
+ "UnknownClass","UnknownMethod", curOffs);
+ fflush(stdout);
+ }
+#endif
+
+ /* Are we in the prolog or epilog of the method? */
+
+ if (info.prologOffs != hdrInfo::NOT_IN_PROLOG ||
+ info.epilogOffs != hdrInfo::NOT_IN_EPILOG)
+ {
+
+#if !DUMP_PTR_REFS
+ // Under normal circumstances the system will not suspend a thread
+ // if it is in the prolog or epilog of the function. However ThreadAbort
+ // exception or stack overflows can cause EH to happen in a prolog.
+ // Once in the handler, a GC can happen, so we can get to this code path.
+ // However since we are tearing down this frame, we don't need to report
+ // anything and we can simply return.
+
+ _ASSERTE(flags & ExecutionAborted);
+#endif
+ return true;
+ }
+
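+// CHK_AND_REPORT_REG: if 'doIt' is set, report the pointer held in register
+// 'regName' to the GC callback, marking it as an interior pointer when
+// 'iptr' is set.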
+#ifdef _DEBUG
+#define CHK_AND_REPORT_REG(reg, doIt, iptr, regName) \
+ if (doIt) \
+ { \
+ if (dspPtr) \
+ printf(" Live pointer register %s: ", #regName); \
+ pCallBack(hCallBack, \
+ (OBJECTREF*)(pContext->p##regName), \
+ (iptr ? GC_CALL_INTERIOR : 0) \
+ | CHECK_APP_DOMAIN \
+ DAC_ARG(DacSlotLocation(reg, 0, false))); \
+ }
+#else // !_DEBUG
+#define CHK_AND_REPORT_REG(reg, doIt, iptr, regName) \
+ if (doIt) \
+ pCallBack(hCallBack, \
+ (OBJECTREF*)(pContext->p##regName), \
+ (iptr ? GC_CALL_INTERIOR : 0) \
+ | CHECK_APP_DOMAIN \
+ DAC_ARG(DacSlotLocation(reg, 0, false)));
+
+#endif // _DEBUG
+
+ /* What kind of a frame is this ? */
+
+ FrameType frameType = FR_NORMAL;
+ TADDR baseSP = 0;
+
+ if (info.handlers)
+ {
+ _ASSERTE(info.ebpFrame);
+
+ bool hasInnerFilter, hadInnerFilter;
+ frameType = GetHandlerFrameInfo(&info, EBP,
+ ESP, (DWORD) IGNORE_VAL,
+ &baseSP, NULL,
+ &hasInnerFilter, &hadInnerFilter);
+ _ASSERTE(frameType != FR_INVALID);
+
+ /* If this is the parent frame of a filter which is currently
+ executing, then the filter would have enumerated the frame using
+ the filter PC.
+ */
+
+ if (hasInnerFilter)
+ return true;
+
+        /* If we are in a try and we had a filter execute, we may have reported
+           GC refs from the filter (and not using the try's offset). So
+           we had better use the filter's end offset, as the try is
+           effectively dead and its GC refs would be stale */
+
+ if (hadInnerFilter)
+ {
+ PTR_TADDR pFirstBaseSPslot = GetFirstBaseSPslotPtr(EBP, &info);
+ curOffs = (unsigned)pFirstBaseSPslot[1] - 1;
+ _ASSERTE(curOffs < info.methodSize);
+
+ /* Extract the necessary information from the info block header */
+
+ table = PTR_CBYTE(methodInfoPtr);
+
+ table += crackMethodInfoHdr(methodInfoPtr,
+ curOffs,
+ &info);
+ }
+ }
+
+
+ bool willContinueExecution = !(flags & ExecutionAborted);
+ unsigned pushedSize = 0;
+
+ /* if we have been interrupted we don't have to report registers/arguments
+ * because we are about to lose this context anyway.
+     * Alas, if we are in an EBP-less method we have to parse the table
+ * in order to adjust ESP.
+ *
+ * Note that we report "this" for all methods, even if
+     * noncontinuable, because of the off chance they may be
+ * synchronized and we have to release the monitor on unwind. This
+ * could conceivably be optimized, but it turns out to be more
+ * expensive to check whether we're synchronized (which involves
+ * consulting metadata) than to just report "this" all the time in
+ * our most important scenarios.
+ */
+
+ if (info.interruptible)
+ {
+ pushedSize = scanArgRegTableI(skipToArgReg(info, table), curOffs, &info);
+
+ RegMask regs = info.regMaskResult;
+ RegMask iregs = info.iregMaskResult;
+ ptrArgTP args = info.argMaskResult;
+ ptrArgTP iargs = info.iargMaskResult;
+
+ _ASSERTE((isZero(args) || pushedSize != 0) || info.ebpFrame);
+ _ASSERTE((args & iargs) == iargs);
+ // Only synchronized methods and generic code that accesses
+ // the type context via "this" need to report "this".
+    // If it's reported for other methods, it's probably
+ // done incorrectly. So flag such cases.
+ _ASSERTE(info.thisPtrResult == REGI_NA ||
+ pCodeInfo->GetMethodDesc()->IsSynchronized() ||
+ pCodeInfo->GetMethodDesc()->AcquiresInstMethodTableFromThis());
+
+ /* now report registers and arguments if we are not interrupted */
+
+ if (willContinueExecution)
+ {
+
+            /* Propagate unsaved registers only in the "current" method */
+            /* If this is not the active method, then the callee will
+             * trash these registers, and so we won't need to report them */
+
+ if (flags & ActiveStackFrame)
+ {
+ CHK_AND_REPORT_REG(REGI_EAX, regs & RM_EAX, iregs & RM_EAX, Eax);
+ CHK_AND_REPORT_REG(REGI_ECX, regs & RM_ECX, iregs & RM_ECX, Ecx);
+ CHK_AND_REPORT_REG(REGI_EDX, regs & RM_EDX, iregs & RM_EDX, Edx);
+ }
+
+ CHK_AND_REPORT_REG(REGI_EBX, regs & RM_EBX, iregs & RM_EBX, Ebx);
+ CHK_AND_REPORT_REG(REGI_EBP, regs & RM_EBP, iregs & RM_EBP, Ebp);
+ CHK_AND_REPORT_REG(REGI_ESI, regs & RM_ESI, iregs & RM_ESI, Esi);
+ CHK_AND_REPORT_REG(REGI_EDI, regs & RM_EDI, iregs & RM_EDI, Edi);
+ _ASSERTE(!(regs & RM_ESP));
+
+ /* Report any pending pointer arguments */
+
+ DWORD * pPendingArgFirst; // points **AT** first parameter
+ if (!info.ebpFrame)
+ {
+ // -sizeof(void*) because we want to point *AT* first parameter
+ pPendingArgFirst = (DWORD *)(size_t)(ESP + pushedSize - sizeof(void*));
+ }
+ else
+ {
+ _ASSERTE(willContinueExecution);
+
+ if (info.handlers)
+ {
+ // -sizeof(void*) because we want to point *AT* first parameter
+ pPendingArgFirst = (DWORD *)(size_t)(baseSP - sizeof(void*));
+ }
+ else if (info.localloc)
+ {
+ baseSP = *(DWORD *)(size_t)(EBP - GetLocallocSPOffset(&info));
+ // -sizeof(void*) because we want to point *AT* first parameter
+ pPendingArgFirst = (DWORD *)(size_t) (baseSP - sizeof(void*));
+ }
+ else
+ {
+                    // Note that 'info.stackSize' includes the size for pushing EBP, but EBP is pushed
+ // BEFORE EBP is set from ESP, thus (EBP - info.stackSize) actually points past
+ // the frame by one DWORD, and thus points *AT* the first parameter
+
+ pPendingArgFirst = (DWORD *)(size_t)(EBP - info.stackSize);
+ }
+ }
+
+ if (!isZero(args))
+ {
+ unsigned i = 0;
+ ptrArgTP b(1);
+ for (; !isZero(args) && (i < MAX_PTRARG_OFS); i += 1, b <<= 1)
+ {
+ if (intersect(args,b))
+ {
+ unsigned argAddr = (unsigned)(size_t)(pPendingArgFirst - i);
+ bool iptr = false;
+
+ setDiff(args, b);
+ if (intersect(iargs,b))
+ {
+ setDiff(iargs, b);
+ iptr = true;
+ }
+
+#ifdef _DEBUG
+ if (dspPtr)
+ {
+ printf(" Pushed ptr arg [E");
+ if (info.ebpFrame)
+ printf("BP-%02XH]: ", EBP - argAddr);
+ else
+ printf("SP+%02XH]: ", argAddr - ESP);
+ }
+#endif
+ _ASSERTE(true == GC_CALL_INTERIOR);
+ pCallBack(hCallBack, (OBJECTREF *)(size_t)argAddr, (int)iptr | CHECK_APP_DOMAIN
+ DAC_ARG(DacSlotLocation(info.ebpFrame ? REGI_EBP : REGI_ESP,
+ info.ebpFrame ? EBP - argAddr : argAddr - ESP,
+ true)));
+ }
+ }
+ }
+ }
+ else
+ {
+        // Is "this" enregistered? If so, report it as we might need to
+ // release the monitor for synchronized methods.
+ // Else, it is on the stack and will be reported below.
+
+ if (info.thisPtrResult != REGI_NA)
+ {
+ // Synchronized methods and methods satisfying
+ // MethodDesc::AcquiresInstMethodTableFromThis (i.e. those
+ // where "this" is reported in thisPtrResult) are
+ // not supported on value types.
+ _ASSERTE((regNumToMask(info.thisPtrResult) & info.iregMaskResult)== 0);
+
+ void * thisReg = getCalleeSavedReg(pContext, info.thisPtrResult);
+ pCallBack(hCallBack, (OBJECTREF *)thisReg, CHECK_APP_DOMAIN
+ DAC_ARG(DacSlotLocation(info.thisPtrResult, 0, false)));
+ }
+ }
+ }
+ else /* not interruptible */
+ {
+ pushedSize = scanArgRegTable(skipToArgReg(info, table), curOffs, &info);
+
+ RegMask regMask = info.regMaskResult;
+ RegMask iregMask = info.iregMaskResult;
+ ptrArgTP argMask = info.argMaskResult;
+ ptrArgTP iargMask = info.iargMaskResult;
+ unsigned argHnum = info.argHnumResult;
+ PTR_CBYTE argTab = info.argTabResult;
+
+ // Only synchronized methods and generic code that accesses
+ // the type context via "this" need to report "this".
+    // If it's reported for other methods, it's probably
+ // done incorrectly. So flag such cases.
+ _ASSERTE(info.thisPtrResult == REGI_NA ||
+ pCodeInfo->GetMethodDesc()->IsSynchronized() ||
+ pCodeInfo->GetMethodDesc()->AcquiresInstMethodTableFromThis());
+
+
+ /* now report registers and arguments if we are not interrupted */
+
+ if (willContinueExecution)
+ {
+
+ /* Report all live pointer registers */
+
+ CHK_AND_REPORT_REG(REGI_EDI, regMask & RM_EDI, iregMask & RM_EDI, Edi);
+ CHK_AND_REPORT_REG(REGI_ESI, regMask & RM_ESI, iregMask & RM_ESI, Esi);
+ CHK_AND_REPORT_REG(REGI_EBX, regMask & RM_EBX, iregMask & RM_EBX, Ebx);
+ CHK_AND_REPORT_REG(REGI_EBP, regMask & RM_EBP, iregMask & RM_EBP, Ebp);
+
+            /* ESP can't be reported */
+ _ASSERTE(!(regMask & RM_ESP));
+ /* No callee-trashed registers */
+ _ASSERTE(!(regMask & RM_CALLEE_TRASHED));
+ /* EBP can't be reported unless we have an EBP-less frame */
+ _ASSERTE(!(regMask & RM_EBP) || !(info.ebpFrame));
+
+ /* Report any pending pointer arguments */
+
+ if (argTab != 0)
+ {
+ unsigned lowBits, stkOffs, argAddr, val;
+
+                // argMask does not fit in 32 bits,
+                // so the arguments are reported via a table.
+                // Both of these are very rare cases.
+
+ do
+ {
+ val = fastDecodeUnsigned(argTab);
+
+ lowBits = val & OFFSET_MASK;
+ stkOffs = val & ~OFFSET_MASK;
+ _ASSERTE((lowBits == 0) || (lowBits == byref_OFFSET_FLAG));
+
+ argAddr = ESP + stkOffs;
+#ifdef _DEBUG
+ if (dspPtr)
+ printf(" Pushed %sptr arg at [ESP+%02XH]",
+ lowBits ? "iptr " : "", stkOffs);
+#endif
+ _ASSERTE(byref_OFFSET_FLAG == GC_CALL_INTERIOR);
+ pCallBack(hCallBack, (OBJECTREF *)(size_t)argAddr, lowBits | CHECK_APP_DOMAIN
+ DAC_ARG(DacSlotLocation(REGI_ESP, stkOffs, true)));
+ }
+ while(--argHnum);
+
+ _ASSERTE(info.argTabResult + info.argTabBytes == argTab);
+ }
+ else
+ {
+ unsigned argAddr = ESP;
+
+ while (!isZero(argMask))
+ {
+ _ASSERTE(argHnum-- > 0);
+
+ if (toUnsigned(argMask) & 1)
+ {
+ bool iptr = false;
+
+ if (toUnsigned(iargMask) & 1)
+ iptr = true;
+#ifdef _DEBUG
+ if (dspPtr)
+ printf(" Pushed ptr arg at [ESP+%02XH]",
+ argAddr - ESP);
+#endif
+ _ASSERTE(true == GC_CALL_INTERIOR);
+ pCallBack(hCallBack, (OBJECTREF *)(size_t)argAddr, (int)iptr | CHECK_APP_DOMAIN
+ DAC_ARG(DacSlotLocation(REGI_ESP, argAddr - ESP, true)));
+ }
+
+ argMask >>= 1;
+ iargMask >>= 1;
+ argAddr += 4;
+ }
+
+ }
+
+ }
+ else
+ {
+            // Is "this" enregistered? If so, report it as we will need to
+ // release the monitor. Else, it is on the stack and will be
+ // reported below.
+
+ // For partially interruptible code, info.thisPtrResult will be
+ // the last known location of "this". So the compiler needs to
+ // generate information which is correct at every point in the code,
+ // not just at call sites.
+
+ if (info.thisPtrResult != REGI_NA)
+ {
+ // Synchronized methods on value types are not supported
+ _ASSERTE((regNumToMask(info.thisPtrResult) & info.iregMaskResult)== 0);
+
+ void * thisReg = getCalleeSavedReg(pContext, info.thisPtrResult);
+ pCallBack(hCallBack, (OBJECTREF *)thisReg, CHECK_APP_DOMAIN
+ DAC_ARG(DacSlotLocation(info.thisPtrResult, 0, false)));
+ }
+ }
+
+ } //info.interruptible
+
+ /* compute the argument base (reference point) */
+
+ unsigned argBase;
+
+ if (info.ebpFrame)
+ argBase = EBP;
+ else
+ argBase = ESP + pushedSize;
+
+#if VERIFY_GC_TABLES
+ _ASSERTE(*castto(table, unsigned short *)++ == 0xBEEF);
+#endif
+
+ unsigned ptrAddr;
+ unsigned lowBits;
+
+
+ /* Process the untracked frame variable table */
+
+ count = info.untrackedCnt;
+ int lastStkOffs = 0;
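+    // Entries in the untracked table are delta-encoded: each stack offset is
+    // stored as a signed difference from the previous entry's offset.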
+ while (count-- > 0)
+ {
+ int stkOffs = fastDecodeSigned(table);
+ stkOffs = lastStkOffs - stkOffs;
+ lastStkOffs = stkOffs;
+
+ _ASSERTE(0 == ~OFFSET_MASK % sizeof(void*));
+
+ lowBits = OFFSET_MASK & stkOffs;
+ stkOffs &= ~OFFSET_MASK;
+
+ ptrAddr = argBase + stkOffs;
+ if (info.doubleAlign && stkOffs >= int(info.stackSize - sizeof(void*))) {
+            // We encode the arguments as if they were ESP-based variables even though they aren't.
+            // Had this frame been an ESP-based frame, the fake frame would be one DWORD
+            // smaller than the real frame because it would not have pushed EBP, but the real frame did.
+            // Thus to get the correct EBP-relative offset we have to adjust by info.stackSize - sizeof(void*).
+ ptrAddr = EBP + (stkOffs-(info.stackSize - sizeof(void*)));
+ }
+
+#ifdef _DEBUG
+ if (dspPtr)
+ {
+ printf(" Untracked %s%s local at [E",
+ (lowBits & pinned_OFFSET_FLAG) ? "pinned " : "",
+ (lowBits & byref_OFFSET_FLAG) ? "byref" : "");
+
+ int dspOffs = ptrAddr;
+ char frameType;
+
+ if (info.ebpFrame) {
+ dspOffs -= EBP;
+ frameType = 'B';
+ }
+ else {
+ dspOffs -= ESP;
+ frameType = 'S';
+ }
+
+ if (dspOffs < 0)
+ printf("%cP-%02XH]: ", frameType, -dspOffs);
+ else
+ printf("%cP+%02XH]: ", frameType, +dspOffs);
+ }
+#endif
+
+ _ASSERTE((pinned_OFFSET_FLAG == GC_CALL_PINNED) &&
+ (byref_OFFSET_FLAG == GC_CALL_INTERIOR));
+ pCallBack(hCallBack, (OBJECTREF*)(size_t)ptrAddr, lowBits | CHECK_APP_DOMAIN
+ DAC_ARG(DacSlotLocation(info.ebpFrame ? REGI_EBP : REGI_ESP,
+ info.ebpFrame ? EBP - ptrAddr : ptrAddr - ESP,
+ true)));
+ }
+
+#if VERIFY_GC_TABLES
+ _ASSERTE(*castto(table, unsigned short *)++ == 0xCAFE);
+#endif
+
+ /* Process the frame variable lifetime table */
+ count = info.varPtrTableSize;
+
+ /* If we are not in the active method, we are currently pointing
+ * to the return address; at the return address stack variables
+     * can become dead if the call is the last instruction of a try block
+     * and the return address is the jump around the catch block. Therefore
+     * we simply assume an offset inside the call instruction.
+ */
+
+ unsigned newCurOffs;
+
+ if (willContinueExecution)
+ {
+ newCurOffs = (flags & ActiveStackFrame) ? curOffs // after "call"
+ : curOffs-1; // inside "call"
+ }
+ else
+ {
+ /* However if ExecutionAborted, then this must be one of the
+ * ExceptionFrames. Handle accordingly
+ */
+ _ASSERTE(!(flags & AbortingCall) || !(flags & ActiveStackFrame));
+
+ newCurOffs = (flags & AbortingCall) ? curOffs-1 // inside "call"
+ : curOffs; // at faulting instr, or start of "try"
+ }
+
+ ptrOffs = 0;
+
+ while (count-- > 0)
+ {
+ int stkOffs;
+ unsigned begOffs;
+ unsigned endOffs;
+
+ stkOffs = fastDecodeUnsigned(table);
+ begOffs = ptrOffs + fastDecodeUnsigned(table);
+ endOffs = begOffs + fastDecodeUnsigned(table);
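+        // The lifetime entries are delta-encoded and sorted by their starting
+        // offset; 'ptrOffs' carries the running base between iterations.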
+
+ _ASSERTE(0 == ~OFFSET_MASK % sizeof(void*));
+
+ lowBits = OFFSET_MASK & stkOffs;
+ stkOffs &= ~OFFSET_MASK;
+
+ if (info.ebpFrame) {
+ stkOffs = -stkOffs;
+ _ASSERTE(stkOffs < 0);
+ }
+ else {
+ _ASSERTE(stkOffs >= 0);
+ }
+
+ ptrAddr = argBase + stkOffs;
+
+ /* Is this variable live right now? */
+
+ if (newCurOffs >= begOffs)
+ {
+ if (newCurOffs < endOffs)
+ {
+#ifdef _DEBUG
+ if (dspPtr) {
+ printf(" Frame %s%s local at [E",
+ (lowBits & byref_OFFSET_FLAG) ? "byref " : "",
+ (lowBits & this_OFFSET_FLAG) ? "this-ptr" : "");
+
+ int dspOffs = ptrAddr;
+ char frameType;
+
+ if (info.ebpFrame) {
+ dspOffs -= EBP;
+ frameType = 'B';
+ }
+ else {
+ dspOffs -= ESP;
+ frameType = 'S';
+ }
+
+ if (dspOffs < 0)
+ printf("%cP-%02XH]: ", frameType, -dspOffs);
+ else
+ printf("%cP+%02XH]: ", frameType, +dspOffs);
+ }
+#endif
+ _ASSERTE(byref_OFFSET_FLAG == GC_CALL_INTERIOR);
+ pCallBack(hCallBack, (OBJECTREF*)(size_t)ptrAddr, (lowBits & byref_OFFSET_FLAG) | CHECK_APP_DOMAIN
+ DAC_ARG(DacSlotLocation(info.ebpFrame ? REGI_EBP : REGI_ESP,
+ info.ebpFrame ? EBP - ptrAddr : ptrAddr - ESP,
+ true)));
+ }
+ }
+ // exit loop early if start of live range is beyond PC, as ranges are sorted by lower bound
+ else break;
+
+ ptrOffs = begOffs;
+ }
+
+
+#if VERIFY_GC_TABLES
+ _ASSERTE(*castto(table, unsigned short *)++ == 0xBABE);
+#endif
+
+    /* Are we a varargs function? If so, we have to report all args
+       except 'this'. (Note that the GC tables created by the x86 JIT
+       do not contain ANY arguments except 'this', even if they
+       were statically declared.) */
+
+ if (info.varargs) {
+ LOG((LF_GCINFO, LL_INFO100, "Reporting incoming vararg GC refs\n"));
+
+ PTR_BYTE argsStart;
+
+ if (info.ebpFrame || info.doubleAlign)
+            argsStart = PTR_BYTE((size_t)EBP) + 2 * sizeof(void*); // pushed EBP and retAddr
+ else
+ argsStart = PTR_BYTE((size_t)argBase) + info.stackSize + sizeof(void*); // ESP + locals + retAddr
+
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+ // Note that I really want to say hCallBack is a GCCONTEXT, but this is pretty close
+ extern void GcEnumObject(LPVOID pData, OBJECTREF *pObj, DWORD flags);
+ _ASSERTE((void*) GcEnumObject == pCallBack);
+#endif
+ GCCONTEXT *pCtx = (GCCONTEXT *) hCallBack;
+
+ // For varargs, look up the signature using the varArgSig token passed on the stack
+ PTR_VASigCookie varArgSig = *PTR_PTR_VASigCookie(argsStart);
+
+ promoteVarArgs(argsStart, varArgSig, pCtx);
+ }
+
+ return true;
+}
+
+#elif defined(USE_GC_INFO_DECODER) && !defined(CROSSGEN_COMPILE) // !defined(_TARGET_X86_)
+
+
+/*****************************************************************************
+ *
+ * Enumerate all live object references in that function using
+ * the virtual register set.
+ * Returns success of operation.
+ */
+
+bool EECodeManager::EnumGcRefs( PREGDISPLAY pRD,
+ EECodeInfo *pCodeInfo,
+ unsigned flags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ unsigned curOffs = pCodeInfo->GetRelOffset();
+
+#ifdef _TARGET_ARM_
+    // On ARM, the low-order bit of an instruction pointer indicates Thumb vs. ARM mode.
+ // Mask this off; all instructions are two-byte aligned.
+ curOffs &= (~THUMB_CODE);
+#endif // _TARGET_ARM_
+
+#ifdef _DEBUG
+ // Get the name of the current method
+ const char * methodName = pCodeInfo->GetMethodDesc()->GetName();
+ LOG((LF_GCINFO, LL_INFO1000, "Reporting GC refs for %s at offset %04x.\n",
+ methodName, curOffs));
+#endif
+
+ PTR_BYTE gcInfoAddr = dac_cast<PTR_BYTE>(pCodeInfo->GetGCInfo());
+
+#if defined(STRESS_HEAP) && defined(PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED)
+#ifdef USE_GC_INFO_DECODER
+    // When we simulate a hijack during GC stress,
+    // we start with ActiveStackFrame and the offset
+    // after the call.
+    // We need to make it look like a non-leaf frame
+    // so that it's treated like a regular hijack.
+ if (flags & ActiveStackFrame)
+ {
+ GcInfoDecoder _gcInfoDecoder(
+ gcInfoAddr,
+ DECODE_INTERRUPTIBILITY,
+ curOffs
+ );
+ if(!_gcInfoDecoder.IsInterruptible())
+ {
+ // This must be the offset after a call
+#ifdef _DEBUG
+ GcInfoDecoder _safePointDecoder(gcInfoAddr, (GcInfoDecoderFlags)0, 0);
+ _ASSERTE(_safePointDecoder.IsSafePoint(curOffs));
+#endif
+ flags &= ~((unsigned)ActiveStackFrame);
+ }
+ }
+#endif // USE_GC_INFO_DECODER
+#endif // STRESS_HEAP && PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+
+#ifdef _DEBUG
+ if (flags & ActiveStackFrame)
+ {
+ GcInfoDecoder _gcInfoDecoder(
+ gcInfoAddr,
+ DECODE_INTERRUPTIBILITY,
+ curOffs
+ );
+ _ASSERTE(_gcInfoDecoder.IsInterruptible());
+ }
+#endif
+
+#ifdef USE_GC_INFO_DECODER
+ /* If we are not in the active method, we are currently pointing
+ * to the return address; at the return address stack variables
+ * can become dead if the call is the last instruction of a try block
+ * and the return address is the jump around the catch block. Therefore
+ * we simply assume an offset inside of call instruction.
+ * NOTE: The GcInfoDecoder depends on this; if you change it, you must
+ * revisit the GcInfoEncoder/Decoder
+ */
+
+ if (!(flags & ExecutionAborted))
+ {
+ if (!(flags & ActiveStackFrame))
+ curOffs--;
+ }
+ else
+ {
+ /* However if ExecutionAborted, then this must be one of the
+ * ExceptionFrames. Handle accordingly
+ */
+ _ASSERTE(!(flags & AbortingCall) || !(flags & ActiveStackFrame));
+
+ if (flags & AbortingCall)
+ curOffs--;
+ }
+#endif // USE_GC_INFO_DECODER
+
+#if defined(WIN64EXCEPTIONS) // funclets
+ if (pCodeInfo->GetJitManager()->IsFilterFunclet(pCodeInfo))
+ {
+ // Filters are the only funclet that run during the 1st pass, and must have
+ // both the leaf and the parent frame reported. In order to avoid double
+ // reporting of the untracked variables, do not report them for the filter.
+ flags |= NoReportUntracked;
+ }
+#endif // WIN64EXCEPTIONS
+
+ bool reportScratchSlots;
+
+ // We report scratch slots only for leaf frames.
+ // A frame is non-leaf if we are executing a call, or a fault occurred in the function.
+ // The only case in which we need to report scratch slots for a non-leaf frame
+ // is when execution has to be resumed at the point of interruption (via ResumableFrame)
+ //<TODO>Implement ResumableFrame</TODO>
+ _ASSERTE( sizeof( BOOL ) >= sizeof( ActiveStackFrame ) );
+ reportScratchSlots = (flags & ActiveStackFrame) != 0;
+
+
+ GcInfoDecoder gcInfoDecoder(
+ gcInfoAddr,
+ GcInfoDecoderFlags (DECODE_GC_LIFETIMES | DECODE_SECURITY_OBJECT | DECODE_VARARG),
+ curOffs
+ );
+
+ if (!gcInfoDecoder.EnumerateLiveSlots(
+ pRD,
+ reportScratchSlots,
+ flags,
+ pCallBack,
+ hCallBack
+ ))
+ {
+ return false;
+ }
+
+#ifdef WIN64EXCEPTIONS // funclets
+ //
+ // If we're in a funclet, we do not want to report the incoming varargs. This is
+ // taken care of by the parent method and the funclet should access those arguments
+ // by way of the parent method's stack frame.
+ //
+ if(pCodeInfo->IsFunclet())
+ {
+ return true;
+ }
+#endif // WIN64EXCEPTIONS
+
+ if (gcInfoDecoder.GetIsVarArg())
+ {
+ MethodDesc* pMD = pCodeInfo->GetMethodDesc();
+ _ASSERTE(pMD != NULL);
+
+ // This does not apply to x86 because of how it handles varargs (it never
+ // reports the arguments from the explicit method signature).
+ //
+#ifndef _TARGET_X86_
+ //
+ // SPECIAL CASE:
+ // IL marshaling stubs have signatures that are marked as vararg,
+ // but they are callsite sigs that actually contain complete sig
+ // info. There are two reasons for this:
+ // 1) the stub callsites expect the method to be vararg
+ // 2) the marshaling stub must have full sig info so that
+ // it can do a ldarg.N on the arguments it needs to marshal.
+ // The result of this is that the code below will report the
+ // variable arguments twice--once from the va sig cookie and once
+ // from the explicit method signature (in the method's gc info).
+ //
+    // The fix for this is to early-out of the va sig cookie reporting
+ // in this special case.
+ //
+ if (pMD->IsILStub())
+ {
+ return true;
+ }
+#endif // !_TARGET_X86_
+
+ LOG((LF_GCINFO, LL_INFO100, "Reporting incoming vararg GC refs\n"));
+
+        // Find the offset of the VASigCookie. Its offset is relative to
+        // the base of a FramedMethodFrame.
+ int VASigCookieOffset;
+
+ {
+ MetaSig msigFindVASig(pMD);
+ ArgIterator argit(&msigFindVASig);
+ VASigCookieOffset = argit.GetVASigCookieOffset() - TransitionBlock::GetOffsetOfArgs();
+ }
+
+ PTR_BYTE prevSP = dac_cast<PTR_BYTE>(GetCallerSp(pRD));
+
+ _ASSERTE(prevSP + VASigCookieOffset >= dac_cast<PTR_BYTE>(GetSP(pRD->pCurrentContext)));
+
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+ // Note that I really want to say hCallBack is a GCCONTEXT, but this is pretty close
+ extern void GcEnumObject(LPVOID pData, OBJECTREF *pObj, DWORD flags);
+ _ASSERTE((void*) GcEnumObject == pCallBack);
+#endif
+ GCCONTEXT *pCtx = (GCCONTEXT *) hCallBack;
+
+ // For varargs, look up the signature using the varArgSig token passed on the stack
+ PTR_VASigCookie varArgSig = *PTR_PTR_VASigCookie(prevSP + VASigCookieOffset);
+
+ promoteVarArgs(prevSP, varArgSig, pCtx);
+ }
+
+ return true;
+
+}
+
+#else // !defined(_TARGET_X86_) && !(defined(USE_GC_INFO_DECODER) && !defined(CROSSGEN_COMPILE))
+
+bool EECodeManager::EnumGcRefs( PREGDISPLAY pContext,
+ EECodeInfo *pCodeInfo,
+ unsigned flags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack)
+{
+ PORTABILITY_ASSERT("EECodeManager::EnumGcRefs is not implemented on this platform.");
+ return false;
+}
+
+#endif // _TARGET_X86_
+
+/*****************************************************************************
+ *
+ * Return the address of the local security object reference
+ * using data that was previously cached in UnwindStackFrame
+ * via StackwalkCacheUnwindInfo
+ */
+
+OBJECTREF* EECodeManager::GetAddrOfSecurityObjectFromCachedInfo(PREGDISPLAY pRD, StackwalkCacheUnwindInfo * stackwalkCacheUnwindInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef _TARGET_X86_
+ size_t securityObjectOffset = stackwalkCacheUnwindInfo->securityObjectOffset;
+ _ASSERTE(securityObjectOffset != 0);
+ // We pretend that filters are ESP-based methods in UnwindEbpDoubleAlignFrame().
+ // Hence we cannot enforce this assert.
+ // _ASSERTE(stackwalkCacheUnwindInfo->fUseEbpAsFrameReg);
+ return (OBJECTREF *) (size_t) (DWORD(*pRD->pEbp) - (securityObjectOffset * sizeof(void*)));
+#else
+ PORTABILITY_ASSERT("EECodeManager::GetAddrOfSecurityObjectFromContext is not implemented on this platform.");
+ return NULL;
+#endif
+}
+
+#ifndef DACCESS_COMPILE
+OBJECTREF* EECodeManager::GetAddrOfSecurityObject(CrawlFrame *pCF)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ REGDISPLAY* pRD = pCF->GetRegisterSet();
+ IJitManager* pJitMan = pCF->GetJitManager();
+ METHODTOKEN methodToken = pCF->GetMethodToken();
+ unsigned relOffset = pCF->GetRelOffset();
+ CodeManState* pState = pCF->GetCodeManState();
+
+ PTR_VOID methodInfoPtr = pJitMan->GetGCInfo(methodToken);
+
+ _ASSERTE(sizeof(CodeManStateBuf) <= sizeof(pState->stateBuf));
+
+#if defined(_TARGET_X86_)
+ CodeManStateBuf * stateBuf = (CodeManStateBuf*)pState->stateBuf;
+
+ /* Extract the necessary information from the info block header */
+ stateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(methodInfoPtr, // <TODO>truncation</TODO>
+ relOffset,
+ &stateBuf->hdrInfoBody);
+
+ pState->dwIsSet = 1;
+ if (stateBuf->hdrInfoBody.securityCheck)
+ {
+ _ASSERTE(stateBuf->hdrInfoBody.ebpFrame);
+ if(stateBuf->hdrInfoBody.prologOffs == hdrInfo::NOT_IN_PROLOG &&
+ stateBuf->hdrInfoBody.epilogOffs == hdrInfo::NOT_IN_EPILOG)
+ {
+ return (OBJECTREF *)(size_t)(((DWORD)*pRD->pEbp) - GetSecurityObjectOffset(&stateBuf->hdrInfoBody));
+ }
+ }
+#elif defined(USE_GC_INFO_DECODER) && !defined(CROSSGEN_COMPILE)
+
+ BYTE* gcInfoAddr = (BYTE*) methodInfoPtr;
+
+ GcInfoDecoder gcInfoDecoder(
+ gcInfoAddr,
+ DECODE_SECURITY_OBJECT,
+ 0
+ );
+
+ INT32 spOffset = gcInfoDecoder.GetSecurityObjectStackSlot();
+ if( spOffset != NO_SECURITY_OBJECT )
+ {
+ UINT_PTR uCallerSP = GetCallerSp(pRD);
+
+ if (pCF->IsFunclet())
+ {
+ if (!pCF->IsFilterFunclet())
+ {
+ // Cannot retrieve the security object for a non-filter funclet.
+ return NULL;
+ }
+
+ DWORD dwParentOffset = 0;
+ UINT_PTR uParentCallerSP = 0;
+
+ // If this is a filter funclet, retrieve the information of the parent method
+ // and use that to find the security object.
+ ExceptionTracker::FindParentStackFrameEx(pCF, &dwParentOffset, &uParentCallerSP);
+
+ relOffset = dwParentOffset;
+ uCallerSP = uParentCallerSP;
+ }
+
+ // Security object is always live anyplace we can throw or take a GC
+ OBJECTREF* pSlot = (OBJECTREF*) (spOffset + uCallerSP);
+ return pSlot;
+ }
+#else // !_TARGET_X86_ && !(USE_GC_INFO_DECODER && !CROSSGEN_COMPILE)
+ PORTABILITY_ASSERT("EECodeManager::GetAddrOfSecurityObject is not implemented on this platform.");
+#endif
+
+ return NULL;
+}
+#endif
+
+/*****************************************************************************
+ *
+ * Returns the "this" pointer if it is a non-static method
+ * AND the object is still alive.
+ * Returns NULL in all other cases.
+ * Unfortunately, the semantics of this call currently depend on the architecture.
+ * On non-x86 architectures, where we use GcInfo{En,De}Coder, this returns NULL for
+ * all cases except the case where the GenericsContext is determined via "this." On x86,
+ * it will definitely return a non-NULL value in that case, and for synchronized methods;
+ * it may also return a non-NULL value for other cases, depending on how the method is compiled.
+ */
+OBJECTREF EECodeManager::GetInstance( PREGDISPLAY pContext,
+ EECodeInfo* pCodeInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+#ifdef _TARGET_X86_
+ PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+ unsigned relOffset = pCodeInfo->GetRelOffset();
+
+ PTR_CBYTE table = PTR_CBYTE(methodInfoPtr);
+ hdrInfo info;
+ unsigned stackDepth;
+ TADDR taArgBase;
+ unsigned count;
+
+ /* Extract the necessary information from the info block header */
+
+ table += crackMethodInfoHdr(methodInfoPtr,
+ relOffset,
+ &info);
+
+ // We do not have accurate information in the prolog or the epilog
+ if (info.prologOffs != hdrInfo::NOT_IN_PROLOG ||
+ info.epilogOffs != hdrInfo::NOT_IN_EPILOG)
+ {
+ return NULL;
+ }
+
+ if (info.interruptible)
+ {
+ stackDepth = scanArgRegTableI(skipToArgReg(info, table), (unsigned)relOffset, &info);
+ }
+ else
+ {
+ stackDepth = scanArgRegTable (skipToArgReg(info, table), (unsigned)relOffset, &info);
+ }
+
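+    // Compute the base address that the frame-variable offsets below are
+    // relative to: the saved EBP for EBP frames, or ESP plus the scanned
+    // push depth otherwise.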
+ if (info.ebpFrame)
+ {
+ _ASSERTE(stackDepth == 0);
+ taArgBase = *pContext->pEbp;
+ }
+ else
+ {
+ taArgBase = pContext->Esp + stackDepth;
+ }
+
+ // Only synchronized methods and generic code that accesses
+ // the type context via "this" need to report "this".
+ // If it's reported for other methods, it's probably
+ // done incorrectly. So flag such cases.
+ _ASSERTE(info.thisPtrResult == REGI_NA ||
+ pCodeInfo->GetMethodDesc()->IsSynchronized() ||
+ pCodeInfo->GetMethodDesc()->AcquiresInstMethodTableFromThis());
+
+ if (info.thisPtrResult != REGI_NA)
+ {
+ // the register contains the Object pointer.
+ TADDR uRegValue = *(reinterpret_cast<TADDR *>(getCalleeSavedReg(pContext, info.thisPtrResult)));
+ return ObjectToOBJECTREF(PTR_Object(uRegValue));
+ }
+
+#if VERIFY_GC_TABLES
+ _ASSERTE(*castto(table, unsigned short *)++ == 0xBEEF);
+#endif
+
+ /* Parse the untracked frame variable table */
+
+ /* The 'this' pointer can never be located in the untracked table */
+ /* as we only allow pinned and byrefs in the untracked table */
+
+ count = info.untrackedCnt;
+ while (count-- > 0)
+ {
+ fastSkipSigned(table);
+ }
+
+ /* Look for the 'this' pointer in the frame variable lifetime table */
+
+ count = info.varPtrTableSize;
+ unsigned tmpOffs = 0;
+ while (count-- > 0)
+ {
+ unsigned varOfs = fastDecodeUnsigned(table);
+ unsigned begOfs = tmpOffs + fastDecodeUnsigned(table);
+ unsigned endOfs = begOfs + fastDecodeUnsigned(table);
+ _ASSERTE(!info.ebpFrame || (varOfs!=0));
+ /* Is this variable live right now? */
+ if (((unsigned)relOffset >= begOfs) && ((unsigned)relOffset < endOfs))
+ {
+ /* Does it contain the 'this' pointer */
+ if (varOfs & this_OFFSET_FLAG)
+ {
+ unsigned ofs = varOfs & ~OFFSET_MASK;
+
+ /* Tracked locals for EBP frames are always at negative offsets */
+
+ if (info.ebpFrame)
+ taArgBase -= ofs;
+ else
+ taArgBase += ofs;
+
+ return (OBJECTREF)(size_t)(*PTR_DWORD(taArgBase));
+ }
+ }
+ tmpOffs = begOfs;
+ }
+
+#if VERIFY_GC_TABLES
+ _ASSERTE(*castto(table, unsigned short *) == 0xBABE);
+#endif
+
+ return NULL;
+#elif defined(USE_GC_INFO_DECODER) && !defined(CROSSGEN_COMPILE)
+ PTR_VOID token = EECodeManager::GetExactGenericsToken(pContext, pCodeInfo);
+
+ OBJECTREF oRef = ObjectToOBJECTREF(PTR_Object(dac_cast<TADDR>(token)));
+ VALIDATEOBJECTREF(oRef);
+ return oRef;
+#else // !_TARGET_X86_ && !(USE_GC_INFO_DECODER && !CROSSGEN_COMPILE)
+ PORTABILITY_ASSERT("Port: EECodeManager::GetInstance is not implemented on this platform.");
+ return NULL;
+#endif // _TARGET_X86_
+}
+
+
+GenericParamContextType EECodeManager::GetParamContextType(PREGDISPLAY pContext,
+ EECodeInfo * pCodeInfo)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+#ifdef _TARGET_X86_
+ /* Extract the necessary information from the info block header */
+ PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+ unsigned relOffset = pCodeInfo->GetRelOffset();
+
+ hdrInfo info;
+ PTR_CBYTE table = PTR_CBYTE(methodInfoPtr);
+ table += crackMethodInfoHdr(methodInfoPtr,
+ relOffset,
+ &info);
+
+ if (!info.genericsContext ||
+ info.prologOffs != hdrInfo::NOT_IN_PROLOG ||
+ info.epilogOffs != hdrInfo::NOT_IN_EPILOG)
+ {
+ return GENERIC_PARAM_CONTEXT_NONE;
+ }
+ else if (info.genericsContextIsMethodDesc)
+ {
+ return GENERIC_PARAM_CONTEXT_METHODDESC;
+ }
+ else
+ {
+ return GENERIC_PARAM_CONTEXT_METHODTABLE;
+ }
+ // On x86 the generic param context parameter is never this.
+#elif defined(USE_GC_INFO_DECODER)
+ PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+ PTR_CBYTE gcInfoAddr = PTR_CBYTE(methodInfoPtr);
+
+ GcInfoDecoder gcInfoDecoder(
+ gcInfoAddr,
+ GcInfoDecoderFlags (DECODE_GENERICS_INST_CONTEXT),
+ 0
+ );
+
+ INT32 spOffsetGenericsContext = gcInfoDecoder.GetGenericsInstContextStackSlot();
+ if (spOffsetGenericsContext != NO_GENERICS_INST_CONTEXT)
+ {
+ if (gcInfoDecoder.HasMethodDescGenericsInstContext())
+ {
+ return GENERIC_PARAM_CONTEXT_METHODDESC;
+ }
+ else if (gcInfoDecoder.HasMethodTableGenericsInstContext())
+ {
+ return GENERIC_PARAM_CONTEXT_METHODTABLE;
+ }
+ return GENERIC_PARAM_CONTEXT_THIS;
+ }
+ return GENERIC_PARAM_CONTEXT_NONE;
+#else // !_TARGET_X86_ && !USE_GC_INFO_DECODER
+ PORTABILITY_ASSERT("Port: EECodeManager::GetParamContextType is not implemented on this platform.");
+ return GENERIC_PARAM_CONTEXT_NONE;
+#endif // _TARGET_X86_
+}
+
+/*****************************************************************************
+ *
+ * Returns the extra argument passed to shared generic code if it is still alive.
+ * Returns NULL in all other cases.
+ */
+PTR_VOID EECodeManager::GetParamTypeArg(PREGDISPLAY pContext,
+ EECodeInfo * pCodeInfo)
+
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+#ifdef _TARGET_X86_
+ PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+ unsigned relOffset = pCodeInfo->GetRelOffset();
+
+ /* Extract the necessary information from the info block header */
+ hdrInfo info;
+ PTR_CBYTE table = PTR_CBYTE(methodInfoPtr);
+ table += crackMethodInfoHdr(methodInfoPtr,
+ relOffset,
+ &info);
+
+ if (!info.genericsContext ||
+ info.prologOffs != hdrInfo::NOT_IN_PROLOG ||
+ info.epilogOffs != hdrInfo::NOT_IN_EPILOG)
+ {
+ return NULL;
+ }
+
+ TADDR fp = GetRegdisplayFP(pContext);
+ TADDR taParamTypeArg = *PTR_TADDR(fp - GetParamTypeArgOffset(&info));
+ return PTR_VOID(taParamTypeArg);
+
+#elif defined(USE_GC_INFO_DECODER) && !defined(CROSSGEN_COMPILE)
+ return EECodeManager::GetExactGenericsToken(pContext, pCodeInfo);
+
+#else // !_TARGET_X86_ && !(USE_GC_INFO_DECODER && !CROSSGEN_COMPILE)
+ PORTABILITY_ASSERT("Port: EECodeManager::GetInstance is not implemented on this platform.");
+ return NULL;
+#endif // _TARGET_X86_
+}
+
+#if defined(WIN64EXCEPTIONS) && !defined(CROSSGEN_COMPILE)
+/*
+ Returns the generics token. This is used by GetInstance() and GetParamTypeArg() on WIN64.
+*/
+//static
+PTR_VOID EECodeManager::GetExactGenericsToken(PREGDISPLAY pContext,
+ EECodeInfo * pCodeInfo)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return EECodeManager::GetExactGenericsToken(GetCallerSp(pContext), pCodeInfo);
+}
+
+//static
+PTR_VOID EECodeManager::GetExactGenericsToken(SIZE_T baseStackSlot,
+ EECodeInfo * pCodeInfo)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+ PTR_CBYTE gcInfoAddr = PTR_CBYTE(methodInfoPtr);
+
+ GcInfoDecoder gcInfoDecoder(
+ gcInfoAddr,
+ GcInfoDecoderFlags (DECODE_PSP_SYM | DECODE_GENERICS_INST_CONTEXT),
+ 0
+ );
+
+ INT32 spOffsetGenericsContext = gcInfoDecoder.GetGenericsInstContextStackSlot();
+ if (spOffsetGenericsContext != NO_GENERICS_INST_CONTEXT)
+ {
+
+ TADDR taSlot;
+ if (pCodeInfo->IsFunclet())
+ {
+ INT32 spOffsetPSPSym = gcInfoDecoder.GetPSPSymStackSlot();
+ _ASSERTE(spOffsetPSPSym != NO_PSP_SYM);
+
+#ifdef _TARGET_AMD64_
+            // On AMD64 the spOffsetPSPSym is relative to the "Initial SP": the stack
+            // pointer at the end of the prolog, before any dynamic allocations, so it
+            // can be the same for funclets and the main function.
+            // However, we have a caller SP, so we need to convert it.
+ baseStackSlot -= pCodeInfo->GetFixedStackSize();
+
+#endif // _TARGET_AMD64_
+
+ // For funclets we have to do an extra dereference to get the PSPSym first.
+ TADDR newBaseStackSlot = *PTR_TADDR(baseStackSlot + spOffsetPSPSym);
+
+#ifdef _TARGET_AMD64_
+ // On AMD64 the PSPSym stores the "Initial SP": the stack pointer at the end of
+            // the prolog, before any dynamic allocations.
+ // However, the GenericsContext offset is relative to the caller SP for all
+ // platforms. So here we adjust to convert AMD64's initial sp to a caller SP.
+ // But we have to be careful to use the main function's EECodeInfo, not the
+ // funclet's EECodeInfo because they have different stack sizes!
+ newBaseStackSlot += pCodeInfo->GetMainFunctionInfo().GetFixedStackSize();
+#endif // _TARGET_AMD64_
+
+ taSlot = (TADDR)( spOffsetGenericsContext + newBaseStackSlot );
+ }
+ else
+ {
+ taSlot = (TADDR)( spOffsetGenericsContext + baseStackSlot );
+ }
+ TADDR taExactGenericsToken = *PTR_TADDR(taSlot);
+ return PTR_VOID(taExactGenericsToken);
+ }
+ return NULL;
+}
+
+
+#endif // WIN64EXCEPTIONS / CROSSGEN_COMPILE
+
+/*****************************************************************************/
+
+void * EECodeManager::GetGSCookieAddr(PREGDISPLAY pContext,
+ EECodeInfo * pCodeInfo,
+ CodeManState * pState)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ _ASSERTE(sizeof(CodeManStateBuf) <= sizeof(pState->stateBuf));
+
+ PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+ unsigned relOffset = pCodeInfo->GetRelOffset();
+
+#if defined(_TARGET_X86_)
+ CodeManStateBuf * stateBuf = (CodeManStateBuf*)pState->stateBuf;
+
+ /* Extract the necessary information from the info block header */
+ hdrInfo * info = &stateBuf->hdrInfoBody;
+ stateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(methodInfoPtr, // <TODO>truncation</TODO>
+ relOffset,
+ info);
+
+ pState->dwIsSet = 1;
+
+ if (info->prologOffs != hdrInfo::NOT_IN_PROLOG ||
+ info->epilogOffs != hdrInfo::NOT_IN_EPILOG ||
+ info->gsCookieOffset == INVALID_GS_COOKIE_OFFSET)
+ {
+ return NULL;
+ }
+
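+    // For EBP frames the cookie lives at a fixed negative offset from EBP;
+    // for ESP frames we must also account for any arguments currently pushed.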
+ if (info->ebpFrame)
+ {
+ return PVOID(SIZE_T((DWORD(*pContext->pEbp) - info->gsCookieOffset)));
+ }
+ else
+ {
+ PTR_CBYTE table = PTR_CBYTE(methodInfoPtr) + stateBuf->hdrInfoSize;
+ unsigned argSize = GetPushedArgSize(info, table, relOffset);
+
+ return PVOID(SIZE_T(pContext->Esp + argSize + info->gsCookieOffset));
+ }
+
+#elif defined(USE_GC_INFO_DECODER) && !defined(CROSSGEN_COMPILE)
+ PTR_CBYTE gcInfoAddr = PTR_CBYTE(methodInfoPtr);
+
+ if (pCodeInfo->IsFunclet())
+ {
+ return NULL;
+ }
+
+ GcInfoDecoder gcInfoDecoder(
+ gcInfoAddr,
+ DECODE_GS_COOKIE,
+ 0
+ );
+
+ INT32 spOffsetGSCookie = gcInfoDecoder.GetGSCookieStackSlot();
+ if (spOffsetGSCookie != NO_GS_COOKIE)
+ {
+ if(relOffset >= gcInfoDecoder.GetGSCookieValidRangeStart()
+ && relOffset < gcInfoDecoder.GetGSCookieValidRangeEnd())
+ {
+ SIZE_T baseStackSlot = GetCallerSp(pContext);
+ return (LPVOID)( spOffsetGSCookie + baseStackSlot );
+ }
+ }
+ return NULL;
+
+#else
+ PORTABILITY_WARNING("EECodeManager::GetGSCookieAddr is not implemented on this platform.");
+ return NULL;
+#endif
+}
+
+/*****************************************************************************
+ *
+ * Returns true if the given IP is in the given method's prolog or epilog.
+ */
+bool EECodeManager::IsInPrologOrEpilog(DWORD relPCoffset,
+ PTR_VOID methodInfoPtr,
+ size_t* prologSize)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+#ifndef USE_GC_INFO_DECODER
+ hdrInfo info;
+
+ crackMethodInfoHdr(methodInfoPtr, relPCoffset, &info);
+
+ if (prologSize)
+ *prologSize = info.prologSize;
+
+ return ((info.prologOffs != hdrInfo::NOT_IN_PROLOG) ||
+ (info.epilogOffs != hdrInfo::NOT_IN_EPILOG));
+#else // USE_GC_INFO_DECODER
+ _ASSERTE(!"@NYI - EECodeManager::IsInPrologOrEpilog (EETwain.cpp)");
+ return false;
+#endif // USE_GC_INFO_DECODER
+}
+
+/*****************************************************************************
+ *
+ * Returns true if the given IP is in the synchronized region of the method (valid for synchronized functions only)
+*/
+bool EECodeManager::IsInSynchronizedRegion(
+ DWORD relOffset,
+ PTR_VOID methodInfoPtr,
+ unsigned flags)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+#ifndef USE_GC_INFO_DECODER
+ hdrInfo info;
+
+ crackMethodInfoHdr(methodInfoPtr, relOffset, &info);
+
+ // We should be called only for synchronized methods
+ _ASSERTE(info.syncStartOffset != INVALID_SYNC_OFFSET && info.syncEndOffset != INVALID_SYNC_OFFSET);
+
+ _ASSERTE(info.syncStartOffset < info.syncEndOffset);
+ _ASSERTE(info.epilogCnt <= 1);
+ _ASSERTE(info.epilogCnt == 0 || info.syncEndOffset <= info.syncEpilogStart);
+
+ return (info.syncStartOffset < relOffset && relOffset < info.syncEndOffset) ||
+ (info.syncStartOffset == relOffset && (flags & (ActiveStackFrame|ExecutionAborted))) ||
+ // Synchronized methods have at most one epilog. The epilog does not have to be at the end of the method though.
+ // Everything after the epilog is also in synchronized region.
+           // Everything after the epilog is also in the synchronized region.
+#else // USE_GC_INFO_DECODER
+ _ASSERTE(!"@NYI - EECodeManager::IsInSynchronizedRegion (EETwain.cpp)");
+ return false;
+#endif // USE_GC_INFO_DECODER
+}
+
+/*****************************************************************************
+ *
+ * Returns the size of a given function.
+ */
+size_t EECodeManager::GetFunctionSize(PTR_VOID methodInfoPtr)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+#if defined(_TARGET_X86_)
+ hdrInfo info;
+
+ crackMethodInfoHdr(methodInfoPtr, 0, &info);
+
+ return info.methodSize;
+#elif defined(USE_GC_INFO_DECODER)
+
+ PTR_BYTE gcInfoAddr = PTR_BYTE(methodInfoPtr);
+
+ GcInfoDecoder gcInfoDecoder(
+ gcInfoAddr,
+ DECODE_CODE_LENGTH,
+ 0
+ );
+
+ UINT32 codeLength = gcInfoDecoder.GetCodeLength();
+ _ASSERTE( codeLength > 0 );
+ return codeLength;
+
+#else // !_TARGET_X86_ && !USE_GC_INFO_DECODER
+ PORTABILITY_ASSERT("EECodeManager::GetFunctionSize is not implemented on this platform.");
+ return 0;
+#endif
+
+
+}
+
+/*****************************************************************************
+ *
+ * Returns the size of the frame of the given function.
+ */
+unsigned int EECodeManager::GetFrameSize(PTR_VOID methodInfoPtr)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+#ifndef USE_GC_INFO_DECODER
+ hdrInfo info;
+
+ crackMethodInfoHdr(methodInfoPtr, 0, &info);
+
+    // Currently only used by E&C; in all likelihood callers
+    // would need to know about doubleAlign.
+ _ASSERTE(!info.doubleAlign);
+ return info.stackSize;
+#else // USE_GC_INFO_DECODER
+ PORTABILITY_ASSERT("EECodeManager::GetFrameSize is not implemented on this platform.");
+ return false;
+#endif // USE_GC_INFO_DECODER
+}
+
+#ifndef DACCESS_COMPILE
+
+/*****************************************************************************/
+
+const BYTE* EECodeManager::GetFinallyReturnAddr(PREGDISPLAY pReg)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _TARGET_X86_
+ return *(const BYTE**)(size_t)(GetRegdisplaySP(pReg));
+#else
+ PORTABILITY_ASSERT("EECodeManager::GetFinallyReturnAddr is not implemented on this platform.");
+ return NULL;
+#endif
+}
+
+BOOL EECodeManager::IsInFilter(void *methodInfoPtr,
+ unsigned offset,
+ PCONTEXT pCtx,
+ DWORD curNestLevel)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+#ifdef _TARGET_X86_
+
+ /* Extract the necessary information from the info block header */
+
+ hdrInfo info;
+
+ crackMethodInfoHdr(methodInfoPtr,
+ offset,
+ &info);
+
+ /* make sure that we have an ebp stack frame */
+
+ _ASSERTE(info.ebpFrame);
+    _ASSERTE(info.handlers); // <TODO> This will always be set. Remove it</TODO>
+
+ TADDR baseSP;
+ DWORD nestingLevel;
+
+ FrameType frameType = GetHandlerFrameInfo(&info, pCtx->Ebp,
+ pCtx->Esp, (DWORD) IGNORE_VAL,
+ &baseSP, &nestingLevel);
+ _ASSERTE(frameType != FR_INVALID);
+
+// _ASSERTE(nestingLevel == curNestLevel);
+
+ return frameType == FR_FILTER;
+
+#else
+ PORTABILITY_ASSERT("EECodeManager::IsInFilter is not implemented on this platform.");
+ return FALSE;
+#endif
+}
+
+
+BOOL EECodeManager::LeaveFinally(void *methodInfoPtr,
+ unsigned offset,
+ PCONTEXT pCtx)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+#ifdef _TARGET_X86_
+
+ hdrInfo info;
+
+ crackMethodInfoHdr(methodInfoPtr,
+ offset,
+ &info);
+
+ DWORD nestingLevel;
+ GetHandlerFrameInfo(&info, pCtx->Ebp, pCtx->Esp, (DWORD) IGNORE_VAL, NULL, &nestingLevel);
+
+ // Compute an index into the stack-based table of esp values from
+ // each level of catch block.
+ PTR_TADDR pBaseSPslots = GetFirstBaseSPslotPtr(pCtx->Ebp, &info);
+ PTR_TADDR pPrevSlot = pBaseSPslots - (nestingLevel - 1);
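+ // Illustrative layout (a sketch, not authoritative): the shadow-SP table holds
+ // one TADDR per handler nesting level, so pPrevSlot addresses the slot for
+ // level (nestingLevel - 1), which a locally-invoked finally marks with
+ // LCL_FINALLY_MARK, as asserted below.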
+
+ /* Currently, LeaveFinally() is not used if the finally is invoked in the
+ second pass for unwinding. So we expect the finally to be called locally */
+ _ASSERTE(*pPrevSlot == LCL_FINALLY_MARK);
+
+ *pPrevSlot = 0; // Zero out the previous shadow ESP
+
+ pCtx->Esp += sizeof(TADDR); // Pop the return address off the stack
+ return TRUE;
+#else
+ PORTABILITY_ASSERT("EEJitManager::LeaveFinally is not implemented on this platform.");
+ return FALSE;
+#endif
+}
+
+void EECodeManager::LeaveCatch(void *methodInfoPtr,
+ unsigned offset,
+ PCONTEXT pCtx)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+#ifdef _TARGET_X86_
+
+#ifdef _DEBUG
+ TADDR baseSP;
+ DWORD nestingLevel;
+ bool hasInnerFilter;
+ hdrInfo info;
+
+ crackMethodInfoHdr(methodInfoPtr, offset, &info);
+ GetHandlerFrameInfo(&info, pCtx->Ebp, pCtx->Esp, (DWORD) IGNORE_VAL,
+ &baseSP, &nestingLevel, &hasInnerFilter);
+// _ASSERTE(frameType == FR_HANDLER);
+// _ASSERTE(pCtx->Esp == baseSP);
+#endif
+
+ return;
+
+#else // !_TARGET_X86_
+ PORTABILITY_ASSERT("EECodeManager::LeaveCatch is not implemented on this platform.");
+ return;
+#endif // _TARGET_X86_
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void EECodeManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ DAC_ENUM_VTHIS();
+}
+
+#endif // #ifdef DACCESS_COMPILE
+
+
+/*
+ * GetAmbientSP
+ *
+ * This function computes the zero-depth stack pointer for the given nesting
+ * level within the given method. The nesting level is the depth within
+ * try-catch-finally blocks, and is zero-based. It is up to the caller to
+ * supply a valid nesting level value.
+ *
+ */
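+//
+// A minimal call sketch (hypothetical caller and variable names, not actual
+// runtime code):
+//
+//     CodeManState state;
+//     state.dwIsSet = 0;
+//     TADDR ambientSP = pCodeMan->GetAmbientSP(pRD, pCodeInfo,
+//                                              pCodeInfo->GetRelOffset(),
+//                                              0 /* nestingLevel */, &state);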
+
+TADDR EECodeManager::GetAmbientSP(PREGDISPLAY pContext,
+ EECodeInfo *pCodeInfo,
+ DWORD dwRelOffset,
+ DWORD nestingLevel,
+ CodeManState *pState)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+#ifdef _TARGET_X86_
+ PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+
+ _ASSERTE(sizeof(CodeManStateBuf) <= sizeof(pState->stateBuf));
+ CodeManStateBuf * stateBuf = (CodeManStateBuf*)pState->stateBuf;
+ PTR_CBYTE table = PTR_CBYTE(methodInfoPtr);
+
+ /* Extract the necessary information from the info block header */
+
+ stateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(methodInfoPtr,
+ dwRelOffset,
+ &stateBuf->hdrInfoBody);
+ table += stateBuf->hdrInfoSize;
+
+ pState->dwIsSet = 1;
+
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ if (trFixContext)
+ {
+ printf("GetAmbientSP [%s][%s] for %s.%s: ",
+ stateBuf->hdrInfoBody.ebpFrame?"ebp":" ",
+ stateBuf->hdrInfoBody.interruptible?"int":" ",
+ "UnknownClass","UnknownMethod");
+ fflush(stdout);
+ }
+#endif // _DEBUG && !DACCESS_COMPILE
+
+ if ((stateBuf->hdrInfoBody.prologOffs != hdrInfo::NOT_IN_PROLOG) ||
+ (stateBuf->hdrInfoBody.epilogOffs != hdrInfo::NOT_IN_EPILOG))
+ {
+ return NULL;
+ }
+
+ /* make sure that we have an ebp stack frame */
+
+ if (stateBuf->hdrInfoBody.handlers)
+ {
+ _ASSERTE(stateBuf->hdrInfoBody.ebpFrame);
+
+ TADDR baseSP;
+ GetHandlerFrameInfo(&stateBuf->hdrInfoBody,
+ GetRegdisplayFP(pContext),
+ (DWORD) IGNORE_VAL,
+ nestingLevel,
+ &baseSP);
+
+ _ASSERTE((GetRegdisplayFP(pContext) >= baseSP) && (baseSP >= GetRegdisplaySP(pContext)));
+
+ return baseSP;
+ }
+
+ _ASSERTE(nestingLevel == 0);
+
+ if (stateBuf->hdrInfoBody.ebpFrame)
+ {
+ return GetOutermostBaseFP(GetRegdisplayFP(pContext), &stateBuf->hdrInfoBody);
+ }
+
+ TADDR baseSP = GetRegdisplaySP(pContext);
+ if (stateBuf->hdrInfoBody.interruptible)
+ {
+ baseSP += scanArgRegTableI(skipToArgReg(stateBuf->hdrInfoBody, table),
+ dwRelOffset,
+ &stateBuf->hdrInfoBody);
+ }
+ else
+ {
+ baseSP += scanArgRegTable(skipToArgReg(stateBuf->hdrInfoBody, table),
+ dwRelOffset,
+ &stateBuf->hdrInfoBody);
+ }
+
+ return baseSP;
+
+#else // !_TARGET_X86_
+ PORTABILITY_ASSERT("EECodeManager::GetAmbientSP is not implemented on this platform.");
+ return 0;
+#endif // _TARGET_X86_
+}
+
+/*
+ Get the number of bytes used for stack parameters.
+ This is currently only used on x86.
+ */
+
+// virtual
+ULONG32 EECodeManager::GetStackParameterSize(EECodeInfo * pCodeInfo)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+#if defined(_TARGET_X86_)
+ PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+ unsigned dwOffset = pCodeInfo->GetRelOffset();
+
+ CodeManState state;
+ state.dwIsSet = 0;
+
+ _ASSERTE(sizeof(CodeManStateBuf) <= sizeof(state.stateBuf));
+ CodeManStateBuf * pStateBuf = reinterpret_cast<CodeManStateBuf *>(state.stateBuf);
+
+ hdrInfo * pHdrInfo = &(pStateBuf->hdrInfoBody);
+ pStateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(methodInfoPtr, dwOffset, pHdrInfo);
+
+ // We need to subtract 4 here because ESPIncrOnReturn() includes the stack slot containing the return
+ // address.
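+ // For example (illustrative numbers): a method that pops 12 bytes of arguments
+ // on return has ESPIncrOnReturn() == 16 (12 bytes of args plus the 4-byte
+ // return address), so we report 12 here.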
+ return (ULONG32)(ESPIncrOnReturn(pHdrInfo) - 4);
+
+#else
+ return 0;
+
+#endif // _TARGET_X86_
+}
+
diff --git a/src/vm/encee.cpp b/src/vm/encee.cpp
new file mode 100644
index 0000000000..6dfdfbc57a
--- /dev/null
+++ b/src/vm/encee.cpp
@@ -0,0 +1,1752 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: EnC.CPP
+//
+
+//
+// Handles EditAndContinue support in the EE
+// ===========================================================================
+
+
+#include "common.h"
+#include "dbginterface.h"
+#include "dllimport.h"
+#include "eeconfig.h"
+#include "excep.h"
+#include "stackwalk.h"
+
+#ifdef EnC_SUPPORTED
+
+// We can't read this config value on the helper thread at runtime in ResolveField,
+// so make it static and read it when a field is added.
+#ifdef _DEBUG
+static int g_BreakOnEnCResolveField = -1;
+#endif
+
+#ifndef DACCESS_COMPILE
+
+
+// Module initialization occurs in two phases: the constructor phase and the Initialize phase.
+//
+// The constructor phase initializes just enough so that Destruct() can be safely called.
+// It cannot throw or fail.
+//
+EditAndContinueModule::EditAndContinueModule(Assembly *pAssembly, mdToken moduleRef, PEFile *file)
+ : Module(pAssembly, moduleRef, file)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ LOG((LF_ENC,LL_INFO100,"EACM::ctor 0x%x\n", this));
+
+ m_applyChangesCount = CorDB_DEFAULT_ENC_FUNCTION_VERSION;
+}
+
+// Module initialization occurs in two phases: the constructor phase and the Initialize phase.
+//
+// The Initialize() phase completes the initialization after the constructor has run.
+// It can throw exceptions but whether it throws or succeeds, it must leave the Module
+// in a state where Destruct() can be safely called.
+//
+/*virtual*/
+void EditAndContinueModule::Initialize(AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ LOG((LF_ENC,LL_INFO100,"EACM::Initialize 0x%x\n", this));
+ Module::Initialize(pamTracker);
+}
+
+// Called when the module is being destroyed (e.g. at AD unload time)
+void EditAndContinueModule::Destruct()
+{
+ LIMITED_METHOD_CONTRACT;
+ LOG((LF_ENC,LL_EVERYTHING,"EACM::Destruct 0x%x\n", this));
+
+ // Call the superclass's Destruct method...
+ Module::Destruct();
+}
+
+//---------------------------------------------------------------------------------------
+//
+// ApplyEditAndContinue - updates this module for an EnC
+//
+// Arguments:
+// cbDeltaMD - number of bytes pointed to by pDeltaMD
+// pDeltaMD - pointer to buffer holding the delta metadata
+// cbDeltaIL - number of bytes pointed to by pDeltaIL
+// pDeltaIL - pointer to buffer holding the delta IL
+//
+// Return Value:
+// S_OK on success.
+// If the edit fails for any reason, at any point in this function,
+// we are toast, so return the failure and the IDE will end the debug session.
+//
+
+HRESULT EditAndContinueModule::ApplyEditAndContinue(
+ DWORD cbDeltaMD,
+ BYTE *pDeltaMD,
+ DWORD cbDeltaIL,
+ BYTE *pDeltaIL)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Update the module's EnC version number
+ ++m_applyChangesCount;
+
+ LOG((LF_ENC, LL_INFO100, "EACM::AEAC:\n"));
+
+#ifdef _DEBUG
+ // Debugging hook to optionally break when this method is called
+ static BOOL shouldBreak = -1;
+ if (shouldBreak == -1)
+ shouldBreak = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EncApplyChanges);
+ if (shouldBreak > 0) {
+ _ASSERTE(!"EncApplyChanges");
+ }
+
+ // Debugging hook to dump out all edits to dmeta and dil files
+ static BOOL dumpChanges = -1;
+
+ if (dumpChanges == -1)
+ dumpChanges = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EncDumpApplyChanges);
+
+ if (dumpChanges > 0) {
+ SString fn;
+ int ec;
+ fn.Printf(W("ApplyChanges.%d.dmeta"), m_applyChangesCount);
+ FILE *fp;
+ ec = _wfopen_s(&fp, fn.GetUnicode(), W("wb"));
+ _ASSERTE(SUCCEEDED(ec));
+ fwrite(pDeltaMD, 1, cbDeltaMD, fp);
+ fclose(fp);
+ fn.Printf(W("ApplyChanges.%d.dil"), m_applyChangesCount);
+ ec = _wfopen_s(&fp, fn.GetUnicode(), W("wb"));
+ _ASSERTE(SUCCEEDED(ec));
+ fwrite(pDeltaIL, 1, cbDeltaIL, fp);
+ fclose(fp);
+ }
+#endif
+
+ HRESULT hr = S_OK;
+ HENUMInternal enumENC;
+
+ CONTRACT_VIOLATION(GCViolation); // SafeComHolder goes to preemptive mode, which will trigger a GC
+ SafeComHolder<IMDInternalImportENC> pIMDInternalImportENC;
+ SafeComHolder<IMetaDataEmit> pEmitter;
+
+ // Apply the changes. Note that ApplyEditAndContinue() requires read/write metadata. If the metadata is
+ // not already RW, then ApplyEditAndContinue() will perform the conversion, invalidate the current
+ // metadata importer, and return us a new one. We can't let that happen. Other parts of the system are
+ // already using the current metadata importer, some possibly in preemptive GC mode at this very moment.
+ // Instead, we ensure that the metadata is RW by calling ConvertMDInternalToReadWrite(), which will make
+ // a new importer if necessary and ensure that new accesses to the metadata use that while still managing
+ // the lifetime of the old importer. Therefore, we can be sure that ApplyEditAndContinue() won't need to
+ // make a new importer.
+
+ // Ensure the metadata is RW.
+ EX_TRY
+ {
+ GetFile()->ConvertMetadataToRWForEnC();
+ }
+ EX_CATCH_HRESULT(hr);
+
+ IfFailGo(hr);
+
+ // Grab the current importer.
+ IMDInternalImport *pMDImport = GetMDImport();
+ IMDInternalImport *pNewMDImport;
+
+ // Apply the EnC delta to this module's metadata.
+ IfFailGo(pMDImport->ApplyEditAndContinue(pDeltaMD, cbDeltaMD, &pNewMDImport));
+
+ // The importer should not have changed! We assert that, and back-stop in a retail build just to be sure.
+ if (pNewMDImport != pMDImport)
+ {
+ _ASSERTE( !"ApplyEditAndContinue should not have needed to create a new metadata importer!" );
+ IfFailGo(CORDBG_E_ENC_INTERNAL_ERROR);
+ }
+
+ // get the delta interface
+ IfFailGo(pMDImport->QueryInterface(IID_IMDInternalImportENC, (void **)&pIMDInternalImportENC));
+
+ // get an emitter interface
+ IfFailGo(GetMetaDataPublicInterfaceFromInternal(pMDImport, IID_IMetaDataEmit, (void **)&pEmitter));
+
+ // Copy the deltaIL into our RVAable IL memory
+ BYTE *pLocalILMemory = new BYTE[cbDeltaIL];
+ memcpy(pLocalILMemory, pDeltaIL, cbDeltaIL);
+
+ // Enumerate all of the EnC delta tokens
+ memset(&enumENC, 0, sizeof(HENUMInternal));
+ IfFailGo(pIMDInternalImportENC->EnumDeltaTokensInit(&enumENC));
+
+ mdToken token;
+ FieldDesc * pField = NULL;
+ while (pIMDInternalImportENC->EnumNext(&enumENC, &token))
+ {
+ STRESS_LOG3(LF_ENC, LL_INFO100, "EACM::AEAC: updated token 0x%x; type 0x%x; rid 0x%x\n", token, TypeFromToken(token), RidFromToken(token));
+
+ switch (TypeFromToken(token))
+ {
+ case mdtMethodDef:
+
+ // MethodDef token - update/add a method
+ LOG((LF_ENC, LL_INFO10000, "EACM::AEAC: Found method 0x%x\n", token));
+
+ ULONG dwMethodRVA;
+ DWORD dwMethodFlags;
+ IfFailGo(pMDImport->GetMethodImplProps(token, &dwMethodRVA, &dwMethodFlags));
+
+ if (dwMethodRVA >= cbDeltaIL)
+ {
+ LOG((LF_ENC, LL_INFO10000, "EACM::AEAC: failure RVA of %d with cbDeltaIl %d\n", dwMethodRVA, cbDeltaIL));
+ IfFailGo(E_INVALIDARG);
+ }
+
+ SetDynamicIL(token, (TADDR)(pLocalILMemory + dwMethodRVA), FALSE);
+
+ // use module to resolve to method
+ MethodDesc *pMethod;
+ pMethod = LookupMethodDef(token);
+ if (pMethod)
+ {
+ // Method exists already - update it
+ IfFailGo(UpdateMethod(pMethod));
+ }
+ else
+ {
+ // This is a new method token - create a new method
+ IfFailGo(AddMethod(token));
+ }
+
+ break;
+
+ case mdtFieldDef:
+
+ // FieldDef token - add a new field
+ LOG((LF_ENC, LL_INFO10000, "EACM::AEAC: Found field 0x%x\n", token));
+
+ pField = LookupFieldDef(token);
+ if (pField)
+ {
+ // Field already exists - just ignore for now
+ continue;
+ }
+
+ // Field is new - add it
+ IfFailGo(AddField(token));
+ break;
+
+ case mdtTypeRef:
+ EnsureTypeRefCanBeStored(token);
+ break;
+
+ case mdtAssemblyRef:
+ EnsureAssemblyRefCanBeStored(token);
+ break;
+ }
+ }
+
+ErrExit:
+ if (pIMDInternalImportENC)
+ pIMDInternalImportENC->EnumClose(&enumENC);
+
+ return hr;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// UpdateMethod - called when a method has been updated by EnC.
+//
+// The module's metadata has already been updated. Here we notify the
+// debugger of the update, and swap the new IL in as the current
+// version of the method.
+//
+// Arguments:
+// pMethod - the method being updated
+//
+// Return Value:
+// S_OK on success.
+// If the edit fails for any reason, at any point in this function,
+// we are toast, so return the failure and the IDE will end the debug session.
+//
+// Assumptions:
+// The CLR must be suspended for debugging.
+//
+HRESULT EditAndContinueModule::UpdateMethod(MethodDesc *pMethod)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Notify the debugger of the update
+ HRESULT hr = g_pDebugInterface->UpdateFunction(pMethod, m_applyChangesCount);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ // Notify the JIT that we've got new IL for this method
+ // This will ensure that all new calls to the method will go to the new version.
+ // The runtime does this by never backpatching the methodtable slots in EnC-enabled modules.
+ LOG((LF_ENC, LL_INFO100000, "EACM::UM: Updating function %s to version %d\n", pMethod->m_pszDebugMethodName, m_applyChangesCount));
+
+ // Reset any flags relevant to the old code
+ //
+ // Note that this only works since we've very carefully made sure that _all_ references
+ // to the Method's code must be to the call/jmp blob immediately in front of the
+ // MethodDesc itself. See MethodDesc::IsEnCMethod()
+ //
+ pMethod->Reset();
+
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// AddMethod - called when a new method is added by EnC.
+//
+// The module's metadata has already been updated. Here we notify the
+// debugger of the update, and create and add a new MethodDesc to the class.
+//
+// Arguments:
+// token - methodDef token for the method being added
+//
+// Return Value:
+// S_OK on success.
+// If the edit fails for any reason, at any point in this function,
+// we are toast, so return the failure and the IDE will end the debug session.
+//
+// Assumptions:
+// The CLR must be suspended for debugging.
+//
+HRESULT EditAndContinueModule::AddMethod(mdMethodDef token)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ mdTypeDef parentTypeDef;
+ HRESULT hr = GetMDImport()->GetParentToken(token, &parentTypeDef);
+ if (FAILED(hr))
+ {
+ LOG((LF_ENC, LL_INFO100, "**Error** EnCModule::AM can't find parent token for method token %p\n", token));
+ return E_FAIL;
+ }
+
+ // see if the class is loaded yet.
+ MethodTable * pParentType = LookupTypeDef(parentTypeDef).AsMethodTable();
+ if (pParentType == NULL)
+ {
+ // Class isn't loaded yet, don't have to modify any existing EE data structures beyond the metadata.
+ // Just notify debugger and return.
+ LOG((LF_ENC, LL_INFO100, "EnCModule::AM class %p not loaded, our work is done\n", parentTypeDef));
+ hr = g_pDebugInterface->UpdateNotYetLoadedFunction(token, this, m_applyChangesCount);
+ return hr;
+ }
+
+ // Add the method to the runtime's Class data structures
+ LOG((LF_ENC, LL_INFO100000, "EACM::AM: Adding function %p\n", token));
+ MethodDesc *pMethod = NULL;
+ hr = EEClass::AddMethod(pParentType, token, 0, &pMethod);
+
+ if (FAILED(hr))
+ {
+ _ASSERTE(!"Failed to add function");
+ LOG((LF_ENC, LL_INFO100000, "**Error** EACM::AM: Failed to add function %p with hr 0x%x\n", token, hr));
+ return hr;
+ }
+
+ // Tell the debugger about the new method so it gets the version number properly
+ hr = g_pDebugInterface->AddFunction(pMethod, m_applyChangesCount);
+ if (FAILED(hr))
+ {
+ _ASSERTE(!"Failed to add function");
+ LOG((LF_ENC, LL_INFO100000, "**Error** EACM::AF: Failed to add method %p to debugger with hr 0x%x\n", token, hr));
+ }
+
+ return hr;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// AddField - called when a new field is added by EnC.
+//
+// The module's metadata has already been updated. Here we notify the
+// debugger of the update,
+//
+// Arguments:
+// token - fieldDef for the field being added
+//
+// Return Value:
+// S_OK on success.
+// If the edit fails for any reason, at any point in this function,
+// we are toast, so return the failure and the IDE will end the debug session.
+//
+// Assumptions:
+// The CLR must be suspended for debugging.
+//
+HRESULT EditAndContinueModule::AddField(mdFieldDef token)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ mdTypeDef parentTypeDef;
+ HRESULT hr = GetMDImport()->GetParentToken(token, &parentTypeDef);
+
+ if (FAILED(hr))
+ {
+ LOG((LF_ENC, LL_INFO100, "**Error** EnCModule::AF can't find parent token for field token %p\n", token));
+ return E_FAIL;
+ }
+
+ // See if the class is loaded yet. If not, we don't need to do anything: when this class is
+ // loaded (with the updated metadata), it will have this field like any other normal field.
+ // If the class hasn't been loaded, then the debugger shouldn't know anything about it,
+ // so there shouldn't be any harm in not notifying it of the update. For completeness,
+ // we may want to consider changing this to notify the debugger here as well.
+ MethodTable * pParentType = LookupTypeDef(parentTypeDef).AsMethodTable();
+ if (pParentType == NULL)
+ {
+ LOG((LF_ENC, LL_INFO100, "EnCModule::AF class %p not loaded, our work is done\n", parentTypeDef));
+ return S_OK;
+ }
+
+ // Create a new EnCFieldDesc for the field and add it to the class
+ LOG((LF_ENC, LL_INFO100000, "EACM::AF: Adding field %p\n", token));
+ EnCFieldDesc *pField;
+ hr = EEClass::AddField(pParentType, token, &pField);
+
+ if (FAILED(hr))
+ {
+ LOG((LF_ENC, LL_INFO100000, "**Error** EACM::AF: Failed to add field %p to EE with hr 0x%x\n", token, hr));
+ return hr;
+ }
+
+ // Tell the debugger about the new field
+ hr = g_pDebugInterface->AddField(pField, m_applyChangesCount);
+ if (FAILED(hr))
+ {
+ LOG((LF_ENC, LL_INFO100000, "**Error** EACM::AF: Failed to add field %p to debugger with hr 0x%x\n", token, hr));
+ }
+
+#ifdef _DEBUG
+ if (g_BreakOnEnCResolveField == -1)
+ {
+ g_BreakOnEnCResolveField = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EnCResolveField);
+ }
+#endif
+
+ return hr;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// JitUpdatedFunction - Jit the new version of a function for EnC.
+//
+// Arguments:
+// pMD - the MethodDesc for the method we want to JIT
+// pOrigContext - context of thread pointing into original version of the function
+//
+// Return value:
+// Return the address of the newly jitted code or NULL on failure.
+//
+PCODE EditAndContinueModule::JitUpdatedFunction( MethodDesc *pMD,
+ CONTEXT *pOrigContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_ENC, LL_INFO100, "EnCModule::JitUpdatedFunction for %s\n",
+ pMD->m_pszDebugMethodName));
+
+ PCODE jittedCode = NULL;
+
+ GCX_COOP();
+
+#ifdef _DEBUG
+ BOOL shouldBreak = CLRConfig::GetConfigValue(
+ CLRConfig::INTERNAL_EncJitUpdatedFunction);
+ if (shouldBreak > 0) {
+ _ASSERTE(!"EncJitUpdatedFunction");
+ }
+#endif
+
+ // Set up a frame so that the exception has context,
+ // so that the GC can crawl the stack and do the right thing.
+ _ASSERTE(pOrigContext);
+ Thread *pCurThread = GetThread();
+ _ASSERTE(pCurThread);
+ FrameWithCookie<ResumableFrame> resFrame(pOrigContext);
+ resFrame.Push(pCurThread);
+
+ CONTEXT *pCtxTemp = NULL;
+ // We need to zero out the filter context so a multi-threaded GC doesn't result
+ // in somebody else tracing this thread & concluding that we're in JITted code.
+ // We need to remove the filter context so that if we're in preemptive GC
+ // mode, we'll either have the filter context, or the ResumableFrame,
+ // but not both, set.
+ // Since we're in cooperative mode here, we can swap the two non-atomically here.
+ pCtxTemp = pCurThread->GetFilterContext();
+ _ASSERTE(pCtxTemp != NULL); // currently called from within a filter context, protects us during GC-toggle.
+ pCurThread->SetFilterContext(NULL);
+
+ // get the code address (may jit the fcn if not already jitted)
+ EX_TRY {
+ if (!pMD->IsPointingToNativeCode())
+ {
+ GCX_PREEMP();
+ pMD->DoPrestub(NULL);
+ LOG((LF_ENC, LL_INFO100, "EnCModule::JitUpdatedFunction JIT successful\n"));
+ }
+ else
+ {
+ LOG((LF_ENC, LL_INFO100, "EnCModule::JitUpdatedFunction function already JITted\n"));
+ }
+ jittedCode = pMD->GetNativeCode();
+ } EX_CATCH {
+#ifdef _DEBUG
+ {
+ // This is debug-only code to print out the error string, but SString can throw.
+ // This function is no-throw, and we can't put an EX_TRY inside an EX_CATCH block, so
+ // we just have the violation.
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ StackSString exceptionMessage;
+ SString errorMessage;
+ GetExceptionMessage(GET_THROWABLE(), exceptionMessage);
+ errorMessage.AppendASCII("**Error: Probable rude edit.**\n\n"
+ "EnCModule::JITUpdatedFunction JIT failed with the following exception:\n\n");
+ errorMessage.Append(exceptionMessage);
+ StackScratchBuffer buffer;
+ DbgAssertDialog(__FILE__, __LINE__, errorMessage.GetANSI(buffer));
+ LOG((LF_ENC, LL_INFO100, errorMessage.GetANSI(buffer)));
+ }
+#endif
+ } EX_END_CATCH(SwallowAllExceptions)
+
+ resFrame.Pop(pCurThread);
+
+ // Restore the filter context here (see comment above)
+ pCurThread->SetFilterContext(pCtxTemp);
+
+ return jittedCode;
+}
+
+
+//-----------------------------------------------------------------------------
+// Called by EnC to resume the code in a new version of the function.
+// This will:
+// 1) jit the new function
+// 2) set the IP to newILOffset within that new function
+// 3) adjust local variables (particularly enregistered vars) to the new func.
+// It will not return.
+//
+// Params:
+// pMD - method desc for method being updated. This is not enc-version aware.
+// oldDebuggerFuncHandle - Debugger DJI to uniquely identify old function.
+// This is enc-version aware.
+// newILOffset - the IL offset to resume execution at within the new function.
+// pOrigContext - context of thread pointing into original version of the function.
+//
+// This function must be called on the thread that's executing the old function.
+// This function does not return. Instead, it will remap this thread directly
+// to be executing the new function.
+//-----------------------------------------------------------------------------
+HRESULT EditAndContinueModule::ResumeInUpdatedFunction(
+ MethodDesc *pMD,
+ void *oldDebuggerFuncHandle,
+ SIZE_T newILOffset,
+ CONTEXT *pOrigContext)
+{
+ LOG((LF_ENC, LL_INFO100, "EnCModule::ResumeInUpdatedFunction for %s at IL offset 0x%x, ",
+ pMD->m_pszDebugMethodName, newILOffset));
+
+#ifdef _DEBUG
+ BOOL shouldBreak = CLRConfig::GetConfigValue(
+ CLRConfig::INTERNAL_EncResumeInUpdatedFunction);
+ if (shouldBreak > 0) {
+ _ASSERTE(!"EncResumeInUpdatedFunction");
+ }
+#endif
+
+ HRESULT hr = E_FAIL;
+
+ // JIT-compile the updated version of the method
+ PCODE jittedCode = JitUpdatedFunction(pMD, pOrigContext);
+ if ( jittedCode == NULL )
+ return CORDBG_E_ENC_JIT_CANT_UPDATE;
+
+ GCX_COOP();
+
+ // This will create a new frame and copy old vars to it
+ // need pointer to old & new code, old & new info
+
+ EECodeInfo oldCodeInfo(GetIP(pOrigContext));
+ _ASSERTE(oldCodeInfo.GetMethodDesc() == pMD);
+
+ // Get the new native offset & IP from the new IL offset
+ LOG((LF_ENC, LL_INFO10000, "EACM::RIUF: About to map IL forwards!\n"));
+ SIZE_T newNativeOffset = 0;
+ g_pDebugInterface->MapILInfoToCurrentNative(pMD,
+ newILOffset,
+ jittedCode,
+ &newNativeOffset);
+
+ EECodeInfo newCodeInfo(jittedCode + newNativeOffset);
+ _ASSERTE(newCodeInfo.GetMethodDesc() == pMD);
+
+ _ASSERTE(newCodeInfo.GetRelOffset() == newNativeOffset);
+
+ _ASSERTE(oldCodeInfo.GetCodeManager() == newCodeInfo.GetCodeManager());
+
+ DWORD oldFrameSize = oldCodeInfo.GetFixedStackSize();
+ DWORD newFrameSize = newCodeInfo.GetFixedStackSize();
+
+ // FixContextAndResume() will replace the old stack frame of the function with the new
+ // one and will initialize that new frame to null. Anything on the stack where that new
+ // frame sits will be wiped out. This could include anything on the stack right up to, or beyond, our
+ // current stack frame in ResumeInUpdatedFunction. To prevent our current frame from being
+ // trashed, we determine the maximum amount the stack could grow by and allocate that as a buffer using
+ // alloca. Then we call FixContextAndResume, which can safely rely on the stack because none of its
+ // frame state, or anything below it, can be reached by the new frame.
+
+ if (newFrameSize > oldFrameSize)
+ {
+ DWORD frameIncrement = newFrameSize - oldFrameSize;
+ (void)alloca(frameIncrement);
+ }
+
+ // Ask the EECodeManager to actually fill in the context and stack for the new frame so that
+ // values of locals etc. are preserved.
+ LOG((LF_ENC, LL_INFO100, "EnCModule::ResumeInUpdatedFunction calling FixContextAndResume oldNativeOffset: 0x%x, newNativeOffset: 0x%x,"
+ "oldFrameSize: 0x%x, newFrameSize: 0x%x\n",
+ oldCodeInfo.GetRelOffset(), newCodeInfo.GetRelOffset(), oldFrameSize, newFrameSize));
+
+ FixContextAndResume(pMD,
+ oldDebuggerFuncHandle,
+ pOrigContext,
+ &oldCodeInfo,
+ &newCodeInfo);
+
+ // At this point we shouldn't have failed, so this is genuinely erroneous.
+ LOG((LF_ENC, LL_ERROR, "**Error** EnCModule::ResumeInUpdatedFunction returned from FixContextAndResume"));
+ _ASSERTE(!"Should not return from FixContextAndResume()");
+
+ hr = E_FAIL;
+
+ // If we fail for any reason, we have already potentially trashed the stack with new locals and have
+ // also unwound any Win32 handlers on the stack, so we can never return from this function.
+ EEPOLICY_HANDLE_FATAL_ERROR(CORDBG_E_ENC_INTERNAL_ERROR);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// FixContextAndResume - Modify the thread context for EnC remap and resume execution
+//
+// Arguments:
+// pMD - MethodDesc for the method being remapped
+// oldDebuggerFuncHandle - Debugger DJI to uniquely identify old function.
+// pContext - the thread's original CONTEXT when the remap opportunity was hit
+// pOldCodeInfo - collection of various information about the current frame state
+// pNewCodeInfo - information about how we want the frame state to be after the remap
+//
+// Return Value:
+// Doesn't return
+//
+// Notes:
+// WARNING: This method cannot access any stack-data below its frame on the stack
+// (i.e. anything allocated in a caller frame), so all stack-based arguments must
+// EXPLICITLY be copied by value and this method cannot be inlined. We may need to expand
+// the stack frame to accommodate the new method, and so extra buffer space must have
+// been allocated on the stack. Note that passing a struct by value (via C++) is not
+// enough to ensure its data is really copied (on x64, large structs may internally be
+// passed by reference). Thus we explicitly make copies of structs passed in, at the
+// beginning.
+//
+
+NOINLINE void EditAndContinueModule::FixContextAndResume(
+ MethodDesc *pMD,
+ void *oldDebuggerFuncHandle,
+ T_CONTEXT *pContext,
+ EECodeInfo *pOldCodeInfo,
+ EECodeInfo *pNewCodeInfo)
+{
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_GC_TRIGGERS; // Sends IPC event
+ STATIC_CONTRACT_THROWS;
+
+ // Create local copies of all structs passed as arguments to prevent them from being overwritten
+ CONTEXT context;
+ memcpy(&context, pContext, sizeof(CONTEXT));
+ pContext = &context;
+
+#if defined(_TARGET_AMD64_)
+ // Since we made a copy of the incoming CONTEXT in context, clear any new flags we
+ // don't understand (like XSAVE), since we'll eventually be passing a CONTEXT based
+ // on this copy to RtlRestoreContext, and this copy doesn't have the extra info
+ // required by the XSAVE or other flags.
+ //
+ // FUTURE: No reason to ifdef this for amd64-only, except to make this late fix as
+ // surgical as possible. Would be nice to enable this on x86 early in the next cycle.
+ pContext->ContextFlags &= CONTEXT_ALL;
+#endif // defined(_TARGET_AMD64_)
+
+ EECodeInfo oldCodeInfo;
+ memcpy(&oldCodeInfo, pOldCodeInfo, sizeof(EECodeInfo));
+ pOldCodeInfo = &oldCodeInfo;
+
+ EECodeInfo newCodeInfo;
+ memcpy(&newCodeInfo, pNewCodeInfo, sizeof(EECodeInfo));
+ pNewCodeInfo = &newCodeInfo;
+
+ const ICorDebugInfo::NativeVarInfo *pOldVarInfo = NULL;
+ const ICorDebugInfo::NativeVarInfo *pNewVarInfo = NULL;
+ SIZE_T oldVarInfoCount = 0;
+ SIZE_T newVarInfoCount = 0;
+
+ // Get the var info which the codemanager will use for updating
+ // enregistered variables correctly, or variables whose lifetimes differ
+ // at the update point
+ g_pDebugInterface->GetVarInfo(pMD, oldDebuggerFuncHandle, &oldVarInfoCount, &pOldVarInfo);
+ g_pDebugInterface->GetVarInfo(pMD, NULL, &newVarInfoCount, &pNewVarInfo);
+
+#ifdef _TARGET_X86_
+ // save the stack pointer as FixContextForEnC might step on it.
+ LPVOID oldSP = dac_cast<PTR_VOID>(GetSP(pContext));
+
+ // need to pop the SEH records before we write over the stack in FixContextForEnC
+ PopSEHRecords(oldSP);
+#endif
+
+ // Ask the EECodeManager to actually fill in the context and stack for the new frame so that
+ // values of locals etc. are preserved.
+ HRESULT hr = pNewCodeInfo->GetCodeManager()->FixContextForEnC(
+ pContext,
+ pOldCodeInfo,
+ pOldVarInfo, oldVarInfoCount,
+ pNewCodeInfo,
+ pNewVarInfo, newVarInfoCount);
+
+ // If FixContextForEnC succeeded, the stack is potentially trashed with any new locals and we have also unwound
+ // any Win32 handlers on the stack so cannot ever return from this function. If FixContextForEnC failed, can't
+ // assume that the stack is still intact so apply the proper policy for a fatal EE error to bring us down
+ // "gracefully" (it's all relative).
+ if (FAILED(hr))
+ {
+ LOG((LF_ENC, LL_INFO100, "**Error** EnCModule::FixContextAndResume: FixContextForEnC failed\n"));
+ EEPOLICY_HANDLE_FATAL_ERROR(hr);
+ }
+
+ // Set the new IP
+ // Note that all we're really doing here is setting the IP register. We unfortunately don't
+ // share any code with the implementation of debugger SetIP, despite the similarities.
+ LOG((LF_ENC, LL_INFO100, "EnCModule::FixContextAndResume: Resume at EIP=0x%x\n", pNewCodeInfo->GetCodeAddress()));
+
+ Thread *pCurThread = GetThread();
+ _ASSERTE(pCurThread);
+
+ pCurThread->SetFilterContext(pContext);
+ SetIP(pContext, pNewCodeInfo->GetCodeAddress());
+
+ // Notify the debugger that we're about to resume execution in the new version of the method
+ HRESULT hrIgnore = g_pDebugInterface->RemapComplete(pMD, pNewCodeInfo->GetCodeAddress(), pNewCodeInfo->GetRelOffset());
+
+ // Now jump into the new version of the method. Note that we can't just setup the filter context
+ // and return because we are potentially writing new vars onto the stack.
+ pCurThread->SetFilterContext( NULL );
+
+#if defined(_TARGET_X86_)
+ ResumeAtJit(pContext, oldSP);
+#else
+ RtlRestoreContext(pContext, NULL);
+#endif
+
+ // At this point we shouldn't have failed, so this is genuinely erroneous.
+ LOG((LF_ENC, LL_ERROR, "**Error** EnCModule::FixContextAndResume returned from ResumeAtJit"));
+ _ASSERTE(!"Should not return from ResumeAtJit()");
+}
+#endif // #ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+// ResolveField - get a pointer to the value of a field that was added by EnC
+//
+// Arguments:
+// thisPointer - For instance fields, a pointer to the object instance of interest.
+// For static fields this is unused and should be NULL.
+// pFD - FieldDesc describing the field we're interested in
+//
+// Return Value:
+// If storage doesn't yet exist for this field we return NULL; otherwise, we return a pointer
+// to the contents of the field. This function never allocates storage; use
+// ResolveOrAllocateField to allocate storage for a field that doesn't yet have any.
+//---------------------------------------------------------------------------------------
+PTR_CBYTE EditAndContinueModule::ResolveField(OBJECTREF thisPointer,
+ EnCFieldDesc * pFD)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ if (g_BreakOnEnCResolveField == 1)
+ {
+ _ASSERTE( !"EditAndContinueModule::ResolveField");
+ }
+#endif
+
+ // If it's static, we stash in the EnCFieldDesc
+ if (pFD->IsStatic())
+ {
+ _ASSERTE( thisPointer == NULL );
+ EnCAddedStaticField *pAddedStatic = pFD->GetStaticFieldData();
+ if (!pAddedStatic)
+ {
+ return NULL;
+ }
+
+ _ASSERTE( pAddedStatic->m_pFieldDesc == pFD );
+ return PTR_CBYTE(pAddedStatic->GetFieldData());
+ }
+
+ // not static so get it out of the syncblock
+ SyncBlock * pBlock = NULL;
+
+ // Get the SyncBlock, failing if not available
+ pBlock = thisPointer->PassiveGetSyncBlock();
+ if( pBlock == NULL )
+ {
+ return NULL;
+ }
+
+ EnCSyncBlockInfo * pEnCInfo = NULL;
+
+ // Attempt to get the EnC information from the sync block
+ pEnCInfo = pBlock->GetEnCInfo();
+
+ if (!pEnCInfo)
+ {
+ // No EnC info on this object yet, fail since we don't want to allocate it
+ return NULL;
+ }
+
+ // Lookup the actual field value from the EnCSyncBlockInfo
+ return pEnCInfo->ResolveField(thisPointer, pFD);
+} // EditAndContinueModule::ResolveField
+
+#ifndef DACCESS_COMPILE
+//---------------------------------------------------------------------------------------
+// ResolveOrAllocateField - get a pointer to the value of a field that was added by EnC,
+// allocating storage for it if necessary
+//
+// Arguments:
+// thisPointer - For instance fields, a pointer to the object instance of interest.
+// For static fields this is unused and should be NULL.
+// pFD - FieldDesc describing the field we're interested in
+// Return Value:
+// Returns a pointer to the contents of the field on success. This should only fail due
+// to out-of-memory and will therefore throw an OOM exception.
+//---------------------------------------------------------------------------------------
+PTR_CBYTE EditAndContinueModule::ResolveOrAllocateField(OBJECTREF thisPointer,
+ EnCFieldDesc * pFD)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ // first try getting a pre-existing field
+ PTR_CBYTE fieldAddr = ResolveField(thisPointer, pFD);
+ if (fieldAddr != NULL)
+ {
+ return fieldAddr;
+ }
+
+ // we didn't find the field already allocated
+ if (pFD->IsStatic())
+ {
+ _ASSERTE(thisPointer == NULL);
+ EnCAddedStaticField * pAddedStatic = pFD->GetOrAllocateStaticFieldData();
+ _ASSERTE(pAddedStatic->m_pFieldDesc == pFD);
+ return PTR_CBYTE(pAddedStatic->GetFieldData());
+ }
+
+ // not static so get it out of the syncblock
+ SyncBlock* pBlock = NULL;
+
+ // Get the SyncBlock, creating it if necessary
+ pBlock = thisPointer->GetSyncBlock();
+
+ EnCSyncBlockInfo * pEnCInfo = NULL;
+
+ // Attempt to get the EnC information from the sync block
+ pEnCInfo = pBlock->GetEnCInfo();
+
+ if (!pEnCInfo)
+ {
+ // Attach new EnC field info to this object.
+ pEnCInfo = new EnCSyncBlockInfo;
+ if (!pEnCInfo)
+ {
+ COMPlusThrowOM();
+ }
+ pBlock->SetEnCInfo(pEnCInfo);
+ }
+
+ // Lookup the actual field value from the EnCSyncBlockInfo
+ return pEnCInfo->ResolveOrAllocateField(thisPointer, pFD);
+} // EditAndContinueModule::ResolveOrAllocateField
+
+#endif // !DACCESS_COMPILE
+
+//-----------------------------------------------------------------------------
+// Get or optionally create an EnCEEClassData object for the specified
+// EEClass in this module.
+//
+// Arguments:
+// pClass - the EEClass of interest
+// getOnly - if false (the default), we'll create a new entry if none exists yet
+//
+// Note: If called in a DAC build, getOnly must be TRUE
+//
+PTR_EnCEEClassData EditAndContinueModule::GetEnCEEClassData(MethodTable * pMT, BOOL getOnly /*=FALSE*/ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+#ifdef DACCESS_COMPILE
+ _ASSERTE(getOnly == TRUE);
+#endif // DACCESS_COMPILE
+
+ DPTR(PTR_EnCEEClassData) ppData = m_ClassList.Table();
+ DPTR(PTR_EnCEEClassData) ppLast = ppData + m_ClassList.Count();
+
+ // Look for an existing entry for the specified class
+ while (ppData < ppLast)
+ {
+ PREFIX_ASSUME(ppLast != NULL);
+ if ((*ppData)->GetMethodTable() == pMT)
+ return *ppData;
+ ++ppData;
+ }
+
+ // No match found. Return now if we don't want to create a new entry
+ if (getOnly)
+ {
+ return NULL;
+ }
+
+#ifndef DACCESS_COMPILE
+ // Create a new entry and add it to the end of our table
+ EnCEEClassData *pNewData = (EnCEEClassData*)(void*)pMT->GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem_NoThrow(S_SIZE_T(sizeof(EnCEEClassData)));
+ if (!pNewData)
+ return NULL;
+ pNewData->Init(pMT);
+ ppData = m_ClassList.Append();
+ if (!ppData)
+ return NULL;
+ *ppData = pNewData;
+ return pNewData;
+#else
+ DacNotImpl();
+ return NULL;
+#endif
+}
+
+// Computes the address of this field within the object "o"
+void *EnCFieldDesc::GetAddress( void *o)
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ // can't throw through FieldDesc::GetInstanceField if FORBIDGC_LOADER_USE_ENABLED
+ _ASSERTE(! FORBIDGC_LOADER_USE_ENABLED());
+
+ EditAndContinueModule *pModule = (EditAndContinueModule*)GetModule();
+ _ASSERTE(pModule->IsEditAndContinueEnabled());
+
+ // EnC added fields aren't just at some static offset in the object like normal fields
+ // are. Get the EditAndContinueModule to compute the address for us.
+ return (void *)pModule->ResolveOrAllocateField(ObjectToOBJECTREF((Object *)o), this);
+#else
+ DacNotImpl();
+ return NULL;
+#endif
+}
+
+#ifndef DACCESS_COMPILE
+
+// Do simple field initialization
+// We do this when the process is suspended for debugging (in a GC_NOTRIGGER).
+// Full initialization will be done in Fixup when the process is running.
+void EnCFieldDesc::Init(mdFieldDef token, BOOL fIsStatic)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Clear out the FieldDesc in case someone attempts to use any of the fields
+ memset( this, 0, sizeof(EnCFieldDesc) );
+
+ // Initialize our members
+ m_pStaticFieldData = NULL;
+ m_bNeedsFixup = TRUE;
+
+ // Initialize the bare minimum of FieldDesc necessary for now
+ if (fIsStatic)
+ FieldDesc::m_isStatic = TRUE;
+
+ SetMemberDef(token);
+
+ SetEnCNew();
+}
+
+// Allocate a new EnCAddedField instance and hook it up to hold the value for an instance
+// field which was added by EnC to the specified object. This effectively adds a reference from
+// the object to the new field value so that the field's lifetime is managed properly.
+//
+// Arguments:
+// pFD - description of the field being added
+// thisPointer - object instance to attach the new field to
+//
+EnCAddedField *EnCAddedField::Allocate(OBJECTREF thisPointer, EnCFieldDesc *pFD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_ENC, LL_INFO1000, "\tEnCAF:Allocate for this %p, FD %p\n", thisPointer, pFD->GetMemberDef()));
+
+ // Create a new EnCAddedField instance
+ EnCAddedField *pEntry = new EnCAddedField;
+ pEntry->m_pFieldDesc = pFD;
+
+ _ASSERTE(!pFD->GetApproxEnclosingMethodTable()->IsDomainNeutral());
+ AppDomain *pDomain = (AppDomain*) pFD->GetApproxEnclosingMethodTable()->GetDomain();
+
+ // We need to associate the contents of the new field with the object it is attached to
+ // in a way that mimics the lifetime behavior of a normal field reference. Specifically,
+ // when the object is collected, the field should also be collected (assuming there are no
+ // other references), but references to the field shouldn't keep the object alive.
+ // To achieve this, we have introduced the concept of a "dependent handle" which provides
+ // the appropriate semantics. The dependent handle has a weak reference to a "primary object"
+ // (the object getting a new field in this case), and a strong reference to a secondary object.
+ // When the primary object is collected, the reference to the secondary object is released.
+ // See the definition of code:HNDTYPE_DEPENDENT and code:Ref_ScanDependentHandles for more details.
+ //
+ // We create a helper object and store it as the secondary object in the dependent handle
+ // so that its liveness is tied to that of the primary object.
+ // The helper then contains an object reference to the real field value that we are adding.
+ // The reason for doing this is that we cannot hand out the handle address for
+ // the OBJECTREF address so we need to hand out something else that is hooked up to the handle.
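+ //
+ // Conceptually (an illustrative sketch, not actual code):
+ //
+ //   dependent handle:  primary   --> thisPointer       (weak)
+ //                      secondary --> EnC helper object (strong while primary lives)
+ //   helper's object-reference field --> actual field value (boxed if a value type)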
+
+ GCPROTECT_BEGIN(thisPointer);
+ MethodTable *pHelperMT = MscorlibBinder::GetClass(CLASS__ENC_HELPER);
+ pEntry->m_FieldData = pDomain->CreateDependentHandle(thisPointer, AllocateObject(pHelperMT));
+ GCPROTECT_END();
+
+ LOG((LF_ENC, LL_INFO1000, "\tEnCAF:Allocate created dependent handle %p\n",pEntry->m_FieldData));
+
+ // The EnC helper object stores a reference to the actual field value. For fields which are
+ // reference types, this is simply a normal object reference so we don't need to do anything
+ // special here.
+
+ if (pFD->GetFieldType() != ELEMENT_TYPE_CLASS)
+ {
+ // The field is a value type so we need to create storage on the heap to hold a boxed
+ // copy of the value and have the helper's objectref point there.
+
+ OBJECTREF obj = NULL;
+ if (pFD->IsByValue())
+ {
+ // Create a boxed version of the value class. This allows the standard GC algorithm
+ // to take care of internal pointers into the value class.
+ obj = AllocateObject(pFD->GetFieldTypeHandleThrowing().GetMethodTable());
+ }
+ else
+ {
+ // In the case of primitive types, we use a reference to a 1-element array on the heap.
+ // I'm not sure why we bother treating primitives specially; it seems like we should be able
+ // to just box any value type, including primitives.
+ obj = AllocatePrimitiveArray(ELEMENT_TYPE_I1, GetSizeForCorElementType(pFD->GetFieldType()));
+ }
+ GCPROTECT_BEGIN (obj);
+
+ // Get a FieldDesc for the object reference field in the EnC helper object (warning: triggers)
+ FieldDesc *pHelperField = MscorlibBinder::GetField(FIELD__ENC_HELPER__OBJECT_REFERENCE);
+
+ // store the empty boxed object into the helper object
+ OBJECTREF pHelperObj = GetDependentHandleSecondary(pEntry->m_FieldData);
+ OBJECTREF *pHelperRef = (OBJECTREF *)pHelperField->GetAddress( pHelperObj->GetAddress() );
+ SetObjectReference( pHelperRef, obj, pDomain );
+
+ GCPROTECT_END ();
+ }
+
+ return pEntry;
+}
+#endif // !DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+// EnCSyncBlockInfo::GetEnCFieldAddrFromHelperFieldDesc
+// Gets the address of an EnC field accounting for its type: valuetype, class or primitive
+// Arguments:
+// input: pHelperFieldDesc - FieldDesc for the enc helper object
+// pHelper - EnC helper (points to list of added fields)
+// pFD - fieldDesc describing the field of interest
+// Return value: the address of the EnC added field
+//---------------------------------------------------------------------------------------
+PTR_CBYTE EnCSyncBlockInfo::GetEnCFieldAddrFromHelperFieldDesc(FieldDesc * pHelperFieldDesc,
+ OBJECTREF pHelper,
+ EnCFieldDesc * pFD)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(pHelperFieldDesc != NULL);
+ _ASSERTE(pHelper != NULL);
+
+ // Get the address of the reference inside the helper object which points to
+ // the field contents
+ PTR_OBJECTREF pOR = dac_cast<PTR_OBJECTREF>(pHelperFieldDesc->GetAddress(pHelper->GetAddress()));
+ _ASSERTE(pOR != NULL);
+
+ PTR_CBYTE retAddr = NULL;
+
+ // Compute the address to the actual field contents based on the field type
+ // See the description above Allocate for details
+ if (pFD->IsByValue())
+ {
+ // field value is a value type, we store it boxed so get the pointer to the first field
+ retAddr = dac_cast<PTR_CBYTE>((*pOR)->UnBox());
+ }
+ else if (pFD->GetFieldType() == ELEMENT_TYPE_CLASS)
+ {
+ // field value is a reference type, we store the objref directly
+ retAddr = dac_cast<PTR_CBYTE>(pOR);
+ }
+ else
+ {
+ // field value is a primitive, we store it inside a 1-element array
+ OBJECTREF objRef = *pOR;
+ I1ARRAYREF primitiveArray = dac_cast<I1ARRAYREF>(objRef);
+ retAddr = dac_cast<PTR_CBYTE>(primitiveArray->GetDirectPointerToNonObjectElements());
+ }
+
+ LOG((LF_ENC, LL_INFO1000, "\tEnCSBI:RF address of %s type member is %p\n",
+ (pFD->IsByValue() ? "ByValue" : pFD->GetFieldType() == ELEMENT_TYPE_CLASS ? "Class" : "Other"), retAddr));
+
+ return retAddr;
+} // EnCSyncBlockInfo::GetEnCFieldAddrFromHelperFieldDesc
+
+//---------------------------------------------------------------------------------------
+// EnCSyncBlockInfo::ResolveField
+// Get the address of the data referenced by an instance field that was added with EnC
+// Arguments:
+// thisPointer - the object instance whose field to access
+// pFD - fieldDesc describing the field of interest
+// Return value: Returns a pointer to the data referenced by an EnC added instance field
+//---------------------------------------------------------------------------------------
+PTR_CBYTE EnCSyncBlockInfo::ResolveField(OBJECTREF thisPointer, EnCFieldDesc *pFD)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ // We should only be passed FieldDescs for instance fields
+ _ASSERTE(!pFD->IsStatic());
+
+ PTR_EnCAddedField pEntry = NULL;
+
+ LOG((LF_ENC, LL_INFO1000, "EnCSBI:RF for this %p, FD %p\n", thisPointer, pFD->GetMemberDef()));
+
+ // This list is not synchronized--it hasn't proved a problem, but we could conceivably see race conditions
+ // arise here.
+ // Look for an entry for the requested field in our linked list
+ pEntry = m_pList;
+ while (pEntry && pEntry->m_pFieldDesc != pFD)
+ {
+ pEntry = pEntry->m_pNext;
+ }
+
+ if (!pEntry)
+ {
+ // No existing entry - we have to return NULL
+ return NULL;
+ }
+
+ // we found a matching entry in the list of EnCAddedFields
+ // Get the EnC helper object (see the detailed description in Allocate above)
+ OBJECTREF pHelper = GetDependentHandleSecondary(pEntry->m_FieldData);
+ _ASSERTE(pHelper != NULL);
+
+ FieldDesc *pHelperFieldDesc = NULL;
+
+ // We _HAVE_ to call GetExistingField b/c (a) we can't throw exceptions, and
+ // (b) we _DON'T_ want to run class init code, either.
+ pHelperFieldDesc = MscorlibBinder::GetExistingField(FIELD__ENC_HELPER__OBJECT_REFERENCE);
+ if (pHelperFieldDesc == NULL)
+ {
+ return NULL;
+ }
+ else
+ {
+ return GetEnCFieldAddrFromHelperFieldDesc(pHelperFieldDesc, pHelper, pFD);
+ }
+} // EnCSyncBlockInfo::ResolveField
+
+#ifndef DACCESS_COMPILE
+//---------------------------------------------------------------------------------------
+// EnCSyncBlockInfo::ResolveOrAllocateField
+// get the address of an EnC added field, allocating it if it doesn't yet exist
+// Arguments:
+// thisPointer - the object instance whose field to access
+// pFD - fieldDesc describing the field of interest
+// Return value: Returns a pointer to the data referenced by an instance field that was added with EnC
+//---------------------------------------------------------------------------------------
+PTR_CBYTE EnCSyncBlockInfo::ResolveOrAllocateField(OBJECTREF thisPointer, EnCFieldDesc *pFD)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ WRAPPER(THROWS);
+ }
+ CONTRACTL_END;
+
+ // We should only be passed FieldDescs for instance fields
+ _ASSERTE( !pFD->IsStatic() );
+
+ // first try to get the address of a pre-existing field (storage has already been allocated)
+ PTR_CBYTE retAddr = ResolveField(thisPointer, pFD);
+
+ if (retAddr != NULL)
+ {
+ return retAddr;
+ }
+
+ // if the field doesn't yet have available storage, we'll have to allocate it.
+ PTR_EnCAddedField pEntry = NULL;
+
+ LOG((LF_ENC, LL_INFO1000, "EnCSBI:RF for this %p, FD %p\n", thisPointer, pFD->GetMemberDef()));
+
+ // This list is not synchronized--it hasn't proved a problem, but we could conceivably see race conditions
+ // arise here.
+ // Because we may have additions to the head of m_pList at any time, we have to keep searching this
+ // until we either find a match or succeed in allocating a new entry and adding it to the list
+ do
+ {
+ // Look for an entry for the requested field in our linked list (maybe it was just added)
+ pEntry = m_pList;
+ while (pEntry && pEntry->m_pFieldDesc != pFD)
+ {
+ pEntry = pEntry->m_pNext;
+ }
+
+ if (pEntry)
+ {
+ // match found
+ break;
+ }
+
+ // Allocate an entry and tie it to the object instance
+ pEntry = EnCAddedField::Allocate(thisPointer, pFD);
+
+ // put at front of list so the list is in order of most recently added
+ pEntry->m_pNext = m_pList;
+ if (FastInterlockCompareExchangePointer(&m_pList, pEntry, pEntry->m_pNext) == pEntry->m_pNext)
+ break;
+
+ // There was a race and another thread modified the list here, so we need to try again
+ // We should hit this case so rarely, and EnC perf is of so little
+ // consequence, that we should probably just take a lock here to simplify this code.
+ // @todo - We leak a GC handle here. Allocate() above allocated a GC handle in m_FieldData,
+ // and there's no dtor for pEntry to free it.
+ delete pEntry;
+ } while (TRUE);
+
+ // we found a matching entry in the list of EnCAddedFields
+ // Get the EnC helper object (see the detailed description in Allocate above)
+ OBJECTREF pHelper = GetDependentHandleSecondary(pEntry->m_FieldData);
+ _ASSERTE(pHelper != NULL);
+
+ FieldDesc * pHelperField = NULL;
+ GCPROTECT_BEGIN (pHelper);
+ pHelperField = MscorlibBinder::GetField(FIELD__ENC_HELPER__OBJECT_REFERENCE);
+ GCPROTECT_END ();
+
+ return GetEnCFieldAddrFromHelperFieldDesc(pHelperField, pHelper, pFD);
+} // EnCSyncBlockInfo::ResolveOrAllocateField
+
+// Free all the resources associated with the fields added to this object instance
+// This is invoked after the object instance has been collected, and the SyncBlock is
+// being reclaimed.
+//
+// Note, this is not thread-safe, and so should only be called when we know no-one else
+// may be using this SyncBlockInfo.
+void EnCSyncBlockInfo::Cleanup()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ // Walk our linked list of all the fields that were added
+ EnCAddedField *pEntry = m_pList;
+ while (pEntry)
+ {
+ // Clean up the handle we created in EnCAddedField::Allocate
+ DestroyDependentHandle(*(OBJECTHANDLE*)&pEntry->m_FieldData);
+
+ // Delete this list entry and move onto the next
+ EnCAddedField *next = pEntry->m_pNext;
+ delete pEntry;
+ pEntry = next;
+ }
+
+ // Finally, delete the sync block info itself
+ delete this;
+}
+
+// Allocate space to hold the value for the new static field
+EnCAddedStaticField *EnCAddedStaticField::Allocate(EnCFieldDesc *pFD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!pFD->GetEnclosingMethodTable()->IsDomainNeutral());
+ AppDomain *pDomain = (AppDomain*) pFD->GetApproxEnclosingMethodTable()->GetDomain();
+
+ // Compute the size of the fieldData entry
+ size_t fieldSize;
+ if (pFD->IsByValue() || pFD->GetFieldType() == ELEMENT_TYPE_CLASS) {
+ // We store references to reference types or boxed value types
+ fieldSize = sizeof(OBJECTREF*);
+ } else {
+ // We store primitives inline
+ fieldSize = GetSizeForCorElementType(pFD->GetFieldType());
+ }
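+
+ // For example (illustrative): an added "static int" stores its 4 bytes inline
+ // at m_FieldData, whereas an added "static string" (ELEMENT_TYPE_CLASS) stores
+ // an OBJECTREF* there that points at a GC-reported static objref slot.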
+
+ // allocate an instance with space for the field data
+ EnCAddedStaticField *pEntry = (EnCAddedStaticField *)
+ (void*)pDomain->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(offsetof(EnCAddedStaticField, m_FieldData)) + S_SIZE_T(fieldSize));
+ pEntry->m_pFieldDesc = pFD;
+
+ // Create a static objectref to point to the field contents, except for primitives
+ // which will use the memory available in-line at m_FieldData for storage.
+ // We use static object refs for static fields as these fields won't go away
+ // unless the module is unloaded, and they can easily be found by GC.
+ if (pFD->IsByValue())
+ {
+ // create a boxed version of the value class. This allows the standard GC
+ // algorithm to take care of internal pointers in the value class.
+ OBJECTREF **pOR = (OBJECTREF**)&pEntry->m_FieldData;
+ *pOR = pDomain->AllocateStaticFieldObjRefPtrs(1);
+ OBJECTREF obj = AllocateObject(pFD->GetFieldTypeHandleThrowing().GetMethodTable());
+ SetObjectReference( *pOR, obj, pDomain );
+ }
+ else if (pFD->GetFieldType() == ELEMENT_TYPE_CLASS)
+ {
+ // references to reference-types are stored directly in the field data
+ OBJECTREF **pOR = (OBJECTREF**)&pEntry->m_FieldData;
+ *pOR = pDomain->AllocateStaticFieldObjRefPtrs(1);
+ }
+
+ return pEntry;
+}
+#endif // !DACCESS_COMPILE
+// GetFieldData - return the ADDRESS where the field data is located
+PTR_CBYTE EnCAddedStaticField::GetFieldData()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ if ( (m_pFieldDesc->IsByValue()) || (m_pFieldDesc->GetFieldType() == ELEMENT_TYPE_CLASS) )
+ {
+ // It's indirect via an ObjRef at m_FieldData. This is a TADDR, so we need to make a PTR_CBYTE from
+ // the ObjRef
+ return *(PTR_CBYTE *)&m_FieldData;
+ }
+ else
+ {
+ // An elementary type. It's stored directly in m_FieldData. In this case, we need to get the target
+ // address of the m_FieldData data member and marshal it via the DAC.
+ return dac_cast<PTR_CBYTE>(PTR_HOST_MEMBER_TADDR(EnCAddedStaticField, this, m_FieldData));
+ }
+}
+
+// Gets a pointer to the field's contents (assuming this is a static field)
+// We'll return NULL if we don't yet have a pointer to the data.
+// Arguments: none
+// Return value: address of the static field data if available or NULL otherwise
+EnCAddedStaticField * EnCFieldDesc::GetStaticFieldData()
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsStatic());
+
+ return m_pStaticFieldData;
+}
+
+#ifndef DACCESS_COMPILE
+// Gets a pointer to the field's contents (assuming this is a static field)
+// Arguments: none
+// Return value: address of the field data. If we don't yet have a pointer to the data,
+// this will allocate space to store it.
+// May throw OOM.
+EnCAddedStaticField * EnCFieldDesc::GetOrAllocateStaticFieldData()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsStatic());
+
+ // If necessary and requested, allocate space for the static field data
+ if (!m_pStaticFieldData)
+ {
+ m_pStaticFieldData = EnCAddedStaticField::Allocate(this);
+ }
+
+ return m_pStaticFieldData;
+}
+#endif // !DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+// Adds the provided new field to the appropriate linked list and updates the appropriate count
+void EnCEEClassData::AddField(EnCAddedFieldElement *pAddedField)
+{
+ LIMITED_METHOD_CONTRACT;
+ // Determine the appropriate field list and update the field counter
+ EnCFieldDesc *pFD = &pAddedField->m_fieldDesc;
+ EnCAddedFieldElement **pList;
+ if (pFD->IsStatic())
+ {
+ ++m_dwNumAddedStaticFields;
+ pList = &m_pAddedStaticFields;
+ }
+ else
+ {
+ ++m_dwNumAddedInstanceFields;
+ pList = &m_pAddedInstanceFields;
+ }
+
+ // If the list is empty, just add this field as the only entry
+ if (*pList == NULL)
+ {
+ *pList = pAddedField;
+ return;
+ }
+
+ // Otherwise, add this field to the end of the field list
+ EnCAddedFieldElement *pCur = *pList;
+ while (pCur->m_next != NULL)
+ {
+ pCur = pCur->m_next;
+ }
+ pCur->m_next = pAddedField;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void
+EnCEEClassData::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ DAC_ENUM_DTHIS();
+
+ if (m_pMT.IsValid())
+ {
+ m_pMT->EnumMemoryRegions(flags);
+ }
+
+ PTR_EnCAddedFieldElement elt = m_pAddedInstanceFields;
+ while (elt.IsValid())
+ {
+ elt.EnumMem();
+ elt = elt->m_next;
+ }
+ elt = m_pAddedStaticFields;
+ while (elt.IsValid())
+ {
+ elt.EnumMem();
+ elt = elt->m_next;
+ }
+}
+
+void
+EditAndContinueModule::EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
+ bool enumThis)
+{
+ SUPPORTS_DAC;
+
+ if (enumThis)
+ {
+ DAC_ENUM_VTHIS();
+ }
+
+ Module::EnumMemoryRegions(flags, false);
+
+ m_ClassList.EnumMemoryRegions();
+
+ DPTR(PTR_EnCEEClassData) classData = m_ClassList.Table();
+ DPTR(PTR_EnCEEClassData) classLast = classData + m_ClassList.Count();
+
+ while (classData.IsValid() && classData < classLast)
+ {
+ if ((*classData).IsValid())
+ {
+ (*classData)->EnumMemoryRegions(flags);
+ }
+
+ classData++;
+ }
+}
+
+#endif // #ifdef DACCESS_COMPILE
+
+
+// Create a field iterator which includes EnC fields in addition to the fields from an
+// underlying ApproxFieldDescIterator.
+//
+// Arguments:
+// pMT - MethodTable indicating the type of interest
+// iteratorType - one of the ApproxFieldDescIterator::IteratorType values specifying which fields
+// are of interest.
+// fixupEnC - if true, then any partially-initialized EnC FieldDescs will be fixed up to be completely
+// initialized FieldDescs as they are returned by Next(). This may load types and do
+// other things that can trigger a GC.
+//
+EncApproxFieldDescIterator::EncApproxFieldDescIterator(MethodTable *pMT, int iteratorType, BOOL fixupEnC) :
+ m_nonEnCIter( pMT, iteratorType )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ m_fixupEnC = fixupEnC;
+
+#ifndef DACCESS_COMPILE
+ // can't fixup for EnC on the debugger thread
+ _ASSERTE((g_pDebugInterface->GetRCThreadId() != GetCurrentThreadId()) || fixupEnC == FALSE);
+#endif
+
+ m_pCurrListElem = NULL;
+ m_encClassData = NULL;
+ m_encFieldsReturned = 0;
+
+ // If this is an EnC module, then grab a pointer to the EnC data
+ if( pMT->GetModule()->IsEditAndContinueEnabled() )
+ {
+ PTR_EditAndContinueModule encMod = PTR_EditAndContinueModule(pMT->GetModule());
+ m_encClassData = encMod->GetEnCEEClassData( pMT, TRUE);
+ }
+}
+
+// Iterates through all fields, returns NULL when done.
+PTR_FieldDesc EncApproxFieldDescIterator::Next()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (m_fixupEnC) {GC_TRIGGERS;} else {GC_NOTRIGGER;}
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ // If we still have non-EnC fields to look at, return one of them
+ if( m_nonEnCIter.CountRemaining() > 0 )
+ {
+ _ASSERTE( m_encFieldsReturned == 0 );
+ return m_nonEnCIter.Next();
+ }
+
+ // Get the next EnC field Desc if any
+ PTR_EnCFieldDesc pFD = NextEnC();
+ if( pFD == NULL )
+ {
+ // No more fields
+ return NULL;
+ }
+
+#ifndef DACCESS_COMPILE
+ // Fixup the fieldDesc if requested and necessary
+ if ( m_fixupEnC && (pFD->NeedsFixup()) )
+ {
+ // if we get an OOM during fixup, the field will just not get fixed up
+ EX_TRY
+ {
+ FAULT_NOT_FATAL();
+ pFD->Fixup(pFD->GetMemberDef());
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+
+ // Either it's been fixed up, so we can use it, or we're on the debugger RC thread and can't
+ // fix it up, which is OK since our logic will check and make sure we don't try to use it.
+ // If we haven't asked to have the field fixed up, we should never be trying to get at a
+ // non-fixed-up field in this list. We can't simply always fix up the field, because loading
+ // triggers a GC and many code paths can't tolerate that.
+ _ASSERTE( !(pFD->NeedsFixup()) ||
+ ( g_pDebugInterface->GetRCThreadId() == GetCurrentThreadId() ) );
+#endif
+
+ return dac_cast<PTR_FieldDesc>(pFD);
+}
+
+// Iterate through EnC added fields.
+// Returns NULL when done.
+PTR_EnCFieldDesc EncApproxFieldDescIterator::NextEnC()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ // If this module doesn't have any EnC data then there aren't any EnC fields
+ if( m_encClassData == NULL )
+ {
+ return NULL;
+ }
+
+ BOOL doInst = ( GetIteratorType() & (int)ApproxFieldDescIterator::INSTANCE_FIELDS);
+ BOOL doStatic = ( GetIteratorType() & (int)ApproxFieldDescIterator::STATIC_FIELDS);
+
+ int cNumAddedInst = doInst ? m_encClassData->GetAddedInstanceFields() : 0;
+ int cNumAddedStatics = doStatic ? m_encClassData->GetAddedStaticFields() : 0;
+
+ // If we haven't returned anything yet
+ if ( m_encFieldsReturned == 0 )
+ {
+ _ASSERTE(m_pCurrListElem == NULL);
+
+ // We're at the start of the instance list.
+ if ( doInst )
+ {
+ m_pCurrListElem = m_encClassData->m_pAddedInstanceFields;
+ }
+ }
+
+ // If we've finished the instance fields (or never wanted to do any)
+ if ( m_encFieldsReturned == cNumAddedInst)
+ {
+ // We should be at the end of the instance list if doInst is true
+ _ASSERTE(m_pCurrListElem == NULL);
+
+ // We're at the start of the statics list.
+ if ( doStatic )
+ {
+ m_pCurrListElem = m_encClassData->m_pAddedStaticFields;
+ }
+ }
+
+ // If we don't have any elements to return, then we're done
+ if (m_pCurrListElem == NULL)
+ {
+ // Verify that we returned the number we expected to
+ _ASSERTE( m_encFieldsReturned == cNumAddedInst + cNumAddedStatics );
+ return NULL;
+ }
+
+ // Advance the list pointer and return the element
+ m_encFieldsReturned++;
+ PTR_EnCFieldDesc fd = PTR_EnCFieldDesc(PTR_HOST_MEMBER_TADDR(EnCAddedFieldElement, m_pCurrListElem, m_fieldDesc));
+ m_pCurrListElem = m_pCurrListElem->m_next;
+ return fd;
+}
+
+#endif // EnC_SUPPORTED
diff --git a/src/vm/encee.h b/src/vm/encee.h
new file mode 100644
index 0000000000..00c40f45a8
--- /dev/null
+++ b/src/vm/encee.h
@@ -0,0 +1,449 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// EnCee.h
+//
+
+//
+// Defines the core VM data structures and methods for supporting EditAndContinue
+//
+// ======================================================================================
+
+
+#ifndef EnC_H
+#define EnC_H
+
+#include "ceeload.h"
+#include "field.h"
+#include "class.h"
+
+#ifdef EnC_SUPPORTED
+
+class FieldDesc;
+struct EnCAddedField;
+struct EnCAddedStaticField;
+class EnCFieldDesc;
+class EnCEEClassData;
+
+typedef DPTR(EnCAddedField) PTR_EnCAddedField;
+typedef DPTR(EnCAddedStaticField) PTR_EnCAddedStaticField;
+typedef DPTR(EnCFieldDesc) PTR_EnCFieldDesc;
+typedef DPTR(EnCEEClassData) PTR_EnCEEClassData;
+
+//---------------------------------------------------------------------------------------
+//
+// EnCFieldDesc - A field descriptor for fields added by EnC
+//
+// Notes: We need to track some additional data for added fields, since they can't
+// simply be glued onto existing object instances like any other field.
+//
+// For each field added, there is a single instance of this object tied to the type where
+// the field was added.
+//
+class EnCFieldDesc : public FieldDesc
+{
+public:
+ // Initialize just the bare minimum necessary now.
+ // We'll do a proper FieldDesc initialization later when Fixup is called.
+ void Init( mdFieldDef token, BOOL fIsStatic);
+
+ // Compute the address of this field for a specific object
+ void *GetAddress( void *o);
+
+ // Returns true if Fixup still needs to be called
+ BOOL NeedsFixup()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_bNeedsFixup;
+ }
+
+ // Used to properly configure the FieldDesc after it has been added
+ // This may do things like load classes (which can trigger a GC), and so can only be
+ // done after the process has resumed execution.
+ VOID Fixup(mdFieldDef token)
+ {
+ WRAPPER_NO_CONTRACT;
+ EEClass::FixupFieldDescForEnC(GetEnclosingMethodTable(), this, token);
+ m_bNeedsFixup = FALSE;
+ }
+
+ // Gets a pointer to the field's contents (assuming this is a static field) if it's
+ // available or NULL otherwise
+ EnCAddedStaticField *GetStaticFieldData();
+
+ // Gets a pointer to the field's contents (assuming this is a static field) if it's
+ // available or allocates space for it and returns the address to the allocated field
+ // Returns a valid address or throws OOM
+ EnCAddedStaticField * GetOrAllocateStaticFieldData();
+
+
+private:
+ // True if Fixup() has been called on this instance
+ // True if Fixup() still needs to be called on this instance
+
+ // For static fields, pointer to where the field value is held
+ PTR_EnCAddedStaticField m_pStaticFieldData;
+};
+
+// EnCAddedFieldElement
+// A node in the linked list representing fields added to a class with EnC
+typedef DPTR(struct EnCAddedFieldElement) PTR_EnCAddedFieldElement;
+struct EnCAddedFieldElement
+{
+ // Pointer to the next element in the list
+ PTR_EnCAddedFieldElement m_next;
+
+ // Details about this field
+ EnCFieldDesc m_fieldDesc;
+
+ // Initialize this entry.
+ // Basically just sets a couple fields to default values.
+ // We'll have to go back later and call Fixup on the fieldDesc.
+ void Init(mdFieldDef token, BOOL fIsStatic)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_next = NULL;
+ m_fieldDesc.Init(token, fIsStatic);
+ }
+};
+
+//---------------------------------------------------------------------------------------
+//
+// EnCEEClassData - EnC specific information about this class
+//
+class EnCEEClassData
+{
+public:
+#ifndef DACCESS_COMPILE
+ // Initialize all the members
+ // pMT - the MethodTable we're tracking EnC data for
+ void Init(MethodTable * pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pMT = pMT;
+ m_dwNumAddedInstanceFields = 0;
+ m_dwNumAddedStaticFields = 0;
+ m_pAddedInstanceFields = NULL;
+ m_pAddedStaticFields = NULL;
+ }
+#endif
+
+ // Adds the provided new field to the appropriate linked list and updates the appropriate count
+ void AddField(EnCAddedFieldElement *pAddedField);
+
+ // Get the number of instance fields that have been added to this class.
+ // Since we can only add private fields, these fields can't be seen from any other class but this one.
+ int GetAddedInstanceFields()
+ {
+ SUPPORTS_DAC;
+ return m_dwNumAddedInstanceFields;
+ }
+
+ // Get the number of static fields that have been added to this class.
+ int GetAddedStaticFields()
+ {
+ SUPPORTS_DAC;
+ return m_dwNumAddedStaticFields;
+ }
+
+ // Get the methodtable that this EnC data refers to
+ MethodTable * GetMethodTable()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pMT;
+ }
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+private:
+ friend class EEClass;
+ friend class EncApproxFieldDescIterator;
+
+ // The class that this EnC data refers to
+ PTR_MethodTable m_pMT;
+
+ // The number of instance fields that have been added to this class
+ int m_dwNumAddedInstanceFields;
+
+ // The number of static fields that have been added to this class
+ int m_dwNumAddedStaticFields;
+
+ // Linked list of EnCFieldDescs for all the added instance fields
+ PTR_EnCAddedFieldElement m_pAddedInstanceFields;
+
+ // Linked list of EnCFieldDescs for all the added static fields
+ PTR_EnCAddedFieldElement m_pAddedStaticFields;
+};
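+
+// A minimal usage sketch (hypothetical call site; the real callers live in
+// EEClass and EditAndContinueModule, not in this header): a freshly allocated
+// element is initialized and then attached to the per-class EnC data:
+//
+//   pAddedField->Init(token, fIsStatic);
+//   pEnCClassData->AddField(pAddedField);
+//
+// AddField bumps the matching counter and appends the element to the end of
+// either the instance or the static field list.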
+
+//---------------------------------------------------------------------------------------
+//
+// EditAndContinueModule - specialization of the Module class which adds EnC support
+//
+// Assumptions:
+//
+// Notes:
+//
+class EditAndContinueModule : public Module
+{
+ VPTR_VTABLE_CLASS(EditAndContinueModule, Module)
+
+ // keep track of the number of changes - this is used to apply a version number
+ // to an updated function. The version number for a function is the overall edit count,
+ // i.e. the number of times ApplyChanges has been called, not the number of times that
+ // function itself has been edited.
+ int m_applyChangesCount;
+
+ // Holds a table of EnCEEClassData objects for classes in this module that have been modified
+ CUnorderedArray<EnCEEClassData*, 5> m_ClassList;
+
+#ifndef DACCESS_COMPILE
+ // Return the minimum permissible address for new IL to be stored at
+ // This can't be less than the current load address because then we'd
+ // have negative RVAs.
+ BYTE *GetEnCBase() { return (BYTE *) GetFile()->GetManagedFileContents(); }
+#endif // DACCESS_COMPILE
+
+private:
+ // Constructor is invoked only by Module::Create
+ friend Module *Module::Create(Assembly *pAssembly, mdToken moduleRef, PEFile *file, AllocMemTracker *pamTracker);
+ EditAndContinueModule(Assembly *pAssembly, mdToken moduleRef, PEFile *file);
+
+protected:
+#ifndef DACCESS_COMPILE
+ // Initialize the module
+ virtual void Initialize(AllocMemTracker *pamTracker);
+#endif
+
+public:
+#ifndef DACCESS_COMPILE
+ // Destruct the module when it's finished being unloaded
+ // Note that due to the loader's allocation mechanism, C++ constructors and destructors
+ // won't be called.
+ virtual void Destruct();
+#endif
+
+ // Apply an EnC edit
+ HRESULT ApplyEditAndContinue(DWORD cbMetadata,
+ BYTE *pMetadata,
+ DWORD cbIL,
+ BYTE *pIL);
+
+ // Called when a method has been modified (new IL)
+ HRESULT UpdateMethod(MethodDesc *pMethod);
+
+ // Called when a new method has been added to the module's metadata
+ HRESULT AddMethod(mdMethodDef token);
+
+ // Called when a new field has been added to the module's metadata
+ HRESULT AddField(mdFieldDef token);
+
+ // JIT the new version of a function for EnC
+ PCODE JitUpdatedFunction(MethodDesc *pMD, T_CONTEXT *pContext);
+
+ // Remap execution to the latest version of an edited method
+ HRESULT ResumeInUpdatedFunction(MethodDesc *pMD,
+ void *oldDebuggerFuncHandle,
+ SIZE_T newILOffset,
+ T_CONTEXT *pContext);
+
+ // Modify the thread context for EnC remap and resume execution
+ void FixContextAndResume(MethodDesc *pMD,
+ void *oldDebuggerFuncHandle,
+ T_CONTEXT *pContext,
+ EECodeInfo *pOldCodeInfo,
+ EECodeInfo *pNewCodeInfo);
+
+ // Get a pointer to the value of a field added by EnC or return NULL if it doesn't exist
+ PTR_CBYTE ResolveField(OBJECTREF thisPointer,
+ EnCFieldDesc *pFD);
+
+ // Get a pointer to the value of a field added by EnC. Allocates if it doesn't exist, so we'll
+ // return a valid address or throw OOM
+ PTR_CBYTE ResolveOrAllocateField(OBJECTREF thisPointer,
+ EnCFieldDesc * pFD);
+
+
+ // Get class-specific EnC data for a class in this module
+ // Note: For DAC build, getOnly must be TRUE
+ PTR_EnCEEClassData GetEnCEEClassData(MethodTable * pMT, BOOL getOnly = FALSE);
+
+ // Get the number of times edits have been applied to this module
+ int GetApplyChangesCount()
+ {
+ return m_applyChangesCount;
+ }
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags,
+ bool enumThis);
+#endif
+};
+
+// Information about an instance field value added by EnC
+// When an instance field is added to an object, we will lazily create an EnCAddedField
+// for EACH instance of that object, but there will be a single EnCFieldDesc.
+//
+// Note that if we were concerned about the overhead when there are lots of instances of
+// an object, we could slim this down to just the m_FieldData field by storing a pointer
+// to a growable array of these in the EnCSyncBlockInfo, instead of using a linked list, and
+// have the EnCFieldDesc specify a field index number.
+//
+struct EnCAddedField
+{
+ // This field data hangs off the SyncBlock in a linked list.
+ // This is the pointer to the next field in the list.
+ PTR_EnCAddedField m_pNext;
+
+ // Pointer to the fieldDesc describing which field this refers to
+ PTR_EnCFieldDesc m_pFieldDesc;
+
+ // A dependent handle whose primary object points to the object instance which has been modified,
+ // and whose secondary object points to an EnC helper object containing a reference to the field value.
+ OBJECTHANDLE m_FieldData;
+
+ // Allocates a new EnCAddedField and hooks it up to the object
+ static EnCAddedField *Allocate(OBJECTREF thisPointer, EnCFieldDesc *pFD);
+};
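+
+// A sketch of the resulting indirection (per the comment on m_FieldData above):
+//
+//   m_FieldData (dependent OBJECTHANDLE)
+//       primary   --> the object instance that was edited
+//       secondary --> helper object holding a reference to the field value
+//
+// Because the handle is dependent, the field value stays alive exactly as long
+// as the instance it belongs to.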
+
+// Information about a static field value added by EnC
+// We can't change the MethodTable, so these are hung off the FieldDesc
+// Note that the actual size of this type is variable.
+struct EnCAddedStaticField
+{
+ // Pointer back to the fieldDesc describing which field this refers to
+ // This isn't strictly necessary since our callers always know it, but the overhead
+ // is minimal (per type, not per instance) and this is cleaner and permits an extra sanity check.
+ PTR_EnCFieldDesc m_pFieldDesc;
+
+ // For primitive types, this is the beginning of the actual value.
+ // For reference types and user-defined value types, it's the beginning of a pointer
+ // to the object.
+ // Note that this is intentionally the last field of this structure as it is variably-sized.
+ // NOTE: It looks like we did the same thing for instance fields in EnCAddedField but then simplified
+ // it by always storing just an OBJREF which may point to a boxed value type. I suggest we do the
+ // same here unless we can demonstrate that the extra indirection makes a noticeable perf difference
+ // in scenarios which are important for EnC.
+ BYTE m_FieldData;
+
+ // Get a pointer to the contents of this field
+ PTR_CBYTE GetFieldData();
+
+ // Allocate a new instance appropriate for the specified field
+ static EnCAddedStaticField *Allocate(EnCFieldDesc *pFD);
+};
+
+// EnCSyncBlockInfo lives off an object's SyncBlock and contains a lazily-created linked
+// list of the values of all the fields added to the object by EnC
+//
+// Note that much of the logic here would probably belong better in EnCAddedField since it is
+// specific to the implementation there. Perhaps this should ideally just be a container
+// that holds a bunch of EnCAddedFields and can iterate over them and map from EnCFieldDesc
+// to them.
+class EnCSyncBlockInfo
+{
+public:
+ // Initialize the list
+ EnCSyncBlockInfo() :
+ m_pList(PTR_NULL)
+ {
+ }
+
+ // Get a pointer to the data in a specific field on this object or return NULL if it
+ // doesn't exist
+ PTR_CBYTE ResolveField(OBJECTREF thisPointer,
+ EnCFieldDesc * pFieldDesc);
+
+ // Get a pointer to the data in a specific field on this object. We'll allocate if it doesn't already
+ // exist, so we'll only fail on OOM
+ PTR_CBYTE ResolveOrAllocateField(OBJECTREF thisPointer, EnCFieldDesc *pFD);
+
+
+ // Free the data used by this field value. Called after the object instance the
+ // fields belong to is collected.
+ void Cleanup();
+
+private:
+ // Gets the address of an EnC field accounting for its type: valuetype, class or primitive
+ PTR_CBYTE GetEnCFieldAddrFromHelperFieldDesc(FieldDesc * pHelperFieldDesc,
+ OBJECTREF pHelper,
+ EnCFieldDesc * pFD);
+
+ // Pointer to the head of the list
+ PTR_EnCAddedField m_pList;
+};
+
+// The DPTR is actually defined in syncblk.h to make it visible to SyncBlock
+// typedef DPTR(EnCSyncBlockInfo) PTR_EnCSyncBlockInfo;
+
+#endif // EnC_SUPPORTED
+
+
+//---------------------------------------------------------------------------------------
+//
+// EncApproxFieldDescIterator - Iterates through all fields of a class including ones
+// added by EnC
+//
+// Notes:
+// This is just like ApproxFieldDescIterator, but it also includes EnC fields if
+// EnC is supported.
+// This does not include inherited fields.
+// The order in which the fields are returned here is unspecified.
+//
+// We don't bother maintaining an accurate total and remaining field count like
+// ApproxFieldDescIterator because none of our clients need it. But it would
+// be easy to add this using the data from m_encClassData.
+//
+class EncApproxFieldDescIterator
+{
+public:
+#ifdef EnC_SUPPORTED
+ // Create and initialize the iterator
+ EncApproxFieldDescIterator(MethodTable *pMT, int iteratorType, BOOL fixupEnC);
+
+ // Get the next fieldDesc (either EnC or non-EnC)
+ PTR_FieldDesc Next();
+
+#else
+ // Non-EnC version - simple wrapper
+ EncApproxFieldDescIterator(MethodTable *pMT, int iteratorType, BOOL fixupEnC) :
+ m_nonEnCIter( pMT, iteratorType ) {}
+
+ PTR_FieldDesc Next() { WRAPPER_NO_CONTRACT; return m_nonEnCIter.Next(); }
+#endif // EnC_SUPPORTED
+
+ int GetIteratorType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_nonEnCIter.GetIteratorType();
+ }
+
+private:
+ // The iterator for the non-EnC fields.
+ // We delegate to this for all non-EnC-specific stuff
+ ApproxFieldDescIterator m_nonEnCIter;
+
+#ifdef EnC_SUPPORTED
+ // Return the next available EnC FieldDesc or NULL when done
+ PTR_EnCFieldDesc NextEnC();
+
+ // True if our client wants us to fixup any EnC fieldDescs before handing them back
+ BOOL m_fixupEnC;
+
+ // A count of how many EnC fields have been returned so far
+ int m_encFieldsReturned;
+
+ // The current pointer into one of the EnC field lists when enumerating EnC fields
+ PTR_EnCAddedFieldElement m_pCurrListElem;
+
+ // EnC specific data for the class of interest.
+ // NULL if EnC is disabled or this class doesn't have any EnC data
+ PTR_EnCEEClassData m_encClassData;
+#endif
+};
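+
+// A usage sketch (assumes a valid MethodTable * pMT; the iterator-type flags
+// are the ApproxFieldDescIterator values referenced above):
+//
+//   EncApproxFieldDescIterator fieldIter(
+//       pMT,
+//       ApproxFieldDescIterator::INSTANCE_FIELDS | ApproxFieldDescIterator::STATIC_FIELDS,
+//       FALSE /* fixupEnC */);
+//   PTR_FieldDesc pFD;
+//   while ((pFD = fieldIter.Next()) != NULL)
+//   {
+//       // Regular fields come back first, then any EnC-added fields.
+//   }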
+
+#endif // #ifndef EnC_H
diff --git a/src/vm/eventreporter.cpp b/src/vm/eventreporter.cpp
new file mode 100644
index 0000000000..72f7ba9976
--- /dev/null
+++ b/src/vm/eventreporter.cpp
@@ -0,0 +1,768 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+//*****************************************************************************
+// EventReporter.cpp
+//
+// A utility to log an entry in event log.
+//
+//*****************************************************************************
+
+
+#include "common.h"
+#include "utilcode.h"
+#include "eventreporter.h"
+#include "typestring.h"
+
+#include "../dlls/mscorrc/resource.h"
+
+#if defined(FEATURE_CORECLR)
+#include "getproductversionnumber.h"
+#endif // FEATURE_CORECLR
+
+//---------------------------------------------------------------------------------------
+//
+// A constructor for EventReporter. The header of the log is generated here.
+//
+// Arguments:
+// type - Event report type
+//
+// Assumptions:
+// The argument type must be valid.
+//
+EventReporter::EventReporter(EventReporterType type)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_eventType = type;
+
+ HMODULE hModule = WszGetModuleHandle(NULL);
+ WCHAR appPath[MAX_PATH];
+ DWORD ret = WszGetModuleFileName(hModule, appPath, NumItems(appPath));
+
+ fBufferFull = FALSE;
+
+ InlineSString<256> ssMessage;
+
+ if(!ssMessage.LoadResource(CCompRC::Optional, IDS_ER_APPLICATION))
+ m_Description.Append(W("Application: "));
+ else
+ {
+ m_Description.Append(ssMessage);
+ }
+
+ // If we were able to get an app name.
+ if (ret != 0)
+ {
+ // If the app name has a '\', consider the part after that; otherwise consider the whole name.
+ WCHAR* appName = wcsrchr(appPath, W('\\'));
+ appName = appName ? appName+1 : appPath;
+ m_Description.Append(appName);
+ m_Description.Append(W("\n"));
+ }
+ else
+ {
+ ssMessage.Clear();
+ if(!ssMessage.LoadResource(CCompRC::Optional, IDS_ER_UNKNOWN))
+ m_Description.Append(W("unknown\n"));
+ else
+ {
+ m_Description.Append(ssMessage);
+ m_Description.Append(W("\n"));
+ }
+ }
+
+ ssMessage.Clear();
+ if(!ssMessage.LoadResource(CCompRC::Optional, IDS_ER_FRAMEWORK_VERSION))
+#ifndef FEATURE_CORECLR
+ m_Description.Append(W("Framework Version: "));
+#else // FEATURE_CORECLR
+ m_Description.Append(W("CoreCLR Version: "));
+#endif // !FEATURE_CORECLR
+ else
+ {
+ m_Description.Append(ssMessage);
+ }
+
+ BOOL fHasVersion = FALSE;
+#ifndef FEATURE_CORECLR
+ if (GetCLRModule() != NULL)
+ {
+ WCHAR buffer[80];
+ DWORD length;
+ if (SUCCEEDED(GetCORVersionInternal(buffer, 80, &length)))
+ {
+ m_Description.Append(buffer);
+ m_Description.Append(W("\n"));
+ fHasVersion = TRUE;
+ }
+ }
+#else // FEATURE_CORECLR
+ DWORD dwMajorVersion = 0;
+ DWORD dwMinorVersion = 0;
+ DWORD dwBuild = 0;
+ DWORD dwRevision = 0;
+ EventReporter::GetCoreCLRInstanceProductVersion(&dwMajorVersion, &dwMinorVersion, &dwBuild, &dwRevision);
+ m_Description.AppendPrintf(W("%lu.%lu.%lu.%lu\n"),dwMajorVersion, dwMinorVersion, dwBuild, dwRevision);
+ fHasVersion = TRUE;
+#endif // !FEATURE_CORECLR
+
+ if (!fHasVersion)
+ {
+ ssMessage.Clear();
+ if(!ssMessage.LoadResource(CCompRC::Optional, IDS_ER_UNKNOWN))
+ m_Description.Append(W("unknown\n"));
+ else
+ {
+ m_Description.Append(ssMessage);
+ m_Description.Append(W("\n"));
+ }
+ }
+
+ ssMessage.Clear();
+
+ switch(m_eventType) {
+ case ERT_UnhandledException:
+ if(!ssMessage.LoadResource(CCompRC::Optional, IDS_ER_UNHANDLEDEXCEPTION))
+ m_Description.Append(W("Description: The process was terminated due to an unhandled exception."));
+ else
+ {
+ m_Description.Append(ssMessage);
+ }
+ m_Description.Append(W("\n"));
+ break;
+
+ case ERT_ManagedFailFast:
+ if(!ssMessage.LoadResource(CCompRC::Optional, IDS_ER_MANAGEDFAILFAST))
+ m_Description.Append(W("Description: The application requested process termination through System.Environment.FailFast(string message)."));
+ else
+ {
+ m_Description.Append(ssMessage);
+ }
+ m_Description.Append(W("\n"));
+ break;
+
+ case ERT_UnmanagedFailFast:
+ if(!ssMessage.LoadResource(CCompRC::Optional, IDS_ER_UNMANAGEDFAILFAST))
+ m_Description.Append(W("Description: The process was terminated due to an internal error in the .NET Runtime "));
+ else
+ {
+ m_Description.Append(ssMessage);
+ }
+ break;
+
+ case ERT_StackOverflow:
+ // Fetch the localized Stack Overflow Error text or fall back on a hardcoded variant if things get dire.
+ if(!ssMessage.LoadResource(CCompRC::Optional, IDS_ER_STACK_OVERFLOW))
+ m_Description.Append(W("Description: The process was terminated due to stack overflow."));
+ else
+ {
+ m_Description.Append(ssMessage);
+ }
+ m_Description.Append(W("\n"));
+ break;
+
+ case ERT_CodeContractFailed:
+ if(!ssMessage.LoadResource(CCompRC::Optional, IDS_ER_CODECONTRACT_FAILED))
+ m_Description.Append(W("Description: The application encountered a bug. A managed code contract (precondition, postcondition, object invariant, or assert) failed."));
+ else
+ {
+ m_Description.Append(ssMessage);
+ }
+ m_Description.Append(W("\n"));
+ break;
+
+ default:
+ _ASSERTE(!"Unknown EventReporterType.");
+ break;
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Add extra description to the EventLog report.
+//
+// Arguments:
+// pString - The extra description to append to log
+//
+// Return Value:
+// None.
+//
+void EventReporter::AddDescription(__in WCHAR *pString)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ StackSString s(pString);
+ AddDescription(s);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Add extra description to the EventLog report.
+//
+// Arguments:
+// pString - The extra description to append to log
+//
+// Return Value:
+// None.
+//
+void EventReporter::AddDescription(SString& s)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ _ASSERTE (m_eventType == ERT_UnhandledException || m_eventType == ERT_ManagedFailFast ||
+ m_eventType == ERT_UnmanagedFailFast || m_eventType == ERT_CodeContractFailed);
+ if (m_eventType == ERT_ManagedFailFast)
+ {
+ SmallStackSString ssMessage;
+ if(!ssMessage.LoadResource(CCompRC::Optional, IDS_ER_MANAGEDFAILFASTMSG))
+ m_Description.Append(W("Message: "));
+ else
+ {
+ m_Description.Append(ssMessage);
+ }
+ }
+ else if (m_eventType == ERT_UnhandledException)
+ {
+ SmallStackSString ssMessage;
+ if (!ssMessage.LoadResource(CCompRC::Optional, IDS_ER_UNHANDLEDEXCEPTIONMSG))
+ {
+ m_Description.Append(W("Exception Info: "));
+ }
+ else
+ {
+ m_Description.Append(ssMessage);
+ }
+ }
+ else if (m_eventType == ERT_CodeContractFailed)
+ {
+ SmallStackSString ssMessage;
+ if (!ssMessage.LoadResource(CCompRC::Optional, IDS_ER_CODECONTRACT_DETAILMSG))
+ m_Description.Append(W("Contract details: "));
+ else
+ m_Description.Append(ssMessage);
+ }
+ m_Description.Append(s);
+ m_Description.Append(W("\n"));
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Add a marker for stack trace section in the EventLog entry
+//
+// Arguments:
+// None.
+//
+// Return Value:
+// None.
+//
+void EventReporter::BeginStackTrace()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ _ASSERTE (m_eventType == ERT_UnhandledException || m_eventType == ERT_ManagedFailFast || m_eventType == ERT_CodeContractFailed);
+ InlineSString<80> ssMessage;
+ if(!ssMessage.LoadResource(CCompRC::Optional, IDS_ER_STACK))
+ m_Description.Append(W("Stack:\n"));
+ else
+ {
+ m_Description.Append(ssMessage);
+ m_Description.Append(W("\n"));
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Add the signature of one managed stack frame into the EventLog entry.
+//
+// Arguments:
+// s - The signature of managed function, including argument type
+//
+// Return Value:
+// None.
+//
+void EventReporter::AddStackTrace(SString& s)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE (m_eventType == ERT_UnhandledException || m_eventType == ERT_ManagedFailFast || m_eventType == ERT_CodeContractFailed);
+
+ // Continue to append to the buffer until we are full
+ if (fBufferFull == FALSE)
+ {
+ m_Description.Append(s);
+ m_Description.Append(W("\n"));
+
+ COUNT_T curSize = m_Description.GetCount();
+
+ // Truncate the buffer if we have exceeded the limit based upon the OS we are on
+ DWORD dwMaxSizeLimit = MAX_SIZE_EVENTLOG_ENTRY_STRING_WINVISTA;
+ if (curSize >= dwMaxSizeLimit)
+ {
+ // Load the truncation message
+ StackSString truncate;
+ if (!truncate.LoadResource(CCompRC::Optional, IDS_ER_MESSAGE_TRUNCATE))
+ {
+ truncate.Set(W("The remainder of the message was truncated."));
+ }
+ truncate.Insert(truncate.Begin(), W("\n"));
+ truncate.Insert(truncate.End(), W("\n"));
+
+ SString::Iterator ext;
+ COUNT_T truncCount = truncate.GetCount();
+
+ // Go back "truncCount" characters from the end of the string.
+ // The "-1" in end is to accommodate null termination.
+ ext = m_Description.Begin() + dwMaxSizeLimit - truncCount - 1;
+
+ // Now look for a "\n" from the last position we got
+ BOOL fFoundMarker = m_Description.FindBack(ext, W("\n"));
+ if (ext != m_Description.Begin())
+ {
+ // Move to the next character if we found the "\n"
+ if (fFoundMarker)
+ ext++;
+ }
+
+ // Truncate the string till our current position and append
+ // the truncation message
+ m_Description.Truncate(ext);
+ m_Description.Append(truncate);
+
+ // Set the flag that we are full - no point appending more stack details
+ fBufferFull = TRUE;
+ }
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Generate an entry in EventLog.
+//
+// Arguments:
+// None.
+//
+// Return Value:
+// None.
+//
+void EventReporter::Report()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ DWORD eventID;
+ switch (m_eventType)
+ {
+ case ERT_UnhandledException:
+ eventID = 1026;
+ break;
+ case ERT_ManagedFailFast:
+ eventID = 1025;
+ break;
+ case ERT_UnmanagedFailFast:
+ eventID = 1023;
+ break;
+ case ERT_StackOverflow:
+ eventID = 1027;
+ break;
+ case ERT_CodeContractFailed:
+ eventID = 1028;
+ break;
+ default:
+ _ASSERTE(!"Invalid event type");
+ eventID = 1023;
+ break;
+ }
+
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ COUNT_T ctSize = m_Description.GetCount();
+ LOG((LF_EH, LL_INFO100, "EventReporter::Report - Writing %d bytes to event log.\n", ctSize));
+
+ if (ctSize > 0)
+ {
+ DWORD dwRetVal = ClrReportEvent(W(".NET Runtime"),
+ EVENTLOG_ERROR_TYPE,
+ 0,
+ eventID,
+ NULL,
+ m_Description.GetUnicode()
+ );
+
+ if (dwRetVal != ERROR_SUCCESS)
+ {
+ LOG((LF_EH, LL_INFO100, "EventReporter::Report - Error (win32 code %d) while writing to event log.\n", dwRetVal));
+
+ // We were unable to log the error to event log - now check why.
+ if ((dwRetVal != ERROR_EVENTLOG_FILE_CORRUPT) && (dwRetVal != ERROR_LOG_FILE_FULL) &&
+ (dwRetVal != ERROR_NOT_ENOUGH_MEMORY)) // Writing to the log can fail under OOM (observed on Vista)
+ {
+ // If the event log file was neither corrupt nor full, then assert,
+ // since something is wrong!
+#ifndef _TARGET_ARM_
+ //ARMTODO: Event reporting is currently non-functional on winpe.
+ _ASSERTE(!"EventReporter::Report - Unable to log details to event log!");
+#endif
+ }
+ else
+ {
+ // Since the event log file was either corrupt or full, simply
+ // write this status to our log. We cannot fix a corrupt file
+ // and we cannot clear the log since we don't administer the machine.
+ STRESS_LOG0(LF_CORDB, LL_INFO10000, "EventReporter::Report: Event log is full, corrupt or not enough memory to process.\n");
+ }
+ }
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Check if we should generate an EventLog entry.
+//
+// Arguments:
+// None
+//
+// Return Value:
+// TRUE - We should generate one entry
+// FALSE - We should not generate one entry
+//
+BOOL ShouldLogInEventLog()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_CORESYSTEM
+ // If the process is being debugged, don't log
+ if ((CORDebuggerAttached() || IsDebuggerPresent())
+#ifdef _DEBUG
+ // Allow debug to be able to break in
+ &&
+ CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnUncaughtException) == 0
+#endif
+ )
+ {
+ return FALSE;
+ }
+
+ static LONG fOnce = 0;
+ if (fOnce == 1 || FastInterlockExchange(&fOnce, 1) == 1)
+ {
+ return FALSE;
+ }
+
+ if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_logFatalError) == 0)
+ return FALSE;
+ else
+ return TRUE;
+#else
+ // no event log on Apollo
+ return FALSE;
+#endif //!FEATURE_CORESYSTEM
+}
+
+//---------------------------------------------------------------------------------------
+//
+// A callback function for the stack walker to save the signature of one managed frame.
+//
+// Arguments:
+// pCF - The frame info passed by stack walker.
+// pData - The data to pass info between stack walker and its caller
+//
+// Return Value:
+// SWA_CONTINUE - Continue search for the next frame.
+//
+struct LogCallstackData
+{
+ EventReporter *pReporter;
+ SmallStackSString *pWordAt;
+};
+
+StackWalkAction LogCallstackForEventReporterCallback(
+ CrawlFrame *pCF, // Current frame info from the stack walker
+ VOID* pData // Caller's private data
+)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EventReporter* pReporter = ((LogCallstackData*)pData)->pReporter;
+ SmallStackSString *pWordAt = ((LogCallstackData*)pData)->pWordAt;
+
+ MethodDesc *pMD = pCF->GetFunction();
+ _ASSERTE(pMD != NULL);
+
+ StackSString str;
+ str = *pWordAt;
+
+ TypeString::AppendMethodInternal(str, pMD, TypeString::FormatNamespace|TypeString::FormatFullInst|TypeString::FormatSignature);
+ pReporter->AddStackTrace(str);
+
+ return SWA_CONTINUE;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// A worker to save the managed stack trace.
+//
+// Arguments:
+// reporter - EventReporter object for EventLog
+//
+// Return Value:
+// None
+//
+void LogCallstackForEventReporterWorker(EventReporter& reporter)
+{
+ Thread* pThread = GetThread();
+ _ASSERTE (pThread);
+
+ SmallStackSString WordAt;
+
+ if (!WordAt.LoadResource(CCompRC::Optional, IDS_ER_WORDAT))
+ {
+ WordAt.Set(W(" at"));
+ }
+ else
+ {
+ WordAt.Insert(WordAt.Begin(), W(" "));
+ }
+ WordAt += W(" ");
+
+ LogCallstackData data = {
+ &reporter, &WordAt
+ };
+
+ pThread->StackWalkFrames(&LogCallstackForEventReporterCallback, &data, QUICKUNWIND | FUNCTIONSONLY);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Generate stack trace info for those managed frames on the stack currently.
+//
+// Arguments:
+// reporter - EventReporter object for EventLog
+//
+// Return Value:
+// None
+//
+void LogCallstackForEventReporter(EventReporter& reporter)
+{
+ WRAPPER_NO_CONTRACT;
+
+ reporter.BeginStackTrace();
+
+ LogCallstackForEventReporterWorker(reporter);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Generate an EventLog entry for unhandled exception.
+//
+// Arguments:
+// pExceptionInfo - Exception information
+//
+// Return Value:
+// None
+//
+void DoReportForUnhandledException(PEXCEPTION_POINTERS pExceptionInfo)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (ShouldLogInEventLog())
+ {
+ Thread *pThread = GetThread();
+ EventReporter reporter(EventReporter::ERT_UnhandledException);
+ EX_TRY
+ {
+ StackSString s;
+ if (pThread && pThread->HasException() != NULL)
+ {
+ GCX_COOP();
+ struct
+ {
+ OBJECTREF throwable;
+ STRINGREF remoteStackTraceString;
+ STRINGREF originalExceptionMessage;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.throwable = pThread->GetThrowable();
+ _ASSERTE(gc.throwable != NULL);
+
+#ifdef FEATURE_CORECLR
+ // On CoreCLR, managed code execution happens in non-default AppDomains and all threads have an AD transition
+ // at their base from DefaultDomain to the target Domain before they start executing managed code. Thus, when
+ // an exception goes unhandled in a non-default AppDomain on a reverse pinvoke thread, the original exception details are copied
+ // to the Message property of System.CrossAppDomainMarshaledException instance at the AD transition boundary,
+ // and the exception is then thrown in the calling AppDomain. This is done since CoreCLR does not support marshaling of
+ // objects across AppDomains.
+ //
+ // On SL, exceptions don't go unhandled to the OS. But in WLC, they can. Thus, when the scenario above happens for WLC,
+ // the OS will invoke CoreCLR's registered UEF and reach here to write the stacktrace from the
+ // exception object (which will be a CrossAppDomainMarshaledException instance) to the event log. At this point,
+ // we shall be in DefaultDomain.
+ //
+ // However, the original exception details are in the Message property of CrossAppDomainMarshaledException. So, we should
+ // look that up and if it is not empty, add those details to the EventReporter so that they get written to the
+ // event log as well.
+ //
+ // We can also be here when an exception goes unhandled on a pure managed thread in a non-default
+ // AppDomain. In such a case, we won't have a CrossAppDomainMarshaledException instance but the
+ // original exception object, from which the stack trace will be extracted.
+ if (pThread->GetDomain()->IsDefaultDomain())
+ {
+ if (IsExceptionOfType(kCrossAppDomainMarshaledException, &(gc.throwable)))
+ {
+ // This is a CrossAppDomainMarshaledException instance - check if it has
+ // something for us in the Message property.
+ gc.originalExceptionMessage = ((EXCEPTIONREF)gc.throwable)->GetMessage();
+ if (gc.originalExceptionMessage != NULL)
+ {
+ // Ok - so, we have details about the original exception. Add them to the
+ // EventReporter object so that they get written to the event log.
+ reporter.AddDescription(gc.originalExceptionMessage->GetBuffer());
+
+ LOG((LF_EH, LL_INFO100, "DoReportForUnhandledException - Added original exception details to EventReporter from CrossAppDomainMarshaledException object.\n"));
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "DoReportForUnhandledException - Original exception details not present in CrossAppDomainMarshaledException object.\n"));
+ }
+ }
+ }
+ else
+#endif // FEATURE_CORECLR
+ {
+ // Add the details of the exception object to the event reporter.
+ TypeString::AppendType(s, TypeHandle(gc.throwable->GetMethodTable()), TypeString::FormatNamespace|TypeString::FormatFullInst);
+ reporter.AddDescription(s);
+ reporter.BeginStackTrace();
+ if (IsException(gc.throwable->GetMethodTable()))
+ {
+ gc.remoteStackTraceString = ((EXCEPTIONREF)gc.throwable)->GetRemoteStackTraceString();
+ if (gc.remoteStackTraceString != NULL && gc.remoteStackTraceString->GetStringLength())
+ {
+ SString remoteStackTrace;
+ gc.remoteStackTraceString->GetSString(remoteStackTrace);
+
+ // If source info is contained, trim it
+ StripFileInfoFromStackTrace(remoteStackTrace);
+
+ reporter.AddStackTrace(remoteStackTrace);
+ }
+ }
+ LogCallstackForEventReporterWorker(reporter);
+ }
+
+ GCPROTECT_END();
+ }
+ else
+ {
+ InlineSString<80> ssErrorFormat;
+ if(!ssErrorFormat.LoadResource(CCompRC::Optional, IDS_ER_UNHANDLEDEXCEPTIONINFO))
+ ssErrorFormat.Set(W("exception code %1, exception address %2"));
+ SmallStackSString exceptionCodeString;
+ exceptionCodeString.Printf(W("%x"), pExceptionInfo->ExceptionRecord->ExceptionCode);
+ SmallStackSString addressString;
+ addressString.Printf(W("%p"), (UINT_PTR)pExceptionInfo->ExceptionRecord->ExceptionAddress);
+ s.FormatMessage(FORMAT_MESSAGE_FROM_STRING, (LPCWSTR)ssErrorFormat, 0, 0, exceptionCodeString, addressString);
+ reporter.AddDescription(s);
+ if (pThread)
+ {
+ LogCallstackForEventReporter(reporter);
+ }
+ }
+ }
+ EX_CATCH
+ {
+ // We are reporting an exception. If we throw while working on this, it is not fatal.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ reporter.Report();
+ }
+}
+
+#if defined(FEATURE_CORECLR)
+// This function will return the product version of CoreCLR
+// instance we are executing in.
+void EventReporter::GetCoreCLRInstanceProductVersion(DWORD * pdwMajor, DWORD * pdwMinor, DWORD * pdwBuild, DWORD * pdwRevision)
+{
+ STATIC_CONTRACT_THROWS;
+
+ // Get the instance of the runtime
+ HMODULE hModRuntime = GetCLRModule();
+ _ASSERTE(hModRuntime != NULL);
+
+ // Get the path to the runtime
+ WCHAR runtimePath[MAX_PATH];
+ DWORD ret = WszGetModuleFileName(hModRuntime, runtimePath, NumItems(runtimePath));
+ if (ret != 0)
+ {
+ // Got the path - get the file version from the path
+ SString path;
+ path.Clear();
+ path.Append(runtimePath);
+ DWORD dwVersionMS = 0;
+ DWORD dwVersionLS = 0;
+ GetProductVersionNumber(path, &dwVersionMS, &dwVersionLS);
+
+ // Get the Major.Minor.Build.Revision details from the returned values
+ *pdwMajor = HIWORD(dwVersionMS);
+ *pdwMinor = LOWORD(dwVersionMS);
+ *pdwBuild = HIWORD(dwVersionLS);
+ *pdwRevision = LOWORD(dwVersionLS);
+ LOG((LF_CORDB, LL_INFO100, "GetCoreCLRInstanceProductVersion: Got CoreCLR version: %lu.%lu.%lu.%lu\n",
+ *pdwMajor, *pdwMinor, *pdwBuild, *pdwRevision));
+ }
+ else
+ {
+ // Failed to get the path
+ LOG((LF_CORDB, LL_INFO100, "GetCoreCLRInstanceProductVersion: Unable to get CoreCLR version.\n"));
+ }
+}
+#endif // FEATURE_CORECLR
diff --git a/src/vm/eventreporter.h b/src/vm/eventreporter.h
new file mode 100644
index 0000000000..4dfcec6f7d
--- /dev/null
+++ b/src/vm/eventreporter.h
@@ -0,0 +1,78 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+//*****************************************************************************
+// EventReporter.h:
+// A utility to log an entry in event log.
+//*****************************************************************************
+
+
+#ifndef _eventreporter_h_
+#define _eventreporter_h_
+
+#include "contract.h"
+#include "sstring.h"
+
+// Maximum size for a string in event log entry
+#define MAX_SIZE_EVENTLOG_ENTRY_STRING 0x8000 // decimal 32768
+
+// The (approx.) maximum size that Vista appears to allow. After discussion with the OS event log team,
+// it was identified that Vista took a breaking change in the ReportEventW API implementation
+// without it being publicly documented.
+//
+// An event entry comprises the string to be written and the event header information. Prior to Vista,
+// 32K-length strings were allowed and the event header size was counted over and above that. From Vista
+// onwards, the total permissible length of the string and the event header together became 32K, so the
+// strings themselves must be shorter. Hence the change in size.
+#define MAX_SIZE_EVENTLOG_ENTRY_STRING_WINVISTA 0x7C62 // decimal 31842
+
+class EventReporter
+{
+public:
+ enum EventReporterType
+ {
+ ERT_UnhandledException,
+ ERT_ManagedFailFast,
+ ERT_UnmanagedFailFast,
+ ERT_StackOverflow,
+ ERT_CodeContractFailed,
+ };
+private:
+ EventReporterType m_eventType;
+ // We use 2048, which is large enough for most tasks. This allows us to avoid
+ // unnecessary memory allocation.
+ InlineSString<2048> m_Description;
+
+ // Flag to indicate if the buffer is full
+ BOOL fBufferFull;
+
+#ifdef FEATURE_CORECLR
+ static void GetCoreCLRInstanceProductVersion(DWORD * pdwMajor, DWORD * pdwMinor, DWORD * pdwBuild, DWORD * pdwRevision);
+#endif // FEATURE_CORECLR
+
+public:
+ // Construct
+ EventReporter(EventReporterType type);
+ // Add extra info into description part of the log
+ void AddDescription(__in WCHAR *pString);
+ void AddDescription(SString& s);
+ // Start callstack record
+ void BeginStackTrace();
+ // Add one frame to the callstack part
+ void AddStackTrace(SString& s);
+ // Report to the EventLog
+ void Report();
+};
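+
+// A minimal usage sketch (mirrors the pattern used by DoReportForUnhandledException
+// in eventreporter.cpp; the message text is illustrative):
+//
+//   EventReporter reporter(EventReporter::ERT_ManagedFailFast);
+//   StackSString message(W("FailFast was called."));
+//   reporter.AddDescription(message);
+//   LogCallstackForEventReporter(reporter); // declared below
+//   reporter.Report();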
+
+// return TRUE if we need to log in EventLog.
+BOOL ShouldLogInEventLog();
+// Record managed callstack in EventReporter.
+void LogCallstackForEventReporter(EventReporter& reporter);
+// Generate a report in EventLog for unhandled exception for both managed and unmanaged.
+void DoReportForUnhandledException(PEXCEPTION_POINTERS pExceptionInfo);
+
+#endif // _eventreporter_h_
diff --git a/src/vm/eventstore.cpp b/src/vm/eventstore.cpp
new file mode 100644
index 0000000000..6568ebd784
--- /dev/null
+++ b/src/vm/eventstore.cpp
@@ -0,0 +1,220 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#include "eventstore.hpp"
+#include "synch.h"
+
+// A class to maintain a pool of available events.
+
+const int EventStoreLength = 8;
+class EventStore
+{
+public:
+ // Note: No constructors/destructors - global instance
+
+ void Init()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ m_EventStoreCrst.Init(CrstEventStore, CRST_UNSAFE_ANYMODE);
+ m_Store = NULL;
+ }
+
+ void Destroy()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE (g_fEEShutDown);
+
+ m_EventStoreCrst.Destroy();
+
+ EventStoreElem *walk;
+ EventStoreElem *next;
+
+ walk = m_Store;
+ while (walk) {
+ next = walk->next;
+ delete (walk);
+ walk = next;
+ }
+ }
+
+ void StoreHandleForEvent (CLREvent* handle)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ _ASSERTE (handle);
+ CrstHolder ch(&m_EventStoreCrst);
+ if (m_Store == NULL) {
+ m_Store = new EventStoreElem ();
+ }
+ EventStoreElem *walk;
+#ifdef _DEBUG
+ // See if we have some leakage.
+ LONG count = 0;
+ walk = m_Store;
+ while (walk) {
+ count += walk->AvailableEventCount();
+ walk = walk->next;
+ }
+ // The number of events stored in the pool should be small.
+ _ASSERTE (count <= ThreadStore::s_pThreadStore->ThreadCountInEE() * 2 + 10);
+#endif
+ walk = m_Store;
+ while (walk) {
+ if (walk->StoreHandleForEvent (handle) )
+ return;
+ if (walk->next == NULL) {
+ break;
+ }
+ walk = walk->next;
+ }
+ if (walk != NULL)
+ {
+ walk->next = new EventStoreElem ();
+ walk->next->hArray[0] = handle;
+ }
+ }
+
+ CLREvent* GetHandleForEvent ()
+ {
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ CLREvent* handle;
+ CrstHolder ch(&m_EventStoreCrst);
+ EventStoreElem *walk = m_Store;
+ while (walk) {
+ handle = walk->GetHandleForEvent();
+ if (handle != NULL) {
+ return handle;
+ }
+ walk = walk->next;
+ }
+ handle = new CLREvent;
+ _ASSERTE (handle != NULL);
+ handle->CreateManualEvent(TRUE);
+ return handle;
+ }
+
+private:
+ struct EventStoreElem
+ {
+ CLREvent *hArray[EventStoreLength];
+ EventStoreElem *next;
+
+ EventStoreElem ()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ next = NULL;
+ for (int i = 0; i < EventStoreLength; i ++) {
+ hArray[i] = NULL;
+ }
+ }
+
+ ~EventStoreElem ()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ for (int i = 0; i < EventStoreLength; i++) {
+ if (hArray[i]) {
+ delete hArray[i];
+ hArray[i] = NULL;
+ }
+ }
+ }
+
+ // Store a handle in the current EventStoreElem. Return TRUE if successful.
+ // Return FALSE if failed due to no free slot.
+ BOOL StoreHandleForEvent (CLREvent* handle)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ int i;
+ for (i = 0; i < EventStoreLength; i++) {
+ if (hArray[i] == NULL) {
+ hArray[i] = handle;
+ return TRUE;
+ }
+ }
+ return FALSE;
+ }
+
+ // Get a handle from the current EventStoreElem.
+ CLREvent* GetHandleForEvent ()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ int i;
+ for (i = 0; i < EventStoreLength; i++) {
+ if (hArray[i] != NULL) {
+ CLREvent* handle = hArray[i];
+ hArray[i] = NULL;
+ return handle;
+ }
+ }
+
+ return NULL;
+ }
+
+#ifdef _DEBUG
+ LONG AvailableEventCount ()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ LONG count = 0;
+ for (int i = 0; i < EventStoreLength; i++) {
+ if (hArray[i] != NULL) {
+ count ++;
+ }
+ }
+ return count;
+ }
+#endif
+ };
+
+ EventStoreElem *m_Store;
+
+ // Critical section for adding and removing event used for Object::Wait
+ CrstStatic m_EventStoreCrst;
+};
+
+static EventStore s_EventStore;
+
+CLREvent* GetEventFromEventStore()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return s_EventStore.GetHandleForEvent();
+}
+
+void StoreEventToEventStore(CLREvent* hEvent)
+{
+ WRAPPER_NO_CONTRACT;
+
+ s_EventStore.StoreHandleForEvent(hEvent);
+}
+
+void InitEventStore()
+{
+ WRAPPER_NO_CONTRACT;
+
+ s_EventStore.Init();
+}
+
+void TerminateEventStore()
+{
+ WRAPPER_NO_CONTRACT;
+
+ s_EventStore.Destroy();
+}
diff --git a/src/vm/eventstore.hpp b/src/vm/eventstore.hpp
new file mode 100644
index 0000000000..60d4a655c6
--- /dev/null
+++ b/src/vm/eventstore.hpp
@@ -0,0 +1,33 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef __EventStore_hpp
+#define __EventStore_hpp
+
+#include "synch.h"
+
+class SyncBlock;
+struct SLink;
+struct WaitEventLink;
+
+typedef DPTR(WaitEventLink) PTR_WaitEventLink;
+
+// Used inside Thread class to chain all events that a thread is waiting for by Object::Wait
+struct WaitEventLink {
+ SyncBlock *m_WaitSB;
+ CLREvent *m_EventWait;
+ PTR_Thread m_Thread; // Owner of this WaitEventLink.
+ PTR_WaitEventLink m_Next; // Chain to the next waited SyncBlock.
+ SLink m_LinkSB; // Chain to the next thread waiting on the same SyncBlock.
+ DWORD m_RefCount; // How many times Object::Wait is called on the same SyncBlock.
+};
+
+CLREvent* GetEventFromEventStore();
+void StoreEventToEventStore(CLREvent* hEvent);
+void InitEventStore();
+void TerminateEventStore();
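+
+// A usage sketch (hypothetical waiter; the real callers live in the Object::Wait
+// sync-block path): events are borrowed from the pool and returned when done, so
+// repeated waits avoid creating and destroying kernel events.
+//
+//   CLREvent* hEvent = GetEventFromEventStore();
+//   // ... block on hEvent as part of the wait ...
+//   StoreEventToEventStore(hEvent);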
+
+#endif
diff --git a/src/vm/eventtrace.cpp b/src/vm/eventtrace.cpp
new file mode 100644
index 0000000000..faf8a60c2f
--- /dev/null
+++ b/src/vm/eventtrace.cpp
@@ -0,0 +1,6827 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: eventtrace.cpp
+// Abstract: This module implements Event Tracing support
+//
+
+//
+
+//
+// ============================================================================
+
+#include "common.h"
+
+#ifdef FEATURE_REDHAWK
+
+#include "commontypes.h"
+#include "daccess.h"
+#include "debugmacrosext.h"
+#include "palredhawkcommon.h"
+#include "gcrhenv.h"
+#define Win32EventWrite PalEtwEventWrite
+#define InterlockedExchange64 PalInterlockedExchange64
+
+#else // !FEATURE_REDHAWK
+
+#include "eventtrace.h"
+#include "winbase.h"
+#include "contract.h"
+#include "ex.h"
+#include "dbginterface.h"
+#include "finalizerthread.h"
+
+#define Win32EventWrite EventWrite
+
+#ifdef FEATURE_COMINTEROP
+#include "comcallablewrapper.h"
+#include "runtimecallablewrapper.h"
+#endif
+
+// Flags used to store some runtime information for Event Tracing
+BOOL g_fEEOtherStartup=FALSE;
+BOOL g_fEEComActivatedStartup=FALSE;
+GUID g_EEComObjectGuid=GUID_NULL;
+
+BOOL g_fEEHostedStartup = FALSE;
+
+#endif // FEATURE_REDHAWK
+
+#include "eventtracepriv.h"
+
+#ifdef FEATURE_REDHAWK
+volatile LONGLONG ETW::GCLog::s_l64LastClientSequenceNumber = 0;
+#else // FEATURE_REDHAWK
+Volatile<LONGLONG> ETW::GCLog::s_l64LastClientSequenceNumber = 0;
+#endif // FEATURE_REDHAWK
+
+#ifndef FEATURE_REDHAWK
+
+//---------------------------------------------------------------------------------------
+// Helper macros to determine which version of the Method events to use
+//
+// The V2 versions of these events include the ReJITID, the V1 versions do not.
+// Historically, when we version events, we'd just stop sending the old version and only
+// Historically, when we versioned events, we'd just stop sending the old version and only
+// used externally, we need to be a bit careful. In particular, we'd like to allow
+// current xperf to continue working without knowledge of ReJITIDs, and allow future
+// xperf to decode symbols in ReJITted functions. Thus,
+// * During a first-JIT, only issue the existing V1 MethodLoad, etc. events (NOT v0,
+// NOT v2). This event does not include a ReJITID, and can thus continue to be
+// parsed by older decoders.
+// * During a rejit, only issue the new V2 events (NOT v0 or v1), which will include a
+// nonzero ReJITID. Thus, your unique key for a method extent would be MethodID +
+// ReJITID + extent (hot/cold). These events will be ignored by older decoders
+// (including current xperf) because of the version number, but xperf will be
+// updated to decode these in the future.
+
+#define FireEtwMethodLoadVerbose_V1_or_V2(ullMethodIdentifier, ullModuleID, ullMethodStartAddress, ulMethodSize, ulMethodToken, ulMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID) \
+{ \
+ if (rejitID == 0) \
+ { FireEtwMethodLoadVerbose_V1(ullMethodIdentifier, ullModuleID, ullMethodStartAddress, ulMethodSize, ulMethodToken, ulMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID); } \
+ else \
+ { FireEtwMethodLoadVerbose_V2(ullMethodIdentifier, ullModuleID, ullMethodStartAddress, ulMethodSize, ulMethodToken, ulMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID); } \
+}
+
+#define FireEtwMethodLoad_V1_or_V2(ullMethodIdentifier, ullModuleID, ullMethodStartAddress, ulMethodSize, ulMethodToken, ulMethodFlags, clrInstanceID, rejitID) \
+{ \
+ if (rejitID == 0) \
+ { FireEtwMethodLoad_V1(ullMethodIdentifier, ullModuleID, ullMethodStartAddress, ulMethodSize, ulMethodToken, ulMethodFlags, clrInstanceID); } \
+ else \
+ { FireEtwMethodLoad_V2(ullMethodIdentifier, ullModuleID, ullMethodStartAddress, ulMethodSize, ulMethodToken, ulMethodFlags, clrInstanceID, rejitID); } \
+}
+
+#define FireEtwMethodUnloadVerbose_V1_or_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID) \
+{ \
+ if (rejitID == 0) \
+ { FireEtwMethodUnloadVerbose_V1(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID); } \
+ else \
+ { FireEtwMethodUnloadVerbose_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID); } \
+}
+
+#define FireEtwMethodUnload_V1_or_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID, rejitID) \
+{ \
+ if (rejitID == 0) \
+ { FireEtwMethodUnload_V1(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID); } \
+ else \
+ { FireEtwMethodUnload_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID, rejitID); } \
+}
+
+#define FireEtwMethodDCStartVerbose_V1_or_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID) \
+{ \
+ if (rejitID == 0) \
+ { FireEtwMethodDCStartVerbose_V1(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID); } \
+ else \
+ { FireEtwMethodDCStartVerbose_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID); } \
+}
+
+#define FireEtwMethodDCStart_V1_or_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID, rejitID) \
+{ \
+ if (rejitID == 0) \
+ { FireEtwMethodDCStart_V1(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID); } \
+ else \
+ { FireEtwMethodDCStart_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID, rejitID); } \
+}
+
+#define FireEtwMethodDCEndVerbose_V1_or_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID) \
+{ \
+ if (rejitID == 0) \
+ { FireEtwMethodDCEndVerbose_V1(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID); } \
+ else \
+ { FireEtwMethodDCEndVerbose_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID); } \
+}
+
+#define FireEtwMethodDCEnd_V1_or_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID, rejitID) \
+{ \
+ if (rejitID == 0) \
+ { FireEtwMethodDCEnd_V1(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID); } \
+ else \
+ { FireEtwMethodDCEnd_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID, rejitID); } \
+}
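+
+// Illustrative call site (a sketch with hypothetical argument names, not a real
+// caller from this file): pass the method's ReJITID through unconditionally and
+// let the macro select the event version:
+//
+//     FireEtwMethodLoad_V1_or_V2(
+//         ullMethodIdentifier, ullModuleID, ullMethodStartAddress,
+//         ulMethodSize, ulMethodToken, ulMethodFlags,
+//         GetClrInstanceId(),
+//         rejitID);    // 0 => fire the V1 event; nonzero => fire the V2 event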
+
+// Module load / unload events:
+
+#define FireEtwModuleLoad_V1_or_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath) \
+ FireEtwModuleLoad_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath)
+#define FireEtwModuleUnload_V1_or_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath) \
+ FireEtwModuleUnload_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath)
+#define FireEtwModuleDCStart_V1_or_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath) \
+ FireEtwModuleDCStart_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath)
+#define FireEtwModuleDCEnd_V1_or_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath) \
+ FireEtwModuleDCEnd_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath)
+
+
+
+//---------------------------------------------------------------------------------------
+//
+// Rather than checking the NGEN keyword on the runtime provider directly, use this
+// helper that checks that the NGEN runtime provider keyword is enabled AND the
+// OverrideAndSuppressNGenEvents keyword on the runtime provider is NOT enabled.
+//
+// OverrideAndSuppressNGenEvents allows controllers to set the expensive NGEN keyword for
+// older runtimes (< 4.0) where NGEN PDB info is NOT available, while suppressing those
+// expensive events on newer runtimes (>= 4.5) where NGEN PDB info IS available. Note
+// that 4.0 has NGEN PDBs but unfortunately not the OverrideAndSuppressNGenEvents
+// keyword, because NGEN PDBs were made public only after 4.0 shipped. So tools that
+// need to consume both <4.0 and 4.0 events would need to enable the expensive NGEN
+// events to deal properly with 3.5, even though those events aren't necessary on 4.0.
+//
+// On CoreCLR, this keyword is a no-op, because coregen PDBs don't exist (and thus
+// we'll need the NGEN rundown to still work on Silverlight).
+//
+// Return Value:
+// nonzero iff NGenKeyword is enabled on the runtime provider and
+// OverrideAndSuppressNGenEventsKeyword is not enabled on the runtime provider.
+//
+
+BOOL IsRuntimeNgenKeywordEnabledAndNotSuppressed()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return
+ (
+ ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_NGEN_KEYWORD)
+ && ! ( ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_OVERRIDEANDSUPPRESSNGENEVENTS_KEYWORD) )
+ );
+}
+
+// Same as above, but for the rundown provider
+BOOL IsRundownNgenKeywordEnabledAndNotSuppressed()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return
+ (
+ ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNNGEN_KEYWORD)
+ && ! ( ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNOVERRIDEANDSUPPRESSNGENEVENTS_KEYWORD) )
+ );
+}
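+
+// Illustrative guard at a hypothetical NGEN-event call site (a sketch, not code
+// from this file):
+//
+//     if (IsRuntimeNgenKeywordEnabledAndNotSuppressed())
+//     {
+//         // ... fire the expensive NGEN method events ...
+//     }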
+
+/*******************************************************/
+/* Fast assembly function to get the topmost EBP frame */
+/*******************************************************/
+#if defined(_TARGET_X86_)
+extern "C"
+{
+ CallStackFrame* GetEbp()
+ {
+ CallStackFrame *frame=NULL;
+ __asm
+ {
+ mov frame, ebp
+ }
+ return frame;
+ }
+}
+#endif //_TARGET_X86_
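+
+// For reference, the EBP walk in SaveCurrentStack below assumes the standard x86
+// frame layout; roughly (a sketch; the real CallStackFrame is declared elsewhere
+// in the runtime):
+//
+//     struct CallStackFrame
+//     {
+//         CallStackFrame *m_Next;          // saved EBP of the caller's frame
+//         SIZE_T          m_ReturnAddress; // return address pushed by the call
+//     };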
+
+/*************************************/
+/* Function to append a frame to an existing stack */
+/*************************************/
+void ETW::SamplingLog::Append(SIZE_T currentFrame)
+{
+ LIMITED_METHOD_CONTRACT;
+ if(m_FrameCount < (ETW::SamplingLog::s_MaxStackSize-1) &&
+ currentFrame != 0)
+ {
+ m_EBPStack[m_FrameCount] = currentFrame;
+ m_FrameCount++;
+ }
+};
+
+/********************************************************/
+/* Function to get the callstack on the current thread */
+/********************************************************/
+ETW::SamplingLog::EtwStackWalkStatus ETW::SamplingLog::GetCurrentThreadsCallStack(UINT32 *frameCount, PVOID **Stack)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // The stack walk performed below can cause allocations (thus entering the host). But
+ // this is acceptable, since we're not supporting the use of SQL/F1 profiling and
+ // full-blown ETW CLR stacks (which would be redundant).
+ PERMANENT_CONTRACT_VIOLATION(HostViolation, ReasonUnsupportedForSQLF1Profiling);
+
+ m_FrameCount = 0;
+ ETW::SamplingLog::EtwStackWalkStatus stackwalkStatus = SaveCurrentStack();
+
+ _ASSERTE(m_FrameCount < ETW::SamplingLog::s_MaxStackSize);
+
+    // This is not strictly needed, but do it anyway because we use the frame
+    // count while dumping the stack event
+ for(int i=m_FrameCount; i<ETW::SamplingLog::s_MaxStackSize; i++)
+ {
+ m_EBPStack[i] = 0;
+ }
+    // Consumers require at least two frames to decode the event correctly,
+    // because the manifest specifies the number of frames as 2
+ if(m_FrameCount < 2)
+ m_FrameCount = 2;
+
+ *frameCount = m_FrameCount;
+ *Stack = (PVOID *)m_EBPStack;
+ return stackwalkStatus;
+};
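+
+// Illustrative consumer (hypothetical): the returned buffer is owned by this
+// SamplingLog, so read it before the log is reused:
+//
+//     UINT32 frameCount = 0;
+//     PVOID *stack = NULL;
+//     if (samplingLog.GetCurrentThreadsCallStack(&frameCount, &stack) ==
+//         ETW::SamplingLog::Completed)
+//     {
+//         // emit frameCount return addresses from stack[] into the stack event
+//     }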
+
+/*************************************/
+/* Function to save the stack on the current thread */
+/*************************************/
+ETW::SamplingLog::EtwStackWalkStatus ETW::SamplingLog::SaveCurrentStack(int skipTopNFrames)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (!IsGarbageCollectorFullyInitialized())
+ {
+ // If the GC isn't ready yet, then there won't be any interesting
+ // managed code on the stack to walk. Plus, the stack walk itself may
+ // hit problems (e.g., when calling into the code manager) if it's run
+ // too early during startup.
+ return ETW::SamplingLog::UnInitialized;
+ }
+#ifndef DACCESS_COMPILE
+#ifdef _TARGET_AMD64_
+ if (RtlVirtualUnwind_Unsafe == NULL)
+ {
+ // We haven't even set up the RtlVirtualUnwind function pointer yet,
+ // so it's too early to try stack walking.
+ return ETW::SamplingLog::UnInitialized;
+ }
+#endif // _TARGET_AMD64_
+ Thread *pThread = GetThread();
+ if (pThread == NULL)
+ {
+ return ETW::SamplingLog::UnInitialized;
+ }
+ // The thread should not have a hijack set up or we can't walk the stack.
+ if (pThread->m_State & Thread::TS_Hijacked) {
+ return ETW::SamplingLog::UnInitialized;
+ }
+
+ if (pThread->IsEtwStackWalkInProgress())
+ {
+ return ETW::SamplingLog::InProgress;
+ }
+ pThread->MarkEtwStackWalkInProgress();
+ EX_TRY
+ {
+#ifdef _TARGET_X86_
+ CallStackFrame *currentEBP = GetEbp();
+ CallStackFrame *lastEBP = NULL;
+
+ // The EBP stack walk below is meant to be extremely fast. It does not attempt to protect
+ // against cases of stack corruption. *BUT* it does need to validate a "sane" EBP chain.
+
+ // Ensure the EBP in the starting frame is "reasonable" (i.e. above the address of a local)
+ if ((SIZE_T) currentEBP > (SIZE_T)&currentEBP)
+ {
+ while(currentEBP)
+ {
+ lastEBP = currentEBP;
+ currentEBP = currentEBP->m_Next;
+
+ // Check for stack upper limit; we don't check the lower limit on each iteration
+ // (we did it at the top) and each subsequent value in the loop is larger than
+ // the previous (see the check "currentEBP < lastEBP" below)
+ if((SIZE_T)currentEBP > (SIZE_T)Thread::GetStackUpperBound())
+ {
+ break;
+ }
+
+                // If the address is implausibly small, the chain is probably corrupt
+ if((SIZE_T)currentEBP < (SIZE_T)0x10000)
+ break;
+
+ if((SIZE_T)currentEBP < (SIZE_T)lastEBP)
+ {
+ break;
+ }
+
+ // Skip the top N frames
+ if(skipTopNFrames) {
+ skipTopNFrames--;
+ continue;
+ }
+
+ // Save the Return Address for symbol decoding
+ Append(lastEBP->m_ReturnAddress);
+ }
+ }
+#else
+ CONTEXT ctx;
+ ClrCaptureContext(&ctx);
+ UINT_PTR ControlPc = 0;
+ UINT_PTR CurrentSP = 0, PrevSP = 0;
+
+ while(1)
+ {
+ // Unwind to the caller
+ ControlPc = Thread::VirtualUnwindCallFrame(&ctx);
+
+            // Capture the current SP so we can detect lack of progress (e.g., recursion) below
+ CurrentSP = (UINT_PTR)GetSP(&ctx);
+
+            // Stop when unwinding fails or the SP makes no progress
+ if ( ControlPc == 0 || ( PrevSP == CurrentSP ) )
+ {
+ break;
+ }
+
+ // Skip the top N frames
+ if ( skipTopNFrames ) {
+ skipTopNFrames--;
+ continue;
+ }
+
+ // Add the stack frame to the list
+ Append(ControlPc);
+
+ PrevSP = CurrentSP;
+ }
+#endif //_TARGET_X86_
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+ pThread->MarkEtwStackWalkCompleted();
+#endif //!DACCESS_COMPILE
+
+ return ETW::SamplingLog::Completed;
+}
+
+#endif // !FEATURE_REDHAWK
+
+/****************************************************************************/
+/* Methods that are called from the runtime */
+/****************************************************************************/
+
+/****************************************************************************/
+/* Methods for rundown events */
+/****************************************************************************/
+
+/***************************************************************************/
+/* This function should be called from the event tracing callback routine
+ when the private CLR provider is enabled */
+/***************************************************************************/
+
+#ifndef FEATURE_REDHAWK
+
+VOID ETW::GCLog::GCSettingsEvent()
+{
+ if (GCHeap::IsGCHeapInitialized())
+ {
+ if (ETW_TRACING_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context,
+ GCSettings))
+ {
+ ETW::GCLog::ETW_GC_INFO Info;
+
+ Info.GCSettings.ServerGC = GCHeap::IsServerHeap ();
+ Info.GCSettings.SegmentSize = GCHeap::GetGCHeap()->GetValidSegmentSize (FALSE);
+ Info.GCSettings.LargeObjectSegmentSize = GCHeap::GetGCHeap()->GetValidSegmentSize (TRUE);
+ FireEtwGCSettings_V1(Info.GCSettings.SegmentSize, Info.GCSettings.LargeObjectSegmentSize, Info.GCSettings.ServerGC, GetClrInstanceId());
+ }
+ GCHeap::GetGCHeap()->TraceGCSegments();
+ }
+};
+
+#endif // !FEATURE_REDHAWK
+
+
+//---------------------------------------------------------------------------------------
+// Code for sending GC heap object events is generally the same for both FEATURE_REDHAWK
+// and !FEATURE_REDHAWK builds
+//---------------------------------------------------------------------------------------
+
+bool s_forcedGCInProgress = false;
+class ForcedGCHolder
+{
+public:
+ ForcedGCHolder() { LIMITED_METHOD_CONTRACT; s_forcedGCInProgress = true; }
+ ~ForcedGCHolder() { LIMITED_METHOD_CONTRACT; s_forcedGCInProgress = false; }
+};
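+
+// Usage sketch (this mirrors ForceGCForDiagnostics below): the holder flips
+// s_forcedGCInProgress for the duration of the induced GC, which is exactly what
+// the ShouldWalk* helpers below test:
+//
+//     {
+//         ForcedGCHolder forcedGCHolder;
+//         GCHeap::GetGCHeap()->GarbageCollect(...);
+//     }   // destructor clears the flag even if GarbageCollect throws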
+
+BOOL ETW::GCLog::ShouldWalkStaticsAndCOMForEtw()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return s_forcedGCInProgress &&
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_GCHEAPDUMP_KEYWORD);
+}
+
+// Simple helpers called by the GC to decide whether it needs to do a walk of heap
+// objects and / or roots.
+
+BOOL ETW::GCLog::ShouldWalkHeapObjectsForEtw()
+{
+ LIMITED_METHOD_CONTRACT;
+ return s_forcedGCInProgress &&
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_GCHEAPDUMP_KEYWORD);
+}
+
+BOOL ETW::GCLog::ShouldWalkHeapRootsForEtw()
+{
+ LIMITED_METHOD_CONTRACT;
+ return s_forcedGCInProgress &&
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_GCHEAPDUMP_KEYWORD);
+}
+
+BOOL ETW::GCLog::ShouldTrackMovementForEtw()
+{
+ LIMITED_METHOD_CONTRACT;
+ return ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_GCHEAPSURVIVALANDMOVEMENT_KEYWORD);
+}
+
+// Batches the list of moved/surviving references for the GCBulkMovedObjectRanges /
+// GCBulkSurvivingObjectRanges events
+struct EtwGcMovementContext
+{
+public:
+ // An instance of EtwGcMovementContext is dynamically allocated and stored
+ // inside of MovedReferenceContextForEtwAndProfapi, which in turn is dynamically
+ // allocated and pointed to by a profiling_context pointer created by the GC on the stack.
+ // This is used to batch and send GCBulkSurvivingObjectRanges events and
+ // GCBulkMovedObjectRanges events. This method is passed a pointer to
+ // MovedReferenceContextForEtwAndProfapi::pctxEtw; if non-NULL it gets returned;
+ // else, a new EtwGcMovementContext is allocated, stored in that pointer, and
+ // then returned. Callers should test for NULL, which can be returned if out of
+ // memory
+ static EtwGcMovementContext * GetOrCreateInGCContext(EtwGcMovementContext ** ppContext)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(ppContext != NULL);
+
+ EtwGcMovementContext * pContext = *ppContext;
+ if (pContext == NULL)
+ {
+ pContext = new (nothrow) EtwGcMovementContext;
+ *ppContext = pContext;
+ }
+ return pContext;
+ }
+
+ EtwGcMovementContext() :
+ iCurBulkSurvivingObjectRanges(0),
+ iCurBulkMovedObjectRanges(0)
+ {
+ LIMITED_METHOD_CONTRACT;
+ Clear();
+ }
+
+ // Resets structure for reuse on construction, and after each flush.
+ // (Intentionally leave iCurBulk* as is, since they persist across flushes within a GC.)
+ void Clear()
+ {
+ LIMITED_METHOD_CONTRACT;
+ cBulkSurvivingObjectRanges = 0;
+ cBulkMovedObjectRanges = 0;
+ ZeroMemory(rgGCBulkSurvivingObjectRanges, sizeof(rgGCBulkSurvivingObjectRanges));
+ ZeroMemory(rgGCBulkMovedObjectRanges, sizeof(rgGCBulkMovedObjectRanges));
+ }
+
+ //---------------------------------------------------------------------------------------
+ // GCBulkSurvivingObjectRanges
+ //---------------------------------------------------------------------------------------
+
+ // Sequence number for each GCBulkSurvivingObjectRanges event
+ UINT iCurBulkSurvivingObjectRanges;
+
+ // Number of surviving object ranges currently filled out in rgGCBulkSurvivingObjectRanges array
+ UINT cBulkSurvivingObjectRanges;
+
+ // Struct array containing the primary data for each GCBulkSurvivingObjectRanges
+ // event. Fix the size so the total event stays well below the 64K limit (leaving
+ // lots of room for non-struct fields that come before the values data)
+ EventStructGCBulkSurvivingObjectRangesValue rgGCBulkSurvivingObjectRanges[
+ (cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkSurvivingObjectRangesValue)];
+
+ //---------------------------------------------------------------------------------------
+ // GCBulkMovedObjectRanges
+ //---------------------------------------------------------------------------------------
+
+ // Sequence number for each GCBulkMovedObjectRanges event
+ UINT iCurBulkMovedObjectRanges;
+
+    // Number of moved object ranges currently filled out in rgGCBulkMovedObjectRanges array
+ UINT cBulkMovedObjectRanges;
+
+ // Struct array containing the primary data for each GCBulkMovedObjectRanges
+ // event. Fix the size so the total event stays well below the 64K limit (leaving
+ // lots of room for non-struct fields that come before the values data)
+ EventStructGCBulkMovedObjectRangesValue rgGCBulkMovedObjectRanges[
+ (cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkMovedObjectRangesValue)];
+};
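+
+// Back-of-the-envelope sizing (an assumption-laden sketch: cbMaxEtwEvent is taken
+// to be the 64K ETW payload limit and each value struct to be three 8-byte fields,
+// i.e. 24 bytes): (0x10000 - 0x100) / 24 is roughly 2,700 ranges per event.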
+
+// Contains above struct for ETW, plus extra info (opaque to us) used by the profiling
+// API to track its own information.
+struct MovedReferenceContextForEtwAndProfapi
+{
+ // An instance of MovedReferenceContextForEtwAndProfapi is dynamically allocated and
+ // pointed to by a profiling_context pointer created by the GC on the stack. This is used to
+ // batch and send GCBulkSurvivingObjectRanges events and GCBulkMovedObjectRanges
+ // events and the corresponding callbacks for profapi profilers. This method is
+ // passed a pointer to a MovedReferenceContextForEtwAndProfapi; if non-NULL it gets
+ // returned; else, a new MovedReferenceContextForEtwAndProfapi is allocated, stored
+ // in that pointer, and then returned. Callers should test for NULL, which can be
+ // returned if out of memory
+ static MovedReferenceContextForEtwAndProfapi * CreateInGCContext(LPVOID pvContext)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pvContext != NULL);
+
+ MovedReferenceContextForEtwAndProfapi * pContext = *(MovedReferenceContextForEtwAndProfapi **) pvContext;
+
+ // Shouldn't be called if the context was already created. Perhaps someone made
+ // one too many BeginMovedReferences calls, or didn't have an EndMovedReferences
+ // in between?
+ _ASSERTE(pContext == NULL);
+
+ pContext = new (nothrow) MovedReferenceContextForEtwAndProfapi;
+ *(MovedReferenceContextForEtwAndProfapi **) pvContext = pContext;
+
+ return pContext;
+ }
+
+
+ MovedReferenceContextForEtwAndProfapi() :
+ pctxProfAPI(NULL),
+ pctxEtw(NULL)
+
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ LPVOID pctxProfAPI;
+ EtwGcMovementContext * pctxEtw;
+};
+
+
+//---------------------------------------------------------------------------------------
+//
+// Called by the GC for each moved or surviving reference that it encounters. This
+// batches the info into our context's buffer, and flushes that buffer to ETW as it fills
+// up.
+//
+// Arguments:
+// * pbMemBlockStart - Start of moved/surviving block
+// * pbMemBlockEnd - Next pointer after end of moved/surviving block
+// * cbRelocDistance - How far did the block move? (0 for non-compacted / surviving
+// references; negative if moved to earlier addresses)
+// * profilingContext - Where our context is stored
+// * fCompacting - Is this a compacting GC? Used to decide whether to send the moved
+// or surviving event
+//
+
+// static
+void ETW::GCLog::MovedReference(
+ BYTE * pbMemBlockStart,
+ BYTE * pbMemBlockEnd,
+ ptrdiff_t cbRelocDistance,
+ size_t profilingContext,
+ BOOL fCompacting,
+ BOOL fAllowProfApiNotification /* = TRUE */)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK; // EEToProfInterfaceImpl::AllocateMovedReferencesData takes lock
+ }
+ CONTRACTL_END;
+
+ MovedReferenceContextForEtwAndProfapi * pCtxForEtwAndProfapi =
+ (MovedReferenceContextForEtwAndProfapi *) profilingContext;
+ if (pCtxForEtwAndProfapi == NULL)
+ {
+ _ASSERTE(!"MovedReference() encountered a NULL profilingContext");
+ return;
+ }
+
+#ifdef PROFILING_SUPPORTED
+ // ProfAPI
+ if (fAllowProfApiNotification)
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackGC());
+ g_profControlBlock.pProfInterface->MovedReference(pbMemBlockStart,
+ pbMemBlockEnd,
+ cbRelocDistance,
+ &(pCtxForEtwAndProfapi->pctxProfAPI),
+ fCompacting);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ // ETW
+
+ if (!ShouldTrackMovementForEtw())
+ return;
+
+ EtwGcMovementContext * pContext =
+ EtwGcMovementContext::GetOrCreateInGCContext(&pCtxForEtwAndProfapi->pctxEtw);
+ if (pContext == NULL)
+ return;
+
+ if (fCompacting)
+ {
+ // Moved references
+
+ _ASSERTE(pContext->cBulkMovedObjectRanges < _countof(pContext->rgGCBulkMovedObjectRanges));
+ EventStructGCBulkMovedObjectRangesValue * pValue =
+ &pContext->rgGCBulkMovedObjectRanges[pContext->cBulkMovedObjectRanges];
+ pValue->OldRangeBase = pbMemBlockStart;
+ pValue->NewRangeBase = pbMemBlockStart + cbRelocDistance;
+ pValue->RangeLength = pbMemBlockEnd - pbMemBlockStart;
+ pContext->cBulkMovedObjectRanges++;
+
+ // If buffer is now full, empty it into ETW
+ if (pContext->cBulkMovedObjectRanges == _countof(pContext->rgGCBulkMovedObjectRanges))
+ {
+ FireEtwGCBulkMovedObjectRanges(
+ pContext->iCurBulkMovedObjectRanges,
+ pContext->cBulkMovedObjectRanges,
+ GetClrInstanceId(),
+ sizeof(pContext->rgGCBulkMovedObjectRanges[0]),
+ &pContext->rgGCBulkMovedObjectRanges[0]);
+
+ pContext->iCurBulkMovedObjectRanges++;
+ pContext->Clear();
+ }
+ }
+ else
+ {
+ // Surviving references
+
+ _ASSERTE(pContext->cBulkSurvivingObjectRanges < _countof(pContext->rgGCBulkSurvivingObjectRanges));
+ EventStructGCBulkSurvivingObjectRangesValue * pValue =
+ &pContext->rgGCBulkSurvivingObjectRanges[pContext->cBulkSurvivingObjectRanges];
+ pValue->RangeBase = pbMemBlockStart;
+ pValue->RangeLength = pbMemBlockEnd - pbMemBlockStart;
+ pContext->cBulkSurvivingObjectRanges++;
+
+ // If buffer is now full, empty it into ETW
+ if (pContext->cBulkSurvivingObjectRanges == _countof(pContext->rgGCBulkSurvivingObjectRanges))
+ {
+ FireEtwGCBulkSurvivingObjectRanges(
+ pContext->iCurBulkSurvivingObjectRanges,
+ pContext->cBulkSurvivingObjectRanges,
+ GetClrInstanceId(),
+ sizeof(pContext->rgGCBulkSurvivingObjectRanges[0]),
+ &pContext->rgGCBulkSurvivingObjectRanges[0]);
+
+ pContext->iCurBulkSurvivingObjectRanges++;
+ pContext->Clear();
+ }
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Called by the GC just before it begins enumerating plugs. Gives us a chance to
+// allocate our context structure, to allow us to batch plugs before firing events
+// for them
+//
+// Arguments:
+// * pProfilingContext - Points to location on stack (in GC function) where we can
+// store a pointer to the context we allocate
+//
+
+// static
+VOID ETW::GCLog::BeginMovedReferences(size_t * pProfilingContext)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ MovedReferenceContextForEtwAndProfapi::CreateInGCContext(LPVOID(pProfilingContext));
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Called by the GC at the end of a heap walk to give us a place to flush any remaining
+// buffers of data to ETW or the profapi profiler
+//
+// Arguments:
+// profilingContext - Our context we built up during the heap walk
+//
+
+// static
+VOID ETW::GCLog::EndMovedReferences(size_t profilingContext, BOOL fAllowProfApiNotification /* = TRUE */)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ MovedReferenceContextForEtwAndProfapi * pCtxForEtwAndProfapi = (MovedReferenceContextForEtwAndProfapi *) profilingContext;
+ if (pCtxForEtwAndProfapi == NULL)
+ {
+ _ASSERTE(!"EndMovedReferences() encountered a NULL profilingContext");
+ return;
+ }
+
+#ifdef PROFILING_SUPPORTED
+ // ProfAPI
+ if (fAllowProfApiNotification)
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackGC());
+ g_profControlBlock.pProfInterface->EndMovedReferences(&(pCtxForEtwAndProfapi->pctxProfAPI));
+ END_PIN_PROFILER();
+ }
+#endif //PROFILING_SUPPORTED
+
+ // ETW
+
+ if (!ShouldTrackMovementForEtw())
+ return;
+
+ // If context isn't already set up for us, then we haven't been collecting any data
+ // for ETW events.
+ EtwGcMovementContext * pContext = pCtxForEtwAndProfapi->pctxEtw;
+ if (pContext == NULL)
+ return;
+
+ // Flush any remaining moved or surviving range data
+
+ if (pContext->cBulkMovedObjectRanges > 0)
+ {
+ FireEtwGCBulkMovedObjectRanges(
+ pContext->iCurBulkMovedObjectRanges,
+ pContext->cBulkMovedObjectRanges,
+ GetClrInstanceId(),
+ sizeof(pContext->rgGCBulkMovedObjectRanges[0]),
+ &pContext->rgGCBulkMovedObjectRanges[0]);
+ }
+
+ if (pContext->cBulkSurvivingObjectRanges > 0)
+ {
+ FireEtwGCBulkSurvivingObjectRanges(
+ pContext->iCurBulkSurvivingObjectRanges,
+ pContext->cBulkSurvivingObjectRanges,
+ GetClrInstanceId(),
+ sizeof(pContext->rgGCBulkSurvivingObjectRanges[0]),
+ &pContext->rgGCBulkSurvivingObjectRanges[0]);
+ }
+
+ pCtxForEtwAndProfapi->pctxEtw = NULL;
+ delete pContext;
+}
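+
+// The expected call sequence from the GC, assembled from the comments above (an
+// illustrative sketch; argument names are hypothetical):
+//
+//     size_t profilingContext = 0;
+//     ETW::GCLog::BeginMovedReferences(&profilingContext);
+//     // once per moved / surviving block during the heap walk:
+//     ETW::GCLog::MovedReference(pbStart, pbEnd, cbReloc, profilingContext, fCompacting);
+//     // flushes any remaining batches and frees the ETW context:
+//     ETW::GCLog::EndMovedReferences(profilingContext);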
+
+/***************************************************************************/
+/* This implements the public runtime provider's GCHeapCollectKeyword. It
+ performs a full, gen-2, blocking GC. */
+/***************************************************************************/
+VOID ETW::GCLog::ForceGC(LONGLONG l64ClientSequenceNumber)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_REDHAWK
+ if (!IsGarbageCollectorFullyInitialized())
+ return;
+#endif // FEATURE_REDHAWK
+
+ InterlockedExchange64(&s_l64LastClientSequenceNumber, l64ClientSequenceNumber);
+
+ ForceGCForDiagnostics();
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Helper to fire the GCStart event. Figures out which version of GCStart to fire, and
+// includes the client sequence number, if available. Also logs the generation range
+// events.
+//
+// Arguments:
+// pGcInfo - ETW_GC_INFO containing details from GC about this collection
+//
+
+// static
+VOID ETW::GCLog::FireGcStartAndGenerationRanges(ETW_GC_INFO * pGcInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_GC_KEYWORD))
+ {
+ // If the controller specified a client sequence number for us to log with this
+ // GCStart, then retrieve it
+ LONGLONG l64ClientSequenceNumberToLog = 0;
+ if ((s_l64LastClientSequenceNumber != 0) &&
+ (pGcInfo->GCStart.Depth == GCHeap::GetMaxGeneration()) &&
+ (pGcInfo->GCStart.Reason == ETW_GC_INFO::GC_INDUCED))
+ {
+ l64ClientSequenceNumberToLog = InterlockedExchange64(&s_l64LastClientSequenceNumber, 0);
+ }
+
+ FireEtwGCStart_V2(pGcInfo->GCStart.Count, pGcInfo->GCStart.Depth, pGcInfo->GCStart.Reason, pGcInfo->GCStart.Type, GetClrInstanceId(), l64ClientSequenceNumberToLog);
+
+ // Fire an event per range per generation
+ GCHeap *hp = GCHeap::GetGCHeap();
+ hp->DescrGenerationsToProfiler(FireSingleGenerationRangeEvent, NULL /* context */);
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Helper to fire the GCEnd event and the generation range events.
+//
+// Arguments:
+//      Count - Count from the corresponding GCStart event
+//      Depth - Depth from the corresponding GCStart event
+//
+//
+
+// static
+VOID ETW::GCLog::FireGcEndAndGenerationRanges(ULONG Count, ULONG Depth)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_GC_KEYWORD))
+ {
+ // Fire an event per range per generation
+ GCHeap *hp = GCHeap::GetGCHeap();
+ hp->DescrGenerationsToProfiler(FireSingleGenerationRangeEvent, NULL /* context */);
+
+ // GCEnd
+ FireEtwGCEnd_V1(Count, Depth, GetClrInstanceId());
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Callback made by GC when we call GCHeap::DescrGenerationsToProfiler(). This is
+// called once per range per generation, and results in a single ETW event per range per
+// generation.
+//
+// Arguments:
+// context - unused
+// generation - Generation number
+// rangeStart - Where does this range start?
+//      rangeEnd - Where does the used portion of this range end?
+//      rangeEndReserved - Where does the reserved portion of this range end?
+//
+
+// static
+VOID ETW::GCLog::FireSingleGenerationRangeEvent(
+ void * /* context */,
+ int generation,
+ BYTE * rangeStart,
+ BYTE * rangeEnd,
+ BYTE * rangeEndReserved)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY; // can be called even on GC threads
+ PRECONDITION(0 <= generation && generation <= 3);
+ PRECONDITION(CheckPointer(rangeStart));
+ PRECONDITION(CheckPointer(rangeEnd));
+ PRECONDITION(CheckPointer(rangeEndReserved));
+ }
+ CONTRACT_END;
+
+ FireEtwGCGenerationRange((BYTE) generation, rangeStart, rangeEnd - rangeStart, rangeEndReserved - rangeStart, GetClrInstanceId());
+
+ RETURN;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Contains code common to profapi and ETW scenarios where the profiler wants to force
+// the CLR to perform a GC. The important work here is to create a managed thread for
+// the current thread BEFORE the GC begins. On both ETW and profapi threads, there may
+// not yet be a managed thread object. But some scenarios require a managed thread
+// object be present (notably if we need to call into Jupiter during the GC).
+//
+// Return Value:
+// HRESULT indicating success or failure
+//
+// Assumptions:
+// Caller should ensure that the EE has fully started up and that the GC heap is
+// initialized enough to actually perform a GC
+//
+
+// static
+HRESULT ETW::GCLog::ForceGCForDiagnostics()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+
+#ifndef FEATURE_REDHAWK
+ // Caller should ensure we're past startup.
+ _ASSERTE(IsGarbageCollectorFullyInitialized());
+
+ // In immersive apps the GarbageCollect() call below will call into Jupiter,
+ // which will call back into the runtime to track references. This call
+ // chain would cause a Thread object to be created for this thread while code
+ // higher on the stack owns the ThreadStoreLock. This will lead to asserts
+ // since the ThreadStoreLock is non-reentrant. To avoid this we'll create
+ // the Thread object here instead.
+ if (GetThreadNULLOk() == NULL)
+ {
+ HRESULT hr = E_FAIL;
+ SetupThreadNoThrow(&hr);
+ if (FAILED(hr))
+ return hr;
+ }
+
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ EX_TRY
+ {
+ // Need to switch to cooperative mode as the thread will access managed
+ // references (through Jupiter callbacks).
+ GCX_COOP();
+#endif // FEATURE_REDHAWK
+
+ ForcedGCHolder forcedGCHolder;
+
+ hr = GCHeap::GetGCHeap()->GarbageCollect(
+ -1, // all generations should be collected
+ FALSE, // low_memory_p
+ collection_blocking);
+
+#ifndef FEATURE_REDHAWK
+ }
+ EX_CATCH { }
+ EX_END_CATCH(RethrowCorruptingExceptions);
+#endif // FEATURE_REDHAWK
+
+ return hr;
+}
+
+
+
+
+
+
+//---------------------------------------------------------------------------------------
+// WalkStaticsAndCOMForETW walks both CCW/RCW objects and static variables.
+//---------------------------------------------------------------------------------------
+
+VOID ETW::GCLog::WalkStaticsAndCOMForETW()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ BulkTypeEventLogger typeLogger;
+
+ // Walk RCWs/CCWs
+ BulkComLogger comLogger(&typeLogger);
+ comLogger.LogAllComObjects();
+
+ // Walk static variables
+ BulkStaticsLogger staticLogger(&typeLogger);
+ staticLogger.LogAllStatics();
+
+ // Ensure all loggers have written all events, fire type logger last to batch events
+ // (FireBulkComEvent or FireBulkStaticsEvent may queue up additional types).
+ comLogger.FireBulkComEvent();
+ staticLogger.FireBulkStaticsEvent();
+ typeLogger.FireBulkTypeEvent();
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+
+//---------------------------------------------------------------------------------------
+// BulkComLogger: Batches up and logs RCW and CCW roots
+//---------------------------------------------------------------------------------------
+
+BulkComLogger::BulkComLogger(BulkTypeEventLogger *typeLogger)
+ : m_currRcw(0), m_currCcw(0), m_typeLogger(typeLogger), m_etwRcwData(0), m_etwCcwData(0), m_enumResult(0)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_etwRcwData = new EventRCWEntry[kMaxRcwCount];
+ m_etwCcwData = new EventCCWEntry[kMaxCcwCount];
+}
+
+BulkComLogger::~BulkComLogger()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ FireBulkComEvent();
+
+ if (m_etwRcwData)
+ delete [] m_etwRcwData;
+
+ if (m_etwCcwData)
+ delete [] m_etwCcwData;
+
+ if (m_enumResult)
+ {
+ CCWEnumerationEntry *curr = m_enumResult;
+ while (curr)
+ {
+ CCWEnumerationEntry *next = curr->Next;
+ delete curr;
+ curr = next;
+ }
+ }
+}
+
+void BulkComLogger::FireBulkComEvent()
+{
+ WRAPPER_NO_CONTRACT;
+
+ FlushRcw();
+ FlushCcw();
+}
+
+void BulkComLogger::WriteRcw(RCW *pRcw, Object *obj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(pRcw != NULL);
+ PRECONDITION(obj != NULL);
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_currRcw < kMaxRcwCount);
+
+#ifdef FEATURE_COMINTEROP
+ EventRCWEntry &rcw = m_etwRcwData[m_currRcw];
+ rcw.ObjectID = (ULONGLONG)obj;
+ rcw.TypeID = (ULONGLONG)obj->GetTypeHandle().AsTAddr();
+ rcw.IUnk = (ULONGLONG)pRcw->GetIUnknown_NoAddRef();
+ rcw.VTable = (ULONGLONG)pRcw->GetVTablePtr();
+ rcw.RefCount = pRcw->GetRefCount();
+ rcw.Flags = 0;
+
+ if (++m_currRcw >= kMaxRcwCount)
+ FlushRcw();
+#endif
+}
+
+void BulkComLogger::FlushRcw()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_currRcw <= kMaxRcwCount);
+
+ if (m_currRcw == 0)
+ return;
+
+ if (m_typeLogger)
+ {
+ for (int i = 0; i < m_currRcw; ++i)
+ ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(m_typeLogger, m_etwRcwData[i].TypeID, ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime);
+ }
+
+ unsigned short instance = GetClrInstanceId();
+
+ EVENT_DATA_DESCRIPTOR eventData[3];
+ EventDataDescCreate(&eventData[0], &m_currRcw, sizeof(const unsigned int));
+ EventDataDescCreate(&eventData[1], &instance, sizeof(const unsigned short));
+ EventDataDescCreate(&eventData[2], m_etwRcwData, sizeof(EventRCWEntry) * m_currRcw);
+
+ ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRCW, _countof(eventData), eventData);
+ _ASSERTE(result == ERROR_SUCCESS);
+
+ m_currRcw = 0;
+}
+
+void BulkComLogger::WriteCcw(ComCallWrapper *pCcw, Object **handle, Object *obj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(handle != NULL);
+ PRECONDITION(obj != NULL);
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_currCcw < kMaxCcwCount);
+
+#ifdef FEATURE_COMINTEROP
+ IUnknown *iUnk = NULL;
+ int refCount = 0;
+ ULONG jupiterRefCount = 0;
+ ULONG flags = 0;
+
+ if (pCcw)
+ {
+ iUnk = pCcw->GetOuter();
+ if (iUnk == NULL)
+ iUnk = pCcw->GetBasicIP(true);
+
+ refCount = pCcw->GetRefCount();
+ jupiterRefCount = pCcw->GetJupiterRefCount();
+
+ if (pCcw->IsWrapperActive())
+ flags |= EventCCWEntry::Strong;
+
+ if (pCcw->IsPegged())
+ flags |= EventCCWEntry::Pegged;
+ }
+
+ EventCCWEntry &ccw = m_etwCcwData[m_currCcw++];
+ ccw.RootID = (ULONGLONG)handle;
+ ccw.ObjectID = (ULONGLONG)obj;
+ ccw.TypeID = (ULONGLONG)obj->GetTypeHandle().AsTAddr();
+ ccw.IUnk = (ULONGLONG)iUnk;
+ ccw.RefCount = refCount;
+ ccw.JupiterRefCount = jupiterRefCount;
+ ccw.Flags = flags;
+
+ if (m_currCcw >= kMaxCcwCount)
+ FlushCcw();
+#endif
+}
+
+void BulkComLogger::FlushCcw()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_currCcw <= kMaxCcwCount);
+
+ if (m_currCcw == 0)
+ return;
+
+ if (m_typeLogger)
+ {
+ for (int i = 0; i < m_currCcw; ++i)
+ ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(m_typeLogger, m_etwCcwData[i].TypeID, ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime);
+ }
+
+ unsigned short instance = GetClrInstanceId();
+
+ EVENT_DATA_DESCRIPTOR eventData[3];
+ EventDataDescCreate(&eventData[0], &m_currCcw, sizeof(const unsigned int));
+ EventDataDescCreate(&eventData[1], &instance, sizeof(const unsigned short));
+ EventDataDescCreate(&eventData[2], m_etwCcwData, sizeof(EventCCWEntry) * m_currCcw);
+
+ ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootCCW, _countof(eventData), eventData);
+ _ASSERTE(result == ERROR_SUCCESS);
+
+ m_currCcw = 0;
+}
+
+void BulkComLogger::LogAllComObjects()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_COMINTEROP
+ SyncBlockCache *cache = SyncBlockCache::GetSyncBlockCache();
+ if (cache == NULL)
+ return;
+
+ int count = cache->GetTableEntryCount();
+ SyncTableEntry *table = SyncTableEntry::GetSyncTableEntry();
+
+ for (int i = 0; i < count; ++i)
+ {
+ SyncTableEntry &entry = table[i];
+ Object *obj = entry.m_Object.Load();
+ if (obj && entry.m_SyncBlock)
+ {
+ InteropSyncBlockInfo *interop = entry.m_SyncBlock->GetInteropInfoNoCreate();
+ if (interop)
+ {
+ RCW *rcw = interop->GetRawRCW();
+ if (rcw)
+ WriteRcw(rcw, obj);
+ }
+ }
+ }
+
+    // Writing a CCW entry may trigger a GC, which we cannot allow while enumerating
+    // the handle table. Instead, HandleWalkCallback merely records each RefCount
+    // handle found during the enumeration in a list (m_enumResult):
+ Ref_TraceRefCountHandles(BulkComLogger::HandleWalkCallback, LPARAM(this), 0);
+
+ // Now that we have all of the object handles, we will walk all of the handles and write the
+ // etw events.
+ for (CCWEnumerationEntry *curr = m_enumResult; curr; curr = curr->Next)
+ {
+ for (int i = 0; i < curr->Count; ++i)
+ {
+ Object **handle = curr->Handles[i];
+
+ Object *obj = NULL;
+ if (handle == NULL || (obj = *handle) == 0)
+ return;
+
+ ObjHeader *header = obj->GetHeader();
+ _ASSERTE(header != NULL);
+
+            // We can encounter a refcount handle before its CCW has been created;
+            // WriteCcw handles this case by reporting the handle without the CCW data.
+ ComCallWrapper *ccw = NULL;
+
+ // Checking the index ensures that the syncblock is already created. The
+ // PassiveGetSyncBlock function does not check bounds, so we have to be sure
+ // the SyncBlock was already created.
+ int index = header->GetHeaderSyncBlockIndex();
+ if (index > 0)
+ {
+ SyncBlock *syncBlk = header->PassiveGetSyncBlock();
+ InteropSyncBlockInfo *interop = syncBlk->GetInteropInfoNoCreate();
+ if (interop)
+ ccw = interop->GetCCW();
+ }
+
+ WriteCcw(ccw, handle, obj);
+ }
+ }
+
+#endif
+
+}
+
+void BulkComLogger::HandleWalkCallback(Object **handle, LPARAM *pExtraInfo, LPARAM param1, LPARAM param2)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(param1 != NULL); // Should be the "this" pointer for BulkComLogger.
+ PRECONDITION(param2 == 0); // This is set by Ref_TraceRefCountHandles.
+ }
+ CONTRACTL_END;
+
+ // Simple sanity check to ensure the parameters are what we expect them to be.
+ _ASSERTE(param2 == 0);
+
+ if (handle != NULL)
+ ((BulkComLogger*)param1)->AddCcwHandle(handle);
+}
+
+
+
+// Used during CCW enumeration to keep track of all object handles which point to a CCW.
+void BulkComLogger::AddCcwHandle(Object **handle)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(handle != NULL);
+ }
+ CONTRACTL_END;
+
+ if (m_enumResult == NULL)
+ m_enumResult = new CCWEnumerationEntry;
+
+ CCWEnumerationEntry *curr = m_enumResult;
+ while (curr->Next)
+ curr = curr->Next;
+
+ if (curr->Count == _countof(curr->Handles))
+ {
+ curr->Next = new CCWEnumerationEntry;
+ curr = curr->Next;
+ }
+
+ curr->Handles[curr->Count++] = handle;
+}
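+
+// Design note: m_enumResult is a singly-linked list of fixed-size chunks, so
+// recording a new handle never relocates earlier entries; AddCcwHandle walks to
+// the tail and allocates a fresh CCWEnumerationEntry only once the current
+// chunk's Handles[] array is full.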
+
+
+
+
+//---------------------------------------------------------------------------------------
+// BulkStaticsLogger: Batches up and logs static variable roots
+//---------------------------------------------------------------------------------------
+
+
+
+#include "domainfile.h"
+
+BulkStaticsLogger::BulkStaticsLogger(BulkTypeEventLogger *typeLogger)
+ : m_buffer(0), m_used(0), m_count(0), m_domain(0), m_typeLogger(typeLogger)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_buffer = new BYTE[kMaxBytesValues];
+}
+
+BulkStaticsLogger::~BulkStaticsLogger()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_used > 0)
+ FireBulkStaticsEvent();
+
+ if (m_buffer)
+ delete[] m_buffer;
+}
+
+void BulkStaticsLogger::FireBulkStaticsEvent()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_used <= 0 || m_count <= 0)
+ return;
+
+ _ASSERTE(m_domain != NULL);
+
+ unsigned short instance = GetClrInstanceId();
+ unsigned __int64 appDomain = (unsigned __int64)m_domain;
+
+ EVENT_DATA_DESCRIPTOR eventData[4];
+ EventDataDescCreate(&eventData[0], &m_count, sizeof(const unsigned int) );
+ EventDataDescCreate(&eventData[1], &appDomain, sizeof(unsigned __int64) );
+ EventDataDescCreate(&eventData[2], &instance, sizeof(const unsigned short) );
+ EventDataDescCreate(&eventData[3], m_buffer, m_used);
+
+ ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootStaticVar, _countof(eventData), eventData);
+ _ASSERTE(result == ERROR_SUCCESS);
+
+ m_used = 0;
+ m_count = 0;
+}
+
+void BulkStaticsLogger::WriteEntry(AppDomain *domain, Object **address, Object *obj, FieldDesc *fieldDesc)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(domain != NULL);
+ PRECONDITION(address != NULL);
+ PRECONDITION(obj != NULL);
+ PRECONDITION(fieldDesc != NULL);
+ }
+ CONTRACTL_END;
+
+ // Each bulk statics event is for one AppDomain. If we are now inspecting a new domain,
+ // we need to flush the built up events now.
+ if (m_domain != domain)
+ {
+ if (m_domain != NULL)
+ FireBulkStaticsEvent();
+
+ m_domain = domain;
+ }
+
+ ULONGLONG th = (ULONGLONG)obj->GetTypeHandle().AsTAddr();
+ ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(m_typeLogger, th, ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime);
+
+    // We should have at least 512 bytes remaining in the buffer here.
+ int remaining = kMaxBytesValues - m_used;
+ _ASSERTE(kMaxBytesValues - m_used > 512);
+
+ int len = EventStaticEntry::WriteEntry(m_buffer + m_used, remaining, (ULONGLONG)address,
+ (ULONGLONG)obj, th, 0, fieldDesc);
+
+    // If 512 bytes somehow was not enough (this shouldn't happen), WriteEntry fails
+    // and we skip emitting the entry.
+ if (len > 0)
+ {
+ m_used += len;
+ m_count++;
+ }
+
+ // When we are close to running out of buffer, emit the event.
+ if (kMaxBytesValues - m_used < 512)
+ FireBulkStaticsEvent();
+}
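+
+// Design note: WriteEntry flushes eagerly once fewer than 512 bytes remain rather
+// than letting EventStaticEntry::WriteEntry fail mid-entry; together with the
+// per-AppDomain flush above, each GCBulkRootStaticVar event stays single-domain
+// and comfortably under the ETW payload limit.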
+
+void BulkStaticsLogger::LogAllStatics()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Enumerate only active app domains (first parameter). We use the unsafe
+ // iterator here because this method is called under the threadstore lock
+ // and it's safe to use while the runtime is suspended.
+ UnsafeAppDomainIterator appIter(TRUE);
+ appIter.Init();
+ while (appIter.Next())
+ {
+ AppDomain *domain = appIter.GetDomain();
+
+ AppDomain::AssemblyIterator assemblyIter = domain->IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded|kIncludeExecution));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+ while (assemblyIter.Next(pDomainAssembly.This()))
+ {
+ // Make sure the assembly is loaded.
+ if (!pDomainAssembly->IsLoaded())
+ continue;
+
+ CollectibleAssemblyHolder<Assembly *> pAssembly = pDomainAssembly->GetAssembly();
+ DomainModuleIterator modIter = pDomainAssembly->IterateModules(kModIterIncludeLoaded);
+
+ while (modIter.Next())
+ {
+ // Get the domain module from the module/appdomain pair.
+ Module *module = modIter.GetModule();
+ if (module == NULL)
+ continue;
+
+ DomainFile *domainFile = module->FindDomainFile(domain);
+ if (domainFile == NULL)
+ continue;
+
+ // Ensure the module has fully loaded.
+ if (!domainFile->IsActive())
+ continue;
+
+ DomainLocalModule *domainModule = module->GetDomainLocalModule(domain);
+ if (domainModule == NULL)
+ continue;
+
+                // Now iterate all types in this module
+ LookupMap<PTR_MethodTable>::Iterator mtIter = module->EnumerateTypeDefs();
+ while (mtIter.Next())
+ {
+ // I don't think mt can be null here, but the dac does a null check...
+ // IsFullyLoaded should be equivalent to 'GetLoadLevel() == CLASS_LOADED'
+ MethodTable *mt = mtIter.GetElement();
+ if (mt == NULL || !mt->IsFullyLoaded())
+ continue;
+
+ EEClass *cls = mt->GetClass();
+ _ASSERTE(cls != NULL);
+
+ if (cls->GetNumStaticFields() <= 0)
+ continue;
+
+ ApproxFieldDescIterator fieldIter(mt, ApproxFieldDescIterator::STATIC_FIELDS);
+ for (FieldDesc *field = fieldIter.Next(); field != NULL; field = fieldIter.Next())
+ {
+ // Don't want thread local or context local
+ _ASSERTE(field->IsStatic());
+ if (field->IsSpecialStatic() || field->IsEnCNew())
+ continue;
+
+ // Static valuetype values are boxed.
+ CorElementType fieldType = field->GetFieldType();
+ if (fieldType != ELEMENT_TYPE_CLASS && fieldType != ELEMENT_TYPE_VALUETYPE)
+ continue;
+
+ BYTE *base = field->GetBaseInDomainLocalModule(domainModule);
+ if (base == NULL)
+ continue;
+
+ Object **address = (Object**)field->GetStaticAddressHandle(base);
+ Object *obj = NULL;
+ if (address == NULL || ((obj = *address) == NULL))
+ continue;
+
+ WriteEntry(domain, address, *address, field);
+ } // foreach static field
+ }
+ } // foreach domain module
+ } // foreach domain assembly
+ } // foreach AppDomain
+} // BulkStaticsLogger::LogAllStatics
+
+
+
+//---------------------------------------------------------------------------------------
+// BulkTypeValue / BulkTypeEventLogger: These take care of batching up types so they can
+// be logged via ETW in bulk
+//---------------------------------------------------------------------------------------
+
+BulkTypeValue::BulkTypeValue() : cTypeParameters(0), rgTypeParameters()
+#ifdef FEATURE_REDHAWK
+, ullSingleTypeParameter(0)
+#else // FEATURE_REDHAWK
+, sName()
+#endif // FEATURE_REDHAWK
+{
+ LIMITED_METHOD_CONTRACT;
+ ZeroMemory(&fixedSizedData, sizeof(fixedSizedData));
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Clears a BulkTypeValue so it can be reused after the buffer is flushed to ETW
+//
+
+void BulkTypeValue::Clear()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ZeroMemory(&fixedSizedData, sizeof(fixedSizedData));
+ cTypeParameters = 0;
+#ifdef FEATURE_REDHAWK
+ ullSingleTypeParameter = 0;
+ rgTypeParameters.Release();
+#else // FEATURE_REDHAWK
+ sName.Clear();
+ rgTypeParameters.Clear();
+#endif // FEATURE_REDHAWK
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Fire an ETW event for all the types we batched so far, and then reset our state
+// so we can start batching new types at the beginning of the array.
+//
+//
+
+void BulkTypeEventLogger::FireBulkTypeEvent()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_nBulkTypeValueCount == 0)
+ {
+ // No types were batched up, so nothing to send
+ return;
+ }
+
+ // Normally, we'd use the MC-generated FireEtwBulkType for all this gunk, but
+ // it's insufficient as the bulk type event is too complex (arrays of structs of
+ // varying size). So we directly log the event via EventDataDescCreate and
+ // EventWrite
+
+ // We use one descriptor for the count + one for the ClrInstanceID + 4
+ // per batched type (to include fixed-size data + name + param count + param
+ // array). But the system limit of 128 descriptors per event kicks in way
+ // before the 64K event size limit, and we already limit our batch size
+ // (m_nBulkTypeValueCount) to stay within the 128 descriptor limit.
+ EVENT_DATA_DESCRIPTOR EventData[128];
+ UINT16 nClrInstanceID = GetClrInstanceId();
+
+ UINT iDesc = 0;
+
+ _ASSERTE(iDesc < _countof(EventData));
+ EventDataDescCreate(&EventData[iDesc++], &m_nBulkTypeValueCount, sizeof(m_nBulkTypeValueCount));
+
+ _ASSERTE(iDesc < _countof(EventData));
+ EventDataDescCreate(&EventData[iDesc++], &nClrInstanceID, sizeof(nClrInstanceID));
+
+ for (int iTypeData = 0; iTypeData < m_nBulkTypeValueCount; iTypeData++)
+ {
+ // Do fixed-size data as one bulk copy
+ _ASSERTE(iDesc < _countof(EventData));
+ EventDataDescCreate(
+ &EventData[iDesc++],
+ &(m_rgBulkTypeValues[iTypeData].fixedSizedData),
+ sizeof(m_rgBulkTypeValues[iTypeData].fixedSizedData));
+
+ // Do var-sized data individually per field
+
+ // Type name (nonexistent and thus empty on FEATURE_REDHAWK)
+ _ASSERTE(iDesc < _countof(EventData));
+#ifdef FEATURE_REDHAWK
+ EventDataDescCreate(&EventData[iDesc++], W(""), sizeof(WCHAR));
+#else // FEATURE_REDHAWK
+ LPCWSTR wszName = m_rgBulkTypeValues[iTypeData].sName.GetUnicode();
+ EventDataDescCreate(
+ &EventData[iDesc++],
+ (wszName == NULL) ? W("") : wszName,
+ (wszName == NULL) ? sizeof(WCHAR) : (m_rgBulkTypeValues[iTypeData].sName.GetCount() + 1) * sizeof(WCHAR));
+#endif // FEATURE_REDHAWK
+
+ // Type parameter count
+#ifndef FEATURE_REDHAWK
+ m_rgBulkTypeValues[iTypeData].cTypeParameters = m_rgBulkTypeValues[iTypeData].rgTypeParameters.GetCount();
+#endif // FEATURE_REDHAWK
+ _ASSERTE(iDesc < _countof(EventData));
+ EventDataDescCreate(
+ &EventData[iDesc++],
+ &(m_rgBulkTypeValues[iTypeData].cTypeParameters),
+ sizeof(m_rgBulkTypeValues[iTypeData].cTypeParameters));
+
+ // Type parameter array
+ if (m_rgBulkTypeValues[iTypeData].cTypeParameters > 0)
+ {
+ _ASSERTE(iDesc < _countof(EventData));
+ EventDataDescCreate(
+ &EventData[iDesc++],
+#ifdef FEATURE_REDHAWK
+ ((m_rgBulkTypeValues[iTypeData].cTypeParameters == 1) ?
+ &(m_rgBulkTypeValues[iTypeData].ullSingleTypeParameter) :
+ (ULONGLONG *) (m_rgBulkTypeValues[iTypeData].rgTypeParameters)),
+#else
+ m_rgBulkTypeValues[iTypeData].rgTypeParameters.GetElements(),
+#endif
+ sizeof(ULONGLONG) * m_rgBulkTypeValues[iTypeData].cTypeParameters);
+ }
+ }
+
+ Win32EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &BulkType, iDesc, EventData);
+
+ // Reset state
+ m_nBulkTypeValueCount = 0;
+ m_nBulkTypeValueByteCount = 0;
+}
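+
+// Descriptor budget, as simple arithmetic on the comment above: one descriptor
+// for the count, one for the ClrInstanceID, and up to four per type means at
+// most (128 - 2) / 4 = 31 fully-populated types fit in a single event.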
+
+#ifndef FEATURE_REDHAWK
+
+//---------------------------------------------------------------------------------------
+//
+// Batches a single type into the array, flushing the array to ETW if it fills up. Most
+// interaction with the type system (to analyze the type) is done here. This does not
+// recursively batch up any parameter types (for arrays or generics), but does add their
+// TypeHandles to the rgTypeParameters array. LogTypeAndParameters is responsible for
+// initiating any recursive calls to deal with type parameters.
+//
+// Arguments:
+// th - TypeHandle to batch
+//
+// Return Value:
+// Index into array of where this type got batched. -1 if there was a failure.
+//
+
+int BulkTypeEventLogger::LogSingleType(TypeHandle th)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK; // some of the type system stuff can take locks
+ }
+ CONTRACTL_END;
+
+ // If there's no room for another type, flush what we've got
+ if (m_nBulkTypeValueCount == _countof(m_rgBulkTypeValues))
+ {
+ FireBulkTypeEvent();
+ }
+
+ _ASSERTE(m_nBulkTypeValueCount < _countof(m_rgBulkTypeValues));
+
+ if (!th.IsTypeDesc() && th.GetMethodTable()->IsArray())
+ {
+ _ASSERTE(!"BulkTypeEventLogger::LogSingleType called with MethodTable array");
+ return -1;
+ }
+
+ BulkTypeValue * pVal = &m_rgBulkTypeValues[m_nBulkTypeValueCount];
+
+ // Clear out pVal before filling it out (array elements can get reused if there
+ // are enough types that we need to flush to multiple events). Clearing the
+ // contained SBuffer can throw, so deal with exceptions
+ BOOL fSucceeded = FALSE;
+ EX_TRY
+ {
+ pVal->Clear();
+ fSucceeded = TRUE;
+ }
+ EX_CATCH
+ {
+ fSucceeded = FALSE;
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions);
+ if (!fSucceeded)
+ return -1;
+
+ pVal->fixedSizedData.TypeID = (ULONGLONG) th.AsTAddr();
+ pVal->fixedSizedData.ModuleID = (ULONGLONG) (TADDR) th.GetModule();
+ pVal->fixedSizedData.TypeNameID = (th.GetMethodTable() == NULL) ? 0 : th.GetCl();
+ pVal->fixedSizedData.Flags = 0;
+ pVal->fixedSizedData.CorElementType = (BYTE) th.GetInternalCorElementType();
+
+ if (th.IsArray())
+ {
+ // Normal typedesc array
+ pVal->fixedSizedData.Flags |= kEtwTypeFlagsArray;
+
+ // Fetch TypeHandle of array elements
+ fSucceeded = FALSE;
+ EX_TRY
+ {
+ pVal->rgTypeParameters.Append((ULONGLONG) th.AsArray()->GetArrayElementTypeHandle().AsTAddr());
+ fSucceeded = TRUE;
+ }
+ EX_CATCH
+ {
+ fSucceeded = FALSE;
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions);
+ if (!fSucceeded)
+ return -1;
+ }
+ else if (th.IsTypeDesc())
+ {
+ // Non-array Typedescs
+ PTR_TypeDesc pTypeDesc = th.AsTypeDesc();
+ if (pTypeDesc->HasTypeParam())
+ {
+ fSucceeded = FALSE;
+ EX_TRY
+ {
+ pVal->rgTypeParameters.Append((ULONGLONG) pTypeDesc->GetTypeParam().AsTAddr());
+ fSucceeded = TRUE;
+ }
+ EX_CATCH
+ {
+ fSucceeded = FALSE;
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions);
+ if (!fSucceeded)
+ return -1;
+ }
+ }
+ else
+ {
+ // Non-array MethodTable
+
+ PTR_MethodTable pMT = th.AsMethodTable();
+
+ // Make CorElementType more specific if this is a string MT
+ if (pMT->IsString())
+ {
+ pVal->fixedSizedData.CorElementType = ELEMENT_TYPE_STRING;
+ }
+ else if (pMT->IsObjectClass())
+ {
+ pVal->fixedSizedData.CorElementType = ELEMENT_TYPE_OBJECT;
+ }
+
+ // Generic arguments
+ DWORD cTypeParameters = pMT->GetNumGenericArgs();
+ if (cTypeParameters > 0)
+ {
+ Instantiation inst = pMT->GetInstantiation();
+ fSucceeded = FALSE;
+ EX_TRY
+ {
+ for (DWORD i=0; i < cTypeParameters; i++)
+ {
+ pVal->rgTypeParameters.Append((ULONGLONG) inst[i].AsTAddr());
+ }
+ fSucceeded = TRUE;
+ }
+ EX_CATCH
+ {
+ fSucceeded = FALSE;
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions);
+ if (!fSucceeded)
+ return -1;
+ }
+
+ if (pMT->HasFinalizer())
+ {
+ pVal->fixedSizedData.Flags |= kEtwTypeFlagsFinalizable;
+ }
+ if (pMT->IsDelegate())
+ {
+ pVal->fixedSizedData.Flags |= kEtwTypeFlagsDelegate;
+ }
+ if (pMT->IsComObjectType())
+ {
+ pVal->fixedSizedData.Flags |= kEtwTypeFlagsExternallyImplementedCOMObject;
+ }
+ }
+
+ // If the profiler wants it, construct a name. Always normalize the string (even if
+ // type names are not requested) so that calls to sName.GetCount() can't throw
+ EX_TRY
+ {
+ if (ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_GCHEAPANDTYPENAMES_KEYWORD))
+ {
+ th.GetName(pVal->sName);
+ }
+ pVal->sName.Normalize();
+ }
+ EX_CATCH
+ {
+ // If this failed, the name remains empty, which is ok; the event just
+ // won't have a name in it.
+ pVal->sName.Clear();
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions);
+
+ // Now that we know the full size of this type's data, see if it fits in our
+ // batch or whether we need to flush
+
+ int cbVal = pVal->GetByteCountInEvent();
+ if (cbVal > kMaxBytesTypeValues)
+ {
+ // This type is apparently so huge, it's too big to squeeze into an event, even
+ // if it were the only type batched in the whole event. Bail
+ _ASSERTE(!"Type too big to log via ETW");
+ return -1;
+ }
+
+ if (m_nBulkTypeValueByteCount + cbVal > kMaxBytesTypeValues)
+ {
+ // Although this type fits into the array, its size is so big that the entire
+ // array can't be logged via ETW. So flush the array, and start over by
+ // calling ourselves--this refetches the type info and puts it at the
+ // beginning of the array. Since we know this type is small enough to be
+ // batched into an event on its own, this recursive call will not try to
+ // call itself again.
+ FireBulkTypeEvent();
+ return LogSingleType(th);
+ }
+
+ // The type fits into the batch, so update our state
+ m_nBulkTypeValueCount++;
+ m_nBulkTypeValueByteCount += cbVal;
+ return m_nBulkTypeValueCount - 1; // Index of type we just added
+}
+
+//---------------------------------------------------------------------------------------
+//
+// High-level method to batch a type and (recursively) its type parameters, flushing to
+// ETW as needed. This is called by (static)
+// ETW::TypeSystemLog::LogTypeAndParametersIfNecessary, which is what clients use to log
+// type events
+//
+// Arguments:
+// * thAsAddr - Type to batch
+// * typeLogBehavior - Reminder of whether the type system log lock is held
+//        (useful if we need to recursively call back into TypeSystemLog), and whether
+// we even care to check if the type was already logged
+//
+
+void BulkTypeEventLogger::LogTypeAndParameters(ULONGLONG thAsAddr, ETW::TypeSystemLog::TypeLogBehavior typeLogBehavior)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK; // LogSingleType can take locks
+ }
+ CONTRACTL_END;
+
+ TypeHandle th = TypeHandle::FromTAddr((TADDR) thAsAddr);
+
+ // Batch up this type. This grabs useful info about the type, including any
+ // type parameters it may have, and sticks it in m_rgBulkTypeValues
+ int iBulkTypeEventData = LogSingleType(th);
+ if (iBulkTypeEventData == -1)
+ {
+ // There was a failure trying to log the type, so don't bother with its type
+ // parameters
+ return;
+ }
+
+ // Look at the type info we just batched, so we can get the type parameters
+ BulkTypeValue * pVal = &m_rgBulkTypeValues[iBulkTypeEventData];
+
+ // We're about to recursively call ourselves for the type parameters, so make a
+ // local copy of their type handles first (else, as we log them we could flush
+ // and clear out m_rgBulkTypeValues, thus trashing pVal)
+
+ StackSArray<ULONGLONG> rgTypeParameters;
+ DWORD cParams = pVal->rgTypeParameters.GetCount();
+
+ BOOL fSucceeded = FALSE;
+ EX_TRY
+ {
+ for (COUNT_T i = 0; i < cParams; i++)
+ {
+ rgTypeParameters.Append(pVal->rgTypeParameters[i]);
+ }
+ fSucceeded = TRUE;
+ }
+ EX_CATCH
+ {
+ fSucceeded = FALSE;
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions);
+ if (!fSucceeded)
+ return;
+
+ // Before we recurse, adjust the special-cased type-log behavior that allows a
+ // top-level type to be logged without lookup, but still requires lookups to avoid
+ // dupes of type parameters
+ if (typeLogBehavior == ETW::TypeSystemLog::kTypeLogBehaviorAlwaysLogTopLevelType)
+ typeLogBehavior = ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime;
+
+ // Recursively log any referenced parameter types
+ for (COUNT_T i=0; i < cParams; i++)
+ {
+ ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(this, rgTypeParameters[i], typeLogBehavior);
+ }
+}
+
+#endif // FEATURE_REDHAWK
+
+// Holds state for batching up roots, nodes, edges, and types as the GC walks the heap
+// at the end of a collection.
+class EtwGcHeapDumpContext
+{
+public:
+ // An instance of EtwGcHeapDumpContext is dynamically allocated and stored inside of
+ // ProfilingScanContext and ProfilerWalkHeapContext, which are context structures
+ // that the GC heap walker sends back to the callbacks. This method is passed a
+ // pointer to ProfilingScanContext::pvEtwContext or
+ // ProfilerWalkHeapContext::pvEtwContext; if non-NULL it gets returned; else, a new
+ // EtwGcHeapDumpContext is allocated, stored in that pointer, and then returned.
+ // Callers should test for NULL, which can be returned if out of memory
+ static EtwGcHeapDumpContext * GetOrCreateInGCContext(LPVOID * ppvEtwContext)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(ppvEtwContext != NULL);
+
+ EtwGcHeapDumpContext * pContext = (EtwGcHeapDumpContext *) *ppvEtwContext;
+ if (pContext == NULL)
+ {
+ pContext = new (nothrow) EtwGcHeapDumpContext;
+ *ppvEtwContext = pContext;
+ }
+ return pContext;
+ }
+
+ EtwGcHeapDumpContext() :
+ iCurBulkRootEdge(0),
+ iCurBulkRootConditionalWeakTableElementEdge(0),
+ iCurBulkNodeEvent(0),
+ iCurBulkEdgeEvent(0),
+ bulkTypeEventLogger()
+ {
+ LIMITED_METHOD_CONTRACT;
+ ClearRootEdges();
+ ClearRootConditionalWeakTableElementEdges();
+ ClearNodes();
+ ClearEdges();
+ }
+
+ // These helpers clear the individual buffers, for use after a flush and on
+ // construction. They intentionally leave the indices (iCur*) alone, since they
+ // persist across flushes within a GC
+
+ void ClearRootEdges()
+ {
+ LIMITED_METHOD_CONTRACT;
+ cGcBulkRootEdges = 0;
+ ZeroMemory(rgGcBulkRootEdges, sizeof(rgGcBulkRootEdges));
+ }
+
+ void ClearRootConditionalWeakTableElementEdges()
+ {
+ LIMITED_METHOD_CONTRACT;
+ cGCBulkRootConditionalWeakTableElementEdges = 0;
+ ZeroMemory(rgGCBulkRootConditionalWeakTableElementEdges, sizeof(rgGCBulkRootConditionalWeakTableElementEdges));
+ }
+
+ void ClearNodes()
+ {
+ LIMITED_METHOD_CONTRACT;
+ cGcBulkNodeValues = 0;
+ ZeroMemory(rgGcBulkNodeValues, sizeof(rgGcBulkNodeValues));
+ }
+
+ void ClearEdges()
+ {
+ LIMITED_METHOD_CONTRACT;
+ cGcBulkEdgeValues = 0;
+ ZeroMemory(rgGcBulkEdgeValues, sizeof(rgGcBulkEdgeValues));
+ }
+
+ //---------------------------------------------------------------------------------------
+ // GCBulkRootEdge
+ //
+ // A "root edge" is the relationship between a source "GCRootID" (i.e., stack
+ // variable, handle, static, etc.) and the target "RootedNodeAddress" (the managed
+ // object that gets rooted).
+ //
+ //---------------------------------------------------------------------------------------
+
+ // Sequence number for each GCBulkRootEdge event
+ UINT iCurBulkRootEdge;
+
+ // Number of root edges currently filled out in rgGcBulkRootEdges array
+ UINT cGcBulkRootEdges;
+
+ // Struct array containing the primary data for each GCBulkRootEdge event. Fix the size so
+ // the total event stays well below the 64K
+ // limit (leaving lots of room for non-struct fields that come before the root edge data)
+ EventStructGCBulkRootEdgeValue rgGcBulkRootEdges[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkRootEdgeValue)];
+
+
+ //---------------------------------------------------------------------------------------
+ // GCBulkRootConditionalWeakTableElementEdge
+ //
+ // These describe dependent handles, which simulate an edge connecting a key NodeID
+ // to a value NodeID.
+ //
+ //---------------------------------------------------------------------------------------
+
+ // Sequence number for each GCBulkRootConditionalWeakTableElementEdge event
+ UINT iCurBulkRootConditionalWeakTableElementEdge;
+
+ // Number of root edges currently filled out in rgGCBulkRootConditionalWeakTableElementEdges array
+ UINT cGCBulkRootConditionalWeakTableElementEdges;
+
+ // Struct array containing the primary data for each GCBulkRootConditionalWeakTableElementEdge event. Fix the size so
+ // the total event stays well below the 64K
+ // limit (leaving lots of room for non-struct fields that come before the root edge data)
+ EventStructGCBulkRootConditionalWeakTableElementEdgeValue rgGCBulkRootConditionalWeakTableElementEdges
+ [(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkRootConditionalWeakTableElementEdgeValue)];
+
+ //---------------------------------------------------------------------------------------
+ // GCBulkNode
+ //
+ // A "node" is ANY managed object sitting on the heap, including RootedNodeAddresses
+ // as well as leaf nodes.
+ //
+ //---------------------------------------------------------------------------------------
+
+ // Sequence number for each GCBulkNode event
+ UINT iCurBulkNodeEvent;
+
+ // Number of nodes currently filled out in rgGcBulkNodeValues array
+ UINT cGcBulkNodeValues;
+
+ // Struct array containing the primary data for each GCBulkNode event. Fix the size so
+ // the total event stays well below the 64K
+ // limit (leaving lots of room for non-struct fields that come before the node data)
+ EventStructGCBulkNodeValue rgGcBulkNodeValues[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkNodeValue)];
+
+ //---------------------------------------------------------------------------------------
+ // GCBulkEdge
+ //
+ // An "edge" is the relationship between a source node and its referenced target
+ // node. Edges are reported in bulk, separately from Nodes, but it is expected that
+ // the consumer read the Node and Edge streams together: take the first node
+ // from the Node stream, then read EdgeCount entries from the Edge stream; these
+ // are all of that Node's targets. Then take the next node in the Node stream,
+ // and read the next entries in the Edge stream (using this Node's EdgeCount to
+ // determine how many) to find all of its targets. This continues until the Node
+ // and Edge streams have been fully read.
+ //
+ // GCBulkRootEdges are not duplicated in the GCBulkEdge events. GCBulkEdge events
+ // begin at the GCBulkRootEdge.RootedNodeAddress and move forward.
+ //
+ //---------------------------------------------------------------------------------------
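+
+ // An illustrative consumer-side sketch of the Node/Edge pairing described
+ // above (the loop and names here are hypothetical, not part of the runtime):
+ //
+ //     for (UINT iNode = 0, iEdge = 0; iNode < cNodes; iNode++)
+ //     {
+ //         for (UINT j = 0; j < rgNodes[iNode].EdgeCount; j++, iEdge++)
+ //             ProcessReference(rgNodes[iNode].Address, rgEdges[iEdge].Value);
+ //     }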
+
+ // Sequence number for each GCBulkEdge event
+ UINT iCurBulkEdgeEvent;
+
+ // Number of nodes currently filled out in rgGcBulkEdgeValues array
+ UINT cGcBulkEdgeValues;
+
+ // Struct array containing the primary data for each GCBulkEdge event. Fix the size so
+ // the total event stays well below the 64K
+ // limit (leaving lots of room for non-struct fields that come before the edge data)
+ EventStructGCBulkEdgeValue rgGcBulkEdgeValues[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkEdgeValue)];
+
+
+ //---------------------------------------------------------------------------------------
+ // BulkType
+ //
+ // Types are a bit more complicated to batch up, since their data is of varying
+ // size. BulkTypeEventLogger takes care of the pesky details for us
+ //---------------------------------------------------------------------------------------
+
+ BulkTypeEventLogger bulkTypeEventLogger;
+};
+
+
+
+//---------------------------------------------------------------------------------------
+//
+// Called during a heap walk for each root reference encountered. Batches up the root in
+// the ETW context
+//
+// Arguments:
+// * pvHandle - If the root is a handle, this points to the handle
+// * pRootedNode - Points to object that is rooted
+// * pSecondaryNodeForDependentHandle - For dependent handles, this is the
+// secondary object
+// * fDependentHandle - nonzero iff this is for a dependent handle
+// * profilingScanContext - The shared profapi/etw context built up during the heap walk.
+// * dwGCFlags - Bitmask of "GC_"-style flags set by GC
+// * rootFlags - Bitmask of EtwGCRootFlags describing the root
+//
+
+// static
+VOID ETW::GCLog::RootReference(
+ LPVOID pvHandle,
+ Object * pRootedNode,
+ Object * pSecondaryNodeForDependentHandle,
+ BOOL fDependentHandle,
+ ProfilingScanContext * profilingScanContext,
+ DWORD dwGCFlags,
+ DWORD rootFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ EtwGcHeapDumpContext * pContext =
+ EtwGcHeapDumpContext::GetOrCreateInGCContext(&profilingScanContext->pvEtwContext);
+ if (pContext == NULL)
+ return;
+
+ // Determine root kind, root ID, and handle-specific flags
+ LPVOID pvRootID = NULL;
+ BYTE nRootKind = (BYTE) profilingScanContext->dwEtwRootKind;
+ switch (nRootKind)
+ {
+ case kEtwGCRootKindStack:
+#ifndef FEATURE_REDHAWK
+ pvRootID = profilingScanContext->pMD;
+#endif // !FEATURE_REDHAWK
+ break;
+
+ case kEtwGCRootKindHandle:
+ pvRootID = pvHandle;
+ break;
+
+ case kEtwGCRootKindFinalizer:
+ _ASSERTE(pvRootID == NULL);
+ break;
+
+ case kEtwGCRootKindOther:
+ default:
+ _ASSERTE(nRootKind == kEtwGCRootKindOther);
+ _ASSERTE(pvRootID == NULL);
+ break;
+ }
+
+ // Convert GC root flags to ETW root flags
+ if (dwGCFlags & GC_CALL_INTERIOR)
+ rootFlags |= kEtwGCRootFlagsInterior;
+ if (dwGCFlags & GC_CALL_PINNED)
+ rootFlags |= kEtwGCRootFlagsPinning;
+
+ // Add root edge to appropriate buffer
+ if (fDependentHandle)
+ {
+ _ASSERTE(pContext->cGCBulkRootConditionalWeakTableElementEdges <
+ _countof(pContext->rgGCBulkRootConditionalWeakTableElementEdges));
+ EventStructGCBulkRootConditionalWeakTableElementEdgeValue * pRCWTEEdgeValue =
+ &pContext->rgGCBulkRootConditionalWeakTableElementEdges[pContext->cGCBulkRootConditionalWeakTableElementEdges];
+ pRCWTEEdgeValue->GCKeyNodeID = pRootedNode;
+ pRCWTEEdgeValue->GCValueNodeID = pSecondaryNodeForDependentHandle;
+ pRCWTEEdgeValue->GCRootID = pvRootID;
+ pContext->cGCBulkRootConditionalWeakTableElementEdges++;
+
+ // If RCWTE edge buffer is now full, empty it into ETW
+ if (pContext->cGCBulkRootConditionalWeakTableElementEdges ==
+ _countof(pContext->rgGCBulkRootConditionalWeakTableElementEdges))
+ {
+ FireEtwGCBulkRootConditionalWeakTableElementEdge(
+ pContext->iCurBulkRootConditionalWeakTableElementEdge,
+ pContext->cGCBulkRootConditionalWeakTableElementEdges,
+ GetClrInstanceId(),
+ sizeof(pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]),
+ &pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]);
+
+ pContext->iCurBulkRootConditionalWeakTableElementEdge++;
+ pContext->ClearRootConditionalWeakTableElementEdges();
+ }
+ }
+ else
+ {
+ _ASSERTE(pContext->cGcBulkRootEdges < _countof(pContext->rgGcBulkRootEdges));
+ EventStructGCBulkRootEdgeValue * pBulkRootEdgeValue = &pContext->rgGcBulkRootEdges[pContext->cGcBulkRootEdges];
+ pBulkRootEdgeValue->RootedNodeAddress = pRootedNode;
+ pBulkRootEdgeValue->GCRootKind = nRootKind;
+ pBulkRootEdgeValue->GCRootFlag = rootFlags;
+ pBulkRootEdgeValue->GCRootID = pvRootID;
+ pContext->cGcBulkRootEdges++;
+
+ // If root edge buffer is now full, empty it into ETW
+ if (pContext->cGcBulkRootEdges == _countof(pContext->rgGcBulkRootEdges))
+ {
+ FireEtwGCBulkRootEdge(
+ pContext->iCurBulkRootEdge,
+ pContext->cGcBulkRootEdges,
+ GetClrInstanceId(),
+ sizeof(pContext->rgGcBulkRootEdges[0]),
+ &pContext->rgGcBulkRootEdges[0]);
+
+ pContext->iCurBulkRootEdge++;
+ pContext->ClearRootEdges();
+ }
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Called during a heap walk for each object reference encountered. Batches up the
+// corresponding node, edges, and type data for the ETW events.
+//
+// Arguments:
+// * profilerWalkHeapContext - The shared profapi/etw context built up during the heap walk.
+// * pObjReferenceSource - Object doing the pointing
+// * typeID - Type of pObjReferenceSource
+// * cRefs - Count of objects being pointed to
+// * rgObjReferenceTargets - Array of objects being pointed to
+//
+
+// static
+VOID ETW::GCLog::ObjectReference(
+ ProfilerWalkHeapContext * profilerWalkHeapContext,
+ Object * pObjReferenceSource,
+ ULONGLONG typeID,
+ ULONGLONG cRefs,
+ Object ** rgObjReferenceTargets)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ // LogTypeAndParametersIfNecessary can take a lock
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ EtwGcHeapDumpContext * pContext =
+ EtwGcHeapDumpContext::GetOrCreateInGCContext(&profilerWalkHeapContext->pvEtwContext);
+ if (pContext == NULL)
+ return;
+
+ //---------------------------------------------------------------------------------------
+ // GCBulkNode events
+ //---------------------------------------------------------------------------------------
+
+ // Add Node (pObjReferenceSource) to buffer
+ _ASSERTE(pContext->cGcBulkNodeValues < _countof(pContext->rgGcBulkNodeValues));
+ EventStructGCBulkNodeValue * pBulkNodeValue = &pContext->rgGcBulkNodeValues[pContext->cGcBulkNodeValues];
+ pBulkNodeValue->Address = pObjReferenceSource;
+ pBulkNodeValue->Size = pObjReferenceSource->GetSize();
+ pBulkNodeValue->TypeID = typeID;
+ pBulkNodeValue->EdgeCount = cRefs;
+ pContext->cGcBulkNodeValues++;
+
+ // If Node buffer is now full, empty it into ETW
+ if (pContext->cGcBulkNodeValues == _countof(pContext->rgGcBulkNodeValues))
+ {
+ FireEtwGCBulkNode(
+ pContext->iCurBulkNodeEvent,
+ pContext->cGcBulkNodeValues,
+ GetClrInstanceId(),
+ sizeof(pContext->rgGcBulkNodeValues[0]),
+ &pContext->rgGcBulkNodeValues[0]);
+
+ pContext->iCurBulkNodeEvent++;
+ pContext->ClearNodes();
+ }
+
+ //---------------------------------------------------------------------------------------
+ // BulkType events
+ //---------------------------------------------------------------------------------------
+
+ // We send type information as necessary--only for nodes, and only for nodes that we
+ // haven't already sent type info for
+ if (typeID != 0)
+ {
+ ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(
+ &pContext->bulkTypeEventLogger, // Batch up this type with others to minimize events
+ typeID,
+
+ // During heap walk, GC holds the lock for us, so we can directly enter the
+ // hash to see if the type has already been logged
+ ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime
+ );
+ }
+
+ //---------------------------------------------------------------------------------------
+ // GCBulkEdge events
+ //---------------------------------------------------------------------------------------
+
+ // Add Edges (rgObjReferenceTargets) to buffer. Buffer could fill up before all edges
+ // are added (it could even fill up multiple times during this one call if there are
+ // a lot of edges), so empty Edge buffer into ETW as we go along, as many times as we
+ // need.
+
+ for (ULONGLONG i=0; i < cRefs; i++)
+ {
+ _ASSERTE(pContext->cGcBulkEdgeValues < _countof(pContext->rgGcBulkEdgeValues));
+ EventStructGCBulkEdgeValue * pBulkEdgeValue = &pContext->rgGcBulkEdgeValues[pContext->cGcBulkEdgeValues];
+ pBulkEdgeValue->Value = rgObjReferenceTargets[i];
+ // FUTURE: ReferencingFieldID
+ pBulkEdgeValue->ReferencingFieldID = 0;
+ pContext->cGcBulkEdgeValues++;
+
+ // If Edge buffer is now full, empty it into ETW
+ if (pContext->cGcBulkEdgeValues == _countof(pContext->rgGcBulkEdgeValues))
+ {
+ FireEtwGCBulkEdge(
+ pContext->iCurBulkEdgeEvent,
+ pContext->cGcBulkEdgeValues,
+ GetClrInstanceId(),
+ sizeof(pContext->rgGcBulkEdgeValues[0]),
+ &pContext->rgGcBulkEdgeValues[0]);
+
+ pContext->iCurBulkEdgeEvent++;
+ pContext->ClearEdges();
+ }
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Called by GC at end of heap dump to give us a convenient time to flush any remaining
+// buffers of data to ETW
+//
+// Arguments:
+// profilerWalkHeapContext - Context containing data we've batched up
+//
+
+// static
+VOID ETW::GCLog::EndHeapDump(ProfilerWalkHeapContext * profilerWalkHeapContext)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // If context isn't already set up for us, then we haven't been collecting any data
+ // for ETW events.
+ EtwGcHeapDumpContext * pContext = (EtwGcHeapDumpContext *) profilerWalkHeapContext->pvEtwContext;
+ if (pContext == NULL)
+ return;
+
+ // If the GC events are enabled, flush any remaining root, node, and / or edge data
+ if (s_forcedGCInProgress &&
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_GCHEAPDUMP_KEYWORD))
+ {
+ if (pContext->cGcBulkRootEdges > 0)
+ {
+ FireEtwGCBulkRootEdge(
+ pContext->iCurBulkRootEdge,
+ pContext->cGcBulkRootEdges,
+ GetClrInstanceId(),
+ sizeof(pContext->rgGcBulkRootEdges[0]),
+ &pContext->rgGcBulkRootEdges[0]);
+ }
+
+ if (pContext->cGCBulkRootConditionalWeakTableElementEdges > 0)
+ {
+ FireEtwGCBulkRootConditionalWeakTableElementEdge(
+ pContext->iCurBulkRootConditionalWeakTableElementEdge,
+ pContext->cGCBulkRootConditionalWeakTableElementEdges,
+ GetClrInstanceId(),
+ sizeof(pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]),
+ &pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]);
+ }
+
+ if (pContext->cGcBulkNodeValues > 0)
+ {
+ FireEtwGCBulkNode(
+ pContext->iCurBulkNodeEvent,
+ pContext->cGcBulkNodeValues,
+ GetClrInstanceId(),
+ sizeof(pContext->rgGcBulkNodeValues[0]),
+ &pContext->rgGcBulkNodeValues[0]);
+ }
+
+ if (pContext->cGcBulkEdgeValues > 0)
+ {
+ FireEtwGCBulkEdge(
+ pContext->iCurBulkEdgeEvent,
+ pContext->cGcBulkEdgeValues,
+ GetClrInstanceId(),
+ sizeof(pContext->rgGcBulkEdgeValues[0]),
+ &pContext->rgGcBulkEdgeValues[0]);
+ }
+ }
+
+ // Ditto for type events
+ if (ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_TYPE_KEYWORD))
+ {
+ pContext->bulkTypeEventLogger.FireBulkTypeEvent();
+ }
+
+ // Delete any GC state built up in the context
+ profilerWalkHeapContext->pvEtwContext = NULL;
+ delete pContext;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Helper to send public finalize object & type events, and private finalize object
+// event. If Type events are enabled, this will send the Type event for the finalized
+// objects. It will not be batched with other types (except type parameters, if any),
+// and will not check if the Type has already been logged (may thus result in dupe
+// logging of the Type).
+//
+// Arguments:
+// pMT - MT of object getting finalized
+// pObj - object getting finalized
+//
+
+// static
+VOID ETW::GCLog::SendFinalizeObjectEvent(MethodTable * pMT, Object * pObj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ // LogTypeAndParameters locks, and we take our own lock if typeLogBehavior says to
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // Send public finalize object event, if it's enabled
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, FinalizeObject))
+ {
+ FireEtwFinalizeObject(pMT, pObj, GetClrInstanceId());
+
+ // This function checks if type events are enabled; if so, it sends event for
+ // finalized object's type (and parameter types, if any)
+ ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(
+ NULL, // Not batching this type with others
+ (TADDR) pMT,
+
+ // Don't spend the time entering the lock and checking the hash table to see
+ // if we've already logged the type; just log it (if type events are enabled).
+ ETW::TypeSystemLog::kTypeLogBehaviorAlwaysLog
+ );
+ }
+
+ // Send private finalize object event, if it's enabled
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, PrvFinalizeObject))
+ {
+ EX_TRY
+ {
+ DefineFullyQualifiedNameForClassWOnStack();
+ FireEtwPrvFinalizeObject(pMT, pObj, GetClrInstanceId(), GetFullyQualifiedNameForClassNestedAwareW(pMT));
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions);
+ }
+}
+
+
+DWORD ETW::ThreadLog::GetEtwThreadFlags(Thread * pThread)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD dwEtwThreadFlags = 0;
+
+ if (pThread->IsThreadPoolThread())
+ {
+ dwEtwThreadFlags |= kEtwThreadFlagThreadPoolWorker;
+ }
+ if (pThread->IsGCSpecial())
+ {
+ dwEtwThreadFlags |= kEtwThreadFlagGCSpecial;
+ }
+ if (IsGarbageCollectorFullyInitialized() &&
+ (pThread == FinalizerThread::GetFinalizerThread()))
+ {
+ dwEtwThreadFlags |= kEtwThreadFlagFinalizer;
+ }
+
+ return dwEtwThreadFlags;
+}
+
+VOID ETW::ThreadLog::FireThreadCreated(Thread * pThread)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ FireEtwThreadCreated(
+ (ULONGLONG)pThread,
+ (ULONGLONG)pThread->GetDomain(),
+ GetEtwThreadFlags(pThread),
+ pThread->GetThreadId(),
+ pThread->GetOSThreadId(),
+ GetClrInstanceId());
+}
+
+VOID ETW::ThreadLog::FireThreadDC(Thread * pThread)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ FireEtwThreadDC(
+ (ULONGLONG)pThread,
+ (ULONGLONG)pThread->GetDomain(),
+ GetEtwThreadFlags(pThread),
+ pThread->GetThreadId(),
+ pThread->GetOSThreadId(),
+ GetClrInstanceId());
+}
+
+
+
+#ifndef FEATURE_REDHAWK
+
+// TypeSystemLog implementation
+//
+// We keep track of which TypeHandles have been logged (and allocation stats for
+// instances of those TypeHandles) using a hash table of hash tables. The outer hash
+// table maps Module*'s to an inner hash table that contains all the TypeLoggingInfos for that
+// Module*. Arranging things this way makes it easy to deal with Module unloads, as we
+// can simply remove the corresponding inner hash table from the outer hash table.
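+//
+// Conceptually (an illustrative sketch, not actual declarations):
+//
+//     Module* A  -->  { TypeHandle --> TypeLoggingInfo, ... }
+//     Module* B  -->  { TypeHandle --> TypeLoggingInfo, ... }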
+
+// The following help define the "inner" hash table: a hash table of TypeLoggingInfos
+// from a particular Module (key = TypeHandle, value = TypeLoggingInfo).
+
+class LoggedTypesFromModuleTraits : public NoRemoveSHashTraits< DefaultSHashTraits<ETW::TypeLoggingInfo> >
+{
+public:
+
+ // explicitly declare local typedefs for these traits types, otherwise
+ // the compiler may get confused
+ typedef NoRemoveSHashTraits< DefaultSHashTraits<ETW::TypeLoggingInfo> > PARENT;
+ typedef PARENT::element_t element_t;
+ typedef PARENT::count_t count_t;
+
+ typedef TypeHandle key_t;
+
+ static key_t GetKey(const element_t &e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e.th;
+ }
+
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (k1 == k2);
+ }
+
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t) k.AsTAddr();
+ }
+
+ static bool IsNull(const element_t &e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (e.th.AsTAddr() == NULL);
+ }
+
+ static const element_t Null()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ETW::TypeLoggingInfo(NULL);
+ }
+};
+typedef SHash<LoggedTypesFromModuleTraits> LoggedTypesFromModuleHash;
+
+// The inner hash table is housed inside this class, which acts as an entry in the outer
+// hash table.
+class ETW::LoggedTypesFromModule
+{
+public:
+ Module * pModule;
+ LoggedTypesFromModuleHash loggedTypesFromModuleHash;
+
+ // These are used by the outer hash table (mapping Module*'s to instances of
+ // LoggedTypesFromModule).
+ static COUNT_T Hash(Module * pModule)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (COUNT_T) (SIZE_T) pModule;
+ }
+ Module * GetKey()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return pModule;
+ }
+
+ LoggedTypesFromModule(Module * pModuleParam) : loggedTypesFromModuleHash()
+ {
+ LIMITED_METHOD_CONTRACT;
+ pModule = pModuleParam;
+ }
+
+ ~LoggedTypesFromModule()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+};
+
+// The following define the outer hash table (mapping Module*'s to instances of
+// LoggedTypesFromModule).
+
+class AllLoggedTypesTraits : public DefaultSHashTraits<ETW::LoggedTypesFromModule *>
+{
+public:
+
+ // explicitly declare local typedefs for these traits types, otherwise
+ // the compiler may get confused
+ typedef DefaultSHashTraits<ETW::LoggedTypesFromModule *> PARENT;
+ typedef PARENT::element_t element_t;
+ typedef PARENT::count_t count_t;
+
+ typedef Module * key_t;
+
+ static key_t GetKey(const element_t &e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e->pModule;
+ }
+
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (k1 == k2);
+ }
+
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t) (size_t) k;
+ }
+
+ static bool IsNull(const element_t &e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (e == NULL);
+ }
+
+ static const element_t Null()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return NULL;
+ }
+};
+
+typedef SHash<AllLoggedTypesTraits> AllLoggedTypesHash;
+
+// The outer hash table (mapping Module*'s to instances of LoggedTypesFromModule) is
+// housed in this struct, which is dynamically allocated the first time we decide we need
+// it.
+struct AllLoggedTypes
+{
+public:
+ // This Crst protects the entire outer & inner hash tables. On a GC heap walk, it
+ // is entered once for the duration of the walk, so that we can freely access the
+ // hash tables during the walk. On each object allocation, this Crst must be
+ // entered individually each time.
+ static CrstStatic s_cs;
+
+ // A thread local copy of the global epoch.
+ // This value is used by each thread to ensure that the thread local data structures
+ // are in sync with the global state.
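+ // For example, OnModuleUnload and OnTypesKeywordTurnedOff bump the global
+ // s_nEpoch; a thread whose cached nEpoch no longer matches discards its
+ // thread local tables and rebuilds them on its next lookup (see
+ // LookupOrCreateTypeLoggingInfo).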
+ unsigned int nEpoch;
+
+ // The outer hash table (mapping Module*'s to instances of LoggedTypesFromModule)
+ AllLoggedTypesHash allLoggedTypesHash;
+};
+
+
+CrstStatic AllLoggedTypes::s_cs;
+AllLoggedTypes * ETW::TypeSystemLog::s_pAllLoggedTypes = NULL;
+unsigned int ETW::TypeSystemLog::s_nEpoch = 0;
+BOOL ETW::TypeSystemLog::s_fHeapAllocEventEnabledOnStartup = FALSE;
+BOOL ETW::TypeSystemLog::s_fHeapAllocHighEventEnabledNow = FALSE;
+BOOL ETW::TypeSystemLog::s_fHeapAllocLowEventEnabledNow = FALSE;
+int ETW::TypeSystemLog::s_nCustomMsBetweenEvents = 0;
+
+
+//---------------------------------------------------------------------------------------
+//
+// Initializes TypeSystemLog (specifically its crst). Called just before ETW providers
+// are registered with the OS
+//
+// Return Value:
+// HRESULT indicating success or failure
+//
+
+// static
+HRESULT ETW::TypeSystemLog::PreRegistrationInit()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!AllLoggedTypes::s_cs.InitNoThrow(
+ CrstEtwTypeLogHash,
+ CRST_UNSAFE_ANYMODE)) // This lock is taken during a GC while walking the heap
+ {
+ return E_FAIL;
+ }
+
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Initializes TypeSystemLog's allocation sampling state. Called just after ETW
+// providers are registered with the OS
+//
+
+// static
+void ETW::TypeSystemLog::PostRegistrationInit()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Initialize our "current state" BOOLs that remember if low or high allocation
+ // sampling is turned on
+ s_fHeapAllocLowEventEnabledNow = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_GCHEAPALLOCLOW_KEYWORD);
+ s_fHeapAllocHighEventEnabledNow = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_GCHEAPALLOCHIGH_KEYWORD);
+
+ // Snapshot the current state of the object allocated keyword (on startup), and rely
+ // on this snapshot for the rest of the process run. Since these events require the
+ // slow alloc JIT helper to be enabled, and that can only be done on startup, we
+ // remember in this BOOL that we did so, so that we can prevent the object allocated
+ // event from being fired if the fast allocation helper were enabled but had to
+ // degrade down to the slow helper (e.g., thread ran over its allocation limit). This
+ // keeps things consistent.
+ s_fHeapAllocEventEnabledOnStartup = (s_fHeapAllocLowEventEnabledNow || s_fHeapAllocHighEventEnabledNow);
+
+ if (s_fHeapAllocEventEnabledOnStartup)
+ {
+ // Determine if a COMPLUS env var is overriding the frequency for the sampled
+ // object allocated events
+
+ // Config value intentionally typed as string, b/c DWORD interpretation is hard-coded
+ // to hex, which is not what the user would expect. This way I can force the
+ // conversion to use decimal.
+ NewArrayHolder<WCHAR> wszCustomObjectAllocationEventsPerTypePerSec(NULL);
+ if (FAILED(CLRConfig::GetConfigValue(
+ CLRConfig::UNSUPPORTED_ETW_ObjectAllocationEventsPerTypePerSec,
+ &wszCustomObjectAllocationEventsPerTypePerSec)) ||
+ (wszCustomObjectAllocationEventsPerTypePerSec == NULL))
+ {
+ return;
+ }
+ LPWSTR endPtr;
+ DWORD dwCustomObjectAllocationEventsPerTypePerSec = wcstoul(
+ wszCustomObjectAllocationEventsPerTypePerSec,
+ &endPtr,
+ 10 // Base 10 conversion
+ );
+
+ if (dwCustomObjectAllocationEventsPerTypePerSec == ULONG_MAX)
+ dwCustomObjectAllocationEventsPerTypePerSec = 0;
+ if (dwCustomObjectAllocationEventsPerTypePerSec != 0)
+ {
+ // MsBetweenEvents = (1000 ms/sec) / (custom desired events/sec)
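+ // e.g., a configured 100 events/sec yields 1000 / 100 = 10 ms between
+ // sampled events for each type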
+ s_nCustomMsBetweenEvents = 1000 / dwCustomObjectAllocationEventsPerTypePerSec;
+ }
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Update object allocation sampling frequency and / or Type hash table contents based
+// on what keywords were changed.
+//
+
+// static
+void ETW::TypeSystemLog::OnKeywordsChanged()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // If the desired frequency for the GCSampledObjectAllocation events has changed,
+ // update our state.
+ s_fHeapAllocLowEventEnabledNow = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_GCHEAPALLOCLOW_KEYWORD);
+ s_fHeapAllocHighEventEnabledNow = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_GCHEAPALLOCHIGH_KEYWORD);
+
+ // FUTURE: Would be nice here to log an error event if (s_fHeapAllocLowEventEnabledNow ||
+ // s_fHeapAllocHighEventEnabledNow), but !s_fHeapAllocEventEnabledOnStartup
+
+ // If the type events should be turned off, eliminate the hash tables that tracked
+ // which types were logged. (If type events are turned back on later, we'll re-log
+ // them all as we encounter them.) Note that all we can really test for is that the
+ // Types keyword on the runtime provider is off. Not necessarily that it was on and
+ // was just turned off with this request. But either way, TypeSystemLog can handle it
+ // because it is extremely smart.
+ if (!ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_TYPE_KEYWORD))
+ OnTypesKeywordTurnedOff();
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Based on keywords alone, determine what the default sampling rate should be for
+// object allocation events. (This function does not consider any COMPLUS overrides for
+// the sampling rate.)
+//
+
+// static
+int ETW::TypeSystemLog::GetDefaultMsBetweenEvents()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // We should only get here if the allocation event is enabled. In spirit, this assert
+ // is correct, but a race could cause the assert to fire (if someone toggled the
+ // event off after we decided that the event was on and we started down the path of
+ // calculating statistics to fire the event). In such a case we'll end up returning
+ // k_nDefaultMsBetweenEventsLow below, but next time we won't get here as we'll know
+ // early enough not to fire the event.
+ //_ASSERTE(IsHeapAllocEventEnabled());
+
+ // MsBetweenEvents = (1000 ms/sec) / (desired events/sec)
+ const int k_nDefaultMsBetweenEventsHigh = 1000 / 100; // 100 events per type per sec
+ const int k_nDefaultMsBetweenEventsLow = 1000 / 5; // 5 events per type per sec
+
+ // If both are set, High takes precedence
+ if (s_fHeapAllocHighEventEnabledNow)
+ {
+ return k_nDefaultMsBetweenEventsHigh;
+ }
+ return k_nDefaultMsBetweenEventsLow;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Use this to decide whether to fire the object allocation event
+//
+// Return Value:
+// nonzero iff we should fire the event.
+//
+
+// static
+BOOL ETW::TypeSystemLog::IsHeapAllocEventEnabled()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return
+ // Only fire the event if it was enabled at startup (and thus the slow-JIT new
+ // helper is used in all cases)
+ s_fHeapAllocEventEnabledOnStartup &&
+
+ // AND a keyword is still enabled. (Thus people can turn off the event
+ // whenever they want; but they cannot turn it on unless it was also on at startup.)
+ (s_fHeapAllocHighEventEnabledNow || s_fHeapAllocLowEventEnabledNow);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Helper that adds (or updates) the TypeLoggingInfo inside the inner hash table passed
+// in.
+//
+// Arguments:
+// * pLoggedTypesFromModule - Inner hash table to update
+// * pTypeLoggingInfo - TypeLoggingInfo to store
+//
+// Return Value:
+// nonzero iff the add/replace was successful.
+//
+
+// static
+BOOL ETW::TypeSystemLog::AddOrReplaceTypeLoggingInfo(ETW::LoggedTypesFromModule * pLoggedTypesFromModule, const ETW::TypeLoggingInfo * pTypeLoggingInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pLoggedTypesFromModule != NULL);
+
+ BOOL fSucceeded = FALSE;
+ EX_TRY
+ {
+ pLoggedTypesFromModule->loggedTypesFromModuleHash.AddOrReplace(*pTypeLoggingInfo);
+ fSucceeded = TRUE;
+ }
+ EX_CATCH
+ {
+ fSucceeded = FALSE;
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions);
+
+ return fSucceeded;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Records stats about the object's allocation, and determines based on those stats whether
+// to fire the high / low frequency GCSampledObjectAllocation ETW event
+//
+// Arguments:
+// * pObject - Allocated object to log
+//
+
+// static
+void ETW::TypeSystemLog::SendObjectAllocatedEvent(Object * pObject)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // No-op if the appropriate keywords were not enabled on startup (or we're not yet
+ // started up)
+ if (!s_fHeapAllocEventEnabledOnStartup || !g_fEEStarted)
+ return;
+
+ TypeHandle th = pObject->GetTypeHandle();
+
+ SIZE_T size = pObject->GetSize();
+ if (size < MIN_OBJECT_SIZE)
+ {
+ size = PtrAlign(size);
+ }
+
+ SIZE_T nTotalSizeForTypeSample = size;
+ DWORD dwTickNow = GetTickCount();
+ DWORD dwObjectCountForTypeSample = 0;
+
+ // Get stats for type
+ TypeLoggingInfo typeLoggingInfo(NULL);
+ LoggedTypesFromModule * pLoggedTypesFromModule = NULL;
+ BOOL fCreatedNew = FALSE;
+ typeLoggingInfo = LookupOrCreateTypeLoggingInfo(th, &fCreatedNew, &pLoggedTypesFromModule);
+ if (typeLoggingInfo.th.IsNull())
+ return;
+
+ // Update stats with current allocation
+ typeLoggingInfo.dwAllocsSkippedForSample++;
+ typeLoggingInfo.cbIgnoredSizeForSample += size;
+
+ // If both the high and low verbosity keywords are enabled, log all allocations.
+ if (!(s_fHeapAllocHighEventEnabledNow && s_fHeapAllocLowEventEnabledNow))
+ {
+ // Get the number of threads so that we can scale the per-thread sampling data.
+ // NOTE: We don't do this while holding the thread store lock, so this may not be perfect,
+ // but it will be close enough.
+ LONG numThreads = ThreadStore::s_pThreadStore->ThreadCountInEE();
+
+ // This is our filter. If we should ignore this alloc, then record our updated
+ // stats, and bail without sending the event. Note that we always log objects
+ // over 10K in size.
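+ // (As a rough illustration: with dwAllocsToSkipPerSample == 100 and 4 threads
+ // in the EE, about every 400th sub-10K allocation of this type on this thread
+ // passes the filter and fires an event.)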
+ if (size < 10000 && typeLoggingInfo.dwAllocsSkippedForSample < (typeLoggingInfo.dwAllocsToSkipPerSample * numThreads))
+ {
+ // Update hash table's copy of type logging info with these values. It is not optimal that
+ // we're doing another hash table lookup here. Could instead have used LookupPtr()
+ // if it gave us back a non-const pointer, and then we could have updated in-place
+ AddOrReplaceTypeLoggingInfo(pLoggedTypesFromModule, &typeLoggingInfo);
+ if (fCreatedNew)
+ {
+ // Although we're skipping logging the allocation, we still need to log
+ // the type (so it's available for resolving future allocation events to
+ // their types).
+ //
+ // (See other call to LogTypeAndParametersIfNecessary further down for
+ // more comments.)
+ LogTypeAndParametersIfNecessary(
+ NULL,
+ th.AsTAddr(),
+ kTypeLogBehaviorAlwaysLogTopLevelType);
+ }
+ return;
+ }
+
+ // Based on observed allocation stats, adjust our sampling rate for this type
+
+ typeLoggingInfo.dwAllocCountInCurrentBucket += typeLoggingInfo.dwAllocsSkippedForSample;
+ int delta = (dwTickNow - typeLoggingInfo.dwTickOfCurrentTimeBucket) & 0x7FFFFFFF; // make wrap around work.
+
+ int nMinAllocPerMSec = typeLoggingInfo.dwAllocCountInCurrentBucket / 16 / numThreads; // This is an underestimation of the true rate.
+ if (delta >= 16 || (nMinAllocPerMSec > 2 && nMinAllocPerMSec > typeLoggingInfo.flAllocPerMSec * 1.5F))
+ {
+ float flNewAllocPerMSec = 0;
+ if (delta >= 16)
+ {
+ // This is the normal case: our allocation rate is under control with the current throttling.
+ flNewAllocPerMSec = ((float) typeLoggingInfo.dwAllocCountInCurrentBucket) / delta;
+ // Use an exponential decay window that is 5 * max(16, AllocationInterval)
+ typeLoggingInfo.flAllocPerMSec = 0.8F * typeLoggingInfo.flAllocPerMSec + 0.2F * flNewAllocPerMSec;
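+ // e.g., a running rate of 10 allocs/ms and a newly measured rate of
+ // 20 allocs/ms blend to 0.8 * 10 + 0.2 * 20 = 12 allocs/ms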
+ typeLoggingInfo.dwTickOfCurrentTimeBucket = dwTickNow;
+ typeLoggingInfo.dwAllocCountInCurrentBucket = 0;
+ }
+ else
+ {
+ flNewAllocPerMSec = (float) nMinAllocPerMSec;
+ // This means the second clause above is true, which means our sampling rate is too low
+ // so we need to throttle quickly.
+ typeLoggingInfo.flAllocPerMSec = flNewAllocPerMSec;
+ }
+
+
+ // Obey the desired sampling rate, but cap the number of allocations skipped
+ // between samples at 1000 per type
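+ // (For example, at 2 allocs/ms with the default low-frequency rate of 200 ms
+ // between events, we'd skip roughly 400 allocations per sampled event.)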
+ int nDesiredMsBetweenEvents = (s_nCustomMsBetweenEvents == 0) ? GetDefaultMsBetweenEvents() : s_nCustomMsBetweenEvents;
+ typeLoggingInfo.dwAllocsToSkipPerSample = min((int) (typeLoggingInfo.flAllocPerMSec * nDesiredMsBetweenEvents), 1000);
+ if (typeLoggingInfo.dwAllocsToSkipPerSample == 1)
+ typeLoggingInfo.dwAllocsToSkipPerSample = 0;
+ }
+ }
+
+ // We're logging this sample, so save the values we need into locals, and reset
+ // our counts for the next sample.
+ nTotalSizeForTypeSample = typeLoggingInfo.cbIgnoredSizeForSample;
+ dwObjectCountForTypeSample = typeLoggingInfo.dwAllocsSkippedForSample;
+ typeLoggingInfo.cbIgnoredSizeForSample = 0;
+ typeLoggingInfo.dwAllocsSkippedForSample = 0;
+
+ // Save updated stats into hash table
+ if (!AddOrReplaceTypeLoggingInfo(pLoggedTypesFromModule, &typeLoggingInfo))
+ {
+ return;
+ }
+
+ // Now that the stats are saved, optionally log any relevant Types (we may need
+ // to reconsult the hash in here if there are any type parameters, though we can
+ // optimize and NOT consult the hash for th itself).
+ if (fCreatedNew)
+ {
+ // We were the ones to add the Type to the hash. So it wasn't there before,
+ // which means it hasn't been logged yet.
+ LogTypeAndParametersIfNecessary(
+
+ // No BulkTypeEventLogger, as we're not batching during a GC heap walk
+ NULL,
+
+ th.AsTAddr(),
+
+ // We've determined the type is not yet logged, so no need to check
+ kTypeLogBehaviorAlwaysLogTopLevelType);
+ }
+
+ // Now log the allocation
+ if (s_fHeapAllocHighEventEnabledNow)
+ {
+ FireEtwGCSampledObjectAllocationHigh(pObject, (LPVOID) th.AsTAddr(), dwObjectCountForTypeSample, nTotalSizeForTypeSample, GetClrInstanceId());
+ }
+ else
+ {
+ FireEtwGCSampledObjectAllocationLow(pObject, (LPVOID) th.AsTAddr(), dwObjectCountForTypeSample, nTotalSizeForTypeSample, GetClrInstanceId());
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Accessor for global hash table crst
+//
+// Return Value:
+// global hash table crst
+//
+
+// static
+CrstBase * ETW::TypeSystemLog::GetHashCrst()
+{
+ LIMITED_METHOD_CONTRACT;
+ return &AllLoggedTypes::s_cs;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Outermost level of ETW-type-logging. Clients outside eventtrace.cpp call this to log
+// a TypeHandle and (recursively) its type parameters when present. This guy then calls
+// into the appropriate BulkTypeEventLogger to do the batching and logging
+//
+// Arguments:
+// * pBulkTypeEventLogger - If our caller is keeping track of batched types, it
+// passes this to us so we can use it to batch the current type (GC heap walk
+// does this). If this is NULL, no batching is going on (e.g., we're called on
+//        object allocation, not a GC heap walk), in which case we create our own
+// temporary BulkTypeEventLogger.
+// * thAsAddr - TypeHandle to batch
+// * typeLogBehavior - Optimization to tell us we don't need to enter the
+// TypeSystemLog's crst, as the TypeSystemLog's hash table is already protected
+// by a prior acquisition of the crst by our caller. (Or that we don't even
+// need to check the hash in the first place.)
+//
+
+// static
+VOID ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(BulkTypeEventLogger * pLogger, ULONGLONG thAsAddr, TypeLogBehavior typeLogBehavior)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ // LogTypeAndParameters locks, and we take our own lock if typeLogBehavior says to
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ if (!ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_TYPE_KEYWORD))
+ {
+ return;
+ }
+
+ TypeHandle th = TypeHandle::FromTAddr((TADDR) thAsAddr);
+ if (!th.IsRestored())
+ {
+ return;
+ }
+
+ // Check to see if we've already logged this type. If so, bail immediately.
+ // Otherwise, mark that it's getting logged (by adding it to the hash), and fall
+ // through to the logging code below. If caller doesn't care, then don't even
+ // check; just log the type
+ BOOL fShouldLogType = ((typeLogBehavior == kTypeLogBehaviorAlwaysLog) ||
+ (typeLogBehavior == kTypeLogBehaviorAlwaysLogTopLevelType)) ?
+ TRUE :
+ ShouldLogType(th);
+ if (!fShouldLogType)
+ return;
+
+ if (pLogger == NULL)
+ {
+ // We're not batching this type against previous types (e.g., we're being called
+ // on object allocate instead of a GC heap walk). So create a temporary logger
+ // on the stack. If there are generic parameters that need to be logged, then
+ // at least they'll get batched together with the type
+ BulkTypeEventLogger logger;
+ logger.LogTypeAndParameters(thAsAddr, typeLogBehavior);
+
+ // Since this logger isn't being used to batch anything else, flush what we have
+ logger.FireBulkTypeEvent();
+ }
+ else
+ {
+ // We are batching this type with others (e.g., we're being called at the end of
+ // a GC on a heap walk). So use the logger our caller set up for us.
+ pLogger->LogTypeAndParameters(thAsAddr, typeLogBehavior);
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Ask the hash tables if we've already logged the type. As a side-effect, a
+// TypeLoggingInfo will be created
+// for this type (so future calls to this function will return FALSE to avoid dupe type
+// logging).
+//
+// Arguments:
+//     th - TypeHandle to query
+//
+// Return Value:
+// nonzero iff type should be logged (i.e., not previously logged)
+//
+
+// static
+BOOL ETW::TypeSystemLog::ShouldLogType(TypeHandle th)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+
+ // Check to see if TypeLoggingInfo exists yet for th. If not, creates one and
+ // adds it to the hash.
+ BOOL fCreatedNew = FALSE;
+
+ // When we have a thread context, default to calling the API that requires one,
+ // which reduces the cost of locking.
+ if (GetThread() != NULL)
+ {
+ LookupOrCreateTypeLoggingInfo(th, &fCreatedNew);
+ }
+ else
+ {
+ AddTypeToGlobalCacheIfNotExists(th, &fCreatedNew);
+ }
+
+ // Return whether we had to create the TypeLoggingInfo (indicating it was not yet in
+ // the hash, and thus that we hadn't yet logged the type).
+ return fCreatedNew;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Helper that returns (creating if necessary) the TypeLoggingInfo in the hash table
+// corresponding with the specified TypeHandle
+//
+// Arguments:
+// * th - Key to lookup the TypeLoggingInfo
+// * pfCreatedNew - [out] Points to nonzero iff a new TypeLoggingInfo was created
+// (i.e., none existed yet in the hash for th).
+// * ppLoggedTypesFromModule - [out] Points to the inner hash that was used to do
+//        the lookup. (An optimization so the caller doesn't have to find this again,
+// if it needs to do further operations on it.)
+//
+// Return Value:
+// TypeLoggingInfo found or created.
+//
+//
+
+// static
+ETW::TypeLoggingInfo ETW::TypeSystemLog::LookupOrCreateTypeLoggingInfo(TypeHandle th, BOOL * pfCreatedNew, LoggedTypesFromModule ** ppLoggedTypesFromModule /* = NULL */)
+{
+ //LIMITED_METHOD_CONTRACT;
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pfCreatedNew != NULL);
+
+ if (ppLoggedTypesFromModule != NULL)
+ {
+ *ppLoggedTypesFromModule = NULL;
+ }
+
+ BOOL fSucceeded = FALSE;
+
+ Thread *pThread = GetThread();
+
+ // Compare the thread local epoch value against the global epoch.
+ // If the epoch has changed, dump the thread local state and start over.
+ AllLoggedTypes * pThreadAllLoggedTypes = pThread->GetAllocationSamplingTable();
+ if((pThreadAllLoggedTypes != NULL) && (pThreadAllLoggedTypes->nEpoch != s_nEpoch))
+ {
+ // Set the type hash pointer on the thread to NULL.
+ pThread->SetAllocationSamplingTable(NULL);
+
+ // DeleteTypeHashNoLock will set pThreadAllLoggedTypes to NULL
+ DeleteTypeHashNoLock(&pThreadAllLoggedTypes);
+ }
+
+ // Create the thread local state if it doesn't exist.
+ if (pThreadAllLoggedTypes == NULL)
+ {
+ pThreadAllLoggedTypes = new (nothrow) AllLoggedTypes;
+ if (pThreadAllLoggedTypes == NULL)
+ {
+ // out of memory. Bail on ETW stuff
+ *pfCreatedNew = FALSE;
+ return TypeLoggingInfo(NULL);
+ }
+
+ // Set the epoch so we know we can track when changes to global state occur.
+ pThreadAllLoggedTypes->nEpoch = s_nEpoch;
+
+ // Save the thread local state to the thread.
+ pThread->SetAllocationSamplingTable(pThreadAllLoggedTypes);
+ }
+
+ BOOL addTypeToGlobalList = FALSE;
+
+ // Step 1: go from LoaderModule to hash of types.
+
+ Module * pLoaderModule = th.GetLoaderModule();
+ _ASSERTE(pLoaderModule != NULL);
+ LoggedTypesFromModule * pLoggedTypesFromModule = pThreadAllLoggedTypes->allLoggedTypesHash.Lookup(pLoaderModule);
+ if (pLoggedTypesFromModule == NULL)
+ {
+ addTypeToGlobalList = TRUE;
+ pLoggedTypesFromModule = new (nothrow) LoggedTypesFromModule(pLoaderModule);
+ if (pLoggedTypesFromModule == NULL)
+ {
+ // out of memory. Bail on ETW stuff
+ *pfCreatedNew = FALSE;
+ return TypeLoggingInfo(NULL);
+ }
+
+ fSucceeded = FALSE;
+ EX_TRY
+ {
+ pThreadAllLoggedTypes->allLoggedTypesHash.Add(pLoggedTypesFromModule);
+ fSucceeded = TRUE;
+ }
+ EX_CATCH
+ {
+ fSucceeded = FALSE;
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions);
+ if (!fSucceeded)
+ {
+ *pfCreatedNew = FALSE;
+ return TypeLoggingInfo(NULL);
+ }
+ }
+
+ if (ppLoggedTypesFromModule != NULL)
+ {
+ *ppLoggedTypesFromModule = pLoggedTypesFromModule;
+ }
+
+ // Step 2: From hash of types, see if our TypeHandle is there already
+ TypeLoggingInfo typeLoggingInfoPreexisting = pLoggedTypesFromModule->loggedTypesFromModuleHash.Lookup(th);
+ if (!typeLoggingInfoPreexisting.th.IsNull())
+ {
+ // Type is already hashed, so it's already logged, so we don't need to
+ // log it again.
+ *pfCreatedNew = FALSE;
+ return typeLoggingInfoPreexisting;
+ }
+
+ // We haven't logged this type yet. Add it to the hash table first so any
+ // recursive calls will see that this type is already being taken care of
+ addTypeToGlobalList = TRUE;
+ fSucceeded = FALSE;
+ TypeLoggingInfo typeLoggingInfoNew(th);
+ EX_TRY
+ {
+ pLoggedTypesFromModule->loggedTypesFromModuleHash.Add(typeLoggingInfoNew);
+ fSucceeded = TRUE;
+ }
+ EX_CATCH
+ {
+ fSucceeded = FALSE;
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions);
+ if (!fSucceeded)
+ {
+ *pfCreatedNew = FALSE;
+ return TypeLoggingInfo(NULL);
+ }
+
+ // This is the first time that we've seen this type on this thread, so we should attempt to
+ // add it to the global list.
+ if(!AddTypeToGlobalCacheIfNotExists(th, pfCreatedNew))
+ {
+ // out of memory or ETW has been disabled. Bail on ETW stuff
+ *pfCreatedNew = FALSE;
+ return TypeLoggingInfo(NULL);
+ }
+
+ return typeLoggingInfoNew;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Helper that creates a Type entry in the global type logging cache if one doesn't
+// already exist.
+//
+// Arguments:
+// * th - Key to lookup or create
+//
+// Return Value:
+//     TRUE iff the type was not already in the cache and was successfully added.
+//
+//
+
+// static
+BOOL ETW::TypeSystemLog::AddTypeToGlobalCacheIfNotExists(TypeHandle th, BOOL * pfCreatedNew)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BOOL fSucceeded = FALSE;
+
+ {
+ CrstHolder _crst(GetHashCrst());
+
+ // Check if ETW is enabled, and if not, bail here.
+ // We do this inside of the lock to ensure that we don't immediately
+ // re-allocate the global type hash after it has been cleaned up.
+ if (!ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_TYPE_KEYWORD))
+ {
+ *pfCreatedNew = FALSE;
+ return fSucceeded;
+ }
+
+ if (s_pAllLoggedTypes == NULL)
+ {
+ s_pAllLoggedTypes = new (nothrow) AllLoggedTypes;
+ if (s_pAllLoggedTypes == NULL)
+ {
+ // out of memory. Bail on ETW stuff
+ *pfCreatedNew = FALSE;
+ return fSucceeded;
+ }
+ }
+
+ // Step 1: go from LoaderModule to hash of types.
+
+ Module * pLoaderModule = th.GetLoaderModule();
+ _ASSERTE(pLoaderModule != NULL);
+ LoggedTypesFromModule * pLoggedTypesFromModule = s_pAllLoggedTypes->allLoggedTypesHash.Lookup(pLoaderModule);
+ if (pLoggedTypesFromModule == NULL)
+ {
+ pLoggedTypesFromModule = new (nothrow) LoggedTypesFromModule(pLoaderModule);
+ if (pLoggedTypesFromModule == NULL)
+ {
+ // out of memory. Bail on ETW stuff
+ *pfCreatedNew = FALSE;
+ return fSucceeded;
+ }
+
+ fSucceeded = FALSE;
+ EX_TRY
+ {
+ s_pAllLoggedTypes->allLoggedTypesHash.Add(pLoggedTypesFromModule);
+ fSucceeded = TRUE;
+ }
+ EX_CATCH
+ {
+ fSucceeded = FALSE;
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions);
+ if (!fSucceeded)
+ {
+ *pfCreatedNew = FALSE;
+ return fSucceeded;
+ }
+ }
+
+ // Step 2: From hash of types, see if our TypeHandle is there already
+ TypeLoggingInfo typeLoggingInfoPreexisting = pLoggedTypesFromModule->loggedTypesFromModuleHash.Lookup(th);
+ if (!typeLoggingInfoPreexisting.th.IsNull())
+ {
+ // Type is already hashed, so it's already logged, so we don't need to
+ // log it again.
+ *pfCreatedNew = FALSE;
+ return fSucceeded;
+ }
+
+ // We haven't logged this type yet. Add it to the hash table first so any
+ // recursive calls will see that this type is already being taken care of
+ fSucceeded = FALSE;
+ TypeLoggingInfo typeLoggingInfoNew(th);
+ EX_TRY
+ {
+ pLoggedTypesFromModule->loggedTypesFromModuleHash.Add(typeLoggingInfoNew);
+ fSucceeded = TRUE;
+ }
+ EX_CATCH
+ {
+ fSucceeded = FALSE;
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions);
+ if (!fSucceeded)
+ {
+ *pfCreatedNew = FALSE;
+ return fSucceeded;
+ }
+ } // RELEASE: CrstHolder _crst(GetHashCrst());
+
+ *pfCreatedNew = TRUE;
+ return fSucceeded;
+
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Called when a module is being unloaded, so we can clear out that module's
+// set of types from our hash table
+//
+// Arguments:
+// pModule - Module getting unloaded
+//
+
+// static
+VOID ETW::TypeSystemLog::OnModuleUnload(Module * pModule)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // We don't need to do anything if allocation sampling is disabled.
+ if (!ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_TYPE_KEYWORD))
+ {
+ return;
+ }
+
+ LoggedTypesFromModule * pLoggedTypesFromModule = NULL;
+
+ {
+ CrstHolder _crst(GetHashCrst());
+
+ // We don't need to do anything if the global type hash doesn't contain any data.
+ if (s_pAllLoggedTypes == NULL)
+ return;
+
+ // Is there a TypesHash for this module?
+ pLoggedTypesFromModule = s_pAllLoggedTypes->allLoggedTypesHash.Lookup(pModule);
+ if (pLoggedTypesFromModule == NULL)
+ return;
+
+ // Remove TypesHash from master hash mapping modules to their TypesHash
+ s_pAllLoggedTypes->allLoggedTypesHash.Remove(pModule);
+
+ // Increment the epoch to signal the change to all threads.
+ s_nEpoch++;
+ }
+
+ // Destruct this TypesHash we just removed
+ delete pLoggedTypesFromModule;
+ pLoggedTypesFromModule = NULL;
+
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Same semantics as DeleteTypeHash but assumes that the appropriate lock
+// has already been acquired.
+//
+
+// static
+VOID ETW::TypeSystemLog::DeleteTypeHashNoLock(AllLoggedTypes **ppAllLoggedTypes)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if(ppAllLoggedTypes == NULL)
+ {
+ return;
+ }
+
+ AllLoggedTypes *pAllLoggedTypes = *ppAllLoggedTypes;
+
+ if(pAllLoggedTypes == NULL)
+ {
+ return;
+ }
+
+ // Destruct each of the per-module TypesHashes
+ AllLoggedTypesHash * pLoggedTypesHash = &pAllLoggedTypes->allLoggedTypesHash;
+ for (AllLoggedTypesHash::Iterator iter = pLoggedTypesHash->Begin();
+ iter != pLoggedTypesHash->End();
+ ++iter)
+ {
+ LoggedTypesFromModule * pLoggedTypesFromModule = *iter;
+ delete pLoggedTypesFromModule;
+ }
+
+ // This causes the default ~AllLoggedTypes() to be called, and thus
+ // ~AllLoggedTypesHash() to be called
+ delete pAllLoggedTypes;
+ *ppAllLoggedTypes = NULL;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Whenever we detect that the Types keyword is off, this gets called. This eliminates the
+// global hash tables that tracked which types were logged (if the hash tables had been created
+// previously). If type events are turned back on later, we'll re-log them all as we
+// encounter them. Thread local hash tables are destroyed in the Cleanup method, which is
+// called during GC to ensure that there aren't any races.
+//
+
+// static
+VOID ETW::TypeSystemLog::OnTypesKeywordTurnedOff()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // Take the global cache lock.
+ CrstHolder _crst(GetHashCrst());
+
+    // Clean up the global TypeHash if necessary.
+ if (s_pAllLoggedTypes == NULL)
+ {
+        // Even though we don't increment the epoch here, any per-thread data that has
+        // already been allocated will still be cleaned up during the next GC, because
+        // s_nEpoch is guaranteed to have been incremented at least once (to shut down
+        // allocation sampling).
+ return;
+ }
+
+ // Destruct the global TypeHash
+ DeleteTypeHashNoLock(&s_pAllLoggedTypes);
+
+ // Increment the epoch to signal the change to all threads.
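+    // (Cleanup, which runs during GC, uses a nonzero epoch to know that per-thread
+    // allocation sampling state may exist and needs to be freed.)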
+ s_nEpoch++;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Clean up thread-local type hashes. This is called from within the GC to ensure that
+// there are no races. All threads are suspended when this is called.
+//
+
+// static
+VOID ETW::TypeSystemLog::Cleanup()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // If allocation sampling is enabled, bail here so that we don't delete
+    // any of the thread-local state.
+ if (ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_TYPE_KEYWORD))
+ {
+ return;
+ }
+
+ // If logging is disabled but the epoch has not been incremented,
+ // we haven't ever turned on allocation sampling, so there is nothing
+    // to clean up.
+ if(s_nEpoch <= 0)
+ {
+ return;
+ }
+
+    // Iterate over each thread and destruct the per-thread caches
+ AllLoggedTypes * pThreadAllLoggedTypes = NULL;
+ Thread * pThread = NULL;
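+    // GetThreadList(NULL) returns the first thread in the store; passing the previous
+    // thread back in advances the iteration.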
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ {
+ pThreadAllLoggedTypes = pThread->GetAllocationSamplingTable();
+ if(pThreadAllLoggedTypes == NULL)
+ {
+ continue;
+ }
+
+        // Destruct each of the thread-local TypesHashes
+ DeleteTypeHashNoLock(&pThreadAllLoggedTypes);
+
+ // Set the thread type hash pointer to NULL
+ pThread->SetAllocationSamplingTable(NULL);
+ }
+}
+
+
+/****************************************************************************/
+/* Called when ETW is turned ON on an existing process and ModuleRange events are to
+ be fired */
+/****************************************************************************/
+VOID ETW::EnumerationLog::ModuleRangeRundown()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_PERFTRACK_PRIVATE_KEYWORD))
+ {
+ ETW::EnumerationLog::EnumerationHelper(NULL, NULL, ETW::EnumerationLog::EnumerationStructs::ModuleRangeLoadPrivate);
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+
+/****************************************************************************/
+/* Called when ETW is turned ON on an existing process */
+/****************************************************************************/
+VOID ETW::EnumerationLog::StartRundown()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ BOOL bIsArmRundownEnabled = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNAPPDOMAINRESOURCEMANAGEMENT_KEYWORD);
+ BOOL bIsPerfTrackRundownEnabled = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNPERFTRACK_KEYWORD);
+ BOOL bIsThreadingRundownEnabled = ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNTHREADING_KEYWORD);
+
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNJIT_KEYWORD)
+ ||
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNLOADER_KEYWORD)
+ ||
+ IsRundownNgenKeywordEnabledAndNotSuppressed()
+ ||
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNJITTEDMETHODILTONATIVEMAP_KEYWORD)
+ ||
+ bIsArmRundownEnabled
+ ||
+ bIsPerfTrackRundownEnabled
+ ||
+ bIsThreadingRundownEnabled)
+ {
+ // begin marker event will go to the rundown provider
+ FireEtwDCStartInit_V1(GetClrInstanceId());
+
+ // The rundown flag is expected to be checked in the caller, so no need to check here again
+ DWORD enumerationOptions=ETW::EnumerationLog::EnumerationStructs::None;
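+            // Translate each enabled rundown keyword into its corresponding DCStart enumeration option.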
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNLOADER_KEYWORD))
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart;
+ }
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNJIT_KEYWORD))
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::JitMethodDCStart;
+ }
+ if(IsRundownNgenKeywordEnabledAndNotSuppressed())
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::NgenMethodDCStart;
+ }
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNJITTEDMETHODILTONATIVEMAP_KEYWORD))
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::MethodDCStartILToNativeMap;
+ }
+ if(bIsPerfTrackRundownEnabled)
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::ModuleRangeDCStart;
+ }
+
+ ETW::EnumerationLog::EnumerationHelper(NULL, NULL, enumerationOptions);
+
+ if (bIsArmRundownEnabled)
+ {
+ // When an ETW event consumer asks for ARM rundown, that not only enables
+ // the ETW events, but also causes some minor behavioral changes in the
+ // CLR, such as gathering CPU usage baselines for each thread right now,
+ // and also gathering resource usage information later on (keyed off of
+ // g_fEnableARM, which we'll set right now).
+ EnableARM();
+ }
+
+ if (bIsArmRundownEnabled || bIsThreadingRundownEnabled)
+ {
+ SendThreadRundownEvent();
+ }
+
+ // end marker event will go to the rundown provider
+ FireEtwDCStartComplete_V1(GetClrInstanceId());
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Simple helper to convert the currently active keywords on the runtime provider into a
+// bitmask of enumeration options as defined in ETW::EnumerationLog::EnumerationStructs
+//
+// Return Value:
+// ETW::EnumerationLog::EnumerationStructs bitmask corresponding to the currently
+// active keywords on the runtime provider
+//
+
+// static
+DWORD ETW::EnumerationLog::GetEnumerationOptionsFromRuntimeKeywords()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD enumerationOptions=ETW::EnumerationLog::EnumerationStructs::None;
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_LOADER_KEYWORD))
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleUnload;
+ }
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_JIT_KEYWORD) &&
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_ENDENUMERATION_KEYWORD))
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::JitMethodUnload;
+ }
+ if(IsRuntimeNgenKeywordEnabledAndNotSuppressed() &&
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_ENDENUMERATION_KEYWORD))
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::NgenMethodUnload;
+ }
+
+ return enumerationOptions;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Executes a flavor of rundown initiated by a CAPTURE_STATE request to
+// code:#EtwCallback. CAPTURE_STATE is the "ETW-sanctioned" way of performing a
+// rundown, whereas the CLR's rundown provider was *our* version of this, implemented
+// before CAPTURE_STATE was standardized.
+//
+// When doing a CAPTURE_STATE, the CLR rundown provider is completely unused. Instead,
+// we pay attention to the runtime keywords active at the time the CAPTURE_STATE was
+// requested, and enumerate through the appropriate objects (AppDomains, assemblies,
+// modules, types, methods, threads) and send runtime events for each of them.
+//
+// CAPTURE_STATE is intended to be used primarily by PerfTrack. Implementing this form
+// of rundown allows PerfTrack to be blissfully unaware of the CLR's rundown provider.
+//
+
+// static
+VOID ETW::EnumerationLog::EnumerateForCaptureState()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, TRACE_LEVEL_INFORMATION, KEYWORDZERO))
+ {
+ DWORD enumerationOptions = GetEnumerationOptionsFromRuntimeKeywords();
+
+ // Send unload events for all remaining domains, including shared domain and
+ // default domain.
+ ETW::EnumerationLog::EnumerationHelper(NULL /* module filter */, NULL /* domain filter */, enumerationOptions);
+
+ // Send thread created events for all currently active threads, if requested
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_THREADING_KEYWORD))
+ {
+ SendThreadRundownEvent();
+ }
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/**************************************************************************************/
+/* Called when ETW is turned OFF on an existing process. Used by the controller for the end rundown. */
+/**************************************************************************************/
+VOID ETW::EnumerationLog::EndRundown()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ BOOL bIsPerfTrackRundownEnabled = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNPERFTRACK_KEYWORD);
+ BOOL bIsThreadingRundownEnabled = ETW_TRACING_CATEGORY_ENABLED(
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNTHREADING_KEYWORD);
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNJIT_KEYWORD)
+ ||
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNLOADER_KEYWORD)
+ ||
+ IsRundownNgenKeywordEnabledAndNotSuppressed()
+ ||
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNJITTEDMETHODILTONATIVEMAP_KEYWORD)
+ ||
+ bIsPerfTrackRundownEnabled
+ ||
+ bIsThreadingRundownEnabled
+ )
+ {
+ // begin marker event will go to the rundown provider
+ FireEtwDCEndInit_V1(GetClrInstanceId());
+
+ // The rundown flag is expected to be checked in the caller, so no need to check here again
+ DWORD enumerationOptions=ETW::EnumerationLog::EnumerationStructs::None;
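+            // Translate each enabled rundown keyword into its corresponding DCEnd enumeration option.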
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNLOADER_KEYWORD))
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd;
+ }
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNJIT_KEYWORD))
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::JitMethodDCEnd;
+ }
+ if(IsRundownNgenKeywordEnabledAndNotSuppressed())
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::NgenMethodDCEnd;
+ }
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNJITTEDMETHODILTONATIVEMAP_KEYWORD))
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::MethodDCEndILToNativeMap;
+ }
+ if(bIsPerfTrackRundownEnabled)
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::ModuleRangeDCEnd;
+ }
+
+ ETW::EnumerationLog::EnumerationHelper(NULL, NULL, enumerationOptions);
+
+ if (bIsThreadingRundownEnabled)
+ {
+ SendThreadRundownEvent();
+ }
+
+ // end marker event will go to the rundown provider
+ FireEtwDCEndComplete_V1(GetClrInstanceId());
+ }
+ } EX_CATCH {
+ STRESS_LOG1(LF_ALWAYS, LL_ERROR, "Exception during Rundown Enumeration, EIP of last AV = %p", g_LastAccessViolationEIP);
+ } EX_END_CATCH(SwallowAllExceptions);
+}
+
+// #Registration
+/*++
+
+Routine Description:
+
+ Registers provider with ETW tracing framework.
+    This function should be called only once, during DLL process attach.
+ Not thread safe.
+
+Arguments:
+ none
+
+Return Value:
+ Returns the return value from RegisterTraceGuids or EventRegister.
+
+--*/
+
+void InitializeEventTracing()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Do startup-only initialization of any state required by the ETW classes before
+ // events can be fired
+ HRESULT hr = ETW::TypeSystemLog::PreRegistrationInit();
+ if (FAILED(hr))
+ return;
+
+ // Register CLR providers with the OS
+ if (g_pEtwTracer == NULL)
+ {
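+        // The holder frees the tracer automatically if registration fails below;
+        // Extract relinquishes ownership to the global pointer on success.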
+ NewHolder <ETW::CEtwTracer> tempEtwTracer (new (nothrow) ETW::CEtwTracer());
+ if (tempEtwTracer != NULL && tempEtwTracer->Register () == ERROR_SUCCESS)
+ g_pEtwTracer = tempEtwTracer.Extract ();
+ }
+
+ g_nClrInstanceId = GetRuntimeId() & 0x0000FFFF; // This will give us duplicate ClrInstanceId after UINT16_MAX
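+    // (GetRuntimeId() is masked to its low 16 bits, so instance IDs wrap at 65536.)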
+
+ // Any classes that need some initialization to happen after we've registered the
+ // providers can do so now
+ ETW::TypeSystemLog::PostRegistrationInit();
+}
+
+HRESULT ETW::CEtwTracer::Register()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifndef FEATURE_CORESYSTEM
+ OSVERSIONINFO osVer;
+ osVer.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+
+ if (GetOSVersion(&osVer) == FALSE) {
+ return HRESULT_FROM_WIN32(ERROR_NOT_SUPPORTED);
+ }
+ else if (osVer.dwMajorVersion < ETW_SUPPORTED_MAJORVER) {
+ return HRESULT_FROM_WIN32(ERROR_NOT_SUPPORTED);
+ }
+
+    // If running on OS < Longhorn, skip registration unless the reg key is set,
+    // since ETW registration is expensive (in both time and working set) on older OSes
+ if (osVer.dwMajorVersion < ETW_ENABLED_MAJORVER && !g_fEnableETW && !CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_PreVistaETWEnabled))
+ return HRESULT_FROM_WIN32(ERROR_NOT_SUPPORTED);
+
+ // If running on OS >= Longhorn, skip registration if ETW is not enabled
+ if (osVer.dwMajorVersion >= ETW_ENABLED_MAJORVER && !g_fEnableETW && !CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_VistaAndAboveETWEnabled))
+ return HRESULT_FROM_WIN32(ERROR_NOT_SUPPORTED);
+#endif
+
+ EventRegisterMicrosoft_Windows_DotNETRuntime();
+ EventRegisterMicrosoft_Windows_DotNETRuntimePrivate();
+ EventRegisterMicrosoft_Windows_DotNETRuntimeRundown();
+
+ // Stress Log ETW events are available only on the desktop version of the runtime
+#ifndef FEATURE_CORECLR
+ EventRegisterMicrosoft_Windows_DotNETRuntimeStress();
+#endif // !FEATURE_CORECLR
+
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.RegistrationHandle = Microsoft_Windows_DotNETRuntimeHandle;
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.RegistrationHandle = Microsoft_Windows_DotNETRuntimePrivateHandle;
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context.RegistrationHandle = Microsoft_Windows_DotNETRuntimeRundownHandle;
+#ifndef FEATURE_CORECLR
+ MICROSOFT_WINDOWS_DOTNETRUNTIME_STRESS_PROVIDER_Context.RegistrationHandle = Microsoft_Windows_DotNETRuntimeStressHandle;
+#endif // !FEATURE_CORECLR
+
+ return S_OK;
+}
+
+// #Unregistration
+/*++
+
+Routine Description:
+ Unregisters the provider from ETW. This function
+    should be called only once, during DLL process detach.
+ Not thread safe.
+
+Arguments:
+ none
+
+Return Value:
+ Returns ERROR_SUCCESS
+
+--*/
+HRESULT ETW::CEtwTracer::UnRegister()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ EventUnregisterMicrosoft_Windows_DotNETRuntime();
+ EventUnregisterMicrosoft_Windows_DotNETRuntimePrivate();
+ EventUnregisterMicrosoft_Windows_DotNETRuntimeRundown();
+#ifndef FEATURE_CORECLR
+ EventUnregisterMicrosoft_Windows_DotNETRuntimeStress();
+#endif // !FEATURE_CORECLR
+ return S_OK;
+}
+
+extern "C"
+{
+ ETW_INLINE
+ VOID EtwCallout(REGHANDLE RegHandle,
+ PCEVENT_DESCRIPTOR Descriptor,
+ ULONG ArgumentCount,
+ PEVENT_DATA_DESCRIPTOR EventData)
+ {
+ WRAPPER_NO_CONTRACT;
+ UINT8 providerIndex = 0;
+ if(RegHandle == Microsoft_Windows_DotNETRuntimeHandle) {
+ providerIndex = 0;
+ } else if(RegHandle == Microsoft_Windows_DotNETRuntimeRundownHandle) {
+ providerIndex = 1;
+ } else if(RegHandle == Microsoft_Windows_DotNETRuntimeStressHandle) {
+ providerIndex = 2;
+ } else if(RegHandle == Microsoft_Windows_DotNETRuntimePrivateHandle) {
+ providerIndex = 3;
+ } else {
+            _ASSERTE(!"Provider not one of Runtime, Rundown, Private or Stress");
+ return;
+ }
+
+        // Stack walk events are fired only for events that have a bit set in the etwStackSupportedEvents bitmap
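+        // (each event ID maps to one bit: byte index = Id / 8, bit position = Id % 8)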
+ if(((etwStackSupportedEvents[providerIndex][Descriptor->Id/8]) &
+ (1<<(Descriptor->Id%8))) != 0)
+ {
+ if(RegHandle == Microsoft_Windows_DotNETRuntimeHandle) {
+ ETW::SamplingLog::SendStackTrace(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, &CLRStackWalk, &CLRStackId);
+ } else if(RegHandle == Microsoft_Windows_DotNETRuntimeRundownHandle) {
+ ETW::SamplingLog::SendStackTrace(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context, &CLRStackWalkDCStart, &CLRStackRundownId);
+ } else if(RegHandle == Microsoft_Windows_DotNETRuntimePrivateHandle) {
+ ETW::SamplingLog::SendStackTrace(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, &CLRStackWalkPrivate, &CLRStackPrivateId);
+ } else if(RegHandle == Microsoft_Windows_DotNETRuntimeStressHandle) {
+ ETW::SamplingLog::SendStackTrace(MICROSOFT_WINDOWS_DOTNETRUNTIME_STRESS_PROVIDER_Context, &CLRStackWalkStress, &CLRStackStressId);
+ }
+ }
+ }
+}
+
+extern "C"
+{
+
+ // #EtwCallback:
+ // During the build, MC generates the code to register our provider, and to register
+ // our ETW callback. (This is buried under Intermediates, in a path like
+ // Intermediate\clr\corguids.nativeproj_1723354836\obj1c\x86\ClrEtwAll.h.) The ETW
+ // callback is also generated for us by MC. But we can hook into this generated
+ // callback by #defining MCGEN_PRIVATE_ENABLE_CALLBACK_V2 to be a call to this
+ // function (EtwCallback), thus causing EtwCallback to get called after the
+ // MC-generated code executes.
+ //
+ // This callback function is called whenever an ETW session is enabled or disabled. A
+ // callback function needs to be specified when the provider is registered. C style
+ // callback wrappers are needed during event registration. To handle the callback
+ // action in this class, we pass "this" during provider registration and modify the
+ // context to the relevant context in the C callback later.
+ ETW_INLINE
+ VOID EtwCallback(
+ _In_ LPCGUID SourceId,
+ _In_ ULONG ControlCode,
+ _In_ UCHAR Level,
+ _In_ ULONGLONG MatchAnyKeyword,
+ _In_ ULONGLONG MatchAllKeyword,
+ _In_opt_ PEVENT_FILTER_DESCRIPTOR FilterData,
+ _Inout_opt_ PVOID CallbackContext)
+ {
+ CONTRACTL {
+ NOTHROW;
+ if(g_fEEStarted) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);};
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ STATIC_CONTRACT_FAULT;
+ SO_NOT_MAINLINE;
+ } CONTRACTL_END;
+
+        // Mark that we are the special ETWRundown thread. Currently all this does
+        // is ensure that AVs thrown on this thread are treated as normal exceptions,
+        // which allows us to catch and swallow them. We can do this because we have
+        // a reasonably strong belief that ETW rundown does not change runtime state,
+        // so if an AV happens it is better to simply give up on ETW logging rather
+        // than terminate the process (which is what we would normally do).
+ ClrFlsThreadTypeSwitch etwRundownThreadHolder(ThreadType_ETWRundownThread);
+ PMCGEN_TRACE_CONTEXT context = (PMCGEN_TRACE_CONTEXT)CallbackContext;
+
+ BOOLEAN bIsPublicTraceHandle = (context->RegistrationHandle==Microsoft_Windows_DotNETRuntimeHandle);
+
+ BOOLEAN bIsPrivateTraceHandle = (context->RegistrationHandle==Microsoft_Windows_DotNETRuntimePrivateHandle);
+
+ BOOLEAN bIsRundownTraceHandle = (context->RegistrationHandle==Microsoft_Windows_DotNETRuntimeRundownHandle);
+
+
+        // A manifest-based provider can be enabled in multiple event tracing sessions;
+        // as long as there is at least one enabled session, IsEnabled will be TRUE.
+        // Since classic providers can be enabled in only a single session,
+        // IsEnabled will be TRUE when enabled and FALSE when disabled.
+ BOOL bEnabled =
+ ((ControlCode == EVENT_CONTROL_CODE_ENABLE_PROVIDER) ||
+ (ControlCode == EVENT_CONTROL_CODE_CAPTURE_STATE));
+ if(bEnabled)
+ {
+ // TypeSystemLog needs a notification when certain keywords are modified, so
+ // give it a hook here.
+ if (g_fEEStarted && !g_fEEShutDown && bIsPublicTraceHandle)
+ {
+ ETW::TypeSystemLog::OnKeywordsChanged();
+ }
+
+ if (bIsPrivateTraceHandle)
+ {
+ ETW::GCLog::GCSettingsEvent();
+ if(g_fEEStarted && !g_fEEShutDown)
+ {
+ ETW::EnumerationLog::ModuleRangeRundown();
+ }
+ }
+
+#ifdef _TARGET_AMD64_
+        // We only do this on amd64 (NOT ARM, because ARM uses frame-based stack crawling).
+        // If the JIT keyword has been turned on at the VERBOSE setting (needed to get JIT names),
+        // we assume that good stack traces are also wanted, so we publish unwind information
+        // for ETW to consume.
+ if(bIsPublicTraceHandle && ETW_CATEGORY_ENABLED((*context), TRACE_LEVEL_VERBOSE, CLR_RUNDOWNJIT_KEYWORD))
+ UnwindInfoTable::PublishUnwindInfo(g_fEEStarted != FALSE);
+#endif
+
+ if(g_fEEStarted && !g_fEEShutDown && bIsRundownTraceHandle)
+ {
+ // Fire the runtime information event
+ ETW::InfoLog::RuntimeInformation(ETW::InfoLog::InfoStructs::Callback);
+
+ // Start and End Method/Module Rundowns
+ // Used to fire events that we missed since we started the controller after the process started
+            // Check the flag requesting an immediate start rundown
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNSTART_KEYWORD))
+ ETW::EnumerationLog::StartRundown();
+
+            // Check the flag requesting a delayed end rundown
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_RUNDOWNEND_KEYWORD))
+ ETW::EnumerationLog::EndRundown();
+ }
+
+ if (g_fEEStarted && !g_fEEShutDown && (ControlCode == EVENT_CONTROL_CODE_CAPTURE_STATE))
+ {
+ ETW::EnumerationLog::EnumerateForCaptureState();
+ }
+
+ // Special check for the runtime provider's GCHeapCollectKeyword. Profilers
+ // flick this to force a full GC.
+ if (g_fEEStarted && !g_fEEShutDown && bIsPublicTraceHandle &&
+ ((MatchAnyKeyword & CLR_GCHEAPCOLLECT_KEYWORD) != 0))
+ {
+ // Profilers may (optionally) specify extra data in the filter parameter
+ // to log with the GCStart event.
+ LONGLONG l64ClientSequenceNumber = 0;
+ if ((FilterData != NULL) &&
+ (FilterData->Type == 1) &&
+ (FilterData->Size == sizeof(l64ClientSequenceNumber)))
+ {
+ l64ClientSequenceNumber = *(LONGLONG *) (FilterData->Ptr);
+ }
+ ETW::GCLog::ForceGC(l64ClientSequenceNumber);
+ }
+ }
+#ifdef FEATURE_COMINTEROP
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, CCWRefCountChange))
+ g_pConfig->SetLogCCWRefCountChangeEnabled(bEnabled != 0);
+#endif // FEATURE_COMINTEROP
+
+ }
+}
+
+#endif // FEATURE_REDHAWK
+
+#ifndef FEATURE_REDHAWK
+
+/****************************************************************************/
+/* This is called by the runtime when an exception is thrown */
+/****************************************************************************/
+VOID ETW::ExceptionLog::ExceptionThrown(CrawlFrame *pCf, BOOL bIsReThrownException, BOOL bIsNewException)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ PRECONDITION(GetThread() != NULL);
+ PRECONDITION(GetThread()->GetThrowable() != NULL);
+ } CONTRACTL_END;
+
+ if(!(bIsReThrownException || bIsNewException))
+ {
+ return;
+ }
+ if(!ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ExceptionThrown_V1))
+ {
+ return;
+ }
+ EX_TRY
+ {
+ SString exceptionType(W(""));
+ LPWSTR exceptionMessage = NULL;
+ BOOL bIsCLSCompliant=FALSE, bIsCSE=FALSE, bIsNestedException=FALSE, bHasInnerException=FALSE;
+ UINT16 exceptionFlags=0;
+ PVOID exceptionEIP=0;
+
+ Thread *pThread = GetThread();
+
+ struct
+ {
+ OBJECTREF exceptionObj;
+ OBJECTREF innerExceptionObj;
+ STRINGREF exceptionMessageRef;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ gc.exceptionObj = pThread->GetThrowable();
+ gc.innerExceptionObj = ((EXCEPTIONREF)gc.exceptionObj)->GetInnerException();
+
+ ThreadExceptionState *pExState = pThread->GetExceptionState();
+#ifndef WIN64EXCEPTIONS
+ PTR_ExInfo pExInfo = NULL;
+#else
+ PTR_ExceptionTracker pExInfo = NULL;
+#endif //!WIN64EXCEPTIONS
+ pExInfo = pExState->GetCurrentExceptionTracker();
+ _ASSERTE(pExInfo != NULL);
+ bIsNestedException = (pExInfo->GetPreviousExceptionTracker() != NULL);
+ bIsCSE = (pExInfo->GetCorruptionSeverity() == ProcessCorrupting);
+ bIsCLSCompliant = IsException((gc.exceptionObj)->GetMethodTable()) &&
+ ((gc.exceptionObj)->GetMethodTable() != MscorlibBinder::GetException(kRuntimeWrappedException));
+
+        // A rethrown exception is also a nested exception,
+        // but since we have a separate flag for it, unset the nested flag.
+ if(bIsReThrownException)
+ {
+ bIsNestedException = FALSE;
+ }
+ bHasInnerException = (gc.innerExceptionObj) != NULL;
+
+ exceptionFlags = ((bHasInnerException ? ETW::ExceptionLog::ExceptionStructs::HasInnerException : 0) |
+ (bIsNestedException ? ETW::ExceptionLog::ExceptionStructs::IsNestedException : 0) |
+ (bIsReThrownException ? ETW::ExceptionLog::ExceptionStructs::IsReThrownException : 0) |
+ (bIsCSE ? ETW::ExceptionLog::ExceptionStructs::IsCSE : 0) |
+ (bIsCLSCompliant ? ETW::ExceptionLog::ExceptionStructs::IsCLSCompliant : 0));
+
+ if (pCf->IsFrameless())
+ {
+#ifndef _WIN64
+ exceptionEIP = (PVOID)pCf->GetRegisterSet()->ControlPC;
+#else
+ exceptionEIP = (PVOID)GetIP(pCf->GetRegisterSet()->pContext);
+#endif //!_WIN64
+ }
+ else
+ {
+ exceptionEIP = (PVOID)(pCf->GetFrame()->GetIP());
+ }
+
+ // On platforms other than IA64, we are at the instruction after the faulting instruction
+ // This check has been copied from StackTraceInfo::AppendElement
+ if (!(pCf->HasFaulted() || pCf->IsIPadjusted()) && exceptionEIP != 0)
+ {
+ exceptionEIP = (PVOID)((UINT_PTR)exceptionEIP - 1);
+ }
+
+ gc.exceptionMessageRef = ((EXCEPTIONREF)gc.exceptionObj)->GetMessage();
+ TypeHandle exceptionTypeHandle = (gc.exceptionObj)->GetTypeHandle();
+ exceptionTypeHandle.GetName(exceptionType);
+ WCHAR *exceptionTypeName = (WCHAR *)exceptionType.GetUnicode();
+
+ if(gc.exceptionMessageRef != NULL)
+ {
+ exceptionMessage = (gc.exceptionMessageRef)->GetBuffer();
+ }
+
+ HRESULT exceptionHRESULT = ((EXCEPTIONREF)gc.exceptionObj)->GetHResult();
+
+ FireEtwExceptionThrown_V1(exceptionTypeName,
+ exceptionMessage,
+ exceptionEIP,
+ exceptionHRESULT,
+ exceptionFlags,
+ GetClrInstanceId());
+ GCPROTECT_END();
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/****************************************************************************/
+/* This is called by the runtime when a domain is loaded */
+/****************************************************************************/
+VOID ETW::LoaderLog::DomainLoadReal(BaseDomain *pDomain, __in_opt LPWSTR wszFriendlyName)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_LOADER_KEYWORD))
+ {
+ DWORD dwEventOptions = ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleLoad;
+ ETW::LoaderLog::SendDomainEvent(pDomain, dwEventOptions, wszFriendlyName);
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/****************************************************************************/
+/* This is called by the runtime when an AppDomain is unloaded */
+/****************************************************************************/
+VOID ETW::LoaderLog::DomainUnload(AppDomain *pDomain)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ KEYWORDZERO))
+ {
+ if(!pDomain->NoAccessToHandleTable())
+ {
+ DWORD enumerationOptions = ETW::EnumerationLog::GetEnumerationOptionsFromRuntimeKeywords();
+
+ // Domain unload also causes type unload events
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_TYPE_KEYWORD))
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::TypeUnload;
+ }
+
+ ETW::EnumerationLog::EnumerationHelper(NULL, pDomain, enumerationOptions);
+ }
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/****************************************************************************/
+/* This is called by the runtime when a LoaderAllocator is unloaded */
+/****************************************************************************/
+VOID ETW::LoaderLog::CollectibleLoaderAllocatorUnload(AssemblyLoaderAllocator *pLoaderAllocator)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ KEYWORDZERO))
+ {
+ DWORD enumerationOptions = ETW::EnumerationLog::GetEnumerationOptionsFromRuntimeKeywords();
+
+ // Collectible Loader Allocator unload also causes type unload events
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_TYPE_KEYWORD))
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::TypeUnload;
+ }
+
+ ETW::EnumerationLog::IterateCollectibleLoaderAllocator(pLoaderAllocator, enumerationOptions);
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/****************************************************************************/
+/* This is called by the runtime when the runtime is loaded.
+   The function is reached both via the callback mechanism and via regular ETW events;
+   the type parameter differentiates a callback from a normal call. */
+/****************************************************************************/
+VOID ETW::InfoLog::RuntimeInformation(INT32 type)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ EX_TRY {
+ if((type == ETW::InfoLog::InfoStructs::Normal && ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, RuntimeInformationStart))
+ ||
+ (type == ETW::InfoLog::InfoStructs::Callback && ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context, RuntimeInformationDCStart))
+ )
+ {
+ PCWSTR szDtraceOutput1=W(""),szDtraceOutput2=W("");
+ UINT8 startupMode = 0;
+ UINT startupFlags = 0;
+ WCHAR dllPath[MAX_PATH+1] = {0};
+ UINT8 Sku = 0;
+            _ASSERTE(g_fEEManagedEXEStartup || //CLR started due to a managed exe
+                g_fEEIJWStartup || //CLR started as a mixed-mode assembly
+                CLRHosted() || g_fEEHostedStartup || //CLR started through one of the hosting APIs; CLRHosted() returns true if the CLR started through the V2 interface,
+                                                     //while g_fEEHostedStartup is true if the CLR is hosted through the V1 API.
+                g_fEEComActivatedStartup || //CLR started as a COM object
+                g_fEEOtherStartup ); //If none of the four cases above apply (for example ngen or ildasm), we assume an "other" startup
+
+#ifdef FEATURE_CORECLR
+ Sku = ETW::InfoLog::InfoStructs::CoreCLR;
+#else
+ Sku = ETW::InfoLog::InfoStructs::DesktopCLR;
+#endif //FEATURE_CORECLR
+
+ //version info for clr.dll
+ USHORT vmMajorVersion = VER_MAJORVERSION;
+ USHORT vmMinorVersion = VER_MINORVERSION;
+ USHORT vmBuildVersion = VER_PRODUCTBUILD;
+ USHORT vmQfeVersion = VER_PRODUCTBUILD_QFE;
+
+ //version info for mscorlib.dll
+ USHORT bclMajorVersion = VER_ASSEMBLYMAJORVERSION;
+ USHORT bclMinorVersion = VER_ASSEMBLYMINORVERSION;
+ USHORT bclBuildVersion = VER_ASSEMBLYBUILD;
+ USHORT bclQfeVersion = VER_ASSEMBLYBUILD_QFE;
+
+ LPCGUID comGUID=&g_EEComObjectGuid;
+
+ LPWSTR lpwszCommandLine = W("");
+ LPWSTR lpwszRuntimeDllPath = (LPWSTR)dllPath;
+
+#ifndef FEATURE_CORECLR
+ startupFlags = CorHost2::GetStartupFlags();
+#endif //!FEATURE_CORECLR
+
+            // Determine the startup mode
+ if(g_fEEIJWStartup)
+ {
+ //IJW Mode
+ startupMode = ETW::InfoLog::InfoStructs::IJW;
+ }
+ else if(g_fEEManagedEXEStartup)
+ {
+ //managed exe
+ startupMode = ETW::InfoLog::InfoStructs::ManagedExe;
+ lpwszCommandLine = WszGetCommandLine();
+ }
+ else if (CLRHosted() || g_fEEHostedStartup)
+ {
+ //Hosted CLR
+ startupMode = ETW::InfoLog::InfoStructs::HostedCLR;
+ }
+ else if(g_fEEComActivatedStartup)
+ {
+ //com activated
+ startupMode = ETW::InfoLog::InfoStructs::COMActivated;
+ }
+ else if(g_fEEOtherStartup)
+ {
+ //startup type is other
+ startupMode = ETW::InfoLog::InfoStructs::Other;
+ }
+
+ _ASSERTE (NumItems(dllPath) > MAX_PATH);
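+            // dllPath has MAX_PATH+1 elements, so writing the terminator at index MAX_PATH below stays in bounds.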
+ // if WszGetModuleFileName fails, we return an empty string
+ if (!WszGetModuleFileName(GetCLRModule(), dllPath, MAX_PATH)) {
+ dllPath[0] = 0;
+ }
+ dllPath[MAX_PATH] = 0;
+
+ if(type == ETW::InfoLog::InfoStructs::Callback)
+ {
+ FireEtwRuntimeInformationDCStart( GetClrInstanceId(),
+ Sku,
+ bclMajorVersion,
+ bclMinorVersion,
+ bclBuildVersion,
+ bclQfeVersion,
+ vmMajorVersion,
+ vmMinorVersion,
+ vmBuildVersion,
+ vmQfeVersion,
+ startupFlags,
+ startupMode,
+ lpwszCommandLine,
+ comGUID,
+ lpwszRuntimeDllPath );
+ }
+ else
+ {
+ FireEtwRuntimeInformationStart( GetClrInstanceId(),
+ Sku,
+ bclMajorVersion,
+ bclMinorVersion,
+ bclBuildVersion,
+ bclQfeVersion,
+ vmMajorVersion,
+ vmMinorVersion,
+ vmBuildVersion,
+ vmQfeVersion,
+ startupFlags,
+ startupMode,
+ lpwszCommandLine,
+ comGUID,
+ lpwszRuntimeDllPath );
+ }
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/*******************************************************/
+/* This is called by the runtime when a method is jitted completely */
+/*******************************************************/
+VOID ETW::MethodLog::MethodJitted(MethodDesc *pMethodDesc, SString *namespaceOrClassName, SString *methodName, SString *methodSignature, SIZE_T pCode, ReJITID rejitID)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_JIT_KEYWORD))
+ {
+ ETW::MethodLog::SendMethodEvent(pMethodDesc, ETW::EnumerationLog::EnumerationStructs::JitMethodLoad, TRUE, namespaceOrClassName, methodName, methodSignature, pCode, rejitID);
+ }
+
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_JITTEDMETHODILTONATIVEMAP_KEYWORD))
+ {
+ // The call to SendMethodILToNativeMapEvent assumes that the debugger's lazy
+ // data has already been initialized.
+
+ // g_pDebugInterface is initialized on startup on desktop CLR, regardless of whether a debugger
+ // or profiler is loaded. So it should always be available.
+ _ASSERTE(g_pDebugInterface != NULL);
+ g_pDebugInterface->InitializeLazyDataIfNecessary();
+
+ ETW::MethodLog::SendMethodILToNativeMapEvent(pMethodDesc, ETW::EnumerationLog::EnumerationStructs::JitMethodILToNativeMap, rejitID);
+ }
+
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/*************************************************/
+/* This is called by the runtime when method jitting started */
+/*************************************************/
+VOID ETW::MethodLog::MethodJitting(MethodDesc *pMethodDesc, SString *namespaceOrClassName, SString *methodName, SString *methodSignature)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ PRECONDITION(pMethodDesc != NULL);
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_VERBOSE,
+ CLR_JIT_KEYWORD))
+ {
+ pMethodDesc->GetMethodInfo(*namespaceOrClassName, *methodName, *methodSignature);
+ ETW::MethodLog::SendMethodJitStartEvent(pMethodDesc, namespaceOrClassName, methodName, methodSignature);
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/**********************************************************************/
+/* This is called by the runtime when a single jit helper method with stub is initialized */
+/**********************************************************************/
+VOID ETW::MethodLog::StubInitialized(ULONGLONG ullHelperStartAddress, LPCWSTR pHelperName)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ PRECONDITION(ullHelperStartAddress != 0);
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_JIT_KEYWORD))
+ {
+ DWORD dwHelperSize=0;
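+            // Recover the stub object from its start address so the event can report the helper's code size.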
+ Stub::RecoverStubAndSize((TADDR)ullHelperStartAddress, &dwHelperSize);
+ ETW::MethodLog::SendHelperEvent(ullHelperStartAddress, dwHelperSize, pHelperName);
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/**********************************************************/
+/* This is called by the runtime when helpers with stubs are initialized */
+/**********************************************************/
+VOID ETW::MethodLog::StubsInitialized(PVOID *pHelperStartAddresss, PVOID *pHelperNames, LONG lNoOfHelpers)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_JIT_KEYWORD))
+ {
+ for(int i=0; i<lNoOfHelpers; i++)
+ {
+ if(pHelperStartAddresss[i])
+ {
+ StubInitialized((ULONGLONG)pHelperStartAddresss[i], (LPCWSTR)pHelperNames[i]);
+ }
+ }
+ }
+}
+
+/****************************************************************************/
+/* This is called by the runtime when a dynamic method is destroyed */
+/****************************************************************************/
+VOID ETW::MethodLog::DynamicMethodDestroyed(MethodDesc *pMethodDesc)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_JIT_KEYWORD))
+ ETW::MethodLog::SendMethodEvent(pMethodDesc, ETW::EnumerationLog::EnumerationStructs::JitMethodUnload, TRUE);
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/****************************************************************************/
+/* This is called by the runtime when a ngen method is restored */
+/****************************************************************************/
+VOID ETW::MethodLog::MethodRestored(MethodDesc *pMethodDesc)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ if(IsRuntimeNgenKeywordEnabledAndNotSuppressed()
+ &&
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_STARTENUMERATION_KEYWORD))
+ {
+ ETW::MethodLog::SendMethodEvent(pMethodDesc, ETW::EnumerationLog::EnumerationStructs::NgenMethodLoad, FALSE);
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/****************************************************************************/
+/* This is called by the runtime when a method table is restored */
+/****************************************************************************/
+VOID ETW::MethodLog::MethodTableRestored(MethodTable *pMethodTable)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+ EX_TRY
+ {
+ if(IsRuntimeNgenKeywordEnabledAndNotSuppressed()
+ &&
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_STARTENUMERATION_KEYWORD))
+ {
+#ifdef FEATURE_REMOTING
+ if(!pMethodTable->IsTransparentProxy())
+#endif
+ {
+ MethodTable::MethodIterator iter(pMethodTable);
+ for (; iter.IsValid(); iter.Next())
+ {
+ MethodDesc *pMD = (MethodDesc *)(iter.GetMethodDesc());
+ if(pMD && pMD->IsRestored() && pMD->GetMethodTable_NoLogging() == pMethodTable)
+ ETW::MethodLog::SendMethodEvent(pMD, ETW::EnumerationLog::EnumerationStructs::NgenMethodLoad, FALSE);
+ }
+ }
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+
+/****************************************************************************/
+/* This is called by the runtime when a Strong Name Verification Starts */
+/****************************************************************************/
+VOID ETW::SecurityLog::StrongNameVerificationStart(DWORD dwInFlags, __in LPWSTR strFullyQualifiedAssemblyName)
+{
+ WRAPPER_NO_CONTRACT;
+#ifndef FEATURE_CORECLR
+ FireEtwStrongNameVerificationStart_V1(dwInFlags, 0, strFullyQualifiedAssemblyName, GetClrInstanceId());
+#endif // !FEATURE_CORECLR
+}
+
+
+/****************************************************************************/
+/* This is called by the runtime when a Strong Name Verification Ends */
+/****************************************************************************/
+VOID ETW::SecurityLog::StrongNameVerificationStop(DWORD dwInFlags,ULONG result, __in LPWSTR strFullyQualifiedAssemblyName)
+{
+ WRAPPER_NO_CONTRACT;
+#ifndef FEATURE_CORECLR
+ FireEtwStrongNameVerificationStop_V1(dwInFlags, result, strFullyQualifiedAssemblyName, GetClrInstanceId());
+#endif // !FEATURE_CORECLR
+}
+
+/****************************************************************************/
+/* This is called by the runtime when field transparency calculations begin */
+/****************************************************************************/
+void ETW::SecurityLog::FireFieldTransparencyComputationStart(LPCWSTR wszFieldName,
+ LPCWSTR wszModuleName,
+ DWORD dwAppDomain)
+{
+ WRAPPER_NO_CONTRACT;
+ FireEtwFieldTransparencyComputationStart(wszFieldName, wszModuleName, dwAppDomain, GetClrInstanceId());
+}
+
+/****************************************************************************/
+/* This is called by the runtime when field transparency calculations end */
+/****************************************************************************/
+void ETW::SecurityLog::FireFieldTransparencyComputationEnd(LPCWSTR wszFieldName,
+ LPCWSTR wszModuleName,
+ DWORD dwAppDomain,
+ BOOL fIsCritical,
+ BOOL fIsTreatAsSafe)
+{
+ WRAPPER_NO_CONTRACT;
+ FireEtwFieldTransparencyComputationEnd(wszFieldName, wszModuleName, dwAppDomain, fIsCritical, fIsTreatAsSafe, GetClrInstanceId());
+}
+
+/*****************************************************************************/
+/* This is called by the runtime when method transparency calculations begin */
+/*****************************************************************************/
+void ETW::SecurityLog::FireMethodTransparencyComputationStart(LPCWSTR wszMethodName,
+ LPCWSTR wszModuleName,
+ DWORD dwAppDomain)
+{
+ WRAPPER_NO_CONTRACT;
+ FireEtwMethodTransparencyComputationStart(wszMethodName, wszModuleName, dwAppDomain, GetClrInstanceId());
+}
+
+/*****************************************************************************/
+/* This is called by the runtime when method transparency calculations end */
+/*****************************************************************************/
+void ETW::SecurityLog::FireMethodTransparencyComputationEnd(LPCWSTR wszMethodName,
+ LPCWSTR wszModuleName,
+ DWORD dwAppDomain,
+ BOOL fIsCritical,
+ BOOL fIsTreatAsSafe)
+{
+ WRAPPER_NO_CONTRACT;
+ FireEtwMethodTransparencyComputationEnd(wszMethodName, wszModuleName, dwAppDomain, fIsCritical, fIsTreatAsSafe, GetClrInstanceId());
+}
+
+/*****************************************************************************/
+/* This is called by the runtime when module transparency calculations begin */
+/*****************************************************************************/
+void ETW::SecurityLog::FireModuleTransparencyComputationStart(LPCWSTR wszModuleName,
+ DWORD dwAppDomain)
+{
+ WRAPPER_NO_CONTRACT;
+ FireEtwModuleTransparencyComputationStart(wszModuleName, dwAppDomain, GetClrInstanceId());
+}
+
+/****************************************************************************/
+/* This is called by the runtime when module transparency calculations end */
+/****************************************************************************/
+void ETW::SecurityLog::FireModuleTransparencyComputationEnd(LPCWSTR wszModuleName,
+ DWORD dwAppDomain,
+ BOOL fIsAllCritical,
+ BOOL fIsAllTransparent,
+ BOOL fIsTreatAsSafe,
+ BOOL fIsOpportunisticallyCritical,
+ DWORD dwSecurityRuleSet)
+{
+ WRAPPER_NO_CONTRACT;
+ FireEtwModuleTransparencyComputationEnd(wszModuleName, dwAppDomain, fIsAllCritical, fIsAllTransparent, fIsTreatAsSafe, fIsOpportunisticallyCritical, dwSecurityRuleSet, GetClrInstanceId());
+}
+
+/****************************************************************************/
+/* This is called by the runtime when token transparency calculations begin */
+/****************************************************************************/
+void ETW::SecurityLog::FireTokenTransparencyComputationStart(DWORD dwToken,
+ LPCWSTR wszModuleName,
+ DWORD dwAppDomain)
+{
+ WRAPPER_NO_CONTRACT;
+ FireEtwTokenTransparencyComputationStart(dwToken, wszModuleName, dwAppDomain, GetClrInstanceId());
+}
+
+/****************************************************************************/
+/* This is called by the runtime when token transparency calculations end */
+/****************************************************************************/
+void ETW::SecurityLog::FireTokenTransparencyComputationEnd(DWORD dwToken,
+ LPCWSTR wszModuleName,
+ DWORD dwAppDomain,
+ BOOL fIsCritical,
+ BOOL fIsTreatAsSafe)
+{
+ WRAPPER_NO_CONTRACT;
+ FireEtwTokenTransparencyComputationEnd(dwToken, wszModuleName, dwAppDomain, fIsCritical, fIsTreatAsSafe, GetClrInstanceId());
+}
+
+/*****************************************************************************/
+/* This is called by the runtime when type transparency calculations begin */
+/*****************************************************************************/
+void ETW::SecurityLog::FireTypeTransparencyComputationStart(LPCWSTR wszTypeName,
+ LPCWSTR wszModuleName,
+ DWORD dwAppDomain)
+{
+ WRAPPER_NO_CONTRACT;
+ FireEtwTypeTransparencyComputationStart(wszTypeName, wszModuleName, dwAppDomain, GetClrInstanceId());
+}
+
+/****************************************************************************/
+/* This is called by the runtime when type transparency calculations end */
+/****************************************************************************/
+void ETW::SecurityLog::FireTypeTransparencyComputationEnd(LPCWSTR wszTypeName,
+ LPCWSTR wszModuleName,
+ DWORD dwAppDomain,
+ BOOL fIsAllCritical,
+ BOOL fIsAllTransparent,
+ BOOL fIsCritical,
+ BOOL fIsTreatAsSafe)
+{
+ WRAPPER_NO_CONTRACT;
+ FireEtwTypeTransparencyComputationEnd(wszTypeName, wszModuleName, dwAppDomain, fIsAllCritical, fIsAllTransparent, fIsCritical, fIsTreatAsSafe, GetClrInstanceId());
+}
+
+/**********************************************************************************/
+/* This is called by the runtime when a module is loaded */
+/* liReportedSharedModule will be 0 when this module is reported for the first time */
+/**********************************************************************************/
+VOID ETW::LoaderLog::ModuleLoad(Module *pModule, LONG liReportedSharedModule)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ DWORD enumerationOptions = ETW::EnumerationLog::EnumerationStructs::None;
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ KEYWORDZERO))
+ {
+ BOOL bTraceFlagLoaderSet = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_LOADER_KEYWORD);
+ BOOL bTraceFlagNgenMethodSet = IsRuntimeNgenKeywordEnabledAndNotSuppressed();
+ BOOL bTraceFlagStartRundownSet = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_STARTENUMERATION_KEYWORD);
+ BOOL bTraceFlagPerfTrackSet = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_PERFTRACK_KEYWORD);
+
+ if(liReportedSharedModule == 0)
+ {
+
+ if(bTraceFlagLoaderSet)
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleLoad;
+ if (bTraceFlagPerfTrackSet)
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::ModuleRangeLoad;
+ if(bTraceFlagNgenMethodSet && bTraceFlagStartRundownSet)
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::NgenMethodLoad;
+
+ if(pModule->IsManifest() && bTraceFlagLoaderSet)
+ ETW::LoaderLog::SendAssemblyEvent(pModule->GetAssembly(), enumerationOptions);
+
+ if(bTraceFlagLoaderSet || bTraceFlagPerfTrackSet)
+ ETW::LoaderLog::SendModuleEvent(pModule, ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleLoad | ETW::EnumerationLog::EnumerationStructs::ModuleRangeLoad);
+
+ ETW::EnumerationLog::EnumerationHelper(pModule, NULL, enumerationOptions);
+ }
+
+            // We want to report DomainModule events whenever a module is loaded in any AppDomain
+ if(bTraceFlagLoaderSet)
+ ETW::LoaderLog::SendModuleEvent(pModule, ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleLoad, TRUE);
+ }
+
+ {
+ BOOL bTraceFlagPerfTrackPrivateSet = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_PERFTRACK_PRIVATE_KEYWORD);
+ if (liReportedSharedModule == 0 && bTraceFlagPerfTrackPrivateSet)
+ {
+ enumerationOptions |= ETW::EnumerationLog::EnumerationStructs::ModuleRangeLoadPrivate;
+ ETW::LoaderLog::SendModuleRange(pModule, enumerationOptions);
+ }
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/****************************************************************************/
+/* This is called by the runtime when the process is being shutdown */
+/****************************************************************************/
+VOID ETW::EnumerationLog::ProcessShutdown()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, TRACE_LEVEL_INFORMATION, KEYWORDZERO))
+ {
+ DWORD enumerationOptions = GetEnumerationOptionsFromRuntimeKeywords();
+
+ // Send unload events for all remaining domains, including shared domain and
+ // default domain.
+ ETW::EnumerationLog::EnumerationHelper(NULL /* module filter */, NULL /* domain filter */, enumerationOptions);
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/****************************************************************************/
+/****************************************************************************/
+/* Beginning of helper functions */
+/****************************************************************************/
+/****************************************************************************/
+
+/****************************************************************************/
+/* This routine is used to send a domain load/unload or rundown event */
+/****************************************************************************/
+VOID ETW::LoaderLog::SendDomainEvent(BaseDomain *pBaseDomain, DWORD dwEventOptions, LPCWSTR wszFriendlyName)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ if(!pBaseDomain)
+ return;
+
+ PCWSTR szDtraceOutput1=W("");
+ BOOL bIsDefaultDomain = pBaseDomain->IsDefaultDomain();
+ BOOL bIsAppDomain = pBaseDomain->IsAppDomain();
+ BOOL bIsExecutable = bIsAppDomain ? !(pBaseDomain->AsAppDomain()->IsPassiveDomain()) : FALSE;
+ BOOL bIsSharedDomain = pBaseDomain->IsSharedDomain();
+ UINT32 uSharingPolicy = bIsAppDomain?(pBaseDomain->AsAppDomain()->GetSharePolicy()):0;
+
+ ULONGLONG ullDomainId = (ULONGLONG)pBaseDomain;
+ ULONG ulDomainFlags = ((bIsDefaultDomain ? ETW::LoaderLog::LoaderStructs::DefaultDomain : 0) |
+ (bIsExecutable ? ETW::LoaderLog::LoaderStructs::ExecutableDomain : 0) |
+ (bIsSharedDomain ? ETW::LoaderLog::LoaderStructs::SharedDomain : 0) |
+ (uSharingPolicy<<28));
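+    // The sharing policy is packed into the top bits of the domain flags (shifted left by 28).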
+
+ LPCWSTR wsEmptyString = W("");
+ LPCWSTR wsSharedString = W("SharedDomain");
+
+ LPWSTR lpswzDomainName = (LPWSTR)wsEmptyString;
+
+ if(bIsAppDomain)
+ {
+ if(wszFriendlyName)
+ lpswzDomainName = (PWCHAR)wszFriendlyName;
+ else
+ lpswzDomainName = (PWCHAR)pBaseDomain->AsAppDomain()->GetFriendlyName();
+ }
+ else
+ lpswzDomainName = (LPWSTR)wsSharedString;
+
+    /* prepare event args for ETW and ETM */
+ szDtraceOutput1 = (PCWSTR)lpswzDomainName;
+
+ if(dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleLoad)
+ {
+ FireEtwAppDomainLoad_V1(ullDomainId, ulDomainFlags, szDtraceOutput1, pBaseDomain->GetId().m_dwId, GetClrInstanceId());
+ }
+ else if(dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleUnload)
+ {
+ FireEtwAppDomainUnload_V1(ullDomainId, ulDomainFlags, szDtraceOutput1, pBaseDomain->GetId().m_dwId, GetClrInstanceId());
+ }
+ else if(dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart)
+ {
+ FireEtwAppDomainDCStart_V1(ullDomainId, ulDomainFlags, szDtraceOutput1, pBaseDomain->GetId().m_dwId, GetClrInstanceId());
+ }
+ else if(dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd)
+ {
+ FireEtwAppDomainDCEnd_V1(ullDomainId, ulDomainFlags, szDtraceOutput1, pBaseDomain->GetId().m_dwId, GetClrInstanceId());
+ }
+ else
+ {
+ _ASSERTE((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleLoad) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleUnload) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd));
+ }
+}
+
+/********************************************************/
+/* This routine is used to send thread rundown events when ARM or threading rundown is enabled */
+/********************************************************/
+VOID ETW::EnumerationLog::SendThreadRundownEvent()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ Thread *pThread = NULL;
+
+ // Take the thread store lock while we enumerate threads.
+ ThreadStoreLockHolder tsl;
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ {
+ if (pThread->IsUnstarted() || pThread->IsDead())
+ continue;
+
+ // Send thread rundown provider events and thread created runtime provider
+ // events (depending on which are enabled)
+ ThreadLog::FireThreadDC(pThread);
+ ThreadLog::FireThreadCreated(pThread);
+ }
+#endif // !DACCESS_COMPILE
+}
+
+/****************************************************************************/
+/* This routine is used to send an assembly load/unload or rundown event ****/
+/****************************************************************************/
+
+VOID ETW::LoaderLog::SendAssemblyEvent(Assembly *pAssembly, DWORD dwEventOptions)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ if(!pAssembly)
+ return;
+
+ PCWSTR szDtraceOutput1=W("");
+ BOOL bIsDynamicAssembly = pAssembly->IsDynamic();
+ BOOL bIsCollectibleAssembly = pAssembly->IsCollectible();
+ BOOL bIsDomainNeutral = pAssembly->IsDomainNeutral() ;
+ BOOL bHasNativeImage = pAssembly->GetManifestFile()->HasNativeImage();
+
+ ULONGLONG ullAssemblyId = (ULONGLONG)pAssembly;
+ ULONGLONG ullDomainId = (ULONGLONG)pAssembly->GetDomain();
+ ULONGLONG ullBindingID = 0;
+#if (defined FEATURE_PREJIT) && (defined FEATURE_FUSION)
+ ullBindingID = pAssembly->GetManifestFile()->GetBindingID();
+#endif
+ ULONG ulAssemblyFlags = ((bIsDomainNeutral ? ETW::LoaderLog::LoaderStructs::DomainNeutralAssembly : 0) |
+ (bIsDynamicAssembly ? ETW::LoaderLog::LoaderStructs::DynamicAssembly : 0) |
+ (bHasNativeImage ? ETW::LoaderLog::LoaderStructs::NativeAssembly : 0) |
+ (bIsCollectibleAssembly ? ETW::LoaderLog::LoaderStructs::CollectibleAssembly : 0));
+
+ SString sAssemblyPath;
+ pAssembly->GetDisplayName(sAssemblyPath);
+ LPWSTR lpszAssemblyPath = (LPWSTR)sAssemblyPath.GetUnicode();
+
+/* prepare event args for ETW and ETM */
+ szDtraceOutput1 = (PCWSTR)lpszAssemblyPath;
+
+ if(dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleLoad)
+ {
+ FireEtwAssemblyLoad_V1(ullAssemblyId, ullDomainId, ullBindingID, ulAssemblyFlags, szDtraceOutput1, GetClrInstanceId());
+ }
+ else if(dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleUnload)
+ {
+ FireEtwAssemblyUnload_V1(ullAssemblyId, ullDomainId, ullBindingID, ulAssemblyFlags, szDtraceOutput1, GetClrInstanceId());
+ }
+ else if(dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart)
+ {
+ FireEtwAssemblyDCStart_V1(ullAssemblyId, ullDomainId, ullBindingID, ulAssemblyFlags, szDtraceOutput1, GetClrInstanceId());
+ }
+ else if(dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd)
+ {
+ FireEtwAssemblyDCEnd_V1(ullAssemblyId, ullDomainId, ullBindingID, ulAssemblyFlags, szDtraceOutput1, GetClrInstanceId());
+ }
+ else
+ {
+ _ASSERTE((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleLoad) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleUnload) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd));
+ }
+}
+
+ETW_INLINE
+ ULONG
+ ETW::LoaderLog::SendModuleRange(
+ __in Module *pModule,
+ __in DWORD dwEventOptions)
+
+{
+ ULONG Result = ERROR_SUCCESS;
+
+
+ // do not fire the ETW event when:
+ // 1. We did not load the native image
+ // 2. We do not have IBC data for the native image
+ if( !pModule || !pModule->HasNativeImage() || !pModule->IsIbcOptimized() )
+ {
+ return Result;
+ }
+
+ // get information about the hot sections from the native image that has been loaded
+ COUNT_T cbSizeOfSectionTable;
+ CORCOMPILE_VIRTUAL_SECTION_INFO* pVirtualSectionsTable = (CORCOMPILE_VIRTUAL_SECTION_INFO* )pModule->GetNativeImage()->GetVirtualSectionsTable(&cbSizeOfSectionTable);
+
+ COUNT_T RangeCount = cbSizeOfSectionTable/sizeof(CORCOMPILE_VIRTUAL_SECTION_INFO);
+
+ // if we do not have any hot ranges (RangeCount == 0), the loop below fires no events
+
+ // Figure out the rest of the event data
+ UINT16 ClrInstanceId = GetClrInstanceId();
+ UINT64 ModuleID = (ULONGLONG)(TADDR) pModule;
+
+ for (COUNT_T i = 0; i < RangeCount; ++i)
+ {
+ DWORD rangeBegin = pVirtualSectionsTable[i].VirtualAddress;
+ DWORD rangeSize = pVirtualSectionsTable[i].Size;
+ DWORD sectionType = pVirtualSectionsTable[i].SectionType;
+
+ UINT8 ibcType = VirtualSectionData::IBCType(sectionType);
+ UINT8 rangeType = VirtualSectionData::RangeType(sectionType);
+ UINT16 virtualSectionType = VirtualSectionData::VirtualSectionType(sectionType);
+ BOOL isIBCProfiledColdSection = VirtualSectionData::IsIBCProfiledColdSection(sectionType);
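+ // SectionType is a packed DWORD; the VirtualSectionData helpers above pull the
+ // IBC type, the range type (hot vs. cold), the section kind, and the
+ // IBC-profiled-cold flag out of it (the exact bit assignments live alongside
+ // CORCOMPILE_VIRTUAL_SECTION_INFO's definition).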
+ if (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::ModuleRangeLoad)
+ {
+ if (isIBCProfiledColdSection)
+ Result &= FireEtwModuleRangeLoad(ClrInstanceId, ModuleID, rangeBegin, rangeSize, rangeType);
+ }
+ else if (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::ModuleRangeDCStart)
+ {
+ if (isIBCProfiledColdSection)
+ Result &= FireEtwModuleRangeDCStart(ClrInstanceId, ModuleID, rangeBegin, rangeSize, rangeType);
+ }
+ else if (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::ModuleRangeDCEnd)
+ {
+ if (isIBCProfiledColdSection)
+ Result &= FireEtwModuleRangeDCEnd(ClrInstanceId, ModuleID, rangeBegin, rangeSize, rangeType);
+ }
+ // Fire private events if they are requested.
+ if (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::ModuleRangeLoadPrivate)
+ {
+ Result &= FireEtwModuleRangeLoadPrivate(ClrInstanceId, ModuleID, rangeBegin, rangeSize, rangeType, ibcType, virtualSectionType);
+ }
+ }
+ return Result;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Helper that takes a module, and returns the managed and native PDB information
+// corresponding to that module. Used by the routine that fires the module load / unload
+// events.
+//
+// Arguments:
+// * pModule - Module to examine
+// * pCvInfoIL - [out] CV_INFO_PDB70 corresponding to managed PDB for this module
+// (the last debug directory entry in the PE File), if it exists. If it doesn't
+// exist, this is zeroed out.
+// * pCvInfoNative - [out] CV_INFO_PDB70 corresponding to native NGEN PDB for this
+// module (the next-to-last debug directory entry in the PE File), if it exists.
+// If it doesn't exist, this is zeroed out.
+//
+// Notes:
+// * This method only understands the CV_INFO_PDB70 / RSDS format. If the format
+// changes, this function will act as if there are no debug directory entries.
+// Module load / unload events will still be fired, but all PDB info will be
+// zeroed out.
+// * The raw data in the PE file's debug directory entries are assumed to be
+// untrusted, and reported sizes of buffers are verified against their data.
+//
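+// * For reference, this function assumes the RSDS record shape sketched below
+//   (the authoritative CV_INFO_PDB70 definition lives in the debugger headers;
+//   the MAX_PATH-sized buffer here is an assumption for illustration):
+//
+//       struct CV_INFO_PDB70
+//       {
+//           DWORD magic;          // == CV_SIGNATURE_RSDS
+//           GUID  signature;      // PDB GUID, matched against the .pdb file
+//           DWORD age;            // PDB age
+//           char  path[MAX_PATH]; // UTF-8 path; may be truncated on disk
+//       };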
+
+static void GetCodeViewInfo(Module * pModule, CV_INFO_PDB70 * pCvInfoIL, CV_INFO_PDB70 * pCvInfoNative)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE (pModule != NULL);
+ _ASSERTE (pCvInfoIL != NULL);
+ _ASSERTE (pCvInfoNative != NULL);
+
+ ZeroMemory(pCvInfoIL, sizeof(*pCvInfoIL));
+ ZeroMemory(pCvInfoNative, sizeof(*pCvInfoNative));
+
+ PTR_PEFile pPEFile = pModule->GetFile();
+ _ASSERTE(pPEFile != NULL);
+
+ PTR_PEImageLayout pLayout = NULL;
+ if (pPEFile->HasNativeImage())
+ {
+ pLayout = pPEFile->GetLoadedNative();
+ }
+ else if (pPEFile->HasOpenedILimage())
+ {
+ pLayout = pPEFile->GetLoadedIL();
+ }
+
+ if (pLayout == NULL)
+ {
+ // This can happen for reflection-loaded modules
+ return;
+ }
+
+ if (!pLayout->HasNTHeaders())
+ {
+ // Without NT headers, we'll have a tough time finding the debug directory
+ // entries. This can happen for nlp files.
+ return;
+ }
+
+ if (!pLayout->HasDirectoryEntry(IMAGE_DIRECTORY_ENTRY_DEBUG))
+ return;
+
+ COUNT_T cbDebugEntries;
+ IMAGE_DEBUG_DIRECTORY * rgDebugEntries =
+ (IMAGE_DEBUG_DIRECTORY *) pLayout->GetDirectoryEntryData(IMAGE_DIRECTORY_ENTRY_DEBUG, &cbDebugEntries);
+
+ if (cbDebugEntries < sizeof(IMAGE_DEBUG_DIRECTORY))
+ return;
+
+ // Since rgDebugEntries is an array of IMAGE_DEBUG_DIRECTORYs, cbDebugEntries
+ // should be a multiple of sizeof(IMAGE_DEBUG_DIRECTORY).
+ if (cbDebugEntries % sizeof(IMAGE_DEBUG_DIRECTORY) != 0)
+ return;
+
+ // Temporary storage for a CV_INFO_PDB70 and its size (which could be less than
+ // sizeof(CV_INFO_PDB70); see below).
+ struct PdbInfo
+ {
+ CV_INFO_PDB70 * m_pPdb70;
+ ULONG m_cbPdb70;
+ };
+
+ // Iterate through all debug directory entries. The very last one will be the
+ // managed PDB entry. The next to last one (if it exists) will be the (native) NGEN
+ // PDB entry. Treat raw bytes we read as untrusted.
+ PdbInfo pdbInfoLast = {0};
+ PdbInfo pdbInfoNextToLast = {0};
+ int cEntries = cbDebugEntries / sizeof(IMAGE_DEBUG_DIRECTORY);
+ for (int i = 0; i < cEntries; i++)
+ {
+ if (rgDebugEntries[i].Type != IMAGE_DEBUG_TYPE_CODEVIEW)
+ continue;
+
+ // Get raw data pointed to by this IMAGE_DEBUG_DIRECTORY
+
+ // Some compilers set PointerToRawData but not AddressOfRawData, as they put
+ // the data in an unmapped part at the end of the file
+ RVA rvaOfRawData = (rgDebugEntries[i].AddressOfRawData != NULL) ?
+ rgDebugEntries[i].AddressOfRawData :
+ pLayout->OffsetToRva(rgDebugEntries[i].PointerToRawData);
+
+ ULONG cbDebugData = rgDebugEntries[i].SizeOfData;
+ if (cbDebugData < (offsetof(CV_INFO_PDB70, magic) + sizeof(((CV_INFO_PDB70*)0)->magic)))
+ {
+ // raw data too small to contain the magic number at the expected spot, so its
+ // format is not recognizable. Skip
+ continue;
+ }
+
+ if (!pLayout->CheckRva(rvaOfRawData, cbDebugData))
+ {
+ // Memory claimed to belong to the raw data does not fit.
+ // IMAGE_DEBUG_DIRECTORY is outright corrupt. Do not include PDB info in
+ // event at all.
+ return;
+ }
+
+ // Verify the magic number is as expected
+ CV_INFO_PDB70 * pPdb70 = (CV_INFO_PDB70 *) pLayout->GetRvaData(rvaOfRawData);
+ if (pPdb70->magic != CV_SIGNATURE_RSDS)
+ {
+ // Unrecognized magic number. Skip
+ continue;
+ }
+
+ // From this point forward, the format should adhere to the expected layout of
+ // CV_INFO_PDB70. If we find otherwise, then assume the IMAGE_DEBUG_DIRECTORY is
+ // outright corrupt, and do not include PDB info in event at all. The caller will
+ // still fire the module event, but have zeroed-out / empty PDB fields.
+
+ // Verify sane size of raw data
+ if (cbDebugData > sizeof(CV_INFO_PDB70))
+ return;
+
+ // cbDebugData actually can be < sizeof(CV_INFO_PDB70), since the "path" field
+ // can be truncated to its actual data length (i.e., fewer than MAX_PATH chars
+ // may be present in the PE file). In some cases, though, cbDebugData will
+ // include all MAX_PATH chars even though path gets null-terminated well before
+ // the MAX_PATH limit.
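+ // (For example, with a 10-character path the entry can be as small as
+ // offsetof(CV_INFO_PDB70, path) + 11 bytes -- the path plus its null
+ // terminator -- rather than the full sizeof(CV_INFO_PDB70).)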
+
+ // Gotta have at least one byte of the path
+ if (cbDebugData < offsetof(CV_INFO_PDB70, path) + sizeof(char))
+ return;
+
+ // How much space is available for the path?
+ size_t cchPathMaxIncludingNullTerminator = (cbDebugData - offsetof(CV_INFO_PDB70, path)) / sizeof(char);
+ _ASSERTE(cchPathMaxIncludingNullTerminator >= 1); // Guaranteed above
+
+ // Verify path string fits inside the declared size
+ size_t cchPathActualExcludingNullTerminator = strnlen(pPdb70->path, cchPathMaxIncludingNullTerminator);
+ if (cchPathActualExcludingNullTerminator == cchPathMaxIncludingNullTerminator)
+ {
+ // This is how strnlen indicates failure--it couldn't find the null
+ // terminator within the buffer size specified
+ return;
+ }
+
+ // Looks valid. Remember it.
+ pdbInfoNextToLast = pdbInfoLast;
+ pdbInfoLast.m_pPdb70 = pPdb70;
+ pdbInfoLast.m_cbPdb70 = cbDebugData;
+ }
+
+ // Return whatever we found
+
+ if (pdbInfoLast.m_pPdb70 != NULL)
+ {
+ // The last guy is the IL (managed) PDB info
+ _ASSERTE(pdbInfoLast.m_cbPdb70 <= sizeof(*pCvInfoIL)); // Guaranteed by checks above
+ memcpy(pCvInfoIL, pdbInfoLast.m_pPdb70, pdbInfoLast.m_cbPdb70);
+ }
+
+ if (pdbInfoNextToLast.m_pPdb70 != NULL)
+ {
+ // The next-to-last guy is the NGEN (native) PDB info
+ _ASSERTE(pdbInfoNextToLast.m_cbPdb70 <= sizeof(*pCvInfoNative)); // Guaranteed by checks above
+ memcpy(pCvInfoNative, pdbInfoNextToLast.m_pPdb70, pdbInfoNextToLast.m_cbPdb70);
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Sends a module load/unload or rundown event, or a DomainModule load/rundown event
+//
+// Arguments:
+// * pModule - Module loading or unloading
+// * dwEventOptions - Bitmask of which events to fire
+// * bFireDomainModuleEvents - nonzero if we are to fire DomainModule events; zero
+// if we are to fire Module events
+//
+VOID ETW::LoaderLog::SendModuleEvent(Module *pModule, DWORD dwEventOptions, BOOL bFireDomainModuleEvents)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ if(!pModule)
+ return;
+
+ PCWSTR szDtraceOutput1=W(""),szDtraceOutput2=W("");
+ BOOL bIsDynamicAssembly = pModule->GetAssembly()->IsDynamic();
+ BOOL bHasNativeImage = FALSE;
+#ifdef FEATURE_PREJIT
+ bHasNativeImage = pModule->HasNativeImage();
+#endif // FEATURE_PREJIT
+ BOOL bIsManifestModule = pModule->IsManifest();
+ ULONGLONG ullAppDomainId = 0; // This is used only with DomainModule events
+ ULONGLONG ullModuleId = (ULONGLONG)(TADDR) pModule;
+ ULONGLONG ullAssemblyId = (ULONGLONG)pModule->GetAssembly();
+ BOOL bIsDomainNeutral = pModule->GetAssembly()->IsDomainNeutral();
+ BOOL bIsIbcOptimized = FALSE;
+ if(bHasNativeImage)
+ {
+ bIsIbcOptimized = pModule->IsIbcOptimized();
+ }
+ ULONG ulReservedFlags = 0;
+ ULONG ulFlags = ((bIsDomainNeutral ? ETW::LoaderLog::LoaderStructs::DomainNeutralModule : 0) |
+ (bHasNativeImage ? ETW::LoaderLog::LoaderStructs::NativeModule : 0) |
+ (bIsDynamicAssembly ? ETW::LoaderLog::LoaderStructs::DynamicModule : 0) |
+ (bIsManifestModule ? ETW::LoaderLog::LoaderStructs::ManifestModule : 0) |
+ (bIsIbcOptimized ? ETW::LoaderLog::LoaderStructs::IbcOptimized : 0));
+
+ // Grab PDB path, guid, and age for managed PDB and native (NGEN) PDB when
+ // available. Any failures are not fatal. The corresponding PDB info will remain
+ // zeroed out, and that's what we'll include in the event.
+ CV_INFO_PDB70 cvInfoIL = {0};
+ CV_INFO_PDB70 cvInfoNative = {0};
+ GetCodeViewInfo(pModule, &cvInfoIL, &cvInfoNative);
+
+ PWCHAR ModuleILPath=W(""), ModuleNativePath=W("");
+
+ if(bFireDomainModuleEvents)
+ {
+ if(pModule->GetDomain()->IsSharedDomain()) // for shared domains, we do not fire domainmodule event
+ return;
+ ullAppDomainId = (ULONGLONG)pModule->FindDomainAssembly(pModule->GetDomain()->AsAppDomain())->GetAppDomain();
+ }
+
+ LPCWSTR pEmptyString = W("");
+ SString moduleName = W("");
+
+ if(!bIsDynamicAssembly)
+ {
+ ModuleILPath = (PWCHAR)pModule->GetAssembly()->GetManifestFile()->GetILimage()->GetPath().GetUnicode();
+ ModuleNativePath = (PWCHAR)pEmptyString;
+
+#ifdef FEATURE_PREJIT
+ if(bHasNativeImage)
+ ModuleNativePath = (PWCHAR)pModule->GetNativeImage()->GetPath().GetUnicode();
+#endif // FEATURE_PREJIT
+ }
+
+ // if we do not have a module path yet, fall back to the module name
+ if(bIsDynamicAssembly || ModuleILPath==NULL || wcslen(ModuleILPath) <= 2)
+ {
+ moduleName.SetUTF8(pModule->GetSimpleName());
+ ModuleILPath = (PWCHAR)moduleName.GetUnicode();
+ ModuleNativePath = (PWCHAR)pEmptyString;
+ }
+
+ /* prepare event args for ETW and ETM */
+ szDtraceOutput1 = (PCWSTR)ModuleILPath;
+ szDtraceOutput2 = (PCWSTR)ModuleNativePath;
+
+ // Convert PDB paths to UNICODE
+ StackSString managedPdbPath(SString::Utf8, cvInfoIL.path);
+ StackSString nativePdbPath(SString::Utf8, cvInfoNative.path);
+
+ if(bFireDomainModuleEvents)
+ {
+ if(dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleLoad)
+ {
+ FireEtwDomainModuleLoad_V1(ullModuleId, ullAssemblyId, ullAppDomainId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, GetClrInstanceId());
+ }
+ else if(dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart)
+ {
+ FireEtwDomainModuleDCStart_V1(ullModuleId, ullAssemblyId, ullAppDomainId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, GetClrInstanceId());
+ }
+ else if(dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd)
+ {
+ FireEtwDomainModuleDCEnd_V1(ullModuleId, ullAssemblyId, ullAppDomainId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, GetClrInstanceId());
+ }
+ else
+ {
+ _ASSERTE((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleLoad) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd));
+ }
+ }
+ else
+ {
+ if((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleLoad) || (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::ModuleRangeLoad))
+ {
+ FireEtwModuleLoad_V1_or_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, GetClrInstanceId(), &cvInfoIL.signature, cvInfoIL.age, managedPdbPath, &cvInfoNative.signature, cvInfoNative.age, nativePdbPath);
+ }
+ else if(dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleUnload)
+ {
+ FireEtwModuleUnload_V1_or_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, GetClrInstanceId(), &cvInfoIL.signature, cvInfoIL.age, managedPdbPath, &cvInfoNative.signature, cvInfoNative.age, nativePdbPath);
+ }
+ else if((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart) || (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::ModuleRangeDCStart))
+ {
+ FireEtwModuleDCStart_V1_or_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, GetClrInstanceId(), &cvInfoIL.signature, cvInfoIL.age, managedPdbPath, &cvInfoNative.signature, cvInfoNative.age, nativePdbPath);
+ }
+ else if((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd) || (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::ModuleRangeDCEnd))
+ {
+ FireEtwModuleDCEnd_V1_or_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, GetClrInstanceId(), &cvInfoIL.signature, cvInfoIL.age, managedPdbPath, &cvInfoNative.signature, cvInfoNative.age, nativePdbPath);
+ }
+ else
+ {
+ _ASSERTE((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleLoad) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleUnload) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::ModuleRangeEnabledAny));
+
+ }
+
+ if (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::ModuleRangeEnabledAny)
+ {
+ // Fire ModuleRangeLoad, ModuleRangeDCStart, ModuleRangeDCEnd or ModuleRangeLoadPrivate event for this Module
+ SendModuleRange(pModule, dwEventOptions);
+ }
+ }
+}
+
+/*****************************************************************/
+/* This routine is used to send an ETW event just before a method starts jitting */
+/*****************************************************************/
+VOID ETW::MethodLog::SendMethodJitStartEvent(MethodDesc *pMethodDesc, SString *namespaceOrClassName, SString *methodName, SString *methodSignature)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ Module *pModule = NULL;
+ Module *pLoaderModule = NULL; // This must not be used except for getting the ModuleID
+
+ ULONGLONG ullMethodIdentifier=0;
+ ULONGLONG ullModuleID=0;
+ ULONG ulMethodToken=0;
+ ULONG ulMethodILSize=0;
+ PCWSTR szDtraceOutput1=W(""),szDtraceOutput2=W(""),szDtraceOutput3=W("");
+
+ if(pMethodDesc) {
+ pModule = pMethodDesc->GetModule_NoLogging();
+
+ if(!pMethodDesc->IsRestored()) {
+ return;
+ }
+
+ bool bIsDynamicMethod = pMethodDesc->IsDynamicMethod();
+ BOOL bIsGenericMethod = FALSE;
+ if(pMethodDesc->GetMethodTable_NoLogging())
+ bIsGenericMethod = pMethodDesc->HasClassOrMethodInstantiation_NoLogging();
+
+ ullModuleID = (ULONGLONG)(TADDR) pModule;
+ ullMethodIdentifier = (ULONGLONG)pMethodDesc;
+
+ // Use MethodDesc if Dynamic or Generic methods
+ if( bIsDynamicMethod || bIsGenericMethod)
+ {
+ if(bIsGenericMethod)
+ ulMethodToken = (ULONG)pMethodDesc->GetMemberDef_NoLogging();
+ if(bIsDynamicMethod) // if it's both a generic and a dynamic method, we set the method token to 0
+ ulMethodToken = (ULONG)0;
+ }
+ else
+ ulMethodToken = (ULONG)pMethodDesc->GetMemberDef_NoLogging();
+
+ if(pMethodDesc->IsIL())
+ {
+ COR_ILMETHOD_DECODER::DecoderStatus decoderstatus = COR_ILMETHOD_DECODER::FORMAT_ERROR;
+ COR_ILMETHOD_DECODER ILHeader(pMethodDesc->GetILHeader(), pMethodDesc->GetMDImport(), &decoderstatus);
+ ulMethodILSize = (ULONG)ILHeader.GetCodeSize();
+ }
+
+ SString tNamespace, tMethodName, tMethodSignature;
+ if(!namespaceOrClassName || !methodName || !methodSignature || (methodName->IsEmpty() && namespaceOrClassName->IsEmpty() && methodSignature->IsEmpty()))
+ {
+ pMethodDesc->GetMethodInfo(tNamespace, tMethodName, tMethodSignature);
+ namespaceOrClassName = &tNamespace;
+ methodName = &tMethodName;
+ methodSignature = &tMethodSignature;
+ }
+
+ // fire method information
+ /* prepare event args for ETW and ETM */
+ szDtraceOutput1 = (PCWSTR)namespaceOrClassName->GetUnicode();
+ szDtraceOutput2 = (PCWSTR)methodName->GetUnicode();
+ szDtraceOutput3 = (PCWSTR)methodSignature->GetUnicode();
+
+ FireEtwMethodJittingStarted_V1(ullMethodIdentifier,
+ ullModuleID,
+ ulMethodToken,
+ ulMethodILSize,
+ szDtraceOutput1,
+ szDtraceOutput2,
+ szDtraceOutput3,
+ GetClrInstanceId());
+ }
+}
+
+/****************************************************************************/
+/* This routine is used to send a method load/unload or rundown event */
+/****************************************************************************/
+VOID ETW::MethodLog::SendMethodEvent(MethodDesc *pMethodDesc, DWORD dwEventOptions, BOOL bIsJit, SString *namespaceOrClassName, SString *methodName, SString *methodSignature, SIZE_T pCode, ReJITID rejitID)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_NOT_MAINLINE;
+ } CONTRACTL_END;
+
+ Module *pModule = NULL;
+ Module *pLoaderModule = NULL; // This must not be used except for getting the ModuleID
+ ULONGLONG ullMethodStartAddress=0, ullColdMethodStartAddress=0, ullModuleID=0, ullMethodIdentifier=0;
+ ULONG ulMethodSize=0, ulColdMethodSize=0, ulMethodToken=0, ulMethodFlags=0, ulColdMethodFlags=0;
+ PWCHAR pMethodName=NULL, pNamespaceName=NULL, pMethodSignature=NULL;
+ BOOL bHasNativeImage = FALSE, bShowVerboseOutput = FALSE, bIsDynamicMethod = FALSE, bHasSharedGenericCode = FALSE, bIsGenericMethod = FALSE;
+ PCWSTR szDtraceOutput1=W(""),szDtraceOutput2=W(""),szDtraceOutput3=W("");
+
+ BOOL bIsRundownProvider = ((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodDCStart) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodDCEnd) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodDCStart) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodDCEnd));
+
+ BOOL bIsRuntimeProvider = ((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodLoad) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodUnload) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodLoad) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodUnload));
+
+ if (pMethodDesc == NULL)
+ return;
+
+ if(!pMethodDesc->IsRestored())
+ {
+ // Forcibly restoring ngen methods can cause all sorts of deadlocks and contract violations
+ // These events are therefore put under the private provider
+ if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_PRIVATENGENFORCERESTORE_KEYWORD))
+ {
+ PERMANENT_CONTRACT_VIOLATION(GCViolation, ReasonNonShippingCode);
+ pMethodDesc->CheckRestore();
+ }
+ else
+ {
+ return;
+ }
+ }
+
+
+ if(bIsRundownProvider)
+ {
+ bShowVerboseOutput = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
+ TRACE_LEVEL_VERBOSE,
+ KEYWORDZERO);
+ }
+ else if(bIsRuntimeProvider)
+ {
+ bShowVerboseOutput = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_VERBOSE,
+ KEYWORDZERO);
+ }
+
+ pModule = pMethodDesc->GetModule_NoLogging();
+#ifdef FEATURE_PREJIT
+ bHasNativeImage = pModule->HasNativeImage();
+#endif // FEATURE_PREJIT
+ bIsDynamicMethod = (BOOL)pMethodDesc->IsDynamicMethod();
+ bHasSharedGenericCode = pMethodDesc->IsSharedByGenericInstantiations();
+
+ if(pMethodDesc->GetMethodTable_NoLogging())
+ bIsGenericMethod = pMethodDesc->HasClassOrMethodInstantiation_NoLogging();
+
+ ulMethodFlags = ((ulMethodFlags |
+ (bHasSharedGenericCode ? ETW::MethodLog::MethodStructs::SharedGenericCode : 0) |
+ (bIsGenericMethod ? ETW::MethodLog::MethodStructs::GenericMethod : 0) |
+ (bIsDynamicMethod ? ETW::MethodLog::MethodStructs::DynamicMethod : 0) |
+ (bIsJit ? ETW::MethodLog::MethodStructs::JittedMethod : 0)));
+
+ // Intentionally set the extent flags (cold vs. hot) only after all the other common
+ // flags (above) have been set.
+ ulColdMethodFlags = ulMethodFlags | ETW::MethodLog::MethodStructs::ColdSection; // Method Extent (bits 28, 29, 30, 31)
+ ulMethodFlags = ulMethodFlags | ETW::MethodLog::MethodStructs::HotSection; // Method Extent (bits 28, 29, 30, 31)
+
+ // MethodDesc ==> Code Address ==> JitManager
+ TADDR start = pCode ? pCode : PCODEToPINSTR(pMethodDesc->GetNativeCode());
+ if(start == 0) {
+ // this method hasn't been jitted
+ return;
+ }
+
+ // EECodeInfo is technically initialized by a "PCODE", but it can also be initialized
+ // by a TADDR (i.e., w/out thumb bit set on ARM)
+ EECodeInfo codeInfo(start);
+
+ // MethodToken ==> MethodRegionInfo
+ IJitManager::MethodRegionInfo methodRegionInfo;
+ codeInfo.GetMethodRegionInfo(&methodRegionInfo);
+
+ ullMethodStartAddress = (ULONGLONG)methodRegionInfo.hotStartAddress;
+ ulMethodSize = (ULONG)methodRegionInfo.hotSize;
+
+ ullModuleID = (ULONGLONG)(TADDR) pModule;
+ ullMethodIdentifier = (ULONGLONG)pMethodDesc;
+
+ // Use MethodDesc if Dynamic or Generic methods
+ if( bIsDynamicMethod || bIsGenericMethod)
+ {
+ bShowVerboseOutput = TRUE;
+ if(bIsGenericMethod)
+ ulMethodToken = (ULONG)pMethodDesc->GetMemberDef_NoLogging();
+ if(bIsDynamicMethod) // if it's both a generic and a dynamic method, we set the method token to 0
+ ulMethodToken = (ULONG)0;
+ }
+ else
+ ulMethodToken = (ULONG)pMethodDesc->GetMemberDef_NoLogging();
+
+ if(bHasNativeImage)
+ {
+ ullColdMethodStartAddress = (ULONGLONG)methodRegionInfo.coldStartAddress;
+ ulColdMethodSize = (ULONG)methodRegionInfo.coldSize; // methodRegionInfo.coldSize is size_t while the event's size field is 32-bit, so values that do not fit are truncated on a 64-bit machine
+ }
+
+ SString tNamespace, tMethodName, tMethodSignature;
+
+ // if verbose method load info needed, only then
+ // find method name and signature and fire verbose method load info
+ if(bShowVerboseOutput)
+ {
+ if(!namespaceOrClassName || !methodName || !methodSignature || (methodName->IsEmpty() && namespaceOrClassName->IsEmpty() && methodSignature->IsEmpty()))
+ {
+ pMethodDesc->GetMethodInfo(tNamespace, tMethodName, tMethodSignature);
+ namespaceOrClassName = &tNamespace;
+ methodName = &tMethodName;
+ methodSignature = &tMethodSignature;
+ }
+ pNamespaceName = (PWCHAR)namespaceOrClassName->GetUnicode();
+ pMethodName = (PWCHAR)methodName->GetUnicode();
+ pMethodSignature = (PWCHAR)methodSignature->GetUnicode();
+ }
+
+ BOOL bFireEventForColdSection = (bHasNativeImage && ullColdMethodStartAddress && ulColdMethodSize);
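+ // NGEN images may split a method into hot and cold regions; when a cold region
+ // exists, each event below is also fired a second time for the cold range, with
+ // ulColdMethodFlags (ColdSection extent) in place of ulMethodFlags.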
+
+ /* prepare event args for ETW and ETM */
+ szDtraceOutput1 = (PCWSTR)pNamespaceName;
+ szDtraceOutput2 = (PCWSTR)pMethodName;
+ szDtraceOutput3 = (PCWSTR)pMethodSignature;
+
+ if((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodLoad) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodLoad))
+ {
+ if(bShowVerboseOutput)
+ {
+ FireEtwMethodLoadVerbose_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullMethodStartAddress,
+ ulMethodSize,
+ ulMethodToken,
+ ulMethodFlags,
+ szDtraceOutput1,
+ szDtraceOutput2,
+ szDtraceOutput3,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ else
+ {
+ FireEtwMethodLoad_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullMethodStartAddress,
+ ulMethodSize,
+ ulMethodToken,
+ ulMethodFlags,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ if(bFireEventForColdSection)
+ {
+ if(bShowVerboseOutput)
+ {
+ FireEtwMethodLoadVerbose_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullColdMethodStartAddress,
+ ulColdMethodSize,
+ ulMethodToken,
+ ulColdMethodFlags,
+ szDtraceOutput1,
+ szDtraceOutput2,
+ szDtraceOutput3,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ else
+ {
+ FireEtwMethodLoad_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullColdMethodStartAddress,
+ ulColdMethodSize,
+ ulMethodToken,
+ ulColdMethodFlags,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ }
+ }
+ else if((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodUnload) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodUnload))
+ {
+ if(bShowVerboseOutput)
+ {
+ FireEtwMethodUnloadVerbose_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullMethodStartAddress,
+ ulMethodSize,
+ ulMethodToken,
+ ulMethodFlags,
+ szDtraceOutput1,
+ szDtraceOutput2,
+ szDtraceOutput3,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ else
+ {
+ FireEtwMethodUnload_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullMethodStartAddress,
+ ulMethodSize,
+ ulMethodToken,
+ ulMethodFlags,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ if(bFireEventForColdSection)
+ {
+ if(bShowVerboseOutput)
+ {
+ FireEtwMethodUnloadVerbose_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullColdMethodStartAddress,
+ ulColdMethodSize,
+ ulMethodToken,
+ ulColdMethodFlags,
+ szDtraceOutput1,
+ szDtraceOutput2,
+ szDtraceOutput3,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ else
+ {
+ FireEtwMethodUnload_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullColdMethodStartAddress,
+ ulColdMethodSize,
+ ulMethodToken,
+ ulColdMethodFlags,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ }
+ }
+ else if((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodDCStart) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodDCStart))
+ {
+ if(bShowVerboseOutput)
+ {
+ FireEtwMethodDCStartVerbose_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullMethodStartAddress,
+ ulMethodSize,
+ ulMethodToken,
+ ulMethodFlags,
+ szDtraceOutput1,
+ szDtraceOutput2,
+ szDtraceOutput3,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ else
+ {
+ FireEtwMethodDCStart_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullMethodStartAddress,
+ ulMethodSize,
+ ulMethodToken,
+ ulMethodFlags,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ if(bFireEventForColdSection)
+ {
+ if(bShowVerboseOutput)
+ {
+ FireEtwMethodDCStartVerbose_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullColdMethodStartAddress,
+ ulColdMethodSize,
+ ulMethodToken,
+ ulColdMethodFlags,
+ szDtraceOutput1,
+ szDtraceOutput2,
+ szDtraceOutput3,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ else
+ {
+ FireEtwMethodDCStart_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullColdMethodStartAddress,
+ ulColdMethodSize,
+ ulMethodToken,
+ ulColdMethodFlags,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ }
+ }
+ else if((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodDCEnd) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodDCEnd))
+ {
+ if(bShowVerboseOutput)
+ {
+ FireEtwMethodDCEndVerbose_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullMethodStartAddress,
+ ulMethodSize,
+ ulMethodToken,
+ ulMethodFlags,
+ szDtraceOutput1,
+ szDtraceOutput2,
+ szDtraceOutput3,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ else
+ {
+ FireEtwMethodDCEnd_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullMethodStartAddress,
+ ulMethodSize,
+ ulMethodToken,
+ ulMethodFlags,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ if(bFireEventForColdSection)
+ {
+ if(bShowVerboseOutput)
+ {
+ FireEtwMethodDCEndVerbose_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullColdMethodStartAddress,
+ ulColdMethodSize,
+ ulMethodToken,
+ ulColdMethodFlags,
+ szDtraceOutput1,
+ szDtraceOutput2,
+ szDtraceOutput3,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ else
+ {
+ FireEtwMethodDCEnd_V1_or_V2(ullMethodIdentifier,
+ ullModuleID,
+ ullColdMethodStartAddress,
+ ulColdMethodSize,
+ ulMethodToken,
+ ulColdMethodFlags,
+ GetClrInstanceId(),
+ rejitID);
+ }
+ }
+ }
+ else
+ {
+ _ASSERTE((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodLoad) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodUnload) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodDCStart) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodDCEnd) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodLoad) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodUnload) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodDCStart) ||
+ (dwEventOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodDCEnd));
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Fires the IL-to-native map event for JITted methods. This is used for the runtime,
+// rundown start, and rundown end events that include the il-to-native map information
+//
+// Arguments:
+// pMethodDesc - MethodDesc for which we'll fire the map event
+// dwEventOptions - Options that tells us, in the rundown case, whether we're
+// supposed to fire the start or end rundown events.
+//
+
+// static
+VOID ETW::MethodLog::SendMethodILToNativeMapEvent(MethodDesc * pMethodDesc, DWORD dwEventOptions, ReJITID rejitID)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // This is the limit on how big the il-to-native map can get, as measured by number
+ // of entries in each parallel array (IL offset array and native offset array).
+ // This number was chosen to ensure the overall event stays under the Windows limit
+ // of 64K
+ const USHORT kMapEntriesMax = 7000;
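+ // (At that limit the two parallel UINT arrays alone take 2 * 7000 * sizeof(UINT)
+ // = 56,000 bytes, which leaves headroom under the 64K cap for the fixed-size
+ // fields that accompany them.)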
+
+ if (pMethodDesc == NULL)
+ return;
+
+ if (pMethodDesc->HasClassOrMethodInstantiation() && pMethodDesc->IsTypicalMethodDefinition())
+ return;
+
+ // g_pDebugInterface is initialized on startup on desktop CLR, regardless of whether a debugger
+ // or profiler is loaded. So it should always be available.
+ _ASSERTE(g_pDebugInterface != NULL);
+
+ ULONGLONG ullMethodIdentifier = (ULONGLONG)pMethodDesc;
+
+ USHORT cMap;
+ NewArrayHolder<UINT> rguiILOffset;
+ NewArrayHolder<UINT> rguiNativeOffset;
+
+ HRESULT hr = g_pDebugInterface->GetILToNativeMappingIntoArrays(
+ pMethodDesc,
+ kMapEntriesMax,
+ &cMap,
+ &rguiILOffset,
+ &rguiNativeOffset);
+ if (FAILED(hr))
+ return;
+
+ // Runtime provider.
+ //
+ // This macro already checks for the JittedMethodILToNativeMapKeyword before
+ // choosing to fire the event
+ if ((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodILToNativeMap) != 0)
+ {
+ FireEtwMethodILToNativeMap(
+ ullMethodIdentifier,
+ rejitID,
+ 0, // Extent: This event is only sent for JITted (not NGENd) methods, and
+ // currently there is only one extent (hot) for JITted methods.
+ cMap,
+ rguiILOffset,
+ rguiNativeOffset,
+ GetClrInstanceId());
+ }
+
+ // Rundown provider
+ //
+ // These macros already check for the JittedMethodILToNativeMapRundownKeyword
+ // before choosing to fire the event--we further check our options to see if we
+ // should fire the Start and / or End flavor of the event (since the keyword alone
+ // is insufficient to distinguish these).
+ //
+ // (for an explanation of the parameters see the FireEtwMethodILToNativeMap call above)
+ if ((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::MethodDCStartILToNativeMap) != 0)
+ FireEtwMethodDCStartILToNativeMap(ullMethodIdentifier, 0, 0, cMap, rguiILOffset, rguiNativeOffset, GetClrInstanceId());
+ if ((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::MethodDCEndILToNativeMap) != 0)
+ FireEtwMethodDCEndILToNativeMap(ullMethodIdentifier, 0, 0, cMap, rguiILOffset, rguiNativeOffset, GetClrInstanceId());
+}
+
+
+VOID ETW::MethodLog::SendHelperEvent(ULONGLONG ullHelperStartAddress, ULONG ulHelperSize, LPCWSTR pHelperName)
+{
+ WRAPPER_NO_CONTRACT;
+ if(pHelperName)
+ {
+ PCWSTR szDtraceOutput1=W("");
+ ULONG methodFlags = ETW::MethodLog::MethodStructs::JitHelperMethod; // helper flag set
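+ // JIT helpers have no MethodDesc, so the helper's start address doubles as the
+ // method identifier and the ModuleID argument below is reported as 0.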
+ FireEtwMethodLoadVerbose_V1(ullHelperStartAddress,
+ 0,
+ ullHelperStartAddress,
+ ulHelperSize,
+ 0,
+ methodFlags,
+ NULL,
+ pHelperName,
+ NULL,
+ GetClrInstanceId());
+ }
+}
+
+
+/****************************************************************************/
+/* This routine sends back method events of type 'dwEventOptions', for all
+ NGEN methods in pModule */
+/****************************************************************************/
+VOID ETW::MethodLog::SendEventsForNgenMethods(Module *pModule, DWORD dwEventOptions)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+#ifdef FEATURE_PREJIT
+ if(!pModule || !pModule->HasNativeImage())
+ return;
+
+ MethodIterator mi(pModule);
+
+ while(mi.Next())
+ {
+ MethodDesc *hotDesc = (MethodDesc *)mi.GetMethodDesc();
+ ETW::MethodLog::SendMethodEvent(hotDesc, dwEventOptions, FALSE);
+ }
+#endif // FEATURE_PREJIT
+}
+
+/****************************************************************************/
+/* This routine sends back method events of type 'dwEventOptions', for all
+   JITed methods in either a given LoaderAllocator (if pLoaderAllocatorFilter is non-NULL)
+   or in a given Domain (if pDomainFilter is non-NULL), or for
+   all methods (if both filters are NULL) */
+/****************************************************************************/
+VOID ETW::MethodLog::SendEventsForJitMethods(BaseDomain *pDomainFilter, LoaderAllocator *pLoaderAllocatorFilter, DWORD dwEventOptions)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+#if !defined(DACCESS_COMPILE)
+ EX_TRY
+ {
+ // This is only called for JITted methods loading xor unloading (exactly one of the two, as asserted below)
+ BOOL fLoadOrDCStart = ((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodLoadOrDCStartAny) != 0);
+ BOOL fUnloadOrDCEnd = ((dwEventOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodUnloadOrDCEndAny) != 0);
+ _ASSERTE((fLoadOrDCStart || fUnloadOrDCEnd) && !(fLoadOrDCStart && fUnloadOrDCEnd));
+
+ BOOL fSendMethodEvent =
+ (dwEventOptions &
+ (ETW::EnumerationLog::EnumerationStructs::JitMethodLoad |
+ ETW::EnumerationLog::EnumerationStructs::JitMethodDCStart |
+ ETW::EnumerationLog::EnumerationStructs::JitMethodUnload |
+ ETW::EnumerationLog::EnumerationStructs::JitMethodDCEnd)) != 0;
+
+ BOOL fSendILToNativeMapEvent =
+ (dwEventOptions &
+ (ETW::EnumerationLog::EnumerationStructs::MethodDCStartILToNativeMap |
+ ETW::EnumerationLog::EnumerationStructs::MethodDCEndILToNativeMap)) != 0;
+
+ BOOL fCollectibleLoaderAllocatorFilter =
+ ((pLoaderAllocatorFilter != NULL) && (pLoaderAllocatorFilter->IsCollectible()));
+
+ if (fSendILToNativeMapEvent)
+ {
+ // The call to SendMethodILToNativeMapEvent assumes that the debugger's lazy
+ // data has already been initialized, to ensure we don't try to do the lazy init
+ // while under the implicit, notrigger CodeHeapIterator lock below.
+
+ // g_pDebugInterface is initialized on startup on desktop CLR, regardless of whether a debugger
+ // or profiler is loaded. So it should always be available.
+ _ASSERTE(g_pDebugInterface != NULL);
+ g_pDebugInterface->InitializeLazyDataIfNecessary();
+ }
+
+ // GetReJitIdNoLock requires that the rejit lock is taken already. We need to take
+ // it here, before CodeHeapIterator takes the SingleUseLock, because that is the
+ // defined lock ordering.
+ ReJitManager::TableLockHolder lksharedRejitMgrModule(SharedDomain::GetDomain()->GetReJitManager());
+ ReJitManager::TableLockHolder lkRejitMgrModule(pDomainFilter->GetReJitManager());
+ EEJitManager::CodeHeapIterator heapIterator(pDomainFilter, pLoaderAllocatorFilter);
+ while(heapIterator.Next())
+ {
+ MethodDesc * pMD = heapIterator.GetMethod();
+ if (pMD == NULL)
+ continue;
+
+ TADDR codeStart = heapIterator.GetMethodCode();
+
+ // Grab rejitID from the rejit manager. Short-circuit the call if we're filtering
+ // by a collectible loader allocator, since rejit is not supported on RefEmit
+ // assemblies.
+ ReJITID rejitID =
+ fCollectibleLoaderAllocatorFilter ?
+ 0 :
+ pMD->GetReJitManager()->GetReJitIdNoLock(pMD, codeStart);
+
+ // There are small windows of time where the heap iterator may come across a
+ // codeStart that is not yet published to the MethodDesc. This may happen if
+ // we're JITting the method right now on another thread, and have not completed
+ // yet. Detect the race, and skip the method if appropriate. (If rejitID is
+ // nonzero, there is no race, as GetReJitIdNoLock will not return a nonzero
+ // rejitID if the codeStart has not yet been published for that rejitted version
+ // of the method.) This check also catches recompilations due to EnC, which we do
+ // not want to issue events for, in order to ensure xperf's assumption that
+ // MethodDesc* + ReJITID + extent (hot vs. cold) form a unique key for code
+ // ranges of methods
+ if ((rejitID == 0) && (codeStart != PCODEToPINSTR(pMD->GetNativeCode())))
+ continue;
+
+ // When we're called to announce loads, then the methodload event itself must
+ // precede any supplemental events, so that the method load or method jitting
+ // event is the first event the profiler sees for that MethodID (and not, say,
+ // the MethodILToNativeMap event.)
+ if (fLoadOrDCStart)
+ {
+ if (fSendMethodEvent)
+ {
+ ETW::MethodLog::SendMethodEvent(
+ pMD,
+ dwEventOptions,
+ TRUE, // bIsJit
+ NULL, // namespaceOrClassName
+ NULL, // methodName
+ NULL, // methodSignature
+ codeStart,
+ rejitID);
+ }
+ }
+
+ // Send any supplemental events requested for this MethodID
+ if (fSendILToNativeMapEvent)
+ ETW::MethodLog::SendMethodILToNativeMapEvent(pMD, dwEventOptions, rejitID);
+
+ // When we're called to announce unloads, then the methodunload event itself must
+ // come after any supplemental events, so that the method unload event is the
+ // last event the profiler sees for this MethodID
+ if (fUnloadOrDCEnd)
+ {
+ if (fSendMethodEvent)
+ {
+ ETW::MethodLog::SendMethodEvent(
+ pMD,
+ dwEventOptions,
+ TRUE, // bIsJit
+ NULL, // namespaceOrClassName
+ NULL, // methodName
+ NULL, // methodSignature
+ codeStart,
+ rejitID);
+ }
+ }
+ }
+ } EX_CATCH{} EX_END_CATCH(SwallowAllExceptions);
+#endif // !DACCESS_COMPILE
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Wrapper around IterateDomain, which holds the SystemDomain lock so the
+// AppDomain stays below STAGE_FINALIZED until the iteration is complete.
+//
+// Arguments:
+// pAppDomain - AppDomain to iterate
+// enumerationOptions - Flags indicating what to enumerate. Just passed
+// straight through to IterateDomain
+//
+VOID ETW::EnumerationLog::IterateAppDomain(AppDomain * pAppDomain, DWORD enumerationOptions)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(pAppDomain != NULL);
+ }
+ CONTRACTL_END;
+
+ // Hold the system domain lock during the entire iteration, so we can
+ // ensure the App Domain does not get finalized until we're all done
+ SystemDomain::LockHolder lh;
+
+ if (pAppDomain->IsFinalized())
+ {
+ return;
+ }
+
+ // Since we're not FINALIZED yet, the handle table should remain intact,
+ // as should all type information in this AppDomain
+ _ASSERTE(!pAppDomain->NoAccessToHandleTable());
+
+ // Now it's safe to do the iteration
+ IterateDomain(pAppDomain, enumerationOptions);
+
+ // Since we're holding the system domain lock, the AD type info should be
+ // there throughout the entire iteration we just did
+ _ASSERTE(!pAppDomain->NoAccessToHandleTable());
+}
+
+/********************************************************************************/
+/* This routine fires ETW events for
+ Domain,
+ Assemblies in them,
+ DomainModule's in them,
+ Modules in them,
+ JIT methods in them,
+ and the NGEN methods in them
+ based on enumerationOptions.*/
+/********************************************************************************/
+VOID ETW::EnumerationLog::IterateDomain(BaseDomain *pDomain, DWORD enumerationOptions)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(pDomain != NULL);
+ } CONTRACTL_END;
+
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ // Do not call IterateDomain() directly with an AppDomain. Use
+ // IterateAppDomain(), which wraps this function with a hold on the
+ // SystemDomain lock, which ensures pDomain's type data doesn't disappear
+ // on us.
+ if (pDomain->IsAppDomain())
+ {
+ _ASSERTE(SystemDomain::IsUnderDomainLock());
+ }
+#endif // defined(_DEBUG) && !defined(DACCESS_COMPILE)
+
+ EX_TRY
+ {
+ // DC Start events for Domain
+ if(enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart)
+ {
+ ETW::LoaderLog::SendDomainEvent(pDomain, enumerationOptions);
+ }
+
+ // DC End or Unload Jit Method events
+ if (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodUnloadOrDCEndAny)
+ {
+ ETW::MethodLog::SendEventsForJitMethods(pDomain, NULL, enumerationOptions);
+ }
+
+ if (pDomain->IsAppDomain())
+ {
+ AppDomain::AssemblyIterator assemblyIterator = pDomain->AsAppDomain()->IterateAssembliesEx(
+ (AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+ while (assemblyIterator.Next(pDomainAssembly.This()))
+ {
+ CollectibleAssemblyHolder<Assembly *> pAssembly = pDomainAssembly->GetLoadedAssembly();
+ BOOL bIsDomainNeutral = pAssembly->IsDomainNeutral();
+ if (bIsDomainNeutral)
+ continue;
+ if (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart)
+ {
+ ETW::EnumerationLog::IterateAssembly(pAssembly, enumerationOptions);
+ }
+
+ DomainModuleIterator domainModuleIterator = pDomainAssembly->IterateModules(kModIterIncludeLoaded);
+ while (domainModuleIterator.Next())
+ {
+ Module * pModule = domainModuleIterator.GetModule();
+ ETW::EnumerationLog::IterateModule(pModule, enumerationOptions);
+ }
+
+ if((enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd) ||
+ (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleUnload))
+ {
+ ETW::EnumerationLog::IterateAssembly(pAssembly, enumerationOptions);
+ }
+ }
+ }
+ else
+ {
+ SharedDomain::SharedAssemblyIterator sharedDomainIterator;
+ while (sharedDomainIterator.Next())
+ {
+ Assembly * pAssembly = sharedDomainIterator.GetAssembly();
+ if (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart)
+ {
+ ETW::EnumerationLog::IterateAssembly(pAssembly, enumerationOptions);
+ }
+
+ ModuleIterator domainModuleIterator = pAssembly->IterateModules();
+ while (domainModuleIterator.Next())
+ {
+ Module * pModule = domainModuleIterator.GetModule();
+ ETW::EnumerationLog::IterateModule(pModule, enumerationOptions);
+ }
+
+ if ((enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd) ||
+ (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleUnload))
+ {
+ ETW::EnumerationLog::IterateAssembly(pAssembly, enumerationOptions);
+ }
+ }
+ }
+
+ // DC Start or Load Jit Method events
+ if (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodLoadOrDCStartAny)
+ {
+ ETW::MethodLog::SendEventsForJitMethods(pDomain, NULL, enumerationOptions);
+ }
+
+ // DC End or Unload events for Domain
+ if((enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd) ||
+ (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleUnload))
+ {
+ ETW::LoaderLog::SendDomainEvent(pDomain, enumerationOptions);
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+
+/********************************************************************************/
+/* This routine fires ETW events for
+ Assembly in LoaderAllocator,
+ DomainModule's in them,
+ Modules in them,
+ JIT methods in them,
+ and the NGEN methods in them
+ based on enumerationOptions.*/
+/********************************************************************************/
+VOID ETW::EnumerationLog::IterateCollectibleLoaderAllocator(AssemblyLoaderAllocator *pLoaderAllocator, DWORD enumerationOptions)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(pLoaderAllocator != NULL);
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ // Unload Jit Method events
+ if (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodUnload)
+ {
+ ETW::MethodLog::SendEventsForJitMethods(NULL, pLoaderAllocator, enumerationOptions);
+ }
+
+ Assembly *pAssembly = pLoaderAllocator->Id()->GetDomainAssembly()->GetAssembly();
+ _ASSERTE(!pAssembly->IsDomainNeutral()); // Collectible Assemblies are not domain neutral.
+
+ DomainModuleIterator domainModuleIterator = pLoaderAllocator->Id()->GetDomainAssembly()->IterateModules(kModIterIncludeLoaded);
+ while (domainModuleIterator.Next())
+ {
+ Module *pModule = domainModuleIterator.GetModule();
+ ETW::EnumerationLog::IterateModule(pModule, enumerationOptions);
+ }
+
+ if (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleUnload)
+ {
+ ETW::EnumerationLog::IterateAssembly(pAssembly, enumerationOptions);
+ }
+
+ // Load Jit Method events
+ if (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodLoad)
+ {
+ ETW::MethodLog::SendEventsForJitMethods(NULL, pLoaderAllocator, enumerationOptions);
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/********************************************************************************/
+/* This routine fires ETW events for Assembly and the DomainModule's in them
+ based on enumerationOptions.*/
+/********************************************************************************/
+VOID ETW::EnumerationLog::IterateAssembly(Assembly *pAssembly, DWORD enumerationOptions)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(pAssembly != NULL);
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ // DC Start events for Assembly
+ if(enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart)
+ {
+ ETW::LoaderLog::SendAssemblyEvent(pAssembly, enumerationOptions);
+ }
+
+ // DC Start, DCEnd, events for DomainModule
+ if((enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd) ||
+ (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart))
+ {
+ if(pAssembly->GetDomain()->IsAppDomain())
+ {
+ DomainModuleIterator dmIterator = pAssembly->FindDomainAssembly(pAssembly->GetDomain()->AsAppDomain())->IterateModules(kModIterIncludeLoaded);
+ while (dmIterator.Next())
+ {
+ ETW::LoaderLog::SendModuleEvent(dmIterator.GetModule(), enumerationOptions, TRUE);
+ }
+ }
+ }
+
+ // DC End or Unload events for Assembly
+ if((enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd) ||
+ (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleUnload))
+ {
+ ETW::LoaderLog::SendAssemblyEvent(pAssembly, enumerationOptions);
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+/********************************************************************************/
+/* This routine fires ETW events for Module, their range information and the NGEN methods in them
+ based on enumerationOptions.*/
+/********************************************************************************/
+VOID ETW::EnumerationLog::IterateModule(Module *pModule, DWORD enumerationOptions)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(pModule != NULL);
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ // DC Start events for Module
+ if((enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCStart) ||
+ (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::ModuleRangeDCStart))
+ {
+ ETW::LoaderLog::SendModuleEvent(pModule, enumerationOptions);
+ }
+
+ // DC Start or Load or DC End or Unload Ngen Method events
+ if((enumerationOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodLoad) ||
+ (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodDCStart) ||
+ (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodUnload) ||
+ (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::NgenMethodDCEnd))
+ {
+ ETW::MethodLog::SendEventsForNgenMethods(pModule, enumerationOptions);
+ }
+
+ // DC End or Unload events for Module
+ if((enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleDCEnd) ||
+ (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::DomainAssemblyModuleUnload) ||
+ (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::ModuleRangeDCEnd))
+ {
+ ETW::LoaderLog::SendModuleEvent(pModule, enumerationOptions);
+ }
+
+ // If we're logging types, then update the internal Type hash table to account
+ // for the module's unloading
+ if (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::TypeUnload)
+ {
+ ETW::TypeSystemLog::OnModuleUnload(pModule);
+ }
+
+ // ModuleRangeLoadPrivate events for module range information from attach/detach scenarios
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_PERFTRACK_PRIVATE_KEYWORD) &&
+ (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::ModuleRangeLoadPrivate))
+ {
+ ETW::LoaderLog::SendModuleEvent(pModule, enumerationOptions);
+ }
+ } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// This routine sends back domain, assembly, module and method events based on
+// enumerationOptions.
+//
+// Arguments:
+// * moduleFilter - if non-NULL, only events from the moduleFilter module are reported
+// * domainFilter - if non-NULL, only events from the domainFilter domain are reported
+// * enumerationOptions - Flags from ETW::EnumerationLog::EnumerationStructs which
+// describe which events should be sent.
+//
+// Notes:
+// * if all filter args are NULL, events from all domains are reported
+//
+//
+
+// static
+VOID ETW::EnumerationLog::EnumerationHelper(Module *moduleFilter, BaseDomain *domainFilter, DWORD enumerationOptions)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ // Disable IBC logging during ETW enumeration since we call a lot of functionality
+ // that does logging and causes problems in the shutdown path due to critical
+ // section access for IBC logging
+ IBCLoggingDisabler disableLogging;
+
+ if(moduleFilter)
+ {
+ // Iterate modules first because their number is usually smaller than the number
+ // of methods. Thus hitting a timeout due to a large number of methods will not
+ // affect module rundown.
+ ETW::EnumerationLog::IterateModule(moduleFilter, enumerationOptions);
+
+ // DC End or Unload Jit Method events from all Domains
+ if (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodUnloadOrDCEndAny)
+ {
+ ETW::MethodLog::SendEventsForJitMethods(NULL, NULL, enumerationOptions);
+ }
+
+ // DC Start or Load Jit Method events from all Domains
+ if (enumerationOptions & ETW::EnumerationLog::EnumerationStructs::JitMethodLoadOrDCStartAny)
+ {
+ ETW::MethodLog::SendEventsForJitMethods(NULL, NULL, enumerationOptions);
+ }
+ }
+ else
+ {
+ if(domainFilter)
+ {
+ if(domainFilter->IsAppDomain())
+ {
+ ETW::EnumerationLog::IterateAppDomain(domainFilter->AsAppDomain(), enumerationOptions);
+ }
+ else
+ {
+ ETW::EnumerationLog::IterateDomain(domainFilter, enumerationOptions);
+ }
+ }
+ else
+ {
+ AppDomainIterator appDomainIterator(FALSE);
+ while(appDomainIterator.Next())
+ {
+ AppDomain *pDomain = appDomainIterator.GetDomain();
+ if (pDomain != NULL)
+ {
+ ETW::EnumerationLog::IterateAppDomain(pDomain, enumerationOptions);
+ }
+ }
+ ETW::EnumerationLog::IterateDomain(SharedDomain::GetDomain(), enumerationOptions);
+ }
+ }
+}
+
+#endif // !FEATURE_REDHAWK
diff --git a/src/vm/eventtracepriv.h b/src/vm/eventtracepriv.h
new file mode 100644
index 0000000000..93c71b74e8
--- /dev/null
+++ b/src/vm/eventtracepriv.h
@@ -0,0 +1,410 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: eventtracepriv.h
+//
+// Contains some private definitions used by eventtrace.cpp, but that aren't needed by
+// clients of eventtrace.cpp, and thus don't belong in eventtrace.h. Also, since
+// inclusions of this file are tightly controlled (basically just by eventtrace.cpp), we
+// can assume some classes are defined that aren't necessarily defined when eventtrace.h
+// is #included (e.g., StackSString and StackSArray).
+//
+
+// ============================================================================
+
+#ifndef __EVENTTRACEPRIV_H__
+#define __EVENTTRACEPRIV_H__
+
+#ifndef _countof
+#define _countof(_array) (sizeof(_array)/sizeof(_array[0]))
+#endif
+
+const UINT cbMaxEtwEvent = 64 * 1024;
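+// A single ETW event payload cannot exceed this limit, so events that may carry
+// more data (the Bulk* events described below) are split across multiple event
+// instances by their senders in eventtrace.cpp.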
+
+//---------------------------------------------------------------------------------------
+// C++ copies of ETW structures
+//---------------------------------------------------------------------------------------
+
+// !!!!!!! NOTE !!!!!!!!
+// The EventStruct* structs are described in the ETW manifest event templates, and the
+// LAYOUT MUST MATCH THE MANIFEST EXACTLY!
+// !!!!!!! NOTE !!!!!!!!
+
+#pragma pack(push, 1)
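+// pack(1) removes compiler padding so these structs can be copied byte-for-byte
+// into the ETW payload while still matching the manifest-declared layout.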
+
+struct EventStructGCBulkRootEdgeValue
+{
+ LPVOID RootedNodeAddress;
+ BYTE GCRootKind;
+ DWORD GCRootFlag;
+ LPVOID GCRootID;
+};
+
+struct EventStructGCBulkRootConditionalWeakTableElementEdgeValue
+{
+ LPVOID GCKeyNodeID;
+ LPVOID GCValueNodeID;
+ LPVOID GCRootID;
+};
+
+struct EventStructGCBulkNodeValue
+{
+ LPVOID Address;
+ ULONGLONG Size;
+ ULONGLONG TypeID;
+ ULONGLONG EdgeCount;
+};
+
+struct EventStructGCBulkEdgeValue
+{
+ LPVOID Value;
+ ULONG ReferencingFieldID;
+};
+
+struct EventStructGCBulkSurvivingObjectRangesValue
+{
+ LPVOID RangeBase;
+ ULONGLONG RangeLength;
+};
+
+struct EventStructGCBulkMovedObjectRangesValue
+{
+ LPVOID OldRangeBase;
+ LPVOID NewRangeBase;
+ ULONGLONG RangeLength;
+};
+
+struct EventStructStaticBulkFixedSizeData
+{
+ ULONGLONG TypeID;
+ ULONGLONG Address;
+ ULONGLONG Value;
+ ULONG Flags;
+};
+
+// This only contains the fixed-size data at the top of each struct in
+// the bulk type event. These fields must still match exactly the initial
+// fields of the struct described in the manifest.
+struct EventStructBulkTypeFixedSizedData
+{
+ ULONGLONG TypeID;
+ ULONGLONG ModuleID;
+ ULONG TypeNameID;
+ ULONG Flags;
+ BYTE CorElementType;
+};
+
+struct EventStaticEntry
+{
+ ULONGLONG GCRootID;
+ ULONGLONG ObjectID;
+ ULONGLONG TypeID;
+ ULONG Flags;
+ wchar_t Name[0];
+
+ // Writes one EventStaticEntry to the buffer specified by ptr. Since we don't actually know how large the event will be,
+ // this write may fail if the remaining buffer is not large enough. This function returns the number of bytes written
+ // on success (return is >= 0), and -1 on failure. If we return -1, the caller is expected to flush the current buffer
+ // and try again.
+ static int WriteEntry(BYTE *ptr, int sizeRemaining, ULONGLONG addr, ULONGLONG obj, ULONGLONG typeId, ULONG flags, FieldDesc *fieldDesc)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // sizeRemaining must be larger than the structure + 1 wchar for the struct and
+ // null terminator of Name. We will do a better bounds check when we know the
+ // size of the field name.
+ if (sizeRemaining < sizeof(EventStaticEntry) + sizeof(wchar_t))
+ return -1;
+
+ // The location in the structure to write to. We won't actually write here unless we have sufficient buffer.
+ wchar_t *name = (wchar_t *)(ptr + offsetof(EventStaticEntry, Name));
+ int len = 0;
+
+ LPCUTF8 utf8Name = 0;
+ if (SUCCEEDED(fieldDesc->GetName_NoThrow(&utf8Name)))
+ {
+ // Note that the last argument (cchWideChar) is a count of characters, not bytes.
+ len = MultiByteToWideChar(CP_ACP, 0, utf8Name, -1, name, (sizeRemaining - sizeof(EventStaticEntry)) / sizeof(wchar_t));
+ if (len <= 0)
+ {
+ // We ignore corrupted/bad metadata here and only emit names of up to 255 characters.
+ // If the conversion failed merely because the remaining buffer is too small for such
+ // a name, return -1 so the caller flushes and retries; otherwise fall through and
+ // emit an empty string.
+ if (GetLastError() == ERROR_INSUFFICIENT_BUFFER && sizeRemaining < 256)
+ return -1; // nothing written, insufficient buffer. Flush and try again.
+
+ // If the name is larger than 255 or we have some other error converting the string,
+ // just emit an empty string.
+ len = 1;
+ name[0] = 0;
+ }
+ }
+ else
+ {
+ // Couldn't get the name for some reason, just emit an empty string.
+ len = 1;
+ name[0] = 0;
+ }
+
+ // At this point we should have written something to the name buffer.
+ _ASSERTE(len > 0);
+
+ // At this point we've written the field name (even if it's just an empty string).
+ // Write the rest of the fields to the buffer and return the total size.
+ EventStaticEntry *entry = (EventStaticEntry*)ptr;
+ entry->GCRootID = addr;
+ entry->ObjectID = obj;
+ entry->TypeID = typeId;
+ entry->Flags = flags;
+
+ return sizeof(EventStaticEntry) + len * sizeof(wchar_t);
+ }
+};
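+
+// A minimal caller sketch (illustrative only, not the actual implementation; it mirrors
+// what BulkStaticsLogger::WriteEntry, declared later in this file, is expected to do
+// with its m_buffer/m_used bookkeeping):
+//
+//   int written = EventStaticEntry::WriteEntry(m_buffer + m_used, kMaxBytesValues - m_used,
+//                                              addr, obj, typeId, flags, pFieldDesc);
+//   if (written < 0)
+//   {
+//       FireBulkStaticsEvent();   // flush the current batch to make room
+//       written = EventStaticEntry::WriteEntry(m_buffer, kMaxBytesValues,
+//                                              addr, obj, typeId, flags, pFieldDesc);
+//   }
+//   if (written > 0)
+//       m_used += written;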
+
+struct EventRCWEntry
+{
+ ULONGLONG ObjectID;
+ ULONGLONG TypeID;
+ ULONGLONG IUnk;
+ ULONGLONG VTable;
+ ULONG RefCount;
+ ULONG Flags;
+};
+
+
+struct EventCCWEntry
+{
+ enum CCWFlags
+ {
+ Strong = 0x1,
+ Pegged = 0x2
+ };
+
+ ULONGLONG RootID;
+ ULONGLONG ObjectID;
+ ULONGLONG TypeID;
+ ULONGLONG IUnk;
+ ULONG RefCount;
+ ULONG JupiterRefCount;
+ ULONG Flags;
+};
+
+#pragma pack(pop)
+
+// Represents one instance of the Value struct inside a single BulkType event
+class BulkTypeValue
+{
+public:
+ BulkTypeValue();
+ void Clear();
+
+ // How many bytes will this BulkTypeValue take up when written into the actual ETW
+ // event?
+ int GetByteCountInEvent()
+ {
+ return
+ sizeof(fixedSizedData) +
+ sizeof(cTypeParameters) +
+#ifdef FEATURE_REDHAWK
+ sizeof(WCHAR) + // No name in event, so just the null terminator
+ cTypeParameters * sizeof(ULONGLONG); // Type parameters
+#else
+ (sName.GetCount() + 1) * sizeof(WCHAR) + // Size of name, including null terminator
+ rgTypeParameters.GetCount() * sizeof(ULONGLONG);// Type parameters
+#endif
+ }
+
+ EventStructBulkTypeFixedSizedData fixedSizedData;
+
+ // Below are the remainder of each struct in the bulk type event (i.e., the
+ // variable-sized data). The var-sized fields are copied into the event individually
+ // (not directly), so they don't need to have the same layout as in the ETW manifest
+
+ // This is really a denorm of the size already stored in rgTypeParameters, but we
+ // need a persistent place to stash this away so EventDataDescCreate & EventWrite
+ // have a reliable place to copy it from. This is filled in at the last minute,
+ // when sending the event. (On ProjectN, which doesn't have StackSArray, this is
+ // filled in earlier and used in more places.)
+ ULONG cTypeParameters;
+
+#ifdef FEATURE_REDHAWK
+ // If > 1 type parameter, this is an array of their EEType*'s
+ NewArrayHolder<ULONGLONG> rgTypeParameters;
+
+ // If exactly one type parameter, this is its EEType*. (If != 1 type parameter,
+ // this is 0.)
+ ULONGLONG ullSingleTypeParameter;
+#else // FEATURE_REDHAWK
+ StackSString sName;
+ StackSArray<ULONGLONG> rgTypeParameters;
+#endif // FEATURE_REDHAWK
+};
+
+// Encapsulates all the type event batching we need to do. This is used by
+// ETW::TypeSystemLog, which calls LogTypeAndParameters for each type to be logged.
+// BulkTypeEventLogger will batch each type and its generic type parameters, and flush to
+// ETW as necessary. ETW::TypeSystemLog also calls FireBulkTypeEvent directly to force a
+// flush (e.g., once at end of GC heap traversal, or on each object allocation).
+class BulkTypeEventLogger
+{
+private:
+
+ // Estimate of how many bytes we can squeeze in the event data for the value struct
+ // array. (Intentionally overestimate the size of the non-array parts to keep it safe.)
+ static const int kMaxBytesTypeValues = (cbMaxEtwEvent - 0x30);
+
+ // Estimate of how many type value elements we can put into the struct array, while
+ // staying under the ETW event size limit. Note that this is impossible to calculate
+ // perfectly, since each element of the struct array has variable size.
+ //
+ // In addition to the byte-size limit per event, Windows always forces on us a
+ // max-number-of-descriptors per event, which in the case of BulkType, will kick in
+ // far sooner. There's a max number of 128 descriptors allowed per event. 2 are used
+ // for Count + ClrInstanceID. Then 4 per batched value. (Might actually be 3 if there
+ // are no type parameters to log, but let's overestimate at 4 per value).
+ static const int kMaxCountTypeValues = (128 - 2) / 4;
+ // Note: This results in a relatively small batch (about 31 types per event). We
+ // could increase this substantially by creating a single, contiguous buffer, which
+ // would let us max out the number of type values to batch by allowing the byte-size
+ // limit to kick in before the max-descriptor limit. We could estimate that as
+ // follows:
+ //
+ // static const int kMaxCountTypeValues = kMaxBytesTypeValues /
+ // (sizeof(EventStructBulkTypeFixedSizedData) +
+ // 200 * sizeof(WCHAR) + // Assume 199 + 1 terminating-NULL character in type name
+ // sizeof(UINT) + // Type parameter count
+ // 10 * sizeof(ULONGLONG)); // Assume 10 type parameters
+ //
+ // The downside, though, is that we would have to do a lot more copying to fill out
+ // that buffer before sending the event. It's unclear that increasing the batch size
+ // is enough of a win to offset all the extra buffer copying. So for now, we'll keep
+ // the batch size low and avoid extra copying.
+
+ // How many types have we batched?
+ int m_nBulkTypeValueCount;
+
+ // What is the byte size of all the types we've batched?
+ int m_nBulkTypeValueByteCount;
+
+ // List of types we've batched.
+ BulkTypeValue m_rgBulkTypeValues[kMaxCountTypeValues];
+
+#ifdef FEATURE_REDHAWK
+ int LogSingleType(EEType * pEEType);
+#else
+ int LogSingleType(TypeHandle th);
+#endif
+
+public:
+ BulkTypeEventLogger() :
+ m_nBulkTypeValueCount(0),
+ m_nBulkTypeValueByteCount(0)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ void LogTypeAndParameters(ULONGLONG thAsAddr, ETW::TypeSystemLog::TypeLogBehavior typeLogBehavior);
+ void FireBulkTypeEvent();
+};
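+
+// A minimal usage sketch (illustrative; the real call sites live in ETW::TypeSystemLog,
+// using the parameter names from the declarations above):
+//
+//   BulkTypeEventLogger typeLogger;
+//   typeLogger.LogTypeAndParameters(thAsAddr, typeLogBehavior); // batch a type + its params
+//   // ... repeat for each type to log ...
+//   typeLogger.FireBulkTypeEvent();                             // force out a partial batch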
+
+
+// Does all logging for RCWs and CCWs in the process. We walk RCWs by enumerating all syncblocks in
+// the process and seeing if they have associated interop information. We enumerate all CCWs in the
+// process from the RefCount handles on the handle table.
+class BulkComLogger
+{
+public:
+ // If typeLogger is non-null, we will log out the types via the logger, otherwise no type
+ // information will be logged.
+ BulkComLogger(BulkTypeEventLogger *typeLogger);
+ ~BulkComLogger();
+
+ // Walks all RCW/CCW objects.
+ void LogAllComObjects();
+
+ // Forces a flush of all ETW events not yet fired.
+ void FireBulkComEvent();
+
+private:
+ // Writes one RCW to the RCW buffer. May or may not fire the event.
+ void WriteRcw(RCW *rcw, Object *obj);
+
+ // Writes one CCW to the CCW buffer. May or may not fire the event.
+ void WriteCcw(ComCallWrapper *ccw, Object **handle, Object *obj);
+
+ // Forces a flush of all RCW ETW events not yet fired.
+ void FlushRcw();
+
+ // Forces a flush of all CCW ETW events not yet fired.
+ void FlushCcw();
+
+ // Callback used during handle table enumeration.
+ static void HandleWalkCallback(PTR_UNCHECKED_OBJECTREF pref, LPARAM *pExtraInfo, LPARAM param1, LPARAM param2);
+
+ // Used during CCW enumeration to keep track of all object handles which point to a CCW.
+ void AddCcwHandle(Object **handle);
+
+private:
+ struct CCWEnumerationEntry
+ {
+ CCWEnumerationEntry *Next;
+ int Count;
+ Object **Handles[64];
+
+ CCWEnumerationEntry() : Next(0), Count(0)
+ {
+ }
+ };
+
+private:
+ // The maximum number of RCW/CCW events we can batch up based on the max size of an ETW event.
+ static const int kMaxRcwCount = (cbMaxEtwEvent - 0x30) / sizeof(EventRCWEntry);
+ static const int kMaxCcwCount = (cbMaxEtwEvent - 0x30) / sizeof(EventCCWEntry);
+
+ int m_currRcw; // The current number of batched (but not emitted) RCW events.
+ int m_currCcw; // The current number of batched (but not emitted) CCW events.
+
+ BulkTypeEventLogger *m_typeLogger; // Type logger to emit type data for.
+
+ EventRCWEntry *m_etwRcwData; // RCW buffer.
+ EventCCWEntry *m_etwCcwData; // CCW buffer.
+
+ CCWEnumerationEntry *m_enumResult;
+};
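+
+// A minimal usage sketch (illustrative; assumes the caller already has a
+// BulkTypeEventLogger to forward type data to):
+//
+//   BulkTypeEventLogger typeLogger;
+//   BulkComLogger comLogger(&typeLogger); // pass NULL to skip type information
+//   comLogger.LogAllComObjects();         // batches RCW/CCW entries, flushing as needed
+//   comLogger.FireBulkComEvent();         // flush anything still batched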
+
+
+// Does bulk static variable ETW logging.
+class BulkStaticsLogger
+{
+public:
+ BulkStaticsLogger(BulkTypeEventLogger *typeLogger);
+ ~BulkStaticsLogger();
+
+ // Walk all static variables in the process and write them to the buffer, firing ETW events
+ // as we reach the max buffer size.
+ void LogAllStatics();
+
+ // Force a flush of the static data, firing an ETW event for any not yet written.
+ void FireBulkStaticsEvent();
+
+private:
+ // Write a single static variable to the log.
+ void WriteEntry(AppDomain *domain, Object **address, Object *obj, FieldDesc *fieldDesc);
+
+private:
+ // The maximum bytes we can emit in the statics buffer.
+ static const int kMaxBytesValues = (cbMaxEtwEvent - 0x30);
+
+ BYTE *m_buffer; // Buffer to queue up statics in
+ int m_used; // The amount of bytes used in m_buffer.
+ int m_count; // The number of statics currently written to m_buffer.
+ AppDomain *m_domain; // The current AppDomain m_buffer contains statics for.
+ BulkTypeEventLogger *m_typeLogger; // The type logger used to emit type data as we encounter it.
+};
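+
+// A minimal usage sketch (illustrative, mirroring BulkComLogger above):
+//
+//   BulkStaticsLogger staticsLogger(pTypeLogger);
+//   staticsLogger.LogAllStatics();        // fires ETW events as m_buffer fills up
+//   staticsLogger.FireBulkStaticsEvent(); // flush any remainder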
+
+
+
+#endif // __EVENTTRACEPRIV_H__
diff --git a/src/vm/excep.cpp b/src/vm/excep.cpp
new file mode 100644
index 0000000000..0c0f66c280
--- /dev/null
+++ b/src/vm/excep.cpp
@@ -0,0 +1,14085 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/* EXCEP.CPP:
+ *
+ * Exception handling support for the CLR.
+ */
+
+#include "common.h"
+
+#include "frames.h"
+#include "threads.h"
+#include "excep.h"
+#include "object.h"
+#include "field.h"
+#include "dbginterface.h"
+#include "cgensys.h"
+#include "comutilnative.h"
+#include "siginfo.hpp"
+#include "gc.h"
+#include "eedbginterfaceimpl.h" //so we can clearexception in RealCOMPlusThrow
+#include "perfcounters.h"
+#include "dllimportcallback.h"
+#include "stackwalk.h" //for CrawlFrame, in SetIPFromSrcToDst
+#include "shimload.h"
+#include "eeconfig.h"
+#include "virtualcallstub.h"
+
+#ifndef FEATURE_PAL
+#include "dwreport.h"
+#endif // !FEATURE_PAL
+
+#include "eventreporter.h"
+
+#ifdef FEATURE_COMINTEROP
+#include <roerrorapi.h>
+#endif
+#ifdef WIN64EXCEPTIONS
+#include "exceptionhandling.h"
+#endif
+
+#include <errorrep.h>
+#ifndef FEATURE_PAL
+// Include definition of GenericModeBlock
+#include <msodw.h>
+#endif // FEATURE_PAL
+
+#ifdef FEATURE_UEF_CHAINMANAGER
+// This is required to register our UEF callback with the UEF chain manager
+#include <mscoruefwrapper.h>
+// The global UEFManager reference for use in the VM
+IUEFManager * g_pUEFManager = NULL;
+#endif // FEATURE_UEF_CHAINMANAGER
+
+// Support for extracting MethodDesc of a delegate.
+#include "comdelegate.h"
+
+#if defined(FEATURE_APPX_BINDER) && !defined(DACCESS_COMPILE)
+// For determining if we have a framework assembly trying to handle a corrupted state exception
+#include "policy.h"
+#endif // FEATURE_APPX_BINDER && !DACCESS_COMPILE
+
+#ifndef FEATURE_PAL
+// Windows uses 64kB as the null-reference area
+#define NULL_AREA_SIZE (64 * 1024)
+#else // !FEATURE_PAL
+#define NULL_AREA_SIZE OS_PAGE_SIZE
+#endif // !FEATURE_PAL
+
+#ifndef CROSSGEN_COMPILE
+
+BOOL IsIPInEE(void *ip);
+
+//----------------------------------------------------------------------------
+//
+// IsExceptionFromManagedCode - determine if pExceptionRecord points to a managed exception
+//
+// Arguments:
+// pExceptionRecord - pointer to exception record
+//
+// Return Value:
+// TRUE or FALSE
+//
+//----------------------------------------------------------------------------
+BOOL IsExceptionFromManagedCode(const EXCEPTION_RECORD * pExceptionRecord)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pExceptionRecord));
+ } CONTRACTL_END;
+
+ if (pExceptionRecord == NULL)
+ {
+ return FALSE;
+ }
+
+ DACCOP_IGNORE(FieldAccess, "EXCEPTION_RECORD is a OS structure, and ExceptionAddress is actually a target address here.");
+ UINT_PTR address = reinterpret_cast<UINT_PTR>(pExceptionRecord->ExceptionAddress);
+
+ // An exception code of EXCEPTION_COMPLUS indicates a managed exception
+ // has occurred (most likely due to executing a "throw" instruction).
+ //
+ // Also, a hardware level exception may not have an exception code of
+ // EXCEPTION_COMPLUS. In this case, an exception address that resides in
+ // managed code indicates a managed exception has occurred.
+ return (IsComPlusException(pExceptionRecord) ||
+ (ExecutionManager::IsManagedCode((PCODE)address)));
+}
+
+
+#ifndef DACCESS_COMPILE
+
+//----------------------------------------------------------------------------
+//
+// IsExceptionFromManagedCodeCallback - a wrapper for IsExceptionFromManagedCode
+//
+// Arguments:
+// pExceptionRecord - pointer to exception record
+//
+// Return Value:
+// TRUE or FALSE
+//
+//----------------------------------------------------------------------------
+BOOL __stdcall IsExceptionFromManagedCodeCallback(EXCEPTION_RECORD * pExceptionRecord)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pExceptionRecord));
+ PRECONDITION(!RunningOnWin7());
+ } CONTRACTL_END;
+
+ // If we can't enter the EE, done.
+ if (g_fForbidEnterEE)
+ {
+ return FALSE;
+ }
+
+ return IsExceptionFromManagedCode(pExceptionRecord);
+}
+
+
+#define SZ_UNHANDLED_EXCEPTION W("Unhandled Exception:")
+#define SZ_UNHANDLED_EXCEPTION_CHARLEN ((sizeof(SZ_UNHANDLED_EXCEPTION) / sizeof(WCHAR)))
+
+
+typedef struct {
+ OBJECTREF pThrowable;
+ STRINGREF s1;
+ OBJECTREF pTmpThrowable;
+} ProtectArgsStruct;
+
+PEXCEPTION_REGISTRATION_RECORD GetCurrentSEHRecord();
+BOOL IsUnmanagedToManagedSEHHandler(EXCEPTION_REGISTRATION_RECORD*);
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrow(OBJECTREF throwable, BOOL rethrow
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity severity = NotCorrupting
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+
+//-------------------------------------------------------------------------------
+// Basically, this asks whether the exception is a managed exception thrown by
+// this instance of the CLR.
+//
+// The way the result is used, however, is to decide whether this instance is the
+// one to throw up the Watson box.
+//-------------------------------------------------------------------------------
+BOOL ShouldOurUEFDisplayUI(PEXCEPTION_POINTERS pExceptionInfo)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ // Test first for the canned SO EXCEPTION_POINTERS structure as it has a NULL context record and will break the code below.
+ extern EXCEPTION_POINTERS g_SOExceptionPointers;
+ if (pExceptionInfo == &g_SOExceptionPointers)
+ {
+ return TRUE;
+ }
+ return IsComPlusException(pExceptionInfo->ExceptionRecord) || ExecutionManager::IsManagedCode(GetIP(pExceptionInfo->ContextRecord));
+}
+
+BOOL NotifyAppDomainsOfUnhandledException(
+ PEXCEPTION_POINTERS pExceptionPointers,
+ OBJECTREF *pThrowableIn,
+ BOOL useLastThrownObject,
+ BOOL isTerminating);
+
+VOID SetManagedUnhandledExceptionBit(
+ BOOL useLastThrownObject);
+
+
+void COMPlusThrowBoot(HRESULT hr)
+{
+ STATIC_CONTRACT_THROWS;
+
+ _ASSERTE(g_fEEShutDown >= ShutDown_Finalize2 || !"This should not be called unless we are in the last phase of shutdown!");
+ ULONG_PTR arg = hr;
+ RaiseException(BOOTUP_EXCEPTION_COMPLUS, EXCEPTION_NONCONTINUABLE, 1, &arg);
+}
+
+
+//-------------------------------------------------------------------------------
+// This simply tests to see if the exception object is a subclass of
+// the discriminating class specified in the exception clause.
+//-------------------------------------------------------------------------------
+BOOL ExceptionIsOfRightType(TypeHandle clauseType, TypeHandle thrownType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ // If the clause type was never resolved, it was never loaded, and so it could not have been thrown.
+ if (clauseType.IsNull())
+ return FALSE;
+
+ if (clauseType == thrownType)
+ return TRUE;
+
+ // now look for parent match
+ TypeHandle superType = thrownType;
+ while (!superType.IsNull()) {
+ if (superType == clauseType) {
+ break;
+ }
+ superType = superType.GetParent();
+ }
+
+ return !superType.IsNull();
+}
+
+//===========================================================================
+// Gets the message text from an exception
+//===========================================================================
+ULONG GetExceptionMessage(OBJECTREF throwable,
+ __inout_ecount(bufferLength) LPWSTR buffer,
+ ULONG bufferLength)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(ThrowOutOfMemory());
+ }
+ CONTRACTL_END;
+
+ // Prefast buffer sanity check. Don't call the API with a zero length buffer.
+ if (bufferLength == 0)
+ {
+ _ASSERTE(bufferLength > 0);
+ return 0;
+ }
+
+ StackSString result;
+ GetExceptionMessage(throwable, result);
+
+ ULONG length = result.GetCount();
+ LPCWSTR chars = result.GetUnicode();
+
+ if (length < bufferLength)
+ {
+ wcsncpy_s(buffer, bufferLength, chars, length);
+ }
+ else
+ {
+ wcsncpy_s(buffer, bufferLength, chars, bufferLength-1);
+ }
+
+ return length;
+}
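+
+// Illustrative caller sketch: the return value is the full message length in characters,
+// so a caller can detect truncation and, if desired, retry with a larger buffer (the
+// buffer size below is an arbitrary example):
+//
+//   WCHAR buf[256];
+//   ULONG cchFull = GetExceptionMessage(throwable, buf, _countof(buf));
+//   if (cchFull >= _countof(buf))
+//   {
+//       // buf holds a truncated, null-terminated prefix; cchFull is the full length.
+//   }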
+
+//-----------------------------------------------------------------------------
+// Given an object, get the "message" from it. If the object is an Exception
+// call Exception.InternalToString, otherwise, call Object.ToString
+//-----------------------------------------------------------------------------
+void GetExceptionMessage(OBJECTREF throwable, SString &result)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(ThrowOutOfMemory());
+ }
+ CONTRACTL_END;
+
+ STRINGREF pString = GetExceptionMessage(throwable);
+
+ // If the call returned NULL (as opposed to an empty string), there is no message.
+ if (pString != NULL)
+ pString->GetSString(result);
+} // void GetExceptionMessage()
+
+#ifdef FEATURE_COMINTEROP
+// This method returns IRestrictedErrorInfo associated with the ErrorObject.
+// It checks whether the given managed exception object has __HasRestrictedLanguageErrorObject set
+// in which case it returns the IRestrictedErrorInfo associated with the __RestrictedErrorObject.
+IRestrictedErrorInfo* GetRestrictedErrorInfoFromErrorObject(OBJECTREF throwable)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(ThrowOutOfMemory());
+ }
+ CONTRACTL_END;
+
+ IRestrictedErrorInfo* pRestrictedErrorInfo = NULL;
+
+ // If there is no object, there is no restricted error.
+ if (throwable == NULL)
+ return NULL;
+
+ _ASSERTE(IsException(throwable->GetMethodTable())); // what is the pathway here?
+ if (!IsException(throwable->GetMethodTable()))
+ {
+ return NULL;
+ }
+
+ struct _gc {
+ OBJECTREF Throwable;
+ OBJECTREF RestrictedErrorInfoObjRef;
+ } gc;
+
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ gc.Throwable = throwable;
+
+ // Get the MethodDesc on which we'll call.
+ MethodDescCallSite getRestrictedLanguageErrorObject(METHOD__EXCEPTION__TRY_GET_RESTRICTED_LANGUAGE_ERROR_OBJECT, &gc.Throwable);
+
+ // Make the call.
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(gc.Throwable),
+ PtrToArgSlot(&gc.RestrictedErrorInfoObjRef)
+ };
+
+ BOOL bHasLanguageRestrictedErrorObject = (BOOL)getRestrictedLanguageErrorObject.Call_RetBool(Args);
+
+ if(bHasLanguageRestrictedErrorObject)
+ {
+ // The __RestrictedErrorObject represents the IRestrictedErrorInfo RCW of a non-CLR platform. Let's get the corresponding IRestrictedErrorInfo for it.
+ pRestrictedErrorInfo = (IRestrictedErrorInfo *)GetComIPFromObjectRef(&gc.RestrictedErrorInfoObjRef, IID_IRestrictedErrorInfo);
+ }
+
+ GCPROTECT_END();
+
+ return pRestrictedErrorInfo;
+}
+#endif // FEATURE_COMINTEROP
+
+STRINGREF GetExceptionMessage(OBJECTREF throwable)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(ThrowOutOfMemory());
+ }
+ CONTRACTL_END;
+
+ // If there is no object, there is no message.
+ if (throwable == NULL)
+ return NULL;
+
+ // Assume we're calling Exception.InternalToString() ...
+ BinderMethodID sigID = METHOD__EXCEPTION__INTERNAL_TO_STRING;
+
+ // ... but if it isn't an exception, call Object.ToString().
+ _ASSERTE(IsException(throwable->GetMethodTable())); // what is the pathway here?
+ if (!IsException(throwable->GetMethodTable()))
+ {
+ sigID = METHOD__OBJECT__TO_STRING;
+ }
+
+ // Return value.
+ STRINGREF pString = NULL;
+
+ GCPROTECT_BEGIN(throwable);
+
+ // Get the MethodDesc on which we'll call.
+ MethodDescCallSite toString(sigID, &throwable);
+
+ // Make the call.
+ ARG_SLOT arg[1] = {ObjToArgSlot(throwable)};
+ pString = toString.Call_RetSTRINGREF(arg);
+
+ GCPROTECT_END();
+
+ return pString;
+}
+
+HRESULT GetExceptionHResult(OBJECTREF throwable)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+ if (throwable == NULL)
+ return hr;
+
+ // Since any object can be thrown in managed code, not only instances of System.Exception subclasses
+ // we need to check to see if we are dealing with an exception before attempting to retrieve
+ // the HRESULT field. If we are not dealing with an exception, then we will simply return E_FAIL.
+ _ASSERTE(IsException(throwable->GetMethodTable())); // what is the pathway here?
+ if (IsException(throwable->GetMethodTable()))
+ {
+ hr = ((EXCEPTIONREF)throwable)->GetHResult();
+ }
+
+ return hr;
+} // HRESULT GetExceptionHResult()
+
+DWORD GetExceptionXCode(OBJECTREF throwable)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+ if (throwable == NULL)
+ return hr;
+
+ // Since any object can be thrown in managed code, not only instances of System.Exception subclasses
+ // we need to check to see if we are dealing with an exception before attempting to retrieve
+ // the HRESULT field. If we are not dealing with an exception, then we will simply return E_FAIL.
+ _ASSERTE(IsException(throwable->GetMethodTable())); // what is the pathway here?
+ if (IsException(throwable->GetMethodTable()))
+ {
+ hr = ((EXCEPTIONREF)throwable)->GetXCode();
+ }
+
+ return hr;
+} // DWORD GetExceptionXCode()
+
+//------------------------------------------------------------------------------
+// This function will extract some information from an Access Violation SEH
+// exception, and store it in the System.AccessViolationException object.
+// - the faulting instruction's IP.
+// - the target address of the faulting instruction.
+// - a code indicating attempted read vs write
+//------------------------------------------------------------------------------
+void SetExceptionAVParameters( // No return.
+ OBJECTREF throwable, // The object into which to set the values.
+ EXCEPTION_RECORD *pExceptionRecord) // The SEH exception information.
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(throwable != NULL);
+ }
+ CONTRACTL_END;
+
+ GCPROTECT_BEGIN(throwable)
+ {
+ // This should only be called for AccessViolationException
+ _ASSERTE(MscorlibBinder::GetException(kAccessViolationException) == throwable->GetMethodTable());
+
+ FieldDesc *pFD_ip = MscorlibBinder::GetField(FIELD__ACCESS_VIOLATION_EXCEPTION__IP);
+ FieldDesc *pFD_target = MscorlibBinder::GetField(FIELD__ACCESS_VIOLATION_EXCEPTION__TARGET);
+ FieldDesc *pFD_access = MscorlibBinder::GetField(FIELD__ACCESS_VIOLATION_EXCEPTION__ACCESSTYPE);
+
+ _ASSERTE(pFD_ip->GetFieldType() == ELEMENT_TYPE_I);
+ _ASSERTE(pFD_target->GetFieldType() == ELEMENT_TYPE_I);
+ _ASSERTE(pFD_access->GetFieldType() == ELEMENT_TYPE_I4);
+
+ void *ip = pExceptionRecord->ExceptionAddress;
+ void *target = (void*)(pExceptionRecord->ExceptionInformation[1]);
+ DWORD access = (DWORD)(pExceptionRecord->ExceptionInformation[0]);
+
+ pFD_ip->SetValuePtr(throwable, ip);
+ pFD_target->SetValuePtr(throwable, target);
+ pFD_access->SetValue32(throwable, access);
+
+ }
+ GCPROTECT_END();
+
+} // void SetExceptionAVParameters()
+
+//------------------------------------------------------------------------------
+// This will call InternalPreserveStackTrace (if the throwable derives from
+// System.Exception), to copy the stack trace to the _remoteStackTraceString.
+// Doing so allows the stack trace of an exception caught by the runtime, and
+// rethrown with COMPlusThrow(OBJECTREF throwable), to be preserved. Otherwise
+// the exception handling code may clear the stack trace. (Generally, we see
+// the stack trace preserved on win32 and cleared on win64.)
+//------------------------------------------------------------------------------
+void ExceptionPreserveStackTrace( // No return.
+ OBJECTREF throwable) // Object about to be thrown.
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(ThrowOutOfMemory());
+ }
+ CONTRACTL_END;
+
+ // If there is no object, there is no stack trace to save.
+ if (throwable == NULL)
+ return;
+
+ GCPROTECT_BEGIN(throwable);
+
+ // Make sure it is derived from System.Exception, that it is not one of the
+ // preallocated exception objects, and that it has a stack trace to save.
+ if (IsException(throwable->GetMethodTable()) &&
+ !CLRException::IsPreallocatedExceptionObject(throwable))
+ {
+ LOG((LF_EH, LL_INFO1000, "ExceptionPreserveStackTrace called\n"));
+
+ // We're calling Exception.InternalPreserveStackTrace() ...
+ BinderMethodID sigID = METHOD__EXCEPTION__INTERNAL_PRESERVE_STACK_TRACE;
+
+
+ // Get the MethodDesc on which we'll call.
+ MethodDescCallSite preserveStackTrace(sigID, &throwable);
+
+ // Make the call.
+ ARG_SLOT arg[1] = {ObjToArgSlot(throwable)};
+ preserveStackTrace.Call(arg);
+ }
+
+ GCPROTECT_END();
+
+} // void ExceptionPreserveStackTrace()
+
+
+// We have to cache the MethodTable and FieldDesc for wrapped non-compliant exceptions the first
+// time we wrap, because we cannot tolerate a GC when it comes time to detect and unwrap one.
+
+static MethodTable *pMT_RuntimeWrappedException;
+static FieldDesc *pFD_WrappedException;
+
+// Non-compliant exceptions are immediately wrapped in a RuntimeWrappedException instance. The entire
+// exception system can now ignore the possibility of these cases except:
+//
+// 1) IL_Throw, which must wrap via this API
+// 2) Calls to Filters & Catch handlers, which must unwrap based on whether the assembly is on the legacy
+// plan.
+//
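+// For example (illustrative): IL compiled from a language that allows throwing arbitrary
+// objects can throw a System.String; IL_Throw wraps it in a RuntimeWrappedException so
+// the rest of the EH code can assume every throwable derives from System.Exception.
+//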
+void WrapNonCompliantException(OBJECTREF *ppThrowable)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsProtectedByGCFrame(ppThrowable));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!IsException((*ppThrowable)->GetMethodTable()));
+
+ EX_TRY
+ {
+ // idempotent operations, so the race condition is okay.
+ if (pMT_RuntimeWrappedException == NULL)
+ pMT_RuntimeWrappedException = MscorlibBinder::GetException(kRuntimeWrappedException);
+
+ if (pFD_WrappedException == NULL)
+ pFD_WrappedException = MscorlibBinder::GetField(FIELD__RUNTIME_WRAPPED_EXCEPTION__WRAPPED_EXCEPTION);
+
+ OBJECTREF orWrapper = AllocateObject(MscorlibBinder::GetException(kRuntimeWrappedException));
+
+ GCPROTECT_BEGIN(orWrapper);
+
+ MethodDescCallSite ctor(METHOD__RUNTIME_WRAPPED_EXCEPTION__OBJ_CTOR, &orWrapper);
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(orWrapper),
+ ObjToArgSlot(*ppThrowable)
+ };
+
+ ctor.Call(args);
+
+ *ppThrowable = orWrapper;
+
+ GCPROTECT_END();
+ }
+ EX_CATCH
+ {
+ // If we took an exception while binding, or running the constructor of the RuntimeWrappedException
+ // instance, we know that this new exception is CLS compliant. In fact, it's likely to be
+ // OutOfMemoryException, StackOverflowException or ThreadAbortException.
+ OBJECTREF orReplacement = GET_THROWABLE();
+
+ _ASSERTE(IsException(orReplacement->GetMethodTable()));
+
+ *ppThrowable = orReplacement;
+
+ } EX_END_CATCH(SwallowAllExceptions);
+}
+
+// Before presenting an exception object to a handler (filter or catch, not finally or fault), it
+// may be necessary to turn it back into a non-compliant exception. This is conditioned on an
+// assembly level setting.
+OBJECTREF PossiblyUnwrapThrowable(OBJECTREF throwable, Assembly *pAssembly)
+{
+ // Check if we are required to compute the RuntimeWrapExceptions status.
+ BOOL fIsRuntimeWrappedException = ((throwable != NULL) && (throwable->GetMethodTable() == pMT_RuntimeWrappedException));
+ BOOL fRequiresComputingRuntimeWrapExceptionsStatus = (fIsRuntimeWrappedException &&
+ (!(pAssembly->GetManifestModule()->IsRuntimeWrapExceptionsStatusComputed())));
+
+ CONTRACTL
+ {
+ THROWS;
+ // If we are required to compute the status of RuntimeWrapExceptions, then the operation could trigger a GC.
+ // Thus, conditionally setup the contract.
+ if (fRequiresComputingRuntimeWrapExceptionsStatus) GC_TRIGGERS; else GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pAssembly));
+ }
+ CONTRACTL_END;
+
+ if (fIsRuntimeWrappedException && (!pAssembly->GetManifestModule()->IsRuntimeWrapExceptions()))
+ {
+ // We already created the instance, fetched the field. We know it is
+ // not marshal by ref, or any of the other cases that might trigger GC.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ throwable = pFD_WrappedException->GetRefValue(throwable);
+ }
+
+ return throwable;
+}
+
+
+// This is used by a holder in CreateTypeInitializationExceptionObject to
+// reset the state as appropriate.
+void ResetTypeInitializationExceptionState(BOOL isAlreadyCreating)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (!isAlreadyCreating)
+ GetThread()->ResetIsCreatingTypeInitException();
+}
+
+void CreateTypeInitializationExceptionObject(LPCWSTR pTypeThatFailed,
+ OBJECTREF *pInnerException,
+ OBJECTREF *pInitException,
+ OBJECTREF *pThrowable)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pInnerException, NULL_OK));
+ PRECONDITION(CheckPointer(pInitException));
+ PRECONDITION(CheckPointer(pThrowable));
+ PRECONDITION(IsProtectedByGCFrame(pInnerException));
+ PRECONDITION(IsProtectedByGCFrame(pInitException));
+ PRECONDITION(IsProtectedByGCFrame(pThrowable));
+ PRECONDITION(CheckPointer(GetThread()));
+ } CONTRACTL_END;
+
+ Thread *pThread = GetThread();
+ *pThrowable = NULL;
+
+ // This will make sure to put the thread back to its original state if something
+ // throws out of this function (like an OOM exception or something)
+ Holder< BOOL, DoNothing< BOOL >, ResetTypeInitializationExceptionState, FALSE, NoNull< BOOL > >
+ isAlreadyCreating(pThread->IsCreatingTypeInitException());
+
+ EX_TRY {
+ // This will contain the type of exception we want to create. Read comment below
+ // on why we'd want to create an exception other than TypeInitException
+ MethodTable *pMT;
+ BinderMethodID methodID;
+
+ // If we are already in the midst of creating a TypeInitializationException object,
+ // and we get here, it means there was an exception thrown while initializing the
+ // TypeInitializationException type itself, or one of the types used by its class
+ // constructor. In this case, we're going to back down and use a SystemException
+ // object in its place. It is *KNOWN* that both these exception types have identical
+ // .ctor sigs "void instance (string, exception)" so both can be used interchangeably
+ // in the code that follows.
+ if (!isAlreadyCreating.GetValue()) {
+ pThread->SetIsCreatingTypeInitException();
+ pMT = MscorlibBinder::GetException(kTypeInitializationException);
+ methodID = METHOD__TYPE_INIT_EXCEPTION__STR_EX_CTOR;
+ }
+ else {
+ // If we ever hit one of these asserts, then it is bad
+ // because we do not know what exception to return then.
+ _ASSERTE(pInnerException != NULL);
+ _ASSERTE(*pInnerException != NULL);
+ *pThrowable = *pInnerException;
+ *pInitException = *pInnerException;
+ goto ErrExit;
+ }
+
+ // Allocate the exception object
+ *pThrowable = AllocateObject(pMT);
+
+ MethodDescCallSite ctor(methodID, pThrowable);
+
+ // Since the inner exception object in the .ctor is of type Exception, make sure
+ // that the object we're passed in derives from Exception. If not, pass NULL.
+ BOOL isException = FALSE;
+ if (pInnerException != NULL)
+ isException = IsException((*pInnerException)->GetMethodTable());
+
+ _ASSERTE(isException); // What pathway can give us non-compliant exceptions?
+
+ STRINGREF sType = StringObject::NewString(pTypeThatFailed);
+
+ // If the inner object derives from exception, set it as the third argument.
+ ARG_SLOT args[] = { ObjToArgSlot(*pThrowable),
+ ObjToArgSlot(sType),
+ ObjToArgSlot(isException ? *pInnerException : NULL) };
+
+ // Call the .ctor
+ ctor.Call(args);
+
+ // On success, set the init exception.
+ *pInitException = *pThrowable;
+ }
+ EX_CATCH {
+ // If calling the constructor fails, then we'll call ourselves again, and this time
+ // through we will try and create an EEException object. If that fails, then the
+ // else block of this will be executed.
+ if (!isAlreadyCreating.GetValue()) {
+ CreateTypeInitializationExceptionObject(pTypeThatFailed, pInnerException, pInitException, pThrowable);
+ }
+
+ // If we were already in the middle of creating a type init
+ // exception when we were called, we would have tried to create an EEException instead
+ // of a TypeInitException.
+ else {
+ // If we're recursing, then we should be calling ourselves from DoRunClassInitThrowing,
+ // in which case we're guaranteed that we're passing in all three arguments.
+ *pInitException = pInnerException ? *pInnerException : NULL;
+ *pThrowable = GET_THROWABLE();
+ }
+ } EX_END_CATCH(SwallowAllExceptions);
+
+ CONSISTENCY_CHECK(*pInitException != NULL || !pInnerException);
+
+ ErrExit:
+ ;
+}
+
+// ==========================================================================
+// ComputeEnclosingHandlerNestingLevel
+//
+// This is code factored out of COMPlusThrowCallback to figure out
+// what the number of nested exception handlers is.
+// ==========================================================================
+DWORD ComputeEnclosingHandlerNestingLevel(IJitManager *pIJM,
+ const METHODTOKEN& mdTok,
+ SIZE_T offsNat)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ // Determine the nesting level of EHClause. Just walk the table
+ // again, and find out how many handlers enclose it
+ DWORD nestingLevel = 0;
+ EH_CLAUSE_ENUMERATOR pEnumState;
+ unsigned EHCount = pIJM->InitializeEHEnumeration(mdTok, &pEnumState);
+
+ for (unsigned j=0; j<EHCount; j++)
+ {
+ EE_ILEXCEPTION_CLAUSE EHClause;
+
+ pIJM->GetNextEHClause(&pEnumState,&EHClause);
+ _ASSERTE(EHClause.HandlerEndPC != (DWORD) -1); // <TODO> remove, only protects against a deprecated convention</TODO>
+
+ if ((offsNat > EHClause.HandlerStartPC) &&
+ (offsNat < EHClause.HandlerEndPC))
+ {
+ nestingLevel++;
+ }
+ }
+
+ return nestingLevel;
+}
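+
+// Worked example (illustrative; assumes handler code is laid out inline within the
+// enclosing handler's native range, as with non-funclet x86 EH):
+//
+//   try { ... }
+//   catch {         // handler A
+//       try { ... }
+//       catch {     // handler B, physically inside handler A's handler region
+//           <offsNat>
+//       }
+//   }
+//
+// An offsNat inside handler B falls strictly within both handlers'
+// (HandlerStartPC, HandlerEndPC) ranges, so the loop above counts both and returns 2.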
+
+// ******************************* EHRangeTreeNode ************************** //
+EHRangeTreeNode::EHRangeTreeNode(void)
+{
+ WRAPPER_NO_CONTRACT;
+ CommonCtor(0, false);
+}
+
+EHRangeTreeNode::EHRangeTreeNode(DWORD offset, bool fIsRange /* = false */)
+{
+ WRAPPER_NO_CONTRACT;
+ CommonCtor(offset, fIsRange);
+}
+
+void EHRangeTreeNode::CommonCtor(DWORD offset, bool fIsRange)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pTree = NULL;
+ m_clause = NULL;
+
+ m_pContainedBy = NULL;
+
+ m_offset = offset;
+ m_fIsRange = fIsRange;
+ m_fIsRoot = false; // must set this flag explicitly
+}
+
+inline bool EHRangeTreeNode::IsRange()
+{
+ // Please see the header file for an explanation of this assertion.
+ _ASSERTE(m_fIsRoot || m_clause != NULL || !m_fIsRange);
+ return m_fIsRange;
+}
+
+void EHRangeTreeNode::MarkAsRange()
+{
+ m_offset = 0;
+ m_fIsRange = true;
+ m_fIsRoot = false;
+}
+
+inline bool EHRangeTreeNode::IsRoot()
+{
+ // Please see the header file for an explanation of this assertion.
+ _ASSERTE(m_fIsRoot || m_clause != NULL || !m_fIsRange);
+ return m_fIsRoot;
+}
+
+void EHRangeTreeNode::MarkAsRoot(DWORD offset)
+{
+ m_offset = offset;
+ m_fIsRange = true;
+ m_fIsRoot = true;
+}
+
+inline DWORD EHRangeTreeNode::GetOffset()
+{
+ _ASSERTE(m_clause == NULL);
+ _ASSERTE(IsRoot() || !IsRange());
+ return m_offset;
+}
+
+inline DWORD EHRangeTreeNode::GetTryStart()
+{
+ _ASSERTE(IsRange());
+ _ASSERTE(!IsRoot());
+ if (IsRoot())
+ {
+ return 0;
+ }
+ else
+ {
+ return m_clause->TryStartPC;
+ }
+}
+
+inline DWORD EHRangeTreeNode::GetTryEnd()
+{
+ _ASSERTE(IsRange());
+ _ASSERTE(!IsRoot());
+ if (IsRoot())
+ {
+ return GetOffset();
+ }
+ else
+ {
+ return m_clause->TryEndPC;
+ }
+}
+
+inline DWORD EHRangeTreeNode::GetHandlerStart()
+{
+ _ASSERTE(IsRange());
+ _ASSERTE(!IsRoot());
+ if (IsRoot())
+ {
+ return 0;
+ }
+ else
+ {
+ return m_clause->HandlerStartPC;
+ }
+}
+
+inline DWORD EHRangeTreeNode::GetHandlerEnd()
+{
+ _ASSERTE(IsRange());
+ _ASSERTE(!IsRoot());
+ if (IsRoot())
+ {
+ return GetOffset();
+ }
+ else
+ {
+ return m_clause->HandlerEndPC;
+ }
+}
+
+inline DWORD EHRangeTreeNode::GetFilterStart()
+{
+ _ASSERTE(IsRange());
+ _ASSERTE(!IsRoot());
+ if (IsRoot())
+ {
+ return 0;
+ }
+ else
+ {
+ return m_clause->FilterOffset;
+ }
+}
+
+// Get the end offset of the filter clause. This offset is exclusive.
+inline DWORD EHRangeTreeNode::GetFilterEnd()
+{
+ _ASSERTE(IsRange());
+ _ASSERTE(!IsRoot());
+ if (IsRoot())
+ {
+ // We should never get here if the "this" node is the root.
+ // By definition, the root contains everything. No checking is necessary.
+ return 0;
+ }
+ else
+ {
+ return m_FilterEndPC;
+ }
+}
+
+bool EHRangeTreeNode::Contains(DWORD offset)
+{
+ WRAPPER_NO_CONTRACT;
+
+ EHRangeTreeNode node(offset);
+ return Contains(&node);
+}
+
+bool EHRangeTreeNode::TryContains(DWORD offset)
+{
+ WRAPPER_NO_CONTRACT;
+
+ EHRangeTreeNode node(offset);
+ return TryContains(&node);
+}
+
+bool EHRangeTreeNode::HandlerContains(DWORD offset)
+{
+ WRAPPER_NO_CONTRACT;
+
+ EHRangeTreeNode node(offset);
+ return HandlerContains(&node);
+}
+
+bool EHRangeTreeNode::FilterContains(DWORD offset)
+{
+ WRAPPER_NO_CONTRACT;
+
+ EHRangeTreeNode node(offset);
+ return FilterContains(&node);
+}
+
+bool EHRangeTreeNode::Contains(EHRangeTreeNode* pNode)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // If we are checking a range of address, then we should check the end address inclusively.
+ if (pNode->IsRoot())
+ {
+ // No node contains the root node.
+ return false;
+ }
+ else if (this->IsRoot())
+ {
+ return (pNode->IsRange() ?
+ (pNode->GetTryEnd() <= this->GetOffset()) && (pNode->GetHandlerEnd() <= this->GetOffset())
+ : (pNode->GetOffset() < this->GetOffset()) );
+ }
+ else
+ {
+ return (this->TryContains(pNode) || this->HandlerContains(pNode) || this->FilterContains(pNode));
+ }
+}
+
+bool EHRangeTreeNode::TryContains(EHRangeTreeNode* pNode)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(this->IsRange());
+
+ if (pNode->IsRoot())
+ {
+ // No node contains the root node.
+ return false;
+ }
+ else if (this->IsRoot())
+ {
+ // We will only get here from GetTcf() to determine if an address is in a try clause.
+ // In this case we want to return false.
+ return false;
+ }
+ else
+ {
+ DWORD tryStart = this->GetTryStart();
+ DWORD tryEnd = this->GetTryEnd();
+
+ // If we are checking a range of address, then we should check the end address inclusively.
+ if (pNode->IsRange())
+ {
+ DWORD start = pNode->GetTryStart();
+ DWORD end = pNode->GetTryEnd();
+
+ if (start == tryStart && end == tryEnd)
+ {
+ return false;
+ }
+ else if (start == end)
+ {
+ // This is effectively a single offset.
+ if ((tryStart <= start) && (end < tryEnd))
+ {
+ return true;
+ }
+ }
+ else if ((tryStart <= start) && (end <= tryEnd))
+ {
+ return true;
+ }
+ }
+ else
+ {
+ DWORD offset = pNode->GetOffset();
+ if ((tryStart <= offset) && (offset < tryEnd))
+ {
+ return true;
+ }
+ }
+ }
+
+#ifdef WIN64EXCEPTIONS
+ // If we are boot-strapping the tree, don't recurse down because the result could be unreliable. Note that
+ // even if we don't recurse, given a particular node, we can still always find its most specific container with
+ // the logic above, i.e. it's always safe to do one depth level of checking.
+ //
+ // To build the tree, all we need to know is the most specific container of a particular node. This can be
+ // done by just comparing the offsets of the try regions. However, funclets create a problem because even if
+ // a funclet is conceptually contained in a try region, we cannot determine this fact just by comparing the offsets.
+ // This is when we need to recurse the tree. Here is a classic example:
+ // try
+ // {
+ // try
+ // {
+ // }
+ // catch
+ // {
+ // // If the offset is here, then we need to recurse.
+ // }
+ // }
+ // catch
+ // {
+ // }
+ if (!m_pTree->m_fInitializing)
+ {
+ // Iterate all the contained clauses, and for the ones which are contained in the try region,
+ // ask if the requested range is contained by it.
+ USHORT i = 0;
+ USHORT numNodes = m_containees.Count();
+ EHRangeTreeNode** ppNodes = NULL;
+ for (i = 0, ppNodes = m_containees.Table(); i < numNodes; i++, ppNodes++)
+ {
+ // This variable is purely used for readability.
+ EHRangeTreeNode* pNodeCur = *ppNodes;
+
+ // it's possible for nested try blocks to have the same beginning and end offsets
+ if ( ( this->GetTryStart() <= pNodeCur->GetTryStart() ) &&
+ ( pNodeCur->GetTryEnd() <= this->GetTryEnd() ) )
+ {
+ if (pNodeCur->Contains(pNode))
+ {
+ return true;
+ }
+ }
+ }
+ }
+#endif // WIN64EXCEPTIONS
+
+ return false;
+}
+
+bool EHRangeTreeNode::HandlerContains(EHRangeTreeNode* pNode)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(this->IsRange());
+
+ if (pNode->IsRoot())
+ {
+ // No node contains the root node.
+ return false;
+ }
+ else if (this->IsRoot())
+ {
+ // We will only get here from GetTcf() to determine if an address is in a try clause.
+ // In this case we want to return false.
+ return false;
+ }
+ else
+ {
+ DWORD handlerStart = this->GetHandlerStart();
+ DWORD handlerEnd = this->GetHandlerEnd();
+
+ // If we are checking a range of address, then we should check the end address inclusively.
+ if (pNode->IsRange())
+ {
+ DWORD start = pNode->GetTryStart();
+ DWORD end = pNode->GetTryEnd();
+
+ if (start == handlerStart && end == handlerEnd)
+ {
+ return false;
+ }
+ else if ((handlerStart <= start) && (end <= handlerEnd))
+ {
+ return true;
+ }
+ }
+ else
+ {
+ DWORD offset = pNode->GetOffset();
+ if ((handlerStart <= offset) && (offset < handlerEnd))
+ {
+ return true;
+ }
+ }
+ }
+
+#ifdef WIN64EXCEPTIONS
+ // Refer to the comment in TryContains().
+ if (!m_pTree->m_fInitializing)
+ {
+ // Iterate all the contained clauses, and for the ones which are contained in the try region,
+ // ask if the requested range is contained by it.
+ USHORT i = 0;
+ USHORT numNodes = m_containees.Count();
+ EHRangeTreeNode** ppNodes = NULL;
+ for (i = 0, ppNodes = m_containees.Table(); i < numNodes; i++, ppNodes++)
+ {
+ // This variable is purely used for readability.
+ EHRangeTreeNode* pNodeCur = *ppNodes;
+
+ if ( ( this->GetHandlerStart() <= pNodeCur->GetTryStart() ) &&
+ ( pNodeCur->GetTryEnd() < this->GetHandlerEnd() ) )
+ {
+ if (pNodeCur->Contains(pNode))
+ {
+ return true;
+ }
+ }
+ }
+ }
+#endif // WIN64EXCEPTIONS
+
+ return false;
+}
+
+bool EHRangeTreeNode::FilterContains(EHRangeTreeNode* pNode)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(this->IsRange());
+
+ if (pNode->IsRoot())
+ {
+ // No node contains the root node.
+ return false;
+ }
+ else if (this->IsRoot() || !IsFilterHandler(this->m_clause))
+ {
+ // We will only get here from GetTcf() to determine if an address is in a try clause.
+ // In this case we want to return false.
+ return false;
+ }
+ else
+ {
+ DWORD filterStart = this->GetFilterStart();
+ DWORD filterEnd = this->GetFilterEnd();
+
+ // If we are checking a range of address, then we should check the end address inclusively.
+ if (pNode->IsRange())
+ {
+ DWORD start = pNode->GetTryStart();
+ DWORD end = pNode->GetTryEnd();
+
+ if (start == filterStart && end == filterEnd)
+ {
+ return false;
+ }
+ else if ((filterStart <= start) && (end <= filterEnd))
+ {
+ return true;
+ }
+ }
+ else
+ {
+ DWORD offset = pNode->GetOffset();
+ if ((filterStart <= offset) && (offset < filterEnd))
+ {
+ return true;
+ }
+ }
+ }
+
+#ifdef WIN64EXCEPTIONS
+ // Refer to the comment in TryContains().
+ if (!m_pTree->m_fInitializing)
+ {
+ // Iterate all the contained clauses, and for the ones which are contained in the try region,
+ // ask if the requested range is contained by it.
+ USHORT i = 0;
+ USHORT numNodes = m_containees.Count();
+ EHRangeTreeNode** ppNodes = NULL;
+ for (i = 0, ppNodes = m_containees.Table(); i < numNodes; i++, ppNodes++)
+ {
+ // This variable is purely used for readability.
+ EHRangeTreeNode* pNodeCur = *ppNodes;
+
+ if ( ( this->GetFilterStart() <= pNodeCur->GetTryStart() ) &&
+ ( pNodeCur->GetTryEnd() < this->GetFilterEnd() ) )
+ {
+ if (pNodeCur->Contains(pNode))
+ {
+ return true;
+ }
+ }
+ }
+ }
+#endif // WIN64EXCEPTIONS
+
+ return false;
+}
+
+EHRangeTreeNode* EHRangeTreeNode::GetContainer()
+{
+ return m_pContainedBy;
+}
+
+HRESULT EHRangeTreeNode::AddNode(EHRangeTreeNode *pNode)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ PRECONDITION(pNode != NULL);
+ }
+ CONTRACTL_END;
+
+ EHRangeTreeNode **ppEH = m_containees.Append();
+
+ if (ppEH == NULL)
+ return E_OUTOFMEMORY;
+
+ (*ppEH) = pNode;
+ return S_OK;
+}
+
+// ******************************* EHRangeTree ************************** //
+
+EHRangeTree::EHRangeTree(IJitManager* pIJM,
+ const METHODTOKEN& methodToken,
+ DWORD methodSize,
+ int cFunclet,
+ const DWORD * rgFunclet)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ LOG((LF_CORDB, LL_INFO10000, "EHRT::ERHT: already loaded!\n"));
+
+ EH_CLAUSE_ENUMERATOR pEnumState;
+ m_EHCount = pIJM->InitializeEHEnumeration(methodToken, &pEnumState);
+
+ _ASSERTE(m_EHCount != 0xFFFFFFFF);
+
+ ULONG i = 0;
+
+ m_rgClauses = NULL;
+ m_rgNodes = NULL;
+ m_root = NULL;
+ m_hrInit = S_OK;
+ m_fInitializing = true;
+
+ if (m_EHCount > 0)
+ {
+ m_rgClauses = new (nothrow) EE_ILEXCEPTION_CLAUSE[m_EHCount];
+ if (m_rgClauses == NULL)
+ {
+ m_hrInit = E_OUTOFMEMORY;
+ goto LError;
+ }
+ }
+
+ LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: m_ehcount:0x%x, m_rgClauses:0%x\n",
+ m_EHCount, m_rgClauses));
+
+ m_rgNodes = new (nothrow) EHRangeTreeNode[m_EHCount+1];
+ if (m_rgNodes == NULL)
+ {
+ m_hrInit = E_OUTOFMEMORY;
+ goto LError;
+ }
+
+ //this contains everything, even stuff on the last IP
+ m_root = &(m_rgNodes[m_EHCount]);
+ m_root->MarkAsRoot(methodSize + 1);
+
+ LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: rgNodes:0x%x\n", m_rgNodes));
+
+ if (m_EHCount == 0)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: About to leave!\n"));
+ goto LSuccess;
+ }
+
+ LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: Sticking around!\n"));
+
+ // First, load all the EH clauses into the object.
+ for (i = 0; i < m_EHCount; i++)
+ {
+ EE_ILEXCEPTION_CLAUSE * pEHClause = &(m_rgClauses[i]);
+
+ LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: i:0x%x!\n", i));
+
+ pIJM->GetNextEHClause(&pEnumState, pEHClause);
+
+ LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: EHRTT_JIT_MANAGER got clause\n", i));
+
+ LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: clause 0x%x,"
+ "addrof:0x%x\n", i, pEHClause ));
+
+ _ASSERTE(pEHClause->HandlerEndPC != (DWORD) -1); // <TODO> remove, only protects against a deprecated convention</TODO>
+
+ EHRangeTreeNode * pNodeCur = &(m_rgNodes[i]);
+
+ pNodeCur->m_pTree = this;
+ pNodeCur->m_clause = pEHClause;
+
+ if (pEHClause->Flags == COR_ILEXCEPTION_CLAUSE_FILTER)
+ {
+#ifdef WIN64EXCEPTIONS
+ // Because of funclets, there is no way to guarantee the placement of a filter.
+ // Thus, we need to loop through the funclets to find the end offset.
+ for (int f = 0; f < cFunclet; f++)
+ {
+ // Check the start offset of the filter funclet.
+ if (pEHClause->FilterOffset == rgFunclet[f])
+ {
+ if (f < (cFunclet - 1))
+ {
+ // If it's NOT the last funclet, use the start offset of the next funclet.
+ pNodeCur->m_FilterEndPC = rgFunclet[f + 1];
+ }
+ else
+ {
+ // If it's the last funclet, use the size of the method.
+ pNodeCur->m_FilterEndPC = methodSize;
+ }
+ break;
+ }
+ }
+#else // WIN64EXCEPTIONS
+ // On x86, since the filter doesn't have an end FilterPC, the only way we can know the size
+ // of the filter is if it's located immediately prior to its handler and immediately after
+ // its try region. We assume that this is the case, and if it isn't, we're so amazingly
+ // hosed that we can't continue.
+ if ((pEHClause->FilterOffset >= pEHClause->HandlerStartPC) ||
+ (pEHClause->FilterOffset < pEHClause->TryEndPC))
+ {
+ m_hrInit = CORDBG_E_SET_IP_IMPOSSIBLE;
+ goto LError;
+ }
+ pNodeCur->m_FilterEndPC = pEHClause->HandlerStartPC;
+#endif // WIN64EXCEPTIONS
+ }
+
+ pNodeCur->MarkAsRange();
+ }
+
+ LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: about to do the second pass\n"));
+
+
+ // Second, for each EH clause, find its most limited containing clause.
+ // On WIN64, we have duplicate clauses. There are two types of duplicate clauses.
+ //
+ // The first type is described in ExceptionHandling.cpp. This type doesn't add additional information to the
+ // EH tree structure. For example, if an offset is in the try region of a duplicate clause of this type,
+ // then some clause which comes before the duplicate clause should contain the offset in its handler region.
+ // Therefore, even though this type of duplicate clauses are added to the EH tree, they should never be used.
+ //
+ // The second type is what's called the protected clause. These clauses are used to mark the cloned finally
+ // region. They have an empty try region. Here's an example:
+ //
+ // // C# code
+ // try
+ // {
+ // A
+ // }
+ // finally
+ // {
+ // B
+ // }
+ //
+ // // jitted code
+ // parent
+ // -------
+ // A
+ // B'
+ // -------
+ //
+ // funclet
+ // -------
+ // B
+ // -------
+ //
+ // A protected clause covers the B' region in the parent method. In essence you can think of the method as
+ // having two try/finally regions, and that's exactly how protected clauses are handled in the EH tree.
+ // They are added to the EH tree just like any other EH clauses.
+ for (i = 0; i < m_EHCount; i++)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: SP:0x%x\n", i));
+
+ EHRangeTreeNode * pNodeCur = &(m_rgNodes[i]);
+
+ EHRangeTreeNode *pNodeCandidate = NULL;
+ pNodeCandidate = FindContainer(pNodeCur);
+ _ASSERTE(pNodeCandidate != NULL);
+
+ pNodeCur->m_pContainedBy = pNodeCandidate;
+
+ LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: SP: about to add to tree\n"));
+
+ HRESULT hr = pNodeCandidate->AddNode(pNodeCur);
+ if (FAILED(hr))
+ {
+ m_hrInit = hr;
+ goto LError;
+ }
+ }
+
+LSuccess:
+ m_fInitializing = false;
+ return;
+
+LError:
+ LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: LError - something went wrong!\n"));
+
+ if (m_rgClauses != NULL)
+ {
+ delete [] m_rgClauses;
+ m_rgClauses = NULL;
+ }
+
+ if (m_rgNodes != NULL)
+ {
+ delete [] m_rgNodes;
+ m_rgNodes = NULL;
+ }
+
+ m_fInitializing = false;
+
+ LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: Falling off of LError!\n"));
+} // Ctor Core
+
+EHRangeTree::~EHRangeTree()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_rgNodes != NULL)
+ delete [] m_rgNodes;
+
+ if (m_rgClauses != NULL)
+ delete [] m_rgClauses;
+} //Dtor
+
+EHRangeTreeNode *EHRangeTree::FindContainer(EHRangeTreeNode *pNodeSearch)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ EHRangeTreeNode *pNodeCandidate = NULL;
+
+ // Examine the root, too.
+ for (ULONG iInner = 0; iInner < m_EHCount+1; iInner++)
+ {
+ EHRangeTreeNode *pNodeCur = &(m_rgNodes[iInner]);
+
+ // Check if the current node contains the node we are searching for.
+ if ((pNodeSearch != pNodeCur) &&
+ pNodeCur->Contains(pNodeSearch))
+ {
+ // Update the candidate node if it is NULL or if it contains the current node
+ // (i.e. the current node is more specific than the candidate node).
+ if ((pNodeCandidate == NULL) ||
+ pNodeCandidate->Contains(pNodeCur))
+ {
+ pNodeCandidate = pNodeCur;
+ }
+ }
+ }
+
+ return pNodeCandidate;
+}
+
+EHRangeTreeNode *EHRangeTree::FindMostSpecificContainer(DWORD addr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ EHRangeTreeNode node(addr);
+ return FindContainer(&node);
+}
+
+EHRangeTreeNode *EHRangeTree::FindNextMostSpecificContainer(EHRangeTreeNode *pNodeSearch, DWORD addr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(!m_fInitializing);
+
+ EHRangeTreeNode **rgpNodes = pNodeSearch->m_containees.Table();
+
+ if (NULL == rgpNodes)
+ return pNodeSearch;
+
+ // It's possible that no subrange contains the desired address, so
+ // keep a reasonable default around.
+ EHRangeTreeNode *pNodeCandidate = pNodeSearch;
+
+ USHORT cSubRanges = pNodeSearch->m_containees.Count();
+ EHRangeTreeNode **ppNodeCur = pNodeSearch->m_containees.Table();
+
+ for (int i = 0; i < cSubRanges; i++, ppNodeCur++)
+ {
+ if ((*ppNodeCur)->Contains(addr) &&
+ pNodeCandidate->Contains((*ppNodeCur)))
+ {
+ pNodeCandidate = (*ppNodeCur);
+ }
+ }
+
+ return pNodeCandidate;
+}
+
+BOOL EHRangeTree::isAtStartOfCatch(DWORD offset)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (NULL != m_rgNodes && m_EHCount != 0)
+ {
+ for(unsigned i = 0; i < m_EHCount;i++)
+ {
+ if (m_rgNodes[i].m_clause->HandlerStartPC == offset &&
+ (!IsFilterHandler(m_rgNodes[i].m_clause) && !IsFaultOrFinally(m_rgNodes[i].m_clause)))
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+enum TRY_CATCH_FINALLY
+{
+ TCF_NONE= 0,
+ TCF_TRY,
+ TCF_FILTER,
+ TCF_CATCH,
+ TCF_FINALLY,
+ TCF_COUNT, //count of all elements, not an element itself
+};
+
+#ifdef LOGGING
+const char *TCFStringFromConst(TRY_CATCH_FINALLY tcf)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch( tcf )
+ {
+ case TCF_NONE:
+ return "TCF_NONE";
+ break;
+ case TCF_TRY:
+ return "TCF_TRY";
+ break;
+ case TCF_FILTER:
+ return "TCF_FILTER";
+ break;
+ case TCF_CATCH:
+ return "TCF_CATCH";
+ break;
+ case TCF_FINALLY:
+ return "TCF_FINALLY";
+ break;
+ case TCF_COUNT:
+ return "TCF_COUNT";
+ break;
+ default:
+ return "INVALID TCF VALUE";
+ break;
+ }
+}
+#endif //LOGGING
+
+// We're unwinding if we'll return to the EE's code. Otherwise
+// we'll return to someplace in the current code. Anywhere outside
+// this function is "EE code".
+bool FinallyIsUnwinding(EHRangeTreeNode *pNode,
+ ICodeManager* pEECM,
+ PREGDISPLAY pReg,
+ SLOT addrStart)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ const BYTE *pbRetAddr = pEECM->GetFinallyReturnAddr(pReg);
+
+ if (pbRetAddr < (const BYTE *)addrStart)
+ return true;
+
+ DWORD offset = (DWORD)(size_t)(pbRetAddr - addrStart);
+ EHRangeTreeNode *pRoot = pNode->m_pTree->m_root;
+
+ if (!pRoot->Contains(offset))
+ return true;
+ else
+ return false;
+}
+
+#ifndef WIN64EXCEPTIONS
+BOOL LeaveCatch(ICodeManager* pEECM,
+ Thread *pThread,
+ CONTEXT *pCtx,
+ void *methodInfoPtr,
+ unsigned offset)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // We can assert these things here, and skip a call
+ // to COMPlusCheckForAbort later.
+
+ // If no abort has been requested,
+ _ASSERTE((pThread->GetThrowable() != NULL) ||
+ // or if there is a pending exception.
+ (!pThread->IsAbortRequested()) );
+
+ LPVOID esp = COMPlusEndCatchWorker(pThread);
+
+ PopNestedExceptionRecords(esp, pCtx, pThread->GetExceptionListPtr());
+
+ // Do JIT-specific work
+ pEECM->LeaveCatch(methodInfoPtr, offset, pCtx);
+
+ SetSP(pCtx, (UINT_PTR)esp);
+ return TRUE;
+}
+#endif // WIN64EXCEPTIONS
+
+TRY_CATCH_FINALLY GetTcf(EHRangeTreeNode *pNode,
+ unsigned offset)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pNode->IsRange() && !pNode->IsRoot());
+
+ TRY_CATCH_FINALLY tcf;
+
+ if (!pNode->Contains(offset))
+ {
+ tcf = TCF_NONE;
+ }
+ else if (pNode->TryContains(offset))
+ {
+ tcf = TCF_TRY;
+ }
+ else if (pNode->FilterContains(offset))
+ {
+ tcf = TCF_FILTER;
+ }
+ else
+ {
+ _ASSERTE(pNode->HandlerContains(offset));
+ if (IsFaultOrFinally(pNode->m_clause))
+ tcf = TCF_FINALLY;
+ else
+ tcf = TCF_CATCH;
+ }
+
+ return tcf;
+}
+
+const DWORD bEnter = 0x01;
+const DWORD bLeave = 0x02;
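+
+// These flags may be combined: in SetIPFromSrcToDst below, IsLegalTransition
+// is called with bLeave while backing out of clauses, with bEnter while
+// narrowing back in, and with bEnter|bLeave for the single node that contains
+// both the from and to offsets.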
+
+HRESULT IsLegalTransition(Thread *pThread,
+ bool fCanSetIPOnly,
+ DWORD fEnter,
+ EHRangeTreeNode *pNode,
+ DWORD offFrom,
+ DWORD offTo,
+ ICodeManager* pEECM,
+ PREGDISPLAY pReg,
+ SLOT addrStart,
+ void *methodInfoPtr,
+ PCONTEXT pCtx)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ if (fEnter & bEnter)
+ {
+ _ASSERTE(pNode->Contains(offTo));
+ }
+ if (fEnter & bLeave)
+ {
+ _ASSERTE(pNode->Contains(offFrom));
+ }
+#endif //_DEBUG
+
+ // First, figure out where we're coming from/going to
+ TRY_CATCH_FINALLY tcfFrom = GetTcf(pNode,
+ offFrom);
+
+ TRY_CATCH_FINALLY tcfTo = GetTcf(pNode,
+ offTo);
+
+ LOG((LF_CORDB, LL_INFO10000, "ILT: from %s to %s\n",
+ TCFStringFromConst(tcfFrom),
+ TCFStringFromConst(tcfTo)));
+
+ // Now we'll consider, case-by-case, the various permutations that
+ // can arise
+ switch(tcfFrom)
+ {
+ case TCF_NONE:
+ case TCF_TRY:
+ {
+ switch(tcfTo)
+ {
+ case TCF_NONE:
+ case TCF_TRY:
+ {
+ return S_OK;
+ break;
+ }
+
+ case TCF_FILTER:
+ {
+ return CORDBG_E_CANT_SETIP_INTO_OR_OUT_OF_FILTER;
+ break;
+ }
+
+ case TCF_CATCH:
+ {
+ return CORDBG_E_CANT_SET_IP_INTO_CATCH;
+ break;
+ }
+
+ case TCF_FINALLY:
+ {
+ return CORDBG_E_CANT_SET_IP_INTO_FINALLY;
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+ }
+
+ case TCF_FILTER:
+ {
+ switch(tcfTo)
+ {
+ case TCF_NONE:
+ case TCF_TRY:
+ case TCF_CATCH:
+ case TCF_FINALLY:
+ {
+ return CORDBG_E_CANT_SETIP_INTO_OR_OUT_OF_FILTER;
+ break;
+ }
+ case TCF_FILTER:
+ {
+ return S_OK;
+ break;
+ }
+ default:
+ break;
+
+ }
+ break;
+ }
+
+ case TCF_CATCH:
+ {
+ switch(tcfTo)
+ {
+ case TCF_NONE:
+ case TCF_TRY:
+ {
+#if !defined(WIN64EXCEPTIONS)
+ CONTEXT *pFilterCtx = pThread->GetFilterContext();
+ if (pFilterCtx == NULL)
+ return CORDBG_E_SET_IP_IMPOSSIBLE;
+
+ if (!fCanSetIPOnly)
+ {
+ if (!LeaveCatch(pEECM,
+ pThread,
+ pFilterCtx,
+ methodInfoPtr,
+ offFrom))
+ return E_FAIL;
+ }
+ return S_OK;
+#else // WIN64EXCEPTIONS
+ // <NOTE>
+ // Setting IP out of a catch clause is not supported for WIN64EXCEPTIONS because of funclets.
+ // This scenario is disabled with approval from VS because it's not considered to
+ // be a common user scenario.
+ // </NOTE>
+ return CORDBG_E_CANT_SET_IP_OUT_OF_CATCH_ON_WIN64;
+#endif // !WIN64EXCEPTIONS
+ break;
+ }
+
+ case TCF_FILTER:
+ {
+ return CORDBG_E_CANT_SETIP_INTO_OR_OUT_OF_FILTER;
+ break;
+ }
+
+ case TCF_CATCH:
+ {
+ return S_OK;
+ break;
+ }
+
+ case TCF_FINALLY:
+ {
+ return CORDBG_E_CANT_SET_IP_INTO_FINALLY;
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+ }
+
+ case TCF_FINALLY:
+ {
+ switch(tcfTo)
+ {
+ case TCF_NONE:
+ case TCF_TRY:
+ {
+#if !defined(WIN64EXCEPTIONS)
+ if (!FinallyIsUnwinding(pNode, pEECM, pReg, addrStart))
+ {
+ CONTEXT *pFilterCtx = pThread->GetFilterContext();
+ if (pFilterCtx == NULL)
+ return CORDBG_E_SET_IP_IMPOSSIBLE;
+
+ if (!fCanSetIPOnly)
+ {
+ if (!pEECM->LeaveFinally(methodInfoPtr,
+ offFrom,
+ pFilterCtx))
+ return E_FAIL;
+ }
+ return S_OK;
+ }
+ else
+ {
+ return CORDBG_E_CANT_SET_IP_OUT_OF_FINALLY;
+ }
+#else // WIN64EXCEPTIONS
+ // <NOTE>
+ // Setting IP out of a non-unwinding finally clause is not supported on WIN64EXCEPTIONS because of funclets.
+ // This scenario is disabled with approval from VS because it's not considered to be a common user
+ // scenario.
+ // </NOTE>
+ return CORDBG_E_CANT_SET_IP_OUT_OF_FINALLY_ON_WIN64;
+#endif // !WIN64EXCEPTIONS
+
+ break;
+ }
+
+ case TCF_FILTER:
+ {
+ return CORDBG_E_CANT_SETIP_INTO_OR_OUT_OF_FILTER;
+ break;
+ }
+
+ case TCF_CATCH:
+ {
+ return CORDBG_E_CANT_SET_IP_INTO_CATCH;
+ break;
+ }
+
+ case TCF_FINALLY:
+ {
+ return S_OK;
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ _ASSERTE( !"IsLegalTransition: We should never reach this point!" );
+
+ return CORDBG_E_SET_IP_IMPOSSIBLE;
+}
+
+// We need this to determine what to do based on whether the destination
+// is a stack-empty point.
+HRESULT DestinationIsValid(void *pDjiToken,
+ DWORD offTo,
+ EHRangeTree *pEHRT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ // We'll add a call to the DebugInterface that takes this
+ // & tells us if the destination is a stack empty point.
+// DebuggerJitInfo *pDji = (DebuggerJitInfo *)pDjiToken;
+
+ if (pEHRT->isAtStartOfCatch(offTo))
+ return CORDBG_S_BAD_START_SEQUENCE_POINT;
+ else
+ return S_OK;
+} // HRESULT DestinationIsValid()
+
+// We want to keep the 'worst' HRESULT - if one has failed (..._E_...) and the
+// other hasn't, take the failing one. If both or neither have failed, then
+// it doesn't matter which we take.
+// Note that this macro favors retaining the first argument.
+#define WORST_HR(hr1,hr2) (FAILED(hr1)?hr1:hr2)
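+// For example: WORST_HR(E_FAIL, S_OK) and WORST_HR(S_OK, E_FAIL) both yield
+// E_FAIL, while WORST_HR(E_FAIL, E_OUTOFMEMORY) yields E_FAIL, since the
+// first failing argument wins.
+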
+HRESULT SetIPFromSrcToDst(Thread *pThread,
+ SLOT addrStart, // base address of method
+ DWORD offFrom, // native offset
+ DWORD offTo, // native offset
+ bool fCanSetIPOnly, // if true, don't do any real work
+ PREGDISPLAY pReg,
+ PCONTEXT pCtx,
+ void *pDji,
+ EHRangeTree *pEHRT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ HRESULT hrReturn = S_OK;
+ bool fCheckOnly = true;
+
+ EECodeInfo codeInfo((TADDR)(addrStart));
+
+ ICodeManager * pEECM = codeInfo.GetCodeManager();
+ LPVOID methodInfoPtr = codeInfo.GetGCInfo();
+
+ // Do both checks here so the compiler doesn't complain about skipping
+ // initialization because of the goto.
+ if (fCanSetIPOnly && !pEECM->IsGcSafe(&codeInfo, offFrom))
+ {
+ hrReturn = WORST_HR(hrReturn, CORDBG_E_SET_IP_IMPOSSIBLE);
+ }
+
+ if (fCanSetIPOnly && !pEECM->IsGcSafe(&codeInfo, offTo))
+ {
+ hrReturn = WORST_HR(hrReturn, CORDBG_E_SET_IP_IMPOSSIBLE);
+ }
+
+ if ((hr = DestinationIsValid(pDji, offTo, pEHRT)) != S_OK
+ && fCanSetIPOnly)
+ {
+ hrReturn = WORST_HR(hrReturn,hr);
+ }
+
+ // The basic approach is this: We'll start with the most specific (smallest)
+ // EHClause that contains the starting address. We'll 'back out', to larger
+ // and larger ranges, until we either find an EHClause that contains both
+ // the from and to addresses, or until we reach the root EHRangeTreeNode,
+ // which contains all addresses within it. At each step, we check (and, when
+ // committing, perform) the work that the various transitions require (from
+ // inside to outside a catch, etc.).
+ // From that point, we do the reverse process - we go from the EHClause that
+ // encompasses both from and to, and narrow down to the smallest EHClause that
+ // encompasses the to point. We use our nifty data structure to manage
+ // the tree structure inherent in this process.
+ //
+ // NOTE: We do this process twice, once to check that we're not making an
+ // overall illegal transition, such as ultimately setting the IP into
+ // a catch, which is never allowed. We're doing this because VS
+ // calls SetIP without calling CanSetIP first, and so we should be able
+ // to return an error code and leave the stack in the same condition
+ // as at the start of the call, and so we shouldn't back out of clauses
+ // or move into them until we're sure that can be done.
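+ //
+ // Illustrative walk (hypothetical offsets): to move from offset 25 inside a
+ // nested try [20,30) out to offset 60 that lies only in the root range, the
+ // first loop below backs out of [20,30) and then of the enclosing try
+ // [10,50), checking bLeave for each; both offsets are then contained only by
+ // the root, so the bEnter|bLeave check is skipped and the narrowing loop
+ // exits immediately.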
+
+retryForCommit:
+
+ EHRangeTreeNode *node;
+ EHRangeTreeNode *nodeNext;
+ node = pEHRT->FindMostSpecificContainer(offFrom);
+
+ while (!node->Contains(offTo))
+ {
+ hr = IsLegalTransition(pThread,
+ fCheckOnly,
+ bLeave,
+ node,
+ offFrom,
+ offTo,
+ pEECM,
+ pReg,
+ addrStart,
+ methodInfoPtr,
+ pCtx);
+
+ if (FAILED(hr))
+ {
+ hrReturn = WORST_HR(hrReturn,hr);
+ }
+
+ node = node->GetContainer();
+ // m_root prevents node from ever being NULL.
+ }
+
+ if (node != pEHRT->m_root)
+ {
+ hr = IsLegalTransition(pThread,
+ fCheckOnly,
+ bEnter|bLeave,
+ node,
+ offFrom,
+ offTo,
+ pEECM,
+ pReg,
+ addrStart,
+ methodInfoPtr,
+ pCtx);
+
+ if (FAILED(hr))
+ {
+ hrReturn = WORST_HR(hrReturn,hr);
+ }
+ }
+
+ nodeNext = pEHRT->FindNextMostSpecificContainer(node,
+ offTo);
+
+ while(nodeNext != node)
+ {
+ hr = IsLegalTransition(pThread,
+ fCheckOnly,
+ bEnter,
+ nodeNext,
+ offFrom,
+ offTo,
+ pEECM,
+ pReg,
+ addrStart,
+ methodInfoPtr,
+ pCtx);
+
+ if (FAILED(hr))
+ {
+ hrReturn = WORST_HR(hrReturn, hr);
+ }
+
+ node = nodeNext;
+ nodeNext = pEHRT->FindNextMostSpecificContainer(node,
+ offTo);
+ }
+
+ // If it was the intention to actually set the IP and the above transition checks succeeded,
+ // then go back and do it all again but this time widen and narrow the thread's actual scope
+ if (!fCanSetIPOnly && fCheckOnly && SUCCEEDED(hrReturn))
+ {
+ fCheckOnly = false;
+ goto retryForCommit;
+ }
+
+ return hrReturn;
+} // HRESULT SetIPFromSrcToDst()
+
+// This function should only be called if the thread is suspended and sitting in jitted code
+BOOL IsInFirstFrameOfHandler(Thread *pThread, IJitManager *pJitManager, const METHODTOKEN& MethodToken, DWORD offset)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ // if don't have a throwable the aren't processing an exception
+ if (IsHandleNullUnchecked(pThread->GetThrowableAsHandle()))
+ return FALSE;
+
+ EH_CLAUSE_ENUMERATOR pEnumState;
+ unsigned EHCount = pJitManager->InitializeEHEnumeration(MethodToken, &pEnumState);
+
+ for(ULONG i=0; i < EHCount; i++)
+ {
+ EE_ILEXCEPTION_CLAUSE EHClause;
+ pJitManager->GetNextEHClause(&pEnumState, &EHClause);
+ _ASSERTE(IsValidClause(&EHClause));
+
+ if ( offset >= EHClause.HandlerStartPC && offset < EHClause.HandlerEndPC)
+ return TRUE;
+
+ // check if it's in the filter itself if we're not in the handler
+ if (IsFilterHandler(&EHClause) && offset >= EHClause.FilterOffset && offset < EHClause.HandlerStartPC)
+ return TRUE;
+ }
+ return FALSE;
+} // BOOL IsInFirstFrameOfHandler()
+
+
+#if !defined(WIN64EXCEPTIONS)
+
+//******************************************************************************
+// LookForHandler -- search for a function that will handle the exception.
+//******************************************************************************
+LFH LookForHandler( // LFH return types
+ const EXCEPTION_POINTERS *pExceptionPointers, // The ExceptionRecord and ExceptionContext
+ Thread *pThread, // Thread on which to look (always current?)
+ ThrowCallbackType *tct) // Structure to pass back to callback functions.
+{
+ // We don't want to use a runtime contract here since this codepath is used during
+ // the processing of a hard SO. Contracts use a significant amount of stack
+ // which we can't afford for those cases.
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ // go through to find if anyone handles the exception
+ StackWalkAction action = pThread->StackWalkFrames((PSTACKWALKFRAMESCALLBACK)COMPlusThrowCallback,
+ tct,
+ 0, //can't use FUNCTIONSONLY because the callback uses non-function frames to stop the walk
+ tct->pBottomFrame);
+
+ // If someone handles it, the action will be SWA_ABORT with pFunc and dHandler indicating the
+ // function and handler that is handling the exception. Debugger can put a hook in here.
+ if (action == SWA_ABORT && tct->pFunc != NULL)
+ return LFH_FOUND;
+
+ // nobody is handling it
+ return LFH_NOT_FOUND;
+} // LFH LookForHandler()
+
+StackWalkAction COMPlusUnwindCallback (CrawlFrame *pCf, ThrowCallbackType *pData);
+
+//******************************************************************************
+// UnwindFrames
+//******************************************************************************
+void UnwindFrames( // No return value.
+ Thread *pThread, // Thread to unwind.
+ ThrowCallbackType *tct) // Structure to pass back to callback function.
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ if (pThread->IsExceptionInProgress())
+ {
+ pThread->GetExceptionState()->GetFlags()->SetUnwindHasStarted();
+ }
+
+ #ifdef DEBUGGING_SUPPORTED
+ //
+ // If a debugger is attached, notify it that unwinding is going on.
+ //
+ if (CORDebuggerAttached())
+ {
+ g_pDebugInterface->ManagedExceptionUnwindBegin(pThread);
+ }
+ #endif // DEBUGGING_SUPPORTED
+
+ LOG((LF_EH, LL_INFO1000, "UnwindFrames: going to: pFunc:%#X, pStack:%#X\n",
+ tct->pFunc, tct->pStack));
+
+ pThread->StackWalkFrames((PSTACKWALKFRAMESCALLBACK)COMPlusUnwindCallback,
+ tct,
+ POPFRAMES,
+ tct->pBottomFrame);
+} // void UnwindFrames()
+
+#endif // !defined(WIN64EXCEPTIONS)
+
+void StackTraceInfo::SaveStackTrace(BOOL bAllowAllocMem, OBJECTHANDLE hThrowable, BOOL bReplaceStack, BOOL bSkipLastElement)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Do not save stacktrace to preallocated exception. These are shared.
+ if (CLRException::IsPreallocatedExceptionHandle(hThrowable))
+ {
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ // Preallocated exceptions will never have this flag set. However, it's possible
+ // that after this flag is set for a regular exception but before we throw, we have an async
+ // exception like a RudeThreadAbort, which will replace the exception
+ // containing the restored stack trace.
+ //
+ // In such a case, we should clear the flag as the throwable representing the
+ // preallocated exception will not have the restored (or any) stack trace.
+ PTR_ThreadExceptionState pCurTES = GetThread()->GetExceptionState();
+ pCurTES->ResetRaisingForeignException();
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+ return;
+ }
+
+ LOG((LF_EH, LL_INFO1000, "StackTraceInfo::SaveStackTrace (%p), alloc = %d, replace = %d, skiplast = %d\n", this, bAllowAllocMem, bReplaceStack, bSkipLastElement));
+
+ // if we have bSkipLastElement, we must also be keeping the stack (not replacing it)
+ _ASSERTE(! bSkipLastElement || ! bReplaceStack);
+
+ bool fSuccess = false;
+ MethodTable* pMT = ObjectFromHandle(hThrowable)->GetTrueMethodTable();
+
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ // Check whether the flag indicating a foreign exception raise has been set up,
+ // and then reset it so that subsequent processing of managed frames proceeds
+ // normally.
+ PTR_ThreadExceptionState pCurTES = GetThread()->GetExceptionState();
+ BOOL fRaisingForeignException = pCurTES->IsRaisingForeignException();
+ pCurTES->ResetRaisingForeignException();
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+ if (bAllowAllocMem && m_dFrameCount != 0)
+ {
+ EX_TRY
+ {
+ // Only save stack trace info on exceptions
+ _ASSERTE(IsException(pMT)); // what is the pathway here?
+ if (!IsException(pMT))
+ {
+ fSuccess = true;
+ }
+ else
+ {
+ // If the stack trace contains DynamicMethodDescs, we need to save the corresponding
+ // System.Resolver objects in the Exception._dynamicMethods field. Failing to do that
+ // will cause an AV in the runtime when we try to visit those MethodDescs in the
+ // Exception._stackTrace field, because they have been recycled or destroyed.
+ unsigned iNumDynamics = 0;
+
+ // How many DynamicMethodDescs do we need to keep alive?
+ for (unsigned iElement=0; iElement < m_dFrameCount; iElement++)
+ {
+ MethodDesc *pMethod = m_pStackTrace[iElement].pFunc;
+ _ASSERTE(pMethod);
+
+ if (pMethod->IsLCGMethod())
+ {
+ // Increment the number of new dynamic methods we have found
+ iNumDynamics++;
+ }
+ else if (pMethod->GetMethodTable()->Collectible())
+ {
+ iNumDynamics++;
+ }
+ }
+
+ struct _gc
+ {
+ StackTraceArray stackTrace;
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ StackTraceArray stackTraceTemp;
+ PTRARRAYREF dynamicMethodsArrayTemp;
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ PTRARRAYREF dynamicMethodsArray; // Object array of Managed Resolvers
+ PTRARRAYREF pOrigDynamicArray;
+
+ _gc()
+ : stackTrace()
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ , stackTraceTemp()
+ , dynamicMethodsArrayTemp(static_cast<PTRArray *>(NULL))
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ , dynamicMethodsArray(static_cast<PTRArray *>(NULL))
+ , pOrigDynamicArray(static_cast<PTRArray *>(NULL))
+ {}
+ };
+
+ _gc gc;
+ GCPROTECT_BEGIN(gc);
+
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ // If the flag indicating a foreign exception raise has been set up, check
+ // whether the exception object has a stack trace. If we get an async
+ // non-preallocated exception after setting this flag but before we throw,
+ // the new exception will not have any stack trace set and thus we should
+ // behave as if the flag was not set up.
+ if (fRaisingForeignException)
+ {
+ // Get the reference to stack trace and reset our flag if applicable.
+ ((EXCEPTIONREF)ObjectFromHandle(hThrowable))->GetStackTrace(gc.stackTraceTemp);
+ if (gc.stackTraceTemp.Size() == 0)
+ {
+ fRaisingForeignException = FALSE;
+ }
+ }
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+ // Replace stack (i.e. build a new stack trace) only if we are not raising a foreign exception.
+ // If we are, then we will continue to extend the existing stack trace.
+ if (bReplaceStack
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ && (!fRaisingForeignException)
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ )
+ {
+ // Cleanup previous info
+ gc.stackTrace.Append(m_pStackTrace, m_pStackTrace + m_dFrameCount);
+
+ if (iNumDynamics)
+ {
+ // Adjust the allocation size of the array, if required
+ if (iNumDynamics > m_cDynamicMethodItems)
+ {
+ S_UINT32 cNewSize = S_UINT32(2) * S_UINT32(iNumDynamics);
+ if (cNewSize.IsOverflow())
+ {
+ // Overflow here implies we cannot allocate memory anymore
+ LOG((LF_EH, LL_INFO100, "StackTraceInfo::SaveStackTrace - Cannot calculate initial resolver array size due to overflow!\n"));
+ COMPlusThrowOM();
+ }
+
+ m_cDynamicMethodItems = cNewSize.Value();
+ }
+
+ gc.dynamicMethodsArray = (PTRARRAYREF)AllocateObjectArray(m_cDynamicMethodItems, g_pObjectClass);
+ LOG((LF_EH, LL_INFO100, "StackTraceInfo::SaveStackTrace - allocated dynamic array for first frame of size %lu\n",
+ m_cDynamicMethodItems));
+ }
+
+ m_dCurrentDynamicIndex = 0;
+ }
+ else
+ {
+ // Fetch the stacktrace and the dynamic method array
+ ((EXCEPTIONREF)ObjectFromHandle(hThrowable))->GetStackTrace(gc.stackTrace, &gc.pOrigDynamicArray);
+
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ if (fRaisingForeignException)
+ {
+ // Just before we append to the stack trace, mark the last recorded frame to be from
+ // the foreign thread so that we can insert an annotation indicating so when building
+ // the stack trace string.
+ // "numCurrentFrames" can be zero if the user created an EDI using
+ // an unthrown exception.
+ size_t numCurrentFrames = gc.stackTrace.Size();
+ if (numCurrentFrames > 0)
+ {
+ StackTraceElement & refLastElementFromForeignStackTrace = gc.stackTrace[numCurrentFrames - 1];
+ refLastElementFromForeignStackTrace.fIsLastFrameFromForeignStackTrace = TRUE;
+ }
+ }
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+ if (bSkipLastElement && gc.stackTrace.Size() != 0)
+ gc.stackTrace.AppendSkipLast(m_pStackTrace, m_pStackTrace + m_dFrameCount);
+ else
+ gc.stackTrace.Append(m_pStackTrace, m_pStackTrace + m_dFrameCount);
+
+ //////////////////////////////
+
+ unsigned cOrigDynamic = 0; // number of objects in the old array
+ if (gc.pOrigDynamicArray != NULL)
+ {
+ cOrigDynamic = gc.pOrigDynamicArray->GetNumComponents();
+ }
+ else
+ {
+ // Since there is no dynamic method array, reset the corresponding state variables
+ m_dCurrentDynamicIndex = 0;
+ m_cDynamicMethodItems = 0;
+ }
+
+ if ((gc.pOrigDynamicArray != NULL)
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ || (fRaisingForeignException)
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ )
+ {
+ // Since we have just restored the dynamic method array as well,
+ // calculate the dynamic array index which would be the total
+ // number of dynamic methods present in the stack trace.
+ //
+ // In addition to the ForeignException scenario, we need to reset these
+ // values in case the exception object in question is being thrown by
+ // multiple threads in parallel and thus could have different
+ // dynamic method array contents/size from the current state of
+ // StackTraceInfo.
+
+ unsigned iStackTraceElements = (unsigned)gc.stackTrace.Size();
+ m_dCurrentDynamicIndex = 0;
+ for (unsigned iIndex = 0; iIndex < iStackTraceElements; iIndex++)
+ {
+ MethodDesc *pMethod = gc.stackTrace[iIndex].pFunc;
+ if (pMethod)
+ {
+ if ((pMethod->IsLCGMethod()) || (pMethod->GetMethodTable()->Collectible()))
+ {
+ // Increment the number of new dynamic methods we have found
+ m_dCurrentDynamicIndex++;
+ }
+ }
+ }
+
+ // Total number of elements in the dynamic method array should also be
+ // reset based upon the restored array size.
+ m_cDynamicMethodItems = cOrigDynamic;
+ }
+
+ // Make the dynamic Array field reference the original array we got from the
+ // Exception object. If, below, we have to add new entries, we will add it to the
+ // array if it is allocated, or else, we will allocate it before doing so.
+ gc.dynamicMethodsArray = gc.pOrigDynamicArray;
+
+ // Create an object array if we have new dynamic method entries AND
+ // if we are at (or went past) the current size limit
+ if (iNumDynamics > 0)
+ {
+ // Reallocate the array if we are at (or went past) the current size limit
+ unsigned cTotalDynamicMethodCount = m_dCurrentDynamicIndex;
+
+ S_UINT32 cNewSum = S_UINT32(cTotalDynamicMethodCount) + S_UINT32(iNumDynamics);
+ if (cNewSum.IsOverflow())
+ {
+ // If the current size is already the UINT32 max size, then we
+ // cannot go further. Overflow here implies we cannot allocate memory anymore.
+ LOG((LF_EH, LL_INFO100, "StackTraceInfo::SaveStackTrace - Cannot calculate resolver array size due to overflow!\n"));
+ COMPlusThrowOM();
+ }
+
+ cTotalDynamicMethodCount = cNewSum.Value();
+
+ if (cTotalDynamicMethodCount > m_cDynamicMethodItems)
+ {
+ // Double the current limit of the array.
+ S_UINT32 cNewSize = S_UINT32(2) * S_UINT32(cTotalDynamicMethodCount);
+ if (cNewSize.IsOverflow())
+ {
+ // Overflow here implies that we cannot allocate any more memory
+ LOG((LF_EH, LL_INFO100, "StackTraceInfo::SaveStackTrace - Cannot resize resolver array beyond max size due to overflow!\n"));
+ COMPlusThrowOM();
+ }
+
+ m_cDynamicMethodItems = cNewSize.Value();
+ gc.dynamicMethodsArray = (PTRARRAYREF)AllocateObjectArray(m_cDynamicMethodItems,
+ g_pObjectClass);
+
+ _ASSERTE(!(cOrigDynamic && !gc.pOrigDynamicArray));
+
+ LOG((LF_EH, LL_INFO100, "StackTraceInfo::SaveStackTrace - resized dynamic array to size %lu\n",
+ m_cDynamicMethodItems));
+
+ // Copy previous entries if there are any, and update iCurDynamic to point
+ // to the following index.
+ if (cOrigDynamic && (gc.pOrigDynamicArray != NULL))
+ {
+ memmoveGCRefs(gc.dynamicMethodsArray->GetDataPtr(),
+ gc.pOrigDynamicArray->GetDataPtr(),
+ cOrigDynamic * sizeof(Object *));
+
+ // m_dCurrentDynamicIndex is already referring to the correct index
+ // at which the next resolver object will be saved
+ }
+ }
+ else
+ {
+ // We are adding objects to the existing array.
+ //
+ // We have new dynamic method entries for which
+ // resolver objects need to be saved. Ensure
+ // that we have the array to store them
+ if (gc.dynamicMethodsArray == NULL)
+ {
+ _ASSERTE(m_cDynamicMethodItems > 0);
+
+ gc.dynamicMethodsArray = (PTRARRAYREF)AllocateObjectArray(m_cDynamicMethodItems,
+ g_pObjectClass);
+ m_dCurrentDynamicIndex = 0;
+ LOG((LF_EH, LL_INFO100, "StackTraceInfo::SaveStackTrace - allocated dynamic array of size %lu\n",
+ m_cDynamicMethodItems));
+ }
+ else
+ {
+ // The array exists for storing resolver objects.
+ // Simply set the index at which the next resolver
+ // will be stored in it.
+ }
+ }
+ }
+ }
+
+ // Update _dynamicMethods field
+ if (iNumDynamics)
+ {
+ // At this point, we should have a valid array for storage
+ _ASSERTE(gc.dynamicMethodsArray != NULL);
+
+ // Assert that we are in valid range of the array in which resolver objects will be saved.
+ // We subtract 1 below since storage will start from m_dCurrentDynamicIndex onwards and not
+ // from (m_dCurrentDynamicIndex + 1).
+ _ASSERTE((m_dCurrentDynamicIndex + iNumDynamics - 1) < gc.dynamicMethodsArray->GetNumComponents());
+
+ for (unsigned i=0; i < m_dFrameCount; i++)
+ {
+ MethodDesc *pMethod = m_pStackTrace[i].pFunc;
+ _ASSERTE(pMethod);
+
+ if (pMethod->IsLCGMethod())
+ {
+ // We need to append the corresponding System.Resolver for
+ // this DynamicMethodDesc to keep it alive.
+ DynamicMethodDesc *pDMD = (DynamicMethodDesc *) pMethod;
+ OBJECTREF pResolver = pDMD->GetLCGMethodResolver()->GetManagedResolver();
+
+ _ASSERTE(pResolver != NULL);
+
+ // Store Resolver information in the array
+ gc.dynamicMethodsArray->SetAt(m_dCurrentDynamicIndex++, pResolver);
+ }
+ else if (pMethod->GetMethodTable()->Collectible())
+ {
+ OBJECTREF pLoaderAllocator = pMethod->GetMethodTable()->GetLoaderAllocator()->GetExposedObject();
+ _ASSERTE(pLoaderAllocator != NULL);
+ gc.dynamicMethodsArray->SetAt (m_dCurrentDynamicIndex++, pLoaderAllocator);
+ }
+ }
+ }
+
+ ((EXCEPTIONREF)ObjectFromHandle(hThrowable))->SetStackTrace(gc.stackTrace, gc.dynamicMethodsArray);
+
+ // Update _stackTraceString field.
+ ((EXCEPTIONREF)ObjectFromHandle(hThrowable))->SetStackTraceString(NULL);
+ fSuccess = true;
+
+ GCPROTECT_END(); // gc
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+
+ ClearStackTrace();
+
+ if (!fSuccess)
+ {
+ EX_TRY
+ {
+ _ASSERTE(IsException(pMT)); // what is the pathway here?
+ if (bReplaceStack && IsException(pMT))
+ ((EXCEPTIONREF)ObjectFromHandle(hThrowable))->ClearStackTraceForThrow();
+ }
+ EX_CATCH
+ {
+ // Do nothing
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+}
+
+// Copy a context record, being careful about whether or not the target
+// is large enough to support CONTEXT_EXTENDED_REGISTERS.
+//
+// NOTE: this function can ONLY be used when a filter function will return
+// EXCEPTION_CONTINUE_EXECUTION. On AMD64, replacing the CONTEXT in any other
+// situation may break exception unwinding.
+//
+// NOTE: this function (rather than a plain struct copy) MUST be used on AMD64,
+// because during exception handling parts of the CONTEXT struct must not be modified.
+
+
+// High 2 bytes are machine type. Low 2 bytes are register subset.
+#define CONTEXT_EXTENDED_BIT (CONTEXT_EXTENDED_REGISTERS & 0xffff)
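+// For example, on x86 CONTEXT_EXTENDED_REGISTERS is (CONTEXT_i386 | 0x20),
+// i.e. 0x00010020, so CONTEXT_EXTENDED_BIT masks off the machine type and
+// leaves 0x20.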
+
+VOID
+ReplaceExceptionContextRecord(CONTEXT *pTarget, CONTEXT *pSource)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pTarget);
+ _ASSERTE(pSource);
+
+#if defined(_TARGET_X86_)
+ //<TODO>
+ // @TODO IA64: CONTEXT_DEBUG_REGISTERS not defined on IA64, may need updated SDK
+ //</TODO>
+
+ // Want CONTROL, INTEGER, SEGMENTS. If we have Floating Point, fine.
+ _ASSERTE((pSource->ContextFlags & CONTEXT_FULL) == CONTEXT_FULL);
+#endif // _TARGET_X86_
+
+#ifdef CONTEXT_EXTENDED_REGISTERS
+
+ if (pSource->ContextFlags & CONTEXT_EXTENDED_BIT)
+ {
+ if (pTarget->ContextFlags & CONTEXT_EXTENDED_BIT)
+ { // Source and Target have EXTENDED bit set.
+ *pTarget = *pSource;
+ }
+ else
+ { // Source has but Target doesn't have EXTENDED bit set. (Target is shorter than Source.)
+ // Copy non-extended part of the struct, and reset the bit on the Target, as it was.
+ memcpy(pTarget, pSource, offsetof(CONTEXT, ExtendedRegisters));
+ pTarget->ContextFlags &= ~CONTEXT_EXTENDED_BIT; // Target was short. Reset the extended bit.
+ }
+ }
+ else
+ { // Source does not have EXTENDED bit. Copy only non-extended part of the struct.
+ memcpy(pTarget, pSource, offsetof(CONTEXT, ExtendedRegisters));
+ }
+ STRESS_LOG3(LF_SYNC, LL_INFO1000, "ReSet thread context EIP = %p ESP = %p EBP = %p\n",
+ GetIP((CONTEXT*)pTarget), GetSP((CONTEXT*)pTarget), GetFP((CONTEXT*)pTarget));
+
+#else // !CONTEXT_EXTENDED_REGISTERS
+
+ // Everything that's left
+ *pTarget = *pSource;
+
+#endif // !CONTEXT_EXTENDED_REGISTERS
+}
+
+VOID FixupOnRethrow(Thread* pCurThread, EXCEPTION_POINTERS* pExceptionPointers)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ThreadExceptionState* pExState = pCurThread->GetExceptionState();
+
+#ifdef FEATURE_INTERPRETER
+ // Abort if we don't have any state from the original exception.
+ if (!pExState->IsExceptionInProgress())
+ {
+ return;
+ }
+#endif // FEATURE_INTERPRETER
+
+ // Don't allow rethrow of a STATUS_STACK_OVERFLOW -- it's a new throw of the COM+ exception.
+ if (pExState->GetExceptionCode() == STATUS_STACK_OVERFLOW)
+ {
+ return;
+ }
+
+ // For COMPLUS exceptions, we don't need the original context for our rethrow.
+ if (!(pExState->IsComPlusException()))
+ {
+ _ASSERTE(pExState->GetExceptionRecord());
+
+ // don't copy parm args as have already supplied them on the throw
+ memcpy((void*)pExceptionPointers->ExceptionRecord,
+ (void*)pExState->GetExceptionRecord(),
+ offsetof(EXCEPTION_RECORD, ExceptionInformation));
+
+// Replacing the exception context breaks unwinding on AMD64. It also breaks exception dispatch on IA64.
+// The info saved by pExState will be given to exception filters.
+#ifndef WIN64EXCEPTIONS
+ // Restore original context if available.
+ if (pExState->GetContextRecord())
+ {
+ ReplaceExceptionContextRecord(pExceptionPointers->ContextRecord,
+ pExState->GetContextRecord());
+ }
+#endif // !WIN64EXCEPTIONS
+ }
+
+ pExState->GetFlags()->SetIsRethrown();
+}
+
+struct RaiseExceptionFilterParam
+{
+ BOOL isRethrown;
+};
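+
+// A sketch of the isRethrown protocol, as used by RaiseExceptionFilter below:
+// 0 means a first-time throw, 1 means a rethrow whose EH info still needs the
+// FixupOnRethrow treatment, and 2 (WIN64EXCEPTIONS only, where the filter may
+// run more than once) means the fixup has already been performed.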
+
+LONG RaiseExceptionFilter(EXCEPTION_POINTERS* ep, LPVOID pv)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+
+ RaiseExceptionFilterParam *pParam = (RaiseExceptionFilterParam *) pv;
+
+ if (1 == pParam->isRethrown)
+ {
+ // need to reset the EH info back to the original thrown exception
+ FixupOnRethrow(GetThread(), ep);
+#ifdef WIN64EXCEPTIONS
+ // only do this once
+ pParam->isRethrown++;
+#endif // WIN64EXCEPTIONS
+ }
+ else
+ {
+ CONSISTENCY_CHECK((2 == pParam->isRethrown) || (0 == pParam->isRethrown));
+ }
+
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+//==========================================================================
+// Throw an object.
+//==========================================================================
+VOID DECLSPEC_NORETURN RaiseTheException(OBJECTREF throwable, BOOL rethrow
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ )
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ LOG((LF_EH, LL_INFO100, "RealCOMPlusThrow throwing %s\n",
+ throwable->GetTrueMethodTable()->GetDebugClassName()));
+
+ if (throwable == NULL)
+ {
+ _ASSERTE(!"RealCOMPlusThrow(OBJECTREF) called with NULL argument. Somebody forgot to post an exception!");
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+
+ if (g_CLRPolicyRequested &&
+ throwable->GetMethodTable() == g_pOutOfMemoryExceptionClass)
+ {
+ // We depend on UNINSTALL_UNWIND_AND_CONTINUE_HANDLER to handle out of memory escalation.
+ // We should throw a C++ exception instead.
+ ThrowOutOfMemory();
+ }
+#ifdef FEATURE_STACK_PROBE
+ else if (throwable == CLRException::GetPreallocatedStackOverflowException())
+ {
+ ThrowStackOverflow();
+ }
+#else
+ _ASSERTE(throwable != CLRException::GetPreallocatedStackOverflowException());
+#endif
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ if (!g_pConfig->LegacyCorruptedStateExceptionsPolicy())
+ {
+ // This is Scenario 3 described in clrex.h around the definition of SET_CE_RETHROW_FLAG_FOR_EX_CATCH macro.
+ //
+ // We are here because the VM is attempting to throw a managed exception. It is possible this exception
+ // may not be seen by CLR's exception handler for managed code (e.g. there maybe an EX_CATCH up the stack
+ // that will swallow or rethrow this exception). In the following scenario:
+ //
+ // [VM1 - RethrowCSE] -> [VM2 - RethrowCSE] -> [VM3 - RethrowCSE] -> <managed code>
+ //
+ // When managed code throws a CSE (e.g. TargetInvocationException flagged as CSE), [VM3] will rethrow it and we will
+ // enter EX_CATCH in VM2 which is supposed to rethrow it as well. Two things can happen:
+ //
+ // 1) The implementation of EX_CATCH in VM2 throws a new managed exception *before* rethrow policy is applied and control
+ // will reach EX_CATCH in VM1, OR
+ //
+ // 2) EX_CATCH in VM2 swallows the exception, comes out of the catch block and later throws a new managed exception that
+ // will be caught by EX_CATCH in VM1.
+ //
+ // In either of the cases, rethrow in VM1 should be on the basis of the new managed exception's corruption severity.
+ //
+ // To support this scenario, we set the corruption severity of the managed exception the VM is throwing. If it's a rethrow,
+ // it implies we are rethrowing the last exception that was seen by CLR's managed code exception handler. In such a case,
+ // we will copy over the corruption severity of that exception.
+
+ // If throwable indicates corrupted state, forcibly set the severity.
+ if (CEHelper::IsProcessCorruptedStateException(throwable))
+ {
+ severity = ProcessCorrupting;
+ }
+
+ // No one should have passed us an invalid severity.
+ _ASSERTE(severity > NotSet);
+
+ if (severity == NotSet)
+ {
+ severity = NotCorrupting;
+ }
+
+ // Update the corruption severity of the exception being thrown by the VM.
+ GetThread()->GetExceptionState()->SetLastActiveExceptionCorruptionSeverity(severity);
+
+ // Exception's corruption severity should be reused in reraise if this exception leaks out from the VM
+ // into managed code
+ CEHelper::MarkLastActiveExceptionCorruptionSeverityForReraiseReuse();
+
+ LOG((LF_EH, LL_INFO100, "RaiseTheException - Set VM thrown managed exception severity to %d.\n", severity));
+ }
+
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ RaiseTheExceptionInternalOnly(throwable,rethrow);
+}
+
+HRESULT GetHRFromThrowable(OBJECTREF throwable)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ HRESULT hr = E_FAIL;
+ MethodTable *pMT = throwable->GetTrueMethodTable();
+
+ // Only Exception objects have a HResult field
+ // So don't fetch the field unless we have an exception
+
+ _ASSERTE(IsException(pMT)); // what is the pathway here?
+ if (IsException(pMT))
+ {
+ hr = ((EXCEPTIONREF)throwable)->GetHResult();
+ }
+
+ return hr;
+}
+
+
+VOID DECLSPEC_NORETURN RaiseTheExceptionInternalOnly(OBJECTREF throwable, BOOL rethrow, BOOL fForStackOverflow)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ STRESS_LOG3(LF_EH, LL_INFO100, "******* MANAGED EXCEPTION THROWN: Object thrown: %p MT %pT rethrow %d\n",
+ OBJECTREFToObject(throwable), (throwable!=0)?throwable->GetMethodTable():0, rethrow);
+
+#ifdef STRESS_LOG
+ // Any object could have been thrown, but System.Exception objects have useful information for the stress log
+ if (!NingenEnabled() && throwable == CLRException::GetPreallocatedStackOverflowException())
+ {
+ // if we are handling an SO, don't try to get all that other goop. It isn't there anyway,
+ // and it could cause us to take another SO.
+ STRESS_LOG1(LF_EH, LL_INFO100, "Exception HRESULT = 0x%x \n", COR_E_STACKOVERFLOW);
+ }
+ else if (throwable != 0)
+ {
+ _ASSERTE(IsException(throwable->GetMethodTable()));
+
+ int hr = ((EXCEPTIONREF)throwable)->GetHResult();
+ STRINGREF message = ((EXCEPTIONREF)throwable)->GetMessage();
+ OBJECTREF innerEH = ((EXCEPTIONREF)throwable)->GetInnerException();
+
+ STRESS_LOG4(LF_EH, LL_INFO100, "Exception HRESULT = 0x%x Message String 0x%p (db will display) InnerException %p MT %pT\n",
+ hr, OBJECTREFToObject(message), OBJECTREFToObject(innerEH), (innerEH!=0)?innerEH->GetMethodTable():0);
+ }
+#endif
+
+ struct Param : RaiseExceptionFilterParam
+ {
+ OBJECTREF throwable;
+ BOOL fForStackOverflow;
+ ULONG_PTR exceptionArgs[INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE];
+ Thread *pThread;
+ ThreadExceptionState* pExState;
+ } param;
+ param.isRethrown = rethrow ? 1 : 0; // normalize because we use it as a count in RaiseExceptionFilter
+ param.throwable = throwable;
+ param.fForStackOverflow = fForStackOverflow;
+ param.pThread = GetThread();
+
+ _ASSERTE(param.pThread);
+ param.pExState = param.pThread->GetExceptionState();
+
+ // Make sure that the object being thrown belongs in the current appdomain.
+ #if defined(_DEBUG) && CHECK_APP_DOMAIN_LEAKS
+ if (param.throwable != NULL)
+ {
+ GCPROTECT_BEGIN(param.throwable);
+ if (!CLRException::IsPreallocatedExceptionObject(param.throwable))
+ _ASSERTE(param.throwable->CheckAppDomain(GetAppDomain()));
+ GCPROTECT_END();
+ }
+ else
+ { // throwable is NULL -- that shouldn't happen
+ _ASSERTE(NingenEnabled() || param.throwable != NULL);
+ }
+ #endif
+
+ if (param.pThread->IsRudeAbortInitiated())
+ {
+ // Nobody should be able to swallow rude thread abort.
+ param.throwable = CLRException::GetPreallocatedRudeThreadAbortException();
+ }
+
+#if 0
+ // TODO: enable this after we change RealCOMPlusThrow
+#ifdef _DEBUG
+ // If ThreadAbort exception is thrown, the thread should be marked with AbortRequest.
+ // If not, we may see unhandled exception.
+ if (param.throwable->GetTrueMethodTable() == g_pThreadAbortExceptionClass)
+ {
+ _ASSERTE(GetThread()->IsAbortRequested()
+#ifdef _TARGET_X86_
+ ||
+ GetFirstCOMPlusSEHRecord(this) == EXCEPTION_CHAIN_END
+#endif
+ );
+ }
+#endif
+#endif
+
+ // raise
+ PAL_TRY(Param *, pParam, &param)
+ {
+ //_ASSERTE(! pParam->isRethrown || pParam->pExState->m_pExceptionRecord);
+ ULONG_PTR *args = NULL;
+ ULONG argCount = 0;
+ ULONG flags = 0;
+ ULONG code = 0;
+
+ // Always save the current object in the handle so on rethrow we can reuse it. This is important as it
+ // contains stack trace info.
+ //
+ // Note: we use SafeSetLastThrownObject, which will try to set the throwable and if there are any problems,
+ // it will set the throwable to something appropriate (like an OOM exception) and return the new
+ // exception. Thus, the user's exception object can be replaced here.
+ pParam->throwable = NingenEnabled() ? NULL : pParam->pThread->SafeSetLastThrownObject(pParam->throwable);
+
+ if (!pParam->isRethrown ||
+#ifdef FEATURE_INTERPRETER
+ !pParam->pExState->IsExceptionInProgress() ||
+#endif // FEATURE_INTERPRETER
+ pParam->pExState->IsComPlusException() ||
+ (pParam->pExState->GetExceptionCode() == STATUS_STACK_OVERFLOW))
+ {
+ ULONG_PTR hr = NingenEnabled() ? E_FAIL : GetHRFromThrowable(pParam->throwable);
+
+ args = pParam->exceptionArgs;
+ argCount = MarkAsThrownByUs(args, hr);
+ flags = EXCEPTION_NONCONTINUABLE;
+ code = EXCEPTION_COMPLUS;
+ }
+ else
+ {
+ // Exception code should be consistent.
+ _ASSERTE((DWORD)(pParam->pExState->GetExceptionRecord()->ExceptionCode) == pParam->pExState->GetExceptionCode());
+
+ args = pParam->pExState->GetExceptionRecord()->ExceptionInformation;
+ argCount = pParam->pExState->GetExceptionRecord()->NumberParameters;
+ flags = pParam->pExState->GetExceptionRecord()->ExceptionFlags;
+ code = pParam->pExState->GetExceptionRecord()->ExceptionCode;
+ }
+
+ if (pParam->pThread->IsAbortInitiated () && IsExceptionOfType(kThreadAbortException,&pParam->throwable))
+ {
+ pParam->pThread->ResetPreparingAbort();
+
+ if (pParam->pThread->GetFrame() == FRAME_TOP)
+ {
+ // There is no more managed code on stack.
+ pParam->pThread->EEResetAbort(Thread::TAR_ALL);
+ }
+ }
+
+ // Can't access the exception object when are in pre-emptive, so find out before
+ // if its an SO.
+ BOOL fIsStackOverflow = IsExceptionOfType(kStackOverflowException, &pParam->throwable);
+
+ if (fIsStackOverflow || pParam->fForStackOverflow)
+ {
+ // Don't probe if we're already handling an SO. Just throw the exception.
+ RaiseException(code, flags, argCount, args);
+ }
+
+ // Probe for sufficient stack.
+ PUSH_STACK_PROBE_FOR_THROW(pParam->pThread);
+
+#ifndef STACK_GUARDS_DEBUG
+ // This needs to be both here and inside the handler below
+ // enable preemptive mode before call into OS
+ GCX_PREEMP_NO_DTOR();
+
+ // In non-debug, we can just raise the exception once we've probed.
+ RaiseException(code, flags, argCount, args);
+
+#else
+ // In a debug build, we need to unwind our probe structure off the stack.
+ BaseStackGuard *pThrowGuard = NULL;
+ // Stash away the address of the guard we just pushed above in PUSH_STACK_PROBE_FOR_THROW
+ SAVE_ADDRESS_OF_STACK_PROBE_FOR_THROW(pThrowGuard);
+
+ // Add the stack guard reference to the structure below so that it can be accessed within
+ // PAL_TRY as well
+ struct ParamInner
+ {
+ ULONG code;
+ ULONG flags;
+ ULONG argCount;
+ ULONG_PTR *args;
+ BaseStackGuard *pGuard;
+ } param;
+ param.code = code;
+ param.flags = flags;
+ param.argCount = argCount;
+ param.args = args;
+ param.pGuard = pThrowGuard;
+
+ PAL_TRY(ParamInner *, pParam, &param)
+ {
+ // enable preemptive mode before call into OS
+ GCX_PREEMP_NO_DTOR();
+
+ RaiseException(pParam->code, pParam->flags, pParam->argCount, pParam->args);
+
+ // We never return from RaiseException, so shouldn't have to call SetNoException.
+ // However, in the debugger we can, and if we don't call SetNoException we get
+ // a short-circuit return assert.
+ RESET_EXCEPTION_FROM_STACK_PROBE_FOR_THROW(pParam->pGuard);
+ }
+ PAL_FINALLY
+ {
+ // pop the guard that we pushed above in PUSH_STACK_PROBE_FOR_THROW
+ POP_STACK_PROBE_FOR_THROW(pThrowGuard);
+ }
+ PAL_ENDTRY
+#endif
+ }
+ PAL_EXCEPT_FILTER (RaiseExceptionFilter)
+ {
+ }
+ PAL_ENDTRY
+ _ASSERTE(!"Cannot continue after COM+ exception"); // Debugger can bring you here.
+ // For example,
+ // Debugger breaks in due to second chance exception (unhandled)
+ // User hits 'g'
+ // Then debugger can bring us here.
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+}
+
+
+// INSTALL_COMPLUS_EXCEPTION_HANDLER has a filter, so must put the call in a separate fcn
+static VOID DECLSPEC_NORETURN RealCOMPlusThrowWorker(OBJECTREF throwable, BOOL rethrow
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+) {
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ // RaiseTheException will throw C++ OOM and SO exceptions, so that our escalation policy can kick in.
+ // Unfortunately, the COMPlusFrameHandler installed here will try to create a managed exception object.
+ // We may hit recursion.
+
+ if (g_CLRPolicyRequested &&
+ throwable->GetMethodTable() == g_pOutOfMemoryExceptionClass)
+ {
+ // We depend on UNINSTALL_UNWIND_AND_CONTINUE_HANDLER to handle out of memory escalation.
+ // We should throw a C++ exception instead.
+ ThrowOutOfMemory();
+ }
+#ifdef FEATURE_STACK_PROBE
+ else if (throwable == CLRException::GetPreallocatedStackOverflowException())
+ {
+ ThrowStackOverflow();
+ }
+#else
+ _ASSERTE(throwable != CLRException::GetPreallocatedStackOverflowException());
+#endif
+
+ // TODO: Do we need to install COMPlusFrameHandler here?
+ INSTALL_COMPLUS_EXCEPTION_HANDLER();
+ RaiseTheException(throwable, rethrow
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+ UNINSTALL_COMPLUS_EXCEPTION_HANDLER();
+}
+
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrow(OBJECTREF throwable, BOOL rethrow
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+) {
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+ GCPROTECT_BEGIN(throwable);
+
+ _ASSERTE(IsException(throwable->GetMethodTable()));
+
+ // This may look a bit odd, but there is an explanation. The rethrow boolean
+ // means that an actual RaiseException(EXCEPTION_COMPLUS,...) is being re-thrown,
+ // and that the exception context saved on the Thread object should replace
+ // the exception context from the upcoming RaiseException(). There is logic
+ // in the stack trace code to preserve MOST of the stack trace, but to drop the
+ // last element of the stack trace (this has to do with having the address of the
+ // rethrow instead of the address of the original call in the stack trace; that is
+ // controversial itself, but we won't get into that here).
+ // However, if this is not re-raising that original exception, but rather a new
+ // os exception for what may be an existing exception object, it is generally
+ // a good thing to preserve the stack trace.
+ if (!rethrow)
+ {
+ ExceptionPreserveStackTrace(throwable);
+ }
+
+ RealCOMPlusThrowWorker(throwable, rethrow
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+
+ GCPROTECT_END();
+}
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrow(OBJECTREF throwable
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ RealCOMPlusThrow(throwable, FALSE
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+}
+
+// This function finds the managed callback to get a resource
+// string from the then-current local domain and calls it.
+// This could be a lot of work.
+STRINGREF GetResourceStringFromManaged(STRINGREF key)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(key != NULL);
+ }
+ CONTRACTL_END;
+
+ struct xx {
+ STRINGREF key;
+ STRINGREF ret;
+ } gc;
+
+ gc.key = key;
+ gc.ret = NULL;
+
+ // The standard probe isn't good enough here. It's possible that we only have ~14 pages of stack
+ // left. By the time we transition to the default domain and start fetching this resource string,
+ // another 12 page probe could fail.
+ // This failing probe would cause us to unload the default appdomain, which would cause us
+ // to take down the process.
+
+ // Instead, let's probe for a lots more stack to make sure that doesn' happen.
+
+ // We need to have enough stack to survive 2 more probes... the original entrypoint back
+ // into mscorwks after we go into managed code, and a "large" probe that protects the GC
+
+ INTERIOR_STACK_PROBE_FOR(GetThread(), DEFAULT_ENTRY_PROBE_AMOUNT * 2);
+ GCPROTECT_BEGIN(gc);
+
+ MethodDescCallSite getResourceStringLocal(METHOD__ENVIRONMENT__GET_RESOURCE_STRING_LOCAL);
+
+ // Call Environment::GetResourceStringLocal(String name). Returns String value (or maybe null)
+
+ ENTER_DOMAIN_PTR(SystemDomain::System()->DefaultDomain(),ADV_DEFAULTAD);
+
+ // Don't need to GCPROTECT pArgs, since it's not used after the function call.
+
+ ARG_SLOT pArgs[1] = { ObjToArgSlot(gc.key) };
+ gc.ret = getResourceStringLocal.Call_RetSTRINGREF(pArgs);
+
+ END_DOMAIN_TRANSITION;
+
+ GCPROTECT_END();
+
+ END_INTERIOR_STACK_PROBE;
+
+
+ return gc.ret;
+}
+
+// This function potentially does a LOT of work (loading possibly 50 classes).
+// The string returned through ppMessage is an un-GC-protected string ref, or possibly NULL.
+void ResMgrGetString(LPCWSTR wszResourceName, STRINGREF * ppMessage)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(ppMessage != NULL);
+
+ if (wszResourceName == NULL || *wszResourceName == W('\0'))
+ {
+ *ppMessage = NULL; // clear the out parameter
+ return;
+ }
+
+ // this function never looks at name again after
+ // calling the helper so no need to GCPROTECT it
+ STRINGREF name = StringObject::NewString(wszResourceName);
+
+ // wszResourceName is known to be non-NULL here (checked above)
+ STRINGREF value = GetResourceStringFromManaged(name);
+
+ _ASSERTE(value!=NULL || !"Resource string lookup failed - possible misspelling or .resources missing or out of date?");
+ *ppMessage = value;
+}
+
+// GetResourceFromDefault
+// transition to the default domain and get a resource there
+FCIMPL1(Object*, GetResourceFromDefault, StringObject* keyUnsafe)
+{
+ FCALL_CONTRACT;
+
+ STRINGREF ret = NULL;
+ STRINGREF key = (STRINGREF)keyUnsafe;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(ret, key);
+
+ ret = GetResourceStringFromManaged(key);
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(ret);
+}
+FCIMPLEND
+
+void FreeExceptionData(ExceptionData *pedata)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pedata != NULL);
+
+ // <TODO>@NICE: At one point, we had the comment:
+ // (DM) Remove this when shutdown works better.</TODO>
+ // This test may no longer be necessary. Remove at own peril.
+ Thread *pThread = GetThread();
+ if (!pThread)
+ return;
+
+ if (pedata->bstrSource)
+ SysFreeString(pedata->bstrSource);
+ if (pedata->bstrDescription)
+ SysFreeString(pedata->bstrDescription);
+ if (pedata->bstrHelpFile)
+ SysFreeString(pedata->bstrHelpFile);
+#ifdef FEATURE_COMINTEROP
+ if (pedata->bstrRestrictedError)
+ SysFreeString(pedata->bstrRestrictedError);
+ if (pedata->bstrReference)
+ SysFreeString(pedata->bstrReference);
+ if (pedata->bstrCapabilitySid)
+ SysFreeString(pedata->bstrCapabilitySid);
+ if (pedata->pRestrictedErrorInfo)
+ {
+ ULONG cbRef = SafeRelease(pedata->pRestrictedErrorInfo);
+ LogInteropRelease(pedata->pRestrictedErrorInfo, cbRef, "IRestrictedErrorInfo");
+ }
+#endif // FEATURE_COMINTEROP
+}
+
+void GetExceptionForHR(HRESULT hr, IErrorInfo* pErrInfo, bool fUseCOMException, OBJECTREF* pProtectedThrowable, IRestrictedErrorInfo *pResErrorInfo, BOOL bHasLangRestrictedErrInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsProtectedByGCFrame(pProtectedThrowable));
+ }
+ CONTRACTL_END;
+
+ // Initialize
+ *pProtectedThrowable = NULL;
+
+#if defined(FEATURE_COMINTEROP) && !defined(CROSSGEN_COMPILE)
+ if (pErrInfo != NULL)
+ {
+ // If this represents a managed object...
+ // ...then get the managed exception object and also check if it is a __ComObject...
+ if (IsManagedObject(pErrInfo))
+ {
+ GetObjectRefFromComIP(pProtectedThrowable, pErrInfo);
+ if ((*pProtectedThrowable) != NULL)
+ {
+ // ...if it is, then we'll just default to an exception based on the IErrorInfo.
+ if ((*pProtectedThrowable)->GetMethodTable()->IsComObjectType())
+ {
+ (*pProtectedThrowable) = NULL;
+ }
+ else
+ {
+ // We have created an exception. Release the IErrorInfo
+ ULONG cbRef = SafeRelease(pErrInfo);
+ LogInteropRelease(pErrInfo, cbRef, "IErrorInfo release");
+ return;
+ }
+ }
+ }
+
+ // If we got here and we don't have an exception object, we have a native IErrorInfo or
+ // a managed __ComObject based IErrorInfo, so we'll just create an exception based on
+ // the native IErrorInfo.
+ if ((*pProtectedThrowable) == NULL)
+ {
+ EECOMException ex(hr, pErrInfo, fUseCOMException, pResErrorInfo, bHasLangRestrictedErrInfo COMMA_INDEBUG(FALSE));
+ (*pProtectedThrowable) = ex.GetThrowable();
+ }
+ }
+#endif // defined(FEATURE_COMINTEROP) && !defined(CROSSGEN_COMPILE)
+
+ // If we made it here and we don't have an exception object, we didn't have a valid IErrorInfo
+ // so we'll create an exception based solely on the hresult.
+ if ((*pProtectedThrowable) == NULL)
+ {
+ EEMessageException ex(hr, fUseCOMException);
+ (*pProtectedThrowable) = ex.GetThrowable();
+ }
+}
+
+void GetExceptionForHR(HRESULT hr, IErrorInfo* pErrInfo, OBJECTREF* pProtectedThrowable)
+{
+ WRAPPER_NO_CONTRACT;
+
+ GetExceptionForHR(hr, pErrInfo, true, pProtectedThrowable);
+}
+
+void GetExceptionForHR(HRESULT hr, OBJECTREF* pProtectedThrowable)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS; // because of IErrorInfo
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Get an IErrorInfo if one is available.
+ IErrorInfo *pErrInfo = NULL;
+#ifndef CROSSGEN_COMPILE
+ if (SafeGetErrorInfo(&pErrInfo) != S_OK)
+ pErrInfo = NULL;
+#endif
+
+ GetExceptionForHR(hr, pErrInfo, true, pProtectedThrowable);
+}
+
+
+//
+// Maps a Win32 fault to a COM+ Exception enumeration code
+//
+DWORD MapWin32FaultToCOMPlusException(EXCEPTION_RECORD *pExceptionRecord)
+{
+ WRAPPER_NO_CONTRACT;
+
+ switch (pExceptionRecord->ExceptionCode)
+ {
+ case STATUS_FLOAT_INEXACT_RESULT:
+ case STATUS_FLOAT_INVALID_OPERATION:
+ case STATUS_FLOAT_STACK_CHECK:
+ case STATUS_FLOAT_UNDERFLOW:
+ return (DWORD) kArithmeticException;
+ case STATUS_FLOAT_OVERFLOW:
+ case STATUS_INTEGER_OVERFLOW:
+ return (DWORD) kOverflowException;
+
+ case STATUS_FLOAT_DIVIDE_BY_ZERO:
+ case STATUS_INTEGER_DIVIDE_BY_ZERO:
+ return (DWORD) kDivideByZeroException;
+
+ case STATUS_FLOAT_DENORMAL_OPERAND:
+ return (DWORD) kFormatException;
+
+ case STATUS_ACCESS_VIOLATION:
+ {
+ // We have a config key, InsecurelyTreatAVsAsNullReference, that ensures we always translate to
+ // NullReferenceException instead of doing the new AV translation logic.
+ if ((g_pConfig != NULL) && !g_pConfig->LegacyNullReferenceExceptionPolicy() &&
+ !GetCompatibilityFlag(compatNullReferenceExceptionOnAV) )
+ {
+#ifdef FEATURE_HIJACK
+ // If we got the exception on a redirect function it means the original exception happened in managed code:
+ if (Thread::IsAddrOfRedirectFunc(pExceptionRecord->ExceptionAddress))
+ return (DWORD) kNullReferenceException;
+
+ if (pExceptionRecord->ExceptionAddress == (LPVOID)GetEEFuncEntryPoint(THROW_CONTROL_FOR_THREAD_FUNCTION))
+ {
+ return (DWORD) kNullReferenceException;
+ }
+#endif // FEATURE_HIJACK
+
+ // If the IP of the AV is not in managed code, then it's an AccessViolationException.
+ if (!ExecutionManager::IsManagedCode((PCODE)pExceptionRecord->ExceptionAddress))
+ {
+ return (DWORD) kAccessViolationException;
+ }
+
+ // If the address accessed is above 64k (Windows) or page size (PAL), then it's an AccessViolationException.
+ // Note: Win9x is a little different... it never gives you the proper address of the read or write that caused
+ // the fault. It always gives -1, so we can't use it as part of the decision... just give
+ // NullReferenceException instead.
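+ // For example (illustrative): dereferencing a field at offset 0x10 off a
+ // null pointer faults at address 0x10, below NULL_AREA_SIZE, and so becomes
+ // a NullReferenceException, while a wild access at 0x12345678 becomes an
+ // AccessViolationException.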
+ if (pExceptionRecord->ExceptionInformation[1] >= NULL_AREA_SIZE)
+ {
+ return (DWORD) kAccessViolationException;
+ }
+ }
+
+ return (DWORD) kNullReferenceException;
+ }
+
+ case STATUS_ARRAY_BOUNDS_EXCEEDED:
+ return (DWORD) kIndexOutOfRangeException;
+
+ case STATUS_NO_MEMORY:
+ return (DWORD) kOutOfMemoryException;
+
+ case STATUS_STACK_OVERFLOW:
+ return (DWORD) kStackOverflowException;
+
+#ifdef ALIGN_ACCESS
+ case STATUS_DATATYPE_MISALIGNMENT:
+ return (DWORD) kDataMisalignedException;
+#endif // ALIGN_ACCESS
+
+ default:
+ return kSEHException;
+ }
+}
+
+#ifdef _DEBUG
+#ifndef WIN64EXCEPTIONS
+// check if anyone has written to the stack above the handler which would wipe out the EH registration
+void CheckStackBarrier(EXCEPTION_REGISTRATION_RECORD *exRecord)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (exRecord->Handler != (PEXCEPTION_ROUTINE)COMPlusFrameHandler)
+ return;
+
+ DWORD *stackOverwriteBarrier = (DWORD *)((BYTE*)exRecord - offsetof(FrameHandlerExRecordWithBarrier, m_ExRecord));
+ for (int i =0; i < STACK_OVERWRITE_BARRIER_SIZE; i++) {
+ if (*(stackOverwriteBarrier+i) != STACK_OVERWRITE_BARRIER_VALUE) {
+ // to debug this error, you must determine who erroneously overwrote the stack
+ _ASSERTE(!"Fatal error: the stack has been overwritten");
+ }
+ }
+}
+#endif // !WIN64EXCEPTIONS
+#endif // _DEBUG
+
+//-------------------------------------------------------------------------
+// A marker for JIT -> EE transition when we know we're in preemptive
+// gc mode. As we leave the EE, we fix a few things:
+//
+// - the gc state must be set back to preemptive
+// - the COM+ frame chain must be rewound to what it was on entry
+// - ExInfo()->m_pSearchBoundary must be adjusted
+// if we popped the frame that is identified as beginning the next
+// crawl.
+//-------------------------------------------------------------------------
+
+void COMPlusCooperativeTransitionHandler(Frame* pFrame)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_EH, LL_INFO1000, "COMPlusCooprativeTransitionHandler unwinding\n"));
+
+ {
+ Thread* pThread = GetThread();
+
+ // Restore us to cooperative gc mode.
+ GCX_COOP();
+
+ // Pop the frame chain.
+ UnwindFrameChain(pThread, pFrame);
+ CONSISTENCY_CHECK(pFrame == pThread->GetFrame());
+
+#ifndef WIN64EXCEPTIONS
+ // An exception is being thrown through here. The COM+ exception
+ // info keeps a pointer to a frame that is used by the next
+ // COM+ Exception Handler as the starting point of its crawl.
+ // We may have popped this marker -- in which case, we need to
+ // update it to the current frame.
+ //
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+ Frame* pSearchBoundary = NULL;
+
+ if (pThread->IsExceptionInProgress())
+ {
+ pSearchBoundary = pExState->m_currentExInfo.m_pSearchBoundary;
+ }
+
+ if (pSearchBoundary && pSearchBoundary < pFrame)
+ {
+ LOG((LF_EH, LL_INFO1000, "\tpExInfo->m_pSearchBoundary = %08x\n", (void*)pFrame));
+ pExState->m_currentExInfo.m_pSearchBoundary = pFrame;
+ }
+#endif // !WIN64EXCEPTIONS
+}
+
+ // Restore us to preemptive gc mode.
+ GCX_PREEMP_NO_DTOR();
+}
+
+
+
+void StackTraceInfo::Init()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_EH, LL_INFO10000, "StackTraceInfo::Init (%p)\n", this));
+
+ m_pStackTrace = NULL;
+ m_cStackTrace = 0;
+ m_dFrameCount = 0;
+ m_cDynamicMethodItems = 0;
+ m_dCurrentDynamicIndex = 0;
+}
+
+void StackTraceInfo::FreeStackTrace()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (m_pStackTrace)
+ {
+ delete [] m_pStackTrace;
+ m_pStackTrace = NULL;
+ m_cStackTrace = 0;
+ m_dFrameCount = 0;
+ m_cDynamicMethodItems = 0;
+ m_dCurrentDynamicIndex = 0;
+ }
+}
+
+BOOL StackTraceInfo::IsEmpty()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return 0 == m_dFrameCount;
+}
+
+void StackTraceInfo::ClearStackTrace()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ LOG((LF_EH, LL_INFO1000, "StackTraceInfo::ClearStackTrace (%p)\n", this));
+ m_dFrameCount = 0;
+}
+
+// allocate stack trace info. As each function is found in the stack crawl, it will be added
+// to this list. If the list is too small, it is reallocated.
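+// (Growth itself happens in AppendElement, which doubles m_cStackTrace when the array
+// fills, so a failed allocation here only costs stack-trace fidelity, not correctness.)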
+void StackTraceInfo::AllocateStackTrace()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ LOG((LF_EH, LL_INFO1000, "StackTraceInfo::AllocateStackTrace (%p)\n", this));
+
+ if (!m_pStackTrace)
+ {
+#ifdef _DEBUG
+ unsigned int allocSize = 2; // make small to exercise realloc
+#else
+ unsigned int allocSize = 30;
+#endif
+
+ SCAN_IGNORE_FAULT; // A fault of new is okay here. The rest of the system is cool if we don't have enough
+ // memory to remember the stack as we run our first pass.
+ m_pStackTrace = new (nothrow) StackTraceElement[allocSize];
+
+ if (m_pStackTrace != NULL)
+ {
+ // Remember how much we allocated.
+ m_cStackTrace = allocSize;
+ m_cDynamicMethodItems = allocSize;
+ }
+ else
+ {
+ m_cStackTrace = 0;
+ m_cDynamicMethodItems = 0;
+ }
+ }
+}
+
+//
+// Returns true if it appended the element, false otherwise.
+//
+BOOL StackTraceInfo::AppendElement(BOOL bAllowAllocMem, UINT_PTR currentIP, UINT_PTR currentSP, MethodDesc* pFunc, CrawlFrame* pCf)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ }
+ CONTRACTL_END
+
+ LOG((LF_EH, LL_INFO10000, "StackTraceInfo::AppendElement (%p), IP = %p, SP = %p, %s::%s\n", this, currentIP, currentSP, pFunc ? pFunc->m_pszDebugClassName : "", pFunc ? pFunc->m_pszDebugMethodName : "" ));
+ BOOL bRetVal = FALSE;
+
+ if (pFunc != NULL && pFunc->IsILStub())
+ return FALSE;
+
+ // Save this function in the stack trace array, which we only build on the first pass. We'll try to expand the
+ // stack trace array if we don't have enough room. Note that we only try to expand if we're allowed to allocate
+ // memory (bAllowAllocMem).
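+ // (The expansion below is geometric -- capacity doubles each time -- so a crawl of n
+ // frames performs O(n) element copies in total, not O(n^2).)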
+ if (bAllowAllocMem && (m_dFrameCount >= m_cStackTrace))
+ {
+ StackTraceElement* pTempElement = new (nothrow) StackTraceElement[m_cStackTrace*2];
+
+ if (pTempElement != NULL)
+ {
+ memcpy(pTempElement, m_pStackTrace, m_cStackTrace * sizeof(StackTraceElement));
+ delete [] m_pStackTrace;
+ m_pStackTrace = pTempElement;
+ m_cStackTrace *= 2;
+ }
+ }
+
+ // Add the function to the stack trace array if there's room.
+ if (m_dFrameCount < m_cStackTrace)
+ {
+ StackTraceElement* pStackTraceElem;
+
+ // If we get in here, we'd better have a stack trace array.
+ CONSISTENCY_CHECK(m_pStackTrace != NULL);
+
+ pStackTraceElem = &(m_pStackTrace[m_dFrameCount]);
+
+ pStackTraceElem->pFunc = pFunc;
+
+ pStackTraceElem->ip = currentIP;
+ pStackTraceElem->sp = currentSP;
+
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ // When we are building stack trace as we encounter managed frames during exception dispatch,
+ // then none of those frames represent a stack trace from a foreign exception (as they represent
+ // the current exception). Hence, set the corresponding flag to FALSE.
+ pStackTraceElem->fIsLastFrameFromForeignStackTrace = FALSE;
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+ // This is a workaround to fix the generation of stack traces from exception objects so that
+ // they point to the line that actually generated the exception instead of the line
+ // following.
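+ // (A return address points at the instruction after the call; backing the IP up by one
+ // byte lands it inside the call instruction, which is enough for source-line lookup.)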
+ if (!(pCf->HasFaulted() || pCf->IsIPadjusted()) && pStackTraceElem->ip != 0)
+ {
+ pStackTraceElem->ip -= 1;
+ }
+
+ ++m_dFrameCount;
+ bRetVal = TRUE;
+ COUNTER_ONLY(GetPerfCounters().m_Excep.cThrowToCatchStackDepth++);
+ }
+
+#ifndef FEATURE_PAL // Watson is supported on Windows only
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+
+ if (pThread && (currentIP != 0))
+ {
+ // Set up the Watson bucketing details for the initial throw
+ // callback only if we don't already have them.
+ ThreadExceptionState *pExState = pThread->GetExceptionState();
+ if (!pExState->GetFlags()->GotWatsonBucketDetails())
+ {
+ // Adjust the IP if necessary.
+ UINT_PTR adjustedIp = currentIP;
+ // This is a workaround copied from above.
+ if (!(pCf->HasFaulted() || pCf->IsIPadjusted()) && adjustedIp != 0)
+ {
+ adjustedIp -= 1;
+ }
+
+ // Setup the bucketing details for the initial throw
+ SetupInitialThrowBucketDetails(adjustedIp);
+ }
+ }
+#endif // !FEATURE_PAL
+
+ return bRetVal;
+}
+
+void StackTraceInfo::GetLeafFrameInfo(StackTraceElement* pStackTraceElement)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (NULL == m_pStackTrace)
+ {
+ return;
+ }
+ _ASSERTE(NULL != pStackTraceElement);
+
+ *pStackTraceElement = m_pStackTrace[0];
+}
+
+
+void UnwindFrameChain(Thread* pThread, LPVOID pvLimitSP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ DISABLED(GC_TRIGGERS); // some Frames' ExceptionUnwind methods trigger :(
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // @todo - Remove this and add a hard SO probe, as we can't throw from here.
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ Frame* pFrame = pThread->m_pFrame;
+ if (pFrame < pvLimitSP)
+ {
+ GCX_COOP_THREAD_EXISTS(pThread);
+
+ //
+ // call ExceptionUnwind with the Frame chain intact
+ //
+ pFrame = pThread->NotifyFrameChainOfExceptionUnwind(pFrame, pvLimitSP);
+
+ //
+ // now pop the frames off by trimming the Frame chain
+ //
+ pThread->SetFrame(pFrame);
+ }
+}
+
+BOOL IsExceptionOfType(RuntimeExceptionKind reKind, Exception *pException)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ if (pException->IsType(reKind))
+ return TRUE;
+
+ if (pException->IsType(CLRException::GetType()))
+ {
+ // Since we're going to be holding onto the Throwable object we
+ // need to be in COOPERATIVE.
+ GCX_COOP();
+
+ OBJECTREF Throwable=((CLRException*)pException)->GetThrowable();
+
+ GCX_FORBID();
+ if (IsExceptionOfType(reKind, &Throwable))
+ return TRUE;
+ }
+ return FALSE;
+}
+
+BOOL IsExceptionOfType(RuntimeExceptionKind reKind, OBJECTREF *pThrowable)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ _ASSERTE(pThrowable != NULL);
+
+ if (*pThrowable == NULL)
+ return FALSE;
+
+ MethodTable *pThrowableMT = (*pThrowable)->GetTrueMethodTable();
+
+ // IsExceptionOfType is supported for mscorlib exception types only
+ _ASSERTE(reKind <= kLastExceptionInMscorlib);
+ return MscorlibBinder::IsException(pThrowableMT, reKind);
+}
+
+BOOL IsAsyncThreadException(OBJECTREF *pThrowable) {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ if ( (GetThread() && GetThread()->IsRudeAbort() && GetThread()->IsRudeAbortInitiated())
+ ||IsExceptionOfType(kThreadAbortException, pThrowable)
+ ||IsExceptionOfType(kThreadInterruptedException, pThrowable)) {
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
+BOOL IsUncatchable(OBJECTREF *pThrowable)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ FORBID_FAULT;
+ } CONTRACTL_END;
+
+ _ASSERTE(pThrowable != NULL);
+
+ Thread *pThread = GetThread();
+
+ if (pThread)
+ {
+ if (pThread->IsAbortInitiated())
+ return TRUE;
+
+ if (OBJECTREFToObject(*pThrowable)->GetMethodTable() == g_pExecutionEngineExceptionClass)
+ return TRUE;
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Corrupting exceptions are also uncatchable
+ if (CEHelper::IsProcessCorruptedStateException(*pThrowable))
+ {
+ return TRUE;
+ }
+#endif //FEATURE_CORRUPTING_EXCEPTIONS
+ }
+
+ return FALSE;
+}
+
+BOOL IsStackOverflowException(Thread* pThread, EXCEPTION_RECORD* pExceptionRecord)
+{
+ if (IsSOExceptionCode(pExceptionRecord->ExceptionCode))
+ {
+ return true;
+ }
+
+ if (IsComPlusException(pExceptionRecord) &&
+ pThread->IsLastThrownObjectStackOverflowException())
+ {
+ return true;
+ }
+
+ return false;
+}
+
+
+#ifdef _DEBUG
+BOOL IsValidClause(EE_ILEXCEPTION_CLAUSE *EHClause)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#if 0
+ DWORD valid = COR_ILEXCEPTION_CLAUSE_FILTER | COR_ILEXCEPTION_CLAUSE_FINALLY |
+ COR_ILEXCEPTION_CLAUSE_FAULT | COR_ILEXCEPTION_CLAUSE_CACHED_CLASS;
+
+ // <TODO>@NICE: enable this when VC stops generatng a bogus 0x8000.</TODO>
+ if (EHClause->Flags & ~valid)
+ return FALSE;
+#endif
+ if (EHClause->TryStartPC > EHClause->TryEndPC)
+ return FALSE;
+ return TRUE;
+}
+#endif
+
+
+#ifdef DEBUGGING_SUPPORTED
+LONG NotifyDebuggerLastChance(Thread *pThread,
+ EXCEPTION_POINTERS *pExceptionInfo,
+ BOOL jitAttachRequested)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ LONG retval = EXCEPTION_CONTINUE_SEARCH;
+
+ // Debugger does func-evals inside this call, which may take nested exceptions. We need a nested exception
+ // handler to allow this.
+ INSTALL_NESTED_EXCEPTION_HANDLER(pThread->GetFrame());
+
+ EXCEPTION_POINTERS dummy;
+ dummy.ExceptionRecord = NULL;
+ dummy.ContextRecord = NULL;
+
+ if (NULL == pExceptionInfo)
+ {
+ pExceptionInfo = &dummy;
+ }
+ else if (NULL != pExceptionInfo->ExceptionRecord && NULL == pExceptionInfo->ContextRecord)
+ {
+ // In a soft stack overflow, we have an exception record but not a context record.
+ // Debugger::LastChanceManagedException requires that both ExceptionRecord and
+ // ContextRecord be valid or both be NULL.
+ pExceptionInfo = &dummy;
+ }
+
+ if (g_pDebugInterface && g_pDebugInterface->LastChanceManagedException(pExceptionInfo,
+ pThread,
+ jitAttachRequested) == ExceptionContinueExecution)
+ {
+ retval = EXCEPTION_CONTINUE_EXECUTION;
+ }
+
+ UNINSTALL_NESTED_EXCEPTION_HANDLER();
+
+ EX_TRY
+ {
+ // If the debugger wants to intercept the unhandled exception, then we immediately unwind without returning.
+ // If there is a problem with this function unwinding here, it could be separated out; however,
+ // we need to be very careful. Previously we had the opposite problem, in that we notified the debugger
+ // of an unhandled exception and then either:
+ // a) never gave the debugger a chance to intercept later, or
+ // b) code changed more process state, unaware that the debugger would be handling the exception
+ if ((pThread->IsExceptionInProgress()) && pThread->GetExceptionState()->GetFlags()->DebuggerInterceptInfo())
+ {
+ // The debugger wants to intercept this exception. It may return in a failure case, in which case we want
+ // to continue thru this path.
+ ClrDebuggerDoUnwindAndIntercept(X86_FIRST_ARG(EXCEPTION_CHAIN_END) pExceptionInfo->ExceptionRecord);
+ }
+ }
+ EX_CATCH // if we fail to intercept just continue as is
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return retval;
+}
+
+#ifndef FEATURE_PAL
+//----------------------------------------------------------------------------
+//
+// DoReportFault - wrapper for ReportFault in FaultRep.dll, which also handles
+// debugger launch synchronization if the user chooses to launch
+// a debugger
+//
+// Arguments:
+// pExceptionInfo - pointer to exception info
+//
+// Return Value:
+// The returned EFaultRepRetVal value from ReportFault
+//
+// Note:
+//
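+//    pfn_REPORTFAULT is assumed to match the documented faultrep.h export:
+//        EFaultRepRetVal WINAPI ReportFault(LPEXCEPTION_POINTERS pep, DWORD dwOpt);
+//    (dwOpt is passed as 0 below, i.e. default reporting behavior.)
+//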
+//----------------------------------------------------------------------------
+EFaultRepRetVal DoReportFault(EXCEPTION_POINTERS * pExceptionInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ HINSTANCE hmod = WszLoadLibrary(W("FaultRep.dll"));
+ EFaultRepRetVal r = frrvErr;
+ if (hmod)
+ {
+ pfn_REPORTFAULT pfnReportFault = (pfn_REPORTFAULT)GetProcAddress(hmod, "ReportFault");
+ if (pfnReportFault)
+ {
+ r = pfnReportFault(pExceptionInfo, 0);
+ }
+ FreeLibrary(hmod);
+ }
+
+ if (r == frrvLaunchDebugger)
+ {
+ // Wait until the pending managed debugger attach is completed
+ if (g_pDebugInterface != NULL)
+ {
+ g_pDebugInterface->WaitForDebuggerAttach();
+ }
+ }
+ return r;
+}
+
+//----------------------------------------------------------------------------
+//
+// DisableOSWatson - Set error mode to disable OS Watson
+//
+// Arguments:
+// None
+//
+// Return Value:
+// None
+//
+// Note: SetErrorMode changes the process-wide error mode, which can be overridden by other threads
+// in a race. The solution is to use the new Win7 per-thread error mode APIs, which take precedence
+// over the process-wide error mode. However, we shall not use per-thread error mode if the runtime
+// is being hosted, because the OS would then ignore the process-wide error mode set by the host.
+//
+//----------------------------------------------------------------------------
+void DisableOSWatson(void)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // When a debugger is attached (or will be attaching), we need to disable the OS GPF dialog.
+ // If we don't, an unhandled managed exception will launch the OS watson dialog even when
+ // the debugger is attached.
+ const UINT lastErrorMode = SetErrorMode(0);
+ SetErrorMode(lastErrorMode | SEM_NOGPFAULTERRORBOX);
+ LOG((LF_EH, LL_INFO100, "DisableOSWatson: SetErrorMode = 0x%x\n", lastErrorMode | SEM_NOGPFAULTERRORBOX));
+
+#ifndef FEATURE_CORECLR
+ // CoreCLR is always hosted and so this condition is always false
+ if (RunningOnWin7() && !CLRHosted())
+ {
+ typedef DWORD (WINAPI * GetThreadErrorModeFnPtr)(void);
+ typedef BOOL (WINAPI * SetThreadErrorModeFnPtr)(DWORD, LPDWORD);
+ GetThreadErrorModeFnPtr pFnGetThreadErrorMode;
+ SetThreadErrorModeFnPtr pFnSetThreadErrorMode;
+
+ HINSTANCE hKernel32 = WszGetModuleHandle(WINDOWS_KERNEL32_DLLNAME_W);
+ if (hKernel32 != NULL)
+ {
+ pFnGetThreadErrorMode = (GetThreadErrorModeFnPtr)GetProcAddress(hKernel32, "GetThreadErrorMode");
+ pFnSetThreadErrorMode = (SetThreadErrorModeFnPtr)GetProcAddress(hKernel32, "SetThreadErrorMode");
+
+ // GetThreadErrorMode and SetThreadErrorMode should be available on Win7.
+ _ASSERTE((pFnGetThreadErrorMode != NULL) && (pFnSetThreadErrorMode != NULL));
+ if ((pFnGetThreadErrorMode != NULL) && (pFnSetThreadErrorMode != NULL))
+ {
+ DWORD dwOldMode = (*pFnGetThreadErrorMode)();
+ (*pFnSetThreadErrorMode)(dwOldMode | SEM_NOGPFAULTERRORBOX, &dwOldMode);
+ LOG((LF_EH, LL_INFO100, "DisableOSWatson: SetThreadErrorMode = 0x%x\n", dwOldMode | SEM_NOGPFAULTERRORBOX));
+ }
+ }
+ }
+#endif // FEATURE_CORECLR
+}
+
+
+//----------------------------------------------------------------------------
+//
+// RaiseFailFastExceptionOnWin7 - invoke RaiseFailFastException on Win7
+//
+// Arguments:
+// pExceptionRecord - pointer to exception record
+// pContext - pointer to exception context
+//
+// Return Value:
+// None
+//
+// Note:
+// RaiseFailFastException will not return unless a debugger is attached
+// and the user chooses to keep going.
+//
+//----------------------------------------------------------------------------
+void RaiseFailFastExceptionOnWin7(PEXCEPTION_RECORD pExceptionRecord, PCONTEXT pContext)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(RunningOnWin7());
+
+#ifndef FEATURE_CORESYSTEM
+ typedef void (WINAPI * RaiseFailFastExceptionFnPtr)(PEXCEPTION_RECORD, PCONTEXT, DWORD);
+ RaiseFailFastExceptionFnPtr RaiseFailFastException;
+
+ HINSTANCE hKernel32 = WszGetModuleHandle(WINDOWS_KERNEL32_DLLNAME_W);
+ if (hKernel32 == NULL)
+ return;
+
+ RaiseFailFastException = (RaiseFailFastExceptionFnPtr)GetProcAddress(hKernel32, "RaiseFailFastException");
+ if (RaiseFailFastException == NULL)
+ return;
+#endif
+
+ // enable preemptive mode before call into OS to allow runtime suspend to finish
+ GCX_PREEMP();
+
+ STRESS_LOG0(LF_CORDB,LL_INFO10, "D::RFFE: About to call RaiseFailFastException\n");
+ RaiseFailFastException(pExceptionRecord, pContext, 0);
+ STRESS_LOG0(LF_CORDB,LL_INFO10, "D::RFFE: Return from RaiseFailFastException\n");
+}
+#endif // !FEATURE_PAL
+
+//------------------------------------------------------------------------------
+// This function is called on an unhandled exception, via the runtime's
+// Unhandled Exception Filter (Hence the name, "last chance", because this
+// is the last chance to see the exception. When running under a native
+// debugger, that won't generally happen, because the OS notifies the debugger
+// instead of calling the application's registered UEF; the debugger will
+// show the exception as second chance.)
+// The function is also called sometimes for the side effects, which are
+// to possibly invoke Watson and to possibly notify the managed debugger.
+// If running in a debugger already, either native or managed, we shouldn't
+// invoke Watson.
+// If not running under a managed debugger, we shouldn't try to send a debugger
+// notification.
+//------------------------------------------------------------------------------
+LONG WatsonLastChance( // EXCEPTION_CONTINUE_SEARCH, _CONTINUE_EXECUTION
+ Thread *pThread, // Thread object.
+ EXCEPTION_POINTERS *pExceptionInfo,// Information about reported exception.
+ TypeOfReportedError tore) // Just what kind of error is reported?
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ // If allocation fails, we may not produce a Watson dump. But this is not fatal.
+ CONTRACT_VIOLATION(AllViolation);
+ LOG((LF_EH, LL_INFO10, "D::WLC: Enter WatsonLastChance\n"));
+
+#ifndef FEATURE_PAL
+ static DWORD fDisableWatson = -1;
+ if (fDisableWatson == -1)
+ {
+ fDisableWatson = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DisableWatsonForManagedExceptions);
+ }
+
+ if (fDisableWatson && (tore.GetType() == TypeOfReportedError::UnhandledException))
+ {
+ DisableOSWatson();
+ LOG((LF_EH, LL_INFO10, "D::WLC: OS Watson is disabled for an managed unhandled exception\n"));
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+#endif // !FEATURE_PAL
+
+ // We don't want to launch Watson if a debugger is already attached to
+ // the process.
+ BOOL shouldNotifyDebugger = FALSE; // Assume we won't debug.
+
+ // VS debugger team requested the Whidbey experience, which is no Watson when the debugger thread detects
+ // that the debugger process is abruptly terminated, and triggers a failfast error. In this particular
+ // scenario CORDebuggerAttached() will be TRUE, but IsDebuggerPresent() will be FALSE because from OS
+ // perspective the native debugger has been detached from the debuggee, but CLR has not yet marked the
+ // managed debugger as detached. Therefore, CORDebuggerAttached() is checked, so Watson will not pop up
+ // when a debugger is abruptly terminated. It also prevents a debugger from being launched on a helper
+ // thread.
+ BOOL alreadyDebugging = CORDebuggerAttached() || IsDebuggerPresent();
+
+ BOOL jitAttachRequested = !alreadyDebugging; // Launch debugger if not already running.
+
+#ifdef _DEBUG
+ // If BreakOnUnCaughtException is set, we may be using a native debugger to debug this stuff
+ BOOL BreakOnUnCaughtException = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnUncaughtException);
+ if(!alreadyDebugging || (!CORDebuggerAttached() && BreakOnUnCaughtException) )
+#else
+ if (!alreadyDebugging)
+#endif
+ {
+ LOG((LF_EH, LL_INFO10, "WatsonLastChance: Debugger not attached at sp %p ...\n", GetCurrentSP()));
+
+#ifndef FEATURE_PAL
+
+ BOOL bRunDoFaultReport = TRUE;
+ FaultReportResult result = FaultReportResultQuit;
+
+ if (RunningOnWin7())
+ {
+ BOOL fSOException = FALSE;
+
+ if ((pExceptionInfo != NULL) &&
+ (pExceptionInfo->ExceptionRecord != NULL) &&
+ (pExceptionInfo->ExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW))
+ {
+ fSOException = TRUE;
+ }
+
+ if (g_pDebugInterface)
+ {
+ // We are about to let the OS trigger a jit attach; however, we need to synchronize with our
+ // own jit attach that we might be doing on another thread.
+ // PreJitAttach races this thread against any others which might be attaching; if some other
+ // thread is doing it, then we wait for its attach to complete first.
+ g_pDebugInterface->PreJitAttach(TRUE, FALSE, FALSE);
+ }
+
+ // Let unhandled exceptions other than stack overflow go to the OS
+ if (tore.IsUnhandledException() && !fSOException)
+ {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+ else if (tore.IsUserBreakpoint())
+ {
+ DoReportFault(pExceptionInfo);
+ }
+ else
+ {
+ BOOL fWatsonAlreadyLaunched = FALSE;
+ if (FastInterlockCompareExchange(&g_watsonAlreadyLaunched, 1, 0) != 0)
+ {
+ fWatsonAlreadyLaunched = TRUE;
+ }
+
+ // Logic to avoid a double prompt if more than one thread calls into WatsonLastChance
+ if (!fWatsonAlreadyLaunched)
+ {
+ // EEPolicy::HandleFatalStackOverflow pushes a FaultingExceptionFrame on the stack after an SO
+ // exception. Our hijack code runs in the exception context and overwrites the stack space
+ // after the SO exception, so we need to pop this frame before invoking RaiseFailFast.
+ // This cumbersome code should be removed once SO synchronization is moved to be completely
+ // out-of-process.
+ if (fSOException && pThread && pThread->GetFrame() != FRAME_TOP)
+ {
+ GCX_COOP(); // Must be cooperative to modify frame chain.
+ pThread->GetFrame()->Pop(pThread);
+ }
+
+ LOG((LF_EH, LL_INFO10, "D::WLC: Call RaiseFailFastExceptionOnWin7\n"));
+ RaiseFailFastExceptionOnWin7(pExceptionInfo == NULL ? NULL : pExceptionInfo->ExceptionRecord,
+ pExceptionInfo == NULL ? NULL : pExceptionInfo->ContextRecord);
+ STRESS_LOG0(LF_CORDB,LL_INFO10, "D::WLC: Return from RaiseFailFastExceptionOnWin7\n");
+ }
+ }
+
+ if (g_pDebugInterface)
+ {
+ // if execution resumed here then we may or may not be attached
+ // either way we need to end the attach process and unblock any other
+ // threads which were waiting for the attach here to complete
+ g_pDebugInterface->PostJitAttach();
+ }
+
+
+ if (IsDebuggerPresent())
+ {
+ result = FaultReportResultDebug;
+ jitAttachRequested = FALSE;
+ }
+ }
+ else
+ {
+ // If we've got a fatal error but Watson isn't enabled, then fall back to old-style non-managed-aware
+ // error reporting using faultrep to try and ensure we get an error report about this fatal error.
+ if (!IsWatsonEnabled() && tore.IsFatalError() && (pExceptionInfo != NULL))
+ {
+ EFaultRepRetVal r = DoReportFault(pExceptionInfo);
+ if (r != frrvErr && r != frrvErrNoDW && r != frrvErrTimeout)
+ {
+ // Once native Watson is successfully launched, we should not try to launch
+ // our fake Watson dialog box.
+ bRunDoFaultReport = FALSE;
+ }
+ }
+
+ if (bRunDoFaultReport)
+ {
+ // http://devdiv/sites/docs/NetFX4/CLR/Specs/Developer%20Services/Error%20Reporting/WER%20SxS%20DCR.doc
+ //
+ // Watson SxS support for Desktop CLR
+ //
+ // For an unhandled exception thrown from native code, the first runtime that encounters the
+ // unhandled native exception will report Watson if it is allowed by the Watson SxS manager to
+ // do so. If more than one runtime attempts to report Watson concurrently, only one runtime
+ // will be permitted to report Watson. The result is that at most one Watson report will be
+ // submitted for a process.
+ //
+ // To coordinate Watson reporting among the runtimes in a process, the Watson SxS manager, which
+ // is part of the shim, provides a new set of APIs and keeps track of whether a Watson report
+ // has been submitted for the process.
+ //
+ // Each runtime registers an exception claiming callback with the Watson SxS manager at startup.
+ // The Watson SxS manager provides an exception claiming API, which iterates through the registered
+ // exception claiming callbacks to determine if an exception was thrown by one of the registered
+ // runtimes.
+ //
+ // Before a runtime goes on to process Watson for an unhandled exception, it first asks the Watson SxS
+ // manager if a Watson report has already been submitted for the current process. If so, it
+ // will not try to do Watson. If not, it checks whether the unhandled exception was thrown by itself.
+ // If so, it reports Watson only when the Watson SxS manager allows it to do so.
+ //
+ // If the unhandled exception was not thrown by itself, it invokes the Watson SxS manager's exception
+ // claiming API to determine if the unhandled exception was thrown by another runtime which is
+ // responsible for reporting Watson. If so, it will not try to do Watson. If none of the runtimes
+ // in the process claims ownership of the unhandled exception, it reports Watson only when
+ // the Watson SxS manager allows it to do so.
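+ //
+ // That flow, in rough pseudocode (names illustrative, not the actual SxS manager API):
+ //
+ //   if (WatsonAlreadySubmittedForProcess())          return;      // someone else reported
+ //   if (ThrownByThisRuntime(ex) || !ClaimedByAnyRuntime(ex))
+ //       if (SxSManagerAllowsWatson())                ReportWatson(ex);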
+ result = DoFaultReport(pExceptionInfo, tore);
+
+ // Set the event to indicate that Watson processing is completed. Other threads can continue.
+#if defined(FEATURE_UEF_CHAINMANAGER)
+ if (g_pUEFManager)
+ {
+ g_pUEFManager->GetWastonSxSManagerInstance()->SignalWatsonSxSCompletionEvent();
+ }
+#else
+ UnsafeSetEvent(g_hWatsonCompletionEvent);
+#endif // FEATURE_UEF_CHAINMANAGER
+ }
+ }
+
+ switch(result)
+ {
+ case FaultReportResultAbort:
+ {
+ // We couldn't launch Watson properly. First fall back to OS error reporting
+ // so that we don't break native apps.
+ EFaultRepRetVal r = frrvErr;
+
+ if (pExceptionInfo != NULL)
+ {
+ GCX_PREEMP();
+
+ if (pExceptionInfo->ExceptionRecord->ExceptionCode != STATUS_STACK_OVERFLOW)
+ {
+ r = DoReportFault(pExceptionInfo);
+ }
+ else
+ {
+ // Since the StackOverflow handler also calls us, we must keep our stack budget
+ // to a minimum. Thus, we will launch a thread to do the actual work.
+ FaultReportInfo fri;
+ fri.m_fDoReportFault = TRUE;
+ fri.m_pExceptionInfo = pExceptionInfo;
+ // DoFaultCreateThreadReportCallback will overwrite this - if it doesn't, we'll assume it failed.
+ fri.m_faultRepRetValResult = frrvErr;
+
+ // Stack overflow case - we don't have enough stack on our own thread so let the debugger
+ // helper thread do the work.
+ if (!g_pDebugInterface || FAILED(g_pDebugInterface->RequestFavor(DoFaultReportDoFavorCallback, &fri)))
+ {
+ // If we can't initialize the debugger helper thread or we are running on the debugger helper
+ // thread, give it up. We don't have enough stack space.
+ }
+
+ r = fri.m_faultRepRetValResult;
+ }
+ }
+
+ if ((r == frrvErr) || (r == frrvErrNoDW) || (r == frrvErrTimeout))
+ {
+ // If we don't have an exception record, or otherwise can't use OS error
+ // reporting, then offer the old "press OK to terminate, cancel to debug"
+ // dialog as a further fallback.
+ if (g_pDebugInterface && g_pDebugInterface->FallbackJITAttachPrompt())
+ {
+ // User requested to launch the debugger
+ shouldNotifyDebugger = TRUE;
+ }
+ }
+ else if (r == frrvLaunchDebugger)
+ {
+ // User requested to launch the debugger
+ shouldNotifyDebugger = TRUE;
+ }
+ break;
+ }
+ case FaultReportResultQuit:
+ // No debugger, just exit normally
+ break;
+ case FaultReportResultDebug:
+ // JIT attach a debugger here.
+ shouldNotifyDebugger = TRUE;
+ break;
+ default:
+ UNREACHABLE_MSG("Unknown FaultReportResult");
+ break;
+ }
+#endif // !FEATURE_PAL
+ }
+ // When the debugger thread detects that the debugger process is abruptly terminated, and triggers
+ // a failfast error, CORDebuggerAttached() will be TRUE, but IsDebuggerPresent() will be FALSE.
+ // If IsDebuggerPresent() is FALSE, do not try to notify the debugger.
+ else if (CORDebuggerAttached() && IsDebuggerPresent())
+ {
+ // Already debugging with a managed debugger. Should let that debugger know.
+ LOG((LF_EH, LL_INFO100, "WatsonLastChance: Managed debugger already attached at sp %p ...\n", GetCurrentSP()));
+
+ // The managed EH subsystem ignores native breakpoints and single step exceptions. These exceptions are
+ // not considered managed, and the managed debugger should not be notified. Moreover, we won't have
+ // created a managed exception object at this point.
+ if (tore.GetType() != TypeOfReportedError::NativeBreakpoint)
+ {
+ shouldNotifyDebugger = TRUE;
+ }
+ }
+
+#ifndef FEATURE_PAL
+ DisableOSWatson();
+#endif // !FEATURE_PAL
+
+ if (!shouldNotifyDebugger)
+ {
+ LOG((LF_EH, LL_INFO100, "WatsonLastChance: should not notify debugger. Returning EXCEPTION_CONTINUE_SEARCH\n"));
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ // If no debugger interface, we can't notify the debugger.
+ if (g_pDebugInterface == NULL)
+ {
+ LOG((LF_EH, LL_INFO100, "WatsonLastChance: No debugger interface. Returning EXCEPTION_CONTINUE_SEARCH\n"));
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ LOG((LF_EH, LL_INFO10, "WatsonLastChance: Notifying debugger\n"));
+
+ switch (tore.GetType())
+ {
+ case TypeOfReportedError::FatalError:
+ #ifdef MDA_SUPPORTED
+ {
+ MdaFatalExecutionEngineError * pMDA = MDA_GET_ASSISTANT_EX(FatalExecutionEngineError);
+
+ if ((pMDA != NULL) && (pExceptionInfo != NULL) && (pExceptionInfo->ExceptionRecord != NULL))
+ {
+ TADDR addr = (TADDR) pExceptionInfo->ExceptionRecord->ExceptionAddress;
+ HRESULT hrError = pExceptionInfo->ExceptionRecord->ExceptionCode;
+ pMDA->ReportFEEE(addr, hrError);
+ }
+ }
+ #endif // MDA_SUPPORTED
+
+ if (pThread != NULL)
+ {
+ NotifyDebuggerLastChance(pThread, pExceptionInfo, jitAttachRequested);
+
+ // If the registered debugger is not a managed debugger, we need to stop the debugger here.
+ if (!CORDebuggerAttached() && IsDebuggerPresent())
+ {
+ DebugBreak();
+ }
+ }
+ else
+ {
+ g_pDebugInterface->LaunchDebuggerForUser(GetThread(), pExceptionInfo, FALSE, FALSE);
+ }
+
+ return EXCEPTION_CONTINUE_SEARCH;
+
+ case TypeOfReportedError::UnhandledException:
+ case TypeOfReportedError::NativeBreakpoint:
+ // Notify the debugger only if this is a managed thread.
+ if (pThread != NULL)
+ {
+ return NotifyDebuggerLastChance(pThread, pExceptionInfo, jitAttachRequested);
+ }
+ else
+ {
+ g_pDebugInterface->JitAttach(pThread, pExceptionInfo, FALSE, FALSE);
+
+ // return EXCEPTION_CONTINUE_SEARCH, so OS's UEF will reraise the unhandled exception for debuggers
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ case TypeOfReportedError::UserBreakpoint:
+ g_pDebugInterface->LaunchDebuggerForUser(pThread, pExceptionInfo, TRUE, FALSE);
+
+ return EXCEPTION_CONTINUE_EXECUTION;
+
+ case TypeOfReportedError::NativeThreadUnhandledException:
+ g_pDebugInterface->JitAttach(pThread, pExceptionInfo, FALSE, FALSE);
+
+ // return EXCEPTION_CONTINUE_SEARCH, so OS's UEF will reraise the unhandled exception for debuggers
+ return EXCEPTION_CONTINUE_SEARCH;
+
+ default:
+ _ASSERTE(!"Unknown case in WatsonLastChance");
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ UNREACHABLE();
+} // LONG WatsonLastChance()
+
+//---------------------------------------------------------------------------------------
+//
+// This is just a simple helper to do some basic checking to see if an exception is intercepted.
+// It checks that we are on a managed thread and that an exception is indeed in progress.
+//
+// Return Value:
+// true iff we are on a managed thread and an exception is in flight
+//
+
+bool CheckThreadExceptionStateForInterception()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ Thread* pThread = GetThread();
+
+ if (pThread == NULL)
+ {
+ return false;
+ }
+
+ if (!pThread->IsExceptionInProgress())
+ {
+ return false;
+ }
+
+ return true;
+}
+#endif
+
+//===========================================================================================
+//
+// UNHANDLED EXCEPTION HANDLING
+//
+
+static Volatile<BOOL> fReady = 0;
+static SpinLock initLock;
+
+void DECLSPEC_NORETURN RaiseDeadLockException()
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+// Disable the "initialization of static local vars is no thread safe" error
+#ifdef _MSC_VER
+#pragma warning(disable: 4640)
+#endif
+ CHECK_LOCAL_STATIC_VAR(static SString s);
+#ifdef _MSC_VER
+#pragma warning(default : 4640)
+#endif
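+ // Lazily build the localized message using double-checked initialization: the
+ // unsynchronized fReady test is the fast path, and the test is repeated under
+ // initLock before s is published by setting fReady.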
+ if (!fReady)
+ {
+ WCHAR name[256];
+ HRESULT hr = S_OK;
+ {
+ FAULT_NOT_FATAL();
+ GCX_COOP();
+ hr = UtilLoadStringRC(IDS_EE_THREAD_DEADLOCK_VICTIM, name, sizeof(name)/sizeof(WCHAR), 1);
+ }
+ initLock.Init(LOCK_TYPE_DEFAULT);
+ SpinLockHolder __spinLockHolder(&initLock);
+ if (!fReady)
+ {
+ if (SUCCEEDED(hr))
+ {
+ s.Set(name);
+ fReady = 1;
+ }
+ else
+ {
+ ThrowHR(hr);
+ }
+ }
+ }
+
+ ThrowHR(HOST_E_DEADLOCK, s);
+}
+
+//******************************************************************************
+//
+// ExceptionIsAlwaysSwallowed
+//
+// Determine whether an exception is of a type that it should always
+// be swallowed, even when exceptions otherwise are left to go unhandled.
+// (For Whidbey, ThreadAbort, RudeThreadAbort, or AppDomainUnload exception)
+//
+// Parameters:
+// pExceptionInfo EXCEPTION_POINTERS for current exception
+//
+// Returns:
+// true If the exception is of a type that is always swallowed.
+//
+bool ExceptionIsAlwaysSwallowed(EXCEPTION_POINTERS *pExceptionInfo)
+{
+ bool isSwallowed = false;
+
+ // The exception code must be ours, if it is one of our Exceptions.
+ if (IsComPlusException(pExceptionInfo->ExceptionRecord))
+ {
+ // Our exception code. Get the current exception from the thread.
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ OBJECTREF throwable;
+
+ GCX_COOP();
+ if ((throwable = pThread->GetThrowable()) == NULL)
+ {
+ throwable = pThread->LastThrownObject();
+ }
+ //@todo: could throwable be NULL here?
+ isSwallowed = IsExceptionOfType(kThreadAbortException, &throwable) ||
+ IsExceptionOfType(kAppDomainUnloadedException, &throwable);
+ }
+ }
+
+ return isSwallowed;
+} // BOOL ExceptionIsAlwaysSwallowed()
+
+//
+// UserBreakpointFilter is used to ensure that we get a popup on user breakpoints (DebugBreak(), hard-coded int 3,
+// etc.) as soon as possible.
+//
+LONG UserBreakpointFilter(EXCEPTION_POINTERS* pEP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#if defined(DEBUGGING_SUPPORTED) && !defined(FEATURE_PAL)
+ // Invoke the unhandled exception filter, bypassing any further first pass exception processing and treating
+ // user breakpoints as if they're unhandled exceptions right away.
+ //
+ // @todo: The InternalUnhandledExceptionFilter can trigger.
+ CONTRACT_VIOLATION(GCViolation | ThrowsViolation | ModeViolation | FaultViolation | FaultNotFatal);
+
+
+ int result = UnhandledExceptionFilter(pEP);
+
+ if (result == EXCEPTION_CONTINUE_SEARCH)
+ {
+ // A debugger got attached. Instead of allowing the exception to continue up, and hope for the
+ // second-chance, we cause it to happen again. The debugger snags all int3's on first-chance. NOTE: the
+ // InternalUnhandledExceptionFilter allowed GC's to occur, but it may be the case that some managed frames
+ // may have been unprotected. Therefore, you may have GC holes if you attempt to continue execution from
+ // here.
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+#endif // DEBUGGING_SUPPORTED && !FEATURE_PAL
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, FailFast))
+ {
+ // Fire an ETW FailFast event
+ FireEtwFailFast(W("StatusBreakpoint"),
+ (const PVOID)((pEP && pEP->ContextRecord) ? GetIP(pEP->ContextRecord) : 0),
+ ((pEP && pEP->ExceptionRecord) ? pEP->ExceptionRecord->ExceptionCode : 0),
+ STATUS_BREAKPOINT,
+ GetClrInstanceId());
+ }
+
+ // Otherwise, we terminate the process.
+ TerminateProcess(GetCurrentProcess(), STATUS_BREAKPOINT);
+
+ // Shouldn't get here ...
+ return EXCEPTION_CONTINUE_EXECUTION;
+} // LONG UserBreakpointFilter()
+
+//******************************************************************************
+//
+// DefaultCatchFilter
+//
+// The old default except filter (v1.0/v1.1). For user breakpoints, call out to UserBreakpointFilter(),
+// but otherwise return EXCEPTION_EXECUTE_HANDLER, to swallow the exception.
+//
+// Parameters:
+// pExceptionInfo EXCEPTION_POINTERS for current exception
+// pv A constant as an INT_PTR. Must be COMPLUS_EXCEPTION_EXECUTE_HANDLER.
+//
+// Returns:
+// EXCEPTION_EXECUTE_HANDLER Generally returns this to swallow the exception.
+//
+// IMPORTANT!! READ ME!!
+//
+// This filter is very similar to DefaultCatchNoSwallowFilter, except when unhandled
+// exception policy/config dictate swallowing the exception.
+// If you make any changes to this function, look to see if the other one also needs
+// the same change.
+//
+LONG DefaultCatchFilter(EXCEPTION_POINTERS *ep, PVOID pv)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ //
+ // @TODO: this seems like a strong candidate for elimination due to duplication with
+ // our vectored exception handler.
+ //
+
+ DefaultCatchFilterParam *pParam;
+ pParam = (DefaultCatchFilterParam *) pv;
+
+ // the only valid parameter for DefaultCatchFilter so far
+ _ASSERTE(pParam->pv == COMPLUS_EXCEPTION_EXECUTE_HANDLER);
+
+ PEXCEPTION_RECORD er = ep->ExceptionRecord;
+ DWORD code = er->ExceptionCode;
+
+ if (code == STATUS_SINGLE_STEP || code == STATUS_BREAKPOINT)
+ {
+ return UserBreakpointFilter(ep);
+ }
+
+ // return EXCEPTION_EXECUTE_HANDLER to swallow the exception.
+ return EXCEPTION_EXECUTE_HANDLER;
+} // LONG DefaultCatchFilter()
+
+
+//******************************************************************************
+//
+// DefaultCatchNoSwallowFilter
+//
+// The new default except filter (v2.0). For user breakpoints, call out to UserBreakpointFilter().
+// Otherwise consults host policy and config file to return EXECUTE_HANDLER / CONTINUE_SEARCH.
+//
+// Parameters:
+// pExceptionInfo EXCEPTION_POINTERS for current exception
+// pv A constant as an INT_PTR. Must be COMPLUS_EXCEPTION_EXECUTE_HANDLER.
+//
+// Returns:
+// EXCEPTION_CONTINUE_SEARCH Generally returns this to let the exception go unhandled.
+// EXCEPTION_EXECUTE_HANDLER May return this to swallow the exception.
+//
+// IMPORTANT!! READ ME!!
+//
+// This filter is very similar to DefaultCatchFilter, except when unhandled
+// exception policy/config dictate swallowing the exception.
+// If you make any changes to this function, look to see if the other one also needs
+// the same change.
+//
+LONG DefaultCatchNoSwallowFilter(EXCEPTION_POINTERS *ep, PVOID pv)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DefaultCatchFilterParam *pParam;
+ pParam = (DefaultCatchFilterParam *) pv;
+
+ // the only valid parameter for DefaultCatchFilter so far
+ _ASSERTE(pParam->pv == COMPLUS_EXCEPTION_EXECUTE_HANDLER);
+
+ PEXCEPTION_RECORD er = ep->ExceptionRecord;
+ DWORD code = er->ExceptionCode;
+
+ if (code == STATUS_SINGLE_STEP || code == STATUS_BREAKPOINT)
+ {
+ return UserBreakpointFilter(ep);
+ }
+
+ // If host policy or config file says "swallow"...
+ if (SwallowUnhandledExceptions())
+ { // ...return EXCEPTION_EXECUTE_HANDLER to swallow the exception.
+ return EXCEPTION_EXECUTE_HANDLER;
+ }
+
+ // If the exception is of a type that is always swallowed (ThreadAbort, AppDomainUnload)...
+ if (ExceptionIsAlwaysSwallowed(ep))
+ { // ...return EXCEPTION_EXECUTE_HANDLER to swallow the exception.
+ return EXCEPTION_EXECUTE_HANDLER;
+ }
+
+ // Otherwise, continue search. i.e. let the exception go unhandled (at least for now).
+ return EXCEPTION_CONTINUE_SEARCH;
+} // LONG DefaultCatchNoSwallowFilter()
+
+#if defined(FEATURE_CORECLR)
+// Note: This is used only for CoreCLR on WLC.
+//
+// We keep a pointer to the previous unhandled exception filter. After we install, we use
+// this to call the previous guy. When we un-install, we put them back. Putting them back
+// is a bug -- we have no guarantee that the DLL unload order matches the DLL load order -- we
+// may in fact be putting back a pointer to a DLL that has been unloaded.
+//
+
+// initialize to -1 because NULL won't detect difference between us not having installed our handler
+// yet and having installed it but the original handler was NULL.
+static LPTOP_LEVEL_EXCEPTION_FILTER g_pOriginalUnhandledExceptionFilter = (LPTOP_LEVEL_EXCEPTION_FILTER)-1;
+#define FILTER_NOT_INSTALLED (LPTOP_LEVEL_EXCEPTION_FILTER) -1
+#endif // defined(FEATURE_CORECLR)
+
+
+BOOL InstallUnhandledExceptionFilter() {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+#ifndef FEATURE_PAL
+#ifdef FEATURE_UEF_CHAINMANAGER
+ if (g_pUEFManager == NULL) {
+
+ /*CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;*/
+
+ static HMODULE hMSCorEE;
+
+ if (!hMSCorEE) {
+ hMSCorEE = WszGetModuleHandle(MSCOREE_SHIM_W);
+ if (!hMSCorEE) {
+
+ _ASSERTE(!"InstallUnhandledExceptionFilter failed to get MSCorEE instance!");
+ STRESS_LOG0(LF_EH, LL_INFO10, "InstallUnhandledExceptionFilter failed to get MSCorEE instance!\n");
+
+ // Failure to get instance of mscoree.dll is fatal since that would imply
+ // that we cannot setup our UEF
+ return FALSE;
+ }
+ }
+
+ // Signature of GetCLRUEFManager exported by MSCorEE.dll
+ typedef HRESULT (*pGetCLRUEFManager)(REFIID riid,
+ IUnknown **ppUnk);
+
+ static pGetCLRUEFManager pFuncGetCLRUEFManager;
+
+ if (!pFuncGetCLRUEFManager) {
+
+ // Try to get function address via ordinal
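+ // (MAKEINTRESOURCEA(24) asks GetProcAddress for the export with ordinal 24 rather than
+ // by name; this assumes the GetCLRUEFManager ordinal is stable in mscoree.dll.)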
+ pFuncGetCLRUEFManager = (pGetCLRUEFManager)GetProcAddress(hMSCorEE, MAKEINTRESOURCEA(24));
+ if (!pFuncGetCLRUEFManager) {
+ _ASSERTE(!"InstallUnhandledExceptionFilter failed to get UEFManager!");
+ STRESS_LOG0(LF_EH, LL_INFO10, "InstallUnhandledExceptionFilter failed to find UEFManager!\n");
+ return FALSE;
+ }
+ }
+
+ HRESULT hr = (*pFuncGetCLRUEFManager)((REFIID)IID_IUEFManager, (IUnknown **)&g_pUEFManager);
+ if (FAILED(hr))
+ {
+ _ASSERTE(!"InstallUnhandledExceptionFilter failed to get IUEFManager*!");
+
+ STRESS_LOG0(LF_EH, LL_INFO10, "InstallUnhandledExceptionFilter failed to get IUEFManager*\n");
+
+ // Ensure the reference to chain manager is NULL
+ g_pUEFManager = NULL;
+
+ return FALSE;
+ }
+
+ // Register our UEF callback with the UEF chain manager
+ if (!g_pUEFManager->AddUnhandledExceptionFilter(COMUnhandledExceptionFilter, TRUE))
+ {
+ _ASSERTE(!"InstallUnhandledExceptionFilter failed to register the UEF callback!");
+
+ // Failed to register the UEF callback
+ STRESS_LOG0(LF_EH, LL_INFO10, "InstallUnhandledExceptionFilter failed to register the UEF callback!\n");
+
+ g_pUEFManager->Release();
+
+ // Ensure the reference to chain manager is NULL
+ g_pUEFManager = NULL;
+
+ return FALSE;
+ }
+
+ // Register our exception claiming callback with the UEF chain manager on preWin7
+ if (!RunningOnWin7() && !g_pUEFManager->GetWastonSxSManagerInstance()->RegisterExceptionClaimingCallback(IsExceptionFromManagedCodeCallback))
+ {
+ _ASSERTE(!"RegisterExceptionClaimingCallback failed to register the exception claiming callback!");
+
+ // Failed to register the exception claiming callback
+ STRESS_LOG0(LF_EH, LL_INFO10, "RegisterExceptionClaimingCallback failed to register the exception claiming callback!");
+
+ return FALSE;
+ }
+ }
+#else // !FEATURE_UEF_CHAINMANAGER
+ // We will be here only for CoreCLR on WLC since we don't
+ // register a UEF for SL.
+ if (g_pOriginalUnhandledExceptionFilter == FILTER_NOT_INSTALLED) {
+ g_pOriginalUnhandledExceptionFilter =
+ SetUnhandledExceptionFilter(COMUnhandledExceptionFilter);
+ // make sure it is set (i.e. it is not our special value indicating unset)
+ LOG((LF_EH, LL_INFO10, "InstallUnhandledExceptionFilter registered UEF with OS for CoreCLR!\n"));
+ }
+ _ASSERTE(g_pOriginalUnhandledExceptionFilter != FILTER_NOT_INSTALLED);
+#endif // FEATURE_UEF_CHAINMANAGER
+#endif // !FEATURE_PAL
+
+ // All done - successfully!
+ return TRUE;
+}
+
+void UninstallUnhandledExceptionFilter() {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+#ifndef FEATURE_PAL
+#ifdef FEATURE_UEF_CHAINMANAGER
+ if (g_pUEFManager)
+ {
+ if (!RunningOnWin7())
+ {
+ g_pUEFManager->GetWastonSxSManagerInstance()->UnregisterExceptionClaimingCallback(IsExceptionFromManagedCodeCallback);
+ }
+
+ g_pUEFManager->RemoveUnhandledExceptionFilter(COMUnhandledExceptionFilter);
+ g_pUEFManager->Release();
+ g_pUEFManager = NULL;
+ }
+#else // !FEATURE_UEF_CHAINMANAGER
+ // We will be here only for CoreCLR on WLC or on Mac SL.
+ if (g_pOriginalUnhandledExceptionFilter != FILTER_NOT_INSTALLED) {
+ SetUnhandledExceptionFilter(g_pOriginalUnhandledExceptionFilter);
+ g_pOriginalUnhandledExceptionFilter = FILTER_NOT_INSTALLED;
+ LOG((LF_EH, LL_INFO10, "UninstallUnhandledExceptionFilter unregistered UEF from OS for CoreCLR!\n"));
+ }
+#endif // FEATURE_UEF_CHAINMANAGER
+#endif // !FEATURE_PAL
+}
+
+//
+// Update the current throwable on the thread if necessary. If we're looking at one of our exceptions, and if the
+// current throwable on the thread is NULL, then we'll set it to something more useful based on the
+// LastThrownObject.
+//
+BOOL UpdateCurrentThrowable(PEXCEPTION_RECORD pExceptionRecord)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ BOOL useLastThrownObject = FALSE;
+
+ Thread* pThread = GetThread();
+
+ // GetThrowable needs cooperative.
+ GCX_COOP();
+
+ if ((pThread->GetThrowable() == NULL) && (pThread->LastThrownObject() != NULL))
+ {
+ // If GetThrowable is NULL and LastThrownObject is not, use lastThrownObject.
+ // In the current (June 05) implementation, this is only used to pass to
+ // NotifyAppDomainsOfUnhandledException, which needs to get a throwable
+ // from somewhere, with which to notify the AppDomains.
+ useLastThrownObject = TRUE;
+
+ if (IsComPlusException(pExceptionRecord))
+ {
+#ifndef WIN64EXCEPTIONS
+ OBJECTREF oThrowable = pThread->LastThrownObject();
+
+ // @TODO: we have a problem on Win64 where we won't have any place to
+ // store the throwable on an unhandled exception. Currently this
+ // only affects the managed debugging services, as they will try
+ // to inspect the thread to see what the throwable is on an unhandled
+ // exception. (But clearly it needs to be fixed ASAP.)
+ // We have the same problem in EEPolicy::LogFatalError().
+ LOG((LF_EH, LL_INFO100, "UpdateCurrentThrowable: setting throwable to %s\n", (oThrowable == NULL) ? "NULL" : oThrowable->GetTrueMethodTable()->GetDebugClassName()));
+ pThread->SafeSetThrowables(oThrowable);
+#endif // WIN64EXCEPTIONS
+ }
+ }
+
+ return useLastThrownObject;
+}
+
+//
+// COMUnhandledExceptionFilter is used to catch all unhandled exceptions.
+// The debugger will either handle the exception, attach a debugger, or
+// notify an existing attached debugger.
+//
+
+struct SaveIPFilterParam
+{
+ SLOT ExceptionEIP;
+};
+
+LONG SaveIPFilter(EXCEPTION_POINTERS* ep, LPVOID pv)
+{
+ WRAPPER_NO_CONTRACT;
+
+ SaveIPFilterParam *pParam = (SaveIPFilterParam *) pv;
+ pParam->ExceptionEIP = (SLOT)GetIP(ep->ContextRecord);
+ DefaultCatchFilterParam param(COMPLUS_EXCEPTION_EXECUTE_HANDLER);
+ return DefaultCatchFilter(ep, &param);
+}
+
+//------------------------------------------------------------------------------
+// Description
+// Does not call any previous UnhandledExceptionFilter. The assumption is that
+// either it is inappropriate to call it (because we have elected to rip the
+// process without transitioning completely to the base of the thread), or
+// the caller has already consulted the previously installed UnhandledExceptionFilter.
+//
+// So we know we are ripping and Watson is appropriate.
+//
+// **** Note*****
+// This is a stack-sensitive function if we have an unhandled SO.
+// Do not allocate more than a few bytes on the stack or we risk taking an
+// AV while trying to throw up Watson.
+
+// Parameters
+// pExceptionInfo -- information about the exception that caused the error.
+// If the error is not the result of an exception, pass NULL for this
+// parameter
+//
+// Returns
+ // EXCEPTION_CONTINUE_SEARCH -- we've done everything we're going to do with the exception.
+// As far as the runtime is concerned, the process is doomed.
+// EXCEPTION_CONTINUE_EXECUTION -- means a debugger "caught" the exception and
+// wants to continue running.
+// EXCEPTION_EXECUTE_HANDLER -- CoreCLR only, and only when not running as a UEF.
+// Returned only if the host has asked us to swallow unhandled exceptions on
+// managed threads in an AD they (the host) creates.
+//------------------------------------------------------------------------------
+LONG InternalUnhandledExceptionFilter_Worker(
+ EXCEPTION_POINTERS *pExceptionInfo) // Information about the exception
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+#ifdef _DEBUG
+ static int fBreakOnUEF = -1;
+ if (fBreakOnUEF==-1) fBreakOnUEF = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnUEF);
+ _ASSERTE(!fBreakOnUEF);
+#endif
+
+ STRESS_LOG2(LF_EH, LL_INFO10, "In InternalUnhandledExceptionFilter_Worker, Exception = %x, sp = %p\n",
+ pExceptionInfo->ExceptionRecord->ExceptionCode, GetCurrentSP());
+
+ // If we can't enter the EE, done.
+ if (g_fForbidEnterEE)
+ {
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker: g_fForbidEnterEE is TRUE\n"));
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+
+ if (GetEEPolicy()->GetActionOnFailure(FAIL_FatalRuntime) == eDisableRuntime)
+ {
+ ETaskType type = ::GetCurrentTaskType();
+ if (type != TT_UNKNOWN && type != TT_USER)
+ {
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker: calling EEPolicy::HandleFatalError\n"));
+ EEPolicy::HandleFatalError(COR_E_EXECUTIONENGINE, (UINT_PTR)GetIP(pExceptionInfo->ContextRecord), NULL, pExceptionInfo);
+ }
+ }
+
+ // We don't do anything when this is called from an unmanaged thread.
+ Thread *pThread = GetThread();
+
+#ifdef _DEBUG
+ static bool bBreakOnUncaught = false;
+ static int fBreakOnUncaught = 0;
+
+ if (!bBreakOnUncaught)
+ {
+ fBreakOnUncaught = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnUncaughtException);
+ bBreakOnUncaught = true;
+ }
+ if (fBreakOnUncaught != 0)
+ {
+ if (pExceptionInfo->ExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW)
+ {
+ // If we've got an uncaught SO, we don't have enough stack to pop up an assert dialog. So instead,
+ // loop infinitely; we can then attach a debugger and break in.
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker: Infinite loop on uncaught SO\n"));
+ for ( ;; )
+ {
+ }
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker: ASSERTING on uncaught\n"));
+ _ASSERTE(!"BreakOnUnCaughtException");
+ }
+ }
+#endif
+
+#ifdef _DEBUG_ADUNLOAD
+ printf("%x InternalUnhandledExceptionFilter_Worker: Called for %x\n",
+ ((pThread == NULL) ? NULL : pThread->GetThreadId()), pExceptionInfo->ExceptionRecord->ExceptionCode);
+ fflush(stdout);
+#endif
+
+ // This shouldn't be possible, but MSVC re-installs us... for now, just bail if this happens.
+ if (g_fNoExceptions)
+ {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ // Are we looking at a stack overflow here?
+ if ((pThread != NULL) && !pThread->DetermineIfGuardPagePresent())
+ {
+ g_fForbidEnterEE = true;
+ }
+
+#ifdef DEBUGGING_SUPPORTED
+
+ // Mark that this exception has gone unhandled. At the moment only the debugger will
+ // ever look at this flag. This should come before any user-visible side effect of an exception
+ // being unhandled as seen from managed code or from a debugger. These include the
+ // managed unhandled notification callback, execution of catch/finally clauses,
+ // receiving the managed debugger unhandled exception event,
+ // the OS sending the debugger 2nd pass native exception notification, etc.
+ //
+ // This needs to be done before the check for TSNC_ProcessedUnhandledException because it is perfectly
+ // legitimate (though rare) for the debugger to be inspecting exceptions which are nested in finally
+ // clauses that run after an unhandled exception has already occurred on the thread.
+ if ((pThread != NULL) && pThread->IsExceptionInProgress())
+ {
+ LOG((LF_EH, LL_INFO1000, "InternalUnhandledExceptionFilter_Worker: Set unhandled exception flag at %p\n",
+ pThread->GetExceptionState()->GetFlags() ));
+ pThread->GetExceptionState()->GetFlags()->SetUnhandled();
+ }
+#endif
+
+ // If we have already done unhandled exception processing for this thread, then
+ // simply return back. See comment in threads.h for details for the flag
+ // below.
+ //
+ if (pThread && (pThread->HasThreadStateNC(Thread::TSNC_ProcessedUnhandledException) || pThread->HasThreadStateNC(Thread::TSNC_AppDomainContainUnhandled)))
+ {
+#ifdef FEATURE_CORECLR
+ // This assert shouldn't be hit in CoreCLR since:
+ //
+ // 1) It has no concept of a managed entry point that is invoked by the shim. You can
+ // only run managed code via hosting APIs that will run code in non-default domains.
+ //
+ // 2) Managed threads cannot be created in DefaultDomain since no user code executes
+ // in default domain.
+ //
+ // So, if this is hit, something is not right!
+ if (pThread->HasThreadStateNC(Thread::TSNC_ProcessedUnhandledException))
+ {
+ _ASSERTE(!"How come a thread with TSNC_ProcessedUnhandledException state entered the UEF on CoreCLR?");
+ }
+#endif // FEATURE_CORECLR
+
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker: have already processed unhandled exception for this thread.\n"));
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker: Handling\n"));
+
+ struct Param : SaveIPFilterParam
+ {
+ EXCEPTION_POINTERS *pExceptionInfo;
+ Thread *pThread;
+ LONG retval;
+ BOOL fIgnore;
+ }; Param param;
+
+ param.ExceptionEIP = 0;
+ param.pExceptionInfo = pExceptionInfo;
+ param.pThread = pThread;
+ param.retval = EXCEPTION_CONTINUE_SEARCH; // Result of UEF filter.
+
+ // Is this a particular kind of exception that we'd like to ignore?
+ param.fIgnore = ((param.pExceptionInfo->ExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) ||
+ (param.pExceptionInfo->ExceptionRecord->ExceptionCode == STATUS_SINGLE_STEP));
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ // If fIgnore, then this is some sort of breakpoint, not a "normal" unhandled exception. But, the
+ // breakpoint is due to an int3 or debugger step instruction, not due to calling Debugger.Break()
+ TypeOfReportedError tore = pParam->fIgnore ? TypeOfReportedError::NativeBreakpoint : TypeOfReportedError::UnhandledException;
+
+ //
+ // If this exception is on a thread without managed code, then report this as a NativeThreadUnhandledException
+ //
+ // The thread object may exist if there was once managed code on the stack, but if the exception never
+ // bubbled through managed code, i.e. no managed code is on its stack, then this is a native unhandled exception.
+ //
+ // Ignore breakpoints and single-step.
+ if (!pParam->fIgnore)
+ { // Possibly interesting exception. Is there no Thread at all? Or, is there a Thread,
+ // but with no exception at all on it?
+ if ((pParam->pThread == NULL) ||
+ (pParam->pThread->IsThrowableNull() && pParam->pThread->IsLastThrownObjectNull()) )
+ { // Whatever this exception is, we don't know about it. Treat as Native.
+ tore = TypeOfReportedError::NativeThreadUnhandledException;
+ }
+ }
+
+ // If there is no throwable on the thread, go ahead and update from the last thrown exception if possible.
+ // Note: don't do this for exceptions that we're going to ignore below anyway...
+ BOOL useLastThrownObject = FALSE;
+ if (!pParam->fIgnore && (pParam->pThread != NULL))
+ {
+ useLastThrownObject = UpdateCurrentThrowable(pParam->pExceptionInfo->ExceptionRecord);
+ }
+
+#ifdef DEBUGGING_SUPPORTED
+
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker: Notifying Debugger...\n"));
+
+ // If we are using the throwable in LastThrownObject, mark that it is now unhandled
+ if ((pParam->pThread != NULL) && useLastThrownObject)
+ {
+ LOG((LF_EH, LL_INFO1000, "InternalUnhandledExceptionFilter_Worker: Set lto is unhandled\n"));
+ pParam->pThread->MarkLastThrownObjectUnhandled();
+ }
+
+ //
+ // We don't want the managed debugger to try to "intercept" breakpoints
+ // or singlestep exceptions.
+ // TODO: why does the exception handling code need to set this? Shouldn't the debugger code
+ // be able to determine what it can/should intercept?
+ if ((pParam->pThread != NULL) && pParam->pThread->IsExceptionInProgress() && pParam->fIgnore)
+ {
+ pParam->pThread->GetExceptionState()->GetFlags()->SetDebuggerInterceptNotPossible();
+ }
+
+
+ if (pParam->pThread != NULL)
+ {
+ BOOL fIsProcessTerminating = TRUE;
+
+#ifdef FEATURE_CORECLR
+ // In CoreCLR, we can be asked to not let an exception go unhandled on managed threads in a given AppDomain.
+ // If the exception reaches the top of the thread's stack, we simply deliver the AppDomain's UnhandledException event and
+ // return to the filter, instead of letting the process terminate because of the unhandled exception.
+
+ // Below is how we perform the check:
+ //
+ // 1) The flag is specified on the AD when it is created by the host and all managed threads created
+ // in such an AD will inherit the flag. For non-finalizer and non-threadpool threads, we check the flag against the thread.
+ // 2) The finalizer thread always switches to the AD of the object that is going to be finalized. Thus,
+ // while it won't have the flag specified, the AD it switches to will.
+ // 3) The threadpool thread also switches to the correct AD before executing the request. The thread won't have the
+ // flag specified, but the AD it switches to will.
+
+ // This code must only be exercised when running as a normal filter; returning
+ // EXCEPTION_EXECUTE_HANDLER is not valid if this code is being invoked from
+ // the UEF.
+ // Fortunately, we should never get into this case, since the thread flag about
+ // ignoring unhandled exceptions cannot be set on the default domain.
+
+ if (IsFinalizerThread() || (pParam->pThread->IsThreadPoolThread()))
+ fIsProcessTerminating = !(pParam->pThread->GetDomain()->IgnoreUnhandledExceptions());
+ else
+ fIsProcessTerminating = !(pParam->pThread->HasThreadStateNC(Thread::TSNC_IgnoreUnhandledExceptions));
+#endif // FEATURE_CORECLR
+
+#ifndef FEATURE_PAL
+ // Setup the watson bucketing details for UE processing.
+ // do this before notifying appdomains of the UE so if an AD attempts to
+ // retrieve the bucket params in the UE event handler it gets the correct data.
+ SetupWatsonBucketsForUEF(useLastThrownObject);
+#endif // !FEATURE_PAL
+
+ // Send notifications to the AppDomains.
+ NotifyAppDomainsOfUnhandledException(pParam->pExceptionInfo, NULL, useLastThrownObject, fIsProcessTerminating /*isTerminating*/);
+
+#ifdef FEATURE_CORECLR
+ // If the process is not terminating, then return back to the filter and ask it to execute
+ if (!fIsProcessTerminating)
+ {
+ pParam->retval = EXCEPTION_EXECUTE_HANDLER;
+ goto lDone;
+ }
+#endif // FEATURE_CORECLR
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker: Not collecting bucket information as thread object does not exist\n"));
+ }
+
+ // The AppDomain.UnhandledException event could have thrown an exception that would have gone unhandled in managed code.
+ // The runtime swallows all such exceptions. Hence, if we are not using LastThrownObject and the current LastThrownObject
+ // is not the same as the one in the active exception tracker (if available), then update the last thrown object.
+ if ((pParam->pThread != NULL) && (!useLastThrownObject))
+ {
+ GCX_COOP_NO_DTOR();
+
+ OBJECTREF oThrowable = pParam->pThread->GetThrowable();
+ if ((oThrowable != NULL) && (pParam->pThread->LastThrownObject() != oThrowable))
+ {
+ pParam->pThread->SafeSetLastThrownObject(oThrowable);
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker: Resetting the LastThrownObject as it appears to have changed.\n"));
+ }
+
+ GCX_COOP_NO_DTOR_END();
+ }
+
+ // Launch Watson and see if we want to debug the process
+ //
+ // Note that we need to do this before "ignoring" exceptions like
+ // breakpoints and single step exceptions
+ //
+
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker: Launching Watson at sp %p ...\n", GetCurrentSP()));
+
+ if (WatsonLastChance(pParam->pThread, pParam->pExceptionInfo, tore) == EXCEPTION_CONTINUE_EXECUTION)
+ {
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker: debugger ==> EXCEPTION_CONTINUE_EXECUTION\n"));
+ pParam->retval = EXCEPTION_CONTINUE_EXECUTION;
+ goto lDone;
+ }
+
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker: ... returned.\n"));
+#endif // DEBUGGING_SUPPORTED
+
+
+#ifdef FEATURE_EVENT_TRACE
+ DoReportForUnhandledException(pParam->pExceptionInfo);
+#endif // FEATURE_EVENT_TRACE
+
+ //
+ // Except for notifying debugger, ignore exception if unmanaged, or
+ // if it's a debugger-generated exception or user breakpoint exception.
+ //
+ if (tore.GetType() == TypeOfReportedError::NativeThreadUnhandledException)
+ {
+ pParam->retval = EXCEPTION_CONTINUE_SEARCH;
+ goto lDone;
+ }
+
+ if (pParam->fIgnore)
+ {
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker, ignoring the exception\n"));
+ pParam->retval = EXCEPTION_CONTINUE_SEARCH;
+ goto lDone;
+ }
+
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter_Worker: Calling DefaultCatchHandler\n"));
+
+
+ // Call our default catch handler to do the managed unhandled exception work.
+ DefaultCatchHandler(pParam->pExceptionInfo, NULL, useLastThrownObject,
+ TRUE /*isTerminating*/, FALSE /*isThreadBaseFilter*/, FALSE /*sendAppDomainEvents*/);
+
+lDone: ;
+ }
+ PAL_EXCEPT_FILTER (SaveIPFilter)
+ {
+ // Should never get here.
+#ifdef _DEBUG
+ char buffer[200];
+ sprintf_s(buffer, 200, "\nInternal error: Uncaught exception was thrown from IP = %p in UnhandledExceptionFilter_Worker on thread 0x%08x\n",
+ param.ExceptionEIP, ((GetThread() == NULL) ? NULL : GetThread()->GetThreadId()));
+ PrintToStdErrA(buffer);
+ _ASSERTE(!"Unexpected exception in UnhandledExceptionFilter_Worker");
+#endif
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE)
+ }
+ PAL_ENDTRY;
+
+ //if (param.fIgnore)
+ //{
+ // VC's try/catch ignores breakpoint or single step exceptions. We can not continue running.
+ // TerminateProcess(GetCurrentProcess(), pExceptionInfo->ExceptionRecord->ExceptionCode);
+ //}
+
+ return param.retval;
+} // LONG InternalUnhandledExceptionFilter_Worker()
+
+//------------------------------------------------------------------------------
+// Description
+// Calls our InternalUnhandledExceptionFilter for Watson at the appropriate
+// place in the chain.
+//
+// For non-side-by-side CLR's, we call everyone else's UEF first.
+//
+// For side-by-side CLR's, we call our own filter first. This is primarily
+// so Whidbey's UEF won't put up a second dialog box. In exchange,
+// side-by-side CLR's won't put up UI's unless the EH really came
+// from that instance's managed code.
+//
+// Parameters
+// pExceptionInfo -- information about the exception that caused the error.
+// If the error is not the result of an exception, pass NULL for this
+// parameter
+//
+// Returns
+// EXCEPTION_CONTINUE_SEARCH -- we've done all we will with the exception.
+// As far as the runtime is concerned, the process is doomed.
+// EXCEPTION_CONTINUE_EXECUTION -- means a debugger "caught" the exception and
+// wants to continue running.
+//------------------------------------------------------------------------------
+LONG InternalUnhandledExceptionFilter(
+ EXCEPTION_POINTERS *pExceptionInfo) // Information about the exception
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+ // We don't need to be SO-robust for an unhandled exception
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter: at sp %p.\n", GetCurrentSP()));
+
+ // Side-by-side UEF: Calls ours first, then the rest (unless we put up a UI for
+ // the exception.)
+
+ LONG retval = InternalUnhandledExceptionFilter_Worker(pExceptionInfo); // Result of UEF filter.
+
+ // Keep looking, or done?
+ if (retval != EXCEPTION_CONTINUE_SEARCH)
+ { // done.
+ return retval;
+ }
+
+ BOOL fShouldOurUEFDisplayUI = ShouldOurUEFDisplayUI(pExceptionInfo);
+
+ // If this is a managed exception thrown by this instance of the CLR, the exception is no one's
+ // business but ours (nudge, nudge: Whidbey). Break the UEF chain at this point.
+ if (fShouldOurUEFDisplayUI)
+ {
+ return retval;
+ }
+
+ // Chaining back to previous UEF handler could be a potential security risk. See
+ // http://uninformed.org/index.cgi?v=4&a=5&p=1 for details. We are not alone in
+ // stopping the chain - CRT (as of Orcas) is also doing that.
+ //
+ // The change below applies to a thread that starts in native mode and transitions to managed.
+
+ // Let us assume the process loaded two CoreCLRs, C1 and C2, in that order. Thus, in the UEF chain
+ // (assuming no other entity set up their UEF), C2's UEF will be the topmost.
+ //
+ // Now, assume the stack looks like the following (stack grows down):
+ //
+ // Native frame
+ // Managed Frame (C1)
+ // Managed Frame (C2)
+ // Managed Frame (C1)
+ // Managed Frame (C2)
+ // Managed Frame (C1)
+ //
+ // Suppose an exception is thrown in C1 instance in the last managed frame and it goes unhandled. Eventually
+ // it will reach the OS which will invoke the UEF. Note that the topmost UEF belongs to C2 instance and it
+ // will start processing the exception. C2's UEF could return EXCEPTION_CONTINUE_SEARCH to indicate
+ // that we should hand off the processing to the last installed UEF. In the example above, we would hand off
+ // control to the UEF of the CoreCLR instance that actually threw the exception today. In reality, it
+ // could be some unknown code too.
+ //
+ // Not chaining back to the last UEF, in the case of this example, would imply that certain notifications
+ // (e.g. Unhandled Exception Notification to the AppDomain) specific to the instance that raised the exception
+ // will not get fired. However, similar behavior can happen today if another UEF sits between
+ // C1 and C2 and it may not call back to C1, or may perhaps just terminate the process.
+ //
+ // For CoreCLR, this will not be an issue. See
+ // http://sharepoint/sites/clros/Shared%20Documents/Design%20Documents/EH/Chaining%20in%20%20UEF%20-%20One%20Pager.docx
+ // for details.
+ //
+ // Note: Also see the conditional UEF registration with the OS in EEStartupHelper.
+
+#ifdef FEATURE_CORECLR
+ // We would be here only on CoreCLR for WLC since we don't register
+ // the UEF with the OS for SL.
+ if (g_pOriginalUnhandledExceptionFilter != FILTER_NOT_INSTALLED
+ && g_pOriginalUnhandledExceptionFilter != NULL)
+ {
+ STRESS_LOG1(LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter: Not chaining back to previous UEF at address %p on CoreCLR!\n", g_pOriginalUnhandledExceptionFilter);
+ }
+#endif // FEATURE_CORECLR
+
+ return retval;
+
+} // LONG InternalUnhandledExceptionFilter()
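+// Illustrative sketch only (the real registration happens conditionally in
+// EEStartupHelper, as noted above): a host wires this processing to the OS roughly as
+//
+//     LPTOP_LEVEL_EXCEPTION_FILTER pPrev =
+//         SetUnhandledExceptionFilter(COMUnhandledExceptionFilter);
+//
+// The previous filter is remembered in g_pOriginalUnhandledExceptionFilter but, per
+// the discussion above, CoreCLR deliberately does not chain back to it.
+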
+
+// This filter is used to trigger unhandled exception processing for the entrypoint thread
+// in case an exception goes unhandled from it. This makes us independent of the OS
+// UEF mechanism to invoke our registered UEF to trigger CLR-specific unhandled exception
+// processing, since that can be skipped if another UEF is registered over ours and does not chain back.
+LONG EntryPointFilter(PEXCEPTION_POINTERS pExceptionInfo, PVOID _pData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ LONG ret = -1;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return EXCEPTION_CONTINUE_SEARCH;);
+
+ // Invoke the UEF worker to perform unhandled exception processing
+ ret = InternalUnhandledExceptionFilter_Worker (pExceptionInfo);
+
+ Thread* pThread = GetThread();
+ if (pThread)
+ {
+ // Set the flag that we have done unhandled exception processing for this thread
+ // so that we don't duplicate the effort in the UEF.
+ //
+ // For details on this flag, refer to threads.h.
+ LOG((LF_EH, LL_INFO100, "EntryPointFilter: setting TSNC_ProcessedUnhandledException\n"));
+ pThread->SetThreadStateNC(Thread::TSNC_ProcessedUnhandledException);
+ }
+
+#ifdef FEATURE_UEF_CHAINMANAGER
+ if (g_pUEFManager && (ret == EXCEPTION_CONTINUE_SEARCH))
+ {
+ // Since the "UEF" of this runtime instance didnt handle the exception,
+ // invoke the other registered UEF callbacks as well
+ ret = g_pUEFManager->InvokeUEFCallbacks(pExceptionInfo);
+ }
+#endif // FEATURE_UEF_CHAINMANAGER
+
+ END_SO_INTOLERANT_CODE;
+
+ return ret;
+}
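+
+// Illustrative sketch only (names of the guarded calls are hypothetical): the
+// entrypoint thread is expected to be wrapped with this filter via the PAL SEH
+// macros, in the same pattern used with SaveIPFilter above:
+//
+//     PAL_TRY(PVOID, pv, NULL)
+//     {
+//         RunEntryPoint();   // hypothetical call into the managed entrypoint
+//     }
+//     PAL_EXCEPT_FILTER(EntryPointFilter)
+//     {
+//         // Reached only if the filter returned EXCEPTION_EXECUTE_HANDLER.
+//     }
+//     PAL_ENDTRY;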
+
+//------------------------------------------------------------------------------
+// Description
+// The actual UEF. Defers to InternalUnhandledExceptionFilter.
+//
+// Updated to be in its own code segment named CLR_UEF_SECTION_NAME to prevent
+// "VirtualProtect" calls from affecting its pages and thus, its
+// invocation. For details, see the comment within the implementation of
+// CExecutionEngine::ClrVirtualProtect.
+//
+// Parameters
+// pExceptionInfo -- information about the exception
+//
+// Returns
+// the result of calling InternalUnhandledExceptionFilter
+//------------------------------------------------------------------------------
+#if defined(FEATURE_CORECLR) && !defined(FEATURE_PAL)
+#pragma code_seg(push, uef, CLR_UEF_SECTION_NAME)
+#endif // FEATURE_CORECLR && !FEATURE_PAL
+LONG __stdcall COMUnhandledExceptionFilter( // EXCEPTION_CONTINUE_SEARCH or EXCEPTION_CONTINUE_EXECUTION
+ EXCEPTION_POINTERS *pExceptionInfo) // Information about the exception.
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+ // We don't need to be SO-robust for an unhandled exception
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LONG retVal = EXCEPTION_CONTINUE_SEARCH;
+
+ // In case of unhandled exceptions on managed threads, we kick in our UE processing at the thread base and also invoke
+ // UEF callbacks that various runtimes have registered with us. Once the callbacks return, we return back to the OS
+ // to give other registered UEFs a chance to do their custom processing.
+ //
+ // If the topmost UEF registered with the OS belongs to mscoruef.dll (or someone chained back to its UEF callback),
+ // it will start invoking the UEF callbacks (which is this function, COMUnhandledExceptionFilter) registered by
+ // various runtimes again.
+ //
+ // Thus, check if this UEF has already been invoked in the context of this thread and runtime and, if so, don't invoke it again.
+ if (GetThread() && (GetThread()->HasThreadStateNC(Thread::TSNC_ProcessedUnhandledException) ||
+ GetThread()->HasThreadStateNC(Thread::TSNC_AppDomainContainUnhandled)))
+ {
+ LOG((LF_EH, LL_INFO10, "Exiting COMUnhandledExceptionFilter since we have already done UE processing for this thread!\n"));
+ return retVal;
+ }
+
+#ifdef _DEBUG
+ // From V4 onwards, we will reach here in the UEF only under the following conditions:
+ //
+ // 1) Faulting address is in native code on a reverse pinvoke thread. An example is an exception that escaped
+ // out of the reverse pinvoke thread but was caught in the native part of the thread. The native part then
+ // had another exception that went unhandled. The difference between this and (3) below is that
+ // we have a thread object here but not in (3).
+ //
+ // An exception from PInvoke, that is never caught/rethrown in managed code and goes unhandled, also falls
+ // in this category.
+ //
+ // 2) The exception escaped out of a reverse pinvoke thread and went unhandled.
+ //
+ // 3) Faulting thread was never seen by the runtime. An example is another native thread,
+ // created by user code, that had an unhandled exception.
+ //
+ // 4) A corrupting exception may become unhandled.
+ //
+ // This is applicable to CoreCLR as well. We won't enter the UEF (and thus come here) on Silverlight,
+ // but we could when being used by WLC, as we register the UEF for them.
+
+ // Assert these conditions here - we shouldn't be here for any other unhandled exception processing.
+ Thread *pThread = GetThread();
+ _ASSERTE((pThread == NULL) || // condition 3
+ (pThread->GetExceptionState()->IsExceptionInProgress() &&
+ pThread->GetExceptionState()->GetFlags()->ReversePInvokeEscapingException()) || // condition 2
+ ((!ExecutionManager::IsManagedCode((PCODE)pExceptionInfo->ExceptionRecord->ExceptionAddress))) // condition 1
+ CORRUPTING_EXCEPTIONS_ONLY(||(CEHelper::IsProcessCorruptedStateException(pExceptionInfo->ExceptionRecord->ExceptionCode)) ||) // condition 4
+ CORRUPTING_EXCEPTIONS_ONLY((CEHelper::IsProcessCorruptedStateException(pThread->GetThrowable())))); // condition 4
+#endif // _DEBUG
+
+ retVal = InternalUnhandledExceptionFilter(pExceptionInfo);
+
+ // If thread object exists, mark that this thread has done unhandled exception processing
+ if (GetThread())
+ {
+ LOG((LF_EH, LL_INFO100, "COMUnhandledExceptionFilter: setting TSNC_ProcessedUnhandledException\n"));
+ GetThread()->SetThreadStateNC(Thread::TSNC_ProcessedUnhandledException);
+ }
+
+ return retVal;
+} // LONG __stdcall COMUnhandledExceptionFilter()
+#if defined(FEATURE_CORECLR) && !defined(FEATURE_PAL)
+#pragma code_seg(pop, uef)
+#endif // FEATURE_CORECLR && !FEATURE_PAL
+
+void PrintStackTraceToStdout();
+
+static SString GetExceptionMessageWrapper(Thread* pThread, OBJECTREF throwable)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ StackSString result;
+
+ INSTALL_NESTED_EXCEPTION_HANDLER(pThread->GetFrame());
+ GetExceptionMessage(throwable, result);
+ UNINSTALL_NESTED_EXCEPTION_HANDLER();
+
+ return result;
+}
+
+void STDMETHODCALLTYPE
+DefaultCatchHandlerExceptionMessageWorker(Thread* pThread,
+ OBJECTREF throwable,
+ __inout_ecount(buf_size) WCHAR *buf,
+ const int buf_size)
+{
+ if (throwable != NULL)
+ {
+ PrintToStdErrA("\n");
+
+ if (FAILED(UtilLoadResourceString(CCompRC::Error, IDS_EE_UNHANDLED_EXCEPTION, buf, buf_size)))
+ {
+ wcsncpy_s(buf, buf_size, SZ_UNHANDLED_EXCEPTION, SZ_UNHANDLED_EXCEPTION_CHARLEN);
+ }
+
+ PrintToStdErrW(buf);
+ PrintToStdErrA(" ");
+
+ SString message = GetExceptionMessageWrapper(pThread, throwable);
+
+ if (!message.IsEmpty())
+ {
+ NPrintToStdErrW(message, message.GetCount());
+ }
+
+ PrintToStdErrA("\n");
+ }
+}
+
+//******************************************************************************
+// DefaultCatchHandler -- common processing for otherwise uncaught exceptions.
+//******************************************************************************
+void STDMETHODCALLTYPE
+DefaultCatchHandler(PEXCEPTION_POINTERS pExceptionPointers,
+ OBJECTREF *pThrowableIn,
+ BOOL useLastThrownObject,
+ BOOL isTerminating,
+ BOOL isThreadBaseFilter,
+ BOOL sendAppDomainEvents)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // <TODO> The strings in here should be translatable.</TODO>
+ LOG((LF_EH, LL_INFO10, "In DefaultCatchHandler\n"));
+
+#if defined(_DEBUG)
+ static bool bHaveInitialized_BreakOnUncaught = false;
+ enum BreakOnUncaughtAction {
+ breakOnNone = 0, // Default.
+ breakOnAll = 1, // Always break.
+ breakSelective = 2, // Break on exceptions application can catch,
+ // but not ThreadAbort, AppdomainUnload
+ breakOnMax = 2
+ };
+ static DWORD breakOnUncaught = breakOnNone;
+
+ if (!bHaveInitialized_BreakOnUncaught)
+ {
+ breakOnUncaught = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnUncaughtException);
+ if (breakOnUncaught > breakOnMax)
+ { // Could turn it off completely, or turn it into a legal value. Since it is debug code, be accommodating.
+ breakOnUncaught = breakOnAll;
+ }
+ bHaveInitialized_BreakOnUncaught = true;
+ }
+
+ if (breakOnUncaught == breakOnAll)
+ {
+ _ASSERTE(!"BreakOnUnCaughtException");
+ }
+
+ int suppressSelectiveBreak = false; // to filter for the case where breakOnUncaught == "2"
+#endif
+
+ Thread *pThread = GetThread();
+
+ // The following reduces a window for a race during shutdown.
+ if (!pThread)
+ {
+ _ASSERTE(g_fEEShutDown);
+ return;
+ }
+
+ _ASSERTE(pThread);
+
+ ThreadPreventAsyncHolder prevAsync;
+
+ GCX_COOP();
+
+ OBJECTREF throwable;
+
+ if (pThrowableIn != NULL)
+ {
+ throwable = *pThrowableIn;
+ }
+ else if (useLastThrownObject)
+ {
+ throwable = pThread->LastThrownObject();
+ }
+ else
+ {
+ throwable = pThread->GetThrowable();
+ }
+
+ // If we've got no managed object, then we can't send an event or print a message, so we just return.
+ if (throwable == NULL)
+ {
+#ifdef LOGGING
+ if (!pThread->IsRudeAbortInitiated())
+ {
+ LOG((LF_EH, LL_INFO10, "Unhandled exception, throwable == NULL\n"));
+ }
+#endif
+
+ return;
+ }
+
+#ifdef _DEBUG
+ DWORD unbreakableLockCount = 0;
+ // Do not care about lock check for unhandled exception.
+ while (pThread->HasUnbreakableLock())
+ {
+ pThread->DecUnbreakableLockCount();
+ unbreakableLockCount ++;
+ }
+ BOOL fOwnsSpinLock = pThread->HasThreadStateNC(Thread::TSNC_OwnsSpinLock);
+ if (fOwnsSpinLock)
+ {
+ pThread->ResetThreadStateNC(Thread::TSNC_OwnsSpinLock);
+ }
+#endif
+
+ GCPROTECT_BEGIN(throwable);
+ //BOOL IsStackOverflow = (throwable->GetTrueMethodTable() == g_pStackOverflowExceptionClass);
+ BOOL IsOutOfMemory = (throwable->GetTrueMethodTable() == g_pOutOfMemoryExceptionClass);
+
+ // Notify the AppDomain that we have taken an unhandled exception. Can't notify of stack overflow -- guard
+ // page is not yet reset.
+ BOOL SentEvent = FALSE;
+
+ // Send up the unhandled exception appdomain event.
+ if (sendAppDomainEvents)
+ {
+ SentEvent = NotifyAppDomainsOfUnhandledException(pExceptionPointers, &throwable, useLastThrownObject, isTerminating);
+ }
+
+ const int buf_size = 128;
+ WCHAR buf[buf_size];
+
+ // See detailed explanation of this flag in threads.cpp. But the basic idea is that we already
+ // reported the exception in the AppDomain where it went unhandled, so we don't need to report
+ // it at the process level.
+ // Print the unhandled exception message.
+ if (!pThread->HasThreadStateNC(Thread::TSNC_AppDomainContainUnhandled))
+ {
+ EX_TRY
+ {
+ EX_TRY
+ {
+ // If this isn't ThreadAbortException, we want to print a stack trace to indicate why this thread abruptly
+ // terminated. Exceptions kill threads rarely enough that an uncached name check is reasonable.
+ BOOL dump = TRUE;
+
+ if (/*IsStackOverflow ||*/
+ !pThread->DetermineIfGuardPagePresent() ||
+ IsOutOfMemory)
+ {
+ // We have to be very careful. If we walk off the end of the stack, the process will just
+ // die. e.g. IsAsyncThreadException() and Exception.ToString both consume too much stack -- and can't
+ // be called here.
+ dump = FALSE;
+ PrintToStdErrA("\n");
+
+ if (FAILED(UtilLoadStringRC(IDS_EE_UNHANDLED_EXCEPTION, buf, buf_size)))
+ {
+ wcsncpy_s(buf, COUNTOF(buf), SZ_UNHANDLED_EXCEPTION, SZ_UNHANDLED_EXCEPTION_CHARLEN);
+ }
+
+ PrintToStdErrW(buf);
+
+ if (IsOutOfMemory)
+ {
+ PrintToStdErrA(" OutOfMemoryException.\n");
+ }
+ else
+ {
+ PrintToStdErrA(" StackOverflowException.\n");
+ }
+ }
+ else if (!CanRunManagedCode(LoaderLockCheck::None))
+ {
+ // Well, if we can't enter the runtime, we very well can't get the exception message.
+ dump = FALSE;
+ }
+ else if (SentEvent || IsAsyncThreadException(&throwable))
+ {
+ // We don't print anything on async exceptions, like ThreadAbort.
+ dump = FALSE;
+ INDEBUG(suppressSelectiveBreak=TRUE);
+ }
+ else if (isThreadBaseFilter && IsExceptionOfType(kAppDomainUnloadedException, &throwable))
+ {
+ // AppdomainUnloadedException is also a special case.
+ dump = FALSE;
+ INDEBUG(suppressSelectiveBreak=TRUE);
+ }
+
+ // Finally, should we print the message?
+ if (dump)
+ {
+ // this is stack heavy because of the CQuickWSTRBase, so we break it out
+ // and don't have to carry the weight through our other code paths.
+ DefaultCatchHandlerExceptionMessageWorker(pThread, throwable, buf, buf_size);
+ }
+ }
+ EX_CATCH
+ {
+ LOG((LF_EH, LL_INFO10, "Exception occurred while processing uncaught exception\n"));
+ UtilLoadStringRC(IDS_EE_EXCEPTION_TOSTRING_FAILED, buf, buf_size);
+ PrintToStdErrA("\n ");
+ PrintToStdErrW(buf);
+ PrintToStdErrA("\n");
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+ EX_CATCH
+ { // If we got here, we can't even print the localized error message. Print non-localized.
+ LOG((LF_EH, LL_INFO10, "Exception occurred while logging processing uncaught exception\n"));
+ PrintToStdErrA("\n Error: Can't print exception string because Exception.ToString() failed.\n");
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+#if defined(_DEBUG)
+ if ((breakOnUncaught == breakSelective) && !suppressSelectiveBreak)
+ {
+ _ASSERTE(!"BreakOnUnCaughtException");
+ }
+#endif // defined(_DEBUG)
+
+ FlushLogging(); // Flush any logging output
+ GCPROTECT_END();
+
+#ifdef _DEBUG
+ // Do not care about lock check for unhandled exception.
+ while (unbreakableLockCount)
+ {
+ pThread->IncUnbreakableLockCount();
+ unbreakableLockCount --;
+ }
+ if (fOwnsSpinLock)
+ {
+ pThread->SetThreadStateNC(Thread::TSNC_OwnsSpinLock);
+ }
+#endif
+} // DefaultCatchHandler()
+
+
+//******************************************************************************
+// NotifyAppDomainsOfUnhandledException -- common processing for otherwise uncaught exceptions.
+//******************************************************************************
+BOOL NotifyAppDomainsOfUnhandledException(
+ PEXCEPTION_POINTERS pExceptionPointers,
+ OBJECTREF *pThrowableIn,
+ BOOL useLastThrownObject,
+ BOOL isTerminating)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ static int fBreakOnNotify = -1;
+ if (fBreakOnNotify==-1) fBreakOnNotify = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnNotify);
+ _ASSERTE(!fBreakOnNotify);
+#endif
+
+ BOOL SentEvent = FALSE;
+
+ LOG((LF_EH, LL_INFO10, "In NotifyAppDomainsOfUnhandledException\n"));
+
+ Thread *pThread = GetThread();
+
+ // The following reduces a window for a race during shutdown.
+ if (!pThread)
+ {
+ _ASSERTE(g_fEEShutDown);
+ return FALSE;
+ }
+
+ // See detailed explanation of this flag in threads.cpp. But the basic idea is that we already
+ // reported the exception in the AppDomain where it went unhandled, so we don't need to report
+ // it at the process level.
+ if (pThread->HasThreadStateNC(Thread::TSNC_AppDomainContainUnhandled))
+ return FALSE;
+
+ ThreadPreventAsyncHolder prevAsync;
+
+ GCX_COOP();
+
+ OBJECTREF throwable;
+
+ if (pThrowableIn != NULL)
+ {
+ throwable = *pThrowableIn;
+ }
+ else if (useLastThrownObject)
+ {
+ throwable = pThread->LastThrownObject();
+ }
+ else
+ {
+ throwable = pThread->GetThrowable();
+ }
+
+ // If we've got no managed object, then we can't send an event, so we just return.
+ if (throwable == NULL)
+ {
+ return FALSE;
+ }
+
+#ifdef _DEBUG
+ DWORD unbreakableLockCount = 0;
+ // Do not care about lock check for unhandled exception.
+ while (pThread->HasUnbreakableLock())
+ {
+ pThread->DecUnbreakableLockCount();
+ unbreakableLockCount ++;
+ }
+ BOOL fOwnsSpinLock = pThread->HasThreadStateNC(Thread::TSNC_OwnsSpinLock);
+ if (fOwnsSpinLock)
+ {
+ pThread->ResetThreadStateNC(Thread::TSNC_OwnsSpinLock);
+ }
+#endif
+
+ GCPROTECT_BEGIN(throwable);
+ //BOOL IsStackOverflow = (throwable->GetTrueMethodTable() == g_pStackOverflowExceptionClass);
+
+ // Notify the AppDomain that we have taken an unhandled exception. Can't notify of stack overflow -- guard
+ // page is not yet reset.
+
+ // Send up the unhandled exception appdomain event.
+ //
+ // If we can't run managed code, we can't deliver the event. Nor do we attempt to deliver the event in stack
+ // overflow or OOM conditions.
+ if (/*!IsStackOverflow &&*/
+ pThread->DetermineIfGuardPagePresent() &&
+ CanRunManagedCode(LoaderLockCheck::None))
+ {
+
+ // x86 only
+#if !defined(WIN64EXCEPTIONS)
+ // If the Thread object's exception state's exception pointers
+ // is null, use the passed-in pointer.
+ BOOL bSetPointers = FALSE;
+
+ ThreadExceptionState* pExceptionState = pThread->GetExceptionState();
+
+ if (pExceptionState->GetExceptionPointers() == NULL)
+ {
+ bSetPointers = TRUE;
+ pExceptionState->SetExceptionPointers(pExceptionPointers);
+ }
+
+#endif // !defined(WIN64EXCEPTIONS)
+
+ INSTALL_NESTED_EXCEPTION_HANDLER(pThread->GetFrame());
+
+ // This guy will never throw, but it will need a spot to store
+ // any nested exceptions it might find.
+ SentEvent = AppDomain::OnUnhandledException(&throwable, isTerminating);
+
+ UNINSTALL_NESTED_EXCEPTION_HANDLER();
+
+#if !defined(WIN64EXCEPTIONS)
+
+ if (bSetPointers)
+ {
+ pExceptionState->SetExceptionPointers(NULL);
+ }
+
+#endif // !defined(WIN64EXCEPTIONS)
+
+ }
+
+ GCPROTECT_END();
+
+#ifdef _DEBUG
+ // Do not care about lock check for unhandled exception.
+ while (unbreakableLockCount)
+ {
+ pThread->IncUnbreakableLockCount();
+ unbreakableLockCount --;
+ }
+ if (fOwnsSpinLock)
+ {
+ pThread->SetThreadStateNC(Thread::TSNC_OwnsSpinLock);
+ }
+#endif
+
+ return SentEvent;
+
+} // NotifyAppDomainsOfUnhandledException()
+
+
+//******************************************************************************
+//
+// ThreadBaseExceptionFilter_Worker
+//
+// The return from the function can be EXCEPTION_CONTINUE_SEARCH to let an
+// exception go unhandled. This is the default behaviour (starting in v2.0),
+// but can be overridden by hosts or by config file.
+// When the behaviour is overridden, the return will be EXCEPTION_EXECUTE_HANDLER
+// to swallow the exception.
+// Note that some exceptions are always swallowed: ThreadAbort, and AppDomainUnload.
+//
+// Parameters:
+// pExceptionInfo EXCEPTION_POINTERS for current exception
+// pvParam A ThreadBaseExceptionFilterParam; its location field tells the context from which we were called.
+// swallowing Are we swallowing unhandled exceptions based on policy?
+//
+// Returns:
+// EXCEPTION_CONTINUE_SEARCH Generally returns this to let the exception go unhandled.
+// EXCEPTION_EXECUTE_HANDLER May return this to swallow the exception.
+//
+static LONG ThreadBaseExceptionFilter_Worker(PEXCEPTION_POINTERS pExceptionInfo,
+ PVOID pvParam,
+ BOOL swallowing)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_EH, LL_INFO100, "ThreadBaseExceptionFilter_Worker: Enter\n"));
+
+ ThreadBaseExceptionFilterParam *pParam = (ThreadBaseExceptionFilterParam *) pvParam;
+ UnhandledExceptionLocation location = pParam->location;
+
+ _ASSERTE(!g_fNoExceptions);
+
+ Thread* pThread = GetThread();
+ _ASSERTE(pThread);
+
+#ifdef _DEBUG
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnUncaughtException) &&
+ !(swallowing && (SwallowUnhandledExceptions() || ExceptionIsAlwaysSwallowed(pExceptionInfo))) &&
+ !(location == ClassInitUnhandledException && pThread->IsRudeAbortInitiated()))
+ _ASSERTE(!"BreakOnUnCaughtException");
+#endif
+
+ BOOL doDefault = ((location != ClassInitUnhandledException) &&
+ (pExceptionInfo->ExceptionRecord->ExceptionCode != STATUS_BREAKPOINT) &&
+ (pExceptionInfo->ExceptionRecord->ExceptionCode != STATUS_SINGLE_STEP));
+
+ if (swallowing)
+ {
+ // The default handling for versions v1.0 and v1.1 was to swallow unhandled exceptions.
+ // With v2.0, the default is to let them go unhandled. Hosts & config files can modify the default
+ // to retain the v1.1 behaviour.
+ // Should we swallow this exception, or let it continue up and be unhandled?
+ if (!SwallowUnhandledExceptions())
+ {
+ // No, don't swallow unhandled exceptions...
+
+ // ...except if the exception is of a type that is always swallowed (ThreadAbort, AppDomainUnload)...
+ if (ExceptionIsAlwaysSwallowed(pExceptionInfo))
+ { // ...return EXCEPTION_EXECUTE_HANDLER to swallow the exception anyway.
+ return EXCEPTION_EXECUTE_HANDLER;
+ }
+
+ #ifdef _DEBUG
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnUncaughtException))
+ _ASSERTE(!"BreakOnUnCaughtException");
+ #endif
+
+ // ...so, continue search. i.e. let the exception go unhandled.
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+ }
+
+#ifdef DEBUGGING_SUPPORTED
+ // If there's a debugger (and not doing a thread abort), give the debugger a shot at the exception.
+ // If the debugger is going to try to continue the exception, it will return ContinueException (which
+ // we see here as EXCEPTION_CONTINUE_EXECUTION).
+ if (!pThread->IsAbortRequested())
+ {
+ // TODO: do we really need this check? I don't think we do
+ if(CORDebuggerAttached())
+ {
+ if (NotifyDebuggerLastChance(pThread, pExceptionInfo, FALSE) == EXCEPTION_CONTINUE_EXECUTION)
+ {
+ LOG((LF_EH, LL_INFO100, "ThreadBaseExceptionFilter_Worker: EXCEPTION_CONTINUE_EXECUTION\n"));
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+ }
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ // Do default handling, but ignore breakpoint exceptions and class init exceptions
+ if (doDefault)
+ {
+ LOG((LF_EH, LL_INFO100, "ThreadBaseExceptionFilter_Worker: Calling DefaultCatchHandler\n"));
+
+ BOOL useLastThrownObject = UpdateCurrentThrowable(pExceptionInfo->ExceptionRecord);
+
+ DefaultCatchHandler(pExceptionInfo,
+ NULL,
+ useLastThrownObject,
+ FALSE,
+ location == ManagedThread || location == ThreadPoolThread || location == FinalizerThread);
+ }
+
+ // Return EXCEPTION_EXECUTE_HANDLER to swallow the exception.
+ return (swallowing
+ ? EXCEPTION_EXECUTE_HANDLER
+ : EXCEPTION_CONTINUE_SEARCH);
+} // LONG ThreadBaseExceptionFilter_Worker()
+
+
+// This is the filter for new managed threads, for threadpool threads, and for
+// running finalizer methods.
+LONG ThreadBaseExceptionSwallowingFilter(PEXCEPTION_POINTERS pExceptionInfo, PVOID pvParam)
+{
+ return ThreadBaseExceptionFilter_Worker(pExceptionInfo, pvParam, /*swallowing=*/true);
+}
+
+// This was the filter for new managed threads in v1.0 and v1.1. Now used
+// for delegate invoke, various things in the thread pool, and the
+// class init handler.
+LONG ThreadBaseExceptionFilter(PEXCEPTION_POINTERS pExceptionInfo, PVOID pvParam)
+{
+ return ThreadBaseExceptionFilter_Worker(pExceptionInfo, pvParam, /*swallowing=*/false);
+}
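+
+// Illustrative sketch only: callers pass the filter its UnhandledExceptionLocation
+// through a ThreadBaseExceptionFilterParam (the guarded call is hypothetical):
+//
+//     ThreadBaseExceptionFilterParam param;
+//     param.location = ManagedThread;
+//     PAL_TRY(ThreadBaseExceptionFilterParam *, pParam, &param)
+//     {
+//         RunThreadBody();   // hypothetical thread-start body
+//     }
+//     PAL_EXCEPT_FILTER(ThreadBaseExceptionSwallowingFilter)
+//     {
+//         // Exception was swallowed per policy (e.g. ThreadAbort, AppDomainUnload).
+//     }
+//     PAL_ENDTRY;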
+
+
+// This is the filter that we install when transitioning an AppDomain at the base of a managed
+// thread. Nothing interesting will get swallowed after us. So we never decide to continue
+// the search. Instead, we let it go unhandled and get the Watson report and debugging
+// experience before the AD transition has an opportunity to catch/rethrow and lose all the
+// relevant information.
+LONG ThreadBaseExceptionAppDomainFilter(EXCEPTION_POINTERS *pExceptionInfo, PVOID pvParam)
+{
+ LONG ret = ThreadBaseExceptionSwallowingFilter(pExceptionInfo, pvParam);
+
+ if (ret != EXCEPTION_CONTINUE_SEARCH)
+ return ret;
+
+ // Consider the exception to be unhandled
+ return InternalUnhandledExceptionFilter_Worker(pExceptionInfo);
+}
+
+// Filter for calls out from the 'vm' to native code, if there's a possibility of SEH exceptions
+// in the native code.
+LONG CallOutFilter(PEXCEPTION_POINTERS pExceptionInfo, PVOID pv)
+{
+ CallOutFilterParam *pParam = static_cast<CallOutFilterParam *>(pv);
+
+ _ASSERTE(pParam->OneShot && (pParam->OneShot == TRUE || pParam->OneShot == FALSE));
+
+ if (pParam->OneShot == TRUE)
+ {
+ pParam->OneShot = FALSE;
+
+ // Replace whatever SEH exception is in flight, with an SEHException derived from
+ // CLRException. But if the exception already looks like one of ours, let it
+ // go past since LastThrownObject should already represent it.
+ if ((!IsComPlusException(pExceptionInfo->ExceptionRecord)) &&
+ (pExceptionInfo->ExceptionRecord->ExceptionCode != EXCEPTION_MSVC))
+ PAL_CPP_THROW(SEHException *, new SEHException(pExceptionInfo->ExceptionRecord,
+ pExceptionInfo->ContextRecord));
+ }
+ return EXCEPTION_CONTINUE_SEARCH;
+}
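+
+// Illustrative sketch only: the OneShot flag makes the filter convert at most one
+// native SEH exception per call-out (the callback name is hypothetical):
+//
+//     CallOutFilterParam param;
+//     param.OneShot = TRUE;
+//     PAL_TRY(CallOutFilterParam *, pParam, &param)
+//     {
+//         SomeNativeCallback();   // hypothetical call out of the VM
+//     }
+//     PAL_EXCEPT_FILTER(CallOutFilter)
+//     {
+//         // Not reached: the filter either throws an SEHException or continues the search.
+//     }
+//     PAL_ENDTRY;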
+
+
+//==========================================================================
+// Convert the format string used by sprintf to the format used by String.Format.
+// Using the managed formatting routine avoids bogus access violations
+// that happen for long strings in Win32's FormatMessage.
+//
+// Note: This is not a general purpose routine. It handles only cases found
+// in TypeLoadException and FileLoadException.
+//==========================================================================
+static BOOL GetManagedFormatStringForResourceID(CCompRC::ResourceCategory eCategory, UINT32 resId, SString & converted)
+{
+ STANDARD_VM_CONTRACT;
+
+ StackSString temp;
+ if (!temp.LoadResource(eCategory, resId))
+ return FALSE;
+
+ SString::Iterator itr = temp.Begin();
+ while (*itr)
+ {
+ WCHAR c = *itr++;
+ switch (c) {
+ case '%':
+ {
+ WCHAR fmt = *itr++;
+ if (fmt >= '1' && fmt <= '9') {
+ converted.Append(W("{"));
+ converted.Append(fmt - 1); // the managed args start at 0
+ converted.Append(W("}"));
+ }
+ else
+ if (fmt == '%') {
+ converted.Append(W("%"));
+ }
+ else {
+ _ASSERTE(!"Unexpected formating string: %s");
+ }
+ }
+ break;
+ case '{':
+ converted.Append(W("{{"));
+ break;
+ case '}':
+ converted.Append(W("}}"));
+ break;
+ default:
+ converted.Append(c);
+ break;
+ }
+ }
+ return TRUE;
+}
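+
+// For example (illustrative message text), a resource string of the form
+//     "Could not load type '%1' from assembly '%2'."
+// converts to the String.Format-style
+//     "Could not load type '{0}' from assembly '{1}'."
+// while literal '{' and '}' are escaped as "{{" and "}}", and "%%" becomes "%".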
+
+//==========================================================================
+// Private helper for TypeLoadException.
+//==========================================================================
+void QCALLTYPE GetTypeLoadExceptionMessage(UINT32 resId, QCall::StringHandleOnStack retString)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ StackSString format;
+ GetManagedFormatStringForResourceID(CCompRC::Error, resId ? resId : IDS_CLASSLOAD_GENERAL, format);
+ retString.Set(format);
+
+ END_QCALL;
+}
+
+
+
+//==========================================================================
+// Private helper for FileLoadException and FileNotFoundException.
+//==========================================================================
+
+void QCALLTYPE GetFileLoadExceptionMessage(UINT32 hr, QCall::StringHandleOnStack retString)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ StackSString format;
+ GetManagedFormatStringForResourceID(CCompRC::Error, GetResourceIDForFileLoadExceptionHR(hr), format);
+ retString.Set(format);
+
+ END_QCALL;
+}
+
+//==========================================================================
+// Private helper for FileLoadException and FileNotFoundException.
+//==========================================================================
+void QCALLTYPE FileLoadException_GetMessageForHR(UINT32 hresult, QCall::StringHandleOnStack retString)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ BOOL bNoGeekStuff = FALSE;
+ switch ((HRESULT)hresult)
+ {
+ // These are not usually app errors - as long
+ // as the message is reasonably clear, we can live without the hex code stuff.
+ case COR_E_FILENOTFOUND:
+ case __HRESULT_FROM_WIN32(ERROR_MOD_NOT_FOUND):
+ case __HRESULT_FROM_WIN32(ERROR_PATH_NOT_FOUND):
+ case __HRESULT_FROM_WIN32(ERROR_INVALID_NAME):
+ case __HRESULT_FROM_WIN32(ERROR_BAD_NET_NAME):
+ case __HRESULT_FROM_WIN32(ERROR_BAD_NETPATH):
+ case __HRESULT_FROM_WIN32(ERROR_DLL_NOT_FOUND):
+ case CTL_E_FILENOTFOUND:
+ case COR_E_DLLNOTFOUND:
+ case COR_E_PATHTOOLONG:
+ case E_ACCESSDENIED:
+ case COR_E_BADIMAGEFORMAT:
+ case COR_E_NEWER_RUNTIME:
+ case COR_E_ASSEMBLYEXPECTED:
+ bNoGeekStuff = TRUE;
+ break;
+ }
+
+ SString s;
+ GetHRMsg((HRESULT)hresult, s, bNoGeekStuff);
+ retString.Set(s);
+
+ END_QCALL;
+}
+
+
+#define ValidateSigBytes(_size) do { if ((_size) > csig) COMPlusThrow(kArgumentException, W("Argument_BadSigFormat")); csig -= (_size); } while (false)
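+
+// ValidateSigBytes(n) verifies that at least n bytes remain in the signature (csig),
+// throwing ArgumentException on underrun, and consumes them. Typical use before an
+// unaligned 32-bit read:
+//
+//     ValidateSigBytes(sizeof(DWORD));          // throws if fewer than 4 bytes remain
+//     DWORD rank = GET_UNALIGNED_VAL32(pType);  // now known to be safe
+//     pType += sizeof(DWORD);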
+
+//==========================================================================
+// Unparses an individual type.
+//==========================================================================
+const BYTE *UnparseType(const BYTE *pType, DWORD& csig, StubLinker *psl)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(ThrowOutOfMemory();); // Emitting data to the StubLinker can throw OOM.
+ }
+ CONTRACTL_END;
+
+ LPCUTF8 pName = NULL;
+
+ ValidateSigBytes(sizeof(BYTE));
+ switch ( (CorElementType) *(pType++) ) {
+ case ELEMENT_TYPE_VOID:
+ psl->EmitUtf8("void");
+ break;
+
+ case ELEMENT_TYPE_BOOLEAN:
+ psl->EmitUtf8("boolean");
+ break;
+
+ case ELEMENT_TYPE_CHAR:
+ psl->EmitUtf8("char");
+ break;
+
+ case ELEMENT_TYPE_U1:
+ psl->EmitUtf8("unsigned ");
+ //fallthru
+ case ELEMENT_TYPE_I1:
+ psl->EmitUtf8("byte");
+ break;
+
+ case ELEMENT_TYPE_U2:
+ psl->EmitUtf8("unsigned ");
+ //fallthru
+ case ELEMENT_TYPE_I2:
+ psl->EmitUtf8("short");
+ break;
+
+ case ELEMENT_TYPE_U4:
+ psl->EmitUtf8("unsigned ");
+ //fallthru
+ case ELEMENT_TYPE_I4:
+ psl->EmitUtf8("int");
+ break;
+
+ case ELEMENT_TYPE_I:
+ psl->EmitUtf8("native int");
+ break;
+ case ELEMENT_TYPE_U:
+ psl->EmitUtf8("native unsigned");
+ break;
+
+ case ELEMENT_TYPE_U8:
+ psl->EmitUtf8("unsigned ");
+ //fallthru
+ case ELEMENT_TYPE_I8:
+ psl->EmitUtf8("long");
+ break;
+
+
+ case ELEMENT_TYPE_R4:
+ psl->EmitUtf8("float");
+ break;
+
+ case ELEMENT_TYPE_R8:
+ psl->EmitUtf8("double");
+ break;
+
+ case ELEMENT_TYPE_STRING:
+ psl->EmitUtf8(g_StringName);
+ break;
+
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_OBJECT:
+ psl->EmitUtf8(g_ObjectName);
+ break;
+
+ case ELEMENT_TYPE_PTR:
+ pType = UnparseType(pType, csig, psl);
+ psl->EmitUtf8("*");
+ break;
+
+ case ELEMENT_TYPE_BYREF:
+ pType = UnparseType(pType, csig, psl);
+ psl->EmitUtf8("&");
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_CLASS:
+ pName = (LPCUTF8)pType;
+ while (true) {
+ ValidateSigBytes(sizeof(CHAR));
+ if (*(pType++) == '\0')
+ break;
+ }
+ psl->EmitUtf8(pName);
+ break;
+
+ case ELEMENT_TYPE_SZARRAY:
+ {
+ pType = UnparseType(pType, csig, psl);
+ psl->EmitUtf8("[]");
+ }
+ break;
+
+ case ELEMENT_TYPE_ARRAY:
+ {
+ pType = UnparseType(pType, csig, psl);
+ ValidateSigBytes(sizeof(DWORD));
+ DWORD rank = GET_UNALIGNED_VAL32(pType);
+ pType += sizeof(DWORD);
+ if (rank)
+ {
+ ValidateSigBytes(sizeof(UINT32));
+ UINT32 nsizes = GET_UNALIGNED_VAL32(pType); // Get # of sizes
+ ValidateSigBytes(nsizes * sizeof(UINT32));
+ pType += 4 + nsizes*4;
+ ValidateSigBytes(sizeof(UINT32));
+ UINT32 nlbounds = GET_UNALIGNED_VAL32(pType); // Get # of lower bounds
+ ValidateSigBytes(nlbounds * sizeof(UINT32));
+ pType += 4 + nlbounds*4;
+
+
+ while (rank--) {
+ psl->EmitUtf8("[]");
+ }
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_TYPEDBYREF:
+ psl->EmitUtf8("&");
+ break;
+
+ case ELEMENT_TYPE_FNPTR:
+ psl->EmitUtf8("ftnptr");
+ break;
+
+ default:
+ psl->EmitUtf8("?");
+ break;
+ }
+
+ return pType;
+ }
+
+
+
+//==========================================================================
+// Helper for MissingMemberException.
+//==========================================================================
+static STRINGREF MissingMemberException_FormatSignature_Internal(I1ARRAYREF* ppPersistedSig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(ThrowOutOfMemory(););
+ }
+ CONTRACTL_END;
+
+ STRINGREF pString = NULL;
+
+ DWORD csig = 0;
+ const BYTE *psig = 0;
+ StubLinker *psl = NULL;
+ StubHolder<Stub> pstub;
+
+ if ((*ppPersistedSig) != NULL)
+ csig = (*ppPersistedSig)->GetNumComponents();
+
+ if (csig == 0)
+ {
+ return StringObject::NewString("Unknown signature");
+ }
+
+ psig = (const BYTE*)_alloca(csig);
+ CopyMemory((BYTE*)psig,
+ (*ppPersistedSig)->GetDirectPointerToNonObjectElements(),
+ csig);
+
+ {
+ GCX_PREEMP();
+
+ StubLinker sl;
+ psl = &sl;
+ pstub = NULL;
+
+ ValidateSigBytes(sizeof(UINT32));
+ UINT32 cconv = GET_UNALIGNED_VAL32(psig);
+ psig += 4;
+
+ if (cconv == IMAGE_CEE_CS_CALLCONV_FIELD) {
+ psig = UnparseType(psig, csig, psl);
+ } else {
+ ValidateSigBytes(sizeof(UINT32));
+ UINT32 nargs = GET_UNALIGNED_VAL32(psig);
+ psig += 4;
+
+ // Unparse return type
+ psig = UnparseType(psig, csig, psl);
+ psl->EmitUtf8("(");
+ while (nargs--) {
+ psig = UnparseType(psig, csig, psl);
+ if (nargs) {
+ psl->EmitUtf8(", ");
+ }
+ }
+ psl->EmitUtf8(")");
+ }
+ psl->Emit8('\0');
+ pstub = psl->Link();
+ }
+
+ pString = StringObject::NewString( (LPCUTF8)(pstub->GetEntryPoint()) );
+ return pString;
+}
+
+FCIMPL1(Object*, MissingMemberException_FormatSignature, I1Array* pPersistedSigUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ STRINGREF pString = NULL;
+ I1ARRAYREF pPersistedSig = (I1ARRAYREF) pPersistedSigUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(pPersistedSig);
+
+ pString = MissingMemberException_FormatSignature_Internal(&pPersistedSig);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(pString);
+ }
+FCIMPLEND
+
+// Check if the Win32 Error code is an IO error.
+BOOL IsWin32IOError(SCODE scode)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch (scode)
+ {
+ case ERROR_FILE_NOT_FOUND:
+ case ERROR_PATH_NOT_FOUND:
+ case ERROR_TOO_MANY_OPEN_FILES:
+ case ERROR_ACCESS_DENIED:
+ case ERROR_INVALID_HANDLE:
+ case ERROR_INVALID_DRIVE:
+ case ERROR_WRITE_PROTECT:
+ case ERROR_NOT_READY:
+ case ERROR_WRITE_FAULT:
+ case ERROR_SHARING_VIOLATION:
+ case ERROR_LOCK_VIOLATION:
+ case ERROR_SHARING_BUFFER_EXCEEDED:
+ case ERROR_HANDLE_DISK_FULL:
+ case ERROR_BAD_NETPATH:
+ case ERROR_DEV_NOT_EXIST:
+ case ERROR_FILE_EXISTS:
+ case ERROR_CANNOT_MAKE:
+ case ERROR_NET_WRITE_FAULT:
+ case ERROR_DRIVE_LOCKED:
+ case ERROR_OPEN_FAILED:
+ case ERROR_BUFFER_OVERFLOW:
+ case ERROR_DISK_FULL:
+ case ERROR_INVALID_NAME:
+ case ERROR_FILENAME_EXCED_RANGE:
+ case ERROR_IO_DEVICE:
+ case ERROR_DISK_OPERATION_FAILED:
+ return TRUE;
+
+ default:
+ return FALSE;
+ }
+}
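+
+// Illustrative sketch only: a caller might use this to decide whether a failed Win32
+// call should surface as an IO-related managed exception (one plausible mapping,
+// not the only one):
+//
+//     DWORD err = GetLastError();
+//     if (IsWin32IOError(err))
+//         COMPlusThrowHR(HRESULT_FROM_WIN32(err));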
+
+
+// Check if there is a pending exception or the thread is already aborting. Returns 0 if yes.
+// Otherwise, sets the thread up for generating an abort and returns the address of ThrowControlForThread.
+// It is the caller's responsibility to set up Thread::m_OSContext prior to this call. This is used as
+// the context for checking if a ThreadAbort is allowed, and also as the context for the ThreadAbortException
+// itself.
+LPVOID COMPlusCheckForAbort(UINT_PTR uTryCatchResumeAddress)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // Initialize the return address
+ LPVOID pRetAddress = 0;
+
+ Thread* pThread = GetThread();
+
+ if ((!pThread->IsAbortRequested()) || // if no abort has been requested
+ (!pThread->IsRudeAbort() &&
+ (pThread->GetThrowable() != NULL)) ) // or if there is a pending exception
+ {
+ goto exit;
+ }
+
+ // Reverse COM interop IL stubs map all exceptions to HRESULTs and must not propagate Thread.Abort
+ // to their unmanaged callers.
+ if (uTryCatchResumeAddress != NULL)
+ {
+ MethodDesc * pMDResumeMethod = ExecutionManager::GetCodeMethodDesc((PCODE)uTryCatchResumeAddress);
+ if (pMDResumeMethod->IsILStub())
+ goto exit;
+ }
+
+ // else we must produce an abort
+ if ((pThread->GetThrowable() == NULL) &&
+ (pThread->IsAbortInitiated()))
+ {
+ // Oops, we just swallowed an abort, must restart the process
+ pThread->ResetAbortInitiated();
+ }
+
+ // Question: Should we also check for (pThread->m_PreventAsync == 0)
+
+#if !defined(WIN64EXCEPTIONS) && defined(FEATURE_STACK_PROBE)
+ // On Win64, this function is called by our exception handling code which has probed.
+ // But on X86, this is called from JIT code directly. We probe here so that
+ // we can restore the state of the thread below.
+ if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
+ {
+ // In case of SO, we will skip the managed code.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread);
+ }
+#endif // !WIN64EXCEPTIONS && FEATURE_STACK_PROBE
+
+ pThread->SetThrowControlForThread(Thread::InducedThreadRedirectAtEndOfCatch);
+ if (!pThread->ReadyForAbort())
+ {
+ pThread->ResetThrowControlForThread();
+ goto exit;
+ }
+ pThread->SetThrowControlForThread(Thread::InducedThreadStop);
+
+ pRetAddress = (LPVOID)THROW_CONTROL_FOR_THREAD_FUNCTION;
+
+exit:
+
+#ifndef FEATURE_PAL
+
+#ifdef FEATURE_CORECLR
+ // Only proceed if Watson is enabled - CoreCLR may have it disabled.
+ if (IsWatsonEnabled())
+#endif // FEATURE_CORECLR
+ {
+ BOOL fClearUEWatsonBucketTracker = TRUE;
+ PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = pThread->GetExceptionState()->GetUEWatsonBucketTracker();
+
+ if (pRetAddress && pThread->IsAbortRequested())
+ {
+ // Since we are going to reraise the thread abort exception, we would like to assert that
+ // the buckets present in the UE tracker are the ones which were set up when TAE was first raised.
+ //
+ // However, these buckets could come from across AD transition as well and thus, would be
+ // marked for "Captured at AD transition". Thus, we cannot just assert them to be only from
+ // TAE raise.
+ //
+ // We try to preserve buckets in case there is another catch that may catch the exception we reraise
+ // and it attempts to FailFast using the TA exception object. In such a case,
+ // we should maintain the original exception point's bucket details.
+ if (pUEWatsonBucketTracker->RetrieveWatsonBucketIp() != NULL)
+ {
+ _ASSERTE(pUEWatsonBucketTracker->CapturedForThreadAbort() || pUEWatsonBucketTracker->CapturedAtADTransition());
+ fClearUEWatsonBucketTracker = FALSE;
+ }
+#ifdef _DEBUG
+ else
+ {
+ // If we are here and UE Watson bucket tracker is empty,
+ // then it is possible that a thread abort was signalled when the catch was executing
+ // and thus, hijack for TA from here is not a reraise but an initial raise.
+ //
+ // However, if we have partial details, then something is really not right.
+ if (!((pUEWatsonBucketTracker->RetrieveWatsonBucketIp() == NULL) &&
+ (pUEWatsonBucketTracker->RetrieveWatsonBuckets() == NULL)))
+ {
+ _ASSERTE(!"How come TA is being [re]raised and we have incomplete watson bucket details?");
+ }
+ }
+#endif // _DEBUG
+ }
+
+ if (fClearUEWatsonBucketTracker)
+ {
+ // Clear the UE watson bucket tracker for future use since it does not have anything
+ // useful for us right now.
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ LOG((LF_EH, LL_INFO100, "COMPlusCheckForAbort - Cleared UE watson bucket tracker since TAE was not being reraised.\n"));
+ }
+ }
+
+#endif // !FEATURE_PAL
+
+ return pRetAddress;
+}
+
+
+BOOL IsThreadHijackedForThreadStop(Thread* pThread, EXCEPTION_RECORD* pExceptionRecord)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (IsComPlusException(pExceptionRecord))
+ {
+ if (pThread->ThrewControlForThread() == Thread::InducedThreadStop)
+ {
+ LOG((LF_EH, LL_INFO100, "Asynchronous Thread Stop or Abort\n"));
+ return TRUE;
+ }
+ }
+ else if (IsStackOverflowException(pThread, pExceptionRecord))
+ {
+ // SO happens before we are able to change the state to InducedThreadStop, but
+ // we are still in our hijack routine.
+ if (pThread->ThrewControlForThread() == Thread::InducedThreadRedirect)
+ {
+ LOG((LF_EH, LL_INFO100, "Asynchronous Thread Stop or Abort caused by SO\n"));
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+// We sometimes move a thread's execution so it will throw an exception for us.
+// But then we have to treat the exception as if it came from the instruction
+// the thread was originally running.
+//
+// NOTE: This code depends on the fact that there are no register-based data dependencies
+// between a try block and a catch, fault, or finally block. If there were, then we would need
+// to preserve more of the register context.
+
+void AdjustContextForThreadStop(Thread* pThread,
+ CONTEXT* pContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pThread->m_OSContext);
+
+#ifndef WIN64EXCEPTIONS
+ SetIP(pContext, GetIP(pThread->m_OSContext));
+ SetSP(pContext, (GetSP(pThread->m_OSContext)));
+
+ if (GetFP(pThread->m_OSContext) != 0) // ebp = 0 implies that we got here with the right values for ebp
+ {
+ SetFP(pContext, GetFP(pThread->m_OSContext));
+ }
+
+ // We might have interrupted execution at a point where the jit has roots in
+ // registers. We just need to store a "safe" value in here so that the collector
+ // doesn't trap. We're not going to use these objects after the exception.
+ //
+ // Only callee-saved registers are going to be reported by the faulting exception frame.
+#if defined(_TARGET_X86_)
+ // Ebx,esi,edi are important. Eax,ecx,edx are not.
+ pContext->Ebx = 0;
+ pContext->Edi = 0;
+ pContext->Esi = 0;
+#else
+ PORTABILITY_ASSERT("AdjustContextForThreadStop");
+#endif
+
+#else // !WIN64EXCEPTIONS
+ CopyOSContext(pContext, pThread->m_OSContext);
+#if defined(_TARGET_ARM_) && defined(_DEBUG)
+ // Make sure that the thumb bit is set on the IP of the original abort context we just restored.
+ PCODE controlPC = GetIP(pContext);
+ _ASSERTE(controlPC & THUMB_CODE);
+#endif // _TARGET_ARM_
+#endif // !WIN64EXCEPTIONS
+
+ pThread->ResetThrowControlForThread();
+
+ // Should never get here if we're already throwing an exception.
+ _ASSERTE(!pThread->IsExceptionInProgress() || pThread->IsRudeAbort());
+
+ // Should never get here if we're already abort initiated.
+ _ASSERTE(!pThread->IsAbortInitiated() || pThread->IsRudeAbort());
+
+ if (pThread->IsAbortRequested())
+ {
+ pThread->SetAbortInitiated(); // to prevent duplicate aborts
+ }
+}
+
+// Create a COM+ exception and stick it in the thread.
+OBJECTREF
+CreateCOMPlusExceptionObject(Thread *pThread, EXCEPTION_RECORD *pExceptionRecord, BOOL bAsynchronousThreadStop)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(GetThread() == pThread);
+
+ DWORD exceptionCode = pExceptionRecord->ExceptionCode;
+
+ OBJECTREF result = 0;
+
+ DWORD COMPlusExceptionCode = (bAsynchronousThreadStop
+ ? kThreadAbortException
+ : MapWin32FaultToCOMPlusException(pExceptionRecord));
+
+ if (exceptionCode == STATUS_NO_MEMORY)
+ {
+ result = CLRException::GetBestOutOfMemoryException();
+ }
+ else if (IsStackOverflowException(pThread, pExceptionRecord))
+ {
+ result = CLRException::GetPreallocatedStackOverflowException();
+ }
+ else if (bAsynchronousThreadStop && pThread->IsAbortRequested() && pThread->IsRudeAbort())
+ {
+ result = CLRException::GetPreallocatedRudeThreadAbortException();
+ }
+ else
+ {
+ EX_TRY
+ {
+ // We need to disable the backout stack validation at this point since CreateThrowable can
+ // take arbitrarily large amounts of stack for different exception types; however we know
+ // for a fact that we will never go through this code path if the exception is a stack
+ // overflow exception since we already handled that case above with the pre-allocated SO exception.
+ DISABLE_BACKOUT_STACK_VALIDATION;
+
+ FAULT_NOT_FATAL();
+
+ ThreadPreventAsyncHolder preventAsync;
+ ResetProcessorStateHolder procState;
+
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ GCPROTECT_BEGIN(result)
+
+ EEException e((RuntimeExceptionKind)COMPlusExceptionCode);
+ result = e.CreateThrowable();
+
+ // EEException is "one size fits all". But AV needs some more information.
+ if (COMPlusExceptionCode == kAccessViolationException)
+ {
+ SetExceptionAVParameters(result, pExceptionRecord);
+ }
+
+ GCPROTECT_END();
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ }
+ EX_CATCH
+ {
+ // If we get an exception trying to build the managed exception object, then go ahead and return the
+ // thrown object as the result of this function. This is preferable to letting the exception try to
+ // percolate up through the EH code, and it effectively replaces the thrown exception with this
+ // exception.
+ result = GET_THROWABLE();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ return result;
+}
+
+LONG FilterAccessViolation(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ if (pExceptionPointers->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
+ return EXCEPTION_EXECUTE_HANDLER;
+
+ return EXCEPTION_CONTINUE_SEARCH;
+}
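+
+// Illustrative sketch only: this filter lets a caller handle just access violations
+// from a risky native operation while letting all other exceptions propagate
+// (the probed operation is hypothetical):
+//
+//     PAL_TRY(PVOID, pv, NULL)
+//     {
+//         ProbeMemory();   // hypothetical operation that may fault
+//     }
+//     PAL_EXCEPT_FILTER(FilterAccessViolation)
+//     {
+//         // Reached only for EXCEPTION_ACCESS_VIOLATION.
+//     }
+//     PAL_ENDTRY;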
+
+/*
+ * IsContinuableException
+ *
+ * Returns whether this is an exception the EE knows how to intercept and continue from.
+ *
+ * Parameters:
+ * pThread - The thread the exception occurred on.
+ *
+ * Returns:
+ * TRUE if the exception on the thread is interceptable, FALSE otherwise.
+ *
+ * Notes:
+ * Conditions for an interceptable exception:
+ * 1) must be on a managed thread
+ * 2) an exception must be in progress
+ * 3) a managed exception object must have been created
+ * 4) the thread must not be aborting
+ * 5) the exception must not be a breakpoint, a single step, or a stack overflow
+ * 6) the exception dispatch must be in the first pass
+ * 7) the exception must not be a fatal error, as determined by the EE policy (see LogFatalError())
+ */
+bool IsInterceptableException(Thread *pThread)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return ((pThread != NULL) &&
+ (!pThread->IsAbortRequested()) &&
+ (pThread->IsExceptionInProgress()) &&
+ (!pThread->IsThrowableNull())
+
+#ifdef DEBUGGING_SUPPORTED
+ &&
+ pThread->GetExceptionState()->IsDebuggerInterceptable()
+#endif
+
+ );
+}
+
+// Did we hit a DO_A_GC_HERE marker in JITTed code?
+bool IsGcMarker(DWORD exceptionCode, CONTEXT *pContext)
+{
+#ifdef HAVE_GCCOVER
+ WRAPPER_NO_CONTRACT;
+
+ if (GCStress<cfg_any>::IsEnabled())
+ {
+#ifdef _TARGET_X86_
+ // on x86 we can't suspend EE to update the GC marker instruction so
+ // we update it directly without suspending. this can sometimes yield
+ // a STATUS_ACCESS_VIOLATION instead of STATUS_CLR_GCCOVER_CODE. in
+ // this case we let the AV through and retry the instruction. we'll
+ // track the IP of the instruction that generated an AV so we don't
+ // mix up a real AV with a "fake" AV.
+ // see comments in function DoGcStress for more details on this race.
+ // also make sure that the thread is actually in managed code since AVs
+ // outside of JIT code will never be potential GC markers
+ Thread* pThread = GetThread();
+ if (exceptionCode == STATUS_ACCESS_VIOLATION &&
+ GCStress<cfg_instr>::IsEnabled() &&
+ pThread->GetLastAVAddress() != (LPVOID)GetIP(pContext) &&
+ pThread->PreemptiveGCDisabled() &&
+ !IsIPInEE((LPVOID)GetIP(pContext)))
+ {
+ pThread->SetLastAVAddress((LPVOID)GetIP(pContext));
+ return true;
+ }
+#endif // _TARGET_X86_
+
+ if (exceptionCode == STATUS_CLR_GCCOVER_CODE)
+ {
+ if (OnGcCoverageInterrupt(pContext))
+ {
+ return true;
+ }
+
+ {
+ // ExecutionManager::IsManagedCode takes a spinlock. Since this is in a debug-only
+ // check, we'll allow the lock.
+ CONTRACT_VIOLATION(TakesLockViolation);
+
+ // Should never be in managed code.
+ CONSISTENCY_CHECK_MSG(!ExecutionManager::IsManagedCode(GetIP(pContext)), "hit privileged instruction!");
+ }
+ }
+ }
+#else
+ LIMITED_METHOD_CONTRACT;
+#endif // HAVE_GCCOVER
+ return false;
+}
+
+// Return true if the access violation is well formed (has two info parameters
+// at the end)
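+// (For EXCEPTION_ACCESS_VIOLATION, the OS supplies ExceptionInformation[0],
+// the access type: 0 = read, 1 = write, 8 = DEP/execute fault, and
+// ExceptionInformation[1], the faulting virtual address.)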
+static inline BOOL
+IsWellFormedAV(EXCEPTION_RECORD *pExceptionRecord)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ #define NUM_AV_PARAMS 2
+
+ if (pExceptionRecord->NumberParameters == NUM_AV_PARAMS)
+ {
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+}
+
+static inline BOOL
+IsDebuggerFault(EXCEPTION_RECORD *pExceptionRecord,
+ CONTEXT *pContext,
+ DWORD exceptionCode,
+ Thread *pThread)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef DEBUGGING_SUPPORTED
+ SO_NOT_MAINLINE_FUNCTION;
+
+#ifdef _TARGET_ARM_
+ // On ARM we don't have any reliable hardware support for single stepping so it is emulated in software.
+ // The implementation will end up throwing an EXCEPTION_BREAKPOINT rather than an EXCEPTION_SINGLE_STEP
+ // and will leave other aspects of the thread context in an invalid state. Therefore we use this opportunity
+ // to fix up the state before any other part of the system uses it (we do it here since only the debugger
+ // uses single step functionality).
+
+ // First ask the emulation itself whether this exception occurred while single stepping was enabled. If so
+ // it will fix up the context to be consistent again and return true. If it did and the exception was
+ // EXCEPTION_BREAKPOINT then we translate it to EXCEPTION_SINGLE_STEP (otherwise we leave it be, e.g. the
+ // instruction stepped caused an access violation). Since this is called from our VEH there might not
+ // be a thread object so we must check pThread first.
+ if ((pThread != NULL) && pThread->HandleSingleStep(pContext, exceptionCode) && (exceptionCode == EXCEPTION_BREAKPOINT))
+ {
+ exceptionCode = EXCEPTION_SINGLE_STEP;
+ pExceptionRecord->ExceptionCode = EXCEPTION_SINGLE_STEP;
+ pExceptionRecord->ExceptionAddress = (PVOID)pContext->Pc;
+ }
+#endif // _TARGET_ARM_
+
+ // Is this exception really meant for the COM+ Debugger? Note: we will let the debugger have a chance if there
+ // is a debugger attached to any part of the process. It is incorrect to consider whether or not the debugger
+ // is attached to the thread's current app domain at this point.
+
+ // Even if a debugger is not attached, we must let the debugger handle the exception in case it's coming from a
+ // patch-skipper.
+ if ((!IsComPlusException(pExceptionRecord)) &&
+ (GetThread() != NULL) &&
+ (g_pDebugInterface != NULL) &&
+ g_pDebugInterface->FirstChanceNativeException(pExceptionRecord,
+ pContext,
+ exceptionCode,
+ pThread))
+ {
+ LOG((LF_EH | LF_CORDB, LL_INFO1000, "IsDebuggerFault - it's the debugger's fault\n"));
+ return true;
+ }
+#endif // DEBUGGING_SUPPORTED
+ return false;
+}
+
+#ifdef WIN64EXCEPTIONS
+
+EXTERN_C void JIT_MemSet_End();
+EXTERN_C void JIT_MemCpy_End();
+
+EXTERN_C void JIT_WriteBarrier_End();
+EXTERN_C void JIT_CheckedWriteBarrier_End();
+
+#if defined(_TARGET_AMD64_) && defined(_DEBUG)
+EXTERN_C void JIT_WriteBarrier_Debug();
+EXTERN_C void JIT_WriteBarrier_Debug_End();
+#endif
+
+#ifdef _TARGET_ARM_
+EXTERN_C void FCallMemcpy_End();
+#endif
+
+static
+bool IsIPExcluded(UINT_PTR uControlPc)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#define CHECK_RANGE(name) \
+ if (GetEEFuncEntryPoint(name) <= uControlPc && uControlPc < GetEEFuncEntryPoint(name##_End)) return true;
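+// CHECK_RANGE(name) tests the half-open range [name, name_End) against the
+// control PC; the *_End labels declared above mark where each helper's body ends.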
+
+ CHECK_RANGE(JIT_MemSet)
+ CHECK_RANGE(JIT_MemCpy)
+
+ CHECK_RANGE(JIT_WriteBarrier)
+ CHECK_RANGE(JIT_CheckedWriteBarrier)
+
+#if defined(_TARGET_AMD64_) && defined(_DEBUG)
+ CHECK_RANGE(JIT_WriteBarrier_Debug)
+#endif
+
+#ifdef _TARGET_ARM_
+ CHECK_RANGE(FCallMemcpy)
+#endif
+
+ return false;
+}
+#endif // WIN64EXCEPTIONS
+
+// Returns TRUE if caller should resume execution.
+BOOL
+AdjustContextForWriteBarrier(
+ EXCEPTION_RECORD *pExceptionRecord,
+ CONTEXT *pContext)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef _TARGET_X86_
+
+ void* f_IP = (void *)GetIP(pContext);
+
+ if ((f_IP >= (void *) JIT_WriteBarrierStart && f_IP <= (void *) JIT_WriteBarrierLast) ||
+ (f_IP >= (void *) JIT_PatchedWriteBarrierStart && f_IP <= (void *) JIT_PatchedWriteBarrierLast))
+ {
+ // set the exception IP to be the instruction that called the write barrier
+ void* callsite = (void *)GetAdjustedCallAddress(*dac_cast<PTR_PCODE>(GetSP(pContext)));
+ pExceptionRecord->ExceptionAddress = callsite;
+ SetIP(pContext, (PCODE)callsite);
+
+ // put ESP back to what it was before the call.
+ SetSP(pContext, PCODE((BYTE*)GetSP(pContext) + sizeof(void*)));
+ }
+
+ return FALSE;
+
+#elif defined(WIN64EXCEPTIONS)
+
+ void* f_IP = dac_cast<PTR_VOID>(GetIP(pContext));
+
+ CONTEXT tempContext;
+ CONTEXT* pExceptionContext = pContext;
+
+ BOOL fExcluded = IsIPExcluded((UINT_PTR)f_IP);
+
+ if (fExcluded)
+ {
+ bool fShouldHandleManagedFault = false;
+
+ if (pContext != &tempContext)
+ {
+ tempContext = *pContext;
+ pContext = &tempContext;
+ }
+
+ Thread::VirtualUnwindToFirstManagedCallFrame(pContext);
+
+#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ // We had an AV in the writebarrier that needs to be treated
+ // as originating in managed code. At this point, the stack (growing
+ // from left->right) looks like this:
+ //
+ // ManagedFunc -> Native_WriteBarrierInVM -> AV
+ //
+ // We just performed an unwind from the write-barrier
+ // and now have the context in ManagedFunc. Since
+ // ManagedFunc called into the write-barrier, the return
+ // address in the unwound context corresponds to the
+ // instruction where the call will return.
+ //
+ // On ARM, just like we perform ControlPC adjustment
+ // during exception dispatch (refer to ExceptionTracker::InitializeCrawlFrame),
+ // we will need to perform the corresponding adjustment of IP
+ // we got from unwind above, so as to indicate that the AV
+ // happened "before" the call to the writebarrier and not at
+ // the instruction at which the control will return.
+ PCODE ControlPCPostAdjustment = GetIP(pContext) - STACKWALK_CONTROLPC_ADJUST_OFFSET;
+
+ // Now we save the address back into the context so that it gets used
+ // as the faulting address.
+ SetIP(pContext, ControlPCPostAdjustment);
+#endif // _TARGET_ARM_ || _TARGET_ARM64_
+
+ // Unwind the frame chain - On Win64, this is required since we may handle the managed fault and to do so,
+ // we will replace the exception context with the managed context and "continue execution" there. Thus, we do not
+ // want any explicit frames active below the resumption SP.
+ //
+ // Question: Why do we unwind before determining whether we will handle the exception or not?
+ UnwindFrameChain(GetThread(), (Frame*)GetSP(pContext));
+ fShouldHandleManagedFault = ShouldHandleManagedFault(pExceptionRecord,pContext,
+ NULL, // establisher frame (x86 only)
+ NULL // pThread (x86 only)
+ );
+
+ if (fShouldHandleManagedFault)
+ {
+ ReplaceExceptionContextRecord(pExceptionContext, pContext);
+ pExceptionRecord->ExceptionAddress = dac_cast<PTR_VOID>(GetIP(pContext));
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+
+#else // ! _X86_ && !WIN64EXCEPTIONS
+
+ PORTABILITY_WARNING("AdjustContextForWriteBarrier() not implemented on this platform");
+ return FALSE;
+
+#endif
+}
+
+struct SavedExceptionInfo
+{
+ EXCEPTION_RECORD m_ExceptionRecord;
+ CONTEXT m_ExceptionContext;
+ CrstStatic m_Crst;
+
+ void SaveExceptionRecord(EXCEPTION_RECORD *pExceptionRecord)
+ {
+ LIMITED_METHOD_CONTRACT;
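+ // EXCEPTION_RECORD is a variable-length structure: copy the fixed header
+ // plus only the NumberParameters entries of ExceptionInformation in use.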
+ size_t erSize = offsetof(EXCEPTION_RECORD, ExceptionInformation) +
+ pExceptionRecord->NumberParameters * sizeof(pExceptionRecord->ExceptionInformation[0]);
+ memcpy(&m_ExceptionRecord, pExceptionRecord, erSize);
+
+ }
+
+ void SaveContext(CONTEXT *pContext)
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef CONTEXT_EXTENDED_REGISTERS
+
+ size_t contextSize = offsetof(CONTEXT, ExtendedRegisters);
+ if ((pContext->ContextFlags & CONTEXT_EXTENDED_REGISTERS) == CONTEXT_EXTENDED_REGISTERS)
+ contextSize += sizeof(pContext->ExtendedRegisters);
+ memcpy(&m_ExceptionContext, pContext, contextSize);
+
+#else // !CONTEXT_EXTENDED_REGISTERS
+
+ size_t contextSize = sizeof(CONTEXT);
+ memcpy(&m_ExceptionContext, pContext, contextSize);
+
+#endif // !CONTEXT_EXTENDED_REGISTERS
+ }
+
+ DEBUG_NOINLINE void Enter()
+ {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ m_Crst.Enter();
+ }
+
+ DEBUG_NOINLINE void Leave()
+ {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ m_Crst.Leave();
+ }
+
+ void Init()
+ {
+ WRAPPER_NO_CONTRACT;
+ m_Crst.Init(CrstSavedExceptionInfo, CRST_UNSAFE_ANYMODE);
+ }
+};
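+
+// A single global save slot works because m_Crst serializes faulting threads:
+// HandleManagedFault enters the Crst before saving, and it is not left until
+// FixContextForFaultingExceptionFrame has consumed the saved state on the
+// resumed thread.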
+
+
+#if defined(USE_FEF)
+
+SavedExceptionInfo g_SavedExceptionInfo; // Globals are guaranteed zero-init.
+
+void InitSavedExceptionInfo()
+{
+ g_SavedExceptionInfo.Init();
+}
+
+EXTERN_C VOID FixContextForFaultingExceptionFrame (
+ EXCEPTION_RECORD* pExceptionRecord,
+ CONTEXT *pContextRecord)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // don't copy the param args as we have already supplied them on the throw
+ memcpy((void*) pExceptionRecord,
+ (void*) &g_SavedExceptionInfo.m_ExceptionRecord,
+ offsetof(EXCEPTION_RECORD, ExceptionInformation)
+ );
+
+ ReplaceExceptionContextRecord(pContextRecord, &g_SavedExceptionInfo.m_ExceptionContext);
+
+ g_SavedExceptionInfo.Leave();
+
+ GetThread()->ResetThreadStateNC(Thread::TSNC_DebuggerIsManagedException);
+}
+
+EXTERN_C VOID __fastcall
+LinkFrameAndThrow(FaultingExceptionFrame* pFrame)
+{
+ WRAPPER_NO_CONTRACT;
+
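+ // The frame memory is carved out raw on the stack (by the NakedThrowHelper
+ // asm helper), so the vtable pointer and GS cookie are written by hand
+ // before InitAndLink treats it as a real Frame.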
+ *(TADDR*)pFrame = FaultingExceptionFrame::GetMethodFrameVPtr();
+ *pFrame->GetGSCookiePtr() = GetProcessGSCookie();
+
+ pFrame->InitAndLink(&g_SavedExceptionInfo.m_ExceptionContext);
+
+ GetThread()->SetThreadStateNC(Thread::TSNC_DebuggerIsManagedException);
+
+ ULONG argcount = g_SavedExceptionInfo.m_ExceptionRecord.NumberParameters;
+ ULONG flags = g_SavedExceptionInfo.m_ExceptionRecord.ExceptionFlags;
+ ULONG code = g_SavedExceptionInfo.m_ExceptionRecord.ExceptionCode;
+ ULONG_PTR* args = &g_SavedExceptionInfo.m_ExceptionRecord.ExceptionInformation[0];
+
+ RaiseException(code, flags, argcount, args);
+}
+
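+// Stash the faulting IP where NakedThrowHelper can recover it: the first
+// argument register on AMD64 (RCX), or LR on ARM/ARM64, where it stands in
+// for the return address of a call that never actually happened.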
+void SetNakedThrowHelperArgRegistersInContext(CONTEXT* pContext)
+{
+#if defined(_TARGET_AMD64_)
+ pContext->Rcx = (UINT_PTR)GetIP(pContext);
+#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ // Save the original IP in LR
+ pContext->Lr = (DWORD)GetIP(pContext);
+#else
+ PORTABILITY_WARNING("NakedThrowHelper argument not defined");
+#endif
+}
+
+EXTERN_C VOID STDCALL NakedThrowHelper(VOID);
+
+void HandleManagedFault(EXCEPTION_RECORD* pExceptionRecord,
+ CONTEXT* pContext,
+ EXCEPTION_REGISTRATION_RECORD* pEstablisherFrame,
+ Thread* pThread)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Ok. Now we have a brand new fault in jitted code.
+ g_SavedExceptionInfo.Enter();
+ g_SavedExceptionInfo.SaveExceptionRecord(pExceptionRecord);
+ g_SavedExceptionInfo.SaveContext(pContext);
+
+ SetNakedThrowHelperArgRegistersInContext(pContext);
+
+ SetIP(pContext, GetEEFuncEntryPoint(NakedThrowHelper));
+}
+
+#else // USE_FEF
+
+void InitSavedExceptionInfo()
+{
+}
+
+#endif // USE_FEF
+
+//
+// Init a new frame
+//
+void FaultingExceptionFrame::Init(CONTEXT *pContext)
+{
+ WRAPPER_NO_CONTRACT;
+#if defined(_TARGET_X86_)
+ CalleeSavedRegisters *pRegs = GetCalleeSavedRegisters();
+ pRegs->ebp = pContext->Ebp;
+ pRegs->ebx = pContext->Ebx;
+ pRegs->esi = pContext->Esi;
+ pRegs->edi = pContext->Edi;
+ m_ReturnAddress = ::GetIP(pContext);
+ m_Esp = (DWORD)GetSP(pContext);
+#elif defined(WIN64EXCEPTIONS)
+ m_ReturnAddress = ::GetIP(pContext);
+ CopyOSContext(&m_ctx, pContext);
+#else
+ PORTABILITY_ASSERT("FaultingExceptionFrame::Init");
+#endif
+}
+
+//
+// Init and Link in a new frame
+//
+void FaultingExceptionFrame::InitAndLink(CONTEXT *pContext)
+{
+ WRAPPER_NO_CONTRACT;
+
+ Init(pContext);
+
+ Push();
+}
+
+
+bool ShouldHandleManagedFault(
+ EXCEPTION_RECORD* pExceptionRecord,
+ CONTEXT* pContext,
+ EXCEPTION_REGISTRATION_RECORD* pEstablisherFrame,
+ Thread* pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // If we get a faulting instruction inside managed code, we're going to
+ // 1. Allocate the correct exception object, store it in the thread.
+ // 2. Save the EIP in the thread.
+ // 3. Change the EIP to our throw helper
+ // 4. Resume execution.
+ //
+ // The helper will push a frame for us, and then throw the correct managed exception.
+ //
+ // Is this exception really meant for the COM+ Debugger? Note: we will let the debugger have a chance if there is a
+ // debugger attached to any part of the process. It is incorrect to consider whether or not the debugger is attached
+ // to the thread's current app domain at this point.
+
+
+ // A managed exception never comes from managed code, and we can ignore all breakpoint
+ // exceptions.
+ //
+ DWORD exceptionCode = pExceptionRecord->ExceptionCode;
+ if (IsComPlusException(pExceptionRecord)
+ || exceptionCode == STATUS_BREAKPOINT
+ || exceptionCode == STATUS_SINGLE_STEP)
+ {
+ return false;
+ }
+
+#ifdef _DEBUG
+ // This is a workaround, but it's debug-only as is gc stress 4.
+ // The problem is that if we get an exception with this code that
+ // didn't come from GCStress=4, then we won't push a FeF and will
+ // end up with a gc hole and potential crash.
+ if (exceptionCode == STATUS_CLR_GCCOVER_CODE)
+ return false;
+#endif // _DEBUG
+
+#ifndef WIN64EXCEPTIONS
+ // If there's any frame below the ESP of the exception, then we can forget it.
+ if (pThread->m_pFrame < dac_cast<PTR_VOID>(GetSP(pContext)))
+ return false;
+
+ // If we're a subsequent handler forget it.
+ EXCEPTION_REGISTRATION_RECORD* pBottomMostHandler = pThread->GetExceptionState()->m_currentExInfo.m_pBottomMostHandler;
+ if (pBottomMostHandler != NULL && pEstablisherFrame > pBottomMostHandler)
+ {
+ return false;
+ }
+#endif // WIN64EXCEPTIONS
+
+ {
+ // If it's not a fault in jitted code, forget it.
+
+ // ExecutionManager::IsManagedCode takes a spinlock. Since we're in the middle of throwing,
+ // we'll allow the lock, even if a caller didn't expect it.
+ CONTRACT_VIOLATION(TakesLockViolation);
+
+ if (!ExecutionManager::IsManagedCode(GetIP(pContext)))
+ return false;
+ }
+
+ // Caller should call HandleManagedFault and resume execution.
+ return true;
+}
+
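+// Rough shape of the vectored-handler layering below: the Shim is the entry
+// point registered with the OS, CLRVectoredExceptionHandler adds the OOM/SO
+// guards, Phase2 handles debugger and breakpoint cases, and Phase3 performs
+// the context fixups and common filtering that pick a VEH_ACTION.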
+LONG WINAPI CLRVectoredExceptionHandlerPhase2(PEXCEPTION_POINTERS pExceptionInfo);
+
+enum VEH_ACTION
+{
+ VEH_NO_ACTION = 0,
+ VEH_EXECUTE_HANDLE_MANAGED_EXCEPTION,
+ VEH_CONTINUE_EXECUTION,
+ VEH_CONTINUE_SEARCH,
+ VEH_EXECUTE_HANDLER
+};
+
+
+VEH_ACTION WINAPI CLRVectoredExceptionHandlerPhase3(PEXCEPTION_POINTERS pExceptionInfo);
+
+LONG WINAPI CLRVectoredExceptionHandler(PEXCEPTION_POINTERS pExceptionInfo)
+{
+ // It is not safe to execute code inside the VM after we shut down the EE. One example is
+ // DisablePreemptiveGC, which will block forever.
+ if (g_fForbidEnterEE)
+ {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ //
+ // For images ngen'd with FEATURE_LAZY_COW_PAGES, the .data section will be read-only. Any writes to that data need to be
+ // preceded by a call to EnsureWritablePages. This code is here to catch the ones we forget.
+ //
+#ifdef FEATURE_LAZY_COW_PAGES
+ if (pExceptionInfo->ExceptionRecord->ExceptionCode == STATUS_ACCESS_VIOLATION &&
+ IsWellFormedAV(pExceptionInfo->ExceptionRecord) &&
+ pExceptionInfo->ExceptionRecord->ExceptionInformation[0] == 1 /* means this was a failed write */)
+ {
+ void* location = (void*)pExceptionInfo->ExceptionRecord->ExceptionInformation[1];
+
+ if (IsInReadOnlyLazyCOWPage(location))
+ {
+#ifdef _DEBUG
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DebugAssertOnMissedCOWPage))
+ _ASSERTE_MSG(false, "Writes to NGen'd data must be protected by EnsureWritablePages.");
+#endif
+
+#pragma push_macro("VirtualQuery")
+#undef VirtualQuery
+ MEMORY_BASIC_INFORMATION mbi;
+ if (!::VirtualQuery(location, &mbi, sizeof(mbi)))
+ {
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_OUTOFMEMORY);
+ }
+#pragma pop_macro("VirtualQuery")
+
+ bool executable = (mbi.Protect == PAGE_EXECUTE) ||
+ (mbi.Protect == PAGE_EXECUTE_READ) ||
+ (mbi.Protect == PAGE_EXECUTE_READWRITE) ||
+ (mbi.Protect == PAGE_EXECUTE_WRITECOPY);
+
+ if (!(executable ? EnsureWritableExecutablePagesNoThrow(location, 1) : EnsureWritablePagesNoThrow(location, 1)))
+ {
+ // Note that this failfast is very rare. It will only be hit in the theoretical case that an
+ // EnsureWritablePages probe is missing (there should be none when we ship) and the OS runs into OOM
+ // exactly at the point when we execute the code with the missing probe.
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_OUTOFMEMORY);
+ }
+
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+ }
+#endif //FEATURE_LAZY_COW_PAGES
+
+
+ //
+ // DO NOT USE CONTRACTS HERE AS THIS ROUTINE MAY NEVER RETURN. You can use
+ // static contracts, but currently this is all WRAPPER_NO_CONTRACT.
+ //
+
+
+ //
+ // READ THIS!
+ //
+ //
+ // You cannot put any code in here that allocates during out-of-memory handling.
+ // This routine runs before *any* other handlers, including __try. Thus, if you
+ // allocate anything in this routine then it will throw out-of-memory and end up
+ // right back here.
+ //
+ // There are various things that allocate that you may not expect to allocate. One
+ // instance of this is STRESS_LOG. It allocates the log buffer if the thread does
+ // not already have one allocated. Thus, if we OOM during the setting up of the
+ // thread, the log buffer will not be allocated and this will try to do so. Thus,
+ // all STRESS_LOGs in here need to be after you have guaranteed the allocation has
+ // already occurred.
+ //
+
+
+#ifndef FEATURE_PAL
+ Thread *pThread;
+
+ {
+ MAYBE_FAULT_FORBID_NO_ALLOC((pExceptionInfo->ExceptionRecord->ExceptionCode == STATUS_NO_MEMORY));
+
+ pThread = GetThread();
+
+ //
+ // Since we are in an OOM situation, we test the thread object before logging since if the
+ // thread exists we know the log buffer has been allocated already.
+ //
+ if (pThread != NULL)
+ {
+ CantAllocHolder caHolder;
+ STRESS_LOG4(LF_EH, LL_INFO100, "In CLRVectoredExceptionHandler, Exception = %x, Context = %p, IP = %p SP = %p\n",
+ pExceptionInfo->ExceptionRecord->ExceptionCode, pExceptionInfo->ContextRecord,
+ GetIP(pExceptionInfo->ContextRecord), GetSP(pExceptionInfo->ContextRecord));
+ }
+
+ }
+
+ // We need to unhijack the thread here if it is not unhijacked already. On x86 systems,
+ // we do this in Thread::StackWalkFramesEx, but on amd64 systems we have the OS walk the
+ // stack for us. If we leave CLRVectoredExceptionHandler with a thread still hijacked,
+ // the operating system will not be able to walk the stack and thus will not find the handlers for
+ // the exception. It is safe to unhijack the thread in this case for two reasons:
+ // 1. pThread refers to *this* thread.
+ // 2. If another thread tries to hijack this thread, it will see we are not in managed
+ // code (and thus won't try to hijack us).
+#if defined(WIN64EXCEPTIONS)
+ if (pThread != NULL)
+ {
+ pThread->UnhijackThreadNoAlloc();
+ }
+#endif // defined(WIN64EXCEPTIONS)
+
+ if (IsSOExceptionCode(pExceptionInfo->ExceptionRecord->ExceptionCode))
+ {
+ //
+ // Not an Out-of-memory situation, so no need for a forbid fault region here
+ //
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ LONG retVal = 0;
+
+#ifdef FEATURE_STACK_PROBE
+ // See if we've got enough stack to handle this exception
+
+ // There isn't much stack left to attempt to report an exception. Let's trigger a hard
+ // SO, so we clear the guard page and give ourselves at least another page of stack to work with.
+
+ if (pThread && !pThread->IsStackSpaceAvailable(ADJUST_PROBE(1)))
+ {
+ DontCallDirectlyForceStackOverflow();
+ }
+#endif
+
+ // We can't probe here, because we won't return from the CLRVectoredExceptionHandlerPhase2
+ // on WIN64
+ //
+
+ if (pThread)
+ {
+ FAULT_FORBID_NO_ALLOC();
+ CantAllocHolder caHolder;
+ }
+
+ retVal = CLRVectoredExceptionHandlerPhase2(pExceptionInfo);
+
+ //
+ //END_ENTRYPOINT_VOIDRET;
+ //
+ return retVal;
+#else // !FEATURE_PAL
+ return CLRVectoredExceptionHandlerPhase2(pExceptionInfo);
+#endif // !FEATURE_PAL
+}
+
+
+LONG WINAPI CLRVectoredExceptionHandlerPhase2(PEXCEPTION_POINTERS pExceptionInfo)
+{
+ //
+ // DO NOT USE CONTRACTS HERE AS THIS ROUTINE MAY NEVER RETURN. You can use
+ // static contracts, but currently this is all WRAPPER_NO_CONTRACT.
+ //
+
+ //
+ // READ THIS!
+ //
+ //
+ // You cannot put any code in here that allocates during out-of-memory handling.
+ // This routine runs before *any* other handlers, including __try. Thus, if you
+ // allocate anything in this routine then it will throw out-of-memory and end up
+ // right back here.
+ //
+ // There are various things that allocate that you may not expect to allocate. One
+ // instance of this is STRESS_LOG. It allocates the log buffer if the thread does
+ // not already have one allocated. Thus, if we OOM during the setting up of the
+ // thread, the log buffer will not be allocated and this will try to do so. Thus,
+ // all STRESS_LOGs in here need to be after you have guaranteed the allocation has
+ // already occurred.
+ //
+
+ PEXCEPTION_RECORD pExceptionRecord = pExceptionInfo->ExceptionRecord;
+ VEH_ACTION action;
+
+ {
+ MAYBE_FAULT_FORBID_NO_ALLOC((pExceptionRecord->ExceptionCode == STATUS_NO_MEMORY));
+ CantAllocHolder caHolder;
+
+ action = CLRVectoredExceptionHandlerPhase3(pExceptionInfo);
+ }
+
+ if (action == VEH_CONTINUE_EXECUTION)
+ {
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+
+ if (action == VEH_CONTINUE_SEARCH)
+ {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ if (action == VEH_EXECUTE_HANDLER)
+ {
+ return EXCEPTION_EXECUTE_HANDLER;
+ }
+
+#if defined(WIN64EXCEPTIONS)
+
+ if (action == VEH_EXECUTE_HANDLE_MANAGED_EXCEPTION)
+ {
+ //
+ // If the exception context was unwound by Phase3 then
+ // we'll jump here to save the managed context and resume execution at
+ // NakedThrowHelper. This needs to be done outside of any holder's
+ // scope, because HandleManagedFault may not return.
+ //
+ HandleManagedFault(pExceptionInfo->ExceptionRecord,
+ pExceptionInfo->ContextRecord,
+ NULL, // establisher frame (x86 only)
+ NULL // pThread (x86 only)
+ );
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+
+#endif // defined(WIN64EXCEPTIONS)
+
+
+ //
+ // In OOM situations, this call better not fault.
+ //
+ {
+ MAYBE_FAULT_FORBID_NO_ALLOC((pExceptionRecord->ExceptionCode == STATUS_NO_MEMORY));
+ CantAllocHolder caHolder;
+
+ // Give the debugger a chance. Note that it's okay for this call to trigger a GC, since the debugger will take
+ // special steps to make that okay.
+ //
+ // @TODO: I'd love a way to call into the debugger with GCX_NOTRIGGER still in scope, and force them to make
+ // the choice to break the no-trigger region after taking all necessary precautions.
+ if (IsDebuggerFault(pExceptionRecord, pExceptionInfo->ContextRecord, pExceptionRecord->ExceptionCode, GetThread()))
+ {
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+ }
+
+ //
+ // No reason to put a forbid fault region here as the exception code is not STATUS_NO_MEMORY.
+ //
+
+ // Handle a user breakpoint. Note that it's okay for the UserBreakpointFilter to trigger a GC, since we're going
+ // to either a) terminate the process, or b) let a user attach an unmanaged debugger, and debug knowing that
+ // managed state may be messed up.
+ if ((pExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) ||
+ (pExceptionRecord->ExceptionCode == STATUS_SINGLE_STEP))
+ {
+#ifndef FEATURE_PAL
+ // A breakpoint outside managed code and outside the runtime will have to be handled by some
+ // other piece of code.
+
+ BOOL fExternalException = FALSE;
+
+ BEGIN_SO_INTOLERANT_CODE_NOPROBE;
+
+ {
+ // ExecutionManager::IsManagedCode takes a spinlock. Since we're in the middle of throwing,
+ // we'll allow the lock, even if a caller didn't expect it.
+ CONTRACT_VIOLATION(TakesLockViolation);
+
+ fExternalException = (!ExecutionManager::IsManagedCode(GetIP(pExceptionInfo->ContextRecord)) &&
+ !IsIPInModule(g_pMSCorEE, GetIP(pExceptionInfo->ContextRecord)));
+ }
+
+ END_SO_INTOLERANT_CODE_NOPROBE;
+
+ if (fExternalException)
+ {
+ // The breakpoint was not ours. Someone else can handle it. (Or if not, we'll get it again as
+ // an unhandled exception.)
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+#endif // !FEATURE_PAL
+
+ // The breakpoint was from managed or the runtime. Handle it. Or,
+ // this may be a Rotor build.
+ return UserBreakpointFilter(pExceptionInfo);
+ }
+
+#if defined(WIN64EXCEPTIONS)
+ BOOL fShouldHandleManagedFault;
+
+ {
+ MAYBE_FAULT_FORBID_NO_ALLOC((pExceptionRecord->ExceptionCode == STATUS_NO_MEMORY));
+ CantAllocHolder caHolder;
+ fShouldHandleManagedFault = ShouldHandleManagedFault(pExceptionInfo->ExceptionRecord,
+ pExceptionInfo->ContextRecord,
+ NULL, // establisher frame (x86 only)
+ NULL // pThread (x86 only)
+ );
+ }
+
+ if (fShouldHandleManagedFault)
+ {
+ //
+ // HandleManagedFault may never return, so we cannot use a forbid fault region around it.
+ //
+ HandleManagedFault(pExceptionInfo->ExceptionRecord,
+ pExceptionInfo->ContextRecord,
+ NULL, // establisher frame (x86 only)
+ NULL // pThread (x86 only)
+ );
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+#endif // defined(WIN64EXCEPTIONS)
+
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+/*
+ * CLRVectoredExceptionHandlerPhase3
+ *
+ * This routine does some basic processing on the exception, making decisions about common
+ * exception types and whether to continue them or not. It has side-effects, in that it may
+ * adjust the context in the exception.
+ *
+ * Parameters:
+ * pExceptionInfo - pointer to the exception
+ *
+ * Returns:
+ * VEH_NO_ACTION - This indicates that Phase3 has no specific action to take and that further
+ * processing of this exception should continue.
+ * VEH_EXECUTE_HANDLE_MANAGED_EXCEPTION - This indicates that the caller should call HandleManagedFault
+ * immediately.
+ * VEH_CONTINUE_EXECUTION - Caller should return EXCEPTION_CONTINUE_EXECUTION.
+ * VEH_CONTINUE_SEARCH - Caller should return EXCEPTION_CONTINUE_SEARCH;
+ * VEH_EXECUTE_HANDLER - Caller should return EXCEPTION_EXECUTE_HANDLER.
+ *
+ * Note that in all cases the context in the exception may have been adjusted.
+ *
+ */
+
+VEH_ACTION WINAPI CLRVectoredExceptionHandlerPhase3(PEXCEPTION_POINTERS pExceptionInfo)
+{
+ //
+ // DO NOT USE CONTRACTS HERE AS THIS ROUTINE MAY NEVER RETURN. You can use
+ // static contracts, but currently this is all WRAPPER_NO_CONTRACT.
+ //
+
+ //
+ // READ THIS!
+ //
+ //
+ // You cannot put any code in here that allocates during out-of-memory handling.
+ // This routine runs before *any* other handlers, including __try. Thus, if you
+ // allocate anything in this routine then it will throw out-of-memory and end up
+ // right back here.
+ //
+ // There are various things that allocate that you may not expect to allocate. One
+ // instance of this is STRESS_LOG. It allocates the log buffer if the thread does
+ // not already have one allocated. Thus, if we OOM during the setting up of the
+ // thread, the log buffer will not be allocated and this will try to do so. Thus,
+ // all STRESS_LOGs in here need to be after you have guaranteed the allocation has
+ // already occurred.
+ //
+
+ // Handle special cases which are common amongst all filters.
+ PEXCEPTION_RECORD pExceptionRecord = pExceptionInfo->ExceptionRecord;
+ PCONTEXT pContext = pExceptionInfo->ContextRecord;
+ DWORD exceptionCode = pExceptionRecord->ExceptionCode;
+
+ // It's extremely important that no one trigger a GC in here. This is called from CPFH_FirstPassHandler, in
+ // cases where we've taken an unmanaged exception on a managed thread (AV, divide by zero, etc.) but
+ // _before_ we've done our work to erect a FaultingExceptionFrame. Thus, the managed frames are
+ // unprotected. We set up a GCX_NOTRIGGER holder in this scope to prevent us from messing this up. Note
+ // that the scope of this is limited, since there are times when it's okay to trigger even in this special
+ // case. The debugger is a good example: if it gets a breakpoint in managed code, it has the smarts to
+ // prevent the GC before enabling GC, thus it's okay for it to trigger.
+
+ GCX_NOTRIGGER();
+
+ if (IsInstrModifyFault(pExceptionInfo))
+ {
+ return VEH_CONTINUE_EXECUTION;
+ }
+
+#ifdef USE_REDIRECT_FOR_GCSTRESS
+ // NOTE: this is effectively ifdef (_TARGET_AMD64_ || _TARGET_ARM_), and does not actually trigger
+ // a GC. This will redirect the exception context to a stub which will
+ // push a frame and cause GC.
+ if (IsGcMarker(exceptionCode, pContext))
+ {
+ return VEH_CONTINUE_EXECUTION;
+ }
+#endif // USE_REDIRECT_FOR_GCSTRESS
+
+#ifdef FEATURE_HIJACK
+#ifdef _TARGET_X86_
+ CPFH_AdjustContextForThreadSuspensionRace(pContext, GetThread());
+#endif // _TARGET_X86_
+#endif // FEATURE_HIJACK
+
+ // Some other parts of the EE use exceptions in their own nefarious ways. We do some up-front processing
+ // here to fix up the exception if needed.
+ if (exceptionCode == STATUS_ACCESS_VIOLATION)
+ {
+ if (IsWellFormedAV(pExceptionRecord))
+ {
+ if (AdjustContextForWriteBarrier(pExceptionRecord, pContext))
+ {
+ // On x86, AdjustContextForWriteBarrier simply backs up AVs
+ // in write barrier helpers into the calling frame, so that
+ // the subsequent logic here sees a managed fault.
+ //
+ // On 64-bit, some additional work is required.
+#ifdef WIN64EXCEPTIONS
+ return VEH_EXECUTE_HANDLE_MANAGED_EXCEPTION;
+#endif // defined(WIN64EXCEPTIONS)
+ }
+ else if (AdjustContextForVirtualStub(pExceptionRecord, pContext))
+ {
+#ifdef WIN64EXCEPTIONS
+ return VEH_EXECUTE_HANDLE_MANAGED_EXCEPTION;
+#endif
+ }
+
+ // Remember the EIP for stress debugging purposes.
+ g_LastAccessViolationEIP = (void*) ::GetIP(pContext);
+
+#ifndef FEATURE_PAL
+ // Note: we have a holder, called AVInRuntimeImplOkayHolder, that tells us that it's okay to have an
+ // AV in the Runtime's implementation in certain places. So, if it's okay to have an AV at this
+ // time, then skip the check for whether or not the AV is in our impl.
+ // AVs are ok on the Helper thread (for which there is no pThread object,
+ // and so the AVInRuntime holder doesn't work).
+ Thread *pThread = GetThread();
+
+ bool fAVisOk =
+ (IsDbgHelperSpecialThread() || IsETWRundownSpecialThread() ||
+ ((pThread != NULL) && (pThread->AVInRuntimeImplOkay())) );
+
+
+ // It is unnecessary to check this on the second pass as we would have torn down
+ // the process on the first pass. Also, the context record is not reliable
+ // on the second pass, which subjects us to false positives.
+ if ((!fAVisOk) && !(pExceptionRecord->ExceptionFlags & EXCEPTION_UNWINDING))
+ {
+ if (IsIPInModule(g_pMSCorEE, (PCODE)GetIP(pContext)))
+ {
+ CONTRACT_VIOLATION(ThrowsViolation|FaultViolation|SOToleranceViolation);
+
+ //
+ // If you're debugging, set the debugger to catch first-chance AV's, then simply hit F5 or
+ // 'go' and continue after the assert. We'll recognize that a debugger is attached, and
+ // return EXCEPTION_CONTINUE_EXECUTION. You'll re-execute the faulting instruction, and the
+ // debugger will stop at the AV. The value of EXCEPTION_CONTINUE_EXECUTION is -1, just in
+ // case you need to verify the return value for some reason. If you need to actually debug
+ // the failure path, then set your IP around the check below.
+ //
+ // You can also use Windbg's .cxr command to set the context to pContext.
+ //
+#if defined(_DEBUG)
+ const char * pStack = "<stack not available>";
+ StackScratchBuffer buffer;
+ SString sStack;
+ if (GetStackTraceAtContext(sStack, pContext))
+ {
+ pStack = sStack.GetANSI(buffer);
+ }
+
+ DWORD tid = GetCurrentThreadId();
+
+ BOOL debuggerPresentBeforeAssert = IsDebuggerPresent();
+
+
+ CONSISTENCY_CHECK_MSGF(false, ("AV in clr at this callstack:\n------\n%s\n-----\n.AV on tid=0x%x (%d), cxr=%p, exr=%p\n",
+ pStack, tid, tid, pContext, pExceptionRecord));
+
+ // @todo - this may not be what we want for interop-debugging...
+ //
+ // If there was no debugger before the assert, but there is one now, then go ahead and
+ // return EXCEPTION_CONTINUE_EXECUTION to re-execute the faulting instruction. This is
+ // supposed to be a nice little feature for CLR devs who attach debuggers on the "AV in
+ // mscorwks" assert above. Since this is only for that case, it's only in debug builds.
+ if (!debuggerPresentBeforeAssert && IsDebuggerPresent())
+ {
+ return VEH_CONTINUE_EXECUTION;
+ }
+#endif // defined(_DEBUG)
+
+ EEPOLICY_HANDLE_FATAL_ERROR_USING_EXCEPTION_INFO(COR_E_EXECUTIONENGINE, pExceptionInfo);
+ }
+ }
+#endif // !FEATURE_PAL
+ }
+ }
+ else if (exceptionCode == BOOTUP_EXCEPTION_COMPLUS)
+ {
+ // Don't handle a boot exception
+ return VEH_CONTINUE_SEARCH;
+ }
+
+ return VEH_NO_ACTION;
+}
+
+BOOL IsIPInEE(void *ip)
+{
+ WRAPPER_NO_CONTRACT;
+
+#if defined(FEATURE_PREJIT) && !defined(FEATURE_PAL)
+ if ((TADDR)ip > g_runtimeLoadedBaseAddress &&
+ (TADDR)ip < g_runtimeLoadedBaseAddress + g_runtimeVirtualSize)
+ {
+ return TRUE;
+ }
+ else
+#endif // FEATURE_PREJIT && !FEATURE_PAL
+ {
+ return FALSE;
+ }
+}
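+// Note: when FEATURE_PREJIT is unavailable (or on the PAL), there is no single
+// runtime image range to test, so this conservatively reports FALSE.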
+
+#if defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK)
+
+// This function is used to check if the specified IP is in the prolog or not.
+bool IsIPInProlog(EECodeInfo *pCodeInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ bool fInsideProlog = true;
+
+ _ASSERTE(pCodeInfo->IsValid());
+
+ PTR_RUNTIME_FUNCTION funcEntry = pCodeInfo->GetFunctionEntry();
+
+ // We should always get a function entry for a managed method
+ _ASSERTE(funcEntry != NULL);
+
+ // Get the unwindInfo from the function entry
+ PUNWIND_INFO pUnwindInfo = (PUNWIND_INFO)(pCodeInfo->GetModuleBase() + funcEntry->UnwindData);
+
+ // Check if the specified IP is beyond the prolog or not.
+ DWORD dwPrologLen = pUnwindInfo->SizeOfProlog;
+ if (pCodeInfo->GetRelOffset() >= dwPrologLen)
+ {
+ fInsideProlog = false;
+ }
+
+ return fInsideProlog;
+}
+
+// This function is used to check if the specified IP is in the epilog or not.
+bool IsIPInEpilog(PTR_CONTEXT pContextToCheck, EECodeInfo *pCodeInfo, BOOL *pSafeToInjectThreadAbort)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pContextToCheck != NULL);
+ PRECONDITION(ExecutionManager::IsManagedCode(GetIP(pContextToCheck)));
+ PRECONDITION(pSafeToInjectThreadAbort != NULL);
+ }
+ CONTRACTL_END;
+
+ TADDR ipToCheck = GetIP(pContextToCheck);
+
+ _ASSERTE(pCodeInfo->IsValid());
+
+ // The CodeInfo should correspond to the IP we are interested in.
+ _ASSERTE(ipToCheck == pCodeInfo->GetCodeAddress());
+
+ // By default, assume it's safe to inject the abort.
+ *pSafeToInjectThreadAbort = TRUE;
+
+ // If we are inside a prolog, then we are obviously not in the epilog.
+ // It's safe to inject the abort here.
+ if (IsIPInProlog(pCodeInfo))
+ {
+ return false;
+ }
+
+ // We are not inside the prolog. We could either be in the middle of the method body or
+ // inside the epilog. While unwindInfo contains the prolog length, it does not contain the
+ // epilog length.
+ //
+ // Thus, to determine if we are inside the epilog, we use a property of RtlVirtualUnwind.
+ // When invoked for an IP, it will return NULL for the personality routine in only two scenarios:
+ //
+ // 1) The unwindInfo does not contain any personality routine information, OR
+ // 2) The IP is in prolog or epilog.
+ //
+ // For jitted code, (1) is not applicable since we *always* emit details of the managed personality routine
+ // in the unwindInfo. Thus, since we have already determined that we are not inside the prolog, if performing
+ // RtlVirtualUnwind against "ipToCheck" results in a NULL personality routine, it implies that we are inside
+ // the epilog.
+
+ DWORD64 imageBase = 0;
+ PUNWIND_INFO pUnwindInfo = NULL;
+ CONTEXT tempContext;
+ PVOID HandlerData;
+ DWORD64 establisherFrame = 0;
+ PEXCEPTION_ROUTINE personalityRoutine = NULL;
+
+ // Lookup the function entry for the IP
+ PTR_RUNTIME_FUNCTION funcEntry = pCodeInfo->GetFunctionEntry();
+
+ // We should always get a function entry for a managed method
+ _ASSERTE(funcEntry != NULL);
+
+ imageBase = pCodeInfo->GetModuleBase();
+ pUnwindInfo = (PUNWIND_INFO)(imageBase + funcEntry->UnwindData);
+
+#if defined(_DEBUG)
+ // In debug builds, assert our invariant that jitted code always has the managed personality routine
+ // specified in the unwindInfo. For this, get an IP that is inside the method body; we use
+ // the first address past the prolog.
+ PCODE ipInMethodBody = pCodeInfo->GetStartAddress() + pUnwindInfo->SizeOfProlog;
+
+ ZeroMemory(&tempContext, sizeof(CONTEXT));
+ CopyOSContext(&tempContext, pContextToCheck);
+
+ // TODO: Explain the context mismatch issue here and why the unwind is still okay.
+ personalityRoutine = RtlVirtualUnwind(UNW_FLAG_EHANDLER, // HandlerType
+ imageBase,
+ ipInMethodBody,
+ funcEntry,
+ &tempContext,
+ &HandlerData,
+ &establisherFrame,
+ NULL);
+
+ _ASSERTE(personalityRoutine != NULL);
+#endif // _DEBUG
+
+ ZeroMemory(&tempContext, sizeof(CONTEXT));
+ CopyOSContext(&tempContext, pContextToCheck);
+ KNONVOLATILE_CONTEXT_POINTERS ctxPtrs;
+ ZeroMemory(&ctxPtrs, sizeof(ctxPtrs));
+
+ personalityRoutine = RtlVirtualUnwind(UNW_FLAG_EHANDLER, // HandlerType
+ imageBase,
+ ipToCheck,
+ funcEntry,
+ &tempContext,
+ &HandlerData,
+ &establisherFrame,
+ &ctxPtrs);
+
+ bool fIsInEpilog = false;
+
+ if (personalityRoutine == NULL)
+ {
+ // We are in the epilog.
+ fIsInEpilog = true;
+
+ // Check if the context pointers have returned the address of the stack location in the hijacked function
+ // from where RBP was restored. If the address is NULL, then it implies that RBP has been popped off.
+ // Since JIT64 ensures that the pop of RBP is the last instruction before ret/jmp, it implies it's not safe
+ // to inject an abort at this point, as the EstablisherFrame (which will be based
+ // off RBP for managed code since that is the FramePointer register, as indicated in the UnwindInfo)
+ // will be off and can result in bad managed exception dispatch.
+ if (ctxPtrs.Rbp == NULL)
+ {
+ *pSafeToInjectThreadAbort = FALSE;
+ }
+ }
+
+ return fIsInEpilog;
+}
+
+#endif // defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK)
+
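+// 0x406D1388: the exception code the Visual C++ runtime raises to communicate
+// with an attached debugger (e.g. the thread-naming convention); the shim
+// below lets it continue the handler search untouched.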
+#define EXCEPTION_VISUALCPP_DEBUGGER ((DWORD) (1<<30 | 0x6D<<16 | 5000))
+
+#if defined(_TARGET_X86_)
+
+// This holder is used to capture the FPU state, reset it to what the CLR expects
+// and then restore the original state that was captured.
+//
+// FPU has a set of exception masks which the CLR expects to be always set,
+// implying that any corresponding condition will *not* result in FPU raising
+// an exception.
+//
+// However, native code (e.g. high precision math libs) can change this mask.
+// Thus, when control enters the CLR (e.g. via exception dispatch into the VEH),
+ // we will end up using floating point instructions that could satisfy the exception mask
+ // condition and raise an exception. This could result in an infinite loop, ending in a
+ // stack overflow (SO).
+//
+// We use this holder to protect applicable parts of the runtime from running into such cases.
+extern "C" void CaptureFPUContext(BYTE *pFPBUBuf);
+extern "C" void RestoreFPUContext(BYTE *pFPBUBuf);
+
+// This is FPU specific and only applicable to x86 on Windows.
+class FPUStateHolder
+{
+ // Capturing FPU state requires a 28-byte buffer
+ BYTE m_bufFPUState[28];
+
+public:
+ FPUStateHolder()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ BYTE *pFPUBuf = m_bufFPUState;
+
+ // Save the FPU state using the non-waiting instruction
+ // so that the FPU does not raise an exception in case the
+ // exception masks are unset in the FPU Control Word.
+ CaptureFPUContext(pFPUBuf);
+
+ // Reset the FPU state
+ ResetCurrentContext();
+ }
+
+ ~FPUStateHolder()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ BYTE *pFPUBuf = m_bufFPUState;
+
+ // Restore the captured FPU state
+ RestoreFPUContext(pFPUBuf);
+ }
+};
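+
+// The holder is instantiated at the top of CLRVectoredExceptionHandlerShim
+// below, so the FPU control word is sane for the whole of exception dispatch
+// and the captured state is restored when the shim returns.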
+
+#endif // defined(_TARGET_X86_)
+
+LONG WINAPI CLRVectoredExceptionHandlerShim(PEXCEPTION_POINTERS pExceptionInfo)
+{
+ //
+ // HandleManagedFault will take a Crst that causes an unbalanced
+ // notrigger scope, and this contract will whack the thread's
+ // ClrDebugState to what it was on entry in the dtor, which causes
+ // us to assert when we finally release the Crst later on.
+ //
+// CONTRACTL
+// {
+// NOTHROW;
+// GC_NOTRIGGER;
+// MODE_ANY;
+// }
+// CONTRACTL_END;
+
+ //
+ // WARNING WARNING WARNING WARNING WARNING WARNING WARNING
+ //
+ // o This function should not call functions that acquire
+ // synchronization objects or allocate memory, because this
+ // can cause problems. <-- quoteth MSDN -- probably for
+ // the same reason as we cannot use LOG(); we'll recurse
+ // into a stack overflow.
+ //
+ // o You cannot use LOG() in here because that will trigger an
+ // exception which will cause infinite recursion with this
+ // function. We work around this by ignoring all non-error
+ // exception codes, which serves as the base of the recursion.
+ // That way, we can LOG() from the rest of the function.
+ //
+ // The same goes for any function called by this
+ // function.
+ //
+ // WARNING WARNING WARNING WARNING WARNING WARNING WARNING
+ //
+
+ // If exceptions (or runtime) have been disabled, then simply return.
+ if (g_fForbidEnterEE || g_fNoExceptions)
+ {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ // WARNING
+ //
+ // We must preserve this so that GCStress=4 EH processing doesn't kill the last error.
+ // Note that even GetThread below can affect the LastError.
+ // Keep this in mind when adding code above this line!
+ //
+ // WARNING
+ DWORD dwLastError = GetLastError();
+
+#if defined(_TARGET_X86_)
+ // Capture the FPU state before we do anything involving floating point instructions
+ FPUStateHolder captureFPUState;
+#endif // defined(_TARGET_X86_)
+
+#ifdef FEATURE_INTEROP_DEBUGGING
+ // For interop debugging we have a fancy exception queueing stunt. When the debugger
+ // initially gets the first chance exception notification it may not know whether to
+ // continue it handled or unhandled, but it must continue the process to allow the
+ // in-proc helper thread to work. What it does is continue the exception unhandled which
+ // will let the thread immediately execute to this point. Inside this worker the thread
+ // will block until the debugger knows how to continue the exception. If it decides the
+ // exception was handled then we immediately resume execution as if the exception had never
+ // even been allowed to run into this handler. If it is unhandled then we keep processing
+ // this handler
+ //
+ // WARNING: This function could potentially throw an exception, however it should only
+ // be able to do so when an interop debugger is attached
+ if(g_pDebugInterface != NULL)
+ {
+ if(g_pDebugInterface->FirstChanceSuspendHijackWorker(pExceptionInfo->ContextRecord,
+ pExceptionInfo->ExceptionRecord) == EXCEPTION_CONTINUE_EXECUTION)
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+#endif
+
+
+ DWORD dwCode = pExceptionInfo->ExceptionRecord->ExceptionCode;
+ if (dwCode == DBG_PRINTEXCEPTION_C || dwCode == EXCEPTION_VISUALCPP_DEBUGGER)
+ {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+#if defined(_TARGET_X86_)
+ if (dwCode == EXCEPTION_BREAKPOINT || dwCode == EXCEPTION_SINGLE_STEP)
+ {
+ // For interop debugging, the debugger bashes our managed exception handler.
+ // Interop debugging does not work with real vectored exception handler :(
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+#endif
+
+ bool bIsGCMarker = false;
+
+#ifdef USE_REDIRECT_FOR_GCSTRESS
+ // This is AMD64 & ARM specific as the macro above is defined for AMD64 & ARM only
+ bIsGCMarker = IsGcMarker(dwCode, pExceptionInfo->ContextRecord);
+#elif defined(_TARGET_X86_) && defined(HAVE_GCCOVER)
+ // This is the equivalent of the check done in COMPlusFrameHandler, in case the exception is
+ // seen by the VEH first on x86.
+ bIsGCMarker = IsGcMarker(dwCode, pExceptionInfo->ContextRecord);
+#endif // USE_REDIRECT_FOR_GCSTRESS
+
+ // Do not update the TLS with exception details for exceptions pertaining to GCStress
+ // as they are continuable in nature.
+ if (!bIsGCMarker)
+ {
+ SaveCurrentExceptionInfo(pExceptionInfo->ExceptionRecord, pExceptionInfo->ContextRecord);
+ }
+
+
+ LONG result = EXCEPTION_CONTINUE_SEARCH;
+
+ // If we cannot obtain a Thread object, then we have no business processing any
+ // exceptions on this thread. Indeed, even checking to see if the faulting
+ // address is in JITted code is problematic if we have no Thread object, since
+ // this thread will bypass all our locks.
+ Thread *pThread = GetThread();
+
+ // Also check if the exception was in the EE or not
+ BOOL fExceptionInEE = FALSE;
+ if (!pThread)
+ {
+ // Check if the exception was in the EE only if a Thread object isn't available.
+ // This will save us from unnecessary checks
+ fExceptionInEE = IsIPInEE(pExceptionInfo->ExceptionRecord->ExceptionAddress);
+ }
+
+ // We are going to process the exception only if one of the following conditions is true:
+ //
+ // 1) We have a valid Thread object (implies exception on managed thread)
+ // 2) Not a valid Thread object but the IP is in the execution engine (implies native thread within EE faulted)
+ if (pThread || fExceptionInEE)
+ {
+ if (!bIsGCMarker)
+ result = CLRVectoredExceptionHandler(pExceptionInfo);
+ else
+ result = EXCEPTION_CONTINUE_EXECUTION;
+
+ if (EXCEPTION_EXECUTE_HANDLER == result)
+ {
+ result = EXCEPTION_CONTINUE_SEARCH;
+ }
+
+#ifdef _DEBUG
+#ifndef FEATURE_PAL
+#ifndef WIN64EXCEPTIONS
+ {
+ CantAllocHolder caHolder;
+
+ PEXCEPTION_REGISTRATION_RECORD pRecord = GetCurrentSEHRecord();
+ while (pRecord != EXCEPTION_CHAIN_END)
+ {
+ STRESS_LOG2(LF_EH, LL_INFO10000, "CLRVectoredExceptionHandlerShim: FS:0 %p:%p\n",
+ pRecord, pRecord->Handler);
+ pRecord = pRecord->Next;
+ }
+ }
+#endif // WIN64EXCEPTIONS
+
+ {
+ // The call to "CLRVectoredExceptionHandler" above can return EXCEPTION_CONTINUE_SEARCH
+ // for different scenarios like StackOverFlow/SOFT_SO, or if it is forbidden to enter the EE.
+ // Thus, if we don't have a Thread object for the thread that has faulted and we came this far
+ // because the fault was in MSCORWKS, then we work with the frame chain below only if we have
+ // a valid Thread object.
+
+ if (pThread)
+ {
+ CantAllocHolder caHolder;
+
+ TADDR* sp;
+ sp = (TADDR*)&sp;
+ DWORD count = 0;
+ void* stopPoint = pThread->GetCachedStackBase();
+ // If the Frame chain is corrupted, we may get an AV while accessing frames, and this function will be
+ // called recursively. We use the Frame chain to limit our search range. It is not a disaster if we
+ // cannot use it.
+ if (!(dwCode == STATUS_ACCESS_VIOLATION &&
+ IsIPInEE(pExceptionInfo->ExceptionRecord->ExceptionAddress)))
+ {
+ // Find the stop point (the first M2U or InternalCall transition frame)
+ Frame* pFrame = pThread->GetFrame();
+ for(;;)
+ {
+ // skip GC frames
+ if (pFrame == 0 || pFrame == (Frame*) -1)
+ break;
+
+ Frame::ETransitionType type = pFrame->GetTransitionType();
+ if (type == Frame::TT_M2U || type == Frame::TT_InternalCall)
+ {
+ stopPoint = pFrame;
+ break;
+ }
+ pFrame = pFrame->Next();
+ }
+ }
+ STRESS_LOG0(LF_EH, LL_INFO100, "CLRVectoredExceptionHandlerShim: stack");
+ while (count < 20 && sp < stopPoint)
+ {
+ if (IsIPInEE((BYTE*)*sp))
+ {
+ STRESS_LOG1(LF_EH, LL_INFO100, "%pK\n", *sp);
+ count ++;
+ }
+ sp += 1;
+ }
+ }
+ }
+#endif // !FEATURE_PAL
+#endif // _DEBUG
+
+#ifndef WIN64EXCEPTIONS
+ {
+ CantAllocHolder caHolder;
+ STRESS_LOG1(LF_EH, LL_INFO1000, "CLRVectoredExceptionHandlerShim: returning %d\n", result);
+ }
+#endif // WIN64EXCEPTIONS
+
+ }
+
+ SetLastError(dwLastError);
+
+ return result;
+}
+
+
+// Contains the handle to the registered VEH
+static PVOID g_hVectoredExceptionHandler = NULL;
+
+void CLRAddVectoredHandlers(void)
+{
+#ifndef FEATURE_PAL
+
+ // We now install a vectored exception handler on all supported Windows architectures.
+ g_hVectoredExceptionHandler = AddVectoredExceptionHandler(TRUE, (PVECTORED_EXCEPTION_HANDLER)CLRVectoredExceptionHandlerShim);
+ if (g_hVectoredExceptionHandler == NULL)
+ {
+ LOG((LF_EH, LL_INFO100, "CLRAddVectoredHandlers: AddVectoredExceptionHandler() failed\n"));
+ COMPlusThrowHR(E_FAIL);
+ }
+
+ LOG((LF_EH, LL_INFO100, "CLRAddVectoredHandlers: AddVectoredExceptionHandler() succeeded\n"));
+#endif // !FEATURE_PAL
+}
+
+// This function removes the vectored exception and continue handler registration
+// from the OS.
+void CLRRemoveVectoredHandlers(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+#ifndef FEATURE_PAL
+
+ // Unregister the vectored exception handler if one is registered (and we can).
+ if (g_hVectoredExceptionHandler != NULL)
+ {
+ // Unregister the vectored exception handler
+ if (RemoveVectoredExceptionHandler(g_hVectoredExceptionHandler) == FALSE)
+ {
+ LOG((LF_EH, LL_INFO100, "CLRRemoveVectoredHandlers: RemoveVectoredExceptionHandler() failed.\n"));
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "CLRRemoveVectoredHandlers: RemoveVectoredExceptionHandler() succeeded.\n"));
+ }
+ }
+#endif // !FEATURE_PAL
+}
+
+//
+// This does the work of the Unwind and Continue Handler inside the catch clause of that handler. The stack has not
+// been unwound when this is called. Keep that in mind when deciding where to put new code :)
+//
+void UnwindAndContinueRethrowHelperInsideCatch(Frame* pEntryFrame, Exception* pException)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ Thread* pThread = GetThread();
+
+ GCX_COOP();
+
+ LOG((LF_EH, LL_INFO1000, "UNWIND_AND_CONTINUE inside catch, unwinding frame chain\n"));
+
+ // This SetFrame is OK because we will not have frames that require ExceptionUnwind in strictly unmanaged EE
+ // code chunks which is all that an UnC handler can guard.
+ //
+ // @todo: we'd rather use UnwindFrameChain, but there is a concern: some of the ExceptionUnwind methods on some
+ // of the Frame types do a great deal of work; load classes, throw exceptions, etc. We need to decide on some
+ // policy here. Do we want to let such functions throw, etc.? Right now, we believe that there are no such
+ // frames on the stack to be unwound, so the SetFrame is alright (see the first comment above.) At the very
+ // least, we should add some way to assert that.
+ //
+ // ~FrameWithCookieHolder is also calling SetFrame() and if UnwindAndContinueRethrowHelperInsideCatch is ever changed
+ // to not call SetFrame then the change should be reflected in the FrameWithCookieHolder as well.
+ //
+ pThread->SetFrame(pEntryFrame);
+
+#ifdef _DEBUG
+ if (!NingenEnabled())
+ {
+ CONTRACT_VIOLATION(ThrowsViolation);
+ BEGIN_SO_INTOLERANT_CODE(pThread);
+ // Call CLRException::GetThrowableFromException to force us to retrieve the THROWABLE
+ // while we are still within the context of the catch block. This will help diagnose
+ // cases where the last thrown object is NULL.
+ OBJECTREF orThrowable = CLRException::GetThrowableFromException(pException);
+ CONSISTENCY_CHECK(orThrowable != NULL);
+ END_SO_INTOLERANT_CODE;
+ }
+#endif
+}
+
+//
+// This does the work of the Unwind and Continue Handler after the catch clause of that handler. The stack has been
+// unwound by the time this is called. Keep that in mind when deciding where to put new code :)
+//
+VOID DECLSPEC_NORETURN UnwindAndContinueRethrowHelperAfterCatch(Frame* pEntryFrame, Exception* pException)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ // We really should probe before switching to cooperative mode, although there's no chance
+ // we'll SO in doing that as we've just caught an exception. We can't probe just
+ // yet though, because we want to avoid reprobing on an SO exception and we need to switch
+ // to cooperative to check the throwable for an SO as well as the pException object (as the
+ // pException could be a LastThrownObjectException.) Blech.
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ GCX_COOP();
+
+ LOG((LF_EH, LL_INFO1000, "UNWIND_AND_CONTINUE caught and will rethrow\n"));
+
+ OBJECTREF orThrowable = NingenEnabled() ? NULL : CLRException::GetThrowableFromException(pException);
+ LOG((LF_EH, LL_INFO1000, "UNWIND_AND_CONTINUE got throwable %p\n",
+ OBJECTREFToObject(orThrowable)));
+
+ Exception::Delete(pException);
+
+ if (orThrowable != NULL && g_CLRPolicyRequested)
+ {
+ if (orThrowable->GetMethodTable() == g_pOutOfMemoryExceptionClass)
+ {
+ EEPolicy::HandleOutOfMemory();
+ }
+ else if (orThrowable->GetMethodTable() == g_pStackOverflowExceptionClass)
+ {
+#ifdef FEATURE_STACK_PROBE
+ EEPolicy::HandleSoftStackOverflow();
+#else
+ /* The parameters of the function do not matter here */
+ EEPolicy::HandleStackOverflow(SOD_UnmanagedFrameHandler, NULL);
+#endif
+ }
+ }
+
+ RaiseTheExceptionInternalOnly(orThrowable, FALSE);
+}
+
+void SaveCurrentExceptionInfo(PEXCEPTION_RECORD pRecord, PCONTEXT pContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if ((pRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)))
+ {
+ // If exception is unwinding the stack, the ExceptionCode may have been changed to
+ // STATUS_UNWIND if RtlUnwind is called with a NULL ExceptionRecord.
+ // Since we have captured exception info in the first pass, we don't need to capture it again.
+ return;
+ }
+
+ if (CExecutionEngine::CheckThreadStateNoCreate(TlsIdx_PEXCEPTION_RECORD))
+ {
+ BOOL fSave = TRUE;
+ if (!IsSOExceptionCode(pRecord->ExceptionCode))
+ {
+ DWORD dwLastExceptionCode = (DWORD)(SIZE_T) (ClrFlsGetValue(TlsIdx_EXCEPTION_CODE));
+ if (IsSOExceptionCode(dwLastExceptionCode))
+ {
+ PEXCEPTION_RECORD lastRecord =
+ static_cast<PEXCEPTION_RECORD> (ClrFlsGetValue(TlsIdx_PEXCEPTION_RECORD));
+
+ // We are trying to see if C++ is attempting a rethrow of an SO exception. If so,
+ // we want to prevent updating the exception details in the TLS. This is a workaround,
+ // as explained below.
+ if (pRecord->ExceptionCode == EXCEPTION_MSVC)
+ {
+ // This is a workaround.
+ // When C++ rethrows, C++ internally gets rid of the new exception record after
+ // unwinding the stack, and presents the original exception record to the thread.
+ // When we get VC's support to obtain exception record in try/catch, we will replace
+ // this code.
+ if (pRecord < lastRecord)
+ {
+ // For the C++ rethrow workaround, ensure that the last exception record is still valid and as we expect it to be.
+ //
+ // It's possible that we are still below the address of the last exception record,
+ // but since the execution stack could have changed, simply comparing its address
+ // with the address of the current exception record may not be enough.
+ //
+ // Thus, ensure that it's still valid and holds the exception code we expect it to
+ // have (i.e. the value in dwLastExceptionCode).
+ if ((lastRecord != NULL) && (lastRecord->ExceptionCode == dwLastExceptionCode))
+ {
+ fSave = FALSE;
+ }
+ }
+ }
+ }
+ }
+ if (fSave)
+ {
+ ClrFlsSetValue(TlsIdx_EXCEPTION_CODE, (void*)(size_t)(pRecord->ExceptionCode));
+ ClrFlsSetValue(TlsIdx_PEXCEPTION_RECORD, pRecord);
+ ClrFlsSetValue(TlsIdx_PCONTEXT, pContext);
+ }
+ }
+}
+
+#ifndef DACCESS_COMPILE
+//******************************************************************************
+//
+// NotifyOfCHFFilterWrapper
+//
+// Helper function to deliver notifications of CatchHandlerFound inside a
+// EX_TRY/EX_CATCH.
+//
+// Parameters:
+// pExceptionInfo - the pExceptionInfo passed to a filter function.
+// pCatcherStackAddr - a Frame* from the PAL_TRY/PAL_EXCEPT_FILTER site.
+//
+// Return:
+// always returns EXCEPTION_CONTINUE_SEARCH.
+//
+//******************************************************************************
+LONG NotifyOfCHFFilterWrapper(
+ EXCEPTION_POINTERS *pExceptionInfo, // the pExceptionInfo passed to a filter function.
+ PVOID pParam) // contains a Frame* from the PAL_TRY/PAL_EXCEPT_FILTER site.
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PVOID pCatcherStackAddr = ((NotifyOfCHFFilterWrapperParam *)pParam)->pFrame;
+ ULONG ret = EXCEPTION_CONTINUE_SEARCH;
+
+ // We are here to send an event notification to the debugger and to the appdomain. To
+ // determine if it is safe to send these notifications, check the following:
+ // 1) The thread object has been set up.
+ // 2) The thread has an exception on it.
+ // 3) The exception is the same as the one this filter is called on.
+ Thread *pThread = GetThread();
+ if ( (pThread == NULL) ||
+ (pThread->GetExceptionState()->GetContextRecord() == NULL) ||
+ (GetSP(pThread->GetExceptionState()->GetContextRecord()) != GetSP(pExceptionInfo->ContextRecord) ) )
+ {
+ LOG((LF_EH, LL_INFO1000, "NotifyOfCHFFilterWrapper: not sending notices. pThread: %0x8", pThread));
+ if (pThread)
+ {
+ LOG((LF_EH, LL_INFO1000, ", Thread SP: %0x8, Exception SP: %08x",
+ pThread->GetExceptionState()->GetContextRecord() ? GetSP(pThread->GetExceptionState()->GetContextRecord()) : NULL,
+ pExceptionInfo->ContextRecord ? GetSP(pExceptionInfo->ContextRecord) : NULL ));
+ }
+ LOG((LF_EH, LL_INFO1000, "\n"));
+ return ret;
+ }
+
+ if (g_pDebugInterface)
+ {
+ // It looks safe, so make the debugger notification.
+ ret = g_pDebugInterface->NotifyOfCHFFilter(pExceptionInfo, pCatcherStackAddr);
+ }
+
+ return ret;
+} // LONG NotifyOfCHFFilterWrapper()
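+
+// Illustrative sketch only (hypothetical call site; the exact PAL macro shapes are
+// elided rather than guessed): a filter site that routes through this wrapper
+// would look roughly like:
+//
+//     NotifyOfCHFFilterWrapperParam param;
+//     param.pFrame = pCurrentFrame;   // a Frame* at the try site (assumed local)
+//
+//     PAL_TRY(/* pass &param */)
+//     {
+//         // guarded code that may raise an exception
+//     }
+//     PAL_EXCEPT_FILTER(NotifyOfCHFFilterWrapper)
+//     {
+//         // not expected to run: the filter returns EXCEPTION_CONTINUE_SEARCH
+//     }
+//     PAL_ENDTRY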
+
+// This filter will be used to process exceptions escaping out of AD transition boundaries
+// that are not at the base of the managed thread. Those are handled in ThreadBaseRedirectingFilter.
+// This will be invoked when an exception is going unhandled from the called AppDomain.
+//
+// This can be used to do last moment work before the exception gets caught by the EX_CATCH setup
+// at the AD transition point.
+LONG AppDomainTransitionExceptionFilter(
+ EXCEPTION_POINTERS *pExceptionInfo, // the pExceptionInfo passed to a filter function.
+ PVOID pParam)
+{
+    // Ideally, we would be NOTHROW here. However, NotifyOfCHFFilterWrapper calls into
+    // NotifyOfCHFFilter, which is THROWS. Thus, to prevent a contract violation,
+    // we abide by the rules and are THROWS.
+ //
+ // Same rationale for GC_TRIGGERS as well.
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_ANY;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ ULONG ret = EXCEPTION_CONTINUE_SEARCH;
+
+ // First, call into NotifyOfCHFFilterWrapper
+ ret = NotifyOfCHFFilterWrapper(pExceptionInfo, pParam);
+
+#ifndef FEATURE_PAL
+    // Set up the watson bucketing details if the escaping
+    // exception is preallocated.
+ if (SetupWatsonBucketsForEscapingPreallocatedExceptions())
+ {
+ // Set the flag that these were captured at AD Transition
+ DEBUG_STMT(GetThread()->GetExceptionState()->GetUEWatsonBucketTracker()->SetCapturedAtADTransition());
+ }
+
+ // Attempt to capture buckets for non-preallocated exceptions just before the AppDomain transition boundary
+ {
+ GCX_COOP();
+ OBJECTREF oThrowable = GetThread()->GetThrowable();
+ if ((oThrowable != NULL) && (CLRException::IsPreallocatedExceptionObject(oThrowable) == FALSE))
+ {
+ SetupWatsonBucketsForNonPreallocatedExceptions();
+ }
+ }
+#endif // !FEATURE_PAL
+
+ return ret;
+} // LONG AppDomainTransitionExceptionFilter()
+
+// This filter will be used to process exceptions escaping out of dynamic reflection invocation.
+// Such exceptions are treated as unhandled and will eventually be caught in the VM, where they
+// become the inner exception of the TargetInvocationException that the VM throws.
+LONG ReflectionInvocationExceptionFilter(
+ EXCEPTION_POINTERS *pExceptionInfo, // the pExceptionInfo passed to a filter function.
+ PVOID pParam)
+{
+    // Ideally, we would be NOTHROW here. However, NotifyOfCHFFilterWrapper calls into
+    // NotifyOfCHFFilter, which is THROWS. Thus, to prevent a contract violation,
+    // we abide by the rules and are THROWS.
+ //
+ // Same rationale for GC_TRIGGERS as well.
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_ANY;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ ULONG ret = EXCEPTION_CONTINUE_SEARCH;
+
+ // First, call into NotifyOfCHFFilterWrapper
+ ret = NotifyOfCHFFilterWrapper(pExceptionInfo, pParam);
+
+#ifndef FEATURE_PAL
+    // Set up the watson bucketing details if the escaping
+    // exception is preallocated.
+ if (SetupWatsonBucketsForEscapingPreallocatedExceptions())
+ {
+ // Set the flag that these were captured during Reflection Invocation
+ DEBUG_STMT(GetThread()->GetExceptionState()->GetUEWatsonBucketTracker()->SetCapturedAtReflectionInvocation());
+ }
+
+ // Attempt to capture buckets for non-preallocated exceptions just before the ReflectionInvocation boundary
+ {
+ GCX_COOP();
+ OBJECTREF oThrowable = GetThread()->GetThrowable();
+ if ((oThrowable != NULL) && (CLRException::IsPreallocatedExceptionObject(oThrowable) == FALSE))
+ {
+ SetupWatsonBucketsForNonPreallocatedExceptions();
+ }
+ }
+#endif // !FEATURE_PAL
+
+ return ret;
+} // LONG ReflectionInvocationExceptionFilter()
+
+#endif // !DACCESS_COMPILE
+
+#ifdef _DEBUG
+bool DebugIsEECxxExceptionPointer(void* pv)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_NOTRIGGER;
+ MODE_ANY;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ if (pv == NULL)
+ {
+ return false;
+ }
+
+ // check whether the memory is readable in no-throw way
+ if (!isMemoryReadable((TADDR)pv, sizeof(UINT_PTR)))
+ {
+ return false;
+ }
+
+ bool retVal = false;
+
+ EX_TRY
+ {
+ UINT_PTR vtbl = *(UINT_PTR*)pv;
+
+ // ex.h
+
+ HRException boilerplate1;
+ COMException boilerplate2;
+ SEHException boilerplate3;
+
+ // clrex.h
+
+ CLRException boilerplate4;
+ CLRLastThrownObjectException boilerplate5;
+ EEException boilerplate6;
+ EEMessageException boilerplate7;
+ EEResourceException boilerplate8;
+
+ // EECOMException::~EECOMException calls FreeExceptionData, which is GC_TRIGGERS,
+ // but it won't trigger in this case because EECOMException's members remain NULL.
+ CONTRACT_VIOLATION(GCViolation);
+ EECOMException boilerplate9;
+
+ EEFieldException boilerplate10;
+ EEMethodException boilerplate11;
+ EEArgumentException boilerplate12;
+ EETypeLoadException boilerplate13;
+ EEFileLoadException boilerplate14;
+ ObjrefException boilerplate15;
+
+ UINT_PTR ValidVtbls[] =
+ {
+ *((TADDR*)&boilerplate1),
+ *((TADDR*)&boilerplate2),
+ *((TADDR*)&boilerplate3),
+ *((TADDR*)&boilerplate4),
+ *((TADDR*)&boilerplate5),
+ *((TADDR*)&boilerplate6),
+ *((TADDR*)&boilerplate7),
+ *((TADDR*)&boilerplate8),
+ *((TADDR*)&boilerplate9),
+ *((TADDR*)&boilerplate10),
+ *((TADDR*)&boilerplate11),
+ *((TADDR*)&boilerplate12),
+ *((TADDR*)&boilerplate13),
+ *((TADDR*)&boilerplate14),
+ *((TADDR*)&boilerplate15)
+ };
+
+ const int nVtbls = sizeof(ValidVtbls) / sizeof(ValidVtbls[0]);
+
+ for (int i = 0; i < nVtbls; i++)
+ {
+ if (vtbl == ValidVtbls[i])
+ {
+ retVal = true;
+ break;
+ }
+ }
+ }
+ EX_CATCH
+ {
+ // Swallow any exception out of the exception constructors above and simply return false.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return retVal;
+}
+
+void *DebugGetCxxException(EXCEPTION_RECORD* pExceptionRecord);
+
+bool DebugIsEECxxException(EXCEPTION_RECORD* pExceptionRecord)
+{
+ return DebugIsEECxxExceptionPointer(DebugGetCxxException(pExceptionRecord));
+}
+
+//
+// C++ EH cracking material gleaned from the debugger:
+// (DO NOT USE THIS KNOWLEDGE IN NON-DEBUG CODE!!!)
+//
+// EHExceptionRecord::EHParameters
+// [0] magicNumber : uint
+// [1] pExceptionObject : void*
+// [2] pThrowInfo : ThrowInfo*
+
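+// Note (an observation about MSVC's CRT, not something this code validates): the
+// magicNumber above is typically 0x19930520 (newer CRTs also use 0x19930521 and
+// 0x19930522). DebugGetCxxException below keys off the exception code and the
+// parameter count only; it does not check the magic number.
+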
+#ifdef _WIN64
+#define NUM_CXX_EXCEPTION_PARAMS 4
+#else
+#define NUM_CXX_EXCEPTION_PARAMS 3
+#endif
+
+void *DebugGetCxxException(EXCEPTION_RECORD* pExceptionRecord)
+{
+ WRAPPER_NO_CONTRACT;
+
+ bool fExCodeIsCxx = (EXCEPTION_MSVC == pExceptionRecord->ExceptionCode);
+ bool fExHasCorrectNumParams = (NUM_CXX_EXCEPTION_PARAMS == pExceptionRecord->NumberParameters);
+
+ if (fExCodeIsCxx && fExHasCorrectNumParams)
+ {
+ void** ppException = (void**)pExceptionRecord->ExceptionInformation[1];
+
+ if (NULL == ppException)
+ {
+ return NULL;
+ }
+
+ return *ppException;
+
+ }
+
+    CONSISTENCY_CHECK_MSG(!fExCodeIsCxx || fExHasCorrectNumParams, "We expected an EXCEPTION_MSVC exception to have NUM_CXX_EXCEPTION_PARAMS parameters. Did the CRT change its exception format?");
+
+ return NULL;
+}
+
+#endif // _DEBUG
+
+#endif // #ifndef DACCESS_COMPILE
+
+BOOL IsException(MethodTable *pMT) {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ ASSERT(g_pExceptionClass != NULL);
+
+ while (pMT != NULL && pMT != g_pExceptionClass) {
+ pMT = pMT->GetParentMethodTable();
+ }
+
+ return pMT != NULL;
+} // BOOL IsException()
+
+// Returns TRUE iff calling get_StackTrace on an exception of the given type ends up
+// executing code other than just Exception.get_StackTrace.
+BOOL ExceptionTypeOverridesStackTraceGetter(PTR_MethodTable pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsException(pMT));
+
+ if (pMT == g_pExceptionClass)
+ {
+ // if the type is System.Exception, it certainly doesn't override anything
+ return FALSE;
+ }
+
+ // find the slot corresponding to get_StackTrace
+ for (DWORD slot = g_pObjectClass->GetNumVirtuals(); slot < g_pExceptionClass->GetNumVirtuals(); slot++)
+ {
+ MethodDesc *pMD = g_pExceptionClass->GetMethodDescForSlot(slot);
+ LPCUTF8 name = pMD->GetName();
+
+ if (name != NULL && strcmp(name, "get_StackTrace") == 0)
+ {
+            // see if the slot is overridden by pMT
+ MethodDesc *pDerivedMD = pMT->GetMethodDescForSlot(slot);
+ return (pDerivedMD != pMD);
+ }
+ }
+
+    // get_StackTrace must exist on System.Exception, so we should never get here
+ UNREACHABLE();
+}
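+
+// Illustrative note (hypothetical managed type, not part of this file): an exception
+// type that declares "public override string StackTrace" places its own MethodDesc
+// in the get_StackTrace slot, so the helper above returns TRUE for it.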
+
+// Removes source file names/paths and line information from a stack trace generated
+// by Environment.GetStackTrace.
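+//
+// For illustration (hypothetical frame text; the exact format is produced by the
+// managed stack trace formatting code), a line such as
+//
+//     at Foo.Bar(Int32 x) in c:\src\foo.cs:line 42
+//
+// becomes
+//
+//     at Foo.Bar(Int32 x)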
+void StripFileInfoFromStackTrace(SString &ssStackTrace)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ SString::Iterator i = ssStackTrace.Begin();
+    SString::Iterator end = ssStackTrace.End(); // ensure the Truncate below is well-defined even if no frame is found
+ int countBracket = 0;
+ int position = 0;
+
+ while (i < ssStackTrace.End())
+ {
+ if (i[0] == W('('))
+ {
+            countBracket++;
+ }
+ else if (i[0] == W(')'))
+ {
+ if (countBracket == 1)
+ {
+ end = i + 1;
+ SString::Iterator j = i + 1;
+ while (j < ssStackTrace.End())
+ {
+ if (j[0] == W('\r') || j[0] == W('\n'))
+ {
+ break;
+ }
+ j++;
+ }
+ if (j > end)
+ {
+ ssStackTrace.Delete(end,j-end);
+ i = ssStackTrace.Begin() + position;
+ }
+ }
+            countBracket--;
+ }
+        i++;
+        position++;
+ }
+ ssStackTrace.Truncate(end);
+}
+
+#ifdef _DEBUG
+//==============================================================================
+// This function will set a thread state indicating if an exception is escaping
+// the last CLR personality routine on the stack in a reverse pinvoke scenario.
+//
+// If the exception continues to go unhandled, it will eventually reach the OS
+// that will start invoking the UEFs. Since CLR registers its UEF only to handle
+// unhandled exceptions on such reverse pinvoke threads, we will assert this
+// state in our UEF to ensure it does not get called for any other reason.
+//
+// This function should be called only if the personality routine returned
+// EXCEPTION_CONTINUE_SEARCH.
+//==============================================================================
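+// Note: a "reverse pinvoke thread" here is a native thread that entered managed code
+// (e.g. via a marshalled function pointer), so there is no CLR personality routine
+// below the managed portion of the stack.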
+void SetReversePInvokeEscapingUnhandledExceptionStatus(BOOL fIsUnwinding,
+#if defined(_TARGET_X86_)
+ EXCEPTION_REGISTRATION_RECORD * pEstablisherFrame
+#elif defined(WIN64EXCEPTIONS)
+ ULONG64 pEstablisherFrame
+#else
+#error Unsupported platform
+#endif
+ )
+{
+#ifndef DACCESS_COMPILE
+
+ LIMITED_METHOD_CONTRACT;
+
+ Thread *pCurThread = GetThread();
+ _ASSERTE(pCurThread);
+
+ if (pCurThread->GetExceptionState()->IsExceptionInProgress())
+ {
+ if (!fIsUnwinding)
+ {
+ // Get the top-most Frame of this thread.
+ Frame* pCurFrame = pCurThread->GetFrame();
+ Frame* pTopMostFrame = pCurFrame;
+ while (pCurFrame && (pCurFrame != FRAME_TOP))
+ {
+ pTopMostFrame = pCurFrame;
+ pCurFrame = pCurFrame->PtrNextFrame();
+ }
+
+            // Is the exception escaping the last CLR personality routine on the stack of a
+            // reverse pinvoke thread? Since the stack grows toward lower addresses, an
+            // establisher frame at a higher address than the top-most Frame lies beyond
+            // the last managed frame.
+ if (((pTopMostFrame == NULL) || (pTopMostFrame == FRAME_TOP)) ||
+ ((void *)(pEstablisherFrame) > (void *)(pTopMostFrame)))
+ {
+ LOG((LF_EH, LL_INFO100, "SetReversePInvokeEscapingUnhandledExceptionStatus: setting Ex_RPInvokeEscapingException\n"));
+ // Set the flag on the thread indicating the exception is escaping the
+ // top most reverse pinvoke exception handler.
+ pCurThread->GetExceptionState()->GetFlags()->SetReversePInvokeEscapingException();
+ }
+ }
+ else
+ {
+            // Since we are unwinding, simply unset the flag indicating an escaping unhandled exception
+            // if it was set.
+ if (pCurThread->GetExceptionState()->GetFlags()->ReversePInvokeEscapingException())
+ {
+ LOG((LF_EH, LL_INFO100, "SetReversePInvokeEscapingUnhandledExceptionStatus: unsetting Ex_RPInvokeEscapingException\n"));
+ pCurThread->GetExceptionState()->GetFlags()->ResetReversePInvokeEscapingException();
+ }
+ }
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "SetReversePInvokeEscapingUnhandledExceptionStatus: not setting Ex_RPInvokeEscapingException since no exception is in progress.\n"));
+ }
+#endif // !DACCESS_COMPILE
+}
+
+#endif // _DEBUG
+
+#ifndef FEATURE_PAL
+
+// This function will capture the watson buckets for the current exception object that:
+//
+// 1) is non-preallocated, and
+// 2) already contains the IP for watson bucketing.
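+//
+// Callers (e.g. the AD transition and reflection invocation filters above) invoke
+// this helper in cooperative GC mode with a protected throwable, matching the
+// MODE_COOPERATIVE precondition in the contract below.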
+BOOL SetupWatsonBucketsForNonPreallocatedExceptions(OBJECTREF oThrowable /* = NULL */)
+{
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_CORECLR
+ // CoreCLR may have watson bucketing conditionally enabled.
+ if (!IsWatsonEnabled())
+ {
+ return FALSE;
+ }
+#endif // FEATURE_CORECLR
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ NOTHROW;
+ PRECONDITION(GetThread() != NULL);
+ }
+ CONTRACTL_END;
+
+    // By default, assume we didn't get the buckets
+ BOOL fSetupWatsonBuckets = FALSE;
+
+ Thread * pThread = GetThread();
+
+ struct
+ {
+ OBJECTREF oThrowable;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ // Get the throwable to be used
+ gc.oThrowable = (oThrowable != NULL) ? oThrowable : pThread->GetThrowable();
+ if (gc.oThrowable == NULL)
+ {
+        // If we have no throwable, then simply return.
+        //
+        // We could be here because the VM, and not managed code, may have raised an
+        // exception for its internal usage (e.g. a TA to end the threads when
+        // unloading an AppDomain). Thus, there would be no throwable present since
+        // the exception has not been seen by the runtime's personality routine.
+        //
+        // Hence, we have no work to do here.
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForNonPreallocatedExceptions - No throwable available.\n"));
+ goto done;
+ }
+
+ // The exception object should be non-preallocated
+ _ASSERTE(!CLRException::IsPreallocatedExceptionObject(gc.oThrowable));
+
+ if (((EXCEPTIONREF)gc.oThrowable)->AreWatsonBucketsPresent() == FALSE)
+ {
+ // Attempt to capture the watson buckets since they are not present.
+ UINT_PTR ip = ((EXCEPTIONREF)gc.oThrowable)->GetIPForWatsonBuckets();
+ if (ip != NULL)
+ {
+ // Attempt to capture the buckets
+ PTR_VOID pBuckets = GetBucketParametersForManagedException(ip, TypeOfReportedError::UnhandledException, pThread, &gc.oThrowable);
+ if (pBuckets != NULL)
+ {
+ // Got the buckets - save them to the exception object
+ fSetupWatsonBuckets = FALSE;
+ EX_TRY
+ {
+ fSetupWatsonBuckets = CopyWatsonBucketsToThrowable(pBuckets, gc.oThrowable);
+ }
+ EX_CATCH
+ {
+ // OOM can bring us here
+ fSetupWatsonBuckets = FALSE;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (!fSetupWatsonBuckets)
+ {
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForNonPreallocatedExceptions - Unable to copy buckets to throwable likely due to OOM.\n"));
+ }
+ else
+ {
+ // Clear the saved IP since we have captured the buckets
+ ((EXCEPTIONREF)gc.oThrowable)->SetIPForWatsonBuckets(NULL);
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForNonPreallocatedExceptions - Buckets copied to throwable.\n"));
+ }
+ FreeBucketParametersForManagedException(pBuckets);
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForNonPreallocatedExceptions - Unable to capture buckets from IP likely due to OOM.\n"));
+ }
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForNonPreallocatedExceptions - No IP available to capture buckets from.\n"));
+ }
+ }
+
+done:;
+ GCPROTECT_END();
+
+ return fSetupWatsonBuckets;
+#else // DACCESS_COMPILE
+ return FALSE;
+#endif // !DACCESS_COMPILE
+}
+
+// When exceptions are escaping out of various transition boundaries,
+// we will need to capture bucket details for the original exception
+// before the exception goes across the boundary to the caller.
+//
+// Examples of such boundaries include:
+//
+// 1) AppDomain transition boundaries (these are physical transition boundaries)
+// 2) Dynamic method invocation in Reflection (these are logical transition boundaries).
+//
+// This function will capture the bucketing details in the UE tracker so that
+// they can be used once we cross over.
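+//
+// (AppDomainTransitionExceptionFilter and ReflectionInvocationExceptionFilter above
+// call this helper just before their respective boundaries.)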
+BOOL SetupWatsonBucketsForEscapingPreallocatedExceptions()
+{
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_CORECLR
+ // CoreCLR may have watson bucketing conditionally enabled.
+ if (!IsWatsonEnabled())
+ {
+ return FALSE;
+ }
+#endif // FEATURE_CORECLR
+
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_ANY;
+ NOTHROW;
+ PRECONDITION(GetThread() != NULL);
+ }
+ CONTRACTL_END;
+
+    // By default, assume we didn't get the buckets
+ BOOL fSetupWatsonBuckets = FALSE;
+ PTR_EHWatsonBucketTracker pUEWatsonBucketTracker;
+
+ Thread * pThread = GetThread();
+
+    // If the exception going unhandled is preallocated, then capture the Watson buckets in the UE Watson
+    // bucket tracker, provided it's not already populated.
+ //
+ // Switch to COOP mode
+ GCX_COOP();
+
+ struct
+ {
+ OBJECTREF oThrowable;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ // Get the throwable corresponding to the escaping exception
+ gc.oThrowable = pThread->GetThrowable();
+ if (gc.oThrowable == NULL)
+ {
+        // If we have no throwable, then simply return.
+        //
+        // We could be here because the VM, and not managed code, may have raised an
+        // exception for its internal usage (e.g. a TA to end the threads when
+        // unloading an AppDomain). Thus, there would be no throwable present since
+        // the exception has not been seen by the runtime's personality routine.
+        //
+        // Hence, we have no work to do here.
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForEscapingPreallocatedExceptions - No throwable available.\n"));
+ goto done;
+ }
+
+ // Is the exception preallocated? We are not going to process non-preallocated exception objects since
+ // they already have the watson buckets in them.
+ //
+ // We skip thread abort as well since we track them in the UE watson bucket tracker at
+ // throw time itself.
+ if (!((CLRException::IsPreallocatedExceptionObject(gc.oThrowable)) &&
+ !IsThrowableThreadAbortException(gc.oThrowable)))
+ {
+        // It's either not preallocated or it's a thread abort exception,
+        // neither of which we need to process.
+ goto done;
+ }
+
+    // The UE watson bucket tracker could be non-empty if there were earlier transitions
+    // on the thread's stack before the exception got raised.
+ pUEWatsonBucketTracker = pThread->GetExceptionState()->GetUEWatsonBucketTracker();
+ _ASSERTE(pUEWatsonBucketTracker != NULL);
+
+ // Proceed to capture bucketing details only if the UE watson bucket tracker is empty.
+ if((pUEWatsonBucketTracker->RetrieveWatsonBucketIp() == NULL) && (pUEWatsonBucketTracker->RetrieveWatsonBuckets() == NULL))
+ {
+ // Get the Watson Bucket tracker for this preallocated exception
+ PTR_EHWatsonBucketTracker pCurWatsonBucketTracker = GetWatsonBucketTrackerForPreallocatedException(gc.oThrowable, FALSE);
+
+ if (pCurWatsonBucketTracker != NULL)
+ {
+ // If the tracker exists, we must have the throw site IP
+ _ASSERTE(pCurWatsonBucketTracker->RetrieveWatsonBucketIp() != NULL);
+
+ // Init the UE Watson bucket tracker
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+
+ // Copy the Bucket details to the UE watson bucket tracker
+ pUEWatsonBucketTracker->CopyEHWatsonBucketTracker(*(pCurWatsonBucketTracker));
+
+            // If the buckets don't exist, capture them now
+ if (pUEWatsonBucketTracker->RetrieveWatsonBuckets() == NULL)
+ {
+ pUEWatsonBucketTracker->CaptureUnhandledInfoForWatson(TypeOfReportedError::UnhandledException, pThread, &gc.oThrowable);
+ }
+
+ // If the IP was in managed code, we will have the buckets.
+ if(pUEWatsonBucketTracker->RetrieveWatsonBuckets() != NULL)
+ {
+ fSetupWatsonBuckets = TRUE;
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForEscapingPreallocatedExceptions - Captured watson buckets for preallocated exception at transition.\n"));
+ }
+ else
+ {
+                // IP was likely in native code - hence, the watson helper functions couldn't get us the buckets
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForEscapingPreallocatedExceptions - Watson buckets not found for IP. IP likely in native code.\n"));
+
+ // Clear the UE tracker
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ }
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForEscapingPreallocatedExceptions - Watson bucket tracker for preallocated exception not found. Exception likely thrown in native code.\n"));
+ }
+ }
+
+done:;
+ GCPROTECT_END();
+
+ return fSetupWatsonBuckets;
+#else // DACCESS_COMPILE
+ return FALSE;
+#endif // !DACCESS_COMPILE
+}
+
+// This function is invoked from the UEF worker to set up the watson buckets
+// for the exception going unhandled, if details are available. See
+// implementation below for specifics.
+void SetupWatsonBucketsForUEF(BOOL fUseLastThrownObject)
+{
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_CORECLR
+ // CoreCLR may have watson bucketing conditionally enabled.
+ if (!IsWatsonEnabled())
+ {
+ return;
+ }
+#endif // FEATURE_CORECLR
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_ANY;
+ NOTHROW;
+ PRECONDITION(GetThread() != NULL);
+ }
+ CONTRACTL_END;
+
+ Thread *pThread = GetThread();
+
+ PTR_EHWatsonBucketTracker pCurWatsonBucketTracker = NULL;
+ ThreadExceptionState *pExState = pThread->GetExceptionState();
+ _ASSERTE(pExState != NULL);
+
+ // If the exception tracker exists, then copy the bucketing details
+ // from it to the UE Watson Bucket tracker.
+ //
+ // On 64bit, the EH system allocates the EHTracker only in the case of an exception.
+    // Thus, assume a reverse pinvoke thread transitions to managed code from native,
+    // does some work in managed code, and returns to native code.
+ //
+ // In the native code, it has an exception that goes unhandled and the OS
+ // ends up invoking our UEF, and thus, we land up here.
+ //
+    // In such a case, on 64bit, we won't have an exception tracker since there
+    // was no managed exception active. On 32bit, we will have a tracker,
+    // but there won't be an IP corresponding to the throw site since the exception
+    // was raised in native code.
+ //
+ // But if the tracker exists, simply copy the bucket details to the UE Watson Bucket
+ // tracker for use by the "WatsonLastChance" path.
+ BOOL fDoWeHaveWatsonBuckets = FALSE;
+ if (pExState->GetCurrentExceptionTracker() != NULL)
+ {
+ // Check the exception state if we have Watson bucket details
+ fDoWeHaveWatsonBuckets = pExState->GetFlags()->GotWatsonBucketDetails();
+ }
+
+ // Switch to COOP mode before working with the throwable
+ GCX_COOP();
+
+ // Get the throwable we are going to work with
+ struct
+ {
+ OBJECTREF oThrowable;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ gc.oThrowable = fUseLastThrownObject ? pThread->LastThrownObject() : pThread->GetThrowable();
+ BOOL fThrowableExists = (gc.oThrowable != NULL);
+ BOOL fIsThrowablePreallocated = !fThrowableExists ? FALSE : CLRException::IsPreallocatedExceptionObject(gc.oThrowable);
+
+ if ((!fDoWeHaveWatsonBuckets) && fThrowableExists)
+ {
+        // Check the throwable for buckets - this could be the scenario
+        // of native code calling into a non-default domain, with an AD
+        // transition in between that could reraise the exception; that reraise would
+        // never be seen by our exception handler. Thus, there won't be any tracker
+        // or tracker state.
+ //
+ // Invocation of entry point on WLC via reverse pinvoke is an example.
+ if (!fIsThrowablePreallocated)
+ {
+ fDoWeHaveWatsonBuckets = ((EXCEPTIONREF)gc.oThrowable)->AreWatsonBucketsPresent();
+ if (!fDoWeHaveWatsonBuckets)
+ {
+                // If buckets are not present, then we may have an IP to capture the buckets from.
+ fDoWeHaveWatsonBuckets = ((EXCEPTIONREF)gc.oThrowable)->IsIPForWatsonBucketsPresent();
+ }
+ }
+ else
+ {
+ // Get the watson bucket tracker for the preallocated exception
+ PTR_EHWatsonBucketTracker pCurWBTracker = GetWatsonBucketTrackerForPreallocatedException(gc.oThrowable, FALSE);
+
+ // We would have buckets if we have the IP
+ if (pCurWBTracker && (pCurWBTracker->RetrieveWatsonBucketIp() != NULL))
+ {
+ fDoWeHaveWatsonBuckets = TRUE;
+ }
+ }
+ }
+
+ if (fDoWeHaveWatsonBuckets)
+ {
+ // Get the UE Watson bucket tracker
+ PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = pExState->GetUEWatsonBucketTracker();
+
+ // Clear any existing information
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+
+ if (fIsThrowablePreallocated)
+ {
+ // Get the watson bucket tracker for the preallocated exception
+ PTR_EHWatsonBucketTracker pCurWBTracker = GetWatsonBucketTrackerForPreallocatedException(gc.oThrowable, FALSE);
+
+ if (pCurWBTracker != NULL)
+ {
+                // We should have an IP for this exception at this point
+ _ASSERTE(pCurWBTracker->RetrieveWatsonBucketIp() != NULL);
+
+ // Copy the existing bucketing details to the UE tracker
+ pUEWatsonBucketTracker->CopyEHWatsonBucketTracker(*(pCurWBTracker));
+
+                // Get the buckets if we don't already have them since we
+                // don't want to overwrite existing bucket information (e.g.
+                // from an AD transition)
+ if (pUEWatsonBucketTracker->RetrieveWatsonBuckets() == NULL)
+ {
+ pUEWatsonBucketTracker->CaptureUnhandledInfoForWatson(TypeOfReportedError::UnhandledException, pThread, &gc.oThrowable);
+ if (pUEWatsonBucketTracker->RetrieveWatsonBuckets() != NULL)
+ {
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForUEF: Collected watson bucket information for preallocated exception\n"));
+ }
+ else
+ {
+ // If we are here, then one of the following could have happened:
+ //
+                        // 1) pCurWBTracker had buckets but we couldn't copy them over to pUEWatsonBucketTracker due to OOM, or
+                        // 2) pCurWBTracker's IP was in native code; thus pUEWatsonBucketTracker->CaptureUnhandledInfoForWatson()
+                        //    couldn't get us the watson buckets.
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForUEF: Unable to collect watson bucket information for preallocated exception due to OOM or IP being in native code.\n"));
+ }
+ }
+ }
+ else
+ {
+ // We likely had an OOM earlier (while copying the bucket information) if we are here
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForUEF: Watson bucket tracker for preallocated exception not found.\n"));
+ }
+ }
+ else
+ {
+ // Throwable is not preallocated - get the bucket details from it for use by Watson
+ _ASSERTE_MSG(((EXCEPTIONREF)gc.oThrowable)->AreWatsonBucketsPresent() ||
+ ((EXCEPTIONREF)gc.oThrowable)->IsIPForWatsonBucketsPresent(),
+                         "How come we don't have watson buckets (or an IP) for a non-preallocated exception in the UEF?");
+
+ if ((((EXCEPTIONREF)gc.oThrowable)->AreWatsonBucketsPresent() == FALSE) &&
+ ((EXCEPTIONREF)gc.oThrowable)->IsIPForWatsonBucketsPresent())
+ {
+ // Capture the buckets using the IP we have.
+ SetupWatsonBucketsForNonPreallocatedExceptions(gc.oThrowable);
+ }
+
+ if (((EXCEPTIONREF)gc.oThrowable)->AreWatsonBucketsPresent())
+ {
+ pUEWatsonBucketTracker->CopyBucketsFromThrowable(gc.oThrowable);
+ }
+
+ if (pUEWatsonBucketTracker->RetrieveWatsonBuckets() == NULL)
+ {
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForUEF: Unable to copy watson buckets from regular exception throwable (%p), likely due to OOM.\n",
+ OBJECTREFToObject(gc.oThrowable)));
+ }
+ }
+ }
+ else
+ {
+        // We don't have the watson buckets; the exception was in native code that we don't care about
+        LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForUEF: We don't have watson buckets - likely an exception in native code.\n"));
+ }
+
+ GCPROTECT_END();
+#endif // !DACCESS_COMPILE
+}
+
+// Given a throwable, this function will return a BOOL indicating
+// if it corresponds to any of the following thread abort exception
+// objects:
+//
+// 1) Regular allocated ThreadAbortException
+// 2) Preallocated ThreadAbortException
+// 3) Preallocated RudeThreadAbortException
+BOOL IsThrowableThreadAbortException(OBJECTREF oThrowable)
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ NOTHROW;
+ PRECONDITION(GetThread() != NULL);
+ PRECONDITION(oThrowable != NULL);
+ }
+ CONTRACTL_END;
+
+ BOOL fIsTAE = FALSE;
+
+ struct
+ {
+ OBJECTREF oThrowable;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ gc.oThrowable = oThrowable;
+
+ fIsTAE = (IsExceptionOfType(kThreadAbortException,&(gc.oThrowable)) || // regular TAE
+ ((g_pPreallocatedThreadAbortException != NULL) &&
+ (gc.oThrowable == CLRException::GetPreallocatedThreadAbortException())) ||
+ ((g_pPreallocatedRudeThreadAbortException != NULL) &&
+ (gc.oThrowable == CLRException::GetPreallocatedRudeThreadAbortException())));
+
+ GCPROTECT_END();
+
+ return fIsTAE;
+
+#else // DACCESS_COMPILE
+ return FALSE;
+#endif // !DACCESS_COMPILE
+}
+
+// Given a throwable, this function will walk the exception tracker
+// list to return the tracker, if available, corresponding to the preallocated
+// exception object.
+//
+// The caller can also specify the starting EHTracker to walk the list from.
+// If not specified, this will default to the current exception tracker active
+// on the thread.
+#if defined(WIN64EXCEPTIONS)
+PTR_ExceptionTracker GetEHTrackerForPreallocatedException(OBJECTREF oPreAllocThrowable,
+ PTR_ExceptionTracker pStartingEHTracker)
+#elif defined(_TARGET_X86_)
+PTR_ExInfo GetEHTrackerForPreallocatedException(OBJECTREF oPreAllocThrowable,
+ PTR_ExInfo pStartingEHTracker)
+#else
+#error Unsupported platform
+#endif
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ NOTHROW;
+ PRECONDITION(GetThread() != NULL);
+ PRECONDITION(oPreAllocThrowable != NULL);
+ PRECONDITION(CLRException::IsPreallocatedExceptionObject(oPreAllocThrowable));
+ PRECONDITION(IsWatsonEnabled());
+ }
+ CONTRACTL_END;
+
+ // Get the reference to the current exception tracker
+#if defined(WIN64EXCEPTIONS)
+ PTR_ExceptionTracker pEHTracker = (pStartingEHTracker != NULL) ? pStartingEHTracker : GetThread()->GetExceptionState()->GetCurrentExceptionTracker();
+#elif defined(_TARGET_X86_)
+    PTR_ExInfo pEHTracker = (pStartingEHTracker != NULL) ? pStartingEHTracker : GetThread()->GetExceptionState()->GetCurrentExceptionTracker();
+#else // !(WIN64EXCEPTIONS || _TARGET_X86_)
+#error Unsupported platform
+#endif // WIN64EXCEPTIONS
+
+ BOOL fFoundTracker = FALSE;
+
+ struct
+ {
+ OBJECTREF oPreAllocThrowable;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ gc.oPreAllocThrowable = oPreAllocThrowable;
+
+    // Start walking the list to find the tracker corresponding
+ // to the preallocated exception object.
+ while (pEHTracker != NULL)
+ {
+ if (pEHTracker->GetThrowable() == gc.oPreAllocThrowable)
+ {
+ // found the tracker - break out.
+ fFoundTracker = TRUE;
+ break;
+ }
+
+ // move to the previous tracker...
+ pEHTracker = pEHTracker->GetPreviousExceptionTracker();
+ }
+
+ GCPROTECT_END();
+
+ return fFoundTracker ? pEHTracker : NULL;
+}
+
+// This function will return the pointer to EHWatsonBucketTracker corresponding to the
+// preallocated exception object. If none is found, it will return NULL.
+PTR_EHWatsonBucketTracker GetWatsonBucketTrackerForPreallocatedException(OBJECTREF oPreAllocThrowable,
+ BOOL fCaptureBucketsIfNotPresent,
+ BOOL fStartSearchFromPreviousTracker /*= FALSE*/)
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ NOTHROW;
+ PRECONDITION(GetThread() != NULL);
+ PRECONDITION(oPreAllocThrowable != NULL);
+ PRECONDITION(CLRException::IsPreallocatedExceptionObject(oPreAllocThrowable));
+ PRECONDITION(IsWatsonEnabled());
+ }
+ CONTRACTL_END;
+
+ PTR_EHWatsonBucketTracker pWBTracker = NULL;
+
+ struct
+ {
+ OBJECTREF oPreAllocThrowable;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ gc.oPreAllocThrowable = oPreAllocThrowable;
+
+ // Before doing anything, check if this is a thread abort exception. If it is,
+ // then simply return the reference to the UE watson bucket tracker since it
+ // tracks the bucketing details for all types of TAE.
+ if (IsThrowableThreadAbortException(gc.oPreAllocThrowable))
+ {
+ pWBTracker = GetThread()->GetExceptionState()->GetUEWatsonBucketTracker();
+ LOG((LF_EH, LL_INFO100, "GetWatsonBucketTrackerForPreallocatedException - Setting UE Watson Bucket Tracker to be returned for preallocated ThreadAbortException.\n"));
+ goto doValidation;
+ }
+
+ // Find the reference to the exception tracker corresponding to the preallocated exception,
+ // starting the search from the current exception tracker (2nd arg of NULL specifies that).
+#if defined(WIN64EXCEPTIONS)
+ PTR_ExceptionTracker pEHTracker = NULL;
+ PTR_ExceptionTracker pPreviousEHTracker = NULL;
+
+#elif defined(_TARGET_X86_)
+    PTR_ExInfo pEHTracker = NULL;
+    PTR_ExInfo pPreviousEHTracker = NULL;
+#else // !(WIN64EXCEPTIONS || _TARGET_X86_)
+#error Unsupported platform
+#endif // WIN64EXCEPTIONS
+
+ if (fStartSearchFromPreviousTracker)
+ {
+ // Get the exception tracker previous to the current one
+ pPreviousEHTracker = GetThread()->GetExceptionState()->GetCurrentExceptionTracker()->GetPreviousExceptionTracker();
+
+        // If there is no previous tracker to start from, then simply abort the search attempt,
+        // since without an exception tracker the buckets are not available.
+ if (pPreviousEHTracker == NULL)
+ {
+            LOG((LF_EH, LL_INFO100, "GetWatsonBucketTrackerForPreallocatedException - Couldn't find the previous EHTracker to start the search from.\n"));
+ pWBTracker = NULL;
+ goto done;
+ }
+ }
+
+ pEHTracker = GetEHTrackerForPreallocatedException(gc.oPreAllocThrowable, pPreviousEHTracker);
+
+    // If we couldn't find the exception tracker, then buckets are not available
+ if (pEHTracker == NULL)
+ {
+        LOG((LF_EH, LL_INFO100, "GetWatsonBucketTrackerForPreallocatedException - Couldn't find EHTracker for preallocated exception object.\n"));
+ pWBTracker = NULL;
+ goto done;
+ }
+
+ // Get the Watson Bucket Tracker from the exception tracker
+ pWBTracker = pEHTracker->GetWatsonBucketTracker();
+
+doValidation:
+ _ASSERTE(pWBTracker != NULL);
+
+    // In case of an OOM, we may not have an IP in the Watson bucket tracker. A scenario
+ // would be default domain calling to AD 2 that calls into AD 3.
+ //
+ // AD 3 has an exception that is represented by a preallocated exception object. The
+ // exception goes unhandled and reaches AD2/AD3 transition boundary. The bucketing details
+ // from AD3 are copied to UETracker and once the exception is reraised in AD2, we will
+ // enter SetupInitialThrowBucketingDetails to copy the bucketing details to the active
+ // exception tracker.
+ //
+ // This copy operation could fail due to OOM and the active exception tracker in AD 2,
+ // for the preallocated exception object, will not have any bucketing details. If the
+ // exception remains unhandled in AD 2, then just before it reaches DefDomain/AD2 boundary,
+ // we will attempt to capture the bucketing details in AppDomainTransitionExceptionFilter,
+ // that will bring us here.
+ //
+    // In such a case, the active exception tracker will not have any bucket details for the
+    // preallocated exception. If the IP does not exist either, we will return NULL
+    // indicating that we couldn't find the Watson bucket tracker, since returning a tracker
+    // that does not have any bucketing details will be of no use to the caller.
+ if (pWBTracker->RetrieveWatsonBucketIp() != NULL)
+ {
+ // Check if the buckets exist or not..
+ PTR_VOID pBuckets = pWBTracker->RetrieveWatsonBuckets();
+
+ // If they dont exist and we have been asked to collect them,
+ // then do so.
+ if (pBuckets == NULL)
+ {
+ if (fCaptureBucketsIfNotPresent)
+ {
+ pWBTracker->CaptureUnhandledInfoForWatson(TypeOfReportedError::UnhandledException, GetThread(), &gc.oPreAllocThrowable);
+
+ // Check if we have the buckets now
+ if (pWBTracker->RetrieveWatsonBuckets() != NULL)
+ {
+ LOG((LF_EH, LL_INFO100, "GetWatsonBucketTrackerForPreallocatedException - Captured watson buckets for preallocated exception object.\n"));
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "GetWatsonBucketTrackerForPreallocatedException - Unable to capture watson buckets for preallocated exception object due to OOM.\n"));
+ }
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "GetWatsonBucketTrackerForPreallocatedException - Found IP but no buckets for preallocated exception object.\n"));
+ }
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "GetWatsonBucketTrackerForPreallocatedException - Buckets already exist for preallocated exception object.\n"));
+ }
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "GetWatsonBucketTrackerForPreallocatedException - Returning NULL EHWatsonBucketTracker since bucketing IP does not exist. This is likely due to an earlier OOM.\n"));
+ pWBTracker = NULL;
+ }
+
+done:;
+
+ GCPROTECT_END();
+
+ // Return the Watson bucket tracker
+ return pWBTracker;
+#else // DACCESS_COMPILE
+ return NULL;
+#endif // !DACCESS_COMPILE
+}
+
+// Given an exception object, this function will attempt to look up
+// the watson buckets for it and set them up against the thread
+// for use by the FailFast mechanism.
+// Returns TRUE when it succeeds or when Watson is disabled on CoreCLR.
+// Returns FALSE when refException has neither buckets nor an inner exception.
+BOOL SetupWatsonBucketsForFailFast(EXCEPTIONREF refException)
+{
+ BOOL fResult = TRUE;
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_CORECLR
+ // On CoreCLR, Watson may not be enabled. Thus, we should
+ // skip this.
+ if (!IsWatsonEnabled())
+ {
+ return fResult;
+ }
+#endif // FEATURE_CORECLR
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_ANY;
+ NOTHROW;
+ PRECONDITION(GetThread() != NULL);
+ PRECONDITION(refException != NULL);
+ PRECONDITION(IsWatsonEnabled());
+ }
+ CONTRACTL_END;
+
+ // Switch to COOP mode
+ GCX_COOP();
+
+ struct
+ {
+ OBJECTREF refException;
+ OBJECTREF oInnerMostExceptionThrowable;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+ gc.refException = refException;
+
+ Thread *pThread = GetThread();
+
+    // If we don't already have the bucketing details for the exception
+    // being thrown, then get them.
+ ThreadExceptionState *pExState = pThread->GetExceptionState();
+
+ // Check if the exception object is preallocated or not
+ BOOL fIsPreallocatedException = CLRException::IsPreallocatedExceptionObject(gc.refException);
+
+ // Get the WatsonBucketTracker where bucketing details will be copied to
+ PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = pExState->GetUEWatsonBucketTracker();
+
+ // Check if this is a thread abort exception of any kind.
+ // See IsThrowableThreadAbortException implementation for details.
+ BOOL fIsThreadAbortException = IsThrowableThreadAbortException(gc.refException);
+
+ if (fIsPreallocatedException)
+ {
+ // If the exception being used to FailFast is preallocated,
+ // then it cannot have any inner exception. Thus, try to
+ // find the watson bucket tracker corresponding to this exception.
+ //
+        // Also, capture the buckets if we don't have them already.
+ PTR_EHWatsonBucketTracker pTargetWatsonBucketTracker = GetWatsonBucketTrackerForPreallocatedException(gc.refException, TRUE);
+ if ((pTargetWatsonBucketTracker != NULL) && (!fIsThreadAbortException))
+ {
+ // Buckets are not captured proactively for preallocated exception objects. We only
+ // save the IP in the watson bucket tracker (see SetupInitialThrowBucketingDetails for
+ // details).
+ //
+ // Thus, if, say in DefDomain, a preallocated exception is thrown and we enter
+ // the catch block and invoke the FailFast API with the reference to the preallocated
+ // exception object, we will have the IP but not the buckets. In such a case,
+ // capture the buckets before proceeding ahead.
+ if (pTargetWatsonBucketTracker->RetrieveWatsonBuckets() == NULL)
+ {
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForFailFast - Collecting watson bucket details for preallocated exception.\n"));
+ pTargetWatsonBucketTracker->CaptureUnhandledInfoForWatson(TypeOfReportedError::UnhandledException, pThread, &gc.refException);
+ }
+
+ // Copy the buckets to the UE tracker
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ pUEWatsonBucketTracker->CopyEHWatsonBucketTracker(*pTargetWatsonBucketTracker);
+ if (pUEWatsonBucketTracker->RetrieveWatsonBuckets() != NULL)
+ {
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForFailFast - Collected watson bucket details for preallocated exception in UE tracker.\n"));
+ }
+ else
+ {
+ // If we are here, then the copy operation above had an OOM, resulting
+ // in no buckets for us.
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForFailFast - Unable to collect watson bucket details for preallocated exception due to out of memory.\n"));
+
+ // Make sure the tracker is clean.
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ }
+ }
+ else
+ {
+ // For TAE, UE watson bucket tracker is the one that tracks the buckets. It *may*
+ // not have the bucket details if FailFast is being invoked from outside the
+ // managed EH clauses. But if invoked from within the active EH clause for the exception,
+ // UETracker will have the bucketing details (see SetupInitialThrowBucketingDetails for details).
+ if (fIsThreadAbortException && (pUEWatsonBucketTracker->RetrieveWatsonBuckets() != NULL))
+ {
+ _ASSERTE(pTargetWatsonBucketTracker == pUEWatsonBucketTracker);
+                LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForFailFast - UE tracker already has watson bucket details for the preallocated thread abort exception.\n"));
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "SetupWatsonBucketsForFailFast - Unable to find bucket details for preallocated %s exception.\n",
+ fIsThreadAbortException?"rude/thread abort":""));
+
+ // Make sure the tracker is clean.
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ }
+ }
+ }
+ else
+ {
+        // Since the exception object is not preallocated, start by assuming
+        // that we don't need to check it for watson buckets
+ BOOL fCheckThrowableForWatsonBuckets = FALSE;
+
+ // Get the innermost exception object (if any)
+ gc.oInnerMostExceptionThrowable = ((EXCEPTIONREF)gc.refException)->GetBaseException();
+ if (gc.oInnerMostExceptionThrowable != NULL)
+ {
+ if (CLRException::IsPreallocatedExceptionObject(gc.oInnerMostExceptionThrowable))
+ {
+                    // If the innermost exception being used to FailFast is preallocated,
+                    // try to find the watson bucket tracker corresponding to it.
+                    //
+                    // Also, capture the buckets if we don't have them already.
+ PTR_EHWatsonBucketTracker pTargetWatsonBucketTracker =
+ GetWatsonBucketTrackerForPreallocatedException(gc.oInnerMostExceptionThrowable, TRUE);
+
+ if (pTargetWatsonBucketTracker != NULL)
+ {
+ if (pTargetWatsonBucketTracker->RetrieveWatsonBuckets() == NULL)
+ {
+ LOG((LF_EH, LL_INFO1000, "SetupWatsonBucketsForFailFast - Capturing Watson bucket details for preallocated inner exception.\n"));
+ pTargetWatsonBucketTracker->CaptureUnhandledInfoForWatson(TypeOfReportedError::UnhandledException, pThread, &gc.oInnerMostExceptionThrowable);
+ }
+
+ // Copy the details to the UE tracker
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ pUEWatsonBucketTracker->CopyEHWatsonBucketTracker(*pTargetWatsonBucketTracker);
+ if (pUEWatsonBucketTracker->RetrieveWatsonBuckets() != NULL)
+ {
+ LOG((LF_EH, LL_INFO1000, "SetupWatsonBucketsForFailFast - Watson bucket details collected for preallocated inner exception.\n"));
+ }
+ else
+ {
+ // If we are here, copy operation failed likely due to OOM
+ LOG((LF_EH, LL_INFO1000, "SetupWatsonBucketsForFailFast - Unable to copy watson bucket details for preallocated inner exception.\n"));
+
+ // Keep the UETracker clean
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ }
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO1000, "SetupWatsonBucketsForFailFast - Unable to find bucket details for preallocated inner exception.\n"));
+
+ // Keep the UETracker clean
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+
+                    // Since we couldn't find the watson bucket tracker for the innermost exception,
+                    // try to look for the buckets in the throwable.
+ fCheckThrowableForWatsonBuckets = TRUE;
+ }
+ }
+ else
+ {
+                // Innermost exception is not preallocated.
+ //
+ // If it has the IP but not the buckets, then capture them now.
+ if ((((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->AreWatsonBucketsPresent() == FALSE) &&
+ (((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->IsIPForWatsonBucketsPresent()))
+ {
+ SetupWatsonBucketsForNonPreallocatedExceptions(gc.oInnerMostExceptionThrowable);
+ }
+
+ // If it has the buckets, copy them over to the current Watson bucket tracker
+ if (((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->AreWatsonBucketsPresent())
+ {
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ pUEWatsonBucketTracker->CopyBucketsFromThrowable(gc.oInnerMostExceptionThrowable);
+ if (pUEWatsonBucketTracker->RetrieveWatsonBuckets() != NULL)
+ {
+ LOG((LF_EH, LL_INFO1000, "SetupWatsonBucketsForFailFast - Got watson buckets from regular innermost exception.\n"));
+ }
+ else
+ {
+ // Copy operation can fail due to OOM
+ LOG((LF_EH, LL_INFO1000, "SetupWatsonBucketsForFailFast - Unable to copy watson buckets from regular innermost exception, likely due to OOM.\n"));
+ }
+ }
+ else
+ {
+                    // Since the innermost exception didn't have the buckets,
+                    // try to look for them in the throwable
+ fCheckThrowableForWatsonBuckets = TRUE;
+ LOG((LF_EH, LL_INFO1000, "SetupWatsonBucketsForFailFast - Neither exception object nor its inner exception has watson buckets.\n"));
+ }
+ }
+ }
+ else
+ {
+ // There is no innermost exception - try to look for buckets
+ // in the throwable
+ fCheckThrowableForWatsonBuckets = TRUE;
+ LOG((LF_EH, LL_INFO1000, "SetupWatsonBucketsForFailFast - Innermost exception does not exist\n"));
+ }
+
+ if (fCheckThrowableForWatsonBuckets)
+ {
+ // Since we have not found buckets anywhere, try to look for them
+ // in the throwable.
+ if ((((EXCEPTIONREF)gc.refException)->AreWatsonBucketsPresent() == FALSE) &&
+ (((EXCEPTIONREF)gc.refException)->IsIPForWatsonBucketsPresent()))
+ {
+ // Capture the buckets from the IP.
+ SetupWatsonBucketsForNonPreallocatedExceptions(gc.refException);
+ }
+
+ if (((EXCEPTIONREF)gc.refException)->AreWatsonBucketsPresent())
+ {
+ // Copy the buckets to the current watson bucket tracker
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ pUEWatsonBucketTracker->CopyBucketsFromThrowable(gc.refException);
+ if (pUEWatsonBucketTracker->RetrieveWatsonBuckets() != NULL)
+ {
+ LOG((LF_EH, LL_INFO1000, "SetupWatsonBucketsForFailFast - Watson buckets copied from the exception object.\n"));
+ }
+ else
+ {
+                    LOG((LF_EH, LL_INFO1000, "SetupWatsonBucketsForFailFast - Unable to copy Watson buckets from the exception object, likely due to OOM.\n"));
+ }
+ }
+ else
+ {
+ fResult = FALSE;
+ LOG((LF_EH, LL_INFO1000, "SetupWatsonBucketsForFailFast - Exception object neither has buckets nor has inner exception.\n"));
+ }
+ }
+ }
+
+ GCPROTECT_END();
+
+#endif // !DACCESS_COMPILE
+
+ return fResult;
+}
+
+// This function will set up the bucketing details in the exception
+// tracker or the throwable, if they are not already set up.
+//
+// This is called when an exception is thrown (or raised):
+//
+// 1) from outside the confines of managed EH clauses, OR
+// 2) from within the confines of managed EH clauses but the
+// exception does not have bucketing details with it, OR
+// 3) when an exception is reraised at an AD transition boundary
+//    after it has been marshalled over to the returning AD.
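+//
+// Rough flow of the logic below (a summary, not additional behavior):
+//
+// 1) Preallocated throwable: if the UE tracker has an IP and buckets (AD transition
+//    or thread abort cases), copy them to the current exception tracker.
+// 2) Non-preallocated throwable: check the throwable itself for buckets or a
+//    bucketing IP; for a thread abort reraise, copy the details from the UE tracker.
+// 3) If no details were found, consult the innermost exception; if there are still
+//    none, save adjustedIp as the bucketing IP (and, for a preallocated thread
+//    abort, capture the buckets proactively).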
+void SetupInitialThrowBucketDetails(UINT_PTR adjustedIp)
+{
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_CORECLR
+ // On CoreCLR, Watson may not be enabled. Thus, we should
+ // skip this.
+ if (!IsWatsonEnabled())
+ {
+ return;
+ }
+#endif // FEATURE_CORECLR
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_ANY;
+ NOTHROW;
+ PRECONDITION(GetThread() != NULL);
+ PRECONDITION(!(GetThread()->GetExceptionState()->GetFlags()->GotWatsonBucketDetails()));
+ PRECONDITION(adjustedIp != NULL);
+ PRECONDITION(IsWatsonEnabled());
+ }
+ CONTRACTL_END;
+
+ Thread *pThread = GetThread();
+
+    // If we don't already have the bucketing details for the exception
+    // being thrown, then get them.
+ ThreadExceptionState *pExState = pThread->GetExceptionState();
+
+ // Ensure that the exception tracker exists
+ _ASSERTE(pExState->GetCurrentExceptionTracker() != NULL);
+
+ // Switch to COOP mode
+ GCX_COOP();
+
+ // Get the throwable for the exception being thrown
+ struct
+ {
+ OBJECTREF oCurrentThrowable;
+ OBJECTREF oInnerMostExceptionThrowable;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.oCurrentThrowable = pExState->GetThrowable();
+
+ // Check if the exception object is preallocated or not
+ BOOL fIsPreallocatedException = CLRException::IsPreallocatedExceptionObject(gc.oCurrentThrowable);
+
+ // Get the WatsonBucketTracker for the current exception
+ PTR_EHWatsonBucketTracker pWatsonBucketTracker = pExState->GetCurrentExceptionTracker()->GetWatsonBucketTracker();
+
+ // Get the innermost exception object (if any)
+ gc.oInnerMostExceptionThrowable = ((EXCEPTIONREF)gc.oCurrentThrowable)->GetBaseException();
+
+    // By default, assume that no watson bucketing details are available and that the
+    // inner exception is not preallocated
+ BOOL fAreBucketingDetailsPresent = FALSE;
+ BOOL fIsInnerExceptionPreallocated = FALSE;
+
+    // Check if this is a thread abort exception of any kind. See IsThrowableThreadAbortException implementation for details.
+    // We shouldn't rely on the thread state to determine if it is a TAE since, in cases like throwing a cached exception
+    // as part of a type initialization failure, we could throw a TAE while the thread is not in the abort state (which is expected).
+ BOOL fIsThreadAbortException = IsThrowableThreadAbortException(gc.oCurrentThrowable);
+
+ // If we are here, then this was a new exception raised
+ // from outside the managed EH clauses (fault/finally/catch).
+ //
+ // The throwable *may* have the bucketing details already
+ // if this exception was raised when it was crossing over
+    // an AD transition boundary. Those are stored in the UE watson bucket
+    // tracker by AppDomainTransitionExceptionFilter.
+ if (fIsPreallocatedException)
+ {
+ PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = pExState->GetUEWatsonBucketTracker();
+ fAreBucketingDetailsPresent = ((pUEWatsonBucketTracker->RetrieveWatsonBucketIp() != NULL) &&
+ (pUEWatsonBucketTracker->RetrieveWatsonBuckets() != NULL));
+
+ // If they are present, copy them over to the watson tracker for the exception
+ // being processed.
+ if (fAreBucketingDetailsPresent)
+ {
+#ifdef _DEBUG
+            // Under OOM scenarios, it's possible that when we are raising a threadabort,
+            // the throwable may get converted to the preallocated OOM object when RaiseTheExceptionInternalOnly
+            // invokes Thread::SafeSetLastThrownObject. We check if this is the current case and use it in
+            // our validation below.
+ BOOL fIsPreallocatedOOMExceptionForTA = FALSE;
+ if ((!fIsThreadAbortException) && pUEWatsonBucketTracker->CapturedForThreadAbort())
+ {
+ fIsPreallocatedOOMExceptionForTA = (gc.oCurrentThrowable == CLRException::GetPreallocatedOutOfMemoryException());
+ if (fIsPreallocatedOOMExceptionForTA)
+ {
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Got preallocated OOM throwable for buckets captured for thread abort.\n"));
+ }
+ }
+#endif // _DEBUG
+            // These should have been captured at an AD transition OR
+            // could be the bucketing details of a preallocated [rude] thread abort exception.
+ _ASSERTE(pUEWatsonBucketTracker->CapturedAtADTransition() ||
+ ((fIsThreadAbortException || fIsPreallocatedOOMExceptionForTA) && pUEWatsonBucketTracker->CapturedForThreadAbort()));
+
+ if (!fIsThreadAbortException)
+ {
+                // The watson bucket tracker for the exception being raised should be empty at this point
+                // since we are here because of a cross AD reraise of the original exception.
+ _ASSERTE((pWatsonBucketTracker->RetrieveWatsonBucketIp() == NULL) && (pWatsonBucketTracker->RetrieveWatsonBuckets() == NULL));
+
+ // Copy the buckets over to it
+ pWatsonBucketTracker->CopyEHWatsonBucketTracker(*(pUEWatsonBucketTracker));
+ if (pWatsonBucketTracker->RetrieveWatsonBuckets() == NULL)
+ {
+                    // If we don't have buckets after the copy operation, it's because we ran out of
+                    // memory.
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Unable to copy watson buckets from cross AD rethrow, likely due to out of memory.\n"));
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Copied watson buckets from cross AD rethrow.\n"));
+ }
+ }
+ else
+ {
+ // Thread abort watson bucket details are already present in the
+ // UE watson bucket tracker.
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Already have watson buckets for preallocated thread abort reraise.\n"));
+ }
+ }
+ else if (fIsThreadAbortException)
+ {
+ // This is a preallocated thread abort exception.
+ UINT_PTR ip = pUEWatsonBucketTracker->RetrieveWatsonBucketIp();
+ if (ip != NULL)
+ {
+                // Since we have the IP, assert that this was the one set up
+                // for ThreadAbort. This is for the reraise scenario where
+                // the original exception was a non-preallocated TA but the
+                // reraise resulted in a preallocated TA.
+                //
+                // In this case, we will update the IP to be used to the
+                // one we have. The control flow below will automatically
+                // end up using it.
+ _ASSERTE(pUEWatsonBucketTracker->CapturedForThreadAbort());
+ adjustedIp = ip;
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Setting an existing IP (%p) to be used for capturing buckets for preallocated thread abort.\n", ip));
+ goto phase1;
+ }
+ }
+
+ if (!fAreBucketingDetailsPresent || !fIsThreadAbortException)
+ {
+            // Clear the UE Watson bucket tracker so that it's usable
+            // in the future. We don't clear this for ThreadAbort since
+            // the UE watson bucket tracker carries the bucketing details
+            // for it, unless the UE tracker does not contain them
+            // already.
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ }
+ }
+ else
+ {
+ // The exception object is not preallocated
+ fAreBucketingDetailsPresent = ((EXCEPTIONREF)gc.oCurrentThrowable)->AreWatsonBucketsPresent();
+ if (!fAreBucketingDetailsPresent)
+ {
+ // If buckets are not present, check if the bucketing IP is present.
+ fAreBucketingDetailsPresent = ((EXCEPTIONREF)gc.oCurrentThrowable)->IsIPForWatsonBucketsPresent();
+ }
+
+        // If the throwable does not have buckets and this is a thread abort exception,
+        // then this may be a reraise of the original thread abort.
+ //
+        // We can also be here if an exception was caught at an AppDomain transition and,
+        // in the returning domain, a non-preallocated TAE was raised. In such a case,
+ // the UE tracker flags could indicate the exception is from AD transition.
+ // This is similar to preallocated case above.
+ //
+ // Check the UE Watson bucket tracker if it has the buckets and if it does,
+ // copy them over to the current throwable.
+ if (!fAreBucketingDetailsPresent && fIsThreadAbortException)
+ {
+ PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = pExState->GetUEWatsonBucketTracker();
+ UINT_PTR ip = pUEWatsonBucketTracker->RetrieveWatsonBucketIp();
+ if (ip != NULL)
+ {
+ // Confirm that we had the buckets captured for thread abort
+ _ASSERTE(pUEWatsonBucketTracker->CapturedForThreadAbort() || pUEWatsonBucketTracker->CapturedAtADTransition());
+
+ if (pUEWatsonBucketTracker->RetrieveWatsonBuckets() != NULL)
+ {
+                    // Copy the buckets to the current throwable - CopyWatsonBucketsToThrowable
+                    // can throw on OOM. However, since the current function is called as part of
+                    // setting up the stack trace, where we bail out in case of OOM, we do
+                    // the same here.
+ BOOL fCopiedBuckets = TRUE;
+ EX_TRY
+ {
+ CopyWatsonBucketsToThrowable(pUEWatsonBucketTracker->RetrieveWatsonBuckets());
+ _ASSERTE(((EXCEPTIONREF)gc.oCurrentThrowable)->AreWatsonBucketsPresent());
+ }
+ EX_CATCH
+ {
+ fCopiedBuckets = FALSE;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (fCopiedBuckets)
+ {
+ // Since the throwable has the buckets, set the flag that indicates so
+ fAreBucketingDetailsPresent = TRUE;
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Setup watson buckets for thread abort reraise.\n"));
+ }
+ }
+ else
+ {
+                    // Copy the faulting IP from the UE tracker to the exception object. This was set up in COMPlusCheckForAbort
+                    // for non-preallocated exceptions.
+ ((EXCEPTIONREF)gc.oCurrentThrowable)->SetIPForWatsonBuckets(ip);
+ fAreBucketingDetailsPresent = TRUE;
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Setup watson bucket IP for thread abort reraise.\n"));
+ }
+ }
+ else
+ {
+                // Clear the UE Watson bucket tracker so that it's usable
+                // in the future.
+                pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+                LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Didn't find watson buckets for thread abort - likely being raised.\n"));
+ }
+ }
+ }
+
+phase1:
+ if (fAreBucketingDetailsPresent)
+ {
+ // Since we already have the buckets, simply bail out
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Already had watson ip/buckets.\n"));
+ goto done;
+ }
+
+ // Check if an inner most exception exists and if it does, examine
+ // it for watson bucketing details.
+ if (gc.oInnerMostExceptionThrowable != NULL)
+ {
+ // Preallocated exception objects do not have inner exception objects.
+ // Thus, if we are here, then the current throwable cannot be
+ // a preallocated exception object.
+ _ASSERTE(!fIsPreallocatedException);
+
+ fIsInnerExceptionPreallocated = CLRException::IsPreallocatedExceptionObject(gc.oInnerMostExceptionThrowable);
+
+ // If we are here, then this was a "throw" with an inner exception
+ // outside of any managed EH clauses.
+ //
+ // If the inner exception object is preallocated, then we will need to create the
+ // Watson buckets since we are outside the managed EH clauses with no exception tracking
+ // information relating to the inner exception.
+ //
+ // But if the inner exception object was not preallocated, create new Watson buckets
+ // only if the inner exception does not have them.
+ if (fIsInnerExceptionPreallocated)
+ {
+ fAreBucketingDetailsPresent = FALSE;
+ }
+ else
+ {
+ // Do we have either the IP for Watson buckets or the buckets themselves?
+ fAreBucketingDetailsPresent = (((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->AreWatsonBucketsPresent() ||
+ ((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->IsIPForWatsonBucketsPresent());
+ }
+ }
+
+ if (!fAreBucketingDetailsPresent)
+ {
+ // Collect the bucketing details since they are not already present
+ pWatsonBucketTracker->SaveIpForWatsonBucket(adjustedIp);
+
+ if (!fIsPreallocatedException || fIsThreadAbortException)
+ {
+ if (!fIsPreallocatedException)
+ {
+ // Save the IP for Watson bucketing in the exception object for non-preallocated exception
+ // objects
+ ((EXCEPTIONREF)gc.oCurrentThrowable)->SetIPForWatsonBuckets(adjustedIp);
+
+ // Save the IP in the UE tracker as well for a TAE if an abort is in progress,
+ // since the exception object is not available when we attempt the reraise. Otherwise,
+ // treat the exception like a regular non-preallocated exception and do nothing else.
+ if (fIsThreadAbortException && pThread->IsAbortInitiated())
+ {
+ PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = pExState->GetUEWatsonBucketTracker();
+
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ pUEWatsonBucketTracker->SaveIpForWatsonBucket(adjustedIp);
+
+ // Set the flag that we captured the IP for Thread abort
+ DEBUG_STMT(pUEWatsonBucketTracker->SetCapturedForThreadAbort());
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Saved bucket IP for initial thread abort raise.\n"));
+ }
+ }
+ else
+ {
+ // Create the buckets proactively for the preallocated thread abort exception
+ pWatsonBucketTracker->CaptureUnhandledInfoForWatson(TypeOfReportedError::UnhandledException, pThread, &gc.oCurrentThrowable);
+ PTR_VOID pUnmanagedBuckets = pWatsonBucketTracker->RetrieveWatsonBuckets();
+ if(pUnmanagedBuckets != NULL)
+ {
+ // Copy the details over to the UE Watson bucket tracker so that we can use them if the exception
+ // is "reraised" after invoking the catch block.
+ //
+ // Since we can be here for a preallocated thread abort exception when the UE tracker is simply
+ // carrying the IP (which has been copied to pWatsonBucketTracker and buckets captured for it),
+ // we will need to clear the UE tracker so that we can copy over the captured buckets.
+ PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = pExState->GetUEWatsonBucketTracker();
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+
+ // Copy over the buckets from the current tracker that captured them.
+ pUEWatsonBucketTracker->CopyEHWatsonBucketTracker(*(pWatsonBucketTracker));
+
+ // Buckets should be present now (unless the copy operation had an OOM)
+ if (pUEWatsonBucketTracker->RetrieveWatsonBuckets() != NULL)
+ {
+ // Set the flag that we captured buckets for Thread abort
+ DEBUG_STMT(pUEWatsonBucketTracker->SetCapturedForThreadAbort());
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Saved buckets for Watson Bucketing for initial thread abort raise.\n"));
+ }
+ else
+ {
+ // If we are here, then the bucket copy operation (above) failed due to OOM.
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Unable to save buckets for Watson Bucketing for initial thread abort raise, likely due to OOM.\n"));
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ }
+ }
+ else
+ {
+ // The Watson helper function can bail out under OOM scenarios and return NULL.
+ // We cannot do much in such a case.
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - No buckets were captured and returned to us for initial thread abort raise. Likely encountered an OOM.\n"));
+ }
+
+ // Clear the buckets since we no longer need them
+ pWatsonBucketTracker->ClearWatsonBucketDetails();
+ }
+ }
+ else
+ {
+ // We have already saved the throw site IP for bucketing the non-ThreadAbort preallocated exceptions
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Saved IP (%p) for Watson Bucketing for a preallocated exception\n", adjustedIp));
+ }
+ }
+ else
+ {
+ // The inner exception object should have either the IP for Watson bucketing or the buckets themselves.
+ // We copy over whatever is available to the current exception object.
+ _ASSERTE(gc.oInnerMostExceptionThrowable != NULL);
+ _ASSERTE(((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->AreWatsonBucketsPresent() ||
+ ((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->IsIPForWatsonBucketsPresent());
+
+ if (((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->AreWatsonBucketsPresent())
+ {
+ EX_TRY
+ {
+ // Copy the bucket details from innermost exception to the current exception object.
+ CopyWatsonBucketsFromThrowableToCurrentThrowable(gc.oInnerMostExceptionThrowable);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Copied watson bucket details from the innermost exception\n"));
+ }
+ else
+ {
+ // Copy the IP to the current exception object
+ ((EXCEPTIONREF)gc.oCurrentThrowable)->SetIPForWatsonBuckets(((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->GetIPForWatsonBuckets());
+ LOG((LF_EH, LL_INFO100, "SetupInitialThrowBucketDetails - Copied watson bucket IP from the innermost exception\n"));
+ }
+ }
+
+done:
+ // Set the flag that we have got the bucketing details
+ pExState->GetFlags()->SetGotWatsonBucketDetails();
+
+ GCPROTECT_END();
+
+#endif // !DACCESS_COMPILE
+}
+
+// This function is a wrapper to copy the watson bucket byte[] from the specified
+// throwable to the current throwable.
+void CopyWatsonBucketsFromThrowableToCurrentThrowable(OBJECTREF oThrowableFrom)
+{
+#ifndef DACCESS_COMPILE
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ PRECONDITION(oThrowableFrom != NULL);
+ PRECONDITION(!CLRException::IsPreallocatedExceptionObject(oThrowableFrom));
+ PRECONDITION(((EXCEPTIONREF)oThrowableFrom)->AreWatsonBucketsPresent());
+ PRECONDITION(IsWatsonEnabled());
+ }
+ CONTRACTL_END;
+
+ struct
+ {
+ OBJECTREF oThrowableFrom;
+ } _gc;
+
+ ZeroMemory(&_gc, sizeof(_gc));
+ GCPROTECT_BEGIN(_gc);
+ _gc.oThrowableFrom = oThrowableFrom;
+
+ // Copy the Watson buckets to the current throwable by NOT passing
+ // the second argument, which will then default to NULL.
+ //
+ // CopyWatsonBucketsBetweenThrowables treats a NULL destination as
+ // the current throwable and copies the buckets to it.
+ CopyWatsonBucketsBetweenThrowables(_gc.oThrowableFrom);
+
+ GCPROTECT_END();
+
+#endif // !DACCESS_COMPILE
+}
+
+// This function will copy the watson bucket byte[] from the source
+// throwable to the destination throwable.
+//
+// If the destination throwable is NULL, it will result in the buckets
+// being copied to the current throwable.
+void CopyWatsonBucketsBetweenThrowables(OBJECTREF oThrowableFrom, OBJECTREF oThrowableTo /*=NULL*/)
+{
+#ifndef DACCESS_COMPILE
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ PRECONDITION(oThrowableFrom != NULL);
+ PRECONDITION(!CLRException::IsPreallocatedExceptionObject(oThrowableFrom));
+ PRECONDITION(((EXCEPTIONREF)oThrowableFrom)->AreWatsonBucketsPresent());
+ PRECONDITION(IsWatsonEnabled());
+ }
+ CONTRACTL_END;
+
+ BOOL fRetVal = FALSE;
+
+ struct
+ {
+ OBJECTREF oFrom;
+ OBJECTREF oTo;
+ OBJECTREF oWatsonBuckets;
+ } _gc;
+
+ ZeroMemory(&_gc, sizeof(_gc));
+ GCPROTECT_BEGIN(_gc);
+
+ _gc.oFrom = oThrowableFrom;
+ _gc.oTo = (oThrowableTo == NULL)?GetThread()->GetThrowable():oThrowableTo;
+ _ASSERTE(_gc.oTo != NULL);
+
+ // The target throwable to which the Watson buckets are going to be copied
+ // shouldn't be a preallocated exception object.
+ _ASSERTE(!CLRException::IsPreallocatedExceptionObject(_gc.oTo));
+
+ // Size of a watson bucket
+ DWORD size = sizeof(GenericModeBlock);
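+ // (A Watson bucket is a "GenericModeBlock" holding the bucket parameters;
+ // the managed byte[] is sized to hold a verbatim copy of that native block.)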
+
+ // Create the managed byte[] to hold the bucket details
+ _gc.oWatsonBuckets = AllocatePrimitiveArray(ELEMENT_TYPE_U1, size);
+ if (_gc.oWatsonBuckets == NULL)
+ {
+ // Return failure if we failed to create the bucket array
+ fRetVal = FALSE;
+ }
+ else
+ {
+ // Get the raw array data pointer of the source array
+ U1ARRAYREF refSourceWatsonBucketArray = ((EXCEPTIONREF)_gc.oFrom)->GetWatsonBucketReference();
+ PTR_VOID pRawSourceWatsonBucketArray = dac_cast<PTR_VOID>(refSourceWatsonBucketArray->GetDataPtr());
+
+ // Get the raw array data pointer to the destination array
+ U1ARRAYREF refDestWatsonBucketArray = (U1ARRAYREF)_gc.oWatsonBuckets;
+ PTR_VOID pRawDestWatsonBucketArray = dac_cast<PTR_VOID>(refDestWatsonBucketArray->GetDataPtr());
+
+ // Deep copy the bucket information to the managed array
+ memcpyNoGCRefs(pRawDestWatsonBucketArray, pRawSourceWatsonBucketArray, size);
+
+ // Set up the managed field reference to point to the byte array.
+ //
+ // The throwable to which the buckets are being copied may already
+ // have existing buckets (e.g. when a TypeInitializationException is
+ // thrown again upon an attempt to load the originally failed type).
+ //
+ // This is also possible if the exception object is used as a singleton
+ // and thrown by multiple threads.
+ if (((EXCEPTIONREF)_gc.oTo)->AreWatsonBucketsPresent())
+ {
+ LOG((LF_EH, LL_INFO1000, "CopyWatsonBucketsBetweenThrowables: Throwable (%p) being copied to had previous buckets.\n", OBJECTREFToObject(_gc.oTo)));
+ }
+
+ ((EXCEPTIONREF)_gc.oTo)->SetWatsonBucketReference(_gc.oWatsonBuckets);
+
+ fRetVal = TRUE;
+ }
+
+ // We shouldn't be here when fRetVal is FALSE since failure to allocate the primitive
+ // array should throw an OOM.
+ _ASSERTE(fRetVal);
+
+ GCPROTECT_END();
+#endif // !DACCESS_COMPILE
+}
+
+// This function will copy the watson bucket information to the managed byte[] in
+// the specified managed exception object.
+//
+ // If a throwable is not specified, the information is copied to the current throwable.
+//
+// pUnmanagedBuckets is a pointer to native memory that cannot be affected by GC.
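+ //
+ // Note: the allocation of the managed byte[] below can throw an OOM, so
+ // callers typically wrap this call in EX_TRY/EX_CATCH (see the thread abort
+ // reraise path in SetupInitialThrowBucketDetails above for an example).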
+BOOL CopyWatsonBucketsToThrowable(PTR_VOID pUnmanagedBuckets, OBJECTREF oTargetThrowable /*= NULL*/)
+{
+#ifndef DACCESS_COMPILE
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ PRECONDITION(GetThread() != NULL);
+ PRECONDITION(pUnmanagedBuckets != NULL);
+ PRECONDITION(!CLRException::IsPreallocatedExceptionObject((oTargetThrowable == NULL)?GetThread()->GetThrowable():oTargetThrowable));
+ PRECONDITION(IsWatsonEnabled());
+ }
+ CONTRACTL_END;
+
+ BOOL fRetVal = TRUE;
+ struct
+ {
+ OBJECTREF oThrowable;
+ OBJECTREF oWatsonBuckets;
+ } _gc;
+
+ ZeroMemory(&_gc, sizeof(_gc));
+ GCPROTECT_BEGIN(_gc);
+ _gc.oThrowable = (oTargetThrowable == NULL)?GetThread()->GetThrowable():oTargetThrowable;
+
+ // The throwable to which the buckets are copied must exist.
+ _ASSERTE(_gc.oThrowable != NULL);
+
+ // Size of a watson bucket
+ DWORD size = sizeof(GenericModeBlock);
+
+ _gc.oWatsonBuckets = AllocatePrimitiveArray(ELEMENT_TYPE_U1, size);
+ if (_gc.oWatsonBuckets == NULL)
+ {
+ // Return failure if we failed to create the bucket array
+ fRetVal = FALSE;
+ }
+ else
+ {
+ // Get the raw array data pointer
+ U1ARRAYREF refWatsonBucketArray = (U1ARRAYREF)_gc.oWatsonBuckets;
+ PTR_VOID pRawWatsonBucketArray = dac_cast<PTR_VOID>(refWatsonBucketArray->GetDataPtr());
+
+ // Deep copy the bucket information to the managed array
+ memcpyNoGCRefs(pRawWatsonBucketArray, pUnmanagedBuckets, size);
+
+ // Set up the managed field reference to point to the byte array.
+ //
+ // The throwable to which the buckets are being copied may already
+ // have existing buckets (e.g. when a TypeInitializationException is
+ // thrown again upon an attempt to load the originally failed type).
+ //
+ // This is also possible if the exception object is used as a singleton
+ // and thrown by multiple threads.
+ if (((EXCEPTIONREF)_gc.oThrowable)->AreWatsonBucketsPresent())
+ {
+ LOG((LF_EH, LL_INFO1000, "CopyWatsonBucketsToThrowable: Throwable (%p) being copied to had previous buckets.\n", OBJECTREFToObject(_gc.oThrowable)));
+ }
+
+ ((EXCEPTIONREF)_gc.oThrowable)->SetWatsonBucketReference(_gc.oWatsonBuckets);
+ }
+
+ GCPROTECT_END();
+
+ return fRetVal;
+#else // DACCESS_COMPILE
+ return TRUE;
+#endif // !DACCESS_COMPILE
+}
+
+// This function will setup the bucketing information for nested exceptions
+// raised. These would be any exceptions thrown from within the confines of
+// managed EH clauses and include "rethrow" and "throw new ...".
+//
+ // This is called from within the CLR's personality routine for managed
+ // exceptions to preemptively set up the Watson buckets from the ones that may
+ // already exist. If none exist already, we will automatically end up in the
+ // path (SetupInitialThrowBucketDetails) that will set up buckets for the
+ // exception being thrown.
+void SetStateForWatsonBucketing(BOOL fIsRethrownException, OBJECTHANDLE ohOriginalException)
+{
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_CORECLR
+ // On CoreCLR, Watson may not be enabled. Thus, we should
+ // skip this.
+ if (!IsWatsonEnabled())
+ {
+ return;
+ }
+#endif // FEATURE_CORECLR
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_ANY;
+ NOTHROW;
+ PRECONDITION(GetThread() != NULL);
+ PRECONDITION(IsWatsonEnabled());
+ }
+ CONTRACTL_END;
+
+ // Switch to COOP mode
+ GCX_COOP();
+
+ struct
+ {
+ OBJECTREF oCurrentThrowable;
+ OBJECTREF oInnerMostExceptionThrowable;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ Thread* pThread = GetThread();
+
+ // Get the current exception state of the thread
+ ThreadExceptionState* pCurExState = pThread->GetExceptionState();
+ _ASSERTE(NULL != pCurExState);
+
+ // Ensure that the exception tracker exists
+ _ASSERTE(pCurExState->GetCurrentExceptionTracker() != NULL);
+
+ // Get the current throwable
+ gc.oCurrentThrowable = pThread->GetThrowable();
+ _ASSERTE(gc.oCurrentThrowable != NULL);
+
+ // Is the throwable a preallocated exception object?
+ BOOL fIsPreallocatedExceptionObject = CLRException::IsPreallocatedExceptionObject(gc.oCurrentThrowable);
+
+ // Copy the bucketing details from the original exception tracker if the current exception is a rethrow
+ // AND the throwable is a preallocated exception object.
+ //
+ // For rethrown non-preallocated exception objects, the throwable would already have the bucketing
+ // details inside it.
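+ //
+ // For illustration (C#) - a minimal sketch, not taken from this codebase:
+ //
+ //     try { ... }
+ //     catch (Exception)
+ //     {
+ //         throw;   // rethrow: reuses the original exception object
+ //     }
+ //
+ // Since a rethrow reuses the original throwable, a non-preallocated rethrown
+ // exception already carries its bucketing details in the object itself.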
+ if (fIsRethrownException)
+ {
+ if (fIsPreallocatedExceptionObject)
+ {
+ // Get the WatsonBucket tracker for the original exception, starting the search from the previous EH tracker.
+ // This is required because when a preallocated exception is rethrown, the current tracker has
+ // the same throwable as the original exception but no bucketing details.
+ //
+ // To ensure GetWatsonBucketTrackerForPreallocatedException uses the EH tracker corresponding to the original
+ // exception to get the bucketing details, we pass TRUE as the third parameter.
+ PTR_EHWatsonBucketTracker pPreallocWatsonBucketTracker = GetWatsonBucketTrackerForPreallocatedException(gc.oCurrentThrowable, FALSE, TRUE);
+ if (pPreallocWatsonBucketTracker != NULL)
+ {
+ if (!IsThrowableThreadAbortException(gc.oCurrentThrowable))
+ {
+ // For non-thread abort preallocated exceptions, we copy the bucketing details
+ // from their corresponding watson bucket tracker to the one corresponding to the
+ // rethrow that is taking place.
+ //
+ // Bucketing details for a preallocated exception may not be present if the exception came
+ // across an AD transition and the attempt to copy them over from the UETracker, when
+ // the exception was reraised in the calling AD, failed due to OOM.
+ //
+ // In such a case, when the reraised exception is caught and rethrown, we will not have
+ // any bucketing details.
+ if (NULL != pPreallocWatsonBucketTracker->RetrieveWatsonBucketIp())
+ {
+ // Copy the bucketing details now
+ pCurExState->GetCurrentExceptionTracker()->GetWatsonBucketTracker()->CopyEHWatsonBucketTracker(*pPreallocWatsonBucketTracker);
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO1000, "SetStateForWatsonBucketing - Watson bucketing details for rethrown preallocated exception not found in the EH tracker corresponding to the original exception. This is likely due to a previous OOM.\n"));
+ LOG((LF_EH, LL_INFO1000, ">>>>>>>>>>>>>>>>>>>>>>>>>> Original WatsonBucketTracker = %p\n", pPreallocWatsonBucketTracker));
+
+ // Clear the active tracker
+ pCurExState->GetCurrentExceptionTracker()->GetWatsonBucketTracker()->ClearWatsonBucketDetails();
+ }
+ }
+ #ifdef _DEBUG
+ else
+ {
+ // For thread abort exceptions, the returned watson bucket tracker
+ // would correspond to the UE Watson bucket tracker and will have
+ // all the details.
+ _ASSERTE(pPreallocWatsonBucketTracker == pCurExState->GetUEWatsonBucketTracker());
+ }
+ #endif // _DEBUG
+ }
+ else
+ {
+ // OOM can result in not having a Watson bucket tracker with valid bucketing details for a preallocated exception.
+ // Thus, we may end up here. For details, see implementation of GetWatsonBucketTrackerForPreallocatedException.
+ LOG((LF_EH, LL_INFO1000, "SetStateForWatsonBucketing - Watson bucketing tracker for rethrown preallocated exception not found. This is likely due to a previous OOM.\n"));
+
+ // Clear the active tracker
+ pCurExState->GetCurrentExceptionTracker()->GetWatsonBucketTracker()->ClearWatsonBucketDetails();
+ }
+ }
+ else
+ {
+ // We don't need to do anything here since the throwable would already have the bucketing
+ // details inside it. Simply assert that the original exception object is the same as the current throwable.
+ //
+ // We cannot assert for Watson buckets since the original throwable may not have gotten them in
+ // SetupInitialThrowBucketDetails due to OOM.
+ _ASSERTE((NULL != ohOriginalException) && (ObjectFromHandle(ohOriginalException) == gc.oCurrentThrowable));
+ if ((((EXCEPTIONREF)gc.oCurrentThrowable)->AreWatsonBucketsPresent() == FALSE) &&
+ (((EXCEPTIONREF)gc.oCurrentThrowable)->IsIPForWatsonBucketsPresent() == FALSE))
+ {
+ LOG((LF_EH, LL_INFO1000, "SetStateForWatsonBucketing - Regular rethrown exception (%p) does not have Watson buckets, likely due to OOM.\n",
+ OBJECTREFToObject(gc.oCurrentThrowable)));
+ }
+ }
+
+ // Set the flag that we have bucketing details for the exception
+ pCurExState->GetFlags()->SetGotWatsonBucketDetails();
+ LOG((LF_EH, LL_INFO1000, "SetStateForWatsonBucketing - Using original exception details for Watson bucketing for rethrown exception.\n"));
+ }
+ else
+ {
+ // If we are here, then an exception is being thrown from within the
+ // managed EH clauses of fault, finally or catch, with an inner exception.
+
+ // By default, we will create buckets based upon the exception being thrown, unless
+ // the thrown exception has an inner exception that has bucketing details
+ BOOL fCreateBucketsForExceptionBeingThrown = TRUE;
+
+ // Start off by assuming that the inner exception object is not preallocated
+ BOOL fIsInnerExceptionPreallocated = FALSE;
+
+ // Reference to the WatsonBucket tracker for the inner exception, if it is preallocated
+ PTR_EHWatsonBucketTracker pInnerExceptionWatsonBucketTracker = NULL;
+
+ // Since this is a new exception being thrown, we will check if it already has buckets or not.
+ // This is possible when Reflection throws a TargetInvocationException with an inner exception
+ // that is a preallocated exception object. In such a case, the inner exception details have
+ // already been copied to the TargetInvocationException object, in InvokeImpl in ReflectionInvocation.cpp.
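+ //
+ // For illustration (C#) - a minimal sketch, not taken from this codebase:
+ //
+ //     try { someMethodInfo.Invoke(null, null); }
+ //     catch (TargetInvocationException tie)
+ //     {
+ //         // tie.InnerException is the exception thrown by the invoked
+ //         // method and may be a preallocated object (e.g. OOM).
+ //     }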
+ if (((EXCEPTIONREF)gc.oCurrentThrowable)->AreWatsonBucketsPresent() ||
+ ((EXCEPTIONREF)gc.oCurrentThrowable)->IsIPForWatsonBucketsPresent())
+ {
+ goto done;
+ }
+
+ // If no buckets are present, then we will check if it has an innermost exception or not.
+ // If it does, then we will make the exception being thrown use the bucketing details of the
+ // innermost exception.
+ //
+ // If there is no innermost exception or if one is present without bucketing details, then
+ // we will have bucket details based upon the exception being thrown.
+
+ // Get the innermost exception from the exception being thrown.
+ gc.oInnerMostExceptionThrowable = ((EXCEPTIONREF)gc.oCurrentThrowable)->GetBaseException();
+ if (gc.oInnerMostExceptionThrowable != NULL)
+ {
+ fIsInnerExceptionPreallocated = CLRException::IsPreallocatedExceptionObject(gc.oInnerMostExceptionThrowable);
+
+ // Preallocated exception objects do not have inner exception objects.
+ // Thus, if we are here, then the current throwable cannot be
+ // a preallocated exception object.
+ _ASSERTE(!fIsPreallocatedExceptionObject);
+
+ // Create the new buckets only if the innermost exception object
+ // does not have them already.
+ if (fIsInnerExceptionPreallocated)
+ {
+ // If we are able to find the Watson bucket tracker for the preallocated
+ // inner exception, then we don't need to create buckets for the throw site.
+ pInnerExceptionWatsonBucketTracker = GetWatsonBucketTrackerForPreallocatedException(gc.oInnerMostExceptionThrowable, FALSE, TRUE);
+ fCreateBucketsForExceptionBeingThrown = ((pInnerExceptionWatsonBucketTracker != NULL) &&
+ (pInnerExceptionWatsonBucketTracker->RetrieveWatsonBucketIp() != NULL)) ? FALSE : TRUE;
+ }
+ else
+ {
+ // Since the inner exception object is not preallocated, create
+ // watson buckets only if it does not have them.
+ fCreateBucketsForExceptionBeingThrown = !(((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->AreWatsonBucketsPresent() ||
+ ((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->IsIPForWatsonBucketsPresent());
+ }
+ }
+
+ // If we are NOT going to create buckets for the thrown exception,
+ // then copy them over from the inner exception object.
+ //
+ // If we have to create the buckets for the thrown exception,
+ // we won't do that now - it will be done in StackTraceInfo::AppendElement
+ // when we get the IP for bucketing.
+ if (!fCreateBucketsForExceptionBeingThrown)
+ {
+ // Preallocated exception objects do not have inner exception objects.
+ // Thus, if we are here, then the current throwable cannot be
+ // a preallocated exception object.
+ _ASSERTE(!fIsPreallocatedExceptionObject);
+
+ if (fIsInnerExceptionPreallocated)
+ {
+
+ // We should have the inner exception watson bucket tracker
+ _ASSERTE((pInnerExceptionWatsonBucketTracker != NULL) && (pInnerExceptionWatsonBucketTracker->RetrieveWatsonBucketIp() != NULL));
+
+ // Capture the buckets for the innermost exception if they don't already exist.
+ // Since the current throwable cannot be preallocated (see the assert above),
+ // copy the buckets to the throwable.
+ PTR_VOID pInnerExceptionWatsonBuckets = pInnerExceptionWatsonBucketTracker->RetrieveWatsonBuckets();
+ if (pInnerExceptionWatsonBuckets == NULL)
+ {
+ // Capture the buckets since they don't exist
+ pInnerExceptionWatsonBucketTracker->CaptureUnhandledInfoForWatson(TypeOfReportedError::UnhandledException, pThread, &gc.oInnerMostExceptionThrowable);
+ pInnerExceptionWatsonBuckets = pInnerExceptionWatsonBucketTracker->RetrieveWatsonBuckets();
+ }
+
+ if (pInnerExceptionWatsonBuckets == NULL)
+ {
+ // Couldn't capture the details, likely due to OOM
+ LOG((LF_EH, LL_INFO1000, "SetStateForWatsonBucketing - Preallocated inner-exception's WBTracker (%p) has no bucketing details for the thrown exception, likely due to OOM.\n", pInnerExceptionWatsonBucketTracker));
+ }
+ else
+ {
+ // Copy the buckets to the current throwable
+ BOOL fCopied = TRUE;
+ EX_TRY
+ {
+ fCopied = CopyWatsonBucketsToThrowable(pInnerExceptionWatsonBuckets);
+ _ASSERTE(fCopied);
+ }
+ EX_CATCH
+ {
+ // Don't do anything if we fail to copy the buckets - this is no different from
+ // the native Watson helper functions failing under OOM
+ fCopied = FALSE;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+ }
+ else
+ {
+ // Assert that the inner exception has the Watson buckets
+ _ASSERTE(gc.oInnerMostExceptionThrowable != NULL);
+ _ASSERTE(((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->AreWatsonBucketsPresent() ||
+ ((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->IsIPForWatsonBucketsPresent());
+
+ if (((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->AreWatsonBucketsPresent())
+ {
+ // Copy the bucket information from the inner exception object to the current throwable
+ EX_TRY
+ {
+ CopyWatsonBucketsFromThrowableToCurrentThrowable(gc.oInnerMostExceptionThrowable);
+ }
+ EX_CATCH
+ {
+ // Don't do anything if we fail to copy the buckets - this is no different from
+ // the native Watson helper functions failing under OOM
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+ else
+ {
+ // Copy the IP for Watson bucketing to the exception object
+ ((EXCEPTIONREF)gc.oCurrentThrowable)->SetIPForWatsonBuckets(((EXCEPTIONREF)gc.oInnerMostExceptionThrowable)->GetIPForWatsonBuckets());
+ }
+ }
+
+ // Set the flag that we got bucketing details for the exception
+ pCurExState->GetFlags()->SetGotWatsonBucketDetails();
+ LOG((LF_EH, LL_INFO1000, "SetStateForWatsonBucketing - Using innermost exception details for Watson bucketing for thrown exception.\n"));
+ }
+done:;
+ }
+
+ GCPROTECT_END();
+
+#endif // !DACCESS_COMPILE
+}
+
+// Constructor that will do the initialization of the object
+EHWatsonBucketTracker::EHWatsonBucketTracker()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ Init();
+}
+
+// Reset the fields to default values
+void EHWatsonBucketTracker::Init()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_WatsonUnhandledInfo.m_UnhandledIp = 0;
+ m_WatsonUnhandledInfo.m_pUnhandledBuckets = NULL;
+
+ DEBUG_STMT(ResetFlags());
+
+ LOG((LF_EH, LL_INFO1000, "EHWatsonBucketTracker::Init - initializing watson bucket tracker (%p)\n", this));
+}
+
+// This method copies the bucketing details from the specified throwable
+// to the current Watson Bucket tracker.
+void EHWatsonBucketTracker::CopyBucketsFromThrowable(OBJECTREF oThrowable)
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(oThrowable != NULL);
+ PRECONDITION(((EXCEPTIONREF)oThrowable)->AreWatsonBucketsPresent());
+ PRECONDITION(IsWatsonEnabled());
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ struct
+ {
+ OBJECTREF oFrom;
+ } _gc;
+
+ ZeroMemory(&_gc, sizeof(_gc));
+ GCPROTECT_BEGIN(_gc);
+
+ _gc.oFrom = oThrowable;
+
+ LOG((LF_EH, LL_INFO1000, "EHWatsonBucketTracker::CopyBucketsFromThrowable - Copying bucketing details from throwable (%p) to tracker (%p)\n",
+ OBJECTREFToObject(_gc.oFrom), this));
+
+ // Watson bucket is a "GenericModeBlock" type. Set up an empty GenericModeBlock
+ // to hold the bucket parameters.
+ GenericModeBlock *pgmb = new (nothrow) GenericModeBlock;
+ if (pgmb == NULL)
+ {
+ // If we are unable to allocate memory to hold the WatsonBucket, then
+ // reset the IP and bucket pointer to NULL and bail out
+ SaveIpForWatsonBucket(NULL);
+ m_WatsonUnhandledInfo.m_pUnhandledBuckets = NULL;
+ }
+ else
+ {
+ // Get the raw array data pointer
+ U1ARRAYREF refWatsonBucketArray = ((EXCEPTIONREF)_gc.oFrom)->GetWatsonBucketReference();
+ PTR_VOID pRawWatsonBucketArray = dac_cast<PTR_VOID>(refWatsonBucketArray->GetDataPtr());
+
+ // Copy over the details to our new allocation
+ memcpyNoGCRefs(pgmb, pRawWatsonBucketArray, sizeof(GenericModeBlock));
+
+ // and save the address where the buckets were copied
+ _ASSERTE(m_WatsonUnhandledInfo.m_pUnhandledBuckets == NULL);
+ m_WatsonUnhandledInfo.m_pUnhandledBuckets = pgmb;
+ }
+
+ GCPROTECT_END();
+
+ LOG((LF_EH, LL_INFO1000, "EHWatsonBucketTracker::CopyBucketsFromThrowable - Copied Watson Buckets from throwable to (%p)\n",
+ m_WatsonUnhandledInfo.m_pUnhandledBuckets));
+#endif // !DACCESS_COMPILE
+}
+
+// This method copies the bucketing details from the specified Watson Bucket tracker
+// to the current one.
+void EHWatsonBucketTracker::CopyEHWatsonBucketTracker(const EHWatsonBucketTracker& srcTracker)
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_WatsonUnhandledInfo.m_UnhandledIp == 0);
+ PRECONDITION(m_WatsonUnhandledInfo.m_pUnhandledBuckets == NULL);
+ PRECONDITION(IsWatsonEnabled());
+ }
+ CONTRACTL_END;
+
+ LOG((LF_EH, LL_INFO1000, "EHWatsonBucketTracker::CopyEHWatsonBucketTracker - Copying bucketing details from %p to %p\n", &srcTracker, this));
+
+ // Copy the tracking details over from the specified tracker
+ SaveIpForWatsonBucket(srcTracker.m_WatsonUnhandledInfo.m_UnhandledIp);
+
+ if (srcTracker.m_WatsonUnhandledInfo.m_pUnhandledBuckets != NULL)
+ {
+ // To save the bucket information, we will need to memcpy.
+ // This ensures that if the original Watson bucket tracker
+ // (for the original exception) is released and its memory deallocated,
+ // the new Watson bucket tracker (for a rethrown exception, for example)
+ // will still have all the bucket details.
+
+ // Watson bucket is a "GenericModeBlock" type. Set up an empty GenericModeBlock
+ // to hold the bucket parameters.
+ GenericModeBlock *pgmb = new (nothrow) GenericModeBlock;
+ if (pgmb == NULL)
+ {
+ // If we are unable to allocate memory to hold the WatsonBucket, then
+ // reset the IP and bucket pointer to NULL and bail out
+ SaveIpForWatsonBucket(NULL);
+ m_WatsonUnhandledInfo.m_pUnhandledBuckets = NULL;
+
+ LOG((LF_EH, LL_INFO1000, "EHWatsonBucketTracker::CopyEHWatsonBucketTracker - Not copying buckets due to out of memory.\n"));
+ }
+ else
+ {
+ // Copy over the details to our new allocation
+ memcpyNoGCRefs(pgmb, srcTracker.m_WatsonUnhandledInfo.m_pUnhandledBuckets, sizeof(GenericModeBlock));
+
+ // and save the address where the buckets were copied
+ m_WatsonUnhandledInfo.m_pUnhandledBuckets = pgmb;
+ }
+ }
+
+ LOG((LF_EH, LL_INFO1000, "EHWatsonBucketTracker::CopyEHWatsonBucketTracker - Copied Watson Bucket to (%p)\n", m_WatsonUnhandledInfo.m_pUnhandledBuckets));
+#endif // !DACCESS_COMPILE
+}
+
+void EHWatsonBucketTracker::SaveIpForWatsonBucket(
+ UINT_PTR ip) // The new IP.
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(IsWatsonEnabled());
+ }
+ CONTRACTL_END;
+
+ LOG((LF_EH, LL_INFO1000, "EHWatsonBucketTracker::SaveIpForWatsonBucket - this = %p, IP = %p\n", this, ip));
+
+ // Since we are setting a new IP for tracking buckets,
+ // clear any existing details we may hold
+ ClearWatsonBucketDetails();
+
+ // Save the new IP for bucketing
+ m_WatsonUnhandledInfo.m_UnhandledIp = ip;
+#endif // !DACCESS_COMPILE
+}
+
+UINT_PTR EHWatsonBucketTracker::RetrieveWatsonBucketIp()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ LOG((LF_EH, LL_INFO1000, "EHWatsonBucketTracker::RetrieveWatsonBucketIp - this = %p, IP = %p\n", this, m_WatsonUnhandledInfo.m_UnhandledIp));
+
+ return m_WatsonUnhandledInfo.m_UnhandledIp;
+}
+
+ // This function returns the reference to the Watson buckets tracked by this
+ // instance of the WatsonBucket tracker.
+//
+// This is *also* invoked from the DAC when buckets are requested.
+PTR_VOID EHWatsonBucketTracker::RetrieveWatsonBuckets()
+{
+#if defined(FEATURE_CORECLR) && !defined(DACCESS_COMPILE)
+ if (!IsWatsonEnabled())
+ {
+ return NULL;
+ }
+#endif // defined(FEATURE_CORECLR) && !defined(DACCESS_COMPILE)
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(IsWatsonEnabled());
+ }
+ CONTRACTL_END;
+
+ LOG((LF_EH, LL_INFO1000, "EHWatsonBucketTracker::RetrieveWatsonBuckets - this = %p, bucket address = %p\n", this, m_WatsonUnhandledInfo.m_pUnhandledBuckets));
+
+ return m_WatsonUnhandledInfo.m_pUnhandledBuckets;
+}
+
+void EHWatsonBucketTracker::ClearWatsonBucketDetails()
+{
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_CORECLR
+ if (!IsWatsonEnabled())
+ {
+ return;
+ }
+#endif // FEATURE_CORECLR
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(IsWatsonEnabled());
+ }
+ CONTRACTL_END;
+
+
+ LOG((LF_EH, LL_INFO1000, "EHWatsonBucketTracker::ClearWatsonBucketDetails for tracker (%p)\n", this));
+
+ if (m_WatsonUnhandledInfo.m_pUnhandledBuckets != NULL)
+ {
+ FreeBucketParametersForManagedException(m_WatsonUnhandledInfo.m_pUnhandledBuckets);
+ }
+
+ Init();
+#endif // !DACCESS_COMPILE
+}
+
+void EHWatsonBucketTracker::CaptureUnhandledInfoForWatson(TypeOfReportedError tore, Thread * pThread, OBJECTREF * pThrowable)
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsWatsonEnabled());
+ }
+ CONTRACTL_END;
+
+ LOG((LF_EH, LL_INFO1000, "EHWatsonBucketTracker::CaptureUnhandledInfoForWatson capturing watson bucket details for (%p)\n", this));
+
+ // Only capture the bucket information if there is an IP AND we haven't already collected it.
+ // We could have collected it during a previous AD transition and wouldn't want to overwrite it.
+ if (m_WatsonUnhandledInfo.m_UnhandledIp != 0)
+ {
+ if (m_WatsonUnhandledInfo.m_pUnhandledBuckets == NULL)
+ {
+ // Get the bucket details since we don't have them
+ m_WatsonUnhandledInfo.m_pUnhandledBuckets = GetBucketParametersForManagedException(m_WatsonUnhandledInfo.m_UnhandledIp, tore, pThread, pThrowable);
+ LOG((LF_EH, LL_INFO1000, "EHWatsonBucketTracker::CaptureUnhandledInfoForWatson captured the following watson bucket details: (this = %p, bucket addr = %p)\n",
+ this, m_WatsonUnhandledInfo.m_pUnhandledBuckets));
+ }
+ else
+ {
+ // We already have the bucket details - so no need to capture them again
+ LOG((LF_EH, LL_INFO1000, "EHWatsonBucketTracker::CaptureUnhandledInfoForWatson already have the watson bucket details: (this = %p, bucket addr = %p)\n",
+ this, m_WatsonUnhandledInfo.m_pUnhandledBuckets));
+ }
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO1000, "EHWatsonBucketTracker::CaptureUnhandledInfoForWatson didn't have an IP to use for capturing watson buckets\n"));
+ }
+#endif // !DACCESS_COMPILE
+}
+#endif // !FEATURE_PAL
+
+// Given a throwable, this function will attempt to find an active EH tracker corresponding to it.
+// If none found, it will return NULL
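+ //
+ // A typical call starts the search from the current tracker by passing NULL
+ // for the second parameter, e.g.:
+ //
+ //     pEHTracker = GetEHTrackerForException(oThrowable, NULL);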
+#ifdef WIN64EXCEPTIONS
+PTR_ExceptionTracker GetEHTrackerForException(OBJECTREF oThrowable, PTR_ExceptionTracker pStartingEHTracker)
+#elif _TARGET_X86_
+PTR_ExInfo GetEHTrackerForException(OBJECTREF oThrowable, PTR_ExInfo pStartingEHTracker)
+#else
+#error Unsupported platform
+#endif
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ NOTHROW;
+ SO_TOLERANT;
+ PRECONDITION(GetThread() != NULL);
+ PRECONDITION(oThrowable != NULL);
+ }
+ CONTRACTL_END;
+
+ // Get the reference to the exception tracker to start with. If one has been provided to us,
+ // then use it. Otherwise, start from the current one.
+#ifdef WIN64EXCEPTIONS
+ PTR_ExceptionTracker pEHTracker = (pStartingEHTracker != NULL) ? pStartingEHTracker : GetThread()->GetExceptionState()->GetCurrentExceptionTracker();
+#elif _TARGET_X86_
+ PTR_ExInfo pEHTracker = (pStartingEHTracker != NULL) ? pStartingEHTracker : GetThread()->GetExceptionState()->GetCurrentExceptionTracker();
+#else
+#error Unsupported platform
+#endif
+
+ BOOL fFoundTracker = FALSE;
+
+ // Start walking the list to find the tracker corresponding
+ // to the exception object.
+ while (pEHTracker != NULL)
+ {
+ if (pEHTracker->GetThrowable() == oThrowable)
+ {
+ // found the tracker - break out.
+ fFoundTracker = TRUE;
+ break;
+ }
+
+ // move to the previous tracker...
+ pEHTracker = pEHTracker->GetPreviousExceptionTracker();
+ }
+
+ return fFoundTracker ? pEHTracker : NULL;
+}
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+// -----------------------------------------------------------------------
+// Support for CorruptedState Exceptions
+// -----------------------------------------------------------------------
+
+// Given an exception code, this method returns a BOOL to indicate if the
+// code belongs to a corrupting exception or not.
+/* static */
+BOOL CEHelper::IsProcessCorruptedStateException(DWORD dwExceptionCode, BOOL fCheckForSO /*= TRUE*/)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (g_pConfig->LegacyCorruptedStateExceptionsPolicy())
+ {
+ return FALSE;
+ }
+
+ // Call into the utilcode helper function to check if this
+ // is a CE or not.
+ return (::IsProcessCorruptedStateException(dwExceptionCode, fCheckForSO));
+}
+
+// This is used in the VM folder version of "SET_CE_RETHROW_FLAG_FOR_EX_CATCH" (in clrex.h)
+// to check if the managed exception caught by EX_END_CATCH is CSE or not.
+//
+// If you are using it from rethrow boundaries (e.g. SET_CE_RETHROW_FLAG_FOR_EX_CATCH
+// macro that is used to automatically rethrow corrupting exceptions), then you may
+// want to set the "fMarkForReuseIfCorrupting" to TRUE to enable propagation of the
+// corruption severity when the reraised exception is seen by managed code again.
+/* static */
+BOOL CEHelper::IsLastActiveExceptionCorrupting(BOOL fMarkForReuseIfCorrupting /* = FALSE */)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(GetThread() != NULL);
+ }
+ CONTRACTL_END;
+
+ if (g_pConfig->LegacyCorruptedStateExceptionsPolicy())
+ {
+ return FALSE;
+ }
+
+ BOOL fIsCorrupting = FALSE;
+ ThreadExceptionState *pCurTES = GetThread()->GetExceptionState();
+
+ // Check the corruption severity
+ CorruptionSeverity severity = pCurTES->GetLastActiveExceptionCorruptionSeverity();
+ fIsCorrupting = (severity == ProcessCorrupting);
+ if (fIsCorrupting && fMarkForReuseIfCorrupting)
+ {
+ // Mark the corruption severity for reuse
+ CEHelper::MarkLastActiveExceptionCorruptionSeverityForReraiseReuse();
+ }
+
+ LOG((LF_EH, LL_INFO100, "CEHelper::IsLastActiveExceptionCorrupting - Using corruption severity from TES.\n"));
+
+ return fIsCorrupting;
+}
+
+// Given a MethodDesc, this method will return a BOOL to indicate if
+// the containing assembly was built for PreV4 runtime or not.
+/* static */
+BOOL CEHelper::IsMethodInPreV4Assembly(PTR_MethodDesc pMethodDesc)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pMethodDesc != NULL);
+ }
+ CONTRACTL_END;
+
+ // By default, assume that the containing assembly was not
+ // built for PreV4 runtimes.
+ BOOL fBuiltForPreV4Runtime = FALSE;
+
+ if (g_pConfig->LegacyCorruptedStateExceptionsPolicy())
+ {
+ return TRUE;
+ }
+
+ LPCSTR pszVersion = NULL;
+
+ // Retrieve the manifest metadata reference since that contains
+ // the "built-for" runtime details
+ IMDInternalImport *pImport = pMethodDesc->GetAssembly()->GetManifestImport();
+ if (pImport && SUCCEEDED(pImport->GetVersionString(&pszVersion)))
+ {
+ if (pszVersion != NULL)
+ {
+ // If the version begins with "v1.*" or "v2.*", it was built for a pre-V4 runtime
+ if ((pszVersion[0] == 'v' || pszVersion[0] == 'V') &&
+ IS_DIGIT(pszVersion[1]) &&
+ (pszVersion[2] == '.') )
+ {
+ // Looks like a version. Is it less than the v4.0 major version, where we start using the new behavior?
+ fBuiltForPreV4Runtime = ((DIGIT_TO_INT(pszVersion[1]) != 0) &&
+ (DIGIT_TO_INT(pszVersion[1]) <= HIGHEST_MAJOR_VERSION_OF_PREV4_RUNTIME));
+ }
+ }
+ }
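+ // For example, an assembly built against the .NET Framework 2.0 carries the
+ // metadata version string "v2.0.50727" and is treated as pre-V4 by the check
+ // above, whereas a V4-era assembly carries "v4.0.30319" and is not.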
+
+ return fBuiltForPreV4Runtime;
+}
+
+// Given a MethodDesc and CorruptionSeverity, this method will return a
+// BOOL indicating if the method can handle those kinds of CEs or not.
+/* static */
+BOOL CEHelper::CanMethodHandleCE(PTR_MethodDesc pMethodDesc, CorruptionSeverity severity, BOOL fCalculateSecurityInfo /*= TRUE*/)
+{
+ BOOL fCanMethodHandleSeverity = FALSE;
+
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ if (fCalculateSecurityInfo)
+ {
+ GC_TRIGGERS; // CEHelper::CanMethodHandleCE will invoke Security::IsMethodCritical that could end up invoking MethodTable::LoadEnclosingMethodTable that is GC_TRIGGERS
+ }
+ else
+ {
+ // See comment in COMPlusUnwindCallback for details.
+ GC_NOTRIGGER;
+ }
+ // The first pass requires THROWS; in the second pass we need it due to the AppX check below, where GetFusionAssemblyName can throw.
+ THROWS;
+ MODE_ANY;
+ PRECONDITION(pMethodDesc != NULL);
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_APPX_BINDER
+ // In a Metro application, disallow application code from catching any corrupted state exception
+ if (AppX::IsAppXProcess())
+ {
+ // This call to GetFusionAssemblyNameNoCreate will return a valid fusion assembly name
+ // in the second pass of exception dispatch as the name would have been created in the first pass,
+ // if not already existent.
+ IAssemblyName *pIAssemblyName = pMethodDesc->GetAssembly()->GetFusionAssemblyNameNoCreate();
+ if (!pIAssemblyName)
+ {
+ pIAssemblyName = pMethodDesc->GetAssembly()->GetFusionAssemblyName();
+ }
+
+ if (Fusion::Util::IsAnyFrameworkAssembly(pIAssemblyName) != S_OK)
+ {
+ return FALSE;
+ }
+ }
+#endif // FEATURE_APPX_BINDER
+
+ if (g_pConfig->LegacyCorruptedStateExceptionsPolicy())
+ {
+ return TRUE;
+ }
+
+ // Only SecurityCritical code can handle a CE since only such code can generate one.
+ // Even in a fully trusted assembly, transparent code cannot generate a CE and thus
+ // will not know how to handle it properly.
+ //
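+ // For illustration (C#) - a minimal sketch, not taken from this codebase - a
+ // method that opts in to handling corrupted state exceptions is typically
+ // written as:
+ //
+ //     [HandleProcessCorruptedStateExceptions]
+ //     [SecurityCritical]
+ //     static void GuardedHandler()
+ //     {
+ //         try { /* code that may AV */ }
+ //         catch (AccessViolationException) { /* handle the CSE */ }
+ //     }
+ //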
+ // Check if the method in question is SecurityCritical or not.
+ MethodSecurityDescriptor mdSec(pMethodDesc);
+ fCanMethodHandleSeverity = mdSec.IsCritical();
+
+ if (fCanMethodHandleSeverity)
+ {
+ // Reset the flag to FALSE
+ fCanMethodHandleSeverity = FALSE;
+
+ // Since the method is Security Critical, now check if it is
+ // attributed to handle the CE or not.
+ IMDInternalImport *pImport = pMethodDesc->GetMDImport();
+ if (pImport != NULL)
+ {
+ mdMethodDef methodDef = pMethodDesc->GetMemberDef();
+ switch(severity)
+ {
+ case ProcessCorrupting:
+ fCanMethodHandleSeverity = (S_OK == pImport->GetCustomAttributeByName(
+ methodDef,
+ HANDLE_PROCESS_CORRUPTED_STATE_EXCEPTION_ATTRIBUTE,
+ NULL,
+ NULL));
+ break;
+ default:
+ _ASSERTE(!"Unknown Exception Corruption Severity!");
+ break;
+ }
+ }
+ }
+#endif // !DACCESS_COMPILE
+
+ return fCanMethodHandleSeverity;
+}
+
+// Given a MethodDesc, this method will return a BOOL to indicate if the method should be examined for exception
+// handlers for the specified exception.
+//
+// This method accounts for both corrupting and non-corrupting exceptions.
+/* static */
+BOOL CEHelper::CanMethodHandleException(CorruptionSeverity severity, PTR_MethodDesc pMethodDesc, BOOL fCalculateSecurityInfo /*= TRUE*/)
+{
+ CONTRACTL
+ {
+ // CEHelper::CanMethodHandleCE will invoke Security::IsMethodCritical that could endup invoking MethodTable::LoadEnclosingMethodTable that is GC_TRIGGERS/THROWS
+ if (fCalculateSecurityInfo)
+ {
+ GC_TRIGGERS;
+ }
+ else
+ {
+ // See comment in COMPlusUnwindCallback for details.
+ GC_NOTRIGGER;
+ }
+ THROWS;
+ MODE_ANY;
+ PRECONDITION(pMethodDesc != NULL);
+ }
+ CONTRACTL_END;
+
+ // By default, assume that the runtime shouldn't look for exception handlers
+ // in the method pointed to by the MethodDesc
+ BOOL fLookForExceptionHandlersInMethod = FALSE;
+
+ if (g_pConfig->LegacyCorruptedStateExceptionsPolicy())
+ {
+ return TRUE;
+ }
+
+ // If we have been asked to use the last active corruption severity (e.g. in cases of Reflection
+ // or COM interop), then retrieve it.
+ if (severity == UseLast)
+ {
+ LOG((LF_EH, LL_INFO100, "CEHelper::CanMethodHandleException - Using LastActiveExceptionCorruptionSeverity.\n"));
+ severity = GetThread()->GetExceptionState()->GetLastActiveExceptionCorruptionSeverity();
+ }
+
+ LOG((LF_EH, LL_INFO100, "CEHelper::CanMethodHandleException - Processing CorruptionSeverity: %d.\n", severity));
+
+ if (severity > NotCorrupting)
+ {
+ // If the method lies in an assembly built for a pre-V4 runtime, allow the runtime
+ // to look for an exception handler for the CE.
+ BOOL fIsMethodInPreV4Assembly = FALSE;
+ fIsMethodInPreV4Assembly = CEHelper::IsMethodInPreV4Assembly(pMethodDesc);
+
+ if (!fIsMethodInPreV4Assembly)
+ {
+ // Method lies in an assembly built for V4 or later runtime.
+ LOG((LF_EH, LL_INFO100, "CEHelper::CanMethodHandleException - Method is in an assembly built for V4 or later runtime.\n"));
+
+ // Depending upon the corruption severity of the exception, see if the
+ // method supports handling that.
+ LOG((LF_EH, LL_INFO100, "CEHelper::CanMethodHandleException - Exception is corrupting.\n"));
+
+ // Check if the method can handle the severity specified in the exception object.
+ fLookForExceptionHandlersInMethod = CEHelper::CanMethodHandleCE(pMethodDesc, severity, fCalculateSecurityInfo);
+ }
+ else
+ {
+ // Method is in a Pre-V4 assembly - allow it to be examined for processing the CE
+ fLookForExceptionHandlersInMethod = TRUE;
+ }
+ }
+ else
+ {
+ // Non-corrupting exceptions can continue to be delivered
+ fLookForExceptionHandlersInMethod = TRUE;
+ }
+
+ return fLookForExceptionHandlersInMethod;
+}
+
+// Given a managed exception object, this method will return a BOOL
+// indicating if it corresponds to a ProcessCorruptedState exception
+// or not.
+/* static */
+BOOL CEHelper::IsProcessCorruptedStateException(OBJECTREF oThrowable)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ PRECONDITION(oThrowable != NULL);
+ }
+ CONTRACTL_END;
+
+ if (g_pConfig->LegacyCorruptedStateExceptionsPolicy())
+ {
+ return FALSE;
+ }
+
+#ifndef DACCESS_COMPILE
+ // If the throwable is the preallocated stack overflow exception, then report it as a CSE
+ if (CLRException::GetPreallocatedStackOverflowException() == oThrowable)
+ {
+ return TRUE;
+ }
+#endif // !DACCESS_COMPILE
+
+ // Check if we have an exception tracker for this exception
+ // and, if so, whether it represents a corrupting exception or not.
+ // Get the exception tracker for the current exception.
+#ifdef WIN64EXCEPTIONS
+ PTR_ExceptionTracker pEHTracker = GetEHTrackerForException(oThrowable, NULL);
+#elif _TARGET_X86_
+ PTR_ExInfo pEHTracker = GetEHTrackerForException(oThrowable, NULL);
+#else
+#error Unsupported platform
+#endif
+
+ if (pEHTracker != NULL)
+ {
+ // Found the tracker for the exception object - check if it's a CSE or not.
+ return (pEHTracker->GetCorruptionSeverity() == ProcessCorrupting);
+ }
+
+ return FALSE;
+}
+
+#ifdef WIN64EXCEPTIONS
+void CEHelper::SetupCorruptionSeverityForActiveExceptionInUnwindPass(Thread *pCurThread, PTR_ExceptionTracker pEHTracker, BOOL fIsFirstPass,
+ DWORD dwExceptionCode)
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(!fIsFirstPass); // This method should only be called during an unwind
+ PRECONDITION(pCurThread != NULL);
+ }
+ CONTRACTL_END;
+
+ // <WIN64>
+ //
+ // Typically, exception tracker is created for an exception when the OS is in the first pass.
+ // However, it may be created during the 2nd pass under specific cases. Managed C++ provides
+ // such a scenario. In the following, stack grows left to right:
+ //
+ // CallDescrWorker -> ILStub1 -> <Native Main> -> UMThunkStub -> IL_Stub2 -> <Managed Main>
+ //
+ // If a CSE exception goes unhandled from managed main, it will reach the OS. The [CRT in?] OS triggers
+ // unwind that results in invoking the personality routine of UMThunkStub, called UMThunkStubUnwindFrameChainHandler,
+ // that releases all exception trackers below it. Thus, the tracker for the CSE, which went unhandled, is also
+ // released. This detail is 64bit specific and the crux of this issue.
+ //
+ // Now, it is expected that by the time we are in the unwind pass, the corruption severity would have already been set up in the
+ // exception tracker and thread exception state (TES) as part of the first pass, and thus, the two are identical.
+ //
+ // However, for the scenario above, when the unwind continues and reaches ILStub1, its personality routine (which is ProcessCLRException)
+ // is invoked. It attempts to get the exception tracker corresponding to the exception. Since none exists, it creates a brand new one,
+ // which has the exception corruption severity as NotSet.
+ //
+ // During the stack walk, we know (from TES) that the active exception was a CSE, and thus, ILStub1 cannot handle the exception. Prior
+ // to bailing out, we assert that our data structures are intact by comparing the exception severity in TES with the one in the current
+ // exception tracker. Since the tracker was recreated, it had the severity as NotSet and this does not match the severity in TES.
+ // Thus, the assert fires. [This check is performed in ProcessManagedCallFrame.]
+ //
+ // To address such a case, if we have created a new exception tracker in the unwind (2nd) pass, then set its
+ // exception corruption severity to what the TES holds currently. This will maintain the same semantic as the case
+ // where a new tracker is not created (e.g. the exception was caught in Managed main).
+ //
+ // The exception to this is the scenario of code that uses longjmp to jump to a different context. Longjmp results in a raise
+ // of a new exception with the longjmp exception code (0x80000026) but with ExceptionFlags set indicating unwind. When this is
+ // seen by ProcessCLRException (the 64bit personality routine), it will create a new tracker in the 2nd pass.
+ //
+ // Longjmp outside an exceptional path does not interest us, but the one in the exceptional
+ // path would only happen when a method attributed to handle CSE invokes it. Thus, if the longjmp happened during the 2nd pass of a CSE,
+ // we want it to proceed (and thus, jump) as expected and not apply the CSE severity to the tracker - this is equivalent to
+ // a catch block that handles a CSE and then does a "throw new Exception();". The new exception raised is
+ // non-CSE in nature as well.
+ //
+ // http://www.nynaeve.net/?p=105 has a brief description of how exception-safe setjmp/longjmp works.
+ //
+ // </WIN64>
+ if (pEHTracker->GetCorruptionSeverity() == NotSet)
+ {
+ // Get the thread exception state
+ ThreadExceptionState *pCurTES = pCurThread->GetExceptionState();
+
+ // Set the tracker to have the same corruption severity as the last active severity unless we are dealing
+ // with LONGJMP
+ if (dwExceptionCode == STATUS_LONGJUMP)
+ {
+ pCurTES->SetLastActiveExceptionCorruptionSeverity(NotCorrupting);
+ }
+
+ pEHTracker->SetCorruptionSeverity(pCurTES->GetLastActiveExceptionCorruptionSeverity());
+ LOG((LF_EH, LL_INFO100, "CEHelper::SetupCorruptionSeverityForActiveExceptionInUnwindPass - Setup the corruption severity in the second pass.\n"));
+ }
+#endif // !DACCESS_COMPILE
+}
+#endif // WIN64EXCEPTIONS
+
+ // This method is invoked from the personality routine for managed code and is used to set up the
+// corruption severity for the active exception on the thread exception state and the
+// exception tracker corresponding to the exception.
+/* static */
+void CEHelper::SetupCorruptionSeverityForActiveException(BOOL fIsRethrownException, BOOL fIsNestedException, BOOL fShouldTreatExceptionAsNonCorrupting /* = FALSE */)
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Get the thread and the managed exception object - they must exist at this point
+ Thread *pCurThread = GetThread();
+ _ASSERTE(pCurThread != NULL);
+
+ OBJECTREF oThrowable = pCurThread->GetThrowable();
+ _ASSERTE(oThrowable != NULL);
+
+ // Get the thread exception state
+ ThreadExceptionState * pCurTES = pCurThread->GetExceptionState();
+ _ASSERTE(pCurTES != NULL);
+
+ // Get the exception tracker for the current exception
+#ifdef WIN64EXCEPTIONS
+ PTR_ExceptionTracker pEHTracker = pCurTES->GetCurrentExceptionTracker();
+#elif _TARGET_X86_
+ PTR_ExInfo pEHTracker = pCurTES->GetCurrentExceptionTracker();
+#else // !(WIN64EXCEPTIONS || _TARGET_X86_)
+#error Unsupported platform
+#endif // WIN64EXCEPTIONS
+
+ _ASSERTE(pEHTracker != NULL);
+
+ // Get the current exception code from the tracker.
+ PEXCEPTION_RECORD pEHRecord = pCurTES->GetExceptionRecord();
+ _ASSERTE(pEHRecord != NULL);
+ DWORD dwActiveExceptionCode = pEHRecord->ExceptionCode;
+
+ if (pEHTracker->GetCorruptionSeverity() != NotSet)
+ {
+ // Since the exception tracker already has the corruption severity set,
+ // we don't have much to do. Just confirm that our assumptions are correct.
+ _ASSERTE(pEHTracker->GetCorruptionSeverity() == pCurTES->GetLastActiveExceptionCorruptionSeverity());
+
+ LOG((LF_EH, LL_INFO100, "CEHelper::SetupCorruptionSeverityForActiveException - Current tracker already has the corruption severity set.\n"));
+ return;
+ }
+
+ // If the exception in question is to be treated as non-corrupting,
+ // then flag it and exit.
+ if (fShouldTreatExceptionAsNonCorrupting || g_pConfig->LegacyCorruptedStateExceptionsPolicy())
+ {
+ pEHTracker->SetCorruptionSeverity(NotCorrupting);
+ LOG((LF_EH, LL_INFO100, "CEHelper::SetupCorruptionSeverityForActiveException - Exception treated as non-corrupting.\n"));
+ goto done;
+ }
+
+ if (!fIsRethrownException && !fIsNestedException)
+ {
+ // There should be no previously active exception for this case
+ _ASSERTE(pEHTracker->GetPreviousExceptionTracker() == NULL);
+
+ CorruptionSeverity severityTES = NotSet;
+
+ if (pCurTES->ShouldLastActiveExceptionCorruptionSeverityBeReused())
+ {
+ // Get the corruption severity from the ThreadExceptionState (TES) for the last active exception
+ severityTES = pCurTES->GetLastActiveExceptionCorruptionSeverity();
+
+ // In case of scenarios like an AD transition or Reflection invocation,
+ // the TES would hold the corruption severity of the last active exception. To propagate it
+ // to the current exception, we apply it to the current tracker, and only if the applied
+ // severity is "NotSet" will we proceed to check the current exception for corruption
+ // severity.
+ pEHTracker->SetCorruptionSeverity(severityTES);
+ }
+
+ // Reset TES Corruption Severity
+ pCurTES->SetLastActiveExceptionCorruptionSeverity(NotSet);
+
+ if (severityTES == NotSet)
+ {
+ // Since the last active exception's severity was "NotSet", we will look up the
+ // exception code and the exception object to see if the exception should be marked
+ // corrupting.
+ //
+ // Since this exception was neither rethrown nor is nested, it implies that we are
+ // outside an active exception. Thus, even if it contains inner exceptions, we won't have
+ // corruption severity for them since that information is tracked in the EH tracker and
+ // we won't have an EH tracker for the innermost exception.
+
+ if (CEHelper::IsProcessCorruptedStateException(dwActiveExceptionCode) ||
+ CEHelper::IsProcessCorruptedStateException(oThrowable))
+ {
+ pEHTracker->SetCorruptionSeverity(ProcessCorrupting);
+ LOG((LF_EH, LL_INFO100, "CEHelper::SetupCorruptionSeverityForActiveException - Marked non-rethrow/non-nested exception as ProcessCorrupting.\n"));
+ }
+ else
+ {
+ pEHTracker->SetCorruptionSeverity(NotCorrupting);
+ LOG((LF_EH, LL_INFO100, "CEHelper::SetupCorruptionSeverityForActiveException - Marked non-rethrow/non-nested exception as NotCorrupting.\n"));
+ }
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "CEHelper::SetupCorruptionSeverityForActiveException - Copied the corruption severity to tracker from ThreadExceptionState for non-rethrow/non-nested exception.\n"));
+ }
+ }
+ else
+ {
+ // It's either a rethrow or a nested exception
+
+#ifdef WIN64EXCEPTIONS
+ PTR_ExceptionTracker pOrigEHTracker = NULL;
+#elif _TARGET_X86_
+ PTR_ExInfo pOrigEHTracker = NULL;
+#else
+#error Unsupported platform
+#endif
+
+ BOOL fDoWeHaveCorruptionSeverity = FALSE;
+
+ if (fIsRethrownException)
+ {
+ // Rethrown exceptions are nested by nature (of our implementation). The
+ // original EHTracker will exist for the exception - in fact, it will be
+ // the tracker previous to the current one. We will simply copy
+ // its severity to the current EH tracker representing the rethrow.
+ pOrigEHTracker = pEHTracker->GetPreviousExceptionTracker();
+ _ASSERTE(pOrigEHTracker != NULL);
+
+ // Ideally, we would like to have the assert below enabled. But, as may happen under OOM
+ // stress, it can be false. Here's how that happens:
+ //
+ // An exception is thrown, which is later caught and rethrown in the catch block. The rethrow
+ // results in calling IL_Rethrow, which will call RaiseTheExceptionInternalOnly to actually
+ // raise the exception. Prior to the raise, we update the last thrown object on the thread
+ // by calling Thread::SafeSetLastThrownObject which, internally, could hit an OOM, resulting
+ // in "changing" the throwable used to raise the exception to the preallocated OOM object.
+ //
+ // When the rethrow happens and CLR's exception handler for managed code sees the exception,
+ // the exception tracker created for the rethrown exception will contain the reference to
+ // the last thrown object, which will be the preallocated OOM object.
+ //
+ // Thus, though we came here because of a rethrow and, logically, the throwable should remain
+ // the same, it needn't be. Simply put, a rethrow can result in working with a completely different
+ // exception object than what was originally thrown.
+ //
+ // Hence, the assert cannot be enabled.
+ //
+ // Thus, we will use the EH tracker corresponding to the original exception, to get the
+ // rethrown exception's corruption severity, only when the rethrown throwable is the same
+ // as the original throwable. Otherwise, we will pretend that we didn't get the original tracker
+ // and will automatically enter the path below to set the corruption severity based upon the
+ // rethrown throwable.
+
+ // _ASSERTE(pOrigEHTracker->GetThrowable() == oThrowable);
+ if (pOrigEHTracker->GetThrowable() != oThrowable)
+ {
+ pOrigEHTracker = NULL;
+ LOG((LF_EH, LL_INFO100, "CEHelper::SetupCorruptionSeverityForActiveException - Rethrown throwable does not match the original throwable. Corruption severity will be set based upon rethrown throwable.\n"));
+ }
+ }
+ else
+ {
+ // Get the corruption severity from the ThreadExceptionState (TES) for the last active exception
+ CorruptionSeverity severityTES = NotSet;
+
+ if (pCurTES->ShouldLastActiveExceptionCorruptionSeverityBeReused())
+ {
+ severityTES = pCurTES->GetLastActiveExceptionCorruptionSeverity();
+
+ // In case of scenarios like an AD transition or a Reflection invocation,
+ // the TES would hold the corruption severity of the last active exception. To propagate it
+ // to the current exception, we apply it to the current tracker; only if the applied
+ // severity is "NotSet" do we proceed to check the current exception for corruption
+ // severity.
+ pEHTracker->SetCorruptionSeverity(severityTES);
+ }
+
+ // Reset TES Corruption Severity
+ pCurTES->SetLastActiveExceptionCorruptionSeverity(NotSet);
+
+ // If the last exception didn't have any corruption severity, proceed to look for it.
+ if (severityTES == NotSet)
+ {
+ // This is a nested exception - check if it has inner exception(s). If it does,
+ // find the EH tracker corresponding to the innermost exception; we will copy the
+ // corruption severity from that original tracker to the current one.
+ OBJECTREF oInnermostThrowable = ((EXCEPTIONREF)oThrowable)->GetBaseException();
+ if (oInnermostThrowable != NULL)
+ {
+ // Find the tracker corresponding to the innermost exception, starting from
+ // the tracker previous to the current one. An EH tracker may not be found if
+ // the code did the following inside a catch clause:
+ //
+ // Exception ex = new Exception("inner exception");
+ // throw new Exception("message", ex);
+ //
+ // Or, an exception like AV happened in the catch clause.
+ pOrigEHTracker = GetEHTrackerForException(oInnermostThrowable, pEHTracker->GetPreviousExceptionTracker());
+ }
+ }
+ else
+ {
+ // We have the corruption severity from the TES. Set the flag indicating so.
+ fDoWeHaveCorruptionSeverity = TRUE;
+ LOG((LF_EH, LL_INFO100, "CEHelper::SetupCorruptionSeverityForActiveException - Copied the corruption severity to tracker from ThreadExceptionState for nested exception.\n"));
+ }
+ }
+
+ if (!fDoWeHaveCorruptionSeverity)
+ {
+ if (pOrigEHTracker != NULL)
+ {
+ // Copy the severity from the original EH tracker to the current one
+ CorruptionSeverity origCorruptionSeverity = pOrigEHTracker->GetCorruptionSeverity();
+ _ASSERTE(origCorruptionSeverity != NotSet);
+ pEHTracker->SetCorruptionSeverity(origCorruptionSeverity);
+
+ LOG((LF_EH, LL_INFO100, "CEHelper::SetupCorruptionSeverityForActiveException - Copied the corruption severity (%d) from the original EH tracker for rethrown exception.\n", origCorruptionSeverity));
+ }
+ else
+ {
+ if (CEHelper::IsProcessCorruptedStateException(dwActiveExceptionCode) ||
+ CEHelper::IsProcessCorruptedStateException(oThrowable))
+ {
+ pEHTracker->SetCorruptionSeverity(ProcessCorrupting);
+ LOG((LF_EH, LL_INFO100, "CEHelper::SetupCorruptionSeverityForActiveException - Marked nested exception as ProcessCorrupting.\n"));
+ }
+ else
+ {
+ pEHTracker->SetCorruptionSeverity(NotCorrupting);
+ LOG((LF_EH, LL_INFO100, "CEHelper::SetupCorruptionSeverityForActiveException - Marked nested exception as NotCorrupting.\n"));
+ }
+ }
+ }
+ }
+
+done:
+ // Save the current exception's corruption severity in the ThreadExceptionState (TES)
+ // for cases when we catch the managed exception in the runtime using EX_CATCH.
+ // At such a time, all exception trackers get released (due to unwind triggered
+ // by EX_END_CATCH) and yet we need the corruption severity information for
+ // scenarios like AD Transition, Reflection invocation, etc.
+ CorruptionSeverity currentSeverity = pEHTracker->GetCorruptionSeverity();
+
+ // We should have a valid corruption severity at this point
+ _ASSERTE(currentSeverity != NotSet);
+
+ // Save it in the TES
+ pCurTES->SetLastActiveExceptionCorruptionSeverity(currentSeverity);
+ LOG((LF_EH, LL_INFO100, "CEHelper::SetupCorruptionSeverityForActiveException - Copied the corruption severity (%d) to ThreadExceptionState.\n", currentSeverity));
+
+#endif // !DACCESS_COMPILE
+}
+
+// A CE can be caught in the VM and later reraised. Examples of such scenarios
+// include AD transition, COM interop, and Reflection invocation, to name a few.
+// In such cases, we want to mark the corruption severity for reuse upon reraise,
+// implying that when the VM reraises such an exception, we should use
+// the original corruption severity for the newly raised exception, instead of creating
+// a new one for it.
+/* static */
+void CEHelper::MarkLastActiveExceptionCorruptionSeverityForReraiseReuse()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(GetThread() != NULL);
+ }
+ CONTRACTL_END;
+
+ // If the last active exception's corruption severity is anything but
+ // "NotSet", mark it for ReraiseReuse
+ ThreadExceptionState *pCurTES = GetThread()->GetExceptionState();
+ _ASSERTE(pCurTES != NULL);
+
+ CorruptionSeverity severityTES = pCurTES->GetLastActiveExceptionCorruptionSeverity();
+ if (severityTES != NotSet)
+ {
+ pCurTES->SetLastActiveExceptionCorruptionSeverity((CorruptionSeverity)(severityTES | ReuseForReraise));
+ }
+}
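+
+// Illustrative sketch (not product code): the ReuseForReraise bit set above is consumed
+// on the next raise by SetupCorruptionSeverityForActiveException (earlier in this file),
+// roughly along these lines:
+//
+//   ThreadExceptionState *pTES = GetThread()->GetExceptionState();
+//   if (pTES->ShouldLastActiveExceptionCorruptionSeverityBeReused())
+//   {
+//       // Reapply the last active exception's severity to the new tracker.
+//       pEHTracker->SetCorruptionSeverity(pTES->GetLastActiveExceptionCorruptionSeverity());
+//   }
+//
+//   // The saved severity is then reset so it does not leak into unrelated exceptions.
+//   pTES->SetLastActiveExceptionCorruptionSeverity(NotSet);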
+
+// This method will return a BOOL to indicate if the current exception is to be treated as
+// non-corrupting. Currently, this returns TRUE for NullReferenceException only (or
+// unconditionally when the legacy corrupted state exceptions policy is enabled).
+/* static */
+BOOL CEHelper::ShouldTreatActiveExceptionAsNonCorrupting()
+{
+ BOOL fShouldTreatAsNonCorrupting = FALSE;
+
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(GetThread() != NULL);
+ }
+ CONTRACTL_END;
+
+ if (g_pConfig->LegacyCorruptedStateExceptionsPolicy())
+ {
+ return TRUE;
+ }
+
+ DWORD dwActiveExceptionCode = GetThread()->GetExceptionState()->GetExceptionRecord()->ExceptionCode;
+ if (dwActiveExceptionCode == STATUS_ACCESS_VIOLATION)
+ {
+ // NullReference has the same exception code as AV
+ OBJECTREF oThrowable = NULL;
+ GCPROTECT_BEGIN(oThrowable);
+
+ // Get the throwable and check if it represents null reference exception
+ oThrowable = GetThread()->GetThrowable();
+ _ASSERTE(oThrowable != NULL);
+ if (MscorlibBinder::GetException(kNullReferenceException) == oThrowable->GetMethodTable())
+ {
+ fShouldTreatAsNonCorrupting = TRUE;
+ }
+ GCPROTECT_END();
+ }
+#endif // !DACCESS_COMPILE
+
+ return fShouldTreatAsNonCorrupting;
+}
+
+// If we were working in a nested exception scenario, reset the corruption severity to that of the last
+// exception we were processing, based upon its EH tracker.
+//
+// If none was present, reset it to NotSet.
+//
+// Note: This method must be called once the exception trackers have been adjusted post catch-block execution.
+/* static */
+void CEHelper::ResetLastActiveCorruptionSeverityPostCatchHandler()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(GetThread() != NULL);
+ }
+ CONTRACTL_END;
+
+ ThreadExceptionState *pCurTES = GetThread()->GetExceptionState();
+
+ // By this time, we would have set the correct exception tracker for the active exception domain,
+ // if applicable. An example is throwing and catching an exception within a catch block. We will update
+ // the LastActiveCorruptionSeverity based upon the active exception domain. If we are not in one, we will
+ // set it to "NotSet".
+#ifdef WIN64EXCEPTIONS
+ PTR_ExceptionTracker pEHTracker = pCurTES->GetCurrentExceptionTracker();
+#elif _TARGET_X86_
+ PTR_ExInfo pEHTracker = pCurTES->GetCurrentExceptionTracker();
+#else
+#error Unsupported platform
+#endif
+
+ if (pEHTracker)
+ {
+ pCurTES->SetLastActiveExceptionCorruptionSeverity(pEHTracker->GetCorruptionSeverity());
+ }
+ else
+ {
+ pCurTES->SetLastActiveExceptionCorruptionSeverity(NotSet);
+ }
+
+ LOG((LF_EH, LL_INFO100, "CEHelper::ResetLastActiveCorruptionSeverityPostCatchHandler - Reset LastActiveException corruption severity to %d.\n",
+ pCurTES->GetLastActiveExceptionCorruptionSeverity()));
+}
+
+// This method will return a BOOL indicating if the target of IDispatch can handle the specified exception or not.
+/* static */
+BOOL CEHelper::CanIDispatchTargetHandleException()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(GetThread() != NULL);
+ }
+ CONTRACTL_END;
+
+ // By default, assume that the target of IDispatch cannot handle the exception.
+ BOOL fCanMethodHandleException = FALSE;
+
+ if (g_pConfig->LegacyCorruptedStateExceptionsPolicy())
+ {
+ return TRUE;
+ }
+
+ // IDispatch implementation in COM interop works by invoking the actual target via reflection.
+ // Thus, a COM client could use the V4 runtime to invoke a V2 method. In such a case, a CSE
+ // could go unhandled at the actual target invoked via reflection.
+ //
+ // Reflection invocation would have set a flag for us, indicating if the actual target was
+ // enabled to handle the CE or not. If it is, then we should allow the COM client to get the
+ // HRESULT from the call and not let the exception continue up the stack.
+ ThreadExceptionState *pCurTES = GetThread()->GetExceptionState();
+ fCanMethodHandleException = pCurTES->CanReflectionTargetHandleException();
+
+ // Reset the flag so that subsequent invocations work as expected.
+ pCurTES->SetCanReflectionTargetHandleException(FALSE);
+
+ return fCanMethodHandleException;
+}
+
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+#ifndef DACCESS_COMPILE
+// When a managed thread starts in a non-default domain, its callstack looks like the following:
+//
+// <ManagedThreadBase_DispatchOuter>
+// <ManagedThreadBase_DispatchMiddle>
+// <ManagedThreadBase_DispatchInner>
+//
+// -- AD transition is here -- ==> Pushes ContextTransitionFrame and has EX_CATCH
+//
+// <ManagedThreadBase_DispatchOuter>
+// <ManagedThreadBase_DispatchMiddle>
+// <ManagedThreadBase_DispatchInner>
+//
+// In CoreCLR, all managed threads spawned will have a stack like this since they all
+// run in a non-default domain. The upper three frames are in the default domain and the lower
+// three are in the non-default domain in which the thread was created. Any exception
+// that is unhandled in the non-default domain will be caught at the AD transition boundary.
+// The transition boundary does the following tasks:
+//
+// 1) Catch any incoming unhandled exception from the non-default domain using EX_CATCH.
+// 2) Marshal the exception object to the return context (i.e. DefaultDomain)
+// 3) Return to the context of DefaultDomain and throw the marshalled exception object there.
+//
+// All this depends upon the EX_CATCH (which is based upon C++ exception handling) being
+// able to catch the exception.
+//
+// However, if a breakpoint exception is raised and a debugger is not available to handle it,
+// C++'s catch(...) will not be able to catch it, even when compiled with /EHa. For the curious,
+// refer to the implementation of the "FindHandlerForForeignException" function in the CRT. One of the first
+// things it does is check for a breakpoint exception; if it finds one, it will simply bail out of the
+// process of finding a handler. Thus, EX_CATCH will not be able to catch this exception and we
+// will not be able to transition to the previous AD context.
+//
+// Imagine a thread in a non-default domain suffers a breakpoint exception. Assuming it goes unhandled,
+// it will reach the OS, which will trigger an unwind. The execution of termination handlers in the lower
+// three frames (above) is fine since they are in the same AD as the thread. But when termination
+// handlers in the upper three frames execute, it's a case of a bad mixup since the thread is in a different
+// AD context than the one the frames expect to be in.
+//
+// Hence, we need a mechanism to transition to the expected AppDomain in case of a breakpoint exception.
+// This function supports this mechanism in a generic fashion, i.e., one can use it to transition to
+// any AppDomain, though only up the stack.
+
+BOOL ReturnToPreviousAppDomain()
+{
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ Thread *pCurThread = GetThread();
+ _ASSERTE(pCurThread != NULL);
+
+ BOOL fTransitioned = FALSE;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(pCurThread, return FALSE);
+
+ // Get the thread's current domain
+ AppDomain *pCurDomain = pCurThread->GetDomain();
+ _ASSERTE(pCurDomain != NULL);
+
+ // Lookup the ContextTransitionFrame for the transition into the current AppDomain.
+ Frame *pCtxTransitionFrame = pCurThread->GetFirstTransitionInto(pCurDomain, NULL);
+ if (pCtxTransitionFrame == NULL)
+ {
+ // Since we couldn't find the context transition frame, check if it's the default domain.
+ // If so, we will set fTransitioned to TRUE since there is no context transition frame
+ // set up for the initial entry into the default domain. For all other transitions to it
+ // from non-default domains, we will have a context transition frame. We will do a
+ // debug-only check to assert this invariant.
+ BOOL fIsDefDomain = pCurDomain->IsDefaultDomain();
+#ifdef _DEBUG
+ if (fIsDefDomain)
+ {
+ // Start with the topmost frame and look for a CTX frame until we reach the top of the frame chain.
+ // We had better not find one since we couldn't find a transition frame to the DefaultDomain.
+ Frame *pStartFrame = pCurThread->GetFrame();
+ BOOL fFoundCTXFrame = FALSE;
+ while ((pStartFrame != NULL) && (pStartFrame != (Frame *)FRAME_TOP))
+ {
+ if (pStartFrame->GetVTablePtr() == ContextTransitionFrame::GetMethodFrameVPtr())
+ {
+ fFoundCTXFrame = TRUE;
+ break;
+ }
+
+ // Get the next frame in the chain
+ pStartFrame = pStartFrame->PtrNextFrame();
+ }
+
+ _ASSERTE_MSG(!fFoundCTXFrame, "How come we didn't find the transition frame to DefDomain but found another CTX frame on the frame chain?");
+ }
+#endif // _DEBUG
+ fTransitioned = fIsDefDomain;
+ LOG((LF_EH, LL_INFO100, "ReturnToPreviousAppDomain: Unable to find the transition into the current domain (IsDefaultDomain: %d).\n", fIsDefDomain));
+
+ goto done;
+ }
+
+ // Confirm it's the correct type of frame
+ _ASSERTE_MSG(pCtxTransitionFrame->GetVTablePtr() == ContextTransitionFrame::GetMethodFrameVPtr(),
+ "How come we didn't find context transition frame for this AD transition?");
+
+ // Get the topmost Frame
+ Frame *pCurFrame;
+ pCurFrame = pCurThread->GetFrame();
+
+ // <ASSUMPTION>
+ //
+ // The loop below assumes we are called during an exception unwind since it
+ // unwinds the Frames and pops them off the thread.
+ //
+ // </ASSUMPTION>
+ //
+ // Clear all the frames until we are at the frame of our interest. If there was a
+ // CTX frame between the topmost frame and the AD transition, then we should be able to
+ // catch it here as well.
+ while((pCurFrame != NULL) && (pCurFrame < pCtxTransitionFrame) &&
+ (pCurFrame->GetVTablePtr() != ContextTransitionFrame::GetMethodFrameVPtr()))
+ {
+ // Invoke exception unwind and pop the frame off
+ pCurFrame->ExceptionUnwind();
+ pCurFrame->Pop();
+ pCurFrame = pCurThread->GetFrame();
+ }
+
+ // Confirm that we are at the expected Frame.
+ _ASSERTE_MSG(((pCurFrame != NULL) &&
+ (pCurFrame->GetVTablePtr() == ContextTransitionFrame::GetMethodFrameVPtr()) &&
+ (pCurFrame == pCtxTransitionFrame)),
+ "How come we are not at the exact context transition frame?");
+
+ // Log our context return
+ LOG((LF_EH, LL_INFO100, "ReturnToPreviousAppDomain: Returning from AD %d to AD %d\n",
+ GetAppDomain()->GetId().m_dwId, pCtxTransitionFrame->GetReturnDomain()->GetId().m_dwId));
+
+ // Return to the previous AD context
+ pCurThread->ReturnToContext((ContextTransitionFrame *)pCtxTransitionFrame);
+
+#ifdef _DEBUG
+ // At this point, the context transition frame would have been popped off by
+ // ReturnToContext above.
+ pCurFrame = pCurThread->GetFrame();
+ _ASSERTE_MSG(pCurFrame != pCtxTransitionFrame, "How come the CTX frame of AD transition is still on the frame chain?");
+#endif // _DEBUG
+
+ // Set the flag that we transitioned correctly.
+ fTransitioned = TRUE;
+
+done:;
+ END_SO_INTOLERANT_CODE;
+
+ return fTransitioned;
+}
+
+// This class defines a holder that can be used to return to the previous AppDomain in case an exception
+// goes across an AD transition boundary without reverting the active context.
+//
+// Use this holder *after* you have transitioned to the target AD.
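+//
+// A usage sketch (illustrative only; the real call sites live behind the domain
+// transition macros in threads.h):
+//
+//   // ... code that has just transitioned into the target AppDomain ...
+//   ReturnToPreviousAppDomainHolder adHolder;  // arms the revert-on-exception path
+//   CallThatMayThrowAcrossTheTransition();     // hypothetical callee
+//   adHolder.SuppressRelease();                // success - no revert needed
+//
+// If an exception unwinds past this scope before SuppressRelease is called, the
+// destructor invokes ReturnToPreviousAppDomain to restore the previous AD context.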
+void ReturnToPreviousAppDomainHolder::Init()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ m_fShouldReturnToPreviousAppDomain = TRUE;
+ m_pThread = GetThread();
+ _ASSERTE(m_pThread != NULL);
+
+#ifdef _DEBUG
+ m_pTransitionedToAD = m_pThread->GetDomain();
+#endif // _DEBUG
+}
+
+ReturnToPreviousAppDomainHolder::ReturnToPreviousAppDomainHolder()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Init();
+}
+
+void ReturnToPreviousAppDomainHolder::ReturnToPreviousAppDomain()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ // Test your sanity - we should still be in the transitioned-to AD.
+ PRECONDITION(m_pThread->GetDomain() == m_pTransitionedToAD);
+ }
+ CONTRACTL_END;
+
+ {
+ GCX_COOP();
+ ::ReturnToPreviousAppDomain();
+ }
+
+ // Set the LastThrownObject as NULL since we have returned to a different
+ // AD. Maintaining the reference to an object in the "returned-from" AD
+ // will prevent the AD from getting unloaded.
+ //
+ // Setting to NULL does not require us to be in COOP mode.
+ m_pThread->SafeSetLastThrownObject(NULL);
+}
+
+ReturnToPreviousAppDomainHolder::~ReturnToPreviousAppDomainHolder()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (m_fShouldReturnToPreviousAppDomain)
+ {
+ ReturnToPreviousAppDomain();
+ }
+}
+
+// Reset the flag to indicate that reverting to the previous AD is not required anymore.
+// This should be invoked when the call has successfully returned from the target execution context.
+//
+// By default, this flag is TRUE (see the constructor above) to enable automatic context
+// revert in case an exception goes past the transition.
+//
+// END_DOMAIN_TRANSITION_NO_EH_AT_TRANSITION macro uses it. See its implementation in threads.h
+// for usage.
+void ReturnToPreviousAppDomainHolder::SuppressRelease()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_fShouldReturnToPreviousAppDomain = FALSE;
+}
+
+#endif // !DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+// This method will deliver the actual exception notification. It's assumed that the caller has done the necessary checks, including
+// checking whether the delegate can be invoked for the exception's corruption severity.
+//
+// This has been factored out of the #IFDEF FEATURE_EXCEPTION_NOTIFICATIONS so that the existing ADUEN mechanism can be integrated with
+// the enhanced exception notifications.
+void ExceptionNotifications::DeliverExceptionNotification(ExceptionNotificationHandlerType notificationType, OBJECTREF *pDelegate,
+ OBJECTREF *pAppDomain, OBJECTREF *pEventArgs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pDelegate != NULL && IsProtectedByGCFrame(pDelegate) && (*pDelegate != NULL));
+ PRECONDITION(pEventArgs != NULL && IsProtectedByGCFrame(pEventArgs));
+ PRECONDITION(pAppDomain != NULL && IsProtectedByGCFrame(pAppDomain));
+ }
+ CONTRACTL_END;
+
+ PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(DELEGATEREF(*pDelegate)->GetMethodPtr());
+
+ DECLARE_ARGHOLDER_ARRAY(args, 3);
+
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(DELEGATEREF(*pDelegate)->GetTarget());
+ args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(*pAppDomain);
+ args[ARGNUM_2] = OBJECTREF_TO_ARGHOLDER(*pEventArgs);
+
+ CALL_MANAGED_METHOD_NORET(args);
+}
+
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+// To include definition of COMDelegate::GetMethodDesc
+#include "comdelegate.h"
+
+// This method constructs the arguments to be passed to the exception notification event callback
+void ExceptionNotifications::GetEventArgsForNotification(ExceptionNotificationHandlerType notificationType,
+ OBJECTREF *pOutEventArgs, OBJECTREF *pThrowable)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(notificationType != UnhandledExceptionHandler);
+ PRECONDITION((pOutEventArgs != NULL) && IsProtectedByGCFrame(pOutEventArgs));
+ PRECONDITION(*pOutEventArgs == NULL);
+ PRECONDITION((pThrowable != NULL) && (*pThrowable != NULL) && IsProtectedByGCFrame(pThrowable));
+ PRECONDITION(IsException((*pThrowable)->GetMethodTable())); // We expect a valid exception object
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMTEventArgs = NULL;
+ BinderMethodID idEventArgsCtor = METHOD__FIRSTCHANCE_EVENTARGS__CTOR;
+
+ EX_TRY
+ {
+ switch(notificationType)
+ {
+ case FirstChanceExceptionHandler:
+ pMTEventArgs = MscorlibBinder::GetClass(CLASS__FIRSTCHANCE_EVENTARGS);
+ idEventArgsCtor = METHOD__FIRSTCHANCE_EVENTARGS__CTOR;
+ break;
+ default:
+ _ASSERTE(!"Invalid Exception Notification Handler!");
+ break;
+ }
+
+ // Allocate the instance of the eventargs corresponding to the notification
+ *pOutEventArgs = AllocateObject(pMTEventArgs);
+
+ // Prepare to invoke the .ctor
+ MethodDescCallSite ctor(idEventArgsCtor, pOutEventArgs);
+
+ // Setup the arguments to be passed to the notification specific EventArgs .ctor
+ if (notificationType == FirstChanceExceptionHandler)
+ {
+ // The FirstChance notification EventArgs .ctor takes a single explicit argument - the
+ // exception object; the first slot below is the implicit 'this' for the .ctor.
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(*pOutEventArgs),
+ ObjToArgSlot(*pThrowable),
+ };
+
+ ctor.Call(args);
+ }
+ else
+ {
+ // Since we have already asserted above, just set the args to NULL.
+ *pOutEventArgs = NULL;
+ }
+ }
+ EX_CATCH
+ {
+ // Set event args to NULL in case of any error (e.g. OOM)
+ *pOutEventArgs = NULL;
+ LOG((LF_EH, LL_INFO100, "ExceptionNotifications::GetEventArgsForNotification: Setting event args to NULL due to an exception.\n"));
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions); // Don't swallow any CSE that may come in from the .ctor.
+}
+
+// This SEH filter will be invoked when an exception escapes out of the exception notification
+// callback and enters the runtime. In such a case, we will simply failfast.
+static LONG ExceptionNotificationFilter(PEXCEPTION_POINTERS pExceptionInfo, LPVOID pParam)
+{
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+}
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+// This method will return a BOOL indicating if the delegate should be invoked for the exception
+// of the specified corruption severity.
+BOOL ExceptionNotifications::CanDelegateBeInvokedForException(OBJECTREF *pDelegate, CorruptionSeverity severity)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pDelegate != NULL && IsProtectedByGCFrame(pDelegate) && (*pDelegate != NULL));
+ PRECONDITION(severity > NotSet);
+ }
+ CONTRACTL_END;
+
+ // Notifications for CSE are only delivered if the delegate target follows CSE rules.
+ BOOL fCanMethodHandleException = g_pConfig->LegacyCorruptedStateExceptionsPolicy() ? TRUE:(severity == NotCorrupting);
+ if (!fCanMethodHandleException)
+ {
+ EX_TRY
+ {
+ // Get the MethodDesc of the delegate to be invoked
+ MethodDesc *pMDDelegate = COMDelegate::GetMethodDesc(*pDelegate);
+ _ASSERTE(pMDDelegate != NULL);
+
+ // Check the callback target and see if it is following CSE rules or not.
+ fCanMethodHandleException = CEHelper::CanMethodHandleException(severity, pMDDelegate);
+ }
+ EX_CATCH
+ {
+ // In case of any exception, pretend we cannot handle the exception
+ fCanMethodHandleException = FALSE;
+ LOG((LF_EH, LL_INFO100, "ExceptionNotifications::CanDelegateBeInvokedForException: Exception while trying to determine if exception notification can be invoked or not.\n"));
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions); // Don't swallow any CSEs.
+ }
+
+ return fCanMethodHandleException;
+}
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+// This method will make the actual delegate invocation for the exception notification to be delivered. If an
+// exception escapes out of the notification, our filter in ExceptionNotifications::DeliverNotification will
+// address it.
+void ExceptionNotifications::InvokeNotificationDelegate(ExceptionNotificationHandlerType notificationType, OBJECTREF *pDelegate, OBJECTREF *pEventArgs,
+ OBJECTREF *pAppDomain
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pDelegate != NULL && IsProtectedByGCFrame(pDelegate) && (*pDelegate != NULL));
+ PRECONDITION(pEventArgs != NULL && IsProtectedByGCFrame(pEventArgs));
+ PRECONDITION(pAppDomain != NULL && IsProtectedByGCFrame(pAppDomain));
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ PRECONDITION(severity > NotSet);
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ // Unhandled Exception Notification is delivered via Unhandled Exception Processing
+ // mechanism.
+ PRECONDITION(notificationType != UnhandledExceptionHandler);
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Notifications are delivered based upon corruption severity of the exception
+ if (!ExceptionNotifications::CanDelegateBeInvokedForException(pDelegate, severity))
+ {
+ LOG((LF_EH, LL_INFO100, "ExceptionNotifications::InvokeNotificationDelegate: Delegate cannot be invoked for corruption severity %d\n",
+ severity));
+ return;
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ // We've already exercised the prestub on this delegate's COMDelegate::GetMethodDesc,
+ // as part of wiring up a reliable event sink in the BCL. Deliver the notification.
+ ExceptionNotifications::DeliverExceptionNotification(notificationType, pDelegate, pAppDomain, pEventArgs);
+}
+
+// This method returns a BOOL to indicate if the AppDomain is ready to receive exception notifications or not.
+BOOL ExceptionNotifications::CanDeliverNotificationToCurrentAppDomain(ExceptionNotificationHandlerType notificationType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ PRECONDITION(GetThread() != NULL);
+ PRECONDITION(notificationType != UnhandledExceptionHandler);
+ }
+ CONTRACTL_END;
+
+ Thread *pCurThread = GetThread();
+
+ // Get the current AppDomain
+ OBJECTREF oCurAppDomain = pCurThread->GetDomain()->GetRawExposedObject();
+ if (oCurAppDomain == NULL)
+ {
+ // Managed object for the current domain does not exist. Hence, no one
+ // can wire up to exception notifications, let alone receive them.
+ return FALSE;
+ }
+
+ // Do we have handler(s) of the specific type wired up?
+ if (notificationType == FirstChanceExceptionHandler)
+ {
+ return (((APPDOMAINREF)oCurAppDomain)->GetFirstChanceExceptionNotificationHandler() != NULL);
+ }
+ else
+ {
+ _ASSERTE(!"Invalid exception notification handler specified!");
+ return FALSE;
+ }
+}
+
+// This method wraps the call to the actual 'DeliverNotificationInternal' method in an SEH filter
+// so that if an exception escapes out of the notification callback, we will trigger failfast from
+// our filter.
+void ExceptionNotifications::DeliverNotification(ExceptionNotificationHandlerType notificationType,
+ OBJECTREF *pThrowable
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ )
+{
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_NOTHROW; // NOTHROW because in case of an exception, we will FailFast.
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ struct TryArgs
+ {
+ ExceptionNotificationHandlerType notificationType;
+ OBJECTREF *pThrowable;
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ CorruptionSeverity severity;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ } args;
+
+ args.notificationType = notificationType;
+ args.pThrowable = pThrowable;
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ args.severity = severity;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ PAL_TRY(TryArgs *, pArgs, &args)
+ {
+ // Make the call to the actual method that will invoke the callbacks
+ ExceptionNotifications::DeliverNotificationInternal(pArgs->notificationType,
+ pArgs->pThrowable
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , pArgs->severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+ }
+ PAL_EXCEPT_FILTER(ExceptionNotificationFilter)
+ {
+ // We should never be entering this handler since there should be
+ // no exception escaping out of a callback. If we are here,
+ // failfast.
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+ PAL_ENDTRY;
+}
+
+// This method will deliver the exception notification to the current AppDomain.
+void ExceptionNotifications::DeliverNotificationInternal(ExceptionNotificationHandlerType notificationType,
+ OBJECTREF *pThrowable
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+
+ // Unhandled Exception Notification is delivered via Unhandled Exception Processing
+ // mechanism.
+ PRECONDITION(notificationType != UnhandledExceptionHandler);
+ PRECONDITION((pThrowable != NULL) && (*pThrowable != NULL));
+ PRECONDITION(ExceptionNotifications::CanDeliverNotificationToCurrentAppDomain(notificationType));
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ PRECONDITION(severity > NotSet); // Exception corruption severity must be valid at this point.
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ }
+ CONTRACTL_END;
+
+ Thread *pCurThread = GetThread();
+ _ASSERTE(pCurThread != NULL);
+
+ // Get the current AppDomain
+ AppDomain *pCurDomain = GetAppDomain();
+ _ASSERTE(pCurDomain != NULL);
+
+#ifdef FEATURE_CORECLR
+ if (true)
+ {
+ // On CoreCLR, we don't support enhanced exception notifications
+ _ASSERTE(!"CoreCLR does not support enhanced exception notifications!");
+ return;
+ }
+#endif // FEATURE_CORECLR
+
+ struct
+ {
+ OBJECTREF oNotificationDelegate;
+ PTRARRAYREF arrDelegates;
+ OBJECTREF oInnerDelegate;
+ OBJECTREF oEventArgs;
+ OBJECTREF oCurrentThrowable;
+ OBJECTREF oCurAppDomain;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ // This will hold the MethodDesc of the callback that will be invoked.
+ MethodDesc *pMDDelegate = NULL;
+
+ GCPROTECT_BEGIN(gc);
+
+ // Protect the throwable to be passed to the delegate callback
+ gc.oCurrentThrowable = *pThrowable;
+
+ // We expect a valid exception object
+ _ASSERTE(IsException(gc.oCurrentThrowable->GetMethodTable()));
+
+ // Save the reference to the current AppDomain. If the user code has
+ // wired up to this event, then the managed AppDomain object will exist.
+ gc.oCurAppDomain = pCurDomain->GetRawExposedObject();
+ _ASSERTE(gc.oCurAppDomain);
+
+ // Get the reference to the delegate based upon the type of notification
+ if (notificationType == FirstChanceExceptionHandler)
+ {
+ gc.oNotificationDelegate = ((APPDOMAINREF)gc.oCurAppDomain)->GetFirstChanceExceptionNotificationHandler();
+ }
+ else
+ {
+ gc.oNotificationDelegate = NULL;
+ _ASSERTE(!"Invalid Exception Notification Handler specified!");
+ }
+
+ if (gc.oNotificationDelegate != NULL)
+ {
+ // Prevent any async exceptions from this moment on this thread
+ ThreadPreventAsyncHolder prevAsync;
+
+ gc.oEventArgs = NULL;
+
+ // Get the arguments to be passed to the delegate callback. In case of any
+ // problem while allocating the event args, we will return NULL.
+ ExceptionNotifications::GetEventArgsForNotification(notificationType, &gc.oEventArgs,
+ &gc.oCurrentThrowable);
+
+ // Check if there are multiple callbacks registered. If there are, we will
+ // loop through them, invoking each one at a time. Before invoking a target,
+ // we will check if it can be invoked based upon the corruption severity
+ // of the active exception that was passed to us.
+ gc.arrDelegates = (PTRARRAYREF) ((DELEGATEREF)(gc.oNotificationDelegate))->GetInvocationList();
+ if (gc.arrDelegates == NULL || !gc.arrDelegates->GetMethodTable()->IsArray())
+ {
+ ExceptionNotifications::InvokeNotificationDelegate(notificationType, &gc.oNotificationDelegate, &gc.oEventArgs,
+ &gc.oCurAppDomain
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+ }
+ else
+ {
+ // The _invocationCount could be less than the array size, if we are sharing
+ // immutable arrays cleverly.
+ UINT_PTR cnt = ((DELEGATEREF)(gc.oNotificationDelegate))->GetInvocationCount();
+ _ASSERTE(cnt <= gc.arrDelegates->GetNumComponents());
+
+ for (UINT_PTR i=0; i<cnt; i++)
+ {
+ gc.oInnerDelegate = gc.arrDelegates->m_Array[i];
+ ExceptionNotifications::InvokeNotificationDelegate(notificationType, &gc.oInnerDelegate, &gc.oEventArgs,
+ &gc.oCurAppDomain
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+ }
+ }
+ }
+
+ GCPROTECT_END();
+}
+
+void ExceptionNotifications::DeliverFirstChanceNotification()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // We check for FirstChance notification delivery after setting up the corruption severity
+ // so that we can determine if the callback delegate can handle CSE (or not).
+ //
+ // Deliver it only if not already done and someone has wired up to receive it.
+ //
+ // We do this provided this is the first frame of a new exception
+ // that was thrown or a rethrown exception. We don't want to do this
+ // processing for subsequent frames on the stack since the FirstChance notification
+ // will be delivered only when the exception is first thrown/rethrown.
+ ThreadExceptionState *pCurTES = GetThread()->GetExceptionState();
+ _ASSERTE(pCurTES->GetCurrentExceptionTracker());
+ _ASSERTE(!(pCurTES->GetCurrentExceptionTracker()->DeliveredFirstChanceNotification()));
+ {
+ GCX_COOP();
+ if (ExceptionNotifications::CanDeliverNotificationToCurrentAppDomain(FirstChanceExceptionHandler))
+ {
+ OBJECTREF oThrowable = NULL;
+ GCPROTECT_BEGIN(oThrowable);
+
+ oThrowable = pCurTES->GetThrowable();
+ _ASSERTE(oThrowable != NULL);
+
+ ExceptionNotifications::DeliverNotification(FirstChanceExceptionHandler, &oThrowable
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , pCurTES->GetCurrentExceptionTracker()->GetCorruptionSeverity()
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+ GCPROTECT_END();
+
+ }
+
+ // Mark the exception tracker as having delivered the first chance notification
+ pCurTES->GetCurrentExceptionTracker()->SetFirstChanceNotificationStatus(TRUE);
+ }
+}
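+
+// For reference, a managed consumer wires up to this notification via the
+// AppDomain.FirstChanceException event (System.Runtime.ExceptionServices).
+// A minimal C# sketch, illustrative only:
+//
+//   AppDomain.CurrentDomain.FirstChanceException +=
+//       (object source, FirstChanceExceptionEventArgs e) =>
+//       {
+//           Console.WriteLine("FirstChance: " + e.Exception.Message);
+//       };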
+
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+
+#ifdef WIN64EXCEPTIONS
+struct TAResetStateCallbackData
+{
+ // Do we have more managed code up the stack?
+ BOOL fDoWeHaveMoreManagedCodeOnStack;
+
+ // StackFrame representing the crawlFrame above which
+ // we are searching for presence of managed code.
+ StackFrame sfSeedCrawlFrame;
+};
+
+// This callback helps the 64bit EH attempt to determine if there is more managed code
+// up the stack (or not). Currently, it is used to conditionally reset the thread abort state
+// as the unwind passes by.
+StackWalkAction TAResetStateCallback(CrawlFrame* pCf, void* data)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ TAResetStateCallbackData *pTAResetStateCallbackData = static_cast<TAResetStateCallbackData *>(data);
+ StackWalkAction retStatus = SWA_CONTINUE;
+
+ if(pCf->IsFrameless())
+ {
+ IJitManager* pJitManager = pCf->GetJitManager();
+ _ASSERTE(pJitManager);
+ if (pJitManager && (!pTAResetStateCallbackData->fDoWeHaveMoreManagedCodeOnStack))
+ {
+ // The stackwalker can give us a callback for the seeding CrawlFrame (or other crawlframes)
+ // depending upon which is closer to the leaf: the seeding crawlframe or the explicit frame
+ // specified when starting the stackwalk.
+ //
+ // Since we are interested in checking if there is more managed code up the stack from
+ // the seeding crawlframe, we check if the current crawlframe is above it or not. If it is,
+ // then we have found managed code up the stack and should stop the stack walk. Otherwise,
+ // continue searching.
+ StackFrame sfCurrentFrame = StackFrame::FromRegDisplay(pCf->GetRegisterSet());
+ if (pTAResetStateCallbackData->sfSeedCrawlFrame < sfCurrentFrame)
+ {
+ // We have found managed code on the stack. Flag it and stop the stackwalk.
+ pTAResetStateCallbackData->fDoWeHaveMoreManagedCodeOnStack = TRUE;
+ retStatus = SWA_ABORT;
+ }
+ }
+ }
+
+ return retStatus;
+}
+#endif // WIN64EXCEPTIONS
+
+// This function will reset the thread abort state against the specified thread if it is determined that
+// there is no more managed code on the stack.
+//
+// Note: This function should be invoked ONLY during unwind.
+#if defined(_TARGET_X86_)
+void ResetThreadAbortState(PTR_Thread pThread, void *pEstablisherFrame)
+#elif defined(WIN64EXCEPTIONS)
+void ResetThreadAbortState(PTR_Thread pThread, CrawlFrame *pCf, StackFrame sfCurrentStackFrame)
+#endif
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pThread != NULL);
+#if defined(_TARGET_X86_)
+ PRECONDITION(pEstablisherFrame != NULL);
+#elif defined(WIN64EXCEPTIONS)
+ PRECONDITION(pCf != NULL);
+ PRECONDITION(!sfCurrentStackFrame.IsNull());
+#endif
+ }
+ CONTRACTL_END;
+
+ BOOL fResetThreadAbortState = FALSE;
+
+ if (pThread->IsAbortRequested())
+ {
+#if defined(_TARGET_X86_)
+ if (GetNextCOMPlusSEHRecord(static_cast<EXCEPTION_REGISTRATION_RECORD *>(pEstablisherFrame)) == EXCEPTION_CHAIN_END)
+ {
+ // Topmost handler and abort requested.
+ fResetThreadAbortState = TRUE;
+ LOG((LF_EH, LL_INFO100, "ResetThreadAbortState: Topmost handler resets abort as no more managed code beyond %p.\n", pEstablisherFrame));
+ }
+#elif defined(WIN64EXCEPTIONS)
+ // Get the active exception tracker
+ PTR_ExceptionTracker pCurEHTracker = pThread->GetExceptionState()->GetCurrentExceptionTracker();
+ _ASSERTE(pCurEHTracker != NULL);
+
+ // We will check if the thread abort state needs to be reset only for the case of an exception caught in
+ // native code. This will happen when:
+ //
+ // 1) an unwind is triggered and
+ // 2) current frame is the topmost frame we saw in the first pass and
+ // 3) a thread abort is requested and
+ // 4) we don't have the address of the exception handler to be invoked.
+ //
+ // (1), (2) and (4) above are checked for in ExceptionTracker::ProcessOSExceptionNotification from where we call this
+ // function.
+
+ // Current frame should be the topmost frame we saw in the first pass
+ _ASSERTE(pCurEHTracker->GetTopmostStackFrameFromFirstPass() == sfCurrentStackFrame);
+
+ // If the exception has been caught in native code, then along with not having the address of the handler to be
+ // invoked, we also won't have the IL clause for the catch block, and the resume stack frame will be NULL as well.
+ _ASSERTE((pCurEHTracker->GetCatchToCallPC() == NULL) &&
+ (pCurEHTracker->GetCatchHandlerExceptionClauseToken() == NULL) &&
+ (pCurEHTracker->GetResumeStackFrame().IsNull()));
+
+ // Walk the frame chain to see if there is any more managed code on the stack. If not, then this is the last managed frame
+ // on the stack and we can reset the thread abort state.
+ //
+ // Get the frame from which to start the stack walk
+ Frame* pFrame = pCurEHTracker->GetLimitFrame();
+
+ // At this point, we are at the topmost frame we saw during the first pass
+ // before the unwind began. Walk the stack using the specified crawlframe and the topmost
+ // explicit frame to determine if we have more managed code up the stack. If none is found,
+ // we can reset the thread abort state.
+
+ // Setup the data structure to be passed to the callback
+ TAResetStateCallbackData dataCallback;
+ dataCallback.fDoWeHaveMoreManagedCodeOnStack = FALSE;
+
+ // At this point, the StackFrame in CrawlFrame should represent the current frame we have been called for.
+ // _ASSERTE(sfCurrentStackFrame == StackFrame::FromRegDisplay(pCf->GetRegisterSet()));
+
+ // Reference to the StackFrame beyond which we are looking for managed code.
+ dataCallback.sfSeedCrawlFrame = sfCurrentStackFrame;
+
+ pThread->StackWalkFramesEx(pCf->GetRegisterSet(), TAResetStateCallback, &dataCallback, QUICKUNWIND, pFrame);
+
+ if (!dataCallback.fDoWeHaveMoreManagedCodeOnStack)
+ {
+ // There is no more managed code on the stack, so reset the thread abort state.
+ fResetThreadAbortState = TRUE;
+ LOG((LF_EH, LL_INFO100, "ResetThreadAbortState: Resetting thread abort state since there is no more managed code beyond stack frames:\n"));
+ LOG((LF_EH, LL_INFO100, "sf.SP = %p ", dataCallback.sfSeedCrawlFrame.SP));
+ }
+#else // WIN64EXCEPTIONS
+#error Unsupported platform
+#endif // WIN64EXCEPTIONS
+ }
+
+ if (fResetThreadAbortState)
+ {
+ pThread->EEResetAbort(Thread::TAR_Thread);
+ }
+}
+#endif // !DACCESS_COMPILE
+
+#endif // !CROSSGEN_COMPILE
+
+//---------------------------------------------------------------------------------
+//
+//
+// EXCEPTION THROWING HELPERS
+//
+//
+//---------------------------------------------------------------------------------
+
+//---------------------------------------------------------------------------------
+// Funnel-worker for THROW_BAD_FORMAT and friends.
+//
+// Note: The "cond" argument is there to tide us over during the transition from
+// BAD_FORMAT_ASSERT to THROW_BAD_FORMAT. It will go away soon.
+//---------------------------------------------------------------------------------
+VOID ThrowBadFormatWorker(UINT resID, LPCWSTR imageName DEBUGARG(__in_z const char *cond))
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+#ifndef DACCESS_COMPILE
+ SString msgStr;
+
+ if ((imageName != NULL) && (imageName[0] != 0))
+ {
+ msgStr += W("[");
+ msgStr += imageName;
+ msgStr += W("] ");
+ }
+
+ SString resStr;
+ if (resID == 0 || !resStr.LoadResource(CCompRC::Optional, resID))
+ {
+ resStr.LoadResource(CCompRC::Error, MSG_FOR_URT_HR(COR_E_BADIMAGEFORMAT));
+ }
+ msgStr += resStr;
+
+#ifdef _DEBUG
+ if (0 != strcmp(cond, "FALSE"))
+ {
+ msgStr += W(" (Failed condition: "); // this is in DEBUG only - not going to localize it.
+ SString condStr(SString::Ascii, cond);
+ msgStr += condStr;
+ msgStr += W(")");
+ }
+#endif
+
+ ThrowHR(COR_E_BADIMAGEFORMAT, msgStr);
+#endif // #ifndef DACCESS_COMPILE
+}
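+
+// For illustration, a message assembled by the worker above has roughly this shape
+// (the trailing parenthesized portion appears only in _DEBUG builds, and only when
+// the condition string is not "FALSE"):
+//
+//   [<imageName>] <resource text for resID> (Failed condition: <cond>)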
+
+UINT GetResourceIDForFileLoadExceptionHR(HRESULT hr)
+{
+ switch (hr) {
+
+ case CTL_E_FILENOTFOUND:
+ hr = IDS_EE_FILE_NOT_FOUND;
+ break;
+
+ case (HRESULT)IDS_EE_PROC_NOT_FOUND:
+ case (HRESULT)IDS_EE_PATH_TOO_LONG:
+ case INET_E_OBJECT_NOT_FOUND:
+ case INET_E_DATA_NOT_AVAILABLE:
+ case INET_E_DOWNLOAD_FAILURE:
+ case INET_E_UNKNOWN_PROTOCOL:
+ case (HRESULT)IDS_INET_E_SECURITY_PROBLEM:
+ case (HRESULT)IDS_EE_BAD_USER_PROFILE:
+ case (HRESULT)IDS_EE_ALREADY_EXISTS:
+ case IDS_EE_REFLECTIONONLY_LOADFAILURE:
+ case IDS_CLASSLOAD_32BITCLRLOADING64BITASSEMBLY:
+ break;
+
+ case MK_E_SYNTAX:
+ hr = FUSION_E_INVALID_NAME;
+ break;
+
+ case INET_E_CONNECTION_TIMEOUT:
+ hr = IDS_INET_E_CONNECTION_TIMEOUT;
+ break;
+
+ case INET_E_CANNOT_CONNECT:
+ hr = IDS_INET_E_CANNOT_CONNECT;
+ break;
+
+ case INET_E_RESOURCE_NOT_FOUND:
+ hr = IDS_INET_E_RESOURCE_NOT_FOUND;
+ break;
+
+ case NTE_BAD_HASH:
+ case NTE_BAD_LEN:
+ case NTE_BAD_KEY:
+ case NTE_BAD_DATA:
+ case NTE_BAD_ALGID:
+ case NTE_BAD_FLAGS:
+ case NTE_BAD_HASH_STATE:
+ case NTE_BAD_UID:
+ case NTE_FAIL:
+ case NTE_BAD_TYPE:
+ case NTE_BAD_VER:
+ case NTE_BAD_SIGNATURE:
+ case NTE_SIGNATURE_FILE_BAD:
+ case CRYPT_E_HASH_VALUE:
+ hr = IDS_EE_HASH_VAL_FAILED;
+ break;
+
+ default:
+ hr = IDS_EE_FILELOAD_ERROR_GENERIC;
+ break;
+
+ }
+
+ return (UINT) hr;
+}
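+
+// Illustrative use (hypothetical call site): callers map a failing HRESULT to the
+// resource string used when building a FileLoadException message.
+//
+//   UINT resID = GetResourceIDForFileLoadExceptionHR(CTL_E_FILENOTFOUND);
+//   // resID == IDS_EE_FILE_NOT_FOUND per the mapping above; unrecognized HRESULTs
+//   // fall back to IDS_EE_FILELOAD_ERROR_GENERIC.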
+
+#ifndef DACCESS_COMPILE
+
+//==========================================================================
+// Throw a runtime exception based on the last Win32 error (GetLastError())
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrowWin32()
+{
+
+ // before we do anything else...
+ DWORD err = ::GetLastError();
+
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ RealCOMPlusThrowWin32(HRESULT_FROM_WIN32(err));
+} // VOID DECLSPEC_NORETURN RealCOMPlusThrowWin32()
+
+//==========================================================================
+// Throw a runtime exception based on the last Win32 error (GetLastError())
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrowWin32(HRESULT hr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Force to ApplicationException for compatibility with previous versions. We would
+ // prefer a "Win32Exception" here.
+ EX_THROW(EEMessageException, (kApplicationException, hr, 0 /* resid*/,
+ NULL /* szArg1 */, NULL /* szArg2 */, NULL /* szArg3 */, NULL /* szArg4 */,
+ NULL /* szArg5 */, NULL /* szArg6 */));
+} // VOID DECLSPEC_NORETURN RealCOMPlusThrowWin32()
+
+
+//==========================================================================
+// Throw an OutOfMemoryError
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrowOM()
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ CANNOT_TAKE_LOCK;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ ThrowOutOfMemory();
+}
+
+//==========================================================================
+// Throw an undecorated runtime exception.
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrow(RuntimeExceptionKind reKind)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE((reKind != kExecutionEngineException) ||
+ !"ExecutionEngineException shouldn't be thrown. Use EEPolicy to failfast or a better exception. The caller of this function should modify their code.");
+
+ EX_THROW(EEException, (reKind));
+}
+
+//==========================================================================
+// Throw a decorated runtime exception.
+// Try using RealCOMPlusThrow(reKind, wszResourceName) instead.
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrowNonLocalized(RuntimeExceptionKind reKind, LPCWSTR wszTag)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE((reKind != kExecutionEngineException) ||
+ !"ExecutionEngineException shouldn't be thrown. Use EEPolicy to failfast or a better exception. The caller of this function should modify their code.");
+
+ EX_THROW(EEMessageException, (reKind, IDS_EE_GENERIC, wszTag));
+}
+
+//==========================================================================
+// Throw a runtime exception based on an HResult
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrowHR(HRESULT hr, IErrorInfo* pErrInfo, Exception * pInnerException)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS; // because of IErrorInfo
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (FAILED(hr));
+
+ // Though we would like to assert this, it can happen in the following scenario:
+ //
+ // MgdCode --RCW-> COM --CCW-> MgdCode2
+ //
+ // If MgdCode2 throws an EEE, when it reaches the RCW, it will invoke MarshalNative::ThrowExceptionForHr and thus
+ // reach here. Hence, we will need to keep the assert off until user code is stopped from creating an EEE.
+
+ //_ASSERTE((hr != COR_E_EXECUTIONENGINE) ||
+ // !"ExecutionEngineException shouldn't be thrown. Use EEPolicy to failfast or a better exception. The caller of this function should modify their code.");
+
+#ifndef CROSSGEN_COMPILE
+#ifdef FEATURE_COMINTEROP
+ // check for complus created IErrorInfo pointers
+ if (pErrInfo != NULL)
+ {
+ GCX_COOP();
+ {
+ OBJECTREF oRetVal = NULL;
+ GCPROTECT_BEGIN(oRetVal);
+ GetExceptionForHR(hr, pErrInfo, &oRetVal);
+ _ASSERTE(oRetVal != NULL);
+ RealCOMPlusThrow(oRetVal);
+ GCPROTECT_END ();
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (pErrInfo != NULL)
+ {
+ if (pInnerException == NULL)
+ {
+ EX_THROW(EECOMException, (hr, pErrInfo, true, NULL, FALSE));
+ }
+ else
+ {
+ EX_THROW_WITH_INNER(EECOMException, (hr, pErrInfo, true, NULL, FALSE), pInnerException);
+ }
+ }
+ else
+#endif // CROSSGEN_COMPILE
+ {
+ if (pInnerException == NULL)
+ {
+ EX_THROW(EEMessageException, (hr));
+ }
+ else
+ {
+ EX_THROW_WITH_INNER(EEMessageException, (hr), pInnerException);
+ }
+ }
+}
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowHR(HRESULT hr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ // ! COMPlusThrowHR(hr) no longer snags the IErrorInfo off the TLS (Too many places
+ // ! call this routine where no IErrorInfo was set by the prior call.)
+ // !
+ // ! If you actually want to pull IErrorInfo off the TLS, call
+ // !
+ // ! COMPlusThrowHR(hr, kGetErrorInfo)
+
+ RealCOMPlusThrowHR(hr, (IErrorInfo*)NULL);
+}
+
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowHR(HRESULT hr, tagGetErrorInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Get an IErrorInfo if one is available.
+ IErrorInfo *pErrInfo = NULL;
+
+#ifndef CROSSGEN_COMPILE
+ if (SafeGetErrorInfo(&pErrInfo) != S_OK)
+ pErrInfo = NULL;
+#endif
+
+ // Throw the exception.
+ RealCOMPlusThrowHR(hr, pErrInfo);
+}
+
+
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowHR(HRESULT hr, UINT resID, LPCWSTR wszArg1,
+ LPCWSTR wszArg2, LPCWSTR wszArg3, LPCWSTR wszArg4,
+ LPCWSTR wszArg5, LPCWSTR wszArg6)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (FAILED(hr));
+
+ // Though we would like to assert this, it can happen in the following scenario:
+ //
+ // MgdCode --RCW-> COM --CCW-> MgdCode2
+ //
+ // If MgdCode2 throws an EEE, when it reaches the RCW, it will invoke MarshalNative::ThrowExceptionForHr and thus
+ // reach here. Hence, we will need to keep the assert off until user code is stopped from creating an EEE.
+
+ //_ASSERTE((hr != COR_E_EXECUTIONENGINE) ||
+ // !"ExecutionEngineException shouldn't be thrown. Use EEPolicy to failfast or a better exception. The caller of this function should modify their code.");
+
+ EX_THROW(EEMessageException,
+ (hr, resID, wszArg1, wszArg2, wszArg3, wszArg4, wszArg5, wszArg6));
+}
+
+//==========================================================================
+// Throw a decorated runtime exception with a localized message.
+// Queries the ResourceManager for a corresponding resource value.
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrow(RuntimeExceptionKind reKind, LPCWSTR wszResourceName, Exception * pInnerException)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ PRECONDITION(CheckPointer(wszResourceName));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE((reKind != kExecutionEngineException) ||
+ !"ExecutionEngineException shouldn't be thrown. Use EEPolicy to failfast or a better exception. The caller of this function should modify their code.");
+ //
+ // For some reason, the compiler complains about unreachable code if
+ // we don't split the new from the throw. So we're left with this
+ // unnecessarily verbose syntax.
+ //
+
+ if (pInnerException == NULL)
+ {
+ EX_THROW(EEResourceException, (reKind, wszResourceName));
+ }
+ else
+ {
+ EX_THROW_WITH_INNER(EEResourceException, (reKind, wszResourceName), pInnerException);
+ }
+}
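+
+// Illustrative call (hypothetical use; compare the W("ArgumentNull_Generic") resource
+// used by RealCOMPlusThrowArgumentNull later in this file):
+//
+//   RealCOMPlusThrow(kArgumentNullException, W("ArgumentNull_Generic"));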
+
+//==========================================================================
+// Used by the classloader to record a managed exception object to explain
+// why a classload got botched.
+//
+// - Can be called with gc enabled or disabled.
+// This allows a catch-all error path to post a generic catchall error
+// message w/out bonking more specific error messages posted by inner functions.
+//==========================================================================
+VOID DECLSPEC_NORETURN ThrowTypeLoadException(LPCWSTR pFullTypeName,
+ LPCWSTR pAssemblyName,
+ LPCUTF8 pMessageArg,
+ UINT resIDWhy)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EX_THROW(EETypeLoadException, (pFullTypeName, pAssemblyName, pMessageArg, resIDWhy));
+}
+
+
+//==========================================================================
+// Used by the classloader to post an illegal layout error
+//==========================================================================
+VOID DECLSPEC_NORETURN ThrowFieldLayoutError(mdTypeDef cl, // cl of the NStruct being loaded
+ Module* pModule, // Module that defines the scope, loader and heap (for allocate FieldMarshalers)
+ DWORD dwOffset, // Offset of field
+ DWORD dwID) // Message id
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ IMDInternalImport *pInternalImport = pModule->GetMDImport(); // Internal interface for the NStruct being loaded.
+
+ LPCUTF8 pszName, pszNamespace;
+ if (FAILED(pInternalImport->GetNameOfTypeDef(cl, &pszName, &pszNamespace)))
+ {
+ pszName = pszNamespace = "Invalid TypeDef record";
+ }
+
+ CHAR offsetBuf[16];
+ sprintf_s(offsetBuf, COUNTOF(offsetBuf), "%d", dwOffset);
+ offsetBuf[COUNTOF(offsetBuf) - 1] = '\0';
+
+ pModule->GetAssembly()->ThrowTypeLoadException(pszNamespace,
+ pszName,
+ offsetBuf,
+ dwID);
+}
+
+//==========================================================================
+// Throw an ArithmeticException
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrowArithmetic()
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ RealCOMPlusThrow(kArithmeticException);
+}
+
+//==========================================================================
+// Throw an ArgumentNullException
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrowArgumentNull(LPCWSTR argName, LPCWSTR wszResourceName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ PRECONDITION(CheckPointer(wszResourceName));
+ }
+ CONTRACTL_END;
+
+ EX_THROW(EEArgumentException, (kArgumentNullException, argName, wszResourceName));
+}
+
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowArgumentNull(LPCWSTR argName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EX_THROW(EEArgumentException, (kArgumentNullException, argName, W("ArgumentNull_Generic")));
+}
+
+
+//==========================================================================
+// Throw an ArgumentOutOfRangeException
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrowArgumentOutOfRange(LPCWSTR argName, LPCWSTR wszResourceName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EX_THROW(EEArgumentException, (kArgumentOutOfRangeException, argName, wszResourceName));
+}
+
+//==========================================================================
+// Throw an ArgumentException
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrowArgumentException(LPCWSTR argName, LPCWSTR wszResourceName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EX_THROW(EEArgumentException, (kArgumentException, argName, wszResourceName));
+}
+
+//==========================================================================
+// Used by the classloader to record a managed exception object to explain
+// why a classload got botched.
+//
+// - Can be called with gc enabled or disabled.
+// This allows a catch-all error path to post a generic catch-all error
+// message without clobbering more specific error messages posted by inner functions.
+//==========================================================================
+VOID DECLSPEC_NORETURN ThrowTypeLoadException(LPCUTF8 pszNameSpace,
+ LPCUTF8 pTypeName,
+ LPCWSTR pAssemblyName,
+ LPCUTF8 pMessageArg,
+ UINT resIDWhy)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EX_THROW(EETypeLoadException, (pszNameSpace, pTypeName, pAssemblyName, pMessageArg, resIDWhy));
+}
+
+//==========================================================================
+// Throw a decorated runtime exception.
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrow(RuntimeExceptionKind reKind, UINT resID,
+ LPCWSTR wszArg1, LPCWSTR wszArg2, LPCWSTR wszArg3,
+ LPCWSTR wszArg4, LPCWSTR wszArg5, LPCWSTR wszArg6)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EX_THROW(EEMessageException,
+ (reKind, resID, wszArg1, wszArg2, wszArg3, wszArg4, wszArg5, wszArg6));
+}
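+
+// Illustrative call (a sketch; the resource ID here is hypothetical -- the insertion
+// points in the resource string are filled from wszArg1..wszArg6 in order):
+//
+//     COMPlusThrow(kArgumentException, IDS_EE_SOMEMESSAGE, W("myArg"));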
+
+#ifdef FEATURE_COMINTEROP
+#ifndef CROSSGEN_COMPILE
+//==========================================================================
+// Throw a runtime exception based on an HResult, check for error info
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrowHR(HRESULT hr, IUnknown *iface, REFIID riid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS; // because of IErrorInfo
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ IErrorInfo *info = NULL;
+ {
+ GCX_PREEMP();
+ info = GetSupportedErrorInfo(iface, riid);
+ }
+ RealCOMPlusThrowHR(hr, info);
+}
+
+//==========================================================================
+// Throw a runtime exception based on an EXCEPINFO. This function will free
+// the strings in the EXCEPINFO that is passed in.
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrowHR(EXCEPINFO *pExcepInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EX_THROW(EECOMException, (pExcepInfo));
+}
+#endif //CROSSGEN_COMPILE
+
+#endif // FEATURE_COMINTEROP
+
+
+#ifdef FEATURE_STACK_PROBE
+//==========================================================================
+// Throw a StackOverflowError
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrowSO()
+{
+ CONTRACTL
+ {
+ // This should be THROWS, but it isn't, because a stack overflow doesn't technically
+ // fall under the same THROW/NOTHROW conventions as the rest of the contract
+ // infrastructure.
+ NOTHROW;
+
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // We only use BreakOnSO in debug mode, so we only check the config value
+ // when the _DEBUG flag is set.
+#ifdef _DEBUG
+ static int breakOnSO = -1;
+
+ if (breakOnSO == -1)
+ breakOnSO = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BreakOnSO);
+
+ if (breakOnSO != 0)
+ {
+ _ASSERTE(!"SO occured");
+ }
+#endif
+
+ ThrowStackOverflow();
+}
+#endif
+
+//==========================================================================
+// Throw an InvalidCastException
+//==========================================================================
+
+#ifdef FEATURE_FUSION
+static const WCHAR *GetContextName(LOADCTX_TYPE kLoadContext,
+ BOOL fIntrospectionOnly)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Context names are treated as symbols and therefore not localized
+ switch (kLoadContext)
+ {
+ case LOADCTX_TYPE_DEFAULT:
+ return W("Default");
+ case LOADCTX_TYPE_LOADFROM:
+ return W("LoadFrom");
+ default:
+ return (fIntrospectionOnly ? W("InspectionContext") : W("LoadNeither"));
+ }
+}
+#endif
+
+VOID GetAssemblyDetailInfo(SString &sType,
+ SString &sAssemblyDisplayName,
+ PEAssembly *pPEAssembly,
+ SString &sAssemblyDetailInfo)
+{
+ WRAPPER_NO_CONTRACT;
+
+ InlineSString<MAX_PATH> sFormat;
+#ifdef FEATURE_FUSION
+ const WCHAR *pwzLoadContext = GetContextName(pPEAssembly->GetLoadContext(),
+ pPEAssembly->IsIntrospectionOnly());
+#else
+ const WCHAR *pwzLoadContext = W("Default");
+#endif
+
+ if (pPEAssembly->GetPath().IsEmpty())
+ {
+ sFormat.LoadResource(CCompRC::Debugging, IDS_EE_CANNOTCAST_HELPER_BYTE);
+
+ sAssemblyDetailInfo.Printf(sFormat.GetUnicode(),
+ sType.GetUnicode(),
+ sAssemblyDisplayName.GetUnicode(),
+ pwzLoadContext);
+ }
+ else
+ {
+ sFormat.LoadResource(CCompRC::Debugging, IDS_EE_CANNOTCAST_HELPER_PATH);
+
+ sAssemblyDetailInfo.Printf(sFormat.GetUnicode(),
+ sType.GetUnicode(),
+ sAssemblyDisplayName.GetUnicode(),
+ pwzLoadContext,
+ pPEAssembly->GetPath().GetUnicode());
+ }
+}
+
+VOID CheckAndThrowSameTypeAndAssemblyInvalidCastException(TypeHandle thCastFrom,
+ TypeHandle thCastTo)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;
+ } CONTRACTL_END;
+
+ Module *pModuleTypeFrom = thCastFrom.GetModule();
+ Module *pModuleTypeTo = thCastTo.GetModule();
+
+ if ((pModuleTypeFrom != NULL) && (pModuleTypeTo != NULL))
+ {
+ Assembly *pAssemblyTypeFrom = pModuleTypeFrom->GetAssembly();
+ Assembly *pAssemblyTypeTo = pModuleTypeTo->GetAssembly();
+
+ _ASSERTE(pAssemblyTypeFrom != NULL);
+ _ASSERTE(pAssemblyTypeTo != NULL);
+
+ PEAssembly *pPEAssemblyTypeFrom = pAssemblyTypeFrom->GetManifestFile();
+ PEAssembly *pPEAssemblyTypeTo = pAssemblyTypeTo->GetManifestFile();
+
+ _ASSERTE(pPEAssemblyTypeFrom != NULL);
+ _ASSERTE(pPEAssemblyTypeTo != NULL);
+
+ InlineSString<MAX_PATH> sAssemblyFromDisplayName;
+ InlineSString<MAX_PATH> sAssemblyToDisplayName;
+
+ pPEAssemblyTypeFrom->GetDisplayName(sAssemblyFromDisplayName);
+ pPEAssemblyTypeTo->GetDisplayName(sAssemblyToDisplayName);
+
+ // Found the culprit case. Now format the new exception text.
+ InlineSString<MAX_CLASSNAME_LENGTH + 1> strCastFromName;
+ InlineSString<MAX_CLASSNAME_LENGTH + 1> strCastToName;
+ InlineSString<MAX_PATH> sAssemblyDetailInfoFrom;
+ InlineSString<MAX_PATH> sAssemblyDetailInfoTo;
+
+ thCastFrom.GetName(strCastFromName);
+ thCastTo.GetName(strCastToName);
+
+ SString typeA = SL(W("A"));
+ GetAssemblyDetailInfo(typeA,
+ sAssemblyFromDisplayName,
+ pPEAssemblyTypeFrom,
+ sAssemblyDetailInfoFrom);
+ SString typeB = SL(W("B"));
+ GetAssemblyDetailInfo(typeB,
+ sAssemblyToDisplayName,
+ pPEAssemblyTypeTo,
+ sAssemblyDetailInfoTo);
+
+ COMPlusThrow(kInvalidCastException,
+ IDS_EE_CANNOTCASTSAME,
+ strCastFromName.GetUnicode(),
+ strCastToName.GetUnicode(),
+ sAssemblyDetailInfoFrom.GetUnicode(),
+ sAssemblyDetailInfoTo.GetUnicode());
+ }
+}
+
+VOID RealCOMPlusThrowInvalidCastException(TypeHandle thCastFrom, TypeHandle thCastTo)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ // Use an InlineSString with a size of MAX_CLASSNAME_LENGTH + 1 to prevent
+ // TypeHandle::GetName from having to allocate a new block of memory. This
+ // significantly improves the performance of throwing an InvalidCastException.
+ InlineSString<MAX_CLASSNAME_LENGTH + 1> strCastFromName;
+ InlineSString<MAX_CLASSNAME_LENGTH + 1> strCastToName;
+
+ thCastTo.GetName(strCastToName);
+#ifdef FEATURE_REMOTING
+ if (thCastFrom.IsTransparentProxy())
+ {
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOTCASTPROXY, strCastToName.GetUnicode());
+ }
+ else
+#endif
+ {
+ thCastFrom.GetName(strCastFromName);
+ // Attempt to catch the A.T != A.T case that causes so much user confusion.
+ if (strCastFromName.Equals(strCastToName))
+ {
+ CheckAndThrowSameTypeAndAssemblyInvalidCastException(thCastFrom, thCastTo);
+ }
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOTCAST, strCastFromName.GetUnicode(), strCastToName.GetUnicode());
+ }
+}
+
+#ifndef CROSSGEN_COMPILE
+VOID RealCOMPlusThrowInvalidCastException(OBJECTREF *pObj, TypeHandle thCastTo)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsProtectedByGCFrame (pObj));
+ } CONTRACTL_END;
+
+ TypeHandle thCastFrom = (*pObj)->GetTypeHandle();
+#ifdef FEATURE_COMINTEROP
+ if (thCastFrom.GetMethodTable()->IsComObjectType())
+ {
+ // Special case casting RCWs so we can give better error information when the
+ // cast fails.
+ ComObject::ThrowInvalidCastException(pObj, thCastTo.GetMethodTable());
+ }
+#endif
+ COMPlusThrowInvalidCastException(thCastFrom, thCastTo);
+}
+#endif // CROSSGEN_COMPILE
+
+#endif // DACCESS_COMPILE
+
+#ifndef CROSSGEN_COMPILE // ???
+#ifdef FEATURE_COMINTEROP
+#include "comtoclrcall.h"
+#endif // FEATURE_COMINTEROP
+
+// Reverse COM interop IL stubs need to catch all exceptions and translate them into HRESULTs.
+// But we allow for CSEs to be rethrown. Our corrupting state policy gets applied to the
+// original user-visible method that triggered the IL stub to be generated. So we must be able
+// to map back from a given IL stub to the user-visible method. Here, we do that only when we
+// see a 'matching' ComMethodFrame further up the stack.
+MethodDesc * GetUserMethodForILStub(Thread * pThread, UINT_PTR uStubSP, MethodDesc * pILStubMD, Frame ** ppFrameOut)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pILStubMD->IsILStub());
+ }
+ CONTRACTL_END;
+
+ MethodDesc * pUserMD = pILStubMD;
+#ifdef FEATURE_COMINTEROP
+ DynamicMethodDesc * pDMD = pILStubMD->AsDynamicMethodDesc();
+ if (pDMD->IsCOMToCLRStub())
+ {
+ // There are some differences across architectures for "which" SP is passed in.
+ // On ARM, the SP is the SP on entry to the IL stub; on the other architectures, it's
+ // a post-prolog SP. But this doesn't matter here because the COM->CLR path
+ // always pushes the Frame in a caller's stack frame.
+
+ Frame * pCurFrame = pThread->GetFrame();
+ while ((UINT_PTR)pCurFrame < uStubSP)
+ {
+ pCurFrame = pCurFrame->PtrNextFrame();
+ }
+
+ // The construction of the COM->CLR path ensures that our corresponding ComMethodFrame
+ // should be present further up the stack. Normally, the ComMethodFrame in question is
+ // simply the next stack frame; however, there are situations where there may be other
+ // stack frames present (such as an optional ContextTransitionFrame if we switched
+ // AppDomains, or an inlined stack frame from a QCall in the IL stub).
+ while (pCurFrame->GetVTablePtr() != ComMethodFrame::GetMethodFrameVPtr())
+ {
+ pCurFrame = pCurFrame->PtrNextFrame();
+ }
+
+ ComMethodFrame * pComFrame = (ComMethodFrame *)pCurFrame;
+ _ASSERTE((UINT_PTR)pComFrame > uStubSP);
+
+ CONSISTENCY_CHECK_MSG(pComFrame->GetVTablePtr() == ComMethodFrame::GetMethodFrameVPtr(),
+ "Expected to find a ComMethodFrame.");
+
+ ComCallMethodDesc * pCMD = pComFrame->GetComCallMethodDesc();
+
+ CONSISTENCY_CHECK_MSG(pILStubMD == ExecutionManager::GetCodeMethodDesc(pCMD->GetILStub()),
+ "The ComMethodFrame that we found doesn't match the IL stub passed in.");
+
+ pUserMD = pCMD->GetMethodDesc();
+ *ppFrameOut = pComFrame;
+ }
+#endif // FEATURE_COMINTEROP
+ return pUserMD;
+}
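+
+// Illustrative caller pattern (a sketch; uStubSP is the stub SP reported to the
+// personality routine, and ppFrameOut receives the matching ComMethodFrame, if any):
+//
+//     Frame * pFrame = NULL;
+//     MethodDesc * pUserMD = GetUserMethodForILStub(pThread, uStubSP, pILStubMD, &pFrame);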
+#endif //CROSSGEN_COMPILE
diff --git a/src/vm/excep.h b/src/vm/excep.h
new file mode 100644
index 0000000000..531e42706d
--- /dev/null
+++ b/src/vm/excep.h
@@ -0,0 +1,966 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// EXCEP.H - Copyright (C) 1998 Microsoft Corporation
+//
+
+//
+
+
+#ifndef __excep_h__
+#define __excep_h__
+
+#include "exstatecommon.h"
+#include "exceptmacros.h"
+#include "corerror.h" // HResults for the COM+ Runtime
+#include "corexcep.h" // Exception codes for the COM+ Runtime
+
+class Thread;
+
+#include "../dlls/mscorrc/resource.h"
+
+#include <excepcpu.h>
+#include "interoputil.h"
+
+BOOL IsExceptionFromManagedCode(const EXCEPTION_RECORD * pExceptionRecord);
+
+#if defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK)
+
+// General purpose functions for use on an IP in jitted code.
+bool IsIPInProlog(EECodeInfo *pCodeInfo);
+bool IsIPInEpilog(PTR_CONTEXT pContextToCheck, EECodeInfo *pCodeInfo, BOOL *pSafeToInjectThreadAbort);
+
+#endif // defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK)
+
+void RaiseFailFastExceptionOnWin7(PEXCEPTION_RECORD pExceptionRecord, PT_CONTEXT pContext);
+
+// Check if the Win32 Error code is an IO error.
+BOOL IsWin32IOError(SCODE scode);
+
+//******************************************************************************
+//
+// SwallowUnhandledExceptions
+//
+// Consult the EE policy and the app config to determine if the runtime should "swallow" unhandled exceptions.
+// Swallow if: the EEPolicy->UnhandledExceptionPolicy is "eHostDeterminedPolicy"
+// or: the app config value LegacyUnhandledExceptionPolicy() is set.
+//
+// Parameters:
+// none
+//
+// Return value:
+// true - the runtime should "swallow" unhandled exceptions
+//
+inline bool SwallowUnhandledExceptions()
+{
+ return (eHostDeterminedPolicy == GetEEPolicy()->GetUnhandledExceptionPolicy()) ||
+ g_pConfig->LegacyUnhandledExceptionPolicy() ||
+ GetCompatibilityFlag(compatSwallowUnhandledExceptions);
+}
+
+// Enums
+// return values of LookForHandler
+enum LFH {
+ LFH_NOT_FOUND = 0,
+ LFH_FOUND = 1,
+};
+
+#include "runtimeexceptionkind.h"
+
+class IJitManager;
+
+//
+// ThrowCallbackType is used to pass information between various functions and the callbacks that they call
+// during a managed stack walk.
+//
+struct ThrowCallbackType
+{
+ MethodDesc * pFunc; // the function containing a filter that returned catch indication
+ int dHandler; // the index of the handler whose filter returned catch indication
+ BOOL bIsUnwind; // are we currently unwinding an exception
+ BOOL bUnwindStack; // reset the stack before calling the handler? (Stack overflow only)
+ BOOL bAllowAllocMem; // are we allowed to allocate memory?
+ BOOL bDontCatch; // can we catch this exception?
+ BYTE *pStack;
+ Frame * pTopFrame;
+ Frame * pBottomFrame;
+ MethodDesc * pProfilerNotify; // Context for profiler callbacks -- see COMPlusFrameHandler().
+ BOOL bReplaceStack; // Used to pass info to SaveStackTrace call
+ BOOL bSkipLastElement;// Used to pass info to SaveStackTrace call
+ HANDLE hCallerToken;
+ HANDLE hImpersonationToken;
+ BOOL bImpersonationTokenSet;
+#ifdef _DEBUG
+ void * pCurrentExceptionRecord;
+ void * pPrevExceptionRecord;
+#endif
+
+ // Is the current exception a longjmp?
+ CORRUPTING_EXCEPTIONS_ONLY(BOOL m_fIsLongJump;)
+ void Init()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ pFunc = NULL;
+ dHandler = 0;
+ bIsUnwind = FALSE;
+ bUnwindStack = FALSE;
+ bAllowAllocMem = TRUE;
+ bDontCatch = FALSE;
+ pStack = NULL;
+ pTopFrame = (Frame *)-1;
+ pBottomFrame = (Frame *)-1;
+ pProfilerNotify = NULL;
+ bReplaceStack = FALSE;
+ bSkipLastElement = FALSE;
+ hCallerToken = NULL;
+ hImpersonationToken = NULL;
+ bImpersonationTokenSet = FALSE;
+
+#ifdef _DEBUG
+ pCurrentExceptionRecord = 0;
+ pPrevExceptionRecord = 0;
+#endif
+ // By default, the current exception is not a longjmp
+ CORRUPTING_EXCEPTIONS_ONLY(m_fIsLongJump = FALSE;)
+ }
+};
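+
+// Typical first-pass setup (a sketch; on x86 the stack walker invokes
+// COMPlusThrowCallback, declared below, once per frame with &tct as its pData):
+//
+//     ThrowCallbackType tct;
+//     tct.Init();
+//     tct.pTopFrame = pThread->GetFrame();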
+
+
+
+struct EE_ILEXCEPTION_CLAUSE;
+
+void InitializeExceptionHandling();
+void CLRAddVectoredHandlers(void);
+void CLRRemoveVectoredHandlers(void);
+void TerminateExceptionHandling();
+
+// Prototypes
+EXTERN_C VOID STDCALL ResetCurrentContext();
+#if !defined(WIN64EXCEPTIONS)
+#ifdef _DEBUG
+void CheckStackBarrier(EXCEPTION_REGISTRATION_RECORD *exRecord);
+#endif
+EXCEPTION_REGISTRATION_RECORD *FindNestedEstablisherFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame);
+LFH LookForHandler(const EXCEPTION_POINTERS *pExceptionPointers, Thread *pThread, ThrowCallbackType *tct);
+StackWalkAction COMPlusThrowCallback (CrawlFrame *pCf, ThrowCallbackType *pData);
+void UnwindFrames(Thread *pThread, ThrowCallbackType *tct);
+#endif // !defined(WIN64EXCEPTIONS)
+
+void UnwindFrameChain(Thread *pThread, LPVOID pvLimitSP);
+DWORD MapWin32FaultToCOMPlusException(EXCEPTION_RECORD *pExceptionRecord);
+DWORD ComputeEnclosingHandlerNestingLevel(IJitManager *pIJM, const METHODTOKEN& mdTok, SIZE_T offsNat);
+BOOL IsException(MethodTable *pMT);
+BOOL IsExceptionOfType(RuntimeExceptionKind reKind, OBJECTREF *pThrowable);
+BOOL IsExceptionOfType(RuntimeExceptionKind reKind, Exception *pException);
+BOOL IsAsyncThreadException(OBJECTREF *pThrowable);
+BOOL IsUncatchable(OBJECTREF *pThrowable);
+VOID FixupOnRethrow(Thread *pCurThread, EXCEPTION_POINTERS *pExceptionPointers);
+BOOL UpdateCurrentThrowable(PEXCEPTION_RECORD pExceptionRecord);
+BOOL IsStackOverflowException(Thread* pThread, EXCEPTION_RECORD* pExceptionRecord);
+void WrapNonCompliantException(OBJECTREF *ppThrowable);
+OBJECTREF PossiblyUnwrapThrowable(OBJECTREF throwable, Assembly *pAssembly);
+BOOL ExceptionTypeOverridesStackTraceGetter(PTR_MethodTable pMT);
+
+// Removes source file names/paths and line information from a stack trace.
+void StripFileInfoFromStackTrace(SString &ssStackTrace);
+
+#ifdef _DEBUG
+// C++ EH cracking material gleaned from the debugger:
+// (DO NOT USE THIS KNOWLEDGE IN NON-DEBUG CODE!!!)
+void *DebugGetCxxException(EXCEPTION_RECORD* pExceptionRecord);
+#endif
+
+
+#ifdef _DEBUG_IMPL
+BOOL IsValidClause(EE_ILEXCEPTION_CLAUSE *EHClause);
+BOOL IsCOMPlusExceptionHandlerInstalled();
+#endif
+
+BOOL InstallUnhandledExceptionFilter();
+void UninstallUnhandledExceptionFilter();
+
+#if defined(FEATURE_CORECLR) && !defined(FEATURE_PAL)
+// Section naming is a strategy by itself. Ideally, we could have named the UEF section
+// ".text$zzz" (lowercase after $ is important). What the linker does is look for sections
+// that have the same name before the '$' sign. It combines them, sorted in alphabetical
+// order. Thus, naming the UEF section ".text$zzz" would ensure that the UEF section is the last
+// thing in the .text section. The reason for opting out of this approach was that BBT can move code
+// within a section, no matter where it was located - and in this case, we need the UEF code
+// at a fixed location so that we can check the memory protection of the section that follows it
+// without affecting the UEF's own memory protection. For details, read the comment in
+// "CExecutionEngine::ClrVirtualProtect".
+//
+// Keeping UEF in its own section helps prevent code movement, as BBT does not reorder
+// sections. As per my understanding of the linker, the ".text" section always comes first,
+// followed by other "executable" sections (like our UEF section) and then ".data", etc.
+// The order of user-defined executable sections is typically determined by the linker
+// by which section it sees first. So, if there is another custom executable
+// section that comes after the UEF section, it can affect the UEF section and we will
+// assert about it in "CExecutionEngine::ClrVirtualProtect".
+#define CLR_UEF_SECTION_NAME ".CLR_UEF"
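+
+// A sketch of how the section name is assumed to be applied (via the MSVC code_seg
+// pragma; the actual placement lives with the UEF code itself):
+//
+//     #pragma code_seg(push, ".CLR_UEF")
+//     // ... COMUnhandledExceptionFilter and its helpers ...
+//     #pragma code_seg(pop)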
+#endif // defined(FEATURE_CORECLR) && !defined(FEATURE_PAL)
+LONG __stdcall COMUnhandledExceptionFilter(EXCEPTION_POINTERS *pExceptionInfo);
+
+
+//////////////
+// A list of places where we might have unhandled exceptions or other serious faults. These can be used as a mask in
+// DbgJITDebuggerLaunchSetting to help control when we decide to ask the user about whether or not to launch a debugger.
+//
+enum UnhandledExceptionLocation
+ {
+ ProcessWideHandler = 0x000001,
+ ManagedThread = 0x000002, // Does not terminate the application. CLR swallows the unhandled exception.
+ ThreadPoolThread = 0x000004, // ditto.
+ FinalizerThread = 0x000008, // ditto.
+ FatalStackOverflow = 0x000010,
+ SystemNotification = 0x000020, // CLR will swallow after the notification occurs
+ FatalExecutionEngineException = 0x000040,
+ ClassInitUnhandledException = 0x000080, // Does not terminate the application. CLR transforms this into TypeInitializationException
+
+ MaximumLocationValue = 0x800000, // This is the maximum location value you're allowed to use. (Max 24 bits allowed.)
+
+ // This is a mask of all the locations that the debugger will attach to by default.
+ DefaultDebuggerAttach = ProcessWideHandler |
+ FatalStackOverflow |
+ FatalExecutionEngineException
+};
+
+struct ThreadBaseExceptionFilterParam
+{
+ UnhandledExceptionLocation location;
+};
+
+LONG ThreadBaseExceptionFilter(PEXCEPTION_POINTERS pExceptionInfo, PVOID pvParam);
+LONG ThreadBaseExceptionSwallowingFilter(PEXCEPTION_POINTERS pExceptionInfo, PVOID pvParam);
+LONG ThreadBaseExceptionAppDomainFilter(PEXCEPTION_POINTERS pExceptionInfo, PVOID pvParam);
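+
+// Illustrative use of the thread-base filters (a sketch; real call sites wrap the
+// thread's start routine in an SEH __try):
+//
+//     ThreadBaseExceptionFilterParam param = { ManagedThread };
+//     __try
+//     {
+//         // ... run the thread's start routine ...
+//     }
+//     __except (ThreadBaseExceptionFilter(GetExceptionInformation(), &param))
+//     {
+//     }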
+
+// Filter for calls out from the 'vm' to native code, if there's a possibility of SEH exceptions
+// in the native code.
+struct CallOutFilterParam { BOOL OneShot; };
+LONG CallOutFilter(PEXCEPTION_POINTERS pExceptionInfo, PVOID pv);
+
+
+void DECLSPEC_NORETURN RaiseDeadLockException();
+
+void STDMETHODCALLTYPE DefaultCatchHandler(PEXCEPTION_POINTERS pExceptionInfo,
+ OBJECTREF *Throwable = NULL,
+ BOOL useLastThrownObject = FALSE,
+ BOOL isTerminating = FALSE,
+ BOOL isThreadBaseFilter = FALSE,
+ BOOL sendAppDomainEvents = TRUE);
+
+void ReplaceExceptionContextRecord(T_CONTEXT *pTarget, T_CONTEXT *pSource);
+
+// Localization helper function
+void ResMgrGetString(LPCWSTR wszResourceName, STRINGREF * ppMessage);
+
+// externs
+
+//==========================================================================
+// Various routines to throw COM+ objects.
+//==========================================================================
+
+//==========================================================================
+// Throw an undecorated runtime exception with a specific string parameter
+// that won't be localized. If possible, try using
+// COMPlusThrow(reKind, LPCWSTR wszResourceName) instead.
+//==========================================================================
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowNonLocalized(RuntimeExceptionKind reKind, LPCWSTR wszTag);
+
+//==========================================================================
+// Throw an object.
+//==========================================================================
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrow(OBJECTREF throwable
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity severity = NotCorrupting
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+
+//==========================================================================
+// Throw an undecorated runtime exception.
+//==========================================================================
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrow(RuntimeExceptionKind reKind);
+
+//==========================================================================
+// Throw an undecorated runtime exception with a localized message. Given
+// a resource name, the ResourceManager will find the correct paired string
+// in our .resources file.
+//==========================================================================
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrow(RuntimeExceptionKind reKind, LPCWSTR wszResourceName, Exception * pInnerException = NULL);
+
+//==========================================================================
+// Throw a decorated runtime exception.
+//==========================================================================
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrow(RuntimeExceptionKind reKind, UINT resID,
+ LPCWSTR wszArg1 = NULL, LPCWSTR wszArg2 = NULL, LPCWSTR wszArg3 = NULL,
+ LPCWSTR wszArg4 = NULL, LPCWSTR wszArg5 = NULL, LPCWSTR wszArg6 = NULL);
+
+
+//==========================================================================
+// Throw a runtime exception based on an HResult. Note that for the version
+// of RealCOMPlusThrowHR that takes a resource ID, the HRESULT will be
+// passed as the first substitution string (%1).
+//==========================================================================
+
+enum tagGetErrorInfo
+{
+ kGetErrorInfo
+};
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowHR(HRESULT hr, IErrorInfo* pErrInfo, Exception * pInnerException = NULL);
+VOID DECLSPEC_NORETURN RealCOMPlusThrowHR(HRESULT hr, tagGetErrorInfo);
+VOID DECLSPEC_NORETURN RealCOMPlusThrowHR(HRESULT hr);
+VOID DECLSPEC_NORETURN RealCOMPlusThrowHR(HRESULT hr, UINT resID, LPCWSTR wszArg1 = NULL, LPCWSTR wszArg2 = NULL,
+ LPCWSTR wszArg3 = NULL, LPCWSTR wszArg4 = NULL, LPCWSTR wszArg5 = NULL,
+ LPCWSTR wszArg6 = NULL);
+
+#ifdef FEATURE_COMINTEROP
+
+//==========================================================================
+// Throw a runtime exception based on an HResult, check for error info
+//==========================================================================
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowHR(HRESULT hr, IUnknown *iface, REFIID riid);
+
+
+//==========================================================================
+// Throw a runtime exception based on an EXCEPINFO. This function will free
+// the strings in the EXCEPINFO that is passed in.
+//==========================================================================
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowHR(EXCEPINFO *pExcepInfo);
+
+#endif // FEATURE_COMINTEROP
+
+//==========================================================================
+// Throw a runtime exception based on the last Win32 error (GetLastError())
+//==========================================================================
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowWin32();
+VOID DECLSPEC_NORETURN RealCOMPlusThrowWin32(HRESULT hr);
+
+
+//==========================================================================
+// Create an exception object
+// Note that this may not succeed due to problems creating the exception
+// object. On failure, it will set pInitException to the value of
+// pInnerException, and will set pThrowable to the exception that got thrown
+// while trying to create the TypeInitializationException object, which
+// could be due to other type init issues, OOM, thread abort, etc.
+// pInnerException (may be NULL) and pInitException are IN params.
+// pThrowable is an OUT param.
+//==========================================================================
+void CreateTypeInitializationExceptionObject(LPCWSTR pTypeThatFailed,
+ OBJECTREF *pInnerException,
+ OBJECTREF *pInitException,
+ OBJECTREF *pThrowable);
+
+//==========================================================================
+// Examine an exception object
+//==========================================================================
+
+ULONG GetExceptionMessage(OBJECTREF throwable,
+ __inout_ecount(bufferLength) LPWSTR buffer,
+ ULONG bufferLength);
+void GetExceptionMessage(OBJECTREF throwable, SString &result);
+STRINGREF GetExceptionMessage(OBJECTREF throwable);
+HRESULT GetExceptionHResult(OBJECTREF throwable);
+DWORD GetExceptionXCode(OBJECTREF throwable);
+
+void ExceptionPreserveStackTrace(OBJECTREF throwable);
+
+
+//==========================================================================
+// Create an exception object for an HRESULT
+//==========================================================================
+
+void GetExceptionForHR(HRESULT hr, IErrorInfo* pErrInfo, bool fUseCOMException, OBJECTREF* pProtectedThrowable, IRestrictedErrorInfo *pResErrorInfo = NULL, BOOL bHasLanguageRestrictedErrorInfo = FALSE);
+void GetExceptionForHR(HRESULT hr, IErrorInfo* pErrInfo, OBJECTREF* pProtectedThrowable);
+void GetExceptionForHR(HRESULT hr, OBJECTREF* pProtectedThrowable);
+HRESULT GetHRFromThrowable(OBJECTREF throwable);
+
+#ifdef FEATURE_COMINTEROP
+IRestrictedErrorInfo* GetRestrictedErrorInfoFromErrorObject(OBJECTREF throwable);
+#endif
+//==========================================================================
+// Throw an ArithmeticException
+//==========================================================================
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowArithmetic();
+
+//==========================================================================
+// Throw an ArgumentNullException
+//==========================================================================
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowArgumentNull(LPCWSTR argName, LPCWSTR wszResourceName);
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowArgumentNull(LPCWSTR argName);
+
+//==========================================================================
+// Throw an ArgumentOutOfRangeException
+//==========================================================================
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowArgumentOutOfRange(LPCWSTR argName, LPCWSTR wszResourceName);
+
+//==========================================================================
+// Throw an ArgumentException
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrowArgumentException(LPCWSTR argName, LPCWSTR wszResourceName);
+
+//==========================================================================
+// Throw an InvalidCastException
+//==========================================================================
+VOID DECLSPEC_NORETURN RealCOMPlusThrowInvalidCastException(TypeHandle thCastFrom, TypeHandle thCastTo);
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowInvalidCastException(OBJECTREF *pObj, TypeHandle thCastTo);
+
+
+#include "eexcp.h"
+#include "exinfo.h"
+
+#ifdef _TARGET_X86_
+struct FrameHandlerExRecord
+{
+ EXCEPTION_REGISTRATION_RECORD m_ExReg;
+
+ Frame *m_pEntryFrame;
+
+ Frame *GetCurrFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pEntryFrame;
+ }
+};
+
+struct NestedHandlerExRecord : public FrameHandlerExRecord
+{
+ ExInfo m_handlerInfo;
+ BOOL m_ActiveForUnwind;
+ ExInfo *m_pCurrentExInfo;
+ EXCEPTION_REGISTRATION_RECORD *m_pCurrentHandler;
+ NestedHandlerExRecord() : m_handlerInfo() {LIMITED_METHOD_CONTRACT;}
+ void Init(PEXCEPTION_ROUTINE pFrameHandler, Frame *pEntryFrame)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ m_ExReg.Next=NULL;
+ m_ExReg.Handler=pFrameHandler;
+ m_pEntryFrame=pEntryFrame;
+ m_pCurrentExInfo = NULL;
+ m_pCurrentHandler = NULL;
+ m_handlerInfo.Init();
+ m_ActiveForUnwind = FALSE;
+ }
+};
+
+#endif // _TARGET_X86_
+
+#if defined(ENABLE_CONTRACTS_IMPL)
+
+// Never call this class directly: Call it through CANNOTTHROWCOMPLUSEXCEPTION.
+class COMPlusCannotThrowExceptionHelper
+{
+public:
+ DEBUG_NOINLINE COMPlusCannotThrowExceptionHelper(BOOL fCond,
+ const char *szFunction,
+ const char *szFile,
+ int linenum)
+ {
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_NOTHROW;
+
+ m_fCond = fCond;
+
+ if (m_fCond)
+ {
+ m_pClrDebugState = GetClrDebugState();
+ m_oldClrDebugState = *m_pClrDebugState;
+
+ m_ContractStackRecord.m_szFunction = szFunction;
+ m_ContractStackRecord.m_szFile = szFile;
+ m_ContractStackRecord.m_lineNum = linenum;
+ m_ContractStackRecord.m_testmask = (Contract::ALL_Disabled & ~((UINT)(Contract::THROWS_Mask))) | Contract::THROWS_No;
+ m_ContractStackRecord.m_construct = "CANNOTTHROW";
+ m_pClrDebugState->LinkContractStackTrace( &m_ContractStackRecord );
+
+ m_pClrDebugState->ViolationMaskReset( ThrowsViolation );
+ m_pClrDebugState->ResetOkToThrow();
+ }
+ }
+
+ DEBUG_NOINLINE ~COMPlusCannotThrowExceptionHelper()
+ {
+ SCAN_SCOPE_END;
+
+ if (m_fCond)
+ {
+ *m_pClrDebugState = m_oldClrDebugState;
+ }
+ }
+
+private:
+ BOOL m_fCond;
+
+ ClrDebugState *m_pClrDebugState;
+ ClrDebugState m_oldClrDebugState;
+
+ ContractStackRecord m_ContractStackRecord;
+};
+
+#endif // ENABLE_CONTRACTS_IMPL
+
+//-------------------------------------------------------------------------------
+// This simply tests to see if the exception object is a subclass of
+// the discriminating class specified in the exception clause.
+//-------------------------------------------------------------------------------
+extern "C" BOOL ExceptionIsOfRightType(TypeHandle clauseType, TypeHandle thrownType);
+
+//==========================================================================
+// The stuff below is what works "behind the scenes" of the public macros.
+//==========================================================================
+
+#ifdef _TARGET_X86_
+LPVOID COMPlusEndCatchWorker(Thread *pCurThread);
+EXTERN_C LPVOID STDCALL COMPlusEndCatch(LPVOID ebp, DWORD ebx, DWORD edi, DWORD esi, LPVOID* pRetAddress);
+#endif
+
+// Specify NULL for uTryCatchResumeAddress when not checking for an InducedThreadRedirectAtEndOfCatch
+EXTERN_C LPVOID COMPlusCheckForAbort(UINT_PTR uTryCatchResumeAddress = NULL);
+
+BOOL IsThreadHijackedForThreadStop(Thread* pThread, EXCEPTION_RECORD* pExceptionRecord);
+void AdjustContextForThreadStop(Thread* pThread, T_CONTEXT* pContext);
+OBJECTREF CreateCOMPlusExceptionObject(Thread* pThread, EXCEPTION_RECORD* pExceptionRecord, BOOL bAsynchronousThreadStop);
+
+#if !defined(WIN64EXCEPTIONS)
+EXCEPTION_HANDLER_DECL(COMPlusFrameHandler);
+EXCEPTION_HANDLER_DECL(COMPlusNestedExceptionHandler);
+#ifdef FEATURE_COMINTEROP
+EXCEPTION_HANDLER_DECL(COMPlusFrameHandlerRevCom);
+#endif // FEATURE_COMINTEROP
+
+// Pop off any SEH handlers we have registered below pTargetSP
+VOID __cdecl PopSEHRecords(LPVOID pTargetSP);
+
+#if defined(_TARGET_X86_) && defined(DEBUGGING_SUPPORTED)
+VOID UnwindExceptionTrackerAndResumeInInterceptionFrame(ExInfo* pExInfo, EHContext* context);
+#endif // _TARGET_X86_ && DEBUGGING_SUPPORTED
+
+BOOL PopNestedExceptionRecords(LPVOID pTargetSP, BOOL bCheckForUnknownHandlers = FALSE);
+VOID PopNestedExceptionRecords(LPVOID pTargetSP, T_CONTEXT *pCtx, void *pSEH);
+
+// Misc functions to access and update the SEH chain. Be very, very careful about updating the SEH chain.
+// Frankly, if you think you need to use one of these functions, please
+// consult with the owner of the exception system.
+PEXCEPTION_REGISTRATION_RECORD GetCurrentSEHRecord();
+VOID SetCurrentSEHRecord(EXCEPTION_REGISTRATION_RECORD *pSEH);
+
+
+#define STACK_OVERWRITE_BARRIER_SIZE 20
+#define STACK_OVERWRITE_BARRIER_VALUE 0xabcdefab
+
+#ifdef _DEBUG
+#if defined(_TARGET_X86_)
+struct FrameHandlerExRecordWithBarrier {
+ DWORD m_StackOverwriteBarrier[STACK_OVERWRITE_BARRIER_SIZE];
+ FrameHandlerExRecord m_ExRecord;
+};
+
+void VerifyValidTransitionFromManagedCode(Thread *pThread, CrawlFrame *pCF);
+#endif // defined(_TARGET_X86_)
+#endif // _DEBUG
+#endif // !defined(WIN64EXCEPTIONS)
+
+//==========================================================================
+// This is a workaround designed to allow the use of the StubLinker object at bootup
+// time, when the EE isn't sufficiently awake to create COM+ exception objects.
+// Instead, COMPlusThrow(rexcep) does a simple RaiseException using this code.
+// Or use COMPlusThrowBoot() to explicitly do so.
+//==========================================================================
+#define BOOTUP_EXCEPTION_COMPLUS 0xC0020001
+
+void COMPlusThrowBoot(HRESULT hr);
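+
+// A minimal sketch of the bootstrap throw path (assumed shape; RaiseException is the
+// Win32 API, and the HRESULT travels as the single exception argument):
+//
+//     ULONG_PTR arg = (ULONG_PTR)hr;
+//     RaiseException(BOOTUP_EXCEPTION_COMPLUS, EXCEPTION_NONCONTINUABLE, 1, &arg);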
+
+
+//==========================================================================
+// Used by the classloader to record a managed exception object to explain
+// why a classload got botched.
+//
+// - Can be called with gc enabled or disabled.
+// This allows a catch-all error path to post a generic catch-all error
+// message without clobbering more specific error messages posted by inner functions.
+//==========================================================================
+VOID DECLSPEC_NORETURN ThrowTypeLoadException(LPCUTF8 pNameSpace, LPCUTF8 pTypeName,
+ LPCWSTR pAssemblyName, LPCUTF8 pMessageArg,
+ UINT resIDWhy);
+
+VOID DECLSPEC_NORETURN ThrowTypeLoadException(LPCWSTR pFullTypeName,
+ LPCWSTR pAssemblyName,
+ LPCUTF8 pMessageArg,
+ UINT resIDWhy);
+
+VOID DECLSPEC_NORETURN ThrowFieldLayoutError(mdTypeDef cl, // cl of the NStruct being loaded
+ Module* pModule, // Module that defines the scope, loader and heap (for allocate FieldMarshalers)
+ DWORD dwOffset, // Field offset
+ DWORD dwID);
+
+UINT GetResourceIDForFileLoadExceptionHR(HRESULT hr);
+
+FCDECL1(Object*, MissingMemberException_FormatSignature, I1Array* pPersistedSigUNSAFE);
+FCDECL1(Object*, GetResourceFromDefault, StringObject* key);
+
+#define EXCEPTION_NONCONTINUABLE 0x1 // Noncontinuable exception
+#define EXCEPTION_UNWINDING 0x2 // Unwind is in progress
+#define EXCEPTION_EXIT_UNWIND 0x4 // Exit unwind is in progress
+#define EXCEPTION_STACK_INVALID 0x8 // Stack out of limits or unaligned
+#define EXCEPTION_NESTED_CALL 0x10 // Nested exception handler call
+#define EXCEPTION_TARGET_UNWIND 0x20 // Target unwind in progress
+#define EXCEPTION_COLLIDED_UNWIND 0x40 // Collided exception handler call
+
+#define EXCEPTION_UNWIND (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND | \
+ EXCEPTION_TARGET_UNWIND | EXCEPTION_COLLIDED_UNWIND)
+
+#define IS_UNWINDING(Flag) ((Flag & EXCEPTION_UNWIND) != 0)
+
+//#include "CodeMan.h"
+
+class EHRangeTreeNode;
+class EHRangeTree;
+
+typedef CUnorderedArray<EHRangeTreeNode *, 7> EH_CLAUSE_UNORDERED_ARRAY;
+
+class EHRangeTreeNode
+{
+public:
+ EHRangeTree *m_pTree;
+ EE_ILEXCEPTION_CLAUSE *m_clause;
+
+ EHRangeTreeNode *m_pContainedBy;
+ EH_CLAUSE_UNORDERED_ARRAY m_containees;
+
+ DWORD m_FilterEndPC;
+
+private:
+ // A node can represent a range or a single offset.
+ // A node representing a range can either be the root node, which
+ // contains everything and has a NULL m_clause, or it can be
+ // a node mapping to an EH clause.
+ DWORD m_offset;
+ bool m_fIsRange;
+ bool m_fIsRoot;
+
+public:
+ EHRangeTreeNode(void);
+ EHRangeTreeNode(DWORD offset, bool fIsRange = false);
+ void CommonCtor(DWORD offset, bool fIsRange);
+
+ bool IsRange();
+ void MarkAsRange();
+
+ bool IsRoot();
+ void MarkAsRoot(DWORD offset);
+
+ DWORD GetOffset();
+ DWORD GetTryStart();
+ DWORD GetTryEnd();
+ DWORD GetHandlerStart();
+ DWORD GetHandlerEnd();
+ DWORD GetFilterStart();
+ DWORD GetFilterEnd();
+
+ // These four functions may actually be called via FindContainer() while we are building the tree
+ // structure, in which case we shouldn't really check the tree recursively because the result is unreliable.
+ // Thus, they check m_pTree->m_fInitializing to see if they should call themselves recursively.
+ // Also, FindContainer() has extra logic to work around this boot-strapping problem.
+ bool Contains(EHRangeTreeNode* pNode);
+ bool TryContains(EHRangeTreeNode* pNode);
+ bool HandlerContains(EHRangeTreeNode* pNode);
+ bool FilterContains(EHRangeTreeNode* pNode);
+
+ // These are simple wrappers around the previous four.
+ bool Contains(DWORD offset);
+ bool TryContains(DWORD offset);
+ bool HandlerContains(DWORD offset);
+ bool FilterContains(DWORD offset);
+
+ EHRangeTreeNode* GetContainer();
+
+ HRESULT AddNode(EHRangeTreeNode *pNode);
+} ;
+
+class EHRangeTree
+{
+ unsigned m_EHCount;
+ EHRangeTreeNode *m_rgNodes;
+ EE_ILEXCEPTION_CLAUSE *m_rgClauses;
+
+public:
+
+ EHRangeTreeNode *m_root; // This is a sentinel, NOT an actual
+ // Exception Handler!
+ HRESULT m_hrInit; // Ctor fills this out.
+
+ bool m_fInitializing;
+
+ EHRangeTree(IJitManager* pIJM,
+ const METHODTOKEN& methodToken,
+ DWORD methodSize,
+ int cFunclet,
+ const DWORD * rgFuncletOffset);
+
+ ~EHRangeTree();
+
+ EHRangeTreeNode *FindContainer(EHRangeTreeNode *pNodeCur);
+ EHRangeTreeNode *FindMostSpecificContainer(DWORD addr);
+ EHRangeTreeNode *FindNextMostSpecificContainer(EHRangeTreeNode *pNodeCur,
+ DWORD addr);
+
+ // <TODO> We shouldn't need this - instead, we
+ // should get sequence points annotated with whether they're STACK_EMPTY, etc,
+ // and then we'll figure out if the destination is ok based on that, instead.</TODO>
+ BOOL isAtStartOfCatch(DWORD offset);
+} ;
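+
+// Illustrative query pattern (a sketch; pJitMan and methodToken are assumed to come
+// from an EECodeInfo for the method being inspected):
+//
+//     EHRangeTree ehrt(pJitMan, methodToken, methodSize, cFunclet, rgFuncletOffset);
+//     if (SUCCEEDED(ehrt.m_hrInit))
+//     {
+//         EHRangeTreeNode * pNode = ehrt.FindMostSpecificContainer(offNative);
+//         // pNode identifies the innermost EH region containing offNative.
+//     }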
+
+HRESULT SetIPFromSrcToDst(Thread *pThread,
+ SLOT addrStart, // base address of method
+ DWORD offFrom, // native offset
+ DWORD offTo, // native offset
+ bool fCanSetIPOnly, // if true, don't do any real work
+ PREGDISPLAY pReg,
+ PT_CONTEXT pCtx,
+ void *pDji,
+ EHRangeTree *pEHRT);
+
+BOOL IsInFirstFrameOfHandler(Thread *pThread,
+ IJitManager *pJitManager,
+ const METHODTOKEN& MethodToken,
+ DWORD offSet);
+
+//==========================================================================
+// Handy helper functions
+//==========================================================================
+LONG FilterAccessViolation(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam);
+
+bool IsInstrModifyFault(PEXCEPTION_POINTERS pExceptionInfo);
+
+bool IsContinuableException(Thread *pThread);
+
+bool IsInterceptableException(Thread *pThread);
+
+#ifdef DEBUGGING_SUPPORTED
+// perform simple checking to see if the current exception is intercepted
+bool CheckThreadExceptionStateForInterception();
+
+// Intercept the current exception and start an unwind. This function may never return.
+EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept(X86_FIRST_ARG(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame)
+ EXCEPTION_RECORD *pExceptionRecord);
+
+LONG NotifyDebuggerLastChance(Thread *pThread,
+ EXCEPTION_POINTERS *pExceptionInfo,
+ BOOL jitAttachRequested);
+#endif // DEBUGGING_SUPPORTED
+
+#if defined(_TARGET_X86_)
+void CPFH_AdjustContextForThreadSuspensionRace(T_CONTEXT *pContext, Thread *pThread);
+#endif // _TARGET_X86_
+
+bool IsGcMarker(DWORD exceptionCode, T_CONTEXT *pContext);
+
+void InitSavedExceptionInfo();
+
+bool ShouldHandleManagedFault(
+ EXCEPTION_RECORD* pExceptionRecord,
+ T_CONTEXT* pContext,
+ EXCEPTION_REGISTRATION_RECORD* pEstablisherFrame,
+ Thread* pThread);
+
+void HandleManagedFault(EXCEPTION_RECORD* pExceptionRecord,
+ T_CONTEXT* pContext,
+ EXCEPTION_REGISTRATION_RECORD* pEstablisherFrame,
+ Thread* pThread);
+
+LONG WatsonLastChance(
+ Thread *pThread,
+ EXCEPTION_POINTERS *pExceptionInfo,
+ TypeOfReportedError tore);
+
+bool DebugIsEECxxException(EXCEPTION_RECORD* pExceptionRecord);
+
+
+inline void CopyOSContext(T_CONTEXT* pDest, T_CONTEXT* pSrc)
+{
+ SIZE_T cbReadOnlyPost = 0;
+#ifdef _TARGET_AMD64_
+ cbReadOnlyPost = sizeof(CONTEXT) - FIELD_OFFSET(CONTEXT, FltSave); // older OSes don't have the vector reg fields
+#endif // _TARGET_AMD64_
+
+ memcpyNoGCRefs(pDest, pSrc, sizeof(T_CONTEXT) - cbReadOnlyPost);
+}
+
+void SaveCurrentExceptionInfo(PEXCEPTION_RECORD pRecord, PT_CONTEXT pContext);
+
+#ifdef _DEBUG
+void SetReversePInvokeEscapingUnhandledExceptionStatus(BOOL fIsUnwinding,
+#ifdef _TARGET_X86_
+ EXCEPTION_REGISTRATION_RECORD * pEstablisherFrame
+#elif defined(WIN64EXCEPTIONS)
+ ULONG64 pEstablisherFrame
+#else
+#error Unsupported platform
+#endif
+ );
+#endif // _DEBUG
+
+// See implementation for detailed comments in excep.cpp
+LONG AppDomainTransitionExceptionFilter(
+ EXCEPTION_POINTERS *pExceptionInfo, // the pExceptionInfo passed to a filter function.
+ PVOID pParam);
+
+// See implementation for detailed comments in excep.cpp
+LONG ReflectionInvocationExceptionFilter(
+ EXCEPTION_POINTERS *pExceptionInfo, // the pExceptionInfo passed to a filter function.
+ PVOID pParam);
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+// -----------------------------------------------------------------------
+// Support for Corrupted State Exceptions
+// -----------------------------------------------------------------------
+#ifndef HANDLE_PROCESS_CORRUPTED_STATE_EXCEPTION_ATTRIBUTE
+#define HANDLE_PROCESS_CORRUPTED_STATE_EXCEPTION_ATTRIBUTE "System.Runtime.ExceptionServices.HandleProcessCorruptedStateExceptionsAttribute"
+#endif // HANDLE_PROCESS_CORRUPTED_STATE_EXCEPTION_ATTRIBUTE
+
+#ifndef HIGHEST_MAJOR_VERSION_OF_PREV4_RUNTIME
+#define HIGHEST_MAJOR_VERSION_OF_PREV4_RUNTIME 2
+#endif // HIGHEST_MAJOR_VERSION_OF_PREV4_RUNTIME
+
+// This helper class contains static method to support working with Corrupted State Exceptions,
+// including checking if a method can handle it or not, copy state across throwables, etc.
+class CEHelper
+{
+ BOOL static IsMethodInPreV4Assembly(PTR_MethodDesc pMethodDesc);
+ BOOL static CanMethodHandleCE(PTR_MethodDesc pMethodDesc, CorruptionSeverity severity, BOOL fCalculateSecurityInfo = TRUE);
+
+public:
+ BOOL static CanMethodHandleException(CorruptionSeverity severity, PTR_MethodDesc pMethodDesc, BOOL fCalculateSecurityInfo = TRUE);
+ BOOL static CanIDispatchTargetHandleException();
+ BOOL static IsProcessCorruptedStateException(DWORD dwExceptionCode, BOOL fCheckForSO = TRUE);
+ BOOL static IsProcessCorruptedStateException(OBJECTREF oThrowable);
+ BOOL static IsLastActiveExceptionCorrupting(BOOL fMarkForReuseIfCorrupting = FALSE);
+ BOOL static ShouldTreatActiveExceptionAsNonCorrupting();
+ void static MarkLastActiveExceptionCorruptionSeverityForReraiseReuse();
+ void static SetupCorruptionSeverityForActiveException(BOOL fIsRethrownException, BOOL fIsNestedException, BOOL fShouldTreatExceptionAsNonCorrupting = FALSE);
+#ifdef WIN64EXCEPTIONS
+ typedef DPTR(class ExceptionTracker) PTR_ExceptionTracker;
+ void static SetupCorruptionSeverityForActiveExceptionInUnwindPass(Thread *pCurThread, PTR_ExceptionTracker pEHTracker, BOOL fIsFirstPass,
+ DWORD dwExceptionCode);
+#endif // WIN64EXCEPTIONS
+ void static ResetLastActiveCorruptionSeverityPostCatchHandler();
+};
+
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+#ifndef DACCESS_COMPILE
+// Switches to the previous AppDomain on the thread. See implementation for detailed comments.
+BOOL ReturnToPreviousAppDomain();
+
+// This is a generic holder that will enable you to revert to previous execution context (e.g. an AD).
+// Set it up *once* you have transitioned to the target context.
+class ReturnToPreviousAppDomainHolder
+{
+protected: // protected so that derived holder classes can also use them
+ BOOL m_fShouldReturnToPreviousAppDomain;
+ Thread * m_pThread;
+#ifdef _DEBUG
+ AppDomain * m_pTransitionedToAD;
+#endif // _DEBUG
+
+ void Init();
+ void ReturnToPreviousAppDomain();
+
+public:
+ ReturnToPreviousAppDomainHolder();
+ ~ReturnToPreviousAppDomainHolder();
+ void SuppressRelease();
+};
+
+// exception filter invoked for unhandled exceptions on the entry point thread (thread 0)
+LONG EntryPointFilter(PEXCEPTION_POINTERS pExceptionInfo, PVOID _pData);
+
+#endif // !DACCESS_COMPILE
+
+// Enum that defines the types of exception notification handlers
+// that we support.
+enum ExceptionNotificationHandlerType
+{
+ UnhandledExceptionHandler = 0x1
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+ ,
+ FirstChanceExceptionHandler = 0x2
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+};
+
+// Defined in Frames.h
+// class ContextTransitionFrame;
+
+// This class contains methods to support delivering the various exception notifications.
+class ExceptionNotifications
+{
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+private:
+ void static GetEventArgsForNotification(ExceptionNotificationHandlerType notificationType,
+ OBJECTREF *pOutEventArgs, OBJECTREF *pThrowable);
+
+ void static DeliverNotificationInternal(ExceptionNotificationHandlerType notificationType,
+ OBJECTREF *pThrowable
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+
+ void static InvokeNotificationDelegate(ExceptionNotificationHandlerType notificationType, OBJECTREF *pDelegate, OBJECTREF *pEventArgs,
+ OBJECTREF *pAppDomain
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+
+public:
+ BOOL static CanDeliverNotificationToCurrentAppDomain(ExceptionNotificationHandlerType notificationType);
+
+ void static DeliverNotification(ExceptionNotificationHandlerType notificationType,
+ OBJECTREF *pThrowable
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ BOOL static CanDelegateBeInvokedForException(OBJECTREF *pDelegate, CorruptionSeverity severity);
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+
+public:
+ void static DeliverExceptionNotification(ExceptionNotificationHandlerType notificationType, OBJECTREF *pDelegate,
+ OBJECTREF *pAppDomain, OBJECTREF *pEventArgs);
+ void static DeliverFirstChanceNotification();
+};
+
+#ifndef DACCESS_COMPILE
+
+#if defined(_TARGET_X86_)
+void ResetThreadAbortState(PTR_Thread pThread, void *pEstablisherFrame);
+#elif defined(WIN64EXCEPTIONS)
+void ResetThreadAbortState(PTR_Thread pThread, CrawlFrame *pCf, StackFrame sfCurrentStackFrame);
+#endif
+
+X86_ONLY(EXCEPTION_REGISTRATION_RECORD* GetNextCOMPlusSEHRecord(EXCEPTION_REGISTRATION_RECORD* pRec);)
+
+#endif // !DACCESS_COMPILE
+
+#endif // __excep_h__
diff --git a/src/vm/exceptionhandling.cpp b/src/vm/exceptionhandling.cpp
new file mode 100644
index 0000000000..f984f94379
--- /dev/null
+++ b/src/vm/exceptionhandling.cpp
@@ -0,0 +1,6051 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+#include "common.h"
+
+#ifndef FEATURE_PAL
+#include "securityprincipal.h"
+#endif // !FEATURE_PAL
+
+#ifdef WIN64EXCEPTIONS
+#include "exceptionhandling.h"
+#include "dbginterface.h"
+#include "asmconstants.h"
+#include "eetoprofinterfacewrapper.inl"
+#include "eedbginterfaceimpl.inl"
+#include "perfcounters.h"
+#include "eventtrace.h"
+
+#ifndef DACCESS_COMPILE
+
+// o Functions and funclets are tightly associated. In fact, they are laid out in contiguous memory.
+// They also present some interesting issues with respect to EH because we will see callstacks with
+// both functions and funclets, but need to treat them logically as the single IL function
+// that originally described them.
+//
+// o All funclets are ripped out of line from the main function. Finally clauses are pulled out of
+// line and replaced by calls to the funclets. Catch clauses, however, are simply pulled out of
+// line. !!!This causes a loss of nesting information in clause offsets.!!! A canonical example of
+// two different functions which look identical due to clause removal is as shown in the code
+// snippets below. The reason they look identical in the face of out-of-line funclets is that the
+// region bounds for the "try A" region collapse and become identical to the region bounds for
+// region "try B". This will look identical to the region information for Bar because Bar must
+// have a separate entry for each catch clause, both of which will have the same try-region bounds.
+//
+// void Foo() void Bar()
+// { {
+// try A try C
+// { {
+// try B BAR_BLK_1
+// { }
+// FOO_BLK_1 catch C
+// } {
+// catch B BAR_BLK_2
+// { }
+// FOO_BLK_2 catch D
+// } {
+// } BAR_BLK_3
+// catch A }
+// { }
+// FOO_BLK_3
+// }
+// }
+//
+// o The solution is to duplicate all clauses that logically cover the funclet in its parent
+// method, but with the try-region covering the entire out-of-line funclet code range. This will
+// differentiate the canonical example above because the CatchB funclet will have a try-clause
+// covering it whose associated handler is CatchA. In Bar, there is no such duplication of any clauses.
+//
+// o The behavior of the personality routine depends upon the JIT to properly order the clauses from
+// inside-out. This allows us to properly handle a situation where our control PC is covered by clauses
+// that should not be considered because a more nested clause will catch the exception and resume within
+// the scope of the outer clauses.
+//
+// o This sort of clause duplication for funclets should be done for all clause types, not just catches.
+// Unfortunately, I cannot articulate why at the moment.
+//
+#ifdef _DEBUG
+void DumpClauses(IJitManager* pJitMan, const METHODTOKEN& MethToken, UINT_PTR uMethodStartPC, UINT_PTR dwControlPc);
+static void DoEHLog(DWORD lvl, __in_z char *fmt, ...);
+#define EH_LOG(expr) { DoEHLog expr ; }
+#else
+#define EH_LOG(expr)
+#endif
+
+TrackerAllocator g_theTrackerAllocator;
+
+void __declspec(noinline)
+ClrUnwindEx(EXCEPTION_RECORD* pExceptionRecord,
+ UINT_PTR ReturnValue,
+ UINT_PTR TargetIP,
+ UINT_PTR TargetFrameSp);
+
+bool FixNonvolatileRegisters(UINT_PTR uOriginalSP,
+ Thread* pThread,
+ CONTEXT* pContextRecord,
+ bool fAborting
+ );
+
+MethodDesc * GetUserMethodForILStub(Thread * pThread, UINT_PTR uStubSP, MethodDesc * pILStubMD, Frame ** ppFrameOut);
+
+static ExceptionTracker* GetTrackerMemory()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return g_theTrackerAllocator.GetTrackerMemory();
+}
+
+static void FreeTrackerMemory(ExceptionTracker* pTracker, TrackerMemoryType mem)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (mem & memManaged)
+ {
+ pTracker->ReleaseResources();
+ }
+
+ if (mem & memUnmanaged)
+ {
+ g_theTrackerAllocator.FreeTrackerMemory(pTracker);
+ }
+}
+
+static inline void UpdatePerformanceMetrics(CrawlFrame *pcfThisFrame, BOOL bIsRethrownException, BOOL bIsNewException)
+{
+ WRAPPER_NO_CONTRACT;
+ COUNTER_ONLY(GetPerfCounters().m_Excep.cThrown++);
+
+ // Fire an exception thrown ETW event when an exception occurs
+ ETW::ExceptionLog::ExceptionThrown(pcfThisFrame, bIsRethrownException, bIsNewException);
+}
+
+void InitializeExceptionHandling()
+{
+ EH_LOG((LL_INFO100, "InitializeExceptionHandling(): ExceptionTracker size: 0x%x bytes\n", sizeof(ExceptionTracker)));
+
+ InitSavedExceptionInfo();
+
+ CLRAddVectoredHandlers();
+
+ g_theTrackerAllocator.Init();
+
+ // Initialize the lock used for synchronizing access to the stacktrace in the exception object
+ g_StackTraceArrayLock.Init(LOCK_TYPE_DEFAULT, TRUE);
+}
+
+struct UpdateObjectRefInResumeContextCallbackState
+{
+ UINT_PTR uResumeSP;
+ Frame *pHighestFrameWithRegisters;
+ TADDR uResumeFrameFP;
+ TADDR uICFCalleeSavedFP;
+
+#ifdef _DEBUG
+ UINT nFrames;
+ bool fFound;
+#endif
+};
+
+// Stack unwind callback for UpdateObjectRefInResumeContext().
+StackWalkAction UpdateObjectRefInResumeContextCallback(CrawlFrame* pCF, LPVOID pData)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ UpdateObjectRefInResumeContextCallbackState *pState = (UpdateObjectRefInResumeContextCallbackState*)pData;
+ CONTEXT* pSrcContext = pCF->GetRegisterSet()->pCurrentContext;
+
+ INDEBUG(pState->nFrames++);
+
+ // Check to see if we have reached the resume frame.
+ if (pCF->IsFrameless())
+ {
+ // At this point, we are trying to find the managed frame containing the catch handler to be invoked.
+ // This is done by comparing the SP of the managed frame for which this callback was invoked with the
+ // SP the OS passed to our personality routine for the current managed frame. If they match, then we have
+ // reached the target frame.
+ //
+ // It is possible that a managed frame may execute a PInvoke after performing a stackalloc:
+ //
+ // 1) The Evanesco JIT will always inline the PInvoke in the managed frame, whether or not the frame
+ // contains EH. As a result, the ICF will live in the same frame which performs stackalloc.
+ //
+ // 2) JIT64 will only inline the PInvoke in the managed frame if the frame *does not* contain EH. If it does,
+ // then pinvoke will be performed via an ILStub and thus, stackalloc will be performed in a frame different
+ // from the one (ILStub) that contains the ICF.
+ //
+ // Thus, for the scenario where the catch handler lives in the frame that performed stackalloc, in case of
+ // Evanesco JIT, the SP returned by the OS will be the SP *after* the stackalloc has happened. However,
+ // the stackwalker will invoke this callback with the CrawlFrame SP that was initialized at the time the ICF was set up, i.e.,
+ // it will be the SP after the prolog has executed (refer to InlinedCallFrame::UpdateRegDisplay).
+ //
+ // Thus, checking only the SP will not work for this scenario when using the Evanesco JIT.
+ //
+ // To address this case, the callback data also contains the frame pointer (FP) passed by the OS. This will
+ // be the value that is saved in the "CalleeSavedFP" field of the InlinedCallFrame during ICF
+ // initialization. When the stackwalker sees an ICF and invokes this callback, we copy the value of "CalleeSavedFP" in the data
+ // structure passed to this callback.
+ //
+ // Later, when the stackwalker invokes the callback for the managed frame containing the ICF, and the check
+ // for SP comparison fails, we will compare the FP value we got from the ICF with the FP value the OS passed
+ // to us. If they match, then we have reached the resume frame.
+ //
+ // Note: This problem/scenario is not applicable to JIT64 since it does not perform pinvoke inlining if the
+ // method containing pinvoke also contains EH. Thus, the SP check will never fail for it.
+ if (pState->uResumeSP == GetSP(pSrcContext))
+ {
+ INDEBUG(pState->fFound = true);
+
+ return SWA_ABORT;
+ }
+
+ // Perform the FP check, as explained above.
+ if ((pState->uICFCalleeSavedFP !=0) && (pState->uICFCalleeSavedFP == pState->uResumeFrameFP))
+ {
+ // FP from ICF is the one that was also copied to the FP register in InlinedCallFrame::UpdateRegDisplay.
+ _ASSERTE(pState->uICFCalleeSavedFP == GetFP(pSrcContext));
+
+ INDEBUG(pState->fFound = true);
+
+ return SWA_ABORT;
+ }
+
+ // Reset the ICF FP in callback data
+ pState->uICFCalleeSavedFP = 0;
+ }
+ else
+ {
+ Frame *pFrame = pCF->GetFrame();
+
+ if (pFrame->NeedsUpdateRegDisplay())
+ {
+ CONSISTENCY_CHECK(pFrame >= pState->pHighestFrameWithRegisters);
+ pState->pHighestFrameWithRegisters = pFrame;
+
+ // Is this an InlinedCallFrame?
+ if (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())
+ {
+ // If we are here, then ICF is expected to be active.
+ _ASSERTE(InlinedCallFrame::FrameHasActiveCall(pFrame));
+
+ // Copy the CalleeSavedFP to the data structure that is passed to this callback
+ // by the stackwalker. This is the value of frame pointer when ICF is setup
+ // in a managed frame.
+ //
+ // Setting this value here is based upon the assumption (which holds true on X64 and ARM) that
+ // the stackwalker invokes the callback for explicit frames before their
+ // container/corresponding managed frame.
+ pState->uICFCalleeSavedFP = ((PTR_InlinedCallFrame)pFrame)->GetCalleeSavedFP();
+ }
+ else
+ {
+ // For any other frame, simply reset uICFCalleeSavedFP field
+ pState->uICFCalleeSavedFP = 0;
+ }
+ }
+ }
+
+ return SWA_CONTINUE;
+}
+
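+// For illustration, the matching rules implemented by the callback above can be
+// summarized as follows (a decision-table sketch, not executable code):
+//
+//   frameless frame, GetSP(pSrcContext) == uResumeSP             => resume frame found (SP match)
+//   frameless frame, uICFCalleeSavedFP == uResumeFrameFP (!= 0)  => resume frame found (FP match; stackalloc case)
+//   explicit InlinedCallFrame with an active call                => capture its CalleeSavedFP for the FP check
+//   any other explicit frame that updates the REGDISPLAY         => reset the captured FP to 0
+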
+
+//
+// Locates the locations of the nonvolatile registers. This will be used to
+// retrieve the latest values of the object references before we resume
+// execution from an exception.
+//
+//static
+bool ExceptionTracker::FindNonvolatileRegisterPointers(Thread* pThread, UINT_PTR uOriginalSP, REGDISPLAY* pRegDisplay, TADDR uResumeFrameFP)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ //
+ // Find the highest frame below the resume frame that will update the
+ // REGDISPLAY. A normal StackWalkFrames will RtlVirtualUnwind through all
+ // managed frames on the stack, so this avoids some unnecessary work. The
+ // frame we find will have all of the nonvolatile registers/other state
+ // needed to start a managed unwind from that point.
+ //
+ Frame *pHighestFrameWithRegisters = NULL;
+ Frame *pFrame = pThread->GetFrame();
+
+ while ((UINT_PTR)pFrame < uOriginalSP)
+ {
+ if (pFrame->NeedsUpdateRegDisplay())
+ pHighestFrameWithRegisters = pFrame;
+
+ pFrame = pFrame->Next();
+ }
+
+ //
+ // Do a stack walk from this frame. This may find a higher frame within
+ // the resume frame (e.g. an inlined pinvoke frame). This will also update
+ // the REGDISPLAY pointers if any intervening managed frames saved
+ // nonvolatile registers.
+ //
+
+ UpdateObjectRefInResumeContextCallbackState state;
+
+ state.uResumeSP = uOriginalSP;
+ state.uResumeFrameFP = uResumeFrameFP;
+ state.uICFCalleeSavedFP = 0;
+ state.pHighestFrameWithRegisters = pHighestFrameWithRegisters;
+
+ INDEBUG(state.nFrames = 0);
+ INDEBUG(state.fFound = false);
+
+ pThread->StackWalkFramesEx(pRegDisplay, &UpdateObjectRefInResumeContextCallback, &state, 0, pHighestFrameWithRegisters);
+
+ // For managed exceptions, we should at least find a HelperMethodFrame (the one we put in IL_Throw()).
+ // For native exceptions such as AV's, we should at least find the FaultingExceptionFrame.
+ // If we don't find anything, then we must have hit an SO when we are trying to erect an HMF.
+ // Bail out in such situations.
+ //
+ // Note that pinvoke frames may be inlined in a managed method, so we cannot use the child SP (a.k.a. the current SP)
+ // to check for explicit frames "higher" on the stack ("higher" here means closer to the leaf frame). The stackwalker
+ // knows how to deal with inlined pinvoke frames, and it issues callbacks for them before issuing the callback for the
+ // containing managed method. So we have to do this check after we are done with the stackwalk.
+ pHighestFrameWithRegisters = state.pHighestFrameWithRegisters;
+ if (pHighestFrameWithRegisters == NULL)
+ {
+ return false;
+ }
+
+ CONSISTENCY_CHECK(state.nFrames);
+ CONSISTENCY_CHECK(state.fFound);
+ CONSISTENCY_CHECK(NULL != pHighestFrameWithRegisters);
+
+ //
+ // Now the REGDISPLAY has been unwound to the resume frame. The
+ // nonvolatile registers will either point into pHighestFrameWithRegisters,
+ // an inlined pinvoke frame, or into calling managed frames.
+ //
+
+ return true;
+}
+
+
+//static
+void ExceptionTracker::UpdateNonvolatileRegisters(CONTEXT *pContextRecord, REGDISPLAY *pRegDisplay, bool fAborting)
+{
+ CONTEXT* pAbortContext = NULL;
+ if (fAborting)
+ {
+ pAbortContext = GetThread()->GetAbortContext();
+ }
+
+#define UPDATEREG(reg) \
+ do { \
+ _ASSERTE(pRegDisplay->pCurrentContextPointers->reg != NULL); \
+ STRESS_LOG3(LF_GCROOTS, LL_INFO100, "Updating reg %p to %p from %p\n", \
+ pContextRecord->reg, \
+ *pRegDisplay->pCurrentContextPointers->reg, \
+ pRegDisplay->pCurrentContextPointers->reg); \
+ pContextRecord->reg = *pRegDisplay->pCurrentContextPointers->reg; \
+ if (pAbortContext) \
+ { \
+ pAbortContext->reg = pContextRecord->reg; \
+ } \
+ } while (0)
+
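+// As an illustration, UPDATEREG(Rbx) on AMD64 expands (modulo the stress log) to:
+//
+//   _ASSERTE(pRegDisplay->pCurrentContextPointers->Rbx != NULL);
+//   pContextRecord->Rbx = *pRegDisplay->pCurrentContextPointers->Rbx;
+//   if (pAbortContext)
+//   {
+//       pAbortContext->Rbx = pContextRecord->Rbx;
+//   }
+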
+#if defined(_TARGET_AMD64_)
+
+ UPDATEREG(Rbx);
+ UPDATEREG(Rbp);
+ UPDATEREG(Rsi);
+ UPDATEREG(Rdi);
+ UPDATEREG(R12);
+ UPDATEREG(R13);
+ UPDATEREG(R14);
+ UPDATEREG(R15);
+
+#elif defined(_TARGET_ARM_)
+
+ UPDATEREG(R4);
+ UPDATEREG(R5);
+ UPDATEREG(R6);
+ UPDATEREG(R7);
+ UPDATEREG(R8);
+ UPDATEREG(R9);
+ UPDATEREG(R10);
+ UPDATEREG(R11);
+
+#elif defined(_TARGET_ARM64_)
+
+ UPDATEREG(X19);
+ UPDATEREG(X20);
+ UPDATEREG(X21);
+ UPDATEREG(X22);
+ UPDATEREG(X23);
+ UPDATEREG(X24);
+ UPDATEREG(X25);
+ UPDATEREG(X26);
+ UPDATEREG(X27);
+ UPDATEREG(X28);
+ // Obtain value of Fp from CurrentContext instead of from CurrentContextPointers
+ // It should not matter. CurrentContextPointers does not have value of FP as this will
+ // require changes in MachState to also store pointer of FP which it does not do currently.
+ pContextRecord->Fp = pRegDisplay->pCurrentContext->Fp;
+ if (pAbortContext)
+ {
+ pAbortContext->Fp = pContextRecord->Fp;
+ }
+
+#else
+ PORTABILITY_ASSERT("ExceptionTracker::UpdateNonvolatileRegisters");
+#endif
+
+#undef UPDATEREG
+}
+
+
+#ifndef _DEBUG
+#define DebugLogExceptionRecord(pExceptionRecord)
+#else // _DEBUG
+#define LOG_FLAG(name) \
+ if (flags & name) \
+ { \
+ LOG((LF_EH, LL_INFO100, "" #name " ")); \
+ } \
+
+void DebugLogExceptionRecord(EXCEPTION_RECORD* pExceptionRecord)
+{
+ ULONG flags = pExceptionRecord->ExceptionFlags;
+
+ EH_LOG((LL_INFO100, ">>exr: %p, code: %08x, addr: %p, flags: 0x%02x ", pExceptionRecord, pExceptionRecord->ExceptionCode, pExceptionRecord->ExceptionAddress, flags));
+
+ LOG_FLAG(EXCEPTION_NONCONTINUABLE);
+ LOG_FLAG(EXCEPTION_UNWINDING);
+ LOG_FLAG(EXCEPTION_EXIT_UNWIND);
+ LOG_FLAG(EXCEPTION_STACK_INVALID);
+ LOG_FLAG(EXCEPTION_NESTED_CALL);
+ LOG_FLAG(EXCEPTION_TARGET_UNWIND);
+ LOG_FLAG(EXCEPTION_COLLIDED_UNWIND);
+
+ LOG((LF_EH, LL_INFO100, "\n"));
+
+}
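+
+// For example (hypothetical values), an unwinding-pass record might be logged as:
+//
+//   >>exr: 000000D1B1BFE6A0, code: c0000005, addr: 00007FF8C0011000, flags: 0x02 EXCEPTION_UNWINDING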
+
+LPCSTR DebugGetExceptionDispositionName(EXCEPTION_DISPOSITION disp)
+{
+
+ switch (disp)
+ {
+ case ExceptionContinueExecution: return "ExceptionContinueExecution";
+ case ExceptionContinueSearch: return "ExceptionContinueSearch";
+ case ExceptionNestedException: return "ExceptionNestedException";
+ case ExceptionCollidedUnwind: return "ExceptionCollidedUnwind";
+ default:
+ UNREACHABLE_MSG("Invalid EXCEPTION_DISPOSITION!");
+ }
+}
+#endif // _DEBUG
+
+bool ExceptionTracker::IsStackOverflowException()
+{
+ if (m_pThread->GetThrowableAsHandle() == g_pPreallocatedStackOverflowException)
+ {
+ return true;
+ }
+
+ return false;
+}
+
+UINT_PTR ExceptionTracker::CallCatchHandler(CONTEXT* pContextRecord, bool* pfAborting /*= NULL*/)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+
+ PRECONDITION(CheckPointer(pContextRecord, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ UINT_PTR uResumePC = 0;
+ ULONG_PTR ulRelOffset;
+ StackFrame sfStackFp = m_sfResumeStackFrame;
+ Thread* pThread = m_pThread;
+ MethodDesc* pMD = m_pMethodDescOfCatcher;
+ bool fIntercepted = false;
+
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+
+#if defined(DEBUGGING_SUPPORTED)
+
+ // If the exception is intercepted, use the information stored in the DebuggerExState to resume the
+ // exception instead of calling the catch clause (there may not even be one).
+ if (pExState->GetFlags()->DebuggerInterceptInfo())
+ {
+ _ASSERTE(pMD != NULL);
+
+ // retrieve the interception information
+ pExState->GetDebuggerState()->GetDebuggerInterceptInfo(NULL, NULL, (PBYTE*)&(sfStackFp.SP), &ulRelOffset, NULL);
+
+ PCODE pStartAddress = pMD->GetNativeCode();
+
+ EECodeInfo codeInfo(pStartAddress);
+ _ASSERTE(codeInfo.IsValid());
+
+ // Note that the value returned for ulRelOffset is actually the offset,
+ // so we need to adjust it to get the actual IP.
+ _ASSERTE(FitsIn<DWORD>(ulRelOffset));
+ uResumePC = codeInfo.GetJitManager()->GetCodeAddressForRelOffset(codeInfo.GetMethodToken(), static_cast<DWORD>(ulRelOffset));
+
+ // Either we haven't set m_sfResumeStackFrame (for unhandled managed exceptions), or we have set it
+ // and it equals MemoryStackFp.
+ _ASSERTE(m_sfResumeStackFrame.IsNull() || m_sfResumeStackFrame == sfStackFp);
+
+ fIntercepted = true;
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ _ASSERTE(!sfStackFp.IsNull());
+
+ m_sfResumeStackFrame.Clear();
+ m_pMethodDescOfCatcher = NULL;
+
+ _ASSERTE(pContextRecord);
+
+ //
+ // call the handler
+ //
+ EH_LOG((LL_INFO100, " calling catch at 0x%p\n", m_uCatchToCallPC));
+
+ // do not call the catch clause if the exception is intercepted
+ if (!fIntercepted)
+ {
+ _ASSERTE(m_uCatchToCallPC != 0 && m_pClauseForCatchToken != NULL);
+ uResumePC = CallHandler(m_uCatchToCallPC, sfStackFp, &m_ClauseForCatch, pMD, Catch ARM_ARG(pContextRecord) ARM64_ARG(pContextRecord));
+ }
+ else
+ {
+ // Since the exception has been intercepted and we could be resuming execution at an
+ // arbitrary user-specified location, reset the EH clause index and EstablisherFrame
+ // we may have saved for addressing any potential ThreadAbort raise.
+ //
+ // This is done since the saved EH clause index is related to the catch block executed,
+ // which does not happen in interception. As the user specifies where we resume execution,
+ // we let that behaviour override the index and pretend as if we have no index available.
+ m_dwIndexClauseForCatch = 0;
+ m_sfEstablisherOfActualHandlerFrame.Clear();
+ m_sfCallerOfActualHandlerFrame.Clear();
+ }
+
+ EH_LOG((LL_INFO100, " resume address should be 0x%p\n", uResumePC));
+
+ //
+ // Our tracker may have gone away at this point, don't reference it.
+ //
+
+ return FinishSecondPass(pThread, uResumePC, sfStackFp, pContextRecord, this, pfAborting);
+}
+
+// static
+UINT_PTR ExceptionTracker::FinishSecondPass(
+ Thread* pThread,
+ UINT_PTR uResumePC,
+ StackFrame sf,
+ CONTEXT* pContextRecord,
+ ExceptionTracker* pTracker,
+ bool* pfAborting /*= NULL*/)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ NOTHROW;
+ PRECONDITION(CheckPointer(pThread, NULL_NOT_OK));
+ PRECONDITION(CheckPointer((void*)uResumePC, NULL_NOT_OK));
+ PRECONDITION(CheckPointer(pContextRecord, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Between the time when we pop the ExceptionTracker for the current exception and the time
+ // when we actually resume execution, it is unsafe to start a funclet-skipping stackwalk.
+ // So we set a flag here to indicate that we are in this time window. The only user of this
+ // information right now is the profiler.
+ ThreadExceptionFlagHolder tefHolder(ThreadExceptionState::TEF_InconsistentExceptionState);
+
+#ifdef DEBUGGING_SUPPORTED
+ // This must be done before we pop the trackers.
+ BOOL fIntercepted = pThread->GetExceptionState()->GetFlags()->DebuggerInterceptInfo();
+#endif // DEBUGGING_SUPPORTED
+
+ // Since we may [re]raise ThreadAbort post the catch block execution,
+ // save the index, and Establisher, of the EH clause corresponding to the handler
+ // we just executed before we release the tracker. This will be used to ensure that reraise
+ // proceeds forward and does not get stuck in a loop. Refer to
+ // ExceptionTracker::ProcessManagedCallFrame for details.
+ DWORD ehClauseCurrentHandlerIndex = pTracker->GetCatchHandlerExceptionClauseIndex();
+ StackFrame sfEstablisherOfActualHandlerFrame = pTracker->GetEstablisherOfActualHandlingFrame();
+
+ EH_LOG((LL_INFO100, "second pass finished\n"));
+ EH_LOG((LL_INFO100, "cleaning up ExceptionTracker state\n"));
+
+ // Release the exception trackers till the current (specified) frame.
+ ExceptionTracker::PopTrackers(sf, true);
+
+ // This will set the last thrown to be either null if we have handled all the exceptions in the nested chain or
+ // to whatever the current exception is.
+ //
+ // In a case when we're nested inside another catch block, the domain in which we're executing may not be the
+ // same as the domain of the throwable that was just made the current throwable above. Therefore, we
+ // make a special effort to preserve the domain of the throwable as we update the last thrown object.
+ //
+ // If an exception is active, we don't want to reset the LastThrownObject to NULL as the active exception
+ // might be represented by a tracker created in the second pass (refer to
+ // CEHelper::SetupCorruptionSeverityForActiveExceptionInUnwindPass to understand how exception trackers can be
+ // created in the 2nd pass on 64bit) that does not have a throwable attached to it. Thus, if this exception
+ // is caught in the VM and it attempts to get the LastThrownObject using GET_THROWABLE macro, then it should be available.
+ //
+ // But, if the active exception tracker remains consistent in the 2nd pass (which will happen if the exception is caught
+ // in managed code), then the call to SafeUpdateLastThrownObject below will automatically update the LTO as per the
+ // active exception.
+ if (!pThread->GetExceptionState()->IsExceptionInProgress())
+ {
+ pThread->SafeSetLastThrownObject(NULL);
+ }
+ pThread->SafeUpdateLastThrownObject();
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Since the catch clause has successfully executed and we are exiting it, reset the corruption severity
+ // in the ThreadExceptionState for the last active exception. This will ensure that when the next exception
+ // gets thrown/raised, the EH tracker won't pick up an invalid value.
+ CEHelper::ResetLastActiveCorruptionSeverityPostCatchHandler();
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+
+ //
+ // If we are aborting, we should not resume execution. Instead, we raise another
+ // exception. However, we do this by resuming execution at our thread redirecter
+ // function (RedirectForThrowControl), which is the same process we use for async
+ // thread stops. This redirecter function will cover the stack frame and register
+ // stack frame and then throw an exception. When we first see the exception thrown
+ // by this redirecter, we fixup the context for the thread stackwalk by copying
+ // pThread->m_OSContext into the dispatcher context and restarting the exception
+ // dispatch. As a result, we need to save off the "correct" resume context before
+ // we resume so the exception processing can work properly after redirect. A side
+ // benefit of this mechanism is that it makes synchronous and async thread abort
+ // use exactly the same codepaths.
+ //
+ UINT_PTR uAbortAddr = 0;
+
+#if defined(DEBUGGING_SUPPORTED)
+ // Don't honour thread abort requests at this time for intercepted exceptions.
+ if (fIntercepted)
+ {
+ uAbortAddr = 0;
+ }
+ else
+#endif // DEBUGGING_SUPPORTED
+ {
+ CopyOSContext(pThread->m_OSContext, pContextRecord);
+ SetIP(pThread->m_OSContext, (PCODE)uResumePC);
+ uAbortAddr = (UINT_PTR)COMPlusCheckForAbort(uResumePC);
+ }
+
+ if (uAbortAddr)
+ {
+ if (pfAborting != NULL)
+ {
+ *pfAborting = true;
+ }
+
+ EH_LOG((LL_INFO100, "thread abort in progress, resuming thread under control...\n"));
+
+ // We are aborting, so keep the reference to the current EH clause index.
+ // We will use this when the exception is reraised and we commence
+ // exception dispatch. This is done in ExceptionTracker::ProcessOSExceptionNotification.
+ //
+ // The "if" condition below can be false if the exception has been intercepted (refer to
+ // ExceptionTracker::CallCatchHandler for details)
+ if ((ehClauseCurrentHandlerIndex > 0) && (!sfEstablisherOfActualHandlerFrame.IsNull()))
+ {
+ pThread->m_dwIndexClauseForCatch = ehClauseCurrentHandlerIndex;
+ pThread->m_sfEstablisherOfActualHandlerFrame = sfEstablisherOfActualHandlerFrame;
+ }
+
+ CONSISTENCY_CHECK(CheckPointer(pContextRecord));
+
+ STRESS_LOG1(LF_EH, LL_INFO10, "resume under control: ip: %p\n", uResumePC);
+
+#ifdef _TARGET_AMD64_
+ pContextRecord->Rcx = uResumePC;
+#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ // On ARM & ARM64, we save off the original PC in Lr. This is the same as done
+ // in HandleManagedFault for H/W generated exceptions.
+ pContextRecord->Lr = uResumePC;
+#endif
+
+ uResumePC = uAbortAddr;
+ }
+
+ CONSISTENCY_CHECK(pThread->DetermineIfGuardPagePresent());
+
+ EH_LOG((LL_INFO100, "FinishSecondPass complete, uResumePC = %p, current SP = %p\n", uResumePC, GetCurrentSP()));
+ return uResumePC;
+}
+
+// On CoreARM, the MemoryStackFp is ULONG when passed by RtlDispatchException,
+// unlike its 64bit counterparts.
+EXTERN_C EXCEPTION_DISPOSITION
+ProcessCLRException(IN PEXCEPTION_RECORD pExceptionRecord
+ WIN64_ARG(IN ULONG64 MemoryStackFp)
+ NOT_WIN64_ARG(IN ULONG MemoryStackFp),
+ IN OUT PCONTEXT pContextRecord,
+ IN OUT PDISPATCHER_CONTEXT pDispatcherContext
+ )
+{
+ //
+ // This method doesn't always return, so it will leave its
+ // state on the thread if using dynamic contracts.
+ //
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_THROWS;
+
+ // We must preserve this so that GCStress=4 EH processing doesn't kill the last error.
+ DWORD dwLastError = GetLastError();
+
+ EXCEPTION_DISPOSITION returnDisposition = ExceptionContinueSearch;
+
+ STRESS_LOG5(LF_EH, LL_INFO10, "Processing exception at establisher=%p, ip=%p disp->cxr: %p, sp: %p, cxr @ exception: %p\n",
+ MemoryStackFp, pDispatcherContext->ControlPc,
+ pDispatcherContext->ContextRecord,
+ GetSP(pDispatcherContext->ContextRecord), pContextRecord);
+ AMD64_ONLY(STRESS_LOG3(LF_EH, LL_INFO10, " rbx=%p, rsi=%p, rdi=%p\n", pContextRecord->Rbx, pContextRecord->Rsi, pContextRecord->Rdi));
+
+ // sample flags early on because we may change pExceptionRecord below
+ // if we are seeing a STATUS_UNWIND_CONSOLIDATE
+ DWORD dwExceptionFlags = pExceptionRecord->ExceptionFlags;
+ Thread* pThread = GetThread();
+
+ // Stack Overflow is handled specially by the CLR EH mechanism. In fact
+ // there are cases where we aren't in managed code, but aren't quite in
+ // known unmanaged code yet either...
+ //
+ // These "boundary code" cases include:
+ // - in JIT helper methods which don't have a frame
+ // - in JIT helper methods before/during frame setup
+ // - in FCALL before/during frame setup
+ //
+ // In those cases on x86 we take special care to start our unwind looking
+ // for a handler which is below the last explicit frame which has been
+ // established on the stack as it can't reliably crawl the stack frames
+ // above that.
+ // NOTE: see code in the CLRVectoredExceptionHandler() routine.
+ //
+ // From the perspective of the EH subsystem, we can handle unwind correctly
+ // even without erecting a transition frame on WIN64. However, since the GC
+ // uses the stackwalker to update object references, and since the stackwalker
+ // relies on transition frame, we still cannot let an exception be handled
+ // by an unprotected managed frame.
+ //
+ // The code below checks to see if an SO has occurred outside of managed code.
+ // If it has, and if we don't have a transition frame higher up the stack, then
+ // we don't handle the SO.
+ if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
+ {
+ if (IsSOExceptionCode(pExceptionRecord->ExceptionCode))
+ {
+ // We don't need to unwind the frame chain here because we have backstop
+ // personality routines at the U2M boundary to do that. They are
+ // the personality routines of CallDescrWorker() and UMThunkStubCommon().
+ //
+ // See VSW 471619 for more information.
+
+ // We should be in cooperative mode if we are going to handle the SO.
+ // We track SO state for the thread.
+ EEPolicy::HandleStackOverflow(SOD_ManagedFrameHandler, (void*)MemoryStackFp);
+ FastInterlockAnd (&pThread->m_fPreemptiveGCDisabled, 0);
+ return ExceptionContinueSearch;
+ }
+ else
+ {
+#ifdef FEATURE_STACK_PROBE
+ if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
+ {
+ RetailStackProbe(static_cast<unsigned int>(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT)), pThread);
+ }
+#endif
+ }
+ }
+ else
+ {
+ DWORD exceptionCode = pExceptionRecord->ExceptionCode;
+
+ if (exceptionCode == STATUS_UNWIND)
+ // If exceptionCode is STATUS_UNWIND, RtlUnwind was called with a NULL ExceptionRecord;
+ // in that case the OS uses a fake ExceptionRecord with the STATUS_UNWIND code. We then need
+ // to look at our saved exception code.
+ exceptionCode = GetCurrentExceptionCode();
+
+ if (IsSOExceptionCode(exceptionCode))
+ {
+ return ExceptionContinueSearch;
+ }
+ }
+
+ BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
+
+ StackFrame sf((UINT_PTR)MemoryStackFp);
+
+
+ {
+ GCX_COOP();
+ // Update the current establisher frame
+ if (dwExceptionFlags & EXCEPTION_UNWINDING)
+ {
+ ExceptionTracker *pCurrentTracker = pThread->GetExceptionState()->GetCurrentExceptionTracker();
+ if (pCurrentTracker != NULL)
+ {
+ pCurrentTracker->SetCurrentEstablisherFrame(sf);
+ }
+ }
+
+#ifdef _DEBUG
+ Thread::ObjectRefFlush(pThread);
+#endif // _DEBUG
+ }
+
+
+ //
+ // begin Early Processing
+ //
+ {
+#ifndef USE_REDIRECT_FOR_GCSTRESS
+ if (IsGcMarker(pExceptionRecord->ExceptionCode, pContextRecord))
+ {
+ returnDisposition = ExceptionContinueExecution;
+ goto lExit;
+ }
+#endif // !USE_REDIRECT_FOR_GCSTRESS
+
+ EH_LOG((LL_INFO100, "..................................................................................\n"));
+ EH_LOG((LL_INFO100, "ProcessCLRException enter, sp = 0x%p, ControlPc = 0x%p\n", MemoryStackFp, pDispatcherContext->ControlPc));
+ DebugLogExceptionRecord(pExceptionRecord);
+
+ if (STATUS_UNWIND_CONSOLIDATE == pExceptionRecord->ExceptionCode)
+ {
+ EH_LOG((LL_INFO100, "STATUS_UNWIND_CONSOLIDATE, retrieving stored exception record\n"));
+ _ASSERTE(pExceptionRecord->NumberParameters >= 7);
+ pExceptionRecord = (EXCEPTION_RECORD*)pExceptionRecord->ExceptionInformation[6];
+ DebugLogExceptionRecord(pExceptionRecord);
+ }
+
+ CONSISTENCY_CHECK_MSG(!DebugIsEECxxException(pExceptionRecord), "EE C++ Exception leaked into managed code!!\n");
+ }
+ //
+ // end Early Processing (tm) -- we're now into really processing an exception for managed code
+ //
+
+ if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
+ {
+ // If the exception is a breakpoint, but outside of the runtime or managed code,
+ // let it go. It is not ours, so someone else will handle it, or we'll see
+ // it again as an unhandled exception.
+ if ((pExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) ||
+ (pExceptionRecord->ExceptionCode == STATUS_SINGLE_STEP))
+ {
+ // It is a breakpoint; is it from the runtime or managed code?
+ PCODE ip = GetIP(pContextRecord); // IP of the fault.
+
+ BOOL fExternalException = FALSE;
+
+ BEGIN_SO_INTOLERANT_CODE_NOPROBE;
+
+ fExternalException = (!ExecutionManager::IsManagedCode(ip) &&
+ !IsIPInModule(g_pMSCorEE, ip));
+
+ END_SO_INTOLERANT_CODE_NOPROBE;
+
+ if (fExternalException)
+ {
+ // The breakpoint was not ours. Someone else can handle it. (Or if not, we'll get it again as
+ // an unhandled exception.)
+ returnDisposition = ExceptionContinueSearch;
+ goto lExit;
+ }
+ }
+ }
+
+ {
+ BOOL bAsynchronousThreadStop = IsThreadHijackedForThreadStop(pThread, pExceptionRecord);
+
+ // we already fixed the context in HijackHandler, so let's
+ // just clear the thread state.
+ pThread->ResetThrowControlForThread();
+
+ ExceptionTracker::StackTraceState STState;
+
+ ExceptionTracker* pTracker = ExceptionTracker::GetOrCreateTracker(
+ pDispatcherContext->ControlPc,
+ sf,
+ pExceptionRecord,
+ pContextRecord,
+ bAsynchronousThreadStop,
+ !(dwExceptionFlags & EXCEPTION_UNWINDING),
+ &STState);
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Only setup the Corruption Severity in the first pass
+ if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
+ {
+ // Switch to COOP mode
+ GCX_COOP();
+
+ if (pTracker && pTracker->GetThrowable() != NULL)
+ {
+ // Setup the state in current exception tracker indicating the corruption severity
+ // of the active exception.
+ CEHelper::SetupCorruptionSeverityForActiveException((STState == ExceptionTracker::STS_FirstRethrowFrame), (pTracker->GetPreviousExceptionTracker() != NULL),
+ CEHelper::ShouldTreatActiveExceptionAsNonCorrupting());
+ }
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+#ifdef FEATURE_CORECLR
+ {
+ // Switch to COOP mode since we are going to work
+ // with throwable
+ GCX_COOP();
+ if (pTracker->GetThrowable() != NULL)
+ {
+ BOOL fIsThrownExceptionAV = FALSE;
+ OBJECTREF oThrowable = NULL;
+ GCPROTECT_BEGIN(oThrowable);
+ oThrowable = pTracker->GetThrowable();
+
+ // Check if we are dealing with AV or not and if we are,
+ // ensure that this is a real AV and not managed AV exception
+ if ((pExceptionRecord->ExceptionCode == STATUS_ACCESS_VIOLATION) &&
+ (MscorlibBinder::GetException(kAccessViolationException) == oThrowable->GetMethodTable()))
+ {
+ // It's an AV - set the flag
+ fIsThrownExceptionAV = TRUE;
+ }
+
+ GCPROTECT_END();
+
+ // Did we get an AV?
+ if (fIsThrownExceptionAV == TRUE)
+ {
+ // Get the escalation policy action for handling AV
+ EPolicyAction actionAV = GetEEPolicy()->GetActionOnFailure(FAIL_AccessViolation);
+
+ // Valid actions are: eNoAction (default behaviour) or eRudeExitProcess
+ _ASSERTE(((actionAV == eNoAction) || (actionAV == eRudeExitProcess)));
+ if (actionAV == eRudeExitProcess)
+ {
+ LOG((LF_EH, LL_INFO100, "ProcessCLRException: AccessViolation handler found and doing RudeExitProcess due to escalation policy (eRudeExitProcess)\n"));
+
+ // EEPolicy::HandleFatalError will help us RudeExit the process.
+ // RudeExitProcess due to AV is to prevent a security risk - we are ripping
+ // at the boundary, without looking for the handlers.
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY);
+ }
+ }
+ }
+ }
+#endif // FEATURE_CORECLR
+
+#ifndef FEATURE_PAL // Watson is on Windows only
+ // Setup bucketing details for nested exceptions (rethrow and non-rethrow) only if we are in the first pass
+ if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
+ {
+ ExceptionTracker *pPrevEHTracker = pTracker->GetPreviousExceptionTracker();
+ if (pPrevEHTracker != NULL)
+ {
+ SetStateForWatsonBucketing((STState == ExceptionTracker::STS_FirstRethrowFrame), pPrevEHTracker->GetThrowableAsHandle());
+ }
+ }
+#endif //!FEATURE_PAL
+
+ CLRUnwindStatus status;
+
+ // Refer to comment in ProcessOSExceptionNotification about ICF and codegen difference.
+ ARM_ONLY(InlinedCallFrame *pICFSetAsLimitFrame = NULL;)
+
+ status = pTracker->ProcessOSExceptionNotification(
+ pExceptionRecord,
+ pContextRecord,
+ pDispatcherContext,
+ dwExceptionFlags,
+ sf,
+ pThread,
+ STState ARM_ARG((PVOID)pICFSetAsLimitFrame));
+
+ if (FirstPassComplete == status)
+ {
+ EH_LOG((LL_INFO100, "first pass finished, found handler, TargetFrameSp = %p\n",
+ pDispatcherContext->EstablisherFrame));
+
+ SetLastError(dwLastError);
+
+ //
+ // At this point (the end of the 1st pass) we don't know where
+ // we are going to resume to. So, we pass in an address that
+ // lies in the NULL pointer partition of the memory, as the target IP.
+ //
+ // Once we reach the target frame in the second pass unwind, we call
+ // the catch funclet that caused us to resume execution and it
+ // tells us where we are resuming to. At that point, we patch
+ // the context record with the resume IP and RtlUnwind2 finishes
+ // by restoring our context at the right spot.
+ //
+ // If we are unable to set the resume PC for some reason, then
+ // the OS will try to resume at the NULL partition address and the
+ // attempt will fail due to AV, resulting in failfast, helping us
+ // isolate problems in patching the IP.
+
+ ClrUnwindEx(pExceptionRecord,
+ (UINT_PTR)pThread,
+ INVALID_RESUME_ADDRESS,
+ pDispatcherContext->EstablisherFrame);
+
+ UNREACHABLE();
+ //
+ // doesn't return
+ //
+ }
+ else if (SecondPassComplete == status)
+ {
+ bool fAborting = false;
+ UINT_PTR uResumePC = (UINT_PTR)-1;
+ UINT_PTR uOriginalSP = GetSP(pContextRecord);
+
+ Frame* pLimitFrame = pTracker->GetLimitFrame();
+
+ pDispatcherContext->ContextRecord = pContextRecord;
+
+ // We may be in COOP mode at this point - the indefinite switch was done
+ // in ExceptionTracker::ProcessManagedCallFrame.
+ //
+ // However, if a finally was invoked non-exceptionally and raised an exception
+ // that was caught in its parent method, unwind will result in invoking any applicable termination
+ // handlers in the finally funclet and thus, also switching the mode to COOP indefinitely.
+ //
+ // Since the catch block to be executed will lie in the parent method,
+ // we will skip frames till we reach the parent and in the process, switch back to PREEMP mode
+ // as control goes back to the OS.
+ //
+ // Upon reaching the target of unwind, we won't call ExceptionTracker::ProcessManagedCallFrame (since any
+ // handlers in finally or surrounding it will be invoked when we unwind finally funclet). Thus,
+ // we may not be in COOP mode.
+ //
+ // Since CallCatchHandler expects to be in COOP mode, perform the switch here.
+ GCX_COOP_NO_DTOR();
+ uResumePC = pTracker->CallCatchHandler(pContextRecord, &fAborting);
+
+ {
+ //
+ // GC must NOT occur after the handler has returned until
+ // we resume at the new address because the stackwalker
+ // EnumGcRefs would try and report things as live from the
+ // try body, that were probably reported dead from the
+ // handler body.
+ //
+ // GC must NOT occur once the frames have been popped because
+ // the values in the unwound CONTEXT are not GC-protected.
+ //
+ GCX_FORBID();
+
+ CONSISTENCY_CHECK((UINT_PTR)-1 != uResumePC);
+
+ // Ensure we are not resuming to the invalid target IP we had set at the end of
+ // first pass
+ _ASSERTE_MSG(INVALID_RESUME_ADDRESS != uResumePC, "CallCatchHandler returned invalid resume PC!");
+
+ //
+ // CallCatchHandler freed the tracker.
+ //
+ INDEBUG(pTracker = (ExceptionTracker*)POISONC);
+
+ // Note that we should only fail to fix up for SO.
+ bool fFixedUp = FixNonvolatileRegisters(uOriginalSP, pThread, pContextRecord, fAborting);
+ _ASSERTE(fFixedUp || (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW));
+
+
+ CONSISTENCY_CHECK(pLimitFrame > dac_cast<PTR_VOID>(GetSP(pContextRecord)));
+#if defined(_TARGET_ARM_)
+ if (pICFSetAsLimitFrame != NULL)
+ {
+ _ASSERTE(pICFSetAsLimitFrame == pLimitFrame);
+
+ // Mark the ICF as inactive (by setting the return address as NULL).
+ // It will be marked as active at the next PInvoke callsite.
+ //
+ // This ensures that any stackwalk post the catch handler but before
+ // the next pinvoke callsite does not see the frame as active.
+ pICFSetAsLimitFrame->Reset();
+ }
+#endif // defined(_TARGET_ARM_)
+
+ pThread->SetFrame(pLimitFrame);
+
+ SetIP(pContextRecord, (PCODE)uResumePC);
+ }
+
+#ifdef STACK_GUARDS_DEBUG
+ // We are transitioning back to managed code, so ensure that we are in
+ // SO-tolerant mode before we do so.
+ RestoreSOToleranceState();
+#endif
+ RESET_CONTRACT_VIOLATION();
+ ExceptionTracker::ResumeExecution(pContextRecord,
+ NULL
+ );
+ UNREACHABLE();
+ }
+ }
+
+lExit: ;
+
+ EH_LOG((LL_INFO100, "returning %s\n", DebugGetExceptionDispositionName(returnDisposition)));
+ CONSISTENCY_CHECK( !((dwExceptionFlags & EXCEPTION_TARGET_UNWIND) && (ExceptionContinueSearch == returnDisposition)));
+
+ if ((ExceptionContinueSearch == returnDisposition))
+ {
+ GCX_PREEMP_NO_DTOR();
+ }
+
+ END_CONTRACT_VIOLATION;
+
+ SetLastError(dwLastError);
+
+ return returnDisposition;
+}
+
+// When we hit a native exception such as an AV in managed code, we put up a FaultingExceptionFrame which saves all the
+// non-volatile registers. The GC may update these registers if they contain object references. However, the CONTEXT
+// with which we are going to resume execution doesn't have these updated values. Thus, we need to fix up the non-volatile
+// registers in the CONTEXT with the updated ones stored in the FaultingExceptionFrame. To do so properly, we need
+// to perform a full stackwalk.
+bool FixNonvolatileRegisters(UINT_PTR uOriginalSP,
+ Thread* pThread,
+ CONTEXT* pContextRecord,
+ bool fAborting
+ )
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ CONTEXT _ctx = {0};
+#if defined(_TARGET_AMD64_)
+ REGDISPLAY regdisp = {0};
+#else
+ // Ctor will initialize it to NULL
+ REGDISPLAY regdisp;
+#endif // _TARGET_AMD64_
+
+ pThread->FillRegDisplay(&regdisp, &_ctx);
+
+ bool fFound = ExceptionTracker::FindNonvolatileRegisterPointers(pThread, uOriginalSP, &regdisp, GetFP(pContextRecord));
+ if (!fFound)
+ {
+ return false;
+ }
+
+ {
+ //
+ // GC must NOT occur once the frames have been popped because
+ // the values in the unwound CONTEXT are not GC-protected.
+ //
+ GCX_FORBID();
+
+ ExceptionTracker::UpdateNonvolatileRegisters(pContextRecord, &regdisp, fAborting);
+ }
+
+ return true;
+}
+
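+// Usage note: this helper is invoked from the second-pass completion path in
+// ProcessCLRException above, under GCX_FORBID, before the resume IP is patched in:
+//
+//   bool fFixedUp = FixNonvolatileRegisters(uOriginalSP, pThread, pContextRecord, fAborting);
+//   _ASSERTE(fFixedUp || (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW));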
+
+
+
+// static
+void ExceptionTracker::InitializeCrawlFrameForExplicitFrame(CrawlFrame* pcfThisFrame, Frame* pFrame, MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ PRECONDITION(pFrame != FRAME_TOP);
+ }
+ CONTRACTL_END;
+
+ INDEBUG(memset(pcfThisFrame, 0xCC, sizeof(*pcfThisFrame)));
+
+ pcfThisFrame->isFrameless = false;
+ pcfThisFrame->pFrame = pFrame;
+ pcfThisFrame->pFunc = pFrame->GetFunction();
+
+ if (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr() &&
+ !InlinedCallFrame::FrameHasActiveCall(pFrame))
+ {
+ // Inactive ICFs in IL stubs contain the true interop MethodDesc which must be
+ // reported in the stack trace.
+ if (pMD->IsILStub() && pMD->AsDynamicMethodDesc()->HasMDContextArg())
+ {
+ // Report interop MethodDesc
+ pcfThisFrame->pFunc = ((InlinedCallFrame *)pFrame)->GetActualInteropMethodDesc();
+ _ASSERTE(pcfThisFrame->pFunc != NULL);
+ _ASSERTE(pcfThisFrame->pFunc->SanityCheck());
+ }
+ }
+
+ pcfThisFrame->pFirstGSCookie = NULL;
+ pcfThisFrame->pCurGSCookie = NULL;
+}
+
+// static
+void ExceptionTracker::InitializeCrawlFrame(CrawlFrame* pcfThisFrame, Thread* pThread, StackFrame sf, REGDISPLAY* pRD,
+ PDISPATCHER_CONTEXT pDispatcherContext, DWORD_PTR ControlPCForEHSearch,
+ UINT_PTR* puMethodStartPC
+ ARM_ARG(ExceptionTracker *pCurrentTracker)
+ ARM64_ARG(ExceptionTracker *pCurrentTracker))
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ INDEBUG(memset(pcfThisFrame, 0xCC, sizeof(*pcfThisFrame)));
+ pcfThisFrame->pRD = pRD;
+#ifdef FEATURE_INTERPRETER
+ pcfThisFrame->pFrame = NULL;
+#endif // FEATURE_INTERPRETER
+
+#if defined(_TARGET_AMD64_)
+ pThread->InitRegDisplay(pcfThisFrame->pRD, pDispatcherContext->ContextRecord, true);
+
+ if (pDispatcherContext->ControlPc != (UINT_PTR)GetIP(pDispatcherContext->ContextRecord))
+ {
+ INDEBUG(memset(pRD->pCurrentContext, 0xCC, sizeof(*(pRD->pCurrentContext))));
+ // Ensure that clients can tell the current context isn't valid.
+ SetIP(pRD->pCurrentContext, 0);
+
+ *(pRD->pCallerContext) = *(pDispatcherContext->ContextRecord);
+ pRD->IsCallerContextValid = TRUE;
+
+ pcfThisFrame->pRD->SP = sf.SP;
+ pcfThisFrame->pRD->ControlPC = pDispatcherContext->ControlPc;
+ }
+#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ pThread->InitRegDisplay(pcfThisFrame->pRD, pDispatcherContext->ContextRecord, true);
+
+ if (pCurrentTracker->IsInFirstPass())
+ {
+ // In the first pass on ARM & ARM64:
+ //
+ // 1) EstablisherFrame (passed as 'sf' to this method) represents the SP at the time
+ // the current managed method was invoked and thus, is the SP of the caller. This is
+ // the value of DispatcherContext->EstablisherFrame as well.
+ // 2) DispatcherContext->ControlPC is the pc in the current managed method for which personality
+ // routine has been invoked.
+ // 3) DispatcherContext->ContextRecord contains the context record of the caller (and thus, IP
+ // in the caller). Most of the times, these values will be distinct. However, recursion
+ // may result in them being the same (case "run2" of baseservices\Regression\V1\Threads\functional\CS_TryFinally.exe
+ // is an example). In such a case, we ensure that EstablisherFrame value is the same as
+ // the SP in DispatcherContext->ContextRecord (which is (1) above).
+
+ // Based upon this, ensure that clients can tell the current context isn't valid.
+ INDEBUG(memset(pRD->pCurrentContext, 0xCC, sizeof(*(pRD->pCurrentContext))));
+ SetIP(pRD->pCurrentContext, 0);
+
+ // Assert that our assumption (1) above is true
+ _ASSERTE(sf.SP == GetSP(pDispatcherContext->ContextRecord));
+
+ // Setup the caller context
+ *(pRD->pCallerContext) = *(pDispatcherContext->ContextRecord);
+ pRD->IsCallerContextValid = TRUE;
+ pcfThisFrame->pRD->SP = sf.SP;
+ pcfThisFrame->pRD->IsCallerSPValid = TRUE;
+
+ EH_LOG((LL_INFO100, "ExceptionTracker::InitializeCrawlFrame: DispatcherContext->ControlPC = %p; IP in DispatcherContext->ContextRecord = %p.\n",
+ pDispatcherContext->ControlPc, GetIP(pDispatcherContext->ContextRecord)));
+ pcfThisFrame->pRD->ControlPC = pDispatcherContext->ControlPc;
+ }
+ else
+ {
+ // In second pass on ARM & ARM64:
+ //
+ // 1) EstablisherFrame (passed as 'sf' to this method) represents the SP at the time
+ // the current managed method was invoked and thus, is the SP of the caller. This is
+ // the value of DispatcherContext->EstablisherFrame as well.
+ // 2) DispatcherContext->ControlPC is the pc in the current managed method for which personality
+ // routine has been invoked.
+ // 3) DispatcherContext->ContextRecord contains the context record of the current managed method
+ // for which the personality routine is invoked.
+ // Assert that our assumption (3) above is true
+ _ASSERTE(pDispatcherContext->ControlPc == GetIP(pDispatcherContext->ContextRecord));
+
+ // Simply setup the callerSP during the second pass in the caller context.
+ // This is used in setting up the "EnclosingClauseCallerSP" in ExceptionTracker::ProcessManagedCallFrame
+ // when the termination handlers are invoked.
+ ::SetSP(pcfThisFrame->pRD->pCallerContext, sf.SP);
+ pcfThisFrame->pRD->IsCallerSPValid = TRUE;
+ }
+
+ // Further below, we will adjust the ControlPC based upon whether we are at a callsite or not.
+ // We need to do this for the "RegDisplay.ControlPC" field as well so that when data structures like
+ // EECodeInfo initialize themselves using this field, they will have the correct absolute value
+ // that is in sync with the "relOffset" we calculate below.
+ //
+ // However, we do this *only* when "ControlPCForEHSearch" is the same as "DispatcherContext->ControlPC",
+ // indicating we are not using the thread-abort reraise loop prevention logic.
+ //
+ bool fAdjustRegdisplayControlPC = false;
+ if (pDispatcherContext->ControlPc == ControlPCForEHSearch)
+ {
+ // Since DispatcherContext->ControlPc is used to initialize the
+ // RegDisplay.ControlPC field, assert that it is the same
+ // as the ControlPC we are going to use to initialize the CrawlFrame
+ // with as well.
+ _ASSERTE(pcfThisFrame->pRD->ControlPC == ControlPCForEHSearch);
+ fAdjustRegdisplayControlPC = true;
+
+ }
+
+#if defined(_TARGET_ARM_)
+ // Remove the Thumb bit
+ ControlPCForEHSearch = ThumbCodeToDataPointer<DWORD_PTR, DWORD_PTR>(ControlPCForEHSearch);
+#endif
+
+ // If the OS indicated that the IP is a callsite, then adjust the ControlPC by decrementing it
+ // by two. This is done because unwinding at callsite will make ControlPC point to the
+ // instruction post the callsite. If a protected region ends "at" the callsite, then
+ // not doing this adjustment will result in an off-by-one error that can result in us not finding
+ // a handler.
+ //
+ // For async exceptions (e.g. AV), this will be false.
+ //
+ // We decrement by two to be in accordance with how the kernel does as well.
+ if (pDispatcherContext->ControlPcIsUnwound)
+ {
+ ControlPCForEHSearch -= STACKWALK_CONTROLPC_ADJUST_OFFSET;
+ if (fAdjustRegdisplayControlPC == true)
+ {
+ // Once the check above is removed, the assignment below should
+ // be done unconditionally.
+ pcfThisFrame->pRD->ControlPC = ControlPCForEHSearch;
+ // On ARM & ARM64, the IP is either at the callsite (post the adjustment above)
+ // or at the instruction at which async exception took place.
+ pcfThisFrame->isIPadjusted = true;
+ }
+ }
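+
+ // As a worked example (hypothetical addresses): if a call instruction ends at
+ // PC 0x1004 and the protected region also ends at 0x1004, the unwound ControlPC
+ // of 0x1004 would lie outside the region; decrementing it by
+ // STACKWALK_CONTROLPC_ADJUST_OFFSET moves the search PC back inside the call's
+ // protected region so the handler lookup succeeds.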
+#endif // _TARGET_AMD64_ || _TARGET_ARM_ || _TARGET_ARM64_
+
+ pcfThisFrame->codeInfo.Init(ControlPCForEHSearch);
+
+ if (pcfThisFrame->codeInfo.IsValid())
+ {
+ pcfThisFrame->isFrameless = true;
+ pcfThisFrame->pFunc = pcfThisFrame->codeInfo.GetMethodDesc();
+
+ *puMethodStartPC = pcfThisFrame->codeInfo.GetStartAddress();
+ }
+ else
+ {
+ pcfThisFrame->isFrameless = false;
+ pcfThisFrame->pFunc = NULL;
+
+ *puMethodStartPC = NULL;
+ }
+
+ pcfThisFrame->pThread = pThread;
+ pcfThisFrame->hasFaulted = false;
+
+ Frame* pTopFrame = pThread->GetFrame();
+ pcfThisFrame->isIPadjusted = (FRAME_TOP != pTopFrame) && (pTopFrame->GetVTablePtr() != FaultingExceptionFrame::GetMethodFrameVPtr());
+ if (pcfThisFrame->isFrameless && (pcfThisFrame->isIPadjusted == false) && (pcfThisFrame->GetRelOffset() == 0))
+ {
+ // If we are here, then either a hardware generated exception happened at the first instruction
+ // of a managed method, or an exception was thrown at that location.
+ //
+ // Adjusting IP in such a case will lead us into unknown code - it could be native code or some
+ // other JITted code.
+ //
+ // Hence, we will flag that the IP is already adjusted.
+ pcfThisFrame->isIPadjusted = true;
+
+ EH_LOG((LL_INFO100, "ExceptionTracker::InitializeCrawlFrame: Exception at offset zero of the method (MethodDesc %p); setting IP as adjusted.\n",
+ pcfThisFrame->pFunc));
+ }
+
+ pcfThisFrame->pFirstGSCookie = NULL;
+ pcfThisFrame->pCurGSCookie = NULL;
+
+ pcfThisFrame->isFilterFuncletCached = FALSE;
+}
+
+bool ExceptionTracker::UpdateScannedStackRange(StackFrame sf, bool fIsFirstPass)
+{
+ CONTRACTL
+ {
+ // Since this function will modify the scanned stack range, which is also accessed during the GC stackwalk,
+ // we invoke it in COOP mode so that that access to the range is synchronized.
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ //
+ // collapse trackers if a nested exception passes a previous exception
+ //
+
+ HandleNestedExceptionEscape(sf, fIsFirstPass);
+
+ //
+ // update stack bounds
+ //
+ BOOL fUnwindingToFindResumeFrame = m_ExceptionFlags.UnwindingToFindResumeFrame();
+
+ if (m_ScannedStackRange.Contains(sf))
+ {
+ // If we're unwinding to find the resume frame and we're examining the topmost previously scanned frame,
+ // then we can't ignore it because we could resume here due to an escaped nested exception.
+ if (!fUnwindingToFindResumeFrame || (m_ScannedStackRange.GetUpperBound() != sf))
+ {
+ // been there, done that.
+ EH_LOG((LL_INFO100, " IGNOREFRAME: This frame has been processed already\n"));
+ return false;
+ }
+ }
+ else
+ {
+ if (sf < m_ScannedStackRange.GetLowerBound())
+ {
+ m_ScannedStackRange.ExtendLowerBound(sf);
+ }
+
+ if (sf > m_ScannedStackRange.GetUpperBound())
+ {
+ m_ScannedStackRange.ExtendUpperBound(sf);
+ }
+
+ DebugLogTrackerRanges(" C");
+ }
+
+ return true;
+}
+
+void CheckForRudeAbort(Thread* pThread, bool fIsFirstPass)
+{
+ if (fIsFirstPass && pThread->IsRudeAbort())
+ {
+ GCX_COOP();
+ OBJECTREF rudeAbortThrowable = CLRException::GetPreallocatedRudeThreadAbortException();
+ if (pThread->GetThrowable() != rudeAbortThrowable)
+ {
+ pThread->SafeSetThrowables(rudeAbortThrowable);
+ }
+
+ if (!pThread->IsRudeAbortInitiated())
+ {
+ pThread->PreWorkForThreadAbort();
+ }
+ }
+}
+
+void ExceptionTracker::FirstPassIsComplete()
+{
+ m_ExceptionFlags.ResetUnwindingToFindResumeFrame();
+ m_pSkipToParentFunctionMD = NULL;
+}
+
+void ExceptionTracker::SecondPassIsComplete(MethodDesc* pMD, StackFrame sfResumeStackFrame)
+{
+ EH_LOG((LL_INFO100, " second pass unwind completed\n"));
+
+ m_pMethodDescOfCatcher = pMD;
+ m_sfResumeStackFrame = sfResumeStackFrame;
+}
+
+CLRUnwindStatus ExceptionTracker::ProcessOSExceptionNotification(
+ PEXCEPTION_RECORD pExceptionRecord,
+ PCONTEXT pContextRecord,
+ PDISPATCHER_CONTEXT pDispatcherContext,
+ DWORD dwExceptionFlags,
+ StackFrame sf,
+ Thread* pThread,
+ StackTraceState STState ARM_ARG(PVOID pICFSetAsLimitFrame))
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ CLRUnwindStatus status = UnwindPending;
+
+ CrawlFrame cfThisFrame;
+ REGDISPLAY regdisp;
+ UINT_PTR uMethodStartPC;
+ UINT_PTR uCallerSP;
+
+ DWORD_PTR ControlPc = pDispatcherContext->ControlPc;
+
+ ExceptionTracker::InitializeCrawlFrame(&cfThisFrame, pThread, sf, &regdisp, pDispatcherContext, ControlPc, &uMethodStartPC
+ ARM_ARG(this)
+ ARM64_ARG(this));
+
+#ifdef _TARGET_AMD64_
+ uCallerSP = EECodeManager::GetCallerSp(cfThisFrame.pRD);
+#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ // On ARM & ARM64, the EstablisherFrame is the value of SP at the time a function was called and before its prolog
+ // executed. Effectively, it is the SP of the caller. This has been confirmed by AaronGi from the kernel
+ // team.
+ uCallerSP = sf.SP;
+#else
+ PORTABILITY_ASSERT("ExceptionTracker::ProcessOSExceptionNotification");
+ uCallerSP = NULL;
+#endif // _TARGET_AMD64_
+
+ EH_LOG((LL_INFO100, "ProcessCrawlFrame: PSP: " FMT_ADDR " EstablisherFrame: " FMT_ADDR "\n", DBG_ADDR(uCallerSP), DBG_ADDR(sf.SP)));
+
+ bool fIsFirstPass = !(dwExceptionFlags & EXCEPTION_UNWINDING);
+ bool fTargetUnwind = !!(dwExceptionFlags & EXCEPTION_TARGET_UNWIND);
+
+ // If a thread abort was raised after a catch block's execution, we would have saved
+ // the index and EstablisherFrame of the EH clause corresponding to the handler that executed.
+ // Fetch that locally and reset the state against the thread if we are in the unwind pass.
+ //
+ // It should be kept in mind that by virtue of copying the information below, we will
+ // have it available for the first frame seen during the unwind pass (which will be the
+ // frame where ThreadAbort was raised after the catch block) for us to skip any termination
+ // handlers that may be present prior to the EH clause whose index we saved.
+ DWORD dwTACatchHandlerClauseIndex = pThread->m_dwIndexClauseForCatch;
+ StackFrame sfEstablisherOfActualHandlerFrame = pThread->m_sfEstablisherOfActualHandlerFrame;
+ if (!fIsFirstPass)
+ {
+ pThread->m_dwIndexClauseForCatch = 0;
+ pThread->m_sfEstablisherOfActualHandlerFrame.Clear();
+ }
+
+ bool fProcessThisFrame = false;
+ bool fCrawlFrameIsDirty = false;
+
+ // <GC_FUNCLET_REFERENCE_REPORTING>
+ //
+ // Refer to the detailed comment in ExceptionTracker::ProcessManagedCallFrame for more context.
+ // In summary, if we have reached the target of the unwind, then we need to fix CallerSP (for
+ // GC reference reporting) if we have been asked to.
+ //
+ // This will be done only when we reach the frame that is handling the exception.
+ //
+ // </GC_FUNCLET_REFERENCE_REPORTING>
+ if (fTargetUnwind && (m_fFixupCallerSPForGCReporting == true))
+ {
+ m_fFixupCallerSPForGCReporting = false;
+ this->m_EnclosingClauseInfoForGCReporting.SetEnclosingClauseCallerSP(uCallerSP);
+ }
+
+#if defined(_TARGET_ARM_)
+ // Refer to detailed comment below.
+ PTR_Frame pICFForUnwindTarget = NULL;
+#endif // defined(_TARGET_ARM_)
+
+ CheckForRudeAbort(pThread, fIsFirstPass);
+
+ bool fIsFrameLess = cfThisFrame.IsFrameless();
+ GSCookie* pGSCookie = NULL;
+ bool fSetLastUnwoundEstablisherFrame = false;
+
+ //
+ // process any frame since the last frame we've seen
+ //
+ {
+ GCX_COOP_THREAD_EXISTS(pThread);
+
+ // UpdateScannedStackRange needs to be invoked in COOP mode since
+ // the stack range can also be accessed during GC stackwalk.
+ fProcessThisFrame = UpdateScannedStackRange(sf, fIsFirstPass);
+
+ MethodDesc *pMD = cfThisFrame.GetFunction();
+
+ Frame* pFrame = GetLimitFrame(); // next frame to process
+ if (pFrame != FRAME_TOP)
+ {
+ // The following function call sets the GS cookie pointers and checks the cookie.
+ cfThisFrame.SetCurGSCookie(Frame::SafeGetGSCookiePtr(pFrame));
+ }
+
+ while (((UINT_PTR)pFrame) < uCallerSP)
+ {
+#if defined(_TARGET_ARM_)
+ // InlinedCallFrames (ICF) are allocated, initialized and linked to the Frame chain
+ // by the code generated by the JIT for a method containing a PInvoke.
+ //
+ // On X64, JIT generates code to dynamically link and unlink the ICF around
+ // each PInvoke call. On ARM, on the other hand, JIT's codegen, in context of ICF,
+ // is more inline with X86 and thus, it links in the ICF at the start of the method
+ // and unlinks it towards the method end. Thus, ICF is present on the Frame chain
+ // at any given point so long as the method containing the PInvoke is on the stack.
+ //
+ // Now, if the method containing ICF catches an exception, we will reset the Frame chain
+ // with the LimitFrame, that is computed below, after the catch handler returns. Since this
+ // computation is done relative to the CallerSP (on both X64 and ARM), we will end up
+ // removing the ICF from the Frame chain as that will always be below (stack growing down)
+ // the CallerSP since it lives in the stack space of the current managed frame.
+ //
+ // As a result, if there is another PInvoke call after the catch block, it will expect
+ // the ICF to be present and without one, execution will go south.
+ //
+ // To account for this ICF codegen difference, in the EH system we check if the current
+ // Frame is an ICF or not. If it is and lies inside the current managed method, we
+ // keep a reference to it and reset the LimitFrame to this saved reference before we
+ // return back to invoke the catch handler.
+ //
+ // Thus, if there is another PInvoke call post the catch handler, it will find ICF as expected.
+ //
+ // This is based upon the following assumptions:
+ //
+ // 1) There will be no other explicit Frame inserted above the ICF inside the
+ // managed method containing ICF. That is, ICF is the top-most explicit frame
+ // in the managed method (and thus, lies in the current managed frame).
+ //
+ // 2) There is only one ICF per managed method containing one (or more) PInvoke(s).
+ //
+ // 3) We only do this if the current frame is the one handling the exception. This is to
+ // address the scenario of keeping any ICF from frames lower in the stack active.
+ //
+ // 4) The ExceptionUnwind method of the ICF is a no-op. As noted above, we save a reference
+ // to the ICF and yet continue to process the frame chain. During unwind, this implies
+ // that we will end up invoking the ExceptionUnwind methods of all frames that lie
+ // below the caller SP of the managed frame handling the exception. And since the handling
+ // managed frame contains an ICF, it will be the topmost frame that will lie
+ // below the callerSP for which we will invoke ExceptionUnwind.
+ //
+ // Thus, ICF::ExceptionUnwind should not do anything significant. If any of these assumptions
+ // break, then the next best thing will be to make the JIT link/unlink the frame dynamically.
+
+ if (fTargetUnwind && (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr()))
+ {
+ PTR_InlinedCallFrame pICF = (PTR_InlinedCallFrame)pFrame;
+ // Does it live inside the current managed method? It will iff:
+ //
+ // 1) ICF address is higher than the current frame's SP (which we get from DispatcherContext), AND
+ // 2) ICF address is below callerSP.
+ if ((GetSP(pDispatcherContext->ContextRecord) < (TADDR)pICF) &&
+ ((UINT_PTR)pICF < uCallerSP))
+ {
+ pICFForUnwindTarget = pFrame;
+ }
+ }
+#endif // defined(_TARGET_ARM)
+
+ cfThisFrame.CheckGSCookies();
+
+ if (fProcessThisFrame)
+ {
+ ExceptionTracker::InitializeCrawlFrameForExplicitFrame(&cfThisFrame, pFrame, pMD);
+ fCrawlFrameIsDirty = true;
+
+ status = ProcessExplicitFrame(
+ &cfThisFrame,
+ sf,
+ fIsFirstPass,
+ STState);
+ cfThisFrame.CheckGSCookies();
+ }
+
+ if (!fIsFirstPass)
+ {
+ //
+ // notify Frame of unwind
+ //
+ pFrame->ExceptionUnwind();
+
+ // If we have not yet set the initial explicit frame processed by this tracker, then
+ // set it now.
+ if (m_pInitialExplicitFrame == NULL)
+ {
+ m_pInitialExplicitFrame = pFrame;
+ }
+ }
+
+ pFrame = pFrame->Next();
+ m_pLimitFrame = pFrame;
+
+ if (UnwindPending != status)
+ {
+ goto lExit;
+ }
+ }
+
+ if (fCrawlFrameIsDirty)
+ {
+ // If crawlframe is dirty, it implies that it got modified as part of explicit frame processing. Thus, we shall
+ // reinitialize it here.
+ ExceptionTracker::InitializeCrawlFrame(&cfThisFrame, pThread, sf, &regdisp, pDispatcherContext, ControlPc, &uMethodStartPC
+ ARM_ARG(this)
+ ARM64_ARG(this));
+ }
+
+ if (fIsFrameLess)
+ {
+ pGSCookie = (GSCookie*)cfThisFrame.GetCodeManager()->GetGSCookieAddr(cfThisFrame.pRD,
+ &cfThisFrame.codeInfo,
+ &cfThisFrame.codeManState);
+ if (pGSCookie)
+ {
+ // The following function call sets the GS cookie pointers and checks the cookie.
+ cfThisFrame.SetCurGSCookie(pGSCookie);
+ }
+
+ status = HandleFunclets(&fProcessThisFrame, fIsFirstPass,
+ cfThisFrame.GetFunction(), cfThisFrame.IsFunclet(), sf);
+ }
+
+ if ((!fIsFirstPass) && (!fProcessThisFrame))
+ {
+ // If we are unwinding and not processing the current frame, it implies that
+ // this frame has been unwound for one of the following reasons:
+ //
+ // 1) We have already seen it due to nested exception processing, OR
+ // 2) We are skipping frames to find a funclet's parent and thus, it has already
+ // been unwound.
+ //
+ // If the current frame is NOT the target of unwind, update the last unwound
+ // establisher frame. We don't do this for the "target of unwind" since it has the catch handler, for a
+ // duplicate EH clause reported in the funclet, that needs to be invoked and thus, may have valid
+ // references to report for GC reporting.
+ //
+ // If we are not skipping the managed frame, then LastUnwoundEstablisherFrame will be updated later in this method,
+ // just before we return back to our caller.
+ if (!fTargetUnwind)
+ {
+ SetLastUnwoundEstablisherFrame(sf);
+ fSetLastUnwoundEstablisherFrame = true;
+ }
+ }
+
+ // GCX_COOP_THREAD_EXISTS ends here and we may switch to preemp mode now (if applicable).
+ }
+
+ //
+ // now process managed call frame if needed
+ //
+ if (fIsFrameLess)
+ {
+ if (fProcessThisFrame)
+ {
+ status = ProcessManagedCallFrame(
+ &cfThisFrame,
+ sf,
+ StackFrame::FromEstablisherFrame(pDispatcherContext->EstablisherFrame),
+ pExceptionRecord,
+ STState,
+ uMethodStartPC,
+ dwExceptionFlags,
+ dwTACatchHandlerClauseIndex,
+ sfEstablisherOfActualHandlerFrame);
+
+ if (pGSCookie)
+ {
+ cfThisFrame.CheckGSCookies();
+ }
+ }
+
+ if (fTargetUnwind && (UnwindPending == status))
+ {
+ SecondPassIsComplete(cfThisFrame.GetFunction(), sf);
+ status = SecondPassComplete;
+ }
+ }
+
+lExit:
+
+ // If we are unwinding and have returned successfully from unwinding the frame, then mark it as the last unwound frame for the current
+ // exception. We don't do this if the frame is the target of unwind (i.e. handling the exception) since catch block invocation may have references to be
+ // reported (if a GC happens during catch block invocation).
+ //
+ // If an exception escapes out of a funclet (this is only possible for fault/finally/catch clauses), then we will not return here.
+ // Since this implies that the funclet no longer has any valid references to report, we will need to set the LastUnwoundEstablisherFrame
+ // close to the point we detect the exception has escaped the funclet. This is done in ExceptionTracker::CallHandler and marks the
+ // frame that invoked (and thus, contained) the funclet as the LastUnwoundEstablisherFrame.
+ //
+ // Note: Do not add any GC triggering code between the return from ProcessManagedCallFrame and setting of the LastUnwoundEstablisherFrame
+ if ((!fIsFirstPass) && (!fTargetUnwind) && (!fSetLastUnwoundEstablisherFrame))
+ {
+ GCX_COOP();
+ SetLastUnwoundEstablisherFrame(sf);
+ }
+
+ if (FirstPassComplete == status)
+ {
+ FirstPassIsComplete();
+ }
+
+ if (fTargetUnwind && (status == SecondPassComplete))
+ {
+#if defined(_TARGET_ARM_)
+ // If we have got a ICF to set as the LimitFrame, do that now.
+ // The Frame chain is still intact and would be updated using
+ // the LimitFrame (done after the catch handler returns).
+ //
+ // NOTE: This should be done as the last thing before we return
+ // back to invoke the catch handler.
+ if (pICFForUnwindTarget != NULL)
+ {
+ m_pLimitFrame = pICFForUnwindTarget;
+ pICFSetAsLimitFrame = (PVOID)pICFForUnwindTarget;
+ }
+#endif // _TARGET_ARM_
+
+ // Since second pass is complete and we have reached
+ // the frame containing the catch funclet, reset the enclosing
+ // clause SP for the catch funclet, if applicable, to be the CallerSP of the
+ // current frame.
+ //
+ // Refer to the detailed comment about this code
+ // in ExceptionTracker::ProcessManagedCallFrame.
+ if (m_fResetEnclosingClauseSPForCatchFunclet)
+ {
+#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ // DispatcherContext->EstablisherFrame's value
+ // represents the CallerSP of the current frame.
+ UINT_PTR EnclosingClauseCallerSP = (UINT_PTR)pDispatcherContext->EstablisherFrame;
+#elif defined(_TARGET_AMD64_)
+ // Extract the CallerSP from RegDisplay on AMD64
+ REGDISPLAY *pRD = cfThisFrame.GetRegisterSet();
+ _ASSERTE(pRD->IsCallerContextValid || pRD->IsCallerSPValid);
+ UINT_PTR EnclosingClauseCallerSP = (UINT_PTR)GetSP(pRD->pCallerContext);
+#else // !_ARM_ && !_AMD64_ && !_ARM64_
+ PORTABILITY_ASSERT("ExceptionTracker::ProcessOSExceptionNotification");
+ UINT_PTR EnclosingClauseCallerSP = NULL;
+#endif // defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ m_EnclosingClauseInfo = EnclosingClauseInfo(false, cfThisFrame.GetRelOffset(), EnclosingClauseCallerSP);
+ }
+ m_fResetEnclosingClauseSPForCatchFunclet = FALSE;
+ }
+
+ // If we are unwinding and the exception was not caught in managed code and we have reached the
+ // topmost frame we saw in the first pass, then reset thread abort state if this is the last managed
+ // code personality routine on the stack.
+ if ((fIsFirstPass == false) && (this->GetTopmostStackFrameFromFirstPass() == sf) && (GetCatchToCallPC() == NULL))
+ {
+ ExceptionTracker::ResetThreadAbortStatus(pThread, &cfThisFrame, sf);
+ }
+
+ //
+ // fill in the out parameter
+ //
+ return status;
+}
+
+// static
+void ExceptionTracker::DebugLogTrackerRanges(__in_z char *pszTag)
+{
+#ifdef _DEBUG
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ Thread* pThread = GetThread();
+ ExceptionTracker* pTracker = pThread ? pThread->GetExceptionState()->m_pCurrentTracker : NULL;
+
+ int i = 0;
+
+ while (pTracker)
+ {
+ EH_LOG((LL_INFO100, "%s:|%02d| %p: (%p %p) %s\n", pszTag, i, pTracker, pTracker->m_ScannedStackRange.GetLowerBound().SP, pTracker->m_ScannedStackRange.GetUpperBound().SP,
+ pTracker->IsInFirstPass() ? "1st pass" : "2nd pass"
+ ));
+ pTracker = pTracker->m_pPrevNestedInfo;
+ i++;
+ }
+#endif // _DEBUG
+}
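+
+// An illustrative, hypothetical trace of the logging above (addresses invented for the
+// example): with two nested trackers on the thread -- the inner one still in its first
+// pass and the outer one already unwinding -- the output would look roughly like:
+//
+//   A:|00| 000000D1F0CFE000: (000000D1F0CFE400 000000D1F0CFEA00) 1st pass
+//   A:|01| 000000D1F0CFD000: (000000D1F0CFEC00 000000D1F0CFF800) 2nd pass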
+
+
+bool ExceptionTracker::HandleNestedExceptionEscape(StackFrame sf, bool fIsFirstPass)
+{
+ CONTRACTL
+ {
+ // Since this function can modify the scanned stack range, which is also accessed during the GC stackwalk,
+ // we invoke it in COOP mode so that access to the range is synchronized.
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ bool fResult = false;
+
+ DebugLogTrackerRanges(" A");
+
+ ExceptionTracker* pPreviousTracker = m_pPrevNestedInfo;
+
+ while (pPreviousTracker && pPreviousTracker->m_ScannedStackRange.IsSupersededBy(sf))
+ {
+ //
+ // If the previous tracker (representing exception E1 and whose scanned stack range is superseded by the current frame)
+ // is in the first pass AND the current tracker (representing exception E2) has not seen the current frame AND we are here,
+ // it implies that we had a nested exception while the previous tracker was in the first pass.
+ //
+ // This can happen in the following scenarios:
+ //
+ // 1) An exception escapes a managed filter (which are invoked in the first pass). However,
+ // that is not possible since any exception escaping them is swallowed by the runtime.
+ // If someone does longjmp from within the filter, then that is illegal and unsupported.
+ //
+ // 2) While processing an exception (E1), either we or native code caught it, triggering unwind. However, before the
+ // first managed frame was processed for unwind, another native frame (below the first managed frame on the stack)
+ // did a longjmp to go past us, or raised another exception from one of its termination handlers.
+ //
+ // Thus, we will never get a chance to switch our tracker for E1 to 2nd pass (which would be done when
+ // ExceptionTracker::GetOrCreateTracker will be invoked for the first managed frame) since the longjmp, or the
+ // new-exception would result in a new tracker being setup.
+ //
+ // Below is an example of such a case that does longjmp
+ // ----------------------------------------------------
+ //
+ // NativeA (does setjmp) -> ManagedFunc -> NativeB
+ //
+ //
+ // NativeB could be implemented as:
+ //
+ // __try { // raise exception } __finally { longjmp(jmp1, 1); }
+ //
+ // "jmp1" is the jmp_buf setup by NativeA by calling setjmp.
+ //
+ // ManagedFunc could be implemented as:
+ //
+ // try {
+ // try { NativeB(); }
+ // finally { Console.WriteLine("Finally in ManagedFunc"); }
+ // }
+ // catch (Exception ex) { Console.WriteLine("Caught"); }
+ //
+ //
+ // In case of nested exception, we combine the stack range (see below) since we have already seen those frames
+ // in the specified pass for the previous tracker. However, in the example above, the current tracker (in 2nd pass)
+ // has not seen the frames which the previous tracker (which is in the first pass) has seen.
+ //
+ // On a similar note, the __finally in the example above could also do a "throw 1;". In such a case, we would expect
+ // that the catch in ManagedFunc would catch the exception (since "throw 1;" would be represented as SEHException in
+ // the runtime). However, during first pass, when the exception enters ManagedFunc, the current tracker would not have
+ // processed the ManagedFunc frame, while the previous tracker (for E1) would have. If we proceed to combine the stack
+ // ranges, we will omit examining the catch clause in ManagedFunc.
+ //
+ // Thus, we cannot combine the stack range yet and must let each frame, already scanned by the previous
+ // tracker, be also processed by the current (longjmp) tracker if not already done.
+ //
+ // Note: This is not a concern if the previous tracker (for exception E1) is in the second pass since any escaping exception (E2)
+ // would come out of a finally/fault funclet and the runtime's funclet skipping logic will deal with it correctly.
+
+ if (pPreviousTracker->IsInFirstPass() && (!this->m_ScannedStackRange.Contains(sf)))
+ {
+ // Allow all stackframes seen by previous tracker to be seen by the current
+ // tracker as well.
+ if (sf <= pPreviousTracker->m_ScannedStackRange.GetUpperBound())
+ {
+ EH_LOG((LL_INFO100, " - not updating current tracker bounds for escaped exception since\n"));
+ EH_LOG((LL_INFO100, " - active tracker (%p; %s) has not seen the current frame [", this, this->IsInFirstPass()?"FirstPass":"SecondPass"));
+ EH_LOG((LL_INFO100, " - SP = %p", sf.SP));
+ EH_LOG((LL_INFO100, "]\n"));
+ EH_LOG((LL_INFO100, " - which the previous (%p) tracker has processed.\n", pPreviousTracker));
+ return fResult;
+ }
+ }
+
+ EH_LOG((LL_INFO100, " nested exception ESCAPED\n"));
+ EH_LOG((LL_INFO100, " - updating current tracker stack bounds\n"));
+ m_ScannedStackRange.CombineWith(sf, &pPreviousTracker->m_ScannedStackRange);
+
+ //
+ // Only the topmost tracker can be in the first pass.
+ //
+ // (Except in the case where we have an exception thrown in a filter,
+ // which should never escape the filter, and thus, will never supersede
+ // the previous exception. This is why we cannot walk the entire list
+ // of trackers to assert that they're all in the right mode.)
+ //
+ // CONSISTENCY_CHECK(!pPreviousTracker->IsInFirstPass());
+
+ // If our modes don't match, don't actually delete the superseded exception.
+ // If we did, we would lose valuable state about which frames have been scanned
+ // on the second pass if an exception is thrown during the 2nd pass.
+
+ // Advance the current tracker pointer now, since it may be deleted below.
+ pPreviousTracker = pPreviousTracker->m_pPrevNestedInfo;
+
+ if (!fIsFirstPass)
+ {
+ EH_LOG((LL_INFO100, " - removing previous tracker\n"));
+
+ ExceptionTracker* pTrackerToFree = m_pPrevNestedInfo;
+ m_pPrevNestedInfo = pTrackerToFree->m_pPrevNestedInfo;
+
+#if defined(DEBUGGING_SUPPORTED)
+ if (g_pDebugInterface != NULL)
+ {
+ g_pDebugInterface->DeleteInterceptContext(pTrackerToFree->m_DebuggerExState.GetDebuggerInterceptContext());
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ CONSISTENCY_CHECK(pTrackerToFree->IsValid());
+ FreeTrackerMemory(pTrackerToFree, memBoth);
+ }
+
+ DebugLogTrackerRanges(" B");
+ }
+
+ return fResult;
+}
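+
+// Note on the loop above: by the time dispatch proceeds past this point, every tracker
+// whose scanned stack range is superseded by the current frame has either had its range
+// folded into the current tracker (via CombineWith) -- and, in the second pass, been
+// freed -- or, in the longjmp-style escape case, been left intact so that its frames
+// can be re-examined by the current tracker.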
+
+CLRUnwindStatus ExceptionTracker::ProcessExplicitFrame(
+ CrawlFrame* pcfThisFrame,
+ StackFrame sf,
+ BOOL fIsFirstPass,
+ StackTraceState& STState
+ )
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ PRECONDITION(!pcfThisFrame->IsFrameless());
+ PRECONDITION(pcfThisFrame->GetFrame() != FRAME_TOP);
+ }
+ CONTRACTL_END;
+
+ Frame* pFrame = pcfThisFrame->GetFrame();
+
+ EH_LOG((LL_INFO100, " [ ProcessExplicitFrame: pFrame: " FMT_ADDR " pMD: " FMT_ADDR " %s PASS ]\n", DBG_ADDR(pFrame), DBG_ADDR(pFrame->GetFunction()), fIsFirstPass ? "FIRST" : "SECOND"));
+
+ if (FRAME_TOP == pFrame)
+ {
+ goto lExit;
+ }
+
+ if (!m_ExceptionFlags.UnwindingToFindResumeFrame())
+ {
+ //
+ // update our exception stacktrace
+ //
+
+ BOOL bReplaceStack = FALSE;
+ BOOL bSkipLastElement = FALSE;
+
+ if (STS_FirstRethrowFrame == STState)
+ {
+ bSkipLastElement = TRUE;
+ }
+ else
+ if (STS_NewException == STState)
+ {
+ bReplaceStack = TRUE;
+ }
+
+ // Normally, we need to notify the profiler in two cases:
+ // 1) a brand new exception is thrown, and
+ // 2) an exception is rethrown.
+ // However, in this case, if the explicit frame doesn't correspond to an MD, we don't set STState to STS_Append,
+ // so the next managed call frame we process will give another ExceptionThrown() callback to the profiler.
+ // So we give the callback below only in the case when we append to the stack trace.
+
+ MethodDesc* pMD = pcfThisFrame->GetFunction();
+ if (pMD)
+ {
+ Thread* pThread = m_pThread;
+
+ if (fIsFirstPass)
+ {
+ //
+ // notify profiler of new/rethrown exception
+ //
+ if (bSkipLastElement || bReplaceStack)
+ {
+ GCX_COOP();
+ EEToProfilerExceptionInterfaceWrapper::ExceptionThrown(pThread);
+ UpdatePerformanceMetrics(pcfThisFrame, bSkipLastElement, bReplaceStack);
+ }
+
+ //
+ // Update stack trace
+ //
+ m_StackTraceInfo.AppendElement(CanAllocateMemory(), NULL, sf.SP, pMD, pcfThisFrame);
+ m_StackTraceInfo.SaveStackTrace(CanAllocateMemory(), m_hThrowable, bReplaceStack, bSkipLastElement);
+
+ //
+ // make callback to debugger and/or profiler
+ //
+#if defined(DEBUGGING_SUPPORTED)
+ if (ExceptionTracker::NotifyDebuggerOfStub(pThread, sf, pFrame))
+ {
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+ // Deliver the FirstChanceNotification after the debugger, if not already delivered.
+ if (!this->DeliveredFirstChanceNotification())
+ {
+ ExceptionNotifications::DeliverFirstChanceNotification();
+ }
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ STState = STS_Append;
+ }
+ }
+ }
+
+lExit:
+ return UnwindPending;
+}
+
+CLRUnwindStatus ExceptionTracker::HandleFunclets(bool* pfProcessThisFrame, bool fIsFirstPass,
+ MethodDesc * pMD, bool fFunclet, StackFrame sf)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+#ifdef WIN64EXCEPTIONS // funclets
+ BOOL fUnwindingToFindResumeFrame = m_ExceptionFlags.UnwindingToFindResumeFrame();
+
+ //
+ // handle out-of-line finallys
+ //
+
+ // In the second pass, we always want to execute this code.
+ // In the first pass, we only execute this code if we are not unwinding to find the resume frame.
+ // We do this to avoid calling the same filter more than once. Search for "UnwindingToFindResumeFrame"
+ // to find a more elaborate comment in ProcessManagedCallFrame().
+
+ // If we are in the first pass and we are unwinding to find the resume frame, then make sure the flag is cleared.
+ if (fIsFirstPass && fUnwindingToFindResumeFrame)
+ {
+ m_pSkipToParentFunctionMD = NULL;
+ }
+ else
+ {
+ // <TODO>
+ // this 'skip to parent function MD' code only seems to be needed
+ // in the case where we call a finally funclet from the normal
+ // execution codepath. Is there a better way to achieve the same
+ // goal? Also, will recursion break us in any corner cases?
+ // [ThrowInFinallyNestedInTryTest]
+ // [GoryManagedPresentTest]
+ // </TODO>
+
+ // <TODO>
+ // this was done for AMD64, but I don't understand why AMD64 needed the workaround.
+ // (the workaround is the "double call on parent method" part.)
+ // </TODO>
+
+ //
+ // If we encounter a funclet, we need to skip all call frames up
+ // to and including its parent method call frame. The reason
+ // behind this is that a funclet is logically part of the parent
+ // method, and all the clauses that covered its logical location
+ // in the parent also cover its body.
+ //
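+ // A sketch of the encoding used below (describing the existing logic, not adding to it):
+ // m_pSkipToParentFunctionMD is a two-state tagged value packed into one pointer-sized field:
+ //
+ // 1) "skipping" state: the funclet's MethodDesc* with the low bit set, i.e. (pMD | 1);
+ // 2) "parent found" state: the parent frame's SP with the low bit clear (SPs are aligned).
+ //
+ // Testing the low bit, as the checks below do, is therefore enough to tell the states apart.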
+ if (((UINT_PTR)m_pSkipToParentFunctionMD) & 1)
+ {
+ EH_LOG((LL_INFO100, " IGNOREFRAME: SKIPTOPARENT: skipping to parent\n"));
+ *pfProcessThisFrame = false;
+ if ((((UINT_PTR)pMD) == (((UINT_PTR)m_pSkipToParentFunctionMD) & ~((UINT_PTR)1))) && !fFunclet)
+ {
+ EH_LOG((LL_INFO100, " SKIPTOPARENT: found parent for funclet pMD = %p, sf.SP = %p, will stop skipping frames\n", pMD, sf.SP));
+ _ASSERTE(0 == (((UINT_PTR)sf.SP) & 1));
+ m_pSkipToParentFunctionMD = (MethodDesc*)sf.SP;
+
+ _ASSERTE(!fUnwindingToFindResumeFrame);
+ }
+ }
+ else if (fFunclet)
+ {
+ EH_LOG((LL_INFO100, " SKIPTOPARENT: found funclet pMD = %p, will start skipping frames\n", pMD));
+ _ASSERTE(0 == (((UINT_PTR)pMD) & 1));
+ m_pSkipToParentFunctionMD = (MethodDesc*)(((UINT_PTR)pMD) | 1);
+ }
+ else
+ {
+ if (sf.SP == ((UINT_PTR)m_pSkipToParentFunctionMD))
+ {
+ EH_LOG((LL_INFO100, " IGNOREFRAME: SKIPTOPARENT: got double call on parent method\n"));
+ *pfProcessThisFrame = false;
+ }
+ else if (m_pSkipToParentFunctionMD && (sf.SP > ((UINT_PTR)m_pSkipToParentFunctionMD)))
+ {
+ EH_LOG((LL_INFO100, " SKIPTOPARENT: went past parent method\n"));
+ m_pSkipToParentFunctionMD = NULL;
+ }
+ }
+ }
+#endif // WIN64EXCEPTIONS
+
+ return UnwindPending;
+}
+
+CLRUnwindStatus ExceptionTracker::ProcessManagedCallFrame(
+ CrawlFrame* pcfThisFrame,
+ StackFrame sf,
+ StackFrame sfEstablisherFrame,
+ EXCEPTION_RECORD* pExceptionRecord,
+ StackTraceState STState,
+ UINT_PTR uMethodStartPC,
+ DWORD dwExceptionFlags,
+ DWORD dwTACatchHandlerClauseIndex,
+ StackFrame sfEstablisherOfActualHandlerFrame
+ )
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ THROWS;
+ PRECONDITION(pcfThisFrame->IsFrameless());
+ }
+ CONTRACTL_END;
+
+ UINT_PTR uControlPC = (UINT_PTR)GetControlPC(pcfThisFrame->GetRegisterSet());
+ CLRUnwindStatus ReturnStatus = UnwindPending;
+
+ MethodDesc* pMD = pcfThisFrame->GetFunction();
+
+ bool fIsFirstPass = !(dwExceptionFlags & EXCEPTION_UNWINDING);
+ bool fIsFunclet = pcfThisFrame->IsFunclet();
+
+ CONSISTENCY_CHECK(IsValid());
+ CONSISTENCY_CHECK(ThrowableIsValid() || !fIsFirstPass);
+ CONSISTENCY_CHECK(pMD != 0);
+
+ EH_LOG((LL_INFO100, " [ ProcessManagedCallFrame this=%p, %s PASS ]\n", this, (fIsFirstPass ? "FIRST" : "SECOND")));
+
+#ifdef WIN64EXCEPTIONS // funclets
+ EH_LOG((LL_INFO100, " [ method: %s%s, %s ]\n",
+ (fIsFunclet ? "FUNCLET of " : ""),
+ pMD->m_pszDebugMethodName, pMD->m_pszDebugClassName));
+#else // !WIN64EXCEPTIONS
+ EH_LOG((LL_INFO100, " [ method: %s, %s ]\n",
+ pMD->m_pszDebugMethodName, pMD->m_pszDebugClassName));
+#endif // WIN64EXCEPTIONS
+
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+
+ INDEBUG( DumpClauses(pcfThisFrame->GetJitManager(), pcfThisFrame->GetMethodToken(), uMethodStartPC, uControlPC) );
+
+ bool fIsILStub = pMD->IsILStub();
+ bool fGiveDebuggerAndProfilerNotification = !fIsILStub;
+ BOOL fUnwindingToFindResumeFrame = m_ExceptionFlags.UnwindingToFindResumeFrame();
+
+ bool fIgnoreThisFrame = false;
+ bool fProcessThisFrameToFindResumeFrameOnly = false;
+
+ MethodDesc * pUserMDForILStub = NULL;
+ Frame * pILStubFrame = NULL;
+ if (fIsILStub && !fIsFunclet) // only make this callback on the main method body of IL stubs
+ pUserMDForILStub = GetUserMethodForILStub(pThread, sf.SP, pMD, &pILStubFrame);
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ BOOL fCanMethodHandleException = TRUE;
+ CorruptionSeverity currentSeverity = NotCorrupting;
+ {
+ // Switch to COOP mode since we are going to request throwable
+ GCX_COOP();
+
+ // We must defer to the MethodDesc of the user method instead of the IL stub
+ // itself because the user can specify the policy on a per-method basis and
+ // that won't be reflected via the IL stub's MethodDesc.
+ MethodDesc * pMDWithCEAttribute = (pUserMDForILStub != NULL) ? pUserMDForILStub : pMD;
+
+ // Check if the exception can be delivered to the method. This checks whether the exception
+ // is a CE and, if it is, whether the method can process it.
+ currentSeverity = pThread->GetExceptionState()->GetCurrentExceptionTracker()->GetCorruptionSeverity();
+ fCanMethodHandleException = CEHelper::CanMethodHandleException(currentSeverity, pMDWithCEAttribute);
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ // If a rude abort is in progress, skip all non-constrained execution region code.
+ // When rude abort is initiated, we cannot intercept any exceptions.
+ if ((pThread->IsRudeAbortInitiated() && !pThread->IsWithinCer(pcfThisFrame)))
+ {
+ // If we are unwinding to find the real resume frame, then we cannot ignore frames yet.
+ // We need to make sure we find the correct resume frame before starting to ignore frames.
+ if (fUnwindingToFindResumeFrame)
+ {
+ fProcessThisFrameToFindResumeFrameOnly = true;
+ }
+ else
+ {
+ EH_LOG((LL_INFO100, " IGNOREFRAME: rude abort/CE\n"));
+ fIgnoreThisFrame = true;
+ }
+ }
+
+ //
+ // BEGIN resume frame processing code
+ //
+ // Oftentimes, we'll run into the situation where the actual resume call frame
+ // is not the same call frame that we see the catch clause in. The reason for this
+ // is that catch clauses get duplicated down to cover funclet code ranges. When we
+ // see a catch clause covering our control PC, but it is marked as a duplicate, we
+ // need to continue to unwind until we find the same clause that isn't marked as a
+ // duplicate. This will be the correct resume frame.
+ //
+ // We actually achieve this skipping by observing that if we are catching at a
+ // duplicated clause, all the call frames we should be skipping have already been
+ // processed by a previous exception dispatch. So if we allow the unwind to
+ // continue, we will immediately bump into the ExceptionTracker for the previous
+ // dispatch, and our resume frame will be the last frame seen by that Tracker.
+ //
+ // Note that we will have visited all the EH clauses for a particular method when we
+ // see its first funclet (the funclet which is closest to the leaf). We need to make
+ // sure we don't process any EH clause again when we see other funclets or the parent
+ // method until we get to the real resume frame. The real resume frame may be another
+ // funclet, which is why we can't blindly skip all funclets until we see the parent
+ // method frame.
+ //
+ // If the exception is handled by the method, then UnwindingToFindResumeFrame takes
+ // care of the skipping. We basically skip everything when we are unwinding to find
+ // the resume frame. If the exception is not handled by the method, then we skip all the
+ // funclets until we get to the parent method. The logic to handle this is in
+ // HandleFunclets(). In the first pass, HandleFunclets() only kicks
+ // in if we are not unwinding to find the resume frame.
+ //
+ // Then on the second pass, we need to process frames up to the initial place where
+ // we saw the catch clause, which means up to and including part of the resume stack
+ // frame. Then we need to skip the call frames up to the real resume stack frame
+ // and resume.
+ //
+ // In the second pass, we have the same problem with skipping funclets as in the first
+ // pass. However, in this case, we know exactly which frame is our target unwind frame
+ // (EXCEPTION_TARGET_UNWIND will be set). So we blindly unwind until we see the parent
+ // method, or until the target unwind frame.
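+ //
+ // A hypothetical example of the above: suppose method M contains a catch clause C, and
+ // the JIT duplicated C down to cover M's finally funclet F. If the first pass finds the
+ // duplicate of C while dispatching through F, it sets UnwindingToFindResumeFrame and
+ // keeps unwinding until it encounters the original (non-duplicate) C in M's own call
+ // frame -- that frame is the real resume frame.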
+ PTR_EXCEPTION_CLAUSE_TOKEN pLimitClauseToken = NULL;
+ if (!fIgnoreThisFrame && !fIsFirstPass && !m_sfResumeStackFrame.IsNull() && (sf >= m_sfResumeStackFrame))
+ {
+ CONSISTENCY_CHECK_MSG(sf == m_sfResumeStackFrame, "Passed initial resume frame and fIgnoreThisFrame wasn't set!");
+ EH_LOG((LL_INFO100, " RESUMEFRAME: %s initial resume frame: %p\n", (sf == m_sfResumeStackFrame) ? "REACHED" : "PASSED" , m_sfResumeStackFrame.SP));
+
+ // process this frame to call handlers
+ EH_LOG((LL_INFO100, " RESUMEFRAME: Found last frame to process finallys in, need to process only part of call frame\n"));
+ EH_LOG((LL_INFO100, " RESUMEFRAME: Limit clause token: %p\n", m_pClauseForCatchToken));
+ pLimitClauseToken = m_pClauseForCatchToken;
+
+ // The limit clause is the same as the clause we're catching at. It is used
+ // as the last clause we process in the "initial resume frame". Anything further
+ // down the list of clauses is skipped along with all call frames up to the actual
+ // resume frame.
+ }
+ //
+ // END resume frame code
+ //
+
+ if (!fIgnoreThisFrame)
+ {
+ BOOL fFoundHandler = FALSE;
+ DWORD_PTR dwHandlerStartPC = NULL;
+
+ BOOL bReplaceStack = FALSE;
+ BOOL bSkipLastElement = FALSE;
+ bool fUnwindFinished = false;
+
+ if (STS_FirstRethrowFrame == STState)
+ {
+ bSkipLastElement = TRUE;
+ }
+ else
+ if (STS_NewException == STState)
+ {
+ bReplaceStack = TRUE;
+ }
+
+ // We need to notify the profiler on the first pass in two cases:
+ // 1) a brand new exception is thrown, and
+ // 2) an exception is rethrown.
+ if (fIsFirstPass && (bSkipLastElement || bReplaceStack))
+ {
+ GCX_COOP();
+ EEToProfilerExceptionInterfaceWrapper::ExceptionThrown(pThread);
+ UpdatePerformanceMetrics(pcfThisFrame, bSkipLastElement, bReplaceStack);
+ }
+
+ if (!fUnwindingToFindResumeFrame)
+ {
+ //
+ // update our exception stacktrace, ignoring IL stubs
+ //
+ if (fIsFirstPass && !pMD->IsILStub())
+ {
+ GCX_COOP();
+
+ m_StackTraceInfo.AppendElement(CanAllocateMemory(), uControlPC, sf.SP, pMD, pcfThisFrame);
+ m_StackTraceInfo.SaveStackTrace(CanAllocateMemory(), m_hThrowable, bReplaceStack, bSkipLastElement);
+ }
+
+ //
+ // make callback to debugger and/or profiler
+ //
+ if (fGiveDebuggerAndProfilerNotification)
+ {
+ if (fIsFirstPass)
+ {
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionEnter(pMD);
+
+ // Notify the debugger that we are on the first pass for a managed exception.
+ // Note that this callback is made for every managed frame.
+ EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, uControlPC, sf.SP);
+
+#if defined(DEBUGGING_SUPPORTED)
+ _ASSERTE(this == pThread->GetExceptionState()->m_pCurrentTracker);
+
+ // check if the current exception has been intercepted.
+ if (m_ExceptionFlags.DebuggerInterceptInfo())
+ {
+ // According to the x86 implementation, we don't need to call the ExceptionSearchFunctionLeave()
+ // profiler callback.
+ StackFrame sfInterceptStackFrame;
+ m_DebuggerExState.GetDebuggerInterceptInfo(NULL, NULL,
+ reinterpret_cast<PBYTE *>(&(sfInterceptStackFrame.SP)),
+ NULL, NULL);
+
+ // Save the target unwind frame just like we do when we find a catch clause.
+ m_sfResumeStackFrame = sfInterceptStackFrame;
+ ReturnStatus = FirstPassComplete;
+ goto lExit;
+ }
+#endif // DEBUGGING_SUPPORTED
+
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+ // Attempt to deliver the first chance notification to the AD only *AFTER* the debugger
+ // has done that, provided we have not already delivered it.
+ if (!this->DeliveredFirstChanceNotification())
+ {
+ ExceptionNotifications::DeliverFirstChanceNotification();
+ }
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+ }
+ else
+ {
+#if defined(DEBUGGING_SUPPORTED)
+ _ASSERTE(this == pThread->GetExceptionState()->m_pCurrentTracker);
+
+ // check if the exception is intercepted.
+ if (m_ExceptionFlags.DebuggerInterceptInfo())
+ {
+ MethodDesc* pInterceptMD = NULL;
+ StackFrame sfInterceptStackFrame;
+
+ // check if we have reached the interception point yet
+ m_DebuggerExState.GetDebuggerInterceptInfo(&pInterceptMD, NULL,
+ reinterpret_cast<PBYTE *>(&(sfInterceptStackFrame.SP)),
+ NULL, NULL);
+
+ // If the exception has gone unhandled in the first pass, we wouldn't have a chance
+ // to set the target unwind frame. Check for this case now.
+ if (m_sfResumeStackFrame.IsNull())
+ {
+ m_sfResumeStackFrame = sfInterceptStackFrame;
+ }
+ _ASSERTE(m_sfResumeStackFrame == sfInterceptStackFrame);
+
+ if ((pInterceptMD == pMD) &&
+ (sfInterceptStackFrame == sf))
+ {
+ // If we have reached the stack frame at which the exception is intercepted,
+ // then finish the second pass prematurely.
+ SecondPassIsComplete(pMD, sf);
+ ReturnStatus = SecondPassComplete;
+ goto lExit;
+ }
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ // According to the x86 implementation, we don't need to call the ExceptionUnwindFunctionEnter()
+ // profiler callback when an exception is intercepted.
+ EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionEnter(pMD);
+ }
+ }
+
+ }
+
+#ifdef FEATURE_STACK_PROBE
+ // Don't call a handler if we're within a certain distance of the end of the stack. Could end up here via probe, in
+ // which case guard page is intact, or via hard SO, in which case guard page won't be. So don't check for presence of
+ // guard page, just check for sufficient space on stack.
+ if ( IsStackOverflowException()
+ && !pThread->CanResetStackTo((void*)sf.SP))
+ {
+ EH_LOG((LL_INFO100, " STACKOVERFLOW: IGNOREFRAME: stack frame too close to guard page: sf.SP: %p\n", sf.SP));
+ }
+ else
+#endif // FEATURE_STACK_PROBE
+ {
+ IJitManager* pJitMan = pcfThisFrame->GetJitManager();
+ const METHODTOKEN& MethToken = pcfThisFrame->GetMethodToken();
+
+ EH_CLAUSE_ENUMERATOR EnumState;
+ unsigned EHCount;
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // If the method cannot handle the exception (e.g. cannot handle the CE), then simply bail out
+ // without examining the EH clauses in it.
+ if (!fCanMethodHandleException)
+ {
+ LOG((LF_EH, LL_INFO100, "ProcessManagedCallFrame - CEHelper decided not to look for exception handlers in the method(MD:%p).\n", pMD));
+
+ // Set the flag to skip this frame since the CE cannot be delivered
+ _ASSERTE(currentSeverity == ProcessCorrupting);
+
+ // Force EHClause count to be zero
+ EHCount = 0;
+ }
+ else
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ {
+ EHCount = pJitMan->InitializeEHEnumeration(MethToken, &EnumState);
+ }
+
+
+ if (!fIsFirstPass)
+ {
+ // For a method that may have nested funclets, it is possible that a reference may be
+ // dead at the point where control flow left the method but may become active once
+ // a funclet is executed.
+ //
+ // Upon returning from the funclet but before the next funclet is invoked, a GC
+ // may happen if we are in preemptive mode. Since the GC stackwalk will commence
+ // at the original IP at which control left the method, it can result in the reference
+ // not being updated (since it was dead at the point control left the method) if the object
+ // is moved during GC.
+ //
+ // To address this, we will indefinitely switch to COOP mode while enumerating, and invoking,
+ // funclets.
+ //
+ // This switch is also required for another scenario: we may be in the unwind phase and the current frame
+ // may not have any termination handlers to be invoked (i.e. it may have zero EH clauses applicable to
+ // the unwind phase). If we do not switch to COOP mode for such a frame, we could remain in preemptive mode.
+ // Upon returning from ProcessOSExceptionNotification in ProcessCLRException, when we attempt to
+ // switch to COOP mode to update the LastUnwoundEstablisherFrame, we could get blocked due to an
+ // active GC, prior to performing the update.
+ //
+ // In this case, if the GC stackwalk encounters the current frame and attempts to check if it has been
+ // unwound by an exception, then even though it has logically been unwound (especially since it had no
+ // termination handlers), the stackwalk will not recognize it as unwound and would thus attempt to report
+ // references from it, which is incorrect.
+ //
+ // Thus, when unwinding, we will always switch to COOP mode indefinitely, irrespective of whether
+ // the frame has EH clauses to be processed or not.
+ GCX_COOP_NO_DTOR();
+
+ // We will also forbid any GC to happen between successive funclet invocations.
+ // This will be automatically undone when the contract goes off the stack as the method
+ // returns back to its caller.
+ BEGINFORBIDGC();
+ }
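+
+ // Note: the ENDFORBIDGC/BEGINFORBIDGC bracketing around the finally/fault funclet
+ // invocation further below temporarily lifts this GC ban while managed code runs,
+ // and then re-establishes it before clause enumeration continues.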
+
+ for (unsigned i = 0; i < EHCount; i++)
+ {
+ EE_ILEXCEPTION_CLAUSE EHClause;
+ PTR_EXCEPTION_CLAUSE_TOKEN pEHClauseToken = pJitMan->GetNextEHClause(&EnumState, &EHClause);
+
+ EH_LOG((LL_INFO100, " considering %s clause [%x,%x], ControlPc is %s clause (offset %x)",
+ (IsFault(&EHClause) ? "fault" :
+ (IsFinally(&EHClause) ? "finally" :
+ (IsFilterHandler(&EHClause) ? "filter" :
+ (IsTypedHandler(&EHClause) ? "typed" : "unknown")))),
+ EHClause.TryStartPC,
+ EHClause.TryEndPC,
+ (ClauseCoversPC(&EHClause, pcfThisFrame->GetRelOffset()) ? "inside" : "outside"),
+ pcfThisFrame->GetRelOffset()
+ ));
+
+ LOG((LF_EH, LL_INFO100, "\n"));
+
+ // If we have a valid EstablisherFrame for the managed frame where
+ // ThreadAbort was raised after the catch block, then see if we
+ // have reached that frame during the exception dispatch. If we
+ // have, then proceed to skip applicable EH clauses.
+ if ((!sfEstablisherOfActualHandlerFrame.IsNull()) && (sfEstablisherFrame == sfEstablisherOfActualHandlerFrame))
+ {
+ // We should have a valid index of the EH clause (corresponding to a catch block) after
+ // which thread abort was raised.
+ _ASSERTE(dwTACatchHandlerClauseIndex > 0);
+ {
+ // Since we have the index, check if the current EH clause index
+ // is less than the saved index. If it is, then it implies that
+ // we are evaluating clauses that lie "before" the EH clause
+ // for the catch block "after" which thread abort was raised.
+ //
+ // Since ThreadAbort has to make forward progress, we will
+ // skip evaluating any such EH clauses. Two things can happen:
+ //
+ // 1) We will find clauses representing handlers beyond the
+ // catch block after which ThreadAbort was raised. Since this is
+ // what we want, we evaluate them.
+ //
+ // 2) There won't be any more clauses implying that the catch block
+ // after which the exception was raised was the outermost
+ // handler in the method. Thus, the exception will escape out,
+ // which is semantically the correct thing to happen.
+ //
+ // The premise of this check is based upon a JIT compiler's implementation
+ // detail: when it generates EH clauses, JIT compiler will order them from
+ // top->bottom (when reading a method) and inside->out when reading nested
+ // clauses.
+ //
+ // This assumption is not new since the basic EH type-matching is reliant
+ // on this very assumption. However, now we have one more candidate that
+ // gets to rely on it.
+ //
+ // Eventually, this enables forward progress of the thread abort exception.
+ if (i <= (dwTACatchHandlerClauseIndex -1))
+ {
+ EH_LOG((LL_INFO100, " skipping the evaluation of EH clause (index=%d) since we cannot process an exception in a handler\n", i));
+ EH_LOG((LL_INFO100, " that exists prior to the one (index=%d) after which ThreadAbort was [re]raised.\n", dwTACatchHandlerClauseIndex));
+ continue;
+ }
+ }
+ }
+
+
+ // see comment above where we set pLimitClauseToken
+ if (pEHClauseToken == pLimitClauseToken)
+ {
+ EH_LOG((LL_INFO100, " found limit clause, stopping clause enumeration\n"));
+
+ // <GC_FUNCLET_REFERENCE_REPORTING>
+ //
+ // If we are here, the exception has been identified to be handled by a duplicate catch clause
+ // that is protecting the current funclet. The call to SetEnclosingClauseInfo (below)
+ // will set up the CallerSP (for GC reference reporting) to be the SP of the
+ // caller of the current funclet (where the exception has happened, or is escaping from).
+ //
+ // However, we need the CallerSP to be set as the SP of the caller of the
+ // actual frame that will contain (and invoke) the catch handler corresponding to
+ // the duplicate clause. But that isn't available right now and we can only know
+ // once we unwind upstack to reach the target frame.
+ //
+ // Thus, upon reaching the target frame and before invoking the catch handler,
+ // we will fix up the CallerSP (for GC reporting) to be that of the caller of the
+ // target frame that will be invoking the actual catch handler.
+ //
+ // </GC_FUNCLET_REFERENCE_REPORTING>
+ //
+ // for catch clauses
+ SetEnclosingClauseInfo(fIsFunclet,
+ pcfThisFrame->GetRelOffset(),
+ GetSP(pcfThisFrame->GetRegisterSet()->pCallerContext));
+ fUnwindFinished = true;
+ m_fFixupCallerSPForGCReporting = true;
+ break;
+ }
+
+ BOOL fTermHandler = IsFaultOrFinally(&EHClause);
+ fFoundHandler = FALSE;
+
+ if (( fIsFirstPass && fTermHandler) ||
+ (!fIsFirstPass && !fTermHandler))
+ {
+ continue;
+ }
+
+ if (ClauseCoversPC(&EHClause, pcfThisFrame->GetRelOffset()))
+ {
+ EH_LOG((LL_INFO100, " clause covers ControlPC\n"));
+
+ dwHandlerStartPC = pJitMan->GetCodeAddressForRelOffset(MethToken, EHClause.HandlerStartPC);
+
+ if (fUnwindingToFindResumeFrame)
+ {
+ CONSISTENCY_CHECK(fIsFirstPass);
+ if (!fTermHandler)
+ {
+ // m_pClauseForCatchToken can only be NULL for continuable exceptions, but we should never
+ // get here if we are handling continuable exceptions. fUnwindingToFindResumeFrame is
+ // only true at the end of the first pass.
+ _ASSERTE(m_pClauseForCatchToken != NULL);
+
+ // handlers match and not duplicate?
+ EH_LOG((LL_INFO100, " RESUMEFRAME: catch handler: [%x,%x], this handler: [%x,%x] %s\n",
+ m_ClauseForCatch.HandlerStartPC,
+ m_ClauseForCatch.HandlerEndPC,
+ EHClause.HandlerStartPC,
+ EHClause.HandlerEndPC,
+ IsDuplicateClause(&EHClause) ? "[duplicate]" : ""));
+
+ if ((m_ClauseForCatch.HandlerStartPC == EHClause.HandlerStartPC) &&
+ (m_ClauseForCatch.HandlerEndPC == EHClause.HandlerEndPC))
+ {
+ EH_LOG((LL_INFO100, " RESUMEFRAME: found clause with same handler as catch\n"));
+ if (!IsDuplicateClause(&EHClause))
+ {
+ CONSISTENCY_CHECK(fIsFirstPass);
+
+ if (fProcessThisFrameToFindResumeFrameOnly)
+ {
+ EH_LOG((LL_INFO100, " RESUMEFRAME: identified real resume frame, \
+ but rude thread abort is initiated: %p\n", sf.SP));
+
+ // We have found the real resume frame. However, rude thread abort
+ // has been initiated. Thus, we need to continue the first pass
+ // as if we have not found a handler yet. To do so, we need to
+ // reset all the information we have saved when we find the handler.
+ m_ExceptionFlags.ResetUnwindingToFindResumeFrame();
+
+ m_uCatchToCallPC = NULL;
+ m_pClauseForCatchToken = NULL;
+
+ m_sfResumeStackFrame.Clear();
+ ReturnStatus = UnwindPending;
+ }
+ else
+ {
+ EH_LOG((LL_INFO100, " RESUMEFRAME: identified real resume frame: %p\n", sf.SP));
+
+ // Save off the index and the EstablisherFrame of the EH clause of the non-duplicate handler
+ // that decided to handle the exception. We may need it
+ // if a ThreadAbort is raised after the catch block
+ // executes.
+ m_dwIndexClauseForCatch = i + 1;
+ m_sfEstablisherOfActualHandlerFrame = sfEstablisherFrame;
+#ifdef _TARGET_AMD64_
+ m_sfCallerOfActualHandlerFrame = EECodeManager::GetCallerSp(pcfThisFrame->pRD);
+#else
+ // On ARM & ARM64, the EstablisherFrame is the value of SP at the time a function was called and before its prolog
+ // executed. Effectively, it is the SP of the caller.
+ m_sfCallerOfActualHandlerFrame = sfEstablisherFrame.SP;
+#endif
+
+ ReturnStatus = FirstPassComplete;
+ }
+ }
+ break;
+ }
+ }
+ }
+ else if (IsFilterHandler(&EHClause))
+ {
+ DWORD_PTR dwResult = EXCEPTION_CONTINUE_SEARCH;
+ DWORD_PTR dwFilterStartPC;
+
+ dwFilterStartPC = pJitMan->GetCodeAddressForRelOffset(MethToken, EHClause.FilterOffset);
+
+ EH_LOG((LL_INFO100, " calling filter\n"));
+
+ // @todo : If user code throws a StackOverflowException and we have plenty of stack,
+ // we probably don't want to be so strict in not calling handlers.
+ if (! IsStackOverflowException())
+ {
+#ifndef FEATURE_PAL
+ // Check for any impersonation on the frame and save that for use during EH filter callbacks
+ OBJECTREF* pRefSecDesc = pcfThisFrame->GetAddrOfSecurityObject();
+ if (pRefSecDesc != NULL && *pRefSecDesc != NULL)
+ {
+ GCX_COOP();
+ FRAMESECDESCREF fsdRef = (FRAMESECDESCREF)*pRefSecDesc;
+ if (fsdRef->GetCallerToken() != NULL)
+ {
+ m_hCallerToken = fsdRef->GetCallerToken();
+ STRESS_LOG1(LF_EH, LL_INFO100, "In COMPlusThrowCallback. Found non-NULL callertoken on FSD:%d\n",m_hCallerToken);
+ if (!m_ExceptionFlags.ImpersonationTokenSet())
+ {
+ m_hImpersonationToken = fsdRef->GetImpersonationToken();
+ STRESS_LOG1(LF_EH, LL_INFO100, "In COMPlusThrowCallback. Found non-NULL impersonationtoken on FSD:%d\n",m_hImpersonationToken);
+ m_ExceptionFlags.SetImpersonationTokenSet();
+ }
+ }
+ }
+ BOOL impersonating = FALSE;
+#endif // !FEATURE_PAL
+
+ // Save the current EHClause index and establisher of the clause post which
+ // ThreadAbort was raised. This is done because an exception handled inside a filter
+ // resets the state that was set up before the filter was invoked.
+ //
+ // We don't have to do this for finally/fault clauses since they execute
+ // in the second pass and by that time, we have already skipped the required
+ // EH clauses in the applicable stackframe.
+ DWORD dwPreFilterTACatchHandlerClauseIndex = dwTACatchHandlerClauseIndex;
+ StackFrame sfPreFilterEstablisherOfActualHandlerFrame = sfEstablisherOfActualHandlerFrame;
+
+ EX_TRY
+ {
+#ifndef FEATURE_PAL
+ if (m_hCallerToken != NULL)
+ {
+ STRESS_LOG1(LF_EH, LL_INFO100, "About to call filter with hCallerToken = %d\n",m_hCallerToken);
+ // CLR_ImpersonateLoggedOnUser fails fast on error
+ COMPrincipal::CLR_ImpersonateLoggedOnUser(m_hCallerToken);
+ impersonating = TRUE;
+ }
+#endif // !FEATURE_PAL
+
+ // We want to call filters even if the thread is aborting, so suppress abort
+ // checks while the filter runs.
+ ThreadPreventAsyncHolder preventAbort(TRUE);
+
+ // for filter clauses
+ SetEnclosingClauseInfo(fIsFunclet,
+ pcfThisFrame->GetRelOffset(),
+ GetSP(pcfThisFrame->GetRegisterSet()->pCallerContext));
+#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ // On ARM & ARM64, the OS passes us the CallerSP for the frame for which the personality routine has been invoked.
+ // Since IL filters are invoked in the first pass, we pass this CallerSP to the filter funclet, which will
+ // then look up the actual frame pointer value using it since we don't have a frame pointer to pass to it
+ // directly.
+ //
+ // Assert our invariants (we had set them up in InitializeCrawlFrame):
+ REGDISPLAY *pCurRegDisplay = pcfThisFrame->GetRegisterSet();
+
+ // 1) In the first pass, we don't have a valid current context IP
+ _ASSERTE(GetIP(pCurRegDisplay->pCurrentContext) == 0);
+ // 2) Our caller context and caller SP are valid
+ _ASSERTE(pCurRegDisplay->IsCallerContextValid && pCurRegDisplay->IsCallerSPValid);
+ // 3) CallerSP is intact
+ _ASSERTE(GetSP(pCurRegDisplay->pCallerContext) == GetRegdisplaySP(pCurRegDisplay));
+#endif // _TARGET_ARM_ || _TARGET_ARM64_
+ {
+ // CallHandler expects to be in COOP mode.
+ GCX_COOP();
+ dwResult = CallHandler(dwFilterStartPC, sf, &EHClause, pMD, Filter ARM_ARG(pCurRegDisplay->pCallerContext) ARM64_ARG(pCurRegDisplay->pCallerContext));
+ }
+
+#ifndef FEATURE_PAL
+ if (impersonating)
+ {
+ STRESS_LOG1(LF_EH, LL_INFO100, "After calling filter, resetting to hImpersonationToken = %d\n",m_hImpersonationToken);
+ // CLR_ImpersonateLoggedOnUser fails fast on error
+ COMPrincipal::CLR_ImpersonateLoggedOnUser(m_hImpersonationToken);
+ impersonating = FALSE;
+ }
+#endif // !FEATURE_PAL
+ }
+ EX_CATCH
+ {
+#ifndef FEATURE_PAL
+ if (impersonating)
+ {
+ STRESS_LOG1(LF_EH, LL_INFO100, "Filter threw exception. In Catch. Resetting to hImpersonationToken = %d\n",m_hImpersonationToken);
+ // CLR_ImpersonateLoggedOnUser fails fast on error
+ COMPrincipal::CLR_ImpersonateLoggedOnUser(m_hImpersonationToken);
+ impersonating = FALSE;
+ }
+#endif // !FEATURE_PAL
+
+ // we've returned from the filter abruptly, now out of managed code
+ m_EHClauseInfo.SetManagedCodeEntered(FALSE);
+
+ EH_LOG((LL_INFO100, " filter threw an exception\n"));
+
+ // notify profiler
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterLeave();
+ m_EHClauseInfo.ResetInfo();
+
+ // continue search
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ // Reset the EH clause Index and Establisher of the TA reraise clause
+ pThread->m_dwIndexClauseForCatch = dwPreFilterTACatchHandlerClauseIndex;
+ pThread->m_sfEstablisherOfActualHandlerFrame = sfPreFilterEstablisherOfActualHandlerFrame;
+
+ if (pThread->IsRudeAbortInitiated() && !pThread->IsWithinCer(pcfThisFrame))
+ {
+ EH_LOG((LL_INFO100, " IGNOREFRAME: rude abort\n"));
+ goto lExit;
+ }
+ }
+ else
+ {
+ EH_LOG((LL_INFO100, " STACKOVERFLOW: filter not called due to lack of guard page\n"));
+ // continue search
+ }
+
+ if (EXCEPTION_EXECUTE_HANDLER == dwResult)
+ {
+ fFoundHandler = TRUE;
+ }
+ else if (EXCEPTION_CONTINUE_SEARCH != dwResult)
+ {
+ //
+ // Behavior is undefined according to the spec. Let's not execute the handler.
+ //
+ }
+ EH_LOG((LL_INFO100, " filter returned %s\n", (fFoundHandler ? "EXCEPTION_EXECUTE_HANDLER" : "EXCEPTION_CONTINUE_SEARCH")));
+ }
+ else if (IsTypedHandler(&EHClause))
+ {
+ GCX_COOP();
+
+ TypeHandle thrownType = TypeHandle();
+ OBJECTREF oThrowable = m_pThread->GetThrowable();
+ if (oThrowable != NULL)
+ {
+ oThrowable = PossiblyUnwrapThrowable(oThrowable, pcfThisFrame->GetAssembly());
+ thrownType = oThrowable->GetTrueTypeHandle();
+ }
+
+ if (!thrownType.IsNull())
+ {
+ if (EHClause.ClassToken == mdTypeRefNil)
+ {
+ // this is a catch(...)
+ fFoundHandler = TRUE;
+ }
+ else
+ {
+ TypeHandle typeHnd = pJitMan->ResolveEHClause(&EHClause, pcfThisFrame);
+
+ EH_LOG((LL_INFO100,
+ " clause type = %s\n",
+ (!typeHnd.IsNull() ? typeHnd.GetMethodTable()->GetDebugClassName()
+ : "<couldn't resolve>")));
+ EH_LOG((LL_INFO100,
+ " thrown type = %s\n",
+ thrownType.GetMethodTable()->GetDebugClassName()));
+
+ fFoundHandler = !typeHnd.IsNull() && ExceptionIsOfRightType(typeHnd, thrownType);
+ }
+ }
+ }
+ else
+ {
+ _ASSERTE(fTermHandler);
+ fFoundHandler = TRUE;
+ }
+
+ if (fFoundHandler)
+ {
+ if (fIsFirstPass)
+ {
+ _ASSERTE(IsFilterHandler(&EHClause) || IsTypedHandler(&EHClause));
+
+ EH_LOG((LL_INFO100, " found catch at 0x%p, sp = 0x%p\n", dwHandlerStartPC, sf.SP));
+ m_uCatchToCallPC = dwHandlerStartPC;
+ m_pClauseForCatchToken = pEHClauseToken;
+ m_ClauseForCatch = EHClause;
+
+ m_sfResumeStackFrame = sf;
+
+#if defined(DEBUGGING_SUPPORTED) || defined(PROFILING_SUPPORTED)
+ //
+ // notify the debugger and profiler
+ //
+ if (fGiveDebuggerAndProfilerNotification)
+ {
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchCatcherFound(pMD);
+ }
+
+ if (fIsILStub)
+ {
+ //
+ // NotifyOfCHFFilter has two behaviors
+ // * Notify debugger, get interception info and unwind (function will not return)
+ // In this case, m_sfResumeStackFrame is expected to be NULL or the frame of interception.
+ // We NULL it out because we get the interception event after this point.
+ // * Notify debugger and return.
+ // In this case the normal EH proceeds and we need to reset m_sfResumeStackFrame to the sf catch handler.
+ // TODO: remove this call and try to report the IL catch handler in the IL stub itself.
+ m_sfResumeStackFrame.Clear();
+ EEToDebuggerExceptionInterfaceWrapper::NotifyOfCHFFilter((EXCEPTION_POINTERS*)&m_ptrs, pILStubFrame);
+ m_sfResumeStackFrame = sf;
+ }
+ else
+ {
+ // We don't need to do anything special for continuable exceptions after calling
+ // this callback. We are going to start unwinding anyway.
+ EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedExceptionCatcherFound(pThread, pMD, (TADDR) uMethodStartPC, sf.SP,
+ &EHClause);
+ }
+
+ // If the exception is intercepted, then the target unwind frame may not be the
+ // stack frame we are currently processing, so clear it now. We'll set it
+ // later in second pass.
+ if (pThread->GetExceptionState()->GetFlags()->DebuggerInterceptInfo())
+ {
+ m_sfResumeStackFrame.Clear();
+ }
+#endif //defined(DEBUGGING_SUPPORTED) || defined(PROFILING_SUPPORTED)
+
+ //
+ // BEGIN resume frame code
+ //
+ EH_LOG((LL_INFO100, " RESUMEFRAME: initial resume stack frame: %p\n", sf.SP));
+
+ if (IsDuplicateClause(&EHClause))
+ {
+ EH_LOG((LL_INFO100, " RESUMEFRAME: need to unwind to find real resume frame\n"));
+ m_ExceptionFlags.SetUnwindingToFindResumeFrame();
+
+ // This is a duplicate catch funclet. As a result, we will continue to let the
+ // exception dispatch proceed upstack to find the actual frame where the
+ // funclet lives.
+ //
+ // At the same time, we also need to save the CallerSP of the frame containing
+ // the catch funclet (like we do for other funclets). If the current frame
+ // represents a funclet that was invoked by JITted code, then we will save
+ // the caller SP of the current frame when we see it during the 2nd pass -
+ // refer to the use of "pLimitClauseToken" in the code above.
+ //
+ // However, that is not the callerSP of the frame containing the catch funclet
+ // as the actual frame containing the funclet (and where it will be executed)
+ // is the one that will be the target of unwind during the first pass.
+ //
+ // To correctly get that, we will determine if the current frame is a funclet
+ // and if it was invoked from JITted code. If this is true, then current frame
+ // represents a finally funclet invoked non-exceptionally (from its parent frame
+ // or yet another funclet). In such a case, we will set a flag indicating that
+ // we need to reset the enclosing clause SP for the catch funclet and later,
+ // when 2nd pass reaches the actual frame containing the catch funclet to be
+ // executed, we will update the enclosing clause SP if the
+ // "m_fResetEnclosingClauseSPForCatchFunclet" flag is set, just prior to
+ // invoking the catch funclet.
+ if (fIsFunclet)
+ {
+ REGDISPLAY* pCurRegDisplay = pcfThisFrame->GetRegisterSet();
+ _ASSERTE(pCurRegDisplay->IsCallerContextValid);
+ TADDR adrReturnAddressFromFunclet = PCODEToPINSTR(GetIP(pCurRegDisplay->pCallerContext)) - STACKWALK_CONTROLPC_ADJUST_OFFSET;
+ m_fResetEnclosingClauseSPForCatchFunclet = ExecutionManager::IsManagedCode(adrReturnAddressFromFunclet);
+ }
+
+ ReturnStatus = UnwindPending;
+ break;
+ }
+
+ EH_LOG((LL_INFO100, " RESUMEFRAME: no extra unwinding required, real resume frame: %p\n", sf.SP));
+
+ // Save off the index and the EstablisherFrame of the EH clause of the non-duplicate handler
+ // that decided to handle the exception. We may need it
+ // if a ThreadAbort is raised after the catch block
+ // executes.
+ m_dwIndexClauseForCatch = i + 1;
+ m_sfEstablisherOfActualHandlerFrame = sfEstablisherFrame;
+
+#ifdef _TARGET_AMD64_
+ m_sfCallerOfActualHandlerFrame = EECodeManager::GetCallerSp(pcfThisFrame->pRD);
+#else
+ // On ARM & ARM64, the EstablisherFrame is the value of SP at the time a function was called and before its prolog
+ // executed. Effectively, it is the SP of the caller.
+ m_sfCallerOfActualHandlerFrame = sfEstablisherFrame.SP;
+#endif
+ //
+ // END resume frame code
+ //
+
+ ReturnStatus = FirstPassComplete;
+ break;
+ }
+ else
+ {
+ EH_LOG((LL_INFO100, " found finally/fault at 0x%p\n", dwHandlerStartPC));
+ _ASSERTE(fTermHandler);
+
+ // @todo : If user code throws a StackOverflowException and we have plenty of stack,
+ // we probably don't want to be so strict in not calling handlers.
+ if (!IsStackOverflowException())
+ {
+ DWORD_PTR dwStatus;
+
+ // for finally clauses
+ SetEnclosingClauseInfo(fIsFunclet,
+ pcfThisFrame->GetRelOffset(),
+ GetSP(pcfThisFrame->GetRegisterSet()->pCallerContext));
+
+ // We have switched to indefinite COOP mode just before this loop started.
+ // Since we also forbid GC during second pass, disable it now since
+ // invocation of managed code can result in a GC.
+ ENDFORBIDGC();
+ dwStatus = CallHandler(dwHandlerStartPC, sf, &EHClause, pMD, FaultFinally ARM_ARG(pcfThisFrame->GetRegisterSet()->pCurrentContext) ARM64_ARG(pcfThisFrame->GetRegisterSet()->pCurrentContext));
+
+ // Once we return from a funclet, forbid GC again (refer to comment before start of the loop for details)
+ BEGINFORBIDGC();
+ }
+ else
+ {
+ EH_LOG((LL_INFO100, " STACKOVERFLOW: finally not called due to lack of guard page\n"));
+ // continue search
+ }
+
+ //
+ // will continue to find next fault/finally in this call frame
+ //
+ }
+ } // if fFoundHandler
+ } // if clause covers PC
+ } // foreach eh clause
+ } // if stack frame is far enough away from guard page
+
+ //
+ // notify the profiler
+ //
+ if (fGiveDebuggerAndProfilerNotification)
+ {
+ if (fIsFirstPass)
+ {
+ if (!fUnwindingToFindResumeFrame)
+ {
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pMD);
+ }
+ }
+ else
+ {
+ if (!fUnwindFinished)
+ {
+ EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pMD);
+ }
+ }
+ }
+ } // fIgnoreThisFrame
+
+lExit:
+ return ReturnStatus;
+}
+
+// <64bit_And_Arm_Specific>
+
+// For funclets, add support for unwinding frame chain during SO. These definitions will be automatically picked up by
+// BEGIN_SO_TOLERANT_CODE/END_SO_TOLERANT_CODE usage in ExceptionTracker::CallHandler below.
+//
+// This is required since funclet invocation is the only case of calling managed code from VM that is not wrapped by
+// assembly helper with associated personality routine. The personality routine will invoke CleanupForSecondPass to
+// release exception trackers and unwind frame chain.
+//
+// We need to do the same work as CleanupForSecondPass for funclet invocation in the face of SO. Thus, we redefine OPTIONAL_SO_CLEANUP_UNWIND
+// below. This will perform frame chain unwind inside the "__finally" block that is part of the END_SO_TOLERANT_CODE macro only in the face
+// of an SO.
+//
+// The second part of work, releasing exception trackers, is done inside the "__except" block also part of the END_SO_TOLERANT_CODE by invoking
+// ClearExceptionStateAfterSO.
+//
+// </64bit_And_Arm_Specific>
+
+#undef OPTIONAL_SO_CLEANUP_UNWIND
+
+#define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame) if (pThread->GetFrame() < pFrame) { UnwindFrameChain(pThread, pFrame); }
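+
+// A note on the comparison above (an observation, not new behavior): explicit Frames
+// live on the stack, and on a downward-growing stack the newest Frames sit at the lowest
+// addresses. So pThread->GetFrame() < pFrame means Frames newer than pFrame still hang
+// off the thread; UnwindFrameChain pops exactly those before SO dispatch continues upstack.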
+
+#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+// This is an assembly helper that enables us to call into EH funclets.
+EXTERN_C DWORD_PTR STDCALL CallEHFunclet(Object *pThrowable, UINT_PTR pFuncletToInvoke, UINT_PTR *pFirstNonVolReg, UINT_PTR *pFuncletCallerSP);
+
+// This is an assembly helper that enables us to call into EH filter funclets.
+EXTERN_C DWORD_PTR STDCALL CallEHFilterFunclet(Object *pThrowable, TADDR CallerSP, UINT_PTR pFuncletToInvoke, UINT_PTR *pFuncletCallerSP);
+#endif // _TARGET_ARM_ || _TARGET_ARM64_
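+
+// For reference (matching the call sites in CallHandler below): the pFirstNonVolReg
+// argument is the address of the first saved non-volatile register in the funclet's
+// context -- &R4 on ARM, &X19 on ARM64 -- from which the assembly helper establishes
+// the non-volatile register state the funclet expects before jumping to it.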
+
+DWORD_PTR ExceptionTracker::CallHandler(
+ UINT_PTR uHandlerStartPC,
+ StackFrame sf,
+ EE_ILEXCEPTION_CLAUSE* pEHClause,
+ MethodDesc* pMD,
+ EHFuncletType funcletType
+ ARM_ARG(PCONTEXT pContextRecord)
+ ARM64_ARG(PCONTEXT pContextRecord)
+ )
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ DWORD_PTR dwResumePC;
+ OBJECTREF throwable;
+ HandlerFn* pfnHandler = (HandlerFn*)uHandlerStartPC;
+
+ EH_LOG((LL_INFO100, " calling handler at 0x%p, sp = 0x%p\n", uHandlerStartPC, sf.SP));
+
+ Thread* pThread = GetThread();
+
+ // The first parameter specifies whether we want to make callbacks before (true) or after (false)
+ // calling the handler.
+ MakeCallbacksRelatedToHandler(true, pThread, pMD, pEHClause, uHandlerStartPC, sf);
+
+ _ASSERTE(pThread->DetermineIfGuardPagePresent());
+
+ throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pMD->GetAssembly());
+
+ // We probe for stack space before attempting to call a filter, finally, or catch clause. The path from
+ // here to the actual managed code is very short. We must probe, however, because the JIT does not generate a
+ // probe for us upon entry to the handler. This probe ensures we have enough stack space to actually make it
+ // into the managed code.
+ //
+ // In case an SO happens, this macro will also unwind the frame chain before continuing to dispatch the SO
+ // upstack (look at the macro implementation for details).
+ BEGIN_SO_TOLERANT_CODE(pThread);
+
+ // Stores the current SP and BSP, which will be the caller SP and BSP for the funclet.
+ // Note that we are making the assumption here that the SP and BSP don't change from this point
+ // forward until we actually make the call to the funclet. If it's not the case then we will need
+ // some sort of assembly wrappers to help us out.
+ CallerStackFrame csfFunclet = CallerStackFrame((UINT_PTR)GetCurrentSP());
+ this->m_EHClauseInfo.SetManagedCodeEntered(TRUE);
+ this->m_EHClauseInfo.SetCallerStackFrame(csfFunclet);
+
+#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ // Invoke the funclet. We pass throwable only when invoking the catch block.
+ // Since the actual caller of the funclet is the assembly helper, pass the reference
+ // to the CallerStackFrame instance so that it can be updated.
+ CallerStackFrame* pCallerStackFrame = this->m_EHClauseInfo.GetCallerStackFrameForEHClauseReference();
+ UINT_PTR *pFuncletCallerSP = &(pCallerStackFrame->SP);
+ if (funcletType != EHFuncletType::Filter)
+ {
+ dwResumePC = CallEHFunclet((funcletType == EHFuncletType::Catch)?OBJECTREFToObject(throwable):(Object *)NULL,
+#ifdef _TARGET_ARM_
+ DataPointerToThumbCode<UINT_PTR, HandlerFn *>(pfnHandler),
+ (UINT_PTR*)&(pContextRecord->R4),
+#else
+ (UINT_PTR)pfnHandler,
+ &(pContextRecord->X19),
+#endif // _TARGET_ARM_
+ pFuncletCallerSP);
+ }
+ else
+ {
+ // For invoking IL filter funclet, we pass the CallerSP to the funclet using which
+ // it will retrieve the framepointer for accessing the locals in the parent
+ // method.
+ dwResumePC = CallEHFilterFunclet(OBJECTREFToObject(throwable),
+ GetSP(pContextRecord),
+#ifdef _TARGET_ARM_
+ DataPointerToThumbCode<UINT_PTR, HandlerFn *>(pfnHandler),
+#else
+ (UINT_PTR)pfnHandler,
+#endif // _TARGET_ARM_
+ pFuncletCallerSP);
+ }
+#else // defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ //
+ // Invoke the funclet.
+ //
+ dwResumePC = pfnHandler(sf.SP, OBJECTREFToObject(throwable));
+#endif // _TARGET_ARM_ || _TARGET_ARM64_
+
+ this->m_EHClauseInfo.SetManagedCodeEntered(FALSE);
+
+ END_SO_TOLERANT_CODE;
+
+ // The first parameter specifies whether we want to make callbacks before (true) or after (false)
+ // calling the handler.
+ MakeCallbacksRelatedToHandler(false, pThread, pMD, pEHClause, uHandlerStartPC, sf);
+
+ return dwResumePC;
+}
+
+#undef OPTIONAL_SO_CLEANUP_UNWIND
+#define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame)
+
+
+//
+ // this must be done after the second pass has run; it does not
+// reference anything on the stack, so it is safe to run in an
+// SEH __except clause as well as a C++ catch clause.
+//
+// static
+void ExceptionTracker::PopTrackers(
+ void* pStackFrameSP
+ )
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ StackFrame sf((UINT_PTR)pStackFrameSP);
+
+ // Only call into PopTrackers if we have a managed thread and an exception in progress.
+ // Otherwise, the call below (to PopTrackers) is a no-op. If this ever changes, then this short-circuit needs to be fixed.
+ Thread *pCurThread = GetThread();
+ if ((pCurThread != NULL) && (pCurThread->GetExceptionState()->IsExceptionInProgress()))
+ {
+ // Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException
+ // for details on the usage of this COOP switch.
+ GCX_COOP();
+
+ PopTrackers(sf, false);
+ }
+}
+
+//
+// during the second pass, an exception might escape out to
+// unmanaged code where it is swallowed (or potentially rethrown).
+// The current tracker is abandoned in this case, and if a rethrow
+// does happen in unmanaged code, this is unfortunately treated as
+// a brand new exception. This is unavoidable because if two
+// exceptions escape out to unmanaged code in this manner, a subsequent
+// rethrow cannot be disambiguated as corresponding to the nested vs.
+// the original exception.
+void ExceptionTracker::PopTrackerIfEscaping(
+ void* pStackPointer
+ )
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ Thread* pThread = GetThread();
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+ ExceptionTracker* pTracker = pExState->m_pCurrentTracker;
+ CONSISTENCY_CHECK((NULL == pTracker) || pTracker->IsValid());
+
+ // If we are resuming in managed code (albeit further up the stack) we will still need this
+ // tracker. Otherwise we are either propagating into unmanaged code -- with the rethrow
+ // issues mentioned above -- or we are going unhandled.
+ //
+ // Note that we don't distinguish unmanaged code in the EE vs. unmanaged code outside the
+ // EE. We could use the types of the Frames above us to make this distinction. Without
+ // this, the technique of EX_TRY/EX_CATCH/EX_RETHROW inside the EE will lose its tracker
+ // and have to rely on LastThrownObject in the rethrow. Along the same lines, unhandled
+ // exceptions only have access to LastThrownObject.
+ //
+ // There may not be a current tracker if, for instance, UMThunk has dispatched into managed
+ // code via CallDescr. In that case, CallDescr may pop the tracker, leaving UMThunk with
+ // nothing to do.
+
+ if (pTracker && pTracker->m_sfResumeStackFrame.IsNull())
+ {
+ StackFrame sf((UINT_PTR)pStackPointer);
+ StackFrame sfTopMostStackFrameFromFirstPass = pTracker->GetTopmostStackFrameFromFirstPass();
+
+ // Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException
+ // for details on the usage of this COOP switch.
+ GCX_COOP();
+ ExceptionTracker::PopTrackers(sf, true);
+ }
+}
+
+//
+// static
+void ExceptionTracker::PopTrackers(
+ StackFrame sfResumeFrame,
+ bool fPopWhenEqual
+ )
+{
+ CONTRACTL
+ {
+ // Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException
+ // for details on the mode being COOP here.
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ Thread* pThread = GetThread();
+ ExceptionTracker* pTracker = (pThread ? pThread->GetExceptionState()->m_pCurrentTracker : NULL);
+
+ // NOTE:
+ //
+ // This method is a no-op when there is no managed Thread object. We detect such a case and short-circuit out in ExceptionTracker::PopTrackers.
+ // If this ever changes, then please revisit that method and fix it up appropriately.
+
+ // If this tracker does not have valid stack ranges, then we likely
+ // came here while the tracker was being set up and an exception took
+ // place.
+ //
+ // In such a case, we will not pop off the tracker.
+ if (pTracker && pTracker->m_ScannedStackRange.IsEmpty())
+ {
+ // skip any others with empty ranges...
+ do
+ {
+ pTracker = pTracker->m_pPrevNestedInfo;
+ }
+ while (pTracker && pTracker->m_ScannedStackRange.IsEmpty());
+
+ // pTracker is now the first non-empty one, make sure it doesn't need popping
+ // if it does, then someone let an exception propagate out of the exception dispatch code
+
+ _ASSERTE(!pTracker || (pTracker->m_ScannedStackRange.GetUpperBound() > sfResumeFrame));
+ return;
+ }
+
+#if defined(DEBUGGING_SUPPORTED)
+ DWORD_PTR dwInterceptStackFrame = 0;
+
+ // This method may be called on an unmanaged thread, in which case no interception can be done.
+ if (pTracker)
+ {
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+
+ // If the exception is intercepted, then pop trackers according to the stack frame at which
+ // the exception is intercepted. We must retrieve the frame pointer before we start popping trackers.
+ if (pExState->GetFlags()->DebuggerInterceptInfo())
+ {
+ pExState->GetDebuggerState()->GetDebuggerInterceptInfo(NULL, NULL, (PBYTE*)&dwInterceptStackFrame,
+ NULL, NULL);
+ }
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ while (pTracker)
+ {
+ // When we are about to pop off a tracker, it should
+ // have a stack range set up.
+ _ASSERTE(!pTracker->m_ScannedStackRange.IsEmpty());
+
+ ExceptionTracker* pPrev = pTracker->m_pPrevNestedInfo;
+
+ // <TODO>
+ // with new tracker collapsing code, we will only ever pop one of these at a time
+ // at the end of the 2nd pass. However, CLRException::HandlerState::SetupCatch
+ // still uses this function and we still need to revisit how it interacts with
+ // ExceptionTrackers
+ // </TODO>
+
+ if ((fPopWhenEqual && (pTracker->m_ScannedStackRange.GetUpperBound() == sfResumeFrame)) ||
+ (pTracker->m_ScannedStackRange.GetUpperBound() < sfResumeFrame))
+ {
+#if defined(DEBUGGING_SUPPORTED)
+ if (g_pDebugInterface != NULL)
+ {
+ if (pTracker->m_ScannedStackRange.GetUpperBound().SP < dwInterceptStackFrame)
+ {
+ g_pDebugInterface->DeleteInterceptContext(pTracker->m_DebuggerExState.GetDebuggerInterceptContext());
+ }
+ else
+ {
+ _ASSERTE(dwInterceptStackFrame == 0 ||
+ ( dwInterceptStackFrame == sfResumeFrame.SP &&
+ dwInterceptStackFrame == pTracker->m_ScannedStackRange.GetUpperBound().SP ));
+ }
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ ExceptionTracker* pTrackerToFree = pTracker;
+ EH_LOG((LL_INFO100, "Unlinking ExceptionTracker object 0x%p, thread = 0x%p\n", pTrackerToFree, pTrackerToFree->m_pThread));
+ CONSISTENCY_CHECK(pTracker->IsValid());
+ pTracker = pPrev;
+
+ // Free managed tracker resources, causing the notification -- do this before unlinking the tracker;
+ // this is necessary so that we know an exception is still in flight while we give the notification.
+ FreeTrackerMemory(pTrackerToFree, memManaged);
+
+ // unlink the tracker from the thread
+ pThread->GetExceptionState()->m_pCurrentTracker = pTracker;
+ CONSISTENCY_CHECK((NULL == pTracker) || pTracker->IsValid());
+
+ // free unmanaged tracker resources
+ FreeTrackerMemory(pTrackerToFree, memUnmanaged);
+ }
+ else
+ {
+ break;
+ }
+ }
+}
+
+//
+// static
+ExceptionTracker* ExceptionTracker::GetOrCreateTracker(
+ UINT_PTR ControlPc,
+ StackFrame sf,
+ EXCEPTION_RECORD* pExceptionRecord,
+ CONTEXT* pContextRecord,
+ BOOL bAsynchronousThreadStop,
+ bool fIsFirstPass,
+ StackTraceState* pStackTraceState
+ )
+{
+ CONTRACT(ExceptionTracker*)
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ NOTHROW;
+ PRECONDITION(CheckPointer(pStackTraceState));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ Thread* pThread = GetThread();
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+ ExceptionTracker* pTracker = pExState->m_pCurrentTracker;
+ CONSISTENCY_CHECK((NULL == pTracker) || (pTracker->IsValid()));
+
+ bool fCreateNewTracker = false;
+ bool fIsRethrow = false;
+
+ // Initialize the out parameter.
+ *pStackTraceState = STS_Append;
+
+ if (NULL != pTracker)
+ {
+ CONSISTENCY_CHECK(!pTracker->m_ScannedStackRange.IsEmpty());
+
+ if (pTracker->m_ExceptionFlags.IsRethrown())
+ {
+ EH_LOG((LL_INFO100, ">>continued processing of RETHROWN exception\n"));
+ // This is the first time we've seen this rethrown exception; create a new tracker, but reuse the existing throwable and reset some state.
+
+ fCreateNewTracker = true;
+ fIsRethrow = true;
+ }
+ else
+ if (pTracker->m_ptrs.ExceptionRecord != pExceptionRecord)
+ {
+ EH_LOG((LL_INFO100, ">>NEW exception (exception records do not match)\n"));
+ fCreateNewTracker = true;
+ }
+ else
+ if (sf >= pTracker->m_ScannedStackRange.GetUpperBound())
+ {
+ // We can't have a transition from 1st pass to 2nd pass in this case.
+ _ASSERTE( ( sf == pTracker->m_ScannedStackRange.GetUpperBound() ) ||
+ ( fIsFirstPass || !pTracker->IsInFirstPass() ) );
+
+ if (fIsFirstPass && !pTracker->IsInFirstPass())
+ {
+ // We just transitioned from the 2nd pass to the 1st pass without knowing it.
+ // This means that some unmanaged frame outside of the EE caught the previous exception,
+ // so we should trash the current tracker and create a new one.
+ EH_LOG((LL_INFO100, ">>NEW exception (the previous second pass finishes at some unmanaged frame outside of the EE)\n"));
+
+ {
+ GCX_COOP();
+ ExceptionTracker::PopTrackers(sf, false);
+ }
+
+ fCreateNewTracker = true;
+ }
+ else
+ {
+ EH_LOG((LL_INFO100, ">>continued processing of PREVIOUS exception\n"));
+ // previously seen exception, reuse the tracker
+
+ *pStackTraceState = STS_Append;
+ }
+ }
+ else
+ if (pTracker->m_ScannedStackRange.Contains(sf))
+ {
+ EH_LOG((LL_INFO100, ">>continued processing of PREVIOUS exception (revisiting previously processed frames)\n"));
+ }
+ else
+ {
+ // nested exception
+ EH_LOG((LL_INFO100, ">>new NESTED exception\n"));
+ fCreateNewTracker = true;
+ }
+ }
+ else
+ {
+ EH_LOG((LL_INFO100, ">>NEW exception\n"));
+ fCreateNewTracker = true;
+ }
+
+ if (fCreateNewTracker)
+ {
+#ifdef _DEBUG
+ if (STATUS_STACK_OVERFLOW == pExceptionRecord->ExceptionCode)
+ {
+ CONSISTENCY_CHECK(pExceptionRecord->NumberParameters >= 2);
+ UINT_PTR uFaultAddress = pExceptionRecord->ExceptionInformation[1];
+ UINT_PTR uStackLimit = (UINT_PTR)pThread->GetCachedStackLimit();
+
+ EH_LOG((LL_INFO100, "STATUS_STACK_OVERFLOW accessing address %p %s\n",
+ uFaultAddress));
+
+ UINT_PTR uDispatchStackAvailable;
+
+ uDispatchStackAvailable = uFaultAddress - uStackLimit - HARD_GUARD_REGION_SIZE;
+
+ EH_LOG((LL_INFO100, "%x bytes available for SO processing\n", uDispatchStackAvailable));
+ }
+ else if ((IsComPlusException(pExceptionRecord)) &&
+ (pThread->GetThrowableAsHandle() == g_pPreallocatedStackOverflowException))
+ {
+ EH_LOG((LL_INFO100, "STACKOVERFLOW: StackOverflowException manually thrown\n"));
+ }
+#endif // _DEBUG
+
+ ExceptionTracker* pNewTracker;
+
+ pNewTracker = GetTrackerMemory();
+ if (!pNewTracker)
+ {
+ if (NULL != pExState->m_OOMTracker.m_pThread)
+ {
+ // Fatal error: we spun and could not allocate another tracker
+ // and our existing emergency tracker is in use.
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+
+ pNewTracker = &pExState->m_OOMTracker;
+ }
+
+ new (pNewTracker) ExceptionTracker(ControlPc,
+ pExceptionRecord,
+ pContextRecord);
+
+ CONSISTENCY_CHECK(pNewTracker->IsValid());
+ CONSISTENCY_CHECK(pThread == pNewTracker->m_pThread);
+
+ EH_LOG((LL_INFO100, "___________________________________________\n"));
+ EH_LOG((LL_INFO100, "creating new tracker object 0x%p, thread = 0x%p\n", pNewTracker, pThread));
+
+ GCX_COOP();
+
+ // We always create a throwable in the first pass when we first see an exception.
+ //
+ // On 64bit, every time the exception passes beyond a boundary (e.g. RPInvoke call, or CallDescrWorker call),
+ // the exception trackers that were created below (stack growing down) that boundary are released during the 2nd pass,
+ // if the exception was not caught in managed code. This is because the catcher is in native code and managed exception
+ // data structures are for use by the VM only when the exception is caught in managed code. Also, passing by such
+ // boundaries is our only opportunity to release such internal structures and not leak the memory.
+ //
+ // However, in certain cases, releasing exception trackers at each boundary can prove to be a bit aggressive.
+ // Take the example below, where the "VM" prefix refers to a VM frame and the "M" prefix refers to a managed frame on the stack.
+ //
+ // VM1 -> M1 - VM2 - (via RPInvoke) -> M2
+ //
+ // Let M2 throw E2 that remains unhandled in managed code (i.e. M1 also does not catch it) but is caught in VM1.
+ // Note that the act of throwing an exception also sets it as the LastThrownObject (LTO) against the thread.
+ //
+ // Since this is native code (as mentioned in the comments above, there is no distinction made between VM native
+ // code and external native code) that caught the exception, when the unwind goes past the "Reverse Pinvoke" boundary,
+ // its personality routine will release the tracker for E2. Thus, only the LTO (which is off the Thread object and not
+ // the exception tracker) is indicative of type of the last exception thrown.
+ //
+ // As the unwind goes up the stack, we come across M1 and, since the original tracker was released, we create a new
+ // tracker in the 2nd pass that does not contain details like the active exception object. A managed finally executes in M1
+ // that throws and catches E1 inside the finally block. Thus, LTO is updated to indicate E1 as the last exception thrown.
+ // When the exception is caught in VM1 and VM attempts to get LTO, it gets E1, which is incorrect as it was handled within the finally.
+ // Semantically, it should have got E2 as the LTO.
+ //
+ // To address this, we will *also* create a throwable during the second pass for most exceptions
+ // since most of them have had the corresponding first pass. If we are processing
+ // an exception's second pass, we would have processed its first pass as well and thus, already
+ // created a throwable that would be setup as the LastThrownObject (LTO) against the Thread.
+ //
+ // The only exception to this rule is the longjump - this exception only has a second pass.
+ // Thus, if we are in the second pass and the exception in question is a longjump, then do not create a throwable.
+ //
+ // In the case of the scenario above, when we attempt to create a new exception tracker, during the unwind,
+ // for M1, we will also setup E2 as the throwable in the tracker. As a result, when the finally in M1 throws
+ // and catches the exception, the LTO is correctly updated against the thread (see SafeUpdateLastThrownObject)
+ // and thus, when VM requests for the LTO, it gets E2 as expected.
+ bool fCreateThrowableForCurrentPass = true;
+ if (pExceptionRecord->ExceptionCode == STATUS_LONGJUMP)
+ {
+ // Long jump is only in second pass of exception dispatch
+ _ASSERTE(!fIsFirstPass);
+ fCreateThrowableForCurrentPass = false;
+ }
+
+ // When dealing with a SQL-hosting-like scenario, a real SO
+ // may be caught in native code. As a result, the CRT will perform
+ // STATUS_UNWIND_CONSOLIDATE, which will result in replacing
+ // the exception record in ProcessCLRException. This replaced
+ // exception record will point to the exception record for the original
+ // SO, for which we will not have created a throwable in the first pass
+ // due to the SO-specific early exit code in ProcessCLRException.
+ //
+ // Thus, if we see that we are here for an SO in the 2nd pass, then
+ // we shouldn't attempt to create a throwable.
+ if ((!fIsFirstPass) && (IsSOExceptionCode(pExceptionRecord->ExceptionCode)))
+ {
+ fCreateThrowableForCurrentPass = false;
+ }
+
+#ifdef _DEBUG
+ if ((!fIsFirstPass) && (fCreateThrowableForCurrentPass == true))
+ {
+ // We should have a LTO available if we are creating
+ // a throwable during second pass.
+ _ASSERTE(pThread->LastThrownObjectHandle() != NULL);
+ }
+#endif // _DEBUG
+
+ bool fCreateThrowable = (fCreateThrowableForCurrentPass || (bAsynchronousThreadStop && !pThread->IsAsyncPrevented()));
+ OBJECTREF oThrowable = NULL;
+
+ if (fCreateThrowable)
+ {
+ if (fIsRethrow)
+ {
+ oThrowable = ObjectFromHandle(pTracker->m_hThrowable);
+ }
+ else
+ {
+ // this can take a nested exception
+ oThrowable = CreateThrowable(pExceptionRecord, bAsynchronousThreadStop);
+ }
+ }
+
+ GCX_FORBID(); // we haven't protected oThrowable
+
+ if (pExState->m_pCurrentTracker != pNewTracker) // OOM can make this false
+ {
+ pNewTracker->m_pPrevNestedInfo = pExState->m_pCurrentTracker;
+ pTracker = pNewTracker;
+ pThread->GetExceptionState()->m_pCurrentTracker = pTracker;
+ }
+
+ if (fCreateThrowable)
+ {
+ CONSISTENCY_CHECK(oThrowable != NULL);
+ CONSISTENCY_CHECK(NULL == pTracker->m_hThrowable);
+
+ pThread->SafeSetThrowables(oThrowable);
+
+ if (pTracker->CanAllocateMemory())
+ {
+ pTracker->m_StackTraceInfo.AllocateStackTrace();
+ }
+ }
+ INDEBUG(oThrowable = NULL);
+
+ if (fIsRethrow)
+ {
+ *pStackTraceState = STS_FirstRethrowFrame;
+ }
+ else
+ {
+ *pStackTraceState = STS_NewException;
+ }
+
+ _ASSERTE(pTracker->m_pLimitFrame == NULL);
+ pTracker->ResetLimitFrame();
+ }
+
+ if (!fIsFirstPass)
+ {
+ {
+ // Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException
+ // for details on the usage of this COOP switch.
+ GCX_COOP();
+
+ if (pTracker->IsInFirstPass())
+ {
+ CONSISTENCY_CHECK_MSG(fCreateNewTracker || pTracker->m_ScannedStackRange.Contains(sf),
+ "Tracker did not receive a first pass!");
+
+ // Save the topmost StackFrame the tracker saw in the first pass before we reset the
+ // scanned stack range.
+ pTracker->m_sfFirstPassTopmostFrame = pTracker->m_ScannedStackRange.GetUpperBound();
+
+ // We have to detect this transition because otherwise we break when unmanaged code
+ // catches our exceptions.
+ EH_LOG((LL_INFO100, ">>tracker transitioned to second pass\n"));
+ pTracker->m_ScannedStackRange.Reset();
+ pTracker->m_ExceptionFlags.SetUnwindHasStarted();
+ if (pTracker->m_ExceptionFlags.UnwindingToFindResumeFrame())
+ {
+ // UnwindingToFindResumeFrame means that in the first pass, we determine that a method
+ // catches the exception, but the method frame we are inspecting is a funclet method frame
+ // and is not the correct frame to resume execution. We need to resume to the correct
+ // method frame before starting the second pass. The correct method frame is most likely
+ // the parent method frame, but it can also be another funclet method frame.
+ //
+ // If the exception transitions from first pass to second pass before we find the parent
+ // method frame, there is only one possibility: some other thread has initiated a rude
+ // abort on the current thread, causing us to skip processing of all method frames.
+ _ASSERTE(pThread->IsRudeAbortInitiated());
+ }
+ // Err on the safe side and just reset everything unconditionally.
+ pTracker->FirstPassIsComplete();
+
+ EEToDebuggerExceptionInterfaceWrapper::ManagedExceptionUnwindBegin(pThread);
+
+ pTracker->ResetLimitFrame();
+ }
+ else
+ {
+ // In the second pass, there's a possibility that UMThunkUnwindFrameChainHandler() has
+ // popped some frames off the frame chain underneath us. Check for this case here.
+ if (pTracker->m_pLimitFrame < pThread->GetFrame())
+ {
+ pTracker->ResetLimitFrame();
+ }
+ }
+ }
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ if (fCreateNewTracker)
+ {
+ // Exception tracker should be in the 2nd pass right now
+ _ASSERTE(!pTracker->IsInFirstPass());
+
+ // The corruption severity of a newly created tracker is NotSet
+ _ASSERTE(pTracker->GetCorruptionSeverity() == NotSet);
+
+ // See comment in CEHelper::SetupCorruptionSeverityForActiveExceptionInUnwindPass for details
+ CEHelper::SetupCorruptionSeverityForActiveExceptionInUnwindPass(pThread, pTracker, FALSE, pExceptionRecord->ExceptionCode);
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ }
+
+ _ASSERTE(pTracker->m_pLimitFrame >= pThread->GetFrame());
+
+ RETURN pTracker;
+}
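+
+// A condensed, illustrative recap of the tracker-creation decision above (the
+// code is authoritative; this is only a summary):
+//
+//   fCreateNewTracker =
+//          (pTracker == NULL)                                     // first exception on this thread
+//       || pTracker->m_ExceptionFlags.IsRethrown()                // rethrow: new tracker, reused throwable
+//       || (pTracker->m_ptrs.ExceptionRecord != pExceptionRecord) // a different exception record
+//       || (sf >= scanned upper bound && fIsFirstPass             // previous 2nd pass ended in
+//           && !pTracker->IsInFirstPass())                        //   unmanaged code outside the EE
+//       || (sf < scanned upper bound                              // nested exception below the
+//           && !pTracker->m_ScannedStackRange.Contains(sf));      //   previously scanned range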
+
+void ExceptionTracker::ResetLimitFrame()
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_pLimitFrame = m_pThread->GetFrame();
+}
+
+//
+// static
+void ExceptionTracker::ResumeExecution(
+ CONTEXT* pContextRecord,
+ EXCEPTION_RECORD* pExceptionRecord
+ )
+{
+ //
+ // This method never returns, so it will leave its
+ // state on the thread if using dynamic contracts.
+ //
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_NOTHROW;
+
+ AMD64_ONLY(STRESS_LOG4(LF_GCROOTS, LL_INFO100, "Resuming after exception at %p, rbx=%p, rsi=%p, rdi=%p\n",
+ GetIP(pContextRecord),
+ pContextRecord->Rbx,
+ pContextRecord->Rsi,
+ pContextRecord->Rdi));
+
+ EH_LOG((LL_INFO100, "resuming execution at 0x%p\n", GetIP(pContextRecord)));
+ EH_LOG((LL_INFO100, "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n"));
+
+ RtlRestoreContext(pContextRecord, pExceptionRecord);
+
+ UNREACHABLE();
+ //
+ // doesn't return
+ //
+}
+
+//
+// static
+OBJECTREF ExceptionTracker::CreateThrowable(
+ PEXCEPTION_RECORD pExceptionRecord,
+ BOOL bAsynchronousThreadStop
+ )
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF oThrowable = NULL;
+ Thread* pThread = GetThread();
+
+
+ if ((!bAsynchronousThreadStop) && IsComPlusException(pExceptionRecord))
+ {
+ oThrowable = pThread->LastThrownObject();
+ }
+ else
+ {
+ oThrowable = CreateCOMPlusExceptionObject(pThread, pExceptionRecord, bAsynchronousThreadStop);
+ }
+
+ return oThrowable;
+}
+
+//
+//static
+BOOL ExceptionTracker::ClauseCoversPC(
+ EE_ILEXCEPTION_CLAUSE* pEHClause,
+ DWORD dwOffset
+ )
+{
+ // TryStartPC and TryEndPC are offsets relative to the start
+ // of the method so we can just compare them to the offset returned
+ // by JitCodeToMethodInfo.
+ //
+ return ((pEHClause->TryStartPC <= dwOffset) && (dwOffset < pEHClause->TryEndPC));
+}
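+
+// For illustration, the covered range is half-open. With a hypothetical clause
+// where TryStartPC == 0x10 and TryEndPC == 0x30:
+//
+//   ClauseCoversPC(&clause, 0x10); // true  - first instruction of the try
+//   ClauseCoversPC(&clause, 0x2F); // true  - last covered offset
+//   ClauseCoversPC(&clause, 0x30); // false - TryEndPC itself is exclusive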
+
+#if defined(DEBUGGING_SUPPORTED)
+BOOL ExceptionTracker::NotifyDebuggerOfStub(Thread* pThread, StackFrame sf, Frame* pCurrentFrame)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ BOOL fDeliveredFirstChanceNotification = FALSE;
+
+ // <TODO>
+ // Remove this once SIS is fully enabled.
+ // </TODO>
+ extern bool g_EnableSIS;
+
+ if (g_EnableSIS)
+ {
+ _ASSERTE(GetThread() == pThread);
+
+ GCX_COOP();
+
+ // For debugger, we may want to notify 1st chance exceptions if they're coming out of a stub.
+ // We recognize stubs as Frames with a M2U transition type. The debugger's stackwalker also
+ // recognizes these frames and publishes ICorDebugInternalFrames in the stackwalk. It's
+ // important to use pFrame as the stack address so that the Exception callback matches up
+ // w/ the ICorDebugInternalFrame stack range.
+ if (CORDebuggerAttached())
+ {
+ if (pCurrentFrame->GetTransitionType() == Frame::TT_M2U)
+ {
+ // Use -1 for the backing store pointer whenever we use the address of a frame as the stack pointer.
+ EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread,
+ (SIZE_T)0,
+ (SIZE_T)pCurrentFrame);
+ fDeliveredFirstChanceNotification = TRUE;
+ }
+ }
+ }
+
+ return fDeliveredFirstChanceNotification;
+}
+
+bool ExceptionTracker::IsFilterStartOffset(EE_ILEXCEPTION_CLAUSE* pEHClause, DWORD_PTR dwHandlerStartPC)
+{
+ EECodeInfo codeInfo((PCODE)dwHandlerStartPC);
+ _ASSERTE(codeInfo.IsValid());
+
+ return pEHClause->FilterOffset == codeInfo.GetRelOffset();
+}
+
+void ExceptionTracker::MakeCallbacksRelatedToHandler(
+ bool fBeforeCallingHandler,
+ Thread* pThread,
+ MethodDesc* pMD,
+ EE_ILEXCEPTION_CLAUSE* pEHClause,
+ DWORD_PTR dwHandlerStartPC,
+ StackFrame sf
+ )
+{
+ // Here we need to make an extra check for filter handlers because we could be calling the catch handler
+ // associated with a filter handler and yet the EH clause we have saved is for the filter handler.
+ BOOL fIsFilterHandler = IsFilterHandler(pEHClause) && ExceptionTracker::IsFilterStartOffset(pEHClause, dwHandlerStartPC);
+ BOOL fIsFaultOrFinallyHandler = IsFaultOrFinally(pEHClause);
+
+ if (fBeforeCallingHandler)
+ {
+ StackFrame sfToStore = sf;
+ if ((this->m_pPrevNestedInfo != NULL) &&
+ (this->m_pPrevNestedInfo->m_EnclosingClauseInfo == this->m_EnclosingClauseInfo))
+ {
+ // If this is a nested exception which has the same enclosing clause as the previous exception,
+ // we should just propagate the clause info from the previous exception.
+ sfToStore = this->m_pPrevNestedInfo->m_EHClauseInfo.GetStackFrameForEHClause();
+ }
+ m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_NONE, (UINT_PTR)dwHandlerStartPC, sfToStore);
+
+ if (pMD->IsILStub())
+ {
+ return;
+ }
+
+ if (fIsFilterHandler)
+ {
+ m_EHClauseInfo.SetEHClauseType(COR_PRF_CLAUSE_FILTER);
+ EEToDebuggerExceptionInterfaceWrapper::ExceptionFilter(pMD, (TADDR) dwHandlerStartPC, pEHClause->FilterOffset, (BYTE*)sf.SP);
+
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterEnter(pMD);
+
+ COUNTER_ONLY(GetPerfCounters().m_Excep.cFiltersExecuted++);
+ }
+ else
+ {
+ EEToDebuggerExceptionInterfaceWrapper::ExceptionHandle(pMD, (TADDR) dwHandlerStartPC, pEHClause->HandlerStartPC, (BYTE*)sf.SP);
+
+ if (fIsFaultOrFinallyHandler)
+ {
+ m_EHClauseInfo.SetEHClauseType(COR_PRF_CLAUSE_FINALLY);
+ EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyEnter(pMD);
+ COUNTER_ONLY(GetPerfCounters().m_Excep.cFinallysExecuted++);
+ }
+ else
+ {
+ m_EHClauseInfo.SetEHClauseType(COR_PRF_CLAUSE_CATCH);
+ EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherEnter(pThread, pMD);
+
+ DACNotify::DoExceptionCatcherEnterNotification(pMD, pEHClause->HandlerStartPC);
+ }
+ }
+ }
+ else
+ {
+ if (pMD->IsILStub())
+ {
+ return;
+ }
+
+ if (fIsFilterHandler)
+ {
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterLeave();
+ }
+ else
+ {
+ if (fIsFaultOrFinallyHandler)
+ {
+ EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyLeave();
+ }
+ else
+ {
+ EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherLeave();
+ }
+ }
+ m_EHClauseInfo.ResetInfo();
+ }
+}
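+
+// An illustrative recap of the callbacks made above, per clause type (the code
+// is authoritative):
+//
+//   filter:        ExceptionFilter (debugger), ExceptionSearchFilterEnter/Leave (profiler)
+//   fault/finally: ExceptionHandle (debugger), ExceptionUnwindFinallyEnter/Leave (profiler)
+//   catch:         ExceptionHandle (debugger), ExceptionCatcherEnter/Leave (profiler),
+//                  DoExceptionCatcherEnterNotification (DAC)
+//
+// Note that all of these are skipped for IL stubs: the method returns early when
+// pMD->IsILStub(), in both the before- and after-handler cases.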
+
+//---------------------------------------------------------------------------------------
+//
+// This function is called by DefaultCatchHandler() to intercept an exception and start an unwind.
+//
+// Arguments:
+// pCurrentEstablisherFrame - unused on WIN64
+// pExceptionRecord - EXCEPTION_RECORD of the exception being intercepted
+//
+// Return Value:
+// ExceptionContinueSearch if the exception cannot be intercepted
+//
+// Notes:
+// If the exception is intercepted, this function never returns.
+//
+
+EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept(X86_FIRST_ARG(EXCEPTION_REGISTRATION_RECORD* pCurrentEstablisherFrame)
+ EXCEPTION_RECORD* pExceptionRecord)
+{
+ if (!CheckThreadExceptionStateForInterception())
+ {
+ return ExceptionContinueSearch;
+ }
+
+ Thread* pThread = GetThread();
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+
+ UINT_PTR uInterceptStackFrame = 0;
+
+ pExState->GetDebuggerState()->GetDebuggerInterceptInfo(NULL, NULL,
+ (PBYTE*)&uInterceptStackFrame,
+ NULL, NULL);
+
+ ClrUnwindEx(pExceptionRecord, (UINT_PTR)pThread, INVALID_RESUME_ADDRESS, uInterceptStackFrame);
+
+ UNREACHABLE();
+}
+#endif // DEBUGGING_SUPPORTED
+
+#ifdef _DEBUG
+inline bool ExceptionTracker::IsValid()
+{
+ bool fRetVal = false;
+
+ EX_TRY
+ {
+ Thread* pThisThread = GetThread();
+ if (m_pThread == pThisThread)
+ {
+ fRetVal = true;
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (!fRetVal)
+ {
+ EH_LOG((LL_ERROR, "ExceptionTracker::IsValid() failed! this = 0x%p\n", this));
+ }
+
+ return fRetVal;
+}
+BOOL ExceptionTracker::ThrowableIsValid()
+{
+ GCX_COOP();
+ CONSISTENCY_CHECK(IsValid());
+
+ BOOL isValid = FALSE;
+
+
+ isValid = (m_pThread->GetThrowable() != NULL);
+
+ return isValid;
+}
+//
+// static
+UINT_PTR ExceptionTracker::DebugComputeNestingLevel()
+{
+ UINT_PTR uNestingLevel = 0;
+ Thread* pThread = GetThread();
+
+ if (pThread)
+ {
+ ExceptionTracker* pTracker;
+ pTracker = pThread->GetExceptionState()->m_pCurrentTracker;
+
+ while (pTracker)
+ {
+ uNestingLevel++;
+ pTracker = pTracker->m_pPrevNestedInfo;
+ }
+ }
+
+ return uNestingLevel;
+}
+void DumpClauses(IJitManager* pJitMan, const METHODTOKEN& MethToken, UINT_PTR uMethodStartPC, UINT_PTR dwControlPc)
+{
+ EH_CLAUSE_ENUMERATOR EnumState;
+ unsigned EHCount;
+
+ EH_LOG((LL_INFO1000, " | uMethodStartPC: %p, ControlPc at offset %x\n", uMethodStartPC, dwControlPc - uMethodStartPC));
+
+ EHCount = pJitMan->InitializeEHEnumeration(MethToken, &EnumState);
+ for (unsigned i = 0; i < EHCount; i++)
+ {
+ EE_ILEXCEPTION_CLAUSE EHClause;
+ pJitMan->GetNextEHClause(&EnumState, &EHClause);
+
+ EH_LOG((LL_INFO1000, " | %s clause [%x, %x], handler: [%x, %x] %s",
+ (IsFault(&EHClause) ? "fault" :
+ (IsFinally(&EHClause) ? "finally" :
+ (IsFilterHandler(&EHClause) ? "filter" :
+ (IsTypedHandler(&EHClause) ? "typed" : "unknown")))),
+ EHClause.TryStartPC , // + uMethodStartPC,
+ EHClause.TryEndPC , // + uMethodStartPC,
+ EHClause.HandlerStartPC , // + uMethodStartPC,
+ EHClause.HandlerEndPC , // + uMethodStartPC
+ (IsDuplicateClause(&EHClause) ? "[duplicate]" : "")
+ ));
+
+ if (IsFilterHandler(&EHClause))
+ {
+ LOG((LF_EH, LL_INFO1000, " filter: [%x, ...]",
+ EHClause.FilterOffset));// + uMethodStartPC
+ }
+
+ LOG((LF_EH, LL_INFO1000, "\n"));
+ }
+
+}
+
+#define STACK_ALLOC_ARRAY(numElements, type) \
+ ((type *)_alloca((numElements)*(sizeof(type))))
+
+static void DoEHLog(
+ DWORD lvl,
+ __in_z char *fmt,
+ ...
+ )
+{
+ if (!LoggingOn(LF_EH, lvl))
+ return;
+
+ va_list args;
+ va_start(args, fmt);
+
+ UINT_PTR nestinglevel = ExceptionTracker::DebugComputeNestingLevel();
+ if (nestinglevel)
+ {
+ _ASSERTE(FitsIn<UINT_PTR>(2 * nestinglevel));
+ UINT_PTR cch = 2 * nestinglevel;
+ char* pPadding = STACK_ALLOC_ARRAY(cch + 1, char);
+ memset(pPadding, '.', cch);
+ pPadding[cch] = 0;
+
+ LOG((LF_EH, lvl, pPadding));
+ }
+
+ LogSpewValist(LF_EH, lvl, fmt, args);
+}
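+
+// For illustration, DoEHLog prefixes each line with 2 dots per nesting level, so
+// with two trackers active (nesting level 2) a log line such as ">>NEW exception"
+// appears as:
+//
+//   ....>>NEW exception
+//
+// which makes nested dispatch easy to pick out in the EH log.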
+#endif // _DEBUG
+
+void ClrUnwindEx(EXCEPTION_RECORD* pExceptionRecord, UINT_PTR ReturnValue, UINT_PTR TargetIP, UINT_PTR TargetFrameSp)
+{
+#ifndef FEATURE_PAL
+ PVOID TargetFrame = (PVOID)TargetFrameSp;
+
+ CONTEXT ctx;
+ RtlUnwindEx(TargetFrame,
+ (PVOID)TargetIP,
+ pExceptionRecord,
+ (PVOID)ReturnValue, // ReturnValue
+ &ctx,
+ NULL); // HistoryTable
+
+#else // !FEATURE_PAL
+ PORTABILITY_ASSERT("UNIXTODO: Implement unwinding for PAL");
+#endif // !FEATURE_PAL
+
+ // doesn't return
+ UNREACHABLE();
+}
+
+void TrackerAllocator::Init()
+{
+ void* pvFirstPage = (void*)new BYTE[TRACKER_ALLOCATOR_PAGE_SIZE];
+
+ ZeroMemory(pvFirstPage, TRACKER_ALLOCATOR_PAGE_SIZE);
+
+ m_pFirstPage = (Page*)pvFirstPage;
+
+ _ASSERTE(NULL == m_pFirstPage->m_header.m_pNext);
+ _ASSERTE(0 == m_pFirstPage->m_header.m_idxFirstFree);
+
+ m_pCrst = new Crst(CrstException, CRST_UNSAFE_ANYMODE);
+
+ EH_LOG((LL_INFO100, "TrackerAllocator::Init() succeeded..\n"));
+}
+
+void TrackerAllocator::Terminate()
+{
+ Page* pPage = m_pFirstPage;
+
+ while (pPage)
+ {
+ Page* pDeleteMe = pPage;
+ pPage = pPage->m_header.m_pNext;
+ delete [] pDeleteMe;
+ }
+ delete m_pCrst;
+}
+
+ExceptionTracker* TrackerAllocator::GetTrackerMemory()
+{
+ CONTRACT(ExceptionTracker*)
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ _ASSERTE(NULL != m_pFirstPage);
+
+ Page* pPage = m_pFirstPage;
+
+ ExceptionTracker* pTracker = NULL;
+
+ for (int i = 0; i < TRACKER_ALLOCATOR_MAX_OOM_SPINS; i++)
+ {
+ { // open lock scope
+ CrstHolder ch(m_pCrst);
+
+ while (pPage)
+ {
+ int idx;
+ for (idx = 0; idx < NUM_TRACKERS_PER_PAGE; idx++)
+ {
+ pTracker = &(pPage->m_rgTrackers[idx]);
+ if (pTracker->m_pThread == NULL)
+ {
+ break;
+ }
+ }
+
+ if (idx < NUM_TRACKERS_PER_PAGE)
+ {
+ break;
+ }
+ else
+ {
+ if (NULL == pPage->m_header.m_pNext)
+ {
+ Page* pNewPage = (Page*) new (nothrow) BYTE[TRACKER_ALLOCATOR_PAGE_SIZE];
+
+ if (pNewPage)
+ {
+ STRESS_LOG0(LF_EH, LL_INFO10, "TrackerAllocator: allocated page\n");
+ pPage->m_header.m_pNext = pNewPage;
+ ZeroMemory(pPage->m_header.m_pNext, TRACKER_ALLOCATOR_PAGE_SIZE);
+ }
+ else
+ {
+ STRESS_LOG0(LF_EH, LL_WARNING, "TrackerAllocator: failed to allocate a page\n");
+ pTracker = NULL;
+ }
+ }
+
+ pPage = pPage->m_header.m_pNext;
+ }
+ }
+
+ if (pTracker)
+ {
+ Thread* pThread = GetThread();
+ _ASSERTE(NULL != pPage);
+ ZeroMemory(pTracker, sizeof(*pTracker));
+ pTracker->m_pThread = pThread;
+ EH_LOG((LL_INFO100, "TrackerAllocator: allocating tracker 0x%p, thread = 0x%p\n", pTracker, pTracker->m_pThread));
+ break;
+ }
+ } // end lock scope
+
+ //
+ // We could not allocate a new page of memory. This is a fatal error if it happens twice (nested)
+ // on the same thread because we have only one m_OOMTracker. We will spin hoping for another thread
+ // to give back to the pool or for the allocation to succeed.
+ //
+
+ ClrSleepEx(TRACKER_ALLOCATOR_OOM_SPIN_DELAY, FALSE);
+ STRESS_LOG1(LF_EH, LL_WARNING, "TrackerAllocator: retry #%d\n", i);
+ }
+
+ RETURN pTracker;
+}
+
+void TrackerAllocator::FreeTrackerMemory(ExceptionTracker* pTracker)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // mark this entry as free
+ EH_LOG((LL_INFO100, "TrackerAllocator: freeing tracker 0x%p, thread = 0x%p\n", pTracker, pTracker->m_pThread));
+ CONSISTENCY_CHECK(pTracker->IsValid());
+ FastInterlockExchangePointer(&(pTracker->m_pThread), NULL);
+}
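+
+// A sketch of the Page layout assumed by the allocator above. Field names are
+// taken from their uses in Init() and GetTrackerMemory(); the real definition
+// lives in the corresponding header and may differ in detail:
+//
+//   struct Page
+//   {
+//       struct Header
+//       {
+//           Page* m_pNext;        // singly-linked list of TRACKER_ALLOCATOR_PAGE_SIZE pages
+//           int   m_idxFirstFree; // zeroed pages start with this at 0
+//       } m_header;
+//       ExceptionTracker m_rgTrackers[NUM_TRACKERS_PER_PAGE];
+//   };
+//
+// A slot is free iff m_rgTrackers[i].m_pThread == NULL, which is why
+// FreeTrackerMemory() above can release a slot with a single interlocked write
+// and no lock on the free path.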
+
+// This is a Windows-specific implementation, as it is based upon the notion of collided unwind, which is
+// specific to 64-bit Windows.
+//
+// If pContext is not NULL, then this function copies pContext to pDispatcherContext->ContextRecord. If pContext
+// is NULL, then this function assumes that pDispatcherContext->ContextRecord has already been fixed up. In any
+// case, this function then starts to update the various fields in pDispatcherContext.
+//
+// In order to redirect the unwind, the OS requires us to provide a personality routine for the code at the
+// new context we are providing. If RtlVirtualUnwind can't determine the personality routine and using
+// the default managed code personality routine isn't appropriate (maybe you aren't returning to managed code)
+// specify pUnwindPersonalityRoutine. For instance the debugger uses this to unwind from ExceptionHijack back
+// to RaiseException in win32 and specifies an empty personality routine. For more details about this
+// see the comments in the code below.
+//
+// <AMD64-specific>
+// AMD64 is more "advanced", in that the DISPATCHER_CONTEXT contains a field for the TargetIp. So we don't have
+// to use the control PC in pDispatcherContext->ContextRecord to indicate the target IP for the unwind. However,
+// this also means that pDispatcherContext->ContextRecord is expected to be consistent.
+// </AMD64-specific>
+//
+// For more information, refer to vctools\crt\crtw32\misc\{ia64|amd64}\chandler.c for __C_specific_handler() and
+// nt\base\ntos\rtl\{ia64|amd64}\exdsptch.c for RtlUnwindEx().
+void FixupDispatcherContext(DISPATCHER_CONTEXT* pDispatcherContext, CONTEXT* pContext, LPVOID originalControlPC, PEXCEPTION_ROUTINE pUnwindPersonalityRoutine)
+{
+#ifndef FEATURE_PAL
+ if (pContext)
+ {
+ STRESS_LOG1(LF_EH, LL_INFO10, "FDC: pContext: %p\n", pContext);
+ CopyOSContext(pDispatcherContext->ContextRecord, pContext);
+ }
+
+ pDispatcherContext->ControlPc = (UINT_PTR) GetIP(pDispatcherContext->ContextRecord);
+
+#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ // Since this routine is used to fixup contexts for async exceptions,
+ // clear the CONTEXT_UNWOUND_TO_CALL flag since, semantically, frames
+ // where such exceptions have happened do not have callsites. On a similar
+ // note, also clear out the ControlPcIsUnwound field. Post discussion with
+ // AaronGi from the kernel team, it's safe for us to have both of these
+ // cleared.
+ //
+ // The OS will pick this up with the rest of the DispatcherContext state
+ // when it processes collided unwind and thus, when our managed personality
+ // routine is invoked, ExceptionTracker::InitializeCrawlFrame will adjust
+ // ControlPC correctly.
+ pDispatcherContext->ContextRecord->ContextFlags &= ~CONTEXT_UNWOUND_TO_CALL;
+ pDispatcherContext->ControlPcIsUnwound = FALSE;
+
+ // Also, clear out the debug-registers flag so that when this context is used by the
+ // OS, it does not end up setting bogus access breakpoints. The kernel team will also
+ // be fixing it at their end, in their implementation of collided unwind.
+ pDispatcherContext->ContextRecord->ContextFlags &= ~CONTEXT_DEBUG_REGISTERS;
+
+#ifdef _TARGET_ARM_
+ // But keep the architecture flag set (it's part of CONTEXT_DEBUG_REGISTERS)
+ pDispatcherContext->ContextRecord->ContextFlags |= CONTEXT_ARM;
+#else // _TARGET_ARM64_
+ // But keep the architecture flag set (it's part of CONTEXT_DEBUG_REGISTERS)
+ pDispatcherContext->ContextRecord->ContextFlags |= CONTEXT_ARM64;
+#endif // _TARGET_ARM_
+
+#endif // _TARGET_ARM_ || _TARGET_ARM64_
+
+ INDEBUG(pDispatcherContext->FunctionEntry = (PRUNTIME_FUNCTION)INVALID_POINTER_CD);
+ INDEBUG(pDispatcherContext->ImageBase = INVALID_POINTER_CD);
+
+ pDispatcherContext->FunctionEntry = RtlLookupFunctionEntry(pDispatcherContext->ControlPc,
+ &(pDispatcherContext->ImageBase),
+ NULL
+ );
+
+ _ASSERTE(((PRUNTIME_FUNCTION)INVALID_POINTER_CD) != pDispatcherContext->FunctionEntry);
+ _ASSERTE(INVALID_POINTER_CD != pDispatcherContext->ImageBase);
+
+ //
+ // need to find the establisher frame by virtually unwinding
+ //
+ CONTEXT tempContext;
+ PVOID HandlerData;
+
+ CopyOSContext(&tempContext, pDispatcherContext->ContextRecord);
+
+ // RtlVirtualUnwind returns the language specific handler for the ControlPC in question
+ // on ARM and AMD64.
+ pDispatcherContext->LanguageHandler = RtlVirtualUnwind(
+ NULL, // HandlerType
+ pDispatcherContext->ImageBase,
+ pDispatcherContext->ControlPc,
+ pDispatcherContext->FunctionEntry,
+ &tempContext,
+ &HandlerData,
+ &(pDispatcherContext->EstablisherFrame),
+ NULL);
+
+ pDispatcherContext->HandlerData = NULL;
+ pDispatcherContext->HistoryTable = NULL;
+
+
+ // Why does the OS consider it invalid to have a NULL personality routine (or, why does
+ // the OS assume that DispatcherContext returned from ExceptionCollidedUnwind will always
+ // have a valid personality routine)?
+ //
+ //
+ // We force the OS to pickup the DispatcherContext (that we fixed above) by returning
+ // ExceptionCollidedUnwind. Per Dave Cutler, the only entity which is allowed to return
+ // this exception disposition is the personality routine of the assembly helper which is used
+ // to invoke the user (stack-based) personality routines. For such invocations made by the
+ // OS assembly helper, the DispatcherContext it saves before invoking the user personality routine
+ // will always have a valid personality routine reference and thus, when a real collided unwind happens
+ // and this exception disposition is returned, OS exception dispatch will have a valid personality routine
+ // to invoke.
+ //
+ // By using this exception disposition to make the OS walk stacks that we broke (for async exceptions), we are
+ // simply abusing the semantics of this disposition. However, since we must use it, we should also check
+ // that we are returning a valid personality routine reference back to the OS.
+ if(pDispatcherContext->LanguageHandler == NULL)
+ {
+ if (pUnwindPersonalityRoutine != NULL)
+ {
+ pDispatcherContext->LanguageHandler = pUnwindPersonalityRoutine;
+ }
+ else
+ {
+ // We would be here only for fixing up context for an async exception in managed code.
+ // This implies that we should have got a personality routine returned from the call to
+ // RtlVirtualUnwind above.
+ //
+ // However, if the ControlPC happened to be in the prolog or epilog of a managed method,
+ // then RtlVirtualUnwind will always return NULL. We cannot return this NULL back to the
+ // OS as it is an invalid value which the OS does not expect (and attempting to do so will
+ // result in the kernel exception dispatch going haywire).
+#if defined(_DEBUG)
+ // We should be in jitted code
+ TADDR adrRedirectedIP = PCODEToPINSTR(pDispatcherContext->ControlPc);
+ _ASSERTE(ExecutionManager::IsManagedCode(adrRedirectedIP));
+#endif // _DEBUG
+
+ // Set the personality routine to be returned as the one which is conventionally
+ // invoked for exception dispatch.
+ pDispatcherContext->LanguageHandler = (PEXCEPTION_ROUTINE)GetEEFuncEntryPoint(ProcessCLRException);
+ STRESS_LOG1(LF_EH, LL_INFO10, "FDC: ControlPC was in prolog/epilog, so setting DC->LanguageHandler to %p\n", pDispatcherContext->LanguageHandler);
+ }
+ }
+
+ _ASSERTE(pDispatcherContext->LanguageHandler != NULL);
+#else // !FEATURE_PAL
+ PORTABILITY_ASSERT("UNIXTODO: Implement the fixup for PAL");
+#endif // !FEATURE_PAL
+}
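+
+// A condensed, illustrative view of what the fixup above leaves in the
+// DISPATCHER_CONTEXT (the code is authoritative):
+//
+//   ControlPc        <- GetIP(ContextRecord)
+//   FunctionEntry    <- RtlLookupFunctionEntry(ControlPc, &ImageBase, NULL)
+//   EstablisherFrame <- from RtlVirtualUnwind over a scratch copy of ContextRecord
+//   LanguageHandler  <- RtlVirtualUnwind's result, else pUnwindPersonalityRoutine,
+//                       else ProcessCLRException (ControlPc in prolog/epilog)
+//   HandlerData      <- NULL
+//   HistoryTable     <- NULL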
+
+
+// See the comment above for the overloaded version of this function.
+FORCEINLINE void FixupDispatcherContext(DISPATCHER_CONTEXT* pDispatcherContext, CONTEXT* pContext, CONTEXT* pOriginalContext, PEXCEPTION_ROUTINE pUnwindPersonalityRoutine = NULL)
+{
+ _ASSERTE(pOriginalContext != NULL);
+ FixupDispatcherContext(pDispatcherContext, pContext, (LPVOID)::GetIP(pOriginalContext), pUnwindPersonalityRoutine);
+}
+
+
+BOOL FirstCallToHandler (
+ DISPATCHER_CONTEXT *pDispatcherContext,
+ CONTEXT **ppContextRecord)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ FaultingExceptionFrame *pFrame = GetFrameFromRedirectedStubStackFrame(pDispatcherContext);
+
+ BOOL *pfFilterExecuted = pFrame->GetFilterExecutedFlag();
+ BOOL fFilterExecuted = *pfFilterExecuted;
+
+ STRESS_LOG4(LF_EH, LL_INFO10, "FirstCallToHandler: Fixing exception context for redirect stub, sp %p, establisher %p, flag %p -> %u\n",
+ GetSP(pDispatcherContext->ContextRecord),
+ pDispatcherContext->EstablisherFrame,
+ pfFilterExecuted,
+ fFilterExecuted);
+
+ *ppContextRecord = pFrame->GetExceptionContext();
+ *pfFilterExecuted = TRUE;
+
+ return !fFilterExecuted;
+}
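+
+// Usage note: the filter-executed flag on the FaultingExceptionFrame makes the
+// redirect-stub context fixup run exactly once. An illustrative call sequence:
+//
+//   CONTEXT* pCtx;
+//   FirstCallToHandler(pDC, &pCtx); // first dispatch over this frame: TRUE, flag now set
+//   FirstCallToHandler(pDC, &pCtx); // collided-unwind re-dispatch: FALSE
+//
+// *pCtx receives the saved exception context in both calls; only the first returns
+// TRUE, so callers like HijackHandler adjust the context just once.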
+
+
+EXTERN_C EXCEPTION_DISPOSITION
+HijackHandler(IN PEXCEPTION_RECORD pExceptionRecord
+ WIN64_ARG(IN ULONG64 MemoryStackFp)
+NOT_WIN64_ARG(IN ULONG MemoryStackFp),
+ IN OUT PCONTEXT pContextRecord,
+ IN OUT PDISPATCHER_CONTEXT pDispatcherContext
+ )
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ STRESS_LOG4(LF_EH, LL_INFO10, "HijackHandler: establisher: %p, disp->cxr: %p, sp %p, cxr @ exception: %p\n",
+ pDispatcherContext->EstablisherFrame,
+ pDispatcherContext->ContextRecord,
+ GetSP(pDispatcherContext->ContextRecord),
+ pContextRecord);
+
+ Thread* pThread = GetThread();
+ CONTEXT *pNewContext = NULL;
+
+ VALIDATE_BACKOUT_STACK_CONSUMPTION;
+
+ if (FirstCallToHandler(pDispatcherContext, &pNewContext))
+ {
+ //
+ // We've pushed a Frame, but it is not initialized yet, so we
+ // must not be in preemptive mode
+ //
+ CONSISTENCY_CHECK(pThread->PreemptiveGCDisabled());
+
+ //
+ // AdjustContextForThreadStop will reset the ThrowControlForThread state
+ // on the thread, but we don't want to do that just yet. We need that
+ // information in our personality routine, so we will reset it back to
+ // InducedThreadStop and then clear it in our personality routine.
+ //
+ CONSISTENCY_CHECK(IsThreadHijackedForThreadStop(pThread, pExceptionRecord));
+ AdjustContextForThreadStop(pThread, pNewContext);
+ pThread->SetThrowControlForThread(Thread::InducedThreadStop);
+ }
+
+ FixupDispatcherContext(pDispatcherContext, pNewContext, pContextRecord);
+
+ STRESS_LOG4(LF_EH, LL_INFO10, "HijackHandler: new establisher: %p, disp->cxr: %p, new ip: %p, new sp: %p\n",
+ pDispatcherContext->EstablisherFrame,
+ pDispatcherContext->ContextRecord,
+ GetIP(pDispatcherContext->ContextRecord),
+ GetSP(pDispatcherContext->ContextRecord));
+
+ // Returning ExceptionCollidedUnwind will cause the OS to take our new context record
+ // and dispatcher context and restart the exception dispatching on this call frame,
+ // which is exactly the behavior we want in order to restore our thread's unwindability
+ // (which was broken when we whacked the IP to get control over the thread)
+ return ExceptionCollidedUnwind;
+}
+
+
+EXTERN_C VOID FixContextForFaultingExceptionFrame (
+ EXCEPTION_RECORD* pExceptionRecord,
+ CONTEXT *pContextRecord);
+
+EXTERN_C EXCEPTION_DISPOSITION
+FixContextHandler(IN PEXCEPTION_RECORD pExceptionRecord
+ WIN64_ARG(IN ULONG64 MemoryStackFp)
+ NOT_WIN64_ARG(IN ULONG MemoryStackFp),
+ IN OUT PCONTEXT pContextRecord,
+ IN OUT PDISPATCHER_CONTEXT pDispatcherContext
+ )
+{
+ CONTEXT* pNewContext = NULL;
+
+ VALIDATE_BACKOUT_STACK_CONSUMPTION;
+
+ // Our backout validation should ensure that we don't SO here.
+ BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
+
+ if (FirstCallToHandler(pDispatcherContext, &pNewContext))
+ {
+ //
+ // We've pushed a Frame, but it is not initialized yet, so we
+ // must not be in preemptive mode
+ //
+ CONSISTENCY_CHECK(GetThread()->PreemptiveGCDisabled());
+
+ FixContextForFaultingExceptionFrame(pExceptionRecord, pNewContext);
+ }
+
+ FixupDispatcherContext(pDispatcherContext, pNewContext, pContextRecord);
+
+ END_CONTRACT_VIOLATION;
+
+ // Returning ExceptionCollidedUnwind will cause the OS to take our new context record
+ // and dispatcher context and restart the exception dispatching on this call frame,
+ // which is exactly the behavior we want in order to restore our thread's unwindability
+ // (which was broken when we whacked the IP to get control over the thread)
+ return ExceptionCollidedUnwind;
+}
+
+#ifdef _DEBUG
+// IsSafeToUnwindFrameChain:
+// Arguments:
+// pThread the Thread* being unwound
+// MemoryStackFpForFrameChain the stack limit to unwind the Frames
+// Returns
+// FALSE if the value MemoryStackFpForFrameChain falls between a M2U transition frame
+// and its corresponding managed method stack pointer
+// TRUE otherwise.
+//
+// If the managed method will *NOT* be unwound by the current exception
+// pass we have an error: with no Frame on the stack to report it, the
+// managed method will not be included in the next stack walk.
+// An example of running into this issue was DDBug 1133, where
+// TransparentProxyStubIA64 had a personality routine that removed a
+// transition frame. As a consequence the managed method did not
+// participate in the stack walk until the exception handler was called. At
+// that time the stack walking code was able to see the managed method again
+// but by this time all references from this managed method were stale.
+BOOL IsSafeToUnwindFrameChain(Thread* pThread, LPVOID MemoryStackFpForFrameChain)
+{
+ // Look for the last Frame to be removed that marks a managed-to-unmanaged transition
+ Frame* pLastFrameOfInterest = FRAME_TOP;
+ for (Frame* pf = pThread->m_pFrame; pf < MemoryStackFpForFrameChain; pf = pf->PtrNextFrame())
+ {
+ PCODE retAddr = pf->GetReturnAddress();
+ if (retAddr != NULL && ExecutionManager::IsManagedCode(retAddr))
+ {
+ pLastFrameOfInterest = pf;
+ }
+ }
+
+ // If there is none it's safe to remove all these Frames
+ if (pLastFrameOfInterest == FRAME_TOP)
+ {
+ return TRUE;
+ }
+
+ // Otherwise "unwind" to managed method
+ REGDISPLAY rd;
+ CONTEXT ctx;
+ SetIP(&ctx, 0);
+ SetSP(&ctx, 0);
+ FillRegDisplay(&rd, &ctx);
+ pLastFrameOfInterest->UpdateRegDisplay(&rd);
+
+ // We're safe only if the managed method will be unwound also
+ LPVOID managedSP = dac_cast<PTR_VOID>(GetRegdisplaySP(&rd));
+
+ if (managedSP < MemoryStackFpForFrameChain)
+ {
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+
+}
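+
+// Worked example with hypothetical addresses (stack growing down): suppose the
+// Frame chain holds an M2U transition frame at 0x1000 whose managed caller's SP
+// is 0x1200. With MemoryStackFpForFrameChain == 0x1100, unwinding would remove
+// the transition frame (0x1000 < 0x1100) while the managed method (SP 0x1200)
+// survives unreported, so the function returns FALSE. With the limit at 0x1300,
+// both are unwound together and it returns TRUE.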
+#endif // _DEBUG
+
+
+void CleanUpForSecondPass(Thread* pThread, bool fIsSO, LPVOID MemoryStackFpForFrameChain, LPVOID MemoryStackFp)
+{
+ WRAPPER_NO_CONTRACT;
+
+ EH_LOG((LL_INFO100, "Exception is going into unmanaged code, unwinding frame chain to %p\n", MemoryStackFpForFrameChain));
+
+ // On AMD64 the establisher pointer is the live stack pointer, but on
+ // IA64 and ARM it's the caller's stack pointer. It makes no difference, since there
+ // is no Frame anywhere in CallDescrWorker's region of stack.
+
+ // First make sure that unwinding the frame chain does not remove any transition frames
+ // that report managed methods that will not be unwound.
+ // If this assert fires it's probably the personality routine of some assembly code that
+ // incorrectly removed a transition frame (more details in IsSafeToUnwindFrameChain)
+ // [Do not perform the IsSafeToUnwindFrameChain() check in the SO case, since
+ // IsSafeToUnwindFrameChain() requires a large amount of stack space.]
+ _ASSERTE(fIsSO || IsSafeToUnwindFrameChain(pThread, (Frame*)MemoryStackFpForFrameChain));
+
+ UnwindFrameChain(pThread, (Frame*)MemoryStackFpForFrameChain);
+
+ // Only pop the trackers if this is not an SO. It's not safe to pop the trackers during EH for an SO.
+ // Instead, we rely on the END_SO_TOLERANT_CODE macro to call ClearExceptionStateAfterSO(). Of course,
+ // we may leak in the UMThunkStubCommon() case where we don't have this macro lower on the stack
+ // (stack grows up).
+ if (!fIsSO)
+ {
+ ExceptionTracker::PopTrackerIfEscaping((void*)MemoryStackFp);
+ }
+}
+
+EXTERN_C EXCEPTION_DISPOSITION
+UMThunkUnwindFrameChainHandler(IN PEXCEPTION_RECORD pExceptionRecord
+ WIN64_ARG(IN ULONG64 MemoryStackFp)
+ NOT_WIN64_ARG(IN ULONG MemoryStackFp),
+ IN OUT PCONTEXT pContextRecord,
+ IN OUT PDISPATCHER_CONTEXT pDispatcherContext
+ )
+{
+ Thread* pThread = GetThread();
+ if (pThread == NULL) {
+ return ExceptionContinueSearch;
+ }
+
+ bool fIsSO =
+ IsSOExceptionCode(pExceptionRecord->ExceptionCode);
+
+ VALIDATE_BACKOUT_STACK_CONSUMPTION;
+
+ if (IS_UNWINDING(pExceptionRecord->ExceptionFlags))
+ {
+ if (fIsSO)
+ {
+ if (!pThread->PreemptiveGCDisabled())
+ {
+ pThread->DisablePreemptiveGC();
+ }
+ }
+ // The VALIDATE_BACKOUT_STACK_CONSUMPTION makes sure that this function does not use stack more than backout limit.
+ CONTRACT_VIOLATION(SOToleranceViolation);
+ CleanUpForSecondPass(pThread, fIsSO, (void*)MemoryStackFp, (void*)MemoryStackFp);
+ }
+
+ // The asm stub put us into COOP mode, but we're about to scan unmanaged call frames
+ // so unmanaged filters/handlers/etc can run and we must be in PREEMP mode for that.
+ if (pThread->PreemptiveGCDisabled())
+ {
+ if (fIsSO)
+ {
+ // We don't have stack to do full-version EnablePreemptiveGC.
+ FastInterlockAnd (&pThread->m_fPreemptiveGCDisabled, 0);
+ }
+ else
+ {
+ pThread->EnablePreemptiveGC();
+ }
+ }
+
+ return ExceptionContinueSearch;
+}
+
+EXTERN_C EXCEPTION_DISPOSITION
+UMEntryPrestubUnwindFrameChainHandler(
+ IN PEXCEPTION_RECORD pExceptionRecord
+ WIN64_ARG(IN ULONG64 MemoryStackFp)
+ NOT_WIN64_ARG(IN ULONG MemoryStackFp),
+ IN OUT PCONTEXT pContextRecord,
+ IN OUT PDISPATCHER_CONTEXT pDispatcherContext
+ )
+{
+ EXCEPTION_DISPOSITION disposition = UMThunkUnwindFrameChainHandler(
+ pExceptionRecord,
+ MemoryStackFp,
+ pContextRecord,
+ pDispatcherContext
+ );
+
+ return disposition;
+}
+
+EXTERN_C EXCEPTION_DISPOSITION
+UMThunkStubUnwindFrameChainHandler(
+ IN PEXCEPTION_RECORD pExceptionRecord
+ WIN64_ARG(IN ULONG64 MemoryStackFp)
+NOT_WIN64_ARG(IN ULONG MemoryStackFp),
+ IN OUT PCONTEXT pContextRecord,
+ IN OUT PDISPATCHER_CONTEXT pDispatcherContext
+ )
+{
+
+#ifdef _DEBUG
+ // If the exception is escaping the last CLR personality routine on the stack,
+ // then set a flag on the thread to indicate so.
+ //
+ // We check for the thread object since this function is the personality routine of the UMThunk,
+ // and we can land here even when thread creation (within the thunk) fails.
+ if (GetThread() != NULL)
+ {
+ SetReversePInvokeEscapingUnhandledExceptionStatus(IS_UNWINDING(pExceptionRecord->ExceptionFlags),
+ MemoryStackFp
+ );
+ }
+#endif // _DEBUG
+
+ // We need to ReverseLeaveRuntime if we are unwinding (since there is no
+ // frame to do this for us...)
+ if (IS_UNWINDING(pExceptionRecord->ExceptionFlags))
+ {
+ BYTE bFlag;
+
+#ifdef _TARGET_AMD64_
+ bFlag = *(BYTE*)(pDispatcherContext->ContextRecord->Rbp + UMTHUNKSTUB_HOST_NOTIFY_FLAG_RBPOFFSET);
+#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ // On ARM, we do not need to do anything here. If required, ReverseEnterRuntime should happen
+ // in the VM in UMThunkStubWorker via a holder so that during an exceptional case, we will
+ // automatically perform the ReverseLeaveRuntime.
+ bFlag = 0;
+#else
+ bFlag = 0;
+ PORTABILITY_ASSERT("NYI -- UMThunkStubUnwindFrameChainHandler notify host of ReverseLeaveRuntime");
+#endif // _TARGET_AMD64_
+
+ if (0 != bFlag)
+ {
+ GetThread()->ReverseLeaveRuntime();
+ }
+ }
+
+ EXCEPTION_DISPOSITION disposition = UMThunkUnwindFrameChainHandler(
+ pExceptionRecord,
+ MemoryStackFp,
+ pContextRecord,
+ pDispatcherContext
+ );
+
+ return disposition;
+}
+
+
+// This is the personality routine setup for the assembly helper (CallDescrWorker) that calls into
+// managed code.
+EXTERN_C EXCEPTION_DISPOSITION
+CallDescrWorkerUnwindFrameChainHandler(IN PEXCEPTION_RECORD pExceptionRecord
+ WIN64_ARG(IN ULONG64 MemoryStackFp)
+ NOT_WIN64_ARG(IN ULONG MemoryStackFp),
+ IN OUT PCONTEXT pContextRecord,
+ IN OUT PDISPATCHER_CONTEXT pDispatcherContext
+ )
+{
+
+ Thread* pThread = GetThread();
+ _ASSERTE(pThread);
+
+ if (IsSOExceptionCode(pExceptionRecord->ExceptionCode))
+ {
+ if (IS_UNWINDING(pExceptionRecord->ExceptionFlags))
+ {
+ GCX_COOP_NO_DTOR();
+ CleanUpForSecondPass(pThread, true, (void*)MemoryStackFp, (void*)MemoryStackFp);
+ }
+
+ FastInterlockAnd (&pThread->m_fPreemptiveGCDisabled, 0);
+ // We'll let the SO infrastructure handle this exception... at that point, we
+ // know that we'll have enough stack to do it.
+ return ExceptionContinueSearch;
+ }
+
+ EXCEPTION_DISPOSITION retVal = ProcessCLRException(pExceptionRecord,
+ MemoryStackFp,
+ pContextRecord,
+ pDispatcherContext);
+
+ // Our backout validation should ensure that we don't SO here. Add a
+ // backout validation here.
+ BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
+
+ if (retVal == ExceptionContinueSearch)
+ {
+
+ if (IS_UNWINDING(pExceptionRecord->ExceptionFlags))
+ {
+ CleanUpForSecondPass(pThread, false, (void*)MemoryStackFp, (void*)MemoryStackFp);
+ }
+
+ // We're scanning out from CallDescr and potentially through the EE and out to unmanaged.
+ // So switch to preemptive mode.
+ GCX_PREEMP_NO_DTOR();
+ }
+
+ END_CONTRACT_VIOLATION;
+
+ return retVal;
+}
+
+#ifdef FEATURE_COMINTEROP
+EXTERN_C EXCEPTION_DISPOSITION
+ReverseComUnwindFrameChainHandler(IN PEXCEPTION_RECORD pExceptionRecord
+ WIN64_ARG(IN ULONG64 MemoryStackFp)
+ NOT_WIN64_ARG(IN ULONG MemoryStackFp),
+ IN OUT PCONTEXT pContextRecord,
+ IN OUT PDISPATCHER_CONTEXT pDispatcherContext
+ )
+{
+ if (IS_UNWINDING(pExceptionRecord->ExceptionFlags))
+ {
+ ComMethodFrame::DoSecondPassHandlerCleanup(GetThread()->GetFrame());
+ }
+ return ExceptionContinueSearch;
+}
+#endif // FEATURE_COMINTEROP
+
+EXTERN_C EXCEPTION_DISPOSITION
+FixRedirectContextHandler(
+ IN PEXCEPTION_RECORD pExceptionRecord
+ WIN64_ARG(IN ULONG64 MemoryStackFp)
+ NOT_WIN64_ARG(IN ULONG MemoryStackFp),
+ IN OUT PCONTEXT pContextRecord,
+ IN OUT PDISPATCHER_CONTEXT pDispatcherContext
+ )
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ STRESS_LOG4(LF_EH, LL_INFO10, "FixRedirectContextHandler: sp %p, establisher %p, cxr: %p, disp cxr: %p\n",
+ GetSP(pDispatcherContext->ContextRecord),
+ pDispatcherContext->EstablisherFrame,
+ pContextRecord,
+ pDispatcherContext->ContextRecord);
+
+ VALIDATE_BACKOUT_STACK_CONSUMPTION;
+
+ CONTEXT *pRedirectedContext = GetCONTEXTFromRedirectedStubStackFrame(pDispatcherContext);
+
+ FixupDispatcherContext(pDispatcherContext, pRedirectedContext, pContextRecord);
+
+ // Returning ExceptionCollidedUnwind will cause the OS to take our new context record
+ // and dispatcher context and restart the exception dispatching on this call frame,
+ // which is exactly the behavior we want in order to restore our thread's unwindability
+ // (which was broken when we whacked the IP to get control over the thread)
+ return ExceptionCollidedUnwind;
+}
+
+#endif // DACCESS_COMPILE
+
+void ExceptionTracker::StackRange::Reset()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_sfLowBound.SetMaxVal();
+ m_sfHighBound.Clear();
+}
+
+bool ExceptionTracker::StackRange::IsEmpty()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_sfLowBound.IsMaxVal() &&
+ m_sfHighBound.IsNull());
+}
+
+bool ExceptionTracker::StackRange::IsSupersededBy(StackFrame sf)
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(IsConsistent());
+
+ return (sf >= m_sfLowBound);
+}
+
+void ExceptionTracker::StackRange::CombineWith(StackFrame sfCurrent, StackRange* pPreviousRange)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if ((pPreviousRange->m_sfHighBound < sfCurrent) && IsEmpty())
+ {
+ // This case comes from an unusual situation. It is possible for a new nested tracker to start its
+ // first pass at a higher SP than any previously scanned frame in the previous "enclosing" tracker.
+ // Typically this doesn't happen because the ProcessCLRException callback is made multiple times for
+ // the frame where the nesting first occurs and that will ensure that the stack range of the new
+ // nested exception is extended to contain the scan range of the previous tracker's scan. However,
+ // if the exception dispatch calls a C++ handler (e.g. a finally) and then that handler tries to
+ // reverse-pinvoke into the runtime, AND we trigger an exception (e.g. ThreadAbort,
+ // AppDomainUnloaded) before we reach another managed frame (which would have the CLR personality
+ // routine associated with it), the first callback to ProcessCLRException for this new exception
+ // will occur on a frame that has never been seen before by the current tracker.
+ //
+ // So in this case, we'll see a sfCurrent that is larger than the previous tracker's high bound and
+ // we'll have an empty scan range for the current tracker. And we'll just need to pre-init the
+ // scanned stack range for the new tracker to the previous tracker's range. This maintains the
+ // invariant that the scanned range for nested trackers completely covers the scanned range of their
+ // previous tracker once they "escape" the previous tracker.
+ STRESS_LOG3(LF_EH, LL_INFO100,
+ "Initializing current StackRange with previous tracker's StackRange. sfCurrent: %p, prev low: %p, prev high: %p\n",
+ sfCurrent.SP, pPreviousRange->m_sfLowBound.SP, pPreviousRange->m_sfHighBound.SP);
+
+ *this = *pPreviousRange;
+ }
+ else
+ {
+ m_sfHighBound = pPreviousRange->m_sfHighBound;
+ }
+}
+
+bool ExceptionTracker::StackRange::Contains(StackFrame sf)
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(IsConsistent());
+
+ return ((m_sfLowBound <= sf) &&
+ (sf <= m_sfHighBound));
+}
+
+void ExceptionTracker::StackRange::ExtendUpperBound(StackFrame sf)
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(IsConsistent());
+ CONSISTENCY_CHECK(sf > m_sfHighBound);
+
+ m_sfHighBound = sf;
+}
+
+void ExceptionTracker::StackRange::ExtendLowerBound(StackFrame sf)
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(IsConsistent());
+ CONSISTENCY_CHECK(sf < m_sfLowBound);
+
+ m_sfLowBound = sf;
+}
+
+void ExceptionTracker::StackRange::TrimLowerBound(StackFrame sf)
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(IsConsistent());
+ CONSISTENCY_CHECK(sf >= m_sfLowBound);
+
+ m_sfLowBound = sf;
+}
+
+StackFrame ExceptionTracker::StackRange::GetLowerBound()
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(IsConsistent());
+
+ return m_sfLowBound;
+}
+
+StackFrame ExceptionTracker::StackRange::GetUpperBound()
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(IsConsistent());
+
+ return m_sfHighBound;
+}
+
+#ifdef _DEBUG
+bool ExceptionTracker::StackRange::IsDisjointWithAndLowerThan(StackRange* pOtherRange)
+{
+ CONSISTENCY_CHECK(IsConsistent());
+ CONSISTENCY_CHECK(pOtherRange->IsConsistent());
+
+ return m_sfHighBound < pOtherRange->m_sfLowBound;
+}
+
+#endif // _DEBUG
+
+
+#ifdef _DEBUG
+bool ExceptionTracker::StackRange::IsConsistent()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_sfLowBound.IsMaxVal() ||
+ m_sfHighBound.IsNull())
+ {
+ return true;
+ }
+
+ if (m_sfLowBound <= m_sfHighBound)
+ {
+ return true;
+ }
+
+ LOG((LF_EH, LL_ERROR, "sp: low: %p high: %p\n", m_sfLowBound.SP, m_sfHighBound.SP));
+
+ return false;
+}
+#endif // _DEBUG
+
+// Determine if the given StackFrame is in the stack region unwound by the specified ExceptionTracker.
+// This is used by the stackwalker to skip funclets. Refer to the calls to this method in StackWalkFramesEx()
+// for more information.
+//
+// Effectively, this will make the stackwalker skip all the frames until it reaches the frame
+// containing the funclet. Details of the skipping logic are described in the method implementation.
+//
+// static
+bool ExceptionTracker::IsInStackRegionUnwoundBySpecifiedException(CrawlFrame * pCF, PTR_ExceptionTracker pExceptionTracker)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pCF != NULL);
+
+ // The tracker must be in the second pass, and its stack range must not be empty.
+ if ( (pExceptionTracker == NULL) ||
+ pExceptionTracker->IsInFirstPass() ||
+ pExceptionTracker->m_ScannedStackRange.IsEmpty())
+ {
+ return false;
+ }
+
+ CallerStackFrame csfToCheck;
+ if (pCF->IsFrameless())
+ {
+ csfToCheck = CallerStackFrame::FromRegDisplay(pCF->GetRegisterSet());
+ }
+ else
+ {
+ csfToCheck = CallerStackFrame((UINT_PTR)pCF->GetFrame());
+ }
+
+ StackFrame sfLowerBound = pExceptionTracker->m_ScannedStackRange.GetLowerBound();
+ StackFrame sfUpperBound = pExceptionTracker->m_ScannedStackRange.GetUpperBound();
+
+ //
+ // Let's take an example callstack that grows from left->right:
+ //
+ // M5 (50) -> M4 (40) -> M3 (30) -> M2 (20) -> M1 (10) ->throw
+ //
+ // These are all managed frames, where M1 throws and the exception is caught
+ // in M4. The numbers in brackets are the values of the stack pointer after
+ // the prolog is executed (or, in case of dynamic allocation, the SP after
+ // the dynamic allocation) and will be the SP at the time the callee function
+ // is invoked.
+ //
+ // When the stackwalker is asked to skip funclets during the stackwalk,
+ // it will skip all the frames on the stack until it reaches the frame
+ // containing the funclet after it has identified the funclet from
+ // which the skipping of frames needs to commence.
+ //
+ // At such a point, the exception tracker's scanned stack range's
+ // lowerbound will correspond to the frame that had the exception
+ // and the upper bound will correspond to the frame that had the funclet.
+ // For scenarios like a security stackwalk that may be triggered out of a
+ // funclet (e.g. a catch block), skipping funclets and frames in this fashion
+ // is expected to lead us to the parent frame containing the funclet as it
+ // will contain an object of interest (e.g. security descriptor).
+ //
+ // The check below ensures that we skip the frames from the one that
+ // had exception to the one that is the callee of the method containing
+ // the funclet of interest. In the example above, this would mean skipping
+ // from M1 to M3.
+ //
+ // We use CallerSP of a given CrawlFrame to perform such a skip. On AMD64,
+ // the first frame where CallerSP will be greater than SP of the frame
+ // itself will be when we reach the lowest frame itself (i.e. M1). On a similar
+ // note, the only time when CallerSP of a given CrawlFrame will be equal to the
+ // upper bound is when we reach the callee of the frame containing the funclet.
+ // Thus, our check for the skip range is done by the following clause:
+ //
+ // if ((sfLowerBound < csfToCheck) && (csfToCheck <= sfUpperBound))
+ //
+ // On ARM & ARM64, while the lower and upper bounds are populated using the Establisher
+ // frame given by the OS during exception dispatch, they actually correspond to the
+ // SP of the caller of a given frame, instead of being the SP of the given frame.
+ // That is, on ARM, we will have lowerBound as 20 (corresponding to M1) and
+ // upperBound as 50 (corresponding to M4 which contains the catch funclet).
+ //
+ // Thus, to skip frames on ARM until we reach the frame containing funclet of
+ // interest, the skipping will be done by the following clause:
+ //
+ // if ((sfLowerBound <= csfToCheck) && (csfToCheck < sfUpperBound))
+ //
+ // The first time when CallerSP of a given CrawlFrame will be the same as lowerBound
+ // is when we will reach the first frame to be skipped. Likewise, the last frame whose
+ // CallerSP will be less than the upperBound will be the callee of the frame
+ // containing the funclet. When CallerSP is equal to the upperBound, we have reached
+ // the frame containing the funclet and DO NOT want to skip it. Hence, "<"
+ // in the 2nd part of the clause.
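+ //
+ // To make this concrete, here is the example callstack from above evaluated
+ // against both clauses (an illustrative walk-through; the bounds are the
+ // ones discussed above):
+ //
+ //                        AMD64: low=10, high=40    ARM/ARM64: low=20, high=50
+ // Frame   CallerSP       (low < csf <= high)       (low <= csf < high)
+ // M1      20             skip                      skip
+ // M2      30             skip                      skip
+ // M3      40             skip                      skip
+ // M4      50             do not skip               do not skip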
+
+ // Remember that sfLowerBound and sfUpperBound are in the "OS format".
+ // Refer to the comment for CallerStackFrame for more information.
+#if defined(_TARGET_AMD64_)
+ if ((sfLowerBound < csfToCheck) && (csfToCheck <= sfUpperBound))
+#else // _TARGET_ARM_ || _TARGET_ARM64_
+ if ((sfLowerBound <= csfToCheck) && (csfToCheck < sfUpperBound))
+#endif // _TARGET_AMD64_
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
+
+// Returns a bool indicating if the specified CrawlFrame has been unwound by the active exception.
+bool ExceptionTracker::IsInStackRegionUnwoundByCurrentException(CrawlFrame * pCF)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ Thread * pThread = pCF->pThread;
+ PTR_ExceptionTracker pCurrentTracker = pThread->GetExceptionState()->GetCurrentExceptionTracker();
+ return ExceptionTracker::IsInStackRegionUnwoundBySpecifiedException(pCF, pCurrentTracker);
+}
+
+// Returns a bool indicating if the specified CrawlFrame has been unwound by any active (e.g. nested) exceptions.
+//
+// This method uses various fields of the ExceptionTracker data structure to do its work. Since this code runs on the thread
+// performing the GC stackwalk, it must be ensured that these fields are not updated on another thread in parallel. Thus,
+// any access to the fields in question that may result in updating them should happen in COOP mode. This provides high-level
+// synchronization with the GC thread, since while a GC stackwalk is active, an attempt to enter COOP mode will block the thread
+// and thus, attempts to update such fields will be synchronized.
+//
+// Currently, the following fields are used below:
+//
+// m_ExceptionFlags, m_ScannedStackRange, m_sfCurrentEstablisherFrame, m_sfLastUnwoundEstablisherFrame,
+// m_pInitialExplicitFrame, m_pLimitFrame, m_pPrevNestedInfo.
+//
+bool ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException(CrawlFrame * pCF)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pCF != NULL);
+
+ // Enumerate all (nested) exception trackers and see if any of them has unwound the
+ // specified CrawlFrame.
+ Thread * pTargetThread = pCF->pThread;
+ PTR_ExceptionTracker pTopTracker = pTargetThread->GetExceptionState()->GetCurrentExceptionTracker();
+ PTR_ExceptionTracker pCurrentTracker = pTopTracker;
+
+ bool fHasFrameBeenUnwound = false;
+
+ while (pCurrentTracker != NULL)
+ {
+ bool fSkipCurrentTracker = false;
+
+ // The tracker must be in the second pass, and its stack range must not be empty.
+ if (pCurrentTracker->IsInFirstPass() ||
+ pCurrentTracker->m_ScannedStackRange.IsEmpty())
+ {
+ fSkipCurrentTracker = true;
+ }
+
+ if (!fSkipCurrentTracker)
+ {
+ CallerStackFrame csfToCheck;
+ bool fFrameless = false;
+ if (pCF->IsFrameless())
+ {
+ csfToCheck = CallerStackFrame::FromRegDisplay(pCF->GetRegisterSet());
+ fFrameless = true;
+ }
+ else
+ {
+ csfToCheck = CallerStackFrame((UINT_PTR)pCF->GetFrame());
+ }
+
+ STRESS_LOG4(LF_EH|LF_GCROOTS, LL_INFO100, "CrawlFrame (%p): Frameless: %s %s: %p\n", pCF, fFrameless?"Yes":"No", fFrameless?"CallerSP":"Address", csfToCheck.SP);
+
+
+ StackFrame sfLowerBound = pCurrentTracker->m_ScannedStackRange.GetLowerBound();
+ StackFrame sfUpperBound = pCurrentTracker->m_ScannedStackRange.GetUpperBound();
+ StackFrame sfCurrentEstablisherFrame = pCurrentTracker->GetCurrentEstablisherFrame();
+ StackFrame sfLastUnwoundEstablisherFrame = pCurrentTracker->GetLastUnwoundEstablisherFrame();
+
+ STRESS_LOG4(LF_EH|LF_GCROOTS, LL_INFO100, "LowerBound/UpperBound/CurrentEstablisherFrame/LastUnwoundManagedFrame: %p/%p/%p/%p\n", sfLowerBound.SP, sfUpperBound.SP,
+ sfCurrentEstablisherFrame.SP, sfLastUnwoundEstablisherFrame.SP);
+
+ // Refer to the detailed comment in ExceptionTracker::IsInStackRegionUnwoundBySpecifiedException on the nature
+ // of this check.
+ //
+#if defined(_TARGET_AMD64_)
+ if ((sfLowerBound < csfToCheck) && (csfToCheck <= sfUpperBound))
+#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ if ((sfLowerBound <= csfToCheck) && (csfToCheck < sfUpperBound))
+#else
+ PORTABILITY_ASSERT("ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException");
+#endif // _TARGET_AMD64_
+ {
+ fHasFrameBeenUnwound = true;
+ break;
+ }
+
+ //
+ // The frame in question was not found to be covered by the scanned stack range of the exception tracker.
+ // If the frame is managed, then it's possible that it forms the upper bound of the scanned stack range.
+ //
+ // The scanned stack range is updated by our personality routine once ExceptionTracker::ProcessOSExceptionNotification is invoked.
+ // However, it is possible that we have unwound a frame and returned back to the OS (in preemptive mode) and:
+ //
+ // 1) Either our personality routine has been invoked for the subsequent upstack managed frame but it has not yet got a chance to update
+ // the scanned stack range, OR
+ // 2) We have simply returned to the kernel exception dispatch and have yet to be invoked for a subsequent frame.
+ //
+ // In such a window, if we have been asked to check whether the frame forming the upper bound of the scanned stack range has been unwound,
+ // then perform the necessary validation.
+ //
+ // This is applicable to managed frames only.
+ if (fFrameless)
+ {
+#if defined(_TARGET_AMD64_)
+ // On X64, if the SP of the managed frame indicates that the frame is forming the upper bound,
+ // then:
+ //
+ // For case (1) above, sfCurrentEstablisherFrame will be the same as the callerSP of the managed frame.
+ // For case (2) above, sfLastUnwoundEstablisherFrame would be the same as the managed frame's SP (or upper bound)
+ //
+ // For these scenarios, the frame is considered unwound.
+ if (GetRegdisplaySP(pCF->GetRegisterSet()) == sfUpperBound.SP)
+ {
+ if (csfToCheck == sfCurrentEstablisherFrame)
+ {
+ fHasFrameBeenUnwound = true;
+ break;
+ }
+ else if (sfUpperBound == sfLastUnwoundEstablisherFrame)
+ {
+ fHasFrameBeenUnwound = true;
+ break;
+ }
+ }
+#else // _TARGET_ARM_ || _TARGET_ARM64_
+ // On ARM, if the callerSP of the managed frame is the same as upper bound, then:
+ //
+ // For case (1), sfCurrentEstablisherFrame will be above the callerSP of the managed frame (since the EstablisherFrame is the caller SP for a given frame on ARM)
+ // For case (2), the upper bound will be the same as sfLastUnwoundEstablisherFrame.
+ //
+ // For these scenarios, the frame is considered unwound.
+ if (sfUpperBound == csfToCheck)
+ {
+ if (csfToCheck < sfCurrentEstablisherFrame)
+ {
+ fHasFrameBeenUnwound = true;
+ break;
+ }
+ else if (sfLastUnwoundEstablisherFrame == sfUpperBound)
+ {
+ fHasFrameBeenUnwound = true;
+ break;
+ }
+ }
+#endif // _TARGET_AMD64_
+ }
+
+ // The frame in question does not appear in the current tracker's scanned stack range (of managed frames).
+ // If the frame is an explicit frame, then check if it is equal to (or greater than) the initial explicit frame
+ // of the tracker. We can do this equality comparison because explicit frames are stack allocated.
+ //
+ // Do keep in mind that InitialExplicitFrame is only set in the 2nd (unwind) pass, which works
+ // fine for the purpose of this method since it operates on exception trackers in the second pass only.
+ if (!fFrameless)
+ {
+ PTR_Frame pInitialExplicitFrame = pCurrentTracker->GetInitialExplicitFrame();
+ PTR_Frame pLimitFrame = pCurrentTracker->GetLimitFrame();
+
+#if !defined(DACCESS_COMPILE)
+ STRESS_LOG2(LF_EH|LF_GCROOTS, LL_INFO100, "InitialExplicitFrame: %p, LimitFrame: %p\n", pInitialExplicitFrame, pLimitFrame);
+#endif // !defined(DACCESS_COMPILE)
+
+ // Ideally, we would like to perform a comparison check to determine if the
+ // frame has been unwound. This, however, is based upon the premise that
+ // each explicit frame that is added to the frame chain is at a lower
+ // address than its predecessor.
+ //
+ // This works for frames across function calls, but if we have multiple
+ // explicit frames in the same function, then the compiler is free to
+ // assign any address it deems fit. Thus, it's entirely possible for a
+ // frame at the head of the frame chain to be at a higher address than
+ // its predecessor. This has been observed to be true with the VC++ compiler
+ // in the CLR ret build.
+ //
+ // To address this, we loop starting from the InitialExplicitFrame until we reach
+ // the LimitFrame. Since all frames starting from the InitialExplicitFrame, and prior
+ // to the LimitFrame, have been unwound, we break out of the loop if we find
+ // the frame we are looking for, setting a flag indicating that the frame in question
+ // was unwound.
+
+ /*if ((sfInitialExplicitFrame <= csfToCheck) && (csfToCheck < sfLimitFrame))
+ {
+ // The explicit frame falls in the range of explicit frames unwound by this tracker.
+ fHasFrameBeenUnwound = true;
+ break;
+ }*/
+ PTR_Frame pFrameToCheck = (PTR_Frame)csfToCheck.SP;
+ PTR_Frame pCurrentFrame = pInitialExplicitFrame;
+
+ {
+ while((pCurrentFrame != FRAME_TOP) && (pCurrentFrame != pLimitFrame))
+ {
+ if (pCurrentFrame == pFrameToCheck)
+ {
+ fHasFrameBeenUnwound = true;
+ break;
+ }
+
+ pCurrentFrame = pCurrentFrame->PtrNextFrame();
+ }
+ }
+
+ if (fHasFrameBeenUnwound)
+ {
+ break;
+ }
+ }
+ }
+
+ // Move to the next (previous) tracker
+ pCurrentTracker = pCurrentTracker->GetPreviousExceptionTracker();
+ }
+
+ if (fHasFrameBeenUnwound)
+ STRESS_LOG0(LF_EH|LF_GCROOTS, LL_INFO100, "Has already been unwound\n");
+
+ return fHasFrameBeenUnwound;
+}
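+
+// Illustrative (hypothetical) use from a stackwalk callback -- the real callers
+// live in the GC stack root reporting code:
+//
+// StackWalkAction GcReportCallback(CrawlFrame* pCF, LPVOID pData)
+// {
+//     if (ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException(pCF))
+//         return SWA_CONTINUE; // frame already unwound - don't report its GC roots
+//
+//     // ... report the live roots of this frame ...
+//     return SWA_CONTINUE;
+// }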
+
+//---------------------------------------------------------------------------------------
+//
+// Given the CrawlFrame of the current frame, return a StackFrame representing the current frame.
+// This StackFrame should only be used in a check to see if the current frame is the parent method frame
+// of a particular funclet. Don't use the returned StackFrame in any other way except to pass it back to
+// ExceptionTracker::IsUnwoundToTargetParentFrame(). The comparison logic is very platform-dependent.
+//
+// Arguments:
+// pCF - the CrawlFrame for the current frame
+//
+// Return Value:
+// Return a StackFrame for parent frame check
+//
+// Notes:
+// Don't use the returned StackFrame in any other way.
+//
+
+//static
+StackFrame ExceptionTracker::GetStackFrameForParentCheck(CrawlFrame * pCF)
+{
+ WRAPPER_NO_CONTRACT;
+
+ StackFrame sfResult;
+
+ // Returns the CrawlFrame's caller's SP - this is used to determine if we have
+ // reached the intended CrawlFrame in question (or not).
+
+ // sfParent is returned by the EH subsystem, which uses the OS format, i.e. the initial SP before
+ // any dynamic stack allocation. The stackwalker uses the current SP, i.e. the SP after all
+ // dynamic stack allocations. Thus, we cannot do an equality check. Instead, we get the
+ // CallerStackFrame, which is the caller SP.
+ sfResult = (StackFrame)CallerStackFrame::FromRegDisplay(pCF->GetRegisterSet());
+
+ return sfResult;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Given the StackFrame of a parent method frame, determine if we have unwound to it during stackwalking yet.
+// The StackFrame should be the return value of one of the FindParentStackFrameFor*() functions.
+// Refer to the comment for UnwindStackFrame for more information.
+//
+// Arguments:
+// pCF - the CrawlFrame of the current frame
+// sfParent - the StackFrame of the target parent method frame,
+// returned by one of the FindParentStackFrameFor*() functions
+//
+// Return Value:
+// whether we have unwound to the target parent method frame
+//
+
+// static
+bool ExceptionTracker::IsUnwoundToTargetParentFrame(CrawlFrame * pCF, StackFrame sfParent)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION( CheckPointer(pCF, NULL_NOT_OK) );
+ PRECONDITION( pCF->IsFrameless() );
+ PRECONDITION( pCF->GetRegisterSet()->IsCallerContextValid || pCF->GetRegisterSet()->IsCallerSPValid );
+ }
+ CONTRACTL_END;
+
+ StackFrame sfToCheck = GetStackFrameForParentCheck(pCF);
+ return IsUnwoundToTargetParentFrame(sfToCheck, sfParent);
+}
+
+// static
+bool ExceptionTracker::IsUnwoundToTargetParentFrame(StackFrame sfToCheck, StackFrame sfParent)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (sfParent == sfToCheck);
+}
+
+// Given the CrawlFrame for a funclet frame, return the frame pointer of the enclosing funclet frame.
+// For filter funclet frames and normal method frames, this function returns a NULL StackFrame.
+//
+// <WARNING>
+// It is not valid to call this function on an arbitrary funclet. You have to be doing a full stackwalk from
+// the leaf frame and skipping method frames as indicated by the return value of this function. This function
+// relies on the ExceptionTrackers, which are collapsed in the second pass when a nested exception escapes.
+// When this happens, we'll lose information on the funclet represented by the collapsed tracker.
+// </WARNING>
+//
+// Return Value:
+// StackFrame.IsNull() - no skipping is necessary
+// StackFrame.IsMaxVal() - skip one frame and then ask again
+// Anything else - skip to the method frame indicated by the return value and ask again
+//
+// static
+StackFrame ExceptionTracker::FindParentStackFrameForStackWalk(CrawlFrame* pCF, bool fForGCReporting /*= false */)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // We should never skip filter funclets. However, if we are stackwalking for GC reference
+ // reporting, then we need to get the stackframe of the parent frame (where the filter was
+ // invoked from) so that when we reach it, we can indicate that the filter has already
+ // performed the reporting.
+ //
+ // Thus, for GC reporting purposes, get filter's parent frame.
+ if (pCF->IsFilterFunclet() && (!fForGCReporting))
+ {
+ return StackFrame();
+ }
+ else
+ {
+ return FindParentStackFrameHelper(pCF, NULL, NULL, NULL, fForGCReporting);
+ }
+}
+
+// Given the CrawlFrame for a filter funclet frame, return the frame pointer of the parent method frame.
+// It also returns the relative offset and the caller SP of the parent method frame.
+//
+// <WARNING>
+// The same warning for FindParentStackFrameForStackWalk() also applies here. Moreover, although
+// this function seems to be more convenient, it may potentially trigger a full stackwalk! Do not
+// call this unless you know absolutely what you are doing. In most cases FindParentStackFrameForStackWalk()
+// is what you need.
+// </WARNING>
+//
+// Return Value:
+// StackFrame.IsNull() - no skipping is necessary
+// Anything else - the StackFrame of the parent method frame
+//
+// static
+StackFrame ExceptionTracker::FindParentStackFrameEx(CrawlFrame* pCF,
+ DWORD* pParentOffset,
+ UINT_PTR* pParentCallerSP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION( pCF != NULL );
+ PRECONDITION( pCF->IsFilterFunclet() );
+ }
+ CONTRACTL_END;
+
+ bool fRealParent = false;
+ StackFrame sfResult = ExceptionTracker::FindParentStackFrameHelper(pCF, &fRealParent, pParentOffset, pParentCallerSP);
+
+ if (fRealParent)
+ {
+ // If the enclosing method is the parent method, then we are done.
+ return sfResult;
+ }
+ else
+ {
+ // Otherwise we need to do a full stackwalk to find the parent method frame.
+ // This should only happen if we are calling a filter inside a funclet.
+ return ExceptionTracker::RareFindParentStackFrame(pCF, pParentOffset, pParentCallerSP);
+ }
+}
+
+// static
+StackFrame ExceptionTracker::GetCallerSPOfParentOfNonExceptionallyInvokedFunclet(CrawlFrame *pCF)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pCF != NULL);
+ PRECONDITION(pCF->IsFunclet() && (!pCF->IsFilterFunclet()));
+ }
+ CONTRACTL_END;
+
+ PREGDISPLAY pRD = pCF->GetRegisterSet();
+
+ // Ensure that the caller Context is valid.
+ _ASSERTE(pRD->IsCallerContextValid);
+
+ // Make a copy of the caller context
+ T_CONTEXT tempContext;
+ CopyOSContext(&tempContext, pRD->pCallerContext);
+
+ // Now unwind it to get the context of the caller's caller.
+ EECodeInfo codeInfo(dac_cast<PCODE>(GetIP(pRD->pCallerContext)));
+ Thread::VirtualUnwindCallFrame(&tempContext, NULL, &codeInfo);
+
+ StackFrame sfRetVal = StackFrame((UINT_PTR)(GetSP(&tempContext)));
+ _ASSERTE(!sfRetVal.IsNull() && !sfRetVal.IsMaxVal());
+
+ return sfRetVal;
+}
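+
+// Pictorially (illustrative): for a stack of
+//
+//     caller-of-parent -> parent -> funclet (non-exceptionally invoked)
+//
+// pCallerContext above describes "parent", so one more virtual unwind yields
+// the context of "caller-of-parent", whose SP is the parent's CallerSP.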
+
+// static
+StackFrame ExceptionTracker::FindParentStackFrameHelper(CrawlFrame* pCF,
+ bool* pfRealParent,
+ DWORD* pParentOffset,
+ UINT_PTR* pParentCallerSP,
+ bool fForGCReporting /* = false */)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION( pCF != NULL );
+ PRECONDITION( pCF->IsFunclet() );
+ PRECONDITION( CheckPointer(pfRealParent, NULL_OK) );
+ PRECONDITION( CheckPointer(pParentOffset, NULL_OK) );
+ PRECONDITION( CheckPointer(pParentCallerSP, NULL_OK) );
+ }
+ CONTRACTL_END;
+
+ StackFrame sfResult;
+ REGDISPLAY* pRegDisplay = pCF->GetRegisterSet();
+
+ // At this point, we need a valid caller SP and the CallerStackFrame::FromRegDisplay
+ // asserts that the RegDisplay contains one.
+ CallerStackFrame csfCurrent = CallerStackFrame::FromRegDisplay(pRegDisplay);
+ ExceptionTracker *pCurrentTracker = NULL;
+ bool fIsFilterFunclet = pCF->IsFilterFunclet();
+
+ // We can't do this on an unmanaged thread.
+ Thread* pThread = pCF->pThread;
+ if (pThread == NULL)
+ {
+ _ASSERTE(!"FindParentStackFrame() called on an unmanaged thread");
+ goto lExit;
+ }
+
+ // Check for out-of-line finally funclets. Filter funclets can't be out-of-line.
+ if (!fIsFilterFunclet)
+ {
+ if (pRegDisplay->IsCallerContextValid)
+ {
+ PCODE callerIP = dac_cast<PCODE>(GetIP(pRegDisplay->pCallerContext));
+ BOOL fIsCallerInVM = FALSE;
+
+ // Check if the caller IP is in mscorwks. If it is not, then it is an out-of-line finally.
+ // Normally, the caller of a finally is ExceptionTracker::CallHandler().
+#if defined(DACCESS_COMPILE)
+ HMODULE_TGT hEE = DacGlobalBase();
+#else // !DACCESS_COMPILE
+ HMODULE_TGT hEE = g_pMSCorEE;
+#endif // !DACCESS_COMPILE
+ fIsCallerInVM = IsIPInModule(hEE, callerIP);
+
+ if (!fIsCallerInVM)
+ {
+ if (!fForGCReporting)
+ {
+ sfResult.SetMaxVal();
+ goto lExit;
+ }
+ else
+ {
+ // We have run into a non-exceptionally invoked finally funclet (aka out-of-line finally funclet).
+ // Since these funclets are invoked from JITted code, we will not find their EnclosingClauseCallerSP
+ // in an exception tracker as one does not exist (remember, these funclets are invoked "non"-exceptionally).
+ //
+ // At this point, the caller context is that of the parent frame of the funclet. All we need is the CallerSP
+ // of that parent. We leverage a helper function that will perform an unwind against the caller context
+ // and return us the SP (of the caller of the funclet's parent).
+ StackFrame sfCallerSPOfFuncletParent = ExceptionTracker::GetCallerSPOfParentOfNonExceptionallyInvokedFunclet(pCF);
+ return sfCallerSPOfFuncletParent;
+ }
+ }
+ }
+ }
+
+ for (pCurrentTracker = pThread->GetExceptionState()->m_pCurrentTracker;
+ pCurrentTracker != NULL;
+ pCurrentTracker = pCurrentTracker->m_pPrevNestedInfo)
+ {
+ // Check if the tracker has just been created.
+ if (pCurrentTracker->m_ScannedStackRange.IsEmpty())
+ {
+ continue;
+ }
+
+ // Since the current frame is a non-filter funclet, determine if its caller is the same one
+ // as was saved against the exception tracker before the funclet was invoked in ExceptionTracker::CallHandler.
+ CallerStackFrame csfFunclet = pCurrentTracker->m_EHClauseInfo.GetCallerStackFrameForEHClause();
+ if (csfCurrent == csfFunclet)
+ {
+ // The EnclosingClauseCallerSP is initialized in ExceptionTracker::ProcessManagedCallFrame, just before
+ // invoking the funclets. Basically, we are using the SP of the caller of the frame containing the funclet
+ // to determine if we have reached the frame containing the funclet.
+ EnclosingClauseInfo srcEnclosingClause = (fForGCReporting)?pCurrentTracker->m_EnclosingClauseInfoForGCReporting:pCurrentTracker->m_EnclosingClauseInfo;
+ sfResult = (StackFrame)(CallerStackFrame(srcEnclosingClause.GetEnclosingClauseCallerSP()));
+
+ // Check whether the tracker has called any funclet yet.
+ if (sfResult.IsNull())
+ {
+ continue;
+ }
+
+ // Set the relevant information.
+ if (pfRealParent != NULL)
+ {
+ *pfRealParent = !srcEnclosingClause.EnclosingClauseIsFunclet();
+ }
+ if (pParentOffset != NULL)
+ {
+ *pParentOffset = srcEnclosingClause.GetEnclosingClauseOffset();
+ }
+ if (pParentCallerSP != NULL)
+ {
+ *pParentCallerSP = srcEnclosingClause.GetEnclosingClauseCallerSP();
+ }
+
+ break;
+ }
+ }
+
+lExit: ;
+
+ STRESS_LOG3(LF_EH|LF_GCROOTS, LL_INFO100, "Returning 0x%p as the parent stack frame for %s 0x%p\n",
+ sfResult.SP, fIsFilterFunclet ? "filter funclet" : "funclet", csfCurrent.SP);
+
+ return sfResult;
+}
+
+struct RareFindParentStackFrameCallbackState
+{
+ StackFrame m_sfTarget;
+ StackFrame m_sfParent;
+ bool m_fFoundTarget;
+ DWORD m_dwParentOffset;
+ UINT_PTR m_uParentCallerSP;
+};
+
+// This is the callback for the stackwalk to get the parent stack frame for a filter funclet.
+//
+// static
+StackWalkAction ExceptionTracker::RareFindParentStackFrameCallback(CrawlFrame* pCF, LPVOID pData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ RareFindParentStackFrameCallbackState* pState = (RareFindParentStackFrameCallbackState*)pData;
+
+ // In all cases, we don't care about explicit frames.
+ if (!pCF->IsFrameless())
+ {
+ return SWA_CONTINUE;
+ }
+
+ REGDISPLAY* pRegDisplay = pCF->GetRegisterSet();
+ StackFrame sfCurrent = StackFrame::FromRegDisplay(pRegDisplay);
+
+ // Check if we have reached the target already.
+ if (!pState->m_fFoundTarget)
+ {
+ if (sfCurrent != pState->m_sfTarget)
+ {
+ return SWA_CONTINUE;
+ }
+
+ pState->m_fFoundTarget = true;
+ }
+
+ // We have reached the target, now do the normal frame skipping.
+ if (!pState->m_sfParent.IsNull())
+ {
+ if (pState->m_sfParent.IsMaxVal() || IsUnwoundToTargetParentFrame(pCF, pState->m_sfParent))
+ {
+ // We have reached the specified method frame to skip to.
+ // Now clear the flag and ask again.
+ pState->m_sfParent.Clear();
+ }
+ }
+
+ if (pState->m_sfParent.IsNull() && pCF->IsFunclet())
+ {
+ pState->m_sfParent = ExceptionTracker::FindParentStackFrameHelper(pCF, NULL, NULL, NULL);
+ }
+
+ // If we still need to skip, then continue the stackwalk.
+ if (!pState->m_sfParent.IsNull())
+ {
+ return SWA_CONTINUE;
+ }
+
+ // At this point, we are done.
+ pState->m_sfParent = ExceptionTracker::GetStackFrameForParentCheck(pCF);
+ pState->m_dwParentOffset = pCF->GetRelOffset();
+
+ _ASSERTE(pRegDisplay->IsCallerContextValid);
+ pState->m_uParentCallerSP = GetSP(pRegDisplay->pCallerContext);
+
+ return SWA_ABORT;
+}
+
+// static
+StackFrame ExceptionTracker::RareFindParentStackFrame(CrawlFrame* pCF,
+ DWORD* pParentOffset,
+ UINT_PTR* pParentCallerSP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION( pCF != NULL );
+ PRECONDITION( pCF->IsFunclet() );
+ PRECONDITION( CheckPointer(pParentOffset, NULL_OK) );
+ PRECONDITION( CheckPointer(pParentCallerSP, NULL_OK) );
+ }
+ CONTRACTL_END;
+
+ Thread* pThread = pCF->pThread;
+
+ RareFindParentStackFrameCallbackState state;
+ state.m_sfParent.Clear();
+ state.m_sfTarget = StackFrame::FromRegDisplay(pCF->GetRegisterSet());
+ state.m_fFoundTarget = false;
+
+ PTR_Frame pFrame = pCF->pFrame;
+ T_CONTEXT ctx;
+ REGDISPLAY rd;
+ CopyRegDisplay((const PREGDISPLAY)pCF->GetRegisterSet(), &rd, &ctx);
+
+ pThread->StackWalkFramesEx(&rd, &ExceptionTracker::RareFindParentStackFrameCallback, &state, 0, pFrame);
+
+ if (pParentOffset != NULL)
+ {
+ *pParentOffset = state.m_dwParentOffset;
+ }
+ if (pParentCallerSP != NULL)
+ {
+ *pParentCallerSP = state.m_uParentCallerSP;
+ }
+ return state.m_sfParent;
+}
+
+ExceptionTracker::StackRange::StackRange()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifndef DACCESS_COMPILE
+ Reset();
+#endif // DACCESS_COMPILE
+}
+
+ExceptionTracker::EnclosingClauseInfo::EnclosingClauseInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_fEnclosingClauseIsFunclet = false;
+ m_dwEnclosingClauseOffset = 0;
+ m_uEnclosingClauseCallerSP = 0;
+}
+
+ExceptionTracker::EnclosingClauseInfo::EnclosingClauseInfo(bool fEnclosingClauseIsFunclet,
+ DWORD dwEnclosingClauseOffset,
+ UINT_PTR uEnclosingClauseCallerSP)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_fEnclosingClauseIsFunclet = fEnclosingClauseIsFunclet;
+ m_dwEnclosingClauseOffset = dwEnclosingClauseOffset;
+ m_uEnclosingClauseCallerSP = uEnclosingClauseCallerSP;
+}
+
+bool ExceptionTracker::EnclosingClauseInfo::EnclosingClauseIsFunclet()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fEnclosingClauseIsFunclet;
+}
+
+DWORD ExceptionTracker::EnclosingClauseInfo::GetEnclosingClauseOffset()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_dwEnclosingClauseOffset;
+}
+
+UINT_PTR ExceptionTracker::EnclosingClauseInfo::GetEnclosingClauseCallerSP()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_uEnclosingClauseCallerSP;
+}
+
+void ExceptionTracker::EnclosingClauseInfo::SetEnclosingClauseCallerSP(UINT_PTR callerSP)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_uEnclosingClauseCallerSP = callerSP;
+}
+
+bool ExceptionTracker::EnclosingClauseInfo::operator==(const EnclosingClauseInfo & rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return ((this->m_fEnclosingClauseIsFunclet == rhs.m_fEnclosingClauseIsFunclet) &&
+ (this->m_dwEnclosingClauseOffset == rhs.m_dwEnclosingClauseOffset) &&
+ (this->m_uEnclosingClauseCallerSP == rhs.m_uEnclosingClauseCallerSP));
+}
+
+void ExceptionTracker::ReleaseResources()
+{
+#ifndef DACCESS_COMPILE
+ if (m_hThrowable)
+ {
+ if (!CLRException::IsPreallocatedExceptionHandle(m_hThrowable))
+ {
+ DestroyHandle(m_hThrowable);
+ }
+ m_hThrowable = NULL;
+ }
+ m_StackTraceInfo.FreeStackTrace();
+
+#ifndef FEATURE_PAL
+ // Clear any held Watson Bucketing details
+ GetWatsonBucketTracker()->ClearWatsonBucketDetails();
+#endif // !FEATURE_PAL
+#endif // DACCESS_COMPILE
+}
+
+void ExceptionTracker::SetEnclosingClauseInfo(bool fEnclosingClauseIsFunclet,
+ DWORD dwEnclosingClauseOffset,
+ UINT_PTR uEnclosingClauseCallerSP)
+{
+ // Preserve the details of the current frame for GC reporting before
+ // we apply the nested exception logic below.
+ this->m_EnclosingClauseInfoForGCReporting = EnclosingClauseInfo(fEnclosingClauseIsFunclet,
+ dwEnclosingClauseOffset,
+ uEnclosingClauseCallerSP);
+ if (this->m_pPrevNestedInfo != NULL)
+ {
+ PTR_ExceptionTracker pPrevTracker = this->m_pPrevNestedInfo;
+ CallerStackFrame csfPrevEHClause = pPrevTracker->m_EHClauseInfo.GetCallerStackFrameForEHClause();
+
+ // Just propagate the information if this is a nested exception.
+ if (csfPrevEHClause.SP == uEnclosingClauseCallerSP)
+ {
+ this->m_EnclosingClauseInfo = pPrevTracker->m_EnclosingClauseInfo;
+ return;
+ }
+ }
+
+ this->m_EnclosingClauseInfo = EnclosingClauseInfo(fEnclosingClauseIsFunclet,
+ dwEnclosingClauseOffset,
+ uEnclosingClauseCallerSP);
+}
+
+
+#ifdef DACCESS_COMPILE
+void ExceptionTracker::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ // ExInfo is embedded so don't enum 'this'.
+ OBJECTHANDLE_EnumMemoryRegions(m_hThrowable);
+ m_ptrs.ExceptionRecord.EnumMem();
+ m_ptrs.ContextRecord.EnumMem();
+}
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+// This is a thin wrapper around ResetThreadAbortState. It's primarily used to
+// instantiate a CrawlFrame, when required, for walking the stack on IA64.
+//
+// The "when required" part refers to the set of conditions checked prior to the call to
+// this method in ExceptionTracker::ProcessOSExceptionNotification (and asserted in
+// ResetThreadAbortState).
+//
+// Also, since CrawlFrame ctor is protected, it can only be instantiated by friend
+// types (which ExceptionTracker is).
+
+// static
+void ExceptionTracker::ResetThreadAbortStatus(PTR_Thread pThread, CrawlFrame *pCf, StackFrame sfCurrentStackFrame)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pThread != NULL);
+ WIN64_ONLY(PRECONDITION(pCf != NULL);)
+ WIN64_ONLY(PRECONDITION(!sfCurrentStackFrame.IsNull());)
+ }
+ CONTRACTL_END;
+
+ if (pThread->IsAbortRequested())
+ {
+ ResetThreadAbortState(pThread, pCf, sfCurrentStackFrame);
+ }
+}
+#endif //!DACCESS_COMPILE
+
+#endif // _WIN64
+
diff --git a/src/vm/exceptionhandling.h b/src/vm/exceptionhandling.h
new file mode 100644
index 0000000000..639f47c5b4
--- /dev/null
+++ b/src/vm/exceptionhandling.h
@@ -0,0 +1,779 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+#ifndef __EXCEPTION_HANDLING_h__
+#define __EXCEPTION_HANDLING_h__
+
+#ifdef WIN64EXCEPTIONS
+
+// This address lies in the NULL pointer partition of the process memory.
+// Accessing it will result in an AV.
+#define INVALID_RESUME_ADDRESS 0x000000000000bad0
+
+#include "exstatecommon.h"
+
+LONG WINAPI CLRVectoredExceptionHandlerShim(PEXCEPTION_POINTERS pExceptionInfo);
+
+EXTERN_C EXCEPTION_DISPOSITION
+ProcessCLRException(IN PEXCEPTION_RECORD pExceptionRecord
+ WIN64_ARG(IN ULONG64 MemoryStackFp)
+ NOT_WIN64_ARG(IN ULONG MemoryStackFp),
+ IN OUT PT_CONTEXT pContextRecord,
+ IN OUT PT_DISPATCHER_CONTEXT pDispatcherContext);
+
+
+void __declspec(noinline)
+ClrUnwindEx(EXCEPTION_RECORD* pExceptionRecord,
+ UINT_PTR ReturnValue,
+ UINT_PTR TargetIP,
+ UINT_PTR TargetFrameSp);
+
+typedef DWORD_PTR (HandlerFn)(UINT_PTR uStackFrame, Object* pExceptionObj);
+
+enum CLRUnwindStatus { UnwindPending, FirstPassComplete, SecondPassComplete };
+
+enum TrackerMemoryType
+{
+ memManaged = 0x0001,
+ memUnmanaged = 0x0002,
+ memBoth = 0x0003,
+};
+
+// Enum that specifies the type of EH funclet we are about to invoke
+enum EHFuncletType
+{
+ Filter = 0x0001,
+ FaultFinally = 0x0002,
+ Catch = 0x0004,
+};
+
+typedef DPTR(class ExceptionTracker) PTR_ExceptionTracker;
+class ExceptionTracker
+{
+ friend class TrackerAllocator;
+ friend class ThreadExceptionState;
+ friend class ClrDataExceptionState;
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+#endif // DACCESS_COMPILE
+
+ friend void FreeTrackerMemory(ExceptionTracker* pTracker, TrackerMemoryType mem);
+
+private:
+ class StackRange;
+public:
+
+ ExceptionTracker() :
+ m_pThread(NULL),
+ m_hThrowable(NULL),
+ m_hCallerToken(NULL),
+ m_hImpersonationToken(NULL)
+ {
+#ifndef DACCESS_COMPILE
+ m_StackTraceInfo.Init();
+#endif // DACCESS_COMPILE
+
+#ifndef FEATURE_PAL
+ // Init the WatsonBucketTracker
+ m_WatsonBucketTracker.Init();
+#endif // !FEATURE_PAL
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Initialize the default exception severity to NotCorrupting
+ m_CorruptionSeverity = NotSet;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+ // By default, mark the tracker as not having delivered the first
+ // chance exception notification
+ m_fDeliveredFirstChanceNotification = FALSE;
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+
+ m_sfFirstPassTopmostFrame.Clear();
+
+ m_dwIndexClauseForCatch = 0;
+ m_sfEstablisherOfActualHandlerFrame.Clear();
+ m_sfCallerOfActualHandlerFrame.Clear();
+
+ m_fFixupCallerSPForGCReporting = false;
+
+ m_fResetEnclosingClauseSPForCatchFunclet = FALSE;
+
+ m_sfCurrentEstablisherFrame.Clear();
+ m_sfLastUnwoundEstablisherFrame.Clear();
+ m_pInitialExplicitFrame = NULL;
+ m_pLimitFrame = NULL;
+ }
+
+ ExceptionTracker(DWORD_PTR dwExceptionPc,
+ PTR_EXCEPTION_RECORD pExceptionRecord,
+ PTR_CONTEXT pContextRecord) :
+ m_pPrevNestedInfo((ExceptionTracker*)NULL),
+ m_pThread(GetThread()),
+ m_hThrowable(NULL),
+ m_uCatchToCallPC(NULL),
+ m_pSkipToParentFunctionMD(NULL),
+// these members were added for resume frame processing
+ m_pClauseForCatchToken(NULL),
+// end resume frame members
+ m_ExceptionCode(pExceptionRecord->ExceptionCode),
+ m_hCallerToken(NULL),
+ m_hImpersonationToken(NULL)
+ {
+ m_ptrs.ExceptionRecord = pExceptionRecord;
+ m_ptrs.ContextRecord = pContextRecord;
+
+ m_pLimitFrame = NULL;
+
+ if (IsInstanceTaggedSEHCode(pExceptionRecord->ExceptionCode) && ::WasThrownByUs(pExceptionRecord, pExceptionRecord->ExceptionCode))
+ {
+ m_ExceptionFlags.SetWasThrownByUs();
+ }
+
+ m_StackTraceInfo.Init();
+
+#ifndef FEATURE_PAL
+ // Init the WatsonBucketTracker
+ m_WatsonBucketTracker.Init();
+#endif // !FEATURE_PAL
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Initialize the default exception severity to NotCorrupting
+ m_CorruptionSeverity = NotSet;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+ // By default, mark the tracker as not having delivered the first
+ // chance exception notification
+ m_fDeliveredFirstChanceNotification = FALSE;
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+
+ m_dwIndexClauseForCatch = 0;
+ m_sfEstablisherOfActualHandlerFrame.Clear();
+ m_sfCallerOfActualHandlerFrame.Clear();
+
+ m_sfFirstPassTopmostFrame.Clear();
+
+ m_fFixupCallerSPForGCReporting = false;
+
+ m_fResetEnclosingClauseSPForCatchFunclet = FALSE;
+
+ m_sfCurrentEstablisherFrame.Clear();
+ m_sfLastUnwoundEstablisherFrame.Clear();
+ m_pInitialExplicitFrame = NULL;
+ }
+
+ ~ExceptionTracker()
+ {
+ ReleaseResources();
+ }
+
+ enum StackTraceState
+ {
+ STS_Append,
+ STS_FirstRethrowFrame,
+ STS_NewException,
+ };
+
+ static void InitializeCrawlFrame(CrawlFrame* pcfThisFrame, Thread* pThread, StackFrame sf, REGDISPLAY* pRD,
+ PT_DISPATCHER_CONTEXT pDispatcherContext, DWORD_PTR ControlPCForEHSearch,
+ UINT_PTR* puMethodStartPC
+ ARM_ARG(ExceptionTracker *pCurrentTracker)
+ ARM64_ARG(ExceptionTracker *pCurrentTracker));
+ static void InitializeCrawlFrameForExplicitFrame(CrawlFrame* pcfThisFrame, Frame* pFrame, MethodDesc *pMD);
+
+#ifndef DACCESS_COMPILE
+ static void ResetThreadAbortStatus(PTR_Thread pThread, CrawlFrame *pCf, StackFrame sfCurrentStackFrame);
+#endif // !DACCESS_COMPILE
+
+ CLRUnwindStatus ProcessOSExceptionNotification(
+ PEXCEPTION_RECORD pExceptionRecord,
+ PT_CONTEXT pContextRecord,
+ PT_DISPATCHER_CONTEXT pDispatcherContext,
+ DWORD dwExceptionFlags,
+ StackFrame sf,
+ Thread* pThread,
+ StackTraceState STState ARM_ARG(PVOID pICFSetAsLimitFrame));
+
+ CLRUnwindStatus ProcessExplicitFrame(
+ CrawlFrame* pcfThisFrame,
+ StackFrame sf,
+ BOOL fIsFirstPass,
+ StackTraceState& STState
+ );
+
+ CLRUnwindStatus ProcessManagedCallFrame(
+ CrawlFrame* pcfThisFrame,
+ StackFrame sf,
+ StackFrame sfEstablisherFrame,
+ EXCEPTION_RECORD* pExceptionRecord,
+ StackTraceState STState,
+ UINT_PTR uMethodStartPC,
+ DWORD dwExceptionFlags,
+ DWORD dwTACatchHandlerClauseIndex,
+ StackFrame sfEstablisherOfActualHandlerFrame
+ );
+
+ bool UpdateScannedStackRange(StackFrame sf, bool fIsFirstPass);
+
+ void FirstPassIsComplete();
+ void SecondPassIsComplete(MethodDesc* pMD, StackFrame sfResumeStackFrame);
+
+ CLRUnwindStatus HandleFunclets(bool* pfProcessThisFrame, bool fIsFirstPass,
+ MethodDesc * pMD, bool fFunclet, StackFrame sf);
+
+ static OBJECTREF CreateThrowable(
+ PEXCEPTION_RECORD pExceptionRecord,
+ BOOL bAsynchronousThreadStop
+ );
+
+ DWORD GetExceptionCode() { return m_ExceptionCode; }
+ INDEBUG(inline bool IsValid());
+ INDEBUG(static UINT_PTR DebugComputeNestingLevel());
+
+ inline OBJECTREF GetThrowable()
+ {
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (NULL != m_hThrowable)
+ {
+ return ObjectFromHandle(m_hThrowable);
+ }
+
+ return NULL;
+ }
+
+ // Return a StackFrame of the current frame for parent frame checking purposes.
+ // Don't use this StackFrame in any way except to pass it back to the ExceptionTracker
+ // via IsUnwoundToTargetParentFrame().
+ static StackFrame GetStackFrameForParentCheck(CrawlFrame * pCF);
+
+ static bool IsInStackRegionUnwoundBySpecifiedException(CrawlFrame * pCF, PTR_ExceptionTracker pExceptionTracker);
+ static bool IsInStackRegionUnwoundByCurrentException(CrawlFrame * pCF);
+
+ static bool HasFrameBeenUnwoundByAnyActiveException(CrawlFrame * pCF);
+ void SetCurrentEstablisherFrame(StackFrame sfEstablisher)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_sfCurrentEstablisherFrame = sfEstablisher;
+ }
+
+ StackFrame GetCurrentEstablisherFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_sfCurrentEstablisherFrame;
+ }
+
+ void SetLastUnwoundEstablisherFrame(StackFrame sfEstablisher)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_sfLastUnwoundEstablisherFrame = sfEstablisher;
+ }
+
+ StackFrame GetLastUnwoundEstablisherFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_sfLastUnwoundEstablisherFrame;
+ }
+
+ PTR_Frame GetInitialExplicitFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pInitialExplicitFrame;
+ }
+
+ // Determines if we have unwound to the specified parent method frame.
+ // Currently this is only used for funclet skipping.
+ static bool IsUnwoundToTargetParentFrame(CrawlFrame * pCF, StackFrame sfParent);
+ static bool IsUnwoundToTargetParentFrame(StackFrame sfToCheck, StackFrame sfParent);
+
+ // Given the CrawlFrame for a funclet frame, return the frame pointer of the enclosing funclet frame.
+ // For filter funclet frames and normal method frames, this function returns a NULL StackFrame.
+ //
+ // <WARNING>
+ // It is not valid to call this function on an arbitrary funclet. You have to be doing a full stackwalk from
+ // the leaf frame and skipping method frames as indicated by the return value of this function. This function
+ // relies on the ExceptionTrackers, which are collapsed in the second pass when a nested exception escapes.
+ // When this happens, we'll lose information on the funclet represented by the collapsed tracker.
+ // </WARNING>
+ //
+ // Return Value:
+ // StackFrame.IsNull() - no skipping is necessary
+ // StackFrame.IsMaxVal() - skip one frame and then ask again
+ // Anything else - skip to the method frame indicated by the return value and ask again
+ static StackFrame FindParentStackFrameForStackWalk(CrawlFrame* pCF, bool fForGCReporting = false);
+
+ // Given the CrawlFrame for a filter funclet frame, return the frame pointer of the parent method frame.
+ // It also returns the relative offset and the caller SP of the parent method frame.
+ //
+ // <WARNING>
+ // The same warning for FindParentStackFrameForStackWalk() also applies here. Moreover, although
+ // this function seems to be more convenient, it may potentially trigger a full stackwalk! Do not
+ // call this unless you know absolutely what you are doing. In most cases FindParentStackFrameForStackWalk()
+ // is what you need.
+ // </WARNING>
+ //
+ // Return Value:
+ // StackFrame.IsNull() - no skipping is necessary
+ // Anything else - the StackFrame of the parent method frame
+ static StackFrame FindParentStackFrameEx(CrawlFrame* pCF,
+ DWORD* pParentOffset,
+ UINT_PTR* pParentCallerSP);
+
+ static void
+ PopTrackers(StackFrame sfResumeFrame,
+ bool fPopWhenEqual);
+
+ static void
+ PopTrackers(void* pvStackPointer);
+
+ static void
+ PopTrackerIfEscaping(void* pvStackPointer);
+
+ static ExceptionTracker*
+ GetOrCreateTracker(UINT_PTR ControlPc,
+ StackFrame sf,
+ EXCEPTION_RECORD* pExceptionRecord,
+ T_CONTEXT* pContextRecord,
+ BOOL bAsynchronousThreadStop,
+ bool fIsFirstPass,
+ StackTraceState* pSTState);
+
+ static void
+ ResumeExecution(T_CONTEXT* pContextRecord,
+ EXCEPTION_RECORD* pExceptionRecord
+ );
+
+ void ResetLimitFrame();
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif // DACCESS_COMPILE
+
+ static void DebugLogTrackerRanges(__in_z char *pszTag);
+
+ bool IsStackOverflowException();
+
+private:
+ DWORD_PTR
+ CallHandler(UINT_PTR dwHandlerStartPC,
+ StackFrame sf,
+ EE_ILEXCEPTION_CLAUSE* pEHClause,
+ MethodDesc* pMD,
+ EHFuncletType funcletType
+ ARM_ARG(PT_CONTEXT pContextRecord)
+ ARM64_ARG(PT_CONTEXT pContextRecord)
+ );
+
+ inline static BOOL
+ ClauseCoversPC(EE_ILEXCEPTION_CLAUSE* pEHClause,
+ DWORD dwOffset);
+
+ static bool
+ IsFilterStartOffset(EE_ILEXCEPTION_CLAUSE* pEHClause, DWORD_PTR dwHandlerStartPC);
+
+#ifndef DACCESS_COMPILE
+ void DestroyExceptionHandle()
+ {
+ // Never, ever destroy a preallocated exception handle.
+ if ((m_hThrowable != NULL) && !CLRException::IsPreallocatedExceptionHandle(m_hThrowable))
+ {
+ DestroyHandle(m_hThrowable);
+ }
+
+ m_hThrowable = NULL;
+ }
+#endif
+
+ void SaveStackTrace();
+
+ inline BOOL CanAllocateMemory()
+ {
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF oThrowable = GetThrowable();
+
+ return !(oThrowable == CLRException::GetPreallocatedOutOfMemoryException()) &&
+ !(oThrowable == CLRException::GetPreallocatedStackOverflowException());
+ }
+
+ INDEBUG(inline BOOL ThrowableIsValid());
+
+ bool HandleNestedExceptionEscape(StackFrame sf, bool fIsFirstPass);
+
+#if defined(DEBUGGING_SUPPORTED)
+ BOOL NotifyDebuggerOfStub(Thread* pThread, StackFrame sf, Frame* pCurrentFrame);
+
+ void
+ MakeCallbacksRelatedToHandler(bool fBeforeCallingHandler,
+ Thread* pThread,
+ MethodDesc* pMD,
+ EE_ILEXCEPTION_CLAUSE* pEHClause,
+ DWORD_PTR dwHandlerStartPC,
+ StackFrame sf);
+#else // !DEBUGGING_SUPPORTED
+ void
+ MakeCallbacksRelatedToHandler(bool fBeforeCallingHandler,
+ Thread* pThread,
+ MethodDesc* pMD,
+ EE_ILEXCEPTION_CLAUSE* pEHClause,
+ DWORD_PTR dwHandlerStartPC,
+ StackFrame sf) {return;}
+#endif // !DEBUGGING_SUPPORTED
+
+ // private helpers
+ static StackFrame GetCallerSPOfParentOfNonExceptionallyInvokedFunclet(CrawlFrame *pCF);
+
+ static StackFrame FindParentStackFrameHelper(CrawlFrame* pCF,
+ bool* pfRealParent,
+ DWORD* pParentOffset,
+ UINT_PTR* pParentCallerSP,
+ bool fForGCReporting = false);
+
+ static StackFrame RareFindParentStackFrame(CrawlFrame* pCF,
+ DWORD* pParentOffset,
+ UINT_PTR* pParentCallerSP);
+
+ static StackWalkAction RareFindParentStackFrameCallback(CrawlFrame* pCF, LPVOID pData);
+
+ struct DAC_EXCEPTION_POINTERS
+ {
+ PTR_EXCEPTION_RECORD ExceptionRecord;
+ PTR_CONTEXT ContextRecord;
+ };
+
+public:
+
+ static UINT_PTR FinishSecondPass(Thread* pThread, UINT_PTR uResumePC, StackFrame sf,
+ T_CONTEXT* pContextRecord, ExceptionTracker *pTracker, bool* pfAborting = NULL);
+ UINT_PTR CallCatchHandler(T_CONTEXT* pContextRecord, bool* pfAborting = NULL);
+
+ static bool FindNonvolatileRegisterPointers(Thread* pThread, UINT_PTR uOriginalSP, REGDISPLAY* pRegDisplay, TADDR uResumeFrameFP);
+ static void UpdateNonvolatileRegisters(T_CONTEXT* pContextRecord, REGDISPLAY *pRegDisplay, bool fAborting);
+
+ PTR_Frame GetLimitFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pLimitFrame;
+ }
+
+ StackRange GetScannedStackRange()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_ScannedStackRange;
+ }
+
+ UINT_PTR GetCatchToCallPC()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_uCatchToCallPC;
+ }
+
+ // Returns the topmost frame seen during the first pass.
+ StackFrame GetTopmostStackFrameFromFirstPass()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_sfFirstPassTopmostFrame;
+ }
+
+#ifdef _DEBUG
+ StackFrame GetResumeStackFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_sfResumeStackFrame;
+ }
+
+ PTR_EXCEPTION_CLAUSE_TOKEN GetCatchHandlerExceptionClauseToken()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pClauseForCatchToken;
+ }
+#endif // _DEBUG
+
+ DWORD GetCatchHandlerExceptionClauseIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_dwIndexClauseForCatch;
+ }
+
+ StackFrame GetEstablisherOfActualHandlingFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_sfEstablisherOfActualHandlerFrame;
+ }
+
+ StackFrame GetCallerOfActualHandlingFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_sfCallerOfActualHandlerFrame;
+ }
+
+#ifndef FEATURE_PAL
+private:
+ EHWatsonBucketTracker m_WatsonBucketTracker;
+public:
+ inline PTR_EHWatsonBucketTracker GetWatsonBucketTracker()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return PTR_EHWatsonBucketTracker(PTR_HOST_MEMBER_TADDR(ExceptionTracker, this, m_WatsonBucketTracker));
+ }
+#endif // !FEATURE_PAL
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+private:
+ CorruptionSeverity m_CorruptionSeverity;
+public:
+ inline CorruptionSeverity GetCorruptionSeverity()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (CorruptionSeverity)GET_CORRUPTION_SEVERITY(m_CorruptionSeverity);
+ }
+
+ inline void SetCorruptionSeverity(CorruptionSeverity severityToSet)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_CorruptionSeverity = severityToSet;
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+private:
+ BOOL m_fDeliveredFirstChanceNotification;
+
+public:
+ inline BOOL DeliveredFirstChanceNotification()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_fDeliveredFirstChanceNotification;
+ }
+
+ inline void SetFirstChanceNotificationStatus(BOOL fDelivered)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_fDeliveredFirstChanceNotification = fDelivered;
+ }
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+
+ // Returns the exception tracker previous to the current one
+ inline PTR_ExceptionTracker GetPreviousExceptionTracker()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pPrevNestedInfo;
+ }
+
+ // Returns the throwable associated with the tracker as a handle
+ inline OBJECTHANDLE GetThrowableAsHandle()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_hThrowable;
+ }
+
+ bool IsInFirstPass()
+ {
+ return !m_ExceptionFlags.UnwindHasStarted();
+ }
+
+private: ;
+
+ void ReleaseResources();
+
+ void SetEnclosingClauseInfo(bool fEnclosingClauseIsFunclet,
+ DWORD dwEnclosingClauseOffset,
+ UINT_PTR uEnclosingClauseCallerSP);
+
+ class StackRange
+ {
+ public:
+ StackRange();
+ void Reset();
+ bool IsEmpty();
+ bool IsSupersededBy(StackFrame sf);
+ void CombineWith(StackFrame sfCurrent, StackRange* pPreviousRange);
+ bool Contains(StackFrame sf);
+ void ExtendUpperBound(StackFrame sf);
+ void ExtendLowerBound(StackFrame sf);
+ void TrimLowerBound(StackFrame sf);
+ StackFrame GetLowerBound();
+ StackFrame GetUpperBound();
+ INDEBUG(bool IsDisjointWithAndLowerThan(StackRange* pOtherRange));
+ private:
+ INDEBUG(bool IsConsistent());
+
+ private:
+ // <TODO> can we use a smaller encoding? </TODO>
+ StackFrame m_sfLowBound;
+ StackFrame m_sfHighBound;
+ };
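+
+ // A sketch of the StackRange lifecycle, derived from the method
+ // implementations in exceptionhandling.cpp (illustrative only):
+ //
+ // Reset()              => low = MaxVal, high = Null (the IsEmpty() state)
+ // ExtendUpperBound(sf) => raises high (asserts sf > high)
+ // ExtendLowerBound(sf) => lowers low (asserts sf < low)
+ // TrimLowerBound(sf)   => raises low (asserts sf >= low)
+ // IsConsistent()       => (low is MaxVal) || (high is Null) || (low <= high)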
+
+ struct EnclosingClauseInfo
+ {
+ public:
+ EnclosingClauseInfo();
+ EnclosingClauseInfo(bool fEnclosingClauseIsFunclet, DWORD dwEnclosingClauseOffset, UINT_PTR uEnclosingClauseCallerSP);
+
+ bool EnclosingClauseIsFunclet();
+ DWORD GetEnclosingClauseOffset();
+ UINT_PTR GetEnclosingClauseCallerSP();
+ void SetEnclosingClauseCallerSP(UINT_PTR callerSP);
+
+ bool operator==(const EnclosingClauseInfo & rhs);
+
+ private:
+ UINT_PTR m_uEnclosingClauseCallerSP;
+ DWORD m_dwEnclosingClauseOffset;
+ bool m_fEnclosingClauseIsFunclet;
+ };
+
+ PTR_ExceptionTracker m_pPrevNestedInfo;
+ Thread* m_pThread; // this is used as an IsValid/IsFree field -- if it's NULL, the allocator can
+ // reuse its memory; if it's non-NULL, it had better be a valid thread pointer
+
+ StackRange m_ScannedStackRange;
+ DAC_EXCEPTION_POINTERS m_ptrs;
+ OBJECTHANDLE m_hThrowable;
+ StackTraceInfo m_StackTraceInfo;
+ UINT_PTR m_uCatchToCallPC;
+ BOOL m_fResetEnclosingClauseSPForCatchFunclet;
+
+ union
+ {
+ MethodDesc* m_pSkipToParentFunctionMD; // SKIPTOPARENT
+ MethodDesc* m_pMethodDescOfCatcher;
+ };
+
+ StackFrame m_sfResumeStackFrame; // RESUMEFRAME
+ StackFrame m_sfFirstPassTopmostFrame; // Topmost frame seen during first pass
+ PTR_EXCEPTION_CLAUSE_TOKEN m_pClauseForCatchToken; // RESUMEFRAME
+ EE_ILEXCEPTION_CLAUSE m_ClauseForCatch;
+ // Index of EH clause that will catch the exception
+ DWORD m_dwIndexClauseForCatch;
+
+ // Establisher frame of the managed frame that contains
+ // the handler for the exception (corresponding
+ // to the EH index we save off in m_dwIndexClauseForCatch)
+ StackFrame m_sfEstablisherOfActualHandlerFrame;
+ StackFrame m_sfCallerOfActualHandlerFrame;
+
+ ExceptionFlags m_ExceptionFlags;
+ DWORD m_ExceptionCode;
+
+ PTR_Frame m_pLimitFrame;
+
+ // Thread Security State
+ HANDLE m_hCallerToken;
+ HANDLE m_hImpersonationToken;
+
+#ifdef DEBUGGING_SUPPORTED
+ //
+ // DEBUGGER STATE
+ //
+ DebuggerExState m_DebuggerExState;
+#endif // DEBUGGING_SUPPORTED
+
+ //
+ // Information for the funclet we are calling
+ //
+ EHClauseInfo m_EHClauseInfo;
+
+ // This flag indicates whether the SP we pass to a funclet is for an enclosing funclet.
+ EnclosingClauseInfo m_EnclosingClauseInfo;
+
+ // This stores the actual callerSP of the frame that is about to execute the funclet.
+ // It differs from "m_EnclosingClauseInfo" in that, upon detecting a nested exception,
+ // the latter can contain the callerSP of the original funclet instead of that of the
+ // current frame.
+ EnclosingClauseInfo m_EnclosingClauseInfoForGCReporting;
+ bool m_fFixupCallerSPForGCReporting;
+
+ StackFrame m_sfCurrentEstablisherFrame;
+ StackFrame m_sfLastUnwoundEstablisherFrame;
+ PTR_Frame m_pInitialExplicitFrame;
+};
+
+#if defined(WIN64EXCEPTIONS)
+PTR_ExceptionTracker GetEHTrackerForPreallocatedException(OBJECTREF oPreAllocThrowable, PTR_ExceptionTracker pStartingEHTracker);
+#endif // WIN64EXCEPTIONS
+
+class TrackerAllocator
+{
+public:
+ void Init();
+ void Terminate();
+ ExceptionTracker* GetTrackerMemory();
+ void FreeTrackerMemory(ExceptionTracker* pTracker);
+
+private:
+
+ struct Page;
+
+ struct PageHeader
+ {
+ Page* m_pNext;
+ LONG m_idxFirstFree;
+ };
+
+ enum
+ {
+ //
+ // Due to the unexpected growth of the ExceptionTracker struct,
+ // OS_PAGE_SIZE does not seem appropriate anymore on x64, and
+ // we should behave the same on x64 as on ia64 regardless of
+ // the difference between the page sizes on the platforms.
+ //
+ TRACKER_ALLOCATOR_PAGE_SIZE = 8*1024,
+ TRACKER_ALLOCATOR_MAX_OOM_SPINS = 20,
+ TRACKER_ALLOCATOR_OOM_SPIN_DELAY = 100,
+ NUM_TRACKERS_PER_PAGE = ((TRACKER_ALLOCATOR_PAGE_SIZE - sizeof(PageHeader)) / sizeof(ExceptionTracker)),
+ };
+
+ struct Page
+ {
+ PageHeader m_header;
+ ExceptionTracker m_rgTrackers[NUM_TRACKERS_PER_PAGE];
+ };
+
+ static_assert_no_msg(sizeof(Page) <= TRACKER_ALLOCATOR_PAGE_SIZE);
+
+ Page* m_pFirstPage;
+ Crst* m_pCrst;
+};
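+
+// Typical (illustrative) lifecycle of the allocator, paraphrasing the
+// interface above -- not a snippet from the product code:
+//
+// TrackerAllocator alloc;
+// alloc.Init();
+// ExceptionTracker* pTracker = alloc.GetTrackerMemory(); // may spin/retry on OOM
+// // ... use pTracker for the duration of exception dispatch ...
+// alloc.FreeTrackerMemory(pTracker);
+// alloc.Terminate();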
+
+#endif // WIN64EXCEPTIONS
+
+#endif // __EXCEPTION_HANDLING_h__
diff --git a/src/vm/exceptmacros.h b/src/vm/exceptmacros.h
new file mode 100644
index 0000000000..da4b759b9e
--- /dev/null
+++ b/src/vm/exceptmacros.h
@@ -0,0 +1,592 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// EXCEPTMACROS.H -
+//
+// This header file exposes mechanisms to:
+//
+// 1. Throw COM+ exceptions using the COMPlusThrow() function
+// 2. Guard a block of code using EX_TRY, and catch
+// COM+ exceptions using EX_CATCH
+//
+// from the *unmanaged* portions of the EE. Much of the EE runs
+// in a hybrid state where it runs like managed code but the code
+// is produced by a classic unmanaged-code C++ compiler.
+//
+// THROWING A COM+ EXCEPTION
+// -------------------------
+// To throw a COM+ exception, call the function:
+//
+// COMPlusThrow(OBJECTREF pThrowable);
+//
+// This function does not return. There are also various functions
+// that wrap COMPlusThrow for convenience.
+//
+// COMPlusThrow() must only be called within the scope of a EX_TRY
+// block. See below for more information.
+//
+//
+// THROWING A RUNTIME EXCEPTION
+// ----------------------------
+// COMPlusThrow() is overloaded to take a constant describing
+// the common EE-generated exceptions, e.g.
+//
+// COMPlusThrow(kOutOfMemoryException);
+//
+// See rexcep.h for list of constants (prepend "k" to get the actual
+// constant name.)
+//
+// You can also add a descriptive error string as follows:
+//
+// - Add a descriptive error string and resource id to
+// COM99\src\dlls\mscorrc\resource.h and mscorrc.rc.
+// Embed "%1", "%2" or "%3" to leave room for runtime string
+// inserts.
+//
+// - Pass the resource ID and inserts to COMPlusThrow, i.e.
+//
+// COMPlusThrow(kSecurityException,
+// IDS_CANTREFORMATCDRIVEBECAUSE,
+// W("Formatting C drive permissions not granted."));
+//
+//
+//
+// TO CATCH COMPLUS EXCEPTIONS:
+// ----------------------------
+//
+// Use the following syntax:
+//
+// #include "exceptmacros.h"
+//
+//
+// OBJECTREF pThrownObject;
+//
+// EX_TRY {
+// ...guarded code...
+// } EX_CATCH {
+// ...handler...
+// } EX_END_CATCH(SwallowAllExceptions)
+//
+//
+// EX_TRY blocks can be nested.
+//
+// From within the handler, you can call the GET_THROWABLE() macro to
+// obtain the object that was thrown.
+//
+// CRUCIAL POINTS
+// --------------
+// In order to call COMPlusThrow(), you *must* be within the scope
+// of an EX_TRY block. Under _DEBUG, COMPlusThrow() will assert
+// if you call it out of scope. This implies that just about every
+// external entrypoint into the EE has to have an EX_TRY, in order
+// to convert uncaught COM+ exceptions into some error mechanism
+// more understandable to its non-COM+ caller.
+//
+// Any function that can throw a COM+ exception out to its caller
+// has the same requirement. ALL such functions should be tagged
+// with THROWS in CONTRACT. Aside from making the code
+// self-document its contract, the checked version of this will fire
+// an assert if the function is ever called without being in scope.
+//
+//
+// AVOIDING EX_TRY GOTCHAS
+// ----------------------------
+// EX_TRY/EX_CATCH actually expands into a Win32 SEH
+// __try/__except structure. It does a lot of goo under the covers
+// to deal with pre-emptive GC settings.
+//
+// 1. Do not use C++ try or SEH __try; use EX_TRY instead.
+//
+// 2. Remember that any function marked THROWS
+// has the potential not to return. So be wary of allocating
+// non-gc'd objects around such calls because ensuring cleanup
+// of these things is not simple (you can wrap another EX_TRY
+// around the call to simulate a COM+ "try-finally" but EX_TRY
+// is relatively expensive compared to the real thing.)
+//
+//
+
+
+#ifndef __exceptmacros_h__
+#define __exceptmacros_h__
+
+struct _EXCEPTION_REGISTRATION_RECORD;
+class Thread;
+class Frame;
+class Exception;
+
+VOID DECLSPEC_NORETURN RealCOMPlusThrowOM();
+VOID DECLSPEC_NORETURN RealCOMPlusThrowSO();
+
+#include <excepcpu.h>
+#include "stackprobe.h"
+
+
+
+//==========================================================================
+// Macros to allow catching exceptions from within the EE. These are lightweight
+// handlers that do not install the managed frame handler.
+//
+// struct Param { ... } param;
+// EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
+// ...<guarded code>...
+// } EE_FINALLY {
+// ...<handler>...
+// } EE_END_FINALLY
+//
+// EE_TRY(filter expr) {
+// ...<guarded code>...
+// } EE_CATCH {
+// ...<handler>...
+// }
+//==========================================================================
+
+// __EEparam.GotException will only be FALSE if we got all the way through the code
+// guarded by the try; otherwise it will be TRUE, so we know whether we entered the
+// finally because of an exception. If we did, we need to reset the GC state back to
+// what it was on entry so that the finally runs in that state.
+
+#define EE_TRY_FOR_FINALLY(ParamType, paramDef, paramRef) \
+ { \
+ struct __EEParam \
+ { \
+ BOOL fGCDisabled; \
+ BOOL GotException; \
+ ParamType param; \
+ } __EEparam; \
+ __EEparam.fGCDisabled = GetThread()->PreemptiveGCDisabled(); \
+ __EEparam.GotException = TRUE; \
+ __EEparam.param = paramRef; \
+ PAL_TRY(__EEParam *, __pEEParam, &__EEparam) \
+ { \
+ ParamType paramDef; paramDef = __pEEParam->param;
+
+#define GOT_EXCEPTION() __EEparam.GotException
+
+#define EE_FINALLY \
+ __pEEParam->GotException = FALSE; \
+ } PAL_FINALLY { \
+ if (__EEparam.GotException) { \
+ if (__EEparam.fGCDisabled != GetThread()->PreemptiveGCDisabled()) \
+ if (__EEparam.fGCDisabled) \
+ GetThread()->DisablePreemptiveGC(); \
+ else \
+ GetThread()->EnablePreemptiveGC(); \
+ }
+
+#define EE_END_FINALLY \
+ } \
+ PAL_ENDTRY \
+ }
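+
+// Illustrative sketch of the pattern above (the param struct and callees are
+// hypothetical); note that GOT_EXCEPTION() is valid inside the finally, and the
+// GC mode has already been restored by the time the finally body runs:
+//
+//   struct Param { Thread* pThread; } param;
+//   param.pThread = GetThread();
+//   EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
+//       DoWorkThatMayThrow(pParam->pThread);
+//   } EE_FINALLY {
+//       if (GOT_EXCEPTION()) {
+//           // an exception escaped the guarded code
+//       }
+//       Cleanup();
+//   } EE_END_FINALLY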
+
+
+
+
+//==========================================================================
+// Helpful macros to declare exception handlers, their implementation,
+// and to call them.
+//==========================================================================
+
+#define _EXCEPTION_HANDLER_DECL(funcname) \
+ EXCEPTION_DISPOSITION __cdecl funcname(EXCEPTION_RECORD *pExceptionRecord, \
+ struct _EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, \
+ CONTEXT *pContext, \
+ DISPATCHER_CONTEXT *pDispatcherContext)
+
+#define EXCEPTION_HANDLER_DECL(funcname) \
+ extern "C" _EXCEPTION_HANDLER_DECL(funcname)
+
+#define EXCEPTION_HANDLER_IMPL(funcname) \
+ _EXCEPTION_HANDLER_DECL(funcname)
+
+#define EXCEPTION_HANDLER_FWD(funcname) \
+ funcname(pExceptionRecord, pEstablisherFrame, pContext, pDispatcherContext)
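+
+// Illustrative sketch (the handler name is hypothetical): declare a handler with
+// EXCEPTION_HANDLER_DECL, define it with EXCEPTION_HANDLER_IMPL, and forward the
+// standard four arguments to another handler with EXCEPTION_HANDLER_FWD:
+//
+//   EXCEPTION_HANDLER_DECL(MyFrameHandler);              // in a header
+//
+//   EXCEPTION_HANDLER_IMPL(MyFrameHandler)               // implementation
+//   {
+//       // ... inspect pExceptionRecord, pEstablisherFrame, etc. ...
+//       return EXCEPTION_HANDLER_FWD(COMPlusFrameHandler); // delegate
+//   }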
+
+//==========================================================================
+// Declares a COM+ frame handler that can be used to make sure that
+// exceptions that should be handled from within managed code
+// are handled there and don't leak out to give other handlers a
+// chance at them.
+//==========================================================================
+#define INSTALL_COMPLUS_EXCEPTION_HANDLER() \
+ DECLARE_CPFH_EH_RECORD(GET_THREAD()); \
+ INSTALL_COMPLUS_EXCEPTION_HANDLER_NO_DECLARE()
+
+#define INSTALL_COMPLUS_EXCEPTION_HANDLER_NO_DECLARE() \
+{ \
+ INSTALL_EXCEPTION_HANDLING_RECORD(&(___pExRecord->m_ExReg)); \
+ /* work around unreachable code warning */ \
+ if (true) {
+
+#define UNINSTALL_COMPLUS_EXCEPTION_HANDLER() \
+ } \
+ UNINSTALL_EXCEPTION_HANDLING_RECORD(&(___pExRecord->m_ExReg)); \
+}
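+
+// Illustrative sketch: the install/uninstall macros are brace-matched and must
+// be used as a pair in the same scope, bracketing the guarded call:
+//
+//   INSTALL_COMPLUS_EXCEPTION_HANDLER();
+//   CallThatMayRaiseManagedException();   // hypothetical callee
+//   UNINSTALL_COMPLUS_EXCEPTION_HANDLER();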
+
+#if !defined(WIN64EXCEPTIONS)
+
+#define INSTALL_NESTED_EXCEPTION_HANDLER(frame) \
+ NestedHandlerExRecord *__pNestedHandlerExRecord = (NestedHandlerExRecord*) _alloca(sizeof(NestedHandlerExRecord)); \
+ __pNestedHandlerExRecord->m_handlerInfo.m_hThrowable = NULL; \
+ __pNestedHandlerExRecord->Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, frame); \
+ INSTALL_EXCEPTION_HANDLING_RECORD(&(__pNestedHandlerExRecord->m_ExReg));
+
+#define UNINSTALL_NESTED_EXCEPTION_HANDLER() \
+ UNINSTALL_EXCEPTION_HANDLING_RECORD(&(__pNestedHandlerExRecord->m_ExReg));
+
+#else // defined(WIN64EXCEPTIONS)
+
+#define INSTALL_NESTED_EXCEPTION_HANDLER(frame)
+#define UNINSTALL_NESTED_EXCEPTION_HANDLER()
+
+#endif // !defined(WIN64EXCEPTIONS)
+
+LONG WINAPI CLRVectoredExceptionHandler(PEXCEPTION_POINTERS pExceptionInfo);
+
+// Actual UEF worker prototype for use by GCUnhandledExceptionFilter.
+extern LONG InternalUnhandledExceptionFilter_Worker(PEXCEPTION_POINTERS pExceptionInfo);
+
+// This function is the filter function for the "__except" setup in "gc1()"
+// in gc.cpp to handle exceptions that happen during GC.
+inline LONG CheckException(EXCEPTION_POINTERS* pExceptionPointers, PVOID pv)
+{
+ WRAPPER_NO_CONTRACT;
+
+ LONG result = CLRVectoredExceptionHandler(pExceptionPointers);
+ if (result != EXCEPTION_EXECUTE_HANDLER)
+ return result;
+
+#ifdef _DEBUG_IMPL
+ _ASSERTE(!"Unexpected Exception");
+#else
+ FreeBuildDebugBreak();
+#endif
+
+    // If you set the debugger to break on AV and return a value of EXCEPTION_CONTINUE_EXECUTION (-1)
+    // here, you will bounce back to the point of the AV.
+ return EXCEPTION_EXECUTE_HANDLER;
+
+}
+
+//==========================================================================
+// Installs a handler to unwind exception frames, but not catch the exception
+//==========================================================================
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+// -----------------------------------------------------------------------
+// Support for Corrupted State Exceptions
+// -----------------------------------------------------------------------
+// This enumeration defines the corruption severity of an exception and
+// whether it should be reused for the next exception thrown or not.
+enum CorruptionSeverity
+{
+ UseLast = 0x0, // When specified, the last active corruption severity from TES should be used
+ NotSet = 0x1, // Corruption Severity has not been set - this is the default/reset value
+ NotCorrupting = 0x2, // Indicates exception is not corrupting
+ ProcessCorrupting = 0x4, // Indicates exception represents process corrupted state
+    ReuseForReraise = 0x2000 // Indicates that the corruption severity should be reused for the next exception thrown,
+                             // provided it's not nested and isn't a rethrow. This flag is typically used for propagation of
+                             // severity across boundaries like Reflection invocation, AD transition, etc.
+};
+
+#define GET_CORRUPTION_SEVERITY(severity) ((severity & (~ReuseForReraise)))
+#define CAN_REUSE_CORRUPTION_SEVERITY(severity) ((severity & ReuseForReraise) == ReuseForReraise)
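+
+// Illustrative sketch: a severity value may carry the ReuseForReraise bit, and
+// the macros above decompose it:
+//
+//   CorruptionSeverity sev = (CorruptionSeverity)(ProcessCorrupting | ReuseForReraise);
+//   GET_CORRUPTION_SEVERITY(sev);        // == ProcessCorrupting
+//   CAN_REUSE_CORRUPTION_SEVERITY(sev);  // == true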
+
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+VOID DECLSPEC_NORETURN RaiseTheException(OBJECTREF throwable, BOOL rethrow
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+);
+
+VOID DECLSPEC_NORETURN RaiseTheExceptionInternalOnly(OBJECTREF throwable, BOOL rethrow, BOOL fForStackOverflow = FALSE);
+
+#if defined(DACCESS_COMPILE) || defined(CROSSGEN_COMPILE)
+
+#define INSTALL_UNWIND_AND_CONTINUE_HANDLER
+#define UNINSTALL_UNWIND_AND_CONTINUE_HANDLER
+
+#define INSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE
+#define UNINSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE
+#else // DACCESS_COMPILE || CROSSGEN_COMPILE
+
+void UnwindAndContinueRethrowHelperInsideCatch(Frame* pEntryFrame, Exception* pException);
+VOID DECLSPEC_NORETURN UnwindAndContinueRethrowHelperAfterCatch(Frame* pEntryFrame, Exception* pException);
+
+#define INSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE \
+ { \
+ MAKE_CURRENT_THREAD_AVAILABLE(); \
+ Exception* __pUnCException = NULL; \
+ Frame* __pUnCEntryFrame = CURRENT_THREAD->GetFrame(); \
+ bool __fExceptionCatched = false; \
+ SCAN_EHMARKER(); \
+ if (true) PAL_CPP_TRY { \
+ SCAN_EHMARKER_TRY(); \
+ DEBUG_ASSURE_NO_RETURN_BEGIN(IUACH)
+
+#define INSTALL_UNWIND_AND_CONTINUE_HANDLER \
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE \
+ /* The purpose of the INSTALL_UNWIND_AND_CONTINUE_HANDLER is to translate an exception to a managed */ \
+ /* exception before it hits managed code. The transition to SO_INTOLERANT code does not logically belong here. */ \
+ /* However, we don't want to miss any probe points and the intersection between a probe point and installing */ \
+ /* an INSTALL_UNWIND_AND_CONTINUE_HANDLER is very high. The probes are very cheap, so we can tolerate */ \
+ /* those few places where we are probing and don't need to. */ \
+ /* Ideally, we would instead have an encompassing ENTER_SO_INTOLERANT_CODE macro that would */ \
+ /* include INSTALL_UNWIND_AND_CONTINUE_HANDLER */ \
+ BEGIN_SO_INTOLERANT_CODE(GET_THREAD());
+
+// Optimized version for helper method frame. Avoids redundant GetThread() calls.
+#define INSTALL_UNWIND_AND_CONTINUE_HANDLER_FOR_HMF(pHelperFrame) \
+ { \
+ Exception* __pUnCException = NULL; \
+ Frame* __pUnCEntryFrame = (pHelperFrame); \
+ bool __fExceptionCatched = false; \
+ SCAN_EHMARKER(); \
+ if (true) PAL_CPP_TRY { \
+ SCAN_EHMARKER_TRY(); \
+ DEBUG_ASSURE_NO_RETURN_BEGIN(IUACH); \
+ BEGIN_SO_INTOLERANT_CODE(GET_THREAD());
+
+#define UNINSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE \
+ DEBUG_ASSURE_NO_RETURN_END(IUACH) \
+ SCAN_EHMARKER_END_TRY(); \
+ } \
+ PAL_CPP_CATCH_DERIVED (Exception, __pException) \
+ { \
+ SCAN_EHMARKER_CATCH(); \
+ CONSISTENCY_CHECK(NULL != __pException); \
+ __pUnCException = __pException; \
+ UnwindAndContinueRethrowHelperInsideCatch(__pUnCEntryFrame, __pUnCException); \
+ __fExceptionCatched = true; \
+ SCAN_EHMARKER_END_CATCH(); \
+ } \
+ PAL_CPP_ENDTRY \
+ if (__fExceptionCatched) \
+ { \
+ SCAN_EHMARKER_CATCH(); \
+ UnwindAndContinueRethrowHelperAfterCatch(__pUnCEntryFrame, __pUnCException); \
+ } \
+ } \
+
+#define UNINSTALL_UNWIND_AND_CONTINUE_HANDLER \
+ END_SO_INTOLERANT_CODE; \
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE; \
+
+#endif // DACCESS_COMPILE || CROSSGEN_COMPILE
+
+
+#define ENCLOSE_IN_EXCEPTION_HANDLER( func ) \
+ { \
+ struct exception_handler_wrapper \
+ { \
+ static void wrap() \
+ { \
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER; \
+ func(); \
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER; \
+ } \
+ }; \
+ \
+ exception_handler_wrapper::wrap(); \
+ }
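+
+// Illustrative sketch (the function name is hypothetical): wrap a void()
+// function so that any EE Exception it throws is translated by the
+// unwind-and-continue handler installed inside the wrapper:
+//
+//   void DoRuntimeWork();               // may throw an EE Exception
+//   ...
+//   ENCLOSE_IN_EXCEPTION_HANDLER(DoRuntimeWork);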
+
+
+//==========================================================================
+// Declares that a function can throw a COM+ exception.
+//==========================================================================
+#if defined(ENABLE_CONTRACTS) && !defined(DACCESS_COMPILE)
+
+//==========================================================================
+// Declares that a function cannot throw a COM+ exception.
+// Adds a record to the contract chain.
+//==========================================================================
+
+#define CANNOTTHROWCOMPLUSEXCEPTION() ANNOTATION_NOTHROW; \
+ COMPlusCannotThrowExceptionHelper _dummyvariable(TRUE, __FUNCTION__, __FILE__, __LINE__);
+
+extern char *g_ExceptionFile;
+extern DWORD g_ExceptionLine;
+
+#define THROWLOG() ( g_ExceptionFile = __FILE__, g_ExceptionLine = __LINE__, TRUE )
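+
+// The definitions below use the "if (THROWLOG() && 0) { } else Real..." idiom:
+// THROWLOG() records the throw site's file/line (and always yields TRUE), the
+// "&& 0" makes the if-branch dead code, and the else binds the caller's
+// argument list to the Real* function. So COMPlusThrow(args) logs the throw
+// location and then calls RealCOMPlusThrow(args).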
+
+#define COMPlusThrow if(THROWLOG() && 0) { } else RealCOMPlusThrow
+#define COMPlusThrowNonLocalized if(THROWLOG() && 0) { } else RealCOMPlusThrowNonLocalized
+#define COMPlusThrowHR if(THROWLOG() && 0) { } else RealCOMPlusThrowHR
+#define COMPlusThrowWin32 if(THROWLOG() && 0) { } else RealCOMPlusThrowWin32
+#define COMPlusThrowOM if(THROWLOG() && 0) { } else RealCOMPlusThrowOM
+#ifdef FEATURE_STACK_PROBE
+#define COMPlusThrowSO if(THROWLOG() && 0) { } else RealCOMPlusThrowSO
+#endif
+#define COMPlusThrowArithmetic if(THROWLOG() && 0) { } else RealCOMPlusThrowArithmetic
+#define COMPlusThrowArgumentNull if(THROWLOG() && 0) { } else RealCOMPlusThrowArgumentNull
+#define COMPlusThrowArgumentOutOfRange if(THROWLOG() && 0) { } else RealCOMPlusThrowArgumentOutOfRange
+#define COMPlusThrowArgumentException if(THROWLOG() && 0) { } else RealCOMPlusThrowArgumentException
+#define COMPlusThrowInvalidCastException if(THROWLOG() && 0) { } else RealCOMPlusThrowInvalidCastException
+#define COMPlusRareRethrow if(THROWLOG() && 0) { } else RealCOMPlusRareRethrow
+
+#else // ENABLE_CONTRACTS && !DACCESS_COMPILE
+
+#define CANNOTTHROWCOMPLUSEXCEPTION() ANNOTATION_NOTHROW
+#define BEGINCANNOTTHROWCOMPLUSEXCEPTION_SEH() ANNOTATION_NOTHROW
+#define ENDCANNOTTHROWCOMPLUSEXCEPTION_SEH()
+
+#define COMPlusThrow RealCOMPlusThrow
+#ifndef CLR_STANDALONE_BINDER
+#define COMPlusThrowNonLocalized RealCOMPlusThrowNonLocalized
+#endif // !CLR_STANDALONE_BINDER
+#ifndef DACCESS_COMPILE
+#define COMPlusThrowHR RealCOMPlusThrowHR
+#else
+#define COMPlusThrowHR ThrowHR
+#endif
+#define COMPlusThrowWin32 RealCOMPlusThrowWin32
+#ifndef CLR_STANDALONE_BINDER
+#define COMPlusThrowOM RealCOMPlusThrowOM
+#endif // !CLR_STANDALONE_BINDER
+#ifdef FEATURE_STACK_PROBE
+#define COMPlusThrowSO RealCOMPlusThrowSO
+#endif
+#define COMPlusThrowArithmetic RealCOMPlusThrowArithmetic
+#define COMPlusThrowArgumentNull RealCOMPlusThrowArgumentNull
+#define COMPlusThrowArgumentOutOfRange RealCOMPlusThrowArgumentOutOfRange
+#define COMPlusThrowArgumentException RealCOMPlusThrowArgumentException
+#define COMPlusThrowInvalidCastException RealCOMPlusThrowInvalidCastException
+
+#endif // ENABLE_CONTRACTS && !DACCESS_COMPILE
+/* Non-VM exception helpers to be rerouted inside the VM directory:
+   ThrowHR
+   ThrowWin32
+   ThrowLastError      --> ThrowWin32(GetLastError())
+   ThrowOutOfMemory        COMPlusThrowOM defers to this
+   ThrowStackOverflow      COMPlusThrowSO defers to this
+   ThrowMessage            ThrowHR(E_FAIL, Message)
+*/
+
+/* Ideally we could make these defines. But the sources in the VM directory
+ won't build with them as defines. @todo: go through VM directory and
+ eliminate calls to the non-VM style functions.
+
+#define ThrowHR COMPlusThrowHR
+#define ThrowWin32 COMPlusThrowWin32
+#define ThrowLastError() COMPlusThrowWin32(GetLastError())
+#define ThrowMessage "Don't use this in the VM directory"
+
+*/
+
+//======================================================
+// Used when we're entering the EE from unmanaged code
+// and we can assert that the gc state is cooperative.
+//
+// If an exception is thrown through this transition
+// handler, it will clean up the EE appropriately. See
+// the definition of COMPlusCooperativeTransitionHandler
+// for the details.
+//======================================================
+
+void COMPlusCooperativeTransitionHandler(Frame* pFrame);
+
+#ifdef CROSSGEN_COMPILE
+
+#define COOPERATIVE_TRANSITION_BEGIN()
+#define COOPERATIVE_TRANSITION_END()
+
+#else // CROSSGEN_COMPILE
+
+#define COOPERATIVE_TRANSITION_BEGIN() \
+ { \
+ MAKE_CURRENT_THREAD_AVAILABLE(); \
+ BEGIN_GCX_ASSERT_PREEMP; \
+ BEGIN_SO_INTOLERANT_CODE(CURRENT_THREAD); \
+ CoopTransitionHolder __CoopTransition(CURRENT_THREAD); \
+ DEBUG_ASSURE_NO_RETURN_BEGIN(COOP_TRANSITION)
+
+#define COOPERATIVE_TRANSITION_END() \
+ DEBUG_ASSURE_NO_RETURN_END(COOP_TRANSITION) \
+ __CoopTransition.SuppressRelease(); \
+ END_SO_INTOLERANT_CODE; \
+ END_GCX_ASSERT_PREEMP; \
+ }
+
+#endif // CROSSGEN_COMPILE
+
+extern LONG UserBreakpointFilter(EXCEPTION_POINTERS *ep);
+extern LONG DefaultCatchFilter(EXCEPTION_POINTERS *ep, LPVOID pv);
+extern LONG DefaultCatchNoSwallowFilter(EXCEPTION_POINTERS *ep, LPVOID pv);
+
+
+// the only valid parameter for DefaultCatchFilter
+#define COMPLUS_EXCEPTION_EXECUTE_HANDLER (PVOID)EXCEPTION_EXECUTE_HANDLER
+struct DefaultCatchFilterParam
+{
+ PVOID pv; // must be COMPLUS_EXCEPTION_EXECUTE_HANDLER
+ DefaultCatchFilterParam() {}
+ DefaultCatchFilterParam(PVOID _pv) : pv(_pv) {}
+};
+
+template <typename T>
+LPCWSTR GetPathForErrorMessagesT(T *pImgObj)
+{
+ SUPPORTS_DAC_HOST_ONLY;
+ if (pImgObj)
+ {
+ return pImgObj->GetPathForErrorMessages();
+ }
+ else
+ {
+ return W("");
+ }
+}
+
+VOID ThrowBadFormatWorker(UINT resID, LPCWSTR imageName DEBUGARG(__in_z const char *cond));
+
+template <typename T>
+NOINLINE
+VOID ThrowBadFormatWorkerT(UINT resID, T * pImgObj DEBUGARG(__in_z const char *cond))
+{
+ LPCWSTR tmpStr = GetPathForErrorMessagesT(pImgObj);
+ ThrowBadFormatWorker(resID, tmpStr DEBUGARG(cond));
+}
+
+
+// Worker macro for throwing BadImageFormat exceptions.
+//
+// resID: resource ID in mscorrc.rc. Message may not have substitutions. resID is permitted (but not encouraged) to be 0.
+// imgObj: one of Module* or PEFile* or PEImage* (must support GetPathForErrorMessages method.)
+//
+#define IfFailThrowBF(hresult, resID, imgObj) \
+ do \
+ { \
+ if (FAILED(hresult)) \
+ THROW_BAD_FORMAT(resID, imgObj); \
+ } \
+ while(0)
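+
+// Illustrative sketch (the callee and resource ID are hypothetical):
+//
+//   HRESULT hr = ReadMetadataFromImage(pModule);   // some FAILED()-returning call
+//   IfFailThrowBF(hr, IDS_SOME_BAD_FORMAT_RESOURCE, pModule);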
+
+
+#define THROW_BAD_FORMAT(resID, imgObj) THROW_BAD_FORMAT_MAYBE(FALSE, resID, imgObj)
+
+
+// Conditional version of THROW_BAD_FORMAT. Do not use for new callsites. This is really meant to be a drop-in replacement
+// for the obsolete BAD_FORMAT_ASSERT.
+
+#define THROW_BAD_FORMAT_MAYBE(cond, resID, imgObj) \
+ do \
+ { \
+ if (!(cond)) \
+ { \
+ ThrowBadFormatWorkerT((resID), (imgObj) DEBUGARG(#cond)); \
+ } \
+ } \
+ while(0)
+
+
+// Same as above, but allows you to specify your own HRESULT
+#define THROW_HR_ERROR_WITH_INFO(hr, imgObj) RealCOMPlusThrowHR(hr, hr, GetPathForErrorMessagesT(imgObj))
+
+
+#endif // __exceptmacros_h__
diff --git a/src/vm/exinfo.cpp b/src/vm/exinfo.cpp
new file mode 100644
index 0000000000..b310130031
--- /dev/null
+++ b/src/vm/exinfo.cpp
@@ -0,0 +1,307 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+#include "common.h"
+#include "exinfo.h"
+#include "dbginterface.h"
+
+#ifndef DACCESS_COMPILE
+//
+// Destroy the handle within an ExInfo. This respects the fact that we can have preallocated global handles living
+// in ExInfos.
+//
+void ExInfo::DestroyExceptionHandle(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // Never, ever destroy a preallocated exception handle.
+ if ((m_hThrowable != NULL) && !CLRException::IsPreallocatedExceptionHandle(m_hThrowable))
+ {
+ DestroyHandle(m_hThrowable);
+ }
+
+ m_hThrowable = NULL;
+}
+
+//
+// CopyAndClearSource copies the contents of the given ExInfo into the current ExInfo, then re-initializes the
+// given ExInfo.
+//
+void ExInfo::CopyAndClearSource(ExInfo *from)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ if (GetThread() != NULL) MODE_COOPERATIVE; else MODE_ANY;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef _TARGET_X86_
+ LOG((LF_EH, LL_INFO100, "In ExInfo::CopyAndClearSource: m_dEsp=%08x, %08x <- [%08x], stackAddress = 0x%p <- 0x%p\n",
+ from->m_dEsp, &(this->m_dEsp), &from->m_dEsp, this->m_StackAddress, from->m_StackAddress));
+#endif // _TARGET_X86_
+
+    // If we have a handle to an exception object in this ExInfo already, then go ahead and destroy it before we
+    // lose it.
+ DestroyExceptionHandle();
+
+ // The stack address is handled differently. Save the original value.
+ void* stackAddress = this->m_StackAddress;
+
+ // Blast the entire record. Note: we're copying the handle from the source ExInfo to this object. That's okay,
+ // since we're going to clear out the source ExInfo right below this.
+ memcpy(this, from, sizeof(ExInfo));
+
+ // Preserve the stack address. It should never change.
+ m_StackAddress = stackAddress;
+
+ // This ExInfo just took ownership of the handle to the exception object, so clear out that handle in the
+ // source ExInfo.
+ from->m_hThrowable = NULL;
+
+ // Finally, initialize the source ExInfo.
+ from->Init();
+
+ // Clear the Watson Bucketing information as well since they
+ // have been transferred over by the "memcpy" above.
+ from->GetWatsonBucketTracker()->Init();
+}
+
+void ExInfo::Init()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ m_ExceptionFlags.Init();
+ m_StackTraceInfo.Init();
+ m_DebuggerExState.Init();
+
+ m_pSearchBoundary = NULL;
+ STRESS_LOG3(LF_EH, LL_INFO10000, "ExInfo::Init: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n",
+ this, m_pBottomMostHandler, NULL);
+ m_pBottomMostHandler = NULL;
+ m_pPrevNestedInfo = NULL;
+ m_ExceptionCode = 0xcccccccc;
+ m_pExceptionRecord = NULL;
+ m_pContext = NULL;
+ m_pShadowSP = NULL;
+ m_StackAddress = this;
+ DestroyExceptionHandle();
+ m_hThrowable = NULL;
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Initialize the default exception severity to NotCorrupting
+ m_CorruptionSeverity = NotSet;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+ // By default, mark the tracker as not having delivered the first
+ // chance exception notification
+ m_fDeliveredFirstChanceNotification = FALSE;
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+
+ m_pTopMostHandlerDuringSO = NULL;
+
+#if defined(_TARGET_X86_) && defined(DEBUGGING_SUPPORTED)
+ m_InterceptionContext.Init();
+ m_ValidInterceptionContext = FALSE;
+#endif //_TARGET_X86_ && DEBUGGING_SUPPORTED
+}
+
+ExInfo::ExInfo()
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_hThrowable = NULL;
+ Init();
+
+ // Init the WatsonBucketTracker
+ m_WatsonBucketTracker.Init();
+}
+
+//*******************************************************************************
+// When we hit an endcatch or an unwind and have nested handler info, either
+// 1) we have contained a nested exception and will continue handling the original
+// exception
+// - or -
+// 2) the nested exception was not contained, and was thrown beyond the original
+// bounds where the first exception occurred.
+//
+// The way we can tell this is from the stack pointer. The topmost nested handler is
+// installed at the point where the exception occurred. For a nested exception to be
+// contained, it must be caught within the scope of any code that is called after
+// the nested handler is installed. (remember: after is a lower stack address.)
+//
+// If it is caught by anything earlier on the stack, it was not contained, and we
+// unwind the nested handlers until we get to one that is higher on the stack
+// than the esp we will unwind to.
+//
+// If we still have a nested handler, then we have successfully handled a nested
+// exception and should restore the exception settings that we saved so that
+// processing of the original exception can continue.
+// Otherwise the nested exception has gone beyond where the original exception was
+// thrown and therefore replaces the original exception.
+//
+// We will always remove the current exception info from the chain.
+//
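+// Illustrative example (addresses are hypothetical; remember the stack grows
+// down, so "after" means a lower address): suppose a saved previous ExInfo
+// records m_StackAddress == 0x8000. Unwinding to a limit of 0x8800 pops that
+// record (0x8000 < 0x8800): the nested exception escaped the original's scope
+// and replaces it. Unwinding to a limit of 0x7800 leaves the record alive
+// (0x8000 >= 0x7800): the nested exception was contained, and the saved state
+// is copied back into the current ExInfo.
+//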
+void ExInfo::UnwindExInfo(VOID* limit)
+{
+ CONTRACTL
+ {
+ NOTHROW; // This function does not throw.
+ GC_NOTRIGGER;
+ if (GetThread() != NULL) MODE_COOPERATIVE; else MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // We must be in cooperative mode to do the chaining below
+#ifdef DEBUGGING_SUPPORTED
+ // The debugger thread will be using this, even though it has no
+ // Thread object associated with it.
+ _ASSERTE((GetThread() != NULL && GetThread()->PreemptiveGCDisabled()) ||
+ ((g_pDebugInterface != NULL) && (g_pDebugInterface->GetRCThreadId() == GetCurrentThreadId())));
+#endif // DEBUGGING_SUPPORTED
+
+ LOG((LF_EH, LL_INFO100, "UnwindExInfo: unwind limit is 0x%p, prevNested is 0x%p\n", limit, m_pPrevNestedInfo));
+
+ ExInfo *pPrevNestedInfo = m_pPrevNestedInfo;
+
+    // At first glance, you would think that each nested exception has
+    // been unwound by its corresponding NestedExceptionHandler. But that's
+    // not necessarily the case. The following assertion cannot be made here,
+ // and the loop is necessary.
+ //
+ //_ASSERTE(pPrevNestedInfo == 0 || (DWORD)pPrevNestedInfo >= limit);
+ //
+ // Make sure we've unwound any nested exceptions that we're going to skip over.
+ //
+ while (pPrevNestedInfo && pPrevNestedInfo->m_StackAddress < limit)
+ {
+ STRESS_LOG1(LF_EH, LL_INFO100, "UnwindExInfo: PopExInfo(): popping nested ExInfo at 0x%p\n", pPrevNestedInfo->m_StackAddress);
+
+ if (pPrevNestedInfo->m_hThrowable != NULL)
+ {
+ pPrevNestedInfo->DestroyExceptionHandle();
+ }
+
+ // Free the Watson bucket details when ExInfo
+ // is being released
+ pPrevNestedInfo->GetWatsonBucketTracker()->ClearWatsonBucketDetails();
+
+ pPrevNestedInfo->m_StackTraceInfo.FreeStackTrace();
+
+ #ifdef DEBUGGING_SUPPORTED
+ if (g_pDebugInterface != NULL)
+ {
+ g_pDebugInterface->DeleteInterceptContext(pPrevNestedInfo->m_DebuggerExState.GetDebuggerInterceptContext());
+ }
+ #endif
+
+ // Get the next nested handler detail...
+ ExInfo* pPrev = pPrevNestedInfo->m_pPrevNestedInfo;
+
+ if (pPrevNestedInfo->IsHeapAllocated())
+ {
+ delete pPrevNestedInfo;
+ }
+
+ pPrevNestedInfo = pPrev;
+ }
+
+ // either clear the one we're about to copy over or the topmost one
+ m_StackTraceInfo.FreeStackTrace();
+
+ if (pPrevNestedInfo)
+ {
+        // We found nested handler info that is above the esp restore point, so the nested exception was successfully caught.
+ STRESS_LOG2(LF_EH, LL_INFO100, "UnwindExInfo: resetting nested ExInfo to 0x%p stackaddress:0x%p\n", pPrevNestedInfo, pPrevNestedInfo->m_StackAddress);
+
+ // Remember if this ExInfo is heap allocated or not.
+ BOOL isHeapAllocated = pPrevNestedInfo->IsHeapAllocated();
+
+ // Copy pPrevNestedInfo to 'this', clearing pPrevNestedInfo in the process.
+ CopyAndClearSource(pPrevNestedInfo);
+
+ if (isHeapAllocated)
+ {
+ delete pPrevNestedInfo; // Now delete the old record if we needed to.
+ }
+ }
+ else
+ {
+ STRESS_LOG0(LF_EH, LL_INFO100, "UnwindExInfo: clearing topmost ExInfo\n");
+
+ // We just do a basic Init of the current top ExInfo here.
+ Init();
+
+ // Init the Watson buckets as well
+ GetWatsonBucketTracker()->ClearWatsonBucketDetails();
+ }
+}
+#endif // DACCESS_COMPILE
+
+
+#ifdef DACCESS_COMPILE
+
+void
+ExInfo::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ // ExInfo is embedded so don't enum 'this'.
+ OBJECTHANDLE_EnumMemoryRegions(m_hThrowable);
+
+ m_pExceptionRecord.EnumMem();
+ m_pContext.EnumMem();
+}
+
+#endif // #ifdef DACCESS_COMPILE
+
+
+void ExInfo::SetExceptionCode(const EXCEPTION_RECORD *pCER)
+{
+#ifndef DACCESS_COMPILE
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ _ASSERTE(pCER != NULL);
+ m_ExceptionCode = pCER->ExceptionCode;
+
+ if (IsInstanceTaggedSEHCode(pCER->ExceptionCode) && ::WasThrownByUs(pCER, pCER->ExceptionCode))
+ {
+ m_ExceptionFlags.SetWasThrownByUs();
+ }
+ else
+ {
+ m_ExceptionFlags.ResetWasThrownByUs();
+ }
+#else // DACCESS_COMPILE
+ // This method is invoked by the X86 version of CLR's exception handler for
+ // managed code. There is no reason why DAC would be invoking this.
+ DacError(E_UNEXPECTED);
+#endif // !DACCESS_COMPILE
+}
diff --git a/src/vm/exinfo.h b/src/vm/exinfo.h
new file mode 100644
index 0000000000..73f8c1205c
--- /dev/null
+++ b/src/vm/exinfo.h
@@ -0,0 +1,184 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef __ExInfo_h__
+#define __ExInfo_h__
+#if !defined(WIN64EXCEPTIONS)
+
+#include "exstatecommon.h"
+
+typedef DPTR(class ExInfo) PTR_ExInfo;
+class ExInfo
+{
+ friend class ThreadExceptionState;
+ friend class ClrDataExceptionState;
+
+public:
+
+ BOOL IsHeapAllocated()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_StackAddress != (void *) this;
+ }
+
+ void CopyAndClearSource(ExInfo *from);
+
+ void UnwindExInfo(VOID* limit);
+
+ // Q: Why does this thing take an EXCEPTION_RECORD rather than an ExceptionCode?
+    // A: Because m_ExceptionCode and Ex_WasThrownByUs have to be kept
+    //    in sync, and this function needs the exception parameters inside the record to figure
+    //    out the "IsTagged" part.
+ void SetExceptionCode(const EXCEPTION_RECORD *pCER);
+
+ DWORD GetExceptionCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ExceptionCode;
+ }
+
+public: // @TODO: make more of these private!
+    // Note: the debugger assumes that m_hThrowable is a strong
+    // reference so it can check it for NULL with preemptive GC
+    // enabled.
+ OBJECTHANDLE m_hThrowable; // thrown exception
+ PTR_Frame m_pSearchBoundary; // topmost frame for current managed frame group
+private:
+ DWORD m_ExceptionCode; // After a catch of a COM+ exception, pointers/context are trashed.
+public:
+ PTR_EXCEPTION_REGISTRATION_RECORD m_pBottomMostHandler; // most recent EH record registered
+
+ // Reference to the topmost handler we saw during an SO that goes past us
+ PTR_EXCEPTION_REGISTRATION_RECORD m_pTopMostHandlerDuringSO;
+
+    LPVOID m_dEsp; // Esp when fault occurred, OR esp to restore on endcatch
+
+ StackTraceInfo m_StackTraceInfo;
+
+ PTR_ExInfo m_pPrevNestedInfo; // pointer to nested info if are handling nested exception
+
+ size_t* m_pShadowSP; // Zero this after endcatch
+
+ PTR_EXCEPTION_RECORD m_pExceptionRecord;
+ PTR_EXCEPTION_POINTERS m_pExceptionPointers;
+ PTR_CONTEXT m_pContext;
+
+    // We have a rare case (re-entry to the EE from an unmanaged filter) where we
+    // need to create a new ExInfo ... but don't have a nested handler for it. The handlers
+    // use stack addresses to figure out their correct lifetimes. This stack location is
+    // used for that. For most records, it will be the stack address of the ExInfo ... but
+    // for some records, it will be a pseudo stack location -- the place where we think
+    // the record should have been (except for the re-entry case).
+    //
+    void* m_StackAddress; // A pseudo or real stack location for this record.
+
+private:
+ EHWatsonBucketTracker m_WatsonBucketTracker;
+public:
+ inline PTR_EHWatsonBucketTracker GetWatsonBucketTracker()
+ {
+ LIMITED_METHOD_CONTRACT;
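+        // PTR_HOST_MEMBER_TADDR computes the target address of the embedded
+        // member from the host "this", so this accessor works under DAC too.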
+ return PTR_EHWatsonBucketTracker(PTR_HOST_MEMBER_TADDR(ExInfo, this, m_WatsonBucketTracker));
+ }
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+private:
+ CorruptionSeverity m_CorruptionSeverity;
+public:
+ inline CorruptionSeverity GetCorruptionSeverity()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (CorruptionSeverity)GET_CORRUPTION_SEVERITY(m_CorruptionSeverity);
+ }
+
+ inline void SetCorruptionSeverity(CorruptionSeverity severityToSet)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_CorruptionSeverity = severityToSet;
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+private:
+ BOOL m_fDeliveredFirstChanceNotification;
+public:
+ inline BOOL DeliveredFirstChanceNotification()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_fDeliveredFirstChanceNotification;
+ }
+
+ inline void SetFirstChanceNotificationStatus(BOOL fDelivered)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_fDeliveredFirstChanceNotification = fDelivered;
+ }
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+
+ // Returns the exception tracker previous to the current
+ inline PTR_ExInfo GetPreviousExceptionTracker()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pPrevNestedInfo;
+ }
+
+    // Returns the throwable associated with the tracker
+ inline OBJECTREF GetThrowable()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+        return (m_hThrowable != NULL) ? ObjectFromHandle(m_hThrowable) : NULL;
+ }
+
+    // Returns the throwable associated with the tracker as a handle
+ inline OBJECTHANDLE GetThrowableAsHandle()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_hThrowable;
+ }
+
+public:
+
+ DebuggerExState m_DebuggerExState;
+ EHClauseInfo m_EHClauseInfo;
+ ExceptionFlags m_ExceptionFlags;
+
+#if defined(_TARGET_X86_) && defined(DEBUGGING_SUPPORTED)
+ EHContext m_InterceptionContext;
+ BOOL m_ValidInterceptionContext;
+#endif
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ void Init();
+ ExInfo() DAC_EMPTY();
+
+ void DestroyExceptionHandle();
+
+private:
+ // Don't allow this
+ ExInfo& operator=(const ExInfo &from);
+};
+
+#if defined(_TARGET_X86_)
+PTR_ExInfo GetEHTrackerForPreallocatedException(OBJECTREF oPreAllocThrowable, PTR_ExInfo pStartingEHTracker);
+#endif // _TARGET_X86_
+
+#endif // !WIN64EXCEPTIONS
+#endif // __ExInfo_h__
diff --git a/src/vm/exstate.cpp b/src/vm/exstate.cpp
new file mode 100644
index 0000000000..72729a1a8d
--- /dev/null
+++ b/src/vm/exstate.cpp
@@ -0,0 +1,648 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#include "common.h"
+#include "exstate.h"
+#include "exinfo.h"
+
+#ifdef _DEBUG
+#include "comutilnative.h" // for assertions only
+#endif
+
+OBJECTHANDLE ThreadExceptionState::GetThrowableAsHandle()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef WIN64EXCEPTIONS
+ if (m_pCurrentTracker)
+ {
+ return m_pCurrentTracker->m_hThrowable;
+ }
+
+ return NULL;
+#else // WIN64EXCEPTIONS
+ return m_currentExInfo.m_hThrowable;
+#endif // WIN64EXCEPTIONS
+}
+
+
+ThreadExceptionState::ThreadExceptionState()
+{
+#ifdef WIN64EXCEPTIONS
+ m_pCurrentTracker = NULL;
+#endif // WIN64EXCEPTIONS
+
+ m_flag = TEF_None;
+
+#ifndef FEATURE_PAL
+ // Init the UE Watson BucketTracker
+ m_UEWatsonBucketTracker.Init();
+#endif // !FEATURE_PAL
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Initialize the default exception severity to NotCorrupting
+ m_LastActiveExceptionCorruptionSeverity = NotSet;
+ m_fCanReflectionTargetHandleException = FALSE;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+}
+
+ThreadExceptionState::~ThreadExceptionState()
+{
+#ifndef FEATURE_PAL
+    // Clear the UE Watson BucketTracker details
+ m_UEWatsonBucketTracker.ClearWatsonBucketDetails();
+#endif // !FEATURE_PAL
+}
+
+#if defined(_DEBUG)
+void ThreadExceptionState::AssertStackTraceInfo(StackTraceInfo *pSTI)
+{
+ LIMITED_METHOD_CONTRACT;
+#if defined(WIN64EXCEPTIONS)
+
+ _ASSERTE(pSTI == &(m_pCurrentTracker->m_StackTraceInfo) || pSTI == &(m_OOMTracker.m_StackTraceInfo));
+
+#else // WIN64EXCEPTIONS
+
+    _ASSERTE(pSTI == &(m_currentExInfo.m_StackTraceInfo));
+
+#endif // WIN64EXCEPTIONS
+} // void ThreadExceptionState::AssertStackTraceInfo()
+#endif // _DEBUG
+
+#ifndef DACCESS_COMPILE
+
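+// ThreadExceptionState is embedded in Thread as m_ExceptionState, so we can
+// recover the owning Thread by subtracting the member's offset from "this".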
+Thread* ThreadExceptionState::GetMyThread()
+{
+ return (Thread*)(((BYTE*)this) - offsetof(Thread, m_ExceptionState));
+}
+
+
+void ThreadExceptionState::FreeAllStackTraces()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef WIN64EXCEPTIONS
+ ExceptionTracker* pNode = m_pCurrentTracker;
+#else // WIN64EXCEPTIONS
+ ExInfo* pNode = &m_currentExInfo;
+#endif // WIN64EXCEPTIONS
+
+ for ( ;
+ pNode != NULL;
+ pNode = pNode->m_pPrevNestedInfo)
+ {
+ pNode->m_StackTraceInfo.FreeStackTrace();
+ }
+}
+
+void ThreadExceptionState::ClearThrowablesForUnload(HandleTableBucket* pHndTblBucket)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef WIN64EXCEPTIONS
+ ExceptionTracker* pNode = m_pCurrentTracker;
+#else // WIN64EXCEPTIONS
+ ExInfo* pNode = &m_currentExInfo;
+#endif // WIN64EXCEPTIONS
+
+ for ( ;
+ pNode != NULL;
+ pNode = pNode->m_pPrevNestedInfo)
+ {
+ if (pHndTblBucket->Contains(pNode->m_hThrowable))
+ {
+ pNode->DestroyExceptionHandle();
+ }
+ }
+}
+
+
+// After unwinding from an SO, there may be stale exception state.
+void ThreadExceptionState::ClearExceptionStateAfterSO(void* pStackFrameSP)
+{
+ WRAPPER_NO_CONTRACT;
+
+ #if defined(WIN64EXCEPTIONS)
+ ExceptionTracker::PopTrackers(pStackFrameSP);
+ #else
+ // After unwinding from an SO, there may be stale exception state. We need to
+ // get rid of any state that assumes the handlers that have been unwound/unlinked.
+ //
+ // Because the ExState chains to entries that may be on the stack, and the
+ // stack has been unwound, it may not be safe to reference any entries
+ // other than the one of the Thread object.
+ //
+ // Consequently, we will simply Init() the ExInfo on the Thread object.
+ m_currentExInfo.Init();
+ #endif
+} // void ThreadExceptionState::ClearExceptionStateAfterSO()
+
+OBJECTREF ThreadExceptionState::GetThrowable()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef WIN64EXCEPTIONS
+ if (m_pCurrentTracker && m_pCurrentTracker->m_hThrowable)
+ {
+ return ObjectFromHandle(m_pCurrentTracker->m_hThrowable);
+ }
+#else // WIN64EXCEPTIONS
+ if (m_currentExInfo.m_hThrowable)
+ {
+ return ObjectFromHandle(m_currentExInfo.m_hThrowable);
+ }
+#endif // WIN64EXCEPTIONS
+
+ return NULL;
+}
+
+void ThreadExceptionState::SetThrowable(OBJECTREF throwable DEBUG_ARG(SetThrowableErrorChecking stecFlags))
+{
+ CONTRACTL
+ {
+ if ((throwable == NULL) || CLRException::IsPreallocatedExceptionObject(throwable)) NOTHROW; else THROWS; // From CreateHandle
+ GC_NOTRIGGER;
+ if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef WIN64EXCEPTIONS
+ if (m_pCurrentTracker)
+ {
+ m_pCurrentTracker->DestroyExceptionHandle();
+ }
+#else // WIN64EXCEPTIONS
+ m_currentExInfo.DestroyExceptionHandle();
+#endif // WIN64EXCEPTIONS
+
+ if (throwable != NULL)
+ {
+ // Non-compliant exceptions are always wrapped.
+ // The use of the ExceptionNative:: helper here (rather than the global ::IsException helper)
+ // is hokey, but we need a GC_NOTRIGGER version and it's only for an ASSERT.
+ _ASSERTE(IsException(throwable->GetMethodTable()));
+
+ OBJECTHANDLE hNewThrowable;
+
+ // If we're tracking one of the preallocated exception objects, then just use the global handle that
+ // matches it rather than creating a new one.
+ if (CLRException::IsPreallocatedExceptionObject(throwable))
+ {
+ hNewThrowable = CLRException::GetPreallocatedHandleForObject(throwable);
+ }
+ else
+ {
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ {
+ AppDomain* pDomain = GetMyThread()->GetDomain();
+ PREFIX_ASSUME(pDomain != NULL);
+ hNewThrowable = pDomain->CreateHandle(throwable);
+ }
+ END_SO_INTOLERANT_CODE;
+ }
+
+#ifdef WIN64EXCEPTIONS
+#ifdef _DEBUG
+ //
+ // Fatal stack overflow policy ends up short-circuiting the normal exception handling
+ // flow such that there could be no Tracker for this SO that is in flight. In this
+ // situation there is no place to store the throwable in the exception state, and instead
+ // it is presumed that the handle to the SO exception is elsewhere. (Current knowledge
+    // as of 7/15/05 is that it is stored in Thread::m_LastThrownObjectHandle.)
+ //
+ if (stecFlags != STEC_CurrentTrackerEqualNullOkHackForFatalStackOverflow
+#ifdef FEATURE_INTERPRETER
+ && stecFlags != STEC_CurrentTrackerEqualNullOkForInterpreter
+#endif // FEATURE_INTERPRETER
+ )
+ {
+ CONSISTENCY_CHECK(CheckPointer(m_pCurrentTracker));
+ }
+#endif
+
+ if (m_pCurrentTracker != NULL)
+ {
+ m_pCurrentTracker->m_hThrowable = hNewThrowable;
+ }
+#else // WIN64EXCEPTIONS
+ m_currentExInfo.m_hThrowable = hNewThrowable;
+#endif // WIN64EXCEPTIONS
+ }
+}
+
+DWORD ThreadExceptionState::GetExceptionCode()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef WIN64EXCEPTIONS
+ _ASSERTE(m_pCurrentTracker);
+ return m_pCurrentTracker->m_ExceptionCode;
+#else // WIN64EXCEPTIONS
+ return m_currentExInfo.m_ExceptionCode;
+#endif // WIN64EXCEPTIONS
+}
+
+BOOL ThreadExceptionState::IsComPlusException()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ if (GetExceptionCode() != EXCEPTION_COMPLUS)
+ {
+ return FALSE;
+ }
+
+ _ASSERTE(IsInstanceTaggedSEHCode(GetExceptionCode()));
+
+
+
+ return GetFlags()->WasThrownByUs();
+}
+
+
+#endif // !DACCESS_COMPILE
+
+BOOL ThreadExceptionState::IsExceptionInProgress()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef WIN64EXCEPTIONS
+ return (m_pCurrentTracker != NULL);
+#else // WIN64EXCEPTIONS
+ return (m_currentExInfo.m_pBottomMostHandler != NULL);
+#endif // WIN64EXCEPTIONS
+}
+
+#if !defined(DACCESS_COMPILE)
+
+void ThreadExceptionState::GetLeafFrameInfo(StackTraceElement* pStackTraceElement)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef WIN64EXCEPTIONS
+ m_pCurrentTracker->m_StackTraceInfo.GetLeafFrameInfo(pStackTraceElement);
+#else
+ m_currentExInfo.m_StackTraceInfo.GetLeafFrameInfo(pStackTraceElement);
+#endif
+}
+
+EXCEPTION_POINTERS* ThreadExceptionState::GetExceptionPointers()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef WIN64EXCEPTIONS
+ if (m_pCurrentTracker)
+ {
+ return (EXCEPTION_POINTERS*)&(m_pCurrentTracker->m_ptrs);
+ }
+ else
+ {
+ return NULL;
+ }
+#else // WIN64EXCEPTIONS
+ return m_currentExInfo.m_pExceptionPointers;
+#endif // WIN64EXCEPTIONS
+}
+
+//-----------------------------------------------------------------------------
+// SetExceptionPointers -- accessor to set pointer to EXCEPTION_POINTERS
+// member.
+//
+// only x86
+//
+#if !defined(WIN64EXCEPTIONS)
+void ThreadExceptionState::SetExceptionPointers(
+ EXCEPTION_POINTERS *pExceptionPointers) // Value to set
+{
+ m_currentExInfo.m_pExceptionPointers = pExceptionPointers;
+} // void ThreadExceptionState::SetExceptionPointers()
+#endif
+
+#endif // !DACCESS_COMPILE
+
+PTR_EXCEPTION_RECORD ThreadExceptionState::GetExceptionRecord()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef WIN64EXCEPTIONS
+ if (m_pCurrentTracker)
+ {
+ return m_pCurrentTracker->m_ptrs.ExceptionRecord;
+ }
+ else
+ {
+ return NULL;
+ }
+#else // WIN64EXCEPTIONS
+ return m_currentExInfo.m_pExceptionRecord;
+#endif // WIN64EXCEPTIONS
+}
+
+PTR_CONTEXT ThreadExceptionState::GetContextRecord()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef WIN64EXCEPTIONS
+ if (m_pCurrentTracker)
+ {
+ return m_pCurrentTracker->m_ptrs.ContextRecord;
+ }
+ else
+ {
+ return NULL;
+ }
+#else // WIN64EXCEPTIONS
+ return m_currentExInfo.m_pContext;
+#endif // WIN64EXCEPTIONS
+}
+
+ExceptionFlags* ThreadExceptionState::GetFlags()
+{
+#ifdef WIN64EXCEPTIONS
+
+ if (m_pCurrentTracker)
+ {
+ return &(m_pCurrentTracker->m_ExceptionFlags);
+ }
+ else
+ {
+ _ASSERTE(!"GetFlags() called when there is no current exception");
+ return NULL;
+ }
+
+#else // WIN64EXCEPTIONS
+
+ return &(m_currentExInfo.m_ExceptionFlags);
+
+#endif // WIN64EXCEPTIONS
+}
+
+#if !defined(DACCESS_COMPILE)
+
+#ifdef DEBUGGING_SUPPORTED
+DebuggerExState* ThreadExceptionState::GetDebuggerState()
+{
+#ifdef WIN64EXCEPTIONS
+ if (m_pCurrentTracker)
+ {
+ return &(m_pCurrentTracker->m_DebuggerExState);
+ }
+ else
+ {
+ _ASSERTE(!"unexpected use of GetDebuggerState() when no exception in flight");
+#if defined(_MSC_VER)
+ #pragma warning(disable : 4640)
+#endif
+ static DebuggerExState m_emptyDebuggerExState;
+
+#if defined(_MSC_VER)
+ #pragma warning(default : 4640)
+#endif
+ return &m_emptyDebuggerExState;
+ }
+#else // WIN64EXCEPTIONS
+ return &(m_currentExInfo.m_DebuggerExState);
+#endif // WIN64EXCEPTIONS
+}
+
+BOOL ThreadExceptionState::IsDebuggerInterceptable()
+{
+ LIMITED_METHOD_CONTRACT;
+ DWORD ExceptionCode = GetExceptionCode();
+ return (BOOL)((ExceptionCode != STATUS_STACK_OVERFLOW) &&
+ (ExceptionCode != EXCEPTION_BREAKPOINT) &&
+ (ExceptionCode != EXCEPTION_SINGLE_STEP) &&
+ !GetFlags()->UnwindHasStarted() &&
+ !GetFlags()->DebuggerInterceptNotPossible());
+}
+
+#ifdef _TARGET_X86_
+PEXCEPTION_REGISTRATION_RECORD GetClrSEHRecordServicingStackPointer(Thread *pThread, void *pStackPointer);
+#endif // _TARGET_X86_
+
+//---------------------------------------------------------------------------------------
+//
+// This function is called by the debugger to store information necessary to intercept the current exception.
+// This information is consumed by the EH subsystem to start the unwind and resume execution without
+// finding and executing a catch clause.
+//
+// Arguments:
+// pJitManager - the JIT manager for the method where we are going to intercept the exception
+// pThread - the thread on which the interception is taking place
+// methodToken - the MethodDef token of the interception method
+// pFunc - the MethodDesc of the interception method
+// natOffset - the native offset at which we are going to resume execution
+// sfDebuggerInterceptFramePointer
+// - the frame pointer of the interception method frame
+// pFlags - flags on the current exception (ExInfo on x86 and ExceptionTracker on WIN64);
+// to be set by this function to indicate that an interception is going on
+//
+// Return Value:
+// whether the operation is successful
+//
+
+BOOL DebuggerExState::SetDebuggerInterceptInfo(IJitManager *pJitManager,
+ Thread *pThread,
+ const METHODTOKEN& methodToken,
+ MethodDesc *pFunc,
+ ULONG_PTR natOffset,
+ StackFrame sfDebuggerInterceptFramePointer,
+ ExceptionFlags* pFlags)
+{
+ WRAPPER_NO_CONTRACT;
+
+ //
+ // Verify parameters are non-NULL
+ //
+ if ((pJitManager == NULL) ||
+ (pThread == NULL) ||
+ (methodToken.IsNull()) ||
+ (pFunc == NULL) ||
+ (natOffset == (TADDR)0) ||
+ (sfDebuggerInterceptFramePointer.IsNull()))
+ {
+ return FALSE;
+ }
+
+ //
+ // You can only call this function on the currently active exception.
+ //
+ if (this != pThread->GetExceptionState()->GetDebuggerState())
+ {
+ return FALSE;
+ }
+
+ //
+    // Check that the interception frame pointer is not beyond the point we have searched so far.
+ //
+ if (sfDebuggerInterceptFramePointer > m_sfDebuggerIndicatedFramePointer)
+ {
+ return FALSE;
+ }
+
+ int nestingLevel = 0;
+
+#if defined(_TARGET_X86_)
+ //
+ // Get the SEH frame that covers this location on the stack. Note: we pass a skip count of 1. We know that when
+ // this is called, there is a nested exception handler on pThread's stack that is only there during exception
+ // processing, and it won't be there when we go to do the interception. Therefore, we skip that nested record,
+ // and pick the next valid record above it.
+ //
+ m_pDebuggerInterceptFrame = GetClrSEHRecordServicingStackPointer(pThread, (LPVOID)sfDebuggerInterceptFramePointer.SP);
+ if (m_pDebuggerInterceptFrame == EXCEPTION_CHAIN_END)
+ {
+ return FALSE;
+ }
+
+ //
+ // Now we need to search and find the function information for this entry on the stack.
+ //
+ nestingLevel = ComputeEnclosingHandlerNestingLevel(pJitManager,
+ methodToken,
+ natOffset);
+#elif !defined(WIN64EXCEPTIONS)
+ // !_TARGET_X86_ && !WIN64EXCEPTIONS
+ PORTABILITY_ASSERT("SetDebuggerInterceptInfo() (ExState.cpp) - continuable exceptions NYI\n");
+ return FALSE;
+#endif // !_TARGET_X86_
+
+ //
+ // These values will override the normal information used by the EH subsystem to handle the exception.
+ // They are retrieved by GetDebuggerInterceptInfo().
+ //
+ m_pDebuggerInterceptFunc = pFunc;
+ m_dDebuggerInterceptHandlerDepth = nestingLevel;
+ m_sfDebuggerInterceptFramePointer = sfDebuggerInterceptFramePointer;
+ m_pDebuggerInterceptNativeOffset = natOffset;
+
+ // set a flag on the exception tracking struct to indicate that an interception is in progress
+ pFlags->SetDebuggerInterceptInfo();
+ return TRUE;
+}
+#endif // DEBUGGING_SUPPORTED
+
+EHClauseInfo* ThreadExceptionState::GetCurrentEHClauseInfo()
+{
+#ifdef WIN64EXCEPTIONS
+ if (m_pCurrentTracker)
+ {
+ return &(m_pCurrentTracker->m_EHClauseInfo);
+ }
+ else
+ {
+ _ASSERTE(!"unexpected use of GetCurrentEHClauseInfo() when no exception in flight");
+#if defined(_MSC_VER)
+ #pragma warning(disable : 4640)
+#endif // defined(_MSC_VER)
+
+ static EHClauseInfo m_emptyEHClauseInfo;
+
+#if defined(_MSC_VER)
+ #pragma warning(default : 4640)
+#endif // defined(_MSC_VER)
+
+ return &m_emptyEHClauseInfo;
+ }
+#else // WIN64EXCEPTIONS
+ return &(m_currentExInfo.m_EHClauseInfo);
+#endif // WIN64EXCEPTIONS
+}
+
+#endif // DACCESS_COMPILE
+
+void ThreadExceptionState::SetThreadExceptionFlag(ThreadExceptionFlag flag)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_flag = (ThreadExceptionFlag)((DWORD)m_flag | flag);
+}
+
+void ThreadExceptionState::ResetThreadExceptionFlag(ThreadExceptionFlag flag)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_flag = (ThreadExceptionFlag)((DWORD)m_flag & ~flag);
+}
+
+BOOL ThreadExceptionState::HasThreadExceptionFlag(ThreadExceptionFlag flag)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return ((DWORD)m_flag & flag);
+}
+
+ThreadExceptionFlagHolder::ThreadExceptionFlagHolder(ThreadExceptionState::ThreadExceptionFlag flag)
+{
+ WRAPPER_NO_CONTRACT;
+
+ Thread* pThread = GetThread();
+ _ASSERTE(pThread);
+
+ m_pExState = pThread->GetExceptionState();
+
+ m_flag = flag;
+ m_pExState->SetThreadExceptionFlag(m_flag);
+}
+
+ThreadExceptionFlagHolder::~ThreadExceptionFlagHolder()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(m_pExState);
+ m_pExState->ResetThreadExceptionFlag(m_flag);
+}
+
+#ifdef DACCESS_COMPILE
+
+void
+ThreadExceptionState::EnumChainMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+#ifdef WIN64EXCEPTIONS
+ ExceptionTracker* head = m_pCurrentTracker;
+
+ if (head == NULL)
+ {
+ return;
+ }
+
+#else // WIN64EXCEPTIONS
+ ExInfo* head = &m_currentExInfo;
+#endif // WIN64EXCEPTIONS
+
+ for (;;)
+ {
+ head->EnumMemoryRegions(flags);
+
+ if (!head->m_pPrevNestedInfo.IsValid())
+ {
+ break;
+ }
+
+ head->m_pPrevNestedInfo.EnumMem();
+ head = head->m_pPrevNestedInfo;
+ }
+}
+
+
+#endif // DACCESS_COMPILE
+
+
+
diff --git a/src/vm/exstate.h b/src/vm/exstate.h
new file mode 100644
index 0000000000..daf8029b75
--- /dev/null
+++ b/src/vm/exstate.h
@@ -0,0 +1,374 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+#ifndef __ExState_h__
+#define __ExState_h__
+
+class ExceptionFlags;
+class DebuggerExState;
+class EHClauseInfo;
+
+#include "exceptionhandling.h"
+
+#if !defined(WIN64EXCEPTIONS)
+// ExInfo contains definitions for 32bit
+#include "exinfo.h"
+#endif // !defined(WIN64EXCEPTIONS)
+
+#if !defined(DACCESS_COMPILE)
+#define PRESERVE_WATSON_ACROSS_CONTEXTS 1
+#endif
+
+extern LONG WINAPI CLRVectoredExceptionHandlerShim(PEXCEPTION_POINTERS pExceptionInfo);
+extern StackWalkAction COMPlusUnwindCallback(CrawlFrame *pCf, ThrowCallbackType *pData);
+
+//
+// This class serves as a forwarding and abstraction layer for the EH subsystem.
+// Since we have two different implementations, this class is needed to unify
+// the EE's view of EH. Ideally, this is just a step along the way to a unified
+// EH subsystem.
+//
+typedef DPTR(class ThreadExceptionState) PTR_ThreadExceptionState;
+class ThreadExceptionState
+{
+ friend class ClrDataExceptionState;
+ friend class CheckAsmOffsets;
+ friend class StackFrameIterator;
+
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+#endif // DACCESS_COMPILE
+
+ // ProfToEEInterfaceImpl::GetNotifiedExceptionClauseInfo needs access so that it can fetch the
+ // ExceptionTracker or the ExInfo as appropriate for the platform
+ friend class ProfToEEInterfaceImpl;
+
+#ifdef WIN64EXCEPTIONS
+ friend class ExceptionTracker;
+#else
+ friend class ExInfo;
+#endif // WIN64EXCEPTIONS
+
+public:
+
+ void FreeAllStackTraces();
+ void ClearThrowablesForUnload(HandleTableBucket* pHndTblBucket);
+
+#ifdef _DEBUG
+ typedef enum
+ {
+ STEC_All,
+ STEC_CurrentTrackerEqualNullOkHackForFatalStackOverflow,
+#ifdef FEATURE_INTERPRETER
+ STEC_CurrentTrackerEqualNullOkForInterpreter,
+#endif // FEATURE_INTERPRETER
+ } SetThrowableErrorChecking;
+#endif
+
+ void SetThrowable(OBJECTREF throwable DEBUG_ARG(SetThrowableErrorChecking stecFlags = STEC_All));
+ OBJECTREF GetThrowable();
+ OBJECTHANDLE GetThrowableAsHandle();
+ DWORD GetExceptionCode();
+ BOOL IsComPlusException();
+ EXCEPTION_POINTERS* GetExceptionPointers();
+ PTR_EXCEPTION_RECORD GetExceptionRecord();
+ PTR_CONTEXT GetContextRecord();
+ BOOL IsExceptionInProgress();
+ void GetLeafFrameInfo(StackTraceElement* pStackTrace);
+
+ ExceptionFlags* GetFlags();
+
+ ThreadExceptionState();
+ ~ThreadExceptionState();
+
+#if !defined(WIN64EXCEPTIONS)
+ void SetExceptionPointers(EXCEPTION_POINTERS *pExceptionPointers);
+#endif
+
+
+#ifdef DEBUGGING_SUPPORTED
+ // DebuggerExState stores information necessary for intercepting an exception
+ DebuggerExState* GetDebuggerState();
+
+ // check to see if the current exception is interceptable
+ BOOL IsDebuggerInterceptable();
+#endif // DEBUGGING_SUPPORTED
+
+ EHClauseInfo* GetCurrentEHClauseInfo();
+
+#ifdef DACCESS_COMPILE
+ void EnumChainMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif // DACCESS_COMPILE
+
+ // After unwinding from an SO, there may be stale exception state.
+ void ClearExceptionStateAfterSO(void* pStackFrameSP);
+
+ enum ThreadExceptionFlag
+ {
+ TEF_None = 0x00000000,
+
+ // Right now this flag is only used on WIN64. We set this flag near the end of the second pass when we pop
+ // the ExceptionTracker for the current exception but before we actually resume execution. It is unsafe
+ // to start a funclet-skipping stackwalk in this time window.
+ TEF_InconsistentExceptionState = 0x00000001,
+
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ TEF_ForeignExceptionRaise = 0x00000002,
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ };
+
+ void SetThreadExceptionFlag(ThreadExceptionFlag flag);
+ void ResetThreadExceptionFlag(ThreadExceptionFlag flag);
+ BOOL HasThreadExceptionFlag(ThreadExceptionFlag flag);
+
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ inline void SetRaisingForeignException()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetThreadExceptionFlag(TEF_ForeignExceptionRaise);
+ }
+
+ inline BOOL IsRaisingForeignException()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return HasThreadExceptionFlag(TEF_ForeignExceptionRaise);
+ }
+
+ inline void ResetRaisingForeignException()
+ {
+ LIMITED_METHOD_CONTRACT;
+ ResetThreadExceptionFlag(TEF_ForeignExceptionRaise);
+ }
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+
+#if defined(_DEBUG)
+ void AssertStackTraceInfo(StackTraceInfo *pSTI);
+#endif // _DEBUG
+
+private:
+ Thread* GetMyThread();
+
+#ifdef WIN64EXCEPTIONS
+ PTR_ExceptionTracker m_pCurrentTracker;
+ ExceptionTracker m_OOMTracker;
+public:
+ PTR_ExceptionTracker GetCurrentExceptionTracker()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCurrentTracker;
+ }
+#else
+ ExInfo m_currentExInfo;
+public:
+ PTR_ExInfo GetCurrentExceptionTracker()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return PTR_ExInfo(PTR_HOST_MEMBER_TADDR(ThreadExceptionState, this, m_currentExInfo));
+ }
+#endif
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+private:
+ CorruptionSeverity m_LastActiveExceptionCorruptionSeverity;
+ BOOL m_fCanReflectionTargetHandleException;
+
+public:
+ // Returns the corruption severity of the last active exception
+ inline CorruptionSeverity GetLastActiveExceptionCorruptionSeverity()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (CorruptionSeverity)GET_CORRUPTION_SEVERITY(m_LastActiveExceptionCorruptionSeverity);
+ }
+
+ // Set the corruption severity of the last active exception
+ inline void SetLastActiveExceptionCorruptionSeverity(CorruptionSeverity severityToSet)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_LastActiveExceptionCorruptionSeverity = severityToSet;
+ }
+
+ // Returns a bool indicating if the last active exception's corruption severity should
+ // be used when exception is reraised (e.g. Reflection Invocation, AD transition, etc)
+ inline BOOL ShouldLastActiveExceptionCorruptionSeverityBeReused()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return CAN_REUSE_CORRUPTION_SEVERITY(m_LastActiveExceptionCorruptionSeverity);
+ }
+
+ // Returns a BOOL to indicate if reflection target can handle CSE or not.
+ // This is used in DispatchInfo::CanIDispatchTargetHandleException.
+ inline BOOL CanReflectionTargetHandleException()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_fCanReflectionTargetHandleException;
+ }
+
+    // Sets a BOOL indicating whether the Reflection invocation target can handle an exception.
+ // Used in ReflectionInvocation.cpp.
+ inline void SetCanReflectionTargetHandleException(BOOL fCanReflectionTargetHandleException)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_fCanReflectionTargetHandleException = fCanReflectionTargetHandleException;
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+private:
+ ThreadExceptionFlag m_flag;
+
+#ifndef FEATURE_PAL
+private:
+ EHWatsonBucketTracker m_UEWatsonBucketTracker;
+public:
+ PTR_EHWatsonBucketTracker GetUEWatsonBucketTracker()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return PTR_EHWatsonBucketTracker(PTR_HOST_MEMBER_TADDR(ThreadExceptionState, this, m_UEWatsonBucketTracker));
+ }
+#endif // !FEATURE_PAL
+
+private:
+
+#ifndef WIN64EXCEPTIONS
+
+ //
+ // @NICE: Ideally, these friends shouldn't all be enumerated like this. If they were all part of the same
+ // class, that would be nice. I'm trying to avoid adding x86-specific accessors to this class as well as
+ // trying to limit the visibility of the ExInfo struct since Win64 doesn't use ExInfo.
+ //
+ friend EXCEPTION_DISPOSITION COMPlusAfterUnwind(
+ EXCEPTION_RECORD *pExceptionRecord,
+ EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
+ ThrowCallbackType& tct);
+ friend EXCEPTION_DISPOSITION COMPlusAfterUnwind(
+ EXCEPTION_RECORD *pExceptionRecord,
+ EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
+ ThrowCallbackType& tct,
+ Frame *pStartFrame);
+
+ friend EXCEPTION_HANDLER_IMPL(COMPlusFrameHandler);
+
+ friend EXCEPTION_DISPOSITION __cdecl
+ CPFH_RealFirstPassHandler(EXCEPTION_RECORD *pExceptionRecord,
+ EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
+ CONTEXT *pContext,
+ void *pDispatcherContext,
+ BOOL bAsynchronousThreadStop,
+ BOOL fPGCDisabledOnEntry);
+
+ friend EXCEPTION_DISPOSITION __cdecl
+ CPFH_UnwindHandler(EXCEPTION_RECORD *pExceptionRecord,
+ EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
+ CONTEXT *pContext,
+ void *pDispatcherContext);
+
+ friend void CPFH_UnwindFrames1(Thread* pThread,
+ EXCEPTION_REGISTRATION_RECORD* pEstablisherFrame,
+ DWORD exceptionCode);
+
+#ifdef _TARGET_X86_
+ friend LPVOID COMPlusEndCatchWorker(Thread * pThread);
+#endif
+
+ friend StackWalkAction COMPlusThrowCallback(CrawlFrame *pCf, ThrowCallbackType *pData);
+
+ friend StackWalkAction COMPlusUnwindCallback(CrawlFrame *pCf, ThrowCallbackType *pData);
+
+#if defined(_TARGET_X86_)
+ friend void ResumeAtJitEH(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr,
+ DWORD nestingLevel, Thread *pThread, BOOL unwindStack);
+#endif // _TARGET_X86_
+
+ friend _EXCEPTION_HANDLER_DECL(COMPlusNestedExceptionHandler);
+
+ friend void COMPlusCooperativeTransitionHandler(Frame* pFrame);
+
+ friend bool ShouldHandleManagedFault(
+ EXCEPTION_RECORD* pExceptionRecord,
+ CONTEXT* pContext,
+ EXCEPTION_REGISTRATION_RECORD* pEstablisherFrame,
+ Thread* pThread);
+
+ friend LONG WINAPI CLRVectoredExceptionHandlerShim(PEXCEPTION_POINTERS pExceptionInfo);
+
+ friend class Thread;
+    // It is the following method that needs to be a friend, but its prototype pulls in a lot more stuff,
+    // so we just make the entire Thread class a friend.
+ // friend StackWalkAction Thread::StackWalkFramesEx(PREGDISPLAY pRD, PSTACKWALKFRAMESCALLBACK pCallback,
+ // VOID *pData, unsigned flags, Frame *pStartFrame);
+
+#endif // WIN64EXCEPTIONS
+
+};
+
+
+// <WARNING>
+// This holder is not thread safe.
+// </WARNING>
+class ThreadExceptionFlagHolder
+{
+public:
+ ThreadExceptionFlagHolder(ThreadExceptionState::ThreadExceptionFlag flag);
+ ~ThreadExceptionFlagHolder();
+
+private:
+ ThreadExceptionState* m_pExState;
+ ThreadExceptionState::ThreadExceptionFlag m_flag;
+};
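+
+// A minimal usage sketch (assuming, as the RAII shape suggests, that the
+// constructor sets the flag on the current thread's ThreadExceptionState and
+// the destructor resets it):
+//
+//     {
+//         ThreadExceptionFlagHolder holder(ThreadExceptionState::TEF_InconsistentExceptionState);
+//         // ... window in which the flag must be set ...
+//     }   // the flag is reset here even if the scope is exited by an exception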
+
+extern BOOL IsWatsonEnabled();
+
+#ifndef FEATURE_PAL
+// This preprocessor definition is used to capture watson buckets
+// at AppDomain transition boundary in END_DOMAIN_TRANSITION macro.
+//
+// This essentially copies the watson bucket details from the current exception tracker
+// to the UE watson bucket tracker, but only if the exception is a preallocated exception and NOT
+// a thread abort. For a preallocated thread abort, the UE Watson bucket tracker would already have the
+// bucket details.
+//
+// It also captures buckets for non-preallocated exceptions (including a non-preallocated thread abort) since
+// the object would have the IP inside it.
+#define CAPTURE_BUCKETS_AT_TRANSITION(pThread, oThrowable) \
+ if (IsWatsonEnabled()) \
+ { \
+ /* Switch to COOP mode */ \
+ GCX_COOP(); \
+ \
+        /* oThrowable is actually the GET_THROWABLE macro; extract the actual throwable once and for all */ \
+ OBJECTREF throwable = oThrowable; \
+ if (CLRException::IsPreallocatedExceptionObject(throwable)) \
+ { \
+ if (pThread->GetExceptionState()->GetCurrentExceptionTracker() != NULL) \
+ { \
+ if (!IsThrowableThreadAbortException(throwable)) \
+ { \
+ PTR_EHWatsonBucketTracker pWatsonBucketTracker = GetWatsonBucketTrackerForPreallocatedException(throwable, FALSE); \
+ if (pWatsonBucketTracker != NULL) \
+ { \
+ pThread->GetExceptionState()->GetUEWatsonBucketTracker()->CopyEHWatsonBucketTracker(*(pWatsonBucketTracker)); \
+ pThread->GetExceptionState()->GetUEWatsonBucketTracker()->CaptureUnhandledInfoForWatson(TypeOfReportedError::UnhandledException, pThread, NULL); \
+ DEBUG_STMT(pThread->GetExceptionState()->GetUEWatsonBucketTracker()->SetCapturedAtADTransition();) \
+ } \
+ } \
+ } \
+ } \
+ else \
+ { \
+ SetupWatsonBucketsForNonPreallocatedExceptions(throwable); \
+ } \
+ }
+#else // !FEATURE_PAL
+#define CAPTURE_BUCKETS_AT_TRANSITION(pThread, oThrowable)
+#endif // FEATURE_PAL
+
+#endif // __ExState_h__
diff --git a/src/vm/exstatecommon.h b/src/vm/exstatecommon.h
new file mode 100644
index 0000000000..1a17794015
--- /dev/null
+++ b/src/vm/exstatecommon.h
@@ -0,0 +1,531 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+#ifndef __ExStateCommon_h__
+#define __ExStateCommon_h__
+
+#include "stackframe.h"
+
+class ExceptionFlags;
+
+#ifdef DEBUGGING_SUPPORTED
+//---------------------------------------------------------------------------------------
+//
+// This class stores information necessary to intercept an exception. It's basically a communication channel
+// between the debugger and the EH subsystem. Each internal exception tracking structure
+// (ExInfo on x86 and ExceptionTracker on WIN64) contains one DebuggerExState.
+//
+// Notes:
+// This class actually stores more information on x86 than on WIN64 because the x86 EH subsystem
+// has more work to do when unwinding the stack. WIN64 just asks the OS to do it.
+//
+
+class DebuggerExState
+{
+public:
+
+ //---------------------------------------------------------------------------------------
+ //
+ // constructor
+ //
+
+ DebuggerExState()
+ {
+ Init();
+ }
+
+ //---------------------------------------------------------------------------------------
+ //
+ // This function is simply used to initialize all the fields in the DebuggerExState.
+ //
+
+ void Init()
+ {
+ m_sfDebuggerIndicatedFramePointer = StackFrame();
+ m_pDebuggerInterceptFunc = NULL;
+ m_sfDebuggerInterceptFramePointer = StackFrame();
+ m_pDebuggerContext = NULL;
+ m_pDebuggerInterceptNativeOffset = 0;
+
+ // x86-specific fields
+ #if defined(_TARGET_X86_)
+ m_pDebuggerInterceptFrame = EXCEPTION_CHAIN_END;
+ #endif // defined(_TARGET_X86_)
+ m_dDebuggerInterceptHandlerDepth = 0;
+ }
+
+ //---------------------------------------------------------------------------------------
+ //
+ // Retrieves the opaque token stored by the debugger.
+ //
+ // Return Value:
+ // the stored opaque token for the debugger
+ //
+
+ void* GetDebuggerInterceptContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pDebuggerContext;
+ }
+
+ //---------------------------------------------------------------------------------------
+ //
+ // Stores an opaque token which is only used by the debugger.
+ //
+ // Arguments:
+ // pContext - the token to be stored
+ //
+
+ void SetDebuggerInterceptContext(void* pContext)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pDebuggerContext = pContext;
+ }
+
+ //---------------------------------------------------------------------------------------
+ //
+ // Marks the current stack frame visited by the EH subsystem during the first pass.
+ // This marker moves closer to the root of the stack while each stack frame is examined in the first pass.
+ // This continues until the end of the first pass.
+ //
+ // Arguments:
+ // stackPointer - SP of the current stack frame
+ //
+
+ void SetDebuggerIndicatedFramePointer(void* stackPointer)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_sfDebuggerIndicatedFramePointer = StackFrame((UINT_PTR)stackPointer);
+ }
+
+ // This function stores the information necessary to intercept an exception in the DebuggerExState.
+ BOOL SetDebuggerInterceptInfo(IJitManager *pJitManager,
+ Thread *pThread,
+ const METHODTOKEN& methodToken,
+ MethodDesc *pMethDesc,
+ ULONG_PTR natOffset,
+ StackFrame sfDebuggerInterceptFramePointer,
+ ExceptionFlags* pFlags);
+
+ //---------------------------------------------------------------------------------------
+ //
+ // This function is basically just a getter to retrieve the information stored on the DebuggerExState.
+ // Refer to the comments for individual fields for more information.
+ //
+ // Arguments:
+ // pEstablisherFrame - m_pDebuggerInterceptFrame
+ // ppFunc - m_pDebuggerInterceptFunc
+ // pdHandler - m_dDebuggerInterceptHandlerDepth
+ // ppStack - the SP of m_sfDebuggerInterceptFramePointer
+ // pNativeOffset - m_pDebuggerInterceptNativeOffset;
+ // ppFrame - always set to NULL
+ //
+ // Notes:
+ // Everything is an out parameter.
+ //
+ // Apparently ppFrame is actually used on x86 to set tct.pBottomFrame to NULL.
+ //
+
+ void GetDebuggerInterceptInfo(
+ #if defined(_TARGET_X86_)
+ PEXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
+ #endif // _TARGET_X86_
+ MethodDesc **ppFunc,
+ int *pdHandler,
+ BYTE **ppStack,
+ ULONG_PTR *pNativeOffset,
+ Frame **ppFrame)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#if defined(_TARGET_X86_)
+ if (pEstablisherFrame != NULL)
+ {
+ *pEstablisherFrame = m_pDebuggerInterceptFrame;
+ }
+#endif // _TARGET_X86_
+
+ if (ppFunc != NULL)
+ {
+ *ppFunc = m_pDebuggerInterceptFunc;
+ }
+
+ if (pdHandler != NULL)
+ {
+ *pdHandler = m_dDebuggerInterceptHandlerDepth;
+ }
+
+ if (ppStack != NULL)
+ {
+ *ppStack = (BYTE *)m_sfDebuggerInterceptFramePointer.SP;
+ }
+
+ if (pNativeOffset != NULL)
+ {
+ *pNativeOffset = m_pDebuggerInterceptNativeOffset;
+ }
+
+ if (ppFrame != NULL)
+ {
+ *ppFrame = NULL;
+ }
+ }
+
+private:
+ // This frame pointer marks the latest stack frame examined by the EH subsystem in the first pass.
+ // An exception cannot be intercepted closer to the root than this frame pointer.
+ StackFrame m_sfDebuggerIndicatedFramePointer;
+
+ // the method in which we are going to resume execution
+ MethodDesc* m_pDebuggerInterceptFunc;
+
+ // the frame pointer of the stack frame where we are intercepting the exception
+ StackFrame m_sfDebuggerInterceptFramePointer;
+
+ // opaque token used by the debugger
+ void* m_pDebuggerContext;
+
+ // the native offset at which to resume execution
+ ULONG_PTR m_pDebuggerInterceptNativeOffset;
+
+ // The remaining fields are only used on x86.
+#if defined(_TARGET_X86_)
+ // the exception registration record covering the stack range containing the interception point
+ PEXCEPTION_REGISTRATION_RECORD m_pDebuggerInterceptFrame;
+#endif // defined(_TARGET_X86_)
+
+ // the nesting level at which we want to resume execution
+ int m_dDebuggerInterceptHandlerDepth;
+};
+#endif // DEBUGGING_SUPPORTED
+
+class EHClauseInfo
+{
+public:
+ EHClauseInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // For the profiler, other clause fields are not valid if m_ClauseType is COR_PRF_CLAUSE_NONE.
+ m_ClauseType = COR_PRF_CLAUSE_NONE;
+ m_IPForEHClause = 0;
+ m_sfForEHClause.Clear();
+ m_csfEHClause.Clear();
+ m_fManagedCodeEntered = FALSE;
+ }
+
+ void SetEHClauseType(COR_PRF_CLAUSE_TYPE EHClauseType)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_ClauseType = EHClauseType;
+ }
+
+ void SetInfo(COR_PRF_CLAUSE_TYPE EHClauseType,
+ UINT_PTR uIPForEHClause,
+ StackFrame sfForEHClause)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_ClauseType = EHClauseType;
+ m_IPForEHClause = uIPForEHClause;
+ m_sfForEHClause = sfForEHClause;
+ }
+
+ void ResetInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // For the profiler, other clause fields are not valid if m_ClauseType is COR_PRF_CLAUSE_NONE.
+ m_ClauseType = COR_PRF_CLAUSE_NONE;
+ m_IPForEHClause = 0;
+ m_sfForEHClause.Clear();
+ m_csfEHClause.Clear();
+ }
+
+ void SetManagedCodeEntered(BOOL fEntered)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_fManagedCodeEntered = fEntered;
+ }
+
+ void SetCallerStackFrame(CallerStackFrame csfEHClause)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_csfEHClause = csfEHClause;
+ }
+
+ COR_PRF_CLAUSE_TYPE GetClauseType() { LIMITED_METHOD_CONTRACT; return m_ClauseType; }
+
+ UINT_PTR GetIPForEHClause() { LIMITED_METHOD_CONTRACT; return m_IPForEHClause; }
+ UINT_PTR GetFramePointerForEHClause() { LIMITED_METHOD_CONTRACT; return m_sfForEHClause.SP; }
+
+ BOOL IsManagedCodeEntered() { LIMITED_METHOD_CONTRACT; return m_fManagedCodeEntered; }
+
+ StackFrame GetStackFrameForEHClause() { LIMITED_METHOD_CONTRACT; return m_sfForEHClause; }
+ CallerStackFrame GetCallerStackFrameForEHClause(){ LIMITED_METHOD_CONTRACT; return m_csfEHClause; }
+
+ // On some platforms, we make the call to the funclets via an assembly helper. The reference to the field
+ // containing the stack pointer is passed to the assembly helper so that it can update
+    // it with the correct SP value once its prolog has executed.
+ //
+ // This method is used to get the field reference
+ CallerStackFrame* GetCallerStackFrameForEHClauseReference()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_csfEHClause;
+ }
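+
+    // (Illustrative only; the helper name below is hypothetical. A call site
+    // would look roughly like
+    //     CallFunclet(pfnHandler, pInfo->GetCallerStackFrameForEHClauseReference());
+    // with the helper's prolog storing its established SP through the pointer.)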
+
+private:
+ UINT_PTR m_IPForEHClause; // the entry point of the current notified exception clause
+    StackFrame          m_sfForEHClause;        // the associated frame pointer of the current notified exception clause
+ CallerStackFrame m_csfEHClause; // the caller SP of the funclet; only used on WIN64
+
+ COR_PRF_CLAUSE_TYPE m_ClauseType; // this has a value from COR_PRF_CLAUSE_TYPE while an exception notification is pending
+ BOOL m_fManagedCodeEntered; // this flag indicates that we have called the managed code for the current EH clause
+};
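+
+// A minimal usage sketch (hypothetical call site; the EH code would publish
+// the clause for the profiler before running it and reset it afterwards):
+//
+//     EHClauseInfo* pInfo = pThread->GetExceptionState()->GetCurrentEHClauseInfo();
+//     pInfo->SetInfo(COR_PRF_CLAUSE_CATCH, uHandlerIP, sfHandlerFrame);
+//     pInfo->SetManagedCodeEntered(TRUE);
+//     // ... invoke the catch funclet ...
+//     pInfo->SetManagedCodeEntered(FALSE);
+//     pInfo->ResetInfo();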
+
+class ExceptionFlags
+{
+public:
+ ExceptionFlags()
+ {
+ Init();
+ }
+
+#if defined(WIN64EXCEPTIONS)
+ ExceptionFlags(bool fReadOnly)
+ {
+ Init();
+#ifdef _DEBUG
+ if (fReadOnly)
+ {
+ m_flags |= Ex_FlagsAreReadOnly;
+ m_debugFlags |= Ex_FlagsAreReadOnly;
+ }
+#endif // _DEBUG
+ }
+#endif // defined(WIN64EXCEPTIONS)
+
+ void AssertIfReadOnly()
+ {
+ SUPPORTS_DAC;
+
+#if defined(WIN64EXCEPTIONS) && defined(_DEBUG)
+ if ((m_flags & Ex_FlagsAreReadOnly) || (m_debugFlags & Ex_FlagsAreReadOnly))
+ {
+ _ASSERTE(!"Tried to update read-only flags!");
+ }
+#endif // defined(WIN64EXCEPTIONS) && defined(_DEBUG)
+ }
+
+ void Init()
+ {
+ m_flags = 0;
+#ifdef _DEBUG
+ m_debugFlags = 0;
+#endif // _DEBUG
+ }
+
+ BOOL IsRethrown() { LIMITED_METHOD_CONTRACT; return m_flags & Ex_IsRethrown; }
+ void SetIsRethrown() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_IsRethrown; }
+ void ResetIsRethrown() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags &= ~Ex_IsRethrown; }
+
+ BOOL UnwindHasStarted() { LIMITED_METHOD_CONTRACT; return m_flags & Ex_UnwindHasStarted; }
+ void SetUnwindHasStarted() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_UnwindHasStarted; }
+ void ResetUnwindHasStarted() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags &= ~Ex_UnwindHasStarted; }
+
+ BOOL UnwindingToFindResumeFrame() { LIMITED_METHOD_CONTRACT; return m_flags & Ex_UnwindingToFindResumeFrame; }
+ void SetUnwindingToFindResumeFrame() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_UnwindingToFindResumeFrame; }
+ void ResetUnwindingToFindResumeFrame() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags &= ~Ex_UnwindingToFindResumeFrame; }
+
+ BOOL UseExInfoForStackwalk() { LIMITED_METHOD_DAC_CONTRACT; return m_flags & Ex_UseExInfoForStackwalk; }
+ void SetUseExInfoForStackwalk() { LIMITED_METHOD_DAC_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_UseExInfoForStackwalk; }
+ void ResetUseExInfoForStackwalk() { LIMITED_METHOD_DAC_CONTRACT; AssertIfReadOnly(); m_flags &= ~Ex_UseExInfoForStackwalk; }
+
+#ifdef _DEBUG
+ BOOL ReversePInvokeEscapingException() { LIMITED_METHOD_DAC_CONTRACT; return m_debugFlags & Ex_RPInvokeEscapingException; }
+ void SetReversePInvokeEscapingException() { LIMITED_METHOD_DAC_CONTRACT; AssertIfReadOnly(); m_debugFlags |= Ex_RPInvokeEscapingException; }
+ void ResetReversePInvokeEscapingException() { LIMITED_METHOD_DAC_CONTRACT; AssertIfReadOnly(); m_debugFlags &= ~Ex_RPInvokeEscapingException; }
+#endif // _DEBUG
+
+#ifdef DEBUGGING_SUPPORTED
+ BOOL SentDebugUserFirstChance() { LIMITED_METHOD_CONTRACT; return m_flags & Ex_SentDebugUserFirstChance; }
+ void SetSentDebugUserFirstChance() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_SentDebugUserFirstChance; }
+
+ BOOL SentDebugFirstChance() { LIMITED_METHOD_CONTRACT; return m_flags & Ex_SentDebugFirstChance; }
+ void SetSentDebugFirstChance() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_SentDebugFirstChance; }
+
+ BOOL SentDebugUnwindBegin() { LIMITED_METHOD_CONTRACT; return m_flags & Ex_SentDebugUnwindBegin; }
+ void SetSentDebugUnwindBegin() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_SentDebugUnwindBegin; }
+
+ BOOL DebugCatchHandlerFound() { LIMITED_METHOD_CONTRACT; return m_flags & Ex_DebugCatchHandlerFound; }
+ void SetDebugCatchHandlerFound() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_DebugCatchHandlerFound; }
+
+ BOOL SentDebugUnhandled() { LIMITED_METHOD_CONTRACT; return m_flags & Ex_SentDebugUnhandled; }
+ void SetSentDebugUnhandled() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_SentDebugUnhandled; }
+
+ BOOL IsUnhandled() { LIMITED_METHOD_CONTRACT; return m_flags & Ex_IsUnhandled; }
+ void SetUnhandled() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_IsUnhandled; }
+
+ BOOL DebuggerInterceptNotPossible() { LIMITED_METHOD_CONTRACT; return m_flags & Ex_DebuggerInterceptNotPossible; }
+ void SetDebuggerInterceptNotPossible() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_DebuggerInterceptNotPossible; }
+
+ BOOL DebuggerInterceptInfo() { LIMITED_METHOD_DAC_CONTRACT; return m_flags & Ex_DebuggerInterceptInfo; }
+ void SetDebuggerInterceptInfo() { LIMITED_METHOD_DAC_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_DebuggerInterceptInfo; }
+#endif
+
+ BOOL ImpersonationTokenSet() { LIMITED_METHOD_CONTRACT; return m_flags & Ex_ImpersonationTokenSet; }
+ void SetImpersonationTokenSet() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_ImpersonationTokenSet; }
+ void ResetImpersonationTokenSet() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags &= ~Ex_ImpersonationTokenSet; }
+
+ BOOL WasThrownByUs() { LIMITED_METHOD_CONTRACT; return m_flags & Ex_WasThrownByUs; }
+ void SetWasThrownByUs() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_WasThrownByUs; }
+ void ResetWasThrownByUs() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags &= ~Ex_WasThrownByUs; }
+
+ BOOL GotWatsonBucketDetails() { LIMITED_METHOD_CONTRACT; return m_flags & Ex_GotWatsonBucketInfo; }
+ void SetGotWatsonBucketDetails() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags |= Ex_GotWatsonBucketInfo; }
+ void ResetGotWatsonBucketDetails() { LIMITED_METHOD_CONTRACT; AssertIfReadOnly(); m_flags &= ~Ex_GotWatsonBucketInfo; }
+
+private:
+ enum
+ {
+ Ex_IsRethrown = 0x00000001,
+ Ex_UnwindingToFindResumeFrame = 0x00000002,
+ Ex_UnwindHasStarted = 0x00000004,
+ Ex_UseExInfoForStackwalk = 0x00000008, // Use this ExInfo to unwind a fault (AV, zerodiv) back to managed code?
+
+#ifdef DEBUGGING_SUPPORTED
+ Ex_SentDebugUserFirstChance = 0x00000010,
+ Ex_SentDebugFirstChance = 0x00000020,
+ Ex_SentDebugUnwindBegin = 0x00000040,
+ Ex_DebugCatchHandlerFound = 0x00000080,
+ Ex_SentDebugUnhandled = 0x00000100,
+ Ex_DebuggerInterceptInfo = 0x00000200,
+ Ex_DebuggerInterceptNotPossible = 0x00000400,
+ Ex_IsUnhandled = 0x00000800,
+#endif
+ Ex_ImpersonationTokenSet = 0x00001000,
+
+ Ex_WasThrownByUs = 0x00002000,
+
+ Ex_GotWatsonBucketInfo = 0x00004000
+
+
+#if defined(WIN64EXCEPTIONS) && defined(_DEBUG)
+ ,
+ Ex_FlagsAreReadOnly = 0x80000000
+#endif // defined(WIN64EXCEPTIONS) && defined(_DEBUG)
+
+ };
+
+ UINT32 m_flags;
+
+#ifdef _DEBUG
+ enum
+ {
+ Ex_RPInvokeEscapingException = 0x40000000
+ };
+ UINT32 m_debugFlags;
+#endif // _DEBUG
+};
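+
+// A minimal usage sketch (hypothetical; in practice the flags live inside the
+// exception tracking structures, i.e. ExInfo or ExceptionTracker):
+//
+//     ExceptionFlags* pFlags = ...;        // obtained from the current tracker
+//     if (!pFlags->UnwindHasStarted())
+//         pFlags->SetUnwindHasStarted();   // asserts in debug builds if the flags are read-only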
+
+//------------------------------------------------------------------------------
+// Error reporting (unhandled exception, fatal error, user breakpoint
+class TypeOfReportedError
+{
+public:
+ enum Type {INVALID, UnhandledException, FatalError, UserBreakpoint, NativeThreadUnhandledException, NativeBreakpoint, StackOverflowException};
+
+ TypeOfReportedError(Type t) : m_type(t) {}
+
+ BOOL IsUnhandledException() { LIMITED_METHOD_CONTRACT; return (m_type == UnhandledException) || (m_type == NativeThreadUnhandledException) || (m_type == StackOverflowException); }
+ BOOL IsFatalError() { return (m_type == FatalError); }
+ BOOL IsUserBreakpoint() {return (m_type == UserBreakpoint); }
+ BOOL IsBreakpoint() {return (m_type == UserBreakpoint) || (m_type == NativeBreakpoint); }
+ BOOL IsException() { LIMITED_METHOD_CONTRACT; return IsUnhandledException() || (m_type == NativeBreakpoint) || (m_type == StackOverflowException); }
+
+ Type GetType() { return m_type; }
+ void SetType(Type t) { m_type = t; }
+
+private:
+ Type m_type;
+};
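+
+// For example, TypeOfReportedError(TypeOfReportedError::UnhandledException)
+// answers TRUE to IsUnhandledException() and IsException(), and FALSE to
+// IsFatalError() and IsBreakpoint().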
+
+
+#ifndef FEATURE_PAL
+// This class is used to track Watson bucketing information for an exception.
+typedef DPTR(class EHWatsonBucketTracker) PTR_EHWatsonBucketTracker;
+class EHWatsonBucketTracker
+{
+private:
+ struct
+ {
+ PTR_VOID m_pUnhandledBuckets;
+ UINT_PTR m_UnhandledIp;
+ } m_WatsonUnhandledInfo;
+
+#ifdef _DEBUG
+ enum
+ {
+ // Bucket details were captured for ThreadAbort
+ Wb_CapturedForThreadAbort = 1,
+
+ // Bucket details were captured at AD Transition
+ Wb_CapturedAtADTransition = 2,
+
+ // Bucket details were captured during Reflection invocation
+ Wb_CapturedAtReflectionInvocation = 4
+ };
+
+ DWORD m_DebugFlags;
+#endif // _DEBUG
+
+public:
+ EHWatsonBucketTracker();
+ void Init();
+ void CopyEHWatsonBucketTracker(const EHWatsonBucketTracker& srcTracker);
+ void CopyBucketsFromThrowable(OBJECTREF oThrowable);
+ void SaveIpForWatsonBucket(UINT_PTR ip);
+ UINT_PTR RetrieveWatsonBucketIp();
+ PTR_VOID RetrieveWatsonBuckets();
+ void ClearWatsonBucketDetails();
+ void CaptureUnhandledInfoForWatson(TypeOfReportedError tore, Thread * pThread, OBJECTREF * pThrowable);
+
+#ifdef _DEBUG
+ void ResetFlags() { LIMITED_METHOD_CONTRACT; m_DebugFlags = 0; }
+ BOOL CapturedForThreadAbort() { LIMITED_METHOD_CONTRACT; return m_DebugFlags & Wb_CapturedForThreadAbort; }
+ void SetCapturedForThreadAbort() { LIMITED_METHOD_CONTRACT; m_DebugFlags |= Wb_CapturedForThreadAbort; }
+ void ResetCapturedForThreadAbort() { LIMITED_METHOD_CONTRACT; m_DebugFlags &= ~Wb_CapturedForThreadAbort; }
+
+ BOOL CapturedAtADTransition() { LIMITED_METHOD_CONTRACT; return m_DebugFlags & Wb_CapturedAtADTransition; }
+ void SetCapturedAtADTransition() { LIMITED_METHOD_CONTRACT; m_DebugFlags |= Wb_CapturedAtADTransition; }
+ void ResetCapturedAtADTransition() { LIMITED_METHOD_CONTRACT; m_DebugFlags &= ~Wb_CapturedAtADTransition; }
+
+ BOOL CapturedAtReflectionInvocation() { LIMITED_METHOD_CONTRACT; return m_DebugFlags & Wb_CapturedAtReflectionInvocation; }
+ void SetCapturedAtReflectionInvocation() { LIMITED_METHOD_CONTRACT; m_DebugFlags |= Wb_CapturedAtReflectionInvocation; }
+ void ResetCapturedAtReflectionInvocation() { LIMITED_METHOD_CONTRACT; m_DebugFlags &= ~Wb_CapturedAtReflectionInvocation; }
+#endif // _DEBUG
+};
+
+void SetStateForWatsonBucketing(BOOL fIsRethrownException, OBJECTHANDLE ohOriginalException);
+BOOL CopyWatsonBucketsToThrowable(PTR_VOID pUnmanagedBuckets, OBJECTREF oTargetThrowable = NULL);
+void CopyWatsonBucketsFromThrowableToCurrentThrowable(OBJECTREF oThrowableFrom);
+void CopyWatsonBucketsBetweenThrowables(OBJECTREF oThrowableFrom, OBJECTREF oThrowableTo = NULL);
+void SetupInitialThrowBucketDetails(UINT_PTR adjustedIp);
+BOOL SetupWatsonBucketsForFailFast(EXCEPTIONREF refException);
+void SetupWatsonBucketsForUEF(BOOL fUseLastThrownObject);
+BOOL SetupWatsonBucketsForEscapingPreallocatedExceptions();
+BOOL SetupWatsonBucketsForNonPreallocatedExceptions(OBJECTREF oThrowable = NULL);
+PTR_EHWatsonBucketTracker GetWatsonBucketTrackerForPreallocatedException(OBJECTREF oPreAllocThrowable, BOOL fCaptureBucketsIfNotPresent,
+ BOOL fStartSearchFromPreviousTracker = FALSE);
+BOOL IsThrowableThreadAbortException(OBJECTREF oThrowable);
+#endif // !FEATURE_PAL
+
+#endif // __ExStateCommon_h__
diff --git a/src/vm/extensibleclassfactory.cpp b/src/vm/extensibleclassfactory.cpp
new file mode 100644
index 0000000000..3107f555e7
--- /dev/null
+++ b/src/vm/extensibleclassfactory.cpp
@@ -0,0 +1,131 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: ExtensibleClassFactory.cpp
+**
+**
+** Purpose: Native methods on System.Runtime.InteropServices.ExtensibleClassFactory
+**
+
+**
+===========================================================*/
+
+#include "common.h"
+
+#include "excep.h"
+#include "stackwalk.h"
+#include "extensibleclassfactory.h"
+
+
+// Helper function used to walk stack frames looking for a class initializer.
+static StackWalkAction FrameCallback(CrawlFrame *pCF, void *pData)
+{
+ _ASSERTE(NULL != pCF);
+ MethodDesc *pMD = pCF->GetFunction();
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(CheckPointer(pData, NULL_OK));
+ PRECONDITION(pMD->GetMethodTable() != NULL);
+ }
+ CONTRACTL_END;
+
+
+ // We use the pData context argument to track the class as we move down the
+ // stack and to return the class whose initializer is being called. If
+ // *ppMT is NULL we are looking at the caller's initial frame and just
+ // record the class that the method belongs to. From that point on the class
+ // must remain the same until we hit a class initializer or else we must
+ // fail (to prevent other classes called from a class initializer from
+    // setting the current class's callback). The very first class we will see
+ // belongs to RegisterObjectCreationCallback itself, so skip it (we set
+ // *ppMT to an initial value of -1 to detect this).
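+    //
+    // In short, *ppMT moves through three states:
+    //     (MethodTable *)-1 : skip our own RegisterObjectCreationCallback frame
+    //     NULL              : record the class of the next (caller's) frame
+    //     a recorded class  : every subsequent frame must match it until a
+    //                         class constructor is found, else reset to NULL and abort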
+ MethodTable **ppMT = (MethodTable **)pData;
+
+ if (*ppMT == (MethodTable *)-1)
+ *ppMT = NULL;
+
+ else if (*ppMT == NULL)
+ *ppMT = pMD->GetMethodTable();
+
+ else if (pMD->GetMethodTable() != *ppMT)
+ {
+ *ppMT = NULL;
+ return SWA_ABORT;
+ }
+
+ if (pMD->IsClassConstructor())
+ return SWA_ABORT;
+
+ return SWA_CONTINUE;
+}
+
+
+// Register a delegate that will be called whenever an instance of a
+// managed type that extends from an unmanaged type needs to allocate
+// the aggregated unmanaged object. This delegate is expected to
+// allocate and aggregate the unmanaged object and is called in place
+// of a CoCreateInstance. This routine must be called in the context
+// of the static initializer for the class for which the callbacks
+// will be made.
+// It is not legal to register this callback from a class that has any
+// parents that have already registered a callback.
+FCIMPL1(void, RegisterObjectCreationCallback, Object* pDelegateUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF orDelegate = (OBJECTREF) pDelegateUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(orDelegate);
+
+ // Validate the delegate argument.
+ if (orDelegate == 0)
+ COMPlusThrowArgumentNull(W("callback"));
+
+ // We should have been called in the context of a class static initializer.
+ // Walk back up the stack to verify this and to determine just what class
+ // we're registering a callback for.
+ MethodTable *pMT = (MethodTable *)-1;
+ if (GetThread()->StackWalkFrames(FrameCallback, &pMT, FUNCTIONSONLY, NULL) == SWA_FAILED)
+ COMPlusThrow(kInvalidOperationException, IDS_EE_CALLBACK_NOT_CALLED_FROM_CCTOR);
+
+ // If we didn't find a class initializer, we can't continue.
+ if (pMT == NULL)
+ {
+ COMPlusThrow(kInvalidOperationException, IDS_EE_CALLBACK_NOT_CALLED_FROM_CCTOR);
+ }
+
+ // The object type must derive at some stage from a COM imported object.
+ // Also we must fail the call if some parent class has already registered a
+ // callback.
+ MethodTable *pParent = pMT;
+ do
+ {
+ pParent = pParent->GetParentMethodTable();
+ if (pParent && !pParent->IsComImport() && (pParent->GetObjCreateDelegate() != NULL))
+ {
+ COMPlusThrow(kInvalidOperationException, IDS_EE_CALLBACK_ALREADY_REGISTERED);
+ }
+ }
+ while (pParent && !pParent->IsComImport());
+
+ // If the class does not have a COM imported base class then fail the call.
+ if (pParent == NULL || pParent->IsProjectedFromWinRT())
+ {
+ COMPlusThrow(kInvalidOperationException, IDS_EE_CALLBACK_NOT_CALLED_FROM_CCTOR);
+ }
+
+ // Save the delegate in the MethodTable for the class.
+ pMT->SetObjCreateDelegate(orDelegate);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
diff --git a/src/vm/extensibleclassfactory.h b/src/vm/extensibleclassfactory.h
new file mode 100644
index 0000000000..8e23e0c04f
--- /dev/null
+++ b/src/vm/extensibleclassfactory.h
@@ -0,0 +1,36 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: ExtensibleClassFactory.h
+**
+**
+** Purpose: Native methods on System.Runtime.InteropServices.ExtensibleClassFactory
+**
+
+**
+===========================================================*/
+
+#ifndef _EXTENSIBLECLASSFACTORY_H
+#define _EXTENSIBLECLASSFACTORY_H
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+// Register a delegate that will be called whenever an instance of a
+// managed type that extends from an unmanaged type needs to allocate
+// the aggregated unmanaged object. This delegate is expected to
+// allocate and aggregate the unmanaged object and is called in place
+// of a CoCreateInstance. This routine must be called in the context
+// of the static initializer for the class for which the callbacks
+// will be made.
+// It is not legal to register this callback from a class that has any
+// parents that have already registered a callback.
+FCDECL1(void, RegisterObjectCreationCallback, Object* pDelegateUNSAFE);
+
+
+#endif
diff --git a/src/vm/fcall.cpp b/src/vm/fcall.cpp
new file mode 100644
index 0000000000..b4945a5a3c
--- /dev/null
+++ b/src/vm/fcall.cpp
@@ -0,0 +1,413 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// FCALL.CPP
+//
+
+//
+
+
+#include "common.h"
+#include "vars.hpp"
+#include "fcall.h"
+#include "excep.h"
+#include "frames.h"
+#include "gms.h"
+#include "ecall.h"
+#include "eeconfig.h"
+
+NOINLINE LPVOID __FCThrow(LPVOID __me, RuntimeExceptionKind reKind, UINT resID, LPCWSTR arg1, LPCWSTR arg2, LPCWSTR arg3)
+{
+ STATIC_CONTRACT_THROWS;
+    // This isn't strictly true... but the guarantee that we make here is
+    // that we won't trigger without having set up a frame.
+ // STATIC_CONTRACT_TRIGGER
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT; // function probes before it does any work
+
+ // side effect the compiler can't remove
+ if (FC_NO_TAILCALL != 1)
+ return (LPVOID)(FC_NO_TAILCALL + 1);
+
+ FC_CAN_TRIGGER_GC();
+ INCONTRACT(FCallCheck __fCallCheck(__FILE__, __LINE__));
+ FC_GC_POLL_NOT_NEEDED();
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+ // Now, we can construct & throw.
+
+    // In V1, throwing an ExecutionEngineException actually never really threw anything... it was the same as a
+    // fatal error in the runtime, and we most probably would have ripped the process down. Starting in
+    // Whidbey, this behavior has changed a lot.  It is not really legal to try to throw an
+    // ExecutionEngineException with this function.
+ _ASSERTE((reKind != kExecutionEngineException) ||
+ !"Don't throw kExecutionEngineException from here. Go to EEPolicy directly, or throw something better.");
+
+ if (resID == 0)
+ {
+        // If we have a string to add, use the non-localized throw; otherwise just throw the exception.
+ if (arg1)
+ COMPlusThrowNonLocalized(reKind, arg1); //COMPlusThrow(reKind,arg1);
+ else
+ COMPlusThrow(reKind);
+ }
+ else
+ COMPlusThrow(reKind, resID, arg1, arg2, arg3);
+
+ HELPER_METHOD_FRAME_END();
+ FC_CAN_TRIGGER_GC_END();
+ _ASSERTE(!"Throw returned");
+ return NULL;
+}
+
+NOINLINE LPVOID __FCThrowArgument(LPVOID __me, RuntimeExceptionKind reKind, LPCWSTR argName, LPCWSTR resourceName)
+{
+ STATIC_CONTRACT_THROWS;
+    // This isn't strictly true... but the guarantee that we make here is
+    // that we won't trigger without having set up a frame.
+ // STATIC_CONTRACT_TRIGGER
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT; // function probes before it does any work
+
+ // side effect the compiler can't remove
+ if (FC_NO_TAILCALL != 1)
+ return (LPVOID)(FC_NO_TAILCALL + 1);
+
+ FC_CAN_TRIGGER_GC();
+ INCONTRACT(FCallCheck __fCallCheck(__FILE__, __LINE__));
+ FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+
+ switch (reKind) {
+ case kArgumentNullException:
+ if (resourceName) {
+ COMPlusThrowArgumentNull(argName, resourceName);
+ } else {
+ COMPlusThrowArgumentNull(argName);
+ }
+ break;
+
+ case kArgumentOutOfRangeException:
+ COMPlusThrowArgumentOutOfRange(argName, resourceName);
+ break;
+
+ case kArgumentException:
+ COMPlusThrowArgumentException(argName, resourceName);
+ break;
+
+ default:
+ // If you see this assert, add a case for your exception kind above.
+ _ASSERTE(argName == NULL);
+ COMPlusThrow(reKind, resourceName);
+ }
+
+ HELPER_METHOD_FRAME_END();
+ FC_CAN_TRIGGER_GC_END();
+ _ASSERTE(!"Throw returned");
+ return NULL;
+}
+
+/**************************************************************************************/
+/* Erect a frame in the FCALL and then poll for a GC; objToProtect will be protected
+   during the poll and the updated object returned. */
+
+NOINLINE Object* FC_GCPoll(void* __me, Object* objToProtect)
+{
+ CONTRACTL {
+ THROWS;
+        // This isn't strictly true... but the guarantee that we make here is
+        // that we won't trigger without having set up a frame.
+ UNCHECKED(GC_NOTRIGGER);
+ SO_TOLERANT; // function probes before it does any work
+ } CONTRACTL_END;
+
+ FC_CAN_TRIGGER_GC();
+ INCONTRACT(FCallCheck __fCallCheck(__FILE__, __LINE__));
+
+ Thread *thread = GetThread();
+ if (thread->CatchAtSafePointOpportunistic()) // Does someone want this thread stopped?
+ {
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_CAPTURE_DEPTH_2, objToProtect);
+
+#ifdef _DEBUG
+ BOOL GCOnTransition = FALSE;
+ if (g_pConfig->FastGCStressLevel()) {
+ GCOnTransition = GC_ON_TRANSITIONS (FALSE);
+ }
+#endif
+ CommonTripThread();
+#ifdef _DEBUG
+ if (g_pConfig->FastGCStressLevel()) {
+ GC_ON_TRANSITIONS (GCOnTransition);
+ }
+#endif
+
+ HELPER_METHOD_FRAME_END();
+ }
+
+ FC_CAN_TRIGGER_GC_END();
+
+ return objToProtect;
+}
+
+#ifdef _DEBUG
+
+unsigned FcallTimeHist[11];
+
+#endif
+
+#ifdef ENABLE_CONTRACTS
+
+/**************************************************************************************/
+#if defined(_TARGET_X86_) && defined(ENABLE_PERF_COUNTERS)
+static __int64 getCycleCount() {
+
+ LIMITED_METHOD_CONTRACT;
+ return GET_CYCLE_COUNT();
+}
+#else
+static __int64 getCycleCount() { LIMITED_METHOD_CONTRACT; return(0); }
+#endif
+
+/**************************************************************************************/
+// No contract here: The contract destructor restores the thread contract state to what it was
+// soon after constructing the contract. This would have the effect of reverting the contract
+// state change made by the call to BeginForbidGC.
+DEBUG_NOINLINE ForbidGC::ForbidGC(const char *szFile, int lineNum)
+{
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ m_pThread = GetThread();
+ m_pThread->BeginForbidGC(szFile, lineNum);
+}
+
+/**************************************************************************************/
+// No contract here: The contract destructor restores the thread contract state to what it was
+// soon after constructing the contract. This would have the effect of reverting the contract
+// state change made by the call to BeginForbidGC.
+DEBUG_NOINLINE ForbidGC::~ForbidGC()
+{
+ SCAN_SCOPE_END;
+
+ // IF EH happens, this is still called, in which case
+ // we should not bother
+
+ if (m_pThread->RawGCNoTrigger())
+ m_pThread->EndNoTriggerGC();
+}
+
+/**************************************************************************************/
+DEBUG_NOINLINE FCallCheck::FCallCheck(const char *szFile, int lineNum) : ForbidGC(szFile, lineNum)
+{
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+#ifdef _DEBUG
+ unbreakableLockCount = m_pThread->GetUnbreakableLockCount();
+#endif
+ didGCPoll = false;
+ notNeeded = false;
+ startTicks = getCycleCount();
+}
+
+/**************************************************************************************/
+DEBUG_NOINLINE FCallCheck::~FCallCheck()
+{
+ SCAN_SCOPE_END;
+
+ // Confirm that we don't starve the GC or thread-abort.
+    // Basically every control flow path through an FCALL must
+    // do a poll.  If you hit the assert below, you can fix it as follows:
+ //
+ // If you erect a HELPER_METHOD_FRAME, you can
+ //
+ // Call HELPER_METHOD_POLL()
+ // or use HELPER_METHOD_FRAME_END_POLL
+ //
+    //              If you don't have a helper frame you can use
+ //
+ // FC_GC_POLL_AND_RETURN_OBJREF or
+ // FC_GC_POLL or
+ // FC_GC_POLL_RET
+ //
+ // Note that these must be at GC safe points. In particular
+ // all object references that are NOT protected will be trashed.
+
+
+ // There is a special poll called FC_GC_POLL_NOT_NEEDED
+    // which says the code path is short enough that a GC poll is not needed;
+    // you should not use this in most cases.
+
+ _ASSERTE(unbreakableLockCount == m_pThread->GetUnbreakableLockCount() ||
+ (!m_pThread->HasUnbreakableLock() && !m_pThread->HasThreadStateNC(Thread::TSNC_OwnsSpinLock)));
+
+ if (notNeeded) {
+
+ /*<TODO> TODO, we want to actually measure the time to make certain we are not too far off
+
+ unsigned delta = unsigned(getCycleCount() - startTicks);
+ </TODO>*/
+ }
+ else if (!didGCPoll) {
+ // <TODO>TODO turn this on!!! _ASSERTE(!"FCALL without a GC poll in it somewhere!");</TODO>
+ }
+
+}
+
+
+#if defined(_TARGET_AMD64_)
+
+
+FCallTransitionState::FCallTransitionState ()
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_pThread = GetThread();
+ _ASSERTE(m_pThread);
+
+ m_pPreviousHelperMethodFrameCallerList = m_pThread->m_pHelperMethodFrameCallerList;
+
+ m_pThread->m_pHelperMethodFrameCallerList = NULL;
+}
+
+
+FCallTransitionState::~FCallTransitionState ()
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_pThread->m_pHelperMethodFrameCallerList = m_pPreviousHelperMethodFrameCallerList;
+}
+
+
+PermitHelperMethodFrameState::PermitHelperMethodFrameState ()
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_pThread = GetThread();
+ _ASSERTE(m_pThread);
+
+ CONSISTENCY_CHECK_MSG((HelperMethodFrameCallerList*)-1 != m_pThread->m_pHelperMethodFrameCallerList,
+ "fcall entry point is missing a FCALL_TRANSITION_BEGIN or a FCIMPL\n");
+
+ m_ListEntry.pCaller = m_pThread->m_pHelperMethodFrameCallerList;
+ m_pThread->m_pHelperMethodFrameCallerList = &m_ListEntry;
+}
+
+
+PermitHelperMethodFrameState::~PermitHelperMethodFrameState ()
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_pThread->m_pHelperMethodFrameCallerList = m_ListEntry.pCaller;
+}
+
+
+VOID PermitHelperMethodFrameState::CheckHelperMethodFramePermitted ()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ } CONTRACTL_END;
+
+ //
+ // Get current context and unwind to caller
+ //
+
+ CONTEXT ctx;
+
+ ClrCaptureContext(&ctx);
+ Thread::VirtualUnwindCallFrame(&ctx);
+
+ //
+ // Make sure each unmanaged frame used PERMIT_HELPER_METHOD_FRAME_BEGIN.
+ // If we hit NULL before we reach managed code, then the caller of the
+ // fcall was not managed.
+ //
+
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+
+ HelperMethodFrameCallerList *pList = pThread->m_pHelperMethodFrameCallerList;
+ PCODE CurrentIP;
+ TADDR CurrentSP;
+
+ do
+ {
+ CurrentSP = GetSP(&ctx);
+ CurrentIP = GetIP(&ctx);
+
+ Thread::VirtualUnwindCallFrame(&ctx);
+
+ TADDR CallerSP = GetSP(&ctx);
+
+ unsigned nAssociatedListEntries = 0;
+
+ while ( (SIZE_T)pList >= (SIZE_T)CurrentSP
+ && (SIZE_T)pList < (SIZE_T)CallerSP)
+ {
+ nAssociatedListEntries++;
+ pList = pList->pCaller;
+ }
+
+ if (!nAssociatedListEntries)
+ {
+ char szFunction[cchMaxAssertStackLevelStringLen];
+ GetStringFromAddr((DWORD_PTR)CurrentIP, szFunction);
+
+ CONSISTENCY_CHECK_MSGF(false, ("Unmanaged caller %s at sp %p/ip %p is missing a "
+ "PERMIT_HELPER_METHOD_FRAME_BEGIN, or this function "
+ "is calling an fcall entry point that is missing a "
+ "FCALL_TRANSITION_BEGIN or a FCIMPL\n", szFunction, CurrentSP, CurrentIP));
+ }
+ }
+ while (pList && !ExecutionManager::IsManagedCode(GetIP(&ctx)));
+
+ //
+ // We should have exhausted the list. If not, the list was not reset at
+ // the transition from managed code.
+ //
+
+ if (pList)
+ {
+ char szFunction[cchMaxAssertStackLevelStringLen];
+ GetStringFromAddr((DWORD_PTR)CurrentIP, szFunction);
+
+ CONSISTENCY_CHECK_MSGF(false, ("fcall entry point %s at sp %p/ip %p is missing a "
+ "FCALL_TRANSITION_BEGIN or a FCIMPL\n", szFunction, CurrentSP, CurrentIP));
+ }
+}
+
+
+CompletedFCallTransitionState::CompletedFCallTransitionState ()
+{
+ WRAPPER_NO_CONTRACT;
+
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+
+ m_pLastHelperMethodFrameCallerList = pThread->m_pHelperMethodFrameCallerList;
+
+ pThread->m_pHelperMethodFrameCallerList = (HelperMethodFrameCallerList*)-1;
+}
+
+
+CompletedFCallTransitionState::~CompletedFCallTransitionState ()
+{
+ WRAPPER_NO_CONTRACT;
+
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+
+ pThread->m_pHelperMethodFrameCallerList = m_pLastHelperMethodFrameCallerList;
+}
+
+
+#endif // _TARGET_AMD64_
+
+#endif // ENABLE_CONTRACTS
diff --git a/src/vm/fcall.h b/src/vm/fcall.h
new file mode 100644
index 0000000000..b937896e9e
--- /dev/null
+++ b/src/vm/fcall.h
@@ -0,0 +1,1371 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// FCall.H
+//
+
+//
+// FCall is a high-performance alternative to ECall. Unlike ECall, FCall
+// methods do not necessarily create a frame. Jitted code calls directly
+// to the FCall entry point.  It is possible to do operations that need
+// a frame within an FCall, but you need to manually set up the frame
+// before you do such operations.
+
+// It is illegal to cause a GC or EH to happen in an FCALL before setting
+// up a frame. To prevent accidentally violating this rule, FCALLs turn
+// on BEGINGCFORBID, which ensures that these things can't happen in a
+// checked build without causing an ASSERTE. Once you set up a frame,
+// this state is turned off as long as the frame is active, and then is
+// turned on again when the frame is torn down. This mechanism should
+// be sufficient to ensure that the rules are followed.
+
+// In general you set up a frame by using the following macros
+
+// HELPER_METHOD_FRAME_BEGIN_RET*() // Use If the FCALL has a return value
+// HELPER_METHOD_FRAME_BEGIN*() // Use If FCALL does not return a value
+// HELPER_METHOD_FRAME_END*()
+
+// These macros introduce a scope which is protected by a HelperMethodFrame.
+// In this scope you can do EH or GC. There are rules associated with
+// their use. In particular
+
+// 1) These macros can only be used in the body of an FCALL (that is,
+// something using the FCIMPL* or HCIMPL* macros for its declaration).
+
+// 2) You may not perform a 'return' within this scope.
+
+// Compile time errors occur if you try to violate either of these rules.
+
+// The frame that is set up does NOT protect any GC variables (in particular the
+// arguments of the FCALL).  Thus you need to do an explicit GCPROTECT once the
+// frame is established if you need to protect an argument. There are flavors
+// of HELPER_METHOD_FRAME that protect a certain number of GC variables. For
+// example
+
+// HELPER_METHOD_FRAME_BEGIN_RET_2(arg1, arg2)
+
+// will protect the GC variables arg1, and arg2 as well as erecting the frame.
+
+// Another invariant that you must be aware of is the need to poll to see if
+// a GC is needed by some other thread. Unless the FCALL is VERY short,
+// every code path through the FCALL must do such a poll. The important
+// thing here is that a poll will cause a GC, and thus you can only do it
+// when all your GC variables are protected.   To make things easier,
+// HELPER_METHOD_FRAMES that protect things automatically do this poll.
+// If you don't need to protect anything HELPER_METHOD_FRAME_BEGIN_0
+// will also do the poll.
+
+// Sometimes it is convenient to do the poll at the end of the frame; you
+// can use HELPER_METHOD_FRAME_BEGIN_NOPOLL and HELPER_METHOD_FRAME_END_POLL
+// to do the poll at the end.   If somewhere in the middle is the best
+// place, you can do that too with HELPER_METHOD_POLL().
+
+// You don't need to erect a helper method frame to do a poll. FC_GC_POLL
+// can do this (remember all your GC refs will be trashed).
+
+// Finally, if your method is VERY small, you can get away without a poll,
+// but you have to use FC_GC_POLL_NOT_NEEDED to mark this.
+// Use sparingly!
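+
+// Putting the pieces together, a minimal sketch of an FCALL that erects a
+// frame (the body is hypothetical, and the _RET_1 frame variant is assumed
+// to exist alongside the _RET_2 variant shown above):
+//
+//      FCIMPL1(Object*, DoSomething, Object* pObjUNSAFE)
+//      {
+//          FCALL_CONTRACT;
+//          OBJECTREF objRef = ObjectToOBJECTREF(pObjUNSAFE);
+//          HELPER_METHOD_FRAME_BEGIN_RET_1(objRef);    // erects the frame, protects objRef, polls
+//          // ... work that may trigger a GC or throw ...
+//          HELPER_METHOD_FRAME_END();
+//          return OBJECTREFToObject(objRef);
+//      }
+//      FCIMPLEND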
+
+// It is possible to set up the frame as the first operation in the FCALL and
+// tear it down as the last operation before returning. This works and is
+// reasonably efficient (as good as an ECall); however, if you can defer the
+// setup of the frame to an unlikely code path (the exception path),
+// that is much better.
+
+// If you defer setup of the frame, all codepaths leading to the frame setup
+// must be wrapped with PERMIT_HELPER_METHOD_FRAME_BEGIN/END. These block
+// certain compiler optimizations that interfere with the delayed frame setup.
+// These macros are automatically included in the HCIMPL, FCIMPL, and frame
+// setup macros.
+
+// <TODO>TODO: we should have a way of doing a trial allocation (an allocation that
+// will fail if it would cause a GC). That way even FCALLs that need to allocate
+// would not necessarily need to set up a frame. </TODO>
+
+// It is common to only need to set up a frame in order to throw an exception.
+// While this can be done by doing
+
+// HELPER_METHOD_FRAME_BEGIN() // Use if FCALL does not return a value
+//      COMPlusThrow(except);
+// HELPER_METHOD_FRAME_END()
+
+// It is more efficient (in space) to use the convenience macro FCTHROW, which does
+// this for you (sets up a frame and does the throw).
+
+// FCTHROW(except)
+
+// Since FCALLS have to conform to the EE calling conventions and not to C
+// calling conventions, FCALLs need to be declared using special macros (FCIMPL*)
+// that implement the correct calling conventions. There are variants of these
+// macros depending on the number of args, and sometimes the types of the
+// arguments.
+
+//------------------------------------------------------------------------
+// A very simple example:
+//
+// FCIMPL2(INT32, Div, INT32 x, INT32 y)
+// {
+// if (y == 0)
+// FCThrow(kDivideByZeroException);
+// return x/y;
+// }
+// FCIMPLEND
+//
+//
+// *** WATCH OUT FOR THESE GOTCHAS: ***
+// ------------------------------------
+// - In your FCDECL & FCIMPL protos, don't declare a param as type OBJECTREF
+//   or any of its derived types. This will break on the checked build because
+// __fastcall doesn't enregister C++ objects (which OBJECTREF is).
+// Instead, you need to do something like;
+//
+// FCIMPL(.., .., Object* pObject0)
+// OBJECTREF pObject = ObjectToOBJECTREF(pObject0);
+//       FCIMPLEND
+//
+// For similar reasons, use Object* rather than OBJECTREF as a return type.
+// Consider either using ObjectToOBJECTREF or calling VALIDATEOBJECTREF
+// to make sure your Object* is valid.
+//
+// - FCThrow() must be called directly from your FCall impl function: it
+// cannot be called from a subfunction. Calling from a subfunction breaks
+// the VC code parsing workaround that lets us recover the callee saved registers.
+// Fortunately, you'll get a compile error complaining about an
+// unknown variable "__me".
+//
+// - If your FCall returns VOID, you must use FCThrowVoid() rather than
+// FCThrow(). This is because FCThrow() has to generate an unexecuted
+// "return" statement for the code parser.
+//
+// - If the first and/or second argument of your FCall is a 64-bit value on x86
+//   (i.e. INT64, UINT64 or DOUBLE), you must use "V" versions of FCDECL and
+// FCIMPL macros to enregister arguments correctly. For example, FCDECL3_IVI
+// must be used for FCalls that take 3 arguments and 2nd argument is INT64.
+//
+// - You may use structs for protecting multiple OBJECTREF's simultaneously.
+// In these cases, you must use a variant of a helper method frame with PROTECT
+// in the name, to ensure all the OBJECTREF's in the struct get protected.
+// Also, initialize all the OBJECTREF's first. Like this:
+//
+// FCIMPL4(Object*, COMNlsInfo::nativeChangeCaseString, LocaleIDObject* localeUNSAFE,
+// INT_PTR pNativeTextInfo, StringObject* pStringUNSAFE, CLR_BOOL bIsToUpper)
+// {
+// [ignoring CONTRACT for now]
+// struct _gc
+// {
+// STRINGREF pResult;
+// STRINGREF pString;
+// LOCALEIDREF pLocale;
+// } gc;
+// gc.pResult = NULL;
+// gc.pString = ObjectToSTRINGREF(pStringUNSAFE);
+// gc.pLocale = (LOCALEIDREF)ObjectToOBJECTREF(localeUNSAFE);
+//
+// HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc)
+//
+// If you forgot the PROTECT part, the macro will only protect the first OBJECTREF,
+// introducing a subtle GC hole in your code. Fortunately, we now issue a
+// compile-time error if you forget.
+
+// How FCall works:
+// ----------------
+// An FCall target uses __fastcall or some other calling convention to
+// match the IL calling convention exactly. Thus, a call to FCall is a direct
+// call to the target w/ no intervening stub or frame.
+//
+// The tricky part is when FCThrow is called. FCThrow must generate
+// a proper method frame before allocating and throwing the exception.
+// To do this, it must recover several things:
+//
+// - The location of the FCIMPL's return address (since that's
+// where the frame will be based.)
+//
+// - The on-entry values of the callee-saved regs; which must
+// be recorded in the frame so that GC can update them.
+// Depending on how VC compiles your FCIMPL, those values are still
+// in the original registers or saved on the stack.
+//
+// To figure out which, FCThrow() generates the code:
+//
+// while (NULL == __FCThrow(__me, ...)) {};
+// return 0;
+//
+// The "return" statement will never execute; but its presence guarantees
+// that VC will follow the __FCThrow() call with a VC epilog
+// that restores the callee-saved registers using a pretty small
+// and predictable set of Intel opcodes. __FCThrow() parses this
+// epilog and simulates its execution to recover the callee saved
+// registers.
+//
+// The while loop is to prevent the compiler from doing tail call optimizations.
+//     The helper frame interpreter needs the frame to be present.
+//
+// - The MethodDesc* that this FCall implements. This MethodDesc*
+// is part of the frame and ensures that the FCall will appear
+// in the exception's stack trace. To get this, FCDECL declares
+// a static local __me, initialized to point to the FC target itself.
+// This address is exactly what's stored in the ECall lookup tables;
+// so __FCThrow() simply does a reverse lookup on that table to recover
+// the MethodDesc*.
+//
+
+
+#if !defined(__FCall_h__) && !defined(CLR_STANDALONE_BINDER)
+#define __FCall_h__
+
+#include "gms.h"
+#include "runtimeexceptionkind.h"
+#include "debugreturn.h"
+#include "stackprobe.h"
+
+//==============================================================================================
+// These macros defeat compiler optimizations that might mix nonvolatile
+// register loads and stores with other code in the function body. This
+// creates problems for the frame setup code, which assumes that any
+// nonvolatiles that are saved at the point of the frame setup will be
+// re-loaded when the frame is popped.
+//
+// Currently this is only known to be an issue on AMD64. It's uncertain
+// whether it is an issue on x86.
+//==============================================================================================
+
+#if defined(_TARGET_AMD64_) && !defined(FEATURE_PAL)
+
+//
+// On AMD64 this is accomplished by including a setjmp anywhere in a function.
+// Doesn't matter whether it is reachable or not, and in fact in optimized
+// builds the setjmp is removed altogether.
+//
+#include <setjmp.h>
+
+//
+// Use of setjmp is temporary, we will eventually have compiler intrinsics to
+// disable the optimizations. Besides, we don't actually execute setjmp in
+// these macros (or anywhere else in the VM on AMD64).
+//
+#pragma warning(disable:4611) // interaction between '_setjmp' and C++ object destruction is non-portable
+
+#ifdef _DEBUG
+//
+// Linked list of unmanaged methods preceding a HelperMethodFrame push.  This
+// is linked onto the current Thread. Each list entry is stack-allocated so it
+// can be associated with an unmanaged frame. Each unmanaged frame needs to be
+// associated with at least one list entry.
+//
+struct HelperMethodFrameCallerList
+{
+ HelperMethodFrameCallerList *pCaller;
+};
+#endif // _DEBUG
+
+//
+// Resets the Thread state at a new managed -> fcall transition.
+//
+class FCallTransitionState
+{
+public:
+
+ FCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; });
+ ~FCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; });
+
+#ifdef _DEBUG
+private:
+ Thread *m_pThread;
+ HelperMethodFrameCallerList *m_pPreviousHelperMethodFrameCallerList;
+#endif // _DEBUG
+};
+
+//
+// Pushes/pops state for each caller.
+//
+class PermitHelperMethodFrameState
+{
+public:
+
+ PermitHelperMethodFrameState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; });
+ ~PermitHelperMethodFrameState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; });
+
+ static VOID CheckHelperMethodFramePermitted () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; });
+
+#ifdef _DEBUG
+private:
+ Thread *m_pThread;
+ HelperMethodFrameCallerList m_ListEntry;
+#endif // _DEBUG
+};
+
+//
+// Resets the Thread state after the HelperMethodFrame is pushed. At this
+// point, the HelperMethodFrame is capable of unwinding to the managed code,
+// so we can reset the Thread state for any nested fcalls.
+//
+class CompletedFCallTransitionState
+{
+public:
+
+ CompletedFCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; });
+ ~CompletedFCallTransitionState () NOT_DEBUG({ LIMITED_METHOD_CONTRACT; });
+
+#ifdef _DEBUG
+private:
+
+ HelperMethodFrameCallerList *m_pLastHelperMethodFrameCallerList;
+#endif // _DEBUG
+};
+
+#define PERMIT_HELPER_METHOD_FRAME_BEGIN() \
+ if (1) \
+ { \
+ PermitHelperMethodFrameState ___PermitHelperMethodFrameState;
+
+#define PERMIT_HELPER_METHOD_FRAME_END() \
+ } \
+ else \
+ { \
+ jmp_buf ___jmpbuf; \
+ setjmp(___jmpbuf); \
+ __assume(0); \
+ }
+
+#define FCALL_TRANSITION_BEGIN() \
+ FCallTransitionState ___FCallTransitionState; \
+ PERMIT_HELPER_METHOD_FRAME_BEGIN();
+
+#define FCALL_TRANSITION_END() \
+ PERMIT_HELPER_METHOD_FRAME_END();
+
+#define CHECK_HELPER_METHOD_FRAME_PERMITTED() \
+ PermitHelperMethodFrameState::CheckHelperMethodFramePermitted(); \
+ CompletedFCallTransitionState ___CompletedFCallTransitionState;
+
+#else // unsupported processor
+
+#define PERMIT_HELPER_METHOD_FRAME_BEGIN()
+#define PERMIT_HELPER_METHOD_FRAME_END()
+#define FCALL_TRANSITION_BEGIN()
+#define FCALL_TRANSITION_END()
+#define CHECK_HELPER_METHOD_FRAME_PERMITTED()
+
+#endif // unsupported processor
+
+//==============================================================================================
+// This is where FCThrow ultimately ends up. Never call this directly.
+// Use the FCThrow() macros. __FCThrowArgument is the helper to throw ArgumentExceptions
+// with a resource taken from the managed resource manager.
+//==============================================================================================
+LPVOID __FCThrow(LPVOID me, enum RuntimeExceptionKind reKind, UINT resID, LPCWSTR arg1, LPCWSTR arg2, LPCWSTR arg3);
+LPVOID __FCThrowArgument(LPVOID me, enum RuntimeExceptionKind reKind, LPCWSTR argumentName, LPCWSTR resourceName);
+
+//==============================================================================================
+// FCDECLn: A set of macros for generating header declarations for FC targets.
+// Use FCIMPLn for the actual body.
+//==============================================================================================
+
+// Note: on x86, these defs reverse all but the first two arguments
+// (the IL stack calling convention is reversed from __fastcall).
+
+
+// Calling convention for varargs
+#define F_CALL_VA_CONV __cdecl
+
+
+#ifdef _TARGET_X86_
+
+// Choose the appropriate calling convention for FCALL helpers on the basis of the JIT calling convention
+#ifdef __GNUC__
+#define F_CALL_CONV __attribute__((stdcall, regparm(3)))
+#else
+#define F_CALL_CONV __fastcall
+#endif
+
+#if defined(__GNUC__)
+
+// The GCC fastcall convention is different from the MSVC fastcall convention. GCC can use up to 3 registers
+// (EAX, EDX, ECX) to pass parameters. Dummy parameters and reordering of the actual parameters in the
+// FCALL signature are used to make the calling convention match MSVC's.
+
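+// For example (an illustration using a hypothetical fcall named Foo):
+//
+//     FCDECL2(INT32, Foo, Object* a1, INT32 a2)
+//
+// expands under GCC to
+//
+//     INT32 F_CALL_CONV Foo(int /* EAX */, INT32 a2, Object* a1)
+//
+// The dummy consumes EAX, so a2 lands in EDX and a1 in ECX, matching the
+// registers MSVC __fastcall assigns in Foo(a1, a2).
+//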
+#define FCDECL0(rettype, funcname) rettype F_CALL_CONV funcname()
+#define FCDECL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1)
+#define FCDECL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1)
+#define FCDECL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1)
+#define FCDECL2VA(rettype, funcname, a1, a2) rettype F_CALL_VA_CONV funcname(a1, a2, ...)
+#define FCDECL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1)
+#define FCDECL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a2, a1)
+#define FCDECL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2)
+#define FCDECL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3)
+#define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3)
+#define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a1)
+#define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a3, a2)
+#define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a2)
+#define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a3, a2, a1)
+#define FCDECL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3)
+#define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3)
+#define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a6, a5, a4, a3)
+#define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a7, a6, a5, a4, a3)
+#define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a8, a7, a6, a5, a4, a3)
+#define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a9, a8, a7, a6, a5, a4, a3)
+#define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a10, a9, a8, a7, a6, a5, a4, a3)
+#define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a11, a10, a9, a8, a7, a6, a5, a4, a3)
+#define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3)
+#define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3)
+#define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3)
+
+#define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a5, a4, a2)
+#define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a5, a4, a1)
+
+#else // __GNUC__
+
+#define FCDECL0(rettype, funcname) rettype F_CALL_CONV funcname()
+#define FCDECL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1)
+#define FCDECL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1)
+#define FCDECL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2)
+#define FCDECL2VA(rettype, funcname, a1, a2) rettype F_CALL_VA_CONV funcname(a1, a2, ...)
+#define FCDECL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1)
+#define FCDECL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1)
+#define FCDECL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2)
+#define FCDECL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3)
+#define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3)
+#define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a2, a3, a1)
+#define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a3, a2)
+#define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a3, a2)
+#define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a2, a1, a3)
+#define FCDECL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a4, a3)
+#define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3)
+#define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(a1, a2, a6, a5, a4, a3)
+#define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(a1, a2, a7, a6, a5, a4, a3)
+#define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(a1, a2, a8, a7, a6, a5, a4, a3)
+#define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3)
+#define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3)
+#define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3)
+#define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(a1, a2, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3)
+#define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(a1, a2, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3)
+#define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(a1, a2, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3)
+
+#define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a3, a5, a4, a2)
+#define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a2, a3, a5, a4, a1)
+
+#endif // __GNUC__
+
+#if 0
+//
+// Don't use something like this... directly calling an FCALL from within the runtime breaks stackwalking because
+// the FCALL reverse mapping only gets established in ECall::GetFCallImpl, and that codepath is circumvented by
+// calling the FCALL directly.
+// See below for usage of FC_CALL_INNER (used in SecurityStackWalk::Check presently)
+//
+#define FCCALL0(funcname) funcname()
+#define FCCALL1(funcname, a1) funcname(a1)
+#define FCCALL2(funcname, a1, a2) funcname(a1, a2)
+#define FCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3)
+#define FCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a4, a3)
+#define FCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a5, a4, a3)
+#define FCCALL6(funcname, a1, a2, a3, a4, a5, a6) funcname(a1, a2, a6, a5, a4, a3)
+#define FCCALL7(funcname, a1, a2, a3, a4, a5, a6, a7) funcname(a1, a2, a7, a6, a5, a4, a3)
+#define FCCALL8(funcname, a1, a2, a3, a4, a5, a6, a7, a8) funcname(a1, a2, a8, a7, a6, a5, a4, a3)
+#define FCCALL9(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3)
+#define FCCALL10(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3)
+#define FCCALL11(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3)
+#define FCCALL12(funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) funcname(a1, a2, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3)
+#endif // 0
+
+#else // !_TARGET_X86_
+
+#define F_CALL_CONV
+
+#define FCDECL0(rettype, funcname) rettype funcname()
+#define FCDECL1(rettype, funcname, a1) rettype funcname(a1)
+#define FCDECL1_V(rettype, funcname, a1) rettype funcname(a1)
+#define FCDECL2(rettype, funcname, a1, a2) rettype funcname(a1, a2)
+#define FCDECL2VA(rettype, funcname, a1, a2) rettype funcname(a1, a2, ...)
+#define FCDECL2_VV(rettype, funcname, a1, a2) rettype funcname(a1, a2)
+#define FCDECL2_VI(rettype, funcname, a1, a2) rettype funcname(a1, a2)
+#define FCDECL2_IV(rettype, funcname, a1, a2) rettype funcname(a1, a2)
+#define FCDECL3(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3)
+#define FCDECL3_IIV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3)
+#define FCDECL3_VII(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3)
+#define FCDECL3_IVV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3)
+#define FCDECL3_IVI(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3)
+#define FCDECL3_VVI(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3)
+#define FCDECL4(rettype, funcname, a1, a2, a3, a4) rettype funcname(a1, a2, a3, a4)
+#define FCDECL5(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5)
+#define FCDECL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype funcname(a1, a2, a3, a4, a5, a6)
+#define FCDECL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype funcname(a1, a2, a3, a4, a5, a6, a7)
+#define FCDECL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8)
+#define FCDECL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9)
+#define FCDECL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10)
+#define FCDECL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11)
+#define FCDECL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12)
+#define FCDECL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13)
+#define FCDECL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14)
+
+#define FCDECL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5)
+#define FCDECL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5)
+
+#endif // _TARGET_X86_
+
+#define HELPER_FRAME_DECL(x) FrameWithCookie<HelperMethodFrame_##x##OBJ> __helperframe
+
+// Use the capture-state machinery if the architecture has one.
+//
+// For a normal build we create a loop (see the explanation of RestoreState below).
+// We don't want a loop here for PREFAST since that causes
+// warning 263: Using _alloca in a loop.
+// And we can't use DEBUG_OK_TO_RETURN for PREFAST because the PREFAST version
+// requires that you already be in a DEBUG_ASSURE_NO_RETURN_BEGIN scope.
+
+#define HelperMethodFrame_0OBJ HelperMethodFrame
+#define HELPER_FRAME_ARGS(attribs) __me, attribs
+#define FORLAZYMACHSTATE(x) x
+
+#if defined(_PREFAST_)
+ #define FORLAZYMACHSTATE_BEGINLOOP(x) x
+ #define FORLAZYMACHSTATE_ENDLOOP(x)
+ #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN
+ #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END
+#else
+ #define FORLAZYMACHSTATE_BEGINLOOP(x) x do
+ #define FORLAZYMACHSTATE_ENDLOOP(x) while(x)
+ #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN DEBUG_OK_TO_RETURN_BEGIN(LAZYMACHSTATE)
+ #define FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END DEBUG_OK_TO_RETURN_END(LAZYMACHSTATE)
+#endif
+
+// BEGIN: before gcpoll
+//FCallGCCanTriggerNoDtor __fcallGcCanTrigger;
+//__fcallGcCanTrigger.Enter();
+
+// END: after gcpoll
+//__fcallGcCanTrigger.Leave(__FUNCTION__, __FILE__, __LINE__);
+
+// We have to put DEBUG_OK_TO_RETURN_BEGIN around the FORLAZYMACHSTATE
+// to allow the HELPER_FRAME to be installed inside an SO_INTOLERANT region
+// which does not allow a return. The return is used by FORLAZYMACHSTATE
+// to capture the state, but is not an actual return, so it is ok.
+#define HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \
+ FORLAZYMACHSTATE_BEGINLOOP(int alwaysZero = 0;) \
+ { \
+ INDEBUG(static BOOL __haveCheckedRestoreState = FALSE;) \
+ PERMIT_HELPER_METHOD_FRAME_BEGIN(); \
+ CHECK_HELPER_METHOD_FRAME_PERMITTED(); \
+ helperFrame; \
+ FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_BEGIN; \
+ FORLAZYMACHSTATE(CAPTURE_STATE(__helperframe.MachineState(), ret);) \
+ FORLAZYMACHSTATE_DEBUG_OK_TO_RETURN_END; \
+ INDEBUG(__helperframe.SetAddrOfHaveCheckedRestoreState(&__haveCheckedRestoreState)); \
+ DEBUG_ASSURE_NO_RETURN_BEGIN(HELPER_METHOD_FRAME); \
+ INCONTRACT(FCallGCCanTrigger::Enter()); \
+ __helperframe.Push(); \
+ MAKE_CURRENT_THREAD_AVAILABLE_EX(__helperframe.GetThread()); \
+
+#define HELPER_METHOD_FRAME_BEGIN_EX(ret, helperFrame, gcpoll, allowGC) \
+ HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \
+ TESTHOOKCALL(AppDomainCanBeUnloaded(GET_THREAD()->GetDomain()->GetId().m_dwId,!allowGC)); \
+ /* <TODO>TODO TURN THIS ON!!! </TODO> */ \
+ /* gcpoll; */ \
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER_FOR_HMF(&__helperframe);
+
+#define HELPER_METHOD_FRAME_BEGIN_EX_NOTHROW(ret, helperFrame, gcpoll, allowGC, probeFailExpr) \
+ HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \
+ /* <TODO>TODO TURN THIS ON!!! </TODO> */ \
+ /* gcpoll; */ \
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GET_THREAD(), probeFailExpr);
+
+
+// The while(__helperframe.RestoreState()) needs a bit of explanation.
+// The issue is ensuring that the same machine state (which registers are saved)
+// exists both when the machine state is probed (when the frame is created) and
+// when it is actually used (when the frame is popped). We do this by creating
+// a flow of control from use to def. Note that 'RestoreState' always returns false,
+// so we never actually loop, but the compiler does not know that, and thus
+// is forced to keep the state of the register spills the same at
+// the two locations.
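+//
+// Roughly, the non-PREFAST expansion has this shape (a simplified sketch;
+// the real macros capture state and push the frame inside the loop):
+//
+//     int alwaysZero = 0;
+//     do {
+//         ...                 // capture machine state, push frame, fcall body
+//         alwaysZero = HelperMethodFrameRestoreState(...);  // always returns 0
+//     } while (alwaysZero);   // never iterates, but creates the use-to-def flow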
+
+#define HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC) \
+ /* <TODO>TODO TURN THIS ON!!! </TODO> */ \
+ /* gcpoll; */ \
+ __helperframe.Pop(); \
+ DEBUG_ASSURE_NO_RETURN_END(HELPER_METHOD_FRAME); \
+ INCONTRACT(FCallGCCanTrigger::Leave(__FUNCTION__, __FILE__, __LINE__)); \
+ FORLAZYMACHSTATE(alwaysZero = \
+ HelperMethodFrameRestoreState(INDEBUG_COMMA(&__helperframe) \
+ __helperframe.MachineState());) \
+ PERMIT_HELPER_METHOD_FRAME_END() \
+ } FORLAZYMACHSTATE_ENDLOOP(alwaysZero);
+
+#define HELPER_METHOD_FRAME_END_EX(gcpoll,allowGC) \
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER; \
+ TESTHOOKCALL(AppDomainCanBeUnloaded(GET_THREAD()->GetDomain()->GetId().m_dwId,!allowGC)); \
+ HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC);
+
+#define HELPER_METHOD_FRAME_END_EX_NOTHROW(gcpoll,allowGC) \
+ END_SO_INTOLERANT_CODE; \
+ HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC);
+
+#define HELPER_METHOD_FRAME_BEGIN_ATTRIB(attribs) \
+ HELPER_METHOD_FRAME_BEGIN_EX( \
+ return, \
+ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \
+ HELPER_METHOD_POLL(),TRUE)
+
+#define HELPER_METHOD_FRAME_BEGIN_0() \
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_NONE)
+
+#define HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(attribs) \
+ HELPER_METHOD_FRAME_BEGIN_EX( \
+ return, \
+ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \
+ {},FALSE)
+
+#define HELPER_METHOD_FRAME_BEGIN_NOPOLL() HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE)
+
+#define HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(attribs, arg1) \
+ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ HELPER_METHOD_FRAME_BEGIN_EX( \
+ return, \
+ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \
+ (OBJECTREF*) &arg1), \
+ HELPER_METHOD_POLL(),TRUE)
+
+#define HELPER_METHOD_FRAME_BEGIN_1(arg1) HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1)
+
+#define HELPER_METHOD_FRAME_BEGIN_ATTRIB_2(attribs, arg1, arg2) \
+ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ HELPER_METHOD_FRAME_BEGIN_EX( \
+ return, \
+ HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \
+ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \
+ HELPER_METHOD_POLL(),TRUE)
+
+#define HELPER_METHOD_FRAME_BEGIN_2(arg1, arg2) HELPER_METHOD_FRAME_BEGIN_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2)
+
+#define HELPER_METHOD_FRAME_BEGIN_PROTECT(gc) \
+ HELPER_METHOD_FRAME_BEGIN_EX( \
+ return, \
+ HELPER_FRAME_DECL(PROTECT)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NONE), \
+ (OBJECTREF*)&(gc), sizeof(gc)/sizeof(OBJECTREF)), \
+ HELPER_METHOD_POLL(),TRUE)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(attribs) \
+ HELPER_METHOD_FRAME_BEGIN_EX( \
+ return 0, \
+ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \
+ {},FALSE)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_NOPOLL(attribs) \
+ HELPER_METHOD_FRAME_BEGIN_EX( \
+ FC_RETURN_VC(), \
+ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \
+ {},FALSE)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(attribs) \
+ HELPER_METHOD_FRAME_BEGIN_EX( \
+ return 0, \
+ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(attribs)), \
+ HELPER_METHOD_POLL(),TRUE)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_0() \
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_NONE)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_VC_0() \
+ HELPER_METHOD_FRAME_BEGIN_EX( \
+ FC_RETURN_VC(), \
+ HELPER_FRAME_DECL(0)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NONE)), \
+ HELPER_METHOD_POLL(),TRUE)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(attribs, arg1) \
+ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ HELPER_METHOD_FRAME_BEGIN_EX( \
+ return 0, \
+ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \
+ (OBJECTREF*) &arg1), \
+ HELPER_METHOD_POLL(),TRUE)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_NOTHROW_1(probeFailExpr, arg1) \
+ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ HELPER_METHOD_FRAME_BEGIN_EX_NOTHROW( \
+ return 0, \
+ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(Frame::FRAME_ATTR_NO_THREAD_ABORT), \
+ (OBJECTREF*) &arg1), \
+ HELPER_METHOD_POLL(), TRUE, probeFailExpr)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_1(attribs, arg1) \
+ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ HELPER_METHOD_FRAME_BEGIN_EX( \
+ FC_RETURN_VC(), \
+ HELPER_FRAME_DECL(1)(HELPER_FRAME_ARGS(attribs), \
+ (OBJECTREF*) &arg1), \
+ HELPER_METHOD_POLL(),TRUE)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(attribs, arg1, arg2) \
+ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ HELPER_METHOD_FRAME_BEGIN_EX( \
+ return 0, \
+ HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \
+ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \
+ HELPER_METHOD_POLL(),TRUE)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_2(attribs, arg1, arg2) \
+ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ HELPER_METHOD_FRAME_BEGIN_EX( \
+ FC_RETURN_VC(), \
+ HELPER_FRAME_DECL(2)(HELPER_FRAME_ARGS(attribs), \
+ (OBJECTREF*) &arg1, (OBJECTREF*) &arg2), \
+ HELPER_METHOD_POLL(),TRUE)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_PROTECT(attribs, gc) \
+ HELPER_METHOD_FRAME_BEGIN_EX( \
+ return 0, \
+ HELPER_FRAME_DECL(PROTECT)(HELPER_FRAME_ARGS(attribs), \
+ (OBJECTREF*)&(gc), sizeof(gc)/sizeof(OBJECTREF)), \
+ HELPER_METHOD_POLL(),TRUE)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_VC_NOPOLL() \
+ HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL() \
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(Frame::FRAME_ATTR_NONE)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_1(arg1) \
+ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_VC_1(arg1) \
+ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_1(Frame::FRAME_ATTR_NONE, arg1)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_2(arg1, arg2) \
+ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_VC_2(arg1, arg2) \
+ static_assert(sizeof(arg1) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ static_assert(sizeof(arg2) == sizeof(OBJECTREF), "GC protecting structs of multiple OBJECTREFs requires a PROTECT variant of the HELPER METHOD FRAME macro");\
+ HELPER_METHOD_FRAME_BEGIN_RET_VC_ATTRIB_2(Frame::FRAME_ATTR_NONE, arg1, arg2)
+
+#define HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc) \
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_PROTECT(Frame::FRAME_ATTR_NONE, gc)
+
+
+#define HELPER_METHOD_FRAME_END() HELPER_METHOD_FRAME_END_EX({},FALSE)
+#define HELPER_METHOD_FRAME_END_POLL() HELPER_METHOD_FRAME_END_EX(HELPER_METHOD_POLL(),TRUE)
+#define HELPER_METHOD_FRAME_END_NOTHROW() HELPER_METHOD_FRAME_END_EX_NOTHROW({},FALSE)
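+
+// A minimal usage sketch (hypothetical fcall, for illustration only). An FCALL
+// that may trigger a GC erects a helper frame and GC-protects its object refs:
+//
+// FCIMPL1(Object*, FooClone, Object* refThisUNSAFE)
+// {
+//     FCALL_CONTRACT;
+//
+//     OBJECTREF refThis = ObjectToOBJECTREF(refThisUNSAFE);
+//     OBJECTREF refRet = NULL;
+//
+//     HELPER_METHOD_FRAME_BEGIN_RET_2(refThis, refRet);  // protects both refs
+//     refRet = ...;            // code that may trigger a GC or throw
+//     HELPER_METHOD_FRAME_END();
+//
+//     return OBJECTREFToObject(refRet);
+// }
+// FCIMPLEND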
+
+// This is the fastest way to do a GC poll if you have already erected a HelperMethodFrame
+#define HELPER_METHOD_POLL() { __helperframe.Poll(); INCONTRACT(__fCallCheck.SetDidPoll()); }
+
+// The HelperMethodFrame knows how to get its return address. Let other code get at it, too.
+// (Uses the comma operator to call InsureInit and discard its result.)
+#define HELPER_METHOD_FRAME_GET_RETURN_ADDRESS() \
+ ( static_cast<UINT_PTR>( (__helperframe.InsureInit(false, NULL)), (__helperframe.MachineState()->GetRetAddr()) ) )
+
+    // Very short routines, or routines that are guaranteed to force a GC or EH,
+    // don't need to poll the GC. USE VERY SPARINGLY!!!
+#define FC_GC_POLL_NOT_NEEDED() INCONTRACT(__fCallCheck.SetNotNeeded())
+
+Object* FC_GCPoll(void* me, Object* objToProtect = NULL);
+
+#define FC_GC_POLL_EX(ret) \
+ { \
+ INCONTRACT(Thread::TriggersGC(GetThread());) \
+ INCONTRACT(__fCallCheck.SetDidPoll();) \
+ if (g_TrapReturningThreads.LoadWithoutBarrier()) \
+ { \
+ if (FC_GCPoll(__me)) \
+ return ret; \
+            while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \
+ } \
+ }
+
+#define FC_GC_POLL() FC_GC_POLL_EX(;)
+#define FC_GC_POLL_RET() FC_GC_POLL_EX(0)
+
+#define FC_GC_POLL_AND_RETURN_OBJREF(obj) \
+ { \
+ INCONTRACT(__fCallCheck.SetDidPoll();) \
+ Object* __temp = OBJECTREFToObject(obj); \
+ if (g_TrapReturningThreads.LoadWithoutBarrier()) \
+ { \
+ __temp = FC_GCPoll(__me, __temp); \
+            while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \
+ } \
+ return __temp; \
+ }
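+
+// Illustrative use (hypothetical): a void FCALL that does real work without
+// erecting a helper frame should poll before returning:
+//
+//     ...             // fcall body, no helper frame erected
+//     FC_GC_POLL();
+//     return;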
+
+#if defined(ENABLE_CONTRACTS)
+#define FC_CAN_TRIGGER_GC() FCallGCCanTrigger::Enter()
+#define FC_CAN_TRIGGER_GC_END() FCallGCCanTrigger::Leave(__FUNCTION__, __FILE__, __LINE__)
+
+#define FC_CAN_TRIGGER_GC_HAVE_THREAD(thread) FCallGCCanTrigger::Enter(thread)
+#define FC_CAN_TRIGGER_GC_HAVE_THREADEND(thread) FCallGCCanTrigger::Leave(thread, __FUNCTION__, __FILE__, __LINE__)
+
+ // turns on forbidGC for the lifetime of the instance
+class ForbidGC {
+protected:
+ Thread *m_pThread;
+public:
+ ForbidGC(const char *szFile, int lineNum);
+ ~ForbidGC();
+};
+
+    // This little helper class checks to make certain that:
+    // 1) ForbidGC is set throughout the routine.
+    // 2) Sometime during the routine, a GC poll is done.
+
+class FCallCheck : public ForbidGC {
+public:
+ FCallCheck(const char *szFile, int lineNum);
+ ~FCallCheck();
+ void SetDidPoll() {LIMITED_METHOD_CONTRACT; didGCPoll = true; }
+ void SetNotNeeded() {LIMITED_METHOD_CONTRACT; notNeeded = true; }
+
+private:
+#ifdef _DEBUG
+ DWORD unbreakableLockCount;
+#endif
+ bool didGCPoll; // GC poll was done
+ bool notNeeded; // GC poll not needed
+    unsigned __int64 startTicks;    // tick count at beginning of FCall
+};
+
+ // FC_COMMON_PROLOG is used for both FCalls and HCalls
+#define FC_COMMON_PROLOG(target, assertFn) \
+ /* The following line has to be first. We do not want to trash last error */ \
+ DWORD __lastError = ::GetLastError(); \
+ static void* __cache = 0; \
+ assertFn(__cache, (LPVOID)target); \
+ { \
+ Thread *_pThread = GetThread(); \
+ Thread::ObjectRefFlush(_pThread); \
+ /*_ASSERTE (_pThread->IsSOTolerant() ||*/ \
+ /* _pThread->HasThreadStateNC(Thread::TSNC_DisableSOCheckInHCALL)); */ \
+ } \
+ FCallCheck __fCallCheck(__FILE__, __LINE__); \
+ FCALL_TRANSITION_BEGIN(); \
+ ::SetLastError(__lastError); \
+
+void FCallAssert(void*& cache, void* target);
+void HCallAssert(void*& cache, void* target);
+
+#else
+#define FC_COMMON_PROLOG(target, assertFn) FCALL_TRANSITION_BEGIN()
+#define FC_CAN_TRIGGER_GC()
+#define FC_CAN_TRIGGER_GC_END()
+#endif // ENABLE_CONTRACTS
+
+// #FC_INNER
+// Macros that allow an fcall to be split into two functions to avoid the helper frame overhead on common
+// fast codepaths.
+//
+// The helper routine needs to know the name of the routine that called it so that it can look up the name of
+// the managed routine this code is associated with (for managed stack traces). This is passed with the
+// FC_INNER_PROLOG macro.
+//
+// The helper can set up a HELPER_METHOD_FRAME, but should pass
+// Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, which indicates the exact number of
+// unwinds to do to get back to managed code. Currently we only support depth 2, which means that the
+// HELPER_METHOD_FRAME needs to be set up in the function directly called by the FCALL. The helper should
+// use the NOINLINE macro to prevent the compiler from inlining it into the FCALL (which would obviously
+// mess up the unwind count).
+//
+// The other invariant that needs to hold is that the epilog walker needs to be able to get from the call to
+// the helper routine to the end of the FCALL using trivial heuristics. The easiest (and only supported)
+// way of doing this is to place your helper right before a return (e.g. at the end of the method). Generally
+// this is not a problem at all, since the FCALL itself will pick off some common case and then tail-call to
+// the helper for everything else. You must use the code:FC_INNER_RETURN macros to do the call, to ensure
+// that the C++ compiler does not tail-call optimize the call to the inner function and mess up the stack
+// depth.
+//
+// see code:ObjectNative::GetClass for an example, and the sketch after the macro definitions below
+//
+#define FC_INNER_PROLOG(outterfuncname) \
+ LPVOID __me; \
+ __me = GetEEFuncEntryPointMacro(outterfuncname); \
+ FC_CAN_TRIGGER_GC(); \
+ INCONTRACT(FCallCheck __fCallCheck(__FILE__, __LINE__));
+
+// This variant should be used for inner fcall functions that have the
+// __me value passed as an argument to the function. This allows
+// inner functions to be shared across multiple fcalls.
+#define FC_INNER_PROLOG_NO_ME_SETUP() \
+ FC_CAN_TRIGGER_GC(); \
+ INCONTRACT(FCallCheck __fCallCheck(__FILE__, __LINE__));
+
+#define FC_INNER_EPILOG() \
+ FC_CAN_TRIGGER_GC_END();
+
+// If you are using FC_INNER, and you are tail calling to the helper method (a common case), then you need
+// to use the FC_INNER_RETURN macros (there is one for methods that return a value and another if the
+// function returns void). This macro's purpose is to inhibit any tail call optimization the C++ compiler
+// might do, which would otherwise confuse the epilog walker.
+//
+// * See #FC_INNER for more
+extern int FC_NO_TAILCALL;
+#define FC_INNER_RETURN(type, expr) \
+ type __retVal = expr; \
+    while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \
+ return(__retVal);
+
+#define FC_INNER_RETURN_VOID(stmt) \
+ stmt; \
+    while (0 == FC_NO_TAILCALL) { }; /* side effect the compiler can't remove */ \
+ return;
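+
+// A sketch of the split pattern (hypothetical names, for illustration only):
+//
+// NOINLINE static Object* FooHelper(OBJECTREF arg)
+// {
+//     FC_INNER_PROLOG(FooFCall);  // name of the outer fcall, for stack traces
+//
+//     OBJECTREF refRet = NULL;
+//     // EXACT_DEPTH|CAPTURE_DEPTH_2: unwind exactly two frames back to managed code
+//     HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, arg);
+//     refRet = ...;               // slow path that may trigger a GC
+//     HELPER_METHOD_FRAME_END();
+//
+//     FC_INNER_EPILOG();
+//     return OBJECTREFToObject(refRet);
+// }
+//
+// FCIMPL1(Object*, FooFCall, Object* pThisUNSAFE)
+// {
+//     FCALL_CONTRACT;
+//     OBJECTREF arg = ObjectToOBJECTREF(pThisUNSAFE);
+//     if (...)                    // common fast case, no helper frame needed
+//         return pThisUNSAFE;
+//     FC_INNER_RETURN(Object*, FooHelper(arg));  // must sit right before the end
+// }
+// FCIMPLEND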
+
+//==============================================================================================
+// FCIMPLn: A set of macros for generating the proto for the actual
+// implementation (use FCDECLn for header protos).
+//
+// The hidden "__me" variable lets us recover the original MethodDesc*
+// so any thrown exceptions will have the correct stack trace. FCThrow()
+// passes this along to __FCThrow().
+//==============================================================================================
+
+#define GetEEFuncEntryPointMacro(func) ((LPVOID)(func))
+
+#define FCIMPL_PROLOG(funcname) \
+ LPVOID __me; \
+ __me = GetEEFuncEntryPointMacro(funcname); \
+ FC_COMMON_PROLOG(__me, FCallAssert)
+
+
+#if defined(_DEBUG) && !defined(CROSSGEN_COMPILE)
+
+// Build the list of all fcall signatures. It is used in binder.cpp to verify
+// compatibility of managed and unmanaged fcall signatures. The check is currently done
+// for x86 only.
+
+struct FCSigCheck {
+public:
+ FCSigCheck(void* fnc, char* sig)
+ {
+ LIMITED_METHOD_CONTRACT;
+ func = fnc;
+ signature = sig;
+ next = g_pFCSigCheck;
+ g_pFCSigCheck = this;
+ }
+
+ FCSigCheck* next;
+ void* func;
+ char* signature;
+
+ static FCSigCheck* g_pFCSigCheck;
+};
+
+#define FCSIGCHECK(funcname, signature) \
+ static FCSigCheck UNIQUE_LABEL(FCSigCheck)(GetEEFuncEntryPointMacro(funcname), signature);
+
+#else
+
+#define FCSIGCHECK(funcname, signature)
+
+#endif
+
+
+#ifdef _TARGET_X86_
+
+#if defined(__GNUC__)
+
+#define FCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { FCIMPL_PROLOG(funcname)
+#define FCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) { FCIMPL_PROLOG(funcname)
+#define FCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1) { FCIMPL_PROLOG(funcname)
+#define FCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) { FCIMPL_PROLOG(funcname)
+#define FCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1) { FCIMPL_PROLOG(funcname)
+#define FCIMPL2_VI(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a2, a1) { FCIMPL_PROLOG(funcname)
+#define FCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_VII(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a1) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a3, a2) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a2) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a3, a2, a1) { FCIMPL_PROLOG(funcname)
+#define FCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+
+#define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a1, a5, a4, a2) { FCIMPL_PROLOG(funcname)
+#define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a3, a2, a5, a4, a1) { FCIMPL_PROLOG(funcname)
+
+#else // __GNUC__
+
+#define FCIMPL0(rettype, funcname) FCSIGCHECK(funcname, #rettype) \
+ rettype F_CALL_CONV funcname() { FCIMPL_PROLOG(funcname)
+#define FCIMPL1(rettype, funcname, a1) FCSIGCHECK(funcname, #rettype "," #a1) \
+ rettype F_CALL_CONV funcname(a1) { FCIMPL_PROLOG(funcname)
+#define FCIMPL1_V(rettype, funcname, a1) FCSIGCHECK(funcname, #rettype "," "V" #a1) \
+ rettype F_CALL_CONV funcname(a1) { FCIMPL_PROLOG(funcname)
+#define FCIMPL2(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2) \
+ rettype F_CALL_CONV funcname(a1, a2) { FCIMPL_PROLOG(funcname)
+#define FCIMPL2VA(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," "...") \
+ rettype F_CALL_VA_CONV funcname(a1, a2, ...) { FCIMPL_PROLOG(funcname)
+#define FCIMPL2_VV(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," "V" #a2) \
+ rettype F_CALL_CONV funcname(a2, a1) { FCIMPL_PROLOG(funcname)
+#define FCIMPL2_VI(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2) \
+ rettype F_CALL_CONV funcname(a2, a1) { FCIMPL_PROLOG(funcname)
+#define FCIMPL2_IV(rettype, funcname, a1, a2) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2) \
+ rettype F_CALL_CONV funcname(a1, a2) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3) \
+ rettype F_CALL_CONV funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," "V" #a3) \
+ rettype F_CALL_CONV funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_VII(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2 "," #a3) \
+ rettype F_CALL_CONV funcname(a2, a3, a1) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," "V" #a3) \
+ rettype F_CALL_CONV funcname(a1, a3, a2) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," #a3) \
+ rettype F_CALL_CONV funcname(a1, a3, a2) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," "V" #a2 "," #a3) \
+ rettype F_CALL_CONV funcname(a2, a1, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL4(rettype, funcname, a1, a2, a3, a4) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4) \
+ rettype F_CALL_CONV funcname(a1, a2, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5) \
+ rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6) \
+ rettype F_CALL_CONV funcname(a1, a2, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7) \
+ rettype F_CALL_CONV funcname(a1, a2, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8) \
+ rettype F_CALL_CONV funcname(a1, a2, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9) \
+ rettype F_CALL_CONV funcname(a1, a2, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10) \
+ rettype F_CALL_CONV funcname(a1, a2, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11) \
+ rettype F_CALL_CONV funcname(a1, a2, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12) \
+ rettype F_CALL_CONV funcname(a1, a2, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12 "," #a13) \
+ rettype F_CALL_CONV funcname(a1, a2, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) FCSIGCHECK(funcname, #rettype "," #a1 "," #a2 "," #a3 "," #a4 "," #a5 "," #a6 "," #a7 "," #a8 "," #a9 "," #a10 "," #a11 "," #a12 "," #a13 "," #a14) \
+ rettype F_CALL_CONV funcname(a1, a2, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3) { FCIMPL_PROLOG(funcname)
+
+#define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," #a1 "," "V" #a2 "," #a3 "," #a4 "," #a5) \
+ rettype F_CALL_CONV funcname(a1, a3, a5, a4, a2) { FCIMPL_PROLOG(funcname)
+#define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) FCSIGCHECK(funcname, #rettype "," "V" #a1 "," #a2 "," #a3 "," #a4 "," #a5) \
+ rettype F_CALL_CONV funcname(a2, a3, a5, a4, a1) { FCIMPL_PROLOG(funcname)
+
+#endif // __GNUC__
+
+#else // !_TARGET_X86_
+//
+// non-x86 platforms don't have messed-up calling convention swizzling
+//
+
+#define FCIMPL0(rettype, funcname) rettype funcname() { FCIMPL_PROLOG(funcname)
+#define FCIMPL1(rettype, funcname, a1) rettype funcname(a1) { FCIMPL_PROLOG(funcname)
+#define FCIMPL1_V(rettype, funcname, a1) rettype funcname(a1) { FCIMPL_PROLOG(funcname)
+#define FCIMPL2(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname)
+#define FCIMPL2VA(rettype, funcname, a1, a2) rettype funcname(a1, a2, ...) { FCIMPL_PROLOG(funcname)
+#define FCIMPL2_VV(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname)
+#define FCIMPL2_VI(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname)
+#define FCIMPL2_IV(rettype, funcname, a1, a2) rettype funcname(a1, a2) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_IIV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_IVV(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_VII(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_IVI(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL3_VVI(rettype, funcname, a1, a2, a3) rettype funcname(a1, a2, a3) { FCIMPL_PROLOG(funcname)
+#define FCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype funcname(a1, a2, a3, a4) { FCIMPL_PROLOG(funcname)
+#define FCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname)
+#define FCIMPL6(rettype, funcname, a1, a2, a3, a4, a5, a6) rettype funcname(a1, a2, a3, a4, a5, a6) { FCIMPL_PROLOG(funcname)
+#define FCIMPL7(rettype, funcname, a1, a2, a3, a4, a5, a6, a7) rettype funcname(a1, a2, a3, a4, a5, a6, a7) { FCIMPL_PROLOG(funcname)
+#define FCIMPL8(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8) { FCIMPL_PROLOG(funcname)
+#define FCIMPL9(rettype, funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9) { FCIMPL_PROLOG(funcname)
+#define FCIMPL10(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) { FCIMPL_PROLOG(funcname)
+#define FCIMPL11(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) { FCIMPL_PROLOG(funcname)
+#define FCIMPL12(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) { FCIMPL_PROLOG(funcname)
+#define FCIMPL13(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) { FCIMPL_PROLOG(funcname)
+#define FCIMPL14(rettype,funcname, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) rettype funcname(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) { FCIMPL_PROLOG(funcname)
+
+#define FCIMPL5_IVI(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname)
+#define FCIMPL5_VII(rettype, funcname, a1, a2, a3, a4, a5) rettype funcname(a1, a2, a3, a4, a5) { FCIMPL_PROLOG(funcname)
+
+#endif
+
+//==============================================================================================
+// Use FCIMPLEND to terminate an FCIMPL.
+//==============================================================================================
+
+#define FCIMPL_EPILOG() FCALL_TRANSITION_END()
+
+#define FCIMPLEND FCIMPL_EPILOG(); }
+
+#define HCIMPL_PROLOG(funcname) LPVOID __me; __me = 0; FC_COMMON_PROLOG(funcname, HCallAssert)
+
+    // HCIMPL macros are just like their FCIMPL counterparts; however,
+    // they do not remember the function they come from. Thus they will not
+    // show up in a stack trace. This is what you want for JIT helpers and the like.
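+    //
+    // A sketch (hypothetical JIT helper, for illustration only):
+    //
+    // HCIMPL2(INT32, JIT_FooAdd, INT32 x, INT32 y)
+    // {
+    //     FCALL_CONTRACT;
+    //     return x + y;   // leaf helper; no frame needed on this path
+    // }
+    // HCIMPLEND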
+
+#ifdef _TARGET_X86_
+
+#if defined(__GNUC__)
+
+#define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname)
+#define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) { HCIMPL_PROLOG(funcname)
+#define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1) {
+#define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a1) { HCIMPL_PROLOG(funcname)
+#define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) { HCIMPL_PROLOG(funcname)
+#define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1) {
+#define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, int /* ECX */, a2, a1) { HCIMPL_PROLOG(funcname)
+#define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(int /* EAX */, int /* EDX */, a1, a2) { HCIMPL_PROLOG(funcname)
+#define HCIMPL2VA(rettype, funcname, a1, a2) rettype F_CALL_VA_CONV funcname(a1, a2, ...) { HCIMPL_PROLOG(funcname)
+#define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a3) { HCIMPL_PROLOG(funcname)
+#define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a4, a3) { HCIMPL_PROLOG(funcname)
+#define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(int /* EAX */, a2, a1, a5, a4, a3) { HCIMPL_PROLOG(funcname)
+
+#define HCCALL1(funcname, a1) funcname(0, 0, a1)
+#define HCCALL1_V(funcname, a1) funcname(0, 0, 0, a1)
+#define HCCALL2(funcname, a1, a2) funcname(0, a2, a1)
+#define HCCALL3(funcname, a1, a2, a3) funcname(0, a2, a1, a3)
+#define HCCALL4(funcname, a1, a2, a3, a4) funcname(0, a2, a1, a4, a3)
+#define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(0, a2, a1, a5, a4, a3)
+#define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * funcptr)(int /* EAX */, int /* EDX */, a1)
+#define HCCALL2_PTR(rettype, funcptr, a1, a2) rettype (F_CALL_CONV * funcptr)(int /* EAX */, a2, a1)
+#else
+
+#define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname)
+#define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname)
+#define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) {
+#define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname)
+#define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname)
+#define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) {
+#define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a2, a1) { HCIMPL_PROLOG(funcname)
+#define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname)
+#define HCIMPL2VA(rettype, funcname, a1, a2) rettype F_CALL_VA_CONV funcname(a1, a2, ...) { HCIMPL_PROLOG(funcname)
+#define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { HCIMPL_PROLOG(funcname)
+#define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a4, a3) { HCIMPL_PROLOG(funcname)
+#define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a5, a4, a3) { HCIMPL_PROLOG(funcname)
+
+#define HCCALL1(funcname, a1) funcname(a1)
+#define HCCALL1_V(funcname, a1) funcname(a1)
+#define HCCALL2(funcname, a1, a2) funcname(a1, a2)
+#define HCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3)
+#define HCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a4, a3)
+#define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a5, a4, a3)
+#define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * funcptr)(a1)
+#define HCCALL2_PTR(rettype, funcptr, a1, a2) rettype (F_CALL_CONV * funcptr)(a1, a2)
+
+#endif
+
+#else // !_TARGET_X86_
+//
+// non-x86 platforms don't have messed-up calling convention swizzling
+//
+
+#define HCIMPL0(rettype, funcname) rettype F_CALL_CONV funcname() { HCIMPL_PROLOG(funcname)
+#define HCIMPL1(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname)
+#define HCIMPL1_RAW(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) {
+#define HCIMPL1_V(rettype, funcname, a1) rettype F_CALL_CONV funcname(a1) { HCIMPL_PROLOG(funcname)
+#define HCIMPL2(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname)
+#define HCIMPL2_RAW(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) {
+#define HCIMPL2_VV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname)
+#define HCIMPL2_IV(rettype, funcname, a1, a2) rettype F_CALL_CONV funcname(a1, a2) { HCIMPL_PROLOG(funcname)
+#define HCIMPL2VA(rettype, funcname, a1, a2) rettype F_CALL_VA_CONV funcname(a1, a2, ...) { HCIMPL_PROLOG(funcname)
+#define HCIMPL3(rettype, funcname, a1, a2, a3) rettype F_CALL_CONV funcname(a1, a2, a3) { HCIMPL_PROLOG(funcname)
+#define HCIMPL4(rettype, funcname, a1, a2, a3, a4) rettype F_CALL_CONV funcname(a1, a2, a3, a4) { HCIMPL_PROLOG(funcname)
+#define HCIMPL5(rettype, funcname, a1, a2, a3, a4, a5) rettype F_CALL_CONV funcname(a1, a2, a3, a4, a5) { HCIMPL_PROLOG(funcname)
+
+#define HCCALL1(funcname, a1) funcname(a1)
+#define HCCALL1_V(funcname, a1) funcname(a1)
+#define HCCALL2(funcname, a1, a2) funcname(a1, a2)
+#define HCCALL3(funcname, a1, a2, a3) funcname(a1, a2, a3)
+#define HCCALL4(funcname, a1, a2, a3, a4) funcname(a1, a2, a3, a4)
+#define HCCALL5(funcname, a1, a2, a3, a4, a5) funcname(a1, a2, a3, a4, a5)
+#define HCCALL1_PTR(rettype, funcptr, a1) rettype (F_CALL_CONV * funcptr)(a1)
+#define HCCALL2_PTR(rettype, funcptr, a1, a2) rettype (F_CALL_CONV * funcptr)(a1, a2)
+
+#endif
+
+#define HCIMPLEND_RAW }
+#define HCIMPLEND FCALL_TRANSITION_END(); }
+
+
+//==============================================================================================
+// Throws an exception from an FCall. See rexcep.h for a list of valid
+// exception codes.
+//==============================================================================================
+#define FCThrow(reKind) FCThrowEx(reKind, 0, 0, 0, 0)
+
+//==============================================================================================
+// This version lets you attach a message with inserts (similar to
+// COMPlusThrow()).
+//==============================================================================================
+#define FCThrowEx(reKind, resID, arg1, arg2, arg3) \
+ { \
+ while (NULL == \
+ __FCThrow(__me, reKind, resID, arg1, arg2, arg3)) {}; \
+ return 0; \
+ }
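+
+// Typical use inside an FCALL (hypothetical fcall, for illustration only):
+//
+// FCIMPL1(INT32, FooGetLength, StringObject* strUNSAFE)
+// {
+//     FCALL_CONTRACT;
+//     if (strUNSAFE == NULL)
+//         FCThrow(kNullReferenceException);  // also emits the required "return 0"
+//     return strUNSAFE->GetStringLength();
+// }
+// FCIMPLEND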
+
+//==============================================================================================
+// Like FCThrow but can be used for a VOID-returning FCall. The only
+// difference is in the "return" statement.
+//==============================================================================================
+#define FCThrowVoid(reKind) FCThrowExVoid(reKind, 0, 0, 0, 0)
+
+//==============================================================================================
+// This version lets you attach a message with inserts (similar to
+// COMPlusThrow()).
+//==============================================================================================
+#define FCThrowExVoid(reKind, resID, arg1, arg2, arg3) \
+ { \
+ while (NULL == \
+ __FCThrow(__me, reKind, resID, arg1, arg2, arg3)) {}; \
+ return; \
+ }
+
+// Use FCThrowRes to throw an exception with a localized error message from the
+// ResourceManager in managed code.
+#define FCThrowRes(reKind, resourceName) FCThrowArgumentEx(reKind, NULL, resourceName)
+#define FCThrowArgumentNull(argName) FCThrowArgumentEx(kArgumentNullException, argName, NULL)
+#define FCThrowArgumentOutOfRange(argName, message) FCThrowArgumentEx(kArgumentOutOfRangeException, argName, message)
+#define FCThrowArgument(argName, message) FCThrowArgumentEx(kArgumentException, argName, message)
+
+#define FCThrowArgumentEx(reKind, argName, resourceName) \
+ { \
+ while (NULL == \
+ __FCThrowArgument(__me, reKind, argName, resourceName)) {}; \
+ return 0; \
+ }
+
+// Use FCThrowResVoid to throw an exception with a localized error message from the
+// ResourceManager in managed code.
+#define FCThrowResVoid(reKind, resourceName) FCThrowArgumentVoidEx(reKind, NULL, resourceName)
+#define FCThrowArgumentNullVoid(argName) FCThrowArgumentVoidEx(kArgumentNullException, argName, NULL)
+#define FCThrowArgumentOutOfRangeVoid(argName, message) FCThrowArgumentVoidEx(kArgumentOutOfRangeException, argName, message)
+#define FCThrowArgumentVoid(argName, message) FCThrowArgumentVoidEx(kArgumentException, argName, message)
+
+#define FCThrowArgumentVoidEx(reKind, argName, resourceName) \
+ { \
+ while (NULL == \
+ __FCThrowArgument(__me, reKind, argName, resourceName)) {}; \
+ return; \
+ }
+
+
+
+// The x86 JIT calling convention expects returned small types (e.g. bool) to be
+// widened on return. The C/C++ calling convention does not guarantee that returned
+// small types are widened. Small types have to be artificially widened on return
+// to fit the x86 JIT calling convention. Thus fcalls returning small types have to
+// use the FC_XXX_RET types to force the C/C++ compiler to do the widening.
+//
+// The most common small return type of FCALLs is bool. The widening of bool is
+// especially tricky since the value has to be also normalized. FC_BOOL_RET and
+// FC_RETURN_BOOL macros are provided to make it fool-proof. FCALLs returning bool
+// should be implemented using following pattern:
+
+// FCIMPL0(FC_BOOL_RET, Foo) // the return type should be FC_BOOL_RET
+// BOOL ret;
+//
+// FC_RETURN_BOOL(ret); // return statements should be FC_RETURN_BOOL
+// FCIMPLEND
+
+// These rules are verified in binder.cpp if COMPlus_ConsistencyCheck is set.
+
+#ifdef _PREFAST_
+
+// Use the prefast build to ensure that functions returning FC_BOOL_RET
+// use FC_RETURN_BOOL to return it. A missing FC_RETURN_BOOL will
+// result in a type mismatch error in prefast builds. This will also
+// catch misuses of FC_BOOL_RET in other places (e.g. in FCALL parameters).
+
+typedef LPVOID FC_BOOL_RET;
+#define FC_RETURN_BOOL(x) do { return (LPVOID)!!(x); } while(0)
+
+#else
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+// The return value is artificially widened on x86 and amd64
+typedef INT32 FC_BOOL_RET;
+#else
+typedef CLR_BOOL FC_BOOL_RET;
+#endif
+
+#define FC_RETURN_BOOL(x) do { return !!(x); } while(0)
+
+#endif
+
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+// The return value is artificially widened on x86 and amd64
+typedef UINT32 FC_CHAR_RET;
+typedef INT32 FC_INT8_RET;
+typedef UINT32 FC_UINT8_RET;
+typedef INT32 FC_INT16_RET;
+typedef UINT32 FC_UINT16_RET;
+#else
+typedef CLR_CHAR FC_CHAR_RET;
+typedef INT8 FC_INT8_RET;
+typedef UINT8 FC_UINT8_RET;
+typedef INT16 FC_INT16_RET;
+typedef UINT16 FC_UINT16_RET;
+#endif
+
+
+
+
+// The fcall entrypoints have to be at unique addresses. Use this helper macro to make
+// the code of the fcalls unique if you get an assert in ecall.cpp that mentions it.
+// The parameter of the FCUnique macro is an arbitrary 32-bit random non-zero number.
+#define FCUnique(unique) { Volatile<int> u = (unique); while (u.LoadWithoutBarrier() == 0) { }; }
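+
+// Illustrative sketch (the fcall name and constant below are hypothetical):
+// if ecall.cpp asserts because two fcall bodies compile to identical code,
+// one of them can be disambiguated like this:
+//
+// FCIMPL0(void, DummyFCall)
+//     FCALL_CONTRACT;
+//     FCUnique(0x12);
+// FCIMPLEND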
+
+
+
+
+// FCALL contracts come in two forms:
+//
+// Short form that should be used if the FCALL contract does not have any extras like preconditions or failure injection. Example:
+//
+// FCIMPL0(void, foo)
+// {
+// FCALL_CONTRACT;
+// ...
+//
+// Long form that should be used otherwise. Example:
+//
+// FCIMPL1(void, foo, void *p)
+// {
+// CONTRACTL {
+// FCALL_CHECK;
+// PRECONDITION(CheckPointer(p));
+// } CONTRACTL_END;
+// ...
+
+
+//
+// FCALL_CHECK defines the actual contract conditions required for FCALLs
+//
+#define FCALL_CHECK \
+ THROWS; \
+ DISABLED(GC_TRIGGERS); /* FCALLS with HELPER frames have issues with GC_TRIGGERS */ \
+ MODE_COOPERATIVE; \
+ SO_TOLERANT
+
+//
+// FCALL_CONTRACT should be the following shortcut:
+//
+// #define FCALL_CONTRACT CONTRACTL { FCALL_CHECK; } CONTRACTL_END;
+//
+// Since there is very little value in having runtime contracts in FCalls, FCALL_CONTRACT is defined as a static contract only, for performance reasons.
+//
+#define FCALL_CONTRACT \
+ STATIC_CONTRACT_SO_TOLERANT; \
+ STATIC_CONTRACT_THROWS; \
+ /* FCALLS are a special case contract wise, they are "NOTRIGGER, unless you setup a frame" */ \
+ STATIC_CONTRACT_GC_NOTRIGGER; \
+ STATIC_CONTRACT_MODE_COOPERATIVE
+
+#endif //__FCall_h__
diff --git a/src/vm/field.cpp b/src/vm/field.cpp
new file mode 100644
index 0000000000..f6b1c16474
--- /dev/null
+++ b/src/vm/field.cpp
@@ -0,0 +1,1025 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: Field.cpp
+//
+
+// ===========================================================================
+// This file contains the implementation for FieldDesc methods.
+// ===========================================================================
+//
+
+
+#include "common.h"
+
+#include "encee.h"
+#include "field.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "generics.h"
+
+#include "peimagelayout.inl"
+
+// called from code:MethodTableBuilder::InitializeFieldDescs#InitCall
+VOID FieldDesc::Init(mdFieldDef mb, CorElementType FieldType, DWORD dwMemberAttrs, BOOL fIsStatic, BOOL fIsRVA, BOOL fIsThreadLocal, BOOL fIsContextLocal, LPCSTR pszFieldName)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // We allow only a subset of field types here - all objects must be set to TYPE_CLASS
+ // By-value classes are ELEMENT_TYPE_VALUETYPE
+ _ASSERTE(
+ FieldType == ELEMENT_TYPE_I1 ||
+ FieldType == ELEMENT_TYPE_BOOLEAN ||
+ FieldType == ELEMENT_TYPE_U1 ||
+ FieldType == ELEMENT_TYPE_I2 ||
+ FieldType == ELEMENT_TYPE_U2 ||
+ FieldType == ELEMENT_TYPE_CHAR ||
+ FieldType == ELEMENT_TYPE_I4 ||
+ FieldType == ELEMENT_TYPE_U4 ||
+ FieldType == ELEMENT_TYPE_I8 ||
+ FieldType == ELEMENT_TYPE_I ||
+ FieldType == ELEMENT_TYPE_U ||
+ FieldType == ELEMENT_TYPE_U8 ||
+ FieldType == ELEMENT_TYPE_R4 ||
+ FieldType == ELEMENT_TYPE_R8 ||
+ FieldType == ELEMENT_TYPE_CLASS ||
+ FieldType == ELEMENT_TYPE_VALUETYPE ||
+ FieldType == ELEMENT_TYPE_PTR ||
+ FieldType == ELEMENT_TYPE_FNPTR
+ );
+ _ASSERTE(fIsStatic || (!fIsRVA && !fIsThreadLocal && !fIsContextLocal));
+ _ASSERTE(fIsRVA + fIsThreadLocal + fIsContextLocal <= 1);
+
+ m_requiresFullMbValue = 0;
+ SetMemberDef(mb);
+
+ m_type = FieldType;
+ m_prot = fdFieldAccessMask & dwMemberAttrs;
+ m_isStatic = fIsStatic != 0;
+ m_isRVA = fIsRVA != 0;
+ m_isThreadLocal = fIsThreadLocal != 0;
+#ifdef FEATURE_REMOTING
+ m_isContextLocal = fIsContextLocal != 0;
+#endif
+
+#ifdef _DEBUG
+ m_debugName = (LPUTF8)pszFieldName;
+#endif
+
+#if CHECK_APP_DOMAIN_LEAKS
+ m_isDangerousAppDomainAgileField = 0;
+#endif
+
+ _ASSERTE(GetMemberDef() == mb); // no truncation
+ _ASSERTE(GetFieldType() == FieldType);
+ _ASSERTE(GetFieldProtection() == (fdFieldAccessMask & dwMemberAttrs));
+ _ASSERTE((BOOL) IsStatic() == (fIsStatic != 0));
+}
+
+// Return whether the field is a GC ref type
+BOOL FieldDesc::IsObjRef()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return CorTypeInfo::IsObjRef_NoThrow(GetFieldType());
+}
+
+#ifndef DACCESS_COMPILE
+void FieldDesc::PrecomputeNameHash()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCompilationProcess());
+ }
+ CONTRACTL_END;
+
+ // We only have space for the name hash when we can use the packed mb layout
+ if (m_requiresFullMbValue)
+ {
+ return;
+ }
+
+ // Store a case-insensitive hash so that we can use this value for
+ // both case-sensitive and case-insensitive name lookups
+ SString name(SString::Utf8Literal, GetName());
+ ULONG nameHashValue = name.HashCaseInsensitive() & enum_packedMbLayout_NameHashMask;
+
+ // We should never overwrite any other bits
+ _ASSERTE((m_mb & enum_packedMbLayout_NameHashMask) == 0 ||
+ (m_mb & enum_packedMbLayout_NameHashMask) == nameHashValue);
+
+ m_mb |= nameHashValue;
+}
+#endif
+
+BOOL FieldDesc::MightHaveName(ULONG nameHashValue)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ g_IBCLogger.LogFieldDescsAccess(this);
+
+ // We only have space for a name hash when we are using the packed mb layout
+ if (m_requiresFullMbValue)
+ {
+ return TRUE;
+ }
+
+ ULONG thisHashValue = m_mb & enum_packedMbLayout_NameHashMask;
+
+ // A zero value might mean no hash has ever been set
+ // (checking this way is better than dedicating a bit to tell us)
+ if (thisHashValue == 0)
+ {
+ return TRUE;
+ }
+
+ ULONG testHashValue = nameHashValue & enum_packedMbLayout_NameHashMask;
+
+ return (thisHashValue == testHashValue);
+}
+
+#ifndef DACCESS_COMPILE //we don't require DAC to special case simple types
+// Return the type of the field, as a class, but only if it's loaded.
+TypeHandle FieldDesc::LookupFieldTypeHandle(ClassLoadLevel level, BOOL dropGenericArgumentLevel)
+{
+
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ // This function is called during GC promotion.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ // Caller should have handled all the non-class cases, already.
+ _ASSERTE(GetFieldType() == ELEMENT_TYPE_CLASS ||
+ GetFieldType() == ELEMENT_TYPE_VALUETYPE);
+
+ MetaSig sig(this);
+ CorElementType type;
+
+ type = sig.NextArg();
+
+ // This may be the real type which includes other things
+ // beside class and value class such as arrays
+ _ASSERTE(type == ELEMENT_TYPE_CLASS ||
+ type == ELEMENT_TYPE_VALUETYPE ||
+ type == ELEMENT_TYPE_STRING ||
+ type == ELEMENT_TYPE_SZARRAY ||
+ type == ELEMENT_TYPE_VAR
+ );
+
+ // == FailIfNotLoaded, can also assert that the thing is restored
+ TypeHandle th = NULL;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return NULL);
+ {
+ th = sig.GetLastTypeHandleThrowing(ClassLoader::DontLoadTypes, level, dropGenericArgumentLevel);
+ }
+ END_SO_INTOLERANT_CODE;
+
+ return th;
+}
+#else //simplified version
+TypeHandle FieldDesc::LookupFieldTypeHandle(ClassLoadLevel level, BOOL dropGenericArgumentLevel)
+{
+ WRAPPER_NO_CONTRACT;
+ MetaSig sig(this);
+ CorElementType type;
+ type = sig.NextArg();
+ return sig.GetLastTypeHandleThrowing(ClassLoader::DontLoadTypes, level, dropGenericArgumentLevel);
+}
+#endif //DACCESS_COMPILE
+
+TypeHandle FieldDesc::GetFieldTypeHandleThrowing(ClassLoadLevel level/*=CLASS_LOADED*/,
+ BOOL dropGenericArgumentLevel /*=FALSE*/)
+{
+ WRAPPER_NO_CONTRACT;
+
+ MetaSig sig(this);
+ sig.NextArg();
+
+ return sig.GetLastTypeHandleThrowing(ClassLoader::LoadTypes, level, dropGenericArgumentLevel);
+}
+
+#ifndef DACCESS_COMPILE
+
+void* FieldDesc::GetStaticAddress(void *base)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY; // Needed by profiler and server GC
+ }
+ CONTRACTL_END;
+
+ void* ret = GetStaticAddressHandle(base); // Get the handle
+
+ // For value classes, the handle points at an OBJECTREF
+    // which holds the boxed value class, so dereference and unbox.
+ if (GetFieldType() == ELEMENT_TYPE_VALUETYPE && !IsRVA())
+ {
+ OBJECTREF obj = ObjectToOBJECTREF(*(Object**) ret);
+ ret = obj->GetData();
+ }
+ return ret;
+}
+
+MethodTable * FieldDesc::GetExactDeclaringType(MethodTable * ownerOrSubType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodTable * pMT = GetApproxEnclosingMethodTable();
+
+ // Fast path for typical case.
+ if (ownerOrSubType == pMT)
+ return pMT;
+
+ return ownerOrSubType->GetMethodTableMatchingParentClass(pMT);
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+ // Static value classes are actually stored in their boxed form.
+ // This means that their address moves.
+PTR_VOID FieldDesc::GetStaticAddressHandle(PTR_VOID base)
+{
+
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ PRECONDITION(IsStatic());
+ PRECONDITION(GetEnclosingMethodTable()->IsRestored_NoLogging());
+ }
+ CONTRACTL_END
+
+ g_IBCLogger.LogFieldDescsAccess(this);
+
+ _ASSERTE(IsStatic());
+#ifdef EnC_SUPPORTED
+ if (IsEnCNew())
+ {
+ EnCFieldDesc * pFD = dac_cast<PTR_EnCFieldDesc>(this);
+ _ASSERTE_IMPL(pFD->GetApproxEnclosingMethodTable()->SanityCheck());
+ _ASSERTE(pFD->GetModule()->IsEditAndContinueEnabled());
+
+ EditAndContinueModule *pModule = (EditAndContinueModule*)pFD->GetModule();
+ _ASSERTE(pModule->IsEditAndContinueEnabled());
+
+ PTR_VOID retVal = NULL;
+
+ // BEGIN_SO_INTOLERANT_CODE will throw if we don't have enough stack
+ // and GetStaticAddressHandle has no failure semantics, so we need
+ // to just do the SO policy (e.g. rip the appdomain or process).
+ CONTRACT_VIOLATION(ThrowsViolation)
+
+#ifdef DACCESS_COMPILE
+ DacNotImpl();
+#else
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ {
+ GCX_COOP();
+ // This routine doesn't have a failure semantic - but Resolve*Field(...) does.
+ // Something needs to be rethought here and I think it's E&C.
+ CONTRACT_VIOLATION(ThrowsViolation|FaultViolation|GCViolation); //B#25680 (Fix Enc violations)
+ retVal = (void *)(pModule->ResolveOrAllocateField(NULL, pFD));
+ }
+ END_SO_INTOLERANT_CODE;
+#endif // !DACCESS_COMPILE
+ return retVal;
+ }
+#endif // EnC_SUPPORTED
+
+
+ if (IsRVA())
+ {
+ Module* pModule = GetModule();
+ PTR_VOID ret = pModule->GetRvaField(GetOffset(), IsZapped());
+
+ _ASSERTE(!pModule->IsPEFile() || !pModule->IsRvaFieldTls(GetOffset()));
+
+ return(ret);
+ }
+
+ CONSISTENCY_CHECK(CheckPointer(base));
+
+ PTR_VOID ret = PTR_VOID(dac_cast<PTR_BYTE>(base) + GetOffset());
+
+
+ return ret;
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+// These routines encapsulate the operation of getting and setting
+// fields.
+void FieldDesc::GetInstanceField(OBJECTREF o, VOID * pOutVal)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED() ) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED() ) GC_NOTRIGGER; else GC_TRIGGERS;
+ MODE_ANY;
+ if (FORBIDGC_LOADER_USE_ENABLED() ) FORBID_FAULT; else INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ // We know that it isn't going to be null here. Tell PREFIX that we know it.
+ PREFIX_ASSUME(o != NULL);
+
+ // Check whether we are getting a field value on a proxy. If so, then ask
+ // remoting services to extract the value from the instance.
+#ifdef FEATURE_REMOTING
+ if (o->IsTransparentProxy())
+ {
+#ifndef DACCESS_COMPILE
+ o = CRemotingServices::GetObjectFromProxy(o);
+
+ if (o->IsTransparentProxy())
+ {
+#ifdef PROFILING_SUPPORTED
+
+ GCPROTECT_BEGIN(o); // protect from RemotingClientInvocationStarted
+
+            // If profiling is active, notify it that remoting is kicking in;
+            // AlwaysUnwrap returned an identical object pointer, which means that
+            // we are definitely going through remoting for this access.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackRemoting());
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RemotingClientInvocationStarted();
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ CRemotingServices::FieldAccessor(this, o, pOutVal, TRUE);
+
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackRemoting());
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RemotingClientInvocationFinished();
+ }
+ END_PIN_PROFILER();
+ }
+
+ GCPROTECT_END(); // protect from RemotingClientInvocationStarted
+
+#endif // PROFILING_SUPPORTED
+
+ return;
+ }
+#else
+ DacNotImpl();
+#endif // #ifndef DACCESS_COMPILE
+ }
+#endif // FEATURE_REMOTING
+
+ // Unbox the value class
+ TADDR pFieldAddress = (TADDR)GetInstanceAddress(o);
+ UINT cbSize = GetSize();
+
+ switch (cbSize)
+ {
+ case 1:
+ *(INT8*)pOutVal = VolatileLoad<INT8>(PTR_INT8(pFieldAddress));
+ break;
+
+ case 2:
+ *(INT16*)pOutVal = VolatileLoad<INT16>(PTR_INT16(pFieldAddress));
+ break;
+
+ case 4:
+ *(INT32*)pOutVal = VolatileLoad<INT32>(PTR_INT32(pFieldAddress));
+ break;
+
+ case 8:
+ *(INT64*)pOutVal = VolatileLoad<INT64>(PTR_INT64(pFieldAddress));
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+#ifndef DACCESS_COMPILE
+void FieldDesc::SetInstanceField(OBJECTREF o, const VOID * pInVal)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+
+ // Check whether we are setting a field value on a proxy or a marshalbyref
+ // class. If so, then ask remoting services to set the value on the
+ // instance
+
+#ifdef FEATURE_REMOTING
+ if(o->IsTransparentProxy())
+ {
+ o = CRemotingServices::GetObjectFromProxy(o);
+
+ if (o->IsTransparentProxy())
+ {
+#ifdef PROFILING_SUPPORTED
+
+ GCPROTECT_BEGIN(o);
+
+            // If profiling is active, notify it that remoting is kicking in;
+            // AlwaysUnwrap returned an identical object pointer, which means that
+            // we are definitely going through remoting for this access.
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackRemoting());
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RemotingClientInvocationStarted();
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ CRemotingServices::FieldAccessor(this, o, (void *)pInVal, FALSE);
+
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackRemoting());
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RemotingClientInvocationFinished();
+ }
+ END_PIN_PROFILER();
+ }
+ GCPROTECT_END();
+
+#endif // PROFILING_SUPPORTED
+
+ return;
+ }
+ }
+#endif // FEATURE_REMOTING
+
+#ifdef _DEBUG
+ //
+ // assert that o is derived from MT of enclosing class
+ //
+    // walk up o's inheritance chain to make sure m_pMTOfEnclosingClass is along it
+ //
+ MethodTable* pCursor = o->GetMethodTable();
+
+ //<TODO>@todo : work out exactly why instantiations aren't matching; probably
+ // because of approx loads on field types in the loader</TODO>
+ while (pCursor && (GetApproxEnclosingMethodTable()->HasSameTypeDefAs(pCursor)))
+ {
+ pCursor = pCursor->GetParentMethodTable();
+ }
+ _ASSERTE(pCursor != NULL);
+#endif // _DEBUG
+
+ // Unbox the value class
+ LPVOID pFieldAddress = GetInstanceAddress(o);
+
+ CorElementType fieldType = GetFieldType();
+
+ if (fieldType == ELEMENT_TYPE_CLASS)
+ {
+ OBJECTREF ref = ObjectToOBJECTREF(*(Object**)pInVal);
+
+ SetObjectReference((OBJECTREF*)pFieldAddress, ref,
+ o->GetAppDomain());
+ }
+ else if (fieldType == ELEMENT_TYPE_VALUETYPE)
+ {
+ CONSISTENCY_CHECK(!LookupFieldTypeHandle().IsNull());
+ // The Approximate MT is enough to do the copy
+ CopyValueClass(pFieldAddress,
+ (void*)pInVal,
+ LookupFieldTypeHandle().GetMethodTable(),
+ o->GetAppDomain());
+ }
+ else
+ {
+ UINT cbSize = LoadSize();
+
+ switch (cbSize)
+ {
+ case 1:
+ VolatileStore<INT8>((INT8*)pFieldAddress, *(INT8*)pInVal);
+ break;
+
+ case 2:
+ VolatileStore<INT16>((INT16*)pFieldAddress, *(INT16*)pInVal);
+ break;
+
+ case 4:
+ VolatileStore<INT32>((INT32*)pFieldAddress, *(INT32*)pInVal);
+ break;
+
+ case 8:
+ VolatileStore<INT64>((INT64*)pFieldAddress, *(INT64*)pInVal);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+#endif // #ifndef DACCESS_COMPILE
+
+
+// This function is used for BYREF support of fields. Since it generates
+// interior pointers, you really have to watch the lifetime of the pointer
+// so that GCs don't happen while you have the reference active.
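+// (Illustratively, a caller obtains the address and uses it within a single
+// cooperative-mode region, with no GC-triggering calls in between.)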
+PTR_VOID FieldDesc::GetAddressNoThrowNoGC(PTR_VOID o)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ PRECONDITION(!IsEnCNew());
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ DWORD dwOffset = GetOffset();
+ if (!IsFieldOfValueType())
+ {
+ dwOffset += sizeof(Object);
+ }
+ return dac_cast<PTR_BYTE>(o) + dwOffset;
+}
+
+PTR_VOID FieldDesc::GetAddress(PTR_VOID o)
+{
+ CONTRACTL
+ {
+ if(IsEnCNew()) {THROWS;} else {DISABLED(THROWS);};
+ if(IsEnCNew()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);};
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+#ifdef DACCESS_COMPILE
+ _ASSERTE(!IsEnCNew()); // when we call this while finding an EnC field via the DAC,
+ // the field desc is for the EnCHelper, not the new EnC field
+#endif
+ g_IBCLogger.LogFieldDescsAccess(this);
+
+#if defined(EnC_SUPPORTED) && !defined(DACCESS_COMPILE)
+ // EnC added fields aren't at a simple offset like normal fields.
+ if (IsEnCNew())
+ {
+ // We'll have to go through some effort to compute the address of this field.
+ return ((EnCFieldDesc *)this)->GetAddress(o);
+ }
+#endif // defined(EnC_SUPPORTED) && !defined(DACCESS_COMPILE)
+ return GetAddressNoThrowNoGC(o);
+}
+
+void *FieldDesc::GetInstanceAddress(OBJECTREF o)
+{
+ CONTRACTL
+ {
+ if(IsEnCNew()) {THROWS;} else {DISABLED(THROWS);};
+ if(IsEnCNew()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);};
+ }
+ CONTRACTL_END;
+
+ g_IBCLogger.LogFieldDescsAccess(this);
+
+ DWORD dwOffset = m_dwOffset; // GetOffset()
+
+#ifdef EnC_SUPPORTED
+ // EnC added fields aren't at a simple offset like normal fields.
+ if (dwOffset == FIELD_OFFSET_NEW_ENC) // IsEnCNew()
+ {
+ // We'll have to go through some effort to compute the address of this field.
+ return ((EnCFieldDesc *)this)->GetAddress(OBJECTREFToObject(o));
+ }
+#endif
+
+ return (void *) (dac_cast<TADDR>(o->GetData()) + dwOffset);
+}
+
+// And here's the equivalent, when you are guaranteed that the enclosing instance of
+// the field is in the GC Heap. So if the enclosing instance is a value type, it had
+// better be boxed. We ASSERT this.
+void *FieldDesc::GetAddressGuaranteedInHeap(void *o)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!IsEnCNew());
+
+ return ((BYTE*)(o)) + sizeof(Object) + m_dwOffset;
+}
+
+
+DWORD FieldDesc::GetValue32(OBJECTREF o)
+{
+ WRAPPER_NO_CONTRACT;
+
+ DWORD val;
+ GetInstanceField(o, (LPVOID)&val);
+ return val;
+}
+
+#ifndef DACCESS_COMPILE
+VOID FieldDesc::SetValue32(OBJECTREF o, DWORD dwValue)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ SetInstanceField(o, (LPVOID)&dwValue);
+}
+#endif // #ifndef DACCESS_COMPILE
+
+void* FieldDesc::GetValuePtr(OBJECTREF o)
+{
+ WRAPPER_NO_CONTRACT;
+
+ void* val;
+ GetInstanceField(o, (LPVOID)&val);
+ return val;
+}
+
+#ifndef DACCESS_COMPILE
+VOID FieldDesc::SetValuePtr(OBJECTREF o, void* pValue)
+{
+ WRAPPER_NO_CONTRACT;
+
+ SetInstanceField(o, (LPVOID)&pValue);
+}
+#endif // #ifndef DACCESS_COMPILE
+
+OBJECTREF FieldDesc::GetRefValue(OBJECTREF o)
+{
+ WRAPPER_NO_CONTRACT;
+
+ OBJECTREF val = NULL;
+
+#ifdef PROFILING_SUPPORTED
+ GCPROTECT_BEGIN(val);
+#endif
+
+ GetInstanceField(o, (LPVOID)&val);
+
+#ifdef PROFILING_SUPPORTED
+ GCPROTECT_END();
+#endif
+
+ return val;
+}
+
+#ifndef DACCESS_COMPILE
+VOID FieldDesc::SetRefValue(OBJECTREF o, OBJECTREF orValue)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ VALIDATEOBJECTREF(o);
+ VALIDATEOBJECTREF(orValue);
+
+ SetInstanceField(o, (LPVOID)&orValue);
+}
+#endif // #ifndef DACCESS_COMPILE
+
+USHORT FieldDesc::GetValue16(OBJECTREF o)
+{
+ WRAPPER_NO_CONTRACT;
+
+ USHORT val;
+ GetInstanceField(o, (LPVOID)&val);
+ return val;
+}
+
+#ifndef DACCESS_COMPILE
+VOID FieldDesc::SetValue16(OBJECTREF o, DWORD dwValue)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ USHORT val = (USHORT)dwValue;
+ SetInstanceField(o, (LPVOID)&val);
+}
+#endif // #ifndef DACCESS_COMPILE
+
+BYTE FieldDesc::GetValue8(OBJECTREF o)
+{
+ WRAPPER_NO_CONTRACT;
+
+ BYTE val;
+ GetInstanceField(o, (LPVOID)&val);
+ return val;
+
+}
+
+#ifndef DACCESS_COMPILE
+VOID FieldDesc::SetValue8(OBJECTREF o, DWORD dwValue)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ BYTE val = (BYTE)dwValue;
+ SetInstanceField(o, (LPVOID)&val);
+}
+#endif // #ifndef DACCESS_COMPILE
+
+__int64 FieldDesc::GetValue64(OBJECTREF o)
+{
+ WRAPPER_NO_CONTRACT;
+ __int64 val;
+ GetInstanceField(o, (LPVOID)&val);
+ return val;
+
+}
+
+#ifndef DACCESS_COMPILE
+VOID FieldDesc::SetValue64(OBJECTREF o, __int64 value)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ SetInstanceField(o, (LPVOID)&value);
+}
+#endif // #ifndef DACCESS_COMPILE
+
+#endif // !CROSSGEN_COMPILE
+
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+void FieldDesc::SaveContents(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _DEBUG
+ if (m_debugName && !image->IsStored((void*) m_debugName))
+ image->StoreStructure((void *) m_debugName,
+ (ULONG)(strlen(m_debugName) + 1),
+ DataImage::ITEM_DEBUG,
+ 1);
+#endif
+
+ //
+ // If we are compiling an IL only image, and our RVA fits
+ // in the designated range, copy the RVA data over to the prejit
+ // image.
+ //
+
+ if (IsILOnlyRVAField())
+ {
+ //
+ // Move the RVA data into the prejit image.
+ //
+
+ UINT size = LoadSize();
+
+ //
+ // Compute an alignment for the data based on the alignment
+ // of the RVA. We'll align up to 8 bytes.
+ //
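+        // For example (hypothetical values): an RVA of 0x1008 with size 16 has
+        // three trailing zero bits, so the loop below settles on align == 8.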
+
+ UINT align = 1;
+ DWORD rva = GetOffset();
+ DWORD rvaTemp = rva;
+
+ while ((rvaTemp&1) == 0 && align < 8 && align < size)
+ {
+ align <<= 1;
+ rvaTemp >>= 1;
+ }
+
+ image->StoreRvaInfo(this,
+ rva,
+ size,
+ align);
+ }
+}
+
+void FieldDesc::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ LOG((LF_ZAP, LL_INFO10000, "FieldDesc::Fixup %s::%s\n", GetApproxEnclosingMethodTable()->GetDebugClassName(), m_debugName));
+ image->FixupRelativePointerField(this, offsetof(FieldDesc, m_pMTOfEnclosingClass));
+
+#ifdef _DEBUG
+ image->FixupPointerField(this, offsetof(FieldDesc, m_debugName));
+#endif
+
+ // if (IsRVAFieldWithLessThanBigOffset())
+ // {
+ // offset of RVA fields is fixed up in DataImage::FixupRvaStructure
+ // }
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#endif // #ifndef DACCESS_COMPILE
+
+UINT FieldDesc::LoadSize()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ CorElementType type = GetFieldType();
+ UINT size = GetSizeForCorElementType(type);
+ if (size == (UINT) -1)
+ {
+ // LOG((LF_CLASSLOADER, LL_INFO10000, "FieldDesc::LoadSize %s::%s\n", GetApproxEnclosingMethodTable()->GetDebugClassName(), m_debugName));
+ CONSISTENCY_CHECK(GetFieldType() == ELEMENT_TYPE_VALUETYPE);
+ size = GetApproxFieldTypeHandleThrowing().GetMethodTable()->GetNumInstanceFieldBytes();
+ }
+
+ return size;
+}
+
+UINT FieldDesc::GetSize()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ CorElementType type = GetFieldType();
+ UINT size = GetSizeForCorElementType(type);
+ if (size == (UINT) -1)
+ {
+ LOG((LF_CLASSLOADER, LL_INFO10000, "FieldDesc::GetSize %s::%s\n", GetApproxEnclosingMethodTable()->GetDebugClassName(), m_debugName));
+ CONSISTENCY_CHECK(GetFieldType() == ELEMENT_TYPE_VALUETYPE);
+ TypeHandle t = LookupApproxFieldTypeHandle();
+ if (!t.IsNull())
+ {
+ size = t.GetMethodTable()->GetNumInstanceFieldBytes();
+ }
+ }
+
+ return size;
+}
+
+// See field.h for details
+Instantiation FieldDesc::GetExactClassInstantiation(TypeHandle possibleObjType)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // We know that it isn't going to be null here. Tell PREFIX that we know it.
+ PREFIX_ASSUME(GetApproxEnclosingMethodTable()!=NULL);
+ if (possibleObjType.IsNull())
+ {
+ return GetApproxEnclosingMethodTable()->GetInstantiation();
+ }
+ else
+ {
+ PREFIX_ASSUME(GetApproxEnclosingMethodTable()!=NULL);
+ return possibleObjType.GetInstantiationOfParentClass(GetApproxEnclosingMethodTable());
+ }
+}
+
+// Given { List<String>, List<__Canon>._items }, where _items is of type T[],
+// it returns String[].
+
+TypeHandle FieldDesc::GetExactFieldType(TypeHandle owner)
+{
+ CONTRACT(TypeHandle)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(owner, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END
+
+ if (GetApproxEnclosingMethodTable() == owner.AsMethodTable())
+ {
+ //Yes, this is exactly the type I was looking for.
+ RETURN(GetFieldTypeHandleThrowing());
+ }
+ else
+ {
+ //This FieldDesc doesn't exactly represent the owner type. Go look up the exact type.
+
+ // We need to figure out the precise type of the field.
+ // First, get the signature of the field
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+ GetSig(&pSig, &cSig);
+ SigPointer sig(pSig, cSig);
+
+ // Get the generics information
+ SigTypeContext sigTypeContext(GetExactClassInstantiation(owner), Instantiation());
+
+ TypeHandle thApproxFieldType = GetApproxFieldTypeHandleThrowing();
+ // Load the exact type
+ RETURN (sig.GetTypeHandleThrowing(thApproxFieldType.GetModule(), &sigTypeContext));
+ }
+}
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+REFLECTFIELDREF FieldDesc::GetStubFieldInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ REFLECTFIELDREF retVal;
+ REFLECTFIELDREF fieldRef = (REFLECTFIELDREF)AllocateObject(MscorlibBinder::GetClass(CLASS__STUBFIELDINFO));
+ GCPROTECT_BEGIN(fieldRef);
+
+ fieldRef->SetField(this);
+ LoaderAllocator *pLoaderAllocatorOfMethod = this->GetApproxEnclosingMethodTable()->GetLoaderAllocator();
+ if (pLoaderAllocatorOfMethod->IsCollectible())
+ fieldRef->SetKeepAlive(pLoaderAllocatorOfMethod->GetExposedObject());
+
+ retVal = fieldRef;
+ GCPROTECT_END();
+
+ return retVal;
+}
+#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
diff --git a/src/vm/field.h b/src/vm/field.h
new file mode 100644
index 0000000000..a278c4d12c
--- /dev/null
+++ b/src/vm/field.h
@@ -0,0 +1,1004 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+// COM+ Data Field Abstraction
+//
+
+
+#ifndef _FIELD_H_
+#define _FIELD_H_
+
+#ifndef BINDER
+#include "objecthandle.h"
+#include "excep.h"
+#else // BINDER
+#include "methodtable.h"
+#endif // BINDER
+
+// Temporary values stored in FieldDesc m_dwOffset during loading
+// The high 5 bits must be zero (because in field.h we steal them for other uses), so we must choose values > 0
+#define FIELD_OFFSET_MAX ((1<<27)-1)
+#define FIELD_OFFSET_UNPLACED FIELD_OFFSET_MAX
+#define FIELD_OFFSET_UNPLACED_GC_PTR (FIELD_OFFSET_MAX-1)
+#define FIELD_OFFSET_VALUE_CLASS (FIELD_OFFSET_MAX-2)
+#define FIELD_OFFSET_NOT_REAL_FIELD (FIELD_OFFSET_MAX-3)
+
+// Offset to indicate an EnC added field. They don't have offsets, as they aren't placed in the object.
+#define FIELD_OFFSET_NEW_ENC (FIELD_OFFSET_MAX-4)
+#define FIELD_OFFSET_BIG_RVA (FIELD_OFFSET_MAX-5)
+#define FIELD_OFFSET_LAST_REAL_OFFSET (FIELD_OFFSET_MAX-6) // real fields have to be smaller than this
+
+
+//
+// This describes a field - one of this is allocated for every field, so don't make this structure any larger.
+//
+// @GENERICS:
+// Field descriptors for fields in instantiated types may be shared between compatible instantiations
+// Hence for reflection it's necessary to pair a field desc with the exact owning type handle
+class FieldDesc
+{
+ friend class MethodTableBuilder;
+#ifdef BINDER
+ friend class MdilModule;
+#endif // BINDER
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+ protected:
+ RelativePointer<PTR_MethodTable> m_pMTOfEnclosingClass; // This is used to hold the log2 of the field size temporarily during class loading. Yuck.
+
+#if defined(DACCESS_COMPILE)
+ union { //create a union so I can get the correct offset for ClrDump.
+ unsigned m_dword1;
+ struct {
+#endif
+ // Note that we may store other information in the high bits if available --
+ // see enum_packedMBLayout and m_requiresFullMbValue for details.
+ unsigned m_mb : 24;
+
+ // 8 bits...
+ unsigned m_isStatic : 1;
+ unsigned m_isThreadLocal : 1;
+#ifdef FEATURE_REMOTING
+ unsigned m_isContextLocal : 1;
+#endif
+ unsigned m_isRVA : 1;
+ unsigned m_prot : 3;
+ // Does this field's mb require all 24 bits
+ unsigned m_requiresFullMbValue : 1;
+#if defined(DACCESS_COMPILE)
+ };
+ };
+#endif
+
+#if defined(DACCESS_COMPILE)
+ union { //create a union so I can get the correct offset for ClrDump
+ unsigned m_dword2;
+ struct {
+#endif
+        // Note: this has been as low as 22 bits in the past & seemed to be OK.
+        // We can steal some more bits here if we need them.
+ unsigned m_dwOffset : 27;
+ unsigned m_type : 5;
+#if defined(DACCESS_COMPILE)
+ };
+ };
+#endif
+
+#ifdef _DEBUG
+ struct {
+ unsigned m_isDangerousAppDomainAgileField : 1;
+ };
+ LPUTF8 m_debugName;
+#endif
+
+ // Allocated by special heap means, don't construct me
+ FieldDesc() {};
+
+public:
+#ifdef BINDER
+ // We will need these to implement pseudoinstructions COPY_STRUCT,
+ // PUSH_STRUCT (versionable struct support).
+ // They are implemented via a side hash table in MdilModule
+ DWORD GetFieldValueTypeToken();
+ void SetFieldValueTypeToken(DWORD valueTypeToken);
+ MethodTable *GetFieldFullType();
+ void SetFieldFullType(MethodTable *mt);
+#endif
+
+#ifdef _DEBUG
+ inline LPUTF8 GetDebugName()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_debugName;
+ }
+#endif // _DEBUG
+
+#ifndef DACCESS_COMPILE
+ // This should be called. It was added so that Reflection
+    // can create FieldDescs for the static primitive fields that aren't
+ // stored with the EEClass.
+ void SetMethodTable(MethodTable* mt)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pMTOfEnclosingClass.SetValue(mt);
+ }
+#endif
+
+ VOID Init(mdFieldDef mb,
+ CorElementType FieldType,
+ DWORD dwMemberAttrs,
+ BOOL fIsStatic,
+ BOOL fIsRVA,
+ BOOL fIsThreadLocal,
+ BOOL fIsContextLocal,
+ LPCSTR pszFieldName);
+
+ enum {
+ enum_packedMbLayout_MbMask = 0x01FFFF,
+ enum_packedMbLayout_NameHashMask = 0xFE0000
+ };
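+
+    // In the packed layout, bits 0-16 of m_mb hold the member rid and bits 17-23
+    // hold an optional case-insensitive name hash (see PrecomputeNameHash).
+    // Rids that need more than 17 bits set m_requiresFullMbValue and use all
+    // 24 bits of m_mb for the rid instead.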
+
+ void SetMemberDef(mdFieldDef mb)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // Check if we have to avoid using the packed mb layout
+ if (RidFromToken(mb) > enum_packedMbLayout_MbMask)
+ {
+ m_requiresFullMbValue = 1;
+ }
+
+ // Set only the portion of m_mb we are using
+ if (!m_requiresFullMbValue)
+ {
+ m_mb &= ~enum_packedMbLayout_MbMask;
+ m_mb |= RidFromToken(mb);
+ }
+ else
+ {
+ m_mb = RidFromToken(mb);
+ }
+ }
+
+ mdFieldDef GetMemberDef() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Check if this FieldDesc is using the packed mb layout
+ if (!m_requiresFullMbValue)
+ {
+ return TokenFromRid(m_mb & enum_packedMbLayout_MbMask, mdtFieldDef);
+ }
+
+ return TokenFromRid(m_mb, mdtFieldDef);
+ }
+
+ CorElementType GetFieldType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Set in code:FieldDesc.Init which in turn is called from
+ // code:MethodTableBuilder.InitializeFieldDescs#InitCall which in turn calls
+ // code:MethodTableBuilder.InitializeFieldDescs#FieldDescTypeMorph
+ return (CorElementType) m_type;
+ }
+#ifdef BINDER
+ void SetFieldType(CorElementType type)
+ {
+
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ // Set in code:FieldDesc.Init which in turn is called from
+ // code:MethodTableBuilder.InitializeFieldDescs#InitCall which in turn calls
+ // code:MethodTableBuilder.InitializeFieldDescs#FieldDescTypeMorph
+ m_type = type;
+ }
+#endif
+
+ DWORD GetFieldProtection()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Set in code:FieldDesc.Init which in turn is called from code:MethodTableBuilder::InitializeFieldDescs#InitCall
+ return m_prot;
+ }
+
+    // Please only use this in a path where you have already guaranteed
+    // that the assert is true.
+ DWORD GetOffsetUnsafe()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ g_IBCLogger.LogFieldDescsAccess(this);
+ _ASSERTE(m_dwOffset <= FIELD_OFFSET_LAST_REAL_OFFSET);
+ return m_dwOffset;
+ }
+
+ DWORD GetOffset()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ g_IBCLogger.LogFieldDescsAccess(this);
+ return GetOffset_NoLogging();
+ }
+
+ // During class load m_pMTOfEnclosingClass has the field size in it, so it has to use this version of
+ // GetOffset during that time
+ DWORD GetOffset_NoLogging()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+        // Note: FieldDescs are no longer on "hot" paths, so the optimized code here
+        // does not look necessary.
+
+ if (m_dwOffset != FIELD_OFFSET_BIG_RVA) {
+ // Assert that the big RVA case handling doesn't get out of sync
+ // with the normal RVA case.
+#ifdef _DEBUG
+            // The OutOfLine_BigRVAOffset() can't be correctly evaluated during the time
+            // that we repurposed m_pMTOfEnclosingClass for holding the field size.
+            // I don't see any good way to determine when this is, so hurray for
+            // heuristics!
+ //
+ // As of 4/11/2012 I could repro this by turning on the COMPLUS log and
+ // the LOG() at line methodtablebuilder.cpp:7845
+ // MethodTableBuilder::PlaceRegularStaticFields() calls GetOffset_NoLogging()
+ if((DWORD)(DWORD_PTR&)m_pMTOfEnclosingClass > 16)
+ {
+ _ASSERTE(!this->IsRVA() || (m_dwOffset == OutOfLine_BigRVAOffset()));
+ }
+#endif
+ return m_dwOffset;
+ }
+
+ return OutOfLine_BigRVAOffset();
+ }
+
+ DWORD OutOfLine_BigRVAOffset()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ DWORD rva;
+
+#ifndef BINDER
+ // <NICE>I'm discarding a potential error here. According to the code in MDInternalRO.cpp,
+ // we won't get an error if we initially found the RVA. So I'm going to just
+ // assert it never happens.
+ //
+ // This is a small sin, but I don't see a good alternative. --cwb.</NICE>
+ HRESULT hr;
+ hr = GetMDImport()->GetFieldRVA(GetMemberDef(), &rva);
+ _ASSERTE(SUCCEEDED(hr));
+#else // BINDER
+ BOOL fSucceeded = GetRVAOffsetForFieldDesc(this, &rva);
+ assert(fSucceeded);
+#endif // BINDER
+ return rva;
+ }
+
+ HRESULT SetOffset(DWORD dwOffset)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ //
+ // value class fields must be aligned to pointer-sized boundaries
+ //
+ //
+ // This is commented out because it isn't valid in all cases.
+ // This is still here because it is useful for finding alignment
+ // problems on IA64.
+ //
+ //_ASSERTE((dwOffset > FIELD_OFFSET_LAST_REAL_OFFSET) ||
+ // (ELEMENT_TYPE_VALUETYPE != GetFieldType()) ||
+ // (IS_ALIGNED(dwOffset, sizeof(void*))));
+
+ m_dwOffset = dwOffset;
+ return((dwOffset > FIELD_OFFSET_LAST_REAL_OFFSET) ? COR_E_TYPELOAD : S_OK);
+ }
+
+ // Okay, we've stolen too many bits from FieldDescs. In the RVA case, there's no
+ // reason to believe they will be limited to 22 bits. So use a sentinel for the
+ // huge cases, and recover them from metadata on-demand.
+ void SetOffsetRVA(DWORD dwOffset)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwOffset = (dwOffset > FIELD_OFFSET_LAST_REAL_OFFSET)
+ ? FIELD_OFFSET_BIG_RVA
+ : dwOffset;
+#ifdef BINDER
+ StoreRVAOffsetForFieldDesc(this, dwOffset);
+#endif
+ }
+
+#ifndef BINDER
+ BOOL IsILOnlyRVAField()
+ {
+ WRAPPER_NO_CONTRACT;
+ return (IsRVA() && GetModule()->GetFile()->IsILOnly());
+ }
+#endif // !BINDER
+
+ DWORD IsStatic() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_isStatic;
+ }
+
+ BOOL IsSpecialStatic()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_isStatic && (m_isRVA || m_isThreadLocal
+#ifdef FEATURE_REMOTING
+ || m_isContextLocal
+#endif
+ );
+ }
+
+#if defined(CHECK_APP_DOMAIN_LEAKS) || defined(_DEBUG)
+ BOOL IsDangerousAppDomainAgileField()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_isDangerousAppDomainAgileField;
+ }
+
+ void SetDangerousAppDomainAgileField()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_isDangerousAppDomainAgileField = TRUE;
+ }
+#endif
+
+ BOOL IsRVA() const // Has an explicit RVA associated with it
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_isRVA;
+ }
+
+ BOOL IsThreadStatic() const // Static relative to a thread
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_isThreadLocal;
+ }
+
+ BOOL IsContextStatic() const // Static relative to a context
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef FEATURE_REMOTING
+ return m_isContextLocal;
+#else
+ return FALSE;
+#endif
+ }
+
+ // Indicate that this field was added by EnC
+ // Must only be called on instances of EnCFieldDesc
+ void SetEnCNew()
+ {
+ WRAPPER_NO_CONTRACT;
+
+        // EnC added fields don't live in the actual object, so they don't have a real offset
+ SetOffset(FIELD_OFFSET_NEW_ENC);
+ }
+
+ // Was this field added by EnC?
+ // If this is true, then this object is an instance of EnCFieldDesc
+ BOOL IsEnCNew()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // EnC added fields don't have a real offset
+ return m_dwOffset == FIELD_OFFSET_NEW_ENC;
+ }
+
+ BOOL IsByValue()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return GetFieldType() == ELEMENT_TYPE_VALUETYPE;
+ }
+
+ BOOL IsPrimitive()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (CorIsPrimitiveType(GetFieldType()) != FALSE);
+ }
+
+ BOOL IsObjRef();
+
+#ifdef FEATURE_PREJIT
+ void SaveContents(DataImage *image);
+ void Fixup(DataImage *image);
+#endif // FEATURE_PREJIT
+
+ UINT LoadSize();
+
+ // Return -1 if the type isn't loaded yet (i.e. if LookupFieldTypeHandle() would return null)
+ UINT GetSize();
+
+ // These routines encapsulate the operation of getting and setting
+ // fields.
+ void GetInstanceField(OBJECTREF o, VOID * pOutVal);
+ void SetInstanceField(OBJECTREF o, const VOID * pInVal);
+
+ void* GetInstanceAddress(OBJECTREF o);
+
+ // Get the address of a field within object 'o'
+ PTR_VOID GetAddress(PTR_VOID o);
+
+ PTR_VOID GetAddressNoThrowNoGC(PTR_VOID o);
+ void* GetAddressGuaranteedInHeap(void *o);
+
+ void* GetValuePtr(OBJECTREF o);
+ VOID SetValuePtr(OBJECTREF o, void* pValue);
+ DWORD GetValue32(OBJECTREF o);
+ VOID SetValue32(OBJECTREF o, DWORD dwValue);
+ OBJECTREF GetRefValue(OBJECTREF o);
+ VOID SetRefValue(OBJECTREF o, OBJECTREF orValue);
+ USHORT GetValue16(OBJECTREF o);
+ VOID SetValue16(OBJECTREF o, DWORD dwValue);
+ BYTE GetValue8(OBJECTREF o);
+ VOID SetValue8(OBJECTREF o, DWORD dwValue);
+ __int64 GetValue64(OBJECTREF o);
+ VOID SetValue64(OBJECTREF o, __int64 value);
+
+ PTR_MethodTable GetApproxEnclosingMethodTable_NoLogging()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pMTOfEnclosingClass.GetValue(PTR_HOST_MEMBER_TADDR(FieldDesc, this, m_pMTOfEnclosingClass));
+ }
+
+ PTR_MethodTable GetApproxEnclosingMethodTable()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ g_IBCLogger.LogFieldDescsAccess(this);
+ return GetApproxEnclosingMethodTable_NoLogging();
+ }
+
+ PTR_MethodTable GetEnclosingMethodTable()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(!IsSharedByGenericInstantiations());
+ return GetApproxEnclosingMethodTable();
+ }
+
+ // FieldDesc can be shared between generic instantiations. So List<String>._items
+ // is really the same as List<__Canon>._items. Hence, the FieldDesc itself
+ // cannot know the exact enclosing type. You need to provide the exact owner
+ // like List<String> or a subtype like MyInheritedList<String>.
+ MethodTable * GetExactDeclaringType(MethodTable * ownerOrSubType);
+
+ BOOL IsSharedByGenericInstantiations()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifndef BINDER
+ return (!IsStatic()) && GetApproxEnclosingMethodTable()->IsSharedByGenericInstantiations();
+#else // BINDER
+ return FALSE;
+#endif // BINDER
+ }
+
+ BOOL IsFieldOfValueType()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetApproxEnclosingMethodTable()->IsValueType();
+ }
+
+ DWORD GetNumGenericClassArgs()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetApproxEnclosingMethodTable()->GetNumGenericArgs();
+ }
+
+
+#ifndef BINDER
+
+ PTR_BYTE GetBaseInDomainLocalModule(DomainLocalModule * pLocalModule)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (GetFieldType() == ELEMENT_TYPE_CLASS || GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ {
+ return pLocalModule->GetGCStaticsBasePointer(GetEnclosingMethodTable());
+ }
+ else
+ {
+ return pLocalModule->GetNonGCStaticsBasePointer(GetEnclosingMethodTable());
+ }
+ }
+
+#ifndef DACCESS_COMPILE
+ PTR_BYTE GetBase()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ MethodTable *pMT = GetEnclosingMethodTable();
+
+ return GetBaseInDomainLocalModule(pMT->GetDomainLocalModule());
+ }
+
+#endif //!DACCESS_COMPILE
+
+ PTR_BYTE GetBaseInDomain(AppDomain * appDomain)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Module *pModule = GetEnclosingMethodTable()->GetModuleForStatics();
+ if (pModule == NULL)
+ return NULL;
+
+ DomainLocalModule *pLocalModule = pModule->GetDomainLocalModule(appDomain);
+ if (pLocalModule == NULL)
+ return NULL;
+
+ return GetBaseInDomainLocalModule(pLocalModule);
+ }
+
+ // returns the address of the field
+ void* GetStaticAddress(void *base);
+
+ // In all cases except Value classes, the AddressHandle is
+ // simply the address of the static. For the case of value
+ // types, however, it is the address of OBJECTREF that holds
+ // the boxed value used to hold the value type. This is needed
+ // because the OBJECTREF moves, and the JIT needs to embed something
+ // in the code that does not move. Thus the jit has to
+ // dereference and unbox before the access.
+ PTR_VOID GetStaticAddressHandle(PTR_VOID base);
+
+#ifndef DACCESS_COMPILE
+ OBJECTREF GetStaticOBJECTREF()
+ {
+ WRAPPER_NO_CONTRACT;
+ return *(OBJECTREF *)GetCurrentStaticAddress();
+ }
+
+ VOID SetStaticOBJECTREF(OBJECTREF objRef)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ GCPROTECT_BEGIN(objRef);
+ OBJECTREF *pObjRef = (OBJECTREF *)GetCurrentStaticAddress();
+ SetObjectReference(pObjRef, objRef, GetAppDomain());
+ GCPROTECT_END();
+ }
+
+ void* GetStaticValuePtr()
+ {
+ WRAPPER_NO_CONTRACT;
+ return *(void**)GetCurrentStaticAddress();
+ }
+
+ VOID SetStaticValuePtr(void *value)
+ {
+ WRAPPER_NO_CONTRACT;
+ *(void**)GetCurrentStaticAddress() = value;
+ }
+
+ DWORD GetStaticValue32()
+ {
+ WRAPPER_NO_CONTRACT;
+ return *(DWORD*)GetCurrentStaticAddress();
+ }
+
+ VOID SetStaticValue32(DWORD dwValue)
+ {
+ WRAPPER_NO_CONTRACT;
+ *(DWORD*)GetCurrentStaticAddress() = dwValue;
+ }
+
+ USHORT GetStaticValue16()
+ {
+ WRAPPER_NO_CONTRACT;
+ return *(USHORT*)GetCurrentStaticAddress();
+ }
+
+ VOID SetStaticValue16(DWORD dwValue)
+ {
+ WRAPPER_NO_CONTRACT;
+ *(USHORT*)GetCurrentStaticAddress() = (USHORT)dwValue;
+ }
+
+ BYTE GetStaticValue8()
+ {
+ WRAPPER_NO_CONTRACT;
+ return *(BYTE*)GetCurrentStaticAddress();
+ }
+
+ VOID SetStaticValue8(DWORD dwValue)
+ {
+ WRAPPER_NO_CONTRACT;
+ *(BYTE*)GetCurrentStaticAddress() = (BYTE)dwValue;
+ }
+
+ __int64 GetStaticValue64()
+ {
+ WRAPPER_NO_CONTRACT;
+ return *(__int64*)GetCurrentStaticAddress();
+ }
+
+ VOID SetStaticValue64(__int64 qwValue)
+ {
+ WRAPPER_NO_CONTRACT;
+ *(__int64*)GetCurrentStaticAddress() = qwValue;
+ }
+
+ void* GetCurrentStaticAddress()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ _ASSERTE(IsStatic());
+
+#ifdef FEATURE_REMOTING
+ if (IsContextStatic())
+ return Context::GetStaticFieldAddress(this);
+ else
+#endif
+ if (IsThreadStatic())
+ {
+ return Thread::GetStaticFieldAddress(this);
+ }
+ else {
+ PTR_BYTE base = 0;
+ if (!IsRVA()) // for RVA the base is ignored
+ base = GetBase();
+ return GetStaticAddress((void *)dac_cast<TADDR>(base));
+ }
+ }
+
+ VOID CheckRunClassInitThrowing()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ GetEnclosingMethodTable()->CheckRunClassInitThrowing();
+ }
+#endif //DACCESS_COMPILE
+#endif // !BINDER
+
+#ifdef BINDER
+ MdilModule *GetModule()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return GetApproxEnclosingMethodTable()->GetModule();
+ }
+
+ MdilModule *GetLoaderModule()
+ {
+ WRAPPER_NO_CONTRACT;
+
+        // FieldDescs are currently always saved into the same module as their
+ // corresponding method table.
+ return GetApproxEnclosingMethodTable()->GetLoaderModule();
+ }
+#else //!BINDER
+ Module *GetModule()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return GetApproxEnclosingMethodTable()->GetModule();
+ }
+
+ BOOL IsZapped()
+ {
+ WRAPPER_NO_CONTRACT;
+
+        // FieldDescs are currently always saved into the same module as their
+ // corresponding method table.
+ return GetApproxEnclosingMethodTable()->IsZapped();
+ }
+
+ Module *GetLoaderModule()
+ {
+ WRAPPER_NO_CONTRACT;
+
+        // FieldDescs are currently always saved into the same module as their
+ // corresponding method table.
+ return GetApproxEnclosingMethodTable()->GetLoaderModule();
+ }
+
+ void GetSig(PCCOR_SIGNATURE *ppSig, DWORD *pcSig)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ if (FAILED(GetMDImport()->GetSigOfFieldDef(GetMemberDef(), pcSig, ppSig)))
+ { // Class loader already asked for signature, so this should always succeed (unless there's a
+ // bug or a new code path)
+ _ASSERTE(!"If this ever fires, then this method should return HRESULT");
+ *ppSig = NULL;
+ *pcSig = 0;
+ }
+ }
+
+ SigPointer GetSigPointer()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+
+ GetSig(&pSig, &cSig);
+
+ return SigPointer(pSig, cSig);
+ }
+
+ // This is slow (uses MetaData), don't use it!
+ LPCUTF8 GetName()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ LPCSTR szName;
+ IfFailThrow(GetMDImport()->GetNameOfFieldDef(GetMemberDef(), &szName));
+ _ASSERTE(szName != NULL);
+ return szName;
+ }
+ // This is slow (uses MetaData), don't use it!
+ __checkReturn
+ HRESULT GetName_NoThrow(LPCUTF8 *pszName)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ return GetMDImport()->GetNameOfFieldDef(GetMemberDef(), pszName);
+ }
+
+ void PrecomputeNameHash();
+ BOOL MightHaveName(ULONG nameHashValue);
+
+ // <TODO>@TODO: </TODO>This is slow, don't use it!
+ DWORD GetAttributes()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ DWORD dwAttributes;
+ if (FAILED(GetMDImport()->GetFieldDefProps(GetMemberDef(), &dwAttributes)))
+ { // Class loader already asked for attributes, so this should always succeed (unless there's a
+ // bug or a new code path)
+ _ASSERTE(!"If this ever fires, then this method should return HRESULT");
+ return 0;
+ }
+ return dwAttributes;
+ }
+
+ // Mini-Helpers
+ DWORD IsPublic()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return IsFdPublic(GetFieldProtection());
+ }
+
+ DWORD IsProtected()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsFdFamily(GetFieldProtection());
+ }
+
+ DWORD IsPrivate()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return IsFdPrivate(GetFieldProtection());
+ }
+
+ BOOL IsNotSerialized()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = GetApproxEnclosingMethodTable();
+ if (pMT->IsSerializable() && !IsStatic())
+ return pMT->IsFieldNotSerialized(pMT->GetIndexForFieldDesc(this));
+ return IsFdNotSerialized(GetAttributes());
+ }
+
+ // Only safe to call this for non-static fields on serializable types.
+ BOOL IsOptionallySerialized()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(!IsStatic() && GetApproxEnclosingMethodTable()->IsSerializable());
+
+ MethodTable *pMT = GetApproxEnclosingMethodTable();
+ return pMT->IsFieldOptionallySerialized(pMT->GetIndexForFieldDesc(this));
+ }
+
+ IMDInternalImport *GetMDImport()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return GetModule()->GetMDImport();
+ }
+
+#ifndef DACCESS_COMPILE
+ IMetaDataImport *GetRWImporter()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return GetModule()->GetRWImporter();
+ }
+#endif // DACCESS_COMPILE
+
+ TypeHandle LookupFieldTypeHandle(ClassLoadLevel level = CLASS_LOADED, BOOL dropGenericArgumentLevel = FALSE);
+
+ TypeHandle LookupApproxFieldTypeHandle()
+ {
+ WRAPPER_NO_CONTRACT;
+ return LookupFieldTypeHandle(CLASS_LOAD_APPROXPARENTS, TRUE);
+ }
+
+ // Instance FieldDesc can be shared between generic instantiations. So List<String>._items
+ // is really the same as List<__Canon>._items. Hence, the FieldDesc itself
+ // cannot know the exact field type. This function returns the approximate field type.
+    // For example, this will return "__Canon[]" for List<String>._items.
+ TypeHandle GetFieldTypeHandleThrowing(ClassLoadLevel level = CLASS_LOADED, BOOL dropGenericArgumentLevel = FALSE);
+
+ TypeHandle GetApproxFieldTypeHandleThrowing()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetFieldTypeHandleThrowing(CLASS_LOAD_APPROXPARENTS, TRUE);
+ }
+#endif // !BINDER
+
+ // Given a type handle of an object and a method that comes from some
+ // superclass of the class of that object, find the instantiation of
+ // that superclass, i.e. the class instantiation which will be relevant
+ // to interpreting the signature of the method. The type handle of
+ // the object does not need to be given in all circumstances, in
+ // particular it is only needed for FieldDescs pFD that
+ // return true for pFD->GetApproxEnclosingMethodTable()->IsSharedByGenericInstantiations().
+ // In other cases it is allowed to be null and will be ignored.
+ //
+ // Will return NULL if the field is not in a generic class.
+ Instantiation GetExactClassInstantiation(TypeHandle possibleObjType);
+
+ // Instance FieldDesc can be shared between generic instantiations. So List<String>._items
+ // is really the same as List<__Canon>._items. Hence, the FieldDesc itself
+ // cannot know the exact field type. You need to specify the owner
+ // like List<String> in order to get the exact type which would be "String[]"
+ TypeHandle GetExactFieldType(TypeHandle owner);
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ {
+ SUPPORTS_DAC;
+ DAC_ENUM_DTHIS();
+ }
+#endif
+
+#ifndef DACCESS_COMPILE
+ REFLECTFIELDREF GetStubFieldInfo();
+#endif
+};
+
+#ifdef BINDER
+inline VOID FieldDesc::Init(mdFieldDef mb, CorElementType FieldType, DWORD dwMemberAttrs, BOOL fIsStatic, BOOL fIsRVA, BOOL fIsThreadLocal, BOOL fIsContextLocal, LPCSTR pszFieldName)
+{
+
+ LIMITED_METHOD_CONTRACT;
+
+ // We allow only a subset of field types here - all objects must be set to TYPE_CLASS
+ // By-value classes are ELEMENT_TYPE_VALUETYPE
+ _ASSERTE(
+ FieldType == ELEMENT_TYPE_I1 ||
+ FieldType == ELEMENT_TYPE_BOOLEAN ||
+ FieldType == ELEMENT_TYPE_U1 ||
+ FieldType == ELEMENT_TYPE_I2 ||
+ FieldType == ELEMENT_TYPE_U2 ||
+ FieldType == ELEMENT_TYPE_CHAR ||
+ FieldType == ELEMENT_TYPE_I4 ||
+ FieldType == ELEMENT_TYPE_U4 ||
+ FieldType == ELEMENT_TYPE_I8 ||
+ FieldType == ELEMENT_TYPE_I ||
+ FieldType == ELEMENT_TYPE_U ||
+ FieldType == ELEMENT_TYPE_U8 ||
+ FieldType == ELEMENT_TYPE_R4 ||
+ FieldType == ELEMENT_TYPE_R8 ||
+ FieldType == ELEMENT_TYPE_CLASS ||
+ FieldType == ELEMENT_TYPE_VALUETYPE ||
+ FieldType == ELEMENT_TYPE_PTR ||
+ FieldType == ELEMENT_TYPE_FNPTR
+ );
+ _ASSERTE(fIsStatic || (!fIsRVA && !fIsThreadLocal && !fIsContextLocal));
+ _ASSERTE(fIsRVA + fIsThreadLocal + fIsContextLocal <= 1);
+
+ m_mb = RidFromToken(mb);
+ m_type = FieldType;
+ m_prot = fdFieldAccessMask & dwMemberAttrs;
+ m_isStatic = fIsStatic != 0;
+ m_isRVA = fIsRVA != 0;
+ m_isThreadLocal = fIsThreadLocal != 0;
+#ifdef FEATURE_REMOTING
+ m_isContextLocal = fIsContextLocal != 0;
+#endif
+
+#ifdef _DEBUG
+ m_isDangerousAppDomainAgileField = 0;
+ m_debugName = (LPUTF8)pszFieldName;
+#endif
+ _ASSERTE(GetMemberDef() == mb); // no truncation
+ _ASSERTE(GetFieldType() == FieldType);
+ _ASSERTE(GetFieldProtection() == (fdFieldAccessMask & dwMemberAttrs));
+ _ASSERTE((BOOL) IsStatic() == (fIsStatic != 0));
+}
+#endif // BINDER
+
+#endif // _FIELD_H_
+
diff --git a/src/vm/fieldmarshaler.cpp b/src/vm/fieldmarshaler.cpp
new file mode 100644
index 0000000000..4bd9388b90
--- /dev/null
+++ b/src/vm/fieldmarshaler.cpp
@@ -0,0 +1,4751 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: FieldMarshaler.cpp
+//
+
+//
+
+
+#include "common.h"
+#include "vars.hpp"
+#include "class.h"
+#include "ceeload.h"
+#include "excep.h"
+#include "fieldmarshaler.h"
+#include "field.h"
+#include "frames.h"
+#include "dllimport.h"
+#include "comdelegate.h"
+#include "eeconfig.h"
+#include "comdatetime.h"
+#include "olevariant.h"
+#include <cor.h>
+#include <corpriv.h>
+#include <corerror.h>
+#include "sigformat.h"
+#include "marshalnative.h"
+#include "typeparse.h"
+#ifdef FEATURE_COMINTEROP
+#include <winstring.h>
+#endif // FEATURE_COMINTEROP
+
+// forward declarations
+BOOL CheckForPrimitiveType(CorElementType elemType, CQuickArray<WCHAR> *pStrPrimitiveType);
+TypeHandle ArraySubTypeLoadWorker(const SString &strUserDefTypeName, Assembly* pAssembly);
+TypeHandle GetFieldTypeHandleWorker(MetaSig *pFieldSig);
+
+
+//=======================================================================
+// A database of NFT types.
+//=======================================================================
+struct NFTDataBaseEntry
+{
+ UINT32 m_cbNativeSize; // native size of field (0 if not constant)
+ bool m_fWinRTSupported; // true if the field marshaler is supported for WinRT
+};
+
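+// Each DEFINE_NFT(name, nativesize, fWinRTSupported) entry in nsenums.h expands
+// to one { nativesize, fWinRTSupported } element below, indexed by NFT value.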
+static const NFTDataBaseEntry NFTDataBase[] =
+{
+ #undef DEFINE_NFT
+ #define DEFINE_NFT(name, nativesize, fWinRTSupported) { nativesize, fWinRTSupported },
+ #include "nsenums.h"
+};
+
+
+//=======================================================================
+// This is invoked from the class loader while building the internal structures for a type
+// This function should check if explicit layout metadata exists.
+//
+// Returns:
+// TRUE - yes, there's layout metadata
+// FALSE - no, there's no layout.
+// fail - throws a typeload exception
+//
+// If TRUE,
+// *pNLType gets set to nltAnsi or nltUnicode
+// *pPackingSize declared packing size
+// *pfExplicitoffsets offsets explicit in metadata or computed?
+//=======================================================================
+BOOL HasLayoutMetadata(Assembly* pAssembly, IMDInternalImport *pInternalImport, mdTypeDef cl, MethodTable*pParentMT, BYTE *pPackingSize, BYTE *pNLTType, BOOL *pfExplicitOffsets)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pInternalImport));
+ PRECONDITION(CheckPointer(pPackingSize));
+ PRECONDITION(CheckPointer(pNLTType));
+ PRECONDITION(CheckPointer(pfExplicitOffsets));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ ULONG clFlags;
+#ifdef _DEBUG
+ clFlags = 0xcccccccc;
+#endif
+
+ if (FAILED(pInternalImport->GetTypeDefProps(cl, &clFlags, NULL)))
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ if (IsTdAutoLayout(clFlags))
+ {
+ // <BUGNUM>workaround for B#104780 - VC fails to set SequentialLayout on some classes
+ // with ClassSize. Too late to fix compiler for V1.
+ //
+ // To compensate, we treat AutoLayout classes as Sequential if they
+ // meet all of the following criteria:
+ //
+ // - ClassSize present and nonzero.
+ // - No instance fields declared
+ // - Base class is System.ValueType.
+ //</BUGNUM>
+ ULONG cbTotalSize = 0;
+ if (SUCCEEDED(pInternalImport->GetClassTotalSize(cl, &cbTotalSize)) && cbTotalSize != 0)
+ {
+ if (pParentMT && pParentMT->IsValueTypeClass())
+ {
+ MDEnumHolder hEnumField(pInternalImport);
+ if (SUCCEEDED(pInternalImport->EnumInit(mdtFieldDef, cl, &hEnumField)))
+ {
+ ULONG numFields = pInternalImport->EnumGetCount(&hEnumField);
+ if (numFields == 0)
+ {
+ *pfExplicitOffsets = FALSE;
+ *pNLTType = nltAnsi;
+ *pPackingSize = 1;
+ return TRUE;
+ }
+ }
+ }
+ }
+
+ return FALSE;
+ }
+ else if (IsTdSequentialLayout(clFlags))
+ {
+ *pfExplicitOffsets = FALSE;
+ }
+ else if (IsTdExplicitLayout(clFlags))
+ {
+ *pfExplicitOffsets = TRUE;
+ }
+ else
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ // We now know this class has seq. or explicit layout. Ensure the parent does too.
+ if (pParentMT && !(pParentMT->IsObjectClass() || pParentMT->IsValueTypeClass()) && !(pParentMT->HasLayout()))
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
+
+ if (IsTdAnsiClass(clFlags))
+ {
+ *pNLTType = nltAnsi;
+ }
+ else if (IsTdUnicodeClass(clFlags))
+ {
+ *pNLTType = nltUnicode;
+ }
+ else if (IsTdAutoClass(clFlags))
+ {
+ // We no longer support Win9x so TdAuto always maps to Unicode.
+ *pNLTType = nltUnicode;
+ }
+ else
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ DWORD dwPackSize;
+ hr = pInternalImport->GetClassPackSize(cl, &dwPackSize);
+ if (FAILED(hr) || dwPackSize == 0)
+ dwPackSize = DEFAULT_PACKING_SIZE;
+
+ // This has to be reduced to a BYTE value, so we had better make sure it fits. If
+ // not, we'll throw an exception instead of trying to munge the value to what we
+ // think the user might want.
+ if (!FitsInU1((UINT64)(dwPackSize)))
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ *pPackingSize = (BYTE)dwPackSize;
+
+ return TRUE;
+}
+
+typedef enum
+{
+ ParseNativeTypeFlag_None = 0x00,
+ ParseNativeTypeFlag_IsAnsi = 0x01,
+
+#ifdef FEATURE_COMINTEROP
+ ParseNativeTypeFlag_IsWinRT = 0x02,
+#endif // FEATURE_COMINTEROP
+}
+ParseNativeTypeFlags;
+
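+// A plain (non-class) enum does not support |= out of the box, so define the
+// operator here; Phase 1 below builds up flags with expressions such as
+// flags |= ParseNativeTypeFlag_IsAnsi.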
+inline ParseNativeTypeFlags operator|=(ParseNativeTypeFlags& lhs, ParseNativeTypeFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<ParseNativeTypeFlags>(lhs | rhs);
+ return lhs;
+}
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+VOID ParseNativeType(Module* pModule,
+ PCCOR_SIGNATURE pCOMSignature,
+ DWORD cbCOMSignature,
+ ParseNativeTypeFlags flags,
+ LayoutRawFieldInfo* pfwalk,
+ PCCOR_SIGNATURE pNativeType,
+ ULONG cbNativeType,
+ IMDInternalImport* pInternalImport,
+ mdTypeDef cl,
+ const SigTypeContext * pTypeContext,
+ BOOL *pfDisqualifyFromManagedSequential // set to TRUE if needed (never set to FALSE, it may come in as TRUE!)
+#ifdef _DEBUG
+ ,
+ LPCUTF8 szNamespace,
+ LPCUTF8 szClassName,
+ LPCUTF8 szFieldName
+#endif
+ )
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pfwalk));
+ }
+ CONTRACTL_END;
+
+ // Make sure that there is no junk in the unused part of the field marshaler space (ngen image determinism)
+ ZeroMemory(&pfwalk->m_FieldMarshaler, MAXFIELDMARSHALERSIZE);
+
+#define INITFIELDMARSHALER(nfttype, fmtype, args) \
+do \
+{ \
+ static_assert_no_msg(sizeof(fmtype) <= MAXFIELDMARSHALERSIZE); \
+ pfwalk->m_nft = (nfttype); \
+ new ( &(pfwalk->m_FieldMarshaler) ) fmtype args; \
+ ((FieldMarshaler*)&(pfwalk->m_FieldMarshaler))->SetNStructFieldType(nfttype); \
+} while(0)
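+// Illustrative sketch (not part of the original source): an invocation such as
+//     INITFIELDMARSHALER(NFT_COPY4, FieldMarshaler_Copy4, ());
+// records the field type in pfwalk->m_nft and placement-news a FieldMarshaler_Copy4
+// into the fixed-size m_FieldMarshaler buffer, which is why sizeof(fmtype) is
+// statically checked against MAXFIELDMARSHALERSIZE first.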
+
+ BOOL fAnsi = (flags & ParseNativeTypeFlag_IsAnsi);
+#ifdef FEATURE_COMINTEROP
+ BOOL fIsWinRT = (flags & ParseNativeTypeFlag_IsWinRT);
+#endif // FEATURE_COMINTEROP
+ CorElementType corElemType = ELEMENT_TYPE_END;
+ PCCOR_SIGNATURE pNativeTypeStart = pNativeType;
+ ULONG cbNativeTypeStart = cbNativeType;
+ CorNativeType ntype;
+ BOOL fDefault;
+ BOOL BestFit;
+ BOOL ThrowOnUnmappableChar;
+
+ pfwalk->m_nft = NFT_NONE;
+
+ if (cbNativeType == 0)
+ {
+ ntype = NATIVE_TYPE_DEFAULT;
+ fDefault = TRUE;
+ }
+ else
+ {
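+        // The first byte of the MarshalAs blob is the top-level native type;
+        // any remaining bytes carry type-specific data (e.g. a fixed-array size).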
+ ntype = (CorNativeType) *( ((BYTE*&)pNativeType)++ );
+ cbNativeType--;
+ fDefault = (ntype == NATIVE_TYPE_DEFAULT);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (fIsWinRT && !fDefault)
+ {
+ // Do not allow any MarshalAs in WinRT scenarios - marshaling is fully described by the field type.
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_WINRT_MARSHAL_AS));
+ }
+#endif // FEATURE_COMINTEROP
+
+ // Setup the signature and normalize
+ MetaSig fsig(pCOMSignature, cbCOMSignature, pModule, pTypeContext, MetaSig::sigField);
+ corElemType = fsig.NextArgNormalized();
+
+
+ if (!(*pfDisqualifyFromManagedSequential))
+ {
+ // This type may qualify for ManagedSequential. Collect managed size and alignment info.
+ if (CorTypeInfo::IsPrimitiveType(corElemType))
+ {
+ pfwalk->m_managedSize = ((UINT32)CorTypeInfo::Size(corElemType)); // Safe cast - no primitive type is larger than 4gb!
+ pfwalk->m_managedAlignmentReq = pfwalk->m_managedSize;
+ }
+ else if (corElemType == ELEMENT_TYPE_PTR)
+ {
+ pfwalk->m_managedSize = sizeof(LPVOID);
+ pfwalk->m_managedAlignmentReq = sizeof(LPVOID);
+ }
+ else if (corElemType == ELEMENT_TYPE_VALUETYPE)
+ {
+ TypeHandle pNestedType = fsig.GetLastTypeHandleThrowing(ClassLoader::LoadTypes,
+ CLASS_LOAD_APPROXPARENTS,
+ TRUE);
+ if (pNestedType.GetMethodTable()->IsManagedSequential())
+ {
+ pfwalk->m_managedSize = (pNestedType.GetMethodTable()->GetNumInstanceFieldBytes());
+
+ _ASSERTE(pNestedType.GetMethodTable()->HasLayout()); // If it is ManagedSequential(), it also has Layout but doesn't hurt to check before we do a cast!
+ pfwalk->m_managedAlignmentReq = pNestedType.GetMethodTable()->GetLayoutInfo()->m_ManagedLargestAlignmentRequirementOfAllMembers;
+ }
+ else
+ {
+ *pfDisqualifyFromManagedSequential = TRUE;
+ }
+ }
+ else
+ {
+ // No other type permitted for ManagedSequential.
+ *pfDisqualifyFromManagedSequential = TRUE;
+ }
+ }
+
+#ifdef _TARGET_X86_
+    // Normalization might have put corElemType and ntype out of sync, which can
+    // result in problems with a non-default ntype being validated against the
+    // normalized primitive corElemType.
+ //
+ VerifyAndAdjustNormalizedType(pModule, fsig.GetArgProps(), fsig.GetSigTypeContext(), &corElemType, &ntype);
+
+ fDefault = (ntype == NATIVE_TYPE_DEFAULT);
+#endif // _TARGET_X86_
+
+ CorElementType sigElemType;
+ IfFailThrow(fsig.GetArgProps().PeekElemType(&sigElemType));
+ if ((sigElemType == ELEMENT_TYPE_GENERICINST || sigElemType == ELEMENT_TYPE_VAR) && corElemType == ELEMENT_TYPE_CLASS)
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_GENERICS_RESTRICTION));
+ }
+ else switch (corElemType)
+ {
+ case ELEMENT_TYPE_CHAR:
+ if (fDefault)
+ {
+ if (fAnsi)
+ {
+ ReadBestFitCustomAttribute(pInternalImport, cl, &BestFit, &ThrowOnUnmappableChar);
+ INITFIELDMARSHALER(NFT_ANSICHAR, FieldMarshaler_Ansi, (BestFit, ThrowOnUnmappableChar));
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_COPY2, FieldMarshaler_Copy2, ());
+ }
+ }
+ else if (ntype == NATIVE_TYPE_I1 || ntype == NATIVE_TYPE_U1)
+ {
+ ReadBestFitCustomAttribute(pInternalImport, cl, &BestFit, &ThrowOnUnmappableChar);
+ INITFIELDMARSHALER(NFT_ANSICHAR, FieldMarshaler_Ansi, (BestFit, ThrowOnUnmappableChar));
+ }
+ else if (ntype == NATIVE_TYPE_I2 || ntype == NATIVE_TYPE_U2)
+ {
+ INITFIELDMARSHALER(NFT_COPY2, FieldMarshaler_Copy2, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_CHAR));
+ }
+ break;
+
+ case ELEMENT_TYPE_BOOLEAN:
+ if (fDefault)
+ {
+#ifdef FEATURE_COMINTEROP
+ if (fIsWinRT)
+ {
+ INITFIELDMARSHALER(NFT_CBOOL, FieldMarshaler_CBool, ());
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ INITFIELDMARSHALER(NFT_WINBOOL, FieldMarshaler_WinBool, ());
+ }
+ }
+ else if (ntype == NATIVE_TYPE_BOOLEAN)
+ {
+ INITFIELDMARSHALER(NFT_WINBOOL, FieldMarshaler_WinBool, ());
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (ntype == NATIVE_TYPE_VARIANTBOOL)
+ {
+ INITFIELDMARSHALER(NFT_VARIANTBOOL, FieldMarshaler_VariantBool, ());
+ }
+#endif // FEATURE_COMINTEROP
+ else if (ntype == NATIVE_TYPE_U1 || ntype == NATIVE_TYPE_I1)
+ {
+ INITFIELDMARSHALER(NFT_CBOOL, FieldMarshaler_CBool, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_BOOLEAN));
+ }
+ break;
+
+
+ case ELEMENT_TYPE_I1:
+ if (fDefault || ntype == NATIVE_TYPE_I1 || ntype == NATIVE_TYPE_U1)
+ {
+ INITFIELDMARSHALER(NFT_COPY1, FieldMarshaler_Copy1, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_I1));
+ }
+ break;
+
+ case ELEMENT_TYPE_U1:
+ if (fDefault || ntype == NATIVE_TYPE_U1 || ntype == NATIVE_TYPE_I1)
+ {
+ INITFIELDMARSHALER(NFT_COPY1, FieldMarshaler_Copy1, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_I1));
+ }
+ break;
+
+ case ELEMENT_TYPE_I2:
+ if (fDefault || ntype == NATIVE_TYPE_I2 || ntype == NATIVE_TYPE_U2)
+ {
+ INITFIELDMARSHALER(NFT_COPY2, FieldMarshaler_Copy2, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_I2));
+ }
+ break;
+
+ case ELEMENT_TYPE_U2:
+ if (fDefault || ntype == NATIVE_TYPE_U2 || ntype == NATIVE_TYPE_I2)
+ {
+ INITFIELDMARSHALER(NFT_COPY2, FieldMarshaler_Copy2, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_I2));
+ }
+ break;
+
+ case ELEMENT_TYPE_I4:
+ if (fDefault || ntype == NATIVE_TYPE_I4 || ntype == NATIVE_TYPE_U4 || ntype == NATIVE_TYPE_ERROR)
+ {
+ INITFIELDMARSHALER(NFT_COPY4, FieldMarshaler_Copy4, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_I4));
+ }
+ break;
+
+ case ELEMENT_TYPE_U4:
+ if (fDefault || ntype == NATIVE_TYPE_U4 || ntype == NATIVE_TYPE_I4 || ntype == NATIVE_TYPE_ERROR)
+ {
+ INITFIELDMARSHALER(NFT_COPY4, FieldMarshaler_Copy4, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_I4));
+ }
+ break;
+
+ case ELEMENT_TYPE_I8:
+ if (fDefault || ntype == NATIVE_TYPE_I8 || ntype == NATIVE_TYPE_U8)
+ {
+ INITFIELDMARSHALER(NFT_COPY8, FieldMarshaler_Copy8, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_I8));
+ }
+ break;
+
+ case ELEMENT_TYPE_U8:
+ if (fDefault || ntype == NATIVE_TYPE_U8 || ntype == NATIVE_TYPE_I8)
+ {
+ INITFIELDMARSHALER(NFT_COPY8, FieldMarshaler_Copy8, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_I8));
+ }
+ break;
+
+ case ELEMENT_TYPE_I: //fallthru
+ case ELEMENT_TYPE_U:
+#ifdef FEATURE_COMINTEROP
+ if (fIsWinRT)
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE));
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ if (fDefault || ntype == NATIVE_TYPE_INT || ntype == NATIVE_TYPE_UINT)
+ {
+ if (sizeof(LPVOID)==4)
+ {
+ INITFIELDMARSHALER(NFT_COPY4, FieldMarshaler_Copy4, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_COPY8, FieldMarshaler_Copy8, ());
+ }
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_I));
+ }
+ break;
+
+ case ELEMENT_TYPE_R4:
+ if (fDefault || ntype == NATIVE_TYPE_R4)
+ {
+ INITFIELDMARSHALER(NFT_COPY4, FieldMarshaler_Copy4, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_R4));
+ }
+ break;
+
+ case ELEMENT_TYPE_R8:
+ if (fDefault || ntype == NATIVE_TYPE_R8)
+ {
+ INITFIELDMARSHALER(NFT_COPY8, FieldMarshaler_Copy8, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_R8));
+ }
+ break;
+
+ case ELEMENT_TYPE_PTR:
+#ifdef FEATURE_COMINTEROP
+ if (fIsWinRT)
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE));
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ if (fDefault)
+ {
+ switch (sizeof(LPVOID))
+ {
+ case 4:
+ INITFIELDMARSHALER(NFT_COPY4, FieldMarshaler_Copy4, ());
+ break;
+
+ case 8:
+ INITFIELDMARSHALER(NFT_COPY8, FieldMarshaler_Copy8, ());
+ break;
+
+ default:
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_BADMANAGED));
+ break;
+ }
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_PTR));
+ }
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ // This may cause a TypeLoadException, which we currently seem to have to swallow.
+ // This happens with structs that contain fields of class type where the class itself
+ // refers to the struct in a field.
+ TypeHandle thNestedType = GetFieldTypeHandleWorker(&fsig);
+ if (!thNestedType.GetMethodTable())
+ break;
+#ifdef FEATURE_COMINTEROP
+ if (fIsWinRT && sigElemType == ELEMENT_TYPE_GENERICINST)
+ {
+            // If this is a generic value type, let's see whether it is a Nullable<T>.
+            TypeHandle genType = fsig.GetLastTypeHandleThrowing();
+            if(genType != NULL && genType.GetMethodTable()->HasSameTypeDefAs(g_pNullableClass))
+            {
+                // The generic type is System.Nullable<T>.
+                // Extract the type argument and check that it is valid. It is invalid if:
+                // 1. It is not a value type.
+                // 2. It is a string.
+                // 3. It is an open type.
+ Instantiation inst = genType.GetMethodTable()->GetInstantiation();
+ MethodTable* typeArgMT = inst[0].GetMethodTable();
+ if (!typeArgMT->IsLegalNonArrayWinRTType())
+ {
+ // Type is not a valid WinRT value type.
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_NULLABLE_RESTRICTION));
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_WINDOWSFOUNDATIONIREFERENCE, FieldMarshaler_Nullable, (genType.GetMethodTable()));
+ }
+ break;
+ }
+ }
+#endif
+ if (fsig.IsClass(g_DateClassName))
+ {
+ if (fDefault || ntype == NATIVE_TYPE_STRUCT)
+ {
+ INITFIELDMARSHALER(NFT_DATE, FieldMarshaler_Date, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_DATETIME));
+ }
+ }
+ else if (fsig.IsClass(g_DecimalClassName))
+ {
+ if (fDefault || ntype == NATIVE_TYPE_STRUCT)
+ {
+ INITFIELDMARSHALER(NFT_DECIMAL, FieldMarshaler_Decimal, ());
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (ntype == NATIVE_TYPE_CURRENCY)
+ {
+ INITFIELDMARSHALER(NFT_CURRENCY, FieldMarshaler_Currency, ());
+ }
+#endif // FEATURE_COMINTEROP
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHALFIELD_DECIMAL));
+ }
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (fsig.IsClass(g_DateTimeOffsetClassName))
+ {
+ if (fDefault || ntype == NATIVE_TYPE_STRUCT)
+ {
+ INITFIELDMARSHALER(NFT_DATETIMEOFFSET, FieldMarshaler_DateTimeOffset, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_DATETIMEOFFSET));
+ }
+ }
+ else if (fIsWinRT && !thNestedType.GetMethodTable()->IsLegalNonArrayWinRTType())
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE));
+ }
+#endif // FEATURE_COMINTEROP
+ else if (thNestedType.GetMethodTable()->HasLayout())
+ {
+ if (fDefault || ntype == NATIVE_TYPE_STRUCT)
+ {
+ if (IsStructMarshalable(thNestedType))
+ {
+ INITFIELDMARSHALER(NFT_NESTEDVALUECLASS, FieldMarshaler_NestedValueClass, (thNestedType.GetMethodTable()));
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_NOTMARSHALABLE));
+ }
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_VALUETYPE));
+ }
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_NOTMARSHALABLE));
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_CLASS:
+ {
+ // This may cause a TypeLoadException, which we currently seem to have to swallow.
+ // This happens with structs that contain fields of class type where the class itself
+ // refers to the struct in a field.
+ TypeHandle thNestedType = GetFieldTypeHandleWorker(&fsig);
+ if (!thNestedType.GetMethodTable())
+ break;
+
+ if (thNestedType.GetMethodTable()->IsObjectClass())
+ {
+#ifdef FEATURE_COMINTEROP
+ if (fDefault || ntype == NATIVE_TYPE_IUNKNOWN || ntype == NATIVE_TYPE_IDISPATCH || ntype == NATIVE_TYPE_INTF)
+ {
+ // Only NATIVE_TYPE_IDISPATCH maps to an IDispatch based interface pointer.
+ DWORD dwFlags = ItfMarshalInfo::ITF_MARSHAL_USE_BASIC_ITF;
+ if (ntype == NATIVE_TYPE_IDISPATCH)
+ {
+ dwFlags |= ItfMarshalInfo::ITF_MARSHAL_DISP_ITF;
+ }
+ INITFIELDMARSHALER(NFT_INTERFACE, FieldMarshaler_Interface, (NULL, NULL, dwFlags));
+ }
+ else if (ntype == NATIVE_TYPE_STRUCT)
+ {
+ INITFIELDMARSHALER(NFT_VARIANT, FieldMarshaler_Variant, ());
+ }
+#else // FEATURE_COMINTEROP
+ if (fDefault || ntype == NATIVE_TYPE_IUNKNOWN || ntype == NATIVE_TYPE_IDISPATCH || ntype == NATIVE_TYPE_INTF)
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_OBJECT_TO_ITF_NOT_SUPPORTED));
+ }
+ else if (ntype == NATIVE_TYPE_STRUCT)
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_OBJECT_TO_VARIANT_NOT_SUPPORTED));
+ }
+#endif // FEATURE_COMINTEROP
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHALFIELD_OBJECT));
+ }
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (ntype == NATIVE_TYPE_INTF || thNestedType.IsInterface())
+ {
+ if (fIsWinRT && !thNestedType.GetMethodTable()->IsLegalNonArrayWinRTType())
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE));
+ }
+ else
+ {
+ ItfMarshalInfo itfInfo;
+ if (FAILED(MarshalInfo::TryGetItfMarshalInfo(thNestedType, FALSE, FALSE, &itfInfo)))
+ break;
+
+ INITFIELDMARSHALER(NFT_INTERFACE, FieldMarshaler_Interface, (itfInfo.thClass.GetMethodTable(), itfInfo.thItf.GetMethodTable(), itfInfo.dwFlags));
+ }
+ }
+#else // FEATURE_COMINTEROP
+ else if (ntype == NATIVE_TYPE_INTF || thNestedType.IsInterface())
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_OBJECT_TO_ITF_NOT_SUPPORTED));
+ }
+#endif // FEATURE_COMINTEROP
+ else if (ntype == NATIVE_TYPE_CUSTOMMARSHALER)
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHALFIELD_NOCUSTOMMARSH));
+ }
+ else if (thNestedType == TypeHandle(g_pStringClass))
+ {
+ if (fDefault)
+ {
+#ifdef FEATURE_COMINTEROP
+ if (fIsWinRT)
+ {
+ INITFIELDMARSHALER(NFT_HSTRING, FieldMarshaler_HSTRING, ());
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ if (fAnsi)
+ {
+ ReadBestFitCustomAttribute(pInternalImport, cl, &BestFit, &ThrowOnUnmappableChar);
+ INITFIELDMARSHALER(NFT_STRINGANSI, FieldMarshaler_StringAnsi, (BestFit, ThrowOnUnmappableChar));
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_STRINGUNI, FieldMarshaler_StringUni, ());
+ }
+ }
+ else
+ {
+ switch (ntype)
+ {
+ case NATIVE_TYPE_LPSTR:
+ ReadBestFitCustomAttribute(pInternalImport, cl, &BestFit, &ThrowOnUnmappableChar);
+ INITFIELDMARSHALER(NFT_STRINGANSI, FieldMarshaler_StringAnsi, (BestFit, ThrowOnUnmappableChar));
+ break;
+
+ case NATIVE_TYPE_LPWSTR:
+ INITFIELDMARSHALER(NFT_STRINGUNI, FieldMarshaler_StringUni, ());
+ break;
+
+ case NATIVE_TYPE_LPTSTR:
+ // We no longer support Win9x so LPTSTR always maps to a Unicode string.
+ INITFIELDMARSHALER(NFT_STRINGUNI, FieldMarshaler_StringUni, ());
+ break;
+#ifdef FEATURE_COMINTEROP
+ case NATIVE_TYPE_BSTR:
+ INITFIELDMARSHALER(NFT_BSTR, FieldMarshaler_BSTR, ());
+ break;
+
+ case NATIVE_TYPE_HSTRING:
+ INITFIELDMARSHALER(NFT_HSTRING, FieldMarshaler_HSTRING, ());
+ break;
+#endif // FEATURE_COMINTEROP
+ case NATIVE_TYPE_FIXEDSYSSTRING:
+ {
+ ULONG nchars;
+ ULONG udatasize = CorSigUncompressedDataSize(pNativeType);
+
+ if (cbNativeType < udatasize)
+ break;
+
+ nchars = CorSigUncompressData(pNativeType);
+ cbNativeType -= udatasize;
+
+ if (nchars == 0)
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHALFIELD_ZEROLENGTHFIXEDSTRING));
+ break;
+ }
+
+ if (fAnsi)
+ {
+ ReadBestFitCustomAttribute(pInternalImport, cl, &BestFit, &ThrowOnUnmappableChar);
+ INITFIELDMARSHALER(NFT_FIXEDSTRINGANSI, FieldMarshaler_FixedStringAnsi, (nchars, BestFit, ThrowOnUnmappableChar));
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_FIXEDSTRINGUNI, FieldMarshaler_FixedStringUni, (nchars));
+ }
+ }
+ break;
+
+ default:
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHALFIELD_STRING));
+ break;
+ }
+ }
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (fIsWinRT && fsig.IsClass(g_TypeClassName))
+    { // Note: If the System.Type field is in a non-WinRT struct, do not change the previously shipped behavior
+ INITFIELDMARSHALER(NFT_SYSTEMTYPE, FieldMarshaler_SystemType, ());
+ }
+ else if (fIsWinRT && fsig.IsClass(g_ExceptionClassName)) // Marshal Windows.Foundation.HResult as System.Exception for WinRT.
+ {
+ INITFIELDMARSHALER(NFT_WINDOWSFOUNDATIONHRESULT, FieldMarshaler_Exception, ());
+ }
+#endif //FEATURE_COMINTEROP
+#ifdef FEATURE_CLASSIC_COMINTEROP
+ else if (thNestedType.GetMethodTable() == g_pArrayClass)
+ {
+ if (ntype == NATIVE_TYPE_SAFEARRAY)
+ {
+ NativeTypeParamInfo ParamInfo;
+ CorElementType etyp = ELEMENT_TYPE_OBJECT;
+ MethodTable* pMT = NULL;
+ VARTYPE vtElement = VT_EMPTY;
+
+                // Compat: If no SAFEARRAY user-defined subtype was specified, we assume typeof(Object).
+ TypeHandle thElement = TypeHandle(g_pObjectClass);
+
+ // If we have no native type data, assume default behavior
+ if (S_OK != CheckForCompressedData(pNativeTypeStart, pNativeType, cbNativeTypeStart))
+ {
+ INITFIELDMARSHALER(NFT_SAFEARRAY, FieldMarshaler_SafeArray, (VT_EMPTY, NULL));
+ break;
+ }
+
+ vtElement = (VARTYPE) (CorSigUncompressData(/*modifies*/pNativeType));
+
+                // Extract the name of the record type, if one was specified.
+ if (S_OK == CheckForCompressedData(pNativeTypeStart, pNativeType, cbNativeTypeStart))
+ {
+ ULONG strLen;
+ if (FAILED(CPackedLen::SafeGetData(pNativeType, pNativeTypeStart + cbNativeTypeStart, &strLen, &pNativeType)))
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_BADMETADATA));
+ break;
+ }
+ if (strLen > 0)
+ {
+ // Load the type. Use a SString for the string since we need to NULL terminate the string
+ // that comes from the metadata.
+ StackSString safeArrayUserDefTypeName(SString::Utf8, (LPCUTF8)pNativeType, strLen);
+ _ASSERTE((ULONG)(pNativeType + strLen - pNativeTypeStart) == cbNativeTypeStart);
+
+ // Sadly this may cause a TypeLoadException, which we currently have to swallow.
+ // This happens with structs that contain fields of class type where the class itself
+ // refers to the struct in a field.
+ thElement = ArraySubTypeLoadWorker(safeArrayUserDefTypeName, pModule->GetAssembly());
+ if (thElement.IsNull())
+ break;
+ }
+ }
+
+ ArrayMarshalInfo arrayMarshalInfo(amiRuntime);
+ arrayMarshalInfo.InitForSafeArray(MarshalInfo::MARSHAL_SCENARIO_FIELD, thElement, vtElement, fAnsi);
+
+ if (!arrayMarshalInfo.IsValid())
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (arrayMarshalInfo.GetErrorResourceId()));
+ break;
+ }
+
+ INITFIELDMARSHALER(NFT_SAFEARRAY, FieldMarshaler_SafeArray, (arrayMarshalInfo.GetElementVT(), arrayMarshalInfo.GetElementTypeHandle().GetMethodTable()));
+ }
+ else if (ntype == NATIVE_TYPE_FIXEDARRAY)
+ {
+                // Check for the number of elements. This is required; fail if it is not present.
+ if (S_OK != CheckForCompressedData(pNativeTypeStart, pNativeType, cbNativeTypeStart))
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHALFIELD_FIXEDARRAY_NOSIZE));
+ break;
+ }
+
+ ULONG numElements = CorSigUncompressData(/*modifies*/pNativeType);
+
+ if (numElements == 0)
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHALFIELD_FIXEDARRAY_ZEROSIZE));
+ break;
+ }
+
+ // Since these always export to arrays of BSTRs, we don't need to fetch the native type.
+
+ // Compat: FixedArrays of System.Arrays map to fixed arrays of BSTRs.
+ INITFIELDMARSHALER(NFT_FIXEDARRAY, FieldMarshaler_FixedArray, (pInternalImport, cl, numElements, VT_BSTR, g_pStringClass));
+ }
+ }
+#endif // FEATURE_CLASSIC_COMINTEROP
+ else if (COMDelegate::IsDelegate(thNestedType.GetMethodTable()))
+ {
+ if (fDefault || ntype == NATIVE_TYPE_FUNC)
+ {
+ INITFIELDMARSHALER(NFT_DELEGATE, FieldMarshaler_Delegate, (thNestedType.GetMethodTable()));
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_DELEGATE));
+ }
+ }
+ else if (thNestedType.CanCastTo(TypeHandle(MscorlibBinder::GetClass(CLASS__SAFE_HANDLE))))
+ {
+ if (fDefault)
+ {
+ INITFIELDMARSHALER(NFT_SAFEHANDLE, FieldMarshaler_SafeHandle, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_SAFEHANDLE));
+ }
+ }
+ else if (thNestedType.CanCastTo(TypeHandle(MscorlibBinder::GetClass(CLASS__CRITICAL_HANDLE))))
+ {
+ if (fDefault)
+ {
+ INITFIELDMARSHALER(NFT_CRITICALHANDLE, FieldMarshaler_CriticalHandle, ());
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_CRITICALHANDLE));
+ }
+ }
+ else if (fsig.IsClass(g_StringBufferClassName))
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHALFIELD_NOSTRINGBUILDER));
+ }
+ else if (IsStructMarshalable(thNestedType))
+ {
+ if (fDefault || ntype == NATIVE_TYPE_STRUCT)
+ {
+ INITFIELDMARSHALER(NFT_NESTEDLAYOUTCLASS, FieldMarshaler_NestedLayoutClass, (thNestedType.GetMethodTable()));
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHALFIELD_LAYOUTCLASS));
+ }
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (fIsWinRT)
+ {
+ // no other reference types are allowed as field types in WinRT
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE));
+ }
+ else if (fDefault)
+ {
+ ItfMarshalInfo itfInfo;
+ if (FAILED(MarshalInfo::TryGetItfMarshalInfo(thNestedType, FALSE, FALSE, &itfInfo)))
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_BADMANAGED));
+ }
+ else
+ {
+ INITFIELDMARSHALER(NFT_INTERFACE, FieldMarshaler_Interface, (itfInfo.thClass.GetMethodTable(), itfInfo.thItf.GetMethodTable(), itfInfo.dwFlags));
+ }
+ }
+#endif // FEATURE_COMINTEROP
+ break;
+ }
+
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_ARRAY:
+ {
+#ifdef FEATURE_COMINTEROP
+ if (fIsWinRT)
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE));
+ break;
+ }
+#endif // FEATURE_COMINTEROP
+
+ // This may cause a TypeLoadException, which we currently seem to have to swallow.
+ // This happens with structs that contain fields of class type where the class itself
+ // refers to the struct in a field.
+ TypeHandle thArray = GetFieldTypeHandleWorker(&fsig);
+ if (thArray.IsNull() || !thArray.IsArray())
+ break;
+
+ TypeHandle thElement = thArray.AsArray()->GetArrayElementTypeHandle();
+ if (thElement.IsNull())
+ break;
+
+ if (ntype == NATIVE_TYPE_FIXEDARRAY)
+ {
+ CorNativeType elementNativeType = NATIVE_TYPE_DEFAULT;
+
+            // The size constant must be specified; if it isn't, the struct can't be marshalled.
+ if (S_OK != CheckForCompressedData(pNativeTypeStart, pNativeType, cbNativeTypeStart))
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHALFIELD_FIXEDARRAY_NOSIZE));
+ break;
+ }
+
+            // Read the size constant; if it's 0, the struct can't be marshalled.
+ ULONG numElements = CorSigUncompressData(pNativeType);
+ if (numElements == 0)
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHALFIELD_FIXEDARRAY_ZEROSIZE));
+ break;
+ }
+
+ // The array sub type is optional so extract it if specified.
+ if (S_OK == CheckForCompressedData(pNativeTypeStart, pNativeType, cbNativeTypeStart))
+ elementNativeType = (CorNativeType)CorSigUncompressData(pNativeType);
+
+ ArrayMarshalInfo arrayMarshalInfo(amiRuntime);
+ arrayMarshalInfo.InitForFixedArray(thElement, elementNativeType, fAnsi);
+
+ if (!arrayMarshalInfo.IsValid())
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (arrayMarshalInfo.GetErrorResourceId()));
+ break;
+ }
+
+ if (arrayMarshalInfo.GetElementVT() == VTHACK_ANSICHAR)
+ {
+ // We need to special case fixed sized arrays of ANSI chars since the OleVariant code
+ // that is used by the generic fixed size array marshaller doesn't support them
+ // properly.
+ ReadBestFitCustomAttribute(pInternalImport, cl, &BestFit, &ThrowOnUnmappableChar);
+ INITFIELDMARSHALER(NFT_FIXEDCHARARRAYANSI, FieldMarshaler_FixedCharArrayAnsi, (numElements, BestFit, ThrowOnUnmappableChar));
+ break;
+ }
+ else
+ {
+ VARTYPE elementVT = arrayMarshalInfo.GetElementVT();
+
+ INITFIELDMARSHALER(NFT_FIXEDARRAY, FieldMarshaler_FixedArray, (pInternalImport, cl, numElements, elementVT, arrayMarshalInfo.GetElementTypeHandle().GetMethodTable()));
+ break;
+ }
+ }
+#ifdef FEATURE_CLASSIC_COMINTEROP
+ else if (fDefault || ntype == NATIVE_TYPE_SAFEARRAY)
+ {
+ VARTYPE vtElement = VT_EMPTY;
+
+ // Check for data remaining in the signature before we attempt to grab some.
+ if (S_OK == CheckForCompressedData(pNativeTypeStart, pNativeType, cbNativeTypeStart))
+ vtElement = (VARTYPE) (CorSigUncompressData(/*modifies*/pNativeType));
+
+ ArrayMarshalInfo arrayMarshalInfo(amiRuntime);
+ arrayMarshalInfo.InitForSafeArray(MarshalInfo::MARSHAL_SCENARIO_FIELD, thElement, vtElement, fAnsi);
+
+ if (!arrayMarshalInfo.IsValid())
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (arrayMarshalInfo.GetErrorResourceId()));
+ break;
+ }
+
+ INITFIELDMARSHALER(NFT_SAFEARRAY, FieldMarshaler_SafeArray, (arrayMarshalInfo.GetElementVT(), arrayMarshalInfo.GetElementTypeHandle().GetMethodTable()));
+ }
+#endif //FEATURE_CLASSIC_COMINTEROP
+ else
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHALFIELD_ARRAY));
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_STRING:
+ break;
+
+ default:
+ // let it fall thru as NFT_NONE
+ break;
+ }
+
+ if (pfwalk->m_nft == NFT_NONE)
+ {
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_BADMANAGED));
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (fIsWinRT && !NFTDataBase[pfwalk->m_nft].m_fWinRTSupported)
+ {
+ // the field marshaler we came up with is not supported in WinRT scenarios
+ ZeroMemory(&pfwalk->m_FieldMarshaler, MAXFIELDMARSHALERSIZE);
+ INITFIELDMARSHALER(NFT_ILLEGAL, FieldMarshaler_Illegal, (IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE));
+ }
+#endif // FEATURE_COMINTEROP
+#undef INITFIELDMARSHALER
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+
+TypeHandle ArraySubTypeLoadWorker(const SString &strUserDefTypeName, Assembly* pAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pAssembly));
+ }
+ CONTRACTL_END;
+
+ TypeHandle th;
+
+ EX_TRY
+ {
+ // Load the user defined type.
+ StackScratchBuffer utf8Name;
+ th = TypeName::GetTypeUsingCASearchRules(strUserDefTypeName.GetUTF8(utf8Name), pAssembly);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+
+ return th;
+}
+
+
+TypeHandle GetFieldTypeHandleWorker(MetaSig *pFieldSig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pFieldSig));
+ }
+ CONTRACTL_END;
+
+ TypeHandle th;
+
+ EX_TRY
+ {
+ // Load the user defined type.
+ th = pFieldSig->GetLastTypeHandleThrowing(ClassLoader::LoadTypes,
+ CLASS_LOAD_APPROXPARENTS,
+ TRUE /*dropGenericArgumentLevel*/);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+
+ return th;
+}
+
+
+//=======================================================================
+// This function returns TRUE if the type passed in is a value class or a class that has layout information
+// and is marshalable. In all other cases it returns FALSE.
+//=======================================================================
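+// Illustrative sketch (hypothetical types, not from this file): a blittable struct
+// such as one containing only ints returns TRUE; an array of ints returns FALSE even
+// though it is blittable, because arrays are excluded; a class with no layout returns
+// FALSE; a layout class whose field marshalers all parsed as legal returns TRUE.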
+BOOL IsStructMarshalable(TypeHandle th)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(!th.IsNull());
+ }
+ CONTRACTL_END;
+
+ if (th.IsBlittable())
+ {
+ // th.IsBlittable will return true for arrays of blittable types, however since IsStructMarshalable
+ // is only supposed to return true for value classes or classes with layout that are marshallable
+ // we need to return false if the type is an array.
+ if (th.IsArray())
+ return FALSE;
+ else
+ return TRUE;
+ }
+
+ // Check to see if the type has layout.
+ if (!th.HasLayout())
+ return FALSE;
+
+ MethodTable *pMT= th.GetMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+
+ if (pMT->IsStructMarshalable())
+ return TRUE;
+
+ const FieldMarshaler *pFieldMarshaler = pMT->GetLayoutInfo()->GetFieldMarshalers();
+ UINT numReferenceFields = pMT->GetLayoutInfo()->GetNumCTMFields();
+
+ while (numReferenceFields--)
+ {
+ if (pFieldMarshaler->GetNStructFieldType() == NFT_ILLEGAL)
+ return FALSE;
+
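+        // Marshalers are stored back to back in fixed-size slots, so advance by
+        // MAXFIELDMARSHALERSIZE raw bytes rather than by typed pointer arithmetic.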
+ ((BYTE*&)pFieldMarshaler) += MAXFIELDMARSHALERSIZE;
+ }
+
+ return TRUE;
+}
+
+
+//=======================================================================
+// Called from the clsloader to load up and summarize the field metadata
+// for layout classes.
+//
+// Warning: This function can load other classes (esp. for nested structs.)
+//=======================================================================
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+VOID EEClassLayoutInfo::CollectLayoutFieldMetadataThrowing(
+ mdTypeDef cl, // cl of the NStruct being loaded
+ BYTE packingSize, // packing size (from @dll.struct)
+ BYTE nlType, // nltype (from @dll.struct)
+#ifdef FEATURE_COMINTEROP
+ BOOL isWinRT, // Is the type a WinRT type
+#endif // FEATURE_COMINTEROP
+ BOOL fExplicitOffsets, // explicit offsets?
+ MethodTable *pParentMT, // the loaded superclass
+ ULONG cMembers, // total number of members (methods + fields)
+ HENUMInternal *phEnumField, // enumerator for field
+ Module *pModule, // Module that defines the scope, loader and heap (for allocate FieldMarshalers)
+ const SigTypeContext *pTypeContext, // Type parameters for NStruct being loaded
+ EEClassLayoutInfo *pEEClassLayoutInfoOut, // caller-allocated structure to fill in.
+ LayoutRawFieldInfo *pInfoArrayOut, // caller-allocated array to fill in. Needs room for cMember+1 elements
+ LoaderAllocator *pAllocator,
+ AllocMemTracker *pamTracker
+)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ MD_CLASS_LAYOUT classlayout;
+ mdFieldDef fd;
+ ULONG ulOffset;
+ ULONG cFields = 0;
+
+    // Running flag - if anything in this type disqualifies it from being ManagedSequential, somebody will set this to TRUE by the time the
+    // function exits.
+ BOOL fDisqualifyFromManagedSequential = FALSE;
+
+ // Internal interface for the NStruct being loaded.
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+
+
+#ifdef _DEBUG
+ LPCUTF8 szName;
+ LPCUTF8 szNamespace;
+ if (FAILED(pInternalImport->GetNameOfTypeDef(cl, &szName, &szNamespace)))
+ {
+ szName = szNamespace = "Invalid TypeDef record";
+ }
+
+ if (g_pConfig->ShouldBreakOnStructMarshalSetup(szName))
+ CONSISTENCY_CHECK_MSGF(false, ("BreakOnStructMarshalSetup: '%s' ", szName));
+#endif
+
+
+ // Check if this type might be ManagedSequential. Only valuetypes marked Sequential can be
+ // ManagedSequential. Other issues checked below might also disqualify the type.
+ if ( (!fExplicitOffsets) && // Is it marked sequential?
+ (pParentMT && (pParentMT->IsValueTypeClass() || pParentMT->IsManagedSequential())) // Is it a valuetype or derived from a qualifying valuetype?
+ )
+ {
+        // Type qualifies so far... nothing to do.
+ }
+ else
+ {
+ fDisqualifyFromManagedSequential = TRUE;
+ }
+
+
+ BOOL fHasNonTrivialParent = pParentMT &&
+ !pParentMT->IsObjectClass() &&
+ !pParentMT->IsValueTypeClass();
+
+
+ //====================================================================
+ // First, some validation checks.
+ //====================================================================
+ _ASSERTE(!(fHasNonTrivialParent && !(pParentMT->HasLayout())));
+
+ hr = pInternalImport->GetClassLayoutInit(cl, &classlayout);
+ if (FAILED(hr))
+ {
+ COMPlusThrowHR(hr, BFA_CANT_GET_CLASSLAYOUT);
+ }
+
+ pEEClassLayoutInfoOut->m_numCTMFields = fHasNonTrivialParent ? pParentMT->GetLayoutInfo()->m_numCTMFields : 0;
+ pEEClassLayoutInfoOut->m_pFieldMarshalers = NULL;
+ pEEClassLayoutInfoOut->SetIsBlittable(TRUE);
+ if (fHasNonTrivialParent)
+ pEEClassLayoutInfoOut->SetIsBlittable(pParentMT->IsBlittable());
+ pEEClassLayoutInfoOut->SetIsZeroSized(FALSE);
+ pEEClassLayoutInfoOut->SetHasExplicitSize(FALSE);
+ pEEClassLayoutInfoOut->m_cbPackingSize = packingSize;
+
+ LayoutRawFieldInfo *pfwalk = pInfoArrayOut;
+
+ S_UINT32 cbSortArraySize = S_UINT32(cMembers) * S_UINT32(sizeof(LayoutRawFieldInfo *));
+ if (cbSortArraySize.IsOverflow())
+ {
+ ThrowHR(COR_E_TYPELOAD);
+ }
+ LayoutRawFieldInfo **pSortArray = (LayoutRawFieldInfo **)_alloca(cbSortArraySize.Value());
+ LayoutRawFieldInfo **pSortArrayEnd = pSortArray;
+
+ ULONG maxRid = pInternalImport->GetCountWithTokenKind(mdtFieldDef);
+
+
+ //=====================================================================
+ // Phase 1: Figure out the NFT of each field based on both the CLR
+ // signature of the field and the FieldMarshaler metadata.
+ //=====================================================================
+ BOOL fParentHasLayout = pParentMT && pParentMT->HasLayout();
+ UINT32 cbAdjustedParentLayoutNativeSize = 0;
+    EEClassLayoutInfo *pParentLayoutInfo = NULL;
+ if (fParentHasLayout)
+ {
+ pParentLayoutInfo = pParentMT->GetLayoutInfo();
+ // Treat base class as an initial member.
+ cbAdjustedParentLayoutNativeSize = pParentLayoutInfo->GetNativeSize();
+        // If the parent was originally a zero-sized explicit type but
+        // got bumped up to a size of 1 for compatibility reasons, then
+        // we need to remove the padding, but ONLY for inheritance situations.
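+        // e.g. an empty explicit-layout base reports a native size of 1, but a
+        // derived type's own fields should still start at offset 0, not 1.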
+ if (pParentLayoutInfo->IsZeroSized()) {
+ CONSISTENCY_CHECK(cbAdjustedParentLayoutNativeSize == 1);
+ cbAdjustedParentLayoutNativeSize = 0;
+ }
+ }
+
+ ULONG i;
+ for (i = 0; pInternalImport->EnumNext(phEnumField, &fd); i++)
+ {
+ DWORD dwFieldAttrs;
+ ULONG rid = RidFromToken(fd);
+
+ if((rid == 0)||(rid > maxRid))
+ {
+ COMPlusThrowHR(COR_E_TYPELOAD, BFA_BAD_FIELD_TOKEN);
+ }
+
+ IfFailThrow(pInternalImport->GetFieldDefProps(fd, &dwFieldAttrs));
+
+ PCCOR_SIGNATURE pNativeType = NULL;
+ ULONG cbNativeType;
+ // We ignore marshaling data attached to statics and literals,
+ // since these do not contribute to instance data.
+ if (!IsFdStatic(dwFieldAttrs) && !IsFdLiteral(dwFieldAttrs))
+ {
+ PCCOR_SIGNATURE pCOMSignature;
+ ULONG cbCOMSignature;
+
+ if (IsFdHasFieldMarshal(dwFieldAttrs))
+ {
+ hr = pInternalImport->GetFieldMarshal(fd, &pNativeType, &cbNativeType);
+ if (FAILED(hr))
+ cbNativeType = 0;
+ }
+ else
+ cbNativeType = 0;
+
+ IfFailThrow(pInternalImport->GetSigOfFieldDef(fd,&cbCOMSignature, &pCOMSignature));
+
+ IfFailThrow(::validateTokenSig(fd,pCOMSignature,cbCOMSignature,dwFieldAttrs,pInternalImport));
+
+ // fill the appropriate entry in pInfoArrayOut
+ pfwalk->m_MD = fd;
+            pfwalk->m_nft = NFT_NONE;
+ pfwalk->m_offset = (UINT32) -1;
+ pfwalk->m_sequence = 0;
+
+#ifdef _DEBUG
+ LPCUTF8 szFieldName;
+ if (FAILED(pInternalImport->GetNameOfFieldDef(fd, &szFieldName)))
+ {
+ szFieldName = "Invalid FieldDef record";
+ }
+#endif
+
+ ParseNativeTypeFlags flags = ParseNativeTypeFlag_None;
+#ifdef FEATURE_COMINTEROP
+ if (isWinRT)
+ flags |= ParseNativeTypeFlag_IsWinRT;
+ else // WinRT types have nlType == nltAnsi but should be treated as Unicode
+#endif // FEATURE_COMINTEROP
+ if (nlType == nltAnsi)
+ flags |= ParseNativeTypeFlag_IsAnsi;
+
+ ParseNativeType(pModule,
+ pCOMSignature,
+ cbCOMSignature,
+ flags,
+ pfwalk,
+ pNativeType,
+ cbNativeType,
+ pInternalImport,
+ cl,
+ pTypeContext,
+ &fDisqualifyFromManagedSequential
+#ifdef _DEBUG
+ ,
+ szNamespace,
+ szName,
+ szFieldName
+#endif
+ );
+
+
+ //<TODO>@nice: This is obviously not the place to bury this logic.
+ // We're replacing NFT's with MARSHAL_TYPES_* in the near future
+ // so this isn't worth perfecting.</TODO>
+
+ BOOL resetBlittable = TRUE;
+
+ // if it's a simple copy...
+ if (pfwalk->m_nft == NFT_COPY1 ||
+ pfwalk->m_nft == NFT_COPY2 ||
+ pfwalk->m_nft == NFT_COPY4 ||
+ pfwalk->m_nft == NFT_COPY8)
+ {
+ resetBlittable = FALSE;
+ }
+
+ // Or if it's a nested value class that is itself blittable...
+ if (pfwalk->m_nft == NFT_NESTEDVALUECLASS)
+ {
+ FieldMarshaler *pFM = (FieldMarshaler*)&(pfwalk->m_FieldMarshaler);
+ _ASSERTE(pFM->IsNestedValueClassMarshaler());
+
+ if (((FieldMarshaler_NestedValueClass *) pFM)->IsBlittable())
+ resetBlittable = FALSE;
+ }
+
+ // ...Otherwise, this field prevents blitting
+ if (resetBlittable)
+ pEEClassLayoutInfoOut->SetIsBlittable(FALSE);
+
+ cFields++;
+ pfwalk++;
+ }
+ }
+
+ _ASSERTE(i == cMembers);
+
+ // NULL out the last entry
+ pfwalk->m_MD = mdFieldDefNil;
+
+
+ //
+ // fill in the layout information
+ //
+
+    // pfwalk points to the beginning of the array
+ pfwalk = pInfoArrayOut;
+
+ while (SUCCEEDED(hr = pInternalImport->GetClassLayoutNext(
+ &classlayout,
+ &fd,
+ &ulOffset)) &&
+ fd != mdFieldDefNil)
+ {
+ // watch for the last entry: must be mdFieldDefNil
+ while ((mdFieldDefNil != pfwalk->m_MD)&&(pfwalk->m_MD < fd))
+ pfwalk++;
+
+ // if we haven't found a matching token, it must be a static field with layout -- ignore it
+ if(pfwalk->m_MD != fd) continue;
+
+ if (!fExplicitOffsets)
+ {
+ // ulOffset is the sequence
+ pfwalk->m_sequence = ulOffset;
+ }
+ else
+ {
+ // ulOffset is the explicit offset
+ pfwalk->m_offset = ulOffset;
+ pfwalk->m_sequence = (ULONG) -1;
+
+ // Treat base class as an initial member.
+ if (!SafeAddUINT32(&(pfwalk->m_offset), cbAdjustedParentLayoutNativeSize))
+ COMPlusThrowOM();
+ }
+ }
+ IfFailThrow(hr);
+
+ // now sort the array
+ if (!fExplicitOffsets)
+ {
+ // sort sequential by ascending sequence
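+        // (a simple insertion sort; field counts are small, and fields with equal
+        // sequence numbers keep their original relative order)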
+ for (i = 0; i < cFields; i++)
+ {
+ LayoutRawFieldInfo**pSortWalk = pSortArrayEnd;
+ while (pSortWalk != pSortArray)
+ {
+ if (pInfoArrayOut[i].m_sequence >= (*(pSortWalk-1))->m_sequence)
+ break;
+
+ pSortWalk--;
+ }
+
+ // pSortWalk now points to the target location for new FieldInfo.
+ MoveMemory(pSortWalk + 1, pSortWalk, (pSortArrayEnd - pSortWalk) * sizeof(LayoutRawFieldInfo*));
+ *pSortWalk = &pInfoArrayOut[i];
+ pSortArrayEnd++;
+ }
+ }
+ else // no sorting for explicit layout
+ {
+ for (i = 0; i < cFields; i++)
+ {
+ if(pInfoArrayOut[i].m_MD != mdFieldDefNil)
+ {
+ if (pInfoArrayOut[i].m_offset == (UINT32)-1)
+ {
+ LPCUTF8 szFieldName;
+ if (FAILED(pInternalImport->GetNameOfFieldDef(pInfoArrayOut[i].m_MD, &szFieldName)))
+ {
+ szFieldName = "Invalid FieldDef record";
+ }
+ pModule->GetAssembly()->ThrowTypeLoadException(pInternalImport,
+ cl,
+ szFieldName,
+ IDS_CLASSLOAD_NSTRUCT_EXPLICIT_OFFSET);
+ }
+ else if ((INT)pInfoArrayOut[i].m_offset < 0)
+ {
+ LPCUTF8 szFieldName;
+ if (FAILED(pInternalImport->GetNameOfFieldDef(pInfoArrayOut[i].m_MD, &szFieldName)))
+ {
+ szFieldName = "Invalid FieldDef record";
+ }
+ pModule->GetAssembly()->ThrowTypeLoadException(pInternalImport,
+ cl,
+ szFieldName,
+ IDS_CLASSLOAD_NSTRUCT_NEGATIVE_OFFSET);
+ }
+ }
+
+ *pSortArrayEnd = &pInfoArrayOut[i];
+ pSortArrayEnd++;
+ }
+ }
+
+ //=====================================================================
+ // Phase 2: Compute the native size (in bytes) of each field.
+    // Store this in pInfoArrayOut[].m_cbNativeSize.
+ //=====================================================================
+
+ // Now compute the native size of each field
+ for (pfwalk = pInfoArrayOut; pfwalk->m_MD != mdFieldDefNil; pfwalk++)
+ {
+ UINT8 nft = pfwalk->m_nft;
+ pEEClassLayoutInfoOut->m_numCTMFields++;
+
+ // If the NFT's size never changes, it is stored in the database.
+ UINT32 cbNativeSize = NFTDataBase[nft].m_cbNativeSize;
+
+ if (cbNativeSize == 0)
+ {
+ // Size of 0 means NFT's size is variable, so we have to figure it
+ // out case by case.
+ cbNativeSize = ((FieldMarshaler*)&(pfwalk->m_FieldMarshaler))->NativeSize();
+ }
+ pfwalk->m_cbNativeSize = cbNativeSize;
+ }
+
+ if (pEEClassLayoutInfoOut->m_numCTMFields)
+ {
+ pEEClassLayoutInfoOut->m_pFieldMarshalers = (FieldMarshaler*)(pamTracker->Track(pAllocator->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(MAXFIELDMARSHALERSIZE) * S_SIZE_T(pEEClassLayoutInfoOut->m_numCTMFields))));
+
+ // Bring in the parent's fieldmarshalers
+ if (fHasNonTrivialParent)
+ {
+ CONSISTENCY_CHECK(fParentHasLayout);
+ PREFAST_ASSUME(pParentLayoutInfo != NULL); // See if (fParentHasLayout) branch above
+
+ UINT numChildCTMFields = pEEClassLayoutInfoOut->m_numCTMFields - pParentLayoutInfo->m_numCTMFields;
+ memcpyNoGCRefs( ((BYTE*)pEEClassLayoutInfoOut->m_pFieldMarshalers) + MAXFIELDMARSHALERSIZE*numChildCTMFields,
+ pParentLayoutInfo->m_pFieldMarshalers,
+ MAXFIELDMARSHALERSIZE * (pParentLayoutInfo->m_numCTMFields) );
+ }
+
+ }
+
+
+    //=====================================================================
+    // Phase 3: If the FieldMarshaler requires autooffsetting, compute the offset
+    // of each field and the size of the total structure. We do the layout
+    // according to standard VC layout rules:
+    //
+    //   Each field has an alignment requirement. The alignment requirement
+    //   of a scalar field is the smaller of its size and the declared packsize.
+    //   The alignment requirement of a struct field is the smaller of the
+    //   declared packsize and the largest of the alignment requirements
+    //   of its fields. The alignment requirement of an array is that
+    //   of one of its elements.
+    //
+    //   In addition, each struct gets padding at the end to ensure
+    //   that an array of such structs contains no unused space between
+    //   elements.
+    //=====================================================================
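+    // Illustrative sketch (hypothetical struct, not from this file), packsize 8:
+    //     struct S { BYTE b; INT32 i; INT16 s; };
+    // b lands at offset 0 (alignment 1), i is padded up to offset 4 (alignment 4),
+    // s lands at offset 8 (alignment 2), so the raw end is 10. The struct is then
+    // padded to 12, a multiple of the largest alignment requirement (4), so that
+    // in an S[n] every element keeps its members aligned.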
+ {
+ BYTE LargestAlignmentRequirement = 1;
+ UINT32 cbCurOffset = 0;
+
+ // Treat base class as an initial member.
+ if (!SafeAddUINT32(&cbCurOffset, cbAdjustedParentLayoutNativeSize))
+ COMPlusThrowOM();
+
+ if (fParentHasLayout)
+ {
+ BYTE alignmentRequirement;
+
+ alignmentRequirement = min(packingSize, pParentLayoutInfo->GetLargestAlignmentRequirementOfAllMembers());
+
+ LargestAlignmentRequirement = max(LargestAlignmentRequirement, alignmentRequirement);
+ }
+
+ // Start with the size inherited from the parent (if any).
+ unsigned calcTotalSize = cbAdjustedParentLayoutNativeSize;
+
+ LayoutRawFieldInfo **pSortWalk;
+ for (pSortWalk = pSortArray, i=cFields; i; i--, pSortWalk++)
+ {
+ pfwalk = *pSortWalk;
+
+ BYTE alignmentRequirement = static_cast<BYTE>(((FieldMarshaler*)&(pfwalk->m_FieldMarshaler))->AlignmentRequirement());
+ if (!(alignmentRequirement == 1 ||
+ alignmentRequirement == 2 ||
+ alignmentRequirement == 4 ||
+ alignmentRequirement == 8))
+ {
+ COMPlusThrowHR(COR_E_INVALIDPROGRAM, BFA_METADATA_CORRUPT);
+ }
+
+ alignmentRequirement = min(alignmentRequirement, packingSize);
+
+ LargestAlignmentRequirement = max(LargestAlignmentRequirement, alignmentRequirement);
+
+ // This assert means I forgot to special-case some NFT in the
+ // above switch.
+ _ASSERTE(alignmentRequirement <= 8);
+
+ // Check if this field is overlapped with other(s)
+ pfwalk->m_fIsOverlapped = FALSE;
+ if (fExplicitOffsets) {
+ LayoutRawFieldInfo *pfwalk1;
+ DWORD dwBegin = pfwalk->m_offset;
+ DWORD dwEnd = dwBegin+pfwalk->m_cbNativeSize;
+ for (pfwalk1 = pInfoArrayOut; pfwalk1 < pfwalk; pfwalk1++)
+ {
+ if((pfwalk1->m_offset >= dwEnd) || (pfwalk1->m_offset+pfwalk1->m_cbNativeSize <= dwBegin)) continue;
+ pfwalk->m_fIsOverlapped = TRUE;
+ pfwalk1->m_fIsOverlapped = TRUE;
+ }
+ }
+ else
+ {
+ // Insert enough padding to align the current data member.
+ while (cbCurOffset % alignmentRequirement)
+ {
+ if (!SafeAddUINT32(&cbCurOffset, 1))
+ COMPlusThrowOM();
+ }
+
+ // Insert current data member.
+ pfwalk->m_offset = cbCurOffset;
+
+ // if we overflow we will catch it below
+ cbCurOffset += pfwalk->m_cbNativeSize;
+ }
+
+ unsigned fieldEnd = pfwalk->m_offset + pfwalk->m_cbNativeSize;
+ if (fieldEnd < pfwalk->m_offset)
+ COMPlusThrowOM();
+
+            // The size of the structure is the end of the furthest-extending field.
+ if (fieldEnd > calcTotalSize)
+ calcTotalSize = fieldEnd;
+ }
+
+ ULONG clstotalsize = 0;
+ if (FAILED(pInternalImport->GetClassTotalSize(cl, &clstotalsize)))
+ {
+ clstotalsize = 0;
+ }
+
+ if (clstotalsize != 0)
+ {
+ if (!SafeAddULONG(&clstotalsize, (ULONG)cbAdjustedParentLayoutNativeSize))
+ COMPlusThrowOM();
+
+            // size must be large enough to accommodate the layout. If not, we use the layout size instead.
+ if (clstotalsize < calcTotalSize)
+ {
+ clstotalsize = calcTotalSize;
+ }
+ calcTotalSize = clstotalsize; // use the size they told us
+ }
+ else
+ {
+            // They did not give us an explicit size, so let's round up to a good size (for arrays)
+ while (calcTotalSize % LargestAlignmentRequirement != 0)
+ {
+ if (!SafeAddUINT32(&calcTotalSize, 1))
+ COMPlusThrowOM();
+ }
+ }
+
+ // We'll cap the total native size at a (somewhat) arbitrary limit to ensure
+ // that we don't expose some overflow bug later on.
+ if (calcTotalSize >= MAX_SIZE_FOR_INTEROP)
+ COMPlusThrowOM();
+
+ // This is a zero-sized struct - need to record the fact and bump it up to 1.
+ if (calcTotalSize == 0)
+ {
+ pEEClassLayoutInfoOut->SetIsZeroSized(TRUE);
+ calcTotalSize = 1;
+ }
+
+ pEEClassLayoutInfoOut->m_cbNativeSize = calcTotalSize;
+
+ // The packingSize acts as a ceiling on all individual alignment
+ // requirements so it follows that the largest alignment requirement
+ // is also capped.
+ _ASSERTE(LargestAlignmentRequirement <= packingSize);
+ pEEClassLayoutInfoOut->m_LargestAlignmentRequirementOfAllMembers = LargestAlignmentRequirement;
+ }
+
+
+
+ //=====================================================================
+ // Phase 4: Now we do the same thing again for managedsequential layout.
+ //=====================================================================
+ if (!fDisqualifyFromManagedSequential)
+ {
+ BYTE LargestAlignmentRequirement = 1;
+ UINT32 cbCurOffset = 0;
+
+ if (pParentMT && pParentMT->IsManagedSequential())
+ {
+ // Treat base class as an initial member.
+ if (!SafeAddUINT32(&cbCurOffset, pParentMT->GetNumInstanceFieldBytes()))
+ COMPlusThrowOM();
+
+ BYTE alignmentRequirement = 0;
+
+ alignmentRequirement = min(packingSize, pParentLayoutInfo->m_ManagedLargestAlignmentRequirementOfAllMembers);
+
+ LargestAlignmentRequirement = max(LargestAlignmentRequirement, alignmentRequirement);
+ }
+
+        // The current size of the structure as a whole; we start at 1 because we disallow 0-sized structures.
+        // NOTE: We do not need the same zero-size checking as phase 3 because only ValueTypes
+        // can be ManagedSequential and ValueTypes cannot be inherited from.
+ unsigned calcTotalSize = 1;
+
+ LayoutRawFieldInfo **pSortWalk;
+ for (pSortWalk = pSortArray, i=cFields; i; i--, pSortWalk++)
+ {
+ pfwalk = *pSortWalk;
+
+ BYTE alignmentRequirement = ((BYTE)(pfwalk->m_managedAlignmentReq));
+ if (!(alignmentRequirement == 1 ||
+ alignmentRequirement == 2 ||
+ alignmentRequirement == 4 ||
+ alignmentRequirement == 8))
+ {
+ COMPlusThrowHR(COR_E_INVALIDPROGRAM, BFA_METADATA_CORRUPT);
+ }
+
+ alignmentRequirement = min(alignmentRequirement, packingSize);
+
+ LargestAlignmentRequirement = max(LargestAlignmentRequirement, alignmentRequirement);
+
+ _ASSERTE(alignmentRequirement <= 8);
+
+ // Insert enough padding to align the current data member.
+ while (cbCurOffset % alignmentRequirement)
+ {
+ if (!SafeAddUINT32(&cbCurOffset, 1))
+ COMPlusThrowOM();
+ }
+
+ // Insert current data member.
+ pfwalk->m_managedOffset = cbCurOffset;
+
+ // if we overflow we will catch it below
+ cbCurOffset += pfwalk->m_managedSize;
+
+ unsigned fieldEnd = pfwalk->m_managedOffset + pfwalk->m_managedSize;
+ if (fieldEnd < pfwalk->m_managedOffset)
+ COMPlusThrowOM();
+
+            // The size of the structure is the end of the furthest-extending field.
+ if (fieldEnd > calcTotalSize)
+ calcTotalSize = fieldEnd;
+
+#ifdef _DEBUG
+ // @perf: If the type is blittable, the managed and native layouts have to be identical
+ // so they really shouldn't be calculated twice. Until this code has been well tested and
+ // stabilized, however, it is useful to compute both and assert that they are equal in the blittable
+ // case.
+ if (pEEClassLayoutInfoOut->IsBlittable())
+ {
+ _ASSERTE(pfwalk->m_managedOffset == pfwalk->m_offset);
+ _ASSERTE(pfwalk->m_managedSize == pfwalk->m_cbNativeSize);
+ }
+#endif
+ } //for
+
+ ULONG clstotalsize = 0;
+ if (FAILED(pInternalImport->GetClassTotalSize(cl, &clstotalsize)))
+ {
+ clstotalsize = 0;
+ }
+
+ if (clstotalsize != 0)
+ {
+ pEEClassLayoutInfoOut->SetHasExplicitSize(TRUE);
+
+ if (pParentMT && pParentMT->IsManagedSequential())
+ {
+ // Treat base class as an initial member.
+ UINT32 parentSize = pParentMT->GetNumInstanceFieldBytes();
+ if (!SafeAddULONG(&clstotalsize, parentSize))
+ COMPlusThrowOM();
+ }
+
+            // size must be large enough to accommodate the layout. If not, we use the layout size instead.
+ if (clstotalsize < calcTotalSize)
+ {
+ clstotalsize = calcTotalSize;
+ }
+ calcTotalSize = clstotalsize; // use the size they told us
+ }
+ else
+ {
+            // They did not give us an explicit size, so let's round up to a good size (for arrays)
+ while (calcTotalSize % LargestAlignmentRequirement != 0)
+ {
+ if (!SafeAddUINT32(&calcTotalSize, 1))
+ COMPlusThrowOM();
+ }
+ }
+
+ pEEClassLayoutInfoOut->m_cbManagedSize = calcTotalSize;
+
+ // The packingSize acts as a ceiling on all individual alignment
+ // requirements so it follows that the largest alignment requirement
+ // is also capped.
+ _ASSERTE(LargestAlignmentRequirement <= packingSize);
+ pEEClassLayoutInfoOut->m_ManagedLargestAlignmentRequirementOfAllMembers = LargestAlignmentRequirement;
+
+#ifdef _DEBUG
+ // @perf: If the type is blittable, the managed and native layouts have to be identical
+ // so they really shouldn't be calculated twice. Until this code has been well tested and
+ // stabilized, however, it is useful to compute both and assert that they are equal in the blittable
+ // case.
+ if (pEEClassLayoutInfoOut->IsBlittable())
+ {
+ _ASSERTE(pEEClassLayoutInfoOut->m_cbManagedSize == pEEClassLayoutInfoOut->m_cbNativeSize);
+ _ASSERTE(pEEClassLayoutInfoOut->m_ManagedLargestAlignmentRequirementOfAllMembers == pEEClassLayoutInfoOut->m_LargestAlignmentRequirementOfAllMembers);
+ }
+#endif
+ } //if
+
+ pEEClassLayoutInfoOut->SetIsManagedSequential(!fDisqualifyFromManagedSequential);
+
+#ifdef _DEBUG
+ {
+ BOOL illegalMarshaler = FALSE;
+
+ LOG((LF_INTEROP, LL_INFO100000, "\n\n"));
+ LOG((LF_INTEROP, LL_INFO100000, "%s.%s\n", szNamespace, szName));
+ LOG((LF_INTEROP, LL_INFO100000, "Packsize = %lu\n", (ULONG)packingSize));
+ LOG((LF_INTEROP, LL_INFO100000, "Max align req = %lu\n", (ULONG)(pEEClassLayoutInfoOut->m_LargestAlignmentRequirementOfAllMembers)));
+ LOG((LF_INTEROP, LL_INFO100000, "----------------------------\n"));
+ for (pfwalk = pInfoArrayOut; pfwalk->m_MD != mdFieldDefNil; pfwalk++)
+ {
+ LPCUTF8 fieldname;
+ if (FAILED(pInternalImport->GetNameOfFieldDef(pfwalk->m_MD, &fieldname)))
+ {
+ fieldname = "??";
+ }
+ LOG((LF_INTEROP, LL_INFO100000, "+%-5lu ", (ULONG)(pfwalk->m_offset)));
+ LOG((LF_INTEROP, LL_INFO100000, "%s", fieldname));
+ LOG((LF_INTEROP, LL_INFO100000, "\n"));
+
+ if (((FieldMarshaler*)&pfwalk->m_FieldMarshaler)->GetNStructFieldType() == NFT_ILLEGAL)
+ illegalMarshaler = TRUE;
+ }
+
+ // If we are dealing with a non trivial parent, determine if it has any illegal marshallers.
+ if (fHasNonTrivialParent)
+ {
+ FieldMarshaler *pParentFM = pParentMT->GetLayoutInfo()->GetFieldMarshalers();
+ for (i = 0; i < pParentMT->GetLayoutInfo()->m_numCTMFields; i++)
+ {
+ if (pParentFM->GetNStructFieldType() == NFT_ILLEGAL)
+ illegalMarshaler = TRUE;
+ ((BYTE*&)pParentFM) += MAXFIELDMARSHALERSIZE;
+ }
+ }
+
+ LOG((LF_INTEROP, LL_INFO100000, "+%-5lu EOS\n", (ULONG)(pEEClassLayoutInfoOut->m_cbNativeSize)));
+ LOG((LF_INTEROP, LL_INFO100000, "Allocated %d %s field marshallers for %s.%s\n", pEEClassLayoutInfoOut->m_numCTMFields, (illegalMarshaler ? "pointless" : "usable"), szNamespace, szName));
+ }
+#endif
+ return;
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+
+#ifndef CROSSGEN_COMPILE
+
+//=======================================================================
+// For each reference-typed FieldMarshaler field, marshals the current CLR value
+// to a new native instance and stores it in the fixed portion of the FieldMarshaler.
+//
+// This function does not attempt to delete the native value that it overwrites.
+//
+// If there is a SafeHandle field, ppCleanupWorkListOnStack must be non-null, otherwise
+// InvalidOperationException is thrown.
+//=======================================================================
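+// Illustrative note (not part of the original source): for each marshaler slot the
+// loop below takes one of three paths - scalar marshalers convert in place (e.g.
+// dates and decimals), nested value classes recurse into their own layout, and
+// everything else is treated as an object reference that UpdateNative converts.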
+VOID LayoutUpdateNative(LPVOID *ppProtectedManagedData, SIZE_T offsetbias, MethodTable *pMT, BYTE* pNativeData, OBJECTREF *ppCleanupWorkListOnStack)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ FieldMarshaler* pFM = pMT->GetLayoutInfo()->GetFieldMarshalers();
+ UINT numReferenceFields = pMT->GetLayoutInfo()->GetNumCTMFields();
+
+ OBJECTREF pCLRValue = NULL;
+ LPVOID scalar = NULL;
+
+ GCPROTECT_BEGIN(pCLRValue)
+ GCPROTECT_BEGININTERIOR(scalar)
+ {
+ g_IBCLogger.LogFieldMarshalersReadAccess(pMT);
+
+ while (numReferenceFields--)
+ {
+ pFM->Restore();
+
+ DWORD internalOffset = pFM->GetFieldDesc()->GetOffset();
+
+ if (pFM->IsScalarMarshaler())
+ {
+ scalar = (LPVOID)(internalOffset + offsetbias + (BYTE*)(*ppProtectedManagedData));
+ // Note this will throw for FieldMarshaler_Illegal
+ pFM->ScalarUpdateNative(scalar, pNativeData + pFM->GetExternalOffset() );
+
+ }
+ else if (pFM->IsNestedValueClassMarshaler())
+ {
+ pFM->NestedValueClassUpdateNative((const VOID **)ppProtectedManagedData, internalOffset + offsetbias, pNativeData + pFM->GetExternalOffset(),
+ ppCleanupWorkListOnStack);
+ }
+ else
+ {
+ pCLRValue = *(OBJECTREF*)(internalOffset + offsetbias + (BYTE*)(*ppProtectedManagedData));
+ pFM->UpdateNative(&pCLRValue, pNativeData + pFM->GetExternalOffset(), ppCleanupWorkListOnStack);
+ SetObjectReferenceUnchecked( (OBJECTREF*) (internalOffset + offsetbias + (BYTE*)(*ppProtectedManagedData)), pCLRValue);
+ }
+
+ // The cleanup work list is not used to clean up the native contents. It is
+ // used to handle cleanup of any additional resources the FieldMarshalers allocate.
+
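+ // Field marshalers are stored back-to-back in fixed-size slots, so we
+ // advance by MAXFIELDMARSHALERSIZE bytes rather than by sizeof(*pFM).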
+ ((BYTE*&)pFM) += MAXFIELDMARSHALERSIZE;
+ }
+ }
+ GCPROTECT_END();
+ GCPROTECT_END();
+}
+
+
+VOID FmtClassUpdateNative(OBJECTREF *ppProtectedManagedData, BYTE *pNativeData, OBJECTREF *ppCleanupWorkListOnStack)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(ppProtectedManagedData));
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = (*ppProtectedManagedData)->GetMethodTable();
+ _ASSERTE(pMT->IsBlittable() || pMT->HasLayout());
+ UINT32 cbsize = pMT->GetNativeSize();
+
+ if (pMT->IsBlittable())
+ {
+ memcpyNoGCRefs(pNativeData, (*ppProtectedManagedData)->GetData(), cbsize);
+ }
+ else
+ {
+ // This allows us to do a partial LayoutDestroyNative in the case of
+ // a marshaling error on one of the fields.
+ FillMemory(pNativeData, cbsize, 0);
+ NativeLayoutDestroyer nld(pNativeData, pMT, cbsize);
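+ // nld is a holder: if LayoutUpdateNative throws below, its destructor
+ // cleans up the fields that were already marshaled into pNativeData;
+ // SuppressRelease on the success path keeps the buffer intact for the caller.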
+
+ LayoutUpdateNative( (VOID**)ppProtectedManagedData,
+ Object::GetOffsetOfFirstField(),
+ pMT,
+ pNativeData,
+ ppCleanupWorkListOnStack);
+
+ nld.SuppressRelease();
+ }
+
+}
+
+
+VOID FmtClassUpdateCLR(OBJECTREF *ppProtectedManagedData, BYTE *pNativeData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = (*ppProtectedManagedData)->GetMethodTable();
+ _ASSERTE(pMT->IsBlittable() || pMT->HasLayout());
+ UINT32 cbsize = pMT->GetNativeSize();
+
+ if (pMT->IsBlittable())
+ {
+ memcpyNoGCRefs((*ppProtectedManagedData)->GetData(), pNativeData, cbsize);
+ }
+ else
+ {
+ LayoutUpdateCLR((VOID**)ppProtectedManagedData,
+ Object::GetOffsetOfFirstField(),
+ pMT,
+ (BYTE*)pNativeData
+ );
+ }
+}
+
+
+
+//=======================================================================
+// For each field that has a FieldMarshaler, marshals the current native value
+// to a new CLR instance and stores it at the field's offset in the managed object.
+//
+// NOTE: This function neither destroys nor frees the native data; see the
+// ownership note in the body.
+//=======================================================================
+VOID LayoutUpdateCLR(LPVOID *ppProtectedManagedData, SIZE_T offsetbias, MethodTable *pMT, BYTE *pNativeData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ // Don't try to destroy/free native the structure on exception, we may not own it. If we do own it and
+ // are supposed to destroy/free it, we do it upstack (e.g. in a helper called from the marshaling stub).
+
+ FieldMarshaler* pFM = pMT->GetLayoutInfo()->GetFieldMarshalers();
+ UINT numReferenceFields = pMT->GetLayoutInfo()->GetNumCTMFields();
+
+ struct _gc
+ {
+ OBJECTREF pCLRValue;
+ OBJECTREF pOldCLRValue;
+ } gc;
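+ // Both the freshly created value and the field's previous value are
+ // GC-protected: marshalers such as SafeHandle compare against the old
+ // value in UpdateCLR before deciding what to store.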
+
+ gc.pCLRValue = NULL;
+ gc.pOldCLRValue = NULL;
+ LPVOID scalar = NULL;
+
+ GCPROTECT_BEGIN(gc)
+ GCPROTECT_BEGININTERIOR(scalar)
+ {
+ g_IBCLogger.LogFieldMarshalersReadAccess(pMT);
+
+ while (numReferenceFields--)
+ {
+ pFM->Restore();
+
+ DWORD internalOffset = pFM->GetFieldDesc()->GetOffset();
+
+ if (pFM->IsScalarMarshaler())
+ {
+ scalar = (LPVOID)(internalOffset + offsetbias + (BYTE*)(*ppProtectedManagedData));
+ // Note this will throw for FieldMarshaler_Illegal
+ pFM->ScalarUpdateCLR( pNativeData + pFM->GetExternalOffset(), scalar);
+ }
+ else if (pFM->IsNestedValueClassMarshaler())
+ {
+ pFM->NestedValueClassUpdateCLR(pNativeData + pFM->GetExternalOffset(), ppProtectedManagedData, internalOffset + offsetbias);
+ }
+ else
+ {
+ gc.pOldCLRValue = *(OBJECTREF*)(internalOffset + offsetbias + (BYTE*)(*ppProtectedManagedData));
+ pFM->UpdateCLR( pNativeData + pFM->GetExternalOffset(), &gc.pCLRValue, &gc.pOldCLRValue );
+ SetObjectReferenceUnchecked( (OBJECTREF*) (internalOffset + offsetbias + (BYTE*)(*ppProtectedManagedData)), gc.pCLRValue );
+ }
+
+ ((BYTE*&)pFM) += MAXFIELDMARSHALERSIZE;
+ }
+ }
+ GCPROTECT_END();
+ GCPROTECT_END();
+}
+
+
+VOID LayoutDestroyNative(LPVOID pNative, MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ FieldMarshaler *pFM = pMT->GetLayoutInfo()->GetFieldMarshalers();
+ UINT numReferenceFields = pMT->GetLayoutInfo()->GetNumCTMFields();
+ BYTE *pNativeData = (BYTE*)pNative;
+
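+ // Every DestroyNativeImpl is NOTHROW (note the contract here and on each
+ // marshaler), so this sweep can safely run on cleanup and exception paths.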
+ while (numReferenceFields--)
+ {
+ pFM->DestroyNative( pNativeData + pFM->GetExternalOffset() );
+ ((BYTE*&)pFM) += MAXFIELDMARSHALERSIZE;
+ }
+}
+
+VOID FmtClassDestroyNative(LPVOID pNative, MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ if (pNative)
+ {
+ if (!(pMT->IsBlittable()))
+ {
+ _ASSERTE(pMT->HasLayout());
+ LayoutDestroyNative(pNative, pMT);
+ }
+ }
+}
+
+VOID FmtValueTypeUpdateNative(LPVOID pProtectedManagedData, MethodTable *pMT, BYTE *pNativeData, OBJECTREF *ppCleanupWorkListOnStack)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pMT->IsValueType() && (pMT->IsBlittable() || pMT->HasLayout()));
+ UINT32 cbsize = pMT->GetNativeSize();
+
+ if (pMT->IsBlittable())
+ {
+ memcpyNoGCRefs(pNativeData, pProtectedManagedData, cbsize);
+ }
+ else
+ {
+ // This allows us to do a partial LayoutDestroyNative in the case of
+ // a marshaling error on one of the fields.
+ FillMemory(pNativeData, cbsize, 0);
+
+ NativeLayoutDestroyer nld(pNativeData, pMT, cbsize);
+
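+ // Unboxed value types carry no object header, so field offsets are taken
+ // relative to the start of the data itself (offsetbias == 0). Compare
+ // FmtClassUpdateNative, which biases by Object::GetOffsetOfFirstField().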
+ LayoutUpdateNative( (VOID**)pProtectedManagedData,
+ 0,
+ pMT,
+ pNativeData,
+ ppCleanupWorkListOnStack);
+
+ nld.SuppressRelease();
+ }
+}
+
+VOID FmtValueTypeUpdateCLR(LPVOID pProtectedManagedData, MethodTable *pMT, BYTE *pNativeData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pMT->IsValueType() && (pMT->IsBlittable() || pMT->HasLayout()));
+ UINT32 cbsize = pMT->GetNativeSize();
+
+ if (pMT->IsBlittable())
+ {
+ memcpyNoGCRefs(pProtectedManagedData, pNativeData, cbsize);
+ }
+ else
+ {
+ LayoutUpdateCLR((VOID**)pProtectedManagedData,
+ 0,
+ pMT,
+ (BYTE*)pNativeData);
+ }
+}
+
+
+#ifdef FEATURE_COMINTEROP
+
+//=======================================================================
+// BSTR <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_BSTR::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ STRINGREF pString;
+ *((OBJECTREF*)&pString) = *pCLRValue;
+
+ if (pString == NULL)
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, NULL);
+ else
+ {
+ BSTR pBSTR = SysAllocStringLen(pString->GetBuffer(), pString->GetStringLength());
+ if (!pBSTR)
+ COMPlusThrowOM();
+
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, pBSTR);
+ }
+}
+
+
+//=======================================================================
+// BSTR <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_BSTR::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ _ASSERTE(NULL != pNativeValue);
+ _ASSERTE(NULL != ppProtectedCLRValue);
+
+ STRINGREF pString;
+ BSTR pBSTR = (BSTR)MAYBE_UNALIGNED_READ(pNativeValue, _PTR);
+
+ if (!pBSTR)
+ pString = NULL;
+ else
+ {
+ struct Param : CallOutFilterParam {
+ int length;
+ BSTR pBSTR;
+ }; Param param;
+
+ param.OneShot = TRUE;
+ param.length = 0;
+ param.pBSTR = pBSTR;
+
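+ // A BSTR read from a native struct may be corrupt; query its length under
+ // PAL_TRY so that a hardware fault is routed through CallOutFilter instead
+ // of tearing down the process.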
+ PAL_TRY(Param *, pParam, &param)
+ {
+ pParam->length = SysStringLen(pParam->pBSTR);
+ }
+ PAL_EXCEPT_FILTER(CallOutFilter)
+ {
+ _ASSERTE(!"CallOutFilter returned EXECUTE_HANDLER.");
+ }
+ PAL_ENDTRY;
+
+ pString = StringObject::NewString(pBSTR, param.length);
+ }
+
+ *((STRINGREF*)ppProtectedCLRValue) = pString;
+}
+
+
+//=======================================================================
+// BSTR <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_BSTR::DestroyNativeImpl(LPVOID pNativeValue) const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ BSTR pBSTR = (BSTR)MAYBE_UNALIGNED_READ(pNativeValue, _PTR);
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, NULL);
+
+ if (pBSTR)
+ {
+ _ASSERTE (GetModuleHandleA("oleaut32.dll") != NULL);
+ // BSTR has been created, which means oleaut32 should have been loaded.
+ // Delay load will not fail.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ SysFreeString(pBSTR);
+ }
+}
+
+//===========================================================================================
+// Windows.Foundation.IReference'1<-- System.Nullable'1
+//
+VOID FieldMarshaler_Nullable::ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pNative));
+ PRECONDITION(CheckPointer(pCLR));
+ }
+ CONTRACTL_END;
+
+ IUnknown *pUnk = NULL;
+
+ // ConvertToNative<T>(ref Nullable<T> pManaged) where T : struct
+ MethodDescCallSite convertToNative(GetMethodDescForGenericInstantiation(MscorlibBinder::GetMethod(METHOD__NULLABLEMARSHALER__CONVERT_TO_NATIVE)));
+ ARG_SLOT args[] =
+ {
+ PtrToArgSlot(pCLR)
+ };
+
+ pUnk = (IUnknown*) convertToNative.Call_RetLPVOID(args);
+
+ MAYBE_UNALIGNED_WRITE(pNative, _PTR, pUnk);
+}
+
+//===========================================================================================
+// Windows.Foundation.IReference'1--> System.Nullable'1
+//
+VOID FieldMarshaler_Nullable::ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNative));
+ PRECONDITION(CheckPointer(pCLR));
+ }
+ CONTRACTL_END;
+
+ IUnknown *pUnk = (IUnknown*)MAYBE_UNALIGNED_READ(pNative, _PTR);
+
+ MethodDescCallSite convertToManaged(GetMethodDescForGenericInstantiation(MscorlibBinder::GetMethod(METHOD__NULLABLEMARSHALER__CONVERT_TO_MANAGED_RET_VOID)));
+
+ ARG_SLOT args[] =
+ {
+ PtrToArgSlot(pUnk),
+ PtrToArgSlot(pCLR)
+ };
+
+ // ConvertToManaged<T>(IntPtr pNative, ref Nullable<T> retObj) where T : struct
+ convertToManaged.Call(args);
+}
+
+//===========================================================================================
+// Windows.Foundation.IReference'1<--> System.Nullable'1
+//
+VOID FieldMarshaler_Nullable::DestroyNativeImpl(const VOID* pNative) const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ IUnknown *pUnk = (IUnknown*)MAYBE_UNALIGNED_READ(pNative, _PTR);
+ MAYBE_UNALIGNED_WRITE(pNative, _PTR, NULL);
+
+ if (pUnk != NULL)
+ {
+ ULONG cbRef = SafeRelease(pUnk);
+ LogInteropRelease(pUnk, cbRef, "Field marshaler destroy native");
+ }
+}
+
+//=======================================================================
+// HSTRING <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_HSTRING::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pCLRValue));
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ if (!WinRTSupported())
+ {
+ COMPlusThrow(kPlatformNotSupportedException, W("PlatformNotSupported_WinRT"));
+ }
+
+ STRINGREF stringref = (STRINGREF)(*pCLRValue);
+
+ if (stringref == NULL)
+ {
+ DefineFullyQualifiedNameForClassW();
+ StackSString ssFieldName(SString::Utf8, GetFieldDesc()->GetName());
+
+ SString errorString;
+ errorString.LoadResource(CCompRC::Error, IDS_EE_BADMARSHALFIELD_NULL_HSTRING);
+
+ COMPlusThrow(kMarshalDirectiveException,
+ IDS_EE_BADMARSHALFIELD_ERROR_MSG,
+ GetFullyQualifiedNameForClassW(GetFieldDesc()->GetEnclosingMethodTable()),
+ ssFieldName.GetUnicode(),
+ errorString.GetUnicode());
+ }
+
+ HSTRING hstring;
+ IfFailThrow(WindowsCreateString(stringref->GetBuffer(), stringref->GetStringLength(), &hstring));
+
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, hstring);
+}
+
+//=======================================================================
+// HSTRING <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_HSTRING::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ }
+ CONTRACTL_END;
+
+ if (!WinRTSupported())
+ {
+ COMPlusThrow(kPlatformNotSupportedException, W("PlatformNotSupported_WinRT"));
+ }
+
+ // NULL HSTRINGs are equivalent to empty strings
+ UINT32 cchString = 0;
+ LPCWSTR pwszString = W("");
+
+ HSTRING hstring = (HSTRING)MAYBE_UNALIGNED_READ(pNativeValue, _PTR);
+ if (hstring != NULL)
+ {
+ pwszString = WindowsGetStringRawBuffer(hstring, &cchString);
+ }
+
+ STRINGREF stringref = StringObject::NewString(pwszString, cchString);
+ *((STRINGREF *)ppProtectedCLRValue) = stringref;
+}
+
+//=======================================================================
+// HSTRING <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_HSTRING::DestroyNativeImpl(LPVOID pNativeValue) const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ HSTRING hstring = (HSTRING)MAYBE_UNALIGNED_READ(pNativeValue, _PTR);
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, NULL);
+
+ if (hstring != NULL)
+ {
+ // We need this for code:System.Runtime.InteropServices.Marshal.DestroyStructure (user can explicitly call it)
+ if (WinRTSupported())
+ {
+ // If WinRT is supported we've already loaded combase.dll, which means
+ // this delay load will succeed
+ CONTRACT_VIOLATION(ThrowsViolation);
+ WindowsDeleteString(hstring);
+ }
+ }
+}
+
+//=======================================================================================
+// Windows.UI.Xaml.Interop.TypeName <--> System.Type
+//
+VOID FieldMarshaler_SystemType::UpdateNativeImpl(OBJECTREF * pCLRValue, LPVOID pNativeValue, OBJECTREF * ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pCLRValue));
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ // ConvertToNative(System.Type managedType, TypeName *pTypeName)
+ MethodDescCallSite convertToNative(METHOD__SYSTEMTYPEMARSHALER__CONVERT_TO_NATIVE);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(*pCLRValue),
+ PtrToArgSlot(pNativeValue)
+ };
+ convertToNative.Call(args);
+}
+
+//=======================================================================================
+// Windows.UI.Xaml.Interop.TypeName <--> System.Type
+//
+VOID FieldMarshaler_SystemType::UpdateCLRImpl(const VOID * pNativeValue, OBJECTREF * ppProtectedCLRValue, OBJECTREF * ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ }
+ CONTRACTL_END;
+
+ // ConvertToManaged(TypeName *pTypeName, out System.Type)
+ MethodDescCallSite convertToManaged(METHOD__SYSTEMTYPEMARSHALER__CONVERT_TO_MANAGED);
+ ARG_SLOT args[] =
+ {
+ PtrToArgSlot(pNativeValue),
+ PtrToArgSlot(ppProtectedCLRValue)
+ };
+
+ convertToManaged.Call(args);
+}
+
+//=======================================================================================
+// Windows.UI.Xaml.Interop.TypeName <--> System.Type
+// Clear the HSTRING field
+//
+VOID FieldMarshaler_SystemType::DestroyNativeImpl(LPVOID pNativeValue) const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(WinRTSupported());
+ }
+ CONTRACTL_END;
+
+ //
+ // Call WindowsDeleteString instead of SystemTypeMarshaler.ClearNative
+ // because WindowsDeleteString does not throw and is much faster
+ //
+ size_t offset = offsetof(TypeNameNative, typeName);
+ HSTRING hstring = (HSTRING)MAYBE_UNALIGNED_READ((LPBYTE) pNativeValue + offset , _PTR);
+ MAYBE_UNALIGNED_WRITE((LPBYTE) pNativeValue + offset, _PTR, NULL);
+
+ if (hstring != NULL)
+ {
+ // Note: we've already loaded combase.dll, which means this delay load will succeed
+ CONTRACT_VIOLATION(ThrowsViolation);
+ WindowsDeleteString(hstring);
+ }
+}
+
+//=======================================================================================
+// Windows.Foundation.HResult <--> System.Exception
+// Note: The WinRT struct has exactly 1 field, Value (an HRESULT)
+//
+VOID FieldMarshaler_Exception::UpdateNativeImpl(OBJECTREF * pCLRValue, LPVOID pNativeValue, OBJECTREF * ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pCLRValue));
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ // int ConvertToNative(Exception ex)
+ MethodDescCallSite convertToNative(METHOD__HRESULTEXCEPTIONMARSHALER__CONVERT_TO_NATIVE);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(*pCLRValue)
+ };
+ int iReturnedValue = convertToNative.Call_RetI4(args);
+ MAYBE_UNALIGNED_WRITE(pNativeValue, 32, iReturnedValue);
+}
+
+//=======================================================================================
+// Windows.Foundation.HResult <--> System.Exception
+// Note: The WinRT struct has exactly 1 field, Value (an HRESULT)
+//
+VOID FieldMarshaler_Exception::UpdateCLRImpl(const VOID * pNativeValue, OBJECTREF * ppProtectedCLRValue, OBJECTREF * ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ }
+ CONTRACTL_END;
+
+ // Exception ConvertToManaged(int hr)
+ MethodDescCallSite convertToManaged(METHOD__HRESULTEXCEPTIONMARSHALER__CONVERT_TO_MANAGED);
+ ARG_SLOT args[] =
+ {
+ (ARG_SLOT)MAYBE_UNALIGNED_READ(pNativeValue, 32)
+ };
+ *ppProtectedCLRValue = convertToManaged.Call_RetOBJECTREF(args);
+}
+
+#endif // FEATURE_COMINTEROP
+
+
+//=======================================================================
+// Nested structure conversion
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_NestedLayoutClass::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ UINT32 cbNativeSize = GetMethodTable()->GetNativeSize();
+
+ if (*pCLRValue == NULL)
+ {
+ ZeroMemoryInGCHeap(pNativeValue, cbNativeSize);
+ }
+ else
+ {
+ LayoutUpdateNative((LPVOID*)pCLRValue, Object::GetOffsetOfFirstField(),
+ GetMethodTable(), (BYTE*)pNativeValue, ppCleanupWorkListOnStack);
+ }
+
+}
+
+
+//=======================================================================
+// Nested structure conversion
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_NestedLayoutClass::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ }
+ CONTRACTL_END;
+
+ *ppProtectedCLRValue = GetMethodTable()->Allocate();
+
+ LayoutUpdateCLR( (LPVOID*)ppProtectedCLRValue,
+ Object::GetOffsetOfFirstField(),
+ GetMethodTable(),
+ (BYTE *)pNativeValue);
+
+}
+
+
+//=======================================================================
+// Nested structure conversion
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_NestedLayoutClass::DestroyNativeImpl(LPVOID pNativeValue) const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ LayoutDestroyNative(pNativeValue, GetMethodTable());
+}
+
+#endif // CROSSGEN_COMPILE
+
+
+//=======================================================================
+// Nested structure conversion
+// See FieldMarshaler for details.
+//=======================================================================
+UINT32 FieldMarshaler_NestedLayoutClass::NativeSizeImpl() const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return GetMethodTable()->GetLayoutInfo()->GetNativeSize();
+}
+
+//=======================================================================
+// Nested structure conversion
+// See FieldMarshaler for details.
+//=======================================================================
+UINT32 FieldMarshaler_NestedLayoutClass::AlignmentRequirementImpl() const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return GetMethodTable()->GetLayoutInfo()->GetLargestAlignmentRequirementOfAllMembers();
+}
+
+#ifdef FEATURE_COMINTEROP
+MethodDesc* FieldMarshaler_Nullable::GetMethodDescForGenericInstantiation(MethodDesc* pMD) const
+{
+ MethodDesc *pMethodInstantiation;
+
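+ // Bind the generic NullableMarshaler method to this field's T: find or
+ // create a MethodDesc instantiated over this Nullable<T>'s instantiation.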
+ pMethodInstantiation = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pMD,
+ pMD->GetMethodTable(),
+ FALSE,
+ GetMethodTable()->GetInstantiation(),
+ FALSE,
+ TRUE);
+
+ _ASSERTE(pMethodInstantiation != NULL);
+
+ return pMethodInstantiation;
+}
+#endif //FEATURE_COMINTEROP
+
+#ifndef CROSSGEN_COMPILE
+
+//=======================================================================
+// Nested structure conversion
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_NestedValueClass::NestedValueClassUpdateNativeImpl(const VOID **ppProtectedCLR, SIZE_T startoffset, LPVOID pNative, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(ppProtectedCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ // It would be better to detect this at class load time (a nested value
+ // class whose type has no layout), but we have no way to detect it there.
+ if (! GetMethodTable()->GetLayoutInfo())
+ COMPlusThrow(kArgumentException, IDS_NOLAYOUT_IN_EMBEDDED_VALUECLASS);
+
+ LayoutUpdateNative((LPVOID*)ppProtectedCLR, startoffset, GetMethodTable(), (BYTE*)pNative, ppCleanupWorkListOnStack);
+}
+
+
+//=======================================================================
+// Nested structure conversion
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_NestedValueClass::NestedValueClassUpdateCLRImpl(const VOID *pNative, LPVOID *ppProtectedCLR, SIZE_T startoffset) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNative));
+ PRECONDITION(CheckPointer(ppProtectedCLR));
+ }
+ CONTRACTL_END;
+
+ // It would be better to detect this at class load time (a nested value
+ // class whose type has no layout), but we have no way to detect it there.
+ if (! GetMethodTable()->GetLayoutInfo())
+ COMPlusThrow(kArgumentException, IDS_NOLAYOUT_IN_EMBEDDED_VALUECLASS);
+
+ LayoutUpdateCLR( (LPVOID*)ppProtectedCLR,
+ startoffset,
+ GetMethodTable(),
+ (BYTE *)pNative);
+
+
+}
+
+
+//=======================================================================
+// Nested structure conversion
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_NestedValueClass::DestroyNativeImpl(LPVOID pNativeValue) const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ LayoutDestroyNative(pNativeValue, GetMethodTable());
+}
+
+#endif // CROSSGEN_COMPILE
+
+
+//=======================================================================
+// Nested structure conversion
+// See FieldMarshaler for details.
+//=======================================================================
+UINT32 FieldMarshaler_NestedValueClass::NativeSizeImpl() const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // A value class without layout cannot be marshaled as a native type. We
+ // still report the native size when layout information is available, but
+ // the size is only valid for native layouts, not unions. The marshaler
+ // throws if an attempt is made to marshal a value class with no layout.
+ if (GetMethodTable()->HasLayout())
+ return GetMethodTable()->GetLayoutInfo()->GetNativeSize();
+
+ return 0;
+}
+
+
+//=======================================================================
+// Nested structure conversion
+// See FieldMarshaler for details.
+//=======================================================================
+UINT32 FieldMarshaler_NestedValueClass::AlignmentRequirementImpl() const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // A value class without layout cannot be marshaled as a native type. We
+ // still report the alignment when layout information is available, but the
+ // alignment is only valid for native layouts, not unions. The marshaler
+ // throws if an attempt is made to marshal a value class with no layout.
+ if (GetMethodTable()->HasLayout())
+ {
+ UINT32 uAlignmentReq = GetMethodTable()->GetLayoutInfo()->GetLargestAlignmentRequirementOfAllMembers();
+ return uAlignmentReq;
+ }
+ return 1;
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+//=======================================================================
+// CoTask Uni <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_StringUni::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ STRINGREF pString;
+ *((OBJECTREF*)&pString) = *pCLRValue;
+
+ if (pString == NULL)
+ {
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, NULL);
+ }
+ else
+ {
+ DWORD nc = pString->GetStringLength();
+ if (nc > MAX_SIZE_FOR_INTEROP)
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_STRING_TOOLONG);
+
+ LPWSTR wsz = (LPWSTR)CoTaskMemAlloc( (nc + 1) * sizeof(WCHAR) );
+ if (!wsz)
+ COMPlusThrowOM();
+
+ memcpyNoGCRefs(wsz, pString->GetBuffer(), nc*sizeof(WCHAR));
+ wsz[nc] = W('\0');
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, wsz);
+ }
+}
+
+
+//=======================================================================
+// CoTask Uni <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_StringUni::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ }
+ CONTRACTL_END;
+
+ STRINGREF pString;
+ LPCWSTR wsz = (LPCWSTR)MAYBE_UNALIGNED_READ(pNativeValue, _PTR);
+
+ if (!wsz)
+ pString = NULL;
+ else
+ {
+ SIZE_T length = wcslen(wsz);
+ if (length > MAX_SIZE_FOR_INTEROP)
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_STRING_TOOLONG);
+
+ pString = StringObject::NewString(wsz, (DWORD)length);
+ }
+
+ *((STRINGREF*)ppProtectedCLRValue) = pString;
+}
+
+
+//=======================================================================
+// CoTask Uni <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_StringUni::DestroyNativeImpl(LPVOID pNativeValue) const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ LPWSTR wsz = (LPWSTR)MAYBE_UNALIGNED_READ(pNativeValue, _PTR);
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, NULL);
+ if (wsz)
+ CoTaskMemFree(wsz);
+}
+
+
+
+//=======================================================================
+// CoTask Ansi <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_StringAnsi::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ STRINGREF pString;
+ *((OBJECTREF*)&pString) = *pCLRValue;
+
+ if (pString == NULL)
+ {
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, NULL);
+ }
+ else
+ {
+ DWORD nc = pString->GetStringLength();
+ if (nc > MAX_SIZE_FOR_INTEROP)
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_STRING_TOOLONG);
+
+ LPSTR sz = (LPSTR)CoTaskMemAlloc( (nc + 1) * 2 /* 2 for MBCS */ );
+ if (!sz)
+ COMPlusThrowOM();
+
+ int nbytes = InternalWideToAnsi(pString->GetBuffer(),
+ nc,
+ sz,
+ nc*2,
+ m_BestFitMap,
+ m_ThrowOnUnmappableChar);
+ sz[nbytes] = '\0';
+
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, sz);
+ }
+}
+
+
+//=======================================================================
+// CoTask Ansi <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_StringAnsi::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ }
+ CONTRACTL_END;
+
+ STRINGREF pString = NULL;
+ LPCSTR sz = (LPCSTR)MAYBE_UNALIGNED_READ(pNativeValue, _PTR);
+ if (!sz)
+ pString = NULL;
+ else
+ {
+ MAKE_WIDEPTR_FROMANSI(wsztemp, sz);
+ pString = StringObject::NewString(wsztemp, __lwsztemp - 1);
+ }
+
+ *((STRINGREF*)ppProtectedCLRValue) = pString;
+}
+
+
+//=======================================================================
+// CoTask Ansi <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_StringAnsi::DestroyNativeImpl(LPVOID pNativeValue) const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ LPSTR sz = (LPSTR)MAYBE_UNALIGNED_READ(pNativeValue, _PTR);
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, NULL);
+ if (sz)
+ CoTaskMemFree(sz);
+}
+
+
+
+//=======================================================================
+// FixedString <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_FixedStringUni::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ STRINGREF pString;
+ *((OBJECTREF*)&pString) = *pCLRValue;
+
+ if (pString == NULL)
+ {
+ MAYBE_UNALIGNED_WRITE(pNativeValue, 16, W('\0'));
+ }
+ else
+ {
+ DWORD nc = pString->GetStringLength();
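+ // If the string does not fit, it is silently truncated; the last slot is
+ // reserved for the terminating NUL written below.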
+ if (nc >= m_numchar)
+ nc = m_numchar - 1;
+
+ memcpyNoGCRefs(pNativeValue, pString->GetBuffer(), nc*sizeof(WCHAR));
+ MAYBE_UNALIGNED_WRITE(&(((WCHAR*)pNativeValue)[nc]), 16, W('\0'));
+ }
+
+}
+
+
+//=======================================================================
+// FixedString <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_FixedStringUni::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ }
+ CONTRACTL_END;
+
+ STRINGREF pString;
+ SIZE_T ncActual = wcsnlen((const WCHAR *)pNativeValue, m_numchar);
+
+ if (!FitsIn<int>(ncActual))
+ COMPlusThrowHR(COR_E_OVERFLOW);
+
+ pString = StringObject::NewString((const WCHAR *)pNativeValue, (int)ncActual);
+ *((STRINGREF*)ppProtectedCLRValue) = pString;
+}
+
+
+//=======================================================================
+// FixedString <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_FixedStringAnsi::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ STRINGREF pString;
+ *((OBJECTREF*)&pString) = *pCLRValue;
+
+ if (pString == NULL)
+ *((CHAR*)pNativeValue) = '\0';
+ else
+ {
+ DWORD nc = pString->GetStringLength();
+ if (nc >= m_numchar)
+ nc = m_numchar - 1;
+
+ int cbwritten = InternalWideToAnsi(pString->GetBuffer(),
+ nc,
+ (CHAR*)pNativeValue,
+ m_numchar,
+ m_BestFitMap,
+ m_ThrowOnUnmappableChar);
+ ((CHAR*)pNativeValue)[cbwritten] = '\0';
+ }
+
+}
+
+
+//=======================================================================
+// FixedString <--> System.String
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_FixedStringAnsi::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+
+ // should not have slipped past the metadata
+ PRECONDITION(m_numchar != 0);
+ }
+ CONTRACTL_END;
+
+ STRINGREF pString;
+ if (m_numchar == 0)
+ {
+ // but if it does, it is better to throw an exception tardily than to
+ // allow memory corruption.
+ COMPlusThrow(kMarshalDirectiveException);
+ }
+
+ UINT32 allocSize = m_numchar + 2;
+ if (allocSize < m_numchar)
+ ThrowOutOfMemory();
+
+ LPSTR tempbuf = (LPSTR)(_alloca((size_t)allocSize));
+ if (!tempbuf)
+ ThrowOutOfMemory();
+
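+ // Copy into a scratch buffer and force NUL termination (one terminator
+ // inside the copied range plus two guard bytes), so that a trailing DBCS
+ // lead byte cannot cause MultiByteToWideChar to read past the buffer.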
+ memcpyNoGCRefs(tempbuf, pNativeValue, m_numchar);
+ tempbuf[m_numchar-1] = '\0';
+ tempbuf[m_numchar] = '\0';
+ tempbuf[m_numchar+1] = '\0';
+
+ allocSize = m_numchar * sizeof(WCHAR);
+ if (allocSize < m_numchar)
+ ThrowOutOfMemory();
+
+ LPWSTR wsztemp = (LPWSTR)_alloca( (size_t)allocSize );
+ int ncwritten = MultiByteToWideChar(CP_ACP,
+ MB_PRECOMPOSED,
+ tempbuf,
+ -1, // # of CHAR's in inbuffer
+ wsztemp,
+ m_numchar // size (in WCHAR) of outbuffer
+ );
+
+ if (!ncwritten)
+ {
+ // intentionally not throwing for MB2WC failure. We don't always know
+ // whether to expect a valid string in the buffer and we don't want
+ // to throw exceptions randomly.
+ ncwritten++;
+ }
+
+ pString = StringObject::NewString((const WCHAR *)wsztemp, ncwritten-1);
+ *((STRINGREF*)ppProtectedCLRValue) = pString;
+}
+
+
+//=======================================================================
+// CHAR[] <--> char[]
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_FixedCharArrayAnsi::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ I2ARRAYREF pArray;
+ *((OBJECTREF*)&pArray) = *pCLRValue;
+
+ if (pArray == NULL)
+ FillMemory(pNativeValue, m_numElems * sizeof(CHAR), 0);
+ else
+ {
+ if (pArray->GetNumComponents() < m_numElems)
+ COMPlusThrow(kArgumentException, IDS_WRONGSIZEARRAY_IN_NSTRUCT);
+ else
+ {
+ InternalWideToAnsi((const WCHAR*) pArray->GetDataPtr(),
+ m_numElems,
+ (CHAR*)pNativeValue,
+ m_numElems * sizeof(CHAR),
+ m_BestFitMap,
+ m_ThrowOnUnmappableChar);
+ }
+ }
+}
+
+
+//=======================================================================
+// CHAR[] <--> char[]
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_FixedCharArrayAnsi::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ }
+ CONTRACTL_END;
+
+ *ppProtectedCLRValue = AllocatePrimitiveArray(ELEMENT_TYPE_CHAR, m_numElems);
+
+ MultiByteToWideChar(CP_ACP,
+ MB_PRECOMPOSED,
+ (const CHAR *)pNativeValue,
+ m_numElems * sizeof(CHAR), // size, in bytes, of in buffer
+ (WCHAR*) ((*((I2ARRAYREF*)ppProtectedCLRValue))->GetDirectPointerToNonObjectElements()),
+ m_numElems); // size, in WCHAR's of outbuffer
+}
+
+#endif // CROSSGEN_COMPILE
+
+
+//=======================================================================
+// Embedded array
+// See FieldMarshaler for details.
+//=======================================================================
+FieldMarshaler_FixedArray::FieldMarshaler_FixedArray(IMDInternalImport *pMDImport, mdTypeDef cl, UINT32 numElems, VARTYPE vt, MethodTable* pElementMT)
+: m_numElems(numElems)
+, m_vt(vt)
+, m_BestFitMap(FALSE)
+, m_ThrowOnUnmappableChar(FALSE)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pElementMT));
+ PRECONDITION(vt != VTHACK_ANSICHAR); // This must be handled by the FixedCharArrayAnsi marshaler.
+ }
+ CONTRACTL_END;
+
+ // Only attempt to read the best fit mapping attribute if required to minimize
+ // custom attribute accesses.
+ if (vt == VT_LPSTR || vt == VT_RECORD)
+ {
+ BOOL BestFitMap = FALSE;
+ BOOL ThrowOnUnmappableChar = FALSE;
+ ReadBestFitCustomAttribute(pMDImport, cl, &BestFitMap, &ThrowOnUnmappableChar);
+ m_BestFitMap = !!BestFitMap;
+ m_ThrowOnUnmappableChar = !!ThrowOnUnmappableChar;
+ }
+
+ m_arrayType.SetValue(ClassLoader::LoadArrayTypeThrowing(TypeHandle(pElementMT),
+ ELEMENT_TYPE_SZARRAY,
+ 0,
+ ClassLoader::LoadTypes,
+ pElementMT->GetLoadLevel()));
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+//=======================================================================
+// Embedded array
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_FixedArray::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ if (*pCLRValue == NULL)
+ {
+ FillMemory(pNativeValue, NativeSize(), 0);
+ }
+ else
+ {
+ // Make sure the array is at least as large as specified in the MarshalAs attribute (via the SizeConst field).
+ if ((*pCLRValue)->GetNumComponents() < m_numElems)
+ COMPlusThrow(kArgumentException, IDS_WRONGSIZEARRAY_IN_NSTRUCT);
+
+ // Marshal the contents from the managed array to the native array.
+ const OleVariant::Marshaler *pMarshaler = OleVariant::GetMarshalerForVarType(m_vt, TRUE);
+ if (pMarshaler == NULL || pMarshaler->ComToOleArray == NULL)
+ {
+ memcpyNoGCRefs(pNativeValue, (*(BASEARRAYREF*)pCLRValue)->GetDataPtr(), NativeSize());
+ }
+ else
+ {
+ MethodTable *pElementMT = m_arrayType.GetValue().AsArray()->GetArrayElementTypeHandle().GetMethodTable();
+
+ // We never operate on an uninitialized native layout here, we have zero'ed it if needed.
+ // Therefore fOleArrayIsValid is always TRUE.
+ pMarshaler->ComToOleArray((BASEARRAYREF*)pCLRValue, pNativeValue, pElementMT, m_BestFitMap, m_ThrowOnUnmappableChar, TRUE);
+ }
+ }
+}
+
+
+//=======================================================================
+// Embedded array
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_FixedArray::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ }
+ CONTRACTL_END;
+
+ // Allocate the value class array.
+ *ppProtectedCLRValue = AllocateArrayEx(m_arrayType.GetValue(), (INT32*)&m_numElems, 1);
+
+ // Marshal the contents from the native array to the managed array.
+ const OleVariant::Marshaler *pMarshaler = OleVariant::GetMarshalerForVarType(m_vt, TRUE);
+ if (pMarshaler == NULL || pMarshaler->OleToComArray == NULL)
+ {
+ memcpyNoGCRefs((*(BASEARRAYREF*)ppProtectedCLRValue)->GetDataPtr(), pNativeValue, NativeSize());
+ }
+ else
+ {
+ MethodTable *pElementMT = m_arrayType.GetValue().AsArray()->GetArrayElementTypeHandle().GetMethodTable();
+ pMarshaler->OleToComArray((VOID *)pNativeValue, (BASEARRAYREF*)ppProtectedCLRValue, pElementMT);
+ }
+}
+
+//=======================================================================
+// Embedded array
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_FixedArray::DestroyNativeImpl(LPVOID pNativeValue) const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ const OleVariant::Marshaler *pMarshaler = OleVariant::GetMarshalerForVarType(m_vt, FALSE);
+
+ if (pMarshaler != NULL && pMarshaler->ClearOleArray != NULL)
+ {
+ MethodTable *pElementMT = m_arrayType.GetValue().AsArray()->GetArrayElementTypeHandle().GetMethodTable();
+ pMarshaler->ClearOleArray(pNativeValue, m_numElems, pElementMT);
+ }
+}
+
+#endif // CROSSGEN_COMPILE
+
+
+//=======================================================================
+// Embedded array
+// See FieldMarshaler for details.
+//=======================================================================
+UINT32 FieldMarshaler_FixedArray::AlignmentRequirementImpl() const
+{
+ WRAPPER_NO_CONTRACT;
+
+ UINT32 alignment = 0;
+ TypeHandle elementType = m_arrayType.GetValue().AsArray()->GetArrayElementTypeHandle();
+
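+ // Alignment follows the element type: DECIMAL and VARIANT both contain
+ // 8-byte members, records use their layout's largest member alignment, and
+ // for the remaining (primitive) VARTYPEs the element size doubles as the
+ // alignment requirement.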
+ switch (m_vt)
+ {
+ case VT_DECIMAL:
+ alignment = 8;
+ break;
+
+ case VT_VARIANT:
+ alignment = 8;
+ break;
+
+ case VT_RECORD:
+ alignment = elementType.GetMethodTable()->GetLayoutInfo()->GetLargestAlignmentRequirementOfAllMembers();
+ break;
+
+ default:
+ alignment = OleVariant::GetElementSizeForVarType(m_vt, elementType.GetMethodTable());
+ break;
+ }
+
+ return alignment;
+}
+
+#ifndef CROSSGEN_COMPILE
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+//=======================================================================
+// SafeArray
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_SafeArray::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ BASEARRAYREF pArray;
+ *((OBJECTREF*)&pArray) = *pCLRValue;
+ if ((pArray == NULL) || (OBJECTREFToObject(pArray) == NULL))
+ {
+ FillMemory(pNativeValue, sizeof(LPSAFEARRAY*), 0);
+ return;
+ }
+
+ LPSAFEARRAY* pSafeArray;
+ pSafeArray = (LPSAFEARRAY*)pNativeValue;
+
+ VARTYPE vt = m_vt;
+ MethodTable* pMT = m_pMT.GetValue();
+
+ GCPROTECT_BEGIN(pArray)
+ {
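+ // If the marshaler was not constructed with a fixed VARTYPE / element
+ // method table, derive them from the runtime type of the managed array.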
+ if (vt == VT_EMPTY)
+ vt = OleVariant::GetElementVarTypeForArrayRef(pArray);
+
+ if (!pMT)
+ pMT = OleVariant::GetArrayElementTypeWrapperAware(&pArray).GetMethodTable();
+
+ // OleVariant calls throw on error.
+ *pSafeArray = OleVariant::CreateSafeArrayForArrayRef(&pArray, vt, pMT);
+ OleVariant::MarshalSafeArrayForArrayRef(&pArray, *pSafeArray, vt, pMT);
+ }
+ GCPROTECT_END();
+}
+
+
+//=======================================================================
+// SafeArray
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_SafeArray::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ }
+ CONTRACTL_END;
+
+ LPSAFEARRAY* pSafeArray;
+ pSafeArray = (LPSAFEARRAY*)pNativeValue;
+
+ if ((pSafeArray == NULL) || (*pSafeArray == NULL))
+ {
+ *ppProtectedCLRValue = NULL;
+ return;
+ }
+
+ VARTYPE vt = m_vt;
+ MethodTable* pMT = m_pMT.GetValue();
+
+ // If we have an empty vartype, get it from the safearray vartype
+ if (vt == VT_EMPTY)
+ {
+ if (FAILED(ClrSafeArrayGetVartype(*pSafeArray, &vt)))
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_SAFEARRAY);
+ }
+
+ // Get the method table if we need to.
+ if ((vt == VT_RECORD) && (!pMT))
+ pMT = OleVariant::GetElementTypeForRecordSafeArray(*pSafeArray).GetMethodTable();
+
+ // If we have a single dimension safearray, it will be converted into a SZArray.
+ // SZArray must have a lower bound of zero.
+ LONG LowerBound = -1;
+ UINT Dimensions = SafeArrayGetDim( (SAFEARRAY*)*pSafeArray );
+
+ if (Dimensions == 1)
+ {
+ HRESULT hr = SafeArrayGetLBound((SAFEARRAY*)*pSafeArray, 1, &LowerBound);
+ if ( FAILED(hr) || LowerBound != 0)
+ COMPlusThrow(kSafeArrayRankMismatchException, IDS_EE_SAFEARRAYSZARRAYMISMATCH);
+ }
+
+ // OleVariant calls throw on error.
+ *ppProtectedCLRValue = OleVariant::CreateArrayRefForSafeArray(*pSafeArray, vt, pMT);
+ OleVariant::MarshalArrayRefForSafeArray(*pSafeArray, (BASEARRAYREF*)ppProtectedCLRValue, vt, pMT);
+}
+
+
+//=======================================================================
+// SafeArray
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_SafeArray::DestroyNativeImpl(LPVOID pNativeValue) const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ GCX_PREEMP();
+
+ LPSAFEARRAY psa = (LPSAFEARRAY)MAYBE_UNALIGNED_READ(pNativeValue, _PTR);
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, NULL);
+
+ if (psa)
+ {
+ _ASSERTE (GetModuleHandleA("oleaut32.dll") != NULL);
+ // SafeArray has been created, which means oleaut32 should have been loaded.
+ // Delay load will not fail.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ hr = SafeArrayDestroy(psa);
+ _ASSERTE(!FAILED(hr));
+ }
+}
+#endif //FEATURE_CLASSIC_COMINTEROP
+
+
+//=======================================================================
+// function ptr <--> Delegate
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_Delegate::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ LPVOID fnPtr = COMDelegate::ConvertToCallback(*pCLRValue);
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, fnPtr);
+}
+
+
+//=======================================================================
+// function ptr <--> Delegate
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_Delegate::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ }
+ CONTRACTL_END;
+
+ *ppProtectedCLRValue = COMDelegate::ConvertToDelegate((LPVOID)MAYBE_UNALIGNED_READ(pNativeValue, _PTR), GetMethodTable());
+}
+
+
+//=======================================================================
+// SafeHandle <--> Handle
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_SafeHandle::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppCleanupWorkListOnStack, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ SAFEHANDLE *pSafeHandleObj = ((SAFEHANDLE *)pCLRValue);
+
+ // A cleanup list MUST be specified in order for us to be able to marshal
+ // the SafeHandle.
+ if (ppCleanupWorkListOnStack == NULL)
+ COMPlusThrow(kInvalidOperationException, IDS_EE_SH_FIELD_INVALID_OPERATION);
+
+ if (*pSafeHandleObj == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_SafeHandle"));
+
+ // Call StubHelpers.AddToCleanupList to AddRef and schedule a Release on this SafeHandle.
+ // This is reliable, i.e. the cleanup will happen if and only if the SH was actually AddRef'ed.
+ MethodDescCallSite AddToCleanupList(METHOD__STUBHELPERS__ADD_TO_CLEANUP_LIST);
+
+ ARG_SLOT args[] =
+ {
+ (ARG_SLOT)ppCleanupWorkListOnStack,
+ ObjToArgSlot(*pSafeHandleObj)
+ };
+
+ LPVOID handle = AddToCleanupList.Call_RetLPVOID(args);
+
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, handle);
+}
+
+
+//=======================================================================
+// SafeHandle <--> Handle
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_SafeHandle::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ PRECONDITION(CheckPointer(ppProtectedOldCLRValue));
+ }
+ CONTRACTL_END;
+
+ // Since we disallow marshaling SafeHandle fields from unmanaged to managed,
+ // check that this handle was obtained from a SafeHandle and, if so, that the
+ // handle value hasn't changed.
+ SAFEHANDLE *pSafeHandleObj = (SAFEHANDLE *)ppProtectedOldCLRValue;
+ if (!*pSafeHandleObj || (*pSafeHandleObj)->GetHandle() != (LPVOID)MAYBE_UNALIGNED_READ(pNativeValue, _PTR))
+ COMPlusThrow(kNotSupportedException, IDS_EE_CANNOT_CREATE_SAFEHANDLE_FIELD);
+
+ // Now that we know the handle hasn't changed, we simply set the new
+ // SafeHandle reference to the old one.
+ *ppProtectedCLRValue = *ppProtectedOldCLRValue;
+}
+
+
+//=======================================================================
+// CriticalHandle <--> Handle
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_CriticalHandle::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ LPVOID handle = ((CRITICALHANDLE)*pCLRValue)->GetHandle();
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, handle);
+}
+
+
+//=======================================================================
+// CriticalHandle <--> Handle
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_CriticalHandle::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ PRECONDITION(CheckPointer(ppProtectedOldCLRValue));
+ }
+ CONTRACTL_END;
+
+ // Since we disallow marshaling CriticalHandle fields from unmanaged to managed,
+ // check that this handle was obtained from a CriticalHandle and, if so, that the
+ // handle value hasn't changed.
+ CRITICALHANDLE *pCriticalHandleObj = (CRITICALHANDLE *)ppProtectedOldCLRValue;
+ if (!*pCriticalHandleObj || (*pCriticalHandleObj)->GetHandle() != (LPVOID)MAYBE_UNALIGNED_READ(pNativeValue, _PTR))
+ COMPlusThrow(kNotSupportedException, IDS_EE_CANNOT_CREATE_CRITICALHANDLE_FIELD);
+
+ // Now that we know the handle hasn't changed, we simply set the new
+ // CriticalHandle reference to the old one.
+ *ppProtectedCLRValue = *ppProtectedOldCLRValue;
+}
+
+#ifdef FEATURE_COMINTEROP
+
+//=======================================================================
+// COM IP <--> interface
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_Interface::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ IUnknown *pUnk = NULL;
+
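+ // Three cases: an explicit interface MT pins the interface to request; if
+ // none was supplied and the basic-interface flag is clear, ask for the
+ // class's default interface; otherwise fall back to the basic IUnknown or
+ // IDispatch pointer selected by the marshal flags.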
+ if (!m_pItfMT.IsNull())
+ {
+ pUnk = GetComIPFromObjectRef(pCLRValue, GetInterfaceMethodTable());
+ }
+ else if (!(m_dwFlags & ItfMarshalInfo::ITF_MARSHAL_USE_BASIC_ITF))
+ {
+ pUnk = GetComIPFromObjectRef(pCLRValue, GetMethodTable());
+ }
+ else
+ {
+ ComIpType ReqIpType = !!(m_dwFlags & ItfMarshalInfo::ITF_MARSHAL_DISP_ITF) ? ComIpType_Dispatch : ComIpType_Unknown;
+ pUnk = GetComIPFromObjectRef(pCLRValue, ReqIpType, NULL);
+ }
+
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, pUnk);
+}
+
+
+//=======================================================================
+// COM IP <--> interface
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_Interface::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ PRECONDITION(IsProtectedByGCFrame(ppProtectedCLRValue));
+ }
+ CONTRACTL_END;
+
+ IUnknown *pUnk = (IUnknown*)MAYBE_UNALIGNED_READ(pNativeValue, _PTR);
+
+ MethodTable *pItfMT = GetInterfaceMethodTable();
+ if (pItfMT != NULL && !pItfMT->IsInterface())
+ pItfMT = NULL;
+
+ GetObjectRefFromComIP(
+ ppProtectedCLRValue, // Created object
+ pUnk, // Interface pointer
+ GetMethodTable(), // Class MT
+ pItfMT, // Interface MT
+ (m_dwFlags & ItfMarshalInfo::ITF_MARSHAL_CLASS_IS_HINT) // Flags
+ );
+}
+
+
+//=======================================================================
+// COM IP <--> interface
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_Interface::DestroyNativeImpl(LPVOID pNativeValue) const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ IUnknown *pUnk = (IUnknown*)MAYBE_UNALIGNED_READ(pNativeValue, _PTR);
+ MAYBE_UNALIGNED_WRITE(pNativeValue, _PTR, NULL);
+
+ if (pUnk != NULL)
+ {
+ ULONG cbRef = SafeRelease(pUnk);
+ LogInteropRelease(pUnk, cbRef, "Field marshaler destroy native");
+ }
+}
+
+#endif // FEATURE_COMINTEROP
+
+
+//=======================================================================
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_Date::ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ // <TODO> Handle unaligned native fields </TODO>
+ *((DATE*)pNative) = COMDateTime::TicksToDoubleDate(*((INT64*)pCLR));
+}
+
+
+//=======================================================================
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_Date::ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNative));
+ PRECONDITION(CheckPointer(pCLR));
+ }
+ CONTRACTL_END;
+
+ // <TODO> Handle unaligned native fields </TODO>
+ *((INT64*)pCLR) = COMDateTime::DoubleDateToTicks(*((DATE*)pNative));
+}
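+
+// Note on the <TODO>s above: elsewhere in this file unaligned native fields are
+// handled with the MAYBE_UNALIGNED_READ/MAYBE_UNALIGNED_WRITE macros. A sketch of
+// what that could look like for the native->CLR direction (illustrative only;
+// the 64-bit access-size argument is an assumption, not committed code):
+//
+//     UINT64 rawDate = MAYBE_UNALIGNED_READ(pNative, 64);   // no 8-byte alignment assumed
+//     *((INT64*)pCLR) = COMDateTime::DoubleDateToTicks(*(DATE*)&rawDate);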
+
+
+#ifdef FEATURE_COMINTEROP
+
+//=======================================================================
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_Currency::ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ // No need to switch to preemptive mode: this is a very primitive operation that doesn't
+ // take long and is guaranteed not to call 3rd-party code.
+ // If we ever did need to switch to preemptive mode, we couldn't pass the managed pointer to native code directly, since a GC could move it.
+ HRESULT hr = VarCyFromDec( (DECIMAL *)pCLR, (CURRENCY*)pNative);
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+
+}
+
+
+//=======================================================================
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_Currency::ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pNative));
+ PRECONDITION(CheckPointer(pCLR));
+ }
+ CONTRACTL_END;
+
+ // No need to switch to preemptive mode: this is a very primitive operation that doesn't
+ // take long and is guaranteed not to call 3rd-party code.
+ // If we ever did need to switch to preemptive mode, we couldn't pass the managed pointer to native code directly, since a GC could move it.
+ HRESULT hr = VarDecFromCy( *(CURRENCY*)pNative, (DECIMAL *)pCLR );
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+
+ if (FAILED(DecimalCanonicalize((DECIMAL*)pCLR)))
+ COMPlusThrow(kOverflowException, W("Overflow_Currency"));
+}
+
+VOID FieldMarshaler_DateTimeOffset::ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ MethodDescCallSite convertToNative(METHOD__DATETIMEOFFSETMARSHALER__CONVERT_TO_NATIVE);
+ ARG_SLOT args[] =
+ {
+ PtrToArgSlot(pCLR),
+ PtrToArgSlot(pNative)
+ };
+ convertToNative.Call(args);
+}
+
+VOID FieldMarshaler_DateTimeOffset::ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNative));
+ PRECONDITION(CheckPointer(pCLR));
+ }
+ CONTRACTL_END;
+
+ MethodDescCallSite convertToManaged(METHOD__DATETIMEOFFSETMARSHALER__CONVERT_TO_MANAGED);
+ ARG_SLOT args[] =
+ {
+ PtrToArgSlot(pCLR),
+ PtrToArgSlot(pNative)
+ };
+ convertToManaged.Call(args);
+}
+
+#endif // FEATURE_COMINTEROP
+
+
+//=======================================================================
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_Illegal::ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ DefineFullyQualifiedNameForClassW();
+
+ StackSString ssFieldName(SString::Utf8, GetFieldDesc()->GetName());
+
+ StackSString errorString(W("Unknown error."));
+ errorString.LoadResource(CCompRC::Error, m_resIDWhy);
+
+ COMPlusThrow(kTypeLoadException, IDS_EE_BADMARSHALFIELD_ERROR_MSG,
+ GetFullyQualifiedNameForClassW(GetFieldDesc()->GetEnclosingMethodTable()),
+ ssFieldName.GetUnicode(), errorString.GetUnicode());
+}
+
+
+//=======================================================================
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_Illegal::ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNative));
+ PRECONDITION(CheckPointer(pCLR));
+ }
+ CONTRACTL_END;
+
+ DefineFullyQualifiedNameForClassW();
+
+ StackSString ssFieldName(SString::Utf8, GetFieldDesc()->GetName());
+
+ StackSString errorString(W("Unknown error."));
+ errorString.LoadResource(CCompRC::Error, m_resIDWhy);
+
+ COMPlusThrow(kTypeLoadException, IDS_EE_BADMARSHALFIELD_ERROR_MSG,
+ GetFullyQualifiedNameForClassW(GetFieldDesc()->GetEnclosingMethodTable()),
+ ssFieldName.GetUnicode(), errorString.GetUnicode());
+}
+
+#ifdef FEATURE_COMINTEROP
+
+
+//=======================================================================
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_Variant::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ OleVariant::MarshalOleVariantForObject(pCLRValue, (VARIANT*)pNativeValue);
+
+}
+
+
+//=======================================================================
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_Variant::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pNativeValue));
+ PRECONDITION(CheckPointer(ppProtectedCLRValue));
+ }
+ CONTRACTL_END;
+
+ OleVariant::MarshalObjectForOleVariant((VARIANT*)pNativeValue, ppProtectedCLRValue);
+}
+
+
+//=======================================================================
+// See FieldMarshaler for details.
+//=======================================================================
+VOID FieldMarshaler_Variant::DestroyNativeImpl(LPVOID pNativeValue) const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pNativeValue));
+ }
+ CONTRACTL_END;
+
+ SafeVariantClear( (VARIANT*)pNativeValue );
+}
+
+#endif // FEATURE_COMINTEROP
+
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+VOID NStructFieldTypeToString(FieldMarshaler* pFM, SString& strNStructFieldType)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pFM));
+ }
+ CONTRACTL_END;
+
+ NStructFieldType cls = pFM->GetNStructFieldType();
+ LPCWSTR strRetVal;
+ CorElementType elemType = pFM->GetFieldDesc()->GetFieldType();
+
+ // Some NStruct Field Types have extra information and require special handling.
+ if (cls == NFT_FIXEDCHARARRAYANSI)
+ {
+ strNStructFieldType.Printf(W("fixed array of ANSI char (size = %i bytes)"), pFM->NativeSize());
+ return;
+ }
+ else if (cls == NFT_FIXEDARRAY)
+ {
+ VARTYPE vtElement = ((FieldMarshaler_FixedArray*)pFM)->GetElementVT();
+ TypeHandle thElement = ((FieldMarshaler_FixedArray*)pFM)->GetElementTypeHandle();
+ BOOL fElementTypeUserDefined = FALSE;
+
+ // Determine if the array type is a user defined type.
+ if (vtElement == VT_RECORD)
+ {
+ fElementTypeUserDefined = TRUE;
+ }
+ else if (vtElement == VT_UNKNOWN || vtElement == VT_DISPATCH)
+ {
+ fElementTypeUserDefined = !thElement.IsObjectType();
+ }
+
+ // Retrieve the string representation for the VARTYPE.
+ StackSString strVarType;
+ MarshalInfo::VarTypeToString(vtElement, strVarType);
+
+ MethodTable *pMT = ((FieldMarshaler_FixedArray*)pFM)->GetElementTypeHandle().GetMethodTable();
+ DefineFullyQualifiedNameForClassW();
+ WCHAR* szClassName = (WCHAR*)GetFullyQualifiedNameForClassW(pMT);
+
+ if (fElementTypeUserDefined)
+ {
+ strNStructFieldType.Printf(W("fixed array of %s exposed as %s elements (array size = %i bytes)"),
+ szClassName,
+ strVarType.GetUnicode(), pFM->NativeSize());
+ }
+ else
+ {
+ strNStructFieldType.Printf(W("fixed array of %s (array size = %i bytes)"),
+ szClassName, pFM->NativeSize());
+ }
+
+ return;
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (cls == NFT_INTERFACE)
+ {
+ MethodTable *pItfMT = NULL;
+ DWORD dwFlags = 0;
+
+ ((FieldMarshaler_Interface*)pFM)->GetInterfaceInfo(&pItfMT, &dwFlags);
+
+ if (dwFlags & ItfMarshalInfo::ITF_MARSHAL_DISP_ITF)
+ {
+ strNStructFieldType.Set(W("IDispatch "));
+ }
+ else
+ {
+ strNStructFieldType.Set(W("IUnknown "));
+ }
+
+ if (dwFlags & ItfMarshalInfo::ITF_MARSHAL_USE_BASIC_ITF)
+ {
+ strNStructFieldType.Append(W("(basic) "));
+ }
+
+
+ if (pItfMT)
+ {
+ DefineFullyQualifiedNameForClassW();
+ GetFullyQualifiedNameForClassW(pItfMT);
+
+ strNStructFieldType.Append(GetFullyQualifiedNameForClassW(pItfMT));
+ }
+
+ return;
+ }
+#ifdef FEATURE_CLASSIC_COMINTEROP
+ else if (cls == NFT_SAFEARRAY)
+ {
+ VARTYPE vtElement = ((FieldMarshaler_SafeArray*)pFM)->GetElementVT();
+ TypeHandle thElement = ((FieldMarshaler_SafeArray*)pFM)->GetElementTypeHandle();
+ BOOL fElementTypeUserDefined = FALSE;
+
+ // Determine if the array type is a user defined type.
+ if (vtElement == VT_RECORD)
+ {
+ fElementTypeUserDefined = TRUE;
+ }
+ else if (vtElement == VT_UNKNOWN || vtElement == VT_DISPATCH)
+ {
+ fElementTypeUserDefined = !thElement.IsObjectType();
+ }
+
+ // Retrieve the string representation for the VARTYPE.
+ StackSString strVarType;
+ MarshalInfo::VarTypeToString(vtElement, strVarType);
+
+
+ StackSString strClassName;
+ if (!thElement.IsNull())
+ {
+ DefineFullyQualifiedNameForClassW();
+ MethodTable *pMT = ((FieldMarshaler_SafeArray*)pFM)->GetElementTypeHandle().GetMethodTable();
+ strClassName.Set((WCHAR*)GetFullyQualifiedNameForClassW(pMT));
+ }
+ else
+ {
+ strClassName.Set(W("object"));
+ }
+
+ if (fElementTypeUserDefined)
+ {
+ strNStructFieldType.Printf(W("safe array of %s exposed as %s elements (array size = %i bytes)"),
+ strClassName.GetUnicode(),
+ strVarType.GetUnicode(), pFM->NativeSize());
+ }
+ else
+ {
+ strNStructFieldType.Printf(W("safearray of %s (array size = %i bytes)"),
+ strClassName.GetUnicode(), pFM->NativeSize());
+ }
+
+ return;
+ }
+#endif // FEATURE_CLASSIC_COMINTEROP
+#endif // FEATURE_COMINTEROP
+ else if (cls == NFT_NESTEDLAYOUTCLASS)
+ {
+ MethodTable *pMT = ((FieldMarshaler_NestedLayoutClass*)pFM)->GetMethodTable();
+ DefineFullyQualifiedNameForClassW();
+ strNStructFieldType.Printf(W("nested layout class %s"),
+ GetFullyQualifiedNameForClassW(pMT));
+ return;
+ }
+ else if (cls == NFT_NESTEDVALUECLASS)
+ {
+ MethodTable *pMT = ((FieldMarshaler_NestedValueClass*)pFM)->GetMethodTable();
+ DefineFullyQualifiedNameForClassW();
+ strNStructFieldType.Printf(W("nested value class %s"),
+ GetFullyQualifiedNameForClassW(pMT));
+ return;
+ }
+ else if (cls == NFT_COPY1)
+ {
+ // The following CorElementTypes are the only ones handled with FieldMarshaler_Copy1.
+ switch (elemType)
+ {
+ case ELEMENT_TYPE_I1:
+ strRetVal = W("SByte");
+ break;
+
+ case ELEMENT_TYPE_U1:
+ strRetVal = W("Byte");
+ break;
+
+ default:
+ strRetVal = W("Unknown");
+ break;
+ }
+ }
+ else if (cls == NFT_COPY2)
+ {
+ // The following CorElementTypes are the only ones handled with FieldMarshaler_Copy2.
+ switch (elemType)
+ {
+ case ELEMENT_TYPE_CHAR:
+ strRetVal = W("Unicode char");
+ break;
+
+ case ELEMENT_TYPE_I2:
+ strRetVal = W("Int16");
+ break;
+
+ case ELEMENT_TYPE_U2:
+ strRetVal = W("UInt16");
+ break;
+
+ default:
+ strRetVal = W("Unknown");
+ break;
+ }
+ }
+ else if (cls == NFT_COPY4)
+ {
+ // The following CorElementTypes are the only ones handled with FieldMarshaler_Copy4.
+ switch (elemType)
+ {
+ // At this point, ELEMENT_TYPE_I must be 4 bytes long. Same for ELEMENT_TYPE_U.
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_I4:
+ strRetVal = W("Int32");
+ break;
+
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_U4:
+ strRetVal = W("UInt32");
+ break;
+
+ case ELEMENT_TYPE_R4:
+ strRetVal = W("Single");
+ break;
+
+ case ELEMENT_TYPE_PTR:
+ strRetVal = W("4-byte pointer");
+ break;
+
+ default:
+ strRetVal = W("Unknown");
+ break;
+ }
+ }
+ else if (cls == NFT_COPY8)
+ {
+ // The following CorElementTypes are the only ones handled with FieldMarshaler_Copy8.
+ switch (elemType)
+ {
+ // At this point, ELEMENT_TYPE_I must be 8 bytes long. Same for ELEMENT_TYPE_U.
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_I8:
+ strRetVal = W("Int64");
+ break;
+
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_U8:
+ strRetVal = W("UInt64");
+ break;
+
+ case ELEMENT_TYPE_R8:
+ strRetVal = W("Double");
+ break;
+
+ case ELEMENT_TYPE_PTR:
+ strRetVal = W("8-byte pointer");
+ break;
+
+ default:
+ strRetVal = W("Unknown");
+ break;
+ }
+ }
+ else if (cls == NFT_FIXEDSTRINGUNI)
+ {
+ int nativeSize = pFM->NativeSize();
+ int strLength = nativeSize / sizeof(WCHAR);
+
+ strNStructFieldType.Printf(W("embedded LPWSTR (length %d)"), strLength);
+
+ return;
+ }
+ else if (cls == NFT_FIXEDSTRINGANSI)
+ {
+ int nativeSize = pFM->NativeSize();
+ int strLength = nativeSize / sizeof(CHAR);
+
+ strNStructFieldType.Printf(W("embedded LPSTR (length %d)"), strLength);
+
+ return;
+ }
+ else
+ {
+ // All other NStruct Field Types which do not require special handling.
+ switch (cls)
+ {
+#ifdef FEATURE_COMINTEROP
+ case NFT_BSTR:
+ strRetVal = W("BSTR");
+ break;
+ case NFT_HSTRING:
+ strRetVal = W("HSTRING");
+ break;
+#endif // FEATURE_COMINTEROP
+ case NFT_STRINGUNI:
+ strRetVal = W("LPWSTR");
+ break;
+ case NFT_STRINGANSI:
+ strRetVal = W("LPSTR");
+ break;
+ case NFT_DELEGATE:
+ strRetVal = W("Delegate");
+ break;
+#ifdef FEATURE_COMINTEROP
+ case NFT_VARIANT:
+ strRetVal = W("VARIANT");
+ break;
+#endif // FEATURE_COMINTEROP
+ case NFT_ANSICHAR:
+ strRetVal = W("ANSI char");
+ break;
+ case NFT_WINBOOL:
+ strRetVal = W("Windows Bool");
+ break;
+ case NFT_CBOOL:
+ strRetVal = W("CBool");
+ break;
+ case NFT_DECIMAL:
+ strRetVal = W("DECIMAL");
+ break;
+ case NFT_DATE:
+ strRetVal = W("DATE");
+ break;
+#ifdef FEATURE_COMINTEROP
+ case NFT_VARIANTBOOL:
+ strRetVal = W("VARIANT Bool");
+ break;
+ case NFT_CURRENCY:
+ strRetVal = W("CURRENCY");
+ break;
+#endif // FEATURE_COMINTEROP
+ case NFT_ILLEGAL:
+ strRetVal = W("illegal type");
+ break;
+ case NFT_SAFEHANDLE:
+ strRetVal = W("SafeHandle");
+ break;
+ case NFT_CRITICALHANDLE:
+ strRetVal = W("CriticalHandle");
+ break;
+ default:
+ strRetVal = W("<UNKNOWN>");
+ break;
+ }
+ }
+
+ strNStructFieldType.Set(strRetVal);
+
+ return;
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+#endif // CROSSGEN_COMPILE
+
+
+//
+// Implementation of the virtual functions using switch statements.
+//
+// We are not able to bake pointers to the FieldMarshaler vtables into NGen images. We store
+// the field marshaler id instead, and implement the virtual dispatch with a switch on that id.
+//
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+#define FieldMarshaler_SafeArray_Case(rettype, name, args) case NFT_SAFEARRAY: rettype ((FieldMarshaler_SafeArray*)this)->name##Impl args; break;
+#else
+#define FieldMarshaler_SafeArray_Case(rettype, name, args)
+#endif
+
+#ifdef FEATURE_COMINTEROP
+
+#define IMPLEMENT_FieldMarshaler_METHOD(ret, name, argsdecl, rettype, args) \
+ ret FieldMarshaler::name argsdecl { \
+ WRAPPER_NO_CONTRACT; \
+ switch (GetNStructFieldType()) { \
+ case NFT_STRINGUNI: rettype ((FieldMarshaler_StringUni*)this)->name##Impl args; break; \
+ case NFT_STRINGANSI: rettype ((FieldMarshaler_StringAnsi*)this)->name##Impl args; break; \
+ case NFT_FIXEDSTRINGUNI: rettype ((FieldMarshaler_FixedStringUni*)this)->name##Impl args; break; \
+ case NFT_FIXEDSTRINGANSI: rettype ((FieldMarshaler_FixedStringAnsi*)this)->name##Impl args; break; \
+ case NFT_FIXEDCHARARRAYANSI: rettype ((FieldMarshaler_FixedCharArrayAnsi*)this)->name##Impl args; break; \
+ case NFT_FIXEDARRAY: rettype ((FieldMarshaler_FixedArray*)this)->name##Impl args; break; \
+ case NFT_DELEGATE: rettype ((FieldMarshaler_Delegate*)this)->name##Impl args; break; \
+ case NFT_COPY1: rettype ((FieldMarshaler_Copy1*)this)->name##Impl args; break; \
+ case NFT_COPY2: rettype ((FieldMarshaler_Copy2*)this)->name##Impl args; break; \
+ case NFT_COPY4: rettype ((FieldMarshaler_Copy4*)this)->name##Impl args; break; \
+ case NFT_COPY8: rettype ((FieldMarshaler_Copy8*)this)->name##Impl args; break; \
+ case NFT_ANSICHAR: rettype ((FieldMarshaler_Ansi*)this)->name##Impl args; break; \
+ case NFT_WINBOOL: rettype ((FieldMarshaler_WinBool*)this)->name##Impl args; break; \
+ case NFT_NESTEDLAYOUTCLASS: rettype ((FieldMarshaler_NestedLayoutClass*)this)->name##Impl args; break; \
+ case NFT_NESTEDVALUECLASS: rettype ((FieldMarshaler_NestedValueClass*)this)->name##Impl args; break; \
+ case NFT_CBOOL: rettype ((FieldMarshaler_CBool*)this)->name##Impl args; break; \
+ case NFT_DATE: rettype ((FieldMarshaler_Date*)this)->name##Impl args; break; \
+ case NFT_DECIMAL: rettype ((FieldMarshaler_Decimal*)this)->name##Impl args; break; \
+ case NFT_INTERFACE: rettype ((FieldMarshaler_Interface*)this)->name##Impl args; break; \
+ case NFT_SAFEHANDLE: rettype ((FieldMarshaler_SafeHandle*)this)->name##Impl args; break; \
+ case NFT_CRITICALHANDLE: rettype ((FieldMarshaler_CriticalHandle*)this)->name##Impl args; break; \
+ FieldMarshaler_SafeArray_Case(rettype, name, args) \
+ case NFT_BSTR: rettype ((FieldMarshaler_BSTR*)this)->name##Impl args; break; \
+ case NFT_HSTRING: rettype ((FieldMarshaler_HSTRING*)this)->name##Impl args; break; \
+ case NFT_VARIANT: rettype ((FieldMarshaler_Variant*)this)->name##Impl args; break; \
+ case NFT_VARIANTBOOL: rettype ((FieldMarshaler_VariantBool*)this)->name##Impl args; break; \
+ case NFT_CURRENCY: rettype ((FieldMarshaler_Currency*)this)->name##Impl args; break; \
+ case NFT_DATETIMEOFFSET: rettype ((FieldMarshaler_DateTimeOffset*)this)->name##Impl args; break; \
+ case NFT_SYSTEMTYPE: rettype ((FieldMarshaler_SystemType *)this)->name##Impl args; break; \
+ case NFT_WINDOWSFOUNDATIONHRESULT: rettype ((FieldMarshaler_Exception*)this)->name##Impl args; break; \
+ case NFT_WINDOWSFOUNDATIONIREFERENCE: rettype ((FieldMarshaler_Nullable*)this)->name##Impl args; break; \
+ case NFT_ILLEGAL: rettype ((FieldMarshaler_Illegal*)this)->name##Impl args; break; \
+ default: UNREACHABLE_MSG("unexpected type of FieldMarshaler"); break; \
+ } \
+ }
+
+#else // FEATURE_COMINTEROP
+
+#define IMPLEMENT_FieldMarshaler_METHOD(ret, name, argsdecl, rettype, args) \
+ ret FieldMarshaler::name argsdecl { \
+ WRAPPER_NO_CONTRACT; \
+ switch (GetNStructFieldType()) { \
+ case NFT_STRINGUNI: rettype ((FieldMarshaler_StringUni*)this)->name##Impl args; break; \
+ case NFT_STRINGANSI: rettype ((FieldMarshaler_StringAnsi*)this)->name##Impl args; break; \
+ case NFT_FIXEDSTRINGUNI: rettype ((FieldMarshaler_FixedStringUni*)this)->name##Impl args; break; \
+ case NFT_FIXEDSTRINGANSI: rettype ((FieldMarshaler_FixedStringAnsi*)this)->name##Impl args; break; \
+ case NFT_FIXEDCHARARRAYANSI: rettype ((FieldMarshaler_FixedCharArrayAnsi*)this)->name##Impl args; break; \
+ case NFT_FIXEDARRAY: rettype ((FieldMarshaler_FixedArray*)this)->name##Impl args; break; \
+ case NFT_DELEGATE: rettype ((FieldMarshaler_Delegate*)this)->name##Impl args; break; \
+ case NFT_COPY1: rettype ((FieldMarshaler_Copy1*)this)->name##Impl args; break; \
+ case NFT_COPY2: rettype ((FieldMarshaler_Copy2*)this)->name##Impl args; break; \
+ case NFT_COPY4: rettype ((FieldMarshaler_Copy4*)this)->name##Impl args; break; \
+ case NFT_COPY8: rettype ((FieldMarshaler_Copy8*)this)->name##Impl args; break; \
+ case NFT_ANSICHAR: rettype ((FieldMarshaler_Ansi*)this)->name##Impl args; break; \
+ case NFT_WINBOOL: rettype ((FieldMarshaler_WinBool*)this)->name##Impl args; break; \
+ case NFT_NESTEDLAYOUTCLASS: rettype ((FieldMarshaler_NestedLayoutClass*)this)->name##Impl args; break; \
+ case NFT_NESTEDVALUECLASS: rettype ((FieldMarshaler_NestedValueClass*)this)->name##Impl args; break; \
+ case NFT_CBOOL: rettype ((FieldMarshaler_CBool*)this)->name##Impl args; break; \
+ case NFT_DATE: rettype ((FieldMarshaler_Date*)this)->name##Impl args; break; \
+ case NFT_DECIMAL: rettype ((FieldMarshaler_Decimal*)this)->name##Impl args; break; \
+ case NFT_SAFEHANDLE: rettype ((FieldMarshaler_SafeHandle*)this)->name##Impl args; break; \
+ case NFT_CRITICALHANDLE: rettype ((FieldMarshaler_CriticalHandle*)this)->name##Impl args; break; \
+ case NFT_ILLEGAL: rettype ((FieldMarshaler_Illegal*)this)->name##Impl args; break; \
+ default: UNREACHABLE_MSG("unexpected type of FieldMarshaler"); break; \
+ } \
+ }
+
+#endif // FEATURE_COMINTEROP
+
+
+IMPLEMENT_FieldMarshaler_METHOD(UINT32, NativeSize,
+ () const,
+ return,
+ ())
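+
+// For illustration, the invocation above expands (roughly, abbreviated to two of
+// the NFT_* cases) into a plain switch-based dispatcher:
+//
+//     UINT32 FieldMarshaler::NativeSize() const
+//     {
+//         WRAPPER_NO_CONTRACT;
+//         switch (GetNStructFieldType()) {
+//             case NFT_STRINGUNI: return ((FieldMarshaler_StringUni*)this)->NativeSizeImpl(); break;
+//             case NFT_COPY4:     return ((FieldMarshaler_Copy4*)this)->NativeSizeImpl(); break;
+//             // ... one case per NFT_* id ...
+//             default: UNREACHABLE_MSG("unexpected type of FieldMarshaler"); break;
+//         }
+//     }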
+
+IMPLEMENT_FieldMarshaler_METHOD(UINT32, AlignmentRequirement,
+ () const,
+ return,
+ ())
+
+IMPLEMENT_FieldMarshaler_METHOD(BOOL, IsScalarMarshaler,
+ () const,
+ return,
+ ())
+
+IMPLEMENT_FieldMarshaler_METHOD(BOOL, IsNestedValueClassMarshaler,
+ () const,
+ return,
+ ())
+
+#ifndef CROSSGEN_COMPILE
+IMPLEMENT_FieldMarshaler_METHOD(VOID, UpdateNative,
+ (OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const,
+ ,
+ (pCLRValue, pNativeValue, ppCleanupWorkListOnStack))
+
+IMPLEMENT_FieldMarshaler_METHOD(VOID, UpdateCLR,
+ (const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const,
+ ,
+ (pNativeValue, ppProtectedCLRValue, ppProtectedOldCLRValue))
+
+IMPLEMENT_FieldMarshaler_METHOD(VOID, DestroyNative,
+ (LPVOID pNativeValue) const,
+ ,
+ (pNativeValue))
+
+IMPLEMENT_FieldMarshaler_METHOD(VOID, ScalarUpdateNative,
+ (LPVOID pCLR, LPVOID pNative) const,
+ return,
+ (pCLR, pNative))
+
+IMPLEMENT_FieldMarshaler_METHOD(VOID, ScalarUpdateCLR,
+ (const VOID *pNative, LPVOID pCLR) const,
+ return,
+ (pNative, pCLR))
+
+IMPLEMENT_FieldMarshaler_METHOD(VOID, NestedValueClassUpdateNative,
+ (const VOID **ppProtectedCLR, SIZE_T startoffset, LPVOID pNative, OBJECTREF *ppCleanupWorkListOnStack) const,
+ ,
+ (ppProtectedCLR, startoffset, pNative, ppCleanupWorkListOnStack))
+
+IMPLEMENT_FieldMarshaler_METHOD(VOID, NestedValueClassUpdateCLR,
+ (const VOID *pNative, LPVOID *ppProtectedCLR, SIZE_T startoffset) const,
+ ,
+ (pNative, ppProtectedCLR, startoffset))
+#endif // CROSSGEN_COMPILE
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+IMPLEMENT_FieldMarshaler_METHOD(void, Save,
+ (DataImage *image),
+ ,
+ (image))
+
+IMPLEMENT_FieldMarshaler_METHOD(void, Fixup,
+ (DataImage *image),
+ ,
+ (image))
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+IMPLEMENT_FieldMarshaler_METHOD(void, Restore,
+ (),
+ ,
+ ())
diff --git a/src/vm/fieldmarshaler.h b/src/vm/fieldmarshaler.h
new file mode 100644
index 0000000000..d67637e27c
--- /dev/null
+++ b/src/vm/fieldmarshaler.h
@@ -0,0 +1,1956 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: FieldMarshaler.h
+//
+
+//
+// FieldMarshalers are used to allow CLR programs to allocate and access
+// native structures for interop purposes. FieldMarshalers are actually normal GC
+// objects with a class, but instead of keeping fields in the GC object,
+// they keep a hidden pointer to a fixed memory block (which may have been
+// allocated by a third party). Field accesses to FieldMarshalers are redirected
+// to this fixed block.
+//
+
+
+#ifndef __FieldMarshaler_h__
+#define __FieldMarshaler_h__
+
+#ifdef BINDER
+
+// TritonToDo: why don't we use the value from vm\<cpu>\cgencpu.h?
+#if defined(_X86_) || defined(_ARM_)
+#define MAXFIELDMARSHALERSIZE 24
+#else
+#error "port field marshaler size"
+#endif
+
+typedef unsigned char U1;
+
+#else //BINDER
+#include "util.hpp"
+#include "mlinfo.h"
+#include "eeconfig.h"
+#include "olevariant.h"
+#endif // BINDER
+
+#ifdef FEATURE_COMINTEROP
+#endif // FEATURE_COMINTEROP
+
+#ifndef BINDER
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif // FEATURE_PREJIT
+#endif // BINDER
+
+// Forward references
+class EEClassLayoutInfo;
+class FieldDesc;
+class MethodTable;
+
+class FieldMarshaler_NestedLayoutClass;
+class FieldMarshaler_NestedValueClass;
+class FieldMarshaler_StringUni;
+class FieldMarshaler_StringAnsi;
+class FieldMarshaler_FixedStringUni;
+class FieldMarshaler_FixedStringAnsi;
+class FieldMarshaler_FixedArray;
+class FieldMarshaler_FixedCharArrayAnsi;
+class FieldMarshaler_Delegate;
+class FieldMarshaler_Illegal;
+class FieldMarshaler_Copy1;
+class FieldMarshaler_Copy2;
+class FieldMarshaler_Copy4;
+class FieldMarshaler_Copy8;
+class FieldMarshaler_Ansi;
+class FieldMarshaler_WinBool;
+class FieldMarshaler_CBool;
+class FieldMarshaler_Decimal;
+class FieldMarshaler_Date;
+#ifdef FEATURE_COMINTEROP
+class FieldMarshaler_SafeArray;
+class FieldMarshaler_BSTR;
+class FieldMarshaler_HSTRING;
+class FieldMarshaler_Interface;
+class FieldMarshaler_Variant;
+class FieldMarshaler_VariantBool;
+class FieldMarshaler_DateTimeOffset;
+class FieldMarshaler_SystemType;
+class FieldMarshaler_Exception;
+class FieldMarshaler_Nullable;
+#endif // FEATURE_COMINTEROP
+
+VOID NStructFieldTypeToString(FieldMarshaler* pFM, SString& strNStructFieldType);
+
+//=======================================================================
+// Each possible COM+/Native data type pairing has an
+// NFT_* id. This is used to select the marshaling code.
+//=======================================================================
+#undef DEFINE_NFT
+#define DEFINE_NFT(name, nativesize, fWinRTSupported) name,
+enum NStructFieldType
+{
+#include "nsenums.h"
+ NFT_COUNT
+};
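+
+// nsenums.h is an X-macro table: every line is DEFINE_NFT(name, nativesize,
+// fWinRTSupported), and each includer redefines DEFINE_NFT to pull out the column
+// it needs (here, just the name, to build the enum). A minimal sketch of the
+// pattern with hypothetical entries (not the real contents of nsenums.h):
+//
+//     // nsenums.h
+//     DEFINE_NFT(NFT_NONE,      0,              FALSE)
+//     DEFINE_NFT(NFT_STRINGUNI, sizeof(LPWSTR), TRUE)
+//
+//     // a consumer that wants a native-size lookup table instead of enum values:
+//     #undef DEFINE_NFT
+//     #define DEFINE_NFT(name, nativesize, fWinRTSupported) nativesize,
+//     static const UINT32 s_NFTNativeSizes[] =
+//     {
+//     #include "nsenums.h"
+//     };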
+
+
+//=======================================================================
+// Magic number for default struct packing size.
+//=======================================================================
+#define DEFAULT_PACKING_SIZE 8
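+
+// Example (assuming typical x86/amd64 layout rules): with the default packing of 8,
+// a native struct { BYTE b; INT64 i; } places b at offset 0 and i at offset 8,
+// with 7 bytes of padding in between, because i's alignment is min(8, packing) = 8.
+// With a packing size of 1, i would instead land at offset 1 with no padding.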
+
+
+//=======================================================================
+// This is invoked from the class loader while building the data structures for a type.
+// This function checks if explicit layout metadata exists.
+//
+// Returns:
+// TRUE - yes, there's layout metadata
+// FALSE - no, there's no layout.
+// fail - throws a typeload exception
+//
+// If TRUE,
+// *pNLTType gets set to nltAnsi or nltUnicode
+// *pPackingSize gets the declared packing size
+// *pfExplicitOffsets says whether offsets are explicit in metadata or computed
+//=======================================================================
+BOOL HasLayoutMetadata(Assembly* pAssembly, IMDInternalImport *pInternalImport, mdTypeDef cl,
+ MethodTable *pParentMT, BYTE *pPackingSize, BYTE *pNLTType,
+ BOOL *pfExplicitOffsets);
+
+
+//=======================================================================
+// This function returns TRUE if the type passed in is either a value class or a class and if it has layout information
+// and is marshalable. In all other cases it will return FALSE.
+//=======================================================================
+BOOL IsStructMarshalable(TypeHandle th);
+
+//=======================================================================
+// The classloader stores an intermediate representation of the layout
+// metadata in an array of these structures. The dual-pass nature adds
+// a bit of overhead, but building this structure requires loading
+// other classes (for nested structures), and I'd rather keep this
+// next to the other places where we load other classes (e.g. the superclass
+// and implemented interfaces).
+//
+// Each redirected field gets one entry in LayoutRawFieldInfo.
+// The array is terminated by one dummy record whose m_MD == mdMemberDefNil.
+// WARNING!! Before you change this struct see the comment above the m_FieldMarshaler field
+//=======================================================================
+struct LayoutRawFieldInfo
+{
+ mdFieldDef m_MD; // mdMemberDefNil for end of array
+ UINT8 m_nft; // NFT_* value
+ UINT32 m_offset; // native offset of field
+ UINT32 m_cbNativeSize; // native size of field in bytes
+ ULONG m_sequence; // sequence # from metadata
+ BOOL m_fIsOverlapped;
+
+
+ //----- Post v1.0 addition: The LayoutKind.Sequential attribute now affects managed layout as well.
+ //----- So we need to keep a parallel set of layout data for the managed side. The Size and AlignmentReq
+ //----- is redundant since we can figure it out from the sig but since we're already accessing the sig
+ //----- in ParseNativeType, we might as well capture it at that time.
+ UINT32 m_managedSize; // managed size of field
+ UINT32 m_managedAlignmentReq; // natural alignment of field
+ UINT32 m_managedOffset; // managed offset of field
+ UINT32 m_pad; // needed to keep m_FieldMarshaler 8-byte aligned
+
+ // WARNING!
+ // We in-place create a field marshaler in the following
+ // memory, so keep it 8-byte aligned or
+ // the vtable pointer initialization will cause a
+ // misaligned memory write on IA64.
+ // The entire struct's size must also be multiple of 8 bytes
+ struct
+ {
+ private:
+ char m_space[MAXFIELDMARSHALERSIZE];
+ } m_FieldMarshaler;
+};
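+
+// A sketch of compile-time checks matching the warning above (illustrative;
+// these asserts are not part of the existing code):
+//
+//     static_assert(offsetof(LayoutRawFieldInfo, m_FieldMarshaler) % 8 == 0,
+//                   "m_FieldMarshaler must stay 8-byte aligned");
+//     static_assert(sizeof(LayoutRawFieldInfo) % 8 == 0,
+//                   "LayoutRawFieldInfo size must be a multiple of 8 bytes");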
+
+
+//=======================================================================
+//
+//=======================================================================
+
+VOID LayoutUpdateNative(LPVOID *ppProtectedManagedData, SIZE_T offsetbias, MethodTable *pMT, BYTE* pNativeData, OBJECTREF *ppCleanupWorkListOnStack);
+VOID LayoutUpdateCLR(LPVOID *ppProtectedManagedData, SIZE_T offsetbias, MethodTable *pMT, BYTE *pNativeData);
+VOID LayoutDestroyNative(LPVOID pNative, MethodTable *pMT);
+
+VOID FmtClassUpdateNative(OBJECTREF *ppProtectedManagedData, BYTE *pNativeData, OBJECTREF *ppCleanupWorkListOnStack);
+VOID FmtClassUpdateCLR(OBJECTREF *ppProtectedManagedData, BYTE *pNativeData);
+VOID FmtClassDestroyNative(LPVOID pNative, MethodTable *pMT);
+
+VOID FmtValueTypeUpdateNative(LPVOID pProtectedManagedData, MethodTable *pMT, BYTE *pNativeData, OBJECTREF *ppCleanupWorkListOnStack);
+VOID FmtValueTypeUpdateCLR(LPVOID pProtectedManagedData, MethodTable *pMT, BYTE *pNativeData);
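+
+// Typical call pattern for the FmtClass helpers (a sketch; the real call sites
+// live in the interop stubs, and CallSomeNativeFunction is hypothetical):
+//
+//     FmtClassUpdateNative(&protectedORef, pNativeBuf, NULL); // managed -> native
+//     CallSomeNativeFunction(pNativeBuf);                     // native code mutates the buffer
+//     FmtClassUpdateCLR(&protectedORef, pNativeBuf);          // native -> managed
+//     FmtClassDestroyNative(pNativeBuf, pMT);                 // release native resources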
+
+
+//=======================================================================
+// Abstract base class. Each type of NStruct reference field extends
+// this class and implements the necessary methods.
+//
+// UpdateNativeImpl
+// - This method receives a COM+ field value and a pointer to the
+// native field inside the fixed portion. It should marshal
+// the COM+ value to a new native instance and store it
+// inside *pNativeValue. Do not destroy the value you overwrite
+// in *pNativeValue.
+//
+// May throw CLR exceptions.
+//
+// UpdateCLRImpl
+// - This method receives a read-only pointer to the native field inside
+// the fixed portion. It should marshal the native value to
+// a new CLR instance and store it in *ppProtectedCLRValue.
+// (The caller keeps *ppProtectedCLRValue GC-protected.)
+//
+// May throw CLR exceptions.
+//
+// DestroyNativeImpl
+// - Should do the type-specific deallocation of a native instance.
+// If the type has a "NULL" value, this method should
+// overwrite the field with that "NULL" value (either way,
+// it is considered a bug to depend on the value left in the
+// field after a DestroyNativeImpl).
+//
+// Must NOT throw a CLR exception.
+//
+// NativeSizeImpl
+// - Returns the size, in bytes, of the native version of the field.
+//
+// AlignmentRequirementImpl
+// - Returns 1, 2, 4, or 8, indicating the "natural" alignment
+// of the native field. In general:
+//
+// for scalars, the AR is equal to the size
+// for arrays, the AR is that of a single element
+// for structs, the AR is that of the member with the largest AR
+//
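+// Worked example for the rules above: a native struct { char c; int i; } has
+// AR(char) = 1 and AR(int) = 4, so the struct's AR is 4; an embedded array
+// int[10] likewise has AR = 4 (that of a single element), not 40.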
+//
+//=======================================================================
+
+
+#ifndef DACCESS_COMPILE
+
+#define UNUSED_METHOD_IMPL(PROTOTYPE) \
+ PROTOTYPE \
+ { \
+ LIMITED_METHOD_CONTRACT; \
+ _ASSERTE(!"Not supposed to get here."); \
+ }
+
+#define ELEMENT_SIZE_IMPL(NativeSize, AlignmentReq) \
+ UINT32 NativeSizeImpl() const \
+ { \
+ LIMITED_METHOD_CONTRACT; \
+ return NativeSize; \
+ } \
+ UINT32 AlignmentRequirementImpl() const \
+ { \
+ LIMITED_METHOD_CONTRACT; \
+ return AlignmentReq; \
+ }
+
+#define SCALAR_MARSHALER_IMPL(NativeSize, AlignmentReq) \
+ BOOL IsScalarMarshalerImpl() const \
+ { \
+ LIMITED_METHOD_CONTRACT; \
+ return TRUE; \
+ } \
+ ELEMENT_SIZE_IMPL(NativeSize, AlignmentReq)
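+
+// For illustration: a marshaler that writes ELEMENT_SIZE_IMPL(sizeof(BSTR),
+// sizeof(BSTR)) gets these two trivial accessors stamped into its body
+// (contracts elided):
+//
+//     UINT32 NativeSizeImpl() const           { return sizeof(BSTR); }
+//     UINT32 AlignmentRequirementImpl() const { return sizeof(BSTR); }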
+
+
+//=======================================================================
+//
+// FieldMarshalers are constructed in place and replicated via bit-wise
+// copy, so you can't have a destructor. Make sure you don't define a
+// destructor in derived classes!!
+// We used to enforce this by defining a private destructor, but the C++
+// compiler doesn't allow that anymore.
+//
+//=======================================================================
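+
+// ("Constructed in place" means each marshaler is placement-new'd into the
+// MAXFIELDMARSHALERSIZE-byte slot of a LayoutRawFieldInfo entry, roughly
+// new (pSlot) FieldMarshaler_Copy4(); a sketch of the idea, not a quote of
+// the actual construction site.)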
+
+class FieldMarshaler
+{
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+
+public:
+ VOID UpdateNative(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLR(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+ VOID DestroyNative(LPVOID pNativeValue) const;
+ UINT32 NativeSize() const;
+ UINT32 AlignmentRequirement() const;
+ BOOL IsScalarMarshaler() const;
+ BOOL IsNestedValueClassMarshaler() const;
+ VOID ScalarUpdateNative(LPVOID pCLR, LPVOID pNative) const;
+ VOID ScalarUpdateCLR(const VOID *pNative, LPVOID pCLR) const;
+ VOID NestedValueClassUpdateNative(const VOID **ppProtectedCLR, SIZE_T startoffset, LPVOID pNative, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID NestedValueClassUpdateCLR(const VOID *pNative, LPVOID *ppProtectedCLR, SIZE_T startoffset) const;
+#ifdef FEATURE_PREJIT
+ void Save(DataImage *image);
+ void Fixup(DataImage *image);
+#endif // FEATURE_PREJIT
+ void Restore();
+
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ BOOL IsScalarMarshalerImpl() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+ }
+
+ BOOL IsNestedValueClassMarshalerImpl() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+ }
+
+ UNUSED_METHOD_IMPL(VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const)
+ UNUSED_METHOD_IMPL(VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const)
+ UNUSED_METHOD_IMPL(VOID NestedValueClassUpdateNativeImpl(const VOID **ppProtectedCLR, SIZE_T startoffset, LPVOID pNative, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID NestedValueClassUpdateCLRImpl(const VOID *pNative, LPVOID *ppProtectedCLR, SIZE_T startoffset) const)
+
+ //
+ // Methods for saving & restoring in prejitted images:
+ //
+
+ NStructFieldType GetNStructFieldType() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_nft;
+ }
+
+ void SetNStructFieldType(NStructFieldType nft)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_nft = nft;
+ }
+
+#ifdef FEATURE_PREJIT
+ void SaveImpl(DataImage *image)
+ {
+ STANDARD_VM_CONTRACT;
+ }
+
+ void FixupImpl(DataImage *image)
+ {
+ STANDARD_VM_CONTRACT;
+
+ image->FixupFieldDescPointer(this, &m_pFD);
+ }
+#endif // FEATURE_PREJIT
+
+ void RestoreImpl()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef BINDER
+#ifdef FEATURE_PREJIT
+ Module::RestoreFieldDescPointer(&m_pFD);
+#endif // FEATURE_PREJIT
+#endif // BINDER
+ }
+
+ void SetFieldDesc(FieldDesc* pFD)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pFD.SetValue(pFD);
+ }
+
+ FieldDesc* GetFieldDesc() const
+ {
+ CONTRACT (FieldDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN m_pFD.GetValue();
+ }
+
+ void SetExternalOffset(UINT32 dwExternalOffset)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_dwExternalOffset = dwExternalOffset;
+ }
+
+ UINT32 GetExternalOffset()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwExternalOffset;
+ }
+
+protected:
+ FieldMarshaler()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _DEBUG
+ m_dwExternalOffset = 0xcccccccc;
+#endif
+ }
+
+ static inline void RestoreHelper(FixupPointer<PTR_MethodTable> *ppMT)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ppMT));
+ }
+ CONTRACTL_END;
+
+#ifndef BINDER
+#ifdef FEATURE_PREJIT
+ Module::RestoreMethodTablePointer(ppMT);
+#else // FEATURE_PREJIT
+ // without NGEN we only have to make sure that the type is fully loaded
+ ClassLoader::EnsureLoaded(ppMT->GetValue());
+#endif // FEATURE_PREJIT
+#endif // BINDER
+ }
+
+#ifdef _DEBUG
+ static inline BOOL IsRestoredHelper(FixupPointer<PTR_MethodTable> pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_PREJIT
+ return pMT.IsNull() || (!pMT.IsTagged() && pMT.GetValue()->IsRestored());
+#else // FEATURE_PREJIT
+ // putting the IsFullyLoaded check here is tempting but incorrect
+ return TRUE;
+#endif // FEATURE_PREJIT
+ }
+#endif // _DEBUG
+
+
+ FixupPointer<PTR_FieldDesc> m_pFD; // FieldDesc
+ UINT32 m_dwExternalOffset; // offset of field in the fixed portion
+ NStructFieldType m_nft;
+};
+
+
+
+#ifdef FEATURE_COMINTEROP
+
+//=======================================================================
+// BSTR <--> System.String
+//=======================================================================
+class FieldMarshaler_BSTR : public FieldMarshaler
+{
+public:
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(BSTR), sizeof(BSTR))
+};
+
+//=======================================================================
+// HSTRING <--> System.String
+//=======================================================================
+class FieldMarshaler_HSTRING : public FieldMarshaler
+{
+public:
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(HSTRING), sizeof(HSTRING))
+};
+
+//=======================================================================
+// Windows.Foundation.IReference`1 <--> System.Nullable`1
+//=======================================================================
+class FieldMarshaler_Nullable : public FieldMarshaler
+{
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+
+public:
+
+ FieldMarshaler_Nullable(MethodTable* pMT)
+ {
+ m_pNullableTypeMT.SetValue(pMT);
+ }
+
+ BOOL IsNullableMarshalerImpl() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return TRUE;
+ }
+
+ // Unimplemented methods.
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ ELEMENT_SIZE_IMPL(sizeof(IUnknown*), sizeof(IUnknown*))
+
+ // Implemented methods
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const;
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const;
+ VOID DestroyNativeImpl(const VOID* pNativeValue) const;
+ MethodDesc* GetMethodDescForGenericInstantiation(MethodDesc* pMD) const;
+
+ BOOL IsScalarMarshalerImpl() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return TRUE;
+ }
+
+#ifdef FEATURE_PREJIT
+ void FixupImpl(DataImage *image)
+ {
+ STANDARD_VM_CONTRACT;
+
+ image->FixupMethodTablePointer(this, &m_pNullableTypeMT);
+
+ FieldMarshaler::FixupImpl(image);
+ }
+#endif // FEATURE_PREJIT
+
+ void RestoreImpl()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ RestoreHelper(&m_pNullableTypeMT);
+
+ FieldMarshaler::RestoreImpl();
+ }
+
+#ifdef _DEBUG
+ BOOL IsRestored() const
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return IsRestoredHelper(m_pNullableTypeMT);
+ }
+#endif
+
+ MethodTable *GetMethodTable() const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsRestored());
+ }
+ CONTRACTL_END;
+
+ return m_pNullableTypeMT.GetValue();
+ }
+
+private:
+ FixupPointer<PTR_MethodTable> m_pNullableTypeMT;
+};
+
+
+//=======================================================================
+// Windows.UI.Xaml.Interop.TypeName <--> System.Type
+//=======================================================================
+class FieldMarshaler_SystemType : public FieldMarshaler
+{
+public:
+ VOID UpdateNativeImpl(OBJECTREF * pCLRValue, LPVOID pNativeValue, OBJECTREF * ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID * pNativeValue, OBJECTREF * ppProtectedCLRValue, OBJECTREF * ppProtectedOldCLRValue) const;
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(HSTRING), sizeof(HSTRING))
+};
+
+//=======================================================================
+// Windows.Foundation.HResult <--> System.Exception
+// Note: The WinRT struct has exactly 1 field, Value (an HRESULT)
+//=======================================================================
+class FieldMarshaler_Exception : public FieldMarshaler
+{
+public:
+ VOID UpdateNativeImpl(OBJECTREF * pCLRValue, LPVOID pNativeValue, OBJECTREF * ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID * pNativeValue, OBJECTREF * ppProtectedCLRValue, OBJECTREF * ppProtectedOldCLRValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(HRESULT), sizeof(HRESULT))
+};
+
+#endif // FEATURE_COMINTEROP
+
+
+
+//=======================================================================
+// Embedded struct <--> LayoutClass
+//=======================================================================
+class FieldMarshaler_NestedLayoutClass : public FieldMarshaler
+{
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+public:
+ FieldMarshaler_NestedLayoutClass(MethodTable *pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_pNestedMethodTable.SetValue(pMT);
+ }
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ UINT32 NativeSizeImpl() const;
+ UINT32 AlignmentRequirementImpl() const;
+
+#ifdef FEATURE_PREJIT
+ void FixupImpl(DataImage *image)
+ {
+ STANDARD_VM_CONTRACT;
+
+ image->FixupMethodTablePointer(this, &m_pNestedMethodTable);
+
+ FieldMarshaler::FixupImpl(image);
+ }
+#endif // FEATURE_PREJIT
+
+ void RestoreImpl()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ RestoreHelper(&m_pNestedMethodTable);
+
+ FieldMarshaler::RestoreImpl();
+ }
+
+#ifdef _DEBUG
+ BOOL IsRestored() const
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return IsRestoredHelper(m_pNestedMethodTable);
+ }
+#endif
+
+ MethodTable *GetMethodTable() const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsRestored());
+ }
+ CONTRACTL_END;
+
+ return m_pNestedMethodTable.GetValue();
+ }
+
+private:
+ // MethodTable of nested FieldMarshaler.
+ FixupPointer<PTR_MethodTable> m_pNestedMethodTable;
+};
+
+
+//=======================================================================
+// Embedded struct <--> ValueClass
+//=======================================================================
+class FieldMarshaler_NestedValueClass : public FieldMarshaler
+{
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+public:
+ FieldMarshaler_NestedValueClass(MethodTable *pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_pNestedMethodTable.SetValue(pMT);
+ }
+
+ BOOL IsNestedValueClassMarshalerImpl() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return TRUE;
+ }
+
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ UINT32 NativeSizeImpl() const;
+ UINT32 AlignmentRequirementImpl() const;
+ VOID NestedValueClassUpdateNativeImpl(const VOID **ppProtectedCLR, SIZE_T startoffset, LPVOID pNative, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID NestedValueClassUpdateCLRImpl(const VOID *pNative, LPVOID *ppProtectedCLR, SIZE_T startoffset) const;
+
+#ifdef FEATURE_PREJIT
+ void FixupImpl(DataImage *image)
+ {
+ STANDARD_VM_CONTRACT;
+
+ image->FixupMethodTablePointer(this, &m_pNestedMethodTable);
+
+ FieldMarshaler::FixupImpl(image);
+ }
+#endif // FEATURE_PREJIT
+
+ void RestoreImpl()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ RestoreHelper(&m_pNestedMethodTable);
+
+ FieldMarshaler::RestoreImpl();
+ }
+
+#ifdef _DEBUG
+ BOOL IsRestored() const
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return IsRestoredHelper(m_pNestedMethodTable);
+ }
+#endif
+
+ BOOL IsBlittable()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetMethodTable()->IsBlittable();
+ }
+
+ MethodTable *GetMethodTable() const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsRestored());
+ }
+ CONTRACTL_END;
+
+ return m_pNestedMethodTable.GetValue();
+ }
+
+
+private:
+ // MethodTable of nested NStruct.
+ FixupPointer<PTR_MethodTable> m_pNestedMethodTable;
+};
+
+
+//=======================================================================
+// LPWSTR <--> System.String
+//=======================================================================
+class FieldMarshaler_StringUni : public FieldMarshaler
+{
+public:
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(LPWSTR), sizeof(LPWSTR))
+};
+
+
+//=======================================================================
+// LPSTR <--> System.String
+//=======================================================================
+class FieldMarshaler_StringAnsi : public FieldMarshaler
+{
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+public:
+ FieldMarshaler_StringAnsi(BOOL BestFit, BOOL ThrowOnUnmappableChar) :
+ m_BestFitMap(!!BestFit), m_ThrowOnUnmappableChar(!!ThrowOnUnmappableChar)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(LPSTR), sizeof(LPSTR))
+
+ BOOL GetBestFit()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_BestFitMap;
+ }
+
+ BOOL GetThrowOnUnmappableChar()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ThrowOnUnmappableChar;
+ }
+
+private:
+ bool m_BestFitMap:1;
+ bool m_ThrowOnUnmappableChar:1;
+};
+
+
+//=======================================================================
+// Embedded LPWSTR <--> System.String
+//=======================================================================
+class FieldMarshaler_FixedStringUni : public FieldMarshaler
+{
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+public:
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+
+ ELEMENT_SIZE_IMPL(m_numchar * sizeof(WCHAR), sizeof(WCHAR))
+
+ FieldMarshaler_FixedStringUni(UINT32 numChar)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_numchar = numChar;
+ }
+
+private:
+ // # of characters for fixed strings
+ UINT32 m_numchar;
+};
+
+
+//=======================================================================
+// Embedded LPSTR <--> System.String
+//=======================================================================
+class FieldMarshaler_FixedStringAnsi : public FieldMarshaler
+{
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+public:
+ FieldMarshaler_FixedStringAnsi(UINT32 numChar, BOOL BestFitMap, BOOL ThrowOnUnmappableChar) :
+ m_numchar(numChar), m_BestFitMap(!!BestFitMap), m_ThrowOnUnmappableChar(!!ThrowOnUnmappableChar)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+
+ ELEMENT_SIZE_IMPL(m_numchar * sizeof(CHAR), sizeof(CHAR))
+
+ BOOL GetBestFit()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_BestFitMap;
+ }
+
+ BOOL GetThrowOnUnmappableChar()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ThrowOnUnmappableChar;
+ }
+
+private:
+ // # of characters for fixed strings
+ UINT32 m_numchar;
+ bool m_BestFitMap:1;
+ bool m_ThrowOnUnmappableChar:1;
+};
+
+
+//=======================================================================
+// Embedded AnsiChar array <--> char[]
+//=======================================================================
+class FieldMarshaler_FixedCharArrayAnsi : public FieldMarshaler
+{
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+public:
+ FieldMarshaler_FixedCharArrayAnsi(UINT32 numElems, BOOL BestFit, BOOL ThrowOnUnmappableChar) :
+ m_numElems(numElems), m_BestFitMap(!!BestFit), m_ThrowOnUnmappableChar(!!ThrowOnUnmappableChar)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+
+ ELEMENT_SIZE_IMPL(m_numElems * sizeof(CHAR), sizeof(CHAR))
+
+ BOOL GetBestFit()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_BestFitMap;
+ }
+
+ BOOL GetThrowOnUnmappableChar()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ThrowOnUnmappableChar;
+ }
+
+private:
+ // # of elements for fixedchararray
+ UINT32 m_numElems;
+ bool m_BestFitMap:1;
+ bool m_ThrowOnUnmappableChar:1;
+};
+
+
+//=======================================================================
+// Embedded arrays
+//=======================================================================
+class FieldMarshaler_FixedArray : public FieldMarshaler
+{
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+public:
+ FieldMarshaler_FixedArray(IMDInternalImport *pMDImport, mdTypeDef cl, UINT32 numElems, VARTYPE vt, MethodTable* pElementMT);
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+ UINT32 AlignmentRequirementImpl() const;
+
+ UINT32 NativeSizeImpl() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef BINDER
+ return 0;
+#else
+ MethodTable *pElementMT = m_arrayType.GetValue().AsArray()->GetArrayElementTypeHandle().GetMethodTable();
+ return OleVariant::GetElementSizeForVarType(m_vt, pElementMT) * m_numElems;
+#endif
+ }
+
+ TypeHandle GetElementTypeHandle() const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsRestored());
+ }
+ CONTRACTL_END;
+
+ return m_arrayType.GetValue().AsArray()->GetArrayElementTypeHandle();
+ }
+
+ VARTYPE GetElementVT() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_vt;
+ }
+
+#ifdef FEATURE_PREJIT
+ void FixupImpl(DataImage *image)
+ {
+ STANDARD_VM_CONTRACT;
+
+ image->FixupTypeHandlePointer(this, &m_arrayType);
+
+ FieldMarshaler::FixupImpl(image);
+ }
+#endif // FEATURE_PREJIT
+
+ void RestoreImpl()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef BINDER
+#ifdef FEATURE_PREJIT
+ Module::RestoreTypeHandlePointer(&m_arrayType);
+#else // FEATURE_PREJIT
+ // without NGEN we only have to make sure that the type is fully loaded
+ ClassLoader::EnsureLoaded(m_arrayType.GetValue());
+#endif // FEATURE_PREJIT
+#endif
+ FieldMarshaler::RestoreImpl();
+ }
+
+#ifdef _DEBUG
+ BOOL IsRestored() const
+ {
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_PREJIT
+ return !m_arrayType.IsTagged() && (m_arrayType.IsNull() || m_arrayType.GetValue().IsRestored());
+#else // FEATURE_PREJIT
+ return m_arrayType.IsNull() || m_arrayType.GetValue().IsFullyLoaded();
+#endif // FEATURE_PREJIT
+ }
+#endif
+
+private:
+ FixupPointer<TypeHandle> m_arrayType;
+ UINT32 m_numElems;
+ VARTYPE m_vt;
+ bool m_BestFitMap:1; // Note: deliberately use small bools to save on working set - this is the largest FieldMarshaler and dominates the cost of the FieldMarshaler array
+ bool m_ThrowOnUnmappableChar:1; // Note: see above - the same working-set consideration applies
+};
+
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+//=======================================================================
+// SafeArrays
+//=======================================================================
+class FieldMarshaler_SafeArray : public FieldMarshaler
+{
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+public:
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(LPSAFEARRAY), sizeof(LPSAFEARRAY))
+
+ FieldMarshaler_SafeArray(VARTYPE vt, MethodTable* pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_vt = vt;
+ m_pMT.SetValue(pMT);
+ }
+
+#ifdef FEATURE_PREJIT
+ void FixupImpl(DataImage *image)
+ {
+ STANDARD_VM_CONTRACT;
+
+ image->FixupMethodTablePointer(this, &m_pMT);
+
+ FieldMarshaler::FixupImpl(image);
+ }
+#endif // FEATURE_PREJIT
+
+ void RestoreImpl()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ RestoreHelper(&m_pMT);
+
+ FieldMarshaler::RestoreImpl();
+ }
+
+#ifdef _DEBUG
+ BOOL IsRestored() const
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return IsRestoredHelper(m_pMT);
+ }
+#endif
+
+ TypeHandle GetElementTypeHandle() const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsRestored());
+ }
+ CONTRACTL_END;
+
+ return TypeHandle(m_pMT.GetValue());
+ }
+
+ VARTYPE GetElementVT() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_vt;
+ }
+
+private:
+ FixupPointer<PTR_MethodTable> m_pMT;
+ VARTYPE m_vt;
+};
+#endif //FEATURE_CLASSIC_COMINTEROP
+
+
+//=======================================================================
+// Embedded function ptr <--> Delegate
+//=======================================================================
+class FieldMarshaler_Delegate : public FieldMarshaler
+{
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+public:
+ FieldMarshaler_Delegate(MethodTable* pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_pNestedMethodTable.SetValue(pMT);
+ }
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(LPVOID), sizeof(LPVOID))
+
+#ifdef FEATURE_PREJIT
+ void FixupImpl(DataImage *image)
+ {
+ STANDARD_VM_CONTRACT;
+
+ image->FixupMethodTablePointer(this, &m_pNestedMethodTable);
+
+ FieldMarshaler::FixupImpl(image);
+ }
+#endif // FEATURE_PREJIT
+
+ void RestoreImpl()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ RestoreHelper(&m_pNestedMethodTable);
+
+ FieldMarshaler::RestoreImpl();
+ }
+
+#ifdef _DEBUG
+ BOOL IsRestored() const
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return IsRestoredHelper(m_pNestedMethodTable);
+ }
+#endif
+
+ MethodTable *GetMethodTable() const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsRestored());
+ }
+ CONTRACTL_END;
+
+ return m_pNestedMethodTable.GetValue();
+ }
+
+ FixupPointer<PTR_MethodTable> m_pNestedMethodTable;
+};
+
+
+//=======================================================================
+// Embedded SafeHandle <--> Handle. This field really only supports
+// going from managed to unmanaged. In the other direction, we only
+// check that the handle value has not changed.
+//=======================================================================
+class FieldMarshaler_SafeHandle : public FieldMarshaler
+{
+public:
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(LPVOID), sizeof(LPVOID))
+};
+
+
+//=======================================================================
+// Embedded CriticalHandle <--> Handle. This field really only supports
+// going from managed to unmanaged. In the other direction, we only
+// check that the handle value has not changed.
+//=======================================================================
+class FieldMarshaler_CriticalHandle : public FieldMarshaler
+{
+public:
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(LPVOID), sizeof(LPVOID))
+};
+
+#ifdef FEATURE_COMINTEROP
+
+//=======================================================================
+// COM IP <--> Interface
+//=======================================================================
+class FieldMarshaler_Interface : public FieldMarshaler
+{
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+public:
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(IUnknown*), sizeof(IUnknown*))
+
+ FieldMarshaler_Interface(MethodTable *pClassMT, MethodTable *pItfMT, DWORD dwFlags)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_pClassMT.SetValue(pClassMT);
+ m_pItfMT.SetValue(pItfMT);
+ m_dwFlags = dwFlags;
+ }
+
+#ifdef FEATURE_PREJIT
+ void FixupImpl(DataImage *image)
+ {
+ STANDARD_VM_CONTRACT;
+
+ image->FixupMethodTablePointer(this, &m_pClassMT);
+ image->FixupMethodTablePointer(this, &m_pItfMT);
+
+ FieldMarshaler::FixupImpl(image);
+ }
+#endif // FEATURE_PREJIT
+
+ void RestoreImpl()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ RestoreHelper(&m_pClassMT);
+ RestoreHelper(&m_pItfMT);
+
+ FieldMarshaler::RestoreImpl();
+ }
+
+#ifdef _DEBUG
+ BOOL IsRestored() const
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return (IsRestoredHelper(m_pClassMT) && IsRestoredHelper(m_pItfMT));
+ }
+#endif
+
+ void GetInterfaceInfo(MethodTable **ppItfMT, DWORD* pdwFlags) const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ppItfMT));
+#ifdef FEATURE_PREJIT
+ PRECONDITION(IsRestored());
+#endif
+ }
+ CONTRACTL_END;
+
+ *ppItfMT = m_pItfMT.GetValue();
+ *pdwFlags = m_dwFlags;
+ }
+
+ MethodTable *GetMethodTable() const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsRestored());
+ }
+ CONTRACTL_END;
+
+ return m_pClassMT.GetValue();
+ }
+
+ MethodTable *GetInterfaceMethodTable() const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsRestored());
+ }
+ CONTRACTL_END;
+
+ return m_pItfMT.GetValue();
+ }
+
+private:
+ FixupPointer<PTR_MethodTable> m_pClassMT;
+ FixupPointer<PTR_MethodTable> m_pItfMT;
+ DWORD m_dwFlags;
+};
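+
+// A usage sketch (hypothetical caller, not part of this header): interop code
+// pulls the cached class/interface method tables and flags back out when
+// converting the field to and from a COM interface pointer.
+#if 0
+void DescribeInterfaceField(const FieldMarshaler_Interface *pFM)
+{
+    MethodTable *pItfMT  = NULL;
+    DWORD        dwFlags = 0;
+    pFM->GetInterfaceInfo(&pItfMT, &dwFlags);        // interface MT + marshal flags
+    MethodTable *pClassMT = pFM->GetMethodTable();   // the field's class MT
+    // ... hand these to the OBJECTREF <-> IUnknown* conversion routine ...
+}
+#endif
+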
+
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP
+// This compile-time assert checks that the above FieldMarshaler is the biggest
+// (or equal-biggest) FieldMarshaler we have, i.e. that we haven't set
+// MAXFIELDMARSHALERSIZE to a value that is needlessly big, which would waste a
+// whole lot of memory given the current storage scheme for FMs. Corresponding
+// asserts in FieldMarshaler.cpp ensure that we haven't set it to a value that is
+// too small for the other FieldMarshalers.
+//
+// If this assert fires, it probably means you have successfully reduced the size
+// of the above FieldMarshaler. You should now place this assert on the
+// FieldMarshaler that is now the biggest, or modify MAXFIELDMARSHALERSIZE to
+// match the new size.
+static_assert_no_msg(sizeof(FieldMarshaler_Interface) == MAXFIELDMARSHALERSIZE);
+
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP
+
+//=======================================================================
+// VARIANT <--> Object
+//=======================================================================
+class FieldMarshaler_Variant : public FieldMarshaler
+{
+public:
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(VARIANT), 8)
+};
+
+#endif // FEATURE_COMINTEROP
+
+
+//=======================================================================
+// Dummy marshaler
+//=======================================================================
+class FieldMarshaler_Illegal : public FieldMarshaler
+{
+public:
+ FieldMarshaler_Illegal(UINT resIDWhy)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_resIDWhy = resIDWhy;
+ }
+
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const;
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const;
+
+ SCALAR_MARSHALER_IMPL(1, 1)
+
+private:
+ UINT m_resIDWhy;
+};
+
+
+#define FIELD_MARSHALER_COPY
+
+
+class FieldMarshaler_Copy1 : public FieldMarshaler
+{
+public:
+
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(1, 1)
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ *((U1*)pNative) = *((U1*)pCLR);
+ }
+
+
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ *((U1*)pCLR) = *((U1*)pNative);
+ }
+
+};
+
+
+
+class FieldMarshaler_Copy2 : public FieldMarshaler
+{
+public:
+
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(2, 2)
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ MAYBE_UNALIGNED_WRITE(pNative, 16, MAYBE_UNALIGNED_READ(pCLR, 16));
+ }
+
+
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ MAYBE_UNALIGNED_WRITE(pCLR, 16, MAYBE_UNALIGNED_READ(pNative, 16));
+ }
+
+};
+
+
+class FieldMarshaler_Copy4 : public FieldMarshaler
+{
+public:
+
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(4, 4)
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ MAYBE_UNALIGNED_WRITE(pNative, 32, MAYBE_UNALIGNED_READ(pCLR, 32));
+ }
+
+
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ MAYBE_UNALIGNED_WRITE(pCLR, 32, MAYBE_UNALIGNED_READ(pNative, 32));
+ }
+
+};
+
+
+class FieldMarshaler_Copy8 : public FieldMarshaler
+{
+public:
+
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(8, 8)
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ MAYBE_UNALIGNED_WRITE(pNative, 64, MAYBE_UNALIGNED_READ(pCLR, 64));
+ }
+
+
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ MAYBE_UNALIGNED_WRITE(pCLR, 64, MAYBE_UNALIGNED_READ(pNative, 64));
+ }
+
+};
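+
+// The MAYBE_UNALIGNED_READ/WRITE macros used by the Copy2/4/8 marshalers above
+// tolerate addresses that are not naturally aligned. Conceptually (a sketch
+// only; the real macros are defined elsewhere in the runtime) each access
+// behaves like a width-sized memcpy, which is legal at any alignment:
+#if 0
+static inline void Illustrative_Copy4(void *pDst, const void *pSrc)
+{
+    UINT32 v;
+    memcpy(&v, pSrc, sizeof(v));   // unaligned-safe read
+    memcpy(pDst, &v, sizeof(v));   // unaligned-safe write
+}
+#endif
+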
+
+
+
+class FieldMarshaler_Ansi : public FieldMarshaler
+{
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+public:
+ FieldMarshaler_Ansi(BOOL BestFitMap, BOOL ThrowOnUnmappableChar) :
+ m_BestFitMap(!!BestFitMap), m_ThrowOnUnmappableChar(!!ThrowOnUnmappableChar)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(sizeof(CHAR), sizeof(CHAR))
+
+#ifndef BINDER
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR, NULL_OK));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ char c;
+ InternalWideToAnsi((LPCWSTR)pCLR,
+ 1,
+ &c,
+ 1,
+ m_BestFitMap,
+ m_ThrowOnUnmappableChar);
+
+ *((char*)pNative) = c;
+ }
+#endif
+
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ MultiByteToWideChar(CP_ACP, 0, (char*)pNative, 1, (LPWSTR)pCLR, 1);
+ }
+
+ BOOL GetBestFit()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_BestFitMap;
+ }
+
+ BOOL GetThrowOnUnmappableChar()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ThrowOnUnmappableChar;
+ }
+
+private:
+ bool m_BestFitMap:1;
+ bool m_ThrowOnUnmappableChar:1;
+};
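+
+// Sketch of the conversion InternalWideToAnsi performs for this marshaler
+// (illustrative only; variable names here are hypothetical, the actual helper
+// lives elsewhere in the runtime and reports failures through the usual
+// marshaling exceptions):
+#if 0
+BOOL  fUsedDefault = FALSE;
+DWORD dwFlags      = bestFitMap ? 0 : WC_NO_BEST_FIT_CHARS;
+WideCharToMultiByte(CP_ACP, dwFlags, &wch, 1, &ch, 1, NULL, &fUsedDefault);
+if (fUsedDefault && throwOnUnmappableChar)
+{
+    // fail the marshal: the character has no ANSI representation
+}
+#endif
+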
+
+
+
+class FieldMarshaler_WinBool : public FieldMarshaler
+{
+public:
+
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(sizeof(BOOL), sizeof(BOOL))
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+ static_assert_no_msg(sizeof(BOOL) == sizeof(UINT32));
+ MAYBE_UNALIGNED_WRITE(pNative, 32, ((*((U1 UNALIGNED*)pCLR)) ? 1 : 0));
+ }
+
+
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ static_assert_no_msg(sizeof(BOOL) == sizeof(UINT32));
+ *((U1*)pCLR) = MAYBE_UNALIGNED_READ(pNative, 32) ? 1 : 0;
+ }
+
+};
+
+
+
+#ifdef FEATURE_COMINTEROP
+
+class FieldMarshaler_VariantBool : public FieldMarshaler
+{
+public:
+
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(sizeof(VARIANT_BOOL), sizeof(VARIANT_BOOL))
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ static_assert_no_msg(sizeof(VARIANT_BOOL) == sizeof(BYTE) * 2);
+
+ MAYBE_UNALIGNED_WRITE(pNative, 16, (*((U1*)pCLR)) ? VARIANT_TRUE : VARIANT_FALSE);
+ }
+
+
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ static_assert_no_msg(sizeof(VARIANT_BOOL) == sizeof(BYTE) * 2);
+
+ *((U1*)pCLR) = MAYBE_UNALIGNED_READ(pNative, 16) ? 1 : 0;
+ }
+
+};
+
+#endif // FEATURE_COMINTEROP
+
+
+
+class FieldMarshaler_CBool : public FieldMarshaler
+{
+public:
+
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(1, 1)
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ *((U1*)pNative) = (*((U1*)pCLR)) ? 1 : 0;
+ }
+
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ *((U1*)pCLR) = (*((U1*)pNative)) ? 1 : 0;
+ }
+
+};
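+
+// Note the invariant shared by the WinBool, VariantBool and CBool marshalers
+// above: both directions normalize to exactly 0 or 1, so a native BOOL such as
+// 0x00010000 still reads back as true and a managed bool never carries a
+// multi-bit pattern. A sketch of the post-condition (illustrative):
+#if 0
+_ASSERTE(*(U1*)pCLR == 0 || *(U1*)pCLR == 1);   // after any ScalarUpdateCLRImpl above
+#endif
+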
+
+
+class FieldMarshaler_Decimal : public FieldMarshaler
+{
+public:
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+    SCALAR_MARSHALER_IMPL(sizeof(DECIMAL), 8)
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ memcpyNoGCRefs(pNative, pCLR, sizeof(DECIMAL));
+ }
+
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCLR));
+ PRECONDITION(CheckPointer(pNative));
+ }
+ CONTRACTL_END;
+
+ memcpyNoGCRefs(pCLR, pNative, sizeof(DECIMAL));
+ }
+
+};
+
+class FieldMarshaler_Date : public FieldMarshaler
+{
+public:
+
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(sizeof(DATE), sizeof(DATE))
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const;
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const;
+
+};
+
+
+
+#ifdef FEATURE_COMINTEROP
+
+class FieldMarshaler_Currency : public FieldMarshaler
+{
+public:
+
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(sizeof(CURRENCY), sizeof(CURRENCY))
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const;
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const;
+
+};
+
+class FieldMarshaler_DateTimeOffset : public FieldMarshaler
+{
+public:
+
+ UNUSED_METHOD_IMPL(VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const)
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(sizeof(INT64), sizeof(INT64))
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const;
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const;
+
+};
+
+#endif // FEATURE_COMINTEROP
+
+
+//========================================================================
+// Used to ensure that native data is properly deleted in exception cases.
+//========================================================================
+class NativeLayoutDestroyer
+{
+public:
+ NativeLayoutDestroyer(BYTE* pNativeData, MethodTable* pMT, UINT32 cbSize)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pNativeData));
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ m_pNativeData = pNativeData;
+ m_pMT = pMT;
+ m_cbSize = cbSize;
+ m_fDestroy = TRUE;
+ }
+
+ ~NativeLayoutDestroyer()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (m_fDestroy)
+ {
+ LayoutDestroyNative(m_pNativeData, m_pMT);
+ FillMemory(m_pNativeData, m_cbSize, 0);
+ }
+ }
+
+ void SuppressRelease()
+ {
+ m_fDestroy = FALSE;
+ }
+
+private:
+ NativeLayoutDestroyer()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ BYTE* m_pNativeData;
+ MethodTable* m_pMT;
+ UINT32 m_cbSize;
+ BOOL m_fDestroy;
+};
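+
+// Typical usage sketch (hypothetical caller, not part of this header): the
+// holder guards a partially-marshaled native layout so that an exception
+// thrown mid-marshal destroys whatever was already written.
+#if 0
+void MarshalStructToNative(BYTE *pNativeData, MethodTable *pMT, UINT32 cbNative)
+{
+    NativeLayoutDestroyer destroyer(pNativeData, pMT, cbNative);
+    // ... run each FieldMarshaler's UpdateNative; any of them may throw ...
+    destroyer.SuppressRelease();   // success - the caller now owns pNativeData
+}
+#endif
+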
+
+#endif // DACCESS_COMPILE
+
+
+#endif // __FieldMarshaler_h__
diff --git a/src/vm/finalizerthread.cpp b/src/vm/finalizerthread.cpp
new file mode 100644
index 0000000000..aca97e3ff2
--- /dev/null
+++ b/src/vm/finalizerthread.cpp
@@ -0,0 +1,1448 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+
+#include "common.h"
+
+#include "finalizerthread.h"
+#include "threadsuspend.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "runtimecallablewrapper.h"
+#endif
+
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+#include "profattach.h"
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+BOOL FinalizerThread::fRunFinalizersOnUnload = FALSE;
+BOOL FinalizerThread::fQuitFinalizer = FALSE;
+AppDomain * FinalizerThread::UnloadingAppDomain;
+
+CLREvent * FinalizerThread::hEventFinalizer = NULL;
+CLREvent * FinalizerThread::hEventFinalizerDone = NULL;
+CLREvent * FinalizerThread::hEventShutDownToFinalizer = NULL;
+CLREvent * FinalizerThread::hEventFinalizerToShutDown = NULL;
+
+HANDLE FinalizerThread::MHandles[kHandleCount];
+
+BOOL FinalizerThread::IsCurrentThreadFinalizer()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return GetThread() == g_pFinalizerThread;
+}
+
+void FinalizerThread::EnableFinalization()
+{
+ WRAPPER_NO_CONTRACT;
+
+ hEventFinalizer->Set();
+}
+
+BOOL FinalizerThread::HaveExtraWorkForFinalizer()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetFinalizerThread()->HaveExtraWorkForFinalizer();
+}
+
+// This helper is here to avoid EH goo associated with DefineFullyQualifiedNameForStack being
+// invoked when logging is off.
+__declspec(noinline)
+void LogFinalization(Object* obj)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+
+#ifdef FEATURE_EVENT_TRACE
+ ETW::GCLog::SendFinalizeObjectEvent(obj->GetMethodTable(), obj);
+#endif // FEATURE_EVENT_TRACE
+}
+
+
+void CallFinalizer(Object* obj)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ MethodTable *pMT = obj->GetMethodTable();
+ STRESS_LOG2(LF_GC, LL_INFO1000, "Finalizing object %p MT %pT\n", obj, pMT);
+ LOG((LF_GC, LL_INFO1000, "Finalizing " LOG_OBJECT_CLASS(obj)));
+
+ _ASSERTE(GetThread()->PreemptiveGCDisabled());
+    // If we don't have a class, we can't call the finalizer.
+    // If the object has already been marked as finalizer-run, don't call it either.
+ if (pMT)
+ {
+ if (!((obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN))
+ {
+#ifdef FEATURE_REMOTING
+ if (pMT->IsContextful())
+ {
+ Object *proxy = OBJECTREFToObject(CRemotingServices::GetProxyFromObject(ObjectToOBJECTREF(obj)));
+
+ _ASSERTE(proxy && "finalizing an object that was never wrapped?????");
+ if (proxy == NULL)
+ {
+                // Quite possibly the app abruptly shut down while a proxy
+                // was being set up for a contextful object. We will skip
+                // finalizing this object.
+ _ASSERTE (g_fEEShutDown);
+ return;
+ }
+ else
+ {
+ // This saves us from the situation where an object gets GC-ed
+ // after its Context.
+ OBJECTREF stub = ((TRANSPARENTPROXYREF)ObjectToOBJECTREF(proxy))->GetStubData();
+ Context *pServerCtx = *((Context **)stub->UnBox());
+ // Check if the context is valid
+ if (!Context::ValidateContext(pServerCtx))
+ {
+ // Since the server context is gone (GC-ed)
+ // we will associate the server with the default
+ // context for a good faith attempt to run
+ // the finalizer
+ // We want to do this only if we are using RemotingProxy
+ // and not for other types of proxies (eg. SvcCompPrxy)
+ OBJECTREF orRP = ObjectToOBJECTREF(CRemotingServices::GetRealProxy(proxy));
+ if(CTPMethodTable::IsInstanceOfRemotingProxy(
+ orRP->GetMethodTable()))
+ {
+ *((Context **)stub->UnBox()) = (Context*) GetThread()->GetContext();
+ }
+ }
+ // call Finalize on the proxy of the server object.
+ obj = proxy;
+ pMT = obj->GetMethodTable();
+ }
+ }
+#endif // FEATURE_REMOTING
+
+ _ASSERTE(obj->GetMethodTable() == pMT);
+ _ASSERTE(pMT->HasFinalizer() || pMT->IsTransparentProxy());
+
+ LogFinalization(obj);
+ MethodTable::CallFinalizer(obj);
+ }
+ else
+ {
+ //reset the bit so the object can be put on the list
+ //with RegisterForFinalization
+ obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN);
+ }
+ }
+}
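+
+// Note (illustrative): BIT_SBLK_FINALIZER_RUN is the same object-header bit
+// that the managed GC.SuppressFinalize / GC.ReRegisterForFinalize APIs set and
+// clear, which is why a suppressed object takes the ClrBit path above instead
+// of having its finalizer invoked.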
+
+struct FinalizeAllObjects_Args {
+ OBJECTREF fobj;
+ int bitToCheck;
+};
+
+void FinalizerThread::FinalizeAllObjects_Wrapper(void *ptr)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ FinalizeAllObjects_Args *args = (FinalizeAllObjects_Args *) ptr;
+ _ASSERTE(args->fobj);
+ Object *fobj = OBJECTREFToObject(args->fobj);
+ args->fobj = NULL; // don't want to do this guy again, if we take an exception here:
+ args->fobj = ObjectToOBJECTREF(FinalizeAllObjects(fobj, args->bitToCheck));
+}
+
+// The following will be inadequate if we ever have multiple Finalizer threads in some future release.
+// Instead, we will have to store this in TLS or pass it through the call tree of finalization.
+// It is used to tie together the base exception handling and the AppDomain transition exception
+// handling for this thread.
+static struct ManagedThreadCallState *pThreadTurnAround;
+
+Object * FinalizerThread::DoOneFinalization(Object* fobj, Thread* pThread,int bitToCheck,bool *pbTerminate)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ bool fTerminate=false;
+ Object *pReturnObject = NULL;
+
+
+ AppDomain* targetAppDomain = fobj->GetAppDomain();
+ AppDomain* currentDomain = pThread->GetDomain();
+ if (! targetAppDomain || ! targetAppDomain->CanThreadEnter(pThread))
+ {
+        // if we can't get into the domain to finalize it, it must be agile, so finalize it in the current domain
+ targetAppDomain = currentDomain;
+#if CHECK_APP_DOMAIN_LEAKS
+ {
+            // object must be agile if we can't get into its domain
+ if (g_pConfig->AppDomainLeaks() && !fobj->TrySetAppDomainAgile(FALSE))
+ _ASSERTE(!"Found non-agile GC object which should have been finalized during app domain unload.");
+ }
+#endif
+ }
+
+ if (targetAppDomain == currentDomain)
+ {
+ if (!targetAppDomain->IsRudeUnload() ||
+ fobj->GetMethodTable()->HasCriticalFinalizer())
+ {
+ class ResetFinalizerStartTime
+ {
+ public:
+ ResetFinalizerStartTime()
+ {
+ if (CLRHosted())
+ {
+ g_ObjFinalizeStartTime = CLRGetTickCount64();
+ }
+ }
+ ~ResetFinalizerStartTime()
+ {
+ if (g_ObjFinalizeStartTime)
+ {
+ g_ObjFinalizeStartTime = 0;
+ }
+ }
+ };
+ {
+ ThreadLocaleHolder localeHolder;
+
+ {
+ ResetFinalizerStartTime resetTime;
+ CallFinalizer(fobj);
+ }
+ }
+ pThread->InternalReset(FALSE);
+ }
+ }
+ else
+ {
+ if (! targetAppDomain->GetDefaultContext())
+ {
+            // Can no longer enter the domain because the handle containing the context has been
+            // destroyed, so just bail. We should only get here if we are at the stage of nuking the
+            // handles in the domain while it's still open.
+ _ASSERTE(targetAppDomain->IsUnloading() && targetAppDomain->ShouldHaveFinalization());
+ }
+ else if (!currentDomain->IsDefaultDomain())
+ {
+            // this means we are in some other domain, so we need to return back out through the DoADCallback
+            // and handle the object from there in another domain.
+ pReturnObject = fobj;
+ fTerminate = true;
+ }
+ else
+ {
+ // otherwise call back to ourselves to process as many as we can in that other domain
+ FinalizeAllObjects_Args args;
+ args.fobj = ObjectToOBJECTREF(fobj);
+ args.bitToCheck = bitToCheck;
+ GCPROTECT_BEGIN(args.fobj);
+ {
+ ThreadLocaleHolder localeHolder;
+
+ _ASSERTE(pThreadTurnAround != NULL);
+ ManagedThreadBase::FinalizerAppDomain(targetAppDomain,
+ FinalizeAllObjects_Wrapper,
+ &args,
+ pThreadTurnAround);
+ }
+ pThread->InternalReset(FALSE);
+ // process the object we got back or be done if we got back null
+ pReturnObject = OBJECTREFToObject(args.fobj);
+ GCPROTECT_END();
+ }
+ }
+
+ *pbTerminate = fTerminate;
+ return pReturnObject;
+}
+
+Object * FinalizerThread::FinalizeAllObjects(Object* fobj, int bitToCheck)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ FireEtwGCFinalizersBegin_V1(GetClrInstanceId());
+
+ unsigned int fcount = 0;
+ bool fTerminate = false;
+
+ if (fobj == NULL)
+ {
+ if (AppDomain::HasWorkForFinalizerThread())
+ {
+ return NULL;
+ }
+ fobj = GCHeap::GetGCHeap()->GetNextFinalizable();
+ }
+
+ Thread *pThread = GetThread();
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ ULONGLONG ui64TimestampLastCheckedProfAttachEventMs = 0;
+#endif //FEATURE_PROFAPI_ATTACH_DETACH
+
+ // Finalize everyone
+ while (fobj)
+ {
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ // Don't let an overloaded finalizer queue starve out
+ // an attaching profiler. In between running finalizers,
+ // check the profiler attach event without blocking.
+ ProcessProfilerAttachIfNecessary(&ui64TimestampLastCheckedProfAttachEventMs);
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+ if (fobj->GetHeader()->GetBits() & bitToCheck)
+ {
+ if (AppDomain::HasWorkForFinalizerThread())
+ {
+ return NULL;
+ }
+ fobj = GCHeap::GetGCHeap()->GetNextFinalizable();
+ }
+ else
+ {
+ fcount++;
+ fobj = DoOneFinalization(fobj, pThread, bitToCheck,&fTerminate);
+ if (fTerminate)
+ {
+ break;
+ }
+
+ if (fobj == NULL)
+ {
+ if (AppDomain::HasWorkForFinalizerThread())
+ {
+ return NULL;
+ }
+ fobj = GCHeap::GetGCHeap()->GetNextFinalizable();
+ }
+ }
+ }
+ FireEtwGCFinalizersEnd_V1(fcount, GetClrInstanceId());
+
+ return fobj;
+}
+
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+
+// ----------------------------------------------------------------------------
+// ProcessProfilerAttachIfNecessary
+//
+// Description:
+// This is called to peek at the Profiler Attach Event in between finalizers to check
+// if it's signaled. If it is, this calls
+// code:ProfilingAPIAttachDetach::ProcessSignaledAttachEvent to deal with it.
+//
+//
+// Arguments:
+// * pui64TimestampLastCheckedEventMs: [in / out] This keeps track of how often the
+// Profiler Attach Event is checked, so it's not checked too often during a
+// tight loop (in particular, the loop in code:SVR::FinalizeAllObjects which
+// executes all finalizer routines in the queue). This argument has the
+// following possible values:
+// * [in] (pui64TimestampLastCheckedEventMs) == NULL: Means the arg is not used, so
+// just check the event and ignore this argument
+// * [in] (*pui64TimestampLastCheckedEventMs) == 0: Arg is uninitialized. Just
+// initialize it with the current tick count and return without checking the
+// event (as the event was probably just checked before entering the loop
+// that called this function).
+// * [in] (*pui64TimestampLastCheckedEventMs) != 0: Arg is initialized to the
+// approximate tick count of when the event was last checked. If it's time
+// to check the event again, do so and update this parameter on [out] with
+// the current timestamp. Otherwise, do nothing and return.
+//
+// Notes:
+// * The Profiler Attach Event is also checked in the main WaitForMultipleObjects in
+// WaitForFinalizerEvent
+//
+
+// static
+void FinalizerThread::ProcessProfilerAttachIfNecessary(ULONGLONG * pui64TimestampLastCheckedEventMs)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+
+ if (CLRMemoryHosted() ||
+ CLRSyncHosted() ||
+ (MHandles[kProfilingAPIAttach] == NULL))
+ {
+ return;
+ }
+
+ if (pui64TimestampLastCheckedEventMs != NULL)
+ {
+ if (*pui64TimestampLastCheckedEventMs == 0)
+ {
+ // Just initialize timestamp and leave
+ *pui64TimestampLastCheckedEventMs = CLRGetTickCount64();
+ return;
+ }
+
+ static DWORD dwMsBetweenCheckingProfAPIAttachEvent = 0;
+ if (dwMsBetweenCheckingProfAPIAttachEvent == 0)
+ {
+ // First time through, initialize with how long to wait between checking the
+ // event.
+ dwMsBetweenCheckingProfAPIAttachEvent = CLRConfig::GetConfigValue(
+ CLRConfig::EXTERNAL_MsBetweenAttachCheck);
+ }
+ ULONGLONG ui64TimestampNowMs = CLRGetTickCount64();
+ _ASSERTE(ui64TimestampNowMs >= (*pui64TimestampLastCheckedEventMs));
+ if (ui64TimestampNowMs - (*pui64TimestampLastCheckedEventMs) <
+ dwMsBetweenCheckingProfAPIAttachEvent)
+ {
+ // Too soon, go home
+ return;
+ }
+
+ // Otherwise, update the timestamp and wait on the finalizer event below
+ *pui64TimestampLastCheckedEventMs = ui64TimestampNowMs;
+ }
+
+ // Check the attach event without waiting; only if it's signaled right now will we
+ // process the event.
+ if (WaitForSingleObject(MHandles[kProfilingAPIAttach], 0) != WAIT_OBJECT_0)
+ {
+ // Any return value that indicates we can't verify the attach event is signaled
+ // right now means we should just forget about it and immediately return to
+ // whatever we were doing
+ return;
+ }
+
+ // Event is signaled; process it by spawning a new thread to do the work
+ ProfilingAPIAttachDetach::ProcessSignaledAttachEvent();
+}
+
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+void FinalizerThread::WaitForFinalizerEvent (CLREvent *event)
+{
+ // TODO wwl: merge the following two blocks
+ if (!CLRMemoryHosted() && !CLRSyncHosted()) {
+ // Non-host environment
+
+ // We don't want kLowMemoryNotification to starve out kFinalizer
+ // (as the latter may help correct the former), and we don't want either
+ // to starve out kProfilingAPIAttach, as we want decent responsiveness
+ // to a user trying to attach a profiler. So check in this order:
+ // kProfilingAPIAttach alone (0 wait)
+ // kFinalizer alone (2s wait)
+ // all events together (infinite wait)
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ // NULL means check attach event now, and don't worry about how long it was since
+ // the last time the event was checked.
+ ProcessProfilerAttachIfNecessary(NULL);
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+ //give a chance to the finalizer event (2s)
+ switch (event->Wait(2000, FALSE))
+ {
+ case (WAIT_OBJECT_0):
+ return;
+ case (WAIT_ABANDONED):
+ return;
+ case (WAIT_TIMEOUT):
+ break;
+ }
+ MHandles[kFinalizer] = event->GetHandleUNHOSTED();
+ while (1)
+ {
+ // WaitForMultipleObjects will wait on the event handles in MHandles
+ // starting at this offset
+ UINT uiEventIndexOffsetForWait = 0;
+
+ // WaitForMultipleObjects will wait on this number of event handles
+ DWORD cEventsForWait = kHandleCount;
+
+ // #MHandleTypeValues:
+ // WaitForMultipleObjects will now wait on a subset of the events in the
+ // MHandles array. At this point kFinalizer should have a non-NULL entry
+ // in the array. Wait on the following events:
+ //
+ // * kLowMemoryNotification (if it's non-NULL && g_fEEStarted)
+ // * kFinalizer (always)
+ // * kProfilingAPIAttach (if it's non-NULL)
+ //
+ // The enum code:MHandleType values become important here, as
+ // WaitForMultipleObjects needs to wait on a contiguous set of non-NULL
+ // entries in MHandles, so we'll assert the values are contiguous as we
+ // expect.
+ _ASSERTE(kLowMemoryNotification == 0);
+ _ASSERTE((kFinalizer == 1) && (MHandles[1] != NULL));
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ _ASSERTE(kProfilingAPIAttach == 2);
+#endif //FEATURE_PROFAPI_ATTACH_DETACH
+
+ // Exclude the low-memory notification event from the wait if the event
+ // handle is NULL or the EE isn't fully started up yet.
+ if ((MHandles[kLowMemoryNotification] == NULL) || !g_fEEStarted)
+ {
+ uiEventIndexOffsetForWait = kLowMemoryNotification + 1;
+ cEventsForWait--;
+ }
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ // Exclude kProfilingAPIAttach if it's NULL
+ if (MHandles[kProfilingAPIAttach] == NULL)
+ {
+ cEventsForWait--;
+ }
+#endif //FEATURE_PROFAPI_ATTACH_DETACH
+
+ switch (WaitForMultipleObjectsEx(
+ cEventsForWait, // # objects to wait on
+ &(MHandles[uiEventIndexOffsetForWait]), // array of objects to wait on
+ FALSE, // bWaitAll == FALSE, so wait for first signal
+ INFINITE, // timeout
+ FALSE) // alertable
+
+ // Adjust the returned array index for the offset we used, so the return
+ // value is relative to entire MHandles array
+ + uiEventIndexOffsetForWait)
+ {
+ case (WAIT_OBJECT_0 + kLowMemoryNotification):
+            // short on memory - GC immediately
+ GetFinalizerThread()->DisablePreemptiveGC();
+ GCHeap::GetGCHeap()->GarbageCollect(0, TRUE);
+ GetFinalizerThread()->EnablePreemptiveGC();
+ //wait only on the event for 2s
+ switch (event->Wait(2000, FALSE))
+ {
+ case (WAIT_OBJECT_0):
+ return;
+ case (WAIT_ABANDONED):
+ return;
+ case (WAIT_TIMEOUT):
+ break;
+ }
+ break;
+ case (WAIT_OBJECT_0 + kFinalizer):
+ return;
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ case (WAIT_OBJECT_0 + kProfilingAPIAttach):
+ // Spawn thread to perform the profiler attach, then resume our wait
+ ProfilingAPIAttachDetach::ProcessSignaledAttachEvent();
+ break;
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+ default:
+ //what's wrong?
+ _ASSERTE (!"Bad return code from WaitForMultipleObjects");
+ return;
+ }
+ }
+ }
+ else {
+ static LONG sLastLowMemoryFromHost = 0;
+ while (1) {
+ DWORD timeout = INFINITE;
+ if (!CLRMemoryHosted())
+ {
+ if (WaitForSingleObject(MHandles[kLowMemoryNotification], 0) == WAIT_OBJECT_0) {
+                    // short on memory - GC immediately
+ GetFinalizerThread()->DisablePreemptiveGC();
+ GCHeap::GetGCHeap()->GarbageCollect(0, TRUE);
+ GetFinalizerThread()->EnablePreemptiveGC();
+ }
+                // wait only on the event for 2s
+                // The previous GC might not wake up the finalizer thread if there are
+                // no objects to be finalized.
+ timeout = 2000;
+
+ }
+ switch (event->Wait(timeout, FALSE))
+ {
+ case (WAIT_OBJECT_0):
+ if (CLRMemoryHosted())
+ {
+ if (sLastLowMemoryFromHost != g_bLowMemoryFromHost)
+ {
+ sLastLowMemoryFromHost = g_bLowMemoryFromHost;
+ if (sLastLowMemoryFromHost != 0)
+ {
+ GetFinalizerThread()->DisablePreemptiveGC();
+ GCHeap::GetGCHeap()->GarbageCollect(0, TRUE);
+ GetFinalizerThread()->EnablePreemptiveGC();
+ }
+ }
+ }
+ return;
+ case (WAIT_ABANDONED):
+ return;
+ case (WAIT_TIMEOUT):
+ break;
+ }
+ }
+ }
+}
+
+
+
+static BOOL s_FinalizerThreadOK = FALSE;
+
+
+
+VOID FinalizerThread::FinalizerThreadWorker(void *args)
+{
+ // TODO: The following line should be removed after contract violation is fixed.
+ // See bug 27409
+ SCAN_IGNORE_THROW;
+ SCAN_IGNORE_TRIGGER;
+
+ // This is used to stitch together the exception handling at the base of our thread with
+ // any eventual transitions into different AppDomains for finalization.
+ _ASSERTE(args != NULL);
+ pThreadTurnAround = (ManagedThreadCallState *) args;
+
+ BOOL bPriorityBoosted = FALSE;
+
+ while (!fQuitFinalizer)
+ {
+ // Wait for work to do...
+
+ _ASSERTE(GetFinalizerThread()->PreemptiveGCDisabled());
+#ifdef _DEBUG
+ if (g_pConfig->FastGCStressLevel())
+ {
+ GetFinalizerThread()->m_GCOnTransitionsOK = FALSE;
+ }
+#endif
+ GetFinalizerThread()->EnablePreemptiveGC();
+#ifdef _DEBUG
+ if (g_pConfig->FastGCStressLevel())
+ {
+ GetFinalizerThread()->m_GCOnTransitionsOK = TRUE;
+ }
+#endif
+#if 0
+ // Setting the event here, instead of at the bottom of the loop, could
+ // cause us to skip draining the Q, if the request is made as soon as
+ // the app starts running.
+ SignalFinalizationDone(TRUE);
+#endif //0
+
+ WaitForFinalizerEvent (hEventFinalizer);
+
+ if (!bPriorityBoosted)
+ {
+ if (GetFinalizerThread()->SetThreadPriority(THREAD_PRIORITY_HIGHEST))
+ bPriorityBoosted = TRUE;
+ }
+
+ GetFinalizerThread()->DisablePreemptiveGC();
+
+ // TODO: The following call causes 12 more classes loaded.
+ //if (!fNameSet) {
+ // fNameSet = TRUE;
+ // GetFinalizerThread()->SetName(L"FinalizerThread");
+ //}
+
+#ifdef _DEBUG
+ // <TODO> workaround. make finalization very lazy for gcstress 3 or 4.
+ // only do finalization if the system is quiescent</TODO>
+ if (g_pConfig->GetGCStressLevel() > 1)
+ {
+ size_t last_gc_count;
+ DWORD dwSwitchCount = 0;
+
+ do
+ {
+ last_gc_count = GCHeap::GetGCHeap()->CollectionCount(0);
+ GetFinalizerThread()->m_GCOnTransitionsOK = FALSE;
+ GetFinalizerThread()->EnablePreemptiveGC();
+ __SwitchToThread (0, ++dwSwitchCount);
+ GetFinalizerThread()->DisablePreemptiveGC();
+                // If no GCs happened, then we assume we are quiescent
+ GetFinalizerThread()->m_GCOnTransitionsOK = TRUE;
+ } while (GCHeap::GetGCHeap()->CollectionCount(0) - last_gc_count > 0);
+ }
+#endif //_DEBUG
+
+ // we might want to do some extra work on the finalizer thread
+ // check and do it
+ if (GetFinalizerThread()->HaveExtraWorkForFinalizer())
+ {
+ GetFinalizerThread()->DoExtraWorkForFinalizer();
+ }
+ LOG((LF_GC, LL_INFO100, "***** Calling Finalizers\n"));
+        // We may have marked the finalizer thread for abort. If so, the abort request is for the previous finalizer method, not for the next one.
+ if (GetFinalizerThread()->IsAbortRequested())
+ {
+ GetFinalizerThread()->EEResetAbort(Thread::TAR_ALL);
+ }
+ FastInterlockExchange ((LONG*)&g_FinalizerIsRunning, TRUE);
+ AppDomain::EnableADUnloadWorkerForFinalizer();
+
+ do
+ {
+ FinalizeAllObjects(NULL, 0);
+ _ASSERTE(GetFinalizerThread()->GetDomain()->IsDefaultDomain());
+
+ if (AppDomain::HasWorkForFinalizerThread())
+ {
+ AppDomain::ProcessUnloadDomainEventOnFinalizeThread();
+ }
+ else if (UnloadingAppDomain == NULL)
+ break;
+ else if (!GCHeap::GetGCHeap()->FinalizeAppDomain(UnloadingAppDomain, fRunFinalizersOnUnload))
+ {
+ break;
+ }
+ // Now schedule any objects from an unloading app domain for finalization
+ // on the next pass (even if they are reachable.)
+ // Note that it may take several passes to complete the unload, if new objects are created during
+ // finalization.
+ }
+ while(TRUE);
+
+ if (UnloadingAppDomain != NULL)
+ {
+ SyncBlockCache::GetSyncBlockCache()->CleanupSyncBlocksInAppDomain(UnloadingAppDomain);
+ {
+ // Before we continue with AD unloading, mark the stage as
+ // FINALIZED under the SystemDomain lock so that this portion
+ // of unloading may be serialized with other parts of the CLR
+ // that require the AD stage to be < FINALIZED, in particular
+ // ETW's AD enumeration code used during its rundown events.
+ SystemDomain::LockHolder lh;
+ UnloadingAppDomain->SetFinalized(); // All finalizers have run except for FinalizableAndAgile objects
+ }
+ UnloadingAppDomain = NULL;
+ }
+
+ FastInterlockExchange ((LONG*)&g_FinalizerIsRunning, FALSE);
+        // We may still have the finalizer thread marked for abort. If so, the abort request is for the previous finalizer method, not for the next one.
+ if (GetFinalizerThread()->IsAbortRequested())
+ {
+ GetFinalizerThread()->EEResetAbort(Thread::TAR_ALL);
+ }
+
+ // Increment the loop count. This is currently used by the AddMemoryPressure heuristic to see
+ // if finalizers have run since the last time it triggered GC.
+ FastInterlockIncrement((LONG *)&g_FinalizerLoopCount);
+
+ // Anyone waiting to drain the Q can now wake up. Note that there is a
+ // race in that another thread starting a drain, as we leave a drain, may
+ // consider itself satisfied by the drain that just completed. This is
+ // acceptable.
+ SignalFinalizationDone(TRUE);
+ }
+}
+
+
+// During shutdown, finalize all objects whose finalizers haven't been run yet... whether reachable or not.
+void FinalizerThread::FinalizeObjectsOnShutdown(LPVOID args)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // This is used to stitch together the exception handling at the base of our thread with
+ // any eventual transitions into different AppDomains for finalization.
+ _ASSERTE(args != NULL);
+ pThreadTurnAround = (ManagedThreadCallState *) args;
+
+ FinalizeAllObjects(NULL, BIT_SBLK_FINALIZER_RUN);
+}
+
+
+DWORD __stdcall FinalizerThread::FinalizerThreadStart(void *args)
+{
+ ClrFlsSetThreadType (ThreadType_Finalizer);
+
+ ASSERT(args == 0);
+ ASSERT(hEventFinalizer->IsValid());
+
+ // TODO: The following line should be removed after contract violation is fixed.
+ // See bug 27409
+ SCAN_IGNORE_THROW;
+ SCAN_IGNORE_TRIGGER;
+
+ LOG((LF_GC, LL_INFO10, "Finalizer thread starting...\n"));
+
+ _ASSERTE(GetFinalizerThread()->GetDomain()->IsDefaultDomain());
+
+#if defined(FEATURE_COMINTEROP_APARTMENT_SUPPORT) && !defined(FEATURE_COMINTEROP)
+ // Make sure the finalizer thread is set to MTA to avoid hitting
+ // DevDiv Bugs 180773 - [Stress Failure] AV at CoreCLR!SafeQueryInterfaceHelper
+ GetFinalizerThread()->SetApartment(Thread::AS_InMTA, FALSE);
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT && !FEATURE_COMINTEROP
+
+ s_FinalizerThreadOK = GetFinalizerThread()->HasStarted();
+
+ _ASSERTE(s_FinalizerThreadOK);
+ _ASSERTE(GetThread() == GetFinalizerThread());
+
+    // workaround wwl: avoid an OOM problem during finalizer thread startup.
+ if (CLRTaskHosted())
+ {
+ SignalFinalizationDone(TRUE);
+        // SQL's scheduler may give the finalizer thread a very small slice of CPU if it
+        // shares a scheduler with other tasks. This can cause severe problems for the finalizer thread.
+        // To reduce the pain here, we move the finalizer thread off SQL's scheduler.
+ // But SQL's scheduler does not support IHostTask::Alert on a task off scheduler, so we need
+ // to return finalizer thread back to scheduler when we wait alertably.
+ // GetFinalizerThread()->LeaveRuntime((size_t)SetupThreadNoThrow);
+ }
+
+ // finalizer should always park in default domain
+
+ if (s_FinalizerThreadOK)
+ {
+#ifdef _DEBUG // The only purpose of this try/finally is to trigger an assertion
+ EE_TRY_FOR_FINALLY(void *, unused, NULL)
+ {
+#endif
+ GetFinalizerThread()->SetBackground(TRUE);
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ // Add the Profiler Attach Event to the array of event handles that the
+ // finalizer thread waits on. If the process is not enabled for profiler
+ // attach (e.g., running memory- or sync-hosted, or there is some other error
+ // that causes the Profiler Attach Event not to be created), then this just
+ // adds NULL in the slot where the Profiler Attach Event handle would go. In
+ // this case, WaitForFinalizerEvent will know to ignore that handle when it
+ // waits.
+ //
+ // Calling ProfilingAPIAttachDetach::GetAttachEvent induces lazy
+ // initialization of the profiling API attach/detach support objects,
+ // including the event itself and its security descriptor. So switch to
+ // preemptive mode during these OS calls
+ GetFinalizerThread()->EnablePreemptiveGC();
+ MHandles[kProfilingAPIAttach] = ::ProfilingAPIAttachDetach::GetAttachEvent();
+ GetFinalizerThread()->DisablePreemptiveGC();
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+ while (!fQuitFinalizer)
+ {
+ // This will apply any policy for swallowing exceptions during normal
+ // processing, without allowing the finalizer thread to disappear on us.
+ ManagedThreadBase::FinalizerBase(FinalizerThreadWorker);
+
+ // If we came out on an exception, then we probably lost the signal that
+ // there are objects in the queue ready to finalize. The safest thing is
+ // to reenable finalization.
+ if (!fQuitFinalizer)
+ EnableFinalization();
+ }
+
+ // Tell shutdown thread we are done with finalizing dead objects.
+ hEventFinalizerToShutDown->Set();
+
+ // Wait for shutdown thread to signal us.
+ GetFinalizerThread()->EnablePreemptiveGC();
+ hEventShutDownToFinalizer->Wait(INFINITE,FALSE);
+ GetFinalizerThread()->DisablePreemptiveGC();
+
+ AppDomain::RaiseExitProcessEvent();
+
+ hEventFinalizerToShutDown->Set();
+
+ // Phase 1 ends.
+ // Now wait for Phase 2 signal.
+
+ // Wait for shutdown thread to signal us.
+ GetFinalizerThread()->EnablePreemptiveGC();
+ hEventShutDownToFinalizer->Wait(INFINITE,FALSE);
+ GetFinalizerThread()->DisablePreemptiveGC();
+
+ GCHeap::GetGCHeap()->SetFinalizeQueueForShutdown (FALSE);
+
+            // Finalize all registered objects during shutdown, even if they are still reachable.
+            // We have been asked to quit, so we must be shutting down.
+ _ASSERTE(g_fEEShutDown);
+ _ASSERTE(GetFinalizerThread()->PreemptiveGCDisabled());
+
+ // This will apply any policy for swallowing exceptions during normal
+ // processing, without allowing the finalizer thread to disappear on us.
+ ManagedThreadBase::FinalizerBase(FinalizeObjectsOnShutdown);
+
+ _ASSERTE(GetFinalizerThread()->GetDomain()->IsDefaultDomain());
+
+ // we might want to do some extra work on the finalizer thread
+ // check and do it
+ if (GetFinalizerThread()->HaveExtraWorkForFinalizer())
+ {
+ GetFinalizerThread()->DoExtraWorkForFinalizer();
+ }
+
+ hEventFinalizerToShutDown->Set();
+
+ // Wait for shutdown thread to signal us.
+ GetFinalizerThread()->EnablePreemptiveGC();
+ hEventShutDownToFinalizer->Wait(INFINITE,FALSE);
+ GetFinalizerThread()->DisablePreemptiveGC();
+
+#ifdef FEATURE_COMINTEROP
+ // Do extra cleanup for part 1 of shutdown.
+ // If we hang here (bug 87809) shutdown thread will
+ // timeout on us and will proceed normally
+ //
+ // We cannot call CoEEShutDownCOM, since the BEGIN_EXTERNAL_ENTRYPOINT
+ // will turn our call into a NOP. We can no longer execute managed
+ // code for an external caller.
+ InnerCoEEShutDownCOM();
+#endif // FEATURE_COMINTEROP
+
+ hEventFinalizerToShutDown->Set();
+
+#ifdef _DEBUG // The only purpose of this try/finally is to trigger an assertion
+ }
+ EE_FINALLY
+ {
+ // We can have exception to reach here if policy tells us to
+ // let exception go on finalizer thread.
+ //
+ if (GOT_EXCEPTION() && SwallowUnhandledExceptions())
+ _ASSERTE(!"Exception in the finalizer thread!");
+
+ }
+ EE_END_FINALLY;
+#endif
+ }
+ // finalizer should always park in default domain
+ _ASSERTE(GetThread()->GetDomain()->IsDefaultDomain());
+
+ LOG((LF_GC, LL_INFO10, "Finalizer thread done."));
+
+ // Enable pre-emptive GC before we leave so that anybody trying to suspend
+ // us will not end up waiting forever. Don't do a DestroyThread because this
+ // will happen soon when we tear down the thread store.
+ GetFinalizerThread()->EnablePreemptiveGC();
+
+    // We do not want to tear down the Finalizer thread,
+    // since doing so will cause OLE32 to CoUninitialize.
+ while (1)
+ {
+ PAL_TRY(void *, unused, NULL)
+ {
+ __SwitchToThread(INFINITE, CALLER_LIMITS_SPINNING);
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ }
+ PAL_ENDTRY
+ }
+
+ return 0;
+}
+
+DWORD FinalizerThread::FinalizerThreadCreate()
+{
+ DWORD dwRet = 0;
+
+ // TODO: The following line should be removed after contract violation is fixed.
+ // See bug 27409
+ SCAN_IGNORE_THROW;
+
+#ifndef FEATURE_PAL
+ if (!CLRMemoryHosted())
+ {
+ MHandles[kLowMemoryNotification] =
+ CreateMemoryResourceNotification(LowMemoryResourceNotification);
+ }
+#endif // FEATURE_PAL
+
+ hEventFinalizerDone = new CLREvent();
+ hEventFinalizerDone->CreateManualEvent(FALSE);
+ hEventFinalizer = new CLREvent();
+ hEventFinalizer->CreateAutoEvent(FALSE);
+ hEventFinalizerToShutDown = new CLREvent();
+ hEventFinalizerToShutDown->CreateAutoEvent(FALSE);
+ hEventShutDownToFinalizer = new CLREvent();
+ hEventShutDownToFinalizer->CreateAutoEvent(FALSE);
+
+ _ASSERTE(g_pFinalizerThread == 0);
+ g_pFinalizerThread = SetupUnstartedThread();
+ if (g_pFinalizerThread == 0) {
+ return 0;
+ }
+
+ // We don't want the thread block disappearing under us -- even if the
+ // actual thread terminates.
+ GetFinalizerThread()->IncExternalCount();
+
+ if (GetFinalizerThread()->CreateNewThread(0, &FinalizerThreadStart, NULL))
+ {
+ dwRet = GetFinalizerThread()->StartThread();
+
+ // When running under a user mode native debugger there is a race
+ // between the moment we've created the thread (in CreateNewThread) and
+ // the moment we resume it (in StartThread); the debugger may receive
+ // the "ct" (create thread) notification, and it will attempt to
+ // suspend/resume all threads in the process. Now imagine the debugger
+ // resumes this thread first, and only later does it try to resume the
+ // newly created thread (the finalizer thread). In these conditions our
+ // call to ResumeThread may come before the debugger's call to ResumeThread
+ // actually causing dwRet to equal 2.
+ // We cannot use IsDebuggerPresent() in the condition below because the
+ // debugger may have been detached between the time it got the notification
+ // and the moment we execute the test below.
+ _ASSERTE(dwRet == 1 || dwRet == 2);
+ if (dwRet == 2)
+ {
+ dwRet = 1;
+ }
+
+        // workaround wwl: make sure the finalizer is ready. This avoids an OOM problem during
+        // finalizer thread startup.
+ if (CLRTaskHosted()) {
+ FinalizerThreadWait(INFINITE);
+ if (!s_FinalizerThreadOK)
+ {
+ dwRet = 0;
+ }
+ }
+ }
+
+ return dwRet;
+}
+
+void FinalizerThread::SignalFinalizationDone(BOOL fFinalizer)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (fFinalizer)
+ {
+ FastInterlockAnd((DWORD*)&g_FinalizerWaiterStatus, ~FWS_WaitInterrupt);
+ }
+ hEventFinalizerDone->Set();
+}
+
+// Wait for the finalizer thread to complete one pass.
+void FinalizerThread::FinalizerThreadWait(DWORD timeout)
+{
+ ASSERT(hEventFinalizerDone->IsValid());
+ ASSERT(hEventFinalizer->IsValid());
+ ASSERT(GetFinalizerThread());
+
+    // Can't call this from within a finalizer method.
+ if (!IsCurrentThreadFinalizer())
+ {
+#ifdef FEATURE_COMINTEROP
+ // To help combat finalizer thread starvation, we check to see if there are any wrappers
+ // scheduled to be cleaned up for our context. If so, we'll do them here to avoid making
+ // the finalizer thread do a transition.
+ if (g_pRCWCleanupList != NULL)
+ g_pRCWCleanupList->CleanupWrappersInCurrentCtxThread();
+#endif // FEATURE_COMINTEROP
+
+ GCX_PREEMP();
+
+ Thread *pThread = GetThread();
+ BOOL fADUnloadHelper = (pThread && pThread->HasThreadStateNC(Thread::TSNC_ADUnloadHelper));
+
+ ULONGLONG startTime = CLRGetTickCount64();
+ ULONGLONG endTime;
+ if (timeout == INFINITE)
+ {
+ endTime = MAXULONGLONG;
+ }
+ else
+ {
+ endTime = timeout + startTime;
+ }
+
+ while (TRUE)
+ {
+ hEventFinalizerDone->Reset();
+ EnableFinalization();
+
+ //----------------------------------------------------
+ // Do appropriate wait and pump messages if necessary
+ //----------------------------------------------------
+ //WaitForSingleObject(hEventFinalizerDone, INFINITE);
+
+ if (fADUnloadHelper)
+ {
+ timeout = GetEEPolicy()->GetTimeout(OPR_FinalizerRun);
+ }
+
+ DWORD status = hEventFinalizerDone->Wait(timeout,TRUE);
+ if (status != WAIT_TIMEOUT && !(g_FinalizerWaiterStatus & FWS_WaitInterrupt))
+ {
+ return;
+ }
+ if (!fADUnloadHelper)
+ {
+ // recalculate timeout
+ if (timeout != INFINITE)
+ {
+ ULONGLONG curTime = CLRGetTickCount64();
+ if (curTime >= endTime)
+ {
+ return;
+ }
+ else
+ {
+ timeout = (DWORD)(endTime - curTime);
+ }
+ }
+ }
+ else
+ {
+ if (status == WAIT_TIMEOUT)
+ {
+ ULONGLONG finalizeStartTime = GetObjFinalizeStartTime();
+ if (finalizeStartTime || AppDomain::HasWorkForFinalizerThread())
+ {
+ if (CLRGetTickCount64() >= finalizeStartTime+timeout)
+ {
+ GCX_COOP();
+ FinalizerThreadAbortOnTimeout();
+ }
+ }
+ }
+ if (endTime != MAXULONGLONG)
+ {
+ ULONGLONG curTime = CLRGetTickCount64();
+ if (curTime >= endTime)
+ {
+ return;
+ }
+ }
+ }
+ }
+ }
+}
+
+
+#ifdef _DEBUG
+#define FINALIZER_WAIT_TIMEOUT 250
+#else
+#define FINALIZER_WAIT_TIMEOUT 200
+#endif
+#define FINALIZER_TOTAL_WAIT 2000
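+
+// With these values, the retry budget computed in FinalizerThreadWatchDogHelper
+// below, maxTry = (DWORD)(totalWaitTimeout*1.0/FINALIZER_WAIT_TIMEOUT + 0.5),
+// works out to 10 waits of 200ms in retail builds and 8 waits of 250ms in debug
+// builds when the default FINALIZER_TOTAL_WAIT of 2000ms is in effect.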
+
+static BOOL s_fRaiseExitProcessEvent = FALSE;
+static DWORD dwBreakOnFinalizeTimeOut = (DWORD) -1;
+
+static ULONGLONG ShutdownEnd;
+
+
+BOOL FinalizerThread::FinalizerThreadWatchDog()
+{
+ Thread *pThread = GetThread();
+
+ if (dwBreakOnFinalizeTimeOut == (DWORD) -1) {
+ dwBreakOnFinalizeTimeOut = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BreakOnFinalizeTimeOut);
+ }
+
+ // Do not wait for FinalizerThread if the current one is FinalizerThread.
+ if (pThread == GetFinalizerThread())
+ return TRUE;
+
+ // If finalizer thread is gone, just return.
+ if (GetFinalizerThread()->Join (0, FALSE) != WAIT_TIMEOUT)
+ return TRUE;
+
+    // *** This is the first call ShutDown -> Finalizer, to finalize dead objects ***
+ if ((g_fEEShutDown & ShutDown_Finalize1) &&
+ !(g_fEEShutDown & ShutDown_Finalize2)) {
+ ShutdownEnd = CLRGetTickCount64() + GetEEPolicy()->GetTimeout(OPR_ProcessExit);
+ // Wait for the finalizer...
+ LOG((LF_GC, LL_INFO10, "Signalling finalizer to quit..."));
+
+ fQuitFinalizer = TRUE;
+ hEventFinalizerDone->Reset();
+ EnableFinalization();
+
+ LOG((LF_GC, LL_INFO10, "Waiting for finalizer to quit..."));
+
+ if (pThread)
+ {
+ pThread->EnablePreemptiveGC();
+ }
+
+ BOOL fTimeOut = FinalizerThreadWatchDogHelper();
+
+ if (!fTimeOut) {
+ hEventShutDownToFinalizer->Set();
+
+ // Wait for finalizer thread to finish raising ExitProcess Event.
+ s_fRaiseExitProcessEvent = TRUE;
+ fTimeOut = FinalizerThreadWatchDogHelper();
+ s_fRaiseExitProcessEvent = FALSE;
+ }
+
+ if (pThread)
+ {
+ pThread->DisablePreemptiveGC();
+ }
+
+ // Can not call ExitProcess here if we are in a hosting environment.
+ // The host does not expect that we terminate the process.
+ //if (fTimeOut)
+ //{
+ //::ExitProcess (GetLatchedExitCode());
+ //}
+
+ return !fTimeOut;
+ }
+
+    // *** This is the second call ShutDown -> Finalizer, to ***
+    // suspend the Runtime and finalize live objects
+ if ( g_fEEShutDown & ShutDown_Finalize2 &&
+ !(g_fEEShutDown & ShutDown_COM) ) {
+
+#ifdef BACKGROUND_GC
+ gc_heap::gc_can_use_concurrent = FALSE;
+
+ if (pGenGCHeap->settings.concurrent)
+ pGenGCHeap->background_gc_wait();
+#endif //BACKGROUND_GC
+
+ _ASSERTE ((g_fEEShutDown & ShutDown_Finalize1) || g_fFastExitProcess);
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_SHUTDOWN);
+
+ g_fSuspendOnShutdown = TRUE;
+
+ // Do not balance the trap returning threads.
+ // We are shutting down CLR. Only Finalizer/Shutdown threads can
+ // return from DisablePreemptiveGC.
+ ThreadStore::TrapReturningThreads(TRUE);
+
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+
+ if (g_fFastExitProcess)
+ {
+ return TRUE;
+ }
+
+ // !!! Before we wake up Finalizer thread, we need to enable preemptive gc on the
+ // !!! shutdown thread. Otherwise we may see a deadlock during debug test.
+ if (pThread)
+ {
+ pThread->EnablePreemptiveGC();
+ }
+
+ g_fFinalizerRunOnShutDown = TRUE;
+
+ // Wait for finalizer thread to finish finalizing all objects.
+ hEventShutDownToFinalizer->Set();
+ BOOL fTimeOut = FinalizerThreadWatchDogHelper();
+
+ if (!fTimeOut) {
+ g_fFinalizerRunOnShutDown = FALSE;
+ }
+
+ // Can not call ExitProcess here if we are in a hosting environment.
+ // The host does not expect that we terminate the process.
+ //if (fTimeOut) {
+ // ::ExitProcess (GetLatchedExitCode());
+ //}
+
+ if (pThread)
+ {
+ pThread->DisablePreemptiveGC();
+ }
+ return !fTimeOut;
+ }
+
+ // *** This is the third call ShutDown -> Finalizer ***
+ // to do additional cleanup
+ if (g_fEEShutDown & ShutDown_COM) {
+ _ASSERTE (g_fEEShutDown & (ShutDown_Finalize2 | ShutDown_Finalize1));
+
+ if (pThread)
+ {
+ pThread->EnablePreemptiveGC();
+ }
+ g_fFinalizerRunOnShutDown = TRUE;
+
+ hEventShutDownToFinalizer->Set();
+ DWORD status = WAIT_OBJECT_0;
+ while (CLREventWaitWithTry(hEventFinalizerToShutDown, FINALIZER_WAIT_TIMEOUT, TRUE, &status))
+ {
+ }
+
+ BOOL fTimeOut = (status == WAIT_TIMEOUT) ? TRUE : FALSE;
+
+#ifndef GOLDEN
+ if (fTimeOut)
+ {
+ if (dwBreakOnFinalizeTimeOut) {
+ LOG((LF_GC, LL_INFO10, "Finalizer took too long to clean up COM IP's.\n"));
+ DebugBreak();
+ }
+ }
+#endif // GOLDEN
+
+ if (pThread)
+ {
+ pThread->DisablePreemptiveGC();
+ }
+
+ return !fTimeOut;
+ }
+
+ _ASSERTE(!"Should never reach this point");
+ return FALSE;
+}
+
+BOOL FinalizerThread::FinalizerThreadWatchDogHelper()
+{
+ // Since our thread is blocked waiting for the finalizer thread, we must be in preemptive GC
+ // mode so that we don't in turn block the finalizer on us during a GC.
+ Thread *pCurrentThread;
+ pCurrentThread = GetThread();
+ _ASSERTE (pCurrentThread == NULL || !pCurrentThread->PreemptiveGCDisabled());
+
+ // We're monitoring the finalizer thread.
+ Thread *pThread = GetFinalizerThread();
+ _ASSERTE(pThread != pCurrentThread);
+
+ ULONGLONG dwBeginTickCount = CLRGetTickCount64();
+
+ size_t prevCount;
+ size_t curCount;
+ BOOL fTimeOut = FALSE;
+ DWORD nTry = 0;
+ DWORD maxTotalWait = (DWORD)(ShutdownEnd - dwBeginTickCount);
+ DWORD totalWaitTimeout;
+ totalWaitTimeout = GetEEPolicy()->GetTimeout(OPR_FinalizerRun);
+ if (totalWaitTimeout == (DWORD)-1)
+ {
+ totalWaitTimeout = FINALIZER_TOTAL_WAIT;
+ }
+
+ if (s_fRaiseExitProcessEvent)
+ {
+ DWORD tmp = maxTotalWait/20; // Normally we assume a 2-second timeout when the total timeout is 40 seconds.
+ if (tmp > totalWaitTimeout)
+ {
+ totalWaitTimeout = tmp;
+ }
+ prevCount = MAXLONG;
+ }
+ else
+ {
+ prevCount = GCHeap::GetGCHeap()->GetNumberOfFinalizable();
+ }
+
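+ // Convert the total timeout into a whole number of FINALIZER_WAIT_TIMEOUT-sized
+ // wait slices, rounding to the nearest slice.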
+ DWORD maxTry = (DWORD)(totalWaitTimeout*1.0/FINALIZER_WAIT_TIMEOUT + 0.5);
+ BOOL bAlertable = TRUE; //(g_fEEShutDown & ShutDown_Finalize2) ? FALSE:TRUE;
+
+ if (dwBreakOnFinalizeTimeOut == (DWORD) -1) {
+ dwBreakOnFinalizeTimeOut = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BreakOnFinalizeTimeOut);
+ }
+
+ DWORD dwTimeout = FINALIZER_WAIT_TIMEOUT;
+
+ // This used to set the dwTimeout to infinite, but this can cause a hang when shutting down
+ // if a finalizer tries to take a lock that another suspended managed thread already has.
+ // This results in the hang because the other managed thread is never going to be resumed
+ // because we're in shutdown. So we make a compromise here - make the timeout for every
+ // iteration 10 times longer and make the total wait infinite - so if things hang we will
+ // eventually shut down, but we also give things a chance to finish if they're running slower
+ // because of the profiler.
+#ifdef PROFILING_SUPPORTED
+ if (CORProfilerPresent())
+ {
+ dwTimeout *= 10;
+ maxTotalWait = INFINITE;
+ }
+#endif // PROFILING_SUPPORTED
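+
+ // Worked example (illustrative): if the per-iteration timeout is T, a
+ // profiler-attached process re-checks progress every 10*T with no overall
+ // deadline, relying on the per-iteration progress checks in the loop below
+ // to detect a finalizer that has truly stopped making progress.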
+
+#ifdef FEATURE_CORECLR
+ // This change was added late in Windows Phone 8, so we want to keep it minimal.
+ // We should consider refactoring this later, as we've got a lot of dead code here now on CoreCLR.
+ dwTimeout = INFINITE;
+ maxTotalWait = INFINITE;
+#endif // FEATURE_CORECLR
+
+ while (1) {
+ struct Param
+ {
+ DWORD status;
+ DWORD dwTimeout;
+ BOOL bAlertable;
+ } param;
+ param.status = 0;
+ param.dwTimeout = dwTimeout;
+ param.bAlertable = bAlertable;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ pParam->status = hEventFinalizerToShutDown->Wait(pParam->dwTimeout, pParam->bAlertable);
+ }
+ PAL_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
+ {
+ param.status = WAIT_TIMEOUT;
+ }
+ PAL_ENDTRY
+
+ if (param.status != WAIT_TIMEOUT) {
+ break;
+ }
+ nTry ++;
+ // ExitProcessEventCount only ever increases;
+ // the number of finalizable objects only ever decreases.
+ if (s_fRaiseExitProcessEvent)
+ {
+ curCount = MAXLONG - GetProcessedExitProcessEventCount();
+ }
+ else
+ {
+ curCount = GCHeap::GetGCHeap()->GetNumberOfFinalizable();
+ }
+
+ if ((prevCount <= curCount)
+ && !GCHeap::GetGCHeap()->ShouldRestartFinalizerWatchDog()
+ && (pThread == NULL || !(pThread->m_State & (Thread::TS_UserSuspendPending | Thread::TS_DebugSuspendPending)))){
+ if (nTry == maxTry) {
+ if (!s_fRaiseExitProcessEvent) {
+ LOG((LF_GC, LL_INFO10, "Finalizer took too long on one object.\n"));
+ }
+ else
+ LOG((LF_GC, LL_INFO10, "Finalizer took too long to process ExitProcess event.\n"));
+
+ fTimeOut = TRUE;
+ if (dwBreakOnFinalizeTimeOut != 2) {
+ break;
+ }
+ }
+ }
+ else
+ {
+ nTry = 0;
+ prevCount = curCount;
+ }
+ ULONGLONG dwCurTickCount = CLRGetTickCount64();
+ if (pThread && pThread->m_State & (Thread::TS_UserSuspendPending | Thread::TS_DebugSuspendPending)) {
+ dwBeginTickCount = dwCurTickCount;
+ }
+ if (dwCurTickCount - dwBeginTickCount >= maxTotalWait)
+ {
+ LOG((LF_GC, LL_INFO10, "Finalizer took too long on shutdown.\n"));
+ fTimeOut = TRUE;
+ if (dwBreakOnFinalizeTimeOut != 2) {
+ break;
+ }
+ }
+ }
+
+#ifndef GOLDEN
+ if (fTimeOut)
+ {
+ if (dwBreakOnFinalizeTimeOut){
+ DebugBreak();
+ }
+ }
+#endif
+ return fTimeOut;
+}
diff --git a/src/vm/finalizerthread.h b/src/vm/finalizerthread.h
new file mode 100644
index 0000000000..0f4fa547d3
--- /dev/null
+++ b/src/vm/finalizerthread.h
@@ -0,0 +1,95 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+
+#ifndef _FINALIZER_THREAD_H_
+#define _FINALIZER_THREAD_H_
+
+class FinalizerThread
+{
+ static BOOL fRunFinalizersOnUnload;
+ static BOOL fQuitFinalizer;
+ static AppDomain *UnloadingAppDomain;
+
+ static CLREvent *hEventFinalizer;
+ static CLREvent *hEventFinalizerDone;
+ static CLREvent *hEventShutDownToFinalizer;
+ static CLREvent *hEventFinalizerToShutDown;
+
+ // Note: This enum makes it easier to read much of the code that deals with the
+ // array of events that the finalizer thread waits on. However, the ordering
+ // is important.
+ // See code:SVR::WaitForFinalizerEvent#MHandleTypeValues for more info
+ enum MHandleType
+ {
+ kLowMemoryNotification = 0,
+ kFinalizer = 1,
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ kProfilingAPIAttach = 2,
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+ kHandleCount,
+ };
+
+ static HANDLE MHandles[kHandleCount];
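+
+ // Illustrative sketch (hedged; the real wait loop lives in WaitForFinalizerEvent):
+ // because the enum values above double as indices into MHandles, a wait result
+ // maps directly back to the event that fired, e.g.:
+ //
+ // DWORD res = WaitForMultipleObjectsEx(kHandleCount, MHandles, FALSE, timeout, FALSE);
+ // MHandleType signaled = (MHandleType)(res - WAIT_OBJECT_0);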
+
+ static void WaitForFinalizerEvent (CLREvent *event);
+
+ static BOOL FinalizerThreadWatchDogHelper();
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ static void ProcessProfilerAttachIfNecessary(ULONGLONG * pui64TimestampLastCheckedEventMs);
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+ static Object * DoOneFinalization(Object* fobj, Thread* pThread, int bitToCheck, bool *pbTerminate);
+
+ static void FinalizeAllObjects_Wrapper(void *ptr);
+ static Object * FinalizeAllObjects(Object* fobj, int bitToCheck);
+
+public:
+ static Thread* GetFinalizerThread()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(g_pFinalizerThread != 0);
+ return g_pFinalizerThread;
+ }
+
+ // Start unloading app domain
+ static void UnloadAppDomain(AppDomain *pDomain, BOOL fRunFinalizers)
+ {
+ LIMITED_METHOD_CONTRACT;
+ UnloadingAppDomain = pDomain;
+ fRunFinalizersOnUnload = fRunFinalizers;
+ }
+
+ static AppDomain* GetUnloadingAppDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return UnloadingAppDomain;
+ }
+
+ static BOOL IsCurrentThreadFinalizer();
+
+ static void EnableFinalization();
+
+ static BOOL HaveExtraWorkForFinalizer();
+
+ static void FinalizerThreadWait(DWORD timeout = INFINITE);
+
+ // We wake up a wait for finalization for two reasons:
+ // if fFinalizer=TRUE, we have finished finalization.
+ // if fFinalizer=FALSE, the timeout for finalization is changed, and AD unload helper thread is notified.
+ static void SignalFinalizationDone(BOOL fFinalizer);
+
+ static VOID FinalizerThreadWorker(void *args);
+ static void FinalizeObjectsOnShutdown(LPVOID args);
+ static DWORD __stdcall FinalizerThreadStart(void *args);
+
+ static DWORD FinalizerThreadCreate();
+ static BOOL FinalizerThreadWatchDog();
+};
+
+#endif // _FINALIZER_THREAD_H_
diff --git a/src/vm/formattype.cpp b/src/vm/formattype.cpp
new file mode 100644
index 0000000000..42fcb054ca
--- /dev/null
+++ b/src/vm/formattype.cpp
@@ -0,0 +1,10 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+// This file just includes formattype.cpp from the inc directory.
+#include "common.h"
+
+#include "../inc/formattype.cpp"
diff --git a/src/vm/fptrstubs.cpp b/src/vm/fptrstubs.cpp
new file mode 100644
index 0000000000..3d6fda3c70
--- /dev/null
+++ b/src/vm/fptrstubs.cpp
@@ -0,0 +1,168 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#include "common.h"
+#include "fptrstubs.h"
+
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+
+// -------------------------------------------------------
+// FuncPtr stubs
+// -------------------------------------------------------
+
+Precode* FuncPtrStubs::Lookup(MethodDesc * pMD, PrecodeType type)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ Precode* pPrecode = NULL;
+ {
+ CrstHolder ch(&m_hashTableCrst);
+ pPrecode = m_hashTable.Lookup(PrecodeKey(pMD, type));
+ }
+ return pPrecode;
+}
+
+
+#ifndef DACCESS_COMPILE
+//
+// FuncPtrStubs
+//
+
+FuncPtrStubs::FuncPtrStubs()
+ : m_hashTableCrst(CrstFuncPtrStubs, CRST_UNSAFE_ANYMODE)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+PrecodeType FuncPtrStubs::GetDefaultType(MethodDesc* pMD)
+{
+ WRAPPER_NO_CONTRACT;
+
+ PrecodeType type = PRECODE_STUB;
+
+#ifdef HAS_FIXUP_PRECODE
+ // Use the faster fixup precode if it is available
+ type = PRECODE_FIXUP;
+#endif // HAS_FIXUP_PRECODE
+
+#ifdef HAS_REMOTING_PRECODE
+ if (pMD->IsRemotingInterceptedViaVirtualDispatch())
+ {
+ type = PRECODE_REMOTING;
+ }
+#endif // HAS_REMOTING_PRECODE
+
+ return type;
+}
+
+//
+// Returns an existing stub, or creates a new one
+//
+
+PCODE FuncPtrStubs::GetFuncPtrStub(MethodDesc * pMD, PrecodeType type)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ INJECT_FAULT(ThrowOutOfMemory(););
+ }
+ CONTRACTL_END
+
+ Precode* pPrecode = NULL;
+ {
+ CrstHolder ch(&m_hashTableCrst);
+ pPrecode = m_hashTable.Lookup(PrecodeKey(pMD, type));
+ }
+
+ if (pPrecode != NULL)
+ {
+ return pPrecode->GetEntryPoint();
+ }
+
+ PCODE target = NULL;
+
+#ifdef FEATURE_REMOTING
+ if (pMD->IsInterface() && !pMD->IsStatic())
+ {
+ // FuncPtrStubs on interface virtuals are used to transition
+ // into the remoting system with the exact interface method.
+
+ _ASSERTE(type == PRECODE_STUB);
+
+ target = CRemotingServices::GetDispatchInterfaceHelper(pMD);
+ }
+ else
+#endif // FEATURE_REMOTING
+ if (type != GetDefaultType(pMD) &&
+ // Always use stable entrypoint for LCG. If the cached precode pointed directly to JITed code,
+ // we would not be able to reuse it when the DynamicMethodDesc got reused for a new DynamicMethod.
+ !pMD->IsLCGMethod())
+ {
+ // Set the target if the precode is not of the default type. We patch only precodes of the default type.
+ target = pMD->GetMultiCallableAddrOfCode();
+ }
+ else
+ if (pMD->HasStableEntryPoint())
+ {
+ // Set target
+ target = pMD->GetStableEntryPoint();
+ }
+ else
+ {
+ // Set the target if the method is implemented via MethodImpl; we would not get to patch it otherwise.
+ MethodDesc* pMDImpl = MethodTable::MapMethodDeclToMethodImpl(pMD);
+
+ if (pMDImpl != pMD)
+ target = pMDImpl->GetMultiCallableAddrOfCode();
+ }
+
+ //
+ // We currently do not have a precode for this MethodDesc, so we will allocate one.
+ // We allocate outside of the lock, then take the lock (m_hashTableCrst); if we
+ // still do not have a precode, we Add the one that we just allocated and call
+ // SuppressRelease to keep our allocation.
+ // If another thread beat us to adding the precode, we don't call SuppressRelease,
+ // so the AllocMemTracker destructor will free the memory that we allocated.
+ // (A standalone sketch of this pattern appears at the end of this file.)
+ //
+ {
+ AllocMemTracker amt;
+ Precode* pNewPrecode = Precode::Allocate(type, pMD, pMD->GetLoaderAllocatorForCode(), &amt);
+
+ if (target != NULL)
+ {
+ pNewPrecode->SetTargetInterlocked(target);
+ }
+
+ {
+ CrstHolder ch(&m_hashTableCrst);
+
+ // Was an entry added by another thread while we were allocating?
+ pPrecode = m_hashTable.Lookup(PrecodeKey(pMD, type));
+
+ if (pPrecode == NULL)
+ {
+ // Use the one we allocated above
+ pPrecode = pNewPrecode;
+ m_hashTable.Add(pPrecode);
+ amt.SuppressRelease();
+ }
+ }
+ }
+
+ return pPrecode->GetEntryPoint();
+}
+#endif // DACCESS_COMPILE
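+
+// For illustration only: the allocate-outside-the-lock, double-checked-insert
+// pattern used by GetFuncPtrStub above, restated against standard library types
+// so the shape is visible in isolation. GetOrAdd, s_lock, and s_map are
+// hypothetical names; this sketch is not part of the build.
+#if 0
+#include <mutex>
+#include <unordered_map>
+
+static std::mutex s_lock;
+static std::unordered_map<void*, void*> s_map;
+
+void* GetOrAdd(void* key)
+{
+    {   // First lookup under the lock; most calls return here.
+        std::lock_guard<std::mutex> guard(s_lock);
+        auto it = s_map.find(key);
+        if (it != s_map.end())
+            return it->second;
+    }
+
+    // Allocate outside the lock so a slow allocation does not block readers.
+    void* pNew = ::operator new(16);
+
+    std::lock_guard<std::mutex> guard(s_lock);
+    auto inserted = s_map.emplace(key, pNew);
+    if (!inserted.second)
+    {
+        // Another thread won the race; discard our allocation and use theirs.
+        ::operator delete(pNew);
+        return inserted.first->second;
+    }
+    return pNew; // our entry was published
+}
+#endif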
diff --git a/src/vm/fptrstubs.h b/src/vm/fptrstubs.h
new file mode 100644
index 0000000000..0e0bfca91f
--- /dev/null
+++ b/src/vm/fptrstubs.h
@@ -0,0 +1,84 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef _FPTRSTUBS_H
+#define _FPTRSTUBS_H
+
+#include "common.h"
+
+// FuncPtrStubs contains stubs that are used by GetMultiCallableAddrOfCode() if
+// the function has not been jitted. Using a stub decouples ldftn from
+// the prestub, so the prestub does not need to be backpatched.
+//
+// These stubs are also used in other places that need a function pointer.
+// (A usage sketch follows the class definition below.)
+
+class FuncPtrStubs
+{
+public :
+ FuncPtrStubs();
+
+ Precode* Lookup(MethodDesc * pMD, PrecodeType type);
+ PCODE GetFuncPtrStub(MethodDesc * pMD, PrecodeType type);
+
+ Precode* Lookup(MethodDesc * pMD)
+ {
+ return Lookup(pMD, GetDefaultType(pMD));
+ }
+
+ PCODE GetFuncPtrStub(MethodDesc * pMD)
+ {
+ return GetFuncPtrStub(pMD, GetDefaultType(pMD));
+ }
+
+ static PrecodeType GetDefaultType(MethodDesc* pMD);
+
+private:
+ Crst m_hashTableCrst;
+
+ struct PrecodeKey
+ {
+ PrecodeKey(MethodDesc* pMD, PrecodeType type)
+ : m_pMD(pMD), m_type(type)
+ {
+ }
+
+ MethodDesc* m_pMD;
+ PrecodeType m_type;
+ };
+
+ class PrecodeTraits : public NoRemoveSHashTraits< DefaultSHashTraits<Precode*> >
+ {
+ public:
+ typedef PrecodeKey key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ return PrecodeKey(e->GetMethodDesc(), e->GetType());
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (k1.m_pMD == k2.m_pMD) && (k1.m_type == k2.m_type);
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t)(size_t)k.m_pMD ^ k.m_type;
+ }
+ };
+
+ SHash<PrecodeTraits> m_hashTable; // To find an existing stub for a method
+};
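+
+// Usage sketch (illustrative; pStubs stands for whichever FuncPtrStubs instance
+// the caller owns): Lookup() only observes, while GetFuncPtrStub() creates the
+// precode on first request.
+//
+// Precode* pExisting = pStubs->Lookup(pMD); // NULL if no stub yet
+// PCODE entry = pStubs->GetFuncPtrStub(pMD); // allocates on first call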
+
+#endif // _FPTRSTUBS_H
diff --git a/src/vm/frames.cpp b/src/vm/frames.cpp
new file mode 100644
index 0000000000..365964512e
--- /dev/null
+++ b/src/vm/frames.cpp
@@ -0,0 +1,2154 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// FRAMES.CPP
+
+
+
+#include "common.h"
+#include "log.h"
+#include "frames.h"
+#include "threads.h"
+#include "object.h"
+#include "method.hpp"
+#include "class.h"
+#include "excep.h"
+#include "security.h"
+#include "stublink.h"
+#include "fieldmarshaler.h"
+#include "objecthandle.h"
+#include "siginfo.hpp"
+#include "gc.h"
+#include "dllimportcallback.h"
+#include "stackwalk.h"
+#include "dbginterface.h"
+#include "gms.h"
+#include "eeconfig.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "ecall.h"
+#include "clsload.hpp"
+#include "cgensys.h"
+#include "virtualcallstub.h"
+#include "mdaassistants.h"
+#include "dllimport.h"
+#include "gcrefmap.h"
+#include "asmconstants.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "comtoclrcall.h"
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_INTERPRETER
+#include "interpreter.h"
+#endif // FEATURE_INTERPRETER
+
+#if CHECK_APP_DOMAIN_LEAKS
+#define CHECK_APP_DOMAIN GC_CALL_CHECK_APP_DOMAIN
+#else
+#define CHECK_APP_DOMAIN 0
+#endif
+
+//-----------------------------------------------------------------------
+#if _DEBUG
+//-----------------------------------------------------------------------
+
+#ifndef DACCESS_COMPILE
+
+unsigned dbgStubCtr = 0;
+unsigned dbgStubTrip = 0xFFFFFFFF;
+
+void Frame::Log() {
+ WRAPPER_NO_CONTRACT;
+
+ if (!LoggingOn(LF_STUBS, LL_INFO1000000))
+ return;
+
+ dbgStubCtr++;
+ if (dbgStubCtr > dbgStubTrip) {
+ dbgStubCtr++; // basically a nop to put a breakpoint on.
+ }
+
+ MethodDesc* method = GetFunction();
+
+#ifdef _TARGET_X86_
+ if (GetVTablePtr() == UMThkCallFrame::GetMethodFrameVPtr())
+ method = ((UMThkCallFrame*) this)->GetUMEntryThunk()->GetMethod();
+#endif
+
+ STRESS_LOG3(LF_STUBS, LL_INFO1000000, "STUBS: In Stub with Frame %p assoc Method %pM FrameType = %pV\n", this, method, *((void**) this));
+
+ char buff[64];
+ const char* frameType;
+ if (GetVTablePtr() == PrestubMethodFrame::GetMethodFrameVPtr())
+ frameType = "PreStub";
+#ifdef _TARGET_X86_
+ else if (GetVTablePtr() == UMThkCallFrame::GetMethodFrameVPtr())
+ frameType = "UMThkCallFrame";
+#endif
+ else if (GetVTablePtr() == PInvokeCalliFrame::GetMethodFrameVPtr())
+ {
+ sprintf_s(buff, COUNTOF(buff), "PInvoke CALLI target" FMT_ADDR,
+ DBG_ADDR(((PInvokeCalliFrame*)this)->GetPInvokeCalliTarget()));
+ frameType = buff;
+ }
+ else if (GetVTablePtr() == StubDispatchFrame::GetMethodFrameVPtr())
+ frameType = "StubDispatch";
+ else if (GetVTablePtr() == ExternalMethodFrame::GetMethodFrameVPtr())
+ frameType = "ExternalMethod";
+ else
+ frameType = "Unknown";
+
+ if (method != 0)
+ LOG((LF_STUBS, LL_INFO1000000,
+ "IN %s Stub Method = %s::%s SIG %s ESP of return" FMT_ADDR "\n",
+ frameType,
+ method->m_pszDebugClassName,
+ method->m_pszDebugMethodName,
+ method->m_pszDebugMethodSignature,
+ DBG_ADDR(GetReturnAddressPtr())));
+ else
+ LOG((LF_STUBS, LL_INFO1000000,
+ "IN %s Stub Method UNKNOWN ESP of return" FMT_ADDR "\n",
+ frameType,
+ DBG_ADDR(GetReturnAddressPtr()) ));
+
+ _ASSERTE(GetThread()->PreemptiveGCDisabled());
+}
+
+//-----------------------------------------------------------------------
+// This function is used to log transitions in either direction
+// between unmanaged code and CLR/managed code.
+// This is typically done in a stub that sets up a Frame, which is
+// passed as an argument to this function.
+
+void __stdcall Frame::LogTransition(Frame* frame)
+{
+
+ CONTRACTL {
+ DEBUG_ONLY;
+ NOTHROW;
+ ENTRY_POINT;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ BEGIN_ENTRYPOINT_VOIDRET;
+
+#ifdef _TARGET_X86_
+ // On x86, StubLinkerCPU::EmitMethodStubProlog calls Frame::LogTransition
+ // but the caller of EmitMethodStubProlog sets the GSCookie later on.
+ // So the cookie is not initialized by the point we get here.
+#else
+ _ASSERTE(*frame->GetGSCookiePtr() == GetProcessGSCookie());
+#endif
+
+ if (Frame::ShouldLogTransitions())
+ frame->Log();
+
+ END_ENTRYPOINT_VOIDRET;
+} // void Frame::LogTransition()
+
+#endif // #ifndef DACCESS_COMPILE
+
+//-----------------------------------------------------------------------
+#endif // _DEBUG
+//-----------------------------------------------------------------------
+
+
+// TODO [DAVBR]: For the full fix for VsWhidbey 450273, all the below
+// may be uncommented once isLegalManagedCodeCaller works properly
+// with non-return address inputs, and with non-DEBUG builds
+#if 0
+//-----------------------------------------------------------------------
+// returns TRUE if retAddr, is a return address that can call managed code
+
+bool isLegalManagedCodeCaller(PCODE retAddr) {
+ WRAPPER_NO_CONTRACT;
+#ifdef _TARGET_X86_
+
+ // We expect to be called from JITted code or from special code sites inside
+ // mscorwks, like callDescr, where we have put a NOP (0x90) so we know that they
+ // are specially blessed.
+ if (!ExecutionManager::IsManagedCode(retAddr) &&
+ (
+#ifdef DACCESS_COMPILE
+ !(PTR_BYTE(retAddr).IsValid()) ||
+#endif
+ ((*PTR_BYTE(retAddr) != 0x90) &&
+ (*PTR_BYTE(retAddr) != 0xcc))))
+ {
+ LOG((LF_GC, LL_INFO10, "Bad caller to managed code: retAddr=0x%08x, *retAddr=0x%x\n",
+ retAddr, *(BYTE*)PTR_BYTE(retAddr)));
+
+ return false;
+ }
+
+ // it better be a return address of some kind
+ TADDR dummy;
+ if (isRetAddr(retAddr, &dummy))
+ return true;
+
+#ifndef DACCESS_COMPILE
+#ifdef DEBUGGING_SUPPORTED
+ // The debugger could have dropped an INT3 on the instruction that made the call
+ // Calls can be 2 to 7 bytes long
+ if (CORDebuggerAttached()) {
+ PTR_BYTE ptr = PTR_BYTE(retAddr);
+ for (int i = -2; i >= -7; --i)
+ if (ptr[i] == 0xCC)
+ return true;
+ return false;
+ }
+#endif // DEBUGGING_SUPPORTED
+#endif // #ifndef DACCESS_COMPILE
+
+ _ASSERTE(!"Bad return address on stack");
+ return false;
+#else // _TARGET_X86_
+ return true;
+#endif // _TARGET_X86_
+}
+#endif //0
+
+
+//-----------------------------------------------------------------------
+// Count of the number of frame types
+const size_t FRAME_TYPES_COUNT =
+#define FRAME_TYPE_NAME(frameType) +1
+#include "frames.h"
+;
+
+#if defined (_DEBUG_IMPL) // _DEBUG and !DAC
+
+//-----------------------------------------------------------------------
+// Implementation of the global table of names. On the DAC side, just the global pointer.
+// On the runtime side, the array of names.
+ #define FRAME_TYPE_NAME(x) {x::GetMethodFrameVPtr(), #x} ,
+ static FrameTypeName FrameTypeNameTable[] = {
+ #include "frames.h"
+ };
+
+
+/* static */
+PTR_CSTR Frame::GetFrameTypeName(TADDR vtbl)
+{
+ LIMITED_METHOD_CONTRACT;
+ for (size_t i=0; i<FRAME_TYPES_COUNT; ++i)
+ {
+ if (vtbl == FrameTypeNameTable[(int)i].vtbl)
+ {
+ return FrameTypeNameTable[(int)i].name;
+ }
+ }
+
+ return NULL;
+} // PTR_CSTR Frame::GetFrameTypeName()
+
+
+//-----------------------------------------------------------------------
+
+
+void Frame::LogFrame(
+ int LF, // Log facility for this call.
+ int LL) // Log Level for this call.
+{
+ char buf[32];
+ const char *pFrameType;
+ pFrameType = GetFrameTypeName();
+
+ if (pFrameType == NULL)
+ {
+ pFrameType = GetFrameTypeName(GetVTablePtr());
+ }
+
+ if (pFrameType == NULL)
+ {
+ _ASSERTE(!"New Frame type needs to be added to FrameTypeName()");
+ // Pointer is up to 17 chars + "vtbl@" = 22 chars
+ sprintf_s(buf, COUNTOF(buf), "vtbl@%p", GetVTablePtr());
+ pFrameType = buf;
+ }
+
+ LOG((LF, LL, "FRAME: addr:%p, next:%p, type:%s\n",
+ this, m_Next, pFrameType));
+} // void Frame::LogFrame()
+
+void Frame::LogFrameChain(
+ int LF, // Log facility for this call.
+ int LL) // Log Level for this call.
+{
+ if (!LoggingOn(LF, LL))
+ return;
+
+ Frame *pFrame = this;
+ while (pFrame != FRAME_TOP)
+ {
+ pFrame->LogFrame(LF, LL);
+ pFrame = pFrame->m_Next;
+ }
+} // void Frame::LogFrameChain()
+
+//-----------------------------------------------------------------------
+#endif // _DEBUG_IMPL
+//-----------------------------------------------------------------------
+
+#ifndef DACCESS_COMPILE
+
+// This hashtable contains the vtable value of every Frame type.
+static PtrHashMap* s_pFrameVTables = NULL;
+
+// static
+void Frame::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ // create a table big enough for all the frame types, not in asynchronous mode, and with no lock owner
+ s_pFrameVTables = ::new PtrHashMap;
+ s_pFrameVTables->Init(2 * FRAME_TYPES_COUNT, FALSE, &g_lockTrustMeIAmThreadSafe);
+#define FRAME_TYPE_NAME(frameType) \
+ s_pFrameVTables->InsertValue(frameType::GetMethodFrameVPtr(), \
+ (LPVOID) frameType::GetMethodFrameVPtr());
+#include "frames.h"
+
+} // void Frame::Init()
+
+// static
+void Frame::Term()
+{
+ LIMITED_METHOD_CONTRACT;
+ delete s_pFrameVTables;
+ s_pFrameVTables = NULL;
+}
+
+#endif // DACCESS_COMPILE
+
+// Returns true if the Frame's VTablePtr is valid
+
+// static
+bool Frame::HasValidVTablePtr(Frame * pFrame)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (pFrame == NULL || pFrame == FRAME_TOP)
+ return false;
+
+#ifndef DACCESS_COMPILE
+ TADDR vptr = pFrame->GetVTablePtr();
+ //
+ // HelperMethodFrame, GCFrame, and DebuggerSecurityCodeMarkFrame are the most
+ // common frame types, so check for them explicitly.
+ //
+ if (vptr == HelperMethodFrame::GetMethodFrameVPtr())
+ return true;
+
+ if (vptr == GCFrame::GetMethodFrameVPtr())
+ return true;
+
+ if (vptr == DebuggerSecurityCodeMarkFrame::GetMethodFrameVPtr())
+ return true;
+
+ //
+ // otherwise consult the hashtable
+ //
+ if (s_pFrameVTables->LookupValue(vptr, (LPVOID) vptr) == (LPVOID) INVALIDENTRY)
+ return false;
+#endif
+
+ return true;
+}
+
+// Returns the location of the expected GSCookie,
+// Return NULL if the frame's vtable pointer is corrupt
+//
+// Note that Frame::GetGSCookiePtr is a virtual method,
+// and so it cannot be used without first checking if
+// the vtable is valid.
+
+// static
+PTR_GSCookie Frame::SafeGetGSCookiePtr(Frame * pFrame)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(pFrame != FRAME_TOP);
+
+ if (Frame::HasValidVTablePtr(pFrame))
+ return pFrame->GetGSCookiePtr();
+ else
+ return NULL;
+}
+
+//-----------------------------------------------------------------------
+#ifndef DACCESS_COMPILE
+//-----------------------------------------------------------------------
+// Link and Unlink this frame.
+//-----------------------------------------------------------------------
+
+VOID Frame::Push()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Push(GetThread());
+}
+
+VOID Frame::Push(Thread *pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(*GetGSCookiePtr() == GetProcessGSCookie());
+
+ m_Next = pThread->GetFrame();
+
+ // PAGE_SIZE is used to relax the assert for cases where two Frames are
+ // declared in the same source function. We cannot predict the order
+ // in which the C compiler will lay them out in the stack frame.
+ // So PAGE_SIZE is a guess of the maximum stack frame size of any method
+ // with multiple Frames in mscorwks.dll
+ _ASSERTE(((m_Next == FRAME_TOP) ||
+ (PBYTE(m_Next) + (2 * PAGE_SIZE)) > PBYTE(this)) &&
+ "Pushing a frame out of order ?");
+
+ _ASSERTE(// If AssertOnFailFast is set, the test expects to do stack overrun
+ // corruptions. In that case, the Frame chain may be corrupted,
+ // and the rest of the assert is not valid.
+ // Note that the corrupted Frame chain will be detected
+ // during stack-walking.
+ !g_pConfig->fAssertOnFailFast() ||
+ (m_Next == FRAME_TOP) ||
+ (*m_Next->GetGSCookiePtr() == GetProcessGSCookie()));
+
+ pThread->SetFrame(this);
+}
+
+VOID Frame::Pop()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Pop(GetThread());
+}
+
+VOID Frame::Pop(Thread *pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pThread->GetFrame() == this && "Popping a frame out of order ?");
+ _ASSERTE(*GetGSCookiePtr() == GetProcessGSCookie());
+ _ASSERTE(// If AssertOnFailFast is set, the test expects to do stack overrun
+ // corruptions. In that case, the Frame chain may be corrupted,
+ // and the rest of the assert is not valid.
+ // Note that the corrupted Frame chain will be detected
+ // during stack-walking.
+ !g_pConfig->fAssertOnFailFast() ||
+ (m_Next == FRAME_TOP) ||
+ (*m_Next->GetGSCookiePtr() == GetProcessGSCookie()));
+
+ pThread->SetFrame(m_Next);
+}
+
+//-----------------------------------------------------------------------
+#endif // #ifndef DACCESS_COMPILE
+//---------------------------------------------------------------
+// Get the extra param for shared generic code.
+//---------------------------------------------------------------
+PTR_VOID TransitionFrame::GetParamTypeArg()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ // This gets called while creating stack traces during exception handling.
+ // Using the ArgIterator constructor calls ArgIterator::Init which calls GetInitialOfsAdjust
+ // which calls SizeOfArgStack, which thinks it may load value types.
+ // However, all of these will have previously been loaded.
+ //
+ // I'm not entirely convinced this is the best place to put this: CrawlFrame::GetExactGenericArgsToken
+ // may be another option.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ MethodDesc *pFunction = GetFunction();
+ _ASSERTE (pFunction->RequiresInstArg());
+
+ MetaSig msig(pFunction);
+ ArgIterator argit (&msig);
+
+ INT offs = argit.GetParamTypeArgOffset();
+
+ TADDR taParamTypeArg = *PTR_TADDR(GetTransitionBlock() + offs);
+ return PTR_VOID(taParamTypeArg);
+}
+
+TADDR TransitionFrame::GetAddrOfThis()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetTransitionBlock() + ArgIterator::GetThisOffset();
+}
+
+VASigCookie * TransitionFrame::GetVASigCookie()
+{
+#if defined(_TARGET_X86_)
+ LIMITED_METHOD_CONTRACT;
+ return dac_cast<PTR_VASigCookie>(
+ *dac_cast<PTR_TADDR>(GetTransitionBlock() +
+ sizeof(TransitionBlock)));
+#else
+ WRAPPER_NO_CONTRACT;
+ MetaSig msig(GetFunction());
+ ArgIterator argit(&msig);
+ return PTR_VASigCookie(
+ *dac_cast<PTR_TADDR>(GetTransitionBlock() + argit.GetVASigCookieOffset()));
+#endif
+}
+
+#ifndef DACCESS_COMPILE
+PrestubMethodFrame::PrestubMethodFrame(TransitionBlock * pTransitionBlock, MethodDesc * pMD)
+ : FramedMethodFrame(pTransitionBlock, pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+#endif // #ifndef DACCESS_COMPILE
+
+BOOL PrestubMethodFrame::TraceFrame(Thread *thread, BOOL fromPatch,
+ TraceDestination *trace, REGDISPLAY *regs)
+{
+ WRAPPER_NO_CONTRACT;
+
+ //
+ // We want to set a frame patch, unless we're already at the
+ // frame patch, in which case we'll trace stable entrypoint which
+ // should be set by now.
+ //
+
+ if (fromPatch)
+ {
+ trace->InitForStub(GetFunction()->GetStableEntryPoint());
+ }
+ else
+ {
+ trace->InitForStub(GetPreStubEntryPoint());
+ }
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "PrestubMethodFrame::TraceFrame: ip=" FMT_ADDR "\n", DBG_ADDR(trace->GetAddress()) ));
+
+ return TRUE;
+}
+
+#ifndef DACCESS_COMPILE
+//-----------------------------------------------------------------------
+// A rather specialized routine for the exclusive use of StubDispatch.
+//-----------------------------------------------------------------------
+StubDispatchFrame::StubDispatchFrame(TransitionBlock * pTransitionBlock)
+ : FramedMethodFrame(pTransitionBlock, NULL)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pRepresentativeMT = NULL;
+ m_representativeSlot = 0;
+
+ m_pZapModule = NULL;
+ m_pIndirection = NULL;
+
+ m_pGCRefMap = NULL;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+MethodDesc* StubDispatchFrame::GetFunction()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ MethodDesc * pMD = m_pMD;
+
+ if (m_pMD == NULL)
+ {
+ if (m_pRepresentativeMT != NULL)
+ {
+ pMD = m_pRepresentativeMT->GetMethodDescForSlot(m_representativeSlot);
+#ifndef DACCESS_COMPILE
+ m_pMD = pMD;
+#endif
+ }
+ }
+
+ return pMD;
+}
+
+static PTR_BYTE FindGCRefMap(PTR_Module pZapModule, TADDR ptr)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ PEImageLayout *pNativeImage = pZapModule->GetNativeOrReadyToRunImage();
+
+ RVA rva = pNativeImage->GetDataRva(ptr);
+
+ PTR_CORCOMPILE_IMPORT_SECTION pImportSection = pZapModule->GetImportSectionForRVA(rva);
+ if (pImportSection == NULL)
+ return NULL;
+
+ COUNT_T index = (rva - pImportSection->Section.VirtualAddress) / pImportSection->EntrySize;
+
+ PTR_BYTE pGCRefMap = dac_cast<PTR_BYTE>(pNativeImage->GetRvaData(pImportSection->AuxiliaryData));
+ _ASSERTE(pGCRefMap != NULL);
+
+ // The GCRefMap starts with a lookup index that limits the size of the linear scan that follows.
+ PTR_BYTE p = pGCRefMap + dac_cast<PTR_DWORD>(pGCRefMap)[index / GCREFMAP_LOOKUP_STRIDE];
+ COUNT_T remaining = index % GCREFMAP_LOOKUP_STRIDE;
+
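+ // Each entry is variable-length: a set high bit (0x80) means another byte
+ // follows within the same entry, so an entry ends at the first byte whose
+ // high bit is clear.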
+ while (remaining > 0)
+ {
+ while ((*p & 0x80) != 0)
+ p++;
+ p++;
+
+ remaining--;
+ }
+
+ return p;
+}
+
+PTR_BYTE StubDispatchFrame::GetGCRefMap()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ PTR_BYTE pGCRefMap = m_pGCRefMap;
+
+ if (pGCRefMap == NULL)
+ {
+ if (m_pIndirection != NULL)
+ {
+ if (m_pZapModule == NULL)
+ {
+ m_pZapModule = ExecutionManager::FindModuleForGCRefMap(m_pIndirection);
+ }
+
+ if (m_pZapModule != NULL)
+ {
+ pGCRefMap = FindGCRefMap(m_pZapModule, m_pIndirection);
+ }
+
+#ifndef DACCESS_COMPILE
+ if (pGCRefMap != NULL)
+ {
+ m_pGCRefMap = pGCRefMap;
+ }
+ else
+ {
+ // Clear the indirection to avoid retrying
+ m_pIndirection = NULL;
+ }
+#endif
+ }
+ }
+
+ return pGCRefMap;
+}
+
+void StubDispatchFrame::GcScanRoots(promote_func *fn, ScanContext* sc)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ FramedMethodFrame::GcScanRoots(fn, sc);
+
+ PTR_BYTE pGCRefMap = GetGCRefMap();
+ if (pGCRefMap != NULL)
+ {
+ PromoteCallerStackUsingGCRefMap(fn, sc, pGCRefMap);
+ }
+ else
+ {
+ PromoteCallerStack(fn, sc);
+ }
+}
+
+BOOL StubDispatchFrame::TraceFrame(Thread *thread, BOOL fromPatch,
+ TraceDestination *trace, REGDISPLAY *regs)
+{
+ WRAPPER_NO_CONTRACT;
+
+ //
+ // We want to set a frame patch, unless we're already at the
+ // frame patch, in which case we'll trace stable entrypoint which
+ // should be set by now.
+ //
+
+ if (fromPatch)
+ {
+ trace->InitForStub(GetFunction()->GetStableEntryPoint());
+ }
+ else
+ {
+ trace->InitForStub(GetPreStubEntryPoint());
+ }
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "StubDispatchFrame::TraceFrame: ip=" FMT_ADDR "\n", DBG_ADDR(trace->GetAddress()) ));
+
+ return TRUE;
+}
+
+Frame::Interception StubDispatchFrame::GetInterception()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return INTERCEPTION_NONE;
+}
+
+#ifndef DACCESS_COMPILE
+ExternalMethodFrame::ExternalMethodFrame(TransitionBlock * pTransitionBlock)
+ : FramedMethodFrame(pTransitionBlock, NULL)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pIndirection = NULL;
+ m_pZapModule = NULL;
+
+ m_pGCRefMap = NULL;
+}
+#endif // !DACCESS_COMPILE
+
+void ExternalMethodFrame::GcScanRoots(promote_func *fn, ScanContext* sc)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ FramedMethodFrame::GcScanRoots(fn, sc);
+ PromoteCallerStackUsingGCRefMap(fn, sc, GetGCRefMap());
+}
+
+PTR_BYTE ExternalMethodFrame::GetGCRefMap()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ PTR_BYTE pGCRefMap = m_pGCRefMap;
+
+ if (pGCRefMap == NULL)
+ {
+ if (m_pIndirection != NULL)
+ {
+ pGCRefMap = FindGCRefMap(m_pZapModule, m_pIndirection);
+#ifndef DACCESS_COMPILE
+ m_pGCRefMap = pGCRefMap;
+#endif
+ }
+ }
+
+ _ASSERTE(pGCRefMap != NULL);
+ return pGCRefMap;
+}
+
+Frame::Interception ExternalMethodFrame::GetInterception()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return INTERCEPTION_NONE;
+}
+
+Frame::Interception PrestubMethodFrame::GetInterception()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ //
+ // The only direct kind of interception done by the prestub
+ // is class initialization.
+ //
+
+ return INTERCEPTION_PRESTUB;
+}
+
+#ifdef FEATURE_READYTORUN
+
+#ifndef DACCESS_COMPILE
+DynamicHelperFrame::DynamicHelperFrame(TransitionBlock * pTransitionBlock, int dynamicHelperFrameFlags)
+ : FramedMethodFrame(pTransitionBlock, NULL)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_dynamicHelperFrameFlags = dynamicHelperFrameFlags;
+}
+#endif // !DACCESS_COMPILE
+
+void DynamicHelperFrame::GcScanRoots(promote_func *fn, ScanContext* sc)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ FramedMethodFrame::GcScanRoots(fn, sc);
+
+ PTR_PTR_Object pArgumentRegisters = dac_cast<PTR_PTR_Object>(GetTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters());
+
+ if (m_dynamicHelperFrameFlags & DynamicHelperFrameFlags_ObjectArg)
+ {
+ TADDR pArgument = GetTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters();
+#ifdef _TARGET_X86_
+ // x86 is special as always
+ pArgument += offsetof(ArgumentRegisters, ECX);
+#endif
+ (*fn)(dac_cast<PTR_PTR_Object>(pArgument), sc, CHECK_APP_DOMAIN);
+ }
+
+ if (m_dynamicHelperFrameFlags & DynamicHelperFrameFlags_ObjectArg2)
+ {
+ TADDR pArgument = GetTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters();
+#ifdef _TARGET_X86_
+ // x86 is special as always
+ pArgument += offsetof(ArgumentRegisters, EDX);
+#else
+ pArgument += sizeof(TADDR);
+#endif
+ (*fn)(dac_cast<PTR_PTR_Object>(pArgument), sc, CHECK_APP_DOMAIN);
+ }
+}
+
+#endif // FEATURE_READYTORUN
+
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+//-----------------------------------------------------------------------
+// A rather specialized routine for the exclusive use of the COM PreStub.
+//-----------------------------------------------------------------------
+VOID
+ComPrestubMethodFrame::Init()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Initializes the frame's VPTR. This assumes C++ puts the vptr
+ // at offset 0 for a class not using MI, but this is no different
+ // than the assumption that COM Classic makes.
+ *((TADDR*)this) = GetMethodFrameVPtr();
+ *GetGSCookiePtr() = GetProcessGSCookie();
+}
+#endif // FEATURE_COMINTEROP
+
+//-----------------------------------------------------------------------
+// GCFrames
+//-----------------------------------------------------------------------
+
+
+//--------------------------------------------------------------------
+// This constructor pushes a new GCFrame on the frame chain.
+//--------------------------------------------------------------------
+GCFrame::GCFrame(OBJECTREF *pObjRefs, UINT numObjRefs, BOOL maybeInterior)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Init(GetThread(), pObjRefs, numObjRefs, maybeInterior);
+}
+
+GCFrame::GCFrame(Thread *pThread, OBJECTREF *pObjRefs, UINT numObjRefs, BOOL maybeInterior)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Init(pThread, pObjRefs, numObjRefs, maybeInterior);
+}
+
+void GCFrame::Init(Thread *pThread, OBJECTREF *pObjRefs, UINT numObjRefs, BOOL maybeInterior)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef USE_CHECKED_OBJECTREFS
+ if (!maybeInterior) {
+ UINT i;
+ for(i = 0; i < numObjRefs; i++)
+ Thread::ObjectRefProtected(&pObjRefs[i]);
+
+ for (i = 0; i < numObjRefs; i++) {
+ pObjRefs[i].Validate();
+ }
+ }
+
+#if 0 // We'll want to restore this goodness check at some time. For now, the fact that we use
+ // this as a temporary backstop in our loader exception conversions means we're highly
+ // exposed to infinite stack recursion should the loader be invoked during a stackwalk.
+ // So we'll do without.
+
+ if (g_pConfig->GetGCStressLevel() != 0 && IsProtectedByGCFrame(pObjRefs)) {
+ _ASSERTE(!"This objectref is already protected by a GCFrame. Protecting it twice will corrupt the GC.");
+ }
+#endif
+
+#endif
+
+ m_pObjRefs = pObjRefs;
+ m_numObjRefs = numObjRefs;
+ m_pCurThread = pThread;
+ m_MaybeInterior = maybeInterior;
+
+ Frame::Push(m_pCurThread);
+}
+
+
+//
+// GCFrame Object Scanning
+//
+// This handles scanning/promotion of GC objects that the programmer
+// explicitly protected in a GCFrame via the GCPROTECT_BEGIN /
+// GCPROTECT_END facility...
+//
+
+#endif // !DACCESS_COMPILE
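+
+// Usage sketch (illustrative): the GCPROTECT_BEGIN / GCPROTECT_END macros wrap
+// a GCFrame around code that may trigger a collection; the protected ref is
+// reported as a root and updated if the object moves. The helper names below
+// are hypothetical.
+//
+// OBJECTREF oref = AllocateSomeObject(); // hypothetical allocation
+// GCPROTECT_BEGIN(oref); // pushes a GCFrame protecting oref
+// SomethingThatMayTriggerGC(); // oref stays valid across a GC
+// GCPROTECT_END(); // pops the GCFrame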
+
+void GCFrame::GcScanRoots(promote_func *fn, ScanContext* sc)
+{
+ WRAPPER_NO_CONTRACT;
+
+ PTR_PTR_Object pRefs = dac_cast<PTR_PTR_Object>(m_pObjRefs);
+
+ for (UINT i = 0;i < m_numObjRefs; i++) {
+
+ LOG((LF_GC, INFO3, "GC Protection Frame Promoting" FMT_ADDR "to",
+ DBG_ADDR(OBJECTREF_TO_UNCHECKED_OBJECTREF(m_pObjRefs[i])) ));
+ if (m_MaybeInterior)
+ PromoteCarefully(fn, pRefs + i, sc, GC_CALL_INTERIOR|CHECK_APP_DOMAIN);
+ else
+ (*fn)(pRefs + i, sc, 0);
+ LOG((LF_GC, INFO3, FMT_ADDR "\n", DBG_ADDR(OBJECTREF_TO_UNCHECKED_OBJECTREF(m_pObjRefs[i])) ));
+ }
+}
+
+#ifdef FEATURE_REMOTING
+#include "objectclone.h"
+void GCSafeCollectionFrame::GcScanRoots(promote_func *fn, ScanContext* sc)
+{
+ WRAPPER_NO_CONTRACT;
+
+ PTR_GCSafeCollection collection = dac_cast<PTR_GCSafeCollection>(m_pCollection);
+ collection->ReportGCRefs(fn, sc);
+}
+
+#ifndef DACCESS_COMPILE
+GCSafeCollectionFrame::GCSafeCollectionFrame(void *collection)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(collection != NULL);
+ m_pCollection = collection;
+
+ Frame::Push();
+}
+
+VOID GCSafeCollectionFrame::Pop()
+{
+ LIMITED_METHOD_CONTRACT;
+ Frame::Pop();
+}
+#endif // !DACCESS_COMPILE
+#endif // FEATURE_REMOTING
+
+#ifndef DACCESS_COMPILE
+//--------------------------------------------------------------------
+// Pops the GCFrame and cancels the GC protection.
+//--------------------------------------------------------------------
+VOID GCFrame::Pop()
+{
+ WRAPPER_NO_CONTRACT;
+
+ Frame::Pop(m_pCurThread);
+#ifdef _DEBUG
+ m_pCurThread->EnableStressHeap();
+ for(UINT i = 0; i < m_numObjRefs; i++)
+ Thread::ObjectRefNew(&m_pObjRefs[i]); // Unprotect them
+#endif
+}
+
+#ifdef FEATURE_INTERPRETER
+// Methods of InterpreterFrame.
+InterpreterFrame::InterpreterFrame(Interpreter* interp)
+ : Frame(), m_interp(interp)
+{
+ Push();
+}
+
+
+MethodDesc* InterpreterFrame::GetFunction()
+{
+ return m_interp->GetMethodDesc();
+}
+
+void InterpreterFrame::GcScanRoots(promote_func *fn, ScanContext* sc)
+{
+ return m_interp->GCScanRoots(fn, sc);
+}
+
+#endif // FEATURE_INTERPRETER
+
+#ifdef _DEBUG
+
+struct IsProtectedByGCFrameStruct
+{
+ OBJECTREF *ppObjectRef;
+ UINT count;
+};
+
+static StackWalkAction IsProtectedByGCFrameStackWalkFramesCallback(
+ CrawlFrame *pCF,
+ VOID *pData
+)
+{
+ DEBUG_ONLY_FUNCTION;
+ WRAPPER_NO_CONTRACT;
+
+ IsProtectedByGCFrameStruct *pd = (IsProtectedByGCFrameStruct*)pData;
+ Frame *pFrame = pCF->GetFrame();
+ if (pFrame) {
+ if (pFrame->Protects(pd->ppObjectRef)) {
+ pd->count++;
+ }
+ }
+ return SWA_CONTINUE;
+}
+
+BOOL IsProtectedByGCFrame(OBJECTREF *ppObjectRef)
+{
+ DEBUG_ONLY_FUNCTION;
+ WRAPPER_NO_CONTRACT;
+
+ // Just report TRUE if GCStress is not on. This satisfies the asserts that use this
+ // code without the cost of actually determining it.
+ if (!GCStress<cfg_any>::IsEnabled())
+ return TRUE;
+
+ if (ppObjectRef == NULL) {
+ return TRUE;
+ }
+
+ CONTRACT_VIOLATION(ThrowsViolation);
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE ();
+ IsProtectedByGCFrameStruct d = {ppObjectRef, 0};
+ GetThread()->StackWalkFrames(IsProtectedByGCFrameStackWalkFramesCallback, &d);
+ if (d.count > 1) {
+ _ASSERTE(!"Multiple GCFrames protecting the same pointer. This will cause GC corruption!");
+ }
+ return d.count != 0;
+}
+#endif // _DEBUG
+
+#endif //!DACCESS_COMPILE
+
+void ProtectByRefsFrame::GcScanRoots(promote_func *fn, ScanContext *sc)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ ByRefInfo *pByRefInfos = m_brInfo;
+ while (pByRefInfos)
+ {
+ if (!CorIsPrimitiveType(pByRefInfos->typ))
+ {
+ TADDR pData = PTR_HOST_MEMBER_TADDR(ByRefInfo, pByRefInfos, data);
+
+ if (pByRefInfos->typeHandle.IsValueType())
+ {
+ ReportPointersFromValueType(fn, sc, pByRefInfos->typeHandle.GetMethodTable(), PTR_VOID(pData));
+ }
+ else
+ {
+ PTR_PTR_Object ppObject = PTR_PTR_Object(pData);
+
+ LOG((LF_GC, INFO3, "ProtectByRefs Frame Promoting" FMT_ADDR "to ", DBG_ADDR(*ppObject)));
+
+ (*fn)(ppObject, sc, CHECK_APP_DOMAIN);
+
+ LOG((LF_GC, INFO3, FMT_ADDR "\n", DBG_ADDR(*ppObject) ));
+ }
+ }
+ pByRefInfos = pByRefInfos->pNext;
+ }
+}
+
+void ProtectValueClassFrame::GcScanRoots(promote_func *fn, ScanContext *sc)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ ValueClassInfo *pVCInfo = m_pVCInfo;
+ while (pVCInfo != NULL)
+ {
+ _ASSERTE(pVCInfo->pMT->IsValueType());
+ ReportPointersFromValueType(fn, sc, pVCInfo->pMT, pVCInfo->pData);
+ pVCInfo = pVCInfo->pNext;
+ }
+}
+
+//
+// Promote Caller Stack
+//
+//
+
+void TransitionFrame::PromoteCallerStack(promote_func* fn, ScanContext* sc)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // I believe this is the contract:
+ //CONTRACTL
+ //{
+ // INSTANCE_CHECK;
+ // NOTHROW;
+ // GC_NOTRIGGER;
+ // FORBID_FAULT;
+ // MODE_ANY;
+ //}
+ //CONTRACTL_END
+
+ MethodDesc *pFunction;
+
+ LOG((LF_GC, INFO3, " Promoting method caller Arguments\n" ));
+
+ // We're going to have to look at the signature to determine
+ // which arguments are pointers... First we need the function.
+ pFunction = GetFunction();
+ if (pFunction == NULL)
+ return;
+
+ // Now get the signature...
+ Signature callSignature = pFunction->GetSignature();
+ if (callSignature.IsEmpty())
+ {
+ return;
+ }
+
+ //If not "vararg" calling convention, assume "default" calling convention
+ if (!MetaSig::IsVarArg(pFunction->GetModule(), callSignature))
+ {
+ MetaSig msig(pFunction);
+ PromoteCallerStackHelper (fn, sc, pFunction, &msig);
+ }
+ else
+ {
+ VASigCookie *varArgSig = GetVASigCookie();
+
+ //Note: no instantiations needed for varargs
+ MetaSig msig(varArgSig->signature,
+ varArgSig->pModule,
+ NULL);
+ PromoteCallerStackHelper (fn, sc, pFunction, &msig);
+ }
+}
+
+void TransitionFrame::PromoteCallerStackHelper(promote_func* fn, ScanContext* sc,
+ MethodDesc *pFunction, MetaSig *pmsig)
+{
+ WRAPPER_NO_CONTRACT;
+ // I believe this is the contract:
+ //CONTRACTL
+ //{
+ // INSTANCE_CHECK;
+ // NOTHROW;
+ // GC_NOTRIGGER;
+ // FORBID_FAULT;
+ // MODE_ANY;
+ //}
+ //CONTRACTL_END
+
+ ArgIterator argit(pmsig);
+
+ TADDR pTransitionBlock = GetTransitionBlock();
+
+ // promote 'this' for non-static methods
+ if (argit.HasThis() && pFunction != NULL)
+ {
+ BOOL interior = pFunction->GetMethodTable()->IsValueType() && !pFunction->IsUnboxingStub();
+
+ PTR_PTR_VOID pThis = dac_cast<PTR_PTR_VOID>(pTransitionBlock + argit.GetThisOffset());
+ LOG((LF_GC, INFO3,
+ " 'this' Argument at " FMT_ADDR "promoted from" FMT_ADDR "\n",
+ DBG_ADDR(pThis), DBG_ADDR(*pThis) ));
+
+ if (interior)
+ PromoteCarefully(fn, PTR_PTR_Object(pThis), sc, GC_CALL_INTERIOR|CHECK_APP_DOMAIN);
+ else
+ (fn)(PTR_PTR_Object(pThis), sc, CHECK_APP_DOMAIN);
+ }
+
+ if (argit.HasRetBuffArg())
+ {
+ PTR_PTR_VOID pRetBuffArg = dac_cast<PTR_PTR_VOID>(pTransitionBlock + argit.GetRetBuffArgOffset());
+ LOG((LF_GC, INFO3, " ret buf Argument promoted from" FMT_ADDR "\n", DBG_ADDR(*pRetBuffArg) ));
+ PromoteCarefully(fn, PTR_PTR_Object(pRetBuffArg), sc, GC_CALL_INTERIOR|CHECK_APP_DOMAIN);
+ }
+
+ int argOffset;
+ while ((argOffset = argit.GetNextOffset()) != TransitionBlock::InvalidOffset)
+ {
+ pmsig->GcScanRoots(dac_cast<PTR_VOID>(pTransitionBlock + argOffset), fn, sc);
+ }
+}
+
+#ifdef _TARGET_X86_
+UINT TransitionFrame::CbStackPopUsingGCRefMap(PTR_BYTE pGCRefMap)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ GCRefMapDecoder decoder(pGCRefMap);
+ return decoder.ReadStackPop() * sizeof(TADDR);
+}
+#endif
+
+void TransitionFrame::PromoteCallerStackUsingGCRefMap(promote_func* fn, ScanContext* sc, PTR_BYTE pGCRefMap)
+{
+ WRAPPER_NO_CONTRACT;
+
+ GCRefMapDecoder decoder(pGCRefMap);
+
+#ifdef _TARGET_X86_
+ // Skip StackPop
+ decoder.ReadStackPop();
+#endif
+
+ TADDR pTransitionBlock = GetTransitionBlock();
+
+ while (!decoder.AtEnd())
+ {
+ int pos = decoder.CurrentPos();
+ int token = decoder.ReadToken();
+
+ int ofs;
+
+#ifdef _TARGET_X86_
+ ofs = (pos < NUM_ARGUMENT_REGISTERS) ?
+ (TransitionBlock::GetOffsetOfArgumentRegisters() + ARGUMENTREGISTERS_SIZE - (pos + 1) * sizeof(TADDR)) :
+ (TransitionBlock::GetOffsetOfArgs() + (pos - NUM_ARGUMENT_REGISTERS) * sizeof(TADDR));
+#else
+ ofs = TransitionBlock::GetOffsetOfArgumentRegisters() + pos * sizeof(TADDR);
+#endif
+
+ PTR_TADDR ppObj = dac_cast<PTR_TADDR>(pTransitionBlock + ofs);
+
+ switch (token)
+ {
+ case GCREFMAP_SKIP:
+ break;
+ case GCREFMAP_REF:
+ fn(dac_cast<PTR_PTR_Object>(ppObj), sc, CHECK_APP_DOMAIN);
+ break;
+ case GCREFMAP_INTERIOR:
+ PromoteCarefully(fn, dac_cast<PTR_PTR_Object>(ppObj), sc, GC_CALL_INTERIOR | GC_CALL_CHECK_APP_DOMAIN);
+ break;
+ case GCREFMAP_METHOD_PARAM:
+ if (sc->promotion)
+ {
+#ifndef DACCESS_COMPILE
+ MethodDesc *pMDReal = dac_cast<PTR_MethodDesc>(*ppObj);
+ if (pMDReal != NULL)
+ GcReportLoaderAllocator(fn, sc, pMDReal->GetLoaderAllocator());
+#endif
+ }
+ break;
+ case GCREFMAP_TYPE_PARAM:
+ if (sc->promotion)
+ {
+#ifndef DACCESS_COMPILE
+ MethodTable *pMTReal = dac_cast<PTR_MethodTable>(*ppObj);
+ if (pMTReal != NULL)
+ GcReportLoaderAllocator(fn, sc, pMTReal->GetLoaderAllocator());
+#endif
+ }
+ break;
+ case GCREFMAP_VASIG_COOKIE:
+ {
+ VASigCookie *varArgSig = dac_cast<PTR_VASigCookie>(*ppObj);
+
+ //Note: no instantiations needed for varargs
+ MetaSig msig(varArgSig->signature,
+ varArgSig->pModule,
+ NULL);
+ PromoteCallerStackHelper (fn, sc, NULL, &msig);
+ }
+ break;
+ default:
+ _ASSERTE(!"Unknown GCREFMAP token");
+ break;
+ }
+ }
+}
+
+void PInvokeCalliFrame::PromoteCallerStack(promote_func* fn, ScanContext* sc)
+{
+ WRAPPER_NO_CONTRACT;
+
+ LOG((LF_GC, INFO3, " Promoting CALLI caller Arguments\n" ));
+
+ // get the signature
+ VASigCookie *varArgSig = GetVASigCookie();
+ if (varArgSig->signature.IsEmpty())
+ {
+ return;
+ }
+
+ // no instantiations needed for varargs
+ MetaSig msig(varArgSig->signature,
+ varArgSig->pModule,
+ NULL);
+ PromoteCallerStackHelper(fn, sc, NULL, &msig);
+}
+
+#ifndef DACCESS_COMPILE
+PInvokeCalliFrame::PInvokeCalliFrame(TransitionBlock * pTransitionBlock, VASigCookie * pVASigCookie, PCODE pUnmanagedTarget)
+ : FramedMethodFrame(pTransitionBlock, NULL)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pVASigCookie = pVASigCookie;
+ m_pUnmanagedTarget = pUnmanagedTarget;
+}
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+
+#ifndef DACCESS_COMPILE
+ComPlusMethodFrame::ComPlusMethodFrame(TransitionBlock * pTransitionBlock, MethodDesc * pMD)
+ : FramedMethodFrame(pTransitionBlock, pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+#endif // #ifndef DACCESS_COMPILE
+
+//virtual
+void ComPlusMethodFrame::GcScanRoots(promote_func *fn, ScanContext* sc)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // ComPlusMethodFrame is only used in the event call / late-bound call code path, where we do not have an IL stub,
+ // so we need to promote the arguments and return value manually.
+
+ FramedMethodFrame::GcScanRoots(fn, sc);
+ PromoteCallerStack(fn, sc);
+
+ MetaSig::RETURNTYPE returnType = GetFunction()->ReturnsObject();
+
+ // Promote the returned object
+ if(returnType == MetaSig::RETOBJ)
+ (*fn)(GetReturnObjectPtr(), sc, CHECK_APP_DOMAIN);
+ else if (returnType == MetaSig::RETBYREF)
+ PromoteCarefully(fn, GetReturnObjectPtr(), sc, GC_CALL_INTERIOR|CHECK_APP_DOMAIN);
+}
+#endif // FEATURE_COMINTEROP
+
+#if defined (_DEBUG) && !defined (DACCESS_COMPILE)
+// For IsProtectedByGCFrame, we need to know whether a given object ref is protected
+// by a ComPlusMethodFrame or a ComMethodFrame. Since GCScanRoots for those frames are
+// quite complicated, we don't want to duplicate their logic so we call GCScanRoots with
+// IsObjRefProtected (a fake promote function) and an extended ScanContext to do the checking.
+
+struct IsObjRefProtectedScanContext : public ScanContext
+{
+ OBJECTREF * oref_to_check;
+ BOOL oref_protected;
+ IsObjRefProtectedScanContext (OBJECTREF * oref)
+ {
+ thread_under_crawl = GetThread ();
+ promotion = TRUE;
+ oref_to_check = oref;
+ oref_protected = FALSE;
+ }
+};
+
+void IsObjRefProtected (Object** ppObj, ScanContext* sc, DWORD)
+{
+ LIMITED_METHOD_CONTRACT;
+ IsObjRefProtectedScanContext * orefProtectedSc = (IsObjRefProtectedScanContext *)sc;
+ if (ppObj == (Object **)(orefProtectedSc->oref_to_check))
+ orefProtectedSc->oref_protected = TRUE;
+}
+
+BOOL TransitionFrame::Protects(OBJECTREF * ppORef)
+{
+ WRAPPER_NO_CONTRACT;
+ IsObjRefProtectedScanContext sc (ppORef);
+ GcScanRoots (IsObjRefProtected, &sc);
+ return sc.oref_protected;
+}
+#endif //defined (_DEBUG) && !defined (DACCESS_COMPILE)
+
+//+----------------------------------------------------------------------------
+//
+// Method: TPMethodFrame::GcScanRoots public
+//
+// Synopsis: GC protects arguments on the stack
+//
+//+----------------------------------------------------------------------------
+#ifdef FEATURE_REMOTING
+
+#ifndef DACCESS_COMPILE
+TPMethodFrame::TPMethodFrame(TransitionBlock * pTransitionBlock)
+ : FramedMethodFrame(pTransitionBlock, NULL)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+#endif // #ifndef DACCESS_COMPILE
+
+void TPMethodFrame::GcScanRoots(promote_func *fn, ScanContext* sc)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Delegate to FramedMethodFrame
+ FramedMethodFrame::GcScanRoots(fn, sc);
+ FramedMethodFrame::PromoteCallerStack(fn, sc);
+
+ MetaSig::RETURNTYPE returnType = GetFunction()->ReturnsObject();
+
+ // Promote the returned object
+ if(returnType == MetaSig::RETOBJ)
+ {
+ (*fn)(GetReturnObjectPtr(), sc, CHECK_APP_DOMAIN);
+ }
+ else if (returnType == MetaSig::RETBYREF)
+ {
+ PromoteCarefully(fn, GetReturnObjectPtr(), sc, GC_CALL_INTERIOR|CHECK_APP_DOMAIN);
+ }
+
+ return;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: TPMethodFrame::TraceFrame public
+//
+// Synopsis: Return where the frame will execute next - the result is filled
+// into the given "trace" structure. The frame is responsible for
+// detecting where it is in its execution lifetime.
+//
+//+----------------------------------------------------------------------------
+BOOL TPMethodFrame::TraceFrame(Thread *thread, BOOL fromPatch,
+ TraceDestination *trace, REGDISPLAY *regs)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // We want to set a frame patch, unless we're already at the
+ // frame patch, in which case we'll trace stable entrypoint which
+ // should be set by now.
+
+ if (fromPatch)
+ {
+ trace->InitForStub(GetFunction()->GetStableEntryPoint());
+ }
+ else
+ {
+#ifdef DACCESS_COMPILE
+ DacNotImpl();
+#else
+ trace->InitForStub(GetEEFuncEntryPoint(TransparentProxyStubPatchLabel));
+#endif
+ }
+ return TRUE;
+
+}
+#endif // FEATURE_REMOTING
+
+#ifdef FEATURE_COMINTEROP
+
+#ifdef _TARGET_X86_
+// Return the # of stack bytes pushed by the unmanaged caller.
+UINT ComMethodFrame::GetNumCallerStackBytes()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ ComCallMethodDesc* pCMD = PTR_ComCallMethodDesc((TADDR)GetDatum());
+ PREFIX_ASSUME(pCMD != NULL);
+ // assumes __stdcall
+ // compute the callee pop stack bytes
+ return pCMD->GetNumStackBytes();
+}
+#endif // _TARGET_X86_
+
+#ifndef DACCESS_COMPILE
+void ComMethodFrame::DoSecondPassHandlerCleanup(Frame * pCurFrame)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Find ComMethodFrame, noting any ContextTransitionFrame along the way
+
+ while ((pCurFrame != FRAME_TOP) &&
+ (pCurFrame->GetVTablePtr() != ComMethodFrame::GetMethodFrameVPtr()))
+ {
+ if (pCurFrame->GetVTablePtr() == ContextTransitionFrame::GetMethodFrameVPtr())
+ {
+ // If there is a context transition before we find a ComMethodFrame, do nothing. Expect that
+ // the AD transition code will perform the corresponding work after it pops its context
+ // transition frame and before it rethrows the exception.
+ return;
+ }
+ pCurFrame = pCurFrame->PtrNextFrame();
+ }
+
+ if (pCurFrame == FRAME_TOP)
+ return;
+
+ ComMethodFrame * pComMethodFrame = (ComMethodFrame *)pCurFrame;
+
+ _ASSERTE(pComMethodFrame != NULL);
+ Thread * pThread = GetThread();
+ GCX_COOP_THREAD_EXISTS(pThread);
+ // Unwind the frames until the entry frame (which was ComMethodFrame)
+ pCurFrame = pThread->GetFrame();
+ while ((pCurFrame != NULL) && (pCurFrame <= pComMethodFrame))
+ {
+ pCurFrame->ExceptionUnwind();
+ pCurFrame = pCurFrame->PtrNextFrame();
+ }
+
+ // At this point, pCurFrame would be the ComMethodFrame's predecessor frame
+ // that we need to reset to.
+ _ASSERTE((pCurFrame != NULL) && (pComMethodFrame->PtrNextFrame() == pCurFrame));
+ pThread->SetFrame(pCurFrame);
+}
+#endif // !DACCESS_COMPILE
+
+#endif // FEATURE_COMINTEROP
+
+
+#ifdef _TARGET_X86_
+
+PTR_UMEntryThunk UMThkCallFrame::GetUMEntryThunk()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<PTR_UMEntryThunk>(GetDatum());
+}
+
+#ifdef DACCESS_COMPILE
+void UMThkCallFrame::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ WRAPPER_NO_CONTRACT;
+ UnmanagedToManagedFrame::EnumMemoryRegions(flags);
+
+ // Pieces of the UMEntryThunk need to be saved.
+ UMEntryThunk *pThunk = GetUMEntryThunk();
+ DacEnumMemoryRegion(dac_cast<TADDR>(pThunk), sizeof(UMEntryThunk));
+
+ UMThunkMarshInfo *pMarshInfo = pThunk->GetUMThunkMarshInfo();
+ DacEnumMemoryRegion(dac_cast<TADDR>(pMarshInfo), sizeof(UMThunkMarshInfo));
+}
+#endif
+
+#endif // _TARGET_X86_
+
+#ifndef DACCESS_COMPILE
+
+#if defined(_MSC_VER) && defined(_TARGET_X86_)
+#pragma optimize("y", on) // Small critical routines, don't put in EBP frame
+#endif
+
+// Initialization of HelperMethodFrame.
+void HelperMethodFrame::Push()
+{
+ CONTRACTL {
+ if (m_Attribs & FRAME_ATTR_NO_THREAD_ABORT) NOTHROW; else THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ //
+ // Finish initialization
+ //
+
+ // The compiler would not inline GetGSCookiePtr() because it is a virtual method.
+ // Inline it manually and verify that it gives the same result.
+ _ASSERTE(GetGSCookiePtr() == (((GSCookie *)(this)) - 1));
+ *(((GSCookie *)(this)) - 1) = GetProcessGSCookie();
+
+ _ASSERTE(!m_MachState.isValid());
+
+ Thread * pThread = ::GetThread();
+ m_pThread = pThread;
+
+ // Push the frame
+ Frame::Push(pThread);
+
+ if (!pThread->HasThreadStateOpportunistic((Thread::ThreadState)(Thread::TS_YieldRequested | Thread::TS_AbortRequested)))
+ return;
+
+ // Outline the slow path for better perf
+ PushSlowHelper();
+}
+
+void HelperMethodFrame::Pop()
+{
+ CONTRACTL {
+ if (m_Attribs & FRAME_ATTR_NO_THREAD_ABORT) NOTHROW; else THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ Thread * pThread = m_pThread;
+
+ if ((m_Attribs & FRAME_ATTR_NO_THREAD_ABORT) || !pThread->HasThreadStateOpportunistic(Thread::TS_AbortInitiated))
+ {
+ Frame::Pop(pThread);
+ return;
+ }
+
+ // Outline the slow path for better perf
+ PopSlowHelper();
+}
+
+#if defined(_MSC_VER) && defined(_TARGET_X86_)
+#pragma optimize("", on) // Go back to command line default optimizations
+#endif
+
+NOINLINE void HelperMethodFrame::PushSlowHelper()
+{
+ CONTRACTL {
+ if (m_Attribs & FRAME_ATTR_NO_THREAD_ABORT) NOTHROW; else THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ if (!(m_Attribs & FRAME_ATTR_NO_THREAD_ABORT))
+ {
+ if (m_pThread->IsAbortRequested())
+ {
+ m_pThread->HandleThreadAbort();
+ }
+
+ }
+
+ if (m_pThread->IsYieldRequested())
+ {
+ __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ }
+}
+
+NOINLINE void HelperMethodFrame::PopSlowHelper()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ m_pThread->HandleThreadAbort();
+ Frame::Pop(m_pThread);
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+MethodDesc* HelperMethodFrame::GetFunction()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifndef DACCESS_COMPILE
+ InsureInit(false, NULL);
+ return m_pMD;
+#else
+ if (m_MachState.isValid())
+ {
+ return m_pMD;
+ }
+ else
+ {
+ return ECall::MapTargetBackToMethod(m_FCallEntry);
+ }
+#endif
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Ensures the HelperMethodFrame gets initialized, if not already.
+//
+// Arguments:
+// * initialInit -
+// * true: ensure the simple, first stage of initialization has been completed.
+// This is used when the HelperMethodFrame is first created.
+// * false: complete any initialization that was left to do, if any.
+// * unwindState - [out] DAC builds use this to return the unwound machine state.
+// * hostCallPreference - (See code:HelperMethodFrame::HostCallPreference.)
+//
+// Return Value:
+// Normally, the function always returns TRUE meaning the initialization succeeded.
+//
+// However, if hostCallPreference is NoHostCalls, AND if a callee (like
+// LazyMachState::unwindLazyState) needed to acquire a JIT reader lock and was unable
+// to do so (lest it re-enter the host), then InsureInit will abort and return FALSE.
+// So any callers that specify hostCallPreference = NoHostCalls (which is not the
+// default), should check for FALSE return, and refuse to use the HMF in that case.
+// Currently only asynchronous calls made by profilers use that code path.
+//
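+// Caller-side sketch (illustrative only) for the NoHostCalls case described above:
+//
+//     if (!pHMF->InsureInit(false, NULL, NoHostCalls))
+//     {
+//         // Initialization would have required host calls; refuse to use the HMF.
+//         return;
+//     }
+//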
+
+BOOL HelperMethodFrame::InsureInit(bool initialInit,
+ MachState * unwindState,
+ HostCallPreference hostCallPreference /* = AllowHostCalls */)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ if ((hostCallPreference == AllowHostCalls) && !m_MachState.isValid()) { HOST_CALLS; } else { HOST_NOCALLS; }
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ if (m_MachState.isValid())
+ {
+ return TRUE;
+ }
+
+ _ASSERTE(m_Attribs != 0xCCCCCCCC);
+
+#ifndef DACCESS_COMPILE
+ if (!initialInit)
+ {
+ m_pMD = ECall::MapTargetBackToMethod(m_FCallEntry);
+
+ // if this is an FCall, we should find it
+ _ASSERTE(m_FCallEntry == 0 || m_pMD != 0);
+ }
+#endif
+
+ // Because true FCalls can be called via reflection, COM interop, etc.,
+ // we can't rely on the fact that we are called from jitted code to find the
+ // caller of the FCALL. Thus FCalls must erect the frame directly in the
+ // FCall. For JIT helpers, however, we can rely on this, and so they can
+ // be sneakier and defer the HelperMethodFrame setup to a called worker method.
+
+ // Work with a copy so that we only write the values once.
+ // This avoids race conditions.
+ LazyMachState* lazy = &m_MachState;
+ MachState unwound;
+
+ if (!initialInit &&
+ m_FCallEntry == 0 &&
+ !(m_Attribs & Frame::FRAME_ATTR_EXACT_DEPTH)) // Jit Helper
+ {
+ LazyMachState::unwindLazyState(
+ lazy,
+ &unwound,
+ 0,
+ hostCallPreference);
+
+#if !defined(DACCESS_COMPILE)
+ if (!unwound.isValid())
+ {
+ // This only happens if LazyMachState::unwindLazyState had to abort as a
+ // result of failing to take a reader lock (because we told it not to yield,
+ // but the writer lock was already held). Since we've not yet updated
+ // m_MachState, this HelperMethodFrame will still be considered not fully
+ // initialized (so a future call into InsureInit() will attempt to complete
+ // initialization again).
+ //
+ // Note that, in DAC builds, the contract with LazyMachState::unwindLazyState
+ // is a bit different, and it's expected that LazyMachState::unwindLazyState
+ // will commonly return an unwound state with _pRetAddr==NULL (which counts
+ // as an "invalid" MachState). So have DAC builds deliberately fall through
+ // rather than aborting when unwound is invalid.
+ _ASSERTE(hostCallPreference == NoHostCalls);
+ return FALSE;
+ }
+#endif // !defined(DACCESS_COMPILE)
+ }
+ else if (!initialInit &&
+ (m_Attribs & Frame::FRAME_ATTR_CAPTURE_DEPTH_2) != 0)
+ {
+ // explicitly told depth
+ LazyMachState::unwindLazyState(lazy, &unwound, 2);
+ }
+ else
+ {
+ // True FCall
+ LazyMachState::unwindLazyState(lazy, &unwound, 1);
+ }
+
+ _ASSERTE(unwound.isValid());
+
+#if !defined(DACCESS_COMPILE)
+ lazy->setLazyStateFromUnwind(&unwound);
+#else // DACCESS_COMPILE
+ if (unwindState)
+ {
+ *unwindState = unwound;
+ }
+#endif // DACCESS_COMPILE
+
+ return TRUE;
+}
+
+
+#include "comdelegate.h"
+
+Assembly* SecureDelegateFrame::GetAssembly()
+{
+ WRAPPER_NO_CONTRACT;
+
+#if !defined(DACCESS_COMPILE)
+ // obtain the frame off the delegate pointer
+ DELEGATEREF delegate = (DELEGATEREF) GetThis();
+ _ASSERTE(delegate);
+ if (!delegate->IsWrapperDelegate())
+ {
+ MethodDesc* pMethod = (MethodDesc*) delegate->GetMethodPtrAux();
+ Assembly* pAssembly = pMethod->GetAssembly();
+ _ASSERTE(pAssembly != NULL);
+ return pAssembly;
+ }
+ else
+ return NULL;
+#else
+ DacNotImpl();
+ return NULL;
+#endif
+}
+
+BOOL SecureDelegateFrame::TraceFrame(Thread *thread, BOOL fromPatch, TraceDestination *trace, REGDISPLAY *regs)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(!fromPatch);
+
+ // Unlike multicast delegates, secure delegates only call one method. So, we should just return false here
+ // and let the step out logic continue to the caller of the secure delegate stub.
+ LOG((LF_CORDB, LL_INFO1000, "SDF::TF: return FALSE\n"));
+
+ return FALSE;
+}
+
+BOOL MulticastFrame::TraceFrame(Thread *thread, BOOL fromPatch,
+ TraceDestination *trace, REGDISPLAY *regs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!fromPatch);
+
+#ifdef DACCESS_COMPILE
+ return FALSE;
+
+#else // !DACCESS_COMPILE
+ LOG((LF_CORDB,LL_INFO10000, "MulticastFrame::TF FromPatch:0x%x, at 0x%x\n", fromPatch, GetControlPC(regs)));
+
+ // At this point we have no way to recover the Stub object from the control pc. We can't use the MD stored
+ // in the MulticastFrame because it points to the dummy Invoke() method, not the method we want to call.
+
+ BYTE *pbDel = NULL;
+ int delegateCount = 0;
+
+#if defined(_TARGET_X86_)
+ // At this point the counter hasn't been incremented yet.
+ delegateCount = *regs->pEdi + 1;
+ pbDel = *(BYTE **)( (size_t)*(regs->pEsi) + GetOffsetOfTransitionBlock() + ArgIterator::GetThisOffset());
+#elif defined(_TARGET_AMD64_)
+ // At this point the counter hasn't been incremented yet.
+ delegateCount = (int)regs->pCurrentContext->Rdi + 1;
+ pbDel = *(BYTE **)( (size_t)(regs->pCurrentContext->Rsi) + GetOffsetOfTransitionBlock() + ArgIterator::GetThisOffset());
+#elif defined(_TARGET_ARM_)
+ // At this point the counter has not yet been incremented. Counter is in R7, frame pointer in R4.
+ delegateCount = regs->pCurrentContext->R7 + 1;
+ pbDel = *(BYTE **)( (size_t)(regs->pCurrentContext->R4) + GetOffsetOfTransitionBlock() + ArgIterator::GetThisOffset());
+#else
+ delegateCount = 0;
+ PORTABILITY_ASSERT("MulticastFrame::TraceFrame (frames.cpp)");
+#endif
+
+ int totalDelegateCount = (int)*(size_t*)(pbDel + DelegateObject::GetOffsetOfInvocationCount());
+
+ _ASSERTE( COMDelegate::IsTrueMulticastDelegate( ObjectToOBJECTREF((Object*)pbDel) ) );
+
+ if (delegateCount == totalDelegateCount)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "MF::TF: Executed all stubs, should return\n"));
+ // We've executed all the stubs, so we should return
+ return FALSE;
+ }
+ else
+ {
+ // We're going to execute stub delegateCount next, so go and grab it.
+ BYTE *pbDelInvocationList = *(BYTE **)(pbDel + DelegateObject::GetOffsetOfInvocationList());
+
+ pbDel = *(BYTE**)( ((ArrayBase *)pbDelInvocationList)->GetDataPtr() +
+ ((ArrayBase *)pbDelInvocationList)->GetComponentSize()*delegateCount);
+
+ _ASSERTE(pbDel);
+ return DelegateInvokeStubManager::TraceDelegateObject(pbDel, trace);
+ }
+#endif // !DACCESS_COMPILE
+}
+
+#ifndef DACCESS_COMPILE
+
+VOID InlinedCallFrame::Init()
+{
+ WRAPPER_NO_CONTRACT;
+
+ *((TADDR *)this) = GetMethodFrameVPtr();
+
+ // GetGSCookiePtr contains a virtual call and this is a perf-critical method, so we don't want to call it in retail builds
+ GSCookie *ptrGS = (GSCookie *)((BYTE *)this - sizeof(GSCookie));
+ _ASSERTE(ptrGS == GetGSCookiePtr());
+
+ *ptrGS = GetProcessGSCookie();
+
+ m_Datum = NULL;
+ m_pCallSiteSP = NULL;
+ m_pCallerReturnAddress = NULL;
+}
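+
+// Lifetime sketch (illustrative, not exhaustive): the JIT calls JIT_InitPInvokeFrame
+// in the prolog of a method containing an inlined P/Invoke; around each unmanaged
+// call site the generated code fills in m_pCallSiteSP and m_pCallerReturnAddress
+// (making InlinedCallFrame::FrameHasActiveCall return true) and clears the return
+// address again once the call comes back.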
+
+#ifdef _WIN64
+
+EXTERN_C void PInvokeStubForHostInner(DWORD dwStackSize, LPVOID pStackFrame, LPVOID pTarget);
+
+// C++ piece of one static stub for host on 64-bit. This is called by PInvokeStubForHost (PInvokeStubs.asm).
+void __stdcall PInvokeStubForHostWorker(DWORD dwStackSize, LPVOID pStackFrame, LPVOID pThis)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ InlinedCallFrame *pFrame = (InlinedCallFrame *)GetThread()->GetFrame();
+ _ASSERTE(InlinedCallFrame::FrameHasActiveCall(pFrame));
+
+ LPVOID pTarget = NULL;
+ MethodDesc *pMD = pFrame->GetFunction();
+
+ if (pMD == NULL)
+ {
+ // This is a CALLI and m_Datum is a mangled target
+ pTarget = (LPVOID)((UINT_PTR)pFrame->m_Datum >> 1);
+ }
+ else if (pMD->IsNDirect())
+ {
+ pTarget = ((NDirectMethodDesc *)pMD)->ndirect.m_pNativeNDirectTarget;
+ if (pMD->IsQCall())
+ {
+#ifdef FEATURE_STACK_PROBE
+ // We need just the stack probe for QCalls
+ RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT));
+#endif
+ PInvokeStubForHostInner(dwStackSize, pStackFrame, pTarget);
+ return;
+ }
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (pMD->IsComPlusCall() || pMD->IsEEImpl())
+ {
+ LPVOID *lpVtbl = *(LPVOID **)pThis;
+ ComPlusCallInfo *pComPlusCallInfo = ComPlusCallInfo::FromMethodDesc(pMD);
+ pTarget = lpVtbl[pComPlusCallInfo->m_cachedComSlot];
+ }
+#endif // FEATURE_COMINTEROP
+ else
+ {
+ UNREACHABLE_MSG("Unexpected MethodDesc kind encountered in PInvokeStubForHostWorker");
+ }
+
+ // We ask the host on every call. This is different from x86 but we keep this
+ // behavior for maximum compatibility with previous releases.
+ if (CallNeedsHostHook((size_t)pTarget))
+ {
+ // call LeaveRuntime
+ class LeaveRuntimeHolderThrowComplus
+ {
+ public:
+ LeaveRuntimeHolderThrowComplus(size_t target)
+ {
+ Thread::LeaveRuntimeThrowComplus(target);
+ }
+ ~LeaveRuntimeHolderThrowComplus()
+ {
+ Thread::EnterRuntime();
+ }
+ } holder((size_t)pTarget);
+
+ PInvokeStubForHostInner(dwStackSize, pStackFrame, pTarget);
+ // ~LeaveRuntimeHolderThrowComplus calls EnterRuntime here
+ }
+ else
+ {
+ // The host doesn't want to be notified - just call the target
+ PInvokeStubForHostInner(dwStackSize, pStackFrame, pTarget);
+ }
+}
+
+#endif // _WIN64
+
+
+
+void UnmanagedToManagedFrame::ExceptionUnwind()
+{
+ WRAPPER_NO_CONTRACT;
+
+ AppDomain::ExceptionUnwind(this);
+}
+
+#endif // !DACCESS_COMPILE
+
+void ContextTransitionFrame::GcScanRoots(promote_func *fn, ScanContext* sc)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Don't check app domains here - m_ReturnExecutionContext is in the parent frame's app domain
+ (*fn)(dac_cast<PTR_PTR_Object>(PTR_HOST_MEMBER_TADDR(ContextTransitionFrame, this, m_ReturnExecutionContext)), sc, 0);
+ LOG((LF_GC, INFO3, " " FMT_ADDR "\n", DBG_ADDR(m_ReturnExecutionContext) ));
+
+ // Don't check app domains here - m_LastThrownObjectInParentContext is in the parent frame's app domain
+ (*fn)(dac_cast<PTR_PTR_Object>(PTR_HOST_MEMBER_TADDR(ContextTransitionFrame, this, m_LastThrownObjectInParentContext)), sc, 0);
+ LOG((LF_GC, INFO3, " " FMT_ADDR "\n", DBG_ADDR(m_LastThrownObjectInParentContext) ));
+
+ // We don't need to worry about the object moving, as it is stored in a weak handle,
+ // but we do need to report it so it doesn't get collected if the only reference to
+ // it is in this frame. So only do something if we are in the promotion phase; in the
+ // relocation phase this could produce invalid refs, as the object may already have moved.
+ if (! sc->promotion)
+ return;
+
+ // The dac only cares about strong references at the moment. Since this is always
+ // in a weak ref, we don't report it here.
+#if defined(FEATURE_REMOTING) && !defined(DACCESS_COMPILE)
+ Context *returnContext = GetReturnContext();
+ PREFIX_ASSUME(returnContext != NULL);
+ _ASSERTE(returnContext);
+ _ASSERTE(returnContext->GetDomain()); // this will make sure is a valid pointer
+
+ // In the VM we operate on a local to avoid double relocation. In the dac we actually want
+ // to know the real location.
+#ifdef DACCESS_COMPILE
+ PTR_PTR_Object ppRef = returnContext->GetExposedObjectRawUncheckedPtr();
+
+ if (*ppRef == NULL)
+ return;
+
+ (*fn)(ppRef, sc, 0);
+
+#else // DACCESS_COMPILE
+ // We are in the middle of the GC. OBJECTREFs can't be used here since their built-in validation
+ // chokes on objects that have been relocated already
+ Object *pRef = returnContext->GetExposedObjectRawUnchecked();
+
+ if (pRef == NULL)
+ return;
+
+ LOG((LF_GC, INFO3, "ContextTransitionFrame Protection Frame Promoting" FMT_ADDR "to ", DBG_ADDR(pRef) ));
+ // Don't check app domains here - the objects are in the parent frame's app domain
+
+ (*fn)(&pRef, sc, 0);
+ LOG((LF_GC, INFO3, FMT_ADDR "\n", DBG_ADDR(pRef)));
+#endif // !DACCESS_COMPILE
+
+#endif
+}
+
+
+PCODE UnmanagedToManagedFrame::GetReturnAddress()
+{
+ WRAPPER_NO_CONTRACT;
+
+ PCODE pRetAddr = Frame::GetReturnAddress();
+
+ if (InlinedCallFrame::FrameHasActiveCall(m_Next) &&
+ pRetAddr == m_Next->GetReturnAddress())
+ {
+ // there's actually no unmanaged code involved - we were called directly
+ // from managed code using an InlinedCallFrame
+ return NULL;
+ }
+ else
+ {
+ return pRetAddr;
+ }
+}
diff --git a/src/vm/frames.h b/src/vm/frames.h
new file mode 100644
index 0000000000..885c65c84e
--- /dev/null
+++ b/src/vm/frames.h
@@ -0,0 +1,3851 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// FRAMES.H
+
+
+//
+// These C++ classes expose activation frames to the rest of the EE.
+// Activation frames are actually created by JIT-generated or stub-generated
+// code on the machine stack. Thus, the layout of the Frame classes and
+// the JIT/Stub code generators are tightly intertwined.
+//
+// IMPORTANT: Since frames are not actually constructed by C++,
+// don't try to define constructor/destructor functions. They won't get
+// called.
+//
+// IMPORTANT: Not all methods have full-fledged activation frames (in
+// particular, the JIT may create frameless methods.) This is one reason
+// why Frame doesn't expose a public "Next()" method: such a method would
+// skip frameless method calls. You must instead use one of the
+// StackWalk methods.
+//
+//
+// The following is the hierarchy of frames:
+//
+// Frame - the root class. There are no actual instances
+// | of Frames.
+// |
+// +-GCFrame - this frame doesn't represent a method call.
+// | its sole purpose is to let the EE gc-protect
+// | object references that it is manipulating.
+// |
+// +- FaultingExceptionFrame - this frame was placed on a method which faulted
+// | to save additional state information
+// |
+#ifdef FEATURE_HIJACK
+// |
+// +-HijackFrame - if a method's return address is hijacked, we
+// | construct one of these to allow crawling back
+// | to where the return should have gone.
+// |
+// +-ResumableFrame - this abstract frame provides the context necessary to
+// | | allow garbage collection during handling of
+// | | a resumable exception (e.g. during edit-and-continue,
+// | | or under GCStress4).
+// | |
+// | +-RedirectedThreadFrame - this frame is used for redirecting threads during suspension
+// |
+#endif // FEATURE_HIJACK
+// |
+// |
+#ifdef FEATURE_REMOTING
+// +-GCSafeCollectionFrame - this handles reporting for GCSafeCollections, which are
+// | generally used during appdomain transitions
+// |
+#endif // FEATURE_REMOTING
+// |
+// +-InlinedCallFrame - if a call to unmanaged code is hoisted into
+// | a JIT'ted caller, the calling method keeps
+// | this frame linked throughout its activation.
+// |
+// +-HelperMethodFrame - frame used to allow stack crawling inside jit helpers and fcalls
+// | |
+// + +-HelperMethodFrame_1OBJ- reports additional object references
+// | |
+// + +-HelperMethodFrame_2OBJ- reports additional object references
+// | |
+// + +-HelperMethodFrame_PROTECTOBJ - reports additional object references
+// |
+// +-TransitionFrame - this abstract frame represents a transition from
+// | | one or more nested frameless method calls
+// | | to either an EE runtime helper function or
+// | | a framed method.
+// | |
+// | +-StubHelperFrame - for instantiating stubs that need to grow stack arguments
+// | |
+// | +-SecureDelegateFrame - represents a call Delegate.Invoke for secure delegate
+// | |
+// | +-MulticastFrame - this frame protects arguments to a MulticastDelegate
+// | Invoke() call while calling each subscriber.
+// |
+// | +-FramedMethodFrame - this abstract frame represents a call to a method
+// | | that generates a full-fledged frame.
+// | |
+#ifdef FEATURE_COMINTEROP
+// | |
+// | +-ComPlusMethodFrame - represents a CLR to COM call using the generic worker
+// | |
+#endif //FEATURE_COMINTEROP
+// | |
+// | +-PInvokeCalliFrame - protects arguments when a call to GetILStubForCalli is made
+// | | to get or create IL stub for an unmanaged CALLI
+// | |
+// | +-PrestubMethodFrame - represents a call to a prestub
+// | |
+// | +-StubDispatchFrame - represents a call into the virtual call stub manager
+// | |
+// | |
+// | +-ExternalMethodFrame - represents a call from an ExternalMethodThunk
+// | |
+// | +-TPMethodFrame - for calls on transparent proxy
+// |
+// +-UnmanagedToManagedFrame - this frame represents a transition from
+// | | unmanaged code back to managed code. Its
+// | | main functions are to stop COM+ exception
+// | | propagation and to expose unmanaged parameters.
+// | |
+#ifdef FEATURE_COMINTEROP
+// | |
+// | +-ComMethodFrame - this frame represents a transition from
+// | | com to com+
+// | |
+// | +-ComPrestubMethodFrame - prestub frame for calls from COM to CLR
+// |
+#endif //FEATURE_COMINTEROP
+// | +-UMThkCallFrame - this frame represents an unmanaged->managed
+// | transition through N/Direct
+// |
+// +-ContextTransitionFrame - this frame is used to mark an appdomain transition
+// |
+// |
+// +-TailCallFrame - padding for tailcalls
+// |
+// +-ProtectByRefsFrame
+// |
+// +-ProtectValueClassFrame
+// |
+// +-DebuggerClassInitMarkFrame - marker frame to indicate that "class init" code is running
+// |
+// +-DebuggerSecurityCodeMarkFrame - marker frame to indicate that security code is running
+// |
+// +-DebuggerExitFrame - marker frame to indicate that a "break" IL instruction is being executed
+// |
+// +-DebuggerU2MCatchHandlerFrame - marker frame to indicate that native code is going to catch and
+// | swallow a managed exception
+// |
+#ifdef DEBUGGING_SUPPORTED
+// +-FuncEvalFrame - frame for debugger function evaluation
+#endif // DEBUGGING_SUPPORTED
+// |
+#if defined(FEATURE_INCLUDE_ALL_INTERFACES) && defined(_TARGET_X86_)
+// |
+// +-ReverseEnterRuntimeFrame
+// |
+// +-LeaveRuntimeFrame
+// |
+#endif
+// |
+// +-ExceptionFilterFrame - this frame wraps call to exception filter
+// |
+// +-SecurityContextFrame - place the security context of an assembly on the stack to ensure it will be included in security demands
+//
+//------------------------------------------------------------------------
+#if 0
+//------------------------------------------------------------------------
+
+This is the list of Interop stubs & transition helpers with information
+regarding what (if any) Frame they used and where they were set up:
+
+P/Invoke:
+ JIT inlined: The code to call the method is inlined into the caller by the JIT.
+ InlinedCallFrame is erected by the JITted code.
+ Requires marshaling: The stub does not erect any frames explicitly but contains
+ an unmanaged CALLI which turns it into the JIT inlined case.
+
+Delegate over a native function pointer:
+ The same as P/Invoke but the raw JIT inlined case is not present (the call always
+ goes through an IL stub).
+
+Calli:
+ The same as P/Invoke.
+ PInvokeCalliFrame is erected in stub generated by GenerateGetStubForPInvokeCalli
+ before calling to GetILStubForCalli which generates the IL stub. This happens only
+ the first time a call via the corresponding VASigCookie is made.
+
+ClrToCom:
+ Late-bound or eventing: The stub is generated by GenerateGenericComplusWorker
+ (x86) or exists statically as GenericComPlusCallStub[RetBuffArg] (64-bit),
+ and it erects a ComPlusMethodFrame frame.
+ Early-bound: The stub does not erect any frames explicitly but contains an
+ unmanaged CALLI which turns it into the JIT inlined case.
+
+ComToClr:
+ Normal stub:
+ Interpreted: The stub is generated by ComCall::CreateGenericComCallStub
+ (in ComToClrCall.cpp) and it erects a ComMethodFrame frame.
+ Prestub:
+ The prestub is ComCallPreStub (in ComCallableWrapper.cpp) and it erects
+ a ComPrestubMethodFrame frame.
+
+Reverse P/Invoke (used for C++ exports & fixups as well as delegates
+obtained from function pointers):
+ Normal stub:
+ x86: The stub is generated by UMEntryThunk::CompileUMThunkWorker
+ (in DllImportCallback.cpp) and it is frameless. It calls the managed
+ target directly, or an IL stub if marshaling is required.
+ non-x86: The stub exists statically as UMThunkStub and calls into the IL stub.
+ Prestub:
+ The prestub is generated by GenerateUMThunkPrestub (x86) or exists statically
+ as TheUMEntryPrestub (64-bit), and it erects an UMThkCallFrame frame.
+
+Reverse P/Invoke AppDomain selector stub:
+ The asm helper is IJWNOADThunkJumpTarget (in asmhelpers.asm) and it is frameless.
+
+//------------------------------------------------------------------------
+#endif // 0
+//------------------------------------------------------------------------
+
+#ifndef FRAME_ABSTRACT_TYPE_NAME
+#define FRAME_ABSTRACT_TYPE_NAME(frameType)
+#endif
+#ifndef FRAME_TYPE_NAME
+#define FRAME_TYPE_NAME(frameType)
+#endif
+
+FRAME_ABSTRACT_TYPE_NAME(FrameBase)
+FRAME_ABSTRACT_TYPE_NAME(Frame)
+FRAME_ABSTRACT_TYPE_NAME(TransitionFrame)
+#ifdef FEATURE_HIJACK
+FRAME_TYPE_NAME(ResumableFrame)
+FRAME_TYPE_NAME(RedirectedThreadFrame)
+#endif // FEATURE_HIJACK
+FRAME_TYPE_NAME(FaultingExceptionFrame)
+#ifdef DEBUGGING_SUPPORTED
+FRAME_TYPE_NAME(FuncEvalFrame)
+#endif // DEBUGGING_SUPPORTED
+FRAME_TYPE_NAME(HelperMethodFrame)
+FRAME_TYPE_NAME(HelperMethodFrame_1OBJ)
+FRAME_TYPE_NAME(HelperMethodFrame_2OBJ)
+FRAME_TYPE_NAME(HelperMethodFrame_PROTECTOBJ)
+FRAME_ABSTRACT_TYPE_NAME(FramedMethodFrame)
+#ifdef FEATURE_REMOTING
+FRAME_TYPE_NAME(TPMethodFrame)
+#endif
+FRAME_TYPE_NAME(SecureDelegateFrame)
+FRAME_TYPE_NAME(MulticastFrame)
+FRAME_ABSTRACT_TYPE_NAME(UnmanagedToManagedFrame)
+#ifdef FEATURE_COMINTEROP
+FRAME_TYPE_NAME(ComMethodFrame)
+FRAME_TYPE_NAME(ComPlusMethodFrame)
+FRAME_TYPE_NAME(ComPrestubMethodFrame)
+#endif // FEATURE_COMINTEROP
+FRAME_TYPE_NAME(PInvokeCalliFrame)
+#ifdef FEATURE_HIJACK
+FRAME_TYPE_NAME(HijackFrame)
+#endif // FEATURE_HIJACK
+FRAME_TYPE_NAME(PrestubMethodFrame)
+FRAME_TYPE_NAME(StubDispatchFrame)
+FRAME_TYPE_NAME(ExternalMethodFrame)
+#ifdef FEATURE_READYTORUN
+FRAME_TYPE_NAME(DynamicHelperFrame)
+#endif
+#if defined(_WIN64) || defined(_TARGET_ARM_)
+FRAME_TYPE_NAME(StubHelperFrame)
+#endif
+FRAME_TYPE_NAME(GCFrame)
+#ifdef FEATURE_INTERPRETER
+FRAME_TYPE_NAME(InterpreterFrame)
+#endif // FEATURE_INTERPRETER
+FRAME_TYPE_NAME(ProtectByRefsFrame)
+FRAME_TYPE_NAME(ProtectValueClassFrame)
+#ifdef FEATURE_REMOTING
+FRAME_TYPE_NAME(GCSafeCollectionFrame)
+#endif // FEATURE_REMOTING
+FRAME_TYPE_NAME(DebuggerClassInitMarkFrame)
+FRAME_TYPE_NAME(DebuggerSecurityCodeMarkFrame)
+FRAME_TYPE_NAME(DebuggerExitFrame)
+FRAME_TYPE_NAME(DebuggerU2MCatchHandlerFrame)
+#ifdef _TARGET_X86_
+FRAME_TYPE_NAME(UMThkCallFrame)
+#endif
+#if defined(FEATURE_INCLUDE_ALL_INTERFACES) && defined(_TARGET_X86_)
+FRAME_TYPE_NAME(ReverseEnterRuntimeFrame)
+FRAME_TYPE_NAME(LeaveRuntimeFrame)
+#endif
+FRAME_TYPE_NAME(InlinedCallFrame)
+FRAME_TYPE_NAME(ContextTransitionFrame)
+FRAME_TYPE_NAME(TailCallFrame)
+FRAME_TYPE_NAME(ExceptionFilterFrame)
+#if defined(_DEBUG)
+FRAME_TYPE_NAME(AssumeByrefFromJITStack)
+#endif // _DEBUG
+FRAME_TYPE_NAME(SecurityContextFrame)
+
+#undef FRAME_ABSTRACT_TYPE_NAME
+#undef FRAME_TYPE_NAME
+
+//------------------------------------------------------------------------
+
+#ifndef __frames_h__
+#define __frames_h__
+#if defined(_MSC_VER) && defined(_TARGET_X86_) && !defined(FPO_ON)
+#pragma optimize("y", on) // Small critical routines, don't put in EBP frame
+#define FPO_ON 1
+#define FRAMES_TURNED_FPO_ON 1
+#endif
+
+#include "util.hpp"
+#include "vars.hpp"
+#include "regdisp.h"
+#include "object.h"
+#include "objecthandle.h"
+#include <stddef.h>
+#include "siginfo.hpp"
+// context headers
+#include "context.h"
+#include "method.hpp"
+#include "stackwalk.h"
+#include "stubmgr.h"
+#include "gms.h"
+#include "threads.h"
+#include "callingconvention.h"
+
+// Forward references
+class Frame;
+class FieldMarshaler;
+class FramedMethodFrame;
+typedef VPTR(class FramedMethodFrame) PTR_FramedMethodFrame;
+struct HijackArgs;
+class UMEntryThunk;
+class UMThunkMarshInfo;
+class Marshaler;
+struct ResolveCacheElem;
+#if defined(DACCESS_COMPILE)
+class DacDbiInterfaceImpl;
+#endif // DACCESS_COMPILE
+#ifdef FEATURE_COMINTEROP
+class ComMethodFrame;
+class ComCallMethodDesc;
+#endif // FEATURE_COMINTEROP
+
+// Note: the value (-1) is used to generate the largest possible pointer value: this keeps frame addresses
+// increasing upward. Because we want to ensure that we don't accidentally change this, we have a C_ASSERT
+// in stackwalk.cpp. Since it requires constant values as args, we need to define FRAME_TOP in two steps.
+// First we define FRAME_TOP_VALUE which we'll use when we do the compile-time check, then we'll define
+// FRAME_TOP in terms of FRAME_TOP_VALUE. Defining FRAME_TOP as a PTR_Frame means we don't have to type cast
+// whenever we compare it to a PTR_Frame value (the usual use of the value).
+#define FRAME_TOP_VALUE ~0 // we want to say -1 here, but gcc has trouble with the signed value
+#define FRAME_TOP (PTR_Frame(FRAME_TOP_VALUE))
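+
+// Illustrative walk of the explicit frame chain (a sketch mirroring the loop in
+// ComMethodFrame::DoSecondPassHandlerCleanup in frames.cpp):
+//
+//     for (PTR_Frame pFrame = pThread->GetFrame();
+//          pFrame != FRAME_TOP;
+//          pFrame = pFrame->PtrNextFrame())
+//     {
+//         // inspect pFrame->GetVTablePtr(), pFrame->GetFunction(), ...
+//     }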
+
+#ifndef DACCESS_COMPILE
+
+#define DEFINE_VTABLE_GETTER(klass) \
+ public: \
+ static TADDR GetMethodFrameVPtr() { \
+ LIMITED_METHOD_CONTRACT; \
+ klass boilerplate(false); \
+ return *((TADDR*)&boilerplate); \
+ } \
+ klass(bool dummy) { LIMITED_METHOD_CONTRACT; }
+
+#define DEFINE_VTABLE_GETTER_AND_CTOR(klass) \
+ DEFINE_VTABLE_GETTER(klass) \
+ protected: \
+ klass() { LIMITED_METHOD_CONTRACT; }
+
+#else
+
+#define DEFINE_VTABLE_GETTER(klass) \
+ public: \
+ static TADDR GetMethodFrameVPtr() { \
+ LIMITED_METHOD_CONTRACT; \
+ return klass::VPtrTargetVTable(); \
+ } \
+
+#define DEFINE_VTABLE_GETTER_AND_CTOR(klass) \
+ DEFINE_VTABLE_GETTER(klass) \
+
+#endif // #ifndef DACCESS_COMPILE
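+
+// Example use of the generated getter (sketch): frame types are identified by
+// comparing vtable pointers rather than with RTTI, e.g.
+//
+//     if (pFrame->GetVTablePtr() == ComMethodFrame::GetMethodFrameVPtr())
+//     {
+//         ComMethodFrame * pComMethodFrame = (ComMethodFrame *)pFrame;
+//         ...
+//     }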
+
+//-----------------------------------------------------------------------------
+// For reporting on types of frames at runtime.
+class FrameTypeName
+{
+public:
+ TADDR vtbl;
+ PTR_CSTR name;
+};
+typedef DPTR(FrameTypeName) PTR_FrameTypeName;
+
+//-----------------------------------------------------------------------------
+// Frame depends on the location of its vtable within the object. This
+// superclass ensures that the vtable for Frame objects is in the same
+// location under both MSVC and GCC.
+//-----------------------------------------------------------------------------
+
+class FrameBase
+{
+ VPTR_BASE_VTABLE_CLASS(FrameBase)
+
+public:
+ FrameBase() {LIMITED_METHOD_CONTRACT; }
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc) {
+ LIMITED_METHOD_CONTRACT;
+ // Nothing to protect
+ }
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) = 0;
+#endif
+};
+
+//------------------------------------------------------------------------
+// Frame defines methods common to all frame types. There are no actual
+// instances of root frames.
+//------------------------------------------------------------------------
+
+class Frame : public FrameBase
+{
+ friend class CheckAsmOffsets;
+#ifdef DACCESS_COMPILE
+ friend void Thread::EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ VPTR_ABSTRACT_VTABLE_CLASS(Frame, FrameBase)
+
+public:
+
+ //------------------------------------------------------------------------
+ // Special characteristics of a frame
+ //------------------------------------------------------------------------
+ enum FrameAttribs {
+ FRAME_ATTR_NONE = 0,
+ FRAME_ATTR_EXCEPTION = 1, // This frame caused an exception
+ FRAME_ATTR_OUT_OF_LINE = 2, // The exception occurred out of line (the IP of the frame is not correct)
+ FRAME_ATTR_FAULTED = 4, // Exception caused by Win32 fault
+ FRAME_ATTR_RESUMABLE = 8, // We may resume from this frame
+ FRAME_ATTR_CAPTURE_DEPTH_2 = 0x10, // This is a HelperMethodFrame and the capture occurred at depth 2
+ FRAME_ATTR_EXACT_DEPTH = 0x20, // This is a HelperMethodFrame and a jit helper, but only crawl to the given depth
+ FRAME_ATTR_NO_THREAD_ABORT = 0x40, // This is a HelperMethodFrame that should not trigger thread aborts on entry
+ };
+ virtual unsigned GetFrameAttribs()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return FRAME_ATTR_NONE;
+ }
+
+ //------------------------------------------------------------------------
+ // Performs cleanup on an exception unwind
+ //------------------------------------------------------------------------
+#ifndef DACCESS_COMPILE
+ virtual void ExceptionUnwind()
+ {
+ // Nothing to do here.
+ LIMITED_METHOD_CONTRACT;
+ }
+#endif
+
+ // Should be overridden to return TRUE if the frame contains register
+ // state of the caller.
+ virtual BOOL NeedsUpdateRegDisplay()
+ {
+ return FALSE;
+ }
+
+ //------------------------------------------------------------------------
+ // Is this a frame used on transition to native code from jitted code?
+ //------------------------------------------------------------------------
+ virtual BOOL IsTransitionToNativeFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+ }
+
+ virtual MethodDesc *GetFunction()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return NULL;
+ }
+
+ virtual Assembly *GetAssembly()
+ {
+ WRAPPER_NO_CONTRACT;
+ MethodDesc *pMethod = GetFunction();
+ if (pMethod != NULL)
+ return pMethod->GetModule()->GetAssembly();
+ else
+ return NULL;
+ }
+
+ // indicate the current X86 IP address within the current method
+ // return 0 if the information is not available
+ virtual const PTR_BYTE GetIP()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return NULL;
+ }
+
+ // DACCESS: GetReturnAddressPtr should return the
+ // target address of the return address in the frame.
+ virtual TADDR GetReturnAddressPtr()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return NULL;
+ }
+
+ virtual PCODE GetReturnAddress()
+ {
+ WRAPPER_NO_CONTRACT;
+ TADDR ptr = GetReturnAddressPtr();
+ return (ptr != NULL) ? *PTR_PCODE(ptr) : NULL;
+ }
+
+ virtual PTR_Context* GetReturnContextAddr()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return NULL;
+ }
+
+ Context *GetReturnContext()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ PTR_Context* ppReturnContext = GetReturnContextAddr();
+ if (! ppReturnContext)
+ return NULL;
+ return *ppReturnContext;
+ }
+
+ AppDomain *GetReturnDomain()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ if (! GetReturnContext())
+ return NULL;
+ return GetReturnContext()->GetDomain();
+ }
+
+#ifndef DACCESS_COMPILE
+ virtual Object **GetReturnExecutionContextAddr()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return NULL;
+ }
+
+ void SetReturnAddress(TADDR val)
+ {
+ WRAPPER_NO_CONTRACT;
+ TADDR ptr = GetReturnAddressPtr();
+ _ASSERTE(ptr != NULL);
+ *(TADDR*)ptr = val;
+ }
+
+#ifndef DACCESS_COMPILE
+ void SetReturnContext(Context *pReturnContext)
+ {
+ WRAPPER_NO_CONTRACT;
+ PTR_Context* ppReturnContext = GetReturnContextAddr();
+ _ASSERTE(ppReturnContext);
+ *ppReturnContext = pReturnContext;
+ }
+#endif
+
+ void SetReturnExecutionContext(OBJECTREF ref)
+ {
+ WRAPPER_NO_CONTRACT;
+ Object **pRef = GetReturnExecutionContextAddr();
+ if (pRef != NULL)
+ *pRef = OBJECTREFToObject(ref);
+ }
+
+ OBJECTREF GetReturnExecutionContext()
+ {
+ WRAPPER_NO_CONTRACT;
+ Object **pRef = GetReturnExecutionContextAddr();
+ if (pRef == NULL)
+ return NULL;
+ else
+ return ObjectToOBJECTREF(*pRef);
+ }
+#endif // #ifndef DACCESS_COMPILE
+
+ PTR_GSCookie GetGSCookiePtr()
+ {
+ WRAPPER_NO_CONTRACT;
+ return dac_cast<PTR_GSCookie>(dac_cast<TADDR>(this) + GetOffsetOfGSCookie());
+ }
+
+ static int GetOffsetOfGSCookie()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return -(int)sizeof(GSCookie);
+ }
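+
+ // Layout note: the GS cookie lives in the frame's negative space, i.e.
+ // GetGSCookiePtr() == ((GSCookie *)this) - 1 (see the manually inlined
+ // equivalent, and the assert, in HelperMethodFrame::Push in frames.cpp).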
+
+ static bool HasValidVTablePtr(Frame * pFrame);
+ static PTR_GSCookie SafeGetGSCookiePtr(Frame * pFrame);
+ static void Init();
+ static void Term();
+
+ // Callers, note that the REGDISPLAY parameter is actually in/out. While
+ // UpdateRegDisplay is generally used to fill out the REGDISPLAY parameter, some
+ // overrides (e.g., code:ResumableFrame::UpdateRegDisplay) will actually READ what
+ // you pass in. So be sure to pass in a valid or zeroed out REGDISPLAY.
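+ // For example (sketch):
+ //
+ //     REGDISPLAY rd;
+ //     ZeroMemory(&rd, sizeof(rd));
+ //     pFrame->UpdateRegDisplay(&rd);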
+ virtual void UpdateRegDisplay(const PREGDISPLAY)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return;
+ }
+
+ //------------------------------------------------------------------------
+ // Debugger support
+ //------------------------------------------------------------------------
+
+
+public:
+ enum ETransitionType
+ {
+ TT_NONE,
+ TT_M2U, // we can safely cast to a FramedMethodFrame
+ TT_U2M, // we can safely cast to a UnmanagedToManagedFrame
+ TT_AppDomain, // transitioning between AppDomains.
+ TT_InternalCall, // calling into the CLR (ecall/fcall).
+ };
+
+ // Get the type of transition.
+ // M-->U, U-->M
+ virtual ETransitionType GetTransitionType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TT_NONE;
+ }
+
+ enum
+ {
+ TYPE_INTERNAL,
+ TYPE_ENTRY,
+ TYPE_EXIT,
+ TYPE_CONTEXT_CROSS,
+ TYPE_INTERCEPTION,
+ TYPE_SECURITY,
+ TYPE_CALL,
+ TYPE_FUNC_EVAL,
+#ifdef FEATURE_REMOTING
+ TYPE_TP_METHOD_FRAME,
+#endif
+ TYPE_MULTICAST,
+
+ // HMFs and derived classes should use this so the profiling API knows it needs
+ // to ensure HMF-specific lazy initialization gets done without re-entering the host.
+ TYPE_HELPER_METHOD_FRAME,
+
+ TYPE_COUNT
+ };
+
+ virtual int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_INTERNAL;
+ };
+
+ // When stepping into a method, various other methods may be called.
+ // These are referred to as interceptors. They are all invoked
+ // with frames of various types. GetInterception() indicates whether
+ // the frame was set up for execution of such interceptors.
+
+ enum Interception
+ {
+ INTERCEPTION_NONE,
+ INTERCEPTION_CLASS_INIT,
+ INTERCEPTION_EXCEPTION,
+ INTERCEPTION_CONTEXT,
+ INTERCEPTION_SECURITY,
+ INTERCEPTION_PRESTUB,
+ INTERCEPTION_OTHER,
+
+ INTERCEPTION_COUNT
+ };
+
+ virtual Interception GetInterception()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return INTERCEPTION_NONE;
+ }
+
+ // Return information about an unmanaged call the frame
+ // will make.
+ // ip - the unmanaged routine which will be called
+ // returnIP - the address in the stub which the unmanaged routine
+ // will return to.
+ // returnSP - the location returnIP is pushed onto the stack
+ // during the call.
+ //
+ virtual void GetUnmanagedCallSite(TADDR* ip,
+ TADDR* returnIP,
+ TADDR* returnSP)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (ip)
+ *ip = NULL;
+
+ if (returnIP)
+ *returnIP = NULL;
+
+ if (returnSP)
+ *returnSP = NULL;
+ }
+
+ // Return where the frame will execute next - the result is filled
+ // into the given "trace" structure. The frame is responsible for
+ // detecting where it is in its execution lifetime.
+ virtual BOOL TraceFrame(Thread *thread, BOOL fromPatch,
+ TraceDestination *trace, REGDISPLAY *regs)
+ {
+ LIMITED_METHOD_CONTRACT;
+ LOG((LF_CORDB, LL_INFO10000,
+ "Default TraceFrame always returns false.\n"));
+ return FALSE;
+ }
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ {
+ WRAPPER_NO_CONTRACT;
+ DAC_ENUM_VTHIS();
+
+ // Many frames store a MethodDesc pointer in m_Datum
+ // so pick that up automatically.
+ MethodDesc* func = GetFunction();
+ if (func)
+ {
+ func->EnumMemoryRegions(flags);
+ }
+
+ // Include the NegSpace
+ GSCookie * pGSCookie = GetGSCookiePtr();
+ _ASSERTE(FitsIn<ULONG32>(PBYTE(pGSCookie) - PBYTE(this)));
+ ULONG32 negSpaceSize = static_cast<ULONG32>(PBYTE(pGSCookie) - PBYTE(this));
+ DacEnumMemoryRegion(dac_cast<TADDR>(this) - negSpaceSize, negSpaceSize);
+ }
+#endif
+
+ //---------------------------------------------------------------
+ // Expose key offsets and values for stub generation.
+ //---------------------------------------------------------------
+ static BYTE GetOffsetOfNextLink()
+ {
+ WRAPPER_NO_CONTRACT;
+ size_t ofs = offsetof(class Frame, m_Next);
+ _ASSERTE(FitsInI1(ofs));
+ return (BYTE)ofs;
+ }
+
+ // Get this frame's vtable pointer (can be used to check what type the frame is)
+ TADDR GetVTablePtr()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return VPTR_HOST_VTABLE_TO_TADDR(*(LPVOID*)this);
+ }
+
+#ifdef _DEBUG
+ virtual BOOL Protects(OBJECTREF *ppObjectRef)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+ }
+#endif
+
+#ifndef DACCESS_COMPILE
+ // Link and Unlink this frame
+ VOID Push();
+ VOID Pop();
+ VOID Push(Thread *pThread);
+ VOID Pop(Thread *pThread);
+#endif // DACCESS_COMPILE
+
+#ifdef _DEBUG_IMPL
+ void Log();
+ static BOOL ShouldLogTransitions() { WRAPPER_NO_CONTRACT; return LoggingOn(LF_STUBS, LL_INFO1000000); }
+ static void __stdcall LogTransition(Frame* frame);
+ void LogFrame(int LF, int LL); // General purpose logging.
+ void LogFrameChain(int LF, int LL); // Log the whole chain.
+ virtual const char* GetFrameTypeName() {return NULL;}
+ static PTR_CSTR GetFrameTypeName(TADDR vtbl);
+#endif
+
+ //------------------------------------------------------------------------
+ // Returns the address of a security object or
+ // null if there is no space for an object on this frame.
+ //------------------------------------------------------------------------
+ virtual OBJECTREF *GetAddrOfSecurityDesc()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return NULL;
+ }
+
+private:
+ // Pointer to the next frame up the stack.
+
+protected:
+ PTR_Frame m_Next; // offset +4
+
+public:
+ PTR_Frame PtrNextFrame() { return m_Next; }
+
+private:
+ // Because JIT-method activations cannot be expressed as Frames,
+ // everyone must use the StackCrawler to walk the frame chain
+ // reliably. We'll expose the Next method only to the StackCrawler
+ // to prevent mistakes.
+ /*<TODO>@NICE: Restrict "friendship" again to the StackWalker method;
+ not done because of circular dependency with threads.h</TODO>
+ */
+ // friend Frame* Thread::StackWalkFrames(PSTACKWALKFRAMESCALLBACK pCallback, VOID *pData);
+ friend class Thread;
+ friend void CrawlFrame::GotoNextFrame();
+ friend class StackFrameIterator;
+ friend class TailCallFrame;
+ friend class AppDomain;
+ friend VOID RealCOMPlusThrow(OBJECTREF
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , CorruptionSeverity severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+ friend FCDECL0(VOID, JIT_StressGC);
+#ifdef _DEBUG
+ friend LONG WINAPI CLRVectoredExceptionHandlerShim(PEXCEPTION_POINTERS pExceptionInfo);
+#endif // _DEBUG
+#ifdef _WIN64
+ friend Thread * __stdcall JIT_InitPInvokeFrame(InlinedCallFrame *pFrame, PTR_VOID StubSecretArg);
+#endif
+#ifdef WIN64EXCEPTIONS
+ friend class ExceptionTracker;
+#endif
+#if defined(DACCESS_COMPILE)
+ friend class DacDbiInterfaceImpl;
+#endif // DACCESS_COMPILE
+#ifdef FEATURE_COMINTEROP
+ friend void COMToCLRWorkerBodyWithADTransition(Thread *pThread, ComMethodFrame *pFrame, ComCallWrapper *pWrap, UINT64 *pRetValOut);
+#endif // FEATURE_COMINTEROP
+
+ PTR_Frame Next()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_Next;
+ }
+
+protected:
+ // Frame is considered an abstract class: this protected constructor
+ // causes any attempt to instantiate one to fail at compile-time.
+ Frame()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+};
+
+
+//-----------------------------------------------------------------------------
+// This frame provides context for a frame that
+// took an exception that is going to be resumed.
+//
+// It is necessary to create this frame if garbage
+// collection may happen during handling of the
+// exception. The FRAME_ATTR_RESUMABLE flag tells
+// the GC that the preceding frame needs to be treated
+// like the top of stack (with the important implication that
+// caller-saved registers will be potential roots).
+//-----------------------------------------------------------------------------
+#ifdef FEATURE_HIJACK
+//-----------------------------------------------------------------------------
+
+class ResumableFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(ResumableFrame, Frame)
+
+public:
+#ifndef DACCESS_COMPILE
+ ResumableFrame(T_CONTEXT* regs) {
+ LIMITED_METHOD_CONTRACT;
+ m_Regs = regs;
+ }
+#endif
+
+ virtual TADDR GetReturnAddressPtr();
+
+ virtual BOOL NeedsUpdateRegDisplay()
+ {
+ return TRUE;
+ }
+
+ virtual void UpdateRegDisplay(const PREGDISPLAY pRD);
+
+ virtual unsigned GetFrameAttribs() {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return FRAME_ATTR_RESUMABLE; // Treat the next frame as the top frame.
+ }
+
+ T_CONTEXT *GetContext() {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (m_Regs);
+ }
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ {
+ WRAPPER_NO_CONTRACT;
+ Frame::EnumMemoryRegions(flags);
+ m_Regs.EnumMem();
+ }
+#endif
+
+protected:
+ PTR_CONTEXT m_Regs;
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(ResumableFrame)
+};
+
+
+//-----------------------------------------------------------------------------
+// RedirectedThreadFrame
+//-----------------------------------------------------------------------------
+
+class RedirectedThreadFrame : public ResumableFrame
+{
+ VPTR_VTABLE_CLASS(RedirectedThreadFrame, ResumableFrame)
+ VPTR_UNIQUE(VPTR_UNIQUE_RedirectedThreadFrame)
+
+public:
+#ifndef DACCESS_COMPILE
+ RedirectedThreadFrame(T_CONTEXT *regs) : ResumableFrame(regs) {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual void ExceptionUnwind();
+#endif
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(RedirectedThreadFrame)
+};
+
+typedef DPTR(RedirectedThreadFrame) PTR_RedirectedThreadFrame;
+
+inline BOOL ISREDIRECTEDTHREAD(Thread * thread)
+{
+ WRAPPER_NO_CONTRACT;
+ return (thread->GetFrame() != FRAME_TOP &&
+ thread->GetFrame()->GetVTablePtr() ==
+ RedirectedThreadFrame::GetMethodFrameVPtr());
+}
+
+inline T_CONTEXT * GETREDIRECTEDCONTEXT(Thread * thread)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(ISREDIRECTEDTHREAD(thread));
+ return dac_cast<PTR_RedirectedThreadFrame>(thread->GetFrame())->GetContext();
+}
+
+//------------------------------------------------------------------------
+#else // FEATURE_HIJACK
+//------------------------------------------------------------------------
+
+inline BOOL ISREDIRECTEDTHREAD(Thread * thread) { LIMITED_METHOD_CONTRACT; return FALSE; }
+inline CONTEXT * GETREDIRECTEDCONTEXT(Thread * thread) { LIMITED_METHOD_CONTRACT; return (CONTEXT*) NULL; }
+
+//------------------------------------------------------------------------
+#endif // FEATURE_HIJACK
+//------------------------------------------------------------------------
+// This frame represents a transition from one or more nested frameless
+// method calls to either an EE runtime helper function or a framed method.
+// Because most stackwalks from the EE start with a full-fledged frame,
+// anything but the most trivial call into the EE has to push this
+// frame in order to prevent the frameless methods in between from
+// getting lost.
+//------------------------------------------------------------------------
+
+class TransitionFrame : public Frame
+{
+ VPTR_ABSTRACT_VTABLE_CLASS(TransitionFrame, Frame)
+
+public:
+ virtual TADDR GetTransitionBlock() = 0;
+
+ // DACCESS: GetReturnAddressPtr should return the
+ // target address of the return address in the frame.
+ virtual TADDR GetReturnAddressPtr()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetTransitionBlock() + TransitionBlock::GetOffsetOfReturnAddress();
+ }
+
+ //---------------------------------------------------------------
+ // Get the "this" object.
+ //---------------------------------------------------------------
+ OBJECTREF GetThis()
+ {
+ WRAPPER_NO_CONTRACT;
+ Object* obj = PTR_Object(*PTR_TADDR(GetAddrOfThis()));
+ return ObjectToOBJECTREF(obj);
+ }
+
+ PTR_OBJECTREF GetThisPtr()
+ {
+ WRAPPER_NO_CONTRACT;
+ return PTR_OBJECTREF(GetAddrOfThis());
+ }
+
+ //---------------------------------------------------------------
+ // Get the extra info for shared generic code.
+ //---------------------------------------------------------------
+ PTR_VOID GetParamTypeArg();
+
+protected: // we don't want people using this directly
+ //---------------------------------------------------------------
+ // Get the address of the "this" object. WARNING!!! Whether or not "this"
+ // is gc-protected depends on the frame type!!!
+ //---------------------------------------------------------------
+ TADDR GetAddrOfThis();
+
+public:
+ //---------------------------------------------------------------
+ // For vararg calls, return cookie.
+ //---------------------------------------------------------------
+ VASigCookie *GetVASigCookie();
+
+ CalleeSavedRegisters *GetCalleeSavedRegisters()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<PTR_CalleeSavedRegisters>(
+ GetTransitionBlock() + TransitionBlock::GetOffsetOfCalleeSavedRegisters());
+ }
+
+ ArgumentRegisters *GetArgumentRegisters()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<PTR_ArgumentRegisters>(
+ GetTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters());
+ }
+
+ TADDR GetSP()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetTransitionBlock() + sizeof(TransitionBlock);
+ }
+
+ virtual BOOL NeedsUpdateRegDisplay()
+ {
+ return TRUE;
+ }
+
+ virtual void UpdateRegDisplay(const PREGDISPLAY);
+#ifdef _TARGET_X86_
+ void UpdateRegDisplayHelper(const PREGDISPLAY, UINT cbStackPop);
+#endif
+
+#if defined (_DEBUG) && !defined (DACCESS_COMPILE)
+ virtual BOOL Protects(OBJECTREF *ppORef);
+#endif // defined (_DEBUG) && !defined (DACCESS_COMPILE)
+
+ // For use by classes deriving from FramedMethodFrame.
+ void PromoteCallerStack(promote_func* fn, ScanContext* sc);
+
+ void PromoteCallerStackHelper(promote_func* fn, ScanContext* sc,
+ MethodDesc * pMD, MetaSig *pmsig);
+
+ void PromoteCallerStackUsingGCRefMap(promote_func* fn, ScanContext* sc, PTR_BYTE pGCRefMap);
+
+#ifdef _TARGET_X86_
+ UINT CbStackPopUsingGCRefMap(PTR_BYTE pGCRefMap);
+#endif
+
+protected:
+ TransitionFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+};
+
+//-----------------------------------------------------------------------
+// TransitionFrames for exceptions
+//-----------------------------------------------------------------------
+
+// The define USE_FEF controls how this class is used. Look for occurrences
+// of USE_FEF.
+
+class FaultingExceptionFrame : public Frame
+{
+ friend class CheckAsmOffsets;
+
+#if defined(_TARGET_X86_)
+ DWORD m_Esp;
+ CalleeSavedRegisters m_regs;
+ TADDR m_ReturnAddress;
+#endif
+
+#ifdef WIN64EXCEPTIONS
+ BOOL m_fFilterExecuted; // Flag for FirstCallToHandler
+ TADDR m_ReturnAddress;
+ T_CONTEXT m_ctx;
+#endif // WIN64EXCEPTIONS
+
+ VPTR_VTABLE_CLASS(FaultingExceptionFrame, Frame)
+
+public:
+#ifndef DACCESS_COMPILE
+ FaultingExceptionFrame() {
+ LIMITED_METHOD_CONTRACT;
+ }
+#endif
+
+ virtual TADDR GetReturnAddressPtr()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return PTR_HOST_MEMBER_TADDR(FaultingExceptionFrame, this, m_ReturnAddress);
+ }
+
+ void Init(T_CONTEXT *pContext);
+ void InitAndLink(T_CONTEXT *pContext);
+
+ Interception GetInterception()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return INTERCEPTION_EXCEPTION;
+ }
+
+ unsigned GetFrameAttribs()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return FRAME_ATTR_EXCEPTION | FRAME_ATTR_FAULTED;
+ }
+
+#if defined(_TARGET_X86_)
+ CalleeSavedRegisters *GetCalleeSavedRegisters()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return &m_regs;
+ }
+#endif
+
+#ifdef WIN64EXCEPTIONS
+ T_CONTEXT *GetExceptionContext ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_ctx;
+ }
+
+ BOOL * GetFilterExecutedFlag()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_fFilterExecuted;
+ }
+#endif // WIN64EXCEPTIONS
+
+ virtual BOOL NeedsUpdateRegDisplay()
+ {
+ return TRUE;
+ }
+
+ virtual void UpdateRegDisplay(const PREGDISPLAY);
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER(FaultingExceptionFrame)
+};
+
+//-----------------------------------------------------------------------
+// Frame for debugger function evaluation
+//
+// This frame holds a ptr to a DebuggerEval object which contains a copy
+// of the thread's context at the time it was hijacked for the func
+// eval.
+//
+// UpdateRegDisplay updates all registers in the REGDISPLAY, not just
+// the callee saved registers, because we can hijack for a func eval
+// at any point in a thread's execution.
+//
+//-----------------------------------------------------------------------
+
+#ifdef DEBUGGING_SUPPORTED
+class DebuggerEval;
+typedef DPTR(class DebuggerEval) PTR_DebuggerEval;
+
+class FuncEvalFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(FuncEvalFrame, Frame)
+
+ TADDR m_ReturnAddress;
+ PTR_DebuggerEval m_pDebuggerEval;
+
+ BOOL m_showFrame;
+
+public:
+#ifndef DACCESS_COMPILE
+ FuncEvalFrame(DebuggerEval *pDebuggerEval, TADDR returnAddress, BOOL showFrame)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pDebuggerEval = pDebuggerEval;
+ m_ReturnAddress = returnAddress;
+ m_showFrame = showFrame;
+ }
+#endif
+
+ virtual BOOL IsTransitionToNativeFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+ }
+
+ virtual int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_FUNC_EVAL;
+ }
+
+ virtual unsigned GetFrameAttribs();
+
+ virtual BOOL NeedsUpdateRegDisplay()
+ {
+ return TRUE;
+ }
+
+ virtual void UpdateRegDisplay(const PREGDISPLAY);
+
+ virtual DebuggerEval * GetDebuggerEval();
+
+ virtual TADDR GetReturnAddressPtr();
+
+ /*
+ * ShowFrame
+ *
+ * Returns whether this frame should be included in a stack trace given to a debugger.
+ *
+ */
+ BOOL ShowFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_showFrame;
+ }
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(FuncEvalFrame)
+};
+
+typedef VPTR(FuncEvalFrame) PTR_FuncEvalFrame;
+#endif // DEBUGGING_SUPPORTED
+
+//----------------------------------------------------------------------------------------------
+// A HelperMethodFrame is created by a jit helper (modified slightly, it could be
+// used for native routines). This frame just does the callee-saved register fixup.
+// It does NOT protect arguments; you must use GCPROTECT or one of the HelperMethodFrame
+// subclasses. (See JitInterface for sample use. YOU CAN'T RETURN WHILE IN THE PROTECTED STATE!)
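+//
+// Typical use from an FCall (an illustrative sketch; the HELPER_METHOD_FRAME_*
+// macros in fcall.h expand to the Push/Pop calls defined in frames.cpp):
+//
+//     FCIMPL1(void, SomeFCall, Object* pUNSAFE)
+//     {
+//         FCALL_CONTRACT;
+//         OBJECTREF obj = ObjectToOBJECTREF(pUNSAFE);
+//         HELPER_METHOD_FRAME_BEGIN_1(obj);  // a GC may occur from here on; obj is reported
+//         // ... work that may trigger a GC ...
+//         HELPER_METHOD_FRAME_END();         // do not return between BEGIN and END
+//     }
+//     FCIMPLEND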
+//----------------------------------------------------------------------------------------------
+
+class HelperMethodFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(HelperMethodFrame, Frame);
+
+public:
+#ifndef DACCESS_COMPILE
+ // Lazy initialization of HelperMethodFrame. Need to
+ // call InsureInit to complete initialization.
+ // If this is an FCall, the first param is the entry point for the FCALL.
+ // The MethodDesc will be looked up from this (lazily), and this method
+ // will be used in stack reporting; if this is not an FCall, pass 0.
+ FORCEINLINE HelperMethodFrame(void* fCallFtnEntry, unsigned attribs = 0)
+ {
+ WRAPPER_NO_CONTRACT;
+ // Most of the initialization is actually done in HelperMethodFrame::Push()
+ INDEBUG(memset(&m_Attribs, 0xCC, sizeof(HelperMethodFrame) - offsetof(HelperMethodFrame, m_Attribs));)
+ m_Attribs = attribs;
+ m_FCallEntry = (TADDR)fCallFtnEntry;
+ }
+#endif // DACCESS_COMPILE
+
+ virtual int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_HELPER_METHOD_FRAME;
+ };
+
+ virtual PCODE GetReturnAddress()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (!m_MachState.isValid())
+ {
+#if defined(DACCESS_COMPILE)
+ MachState unwoundState;
+ InsureInit(false, &unwoundState);
+ return unwoundState.GetRetAddr();
+#else // !DACCESS_COMPILE
+ _ASSERTE(!"HMF's should always be initialized in the non-DAC world.");
+ return NULL;
+
+#endif // !DACCESS_COMPILE
+ }
+
+ return m_MachState.GetRetAddr();
+ }
+
+ virtual MethodDesc* GetFunction();
+
+ virtual BOOL NeedsUpdateRegDisplay()
+ {
+ return TRUE;
+ }
+
+ virtual void UpdateRegDisplay(const PREGDISPLAY);
+
+ virtual Interception GetInterception()
+ {
+ WRAPPER_NO_CONTRACT;
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (GetFrameAttribs() & FRAME_ATTR_EXCEPTION)
+ return(INTERCEPTION_EXCEPTION);
+ return(INTERCEPTION_NONE);
+ }
+
+ virtual ETransitionType GetTransitionType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TT_InternalCall;
+ }
+
+#ifdef _DEBUG
+ void SetAddrOfHaveCheckedRestoreState(BOOL* pDoneCheck)
+ {
+ m_pDoneCheck = pDoneCheck;
+ }
+
+ BOOL HaveDoneConfirmStateCheck()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pDoneCheck != NULL);
+ return *m_pDoneCheck;
+ }
+
+ void SetHaveDoneConfirmStateCheck()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pDoneCheck != NULL);
+ *m_pDoneCheck = TRUE;
+ }
+#endif
+
+ virtual unsigned GetFrameAttribs()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return(m_Attribs);
+ }
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ {
+ WRAPPER_NO_CONTRACT;
+ Frame::EnumMemoryRegions(flags);
+ }
+#endif
+
+#ifndef DACCESS_COMPILE
+ void Push();
+ void Pop();
+
+ FORCEINLINE void Poll()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (m_pThread->CatchAtSafePointOpportunistic())
+ CommonTripThread();
+ }
+#endif // DACCESS_COMPILE
+
+ BOOL InsureInit(bool initialInit, struct MachState* unwindState, HostCallPreference hostCallPreference = AllowHostCalls);
+
+ LazyMachState * MachineState() {
+ LIMITED_METHOD_CONTRACT;
+ return &m_MachState;
+ }
+
+ Thread * GetThread() {
+ LIMITED_METHOD_CONTRACT;
+ return m_pThread;
+ }
+
+private:
+ // Slow paths of Push/Pop are factored into separate functions for better perf.
+ NOINLINE void PushSlowHelper();
+ NOINLINE void PopSlowHelper();
+
+protected:
+ PTR_MethodDesc m_pMD;
+ unsigned m_Attribs;
+ INDEBUG(BOOL* m_pDoneCheck;)
+ PTR_Thread m_pThread;
+ TADDR m_FCallEntry; // used to determine our identity for stack traces
+
+ LazyMachState m_MachState; // pRetAddr points to the return address and the stack arguments
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(HelperMethodFrame)
+};
+
+// Restores registers saved in m_MachState
+EXTERN_C int __fastcall HelperMethodFrameRestoreState(
+ INDEBUG_COMMA(HelperMethodFrame *pFrame)
+ MachState *pState
+ );
+
+
+// workhorse for our promotion efforts
+inline void DoPromote(promote_func *fn, ScanContext* sc, OBJECTREF *address, BOOL interior)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // We use OBJECTREF_TO_UNCHECKED_OBJECTREF since address may be an interior pointer
+ LOG((LF_GC, INFO3,
+ " Promoting pointer argument at" FMT_ADDR "from" FMT_ADDR "to ",
+ DBG_ADDR(address), DBG_ADDR(OBJECTREF_TO_UNCHECKED_OBJECTREF(*address)) ));
+
+ if (interior)
+ PromoteCarefully(fn, PTR_PTR_Object(address), sc);
+ else
+ (*fn) (PTR_PTR_Object(address), sc, 0);
+
+ LOG((LF_GC, INFO3, " " FMT_ADDR "\n", DBG_ADDR(OBJECTREF_TO_UNCHECKED_OBJECTREF(*address)) ));
+}
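+
+// For example (a sketch of the two cases): a frame protecting an interior pointer
+// (such as a byref into an array) passes interior=TRUE so the root is promoted via
+// PromoteCarefully, while the HelperMethodFrame_*OBJ subclasses below pass FALSE
+// for ordinary object references.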
+
+
+//-----------------------------------------------------------------------------
+// HelperMethodFrames that also report additional object references
+//-----------------------------------------------------------------------------
+
+class HelperMethodFrame_1OBJ : public HelperMethodFrame
+{
+ VPTR_VTABLE_CLASS(HelperMethodFrame_1OBJ, HelperMethodFrame)
+
+public:
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+ HelperMethodFrame_1OBJ(void* fCallFtnEntry, unsigned attribs, OBJECTREF* aGCPtr1)
+ : HelperMethodFrame(fCallFtnEntry, attribs)
+ {
+ LIMITED_METHOD_CONTRACT;
+ gcPtrs[0] = aGCPtr1;
+ INDEBUG(Thread::ObjectRefProtected(aGCPtr1);)
+ INDEBUG((*aGCPtr1).Validate ();)
+ }
+#endif
+
+ void SetProtectedObject(PTR_OBJECTREF objPtr)
+ {
+ LIMITED_METHOD_CONTRACT;
+ gcPtrs[0] = objPtr;
+ INDEBUG(Thread::ObjectRefProtected(objPtr);)
+ }
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc)
+ {
+ WRAPPER_NO_CONTRACT;
+ DoPromote(fn, sc, gcPtrs[0], FALSE);
+ HelperMethodFrame::GcScanRoots(fn, sc);
+ }
+
+#ifdef _DEBUG
+#ifndef DACCESS_COMPILE
+ void Pop()
+ {
+ WRAPPER_NO_CONTRACT;
+ HelperMethodFrame::Pop();
+ Thread::ObjectRefNew(gcPtrs[0]);
+ }
+#endif // DACCESS_COMPILE
+
+ BOOL Protects(OBJECTREF *ppORef)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (ppORef == gcPtrs[0]) ? TRUE : FALSE;
+ }
+
+#endif
+
+private:
+ PTR_OBJECTREF gcPtrs[1];
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(HelperMethodFrame_1OBJ)
+};
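+
+// Usage sketch (illustrative only; SomeFCall is a hypothetical name): FCalls
+// normally erect this frame via the HELPER_METHOD_FRAME_* macros in fcall.h
+// rather than constructing it directly:
+//
+//   FCIMPL1(void, SomeFCall, Object* pObjUNSAFE)
+//   {
+//       FCALL_CONTRACT;
+//       OBJECTREF objRef = ObjectToOBJECTREF(pObjUNSAFE);
+//       HELPER_METHOD_FRAME_BEGIN_1(objRef);   // erects a HelperMethodFrame_1OBJ
+//       // ... code that may trigger a GC; objRef is reported via GcScanRoots ...
+//       HELPER_METHOD_FRAME_END();
+//   }
+//   FCIMPLEND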
+
+
+//-----------------------------------------------------------------------------
+// HelperMethodFrame_2OBJ
+//-----------------------------------------------------------------------------
+
+class HelperMethodFrame_2OBJ : public HelperMethodFrame
+{
+ VPTR_VTABLE_CLASS(HelperMethodFrame_2OBJ, HelperMethodFrame)
+
+public:
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+ HelperMethodFrame_2OBJ(
+ void* fCallFtnEntry,
+ unsigned attribs,
+ OBJECTREF* aGCPtr1,
+ OBJECTREF* aGCPtr2)
+ : HelperMethodFrame(fCallFtnEntry, attribs)
+ {
+ LIMITED_METHOD_CONTRACT;
+ gcPtrs[0] = aGCPtr1;
+ gcPtrs[1] = aGCPtr2;
+ INDEBUG(Thread::ObjectRefProtected(aGCPtr1);)
+ INDEBUG(Thread::ObjectRefProtected(aGCPtr2);)
+ INDEBUG((*aGCPtr1).Validate ();)
+ INDEBUG((*aGCPtr2).Validate ();)
+ }
+#endif
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc)
+ {
+ WRAPPER_NO_CONTRACT;
+ DoPromote(fn, sc, gcPtrs[0], FALSE);
+ DoPromote(fn, sc, gcPtrs[1], FALSE);
+ HelperMethodFrame::GcScanRoots(fn, sc);
+ }
+
+#ifdef _DEBUG
+#ifndef DACCESS_COMPILE
+ void Pop()
+ {
+ WRAPPER_NO_CONTRACT;
+ HelperMethodFrame::Pop();
+ Thread::ObjectRefNew(gcPtrs[0]);
+ Thread::ObjectRefNew(gcPtrs[1]);
+ }
+#endif // DACCESS_COMPILE
+
+ BOOL Protects(OBJECTREF *ppORef)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (ppORef == gcPtrs[0] || ppORef == gcPtrs[1]) ? TRUE : FALSE;
+ }
+#endif
+
+private:
+ PTR_OBJECTREF gcPtrs[2];
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(HelperMethodFrame_2OBJ)
+};
+
+
+//-----------------------------------------------------------------------------
+// HelperMethodFrame_PROTECTOBJ
+//-----------------------------------------------------------------------------
+
+class HelperMethodFrame_PROTECTOBJ : public HelperMethodFrame
+{
+ VPTR_VTABLE_CLASS(HelperMethodFrame_PROTECTOBJ, HelperMethodFrame)
+
+public:
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+ HelperMethodFrame_PROTECTOBJ(void* fCallFtnEntry, unsigned attribs, OBJECTREF* pObjRefs, int numObjRefs)
+ : HelperMethodFrame(fCallFtnEntry, attribs)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pObjRefs = pObjRefs;
+ m_numObjRefs = numObjRefs;
+#ifdef _DEBUG
+ for (UINT i = 0; i < m_numObjRefs; i++) {
+ Thread::ObjectRefProtected(&m_pObjRefs[i]);
+ m_pObjRefs[i].Validate();
+ }
+#endif
+ }
+#endif
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc)
+ {
+ WRAPPER_NO_CONTRACT;
+ for (UINT i = 0; i < m_numObjRefs; i++) {
+ DoPromote(fn, sc, &m_pObjRefs[i], FALSE);
+ }
+ HelperMethodFrame::GcScanRoots(fn, sc);
+ }
+
+#ifdef _DEBUG
+#ifndef DACCESS_COMPILE
+ void Pop()
+ {
+ WRAPPER_NO_CONTRACT;
+ HelperMethodFrame::Pop();
+ for (UINT i = 0; i < m_numObjRefs; i++) {
+ Thread::ObjectRefNew(&m_pObjRefs[i]);
+ }
+ }
+#endif // DACCESS_COMPILE
+
+ BOOL Protects(OBJECTREF *ppORef)
+ {
+ LIMITED_METHOD_CONTRACT;
+ for (UINT i = 0; i < m_numObjRefs; i++) {
+ if (ppORef == &m_pObjRefs[i])
+ return TRUE;
+ }
+ return FALSE;
+ }
+#endif
+
+private:
+ PTR_OBJECTREF m_pObjRefs;
+ UINT m_numObjRefs;
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(HelperMethodFrame_PROTECTOBJ)
+};
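+
+// Usage sketch (illustrative; assumes the HELPER_METHOD_FRAME_BEGIN_PROTECT
+// macro from fcall.h): the usual pattern protects a struct of OBJECTREFs as
+// one contiguous block:
+//
+//   struct _gc { OBJECTREF oRef1; OBJECTREF oRef2; } gc;
+//   ZeroMemory(&gc, sizeof(gc));
+//   HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);  // HelperMethodFrame_PROTECTOBJ over all fields of gc
+//   // ... code that may trigger a GC ...
+//   HELPER_METHOD_FRAME_END();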
+
+class FramedMethodFrame : public TransitionFrame
+{
+ VPTR_ABSTRACT_VTABLE_CLASS(FramedMethodFrame, TransitionFrame)
+
+ TADDR m_pTransitionBlock;
+
+protected:
+ PTR_MethodDesc m_pMD;
+
+public:
+#ifndef DACCESS_COMPILE
+ FramedMethodFrame(TransitionBlock * pTransitionBlock, MethodDesc * pMD)
+ : m_pTransitionBlock(dac_cast<TADDR>(pTransitionBlock)), m_pMD(pMD)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+#endif // DACCESS_COMPILE
+
+ virtual TADDR GetTransitionBlock()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pTransitionBlock;
+ }
+
+ virtual MethodDesc *GetFunction()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pMD;
+ }
+
+#ifndef DACCESS_COMPILE
+ void SetFunction(MethodDesc *pMD)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE; // Frame MethodDesc should be always updated in cooperative mode to avoid racing with GC stackwalk
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ m_pMD = pMD;
+ }
+#endif
+
+ virtual ETransitionType GetTransitionType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TT_M2U; // we can safely cast to a FramedMethodFrame
+ }
+
+ int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_CALL;
+ }
+
+#ifdef COM_STUBS_SEPARATE_FP_LOCATIONS
+ static int GetFPArgOffset(int iArg)
+ {
+#ifdef _TARGET_AMD64_
+ // Floating point spill area is between return value and transition block for frames that need it
+ // (code:TPMethodFrame and code:ComPlusMethodFrame)
+ return -(4 * 0x10 /* floating point args */ + 0x8 /* alignment pad */ + TransitionBlock::GetNegSpaceSize()) + (iArg * 0x10);
+#endif
+ }
+#endif
+
+ //
+ // GetReturnObjectPtr and GetReturnValuePtr are only valid on frames
+ // that allocate space for the return value (code:TPMethodFrame and
+ // code:ComPlusMethodFrame)
+ //
+ PTR_PTR_Object GetReturnObjectPtr()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return PTR_PTR_Object(GetReturnValuePtr());
+ }
+
+ // Get return value address
+ PTR_VOID GetReturnValuePtr()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifdef COM_STUBS_SEPARATE_FP_LOCATIONS
+ TADDR p = GetTransitionBlock() + GetFPArgOffset(0);
+#else
+ TADDR p = GetTransitionBlock() - TransitionBlock::GetNegSpaceSize();
+#endif
+ // Return value is right before the transition block (or floating point spill area on AMD64) for frames that need it
+ // (code:TPMethodFrame and code:ComPlusMethodFrame)
+#ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
+ p -= ENREGISTERED_RETURNTYPE_MAXSIZE;
+#else
+ p -= sizeof(ARG_SLOT);
+#endif
+ return dac_cast<PTR_VOID>(p);
+ }
+
+protected:
+ FramedMethodFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+};
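+
+// Layout sketch for FramedMethodFrame::GetReturnValuePtr above (stack grows
+// towards lower addresses; on AMD64 with COM_STUBS_SEPARATE_FP_LOCATIONS the
+// floating point spill area additionally sits between the negative space and
+// the return buffer):
+//
+//   | TransitionBlock     | <- GetTransitionBlock()
+//   +---------------------+
+//   | negative space      |    TransitionBlock::GetNegSpaceSize()
+//   +---------------------+
+//   | return value buffer | <- GetReturnValuePtr()
+//   +---------------------+    (ENREGISTERED_RETURNTYPE_MAXSIZE or
+//                               sizeof(ARG_SLOT) bytes)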
+
+//+----------------------------------------------------------------------------
+//
+// Class: TPMethodFrame private
+//
+// Synopsis: This frame is pushed onto the stack for calls on transparent
+// proxy
+//
+//
+//+----------------------------------------------------------------------------
+#ifdef FEATURE_REMOTING
+class TPMethodFrame : public FramedMethodFrame
+{
+ VPTR_VTABLE_CLASS(TPMethodFrame, FramedMethodFrame)
+
+public:
+ TPMethodFrame(TransitionBlock * pTransitionBlock);
+
+ virtual int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_TP_METHOD_FRAME;
+ }
+
+ // GC protect arguments
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc);
+
+ // Our base class reports an M2U transition type, but we're not one. So override and set us back to None.
+ ETransitionType GetTransitionType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TT_NONE;
+ }
+
+#if defined(_TARGET_X86_) && !defined(DACCESS_COMPILE)
+ void SetCbStackPop(UINT cbStackPop)
+ {
+ LIMITED_METHOD_CONTRACT;
+ // Number of bytes to pop for x86 is stored right before the return value
+ void * pReturnValue = GetReturnValuePtr();
+ *(((DWORD *)pReturnValue) - 1) = cbStackPop;
+ }
+#endif
+
+ // Aid the debugger in finding the actual address of the callee
+ virtual BOOL TraceFrame(Thread *thread, BOOL fromPatch,
+ TraceDestination *trace, REGDISPLAY *regs);
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(TPMethodFrame)
+};
+#endif // FEATURE_REMOTING
+
+//------------------------------------------------------------------------
+// This represents a call to Delegate.Invoke for a secure delegate.
+// It's only used to gc-protect the arguments during the call.
+// Actually, the only reason to have this frame is so that a proper
+// Assembly can be reported.
+//------------------------------------------------------------------------
+
+class SecureDelegateFrame : public TransitionFrame
+{
+ VPTR_VTABLE_CLASS(SecureDelegateFrame, TransitionFrame)
+
+ PTR_MethodDesc m_pMD;
+ TransitionBlock m_TransitionBlock;
+
+public:
+ virtual MethodDesc* GetFunction()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pMD;
+ }
+
+ virtual TADDR GetTransitionBlock()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return PTR_HOST_MEMBER_TADDR(SecureDelegateFrame, this,
+ m_TransitionBlock);
+ }
+
+ static BYTE GetOffsetOfDatum()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return offsetof(SecureDelegateFrame, m_pMD);
+ }
+
+ static int GetOffsetOfTransitionBlock()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return offsetof(SecureDelegateFrame, m_TransitionBlock);
+ }
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc)
+ {
+ WRAPPER_NO_CONTRACT;
+ TransitionFrame::GcScanRoots(fn, sc);
+ PromoteCallerStack(fn, sc);
+ }
+
+ virtual Assembly *GetAssembly();
+
+ int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_MULTICAST;
+ }
+
+ // For the debugger:
+ // Our base class reports an M2U transition,
+ // but Delegate.Invoke isn't one. So override and fix it here.
+ // If we didn't do this, we'd see a Managed/Unmanaged transition in debugger's stack trace.
+ virtual ETransitionType GetTransitionType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TT_NONE;
+ }
+
+ virtual BOOL TraceFrame(Thread *thread, BOOL fromPatch,
+ TraceDestination *trace, REGDISPLAY *regs);
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(SecureDelegateFrame)
+};
+
+
+//------------------------------------------------------------------------
+// This represents a call to Multicast.Invoke. It's only used to gc-protect
+// the arguments during the iteration.
+//------------------------------------------------------------------------
+
+class MulticastFrame : public SecureDelegateFrame
+{
+ VPTR_VTABLE_CLASS(MulticastFrame, SecureDelegateFrame)
+
+ public:
+
+ virtual Assembly *GetAssembly()
+ {
+ WRAPPER_NO_CONTRACT;
+ return Frame::GetAssembly();
+ }
+
+ int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_MULTICAST;
+ }
+
+ virtual BOOL TraceFrame(Thread *thread, BOOL fromPatch,
+ TraceDestination *trace, REGDISPLAY *regs);
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(MulticastFrame)
+};
+
+
+//-----------------------------------------------------------------------
+// Transition frame from unmanaged to managed
+//-----------------------------------------------------------------------
+
+class UnmanagedToManagedFrame : public Frame
+{
+ friend class CheckAsmOffsets;
+
+ VPTR_ABSTRACT_VTABLE_CLASS(UnmanagedToManagedFrame, Frame)
+
+public:
+
+ // DACCESS: GetReturnAddressPtr should return the
+ // target address of the return address in the frame.
+ virtual TADDR GetReturnAddressPtr()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return PTR_HOST_MEMBER_TADDR(UnmanagedToManagedFrame, this,
+ m_ReturnAddress);
+ }
+
+ virtual PCODE GetReturnAddress();
+
+ // Retrieves a pointer to the lowest-addressed argument on
+ // the stack. Depending on the calling convention, this
+ // may or may not be the first argument.
+ TADDR GetPointerToArguments()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<TADDR>(this) + GetOffsetOfArgs();
+ }
+
+ // Exposes an offset for stub generation.
+ static BYTE GetOffsetOfArgs()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ size_t ofs = offsetof(UnmanagedToManagedFrame, m_argumentRegisters);
+#else
+ size_t ofs = sizeof(UnmanagedToManagedFrame);
+#endif
+ _ASSERTE(FitsInI1(ofs));
+ return (BYTE)ofs;
+ }
+
+ // depends on the derived frames to return the appropriate type here
+ TADDR GetDatum()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pvDatum;
+ }
+
+ static int GetOffsetOfDatum()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(UnmanagedToManagedFrame, m_pvDatum);
+ }
+
+#ifdef _TARGET_X86_
+ static int GetOffsetOfCalleeSavedRegisters()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(UnmanagedToManagedFrame, m_calleeSavedRegisters);
+ }
+#endif
+
+ int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_ENTRY;
+ }
+
+ //------------------------------------------------------------------------
+ // For the debugger.
+ //------------------------------------------------------------------------
+ virtual ETransitionType GetTransitionType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TT_U2M;
+ }
+
+ //------------------------------------------------------------------------
+ // Performs cleanup on an exception unwind
+ //------------------------------------------------------------------------
+#ifndef DACCESS_COMPILE
+ virtual void ExceptionUnwind();
+#endif
+
+protected:
+ TADDR m_pvDatum; // type depends on the sub class
+
+#if defined(_TARGET_X86_)
+ CalleeSavedRegisters m_calleeSavedRegisters;
+ TADDR m_ReturnAddress;
+#elif defined(_TARGET_ARM_)
+ TADDR m_R11; // R11 chain
+ TADDR m_ReturnAddress;
+ ArgumentRegisters m_argumentRegisters;
+#elif defined (_TARGET_ARM64_)
+ TADDR m_fp;
+ TADDR m_ReturnAddress;
+ ArgumentRegisters m_argumentRegisters;
+#else
+ TADDR m_ReturnAddress; // return address into unmanaged code
+#endif
+};
+
+#ifdef FEATURE_COMINTEROP
+
+//------------------------------------------------------------------------
+// This frame represents a transition from COM to COM+
+//------------------------------------------------------------------------
+
+class ComMethodFrame : public UnmanagedToManagedFrame
+{
+ VPTR_VTABLE_CLASS(ComMethodFrame, UnmanagedToManagedFrame)
+ VPTR_UNIQUE(VPTR_UNIQUE_ComMethodFrame)
+
+public:
+
+#ifdef _TARGET_X86_
+ // Return the # of stack bytes pushed by the unmanaged caller.
+ UINT GetNumCallerStackBytes();
+#endif
+
+ PTR_ComCallMethodDesc GetComCallMethodDesc()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return dac_cast<PTR_ComCallMethodDesc>(m_pvDatum);
+ }
+
+#ifndef DACCESS_COMPILE
+ static void DoSecondPassHandlerCleanup(Frame * pCurFrame);
+#endif // !DACCESS_COMPILE
+
+protected:
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(ComMethodFrame)
+};
+
+typedef DPTR(class ComMethodFrame) PTR_ComMethodFrame;
+
+//------------------------------------------------------------------------
+// This represents a generic call from CLR to COM
+//------------------------------------------------------------------------
+
+class ComPlusMethodFrame : public FramedMethodFrame
+{
+ VPTR_VTABLE_CLASS(ComPlusMethodFrame, FramedMethodFrame)
+
+public:
+ ComPlusMethodFrame(TransitionBlock * pTransitionBlock, MethodDesc * pMethodDesc);
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc);
+
+ virtual BOOL IsTransitionToNativeFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return TRUE;
+ }
+
+ int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_EXIT;
+ }
+
+ void GetUnmanagedCallSite(TADDR* ip,
+ TADDR* returnIP,
+ TADDR* returnSP);
+
+ BOOL TraceFrame(Thread *thread, BOOL fromPatch,
+ TraceDestination *trace, REGDISPLAY *regs);
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(ComPlusMethodFrame)
+};
+
+#endif // FEATURE_COMINTEROP
+
+//------------------------------------------------------------------------
+// This represents a call from a helper to GetILStubForCalli
+//------------------------------------------------------------------------
+
+class PInvokeCalliFrame : public FramedMethodFrame
+{
+ VPTR_VTABLE_CLASS(PInvokeCalliFrame, FramedMethodFrame)
+
+ PTR_VASigCookie m_pVASigCookie;
+ PCODE m_pUnmanagedTarget;
+
+public:
+ PInvokeCalliFrame(TransitionBlock * pTransitionBlock, VASigCookie * pVASigCookie, PCODE pUnmanagedTarget);
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc)
+ {
+ WRAPPER_NO_CONTRACT;
+ FramedMethodFrame::GcScanRoots(fn, sc);
+ PromoteCallerStack(fn, sc);
+ }
+
+ void PromoteCallerStack(promote_func* fn, ScanContext* sc);
+
+ // not a method call -- no MethodDesc to return
+ virtual MethodDesc *GetFunction()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return NULL;
+ }
+
+ int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_INTERCEPTION;
+ }
+
+ PCODE GetPInvokeCalliTarget()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pUnmanagedTarget;
+ }
+
+ PTR_VASigCookie GetVASigCookie()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pVASigCookie;
+ }
+
+#ifdef _TARGET_X86_
+ virtual void UpdateRegDisplay(const PREGDISPLAY);
+#endif // _TARGET_X86_
+
+ BOOL TraceFrame(Thread *thread, BOOL fromPatch,
+ TraceDestination *trace, REGDISPLAY *regs)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ trace->InitForUnmanaged(GetPInvokeCalliTarget());
+ return TRUE;
+ }
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(PInvokeCalliFrame)
+};
+
+// Some context-related forwards.
+#ifdef FEATURE_HIJACK
+//------------------------------------------------------------------------
+// This frame represents a hijacked return. If we crawl back through it,
+// it gets us back to where the return should have gone (and eventually will
+// go).
+//------------------------------------------------------------------------
+class HijackFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(HijackFrame, Frame)
+ VPTR_UNIQUE(VPTR_UNIQUE_HijackFrame);
+
+public:
+ // DACCESS: GetReturnAddressPtr should return the
+ // target address of the return address in the frame.
+ virtual TADDR GetReturnAddressPtr()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return PTR_HOST_MEMBER_TADDR(HijackFrame, this,
+ m_ReturnAddress);
+ }
+
+ virtual BOOL NeedsUpdateRegDisplay()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return TRUE;
+ }
+
+ virtual void UpdateRegDisplay(const PREGDISPLAY);
+
+ // HijackFrames are created by trip functions. See OnHijackObjectTripThread()
+ // and OnHijackScalarTripThread(). They are real C++ objects on the stack. So
+ // the constructor is public -- but that doesn't mean you should make your own.
+ HijackFrame(LPVOID returnAddress, Thread *thread, HijackArgs *args);
+
+protected:
+
+ TADDR m_ReturnAddress;
+ PTR_Thread m_Thread;
+ DPTR(HijackArgs) m_Args;
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(HijackFrame)
+};
+
+#endif // FEATURE_HIJACK
+
+//------------------------------------------------------------------------
+// This represents a call to a method prestub. Because the prestub
+// can do gc and throw exceptions while building the replacement
+// stub, we need this frame to keep things straight.
+//------------------------------------------------------------------------
+
+class PrestubMethodFrame : public FramedMethodFrame
+{
+ VPTR_VTABLE_CLASS(PrestubMethodFrame, FramedMethodFrame)
+
+public:
+ PrestubMethodFrame(TransitionBlock * pTransitionBlock, MethodDesc * pMD);
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc)
+ {
+ WRAPPER_NO_CONTRACT;
+ FramedMethodFrame::GcScanRoots(fn, sc);
+ PromoteCallerStack(fn, sc);
+ }
+
+ BOOL TraceFrame(Thread *thread, BOOL fromPatch,
+ TraceDestination *trace, REGDISPLAY *regs);
+
+ int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_INTERCEPTION;
+ }
+
+ // Our base class reports an M2U transition type, but we're not one. So override and set us back to None.
+ ETransitionType GetTransitionType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TT_NONE;
+ }
+
+ Interception GetInterception();
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(PrestubMethodFrame)
+};
+
+//------------------------------------------------------------------------
+// This represents a call into the virtual call stub manager.
+// Because the stub manager can do gc and throw exceptions while
+// building the resolve and dispatch stubs, and needs to communicate
+// whether we need to set up for a MethodDesc call or do a direct call,
+// we need this frame to keep things straight.
+//------------------------------------------------------------------------
+
+class StubDispatchFrame : public FramedMethodFrame
+{
+ VPTR_VTABLE_CLASS(StubDispatchFrame, FramedMethodFrame)
+
+ // Representative MethodTable * and slot. They are used to
+ // compute the MethodDesc* lazily
+ PTR_MethodTable m_pRepresentativeMT;
+ UINT32 m_representativeSlot;
+
+ // Indirection cell and containing module. Used to compute pGCRefMap lazily.
+ PTR_Module m_pZapModule;
+ TADDR m_pIndirection;
+
+ // Cached pointer to native ref data.
+ PTR_BYTE m_pGCRefMap;
+
+public:
+ StubDispatchFrame(TransitionBlock * pTransitionBlock);
+
+ MethodDesc* GetFunction();
+
+ // Returns this frame's GC ref map if it has one
+ PTR_BYTE GetGCRefMap();
+
+#ifdef _TARGET_X86_
+ virtual void UpdateRegDisplay(const PREGDISPLAY pRD);
+ virtual PCODE GetReturnAddress();
+#endif // _TARGET_X86_
+
+ PCODE GetUnadjustedReturnAddress()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return FramedMethodFrame::GetReturnAddress();
+ }
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc);
+
+#ifndef DACCESS_COMPILE
+ void SetRepresentativeSlot(MethodTable * pMT, UINT32 representativeSlot)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pRepresentativeMT = pMT;
+ m_representativeSlot = representativeSlot;
+ }
+
+ void SetCallSite(Module * pZapModule, TADDR pIndirection)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pZapModule = pZapModule;
+ m_pIndirection = pIndirection;
+ }
+
+ void SetForNullReferenceException()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Nothing to do. Everything is initialized in Init.
+ }
+#endif
+
+ BOOL TraceFrame(Thread *thread, BOOL fromPatch,
+ TraceDestination *trace, REGDISPLAY *regs);
+
+ int GetFrameType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return TYPE_CALL;
+ }
+
+ Interception GetInterception();
+
+private:
+ friend class VirtualCallStubManager;
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(StubDispatchFrame)
+};
+
+typedef VPTR(class StubDispatchFrame) PTR_StubDispatchFrame;
+
+
+//------------------------------------------------------------------------
+// This represents a call from an ExternalMethodThunk or a VirtualImportThunk.
+// Because resolving the target address can do gc and/or
+// throw exceptions, we need this frame to report the gc references.
+//------------------------------------------------------------------------
+
+class ExternalMethodFrame : public FramedMethodFrame
+{
+ VPTR_VTABLE_CLASS(ExternalMethodFrame, FramedMethodFrame)
+
+ // Indirection and containing module. Used to compute pGCRefMap lazily.
+ PTR_Module m_pZapModule;
+ TADDR m_pIndirection;
+
+ // Cached pointer to native ref data.
+ PTR_BYTE m_pGCRefMap;
+
+public:
+ ExternalMethodFrame(TransitionBlock * pTransitionBlock);
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc);
+
+ // Returns this frame's GC ref map if it has one
+ PTR_BYTE GetGCRefMap();
+
+#ifndef DACCESS_COMPILE
+ void SetCallSite(Module * pZapModule, TADDR pIndirection)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pZapModule = pZapModule;
+ m_pIndirection = pIndirection;
+ }
+#endif
+
+ int GetFrameType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return TYPE_CALL;
+ }
+
+ Interception GetInterception();
+
+#ifdef _TARGET_X86_
+ virtual void UpdateRegDisplay(const PREGDISPLAY pRD);
+#endif
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(ExternalMethodFrame)
+};
+
+typedef VPTR(class ExternalMethodFrame) PTR_ExternalMethodFrame;
+
+#ifdef FEATURE_READYTORUN
+class DynamicHelperFrame : public FramedMethodFrame
+{
+ VPTR_VTABLE_CLASS(DynamicHelperFrame, FramedMethodFrame)
+
+ int m_dynamicHelperFrameFlags;
+
+public:
+ DynamicHelperFrame(TransitionBlock * pTransitionBlock, int dynamicHelperFrameFlags);
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc);
+
+#ifdef _TARGET_X86_
+ virtual void UpdateRegDisplay(const PREGDISPLAY pRD)
+ {
+ WRAPPER_NO_CONTRACT;
+ UpdateRegDisplayHelper(pRD, 0);
+ }
+#endif
+
+ virtual ETransitionType GetTransitionType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TT_InternalCall;
+ }
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(DynamicHelperFrame)
+};
+
+typedef VPTR(class DynamicHelperFrame) PTR_DynamicHelperFrame;
+#endif // FEATURE_READYTORUN
+
+//------------------------------------------------------------------------
+// This frame is used for instantiating stubs when the argument transform
+// is too complex to generate a tail-calling stub.
+//------------------------------------------------------------------------
+#if !defined(_TARGET_X86_)
+class StubHelperFrame : public TransitionFrame
+{
+ friend class CheckAsmOffsets;
+ friend class StubLinkerCPU;
+
+ VPTR_VTABLE_CLASS(StubHelperFrame, TransitionFrame)
+ VPTR_UNIQUE(VPTR_UNIQUE_StubHelperFrame)
+
+ TransitionBlock m_TransitionBlock;
+
+ virtual TADDR GetTransitionBlock()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return PTR_HOST_MEMBER_TADDR(StubHelperFrame, this,
+ m_TransitionBlock);
+ }
+
+ static int GetOffsetOfTransitionBlock()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return offsetof(StubHelperFrame, m_TransitionBlock);
+ }
+
+private:
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(StubHelperFrame)
+};
+#endif // !_TARGET_X86_
+
+#ifdef FEATURE_COMINTEROP
+
+//------------------------------------------------------------------------
+// This represents a COM to COM+ call method prestub.
+// We need to catch exceptions etc., so this frame is not the same
+// as the prestub method frame.
+// Note that in rare IJW cases, the immediate caller could be a managed method
+// which pinvoke-inlined a call to a COM interface, which happened to be
+// implemented by a managed function via COM-interop.
+//------------------------------------------------------------------------
+class ComPrestubMethodFrame : public ComMethodFrame
+{
+ friend class CheckAsmOffsets;
+
+ VPTR_VTABLE_CLASS(ComPrestubMethodFrame, ComMethodFrame)
+
+public:
+ // Set the vptr and GSCookie
+ VOID Init();
+
+ int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_INTERCEPTION;
+ }
+
+ // ComPrestubMethodFrame should return the same interception type as
+ // code:PrestubMethodFrame.GetInterception.
+ virtual Interception GetInterception()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return INTERCEPTION_PRESTUB;
+ }
+
+ // Our base class reports an M2U transition type, but we're not one. So override and set us back to None.
+ virtual ETransitionType GetTransitionType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TT_NONE;
+ }
+
+ virtual void ExceptionUnwind()
+ {
+ }
+
+private:
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(ComPrestubMethodFrame)
+};
+
+#endif // FEATURE_COMINTEROP
+
+
+//------------------------------------------------------------------------
+// This frame protects object references for the EE's convenience.
+// This frame type is actually created from C++.
+//------------------------------------------------------------------------
+class GCFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(GCFrame, Frame)
+
+public:
+
+
+ //--------------------------------------------------------------------
+ // This constructor pushes a new GCFrame on the frame chain.
+ //--------------------------------------------------------------------
+#ifndef DACCESS_COMPILE
+ GCFrame() {
+ LIMITED_METHOD_CONTRACT;
+ };
+
+ GCFrame(OBJECTREF *pObjRefs, UINT numObjRefs, BOOL maybeInterior);
+ GCFrame(Thread *pThread, OBJECTREF *pObjRefs, UINT numObjRefs, BOOL maybeInterior);
+#endif
+ void Init(Thread *pThread, OBJECTREF *pObjRefs, UINT numObjRefs, BOOL maybeInterior);
+
+
+ //--------------------------------------------------------------------
+ // Pops the GCFrame and cancels the GC protection. Also
+ // trashes the contents of the pObjRefs in _DEBUG builds.
+ //--------------------------------------------------------------------
+ VOID Pop();
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc);
+
+#ifdef _DEBUG
+ virtual BOOL Protects(OBJECTREF *ppORef)
+ {
+ LIMITED_METHOD_CONTRACT;
+ for (UINT i = 0; i < m_numObjRefs; i++) {
+ if (ppORef == m_pObjRefs + i) {
+ return TRUE;
+ }
+ }
+ return FALSE;
+ }
+#endif
+
+#ifndef DACCESS_COMPILE
+ void *operator new (size_t sz, void* p)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return p;
+ }
+#endif
+
+#if defined(_DEBUG_IMPL)
+ const char* GetFrameTypeName() { LIMITED_METHOD_CONTRACT; return "GCFrame"; }
+#endif
+
+private:
+ PTR_OBJECTREF m_pObjRefs;
+ UINT m_numObjRefs;
+ PTR_Thread m_pCurThread;
+ BOOL m_MaybeInterior;
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER(GCFrame)
+};
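+
+// Usage sketch (illustrative): runtime C++ code normally erects a GCFrame via
+// the GCPROTECT_BEGIN/GCPROTECT_END macros rather than constructing one directly:
+//
+//   OBJECTREF oRef = ...;
+//   GCPROTECT_BEGIN(oRef);   // pushes a GCFrame protecting oRef
+//   // ... code that may trigger a GC; oRef is reported as a root ...
+//   GCPROTECT_END();         // pops the frame; trashes the protected slot in _DEBUG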
+
+#ifdef FEATURE_INTERPRETER
+class InterpreterFrame: public Frame
+{
+ VPTR_VTABLE_CLASS(InterpreterFrame, Frame)
+
+ class Interpreter* m_interp;
+
+public:
+
+#ifndef DACCESS_COMPILE
+ InterpreterFrame(class Interpreter* interp);
+
+ class Interpreter* GetInterpreter() { return m_interp; }
+
+ // Override.
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc);
+
+ MethodDesc* GetFunction();
+#endif
+
+ DEFINE_VTABLE_GETTER(InterpreterFrame)
+
+};
+
+typedef VPTR(class InterpreterFrame) PTR_InterpreterFrame;
+#endif // FEATURE_INTERPRETER
+
+#ifdef FEATURE_REMOTING
+class GCSafeCollectionFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(GCSafeCollectionFrame, Frame)
+ PTR_VOID m_pCollection;
+
+ public:
+ //--------------------------------------------------------------------
+ // This constructor pushes a new GCSafeCollectionFrame on the frame chain.
+ //--------------------------------------------------------------------
+#ifndef DACCESS_COMPILE
+ GCSafeCollectionFrame() { }
+ GCSafeCollectionFrame(void *collection);
+#endif
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc);
+
+ VOID Pop();
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER(GCSafeCollectionFrame)
+};
+#endif // FEATURE_REMOTING
+
+//-----------------------------------------------------------------------------
+
+struct ByRefInfo;
+typedef DPTR(ByRefInfo) PTR_ByRefInfo;
+
+struct ByRefInfo
+{
+ PTR_ByRefInfo pNext;
+ INT32 argIndex;
+ CorElementType typ;
+ TypeHandle typeHandle;
+ char data[1];
+};
+
+//-----------------------------------------------------------------------------
+// ProtectByRefsFrame
+//-----------------------------------------------------------------------------
+
+class ProtectByRefsFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(ProtectByRefsFrame, Frame)
+
+public:
+#ifndef DACCESS_COMPILE
+ ProtectByRefsFrame(Thread *pThread, ByRefInfo *brInfo)
+ : m_brInfo(brInfo)
+ {
+ WRAPPER_NO_CONTRACT;
+ Frame::Push(pThread);
+ }
+#endif
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext *sc);
+
+private:
+ PTR_ByRefInfo m_brInfo;
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(ProtectByRefsFrame)
+};
+
+
+//-----------------------------------------------------------------------------
+
+struct ValueClassInfo;
+typedef DPTR(struct ValueClassInfo) PTR_ValueClassInfo;
+
+struct ValueClassInfo
+{
+ PTR_ValueClassInfo pNext;
+ PTR_MethodTable pMT;
+ PTR_VOID pData;
+
+ ValueClassInfo(PTR_VOID aData, PTR_MethodTable aMT, PTR_ValueClassInfo aNext)
+ : pNext(aNext), pMT(aMT), pData(aData)
+ {
+ }
+};
+
+//-----------------------------------------------------------------------------
+// ProtectValueClassFrame
+//-----------------------------------------------------------------------------
+
+
+class ProtectValueClassFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(ProtectValueClassFrame, Frame)
+
+public:
+#ifndef DACCESS_COMPILE
+ ProtectValueClassFrame()
+ : m_pVCInfo(NULL)
+ {
+ WRAPPER_NO_CONTRACT;
+ Frame::Push();
+ }
+
+ ProtectValueClassFrame(Thread *pThread, ValueClassInfo *vcInfo)
+ : m_pVCInfo(vcInfo)
+ {
+ WRAPPER_NO_CONTRACT;
+ Frame::Push(pThread);
+ }
+#endif
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext *sc);
+
+ ValueClassInfo ** GetValueClassInfoList()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_pVCInfo;
+ }
+
+private:
+
+ ValueClassInfo *m_pVCInfo;
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER(ProtectValueClassFrame)
+};
+
+
+#ifdef _DEBUG
+BOOL IsProtectedByGCFrame(OBJECTREF *ppObjectRef);
+#endif
+
+
+//------------------------------------------------------------------------
+// DebuggerClassInitMarkFrame is a small frame whose only purpose in
+// life is to mark for the debugger that "class initialization code" is
+// being run. It does nothing useful except return good values from
+// GetFrameType and GetInterception.
+//------------------------------------------------------------------------
+
+class DebuggerClassInitMarkFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(DebuggerClassInitMarkFrame, Frame)
+
+public:
+
+#ifndef DACCESS_COMPILE
+ DebuggerClassInitMarkFrame()
+ {
+ WRAPPER_NO_CONTRACT;
+ Push();
+ };
+#endif
+
+ virtual int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_INTERCEPTION;
+ }
+
+ virtual Interception GetInterception()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return INTERCEPTION_CLASS_INIT;
+ }
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER(DebuggerClassInitMarkFrame)
+};
+
+
+//------------------------------------------------------------------------
+// DebuggerSecurityCodeMarkFrame is a small frame whose only purpose in
+// life is to mark for the debugger that "security code" is
+// being run. It does nothing useful except return good values from
+// GetFrameType and GetInterception.
+//------------------------------------------------------------------------
+
+class DebuggerSecurityCodeMarkFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(DebuggerSecurityCodeMarkFrame, Frame)
+
+public:
+#ifndef DACCESS_COMPILE
+ DebuggerSecurityCodeMarkFrame()
+ {
+ WRAPPER_NO_CONTRACT;
+ Push();
+ }
+#endif
+
+ virtual int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_INTERCEPTION;
+ }
+
+ virtual Interception GetInterception()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return INTERCEPTION_SECURITY;
+ }
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER(DebuggerSecurityCodeMarkFrame)
+};
+
+//------------------------------------------------------------------------
+// DebuggerExitFrame is a small frame whose only purpose in
+// life is to mark for the debugger that there is an exit transition on
+// the stack. This is special cased for the "break" IL instruction since
+// it is an fcall using a helper frame which returns TYPE_CALL instead of
+// an ecall (as in System.Diagnostics.Debugger.Break()) which returns
+// TYPE_EXIT. This just makes the two consistent for debugging services.
+//------------------------------------------------------------------------
+
+class DebuggerExitFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(DebuggerExitFrame, Frame)
+
+public:
+#ifndef DACCESS_COMPILE
+ DebuggerExitFrame()
+ {
+ WRAPPER_NO_CONTRACT;
+ Push();
+ }
+#endif
+
+ virtual int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_EXIT;
+ }
+
+ // Return information about an unmanaged call the frame
+ // will make.
+ // ip - the unmanaged routine which will be called
+ // returnIP - the address in the stub which the unmanaged routine
+ // will return to.
+ // returnSP - the location returnIP is pushed onto the stack
+ // during the call.
+ //
+ virtual void GetUnmanagedCallSite(TADDR* ip,
+ TADDR* returnIP,
+ TADDR* returnSP)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (ip)
+ *ip = NULL;
+
+ if (returnIP)
+ *returnIP = NULL;
+
+ if (returnSP)
+ *returnSP = NULL;
+ }
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER(DebuggerExitFrame)
+};
+
+//---------------------------------------------------------------------------------------
+//
+// DebuggerU2MCatchHandlerFrame is a small frame whose only purpose in life is to mark for the debugger
+// that there is a catch handler inside the runtime which may catch and swallow managed exceptions. The
+// debugger needs this frame to send a CatchHandlerFound (CHF) notification. Without this frame, the
+// debugger doesn't know where a managed exception is caught.
+//
+// Notes:
+// Currently this frame is only used in code:DispatchInfo.InvokeMember, which is an U2M transition.
+//
+
+class DebuggerU2MCatchHandlerFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(DebuggerU2MCatchHandlerFrame, Frame)
+
+public:
+#ifndef DACCESS_COMPILE
+ DebuggerU2MCatchHandlerFrame()
+ {
+ WRAPPER_NO_CONTRACT;
+ Frame::Push();
+ }
+
+ DebuggerU2MCatchHandlerFrame(Thread * pThread)
+ {
+ WRAPPER_NO_CONTRACT;
+ Frame::Push(pThread);
+ }
+#endif
+
+ ETransitionType GetTransitionType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TT_U2M;
+ }
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER(DebuggerU2MCatchHandlerFrame)
+};
+
+
+class UMThunkMarshInfo;
+typedef DPTR(class UMThunkMarshInfo) PTR_UMThunkMarshInfo;
+
+class UMEntryThunk;
+typedef DPTR(class UMEntryThunk) PTR_UMEntryThunk;
+
+#ifdef _TARGET_X86_
+//------------------------------------------------------------------------
+// This frame guards an unmanaged->managed transition through a UMThunk
+//------------------------------------------------------------------------
+
+class UMThkCallFrame : public UnmanagedToManagedFrame
+{
+ VPTR_VTABLE_CLASS(UMThkCallFrame, UnmanagedToManagedFrame)
+
+public:
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ PTR_UMEntryThunk GetUMEntryThunk();
+
+ static int GetOffsetOfUMEntryThunk()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetOffsetOfDatum();
+ }
+
+protected:
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(UMThkCallFrame)
+};
+#endif // _TARGET_X86_
+
+#if defined(_TARGET_X86_)
+//-------------------------------------------------------------------------
+// Exception handler for the COM to managed frame,
+// and the layout of the exception registration record structure on the stack.
+// The layout is similar to NT's EXCEPTION_REGISTRATION record,
+// followed by the UnmanagedToManagedFrame-specific info.
+
+struct ComToManagedExRecord
+{
+ EXCEPTION_REGISTRATION_RECORD m_ExReg;
+ ArgumentRegisters m_argRegs;
+ GSCookie m_gsCookie;
+ UMThkCallFrame m_frame;
+
+ UnmanagedToManagedFrame * GetCurrFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_frame;
+ }
+};
+#endif // _TARGET_X86_
+
+#if defined(FEATURE_INCLUDE_ALL_INTERFACES) && defined(_TARGET_X86_)
+//-----------------------------------------------------------------------------
+// ReverseEnterRuntimeFrame
+//-----------------------------------------------------------------------------
+
+class ReverseEnterRuntimeFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(ReverseEnterRuntimeFrame, Frame)
+
+public:
+ //------------------------------------------------------------------------
+ // Performs cleanup on an exception unwind
+ //------------------------------------------------------------------------
+#ifndef DACCESS_COMPILE
+ virtual void ExceptionUnwind()
+ {
+ WRAPPER_NO_CONTRACT;
+ GetThread()->ReverseLeaveRuntime();
+ }
+#endif
+
+ //---------------------------------------------------------------
+ // Expose key offsets and values for stub generation.
+ //---------------------------------------------------------------
+
+ static UINT32 GetNegSpaceSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return sizeof(GSCookie);
+ }
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(ReverseEnterRuntimeFrame)
+};
+
+//-----------------------------------------------------------------------------
+// LeaveRuntimeFrame
+//-----------------------------------------------------------------------------
+
+class LeaveRuntimeFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(LeaveRuntimeFrame, Frame)
+
+public:
+ //------------------------------------------------------------------------
+ // Performs cleanup on an exception unwind
+ //------------------------------------------------------------------------
+#ifndef DACCESS_COMPILE
+ virtual void ExceptionUnwind()
+ {
+ WRAPPER_NO_CONTRACT;
+ Thread::EnterRuntime();
+ }
+#endif
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(LeaveRuntimeFrame)
+};
+#endif
+
+//------------------------------------------------------------------------
+// This frame is pushed by any JIT'ted method that contains one or more
+// inlined N/Direct calls. Note that the JIT'ted method keeps it pushed
+// the whole time to amortize the pushing cost across the entire method.
+//------------------------------------------------------------------------
+
+typedef DPTR(class InlinedCallFrame) PTR_InlinedCallFrame;
+
+class InlinedCallFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(InlinedCallFrame, Frame)
+
+public:
+ virtual MethodDesc *GetFunction()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (FrameHasActiveCall(this) && HasFunction())
+ return PTR_MethodDesc(m_Datum);
+ else
+ return NULL;
+ }
+
+ BOOL HasFunction()
+ {
+ WRAPPER_NO_CONTRACT;
+
+#ifdef _WIN64
+ return ((m_Datum != NULL) && !(dac_cast<TADDR>(m_Datum) & 0x1));
+#else // _WIN64
+ return ((dac_cast<TADDR>(m_Datum) & ~0xffff) != 0);
+#endif // _WIN64
+ }
+
+ // Retrieves the return address into the code that called out
+ // to managed code
+ virtual TADDR GetReturnAddressPtr()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (FrameHasActiveCall(this))
+ return PTR_HOST_MEMBER_TADDR(InlinedCallFrame, this,
+ m_pCallerReturnAddress);
+ else
+ return NULL;
+ }
+
+ virtual BOOL NeedsUpdateRegDisplay()
+ {
+ WRAPPER_NO_CONTRACT;
+ return FrameHasActiveCall(this);
+ }
+
+ // Given a methodDesc representing an ILStub for a pinvoke call,
+ // this method will return the MethodDesc for the actual interop
+ // method if the current InlinedCallFrame is inactive.
+ PTR_MethodDesc GetActualInteropMethodDesc()
+ {
+#if defined(_TARGET_X86_) || defined(_TARGET_ARM_)
+ // Important: This code relies on the way JIT lays out frames. Keep it in sync
+ // with code:Compiler.lvaAssignFrameOffsets.
+ //
+ // | ... |
+ // +--------------------+
+ // | lvaStubArgumentVar | <= filled with EAX in prolog |
+ // +--------------------+ |
+ // | | |
+ // | InlinedCallFrame | |
+ // | | <= m_pCrawl.pFrame | to lower addresses
+ // +--------------------+ V
+ // | ... |
+ //
+ // Extract the actual MethodDesc to report from the InlinedCallFrame.
+ TADDR addr = dac_cast<TADDR>(this) + sizeof(InlinedCallFrame);
+ return PTR_MethodDesc(*PTR_TADDR(addr));
+#elif defined(_WIN64)
+ // On 64bit, the actual interop MethodDesc is saved off in a field of the InlinedCallFrame
+ // which is populated by the JIT. Refer to JIT_InitPInvokeFrame for details.
+ return PTR_MethodDesc(m_StubSecretArg);
+#else
+ _ASSERTE(!"NYI - Interop method reporting for this architecture!");
+ return NULL;
+#endif // defined(_TARGET_X86_) || defined(_TARGET_ARM_)
+ }
+
+ virtual void UpdateRegDisplay(const PREGDISPLAY);
+
+ // m_Datum contains MethodDesc ptr or
+ // - on AMD64: CALLI target address (if lowest bit is set)
+ // - on X86: argument stack size (if value is <64k)
+ // See code:HasFunction.
+ PTR_NDirectMethodDesc m_Datum;
+
+#ifdef _WIN64
+ // IL stubs fill this field with the incoming secret argument when they erect
+ // InlinedCallFrame so we know which interop method was invoked even if the frame
+ // is not active at the moment.
+ PTR_VOID m_StubSecretArg;
+#endif // _WIN64
+
+protected:
+ // X86: ESP after pushing the outgoing arguments, and just before calling
+ // out to unmanaged code.
+ // Other platforms: the field stays set throughout the declaring method.
+ PTR_VOID m_pCallSiteSP;
+
+ // EIP where the unmanaged call will return to. This will be a pointer into
+ // the code of the managed frame which has the InlinedCallFrame
+ // This is set to NULL in the method prolog. It gets set just before the
+ // call to the target and reset back to NULL after the stop-for-GC check
+ // following the call.
+ TADDR m_pCallerReturnAddress;
+
+ // This is used only for EBP. Hence, a stackwalk will miss the other
+ // callee-saved registers for the method with the InlinedCallFrame.
+ // To prevent GC-holes, we do not keep any GC references in callee-saved
+ // registers across an NDirect call.
+ TADDR m_pCalleeSavedFP;
+
+public:
+ //---------------------------------------------------------------
+ // Expose key offsets and values for stub generation.
+ //---------------------------------------------------------------
+
+ static void GetEEInfo(CORINFO_EE_INFO::InlinedCallFrameInfo * pEEInfo);
+
+ // Is the specified frame an InlinedCallFrame which has an active call
+ // inside it right now?
+ static BOOL FrameHasActiveCall(Frame *pFrame)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return pFrame &&
+ pFrame != FRAME_TOP &&
+ InlinedCallFrame::GetMethodFrameVPtr() == pFrame->GetVTablePtr() &&
+ dac_cast<TADDR>(dac_cast<PTR_InlinedCallFrame>(pFrame)->m_pCallerReturnAddress) != NULL;
+ }
+
+ // Marks the frame as inactive.
+ void Reset()
+ {
+ m_pCallerReturnAddress = NULL;
+ }
+
+ int GetFrameType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TYPE_EXIT;
+ }
+
+ virtual BOOL IsTransitionToNativeFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return TRUE;
+ }
+
+ PTR_VOID GetCallSiteSP()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCallSiteSP;
+ }
+
+ TADDR GetCalleeSavedFP()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pCalleeSavedFP;
+ }
+
+ // Set the vptr and GSCookie
+ VOID Init();
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(InlinedCallFrame)
+};
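+
+// Sketch of the JIT-generated inline PInvoke sequence that drives this frame
+// (pseudo-code only; the real code is emitted by the JIT and the frame is
+// initialized via code:JIT_InitPInvokeFrame):
+//
+//   prolog:    initialize the InlinedCallFrame local and link it into the
+//              thread's frame chain; m_pCallerReturnAddress stays NULL (inactive)
+//   call site: set m_pCallSiteSP and m_pCallerReturnAddress; switch the thread
+//              to preemptive GC mode; call the unmanaged target
+//   return:    switch back to cooperative mode; stop-for-GC check; reset
+//              m_pCallerReturnAddress to NULL (inactive again)
+//   epilog:    unlink the frame from the chain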
+
+//------------------------------------------------------------------------
+// This frame is used to mark a Context/AppDomain Transition
+//------------------------------------------------------------------------
+
+class ContextTransitionFrame : public Frame
+{
+private:
+ PTR_Context m_pReturnContext;
+ PTR_Object m_ReturnExecutionContext;
+ PTR_Object m_LastThrownObjectInParentContext;
+ ULONG_PTR m_LockCount; // Number of locks the thread takes
+ // before the transition.
+ ULONG_PTR m_CriticalRegionCount;
+
+ VPTR_VTABLE_CLASS(ContextTransitionFrame, Frame)
+
+public:
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc);
+
+ virtual PTR_Context* GetReturnContextAddr()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return &m_pReturnContext;
+ }
+
+ virtual Object **GetReturnExecutionContextAddr()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (Object **) &m_ReturnExecutionContext;
+ }
+
+ OBJECTREF GetLastThrownObjectInParentContext()
+ {
+ return ObjectToOBJECTREF(m_LastThrownObjectInParentContext);
+ }
+
+ void SetLastThrownObjectInParentContext(OBJECTREF lastThrownObject)
+ {
+ m_LastThrownObjectInParentContext = OBJECTREFToObject(lastThrownObject);
+ }
+
+ void SetLockCount(DWORD lockCount, DWORD criticalRegionCount)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_LockCount = lockCount;
+ m_CriticalRegionCount = criticalRegionCount;
+ }
+ void GetLockCount(DWORD* pLockCount, DWORD* pCriticalRegionCount)
+ {
+ LIMITED_METHOD_CONTRACT;
+ *pLockCount = (DWORD) m_LockCount;
+ *pCriticalRegionCount = (DWORD) m_CriticalRegionCount;
+ }
+
+ // Let the debugger know that we're transitioning between AppDomains.
+ ETransitionType GetTransitionType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TT_AppDomain;
+ }
+
+#ifndef DACCESS_COMPILE
+ ContextTransitionFrame()
+ : m_pReturnContext(NULL)
+ , m_ReturnExecutionContext(NULL)
+ , m_LastThrownObjectInParentContext(NULL)
+ , m_LockCount(0)
+ , m_CriticalRegionCount(0)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+#endif
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER(ContextTransitionFrame)
+};
+
+// TODO [DAVBR]: For the full fix for VsWhidbey 450273, this
+// may be uncommented once isLegalManagedCodeCaller works properly
+// with non-return address inputs, and with non-DEBUG builds
+//bool isLegalManagedCodeCaller(TADDR retAddr);
+bool isRetAddr(TADDR retAddr, TADDR* whereCalled);
+
+//------------------------------------------------------------------------
+#ifdef _TARGET_X86_
+// This frame is used as padding for virtual stub dispatch tailcalls.
+// When A calls B via virtual stub dispatch, the stub dispatch stub resolves
+// the target code for B and jumps to it. If A wants to do a tail call,
+// it does not get a chance to unwind its frame since the virtual stub dispatch
+// stub is not set up to return the address of the target code (rather
+// than just jumping to it).
+// To do a tail call, A calls JIT_TailCall, which unwinds A's frame
+// and sets up a TailCallFrame. It then calls the stub dispatch stub
+// which disassembles the caller (JIT_TailCall, in this case) to get some information,
+// resolves the target code for B, and then jumps to B.
+// If B also does a virtual stub dispatch tail call, then we reuse the
+// existing TailCallFrame instead of setting up a second one.
+//
+// We could eliminate TailCallFrame if we factor the VSD stub to return
+// the target code address. This is currently not a very important scenario
+// as tail calls on interface calls are uncommon.
+#else
+// This frame is used as padding for tailcalls which require more space
+// than the caller has in its incoming argument space.
+// To do a tail call from A to B, A calls JIT_TailCall, which unwinds A's frame
+// and sets up a TailCallFrame and the arguments. It then jumps to B.
+// If B also does a tail call, then we reuse the
+// existing TailCallFrame instead of setting up a second one.
+//
+// This is also used whenever value types that aren't enregisterable are
+// passed by value instead of ref. This is currently not a very important
+// scenario as tail calls are uncommon.
+#endif
+//------------------------------------------------------------------------
+
+class TailCallFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(TailCallFrame, Frame)
+
+#if defined(_TARGET_X86_)
+ TADDR m_CallerAddress; // the address the tailcall was initiated from
+ CalleeSavedRegisters m_regs; // callee saved registers - the stack walk assumes that all non-JIT frames have them
+ TADDR m_ReturnAddress; // the return address of the tailcall
+#elif defined(_TARGET_AMD64_)
+ TADDR m_pGCLayout;
+ TADDR m_padding; // code:StubLinkerCPU::CreateTailCallCopyArgsThunk expects the size of TailCallFrame to be 16-byte aligned
+ CalleeSavedRegisters m_calleeSavedRegisters;
+ TADDR m_ReturnAddress;
+#elif defined(_TARGET_ARM_)
+ union {
+ CalleeSavedRegisters m_calleeSavedRegisters;
+ // alias saved link register as m_ReturnAddress
+ struct {
+ INT32 r4, r5, r6, r7, r8, r9, r10;
+ INT32 r11;
+ TADDR m_ReturnAddress;
+ };
+ };
+#else
+ TADDR m_ReturnAddress;
+#endif
+
+public:
+#if !defined(_TARGET_X86_)
+
+#ifndef DACCESS_COMPILE
+ TailCallFrame(T_CONTEXT * pContext, Thread * pThread)
+ {
+ InitFromContext(pContext);
+ m_Next = pThread->GetFrame();
+ }
+
+ void InitFromContext(T_CONTEXT * pContext);
+
+ // Architecture-specific method to initialize a CONTEXT record as if the first
+ // part of the TailCallHelperStub had executed
+ static TailCallFrame * AdjustContextForTailCallHelperStub(_CONTEXT * pContext, size_t cbNewArgArea, Thread * pThread);
+#endif
+
+ static TailCallFrame * GetFrameFromContext(CONTEXT * pContext);
+#endif // !_TARGET_X86_
+
+#if defined(_TARGET_X86_)
+ static TailCallFrame* FindTailCallFrame(Frame* pFrame)
+ {
+ LIMITED_METHOD_CONTRACT;
+ // loop through the frame chain
+ while (pFrame->GetVTablePtr() != TailCallFrame::GetMethodFrameVPtr())
+ pFrame = pFrame->m_Next;
+ return (TailCallFrame*)pFrame;
+ }
+
+ TADDR GetCallerAddress()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_CallerAddress;
+ }
+#endif // _TARGET_X86_
+
+ virtual TADDR GetReturnAddressPtr()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return PTR_HOST_MEMBER_TADDR(TailCallFrame, this,
+ m_ReturnAddress);
+ }
+
+ virtual BOOL NeedsUpdateRegDisplay()
+ {
+ return TRUE;
+ }
+
+ virtual void UpdateRegDisplay(const PREGDISPLAY pRD);
+
+#ifdef _TARGET_AMD64_
+ void SetGCLayout(TADDR pGCLayout)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pGCLayout = pGCLayout;
+ }
+
+ virtual void GcScanRoots(promote_func *fn, ScanContext* sc);
+#else
+ void SetGCLayout(TADDR pGCLayout)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pGCLayout == NULL);
+ }
+#endif
+
+private:
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(TailCallFrame)
+};
+
+//------------------------------------------------------------------------
+// ExceptionFilterFrame is a small frame whose only purpose in
+// life is to set SHADOW_SP_FILTER_DONE during unwind from an exception filter.
+//------------------------------------------------------------------------
+
+class ExceptionFilterFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(ExceptionFilterFrame, Frame)
+ size_t* m_pShadowSP;
+
+public:
+#ifndef DACCESS_COMPILE
+ ExceptionFilterFrame(size_t* pShadowSP)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_pShadowSP = pShadowSP;
+ Push();
+ }
+
+ void Pop()
+ {
+ // Mark the filter as done, then let the base class unlink the frame.
+ WRAPPER_NO_CONTRACT;
+ SetFilterDone();
+ Frame::Pop();
+ }
+
+ void SetFilterDone()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Mark the filter as having completed
+ if (m_pShadowSP)
+ {
+ // Make sure that CallJitEHFilterHelper marked us as being in the filter.
+ _ASSERTE(*m_pShadowSP & ICodeManager::SHADOW_SP_IN_FILTER);
+ *m_pShadowSP |= ICodeManager::SHADOW_SP_FILTER_DONE;
+ }
+ }
+#endif
+
+private:
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(ExceptionFilterFrame)
+};
+
+#ifdef _DEBUG
+// We use IsProtectedByGCFrame to check if some OBJECTREF pointers are protected
+// against GC. That function doesn't know if a byref is from the managed stack and thus
+// protected by the JIT. AssumeByrefFromJITStack is used to bypass that check if an
+// OBJECTREF pointer is passed from managed code to an FCall and it lives on the stack.
+class AssumeByrefFromJITStack : public Frame
+{
+ VPTR_VTABLE_CLASS(AssumeByrefFromJITStack, Frame)
+public:
+#ifndef DACCESS_COMPILE
+ AssumeByrefFromJITStack(OBJECTREF *pObjRef)
+ {
+ m_pObjRef = pObjRef;
+ }
+#endif
+
+ BOOL Protects(OBJECTREF *ppORef)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ppORef == m_pObjRef;
+ }
+
+private:
+ OBJECTREF *m_pObjRef;
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER(AssumeByrefFromJITStack)
+}; //AssumeByrefFromJITStack
+
+#endif //_DEBUG
+
+//-----------------------------------------------------------------------------
+// FrameWithCookie is used to declare a Frame in source code with a cookie
+// immediately preceding it.
+// This is just a specialized version of GSCookieFor<T>
+//
+// For Frames that are set up by stubs, the stub is responsible for setting up
+// the GSCookie.
+//
+// Note that we have to play all these games for the GSCookie as the GSCookie
+// needs to precede the vtable pointer, so that the GSCookie is guaranteed to
+// catch any stack-buffer-overrun corruptions that overwrite the Frame data.
+//
+//-----------------------------------------------------------------------------
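+
+// Usage sketch (illustrative): a frame is declared on the stack through this
+// wrapper so that the GSCookie lands just below the Frame's vtable pointer:
+//
+//   FrameWithCookie<DebuggerClassInitMarkFrame> __dcimf;  // ctor pushes the frame
+//   // ... run the class initializer ...
+//   __dcimf.Pop();                                        // forwards to m_frame.Pop()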
+
+class DebuggerEval;
+
+class GCSafeCollection;
+
+template <typename FrameType>
+class FrameWithCookie
+{
+protected:
+
+ GSCookie m_gsCookie;
+ FrameType m_frame;
+
+public:
+
+ //
+ // Overload all the required constructors
+ //
+
+ FrameWithCookie() :
+ m_gsCookie(GetProcessGSCookie()), m_frame() { WRAPPER_NO_CONTRACT; }
+
+ FrameWithCookie(Thread * pThread) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(pThread) { WRAPPER_NO_CONTRACT; }
+
+ FrameWithCookie(T_CONTEXT * pContext) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(pContext) { WRAPPER_NO_CONTRACT; }
+
+ FrameWithCookie(TransitionBlock * pTransitionBlock) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(pTransitionBlock) { WRAPPER_NO_CONTRACT; }
+
+ FrameWithCookie(TransitionBlock * pTransitionBlock, MethodDesc * pMD) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(pTransitionBlock, pMD) { WRAPPER_NO_CONTRACT; }
+
+ FrameWithCookie(TransitionBlock * pTransitionBlock, VASigCookie * pVASigCookie, PCODE pUnmanagedTarget) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(pTransitionBlock, pVASigCookie, pUnmanagedTarget) { WRAPPER_NO_CONTRACT; }
+
+ FrameWithCookie(TransitionBlock * pTransitionBlock, int frameFlags) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(pTransitionBlock, frameFlags) { WRAPPER_NO_CONTRACT; }
+
+
+ // GCFrame
+ FrameWithCookie(Thread * pThread, OBJECTREF *pObjRefs, UINT numObjRefs, BOOL maybeInterior) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(pThread, pObjRefs, numObjRefs, maybeInterior) { WRAPPER_NO_CONTRACT; }
+
+ FrameWithCookie(OBJECTREF *pObjRefs, UINT numObjRefs, BOOL maybeInterior) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(pObjRefs, numObjRefs, maybeInterior) { WRAPPER_NO_CONTRACT; }
+
+ // GCSafeCollectionFrame
+ FrameWithCookie(GCSafeCollection *gcSafeCollection) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(gcSafeCollection) { WRAPPER_NO_CONTRACT; }
+
+#ifdef FEATURE_INTERPRETER
+ // InterpreterFrame
+ FrameWithCookie(Interpreter* interp) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(interp) { WRAPPER_NO_CONTRACT; }
+#endif
+
+ // HijackFrame
+ FrameWithCookie(LPVOID returnAddress, Thread *thread, HijackArgs *args) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(returnAddress, thread, args) { WRAPPER_NO_CONTRACT; }
+
+#ifdef DEBUGGING_SUPPORTED
+ // FuncEvalFrame
+ FrameWithCookie(DebuggerEval *pDebuggerEval, TADDR returnAddress, BOOL showFrame) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(pDebuggerEval, returnAddress, showFrame) { WRAPPER_NO_CONTRACT; }
+#endif // DEBUGGING_SUPPORTED
+
+ // TailCallFrame
+ FrameWithCookie(T_CONTEXT * pContext, Thread *thread) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(pContext, thread) { WRAPPER_NO_CONTRACT; }
+
+#ifndef DACCESS_COMPILE
+ // GSCookie for HelperMethodFrames is initialized in a common HelperMethodFrame init method
+
+ // HelperMethodFrame
+ FORCEINLINE FrameWithCookie(void* fCallFtnEntry, unsigned attribs = 0) :
+ m_frame(fCallFtnEntry, attribs) { WRAPPER_NO_CONTRACT; }
+
+ // HelperMethodFrame_1OBJ
+ FORCEINLINE FrameWithCookie(void* fCallFtnEntry, unsigned attribs, OBJECTREF * aGCPtr1) :
+ m_frame(fCallFtnEntry, attribs, aGCPtr1) { WRAPPER_NO_CONTRACT; }
+
+ // HelperMethodFrame_2OBJ
+ FORCEINLINE FrameWithCookie(void* fCallFtnEntry, unsigned attribs, OBJECTREF * aGCPtr1, OBJECTREF * aGCPtr2) :
+ m_frame(fCallFtnEntry, attribs, aGCPtr1, aGCPtr2) { WRAPPER_NO_CONTRACT; }
+
+ // HelperMethodFrame_PROTECTOBJ
+ FORCEINLINE FrameWithCookie(void* fCallFtnEntry, unsigned attribs, OBJECTREF* pObjRefs, int numObjRefs) :
+ m_frame(fCallFtnEntry, attribs, pObjRefs, numObjRefs) { WRAPPER_NO_CONTRACT; }
+
+#endif // DACCESS_COMPILE
+
+ // ProtectByRefsFrame
+ FrameWithCookie(Thread * pThread, ByRefInfo * pByRefs) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(pThread, pByRefs) { WRAPPER_NO_CONTRACT; }
+
+ // ProtectValueClassFrame
+ FrameWithCookie(Thread * pThread, ValueClassInfo * pValueClasses) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(pThread, pValueClasses) { WRAPPER_NO_CONTRACT; }
+
+ // ExceptionFilterFrame
+ FrameWithCookie(size_t* pShadowSP) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(pShadowSP) { WRAPPER_NO_CONTRACT; }
+
+#ifdef _DEBUG
+ // AssumeByrefFromJITStack
+ FrameWithCookie(OBJECTREF *pObjRef) :
+ m_gsCookie(GetProcessGSCookie()), m_frame(pObjRef) { WRAPPER_NO_CONTRACT; }
+
+ void SetAddrOfHaveCheckedRestoreState(BOOL* pDoneCheck)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_frame.SetAddrOfHaveCheckedRestoreState(pDoneCheck);
+ }
+
+#endif //_DEBUG
+
+ //
+ // Overload some common Frame methods for easy redirection
+ //
+
+ void Push() { WRAPPER_NO_CONTRACT; m_frame.Push(); }
+ void Pop() { WRAPPER_NO_CONTRACT; m_frame.Pop(); }
+ void Push(Thread * pThread) { WRAPPER_NO_CONTRACT; m_frame.Push(pThread); }
+ void Pop(Thread * pThread) { WRAPPER_NO_CONTRACT; m_frame.Pop(pThread); }
+ PCODE GetReturnAddress() { WRAPPER_NO_CONTRACT; return m_frame.GetReturnAddress(); }
+ T_CONTEXT * GetContext() { WRAPPER_NO_CONTRACT; return m_frame.GetContext(); }
+ FrameType* operator&() { LIMITED_METHOD_CONTRACT; return &m_frame; }
+ LazyMachState * MachineState() { WRAPPER_NO_CONTRACT; return m_frame.MachineState(); }
+ Thread * GetThread() { WRAPPER_NO_CONTRACT; return m_frame.GetThread(); }
+ BOOL InsureInit(bool initialInit, struct MachState* unwindState)
+ { WRAPPER_NO_CONTRACT; return m_frame.InsureInit(initialInit, unwindState); }
+ void Poll() { WRAPPER_NO_CONTRACT; m_frame.Poll(); }
+ void SetStackPointerPtr(TADDR sp) { WRAPPER_NO_CONTRACT; m_frame.SetStackPointerPtr(sp); }
+ void InitAndLink(T_CONTEXT *pContext) { WRAPPER_NO_CONTRACT; m_frame.InitAndLink(pContext); }
+ void Init(Thread *pThread, OBJECTREF *pObjRefs, UINT numObjRefs, BOOL maybeInterior)
+ { WRAPPER_NO_CONTRACT; m_frame.Init(pThread, pObjRefs, numObjRefs, maybeInterior); }
+ ValueClassInfo ** GetValueClassInfoList() { WRAPPER_NO_CONTRACT; return m_frame.GetValueClassInfoList(); }
+
+#if 0
+ //
+ // Access to the underlying Frame
+ // You should only need to use this if none of the above overloads work for you
+ // Consider adding the required overload to the list above
+ //
+
+ FrameType& operator->() { LIMITED_METHOD_CONTRACT; return m_frame; }
+#endif
+
+ // Since the "&" operator is overloaded, use this function to get to the
+ // address of FrameWithCookie, rather than that of FrameWithCookie::m_frame.
+ GSCookie * GetGSCookiePtr() { LIMITED_METHOD_CONTRACT; return &m_gsCookie; }
+};
+
+
+// This frame doesn't represent a transition of any sort; it's simply placed on the stack to represent an assembly that will be found
+// and checked by stack-walking security demands. This can be used in scenarios where an assembly implicitly controls a
+// security-sensitive operation without being explicitly represented on the stack. For example, an assembly decorating one of its
+// classes or methods with a custom attribute can implicitly cause the ctor or property setters for that attribute to be executed by
+// a third party if that party happens to browse the attributes on the assembly.
+// Note: This frame is pushed from managed code, so be sure to keep the layout synchronized with that in
+// bcl\system\reflection\customattribute.cs.
+class SecurityContextFrame : public Frame
+{
+ VPTR_VTABLE_CLASS(SecurityContextFrame, Frame)
+
+ Assembly *m_pAssembly;
+
+public:
+ virtual Assembly *GetAssembly() { LIMITED_METHOD_CONTRACT; return m_pAssembly; }
+
+ void SetAssembly(Assembly *pAssembly) { LIMITED_METHOD_CONTRACT; m_pAssembly = pAssembly; }
+
+ // Keep as last entry in class
+ DEFINE_VTABLE_GETTER_AND_CTOR(SecurityContextFrame)
+};
+
+
+// This holder is defined for addressing a very specific issue:
+// When an exception is thrown, Frames that use GCPROTECT_BEGIN/GCPROTECT_END can end up referencing
+// corrupted object refs until the point where the Frame is actually popped from the thread's Frame-chain.
+// Stack space allocated for OBJECTREFs in a try block may be reused in the catch block by other structures,
+// corrupting our protected OBJECTREFs and the Frame containing them. While the Frame is still on the call stack
+// a GC may occur, detect the corrupt OBJECTREF, and take down the process. FrameWithCookieHolder
+// forces the Frame to be popped when exiting the current scope, i.e. before the OBJECTREF can be corrupted.
+//
+// This holder explicitly calls Thread::SetFrame, therefore potentially removing Frames from the thread's frame
+// chain without properly calling their corresponding ExceptionUnwind() method. This is extremely dangerous to
+// use unless it is backed by a call to UnwindAndContinueRethrowHelperInsideCatch() which does the same thing
+// (and has been vetted to be correct in doing so). Using this holder in any other circumstances may lead to bugs that
+// are extremely difficult to track down.
+template <typename TYPE>
+class FrameWithCookieHolder
+{
+ protected:
+ FrameWithCookie<TYPE> m_frame;
+
+ public:
+ FORCEINLINE FrameWithCookieHolder()
+ : m_frame()
+ {
+ }
+
+ // GCFrame
+ FORCEINLINE FrameWithCookieHolder(OBJECTREF *pObjRefs, UINT numObjRefs, BOOL maybeInterior)
+ : m_frame(pObjRefs, numObjRefs, maybeInterior)
+ {
+ }
+
+ FORCEINLINE ~FrameWithCookieHolder()
+ {
+#ifndef DACCESS_COMPILE
+
+ Thread* pThread = GetThread();
+ if (pThread)
+ {
+ GCX_COOP();
+ pThread->SetFrame(&m_frame);
+ m_frame.Pop();
+ }
+
+#endif // #ifndef DACCESS_COMPILE
+ }
+
+};
+
+#ifndef DACCESS_COMPILE
+// The restrictions on FrameWithCookieHolder also apply to GCPROTECT_HOLDER.
+// Please read the FrameWithCookieHolder comments before using GCPROTECT_HOLDER.
+#define GCPROTECT_HOLDER(ObjRefStruct) \
+ FrameWithCookieHolder<GCFrame> __gcframe((OBJECTREF*)&(ObjRefStruct), \
+ sizeof(ObjRefStruct)/sizeof(OBJECTREF), \
+ FALSE);
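+
+// Illustrative usage (a sketch; the struct and field are hypothetical):
+//
+//     struct { OBJECTREF oRef; } gc;
+//     gc.oRef = NULL;
+//     GCPROTECT_HOLDER(gc);
+//     ... work that may throw; the holder pops the frame at scope exit ...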
+
+#else // #ifndef DACCESS_COMPILE
+
+#define GCPROTECT_HOLDER(ObjRefStruct)
+
+#endif // #ifndef DACCESS_COMPILE
+
+
+//------------------------------------------------------------------------
+// These macros GC-protect OBJECTREF pointers on the EE's behalf.
+// In between these macros, the GC can move but not discard the protected
+// objects. If the GC moves an object, it will update the guarded OBJECTREF's.
+// Typical usage:
+//
+// OBJECTREF or = <some valid objectref>;
+// GCPROTECT_BEGIN(or);
+//
+// ...<do work that can trigger GC>...
+//
+// GCPROTECT_END();
+//
+//
+// These macros can also protect multiple OBJECTREF's if they're packaged
+// into a structure:
+//
+// struct xx {
+// OBJECTREF o1;
+// OBJECTREF o2;
+// } gc;
+//
+// GCPROTECT_BEGIN(gc);
+// ....
+// GCPROTECT_END();
+//
+//
+// Notes:
+//
+// - GCPROTECT_BEGININTERIOR() can be used in place of GCPROTECT_BEGIN()
+// to handle the case where one or more of the OBJECTREFs is potentially
+// an interior pointer. This is a rare situation, because boxing would
+// normally prevent us from encountering it. Be aware that the OBJECTREFs
+// we protect are not validated in this situation.
+//
+// - GCPROTECT_ARRAY_BEGIN() can be used when an array of object references
+// is allocated on the stack. The pointer to the first element is passed
+// along with the number of elements in the array.
+//
+// - The argument to GCPROTECT_BEGIN should be an lvalue because it
+// uses "sizeof" to count the OBJECTREF's.
+//
+// - GCPROTECT_BEGIN spiritually violates our normal convention of not passing
+// non-const reference arguments. Unfortunately, this is necessary in
+// order for the sizeof thing to work.
+//
+// - GCPROTECT_BEGIN does _not_ zero out the OBJECTREF's. You must have
+// valid OBJECTREF's when you invoke this macro.
+//
+// - GCPROTECT_BEGIN begins a new C nesting block. Besides allowing
+// GCPROTECT_BEGIN's to nest, it also has the advantage of causing
+// a compiler error if you forget to code a matching GCPROTECT_END.
+//
+// - If you are GCPROTECTing something, it means you are expecting a GC to occur.
+// So we assert that GC is not forbidden. If you hit this assert, you probably need
+// a HELPER_METHOD_FRAME to protect the region that can cause the GC.
+//------------------------------------------------------------------------
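+
+// Illustrative array usage (a sketch; the array and count here are hypothetical):
+//
+//     OBJECTREF refs[4] = { NULL, NULL, NULL, NULL };
+//     GCPROTECT_ARRAY_BEGIN(refs[0], 4);
+//     ... code that may trigger a GC; all four elements are protected ...
+//     GCPROTECT_END();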
+
+#ifndef DACCESS_COMPILE
+
+#ifdef _PREFAST_
+// Suppress prefast warning #6384: Dividing sizeof a pointer by another value
+#pragma warning(disable:6384)
+#endif /*_PREFAST_ */
+
+#define GCPROTECT_BEGIN(ObjRefStruct) do { \
+ FrameWithCookie<GCFrame> __gcframe( \
+ (OBJECTREF*)&(ObjRefStruct), \
+ sizeof(ObjRefStruct)/sizeof(OBJECTREF), \
+ FALSE); \
+ /* work around unreachable code warning */ \
+ if (true) { DEBUG_ASSURE_NO_RETURN_BEGIN(GCPROTECT)
+
+#define GCPROTECT_BEGIN_THREAD(pThread, ObjRefStruct) do { \
+ FrameWithCookie<GCFrame> __gcframe( \
+ pThread, \
+ (OBJECTREF*)&(ObjRefStruct), \
+ sizeof(ObjRefStruct)/sizeof(OBJECTREF), \
+ FALSE); \
+ /* work around unreachable code warning */ \
+ if (true) { DEBUG_ASSURE_NO_RETURN_BEGIN(GCPROTECT)
+
+#define GCPROTECT_ARRAY_BEGIN(ObjRefArray,cnt) do { \
+ FrameWithCookie<GCFrame> __gcframe( \
+ (OBJECTREF*)&(ObjRefArray), \
+ cnt * sizeof(ObjRefArray) / sizeof(OBJECTREF), \
+ FALSE); \
+ /* work around unreachable code warning */ \
+ if (true) { DEBUG_ASSURE_NO_RETURN_BEGIN(GCPROTECT)
+
+#define GCPROTECT_BEGININTERIOR(ObjRefStruct) do { \
+ FrameWithCookie<GCFrame> __gcframe( \
+ (OBJECTREF*)&(ObjRefStruct), \
+ sizeof(ObjRefStruct)/sizeof(OBJECTREF), \
+ TRUE); \
+ /* work around unreachable code warning */ \
+ if (true) { DEBUG_ASSURE_NO_RETURN_BEGIN(GCPROTECT)
+
+#define GCPROTECT_BEGININTERIOR_ARRAY(ObjRefArray,cnt) do { \
+ FrameWithCookie<GCFrame> __gcframe( \
+ (OBJECTREF*)&(ObjRefArray), \
+ cnt, \
+ TRUE); \
+ /* work around unreachable code warning */ \
+ if (true) { DEBUG_ASSURE_NO_RETURN_BEGIN(GCPROTECT)
+
+
+#define GCPROTECT_END() \
+ DEBUG_ASSURE_NO_RETURN_END(GCPROTECT) } \
+ __gcframe.Pop(); } while(0)
+
+
+#else // #ifndef DACCESS_COMPILE
+
+#define GCPROTECT_BEGIN(ObjRefStruct)
+#define GCPROTECT_ARRAY_BEGIN(ObjRefArray,cnt)
+#define GCPROTECT_BEGININTERIOR(ObjRefStruct)
+#define GCPROTECT_END()
+
+#endif // #ifndef DACCESS_COMPILE
+
+
+#define ASSERT_ADDRESS_IN_STACK(address) _ASSERTE (GetThread () && GetThread ()->IsAddressInStack (address));
+
+#if defined (_DEBUG) && !defined (DACCESS_COMPILE)
+#define ASSUME_BYREF_FROM_JIT_STACK_BEGIN(__objRef) \
+ /* make sure we are only called inside an FCall */ \
+ if (__me == 0) {}; \
+ /* make sure the address is on the stack. If the address is an interior */ \
+ /* pointer into the GC heap, the FCall still needs to protect it explicitly */ \
+ ASSERT_ADDRESS_IN_STACK (__objRef); \
+ do { \
+ FrameWithCookie<AssumeByrefFromJITStack> __dummyAssumeByrefFromJITStack ((__objRef)); \
+ __dummyAssumeByrefFromJITStack.Push (); \
+ /* work around unreachable code warning */ \
+ if (true) { DEBUG_ASSURE_NO_RETURN_BEGIN(GC_PROTECT)
+
+#define ASSUME_BYREF_FROM_JIT_STACK_END() \
+ DEBUG_ASSURE_NO_RETURN_END(GC_PROTECT) } \
+ __dummyAssumeByrefFromJITStack.Pop(); } while(0)
+#else //defined (_DEBUG) && !defined (DACCESS_COMPILE)
+#define ASSUME_BYREF_FROM_JIT_STACK_BEGIN(__objRef)
+#define ASSUME_BYREF_FROM_JIT_STACK_END()
+#endif //defined (_DEBUG) && !defined (DACCESS_COMPILE)
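+
+// Illustrative FCall usage of the macros above (a sketch; the FCall and its
+// argument are hypothetical):
+//
+//     FCIMPL1(void, SomeFCall, STRINGREF *pStrByRef)
+//     {
+//         ASSUME_BYREF_FROM_JIT_STACK_BEGIN(pStrByRef);
+//         ... use *pStrByRef; in debug builds GCs verify the byref stays valid ...
+//         ASSUME_BYREF_FROM_JIT_STACK_END();
+//     }
+//     FCIMPLEND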
+
+//------------------------------------------------------------------------
+
+#if defined(FRAMES_TURNED_FPO_ON)
+#pragma optimize("", on) // Go back to command line default optimizations
+#undef FRAMES_TURNED_FPO_ON
+#undef FPO_ON
+#endif
+
+#endif //__frames_h__
diff --git a/src/vm/frameworkexceptionloader.cpp b/src/vm/frameworkexceptionloader.cpp
new file mode 100644
index 0000000000..d181907da2
--- /dev/null
+++ b/src/vm/frameworkexceptionloader.cpp
@@ -0,0 +1,104 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+// Just the subset of functionality from the MscorlibBinder necessary for exceptions.
+
+#include "common.h"
+#include "frameworkexceptionloader.h"
+
+
+struct ExceptionLocationData
+{
+ LPCUTF8 Namespace;
+ LPCUTF8 Name;
+ LPCUTF8 AssemblySimpleName;
+ LPCUTF8 PublicKeyToken;
+};
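+
+// One row per exception type defined outside mscorlib: the type's namespace and
+// name, plus the simple name and public key token of the framework assembly that
+// defines it. The rows are generated from rexcep.h below.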
+
+static const
+ExceptionLocationData g_ExceptionLocationData[] = {
+#define DEFINE_EXCEPTION(ns, reKind, bHRformessage, ...)
+#define DEFINE_EXCEPTION_HR_WINRT_ONLY(ns, reKind, ...)
+#define DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY(ns, reKind, assemblySimpleName, publicKeyToken, bHRformessage, ...) { ns, PTR_CSTR((TADDR) # reKind), assemblySimpleName, publicKeyToken },
+#include "rexcep.h"
+ {NULL, NULL, NULL, NULL} // On Silverlight, this table may be empty. This dummy entry allows us to compile.
+};
+
+
+// Note that some assemblies, like System.Runtime.WindowsRuntime, might not be installed on pre-Windows 8 machines.
+// This may return null.
+MethodTable* FrameworkExceptionLoader::GetException(RuntimeExceptionKind kind)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+
+ PRECONDITION(kind > kLastExceptionInMscorlib);
+ PRECONDITION(kind - (kLastExceptionInMscorlib + 1) < COUNTOF(g_ExceptionLocationData) - 1);
+ }
+ CONTRACTL_END;
+
+ // This is for loading rarely-used exception objects in arbitrary appdomains.
+ // The loader should do caching - let's not create a multi-appdomain cache of these exception types here.
+ // Note that some assemblies, like System.Runtime.WindowsRuntime, might not be installed on pre-Windows 8 machines.
+ int index = kind - (kLastExceptionInMscorlib + 1);
+ ExceptionLocationData exData = g_ExceptionLocationData[index];
+ _ASSERTE(exData.Name != NULL && exData.AssemblySimpleName != NULL && exData.PublicKeyToken != NULL); // Was the exception defined in mscorlib instead?
+ StackSString assemblyQualifiedName;
+ _ASSERTE(exData.Namespace != NULL); // If we need to support stuff in a global namespace, fix this.
+ assemblyQualifiedName.SetUTF8(exData.Namespace);
+ assemblyQualifiedName.AppendUTF8(".");
+ assemblyQualifiedName.AppendUTF8(exData.Name);
+ assemblyQualifiedName.AppendUTF8(", ");
+ assemblyQualifiedName.AppendUTF8(exData.AssemblySimpleName);
+ assemblyQualifiedName.AppendUTF8(", PublicKeyToken=");
+ assemblyQualifiedName.AppendUTF8(exData.PublicKeyToken);
+ assemblyQualifiedName.AppendUTF8(", Version=");
+ assemblyQualifiedName.AppendUTF8(VER_ASSEMBLYVERSION_STR);
+ assemblyQualifiedName.AppendUTF8(", Culture=neutral");
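+ // Illustrative result (hypothetical values):
+ //   "Some.Namespace.SomeException, Some.Assembly, PublicKeyToken=<token>, Version=<version>, Culture=neutral"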
+
+ MethodTable* pMT = NULL;
+ // Loading will either succeed or throw a FileLoadException. Catch & swallow that exception.
+ EX_TRY
+ {
+ pMT = TypeName::GetTypeFromAsmQualifiedName(assemblyQualifiedName.GetUnicode(), FALSE).GetMethodTable();
+
+ // Since this type is from another assembly, we must ensure that assembly has been sufficiently loaded.
+ pMT->EnsureActive();
+ }
+ EX_CATCH
+ {
+ Exception *ex = GET_EXCEPTION();
+
+ // Let non-file-not-found exceptions propagate
+ if (EEFileLoadException::GetFileLoadKind(ex->GetHR()) != kFileNotFoundException)
+ EX_RETHROW;
+
+ // Return COMException if we can't load the assembly we expect.
+ pMT = MscorlibBinder::GetException(kCOMException);
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ return pMT;
+}
+
+void FrameworkExceptionLoader::GetExceptionName(RuntimeExceptionKind kind, SString & exceptionName)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ PRECONDITION(kind > kLastExceptionInMscorlib);
+ PRECONDITION(kind - (kLastExceptionInMscorlib + 1) < COUNTOF(g_ExceptionLocationData) - 1);
+ } CONTRACTL_END;
+
+ // Index past the exceptions defined in mscorlib, matching the lookup in GetException above.
+ int index = kind - (kLastExceptionInMscorlib + 1);
+ exceptionName.SetUTF8(g_ExceptionLocationData[index].Namespace);
+ exceptionName.AppendUTF8(".");
+ exceptionName.AppendUTF8(g_ExceptionLocationData[index].Name);
+}
diff --git a/src/vm/frameworkexceptionloader.h b/src/vm/frameworkexceptionloader.h
new file mode 100644
index 0000000000..cbb5ea33ed
--- /dev/null
+++ b/src/vm/frameworkexceptionloader.h
@@ -0,0 +1,27 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// Just the subset of functionality from the MscorlibBinder necessary for exceptions.
+#ifndef _FRAMEWORKEXCEPTIONLOADER_H_
+#define _FRAMEWORKEXCEPTIONLOADER_H_
+
+#include "runtimeexceptionkind.h"
+
+class MethodTable;
+
+// For loading exception types that are not defined in mscorlib.dll
+class FrameworkExceptionLoader
+{
+ public:
+ //
+ // Utilities for exceptions
+ //
+
+ static MethodTable *GetException(RuntimeExceptionKind kind);
+
+ static void GetExceptionName(RuntimeExceptionKind kind, SString & exceptionName);
+};
+
+#endif // _FRAMEWORKEXCEPTIONLOADER_H_
diff --git a/src/vm/fusionbind.cpp b/src/vm/fusionbind.cpp
new file mode 100644
index 0000000000..7dd5f5b6eb
--- /dev/null
+++ b/src/vm/fusionbind.cpp
@@ -0,0 +1,662 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: FusionBind.cpp
+**
+** Purpose: Implements the fusion binding interface
+**
+**
+
+
+===========================================================*/
+
+#include "common.h"
+
+#include <stdlib.h>
+#include "fusionbind.h"
+#include "shimload.h"
+#include "eventtrace.h"
+#include "strongnameholders.h"
+
+HRESULT BaseAssemblySpec::ParseName()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ GC_NOTRIGGER;
+ NOTHROW;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (!m_pAssemblyName)
+ return S_OK;
+
+ CQuickBytes ssName;
+
+ hr = ssName.ConvertUtf8_UnicodeNoThrow(m_pAssemblyName);
+
+ if (SUCCEEDED(hr))
+ {
+ NonVMComHolder<IAssemblyName> pName;
+
+ IfFailRet(CreateAssemblyNameObject(&pName, (LPCWSTR) ssName.Ptr(), CANOF_PARSE_DISPLAY_NAME, NULL));
+
+ if (m_ownedFlags & NAME_OWNED)
+ delete [] m_pAssemblyName;
+ m_pAssemblyName = NULL;
+
+ hr = Init(pName);
+ }
+
+ return hr;
+}
+
+void BaseAssemblySpec::GetFileOrDisplayName(DWORD flags, SString &result) const
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ INJECT_FAULT(ThrowOutOfMemory());
+ PRECONDITION(CheckValue(result));
+ PRECONDITION(result.IsEmpty());
+ }
+ CONTRACTL_END;
+
+ if (m_pAssemblyName != NULL) {
+ NonVMComHolder<IAssemblyName> pFusionName;
+ IfFailThrow(CreateFusionName(&pFusionName));
+
+ FusionBind::GetAssemblyNameDisplayName(pFusionName, result, flags);
+ }
+ else
+ result.Set(m_wszCodeBase);
+}
+
+HRESULT AssemblySpec::LoadAssembly(IApplicationContext* pFusionContext,
+ FusionSink *pSink,
+ IAssembly** ppIAssembly,
+ IHostAssembly** ppIHostAssembly,
+ IBindResult** ppNativeFusionAssembly,
+ BOOL fForIntrospectionOnly,
+ BOOL fSuppressSecurityChecks)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(ThrowOutOfMemory());
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+
+ if (!IsAfContentType_Default(m_dwFlags))
+ { // Fusion can process only Default ContentType (non-WindowsRuntime)
+ IfFailThrow(COR_E_BADIMAGEFORMAT);
+ }
+
+ NonVMComHolder<IAssembly> pIAssembly(NULL);
+ NonVMComHolder<IBindResult> pNativeFusionAssembly(NULL);
+ NonVMComHolder<IHostAssembly> pIHostAssembly(NULL);
+ NonVMComHolder<IAssemblyName> pSpecName;
+ NonVMComHolder<IAssemblyName> pCodeBaseName;
+
+
+ BOOL fFXOnly = FALSE;
+ DWORD size = sizeof(fFXOnly);
+
+ hr = pFusionContext->Get(ACTAG_FX_ONLY, &fFXOnly, &size, 0);
+ if(FAILED(hr))
+ {
+ // just in case the failed Get call corrupted fFXOnly
+ fFXOnly = FALSE;
+ }
+
+ // reset hr
+ hr = E_FAIL;
+
+ // Make sure we don't have malformed names
+
+ if (m_pAssemblyName)
+ IfFailGo(FusionBind::VerifyBindingString(m_pAssemblyName));
+
+ if (m_context.szLocale)
+ IfFailGo(FusionBind::VerifyBindingString(m_context.szLocale));
+
+ // If we have assembly name info, first bind using that
+ if (m_pAssemblyName != NULL) {
+ IfFailGo(CreateFusionName(&pSpecName, FALSE));
+
+ if(m_fParentLoadContext == LOADCTX_TYPE_UNKNOWN)
+ {
+ BOOL bOptionallyRetargetable;
+ IfFailGo(IsOptionallyRetargetableAssembly(pSpecName, &bOptionallyRetargetable));
+ if (bOptionallyRetargetable)
+ return HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND); // do not propagate to the load; let the resolve event handle it
+ }
+
+ hr = FusionBind::RemoteLoad(pFusionContext, pSink,
+ pSpecName, GetParentIAssembly(), NULL,
+ &pIAssembly, &pIHostAssembly, &pNativeFusionAssembly, fForIntrospectionOnly, fSuppressSecurityChecks);
+ }
+
+
+ // Now, bind using the codebase.
+ if (FAILED(hr) && !fFXOnly && m_wszCodeBase) {
+ // No resolution by code base for SQL-hosted environment, except for introspection
+ if((!fForIntrospectionOnly) && CorHost2::IsLoadFromBlocked())
+ {
+ hr = FUSION_E_LOADFROM_BLOCKED;
+ goto ErrExit;
+ }
+ IfFailGo(CreateAssemblyNameObject(&pCodeBaseName, NULL, 0, NULL));
+
+ IfFailGo(pCodeBaseName->SetProperty(ASM_NAME_CODEBASE_URL,
+ (void*)m_wszCodeBase,
+ (DWORD)(wcslen(m_wszCodeBase) + 1) * sizeof(WCHAR)));
+
+ // Note that we cannot bind a native image using a codebase, as it will
+ // always be in the LoadFrom context which does not support native images.
+
+ pSink->Reset();
+ hr = FusionBind::RemoteLoad(pFusionContext, pSink,
+ pCodeBaseName, NULL, m_wszCodeBase,
+ &pIAssembly, &pIHostAssembly, &pNativeFusionAssembly, fForIntrospectionOnly, fSuppressSecurityChecks);
+
+ // If we had both name info and codebase, make sure they are consistent.
+ if (SUCCEEDED(hr) && m_pAssemblyName != NULL) {
+
+ NonVMComHolder<IAssemblyName> pPolicyRefName(NULL);
+ if (!fForIntrospectionOnly) {
+ // Get post-policy ref, because we'll be comparing
+ // it against a post-policy def
+ HRESULT policyHr = PreBindAssembly(pFusionContext,
+ pSpecName,
+ NULL, // pAsmParent
+ &pPolicyRefName,
+ NULL); // pvReserved
+ if (FAILED(policyHr) && (policyHr != FUSION_E_REF_DEF_MISMATCH) &&
+ (policyHr != E_INVALIDARG)) // partial ref
+ IfFailGo(policyHr);
+ }
+
+ NonVMComHolder<IAssemblyName> pBoundName;
+ if (pIAssembly == NULL)
+ IfFailGo(pIHostAssembly->GetAssemblyNameDef(&pBoundName));
+ else
+ IfFailGo(pIAssembly->GetAssemblyNameDef(&pBoundName));
+
+ // Order matters: Ref->IsEqual(Def)
+ HRESULT equalHr;
+ if (pPolicyRefName)
+ equalHr = pPolicyRefName->IsEqual(pBoundName, ASM_CMPF_DEFAULT);
+ else
+ equalHr = pSpecName->IsEqual(pBoundName, ASM_CMPF_DEFAULT);
+ if (equalHr != S_OK)
+ {
+ // post-policy name is pBoundName and it's not correct for the
+ // original name, so we need to clear it
+ ReleaseNameAfterPolicy();
+ IfFailGo(FUSION_E_REF_DEF_MISMATCH);
+ }
+ }
+ }
+
+ // We should have found an assembly by now.
+ IfFailGo(hr);
+
+ // <NOTE> Comment about the comment below. The work is done in fusion now.
+ // But we still keep the comment here to illustrate the problem. </NOTE>
+
+ // Until we can create multiple Assembly objects for a single HMODULE
+ // we can only store one IAssembly* per Assembly. It is very important
+ // to maintain the IAssembly* for an image that is in the load-context.
+ // An Assembly in the load-from-context can bind to an assembly in the
+ // load-context but not vice-versa. Therefore, if we ever get an IAssembly
+ // from the load-from-context we must make sure that it will never be
+ // found using a load. If it were, then we could end up with Assembly dependencies
+ // that are wrong. For example, if I do a LoadFrom() on an assembly in the GAC
+ // and it requires another Assembly that I have preloaded in the load-from-context
+ // then that dependency gets burnt into the Jitted code. Later on a Load() is
+ // done on the assembly in the GAC and we single instance it back to the one
+ // we have gotten from the load-from-context because the HMODULES are the same.
+ // Now the dependency is wrong because it would not have the preloaded assembly
+ // if the order was reversed.
+
+#if 0
+ if (!fForIntrospectionOnly)
+ {
+ NonVMComHolder<IFusionLoadContext> pLoadContext;
+ if (pIAssembly == NULL)
+ IfFailGo(pIHostAssembly->GetFusionLoadContext(&pLoadContext));
+ else
+ IfFailGo(pIAssembly->GetFusionLoadContext(&pLoadContext));
+
+ if (pLoadContext->GetContextType() == LOADCTX_TYPE_LOADFROM) {
+ _ASSERTE(pIAssembly != NULL);
+ HRESULT hrLocal;
+
+ NonVMComHolder<IAssemblyName> pBoundName;
+ pIAssembly->GetAssemblyNameDef(&pBoundName);
+
+ // We need to copy the bound name to modify it
+ IAssemblyName *pClone;
+ IfFailGo(pBoundName->Clone(&pClone));
+ pBoundName.Release();
+ pBoundName = pClone;
+
+ // Null out the architecture for the second bind
+ IfFailGo(pBoundName->SetProperty(ASM_NAME_ARCHITECTURE, NULL, 0));
+
+ NonVMComHolder<IAssembly> pAliasingAssembly;
+ NonVMComHolder<IHostAssembly> pIHA;
+ pSink->Reset();
+ hrLocal = FusionBind::RemoteLoad(pFusionContext, pSink,
+ pBoundName, NULL, NULL,
+ &pAliasingAssembly, &pIHA, fForIntrospectionOnly);
+
+ if(SUCCEEDED(hrLocal)) {
+ // If the paths are the same or the loadfrom assembly is in the GAC,
+ // then use the non-LoadFrom assembly as the result.
+
+ DWORD location;
+ hrLocal = pIAssembly->GetAssemblyLocation(&location);
+ BOOL alias = (SUCCEEDED(hrLocal) && location == ASMLOC_GAC);
+
+ if (!alias) {
+ SString boundPath;
+ GetAssemblyManifestModulePath(pIAssembly, boundPath);
+
+ SString aliasingPath;
+ GetAssemblyManifestModulePath(pAliasingAssembly, aliasingPath);
+
+ alias = SString::_wcsicmp(boundPath, aliasingPath) == 0;
+ }
+
+ // Keep the default context's IAssembly if the paths are the same
+ if (alias)
+ pIAssembly = pAliasingAssembly.Extract();
+ }
+ }
+ }
+#endif
+
+ if (SUCCEEDED(hr)) {
+ if (pIAssembly == NULL)
+ *ppIHostAssembly = pIHostAssembly.Extract();
+ else
+ *ppIAssembly = pIAssembly.Extract();
+ if (ppNativeFusionAssembly) {
+ *ppNativeFusionAssembly = pNativeFusionAssembly.Extract();
+ }
+ }
+
+ ErrExit:
+ return hr;
+}
+
+
+/* static */
+HRESULT FusionBind::RemoteLoad(IApplicationContext* pFusionContext,
+ FusionSink *pSink,
+ IAssemblyName *pName,
+ IAssembly *pParentAssembly,
+ LPCWSTR pCodeBase,
+ IAssembly** ppIAssembly,
+ IHostAssembly** ppIHostAssembly,
+ IBindResult **ppNativeFusionAssembly,
+ BOOL fForIntrospectionOnly,
+ BOOL fSuppressSecurityChecks)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_PREEMPTIVE;
+ // The resulting IP must be held so the assembly will not be scavenged.
+ PRECONDITION(CheckPointer(ppIAssembly));
+ PRECONDITION(CheckPointer(ppIHostAssembly));
+
+ PRECONDITION(CheckPointer(pName));
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ } CONTRACTL_END;
+
+ ETWOnStartup (FusionBinding_V1, FusionBindingEnd_V1);
+
+ HRESULT hr;
+ ASM_BIND_FLAGS dwFlags = ASM_BINDF_NONE;
+ DWORD dwReserved = 0;
+ LPVOID pReserved = NULL;
+
+ // Event Tracing for Windows is used to log data for performance and functional testing purposes.
+ // The events below are used to help measure the performance of the download phase of assembly binding (be it download of a remote file or accessing a local file on disk),
+ // as well as of lookup scenarios such as from a host store.
+ DWORD dwAppDomainId = ETWAppDomainIdNotAvailable;
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_PRIVATEBINDING_KEYWORD)) {
+ DWORD cbValue = sizeof(dwAppDomainId);
+ // Gather data used by ETW events later in this function.
+ if (pFusionContext == NULL || FAILED(pFusionContext->Get(ACTAG_APP_DOMAIN_ID, &dwAppDomainId, &cbValue, 0))) {
+ dwAppDomainId = ETWAppDomainIdNotAvailable;
+ }
+ }
+
+ NonVMComHolder< IUnknown > pSinkIUnknown(NULL);
+ NonVMComHolder< IAssemblyNameBinder> pBinder(NULL);
+ *ppNativeFusionAssembly=NULL;
+
+ if(pParentAssembly != NULL) {
+ // Only use a parent assembly hint when the parent assembly has a load context.
+ // Assemblies in the anonymous context are not discoverable by the loader's binding rules,
+ // thus the loader can't find their dependencies.
+ // The loader will only try to locate dependencies in the default load context.
+ if (pParentAssembly->GetFusionLoadContext() != LOADCTX_TYPE_UNKNOWN) {
+ dwReserved = sizeof(IAssembly*);
+ pReserved = (LPVOID) pParentAssembly;
+ dwFlags = ASM_BINDF_PARENT_ASM_HINT;
+ }
+ }
+
+ IfFailRet(pSink->AssemblyResetEvent());
+ IfFailRet(pSink->QueryInterface(IID_IUnknown, (void**)&pSinkIUnknown));
+ IUnknown *pFusionAssembly=NULL;
+ IUnknown *pNativeAssembly=NULL;
+ BOOL fCached = TRUE;
+
+
+ if (fForIntrospectionOnly)
+ {
+ dwFlags = (ASM_BIND_FLAGS)(dwFlags | ASM_BINDF_INSPECTION_ONLY);
+ }
+
+ if (fSuppressSecurityChecks)
+ {
+ dwFlags = (ASM_BIND_FLAGS)(dwFlags | ASM_BINDF_SUPPRESS_SECURITY_CHECKS);
+ }
+
+ IfFailRet(pName->QueryInterface(IID_IAssemblyNameBinder, (void **)&pBinder));
+ {
+ // In SQL, this can call back into the runtime
+ CONTRACT_VIOLATION(ThrowsViolation);
+ hr = pBinder->BindToObject(IID_IAssembly,
+ pSinkIUnknown,
+ pFusionContext,
+ pCodeBase,
+ dwFlags,
+ pReserved,
+ dwReserved,
+ (void**) &pFusionAssembly,
+ (void**)&pNativeAssembly);
+ }
+
+ if(hr == E_PENDING) {
+ // If there is an assembly IP then we were successful.
+ hr = pSink->Wait();
+ if (SUCCEEDED(hr))
+ hr = pSink->LastResult();
+ if(SUCCEEDED(hr)) {
+ if(pSink->m_punk) {
+ if (pSink->m_pNIunk)
+ pNativeAssembly=pSink->m_pNIunk;
+ pFusionAssembly = pSink->m_punk;
+ fCached = FALSE;
+ }
+ else
+ hr = HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
+ }
+ }
+
+ FireEtwBindingDownloadPhaseEnd(dwAppDomainId, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable, pCodeBase, NULL, GetClrInstanceId());
+
+ FireEtwBindingLookupAndProbingPhaseEnd(dwAppDomainId, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable, pCodeBase, NULL, GetClrInstanceId());
+
+ if (SUCCEEDED(hr)) {
+ // Keep a handle to ensure it does not disappear from the cache
+ // and allow access to modules associated with the assembly.
+ hr = pFusionAssembly->QueryInterface(IID_IAssembly,
+ (void**) ppIAssembly);
+ if (hr == E_NOINTERFACE) // IStream assembly
+ hr = pFusionAssembly->QueryInterface(IID_IHostAssembly,
+ (void**) ppIHostAssembly);
+ if (SUCCEEDED(hr) && pNativeAssembly)
+ hr=pNativeAssembly->QueryInterface(IID_IBindResult,
+ (void**)ppNativeFusionAssembly);
+
+ if (fCached)
+ {
+ pFusionAssembly->Release();
+ if(pNativeAssembly)
+ pNativeAssembly->Release();
+ }
+ }
+
+ return hr;
+}
+
+/* static */
+HRESULT FusionBind::RemoteLoadModule(IApplicationContext * pFusionContext,
+ IAssemblyModuleImport* pModule,
+ FusionSink *pSink,
+ IAssemblyModuleImport** pResult)
+{
+
+ CONTRACTL
+ {
+ NOTHROW;
+ PRECONDITION(CheckPointer(pFusionContext));
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(pSink));
+ PRECONDITION(CheckPointer(pResult));
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ } CONTRACTL_END;
+
+ ETWOnStartup (FusionBinding_V1, FusionBindingEnd_V1);
+
+ HRESULT hr;
+ IfFailGo(pSink->AssemblyResetEvent());
+ hr = pModule->BindToObject(pSink,
+ pFusionContext,
+ ASM_BINDF_NONE,
+ (void**) pResult);
+ if(hr == E_PENDING) {
+ // If there is an assembly IP then we were successful.
+ hr = pSink->Wait();
+ if (SUCCEEDED(hr))
+ hr = pSink->LastResult();
+ if (SUCCEEDED(hr)) {
+ if(pSink->m_punk)
+ hr = pSink->m_punk->QueryInterface(IID_IAssemblyModuleImport,
+ (void**) pResult);
+ else
+ hr = HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
+ }
+ }
+
+ ErrExit:
+ return hr;
+}
+
+
+/* static */
+HRESULT FusionBind::AddEnvironmentProperty(__in LPCWSTR variable,
+ __in LPCWSTR pProperty,
+ IApplicationContext* pFusionContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ PRECONDITION(CheckPointer(pProperty));
+ PRECONDITION(CheckPointer(variable));
+ PRECONDITION(CheckPointer(pFusionContext));
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ DWORD size = _MAX_PATH;
+ WCHAR rcValue[_MAX_PATH]; // Buffer for the directory.
+ WCHAR *pValue = &(rcValue[0]);
+ size = WszGetEnvironmentVariable(variable, pValue, size);
+ if(size > _MAX_PATH) {
+ pValue = (WCHAR*) _alloca(size * sizeof(WCHAR));
+ size = WszGetEnvironmentVariable(variable, pValue, size);
+ size++; // Add in the null terminator
+ }
+
+ if(size)
+ return pFusionContext->Set(pProperty,
+ pValue,
+ size * sizeof(WCHAR),
+ 0);
+ else
+ return S_FALSE; // no variable found
+}
+
+// Fusion uses a context class to drive resolution of assemblies.
+// Each application has properties that can be pushed into the
+// fusion context (see fusionp.h). The public API is part of
+// application domains.
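+//
+// Illustrative call (a sketch; the app base path is hypothetical):
+//
+//     IApplicationContext *pFusionContext = NULL;
+//     HRESULT hr = FusionBind::SetupFusionContext(W("c:\\myapp\\"), NULL, &pFusionContext);
+//     // On S_OK, pFusionContext carries ACTAG_APP_BASE_URL (and, if available,
+//     // the private bin path).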
+/* static */
+HRESULT FusionBind::SetupFusionContext(LPCWSTR szAppBase,
+ LPCWSTR szPrivateBin,
+ IApplicationContext** ppFusionContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ PRECONDITION(CheckPointer(ppFusionContext));
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ NonVMComHolder <IApplicationContext> pFusionContext;
+
+ LPCWSTR pBase;
+ // if the appbase is null then use the current directory
+ if (szAppBase == NULL) {
+ pBase = (LPCWSTR) _alloca(_MAX_PATH * sizeof(WCHAR));
+ if(!WszGetCurrentDirectory(_MAX_PATH, (LPWSTR) pBase))
+ IfFailGo(HRESULT_FROM_GetLastError());
+ }
+ else
+ pBase = szAppBase;
+
+
+ IfFailGo(CreateFusionContext(pBase, &pFusionContext));
+
+
+ IfFailGo((pFusionContext)->Set(ACTAG_APP_BASE_URL,
+ (void*) pBase,
+ (DWORD)(wcslen(pBase) + 1) * sizeof(WCHAR),
+ 0));
+
+ if (szPrivateBin)
+ IfFailGo((pFusionContext)->Set(ACTAG_APP_PRIVATE_BINPATH,
+ (void*) szPrivateBin,
+ (DWORD)(wcslen(szPrivateBin) + 1) * sizeof(WCHAR),
+ 0));
+ else
+ IfFailGo(AddEnvironmentProperty(APPENV_RELATIVEPATH, ACTAG_APP_PRIVATE_BINPATH, pFusionContext));
+
+ *ppFusionContext=pFusionContext;
+ pFusionContext.SuppressRelease();
+
+ErrExit:
+ return hr;
+}
+
+/* static */
+HRESULT FusionBind::CreateFusionContext(LPCWSTR pzName, IApplicationContext** ppFusionContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ PRECONDITION(CheckPointer(ppFusionContext));
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ // This is a file name, not a namespace
+ LPCWSTR contextName = NULL;
+
+ if(pzName) {
+ contextName = wcsrchr( pzName, W('\\') );
+ if(contextName)
+ contextName++;
+ else
+ contextName = pzName;
+ }
+ // We go off and create a fusion context for this application domain.
+ // Note: once it is created, it cannot be modified.
+ NonVMComHolder<IAssemblyName> pFusionAssemblyName;
+ HRESULT hr = CreateAssemblyNameObject(&pFusionAssemblyName, contextName, 0, NULL);
+
+ if(SUCCEEDED(hr))
+ hr = CreateApplicationContext(pFusionAssemblyName, ppFusionContext);
+
+ return hr;
+}
+
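+// Extracts the version directory name (the final component of the CLR system
+// directory) into pVersion; for an install under a path ending in
+// "...\v4.0.30319\" this would yield "v4.0.30319" (illustrative example).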
+/* static */
+HRESULT FusionBind::GetVersion(__out_ecount(*pdwVersion) LPWSTR pVersion, __inout DWORD* pdwVersion)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ PRECONDITION(CheckPointer(pdwVersion));
+ PRECONDITION(pdwVersion>0 && CheckPointer(pVersion));
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ DWORD dwCORSystem = 0;
+
+ LPCWSTR pCORSystem = GetInternalSystemDirectory(&dwCORSystem);
+
+ if (dwCORSystem == 0)
+ return E_FAIL;
+
+ dwCORSystem--; // remove the null character
+ if (dwCORSystem && pCORSystem[dwCORSystem-1] == W('\\'))
+ dwCORSystem--; // and the trailing slash if it exists
+
+ if (dwCORSystem==0)
+ return E_FAIL;
+
+ const WCHAR* pSeparator;
+ const WCHAR* pTail = pCORSystem + dwCORSystem;
+
+ for (pSeparator = pCORSystem+dwCORSystem-1; pSeparator > pCORSystem && *pSeparator != W('\\');pSeparator--);
+
+ if (*pSeparator == W('\\'))
+ pSeparator++;
+
+ DWORD lgth = (DWORD)(pTail - pSeparator);
+
+ if (lgth > *pdwVersion) {
+ *pdwVersion = lgth+1;
+ return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:26000) // "Disable PREFast/espX warning about buffer overflow"
+#endif
+ while(pSeparator < pTail)
+ *pVersion++ = *pSeparator++;
+
+ *pVersion = W('\0');
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+ return S_OK;
+} // FusionBind::GetVersion
+
diff --git a/src/vm/fusioninit.cpp b/src/vm/fusioninit.cpp
new file mode 100644
index 0000000000..9408c8b620
--- /dev/null
+++ b/src/vm/fusioninit.cpp
@@ -0,0 +1,625 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+
+#include "common.h"
+
+#include "clrhost.h"
+#include "helpers.h"
+#include "dbglog.h"
+#include "adl.h"
+#include "cacheutils.h"
+#include "installlogger.h"
+#include "actasm.h"
+#include "naming.h"
+#include "policy.h"
+#include "bindhelpers.h"
+#ifdef FEATURE_COMINTEROP
+#include "appxutil.h"
+#endif
+
+#include "msi.h"
+
+STDAPI InitializeNativeBinder();
+
+#define DEVOVERRIDE_PATH W(".local\\")
+#define REG_KEY_IMAGE_FILE_EXECUTION_OPTIONS W("Software\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options")
+#define REG_VAL_DEVOVERRIDE_PATH W("DevOverridePath")
+#define REG_VAL_DEVOVERRIDE_ENABLE W("DevOverrideEnable")
+#define REG_VAL_FUSION_FORCE_UNIFICATION W("OnlyUseLatestCLR")
+#define REG_VAL_USE_LEGACY_IDENTITY_FORMAT W("UseLegacyIdentityFormat")
+#define REG_VAL_ENABLE_MSI_IN_LOCAL_GAC W("FusionEnableMSIInLocalGac")
+#define REG_VAL_ENABLE_FORCED_FULL_CLOSURE_WALK W("FusionEnableForcedFullClosureWalk")
+
+// Registry / Environment variable
+#define BINDING_CONFIGURATION W("BindingConfiguration")
+
+extern HMODULE g_pMSCorEE;
+extern BOOL g_bRunningOnNT6OrHigher;
+
+WCHAR g_szWindowsDir[MAX_PATH+1];
+WCHAR g_FusionDllPath[MAX_PATH+1];
+HINSTANCE g_hInst = NULL;
+HMODULE g_hMSCorEE = NULL;
+DWORD g_dwLogInMemory;
+DWORD g_dwLogLevel;
+DWORD g_dwForceLog;
+DWORD g_dwLogFailures;
+DWORD g_dwLogResourceBinds;
+BOOL g_bBinderLoggingNeeded;
+DWORD g_dwDevOverrideEnable;
+WORD g_wProcessorArchitecture; // Default to 32 bit
+BOOL g_fWow64Process; // Wow64 Process
+PEKIND g_peKindProcess;
+List<CAssemblyDownload *> *g_pDownloadList;
+BOOL g_bLogToWininet;
+WCHAR g_wzCustomLogPath[MAX_PATH];
+DWORD g_dwConfigForceUnification;
+DWORD g_dwFileInUseRetryAttempts;
+DWORD g_dwFileInUseMillisecondsBetweenRetries;
+DWORD g_dwUseLegacyIdentityFormat;
+DWORD g_dwConfigEnableMSIInLocalGac = 0;
+DWORD g_dwConfigEnableForcedFullClosureWalk = 0;
+
+CRITSEC_COOKIE g_csInitClb;
+CRITSEC_COOKIE g_csConfigSettings;
+CRITSEC_COOKIE g_csSingleUse;
+CRITSEC_COOKIE g_csDownload;
+CRITSEC_COOKIE g_csBindLog;
+
+// Defined in peimage.cpp
+extern LCID g_lcid;
+
+IIdentityAuthority *g_pIdentityAuthority;
+CIdentityCache *g_pIdentityCache;
+
+WCHAR g_wzEXEPath[MAX_PATH+1];
+
+
+#ifdef _DEBUG
+BOOL g_bTagAssemblyNames;
+#endif //_DEBUG
+
+extern int SetOsFlag(void) ;
+
+// MSI
+DWORD g_dwDisableMSIPeek;
+typedef HRESULT (*pfnMsiProvideAssemblyW)(LPCWSTR wzAssemblyName, LPCWSTR szAppContext,
+ DWORD dwInstallMode, DWORD dwUnused,
+ LPWSTR lpPathBuf, DWORD *pcchPathBuf);
+typedef INSTALLUILEVEL (*pfnMsiSetInternalUI)(INSTALLUILEVEL dwUILevel, HWND *phWnd);
+typedef UINT (*pfnMsiInstallProductW)(LPCWSTR wzPackagePath, LPCWSTR wzCmdLine);
+
+pfnMsiProvideAssemblyW g_pfnMsiProvideAssemblyW;
+pfnMsiSetInternalUI g_pfnMsiSetInternalUI;
+pfnMsiInstallProductW g_pfnMsiInstallProductW;
+BOOL g_bCheckedMSIPresent;
+HMODULE g_hModMSI;
+
+WCHAR g_wzLocalDevOverridePath[MAX_PATH + 1];
+WCHAR g_wzGlobalDevOverridePath[MAX_PATH + 1];
+DWORD g_dwDevOverrideFlags;
+
+HRESULT GetScavengerQuotasFromReg(DWORD *pdwZapQuotaInGAC,
+ DWORD *pdwDownloadQuotaAdmin,
+ DWORD *pdwDownloadQuotaUser);
+
+HRESULT SetupDevOverride(LPCWSTR pwzBindingConfigDevOverridePath);
+
+static DWORD GetConfigDWORD(HKEY hkey, LPCWSTR wzName, DWORD dwDefault)
+{
+ LIMITED_METHOD_CONTRACT;
+ DWORD lResult;
+ DWORD dwSize;
+ DWORD dwType = REG_DWORD;
+ DWORD dwValue;
+
+ if (hkey == 0) {
+ return dwDefault;
+ }
+
+ dwSize = sizeof(DWORD);
+ lResult = WszRegQueryValueEx(hkey, wzName, NULL,
+ &dwType, (LPBYTE)&dwValue, &dwSize);
+ if (lResult != ERROR_SUCCESS || dwType != REG_DWORD) {
+ return dwDefault;
+ }
+
+ return dwValue;
+}
+
+BOOL InitFusionCriticalSections()
+{
+ WRAPPER_NO_CONTRACT;
+ BOOL fRet = FALSE;
+
+ g_csInitClb = ClrCreateCriticalSection(CrstFusionClb, CRST_REENTRANCY);
+ if (!g_csInitClb) {
+ goto Exit;
+ }
+
+ g_csDownload = ClrCreateCriticalSection(CrstFusionDownload, CRST_REENTRANCY);
+ if (!g_csDownload) {
+ goto Exit;
+ }
+
+ g_csBindLog = ClrCreateCriticalSection(CrstFusionLog, CRST_DEFAULT);
+ if (!g_csBindLog) {
+ goto Exit;
+ }
+
+ g_csSingleUse = ClrCreateCriticalSection(CrstFusionSingleUse, CRST_DEFAULT);
+ if (!g_csSingleUse) {
+ goto Exit;
+ }
+
+ // <TODO> Get rid of this critical section</TODO>
+ g_csConfigSettings = ClrCreateCriticalSection(CrstFusionConfigSettings, CRST_DEFAULT);
+ if (!g_csConfigSettings) {
+ goto Exit;
+ }
+
+ fRet = TRUE;
+
+Exit:
+ return fRet;
+}
+
+// ensure that the symbol will be exported properly
+extern "C" HRESULT STDMETHODCALLTYPE InitializeFusion();
+
+HRESULT STDMETHODCALLTYPE InitializeFusion()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ static BOOL bInitialized = FALSE;
+ LPWSTR pwzBindingConfigAssemblyStorePath = NULL;
+ LPWSTR pwzBindingConfigDevOverridePath = NULL;
+ ReleaseHolder<CNodeFactory> pNodeFact;
+ DWORD dwSize = 0;
+ BOOL fExecutableIsKnown = FALSE;
+ LPWSTR pwzFileName = NULL;
+ WCHAR wzBindingConfigPath[MAX_PATH + 1];
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+
+ if (bInitialized) {
+ hr = S_OK;
+ goto Exit;
+ }
+
+ g_hInst = g_pMSCorEE;
+
+ SetOsFlag();
+
+ g_lcid = MAKELCID(LOCALE_INVARIANT, SORT_DEFAULT);
+
+ WszGetModuleFileName(g_hInst, g_FusionDllPath, MAX_PATH);
+
+ if (!InitFusionCriticalSections()) {
+ hr = E_OUTOFMEMORY;
+ goto Exit;
+ }
+
+ g_wProcessorArchitecture = g_SystemInfo.wProcessorArchitecture;
+
+ g_fWow64Process = RunningInWow64();
+
+ if (IsProcess32()) {
+ g_peKindProcess = (g_wProcessorArchitecture==PROCESSOR_ARCHITECTURE_INTEL)?peI386:peARM;
+ }
+ else {
+ g_peKindProcess = (g_wProcessorArchitecture==PROCESSOR_ARCHITECTURE_IA64)?peIA64:peAMD64;
+ }
+
+ if (!g_pIdentityAuthority) {
+ IIdentityAuthority *pAuth = NULL;
+
+ hr = GetIdentityAuthority(&pAuth);
+ if (FAILED(hr)) {
+ goto Exit;
+ }
+
+ if (InterlockedCompareExchangeT(&g_pIdentityAuthority, pAuth, NULL) != NULL) {
+ SAFE_RELEASE(pAuth);
+ }
+ }
+
+ if (!g_pIdentityCache) {
+ CIdentityCache *pIdentityCache;
+
+ hr = CIdentityCache::Create(&pIdentityCache);
+ if (FAILED(hr)) {
+ goto Exit;
+ }
+
+ if (InterlockedCompareExchangeT(&g_pIdentityCache, pIdentityCache, NULL) != NULL) {
+ SAFEDELETE(pIdentityCache);
+ }
+ }
+
+ DWORD lResult;
+ HKEY hKey;
+
+ lResult = FusionRegOpenKeyEx(HKEY_LOCAL_MACHINE, REG_KEY_FUSION_SETTINGS, 0, KEY_READ, &hKey);
+ if (lResult != ERROR_SUCCESS) {
+ hKey = 0;
+ }
+
+#define _GetConfigDWORD(name, default) GetConfigDWORD(hKey, name, default)
+
+ // Get this executable's filename
+ fExecutableIsKnown = WszGetModuleFileName(NULL, g_wzEXEPath, MAX_PATH);
+ if(!fExecutableIsKnown) {
+ hr = StringCbCopy(g_wzEXEPath, sizeof(g_wzEXEPath), W("Unknown"));
+ if (FAILED(hr)) {
+ goto Exit;
+ }
+ }
+
+ if (g_bRunningOnNT6OrHigher) {
+ //
+ // BINDINGCONFIGURATION
+ //
+ // Obtain the path to an override xml file via the registry for
+ // this executable or from the environment variable
+ //
+ wzBindingConfigPath[0] = W('\0');
+
+ // If there is a BindingConfiguration entry for this
+ // executable, then read it
+ pwzFileName = PathFindFileName(g_wzEXEPath);
+ if(fExecutableIsKnown && pwzFileName) {
+ WCHAR wzValue[MAX_PATH + 1];
+ HKEY hKeyExeName = NULL;
+ DWORD dwType = REG_SZ;
+
+ wzValue[0] = W('\0');
+
+ // key name + '\' + filename + null
+ if(lstrlenW(REG_KEY_IMAGE_FILE_EXECUTION_OPTIONS) + 1 + lstrlenW(pwzFileName) + 1 > MAX_PATH) {
+ hr = HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ goto Exit;
+ }
+
+ hr = StringCchPrintf(wzValue, MAX_PATH, W("%ws\\%ws"),
+ REG_KEY_IMAGE_FILE_EXECUTION_OPTIONS, pwzFileName);
+ if (FAILED(hr)) {
+ goto Exit;
+ }
+
+ lResult = FusionRegOpenKeyEx(HKEY_LOCAL_MACHINE, wzValue, 0, KEY_READ, &hKeyExeName);
+ if(lResult != ERROR_SUCCESS) {
+ if(lResult != ERROR_FILE_NOT_FOUND) {
+ hr = HRESULT_FROM_WIN32(lResult);
+ goto Exit;
+ }
+ }
+ else { // Success
+ dwSize = MAX_PATH * sizeof(WCHAR);
+ wzValue[0] = W('\0');
+
+ lResult = WszRegQueryValueEx(hKeyExeName, BINDING_CONFIGURATION, NULL, &dwType, (LPBYTE)wzValue, &dwSize);
+ RegCloseKey(hKeyExeName);
+ if(lResult != ERROR_SUCCESS) {
+ if(lResult != ERROR_FILE_NOT_FOUND) {
+ hr = HRESULT_FROM_WIN32(lResult);
+ goto Exit;
+ }
+ }
+ else { // Success
+ if(dwType != REG_SZ) {
+ hr = HRESULT_FROM_WIN32(ERROR_BADKEY);
+ goto Exit;
+ }
+
+ if(wzValue[0]) {
+ hr = StringCbCopy(wzBindingConfigPath, sizeof(wzBindingConfigPath), wzValue);
+ if (FAILED(hr)) {
+ goto Exit;
+ }
+ }
+ }
+ }
+ }
+
+ // If we didn't get a path from the registry,
+ // try the ENV variable.
+ if(!wzBindingConfigPath[0]) {
+ dwSize = WszGetEnvironmentVariable(BINDING_CONFIGURATION, wzBindingConfigPath, MAX_PATH);
+ if(dwSize > MAX_PATH) {
+ hr = HRESULT_FROM_WIN32(ERROR_BUFFER_OVERFLOW);
+ goto Exit;
+ }
+ }
+
+ // We have a path to XML config override file
+ if(wzBindingConfigPath[0]) {
+ hr = ParseXML(&pNodeFact, wzBindingConfigPath, NULL, FALSE);
+ if (FAILED(hr)) {
+ goto Exit;
+ }
+
+ // Get AssemblyStore override path
+ hr = pNodeFact->GetAssemblyStorePath(&pwzBindingConfigAssemblyStorePath);
+ if(FAILED(hr)) {
+ goto Exit;
+ }
+
+ // Get DevOverride path
+ hr = pNodeFact->GetDevOverridePath(&pwzBindingConfigDevOverridePath);
+ if(FAILED(hr)) {
+ goto Exit;
+ }
+ }
+ }
+
+ //
+ // end BindingConfiguration
+ //
+
+ hr = SetRootCachePath(pwzBindingConfigAssemblyStorePath);
+ if(FAILED(hr)) {
+ goto Exit;
+ }
+
+ GetScavengerQuotasFromReg(NULL, NULL, NULL);
+
+#ifdef _DEBUG
+ g_bTagAssemblyNames = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TagAssemblyNames);
+#endif //_DEBUG
+
+ // Machine level logging settings
+ g_dwLogInMemory = _GetConfigDWORD(REG_VAL_FUSION_LOG_ENABLE, 0);
+ g_dwLogLevel = _GetConfigDWORD(REG_VAL_FUSION_LOG_LEVEL, 1);
+ g_dwForceLog = _GetConfigDWORD(REG_VAL_FUSION_LOG_FORCE, 0);
+ g_dwLogFailures = _GetConfigDWORD(REG_VAL_FUSION_LOG_FAILURES, 0);
+ g_dwLogResourceBinds = _GetConfigDWORD(REG_VAL_FUSION_LOG_RESOURCE_BINDS, 0);
+
+ BOOL fusionMessagesAreEnabled = ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, FusionMessageEvent);
+ g_bBinderLoggingNeeded = !!(fusionMessagesAreEnabled || g_dwLogFailures || g_dwForceLog || g_dwLogResourceBinds || g_dwLogInMemory);
+
+ g_dwConfigForceUnification = REGUTIL::GetConfigDWORD_DontUse_(REG_VAL_FUSION_FORCE_UNIFICATION, 0);
+
+ // Settings to indicate how many times to retry opening a hard-locked file for reading, moving/renaming or deletion.
+ // Used to configure CreateFileWithRetries and MoveFileWithRetries in fusion\Utils\Helpers.cpp
+ // These are used mainly for GAC Installation where anti-virus and indexing programs can temporarily hard-lock files.
+ // g_dwFileInUseRetryAttempts does not include the first attempt to open the file (hence set it to 0 to prevent retries).
+ g_dwFileInUseRetryAttempts = _GetConfigDWORD(REG_VAL_FUSION_FILE_IN_USE_RETRY_ATTEMPTS, FILE_IN_USE_RETRY_ATTEMPTS);
+ if (g_dwFileInUseRetryAttempts > MAX_FILE_IN_USE_RETRY_ATTEMPTS)
+ g_dwFileInUseRetryAttempts = MAX_FILE_IN_USE_RETRY_ATTEMPTS;
+ g_dwFileInUseMillisecondsBetweenRetries = _GetConfigDWORD(REG_VAL_FUSION_FILE_IN_USE_MILLISECONDS_BETWEEN_RETRIES, FILE_IN_USE_MILLISECONDS_BETWEEN_RETRIES);
+ if (g_dwFileInUseMillisecondsBetweenRetries > MAX_FILE_IN_USE_MILLISECONDS_BETWEEN_RETRIES)
+ g_dwFileInUseMillisecondsBetweenRetries = MAX_FILE_IN_USE_MILLISECONDS_BETWEEN_RETRIES;
+
+ // This setting is only relevant in the unsupported case where we're running out of a local GAC
+ // g_bUseDefaultStore is initialized in SetRootCachePath above
+ if (!g_bUseDefaultStore)
+ {
+ g_dwConfigEnableMSIInLocalGac = REGUTIL::GetConfigDWORD_DontUse_(REG_VAL_ENABLE_MSI_IN_LOCAL_GAC, 0);
+ }
+ g_dwConfigEnableForcedFullClosureWalk = REGUTIL::GetConfigDWORD_DontUse_(REG_VAL_ENABLE_FORCED_FULL_CLOSURE_WALK, 0);
+
+ g_dwUseLegacyIdentityFormat = AppX::IsAppXProcess() || _GetConfigDWORD(REG_VAL_USE_LEGACY_IDENTITY_FORMAT, 0);
+ g_dwDisableMSIPeek = _GetConfigDWORD(W("DisableMSIPeek"), FALSE);
+ g_dwDisableMSIPeek |= CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DisableMSIPeek);
+
+ if (hKey) {
+ RegCloseKey(hKey);
+ hKey = NULL;
+ }
+
+ {
+ WCHAR wzBuf[MAX_PATH];
+
+ wzBuf[0] = W('\0');
+ dwSize = WszGetEnvironmentVariable(W("USE_LEGACY_IDENTITY_FORMAT"), wzBuf, MAX_PATH);
+ if (dwSize == 1 && !FusionCompareString(wzBuf, W("1"))) {
+ g_dwUseLegacyIdentityFormat = 1;
+ }
+ }
+
+ if (IsLoggingNeeded()) {
+ g_bLogToWininet = TRUE;
+ dwSize = MAX_PATH;
+ DWORD dwAttr;
+ BOOL fExists;
+ g_wzCustomLogPath[0] = W('\0');
+ GetCustomLogPath(g_wzCustomLogPath, &dwSize);
+ if (g_wzCustomLogPath[0] != W('\0')) {
+ if(SUCCEEDED(CheckFileExistence(g_wzCustomLogPath, &fExists, &dwAttr)) &&
+ fExists && (dwAttr & FILE_ATTRIBUTE_DIRECTORY)) {
+ g_bLogToWininet = FALSE;
+ }
+ }
+ }
+
+ // make devoverride Longhorn (Vista) and higher only
+ if (g_bRunningOnNT6OrHigher) {
+ hr = SetupDevOverride(pwzBindingConfigDevOverridePath);
+ if (FAILED(hr)) {
+ goto Exit;
+ }
+ }
+
+ g_pDownloadList = new (nothrow) List<CAssemblyDownload *>;
+ if (!g_pDownloadList) {
+ hr = E_OUTOFMEMORY;
+ goto Exit;
+ }
+
+ g_pAssemblyFingerprintCache = new (nothrow) CAssemblyFingerprintCache;
+ if (!g_pAssemblyFingerprintCache) {
+ hr = E_OUTOFMEMORY;
+ goto Exit;
+ }
+
+ hr = g_pAssemblyFingerprintCache->Init();
+ if (FAILED(hr))
+ {
+ goto Exit;
+ }
+
+ bInitialized = TRUE;
+
+ hr = InitializeNativeBinder();
+ if (FAILED(hr))
+ {
+ goto Exit;
+ }
+
+Exit:
+ SAFEDELETEARRAY(pwzBindingConfigAssemblyStorePath);
+ SAFEDELETEARRAY(pwzBindingConfigDevOverridePath);
+
+ END_SO_INTOLERANT_CODE;
+
+ return hr;
+}
+
+HRESULT SetupDevOverride(LPCWSTR pwzBindingConfigDevOverridePath)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // g_wzLocalDevOverridePath[0] = W('\0');
+ // g_wzGlobalDevOverridePath[0] = W('\0');
+ g_dwDevOverrideFlags = 0;
+
+ DWORD dwFileAttr;
+ DWORD dwType;
+ DWORD dwSize;
+ LONG lResult = 0;
+ HKEY hKey = 0;
+
+ lResult = FusionRegOpenKeyEx(HKEY_LOCAL_MACHINE, REG_KEY_IMAGE_FILE_EXECUTION_OPTIONS, 0, KEY_READ, &hKey);
+ if (lResult != ERROR_SUCCESS) {
+ hr = S_FALSE;
+ goto Exit;
+ }
+
+ g_dwDevOverrideEnable = _GetConfigDWORD(REG_VAL_DEVOVERRIDE_ENABLE, 0);
+
+ if (g_dwDevOverrideEnable != 0) {
+
+ // Check local dev path
+ if (!WszGetModuleFileName(NULL, g_wzLocalDevOverridePath, MAX_PATH)) {
+ hr = HRESULT_FROM_GetLastError();
+ goto Exit;
+ }
+
+ if (lstrlenW(g_wzLocalDevOverridePath) + lstrlenW(DEVOVERRIDE_PATH) <= MAX_PATH) {
+ // Only process .devoverride if the total path length <= MAX_PATH
+
+ hr = StringCbCat(g_wzLocalDevOverridePath, sizeof(g_wzLocalDevOverridePath), DEVOVERRIDE_PATH);
+ if (FAILED(hr)) {
+ goto Exit;
+ }
+
+ dwFileAttr = WszGetFileAttributes(g_wzLocalDevOverridePath);
+ if ((dwFileAttr != INVALID_FILE_ATTRIBUTES) && (dwFileAttr & FILE_ATTRIBUTE_DIRECTORY)) {
+ g_dwDevOverrideFlags |= DEVOVERRIDE_LOCAL;
+ }
+ else {
+ // Clear the path just for goodness' sake
+ g_wzLocalDevOverridePath[0] = W('\0');
+ }
+ }
+
+ // Check global dev path
+ dwSize = sizeof(g_wzGlobalDevOverridePath);
+
+ lResult = WszRegQueryValueEx(hKey, REG_VAL_DEVOVERRIDE_PATH, NULL, &dwType, (LPBYTE)g_wzGlobalDevOverridePath, &dwSize);
+ if (lResult == ERROR_SUCCESS && lstrlenW(g_wzGlobalDevOverridePath) && dwType == REG_SZ) {
+
+ dwFileAttr = WszGetFileAttributes(g_wzGlobalDevOverridePath);
+ if ((dwFileAttr != INVALID_FILE_ATTRIBUTES) && (dwFileAttr & FILE_ATTRIBUTE_DIRECTORY)) {
+ g_dwDevOverrideFlags |= DEVOVERRIDE_GLOBAL;
+ }
+ else {
+ // Clear the path just for goodness' sake
+ g_wzGlobalDevOverridePath[0] = W('\0');
+ }
+ }
+
+ // BINDING_CONFIGURATION Env check
+ if(pwzBindingConfigDevOverridePath && pwzBindingConfigDevOverridePath[0]) {
+ WCHAR wzTempPath[MAX_PATH + 1];
+ BOOL fExists = FALSE;
+ WIN32_FILE_ATTRIBUTE_DATA fileInfo;
+
+ wzTempPath[0] = W('\0');
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:25025)
+#endif
+ // usage of PathCanonicalize is safe if we ensure that
+ // length of buffer specified by parameter1 is >= length of buffer specified by parameter2
+ if(lstrlenW(pwzBindingConfigDevOverridePath) + 1 > COUNTOF(wzTempPath)) {
+ hr = HRESULT_FROM_WIN32(ERROR_BUFFER_OVERFLOW);
+ goto Exit;
+ }
+
+ if(!PathCanonicalize(wzTempPath, pwzBindingConfigDevOverridePath)) {
+ hr = E_FAIL;
+ goto Exit;
+ }
+
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+ if(!WszGetFileAttributesEx(wzTempPath, GetFileExInfoStandard, &fileInfo)) {
+ hr = HRESULT_FROM_WIN32(GetLastError());
+ goto Exit;
+ }
+
+ if(!(fileInfo.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ hr = HRESULT_FROM_WIN32(ERROR_FILE_EXISTS);
+ goto Exit;
+ }
+
+ hr = StringCbCopy(g_wzGlobalDevOverridePath, sizeof(g_wzGlobalDevOverridePath), wzTempPath);
+ if (FAILED(hr)) {
+ goto Exit;
+ }
+
+ g_dwDevOverrideFlags |= DEVOVERRIDE_GLOBAL;
+ }
+
+ if(g_dwDevOverrideFlags & DEVOVERRIDE_GLOBAL) {
+ PathAddBackslashWrap(g_wzGlobalDevOverridePath, MAX_PATH);
+ }
+ }
+
+Exit:
+
+ if (hKey) {
+ RegCloseKey(hKey);
+ }
+
+ return hr;
+}
+
diff --git a/src/vm/fusionsink.cpp b/src/vm/fusionsink.cpp
new file mode 100644
index 0000000000..9c7db6a0a4
--- /dev/null
+++ b/src/vm/fusionsink.cpp
@@ -0,0 +1,216 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: FusionSink.cpp
+**
+** Purpose: Implements FusionSink, an event object that blocks
+** the current thread while waiting for an asynchronous load
+** of an assembly to succeed.
+**
+**
+
+
+===========================================================*/
+
+#include "common.h"
+
+#include <stdlib.h>
+#include "fusionsink.h"
+#include "ex.h"
+
+STDMETHODIMP FusionSink::QueryInterface(REFIID riid, void** ppv)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ *ppv = NULL;
+
+ if (riid == IID_IUnknown)
+ *ppv = (IUnknown*) (IAssemblyBindSink*) this;
+ else if (riid == IID_IAssemblyBindSink)
+ *ppv = (IAssemblyBindSink*)this;
+ else if (riid == IID_INativeImageEvaluate)
+ *ppv = (INativeImageEvaluate*)this;
+ if (*ppv == NULL)
+ hr = E_NOINTERFACE;
+ else
+ AddRef();
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+
+STDMETHODIMP FusionSink::OnProgress(DWORD dwNotification,
+ HRESULT hrNotification,
+ LPCWSTR szNotification,
+ DWORD dwProgress,
+ DWORD dwProgressMax,
+ LPVOID pvBindInfo,
+ IUnknown* punk)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+ switch(dwNotification) {
+ case ASM_NOTIFICATION_DONE:
+ m_LastResult = hrNotification;
+ if(m_pAbortUnk) {
+ m_pAbortUnk->Release();
+ m_pAbortUnk = NULL;
+ }
+
+ if(punk && SUCCEEDED(hrNotification))
+ hr = punk->QueryInterface(IID_IUnknown, (void**) &m_punk);
+ m_hEvent->Set();
+ break;
+ case ASM_NOTIFICATION_NATIVE_IMAGE_DONE:
+ if(punk && SUCCEEDED(hrNotification))
+ hr = punk->QueryInterface(IID_IUnknown, (void**) &m_pNIunk);
+ break;
+
+ case ASM_NOTIFICATION_START:
+ if(punk)
+ hr = punk->QueryInterface(IID_IUnknown, (void**) &m_pAbortUnk);
+ break;
+
+ case ASM_NOTIFICATION_ATTEMPT_NEXT_CODEBASE:
+ break;
+
+ case ASM_NOTIFICATION_BIND_INFO:
+ FusionBindInfo *pBindInfo;
+
+ pBindInfo = (FusionBindInfo *)pvBindInfo;
+
+ if (pBindInfo && m_pFusionLog == NULL) {
+ m_pFusionLog = pBindInfo->pdbglog;
+ if (m_pFusionLog) {
+ m_pFusionLog->AddRef();
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return hr;
+}
+
+ULONG FusionSink::AddRef()
+{
+ LIMITED_METHOD_CONTRACT;
+ ULONG cRefCount = 0;
+ //BEGIN_ENTRYPOINT_VOIDRET;
+
+ cRefCount = (InterlockedIncrement(&m_cRef));
+ //END_ENTRYPOINT_VOIDRET;
+ return cRefCount;
+}
+
+ULONG FusionSink::Release()
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_ENTRY_POINT;
+ BEGIN_CLEANUP_ENTRYPOINT;
+
+ ULONG cRef = InterlockedDecrement(&m_cRef);
+ if (!cRef) {
+ Reset();
+ delete this;
+ }
+ END_CLEANUP_ENTRYPOINT;
+ return (cRef);
+}
+
+HRESULT FusionSink::AssemblyResetEvent()
+{
+ WRAPPER_NO_CONTRACT;
+ HRESULT hr = AssemblyCreateEvent();
+ if(FAILED(hr)) return hr;
+
+ if(!m_hEvent->Reset()) {
+ hr = HRESULT_FROM_GetLastErrorNA();
+ }
+
+ return hr;
+}
+
+HRESULT FusionSink::AssemblyCreateEvent()
+{
+ STATIC_CONTRACT_NOTHROW;
+ HRESULT hr = S_OK;
+ if(m_hEvent == NULL) {
+ // Initialize the event to require manual reset
+ // and to be initially signaled.
+ EX_TRY {
+ m_hEvent = new Event();
+ m_hEvent->CreateManualEvent(TRUE);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+ return hr;
+}
+
+HRESULT FusionSink::Wait()
+{
+ STATIC_CONTRACT_NOTHROW;
+
+#if CHECK_INVARIANTS
+ _ASSERTE(CheckPointer(this));
+ _ASSERTE(CheckPointer(m_hEvent));
+#endif // CHECK_INVARIANTS
+
+ HRESULT hr = S_OK;
+ DWORD dwReturn = 0;
+
+ // CLREvent::Wait will switch mode if needed.
+
+ // Waiting for a signal from fusion - which we are guaranteed to get.
+ // We do a WaitForMultipleObjects (STA and MTA) and pump messages in the STA case
+ // in the call so we shouldn't freeze the system.
+ EX_TRY
+ {
+ dwReturn = m_hEvent->Wait(INFINITE,TRUE);
+ }
+ EX_CATCH
+ {
+ // Fusion uses us via COM interface so we need to swallow exceptions
+ hr = GET_EXCEPTION()->GetHR();
+
+ //@todo: is it the right thing to do to swallow exceptions here?
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return hr;
+}
+
+HRESULT FusionSink::Evaluate (
+ IAssembly *pILAssembly,
+ IAssembly *pNativeAssembly,
+ BYTE * pbCachedData,
+ DWORD dwDataSize
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ return S_OK;
+}
+
diff --git a/src/vm/gc.h b/src/vm/gc.h
new file mode 100644
index 0000000000..1f19e2bc20
--- /dev/null
+++ b/src/vm/gc.h
@@ -0,0 +1,6 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "../gc/gc.h"
diff --git a/src/vm/gccover.cpp b/src/vm/gccover.cpp
new file mode 100644
index 0000000000..57b11016d6
--- /dev/null
+++ b/src/vm/gccover.cpp
@@ -0,0 +1,1683 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/****************************************************************************/
+/* gccover.cpp */
+/****************************************************************************/
+
+/* This file holds code that is designed to test GC pointer tracking in
+ fully interruptible code. We basically do a GC everywhere we can in
+ jitted code
+ */
+/****************************************************************************/
+
+
+#include "common.h"
+
+#ifdef HAVE_GCCOVER
+
+#pragma warning(disable:4663)
+
+#include "eeconfig.h"
+#include "gms.h"
+#include "utsem.h"
+#include "gccover.h"
+#include "virtualcallstub.h"
+#include "threadsuspend.h"
+
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
+#include "gcinfodecoder.h"
+#endif
+
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4244)
+#endif // _MSC_VER
+
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+#undef free
+
+// This pragma is needed because public\vc\inc\xiosbase contains
+// a static local variable
+#pragma warning(disable : 4640)
+#include "msvcdis.h"
+#pragma warning(default : 4640)
+
+#include "disx86.h"
+
+#define free(memblock) Use_free(memblock)
+
+ // We need an X86 instruction walker (disassembler); here are some
+ // routines for caching such a disassembler in a concurrent environment.
+static DIS* g_Disasm = 0;
+
+
+static DIS* GetDisasm() {
+ DIS* myDisasm = FastInterlockExchangePointer(&g_Disasm, 0);
+ if (myDisasm == 0)
+ {
+#ifdef _TARGET_X86_
+ myDisasm = DIS::PdisNew(DIS::distX86);
+#elif defined(_TARGET_AMD64_)
+ myDisasm = DIS::PdisNew(DIS::distX8664);
+#endif
+ }
+ _ASSERTE(myDisasm);
+ return(myDisasm);
+}
+
+static void ReleaseDisasm(DIS* myDisasm) {
+ myDisasm = FastInterlockExchangePointer(&g_Disasm, myDisasm);
+ delete myDisasm;
+}
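+
+// A minimal usage sketch of the caching pattern above (hypothetical caller,
+// not code from this file): take the cached instance or build a fresh one,
+// use it, then offer it back for reuse.
+//
+//   DIS* pdis = GetDisasm();                       // cached or newly allocated
+//   size_t len = pdis->CbDisassemble(0, code, cb); // walk one instruction
+//   ReleaseDisasm(pdis);                           // re-cache; delete any displaced copy
+//
+// Because both helpers use FastInterlockExchangePointer, concurrent callers
+// never share a DIS object; a loser of the race simply allocates its own.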
+
+#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
+
+/****************************************************************************/
+
+MethodDesc* AsMethodDesc(size_t addr);
+static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr);
+static void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID codeStart);
+static bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 stopOffset, LPVOID codeStart);
+
+static MethodDesc* getTargetMethodDesc(PCODE target)
+{
+ MethodDesc* targetMD = ExecutionManager::GetCodeMethodDesc(target);
+ if (targetMD == 0)
+ {
+ VirtualCallStubManager::StubKind vsdStubKind = VirtualCallStubManager::SK_UNKNOWN;
+ VirtualCallStubManager *pVSDStubManager = VirtualCallStubManager::FindStubManager(target, &vsdStubKind);
+ if (vsdStubKind != VirtualCallStubManager::SK_BREAKPOINT && vsdStubKind != VirtualCallStubManager::SK_UNKNOWN)
+ {
+ DispatchToken token = VirtualCallStubManager::GetTokenFromStubQuick(pVSDStubManager, target, vsdStubKind);
+ _ASSERTE(token.IsValid());
+ targetMD = VirtualCallStubManager::GetInterfaceMethodDescFromToken(token);
+ }
+ else
+ {
+ targetMD = AsMethodDesc(size_t(MethodDesc::GetMethodDescFromStubAddr(target, TRUE)));
+ }
+ }
+ return targetMD;
+}
+
+
+void SetupAndSprinkleBreakpoints(
+ MethodDesc * pMD,
+ EECodeInfo * pCodeInfo,
+ IJitManager::MethodRegionInfo methodRegionInfo,
+ BOOL fZapped
+ )
+{
+ // Allocate room for the GCCoverageInfo and copy of the method instructions
+ size_t memSize = sizeof(GCCoverageInfo) + methodRegionInfo.hotSize + methodRegionInfo.coldSize;
+ GCCoverageInfo* gcCover = (GCCoverageInfo*)(void*) pMD->GetLoaderAllocatorForCode()->GetHighFrequencyHeap()->AllocAlignedMem(memSize, CODE_SIZE_ALIGN);
+
+ memset(gcCover, 0, sizeof(GCCoverageInfo));
+
+ gcCover->methodRegion = methodRegionInfo;
+ gcCover->codeMan = pCodeInfo->GetCodeManager();
+ gcCover->gcInfo = pCodeInfo->GetGCInfo();
+ gcCover->callerThread = 0;
+ gcCover->doingEpilogChecks = true;
+
+ gcCover->lastMD = pMD; /* pass pMD to SprinkleBreakpoints */
+
+ gcCover->SprinkleBreakpoints(gcCover->savedCode,
+ gcCover->methodRegion.hotStartAddress,
+ gcCover->methodRegion.hotSize,
+ 0,
+ fZapped);
+
+ // This is not required for ARM* as the above call does the work for both hot & cold regions
+#if !defined(_TARGET_ARM_) && !defined(_TARGET_ARM64_)
+ if (gcCover->methodRegion.coldSize != 0)
+ {
+ gcCover->SprinkleBreakpoints(gcCover->savedCode + gcCover->methodRegion.hotSize,
+ gcCover->methodRegion.coldStartAddress,
+ gcCover->methodRegion.coldSize,
+ gcCover->methodRegion.hotSize,
+ fZapped);
+ }
+#endif
+
+ gcCover->lastMD = NULL; /* clear lastMD */
+
+ _ASSERTE(!pMD->m_GcCover);
+ *EnsureWritablePages(&pMD->m_GcCover) = gcCover;
+}
+
+void SetupAndSprinkleBreakpointsForJittedMethod(MethodDesc * pMD,
+ PCODE codeStart
+ )
+{
+ EECodeInfo codeInfo(codeStart);
+ _ASSERTE(codeInfo.IsValid());
+ _ASSERTE(codeInfo.GetRelOffset() == 0);
+
+ IJitManager::MethodRegionInfo methodRegionInfo;
+ codeInfo.GetMethodRegionInfo(&methodRegionInfo);
+
+ _ASSERTE(PCODEToPINSTR(codeStart) == methodRegionInfo.hotStartAddress);
+
+#ifdef _DEBUG
+ if (!g_pConfig->SkipGCCoverage(pMD->GetModule()->GetSimpleName()))
+#endif
+ SetupAndSprinkleBreakpoints(pMD,
+ &codeInfo,
+ methodRegionInfo,
+ FALSE
+ );
+}
+
+/****************************************************************************/
+/* called when a method is first jitted when GCStress level 4 or 8 is on */
+
+void SetupGcCoverage(MethodDesc* pMD, BYTE* methodStartPtr) {
+
+#ifdef _DEBUG
+ if (!g_pConfig->ShouldGcCoverageOnMethod(pMD->m_pszDebugMethodName)) {
+ return;
+ }
+#endif
+
+ if (pMD->m_GcCover)
+ return;
+
+ //
+ // In the gcstress=4 case, we can easily piggy-back onto the JITLock because we
+ // have a JIT operation that needs to take that lock already. But in the case of
+ // gcstress=8, we cannot do this because the code already exists, and if gccoverage
+ // were not in the picture, we're happy to race to do the prestub work because all
+ // threads end up with the same answer and don't leak any resources in the process.
+ //
+ // However, with gccoverage, we need to exclude all other threads from mucking with
+ // the code while we fill in the breakpoints and make our shadow copy of the code.
+ //
+ {
+ BaseDomain* pDomain = pMD->GetDomain();
+ // Enter the global lock which protects the list of all functions being JITd
+ ListLockHolder pJitLock(pDomain->GetJitLock());
+
+
+ // It is possible that another thread stepped in before we entered the global lock for the first time.
+ if (pMD->m_GcCover)
+ {
+ // We came in to jit but someone beat us so return the jitted method!
+ return;
+ }
+ else
+ {
+ const char *description = "jit lock (gc cover)";
+#ifdef _DEBUG
+ description = pMD->m_pszDebugMethodName;
+#endif
+ ListLockEntryHolder pEntry(ListLockEntry::Find(pJitLock, pMD, description));
+
+ // We have an entry now, we can release the global lock
+ pJitLock.Release();
+
+ // Take the entry lock
+ {
+ ListLockEntryLockHolder pEntryLock(pEntry, FALSE);
+
+ if (pEntryLock.DeadlockAwareAcquire())
+ {
+ // we have the lock...
+ }
+ else
+ {
+ // Note that at this point we don't have the lock, but that's OK because the
+ // thread which does have the lock is blocked waiting for us.
+ }
+
+ if (pMD->m_GcCover)
+ {
+ return;
+ }
+
+ PCODE codeStart = (PCODE) methodStartPtr;
+
+ SetupAndSprinkleBreakpointsForJittedMethod(pMD,
+ codeStart
+ );
+ }
+ }
+ }
+}
+
+#ifdef FEATURE_PREJIT
+
+void SetupGcCoverageForNativeMethod(MethodDesc* pMD,
+ PCODE codeStart,
+ IJitManager::MethodRegionInfo& methodRegionInfo
+ )
+{
+
+ EECodeInfo codeInfo(codeStart);
+ _ASSERTE(codeInfo.IsValid());
+ _ASSERTE(codeInfo.GetRelOffset() == 0);
+
+ _ASSERTE(PCODEToPINSTR(codeStart) == methodRegionInfo.hotStartAddress);
+
+ SetupAndSprinkleBreakpoints(pMD,
+ &codeInfo,
+ methodRegionInfo,
+ TRUE
+ );
+}
+
+void SetupGcCoverageForNativeImage(Module* module)
+{
+ // Disable IBC logging here because the NGen image is not fully initialized yet. Eager-bound
+ // indirection cells are not initialized yet, so IBC logging would crash while attempting to dereference them.
+ IBCLoggingDisabler disableLogging;
+
+#if 0
+ // Debug code
+ LPWSTR wszSetupGcCoverage = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_SetupGcCoverage);
+
+ if (!wszSetupGcCoverage)
+ {
+ printf("wszSetupGcCoverage is NULL. Will not SetupGcCoverage for any module.\n");
+ return;
+ }
+ else
+ {
+ if ((wcscmp(W("*"), wszSetupGcCoverage) == 0) || // "*" means will gcstress all modules
+ (wcsstr(module->GetDebugName(), wszSetupGcCoverage) != NULL))
+ {
+ printf("[%ws] matched %ws\n", wszSetupGcCoverage, module->GetDebugName());
+ // Fall through
+ }
+ else
+ {
+ printf("[%ws] NOT match %ws\n", wszSetupGcCoverage, module->GetDebugName());
+ return;
+ }
+ }
+#endif
+
+#ifdef _DEBUG
+ if (g_pConfig->SkipGCCoverage(module->GetSimpleName()))
+ return;
+#endif
+
+ MethodIterator mi(module);
+ while (mi.Next())
+ {
+ PTR_MethodDesc pMD = mi.GetMethodDesc();
+ PCODE pMethodStart = mi.GetMethodStartAddress();
+
+ IJitManager::MethodRegionInfo methodRegionInfo;
+ mi.GetMethodRegionInfo(&methodRegionInfo);
+
+ SetupGcCoverageForNativeMethod(pMD, pMethodStart, methodRegionInfo);
+ }
+}
+#endif
+
+#ifdef _TARGET_AMD64_
+
+class GCCoverageRangeEnumerator
+{
+private:
+
+ ICodeManager *m_pCodeManager;
+ LPVOID m_pvGCInfo;
+ BYTE *m_codeStart;
+ BYTE *m_codeEnd;
+ BYTE *m_curFuncletEnd;
+ BYTE *m_nextFunclet;
+
+
+ BYTE* GetNextFunclet ()
+ {
+ if (m_nextFunclet == NULL)
+ return m_codeEnd;
+
+ BYTE *pCurFunclet = (BYTE*)EECodeInfo::findNextFunclet(m_nextFunclet, m_codeEnd - m_nextFunclet, (LPVOID*)&m_curFuncletEnd);
+ m_nextFunclet = (pCurFunclet != NULL) ? m_curFuncletEnd : NULL;
+
+ if (pCurFunclet == NULL)
+ return m_codeEnd;
+
+ LOG((LF_JIT, LL_INFO1000, "funclet range %p-%p\n", pCurFunclet, m_curFuncletEnd));
+
+ //
+ // workaround - adjust the funclet end address to exclude uninterruptible
+ // code at the end of each funclet. The jit currently puts data like
+ // jump tables in the code portion of the allocation, instead of the
+ // read-only portion.
+ //
+ // TODO: If the entire range is uninterruptible, we should skip the
+ // entire funclet.
+ //
+ unsigned ofsLastInterruptible = m_pCodeManager->FindEndOfLastInterruptibleRegion(
+ pCurFunclet - m_codeStart,
+ m_curFuncletEnd - m_codeStart,
+ m_pvGCInfo);
+
+ if (ofsLastInterruptible)
+ {
+ m_curFuncletEnd = m_codeStart + ofsLastInterruptible;
+ LOG((LF_JIT, LL_INFO1000, "adjusted end to %p\n", m_curFuncletEnd));
+ }
+
+ return pCurFunclet;
+ }
+
+
+public:
+
+ GCCoverageRangeEnumerator (ICodeManager *pCodeManager, LPVOID pvGCInfo, BYTE *codeStart, SIZE_T codeSize)
+ {
+ m_pCodeManager = pCodeManager;
+ m_pvGCInfo = pvGCInfo;
+ m_codeStart = codeStart;
+ m_codeEnd = codeStart + codeSize;
+ m_nextFunclet = codeStart;
+
+ GetNextFunclet();
+ }
+
+ // Checks that the given pointer is inside of a range where gc should be
+ // tested. If not, increments the pointer until it is, and returns the
+ // new pointer.
+ BYTE *EnsureInRange (BYTE *cur)
+ {
+ if (cur >= m_curFuncletEnd)
+ {
+ cur = GetNextFunclet();
+ }
+
+ return cur;
+ }
+
+ BYTE *SkipToNextRange ()
+ {
+ return GetNextFunclet();
+ }
+};
+
+#endif // _TARGET_AMD64_
+
+/****************************************************************************/
+/* sprinkle interrupt instructions that will stop on every GC-safe location
+ regionOffsetAdj - Represents the offset of the current region
+ from the beginning of the method (is 0 for hot region)
+*/
+
+void GCCoverageInfo::SprinkleBreakpoints(
+ BYTE * saveAddr,
+ PCODE pCode,
+ size_t codeSize,
+ size_t regionOffsetAdj,
+ BOOL fZapped)
+{
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
+ BYTE * codeStart = (BYTE *)pCode;
+
+ memcpy(saveAddr, codeStart, codeSize);
+
+ // For prejitted code we have to remove the write-protect on the code page
+ if (fZapped)
+ {
+ DWORD oldProtect;
+ ClrVirtualProtect(codeStart, codeSize, PAGE_EXECUTE_READWRITE, &oldProtect);
+ }
+
+ SLOT cur;
+ BYTE* codeEnd = codeStart + codeSize;
+
+ EECodeInfo codeInfo((PCODE)codeStart);
+
+ static ConfigDWORD fGcStressOnDirectCalls; // ConfigDWORD must be a static variable
+
+
+#ifdef _TARGET_AMD64_
+ GCCoverageRangeEnumerator rangeEnum(codeMan, gcInfo, codeStart, codeSize);
+
+ GcInfoDecoder safePointDecoder((const BYTE*)gcInfo, (GcInfoDecoderFlags)0, 0);
+ bool fSawPossibleSwitch = false;
+#endif
+
+ cur = codeStart;
+ DIS* pdis = GetDisasm();
+
+ // When we find a direct call instruction and we are partially-interruptible
+ // we determine the target and place a breakpoint after the call
+ // to simulate the hijack
+ // However, we need to wait until we disassemble the instruction
+ // after the call in order to put the breakpoint or we'll mess up
+ // the disassembly
+ // This variable is non-null if the previous instruction was a direct call,
+ // and we have found it's target MethodDesc
+ MethodDesc* prevDirectCallTargetMD = NULL;
+
+ /* TODO. Simulating the hijack could cause problems in cases where the
+ return register is not always a valid GC ref on the return offset.
+ That could happen if we got to the return offset via a branch
+ and not via return from the preceding call. However, this has not been
+ an issue so far.
+
+ Example:
+ mov eax, someval
+ test eax, eax
+ jCC AFTERCALL
+ call MethodWhichReturnsGCobject // return value is not used
+ AFTERCALL:
+ */
+
+ while (cur < codeEnd)
+ {
+ _ASSERTE(*cur != INTERRUPT_INSTR && *cur != INTERRUPT_INSTR_CALL);
+
+ MethodDesc* targetMD = NULL;
+ size_t len = pdis->CbDisassemble(0, cur, codeEnd-cur);
+
+#ifdef _TARGET_AMD64_
+ // REVISIT_TODO apparently the jit does not use the entire RUNTIME_FUNCTION range
+ // for code. It uses some for switch tables. Because the first few offsets
+ // may be decodable as instructions, we can't reason about where we should
+ // encounter invalid instructions. However, we do not want to silently skip
+ // large chunks of methods just because the JIT started emitting a new
+ // instruction, so only assume it is a switch table if we've seen the switch
+ // code (an indirect unconditional jump)
+ if ((len == 0) && fSawPossibleSwitch)
+ {
+ LOG((LF_JIT, LL_WARNING, "invalid instruction at %p (possibly start of switch table)\n", cur));
+ cur = rangeEnum.SkipToNextRange();
+ prevDirectCallTargetMD = NULL;
+ fSawPossibleSwitch = false;
+ continue;
+ }
+#endif
+
+ _ASSERTE(len > 0);
+ _ASSERTE(len <= (size_t)(codeEnd-cur));
+
+ switch(pdis->Trmt())
+ {
+ case DIS::trmtCallInd:
+#ifdef _TARGET_AMD64_
+ if(safePointDecoder.IsSafePoint((UINT32)(cur + len - codeStart + regionOffsetAdj)))
+#endif
+ {
+ *cur = INTERRUPT_INSTR_CALL; // the return value may need to be protected
+ }
+ break;
+
+ case DIS::trmtCall:
+ if(fGcStressOnDirectCalls.val(CLRConfig::INTERNAL_GcStressOnDirectCalls))
+ {
+#ifdef _TARGET_AMD64_
+ if(safePointDecoder.IsSafePoint((UINT32)(cur + len - codeStart + regionOffsetAdj)))
+#endif
+ {
+ SLOT nextInstr;
+ SLOT target = getTargetOfCall(cur, NULL, &nextInstr);
+
+ if (target != 0)
+ {
+ targetMD = getTargetMethodDesc((PCODE)target);
+ }
+ }
+ }
+ break;
+#ifdef _TARGET_AMD64_
+ case DIS::trmtBraInd:
+ fSawPossibleSwitch = true;
+ break;
+#endif
+ }
+
+ if (prevDirectCallTargetMD != 0)
+ {
+ if (prevDirectCallTargetMD->ReturnsObject(true) != MetaSig::RETNONOBJ)
+ *cur = INTERRUPT_INSTR_PROTECT_RET;
+ else
+ *cur = INTERRUPT_INSTR;
+ }
+
+ // For fully interruptible code, we end up whacking every instruction
+ // to INTERRUPT_INSTR. For non-fully interruptible code, we end
+ // up only touching the call instructions (specifically so that we
+ // can really do the GC on the instruction just after the call).
+ _ASSERTE(FitsIn<DWORD>((cur - codeStart) + regionOffsetAdj));
+ if (codeMan->IsGcSafe(&codeInfo, (cur - codeStart) + (DWORD)regionOffsetAdj))
+ *cur = INTERRUPT_INSTR;
+
+#ifdef _TARGET_X86_
+ // we will whack every instruction in the prolog and epilog to make certain
+ // our unwinding logic works there.
+ if (codeMan->IsInPrologOrEpilog((cur - codeStart) + (DWORD)regionOffsetAdj, gcInfo, NULL)) {
+ *cur = INTERRUPT_INSTR;
+ }
+#endif
+
+ // If we couldn't find the method desc targetMD is zero
+ prevDirectCallTargetMD = targetMD;
+
+ cur += len;
+
+#ifdef _TARGET_AMD64_
+ SLOT newCur = rangeEnum.EnsureInRange(cur);
+ if(newCur != cur)
+ {
+ prevDirectCallTargetMD = NULL;
+ cur = newCur;
+ fSawPossibleSwitch = false;
+ }
+#endif
+ }
+
+ // If we are not able to place an interrupt at the first instruction, this means that
+ // we are partially interruptible with no prolog. Just don't bother to do
+ // the epilog checks, since the epilog will be trivial (a single return instr)
+ assert(codeSize > 0);
+ if ((regionOffsetAdj==0) && (*codeStart != INTERRUPT_INSTR))
+ doingEpilogChecks = false;
+
+ ReleaseDisasm(pdis);
+
+#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ //Save the method code from hotRegion
+ memcpy(saveAddr, (BYTE*)methodRegion.hotStartAddress, methodRegion.hotSize);
+
+ if (methodRegion.coldSize > 0)
+ {
+ //Save the method code from coldRegion
+ memcpy(saveAddr+methodRegion.hotSize, (BYTE*)methodRegion.coldStartAddress, methodRegion.coldSize);
+ }
+
+ // For prejitted code we have to remove the write-protect on the code page
+ if (fZapped)
+ {
+ DWORD oldProtect;
+ ClrVirtualProtect((BYTE*)methodRegion.hotStartAddress, methodRegion.hotSize, PAGE_EXECUTE_READWRITE, &oldProtect);
+
+ if (methodRegion.coldSize > 0)
+ {
+ ClrVirtualProtect((BYTE*)methodRegion.coldStartAddress, methodRegion.coldSize, PAGE_EXECUTE_READWRITE, &oldProtect);
+ }
+ }
+
+ GcInfoDecoder safePointDecoder((const BYTE*)gcInfo, (GcInfoDecoderFlags)0, 0);
+
+ assert(methodRegion.hotSize > 0);
+
+#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+ safePointDecoder.EnumerateSafePoints(&replaceSafePointInstructionWithGcStressInstr,this);
+#endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+
+ safePointDecoder.EnumerateInterruptibleRanges(&replaceInterruptibleRangesWithGcStressInstr, this);
+
+ FlushInstructionCache(GetCurrentProcess(), (BYTE*)methodRegion.hotStartAddress, methodRegion.hotSize);
+
+ if (methodRegion.coldSize > 0)
+ {
+ FlushInstructionCache(GetCurrentProcess(), (BYTE*)methodRegion.coldStartAddress, methodRegion.coldSize);
+ }
+
+#else
+ _ASSERTE(!"not implemented for platform");
+#endif // _TARGET_*
+}
+
+#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+
+#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+
+void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID pGCCover)
+{
+ PCODE pCode = NULL;
+ IJitManager::MethodRegionInfo *ptr = &(((GCCoverageInfo*)pGCCover)->methodRegion);
+
+ //Get code address from offset
+ if (safePointOffset < ptr->hotSize)
+ pCode = ptr->hotStartAddress + safePointOffset;
+ else if(safePointOffset - ptr->hotSize < ptr->coldSize)
+ {
+ SIZE_T coldOffset = safePointOffset - ptr->hotSize;
+ pCode = ptr->coldStartAddress + coldOffset;
+ }
+ else
+ {
+ //For some methods (eg MCCTest.MyClass.GetSum2 in test file jit\jit64\mcc\interop\mcc_i07.il) gcinfo points to a safepoint
+ //beyond the length of the method, so the assert below is commented out.
+ //_ASSERTE(safePointOffset - ptr->hotSize < ptr->coldSize);
+ return;
+ }
+
+ SLOT instrPtr = (BYTE*)PCODEToPINSTR(pCode);
+
+ // For code sequences of the type
+ // BL func1
+ // BL func2 // Safe point 1
+ // mov r1 r0 // Safe point 2
+ // Both of the above safe-point instructions must be replaced with a gcStress instruction.
+ // However, as the first safe point is already replaced with a gcstress instruction, decoding of the call
+ // instruction will fail when processing the 2nd safe point. Therefore the saved instruction must be used instead of
+ // instrPtr for decoding the call instruction.
+ SLOT savedInstrPtr = ((GCCoverageInfo*)pGCCover)->savedCode + safePointOffset;
+
+ //Determine if the instruction before the safe point is a call using an immediate (BLX Imm) or a call by register (BLX Rm)
+ BOOL instructionIsACallThroughRegister = FALSE;
+ BOOL instructionIsACallThroughImmediate = FALSE;
+#if defined(_TARGET_ARM_)
+
+ // call by register instruction is two bytes (BL<c> Reg T1 encoding)
+ WORD instr = *((WORD*)savedInstrPtr - 1);
+
+ instr = instr & 0xff87;
+ if((instr ^ 0x4780) == 0)
+ // It is call by register
+ instructionIsACallThroughRegister = TRUE;
+
+ // call using immediate instructions are 4 bytes (BL<c> <label> T1 encoding)
+ instr = *((WORD*)savedInstrPtr - 2);
+ instr = instr & 0xf800;
+ if((instr ^ 0xf000) == 0)
+ if((*(((WORD*)savedInstrPtr)-1) & 0xd000) == 0xd000)
+ // It is call by immediate
+ instructionIsACallThroughImmediate = TRUE;
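+
+ // Illustrative encoding (made-up halfwords, not taken from a real binary):
+ // the pair 0xF000 0xD800 encodes "BL <label>". 0xF000 & 0xf800 == 0xf000
+ // passes the first check, and 0xD800 & 0xd000 == 0xd000 passes the second,
+ // so the pair is classified as a call through an immediate.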
+#elif defined(_TARGET_ARM64_)
+ DWORD instr = *((DWORD*)savedInstrPtr - 1);
+
+ // Is the call through a register or an immediate offset
+ // BL
+ // Encoding: 0x94000000 & [imm26]
+ if ((instr & 0xFC000000) == 0x94000000)
+ {
+ instructionIsACallThroughImmediate = TRUE;
+ }
+ // BLR
+ // Encoding: 0xD63F0000 & (Rn<<5)
+ else if ((instr & 0xFFFFFC1F) == 0xD63F0000)
+ {
+ instructionIsACallThroughRegister = TRUE;
+ }
+#endif
+ // A safe point must always be after a call instruction,
+ // and it cannot be both a call by register & a call by immediate.
+ // Safe points are also marked at jump calls (a special variant of
+ // tail call); however, such a call site will never appear on the stack,
+ // and at such places the previous instruction will not be a call
+ // instruction, so the assert is commented out for now.
+ //_ASSERTE(instructionIsACallThroughRegister ^ instructionIsACallThroughImmediate);
+
+ if(instructionIsACallThroughRegister)
+ {
+ // If it is a call by register then we cannot know the MethodDesc, so replace the call instruction with an illegal instruction;
+ // the safe point will be replaced with the appropriate illegal instruction at execution time, when the register value is known
+#if defined(_TARGET_ARM_)
+ *((WORD*)instrPtr - 1) = INTERRUPT_INSTR_CALL;
+#elif defined(_TARGET_ARM64_)
+ *((DWORD*)instrPtr - 1) = INTERRUPT_INSTR_CALL;
+#endif
+ }
+ else if(instructionIsACallThroughImmediate)
+ {
+ // If it is call by immediate then find the methodDesc
+ SLOT nextInstr;
+ SLOT target = getTargetOfCall((SLOT)((WORD*)savedInstrPtr-2), NULL, &nextInstr);
+
+ if (target != 0)
+ {
+ //Target is calculated wrt the saved instruction pointer
+ //Find the real target wrt the real instruction pointer
+ int delta = target - savedInstrPtr;
+ target = delta + instrPtr;
+
+ MethodDesc* targetMD = getTargetMethodDesc((PCODE)target);
+
+ if (targetMD != 0)
+ {
+
+ // The instruction about to be replaced cannot already be a gcstress instruction
+#if defined(_TARGET_ARM_)
+ size_t instrLen = GetARMInstructionLength(instrPtr);
+ if (instrLen == 2)
+ {
+ _ASSERTE(*((WORD*)instrPtr) != INTERRUPT_INSTR &&
+ *((WORD*)instrPtr) != INTERRUPT_INSTR_CALL &&
+ *((WORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET);
+ }
+ else
+ {
+ _ASSERTE(*((DWORD*)instrPtr) != INTERRUPT_INSTR_32 &&
+ *((DWORD*)instrPtr) != INTERRUPT_INSTR_CALL_32 &&
+ *((DWORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET_32);
+ }
+#elif defined(_TARGET_ARM64_)
+ {
+ _ASSERTE(*((DWORD*)instrPtr) != INTERRUPT_INSTR &&
+ *((DWORD*)instrPtr) != INTERRUPT_INSTR_CALL &&
+ *((DWORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET);
+ }
+#endif
+ //
+ // When applying GC coverage breakpoints at native image load time, the code here runs
+ // before eager fixups are applied for the module being loaded. The direct call target
+ // never requires restore, however it is possible that it is initially in an invalid state
+ // and remains invalid until one or more eager fixups are applied.
+ //
+ // MethodDesc::ReturnsObject() consults the method signature, meaning it consults the
+ // metadata in the owning module. For generic instantiations stored in non-preferred
+ // modules, reaching the owning module requires following the module override pointer for
+ // the enclosing MethodTable. In this case, the module override pointer is generally
+ // invalid until an associated eager fixup is applied.
+ //
+ // In situations like this, MethodDesc::ReturnsObject() will try to dereference an
+ // unresolved fixup and will AV.
+ //
+ // Given all of this, skip the MethodDesc::ReturnsObject() call by default to avoid
+ // unexpected AVs. This implies leaving out the GC coverage breakpoints for direct calls
+ // unless COMPLUS_GcStressOnDirectCalls=1 is explicitly set in the environment.
+ //
+
+ static ConfigDWORD fGcStressOnDirectCalls;
+
+ if (fGcStressOnDirectCalls.val(CLRConfig::INTERNAL_GcStressOnDirectCalls))
+ {
+ // If the method returns an object then should protect the return object
+ if (targetMD->ReturnsObject(true) != MetaSig::RETNONOBJ)
+ {
+ // replace with corresponding 2 or 4 byte illegal instruction (which roots the return value)
+#if defined(_TARGET_ARM_)
+ if (instrLen == 2)
+ *((WORD*)instrPtr) = INTERRUPT_INSTR_PROTECT_RET;
+ else
+ *((DWORD*)instrPtr) = INTERRUPT_INSTR_PROTECT_RET_32;
+#elif defined(_TARGET_ARM64_)
+ *((DWORD*)instrPtr) = INTERRUPT_INSTR_PROTECT_RET;
+#endif
+ }
+ else // method does not return an objectref
+ {
+ // replace with corresponding 2 or 4 byte illegal instruction
+#if defined(_TARGET_ARM_)
+ if (instrLen == 2)
+ *((WORD*)instrPtr) = INTERRUPT_INSTR;
+ else
+ *((DWORD*)instrPtr) = INTERRUPT_INSTR_32;
+#elif defined(_TARGET_ARM64_)
+ *((DWORD*)instrPtr) = INTERRUPT_INSTR;
+#endif
+ }
+ }
+ }
+ }
+ }
+}
+#endif
+
+//Replaces the provided interruptible range with the corresponding 2- or 4-byte gcStress illegal instructions
+bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 stopOffset, LPVOID pGCCover)
+{
+ PCODE pCode = NULL;
+ SLOT rangeStart = NULL;
+ SLOT rangeStop = NULL;
+
+ //Interruptible range can span across hot & cold region
+ int acrossHotRegion = 1; // 1 means range is not across end of hot region & 2 is when it is across end of hot region
+
+ //Find the code addresses from offsets
+ IJitManager::MethodRegionInfo *ptr = &(((GCCoverageInfo*)pGCCover)->methodRegion);
+ if (startOffset < ptr->hotSize)
+ {
+ pCode = ptr->hotStartAddress + startOffset;
+ rangeStart = (BYTE*)PCODEToPINSTR(pCode);
+
+ if(stopOffset <= ptr->hotSize)
+ {
+ pCode = ptr->hotStartAddress + stopOffset;
+ rangeStop = (BYTE*)PCODEToPINSTR(pCode);
+ }
+ else
+ {
+ //Interruptible range is spanning across hot & cold region
+ pCode = ptr->hotStartAddress + ptr->hotSize;
+ rangeStop = (BYTE*)PCODEToPINSTR(pCode);
+ acrossHotRegion++;
+ }
+ }
+ else
+ {
+ SIZE_T coldOffset = startOffset - ptr->hotSize;
+ _ASSERTE(coldOffset < ptr->coldSize);
+ pCode = ptr->coldStartAddress + coldOffset;
+ rangeStart = (BYTE*)PCODEToPINSTR(pCode);
+
+ coldOffset = stopOffset - ptr->hotSize;
+ _ASSERTE(coldOffset <= ptr->coldSize);
+ pCode = ptr->coldStartAddress + coldOffset;
+ rangeStop = (BYTE*)PCODEToPINSTR(pCode);
+ }
+
+ // Need to do two iterations if interruptible range spans across hot & cold region
+ while(acrossHotRegion--)
+ {
+ SLOT instrPtr = rangeStart;
+ while(instrPtr < rangeStop)
+ {
+
+ // The instruction about to be replaced cannot already be a gcstress instruction
+#if defined(_TARGET_ARM_)
+ size_t instrLen = GetARMInstructionLength(instrPtr);
+ if (instrLen == 2)
+ {
+ _ASSERTE(*((WORD*)instrPtr) != INTERRUPT_INSTR &&
+ *((WORD*)instrPtr) != INTERRUPT_INSTR_CALL &&
+ *((WORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET);
+ }
+ else
+ {
+ _ASSERTE(*((DWORD*)instrPtr) != INTERRUPT_INSTR_32 &&
+ *((DWORD*)instrPtr) != INTERRUPT_INSTR_CALL_32 &&
+ *((DWORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET_32);
+ }
+
+ if (instrLen == 2)
+ *((WORD*)instrPtr) = INTERRUPT_INSTR;
+ else
+ *((DWORD*)instrPtr) = INTERRUPT_INSTR_32;
+
+ instrPtr += instrLen;
+#elif defined(_TARGET_ARM64_)
+ {
+ _ASSERTE(*((DWORD*)instrPtr) != INTERRUPT_INSTR &&
+ *((DWORD*)instrPtr) != INTERRUPT_INSTR_CALL &&
+ *((DWORD*)instrPtr) != INTERRUPT_INSTR_PROTECT_RET);
+ }
+
+ *((DWORD*)instrPtr) = INTERRUPT_INSTR;
+ instrPtr += 4;
+#endif
+
+ }
+
+ if(acrossHotRegion)
+ {
+ //Set rangeStart & rangeStop for the second iteration
+ _ASSERTE(acrossHotRegion==1);
+ rangeStart = (BYTE*)PCODEToPINSTR(ptr->coldStartAddress);
+ pCode = ptr->coldStartAddress + stopOffset - ptr->hotSize;
+ rangeStop = (BYTE*)PCODEToPINSTR(pCode);
+ }
+ }
+ return FALSE;
+}
+#endif
+
+static size_t getRegVal(unsigned regNum, PCONTEXT regs)
+{
+ return *getRegAddr(regNum, regs);
+}
+
+/****************************************************************************/
+static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr) {
+
+ BYTE sibindexadj = 0;
+ BYTE baseadj = 0;
+ WORD displace = 0;
+
+#ifdef _TARGET_ARM_
+ if((instrPtr[1] & 0xf0) == 0xf0) // direct call
+ {
+ int imm32 = GetThumb2BlRel24((UINT16 *)instrPtr);
+ *nextInstr = instrPtr + 4;
+ return instrPtr + 4 + imm32;
+ }
+ else if(((instrPtr[1] & 0x47) == 0x47) && ((instrPtr[0] & 0x80) == 0x80)) // indirect call
+ {
+ *nextInstr = instrPtr + 2;
+ unsigned int regnum = (instrPtr[0] & 0x78) >> 3;
+ return (BYTE *)getRegVal(regnum, regs);
+ }
+#elif defined(_TARGET_ARM64_)
+ if (((*reinterpret_cast<DWORD*>(instrPtr)) & 0xFC000000) == 0x94000000)
+ {
+ // call through immediate
+ int imm26 = ((*((DWORD*)instrPtr)) & 0x03FFFFFF)<<2;
+ // SignExtend the immediate value.
+ imm26 = (imm26 << 4) >> 4;
+ *nextInstr = instrPtr + 4;
+ return instrPtr + imm26;
+ }
+ else if (((*reinterpret_cast<DWORD*>(instrPtr)) & 0xFFFFFC1F) == 0xD63F0000)
+ {
+ // call through register
+ *nextInstr = instrPtr + 4;
+ unsigned int regnum = ((*(DWORD*)instrPtr) >> 5) & 0x1F;
+ return (BYTE *)getRegVal(regnum, regs);
+ }
+ else
+ {
+ return 0; // Fail
+ }
+#endif
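+
+ // Worked example for the ARM64 BL decode above (an illustrative value):
+ // instr = 0x97FFFFF0 gives an imm26 field of 0x3FFFFF0, shifted left 2 =
+ // 0x0FFFFFC0; sign-extending via (imm26 << 4) >> 4 yields -0x40, so the
+ // branch target is instrPtr - 0x40.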
+
+#ifdef _TARGET_AMD64_
+
+ if ((instrPtr[0] & 0xf0) == REX_PREFIX_BASE)
+ {
+ static_assert_no_msg(REX_SIB_BASE_EXT == REX_MODRM_RM_EXT);
+ if (instrPtr[0] & REX_SIB_BASE_EXT)
+ baseadj = 8;
+
+ if (instrPtr[0] & REX_SIB_INDEX_EXT)
+ sibindexadj = 8;
+
+ instrPtr++;
+ }
+
+#endif // _TARGET_AMD64_
+
+ if (instrPtr[0] == 0xE8) {
+ *nextInstr = instrPtr + 5;
+
+ size_t base = (size_t) instrPtr + 5;
+
+ INT32 displacement = (INT32) (
+ ((UINT32)instrPtr[1]) +
+ (((UINT32)instrPtr[2]) << 8) +
+ (((UINT32)instrPtr[3]) << 16) +
+ (((UINT32)instrPtr[4]) << 24)
+ );
+
+ // Note that the signed displacement is sign-extended
+ // to 64-bit on AMD64
+ return((SLOT)(base + (SSIZE_T)displacement));
+ }
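+
+ // Worked example (illustrative bytes): "E8 FB FF FF FF" at address A has
+ // displacement 0xFFFFFFFB == -5, so the target is A + 5 + (-5) == A, a call
+ // to itself; *nextInstr is set to A + 5.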
+
+ if (instrPtr[0] == 0xFF) {
+
+ _ASSERTE(regs);
+
+ BYTE mod = (instrPtr[1] & 0xC0) >> 6;
+ BYTE rm = (instrPtr[1] & 0x7);
+ SLOT result;
+
+ switch (mod) {
+ case 0:
+ case 1:
+ case 2:
+
+ if (rm == 4) {
+
+ //
+ // Get values from the SIB byte
+ //
+ BYTE ss = (instrPtr[2] & 0xC0) >> 6;
+ BYTE index = (instrPtr[2] & 0x38) >> 3;
+ BYTE base = (instrPtr[2] & 0x7);
+
+ //
+ // Get starting value
+ //
+ if ((mod == 0) && (base == 5)) {
+ result = 0;
+ } else {
+ result = (BYTE *)getRegVal(baseadj + base, regs);
+ }
+
+ //
+ // Add in the [index]
+ //
+ if (index != 0x4) {
+ result = result + (getRegVal(sibindexadj + index, regs) << ss);
+ }
+
+ //
+ // Finally add in the offset
+ //
+ if (mod == 0) {
+
+ if (base == 5) {
+ result = result + *((int *)&instrPtr[3]);
+ displace += 7;
+ } else {
+ displace += 3;
+ }
+
+ } else if (mod == 1) {
+
+ result = result + *((char *)&instrPtr[3]);
+ displace += 4;
+
+ } else { // == 2
+
+ result = result + *((int *)&instrPtr[3]);
+ displace += 7;
+
+ }
+
+ } else {
+
+ //
+ // Get the value we need from the register.
+ //
+
+ if ((mod == 0) && (rm == 5)) {
+#ifdef _TARGET_AMD64_
+ // at this point instrPtr should be pointing at the beginning
+ // of the byte sequence for the call instruction. the operand
+ // is a RIP-relative address from the next instruction, so to
+ // calculate the address of the next instruction we need to
+ // jump forward 6 bytes: 1 for the opcode, 1 for the ModRM byte,
+ // and 4 for the operand. see AMD64 Programmer's Manual Vol 3.
+ result = instrPtr + 6;
+#else
+ result = 0;
+#endif // _TARGET_AMD64_
+ } else {
+ result = (SLOT)getRegVal(baseadj + rm, regs);
+ }
+
+ if (mod == 0) {
+
+ if (rm == 5) {
+ result = result + *((int *)&instrPtr[2]);
+ displace += 6;
+ } else {
+ displace += 2;
+ }
+
+ } else if (mod == 1) {
+
+ result = result + *((char *)&instrPtr[2]);
+ displace += 3;
+
+ } else { // == 2
+
+ result = result + *((int *)&instrPtr[2]);
+ displace += 6;
+
+ }
+
+ }
+
+ //
+ // Now dereference thru the result to get the resulting IP.
+ //
+ result = (SLOT)(*((SLOT *)result));
+
+ break;
+
+ case 3:
+ default:
+
+ result = (SLOT)getRegVal(baseadj + rm, regs);
+ displace += 2;
+ break;
+
+ }
+
+ *nextInstr = instrPtr + displace;
+ return result;
+
+ }
+
+ return(0); // Fail
+}
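+
+// A worked example for the indirect-call decode above (illustrative bytes):
+// "FF D0" is "call eax" (or rax with a REX prefix). ModRM 0xD0 gives mod == 3,
+// rm == 0, so the mod-3 case returns the register value and reports a 2-byte
+// instruction via displace.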
+
+/****************************************************************************/
+
+#ifdef _TARGET_X86_
+
+void checkAndUpdateReg(DWORD& origVal, DWORD curVal, bool gcHappened) {
+ if (origVal == curVal)
+ return;
+
+ // If these asserts go off, they indicate either that unwinding out of an epilog is wrong or that
+ // the validation infrastructure has a bug.
+
+ _ASSERTE(gcHappened); // If the register values are different, a GC must have happened
+ _ASSERTE(GCHeap::GetGCHeap()->IsHeapPointer((BYTE*) size_t(origVal))); // And the pointers involved are on the GCHeap
+ _ASSERTE(GCHeap::GetGCHeap()->IsHeapPointer((BYTE*) size_t(curVal)));
+ origVal = curVal; // this is now the best estimate of what should be returned.
+}
+
+#endif // _TARGET_X86_
+
+
+int GCcoverCount = 0;
+
+void* forceStack[8];
+
+/****************************************************************************/
+BOOL OnGcCoverageInterrupt(PCONTEXT regs)
+{
+ SO_NOT_MAINLINE_FUNCTION;
+
+ // So that you can set counted breakpoint easily;
+ GCcoverCount++;
+ forceStack[0]= &regs; // This is so I can see it fastchecked
+
+ BYTE* pControlPc = (BYTE*)GetIP(regs);
+
+ Volatile<BYTE>* instrPtr = (Volatile<BYTE>*)pControlPc;
+ forceStack[0] = &instrPtr; // This is so I can see it fastchecked
+
+ EECodeInfo codeInfo((PCODE)pControlPc);
+ if (!codeInfo.IsValid())
+ return(FALSE);
+
+ MethodDesc* pMD = codeInfo.GetMethodDesc();
+ DWORD offset = codeInfo.GetRelOffset();
+
+ forceStack[1] = &pMD; // This is so I can see it fastchecked
+ forceStack[2] = &offset; // This is so I can see it fastchecked
+
+ GCCoverageInfo* gcCover = pMD->m_GcCover;
+ forceStack[3] = &gcCover; // This is so I can see it fastchecked
+ if (gcCover == 0)
+ return(FALSE); // we aren't doing code gcCoverage on this function
+
+ /****
+ if (gcCover->curInstr != 0)
+ *gcCover->curInstr = INTERRUPT_INSTR;
+ ****/
+
+ Thread* pThread = GetThread();
+ _ASSERTE(pThread);
+
+#ifdef USE_REDIRECT_FOR_GCSTRESS
+ // If we're unable to redirect, then we simply won't test GC at this
+ // location.
+ if (!pThread->CheckForAndDoRedirectForGCStress(regs))
+ {
+ /* remove the interrupt instruction */
+ BYTE * savedInstrPtr = &gcCover->savedCode[offset];
+
+#ifdef _TARGET_ARM_
+ if (GetARMInstructionLength(savedInstrPtr) == 2)
+ *(WORD *)instrPtr = *(WORD *)savedInstrPtr;
+ else
+ *(DWORD *)instrPtr = *(DWORD *)savedInstrPtr;
+#elif defined(_TARGET_ARM64_)
+ *(DWORD *)instrPtr = *(DWORD *)savedInstrPtr;
+#else
+ *instrPtr = *savedInstrPtr;
+#endif
+ }
+#else // !USE_REDIRECT_FOR_GCSTRESS
+
+#ifdef _DEBUG
+ if (!g_pConfig->SkipGCCoverage(pMD->GetModule()->GetSimpleName()))
+#endif
+ DoGcStress(regs, pMD);
+
+#endif // !USE_REDIRECT_FOR_GCSTRESS
+
+ return TRUE;
+}
+
+// There are some code paths in DoGcStress that return without doing a GC, but we
+// now rely on EE suspension to update the GC STRESS instruction.
+// We need to do an extra EE suspension/resume even without a GC.
+FORCEINLINE void UpdateGCStressInstructionWithoutGC ()
+{
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER);
+ ThreadSuspend::RestartEE(TRUE, TRUE);
+}
+
+/****************************************************************************/
+
+void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
+{
+ BYTE* pControlPc = (BYTE*)GetIP(regs);
+ Volatile<BYTE>* instrPtr = (Volatile<BYTE>*)pControlPc;
+
+ if (!pMD)
+ {
+ pMD = ExecutionManager::GetCodeMethodDesc((PCODE)pControlPc);
+ if (!pMD)
+ return;
+ }
+
+ GCCoverageInfo *gcCover = pMD->m_GcCover;
+
+ EECodeInfo codeInfo((TADDR)instrPtr);
+ _ASSERTE(codeInfo.GetMethodDesc() == pMD);
+ DWORD offset = codeInfo.GetRelOffset();
+
+ Thread *pThread = GetThread();
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
+ BYTE instrVal = *instrPtr;
+ forceStack[6] = &instrVal; // This is so I can see it fastchecked
+
+ if (instrVal != INTERRUPT_INSTR &&
+ instrVal != INTERRUPT_INSTR_CALL &&
+ instrVal != INTERRUPT_INSTR_PROTECT_RET) {
+ _ASSERTE(instrVal == gcCover->savedCode[offset]); // someone beat us to it.
+ return; // Someone beat us to it, just go on running
+ }
+
+ bool atCall = (instrVal == INTERRUPT_INSTR_CALL);
+ bool afterCallProtect = (instrVal == INTERRUPT_INSTR_PROTECT_RET);
+
+#elif defined(_TARGET_ARM_)
+
+ _ASSERTE(((TADDR)instrPtr) & THUMB_CODE);
+ instrPtr = instrPtr - THUMB_CODE;
+
+ WORD instrVal = *(WORD*)instrPtr;
+ forceStack[6] = &instrVal; // This is so I can see it fastchecked
+
+ size_t instrLen = GetARMInstructionLength(instrVal);
+
+ bool atCall;
+ bool afterCallProtect;
+
+ if (instrLen == 2)
+ {
+ if (instrVal != INTERRUPT_INSTR &&
+ instrVal != INTERRUPT_INSTR_CALL &&
+ instrVal != INTERRUPT_INSTR_PROTECT_RET) {
+ _ASSERTE(instrVal == *(WORD*)(gcCover->savedCode + offset)); // someone beat us to it.
+ return; // Someone beat us to it, just go on running
+ }
+
+ atCall = (instrVal == INTERRUPT_INSTR_CALL);
+ afterCallProtect = (instrVal == INTERRUPT_INSTR_PROTECT_RET);
+ }
+ else
+ {
+ _ASSERTE(instrLen == 4);
+
+ DWORD instrVal32 = *(DWORD*)instrPtr;
+
+ if (instrVal32 != INTERRUPT_INSTR_32 &&
+ instrVal32 != INTERRUPT_INSTR_CALL_32 &&
+ instrVal32 != INTERRUPT_INSTR_PROTECT_RET_32) {
+ _ASSERTE(instrVal32 == *(DWORD*)(gcCover->savedCode + offset)); // someone beat us to it.
+ return; // Someone beat us to it, just go on running
+ }
+
+ atCall = (instrVal32 == INTERRUPT_INSTR_CALL_32);
+ afterCallProtect = (instrVal32 == INTERRUPT_INSTR_PROTECT_RET_32);
+ }
+#elif defined(_TARGET_ARM64_)
+ DWORD instrVal = *(DWORD *)instrPtr;
+ forceStack[6] = &instrVal; // This is so I can see it fastchecked
+
+ if (instrVal != INTERRUPT_INSTR &&
+ instrVal != INTERRUPT_INSTR_CALL &&
+ instrVal != INTERRUPT_INSTR_PROTECT_RET) {
+ _ASSERTE(instrVal == *(DWORD *)(gcCover->savedCode + offset)); // someone beat us to it.
+ return; // Someone beat us to it, just go on running
+ }
+
+ bool atCall = (instrVal == INTERRUPT_INSTR_CALL);
+ bool afterCallProtect = (instrVal == INTERRUPT_INSTR_PROTECT_RET);
+
+#endif // _TARGET_*
+
+#ifdef _TARGET_X86_
+ /* are we at the very first instruction? If so, capture the register state */
+ bool bShouldUpdateProlog = true;
+ if (gcCover->doingEpilogChecks) {
+ if (offset == 0) {
+ if (gcCover->callerThread == 0) {
+ if (FastInterlockCompareExchangePointer(&gcCover->callerThread, pThread, 0) == 0) {
+ gcCover->callerRegs = *regs;
+ gcCover->gcCount = GCHeap::GetGCHeap()->GetGcCount();
+ bShouldUpdateProlog = false;
+ }
+ }
+ else {
+ // We have been in this routine before. Give up on epilog checking because
+ // it is hard to ensure that the saved caller register state is correct.
+ // This also has the effect of only doing the checking once per routine
+ // (Even if there are multiple epilogs)
+ gcCover->doingEpilogChecks = false;
+ }
+ }
+
+ // If some other thread removes interrupt points, we abandon epilog testing
+ // for this routine since the barrier at the beginning of the routine may not
+ // be up anymore, and thus the caller context is now not guaranteed to be correct.
+ // This should happen only very rarely so is not a big deal.
+ if (gcCover->callerThread != pThread)
+ gcCover->doingEpilogChecks = false;
+ }
+
+ instrVal = gcCover->savedCode[offset];
+#endif // _TARGET_X86_
+
+
+ // <GCStress instruction update race>
+ // Remove the interrupt instruction the next time we suspend the EE,
+ // which should happen below in the call to StressHeap(). This is
+ // done with the EE suspended so that we do not race with the executing
+ // code on some other thread. If we allow that race, we may sometimes
+ // get a STATUS_ACCESS_VIOLATION instead of the expected
+ // STATUS_PRIVILEGED_INSTRUCTION because the OS has to inspect the code
+ // stream to determine which exception code to raise. As a result, some
+ // thread may take the exception due to the HLT, but by the time the OS
+ // inspects the code stream, the HLT may be replaced with the original
+ // code and it will just raise a STATUS_ACCESS_VIOLATION.
+#ifdef _TARGET_X86_
+ // only restore the original instruction if:
+ // this is not the first instruction in the method's prolog, or
+ // if it is, only if this is the second time we run in this method
+ // note that if this is the second time in the prolog we've already disabled epilog checks
+ if (offset != 0 || bShouldUpdateProlog)
+#endif
+ pThread->PostGCStressInstructionUpdate((BYTE*)instrPtr, &gcCover->savedCode[offset]);
+
+#ifdef _TARGET_X86_
+ /* are we in a prolog or epilog? If so just test the unwind logic
+ but don't actually do a GC since the prolog and epilog are not
+ GC safe points */
+ if (gcCover->codeMan->IsInPrologOrEpilog(offset, gcCover->gcInfo, NULL))
+ {
+ // We are not at a GC safe point so we can't Suspend EE (Suspend EE will yield to GC).
+ // But we still have to update the GC Stress instruction. We do it directly without suspending
+ // other threads, which means a race on updating is still possible. But for X86 the window of
+ // race is so small that we could ignore it. We need a better solution if the race becomes a real problem.
+ // see details about <GCStress instruction update race> in comments above
+ pThread->CommitGCStressInstructionUpdate ();
+
+ REGDISPLAY regDisp;
+ CONTEXT copyRegs = *regs;
+
+ pThread->Thread::InitRegDisplay(&regDisp, &copyRegs, true);
+ pThread->UnhijackThread();
+
+ CodeManState codeManState;
+ codeManState.dwIsSet = 0;
+
+ // unwind out of the prolog or epilog
+ gcCover->codeMan->UnwindStackFrame(&regDisp,
+ &codeInfo, UpdateAllRegs, &codeManState, NULL);
+
+ // Note we always do the unwind, since it does some checking (that we
+ // unwind to a valid return address), but we only do the precise checking when
+ // we are certain we have a good caller state
+ if (gcCover->doingEpilogChecks) {
+ // Confirm that we recovered our register state properly
+ _ASSERTE(regDisp.PCTAddr == TADDR(gcCover->callerRegs.Esp));
+
+ // If a GC happened in this function, then the registers will not match
+ // precisely. However there are still checks we can do. Also we can update
+ // the saved register to its new value so that if a GC does not happen between
+ // instructions we can recover (and since GCs are not allowed in the
+ // prologs and epilogs, we get complete coverage except for the first
+ // instruction in the epilog) (TODO: fix it for the first instr case)
+
+ _ASSERTE(pThread->PreemptiveGCDisabled()); // Epilogs should be in cooperative mode, no GC can happen right now.
+ bool gcHappened = gcCover->gcCount != GCHeap::GetGCHeap()->GetGcCount();
+ checkAndUpdateReg(gcCover->callerRegs.Edi, *regDisp.pEdi, gcHappened);
+ checkAndUpdateReg(gcCover->callerRegs.Esi, *regDisp.pEsi, gcHappened);
+ checkAndUpdateReg(gcCover->callerRegs.Ebx, *regDisp.pEbx, gcHappened);
+ checkAndUpdateReg(gcCover->callerRegs.Ebp, *regDisp.pEbp, gcHappened);
+
+ gcCover->gcCount = GCHeap::GetGCHeap()->GetGcCount();
+
+ }
+ return;
+ }
+#endif // _TARGET_X86_
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+
+ /* In non-fully interruptible code, an EIP just after a call instr
+ means something different because it implies that we are IN the
+ called method, not actually at the instruction just after the call. This
+ is important, because until the called method returns, IT is responsible
+ for protecting the return value. Thus just after a call instruction
+ we have to protect EAX if the method being called returns a GC pointer.
+
+ To figure this out, we need to stop AT the call so we can determine the
+ target (and thus whether it returns a GC pointer), and then place
+ a different interrupt instruction so that the GCCover harness protects
+ EAX before doing the GC. This effectively simulates a hijack in
+ non-fully interruptible code */
+
+ /* TODO. Simulating the hijack could cause problems in cases where the
+ return register is not always a valid GC ref on the return offset.
+ That could happen if we got to the return offset via a branch
+ and not via return from the preceding call. However, this has not been
+ an issue so far.
+
+ Example:
+ mov eax, someval
+ test eax, eax
+ jCC AFTERCALL
+ call MethodWhichReturnsGCobject // return value is not used
+ AFTERCALL:
+ */
+
+ if (atCall) {
+ // We need to update the GC Stress instruction. With partially-interruptible code
+ // the call instruction is not a GC safe point so we can't use
+ // StressHeap or UpdateGCStressInstructionWithoutGC to take care of updating;
+ // So we just update the instruction directly. There are still chances for a race,
+ // but it's not been a problem so far.
+ // see details about <GCStress instruction update race> in comments above
+ pThread->CommitGCStressInstructionUpdate ();
+ BYTE* nextInstr;
+ SLOT target = getTargetOfCall((BYTE*) instrPtr, regs, (BYTE**)&nextInstr);
+ if (target != 0)
+ {
+ if (!pThread->PreemptiveGCDisabled())
+ {
+ // We are in preemptive mode in JITted code. This implies that we are in an IL stub
+ // close to a PINVOKE method. This call will never return objectrefs.
+#ifdef _TARGET_ARM_
+ size_t instrLen = GetARMInstructionLength(nextInstr);
+ if (instrLen == 2)
+ *(WORD*)nextInstr = INTERRUPT_INSTR;
+ else
+ *(DWORD*)nextInstr = INTERRUPT_INSTR_32;
+#elif defined(_TARGET_ARM64_)
+ *(DWORD*)nextInstr = INTERRUPT_INSTR;
+#else
+ *nextInstr = INTERRUPT_INSTR;
+#endif
+ }
+ else
+ {
+ MethodDesc* targetMD = getTargetMethodDesc((PCODE)target);
+
+ if (targetMD != 0)
+ {
+ // Mark that we are performing a stackwalker like operation on the current thread.
+ // This is necessary to allow the ReturnsObject function to work without triggering any loads
+ ClrFlsValueSwitch _threadStackWalking(TlsIdx_StackWalkerWalkingThread, pThread);
+
+ // @Todo: possible race here, might need to be fixed if it becomes a problem.
+ // It could become a problem if 64bit does partially interruptible work.
+ // OK, we have the MD, mark the instruction after the CALL
+ // appropriately
+#ifdef _TARGET_ARM_
+ size_t instrLen = GetARMInstructionLength(nextInstr);
+ if (targetMD->ReturnsObject(true) != MetaSig::RETNONOBJ)
+ if (instrLen == 2)
+ *(WORD*)nextInstr = INTERRUPT_INSTR_PROTECT_RET;
+ else
+ *(DWORD*)nextInstr = INTERRUPT_INSTR_PROTECT_RET_32;
+ else
+ if (instrLen == 2)
+ *(WORD*)nextInstr = INTERRUPT_INSTR;
+ else
+ *(DWORD*)nextInstr = INTERRUPT_INSTR_32;
+#elif defined(_TARGET_ARM64_)
+ if (targetMD->ReturnsObject(true) != MetaSig::RETNONOBJ)
+ *(DWORD *)nextInstr = INTERRUPT_INSTR_PROTECT_RET;
+ else
+ *(DWORD *)nextInstr = INTERRUPT_INSTR;
+#else
+ if (targetMD->ReturnsObject(true) != MetaSig::RETNONOBJ)
+ *nextInstr = INTERRUPT_INSTR_PROTECT_RET;
+ else
+ *nextInstr = INTERRUPT_INSTR;
+#endif
+ }
+ }
+ }
+
+ // Must flush instruction cache before returning as instruction has been modified.
+ FlushInstructionCache(GetCurrentProcess(), instrPtr, 6);
+
+ // It's not a GC safe point; the GC stress instruction is
+ // already committed and the interrupt is already placed at the next instruction, so we just return.
+ return;
+ }
+#else
+ PORTABILITY_ASSERT("DoGcStress - NYI on this platform");
+#endif // _TARGET_*
+
+ bool enableWhenDone = false;
+ if (!pThread->PreemptiveGCDisabled())
+ {
+#ifdef _TARGET_X86_
+ // We are in preemptive mode in JITted code. Currently this can only
+ // happen in a couple of instructions when we have an inlined PINVOKE
+ // method.
+
+ // Better be a CALL (direct or indirect),
+ // or a MOV instruction (three flavors),
+ // or pop ECX or add ESP xx (for cdecl pops, two flavors)
+ // or cmp, je (for the PINVOKE ESP checks)
+ // or lea (for PInvoke stack resilience)
+ if (!(instrVal == 0xE8 || instrVal == 0xFF ||
+ instrVal == 0x89 || instrVal == 0x8B || instrVal == 0xC6 ||
+ instrVal == 0x59 || instrVal == 0x81 || instrVal == 0x83 ||
+ instrVal == 0x3B || instrVal == 0x74 || instrVal == 0x8D))
+ {
+ _ASSERTE(!"Unexpected instruction in preemtive JITTED code");
+ }
+#endif // _TARGET_X86_
+ pThread->DisablePreemptiveGC();
+ enableWhenDone = true;
+ }
+
+
+#if 0
+ // TODO currently disabled. we only do a GC once per instruction location.
+
+ /* note that for multiple threads, we can lose track and
+ forget to reset the interrupt after we executed
+ an instruction, so some instruction points will not be
+ executed twice, but we still get very good coverage
+ (perfect for single threaded cases) */
+
+ /* if we have not run this instruction in the past */
+ /* remember to whack it to an INTERRUPT_INSTR again */
+
+ if (!gcCover->IsBitSetForOffset(offset)) {
+ // gcCover->curInstr = instrPtr;
+ gcCover->SetBitForOffset(offset);
+ }
+#endif // 0
+
+
+#if !defined(USE_REDIRECT_FOR_GCSTRESS)
+ //
+ // If we redirect for gc stress, we don't need this frame on the stack,
+ // the redirection will push a resumable frame.
+ //
+ FrameWithCookie<ResumableFrame> frame(regs);
+ frame.Push(pThread);
+#endif // USE_REDIRECT_FOR_GCSTRESS
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ FrameWithCookie<GCFrame> gcFrame;
+ DWORD_PTR retVal = 0;
+
+ if (afterCallProtect) // Do I need to protect return value?
+ {
+#ifdef _TARGET_AMD64_
+ retVal = regs->Rax;
+#elif defined(_TARGET_X86_)
+ retVal = regs->Eax;
+#elif defined(_TARGET_ARM_)
+ retVal = regs->R0;
+#elif defined(_TARGET_ARM64_)
+ retVal = regs->X[0];
+#else
+ PORTABILITY_ASSERT("DoGCStress - return register");
+#endif
+ gcFrame.Init(pThread, (OBJECTREF*) &retVal, 1, TRUE);
+ }
+#endif // _TARGET_*
+
+ if (gcCover->lastMD != pMD)
+ {
+ LOG((LF_GCROOTS, LL_INFO100000, "GCCOVER: Doing GC at method %s::%s offset 0x%x\n",
+ pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, offset));
+ gcCover->lastMD = pMD;
+ }
+ else
+ {
+ LOG((LF_GCROOTS, LL_EVERYTHING, "GCCOVER: Doing GC at method %s::%s offset 0x%x\n",
+ pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, offset));
+ }
+
+ //-------------------------------------------------------------------------
+ // Do the actual stress work
+ //
+
+ if (!GCHeap::GetGCHeap()->StressHeap())
+ UpdateGCStressInstructionWithoutGC ();
+
+ // Must flush instruction cache before returning as instruction has been modified.
+ FlushInstructionCache(GetCurrentProcess(), instrPtr, 4);
+
+ CONSISTENCY_CHECK(!pThread->HasPendingGCStressInstructionUpdate());
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ if (afterCallProtect)
+ {
+#ifdef _TARGET_AMD64_
+ regs->Rax = retVal;
+#elif defined(_TARGET_X86_)
+ regs->Eax = retVal;
+#elif defined(_TARGET_ARM_)
+ regs->R0 = retVal;
+#elif defined(_TARGET_ARM64_)
+ regs->X[0] = retVal;
+#else
+ PORTABILITY_ASSERT("DoGCStress - return register");
+#endif
+ gcFrame.Pop();
+ }
+#endif // _TARGET_*
+
+#if !defined(USE_REDIRECT_FOR_GCSTRESS)
+ frame.Pop(pThread);
+#endif // USE_REDIRECT_FOR_GCSTRESS
+
+ if (enableWhenDone)
+ {
+ BOOL b = GC_ON_TRANSITIONS(FALSE); // Don't do a GCStress 3 GC here
+ pThread->EnablePreemptiveGC();
+ GC_ON_TRANSITIONS(b);
+ }
+
+ return;
+
+}
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif // _MSC_VER: warning C4244
+#endif // _TARGET_X86_ || _TARGET_AMD64_
+
+#endif // HAVE_GCCOVER
+
diff --git a/src/vm/gccover.h b/src/vm/gccover.h
new file mode 100644
index 0000000000..2fa6868196
--- /dev/null
+++ b/src/vm/gccover.h
@@ -0,0 +1,112 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef __GCCOVER_H__
+#define __GCCOVER_H__
+
+#ifdef HAVE_GCCOVER
+
+/****************************************************************************/
+/* GCCoverageInfo holds the state of which instructions have been visited by
+ a GC and which ones have not */
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4200 ) // zero-sized array
+#endif // _MSC_VER
+
+class GCCoverageInfo {
+public:
+ IJitManager::MethodRegionInfo methodRegion;
+ BYTE* curInstr; // The last instruction that was able to execute
+ MethodDesc* lastMD; // Used to quickly figure out the culprit
+
+ // Following 6 variables are for prolog / epilog walking coverage
+ ICodeManager* codeMan; // CodeMan for this method
+ void* gcInfo; // gcInfo for this method
+
+ Thread* callerThread; // Thread associated with context callerRegs
+ T_CONTEXT callerRegs; // register state when method was entered
+ unsigned gcCount; // GC count at the time we captured the regs
+ bool doingEpilogChecks; // are we doing epilog unwind checks? (do we care about callerRegs?)
+
+ enum { hasExecutedSize = 4 };
+ unsigned hasExecuted[hasExecutedSize];
+ unsigned totalCount;
+
+ union
+ {
+ BYTE savedCode[0]; // really variable sized
+ // Note that DAC doesn't marshal the entire byte array automatically.
+ // Any client of this field needs to get the TADDR of this field and
+ // marshal over the bytes properly.
+ };
+
+ // Sloppy bitsets (will wrap, and not threadsafe) but best effort is OK
+ // since we just need half decent coverage.
+ BOOL IsBitSetForOffset(unsigned offset) {
+ unsigned dword = hasExecuted[(offset >> 5) % hasExecutedSize];
+ return(dword & (1 << (offset & 0x1F)));
+ }
+
+ void SetBitForOffset(unsigned offset) {
+ unsigned* dword = &hasExecuted[(offset >> 5) % hasExecutedSize];
+ *dword |= (1 << (offset & 0x1F));
+ }
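+
+ // A usage sketch (this pairing appears, currently disabled under #if 0,
+ // in gccover.cpp):
+ //
+ // if (!IsBitSetForOffset(offset)) {
+ // SetBitForOffset(offset); // first visit at this code offset
+ // }
+ //
+ // Because (offset >> 5) is taken modulo hasExecutedSize, distinct offsets
+ // can alias; a false "already seen" only costs coverage, never correctness.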
+
+ void SprinkleBreakpoints(BYTE * saveAddr, PCODE codeStart, size_t codeSize, size_t regionOffsetAdj, BOOL fZapped);
+
+};
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif // _MSC_VER
+
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
+#define INTERRUPT_INSTR 0xF4 // X86 HLT instruction (any 1 byte illegal instruction will do)
+#define INTERRUPT_INSTR_CALL 0xFA // X86 CLI instruction
+#define INTERRUPT_INSTR_PROTECT_RET 0xFB // X86 STI instruction
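+
+// All three are privileged in user mode, so executing one raises
+// STATUS_PRIVILEGED_INSTRUCTION, which routes control to OnGcCoverageInterrupt
+// (see the <GCStress instruction update race> comment in gccover.cpp).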
+
+#elif defined(_TARGET_ARM_)
+
+// 16-bit illegal instructions which will cause an exception and transfer
+// control to the GcStress codepath
+#define INTERRUPT_INSTR 0xde00
+#define INTERRUPT_INSTR_CALL 0xde01
+#define INTERRUPT_INSTR_PROTECT_RET 0xde02
+
+// 32-bit illegal instructions. It is necessary to replace a 16-bit instruction
+// with a 16-bit illegal instruction, and a 32-bit instruction with a 32-bit
+// illegal instruction, to make GC stress with the "IT" instruction work, since
+// it counts the number of instructions that follow it, so we can't change that
+// number by replacing a 32-bit instruction with a 16-bit illegal instruction
+// followed by 16 bits of junk that might end up being a legal instruction.
+// Use the "Permanently UNDEFINED" section in the "ARM Architecture Reference Manual",
+// section A6.3.4 "Branches and miscellaneous control" table.
+// Note that we write these as a single 32-bit write, not two 16-bit writes, so the values
+// need to be arranged as the ARM decoder wants them, with the high-order halfword first
+// (in little-endian order).
+#define INTERRUPT_INSTR_32 0xa001f7f0 // 0xf7f0a001
+#define INTERRUPT_INSTR_CALL_32 0xa002f7f0 // 0xf7f0a002
+#define INTERRUPT_INSTR_PROTECT_RET_32 0xa003f7f0 // 0xf7f0a003
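+
+// For example, writing INTERRUPT_INSTR_32 (0xa001f7f0) as a single little-endian
+// 32-bit value produces the byte sequence f0 f7 01 a0; the Thumb-2 decoder then
+// consumes the halfwords 0xf7f0 followed by 0xa001, i.e. the permanently
+// undefined encoding 0xf7f0a001.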
+
+#elif defined(_TARGET_ARM64_)
+
+// The following encodings are undefined. They fall into section C4.5.8 - Data processing (2 source) of
+// "Arm Architecture Reference Manual ARMv8"
+//
+#define INTERRUPT_INSTR 0xBADC0DE0
+#define INTERRUPT_INSTR_CALL 0xBADC0DE1
+#define INTERRUPT_INSTR_PROTECT_RET 0xBADC0DE2
+
+#endif // _TARGET_*
+
+#endif // HAVE_GCCOVER
+
+#endif // !__GCCOVER_H__
diff --git a/src/vm/gcdecode.cpp b/src/vm/gcdecode.cpp
new file mode 100644
index 0000000000..f22bf24a5d
--- /dev/null
+++ b/src/vm/gcdecode.cpp
@@ -0,0 +1,16 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+
+#define FPO_INTERRUPTIBLE 0
+
+/* Precompiled header nonsense requires that we do it this way */
+
+/* GCDecoder.cpp is a common source file between VM and JIT/IL */
+/* GCDecoder.cpp is located in $COM99/inc */
+
+#include "gcdecoder.cpp"
diff --git a/src/vm/gcdesc.h b/src/vm/gcdesc.h
new file mode 100644
index 0000000000..f2865d1134
--- /dev/null
+++ b/src/vm/gcdesc.h
@@ -0,0 +1,6 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "../gc/gcdesc.h"
diff --git a/src/vm/gcenv.cpp b/src/vm/gcenv.cpp
new file mode 100644
index 0000000000..ceba57cb59
--- /dev/null
+++ b/src/vm/gcenv.cpp
@@ -0,0 +1,563 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*
+ * GCENV.CPP
+ *
+ * GCToEEInterface implementation
+ *
+
+ *
+ */
+
+#include "common.h"
+
+#include "gcenv.h"
+
+#include "threadsuspend.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "runtimecallablewrapper.h"
+#include "rcwwalker.h"
+#endif // FEATURE_COMINTEROP
+
+void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
+{
+ WRAPPER_NO_CONTRACT;
+
+ static_assert_no_msg(SUSPEND_FOR_GC == ThreadSuspend::SUSPEND_FOR_GC);
+ static_assert_no_msg(SUSPEND_FOR_GC_PREP == ThreadSuspend::SUSPEND_FOR_GC_PREP);
+
+ _ASSERTE(reason == SUSPEND_FOR_GC || reason == SUSPEND_FOR_GC_PREP);
+
+ ThreadSuspend::SuspendEE((ThreadSuspend::SUSPEND_REASON)reason);
+}
+
+void GCToEEInterface::RestartEE(BOOL bFinishedGC)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ThreadSuspend::RestartEE(bFinishedGC, TRUE);
+}
+
+/*
+ * GcEnumObject()
+ *
+ * This is the JIT compiler (or any remote code manager)
+ * GC enumeration callback
+ */
+
+void GcEnumObject(LPVOID pData, OBJECTREF *pObj, DWORD flags)
+{
+ Object ** ppObj = (Object **)pObj;
+ GCCONTEXT * pCtx = (GCCONTEXT *) pData;
+
+ // Since we may be asynchronously walking another thread's stack,
+ // check (frequently) for stack-buffer-overrun corruptions after
+ // any long operation
+ if (pCtx->cf != NULL)
+ pCtx->cf->CheckGSCookies();
+
+ //
+ // Sanity check that the flags contain only these three values
+ //
+ assert((flags & ~(GC_CALL_INTERIOR|GC_CALL_PINNED|GC_CALL_CHECK_APP_DOMAIN)) == 0);
+
+ // for interior pointers, we optimize the case in which
+    // it points into the current thread's stack area
+ //
+ if (flags & GC_CALL_INTERIOR)
+ PromoteCarefully(pCtx->f, ppObj, pCtx->sc, flags);
+ else
+ (pCtx->f)(ppObj, pCtx->sc, flags);
+}
+
+//-----------------------------------------------------------------------------
+void GcReportLoaderAllocator(promote_func* fn, ScanContext* sc, LoaderAllocator *pLoaderAllocator)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (pLoaderAllocator != NULL && pLoaderAllocator->IsCollectible())
+ {
+ Object *refCollectionObject = OBJECTREFToObject(pLoaderAllocator->GetExposedObject());
+
+#ifdef _DEBUG
+ Object *oldObj = refCollectionObject;
+#endif
+
+ _ASSERTE(refCollectionObject != NULL);
+ fn(&refCollectionObject, sc, CHECK_APP_DOMAIN);
+
+ // We are reporting the location of a local variable, assert it doesn't change.
+ _ASSERTE(oldObj == refCollectionObject);
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Determine whether we should report the generic parameter context
+//
+// This is meant to detect the situation where a ThreadAbortException is raised
+// in the prolog of a managed method, before the location for the generics
+// context has been initialized; when such a TAE is raised, we are open to a
+// race with the GC (e.g. while creating the managed object for the TAE).
+// The GC would cause a stack walk, and if we report the stack location for
+// the generic param context at this time we'd crash.
+// The long term solution is to avoid raising TAEs at any non-GC-safe points,
+// and to additionally ensure that we do not expose the runtime to TAE
+// starvation.
+inline bool SafeToReportGenericParamContext(CrawlFrame* pCF)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (!pCF->IsFrameless() || !(pCF->IsActiveFrame() || pCF->IsInterrupted()))
+ {
+ return true;
+ }
+
+#ifndef USE_GC_INFO_DECODER
+
+ ICodeManager * pEECM = pCF->GetCodeManager();
+ if (pEECM != NULL && pEECM->IsInPrologOrEpilog(pCF->GetRelOffset(), pCF->GetGCInfo(), NULL))
+ {
+ return false;
+ }
+
+#else // USE_GC_INFO_DECODER
+
+ GcInfoDecoder gcInfoDecoder((PTR_CBYTE)pCF->GetGCInfo(),
+ DECODE_PROLOG_LENGTH,
+ 0);
+ UINT32 prologLength = gcInfoDecoder.GetPrologSize();
+ if (pCF->GetRelOffset() < prologLength)
+ {
+ return false;
+ }
+
+#endif // USE_GC_INFO_DECODER
+
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+StackWalkAction GcStackCrawlCallBack(CrawlFrame* pCF, VOID* pData)
+{
+ //
+ // KEEP IN SYNC WITH DacStackReferenceWalker::Callback in debug\daccess\daccess.cpp
+ //
+
+ Frame *pFrame;
+ GCCONTEXT *gcctx = (GCCONTEXT*) pData;
+
+#if CHECK_APP_DOMAIN_LEAKS
+ gcctx->sc->pCurrentDomain = pCF->GetAppDomain();
+#endif //CHECK_APP_DOMAIN_LEAKS
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ if (g_fEnableARM)
+ {
+ gcctx->sc->pCurrentDomain = pCF->GetAppDomain();
+ }
+#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+ MethodDesc *pMD = pCF->GetFunction();
+
+#ifdef GC_PROFILING
+ gcctx->sc->pMD = pMD;
+#endif //GC_PROFILING
+
+ // Clear it on exit so that we never have a stale CrawlFrame
+ ResetPointerHolder<CrawlFrame*> rph(&gcctx->cf);
+ // put it somewhere so that GcEnumObject can get to it.
+ gcctx->cf = pCF;
+
+ bool fReportGCReferences = true;
+#if defined(WIN64EXCEPTIONS)
+    // On Win64 and ARM, we may have unwound this crawlFrame and thus shouldn't report the invalid
+ // references it may contain.
+ fReportGCReferences = pCF->ShouldCrawlframeReportGCReferences();
+#endif // defined(WIN64EXCEPTIONS)
+
+ if (fReportGCReferences)
+ {
+ if (pCF->IsFrameless())
+ {
+ ICodeManager * pCM = pCF->GetCodeManager();
+ _ASSERTE(pCM != NULL);
+
+ unsigned flags = pCF->GetCodeManagerFlags();
+
+ #ifdef _TARGET_X86_
+ STRESS_LOG3(LF_GCROOTS, LL_INFO1000, "Scanning Frameless method %pM EIP = %p &EIP = %p\n",
+ pMD, GetControlPC(pCF->GetRegisterSet()), pCF->GetRegisterSet()->PCTAddr);
+ #else
+ STRESS_LOG2(LF_GCROOTS, LL_INFO1000, "Scanning Frameless method %pM ControlPC = %p\n",
+ pMD, GetControlPC(pCF->GetRegisterSet()));
+ #endif
+
+ _ASSERTE(pMD != 0);
+
+ #ifdef _DEBUG
+ LOG((LF_GCROOTS, LL_INFO1000, "Scanning Frame for method %s:%s\n",
+ pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+ #endif // _DEBUG
+
+#if 0
+ printf("Scanning Frame for method %s\n", pMD->m_pszDebugMethodName);
+#endif // 0
+
+ pCM->EnumGcRefs(pCF->GetRegisterSet(),
+ pCF->GetCodeInfo(),
+ flags,
+ GcEnumObject,
+ pData);
+ }
+ else
+ {
+ Frame * pFrame = pCF->GetFrame();
+
+ STRESS_LOG3(LF_GCROOTS, LL_INFO1000,
+ "Scanning ExplicitFrame %p AssocMethod = %pM frameVTable = %pV\n",
+ pFrame, pFrame->GetFunction(), *((void**) pFrame));
+ pFrame->GcScanRoots( gcctx->f, gcctx->sc);
+ }
+ }
+
+
+    // If we're executing an LCG dynamic method then we must promote the associated resolver to ensure it
+    // doesn't get collected and yank the method code out from under us.
+
+    // Be careful to only promote the reference -- we can also be called to relocate the reference and
+    // that can lead to all sorts of problems since we could be racing for the relocation with the long
+    // weak handle we recover the reference from. Promoting the reference is enough; the handle in the
+    // reference will be relocated properly as long as we keep it alive until the end of the collection
+    // and the reference is actually maintained by the long weak handle.
+ if (pMD && gcctx->sc->promotion)
+ {
+ BOOL fMaybeCollectibleMethod = TRUE;
+
+ // If this is a frameless method then the jitmanager can answer the question of whether
+ // or not this is LCG simply by looking at the heap where the code lives, however there
+ // is also the prestub case where we need to explicitly look at the MD for stuff that isn't
+ // ngen'd
+ if (pCF->IsFrameless())
+ {
+ fMaybeCollectibleMethod = ExecutionManager::IsCollectibleMethod(pCF->GetMethodToken());
+ }
+
+ if (fMaybeCollectibleMethod && pMD->IsLCGMethod())
+ {
+ Object *refResolver = OBJECTREFToObject(pMD->AsDynamicMethodDesc()->GetLCGMethodResolver()->GetManagedResolver());
+#ifdef _DEBUG
+ Object *oldObj = refResolver;
+#endif
+ _ASSERTE(refResolver != NULL);
+ (*gcctx->f)(&refResolver, gcctx->sc, CHECK_APP_DOMAIN);
+ _ASSERTE(!pMD->IsSharedByGenericInstantiations());
+
+ // We are reporting the location of a local variable, assert it doesn't change.
+ _ASSERTE(oldObj == refResolver);
+ }
+ else
+ {
+ if (fMaybeCollectibleMethod)
+ {
+ GcReportLoaderAllocator(gcctx->f, gcctx->sc, pMD->GetLoaderAllocator());
+ }
+
+ if (fReportGCReferences)
+ {
+ GenericParamContextType paramContextType = GENERIC_PARAM_CONTEXT_NONE;
+
+ if (pCF->IsFrameless())
+ {
+ // We need to grab the Context Type here because there are cases where the MethodDesc
+                    // is shared, and thus indicates there should be an instantiation argument, but the JIT
+ // was still allowed to optimize it away and we won't grab it below because we're not
+ // reporting any references from this frame.
+ paramContextType = pCF->GetCodeManager()->GetParamContextType(pCF->GetRegisterSet(), pCF->GetCodeInfo());
+ }
+ else
+ {
+ if (pMD->RequiresInstMethodDescArg())
+ paramContextType = GENERIC_PARAM_CONTEXT_METHODDESC;
+ else if (pMD->RequiresInstMethodTableArg())
+ paramContextType = GENERIC_PARAM_CONTEXT_METHODTABLE;
+ }
+
+ if (SafeToReportGenericParamContext(pCF))
+ {
+ // Handle the case where the method is a static shared generic method and we need to keep the type
+ // of the generic parameters alive
+ if (paramContextType == GENERIC_PARAM_CONTEXT_METHODDESC)
+ {
+ MethodDesc *pMDReal = dac_cast<PTR_MethodDesc>(pCF->GetParamTypeArg());
+ _ASSERTE((pMDReal != NULL) || !pCF->IsFrameless());
+ if (pMDReal != NULL)
+ {
+ GcReportLoaderAllocator(gcctx->f, gcctx->sc, pMDReal->GetLoaderAllocator());
+ }
+ }
+ else if (paramContextType == GENERIC_PARAM_CONTEXT_METHODTABLE)
+ {
+ MethodTable *pMTReal = dac_cast<PTR_MethodTable>(pCF->GetParamTypeArg());
+ _ASSERTE((pMTReal != NULL) || !pCF->IsFrameless());
+ if (pMTReal != NULL)
+ {
+ GcReportLoaderAllocator(gcctx->f, gcctx->sc, pMTReal->GetLoaderAllocator());
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Since we may be asynchronously walking another thread's stack,
+ // check (frequently) for stack-buffer-overrun corruptions after
+ // any long operation
+ pCF->CheckGSCookies();
+
+ return SWA_CONTINUE;
+}
+
+static void CALLBACK CheckPromoted(_UNCHECKED_OBJECTREF *pObjRef, LPARAM *pExtraInfo, LPARAM lp1, LPARAM lp2)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Checking referent of Weak-", pObjRef, "to ", *pObjRef)));
+
+ Object **pRef = (Object **)pObjRef;
+ if (!GCHeap::GetGCHeap()->IsPromoted(*pRef))
+ {
+ LOG((LF_GC, LL_INFO100, LOG_HANDLE_OBJECT_CLASS("Severing Weak-", pObjRef, "to unreachable ", *pObjRef)));
+
+ *pRef = NULL;
+ }
+ else
+ {
+ LOG((LF_GC, LL_INFO1000000, "reachable " LOG_OBJECT_CLASS(*pObjRef)));
+ }
+}
+
+VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ SyncBlockCache::GetSyncBlockCache()->GCWeakPtrScan(scanProc, lp1, lp2);
+}
+
+
+// The EE can perform post-stack-scanning actions while the
+// user threads are still suspended
+VOID GCToEEInterface::AfterGcScanRoots (int condemned, int max_gen,
+ ScanContext* sc)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_COMINTEROP
+ // Go through all the app domains and for each one detach all the *unmarked* RCWs to prevent
+ // the RCW cache from resurrecting them.
+ UnsafeAppDomainIterator i(TRUE);
+ i.Init();
+
+ while (i.Next())
+ {
+ i.GetDomain()->DetachRCWs();
+ }
+#endif // FEATURE_COMINTEROP
+}
+
+void GCToEEInterface::ScanStaticGCRefsOpportunistically(promote_func* fn, ScanContext* sc)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ SystemDomain::EnumAllStaticGCRefs(fn, sc);
+}
+
+/*
+ * Scan all stack roots
+ */
+
+VOID GCToEEInterface::ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc)
+{
+ GCCONTEXT gcctx;
+
+ gcctx.f = fn;
+ gcctx.sc = sc;
+ gcctx.cf = NULL;
+
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ // Either we are in a concurrent situation (in which case the thread is unknown to
+ // us), or we are performing a synchronous GC and we are the GC thread, holding
+ // the threadstore lock.
+
+ _ASSERTE(dbgOnly_IsSpecialEEThread() ||
+ GetThread() == NULL ||
+ // this is for background GC threads which always call this when EE is suspended.
+ IsGCSpecialThread() ||
+ (GetThread() == ThreadSuspend::GetSuspensionThread() && ThreadStore::HoldingThreadStore()));
+
+ pThread->SetHasPromotedBytes();
+
+#ifdef FEATURE_CONSERVATIVE_GC
+ if (g_pConfig->GetGCConservative())
+ {
+        // Conservative stack root reporting
+        // We will treat everything on the stack as a pinned interior GC pointer
+        // Since we report everything as pinned, we don't need to run the following code in the relocation phase.
+ if (sc->promotion)
+ {
+ Object ** topStack = (Object **)pThread->GetFrame();
+ Object ** bottomStack = (Object **) pThread->GetCachedStackBase();
+ Object ** walk;
+ for (walk = topStack; walk < bottomStack; walk ++)
+ {
+ if (((void*)*walk > (void*)bottomStack || (void*)*walk < (void*)topStack) &&
+ ((void*)*walk >= (void*)g_lowest_address && (void*)*walk <= (void*)g_highest_address)
+ )
+ {
+ //DbgPrintf("promote " FMT_ADDR " : " FMT_ADDR "\n", walk, *walk);
+ fn(walk, sc, GC_CALL_INTERIOR|GC_CALL_PINNED);
+ }
+ }
+ }
+
+ // Also ask the explicit Frames to report any references they might know about.
+ // Generally these will be a subset of the objects reported below but there's
+ // nothing that guarantees that and in the specific case of a GC protect frame the
+ // references it protects may live at a lower address than the frame itself (and
+ // thus escape the stack range we scanned above).
+ Frame *pFrame = pThread->GetFrame();
+ while (pFrame != FRAME_TOP)
+ {
+ pFrame->GcScanRoots(fn, sc);
+ pFrame = pFrame->PtrNextFrame();
+ }
+ }
+ else
+#endif
+ {
+ unsigned flagsStackWalk = ALLOW_ASYNC_STACK_WALK | ALLOW_INVALID_OBJECTS;
+#if defined(WIN64EXCEPTIONS)
+ flagsStackWalk |= GC_FUNCLET_REFERENCE_REPORTING;
+#endif // defined(WIN64EXCEPTIONS)
+ pThread->StackWalkFrames( GcStackCrawlCallBack, &gcctx, flagsStackWalk);
+ }
+}
+
+void GCToEEInterface::GcStartWork (int condemned, int max_gen)
+{
+ CONTRACTL
+ {
+ THROWS; // StubHelpers::ProcessByrefValidationList throws
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Update AppDomain stage here.
+ SystemDomain::System()->ProcessClearingDomains();
+
+#ifdef VERIFY_HEAP
+ // Validate byrefs pinned by IL stubs since the last GC.
+ StubHelpers::ProcessByrefValidationList();
+#endif // VERIFY_HEAP
+
+ ExecutionManager::CleanupCodeHeaps();
+
+#ifdef FEATURE_EVENT_TRACE
+ ETW::TypeSystemLog::Cleanup();
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ //
+    // Let GC detect managed/native cycles with input from Jupiter
+ // Jupiter will
+ // 1. Report reference from RCW to CCW based on native reference in Jupiter
+ // 2. Identify the subset of CCWs that needs to be rooted
+ //
+ // We'll build the references from RCW to CCW using
+ // 1. Preallocated arrays
+ // 2. Dependent handles
+ //
+ RCWWalker::OnGCStarted(condemned);
+#endif // FEATURE_COMINTEROP
+}
+
+void GCToEEInterface::GcDone(int condemned)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_COMINTEROP
+ //
+ // Tell Jupiter GC has finished
+ //
+ RCWWalker::OnGCFinished(condemned);
+#endif // FEATURE_COMINTEROP
+}
+
+void GCToEEInterface::GcBeforeBGCSweepWork()
+{
+ CONTRACTL
+ {
+ THROWS; // StubHelpers::ProcessByrefValidationList throws
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef VERIFY_HEAP
+ // Validate byrefs pinned by IL stubs since the last GC.
+ StubHelpers::ProcessByrefValidationList();
+#endif // VERIFY_HEAP
+}
+
+void GCToEEInterface::SyncBlockCacheDemote(int max_gen)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ SyncBlockCache::GetSyncBlockCache()->GCDone(TRUE, max_gen);
+}
+
+void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen);
+}
diff --git a/src/vm/gcenv.h b/src/vm/gcenv.h
new file mode 100644
index 0000000000..fc5ca6de86
--- /dev/null
+++ b/src/vm/gcenv.h
@@ -0,0 +1,108 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#ifndef GCENV_H_
+#define GCENV_H_
+
+//
+// Extra VM headers required to compile GC-related files
+//
+
+#include "finalizerthread.h"
+
+#include "threadsuspend.h"
+
+#ifdef FEATURE_COMINTEROP
+#include <windows.ui.xaml.h>
+#endif
+
+#include "stubhelpers.h"
+
+#include "eeprofinterfaces.inl"
+
+#ifdef GC_PROFILING
+#include "eetoprofinterfaceimpl.h"
+#include "eetoprofinterfaceimpl.inl"
+#include "profilepriv.h"
+#endif
+
+#ifdef DEBUGGING_SUPPORTED
+#include "dbginterface.h"
+#endif
+
+#ifdef FEATURE_COMINTEROP
+#include "runtimecallablewrapper.h"
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+
+#ifdef FEATURE_UEF_CHAINMANAGER
+// This is required to register our UEF callback with the UEF chain manager
+#include <mscoruefwrapper.h>
+#endif // FEATURE_UEF_CHAINMANAGER
+
+
+struct ScanContext;
+class CrawlFrame;
+
+typedef void promote_func(PTR_PTR_Object, ScanContext*, DWORD);
+
+typedef struct
+{
+ promote_func* f;
+ ScanContext* sc;
+ CrawlFrame * cf;
+} GCCONTEXT;
+
+
+class GCToEEInterface
+{
+public:
+ //
+ // Suspend/Resume callbacks
+ //
+ typedef enum
+ {
+ SUSPEND_FOR_GC = 1,
+ SUSPEND_FOR_GC_PREP = 6
+ } SUSPEND_REASON;
+
+ static void SuspendEE(SUSPEND_REASON reason);
+ static void RestartEE(BOOL bFinishedGC); //resume threads.
+
+ //
+ // The GC roots enumeration callback
+ //
+ static void ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc);
+
+ // Optional static GC refs scanning for better parallelization of server GC marking
+ static void ScanStaticGCRefsOpportunistically(promote_func* fn, ScanContext* sc);
+
+ //
+    // Callbacks issued during GC so that the execution engine can do its own bookkeeping
+ //
+
+ // start of GC call back - single threaded
+ static void GcStartWork(int condemned, int max_gen);
+
+ //EE can perform post stack scanning action, while the
+ // user threads are still suspended
+ static void AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc);
+
+ // Called before BGC starts sweeping, the heap is walkable
+ static void GcBeforeBGCSweepWork();
+
+ // post-gc callback.
+ static void GcDone(int condemned);
+
+ // Sync block cache management
+ static void SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2);
+ static void SyncBlockCacheDemote(int max_gen);
+ static void SyncBlockCachePromotionsGranted(int max_gen);
+};
+
+#endif // GCENV_H_
\ No newline at end of file
diff --git a/src/vm/gchelpers.cpp b/src/vm/gchelpers.cpp
new file mode 100644
index 0000000000..d7a1cacefe
--- /dev/null
+++ b/src/vm/gchelpers.cpp
@@ -0,0 +1,1334 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*
+ * GCHELPERS.CPP
+ *
+ * GC Allocation and Write Barrier Helpers
+ *
+
+ *
+ */
+
+#include "common.h"
+#include "object.h"
+#include "threads.h"
+#include "eetwain.h"
+#include "eeconfig.h"
+#include "gc.h"
+#include "corhost.h"
+#include "threads.h"
+#include "fieldmarshaler.h"
+#include "interoputil.h"
+#include "constrainedexecutionregion.h"
+#include "dynamicmethod.h"
+#include "stubhelpers.h"
+#include "eventtrace.h"
+
+#include "excep.h"
+
+#include "eeprofinterfaces.inl"
+
+#ifdef FEATURE_COMINTEROP
+#include "runtimecallablewrapper.h"
+#endif // FEATURE_COMINTEROP
+
+#include "rcwwalker.h"
+
+//========================================================================
+//
+// ALLOCATION HELPERS
+//
+//========================================================================
+
+#define ProfileTrackArrayAlloc(orObject) \
+ OBJECTREF objref = ObjectToOBJECTREF((Object*)orObject);\
+ GCPROTECT_BEGIN(objref);\
+ ProfilerObjectAllocatedCallback(objref, (ClassID) orObject->GetTypeHandle().AsPtr());\
+ GCPROTECT_END();\
+ orObject = (ArrayBase *) OBJECTREFToObject(objref);
+
+
+inline alloc_context* GetThreadAllocContext()
+{
+ WRAPPER_NO_CONTRACT;
+
+ assert(GCHeap::UseAllocationContexts());
+
+ return & GetThread()->m_alloc_context;
+}
+
+
+// There are only three ways to allocate an object.
+// * Call optimized helpers that were generated on the fly. This is how JIT compiled code does most
+//     allocations; however, these helpers fall back to code:Alloc for all but the most common code paths.
+//     They are NOT used if the profiler has asked to track GC allocation (see code:TrackAllocations)
+// * Call code:Alloc - When the jit helpers fall back, or we do allocations within the runtime code
+//     itself, we ultimately call here.
+// * Call code:AllocLHeap - Used very rarely to force allocation to be on the large object heap.
+//
+// While this is a choke point into allocating an object, it is primitive (it does not want to know about
+// MethodTable and thus does not initialize that pointer). It also does not know if the object is finalizable
+// or contains pointers. Thus we quickly wrap this function in more user-friendly ones that know about
+// MethodTables etc. (see code:FastAllocatePrimitiveArray code:AllocateArrayEx code:AllocateObject)
+//
+// You can get an exhaustive list of code sites that allocate GC objects by finding all calls to
+// code:ProfilerObjectAllocatedCallback (since the profiler has to hook them all).
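+//
+// For example (illustrative), code:AllocateObject below derives the size and the
+// finalizable/contains-pointers flags from the MethodTable and then calls Alloc;
+// the JIT's inline allocation helpers only reach this path on their slow cases.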
+inline Object* Alloc(size_t size, BOOL bFinalize, BOOL bContainsPointers )
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+ } CONTRACTL_END;
+
+ _ASSERTE(!NingenEnabled() && "You cannot allocate managed objects inside the ngen compilation process.");
+
+#ifdef _DEBUG
+ if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
+ {
+ char *a = new char;
+ delete a;
+ }
+#endif
+
+ DWORD flags = ((bContainsPointers ? GC_ALLOC_CONTAINS_REF : 0) |
+ (bFinalize ? GC_ALLOC_FINALIZE : 0));
+
+ Object *retVal = NULL;
+
+ // We don't want to throw an SO during the GC, so make sure we have plenty
+ // of stack before calling in.
+ INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
+ if (GCHeap::UseAllocationContexts())
+ retVal = GCHeap::GetGCHeap()->Alloc(GetThreadAllocContext(), size, flags);
+ else
+ retVal = GCHeap::GetGCHeap()->Alloc(size, flags);
+ END_INTERIOR_STACK_PROBE;
+ return retVal;
+}
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+// Helper for allocating 8-byte aligned objects (on platforms where this doesn't happen naturally, e.g. 32-bit
+// platforms).
+inline Object* AllocAlign8(size_t size, BOOL bFinalize, BOOL bContainsPointers, BOOL bAlignBias)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+ } CONTRACTL_END;
+
+ DWORD flags = ((bContainsPointers ? GC_ALLOC_CONTAINS_REF : 0) |
+ (bFinalize ? GC_ALLOC_FINALIZE : 0) |
+ (bAlignBias ? GC_ALLOC_ALIGN8_BIAS : 0));
+
+ Object *retVal = NULL;
+
+ // We don't want to throw an SO during the GC, so make sure we have plenty
+ // of stack before calling in.
+ INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
+ if (GCHeap::UseAllocationContexts())
+ retVal = GCHeap::GetGCHeap()->AllocAlign8(GetThreadAllocContext(), size, flags);
+ else
+ retVal = GCHeap::GetGCHeap()->AllocAlign8(size, flags);
+
+ END_INTERIOR_STACK_PROBE;
+ return retVal;
+}
+#endif // FEATURE_64BIT_ALIGNMENT
+
+// This is one of three ways of allocating an object (see code:Alloc for more). This variation is used in the
+// rare circumstance when you want to allocate an object on the large object heap but the object is not big
+// enough to naturally go there.
+//
+// One (and only?) example of where this is needed is the 8-byte alignment of arrays of doubles. See
+// code:EEConfig.GetDoubleArrayToLargeObjectHeapThreshold and code:CORINFO_HELP_NEWARR_1_ALIGN8 for more.
+inline Object* AllocLHeap(size_t size, BOOL bFinalize, BOOL bContainsPointers )
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative (don't assume large heap doesn't compact!)
+ } CONTRACTL_END;
+
+
+ _ASSERTE(!NingenEnabled() && "You cannot allocate managed objects inside the ngen compilation process.");
+
+#ifdef _DEBUG
+ if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
+ {
+ char *a = new char;
+ delete a;
+ }
+#endif
+
+ DWORD flags = ((bContainsPointers ? GC_ALLOC_CONTAINS_REF : 0) |
+ (bFinalize ? GC_ALLOC_FINALIZE : 0));
+
+ Object *retVal = NULL;
+
+ // We don't want to throw an SO during the GC, so make sure we have plenty
+ // of stack before calling in.
+ INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
+ retVal = GCHeap::GetGCHeap()->AllocLHeap(size, flags);
+ END_INTERIOR_STACK_PROBE;
+ return retVal;
+}
+
+
+#ifdef _LOGALLOC
+int g_iNumAllocs = 0;
+
+bool ToLogOrNotToLog(size_t size, const char *typeName)
+{
+ WRAPPER_NO_CONTRACT;
+
+ g_iNumAllocs++;
+
+ if (g_iNumAllocs > g_pConfig->AllocNumThreshold())
+ return true;
+
+ if (size > (size_t)g_pConfig->AllocSizeThreshold())
+ return true;
+
+ if (g_pConfig->ShouldLogAlloc(typeName))
+ return true;
+
+ return false;
+
+}
+
+// READ THIS!!!!!
+// this function is called on the managed allocation path with an unprotected Object*;
+// as a result LogAlloc cannot call anything that would toggle the GC mode, else
+// you'll introduce several GC holes!
+inline void LogAlloc(size_t size, MethodTable *pMT, Object* object)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+#ifdef LOGGING
+ if (LoggingOn(LF_GCALLOC, LL_INFO10))
+ {
+ LogSpewAlways("Allocated %5d bytes for %s_TYPE" FMT_ADDR FMT_CLASS "\n",
+ size,
+ pMT->IsValueType() ? "VAL" : "REF",
+ DBG_ADDR(object),
+ DBG_CLASS_NAME_MT(pMT));
+
+ if (LoggingOn(LF_GCALLOC, LL_INFO1000000) ||
+ (LoggingOn(LF_GCALLOC, LL_INFO100) &&
+ ToLogOrNotToLog(size, DBG_CLASS_NAME_MT(pMT))))
+ {
+ void LogStackTrace();
+ LogStackTrace();
+ }
+ }
+#endif
+}
+#else
+#define LogAlloc(size, pMT, object)
+#endif
+
+
+inline SIZE_T MaxArrayLength(SIZE_T componentSize)
+{
+    // Impose limits on the maximum array length in each dimension to allow efficient
+    // implementation of advanced range check elimination in the future. We have to allow
+    // a higher limit for arrays of bytes (or one-byte structs) for backward compatibility.
+    // Keep in sync with Array.MaxArrayLength in BCL.
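+    // For example, MaxArrayLength(1) == 0x7FFFFFC7 (2,147,483,591 elements for a
+    // byte[]), while MaxArrayLength(4) == 0x7FEFFFFF (2,146,435,071 for an int[]).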
+ return (componentSize == 1) ? 0X7FFFFFC7 : 0X7FEFFFFF;
+}
+
+OBJECTREF AllocateValueSzArray(TypeHandle elementType, INT32 length)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+ } CONTRACTL_END;
+
+ return AllocateArrayEx(elementType.MakeSZArray(), &length, 1);
+}
+
+void ThrowOutOfMemoryDimensionsExceeded()
+{
+ CONTRACTL {
+ THROWS;
+ } CONTRACTL_END;
+
+#ifdef _WIN64
+ EX_THROW(EEMessageException, (kOutOfMemoryException, IDS_EE_ARRAY_DIMENSIONS_EXCEEDED));
+#else
+ ThrowOutOfMemory();
+#endif
+}
+
+//
+// Handles arrays of arbitrary dimensions
+//
+// If dwNumArgs is set to greater than 1 for a SZARRAY this function will recursively
+// allocate sub-arrays and fill them in.
+//
+// For arrays with lower bounds, pArgs is <lower bound 1>, <count 1>, <lower bound 2>, ...
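+//
+// For example (illustrative), a rank-2 array with bounds [2..5, 0..9] would be
+// allocated with dwNumArgs == 4 and pArgs == { 2, 4, 0, 10 } -- i.e.
+// <lower bound, length> pairs -- while a simple new int[10] uses dwNumArgs == 1
+// and pArgs == { 10 }.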
+OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap
+ DEBUG_ARG(BOOL bDontSetAppDomain))
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+ PRECONDITION(CheckPointer(pArgs));
+ PRECONDITION(dwNumArgs > 0);
+ } CONTRACTL_END;
+
+ ArrayBase * orArray = NULL;
+
+#ifdef _DEBUG
+ if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
+ {
+ char *a = new char;
+ delete a;
+ }
+#endif
+
+ ArrayTypeDesc* arrayDesc = arrayType.AsArray();
+ MethodTable* pArrayMT = arrayDesc->GetMethodTable();
+ _ASSERTE(pArrayMT->CheckInstanceActivated());
+ PREFIX_ASSUME(pArrayMT != NULL);
+ CorElementType kind = arrayType.GetInternalCorElementType();
+ _ASSERTE(kind == ELEMENT_TYPE_ARRAY || kind == ELEMENT_TYPE_SZARRAY);
+
+ CorElementType elemType = arrayDesc->GetTypeParam().GetInternalCorElementType();
+ // Disallow the creation of void[,] (a multi-dim array of System.Void)
+ if (elemType == ELEMENT_TYPE_VOID)
+ COMPlusThrow(kArgumentException);
+
+ // Calculate the total number of elements in the array
+ UINT32 cElements;
+
+ // IBC Log MethodTable access
+ g_IBCLogger.LogMethodTableAccess(pArrayMT);
+ SetTypeHandleOnThreadForAlloc(arrayType);
+
+ SIZE_T componentSize = pArrayMT->GetComponentSize();
+ bool maxArrayDimensionLengthOverflow = false;
+ bool providedLowerBounds = false;
+
+ if (kind == ELEMENT_TYPE_ARRAY)
+ {
+ unsigned rank = arrayDesc->GetRank();
+ _ASSERTE(dwNumArgs == rank || dwNumArgs == 2*rank);
+
+        // Morph an ARRAY of rank 1 with lower bound 0 into an SZARRAY
+ if (rank == 1 && (dwNumArgs == 1 || pArgs[0] == 0))
+ { // lower bound is zero
+
+ // This recursive call doesn't go any farther, because the dwNumArgs will be 1,
+ // so don't bother with stack probe.
+ TypeHandle szArrayType = ClassLoader::LoadArrayTypeThrowing(arrayDesc->GetArrayElementTypeHandle(), ELEMENT_TYPE_SZARRAY, 1);
+ return AllocateArrayEx(szArrayType, &pArgs[dwNumArgs - 1], 1, bAllocateInLargeHeap DEBUG_ARG(bDontSetAppDomain));
+ }
+
+ providedLowerBounds = (dwNumArgs == 2*rank);
+
+ S_UINT32 safeTotalElements = S_UINT32(1);
+
+ for (unsigned i = 0; i < dwNumArgs; i++)
+ {
+ int lowerBound = 0;
+ if (providedLowerBounds)
+ {
+ lowerBound = pArgs[i];
+ i++;
+ }
+ int length = pArgs[i];
+ if (length < 0)
+ COMPlusThrow(kOverflowException);
+ if ((SIZE_T)length > MaxArrayLength(componentSize))
+ maxArrayDimensionLengthOverflow = true;
+ if ((length > 0) && (lowerBound + (length - 1) < lowerBound))
+ COMPlusThrow(kArgumentOutOfRangeException, W("ArgumentOutOfRange_ArrayLBAndLength"));
+ safeTotalElements = safeTotalElements * S_UINT32(length);
+ if (safeTotalElements.IsOverflow())
+ ThrowOutOfMemoryDimensionsExceeded();
+ }
+
+ cElements = safeTotalElements.Value();
+ }
+ else
+ {
+ int length = pArgs[0];
+ if (length < 0)
+ COMPlusThrow(kOverflowException);
+ if ((SIZE_T)length > MaxArrayLength(componentSize))
+ maxArrayDimensionLengthOverflow = true;
+ cElements = length;
+ }
+
+ // Throw this exception only after everything else was validated for backward compatibility.
+ if (maxArrayDimensionLengthOverflow)
+ ThrowOutOfMemoryDimensionsExceeded();
+
+ // Allocate the space from the GC heap
+ S_SIZE_T safeTotalSize = S_SIZE_T(cElements) * S_SIZE_T(componentSize) + S_SIZE_T(pArrayMT->GetBaseSize());
+ if (safeTotalSize.IsOverflow())
+ ThrowOutOfMemoryDimensionsExceeded();
+
+ size_t totalSize = safeTotalSize.Value();
+
+#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
+ if ((elemType == ELEMENT_TYPE_R8) &&
+ (cElements >= g_pConfig->GetDoubleArrayToLargeObjectHeapThreshold()))
+ {
+ STRESS_LOG2(LF_GC, LL_INFO10, "Allocating double MD array of size %d and length %d to large object heap\n", totalSize, cElements);
+ bAllocateInLargeHeap = TRUE;
+ }
+#endif
+
+ if (bAllocateInLargeHeap)
+ {
+ orArray = (ArrayBase *) AllocLHeap(totalSize, FALSE, pArrayMT->ContainsPointers());
+ orArray->SetMethodTableForLargeObject(pArrayMT);
+ }
+ else
+ {
+#ifdef FEATURE_64BIT_ALIGNMENT
+ MethodTable *pElementMT = arrayDesc->GetTypeParam().GetMethodTable();
+ if (pElementMT->RequiresAlign8() && pElementMT->IsValueType())
+ {
+ // This platform requires that certain fields are 8-byte aligned (and the runtime doesn't provide
+ // this guarantee implicitly, e.g. on 32-bit platforms). Since it's the array payload, not the
+ // header that requires alignment we need to be careful. However it just so happens that all the
+ // cases we care about (single and multi-dim arrays of value types) have an even number of DWORDs
+ // in their headers so the alignment requirements for the header and the payload are the same.
+ _ASSERTE(((pArrayMT->GetBaseSize() - SIZEOF_OBJHEADER) & 7) == 0);
+ orArray = (ArrayBase *) AllocAlign8(totalSize, FALSE, pArrayMT->ContainsPointers(), FALSE);
+ }
+ else
+#endif
+ {
+ orArray = (ArrayBase *) Alloc(totalSize, FALSE, pArrayMT->ContainsPointers());
+ }
+ orArray->SetMethodTable(pArrayMT);
+ }
+
+ // Initialize Object
+ orArray->m_NumComponents = cElements;
+
+ if (bAllocateInLargeHeap ||
+ (totalSize >= LARGE_OBJECT_SIZE))
+ {
+ GCHeap::GetGCHeap()->PublishObject((BYTE*)orArray);
+ }
+
+#ifdef _LOGALLOC
+ LogAlloc(totalSize, pArrayMT, orArray);
+#endif // _LOGALLOC
+
+#ifdef _DEBUG
+ // Ensure the typehandle has been interned prior to allocation.
+ // This is important for OOM reliability.
+ OBJECTREF objref = ObjectToOBJECTREF((Object *) orArray);
+ GCPROTECT_BEGIN(objref);
+
+ orArray->GetTypeHandle();
+
+ GCPROTECT_END();
+ orArray = (ArrayBase *) OBJECTREFToObject(objref);
+#endif
+
+#if CHECK_APP_DOMAIN_LEAKS
+ if (!bDontSetAppDomain && g_pConfig->AppDomainLeaks())
+ orArray->SetAppDomain();
+#endif
+
+ if (kind == ELEMENT_TYPE_ARRAY)
+ {
+ INT32 *pCountsPtr = (INT32 *) orArray->GetBoundsPtr();
+ INT32 *pLowerBoundsPtr = (INT32 *) orArray->GetLowerBoundsPtr();
+ for (unsigned i = 0; i < dwNumArgs; i++)
+ {
+ if (providedLowerBounds)
+ *pLowerBoundsPtr++ = pArgs[i++]; // if not stated, lower bound becomes 0
+ *pCountsPtr++ = pArgs[i];
+ }
+ }
+
+ // Notify the profiler of the allocation
+ // do this after initializing bounds so callback has size information
+ if (TrackAllocations())
+ {
+ ProfileTrackArrayAlloc(orArray);
+ }
+
+#ifdef FEATURE_EVENT_TRACE
+ // Send ETW event for allocation
+ if(ETW::TypeSystemLog::IsHeapAllocEventEnabled())
+ {
+ ETW::TypeSystemLog::SendObjectAllocatedEvent(orArray);
+ }
+#endif // FEATURE_EVENT_TRACE
+
+ if (kind != ELEMENT_TYPE_ARRAY)
+ {
+ // Handle allocating multiple jagged array dimensions at once
+ if (dwNumArgs > 1)
+ {
+ PTRARRAYREF outerArray = (PTRARRAYREF) ObjectToOBJECTREF((Object *) orArray);
+ GCPROTECT_BEGIN(outerArray);
+
+ // Turn off GC stress, it is of little value here
+ {
+ GCStressPolicy::InhibitHolder iholder;
+
+                // Allocate the sub-arrays for the remaining dimensions
+ if (!arrayDesc->GetArrayElementTypeHandle().IsArray())
+ {
+ orArray = NULL;
+ }
+ else
+ {
+ // Since we're about to *really* recurse, probe for stack.
+ // @todo: is the default amount really correct?
+ _ASSERTE(GetThread());
+ INTERIOR_STACK_PROBE(GetThread());
+
+ TypeHandle subArrayType = arrayDesc->GetArrayElementTypeHandle();
+ for (UINT32 i = 0; i < cElements; i++)
+ {
+ OBJECTREF obj = AllocateArrayEx(subArrayType, &pArgs[1], dwNumArgs-1, bAllocateInLargeHeap DEBUG_ARG(bDontSetAppDomain));
+ outerArray->SetAt(i, obj);
+ }
+
+ iholder.Release();
+
+ END_INTERIOR_STACK_PROBE
+
+ orArray = (ArrayBase *) OBJECTREFToObject(outerArray);
+ }
+ } // GcStressPolicy::~InhibitHolder()
+
+ GCPROTECT_END();
+ }
+ }
+
+ return ObjectToOBJECTREF((Object *) orArray);
+}
+
+/*
+ * Allocates a single dimensional array of primitive types.
+ */
+OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements, BOOL bAllocateInLargeHeap)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+ }
+ CONTRACTL_END
+
+
+    // Allocating simple primitive arrays is done in various places as internal storage.
+    // Because this is unlikely to result in any bad recursions, we will override the type limit
+    // here rather than forever chasing down all the callers.
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ _ASSERTE(CorTypeInfo::IsPrimitiveType(type));
+
+ // Fetch the proper array type
+ if (g_pPredefinedArrayTypes[type] == NULL)
+ {
+ TypeHandle elemType = TypeHandle(MscorlibBinder::GetElementType(type));
+ TypeHandle typHnd = ClassLoader::LoadArrayTypeThrowing(elemType, ELEMENT_TYPE_SZARRAY, 0);
+ g_pPredefinedArrayTypes[type] = typHnd.AsArray();
+ }
+ return FastAllocatePrimitiveArray(g_pPredefinedArrayTypes[type]->GetMethodTable(), cElements, bAllocateInLargeHeap);
+}
+
+/*
+ * Allocates a single dimensional array of primitive types.
+ */
+
+OBJECTREF FastAllocatePrimitiveArray(MethodTable* pMT, DWORD cElements, BOOL bAllocateInLargeHeap)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+ PRECONDITION(pMT->CheckInstanceActivated());
+ } CONTRACTL_END;
+
+#ifdef _DEBUG
+ if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
+ {
+ char *a = new char;
+ delete a;
+ }
+#endif
+
+ _ASSERTE(pMT && pMT->IsArray());
+ _ASSERTE(pMT->IsRestored_NoLogging());
+ _ASSERTE(CorTypeInfo::IsPrimitiveType(pMT->GetArrayElementType()) &&
+ g_pPredefinedArrayTypes[pMT->GetArrayElementType()] != NULL);
+
+ g_IBCLogger.LogMethodTableAccess(pMT);
+ SetTypeHandleOnThreadForAlloc(TypeHandle(pMT));
+
+ SIZE_T componentSize = pMT->GetComponentSize();
+ if (cElements > MaxArrayLength(componentSize))
+ ThrowOutOfMemory();
+
+ S_SIZE_T safeTotalSize = S_SIZE_T(cElements) * S_SIZE_T(componentSize) + S_SIZE_T(pMT->GetBaseSize());
+ if (safeTotalSize.IsOverflow())
+ ThrowOutOfMemory();
+
+ size_t totalSize = safeTotalSize.Value();
+
+ BOOL bPublish = bAllocateInLargeHeap;
+
+ ArrayBase* orObject;
+ if (bAllocateInLargeHeap)
+ {
+ orObject = (ArrayBase*) AllocLHeap(totalSize, FALSE, FALSE);
+ }
+ else
+ {
+ ArrayTypeDesc *pArrayR8TypeDesc = g_pPredefinedArrayTypes[ELEMENT_TYPE_R8];
+ if (DATA_ALIGNMENT < sizeof(double) && pArrayR8TypeDesc != NULL && pMT == pArrayR8TypeDesc->GetMethodTable() && totalSize < LARGE_OBJECT_SIZE - MIN_OBJECT_SIZE)
+ {
+ // Creation of an array of doubles, not in the large object heap.
+ // We want to align the doubles to 8 byte boundaries, but the GC gives us pointers aligned
+ // to 4 bytes only (on 32 bit platforms). To align, we ask for 12 bytes more to fill with a
+ // dummy object.
+            // If the GC gives us an 8 byte aligned address, we use it for the array and place the dummy
+            // object after the array; otherwise we put the dummy object first, shifting the base of
+            // the array to an 8 byte aligned address.
+ // Note: on 64 bit platforms, the GC always returns 8 byte aligned addresses, and we don't
+ // execute this code because DATA_ALIGNMENT < sizeof(double) is false.
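+            //
+            // For example (illustrative, assuming MIN_OBJECT_SIZE == 12 as on 32-bit):
+            // if Alloc returns an address ending in 0x...004, the dummy object occupies
+            // those first 12 bytes and the array starts 8-byte aligned at 0x...010;
+            // if Alloc returns 0x...008, the array starts there and the dummy object
+            // is placed just past its end.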
+
+ _ASSERTE(DATA_ALIGNMENT == sizeof(double)/2);
+ _ASSERTE((MIN_OBJECT_SIZE % sizeof(double)) == DATA_ALIGNMENT); // used to change alignment
+ _ASSERTE(pMT->GetComponentSize() == sizeof(double));
+ _ASSERTE(g_pObjectClass->GetBaseSize() == MIN_OBJECT_SIZE);
+ _ASSERTE(totalSize < totalSize + MIN_OBJECT_SIZE);
+ orObject = (ArrayBase*) Alloc(totalSize + MIN_OBJECT_SIZE, FALSE, FALSE);
+
+ Object *orDummyObject;
+ if((size_t)orObject % sizeof(double))
+ {
+ orDummyObject = orObject;
+ orObject = (ArrayBase*) ((size_t)orObject + MIN_OBJECT_SIZE);
+ }
+ else
+ {
+ orDummyObject = (Object*) ((size_t)orObject + totalSize);
+ }
+ _ASSERTE(((size_t)orObject % sizeof(double)) == 0);
+ orDummyObject->SetMethodTable(g_pObjectClass);
+ }
+ else
+ {
+ orObject = (ArrayBase*) Alloc(totalSize, FALSE, FALSE);
+ bPublish = (totalSize >= LARGE_OBJECT_SIZE);
+ }
+ }
+
+ // Initialize Object
+ orObject->SetMethodTable( pMT );
+ _ASSERTE(orObject->GetMethodTable() != NULL);
+ orObject->m_NumComponents = cElements;
+
+ if (bPublish)
+ {
+ GCHeap::GetGCHeap()->PublishObject((BYTE*)orObject);
+ }
+
+ // Notify the profiler of the allocation
+ if (TrackAllocations())
+ {
+ OBJECTREF objref = ObjectToOBJECTREF((Object*)orObject);
+ GCPROTECT_BEGIN(objref);
+ ProfilerObjectAllocatedCallback(objref, (ClassID) orObject->GetTypeHandle().AsPtr());
+ GCPROTECT_END();
+
+ orObject = (ArrayBase *) OBJECTREFToObject(objref);
+ }
+
+#ifdef FEATURE_EVENT_TRACE
+ // Send ETW event for allocation
+ if(ETW::TypeSystemLog::IsHeapAllocEventEnabled())
+ {
+ ETW::TypeSystemLog::SendObjectAllocatedEvent(orObject);
+ }
+#endif // FEATURE_EVENT_TRACE
+
+ // IBC Log MethodTable access
+ g_IBCLogger.LogMethodTableAccess(pMT);
+
+ LogAlloc(totalSize, pMT, orObject);
+
+#if CHECK_APP_DOMAIN_LEAKS
+ if (g_pConfig->AppDomainLeaks())
+ orObject->SetAppDomain();
+#endif
+
+ return( ObjectToOBJECTREF((Object*)orObject) );
+}
+
+//
+// Allocate an array which is the same size as pRef. However, do not zero out the array.
+//
+OBJECTREF DupArrayForCloning(BASEARRAYREF pRef, BOOL bAllocateInLargeHeap)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+ } CONTRACTL_END;
+
+ ArrayTypeDesc arrayType(pRef->GetMethodTable(), pRef->GetArrayElementTypeHandle());
+ unsigned rank = arrayType.GetRank();
+
+ DWORD numArgs = rank*2;
+ INT32* args = (INT32*) _alloca(sizeof(INT32)*numArgs);
+
+ if (arrayType.GetInternalCorElementType() == ELEMENT_TYPE_ARRAY)
+ {
+ const INT32* bounds = pRef->GetBoundsPtr();
+ const INT32* lowerBounds = pRef->GetLowerBoundsPtr();
+ for(unsigned int i=0; i < rank; i++)
+ {
+ args[2*i] = lowerBounds[i];
+ args[2*i+1] = bounds[i];
+ }
+ }
+ else
+ {
+ numArgs = 1;
+ args[0] = pRef->GetNumComponents();
+ }
+ return AllocateArrayEx(TypeHandle(&arrayType), args, numArgs, bAllocateInLargeHeap DEBUG_ARG(FALSE));
+}
+
+#if defined(_TARGET_X86_)
+
+// The fast version always allocates in the normal heap
+OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+ } CONTRACTL_END;
+
+#ifdef _DEBUG
+ // fastPrimitiveArrayAllocator is called by VM and managed code. If called from managed code, we
+ // make sure that the thread is in SOTolerantState.
+#ifdef FEATURE_STACK_PROBE
+ Thread::DisableSOCheckInHCALL disableSOCheckInHCALL;
+#endif // FEATURE_STACK_PROBE
+#endif // _DEBUG
+ return OBJECTREF( HCCALL2(fastPrimitiveArrayAllocator, type, cElements) );
+}
+
+// The fast version always allocates in the normal heap
+OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle ElementType)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+ } CONTRACTL_END;
+
+
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ // We must call this here to ensure the typehandle for this object is
+ // interned before the object is allocated. As soon as the object is allocated,
+ // the profiler could do a heapwalk and it expects to find an interned
+ // typehandle for every object in the heap.
+ TypeHandle ArrayType = ClassLoader::LoadArrayTypeThrowing(ElementType);
+
+#ifdef _DEBUG
+ // fastObjectArrayAllocator is called by VM and managed code. If called from managed code, we
+ // make sure that the thread is in SOTolerantState.
+#ifdef FEATURE_STACK_PROBE
+ Thread::DisableSOCheckInHCALL disableSOCheckInHCALL;
+#endif // FEATURE_STACK_PROBE
+#endif // _DEBUG
+ return OBJECTREF( HCCALL2(fastObjectArrayAllocator, ArrayType.AsPtr(), cElements));
+}
+
+STRINGREF AllocateString( DWORD cchStringLength )
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+ } CONTRACTL_END;
+
+#ifdef _DEBUG
+ // fastStringAllocator is called by VM and managed code. If called from managed code, we
+ // make sure that the thread is in SOTolerantState.
+#ifdef FEATURE_STACK_PROBE
+ Thread::DisableSOCheckInHCALL disableSOCheckInHCALL;
+#endif // FEATURE_STACK_PROBE
+#endif // _DEBUG
+ return STRINGREF(HCCALL1(fastStringAllocator, cchStringLength));
+}
+
+#endif
+
+//
+// Helper for parts of the EE which are allocating arrays
+//
+OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle elementType, BOOL bAllocateInLargeHeap)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+ } CONTRACTL_END;
+
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ // The object array class is loaded at startup.
+ _ASSERTE(g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT] != NULL);
+
+#ifdef _DEBUG
+ ArrayTypeDesc arrayType(g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable(), elementType);
+ _ASSERTE(arrayType.GetRank() == 1);
+ _ASSERTE(arrayType.GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY);
+#endif //_DEBUG
+
+ return AllocateArrayEx(ClassLoader::LoadArrayTypeThrowing(elementType),
+ (INT32 *)(&cElements),
+ 1,
+ bAllocateInLargeHeap
+ DEBUG_ARG(FALSE));
+}
+
+
+STRINGREF SlowAllocateString( DWORD cchStringLength )
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+ } CONTRACTL_END;
+
+ StringObject *orObject = NULL;
+
+#ifdef _DEBUG
+ if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
+ {
+ char *a = new char;
+ delete a;
+ }
+#endif
+
+ // Limit the maximum string size to <2GB to mitigate risk of security issues caused by 32-bit integer
+ // overflows in buffer size calculations.
+ if (cchStringLength > 0x3FFFFFDF)
+ ThrowOutOfMemory();
+
+ SIZE_T ObjectSize = PtrAlign(StringObject::GetSize(cchStringLength));
+ _ASSERTE(ObjectSize > cchStringLength);
+
+ SetTypeHandleOnThreadForAlloc(TypeHandle(g_pStringClass));
+
+ orObject = (StringObject *)Alloc( ObjectSize, FALSE, FALSE );
+
+ // Object is zero-init already
+ _ASSERTE( orObject->HasEmptySyncBlockInfo() );
+
+ // Initialize Object
+ //<TODO>@TODO need to build a LARGE g_pStringMethodTable before</TODO>
+ orObject->SetMethodTable( g_pStringClass );
+ orObject->SetStringLength( cchStringLength );
+
+ if (ObjectSize >= LARGE_OBJECT_SIZE)
+ {
+ GCHeap::GetGCHeap()->PublishObject((BYTE*)orObject);
+ }
+
+ // Notify the profiler of the allocation
+ if (TrackAllocations())
+ {
+ OBJECTREF objref = ObjectToOBJECTREF((Object*)orObject);
+ GCPROTECT_BEGIN(objref);
+ ProfilerObjectAllocatedCallback(objref, (ClassID) orObject->GetTypeHandle().AsPtr());
+ GCPROTECT_END();
+
+ orObject = (StringObject *) OBJECTREFToObject(objref);
+ }
+
+#ifdef FEATURE_EVENT_TRACE
+ // Send ETW event for allocation
+ if(ETW::TypeSystemLog::IsHeapAllocEventEnabled())
+ {
+ ETW::TypeSystemLog::SendObjectAllocatedEvent(orObject);
+ }
+#endif // FEATURE_EVENT_TRACE
+
+ LogAlloc(ObjectSize, g_pStringClass, orObject);
+
+#if CHECK_APP_DOMAIN_LEAKS
+ if (g_pConfig->AppDomainLeaks())
+ orObject->SetAppDomain();
+#endif
+
+ return( ObjectToSTRINGREF(orObject) );
+}
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+// OBJECTREF AllocateComClassObject(ComClassFactory* pComClsFac)
+void AllocateComClassObject(ComClassFactory* pComClsFac, OBJECTREF* ppRefClass)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref (out param) without pinning it => cooperative
+ PRECONDITION(CheckPointer(pComClsFac));
+ PRECONDITION(CheckPointer(ppRefClass));
+ } CONTRACTL_END;
+
+ // Create a COM+ Class object.
+ MethodTable *pMT = g_pRuntimeTypeClass;
+ _ASSERTE(pMT != NULL);
+ *ppRefClass= AllocateObject(pMT);
+
+ if (*ppRefClass != NULL)
+ {
+ SyncBlock* pSyncBlock = (*((REFLECTCLASSBASEREF*) ppRefClass))->GetSyncBlock();
+
+ // <TODO> This needs to support a COM version of ReflectClass. Right now we
+ // still work as we used to <darylo> </TODO>
+ MethodTable* pComMT = g_pBaseCOMObject;
+ _ASSERTE(pComMT != NULL);
+
+ // class for ComObject
+ (*((REFLECTCLASSBASEREF*) ppRefClass))->SetType(TypeHandle(pComMT));
+
+ pSyncBlock->GetInteropInfo()->SetComClassFactory(pComClsFac);
+ }
+}
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+// AllocateObject will throw OutOfMemoryException so don't need to check
+// for NULL return value from it.
+OBJECTREF AllocateObject(MethodTable *pMT
+#ifdef FEATURE_COMINTEROP
+ , bool fHandleCom
+#endif
+ )
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(pMT->CheckInstanceActivated());
+ } CONTRACTL_END;
+
+ Object *orObject = NULL;
+    // use unchecked oref here to avoid triggering the assert in Validate that the AD is
+    // not set, because it isn't until near the end of the function, at which point we can
+    // allow the check.
+ _UNCHECKED_OBJECTREF oref;
+
+ g_IBCLogger.LogMethodTableAccess(pMT);
+ SetTypeHandleOnThreadForAlloc(TypeHandle(pMT));
+
+ if (pMT->HasCriticalFinalizer())
+ PrepareCriticalFinalizerObject(pMT);
+
+#ifdef FEATURE_COMINTEROP
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ if (fHandleCom && pMT->IsComObjectType() && !pMT->IsWinRTObjectType())
+ {
+        // Creating an instance of __ComObject here is not allowed, as we don't know what COM object to create
+ if (pMT == g_pBaseCOMObject)
+ COMPlusThrow(kInvalidComObjectException, IDS_EE_NO_BACKING_CLASS_FACTORY);
+
+ oref = OBJECTREF_TO_UNCHECKED_OBJECTREF(AllocateComObject_ForManaged(pMT));
+ }
+ else
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+#endif // FEATURE_COMINTEROP
+ {
+ DWORD baseSize = pMT->GetBaseSize();
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+ if (pMT->RequiresAlign8())
+ {
+            // The last argument to the allocation indicates whether the alignment should be "biased". This
+ // means that the object is allocated so that its header lies exactly between two 8-byte
+ // boundaries. This is required in cases where we need to mis-align the header in order to align
+ // the actual payload. Currently this is false for classes (where we apply padding to ensure the
+ // first field is aligned relative to the header) and true for boxed value types (where we can't
+ // do the same padding without introducing more complexity in type layout and unboxing stubs).
+ _ASSERTE(sizeof(Object) == 4);
+ orObject = (Object *) AllocAlign8(baseSize,
+ pMT->HasFinalizer(),
+ pMT->ContainsPointers(),
+ pMT->IsValueType());
+ }
+ else
+#endif // FEATURE_64BIT_ALIGNMENT
+ {
+ orObject = (Object *) Alloc(baseSize,
+ pMT->HasFinalizer(),
+ pMT->ContainsPointers());
+ }
+
+ // verify zero'd memory (at least for sync block)
+ _ASSERTE( orObject->HasEmptySyncBlockInfo() );
+
+
+ if ((baseSize >= LARGE_OBJECT_SIZE))
+ {
+ orObject->SetMethodTableForLargeObject(pMT);
+ GCHeap::GetGCHeap()->PublishObject((BYTE*)orObject);
+ }
+ else
+ {
+ orObject->SetMethodTable(pMT);
+ }
+
+#if CHECK_APP_DOMAIN_LEAKS
+ if (g_pConfig->AppDomainLeaks())
+ orObject->SetAppDomain();
+ else
+#endif
+ if (pMT->HasFinalizer())
+ orObject->SetAppDomain();
+
+ // Notify the profiler of the allocation
+ if (TrackAllocations())
+ {
+ OBJECTREF objref = ObjectToOBJECTREF((Object*)orObject);
+ GCPROTECT_BEGIN(objref);
+ ProfilerObjectAllocatedCallback(objref, (ClassID) orObject->GetTypeHandle().AsPtr());
+ GCPROTECT_END();
+
+ orObject = (Object *) OBJECTREFToObject(objref);
+ }
+
+#ifdef FEATURE_EVENT_TRACE
+ // Send ETW event for allocation
+ if(ETW::TypeSystemLog::IsHeapAllocEventEnabled())
+ {
+ ETW::TypeSystemLog::SendObjectAllocatedEvent(orObject);
+ }
+#endif // FEATURE_EVENT_TRACE
+
+ LogAlloc(pMT->GetBaseSize(), pMT, orObject);
+
+ oref = OBJECTREF_TO_UNCHECKED_OBJECTREF(orObject);
+ }
+
+ return UNCHECKED_OBJECTREF_TO_OBJECTREF(oref);
+}
+
+//========================================================================
+//
+// WRITE BARRIER HELPERS
+//
+//========================================================================
+
+
+#if defined(_WIN64)
+// Card byte shift is different on 64bit.
+#define card_byte_shift 11
+#else
+#define card_byte_shift 10
+#endif
+
+#define card_byte(addr) (((size_t)(addr)) >> card_byte_shift)
+#define card_bit(addr) (1 << ((((size_t)(addr)) >> (card_byte_shift - 3)) & 7))
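+
+// For example, on 64-bit a store to a heap address dirties the card byte at
+// g_card_table + (addr >> 11), so each card byte covers 2KB (1KB on 32-bit) of
+// heap. The C++ barriers below set the whole byte to 0xFF rather than just the
+// single bit that card_bit(addr) would select.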
+
+
+#ifdef FEATURE_USE_ASM_GC_WRITE_BARRIERS
+
+// implemented in assembly
+// extern "C" HCIMPL2_RAW(VOID, JIT_CheckedWriteBarrier, Object **dst, Object *refUNSAFE)
+// extern "C" HCIMPL2_RAW(VOID, JIT_WriteBarrier, Object **dst, Object *refUNSAFE)
+
+#else // FEATURE_USE_ASM_GC_WRITE_BARRIERS
+
+// NOTE: non-ASM write barriers only work with Workstation GC.
+
+#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
+static UINT64 CheckedBarrierCount = 0;
+static UINT64 CheckedBarrierRetBufCount = 0;
+static UINT64 CheckedBarrierByrefArgCount = 0;
+static UINT64 CheckedBarrierByrefOtherLocalCount = 0;
+static UINT64 CheckedBarrierAddrOfLocalCount = 0;
+static UINT64 UncheckedBarrierCount = 0;
+static UINT64 CheckedAfterHeapFilter = 0;
+static UINT64 CheckedAfterRefInEphemFilter = 0;
+static UINT64 CheckedAfterAlreadyDirtyFilter = 0;
+static UINT64 CheckedDestInEphem = 0;
+static UINT64 UncheckedAfterRefInEphemFilter = 0;
+static UINT64 UncheckedAfterAlreadyDirtyFilter = 0;
+static UINT64 UncheckedDestInEphem = 0;
+
+const unsigned BarrierCountPrintInterval = 1000000;
+static unsigned CheckedBarrierInterval = BarrierCountPrintInterval;
+static unsigned UncheckedBarrierInterval = BarrierCountPrintInterval;
+
+
+void IncCheckedBarrierCount()
+{
+ ++CheckedBarrierCount;
+ if (--CheckedBarrierInterval == 0)
+ {
+ CheckedBarrierInterval = BarrierCountPrintInterval;
+ printf("GC write barrier counts: checked = %lld, unchecked = %lld, total = %lld.\n",
+ CheckedBarrierCount, UncheckedBarrierCount, (CheckedBarrierCount + UncheckedBarrierCount));
+ printf(" [Checked: %lld after heap check, %lld after ephem check, %lld after already dirty check.]\n",
+ CheckedAfterHeapFilter, CheckedAfterRefInEphemFilter, CheckedAfterAlreadyDirtyFilter);
+ printf(" [Unchecked: %lld after ephem check, %lld after already dirty check.]\n",
+ UncheckedAfterRefInEphemFilter, UncheckedAfterAlreadyDirtyFilter);
+ printf(" [Dest in ephem: checked = %lld, unchecked = %lld.]\n",
+ CheckedDestInEphem, UncheckedDestInEphem);
+ printf(" [Checked: %lld are stores to fields of ret buff, %lld via byref args,\n",
+ CheckedBarrierRetBufCount, CheckedBarrierByrefArgCount);
+ printf(" %lld via other locals, %lld via addr of local.]\n",
+ CheckedBarrierByrefOtherLocalCount, CheckedBarrierAddrOfLocalCount);
+ }
+}
+
+void IncUncheckedBarrierCount()
+{
+ ++UncheckedBarrierCount;
+ if (--UncheckedBarrierInterval == 0)
+ {
+ printf("GC write barrier counts: checked = %lld, unchecked = %lld, total = %lld.\n",
+ CheckedBarrierCount, UncheckedBarrierCount, (CheckedBarrierCount + UncheckedBarrierCount));
+ UncheckedBarrierInterval = BarrierCountPrintInterval;
+ }
+}
+#endif // FEATURE_COUNT_GC_WRITE_BARRIERS
+
+#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
+// (We ignore the advice below on using a _RAW macro for this performance diagnostic mode, which need not function properly in
+// all situations...)
+extern "C" HCIMPL3(VOID, JIT_CheckedWriteBarrier, Object **dst, Object *ref, CheckedWriteBarrierKinds kind)
+#else
+
+// This function is a JIT helper, but it must NOT use HCIMPL2 because it
+// modifies Thread state that will not be restored if an exception occurs
+// inside the helper. A normal EH unwind will not occur.
+extern "C" HCIMPL2_RAW(VOID, JIT_CheckedWriteBarrier, Object **dst, Object *ref)
+#endif
+{
+ // Must use static contract here, because if an AV occurs, a normal EH
+ // unwind will not occur, and destructors will not run.
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
+ IncCheckedBarrierCount();
+ switch (kind)
+ {
+ case CWBKind_RetBuf:
+ CheckedBarrierRetBufCount++;
+ break;
+ case CWBKind_ByRefArg:
+ CheckedBarrierByrefArgCount++;
+ break;
+ case CWBKind_OtherByRefLocal:
+ CheckedBarrierByrefOtherLocalCount++;
+ break;
+ case CWBKind_AddrOfLocal:
+ CheckedBarrierAddrOfLocalCount++;
+ break;
+ case CWBKind_Unclassified:
+ break;
+ default:
+ // It should be some member of the enumeration.
+ _ASSERTE_ALL_BUILDS(__FILE__, false);
+ break;
+ }
+#endif // FEATURE_COUNT_GC_WRITE_BARRIERS
+
+ // no HELPER_METHOD_FRAME because we are MODE_COOPERATIVE, GC_NOTRIGGER
+
+ *dst = ref;
+
+ // If the destination is outside the GC heap (e.g. inside an unboxed value
+ // class), simply exit.
+ if (((BYTE*)dst < g_lowest_address) || ((BYTE*)dst >= g_highest_address))
+ return;
+
+#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
+ CheckedAfterHeapFilter++;
+#endif
+
+#ifdef WRITE_BARRIER_CHECK
+ updateGCShadow(dst, ref); // support debugging write barrier
+#endif
+
+#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
+ if((BYTE*) dst >= g_ephemeral_low && (BYTE*) dst < g_ephemeral_high)
+ {
+ CheckedDestInEphem++;
+ }
+#endif
+ if((BYTE*) ref >= g_ephemeral_low && (BYTE*) ref < g_ephemeral_high)
+ {
+#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
+ CheckedAfterRefInEphemFilter++;
+#endif
+ // VolatileLoadWithoutBarrier() is used here to prevent fetch of g_card_table from being reordered
+ // with g_lowest/highest_address check above. See comment in code:gc_heap::grow_brick_card_tables.
+ BYTE* pCardByte = (BYTE *)VolatileLoadWithoutBarrier(&g_card_table) + card_byte((BYTE *)dst);
+ if(*pCardByte != 0xFF)
+ {
+#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
+ CheckedAfterAlreadyDirtyFilter++;
+#endif
+ *pCardByte = 0xFF;
+ }
+ }
+}
+HCIMPLEND_RAW
+
+// This function is a JIT helper, but it must NOT use HCIMPL2 because it
+// modifies Thread state that will not be restored if an exception occurs
+// inside the helper. A normal EH unwind will not occur.
+extern "C" HCIMPL2_RAW(VOID, JIT_WriteBarrier, Object **dst, Object *ref)
+{
+ // Must use static contract here, because if an AV occurs, a normal EH
+ // unwind will not occur, and destructors will not run.
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
+ IncUncheckedBarrierCount();
+#endif
+ // no HELPER_METHOD_FRAME because we are MODE_COOPERATIVE, GC_NOTRIGGER
+
+ *dst = ref;
+
+ // If the store above succeeded, "dst" should be in the heap.
+ assert(GCHeap::GetGCHeap()->IsHeapPointer((void*)dst));
+
+#ifdef WRITE_BARRIER_CHECK
+ updateGCShadow(dst, ref); // support debugging write barrier
+#endif
+
+#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
+ if((BYTE*) dst >= g_ephemeral_low && (BYTE*) dst < g_ephemeral_high)
+ {
+ UncheckedDestInEphem++;
+ }
+#endif
+ if((BYTE*) ref >= g_ephemeral_low && (BYTE*) ref < g_ephemeral_high)
+ {
+#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
+ UncheckedAfterRefInEphemFilter++;
+#endif
+ BYTE* pCardByte = (BYTE *)VolatileLoadWithoutBarrier(&g_card_table) + card_byte((BYTE *)dst);
+ if(*pCardByte != 0xFF)
+ {
+#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
+ UncheckedAfterAlreadyDirtyFilter++;
+#endif
+ *pCardByte = 0xFF;
+ }
+ }
+}
+HCIMPLEND_RAW
+
+#endif // FEATURE_USE_ASM_GC_WRITE_BARRIERS
+
+extern "C" HCIMPL2_RAW(VOID, JIT_WriteBarrierEnsureNonHeapTarget, Object **dst, Object *ref)
+{
+ // Must use static contract here, because if an AV occurs, a normal EH
+ // unwind will not occur, and destructors will not run.
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ assert(!GCHeap::GetGCHeap()->IsHeapPointer((void*)dst));
+
+ // no HELPER_METHOD_FRAME because we are MODE_COOPERATIVE, GC_NOTRIGGER
+
+ *dst = ref;
+}
+HCIMPLEND_RAW
+
+// This function dirties the card table at a granularity of one byte, to avoid
+// the lost ("ghost") updates that could occur if multiple threads tried to set
+// different bits within the same card byte.
+
+#include <optsmallperfcritical.h>
+void ErectWriteBarrier(OBJECTREF *dst, OBJECTREF ref)
+{
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ // If the destination is outside the GC heap (e.g. inside an unboxed value
+ // class), simply exit.
+ if (((BYTE*)dst < g_lowest_address) || ((BYTE*)dst >= g_highest_address))
+ return;
+
+#ifdef WRITE_BARRIER_CHECK
+ updateGCShadow((Object**) dst, OBJECTREFToObject(ref)); // support debugging write barrier
+#endif
+
+ if((BYTE*) OBJECTREFToObject(ref) >= g_ephemeral_low && (BYTE*) OBJECTREFToObject(ref) < g_ephemeral_high)
+ {
+ // VolatileLoadWithoutBarrier() is used here to prevent fetch of g_card_table from being reordered
+ // with g_lowest/highest_address check above. See comment in code:gc_heap::grow_brick_card_tables.
+ BYTE* pCardByte = (BYTE *)VolatileLoadWithoutBarrier(&g_card_table) + card_byte((BYTE *)dst);
+ if(*pCardByte != 0xFF)
+ *pCardByte = 0xFF;
+ }
+}
+#include <optdefault.h>
+
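+// Usage sketch (editorial, hedged): ErectWriteBarrier does not perform the
+// reference store itself; callers store first and then erect the barrier so
+// the GC will rescan the covering card, e.g.:
+//
+// void StoreRef(OBJECTREF* pField, OBJECTREF value) // hypothetical helper
+// {
+//     *pField = value;                  // the reference store
+//     ErectWriteBarrier(pField, value); // dirty the card covering pField
+// }
+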
+void ErectWriteBarrierForMT(MethodTable **dst, MethodTable *ref)
+{
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ *dst = ref;
+
+#ifdef _DEBUG
+ updateGCShadow((Object **)dst, (Object *)ref); // support debugging write barrier, updateGCShadow only cares that these are pointers
+#endif
+
+ if (ref->Collectible())
+ {
+ BYTE *refObject = *(BYTE **)((MethodTable*)ref)->GetLoaderAllocatorObjectHandle();
+ if((BYTE*) refObject >= g_ephemeral_low && (BYTE*) refObject < g_ephemeral_high)
+ {
+ // See comment above
+ BYTE* pCardByte = (BYTE *)VolatileLoadWithoutBarrier(&g_card_table) + card_byte((BYTE *)dst);
+ if( !((*pCardByte) & card_bit((BYTE *)dst)) )
+ {
+ *pCardByte = 0xFF;
+ }
+ }
+ }
+}
diff --git a/src/vm/gchelpers.h b/src/vm/gchelpers.h
new file mode 100644
index 0000000000..4a192972d3
--- /dev/null
+++ b/src/vm/gchelpers.h
@@ -0,0 +1,124 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*
+ * GCHELPERS.H
+ *
+ * GC Allocation and Write Barrier Helpers
+ */
+
+#ifndef _GCHELPERS_H_
+#define _GCHELPERS_H_
+
+//========================================================================
+//
+// ALLOCATION HELPERS
+//
+//========================================================================
+
+OBJECTREF AllocateValueSzArray(TypeHandle elementType, INT32 length);
+ // The main array allocation routine; it can allocate multi-dimensional arrays
+OBJECTREF AllocateArrayEx(TypeHandle arrayClass, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap = FALSE
+ DEBUG_ARG(BOOL bDontSetAppDomain = FALSE));
+ // Optimized version of the above
+OBJECTREF FastAllocatePrimitiveArray(MethodTable* arrayType, DWORD cElements, BOOL bAllocateInLargeHeap = FALSE);
+
+
+#if defined(_TARGET_X86_)
+
+ // For x86, we generate efficient allocators for some special cases.
+ // These are called via inline wrappers that invoke the generated
+ // allocators through function pointers.
+
+
+ // Create a single-dimensional (SD) array of primitive types
+typedef HCCALL2_PTR(Object*, FastPrimitiveArrayAllocatorFuncPtr, CorElementType type, DWORD cElements);
+
+extern FastPrimitiveArrayAllocatorFuncPtr fastPrimitiveArrayAllocator;
+
+ // The fast version always allocates in the normal heap
+OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements);
+
+ // The slow version is distinguished via overloading by an additional parameter
+OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements, BOOL bAllocateInLargeHeap);
+
+
+// Allocate an SD array of object pointers. StubLinker-generated asm code might
+// implement this, so the element TypeHandle is passed as a PVOID to avoid any
+// struct calling-convention weirdness.
+typedef HCCALL2_PTR(Object*, FastObjectArrayAllocatorFuncPtr, /*TypeHandle*/PVOID ArrayType, DWORD cElements);
+
+extern FastObjectArrayAllocatorFuncPtr fastObjectArrayAllocator;
+
+ // The fast version always allocates in the normal heap
+OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle ElementType);
+
+ // The slow version is distinguished via overloading by an additional parameter
+OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle ElementType, BOOL bAllocateInLargeHeap);
+
+
+ // Allocate string
+typedef HCCALL1_PTR(StringObject*, FastStringAllocatorFuncPtr, DWORD cchArrayLength);
+
+extern FastStringAllocatorFuncPtr fastStringAllocator;
+
+STRINGREF AllocateString( DWORD cchStringLength );
+
+ // The slow version, implemented in gcscan.cpp
+STRINGREF SlowAllocateString( DWORD cchStringLength );
+
+#else
+
+// On other platforms, go to the (somewhat less efficient) implementations in gcscan.cpp
+
+ // Create an SD array of primitive types
+OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements, BOOL bAllocateInLargeHeap = FALSE);
+
+ // Allocate an SD array of object pointers
+OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle ElementType, BOOL bAllocateInLargeHeap = FALSE);
+
+STRINGREF SlowAllocateString( DWORD cchStringLength );
+
+inline STRINGREF AllocateString( DWORD cchStringLength )
+{
+ WRAPPER_NO_CONTRACT;
+
+ return SlowAllocateString( cchStringLength );
+}
+
+#endif
+
+OBJECTREF DupArrayForCloning(BASEARRAYREF pRef, BOOL bAllocateInLargeHeap = FALSE);
+
+// The JIT requests the EE to specify an allocation helper to use at each new-site.
+// The EE makes this choice based on whether context boundaries may be involved,
+// whether the type is a COM object, whether it is a large object, and
+// whether the object requires finalization.
+// These functions throw OutOfMemoryException on failure, so callers do not
+// need to check for a NULL return value.
+
+OBJECTREF AllocateObject(MethodTable *pMT
+#ifdef FEATURE_COMINTEROP
+ , bool fHandleCom = true
+#endif
+ );
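+
+// Editorial sketch (hedged): because the allocators above throw
+// OutOfMemoryException rather than returning NULL, callers need no failure
+// check, e.g.:
+//
+// OBJECTREF MakeInstance(MethodTable* pMT) // hypothetical caller
+// {
+//     OBJECTREF obj = AllocateObject(pMT); // throws on failure
+//     return obj;                          // never NULL here
+// }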
+
+extern void StompWriteBarrierEphemeral();
+extern void StompWriteBarrierResize(BOOL bReqUpperBoundsCheck);
+
+extern void ThrowOutOfMemoryDimensionsExceeded();
+
+//========================================================================
+//
+// WRITE BARRIER HELPERS
+//
+//========================================================================
+
+void ErectWriteBarrier(OBJECTREF* dst, OBJECTREF ref);
+
+#endif // _GCHELPERS_H_
diff --git a/src/vm/gchost.cpp b/src/vm/gchost.cpp
new file mode 100644
index 0000000000..9676ecc9c2
--- /dev/null
+++ b/src/vm/gchost.cpp
@@ -0,0 +1,277 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// gchost.cpp
+//
+// This module contains the implementation for the IGCController interface.
+// This interface is published through the gchost.idl file. It allows a host
+// environment to set config values for the GC.
+//
+//*****************************************************************************
+
+//********** Includes *********************************************************
+
+#include "common.h"
+#include "vars.hpp"
+#include "eeconfig.h"
+#include "perfcounters.h"
+#include "gchost.h"
+#include "corhost.h"
+#include "excep.h"
+#include "field.h"
+#include "gc.h"
+
+#if !defined(FEATURE_CORECLR)
+inline size_t SizeInKBytes(size_t cbSize)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ size_t cb = (cbSize % 1024) ? 1 : 0;
+ return ((cbSize / 1024) + cb);
+}
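+
+// Editorial note: SizeInKBytes is round-up integer division by 1024, e.g.
+// SizeInKBytes(1024) == 1, SizeInKBytes(1536) == 2, and SizeInKBytes(0) == 0.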
+
+// IGCController
+
+HRESULT CorGCHost::_SetGCSegmentSize(SIZE_T SegmentSize)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Sanity check the value; it must be a power of two and big enough.
+ if (!GCHeap::IsValidSegmentSize(SegmentSize))
+ {
+ hr = E_INVALIDARG;
+ }
+ else
+ {
+ Host_SegmentSize = SegmentSize;
+ Host_fSegmentSizeSet = TRUE;
+ }
+
+ return (hr);
+}
+
+HRESULT CorGCHost::_SetGCMaxGen0Size(SIZE_T MaxGen0Size)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Sanity check that the value is at least large enough.
+ if (!GCHeap::IsValidGen0MaxSize(MaxGen0Size))
+ {
+ hr = E_INVALIDARG;
+ }
+ else
+ {
+ Host_MaxGen0Size = MaxGen0Size;
+ Host_fMaxGen0SizeSet = TRUE;
+ }
+
+ return (hr);
+}
+
+HRESULT CorGCHost::SetGCStartupLimits(
+ DWORD SegmentSize,
+ DWORD MaxGen0Size)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Set default overrides if specified by caller.
+ if (SegmentSize != (DWORD) ~0 && SegmentSize > 0)
+ {
+ hr = _SetGCSegmentSize(SegmentSize);
+ }
+
+ if (SUCCEEDED(hr) && MaxGen0Size != (DWORD) ~0 && MaxGen0Size > 0)
+ {
+ hr = _SetGCMaxGen0Size(MaxGen0Size);
+ }
+
+ return (hr);
+}
+
+HRESULT CorGCHost::SetGCStartupLimitsEx(
+ SIZE_T SegmentSize,
+ SIZE_T MaxGen0Size)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Set default overrides if specified by caller.
+ if (SegmentSize != (SIZE_T) ~0 && SegmentSize > 0)
+ {
+ hr = _SetGCSegmentSize(SegmentSize);
+ }
+
+ if (SUCCEEDED(hr) && MaxGen0Size != (SIZE_T) ~0 && MaxGen0Size > 0)
+ {
+ hr = _SetGCMaxGen0Size(MaxGen0Size);
+ }
+
+ return (hr);
+}
+
+// Collect the requested generation.
+HRESULT CorGCHost::Collect(
+ LONG Generation)
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ HRESULT hr = E_FAIL;
+
+ if (Generation > (int) GCHeap::GetGCHeap()->GetMaxGeneration())
+ hr = E_INVALIDARG;
+ else
+ {
+ // Set up a Thread object if this is called on a native thread.
+ Thread *pThread;
+ pThread = GetThread();
+ if (pThread == NULL)
+ pThread = SetupThreadNoThrow(&hr);
+
+ if (pThread != NULL)
+ {
+ // Put thread into co-operative mode, which is how GC must run.
+ GCX_COOP();
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+
+ EX_TRY
+ {
+ hr = GCHeap::GetGCHeap()->GarbageCollect(Generation);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_SO_INTOLERANT_CODE;
+ }
+ }
+ return (hr);
+}
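+
+// Editorial sketch (hedged): a host would invoke this through the gchost
+// interface, e.g. (pGCHost is a hypothetical interface pointer, error
+// handling elided):
+//
+// HRESULT hr = pGCHost->Collect(0); // collect only generation 0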
+
+
+// Return GC counters in the gchost format.
+HRESULT CorGCHost::GetStats(
+ COR_GC_STATS *pStats)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#if defined(ENABLE_PERF_COUNTERS)
+
+ Perf_GC *pgc = &GetPerfCounters().m_GC;
+
+ if (!pStats)
+ return (E_INVALIDARG);
+
+ if (pStats->Flags & COR_GC_COUNTS)
+ {
+ pStats->ExplicitGCCount = pgc->cInducedGCs;
+ for (int idx=0; idx<3; idx++)
+ {
+ pStats->GenCollectionsTaken[idx] = pgc->cGenCollections[idx];
+ }
+ }
+
+ if (pStats->Flags & COR_GC_MEMORYUSAGE)
+ {
+ pStats->CommittedKBytes = SizeInKBytes(pgc->cTotalCommittedBytes);
+ pStats->ReservedKBytes = SizeInKBytes(pgc->cTotalReservedBytes);
+ pStats->Gen0HeapSizeKBytes = SizeInKBytes(pgc->cGenHeapSize[0]);
+ pStats->Gen1HeapSizeKBytes = SizeInKBytes(pgc->cGenHeapSize[1]);
+ pStats->Gen2HeapSizeKBytes = SizeInKBytes(pgc->cGenHeapSize[2]);
+ pStats->LargeObjectHeapSizeKBytes = SizeInKBytes(pgc->cLrgObjSize);
+ pStats->KBytesPromotedFromGen0 = SizeInKBytes(pgc->cbPromotedMem[0]);
+ pStats->KBytesPromotedFromGen1 = SizeInKBytes(pgc->cbPromotedMem[1]);
+ }
+ return (S_OK);
+#else
+ return (E_NOTIMPL);
+#endif // ENABLE_PERF_COUNTERS
+}
+
+// Return per-thread allocation information.
+HRESULT CorGCHost::GetThreadStats(
+ DWORD *pFiberCookie,
+ COR_GC_THREAD_STATS *pStats)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Thread *pThread;
+
+ // Get the thread from the caller or the current thread.
+ if (!pFiberCookie)
+ pThread = GetThread();
+ else
+ pThread = (Thread *) pFiberCookie;
+ if (!pThread)
+ return (E_INVALIDARG);
+
+ return pThread->GetMemStats (pStats);
+}
+
+// Set the limit on virtual memory the GC may reserve, in megabytes.
+HRESULT CorGCHost::SetVirtualMemLimit(
+ SIZE_T sztMaxVirtualMemMB)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ GCHeap::GetGCHeap()->SetReservedVMLimit (sztMaxVirtualMemMB);
+ return (S_OK);
+}
+#endif // !defined(FEATURE_CORECLR)
+
+
diff --git a/src/vm/gcinfodecoder.cpp b/src/vm/gcinfodecoder.cpp
new file mode 100644
index 0000000000..a10ab93739
--- /dev/null
+++ b/src/vm/gcinfodecoder.cpp
@@ -0,0 +1,1853 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#include "gcinfodecoder.h"
+
+
+#ifdef USE_GC_INFO_DECODER
+
+#ifndef CHECK_APP_DOMAIN
+#if CHECK_APP_DOMAIN_LEAKS
+#define CHECK_APP_DOMAIN GC_CALL_CHECK_APP_DOMAIN
+#else
+#define CHECK_APP_DOMAIN 0
+#endif
+#endif
+
+#ifndef GCINFODECODER_CONTRACT
+#define GCINFODECODER_CONTRACT(contract) contract
+#endif // !GCINFODECODER_CONTRACT
+
+
+#ifndef GET_CALLER_SP
+#define GET_CALLER_SP(pREGDISPLAY) EECodeManager::GetCallerSp(pREGDISPLAY)
+#endif // !GET_CALLER_SP
+
+#ifndef VALIDATE_OBJECTREF
+#if defined(DACCESS_COMPILE) || defined(CROSSGEN_COMPILE)
+#define VALIDATE_OBJECTREF(objref, fDeep)
+#else // DACCESS_COMPILE || CROSSGEN_COMPILE
+#define VALIDATE_OBJECTREF(objref, fDeep) OBJECTREF_TO_UNCHECKED_OBJECTREF(objref)->Validate(fDeep)
+#endif // DACCESS_COMPILE || CROSSGEN_COMPILE
+#endif // !VALIDATE_OBJECTREF
+
+#ifndef VALIDATE_ROOT
+#include "gcenv.h"
+#define VALIDATE_ROOT(isInterior, hCallBack, pObjRef) \
+ do { \
+ /* Only call Object::Validate() with bDeep == TRUE if we are in the promote phase. */ \
+ /* We should call Validate() with bDeep == FALSE if we are in the relocation phase. */ \
+ /* Actually with the introduction of the POPO feature, we cannot validate during */ \
+ /* relocate because POPO might have written over the object. It would take a */ \
+ /* non-trivial amount of work to make this work. */ \
+ \
+ GCCONTEXT* pGCCtx = (GCCONTEXT*)(hCallBack); \
+ \
+ if (!(isInterior) && !(m_Flags & DECODE_NO_VALIDATION) && (pGCCtx->sc->promotion)) { \
+ VALIDATE_OBJECTREF(*(pObjRef), pGCCtx->sc->promotion == TRUE); \
+ } \
+ } while (0)
+#endif // !VALIDATE_ROOT
+
+
+bool GcInfoDecoder::SetIsInterruptibleCB (UINT32 startOffset, UINT32 stopOffset, LPVOID hCallback)
+{
+ GcInfoDecoder *pThis = (GcInfoDecoder*)hCallback;
+
+
+ bool fStop = pThis->m_InstructionOffset >= startOffset && pThis->m_InstructionOffset < stopOffset;
+
+ if (fStop)
+ pThis->m_IsInterruptible = true;
+
+ return fStop;
+}
+
+
+GcInfoDecoder::GcInfoDecoder(
+ PTR_CBYTE gcInfoAddr,
+ GcInfoDecoderFlags flags,
+ UINT32 breakOffset
+ )
+ : m_Reader( gcInfoAddr
+#ifdef VERIFY_GCINFO
+ + sizeof(size_t)
+#endif
+ )
+ , m_InstructionOffset(breakOffset)
+ , m_IsInterruptible(false)
+#ifdef _DEBUG
+ , m_Flags( flags )
+ , m_GcInfoAddress(gcInfoAddr)
+#endif
+#ifdef VERIFY_GCINFO
+ , m_DbgDecoder(gcInfoAddr+
+ (((UINT32)((PTR_BYTE)(TADDR)gcInfoAddr)[3])<<24)+
+ (((UINT32)((PTR_BYTE)(TADDR)gcInfoAddr)[2])<<16)+
+ (((UINT32)((PTR_BYTE)(TADDR)gcInfoAddr)[1])<<8)+
+ ((PTR_BYTE)(TADDR)gcInfoAddr)[0],
+ flags, breakOffset)
+#endif
+{
+ _ASSERTE( (flags & (DECODE_INTERRUPTIBILITY | DECODE_GC_LIFETIMES)) || (0 == breakOffset) );
+
+ // The current implementation doesn't support the two flags together
+ _ASSERTE(
+ ((flags & (DECODE_INTERRUPTIBILITY | DECODE_GC_LIFETIMES)) != (DECODE_INTERRUPTIBILITY | DECODE_GC_LIFETIMES))
+ );
+
+ //--------------------------------------------
+ // Pre-decode information
+ //--------------------------------------------
+
+ GcInfoHeaderFlags headerFlags;
+ bool slimHeader = (m_Reader.ReadOneFast() == 0);
+
+ if (slimHeader)
+ {
+ headerFlags = (GcInfoHeaderFlags)(m_Reader.ReadOneFast() ? GC_INFO_HAS_STACK_BASE_REGISTER : 0);
+ }
+ else
+ {
+ headerFlags = (GcInfoHeaderFlags) m_Reader.Read(GC_INFO_FLAGS_BIT_SIZE);
+ }
+
+ m_IsVarArg = headerFlags & GC_INFO_IS_VARARG;
+ int hasSecurityObject = headerFlags & GC_INFO_HAS_SECURITY_OBJECT;
+ int hasGSCookie = headerFlags & GC_INFO_HAS_GS_COOKIE;
+ int hasPSPSym = headerFlags & GC_INFO_HAS_PSP_SYM;
+ int hasGenericsInstContext = (headerFlags & GC_INFO_HAS_GENERICS_INST_CONTEXT_MASK) != GC_INFO_HAS_GENERICS_INST_CONTEXT_NONE;
+ m_GenericSecretParamIsMD = (headerFlags & GC_INFO_HAS_GENERICS_INST_CONTEXT_MASK) == GC_INFO_HAS_GENERICS_INST_CONTEXT_MD;
+ m_GenericSecretParamIsMT = (headerFlags & GC_INFO_HAS_GENERICS_INST_CONTEXT_MASK) == GC_INFO_HAS_GENERICS_INST_CONTEXT_MT;
+ int hasStackBaseRegister = headerFlags & GC_INFO_HAS_STACK_BASE_REGISTER;
+ m_WantsReportOnlyLeaf = ((headerFlags & GC_INFO_WANTS_REPORT_ONLY_LEAF) != 0);
+ int hasSizeOfEditAndContinuePreservedArea = headerFlags & GC_INFO_HAS_EDIT_AND_CONTINUE_PRESERVED_SLOTS;
+
+ m_CodeLength = (UINT32) DENORMALIZE_CODE_LENGTH((UINT32) m_Reader.DecodeVarLengthUnsigned(CODE_LENGTH_ENCBASE));
+
+ if (flags == DECODE_CODE_LENGTH)
+ {
+ // If we are only interested in the code length, then bail out now.
+ return;
+ }
+
+ if (hasGSCookie)
+ {
+ // Note that normalization as a code offset can be different from
+ // normalization as a code length
+ UINT32 normCodeLength = NORMALIZE_CODE_OFFSET(m_CodeLength);
+
+ // Decode prolog/epilog information
+ UINT32 normPrologSize = (UINT32) m_Reader.DecodeVarLengthUnsigned(NORM_PROLOG_SIZE_ENCBASE) + 1;
+ UINT32 normEpilogSize = (UINT32) m_Reader.DecodeVarLengthUnsigned(NORM_EPILOG_SIZE_ENCBASE);
+
+ m_ValidRangeStart = (UINT32) DENORMALIZE_CODE_OFFSET(normPrologSize);
+ m_ValidRangeEnd = (UINT32) DENORMALIZE_CODE_OFFSET(normCodeLength - normEpilogSize);
+ _ASSERTE(m_ValidRangeStart < m_ValidRangeEnd);
+ }
+ else if (hasSecurityObject || hasGenericsInstContext)
+ {
+ // Decode prolog information
+ UINT32 normPrologSize = (UINT32) m_Reader.DecodeVarLengthUnsigned(NORM_PROLOG_SIZE_ENCBASE) + 1;
+ m_ValidRangeStart = (UINT32) DENORMALIZE_CODE_OFFSET(normPrologSize);
+ // satisfy asserts that assume m_GSCookieValidRangeStart != 0 ==> m_GSCookieValidRangeStart < m_GSCookieValidRangeEnd
+ m_ValidRangeEnd = m_ValidRangeStart + 1;
+ }
+ else
+ {
+ m_ValidRangeStart = m_ValidRangeEnd = 0;
+ }
+
+ if (flags == DECODE_PROLOG_LENGTH)
+ {
+ // if we are only interested in the prolog size, then bail out now
+ return;
+ }
+
+ // Decode the offset to the security object.
+ if(hasSecurityObject)
+ {
+ m_SecurityObjectStackSlot = (INT32) DENORMALIZE_STACK_SLOT(m_Reader.DecodeVarLengthSigned(SECURITY_OBJECT_STACK_SLOT_ENCBASE));
+ }
+ else
+ {
+ m_SecurityObjectStackSlot = NO_SECURITY_OBJECT;
+ }
+ if (flags == DECODE_SECURITY_OBJECT)
+ {
+ // If we are only interested in the security object, then bail out now.
+ return;
+ }
+
+ // Decode the offset to the GS cookie.
+ if(hasGSCookie)
+ {
+ m_GSCookieStackSlot = (INT32) DENORMALIZE_STACK_SLOT(m_Reader.DecodeVarLengthSigned(GS_COOKIE_STACK_SLOT_ENCBASE));
+ }
+ else
+ {
+ m_GSCookieStackSlot = NO_GS_COOKIE;
+ }
+ if (flags == DECODE_GS_COOKIE)
+ {
+ // If we are only interested in the GS cookie, then bail out now.
+ return;
+ }
+
+ // Decode the offset to the PSPSym.
+ // The PSPSym is relative to the caller SP on IA64 and the initial stack pointer before any stack allocation on X64 (InitialSP).
+ if(hasPSPSym)
+ {
+ m_PSPSymStackSlot = (INT32) DENORMALIZE_STACK_SLOT(m_Reader.DecodeVarLengthSigned(PSP_SYM_STACK_SLOT_ENCBASE));
+ }
+ else
+ {
+ m_PSPSymStackSlot = NO_PSP_SYM;
+ }
+ if (flags == DECODE_PSP_SYM)
+ {
+ // If we are only interested in the PSPSym, then bail out now.
+ return;
+ }
+
+ // Decode the offset to the generics type context.
+ if(hasGenericsInstContext)
+ {
+ m_GenericsInstContextStackSlot = (INT32) DENORMALIZE_STACK_SLOT(m_Reader.DecodeVarLengthSigned(GENERICS_INST_CONTEXT_STACK_SLOT_ENCBASE));
+ }
+ else
+ {
+ m_GenericsInstContextStackSlot = NO_GENERICS_INST_CONTEXT;
+ }
+ if (flags == DECODE_GENERICS_INST_CONTEXT)
+ {
+ // If we are only interested in the generics token, then bail out now.
+ return;
+ }
+
+ if(hasStackBaseRegister)
+ {
+ if (slimHeader)
+ {
+ m_StackBaseRegister = (UINT32) DENORMALIZE_STACK_BASE_REGISTER(0);
+ }
+ else
+ {
+ m_StackBaseRegister = (UINT32) DENORMALIZE_STACK_BASE_REGISTER(m_Reader.DecodeVarLengthUnsigned(STACK_BASE_REGISTER_ENCBASE));
+ }
+ }
+ else
+ {
+ m_StackBaseRegister = NO_STACK_BASE_REGISTER;
+ }
+
+ if (hasSizeOfEditAndContinuePreservedArea)
+ {
+ m_SizeOfEditAndContinuePreservedArea = (UINT32) m_Reader.DecodeVarLengthUnsigned(SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA_ENCBASE);
+ }
+ else
+ {
+ m_SizeOfEditAndContinuePreservedArea = NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA;
+ }
+
+#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
+ if (slimHeader)
+ {
+ m_SizeOfStackOutgoingAndScratchArea = 0;
+ }
+ else
+ {
+ m_SizeOfStackOutgoingAndScratchArea = (UINT32)DENORMALIZE_SIZE_OF_STACK_AREA(m_Reader.DecodeVarLengthUnsigned(SIZE_OF_STACK_AREA_ENCBASE));
+ }
+#endif // FIXED_STACK_PARAMETER_SCRATCH_AREA
+
+#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+ m_NumSafePoints = (UINT32) DENORMALIZE_NUM_SAFE_POINTS(m_Reader.DecodeVarLengthUnsigned(NUM_SAFE_POINTS_ENCBASE));
+#endif
+
+ if (slimHeader)
+ {
+ m_NumInterruptibleRanges = 0;
+ }
+ else
+ {
+ m_NumInterruptibleRanges = (UINT32) DENORMALIZE_NUM_INTERRUPTIBLE_RANGES(m_Reader.DecodeVarLengthUnsigned(NUM_INTERRUPTIBLE_RANGES_ENCBASE));
+ }
+
+#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+ if(flags & (DECODE_INTERRUPTIBILITY | DECODE_GC_LIFETIMES))
+ {
+ if(m_NumSafePoints)
+ {
+ m_SafePointIndex = FindSafePoint(m_InstructionOffset);
+ }
+ else
+ {
+ m_SafePointIndex = 0;
+ }
+ }
+ else if(flags & DECODE_FOR_RANGES_CALLBACK)
+ {
+ // Note that normalization as a code offset can be different from
+ // normalization as a code length
+ UINT32 normCodeLength = NORMALIZE_CODE_OFFSET(m_CodeLength);
+
+ UINT32 numBitsPerOffset = CeilOfLog2(normCodeLength);
+ m_Reader.Skip(m_NumSafePoints * numBitsPerOffset);
+ }
+#endif
+
+ if(!m_IsInterruptible && (flags & DECODE_INTERRUPTIBILITY))
+ {
+ EnumerateInterruptibleRanges(&SetIsInterruptibleCB, this);
+ }
+
+#ifdef VERIFY_GCINFO
+#if 0
+ if(flags & DECODE_INTERRUPTIBILITY)
+ _ASSERTE(IsInterruptible() == m_DbgDecoder.IsInterruptible());
+#endif
+ if(flags & DECODE_SECURITY_OBJECT)
+ _ASSERTE(GetSecurityObjectStackSlot() == m_DbgDecoder.GetSecurityObjectStackSlot());
+ if(flags & DECODE_GENERICS_INST_CONTEXT)
+ {
+ _ASSERTE(GetGenericsInstContextStackSlot() == m_DbgDecoder.GetGenericsInstContextStackSlot());
+ _ASSERTE(GetPSPSymStackSlot() == m_DbgDecoder.GetPSPSymStackSlot());
+ }
+ if(flags & DECODE_VARARG)
+ _ASSERTE(GetIsVarArg() == m_DbgDecoder.GetIsVarArg());
+ if(flags & DECODE_CODE_LENGTH)
+ _ASSERTE(GetCodeLength() == m_DbgDecoder.GetCodeLength());
+ _ASSERTE(GetStackBaseRegister() == m_DbgDecoder.GetStackBaseRegister());
+ _ASSERTE(GetSizeOfEditAndContinuePreservedArea() == m_DbgDecoder.GetSizeOfEditAndContinuePreservedArea());
+#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
+ _ASSERTE(GetSizeOfStackParameterArea() == m_DbgDecoder.GetSizeOfStackParameterArea());
+#endif
+#endif
+
+}
+
+bool GcInfoDecoder::IsInterruptible()
+{
+ _ASSERTE( m_Flags & DECODE_INTERRUPTIBILITY );
+ return m_IsInterruptible;
+}
+
+bool GcInfoDecoder::HasMethodDescGenericsInstContext()
+{
+ _ASSERTE( m_Flags & DECODE_GENERICS_INST_CONTEXT );
+ return m_GenericSecretParamIsMD;
+}
+
+bool GcInfoDecoder::HasMethodTableGenericsInstContext()
+{
+ _ASSERTE( m_Flags & DECODE_GENERICS_INST_CONTEXT );
+ return m_GenericSecretParamIsMT;
+}
+
+#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+
+// This is used for gccoverage: is the given offset
+// a call-return offset with partially-interruptible GC info?
+bool GcInfoDecoder::IsSafePoint(UINT32 codeOffset)
+{
+ _ASSERTE(m_Flags == 0 && m_InstructionOffset == 0);
+ if(m_NumSafePoints == 0)
+ return false;
+
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
+ // Safepoints are encoded with a -1 adjustment
+ codeOffset--;
+#endif
+ size_t savedPos = m_Reader.GetCurrentPos();
+ UINT32 safePointIndex = FindSafePoint(codeOffset);
+ m_Reader.SetCurrentPos(savedPos);
+ return (bool) (safePointIndex != m_NumSafePoints);
+
+}
+
+UINT32 GcInfoDecoder::FindSafePoint(UINT32 breakOffset)
+{
+ if(m_NumSafePoints == 0)
+ return 0;
+
+ const size_t savedPos = m_Reader.GetCurrentPos();
+ const UINT32 numBitsPerOffset = CeilOfLog2(NORMALIZE_CODE_OFFSET(m_CodeLength));
+ UINT32 result = m_NumSafePoints;
+
+#if defined(_TARGET_ARM_)
+ // Safepoints are encoded with a -1 adjustment,
+ // but normalizing them masks off the low-order bit.
+ // Thus, only bother looking if the address is odd.
+ if ((breakOffset & 1) != 0)
+#endif
+ {
+ const UINT32 normBreakOffset = NORMALIZE_CODE_OFFSET(breakOffset);
+
+ INT32 low = 0;
+ INT32 high = (INT32)m_NumSafePoints;
+
+ while(low < high)
+ {
+ const INT32 mid = (low+high)/2;
+ _ASSERTE(mid >= 0 && mid < (INT32)m_NumSafePoints);
+ m_Reader.SetCurrentPos(savedPos + (UINT32)mid * numBitsPerOffset);
+ UINT32 normOffset = (UINT32)m_Reader.Read(numBitsPerOffset);
+ if(normOffset == normBreakOffset)
+ {
+ result = (UINT32) mid;
+ break;
+ }
+
+ if(normOffset < normBreakOffset)
+ low = mid+1;
+ else
+ high = mid;
+ }
+ }
+
+ m_Reader.SetCurrentPos(savedPos + m_NumSafePoints * numBitsPerOffset);
+ return result;
+}
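+
+// Editorial note (hedged): the safe-point table is a sorted list of
+// m_NumSafePoints fixed-width entries, each
+// CeilOfLog2(NORMALIZE_CODE_OFFSET(m_CodeLength)) bits wide. Fixed-width
+// entries allow random access into the bit stream (savedPos + mid *
+// numBitsPerOffset), which is what makes the binary search above possible;
+// on a miss the reader is still advanced past the whole table.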
+
+void GcInfoDecoder::EnumerateSafePoints(EnumerateSafePointsCallback *pCallback, LPVOID hCallback)
+{
+ if(m_NumSafePoints == 0)
+ return;
+
+ const UINT32 numBitsPerOffset = CeilOfLog2(NORMALIZE_CODE_OFFSET(m_CodeLength));
+
+ for(UINT i = 0; i < m_NumSafePoints; i++)
+ {
+ UINT32 normOffset = (UINT32)m_Reader.Read(numBitsPerOffset);
+ UINT32 offset = DENORMALIZE_CODE_OFFSET(normOffset) + 2;
+ pCallback(offset, hCallback);
+ }
+}
+#endif
+
+void GcInfoDecoder::EnumerateInterruptibleRanges (
+ EnumerateInterruptibleRangesCallback *pCallback,
+ LPVOID hCallback)
+{
+ // If no info is found for the call site, we default to fully-interruptible
+ LOG((LF_GCROOTS, LL_INFO1000000, "No GC info found for call site at offset %x. Defaulting to fully-interruptible information.\n", (int) m_InstructionOffset));
+
+ UINT32 lastInterruptibleRangeStopOffsetNormalized = 0;
+
+ for(UINT32 i=0; i<m_NumInterruptibleRanges; i++)
+ {
+ UINT32 normStartDelta = (UINT32) m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA1_ENCBASE );
+ UINT32 normStopDelta = (UINT32) m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA2_ENCBASE ) + 1;
+
+ UINT32 rangeStartOffsetNormalized = lastInterruptibleRangeStopOffsetNormalized + normStartDelta;
+ UINT32 rangeStopOffsetNormalized = rangeStartOffsetNormalized + normStopDelta;
+
+ UINT32 rangeStartOffset = DENORMALIZE_CODE_OFFSET(rangeStartOffsetNormalized);
+ UINT32 rangeStopOffset = DENORMALIZE_CODE_OFFSET(rangeStopOffsetNormalized);
+
+ bool fStop = pCallback(rangeStartOffset, rangeStopOffset, hCallback);
+ if (fStop)
+ return;
+
+ lastInterruptibleRangeStopOffsetNormalized = rangeStopOffsetNormalized;
+ }
+}
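+
+// Editorial worked example (hedged): ranges are stored as deltas from the
+// previous stop offset, with the length biased by one. Ignoring offset
+// normalization, the ranges [5,10) and [12,20) decode from the encoded
+// pairs (5,4) and (2,7):
+//
+//   start1 = 0 + 5 = 5;   stop1 = 5 + (4+1) = 10
+//   start2 = 10 + 2 = 12; stop2 = 12 + (7+1) = 20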
+
+INT32 GcInfoDecoder::GetSecurityObjectStackSlot()
+{
+ _ASSERTE( m_Flags & DECODE_SECURITY_OBJECT );
+ return m_SecurityObjectStackSlot;
+}
+
+INT32 GcInfoDecoder::GetGSCookieStackSlot()
+{
+ _ASSERTE( m_Flags & DECODE_GS_COOKIE );
+ return m_GSCookieStackSlot;
+}
+
+UINT32 GcInfoDecoder::GetGSCookieValidRangeStart()
+{
+ _ASSERTE( m_Flags & DECODE_GS_COOKIE );
+ return m_ValidRangeStart;
+}
+UINT32 GcInfoDecoder::GetGSCookieValidRangeEnd()
+{
+ _ASSERTE( m_Flags & DECODE_GS_COOKIE );
+ return m_ValidRangeEnd;
+}
+
+UINT32 GcInfoDecoder::GetPrologSize()
+{
+ _ASSERTE( m_Flags & DECODE_PROLOG_LENGTH );
+
+ return m_ValidRangeStart;
+}
+
+INT32 GcInfoDecoder::GetGenericsInstContextStackSlot()
+{
+ _ASSERTE( m_Flags & DECODE_GENERICS_INST_CONTEXT );
+ return m_GenericsInstContextStackSlot;
+}
+
+INT32 GcInfoDecoder::GetPSPSymStackSlot()
+{
+ _ASSERTE( m_Flags & DECODE_PSP_SYM );
+ return m_PSPSymStackSlot;
+}
+
+bool GcInfoDecoder::GetIsVarArg()
+{
+ _ASSERTE( m_Flags & DECODE_VARARG );
+ return m_IsVarArg;
+}
+
+bool GcInfoDecoder::WantsReportOnlyLeaf()
+{
+ return m_WantsReportOnlyLeaf;
+}
+
+UINT32 GcInfoDecoder::GetCodeLength()
+{
+// SUPPORTS_DAC;
+ _ASSERTE( m_Flags & DECODE_CODE_LENGTH );
+ return m_CodeLength;
+}
+
+UINT32 GcInfoDecoder::GetStackBaseRegister()
+{
+ return m_StackBaseRegister;
+}
+
+UINT32 GcInfoDecoder::GetSizeOfEditAndContinuePreservedArea()
+{
+ _ASSERTE( m_Flags & DECODE_EDIT_AND_CONTINUE );
+ return m_SizeOfEditAndContinuePreservedArea;
+}
+
+size_t GcInfoDecoder::GetNumBytesRead()
+{
+ return (m_Reader.GetCurrentPos() + 7) / 8;
+}
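+
+// Editorial note: (pos + 7) / 8 rounds a bit position up to whole bytes;
+// e.g. after reading 9 bits, (9 + 7) / 8 == 2 bytes.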
+
+
+#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
+
+UINT32 GcInfoDecoder::GetSizeOfStackParameterArea()
+{
+ return m_SizeOfStackOutgoingAndScratchArea;
+}
+
+#endif // FIXED_STACK_PARAMETER_SCRATCH_AREA
+
+
+bool GcInfoDecoder::EnumerateLiveSlots(
+ PREGDISPLAY pRD,
+ bool reportScratchSlots,
+ unsigned inputFlags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack
+ )
+{
+
+ unsigned executionAborted = (inputFlags & ExecutionAborted);
+
+ // In order to make ARM more x86-like we only ever report the leaf frame
+ // of any given function. We accomplish this by having the stackwalker
+ // pass a flag whenever walking the frame of a method where it has
+ // previously visited a child funclet.
+ if (WantsReportOnlyLeaf() && (inputFlags & ParentOfFuncletStackFrame))
+ {
+ LOG((LF_GCROOTS, LL_INFO100000, "Not reporting this frame because it was already reported via another funclet.\n"));
+ return true;
+ }
+
+#ifdef VERIFY_GCINFO
+ m_DbgDecoder.EnumerateLiveSlots(
+ pRD,
+ reportScratchSlots,
+ inputFlags,
+ pCallBack,
+ hCallBack
+ );
+#endif
+
+ //
+ // If this is a non-leaf frame and we are executing a call, the unwinder has given us the PC
+ // of the call instruction. We should adjust it to the PC of the instruction after the call in order to
+ // obtain transition information for scratch slots. However, we always assume scratch slots to be
+ // dead for non-leaf frames (except for ResumableFrames), so we don't need to adjust the PC.
+ // If this is a non-leaf frame and we are not executing a call (i.e.: a fault occurred in the function),
+ // then it would be incorrect to adjust the PC
+ //
+
+ _ASSERTE(GC_SLOT_INTERIOR == GC_CALL_INTERIOR);
+ _ASSERTE(GC_SLOT_PINNED == GC_CALL_PINNED);
+
+ _ASSERTE( m_Flags & DECODE_GC_LIFETIMES );
+
+ GcSlotDecoder slotDecoder;
+
+ UINT32 normBreakOffset = NORMALIZE_CODE_OFFSET(m_InstructionOffset);
+
+#if 0
+ // This is currently disabled because sometimes on IA64 we need
+ // to make call sites non-interruptible
+ // TODO: review this
+#ifdef _DEBUG
+ if(!executionAborted)
+ {
+ GcInfoDecoder _decoder2(
+ m_GcInfoAddress,
+ DECODE_INTERRUPTIBILITY,
+ m_InstructionOffset
+ );
+
+ _ASSERTE(_decoder2.IsInterruptible());
+ }
+#endif
+#endif
+
+ // Normalized break offset,
+ // relative to interruptible ranges when PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED is defined
+#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+ UINT32 pseudoBreakOffset = 0;
+ UINT32 numInterruptibleLength = 0;
+#else
+ UINT32 pseudoBreakOffset = normBreakOffset;
+ UINT32 numInterruptibleLength = NORMALIZE_CODE_OFFSET(m_CodeLength);
+#endif
+
+
+#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+#ifndef DISABLE_EH_VECTORS
+ if(m_SafePointIndex < m_NumSafePoints || executionAborted)
+ {
+ // Skip interruptibility information
+ for(UINT32 i=0; i<m_NumInterruptibleRanges; i++)
+ {
+ m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA1_ENCBASE );
+ m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA2_ENCBASE );
+ }
+ }
+ else
+ {
+ //
+ // We didn't find the break offset in the list of call sites,
+ // and we are not in an executionAborted frame,
+ // so we must have fully-interruptible information.
+ //
+ _ASSERTE(m_NumInterruptibleRanges);
+
+#ifdef _DEBUG
+ int dbgCountIntersections = 0;
+#endif
+ UINT32 lastNormStop = 0;
+ for(UINT32 i=0; i<m_NumInterruptibleRanges; i++)
+ {
+ UINT32 normStartDelta = (UINT32) m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA1_ENCBASE );
+ UINT32 normStopDelta = (UINT32) m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA2_ENCBASE ) + 1;
+
+ UINT32 normStart = lastNormStop + normStartDelta;
+ UINT32 normStop = normStart + normStopDelta;
+ if(normBreakOffset >= normStart && normBreakOffset < normStop)
+ {
+ _ASSERTE(pseudoBreakOffset == 0);
+ _ASSERTE(dbgCountIntersections++ == 0);
+ pseudoBreakOffset = numInterruptibleLength + normBreakOffset - normStart;
+ }
+ numInterruptibleLength += normStopDelta;
+ lastNormStop = normStop;
+ }
+ _ASSERTE(dbgCountIntersections == 1);
+ }
+#else // DISABLE_EH_VECTORS
+ if(m_SafePointIndex < m_NumSafePoints && !executionAborted)
+ {
+ // Skip interruptibility information
+ for(UINT32 i=0; i<m_NumInterruptibleRanges; i++)
+ {
+ m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA1_ENCBASE );
+ m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA2_ENCBASE );
+ }
+ }
+ else
+ {
+ //
+ // We didn't find the break offset in the list of call sites,
+ // or we are in an executionAborted frame.
+ // So either we have fully-interruptible information,
+ // or execution will not resume in the current method
+ // and nothing should be reported.
+ //
+ if(!executionAborted)
+ {
+ _ASSERTE(m_NumInterruptibleRanges);
+ }
+
+ int countIntersections = 0;
+ UINT32 lastNormStop = 0;
+ for(UINT32 i=0; i<m_NumInterruptibleRanges; i++)
+ {
+ UINT32 normStartDelta = (UINT32) m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA1_ENCBASE );
+ UINT32 normStopDelta = (UINT32) m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA2_ENCBASE ) + 1;
+
+ UINT32 normStart = lastNormStop + normStartDelta;
+ UINT32 normStop = normStart + normStopDelta;
+ if(normBreakOffset >= normStart && normBreakOffset < normStop)
+ {
+ _ASSERTE(pseudoBreakOffset == 0);
+ countIntersections++;
+ pseudoBreakOffset = numInterruptibleLength + normBreakOffset - normStart;
+ }
+ numInterruptibleLength += normStopDelta;
+ lastNormStop = normStop;
+ }
+ _ASSERTE(countIntersections <= 1);
+ if(countIntersections == 0)
+ {
+ _ASSERTE(executionAborted);
+ LOG((LF_GCROOTS, LL_INFO100000, "Not reporting this frame because it is aborted and not fully interruptible.\n"));
+ goto ExitSuccess;
+ }
+ }
+#endif // DISABLE_EH_VECTORS
+#else // !PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+
+ // Skip interruptibility information
+ for(UINT32 i=0; i<m_NumInterruptibleRanges; i++)
+ {
+ m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA1_ENCBASE );
+ m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA2_ENCBASE );
+ }
+#endif
+
+
+ //------------------------------------------------------------------------------
+ // Read the slot table
+ //------------------------------------------------------------------------------
+
+
+ slotDecoder.DecodeSlotTable(m_Reader);
+
+ {
+ UINT32 numSlots = slotDecoder.GetNumTracked();
+
+ if(!numSlots)
+ goto ReportUntracked;
+
+#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+
+ UINT32 numBitsPerOffset = 0;
+ // Duplicate the encoder's heuristic to determine whether we have an
+ // indirect live-slot table (similar to the chunk pointers)
+ if ((m_NumSafePoints > 0) && m_Reader.ReadOneFast())
+ {
+ numBitsPerOffset = (UINT32) m_Reader.DecodeVarLengthUnsigned(POINTER_SIZE_ENCBASE) + 1;
+ _ASSERTE(numBitsPerOffset != 0);
+ }
+
+ //------------------------------------------------------------------------------
+ // Try partially interruptible first
+ //------------------------------------------------------------------------------
+
+ if(executionAborted)
+ {
+#ifndef DISABLE_EH_VECTORS
+ m_Reader.Skip(m_NumSafePoints * numSlots);
+
+ UINT32 numClauses = (UINT32) m_Reader.DecodeVarLengthUnsigned(NUM_EH_CLAUSES_ENCBASE);
+
+ if(numClauses)
+ {
+ UINT32 numBitsPerOffset = CeilOfLog2(NORMALIZE_CODE_OFFSET(m_CodeLength));
+
+ for(UINT32 i = 0; i < numClauses; i++)
+ {
+ UINT32 startOffset = (UINT32) DENORMALIZE_CODE_OFFSET(m_Reader.Read(numBitsPerOffset));
+ UINT32 stopOffset = (UINT32) DENORMALIZE_CODE_OFFSET(m_Reader.Read(numBitsPerOffset) + 1);
+
+ if(m_InstructionOffset >= startOffset
+ && m_InstructionOffset < stopOffset)
+ {
+ for(UINT32 slotIndex = 0; slotIndex < numSlots; slotIndex++)
+ {
+ if(m_Reader.ReadOneFast())
+ {
+ ReportSlotToGC(
+ slotDecoder,
+ slotIndex,
+ pRD,
+ reportScratchSlots,
+ inputFlags,
+ pCallBack,
+ hCallBack
+ );
+ }
+ }
+ }
+ else
+ {
+ m_Reader.Skip(numSlots);
+ }
+ }
+ }
+ goto ReportUntracked;
+#else //DISABLE_EH_VECTORS
+
+ _ASSERTE(m_NumSafePoints == 0);
+ m_Reader.Skip(m_NumSafePoints * numSlots);
+
+#endif //DISABLE_EH_VECTORS
+ }
+ else if( m_SafePointIndex != m_NumSafePoints )
+ {
+ if (numBitsPerOffset)
+ {
+ const size_t offsetTablePos = m_Reader.GetCurrentPos();
+ m_Reader.Skip(m_SafePointIndex * numBitsPerOffset);
+ const size_t liveStatesOffset = m_Reader.Read(numBitsPerOffset);
+ const size_t liveStatesStart = ((offsetTablePos + m_NumSafePoints * numBitsPerOffset + 7) & (~7)); // round the bit position up to a byte boundary
+ m_Reader.SetCurrentPos(liveStatesStart + liveStatesOffset);
+ if (m_Reader.ReadOneFast()) {
+ // RLE encoded
+ bool fSkip = (m_Reader.ReadOneFast() == 0);
+ bool fReport = true;
+ UINT32 readSlots = (UINT32)m_Reader.DecodeVarLengthUnsigned( fSkip ? LIVESTATE_RLE_SKIP_ENCBASE : LIVESTATE_RLE_RUN_ENCBASE );
+ fSkip = !fSkip;
+ while (readSlots < numSlots)
+ {
+ UINT32 cnt = (UINT32)m_Reader.DecodeVarLengthUnsigned( fSkip ? LIVESTATE_RLE_SKIP_ENCBASE : LIVESTATE_RLE_RUN_ENCBASE ) + 1;
+ if (fReport)
+ {
+ for(UINT32 slotIndex = readSlots; slotIndex < readSlots + cnt; slotIndex++)
+ {
+ ReportSlotToGC(slotDecoder,
+ slotIndex,
+ pRD,
+ reportScratchSlots,
+ inputFlags,
+ pCallBack,
+ hCallBack
+ );
+ }
+ }
+ readSlots += cnt;
+ fSkip = !fSkip;
+ fReport = !fReport;
+ }
+ _ASSERTE(readSlots == numSlots);
+ goto ReportUntracked;
+ }
+ // Just a normal live state (1 bit per slot), so use the normal decoding loop
+ }
+ else
+ {
+ m_Reader.Skip(m_SafePointIndex * numSlots);
+ }
+
+ for(UINT32 slotIndex = 0; slotIndex < numSlots; slotIndex++)
+ {
+ if(m_Reader.ReadOneFast())
+ {
+ ReportSlotToGC(
+ slotDecoder,
+ slotIndex,
+ pRD,
+ reportScratchSlots,
+ inputFlags,
+ pCallBack,
+ hCallBack
+ );
+ }
+ }
+ goto ReportUntracked;
+ }
+ else
+ {
+ m_Reader.Skip(m_NumSafePoints * numSlots);
+
+#ifndef DISABLE_EH_VECTORS
+ UINT32 numClauses = (UINT32) m_Reader.DecodeVarLengthUnsigned(NUM_EH_CLAUSES_ENCBASE);
+ UINT32 numBitsPerOffset = CeilOfLog2(NORMALIZE_CODE_OFFSET(m_CodeLength));
+
+ m_Reader.Skip((numBitsPerOffset * 2 + numSlots) * numClauses);
+#endif //DISABLE_EH_VECTORS
+ }
+
+#endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+
+ _ASSERTE(m_NumInterruptibleRanges);
+ _ASSERTE(numInterruptibleLength);
+
+ // If no info is found for the call site, we default to fully-interruptible
+ LOG((LF_GCROOTS, LL_INFO1000000, "No GC info found for call site at offset %x. Defaulting to fully-interruptible information.\n", (int) m_InstructionOffset));
+
+ UINT32 numChunks = (numInterruptibleLength + NUM_NORM_CODE_OFFSETS_PER_CHUNK - 1) / NUM_NORM_CODE_OFFSETS_PER_CHUNK;
+ UINT32 breakChunk = pseudoBreakOffset / NUM_NORM_CODE_OFFSETS_PER_CHUNK;
+ _ASSERTE(breakChunk < numChunks);
+
+ UINT32 numBitsPerPointer = (UINT32) m_Reader.DecodeVarLengthUnsigned(POINTER_SIZE_ENCBASE);
+
+ if(!numBitsPerPointer)
+ goto ReportUntracked;
+
+ size_t pointerTablePos = m_Reader.GetCurrentPos();
+
+ size_t chunkPointer;
+ UINT32 chunk = breakChunk;
+ for(;;)
+ {
+ m_Reader.SetCurrentPos(pointerTablePos + chunk * numBitsPerPointer);
+ chunkPointer = m_Reader.Read(numBitsPerPointer);
+ if(chunkPointer)
+ break;
+
+ if(chunk-- == 0)
+ goto ReportUntracked;
+ }
+
+ size_t chunksStartPos = ((pointerTablePos + numChunks * numBitsPerPointer + 7) & (~7)); // round the bit position up to a byte boundary
+ size_t chunkPos = chunksStartPos + chunkPointer - 1;
+ m_Reader.SetCurrentPos(chunkPos);
+
+ {
+ BitStreamReader couldBeLiveReader(m_Reader);
+
+ UINT32 numCouldBeLiveSlots = 0;
+ // A potentially compressed bit vector of which slots have any lifetimes
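+ // Editorial note (hedged): in the RLE form below, alternating counts of
+ // dead ("skip") and live ("run") slot positions cover the numSlots bits;
+ // e.g. an initial skip of 3 followed by a run of 2 marks slots 3-4 as
+ // potentially live. The extra leading bit selects the variable-length
+ // encoding base used for the first count.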
+ if (m_Reader.ReadOneFast())
+ {
+ // RLE encoded
+ bool fSkip = (m_Reader.ReadOneFast() == 0);
+ bool fReport = true;
+ UINT32 readSlots = (UINT32)m_Reader.DecodeVarLengthUnsigned( fSkip ? LIVESTATE_RLE_SKIP_ENCBASE : LIVESTATE_RLE_RUN_ENCBASE );
+ fSkip = !fSkip;
+ while (readSlots < numSlots)
+ {
+ UINT32 cnt = (UINT32)m_Reader.DecodeVarLengthUnsigned( fSkip ? LIVESTATE_RLE_SKIP_ENCBASE : LIVESTATE_RLE_RUN_ENCBASE ) + 1;
+ if (fReport)
+ {
+ numCouldBeLiveSlots += cnt;
+ }
+ readSlots += cnt;
+ fSkip = !fSkip;
+ fReport = !fReport;
+ }
+ _ASSERTE(readSlots == numSlots);
+
+ }
+ else
+ {
+ for(UINT32 i = 0; i < numSlots; i++)
+ {
+ if(m_Reader.ReadOneFast())
+ numCouldBeLiveSlots++;
+ }
+ }
+ _ASSERTE(numCouldBeLiveSlots > 0);
+
+ BitStreamReader finalStateReader(m_Reader);
+
+ m_Reader.Skip(numCouldBeLiveSlots);
+
+ int lifetimeTransitionsCount = 0;
+
+ UINT32 slotIndex = 0;
+ bool fSimple = (couldBeLiveReader.ReadOneFast() == 0);
+ bool fSkipFirst = false; // silence the warning
+ UINT32 cnt = 0;
+ if (!fSimple)
+ {
+ fSkipFirst = (couldBeLiveReader.ReadOneFast() == 0);
+ slotIndex = -1; // intentional UINT32 wraparound; incremented before first use
+ }
+ for(UINT32 i = 0; i < numCouldBeLiveSlots; i++)
+ {
+ if (fSimple)
+ {
+ while(!couldBeLiveReader.ReadOneFast())
+ slotIndex++;
+ }
+ else if (cnt > 0)
+ {
+ // We have more from the last run to report
+ cnt--;
+ }
+ // We need to find a new run
+ else if (fSkipFirst)
+ {
+ UINT32 tmp = (UINT32)couldBeLiveReader.DecodeVarLengthUnsigned( LIVESTATE_RLE_SKIP_ENCBASE ) + 1;
+ slotIndex += tmp;
+ cnt = (UINT32)couldBeLiveReader.DecodeVarLengthUnsigned( LIVESTATE_RLE_RUN_ENCBASE );
+ }
+ else
+ {
+ UINT32 tmp = (UINT32)couldBeLiveReader.DecodeVarLengthUnsigned( LIVESTATE_RLE_RUN_ENCBASE ) + 1;
+ slotIndex += tmp;
+ cnt = (UINT32)couldBeLiveReader.DecodeVarLengthUnsigned( LIVESTATE_RLE_SKIP_ENCBASE );
+ }
+
+ UINT32 isLive = (UINT32) finalStateReader.Read(1);
+
+ if(chunk == breakChunk)
+ {
+ // Read transitions
+ UINT32 normBreakOffsetDelta = pseudoBreakOffset % NUM_NORM_CODE_OFFSETS_PER_CHUNK;
+ for(;;)
+ {
+ if(!m_Reader.ReadOneFast())
+ break;
+
+ UINT32 transitionOffset = (UINT32) m_Reader.Read(NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2);
+
+ lifetimeTransitionsCount++;
+ _ASSERTE(transitionOffset && transitionOffset < NUM_NORM_CODE_OFFSETS_PER_CHUNK);
+ if(transitionOffset > normBreakOffsetDelta)
+ {
+ isLive ^= 1;
+ }
+ }
+ }
+
+ if(isLive)
+ {
+ ReportSlotToGC(
+ slotDecoder,
+ slotIndex,
+ pRD,
+ reportScratchSlots,
+ inputFlags,
+ pCallBack,
+ hCallBack
+ );
+ }
+
+ slotIndex++;
+ }
+
+ LOG((LF_GCROOTS, LL_INFO1000000, "Decoded %d lifetime transitions.\n", (int) lifetimeTransitionsCount ));
+ }
+ }
+
+ReportUntracked:
+
+ //------------------------------------------------------------------------------
+ // Finally, report anything untracked,
+ // but only for the leaf funclet/frame.
+ // This is turned on in the VM for regular GC reporting and in the DAC for !CLRStack -gc,
+ // but turned off in the #includes for nidump and SOS's !u -gcinfo and !gcinfo.
+ //------------------------------------------------------------------------------
+
+ if (slotDecoder.GetNumUntracked() && !(inputFlags & (ParentOfFuncletStackFrame | NoReportUntracked)))
+ {
+ ReportUntrackedSlots(slotDecoder, pRD, inputFlags, pCallBack, hCallBack);
+ }
+
+#ifdef DISABLE_EH_VECTORS
+ExitSuccess:
+#endif
+
+#ifdef VERIFY_GCINFO
+#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
+ if(!executionAborted)
+#endif
+ m_DbgDecoder.DoFinalVerification();
+#endif
+
+ return true;
+}
+
+void GcInfoDecoder::EnumerateUntrackedSlots(
+ PREGDISPLAY pRD,
+ unsigned inputFlags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack
+ )
+{
+ _ASSERTE(GC_SLOT_INTERIOR == GC_CALL_INTERIOR);
+ _ASSERTE(GC_SLOT_PINNED == GC_CALL_PINNED);
+
+ _ASSERTE( m_Flags & DECODE_GC_LIFETIMES );
+
+ GcSlotDecoder slotDecoder;
+
+ // Skip interruptibility information
+ for(UINT32 i=0; i<m_NumInterruptibleRanges; i++)
+ {
+ m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA1_ENCBASE );
+ m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA2_ENCBASE );
+ }
+
+ //------------------------------------------------------------------------------
+ // Read the slot table
+ //------------------------------------------------------------------------------
+
+ slotDecoder.DecodeSlotTable(m_Reader);
+
+ if (slotDecoder.GetNumUntracked())
+ {
+ ReportUntrackedSlots(slotDecoder, pRD, inputFlags, pCallBack, hCallBack);
+ }
+}
+
+void GcInfoDecoder::ReportUntrackedSlots(
+ GcSlotDecoder& slotDecoder,
+ PREGDISPLAY pRD,
+ unsigned inputFlags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack
+ )
+{
+ for(UINT32 slotIndex = slotDecoder.GetNumTracked(); slotIndex < slotDecoder.GetNumSlots(); slotIndex++)
+ {
+ ReportSlotToGC(slotDecoder,
+ slotIndex,
+ pRD,
+ true, // Report everything (although there should *never* be any scratch slots that are untracked)
+ inputFlags,
+ pCallBack,
+ hCallBack
+ );
+ }
+}
+
+void GcSlotDecoder::DecodeSlotTable(BitStreamReader& reader)
+{
+ if (reader.ReadOneFast())
+ {
+ m_NumRegisters = (UINT32) reader.DecodeVarLengthUnsigned(NUM_REGISTERS_ENCBASE);
+ }
+ else
+ {
+ m_NumRegisters = 0;
+ }
+ UINT32 numStackSlots;
+ if (reader.ReadOneFast())
+ {
+ numStackSlots = (UINT32) reader.DecodeVarLengthUnsigned(NUM_STACK_SLOTS_ENCBASE);
+ m_NumUntracked = (UINT32) reader.DecodeVarLengthUnsigned(NUM_UNTRACKED_SLOTS_ENCBASE);
+ }
+ else
+ {
+ numStackSlots = 0;
+ m_NumUntracked = 0;
+ }
+ m_NumSlots = m_NumRegisters + numStackSlots + m_NumUntracked;
+
+ UINT32 i = 0;
+
+ if(m_NumRegisters > 0)
+ {
+ // We always predecode the first register
+
+ _ASSERTE(i < MAX_PREDECODED_SLOTS);
+
+ UINT32 normRegNum = (UINT32) reader.DecodeVarLengthUnsigned(REGISTER_ENCBASE);
+ UINT32 regNum = DENORMALIZE_REGISTER(normRegNum);
+ GcSlotFlags flags = (GcSlotFlags) reader.Read(2);
+
+ m_SlotArray[0].Slot.RegisterNumber = regNum;
+ m_SlotArray[0].Flags = flags;
+
+ UINT32 loopEnd = min(m_NumRegisters, MAX_PREDECODED_SLOTS);
+ for(i++; i < loopEnd; i++)
+ {
+ if(flags)
+ {
+ normRegNum = (UINT32) reader.DecodeVarLengthUnsigned(REGISTER_ENCBASE);
+ regNum = DENORMALIZE_REGISTER(normRegNum);
+ flags = (GcSlotFlags) reader.Read(2);
+ }
+ else
+ {
+ UINT32 normRegDelta = (UINT32) reader.DecodeVarLengthUnsigned(REGISTER_DELTA_ENCBASE) + 1;
+ normRegNum += normRegDelta;
+ regNum = DENORMALIZE_REGISTER(normRegNum);
+ }
+
+ m_SlotArray[i].Slot.RegisterNumber = regNum;
+ m_SlotArray[i].Flags = flags;
+ }
+ }
+
+ if((numStackSlots > 0) && (i < MAX_PREDECODED_SLOTS))
+ {
+ // We have stack slots left and more room to predecode
+
+ GcStackSlotBase spBase = (GcStackSlotBase) reader.Read(2);
+ UINT32 normSpOffset = (INT32) reader.DecodeVarLengthSigned(STACK_SLOT_ENCBASE);
+ INT32 spOffset = DENORMALIZE_STACK_SLOT(normSpOffset);
+ GcSlotFlags flags = (GcSlotFlags) reader.Read(2);
+
+ m_SlotArray[i].Slot.Stack.SpOffset = spOffset;
+ m_SlotArray[i].Slot.Stack.Base = spBase;
+ m_SlotArray[i].Flags = flags;
+
+ UINT32 loopEnd = min(m_NumRegisters + numStackSlots, MAX_PREDECODED_SLOTS);
+ for(i++; i < loopEnd; i++)
+ {
+ spBase = (GcStackSlotBase) reader.Read(2);
+
+ if(flags)
+ {
+ normSpOffset = (INT32) reader.DecodeVarLengthSigned(STACK_SLOT_ENCBASE);
+ spOffset = DENORMALIZE_STACK_SLOT(normSpOffset);
+ flags = (GcSlotFlags) reader.Read(2);
+ }
+ else
+ {
+ INT32 normSpOffsetDelta = (INT32) reader.DecodeVarLengthUnsigned(STACK_SLOT_DELTA_ENCBASE);
+ normSpOffset += normSpOffsetDelta;
+ spOffset = DENORMALIZE_STACK_SLOT(normSpOffset);
+ }
+
+ m_SlotArray[i].Slot.Stack.SpOffset = spOffset;
+ m_SlotArray[i].Slot.Stack.Base = spBase;
+ m_SlotArray[i].Flags = flags;
+ }
+ }
+
+ if((m_NumUntracked > 0) && (i < MAX_PREDECODED_SLOTS))
+ {
+ // We have untracked stack slots left and more room to predecode
+
+ GcStackSlotBase spBase = (GcStackSlotBase) reader.Read(2);
+ UINT32 normSpOffset = (INT32) reader.DecodeVarLengthSigned(STACK_SLOT_ENCBASE);
+ INT32 spOffset = DENORMALIZE_STACK_SLOT(normSpOffset);
+ GcSlotFlags flags = (GcSlotFlags) reader.Read(2);
+
+ m_SlotArray[i].Slot.Stack.SpOffset = spOffset;
+ m_SlotArray[i].Slot.Stack.Base = spBase;
+ m_SlotArray[i].Flags = flags;
+
+ UINT32 loopEnd = min(m_NumSlots, MAX_PREDECODED_SLOTS);
+ for(i++; i < loopEnd; i++)
+ {
+ spBase = (GcStackSlotBase) reader.Read(2);
+
+ if(flags)
+ {
+ normSpOffset = (INT32) reader.DecodeVarLengthSigned(STACK_SLOT_ENCBASE);
+ spOffset = DENORMALIZE_STACK_SLOT(normSpOffset);
+ flags = (GcSlotFlags) reader.Read(2);
+ }
+ else
+ {
+ INT32 normSpOffsetDelta = (INT32) reader.DecodeVarLengthUnsigned(STACK_SLOT_DELTA_ENCBASE);
+ normSpOffset += normSpOffsetDelta;
+ spOffset = DENORMALIZE_STACK_SLOT(normSpOffset);
+ }
+
+ m_SlotArray[i].Slot.Stack.SpOffset = spOffset;
+ m_SlotArray[i].Slot.Stack.Base = spBase;
+ m_SlotArray[i].Flags = flags;
+ }
+ }
+
+ // Done pre-decoding
+
+ if(i < m_NumSlots)
+ {
+ // Prepare for lazy decoding
+
+ _ASSERTE(i == MAX_PREDECODED_SLOTS);
+ m_NumDecodedSlots = i;
+ m_pLastSlot = &m_SlotArray[MAX_PREDECODED_SLOTS - 1];
+
+ m_SlotReader = reader;
+
+ // Move the argument reader past the end of the table
+
+ GcSlotFlags flags = m_pLastSlot->Flags;
+
+ // Skip any remaining registers
+
+ for(; i < m_NumRegisters; i++)
+ {
+ if(flags)
+ {
+ reader.DecodeVarLengthUnsigned(REGISTER_ENCBASE);
+ flags = (GcSlotFlags) reader.Read(2);
+ }
+ else
+ {
+ reader.DecodeVarLengthUnsigned(REGISTER_DELTA_ENCBASE);
+ }
+ }
+
+ if(numStackSlots > 0)
+ {
+ if(i == m_NumRegisters)
+ {
+ // Skip the first stack slot
+
+ reader.Read(2);
+ reader.DecodeVarLengthSigned(STACK_SLOT_ENCBASE);
+ flags = (GcSlotFlags) reader.Read(2);
+ i++;
+ }
+
+ // Skip any remaining stack slots
+
+ const UINT32 loopEnd = m_NumRegisters + numStackSlots;
+ for(; i < loopEnd; i++)
+ {
+ reader.Read(2);
+
+ if(flags)
+ {
+ reader.DecodeVarLengthSigned(STACK_SLOT_ENCBASE);
+ flags = (GcSlotFlags) reader.Read(2);
+ }
+ else
+ {
+ reader.DecodeVarLengthUnsigned(STACK_SLOT_DELTA_ENCBASE);
+ }
+ }
+ }
+
+ if(m_NumUntracked > 0)
+ {
+ if(i == m_NumRegisters + numStackSlots)
+ {
+ // Skip the first untracked slot
+
+ reader.Read(2);
+ reader.DecodeVarLengthSigned(STACK_SLOT_ENCBASE);
+ flags = (GcSlotFlags) reader.Read(2);
+ i++;
+ }
+
+ // Skip any remaining untracked slots
+
+ for(; i < m_NumSlots; i++)
+ {
+ reader.Read(2);
+
+ if(flags)
+ {
+ reader.DecodeVarLengthSigned(STACK_SLOT_ENCBASE);
+ flags = (GcSlotFlags) reader.Read(2);
+ }
+ else
+ {
+ reader.DecodeVarLengthUnsigned(STACK_SLOT_DELTA_ENCBASE);
+ }
+ }
+ }
+ }
+}
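+
+// Editorial summary (hedged): the slot table is ordered as registers, then
+// tracked stack slots, then untracked stack slots. Within each group the
+// first entry is encoded in full; each subsequent entry either repeats the
+// full encoding (when the previous entry had nonzero flags) or stores only a
+// delta from the previous register number / stack offset (when the flags
+// were zero). Only the first MAX_PREDECODED_SLOTS entries are decoded
+// eagerly; the rest are decoded lazily by GetSlotDesc.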
+
+const GcSlotDesc* GcSlotDecoder::GetSlotDesc(UINT32 slotIndex)
+{
+ _ASSERTE(slotIndex < m_NumSlots);
+
+ if(slotIndex < MAX_PREDECODED_SLOTS)
+ {
+ return &m_SlotArray[slotIndex];
+ }
+
+ _ASSERTE(m_NumDecodedSlots >= MAX_PREDECODED_SLOTS && m_NumDecodedSlots < m_NumSlots);
+ _ASSERTE(m_NumDecodedSlots <= slotIndex);
+
+ while(m_NumDecodedSlots <= slotIndex)
+ {
+ if(m_NumDecodedSlots < m_NumRegisters)
+ {
+ //
+ // Decode a register
+ //
+
+ if(m_NumDecodedSlots == 0)
+ {
+ // Decode the first register
+ UINT32 normRegNum = (UINT32) m_SlotReader.DecodeVarLengthUnsigned(REGISTER_ENCBASE);
+ m_pLastSlot->Slot.RegisterNumber = DENORMALIZE_REGISTER(normRegNum);
+ m_pLastSlot->Flags = (GcSlotFlags) m_SlotReader.Read(2);
+ }
+ else
+ {
+ if(m_pLastSlot->Flags)
+ {
+ UINT32 normRegNum = (UINT32) m_SlotReader.DecodeVarLengthUnsigned(REGISTER_ENCBASE);
+ m_pLastSlot->Slot.RegisterNumber = DENORMALIZE_REGISTER(normRegNum);
+ m_pLastSlot->Flags = (GcSlotFlags) m_SlotReader.Read(2);
+ }
+ else
+ {
+ UINT32 normRegDelta = (UINT32) m_SlotReader.DecodeVarLengthUnsigned(REGISTER_DELTA_ENCBASE) + 1;
+ UINT32 normRegNum = normRegDelta + NORMALIZE_REGISTER(m_pLastSlot->Slot.RegisterNumber);
+ m_pLastSlot->Slot.RegisterNumber = DENORMALIZE_REGISTER(normRegNum);
+ }
+ }
+ }
+ else
+ {
+ //
+ // Decode a stack slot
+ //
+
+ if((m_NumDecodedSlots == m_NumRegisters) || (m_NumDecodedSlots == GetNumTracked()))
+ {
+ // Decode the first stack slot or first untracked slot
+ m_pLastSlot->Slot.Stack.Base = (GcStackSlotBase) m_SlotReader.Read(2);
+ UINT32 normSpOffset = (INT32) m_SlotReader.DecodeVarLengthSigned(STACK_SLOT_ENCBASE);
+ m_pLastSlot->Slot.Stack.SpOffset = DENORMALIZE_STACK_SLOT(normSpOffset);
+ m_pLastSlot->Flags = (GcSlotFlags) m_SlotReader.Read(2);
+ }
+ else
+ {
+ m_pLastSlot->Slot.Stack.Base = (GcStackSlotBase) m_SlotReader.Read(2);
+
+ if(m_pLastSlot->Flags)
+ {
+ INT32 normSpOffset = (INT32) m_SlotReader.DecodeVarLengthSigned(STACK_SLOT_ENCBASE);
+ m_pLastSlot->Slot.Stack.SpOffset = DENORMALIZE_STACK_SLOT(normSpOffset);
+ m_pLastSlot->Flags = (GcSlotFlags) m_SlotReader.Read(2);
+ }
+ else
+ {
+ INT32 normSpOffsetDelta = (INT32) m_SlotReader.DecodeVarLengthUnsigned(STACK_SLOT_DELTA_ENCBASE);
+ INT32 normSpOffset = normSpOffsetDelta + NORMALIZE_STACK_SLOT(m_pLastSlot->Slot.Stack.SpOffset);
+ m_pLastSlot->Slot.Stack.SpOffset = DENORMALIZE_STACK_SLOT(normSpOffset);
+ }
+ }
+ }
+
+ m_NumDecodedSlots++;
+ }
+
+ return m_pLastSlot;
+}
+
+
+//-----------------------------------------------------------------------------
+// Platform-specific methods
+//-----------------------------------------------------------------------------
+
+#if defined(_TARGET_AMD64_)
+
+
+OBJECTREF* GcInfoDecoder::GetRegisterSlot(
+ int regNum,
+ PREGDISPLAY pRD
+ )
+{
+ _ASSERTE(regNum >= 0 && regNum <= 16);
+ _ASSERTE(regNum != 4); // rsp
+
+ // The fields of KNONVOLATILE_CONTEXT_POINTERS are in the same order as
+ // the processor encoding numbers.
+
+ ULONGLONG **ppRax;
+ ppRax = &pRD->pCurrentContextPointers->Rax;
+
+ return (OBJECTREF*)*(ppRax + regNum);
+}
+
+
+bool GcInfoDecoder::IsScratchRegister(int regNum, PREGDISPLAY pRD)
+{
+ _ASSERTE(regNum >= 0 && regNum <= 16);
+ _ASSERTE(regNum != 4); // rsp
+
+ UINT16 PreservedRegMask =
+ (1 << 3) // rbx
+ | (1 << 5) // rbp
+ | (1 << 6) // rsi
+ | (1 << 7) // rdi
+ | (1 << 12) // r12
+ | (1 << 13) // r13
+ | (1 << 14) // r14
+ | (1 << 15); // r15
+
+ return !(PreservedRegMask & (1 << regNum));
+}
+
+
+bool GcInfoDecoder::IsScratchStackSlot(INT32 spOffset, GcStackSlotBase spBase, PREGDISPLAY pRD)
+{
+#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
+ _ASSERTE( m_Flags & DECODE_GC_LIFETIMES );
+
+ ULONGLONG pSlot = (ULONGLONG) GetStackSlot(spOffset, spBase, pRD);
+ _ASSERTE(pSlot >= pRD->SP);
+
+ return (pSlot < pRD->SP + m_SizeOfStackOutgoingAndScratchArea);
+#else
+ return FALSE;
+#endif
+}
+
+
+void GcInfoDecoder::ReportRegisterToGC( // AMD64
+ int regNum,
+ unsigned gcFlags,
+ PREGDISPLAY pRD,
+ unsigned flags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack)
+{
+ GCINFODECODER_CONTRACT(CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END);
+
+ _ASSERTE(regNum >= 0 && regNum <= 16);
+ _ASSERTE(regNum != 4); // rsp
+
+ LOG((LF_GCROOTS, LL_INFO1000, "Reporting " FMT_REG, regNum ));
+
+ OBJECTREF* pObjRef = GetRegisterSlot( regNum, pRD );
+
+#ifdef _DEBUG
+ if(IsScratchRegister(regNum, pRD))
+ {
+ // Scratch registers cannot be reported for non-leaf frames
+ _ASSERTE(flags & ActiveStackFrame);
+ }
+
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part Two */
+ "at" FMT_ADDR "as ", DBG_ADDR(pObjRef) ));
+
+ VALIDATE_ROOT((gcFlags & GC_CALL_INTERIOR), hCallBack, pObjRef);
+
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part Three */
+ LOG_PIPTR_OBJECT_CLASS(OBJECTREF_TO_UNCHECKED_OBJECTREF(*pObjRef), (gcFlags & GC_CALL_PINNED), (gcFlags & GC_CALL_INTERIOR))));
+#endif //_DEBUG
+
+ gcFlags |= CHECK_APP_DOMAIN;
+
+ pCallBack(hCallBack, pObjRef, gcFlags DAC_ARG(DacSlotLocation(regNum, 0, false)));
+}
+
+#elif defined(_TARGET_ARM_)
+
+OBJECTREF* GcInfoDecoder::GetRegisterSlot(
+ int regNum,
+ PREGDISPLAY pRD
+ )
+{
+ _ASSERTE(regNum >= 0 && regNum <= 14);
+ _ASSERTE(regNum != 13); // sp
+
+ DWORD **ppReg;
+
+ if(regNum <= 3)
+ {
+ ppReg = &pRD->volatileCurrContextPointers.R0;
+ return (OBJECTREF*)*(ppReg + regNum);
+ }
+ else if(regNum == 12)
+ {
+ return (OBJECTREF*) pRD->volatileCurrContextPointers.R12;
+ }
+ else if(regNum == 14)
+ {
+ return (OBJECTREF*) pRD->pCurrentContextPointers->Lr;
+ }
+
+ ppReg = &pRD->pCurrentContextPointers->R4;
+
+ return (OBJECTREF*)*(ppReg + regNum-4);
+
+}
+
+
+bool GcInfoDecoder::IsScratchRegister(int regNum, PREGDISPLAY pRD)
+{
+ _ASSERTE(regNum >= 0 && regNum <= 14);
+ _ASSERTE(regNum != 13); // sp
+
+ return regNum <= 3 || regNum >= 12; // R12 and R14/LR are both scratch registers
+}
+
+
+bool GcInfoDecoder::IsScratchStackSlot(INT32 spOffset, GcStackSlotBase spBase, PREGDISPLAY pRD)
+{
+#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
+ _ASSERTE( m_Flags & DECODE_GC_LIFETIMES );
+
+ DWORD pSlot = (DWORD) GetStackSlot(spOffset, spBase, pRD);
+ _ASSERTE(pSlot >= pRD->SP);
+
+ return (pSlot < pRD->SP + m_SizeOfStackOutgoingAndScratchArea);
+#else
+ return FALSE;
+#endif
+}
+
+
+void GcInfoDecoder::ReportRegisterToGC( // ARM
+ int regNum,
+ unsigned gcFlags,
+ PREGDISPLAY pRD,
+ unsigned flags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack)
+{
+ GCINFODECODER_CONTRACT(CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END);
+
+ _ASSERTE(regNum >= 0 && regNum <= 14);
+ _ASSERTE(regNum != 13); // sp
+
+ LOG((LF_GCROOTS, LL_INFO1000, "Reporting " FMT_REG, regNum ));
+
+ OBJECTREF* pObjRef = GetRegisterSlot( regNum, pRD );
+
+#ifdef _DEBUG
+ if(IsScratchRegister(regNum, pRD))
+ {
+ // Scratch registers cannot be reported for non-leaf frames
+ _ASSERTE(flags & ActiveStackFrame);
+ }
+
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part Two */
+ "at" FMT_ADDR "as ", DBG_ADDR(pObjRef) ));
+
+ VALIDATE_ROOT((gcFlags & GC_CALL_INTERIOR), hCallBack, pObjRef);
+
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part Three */
+ LOG_PIPTR_OBJECT_CLASS(OBJECTREF_TO_UNCHECKED_OBJECTREF(*pObjRef), (gcFlags & GC_CALL_PINNED), (gcFlags & GC_CALL_INTERIOR))));
+#endif //_DEBUG
+
+ gcFlags |= CHECK_APP_DOMAIN;
+
+ pCallBack(hCallBack, pObjRef, gcFlags DAC_ARG(DacSlotLocation(regNum, 0, false)));
+}
+
+#elif defined(_TARGET_ARM64_)
+
+OBJECTREF* GcInfoDecoder::GetRegisterSlot(
+ int regNum,
+ PREGDISPLAY pRD
+ )
+{
+ _ASSERTE(regNum >= 0 && regNum <= 30);
+ _ASSERTE(regNum != 18); // TEB
+
+ DWORD64 **ppReg;
+
+ if(regNum <= 17)
+ {
+ ppReg = &pRD->volatileCurrContextPointers.X0;
+ return (OBJECTREF*)*(ppReg + regNum);
+ }
+ else if(regNum == 29)
+ {
+ return (OBJECTREF*) pRD->pCurrentContextPointers->Fp;
+ }
+ else if(regNum == 30)
+ {
+ return (OBJECTREF*) pRD->pCurrentContextPointers->Lr;
+ }
+
+ ppReg = &pRD->pCurrentContextPointers->X19;
+
+ return (OBJECTREF*)*(ppReg + regNum-19);
+}
+
+bool GcInfoDecoder::IsScratchRegister(int regNum, PREGDISPLAY pRD)
+{
+ _ASSERTE(regNum >= 0 && regNum <= 30);
+ _ASSERTE(regNum != 18);
+
+    return regNum <= 17 || regNum >= 29; // x0-x17 are volatile; x29 (FP) and x30 (LR) are also treated as scratch here
+}
+
+bool GcInfoDecoder::IsScratchStackSlot(INT32 spOffset, GcStackSlotBase spBase, PREGDISPLAY pRD)
+{
+#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
+ _ASSERTE( m_Flags & DECODE_GC_LIFETIMES );
+
+ ULONGLONG pSlot = (ULONGLONG) GetStackSlot(spOffset, spBase, pRD);
+ _ASSERTE(pSlot >= pRD->SP);
+
+ return (pSlot < pRD->SP + m_SizeOfStackOutgoingAndScratchArea);
+#else
+ return FALSE;
+#endif
+
+}
+
+void GcInfoDecoder::ReportRegisterToGC( // ARM64
+ int regNum,
+ unsigned gcFlags,
+ PREGDISPLAY pRD,
+ unsigned flags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack)
+{
+ GCINFODECODER_CONTRACT(CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END);
+
+ _ASSERTE(regNum >= 0 && regNum <= 30);
+ _ASSERTE(regNum != 18);
+
+ LOG((LF_GCROOTS, LL_INFO1000, "Reporting " FMT_REG, regNum ));
+
+ OBJECTREF* pObjRef = GetRegisterSlot( regNum, pRD );
+
+#ifdef _DEBUG
+ if(IsScratchRegister(regNum, pRD))
+ {
+ // Scratch registers cannot be reported for non-leaf frames
+ _ASSERTE(flags & ActiveStackFrame);
+ }
+
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part Two */
+ "at" FMT_ADDR "as ", DBG_ADDR(pObjRef) ));
+
+ VALIDATE_ROOT((gcFlags & GC_CALL_INTERIOR), hCallBack, pObjRef);
+
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part Three */
+ LOG_PIPTR_OBJECT_CLASS(OBJECTREF_TO_UNCHECKED_OBJECTREF(*pObjRef), (gcFlags & GC_CALL_PINNED), (gcFlags & GC_CALL_INTERIOR))));
+#endif //_DEBUG
+
+ gcFlags |= CHECK_APP_DOMAIN;
+
+ pCallBack(hCallBack, pObjRef, gcFlags DAC_ARG(DacSlotLocation(regNum, 0, false)));
+}
+
+#else // Unknown platform
+
+OBJECTREF* GcInfoDecoder::GetRegisterSlot(
+ int regNum,
+ PREGDISPLAY pRD
+ )
+{
+ PORTABILITY_ASSERT("GcInfoDecoder::GetRegisterSlot");
+ return NULL;
+}
+
+bool GcInfoDecoder::IsScratchRegister(int regNum, PREGDISPLAY pRD)
+{
+ PORTABILITY_ASSERT("GcInfoDecoder::IsScratchRegister");
+ return false;
+}
+
+bool GcInfoDecoder::IsScratchStackSlot(INT32 spOffset, GcStackSlotBase spBase, PREGDISPLAY pRD)
+{
+ _ASSERTE( !"NYI" );
+ return false;
+}
+
+void GcInfoDecoder::ReportRegisterToGC(
+ int regNum,
+ unsigned gcFlags,
+ PREGDISPLAY pRD,
+ unsigned flags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack)
+{
+ _ASSERTE( !"NYI" );
+}
+
+#endif // Unknown platform
+
+
+OBJECTREF* GcInfoDecoder::GetStackSlot(
+ INT32 spOffset,
+ GcStackSlotBase spBase,
+ PREGDISPLAY pRD
+ )
+{
+#ifdef CROSSGEN_COMPILE
+ _ASSERTE(!"GcInfoDecoder::GetStackSlot not supported in this build configuration");
+ return NULL;
+#else // CROSSGEN_COMPILE
+ OBJECTREF* pObjRef;
+
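+    // Informally: GC_SP_REL resolves to SP + spOffset, GC_CALLER_SP_REL to the
+    // caller's SP + spOffset, and GC_FRAMEREG_REL reads the frame register's
+    // current value first and then adds spOffset.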
+ if( GC_SP_REL == spBase )
+ {
+ pObjRef = (OBJECTREF*) ((SIZE_T)GetRegdisplaySP(pRD) + spOffset);
+ }
+ else if( GC_CALLER_SP_REL == spBase )
+ {
+ pObjRef = (OBJECTREF*) (GET_CALLER_SP(pRD) + spOffset);
+ }
+ else
+ {
+ _ASSERTE( GC_FRAMEREG_REL == spBase );
+ _ASSERTE( NO_STACK_BASE_REGISTER != m_StackBaseRegister );
+
+ pObjRef = (OBJECTREF*)((*((SIZE_T*)(GetRegisterSlot( m_StackBaseRegister, pRD )))) + spOffset);
+ }
+
+ return pObjRef;
+#endif // CROSSGEN_COMPILE
+}
+
+#ifdef DACCESS_COMPILE
+int GcInfoDecoder::GetStackReg(int spBase)
+{
+#if defined(_TARGET_AMD64_)
+ int esp = 4;
+#elif defined(_TARGET_ARM_)
+ int esp = 13;
+#elif defined(_TARGET_ARM64_)
+    _ASSERTE(!"ARM64:NYI"); // a bare string literal is always true and would never fire
+ int esp = 30;
+#endif
+
+ if( GC_SP_REL == spBase )
+ return esp;
+ else if ( GC_CALLER_SP_REL == spBase )
+ return -(esp+1);
+ else
+ return m_StackBaseRegister;
+}
+#endif // DACCESS_COMPILE
+
+void GcInfoDecoder::ReportStackSlotToGC(
+ INT32 spOffset,
+ GcStackSlotBase spBase,
+ unsigned gcFlags,
+ PREGDISPLAY pRD,
+ unsigned flags,
+ GCEnumCallback pCallBack,
+ LPVOID hCallBack)
+{
+ GCINFODECODER_CONTRACT(CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END);
+
+ OBJECTREF* pObjRef = GetStackSlot(spOffset, spBase, pRD);
+ _ASSERTE( IS_ALIGNED( pObjRef, sizeof( Object* ) ) );
+
+#ifdef _DEBUG
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part One */
+ "Reporting %s" FMT_STK,
+ ( (GC_SP_REL == spBase) ? "" :
+ ((GC_CALLER_SP_REL == spBase) ? "caller's " :
+ ((GC_FRAMEREG_REL == spBase) ? "frame " : "<unrecognized GcStackSlotBase> "))),
+ DBG_STK(spOffset) ));
+
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part Two */
+ "at" FMT_ADDR "as ", DBG_ADDR(pObjRef) ));
+
+ VALIDATE_ROOT((gcFlags & GC_CALL_INTERIOR), hCallBack, pObjRef);
+
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part Three */
+ LOG_PIPTR_OBJECT_CLASS(OBJECTREF_TO_UNCHECKED_OBJECTREF(*pObjRef), (gcFlags & GC_CALL_PINNED), (gcFlags & GC_CALL_INTERIOR))));
+#endif
+
+ gcFlags |= CHECK_APP_DOMAIN;
+
+ pCallBack(hCallBack, pObjRef, gcFlags DAC_ARG(DacSlotLocation(GetStackReg(spBase), spOffset, true)));
+}
+
+
+#endif // USE_GC_INFO_DECODER
+
diff --git a/src/vm/gcscan.h b/src/vm/gcscan.h
new file mode 100644
index 0000000000..2cdfe3c1a0
--- /dev/null
+++ b/src/vm/gcscan.h
@@ -0,0 +1,6 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "../gc/gcscan.h"
diff --git a/src/vm/gcstress.h b/src/vm/gcstress.h
new file mode 100644
index 0000000000..adc02d7470
--- /dev/null
+++ b/src/vm/gcstress.h
@@ -0,0 +1,555 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+//
+// #Overview
+//
+// This file provides convenient wrappers for the GC stress functionality.
+//
+// Exposed APIs:
+// GCStressPolicy::InhibitHolder
+// GCStressPolicy::GlobalEnable()
+// GCStressPolicy::GlobalDisable()
+// GCStressPolicy::IsEnabled()
+//
+// GCStress<> template classes with their IsEnabled() & MaybeTrigger() members.
+//
+// Use GCStress<> to abstract away the GC-stress-related decisions. The
+// template definitions will resolve to nothing when STRESS_HEAP is not
+// defined, and will inline the function body at the call site otherwise.
+//
+// Examples:
+// GCStress<cfg_any>::IsEnabled()
+// GCStress<cfg_any, EeconfigFastGcSPolicy, CoopGcModePolicy>::MaybeTrigger()
+//
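+// A minimal call-site sketch (hypothetical, for illustration), assuming a code
+// path that wants to stress GC transitions and switch to cooperative mode first:
+//
+//   void OnTransition()
+//   {
+//       // CoopGcModePolicy enters cooperative mode around the trigger
+//       GCStress<cfg_transition, CoopGcModePolicy>::MaybeTrigger();
+//   }
+//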
+
+#ifndef _GC_STRESS_
+#define _GC_STRESS_
+
+#include "mpl/type_list"
+
+
+struct alloc_context;
+
+
+enum gcs_trigger_points {
+ // generic handling based on EEConfig settings
+ cfg_any, // any bit set in EEConfig::iGCStress
+ cfg_alloc, // trigger on GC allocations
+ cfg_transition, // trigger on transitions
+ cfg_instr_jit, // trigger on JITted instructions
+ cfg_instr_ngen, // trigger on NGENed instructions
+ cfg_easy, // trigger on allocs or transitions
+ cfg_instr, // trigger on managed instructions (JITted or NGENed)
+ cfg_last, // boundary
+
+ // special handling at particular trigger points
+ jit_on_create_jump_stub,
+ jit_on_create_il_stub,
+ gc_on_alloc,
+ vsd_on_resolve
+};
+
+
+namespace GCStressPolicy
+{
+
+#ifdef STRESS_HEAP
+
+#ifdef __GNUC__
+#define UNUSED_ATTR __attribute__ ((unused))
+#else // __GNUC__
+#define UNUSED_ATTR
+#endif // __GNUC__
+
+#ifndef __UNUSED
+#define __UNUSED(x) ((void)(x))
+#endif // __UNUSED
+
+ class InhibitHolder
+ {
+ private:
+ // This static controls whether GC stress may induce GCs. EEConfig::GetGCStressLevel() still
+ // controls when GCs may occur.
+ static Volatile<DWORD> s_nGcStressDisabled;
+
+        bool m_bAcquired;
+
+ public:
+ InhibitHolder()
+        { LIMITED_METHOD_CONTRACT; ++s_nGcStressDisabled; m_bAcquired = true; }
+
+ ~InhibitHolder()
+ { LIMITED_METHOD_CONTRACT; Release(); }
+
+ void Release()
+ {
+ LIMITED_METHOD_CONTRACT;
+            if (m_bAcquired)
+ {
+ --s_nGcStressDisabled;
+                m_bAcquired = false;
+ }
+ }
+
+ friend bool IsEnabled();
+ friend void GlobalDisable();
+ friend void GlobalEnable();
+ } UNUSED_ATTR;
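+
+    // Informal usage sketch (hypothetical scope): constructing a holder bumps
+    // the disable count, inhibiting stress-induced GCs until it is destroyed
+    // or explicitly released:
+    //
+    //   { GCStressPolicy::InhibitHolder holder; // stress now inhibited
+    //     DoStressSensitiveWork();              // hypothetical helper
+    //   }                                       // destructor re-enables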
+
+ FORCEINLINE bool IsEnabled()
+ { return InhibitHolder::s_nGcStressDisabled == 0U; }
+
+ FORCEINLINE void GlobalDisable()
+ { ++InhibitHolder::s_nGcStressDisabled; }
+
+ FORCEINLINE void GlobalEnable()
+ { --InhibitHolder::s_nGcStressDisabled; }
+
+#else // STRESS_HEAP
+
+    class InhibitHolder
+    { public: void Release() {} }; // members of a class default to private
+
+ FORCEINLINE bool IsEnabled()
+ { return false; }
+
+ FORCEINLINE void GlobalDisable()
+ {}
+
+ FORCEINLINE void GlobalEnable()
+ {}
+
+#endif // STRESS_HEAP
+}
+
+
+
+namespace _GCStress
+{
+
+#ifdef STRESS_HEAP
+
+ // Support classes to allow easy customization of GC Stress policies
+ namespace detail
+ {
+ using namespace mpl;
+
+ // Selecting a policy from a type list and a fallback/default policy
+ // GetPolicy<>:type will represent either a type in ListT with the same "tag" as DefPolicy
+ // or DefPolicy, based on the Traits passed in.
+ template <
+ typename ListT,
+ typename DefPolicy,
+ template <typename> class Traits
+ >
+ struct GetPolicy;
+
+ // Common case: recurse over the type list
+ template <
+ typename HeadT,
+ typename TailT,
+ typename DefPolicy,
+ template<typename> class Traits
+ >
+ struct GetPolicy<type_list<HeadT, TailT>, DefPolicy, Traits>
+ {
+ // is true if HeadT and DefPolicy evaluate to the same tag,
+ // through Traits<>
+ static const bool sameTag = std::is_same<
+ typename Traits<HeadT>::tag,
+ typename Traits<DefPolicy>::tag
+ >::value;
+
+ typedef typename std::conditional<
+ sameTag,
+ HeadT,
+ typename GetPolicy<TailT, DefPolicy, Traits>::type
+ >::type type;
+ };
+
+ // Termination case.
+ template <
+ typename DefPolicy,
+ template<typename> class Traits
+ >
+ struct GetPolicy <null_type, DefPolicy, Traits>
+ {
+ typedef DefPolicy type;
+ };
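+
+        // Worked example (informal): with ListT = type_list<CoopGcModePolicy, null_type>
+        // and DefPolicy = AnyGcModePolicy, both map to gc_mode_policy_tag through the
+        // GcStressTraits defined below, so GetPolicy<...>::type selects CoopGcModePolicy;
+        // a list with no gc_mode-tagged entry falls through to AnyGcModePolicy.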
+ }
+
+
+ // GC stress specific EEConfig accessors
+ namespace detail
+ {
+        // no definition provided, so the absence of a concrete specialization causes a compiler error
+ template <enum gcs_trigger_points>
+ static bool IsEnabled();
+
+ template<> FORCEINLINE
+ static bool IsEnabled<cfg_any>()
+ {
+            // Strictly we should test the specific bits, but we've
+            // always just tested against 0...
+ return g_pConfig->GetGCStressLevel() != 0;
+ // return (g_pConfig->GetGCStressLevel() &
+ // (EEConfig::GCSTRESS_ALLOC|EEConfig::GCSTRESS_TRANSITION|
+ // EEConfig::GCSTRESS_INSTR_JIT|EEConfig::GCSTRESS_INSTR_NGEN) != 0);
+ }
+
+ #define DefineIsEnabled(cfg_enum, eeconfig_bits) \
+ template<> FORCEINLINE \
+ static bool IsEnabled<cfg_enum>() \
+ { \
+ return (g_pConfig->GetGCStressLevel() & (eeconfig_bits)) != 0; \
+ }
+
+ DefineIsEnabled(cfg_alloc, EEConfig::GCSTRESS_ALLOC);
+ DefineIsEnabled(cfg_transition, EEConfig::GCSTRESS_TRANSITION);
+ DefineIsEnabled(cfg_instr_jit, EEConfig::GCSTRESS_INSTR_JIT);
+ DefineIsEnabled(cfg_instr_ngen, EEConfig::GCSTRESS_INSTR_NGEN);
+ DefineIsEnabled(cfg_easy, EEConfig::GCSTRESS_ALLOC|EEConfig::GCSTRESS_TRANSITION);
+ DefineIsEnabled(cfg_instr, EEConfig::GCSTRESS_INSTR_JIT|EEConfig::GCSTRESS_INSTR_NGEN);
+
+ #undef DefineIsEnabled
+
+ }
+
+
+ //
+ // GC stress policy classes used by GCSBase and GCStress template classes
+ //
+
+    // Fast GC stress policies that dictate whether GCStress<>::MaybeTrigger()
+ // will consider g_pConfig->FastGCStressLevel() when deciding whether
+ // to trigger a GC or not.
+
+ // This is the default Fast GC stress policy that ignores the EEConfig
+ // setting
+ class IgnoreFastGcSPolicy
+ {
+ public:
+ FORCEINLINE
+ static bool FastGcSEnabled(DWORD minValue = 0)
+ { return false; }
+ };
+
+ // This is the overriding Fast GC stress policy that considers the
+ // EEConfig setting on checked/debug builds
+ class EeconfigFastGcSPolicy
+ {
+ public:
+ FORCEINLINE
+ static bool FastGcSEnabled(DWORD minValue = 0)
+ {
+ #ifdef _DEBUG
+ return g_pConfig->FastGCStressLevel() > minValue;
+ #else // _DEBUG
+ return false;
+ #endif // _DEBUG
+ }
+ };
+
+    // GC Mode policies that determine whether to switch the GC mode before
+ // triggering the GC.
+
+ // This is the default GC Mode stress policy that does not switch GC modes
+ class AnyGcModePolicy
+ {
+ };
+
+ // This is the overriding GC Mode stress policy that forces a switch to
+ // cooperative mode before MaybeTrigger() will trigger a GC
+ class CoopGcModePolicy
+ {
+#ifndef DACCESS_COMPILE
+        // implicit constructor and destructor will do the right thing
+ GCCoop m_coop;
+#endif // DACCESS_COMPILE
+
+ public:
+ FORCEINLINE CoopGcModePolicy()
+ { WRAPPER_NO_CONTRACT; }
+ FORCEINLINE ~CoopGcModePolicy()
+ { WRAPPER_NO_CONTRACT; }
+ } UNUSED_ATTR;
+
+ // GC Trigger policy classes define how a garbage collection is triggered
+
+ // This is the default GC Trigger policy that simply calls
+ // GCHeap::StressHeap
+ class StressGcTriggerPolicy
+ {
+ public:
+ FORCEINLINE
+ static void Trigger()
+ { GCHeap::GetGCHeap()->StressHeap(); }
+
+ FORCEINLINE
+ static void Trigger(::alloc_context* acontext)
+ { GCHeap::GetGCHeap()->StressHeap(acontext); }
+ };
+
+ // This is an overriding GC Trigger policy that triggers a GC by calling
+ // PulseGCMode
+ class PulseGcTriggerPolicy
+ {
+ public:
+ FORCEINLINE
+ static void Trigger()
+ {
+ DEBUG_ONLY_REGION();
+ GetThread()->PulseGCMode();
+ }
+ };
+
+
+ // GC stress policy tags
+ struct fast_gcs_policy_tag {};
+ struct gc_mode_policy_tag {};
+ struct gc_trigger_policy_tag {};
+
+
+ template <class GCSPolicy>
+ struct GcStressTraits
+ { typedef mpl::null_type tag; };
+
+ #define DefineGCStressTraits(Policy, policy_tag) \
+ template <> struct GcStressTraits<Policy> \
+ { typedef policy_tag tag; }
+
+ DefineGCStressTraits(IgnoreFastGcSPolicy, fast_gcs_policy_tag);
+ DefineGCStressTraits(EeconfigFastGcSPolicy, fast_gcs_policy_tag);
+
+ DefineGCStressTraits(AnyGcModePolicy, gc_mode_policy_tag);
+ DefineGCStressTraits(CoopGcModePolicy, gc_mode_policy_tag);
+
+ DefineGCStressTraits(StressGcTriggerPolicy, gc_trigger_policy_tag);
+ DefineGCStressTraits(PulseGcTriggerPolicy, gc_trigger_policy_tag);
+
+ #undef DefineGCStressTraits
+
+ // Special handling for GC stress policies
+ template <class GCPolicies, class DefPolicy>
+ struct GetPolicy:
+ public detail::GetPolicy<GCPolicies, DefPolicy, GcStressTraits>
+ {};
+
+
+ //
+ // The base for any customization GCStress class. It accepts an identifying
+ // GC stress trigger point and at most three overriding policies.
+ //
+ // It defines FastGcSPolicy, GcModePolicy, and GcTriggerPolicy as either
+ // the overriding policy or the default policy, if no corresponding
+ // overriding policy is specified in the list. These names can then be
+ // accessed from the derived GCStress class.
+ //
+ // Additionally it defines the static methods IsEnabled and MaybeTrigger and
+ // how the policy classes influence their behavior.
+ //
+ template <
+ enum gcs_trigger_points tp,
+ class Policy1 = mpl::null_type,
+ class Policy2 = mpl::null_type,
+ class Policy3 = mpl::null_type
+ >
+ class GCSBase
+ {
+ public:
+ typedef typename mpl::make_type_list<Policy1, Policy2, Policy3>::type Policies;
+
+ typedef typename GetPolicy<Policies, IgnoreFastGcSPolicy>::type FastGcSPolicy;
+ typedef typename GetPolicy<Policies, AnyGcModePolicy>::type GcModePolicy;
+ typedef typename GetPolicy<Policies, StressGcTriggerPolicy>::type GcTriggerPolicy;
+
+ typedef GCSBase<tp, FastGcSPolicy, GcModePolicy, GcTriggerPolicy> GcStressBase;
+
+ // Returns true iff:
+        //  . the bit flag in EEConfig::GetGCStressLevel() corresponding to the
+        //    GC stress trigger point is set, AND
+        //  . when a Fast GC stress policy is specified, EEConfig::FastGCStressLevel
+        //    does not exceed the minFastGc argument
+ FORCEINLINE
+ static bool IsEnabled(DWORD minFastGc = 0)
+ {
+ static_assert(tp < cfg_last, "GCSBase only supports cfg_ trigger points.");
+ return detail::IsEnabled<tp>() && !FastGcSPolicy::FastGcSEnabled(minFastGc);
+ }
+
+ // Triggers a GC iff
+ // . the GC stress is not disabled globally (thru GCStressPolicy::GlobalDisable)
+ // AND
+ // . IsEnabled() returns true.
+ // Additionally it switches the GC mode as specified by GcModePolicy, and it
+ // uses GcTriggerPolicy::Trigger() to actually trigger the GC
+ FORCEINLINE
+ static void MaybeTrigger(DWORD minFastGc = 0)
+ {
+ if (IsEnabled(minFastGc) && GCStressPolicy::IsEnabled())
+ {
+ GcModePolicy gcModeObj; __UNUSED(gcModeObj);
+ GcTriggerPolicy::Trigger();
+ }
+ }
+
+ // Triggers a GC iff
+ // . the GC stress is not disabled globally (thru GCStressPolicy::GlobalDisable)
+ // AND
+ // . IsEnabled() returns true.
+ // Additionally it switches the GC mode as specified by GcModePolicy, and it
+ // uses GcTriggerPolicy::Trigger(alloc_context*) to actually trigger the GC
+ FORCEINLINE
+ static void MaybeTrigger(::alloc_context* acontext, DWORD minFastGc = 0)
+ {
+ if (IsEnabled(minFastGc) && GCStressPolicy::IsEnabled())
+ {
+ GcModePolicy gcModeObj; __UNUSED(gcModeObj);
+ GcTriggerPolicy::Trigger(acontext);
+ }
+ }
+ };
+
+ template <
+ enum gcs_trigger_points tp,
+ class Policy1 = mpl::null_type,
+ class Policy2 = mpl::null_type,
+ class Policy3 = mpl::null_type
+ >
+ class GCStress
+ : public GCSBase<tp, Policy1, Policy2, Policy3>
+ {
+ };
+
+
+ //
+ // Partial specializations of GCStress for trigger points requiring non-default
+ // handling.
+ //
+
+ template <>
+ class GCStress<jit_on_create_jump_stub>
+ : public GCSBase<cfg_any>
+ {
+ public:
+
+ FORCEINLINE
+ static void MaybeTrigger(DWORD minFastGc = 0)
+ {
+ if ((GetThreadNULLOk() != NULL) && (GetThreadNULLOk()->PreemptiveGCDisabled()))
+ {
+ // Force a GC if the stress level is high enough
+ GcStressBase::MaybeTrigger(minFastGc);
+ }
+ }
+
+ };
+
+ template <>
+ class GCStress<gc_on_alloc>
+ : public GCSBase<cfg_alloc>
+ {
+ public:
+
+ FORCEINLINE
+ static void MaybeTrigger(::alloc_context* acontext)
+ {
+ GcStressBase::MaybeTrigger(acontext);
+
+#ifdef _DEBUG
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ pThread->EnableStressHeap();
+ }
+#endif //_DEBUG
+ }
+ };
+
+ template <>
+ class GCStress<vsd_on_resolve>
+ : public GCSBase<cfg_any>
+ {
+ public:
+
+ // Triggers a GC iff
+ // . the GC stress is not disabled globally (thru GCStressPolicy::GlobalDisable)
+ // AND
+ // . IsEnabled() returns true.
+ // Additionally it protects the passed in OBJECTREF&, and it uses
+ // GcTriggerPolicy::Trigger() to actually trigger the GC
+ //
+ // Note: the OBJECTREF must be passed by reference so MaybeTrigger can protect
+ // the calling function's stack slot.
+ FORCEINLINE
+ static void MaybeTriggerAndProtect(OBJECTREF& objref)
+ {
+ if (GcStressBase::IsEnabled() && GCStressPolicy::IsEnabled())
+ {
+ GCPROTECT_BEGIN(objref);
+ GcTriggerPolicy::Trigger();
+ GCPROTECT_END();
+ }
+ }
+ };
+
+#else // STRESS_HEAP
+
+ class IgnoreFastGcSPolicy {};
+ class EeconfigFastGcSPolicy {};
+ class AnyGcModePolicy {};
+ class CoopGcModePolicy {};
+ class StressGcTriggerPolicy {};
+ class PulseGcTriggerPolicy {};
+
+ // Everything here should resolve to inlined empty blocks or "false"
+ template <
+ enum gcs_trigger_points tp,
+ class Policy1 = mpl::null_type,
+ class Policy2 = mpl::null_type,
+ class Policy3 = mpl::null_type
+ >
+ class GCStress
+ {
+ public:
+ FORCEINLINE
+ static bool IsEnabled(DWORD minFastGc = 0)
+ {
+ static_assert(tp < cfg_last, "GCStress::IsEnabled only supports cfg_ trigger points.");
+ return false;
+ }
+
+ FORCEINLINE
+ static void MaybeTrigger(DWORD minFastGc = 0)
+ {}
+
+ FORCEINLINE
+ static void MaybeTrigger(::alloc_context* acontext, DWORD minFastGc = 0)
+ {}
+
+ template<typename T>
+ FORCEINLINE
+ static void MaybeTrigger(T arg)
+ {}
+ };
+
+#endif // STRESS_HEAP
+
+}
+
+using _GCStress::IgnoreFastGcSPolicy;
+using _GCStress::EeconfigFastGcSPolicy;
+using _GCStress::AnyGcModePolicy;
+using _GCStress::CoopGcModePolicy;
+using _GCStress::StressGcTriggerPolicy;
+using _GCStress::PulseGcTriggerPolicy;
+
+using _GCStress::GCStress;
+
+
+
+#endif
diff --git a/src/vm/genericdict.cpp b/src/vm/genericdict.cpp
new file mode 100644
index 0000000000..b57b13c924
--- /dev/null
+++ b/src/vm/genericdict.cpp
@@ -0,0 +1,970 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: genericdict.cpp
+//
+
+//
+// WARNING: Do NOT try to save dictionary slots except in the
+// hardbind case. Saving further dictionary slots can lead
+// to ComputeNeedsRestore returning TRUE for the dictionary and
+// the associated method table (though of course only if some
+// entries in the dictionary are prepopulated). However at
+// earlier stages in the NGEN, code may have been compiled
+// under the assumption that ComputeNeedsRestore was
+//          FALSE for the associated method table, and indeed this result
+// may have been cached in the ComputeNeedsRestore
+// for the MethodTable. Thus the combination of populating
+// the dictionary and saving further dictionary slots could lead
+// to inconsistencies and unsoundnesses in compilation.
+//
+
+//
+// ============================================================================
+
+#include "common.h"
+#include "genericdict.h"
+#include "typestring.h"
+#include "field.h"
+#include "typectxt.h"
+#include "virtualcallstub.h"
+#include "sigbuilder.h"
+#include "compile.h"
+
+#ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+//static
+DictionaryLayout *
+DictionaryLayout::Allocate(
+ WORD numSlots,
+ LoaderAllocator * pAllocator,
+ AllocMemTracker * pamTracker)
+{
+ CONTRACT(DictionaryLayout*)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pAllocator));
+ PRECONDITION(numSlots > 0);
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END
+
+ S_SIZE_T bytes = S_SIZE_T(sizeof(DictionaryLayout)) + S_SIZE_T(sizeof(DictionaryEntryLayout)) * S_SIZE_T(numSlots-1);
+
+ TaggedMemAllocPtr ptr = pAllocator->GetLowFrequencyHeap()->AllocMem(bytes);
+
+ if (pamTracker != NULL)
+ pamTracker->Track(ptr);
+
+ DictionaryLayout * pD = (DictionaryLayout *)(void *)ptr;
+
+ // When bucket spills we'll allocate another layout structure
+ pD->m_pNext = NULL;
+
+ // This is the number of slots excluding the type parameters
+ pD->m_numSlots = numSlots;
+
+ RETURN pD;
+} // DictionaryLayout::Allocate
+
+#endif //!DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// Count the number of bytes that are required by the first bucket in a dictionary with the specified layout
+//
+//static
+DWORD
+DictionaryLayout::GetFirstDictionaryBucketSize(
+ DWORD numGenericArgs,
+ PTR_DictionaryLayout pDictLayout)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ PRECONDITION(numGenericArgs > 0);
+ PRECONDITION(CheckPointer(pDictLayout, NULL_OK));
+
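+    // e.g. (informal, 64-bit): two generic args and a three-slot layout give
+    // 2 * sizeof(TypeHandle) + 3 * sizeof(void*) = 40 bytes for the first bucket.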
+ DWORD bytes = numGenericArgs * sizeof(TypeHandle);
+ if (pDictLayout != NULL)
+ bytes += pDictLayout->m_numSlots * sizeof(void*);
+
+ return bytes;
+}
+
+#ifndef DACCESS_COMPILE
+//---------------------------------------------------------------------------------------
+//
+// Find a token in the dictionary layout and return the offsets of indirections
+// required to get to its slot in the actual dictionary
+//
+// NOTE: We will currently never return more than one indirection. We don't
+// cascade dictionaries but we will record overflows in the dictionary layout
+// (and cascade that accordingly) so we can prepopulate the overflow hash in
+// reliability scenarios.
+//
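+// For example (informal): the first token added to a two-argument class
+// dictionary lands in slot 2 (slots 0 and 1 hold the type arguments), so the
+// result is one indirection with offsets[nFirstOffset] = 2 * sizeof(DictionaryEntry).
+//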
+// Optimize the case of a token being !i (for class dictionaries) or !!i (for method dictionaries)
+//
+//static
+BOOL
+DictionaryLayout::FindToken(
+ LoaderAllocator * pAllocator,
+ DWORD numGenericArgs,
+ DictionaryLayout * pDictLayout,
+ CORINFO_RUNTIME_LOOKUP * pResult,
+ SigBuilder * pSigBuilder,
+ int nFirstOffset)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(numGenericArgs > 0);
+ PRECONDITION(CheckPointer(pDictLayout));
+ }
+ CONTRACTL_END
+
+ BOOL isFirstBucket = TRUE;
+
+ // First bucket also contains type parameters
+ _ASSERTE(FitsIn<WORD>(numGenericArgs));
+ WORD slot = static_cast<WORD>(numGenericArgs);
+ for (;;)
+ {
+ for (DWORD iSlot = 0; iSlot < pDictLayout->m_numSlots; iSlot++)
+ {
+ RetryMatch:
+ BYTE * pCandidate = (BYTE *)pDictLayout->m_slots[iSlot].m_signature;
+ if (pCandidate != NULL)
+ {
+ DWORD cbSig;
+ BYTE * pSig = (BYTE *)pSigBuilder->GetSignature(&cbSig);
+
+ // Compare the signatures. We do not need to worry about the size of pCandidate.
+ // As long as we are comparing one byte at a time we are guaranteed to not overrun.
+ DWORD j;
+ for (j = 0; j < cbSig; j++)
+ {
+ if (pCandidate[j] != pSig[j])
+ break;
+ }
+
+ // We've found it
+ if (j == cbSig)
+ {
+ pResult->signature = pDictLayout->m_slots[iSlot].m_signature;
+
+                    // We don't store entries outside the layout's first bucket in the dictionary
+                    // (they'll be cached in a hash instead).
+ if (!isFirstBucket)
+ {
+ return FALSE;
+ }
+ _ASSERTE(FitsIn<WORD>(nFirstOffset + 1));
+ pResult->indirections = static_cast<WORD>(nFirstOffset+1);
+ pResult->offsets[nFirstOffset] = slot * sizeof(DictionaryEntry);
+ return TRUE;
+ }
+ }
+ // If we hit an empty slot then there's no more so use it
+ else
+ {
+ {
+ BaseDomain::LockHolder lh(pAllocator->GetDomain());
+
+ if (pDictLayout->m_slots[iSlot].m_signature != NULL)
+ goto RetryMatch;
+
+ pSigBuilder->AppendData(isFirstBucket ? slot : 0);
+
+ DWORD cbSig;
+ PVOID pSig = pSigBuilder->GetSignature(&cbSig);
+
+ PVOID pPersisted = pAllocator->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(cbSig));
+ memcpy(pPersisted, pSig, cbSig);
+
+ *EnsureWritablePages(&(pDictLayout->m_slots[iSlot].m_signature)) = pPersisted;
+ }
+
+ pResult->signature = pDictLayout->m_slots[iSlot].m_signature;
+
+ // Again, we only store entries in the first layout bucket in the dictionary.
+ if (!isFirstBucket)
+ {
+ return FALSE;
+ }
+ _ASSERTE(FitsIn<WORD>(nFirstOffset + 1));
+ pResult->indirections = static_cast<WORD>(nFirstOffset+1);
+ pResult->offsets[nFirstOffset] = slot * sizeof(DictionaryEntry);
+ return TRUE;
+ }
+ slot++;
+ }
+
+ // If we've reached the end of the chain we need to allocate another bucket. Make the pointer update carefully to avoid
+ // orphaning a bucket in a race. We leak the loser in such a race (since the allocation comes from the loader heap) but both
+ // the race and the overflow should be very rare.
+ if (pDictLayout->m_pNext == NULL)
+ FastInterlockCompareExchangePointer(EnsureWritablePages(&(pDictLayout->m_pNext)), Allocate(4, pAllocator, NULL), 0);
+
+ pDictLayout = pDictLayout->m_pNext;
+ isFirstBucket = FALSE;
+ }
+} // DictionaryLayout::FindToken
+#endif //!DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+DWORD
+DictionaryLayout::GetMaxSlots()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_numSlots;
+}
+
+//---------------------------------------------------------------------------------------
+//
+DWORD
+DictionaryLayout::GetNumUsedSlots()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD numUsedSlots = 0;
+ for (DWORD i = 0; i < m_numSlots; i++)
+ {
+ if (GetEntryLayout(i)->m_signature != NULL)
+ numUsedSlots++;
+ }
+ return numUsedSlots;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+DictionaryEntryKind
+DictionaryEntryLayout::GetKind()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (m_signature == NULL)
+ return EmptySlot;
+
+ SigPointer ptr((PCCOR_SIGNATURE)dac_cast<TADDR>(m_signature));
+
+ ULONG kind; // DictionaryEntryKind
+ IfFailThrow(ptr.GetData(&kind));
+
+ return (DictionaryEntryKind)kind;
+}
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+
+//---------------------------------------------------------------------------------------
+//
+DWORD
+DictionaryLayout::GetObjectSize()
+{
+ LIMITED_METHOD_CONTRACT;
+ return sizeof(DictionaryLayout) + sizeof(DictionaryEntryLayout) * (m_numSlots-1);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Save the dictionary layout for prejitting
+//
+void
+DictionaryLayout::Save(
+ DataImage * image)
+{
+ STANDARD_VM_CONTRACT;
+
+ DictionaryLayout *pDictLayout = this;
+
+ while (pDictLayout)
+ {
+ image->StoreStructure(pDictLayout, pDictLayout->GetObjectSize(), DataImage::ITEM_DICTIONARY_LAYOUT);
+ pDictLayout = pDictLayout->m_pNext;
+ }
+
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Trim unused slots from the dictionary layout before saving it for prejitting
+//
+void
+DictionaryLayout::Trim()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Only the last bucket in the chain may have unused entries
+ DictionaryLayout *pDictLayout = this;
+ while (pDictLayout->m_pNext)
+ pDictLayout = pDictLayout->m_pNext;
+
+ // Trim down the size to what's actually used
+ DWORD dwSlots = pDictLayout->GetNumUsedSlots();
+ _ASSERTE(FitsIn<WORD>(dwSlots));
+ *EnsureWritablePages(&pDictLayout->m_numSlots) = static_cast<WORD>(dwSlots);
+
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Fixup pointers in the dictionary layout for prejitting
+//
+void
+DictionaryLayout::Fixup(
+ DataImage * image,
+ BOOL fMethod)
+{
+ STANDARD_VM_CONTRACT;
+
+ DictionaryLayout *pDictLayout = this;
+
+ while (pDictLayout)
+ {
+ for (DWORD i = 0; i < pDictLayout->m_numSlots; i++)
+ {
+ PVOID signature = pDictLayout->m_slots[i].m_signature;
+ if (signature != NULL)
+ {
+ image->FixupFieldToNode(pDictLayout, (BYTE *)&pDictLayout->m_slots[i].m_signature - (BYTE *)pDictLayout,
+ image->GetGenericSignature(signature, fMethod));
+ }
+ }
+ image->FixupPointerField(pDictLayout, offsetof(DictionaryLayout, m_pNext));
+ pDictLayout = pDictLayout->m_pNext;
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Fixup pointers in the actual dictionary, including the type arguments. Delete entries
+// that are expensive or difficult to restore.
+//
+void
+Dictionary::Fixup(
+ DataImage * image,
+ BOOL canSaveInstantiation,
+ BOOL canSaveSlots,
+ DWORD numGenericArgs,
+ Module * pModule,
+ DictionaryLayout * pDictLayout)
+{
+ STANDARD_VM_CONTRACT;
+
+ // First fixup the type handles in the instantiation itself
+ FixupPointer<TypeHandle> *pInst = GetInstantiation();
+ for (DWORD j = 0; j < numGenericArgs; j++)
+ {
+ if (canSaveInstantiation)
+ {
+ image->FixupTypeHandlePointer(pInst, &pInst[j]);
+ }
+ else
+ {
+ image->ZeroPointerField(AsPtr(), j * sizeof(DictionaryEntry));
+ }
+ }
+
+ // Now traverse the remaining slots
+ if (pDictLayout != NULL)
+ {
+ for (DWORD i = 0; i < pDictLayout->m_numSlots; i++)
+ {
+ int slotOffset = (numGenericArgs + i) * sizeof(DictionaryEntry);
+
+ // First check if we can simply hardbind to a prerestored object
+ DictionaryEntryLayout *pLayout = pDictLayout->GetEntryLayout(i);
+ switch (pLayout->GetKind())
+ {
+ case TypeHandleSlot:
+ case DeclaringTypeHandleSlot:
+ if (canSaveSlots &&
+ !IsSlotEmpty(numGenericArgs,i) &&
+ image->CanPrerestoreEagerBindToTypeHandle(GetTypeHandleSlot(numGenericArgs, i), NULL) &&
+ image->CanHardBindToZapModule(GetTypeHandleSlot(numGenericArgs, i).GetLoaderModule()))
+ {
+ image->HardBindTypeHandlePointer(AsPtr(), slotOffset);
+ }
+ else
+ {
+ // Otherwise just zero the slot
+ image->ZeroPointerField(AsPtr(), slotOffset);
+ }
+ break;
+ case MethodDescSlot:
+ if (canSaveSlots &&
+ !IsSlotEmpty(numGenericArgs,i) &&
+ image->CanPrerestoreEagerBindToMethodDesc(GetMethodDescSlot(numGenericArgs,i), NULL) &&
+ image->CanHardBindToZapModule(GetMethodDescSlot(numGenericArgs,i)->GetLoaderModule()))
+ {
+ image->FixupPointerField(AsPtr(), slotOffset);
+ }
+ else
+ {
+ // Otherwise just zero the slot
+ image->ZeroPointerField(AsPtr(), slotOffset);
+ }
+ break;
+ case FieldDescSlot:
+ if (canSaveSlots &&
+ !IsSlotEmpty(numGenericArgs,i) &&
+ image->CanEagerBindToFieldDesc(GetFieldDescSlot(numGenericArgs,i)) &&
+ image->CanHardBindToZapModule(GetFieldDescSlot(numGenericArgs,i)->GetLoaderModule()))
+ {
+ image->FixupPointerField(AsPtr(), slotOffset);
+ }
+ else
+ {
+ // Otherwise just zero the slot
+ image->ZeroPointerField(AsPtr(), slotOffset);
+ }
+ break;
+ default:
+ // <TODO> Method entry points are currently not saved </TODO>
+ // <TODO> Stub dispatch slots are currently not saved </TODO>
+ // Otherwise just zero the slot
+ image->ZeroPointerField(AsPtr(), slotOffset);
+ }
+ }
+ }
+} // Dictionary::Fixup
+
+//---------------------------------------------------------------------------------------
+//
+BOOL
+Dictionary::IsWriteable(
+ DataImage * image,
+ BOOL canSaveSlots,
+ DWORD numGenericArgs, // Must be non-zero
+ Module * pModule, // module of the generic code
+ DictionaryLayout * pDictLayout)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Traverse dictionary slots
+ if (pDictLayout != NULL)
+ {
+ for (DWORD i = 0; i < pDictLayout->m_numSlots; i++)
+ {
+ // First check if we can simply hardbind to a prerestored object
+ DictionaryEntryLayout *pLayout = pDictLayout->GetEntryLayout(i);
+ switch (pLayout->GetKind())
+ {
+ case TypeHandleSlot:
+ case DeclaringTypeHandleSlot:
+ if (canSaveSlots &&
+ !IsSlotEmpty(numGenericArgs,i) &&
+ image->CanPrerestoreEagerBindToTypeHandle(GetTypeHandleSlot(numGenericArgs, i), NULL) &&
+ image->CanHardBindToZapModule(GetTypeHandleSlot(numGenericArgs, i).GetLoaderModule()))
+ {
+ // do nothing
+ }
+ else
+ {
+ return TRUE;
+ }
+ break;
+ case MethodDescSlot:
+ if (canSaveSlots &&
+ !IsSlotEmpty(numGenericArgs,i) &&
+ image->CanPrerestoreEagerBindToMethodDesc(GetMethodDescSlot(numGenericArgs,i), NULL) &&
+ image->CanHardBindToZapModule(GetMethodDescSlot(numGenericArgs,i)->GetLoaderModule()))
+ {
+ // do nothing
+ }
+ else
+ {
+ return TRUE;
+ }
+ break;
+ case FieldDescSlot:
+ if (canSaveSlots &&
+ !IsSlotEmpty(numGenericArgs,i) &&
+ image->CanEagerBindToFieldDesc(GetFieldDescSlot(numGenericArgs,i)) &&
+ image->CanHardBindToZapModule(GetFieldDescSlot(numGenericArgs,i)->GetLoaderModule()))
+ {
+ // do nothing
+ }
+ else
+ {
+ return TRUE;
+ }
+ break;
+ default:
+ // <TODO> Method entry points are currently not saved </TODO>
+ // <TODO> Stub dispatch slots are currently not saved </TODO>
+ return TRUE;
+ }
+ }
+ }
+
+ return FALSE;
+} // Dictionary::IsWriteable
+
+//---------------------------------------------------------------------------------------
+//
+BOOL
+Dictionary::ComputeNeedsRestore(
+ DataImage * image,
+ TypeHandleList * pVisited,
+ DWORD numGenericArgs)
+{
+ STANDARD_VM_CONTRACT;
+
+ // First check the type handles in the instantiation itself
+ FixupPointer<TypeHandle> *inst = GetInstantiation();
+ for (DWORD j = 0; j < numGenericArgs; j++)
+ {
+ if (!image->CanPrerestoreEagerBindToTypeHandle(inst[j].GetValue(), pVisited))
+ return TRUE;
+ }
+
+ // Unless prepopulating we don't need to check the entries
+ // of the dictionary because if we can't
+ // hardbind to them we just zero the dictionary entry and recover
+ // it on demand.
+
+ return FALSE;
+}
+#endif //FEATURE_NATIVE_IMAGE_GENERATION
+
+#ifdef FEATURE_PREJIT
+//---------------------------------------------------------------------------------------
+//
+void
+Dictionary::Restore(
+ DWORD numGenericArgs,
+ ClassLoadLevel level)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INSTANCE_CHECK;
+ }
+ CONTRACTL_END
+
+ // First restore the type handles in the instantiation itself
+ FixupPointer<TypeHandle> *inst = GetInstantiation();
+ for (DWORD j = 0; j < numGenericArgs; j++)
+ {
+ Module::RestoreTypeHandlePointer(&inst[j], NULL, level);
+ }
+
+ // We don't restore the remainder of the dictionary - see
+ // long comment at the start of this file as to why
+}
+#endif // FEATURE_PREJIT
+
+//---------------------------------------------------------------------------------------
+//
+DictionaryEntry
+Dictionary::PopulateEntry(
+ MethodDesc * pMD,
+ MethodTable * pMT,
+ LPVOID signature,
+ BOOL nonExpansive,
+ DictionaryEntry ** ppSlot)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ CORINFO_GENERIC_HANDLE result = NULL;
+ *ppSlot = NULL;
+
+ SigPointer ptr((PCCOR_SIGNATURE)signature);
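+
+    // The encoded signature is read as: the entry kind; for class dictionaries
+    // the dictionary index; a kind-specific payload (type handles, method slot
+    // or token plus flags, field index); and finally the slot index to populate.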
+
+ Dictionary * pDictionary = NULL;
+
+ ULONG kind; // DictionaryEntryKind
+ IfFailThrow(ptr.GetData(&kind));
+
+ if (pMT != NULL)
+ {
+ // We need to normalize the class passed in (if any) for reliability purposes. That's because preparation of a code region that
+ // contains these handle lookups depends on being able to predict exactly which lookups are required (so we can pre-cache the
+ // answers and remove any possibility of failure at runtime). This is hard to do if the lookup (in this case the lookup of the
+ // dictionary overflow cache) is keyed off the somewhat arbitrary type of the instance on which the call is made (we'd need to
+ // prepare for every possible derived type of the type containing the method). So instead we have to locate the exactly
+ // instantiated (non-shared) super-type of the class passed in.
+
+ ULONG dictionaryIndex = 0;
+ IfFailThrow(ptr.GetData(&dictionaryIndex));
+
+ pDictionary = pMT->GetDictionary();
+
+ // MethodTable is expected to be normalized
+ _ASSERTE(pDictionary == pMT->GetPerInstInfo()[dictionaryIndex]);
+ }
+ else
+ {
+ pDictionary = pMD->GetMethodDictionary();
+ }
+
+ {
+ SigTypeContext typeContext;
+
+ if (pMT != NULL)
+ {
+ SigTypeContext::InitTypeContext(pMT, &typeContext);
+ }
+ else
+ {
+ SigTypeContext::InitTypeContext(pMD, &typeContext);
+ }
+
+
+ Module * pContainingZapModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(signature));
+
+ ZapSig::Context zapSigContext(
+ MscorlibBinder::GetModule(),
+ (void *)pContainingZapModule,
+ ZapSig::NormalTokens);
+ ZapSig::Context * pZapSigContext = (pContainingZapModule != NULL) ? &zapSigContext : NULL;
+
+ TypeHandle constraintType;
+ TypeHandle declaringType;
+
+ switch (kind)
+ {
+ case DeclaringTypeHandleSlot:
+ {
+ declaringType = ptr.GetTypeHandleThrowing(
+ MscorlibBinder::GetModule(),
+ &typeContext,
+ (nonExpansive ? ClassLoader::DontLoadTypes : ClassLoader::LoadTypes),
+ CLASS_LOADED,
+ FALSE,
+ NULL,
+ pZapSigContext);
+ if (declaringType.IsNull())
+ {
+ _ASSERTE(nonExpansive);
+ return NULL;
+ }
+ IfFailThrow(ptr.SkipExactlyOne());
+
+ // fall through
+ }
+
+ case TypeHandleSlot:
+ {
+ TypeHandle th = ptr.GetTypeHandleThrowing(
+ MscorlibBinder::GetModule(),
+ &typeContext,
+ (nonExpansive ? ClassLoader::DontLoadTypes : ClassLoader::LoadTypes),
+ CLASS_LOADED,
+ FALSE,
+ NULL,
+ pZapSigContext);
+ if (th.IsNull())
+ {
+ _ASSERTE(nonExpansive);
+ return NULL;
+ }
+ IfFailThrow(ptr.SkipExactlyOne());
+
+ if (!declaringType.IsNull())
+ {
+ th = th.GetMethodTable()->GetMethodTableMatchingParentClass(declaringType.AsMethodTable());
+ }
+
+ result = (CORINFO_GENERIC_HANDLE)th.AsPtr();
+ break;
+ }
+
+ case ConstrainedMethodEntrySlot:
+ {
+ constraintType = ptr.GetTypeHandleThrowing(
+ MscorlibBinder::GetModule(),
+ &typeContext,
+ (nonExpansive ? ClassLoader::DontLoadTypes : ClassLoader::LoadTypes),
+ CLASS_LOADED,
+ FALSE,
+ NULL,
+ pZapSigContext);
+ if (constraintType.IsNull())
+ {
+ _ASSERTE(nonExpansive);
+ return NULL;
+ }
+ IfFailThrow(ptr.SkipExactlyOne());
+
+ // fall through
+ }
+
+ case MethodDescSlot:
+ case DispatchStubAddrSlot:
+ case MethodEntrySlot:
+ {
+ TypeHandle ownerType = ptr.GetTypeHandleThrowing(
+ MscorlibBinder::GetModule(),
+ &typeContext,
+ (nonExpansive ? ClassLoader::DontLoadTypes : ClassLoader::LoadTypes),
+ CLASS_LOADED,
+ FALSE,
+ NULL,
+ pZapSigContext);
+ if (ownerType.IsNull())
+ {
+ _ASSERTE(nonExpansive);
+ return NULL;
+ }
+ IfFailThrow(ptr.SkipExactlyOne());
+
+ // <NICE> wsperf: Create a path that doesn't load types or create new handles if nonExpansive is set </NICE>
+ if (nonExpansive)
+ return NULL;
+
+ MethodTable * pOwnerMT = ownerType.GetMethodTable();
+ _ASSERTE(pOwnerMT != NULL);
+
+ DWORD methodFlags;
+ IfFailThrow(ptr.GetData(&methodFlags));
+
+ BOOL isInstantiatingStub = ((methodFlags & ENCODE_METHOD_SIG_InstantiatingStub) != 0);
+ BOOL isUnboxingStub = ((methodFlags & ENCODE_METHOD_SIG_UnboxingStub) != 0);
+ BOOL fMethodNeedsInstantiation = ((methodFlags & ENCODE_METHOD_SIG_MethodInstantiation) != 0);
+
+ MethodDesc * pMethod = NULL;
+
+ if ((methodFlags & ENCODE_METHOD_SIG_SlotInsteadOfToken) != 0)
+ {
+ // get the method desc using slot number
+ DWORD slot;
+ IfFailThrow(ptr.GetData(&slot));
+
+ if (kind == DispatchStubAddrSlot)
+ {
+ if (NingenEnabled())
+ return NULL;
+
+#ifndef CROSSGEN_COMPILE
+ // Generate a dispatch stub and store it in the dictionary.
+ //
+ // We generate an indirection so we don't have to write to the dictionary
+ // when we do updates, and to simplify stub indirect callsites. Stubs stored in
+ // dictionaries use "RegisterIndirect" stub calling, e.g. "call [eax]",
+ // i.e. here the register "eax" would contain the value fetched from the dictionary,
+                        // which in turn points to the stub indirection that holds the current stub
+                        // address itself. If we just used "call eax" then we wouldn't know which stub indirection
+ // to update. If we really wanted to avoid the extra indirection we could return the _address_ of the
+ // dictionary entry to the caller, still using "call [eax]", and then the
+                        // stub dispatch mechanism can update the dictionary itself and we don't
+ // need an indirection.
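+                        //
+                        // Informal picture: dictionary slot -> indirection cell -> current stub;
+                        // retargeting the stub rewrites only the cell, never the dictionary slot.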
+ LoaderAllocator * pDictLoaderAllocator = (pMT != NULL) ? pMT->GetLoaderAllocator() : pMD->GetLoaderAllocator();
+
+ VirtualCallStubManager * pMgr = pDictLoaderAllocator->GetVirtualCallStubManager();
+
+ // We indirect through a cell so that updates can take place atomically.
+ // The call stub and the indirection cell have the same lifetime as the dictionary itself, i.e.
+                        // are allocated in the domain of the dictionary.
+ //
+ // In the case of overflow (where there is no dictionary, just a global hash table) then
+ // the entry will be placed in the overflow hash table (JitGenericHandleCache). This
+ // is partitioned according to domain, i.e. is scraped each time an AppDomain gets unloaded.
+ PCODE addr = pMgr->GetCallStub(ownerType, slot);
+
+ result = (CORINFO_GENERIC_HANDLE)pMgr->GenerateStubIndirection(addr);
+ break;
+#endif // CROSSGEN_COMPILE
+ }
+
+ pMethod = pOwnerMT->GetMethodDescForSlot(slot);
+ }
+ else
+ {
+ // Decode type where the method token is defined
+ TypeHandle thMethodDefType = ptr.GetTypeHandleThrowing(
+ MscorlibBinder::GetModule(),
+ &typeContext,
+ (nonExpansive ? ClassLoader::DontLoadTypes : ClassLoader::LoadTypes),
+ CLASS_LOADED,
+ FALSE,
+ NULL,
+ pZapSigContext);
+ if (thMethodDefType.IsNull())
+ {
+ _ASSERTE(nonExpansive);
+ return NULL;
+ }
+ IfFailThrow(ptr.SkipExactlyOne());
+ MethodTable * pMethodDefMT = thMethodDefType.GetMethodTable();
+ _ASSERTE(pMethodDefMT != NULL);
+
+ // decode method token
+ RID rid;
+ IfFailThrow(ptr.GetData(&rid));
+ mdMethodDef token = TokenFromRid(rid, mdtMethodDef);
+
+ // The RID map should have been filled out if we fully loaded the class
+ pMethod = pMethodDefMT->GetModule()->LookupMethodDef(token);
+ _ASSERTE(pMethod != NULL);
+ pMethod->CheckRestore();
+ }
+
+ Instantiation inst;
+
+ // Instantiate the method if needed, or create a stub to a static method in a generic class.
+ if (fMethodNeedsInstantiation)
+ {
+ DWORD nargs;
+ IfFailThrow(ptr.GetData(&nargs));
+
+ SIZE_T cbMem;
+
+ if (!ClrSafeInt<SIZE_T>::multiply(nargs, sizeof(TypeHandle), cbMem/* passed by ref */))
+ ThrowHR(COR_E_OVERFLOW);
+
+ TypeHandle * pInst = (TypeHandle*) _alloca(cbMem);
+ for (DWORD i = 0; i < nargs; i++)
+ {
+ pInst[i] = ptr.GetTypeHandleThrowing(
+ MscorlibBinder::GetModule(),
+ &typeContext,
+ ClassLoader::LoadTypes,
+ CLASS_LOADED,
+ FALSE,
+ NULL,
+ pZapSigContext);
+ IfFailThrow(ptr.SkipExactlyOne());
+ }
+
+ inst = Instantiation(pInst, nargs);
+ }
+ else
+ {
+ inst = pMethod->GetMethodInstantiation();
+ }
+
+ // This must be called even if nargs == 0, in order to create an instantiating
+            // stub for static methods in generic classes if needed, and also for BoxedEntryPointStubs
+ // in non-generic structs.
+ pMethod = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pMethod,
+ pOwnerMT,
+ isUnboxingStub,
+ inst,
+ (!isInstantiatingStub && !isUnboxingStub));
+
+ if (kind == ConstrainedMethodEntrySlot)
+ {
+ _ASSERTE(!constraintType.IsNull());
+
+ MethodDesc *pResolvedMD = constraintType.GetMethodTable()->TryResolveConstraintMethodApprox(ownerType, pMethod);
+
+ // All such calls should be resolvable. If not then for now just throw an error.
+ _ASSERTE(pResolvedMD);
+ INDEBUG(if (!pResolvedMD) constraintType.GetMethodTable()->TryResolveConstraintMethodApprox(ownerType, pMethod);)
+ if (!pResolvedMD)
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+
+ result = (CORINFO_GENERIC_HANDLE)pResolvedMD->GetMultiCallableAddrOfCode();
+ }
+ else
+ if (kind == MethodEntrySlot)
+ {
+ result = (CORINFO_GENERIC_HANDLE)pMethod->GetMultiCallableAddrOfCode();
+ }
+ else
+ {
+ _ASSERTE(kind == MethodDescSlot);
+ result = (CORINFO_GENERIC_HANDLE)pMethod;
+ }
+ break;
+ }
+
+ case FieldDescSlot:
+ {
+ TypeHandle th = ptr.GetTypeHandleThrowing(
+ MscorlibBinder::GetModule(),
+ &typeContext,
+ (nonExpansive ? ClassLoader::DontLoadTypes : ClassLoader::LoadTypes),
+ CLASS_LOADED,
+ FALSE,
+ NULL,
+ pZapSigContext);
+ if (th.IsNull())
+ {
+ _ASSERTE(nonExpansive);
+ return NULL;
+ }
+ IfFailThrow(ptr.SkipExactlyOne());
+
+ DWORD fieldIndex;
+ IfFailThrow(ptr.GetData(&fieldIndex));
+
+ result = (CORINFO_GENERIC_HANDLE)th.AsMethodTable()->GetFieldDescByIndex(fieldIndex);
+ break;
+ }
+
+ default:
+ _ASSERTE(!"Invalid DictionaryEntryKind");
+ break;
+ }
+
+ ULONG slotIndex;
+ IfFailThrow(ptr.GetData(&slotIndex));
+
+ MemoryBarrier();
+
+ if ((slotIndex != 0) && !IsCompilationProcess())
+ {
+ *EnsureWritablePages(pDictionary->GetSlotAddr(0, slotIndex)) = result;
+ *ppSlot = pDictionary->GetSlotAddr(0, slotIndex);
+ }
+ }
+
+ return result;
+} // Dictionary::PopulateEntry
+
+//---------------------------------------------------------------------------------------
+//
+void
+Dictionary::PrepopulateDictionary(
+ MethodDesc * pMD,
+ MethodTable * pMT,
+ BOOL nonExpansive)
+{
+ STANDARD_VM_CONTRACT;
+
+ DictionaryLayout * pDictLayout = (pMT != NULL) ? pMT->GetClass()->GetDictionaryLayout() : pMD->GetDictionaryLayout();
+ DWORD numGenericArgs = (pMT != NULL) ? pMT->GetNumGenericArgs() : pMD->GetNumGenericMethodArgs();
+
+ if (pDictLayout != NULL)
+ {
+ for (DWORD i = 0; i < pDictLayout->GetNumUsedSlots(); i++)
+ {
+ if (IsSlotEmpty(numGenericArgs,i))
+ {
+ DictionaryEntry * pSlot;
+ DictionaryEntry entry;
+ entry = PopulateEntry(
+ pMD,
+ pMT,
+ pDictLayout->GetEntryLayout(i)->m_signature,
+ nonExpansive,
+ &pSlot);
+
+ _ASSERT((entry == NULL) || (entry == GetSlot(numGenericArgs,i)) || IsCompilationProcess());
+ _ASSERT((pSlot == NULL) || (pSlot == GetSlotAddr(numGenericArgs,i)));
+ }
+ }
+ }
+} // Dictionary::PrepopulateDictionary
+
+#endif //!DACCESS_COMPILE
diff --git a/src/vm/genericdict.h b/src/vm/genericdict.h
new file mode 100644
index 0000000000..9b6944be43
--- /dev/null
+++ b/src/vm/genericdict.h
@@ -0,0 +1,302 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: genericdict.h
+//
+
+//
+// Definitions for "dictionaries" used to encapsulate generic instantiations
+// and instantiation-specific information for shared-code generics
+//
+
+//
+// ============================================================================
+
+#ifndef _GENERICDICT_H
+#define _GENERICDICT_H
+
+#ifdef FEATURE_PREJIT
+#include "dataimage.h"
+#endif
+
+// DICTIONARIES
+//
+// A dictionary is a cache of handles associated with particular
+// instantiations of generic classes and generic methods, containing
+// - the instantiation itself (a list of TypeHandles)
+// - handles created on demand at runtime when code shared between
+// multiple instantiations needs to lookup an instantiation-specific
+// handle (for example, in newobj C<!0> and castclass !!0[])
+//
+// DICTIONARY ENTRIES
+//
+// Dictionary entries (abstracted as the type DictionaryEntry) can be:
+// a TypeHandle (for type arguments and entries associated with a TypeSpec token)
+// a MethodDesc* (for entries associated with a method MemberRef or MethodSpec token)
+// a FieldDesc* (for entries associated with a field MemberRef token)
+// a code pointer (e.g. for entries associated with an EntryPointAnnotation annotated token)
+// a dispatch stub address (for entries associated with an StubAddrAnnotation annotated token)
+//
+// DICTIONARY LAYOUTS
+//
+// A dictionary layout describes the layout of all dictionaries that can be
+// accessed from the same shared code. For example, Hashtable<string,int> and
+// Hashtable<object,int> share one layout, and Hashtable<int,string> and Hashtable<int,object>
+// share another layout. For generic types, the dictionary layout is stored in the EEClass
+// that is shared across compatible instantiations. For generic methods, the layout
+// is stored in the InstantiatedMethodDesc associated with the shared generic code itself.
+//
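+// Note (informal): sharing is per layout, not per dictionary. Hashtable<string,int>
+// and Hashtable<object,int> each still get their own Dictionary instance; only the
+// slot layout describing those dictionaries is shared.
+//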
+
+class TypeHandleList;
+class Module;
+class BaseDomain;
+class SigTypeContext;
+class SigBuilder;
+
+enum DictionaryEntryKind
+{
+ EmptySlot = 0,
+ TypeHandleSlot = 1,
+ MethodDescSlot = 2,
+ MethodEntrySlot = 3,
+ ConstrainedMethodEntrySlot = 4,
+ DispatchStubAddrSlot = 5,
+ FieldDescSlot = 6,
+ DeclaringTypeHandleSlot = 7,
+};
+
+#ifndef BINDER
+class DictionaryEntryLayout
+{
+public:
+ DictionaryEntryLayout(PTR_VOID signature)
+ { LIMITED_METHOD_CONTRACT; m_signature = signature; }
+
+ DictionaryEntryKind GetKind();
+
+ PTR_VOID m_signature;
+};
+
+typedef DPTR(DictionaryEntryLayout) PTR_DictionaryEntryLayout;
+
+
+class DictionaryLayout;
+typedef DPTR(DictionaryLayout) PTR_DictionaryLayout;
+
+// The type of dictionary layouts. We don't include the number of type
+// arguments as this is obtained elsewhere
+class DictionaryLayout
+{
+ friend class Dictionary;
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+private:
+ // Next bucket of slots (only used to track entries that won't fit in the dictionary)
+ DictionaryLayout* m_pNext;
+
+ // Number of non-type-argument slots in this bucket
+ WORD m_numSlots;
+
+ // m_numSlots of these
+ DictionaryEntryLayout m_slots[1];
+
+public:
+ // Create an initial dictionary layout with a single bucket containing numSlots slots
+ static DictionaryLayout* Allocate(WORD numSlots, LoaderAllocator *pAllocator, AllocMemTracker *pamTracker);
+
+ // Bytes used for the first bucket of this dictionary, which might be stored inline in
+ // another structure (e.g. MethodTable)
+ static DWORD GetFirstDictionaryBucketSize(DWORD numGenericArgs, PTR_DictionaryLayout pDictLayout);
+
+ static BOOL FindToken(LoaderAllocator *pAllocator,
+ DWORD numGenericArgs,
+ DictionaryLayout *pDictLayout,
+ CORINFO_RUNTIME_LOOKUP *pResult,
+ SigBuilder * pSigBuilder,
+ int nFirstOffset);
+
+ DWORD GetMaxSlots();
+ DWORD GetNumUsedSlots();
+
+ PTR_DictionaryEntryLayout GetEntryLayout(DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(i >= 0 && i < GetMaxSlots());
+ return dac_cast<PTR_DictionaryEntryLayout>(
+ dac_cast<TADDR>(this) + offsetof(DictionaryLayout, m_slots) + sizeof(DictionaryEntryLayout) * i);
+ }
+
+ DictionaryLayout* GetNextLayout() { LIMITED_METHOD_CONTRACT; return m_pNext; }
+
+#ifdef FEATURE_PREJIT
+ DWORD GetObjectSize();
+
+    // Trim the canonical dictionary layout to include only those slots actually used.
+ // WARNING!!!
+ // You should only call this if
+ // (a) you're actually saving this shared instantiation into it's PreferredZapModule,
+ // i.e. you must be both saving the shared instantiation and the module
+ // you're ngen'ing MUST be that the PreferredZapModule.
+ // (b) you're sure you've compiled all the shared code for this type
+ // within the context of this NGEN session.
+ // This is currently the same as saying we can hardbind to the EEClass - if it's in another
+ // module then we will have already trimmed the layout, and if it's being saved into the
+ // current module then we can only hardbind to it if the current module is the PreferredZapModule.
+ //
+ // Note after calling this both GetObjectSize for this layout and the
+ // computed dictionary size for all dictionaries based on this layout may
+ // be reduced. This may in turn affect the size of all non-canonical
+ // method tables, potentially trimming some dictionary words off the end
+ // of the method table.
+ void Trim();
+ void Save(DataImage *image);
+ void Fixup(DataImage *image, BOOL fMethod);
+#endif // FEATURE_PREJIT
+
+};
+#endif // BINDER
+
+
+// The type of dictionaries. This is just an abstraction around an open-ended array.
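+//
+// For illustration, a dictionary for a hypothetical C<string,int> whose layout has two
+// extra slots might look like (the exact entry kinds depend on the shared code):
+//   [0] TypeHandle(string)            -- instantiation argument
+//   [1] TypeHandle(int)               -- instantiation argument
+//   [2] TypeHandle(List<string>)      -- filled lazily on first use
+//   [3] MethodDesc* of a target call  -- filled lazily on first use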
+class Dictionary
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+ private:
+ // The first N entries are the generic instantiation arguments. They are stored as FixupPointers
+ // in NGen images. It means that the lowest bit is used to mark optional indirection (see code:FixupPointer).
+ // The rest of the open array are normal pointers (no optional indirection).
+ DictionaryEntry m_pEntries[1];
+
+ TADDR EntryAddr(ULONG32 idx)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return PTR_HOST_MEMBER_TADDR(Dictionary, this, m_pEntries) +
+ idx * sizeof(m_pEntries[0]);
+ }
+
+ public:
+ inline DPTR(FixupPointer<TypeHandle>) GetInstantiation()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return dac_cast<DPTR(FixupPointer<TypeHandle>)>(EntryAddr(0));
+ }
+
+#ifndef DACCESS_COMPILE
+ inline void* AsPtr()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (void*) m_pEntries;
+ }
+#endif // #ifndef DACCESS_COMPILE
+
+ private:
+
+#ifndef DACCESS_COMPILE
+
+ inline TypeHandle GetTypeHandleSlot(DWORD numGenericArgs, DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return *GetTypeHandleSlotAddr(numGenericArgs, i);
+ }
+ inline MethodDesc *GetMethodDescSlot(DWORD numGenericArgs, DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return *GetMethodDescSlotAddr(numGenericArgs,i);
+ }
+ inline FieldDesc *GetFieldDescSlot(DWORD numGenericArgs, DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return *GetFieldDescSlotAddr(numGenericArgs,i);
+ }
+ inline TypeHandle *GetTypeHandleSlotAddr(DWORD numGenericArgs, DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((TypeHandle *) &m_pEntries[numGenericArgs + i]);
+ }
+ inline MethodDesc **GetMethodDescSlotAddr(DWORD numGenericArgs, DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((MethodDesc **) &m_pEntries[numGenericArgs + i]);
+ }
+ inline FieldDesc **GetFieldDescSlotAddr(DWORD numGenericArgs, DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((FieldDesc **) &m_pEntries[numGenericArgs + i]);
+ }
+ inline DictionaryEntry *GetSlotAddr(DWORD numGenericArgs, DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((DictionaryEntry *) &m_pEntries[numGenericArgs + i]);
+ }
+ inline DictionaryEntry GetSlot(DWORD numGenericArgs, DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return *GetSlotAddr(numGenericArgs,i);
+ }
+ inline BOOL IsSlotEmpty(DWORD numGenericArgs, DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetSlot(numGenericArgs,i) == NULL;
+ }
+
+#endif // #ifndef DACCESS_COMPILE
+
+ public:
+
+#ifndef DACCESS_COMPILE
+
+ static DictionaryEntry PopulateEntry(MethodDesc * pMD,
+ MethodTable * pMT,
+ LPVOID signature,
+ BOOL nonExpansive,
+ DictionaryEntry ** ppSlot);
+ void PrepopulateDictionary(MethodDesc * pMD,
+ MethodTable * pMT,
+ BOOL nonExpansive);
+
+#endif // #ifndef DACCESS_COMPILE
+
+ public:
+
+#ifndef BINDER
+#ifdef FEATURE_PREJIT
+
+ // Fixup the dictionary entries, including the type arguments
+ //
+ // WARNING!!!
+ // You should only pass "canSaveSlots=TRUE" if you are certain the dictionary layout
+ // matches that which will be used at runtime. This means you must either
+ // be able to hard-bind to the EEClass of the canonical instantiation, or else
+ // you are saving a copy of the canonical instantiation itself.
+ //
+ // If we can't save slots, then we will zero all entries in the dictionary (apart from the
+ // instantiation itself) and load at runtime.
+ void Fixup(DataImage *image,
+ BOOL canSaveInstantiation,
+ BOOL canSaveSlots,
+ DWORD numGenericArgs, // Must be non-zero
+ Module *pModule, // module of the generic code
+ DictionaryLayout *pDictLayout); // If NULL, then only type arguments are present
+
+ BOOL IsWriteable(DataImage *image,
+ BOOL canSaveSlots,
+ DWORD numGenericArgs, // Must be non-zero
+ Module *pModule, // module of the generic code
+ DictionaryLayout *pDictLayout); // If NULL, then only type arguments are present
+
+ BOOL ComputeNeedsRestore(DataImage *image,
+ TypeHandleList *pVisited,
+ DWORD numGenericArgs);
+ void Restore(DWORD numGenericArgs, ClassLoadLevel level);
+#endif // FEATURE_PREJIT
+#endif // BINDER
+};
+
+#endif
diff --git a/src/vm/generics.cpp b/src/vm/generics.cpp
new file mode 100644
index 0000000000..a9173b059c
--- /dev/null
+++ b/src/vm/generics.cpp
@@ -0,0 +1,1146 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: generics.cpp
+//
+
+
+//
+// Helper functions for generics prototype
+//
+
+//
+// ============================================================================
+
+#include "common.h"
+#include "method.hpp"
+#include "field.h"
+#include "eeconfig.h"
+#include "generics.h"
+#include "genericdict.h"
+#include "stackprobe.h"
+#include "typestring.h"
+#include "typekey.h"
+#include "dumpcommon.h"
+#include "array.h"
+
+#include "generics.inl"
+#ifdef FEATURE_COMINTEROP
+#include "winrttypenameconverter.h"
+#endif // FEATURE_COMINTEROP
+
+/* static */
+TypeHandle ClassLoader::CanonicalizeGenericArg(TypeHandle thGenericArg)
+{
+ CONTRACT(TypeHandle)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END
+
+#if defined(FEATURE_SHARE_GENERIC_CODE)
+ CorElementType et = thGenericArg.GetSignatureCorElementType();
+
+ // Note that generic variables do not share
+
+ if (CorTypeInfo::IsObjRef_NoThrow(et))
+ RETURN(TypeHandle(g_pCanonMethodTableClass));
+
+ if (et == ELEMENT_TYPE_VALUETYPE)
+ {
+ // Don't share structs. But sharability must be propagated through
+ // them (i.e. struct<object> * shares with struct<string> *)
+ RETURN(TypeHandle(thGenericArg.GetCanonicalMethodTable()));
+ }
+
+ _ASSERTE(et != ELEMENT_TYPE_PTR && et != ELEMENT_TYPE_FNPTR);
+ RETURN(thGenericArg);
+#else
+ RETURN (thGenericArg);
+#endif // FEATURE_SHARE_GENERIC_CODE
+}
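+
+// For illustration, with FEATURE_SHARE_GENERIC_CODE on, CanonicalizeGenericArg maps:
+//   string                          -> __Canon            (all reference types share)
+//   int                             -> int                (primitive value types do not share)
+//   MyStruct<string> (hypothetical) -> MyStruct<__Canon>  (sharability propagates through structs)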
+
+// Given the build-time ShareGenericCode setting, does the specified instantiation
+// contain any type argument that is representation-sharable as a type parameter to a
+// generic type or method?
+/* static */ BOOL ClassLoader::IsSharableInstantiation(Instantiation inst)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ if (CanonicalizeGenericArg(inst[i]).IsCanonicalSubtype())
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/* static */ BOOL ClassLoader::IsCanonicalGenericInstantiation(Instantiation inst)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ if (CanonicalizeGenericArg(inst[i]) != inst[i])
+ return FALSE;
+ }
+ return TRUE;
+}
+
+/* static */ BOOL ClassLoader::IsTypicalSharedInstantiation(Instantiation inst)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ if (inst[i] != TypeHandle(g_pCanonMethodTableClass))
+ return FALSE;
+ }
+ return TRUE;
+}
+
+#ifndef DACCESS_COMPILE
+
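+// For illustration: given the type key for a hypothetical Dictionary<string,int>, the
+// canonical key computed below is Dictionary<__Canon,int> (reference-type arguments
+// collapse to __Canon, value-type arguments stay exact), and that is the type loaded.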
+TypeHandle ClassLoader::LoadCanonicalGenericInstantiation(TypeKey *pTypeKey,
+ LoadTypesFlag fLoadTypes/*=LoadTypes*/,
+ ClassLoadLevel level/*=CLASS_LOADED*/)
+{
+ CONTRACT(TypeHandle)
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED() || fLoadTypes != LoadTypes) { LOADS_TYPE(CLASS_LOAD_BEGIN); } else { LOADS_TYPE(level); }
+ POSTCONDITION(CheckPointer(RETVAL, ((fLoadTypes == LoadTypes) ? NULL_NOT_OK : NULL_OK)));
+ POSTCONDITION(RETVAL.IsNull() || RETVAL.CheckLoadLevel(level));
+ }
+ CONTRACT_END
+
+ Instantiation inst = pTypeKey->GetInstantiation();
+ DWORD ntypars = inst.GetNumArgs();
+
+ // Canonicalize the type arguments.
+ DWORD dwAllocSize = 0;
+ if (!ClrSafeInt<DWORD>::multiply(ntypars, sizeof(TypeHandle), dwAllocSize))
+ ThrowHR(COR_E_OVERFLOW);
+
+ TypeHandle ret = TypeHandle();
+ DECLARE_INTERIOR_STACK_PROBE;
+#ifndef DACCESS_COMPILE
+ if ((dwAllocSize/PAGE_SIZE+1) >= 2)
+ {
+ DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD((10+dwAllocSize/PAGE_SIZE+1), NO_FORBIDGC_LOADER_USE_ThrowSO(););
+ }
+#endif // DACCESS_COMPILE
+ TypeHandle *repInst = (TypeHandle*) _alloca(dwAllocSize);
+
+ for (DWORD i = 0; i < ntypars; i++)
+ {
+ repInst[i] = ClassLoader::CanonicalizeGenericArg(inst[i]);
+ }
+
+ // Load the canonical instantiation
+ TypeKey canonKey(pTypeKey->GetModule(), pTypeKey->GetTypeToken(), Instantiation(repInst, ntypars));
+ ret = ClassLoader::LoadConstructedTypeThrowing(&canonKey, fLoadTypes, level);
+
+ END_INTERIOR_STACK_PROBE;
+ RETURN(ret);
+}
+
+// Create a non-canonical instantiation of a generic type, by
+// copying the method table of the canonical instantiation
+//
+/* static */
+TypeHandle
+ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation(
+ TypeKey *pTypeKey,
+ AllocMemTracker *pamTracker)
+{
+ CONTRACT(TypeHandle)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pTypeKey));
+ PRECONDITION(CheckPointer(pamTracker));
+ PRECONDITION(pTypeKey->HasInstantiation());
+ PRECONDITION(ClassLoader::IsSharableInstantiation(pTypeKey->GetInstantiation()));
+ PRECONDITION(!TypeHandle::IsCanonicalSubtypeInstantiation(pTypeKey->GetInstantiation()));
+ POSTCONDITION(CheckPointer(RETVAL));
+ POSTCONDITION(RETVAL.CheckMatchesKey(pTypeKey));
+ }
+ CONTRACT_END
+
+ Module *pLoaderModule = ClassLoader::ComputeLoaderModule(pTypeKey);
+ LoaderAllocator* pAllocator=pLoaderModule->GetLoaderAllocator();
+
+ Instantiation inst = pTypeKey->GetInstantiation();
+ pAllocator->EnsureInstantiation(pTypeKey->GetModule(), inst);
+ DWORD ntypars = inst.GetNumArgs();
+
+#ifdef _DEBUG
+ if (LoggingOn(LF_CLASSLOADER, LL_INFO1000) || g_pConfig->BreakOnInstantiationEnabled())
+ {
+ StackSString debugTypeKeyName;
+ TypeString::AppendTypeKeyDebug(debugTypeKeyName, pTypeKey);
+ LOG((LF_CLASSLOADER, LL_INFO1000, "GENERICS: New instantiation requested: %S\n", debugTypeKeyName.GetUnicode()));
+
+ StackScratchBuffer buf;
+ if (g_pConfig->ShouldBreakOnInstantiation(debugTypeKeyName.GetUTF8(buf)))
+ CONSISTENCY_CHECK_MSGF(false, ("BreakOnInstantiation: typename '%s' ", debugTypeKeyName.GetUTF8(buf)));
+ }
+#endif // _DEBUG
+
+ TypeHandle canonType;
+ {
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOAD_APPROXPARENTS);
+ canonType = ClassLoader::LoadCanonicalGenericInstantiation(pTypeKey, ClassLoader::LoadTypes, CLASS_LOAD_APPROXPARENTS);
+ }
+
+ // Now fabricate a method table
+ MethodTable* pOldMT = canonType.AsMethodTable();
+
+ // We only need true vtable entries as the rest can be found in the representative method table
+ WORD cSlots = static_cast<WORD>(pOldMT->GetNumVirtuals());
+
+ BOOL fContainsGenericVariables = MethodTable::ComputeContainsGenericVariables(inst);
+
+ // These are all copied across from the old MT, i.e. don't depend on the
+ // instantiation.
+#ifdef FEATURE_REMOTING
+ BOOL fHasRemotingVtsInfo = pOldMT->HasRemotingVtsInfo();
+ BOOL fHasContextStatics = pOldMT->HasContextStatics();
+#else
+ BOOL fHasRemotingVtsInfo = FALSE;
+ BOOL fHasContextStatics = FALSE;
+#endif
+ BOOL fHasGenericsStaticsInfo = pOldMT->HasGenericsStaticsInfo();
+ BOOL fHasThreadStatics = (pOldMT->GetNumThreadStaticFields() > 0);
+
+#ifdef FEATURE_COMINTEROP
+ BOOL fHasDynamicInterfaceMap = pOldMT->HasDynamicInterfaceMap();
+ BOOL fHasRCWPerTypeData = pOldMT->HasRCWPerTypeData();
+#else // FEATURE_COMINTEROP
+ BOOL fHasDynamicInterfaceMap = FALSE;
+ BOOL fHasRCWPerTypeData = FALSE;
+#endif // FEATURE_COMINTEROP
+
+ // Collectible types have some special restrictions
+ if (pAllocator->IsCollectible())
+ {
+ if (fHasThreadStatics || fHasContextStatics)
+ {
+ ClassLoader::ThrowTypeLoadException(pTypeKey, IDS_CLASSLOAD_COLLECTIBLESPECIALSTATICS);
+ }
+ else if (pOldMT->HasFixedAddressVTStatics())
+ {
+ ClassLoader::ThrowTypeLoadException(pTypeKey, IDS_CLASSLOAD_COLLECTIBLEFIXEDVTATTR);
+ }
+ }
+
+ // The number of bytes used for GC info
+ size_t cbGC = pOldMT->ContainsPointers() ? ((CGCDesc*) pOldMT)->GetSize() : 0;
+
+ // Bytes are required for the vtable itself
+ S_SIZE_T safe_cbMT = S_SIZE_T( cbGC ) + S_SIZE_T( sizeof(MethodTable) );
+ safe_cbMT += MethodTable::GetNumVtableIndirections(cSlots) * sizeof(PTR_PCODE);
+ if (safe_cbMT.IsOverflow())
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+ const size_t cbMT = safe_cbMT.Value();
+
+ // After the optional members (see below) comes the duplicated interface map.
+ // For dynamic interfaces the interface map area begins one word
+ // before the location returned by GetInterfaceMap()
+ WORD wNumInterfaces = static_cast<WORD>(pOldMT->GetNumInterfaces());
+ DWORD cbIMap = pOldMT->GetInterfaceMapSize();
+ InterfaceInfo_t * pOldIMap = (InterfaceInfo_t *)pOldMT->GetInterfaceMap();
+
+ BOOL fHasGuidInfo = FALSE;
+ BOOL fHasCCWTemplate = FALSE;
+
+ Generics::DetermineCCWTemplateAndGUIDPresenceOnNonCanonicalMethodTable(pOldMT, fContainsGenericVariables, &fHasGuidInfo, &fHasCCWTemplate);
+
+ DWORD dwMultipurposeSlotsMask = 0;
+ dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasPerInstInfo;
+ if (wNumInterfaces != 0)
+ dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasInterfaceMap;
+
+ // NonVirtualSlots, DispatchMap and ModuleOverride multipurpose slots are used
+ // from the canonical methodtable, so we do not need to store them here.
+
+ // We need space for the optional members.
+ DWORD cbOptional = MethodTable::GetOptionalMembersAllocationSize(dwMultipurposeSlotsMask,
+ FALSE, // fHasRemotableMethodInfo
+ fHasGenericsStaticsInfo,
+ fHasGuidInfo,
+ fHasCCWTemplate,
+ fHasRCWPerTypeData,
+ fHasRemotingVtsInfo,
+ fHasContextStatics,
+ pOldMT->HasTokenOverflow());
+
+ // We need space for the PerInstInfo, i.e. the generic dictionary pointers...
+ DWORD cbPerInst = sizeof(GenericsDictInfo) + pOldMT->GetPerInstInfoSize();
+
+ // Finally we need space for the instantiation/dictionary for this type
+ DWORD cbInstAndDict = pOldMT->GetInstAndDictSize();
+
+ // Allocate from the high-frequency heap of the correct domain
+ S_SIZE_T allocSize = safe_cbMT;
+ allocSize += cbOptional;
+ allocSize += cbIMap;
+ allocSize += cbPerInst;
+ allocSize += cbInstAndDict;
+
+ if (allocSize.IsOverflow())
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+
+#ifdef FEATURE_PREJIT
+ Module *pComputedPZM = Module::ComputePreferredZapModule(pTypeKey);
+ BOOL canShareVtableChunks = MethodTable::CanShareVtableChunksFrom(pOldMT, pLoaderModule, pComputedPZM);
+#else
+ BOOL canShareVtableChunks = MethodTable::CanShareVtableChunksFrom(pOldMT, pLoaderModule);
+#endif // FEATURE_PREJIT
+
+ SIZE_T offsetOfUnsharedVtableChunks = allocSize.Value();
+
+ // We either share all of the canonical's virtual slots or none of them
+ // If none, we need to allocate space for the slots
+ if (!canShareVtableChunks)
+ {
+ allocSize += S_SIZE_T( cSlots ) * S_SIZE_T( sizeof(PCODE) );
+ }
+
+ if (allocSize.IsOverflow())
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+
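+ // Sketch of the block laid out below (offsets as used later in this function):
+ //   pMemory ................ GC descriptor (cbGC bytes)
+ //   pMemory + cbGC ......... MethodTable + vtable indirection slots (cbMT covers both)
+ //   pMemory + cbMT ......... optional members (cbOptional)
+ //     + cbOptional ......... interface map (cbIMap)
+ //     + cbIMap ............. GenericsDictInfo + per-inst dictionary pointers (cbPerInst)
+ //     + cbPerInst .......... instantiation + dictionary slots (cbInstAndDict)
+ //   tail ................... unshared vtable chunks, if the canonical ones can't be shared
+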
+ BYTE* pMemory = (BYTE *) pamTracker->Track(pAllocator->GetHighFrequencyHeap()->AllocMem( allocSize ));
+
+ // Head of MethodTable memory
+ MethodTable *pMT = (MethodTable*) (pMemory + cbGC);
+
+ // Copy the GC descriptor from the old method table
+ memcpy((BYTE*)pMT - cbGC, (BYTE*) pOldMT - cbGC, cbGC);
+
+ // Allocate the private data block ("private" during runtime in the ngen'ed case)
+ MethodTableWriteableData * pMTWriteableData = (MethodTableWriteableData *) (BYTE *)
+ pamTracker->Track(pAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(MethodTableWriteableData))));
+ // Note: Memory allocated on loader heap is zero filled
+ pMT->SetWriteableData(pMTWriteableData);
+
+ // This also disables IBC logging until the type is sufficiently initialized, so
+ // it needs to be done early
+ pMTWriteableData->SetIsNotFullyLoadedForBuildMethodTable();
+
+ // <TODO> this is incredibly fragile. We should just construct the MT all over again. </TODO>
+ pMT->CopyFlags(pOldMT);
+
+ pMT->ClearFlag(MethodTable::enum_flag_MultipurposeSlotsMask);
+ pMT->SetMultipurposeSlotsMask(dwMultipurposeSlotsMask);
+
+ // Set generics flags
+ pMT->ClearFlag(MethodTable::enum_flag_GenericsMask);
+ pMT->SetFlag(MethodTable::enum_flag_GenericsMask_GenericInst);
+
+ // Freshly allocated - does not need restore
+ pMT->ClearFlag(MethodTable::enum_flag_IsZapped);
+ pMT->ClearFlag(MethodTable::enum_flag_IsPreRestored);
+
+ pMT->ClearFlag(MethodTable::enum_flag_HasIndirectParent);
+
+ // No non-virtual slots
+ pMT->ClearFlag(MethodTable::enum_flag_HasSingleNonVirtualSlot);
+
+ pMT->SetBaseSize(pOldMT->GetBaseSize());
+ pMT->SetParentMethodTable(pOldMT->GetParentMethodTable());
+ pMT->SetCanonicalMethodTable(pOldMT);
+
+ pMT->m_wNumInterfaces = pOldMT->m_wNumInterfaces;
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+ if (pMT->IsInterface() && !pMT->HasTypeEquivalence())
+ {
+ // fHasTypeEquivalence flag is "inherited" from generic arguments so we can quickly detect
+ // types like IList<IFoo> where IFoo is an interface with the TypeIdentifierAttribute.
+ for (DWORD i = 0; i < ntypars; i++)
+ {
+ if (inst[i].HasTypeEquivalence())
+ {
+ pMT->SetHasTypeEquivalence();
+ break;
+ }
+ }
+ }
+#endif // FEATURE_TYPEEQUIVALENCE
+
+ if (pOldMT->IsInterface() && IsImplicitInterfaceOfSZArray(pOldMT))
+ {
+ // Determine if we are creating an interface methodtable that may be used to dispatch through VSD
+ // on an array object using a generic interface (such as IList<T>).
+ // Please read comments in IsArray block of code:MethodTable::FindDispatchImpl.
+ //
+ // Arrays are special because we use the same method table (object[]) for all arrays of
+ // reference classes (e.g. string[]). This means that the method table for an array is not a
+ // complete description of the type of the array, and thus the target of something like
+ // IList<T>::IndexOf cannot be determined simply by looking at the method table of T[]
+ // (which might be the method table of object[], if T is a reference type).
+ //
+ // This is done to minimize MethodTables, but as a side-effect of this optimization,
+ // we end up using a domain-shared type (object[]) with a domain-specific dispatch token.
+ // This is a problem because the same domain-specific dispatch token value can appear in
+ // multiple unshared domains (VSD takes advantage of the fact that in general a shared type
+ // cannot implement an unshared interface). This means that the same <token, object[]> pair
+ // value can mean different things in different domains (since the token could represent
+ // IList<Foo> in one domain and IEnumerable<Bar> in another). This is a problem because the
+ // VSD polymorphic lookup mechanism relies on a process-wide cache table, and as a result
+ // these duplicate values would collide if we didn't use a fat dispatch token to ensure
+ // uniqueness when the interface methodtable is not in the shared domain.
+ //
+ // Of note: there is also some interesting array-specific behaviour where if B inherits from A
+ // and you have an array of B (B[]) then B[] implements IList<B> and IList<A>, but a dispatch
+ // on an IList<A> reference results in a dispatch to SZArrayHelper<A> rather than
+ // SZArrayHelper<B> (i.e., the variance implementation is not done like virtual methods).
+ //
+ // For example, if Sub inherits from Super, which inherits from Object, then
+ // * Sub[] implements IList<Super>
+ // * Sub[] implements IList<Sub>
+ //
+ // And as a result we have the following mappings:
+ // * IList<Super>::IndexOf for Sub[] goes to SZArrayHelper<Super>::IndexOf
+ // * IList<Sub>::IndexOf for Sub[] goes to SZArrayHelper<Sub>::IndexOf
+ //
+ pMT->SetRequiresFatDispatchTokens();
+ }
+
+ // Number of slots only includes vtable slots
+ pMT->SetNumVirtuals(cSlots);
+
+ // Fill out the vtable indirection slots
+ MethodTable::VtableIndirectionSlotIterator it = pMT->IterateVtableIndirectionSlots();
+ while (it.Next())
+ {
+ if (canShareVtableChunks)
+ {
+ // Share the canonical chunk
+ it.SetIndirectionSlot(pOldMT->GetVtableIndirections()[it.GetIndex()]);
+ }
+ else
+ {
+ // Use the locally allocated chunk
+ it.SetIndirectionSlot((PTR_PCODE)(pMemory+offsetOfUnsharedVtableChunks));
+ offsetOfUnsharedVtableChunks += it.GetSize();
+ }
+ }
+
+ // If we are not sharing parent chunks, copy down the slot contents
+ if (!canShareVtableChunks)
+ {
+ // Need to assign the slots one by one to filter out jump thunks
+ for (DWORD i = 0; i < cSlots; i++)
+ {
+ pMT->SetSlot(i, pOldMT->GetRestoredSlot(i));
+ }
+ }
+
+ // All flags on the m_pNgenPrivateData data block
+ // are initially false for a dynamically generated instantiation.
+ //
+ // Last time this was checked this included
+ // enum_flag_RemotingConfigChecked
+ // enum_flag_RequiresManagedActivation
+ // enum_flag_Unrestored
+ // enum_flag_CriticalTypePrepared
+#ifdef FEATURE_PREJIT
+ // enum_flag_NGEN_IsFixedUp
+ // enum_flag_NGEN_NeedsRestoreCached
+ // enum_flag_NGEN_NeedsRestore
+#endif // FEATURE_PREJIT
+
+#if defined(_DEBUG) && defined (FEATURE_REMOTING)
+ if (pOldMT->IsContextful() || pOldMT->GetClass()->HasRemotingProxyAttribute())
+ {
+ _ASSERTE(pOldMT->RequiresManagedActivation());
+ }
+#endif // _DEBUG && FEATURE_REMOTING
+ if (pOldMT->RequiresManagedActivation())
+ {
+ // Will also set enum_flag_RemotingConfigChecked
+ pMT->SetRequiresManagedActivation();
+ }
+
+ if (fContainsGenericVariables)
+ pMT->SetContainsGenericVariables();
+
+ if (fHasGenericsStaticsInfo)
+ pMT->SetDynamicStatics(TRUE);
+
+#ifdef FEATURE_REMOTING
+ if (fHasRemotingVtsInfo)
+ pMT->SetHasRemotingVtsInfo();
+ if (fHasContextStatics)
+ pMT->SetHasContextStatics();
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ if (fHasCCWTemplate)
+ pMT->SetHasCCWTemplate();
+ if (fHasGuidInfo)
+ pMT->SetHasGuidInfo();
+#endif
+
+ // Since we are fabricating a new MT based on an existing one, the per-inst info should
+ // be non-null
+ _ASSERTE(pOldMT->HasPerInstInfo());
+
+ // Fill in per-inst map pointer (which points to the array of generic dictionary pointers)
+ pMT->SetPerInstInfo ((Dictionary**) (pMemory + cbMT + cbOptional + cbIMap + sizeof(GenericsDictInfo)));
+ _ASSERTE(FitsIn<WORD>(pOldMT->GetNumDicts()));
+ _ASSERTE(FitsIn<WORD>(pOldMT->GetNumGenericArgs()));
+ pMT->SetDictInfo(static_cast<WORD>(pOldMT->GetNumDicts()), static_cast<WORD>(pOldMT->GetNumGenericArgs()));
+
+ // Fill in the last entry in the array of generic dictionary pointers ("per inst info")
+ // The others are filled in by LoadExactParents, which copies down any inherited generic
+ // dictionary pointers.
+ Dictionary * pDict = (Dictionary*) (pMemory + cbMT + cbOptional + cbIMap + cbPerInst);
+ *(pMT->GetPerInstInfo() + (pOldMT->GetNumDicts()-1)) = pDict;
+
+ // Fill in the instantiation section of the generic dictionary. The remainder of the
+ // generic dictionary will be zeroed, which is the correct initial state.
+ TypeHandle * pInstDest = (TypeHandle *)pDict->GetInstantiation();
+ for (DWORD iArg = 0; iArg < ntypars; iArg++)
+ {
+ pInstDest[iArg] = inst[iArg];
+ }
+
+ // Copy interface map across
+ InterfaceInfo_t * pInterfaceMap = (InterfaceInfo_t *)(pMemory + cbMT + cbOptional + (fHasDynamicInterfaceMap ? sizeof(DWORD_PTR) : 0));
+
+#ifdef FEATURE_COMINTEROP
+ // Extensible RCW's are prefixed with the count of dynamic interfaces.
+ if (fHasDynamicInterfaceMap)
+ {
+ *(((DWORD_PTR *)pInterfaceMap) - 1) = 0;
+ }
+#endif // FEATURE_COMINTEROP
+
+ for (WORD iItf = 0; iItf < wNumInterfaces; iItf++)
+ {
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOAD_APPROXPARENTS);
+ pInterfaceMap[iItf].SetMethodTable(pOldIMap[iItf].GetApproxMethodTable(pOldMT->GetLoaderModule()));
+ }
+
+ // Set the interface map pointer stored in the main section of the vtable (actually
+ // an optional member) to point to the correct region within the newly
+ // allocated method table.
+
+ // Fill in interface map pointer
+ pMT->SetInterfaceMap(wNumInterfaces, pInterfaceMap);
+
+ // Copy across extra flags for these interfaces as well. We may need additional memory for this.
+ PVOID pExtraInterfaceInfo = NULL;
+ SIZE_T cbExtraInterfaceInfo = MethodTable::GetExtraInterfaceInfoSize(wNumInterfaces);
+ if (cbExtraInterfaceInfo)
+ pExtraInterfaceInfo = pamTracker->Track(pAllocator->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(cbExtraInterfaceInfo)));
+
+ // Call this even in the case where pExtraInterfaceInfo == NULL (certain cases are optimized and don't
+ // require extra buffer space).
+ pMT->InitializeExtraInterfaceInfo(pExtraInterfaceInfo);
+
+ for (UINT32 i = 0; i < pOldMT->GetNumInterfaces(); i++)
+ {
+ if (pOldMT->IsInterfaceDeclaredOnClass(i))
+ pMT->SetInterfaceDeclaredOnClass(i);
+ }
+
+ pMT->SetLoaderModule(pLoaderModule);
+ pMT->SetLoaderAllocator(pAllocator);
+
+
+#ifdef _DEBUG
+ // Name for debugging
+ StackSString debug_ClassNameString;
+ TypeString::AppendTypeKey(debug_ClassNameString, pTypeKey, TypeString::FormatNamespace | TypeString::FormatAngleBrackets | TypeString::FormatFullInst);
+ StackScratchBuffer debug_ClassNameBuffer;
+ const char *debug_szClassNameBuffer = debug_ClassNameString.GetUTF8(debug_ClassNameBuffer);
+ S_SIZE_T safeLen = S_SIZE_T(strlen(debug_szClassNameBuffer)) + S_SIZE_T(1);
+ if (safeLen.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW);
+
+ size_t len = safeLen.Value();
+ char *debug_szClassName = (char *)pamTracker->Track(pAllocator->GetLowFrequencyHeap()->AllocMem(safeLen));
+ strcpy_s(debug_szClassName, len, debug_szClassNameBuffer);
+ pMT->SetDebugClassName(debug_szClassName);
+
+ // Debugging information
+ if (pOldMT->Debug_HasInjectedInterfaceDuplicates())
+ pMT->Debug_SetHasInjectedInterfaceDuplicates();
+#endif // _DEBUG
+
+ // <NICE>This logic is identical to logic in class.cpp. Factor these out.</NICE>
+ // No need to generate IDs for open types. However
+ // we still leave the optional member in the MethodTable holding the value -1 for the ID.
+ if (fHasGenericsStaticsInfo)
+ {
+ FieldDesc* pStaticFieldDescs = NULL;
+
+ if (pOldMT->GetNumStaticFields() != 0)
+ {
+ pStaticFieldDescs = (FieldDesc*) pamTracker->Track(pAllocator->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(FieldDesc)) * S_SIZE_T(pOldMT->GetNumStaticFields())));
+ FieldDesc* pOldFD = pOldMT->GetGenericsStaticFieldDescs();
+
+ g_IBCLogger.LogFieldDescsAccess(pOldFD);
+
+ for (DWORD i = 0; i < pOldMT->GetNumStaticFields(); i++)
+ {
+ pStaticFieldDescs[i] = pOldFD[i];
+ pStaticFieldDescs[i].SetMethodTable(pMT);
+ }
+ }
+ pMT->SetupGenericsStaticsInfo(pStaticFieldDescs);
+ }
+
+#ifdef FEATURE_REMOTING
+ // We do not cache the data for non-canonical methods.
+ _ASSERTE(!pMT->HasRemotableMethodInfo());
+#endif
+
+ // VTS info doesn't depend on the exact instantiation but we make a copy
+ // anyway since we can't currently deal with the possibility of having a
+ // cross module pointer to the data block. Eventually we might be able to
+ // tokenize this reference, but determine first whether there's enough
+ // performance degradation to justify the extra complexity.
+#ifdef FEATURE_REMOTING
+ if (fHasRemotingVtsInfo)
+ {
+ RemotingVtsInfo *pOldInfo = pOldMT->GetRemotingVtsInfo();
+ DWORD cbInfo = RemotingVtsInfo::GetSize(pOldMT->GetNumIntroducedInstanceFields());
+ RemotingVtsInfo *pNewInfo = (RemotingVtsInfo*)pamTracker->Track(pAllocator->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(cbInfo)));
+
+ memcpyNoGCRefs(pNewInfo, pOldInfo, cbInfo);
+
+ *(pMT->GetRemotingVtsInfoPtr()) = pNewInfo;
+ }
+
+ // If there are thread or context statics, make room for them; there is no sharing with the other MethodTable
+ if (fHasContextStatics)
+ {
+ // this is responsible for setting the flag and allocation in the loader heap
+ pMT->SetupContextStatics(pamTracker, pOldMT->GetContextStaticsSize());
+ }
+#endif //FEATURE_REMOTING
+
+ pMT->SetCl(pOldMT->GetCl());
+
+ // Check we've set up the flags correctly on the new method table
+ _ASSERTE(!fContainsGenericVariables == !pMT->ContainsGenericVariables());
+ _ASSERTE(!fHasGenericsStaticsInfo == !pMT->HasGenericsStaticsInfo());
+ _ASSERTE(!pLoaderModule->GetAssembly()->IsDomainNeutral() == !pMT->IsDomainNeutral());
+#ifdef FEATURE_REMOTING
+ _ASSERTE(!fHasRemotingVtsInfo == !pMT->HasRemotingVtsInfo());
+ _ASSERTE(!fHasContextStatics == !pMT->HasContextStatics());
+#endif
+#ifdef FEATURE_COMINTEROP
+ _ASSERTE(!fHasDynamicInterfaceMap == !pMT->HasDynamicInterfaceMap());
+ _ASSERTE(!fHasRCWPerTypeData == !pMT->HasRCWPerTypeData());
+ _ASSERTE(!fHasCCWTemplate == !pMT->HasCCWTemplate());
+ _ASSERTE(!fHasGuidInfo == !pMT->HasGuidInfo());
+#endif
+
+ LOG((LF_CLASSLOADER, LL_INFO1000, "GENERICS: Replicated methodtable to create type %s\n", pMT->GetDebugClassName()));
+
+#ifdef _DEBUG
+ if (g_pConfig->ShouldDumpOnClassLoad(debug_szClassName))
+ {
+ LOG((LF_ALWAYS, LL_ALWAYS,
+ "Method table summary for '%s' (instantiation):\n",
+ pMT->GetDebugClassName()));
+ pMT->Debug_DumpInterfaceMap("Approximate");
+ }
+#endif //_DEBUG
+
+#ifdef FEATURE_PREJIT
+ _ASSERTE(pComputedPZM == Module::GetPreferredZapModuleForMethodTable(pMT));
+#endif //FEATURE_PREJIT
+
+ // We never have non-virtual slots in this method table (set SetNumVtableSlots and SetNumVirtuals above)
+ _ASSERTE(!pMT->HasNonVirtualSlots());
+
+ pMTWriteableData->SetIsRestoredForBuildMethodTable();
+
+ RETURN(TypeHandle(pMT));
+} // ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation
+
+namespace Generics
+{
+
+BOOL CheckInstantiation(Instantiation inst)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle th = inst[i];
+ if (th.IsNull())
+ {
+ return FALSE;
+ }
+
+ CorElementType type = th.GetSignatureCorElementType();
+ if (CorTypeInfo::IsGenericVariable_NoThrow(type))
+ {
+ return TRUE;
+ }
+
+ g_IBCLogger.LogTypeMethodTableAccess(&th);
+
+ if ( type == ELEMENT_TYPE_BYREF
+ || type == ELEMENT_TYPE_TYPEDBYREF
+ || type == ELEMENT_TYPE_VOID
+ || type == ELEMENT_TYPE_PTR
+ || type == ELEMENT_TYPE_FNPTR)
+ {
+ return FALSE;
+ }
+
+ MethodTable* pMT = th.GetMethodTable();
+ if (pMT != NULL)
+ {
+ if (pMT->ContainsStackPtr())
+ {
+ return FALSE;
+ }
+ }
+ }
+ return TRUE;
+}
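+
+// For illustration: the filter above rejects instantiations such as C<void>, C<int*>, or
+// C<TypedReference> (hypothetical C), while an open instantiation like C<!T> is accepted
+// immediately because generic variables short-circuit to TRUE.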
+
+// Just records the owner and links to the previous graph.
+RecursionGraph::RecursionGraph(RecursionGraph *pPrev, TypeHandle thOwner)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pPrev = pPrev;
+ m_thOwner = thOwner;
+
+ m_pNodes = NULL;
+}
+
+RecursionGraph::~RecursionGraph()
+{
+ WRAPPER_NO_CONTRACT;
+ if (m_pNodes != NULL)
+ delete [] m_pNodes;
+}
+
+// Adds edges generated by the parent and implemented interfaces; returns TRUE iff
+// an expanding cycle was found.
+BOOL RecursionGraph::CheckForIllegalRecursion()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(!m_thOwner.IsTypeDesc());
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = m_thOwner.AsMethodTable();
+
+ Instantiation inst = pMT->GetInstantiation();
+
+ // Initialize the node array.
+ m_pNodes = new Node[inst.GetNumArgs()];
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ m_pNodes[i].SetSourceVar(inst[i].AsGenericVariable());
+ }
+
+ // Record edges generated by inheriting from the parent.
+ MethodTable *pParentMT = pMT->GetParentMethodTable();
+ if (pParentMT)
+ {
+ AddDependency(pParentMT);
+ }
+
+ // Record edges generated by implementing interfaces.
+ MethodTable::InterfaceMapIterator it = pMT->IterateInterfaceMap();
+ while (it.Next())
+ {
+ AddDependency(it.GetInterface());
+ }
+
+ // Check all owned nodes for expanding cycles. The edges recorded above must all
+ // go from owned nodes so it suffices to look only at these.
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ if (HasExpandingCycle(&m_pNodes[i], &m_pNodes[i]))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+// Returns TRUE iff the given type is already on the stack (in fact an analogue of
+// code:TypeHandleList::Exists).
+//
+// static
+BOOL RecursionGraph::HasSeenType(RecursionGraph *pDepGraph, TypeHandle thType)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ while (pDepGraph != NULL)
+ {
+ if (pDepGraph->m_thOwner == thType) return TRUE;
+ pDepGraph = pDepGraph->m_pPrev;
+ }
+ return FALSE;
+}
+
+// Adds the specified MT as a dependency (parent or interface) of the owner.
+void RecursionGraph::AddDependency(MethodTable *pMT, TypeHandleList *pExpansionVars /*= NULL*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(pMT != NULL);
+ }
+ CONTRACTL_END
+
+ // ECMA:
+ // - If T appears as the actual type argument to be substituted for U in some referenced
+ // type D<..., U, ...> add a non-expanding (->) edge from T to U.
+ // - If T appears somewhere inside (but not as) the actual type argument to be substituted
+ // for U in referenced type D<..., U, ...> add an expanding (=>) edge from T to U.
+
+ // Non-generic dependencies are not interesting.
+ if (!pMT->HasInstantiation())
+ return;
+
+ // Get the typical instantiation of pMT to figure out its type vars.
+ TypeHandle thTypical = ClassLoader::LoadTypeDefThrowing(
+ pMT->GetModule(), pMT->GetCl(),
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef, tdNoTypes,
+ CLASS_LOAD_APPROXPARENTS);
+
+ Instantiation inst = pMT->GetInstantiation();
+ Instantiation typicalInst = thTypical.GetInstantiation();
+
+ _ASSERTE(inst.GetNumArgs() == typicalInst.GetNumArgs());
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle thArg = inst[i];
+ TypeHandle thVar = typicalInst[i];
+ if (thArg.IsGenericVariable())
+ {
+ // Add a non-expanding edge from thArg to i-th generic parameter of pMT.
+ AddEdge(thArg.AsGenericVariable(), thVar.AsGenericVariable(), FALSE);
+
+ // Process the backlog.
+ TypeHandle thTo;
+ TypeHandleList *pList = pExpansionVars;
+ while (TypeHandleList::GetNext(&pList, &thTo))
+ {
+ AddEdge(thArg.AsGenericVariable(), thTo.AsGenericVariable(), TRUE);
+ }
+ }
+ else
+ {
+ while (thArg.IsTypeDesc())
+ {
+ _ASSERTE(thArg.HasTypeParam());
+ thArg = (static_cast<PTR_ParamTypeDesc>(thArg.AsTypeDesc()))->GetModifiedType();
+
+ if (thArg.IsGenericVariable()) // : A<!T[]>
+ {
+ // Add an expanding edge from thArg to i-th parameter of pMT.
+ AddEdge(thArg.AsGenericVariable(), thVar.AsGenericVariable(), TRUE);
+ break;
+ }
+ }
+
+ if (!thArg.IsTypeDesc()) // : A<B<!T>>
+ {
+ // We will add an expanding edge but we do not yet know from which variable(s).
+ // Add the to-variable to the list and call recursively to inspect thArg's
+ // instantiation.
+ TypeHandleList newExpansionVars(thVar, pExpansionVars);
+ AddDependency(thArg.AsMethodTable(), &newExpansionVars);
+ }
+ }
+ }
+}
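+
+// Worked example (a sketch with hypothetical types): for class A<T> : B<C<T>>, the
+// argument C<T> is not a bare variable, so the code above recurses into C<T> with B's
+// variable U on the pExpansionVars backlog; when the recursion reaches the bare T inside
+// C<T>, it adds a non-expanding edge from T to C's variable and an expanding edge from T to U.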
+
+// Add an edge from pFromVar to pToVar - either non-expanding or expanding.
+void RecursionGraph::AddEdge(TypeVarTypeDesc *pFromVar, TypeVarTypeDesc *pToVar, BOOL fExpanding)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(pFromVar != NULL);
+ PRECONDITION(pToVar != NULL);
+ }
+ CONTRACTL_END
+
+ LOG((LF_CLASSLOADER, LL_INFO10000, "GENERICS: Adding %s edge: from %x(0x%x) to %x(0x%x) into recursion graph owned by MT: %x\n",
+ (fExpanding ? "EXPANDING" : "NON-EXPANDING"),
+ pFromVar->GetToken(), pFromVar->GetModule(),
+ pToVar->GetToken(), pToVar->GetModule(),
+ m_thOwner.AsMethodTable()));
+
+ // Get the source node.
+ Node *pNode = &m_pNodes[pFromVar->GetIndex()];
+ _ASSERTE(pFromVar == pNode->GetSourceVar());
+
+ // Add the edge.
+ ULONG_PTR edge = (ULONG_PTR)pToVar;
+ if (fExpanding) edge |= Node::EDGE_EXPANDING_FLAG;
+
+ IfFailThrow(pNode->GetEdges()->Append((void *)edge));
+}
+
+// Recursive worker that checks whether this node is part of an expanding cycle.
+BOOL RecursionGraph::HasExpandingCycle(Node *pCurrentNode, Node *pStartNode, BOOL fExpanded /*= FALSE*/)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pCurrentNode));
+ PRECONDITION(CheckPointer(pStartNode));
+ }
+ CONTRACTL_END;
+
+ // This method performs a modified DFS. We are not looking for any cycle but for a cycle
+ // which has at least one expanding edge. Therefore we:
+ // 1) Pass around the fExpanded flag to indicate that we've seen an expanding edge.
+ // 2) Explicitly check for returning to the starting point rather than to an arbitrary visited node.
+
+ // Did we just find the cycle?
+ if (fExpanded && pCurrentNode == pStartNode)
+ return TRUE;
+
+ // Have we been here before or is this a dead end?
+ if (pCurrentNode->IsVisited() || pCurrentNode->GetEdges()->GetCount() == 0)
+ return FALSE;
+
+ pCurrentNode->SetVisited();
+
+ ArrayList::Iterator iter = pCurrentNode->GetEdges()->Iterate();
+ while (iter.Next())
+ {
+ ULONG_PTR edge = (ULONG_PTR)iter.GetElement();
+
+ BOOL fExpanding = (edge & Node::EDGE_EXPANDING_FLAG);
+
+ TypeVarTypeDesc *pToVar = (TypeVarTypeDesc *)(edge & ~Node::EDGE_EXPANDING_FLAG);
+ unsigned int dwIndex = pToVar->GetIndex();
+
+ Node *pNode = NULL;
+ RecursionGraph *pGraph = this;
+
+ // Find the destination node.
+ do
+ {
+ if (pGraph->m_pNodes != NULL &&
+ dwIndex < pGraph->m_thOwner.GetNumGenericArgs() &&
+ pGraph->m_pNodes[dwIndex].GetSourceVar() == pToVar)
+ {
+ pNode = &pGraph->m_pNodes[dwIndex];
+ break;
+ }
+ pGraph = pGraph->m_pPrev;
+ }
+ while (pGraph != NULL);
+
+ if (pNode != NULL)
+ {
+ // The new path is expanding if it was expanding already or if the edge we follow is expanding.
+ if (HasExpandingCycle(pNode, pStartNode, fExpanded || fExpanding))
+ return TRUE;
+ }
+ }
+
+ pCurrentNode->ClearVisited();
+
+ return FALSE;
+}
+
+} // namespace Generics
+
+#endif // !DACCESS_COMPILE
+
+namespace Generics
+{
+
+/*
+ * GetExactInstantiationsOfMethodAndItsClassFromCallInformation
+ *
+ * This routine takes in the various pieces of information of a call site to managed code
+ * and returns the exact instantiations for the method and the class on which the method is defined.
+ *
+ * Parameters:
+ * pRepMethod - A MethodDesc to the representative instantiation method.
+ * pThis - The OBJECTREF that is being passed to pRepMethod.
+ * pParamTypeArg - The extra argument passed to pRepMethod when pRepMethod is either
+ * RequiresInstMethodTableArg() or RequiresInstMethodDescArg().
+ * pSpecificClass - A pointer to a TypeHandle for storing the exact instantiation
+ * of the class on which pRepMethod is defined, based on the call information
+ * pSpecificMethod - A pointer to a MethodDesc* for storing the exact instantiation
+ * of pRepMethod, based on the call information
+ *
+ * Returns:
+ * TRUE if successful.
+ * FALSE if the exact TypeHandle & MethodDesc requested could not be obtained. In this
+ * case, *pSpecificClass may still be correct, provided the class is not a generic class.
+ *
+ */
+BOOL GetExactInstantiationsOfMethodAndItsClassFromCallInformation(
+ /* in */ MethodDesc *pRepMethod,
+ /* in */ OBJECTREF pThis,
+ /* in */ PTR_VOID pParamTypeArg,
+ /* out*/ TypeHandle *pSpecificClass,
+ /* out*/ MethodDesc** pSpecificMethod
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ CANNOT_TAKE_LOCK;
+ PRECONDITION(CheckPointer(pRepMethod));
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ PTR_VOID pExactGenericArgsToken = NULL;
+
+ if (pRepMethod->AcquiresInstMethodTableFromThis())
+ {
+ if (pThis != NULL)
+ {
+ // We could be missing the memory from a dump, or the target could have simply been corrupted.
+ ALLOW_DATATARGET_MISSING_MEMORY(
+ pExactGenericArgsToken = dac_cast<PTR_VOID>(pThis->GetMethodTable());
+ );
+ }
+ }
+ else
+ {
+ pExactGenericArgsToken = pParamTypeArg;
+ }
+
+ return GetExactInstantiationsOfMethodAndItsClassFromCallInformation(pRepMethod, pExactGenericArgsToken,
+ pSpecificClass, pSpecificMethod);
+}
+
+BOOL GetExactInstantiationsOfMethodAndItsClassFromCallInformation(
+ /* in */ MethodDesc *pRepMethod,
+ /* in */ PTR_VOID pExactGenericArgsToken,
+ /* out*/ TypeHandle *pSpecificClass,
+ /* out*/ MethodDesc** pSpecificMethod
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ CANNOT_TAKE_LOCK;
+ PRECONDITION(CheckPointer(pRepMethod));
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ //
+ // Start with some decent default values.
+ //
+ MethodDesc * pMD = pRepMethod;
+ MethodTable * pMT = pRepMethod->GetMethodTable();
+
+ *pSpecificMethod = pMD;
+ *pSpecificClass = pMT;
+
+ if (!pRepMethod->IsSharedByGenericInstantiations())
+ {
+ return TRUE;
+ }
+
+ if (pExactGenericArgsToken == NULL)
+ {
+ return FALSE;
+ }
+
+ BOOL retVal = FALSE;
+
+ // The following target memory reads will not necessarily succeed against dumps, and will throw on failure.
+ EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY
+ {
+ if (pRepMethod->RequiresInstMethodTableArg())
+ {
+ pMT = dac_cast<PTR_MethodTable>(pExactGenericArgsToken);
+ retVal = TRUE;
+ }
+ else if (pRepMethod->RequiresInstMethodDescArg())
+ {
+ pMD = dac_cast<PTR_MethodDesc>(pExactGenericArgsToken);
+ pMT = pMD->GetMethodTable();
+ retVal = TRUE;
+ }
+ else if (pRepMethod->AcquiresInstMethodTableFromThis())
+ {
+ // The exact token might actually be a child class of the class containing
+ // the specified function so walk up the parent chain to make sure we return
+ // an exact instantiation of the CORRECT parent class.
+ pMT = pMD->GetExactDeclaringType(dac_cast<PTR_MethodTable>(pExactGenericArgsToken));
+ _ASSERTE(pMT != NULL);
+ retVal = TRUE;
+ }
+ else
+ {
+ _ASSERTE(!"Should not happen.");
+ }
+ }
+ EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY
+
+ *pSpecificMethod = pMD;
+ *pSpecificClass = pMT;
+
+ return retVal;
+}
+
+} // namespace Generics;
+
diff --git a/src/vm/generics.h b/src/vm/generics.h
new file mode 100644
index 0000000000..93d0e583d0
--- /dev/null
+++ b/src/vm/generics.h
@@ -0,0 +1,181 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: generics.h
+//
+
+
+//
+// Helper functions for generics prototype
+//
+
+//
+// ============================================================================
+
+#ifndef _GENERICS_H
+#define _GENERICS_H
+
+#include "typehandle.h"
+#include "arraylist.h"
+
+class CrawlFrame;
+class DictionaryEntryLayout;
+
+// Generics helper functions
+namespace Generics
+{
+ // Part of the recursive inheritance graph as defined by ECMA-335 Partition II, §9.2.
+ //
+ // code:MethodTable.DoFullyLoad and code:TypeDesc.DoFullyLoad declare a local variable of
+ // this type and initialize it with:
+ // - pointer to the previous (in terms of callstack) RecursionGraph instance,
+ // - the type handle representing the type that is being fully-loaded.
+ //
+ // By walking the RecursionGraph chain, it is possible to tell whether the same type is
+ // not being fully-loaded already. So far this could as well be a description of the
+ // code:TypeHandleList. But aside from the "owner type", RecursionGraph can also hold
+ // part of a directed graph in which nodes are generic variables and edges represent the
+ // is-substituted-by relation. In particular, one RecursionGraph instance maintains nodes
+ // corresponding to generic variables declared by the owner type, and all edges going
+ // out of these nodes.
+ //
+ // As an example consider:
+ // class B<U> { }
+ // class A<T> : B<T> { }
+ //
+ // B's RecursionGraph has one node (U) and no edges. A's RecursionGraph has one node (T)
+ // and one edge from T to U.
+ //
+ // This is how it looks on the stack:
+ //
+ // A's DoFullyLoad activation - RecursionGraph(NULL, A<>) -> [T]
+ // ^-------- |
+ // | v
+ // B's DoFullyLoad activation - RecursionGraph( | , B<>) -> [U]
+ //
+ // The edges are obviously not real pointers because the destination may not yet be
+ // present on the stack when the edge is being added. Instead the edge end points are
+ // identified by TypeVarTypeDesc pointers. Edges come in two flavors - non-expanding
+ // and expanding, please see ECMA for detailed explanation. Finding an expanding cycle
+ // (i.e. cycle with at least one expanding edge) in the graph means that the types
+ // currently on stack are defined recursively and should be refused by the loader.
+ // Reliable detection of this condition is the ultimate purpose of this class.
+ //
+ // We do not always see all dependencies of a type on the stack. If the dependencies
+ // have been loaded earlier, loading stops there and part of the graph may be missing.
+ // However, this is of no concern because we are only interested in types with cyclic
+ // dependencies, and loading any type from such a closure will cause loading the rest
+ // of it. If part of the rest had been loaded earlier, it would have triggered loading
+ // the current type, so there's really no way expanding cycles can go undetected.
+ //
+ // Observation: if there is a cycle in type dependencies, there will be a moment when
+ // we'll have all the types participating in the cycle on the stack.
+ //
+ // Note that having a cycle in type dependencies is OK as long as it is not an expanding
+ // cycle. The simplest example of a cycle that is not expanding is A<T> : B<A<T>>. That
+ // is a perfectly valid type.
+ //
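+ // For contrast, a minimal sketch of an expanding cycle that must be refused
+ // (hypothetical types, for illustration only):
+ //     class A<T> : B<T> { }         // non-expanding edge T -> U
+ //     class B<U> : A<B<B<U>>> { }   // expanding edge U => T (U occurs inside B<B<U>>)
+ // The cycle T -> U => T contains an expanding edge, so the closure of instantiations
+ // A<X>, B<X>, A<B<B<X>>>, B<B<B<X>>>, ... never terminates.
+ //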
+ // The most interesting methods in this class are:
+ // * code:RecursionGraph.AddDependency - adds edges according to a type's instantiation.
+ // * code:RecursionGraph.HasExpandingCycle - looks for expanding cycles in the graph.
+ //
+ class RecursionGraph
+ {
+ public:
+ // Just records the owner and links to the previous graph. To actually construct the
+ // graph, call CheckForIllegalRecursion. Without CheckForIllegalRecursion, the
+ // functionality is limited to that of code:TypeHandleList.
+ RecursionGraph(RecursionGraph *pPrev, TypeHandle thOwner);
+ ~RecursionGraph();
+
+ // Adds edges generated by the parent and implemented interfaces; returns TRUE iff
+ // an expanding cycle was found after adding the edges.
+ BOOL CheckForIllegalRecursion();
+
+ // Returns TRUE iff the given type is already on the stack (in fact an analogue of
+ // code:TypeHandleList.Exists). This is to prevent recursively loading exactly the
+ // same type.
+ static BOOL HasSeenType(RecursionGraph *pDepGraph, TypeHandle thType);
+
+#ifndef DACCESS_COMPILE
+ protected:
+ // Adds the specified MT as a dependency (parent or interface) of the owner.
+ // pExpansionVars used internally.
+ void AddDependency(MethodTable *pMT, TypeHandleList *pExpansionVars = NULL);
+
+ // Adds an edge from pFromVar to pToVar, non-expanding or expanding.
+ void AddEdge(TypeVarTypeDesc *pFromVar, TypeVarTypeDesc *pToVar, BOOL fExpanding);
+
+ // Represents a node (a generic variable).
+ class Node
+ {
+ friend class RecursionGraph;
+
+ union
+ {
+ TypeVarTypeDesc *m_pFromVar; // The generic variable represented by this node.
+ ULONG_PTR m_pFromVarAsPtr; // The lowest bit determines the is-visited state.
+ };
+ ArrayList m_edges; // The outgoing edges (pointers to TypeVarTypeDesc).
+
+ enum
+ {
+ NODE_VISITED_FLAG = 0x1, // ORed with m_pFromVar if this node is currently being visited.
+ EDGE_EXPANDING_FLAG = 0x1 // ORed with an m_edges element if the edge is expanding.
+ };
+
+ public:
+ Node() : m_pFromVar(NULL)
+ { LIMITED_METHOD_CONTRACT; }
+
+ inline ArrayList *GetEdges() { LIMITED_METHOD_CONTRACT; return &m_edges; }
+ inline TypeVarTypeDesc *GetSourceVar() { LIMITED_METHOD_CONTRACT; return ((TypeVarTypeDesc *)(m_pFromVarAsPtr & ~NODE_VISITED_FLAG)); }
+ inline void SetSourceVar(TypeVarTypeDesc *pVar) { LIMITED_METHOD_CONTRACT; _ASSERTE(!m_pFromVar); m_pFromVar = pVar; }
+
+ inline BOOL IsVisited() { LIMITED_METHOD_CONTRACT; return (m_pFromVarAsPtr & NODE_VISITED_FLAG); }
+ inline void SetVisited() { LIMITED_METHOD_CONTRACT; m_pFromVarAsPtr |= NODE_VISITED_FLAG; }
+ inline void ClearVisited() { LIMITED_METHOD_CONTRACT; m_pFromVarAsPtr &= ~NODE_VISITED_FLAG; }
+ };
+
+ // Recursive worker that checks whether a node is part of an expanding cycle.
+ BOOL HasExpandingCycle(Node *pCurrentNode, Node *pStartNode, BOOL fExpanded = FALSE);
+
+ protected:
+ // Array of nodes, each representing a generic variable owned by m_thOwner. The
+ // number of nodes is m_thOwner.GetNumGenericArgs() and the order corresponds
+ // to m_thOwner's instantiation.
+ Node *m_pNodes;
+#endif // !DACCESS_COMPILE
+
+ protected:
+ RecursionGraph *m_pPrev;
+ TypeHandle m_thOwner;
+ };
+
+ // Check for legal instantiations. Returns true if the instantiation is legal.
+ BOOL CheckInstantiation(Instantiation inst);
+
+ BOOL GetExactInstantiationsOfMethodAndItsClassFromCallInformation(
+ /* in */ MethodDesc *pRepMethod,
+ /* in */ OBJECTREF pThis,
+ /* in */ PTR_VOID pParamTypeArg,
+ /* out*/ TypeHandle *pSpecificClass,
+ /* out*/ MethodDesc** pSpecificMethod);
+
+ BOOL GetExactInstantiationsOfMethodAndItsClassFromCallInformation(
+ /* in */ MethodDesc *pRepMethod,
+ /* in */ PTR_VOID pExactGenericArgsToken,
+ /* out*/ TypeHandle *pSpecificClass,
+ /* out*/ MethodDesc** pSpecificMethod);
+
+ inline void DetermineCCWTemplateAndGUIDPresenceOnNonCanonicalMethodTable(
+ // Input
+ MethodTable *pOldMT,
+ BOOL fNewMTContainsGenericVariables,
+ // Output
+ BOOL *pfHasGuidInfo, BOOL *pfHasCCWTemplate);
+};
+
+#endif
diff --git a/src/vm/generics.inl b/src/vm/generics.inl
new file mode 100644
index 0000000000..a7e93ab5ea
--- /dev/null
+++ b/src/vm/generics.inl
@@ -0,0 +1,107 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: generics.inl
+//
+
+//
+// Helper functions for generics implementation
+//
+
+//
+// ============================================================================
+
+#ifndef GENERICS_INL
+#define GENERICS_INL
+
+#ifdef FEATURE_COMINTEROP
+#include "winrttypenameconverter.h"
+#endif
+
+// Generics helper functions
+namespace Generics
+{
+#ifndef DACCESS_COMPILE
+ inline void DetermineCCWTemplateAndGUIDPresenceOnNonCanonicalMethodTable(
+ // Input
+ MethodTable *pOldMT, BOOL fNewMTContainsGenericVariables,
+ // Output
+ BOOL *pfHasGuidInfo, BOOL *pfHasCCWTemplate)
+ {
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_COMINTEROP
+ WORD wNumInterfaces = static_cast<WORD>(pOldMT->GetNumInterfaces());
+
+#ifdef CLR_STANDALONE_BINDER
+ InterfaceInfo_t * pOldIMap = BinderMethodTable::GetInterfaceMap(pOldMT);
+#else // !CLR_STANDALONE_BINDER
+ InterfaceInfo_t * pOldIMap = (InterfaceInfo_t *)pOldMT->GetInterfaceMap();
+#endif // !CLR_STANDALONE_BINDER
+
+ BOOL fHasGuidInfo = FALSE;
+
+ // Generic WinRT delegates expose a class interface and need the CCW template
+ BOOL fHasCCWTemplate = FALSE;
+
+ if (!fNewMTContainsGenericVariables)
+ {
+ if (pOldMT->IsInterface())
+ {
+ fHasGuidInfo = (pOldMT->IsProjectedFromWinRT() || WinRTTypeNameConverter::IsRedirectedType(pOldMT, WinMDAdapter::WinMDTypeKind_PInterface));
+ }
+ else if (pOldMT->IsDelegate())
+ {
+ fHasGuidInfo = (pOldMT->IsProjectedFromWinRT() || WinRTTypeNameConverter::IsRedirectedType(pOldMT, WinMDAdapter::WinMDTypeKind_PDelegate));
+
+ // Generic WinRT delegates expose a class interface and need a CCW template
+ fHasCCWTemplate = fHasGuidInfo;
+ }
+
+ if (!fHasCCWTemplate)
+ {
+ if (pOldMT->IsInterface())
+ {
+ // Interfaces need the CCW template if they are redirected and need variance
+ if (pOldMT->HasVariance() &&
+ (pOldMT->IsProjectedFromWinRT() || WinRTTypeNameConverter::IsRedirectedType(pOldMT, WinMDAdapter::WinMDTypeKind_PInterface)))
+ {
+ fHasCCWTemplate = TRUE;
+ }
+ }
+ else
+ {
+ // Other types may need the CCW template if they implement generic interfaces
+ for (WORD iItf = 0; iItf < wNumInterfaces; iItf++)
+ {
+ // If the class implements a generic WinRT interface, it needs its own (per-instantiation)
+ // CCW template as the one on EEClass would be shared and hence useless.
+#ifdef CLR_STANDALONE_BINDER
+ MethodTable *pItfMT = pOldIMap[iItf].m_pMethodTable.GetValue();
+#else // !CLR_STANDALONE_BINDER
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOAD_APPROXPARENTS);
+ MethodTable *pItfMT = pOldIMap[iItf].GetApproxMethodTable(pOldMT->GetLoaderModule());
+#endif // !CLR_STANDALONE_BINDER
+ if (pItfMT->HasInstantiation() &&
+ (pItfMT->IsProjectedFromWinRT() || WinRTTypeNameConverter::IsRedirectedType(pItfMT, WinMDAdapter::WinMDTypeKind_PInterface)))
+ {
+ fHasCCWTemplate = TRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+#else // FEATURE_COMINTEROP
+ BOOL fHasGuidInfo = FALSE;
+ BOOL fHasCCWTemplate = FALSE;
+#endif // FEATURE_COMINTEROP
+ *pfHasGuidInfo = fHasGuidInfo;
+ *pfHasCCWTemplate = fHasCCWTemplate;
+ }
+#endif // DACCESS_COMPILE
+}
+
+#endif // GENERICS_INL
diff --git a/src/vm/genmeth.cpp b/src/vm/genmeth.cpp
new file mode 100644
index 0000000000..3de0205156
--- /dev/null
+++ b/src/vm/genmeth.cpp
@@ -0,0 +1,1790 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// File: genmeth.cpp
+//
+// Most functionality for generic methods is put here
+//
+
+
+
+#include "common.h"
+#include "method.hpp"
+#include "field.h"
+#include "eeconfig.h"
+#include "perfcounters.h"
+#include "crst.h"
+#include "generics.h"
+#include "genericdict.h"
+#include "instmethhash.h"
+#include "typestring.h"
+#include "typedesc.h"
+#include "comdelegate.h"
+
+// Instantiated generic methods
+//
+// Method descriptors for instantiated generic methods are allocated on demand and inserted
+// into the InstMethodHashTable for the LoaderModule of the descriptor. (See ceeload.h for more
+// information about loader modules).
+//
+// For non-shared instantiations, entering the prestub for such a method descriptor causes the method to
+// be JIT-compiled, specialized to that instantiation.
+//
+// For shared instantiations, entering the prestub generates a piece of stub code that passes the
+// method descriptor as an extra argument and then jumps to code shared between compatible
+// instantiations. This code has its own method descriptor whose instantiation is *canonical*
+// (with reference-type type parameters replaced by Object).
+//
+// Thus, for example, the shared method descriptor for m<object> is different from the
+// exact-instantiation method descriptor for m<object>.
+//
+// Complete example:
+//
+// class C<T> { public void m<S>(S x, T y) { ... } }
+//
+// Suppose that code sharing is turned on.
+//
+// Upon compiling calls to C<string>.m<string>, C<string>.m<Type>, C<Type>.m<string> and
+// C<Type>.m<Type>, all four call sites run the single shared code body for the canonical
+// C<object>.m<object>, each passing its own exact instantiated method descriptor as the
+// extra hidden argument.
+
+// Given a generic method descriptor and an instantiation, create a new instantiated method
+// descriptor and chain it into the list attached to the generic method descriptor
+//
+// pMT is the owner method table. If looking for a shared MD this should be
+// the MT for the shared class.
+//
+// pGenericMD is the generic method descriptor (owner may be instantiated)
+// pWrappedMD is the corresponding shared md for use when creating stubs
+// nGenericMethodArgs/genericMethodArgs is the instantiation
+// getWrappedCode=TRUE if you want a shared instantiated md whose code expects an extra argument. In this
+// case pWrappedMD should be NULL.
+//
+// The result is put in ppMD
+//
+// If getWrappedCode is TRUE, the genericMethodArgs
+// should be the normalized representative genericMethodArgs (see typehandle.h)
+//
+
+
+// Helper method that creates a method-desc off a template method desc
+static MethodDesc* CreateMethodDesc(LoaderAllocator *pAllocator,
+ MethodTable *pMT,
+ MethodDesc *pTemplateMD,
+ DWORD classification,
+ BOOL fNativeCodeSlot,
+ BOOL fComPlusCallInfo,
+ AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pAllocator));
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(pTemplateMD));
+ PRECONDITION(pTemplateMD->IsRestored());
+ PRECONDITION(pMT->IsRestored_NoLogging());
+ PRECONDITION(pTemplateMD->GetMethodTable()->GetCanonicalMethodTable() == pMT->GetCanonicalMethodTable());
+ }
+ CONTRACTL_END
+
+ mdMethodDef token = pTemplateMD->GetMemberDef();
+
+ // Create a singleton chunk for the method desc
+ MethodDescChunk *pChunk =
+ MethodDescChunk::CreateChunk(pAllocator->GetHighFrequencyHeap(),
+ 1,
+ classification,
+ TRUE /* fNonVtableSlot*/,
+ fNativeCodeSlot,
+ fComPlusCallInfo,
+ pMT,
+ pamTracker);
+
+ // Now initialize the single method descriptor in the new chunk
+ MethodDesc *pMD = pChunk->GetFirstMethodDesc();
+
+ // We copy over the flags one by one. This is fragile w.r.t. adding
+ // new flags, but other techniques are also fragile. <NICE>We should move
+ // to using constructors on MethodDesc</NICE>
+ if (pTemplateMD->IsStatic())
+ {
+ pMD->SetStatic();
+ }
+ if (pTemplateMD->IsNotInline())
+ {
+ pMD->SetNotInline(true);
+ }
+ if (pTemplateMD->IsSynchronized())
+ {
+ pMD->SetSynchronized();
+ }
+ if (pTemplateMD->RequiresLinktimeCheck())
+ {
+ pMD->SetRequiresLinktimeCheck();
+ }
+ if (pTemplateMD->RequiresInheritanceCheck())
+ {
+ pMD->SetRequiresInheritanceCheck();
+ }
+ if (pTemplateMD->ParentRequiresInheritanceCheck())
+ {
+ pMD->SetParentRequiresInheritanceCheck();
+ }
+ if (pTemplateMD->IsInterceptedForDeclSecurity())
+ {
+ pMD->SetInterceptedForDeclSecurity();
+ }
+ if (pTemplateMD->IsInterceptedForDeclSecurityCASDemandsOnly())
+ {
+ pMD->SetInterceptedForDeclSecurityCASDemandsOnly();
+ }
+ if (pTemplateMD->HasCriticalTransparentInfo())
+ {
+ pMD->SetCriticalTransparentInfo(pTemplateMD->IsCritical(), pTemplateMD->IsTreatAsSafe());
+ }
+ if (pTemplateMD->RequiresLinkTimeCheckHostProtectionOnly())
+ {
+ pMD->SetRequiresLinkTimeCheckHostProtectionOnly();
+ }
+
+ pMD->SetMemberDef(token);
+ pMD->SetSlot(pTemplateMD->GetSlot());
+
+#ifdef _DEBUG
+ pMD->m_pszDebugMethodName = pTemplateMD->m_pszDebugMethodName;
+ //<NICE> more info here</NICE>
+ pMD->m_pszDebugMethodSignature = "<generic method signature>";
+ pMD->m_pszDebugClassName = "<generic method class name>";
+ pMD->m_pszDebugMethodName = "<generic method name>";
+ pMD->m_pDebugMethodTable.SetValue(pMT);
+#endif // _DEBUG
+
+ return pMD;
+}
+
+//
+// The following methods map between tightly bound boxing and unboxing MethodDescs.
+// We always lay out boxing and unboxing MethodDescs next to each other in the same
+// MethodDescChunk. This allows us to avoid brute-force iteration over all methods
+// on the type to perform the mapping.
+//
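+// An illustrative chunk layout (simplified sketch):
+//
+//   MethodDescChunk for a value type:
+//     ...
+//     [i]   unboxing stub for S.M      (IsUnboxingStub() == TRUE)
+//     [i+1] wrapped MethodDesc for S.M (expects an unboxed "this")
+//     ...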
+
+//
+// Find matching tightly-bound methoddesc
+//
+static MethodDesc * FindTightlyBoundWrappedMethodDesc(MethodDesc * pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END
+
+ if (pMD->IsUnboxingStub() && pMD->GetClassification() == mcInstantiated)
+ pMD = pMD->AsInstantiatedMethodDesc()->IMD_GetWrappedMethodDesc();
+
+ // Find matching MethodDesc in the MethodTable
+ if (!pMD->IsTightlyBoundToMethodTable())
+ pMD = pMD->GetCanonicalMethodTable()->GetParallelMethodDesc(pMD);
+ _ASSERTE(pMD->IsTightlyBoundToMethodTable());
+
+ // Real MethodDesc immediately follows unboxing stub
+ if (pMD->IsUnboxingStub())
+ pMD = MethodTable::IntroducedMethodIterator::GetNext(pMD);
+ _ASSERTE(!pMD->IsUnboxingStub());
+
+ return pMD;
+}
+
+//
+// Find matching tightly-bound unboxing stub if there is one
+//
+static MethodDesc * FindTightlyBoundUnboxingStub(MethodDesc * pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END
+
+ // Find matching MethodDesc in the MethodTable
+ if (!pMD->IsTightlyBoundToMethodTable())
+ pMD = pMD->GetCanonicalMethodTable()->GetParallelMethodDesc(pMD);
+ _ASSERTE(pMD->IsTightlyBoundToMethodTable());
+
+ // We are done if we have unboxing stub already
+ if (pMD->IsUnboxingStub())
+ return pMD;
+
+ //
+ // Unboxing stub immediately precedes real methoddesc
+ //
+ MethodDesc * pCurMD = pMD->GetMethodDescChunk()->GetFirstMethodDesc();
+
+ if (pCurMD == pMD)
+ return NULL;
+
+ for (;;)
+ {
+ MethodDesc * pNextMD = MethodTable::IntroducedMethodIterator::GetNext(pCurMD);
+ if (pNextMD == pMD)
+ break;
+ pCurMD = pNextMD;
+ }
+
+ return pCurMD->IsUnboxingStub() ? pCurMD : NULL;
+}
+
+#ifdef _DEBUG
+//
+// Alternative brute-force implementation of FindTightlyBoundWrappedMethodDesc for debug-only check.
+//
+// Please note that this does not do the same up-front checks as the non-debug version to
+// see whether or not the input pMD is even an unboxing stub in the first place.
+//
+static MethodDesc * FindTightlyBoundWrappedMethodDesc_DEBUG(MethodDesc * pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END
+
+ mdMethodDef methodDef = pMD->GetMemberDef();
+ Module *pModule = pMD->GetModule();
+
+ MethodTable::MethodIterator it(pMD->GetCanonicalMethodTable());
+ it.MoveToEnd();
+ for (; it.IsValid(); it.Prev()) {
+ if (!it.IsVirtual()) {
+ // Get the MethodDesc for current method
+ MethodDesc* pCurMethod = it.GetMethodDesc();
+
+ if (pCurMethod && !pCurMethod->IsUnboxingStub()) {
+ if ((pCurMethod->GetMemberDef() == methodDef) &&
+ (pCurMethod->GetModule() == pModule))
+ {
+ return pCurMethod;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+//
+// Alternative brute-force implementation of FindTightlyBoundUnboxingStub for debug-only check
+//
+// Please note that this does not do the same up-front checks as the non-debug version to
+// see whether or not the input pMD even qualifies to have a corresponding unboxing stub.
+//
+static MethodDesc * FindTightlyBoundUnboxingStub_DEBUG(MethodDesc * pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END
+
+ mdMethodDef methodDef = pMD->GetMemberDef();
+ Module *pModule = pMD->GetModule();
+
+ MethodTable::MethodIterator it(pMD->GetCanonicalMethodTable());
+ it.MoveToEnd();
+ for (; it.IsValid(); it.Prev()) {
+ if (it.IsVirtual()) {
+ MethodDesc* pCurMethod = it.GetMethodDesc();
+ if (pCurMethod && pCurMethod->IsUnboxingStub()) {
+ if ((pCurMethod->GetMemberDef() == methodDef) &&
+ (pCurMethod->GetModule() == pModule)) {
+ return pCurMethod;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+#endif // _DEBUG
+
+/* static */
+InstantiatedMethodDesc *
+InstantiatedMethodDesc::NewInstantiatedMethodDesc(MethodTable *pExactMT,
+ MethodDesc* pGenericMDescInRepMT,
+ MethodDesc* pWrappedMD,
+ Instantiation methodInst,
+ BOOL getWrappedCode)
+{
+ CONTRACT(InstantiatedMethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pExactMT));
+ PRECONDITION(CheckPointer(pGenericMDescInRepMT));
+ PRECONDITION(pGenericMDescInRepMT->IsRestored());
+ PRECONDITION(pWrappedMD == NULL || pWrappedMD->IsRestored());
+ PRECONDITION(methodInst.IsEmpty() || pGenericMDescInRepMT->IsGenericMethodDefinition());
+ PRECONDITION(methodInst.GetNumArgs() == pGenericMDescInRepMT->GetNumGenericMethodArgs());
+ POSTCONDITION(CheckPointer(RETVAL));
+ POSTCONDITION(RETVAL->IsRestored());
+ POSTCONDITION(getWrappedCode == RETVAL->IsSharedByGenericInstantiations());
+ POSTCONDITION(methodInst.IsEmpty() || RETVAL->HasMethodInstantiation());
+ }
+ CONTRACT_END;
+
+ // All instantiated method descs hang off the representative (canonical) method table
+ // for the instantiated class they belong to.
+ INDEBUG(MethodTable * pCanonMT = pExactMT->GetCanonicalMethodTable();)
+
+ _ASSERTE(pGenericMDescInRepMT->GetMethodTable() == pCanonMT);
+
+ if (getWrappedCode)
+ {
+ _ASSERTE(pWrappedMD == NULL);
+ _ASSERTE(pExactMT->IsCanonicalMethodTable());
+ _ASSERTE(pCanonMT == pExactMT);
+ _ASSERTE(pExactMT->IsSharedByGenericInstantiations() || ClassLoader::IsSharableInstantiation(methodInst));
+
+ }
+
+ InstantiatedMethodDesc *pNewMD;
+ //@todo : move this into the domain
+ Module * pExactMDLoaderModule = ClassLoader::ComputeLoaderModule(pExactMT, pGenericMDescInRepMT->GetMemberDef(), methodInst);
+
+ LoaderAllocator * pAllocator = pExactMDLoaderModule->GetLoaderAllocator();
+
+ // Create LoaderAllocator to LoaderAllocator links for members of the instantiations of this method
+ pAllocator->EnsureInstantiation(pExactMT->GetLoaderModule(), pExactMT->GetInstantiation());
+ pAllocator->EnsureInstantiation(pGenericMDescInRepMT->GetLoaderModule(), methodInst);
+
+ {
+ // Acquire crst to prevent tripping up other threads searching in the same hashtable
+ CrstHolder ch(&pExactMDLoaderModule->m_InstMethodHashTableCrst);
+
+ // Check whether another thread beat us to it!
+ pNewMD = FindLoadedInstantiatedMethodDesc(pExactMT,
+ pGenericMDescInRepMT->GetMemberDef(),
+ methodInst,
+ getWrappedCode);
+
+ // Crst goes out of scope here
+ // We don't need to hold the crst while we build the MethodDesc, but we reacquire it later
+ }
+
+#ifdef FEATURE_PREJIT
+ // This section is the search for an instantiation in the various NGEN images
+ // where we may have precompiled the instantiation.
+ // Never use dyn link zap items during ngen time. We will independently decide later
+ // whether we want to store the item into ngen image or not.
+ if ((pNewMD == NULL) && !IsCompilationProcess())
+ {
+ // We need to know which domain the item must live in (DomainNeutral or AppDomain)
+ // <TODO>We can't use pDomain because at NGEN
+ // time this may not be accurate - this must be cleaned up as part of getting
+ // rid of GetLoaderModule() altogether.... </TODO>
+ BaseDomain * pRequiredDomain = BaseDomain::ComputeBaseDomain(
+ pExactMT->GetDomain(),
+ pExactMT->GetInstantiation(),
+ methodInst);
+
+ // Next look in each ngen'ed image in turn
+ AppDomain::AssemblyIterator assemblyIterator = GetAppDomain()->IterateAssembliesEx((AssemblyIterationFlags)(
+ kIncludeLoaded |
+ (pExactMT->IsIntrospectionOnly() ? kIncludeIntrospection : kIncludeExecution)));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+ while ((pNewMD == NULL) && assemblyIterator.Next(pDomainAssembly.This()))
+ {
+ // Make sure the domain of the NGEN'd images associated with the assembly matches...
+ // No need to check this when NGEN'ing
+ CollectibleAssemblyHolder<Assembly *> pAssembly = pDomainAssembly->GetLoadedAssembly();
+ if (GetAppDomain()->IsCompilationDomain() || (pAssembly->GetDomain() == pRequiredDomain))
+ {
+ DomainAssembly::ModuleIterator i = pDomainAssembly->IterateModules(kModIterIncludeLoaded);
+ while ((pNewMD == NULL) && i.Next())
+ {
+ Module * pModule = i.GetLoadedModule();
+ if (!pModule->HasNativeImage())
+ continue;
+ _ASSERTE(!pModule->IsCollectible());
+
+ // We don't need to track references to normal (non-collectible) assemblies
+ pNewMD = (InstantiatedMethodDesc *)pModule->GetInstMethodHashTable()->FindMethodDesc(
+ TypeHandle(pExactMT),
+ pGenericMDescInRepMT->GetMemberDef(),
+ FALSE /* not forceBoxedEntryPoint */,
+ methodInst,
+ getWrappedCode);
+ if (pNewMD == NULL)
+ continue;
+#ifdef _DEBUG
+#ifndef DACCESS_COMPILE
+ if (LoggingOn(LF_CLASSLOADER, LL_INFO10000))
+ {
+ StackSString methodName;
+ pNewMD->CheckRestore();
+ TypeString::AppendMethodDebug(methodName, pNewMD);
+ LOG((LF_CLASSLOADER, LL_INFO10000, "Found method %S in non-preferred zap module %S\n", methodName.GetUnicode(), pModule->GetPath().GetUnicode()));
+ }
+#endif //!DACCESS_COMPILE
+#endif //_DEBUG
+ }
+ }
+ else
+ {
+ LOG((LF_CLASSLOADER, LL_INFO10000, "Skipping assembly %S due to domain mismatch when searching for prejitted instantiation\n",
+ pAssembly->GetDebugName()));
+ }
+ }
+ }
+#endif // FEATURE_PREJIT
+
+ if (pNewMD != NULL)
+ {
+ pNewMD->CheckRestore();
+ }
+ else
+ {
+ TypeHandle *pInstOrPerInstInfo = NULL;
+ DictionaryLayout *pDL = NULL;
+ DWORD infoSize = 0;
+ IBCLoggerAwareAllocMemTracker amt;
+
+ if (!methodInst.IsEmpty())
+ {
+ if (pWrappedMD)
+ {
+ if (pWrappedMD->IsSharedByGenericMethodInstantiations())
+ {
+ pDL = pWrappedMD->AsInstantiatedMethodDesc()->m_pDictLayout;
+ }
+ }
+ else if (getWrappedCode)
+ {
+ // 4 seems like a good number
+ pDL = DictionaryLayout::Allocate(4, pAllocator, &amt);
+#ifdef _DEBUG
+ {
+ SString name;
+ TypeString::AppendMethodDebug(name, pGenericMDescInRepMT);
+ LOG((LF_JIT, LL_INFO1000, "GENERICS: Created new dictionary layout for dictionary of size %d for %S\n",
+ DictionaryLayout::GetFirstDictionaryBucketSize(pGenericMDescInRepMT->GetNumGenericMethodArgs(), pDL), name.GetUnicode()));
+ }
+#endif // _DEBUG
+ }
+
+ // Allocate space for the instantiation and dictionary
+ infoSize = DictionaryLayout::GetFirstDictionaryBucketSize(methodInst.GetNumArgs(), pDL);
+ pInstOrPerInstInfo = (TypeHandle *) (void*) amt.Track(pAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(infoSize)));
+ for (DWORD i = 0; i < methodInst.GetNumArgs(); i++)
+ pInstOrPerInstInfo[i] = methodInst[i];
+ }
+
+ BOOL forComInterop = FALSE;
+#ifdef FEATURE_COMINTEROP
+ if (pExactMT->IsProjectedFromWinRT())
+ {
+ forComInterop = (pExactMT->IsInterface() || (pExactMT->IsDelegate() && COMDelegate::IsDelegateInvokeMethod(pGenericMDescInRepMT)));
+ }
+ else
+ {
+ // redirected interfaces and delegates also support interop
+ forComInterop = (pExactMT->IsWinRTRedirectedInterface(TypeHandle::Interop_ManagedToNative) ||
+ (pExactMT->IsWinRTRedirectedDelegate() && COMDelegate::IsDelegateInvokeMethod(pGenericMDescInRepMT)));
+ }
+#endif // FEATURE_COMINTEROP
+
+ // Create a new singleton chunk for the new instantiated method descriptor
+ // Notice that we've passed in the method table pointer; this gets
+ // used in some of the subsequent setup methods for method descs.
+ //
+ pNewMD = (InstantiatedMethodDesc*) (CreateMethodDesc(pAllocator,
+ pExactMT,
+ pGenericMDescInRepMT,
+ mcInstantiated,
+ !pWrappedMD, // This is a pessimistic estimate for fNativeCodeSlot
+ forComInterop,
+ &amt));
+
+ // Initialize the MD the way it needs to be
+ if (pWrappedMD)
+ {
+ pNewMD->SetupWrapperStubWithInstantiations(pWrappedMD, methodInst.GetNumArgs(), pInstOrPerInstInfo);
+ _ASSERTE(pNewMD->IsInstantiatingStub());
+ }
+ else if (getWrappedCode)
+ {
+ pNewMD->SetupSharedMethodInstantiation(methodInst.GetNumArgs(), pInstOrPerInstInfo, pDL);
+ _ASSERTE(!pNewMD->IsInstantiatingStub());
+ }
+ else
+ {
+ pNewMD->SetupUnsharedMethodInstantiation(methodInst.GetNumArgs(), pInstOrPerInstInfo);
+ }
+
+ // Check that whichever field holds the inst. got setup correctly
+ _ASSERTE((PVOID)pNewMD->GetMethodInstantiation().GetRawArgs() == (PVOID)pInstOrPerInstInfo);
+
+ pNewMD->SetTemporaryEntryPoint(pAllocator, &amt);
+
+ {
+ // The canonical instantiation is exempt from constraint checks. It's used as the basis
+ // for all other reference instantiations, so we cannot avoid loading it. The Canon type is
+ // not visible to users, so it cannot be abused.
+
+ BOOL fExempt =
+ TypeHandle::IsCanonicalSubtypeInstantiation(methodInst) ||
+ TypeHandle::IsCanonicalSubtypeInstantiation(pNewMD->GetClassInstantiation());
+
+ if (!fExempt)
+ {
+ pNewMD->SatisfiesMethodConstraints(TypeHandle(pExactMT), TRUE);
+ }
+ }
+
+ // OK, now we have a candidate MethodDesc.
+ {
+ CrstHolder ch(&pExactMDLoaderModule->m_InstMethodHashTableCrst);
+
+ // We checked before, but make sure again that another thread didn't beat us to it!
+ InstantiatedMethodDesc *pOldMD = FindLoadedInstantiatedMethodDesc(pExactMT,
+ pGenericMDescInRepMT->GetMemberDef(),
+ methodInst,
+ getWrappedCode);
+
+ if (pOldMD == NULL)
+ {
+ // No one else got there first, our MethodDesc wins.
+ amt.SuppressRelease();
+
+#ifdef _DEBUG
+ SString name(SString::Utf8);
+ TypeString::AppendMethodDebug(name, pNewMD);
+ StackScratchBuffer buff;
+ const char* pDebugNameUTF8 = name.GetUTF8(buff);
+ const char* verb = "Created";
+ if (pWrappedMD)
+ LOG((LF_CLASSLOADER, LL_INFO1000,
+ "GENERICS: %s instantiating-stub method desc %s with dictionary size %d\n",
+ verb, pDebugNameUTF8, infoSize));
+ else
+ LOG((LF_CLASSLOADER, LL_INFO1000,
+ "GENERICS: %s instantiated method desc %s\n",
+ verb, pDebugNameUTF8));
+
+ S_SIZE_T safeLen = S_SIZE_T(strlen(pDebugNameUTF8))+S_SIZE_T(1);
+ if(safeLen.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW);
+
+ size_t len = safeLen.Value();
+ pNewMD->m_pszDebugMethodName = (char*) (void*)pAllocator->GetLowFrequencyHeap()->AllocMem(safeLen);
+ _ASSERTE(pNewMD->m_pszDebugMethodName);
+ strcpy_s((char *) pNewMD->m_pszDebugMethodName, len, pDebugNameUTF8);
+ pNewMD->m_pszDebugClassName = pExactMT->GetDebugClassName();
+ pNewMD->m_pszDebugMethodSignature = (LPUTF8)pNewMD->m_pszDebugMethodName;
+#endif // _DEBUG
+
+ // Generic methods can't be varargs. code:MethodTableBuilder::ValidateMethods should have checked it.
+ _ASSERTE(!pNewMD->IsVarArg());
+
+ // Verify that we are not creating redundant MethodDescs
+ _ASSERTE(!pNewMD->IsTightlyBoundToMethodTable());
+
+ // The method desc is fully set up; now add to the table
+ InstMethodHashTable* pTable = pExactMDLoaderModule->GetInstMethodHashTable();
+ pTable->InsertMethodDesc(pNewMD);
+ }
+ else
+ pNewMD = pOldMD;
+ // CrstHolder goes out of scope here
+ }
+
+ }
+
+ RETURN pNewMD;
+}
+
+// Calling this method is equivalent to
+// FindOrCreateAssociatedMethodDesc(pCanonicalMD, pExactMT, FALSE, Instantiation(), FALSE, TRUE)
+// except that it also creates InstantiatedMethodDescs based on shared class methods. This is
+// convenient for interop where, unlike ordinary managed methods, marshaling stubs for, say, Foo<string>
+// and Foo<object> look very different and need separate representation.
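+//
+// Illustrative use (sketch; names are hypothetical):
+//   // pCanonMD : shared MethodDesc for Foo<__Canon>.M
+//   // pExactMT : exact MethodTable for Foo<string>
+//   InstantiatedMethodDesc * pExactMD =
+//       InstantiatedMethodDesc::FindOrCreateExactClassMethod(pExactMT, pCanonMD);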
+InstantiatedMethodDesc*
+InstantiatedMethodDesc::FindOrCreateExactClassMethod(MethodTable *pExactMT,
+ MethodDesc *pCanonicalMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(!pExactMT->IsSharedByGenericInstantiations());
+ PRECONDITION(pCanonicalMD->IsSharedByGenericInstantiations());
+ }
+ CONTRACTL_END;
+
+ InstantiatedMethodDesc *pInstMD = FindLoadedInstantiatedMethodDesc(pExactMT,
+ pCanonicalMD->GetMemberDef(),
+ Instantiation(),
+ FALSE);
+
+ if (pInstMD == NULL)
+ {
+ // create a new MD if not found
+ pInstMD = NewInstantiatedMethodDesc(pExactMT,
+ pCanonicalMD,
+ pCanonicalMD,
+ Instantiation(),
+ FALSE);
+ }
+
+ return pInstMD;
+}
+
+// N.B. it is not guaranteed that the returned InstantiatedMethodDesc is restored.
+// It is the caller's responsibility to call CheckRestore on the returned value.
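+//
+// Typical caller pattern (sketch):
+//
+//   InstantiatedMethodDesc * pIMD =
+//       InstantiatedMethodDesc::FindLoadedInstantiatedMethodDesc(pMT, methodDef, inst, FALSE);
+//   if (pIMD != NULL)
+//       pIMD->CheckRestore();   // the caller must restore before use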
+/* static */
+InstantiatedMethodDesc*
+InstantiatedMethodDesc::FindLoadedInstantiatedMethodDesc(MethodTable *pExactOrRepMT,
+ mdMethodDef methodDef,
+ Instantiation methodInst,
+ BOOL getWrappedCode)
+{
+ CONTRACT(InstantiatedMethodDesc *)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(pExactOrRepMT));
+
+ // All wrapped method descriptors (except BoxedEntryPointStubs, which don't use this path) are
+ // canonical and exhibit some kind of code sharing.
+ PRECONDITION(!getWrappedCode || pExactOrRepMT->IsCanonicalMethodTable());
+ PRECONDITION(!getWrappedCode || pExactOrRepMT->IsSharedByGenericInstantiations() || ClassLoader::IsSharableInstantiation(methodInst));
+
+ // Unboxing stubs are dealt with separately in FindOrCreateAssociatedMethodDesc. This should
+ // probably be streamlined...
+ POSTCONDITION(!RETVAL || !RETVAL->IsUnboxingStub());
+
+ // All wrapped method descriptors (except BoxedEntryPointStubs, which don't use this path) take an inst arg.
+ // The only ones that don't should have been found in the type's meth table.
+ POSTCONDITION(!getWrappedCode || !RETVAL || !RETVAL->IsRestored() || RETVAL->RequiresInstArg());
+ }
+ CONTRACT_END
+
+
+ // First look in the table for the runtime loader module in case someone created it before any
+ // zap modules got loaded
+ Module *pLoaderModule = ClassLoader::ComputeLoaderModule(pExactOrRepMT, methodDef, methodInst);
+
+ InstMethodHashTable* pTable = pLoaderModule->GetInstMethodHashTable();
+ MethodDesc *resultMD = pTable->FindMethodDesc(TypeHandle(pExactOrRepMT),
+ methodDef,
+ FALSE /* not forceBoxedEntryPoint */,
+ methodInst,
+ getWrappedCode);
+
+ if (resultMD != NULL)
+ RETURN((InstantiatedMethodDesc*) resultMD);
+
+#ifdef FEATURE_PREJIT
+ // Next look in the preferred zap module
+ Module *pPreferredZapModule = Module::ComputePreferredZapModule(pExactOrRepMT->GetModule(),
+ pExactOrRepMT->GetInstantiation(),
+ methodInst);
+ if (pPreferredZapModule->HasNativeImage())
+ {
+ resultMD = pPreferredZapModule->GetInstMethodHashTable()->FindMethodDesc(TypeHandle(pExactOrRepMT),
+ methodDef,
+ FALSE /* not forceBoxedEntryPoint */,
+ methodInst,
+ getWrappedCode);
+
+ if (resultMD != NULL)
+ RETURN((InstantiatedMethodDesc*) resultMD);
+ }
+#endif // FEATURE_PREJIT
+
+ RETURN(NULL);
+}
+
+
+// Given a method descriptor, find (or create) an instantiated
+// method descriptor or BoxedEntryPointStub associated with that method
+// and a particular instantiation of any generic method arguments. Also check
+// the method instantiation is valid.
+//
+// This routine also works for non-generic methods - it will be fast
+// in most cases. In this case nothing in particular
+// occurs except for static methods in shared generic classes, where an
+// instantiating stub is needed.
+//
+// The generic parameters provided are only those for the generic method.
+// pExactMT should be used to specify any class parameters.
+//
+// Unboxing stubs
+// --------------
+//
+// These are required to provide callable addresses with a uniform calling convention
+// for all methods on a boxed value class object. There are a wide range of possible
+// methods:
+// 1 virtual, non-generic instance methods
+// 2 non-virtual, non-generic instance methods
+// 3 virtual, generic instance methods
+// 4 non-virtual, generic instance methods
+// There is no substantial difference between case 3 and case 4: the only times
+// when BoxedEntryPointStubs are used for non-virtual methods are when calling a delegate or
+// making a reflection call.
+//
+// The only substantial difference between 1 and 2 is that the stubs are stored in
+// different places - we are forced to create the BoxedEntryPointStubs for (1) at class
+// creation time (they form part of the vtable and dispatch maps). Hence these
+// stubs are "owned" by method tables. We store all other stubs in the AssociatedMethTable.
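+//
+// For example (C#-like, illustrative):
+//
+//   struct S {
+//       public override string ToString() { ... }   // case 1: virtual, non-generic
+//       public void m() { ... }                     // case 2: non-virtual, non-generic
+//       public void m<T>() { ... }                  // case 4: non-virtual, generic
+//   }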
+//
+// Unboxing stubs and generics
+// ---------------------------
+//
+// Generics code sharing complicates matters. The typical cases are where the struct
+// is in a shared-codegenerics struct such as
+//
+// struct Pair<string,object>
+//
+// which shares code with other types such as Pair<object,object>. All the code that ends up
+// being run for all the methods in such a struct takes an instantiation parameter, i.e.
+// is RequiresInstArg(), a non-uniform calling convention. We obviously can't give these out as
+// targets of delegate calls. Hence we have to wrap this shared code in various stubs in
+// order to get the right type context parameter provided to the shared code.
+//
+// Unboxing stubs on shared-code generic structs, e.g. Pair<object,string>,
+// acquire the class-portion of their type context from the "this" pointer.
+//
+// Thus there are two flavours of BoxedEntryPointStubs:
+//
+// - Methods that are not themselves generic:
+//
+// These wrap possibly-shared code (hence allowInstParam == TRUE).
+//
+// These directly call the possible-shared code for the instance method. This code
+// can have the following calling conventions:
+// - RequiresMethodTableInstArg() (if pMT->SharedByGenericInstantiations())
+// - Uniform (if !pMT->SharedByGenericInstantiations())
+//
+// Thus if the code they wrap requires a method table argument, the unboxing stub can
+// recover it from the method table of the boxed "this" pointer.
+//
+// - Methods that are themselves generic:
+//
+// These wrap unshared code (hence allowInstParam == FALSE):
+//
+// These are always invoked by slow paths (how often do you use a generic method in a struct?),
+// such as JIT_VirtualFunctionPointer or a reflection call. These paths eventually
+// use FindOrCreateAssociatedMethodDesc to piece together the exact instantiation provided by the "this"
+// pointer with the exact instantiation provided by the wrapped method pointer.
+//
+// These call a stub for the instance method which provides the instantiation
+// context, possibly in turn calling further shared code. This stub will
+// always be !RequiresInstArg()
+//
+// If the method being called is generic, the BoxedEntryPointStub therefore wraps an
+// instantiating stub rather than calling the shared code directly.
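+//
+// Sketch (illustrative) of the resulting chain for a generic method on a shared struct:
+//
+//   BoxedEntryPointStub for Pair<object,string>.m<string>
+//     --> instantiating stub for Pair<object,string>.m<string>  (exact, !RequiresInstArg)
+//       --> shared code for Pair<object,object>.m<object>       (RequiresInstArg)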
+//
+// Remotable methods
+// -----------------
+//
+// Remoting has high requirements for method descs passed to it (i.e. the method desc that represents the client "view" of the
+// method to be called on the real server object). Since the identity of the method call is serialized and passed on the wire before
+// being resolved into the real target method on the server, remoting needs to be able to extract exact instantiation information from
+// its inputs (a method desc and a this pointer).
+//
+// To that end generic methods should always be passed via an instantiating stub (i.e. set allowInstParam to FALSE when calling
+// FindOrCreateAssociatedMethodDesc).
+//
+// There's a more subtle problem though. If the client method call is via a non-generic method on a generic interface we won't have
+// enough information to serialize the call. That's because such methods don't have instantiated method descs by default (these are
+// weighty structures and most of the runtime would never use the extra information). The this pointer doesn't help provide the
+// additional information in this case (consider the case of a class that implements both IFoo<String> and IFoo<Random>).
+//
+// So instead we create instantiated interface method descs on demand (i.e. during stub-based interface dispatch). Setting the
+// forceRemotableMethod predicate to TRUE below will ensure this (it's a no-op for methods that don't match this pattern, so it can be
+// freely set to TRUE for all calls intended to produce a remotable-ready method). This characteristic of a methoddesc that is fully
+// descriptive of the method and class used is also necessary in certain places in reflection. In particular, it is known to be needed
+// for the Delegate.CreateDelegate logic.
+//
+// allowCreate may be set to FALSE to enforce that the method searched
+// should already be in existence - thus preventing creation and GCs during
+// inappropriate times.
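+//
+// Illustrative call (a sketch; assumes the trailing parameters take their defaults):
+//
+//   MethodDesc * pExactMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+//       pDefMD, pExactMT,
+//       FALSE,          // forceBoxedEntryPoint: not asking for an unboxing stub
+//       methodInst,
+//       FALSE,          // allowInstParam: we want an exact MD, not shared code
+//       TRUE);          // forceRemotableMethod: fully descriptive MD required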
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+/* static */
+MethodDesc*
+MethodDesc::FindOrCreateAssociatedMethodDesc(MethodDesc* pDefMD,
+ MethodTable *pExactMT,
+ BOOL forceBoxedEntryPoint,
+ Instantiation methodInst,
+ BOOL allowInstParam,
+ BOOL forceRemotableMethod,
+ BOOL allowCreate,
+ ClassLoadLevel level)
+{
+ CONTRACT(MethodDesc*)
+ {
+ THROWS;
+ if (allowCreate) { GC_TRIGGERS; } else { GC_NOTRIGGER; }
+ INJECT_FAULT(COMPlusThrowOM(););
+
+ PRECONDITION(CheckPointer(pDefMD));
+ PRECONDITION(CheckPointer(pExactMT));
+ PRECONDITION(pDefMD->IsRestored_NoLogging());
+ PRECONDITION(pExactMT->IsRestored_NoLogging());
+
+ // If the method descriptor belongs to a generic type then
+ // the input exact type must be an instantiation of that type.
+ // DISABLED PRECONDITION - too strict - the classes may be in
+ // a subtype relation to each other.
+ //
+ // PRECONDITION(!pDefMD->HasClassInstantiation() || pDefMD->GetMethodTable()->HasSameTypeDefAs(pExactMT));
+
+ // You may only request an BoxedEntryPointStub for an instance method on a value type
+ PRECONDITION(!forceBoxedEntryPoint || pExactMT->IsValueType());
+ PRECONDITION(!forceBoxedEntryPoint || !pDefMD->IsStatic());
+
+ // For remotable methods we better not be allowing instantiation parameters.
+ PRECONDITION(!forceRemotableMethod || !allowInstParam);
+
+ POSTCONDITION(((RETVAL == NULL) && !allowCreate) || CheckPointer(RETVAL));
+ POSTCONDITION(((RETVAL == NULL) && !allowCreate) || RETVAL->IsRestored());
+ POSTCONDITION(((RETVAL == NULL) && !allowCreate) || forceBoxedEntryPoint || !RETVAL->IsUnboxingStub());
+ POSTCONDITION(((RETVAL == NULL) && !allowCreate) || allowInstParam || !RETVAL->RequiresInstArg());
+ }
+ CONTRACT_END;
+
+ // Quick exit for the common cases where the result is the same as the primary MD we are given
+ if (!pDefMD->HasClassOrMethodInstantiation() &&
+ methodInst.IsEmpty() &&
+ !forceBoxedEntryPoint &&
+ !pDefMD->IsUnboxingStub())
+ {
+ // Make sure that pDefMD->GetMethodTable() and pExactMT are related types even
+ // if we took the fast path.
+ _ASSERTE(pDefMD->IsArray() || pDefMD->GetExactDeclaringType(pExactMT) != NULL);
+
+ RETURN pDefMD;
+ }
+
+ // Get the version of the method desc for the instantiated shared class:
+ // e.g. if pDefMD == List<T>.m()
+ // pExactMT = List<string>
+ // then pMDescInCanonMT = List<object>.m()
+ // or
+ // e.g. if pDefMD == List<T>.m<U>()
+ // pExactMT = List<string>
+ // then pMDescInCanonMT = List<object>.m<U>()
+
+ MethodDesc * pMDescInCanonMT = pDefMD;
+
+ // Some callers pass a pExactMT that is a subtype of a parent type of pDefMD.
+ // Find the actual exact parent of pDefMD.
+ pExactMT = pDefMD->GetExactDeclaringType(pExactMT);
+ _ASSERTE(pExactMT != NULL);
+
+ if (pDefMD->HasClassOrMethodInstantiation() || !methodInst.IsEmpty())
+ {
+ // General checks related to generics: arity (if any) must match and generic method
+ // instantiation (if any) must be well-formed.
+ if (pDefMD->GetNumGenericMethodArgs() != methodInst.GetNumArgs() ||
+ !Generics::CheckInstantiation(methodInst))
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ pMDescInCanonMT = pExactMT->GetCanonicalMethodTable()->GetParallelMethodDesc(pDefMD);
+
+ if (!allowCreate && (!pMDescInCanonMT->IsRestored() ||
+ !pMDescInCanonMT->GetMethodTable()->IsFullyLoaded()))
+
+ {
+ RETURN(NULL);
+ }
+
+ pMDescInCanonMT->CheckRestore(level);
+ }
+
+ // This case covers nearly all "normal" (i.e. non-associate) MethodDescs. Just return
+ // the MethodDesc in the canonical method table.
+ //
+ // Also, it will be taken for methods which acquire their type context from the "this" parameter
+ // - we don't need instantiating stubs for these.
+ if ( methodInst.IsEmpty()
+ && (allowInstParam || !pMDescInCanonMT->RequiresInstArg())
+ && (forceBoxedEntryPoint == pMDescInCanonMT->IsUnboxingStub())
+ && (!forceRemotableMethod || !pMDescInCanonMT->IsInterface()
+ || !pMDescInCanonMT->GetMethodTable()->IsSharedByGenericInstantiations()) )
+ {
+ RETURN(pMDescInCanonMT);
+ }
+
+ // Unboxing stubs
+ else if (forceBoxedEntryPoint)
+ {
+
+ // This assert isn't quite right; for example, the repro from NDPWhidbey 18737
+ // fires it, because we fetch a BoxedEntryPointStub for a virtual method on a struct
+ // when the uninstantiated MethodDesc for the generic virtual method actually
+ // qualifies as a BoxedEntryPointStub... Hence we weaken the assert a little.
+ //
+ // _ASSERTE(!pDefMD->IsUnboxingStub());
+ // _ASSERTE(pDefMD->IsGenericMethodDefinition() || !pDefMD->IsUnboxingStub());
+
+ // Unboxing stubs for non-generic methods and generic methods are
+ // subtly different... For non-generic methods we can look in the
+ // shared vtable, and then go to the hash table only if needed.
+ // Furthermore, even if we have to go to the hash table we still base
+ // the BoxedEntryPointStub on an underlying _shared_ method descriptor.
+ //
+ // For generic methods we must build a BoxedEntryPointStub that calls an
+ // underlying instantiating stub. The underlying instantiating stub
+ // will be an _exact_ method descriptor.
+ MethodDesc *pResultMD;
+ if (methodInst.IsEmpty())
+ {
+ // First search for the unboxing MD in the shared vtable for the value type
+ pResultMD = FindTightlyBoundUnboxingStub(pMDescInCanonMT);
+
+ // Verify that we get the same result by alternative method. There is a possibility
+ // that there is no associated unboxing stub, and FindTightlyBoundUnboxingStub takes
+ // this into account but the _DEBUG version does not, so only use it if the method
+ // returned is actually different.
+ _ASSERTE(pResultMD == pMDescInCanonMT ||
+ pResultMD == FindTightlyBoundUnboxingStub_DEBUG(pMDescInCanonMT));
+
+ if (pResultMD != NULL)
+ {
+ _ASSERTE(pResultMD->IsRestored() && pResultMD->GetMethodTable()->IsFullyLoaded());
+ g_IBCLogger.LogMethodDescAccess(pResultMD);
+ RETURN(pResultMD);
+ }
+
+ MethodTable *pRepMT = pMDescInCanonMT->GetMethodTable();
+ mdMethodDef methodDef = pDefMD->GetMemberDef();
+
+ Module *pLoaderModule = ClassLoader::ComputeLoaderModule(pRepMT, methodDef, methodInst);
+ LoaderAllocator* pAllocator=pLoaderModule->GetLoaderAllocator();
+
+ InstMethodHashTable* pTable = pLoaderModule->GetInstMethodHashTable();
+ // If we didn't find it there then go to the hash table
+ pResultMD = pTable->FindMethodDesc(TypeHandle(pRepMT),
+ methodDef,
+ TRUE /* forceBoxedEntryPoint */,
+ Instantiation(),
+ FALSE /* no inst param */);
+
+ // If we didn't find it then create it...
+ if (!pResultMD)
+ {
+ // !allowCreate ==> GC_NOTRIGGER ==> no entering Crst
+ if (!allowCreate)
+ {
+ RETURN(NULL);
+ }
+
+ CrstHolder ch(&pLoaderModule->m_InstMethodHashTableCrst);
+
+ // Check whether another thread beat us to it!
+ pResultMD = pTable->FindMethodDesc(TypeHandle(pRepMT),
+ methodDef,
+ TRUE,
+ Instantiation(),
+ FALSE);
+ if (pResultMD == NULL)
+ {
+ IBCLoggerAwareAllocMemTracker amt;
+
+ pResultMD = CreateMethodDesc(pAllocator,
+ pRepMT,
+ pMDescInCanonMT,
+ mcInstantiated,
+ FALSE /* fNativeCodeSlot */,
+ FALSE /* fComPlusCallInfo */,
+ &amt);
+
+ // Indicate that this is a stub method which takes a BOXed this pointer.
+ // A BoxedEntryPointStub may still be an InstantiatedMethodDesc
+ pResultMD->SetIsUnboxingStub();
+ pResultMD->AsInstantiatedMethodDesc()->SetupWrapperStubWithInstantiations(pMDescInCanonMT, NULL, NULL);
+
+ pResultMD->SetTemporaryEntryPoint(pAllocator, &amt);
+
+ amt.SuppressRelease();
+
+ // Verify that we are not creating redundant MethodDescs
+ _ASSERTE(!pResultMD->IsTightlyBoundToMethodTable());
+
+ // Add it to the table
+ pTable->InsertMethodDesc(pResultMD);
+ }
+
+ // CrstHolder goes out of scope here
+ }
+
+ }
+ else
+ {
+ mdMethodDef methodDef = pDefMD->GetMemberDef();
+
+ Module *pLoaderModule = ClassLoader::ComputeLoaderModule(pExactMT, methodDef, methodInst);
+ LoaderAllocator* pAllocator = pLoaderModule->GetLoaderAllocator();
+
+ InstMethodHashTable* pTable = pLoaderModule->GetInstMethodHashTable();
+ // First check the hash table...
+ pResultMD = pTable->FindMethodDesc(TypeHandle(pExactMT),
+ methodDef,
+ TRUE, /* forceBoxedEntryPoint */
+ methodInst,
+ FALSE /* no inst param */);
+
+ if (!pResultMD)
+ {
+ // !allowCreate ==> GC_NOTRIGGER ==> no entering Crst
+ if (!allowCreate)
+ {
+ RETURN(NULL);
+ }
+
+ // Enter the critical section *after* we've found or created the non-unboxing instantiating stub (else we'd have a race)
+ CrstHolder ch(&pLoaderModule->m_InstMethodHashTableCrst);
+
+ // Check whether another thread beat us to it!
+ pResultMD = pTable->FindMethodDesc(TypeHandle(pExactMT),
+ methodDef,
+ TRUE, /* forceBoxedEntryPoint */
+ methodInst,
+ FALSE /* no inst param */);
+
+ if (pResultMD == NULL)
+ {
+ // Recursively get the non-unboxing instantiating stub. Thus we chain an unboxing
+ // stub with an instantiating stub.
+ MethodDesc* pNonUnboxingStub=
+ MethodDesc::FindOrCreateAssociatedMethodDesc(pDefMD,
+ pExactMT,
+ FALSE /* not Unboxing */,
+ methodInst,
+ FALSE);
+
+ _ASSERTE(pNonUnboxingStub->GetClassification() == mcInstantiated);
+ _ASSERTE(!pNonUnboxingStub->RequiresInstArg());
+ _ASSERTE(!pNonUnboxingStub->IsUnboxingStub());
+
+ IBCLoggerAwareAllocMemTracker amt;
+
+ _ASSERTE(pDefMD->GetClassification() == mcInstantiated);
+
+ pResultMD = CreateMethodDesc(pAllocator,
+ pExactMT,
+ pNonUnboxingStub,
+ mcInstantiated,
+ FALSE /* fNativeCodeSlot */,
+ FALSE /* fComPlusCallInfo */,
+ &amt);
+
+ pResultMD->SetIsUnboxingStub();
+ pResultMD->AsInstantiatedMethodDesc()->SetupWrapperStubWithInstantiations(pNonUnboxingStub,
+ pNonUnboxingStub->GetNumGenericMethodArgs(),
+ (TypeHandle *)pNonUnboxingStub->GetMethodInstantiation().GetRawArgs());
+
+ pResultMD->SetTemporaryEntryPoint(pAllocator, &amt);
+
+ amt.SuppressRelease();
+
+ // Verify that we are not creating redundant MethodDescs
+ _ASSERTE(!pResultMD->IsTightlyBoundToMethodTable());
+
+ pTable->InsertMethodDesc(pResultMD);
+ }
+
+ // CrstHolder goes out of scope here
+ }
+ }
+ _ASSERTE(pResultMD);
+
+ if (!allowCreate && (!pResultMD->IsRestored() || !pResultMD->GetMethodTable()->IsFullyLoaded()))
+ {
+ RETURN(NULL);
+ }
+
+ pResultMD->CheckRestore(level);
+ _ASSERTE(pResultMD->IsUnboxingStub());
+ _ASSERTE(!pResultMD->IsInstantiatingStub());
+ RETURN(pResultMD);
+ }
+
+
+ // Now all generic method instantiations and static/shared-struct-instance-method wrappers...
+ else
+ {
+ _ASSERTE(!forceBoxedEntryPoint);
+
+ mdMethodDef methodDef = pDefMD->GetMemberDef();
+ Module *pModule = pDefMD->GetModule();
+
+ // Some unboxed entry points are attached to canonical method tables. This is because
+ // we have to fill in vtables and/or dispatch maps at load time,
+ // and boxed entry points are created to do this. (note vtables and dispatch maps
+ // are only created for canonical instantiations). These boxed entry points
+ // in turn refer to unboxed entry points.
+
+ if (// Check if we're looking for something at the canonical instantiation
+ (allowInstParam || pExactMT->IsCanonicalMethodTable()) &&
+ // Only value types have BoxedEntryPointStubs in the canonical method table
+ pExactMT->IsValueType() &&
+ // The only generic methods whose BoxedEntryPointStubs are in the canonical method table
+ // are those open MethodDescs at the "typical" instantiation, e.g.
+ // VC<int>.m<T>
+ // <NICE> This is probably actually not needed </NICE>
+ ClassLoader::IsTypicalInstantiation(pModule, methodDef, methodInst)
+
+ )
+ {
+ MethodDesc * pResultMD = FindTightlyBoundWrappedMethodDesc(pMDescInCanonMT);
+
+ // Verify that we get the same result by alternative method. There is a possibility
+ // that this is not an unboxing stub, and FindTightlyBoundWrappedMethodDesc takes
+ // this into account but the _DEBUG version does not, so only use it if the method
+ // returned is actually different.
+ _ASSERTE(pResultMD == pMDescInCanonMT ||
+ pResultMD == FindTightlyBoundWrappedMethodDesc_DEBUG(pMDescInCanonMT));
+
+ if (pResultMD != NULL)
+ {
+ _ASSERTE(pResultMD->IsRestored() && pResultMD->GetMethodTable()->IsFullyLoaded());
+
+ g_IBCLogger.LogMethodDescAccess(pResultMD);
+
+ if (allowInstParam || !pResultMD->RequiresInstArg())
+ {
+ RETURN(pResultMD);
+ }
+ }
+ }
+
+ // Are either the generic type arguments or the generic method arguments shared?
+ BOOL sharedInst =
+ pExactMT->GetCanonicalMethodTable()->IsSharedByGenericInstantiations()
+ || ClassLoader::IsSharableInstantiation(methodInst);
+
+ // Is it the "typical" instantiation in the correct type that does not require wrapper?
+ if (!sharedInst &&
+ pExactMT == pMDescInCanonMT->GetMethodTable() &&
+ ClassLoader::IsTypicalInstantiation(pModule, methodDef, methodInst))
+ {
+ _ASSERTE(!pMDescInCanonMT->IsUnboxingStub());
+ RETURN(pMDescInCanonMT);
+ }
+
+ // OK, so we now know the thing we're looking for can only be found in the MethodDesc table.
+
+ // If getWrappedCode == true, we are looking for a wrapped MethodDesc
+
+ BOOL getWrappedCode = allowInstParam && sharedInst;
+ BOOL getWrappedThenStub = !allowInstParam && sharedInst;
+
+ CQuickBytes qbRepInst;
+ TypeHandle *repInst = NULL;
+ if (getWrappedCode || getWrappedThenStub)
+ {
+ // Canonicalize the type arguments.
+ DWORD cbAllocaSize = 0;
+ if (!ClrSafeInt<DWORD>::multiply(methodInst.GetNumArgs(), sizeof(TypeHandle), cbAllocaSize))
+ ThrowHR(COR_E_OVERFLOW);
+
+ repInst = reinterpret_cast<TypeHandle *>(qbRepInst.AllocThrows(cbAllocaSize));
+
+ for (DWORD i = 0; i < methodInst.GetNumArgs(); i++)
+ {
+ repInst[i] = ClassLoader::CanonicalizeGenericArg(methodInst[i]);
+ }
+ }
+
+ // <NICE> These paths can probably be merged together more nicely, and the lookup-lock-lookup pattern made much
+ // more obvious </NICE>
+ InstantiatedMethodDesc *pInstMD;
+ if (getWrappedCode)
+ {
+ // Get the underlying shared code using the canonical instantiations
+ pInstMD =
+ InstantiatedMethodDesc::FindLoadedInstantiatedMethodDesc(pExactMT->GetCanonicalMethodTable(),
+ methodDef,
+ Instantiation(repInst, methodInst.GetNumArgs()),
+ TRUE);
+
+ // No - so create one.
+ if (pInstMD == NULL)
+ {
+ if (!allowCreate)
+ {
+ RETURN(NULL);
+ }
+
+ pInstMD = InstantiatedMethodDesc::NewInstantiatedMethodDesc(pExactMT->GetCanonicalMethodTable(),
+ pMDescInCanonMT,
+ NULL,
+ Instantiation(repInst, methodInst.GetNumArgs()),
+ TRUE);
+ }
+ }
+ else if (getWrappedThenStub)
+ {
+ // See if we've already got the instantiated method desc for this one.
+ pInstMD =
+ InstantiatedMethodDesc::FindLoadedInstantiatedMethodDesc(pExactMT,
+ methodDef,
+ methodInst,
+ FALSE);
+
+ // No - so create one. Go fetch the shared one first
+ if (pInstMD == NULL)
+ {
+ if (!allowCreate)
+ {
+ RETURN(NULL);
+ }
+
+ // This always returns the shared code. Repeat the original call except with
+ // approximate params and allowInstParam=true
+ MethodDesc* pWrappedMD = FindOrCreateAssociatedMethodDesc(pDefMD,
+ pExactMT->GetCanonicalMethodTable(),
+ FALSE,
+ Instantiation(repInst, methodInst.GetNumArgs()),
+ TRUE);
+
+ _ASSERTE(pWrappedMD->IsSharedByGenericInstantiations());
+ _ASSERTE(!methodInst.IsEmpty() || !pWrappedMD->IsSharedByGenericMethodInstantiations());
+
+ pInstMD = InstantiatedMethodDesc::NewInstantiatedMethodDesc(pExactMT,
+ pMDescInCanonMT,
+ pWrappedMD,
+ methodInst,
+ FALSE);
+ }
+ }
+ else
+ {
+ // See if we've already got the instantiated method desc for this one.
+ // If looking for shared code use the representative inst.
+ pInstMD =
+ InstantiatedMethodDesc::FindLoadedInstantiatedMethodDesc(pExactMT,
+ methodDef,
+ methodInst,
+ FALSE);
+
+ // No - so create one.
+ if (pInstMD == NULL)
+ {
+ if (!allowCreate)
+ {
+ RETURN(NULL);
+ }
+
+ pInstMD = InstantiatedMethodDesc::NewInstantiatedMethodDesc(pExactMT,
+ pMDescInCanonMT,
+ NULL,
+ methodInst,
+ FALSE);
+ }
+ }
+ _ASSERTE(pInstMD);
+
+ if (!allowCreate && (!pInstMD->IsRestored() || !pInstMD->GetMethodTable()->IsFullyLoaded()))
+ {
+ RETURN(NULL);
+ }
+
+ pInstMD->CheckRestore(level);
+
+ RETURN(pInstMD);
+ }
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+// Normalize the methoddesc for reflection
+/*static*/ MethodDesc* MethodDesc::FindOrCreateAssociatedMethodDescForReflection(
+ MethodDesc *pMethod,
+ TypeHandle instType,
+ Instantiation methodInst)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS; // Because allowCreate is TRUE
+ PRECONDITION(CheckPointer(pMethod));
+ }
+ CONTRACTL_END;
+
+ MethodDesc *pInstMD = pMethod;
+
+ // no stubs for TypeDesc
+ if (instType.IsTypeDesc())
+ return pInstMD;
+
+ MethodTable* pMT = instType.AsMethodTable();
+
+ if (!methodInst.IsEmpty())
+ {
+ // method.BindGenericParameters() was called and we need to retrieve an instantiating stub
+
+ // pMethod is not necessarily a generic method definition; ResolveMethod could pass in an
+ // instantiated generic method.
+ _ASSERTE(pMethod->HasMethodInstantiation());
+
+ if (methodInst.GetNumArgs() != pMethod->GetNumGenericMethodArgs())
+ COMPlusThrow(kArgumentException);
+
+ // we base the creation of an unboxing stub on whether the original method was one already;
+ // that keeps the reflection logic the same for value types
+ pInstMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pMethod,
+ pMT,
+ pMethod->IsUnboxingStub(),
+ methodInst,
+ FALSE, /* no allowInstParam */
+ TRUE /* force remotable method (i.e. inst wrappers for non-generic methods on generic interfaces) */);
+ }
+ else if ( !pMethod->HasMethodInstantiation() &&
+ ( instType.IsValueType() ||
+ ( instType.HasInstantiation() &&
+ !instType.IsGenericTypeDefinition() &&
+ ( instType.IsInterface() || pMethod->IsStatic() ) ) ) )
+ {
+ //
+ // Called at MethodInfos cache creation
+ // the method is either a normal method or a generic method definition
+ // Also called at MethodBase.GetMethodBaseFromHandle
+ // the method is either a normal method, a generic method definition, or an instantiated generic method
+ // Needs an instantiating stub if
+ // - non-generic static method on a generic class
+ // - non-generic instance method on a struct
+ // - non-generic method on a generic interface
+ //
+
+ // we base the creation of an unboxing stub on whether the original method was one already;
+ // that keeps the reflection logic the same for value types
+
+ // we need unboxing stubs for virtual methods on value types unless the method is generic
+ BOOL fNeedUnboxingStub = pMethod->IsUnboxingStub() ||
+ ( instType.IsValueType() && pMethod->IsVirtual() );
+
+ pInstMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pMethod, /* the original MD */
+ pMT, /* the method table */
+ fNeedUnboxingStub, /* create boxing stub */
+ Instantiation(), /* no generic instantiation */
+ FALSE, /* no allowInstParam */
+ TRUE /* force remotable method (i.e. inst wrappers for non-generic methods on generic interfaces) */);
+ }
+
+ return pInstMD;
+}
+
+// Given a typical method desc (i.e. instantiated at formal type
+// parameters if it is a generic method or lives in a generic class),
+// instantiate any type parameters at <__Canon>
+//
+// NOTE: If allowCreate is FALSE, typically you must also set ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE()
+// allowCreate may be set to FALSE to enforce that the method searched
+// should already be in existence - thus preventing creation and GCs during
+// inappropriate times.
+//
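+// For example (illustrative): given the typical definition C<T>.m<U>, this returns
+// (or creates) the shared MethodDesc for C<__Canon>.m<__Canon>.
+//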
+MethodDesc * MethodDesc::FindOrCreateTypicalSharedInstantiation(BOOL allowCreate /* = TRUE */)
+{
+ CONTRACT(MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(IsTypicalMethodDefinition());
+ POSTCONDITION(CheckPointer(RETVAL));
+ POSTCONDITION(RETVAL->IsTypicalSharedInstantiation());
+ }
+ CONTRACT_END
+
+ MethodDesc *pMD = this;
+ MethodTable *pMT = pMD->GetMethodTable();
+
+ // First instantiate the declaring type at <__Canon,...,__Canon>
+ DWORD nGenericClassArgs = pMT->GetNumGenericArgs();
+ DWORD dwAllocSize = 0;
+ if (!ClrSafeInt<DWORD>::multiply(sizeof(TypeHandle), nGenericClassArgs, dwAllocSize))
+ ThrowHR(COR_E_OVERFLOW);
+
+ CQuickBytes qbGenericClassArgs;
+ TypeHandle* pGenericClassArgs = reinterpret_cast<TypeHandle*>(qbGenericClassArgs.AllocThrows(dwAllocSize));
+
+ for (DWORD i = 0; i < nGenericClassArgs; i++)
+ {
+ pGenericClassArgs[i] = TypeHandle(g_pCanonMethodTableClass);
+ }
+
+ pMT = ClassLoader::LoadGenericInstantiationThrowing(pMT->GetModule(),
+ pMT->GetCl(),
+ Instantiation(pGenericClassArgs, nGenericClassArgs),
+ allowCreate ? ClassLoader::LoadTypes : ClassLoader::DontLoadTypes
+ ).GetMethodTable();
+
+ if (pMT == NULL)
+ {
+ _ASSERTE(!allowCreate);
+ return NULL;
+ }
+
+ // Now instantiate the method at <__Canon,...,__Canon>, creating the shared code.
+ // This will not create an instantiating stub just yet.
+ DWORD nGenericMethodArgs = pMD->GetNumGenericMethodArgs();
+ CQuickBytes qbGenericMethodArgs;
+ TypeHandle *genericMethodArgs = NULL;
+
+ // The rest of this method instantiates a generic method
+ // Instantiate at "__Canon" if a NULL "genericMethodArgs" is given
+ if (nGenericMethodArgs)
+ {
+ dwAllocSize = 0;
+ if (!ClrSafeInt<DWORD>::multiply(sizeof(TypeHandle), nGenericMethodArgs, dwAllocSize))
+ ThrowHR(COR_E_OVERFLOW);
+
+ genericMethodArgs = reinterpret_cast<TypeHandle*>(qbGenericMethodArgs.AllocThrows(dwAllocSize));
+
+ for (DWORD i =0; i < nGenericMethodArgs; i++)
+ genericMethodArgs[i] = TypeHandle(g_pCanonMethodTableClass);
+ }
+
+ RETURN(MethodDesc::FindOrCreateAssociatedMethodDesc(pMD,
+ pMT,
+ FALSE, /* don't get unboxing entry point */
+ Instantiation(genericMethodArgs, nGenericMethodArgs),
+ TRUE,
+ FALSE,
+ allowCreate));
+}
+
+//@GENERICSVER: Set the typical (ie. formal) instantiation
+void InstantiatedMethodDesc::SetupGenericMethodDefinition(IMDInternalImport *pIMDII,
+ LoaderAllocator* pAllocator,
+ AllocMemTracker *pamTracker,
+ Module *pModule,
+ mdMethodDef tok)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(pIMDII));
+ }
+ CONTRACTL_END;
+
+ // The first field is never used
+ m_wFlags2 = GenericMethodDefinition | (m_wFlags2 & ~KindMask);
+
+ //@GENERICSVER: allocate space for and initialize the typical instantiation
+ //we share the typical instantiation among all instantiations by placing it in the generic method desc
+ LOG((LF_JIT, LL_INFO10000, "GENERICSVER: Initializing typical method instantiation with type handles\n"));
+ mdGenericParam tkTyPar;
+ HENUMInternalHolder hEnumTyPars(pIMDII);
+ hEnumTyPars.EnumInit(mdtGenericParam, tok);
+
+ // Initialize the typical instantiation
+ DWORD numTyPars = hEnumTyPars.EnumGetCount();
+ if (!FitsIn<WORD>(numTyPars))
+ {
+ LPCSTR szMethodName;
+ if (FAILED(pIMDII->GetNameOfMethodDef(tok, &szMethodName)))
+ {
+ szMethodName = "Invalid MethodDef record";
+ }
+ pModule->GetAssembly()->ThrowTypeLoadException(szMethodName, IDS_CLASSLOAD_TOOMANYGENERICARGS);
+ }
+ m_wNumGenericArgs = static_cast<WORD>(numTyPars);
+ _ASSERTE(m_wNumGenericArgs > 0);
+
+ S_SIZE_T dwAllocSize = S_SIZE_T(numTyPars) * S_SIZE_T(sizeof(TypeHandle));
+
+ // the memory allocated for m_pMethInst will be freed if the declaring type fails to load
+ m_pPerInstInfo = (Dictionary *) pamTracker->Track(pAllocator->GetLowFrequencyHeap()->AllocMem(dwAllocSize));
+
+ TypeHandle * pInstDest = (TypeHandle *)m_pPerInstInfo;
+ for(unsigned int i = 0; i < numTyPars; i++)
+ {
+ hEnumTyPars.EnumNext(&tkTyPar);
+
+ // code:Module.m_GenericParamToDescMap maps generic parameter RIDs to TypeVarTypeDesc
+ // instances so that we do not leak by allocating them all over again, if the declaring
+ // type repeatedly fails to load.
+ TypeVarTypeDesc *pTypeVarTypeDesc = pModule->LookupGenericParam(tkTyPar);
+ if (pTypeVarTypeDesc == NULL)
+ {
+ // Do NOT use pamTracker for this memory as we need it stay allocated even if the load fails.
+ void *mem = (void *)pAllocator->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(TypeVarTypeDesc)));
+ pTypeVarTypeDesc = new (mem) TypeVarTypeDesc(pModule, tok, i, tkTyPar);
+
+ // No race here - the row in GenericParam table is owned exclusively by this method and we
+ // are holding a lock preventing other threads from loading the declaring type and setting
+ // up this method desc.
+ pModule->StoreGenericParamThrowing(tkTyPar, pTypeVarTypeDesc);
+ }
+ pInstDest[i] = TypeHandle(pTypeVarTypeDesc);
+ }
+ LOG((LF_JIT, LL_INFO10000, "GENERICSVER: Initialized typical method instantiation with %d type handles\n",numTyPars));
+}
+
+void InstantiatedMethodDesc::SetupWrapperStubWithInstantiations(MethodDesc* wrappedMD,DWORD numGenericArgs, TypeHandle *pInst)
+{
+ WRAPPER_NO_CONTRACT;
+
+ //_ASSERTE(sharedMD->IMD_IsSharedByGenericMethodInstantiations());
+
+ m_pWrappedMethodDesc.SetValue(wrappedMD);
+ m_wFlags2 = WrapperStubWithInstantiations | (m_wFlags2 & ~KindMask);
+ m_pPerInstInfo = (Dictionary*)pInst;
+
+ _ASSERTE(FitsIn<WORD>(numGenericArgs));
+ m_wNumGenericArgs = static_cast<WORD>(numGenericArgs);
+
+ _ASSERTE(IMD_IsWrapperStubWithInstantiations());
+ _ASSERTE(((MethodDesc *) this)->IsInstantiatingStub() || ((MethodDesc *) this)->IsUnboxingStub());
+}
+
+
+// Set the instantiation in the per-inst section (this is actually a dictionary)
+void InstantiatedMethodDesc::SetupSharedMethodInstantiation(DWORD numGenericArgs, TypeHandle *pPerInstInfo, DictionaryLayout *pDL)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(numGenericArgs != 0);
+ // Initially the dictionary layout is empty
+ m_wFlags2 = SharedMethodInstantiation | (m_wFlags2 & ~KindMask);
+ m_pPerInstInfo = (Dictionary *)pPerInstInfo;
+
+ _ASSERTE(FitsIn<WORD>(numGenericArgs));
+ m_wNumGenericArgs = static_cast<WORD>(numGenericArgs);
+
+ m_pDictLayout = pDL;
+
+
+ _ASSERTE(IMD_IsSharedByGenericMethodInstantiations());
+}
+
+// Set the instantiation in the per-inst section (this is actually a dictionary)
+void InstantiatedMethodDesc::SetupUnsharedMethodInstantiation(DWORD numGenericArgs, TypeHandle *pInst)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // The first field is never used
+ m_wFlags2 = UnsharedMethodInstantiation | (m_wFlags2 & ~KindMask);
+ m_pPerInstInfo = (Dictionary *)pInst;
+
+ _ASSERTE(FitsIn<WORD>(numGenericArgs));
+ m_wNumGenericArgs = static_cast<WORD>(numGenericArgs);
+
+ _ASSERTE(!IsUnboxingStub());
+ _ASSERTE(!IsInstantiatingStub());
+ _ASSERTE(!IMD_IsWrapperStubWithInstantiations());
+ _ASSERTE(!IMD_IsSharedByGenericMethodInstantiations());
+ _ASSERTE(!IMD_IsGenericMethodDefinition());
+}
+
+
+// A type variable is bounded to some depth iff it
+// has no chain of type variable bounds of that depth.
+// We use this as a simple test for circularity among class and method type parameter constraints:
+// the constraints on a set of n variables are well-founded iff every variable is bounded by n.
+// The test is cheap for the common case that few, if any, constraints are variables.
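+//
+// For example (C#-like, illustrative):
+//   void M<T, U>() where T : U where U : T { }   // circular: neither variable is bounded by 2
+//   void N<T, U>() where T : U { }               // well-founded: both variables are bounded by 2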
+BOOL Bounded(TypeVarTypeDesc *tyvar, DWORD depth) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(tyvar));
+ } CONTRACTL_END;
+
+ if (depth == 0)
+ {
+ return FALSE;
+ }
+
+ DWORD numConstraints;
+ TypeHandle *constraints = tyvar->GetConstraints(&numConstraints, CLASS_DEPENDENCIES_LOADED);
+ for (unsigned i = 0; i < numConstraints; i++)
+ {
+ TypeHandle constraint = constraints[i];
+ if (constraint.IsGenericVariable())
+ {
+ TypeVarTypeDesc* constraintVar = (TypeVarTypeDesc*) constraint.AsTypeDesc();
+ //only consider bounds between same sort of variables (VAR or MVAR)
+ if (tyvar->GetInternalCorElementType() == constraintVar->GetInternalCorElementType())
+ {
+ if (!Bounded(constraintVar, depth - 1))
+ return FALSE;
+ }
+ }
+ }
+ return TRUE;
+}
+
+void MethodDesc::LoadConstraintsForTypicalMethodDefinition(BOOL *pfHasCircularClassConstraints, BOOL *pfHasCircularMethodConstraints, ClassLoadLevel level/* = CLASS_LOADED*/)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(IsTypicalMethodDefinition());
+ PRECONDITION(CheckPointer(pfHasCircularClassConstraints));
+ PRECONDITION(CheckPointer(pfHasCircularMethodConstraints));
+ } CONTRACTL_END;
+
+ *pfHasCircularClassConstraints = FALSE;
+ *pfHasCircularMethodConstraints = FALSE;
+
+ // Force a load of the constraints on the type parameters
+ Instantiation classInst = GetClassInstantiation();
+ for (DWORD i = 0; i < classInst.GetNumArgs(); i++)
+ {
+ TypeVarTypeDesc* tyvar = classInst[i].AsGenericVariable();
+ _ASSERTE(tyvar != NULL);
+ tyvar->LoadConstraints(level);
+ }
+
+ Instantiation methodInst = GetMethodInstantiation();
+ for (DWORD i = 0; i < methodInst.GetNumArgs(); i++)
+ {
+ TypeVarTypeDesc* tyvar = methodInst[i].AsGenericVariable();
+ _ASSERTE(tyvar != NULL);
+ tyvar->LoadConstraints(level);
+
+ VOID DoAccessibilityCheckForConstraints(MethodTable *pAskingMT, TypeVarTypeDesc *pTyVar, UINT resIDWhy);
+ DoAccessibilityCheckForConstraints(GetMethodTable(), tyvar, E_ACCESSDENIED);
+ }
+
+ // reject circular class constraints
+ for (DWORD i = 0; i < classInst.GetNumArgs(); i++)
+ {
+ TypeVarTypeDesc* tyvar = classInst[i].AsGenericVariable();
+ _ASSERTE(tyvar != NULL);
+ if(!Bounded(tyvar, classInst.GetNumArgs()))
+ {
+ *pfHasCircularClassConstraints = TRUE;
+ }
+ }
+
+ // reject circular method constraints
+ for (DWORD i = 0; i < methodInst.GetNumArgs(); i++)
+ {
+ TypeVarTypeDesc* tyvar = methodInst[i].AsGenericVariable();
+ _ASSERTE(tyvar != NULL);
+ if(!Bounded(tyvar, methodInst.GetNumArgs()))
+ {
+ *pfHasCircularMethodConstraints = TRUE;
+ }
+ }
+
+ return;
+}
+
+
+#ifdef FEATURE_PREJIT
+
+void MethodDesc::PrepopulateDictionary(DataImage * image, BOOL nonExpansive)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Note the strong similarity to MethodTable::PrepopulateDictionary
+ if (GetMethodDictionary())
+ {
+ LOG((LF_JIT, LL_INFO10000, "GENERICS: Prepopulating dictionary for MD %s\n", this));
+ GetMethodDictionary()->PrepopulateDictionary(this, NULL, nonExpansive);
+ }
+}
+
+#endif // FEATURE_PREJIT
+
+#ifndef DACCESS_COMPILE
+
+BOOL MethodDesc::SatisfiesMethodConstraints(TypeHandle thParent, BOOL fThrowIfNotSatisfied/* = FALSE*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ // nice: cache (positive?) result in (instantiated) methoddesc
+ // caveat: this would be unsafe for instantiated method desc living in generic,
+ // hence possibly shared classes (with varying class instantiations).
+
+ if (!HasMethodInstantiation())
+ return TRUE;
+
+ Instantiation methodInst = LoadMethodInstantiation();
+ Instantiation typicalInst = LoadTypicalMethodDefinition()->GetMethodInstantiation();
+
+ // NB: according to the constructor's signature, thParent should be the declaring type,
+ // but the code appears to admit derived types too.
+ SigTypeContext typeContext(this,thParent);
+
+ for (DWORD i = 0; i < methodInst.GetNumArgs(); i++)
+ {
+ TypeHandle thArg = methodInst[i];
+ _ASSERTE(!thArg.IsNull());
+
+ TypeVarTypeDesc* tyvar = (TypeVarTypeDesc*) (typicalInst[i].AsTypeDesc());
+ _ASSERTE(tyvar != NULL);
+ _ASSERTE(TypeFromToken(tyvar->GetTypeOrMethodDef()) == mdtMethodDef);
+
+ tyvar->LoadConstraints(); //TODO: is this necessary for anything but the typical method?
+
+ if (!tyvar->SatisfiesConstraints(&typeContext,thArg))
+ {
+ if (fThrowIfNotSatisfied)
+ {
+ SString sParentName;
+ TypeString::AppendType(sParentName, thParent);
+
+ SString sMethodName(SString::Utf8, GetName());
+
+ SString sActualParamName;
+ TypeString::AppendType(sActualParamName, methodInst[i]);
+
+ SString sFormalParamName;
+ TypeString::AppendType(sFormalParamName, typicalInst[i]);
+
+ COMPlusThrow(kVerificationException,
+ IDS_EE_METHOD_CONSTRAINTS_VIOLATION,
+ sParentName.GetUnicode(),
+ sMethodName.GetUnicode(),
+ sActualParamName.GetUnicode(),
+ sFormalParamName.GetUnicode()
+ );
+
+
+ }
+ return FALSE;
+ }
+
+ }
+ return TRUE;
+}
+
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/gms.h b/src/vm/gms.h
new file mode 100644
index 0000000000..d0af22f45d
--- /dev/null
+++ b/src/vm/gms.h
@@ -0,0 +1,7 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "gmscpu.h"
diff --git a/src/vm/h2inc.pl b/src/vm/h2inc.pl
new file mode 100644
index 0000000000..78ff0cb7b9
--- /dev/null
+++ b/src/vm/h2inc.pl
@@ -0,0 +1,65 @@
+# ==++==
+#
+# Copyright (c) Microsoft. All rights reserved.
+# Licensed under the MIT license. See LICENSE file in the project root for full license information.
+#
+# ==--==
+
+# C to MASM include file translator
+# This is a replacement for the deprecated h2inc tool that used to be part of VS.
+
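+# Illustrative examples (not part of the original header) of the rewrite
+# rules implemented below: an input line such as
+#     #define FOO_FLAG 0x10
+# is emitted as a MASM equate followed by the original text,
+#     FOO_FLAG EQU 010h
+#     #define FOO_FLAG 0x10
+# while a purely symbolic value with no numeric literal, e.g.
+#     #define PTR_REG rax
+# becomes a text equate:
+#     PTR_REG TEXTEQU <rax>
+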
+use File::Basename;
+
+sub ProcessFile($) {
+ my ($input_file) = @_;
+
+ local *INPUT_FILE;
+ if (!open(INPUT_FILE, $input_file))
+ {
+ print "#error: File can not be opened: $input_file\n";
+ return;
+ }
+
+ print ("// File start: $input_file\n");
+
+ while(<INPUT_FILE>) {
+ # Skip all pragmas
+ if (m/^\s*#\s*pragma/) {
+ next;
+ }
+
+ # Expand includes.
+ if (m/\s*#\s*include\s*\"(.+)\"/) {
+ ProcessFile(dirname($input_file) . "/" . $1);
+ next;
+ }
+
+ # Augment #defines with their MASM equivalent
+ if (m/^\s*#\s*define\s+(\S+)\s+(.*)/) {
+ my $name = $1;
+ my $value = $2;
+
+ # Note that we do not handle multiline constants
+
+ # Strip comments from value
+ $value =~ s/\/\/.*//;
+ $value =~ s/\/\*.*\*\///g;
+
+ # Strip whitespaces from value
+ $value =~ s/\s+$//;
+
+ # ignore #defines with arguments
+ if (!($name =~ m/\(/)) {
+ my $number = 0;
+ $number |= ($value =~ s/\b0x(\w+)\b/0${1}h/g); # Convert hex constants
+ $number |= ($value =~ s/(-?\b\d+\b)/${1}t/g); # Convert dec constants
+ print $number ? "$name EQU $value\n" : "$name TEXTEQU <$value>\n";
+ }
+ }
+ print;
+ }
+
+ print ("// File end: $input_file\n");
+}
+
+ProcessFile($ARGV[0]);
diff --git a/src/vm/h2inc.ps1 b/src/vm/h2inc.ps1
new file mode 100644
index 0000000000..40dbe1d72c
--- /dev/null
+++ b/src/vm/h2inc.ps1
@@ -0,0 +1,65 @@
+# ==++==
+#
+# Copyright (c) Microsoft. All rights reserved.
+# Licensed under the MIT license. See LICENSE file in the project root for full license information.
+#
+# ==--==
+
+# C to MASM include file translator
+# This is a replacement for the deprecated h2inc tool that used to be part of VS.
+
+Function ProcessFile($filePath) {
+
+ Write-Output "// File start: $filePath"
+
+ Get-Content $filePath | ForEach-Object {
+
+ if ($_ -match "^\s*#\spragma") {
+ # Ignore pragmas
+ return
+ }
+
+ if ($_ -match "^\s*#\s*include\s*`"(.*)`"")
+ {
+ # Expand includes.
+ ProcessFile(Join-Path (Split-Path -Parent $filePath) $Matches[1])
+ return
+ }
+
+ if ($_ -match "^\s*#define\s+(\S+)\s*(.*)")
+ {
+ # Augment #defines with their MASM equivalent
+ $name = $Matches[1]
+ $value = $Matches[2]
+
+ # Note that we do not handle multiline constants
+
+ # Strip comments from value
+ $value = $value -replace "//.*", ""
+ $value = $value -replace "/\*.*\*/", ""
+
+ # Strip whitespaces from value
+ $value = $value -replace "\s+$", ""
+
+ # ignore #defines with arguments
+ if ($name -notmatch "\(") {
+ $HEX_NUMBER_PATTERN = "\b0x(\w+)\b"
+ $DECIMAL_NUMBER_PATTERN = "(-?\b\d+\b)"
+
+ if ($value -match $HEX_NUMBER_PATTERN -or $value -match $DECIMAL_NUMBER_PATTERN) {
+ $value = $value -replace $HEX_NUMBER_PATTERN, "0`$1h" # Convert hex constants
+ $value = $value -replace $DECIMAL_NUMBER_PATTERN, "`$1t" # Convert dec constants
+ Write-Output "$name EQU $value"
+ } else {
+ Write-Output "$name TEXTEQU <$value>"
+ }
+ }
+ }
+
+ Write-Output $_
+ }
+
+ Write-Output "// File end: $filePath"
+}
+
+ProcessFile $args[0]
diff --git a/src/vm/handletable.h b/src/vm/handletable.h
new file mode 100644
index 0000000000..fb742c12a1
--- /dev/null
+++ b/src/vm/handletable.h
@@ -0,0 +1,6 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "../gc/handletable.h"
diff --git a/src/vm/handletable.inl b/src/vm/handletable.inl
new file mode 100644
index 0000000000..84160cd5b1
--- /dev/null
+++ b/src/vm/handletable.inl
@@ -0,0 +1,6 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "../gc/handletable.inl"
diff --git a/src/vm/hash.cpp b/src/vm/hash.cpp
new file mode 100644
index 0000000000..8a0ab28652
--- /dev/null
+++ b/src/vm/hash.cpp
@@ -0,0 +1,1235 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*++
+
+Module Name:
+
+ hash.cpp
+
+--*/
+
+#include "common.h"
+
+#include "hash.h"
+
+#include "excep.h"
+
+#include "syncclean.hpp"
+
+#include "threadsuspend.h"
+
+//---------------------------------------------------------------------
+// Array of primes, used by hash table to choose the number of buckets
+// Review: would we want larger primes? e.g., for 64-bit?
+
+const DWORD g_rgPrimes[] = {
+5,11,17,23,29,37,47,59,71,89,107,131,163,197,239,293,353,431,521,631,761,919,
+1103,1327,1597,1931,2333,2801,3371,4049,4861,5839,7013,8419,10103,12143,14591,
+17519,21023,25229,30293,36353,43627,52361,62851,75431,90523, 108631, 130363,
+156437, 187751, 225307, 270371, 324449, 389357, 467237, 560689, 672827, 807403,
+968897, 1162687, 1395263, 1674319, 2009191, 2411033, 2893249, 3471899, 4166287,
+4999559, 5999471, 7199369
+};
+const SIZE_T g_rgNumPrimes = sizeof(g_rgPrimes) / sizeof(*g_rgPrimes);
+
+const unsigned int SLOTS_PER_BUCKET = 4;
+
+#ifndef DACCESS_COMPILE
+
+void *PtrHashMap::operator new(size_t size, LoaderHeap *pHeap)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FAULT; //return NULL;
+
+ return pHeap->AllocMem(S_SIZE_T(size));
+}
+
+void PtrHashMap::operator delete(void *p)
+{
+}
+
+
+//-----------------------------------------------------------------
+// Bucket methods
+
+BOOL Bucket::InsertValue(const UPTR key, const UPTR value)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FAULT; //return FALSE;
+
+ _ASSERTE(key != EMPTY);
+ _ASSERTE(key != DELETED);
+
+ if (!HasFreeSlots())
+ return false; //no free slots
+
+ // might have a free slot
+ for (UPTR i = 0; i < SLOTS_PER_BUCKET; i++)
+ {
+ //@NOTE we can't reuse DELETED slots
+ if (m_rgKeys[i] == EMPTY)
+ {
+ SetValue (value, i);
+
+ // On multiprocessors we should make sure that
+ // the value is propagated before we proceed.
+ // inline memory barrier call, refer to
+ // function description at the beginning of this file
+ MemoryBarrier();
+
+ m_rgKeys[i] = key;
+ return true;
+ }
+ } // for i= 0; i < SLOTS_PER_BUCKET; loop
+
+ SetCollision(); // otherwise set the collision bit
+ return false;
+}
+
+#endif // !DACCESS_COMPILE
+
+//---------------------------------------------------------------------
+// inline Bucket* HashMap::Buckets()
+// get the pointer to the bucket array
+inline
+PTR_Bucket HashMap::Buckets()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+ _ASSERTE (!g_fEEStarted || !m_fAsyncMode || GetThread() == NULL || GetThread()->PreemptiveGCDisabled() || IsGCThread());
+#endif
+ return m_rgBuckets + 1;
+}
+
+//---------------------------------------------------------------------
+// inline size_t HashMap::GetSize(PTR_Bucket rgBuckets)
+// get the number of buckets
+inline
+DWORD HashMap::GetSize(PTR_Bucket rgBuckets)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ PTR_size_t pSize = dac_cast<PTR_size_t>(rgBuckets - 1);
+ _ASSERTE(FitsIn<DWORD>(pSize[0]));
+ return static_cast<DWORD>(pSize[0]);
+}
+
+
+//---------------------------------------------------------------------
+// inline size_t HashMap::HashFunction(UPTR key, UINT numBuckets, UINT &seed, UINT &incr)
+// get the first & second hash function.
+// H(key, i) = h1(key) + i*h2(key, hashSize); 0 <= i < numBuckets
+// h2 must return a value >= 1 and < numBuckets.
+inline
+void HashMap::HashFunction(const UPTR key, const UINT numBuckets, UINT &seed, UINT &incr)
+{
+ LIMITED_METHOD_CONTRACT;
+ // First hash function
+ // We commonly use pointers, which are 4 byte aligned, so the two least
+ // significant bits are often 0, then we mod this value by something like
+ // 11. We can get a better distribution for pointers by dividing by 4.
+ // REVIEW: Is 64-bit truncation better, or should we be doing something with the
+ // upper 32 bits in either of these hash functions?
+ seed = static_cast<UINT>(key >> 2);
+ // Second hash function
+ incr = (UINT)(1 + (((static_cast<UINT>(key >> 5)) + 1) % ((UINT)numBuckets - 1)));
+ _ASSERTE(incr > 0 && incr < numBuckets);
+}
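+
+// Illustrative sketch (not part of the original source): a minimal, self-contained
+// rendition of the double-hashing scheme above, written with plain C++ types so it
+// can be compiled in isolation. The DemoProbe name is hypothetical.
+#if 0 // demo only
+#include <cstdint>
+static unsigned DemoProbe(uintptr_t key, unsigned numBuckets, unsigned i)
+{
+    // h1: drop the two alignment bits that are usually zero for pointers.
+    unsigned seed = static_cast<unsigned>(key >> 2);
+    // h2: derived from different bits of the key; always in [1, numBuckets - 1].
+    unsigned incr = 1 + ((static_cast<unsigned>(key >> 5) + 1) % (numBuckets - 1));
+    // H(key, i) = h1(key) + i * h2(key), reduced modulo the table size.
+    return (seed + i * incr) % numBuckets;
+}
+#endif // demo only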
+
+#ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------
+// inline void HashMap::SetSize(Bucket *rgBuckets, size_t size)
+// set the number of buckets
+inline
+void HashMap::SetSize(Bucket *rgBuckets, size_t size)
+{
+ LIMITED_METHOD_CONTRACT;
+ ((size_t*)rgBuckets)[0] = size;
+}
+
+//---------------------------------------------------------------------
+// HashMap::HashMap()
+// constructor, initialize all values
+//
+HashMap::HashMap()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ m_rgBuckets = NULL;
+ m_pCompare = NULL; // comparison object
+ m_cbInserts = 0; // track inserts
+ m_cbDeletes = 0; // track deletes
+ m_cbPrevSlotsInUse = 0; // track valid slots present during previous rehash
+
+ //Debug data member
+#ifdef _DEBUG
+ m_fInSyncCode = false;
+#endif
+ // profile data members
+#ifdef HASHTABLE_PROFILE
+ m_cbRehash = 0;
+ m_cbRehashSlots = 0;
+ m_cbObsoleteTables = 0;
+ m_cbTotalBuckets =0;
+ m_cbInsertProbesGt8 = 0; // inserts that needed more than 8 probes
+ maxFailureProbe =0;
+ memset(m_rgLookupProbes,0,HASHTABLE_LOOKUP_PROBES_DATA*sizeof(LONG));
+#endif // HASHTABLE_PROFILE
+#ifdef _DEBUG
+ m_lockData = NULL;
+ m_pfnLockOwner = NULL;
+#endif // _DEBUG
+}
+
+//---------------------------------------------------------------------
+// void HashMap::Init(unsigned cbInitialSize, CompareFnPtr ptr, bool fAsyncMode)
+// set the initial size of the hash table and provide the comparison
+// function pointer
+//
+void HashMap::Init(DWORD cbInitialSize, CompareFnPtr ptr, BOOL fAsyncMode, LockOwner *pLock)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ Compare* pCompare = NULL;
+ if (ptr != NULL)
+ {
+ pCompare = new Compare(ptr);
+ }
+ Init(cbInitialSize, pCompare, fAsyncMode, pLock);
+}
+
+DWORD HashMap::GetNearestIndex(DWORD cbInitialSize)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD lowIndex = 0;
+ DWORD highIndex = g_rgNumPrimes - 1;
+ DWORD midIndex = (highIndex + 1) / 2;
+
+ if (cbInitialSize <= g_rgPrimes[0])
+ return 0;
+
+ if (cbInitialSize >= g_rgPrimes[highIndex])
+ return highIndex;
+
+ while (true)
+ {
+ if (cbInitialSize < g_rgPrimes[midIndex])
+ {
+ highIndex = midIndex;
+ }
+ else
+ {
+ if (cbInitialSize == g_rgPrimes[midIndex])
+ return midIndex;
+ lowIndex = midIndex;
+ }
+ midIndex = lowIndex + (highIndex - lowIndex + 1)/2;
+ if (highIndex == midIndex)
+ {
+ _ASSERTE(g_rgPrimes[highIndex] >= cbInitialSize);
+ _ASSERTE(highIndex < g_rgNumPrimes);
+ return highIndex;
+ }
+ }
+}
+
+//---------------------------------------------------------------------
+// void HashMap::Init(unsigned cbInitialSize, Compare* pCompare, bool fAsyncMode)
+// set the initial size of the hash table and provide the comparison
+// function pointer
+//
+void HashMap::Init(DWORD cbInitialSize, Compare* pCompare, BOOL fAsyncMode, LockOwner *pLock)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ m_iPrimeIndex = GetNearestIndex(cbInitialSize);
+ DWORD size = g_rgPrimes[m_iPrimeIndex];
+ PREFIX_ASSUME(size < 0x7fffffff);
+
+ m_rgBuckets = new Bucket[size+1];
+
+ memset (m_rgBuckets, 0, (size+1)*sizeof(Bucket));
+ SetSize(m_rgBuckets, size);
+
+ m_pCompare = pCompare;
+
+ m_fAsyncMode = fAsyncMode != FALSE;
+
+ // assert null comparison returns true
+ //ASSERT(
+ // m_pCompare == NULL ||
+ // (m_pCompare->CompareHelper(0,0) != 0)
+ // );
+
+#ifdef HASHTABLE_PROFILE
+ m_cbTotalBuckets = size+1;
+#endif
+
+#ifdef _DEBUG
+ if (pLock == NULL) {
+ m_lockData = NULL;
+ m_pfnLockOwner = NULL;
+ }
+ else
+ {
+ m_lockData = pLock->lock;
+ m_pfnLockOwner = pLock->lockOwnerFunc;
+ }
+ if (m_pfnLockOwner == NULL) {
+ m_writerThreadId.SetThreadId();
+ }
+#endif // _DEBUG
+}
+
+//---------------------------------------------------------------------
+// void PtrHashMap::Init(unsigned cbInitialSize, CompareFnPtr ptr, bool fAsyncMode)
+// set the initial size of the hash table and provide the comparison
+// function pointer
+//
+void PtrHashMap::Init(DWORD cbInitialSize, CompareFnPtr ptr, BOOL fAsyncMode, LockOwner *pLock)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ ComparePtr *compare = NULL;
+ if (ptr != NULL)
+ compare = new ComparePtr(ptr);
+
+ m_HashMap.Init(cbInitialSize, compare, fAsyncMode, pLock);
+}
+
+//---------------------------------------------------------------------
+// HashMap::~HashMap()
+// destructor, free the current array of buckets
+//
+HashMap::~HashMap()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ // free the current table
+ Clear();
+ // compare object
+ if (NULL != m_pCompare)
+ delete m_pCompare;
+}
+
+
+//---------------------------------------------------------------------
+// HashMap::Clear()
+// Remove all elements from table
+//
+void HashMap::Clear()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ // free the current table
+ delete [] m_rgBuckets;
+
+ m_rgBuckets = NULL;
+}
+
+
+//---------------------------------------------------------------------
+// UPTR HashMap::CompareValues(const UPTR value1, const UPTR value2)
+// compare values with the function pointer provided
+//
+#ifndef _DEBUG
+inline
+#endif
+UPTR HashMap::CompareValues(const UPTR value1, const UPTR value2)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifndef _DEBUG
+ CONTRACTL
+ {
+ DISABLED(THROWS); // This is not a bug, we cannot decide, since the function ptr called may be either.
+ DISABLED(GC_NOTRIGGER); // This is not a bug, we cannot decide, since the function ptr called may be either.
+ }
+ CONTRACTL_END;
+#endif // !_DEBUG
+
+ // NOTE: the ordering of the arguments is arbitrary
+ return (m_pCompare == NULL || m_pCompare->CompareHelper(value1,value2));
+}
+
+//---------------------------------------------------------------------
+// bool HashMap::Enter()
+// bool HashMap::Leave()
+// check valid use of the hash table in synchronous mode
+
+#ifdef _DEBUG
+#ifndef DACCESS_COMPILE
+void HashMap::Enter(HashMap *map)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // check proper concurrent use of the hash table
+ if (map->m_fInSyncCode)
+ ASSERT(0); // oops multiple access to sync.-critical code
+ map->m_fInSyncCode = true;
+}
+#else
+// In DAC builds, we don't want to take the lock, we just want to know if it's held. If it is,
+// we assume the hash map is in an inconsistent state and throw an exception.
+// Arguments:
+// input: map - the map controlled by the lock.
+// Note: Throws
+void HashMap::Enter(HashMap *map)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // check proper concurrent use of the hash table
+ if (map->m_fInSyncCode)
+ {
+ ThrowHR(CORDBG_E_PROCESS_NOT_SYNCHRONIZED); // oops multiple access to sync.-critical code
+ }
+}
+#endif // DACCESS_COMPILE
+
+void HashMap::Leave(HashMap *map)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // check proper concurrent use of the hash table
+ if (map->m_fInSyncCode == false)
+ ASSERT(0); // oops multiple access to sync.-critical code
+ map->m_fInSyncCode = false;
+}
+#endif // _DEBUG
+
+#endif // !DACCESS_COMPILE
+
+//---------------------------------------------------------------------
+// void HashMap::ProfileLookup(unsigned ntry)
+// profile helper code
+void HashMap::ProfileLookup(UPTR ntry, UPTR retValue)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+#ifndef DACCESS_COMPILE
+ #ifdef HASHTABLE_PROFILE
+ if (ntry < HASHTABLE_LOOKUP_PROBES_DATA - 2)
+ FastInterlockIncrement(&m_rgLookupProbes[ntry]);
+ else
+ FastInterlockIncrement(&m_rgLookupProbes[HASHTABLE_LOOKUP_PROBES_DATA - 2]);
+
+ if (retValue == NULL)
+ { // failure probes
+ FastInterlockIncrement(&m_rgLookupProbes[HASHTABLE_LOOKUP_PROBES_DATA - 1]);
+ // the following code is usually executed
+ // only for special case of lookup done before insert
+ // check hash.h SyncHash::InsertValue
+ if (maxFailureProbe < ntry)
+ {
+ maxFailureProbe = ntry;
+ }
+ }
+ #endif // HASHTABLE_PROFILE
+#endif // !DACCESS_COMPILE
+}
+
+#ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------
+// void HashMap::InsertValue (UPTR key, UPTR value)
+// Insert into hash table, if the number of retries
+// becomes greater than threshold, expand hash table
+//
+void HashMap::InsertValue (UPTR key, UPTR value)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FAULT;
+
+ _ASSERTE (OwnLock());
+
+ // BROKEN: This is called for the RCWCache on the GC thread
+ GCX_MAYBE_COOP_NO_THREAD_BROKEN(m_fAsyncMode);
+
+ ASSERT(m_rgBuckets != NULL);
+
+ // check proper use in synchronous mode
+ SyncAccessHolder holder(this); // no-op in NON debug code
+
+ ASSERT(value <= VALUE_MASK);
+
+ ASSERT (key > DELETED);
+
+ Bucket* rgBuckets = Buckets();
+ DWORD cbSize = GetSize(rgBuckets);
+
+ UINT seed, incr;
+ HashFunction(key, cbSize, seed, incr);
+
+ for (UPTR ntry =0; ntry < 8; ntry++)
+ {
+ Bucket* pBucket = &rgBuckets[seed % cbSize];
+ if(pBucket->InsertValue(key,value))
+ {
+ goto LReturn;
+ }
+
+ seed += incr;
+ } // for ntry loop
+
+ // We need to expand to keep lookup short
+ Rehash();
+
+ // Try again
+ PutEntry (Buckets(), key,value);
+
+LReturn: // label for return
+
+ m_cbInserts++;
+
+ #ifdef _DEBUG
+ ASSERT (m_pCompare != NULL || value == LookupValue (key,value));
+ // check proper concurrent use of the hash table in synchronous mode
+ #endif // _DEBUG
+
+ return;
+}
+#endif // !DACCESS_COMPILE
+
+//---------------------------------------------------------------------
+// UPTR HashMap::LookupValue(UPTR key, UPTR value)
+// Lookup value in the hash table, use the comparison function
+// to verify the values match
+//
+UPTR HashMap::LookupValue(UPTR key, UPTR value)
+{
+ CONTRACTL
+ {
+ DISABLED(THROWS); // This is not a bug, we cannot decide, since the function ptr called may be either.
+ DISABLED(GC_NOTRIGGER); // This is not a bug, we cannot decide, since the function ptr called may be either.
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ SCAN_IGNORE_THROW; // See contract above.
+ SCAN_IGNORE_TRIGGER; // See contract above.
+
+#ifndef DACCESS_COMPILE
+ _ASSERTE (m_fAsyncMode || OwnLock());
+
+ // BROKEN: This is called for the RCWCache on the GC thread
+ // Also called by AppDomain::FindCachedAssembly to resolve AssemblyRef -- this is used by stack walking on the GC thread.
+ // See comments in GCHeap::RestartEE (above the call to SyncClean::CleanUp) for reason to enter COOP mode.
+ // However, if the current thread is the GC thread, we know we're not going to call GCHeap::RestartEE
+ // while accessing the HashMap, so it's safe to proceed.
+ // (m_fAsyncMode && !IsGCThread() is the condition for entering COOP mode. I.e., enable COOP GC only if
+ // the HashMap is in async mode and this is not a GC thread.)
+ GCX_MAYBE_COOP_NO_THREAD_BROKEN(m_fAsyncMode && !IsGCThread());
+
+ ASSERT(m_rgBuckets != NULL);
+ // This is necessary in case some other thread
+ // replaces m_rgBuckets
+ ASSERT (key > DELETED);
+
+ // perform this check at lookup time as well
+ ASSERT(value <= VALUE_MASK);
+#endif // !DACCESS_COMPILE
+
+ PTR_Bucket rgBuckets = Buckets(); //atomic fetch
+ DWORD cbSize = GetSize(rgBuckets);
+
+ UINT seed, incr;
+ HashFunction(key, cbSize, seed, incr);
+
+ UPTR ntry;
+ for(ntry =0; ntry < cbSize; ntry++)
+ {
+ PTR_Bucket pBucket = rgBuckets+(seed % cbSize);
+ for (unsigned int i = 0; i < SLOTS_PER_BUCKET; i++)
+ {
+ if (pBucket->m_rgKeys[i] == key) // keys match
+ {
+
+ // inline memory barrier call, refer to
+ // function description at the beginning of this file
+ MemoryBarrier();
+
+ UPTR storedVal = pBucket->GetValue(i);
+ // if compare function is provided
+ // dupe keys are possible, check if the value matches,
+// Not using compare function in DAC build.
+#ifndef DACCESS_COMPILE
+ if (CompareValues(value,storedVal))
+#endif
+ {
+ ProfileLookup(ntry,storedVal); //no-op in non HASHTABLE_PROFILE code
+
+ // return the stored value
+ return storedVal;
+ }
+ }
+ }
+
+ seed += incr;
+ if(!pBucket->IsCollision())
+ break;
+ } // for ntry loop
+
+ // not found
+ ProfileLookup(ntry,INVALIDENTRY); //no-op in non HASHTABLE_PROFILE code
+
+ return INVALIDENTRY;
+}
+
+#ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------
+// UPTR HashMap::ReplaceValue(UPTR key, UPTR value)
+// Replace existing value in the hash table, use the comparison function
+// to verify the values match
+//
+UPTR HashMap::ReplaceValue(UPTR key, UPTR value)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ _ASSERTE(OwnLock());
+
+ // BROKEN: This is called for the RCWCache on the GC thread
+ GCX_MAYBE_COOP_NO_THREAD_BROKEN(m_fAsyncMode);
+
+ ASSERT(m_rgBuckets != NULL);
+ // This is necessary in case some other thread
+ // replaces m_rgBuckets
+ ASSERT (key > DELETED);
+
+ // perform this check during replacing as well
+ ASSERT(value <= VALUE_MASK);
+
+ Bucket* rgBuckets = Buckets(); //atomic fetch
+ DWORD cbSize = GetSize(rgBuckets);
+
+ UINT seed, incr;
+ HashFunction(key, cbSize, seed, incr);
+
+ UPTR ntry;
+ for(ntry =0; ntry < cbSize; ntry++)
+ {
+ Bucket* pBucket = &rgBuckets[seed % cbSize];
+ for (unsigned int i = 0; i < SLOTS_PER_BUCKET; i++)
+ {
+ if (pBucket->m_rgKeys[i] == key) // keys match
+ {
+
+ // inline memory barrier call, refer to
+ // function description at the beginning of this file
+ MemoryBarrier();
+
+ UPTR storedVal = pBucket->GetValue(i);
+ // if compare function is provided
+ // dupe keys are possible, check if the value matches,
+ if (CompareValues(value,storedVal))
+ {
+ ProfileLookup(ntry,storedVal); //no-op in non HASHTABLE_PROFILE code
+
+ pBucket->SetValue(value, i);
+
+ // On multiprocessors we should make sure that
+ // the value is propagated before we proceed.
+ // inline memory barrier call, refer to
+ // function description at the beginning of this file
+ MemoryBarrier();
+
+ // return the previous stored value
+ return storedVal;
+ }
+ }
+ }
+
+ seed += incr;
+ if(!pBucket->IsCollision())
+ break;
+ } // for ntry loop
+
+ // not found
+ ProfileLookup(ntry,INVALIDENTRY); //no-op in non HASHTABLE_PROFILE code
+
+ return INVALIDENTRY;
+}
+
+//---------------------------------------------------------------------
+// UPTR HashMap::DeleteValue (UPTR key, UPTR value)
+// if found mark the entry deleted and return the stored value
+//
+UPTR HashMap::DeleteValue (UPTR key, UPTR value)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ _ASSERTE (OwnLock());
+
+ // BROKEN: This is called for the RCWCache on the GC thread
+ GCX_MAYBE_COOP_NO_THREAD_BROKEN(m_fAsyncMode);
+
+ // check proper use in synchronous mode
+ SyncAccessHolder holder(this); // no-op in non-DEBUG code
+
+ ASSERT(m_rgBuckets != NULL);
+ // This is necessary in case some other thread
+ // replaces m_rgBuckets
+ ASSERT (key > DELETED);
+
+ // perform this check during replacing as well
+ ASSERT(value <= VALUE_MASK);
+
+ Bucket* rgBuckets = Buckets();
+ DWORD cbSize = GetSize(rgBuckets);
+
+ UINT seed, incr;
+ HashFunction(key, cbSize, seed, incr);
+
+ UPTR ntry;
+ for(ntry =0; ntry < cbSize; ntry++)
+ {
+ Bucket* pBucket = &rgBuckets[seed % cbSize];
+ for (unsigned int i = 0; i < SLOTS_PER_BUCKET; i++)
+ {
+ if (pBucket->m_rgKeys[i] == key) // keys match
+ {
+ // inline memory barrier call, refer to
+ // function description at the beginning of this file
+ MemoryBarrier();
+
+ UPTR storedVal = pBucket->GetValue(i);
+ // if compare function is provided
+ // dupe keys are possible, check if the value matches,
+ if (CompareValues(value,storedVal))
+ {
+ if(m_fAsyncMode)
+ {
+ pBucket->m_rgKeys[i] = DELETED; // mark the key as DELETED
+ }
+ else
+ {
+ pBucket->m_rgKeys[i] = EMPTY;// otherwise mark the entry as empty
+ pBucket->SetFreeSlots();
+ }
+ m_cbDeletes++; // track the deletes
+
+ ProfileLookup(ntry,storedVal); //no-op in non HASHTABLE_PROFILE code
+
+ // return the stored value
+ return storedVal;
+ }
+ }
+ }
+
+ seed += incr;
+ if(!pBucket->IsCollision())
+ break;
+ } // for ntry loop
+
+ // not found
+ ProfileLookup(ntry,INVALIDENTRY); //no-op in non HASHTABLE_PROFILE code
+
+#ifdef _DEBUG
+ ASSERT (m_pCompare != NULL || (UPTR) INVALIDENTRY == LookupValue (key,value));
+ // check proper concurrent use of the hash table in synchronous mode
+#endif // _DEBUG
+
+ return INVALIDENTRY;
+}
+
+
+//---------------------------------------------------------------------
+// UPTR HashMap::Gethash (UPTR key)
+// use this for lookups with unique keys
+// don't need to pass an input value to perform the lookup
+//
+UPTR HashMap::Gethash (UPTR key)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ return LookupValue(key,NULL);
+}
+
+
+//---------------------------------------------------------------------
+// UPTR PutEntry (Bucket* rgBuckets, UPTR key, UPTR value)
+// helper used by expand method below
+
+UPTR HashMap::PutEntry (Bucket* rgBuckets, UPTR key, UPTR value)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ ASSERT (value > 0);
+ ASSERT (key > DELETED);
+
+ DWORD size = GetSize(rgBuckets);
+ UINT seed, incr;
+ HashFunction(key, size, seed, incr);
+
+ UPTR ntry;
+ for (ntry =0; ntry < size; ntry++)
+ {
+ Bucket* pBucket = &rgBuckets[seed % size];
+ if(pBucket->InsertValue(key,value))
+ {
+ return ntry;
+ }
+
+ seed += incr;
+ } // for ntry loop
+ _ASSERTE(!"Hash table insert failed. Bug in PutEntry or the code that resizes the hash table?");
+ return INVALIDENTRY;
+}
+
+//---------------------------------------------------------------------
+//
+// UPTR HashMap::NewSize()
+// compute the index (into g_rgPrimes) of the new size, based on the number of slots in use
+//
+inline
+UPTR HashMap::NewSize()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ ASSERT(m_cbInserts >= m_cbDeletes);
+ UPTR cbValidSlots = m_cbInserts-m_cbDeletes;
+ UPTR cbNewSlots = m_cbInserts > m_cbPrevSlotsInUse ? m_cbInserts - m_cbPrevSlotsInUse : 0;
+
+ ASSERT(cbValidSlots >=0 );
+ if (cbValidSlots == 0)
+ return g_rgPrimes[0]; // Minimum size for this hash table.
+
+ UPTR cbTotalSlots = (m_fAsyncMode) ? (UPTR)(cbValidSlots*3/2+cbNewSlots*.6) : cbValidSlots*3/2;
+
+ //UPTR cbTotalSlots = cbSlotsInUse*3/2+m_cbDeletes;
+
+ UPTR iPrimeIndex;
+ for (iPrimeIndex = 0; iPrimeIndex < g_rgNumPrimes; iPrimeIndex++)
+ {
+ if (g_rgPrimes[iPrimeIndex] > cbTotalSlots)
+ {
+ return iPrimeIndex;
+ }
+ }
+ ASSERT(iPrimeIndex == g_rgNumPrimes);
+ ASSERT(0 && !"Hash table walked beyond end of primes array");
+ return g_rgNumPrimes - 1;
+}
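+
+// Worked example (not from the original source): in async mode, with roughly
+// 1000 valid slots and 200 new slots since the last rehash, the target is
+// 1000 * 3/2 + 200 * 0.6 = 1620 total slots, so the first prime above that
+// (1931) is selected from g_rgPrimes.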
+
+//---------------------------------------------------------------------
+// void HashMap::Rehash()
+// Rehash the hash table, create a new array of buckets and rehash
+// all non deleted values from the previous array
+//
+void HashMap::Rehash()
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FAULT;
+
+ // BROKEN: This is called for the RCWCache on the GC thread
+ GCX_MAYBE_COOP_NO_THREAD_BROKEN(m_fAsyncMode);
+
+#ifndef CROSSGEN_COMPILE
+ _ASSERTE (!g_fEEStarted || !m_fAsyncMode || GetThread() == NULL || GetThread()->PreemptiveGCDisabled());
+ _ASSERTE (OwnLock());
+#endif
+
+ DWORD cbNewSize = g_rgPrimes[m_iPrimeIndex = NewSize()];
+
+ ASSERT(m_iPrimeIndex < 70);
+
+ Bucket* rgBuckets = Buckets();
+ UPTR cbCurrSize = GetSize(rgBuckets);
+
+ S_SIZE_T cbNewBuckets = (S_SIZE_T(cbNewSize) + S_SIZE_T(1)) * S_SIZE_T(sizeof(Bucket));
+
+ if (cbNewBuckets.IsOverflow())
+ ThrowHR(COR_E_OVERFLOW);
+
+ Bucket* rgNewBuckets = (Bucket *) new BYTE[cbNewBuckets.Value()];
+ memset (rgNewBuckets, 0, cbNewBuckets.Value());
+ SetSize(rgNewBuckets, cbNewSize);
+
+ // current valid slots
+ UPTR cbValidSlots = m_cbInserts-m_cbDeletes;
+ m_cbInserts = cbValidSlots; // reset insert count to the new valid count
+ m_cbPrevSlotsInUse = cbValidSlots; // track the previous delete count
+ m_cbDeletes = 0; // reset delete count
+ // rehash table into it
+
+ if (cbValidSlots) // if there are valid slots to be rehashed
+ {
+ for (unsigned int nb = 0; nb < cbCurrSize; nb++)
+ {
+ for (unsigned int i = 0; i < SLOTS_PER_BUCKET; i++)
+ {
+ UPTR key =rgBuckets[nb].m_rgKeys[i];
+ if (key > DELETED)
+ {
+#ifdef HASHTABLE_PROFILE
+ UPTR ntry =
+#endif
+ PutEntry (rgNewBuckets+1, key, rgBuckets[nb].GetValue (i));
+ #ifdef HASHTABLE_PROFILE
+ if(ntry >=8)
+ m_cbInsertProbesGt8++;
+ #endif // HASHTABLE_PROFILE
+
+ // check if we can bail out
+ if (--cbValidSlots == 0)
+ goto LDone; // break out of both the loops
+ }
+ } // for i =0 thru SLOTS_PER_BUCKET
+ } //for all buckets
+ }
+
+
+LDone:
+
+ Bucket* pObsoleteTables = m_rgBuckets;
+
+ // memory barrier before publishing the new array of buckets
+ MemoryBarrier();
+
+ // replace the old array with the new one.
+ m_rgBuckets = rgNewBuckets;
+
+ #ifdef HASHTABLE_PROFILE
+ m_cbRehash++;
+ m_cbRehashSlots+=m_cbInserts;
+ m_cbObsoleteTables++; // track statistics
+ m_cbTotalBuckets += (cbNewSize+1);
+ #endif // HASHTABLE_PROFILE
+
+#ifdef _DEBUG
+
+ unsigned nb;
+ if (m_fAsyncMode)
+ {
+ // for all non deleted keys in the old table, make sure the corresponding values
+ // are in the new lookup table
+
+ for (nb = 1; nb <= ((size_t*)pObsoleteTables)[0]; nb++)
+ {
+ for (unsigned int i =0; i < SLOTS_PER_BUCKET; i++)
+ {
+ if (pObsoleteTables[nb].m_rgKeys[i] > DELETED)
+ {
+ UPTR value = pObsoleteTables[nb].GetValue (i);
+ // make sure the value is present in the new table
+ ASSERT (m_pCompare != NULL || value == LookupValue (pObsoleteTables[nb].m_rgKeys[i], value));
+ }
+ }
+ }
+ }
+
+ // make sure there are no deleted entries in the new lookup table
+ // if the compare function provided is null, then keys must be unique
+ for (nb = 0; nb < cbNewSize; nb++)
+ {
+ for (unsigned int i = 0; i < SLOTS_PER_BUCKET; i++)
+ {
+ UPTR keyv = Buckets()[nb].m_rgKeys[i];
+ ASSERT (keyv != DELETED);
+ if (m_pCompare == NULL && keyv != EMPTY)
+ {
+ ASSERT ((Buckets()[nb].GetValue (i)) == Gethash (keyv));
+ }
+ }
+ }
+#endif // _DEBUG
+
+ if (m_fAsyncMode)
+ {
+ // If we are allowing asynchronous reads, we must delay bucket cleanup until GC time.
+ SyncClean::AddHashMap (pObsoleteTables);
+ }
+ else
+ {
+ Bucket* pBucket = pObsoleteTables;
+ while (pBucket) {
+ Bucket* pNextBucket = NextObsolete(pBucket);
+ delete [] pBucket;
+ pBucket = pNextBucket;
+ }
+ }
+
+}
+
+//---------------------------------------------------------------------
+// void HashMap::Compact()
+// delete obsolete tables, try to compact deleted slots by sliding entries
+// in the bucket, note we can slide only if the bucket's collison bit is reset
+// otherwise the lookups will break
+// @perf, use the m_cbDeletes to m_cbInserts ratio to reduce the size of the hash
+// table
+//
+void HashMap::Compact()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ _ASSERTE (OwnLock());
+
+ //
+ GCX_MAYBE_COOP_NO_THREAD_BROKEN(m_fAsyncMode);
+ ASSERT(m_rgBuckets != NULL);
+
+ // Try to resize if that makes sense (reduce the size of the table), but
+ // don't fail the operation simply because we've run out of memory.
+ UPTR iNewIndex = NewSize();
+ if (iNewIndex != m_iPrimeIndex)
+ {
+ EX_TRY
+ {
+ FAULT_NOT_FATAL();
+ Rehash();
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+
+ //compact deleted slots, mark them as EMPTY
+
+ if (m_cbDeletes)
+ {
+ UPTR cbCurrSize = GetSize(Buckets());
+ Bucket *pBucket = Buckets();
+ Bucket *pSentinel;
+
+ for (pSentinel = pBucket+cbCurrSize; pBucket < pSentinel; pBucket++)
+ { //loop thru all buckets
+ for (unsigned int i = 0; i < SLOTS_PER_BUCKET; i++)
+ { //loop through all slots
+ if (pBucket->m_rgKeys[i] == DELETED)
+ {
+ pBucket->m_rgKeys[i] = EMPTY;
+ pBucket->SetFreeSlots(); // mark the bucket as containing
+ // free slots
+
+ // Need to decrement insert and delete counts at the same
+ // time to preserve correct live count.
+ _ASSERTE(m_cbInserts >= m_cbDeletes);
+ --m_cbInserts;
+
+ if(--m_cbDeletes == 0) // decrement count
+ return;
+ }
+ }
+ }
+ }
+
+}
+
+#ifdef _DEBUG
+// A thread must own a lock for a hash if it is a writer.
+BOOL HashMap::OwnLock()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ DEBUG_ONLY_FUNCTION;
+
+ if (m_pfnLockOwner == NULL) {
+ return m_writerThreadId.IsSameThread();
+ }
+ else {
+ BOOL ret = m_pfnLockOwner(m_lockData);
+ if (!ret) {
+ if (Debug_IsLockedViaThreadSuspension()) {
+ ret = TRUE;
+ }
+ }
+ return ret;
+ }
+}
+#endif // _DEBUG
+
+#ifdef HASHTABLE_PROFILE
+//---------------------------------------------------------------------
+// void HashMap::DumpStatistics()
+// dump statistics collected in profile mode
+//
+void HashMap::DumpStatistics()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ cout << "\n Hash Table statistics "<< endl;
+ cout << "--------------------------------------------------" << endl;
+
+ cout << "Current Insert count " << m_cbInserts << endl;
+ cout << "Current Delete count "<< m_cbDeletes << endl;
+
+ cout << "Current # of tables " << m_cbObsoleteTables << endl;
+ cout << "Total # of times Rehashed " << m_cbRehash<< endl;
+ cout << "Total # of slots rehashed " << m_cbRehashSlots << endl;
+
+ cout << "Insert : Probes gt. 8 during rehash " << m_cbInsertProbesGt8 << endl;
+
+ cout << " Max # of probes for a failed lookup " << maxFailureProbe << endl;
+
+ cout << "Prime Index " << m_iPrimeIndex << endl;
+ cout << "Current Buckets " << g_rgPrimes[m_iPrimeIndex]+1 << endl;
+
+ cout << "Total Buckets " << m_cbTotalBuckets << endl;
+
+ cout << " Lookup Probes " << endl;
+ for (unsigned i = 0; i < HASHTABLE_LOOKUP_PROBES_DATA; i++)
+ {
+ cout << "# Probes:" << i << " #entries:" << m_rgLookupProbes[i] << endl;
+ }
+ cout << "\n--------------------------------------------------" << endl;
+}
+#endif // HASHTABLE_PROFILE
+
+#endif // !DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void
+HashMap::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ // Assumed to be embedded, so the 'this' pointer itself is not enumerated.
+
+ if (m_rgBuckets.IsValid())
+ {
+ ULONG32 numBuckets = (ULONG32)GetSize(Buckets()) + 1;
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_rgBuckets),
+ numBuckets * sizeof(Bucket));
+
+ for (size_t i = 0; i < numBuckets; i++)
+ {
+ PTR_Bucket bucket = m_rgBuckets + i;
+ if (bucket.IsValid())
+ {
+ bucket.EnumMem();
+ }
+ }
+ }
+}
+
+#endif // DACCESS_COMPILE
+
+#if 0 // Perf test code, enabled on-demand for private testing.
+#ifndef DACCESS_COMPILE
+// This is for testing purposes only!
+void HashMap::HashMapTest()
+{
+ printf("HashMap test\n");
+
+ const unsigned int MinValue = 2; // Deleted is reserved, and is 1.
+ const unsigned int MinThreshold = 10000;
+ const unsigned int MaxThreshold = 30000;
+ HashMap * table = new HashMap();
+ Crst m_lock("HashMap", CrstSyncHashLock, CrstFlags(CRST_REENTRANCY | CRST_UNSAFE_ANYMODE));
+ CrstHolder holder(&m_lock);
+ LockOwner lock = {&m_lock, IsOwnerOfCrst};
+ table->Init(10, (CompareFnPtr) NULL, false, &lock);
+ for(unsigned int i=MinValue; i < MinThreshold; i++)
+ table->InsertValue(i, i);
+ printf("Added %d values.\n", MinThreshold);
+ //table.DumpStatistics();
+
+ LookupPerfTest(table, MinThreshold);
+
+ INT64 t0 = GetTickCount();
+ INT64 t1;
+ for(int rep = 0; rep < 10000000; rep++) {
+ for(unsigned int i=MinThreshold; i < MaxThreshold; i++) {
+ table->InsertValue(rep + i, rep + i);
+ }
+ for(unsigned int i=MinThreshold; i < MaxThreshold; i++) {
+ table->DeleteValue(rep + i, rep + i);
+ }
+ for(unsigned int i=MinValue; i < MinThreshold; i++)
+ table->DeleteValue(i, i);
+ for(unsigned int i=MinValue; i < MinThreshold; i++)
+ table->InsertValue(i, i);
+
+ if (rep % 500 == 0) {
+ t1 = GetTickCount();
+ printf("Repetition %d, took %d ms\n", rep, (int) (t1-t0));
+ t0 = t1;
+ LookupPerfTest(table, MinThreshold);
+ //table.DumpStatistics();
+ }
+ }
+ delete table;
+}
+
+// For testing purposes only.
+void HashMap::LookupPerfTest(HashMap * table, const unsigned int MinThreshold)
+{
+ INT64 t0 = GetTickCount();
+ for(int rep = 0; rep < 1000; rep++) {
+ for(unsigned int i=2; i<MinThreshold; i++) {
+ UPTR v = table->LookupValue(i, i);
+ if (v != i) {
+ printf("LookupValue didn't return the expected value!");
+ _ASSERTE(v == i);
+ }
+ }
+ }
+ INT64 t1 = GetTickCount();
+ for(unsigned int i = MinThreshold * 80; i < MinThreshold * 80 + 1000; i++)
+ table->LookupValue(i, i);
+ //cout << "Lookup perf test (1000 * " << MinThreshold << ": " << (t1-t0) << " ms." << endl;
+#ifdef HASHTABLE_PROFILE
+ printf("Lookup perf test time: %d ms table size: %d max failure probe: %d longest collision chain: %d\n", (int) (t1-t0), (int) table->GetSize(table->Buckets()), (int) table->maxFailureProbe, (int) table->m_cbMaxCollisionLength);
+ table->DumpStatistics();
+#else // !HASHTABLE_PROFILE
+ printf("Lookup perf test time: %d ms table size: %d\n", (int) (t1-t0), table->GetSize(table->Buckets()));
+#endif // !HASHTABLE_PROFILE
+}
+#endif // !DACCESS_COMPILE
+#endif // 0 // Perf test code, enabled on-demand for private testing.
diff --git a/src/vm/hash.h b/src/vm/hash.h
new file mode 100644
index 0000000000..8929c5c0a9
--- /dev/null
+++ b/src/vm/hash.h
@@ -0,0 +1,786 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*++---------------------------------------------------------------------------------------
+
+Module Name:
+
+ hash.h
+
+Abstract:
+
+ Fast hash table classes.
+--*/
+
+#ifndef _HASH_H_
+#define _HASH_H_
+
+#ifndef ASSERT
+#define ASSERT _ASSERTE
+#endif
+
+
+#include "crst.h"
+
+// #define HASHTABLE_PROFILE
+
+// Track collision chains of up to this length (sizes the probe-count histogram)
+const unsigned int HASHTABLE_LOOKUP_PROBES_DATA = 20;
+
+//-------------------------------------------------------
+// enums for special Key values used in hash table
+//
+enum
+{
+ EMPTY = 0,
+ DELETED = 1,
+ INVALIDENTRY = ~0
+};
+
+typedef ULONG_PTR UPTR;
+
+//------------------------------------------------------------------------------
+// classes in use
+//------------------------------------------------------------------------------
+class Bucket;
+class HashMap;
+
+//-------------------------------------------------------
+// class Bucket
+// used by hash table implementation
+//
+typedef DPTR(class Bucket) PTR_Bucket;
+class Bucket
+{
+public:
+ UPTR m_rgKeys[4];
+ UPTR m_rgValues[4];
+
+#define VALUE_MASK (sizeof(LPVOID) == 4 ? 0x7FFFFFFF : I64(0x7FFFFFFFFFFFFFFF))
+
+ void SetValue (UPTR value, UPTR i)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ ASSERT(value <= VALUE_MASK);
+ m_rgValues[i] = (UPTR) ((m_rgValues[i] & ~VALUE_MASK) | value);
+ }
+
+ UPTR GetValue (UPTR i)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (UPTR)(m_rgValues[i] & VALUE_MASK);
+ }
+
+ UPTR IsCollision() // useful sentinel for fast fail of lookups
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (UPTR) (m_rgValues[0] & ~VALUE_MASK);
+ }
+
+ void SetCollision()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_rgValues[0] |= ~VALUE_MASK; // set collision bit
+ m_rgValues[1] &= VALUE_MASK; // reset has free slots bit
+ }
+
+ BOOL HasFreeSlots()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // check for free slots available in the bucket
+ // either there is no collision or a free slot was reclaimed during
+ // compaction
+ return (!IsCollision() || (m_rgValues[1] & ~VALUE_MASK));
+ }
+
+ void SetFreeSlots()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_rgValues[1] |= ~VALUE_MASK; // set has free slots bit
+ }
+
+ BOOL InsertValue(const UPTR key, const UPTR value);
+};
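+
+// Illustrative sketch (not part of the original header) of how the two spare
+// high bits are multiplexed onto the value slots: the collision bit lives in
+// m_rgValues[0] and the free-slots bit in m_rgValues[1], above VALUE_MASK.
+// DemoBucketBits is a hypothetical name.
+#if 0 // demo only
+static void DemoBucketBits()
+{
+    Bucket b = {};
+    b.SetValue(42, 0);
+    _ASSERTE(b.GetValue(0) == 42);     // payload survives masking
+    _ASSERTE(!b.IsCollision());        // high bit of m_rgValues[0] still clear
+    b.SetCollision();
+    _ASSERTE(b.IsCollision());         // collision bit set...
+    _ASSERTE(b.GetValue(0) == 42);     // ...without disturbing the payload
+    b.SetFreeSlots();
+    _ASSERTE(b.HasFreeSlots());        // free-slots bit lives in m_rgValues[1]
+}
+#endif // demo only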
+
+
+//------------------------------------------------------------------------------
+// bool (*CompareFnPtr)(UPTR,UPTR); pointer to a function that takes 2 UPTRs
+// and returns a boolean, provide a function with this signature to the HashTable
+// to use for comparing Values during lookup
+//------------------------------------------------------------------------------
+typedef BOOL (*CompareFnPtr)(UPTR,UPTR);
+
+class Compare
+{
+protected:
+ Compare()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_ptr = NULL;
+ }
+public:
+ CompareFnPtr m_ptr;
+
+ Compare(CompareFnPtr ptr)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(ptr != NULL);
+ m_ptr = ptr;
+ }
+
+ virtual UPTR CompareHelper(UPTR val1, UPTR storedval)
+ {
+ WRAPPER_NO_CONTRACT;
+
+#ifndef _DEBUG
+ CONTRACTL
+ {
+ DISABLED(THROWS); // This is not a bug, we cannot decide, since the function ptr called may be either.
+ DISABLED(GC_NOTRIGGER); // This is not a bug, we cannot decide, since the function ptr called may be either.
+ }
+ CONTRACTL_END;
+#endif // !_DEBUG
+
+ return (*m_ptr)(val1,storedval);
+ }
+};
+
+class ComparePtr : public Compare
+{
+public:
+ ComparePtr (CompareFnPtr ptr)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(ptr != NULL);
+ m_ptr = ptr;
+ }
+
+ virtual UPTR CompareHelper(UPTR val1, UPTR storedval)
+ {
+ WRAPPER_NO_CONTRACT;
+
+#ifndef _DEBUG
+ CONTRACTL
+ {
+ DISABLED(THROWS); // This is not a bug, we cannot decide, since the function ptr called may be either.
+ DISABLED(GC_NOTRIGGER); // This is not a bug, we cannot decide, since the function ptr called may be either.
+ }
+ CONTRACTL_END;
+#endif // !_DEBUG
+
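+ // PtrHashMap stores pointer values shifted right by one; undo that
+ // shift before handing the stored value to the user-supplied compare.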
+ storedval <<=1;
+ return (*m_ptr)(val1,storedval);
+ }
+};
+
+//------------------------------------------------------------------------------
+// Class HashMap
+// Fast hash table, for concurrent use;
+// stores a UPTR key and a UPTR value in each slot.
+// Duplicate keys are allowed (keys are compared as raw UPTRs);
+// duplicate values are allowed (values are compared using the comparison fn. provided),
+// but if no comparison function is provided then the values should be unique.
+//
+// Lookups don't require locks when fAsyncMode is specified; Insert and Delete
+// operations always require locks.
+// Inserting a duplicate value will assert in DEBUG mode; the PROPER way to perform inserts
+// is to take a lock, do a lookup, and Insert only if the lookup fails.
+//
+// In async mode, deleted slots are not immediately reclaimed (until a rehash), and
+// accesses to the hash table cause a transition to cooperative GC mode, and reclamation of old
+// hash maps (after a rehash) are deferred until GC time.
+// In sync mode, none of this is necessary; however calls to LookupValue must be synchronized as well.
+//
+// Algorithm:
+// The Hash table is an array of buckets, each bucket can contain 4 key/value pairs
+// Special key values are used to identify EMPTY and DELETED slots
+// Hash function uses the current size of the hash table and a SEED based on the key
+// to choose the bucket; the seed starts off as the key and gets refined every time
+// the hash function is re-applied.
+//
+// Inserts choose an empty slot in the current bucket for new entries. If the current bucket
+// is full, the seed is refined and a new bucket is chosen. If an empty slot is not found
+// after 8 retries, the hash table is expanded; this causes the current array of buckets to
+// be put on a free list, a new array of buckets is allocated, and all non-deleted entries
+// from the old hash table are rehashed to the new array.
+// The old arrays are reclaimed during the Compact phase, which should only be called during GC or
+// any other time it is guaranteed that no lookups are taking place.
+// Concurrent Insert and Delete operations need to be serialized.
+//
+// Delete operations mark the key in the slot as DELETED; the value is not removed, and inserts
+// don't reuse these slots. They get reclaimed during the expansion and compact phases.
+//
+//------------------------------------------------------------------------------
+
+class HashMap
+{
+public:
+
+ //@constructor
+ HashMap() DAC_EMPTY();
+ //destructor
+ ~HashMap() DAC_EMPTY();
+
+ // Init
+ void Init(BOOL fAsyncMode, LockOwner *pLock)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ Init(0, (Compare *)NULL,fAsyncMode, pLock);
+ }
+ // Init
+ void Init(DWORD cbInitialSize, BOOL fAsyncMode, LockOwner *pLock)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ Init(cbInitialSize, (Compare*)NULL, fAsyncMode, pLock);
+ }
+ // Init
+ void Init(CompareFnPtr ptr, BOOL fAsyncMode, LockOwner *pLock)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ Init(0, ptr, fAsyncMode, pLock);
+ }
+
+ // Init method
+ void Init(DWORD cbInitialSize, CompareFnPtr ptr, BOOL fAsyncMode, LockOwner *pLock);
+
+
+ //Init method
+ void Init(DWORD cbInitialSize, Compare* pCompare, BOOL fAsyncMode, LockOwner *pLock);
+
+ // check to see if the value is already in the Hash Table
+ // key should be > DELETED
+ // if provided, uses the comparison function ptr to compare values
+ // returns INVALIDENTRY if not found
+ UPTR LookupValue(UPTR key, UPTR value);
+
+ // Insert if the value is not already present
+ // it is illegal to insert duplicate values in the hash map
+ // do a lookup to verify the value is not already present
+
+ void InsertValue(UPTR key, UPTR value);
+
+ // Replace the value if present
+ // returns the previous value, or INVALIDENTRY if not present
+ // does not insert a new value under any circumstances
+
+ UPTR ReplaceValue(UPTR key, UPTR value);
+
+ // mark the entry as deleted and return the stored value
+ // returns INVALIDENTRY, if not found
+ UPTR DeleteValue (UPTR key, UPTR value);
+
+ // for unique keys, use this function to get the value that is
+ // stored in the hash table, returns INVALIDENTRY if key not found
+ UPTR Gethash(UPTR key);
+
+ // Called only when all threads are frozen, as during GC.
+ // For single-user mode, call Compact after every delete
+ // operation on the hash table.
+ void Compact();
+
+ // Remove all entries from the hash table
+ void Clear();
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ // inline helper, in non HASHTABLE_PROFILE mode becomes a NO-OP
+ void ProfileLookup(UPTR ntry, UPTR retValue);
+ // data members used for profiling
+#ifdef HASHTABLE_PROFILE
+ unsigned m_cbRehash; // number of times rehashed
+ unsigned m_cbRehashSlots; // number of slots that were rehashed
+ unsigned m_cbObsoleteTables;
+ unsigned m_cbTotalBuckets;
+ unsigned m_cbInsertProbesGt8; // inserts that needed more than 8 probes
+ LONG m_rgLookupProbes[HASHTABLE_LOOKUP_PROBES_DATA]; // lookup probes
+ UPTR maxFailureProbe; // cost of failed lookup
+
+ void DumpStatistics();
+#endif // HASHTABLE_PROFILE
+
+#if 0 // Test-only code for debugging this class.
+#ifndef DACCESS_COMPILE
+ static void LookupPerfTest(HashMap * table, const unsigned int MinThreshold);
+ static void HashMapTest();
+#endif // !DACCESS_COMPILE
+#endif // 0 // Test-only code for debugging this class.
+
+protected:
+ // static helper function
+ static UPTR PutEntry (Bucket* rgBuckets, UPTR key, UPTR value);
+private:
+
+ DWORD GetNearestIndex(DWORD cbInitialSize);
+
+#ifdef _DEBUG
+ static void Enter(HashMap *); // check valid to enter
+ static void Leave(HashMap *); // check valid to leave
+
+ typedef Holder<HashMap *, HashMap::Enter, HashMap::Leave> SyncAccessHolder;
+ BOOL m_fInSyncCode; // test for non-synchronous access
+#else // !_DEBUG
+ // in non DEBUG mode use a no-op helper
+ typedef NoOpBaseHolder<HashMap *> SyncAccessHolder;
+#endif // !_DEBUG
+
+ // compute the new size (as an index into g_rgPrimes), based on the
+ // number of slots in use; used when compacting or expanding
+ UPTR NewSize();
+ // create a new bucket array and rehash the non-deleted entries
+ void Rehash();
+ static DWORD GetSize(PTR_Bucket rgBuckets);
+ static void SetSize(Bucket* rgBuckets, size_t size);
+ PTR_Bucket Buckets();
+ UPTR CompareValues(const UPTR value1, const UPTR value2);
+
+ // For double hashing, compute the second hash function once, then add.
+ // H(key, i) = H1(key) + i * H2(key), where 0 <= i < numBuckets
+ static void HashFunction(const UPTR key, const UINT numBuckets, UINT &seed, UINT &incr);
+
+ Compare* m_pCompare; // compare object to be used in lookup
+ SIZE_T m_iPrimeIndex; // current size (index into prime array)
+ PTR_Bucket m_rgBuckets; // array of buckets
+
+ // track the number of inserts and deletes
+ SIZE_T m_cbPrevSlotsInUse;
+ SIZE_T m_cbInserts;
+ SIZE_T m_cbDeletes;
+ // mode of operation: asynchronous (concurrent readers) or single user
+ bool m_fAsyncMode;
+
+#ifdef _DEBUG
+ LPVOID m_lockData;
+ FnLockOwner m_pfnLockOwner;
+ EEThreadId m_writerThreadId;
+#endif // _DEBUG
+
+#ifdef _DEBUG
+ // A thread must own a lock for a hash if it is a writer.
+ BOOL OwnLock();
+#endif // _DEBUG
+
+public:
+ ///---------Iterator----------------
+
+ // Iterator,
+ class Iterator
+ {
+ PTR_Bucket m_pBucket;
+ PTR_Bucket m_pSentinel;
+ int m_id;
+ BOOL m_fEnd;
+
+ public:
+
+ // Constructor
+ Iterator(Bucket* pBucket) :
+ m_pBucket(dac_cast<PTR_Bucket>(pBucket)),
+ m_id(-1), m_fEnd(false)
+ {
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+
+ if (!m_pBucket) {
+ m_pSentinel = NULL;
+ m_fEnd = true;
+ return;
+ }
+ size_t cbSize = (PTR_size_t(m_pBucket))[0];
+ m_pBucket++;
+ m_pSentinel = m_pBucket+cbSize;
+ MoveNext(); // start
+ }
+
+ Iterator(const Iterator& iter)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pBucket = iter.m_pBucket;
+ m_pSentinel = iter.m_pSentinel;
+ m_id = iter.m_id;
+ m_fEnd = iter.m_fEnd;
+
+ }
+
+ //destructor
+ ~Iterator(){ LIMITED_METHOD_DAC_CONTRACT; };
+
+ // friend operator==
+ friend bool operator == (const Iterator& lhs, const Iterator& rhs)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (lhs.m_pBucket == rhs.m_pBucket && lhs.m_id == rhs.m_id);
+ }
+ // operator =
+ inline Iterator& operator= (const Iterator& iter)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pBucket = iter.m_pBucket;
+ m_pSentinel = iter.m_pSentinel;
+ m_id = iter.m_id;
+ m_fEnd = iter.m_fEnd;
+ return *this;
+ }
+
+ // operator ++
+ inline void operator++ ()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(!m_fEnd); // check we are not already at end
+ MoveNext();
+ }
+ // operator --
+
+
+
+ // accessors: GetKey(), returns the key
+ inline UPTR GetKey()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!m_fEnd); // check we are not already at end
+ return m_pBucket->m_rgKeys[m_id];
+ }
+
+
+ //accessors : GetValue(),
+ // returns the pointer that corresponds to the discriminator
+ inline UPTR GetValue()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(!m_fEnd); // check we are not already at end
+ return m_pBucket->GetValue(m_id);
+ }
+
+
+ // end(), check if the iterator is at the end of the bucket
+ inline BOOL end() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_fEnd;
+ }
+
+ protected:
+
+ void MoveNext()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ for (; m_pBucket < m_pSentinel; m_pBucket++)
+ { //loop thru all buckets
+ for (m_id = m_id+1; m_id < 4; m_id++)
+ { //loop through all slots
+ if (m_pBucket->m_rgKeys[m_id] > DELETED)
+ {
+ return;
+ }
+ }
+ m_id = -1;
+ }
+ m_fEnd = true;
+ }
+
+ };
+
+ inline Bucket* firstBucket()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return m_rgBuckets;
+ }
+
+ // return an iterator, positioned at the beginning of the bucket
+ inline Iterator begin()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return Iterator(m_rgBuckets);
+ }
+
+ inline SIZE_T GetCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_cbInserts-m_cbDeletes;
+ }
+};
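+
+// Illustrative usage sketch (not part of the original header), following the
+// locking discipline described above: take the lock, look up, and insert only
+// on a miss; then walk the table with the iterator. The Demo* names and the
+// compare callback are hypothetical.
+#if 0 // demo only
+static BOOL DemoCompare(UPTR candidate, UPTR stored)
+{
+    return candidate == stored;
+}
+
+static void DemoHashMapUsage(Crst *pCrst)
+{
+    HashMap map;
+    LockOwner lock = {pCrst, IsOwnerOfCrst};
+    map.Init(10, DemoCompare, FALSE /* fAsyncMode */, &lock);
+
+    CrstHolder holder(pCrst);                 // writers must hold the lock
+    if (map.LookupValue(17, 100) == (UPTR)INVALIDENTRY)
+        map.InsertValue(17, 100);             // insert only after a failed lookup
+    _ASSERTE(map.LookupValue(17, 100) == 100);
+
+    // Visit every non-deleted entry.
+    for (HashMap::Iterator it = map.begin(); !it.end(); ++it)
+    {
+        UPTR key = it.GetKey();      // raw key, always > DELETED here
+        UPTR value = it.GetValue();  // payload with the spare bits masked off
+        (void)key; (void)value;
+    }
+}
+#endif // demo only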
+
+//---------------------------------------------------------------------------------------
+// class PtrHashMap
+// Wrapper class for using the hash table to store pointer values.
+// The HashMap class requires that the high bit is always reset;
+// the allocator used within the runtime always allocates objects 8-byte aligned,
+// so we can shift right one bit and store the result in the hash table.
+class PtrHashMap
+{
+ HashMap m_HashMap;
+
+ // key really acts as a hash code. Sanitize it from special values used by the underlying HashMap.
+ inline static UPTR SanitizeKey(UPTR key)
+ {
+ return (key > DELETED) ? key : (key + 100);
+ }
+
+public:
+#ifndef DACCESS_COMPILE
+ void *operator new(size_t size, LoaderHeap *pHeap);
+ void operator delete(void *p);
+#endif // !DACCESS_COMPILE
+
+ // Init
+ void Init(BOOL fAsyncMode, LockOwner *pLock)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ Init(0,NULL,fAsyncMode,pLock);
+ }
+ // Init
+ void Init(DWORD cbInitialSize, BOOL fAsyncMode, LockOwner *pLock)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ Init(cbInitialSize, NULL, fAsyncMode,pLock);
+ }
+ // Init
+ void Init(CompareFnPtr ptr, BOOL fAsyncMode, LockOwner *pLock)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ Init(0, ptr, fAsyncMode,pLock);
+ }
+
+ // Init method
+ void Init(DWORD cbInitialSize, CompareFnPtr ptr, BOOL fAsyncMode, LockOwner *pLock);
+
+ // check to see if the value is already in the Hash Table
+ LPVOID LookupValue(UPTR key, LPVOID pv)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ key = SanitizeKey(key);
+
+ // the gmalloc allocator always allocates 8-byte aligned,
+ // so we can shift out the lowest bit
+ // (ptr right shift by 1)
+ UPTR value = (UPTR)pv;
+ _ASSERTE((value & 0x1) == 0);
+ value>>=1;
+ UPTR val = m_HashMap.LookupValue (key, value);
+ if (val != (UPTR) INVALIDENTRY)
+ {
+ val<<=1;
+ }
+ return (LPVOID)val;
+ }
+
+ // Insert if the value is not already present
+ // it is illegal to insert duplicate values in the hash map
+ // users should do a lookup to verify the value is not already present
+
+ void InsertValue(UPTR key, LPVOID pv)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ key = SanitizeKey(key);
+
+ // the gmalloc allocator always allocates 8-byte aligned,
+ // so we can shift out the lowest bit
+ // (ptr right shift by 1)
+ UPTR value = (UPTR)pv;
+ _ASSERTE((value & 0x1) == 0);
+ value>>=1;
+ m_HashMap.InsertValue (key, value);
+ }
+
+ // Replace the value if present
+ // returns the previous value, or INVALIDENTRY if not present
+ // does not insert a new value under any circumstances
+
+ LPVOID ReplaceValue(UPTR key, LPVOID pv)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ key = SanitizeKey(key);
+
+ // the gmalloc allocator always allocates 8-byte aligned,
+ // so we can shift out the lowest bit
+ // (ptr right shift by 1)
+ UPTR value = (UPTR)pv;
+ _ASSERTE((value & 0x1) == 0);
+ value>>=1;
+ UPTR val = m_HashMap.ReplaceValue (key, value);
+ if (val != (UPTR) INVALIDENTRY)
+ {
+ val<<=1;
+ }
+ return (LPVOID)val;
+ }
+
+ // mark the entry as deleted and return the stored value
+ // returns INVALIDENTRY if not found
+ LPVOID DeleteValue (UPTR key,LPVOID pv)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ key = SanitizeKey(key);
+
+ UPTR value = (UPTR)pv;
+ _ASSERTE((value & 0x1) == 0);
+ value >>=1 ;
+ UPTR val = m_HashMap.DeleteValue(key, value);
+ if (val != (UPTR) INVALIDENTRY)
+ {
+ val <<= 1;
+ }
+ return (LPVOID)val;
+ }
+
+ // for unique keys, use this function to get the value that is
+ // stored in the hash table, returns INVALIDENTRY if key not found
+ LPVOID Gethash(UPTR key)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ key = SanitizeKey(key);
+
+ UPTR val = m_HashMap.Gethash(key);
+ if (val != (UPTR) INVALIDENTRY)
+ {
+ val <<= 1;
+ }
+ return (LPVOID)val;
+ }
+
+ void Compact()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ m_HashMap.Compact();
+ }
+
+ void Clear()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ m_HashMap.Clear();
+ }
+
+ class PtrIterator
+ {
+ HashMap::Iterator iter;
+
+ public:
+ PtrIterator(HashMap& hashMap) : iter(hashMap.begin())
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ }
+ PtrIterator(Bucket* bucket) : iter(bucket)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ }
+
+ ~PtrIterator()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ }
+
+ BOOL end()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return iter.end();
+ }
+
+ PTR_VOID GetValue()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ UPTR val = iter.GetValue();
+ if (val != (UPTR) INVALIDENTRY)
+ {
+ val <<= 1;
+ }
+ return PTR_VOID(val);
+ }
+
+ void operator++()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ iter.operator++();
+ }
+ };
+
+ inline Bucket* firstBucket()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return m_HashMap.firstBucket();
+ }
+
+ // return an iterator, positioned at the beginning of the bucket
+ inline PtrIterator begin()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return PtrIterator(m_HashMap);
+ }
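+
+    // A typical (illustrative) walk over the wrapped map, for an instance
+    // 'map' of this wrapper class:
+    //
+    //     for (PtrIterator i = map.begin(); !i.end(); ++i)
+    //     {
+    //         PTR_VOID pv = i.GetValue();
+    //         // ... use pv ...
+    //     }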
+
+ inline SIZE_T GetCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_HashMap.GetCount();
+ }
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ {
+ SUPPORTS_DAC;
+ m_HashMap.EnumMemoryRegions(flags);
+ }
+#endif // DACCESS_COMPILE
+};
+
+//---------------------------------------------------------------------
+// inline Bucket*& NextObsolete (Bucket* rgBuckets)
+// get the next obsolete bucket in the chain
+inline
+Bucket*& NextObsolete (Bucket* rgBuckets)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return *(Bucket**)&((size_t*)rgBuckets)[1];
+}
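+
+// An illustrative use of the aliasing above (rgOld/rgPrev are hypothetical
+// names): the link to the next obsolete bucket array is kept in the second
+// size_t-sized slot of the retired bucket memory itself, so the obsolete
+// list needs no extra allocation:
+//
+//     Bucket* rgOld = ...;                 // bucket array being retired
+//     NextObsolete(rgOld) = rgPrev;        // chain it onto the obsolete list
+//     Bucket* next = NextObsolete(rgOld);  // next == rgPrev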
+
+#endif // !_HASH_H_
diff --git a/src/vm/hillclimbing.cpp b/src/vm/hillclimbing.cpp
new file mode 100644
index 0000000000..97cc72b818
--- /dev/null
+++ b/src/vm/hillclimbing.cpp
@@ -0,0 +1,440 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//=========================================================================
+
+//
+// HillClimbing.cpp
+//
+// Defines classes for the ThreadPool's HillClimbing concurrency-optimization
+// algorithm.
+//
+
+//=========================================================================
+
+//
+// TODO: write an essay about how/why this works. Maybe put it in BotR?
+//
+
+#include "common.h"
+#include "hillclimbing.h"
+#include "win32threadpool.h"
+
+//
+// Default compilation mode is /fp:precise, which disables FP intrinsics. This causes us to pull in FP routines (sin, cos, etc.) from
+// the CRT, and increases our download size by ~5k. We don't need the extra precision this gets us, so let's switch to
+// the intrinsic versions.
+//
+#ifdef _MSC_VER
+#pragma float_control(precise, off)
+#endif
+
+
+
+const double pi = 3.141592653589793;
+
+void HillClimbing::Initialize()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_wavePeriod = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HillClimbing_WavePeriod);
+ m_maxThreadWaveMagnitude = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HillClimbing_MaxWaveMagnitude);
+ m_threadMagnitudeMultiplier = (double)CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HillClimbing_WaveMagnitudeMultiplier) / 100.0;
+ m_samplesToMeasure = m_wavePeriod * (int)CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HillClimbing_WaveHistorySize);
+ m_targetThroughputRatio = (double)CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HillClimbing_Bias) / 100.0;
+ m_targetSignalToNoiseRatio = (double)CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HillClimbing_TargetSignalToNoiseRatio) / 100.0;
+ m_maxChangePerSecond = (double)CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HillClimbing_MaxChangePerSecond);
+ m_maxChangePerSample = (double)CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HillClimbing_MaxChangePerSample);
+ m_sampleIntervalLow = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HillClimbing_SampleIntervalLow);
+ m_sampleIntervalHigh = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HillClimbing_SampleIntervalHigh);
+ m_throughputErrorSmoothingFactor = (double)CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HillClimbing_ErrorSmoothingFactor) / 100.0;
+ m_gainExponent = (double)CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HillClimbing_GainExponent) / 100.0;
+ m_maxSampleError = (double)CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HillClimbing_MaxSampleErrorPercent) / 100.0;
+ m_currentControlSetting = 0;
+ m_totalSamples = 0;
+ m_lastThreadCount = 0;
+ m_averageThroughputNoise = 0;
+ m_elapsedSinceLastChange = 0;
+ m_completionsSinceLastChange = 0;
+ m_accumulatedCompletionCount = 0;
+ m_accumulatedSampleDuration = 0;
+
+ m_samples = new double[m_samplesToMeasure];
+ m_threadCounts = new double[m_samplesToMeasure];
+
+ // seed our random number generator with the CLR instance ID and the process ID, to avoid correlations with other CLR ThreadPool instances.
+#ifndef DACCESS_COMPILE
+ m_randomIntervalGenerator.Init(((int)GetClrInstanceId() << 16) ^ (int)GetCurrentProcessId());
+#endif
+ m_currentSampleInterval = m_randomIntervalGenerator.Next(m_sampleIntervalLow, m_sampleIntervalHigh+1);
+}
+
+int HillClimbing::Update(int currentThreadCount, double sampleDuration, int numCompletions, int* pNewSampleInterval)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef DACCESS_COMPILE
+ return 1;
+#else
+
+ //
+ // If someone changed the thread count without telling us, update our records accordingly.
+ //
+ if (currentThreadCount != m_lastThreadCount)
+ ForceChange(currentThreadCount, Initializing);
+
+ //
+ // Update the cumulative stats for this thread count
+ //
+ m_elapsedSinceLastChange += sampleDuration;
+ m_completionsSinceLastChange += numCompletions;
+
+ //
+ // Add in any data we've already collected about this sample
+ //
+ sampleDuration += m_accumulatedSampleDuration;
+ numCompletions += m_accumulatedCompletionCount;
+
+ //
+ // We need to make sure we're collecting reasonably accurate data. Since we're just counting the end
+    // of each work item, we are going to be missing some data about what really happened during the
+ // sample interval. The count produced by each thread includes an initial work item that may have
+ // started well before the start of the interval, and each thread may have been running some new
+ // work item for some time before the end of the interval, which did not yet get counted. So
+ // our count is going to be off by +/- threadCount workitems.
+ //
+ // The exception is that the thread that reported to us last time definitely wasn't running any work
+ // at that time, and the thread that's reporting now definitely isn't running a work item now. So
+ // we really only need to consider threadCount-1 threads.
+ //
+ // Thus the percent error in our count is +/- (threadCount-1)/numCompletions.
+ //
+ // We cannot rely on the frequency-domain analysis we'll be doing later to filter out this error, because
+ // of the way it accumulates over time. If this sample is off by, say, 33% in the negative direction,
+ // then the next one likely will be too. The one after that will include the sum of the completions
+ // we missed in the previous samples, and so will be 33% positive. So every three samples we'll have
+ // two "low" samples and one "high" sample. This will appear as periodic variation right in the frequency
+ // range we're targeting, which will not be filtered by the frequency-domain translation.
+ //
+ if (m_totalSamples > 0 && ((currentThreadCount-1.0) / numCompletions) >= m_maxSampleError)
+ {
+ // not accurate enough yet. Let's accumulate the data so far, and tell the ThreadPool
+ // to collect a little more.
+ m_accumulatedSampleDuration = sampleDuration;
+ m_accumulatedCompletionCount = numCompletions;
+ *pNewSampleInterval = 10;
+ return currentThreadCount;
+ }
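+
+    // For example (illustrative numbers): with currentThreadCount == 4 and
+    // numCompletions == 10, the error bound is (4-1)/10 == 30%; if
+    // m_maxSampleError were 15%, this sample would be rejected and folded
+    // into the next one.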
+
+ //
+    // We've got enough data for our sample; reset our accumulators for next time.
+ //
+ m_accumulatedSampleDuration = 0;
+ m_accumulatedCompletionCount = 0;
+
+ //
+ // Add the current thread count and throughput sample to our history
+ //
+ double throughput = (double)numCompletions / sampleDuration;
+ FireEtwThreadPoolWorkerThreadAdjustmentSample(throughput, GetClrInstanceId());
+
+ int sampleIndex = m_totalSamples % m_samplesToMeasure;
+ m_samples[sampleIndex] = throughput;
+ m_threadCounts[sampleIndex] = currentThreadCount;
+ m_totalSamples++;
+
+ //
+ // Set up defaults for our metrics
+ //
+ Complex threadWaveComponent = 0;
+ Complex throughputWaveComponent = 0;
+ double throughputErrorEstimate = 0;
+ Complex ratio = 0;
+ double confidence = 0;
+
+ HillClimbingStateTransition transition = Warmup;
+
+ //
+ // How many samples will we use? It must be at least the three wave periods we're looking for, and it must also be a whole
+ // multiple of the primary wave's period; otherwise the frequency we're looking for will fall between two frequency bands
+ // in the Fourier analysis, and we won't be able to measure it accurately.
+ //
+ int sampleCount = ((int)min(m_totalSamples-1, m_samplesToMeasure) / m_wavePeriod) * m_wavePeriod;
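+
+    // E.g. (illustrative numbers): with m_totalSamples-1 == 75, m_samplesToMeasure == 100,
+    // and m_wavePeriod == 20, sampleCount == (75 / 20) * 20 == 60 -- a whole number of
+    // wave periods.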
+
+ if (sampleCount > m_wavePeriod)
+ {
+ //
+ // Average the throughput and thread count samples, so we can scale the wave magnitudes later.
+ //
+ double sampleSum = 0;
+ double threadSum = 0;
+ for (int i = 0; i < sampleCount; i++)
+ {
+ sampleSum += m_samples[(m_totalSamples - sampleCount + i) % m_samplesToMeasure];
+ threadSum += m_threadCounts[(m_totalSamples - sampleCount + i) % m_samplesToMeasure];
+ }
+ double averageThroughput = sampleSum / sampleCount;
+ double averageThreadCount = threadSum / sampleCount;
+
+ if (averageThroughput > 0 && averageThreadCount > 0)
+ {
+ //
+ // Calculate the periods of the adjacent frequency bands we'll be using to measure noise levels.
+ // We want the two adjacent Fourier frequency bands.
+ //
+ double adjacentPeriod1 = sampleCount / (((double)sampleCount / (double)m_wavePeriod) + 1);
+ double adjacentPeriod2 = sampleCount / (((double)sampleCount / (double)m_wavePeriod) - 1);
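+
+            // E.g. (illustrative numbers): with sampleCount == 60 and m_wavePeriod == 20,
+            // the primary band holds 3 full waves, and the adjacent bands have periods of
+            // 60/4 == 15 and 60/2 == 30 samples.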
+
+ //
+            // Get the three different frequency components of the throughput (scaled by average
+ // throughput). Our "error" estimate (the amount of noise that might be present in the
+ // frequency band we're really interested in) is the average of the adjacent bands.
+ //
+ throughputWaveComponent = GetWaveComponent(m_samples, sampleCount, m_wavePeriod) / averageThroughput;
+ throughputErrorEstimate = abs(GetWaveComponent(m_samples, sampleCount, adjacentPeriod1) / averageThroughput);
+ if (adjacentPeriod2 <= sampleCount)
+ throughputErrorEstimate = max(throughputErrorEstimate, abs(GetWaveComponent(m_samples, sampleCount, adjacentPeriod2) / averageThroughput));
+
+ //
+ // Do the same for the thread counts, so we have something to compare to. We don't measure thread count
+ // noise, because there is none; these are exact measurements.
+ //
+ threadWaveComponent = GetWaveComponent(m_threadCounts, sampleCount, m_wavePeriod) / averageThreadCount;
+
+ //
+ // Update our moving average of the throughput noise. We'll use this later as feedback to
+ // determine the new size of the thread wave.
+ //
+ if (m_averageThroughputNoise == 0)
+ m_averageThroughputNoise = throughputErrorEstimate;
+ else
+ m_averageThroughputNoise = (m_throughputErrorSmoothingFactor * throughputErrorEstimate) + ((1.0-m_throughputErrorSmoothingFactor) * m_averageThroughputNoise);
+
+ if (abs(threadWaveComponent) > 0)
+ {
+ //
+ // Adjust the throughput wave so it's centered around the target wave, and then calculate the adjusted throughput/thread ratio.
+ //
+ ratio = (throughputWaveComponent - (m_targetThroughputRatio * threadWaveComponent)) / threadWaveComponent;
+ transition = ClimbingMove;
+ }
+ else
+ {
+ ratio = 0;
+ transition = Stabilizing;
+ }
+
+ //
+ // Calculate how confident we are in the ratio. More noise == less confident. This has
+ // the effect of slowing down movements that might be affected by random noise.
+ //
+ double noiseForConfidence = max(m_averageThroughputNoise, throughputErrorEstimate);
+ if (noiseForConfidence > 0)
+ confidence = (abs(threadWaveComponent) / noiseForConfidence) / m_targetSignalToNoiseRatio;
+ else
+ confidence = 1.0; //there is no noise!
+
+ }
+ }
+
+ //
+ // We use just the real part of the complex ratio we just calculated. If the throughput signal
+ // is exactly in phase with the thread signal, this will be the same as taking the magnitude of
+ // the complex move and moving that far up. If they're 180 degrees out of phase, we'll move
+ // backward (because this indicates that our changes are having the opposite of the intended effect).
+    // If they're 90 degrees out of phase, we won't move at all, because we can't tell whether we're
+ // having a negative or positive effect on throughput.
+ //
+ double move = min(1.0, max(-1.0, ratio.r));
+
+ //
+ // Apply our confidence multiplier.
+ //
+ move *= min(1.0, max(0.0, confidence));
+
+ //
+ // Now apply non-linear gain, such that values around zero are attenuated, while higher values
+ // are enhanced. This allows us to move quickly if we're far away from the target, but more slowly
+ // if we're getting close, giving us rapid ramp-up without wild oscillations around the target.
+ //
+ double gain = m_maxChangePerSecond * sampleDuration;
+ move = pow(fabs(move), m_gainExponent) * (move >= 0.0 ? 1 : -1) * gain;
+ move = min(move, m_maxChangePerSample);
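+
+    // E.g. (illustrative numbers): with m_gainExponent == 2.0, a tentative move of 0.5
+    // becomes 0.25 * gain while a move of 1.0 stays 1.0 * gain, so small (likely noisy)
+    // moves are damped much more than large, confident ones.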
+
+ //
+ // If the result was positive, and CPU is > 95%, refuse the move.
+ //
+ if (move > 0.0 && ThreadpoolMgr::cpuUtilization > CpuUtilizationHigh)
+ move = 0.0;
+
+ //
+ // Apply the move to our control setting
+ //
+ m_currentControlSetting += move;
+
+ //
+ // Calculate the new thread wave magnitude, which is based on the moving average we've been keeping of
+ // the throughput error. This average starts at zero, so we'll start with a nice safe little wave at first.
+ //
+ int newThreadWaveMagnitude = (int)(0.5 + (m_currentControlSetting * m_averageThroughputNoise * m_targetSignalToNoiseRatio * m_threadMagnitudeMultiplier * 2.0));
+ newThreadWaveMagnitude = min(newThreadWaveMagnitude, m_maxThreadWaveMagnitude);
+ newThreadWaveMagnitude = max(newThreadWaveMagnitude, 1);
+
+ //
+ // Make sure our control setting is within the ThreadPool's limits
+ //
+ m_currentControlSetting = min(ThreadpoolMgr::MaxLimitTotalWorkerThreads-newThreadWaveMagnitude, m_currentControlSetting);
+ m_currentControlSetting = max(ThreadpoolMgr::MinLimitTotalWorkerThreads, m_currentControlSetting);
+
+ //
+ // Calculate the new thread count (control setting + square wave)
+ //
+ int newThreadCount = (int)(m_currentControlSetting + newThreadWaveMagnitude * ((m_totalSamples / (m_wavePeriod/2)) % 2));
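+
+    // E.g. (illustrative numbers): with m_wavePeriod == 20, the factor
+    // (m_totalSamples / 10) % 2 toggles between 0 and 1 every 10 samples, so the
+    // magnitude is added for half of each period and omitted for the other half --
+    // a square wave riding on top of m_currentControlSetting.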
+
+ //
+ // Make sure the new thread count doesn't exceed the ThreadPool's limits
+ //
+ newThreadCount = min(ThreadpoolMgr::MaxLimitTotalWorkerThreads, newThreadCount);
+ newThreadCount = max(ThreadpoolMgr::MinLimitTotalWorkerThreads, newThreadCount);
+
+ //
+ // Record these numbers for posterity
+ //
+ FireEtwThreadPoolWorkerThreadAdjustmentStats(
+ sampleDuration,
+ throughput,
+ threadWaveComponent.r,
+ throughputWaveComponent.r,
+ throughputErrorEstimate,
+ m_averageThroughputNoise,
+ ratio.r,
+ confidence,
+ m_currentControlSetting,
+ (unsigned short)newThreadWaveMagnitude,
+ GetClrInstanceId());
+
+ //
+ // If all of this caused an actual change in thread count, log that as well.
+ //
+ if (newThreadCount != currentThreadCount)
+ ChangeThreadCount(newThreadCount, transition);
+
+ //
+ // Return the new thread count and sample interval. This is randomized to prevent correlations with other periodic
+ // changes in throughput. Among other things, this prevents us from getting confused by Hill Climbing instances
+ // running in other processes.
+ //
+ // If we're at minThreads, and we seem to be hurting performance by going higher, we can't go any lower to fix this. So
+ // we'll simply stay at minThreads much longer, and only occasionally try a higher value.
+ //
+ if (ratio.r < 0.0 && newThreadCount == ThreadpoolMgr::MinLimitTotalWorkerThreads)
+ *pNewSampleInterval = (int)(0.5 + m_currentSampleInterval * (10.0 * max(-ratio.r, 1.0)));
+ else
+ *pNewSampleInterval = m_currentSampleInterval;
+
+ return newThreadCount;
+
+#endif //DACCESS_COMPILE
+}
+
+
+void HillClimbing::ForceChange(int newThreadCount, HillClimbingStateTransition transition)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (newThreadCount != m_lastThreadCount)
+ {
+ m_currentControlSetting += (newThreadCount - m_lastThreadCount);
+ ChangeThreadCount(newThreadCount, transition);
+ }
+}
+
+
+void HillClimbing::ChangeThreadCount(int newThreadCount, HillClimbingStateTransition transition)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_lastThreadCount = newThreadCount;
+ m_currentSampleInterval = m_randomIntervalGenerator.Next(m_sampleIntervalLow, m_sampleIntervalHigh+1);
+ double throughput = (m_elapsedSinceLastChange > 0) ? (m_completionsSinceLastChange / m_elapsedSinceLastChange) : 0;
+ LogTransition(newThreadCount, throughput, transition);
+ m_elapsedSinceLastChange = 0;
+ m_completionsSinceLastChange = 0;
+}
+
+
+GARY_IMPL(HillClimbingLogEntry, HillClimbingLog, HillClimbingLogCapacity);
+GVAL_IMPL(int, HillClimbingLogFirstIndex);
+GVAL_IMPL(int, HillClimbingLogSize);
+
+
+void HillClimbing::LogTransition(int threadCount, double throughput, HillClimbingStateTransition transition)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifndef DACCESS_COMPILE
+ int index = (HillClimbingLogFirstIndex + HillClimbingLogSize) % HillClimbingLogCapacity;
+
+ if (HillClimbingLogSize == HillClimbingLogCapacity)
+ {
+ HillClimbingLogFirstIndex = (HillClimbingLogFirstIndex + 1) % HillClimbingLogCapacity;
+ HillClimbingLogSize--; //hide this slot while we update it
+ }
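+
+    // E.g. (illustrative numbers): with the log full (first index 0, size 200),
+    // index == (0 + 200) % 200 == 0; the oldest entry is dropped by advancing the
+    // first index to 1, and the new entry is written into the slot just vacated.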
+
+ HillClimbingLogEntry* entry = &HillClimbingLog[index];
+
+ entry->TickCount = GetTickCount();
+ entry->Transition = transition;
+ entry->NewControlSetting = threadCount;
+
+ entry->LastHistoryCount = (int)(min(m_totalSamples, m_samplesToMeasure) / m_wavePeriod) * m_wavePeriod;
+ entry->LastHistoryMean = (float) throughput;
+
+ HillClimbingLogSize++;
+
+ FireEtwThreadPoolWorkerThreadAdjustmentAdjustment(
+ throughput,
+ threadCount,
+ transition,
+ GetClrInstanceId());
+
+#endif //DACCESS_COMPILE
+}
+
+Complex HillClimbing::GetWaveComponent(double* samples, int sampleCount, double period)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(sampleCount >= period); //can't measure a wave that doesn't fit
+ _ASSERTE(period >= 2); //can't measure above the Nyquist frequency
+
+ //
+ // Calculate the sinusoid with the given period.
+ // We're using the Goertzel algorithm for this. See http://en.wikipedia.org/wiki/Goertzel_algorithm.
+ //
+ double w = 2.0 * pi / period;
+ double cosine = cos(w);
+ double sine = sin(w);
+ double coeff = 2.0 * cosine;
+ double q0 = 0, q1 = 0, q2 = 0;
+
+ for (int i = 0; i < sampleCount; i++)
+ {
+ double sample = samples[(m_totalSamples - sampleCount + i) % m_samplesToMeasure];
+
+ q0 = coeff * q1 - q2 + sample;
+ q2 = q1;
+ q1 = q0;
+ }
+
+ return Complex(q1 - q2 * cosine, q2 * sine) / (double)sampleCount;
+}
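+
+// A minimal standalone sketch of the same Goertzel recurrence (GoertzelSketch is
+// a hypothetical helper, not part of this file), shown for a plain array rather
+// than the circular sample buffer used above:
+//
+//     Complex GoertzelSketch(const double* x, int n, double period)
+//     {
+//         double w = 2.0 * pi / period;
+//         double coeff = 2.0 * cos(w);
+//         double q0 = 0, q1 = 0, q2 = 0;
+//         for (int i = 0; i < n; i++)
+//         {
+//             q0 = coeff * q1 - q2 + x[i];    // Goertzel recurrence
+//             q2 = q1;
+//             q1 = q0;
+//         }
+//         return Complex(q1 - q2 * cos(w), q2 * sin(w)) / (double)n;
+//     }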
+
diff --git a/src/vm/hillclimbing.h b/src/vm/hillclimbing.h
new file mode 100644
index 0000000000..1523294b76
--- /dev/null
+++ b/src/vm/hillclimbing.h
@@ -0,0 +1,98 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//=========================================================================
+
+//
+// HillClimbing.h
+//
+// Defines classes for the ThreadPool's HillClimbing concurrency-optimization
+// algorithm.
+//
+
+//=========================================================================
+
+#ifndef _HILLCLIMBING_H
+#define _HILLCLIMBING_H
+
+#include "complex.h"
+#include "random.h"
+
+enum HillClimbingStateTransition
+{
+ Warmup,
+ Initializing,
+ RandomMove,
+ ClimbingMove,
+ ChangePoint,
+ Stabilizing,
+ Starvation, //used by ThreadpoolMgr
+ ThreadTimedOut, //used by ThreadpoolMgr
+ Undefined,
+};
+
+
+class HillClimbing
+{
+private:
+ int m_wavePeriod;
+ int m_samplesToMeasure;
+ double m_targetThroughputRatio;
+ double m_targetSignalToNoiseRatio;
+ double m_maxChangePerSecond;
+ double m_maxChangePerSample;
+ int m_maxThreadWaveMagnitude;
+ DWORD m_sampleIntervalLow;
+ double m_threadMagnitudeMultiplier;
+ DWORD m_sampleIntervalHigh;
+ double m_throughputErrorSmoothingFactor;
+ double m_gainExponent;
+ double m_maxSampleError;
+
+ double m_currentControlSetting;
+ LONGLONG m_totalSamples;
+ int m_lastThreadCount;
+ double m_elapsedSinceLastChange; //elapsed seconds since last thread count change
+ double m_completionsSinceLastChange; //number of completions since last thread count change
+
+ double m_averageThroughputNoise;
+
+ double* m_samples; //Circular buffer of the last m_samplesToMeasure samples
+ double* m_threadCounts; //Thread counts effective at each of m_samples
+
+ unsigned int m_currentSampleInterval;
+ CLRRandom m_randomIntervalGenerator;
+
+ int m_accumulatedCompletionCount;
+ double m_accumulatedSampleDuration;
+
+ void ChangeThreadCount(int newThreadCount, HillClimbingStateTransition transition);
+ void LogTransition(int threadCount, double throughput, HillClimbingStateTransition transition);
+
+ Complex GetWaveComponent(double* samples, int sampleCount, double period);
+
+public:
+ void Initialize();
+ int Update(int currentThreadCount, double sampleDuration, int numCompletions, int* pNewSampleInterval);
+ void ForceChange(int newThreadCount, HillClimbingStateTransition transition);
+};
+
+#define HillClimbingLogCapacity 200
+
+struct HillClimbingLogEntry
+{
+ DWORD TickCount;
+ HillClimbingStateTransition Transition;
+ int NewControlSetting;
+ int LastHistoryCount;
+ float LastHistoryMean;
+};
+
+GARY_DECL(HillClimbingLogEntry, HillClimbingLog, HillClimbingLogCapacity);
+GVAL_DECL(int, HillClimbingLogFirstIndex);
+GVAL_DECL(int, HillClimbingLogSize);
+typedef DPTR(HillClimbingLogEntry) PTR_HillClimbingLogEntry;
+
+#endif
diff --git a/src/vm/hostexecutioncontext.cpp b/src/vm/hostexecutioncontext.cpp
new file mode 100644
index 0000000000..e5f1dec4a3
--- /dev/null
+++ b/src/vm/hostexecutioncontext.cpp
@@ -0,0 +1,231 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#include "common.h"
+#ifdef FEATURE_CAS_POLICY
+
+#include "hostexecutioncontext.h"
+#include "corhost.h"
+#include "security.h"
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+IHostSecurityContext *HostExecutionContextManager::m_pRestrictedHostContext = NULL;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+// initialize HostRestrictedContext
+void HostExecutionContextManager::InitializeRestrictedContext()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ _ASSERTE(m_pRestrictedHostContext == NULL);
+
+ IHostSecurityManager *pSM = CorHost2::GetHostSecurityManager();
+ if (pSM)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ pSM->GetSecurityContext(eRestrictedContext, &m_pRestrictedHostContext);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+// notify the Host to SetRestrictedContext
+void HostExecutionContextManager::SetHostRestrictedContext()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if(m_pRestrictedHostContext != NULL)
+ {
+ IHostSecurityManager *pSM = CorHost2::GetHostSecurityManager();
+ if (pSM)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ pSM->SetSecurityContext(eRestrictedContext, m_pRestrictedHostContext);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+
+FCIMPL0(FC_BOOL_RET, HostExecutionContextManager::HostPresent)
+{
+ FCALL_CONTRACT;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ FC_RETURN_BOOL(CorHost2::GetHostSecurityManager() != NULL);
+#else // !FEATURE_INCLUDE_ALL_INTERFACES
+ FC_RETURN_BOOL(FALSE);
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+FCIMPLEND
+
+FCIMPL1(HRESULT, HostExecutionContextManager::ReleaseSecurityContext, LPVOID handle)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(handle));
+ } CONTRACTL_END;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSecurityManager *pSM = CorHost2::GetHostSecurityManager();
+ if (pSM)
+ {
+ // get the IUnknown pointer from handle
+ IHostSecurityContext* pSecurityContext = (IHostSecurityContext*)handle;
+ // null out the IUnknown pointer in the handle
+ //hTokenSAFE->SetHandle((void*)NULL);
+ // release the IUnknown pointer if it is non null
+ if (pSecurityContext != NULL)
+ {
+ pSecurityContext->Release();
+ }
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ HELPER_METHOD_FRAME_END();
+ return S_OK;
+
+}
+FCIMPLEND
+
+FCIMPL1(HRESULT, HostExecutionContextManager::CaptureSecurityContext, SafeHandle* hTokenUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(hTokenUNSAFE));
+ } CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSecurityContext* pCurrentHostSecurityContext = NULL;
+ IHostSecurityContext* pCapturedSecurityContext = NULL;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ HRESULT hr = S_OK;
+ SAFEHANDLE hTokenSAFE = (SAFEHANDLE) hTokenUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(hTokenSAFE);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSecurityManager *pSM = CorHost2::GetHostSecurityManager();
+ if (pSM)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pSM->GetSecurityContext(eCurrentContext, &pCurrentHostSecurityContext);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (hr == S_OK)
+ {
+ if(pCurrentHostSecurityContext != NULL)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pCurrentHostSecurityContext->Capture(&pCapturedSecurityContext);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ hTokenSAFE->SetHandle((void*)pCapturedSecurityContext);
+ SafeRelease(pCurrentHostSecurityContext);
+ }
+ }
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+
+ HELPER_METHOD_FRAME_END();
+ return hr;
+
+}
+FCIMPLEND
+
+FCIMPL2(HRESULT, HostExecutionContextManager::CloneSecurityContext, SafeHandle* hTokenUNSAFE, SafeHandle* hTokenClonedUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(hTokenUNSAFE));
+ PRECONDITION(CheckPointer(hTokenClonedUNSAFE));
+ } CONTRACTL_END;
+
+ SAFEHANDLE hTokenClonedSAFE = (SAFEHANDLE) hTokenClonedUNSAFE;
+ SAFEHANDLE hTokenSAFE = (SAFEHANDLE)hTokenUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(hTokenSAFE, hTokenClonedSAFE);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSecurityManager *pSM = CorHost2::GetHostSecurityManager();
+ if (pSM)
+ {
+ IHostSecurityContext* pSecurityContext = (IHostSecurityContext*)hTokenSAFE->GetHandle();
+ if (pSecurityContext != NULL)
+ {
+ pSecurityContext->AddRef();
+ hTokenClonedSAFE->SetHandle((void*)pSecurityContext);
+ }
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ HELPER_METHOD_FRAME_END();
+ return S_OK;
+}
+FCIMPLEND
+
+FCIMPL3(HRESULT, HostExecutionContextManager::SetSecurityContext, SafeHandle* hTokenUNSAFE, CLR_BOOL fReturnPrevious, SafeHandle* hTokenPreviousUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(hTokenUNSAFE));
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ SAFEHANDLE hTokenPreviousSAFE = (SAFEHANDLE) hTokenPreviousUNSAFE;
+ SAFEHANDLE hTokenSAFE = (SAFEHANDLE) hTokenUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(hTokenSAFE, hTokenPreviousSAFE);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSecurityManager *pSM = CorHost2::GetHostSecurityManager();
+ if (pSM)
+ {
+ if (fReturnPrevious)
+ {
+ IHostSecurityContext* pPreviousHostSecurityContext = NULL;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pSM->GetSecurityContext(eCurrentContext, &pPreviousHostSecurityContext);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+ // store the previous host context in the safe handle
+ hTokenPreviousSAFE->SetHandle((void*)pPreviousHostSecurityContext);
+ }
+
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pSM->SetSecurityContext(eCurrentContext, (IHostSecurityContext*)hTokenSAFE->GetHandle());
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ HELPER_METHOD_FRAME_END();
+ return hr;
+}
+FCIMPLEND
+#endif // #ifdef FEATURE_CAS_POLICY
+
diff --git a/src/vm/hostexecutioncontext.h b/src/vm/hostexecutioncontext.h
new file mode 100644
index 0000000000..d67c734755
--- /dev/null
+++ b/src/vm/hostexecutioncontext.h
@@ -0,0 +1,30 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef __hostexecutioncontext_h__
+#define __hostexecutioncontext_h__
+
+#ifdef FEATURE_CAS_POLICY
+
+class HostExecutionContextManager
+{
+public:
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ static IHostSecurityContext* m_pRestrictedHostContext;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ static void InitializeRestrictedContext();
+ static void SetHostRestrictedContext();
+
+ static FCDECL0(FC_BOOL_RET, HostPresent);
+ static FCDECL1(HRESULT, ReleaseSecurityContext, LPVOID handle);
+ static FCDECL1(HRESULT, CaptureSecurityContext, SafeHandle* hTokenUNSAFE);
+ static FCDECL2(HRESULT, CloneSecurityContext, SafeHandle* hTokenUNSAFE, SafeHandle* hTokenClonedUNSAFE);
+ static FCDECL3(HRESULT, SetSecurityContext, SafeHandle* hTokenUNSAFE, CLR_BOOL fReturnPrevious, SafeHandle* hTokenPreviousUNSAFE);
+};
+#endif // #ifdef FEATURE_CAS_POLICY
+#endif // __hostexecutioncontext_h__
+
diff --git a/src/vm/hosting.cpp b/src/vm/hosting.cpp
new file mode 100644
index 0000000000..6003aefb32
--- /dev/null
+++ b/src/vm/hosting.cpp
@@ -0,0 +1,1906 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#include "common.h"
+
+#include "hosting.h"
+#include "mscoree.h"
+#include "mscoreepriv.h"
+#include "corhost.h"
+#include "threads.h"
+
+#if defined(FEATURE_CLICKONCE)
+#include "isolationpriv.h"
+#include "shlwapi.h"
+#endif
+
+#define countof(x) (sizeof(x) / sizeof(x[0]))
+
+//Copied from winbase.h
+#ifndef STARTF_TITLEISAPPID
+#define STARTF_TITLEISAPPID 0x00001000
+#endif
+#ifndef STARTF_PREVENTPINNING
+#define STARTF_PREVENTPINNING 0x00002000
+#endif
+
+//Flags encoded in the first parameter of CorLaunchApplication.
+#define MASK_NOTPINNABLE 0x80000000
+#define MASK_HOSTTYPE 0x00000003
+#define MASK_DONT_SHOW_INSTALL_DIALOG 0x00000100
+
+#ifdef _DEBUG
+// This function adds a static annotation read by SCAN to indicate HOST_CALLS. Its
+// purpose is to be called from the BEGIN_SO_TOLERANT_CODE_CALLING_HOST macro, to
+// effectively mark all functions that use BEGIN_SO_TOLERANT_CODE_CALLING_HOST as being
+// HOST_CALLS. If you hit a SCAN violation that references AddHostCallsStaticMarker, then
+// you have a function marked as HOST_NOCALLS that eventually calls into a function that
+// uses BEGIN_SO_TOLERANT_CODE_CALLING_HOST.
+DEBUG_NOINLINE void AddHostCallsStaticMarker()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_HOST_CALLS;
+
+ METHOD_CANNOT_BE_FOLDED_DEBUG;
+}
+#endif //_DEBUG
+
+//
+// memory management functions
+//
+
+// global debug only tracking utilities
+#ifdef _DEBUG
+
+static const LONG MaxGlobalAllocCount = 8;
+
+class GlobalAllocStore {
+public:
+ static void AddAlloc (LPVOID p)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (!p) {
+ return;
+ }
+ if (m_Disabled) {
+ return;
+ }
+
+ //InterlockedIncrement (&numMemWriter);
+ //if (CheckMemFree) {
+ // goto Return;
+ //}
+
+        //m_Count is the number of allocations we've ever tried; it's OK for it to be
+        //bigger than the size of m_Alloc[]
+ InterlockedIncrement (&m_Count);
+
+        //this is by no means an accurate record of heap allocations.
+        //the algorithm used here can't guarantee an allocation is saved in
+        //m_Alloc[] even when there's enough free space. However this is only used
+        //for debugging purposes and, most importantly, m_Count is accurate.
+ for (size_t n = 0; n < countof(m_Alloc); n ++) {
+ if (m_Alloc[n] == 0) {
+ if (InterlockedCompareExchangeT(&m_Alloc[n],p,0) == 0) {
+ return;
+ }
+ }
+ }
+
+ //InterlockedDecrement (&numMemWriter);
+ }
+
+    //this is called in the non-host case, where we don't care about frees that
+    //happen after the alloc store is disabled
+ static BOOL RemoveAlloc (LPVOID p)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_Disabled)
+ {
+ return TRUE;
+ }
+        //decrement the counter even though we might not find the allocation
+        //in m_Alloc, because it's possible for an allocation not to be saved
+        //in the array
+        InterlockedDecrement (&m_Count);
+        // Linear search
+ for (size_t n = 0; n < countof(m_Alloc); n ++) {
+ if (m_Alloc[n] == p) {
+ m_Alloc[n] = 0;
+ return TRUE;
+ }
+ }
+ return FALSE;
+ }
+
+    //this is called in the host case, where if the store is disabled we want to
+    //guarantee we don't try to free anything the host doesn't know about
+ static void ValidateFree(LPVOID p)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (p == 0) {
+ return;
+ }
+ if (m_Disabled) {
+ for (size_t n = 0; n < countof(m_Alloc); n ++) {
+ //there could be miss, because an allocation might not be saved
+ //in the array
+ if (m_Alloc[n] == p) {
+ _ASSERTE (!"Free a memory that host interface does not know");
+ return;
+ }
+ }
+ }
+ }
+
+ static void Validate()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_Count > MaxGlobalAllocCount) {
+ _ASSERTE (!"Using too many memory allocator before Host Interface is set up");
+ }
+
+ //while (numMemWriter != 0) {
+ // Sleep(5);
+ //}
+ //qsort (GlobalMemAddr, (MemAllocCount>MaxAllocCount)?MaxAllocCount:MemAllocCount, sizeof(LPVOID), MemAddrCompare);
+ }
+
+ static void Disable ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (!m_Disabled)
+ {
+ // Let all threads know
+ InterlockedIncrement((LONG*)&m_Disabled);
+ }
+ }
+
+private:
+ static BOOL m_Disabled;
+ static LPVOID m_Alloc[MaxGlobalAllocCount];
+    //m_Count is the number of allocations we've tried; it's legal for it to be
+    //bigger than the size of m_Alloc[]
+ static LONG m_Count;
+ // static LONG numMemWriter = 0;
+};
+
+// used from corhost.cpp
+void ValidateHostInterface()
+{
+ WRAPPER_NO_CONTRACT;
+
+ GlobalAllocStore::Validate();
+ GlobalAllocStore::Disable();
+}
+
+void DisableGlobalAllocStore ()
+{
+ WRAPPER_NO_CONTRACT;
+ GlobalAllocStore::Disable();
+}
+LPVOID GlobalAllocStore::m_Alloc[MaxGlobalAllocCount];
+LONG GlobalAllocStore::m_Count = 0;
+BOOL GlobalAllocStore::m_Disabled = FALSE;
+
+#endif
+
+#if defined(_DEBUG) && !defined(FEATURE_CORECLR)
+// The helper thread can't call regular new / delete b/c of interop-debugging deadlocks.
+// It must use the (InteropSafe) heap from debugger.h, you also can't allocate normally
+// when we have any other thread hard-suspended.
+
+// Telesto doesn't support interop-debugging, so this won't be an issue.
+
+void AssertAllocationAllowed();
+#endif
+
+
+HANDLE g_ExecutableHeapHandle = NULL;
+
+#undef VirtualAlloc
+LPVOID EEVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect) {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef FAILPOINTS_ENABLED
+ if (RFS_HashStack ())
+ return NULL;
+#endif
+
+#if defined(_DEBUG) && !defined(FEATURE_CORECLR)
+ AssertAllocationAllowed();
+#endif
+
+#ifdef _DEBUG
+ if (g_fEEStarted) {
+ _ASSERTE (!EEAllocationDisallowed());
+ }
+ _ASSERTE (lpAddress || (dwSize % g_SystemInfo.dwAllocationGranularity) == 0);
+#endif
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostMemoryManager *pMM = CorHost2::GetHostMemoryManager();
+ if (pMM) {
+ LPVOID pMem;
+ EMemoryCriticalLevel eLevel = eTaskCritical;
+ if (!g_fEEStarted)
+ {
+ eLevel = eProcessCritical;
+ }
+ else
+ {
+ Thread *pThread = GetThread();
+ if (pThread && pThread->HasLockInCurrentDomain())
+ {
+ if (GetAppDomain()->IsDefaultDomain())
+ {
+ eLevel = eProcessCritical;
+ }
+ else
+ {
+ eLevel = eAppDomainCritical;
+ }
+ }
+ }
+ HRESULT hr = S_OK;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pMM->VirtualAlloc (lpAddress, dwSize, flAllocationType, flProtect, eLevel, &pMem);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ if(hr != S_OK)
+ {
+ STRESS_LOG_OOM_STACK(dwSize);
+ }
+
+ return (hr == S_OK) ? pMem : NULL;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+
+ LPVOID p = NULL;
+
+#ifdef _DEBUG
+ {
+ DEBUG_ONLY_REGION();
+
+ if (lpAddress == NULL && (flAllocationType & MEM_RESERVE) != 0 && PEDecoder::GetForceRelocs())
+ {
+#ifdef _WIN64
+ // Try to allocate memory all over the place when we are stressing relocations on _WIN64.
+ // This will make sure that we generate jump stubs correctly among other things.
+ static BYTE* ptr = (BYTE*)0x234560000;
+ ptr += 0x123450000;
+ // Wrap around
+ if (ptr < (BYTE *)BOT_MEMORY || ptr > (BYTE *)TOP_MEMORY)
+ {
+ // Make sure to keep the alignment of the ptr so that we are not
+ // trying the same places over and over again
+ ptr = (BYTE*)BOT_MEMORY + (((SIZE_T)ptr) & 0xFFFFFFFF);
+ }
+ p = ::VirtualAlloc(ptr, dwSize, flAllocationType, flProtect);
+#else
+ // Allocate memory top to bottom to stress ngen fixups with LARGEADDRESSAWARE support.
+ p = ::VirtualAlloc(lpAddress, dwSize, flAllocationType | MEM_TOP_DOWN, flProtect);
+#endif // _WIN64
+ }
+ }
+#endif // _DEBUG
+
+ // Fall back to the default method if the forced relocation failed
+ if (p == NULL)
+ {
+ p = ::VirtualAlloc (lpAddress, dwSize, flAllocationType, flProtect);
+ }
+
+#ifdef _DEBUG
+ GlobalAllocStore::AddAlloc (p);
+#endif
+
+ if(p == NULL){
+ STRESS_LOG_OOM_STACK(dwSize);
+ }
+
+ return p;
+ }
+
+}
+#define VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect) Dont_Use_VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect)
+
+#undef VirtualFree
+BOOL EEVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType) {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ BOOL retVal = FALSE;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostMemoryManager *pMM = CorHost2::GetHostMemoryManager();
+ if (pMM) {
+#ifdef _DEBUG
+ GlobalAllocStore::ValidateFree(lpAddress);
+#endif
+
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ retVal = pMM->VirtualFree (lpAddress, dwSize, dwFreeType) == S_OK;
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+#ifdef _DEBUG
+ GlobalAllocStore::RemoveAlloc (lpAddress);
+#endif
+
+ retVal = (BOOL)(BYTE)::VirtualFree (lpAddress, dwSize, dwFreeType);
+ }
+
+ return retVal;
+}
+#define VirtualFree(lpAddress, dwSize, dwFreeType) Dont_Use_VirtualFree(lpAddress, dwSize, dwFreeType)
+
+#undef VirtualQuery
+SIZE_T EEVirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostMemoryManager *pMM = CorHost2::GetHostMemoryManager();
+ if (pMM) {
+ SIZE_T result;
+ HRESULT hr = S_OK;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pMM->VirtualQuery((void*)lpAddress, lpBuffer, dwLength, &result);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ return 0;
+ return result;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ return ::VirtualQuery(lpAddress, lpBuffer, dwLength);
+ }
+}
+#define VirtualQuery(lpAddress, lpBuffer, dwLength) Dont_Use_VirtualQuery(lpAddress, lpBuffer, dwLength)
+
+#undef VirtualProtect
+BOOL EEVirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostMemoryManager *pMM = CorHost2::GetHostMemoryManager();
+ if (pMM) {
+ BOOL result = FALSE;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ result = pMM->VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect) == S_OK;
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ return result;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ return ::VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect);
+ }
+}
+#define VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect) Dont_Use_VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect)
+
+#undef GetProcessHeap
+HANDLE EEGetProcessHeap()
+{
+ // Note: this can be called a little early for real contracts, so we use static contracts instead.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostMemoryManager *pMM = CorHost2::GetHostMemoryManager();
+ if (pMM) {
+        return (HANDLE)1; // pretending to return a handle is OK because handles are ignored by the hosting API
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ return GetProcessHeap();
+ }
+}
+#define GetProcessHeap() Dont_Use_GetProcessHeap()
+
+#undef HeapCreate
+HANDLE EEHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostMalloc *pHM = CorHost2::GetHostMalloc();
+ if (pHM)
+ {
+ return NULL;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ return ::HeapCreate(flOptions, dwInitialSize, dwMaximumSize);
+ }
+#else // !FEATURE_PAL
+ return NULL;
+#endif // !FEATURE_PAL
+}
+#define HeapCreate(flOptions, dwInitialSize, dwMaximumSize) Dont_Use_HeapCreate(flOptions, dwInitialSize, dwMaximumSize)
+
+#undef HeapDestroy
+BOOL EEHeapDestroy(HANDLE hHeap)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostMalloc *pHM = CorHost2::GetHostMalloc();
+ if (pHM)
+ {
+ return TRUE;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ return ::HeapDestroy(hHeap);
+ }
+#else // !FEATURE_PAL
+ UNREACHABLE();
+#endif // !FEATURE_PAL
+}
+#define HeapDestroy(hHeap) Dont_Use_HeapDestroy(hHeap)
+
+#ifdef _DEBUG
+#ifdef _TARGET_X86_
+#define OS_HEAP_ALIGN 8
+#else
+#define OS_HEAP_ALIGN 16
+#endif
+#endif
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+LPVOID EEHeapAllocHosted(IHostMalloc * pHM, SIZE_T dwBytes)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ Thread * pThread = GetThreadNULLOk();
+ EMemoryCriticalLevel eLevel = eTaskCritical;
+ if (!g_fEEStarted)
+ {
+ eLevel = eProcessCritical;
+ }
+ else
+ {
+ if (pThread && pThread->HasLockInCurrentDomain())
+ {
+ if (GetAppDomain()->IsDefaultDomain())
+ {
+ eLevel = eProcessCritical;
+ }
+ else
+ {
+ eLevel = eAppDomainCritical;
+ }
+ }
+ }
+ LPVOID pMem = NULL;
+ HRESULT hr = S_OK;
+ {
+ CantAllocHolder caHolder;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(pThread);
+ hr = pHM->Alloc(dwBytes, eLevel, &pMem);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+
+ if(hr != S_OK
+ //under OOM, we might not be able to get Execution Engine and can't access stress log
+ && GetExecutionEngine ()
+ // If we have not created StressLog ring buffer, we should not try to use it.
+ // StressLog is going to do a memory allocation. We may enter an endless loop.
+ && ClrFlsGetValue(TlsIdx_StressLog) != NULL )
+ {
+ STRESS_LOG_OOM_STACK(dwBytes);
+ }
+
+ return (hr == S_OK) ? pMem : NULL;
+}
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#undef HeapAlloc
+LPVOID EEHeapAlloc(HANDLE hHeap, DWORD dwFlags, SIZE_T dwBytes)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+#ifdef FAILPOINTS_ENABLED
+ if (RFS_HashStack ())
+ return NULL;
+#endif
+
+#if defined(_DEBUG) && !defined(FEATURE_CORECLR)
+ AssertAllocationAllowed();
+#endif
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostMalloc *pHM = CorHost2::GetHostMalloc();
+
+ // TODO: implement hosted executable heap
+ if (pHM && hHeap != g_ExecutableHeapHandle)
+ {
+ return EEHeapAllocHosted(pHM, dwBytes);
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+
+ LPVOID p = NULL;
+#ifdef _DEBUG
+ // Store the heap handle to detect heap contamination
+ p = ::HeapAlloc (hHeap, dwFlags, dwBytes + OS_HEAP_ALIGN);
+ if(p)
+ {
+ *((HANDLE*)p) = hHeap;
+ p = (BYTE*)p + OS_HEAP_ALIGN;
+ }
+ GlobalAllocStore::AddAlloc (p);
+#else
+ p = ::HeapAlloc (hHeap, dwFlags, dwBytes);
+#endif
+
+ if(p == NULL
+ //under OOM, we might not be able to get Execution Engine and can't access stress log
+ && GetExecutionEngine ()
+ // If we have not created StressLog ring buffer, we should not try to use it.
+ // StressLog is going to do a memory allocation. We may enter an endless loop.
+ && ClrFlsGetValue(TlsIdx_StressLog) != NULL )
+ {
+ STRESS_LOG_OOM_STACK(dwBytes);
+ }
+
+ return p;
+ }
+}
+#define HeapAlloc(hHeap, dwFlags, dwBytes) Dont_Use_HeapAlloc(hHeap, dwFlags, dwBytes)
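+
+// In debug builds the block returned by EEHeapAlloc above is laid out as
+// [HANDLE hHeap][padding up to OS_HEAP_ALIGN][user data], so EEHeapFree can
+// recover and verify the owning heap. A sketch (illustrative only) of the
+// round-trip:
+//
+//     void* raw = ::HeapAlloc(hHeap, 0, size + OS_HEAP_ALIGN);
+//     *((HANDLE*)raw) = hHeap;                  // stash the owning heap
+//     void* user = (BYTE*)raw + OS_HEAP_ALIGN;  // pointer handed to the caller
+//     // ... later, in EEHeapFree:
+//     HANDLE owner = *((HANDLE*)((BYTE*)user - OS_HEAP_ALIGN)); // owner == hHeap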
+
+LPVOID EEHeapAllocInProcessHeap(DWORD dwFlags, SIZE_T dwBytes)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ static HANDLE ProcessHeap = NULL;
+
+    // We need to guarantee a very small stack consumption in allocating. And we can't allow
+    // an SO to happen while calling into the host. This will force a hard SO, which is OK because
+ // we shouldn't ever get this close inside the EE in SO-intolerant code, so this should
+ // only fail if we call directly in from outside the EE, such as the JIT.
+ MINIMAL_STACK_PROBE_CHECK_THREAD(GetThread());
+
+ if (ProcessHeap == NULL)
+ ProcessHeap = EEGetProcessHeap();
+
+ return EEHeapAlloc(ProcessHeap,dwFlags,dwBytes);
+}
+
+#undef HeapFree
+BOOL EEHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ // @todo - Need a backout validation here.
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+#if defined(_DEBUG) && !defined(FEATURE_CORECLR)
+ AssertAllocationAllowed();
+#endif
+
+ BOOL retVal = FALSE;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostMalloc *pHM = CorHost2::GetHostMalloc();
+
+ // TODO: implement hosted executable heap
+ if (pHM && hHeap != g_ExecutableHeapHandle)
+ {
+ if (lpMem == NULL) {
+ retVal = TRUE;
+ }
+#ifdef _DEBUG
+ GlobalAllocStore::ValidateFree(lpMem);
+#endif
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ retVal = pHM->Free(lpMem) == S_OK;
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+#ifdef _DEBUG
+ GlobalAllocStore::RemoveAlloc (lpMem);
+
+ // Check the heap handle to detect heap contamination
+ lpMem = (BYTE*)lpMem - OS_HEAP_ALIGN;
+ HANDLE storedHeapHandle = *((HANDLE*)lpMem);
+ if(storedHeapHandle != hHeap)
+ _ASSERTE(!"Heap contamination detected! HeapFree was called on a heap other than the one that memory was allocated from.\n"
+ "Possible cause: you used new (executable) to allocate the memory, but didn't use DeleteExecutable() to free it.");
+#endif
+ // DON'T REMOVE THIS SEEMINGLY USELESS CAST
+ //
+        // On AMD64 the OS HeapFree calls RtlFreeHeap, which returns a 1-byte
+        // BOOLEAN. HeapFree then doesn't correctly clean the return value,
+        // so the other 3 bytes that come back can be junk, and in that case
+        // this return value can never be false.
+ retVal = (BOOL)(BYTE)::HeapFree (hHeap, dwFlags, lpMem);
+ }
+
+ return retVal;
+}
+#define HeapFree(hHeap, dwFlags, lpMem) Dont_Use_HeapFree(hHeap, dwFlags, lpMem)
+
+BOOL EEHeapFreeInProcessHeap(DWORD dwFlags, LPVOID lpMem)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+    // Take a look at the comments in EEHeapFree and EEHeapAllocInProcessHeap; obviously someone
+    // needs to take a little time to think more about this code.
+ //CONTRACT_VIOLATION(SOToleranceViolation);
+
+ static HANDLE ProcessHeap = NULL;
+
+ if (ProcessHeap == NULL)
+ ProcessHeap = EEGetProcessHeap();
+
+ return EEHeapFree(ProcessHeap,dwFlags,lpMem);
+}
+
+
+#undef HeapValidate
+BOOL EEHeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem) {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+#ifndef FEATURE_PAL
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostMalloc *pHM = CorHost2::GetHostMalloc();
+ if (pHM)
+ {
+ return TRUE;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ return ::HeapValidate(hHeap, dwFlags, lpMem);
+ }
+#else // !FEATURE_PAL
+ return TRUE;
+#endif // !FEATURE_PAL
+}
+#define HeapValidate(hHeap, dwFlags, lpMem) Dont_Use_HeapValidate(hHeap, dwFlags, lpMem)
+
+HANDLE EEGetProcessExecutableHeap() {
+ // Note: this can be called a little early for real contracts, so we use static contracts instead.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+#ifdef FEATURE_CORECLR
+
+#ifndef FEATURE_PAL
+
+ //
+ // Create the executable heap lazily
+ //
+#undef HeapCreate
+#undef HeapDestroy
+ if (g_ExecutableHeapHandle == NULL)
+ {
+
+ HANDLE ExecutableHeapHandle = HeapCreate(
+ HEAP_CREATE_ENABLE_EXECUTE, // heap allocation attributes
+ 0, // initial heap size
+ 0 // maximum heap size; 0 == growable
+ );
+
+ if (ExecutableHeapHandle == NULL)
+ return NULL;
+
+ HANDLE ExistingValue = InterlockedCompareExchangeT(&g_ExecutableHeapHandle, ExecutableHeapHandle, NULL);
+ if (ExistingValue != NULL)
+ {
+ HeapDestroy(ExecutableHeapHandle);
+ }
+ }
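+
+    // The InterlockedCompareExchangeT publish above is racy lazy initialization:
+    // each racing thread creates a heap, exactly one wins the compare-exchange,
+    // and every loser destroys the heap it created -- so at most one executable
+    // heap ever escapes.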
+
+#define HeapCreate(flOptions, dwInitialSize, dwMaximumSize) Dont_Use_HeapCreate(flOptions, dwInitialSize, dwMaximumSize)
+#define HeapDestroy(hHeap) Dont_Use_HeapDestroy(hHeap)
+
+#else // !FEATURE_PAL
+ UNREACHABLE();
+#endif // !FEATURE_PAL
+
+#else // FEATURE_CORECLR
+
+ //
+ // Use process executable heap created by the shim
+ //
+ if (g_ExecutableHeapHandle == NULL)
+ {
+ extern HANDLE GetProcessExecutableHeap();
+ g_ExecutableHeapHandle = GetProcessExecutableHeap();
+ }
+
+#endif // FEATURE_CORECLR
+
+ // TODO: implement hosted executable heap
+ return g_ExecutableHeapHandle;
+}
+
+
+#undef SleepEx
+#undef Sleep
+DWORD EESleepEx(DWORD dwMilliseconds, BOOL bAlertable)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ DWORD res;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTaskManager *provider = CorHost2::GetHostTaskManager();
+ if ((provider != NULL)){
+ DWORD option = 0;
+ if (bAlertable)
+ {
+ option = WAIT_ALERTABLE;
+ }
+
+
+ HRESULT hr;
+
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = provider->Sleep(dwMilliseconds, option);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ if (hr == S_OK) {
+ res = WAIT_OBJECT_0;
+ }
+ else if (hr == HOST_E_INTERRUPTED) {
+ _ASSERTE(bAlertable);
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ pThread->UserInterruptAPC(APC_Code);
+ }
+ res = WAIT_IO_COMPLETION;
+ }
+ else
+ {
+ _ASSERTE (!"Unknown return from host Sleep\n");
+ res = WAIT_OBJECT_0;
+ }
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ res = ::SleepEx(dwMilliseconds, bAlertable);
+ }
+
+ return res;
+}
+#define SleepEx(dwMilliseconds,bAlertable) \
+ Dont_Use_SleepEx(dwMilliseconds,bAlertable)
+#define Sleep(a) Dont_Use_Sleep(a)
+
+// non-zero return value if this function causes the OS to switch to another thread
+// See file:spinlock.h#SwitchToThreadSpinning for an explanation of dwSwitchCount
+BOOL __SwitchToThread (DWORD dwSleepMSec, DWORD dwSwitchCount)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ return __DangerousSwitchToThread(dwSleepMSec, dwSwitchCount, FALSE);
+}
+
+#undef SleepEx
+BOOL __DangerousSwitchToThread (DWORD dwSleepMSec, DWORD dwSwitchCount, BOOL goThroughOS)
+{
+ // If you sleep for a long time, the thread should be in Preemptive GC mode.
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(dwSleepMSec < 10000 || GetThread() == NULL || !GetThread()->PreemptiveGCDisabled());
+ }
+ CONTRACTL_END;
+
+ if (CLRTaskHosted())
+ {
+ Thread *pThread = GetThread();
+ if (pThread && pThread->HasThreadState(Thread::TS_YieldRequested))
+ {
+ pThread->ResetThreadState(Thread::TS_YieldRequested);
+ }
+ }
+
+ if (dwSleepMSec > 0)
+ {
+        // when called with goThroughOS, make sure not to call into the host. This function
+        // may be called from GetRuntimeFunctionCallback(), which is called by the OS to determine
+        // the personality routine when it needs to unwind managed code off the stack. When this
+        // happens in the context of an SO, we want to avoid calling into the host.
+ if (goThroughOS)
+ ::SleepEx(dwSleepMSec, FALSE);
+ else
+ ClrSleepEx(dwSleepMSec,FALSE);
+ return TRUE;
+ }
+
+ // In deciding when to insert sleeps, we wait until we have been spinning
+ // for a long time and then always sleep. The former is to let short perf-critical
+ // __SwitchToThread loops avoid context switches. The latter is to ensure
+ // that if many threads are spinning waiting for a lower-priority thread
+    // to run, they will eventually all be asleep at the same time.
+ //
+ // The specific values are derived from the NDP 2.0 SP1 fix: it waits for
+ // 8 million cycles of __SwitchToThread calls where each takes ~300-500,
+ // which means we should wait in the neighborhood of 25000 calls.
+ //
+ // As of early 2011, ARM CPUs are much slower, so we need a lower threshold.
+ // The following two values appear to yield roughly equivalent spin times
+ // on their respective platforms.
+ //
+#ifdef _TARGET_ARM_
+ #define SLEEP_START_THRESHOLD (5 * 1024)
+#else
+ #define SLEEP_START_THRESHOLD (32 * 1024)
+#endif
+
+ _ASSERTE(CALLER_LIMITS_SPINNING < SLEEP_START_THRESHOLD);
+ if (dwSwitchCount >= SLEEP_START_THRESHOLD)
+ {
+ if (goThroughOS)
+ ::SleepEx(1, FALSE);
+ else
+ ClrSleepEx(1, FALSE);
+ }
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTaskManager *provider = CorHost2::GetHostTaskManager();
+ if ((provider != NULL) && (goThroughOS == FALSE))
+ {
+ DWORD option = 0;
+
+ HRESULT hr;
+
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = provider->SwitchToTask(option);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ return hr == S_OK;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ return SwitchToThread();
+ }
+}
+#define SleepEx(dwMilliseconds,bAlertable) \
+ Dont_Use_SleepEx(dwMilliseconds,bAlertable)
+
+// Locking routines supplied by the EE to the other DLLs of the CLR. In a _DEBUG
+// build of the EE, we poison the Crst as a poor man's attempt to do some argument
+// validation.
+#define POISON_BITS 3
+
+static inline CRITSEC_COOKIE CrstToCookie(Crst * pCrst) {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE((((uintptr_t) pCrst) & POISON_BITS) == 0);
+#ifdef _DEBUG
+ if (pCrst)
+ {
+ pCrst = (Crst *) (((uintptr_t) pCrst) | POISON_BITS);
+ }
+#endif
+ return (CRITSEC_COOKIE) pCrst;
+}
+
+static inline Crst *CookieToCrst(CRITSEC_COOKIE cookie) {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE((((uintptr_t) cookie) & POISON_BITS) == POISON_BITS);
+#ifdef _DEBUG
+ cookie = (CRITSEC_COOKIE) (((uintptr_t) cookie) & ~POISON_BITS);
+#endif
+ return (Crst *) cookie;
+}
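+
+// A minimal illustration (illustrative values) of the debug-build poisoning
+// round-trip above:
+//
+//     Crst* p = ...;                       // pointer-aligned, so low bits are 0
+//     CRITSEC_COOKIE c = CrstToCookie(p);  // cookie == p | POISON_BITS
+//     Crst* back = CookieToCrst(c);        // asserts the bits, strips them: back == p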
+
+CRITSEC_COOKIE EECreateCriticalSection(CrstType crstType, CrstFlags flags) {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ CRITSEC_COOKIE ret = NULL;
+
+ EX_TRY
+ {
+ // This may be controversial, but seems like the correct discipline. If the
+ // EE has called out to any other DLL of the CLR in cooperative mode, we
+ // arbitrarily force lock acquisition to occur in preemptive mode. See our
+ // treatment of AcquireLock below.
+ //_ASSERTE((flags & (CRST_UNSAFE_COOPGC | CRST_UNSAFE_ANYMODE)) == 0);
+ ret = CrstToCookie(new Crst(crstType, flags));
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ // Note: we'll return NULL if the create fails. That's a true NULL, not a poisoned NULL.
+ return ret;
+}
+
+void EEDeleteCriticalSection(CRITSEC_COOKIE cookie)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ WRAPPER(GC_NOTRIGGER);
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ VALIDATE_BACKOUT_STACK_CONSUMPTION;
+
+ Crst *pCrst = CookieToCrst(cookie);
+ _ASSERTE(pCrst);
+
+ delete pCrst;
+}
+
+DEBUG_NOINLINE void EEEnterCriticalSection(CRITSEC_COOKIE cookie) {
+
+ // Entering a critical section has many different contracts
+ // depending on the flags used to initialize the critical section.
+ // See CrstBase::Enter() for the actual contract. It's much too
+ // complex to repeat here.
+
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+
+ Crst *pCrst = CookieToCrst(cookie);
+ _ASSERTE(pCrst);
+
+ pCrst->Enter();
+}
+
+DEBUG_NOINLINE void EELeaveCriticalSection(CRITSEC_COOKIE cookie)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+
+ Crst *pCrst = CookieToCrst(cookie);
+ _ASSERTE(pCrst);
+
+ pCrst->Leave();
+}
+
+LPVOID EETlsGetValue(DWORD slot)
+{
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ //
+ // @todo: we don't want TlsGetValue to throw, but CheckThreadState throws right now. Either modify
+ // CheckThreadState to not throw, or catch any exception and just return NULL.
+ //
+ //CONTRACT_VIOLATION(ThrowsViolation);
+ SCAN_IGNORE_THROW;
+
+ void **pTlsData = CExecutionEngine::CheckThreadState(slot, FALSE);
+
+ if (pTlsData)
+ return pTlsData[slot];
+ else
+ return NULL;
+}
+
+BOOL EETlsCheckValue(DWORD slot, LPVOID * pValue)
+{
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ //
+ // @todo: we don't want TlsGetValue to throw, but CheckThreadState throws right now. Either modify
+ // CheckThreadState to not throw, or catch any exception and just return NULL.
+ //
+ //CONTRACT_VIOLATION(ThrowsViolation);
+ SCAN_IGNORE_THROW;
+
+ void **pTlsData = CExecutionEngine::CheckThreadState(slot, FALSE);
+
+ if (pTlsData)
+ {
+ *pValue = pTlsData[slot];
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+VOID EETlsSetValue(DWORD slot, LPVOID pData)
+{
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ void **pTlsData = CExecutionEngine::CheckThreadState(slot);
+
+ if (pTlsData) // Yes, CheckThreadState(slot, TRUE) can return NULL now.
+ {
+ pTlsData[slot] = pData;
+ }
+}
+
+BOOL EEAllocationDisallowed()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef _DEBUG
+    // In debug builds we make sure that a thread does not perform memory allocation
+    // after it has suspended another thread, since the suspended thread may be
+    // holding the OS heap lock.
+ return !Thread::Debug_AllowCallout();
+#else
+ return FALSE;
+#endif
+}
+
+#ifdef FEATURE_CLICKONCE
+
+HRESULT GetApplicationManifest (LPCWSTR pwzAppFullName,
+ DWORD dwManifestPaths,
+ LPCWSTR *ppwzManifestPaths,
+ __out_z __deref_out_opt LPWSTR *ppwzApplicationFolderPath,
+ __out_z __deref_out_opt LPWSTR *ppszKeyForm,
+ ICMS **ppApplicationManifest)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pwzAppFullName));
+ PRECONDITION(CheckPointer(ppwzManifestPaths, NULL_OK));
+ PRECONDITION(CheckPointer(ppApplicationManifest));
+ } CONTRACTL_END;
+
+ ReleaseHolder<IStore> pStore(NULL);
+ ReleaseHolder<IAppIdAuthority> pAppIdAuth(NULL);
+ ReleaseHolder<IDefinitionAppId> pDefinitionIdentity(NULL);
+ ReleaseHolder<IEnumDefinitionIdentity> pEnumDefinitionIdentity(NULL);
+ ReleaseHolder<IDefinitionIdentity> pDeploymentDefinitionIdentity(NULL);
+ ReleaseHolder<IDefinitionIdentity> pApplicationDefinitionIdentity(NULL);
+ ReleaseHolder<IDefinitionIdentity> pSubscriptionIdentity(NULL);
+ ReleaseHolder<IDefinitionAppId> pSubscriptionAppId(NULL);
+
+ ReleaseHolder<IUnknown> TempFetched(NULL);
+ HRESULT hr = S_OK;
+
+ // Maybe this is not an installed application. Grab the manifest path if specified and parse the manifest.
+ if (dwManifestPaths > 0) {
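+        // ppwzManifestPaths is expected to hold at least two entries; entry [1],
+        // used below, is parsed as the application manifest (entry [0] is
+        // presumably the deployment manifest).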
+ if (dwManifestPaths < 2)
+ goto ErrExit;
+
+ hr = ParseManifest(ppwzManifestPaths[1], NULL, __uuidof(ICMS), &TempFetched);
+ if (TempFetched == NULL)
+ {
+ goto ErrExit;
+ }
+
+ IfFailGo(TempFetched->QueryInterface(__uuidof(ICMS), (void**) ppApplicationManifest));
+ TempFetched.Release();
+
+ // Set the application directory to be the location of the application manifest.
+ if (ppwzApplicationFolderPath) {
+ LPCWSTR pszSlash;
+ if (((pszSlash = wcsrchr(ppwzManifestPaths[1], W('\\'))) != NULL) || ((pszSlash = wcsrchr(ppwzManifestPaths[1], W('/'))) != NULL)) {
+ DWORD cchDirectory = (DWORD) (pszSlash - ppwzManifestPaths[1] + 1);
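+                // Allocate cchDirectory + 1 WCHARs (2 bytes each) so there is
+                // room for the terminating null appended below.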
+ *ppwzApplicationFolderPath = (LPWSTR) CoTaskMemAlloc(2 * (cchDirectory + 1));
+
+ if (*ppwzApplicationFolderPath == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ goto ErrExit;
+ }
+
+ memcpy(*ppwzApplicationFolderPath, ppwzManifestPaths[1], 2 * cchDirectory);
+ (*ppwzApplicationFolderPath)[cchDirectory] = W('\0');
+ }
+ }
+ goto ErrExit;
+ }
+
+ // Get the user store.
+ IfFailGo(GetUserStore(0, NULL, __uuidof(IStore), &pStore));
+
+ // Get the AppId authority
+ IfFailGo(GetAppIdAuthority(&pAppIdAuth));
+
+    // Get the IDefinitionIdentity of the application full name passed in as an argument.
+ IfFailGo(pAppIdAuth->TextToDefinition(0, pwzAppFullName, &pDefinitionIdentity));
+
+ // Get the ICMS object representing the application manifest.
+ IfFailGo(pDefinitionIdentity->EnumAppPath(&pEnumDefinitionIdentity));
+ IfFailGo(pEnumDefinitionIdentity->Reset());
+ ULONG numItems = 0;
+ IfFailGo(pEnumDefinitionIdentity->Next(1, &pDeploymentDefinitionIdentity, &numItems));
+ if (numItems < 1) {
+ hr = HRESULT_FROM_WIN32(ERROR_INVALID_DATA);
+ goto ErrExit;
+ }
+ IfFailGo(pEnumDefinitionIdentity->Next(1, &pApplicationDefinitionIdentity, &numItems));
+ if (numItems < 1) {
+ hr = HRESULT_FROM_WIN32(ERROR_INVALID_DATA);
+ goto ErrExit;
+ }
+
+    if (ppszKeyForm) {
+        // Create subscription identity from deployment identity.
+        IfFailGo(pDeploymentDefinitionIdentity->Clone(0, NULL, &pSubscriptionIdentity));
+        IfFailGo(pSubscriptionIdentity->SetAttribute(NULL, W("version"), NULL));
+
+        // Create the subscription app id.
+        IfFailGo(pAppIdAuth->CreateDefinition(&pSubscriptionAppId));
+
+        IDefinitionIdentity *defIdentityArray[1];
+        defIdentityArray[0] = pSubscriptionIdentity;
+
+        IfFailGo(pSubscriptionAppId->SetAppPath(1, defIdentityArray));
+        IfFailGo(pAppIdAuth->GenerateDefinitionKey(0, pSubscriptionAppId, ppszKeyForm));
+ }
+
+    hr = pStore->GetAssemblyInformation(0, pApplicationDefinitionIdentity, __uuidof(ICMS), &TempFetched);
+    if (SUCCEEDED(hr)) {
+        if (ppwzApplicationFolderPath) {
+            // Get the application folder path.
+            LPVOID cookie = NULL;
+            IfFailGo(pStore->LockApplicationPath(0, pDefinitionIdentity, &cookie, ppwzApplicationFolderPath));
+            IfFailGo(pStore->ReleaseApplicationPath(cookie));
+        }
+    }
+    // Bail out before dereferencing TempFetched if GetAssemblyInformation failed.
+    IfFailGo(hr);
+    IfFailGo(TempFetched->QueryInterface(__uuidof(ICMS), (void**) ppApplicationManifest));
+ TempFetched.Release();
+
+ErrExit:
+ pStore.Release();
+ pAppIdAuth.Release();
+ pDefinitionIdentity.Release();
+ pEnumDefinitionIdentity.Release();
+ pDeploymentDefinitionIdentity.Release();
+ pApplicationDefinitionIdentity.Release();
+ pSubscriptionIdentity.Release();
+ pSubscriptionAppId.Release();
+
+ return hr;
+}
+
+BOOL DoesMarkOfTheWebExist (LPCWSTR pwzAppFullName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pwzAppFullName));
+ } CONTRACTL_END;
+
+ HANDLE alternateStreamHandle = INVALID_HANDLE_VALUE;
+
+ StackSString alternateStreamPath(pwzAppFullName);
+ alternateStreamPath.Append(W(":Zone.Identifier"));
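+    // The Mark-of-the-Web travels in an NTFS alternate data stream named
+    // "Zone.Identifier" attached to the downloaded file; opening
+    // "<path>:Zone.Identifier" succeeds only when that stream exists.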
+
+ // Try to open alternate file stream
+ alternateStreamHandle = WszCreateFile(
+ alternateStreamPath.GetUnicode(),
+ GENERIC_READ,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ NULL,
+ OPEN_EXISTING,
+ 0,
+ NULL);
+
+ if (INVALID_HANDLE_VALUE != alternateStreamHandle)
+ {
+ CloseHandle(alternateStreamHandle);
+
+ // We only check if MOTW (alternate stream) is present,
+ // no matter what the zone is.
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+HRESULT GetApplicationEntryPointInfo (LPCWSTR pwzAppFullName,
+ DWORD dwManifestPaths,
+ LPCWSTR *ppwzManifestPaths,
+ __out_z __deref_out_opt LPWSTR *ppwzApplicationFolderPath,
+ LPCWSTR *ppwzCodeBase,
+ LPCWSTR *ppwzParameters,
+ __out_z __deref_out_opt LPWSTR *ppwzProcessorArch,
+ __out_z __deref_out_opt LPWSTR *ppwzAppIdKeyForm)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pwzAppFullName));
+ PRECONDITION(CheckPointer(ppwzManifestPaths, NULL_OK));
+ PRECONDITION(CheckPointer(ppwzCodeBase, NULL_OK));
+ PRECONDITION(CheckPointer(ppwzParameters, NULL_OK));
+ PRECONDITION(CheckPointer(ppwzProcessorArch, NULL_OK));
+ } CONTRACTL_END;
+
+ ReleaseHolder<ICMS> pApplicationManifest(NULL);
+ ReleaseHolder<ISection> pEntrySection(NULL);
+ ReleaseHolder<IEnumUnknown> pEntryEnum(NULL);
+ ReleaseHolder<IEntryPointEntry> pEntry(NULL);
+ ReleaseHolder<IReferenceIdentity> pReferenceId(NULL);
+ ReleaseHolder<ISectionWithStringKey> pNamedRefSection(NULL);
+ ReleaseHolder<ISectionWithReferenceIdentityKey> pRefSection(NULL);
+ ReleaseHolder<IAssemblyReferenceEntry> pRefEntry(NULL);
+ ReleaseHolder<IAssemblyReferenceDependentAssemblyEntry> pDependentAssemblyEntry(NULL);
+ CoTaskMemHolder<WCHAR> pwszDependencyName = NULL;
+
+ ReleaseHolder<IUnknown> TempFetched(NULL);
+ ReleaseHolder<ISection> TempFetchedSection(NULL);
+ HRESULT hr = S_OK;
+
+ // Get the ICMS object representing the application manifest.
+ IfFailGo(GetApplicationManifest(pwzAppFullName, dwManifestPaths, ppwzManifestPaths, ppwzApplicationFolderPath, ppwzAppIdKeyForm,&pApplicationManifest));
+
+ // Get the app entry point section.
+ IfFailGo(pApplicationManifest->get_EntryPointSection(&pEntrySection));
+ if (pEntrySection == NULL) {
+ hr = HRESULT_FROM_WIN32(ERROR_INVALID_DATA);
+ goto ErrExit;
+ }
+
+ // Get the entry point enum.
+ IfFailGo(pEntrySection->get__NewEnum(&TempFetched));
+ IfFailGo(TempFetched->QueryInterface(__uuidof(IEnumUnknown), &pEntryEnum));
+ TempFetched.Release();
+
+ // Get the first entry point.
+ ULONG numItems = 0;
+ IfFailGo(pEntryEnum->Next(1, &TempFetched, &numItems));
+ if (numItems < 1) {
+ hr = HRESULT_FROM_WIN32(ERROR_INVALID_DATA);
+ goto ErrExit;
+ }
+ IfFailGo(TempFetched->QueryInterface(__uuidof(IEntryPointEntry), &pEntry));
+ TempFetched.Release();
+
+ // We support both name and identity based entry points.
+ IfFailGo(pEntry->get_Identity(&pReferenceId));
+ if (pReferenceId == NULL) {
+ hr = HRESULT_FROM_WIN32(ERROR_INVALID_DATA);
+ goto ErrExit;
+ }
+
+ // Get the assembly reference section.
+ IfFailGo(pApplicationManifest->get_AssemblyReferenceSection(&TempFetchedSection));
+ IfFailGo(TempFetchedSection->QueryInterface(__uuidof(ISectionWithReferenceIdentityKey), &pRefSection));
+ TempFetchedSection.Release();
+
+#ifdef CLICKONCE_LONGHORN_RELATED
+ //
+ // If a reference assembly matching entry point does not exist, use the codebase
+ // of command line file.
+ //
+ if (FAILED(pRefSection->Lookup(pReferenceId, &TempFetched)))
+ {
+ if (ppwzCodeBase) {
+ IfFailGo(pEntry->get_CommandLine_File(ppwzCodeBase));
+ }
+ }
+ else
+#endif
+ {
+ // Lookup the assembly reference entry.
+ IfFailGo(pRefSection->Lookup(pReferenceId, &TempFetched));
+ IfFailGo(TempFetched->QueryInterface(__uuidof(IAssemblyReferenceEntry), &pRefEntry));
+ TempFetched.Release();
+
+ // Get the assembly codebase. Codebase may either come from <dependentAssembly> or <installFrom>.
+ // In a valid reference there should always be a <dependentAssembly> section.
+ IfFailGo(pRefEntry->get_DependentAssembly(&pDependentAssemblyEntry));
+
+ if (ppwzCodeBase) {
+ IfFailGo(pDependentAssemblyEntry->get_Codebase(ppwzCodeBase));
+ }
+ }
+
+ // Get the parameters
+ if (ppwzParameters)
+ IfFailGo(pEntry->get_CommandLine_Parameters(ppwzParameters));
+
+ // Get the processor architecture requested in the app manifest
+ if (ppwzProcessorArch)
+ IfFailGo(pReferenceId->GetAttribute(NULL, W("processorArchitecture"), ppwzProcessorArch));
+
+ErrExit:
+ pApplicationManifest.Release();
+ pEntrySection.Release();
+ pEntryEnum.Release();
+ pEntry.Release();
+ pReferenceId.Release();
+ pNamedRefSection.Release();
+ pRefSection.Release();
+ pRefEntry.Release();
+ pDependentAssemblyEntry.Release();
+ pwszDependencyName.Release();
+
+ return hr;
+}
+
+//
+// Export used in the ClickOnce installer for launching manifest-based applications.
+//
+
+typedef struct _tagNameMap {
+ LPWSTR pwszProcessorArch;
+ DWORD dwRuntimeInfoFlag;
+} NAME_MAP;
+
+DWORD g_DfSvcSpinLock = 0;
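+// A minimal spin lock: EnterDfSvcSpinLock atomically swaps 1 into
+// g_DfSvcSpinLock; if the previous value was already 1 another thread holds
+// the lock, so we sleep 5ms and retry. LeaveDfSvcSpinLock swaps 0 back in.
+// Used below to serialize the ClickOnce environment-variable handoff.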
+void EnterDfSvcSpinLock () {
+ WRAPPER_NO_CONTRACT;
+ while (1) {
+ if (InterlockedExchange ((LPLONG)&g_DfSvcSpinLock, 1) == 1)
+ ClrSleepEx (5, FALSE);
+ else
+ return;
+ }
+}
+
+void LeaveDfSvcSpinLock () {
+ InterlockedExchange ((LPLONG)&g_DfSvcSpinLock, 0);
+}
+
+//
+// ThreadProc used by the SHCreateThread call - to activate a ClickOnce app with ShellExecuteEx.
+// ShellExecuteEx can only be used from STA threads - we are creating our own STA thread.
+//
+DWORD CorLaunchApplication_ThreadProc(void*)
+{
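+    // Intentionally empty: the work happens in CorLaunchApplication_Callback,
+    // which SHCreateThread runs as the synchronous callback before this
+    // thread proc is reached.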
+ return 0;
+}
+
+//
+// This callback is executed as the sync-callback on SHCreateThread.
+// SHCreateThread does not return till this callback returns.
+//
+DWORD CorLaunchApplication_Callback(void* pv)
+{
+ SHELLEXECUTEINFO *pSei = static_cast<SHELLEXECUTEINFO *>(pv);
+    IUnknown* pDummyUnknown = NULL; // stays NULL if CreateStreamOnHGlobal fails below
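+    // CreateStreamOnHGlobal is used here only as a convenient way to obtain a
+    // real IUnknown implementation; the stream contents are never touched. On
+    // Win8 the object is handed to ShellExecuteEx as a site pointer (see below).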
+ CreateStreamOnHGlobal(NULL, TRUE, (LPSTREAM*) &pDummyUnknown);
+
+ if (RunningOnWin8())
+ {
+ // When SEE_MASK_FLAG_HINST_IS_SITE is specified SHELLEXECUTEINFO.hInstApp is used as an
+ // _In_ parameter and specifies a IUnknown* to be used as a site pointer. The site pointer
+ // is used to provide services to shell execute, the handler binding process and the verb handlers
+ // once they are invoked.
+ //
+ // SEE_MASK_HINST_IS_SITE is available on Win8+
+ // Defining it locally in Win8-conditioned code
+ //
+ const ULONG SEE_MASK_HINST_IS_SITE = 0x08000000;
+
+ pSei->fMask = SEE_MASK_HINST_IS_SITE;
+ pSei->hInstApp = reinterpret_cast<HINSTANCE>(pDummyUnknown);
+ }
+
+ WszShellExecuteEx(pSei);
+ // We ignore all errors from ShellExecute.
+ //
+ // This may change with Win8:783168
+
+ if (pDummyUnknown)
+ {
+ pDummyUnknown->Release();
+ }
+
+ return 0;
+}
+
+//-----------------------------------------------------------------------------
+// WszSHCreateThread
+//
+// @func calls SHCreateThread with the provided parameters
+//
+// @rdesc Result
+//-----------------------------------------------------------------------------------
+HRESULT WszSHCreateThread(
+ LPTHREAD_START_ROUTINE pfnThreadProc,
+ void *pData,
+ SHCT_FLAGS dwFlags,
+ LPTHREAD_START_ROUTINE pfnCallback
+)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ HMODULE _hmodShlwapi = 0;
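+    // Bind lazily to shlwapi!SHCreateThread on first use and cache the function
+    // pointer in a function-local static for subsequent calls.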
+
+ typedef BOOL (*PFNSHCREATETHREAD) (
+ __in LPTHREAD_START_ROUTINE pfnThreadProc,
+ __in_opt void *pData,
+ __in SHCT_FLAGS dwFlags,
+ __in_opt LPTHREAD_START_ROUTINE pfnCallback
+ );
+
+ static PFNSHCREATETHREAD pfnW = NULL;
+ if (NULL == pfnW)
+ {
+ _hmodShlwapi = CLRLoadLibrary(W("shlwapi.dll"));
+
+ if (_hmodShlwapi)
+ {
+ pfnW = (PFNSHCREATETHREAD)GetProcAddress(_hmodShlwapi, "SHCreateThread");
+ }
+ }
+
+ if (pfnW)
+ {
+ BOOL bRet = pfnW(pfnThreadProc, pData, dwFlags, pfnCallback);
+
+ if (!bRet)
+ {
+ hr = HRESULT_FROM_WIN32(GetLastError());
+ }
+ }
+ else
+ {
+ hr = HRESULT_FROM_WIN32(GetLastError());
+ }
+
+ // NOTE: We leak the module handles and let the OS gather them at process shutdown.
+
+ return hr;
+}
+
+STDAPI CorLaunchApplication (HOST_TYPE dwClickOnceHost,
+ LPCWSTR pwzAppFullName,
+ DWORD dwManifestPaths,
+ LPCWSTR *ppwzManifestPaths,
+ DWORD dwActivationData,
+ LPCWSTR *ppwzActivationData,
+ LPPROCESS_INFORMATION lpProcessInformation)
+{
+ // HostType is encoded in the lowest 2 bits.
+ unsigned hostType = dwClickOnceHost & MASK_HOSTTYPE;
+
+ // NoPinnableBit is the highest bit.
+ unsigned notPinnableBit = dwClickOnceHost & MASK_NOTPINNABLE;
+
+ // DontShowInstallDialog bit
+ unsigned dontShowInstallDialog = dwClickOnceHost & MASK_DONT_SHOW_INSTALL_DIALOG;
+
+ bool bUseShellExecute = false;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ PRECONDITION(CheckPointer(pwzAppFullName, NULL_OK));
+ PRECONDITION(CheckPointer(ppwzManifestPaths, NULL_OK));
+ PRECONDITION(CheckPointer(ppwzActivationData, NULL_OK));
+ PRECONDITION(hostType == HOST_TYPE_DEFAULT || hostType == HOST_TYPE_APPLAUNCH || hostType == HOST_TYPE_CORFLAG);
+ PRECONDITION(CheckPointer(lpProcessInformation));
+ } CONTRACTL_END;
+
+
+ if (pwzAppFullName == NULL)
+ return E_POINTER;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ LPVOID lpEnvironment = NULL;
+ EX_TRY
+ {
+        StackSString commandLine(StackSString::Ascii, "\""); // opening quote around the path portion of the command line
+        StackSString appEntryPath(W("")); // the path to the entry point (the exe to run) of the application, initialized to an empty string
+ NewArrayHolder<WCHAR> wszDirectory(NULL);
+ NewArrayHolder<WCHAR> wszVersion(NULL);
+ CoTaskMemHolder<WCHAR> pwszApplicationFolderPath(NULL);
+ CoTaskMemHolder<WCHAR> pwszAppIdKeyForm(NULL);
+ CoTaskMemHolder<WCHAR> pwszCodebase(NULL);
+ CoTaskMemHolder<WCHAR> pwszParameters(NULL);
+ CoTaskMemHolder<WCHAR> pwszProcessorArch(NULL);
+
+ hr = GetApplicationEntryPointInfo(pwzAppFullName, dwManifestPaths, ppwzManifestPaths, (LPWSTR*) (void*) &pwszApplicationFolderPath, (LPCWSTR*) (void*) &pwszCodebase, (LPCWSTR*) (void*) &pwszParameters, (LPWSTR*) (void*) &pwszProcessorArch,(LPWSTR*) (void*) &pwszAppIdKeyForm);
+
+ if (SUCCEEDED(hr)) {
+ // construct the application Entry Path
+ if (pwszApplicationFolderPath != NULL) {
+ appEntryPath.Append(pwszApplicationFolderPath);
+ SString::CIterator i = appEntryPath.End()-1;
+ if (i[0] != '\\')
+ appEntryPath.Append(W("\\"));
+ }
+ appEntryPath.Append(pwszCodebase);
+
+ if (hostType == HOST_TYPE_CORFLAG) {
+ // construct the command line
+ commandLine.Append(appEntryPath);
+ commandLine.Append(W("\""));
+
+ if (RunningOnWin8() &&
+ DoesMarkOfTheWebExist(appEntryPath.GetUnicode()))
+ {
+ // We will use ShellExecute for any zone set in MOTW stream.
+                    // ShellExecute calls the Application Reputation API when the zone is one
+                    // that requires AppRep validation. At the moment that is the Internet Zone only,
+                    // but there is talk of extending this to some of the other zones.
+                    // By not checking the zone here we leave the decision to AppRep/ShellExecute,
+                    // which is exactly what we want.
+ bUseShellExecute = true;
+ }
+ else
+ {
+ if (pwszParameters != NULL) {
+ commandLine.Append(W(" "));
+ commandLine.Append(pwszParameters);
+ }
+ }
+
+ // now construct the environment variables
+ EnterDfSvcSpinLock();
+ WszSetEnvironmentVariable(g_pwzClickOnceEnv_FullName, pwzAppFullName);
+
+ if (dwManifestPaths > 0 && ppwzManifestPaths) {
+ for (DWORD i=0; i<dwManifestPaths; i++) {
+ StackSString manifestFile(g_pwzClickOnceEnv_Manifest);
+ StackSString buf;
+ COUNT_T size = buf.GetUnicodeAllocation();
+ _itow_s(i, buf.OpenUnicodeBuffer(size), size, 10);
+ buf.CloseBuffer();
+ manifestFile.Append(buf);
+ WszSetEnvironmentVariable(manifestFile.GetUnicode(), *ppwzManifestPaths++);
+ }
+ }
+
+ if (dwActivationData > 0 && ppwzActivationData) {
+ for (DWORD i=0; i<dwActivationData; i++) {
+ StackSString activationData(g_pwzClickOnceEnv_Parameter);
+ StackSString buf;
+ COUNT_T size = buf.GetUnicodeAllocation();
+ _itow_s(i, buf.OpenUnicodeBuffer(size), size, 10);
+ buf.CloseBuffer();
+ activationData.Append(buf);
+ WszSetEnvironmentVariable(activationData.GetUnicode(), *ppwzActivationData++);
+ }
+ }
+
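+                // The CLR normally remaps GetEnvironmentStrings(W) to a wrapper via
+                // macros; temporarily undo that so the raw OS environment block
+                // (including the ClickOnce variables just set) can be captured and
+                // handed to CreateProcess below.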
+#undef GetEnvironmentStrings
+#undef GetEnvironmentStringsW
+ lpEnvironment = (LPVOID) GetEnvironmentStringsW();
+#define GetEnvironmentStringsW() Use_WszGetEnvironmentStrings()
+#define GetEnvironmentStrings() Use_WszGetEnvironmentStrings()
+ } else {
+ // application folder is required to determine appEntryPath for framework version selection,
+ // but should not be used as working directory for partial trust apps
+ pwszApplicationFolderPath.Clear();
+
+ // find the architecture from manifest and required version from the application itself
+ static const NAME_MAP g_NameMapArray[] = {
+ {W("x86"), RUNTIME_INFO_REQUEST_X86},
+ {W("ia64"), RUNTIME_INFO_REQUEST_IA64},
+ {W("amd64"), RUNTIME_INFO_REQUEST_AMD64},
+ };
+
+ DWORD dwRuntimeInfoFlags = RUNTIME_INFO_UPGRADE_VERSION |
+ RUNTIME_INFO_CONSIDER_POST_2_0 |
+ RUNTIME_INFO_EMULATE_EXE_LAUNCH;
+
+                // We decide ourselves whether the shim should show the install dialog,
+                // rather than leaving that decision to the shim.
+ if (dontShowInstallDialog > 0)
+ {
+ dwRuntimeInfoFlags |= RUNTIME_INFO_DONT_SHOW_ERROR_DIALOG;
+ }
+ else
+ {
+ // show even if SEM_CRITICAL is set
+ dwRuntimeInfoFlags |= METAHOST_POLICY_IGNORE_ERROR_MODE;
+ }
+
+ if (pwszProcessorArch) {
+ for (DWORD index = 0; index < sizeof(g_NameMapArray) / sizeof(NAME_MAP); index++) {
+ if (SString::_wcsicmp(g_NameMapArray[index].pwszProcessorArch, pwszProcessorArch) == 0) {
+ dwRuntimeInfoFlags |= g_NameMapArray[index].dwRuntimeInfoFlag;
+ break;
+ }
+ }
+ }
+ wszDirectory = new WCHAR[MAX_PATH + 1];
+ wszVersion = new WCHAR[MAX_PATH + 1];
+ wszVersion[0] = 0; // we don't prefer any version
+ DWORD cchBuffer = MAX_PATH;
+
+ // Use GetRequestedRuntimeInfo because MetaHost APIs do not yet support architecture arguments.
+                // Calls to GetRequestedRuntimeInfo() would go to the local copy inside clr.dll,
+                // so we have to call mscoree's GetRequestedRuntimeInfo instead.
+ typedef HRESULT (*PFNGetRequestedRuntimeInfo)(LPCWSTR pExe,
+ LPCWSTR pwszVersion,
+ LPCWSTR pConfigurationFile,
+ DWORD startupFlags,
+ DWORD runtimeInfoFlags,
+ LPWSTR pDirectory,
+ DWORD dwDirectory,
+ DWORD *dwDirectoryLength,
+ LPWSTR pVersion,
+ DWORD cchBuffer,
+ DWORD* dwlength);
+ PFNGetRequestedRuntimeInfo pfnGetRequestedRuntimeInfo = NULL;
+ HMODULE hMscoree = GetModuleHandleW( W("mscoree.dll") ); // mscoree.dll should have already been loaded
+ if( hMscoree != NULL )
+ pfnGetRequestedRuntimeInfo = (PFNGetRequestedRuntimeInfo)GetProcAddress( hMscoree, "GetRequestedRuntimeInfo" );
+ if( pfnGetRequestedRuntimeInfo == NULL )
+                    pfnGetRequestedRuntimeInfo = GetRequestedRuntimeInfoInternal; // in case mscoree has not been loaded, use the built-in function
+ hr = pfnGetRequestedRuntimeInfo(appEntryPath.GetUnicode(), // Use the image path to guide all version binding
+ NULL, // Do not prime with any preferred version
+ NULL, // No explicit config file - pick up on one next to image if there.
+ 0, // startupFlags
+ dwRuntimeInfoFlags, // Will bind to post-v2 runtimes if EXE PE runtime version is post-v2
+ // or EXE has config file binding to post-v2 runtime.
+ wszDirectory, MAX_PATH, NULL, // Retrieve bound directory
+ wszVersion, MAX_PATH, NULL); // Retrieve bound version
+
+ if (SUCCEEDED(hr)) {
+ commandLine.Append(wszDirectory);
+ commandLine.Append(wszVersion);
+ commandLine.Append(W("\\applaunch.exe"));
+ commandLine.Append(W("\" /activate \""));
+ commandLine.Append(pwzAppFullName);
+ commandLine.Append(W("\" "));
+
+ if (dwManifestPaths > 0 && ppwzManifestPaths) {
+ commandLine.Append(W("/manifests "));
+ for (DWORD i=0; i<dwManifestPaths; i++) {
+ commandLine.Append(W("\""));
+ commandLine.Append(*ppwzManifestPaths++);
+ commandLine.Append(W("\" "));
+ }
+ }
+
+ if (dwActivationData > 0 && ppwzActivationData) {
+ commandLine.Append(W("/parameters "));
+ for (DWORD i=0; i<dwActivationData; i++) {
+ commandLine.Append(W("\""));
+ commandLine.Append(*ppwzActivationData++);
+ commandLine.Append(W("\" "));
+ }
+ }
+ }
+ }
+ }
+
+ if (SUCCEEDED(hr)) {
+ // CreateProcess won't let this parameter be const
+ // (it writes a NULL in the middle), so we create a writable version
+ LPCWSTR wszCommandLineNonWritable = commandLine.GetUnicode();
+ size_t len = wcslen(wszCommandLineNonWritable);
+ NewArrayHolder<WCHAR> wszCommandLine(new WCHAR[len + 1]);
+ memcpy(wszCommandLine, wszCommandLineNonWritable, len * sizeof(WCHAR));
+ wszCommandLine[len] = W('\0');
+
+ STARTUPINFO sui;
+ memset(&sui, 0, sizeof(STARTUPINFO));
+ sui.cb = sizeof(STARTUPINFO);
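+            // STARTF_TITLEISAPPID tells the shell that lpTitle carries an Application
+            // User Model ID (here the app-id key form), which the taskbar uses for grouping.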
+ sui.lpTitle = pwszAppIdKeyForm;
+ sui.dwFlags = STARTF_TITLEISAPPID;
+
+ if (notPinnableBit>0)
+ sui.dwFlags |= STARTF_PREVENTPINNING;
+
+ // ClickOnce uses ShellExecute to utilize Win8+ Application Reputation service.
+ // Application Reputation validates applications coming from the Internet.
+ // ClickOnce will use ShellExecute only if there is a Mark-of-the-Web file-stream for the executable.
+ // In all other cases we continue to use CreateProcess. CreateProcess does not use AppRep service.
+ if (bUseShellExecute)
+ {
+ SHELLEXECUTEINFO sei;
+ memset(&sei, 0, sizeof(SHELLEXECUTEINFO));
+ sei.cbSize = sizeof(SHELLEXECUTEINFO);
+ sei.hwnd = NULL;
+ sei.lpVerb = NULL;
+ sei.lpFile = wszCommandLine;
+ sei.lpParameters = pwszParameters;
+ sei.lpDirectory = pwszApplicationFolderPath;
+ sei.nShow = SW_SHOWDEFAULT;
+ sei.hInstApp = NULL;
+
+ // Application Reputation is a COM Shell Extension that requires a calling thread to be an STA
+ // CorLaunchApplication_Callback calls ShellExecuteEx.
+ hr = WszSHCreateThread((LPTHREAD_START_ROUTINE) CorLaunchApplication_ThreadProc, &sei, CTF_COINIT_STA,
+ (LPTHREAD_START_ROUTINE) CorLaunchApplication_Callback);
+ }
+ else
+ {
+ // Launch the child process
+ BOOL result = WszCreateProcess(NULL,
+ wszCommandLine,
+ NULL, NULL, FALSE,
+ (lpEnvironment) ? NORMAL_PRIORITY_CLASS | CREATE_UNICODE_ENVIRONMENT | CREATE_DEFAULT_ERROR_MODE : NORMAL_PRIORITY_CLASS | CREATE_DEFAULT_ERROR_MODE,
+ lpEnvironment, pwszApplicationFolderPath,
+ &sui, lpProcessInformation);
+ if (!result)
+ hr = HRESULT_FROM_GetLastError();
+ }
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ // cleanup
+ if (hostType == HOST_TYPE_CORFLAG) {
+ // free the environment block
+#undef FreeEnvironmentStringsA
+#undef FreeEnvironmentStringsW
+ if (NULL != lpEnvironment) {
+ FreeEnvironmentStringsW((LPWSTR) lpEnvironment);
+ }
+#define FreeEnvironmentStringsW(lpEnvironment) Use_WszFreeEnvironmentStrings(lpEnvironment)
+#define FreeEnvironmentStringsA(lpEnvironment) Use_WszFreeEnvironmentStrings(lpEnvironment)
+ // reset the environment variables
+ WszSetEnvironmentVariable(g_pwzClickOnceEnv_FullName, NULL);
+ EX_TRY
+ {
+ if (dwManifestPaths > 0 && ppwzManifestPaths) {
+ for (DWORD i=0; i<dwManifestPaths; i++) {
+ StackSString manifestFile(g_pwzClickOnceEnv_Manifest);
+ StackSString buf;
+ COUNT_T size = buf.GetUnicodeAllocation();
+ _itow_s(i, buf.OpenUnicodeBuffer(size), size, 10);
+ buf.CloseBuffer();
+ manifestFile.Append(buf);
+ WszSetEnvironmentVariable(manifestFile.GetUnicode(), NULL);
+ }
+ }
+ if (dwActivationData > 0 && ppwzActivationData) {
+ for (DWORD i=0; i<dwActivationData; i++) {
+ StackSString activationData(g_pwzClickOnceEnv_Parameter);
+ StackSString buf;
+ COUNT_T size = buf.GetUnicodeAllocation();
+ _itow_s(i, buf.OpenUnicodeBuffer(size), size, 10);
+ buf.CloseBuffer();
+ activationData.Append(buf);
+ WszSetEnvironmentVariable(activationData.GetUnicode(), NULL);
+ }
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+ // leave the spin lock so other requests can be served.
+ LeaveDfSvcSpinLock();
+ }
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+}
+#endif // FEATURE_CLICKONCE
diff --git a/src/vm/hosting.h b/src/vm/hosting.h
new file mode 100644
index 0000000000..ea43d0f081
--- /dev/null
+++ b/src/vm/hosting.h
@@ -0,0 +1,65 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef __HOSTING_H__
+#define __HOSTING_H__
+
+#include "clrhost.h"
+
+#define ClrVirtualAlloc EEVirtualAlloc
+#define ClrVirtualFree EEVirtualFree
+#define ClrVirtualQuery EEVirtualQuery
+#define ClrVirtualProtect EEVirtualProtect
+#define ClrHeapCreate EEHeapCreate
+#define ClrHeapDestroy EEHeapDestroy
+#define ClrHeapAlloc EEHeapAlloc
+#define ClrHeapFree EEHeapFree
+#define ClrHeapValidate EEHeapValidate
+#define ClrCreateCriticalSection EECreateCriticalSection
+#define ClrDeleteCriticalSection EEDeleteCriticalSection
+#define ClrEnterCriticalSection EEEnterCriticalSection
+#define ClrLeaveCriticalSection EELeaveCriticalSection
+#define ClrSleepEx EESleepEx
+#define ClrTlsSetValue EETlsSetValue
+#define ClrTlsGetValue EETlsGetValue
+
+#define ClrAllocationDisallowed EEAllocationDisallowed
+
+// memory management functions
+LPVOID EEVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect);
+BOOL EEVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType);
+SIZE_T EEVirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength);
+BOOL EEVirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect);
+HANDLE EEGetProcessHeap();
+HANDLE EEHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize);
+BOOL EEHeapDestroy(HANDLE hHeap);
+LPVOID EEHeapAlloc(HANDLE hHeap, DWORD dwFlags, SIZE_T dwBytes);
+BOOL EEHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem);
+BOOL EEHeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem);
+
+BOOL EEAllocationDisallowed();
+HANDLE EEGetProcessExecutableHeap();
+
+// critical section functions
+CRITSEC_COOKIE EECreateCriticalSection(CrstType crstType, CrstFlags flags);
+void EEDeleteCriticalSection(CRITSEC_COOKIE cookie);
+void EEEnterCriticalSection(CRITSEC_COOKIE cookie);
+void EELeaveCriticalSection(CRITSEC_COOKIE cookie);
+
+DWORD EESleepEx(DWORD dwMilliseconds, BOOL bAlertable);
+
+// TLS functions
+LPVOID EETlsGetValue(DWORD slot);
+BOOL EETlsCheckValue(DWORD slot, LPVOID * pValue);
+VOID EETlsSetValue(DWORD slot, LPVOID pData);
+
+
+
+#endif // __HOSTING_H__
+
diff --git a/src/vm/i386/.gitmirror b/src/vm/i386/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/vm/i386/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror. \ No newline at end of file
diff --git a/src/vm/i386/CLRErrorReporting.vrg b/src/vm/i386/CLRErrorReporting.vrg
new file mode 100644
index 0000000000..6e45ba967c
--- /dev/null
+++ b/src/vm/i386/CLRErrorReporting.vrg
@@ -0,0 +1,5 @@
+VSREG 7
+
+[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Eventlog\Application\.NET Runtime 4.0 Error Reporting]
+"EventMessageFile"="[DWFolder.D0DF3458_A845_11D3_8D0A_0050046416B9]DW20.EXE"
+"TypesSupported"=dword:00000007
diff --git a/src/vm/i386/RedirectedHandledJITCase.asm b/src/vm/i386/RedirectedHandledJITCase.asm
new file mode 100644
index 0000000000..90d4519b19
--- /dev/null
+++ b/src/vm/i386/RedirectedHandledJITCase.asm
@@ -0,0 +1,144 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+; ***********************************************************************
+; File: RedirectedHandledJITCase.asm
+;
+; ***********************************************************************
+;
+
+; This contains thread-redirecting helper routines that are 100% x86 assembly
+
+ .586
+ .model flat
+
+ include asmconstants.inc
+
+ option casemap:none
+ .code
+
+EXTERN _GetCurrentSavedRedirectContext@0:PROC
+
+;
+; WARNING!! These functions immediately ruin thread unwindability. This is
+; WARNING!! OK as long as there is a mechanism for saving the thread context
+; WARNING!! prior to running these functions as well as a mechanism for
+; WARNING!! restoring the context prior to any stackwalk. This means that
+; WARNING!! we need to ensure that no GC can occur while the stack is
+; WARNING!! unwalkable. This further means that we cannot allow any exception
+; WARNING!! to occur when the stack is unwalkable
+;
+
+
+; If you edit this macro, make sure you update GetCONTEXTFromRedirectedStubStackFrame.
+; This function is used by both the personality routine and the debugger to retrieve the original CONTEXT.
+GenerateRedirectedHandledJITCaseStub MACRO reason
+
+EXTERN ?RedirectedHandledJITCaseFor&reason&@Thread@@CGXXZ:proc
+
+ ALIGN 4
+_RedirectedHandledJITCaseFor&reason&_Stub@0 PROC PUBLIC
+
+ push eax ; where to stuff the fake return address
+ push ebp ; save interrupted ebp for stack walk
+ mov ebp, esp
+ sub esp, 4 ; stack slot to save the CONTEXT *
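+
+        ;
+        ; Frame layout at this point:
+        ;   [ebp+4] fake return address slot (filled with the interrupted Eip below)
+        ;   [ebp+0] interrupted ebp
+        ;   [ebp-4] saved CONTEXT* (REDIRECTSTUB_EBP_OFFSET_CONTEXT)
+        ;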
+
+ ;
+ ; Save a copy of the redirect CONTEXT*.
+ ; This is needed for the debugger to unwind the stack.
+ ;
+ call _GetCurrentSavedRedirectContext@0
+
+ mov [ebp-4], eax
+.errnz REDIRECTSTUB_EBP_OFFSET_CONTEXT + 4, REDIRECTSTUB_EBP_OFFSET_CONTEXT has changed - update asm stubs
+
+ ;
+ ; Fetch the interrupted eip and save it as our return address.
+ ;
+ mov eax, [eax + CONTEXT_Eip]
+ mov [ebp+4], eax
+
+ ;
+ ; Call target, which will do whatever we needed to do in the context
+ ; of the target thread, and will RtlRestoreContext when it is done.
+ ;
+ call ?RedirectedHandledJITCaseFor&reason&@Thread@@CGXXZ
+
+ int 3 ; target shouldn't return.
+
+; Put a label here to tell the debugger where the end of this function is.
+PUBLIC _RedirectedHandledJITCaseFor&reason&_StubEnd@0
+_RedirectedHandledJITCaseFor&reason&_StubEnd@0:
+
+_RedirectedHandledJITCaseFor&reason&_Stub@0 ENDP
+
+ENDM
+
+; HijackFunctionStart and HijackFunctionEnd are used to tell BBT to keep the hijacking functions together.
+; Debugger uses range to check whether IP falls into one of them (see code:Debugger::s_hijackFunction).
+
+_HijackFunctionStart@0 proc public
+ret
+_HijackFunctionStart@0 endp
+
+GenerateRedirectedHandledJITCaseStub <GCThreadControl>
+GenerateRedirectedHandledJITCaseStub <DbgThreadControl>
+GenerateRedirectedHandledJITCaseStub <UserSuspend>
+GenerateRedirectedHandledJITCaseStub <YieldTask>
+
+; Hijack for exceptions.
+; This can be used to hijack at a 2nd-chance exception and execute the UEF
+
+EXTERN _ExceptionHijackWorker@16:PROC
+
+_ExceptionHijack@0 PROC PUBLIC
+
+ ; This is where we land when we're hijacked from an IP by the debugger.
+    ; The debugger has already pushed the args onto the stack:
+    ; - a CONTEXT
+    ; - an EXCEPTION_RECORD
+    ; - a DWORD used to multiplex the hijack
+    ; - an arbitrary void* data parameter
+ call _ExceptionHijackWorker@16
+
+ ; Don't expect to return from here. Debugger will unhijack us. It has the full
+ ; context and can properly restore us.
+ int 3
+
+; Put a label here to tell the debugger where the end of this function is.
+public _ExceptionHijackEnd@0
+_ExceptionHijackEnd@0:
+
+_ExceptionHijack@0 ENDP
+
+; It is very important to have a dummy function here.
+; Without it, the image would contain two labels with no instruction in between:
+; one for the last label in this file, and one for the first function in the image following this asm file.
+; The linker is then free to remove the symbol of the function immediately following
+; this one from the PDB, and to redirect references to the last label in this file.
+; When that happens, BBT loses info about the function, moves pieces of it to random places, and generates bad code.
+_HijackFunctionLast@0 proc public
+ret
+_HijackFunctionLast@0 endp
+
+; This is the first function outside the "keep together range". Used by BBT scripts.
+_HijackFunctionEnd@0 proc public
+ret
+_HijackFunctionEnd@0 endp
+
+END
diff --git a/src/vm/i386/asmconstants.h b/src/vm/i386/asmconstants.h
new file mode 100644
index 0000000000..748569a8a9
--- /dev/null
+++ b/src/vm/i386/asmconstants.h
@@ -0,0 +1,493 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// asmconstants.h -
+//
+// This header defines field offsets and constants used by assembly code
+// Be sure to rebuild clr/src/vm/ceemain.cpp after changing this file, to
+// ensure that the constants match the expected C/C++ values
+
+//
+// If you need to figure out a constant that has changed and is causing
+// a compile-time assert, check out USE_COMPILE_TIME_CONSTANT_FINDER.
+// TODO: put the constant finder in a common place so other platforms can use it.
+
+#ifndef _TARGET_X86_
+#error this file should only be used on an X86 platform
+#endif
+
+#include "../../inc/switches.h"
+
+#ifndef ASMCONSTANTS_C_ASSERT
+#define ASMCONSTANTS_C_ASSERT(cond)
+#endif
+
+#ifndef ASMCONSTANTS_RUNTIME_ASSERT
+#define ASMCONSTANTS_RUNTIME_ASSERT(cond)
+#endif
+
+// Some constants are different in _DEBUG builds. This macro factors out the ifdefs from below.
+#ifdef _DEBUG
+#define DBG_FRE(dbg,fre) dbg
+#else
+#define DBG_FRE(dbg,fre) fre
+#endif
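+// For example, DBG_FRE(0x2C, 0x28) evaluates to 0x2C in debug builds and to
+// 0x28 in free builds (see SIZEOF_MethodTable later in this file).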
+
+//***************************************************************************
+#if defined(_DEBUG) && defined(_TARGET_X86_) && !defined(FEATURE_CORECLR)
+ #define HAS_TRACK_CXX_EXCEPTION_CODE_HACK 1
+ #define TRACK_CXX_EXCEPTION_CODE_HACK
+#else
+ #define HAS_TRACK_CXX_EXCEPTION_CODE_HACK 0
+#endif
+
+#define INITIAL_SUCCESS_COUNT 0x100
+
+#define DynamicHelperFrameFlags_Default 0
+#define DynamicHelperFrameFlags_ObjectArg 1
+#define DynamicHelperFrameFlags_ObjectArg2 2
+
+#ifdef FEATURE_REMOTING
+#define TransparentProxyObject___stubData 0x8
+ASMCONSTANTS_C_ASSERT(TransparentProxyObject___stubData == offsetof(TransparentProxyObject, _stubData))
+
+#define TransparentProxyObject___stub 0x14
+ASMCONSTANTS_C_ASSERT(TransparentProxyObject___stub == offsetof(TransparentProxyObject, _stub))
+
+#define TransparentProxyObject___pMT 0xc
+ASMCONSTANTS_C_ASSERT(TransparentProxyObject___pMT == offsetof(TransparentProxyObject, _pMT))
+#endif // FEATURE_REMOTING
+
+// CONTEXT from rotor_pal.h
+#define CONTEXT_Edi 0x9c
+ASMCONSTANTS_C_ASSERT(CONTEXT_Edi == offsetof(CONTEXT,Edi))
+
+#define CONTEXT_Esi 0xa0
+ASMCONSTANTS_C_ASSERT(CONTEXT_Esi == offsetof(CONTEXT,Esi))
+
+#define CONTEXT_Ebx 0xa4
+ASMCONSTANTS_C_ASSERT(CONTEXT_Ebx == offsetof(CONTEXT,Ebx))
+
+#define CONTEXT_Edx 0xa8
+ASMCONSTANTS_C_ASSERT(CONTEXT_Edx == offsetof(CONTEXT,Edx))
+
+#define CONTEXT_Eax 0xb0
+ASMCONSTANTS_C_ASSERT(CONTEXT_Eax == offsetof(CONTEXT,Eax))
+
+#define CONTEXT_Ebp 0xb4
+ASMCONSTANTS_C_ASSERT(CONTEXT_Ebp == offsetof(CONTEXT,Ebp))
+
+#define CONTEXT_Eip 0xb8
+ASMCONSTANTS_C_ASSERT(CONTEXT_Eip == offsetof(CONTEXT,Eip))
+
+#define CONTEXT_Esp 0xc4
+ASMCONSTANTS_C_ASSERT(CONTEXT_Esp == offsetof(CONTEXT,Esp))
+
+// SYSTEM_INFO from rotor_pal.h
+#define SYSTEM_INFO_dwNumberOfProcessors 20
+ASMCONSTANTS_C_ASSERT(SYSTEM_INFO_dwNumberOfProcessors == offsetof(SYSTEM_INFO,dwNumberOfProcessors))
+
+// SpinConstants from clr/src/vars.h
+#define SpinConstants_dwInitialDuration 0
+ASMCONSTANTS_C_ASSERT(SpinConstants_dwInitialDuration == offsetof(SpinConstants,dwInitialDuration))
+
+#define SpinConstants_dwMaximumDuration 4
+ASMCONSTANTS_C_ASSERT(SpinConstants_dwMaximumDuration == offsetof(SpinConstants,dwMaximumDuration))
+
+#define SpinConstants_dwBackoffFactor 8
+ASMCONSTANTS_C_ASSERT(SpinConstants_dwBackoffFactor == offsetof(SpinConstants,dwBackoffFactor))
+
+// EHContext from clr/src/vm/i386/cgencpu.h
+#define EHContext_Eax 0x00
+ASMCONSTANTS_C_ASSERT(EHContext_Eax == offsetof(EHContext,Eax))
+
+#define EHContext_Ebx 0x04
+ASMCONSTANTS_C_ASSERT(EHContext_Ebx == offsetof(EHContext,Ebx))
+
+#define EHContext_Ecx 0x08
+ASMCONSTANTS_C_ASSERT(EHContext_Ecx == offsetof(EHContext,Ecx))
+
+#define EHContext_Edx 0x0c
+ASMCONSTANTS_C_ASSERT(EHContext_Edx == offsetof(EHContext,Edx))
+
+#define EHContext_Esi 0x10
+ASMCONSTANTS_C_ASSERT(EHContext_Esi == offsetof(EHContext,Esi))
+
+#define EHContext_Edi 0x14
+ASMCONSTANTS_C_ASSERT(EHContext_Edi == offsetof(EHContext,Edi))
+
+#define EHContext_Ebp 0x18
+ASMCONSTANTS_C_ASSERT(EHContext_Ebp == offsetof(EHContext,Ebp))
+
+#define EHContext_Esp 0x1c
+ASMCONSTANTS_C_ASSERT(EHContext_Esp == offsetof(EHContext,Esp))
+
+#define EHContext_Eip 0x20
+ASMCONSTANTS_C_ASSERT(EHContext_Eip == offsetof(EHContext,Eip))
+
+
+// from clr/src/fjit/helperframe.h
+#define SIZEOF_MachState 40
+ASMCONSTANTS_C_ASSERT(SIZEOF_MachState == sizeof(MachState))
+
+#define MachState__pEdi 0
+ASMCONSTANTS_C_ASSERT(MachState__pEdi == offsetof(MachState, _pEdi))
+
+#define MachState__edi 4
+ASMCONSTANTS_C_ASSERT(MachState__edi == offsetof(MachState, _edi))
+
+#define MachState__pEsi 8
+ASMCONSTANTS_C_ASSERT(MachState__pEsi == offsetof(MachState, _pEsi))
+
+#define MachState__esi 12
+ASMCONSTANTS_C_ASSERT(MachState__esi == offsetof(MachState, _esi))
+
+#define MachState__pEbx 16
+ASMCONSTANTS_C_ASSERT(MachState__pEbx == offsetof(MachState, _pEbx))
+
+#define MachState__ebx 20
+ASMCONSTANTS_C_ASSERT(MachState__ebx == offsetof(MachState, _ebx))
+
+#define MachState__pEbp 24
+ASMCONSTANTS_C_ASSERT(MachState__pEbp == offsetof(MachState, _pEbp))
+
+#define MachState__ebp 28
+ASMCONSTANTS_C_ASSERT(MachState__ebp == offsetof(MachState, _ebp))
+
+#define MachState__esp 32
+ASMCONSTANTS_C_ASSERT(MachState__esp == offsetof(MachState, _esp))
+
+#define MachState__pRetAddr 36
+ASMCONSTANTS_C_ASSERT(MachState__pRetAddr == offsetof(MachState, _pRetAddr))
+
+#define LazyMachState_captureEbp 40
+ASMCONSTANTS_C_ASSERT(LazyMachState_captureEbp == offsetof(LazyMachState, captureEbp))
+
+#define LazyMachState_captureEsp 44
+ASMCONSTANTS_C_ASSERT(LazyMachState_captureEsp == offsetof(LazyMachState, captureEsp))
+
+#define LazyMachState_captureEip 48
+ASMCONSTANTS_C_ASSERT(LazyMachState_captureEip == offsetof(LazyMachState, captureEip))
+
+
+#define VASigCookie__StubOffset 4
+ASMCONSTANTS_C_ASSERT(VASigCookie__StubOffset == offsetof(VASigCookie, pNDirectILStub))
+
+#define SIZEOF_TailCallFrame 32
+ASMCONSTANTS_C_ASSERT(SIZEOF_TailCallFrame == sizeof(TailCallFrame))
+
+#define SIZEOF_GSCookie 4
+
+// ICodeManager::SHADOW_SP_IN_FILTER from clr/src/inc/eetwain.h
+#define SHADOW_SP_IN_FILTER_ASM 0x1
+ASMCONSTANTS_C_ASSERT(SHADOW_SP_IN_FILTER_ASM == ICodeManager::SHADOW_SP_IN_FILTER)
+
+// from clr/src/inc/corinfo.h
+#define CORINFO_NullReferenceException_ASM 0
+ASMCONSTANTS_C_ASSERT(CORINFO_NullReferenceException_ASM == CORINFO_NullReferenceException)
+
+#define CORINFO_IndexOutOfRangeException_ASM 3
+ASMCONSTANTS_C_ASSERT(CORINFO_IndexOutOfRangeException_ASM == CORINFO_IndexOutOfRangeException)
+
+#define CORINFO_OverflowException_ASM 4
+ASMCONSTANTS_C_ASSERT(CORINFO_OverflowException_ASM == CORINFO_OverflowException)
+
+#define CORINFO_SynchronizationLockException_ASM 5
+ASMCONSTANTS_C_ASSERT(CORINFO_SynchronizationLockException_ASM == CORINFO_SynchronizationLockException)
+
+#define CORINFO_ArrayTypeMismatchException_ASM 6
+ASMCONSTANTS_C_ASSERT(CORINFO_ArrayTypeMismatchException_ASM == CORINFO_ArrayTypeMismatchException)
+
+#define CORINFO_ArgumentNullException_ASM 8
+ASMCONSTANTS_C_ASSERT(CORINFO_ArgumentNullException_ASM == CORINFO_ArgumentNullException)
+
+#define CORINFO_ArgumentException_ASM 9
+ASMCONSTANTS_C_ASSERT(CORINFO_ArgumentException_ASM == CORINFO_ArgumentException)
+
+
+#ifndef CROSSGEN_COMPILE
+
+// from clr/src/vm/threads.h
+#if defined(TRACK_CXX_EXCEPTION_CODE_HACK) // Is C++ exception code tracking turned on?
+ #define Thread_m_LastCxxSEHExceptionCode 0x20
+ ASMCONSTANTS_C_ASSERT(Thread_m_LastCxxSEHExceptionCode == offsetof(Thread, m_LastCxxSEHExceptionCode))
+
+ #define Thread_m_Context 0x3C
+#else
+ #define Thread_m_Context 0x38
+#endif // TRACK_CXX_EXCEPTION_CODE_HACK
+ASMCONSTANTS_C_ASSERT(Thread_m_Context == offsetof(Thread, m_Context))
+
+#define Thread_m_State 0x04
+ASMCONSTANTS_C_ASSERT(Thread_m_State == offsetof(Thread, m_State))
+#endif // CROSSGEN_COMPILE
+
+#define Thread_m_fPreemptiveGCDisabled 0x08
+#ifndef CROSSGEN_COMPILE
+ASMCONSTANTS_C_ASSERT(Thread_m_fPreemptiveGCDisabled == offsetof(Thread, m_fPreemptiveGCDisabled))
+#endif // CROSSGEN_COMPILE
+
+#define Thread_m_pFrame 0x0C
+#ifndef CROSSGEN_COMPILE
+ASMCONSTANTS_C_ASSERT(Thread_m_pFrame == offsetof(Thread, m_pFrame))
+#endif // CROSSGEN_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+#define Thread_m_dwLockCount 0x18
+ASMCONSTANTS_C_ASSERT(Thread_m_dwLockCount == offsetof(Thread, m_dwLockCount))
+
+#define Thread_m_ThreadId 0x1C
+ASMCONSTANTS_C_ASSERT(Thread_m_ThreadId == offsetof(Thread, m_ThreadId))
+
+#define TS_CatchAtSafePoint_ASM 0x5F
+ASMCONSTANTS_C_ASSERT(Thread::TS_CatchAtSafePoint == TS_CatchAtSafePoint_ASM)
+
+#ifdef FEATURE_HIJACK
+#define TS_Hijacked_ASM 0x80
+ASMCONSTANTS_C_ASSERT(Thread::TS_Hijacked == TS_Hijacked_ASM)
+#endif
+
+#endif // CROSSGEN_COMPILE
+
+
+// from clr/src/vm/appdomain.hpp
+
+#define AppDomain__m_dwId 0x4
+ASMCONSTANTS_C_ASSERT(AppDomain__m_dwId == offsetof(AppDomain, m_dwId));
+
+// from clr/src/vm/ceeload.cpp
+#ifdef FEATURE_MIXEDMODE
+#define IJWNOADThunk__m_cache 0x1C
+ASMCONSTANTS_C_ASSERT(IJWNOADThunk__m_cache == offsetof(IJWNOADThunk, m_cache))
+
+#define IJWNOADThunk__NextCacheOffset 0x8
+ASMCONSTANTS_C_ASSERT(IJWNOADThunk__NextCacheOffset == sizeof(IJWNOADThunkStubCache))
+
+#define IJWNOADThunk__CodeAddrOffsetFromADID 0x4
+ASMCONSTANTS_C_ASSERT(IJWNOADThunk__CodeAddrOffsetFromADID == offsetof(IJWNOADThunkStubCache, m_CodeAddr))
+#endif //FEATURE_MIXEDMODE
+
+// from clr/src/vm/syncblk.h
+#define SizeOfSyncTableEntry_ASM 8
+ASMCONSTANTS_C_ASSERT(sizeof(SyncTableEntry) == SizeOfSyncTableEntry_ASM)
+
+#define SyncBlockIndexOffset_ASM 4
+ASMCONSTANTS_C_ASSERT(sizeof(ObjHeader) - offsetof(ObjHeader, m_SyncBlockValue) == SyncBlockIndexOffset_ASM)
+
+#ifndef __GNUC__
+#define SyncTableEntry_m_SyncBlock 0
+ASMCONSTANTS_C_ASSERT(offsetof(SyncTableEntry, m_SyncBlock) == SyncTableEntry_m_SyncBlock)
+
+#define SyncBlock_m_Monitor 0
+ASMCONSTANTS_C_ASSERT(offsetof(SyncBlock, m_Monitor) == SyncBlock_m_Monitor)
+
+#define AwareLock_m_MonitorHeld 0
+ASMCONSTANTS_C_ASSERT(offsetof(AwareLock, m_MonitorHeld) == AwareLock_m_MonitorHeld)
+#else
+// The following 3 offsets have a value of 0 and must be
+// defined as an empty string. Otherwise, gas may generate assembly
+// code with a 0 displacement if a literal 0 is left in the displacement
+// field of an instruction.
+#define SyncTableEntry_m_SyncBlock // 0
+ASMCONSTANTS_C_ASSERT(offsetof(SyncTableEntry, m_SyncBlock) == 0)
+
+#define SyncBlock_m_Monitor // 0
+ASMCONSTANTS_C_ASSERT(offsetof(SyncBlock, m_Monitor) == 0)
+
+#define AwareLock_m_MonitorHeld // 0
+ASMCONSTANTS_C_ASSERT(offsetof(AwareLock, m_MonitorHeld) == 0)
+#endif // !__GNUC__
+
+#define AwareLock_m_HoldingThread 8
+ASMCONSTANTS_C_ASSERT(offsetof(AwareLock, m_HoldingThread) == AwareLock_m_HoldingThread)
+
+#define AwareLock_m_Recursion 4
+ASMCONSTANTS_C_ASSERT(offsetof(AwareLock, m_Recursion) == AwareLock_m_Recursion)
+
+#define BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_ASM 0x08000000
+ASMCONSTANTS_C_ASSERT(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_ASM == BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
+
+#define BIT_SBLK_SPIN_LOCK_ASM 0x10000000
+ASMCONSTANTS_C_ASSERT(BIT_SBLK_SPIN_LOCK_ASM == BIT_SBLK_SPIN_LOCK)
+
+#define SBLK_MASK_LOCK_THREADID_ASM 0x000003FF // special value of 0 + 1023 thread ids
+ASMCONSTANTS_C_ASSERT(SBLK_MASK_LOCK_THREADID_ASM == SBLK_MASK_LOCK_THREADID)
+
+#define SBLK_MASK_LOCK_RECLEVEL_ASM 0x0000FC00 // 64 recursion levels
+ASMCONSTANTS_C_ASSERT(SBLK_MASK_LOCK_RECLEVEL_ASM == SBLK_MASK_LOCK_RECLEVEL)
+
+#define SBLK_LOCK_RECLEVEL_INC_ASM 0x00000400 // each level is this much higher than the previous one
+ASMCONSTANTS_C_ASSERT(SBLK_LOCK_RECLEVEL_INC_ASM == SBLK_LOCK_RECLEVEL_INC)
+
+#define BIT_SBLK_IS_HASHCODE_ASM 0x04000000
+ASMCONSTANTS_C_ASSERT(BIT_SBLK_IS_HASHCODE_ASM == BIT_SBLK_IS_HASHCODE)
+
+#define MASK_SYNCBLOCKINDEX_ASM 0x03ffffff // ((1<<SYNCBLOCKINDEX_BITS)-1)
+ASMCONSTANTS_C_ASSERT(MASK_SYNCBLOCKINDEX_ASM == MASK_SYNCBLOCKINDEX)
+
+// BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_ASM + BIT_SBLK_SPIN_LOCK_ASM +
+// SBLK_MASK_LOCK_THREADID_ASM + SBLK_MASK_LOCK_RECLEVEL_ASM
+#define SBLK_COMBINED_MASK_ASM 0x1800ffff
+ASMCONSTANTS_C_ASSERT(SBLK_COMBINED_MASK_ASM == (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL))
+
+// BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_ASM + BIT_SBLK_SPIN_LOCK_ASM
+#define BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_SPIN_LOCK_ASM 0x18000000
+ASMCONSTANTS_C_ASSERT(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_SPIN_LOCK_ASM == (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK))
+
+// BIT_SBLK_IS_HASHCODE + BIT_SBLK_SPIN_LOCK
+#define BIT_SBLK_IS_HASHCODE_OR_SPIN_LOCK_ASM 0x14000000
+ASMCONSTANTS_C_ASSERT(BIT_SBLK_IS_HASHCODE_OR_SPIN_LOCK_ASM == (BIT_SBLK_IS_HASHCODE + BIT_SBLK_SPIN_LOCK))
+
+// This is the offset from EBP at which the original CONTEXT is stored in one of the
+// RedirectedHandledJITCase*_Stub functions.
+#define REDIRECTSTUB_EBP_OFFSET_CONTEXT (-4)
+
+#define MethodTable_m_wNumInterfaces 0x0E
+ASMCONSTANTS_C_ASSERT(MethodTable_m_wNumInterfaces == offsetof(MethodTable, m_wNumInterfaces))
+
+#define MethodTable_m_dwFlags 0x0
+ASMCONSTANTS_C_ASSERT(MethodTable_m_dwFlags == offsetof(MethodTable, m_dwFlags))
+
+#define MethodTable_m_pInterfaceMap DBG_FRE(0x28, 0x24)
+ASMCONSTANTS_C_ASSERT(MethodTable_m_pInterfaceMap == offsetof(MethodTable, m_pMultipurposeSlot2))
+
+#define SIZEOF_MethodTable DBG_FRE(0x2C, 0x28)
+ASMCONSTANTS_C_ASSERT(SIZEOF_MethodTable == sizeof(MethodTable))
+
+#define SIZEOF_InterfaceInfo_t 0x4
+ASMCONSTANTS_C_ASSERT(SIZEOF_InterfaceInfo_t == sizeof(InterfaceInfo_t))
+
+#ifdef FEATURE_COMINTEROP
+
+#define SIZEOF_FrameHandlerExRecord 0x0c
+#define OFFSETOF__FrameHandlerExRecord__m_ExReg__Next 0
+#define OFFSETOF__FrameHandlerExRecord__m_ExReg__Handler 4
+#define OFFSETOF__FrameHandlerExRecord__m_pEntryFrame 8
+ASMCONSTANTS_C_ASSERT(SIZEOF_FrameHandlerExRecord == sizeof(FrameHandlerExRecord))
+ASMCONSTANTS_C_ASSERT(OFFSETOF__FrameHandlerExRecord__m_ExReg__Next == offsetof(FrameHandlerExRecord, m_ExReg) + offsetof(EXCEPTION_REGISTRATION_RECORD, Next))
+ASMCONSTANTS_C_ASSERT(OFFSETOF__FrameHandlerExRecord__m_ExReg__Handler == offsetof(FrameHandlerExRecord, m_ExReg) + offsetof(EXCEPTION_REGISTRATION_RECORD, Handler))
+ASMCONSTANTS_C_ASSERT(OFFSETOF__FrameHandlerExRecord__m_pEntryFrame == offsetof(FrameHandlerExRecord, m_pEntryFrame))
+
+#ifdef _DEBUG
+#ifndef STACK_OVERWRITE_BARRIER_SIZE
+#define STACK_OVERWRITE_BARRIER_SIZE 20
+#endif
+#ifndef STACK_OVERWRITE_BARRIER_VALUE
+#define STACK_OVERWRITE_BARRIER_VALUE 0xabcdefab
+#endif
+
+#define SIZEOF_FrameHandlerExRecordWithBarrier 0x5c
+ASMCONSTANTS_C_ASSERT(SIZEOF_FrameHandlerExRecordWithBarrier == sizeof(FrameHandlerExRecordWithBarrier))
+#endif
+
+
+#ifdef MDA_SUPPORTED
+#define SIZEOF_StackImbalanceCookie 0x14
+ASMCONSTANTS_C_ASSERT(SIZEOF_StackImbalanceCookie == sizeof(StackImbalanceCookie))
+
+#define StackImbalanceCookie__m_pMD 0x00
+#define StackImbalanceCookie__m_pTarget 0x04
+#define StackImbalanceCookie__m_dwStackArgSize 0x08
+#define StackImbalanceCookie__m_callConv 0x0c
+#define StackImbalanceCookie__m_dwSavedEsp 0x10
+#define StackImbalanceCookie__HAS_FP_RETURN_VALUE 0x80000000
+
+ASMCONSTANTS_C_ASSERT(StackImbalanceCookie__m_pMD == offsetof(StackImbalanceCookie, m_pMD))
+ASMCONSTANTS_C_ASSERT(StackImbalanceCookie__m_pTarget == offsetof(StackImbalanceCookie, m_pTarget))
+ASMCONSTANTS_C_ASSERT(StackImbalanceCookie__m_dwStackArgSize == offsetof(StackImbalanceCookie, m_dwStackArgSize))
+ASMCONSTANTS_C_ASSERT(StackImbalanceCookie__m_callConv == offsetof(StackImbalanceCookie, m_callConv))
+ASMCONSTANTS_C_ASSERT(StackImbalanceCookie__m_dwSavedEsp == offsetof(StackImbalanceCookie, m_dwSavedEsp))
+ASMCONSTANTS_C_ASSERT(StackImbalanceCookie__HAS_FP_RETURN_VALUE == StackImbalanceCookie::HAS_FP_RETURN_VALUE)
+#endif // MDA_SUPPORTED
+
+#define MethodDesc_m_wFlags DBG_FRE(0x1a, 0x06)
+ASMCONSTANTS_C_ASSERT(MethodDesc_m_wFlags == offsetof(MethodDesc, m_wFlags))
+
+#define MethodDesc_mdcClassification 7
+ASMCONSTANTS_C_ASSERT(MethodDesc_mdcClassification == mdcClassification)
+
+#define MethodDesc_mcComInterop 6
+ASMCONSTANTS_C_ASSERT(MethodDesc_mcComInterop == mcComInterop)
+
+#define ComPlusCallMethodDesc__m_pComPlusCallInfo DBG_FRE(0x1C, 0x8)
+ASMCONSTANTS_C_ASSERT(ComPlusCallMethodDesc__m_pComPlusCallInfo == offsetof(ComPlusCallMethodDesc, m_pComPlusCallInfo))
+
+#define ComPlusCallInfo__m_pRetThunk 0x10
+ASMCONSTANTS_C_ASSERT(ComPlusCallInfo__m_pRetThunk == offsetof(ComPlusCallInfo, m_pRetThunk))
+
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP
+#define NonTrivialInterfaceCastFlags 0x40080000
+ASMCONSTANTS_C_ASSERT(NonTrivialInterfaceCastFlags == MethodTable::public_enum_flag_NonTrivialInterfaceCast)
+#else
+#define NonTrivialInterfaceCastFlags 0x00080000
+ASMCONSTANTS_C_ASSERT(NonTrivialInterfaceCastFlags == MethodTable::public_enum_flag_NonTrivialInterfaceCast)
+#endif
+
+#define ASM__VTABLE_SLOTS_PER_CHUNK 8
+ASMCONSTANTS_C_ASSERT(ASM__VTABLE_SLOTS_PER_CHUNK == VTABLE_SLOTS_PER_CHUNK)
+
+#define ASM__VTABLE_SLOTS_PER_CHUNK_LOG2 3
+ASMCONSTANTS_C_ASSERT(ASM__VTABLE_SLOTS_PER_CHUNK_LOG2 == VTABLE_SLOTS_PER_CHUNK_LOG2)
+
+#define TLS_GETTER_MAX_SIZE_ASM DBG_FRE(0x20, 0x10)
+ASMCONSTANTS_C_ASSERT(TLS_GETTER_MAX_SIZE_ASM == TLS_GETTER_MAX_SIZE)
+
+#define JIT_TailCall_StackOffsetToFlags 0x08
+
+#define CallDescrData__pSrc 0x00
+#define CallDescrData__numStackSlots 0x04
+#define CallDescrData__pArgumentRegisters 0x08
+#define CallDescrData__fpReturnSize 0x0C
+#define CallDescrData__pTarget 0x10
+#ifndef __GNUC__
+#define CallDescrData__returnValue 0x18
+#else
+#define CallDescrData__returnValue 0x14
+#endif
+
+ASMCONSTANTS_C_ASSERT(CallDescrData__pSrc == offsetof(CallDescrData, pSrc))
+ASMCONSTANTS_C_ASSERT(CallDescrData__numStackSlots == offsetof(CallDescrData, numStackSlots))
+ASMCONSTANTS_C_ASSERT(CallDescrData__pArgumentRegisters == offsetof(CallDescrData, pArgumentRegisters))
+ASMCONSTANTS_C_ASSERT(CallDescrData__fpReturnSize == offsetof(CallDescrData, fpReturnSize))
+ASMCONSTANTS_C_ASSERT(CallDescrData__pTarget == offsetof(CallDescrData, pTarget))
+ASMCONSTANTS_C_ASSERT(CallDescrData__returnValue == offsetof(CallDescrData, returnValue))
+
+#undef ASMCONSTANTS_C_ASSERT
+#undef ASMCONSTANTS_RUNTIME_ASSERT
+
+// #define USE_COMPILE_TIME_CONSTANT_FINDER // Uncomment this line to use the constant finder
+#if defined(__cplusplus) && defined(USE_COMPILE_TIME_CONSTANT_FINDER)
+// This class causes the compiler to emit an error with the constant we're interested in
+// in the error message. This is useful if a size or offset changes. To use, comment out
+// the compile-time assert that is firing, enable the constant finder, add the appropriate
+// constant to find to BogusFunction(), and build.
+//
+// Here's a sample compiler error:
+// d:\dd\clr\src\ndp\clr\src\vm\i386\asmconstants.h(326) : error C2248: 'FindCompileTimeConstant<N>::FindCompileTimeConstant' : cannot access private member declared in class 'FindCompileTimeConstant<N>'
+// with
+// [
+// N=1520
+// ]
+// d:\dd\clr\src\ndp\clr\src\vm\i386\asmconstants.h(321) : see declaration of 'FindCompileTimeConstant<N>::FindCompileTimeConstant'
+// with
+// [
+// N=1520
+// ]
+template<size_t N>
+class FindCompileTimeConstant
+{
+private:
+ FindCompileTimeConstant();
+};
+
+void BogusFunction()
+{
+ // Sample usage to generate the error
+ FindCompileTimeConstant<offsetof(AppDomain, m_dwId)> bogus_variable;
+}
+#endif // defined(__cplusplus) && defined(USE_COMPILE_TIME_CONSTANT_FINDER)
diff --git a/src/vm/i386/asmhelpers.asm b/src/vm/i386/asmhelpers.asm
new file mode 100644
index 0000000000..782fcd8f82
--- /dev/null
+++ b/src/vm/i386/asmhelpers.asm
@@ -0,0 +1,2456 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+;
+; FILE: asmhelpers.asm
+;
+; *** NOTE: If you make changes to this file, propagate the changes to
+; asmhelpers.s in this directory
+;
+
+;
+; ======================================================================================
+
+ .586
+ .model flat
+
+include asmconstants.inc
+
+ assume fs: nothing
+ option casemap:none
+ .code
+
+EXTERN __imp__RtlUnwind@16:DWORD
+ifdef _DEBUG
+EXTERN _HelperMethodFrameConfirmState@20:PROC
+endif
+ifdef FEATURE_MIXEDMODE
+EXTERN _IJWNOADThunkJumpTargetHelper@4:PROC
+endif
+EXTERN _StubRareEnableWorker@4:PROC
+ifdef FEATURE_COMINTEROP
+EXTERN _StubRareDisableHRWorker@4:PROC
+endif ; FEATURE_COMINTEROP
+EXTERN _StubRareDisableTHROWWorker@4:PROC
+EXTERN __imp__TlsGetValue@4:DWORD
+TlsGetValue PROTO stdcall
+ifdef FEATURE_HIJACK
+EXTERN _OnHijackObjectWorker@4:PROC
+EXTERN _OnHijackInteriorPointerWorker@4:PROC
+EXTERN _OnHijackScalarWorker@4:PROC
+endif ;FEATURE_HIJACK
+EXTERN _COMPlusEndCatch@20:PROC
+EXTERN _COMPlusFrameHandler:PROC
+ifdef FEATURE_COMINTEROP
+EXTERN _COMPlusFrameHandlerRevCom:PROC
+endif ; FEATURE_COMINTEROP
+EXTERN __alloca_probe:PROC
+EXTERN _NDirectImportWorker@4:PROC
+EXTERN _UMThunkStubRareDisableWorker@8:PROC
+ifndef FEATURE_IMPLICIT_TLS
+ifdef ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+; This is defined in C (threads.cpp) and enforces EE_THREAD_NOT_REQUIRED contracts
+GetThreadGenericFullCheck EQU ?GetThreadGenericFullCheck@@YGPAVThread@@XZ
+EXTERN GetThreadGenericFullCheck:PROC
+endif ; ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+
+EXTERN _gThreadTLSIndex:DWORD
+EXTERN _gAppDomainTLSIndex:DWORD
+endif ; FEATURE_IMPLICIT_TLS
+
+EXTERN _VarargPInvokeStubWorker@12:PROC
+EXTERN _GenericPInvokeCalliStubWorker@12:PROC
+
+; To debug that LastThrownObjectException really is EXCEPTION_COMPLUS
+ifdef TRACK_CXX_EXCEPTION_CODE_HACK
+EXTERN __imp____CxxFrameHandler:PROC
+endif
+
+EXTERN _GetThread@0:PROC
+EXTERN _GetAppDomain@0:PROC
+
+ifdef MDA_SUPPORTED
+EXTERN _PInvokeStackImbalanceWorker@8:PROC
+endif
+
+ifndef FEATURE_CORECLR
+EXTERN _CopyCtorCallStubWorker@4:PROC
+endif
+
+EXTERN _PreStubWorker@8:PROC
+
+ifdef FEATURE_COMINTEROP
+EXTERN _CLRToCOMWorker@8:PROC
+endif
+
+ifdef FEATURE_REMOTING
+EXTERN _TransparentProxyStubWorker@8:PROC
+endif
+
+ifdef FEATURE_PREJIT
+EXTERN _ExternalMethodFixupWorker@16:PROC
+EXTERN _VirtualMethodFixupWorker@8:PROC
+EXTERN _StubDispatchFixupWorker@16:PROC
+endif
+
+ifdef FEATURE_COMINTEROP
+EXTERN _ComPreStubWorker@8:PROC
+endif
+
+ifdef FEATURE_READYTORUN
+EXTERN _DynamicHelperWorker@20:PROC
+endif
+
+ifdef FEATURE_REMOTING
+EXTERN _InContextTPQuickDispatchAsmStub@0:PROC
+endif
+
+EXTERN @JIT_InternalThrow@4:PROC
+
+EXTERN @ProfileEnter@8:PROC
+EXTERN @ProfileLeave@8:PROC
+EXTERN @ProfileTailcall@8:PROC
+
+FASTCALL_FUNC macro FuncName,cbArgs
+FuncNameReal EQU @&FuncName&@&cbArgs
+FuncNameReal proc public
+endm
+
+FASTCALL_ENDFUNC macro
+FuncNameReal endp
+endm
+
+ifdef FEATURE_COMINTEROP
+ifdef _DEBUG
+ CPFH_STACK_SIZE equ SIZEOF_FrameHandlerExRecord + STACK_OVERWRITE_BARRIER_SIZE*4
+else ; _DEBUG
+ CPFH_STACK_SIZE equ SIZEOF_FrameHandlerExRecord
+endif ; _DEBUG
+
+PUSH_CPFH_FOR_COM macro trashReg, pFrameBaseReg, pFrameOffset
+
+ ;
+ ; Setup the FrameHandlerExRecord
+ ;
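+    ; The three pushes below build a FrameHandlerExRecord in place on the stack,
+    ; filling m_pEntryFrame, m_ExReg.Handler and m_ExReg.Next in that order;
+    ; esp is then pointed at the record and linked into the fs:[0] SEH chain.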
+ push dword ptr [pFrameBaseReg + pFrameOffset]
+ push _COMPlusFrameHandlerRevCom
+ mov trashReg, fs:[0]
+ push trashReg
+ mov fs:[0], esp
+
+ifdef _DEBUG
+ mov trashReg, STACK_OVERWRITE_BARRIER_SIZE
+@@:
+ push STACK_OVERWRITE_BARRIER_VALUE
+ dec trashReg
+ jnz @B
+endif ; _DEBUG
+
+endm ; PUSH_CPFH_FOR_COM
+
+
+POP_CPFH_FOR_COM macro trashReg
+
+ ;
+ ; Unlink FrameHandlerExRecord from FS:0 chain
+ ;
+ifdef _DEBUG
+ add esp, STACK_OVERWRITE_BARRIER_SIZE*4
+endif
+ mov trashReg, [esp + OFFSETOF__FrameHandlerExRecord__m_ExReg__Next]
+ mov fs:[0], trashReg
+ add esp, SIZEOF_FrameHandlerExRecord
+
+endm ; POP_CPFH_FOR_COM
+endif ; FEATURE_COMINTEROP
+
+;
+; FramedMethodFrame prolog
+;
+STUB_PROLOG macro
+ ; push ebp-frame
+ push ebp
+ mov ebp,esp
+
+ ; save CalleeSavedRegisters
+ push ebx
+ push esi
+ push edi
+
+ ; push ArgumentRegisters
+ push ecx
+ push edx
+endm
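+
+; A sketch of the frame STUB_PROLOG leaves behind (derived from the pushes
+; above; this is why stubs below address the first stack argument as
+; [esp + 4*7] after taking esi = esp):
+;
+;     [esp+00h] edx  \ ArgumentRegisters
+;     [esp+04h] ecx  /
+;     [esp+08h] edi  \
+;     [esp+0Ch] esi  | CalleeSavedRegisters
+;     [esp+10h] ebx  /
+;     [esp+14h] ebp
+;     [esp+18h] return address
+;     [esp+1Ch] first stack argument of the caller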
+
+;
+; FramedMethodFrame epilog
+;
+STUB_EPILOG macro
+ ; pop ArgumentRegisters
+ pop edx
+ pop ecx
+
+ ; pop CalleeSavedRegisters
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+endm
+
+;
+; FramedMethodFrame epilog
+;
+STUB_EPILOG_RETURN macro
+ ; pop ArgumentRegisters
+ add esp, 8
+
+ ; pop CalleeSavedRegisters
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+endm
+
+STUB_PROLOG_2_HIDDEN_ARGS macro
+
+ ;
+ ; The stub arguments are where we want to set up the TransitionBlock. We will
+ ; set up the TransitionBlock later once we can trash them
+ ;
+ ; push ebp-frame
+ ; push ebp
+ ; mov ebp,esp
+
+ ; save CalleeSavedRegisters
+ ; push ebx
+
+ push esi
+ push edi
+
+ ; push ArgumentRegisters
+ push ecx
+ push edx
+
+ mov ecx, [esp + 4*4]
+ mov edx, [esp + 5*4]
+
+ ; Set up a proper EBP frame now that the stub arguments can be trashed
+ mov [esp + 4*4],ebx
+ mov [esp + 5*4],ebp
+ lea ebp, [esp + 5*4]
+endm
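+
+; Note: after the moves above, the resulting frame layout is intentionally
+; identical to the one built by STUB_PROLOG. The two hidden stub arguments
+; (at [esp] and [esp+4] on entry) are loaded into ecx/edx, and their slots
+; are reused for the saved ebx/ebp.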
+
+ResetCurrentContext PROC stdcall public
+ LOCAL ctrlWord:WORD
+
+ ; Clear the direction flag (used for rep instructions)
+ cld
+
+ fnstcw ctrlWord
+ fninit ; reset FPU
+ and ctrlWord, 0f00h ; preserve precision and rounding control
+ or ctrlWord, 007fh ; mask all exceptions
+ fldcw ctrlWord ; reload the updated control word
+ RET
+ResetCurrentContext ENDP
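+
+; For reference, the x87 control-word masks used above: bits 0-5 are the
+; exception masks (007fh sets them all, plus reserved bit 6), bits 8-9 are
+; precision control and bits 10-11 rounding control (together covered by
+; 0f00h), so the sequence re-initializes the FPU while keeping the caller's
+; precision and rounding mode.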
+
+;Incoming:
+; ESP+4: Pointer to buffer to which FPU state should be saved
+_CaptureFPUContext@4 PROC public
+
+ mov ecx, [esp+4]
+ fnstenv [ecx]
+ retn 4
+
+_CaptureFPUContext@4 ENDP
+
+; Incoming:
+; ESP+4: Pointer to buffer from which FPU state should be restored
+_RestoreFPUContext@4 PROC public
+
+ mov ecx, [esp+4]
+ fldenv [ecx]
+ retn 4
+
+_RestoreFPUContext@4 ENDP
+
+ifndef FEATURE_CORECLR
+ifdef _DEBUG
+; For C++ exceptions, we desperately need to know the SEH code. This allows us to properly
+; distinguish managed exceptions from C++ exceptions from standard SEH like hard stack overflow.
+; We do this by providing our own handler that squirrels away the exception code and then
+; defers to the C++ service. Fortunately, two symbols exist for the C++ handler.
+___CxxFrameHandler3 PROC public
+
+ ; We don't know what arguments are passed to us (except for the first arg on stack)
+ ; It turns out that EAX is part of the non-standard calling convention of this
+ ; function.
+
+ push eax
+ push edx
+
+ cmp dword ptr [_gThreadTLSIndex], -1
+ je Chain ; CLR is not initialized yet
+
+ call _GetThread@0
+
+ test eax, eax ; not a managed thread
+ jz Chain
+
+ mov edx, [esp + 0ch] ; grab the first argument
+ mov edx, [edx] ; grab the SEH exception code
+
+ mov dword ptr [eax + Thread_m_LastCxxSEHExceptionCode], edx
+
+Chain:
+
+ pop edx
+
+ ; [esp] contains the value of EAX we must restore. We would like
+ ; [esp] to contain the address of the real imported CxxFrameHandler
+ ; so we can chain to it.
+
+ mov eax, [__imp____CxxFrameHandler]
+ mov eax, [eax]
+ xchg [esp], eax
+
+ ret
+
+___CxxFrameHandler3 ENDP
+endif ; _DEBUG
+endif ; FEATURE_CORECLR
+
+; Note that RtlUnwind trashes EBX, ESI and EDI, so this wrapper preserves them
+CallRtlUnwind PROC stdcall public USES ebx esi edi, pEstablisherFrame :DWORD, callback :DWORD, pExceptionRecord :DWORD, retVal :DWORD
+
+ push retVal
+ push pExceptionRecord
+ push callback
+ push pEstablisherFrame
+ call dword ptr [__imp__RtlUnwind@16]
+
+ ; return 1
+ push 1
+ pop eax
+
+ RET
+CallRtlUnwind ENDP
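+
+; An approximate C-level view of the wrapper above (illustrative signature
+; only; the pointer types are assumptions):
+;
+;     int __stdcall CallRtlUnwind(PVOID pEstablisherFrame, PVOID callback,
+;                                 PEXCEPTION_RECORD pExceptionRecord, PVOID retVal);
+;
+; It always returns 1, and EBX/ESI/EDI are preserved across the RtlUnwind call.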
+
+_ResumeAtJitEHHelper@4 PROC public
+ mov edx, [esp+4] ; edx = pContext (EHContext*)
+
+ mov ebx, [edx+EHContext_Ebx]
+ mov esi, [edx+EHContext_Esi]
+ mov edi, [edx+EHContext_Edi]
+ mov ebp, [edx+EHContext_Ebp]
+ mov ecx, [edx+EHContext_Esp]
+ mov eax, [edx+EHContext_Eip]
+ mov [ecx-4], eax
+ mov eax, [edx+EHContext_Eax]
+ mov [ecx-8], eax
+ mov eax, [edx+EHContext_Ecx]
+ mov [ecx-0Ch], eax
+ mov eax, [edx+EHContext_Edx]
+ mov [ecx-10h], eax
+ lea esp, [ecx-10h]
+ pop edx
+ pop ecx
+ pop eax
+ ret
+_ResumeAtJitEHHelper@4 ENDP
+
+; int __stdcall CallJitEHFilterHelper(size_t *pShadowSP, EHContext *pContext);
+; on entry, only the pContext->Esp, Ebx, Esi, Edi, Ebp, and Eip are initialized
+_CallJitEHFilterHelper@8 PROC public
+ push ebp
+ mov ebp, esp
+ push ebx
+ push esi
+ push edi
+
+ pShadowSP equ [ebp+8]
+ pContext equ [ebp+12]
+
+ mov eax, pShadowSP ; Write esp-4 to the shadowSP slot
+ test eax, eax
+ jz DONE_SHADOWSP_FILTER
+ mov ebx, esp
+ sub ebx, 4
+ or ebx, SHADOW_SP_IN_FILTER_ASM
+ mov [eax], ebx
+ DONE_SHADOWSP_FILTER:
+
+ mov edx, [pContext]
+ mov eax, [edx+EHContext_Eax]
+ mov ebx, [edx+EHContext_Ebx]
+ mov esi, [edx+EHContext_Esi]
+ mov edi, [edx+EHContext_Edi]
+ mov ebp, [edx+EHContext_Ebp]
+
+ call dword ptr [edx+EHContext_Eip]
+ifdef _DEBUG
+ nop ; Indicate that it is OK to call managed code directly from here
+endif
+
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp ; don't use 'leave' here, as ebp has been trashed
+ retn 8
+_CallJitEHFilterHelper@8 ENDP
+
+
+; void __stdcall CallJITEHFinallyHelper(size_t *pShadowSP, EHContext *pContext);
+; on entry, only the pContext->Esp, Ebx, Esi, Edi, Ebp, and Eip are initialized
+_CallJitEHFinallyHelper@8 PROC public
+ push ebp
+ mov ebp, esp
+ push ebx
+ push esi
+ push edi
+
+ pShadowSP equ [ebp+8]
+ pContext equ [ebp+12]
+
+ mov eax, pShadowSP ; Write esp-4 to the shadowSP slot
+ test eax, eax
+ jz DONE_SHADOWSP_FINALLY
+ mov ebx, esp
+ sub ebx, 4
+ mov [eax], ebx
+ DONE_SHADOWSP_FINALLY:
+
+ mov edx, [pContext]
+ mov eax, [edx+EHContext_Eax]
+ mov ebx, [edx+EHContext_Ebx]
+ mov esi, [edx+EHContext_Esi]
+ mov edi, [edx+EHContext_Edi]
+ mov ebp, [edx+EHContext_Ebp]
+ call dword ptr [edx+EHContext_Eip]
+ifdef _DEBUG
+ nop ; Indicate that it is OK to call managed code directly from here
+endif
+
+ ; Reflect the changes to the context and only update non-volatile registers.
+ ; This will be used later to update REGDISPLAY
+ mov edx, [esp+12+12]
+ mov [edx+EHContext_Ebx], ebx
+ mov [edx+EHContext_Esi], esi
+ mov [edx+EHContext_Edi], edi
+ mov [edx+EHContext_Ebp], ebp
+
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp ; don't use 'leave' here, as ebp has been trashed
+ retn 8
+_CallJitEHFinallyHelper@8 ENDP
+
+
+_GetSpecificCpuTypeAsm@0 PROC public
+ push ebx ; ebx is trashed by the cpuid calls
+
+ ; See if the chip supports CPUID
+ pushfd
+ pop ecx ; Get the EFLAGS
+ mov eax, ecx ; Save for later testing
+ xor ecx, 200000h ; Invert the ID bit.
+ push ecx
+ popfd ; Save the updated flags.
+ pushfd
+ pop ecx ; Retrieve the updated flags
+ xor ecx, eax ; Test if it actually changed (bit set means yes)
+ push eax
+ popfd ; Restore the flags
+
+ test ecx, 200000h
+ jz Assume486
+
+ xor eax, eax
+ cpuid
+
+ test eax, eax
+ jz Assume486 ; br if CPUID1 not allowed
+
+ mov eax, 1
+ cpuid
+
+ ; filter out everything except family and model
+ ; Note that some multi-procs have different stepping number for each proc
+ and eax, 0ff0h
+
+ jmp CpuTypeDone
+
+Assume486:
+ mov eax, 0400h ; report 486
+CpuTypeDone:
+ pop ebx
+ retn
+_GetSpecificCpuTypeAsm@0 ENDP
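+
+; The value returned in EAX packs the CPU family into bits 8-11 and the model
+; into bits 4-7 (stepping is masked off by the AND above); callers decode it
+; with the CPU_X86_FAMILY/CPU_X86_MODEL macros defined in cgencpu.h.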
+
+; DWORD __stdcall GetSpecificCpuFeaturesAsm(DWORD *pInfo);
+_GetSpecificCpuFeaturesAsm@4 PROC public
+ push ebx ; ebx is trashed by the cpuid calls
+
+ ; See if the chip supports CPUID
+ pushfd
+ pop ecx ; Get the EFLAGS
+ mov eax, ecx ; Save for later testing
+ xor ecx, 200000h ; Invert the ID bit.
+ push ecx
+ popfd ; Save the updated flags.
+ pushfd
+ pop ecx ; Retrieve the updated flags
+ xor ecx, eax ; Test if it actually changed (bit set means yes)
+ push eax
+ popfd ; Restore the flags
+
+ test ecx, 200000h
+ jz CpuFeaturesFail
+
+ xor eax, eax
+ cpuid
+
+ test eax, eax
+ jz CpuFeaturesDone ; br if CPUID1 not allowed
+
+ mov eax, 1
+ cpuid
+ mov eax, edx ; return all feature flags
+ mov edx, [esp+8]
+ test edx, edx
+ jz CpuFeaturesDone
+ mov [edx],ebx ; return additional useful information
+ jmp CpuFeaturesDone
+
+CpuFeaturesFail:
+ xor eax, eax ; Nothing to report
+CpuFeaturesDone:
+ pop ebx
+ retn 4
+_GetSpecificCpuFeaturesAsm@4 ENDP
+
+
+;-----------------------------------------------------------------------
+; The out-of-line portion of the code to enable preemptive GC.
+; After the work is done, the code jumps back to the "pRejoinPoint"
+; which should be emitted right after the inline part is generated.
+;
+; Assumptions:
+; ebx = Thread
+; Preserves
+; all registers except ecx.
+;
+;-----------------------------------------------------------------------
+_StubRareEnable proc public
+ push eax
+ push edx
+
+ push ebx
+ call _StubRareEnableWorker@4
+
+ pop edx
+ pop eax
+ retn
+_StubRareEnable ENDP
+
+ifdef FEATURE_COMINTEROP
+_StubRareDisableHR proc public
+ push edx
+
+ push ebx ; Thread
+ call _StubRareDisableHRWorker@4
+
+ pop edx
+ retn
+_StubRareDisableHR ENDP
+endif ; FEATURE_COMINTEROP
+
+_StubRareDisableTHROW proc public
+ push eax
+ push edx
+
+ push ebx ; Thread
+ call _StubRareDisableTHROWWorker@4
+
+ pop edx
+ pop eax
+ retn
+_StubRareDisableTHROW endp
+
+
+ifdef FEATURE_MIXEDMODE
+; VOID __stdcall IJWNOADThunkJumpTarget(void);
+; This routine is used by the IJWNOADThunk to determine the callsite of the domain-specific stub to call.
+_IJWNOADThunkJumpTarget@0 proc public
+
+ push ebp
+ mov ebp, esp
+
+ ; EAX contains IJWNOADThunk*
+ ; Must retain ebx, ecx, edx, esi, edi.
+
+ ; save ebx - holds the IJWNOADThunk*
+ ; save ecx - holds the current AppDomain ID.
+ ; save edx - holds the cached AppDomain ID.
+ push ebx
+ push ecx
+
+ ; put the IJWNOADThunk into ebx for safe keeping
+ mov ebx, eax
+
+ ; get thread - assumes registers are preserved
+ call _GetThread@0
+
+ ; if thread is null, go down un-optimized path
+ test eax,eax
+ jz cachemiss
+
+ ; get current domain - assumes registers are preserved
+ call _GetAppDomain@0
+
+ ; if domain is null, go down un-optimized path
+ test eax,eax
+ jz cachemiss
+
+ ; get the current appdomain id
+ mov ecx, [eax + AppDomain__m_dwId]
+
+ ; test it against each cache location
+ mov eax, ebx
+ add eax, IJWNOADThunk__m_cache
+ cmp ecx, [eax]
+ je cachehit
+
+ add eax, IJWNOADThunk__NextCacheOffset
+ cmp ecx, [eax]
+ je cachehit
+
+ add eax, IJWNOADThunk__NextCacheOffset
+ cmp ecx, [eax]
+ je cachehit
+
+ add eax, IJWNOADThunk__NextCacheOffset
+ cmp ecx, [eax]
+ je cachehit
+
+cachemiss:
+ ; save extra registers
+ push edx
+ push esi
+ push edi
+
+ ; call unoptimized path
+ push ebx ; only arg is IJWNOADThunk*
+ call _IJWNOADThunkJumpTargetHelper@4
+
+ ; restore extra registers
+ pop edi
+ pop esi
+ pop edx
+
+ ; jump back up to the epilog
+ jmp complete
+
+cachehit:
+ ; found a matching ADID, get the code addr.
+ mov eax, [eax + IJWNOADThunk__CodeAddrOffsetFromADID]
+
+ ; if the callsite is null, go down the un-optimized path
+ test eax, eax
+ jz cachemiss
+
+complete:
+ ; restore regs
+ pop ecx
+ pop ebx
+
+ mov esp, ebp
+ pop ebp
+
+ ; Jump to callsite
+ jmp eax
+
+ ; This will never be executed. It is just to help out stack-walking logic
+ ; which disassembles the epilog to unwind the stack.
+ ret
+_IJWNOADThunkJumpTarget@0 endp
+
+endif
+
+InternalExceptionWorker proc public
+ pop edx ; recover RETADDR
+ add esp, eax ; release caller's args
+ push edx ; restore RETADDR
+ jmp @JIT_InternalThrow@4
+InternalExceptionWorker endp
+
+; EAX -> number of caller arg bytes on the stack that we must remove before going
+; to the throw helper, which assumes the stack is clean.
+_ArrayOpStubNullException proc public
+ ; kFactorReg and kTotalReg could not have been modified, but let's pop
+ ; them anyway for consistency and to avoid future bugs.
+ pop esi
+ pop edi
+ mov ecx, CORINFO_NullReferenceException_ASM
+ jmp InternalExceptionWorker
+_ArrayOpStubNullException endp
+
+; EAX -> number of caller arg bytes on the stack that we must remove before going
+; to the throw helper, which assumes the stack is clean.
+_ArrayOpStubRangeException proc public
+ ; kFactorReg and kTotalReg could not have been modified, but let's pop
+ ; them anyway for consistency and to avoid future bugs.
+ pop esi
+ pop edi
+ mov ecx, CORINFO_IndexOutOfRangeException_ASM
+ jmp InternalExceptionWorker
+_ArrayOpStubRangeException endp
+
+; EAX -> number of caller arg bytes on the stack that we must remove before going
+; to the throw helper, which assumes the stack is clean.
+_ArrayOpStubTypeMismatchException proc public
+ ; kFactorReg and kTotalReg could not have been modified, but let's pop
+ ; them anyway for consistency and to avoid future bugs.
+ pop esi
+ pop edi
+ mov ecx, CORINFO_ArrayTypeMismatchException_ASM
+ jmp InternalExceptionWorker
+_ArrayOpStubTypeMismatchException endp
+
+;------------------------------------------------------------------------------
+; This helper routine enregisters the appropriate arguments and makes the
+; actual call.
+;------------------------------------------------------------------------------
+; void STDCALL CallDescrWorkerInternal(CallDescrWorkerParams * pParams)
+CallDescrWorkerInternal PROC stdcall public USES EBX,
+ pParams: DWORD
+
+ mov ebx, pParams
+
+ mov ecx, [ebx+CallDescrData__numStackSlots]
+ mov eax, [ebx+CallDescrData__pSrc] ; copy the stack
+ test ecx, ecx
+ jz donestack
+ lea eax, [eax+4*ecx-4] ; last argument
+ push dword ptr [eax]
+ dec ecx
+ jz donestack
+ sub eax, 4
+ push dword ptr [eax]
+ dec ecx
+ jz donestack
+stackloop:
+ sub eax, 4
+ push dword ptr [eax]
+ dec ecx
+ jnz stackloop
+donestack:
+
+ ; now we must push each field of the ArgumentRegister structure
+ mov eax, [ebx+CallDescrData__pArgumentRegisters]
+ mov edx, dword ptr [eax]
+ mov ecx, dword ptr [eax+4]
+
+ call [ebx+CallDescrData__pTarget]
+ifdef _DEBUG
+ nop ; This is a tag that we use in an assert. Fcalls expect to
+ ; be called from Jitted code or from certain blessed call sites like
+ ; this one. (See HelperMethodFrame::InsureInit)
+endif
+
+ ; Save FP return value if necessary
+ mov ecx, [ebx+CallDescrData__fpReturnSize]
+ cmp ecx, 0
+ je ReturnsInt
+
+ cmp ecx, 4
+ je ReturnsFloat
+ cmp ecx, 8
+ je ReturnsDouble
+ ; unexpected
+ jmp Epilog
+
+ReturnsInt:
+ mov [ebx+CallDescrData__returnValue], eax
+ mov [ebx+CallDescrData__returnValue+4], edx
+
+Epilog:
+ RET
+
+ReturnsFloat:
+ fstp dword ptr [ebx+CallDescrData__returnValue] ; Spill the Float return value
+ jmp Epilog
+
+ReturnsDouble:
+ fstp qword ptr [ebx+CallDescrData__returnValue] ; Spill the Double return value
+ jmp Epilog
+
+CallDescrWorkerInternal endp
+
+ifdef _DEBUG
+; int __fastcall HelperMethodFrameRestoreState(HelperMethodFrame*, struct MachState *)
+FASTCALL_FUNC HelperMethodFrameRestoreState,8
+ mov eax, edx ; eax = MachState*
+else
+; int __fastcall HelperMethodFrameRestoreState(struct MachState *)
+FASTCALL_FUNC HelperMethodFrameRestoreState,4
+ mov eax, ecx ; eax = MachState*
+endif
+ ; restore the registers from the m_MachState structure. Note that
+ ; we only do this for registers that were not saved on the stack
+ ; at the time the machine state snapshot was taken.
+
+ cmp [eax+MachState__pRetAddr], 0
+
+ifdef _DEBUG
+ jnz noConfirm
+ push ebp
+ push ebx
+ push edi
+ push esi
+ push ecx ; HelperFrame*
+ call _HelperMethodFrameConfirmState@20
+ ; on return, eax = MachState*
+ cmp [eax+MachState__pRetAddr], 0
+noConfirm:
+endif
+
+ jz doRet
+
+ lea edx, [eax+MachState__esi] ; Did we have to spill ESI
+ cmp [eax+MachState__pEsi], edx
+ jnz SkipESI
+ mov esi, [edx] ; Then restore it
+SkipESI:
+
+ lea edx, [eax+MachState__edi] ; Did we have to spill EDI
+ cmp [eax+MachState__pEdi], edx
+ jnz SkipEDI
+ mov edi, [edx] ; Then restore it
+SkipEDI:
+
+ lea edx, [eax+MachState__ebx] ; Did we have to spill EBX
+ cmp [eax+MachState__pEbx], edx
+ jnz SkipEBX
+ mov ebx, [edx] ; Then restore it
+SkipEBX:
+
+ lea edx, [eax+MachState__ebp] ; Did we have to spill EBP
+ cmp [eax+MachState__pEbp], edx
+ jnz SkipEBP
+ mov ebp, [edx] ; Then restore it
+SkipEBP:
+
+doRet:
+ xor eax, eax
+ retn
+FASTCALL_ENDFUNC HelperMethodFrameRestoreState
+
+
+ifndef FEATURE_IMPLICIT_TLS
+;---------------------------------------------------------------------------
+; Portable GetThread() function: used if no platform-specific optimizations apply.
+; This is in assembly code because we count on edx not getting trashed on calls
+; to this function.
+;---------------------------------------------------------------------------
+; Thread* __stdcall GetThreadGeneric(void);
+GetThreadGeneric PROC stdcall public USES ecx edx
+
+ifdef _DEBUG
+ cmp dword ptr [_gThreadTLSIndex], -1
+ jnz @F
+ int 3
+@@:
+endif
+ifdef ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+ ; non-PAL, debug-only GetThreadGeneric should defer to GetThreadGenericFullCheck
+ ; to do extra contract enforcement. (See GetThreadGenericFullCheck for details.)
+ ; This code is intentionally not added to asmhelpers.s, as this enforcement is only
+ ; implemented for non-PAL builds.
+ call GetThreadGenericFullCheck
+else
+ push dword ptr [_gThreadTLSIndex]
+ call dword ptr [__imp__TlsGetValue@4]
+endif
+ ret
+GetThreadGeneric ENDP
+
+;---------------------------------------------------------------------------
+; Portable GetAppDomain() function: used if no platform-specific optimizations apply.
+; This is in assembly code because we count on edx not getting trashed on calls
+; to this function.
+;---------------------------------------------------------------------------
+; AppDomain* __stdcall GetAppDomainGeneric(void);
+GetAppDomainGeneric PROC stdcall public USES ecx edx
+
+ifdef _DEBUG
+ cmp dword ptr [_gAppDomainTLSIndex], -1
+ jnz @F
+ int 3
+@@:
+endif
+
+ push dword ptr [_gAppDomainTLSIndex]
+ call dword ptr [__imp__TlsGetValue@4]
+ ret
+GetAppDomainGeneric ENDP
+endif
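+
+; For reference, the retail paths of the two helpers above are equivalent to
+; this C sketch (assuming the usual TlsGetValue signature):
+;
+;     Thread*    __stdcall GetThreadGeneric()    { return (Thread*)   TlsGetValue(gThreadTLSIndex);    }
+;     AppDomain* __stdcall GetAppDomainGeneric() { return (AppDomain*)TlsGetValue(gAppDomainTLSIndex); }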
+
+
+ifdef FEATURE_HIJACK
+
+; A JITted method's return address was hijacked to return to us here. What we do
+; is make a __cdecl call with 2 ints. One is the return value we wish to preserve.
+; The other is space for our real return address.
+;
+;VOID __stdcall OnHijackObjectTripThread();
+OnHijackObjectTripThread PROC stdcall public
+
+ ; Don't fiddle with this unless you change HijackFrame::UpdateRegDisplay
+ ; and HijackArgs
+ push eax ; make room for the real return address (Eip)
+ push ebp
+ push eax
+ push ecx
+ push edx
+ push ebx
+ push esi
+ push edi
+
+ ; unused space for floating point state
+ sub esp,12
+
+ push esp
+ call _OnHijackObjectWorker@4
+
+ ; unused space for floating point state
+ add esp,12
+
+ pop edi
+ pop esi
+ pop ebx
+ pop edx
+ pop ecx
+ pop eax
+ pop ebp
+ retn ; return to the correct place, adjusted by our caller
+OnHijackObjectTripThread ENDP
+
+
+; VOID OnHijackInteriorPointerTripThread()
+OnHijackInteriorPointerTripThread PROC stdcall public
+
+ ; Don't fiddle with this unless you change HijackFrame::UpdateRegDisplay
+ ; and HijackArgs
+ push eax ; make room for the real return address (Eip)
+ push ebp
+ push eax
+ push ecx
+ push edx
+ push ebx
+ push esi
+ push edi
+
+ ; unused space for floating point state
+ sub esp,12
+
+ push esp
+ call _OnHijackInteriorPointerWorker@4
+
+ ; unused space for floating point state
+ add esp,12
+
+ pop edi
+ pop esi
+ pop ebx
+ pop edx
+ pop ecx
+ pop eax
+ pop ebp
+ retn ; return to the correct place, adjusted by our caller
+OnHijackInteriorPointerTripThread ENDP
+
+; VOID OnHijackScalarTripThread()
+OnHijackScalarTripThread PROC stdcall public
+
+ ; Don't fiddle with this unless you change HijackFrame::UpdateRegDisplay
+ ; and HijackArgs
+ push eax ; make room for the real return address (Eip)
+ push ebp
+ push eax
+ push ecx
+ push edx
+ push ebx
+ push esi
+ push edi
+
+ ; unused space for floating point state
+ sub esp,12
+
+ push esp
+ call _OnHijackScalarWorker@4
+
+ ; unused space for floating point state
+ add esp,12
+
+ pop edi
+ pop esi
+ pop ebx
+ pop edx
+ pop ecx
+ pop eax
+ pop ebp
+ retn ; return to the correct place, adjusted by our caller
+OnHijackScalarTripThread ENDP
+
+; VOID OnHijackFloatingPointTripThread()
+OnHijackFloatingPointTripThread PROC stdcall public
+
+ ; Don't fiddle with this unless you change HijackFrame::UpdateRegDisplay
+ ; and HijackArgs
+ push eax ; make room for the real return address (Eip)
+ push ebp
+ push eax
+ push ecx
+ push edx
+ push ebx
+ push esi
+ push edi
+
+ sub esp,12
+
+ ; save top of the floating point stack (there is return value passed in it)
+ ; save full 10 bytes to avoid precision loss
+ fstp tbyte ptr [esp]
+
+ push esp
+ call _OnHijackScalarWorker@4
+
+ ; restore top of the floating point stack
+ fld tbyte ptr [esp]
+
+ add esp,12
+
+ pop edi
+ pop esi
+ pop ebx
+ pop edx
+ pop ecx
+ pop eax
+ pop ebp
+ retn ; return to the correct place, adjusted by our caller
+OnHijackFloatingPointTripThread ENDP
+
+endif ; FEATURE_HIJACK
+
+
+; Note that the debugger skips this entirely when doing SetIP,
+; since COMPlusCheckForAbort should always return 0. Excep.cpp:LeaveCatch
+; asserts that to be true. If this ends up doing more work, then the
+; debugger may need additional support.
+; void __stdcall JIT_EndCatch();
+JIT_EndCatch PROC stdcall public
+
+ ; make temp storage for return address, and push the address of that
+ ; as the last arg to COMPlusEndCatch
+ mov ecx, [esp]
+ push ecx;
+ push esp;
+
+ ; push the rest of COMPlusEndCatch's args, right-to-left
+ push esi
+ push edi
+ push ebx
+ push ebp
+
+ call _COMPlusEndCatch@20 ; returns old esp value in eax, stores jump address
+ ; now eax = new esp, [esp] = new eip
+
+ pop edx ; edx = new eip
+ mov esp, eax ; esp = new esp
+ jmp edx ; eip = new eip
+
+JIT_EndCatch ENDP
+
+;==========================================================================
+; This function is reached only via the embedded ImportThunkGlue code inside
+; an NDirectMethodDesc. Its purpose is to load the DLL associated with an
+; N/Direct method, then backpatch the DLL target into the methoddesc.
+;
+; Initial state:
+;
+; Preemptive GC is *enabled*: we are actually in an unmanaged state.
+;
+;
+; [esp+...] - The *unmanaged* parameters to the DLL target.
+; [esp+4] - Return address back into the JIT'ted code that made
+; the DLL call.
+; [esp] - Contains the "return address." Because we got here
+; thru a call embedded inside a MD, this "return address"
+; gives us an easy way to find the MD (which was the
+; whole purpose of the embedded call maneuver.)
+;
+;
+;
+;==========================================================================
+_NDirectImportThunk@0 proc public
+
+ ; Preserve argument registers
+ push ecx
+ push edx
+
+ ; Invoke the function that does the real work.
+ push eax
+ call _NDirectImportWorker@4
+
+ ; Restore argument registers
+ pop edx
+ pop ecx
+
+ ; If we got back from NDirectImportWorker, the MD has been successfully
+ ; linked and "eax" contains the DLL target. Proceed to execute the
+ ; original DLL call.
+ jmp eax ; Jump to DLL target
+_NDirectImportThunk@0 endp
+
+;==========================================================================
+; The call in the fixup precode initially points to this function.
+; The purpose of this function is to load the MethodDesc and forward the call to the prestub.
+_PrecodeFixupThunk@0 proc public
+
+ pop eax ; Pop the return address. It points right after the call instruction in the precode.
+ push esi
+ push edi
+
+ ; Inline computation done by FixupPrecode::GetMethodDesc()
+ movzx esi,byte ptr [eax+2] ; m_PrecodeChunkIndex
+ movzx edi,byte ptr [eax+1] ; m_MethodDescChunkIndex
+ mov eax,dword ptr [eax+esi*8+3]
+ lea eax,[eax+edi*4]
+
+ pop edi
+ pop esi
+ jmp _ThePreStub@0
+
+_PrecodeFixupThunk@0 endp
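+
+; The address computation above mirrors FixupPrecode::GetMethodDesc(); in C-like
+; terms (illustrative, using the field names from the inline comments):
+;
+;     base = *(TADDR*)(retAddr + m_PrecodeChunkIndex * 8 + 3);
+;     pMD  = base + m_MethodDescChunkIndex * 4;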
+
+; LPVOID __stdcall CTPMethodTable__CallTargetHelper2(
+; const void *pTarget,
+; LPVOID pvFirst,
+; LPVOID pvSecond)
+CTPMethodTable__CallTargetHelper2 proc stdcall public,
+ pTarget : DWORD,
+ pvFirst : DWORD,
+ pvSecond : DWORD
+ mov ecx, pvFirst
+ mov edx, pvSecond
+
+ call pTarget
+ifdef _DEBUG
+ nop ; Mark this as a special call site that can
+ ; directly call unmanaged code
+endif
+ ret
+CTPMethodTable__CallTargetHelper2 endp
+
+; LPVOID __stdcall CTPMethodTable__CallTargetHelper3(
+; const void *pTarget,
+; LPVOID pvFirst,
+; LPVOID pvSecond,
+; LPVOID pvThird)
+CTPMethodTable__CallTargetHelper3 proc stdcall public,
+ pTarget : DWORD,
+ pvFirst : DWORD,
+ pvSecond : DWORD,
+ pvThird : DWORD
+ push pvThird
+
+ mov ecx, pvFirst
+ mov edx, pvSecond
+
+ call pTarget
+ifdef _DEBUG
+ nop ; Mark this as a special call site that can
+ ; directly call unmanaged code
+endif
+ ret
+CTPMethodTable__CallTargetHelper3 endp
+
+
+; void __stdcall setFPReturn(int fpSize, INT64 retVal)
+_setFPReturn@12 proc public
+ mov ecx, [esp+4]
+
+ ; leave the return value in eax:edx if it is not the floating point case
+ mov eax, [esp+8]
+ mov edx, [esp+12]
+
+ cmp ecx, 4
+ jz setFPReturn4
+
+ cmp ecx, 8
+ jnz setFPReturnNot8
+ fld qword ptr [esp+8]
+setFPReturnNot8:
+ retn 12
+
+setFPReturn4:
+ fld dword ptr [esp+8]
+ retn 12
+_setFPReturn@12 endp
+
+; void __stdcall getFPReturn(int fpSize, INT64 *pretVal)
+_getFPReturn@8 proc public
+ mov ecx, [esp+4]
+ mov eax, [esp+8]
+ cmp ecx, 4
+ jz getFPReturn4
+
+ cmp ecx, 8
+ jnz getFPReturnNot8
+ fstp qword ptr [eax]
+getFPReturnNot8:
+ retn 8
+
+getFPReturn4:
+ fstp dword ptr [eax]
+ retn 8
+_getFPReturn@8 endp
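+
+; Taken together, setFPReturn/getFPReturn shuttle a method return value between
+; memory and the architectural return registers: an fpSize of 4 or 8 selects the
+; x87 top-of-stack (float/double), anything else leaves the value in EAX:EDX.
+; A usage sketch in C terms (illustrative only):
+;
+;     INT64 val;
+;     getFPReturn(8, &val);   // spill a double returned in ST(0)
+;     setFPReturn(8, val);    // reload it into ST(0) before returning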
+
+; void __stdcall UM2MThunk_WrapperHelper(void *pThunkArgs,
+; int argLen,
+; void *pAddr,
+; UMEntryThunk *pEntryThunk,
+; Thread *pThread)
+UM2MThunk_WrapperHelper proc stdcall public,
+ pThunkArgs : DWORD,
+ argLen : DWORD,
+ pAddr : DWORD,
+ pEntryThunk : DWORD,
+ pThread : DWORD
+
+ push ebx
+
+ mov eax, pEntryThunk
+ mov ecx, pThread
+ mov ebx, pThunkArgs
+ call pAddr
+
+ pop ebx
+
+ ret
+UM2MThunk_WrapperHelper endp
+
+; VOID __cdecl UMThunkStubRareDisable()
+;<TODO>
+; @todo: this is very similar to StubRareDisable
+;</TODO>
+_UMThunkStubRareDisable proc public
+ push eax
+ push ecx
+
+ push eax ; Push the UMEntryThunk
+ push ecx ; Push thread
+ call _UMThunkStubRareDisableWorker@8
+
+ pop ecx
+ pop eax
+ retn
+_UMThunkStubRareDisable endp
+
+
+;+----------------------------------------------------------------------------
+;
+; Method: CRemotingServices::CheckForContextMatch public
+;
+; Synopsis: This code generates a check to see if the current context and
+; the context of the proxy match.
+;
+;+----------------------------------------------------------------------------
+;
+; returns zero if contexts match
+; returns non-zero if contexts do not match
+;
+; UINT_PTR __stdcall CRemotingServices__CheckForContextMatch(Object* pStubData)
+ifdef FEATURE_REMOTING
+_CRemotingServices__CheckForContextMatch@4 proc public
+ push ebx ; spill ebx
+ mov ebx, [eax+4] ; Get the internal context id by unboxing
+ ; the stub data
+ call _GetThread@0 ; Get the current thread, assumes that the
+ ; registers are preserved
+ mov eax, [eax+Thread_m_Context] ; Get the current context from the
+ ; thread
+ sub eax, ebx ; Get the pointer to the context from the
+ ; proxy and compare with the current context
+ pop ebx ; restore the value of ebx
+ retn
+_CRemotingServices__CheckForContextMatch@4 endp
+endif ; FEATURE_REMOTING
+
+;+----------------------------------------------------------------------------
+;
+; Method: CRemotingServices::DispatchInterfaceCall public
+;
+; Synopsis:
+; Push that method desc on the stack and jump to the
+; transparent proxy stub to execute the call.
+; WARNING!! This MethodDesc is not the methoddesc in the vtable
+; of the object; instead it is the methoddesc in the vtable of
+; the interface class. Since we use the MethodDesc only to probe
+; the stack via the signature of the method call, we are safe.
+; If we want to get any object vtable/class specific
+; information this is not safe.
+;
+;
+;+----------------------------------------------------------------------------
+; void __stdcall CRemotingServices__DispatchInterfaceCall()
+ifdef FEATURE_REMOTING
+_CRemotingServices__DispatchInterfaceCall@0 proc public
+ ; push MethodDesc* passed in eax by precode and forward to the worker
+ push eax
+
+ ; NOTE: At this point the stack looks like
+ ;
+ ; esp---> saved MethodDesc of Interface method
+ ; return addr of calling function
+ ;
+ mov eax, [ecx + TransparentProxyObject___stubData]
+ call [ecx + TransparentProxyObject___stub]
+ifdef _DEBUG
+ nop ; Mark this as a special call site that can directly
+ ; call managed code
+endif
+ test eax, eax
+ jnz CtxMismatch
+ jmp _InContextTPQuickDispatchAsmStub@0
+
+CtxMismatch:
+ pop eax ; restore MethodDesc *
+ jmp _TransparentProxyStub_CrossContext@0 ; jump to slow TP stub
+_CRemotingServices__DispatchInterfaceCall@0 endp
+endif ; FEATURE_REMOTING
+
+
+;+----------------------------------------------------------------------------
+;
+; Method: CRemotingServices::CallFieldGetter private
+;
+; Synopsis: Calls the field getter function (Object::__FieldGetter) in
+; managed code by setting up the stack and calling the target
+;
+;
+;+----------------------------------------------------------------------------
+; void __stdcall CRemotingServices__CallFieldGetter(
+; MethodDesc *pMD,
+; LPVOID pThis,
+; LPVOID pFirst,
+; LPVOID pSecond,
+; LPVOID pThird)
+ifdef FEATURE_REMOTING
+CRemotingServices__CallFieldGetter proc stdcall public,
+ pMD : DWORD,
+ pThis : DWORD,
+ pFirst : DWORD,
+ pSecond : DWORD,
+ pThird : DWORD
+
+ push [pSecond] ; push the second argument on the stack
+ push [pThird] ; push the third argument on the stack
+
+ mov ecx, [pThis] ; enregister pThis, the 'this' pointer
+ mov edx, [pFirst] ; enregister pFirst, the first argument
+
+ mov eax, [pMD] ; load MethodDesc of object::__FieldGetter
+ call _TransparentProxyStub_CrossContext@0 ; call the TP stub
+
+ ret
+CRemotingServices__CallFieldGetter endp
+endif ; FEATURE_REMOTING
+
+;+----------------------------------------------------------------------------
+;
+; Method: CRemotingServices::CallFieldSetter private
+;
+; Synopsis: Calls the field setter function (Object::__FieldSetter) in
+; managed code by setting up the stack and calling the target
+;
+;
+;+----------------------------------------------------------------------------
+; void __stdcall CRemotingServices__CallFieldSetter(
+; MethodDesc *pMD,
+; LPVOID pThis,
+; LPVOID pFirst,
+; LPVOID pSecond,
+; LPVOID pThird)
+ifdef FEATURE_REMOTING
+CRemotingServices__CallFieldSetter proc stdcall public,
+ pMD : DWORD,
+ pThis : DWORD,
+ pFirst : DWORD,
+ pSecond : DWORD,
+ pThird : DWORD
+
+ push [pSecond] ; push the field name (second arg)
+ push [pThird] ; push the object (third arg) on the stack
+
+ mov ecx, [pThis] ; enregister pThis, the 'this' pointer
+ mov edx, [pFirst] ; enregister the first argument
+
+ mov eax, [pMD] ; load MethodDesc of object::__FieldSetter
+ call _TransparentProxyStub_CrossContext@0 ; call the TP stub
+
+ ret
+CRemotingServices__CallFieldSetter endp
+endif ; FEATURE_REMOTING
+
+;+----------------------------------------------------------------------------
+;
+; Method: CTPMethodTable::GenericCheckForContextMatch private
+;
+; Synopsis: Calls the stub in the TP & returns TRUE if the contexts
+; match, FALSE otherwise.
+;
+; Note: 1. Called during FieldSet/Get, used for proxy extensibility
+;
+;+----------------------------------------------------------------------------
+; BOOL __stdcall CTPMethodTable__GenericCheckForContextMatch(Object* orTP)
+ifdef FEATURE_REMOTING
+CTPMethodTable__GenericCheckForContextMatch proc stdcall public uses ecx, tp : DWORD
+
+ mov ecx, [tp]
+ mov eax, [ecx + TransparentProxyObject___stubData]
+ call [ecx + TransparentProxyObject___stub]
+ifdef _DEBUG
+ nop ; Mark this as a special call site that can directly
+ ; call managed code
+endif
+ test eax, eax
+ mov eax, 0
+ setz al
+ ; NOTE: In the CheckForXXXMatch stubs (for URT ctx/ Ole32 ctx) eax is
+ ; non-zero if contexts *do not* match & zero if they do.
+ ret
+CTPMethodTable__GenericCheckForContextMatch endp
+endif ; FEATURE_REMOTING
+
+
+; void __stdcall JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle)
+_JIT_ProfilerEnterLeaveTailcallStub@4 proc public
+ ; this function must preserve all registers, including scratch
+ retn 4
+_JIT_ProfilerEnterLeaveTailcallStub@4 endp
+
+;
+; Used to get the current instruction pointer value
+;
+; UINT_PTR __stdcall GetCurrentIP(void);
+_GetCurrentIP@0 proc public
+ mov eax, [esp]
+ retn
+_GetCurrentIP@0 endp
+
+; LPVOID __stdcall GetCurrentSP(void);
+_GetCurrentSP@0 proc public
+ mov eax, esp
+ retn
+_GetCurrentSP@0 endp
+
+
+; void __stdcall ProfileEnterNaked(FunctionIDOrClientID functionIDOrClientID);
+_ProfileEnterNaked@4 proc public
+ push esi
+ push edi
+
+ ;
+ ; Push in reverse order the fields of ProfilePlatformSpecificData
+ ;
+ push dword ptr [esp+8] ; EIP of the managed code that we return to. -- struct ip field
+ push ebp ; Methods are always EBP framed
+ add [esp], 8 ; Skip past the return IP, straight to the stack args that were passed to our caller
+ ; Skip past saved EBP value: 4 bytes
+ ; - plus return address from caller's caller: 4 bytes
+ ;
+ ; Assuming Foo() calls Bar(), and Bar() calls ProfileEnterNaked() as illustrated (stack
+ ; grows up). We want to get what Foo() passed on the stack to Bar(), so we need to pass
+ ; the return address from caller's caller which is Foo() in this example.
+ ;
+ ; ProfileEnterNaked()
+ ; Bar()
+ ; Foo()
+ ;
+ ; [ESP] is now the ESP of caller's caller pointing to the arguments to the caller.
+
+ push ecx ; -- struct ecx field
+ push edx ; -- struct edx field
+ push eax ; -- struct eax field
+ push 0 ; Create buffer space in the structure -- struct floatingPointValuePresent field
+ push 0 ; Create buffer space in the structure -- struct floatBuffer field
+ push 0 ; Create buffer space in the structure -- struct doubleBuffer2 field
+ push 0 ; Create buffer space in the structure -- struct doubleBuffer1 field
+ push 0 ; Create buffer space in the structure -- struct functionId field
+
+ mov edx, esp ; the address of the Platform structure
+ mov ecx, [esp+52]; The functionIDOrClientID parameter that was pushed to FunctionEnter
+ ; Skip past ProfilePlatformSpecificData we pushed: 40 bytes
+ ; - plus saved edi, esi : 8 bytes
+ ; - plus return address from caller: 4 bytes
+
+ call @ProfileEnter@8
+
+ add esp, 20 ; Remove buffer space
+ pop eax
+ pop edx
+ pop ecx
+ add esp, 8 ; Remove buffer space
+ pop edi
+ pop esi
+
+ retn 4
+_ProfileEnterNaked@4 endp
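+
+; For reference, the pushes above build ProfilePlatformSpecificData on the
+; stack in this order (lowest address first; names taken from the inline
+; comments, 10 DWORDs = 40 bytes, matching the esp arithmetic):
+;
+;     +00h functionId
+;     +04h doubleBuffer1
+;     +08h doubleBuffer2
+;     +0Ch floatBuffer
+;     +10h floatingPointValuePresent
+;     +14h eax
+;     +18h edx
+;     +1Ch ecx
+;     +20h EBP+8 (pointer to the stack args passed to our caller)
+;     +24h ip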
+
+; void __stdcall ProfileLeaveNaked(FunctionIDOrClientID functionIDOrClientID);
+_ProfileLeaveNaked@4 proc public
+ push ecx ; We do not strictly need to save ECX, however
+ ; emitNoGChelper(CORINFO_HELP_PROF_FCN_LEAVE) returns true in the JIT compiler
+ push edx ; Return value may be in EAX:EDX
+
+ ;
+ ; Push in reverse order the fields of ProfilePlatformSpecificData
+ ;
+ push dword ptr [esp+8] ; EIP of the managed code that we return to. -- struct ip field
+ push ebp ; Methods are always EBP framed
+ add [esp], 8 ; Skip past the return IP, straight to the stack args that were passed to our caller
+ ; Skip past saved EBP value: 4 bytes
+ ; - plus return address from caller's caller: 4 bytes
+ ;
+ ; Assuming Foo() calls Bar(), and Bar() calls ProfileLeaveNaked() as illustrated (stack
+ ; grows up). We want to get what Foo() passed on the stack to Bar(), so we need to pass
+ ; the return address from caller's caller which is Foo() in this example.
+ ;
+ ; ProfileLeaveNaked()
+ ; Bar()
+ ; Foo()
+ ;
+ ; [ESP] is now the ESP of caller's caller pointing to the arguments to the caller.
+
+ push ecx ; -- struct ecx field
+ push edx ; -- struct edx field
+ push eax ; -- struct eax field
+
+ ; Check if we need to save off any floating point registers
+ fstsw ax
+ and ax, 3800h ; Check the top-of-fp-stack bits
+ cmp ax, 0 ; If non-zero, we have something to save
+ jnz SaveFPReg
+
+ push 0 ; Create buffer space in the structure -- struct floatingPointValuePresent field
+ push 0 ; Create buffer space in the structure -- struct floatBuffer field
+ push 0 ; Create buffer space in the structure -- struct doubleBuffer2 field
+ push 0 ; Create buffer space in the structure -- struct doubleBuffer1 field
+ jmp Continue
+
+SaveFPReg:
+ push 1 ; mark that a float value is present -- struct floatingPointValuePresent field
+ sub esp, 4 ; Make room for the FP value
+ fst dword ptr [esp] ; Copy the FP value to the buffer as a float -- struct floatBuffer field
+ sub esp, 8 ; Make room for the FP value
+ fstp qword ptr [esp] ; Copy FP values to the buffer as a double -- struct doubleBuffer1 and doubleBuffer2 fields
+
+Continue:
+ push 0 ; Create buffer space in the structure -- struct functionId field
+
+ mov edx, esp ; the address of the Platform structure
+ mov ecx, [esp+52]; The clientData that was pushed to FunctionEnter
+ ; Skip past ProfilePlatformSpecificData we pushed: 40 bytes
+ ; - plus saved edx, ecx : 8 bytes
+ ; - plus return address from caller: 4 bytes
+
+ call @ProfileLeave@8
+
+ ;
+ ; Now see if we have to restore any floating point registers
+ ;
+
+ cmp [esp + 16], 0
+ jz NoRestore
+
+ fld qword ptr [esp + 4]
+
+NoRestore:
+
+ add esp, 20 ; Remove buffer space
+ pop eax
+ add esp, 16 ; Remove buffer space
+ pop edx
+ pop ecx
+ retn 4
+_ProfileLeaveNaked@4 endp
+
+
+; void __stdcall ProfileTailcallNaked(FunctionIDOrClientID functionIDOrClientID);
+_ProfileTailcallNaked@4 proc public
+ push ecx
+ push edx
+
+ ;
+ ; Push in reverse order the fields of ProfilePlatformSpecificData
+ ;
+ push dword ptr [esp+8] ; EIP of the managed code that we return to. -- struct ip field
+ push ebp ; Methods are always EBP framed
+ add [esp], 8 ; Skip past the return IP, straight to the stack args that were passed to our caller
+ ; Skip past saved EBP value: 4 bytes
+ ; - plus return address from caller's caller: 4 bytes
+ ;
+ ; Assuming Foo() calls Bar(), and Bar() calls ProfileTailcallNaked() as illustrated (stack
+ ; grows up). We want to get what Foo() passed on the stack to Bar(), so we need to pass
+ ; the return address from caller's caller which is Foo() in this example.
+ ;
+ ; ProfileTailcallNaked()
+ ; Bar()
+ ; Foo()
+ ;
+ ; [ESP] is now the ESP of caller's caller pointing to the arguments to the caller.
+
+ push ecx ; -- struct ecx field
+ push edx ; -- struct edx field
+ push eax ; -- struct eax field
+ push 0 ; Create buffer space in the structure -- struct floatingPointValuePresent field
+ push 0 ; Create buffer space in the structure -- struct floatBuffer field
+ push 0 ; Create buffer space in the structure -- struct doubleBuffer2 field
+ push 0 ; Create buffer space in the structure -- struct doubleBuffer1 field
+ push 0 ; Create buffer space in the structure -- struct functionId field
+
+ mov edx, esp ; the address of the Platform structure
+ mov ecx, [esp+52]; The clientData that was pushed to FunctionEnter
+ ; Skip past ProfilePlatformSpecificData we pushed: 40 bytes
+ ; - plus saved edx, ecx : 8 bytes
+ ; - plus return address from caller: 4 bytes
+
+ call @ProfileTailcall@8
+
+ add esp, 40 ; Remove buffer space
+ pop edx
+ pop ecx
+ retn 4
+_ProfileTailcallNaked@4 endp
+
+;==========================================================================
+; Invoked for vararg forward P/Invoke calls as a stub.
+; Except for the secret return buffer, arguments come on the stack, so EDX is available as scratch.
+; EAX - the NDirectMethodDesc
+; ECX - may be return buffer address
+; [ESP + 4] - the VASigCookie
+;
+_VarargPInvokeStub@0 proc public
+ ; EDX <- VASigCookie
+ mov edx, [esp + 4] ; skip retaddr
+
+ mov edx, [edx + VASigCookie__StubOffset]
+ test edx, edx
+
+ jz GoCallVarargWorker
+ ; ---------------------------------------
+
+ ; EAX contains MD ptr for the IL stub
+ jmp edx
+
+GoCallVarargWorker:
+ ;
+ ; MD ptr in EAX, VASigCookie ptr at [esp+4]
+ ;
+
+ STUB_PROLOG
+
+ mov esi, esp
+
+ ; save pMD
+ push eax
+
+ push eax ; pMD
+ push dword ptr [esi + 4*7] ; pVaSigCookie
+ push esi ; pTransitionBlock
+
+ call _VarargPInvokeStubWorker@12
+
+ ; restore pMD
+ pop eax
+
+ STUB_EPILOG
+
+ ; jump back to the helper - this time it won't come back here as the stub already exists
+ jmp _VarargPInvokeStub@0
+
+_VarargPInvokeStub@0 endp
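+
+; Note on the [esi + 4*7] arithmetic above: STUB_PROLOG pushes six registers
+; (24 bytes) below the return address, so with esi == esp the VASigCookie that
+; sat at [esp + 4] on entry is found at esi + 24 + 4 = esi + 4*7.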
+
+;==========================================================================
+; Invoked for marshaling-required unmanaged CALLI calls as a stub.
+; EAX - the unmanaged target
+; ECX, EDX - arguments
+; [ESP + 4] - the VASigCookie
+;
+_GenericPInvokeCalliHelper@0 proc public
+ ; save the target
+ push eax
+
+ ; EAX <- VASigCookie
+ mov eax, [esp + 8] ; skip target and retaddr
+
+ mov eax, [eax + VASigCookie__StubOffset]
+ test eax, eax
+
+ jz GoCallCalliWorker
+ ; ---------------------------------------
+
+ push eax
+
+ ; stack layout at this point:
+ ;
+ ; | ... |
+ ; | stack arguments | ESP + 16
+ ; +----------------------+
+ ; | VASigCookie* | ESP + 12
+ ; +----------------------+
+ ; | return address | ESP + 8
+ ; +----------------------+
+ ; | CALLI target address | ESP + 4
+ ; +----------------------+
+ ; | stub entry point | ESP + 0
+ ; ------------------------
+
+ ; remove VASigCookie from the stack
+ mov eax, [esp + 8]
+ mov [esp + 12], eax
+
+ ; move stub entry point below the RA
+ mov eax, [esp]
+ mov [esp + 8], eax
+
+ ; load EAX with the target address
+ pop eax
+ pop eax
+
+ ; stack layout at this point:
+ ;
+ ; | ... |
+ ; | stack arguments | ESP + 8
+ ; +----------------------+
+ ; | return address | ESP + 4
+ ; +----------------------+
+ ; | stub entry point | ESP + 0
+ ; ------------------------
+
+ ; CALLI target address is in EAX
+ ret
+
+GoCallCalliWorker:
+ ; the target is on the stack and will become m_Datum of PInvokeCalliFrame
+ ; call the stub generating worker
+ pop eax
+
+ ;
+ ; target ptr in EAX, VASigCookie ptr in EDX
+ ;
+
+ STUB_PROLOG
+
+ mov esi, esp
+
+ ; save target
+ push eax
+
+ push eax ; unmanaged target
+ push dword ptr [esi + 4*7] ; pVaSigCookie (first stack argument)
+ push esi ; pTransitionBlock
+
+ call _GenericPInvokeCalliStubWorker@12
+
+ ; restore target
+ pop eax
+
+ STUB_EPILOG
+
+ ; jump back to the helper - this time it won't come back here as the stub already exists
+ jmp _GenericPInvokeCalliHelper@0
+
+_GenericPInvokeCalliHelper@0 endp
+
+ifdef MDA_SUPPORTED
+
+;==========================================================================
+; Invoked from on-the-fly generated stubs when the stack imbalance MDA is
+; enabled. The common low-level work for both direct P/Invoke and unmanaged
+; delegate P/Invoke happens here. PInvokeStackImbalanceWorker is where the
+; actual imbalance check is implemented.
+; [ESP + 4] - the StackImbalanceCookie
+; [EBP + 8] - stack arguments (EBP frame pushed by the calling stub)
+;
+_PInvokeStackImbalanceHelper@0 proc public
+ ; StackImbalanceCookie to EBX
+ push ebx
+ lea ebx, [esp + 8]
+
+ push esi
+ push edi
+
+ ; copy stack args
+ mov edx, ecx
+ mov ecx, [ebx + StackImbalanceCookie__m_dwStackArgSize]
+ sub esp, ecx
+
+ shr ecx, 2
+ lea edi, [esp]
+ lea esi, [ebp + 8]
+
+ cld
+ rep movsd
+
+ ; record pre-call ESP
+ mov [ebx + StackImbalanceCookie__m_dwSavedEsp], esp
+
+ ; call the target (restore ECX in case it's a thiscall)
+ mov ecx, edx
+ call [ebx + StackImbalanceCookie__m_pTarget]
+
+ ; record post-call ESP and restore ESP to pre-pushed state
+ mov ecx, esp
+ lea esp, [ebp - SIZEOF_StackImbalanceCookie - 16] ; 4 DWORDs and the cookie have been pushed
+
+ ; save return value
+ push eax
+ push edx
+ sub esp, 12
+
+.errnz (StackImbalanceCookie__HAS_FP_RETURN_VALUE AND 00ffffffh), HAS_FP_RETURN_VALUE has changed - update asm code
+
+ ; save top of the floating point stack if the target has FP retval
+ test byte ptr [ebx + StackImbalanceCookie__m_callConv + 3], (StackImbalanceCookie__HAS_FP_RETURN_VALUE SHR 24)
+ jz noFPURetVal
+ fstp tbyte ptr [esp] ; save full 10 bytes to avoid precision loss
+noFPURetVal:
+
+ ; call PInvokeStackImbalanceWorker(StackImbalanceCookie *pSICookie, DWORD dwPostESP)
+ push ecx
+ push ebx
+ call _PInvokeStackImbalanceWorker@8
+
+ ; restore return value
+ test byte ptr [ebx + StackImbalanceCookie__m_callConv + 3], (StackImbalanceCookie__HAS_FP_RETURN_VALUE SHR 24)
+ jz noFPURetValToRestore
+ fld tbyte ptr [esp]
+noFPURetValToRestore:
+
+ add esp, 12
+ pop edx
+ pop eax
+
+ ; restore registers
+ pop edi
+ pop esi
+
+ pop ebx
+
+ ; EBP frame and original stack arguments will be removed by the caller
+ ret
+_PInvokeStackImbalanceHelper@0 endp
+
+endif ; MDA_SUPPORTED
+
+ifdef FEATURE_COMINTEROP
+
+;==========================================================================
+; This is a fast alternative to CallDescr* tailored specifically for
+; COM to CLR calls. Stack arguments don't come in a contiguous buffer
+; and the secret argument can be passed in EAX.
+;
+
+; extern "C" ARG_SLOT __fastcall COMToCLRDispatchHelper(
+; INT_PTR dwArgECX, ; ecx
+; INT_PTR dwArgEDX, ; edx
+; PCODE pTarget, ; [esp + 4]
+; PCODE pSecretArg, ; [esp + 8]
+; INT_PTR *pInputStack, ; [esp + c]
+; WORD wOutputStackSlots, ; [esp +10]
+; UINT16 *pOutputStackOffsets, ; [esp +14]
+; Frame *pCurFrame); ; [esp +18]
+
+FASTCALL_FUNC COMToCLRDispatchHelper, 32
+
+ ; ecx: dwArgECX
+ ; edx: dwArgEDX
+
+ offset_pTarget equ 4
+ offset_pSecretArg equ 8
+ offset_pInputStack equ 0Ch
+ offset_wOutputStackSlots equ 10h
+ offset_pOutputStackOffsets equ 14h
+ offset_pCurFrame equ 18h
+
+ movzx eax, word ptr [esp + offset_wOutputStackSlots]
+ test eax, eax
+ jnz CopyStackArgs
+
+ ; There are no stack args to copy and ECX and EDX are already setup
+ ; with the correct arguments for the callee, so we just have to
+ ; push the CPFH and make the call.
+
+ PUSH_CPFH_FOR_COM eax, esp, offset_pCurFrame ; trashes eax
+
+ mov eax, [esp + offset_pSecretArg + CPFH_STACK_SIZE]
+ call [esp + offset_pTarget + CPFH_STACK_SIZE]
+ifdef _DEBUG
+ nop ; This is a tag that we use in an assert.
+endif
+
+ POP_CPFH_FOR_COM ecx ; trashes ecx
+
+ ret 18h
+
+
+CopyStackArgs:
+ ; eax: num stack slots
+ ; ecx: dwArgECX
+ ; edx: dwArgEDX
+
+ push ebp
+ mov ebp, esp
+ push ebx
+ push esi
+ push edi
+
+ ebpFrame_adjust equ 4h
+ ebp_offset_pCurFrame equ ebpFrame_adjust + offset_pCurFrame
+
+ PUSH_CPFH_FOR_COM ebx, ebp, ebp_offset_pCurFrame ; trashes ebx
+
+ mov edi, [ebp + ebpFrame_adjust + offset_pOutputStackOffsets]
+ mov esi, [ebp + ebpFrame_adjust + offset_pInputStack]
+
+ ; eax: num stack slots
+ ; ecx: dwArgECX
+ ; edx: dwArgEDX
+ ; edi: pOutputStackOffsets
+ ; esi: pInputStack
+
+CopyStackLoop:
+ dec eax
+ movzx ebx, word ptr [edi + 2 * eax] ; ebx <- input stack offset
+ push [esi + ebx] ; stack <- value on the input stack
+ jnz CopyStackLoop
+
+ ; ECX and EDX are setup with the correct arguments for the callee,
+ ; and we've copied the stack arguments over as well, so now it's
+ ; time to make the call.
+
+ mov eax, [ebp + ebpFrame_adjust + offset_pSecretArg]
+ call [ebp + ebpFrame_adjust + offset_pTarget]
+ifdef _DEBUG
+ nop ; This is a tag that we use in an assert.
+endif
+
+ POP_CPFH_FOR_COM ecx ; trashes ecx
+
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+
+ ret 18h
+
+FASTCALL_ENDFUNC
+
+endif ; FEATURE_COMINTEROP
+
+ifndef FEATURE_CORECLR
+
+;==========================================================================
+; This is small stub whose purpose is to record current stack pointer and
+; call CopyCtorCallStubWorker to invoke copy constructors and destructors
+; as appropriate. This stub operates on arguments already pushed to the
+; stack by JITted IL stub and must not create a new frame, i.e. it must tail
+; call to the target for it to see the arguments that copy ctors have been
+; called on.
+;
+_CopyCtorCallStub@0 proc public
+ ; there may be an argument in ecx - save it
+ push ecx
+
+ ; push pointer to arguments
+ lea edx, [esp + 8]
+ push edx
+
+ call _CopyCtorCallStubWorker@4
+
+ ; restore ecx and tail call to the target
+ pop ecx
+ jmp eax
+_CopyCtorCallStub@0 endp
+
+endif ; !FEATURE_CORECLR
+
+ifdef FEATURE_PREJIT
+
+;==========================================================================
+_StubDispatchFixupStub@0 proc public
+
+ STUB_PROLOG
+
+ mov esi, esp
+
+ push 0
+ push 0
+
+ push eax ; siteAddrForRegisterIndirect (for tailcalls)
+ push esi ; pTransitionBlock
+
+ call _StubDispatchFixupWorker@16
+
+ STUB_EPILOG
+
+_StubDispatchFixupPatchLabel@0:
+public _StubDispatchFixupPatchLabel@0
+
+ ; Tailcall target
+ jmp eax
+
+ ; This will never be executed. It is just to help out stack-walking logic
+ ; which disassembles the epilog to unwind the stack.
+ ret
+
+_StubDispatchFixupStub@0 endp
+
+;==========================================================================
+_ExternalMethodFixupStub@0 proc public
+
+ pop eax ; pop off the return address to the stub
+ ; leaving the actual caller's return address on top of the stack
+
+ STUB_PROLOG
+
+ mov esi, esp
+
+ ; EAX is return address into CORCOMPILE_EXTERNAL_METHOD_THUNK. Subtract 5 to get start address.
+ sub eax, 5
+
+ push 0
+ push 0
+
+ push eax
+
+ ; pTransitionBlock
+ push esi
+
+ call _ExternalMethodFixupWorker@16
+
+ ; eax now contains replacement stub. PreStubWorker will never return
+ ; NULL (it throws an exception if stub creation fails.)
+
+ ; From here on, mustn't trash eax
+
+ STUB_EPILOG
+
+_ExternalMethodFixupPatchLabel@0:
+public _ExternalMethodFixupPatchLabel@0
+
+ ; Tailcall target
+ jmp eax
+
+ ; This will never be executed. It is just to help out stack-walking logic
+ ; which disassembles the epilog to unwind the stack.
+ ret
+
+_ExternalMethodFixupStub@0 endp
+
+ifdef FEATURE_READYTORUN
+;==========================================================================
+_DelayLoad_MethodCall@0 proc public
+
+ STUB_PROLOG_2_HIDDEN_ARGS
+
+ mov esi, esp
+
+ push ecx
+ push edx
+
+ push eax
+
+ ; pTransitionBlock
+ push esi
+
+ call _ExternalMethodFixupWorker@16
+
+ ; eax now contains replacement stub. PreStubWorker will never return
+ ; NULL (it throws an exception if stub creation fails.)
+
+ ; From here on, mustn't trash eax
+
+ STUB_EPILOG
+
+ ; Share the patch label
+ jmp _ExternalMethodFixupPatchLabel@0
+
+ ; This will never be executed. It is just to help out stack-walking logic
+ ; which disassembles the epilog to unwind the stack.
+ ret
+
+_DelayLoad_MethodCall@0 endp
+endif
+
+;=======================================================================================
+; The call in softbound vtable slots initially points to this function.
+; The purpose of this function is to transfer control to the right target and
+; to optionally patch the target of the jump so that we do not take this slow path again.
+;
+_VirtualMethodFixupStub@0 proc public
+
+ pop eax ; Pop the return address. It points right after the call instruction in the thunk.
+ sub eax,5 ; Calculate the address of the thunk
+
+ ; Push ebp frame to get good callstack under debugger
+ push ebp
+ mov ebp, esp
+
+ ; Preserve argument registers
+ push ecx
+ push edx
+
+ push eax ; address of the thunk
+ push ecx ; this ptr
+ call _VirtualMethodFixupWorker@8
+
+ ; Restore argument registers
+ pop edx
+ pop ecx
+
+ ; Pop ebp frame
+ pop ebp
+
+_VirtualMethodFixupPatchLabel@0:
+public _VirtualMethodFixupPatchLabel@0
+
+ ; Proceed to execute the actual method.
+ jmp eax
+
+ ; This will never be executed. It is just to help out stack-walking logic
+ ; which disassembles the epilog to unwind the stack.
+ ret
+
+_VirtualMethodFixupStub@0 endp
+
+endif ; FEATURE_PREJIT
+
+;==========================================================================
+; The prestub
+_ThePreStub@0 proc public
+
+ STUB_PROLOG
+
+ mov esi, esp
+
+ ; EAX contains MethodDesc* from the precode. Push it here as argument
+ ; for PreStubWorker
+ push eax
+
+ push esi
+
+ call _PreStubWorker@8
+
+ ; eax now contains replacement stub. PreStubWorker will never return
+ ; NULL (it throws an exception if stub creation fails.)
+
+ ; From here on, mustn't trash eax
+
+ STUB_EPILOG
+
+ ; Tailcall target
+ jmp eax
+
+ ; This will never be executed. It is just to help out stack-walking logic
+ ; which disassembles the epilog to unwind the stack.
+ ret
+
+_ThePreStub@0 endp
+
+; This method does nothing. It's just a fixed function for the debugger to put a breakpoint
+; on so that it can trace a call target.
+_ThePreStubPatch@0 proc public
+ ; make sure that the basic block is unique
+ test eax,34
+_ThePreStubPatchLabel@0:
+public _ThePreStubPatchLabel@0
+ ret
+_ThePreStubPatch@0 endp
+
+ifdef FEATURE_COMINTEROP
+;==========================================================================
+; CLR -> COM generic or late-bound call
+_GenericComPlusCallStub@0 proc public
+
+ STUB_PROLOG
+
+ ; pTransitionBlock
+ mov esi, esp
+
+ ; return value
+ sub esp, 8
+
+ ; save pMD
+ mov ebx, eax
+
+ push eax ; pMD
+ push esi ; pTransitionBlock
+ call _CLRToCOMWorker@8
+
+ push eax
+ call _setFPReturn@12 ; pop & set the return value
+
+ ; From here on, mustn't trash eax:edx
+
+ ; Get pComPlusCallInfo for return thunk
+ mov ecx, [ebx + ComPlusCallMethodDesc__m_pComPlusCallInfo]
+
+ STUB_EPILOG_RETURN
+
+ ; Tailcall return thunk
+ jmp [ecx + ComPlusCallInfo__m_pRetThunk]
+
+ ; This will never be executed. It is just to help out stack-walking logic
+ ; which disassembles the epilog to unwind the stack.
+ ret
+
+_GenericComPlusCallStub@0 endp
+endif ; FEATURE_COMINTEROP
+
+ifdef FEATURE_REMOTING
+_TransparentProxyStub@0 proc public
+ ; push slot passed in eax
+ push eax
+
+ ; Move into eax the stub data and call the stub
+ mov eax, [ecx + TransparentProxyObject___stubData]
+ call [ecx + TransparentProxyObject___stub]
+ifdef _DEBUG
+ nop ; Mark this as a special call site that can directly
+ ; call managed code
+endif
+ test eax, eax
+ jnz CtxMismatch2
+
+ mov eax, [ecx + TransparentProxyObject___pMT]
+
+ push ebx ; spill EBX
+
+ ; Convert the slot number into the code address
+ ; See MethodTable.h for details on vtable layout
+
+ mov ebx, [esp + 4] ; Reload the slot
+ shr ebx, ASM__VTABLE_SLOTS_PER_CHUNK_LOG2 ; indirectionSlotNumber
+
+ mov eax,[eax + ebx*4 + SIZEOF_MethodTable]
+
+ mov ebx, [esp + 4] ; use unchanged slot from above
+ and ebx, ASM__VTABLE_SLOTS_PER_CHUNK-1 ; offsetInChunk
+ mov eax, [eax + ebx*4]
+
+ ; At this point, eax contains the code address
+
+ ; Restore EBX
+ pop ebx
+
+ ; Remove the slot number from the stack
+ lea esp, [esp+4]
+
+ jmp eax
+
+ ; CONTEXT MISMATCH CASE, call out to the real proxy to dispatch
+
+CtxMismatch2:
+ pop eax ; restore MethodDesc *
+ jmp _TransparentProxyStub_CrossContext@0 ; jump to slow TP stub
+
+_TransparentProxyStub@0 endp
+
+_TransparentProxyStub_CrossContext@0 proc public
+
+ STUB_PROLOG
+
+ ; pTransitionBlock
+ mov esi, esp
+
+ ; return value
+ sub esp, 3*4 ; 64-bit return value + cb stack pop
+
+ push eax ; pMD
+ push esi ; pTransitionBlock
+ call _TransparentProxyStubWorker@8
+
+ pop ebx ; cbStackPop
+
+ push eax
+ call _setFPReturn@12 ; pop & set the return value
+
+ ; From here on, mustn't trash eax:edx
+ mov ecx, ebx ; cbStackPop
+
+ mov ebx, [esp+6*4] ; get retaddr
+ mov [esp+6*4+ecx], ebx ; put it where it belongs
+
+ STUB_EPILOG_RETURN
+
+ add esp, ecx ; pop all the args
+ ret
+
+_TransparentProxyStub_CrossContext@0 endp
+
+; This method does nothing. It's just a fixed function for the debugger to put a breakpoint
+; on so that it can trace a call target.
+_TransparentProxyStubPatch@0 proc public
+ ; make sure that the basic block is unique
+ test eax,12
+_TransparentProxyStubPatchLabel@0:
+public _TransparentProxyStubPatchLabel@0
+ ret
+_TransparentProxyStubPatch@0 endp
+
+endif ; FEATURE_REMOTING
+
+ifdef FEATURE_COMINTEROP
+;--------------------------------------------------------------------------
+; This is the code that all com call method stubs run initially.
+; Most of the real work occurs in ComPreStubWorker(), a C++ routine.
+; The template only does the part that absolutely has to be in assembly
+; language.
+;--------------------------------------------------------------------------
+_ComCallPreStub@0 proc public
+ pop eax ;ComCallMethodDesc*
+
+ ; push ebp-frame
+ push ebp
+ mov ebp,esp
+
+ ; save CalleeSavedRegisters
+ push ebx
+ push esi
+ push edi
+
+ push eax ; ComCallMethodDesc*
+ sub esp, 5*4 ; next, vtable, gscookie, 64-bit error return
+
+ lea edi, [esp]
+ lea esi, [esp+3*4]
+
+ push edi ; pErrorReturn
+ push esi ; pFrame
+ call _ComPreStubWorker@8
+
+ ; eax now contains replacement stub. ComPreStubWorker will return NULL if stub creation fails
+ cmp eax, 0
+ je nostub ; oops we could not create a stub
+
+ add esp, 6*4
+
+ ; pop CalleeSavedRegisters
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+
+ jmp eax ; Reexecute with replacement stub.
+ ; We will never get here. This "ret" is just so that code-disassembling
+ ; profilers know to stop disassembling any further
+ ret
+
+nostub:
+
+ ; Even though ComPreStubWorker sets a 64-bit value as the error return code,
+ ; only the lower 32 bits contain useful data. The reason for this is that the
+ ; possible error return types are: failure HRESULT, 0 and floating point 0.
+ ; In each case, the data fits in 32 bits. Instead, we use the upper half of
+ ; the return value to store the number of bytes to pop.
+ mov eax, [edi]
+ mov edx, [edi+4]
+
+ add esp, 6*4
+
+ ; pop CalleeSavedRegisters
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+
+ pop ecx ; return address
+ add esp, edx ; pop bytes of the stack
+ push ecx ; return address
+
+ ; We need to deal with the case where the method is PreserveSig=true and has an 8
+ ; byte return type. There are 2 types of 8 byte return types: integer and floating point.
+ ; For integer 8 byte return types, we always return 0 in case of failure. For floating
+ ; point return types, we return the value in the floating point register. In both cases
+ ; edx should be 0.
+ xor edx, edx ; edx <-- 0
+
+ ret
+
+_ComCallPreStub@0 endp
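+; Example of the packed error return above (hypothetical values): for a failed
+; stdcall method with 8 bytes of arguments returning E_FAIL, ComPreStubWorker
+; would store low32 = 80004005h and high32 = 8, so the path above returns with
+; eax = 80004005h, pops 8 bytes of arguments, and clears edx.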
+endif ; FEATURE_COMINTEROP
+
+ifdef FEATURE_READYTORUN
+;==========================================================================
+; Define helpers for delay loading of readytorun helpers
+
+DYNAMICHELPER macro frameFlags, suffix
+
+_DelayLoad_Helper&suffix&@0 proc public
+
+ STUB_PROLOG_2_HIDDEN_ARGS
+
+ mov esi, esp
+
+ push frameFlags
+ push ecx ; module
+ push edx ; section index
+
+ push eax ; indirection cell address.
+ push esi ; pTransitionBlock
+
+ call _DynamicHelperWorker@20
+ test eax,eax
+ jnz @F
+
+ mov eax, [esi] ; The result is stored in the argument area of the transition block
+ STUB_EPILOG_RETURN
+ ret
+
+@@:
+ STUB_EPILOG
+ jmp eax
+
+_DelayLoad_Helper&suffix&@0 endp
+
+ endm
+
+DYNAMICHELPER DynamicHelperFrameFlags_Default
+DYNAMICHELPER DynamicHelperFrameFlags_ObjectArg, _Obj
+DYNAMICHELPER <DynamicHelperFrameFlags_ObjectArg OR DynamicHelperFrameFlags_ObjectArg2>, _ObjObj
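+; The three expansions above produce _DelayLoad_Helper@0, _DelayLoad_Helper_Obj@0
+; and _DelayLoad_Helper_ObjObj@0; they differ only in the frame flags passed to
+; _DynamicHelperWorker@20.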
+
+endif ; FEATURE_READYTORUN
+
+ end
diff --git a/src/vm/i386/cgencpu.h b/src/vm/i386/cgencpu.h
new file mode 100644
index 0000000000..77681aeeab
--- /dev/null
+++ b/src/vm/i386/cgencpu.h
@@ -0,0 +1,577 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// CGENX86.H -
+//
+// Various helper routines for generating x86 assembly code.
+//
+// DO NOT INCLUDE THIS FILE DIRECTLY - ALWAYS USE CGENSYS.H INSTEAD
+//
+
+
+
+#ifndef _TARGET_X86_
+#error Should only include "cgenx86.h" for X86 builds
+#endif // _TARGET_X86_
+
+#ifndef __cgenx86_h__
+#define __cgenx86_h__
+
+#include "utilcode.h"
+
+// Given a return address retrieved during stackwalk,
+// this is the offset by which it should be decremented to land somewhere in a call instruction.
+#define STACKWALK_CONTROLPC_ADJUST_OFFSET 1
+
+// preferred alignment for data
+#define DATA_ALIGNMENT 4
+
+class MethodDesc;
+class FramedMethodFrame;
+class Module;
+class ComCallMethodDesc;
+class BaseDomain;
+
+// CPU-dependent functions
+Stub * GenerateInitPInvokeFrameHelper();
+
+#ifdef MDA_SUPPORTED
+EXTERN_C void STDCALL PInvokeStackImbalanceHelper(void);
+#endif // MDA_SUPPORTED
+
+#ifndef FEATURE_CORECLR
+EXTERN_C void STDCALL CopyCtorCallStub(void);
+#endif // !FEATURE_CORECLR
+
+BOOL Runtime_Test_For_SSE2();
+
+#ifdef CROSSGEN_COMPILE
+#define GetEEFuncEntryPoint(pfn) 0x1001
+#else
+#define GetEEFuncEntryPoint(pfn) GFN_TADDR(pfn)
+#endif
+
+//**********************************************************************
+// To be used with GetSpecificCpuInfo()
+
+#define CPU_X86_FAMILY(cpuType) (((cpuType) & 0x0F00) >> 8)
+#define CPU_X86_MODEL(cpuType) (((cpuType) & 0x00F0) >> 4)
+// Stepping is masked out by GetSpecificCpuInfo()
+// #define CPU_X86_STEPPING(cpuType) (((cpuType) & 0x000F) )
+
+#define CPU_X86_USE_CMOV(cpuFeat) ((cpuFeat & 0x00008001) == 0x00008001)
+#define CPU_X86_USE_SSE2(cpuFeat) (((cpuFeat & 0x04000000) == 0x04000000) && Runtime_Test_For_SSE2())
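+// These masks test CPUID (EAX=1) EDX feature bits: 0x00008001 requires both FPU
+// (bit 0) and CMOV (bit 15); 0x04000000 is SSE2 (bit 26), additionally gated on
+// the OS check in Runtime_Test_For_SSE2().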
+
+// Values for CPU_X86_FAMILY(cpuType)
+#define CPU_X86_486 4
+#define CPU_X86_PENTIUM 5
+#define CPU_X86_PENTIUM_PRO 6
+#define CPU_X86_PENTIUM_4 0xF
+
+// Values for CPU_X86_MODEL(cpuType) for CPU_X86_PENTIUM_PRO
+#define CPU_X86_MODEL_PENTIUM_PRO_BANIAS    9 // Pentium M (Mobile PPro with P4 features)
+
+#define COMMETHOD_PREPAD 8 // # extra bytes to allocate in addition to sizeof(ComCallMethodDesc)
+#ifdef FEATURE_COMINTEROP
+#define COMMETHOD_CALL_PRESTUB_SIZE 5 // x86: CALL(E8) xx xx xx xx
+#define COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET 1 // the offset of the call target address inside the prestub
+#endif // FEATURE_COMINTEROP
+
+#define STACK_ALIGN_SIZE 4
+
+#define JUMP_ALLOCATE_SIZE 8 // # bytes to allocate for a jump instruction
+#define BACK_TO_BACK_JUMP_ALLOCATE_SIZE 8 // # bytes to allocate for a back to back jump instruction
+
+#define HAS_COMPACT_ENTRYPOINTS 1
+
+// Needed for PInvoke inlining in ngened images
+#define HAS_NDIRECT_IMPORT_PRECODE 1
+
+#ifdef FEATURE_REMOTING
+#define HAS_REMOTING_PRECODE 1
+#endif
+#ifdef FEATURE_PREJIT
+#define HAS_FIXUP_PRECODE 1
+#define HAS_FIXUP_PRECODE_CHUNKS 1
+#endif
+
+// A ThisPtrRetBufPrecode is necessary for closed delegates over static methods with a return buffer
+#define HAS_THISPTR_RETBUF_PRECODE 1
+
+#define CODE_SIZE_ALIGN 4
+#define CACHE_LINE_SIZE 32 // As per Intel Optimization Manual the cache line size is 32 bytes
+#define LOG2SLOT LOG2_PTRSIZE
+
+#define ENREGISTERED_RETURNTYPE_MAXSIZE 8
+#define ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE 4
+#define CALLDESCR_ARGREGS 1 // CallDescrWorker has ArgumentRegister parameter
+
+// Max size of patched TLS helpers
+#ifdef _DEBUG
+// Debug build needs extra space for last error trashing
+#define TLS_GETTER_MAX_SIZE 0x20
+#else
+#define TLS_GETTER_MAX_SIZE 0x10
+#endif
+
+//=======================================================================
+// IMPORTANT: This value is used to figure out how much to allocate
+// for a fixed array of FieldMarshaler's. That means it must be at least
+// as large as the largest FieldMarshaler subclass. This requirement
+// is guarded by an assert.
+//=======================================================================
+#define MAXFIELDMARSHALERSIZE 24
+
+//**********************************************************************
+// Parameter size
+//**********************************************************************
+
+typedef INT32 StackElemType;
+#define STACK_ELEM_SIZE sizeof(StackElemType)
+
+
+
+#include "stublinkerx86.h"
+
+
+
+// !! This expression assumes STACK_ELEM_SIZE is a power of 2.
+#define StackElemSize(parmSize) (((parmSize) + STACK_ELEM_SIZE - 1) & ~((ULONG)(STACK_ELEM_SIZE - 1)))
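+// For example, with STACK_ELEM_SIZE == 4: StackElemSize(1) == 4,
+// StackElemSize(4) == 4, StackElemSize(5) == 8.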
+
+
+//**********************************************************************
+// Frames
+//**********************************************************************
+//--------------------------------------------------------------------
+// This represents some of the FramedMethodFrame fields that are
+// stored at negative offsets.
+//--------------------------------------------------------------------
+typedef DPTR(struct CalleeSavedRegisters) PTR_CalleeSavedRegisters;
+struct CalleeSavedRegisters {
+ INT32 edi;
+ INT32 esi;
+ INT32 ebx;
+ INT32 ebp;
+};
+
+//--------------------------------------------------------------------
+// This represents the arguments that are stored in volatile registers.
+// This should not overlap the CalleeSavedRegisters since those are already
+// saved separately and it would be wasteful to save the same register twice.
+// If we do use a non-volatile register as an argument, then the ArgIterator
+// will probably have to communicate this back to the PromoteCallerStack
+// routine to avoid a double promotion.
+//--------------------------------------------------------------------
+#define ENUM_ARGUMENT_REGISTERS() \
+ ARGUMENT_REGISTER(ECX) \
+ ARGUMENT_REGISTER(EDX)
+
+#define ENUM_ARGUMENT_REGISTERS_BACKWARD() \
+ ARGUMENT_REGISTER(EDX) \
+ ARGUMENT_REGISTER(ECX)
+
+typedef DPTR(struct ArgumentRegisters) PTR_ArgumentRegisters;
+struct ArgumentRegisters {
+ #define ARGUMENT_REGISTER(regname) INT32 regname;
+ ENUM_ARGUMENT_REGISTERS_BACKWARD()
+ #undef ARGUMENT_REGISTER
+};
+#define NUM_ARGUMENT_REGISTERS 2
+
+#define SCRATCH_REGISTER_X86REG kEAX
+
+#define THIS_REG ECX
+#define THIS_kREG kECX
+
+#define ARGUMENT_REG1 ECX
+#define ARGUMENT_REG2 EDX
+
+// forward decl
+struct REGDISPLAY;
+typedef REGDISPLAY *PREGDISPLAY;
+
+// Sufficient context for Try/Catch restoration.
+struct EHContext {
+ INT32 Eax;
+ INT32 Ebx;
+ INT32 Ecx;
+ INT32 Edx;
+ INT32 Esi;
+ INT32 Edi;
+ INT32 Ebp;
+ INT32 Esp;
+ INT32 Eip;
+
+ void Setup(PCODE resumePC, PREGDISPLAY regs);
+ void UpdateFrame(PREGDISPLAY regs);
+
+ inline TADDR GetSP() {
+ LIMITED_METHOD_CONTRACT;
+ return (TADDR)Esp;
+ }
+ inline void SetSP(LPVOID esp) {
+ LIMITED_METHOD_CONTRACT;
+ Esp = (INT32)(size_t)esp;
+ }
+
+ inline LPVOID GetFP() {
+ LIMITED_METHOD_CONTRACT;
+ return (LPVOID)(UINT_PTR)Ebp;
+ }
+
+ inline void SetArg(LPVOID arg) {
+ LIMITED_METHOD_CONTRACT;
+ Eax = (INT32)(size_t)arg;
+ }
+
+ inline void Init()
+ {
+ Eax = 0;
+ Ebx = 0;
+ Ecx = 0;
+ Edx = 0;
+ Esi = 0;
+ Edi = 0;
+ Ebp = 0;
+ Esp = 0;
+ Eip = 0;
+ }
+};
+
+#define ARGUMENTREGISTERS_SIZE sizeof(ArgumentRegisters)
+
+//**********************************************************************
+// Exception handling
+//**********************************************************************
+
+inline PCODE GetIP(const CONTEXT * context) {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return PCODE(context->Eip);
+}
+
+inline void SetIP(CONTEXT *context, PCODE eip) {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ context->Eip = (DWORD)eip;
+}
+
+inline TADDR GetSP(const CONTEXT * context) {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (TADDR)(context->Esp);
+}
+
+EXTERN_C LPVOID STDCALL GetCurrentSP();
+
+inline void SetSP(CONTEXT *context, TADDR esp) {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ context->Esp = (DWORD)esp;
+}
+
+inline void SetFP(CONTEXT *context, TADDR ebp) {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ context->Ebp = (INT32)ebp;
+}
+
+inline TADDR GetFP(const CONTEXT * context)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (TADDR)context->Ebp;
+}
+
+// Get Rel32 destination, emit jumpStub if necessary
+inline INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMethod = NULL, LoaderAllocator *pLoaderAllocator = NULL)
+{
+ // We do not need jump stubs on i386
+ LIMITED_METHOD_CONTRACT;
+
+ TADDR baseAddr = (TADDR)pRel32 + 4;
+ return (INT32)(target - baseAddr);
+}
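+// For example, for a call/jmp whose 4-byte displacement starts at pRel32, the
+// returned value is target - (pRel32 + 4), i.e. the displacement is relative
+// to the first byte after the instruction.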
+
+#ifndef CLR_STANDALONE_BINDER
+
+#ifdef FEATURE_COMINTEROP
+inline void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target)
+{
+ WRAPPER_NO_CONTRACT;
+
+ BYTE *pBuffer = (BYTE*)pCOMMethod - COMMETHOD_CALL_PRESTUB_SIZE;
+
+ pBuffer[0] = X86_INSTR_CALL_REL32; //CALLNEAR32
+ *((LPVOID*)(1+pBuffer)) = (LPVOID) (((LPBYTE)target) - (pBuffer+5));
+
+ _ASSERTE(IS_ALIGNED(pBuffer + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET, sizeof(void*)) &&
+ *((SSIZE_T*)(pBuffer + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET)) == ((LPBYTE)target - (LPBYTE)pCOMMethod));
+}
+#endif // FEATURE_COMINTEROP
+
+//------------------------------------------------------------------------
+WORD GetUnpatchedCodeData(LPCBYTE pAddr);
+
+//------------------------------------------------------------------------
+inline WORD GetUnpatchedOpcodeWORD(LPCBYTE pAddr)
+{
+ WRAPPER_NO_CONTRACT;
+ if (CORDebuggerAttached())
+ {
+ return GetUnpatchedCodeData(pAddr);
+ }
+ else
+ {
+ return *((WORD *)pAddr);
+ }
+}
+
+//------------------------------------------------------------------------
+inline BYTE GetUnpatchedOpcodeBYTE(LPCBYTE pAddr)
+{
+ WRAPPER_NO_CONTRACT;
+ if (CORDebuggerAttached())
+ {
+ return (BYTE) GetUnpatchedCodeData(pAddr);
+ }
+ else
+ {
+ return *pAddr;
+ }
+}
+
+//------------------------------------------------------------------------
+// The following must be a distinguishable set of instruction sequences for
+// various stub dispatch calls.
+//
+// An x86 JIT which uses full stub dispatch must generate only
+// the following stub dispatch calls:
+//
+// (1) isCallRelativeIndirect:
+// call dword ptr [rel32] ; FF 15 ---rel32----
+// (2) isCallRelative:
+// call abc ; E8 ---rel32----
+// (3) isCallRegisterIndirect:
+// 3-byte nop ;
+// call dword ptr [eax] ; FF 10
+//
+// NOTE: You must be sure that pRetAddr is a true return address for
+// a stub dispatch call.
+
+BOOL isCallRelativeIndirect(const BYTE *pRetAddr);
+BOOL isCallRelative(const BYTE *pRetAddr);
+BOOL isCallRegisterIndirect(const BYTE *pRetAddr);
+
+inline BOOL isCallRelativeIndirect(const BYTE *pRetAddr)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ BOOL fRet = (GetUnpatchedOpcodeWORD(&pRetAddr[-6]) == X86_INSTR_CALL_IND);
+ _ASSERTE(!fRet || !isCallRelative(pRetAddr));
+ _ASSERTE(!fRet || !isCallRegisterIndirect(pRetAddr));
+ return fRet;
+}
+
+inline BOOL isCallRelative(const BYTE *pRetAddr)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ BOOL fRet = (GetUnpatchedOpcodeBYTE(&pRetAddr[-5]) == X86_INSTR_CALL_REL32);
+ _ASSERTE(!fRet || !isCallRelativeIndirect(pRetAddr));
+ _ASSERTE(!fRet || !isCallRegisterIndirect(pRetAddr));
+ return fRet;
+}
+
+inline BOOL isCallRegisterIndirect(const BYTE *pRetAddr)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ BOOL fRet = (GetUnpatchedOpcodeWORD(&pRetAddr[-5]) == X86_INSTR_NOP3_1)
+ && (GetUnpatchedOpcodeBYTE(&pRetAddr[-3]) == X86_INSTR_NOP3_3)
+ && (GetUnpatchedOpcodeWORD(&pRetAddr[-2]) == X86_INSTR_CALL_IND_EAX);
+ _ASSERTE(!fRet || !isCallRelative(pRetAddr));
+ _ASSERTE(!fRet || !isCallRelativeIndirect(pRetAddr));
+ return fRet;
+}
+
+//------------------------------------------------------------------------
+inline void emitJump(LPBYTE pBuffer, LPVOID target)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ pBuffer[0] = X86_INSTR_JMP_REL32; //JUMPNEAR32
+ *((LPVOID*)(1+pBuffer)) = (LPVOID) (((LPBYTE)target) - (pBuffer+5));
+}
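+// For example, emitJump writes the 5-byte sequence E9 <rel32> (jmp rel32) with
+// rel32 = (BYTE*)target - (pBuffer + 5).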
+
+//------------------------------------------------------------------------
+inline void emitJumpInd(LPBYTE pBuffer, LPVOID target)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ *((WORD*)pBuffer) = X86_INSTR_JMP_IND; // 0x25FF jmp dword ptr[addr32]
+ *((LPVOID*)(2+pBuffer)) = target;
+}
+
+//------------------------------------------------------------------------
+inline PCODE isJump(PCODE pCode)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return *PTR_BYTE(pCode) == X86_INSTR_JMP_REL32;
+}
+
+//------------------------------------------------------------------------
+// Given the same pBuffer that was used by emitJump this method
+// decodes the instructions and returns the jump target
+inline PCODE decodeJump(PCODE pCode)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ CONSISTENCY_CHECK(*PTR_BYTE(pCode) == X86_INSTR_JMP_REL32);
+ return rel32Decode(pCode+1);
+}
+
+//
+// On IA64 back to back jumps should be separated by a nop bundle to get
+// the best performance from the hardware's branch prediction logic.
+// For all other platforms back to back jumps don't require anything special
+// That is why we have these two wrapper functions that call emitJump and decodeJump
+//
+
+//------------------------------------------------------------------------
+inline void emitBackToBackJump(LPBYTE pBuffer, LPVOID target)
+{
+ WRAPPER_NO_CONTRACT;
+ emitJump(pBuffer, target);
+}
+
+//------------------------------------------------------------------------
+inline PCODE isBackToBackJump(PCODE pBuffer)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return isJump(pBuffer);
+}
+
+//------------------------------------------------------------------------
+inline PCODE decodeBackToBackJump(PCODE pBuffer)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return decodeJump(pBuffer);
+}
+
+EXTERN_C void __stdcall setFPReturn(int fpSize, INT64 retVal);
+EXTERN_C void __stdcall getFPReturn(int fpSize, INT64 *pretval);
+
+
+// SEH info forward declarations
+
+inline BOOL IsUnmanagedValueTypeReturnedByRef(UINT sizeofvaluetype)
+{
+ LIMITED_METHOD_CONTRACT;
+
+    // odd-sized small structures are not
+    // enregistered, e.g. struct { char a,b,c; }
+    return (sizeofvaluetype > 8) ||
+           (sizeofvaluetype & (sizeofvaluetype - 1)); // nonzero when the size is not a power of two
+}
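+// For example, value types of size 1, 2, 4 or 8 are returned in registers,
+// while sizes 3, 5, 6, 7 and anything larger than 8 are returned by reference.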
+
+#include <pshpack1.h>
+DECLSPEC_ALIGN(4) struct UMEntryThunkCode
+{
+    BYTE            m_alignpad[2];  // used to guarantee alignment of backpatched portion
+ BYTE m_movEAX; //MOV EAX,imm32
+ LPVOID m_uet; // pointer to start of this structure
+ BYTE m_jmp; //JMP NEAR32
+ const BYTE * m_execstub; // pointer to destination code // make sure the backpatched portion is dword aligned.
+
+ void Encode(BYTE* pTargetCode, void* pvSecretParam);
+
+ LPCBYTE GetEntryPoint() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (LPCBYTE)&m_movEAX;
+ }
+
+ static int GetEntryPointOffset()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return 2;
+ }
+};
+#include <poppack.h>
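+// Once encoded, the thunk body starting at GetEntryPoint() is (a sketch):
+//   B8 <m_uet>       mov eax, <pointer to this UMEntryThunkCode>
+//   E9 <rel32>       jmp near <m_execstub>
+// The two leading pad bytes keep the backpatched operand dword-aligned.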
+
+struct HijackArgs
+{
+ DWORD FPUState[3]; // 12 bytes for FPU state (10 bytes for FP top-of-stack + 2 bytes padding)
+ DWORD Edi;
+ DWORD Esi;
+ DWORD Ebx;
+ DWORD Edx;
+ DWORD Ecx;
+ union
+ {
+ DWORD Eax;
+ size_t ReturnValue;
+ };
+ DWORD Ebp;
+ union
+ {
+ DWORD Eip;
+ size_t ReturnAddress;
+ };
+};
+
+#endif //!CLR_STANDALONE_BINDER
+
+// ClrFlushInstructionCache is used when we want to call FlushInstructionCache
+// for a specific architecture in the common code, but not for other architectures.
+// On IA64 ClrFlushInstructionCache calls the Kernel FlushInstructionCache function
+// to flush the instruction cache.
+// We call ClrFlushInstructionCache whenever we create or modify code in the heap.
+// Currently ClrFlushInstructionCache has no effect on X86
+//
+
+inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode)
+{
+ // FlushInstructionCache(GetCurrentProcess(), pCodeAddr, sizeOfCode);
+ MemoryBarrier();
+ return TRUE;
+}
+
+#ifndef FEATURE_IMPLICIT_TLS
+//
+// JIT HELPER ALIASING FOR PORTABILITY.
+//
+// Create alias for optimized implementations of helpers provided on this platform
+//
+
+#define JIT_MonEnter JIT_MonEnterWorker
+#define JIT_MonEnterWorker JIT_MonEnterWorker
+#define JIT_MonReliableEnter JIT_MonReliableEnter
+#define JIT_MonTryEnter JIT_MonTryEnter
+#define JIT_MonExit JIT_MonExitWorker
+#define JIT_MonExitWorker JIT_MonExitWorker
+#define JIT_MonEnterStatic JIT_MonEnterStatic
+#define JIT_MonExitStatic JIT_MonExitStatic
+
+#endif
+
+// optimized static helpers generated dynamically at runtime
+// #define JIT_GetSharedGCStaticBase
+// #define JIT_GetSharedNonGCStaticBase
+// #define JIT_GetSharedGCStaticBaseNoCtor
+// #define JIT_GetSharedNonGCStaticBaseNoCtor
+
+#define JIT_ChkCastClass JIT_ChkCastClass
+#define JIT_ChkCastClassSpecial JIT_ChkCastClassSpecial
+#define JIT_IsInstanceOfClass JIT_IsInstanceOfClass
+#define JIT_ChkCastInterface JIT_ChkCastInterface
+#define JIT_IsInstanceOfInterface JIT_IsInstanceOfInterface
+#define JIT_NewCrossContext JIT_NewCrossContext
+#define JIT_Stelem_Ref JIT_Stelem_Ref
+
+#endif // __cgenx86_h__
diff --git a/src/vm/i386/cgenx86.cpp b/src/vm/i386/cgenx86.cpp
new file mode 100644
index 0000000000..43409e2fee
--- /dev/null
+++ b/src/vm/i386/cgenx86.cpp
@@ -0,0 +1,2146 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// CGENX86.CPP -
+//
+// Various helper routines for generating x86 assembly code.
+//
+//
+
+// Precompiled Header
+
+#include "common.h"
+
+#include "field.h"
+#include "stublink.h"
+#include "cgensys.h"
+#include "frames.h"
+#include "excep.h"
+#include "dllimport.h"
+#include "comdelegate.h"
+#include "log.h"
+#include "security.h"
+#include "comdelegate.h"
+#include "array.h"
+#include "jitinterface.h"
+#include "codeman.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "dbginterface.h"
+#include "eeprofinterfaces.h"
+#include "eeconfig.h"
+#include "asmconstants.h"
+#include "class.h"
+#include "virtualcallstub.h"
+#include "mdaassistants.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "comtoclrcall.h"
+#include "runtimecallablewrapper.h"
+#include "comcache.h"
+#include "olevariant.h"
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif
+
+#include "stublink.inl"
+
+extern "C" DWORD STDCALL GetSpecificCpuTypeAsm(void);
+extern "C" DWORD STDCALL GetSpecificCpuFeaturesAsm(DWORD *pInfo);
+
+// NOTE on Frame Size C_ASSERT usage in this file
+// If the frame size changes, the stubs have to be revisited for correctness.
+// Kindly revisit the logic and update the constants so that the C_ASSERT fires again
+// if someone changes the frame size. You are expected to keep this hard-coded constant
+// up to date so that changes in the frame size trigger compile-time errors if the code is not altered.
+
+void generate_noref_copy (unsigned nbytes, StubLinkerCPU* sl);
+
+#ifndef DACCESS_COMPILE
+
+//=============================================================================
+// Runtime test to see if the OS has enabled support for the SSE2 instructions
+//
+//
+BOOL Runtime_Test_For_SSE2()
+{
+#ifdef FEATURE_CORESYSTEM
+ return TRUE;
+#else
+
+ BOOL result = IsProcessorFeaturePresent(PF_XMMI64_INSTRUCTIONS_AVAILABLE);
+
+ if (result == FALSE)
+ return FALSE;
+
+ // **********************************************************************
+ // *** ***
+ // *** IMPORTANT NOTE: ***
+ // *** ***
+ // *** All of these RunningOnXXX APIs return true when ***
+ // *** the OS that you are running on is that OS or later. ***
+ // *** For example RunningOnWin2003() will return true ***
+ // *** when you are running on Win2k3, Vista, Win7 or later. ***
+ // *** ***
+ // **********************************************************************
+
+
+    // Windows 7 and later should always be using SSE2 instructions;
+    // this is true both for native and Wow64
+ //
+ if (RunningOnWin7())
+ return TRUE;
+
+ if (RunningInWow64())
+ {
+ // There is an issue with saving/restoring the SSE2 registers under wow64
+ // So we figure out if we are running on an impacted OS and Service Pack level
+ // See DevDiv Bugs 89587 for the wow64 bug.
+ //
+
+ _ASSERTE(ExOSInfoAvailable()); // This is always available on Vista and later
+
+ //
+ // The issue is fixed in Windows Server 2008 or Vista/SP1
+ //
+ // It is not fixed in Vista/RTM, so check for that case
+ //
+ if ((ExOSInfoRunningOnServer() == FALSE))
+ {
+ OSVERSIONINFOEX osvi;
+
+ ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
+ osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
+ osvi.wServicePackMajor = 0;
+
+ DWORDLONG dwlConditionMask = 0;
+ VER_SET_CONDITION( dwlConditionMask, CLR_VER_SERVICEPACKMAJOR, VER_EQUAL);
+
+ if (VerifyVersionInfo(&osvi, CLR_VER_SERVICEPACKMAJOR, dwlConditionMask))
+ result = FALSE;
+ }
+ }
+
+ return result;
+#endif
+}
+
+//---------------------------------------------------------------
+// Returns the type of CPU (the value of x in x86)
+// (Please note that it returns 6 for P5-II)
+//---------------------------------------------------------------
+void GetSpecificCpuInfo(CORINFO_CPU * cpuInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ static CORINFO_CPU val = { 0, 0, 0 };
+
+ if (val.dwCPUType)
+ {
+ *cpuInfo = val;
+ return;
+ }
+
+ CORINFO_CPU tempVal;
+ tempVal.dwCPUType = GetSpecificCpuTypeAsm(); // written in ASM & doesn't participate in contracts
+ _ASSERTE(tempVal.dwCPUType);
+
+#ifdef _DEBUG
+ {
+ SO_NOT_MAINLINE_REGION();
+
+ /* Set Family+Model+Stepping string (eg., x690 for Banias, or xF30 for P4 Prescott)
+ * instead of Family only
+ */
+
+ const DWORD cpuDefault = 0xFFFFFFFF;
+ static ConfigDWORD cpuFamily;
+ DWORD configCpuFamily = cpuFamily.val_DontUse_(CLRConfig::INTERNAL_CPUFamily, cpuDefault);
+ if (configCpuFamily != cpuDefault)
+ {
+ assert((configCpuFamily & 0xFFF) == configCpuFamily);
+ tempVal.dwCPUType = (tempVal.dwCPUType & 0xFFFF0000) | configCpuFamily;
+ }
+ }
+#endif
+
+ tempVal.dwFeatures = GetSpecificCpuFeaturesAsm(&tempVal.dwExtendedFeatures); // written in ASM & doesn't participate in contracts
+
+#ifdef _DEBUG
+ {
+ SO_NOT_MAINLINE_REGION();
+
+ /* Set the 32-bit feature mask
+ */
+
+ const DWORD cpuFeaturesDefault = 0xFFFFFFFF;
+ static ConfigDWORD cpuFeatures;
+ DWORD configCpuFeatures = cpuFeatures.val_DontUse_(CLRConfig::INTERNAL_CPUFeatures, cpuFeaturesDefault);
+ if (configCpuFeatures != cpuFeaturesDefault)
+ {
+ tempVal.dwFeatures = configCpuFeatures;
+ }
+ }
+#endif
+
+ val = *cpuInfo = tempVal;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+
+//---------------------------------------------------------------------------------------
+//
+// Initialize the EHContext using the resume PC and the REGDISPLAY. The EHContext is currently used in two
+// scenarios: to store the register state before calling an EH clause, and to retrieve the ambient SP of a
+// particular stack frame. resumePC means different things in the two scenarios. In the former case, it
+// is the IP at which we are going to resume execution when we call an EH clause. In the latter case, it
+// is just the current IP.
+//
+// Arguments:
+// resumePC - refer to the comment above
+// regs - This is the REGDISPLAY obtained from the CrawlFrame used in the stackwalk. It represents the
+// stack frame of the method containing the EH clause we are about to call. For getting the
+// ambient SP, this is the stack frame we are interested in.
+//
+
+void EHContext::Setup(PCODE resumePC, PREGDISPLAY regs)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // EAX ECX EDX are scratch
+ this->Esp = regs->Esp;
+ this->Ebx = *regs->pEbx;
+ this->Esi = *regs->pEsi;
+ this->Edi = *regs->pEdi;
+ this->Ebp = *regs->pEbp;
+
+ this->Eip = (ULONG)(size_t)resumePC;
+}
+
+//
+// Update the registers using new context
+//
+// This is necessary to reflect GC pointer changes in the middle of an unwind inside a
+// finally clause, because:
+// 1. The GC won't see the part of the stack inside try (which has thrown an exception) that has
+// already been unwound, and thus won't update GC pointers for that portion of the stack, but
+// rather the call stack in finally.
+// 2. Upon return of finally, the unwind process continues and unwinds the stack based on the
+// part inside try, and won't see the updated values in finally.
+// As a result, we need to manually update the context using register values upon return of finally.
+//
+// Note that we only update the registers for finally clause because
+// 1. For filter handlers, stack walker is able to see the whole stack (including the try part)
+// with the help of ExceptionFilterFrame as filter handlers are called in first pass
+// 2. For catch handlers, the current unwinding is already finished
+//
+void EHContext::UpdateFrame(PREGDISPLAY regs)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // EAX ECX EDX are scratch.
+ // No need to update ESP as unwinder takes care of that for us
+
+ LOG((LF_EH, LL_INFO1000, "Updating saved EBX: *%p= %p\n", regs->pEbx, this->Ebx));
+ LOG((LF_EH, LL_INFO1000, "Updating saved ESI: *%p= %p\n", regs->pEsi, this->Esi));
+ LOG((LF_EH, LL_INFO1000, "Updating saved EDI: *%p= %p\n", regs->pEdi, this->Edi));
+ LOG((LF_EH, LL_INFO1000, "Updating saved EBP: *%p= %p\n", regs->pEbp, this->Ebp));
+
+ *regs->pEbx = this->Ebx;
+ *regs->pEsi = this->Esi;
+ *regs->pEdi = this->Edi;
+ *regs->pEbp = this->Ebp;
+}
+
+void TransitionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ MethodDesc * pFunc = GetFunction();
+ _ASSERTE(pFunc != NULL);
+ UpdateRegDisplayHelper(pRD, pFunc->CbStackPop());
+
+ RETURN;
+}
+
+void TransitionFrame::UpdateRegDisplayHelper(const PREGDISPLAY pRD, UINT cbStackPop)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ CalleeSavedRegisters* regs = GetCalleeSavedRegisters();
+
+ // reset pContext; it's only valid for active (top-most) frame
+
+ pRD->pContext = NULL;
+
+ pRD->pEdi = (DWORD*) &regs->edi;
+ pRD->pEsi = (DWORD*) &regs->esi;
+ pRD->pEbx = (DWORD*) &regs->ebx;
+ pRD->pEbp = (DWORD*) &regs->ebp;
+ pRD->PCTAddr = GetReturnAddressPtr();
+ pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr);
+ pRD->Esp = (DWORD)(pRD->PCTAddr + sizeof(TADDR) + cbStackPop);
+
+ RETURN;
+}
+
+void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ HOST_NOCALLS;
+ PRECONDITION(m_MachState.isValid()); // InsureInit has been called
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ // reset pContext; it's only valid for active (top-most) frame
+ pRD->pContext = NULL;
+
+#ifdef DACCESS_COMPILE
+
+ //
+ // In the dac case we may have gotten here
+ // without the frame being initialized, so
+ // try and initialize on the fly.
+ //
+
+ if (!m_MachState.isValid())
+ {
+ MachState unwindState;
+
+ InsureInit(false, &unwindState);
+ pRD->PCTAddr = dac_cast<TADDR>(unwindState.pRetAddr());
+ pRD->ControlPC = unwindState.GetRetAddr();
+ pRD->Esp = unwindState._esp;
+
+ // Get some special host instance memory
+ // so we have a place to point to.
+ // This host memory has no target address
+ // and so won't be looked up or used for
+ // anything else.
+ MachState* thisState = (MachState*)
+ DacAllocHostOnlyInstance(sizeof(*thisState), true);
+
+ thisState->_edi = unwindState._edi;
+ pRD->pEdi = (DWORD *)&thisState->_edi;
+ thisState->_esi = unwindState._esi;
+ pRD->pEsi = (DWORD *)&thisState->_esi;
+ thisState->_ebx = unwindState._ebx;
+ pRD->pEbx = (DWORD *)&thisState->_ebx;
+ thisState->_ebp = unwindState._ebp;
+ pRD->pEbp = (DWORD *)&thisState->_ebp;
+
+ // InsureInit always sets m_RegArgs to zero
+ // in the real code. I'm not sure exactly
+ // what should happen in the on-the-fly case,
+ // but go with what would happen from an InsureInit.
+ RETURN;
+ }
+
+#endif // #ifdef DACCESS_COMPILE
+
+ // DACCESS: The MachState pointers are kept as PTR_TADDR so
+ // the host pointers here refer to the appropriate size and
+ // these casts are not a problem.
+ pRD->pEdi = (DWORD*) m_MachState.pEdi();
+ pRD->pEsi = (DWORD*) m_MachState.pEsi();
+ pRD->pEbx = (DWORD*) m_MachState.pEbx();
+ pRD->pEbp = (DWORD*) m_MachState.pEbp();
+ pRD->PCTAddr = dac_cast<TADDR>(m_MachState.pRetAddr());
+ pRD->ControlPC = m_MachState.GetRetAddr();
+ pRD->Esp = (DWORD) m_MachState.esp();
+
+ RETURN;
+}
+
+#ifdef _DEBUG_IMPL
+// Confirm that if the machine state was not initialized, then
+// any unspilled callee saved registers did not change
+EXTERN_C MachState* STDCALL HelperMethodFrameConfirmState(HelperMethodFrame* frame, void* esiVal, void* ediVal, void* ebxVal, void* ebpVal)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ MachState* state = frame->MachineState();
+
+ // if we've already executed this check once for this helper method frame then
+ // we don't do the check again because it is very expensive.
+ if (frame->HaveDoneConfirmStateCheck())
+ {
+ return state;
+ }
+
+ // probe to avoid a kazillion violations in the code that follows.
+ BEGIN_DEBUG_ONLY_CODE;
+ if (!state->isValid())
+ {
+ frame->InsureInit(false, NULL);
+ _ASSERTE(state->_pEsi != &state->_esi || state->_esi == (TADDR)esiVal);
+ _ASSERTE(state->_pEdi != &state->_edi || state->_edi == (TADDR)ediVal);
+ _ASSERTE(state->_pEbx != &state->_ebx || state->_ebx == (TADDR)ebxVal);
+ _ASSERTE(state->_pEbp != &state->_ebp || state->_ebp == (TADDR)ebpVal);
+ }
+ END_DEBUG_ONLY_CODE;
+
+ // set that we have executed this check once for this helper method frame.
+ frame->SetHaveDoneConfirmStateCheck();
+
+ return state;
+}
+#endif
+
+void ExternalMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ UpdateRegDisplayHelper(pRD, CbStackPopUsingGCRefMap(GetGCRefMap()));
+
+ RETURN;
+}
+
+
+void StubDispatchFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ PTR_BYTE pGCRefMap = GetGCRefMap();
+ if (pGCRefMap != NULL)
+ {
+ UpdateRegDisplayHelper(pRD, CbStackPopUsingGCRefMap(pGCRefMap));
+ }
+ else
+ if (GetFunction() != NULL)
+ {
+ FramedMethodFrame::UpdateRegDisplay(pRD);
+ }
+ else
+ {
+ UpdateRegDisplayHelper(pRD, 0);
+
+ // If we do not have owning MethodDesc, we need to pretend that
+ // the call happened on the call instruction to get the ESP unwound properly.
+ //
+ // This path is hit when we are throwing null reference exception from
+ // code:VSD_ResolveWorker or code:StubDispatchFixupWorker
+ pRD->ControlPC = GetAdjustedCallAddress(pRD->ControlPC);
+ }
+
+ RETURN;
+}
+
+PCODE StubDispatchFrame::GetReturnAddress()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ PCODE retAddress = FramedMethodFrame::GetReturnAddress();
+ if (GetFunction() == NULL && GetGCRefMap() == NULL)
+ {
+ // See comment in code:StubDispatchFrame::UpdateRegDisplay
+ retAddress = GetAdjustedCallAddress(retAddress);
+ }
+ return retAddress;
+}
+
+void FaultingExceptionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ CalleeSavedRegisters* regs = GetCalleeSavedRegisters();
+
+ // reset pContext; it's only valid for active (top-most) frame
+ pRD->pContext = NULL;
+
+ pRD->pEdi = (DWORD*) &regs->edi;
+ pRD->pEsi = (DWORD*) &regs->esi;
+ pRD->pEbx = (DWORD*) &regs->ebx;
+ pRD->pEbp = (DWORD*) &regs->ebp;
+ pRD->PCTAddr = GetReturnAddressPtr();
+ pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr);
+ pRD->Esp = m_Esp;
+ RETURN;
+}
+
+void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // We should skip over InlinedCallFrame if it is not active.
+ // It will be part of a JITed method's frame, and the stack-walker
+ // can handle such a case.
+#ifdef PROFILING_SUPPORTED
+ PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this));
+#endif
+ HOST_NOCALLS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ // @TODO: Remove this after the debugger is fixed to avoid stack-walks from bad places
+ // @TODO: This may be still needed for sampling profilers
+ if (!InlinedCallFrame::FrameHasActiveCall(this))
+ {
+ LOG((LF_CORDB, LL_ERROR, "WARNING: InlinedCallFrame::UpdateRegDisplay called on inactive frame %p\n", this));
+ return;
+ }
+
+ DWORD stackArgSize = (DWORD) dac_cast<TADDR>(m_Datum);
+
+ if (stackArgSize & ~0xFFFF)
+ {
+ NDirectMethodDesc * pMD = PTR_NDirectMethodDesc(m_Datum);
+
+ /* if this is not an NDirect frame, something is really wrong */
+
+ _ASSERTE(pMD->SanityCheck() && pMD->IsNDirect());
+
+ stackArgSize = pMD->GetStackArgumentSize();
+ }
+
+ // reset pContext; it's only valid for active (top-most) frame
+ pRD->pContext = NULL;
+
+
+ pRD->pEbp = (DWORD*) &m_pCalleeSavedFP;
+
+ /* The return address is just above the "ESP" */
+ pRD->PCTAddr = PTR_HOST_MEMBER_TADDR(InlinedCallFrame, this,
+ m_pCallerReturnAddress);
+ pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr);
+
+ /* Now we need to pop off the outgoing arguments */
+ pRD->Esp = (DWORD) dac_cast<TADDR>(m_pCallSiteSP) + stackArgSize;
+ RETURN;
+}
+
+#ifdef FEATURE_HIJACK
+//==========================
+// Resumable Exception Frame
+//
+TADDR ResumableFrame::GetReturnAddressPtr()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<TADDR>(m_Regs) + offsetof(CONTEXT, Eip);
+}
+
+void ResumableFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ // reset pContext; it's only valid for active (top-most) frame
+ pRD->pContext = NULL;
+
+ CONTEXT* pUnwoundContext = m_Regs;
+
+#if !defined(DACCESS_COMPILE)
+ // "pContextForUnwind" field is only used on X86 since not only is it initialized just for it,
+ // but its used only under the confines of STACKWALKER_MAY_POP_FRAMES preprocessor define,
+ // which is defined for x86 only (refer to its definition in stackwalk.cpp).
+ if (pRD->pContextForUnwind != NULL)
+ {
+ pUnwoundContext = pRD->pContextForUnwind;
+
+ pUnwoundContext->Eax = m_Regs->Eax;
+ pUnwoundContext->Ecx = m_Regs->Ecx;
+ pUnwoundContext->Edx = m_Regs->Edx;
+
+ pUnwoundContext->Edi = m_Regs->Edi;
+ pUnwoundContext->Esi = m_Regs->Esi;
+ pUnwoundContext->Ebx = m_Regs->Ebx;
+ pUnwoundContext->Ebp = m_Regs->Ebp;
+ pUnwoundContext->Eip = m_Regs->Eip;
+ }
+#endif // !defined(DACCESS_COMPILE)
+
+ pRD->pEax = &pUnwoundContext->Eax;
+ pRD->pEcx = &pUnwoundContext->Ecx;
+ pRD->pEdx = &pUnwoundContext->Edx;
+
+ pRD->pEdi = &pUnwoundContext->Edi;
+ pRD->pEsi = &pUnwoundContext->Esi;
+ pRD->pEbx = &pUnwoundContext->Ebx;
+ pRD->pEbp = &pUnwoundContext->Ebp;
+
+ pRD->ControlPC = pUnwoundContext->Eip;
+ pRD->PCTAddr = dac_cast<TADDR>(m_Regs) + offsetof(CONTEXT, Eip);
+
+ pRD->Esp = m_Regs->Esp;
+
+ RETURN;
+}
+
+// The HijackFrame has to know the registers that are pushed by OnHijackObjectTripThread
+// and OnHijackScalarTripThread, so all three are implemented together.
+void HijackFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ // This only describes the top-most frame
+ pRD->pContext = NULL;
+
+ pRD->pEdi = &m_Args->Edi;
+ pRD->pEsi = &m_Args->Esi;
+ pRD->pEbx = &m_Args->Ebx;
+ pRD->pEdx = &m_Args->Edx;
+ pRD->pEcx = &m_Args->Ecx;
+ pRD->pEax = &m_Args->Eax;
+
+ pRD->pEbp = &m_Args->Ebp;
+ pRD->PCTAddr = dac_cast<TADDR>(m_Args) + offsetof(HijackArgs, Eip);
+ pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr);
+ pRD->Esp = (DWORD)(pRD->PCTAddr + sizeof(TADDR));
+}
+
+#endif // FEATURE_HIJACK
+
+void PInvokeCalliFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ VASigCookie *pVASigCookie = GetVASigCookie();
+ UpdateRegDisplayHelper(pRD, pVASigCookie->sizeOfArgs+sizeof(int));
+
+ RETURN;
+}
+
+void TailCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ // reset pContext; it's only valid for active (top-most) frame
+ pRD->pContext = NULL;
+
+ pRD->pEdi = (DWORD*)&m_regs.edi;
+ pRD->pEsi = (DWORD*)&m_regs.esi;
+ pRD->pEbx = (DWORD*)&m_regs.ebx;
+ pRD->pEbp = (DWORD*)&m_regs.ebp;
+
+ pRD->PCTAddr = GetReturnAddressPtr();
+ pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr);
+ pRD->Esp = (DWORD)(pRD->PCTAddr + sizeof(TADDR));
+
+ RETURN;
+}
+
+//------------------------------------------------------------------------
+// This is declared as returning WORD instead of PRD_TYPE because of
+// header issues with cgencpu.h including dbginterface.h.
+WORD GetUnpatchedCodeData(LPCBYTE pAddr)
+{
+#ifndef _TARGET_X86_
+#error Make sure this works before porting to platforms other than x86.
+#endif
+ CONTRACT(WORD) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CORDebuggerAttached());
+ PRECONDITION(CheckPointer(pAddr));
+ SO_TOLERANT;
+ } CONTRACT_END;
+
+    // Ordering is because x86 is little-endian.
+ BYTE bLow = pAddr[0];
+ BYTE bHigh = pAddr[1];
+
+#ifndef DACCESS_COMPILE
+ // Need to make sure that the code we're reading is free of breakpoint patches.
+ PRD_TYPE unpatchedOpcode;
+ if (g_pDebugInterface->CheckGetPatchedOpcode((CORDB_ADDRESS_TYPE *)pAddr,
+ &unpatchedOpcode))
+ {
+ // PRD_TYPE is supposed to be an opaque debugger structure representing data to remove a patch.
+ // Although PRD_TYPE is currently typedef'ed to be a DWORD_PTR, it's actually semantically just a BYTE.
+ // (since a patch on x86 is just an 0xCC instruction).
+ // Ideally, the debugger subsystem would expose a patch-code stripper that returns BYTE/WORD/etc, and
+ // not force us to crack it ourselves here.
+ bLow = (BYTE) unpatchedOpcode;
+ }
+ //
+#endif
+
+ WORD w = bLow + (bHigh << 8);
+ RETURN w;
+}
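+// For example, if the debugger has planted an int 3 (0xCC) over the first byte,
+// CheckGetPatchedOpcode recovers the original byte so that callers such as
+// GetUnpatchedOpcodeWORD see the true instruction bytes rather than the patch.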
+
+
+#ifndef DACCESS_COMPILE
+
+//-------------------------------------------------------------------------
+// One-time creation of special prestub to initialize UMEntryThunks.
+//-------------------------------------------------------------------------
+Stub *GenerateUMThunkPrestub()
+{
+ CONTRACT(Stub*)
+ {
+ STANDARD_VM_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ CPUSTUBLINKER sl;
+ CPUSTUBLINKER *psl = &sl;
+
+ CodeLabel* rgRareLabels[] = { psl->NewCodeLabel(),
+ psl->NewCodeLabel(),
+ psl->NewCodeLabel()
+ };
+
+
+ CodeLabel* rgRejoinLabels[] = { psl->NewCodeLabel(),
+ psl->NewCodeLabel(),
+ psl->NewCodeLabel()
+ };
+
+ // emit the initial prolog
+ psl->EmitComMethodStubProlog(UMThkCallFrame::GetMethodFrameVPtr(), rgRareLabels, rgRejoinLabels, FALSE /*Don't profile*/);
+
+ // mov ecx, [esi+UMThkCallFrame.pUMEntryThunk]
+ psl->X86EmitIndexRegLoad(kECX, kESI, UMThkCallFrame::GetOffsetOfUMEntryThunk());
+
+ // The call conv is a __stdcall
+ psl->X86EmitPushReg(kECX);
+
+ // call UMEntryThunk::DoRunTimeInit
+ psl->X86EmitCall(psl->NewExternalCodeLabel((LPVOID)UMEntryThunk::DoRunTimeInit), 4);
+
+ // mov ecx, [esi+UMThkCallFrame.pUMEntryThunk]
+ psl->X86EmitIndexRegLoad(kEAX, kESI, UMThkCallFrame::GetOffsetOfUMEntryThunk());
+
+ // lea eax, [eax + UMEntryThunk.m_code] // point to fixedup UMEntryThunk
+ psl->X86EmitOp(0x8d, kEAX, kEAX,
+ UMEntryThunk::GetCodeOffset() + UMEntryThunkCode::GetEntryPointOffset());
+
+ psl->EmitComMethodStubEpilog(UMThkCallFrame::GetMethodFrameVPtr(), rgRareLabels, rgRejoinLabels, FALSE /*Don't profile*/);
+
+ RETURN psl->Link(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap());
+}
+
+Stub *GenerateInitPInvokeFrameHelper()
+{
+ CONTRACT(Stub*)
+ {
+ STANDARD_VM_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ CPUSTUBLINKER sl;
+ CPUSTUBLINKER *psl = &sl;
+
+ CORINFO_EE_INFO::InlinedCallFrameInfo FrameInfo;
+ InlinedCallFrame::GetEEInfo(&FrameInfo);
+
+ // EDI contains address of the frame on stack (the frame ptr, not its negspace)
+ unsigned negSpace = FrameInfo.offsetOfFrameVptr;
+
+ // mov esi, GetThread()
+ psl->X86EmitCurrentThreadFetch(kESI, (1<<kEDI)|(1<<kEBX)|(1<<kECX)|(1<<kEDX));
+
+ // mov [edi + FrameInfo.offsetOfGSCookie], GetProcessGSCookie()
+ psl->X86EmitOffsetModRM(0xc7, (X86Reg)0x0, kEDI, FrameInfo.offsetOfGSCookie - negSpace);
+ psl->Emit32(GetProcessGSCookie());
+
+ // mov [edi + FrameInfo.offsetOfFrameVptr], InlinedCallFrame::GetFrameVtable()
+ psl->X86EmitOffsetModRM(0xc7, (X86Reg)0x0, kEDI, FrameInfo.offsetOfFrameVptr - negSpace);
+ psl->Emit32(InlinedCallFrame::GetMethodFrameVPtr());
+
+ // mov eax, [esi + offsetof(Thread, m_pFrame)]
+ // mov [edi + FrameInfo.offsetOfFrameLink], eax
+ psl->X86EmitIndexRegLoad(kEAX, kESI, offsetof(Thread, m_pFrame));
+ psl->X86EmitIndexRegStore(kEDI, FrameInfo.offsetOfFrameLink - negSpace, kEAX);
+
+ // mov [edi + FrameInfo.offsetOfCalleeSavedEbp], ebp
+ psl->X86EmitIndexRegStore(kEDI, FrameInfo.offsetOfCalleeSavedFP - negSpace, kEBP);
+
+ // mov [edi + FrameInfo.offsetOfReturnAddress], 0
+ psl->X86EmitOffsetModRM(0xc7, (X86Reg)0x0, kEDI, FrameInfo.offsetOfReturnAddress - negSpace);
+ psl->Emit32(0);
+
+ // mov [esi + offsetof(Thread, m_pFrame)], edi
+ psl->X86EmitIndexRegStore(kESI, offsetof(Thread, m_pFrame), kEDI);
+
+ // leave current Thread in ESI
+ psl->X86EmitReturn(0);
+
+ // A single process-wide stub that will never unload
+ RETURN psl->Link(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap());
+}
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+
+static void STDCALL LeaveRuntimeHelperWithFrame (Thread *pThread, size_t target, Frame *pFrame)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ Thread::LeaveRuntimeThrowComplus(target);
+ GCX_COOP_THREAD_EXISTS(pThread);
+ pFrame->Push(pThread);
+
+}
+
+static void STDCALL EnterRuntimeHelperWithFrame (Thread *pThread, Frame *pFrame)
+{
+ // make sure we restore the original Win32 last error before leaving this function - we are
+ // called right after returning from the P/Invoke target and the error has not been saved yet
+ BEGIN_PRESERVE_LAST_ERROR;
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ {
+ HRESULT hr = Thread::EnterRuntimeNoThrow();
+ GCX_COOP_THREAD_EXISTS(pThread);
+ if (FAILED(hr))
+ {
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ ThrowHR (hr);
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ }
+
+ pFrame->Pop(pThread);
+ }
+
+ END_PRESERVE_LAST_ERROR;
+}
+
+// "ip" is the return address
+// This function disassembles the code at the return address to determine
+// how many arguments to pop off.
+// Returns the number of DWORDs that should be popped off on return.
+
+static int STDCALL GetStackSizeForVarArgCall(BYTE* ip)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ int retValue = 0;
+ //BEGIN_ENTRYPOINT_VOIDRET;
+
+ // The instruction immediately following the call may be a move into esp used for
+ // P/Invoke stack resilience. For caller-pop calls it's always mov esp, [ebp-n].
+ if (ip[0] == 0x8b)
+ {
+ if (ip[1] == 0x65)
+ {
+ // mov esp, [ebp+disp8]
+ ip += 3;
+ }
+ else if (ip[1] == 0xa5)
+ {
+ // mov esp, [ebp+disp32]
+ ip += 6;
+ }
+ }
+
+ if (ip[0] == 0x81 && ip[1] == 0xc4)
+ {
+ // add esp, imm32
+ retValue = (*(int*)&ip[2])/4;
+ }
+ else if (ip[0] == 0x83 && ip[1] == 0xc4)
+ {
+ // add esp, imm8
+ retValue = ip[2]/4;
+ }
+ else if (ip[0] == 0x59)
+ {
+ // pop ecx
+ retValue = 1;
+ }
+ else
+ {
+ retValue = 0;
+ }
+ //END_ENTRYPOINT_VOIDRET;
+ return retValue;
+}
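+// For example (hypothetical return sites): "add esp, 8" (83 C4 08) yields 2,
+// "pop ecx" (59) yields 1, and a callee-pop (stdcall) return site yields 0.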
+
+void LeaveRuntimeStackProbeOnly()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_STACK_PROBE
+ RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT));
+#endif
+}
+
+//-----------------------------------------------------------------------------
+// Hosting stub for calls from CLR code to unmanaged code
+//
+// We push a LeaveRuntimeFrame, and then re-push all the arguments.
+// Note that we have to support all the different native calling conventions
+// viz. stdcall, thiscall, cdecl, varargs
+
+#if 0
+
+This is a diagrammatic description of what the stub does:
+
+ (lower addresses)
+
+ | |
+ +----------------+ <--- ESP
+ | |
+ | copied |
+ | arguments |
+ | |
+ | |
+ +----------------+
+ | EDX |
+ | ECX |
+ +----------------+
+| | | GSCookie |
+| | +----------------+ <--- ESI
+| | | vptr |
+| | +----------------+
+| | | m_Next |
+| | +----------------+
+| | | EDI | Scratch register
+| | | ESI | For LeaveRuntimeFrame*
+| | | EBX | For Thread*
+| | +----------------+ <--- EBP
+| | | EBP |
++----------------+ <---ESP +----------------+
+| ret addr | | ret addr |
++----------------+ +----------------+
+| | | |
+| arguments | | arguments |
+| | | |
+| | | |
++----------------+ +----------------+
+| | | |
+| caller's frame | | caller's frame |
+| | | |
+
+ (higher addresses)
+
+ Stack on entry Stack before the call
+ to this stub. to unmanaged code.
+
+#endif
+
+//-----------------------------------------------------------------------------
+// This the layout of the frame of the stub
+
+struct StubForHostStackFrame
+{
+ LPVOID m_outgingArgs[1];
+ ArgumentRegisters m_argumentRegisters;
+ GSCookie m_gsCookie;
+ LeaveRuntimeFrame m_LeaveRuntimeFrame;
+ CalleeSavedRegisters m_calleeSavedRegisters;
+ LPVOID m_retAddr;
+ LPVOID m_incomingArgs[1];
+
+public:
+
+ // Where does the FP/EBP point to?
+ static INT32 GetFPpositionOffset()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(StubForHostStackFrame, m_calleeSavedRegisters) +
+ offsetof(CalleeSavedRegisters, ebp);
+ }
+
+ static INT32 GetFPrelOffsOfArgumentRegisters()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(StubForHostStackFrame, m_argumentRegisters) - GetFPpositionOffset();
+ }
+
+ static INT32 GetFPrelOffsOfCalleeSavedRegisters()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(StubForHostStackFrame, m_calleeSavedRegisters) - GetFPpositionOffset();
+ }
+
+ static INT32 GetFPrelOffsOfRetAddr()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(StubForHostStackFrame, m_retAddr) - GetFPpositionOffset();
+ }
+
+ static INT32 GetFPrelOffsOfIncomingArgs()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(StubForHostStackFrame, m_incomingArgs) - GetFPpositionOffset();
+ }
+};
+
+static Stub *GenerateStubForHostWorker(LoaderHeap *pHeap,
+ LPVOID pNativeTarget, // NULL to fetch from the last pushed argument (COM)
+ Stub *pInnerStub, // stub to call instead of pNativeTarget, or NULL
+ LONG dwComSlot, // only valid if pNativeTarget is NULL
+ WORD wStackArgumentSize, // -1 for varargs
+ WORD wStackPopSize) // 0 for cdecl
+{
+ STANDARD_VM_CONTRACT;
+
+ // We need to call LeaveRuntime before the target, and EnterRuntime after the target
+ CPUSTUBLINKER sl;
+
+ sl.X86EmitPushEBPframe();
+
+ // save EBX, ESI, EDI
+ sl.X86EmitPushReg(kEBX);
+ sl.X86EmitPushReg(kESI);
+ sl.X86EmitPushReg(kEDI);
+
+ // Frame
+ sl.X86EmitPushReg(kDummyPushReg); // m_Next
+ sl.X86EmitPushImm32((UINT)(size_t)LeaveRuntimeFrame::GetMethodFrameVPtr());
+
+ // mov esi, esp; esi is Frame
+ sl.X86EmitMovRegSP(kESI);
+
+ sl.X86EmitPushImmPtr((LPVOID)GetProcessGSCookie());
+
+ // Save outgoing arguments on the stack
+ sl.X86EmitPushReg(kECX);
+ sl.X86EmitPushReg(kEDX);
+
+ INT32 offs = 0;
+ if (wStackArgumentSize == (WORD)-1)
+ {
+ // Re-push the return address as an argument to GetStackSizeForVarArgCall()
+ // This will return the number of stack arguments (in DWORDs)
+ sl.X86EmitIndexPush(kEBP, StubForHostStackFrame::GetFPrelOffsOfRetAddr());
+ sl.X86EmitCall(sl.NewExternalCodeLabel((LPVOID)GetStackSizeForVarArgCall), 4);
+
+ // We generate the following code sequence to re-push all the arguments
+ //
+ // Note that we cannot use "sub ESP, EAX" as ESP might jump past the
+ // stack guard-page.
+ //
+ // cmp EAX, 0
+ // LoopTop:
+ // jz LoopDone
+ // push dword ptr[EBP + EAX*4 + 4]
+ // sub EAX, 1
+ // jmp LoopTop
+ // LoopDone:
+ // ...
+
+ sl.X86EmitCmpRegImm32(kEAX, 0);
+ CodeLabel * pLoopTop = sl.EmitNewCodeLabel();
+ CodeLabel * pLoopDone = sl.NewCodeLabel();
+ sl.X86EmitCondJump(pLoopDone, X86CondCode::kJZ);
+ sl.X86EmitBaseIndexPush(kEBP, kEAX, 4, StubForHostStackFrame::GetFPrelOffsOfIncomingArgs() - sizeof(LPVOID));
+ sl.X86EmitSubReg(kEAX, 1);
+ sl.X86EmitNearJump(pLoopTop);
+ sl.EmitLabel(pLoopDone);
+ }
+ else
+ {
+ offs = StubForHostStackFrame::GetFPrelOffsOfIncomingArgs() + wStackArgumentSize;
+
+ int numStackSlots = wStackArgumentSize / sizeof(LPVOID);
+ for (int i = 0; i < numStackSlots; i++) {
+ offs -= sizeof(LPVOID);
+ sl.X86EmitIndexPush(kEBP, offs);
+ }
+ }
+
+ //-------------------------------------------------------------------------
+
+ // EBX has Thread*
+ // X86TLSFetch_TRASHABLE_REGS will get trashed
+ sl.X86EmitCurrentThreadFetch(kEBX, 0);
+
+ if (pNativeTarget != NULL)
+ {
+ // push Frame
+ sl.X86EmitPushReg(kESI);
+
+ // push target
+ if (pNativeTarget == (LPVOID)-1)
+ {
+ // target comes right above arguments
+ sl.X86EmitIndexPush(kEBP, StubForHostStackFrame::GetFPrelOffsOfIncomingArgs() + wStackArgumentSize);
+ }
+ else
+ {
+ // target is fixed
+ sl.X86EmitPushImm32((UINT)(size_t)pNativeTarget);
+ }
+ }
+ else
+ {
+ // mov eax, [first_arg]
+ // mov eax, [eax]
+ // push [eax + slot_offset]
+ sl.X86EmitIndexRegLoad(kEAX, kEBP, offs);
+ sl.X86EmitIndexRegLoad(kEAX, kEAX, 0);
+ sl.X86EmitIndexPush(kEAX, sizeof(LPVOID) * dwComSlot);
+
+ // push Frame
+ sl.X86EmitPushReg(kESI);
+ // push [esp + 4]
+ sl.X86EmitEspOffset(0xff, (X86Reg)6, 4);
+ }
+
+ // push Thread
+ sl.X86EmitPushReg(kEBX);
+ sl.X86EmitCall(sl.NewExternalCodeLabel((LPVOID)LeaveRuntimeHelperWithFrame), 0xc);
+
+ //-------------------------------------------------------------------------
+ // call NDirect
+ // See diagram above to see what the stack looks like at this point
+
+ // Restore outgoing arguments
+ unsigned offsToArgRegs = StubForHostStackFrame::GetFPrelOffsOfArgumentRegisters();
+ sl.X86EmitIndexRegLoad(kECX, kEBP, offsToArgRegs + offsetof(ArgumentRegisters, ECX));
+ sl.X86EmitIndexRegLoad(kEDX, kEBP, offsToArgRegs + offsetof(ArgumentRegisters, EDX));
+
+ if (pNativeTarget != NULL || pInnerStub != NULL)
+ {
+ if (pNativeTarget == (LPVOID)-1)
+ {
+ // mov eax, target
+ sl.X86EmitIndexRegLoad(kEAX, kEBP, StubForHostStackFrame::GetFPrelOffsOfIncomingArgs() + wStackArgumentSize);
+ // call eax
+ sl.Emit16(X86_INSTR_CALL_EAX);
+ }
+ else
+ {
+ if (pNativeTarget == NULL)
+ {
+ // pop target and discard it (we go to the inner stub)
+ _ASSERTE(pInnerStub != NULL);
+ sl.X86EmitPopReg(kEAX);
+ }
+
+ LPVOID pTarget = (pInnerStub != NULL ? (LPVOID)pInnerStub->GetEntryPoint() : pNativeTarget);
+ sl.X86EmitCall(sl.NewExternalCodeLabel(pTarget), wStackPopSize / 4);
+ }
+ }
+ else
+ {
+ // pop target
+ sl.X86EmitPopReg(kEAX);
+ // call eax
+ sl.Emit16(X86_INSTR_CALL_EAX);
+ }
+
+ //-------------------------------------------------------------------------
+ // Save return value registers and call EnterRuntimeHelperWithFrame
+ //
+
+ sl.X86EmitPushReg(kEAX);
+ sl.X86EmitPushReg(kEDX);
+
+ // push Frame
+ sl.X86EmitPushReg(kESI);
+ // push Thread
+ sl.X86EmitPushReg(kEBX);
+ // call EnterRuntime
+ sl.X86EmitCall(sl.NewExternalCodeLabel((LPVOID)EnterRuntimeHelperWithFrame), 8);
+
+ sl.X86EmitPopReg(kEDX);
+ sl.X86EmitPopReg(kEAX);
+
+ //-------------------------------------------------------------------------
+ // Tear down the frame
+ //
+
+ sl.EmitCheckGSCookie(kESI, LeaveRuntimeFrame::GetOffsetOfGSCookie());
+
+ // lea esp, [ebp - offsToCalleeSavedRegs]
+ unsigned offsToCalleeSavedRegs = StubForHostStackFrame::GetFPrelOffsOfCalleeSavedRegisters();
+ sl.X86EmitIndexLea((X86Reg)kESP_Unsafe, kEBP, offsToCalleeSavedRegs);
+
+ sl.X86EmitPopReg(kEDI);
+ sl.X86EmitPopReg(kESI);
+ sl.X86EmitPopReg(kEBX);
+
+ sl.X86EmitPopReg(kEBP);
+
+ // ret [wStackPopSize]
+ sl.X86EmitReturn(wStackPopSize);
+
+ if (pInnerStub != NULL)
+ {
+ // this stub calls another stub
+ return sl.LinkInterceptor(pHeap, pInnerStub, pNativeTarget);
+ }
+ else
+ {
+ return sl.Link(pHeap);
+ }
+}
+
+
+//-----------------------------------------------------------------------------
+Stub *NDirectMethodDesc::GenerateStubForHost(LPVOID pNativeTarget, Stub *pInnerStub)
+{
+ STANDARD_VM_CONTRACT;
+
+ // We need to call LeaveRuntime before the target, and EnterRuntime after the target
+
+ if (IsQCall())
+ {
+ // We need just the stack probe for QCalls
+ CPUSTUBLINKER sl;
+ sl.X86EmitCall(sl.NewExternalCodeLabel((LPVOID)LeaveRuntimeStackProbeOnly), 0);
+
+ sl.X86EmitNearJump(sl.NewExternalCodeLabel((LPVOID)pNativeTarget));
+
+ return sl.Link(GetLoaderAllocator()->GetStubHeap());
+ }
+
+ WORD wArgSize = (IsVarArgs() ? (WORD)-1 : GetStackArgumentSize());
+ WORD wPopSize = ((IsStdCall() || IsThisCall()) ? GetStackArgumentSize() : 0);
+
+ return GenerateStubForHostWorker(GetDomain()->GetLoaderAllocator()->GetStubHeap(),
+ pNativeTarget,
+ pInnerStub,
+ 0,
+ wArgSize,
+ wPopSize);
+}
+
+
+#ifdef FEATURE_COMINTEROP
+
+//-----------------------------------------------------------------------------
+Stub *ComPlusCallInfo::GenerateStubForHost(LoaderHeap *pHeap, Stub *pInnerStub)
+{
+ STANDARD_VM_CONTRACT;
+
+ WORD wArgSize = GetStackArgumentSize();
+
+ return GenerateStubForHostWorker(pHeap,
+ NULL,
+ pInnerStub,
+ m_cachedComSlot,
+ wArgSize,
+ wArgSize); // always stdcall
+}
+
+#endif // FEATURE_COMINTEROP
+
+//-----------------------------------------------------------------------------
+// static
+Stub *COMDelegate::GenerateStubForHost(MethodDesc *pInvokeMD, MethodDesc *pStubMD, LPVOID pNativeTarget, Stub *pInnerStub)
+{
+ STANDARD_VM_CONTRACT;
+
+ // get unmanaged calling convention from pInvokeMD's metadata
+ PInvokeStaticSigInfo sigInfo(pInvokeMD);
+ CorPinvokeMap callConv = sigInfo.GetCallConv();
+
+ WORD wArgSize = pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize();
+ WORD wPopSize = (callConv == pmCallConvCdecl ? 0 : wArgSize);
+
+ return GenerateStubForHostWorker(NULL, // we want to free this stub when the delegate dies
+ pNativeTarget,
+ pInnerStub,
+ 0,
+ wArgSize,
+ wPopSize);
+}
+
+//-----------------------------------------------------------------------------
+// static
+Stub *NDirect::GenerateStubForHost(Module *pModule, CorUnmanagedCallingConvention callConv, WORD wArgSize)
+{
+ STANDARD_VM_CONTRACT;
+
+ // This one is for unmanaged CALLI where the target is passed as last argument
+ // (first pushed to stack)
+
+ WORD wPopSize = (callConv == IMAGE_CEE_CS_CALLCONV_C ? 0 : (wArgSize + STACK_ELEM_SIZE));
+
+ return GenerateStubForHostWorker(pModule->GetDomain()->GetLoaderAllocator()->GetStubHeap(),
+ (LPVOID)-1,
+ NULL,
+ 0,
+ wArgSize,
+ wPopSize);
+}
+
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+
+#ifdef MDA_SUPPORTED
+
+//-----------------------------------------------------------------------------
+Stub *NDirectMethodDesc::GenerateStubForMDA(LPVOID pNativeTarget, Stub *pInnerStub, BOOL fCalledByStub)
+{
+ STANDARD_VM_CONTRACT;
+
+ CPUSTUBLINKER sl;
+ sl.X86EmitPushEBPframe();
+
+ DWORD callConv = (DWORD)(IsThisCall() ? pmCallConvThiscall : (IsStdCall() ? pmCallConvStdcall : pmCallConvCdecl));
+ _ASSERTE((callConv & StackImbalanceCookie::HAS_FP_RETURN_VALUE) == 0);
+
+ MetaSig msig(this);
+ if (msig.HasFPReturn())
+ {
+ // check for the HRESULT swapping impl flag
+ DWORD dwImplFlags;
+ IfFailThrow(GetMDImport()->GetMethodImplProps(GetMemberDef(), NULL, &dwImplFlags));
+
+ if (dwImplFlags & miPreserveSig)
+ {
+ // pass a flag to PInvokeStackImbalanceHelper that it should save & restore FPU return value
+ callConv |= StackImbalanceCookie::HAS_FP_RETURN_VALUE;
+ }
+ }
+
+ // init StackImbalanceCookie
+ sl.X86EmitPushReg(kEAX); // m_dwSavedEsp (just making space)
+ sl.X86EmitPushImm32(callConv); // m_callConv
+
+ if (IsVarArgs())
+ {
+ // Re-push the return address as an argument to GetStackSizeForVarArgCall()
+ if (fCalledByStub)
+ {
+ // We will be called by another stub that doesn't know the stack size,
+ // so we need to skip a frame to get to the managed caller.
+ sl.X86EmitIndexRegLoad(kEAX, kEBP, 0);
+ sl.X86EmitIndexPush(kEAX, 4);
+ }
+ else
+ {
+ sl.X86EmitIndexPush(kEBP, 4);
+ }
+
+ // This will return the number of stack arguments (in DWORDs)
+ sl.X86EmitCall(sl.NewExternalCodeLabel((LPVOID)GetStackSizeForVarArgCall), 4);
+
+ // shl eax,2
+ sl.Emit16(0xe0c1);
+ sl.Emit8(0x02);
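+    // (0xC1 0xE0 0x02 is the encoding of 'shl eax, 2': Emit16 writes the two
+    // opcode bytes, Emit8 the shift count. The shift converts the DWORD count
+    // returned in EAX into a byte count.)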
+
+ sl.X86EmitPushReg(kEAX); // m_dwStackArgSize
+ }
+ else
+ {
+ sl.X86EmitPushImm32(GetStackArgumentSize()); // m_dwStackArgSize
+ }
+
+ LPVOID pTarget = (pInnerStub != NULL ? (LPVOID)pInnerStub->GetEntryPoint() : pNativeTarget);
+ sl.X86EmitPushImmPtr(pTarget); // m_pTarget
+ sl.X86EmitPushImmPtr(this); // m_pMD
+
+ // stack layout at this point
+
+ // | ... |
+ // | stack arguments | EBP + 8
+ // +-----------------------+
+ // | return address | EBP + 4
+ // +-----------------------+
+ // | saved EBP | EBP + 0
+ // +-----------------------+
+ // | SIC::m_dwSavedEsp |
+ // | SIC::m_callConv |
+ // | SIC::m_dwStackArgSize |
+ // | SIC::m_pTarget |
+ // | SIC::m_pMD | EBP - 20
+ // ------------------------
+
+ // call the helper
+ sl.X86EmitCall(sl.NewExternalCodeLabel(PInvokeStackImbalanceHelper), sizeof(StackImbalanceCookie));
+
+ // pop StackImbalanceCookie
+ sl.X86EmitMovSPReg(kEBP);
+
+ sl.X86EmitPopReg(kEBP);
+ sl.X86EmitReturn((IsStdCall() || IsThisCall()) ? GetStackArgumentSize() : 0);
+
+ if (pInnerStub)
+ {
+ return sl.LinkInterceptor(GetLoaderAllocator()->GetStubHeap(), pInnerStub, pNativeTarget);
+ }
+ else
+ {
+ return sl.Link(GetLoaderAllocator()->GetStubHeap());
+ }
+}
+
+//-----------------------------------------------------------------------------
+// static
+Stub *COMDelegate::GenerateStubForMDA(MethodDesc *pInvokeMD, MethodDesc *pStubMD, LPVOID pNativeTarget, Stub *pInnerStub)
+{
+ STANDARD_VM_CONTRACT;
+
+ WORD wStackArgSize = pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize();
+
+ // get unmanaged calling convention from pInvokeMD's metadata
+ PInvokeStaticSigInfo sigInfo(pInvokeMD);
+ DWORD callConv = (DWORD)sigInfo.GetCallConv();
+ _ASSERTE((callConv & StackImbalanceCookie::HAS_FP_RETURN_VALUE) == 0);
+
+ MetaSig msig(pInvokeMD);
+ if (msig.HasFPReturn())
+ {
+ // pass a flag to PInvokeStackImbalanceHelper that it should save & restore FPU return value
+ callConv |= StackImbalanceCookie::HAS_FP_RETURN_VALUE;
+ }
+
+ CPUSTUBLINKER sl;
+ sl.X86EmitPushEBPframe();
+
+ LPVOID pTarget = (pInnerStub != NULL ? (LPVOID)pInnerStub->GetEntryPoint() : pNativeTarget);
+
+ // init StackImbalanceCookie
+ sl.X86EmitPushReg(kEAX); // m_dwSavedEsp (just making space)
+ sl.X86EmitPushImm32(callConv); // m_callConv
+ sl.X86EmitPushImm32(wStackArgSize); // m_dwStackArgSize
+ sl.X86EmitPushImmPtr(pTarget); // m_pTarget
+ sl.X86EmitPushImmPtr(pInvokeMD); // m_pMD
+
+ // stack layout at this point
+
+ // | ... |
+ // | stack arguments | EBP + 8
+ // +-----------------------+
+ // | return address | EBP + 4
+ // +-----------------------+
+ // | saved EBP | EBP + 0
+ // +-----------------------+
+ // | SIC::m_dwSavedEsp |
+ // | SIC::m_callConv |
+ // | SIC::m_dwStackArgSize |
+ // | SIC::m_pTarget |
+ // | SIC::m_pMD | EBP - 20
+ // ------------------------
+
+ // call the helper
+ sl.X86EmitCall(sl.NewExternalCodeLabel(PInvokeStackImbalanceHelper), sizeof(StackImbalanceCookie));
+
+ // pop StackImbalanceCookie
+ sl.X86EmitMovSPReg(kEBP);
+
+ sl.X86EmitPopReg(kEBP);
+    // Mask out HAS_FP_RETURN_VALUE, which may have been OR'ed into callConv
+    // above, before comparing against the calling convention.
+    sl.X86EmitReturn((callConv & ~StackImbalanceCookie::HAS_FP_RETURN_VALUE) == pmCallConvCdecl ? 0 : wStackArgSize);
+
+ if (pInnerStub != NULL)
+ {
+ return sl.LinkInterceptor(pInnerStub, pNativeTarget);
+ }
+ else
+ {
+ return sl.Link(); // don't use loader heap as we want to be able to free the stub
+ }
+}
+
+#endif // MDA_SUPPORTED
+
+extern "C" VOID STDCALL StubRareEnableWorker(Thread *pThread)
+{
+ WRAPPER_NO_CONTRACT;
+
+ //printf("RareEnable\n");
+ pThread->RareEnablePreemptiveGC();
+}
+
+
+
+
+// Disable when calling into managed code from a place that fails via Exceptions
+extern "C" VOID STDCALL StubRareDisableTHROWWorker(Thread *pThread)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ // Do not add a CONTRACT here. We haven't set up SEH. We rely
+ // on HandleThreadAbort and COMPlusThrowBoot dealing with this situation properly.
+
+ // WARNING!!!!
+ // when we start executing here, we are actually in cooperative mode. But we
+ // haven't synchronized with the barrier to reentry yet. So we are in a highly
+ // dangerous mode. If we call managed code, we will potentially be active in
+    // the GC heap, even as GCs are occurring!
+
+ // Check for ShutDown scenario. This happens only when we have initiated shutdown
+ // and someone is trying to call in after the CLR is suspended. In that case, we
+ // must either raise an unmanaged exception or return an HRESULT, depending on the
+ // expectations of our caller.
+ if (!CanRunManagedCode())
+ {
+ // DO NOT IMPROVE THIS EXCEPTION! It cannot be a managed exception. It
+ // cannot be a real exception object because we cannot execute any managed
+ // code here.
+ pThread->m_fPreemptiveGCDisabled = 0;
+ COMPlusThrowBoot(E_PROCESS_SHUTDOWN_REENTRY);
+ }
+
+ // We must do the following in this order, because otherwise we would be constructing
+ // the exception for the abort without synchronizing with the GC. Also, we have no
+ // CLR SEH set up, despite the fact that we may throw a ThreadAbortException.
+ pThread->RareDisablePreemptiveGC();
+ pThread->HandleThreadAbort();
+}
+
+// Note that this logic is copied below, in PopSEHRecords
+__declspec(naked)
+VOID __cdecl PopSEHRecords(LPVOID pTargetSP)
+{
+ // No CONTRACT possible on naked functions
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ __asm{
+ mov ecx, [esp+4] ;; ecx <- pTargetSP
+ mov eax, fs:[0] ;; get current SEH record
+ poploop:
+ cmp eax, ecx
+ jge done
+ mov eax, [eax] ;; get next SEH record
+ jmp poploop
+ done:
+ mov fs:[0], eax
+ retn
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// JITInterface
+//
+//////////////////////////////////////////////////////////////////////////////
+
+/*********************************************************************/
+#ifdef EnC_SUPPORTED
+#pragma warning (disable : 4731)
+void ResumeAtJit(PCONTEXT pContext, LPVOID oldESP)
+{
+ // No CONTRACT here, because we can't run the risk of it pushing any SEH into the
+ // current method.
+
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+#ifdef _DEBUG
+ DWORD curESP;
+ __asm mov curESP, esp
+#endif
+
+ if (oldESP)
+ {
+ _ASSERTE(curESP < (DWORD)(size_t)oldESP);
+ // should have popped the SEH records by now as stack has been overwritten
+ _ASSERTE(GetCurrentSEHRecord() > oldESP);
+ }
+
+ // For the "push Eip, ..., ret"
+ _ASSERTE(curESP < pContext->Esp - sizeof(DWORD));
+ pContext->Esp -= sizeof(DWORD);
+
+ __asm {
+ mov ebp, pContext
+
+ // Push Eip onto the targetESP, so that the final "ret" will consume it
+ mov ecx, [ebp]CONTEXT.Esp
+ mov edx, [ebp]CONTEXT.Eip
+ mov [ecx], edx
+
+ // Restore all registers except Esp, Ebp, Eip
+ mov eax, [ebp]CONTEXT.Eax
+ mov ebx, [ebp]CONTEXT.Ebx
+ mov ecx, [ebp]CONTEXT.Ecx
+ mov edx, [ebp]CONTEXT.Edx
+ mov esi, [ebp]CONTEXT.Esi
+ mov edi, [ebp]CONTEXT.Edi
+
+ push [ebp]CONTEXT.Esp // pContext->Esp is (targetESP-sizeof(DWORD))
+ push [ebp]CONTEXT.Ebp
+ pop ebp
+ pop esp
+
+ // esp is (targetESP-sizeof(DWORD)), and [esp] is the targetEIP.
+ // The ret will set eip to targetEIP and esp will be automatically
+ // incremented to targetESP
+
+ ret
+ }
+}
+#pragma warning (default : 4731)
+#endif // EnC_SUPPORTED
+
+
+#pragma warning(push)
+#pragma warning(disable: 4035)
+DWORD getcpuid(DWORD arg, unsigned char result[16])
+{
+ LIMITED_METHOD_CONTRACT
+
+ __asm
+ {
+ push ebx
+ push esi
+ mov eax, arg
+ cpuid
+ mov esi, result
+ mov [esi+ 0], eax
+ mov [esi+ 4], ebx
+ mov [esi+ 8], ecx
+ mov [esi+12], edx
+ pop esi
+ pop ebx
+ }
+}
+
+// The following function uses Deterministic Cache Parameter leaves to determine the cache hierarchy information on Prescott & above platforms.
+// This function takes 3 arguments:
+// Arg1 is an input to ECX. Used as an index to specify which cache level CPUID should return information for.
+// Arg2 is an input to EAX. For deterministic cache parameter enumeration, we pass 4H in arg2.
+// Arg3 is a pointer to the return buffer.
+// No need to check whether CPUID is supported, because we only get here after CPUID has already been called successfully.
+
+DWORD getextcpuid(DWORD arg1, DWORD arg2, unsigned char result[16])
+{
+ LIMITED_METHOD_CONTRACT
+
+ __asm
+ {
+ push ebx
+ push esi
+ mov ecx, arg1
+ mov eax, arg2
+ cpuid
+ mov esi, result
+ mov [esi+ 0], eax
+ mov [esi+ 4], ebx
+ mov [esi+ 8], ecx
+ mov [esi+12], edx
+ pop esi
+ pop ebx
+ }
+}
+
+#pragma warning(pop)
+
+
+// This function returns the number of logical processors on a given physical chip. If it cannot
+// determine the number of logical cpus, or the machine is not populated uniformly with the same
+// type of processors, this function returns 1.
+DWORD GetLogicalCpuCount()
+{
+ // No CONTRACT possible because GetLogicalCpuCount uses SEH
+
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ static DWORD val = 0;
+
+ // cache value for later re-use
+ if (val)
+ {
+ return val;
+ }
+
+ struct Param : DefaultCatchFilterParam
+ {
+ DWORD retVal;
+ } param;
+ param.pv = COMPLUS_EXCEPTION_EXECUTE_HANDLER;
+ param.retVal = 1;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ unsigned char buffer[16];
+
+ DWORD maxCpuId = getcpuid(0, buffer);
+
+ if (maxCpuId < 1)
+ goto lDone;
+
+ DWORD* dwBuffer = (DWORD*)buffer;
+
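+        // CPUID leaf 0 returns the vendor string "GenuineIntel" in EBX:EDX:ECX
+        // ("Genu", "ineI", "ntel"). getcpuid stores EBX, ECX and EDX at
+        // dwBuffer[1], [2] and [3], where they read back as the little-endian
+        // multi-character constants 'uneG', 'letn' and 'Ieni' checked below.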
+ if (dwBuffer[1] == 'uneG') {
+ if (dwBuffer[3] == 'Ieni') {
+ if (dwBuffer[2] == 'letn') { // get SMT/multicore enumeration for Intel EM64T
+
+                // TODO: Currently GetLogicalCpuCountFromOS() and GetLogicalCpuCountFallback() are broken on
+                // multi-core processors, but we never call into those two functions since we don't halve the
+                // gen0 size on Prescott and above processors. We keep the old version here for earlier
+                // generation (Northwood-based) systems; perf data suggests that on those systems halving the
+                // gen0 size still boosts performance (e.g. BizTalk by about 17%), so on those systems we still
+                // go ahead and halve the gen0 size. The logic in GetLogicalCpuCountFromOS()
+                // and GetLogicalCpuCountFallback() works fine for those earlier generation systems.
+                // On a Prescott and above or multi-core processor, perf data suggests that not halving the
+                // gen0 size at all gives us overall better performance.
+                // This is going to be fixed with a new version in the Orcas time frame.
+
+ if( (maxCpuId > 3) && (maxCpuId < 0x80000000) )
+ goto lDone;
+
+ val = GetLogicalCpuCountFromOS(); //try to obtain HT enumeration from OS API
+ if (val )
+ {
+ pParam->retVal = val; // OS API HT enumeration successful, we are Done
+ goto lDone;
+ }
+
+ val = GetLogicalCpuCountFallback(); // OS API failed, Fallback to HT enumeration using CPUID
+ if( val )
+ pParam->retVal = val;
+ }
+ }
+ }
+lDone: ;
+ }
+ PAL_EXCEPT_FILTER(DefaultCatchFilter)
+ {
+ }
+ PAL_ENDTRY
+
+ if (val == 0)
+ {
+ val = param.retVal;
+ }
+
+ return param.retVal;
+}
+
+void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _DEBUG
+ m_alignpad[0] = X86_INSTR_INT3;
+ m_alignpad[1] = X86_INSTR_INT3;
+#endif // _DEBUG
+ m_movEAX = X86_INSTR_MOV_EAX_IMM32;
+ m_uet = pvSecretParam;
+ m_jmp = X86_INSTR_JMP_REL32;
+ m_execstub = (BYTE*) ((pTargetCode) - (4+((BYTE*)&m_execstub)));
+
+ FlushInstructionCache(GetCurrentProcess(),GetEntryPoint(),sizeof(UMEntryThunkCode));
+}
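+
+// For reference, the thunk encoded above is, in effect (illustrative disassembly):
+//     int3; int3              ; alignment padding (filled with int3 in debug builds)
+//     mov eax, pvSecretParam  ; B8 imm32 - identifies this entry thunk
+//     jmp rel32               ; E9 rel32 - to pTargetCode
+// m_execstub holds rel32 = pTargetCode - (&m_execstub + 4), i.e. the target
+// relative to the byte that follows the jmp. Decode below relies on this
+// layout, including the mov starting at an address that is 2 modulo 4.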
+
+UMEntryThunk* UMEntryThunk::Decode(LPVOID pCallback)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (*((BYTE*)pCallback) != X86_INSTR_MOV_EAX_IMM32 ||
+ ( ((size_t)pCallback) & 3) != 2) {
+ return NULL;
+ }
+ return *(UMEntryThunk**)( 1 + (BYTE*)pCallback );
+}
+
+BOOL DoesSlotCallPrestub(PCODE pCode)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION(pCode != NULL);
+ PRECONDITION(pCode != GetPreStubEntryPoint());
+ } CONTRACTL_END;
+
+ // x86 has the following possible sequences for prestub logic:
+ // 1. slot -> temporary entrypoint -> prestub
+ // 2. slot -> precode -> prestub
+ // 3. slot -> precode -> jumprel32 (NGEN case) -> prestub
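+    //
+    // For reference, the precode pattern matched at the end of this function is:
+    //     mov eax, imm32    ; B8 xx xx xx xx - the MethodDesc
+    //     mov r/m, r        ; 8B xx          - two-byte filler mov
+    //     jmp rel32         ; E9 xx xx xx xx - to the prestub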
+
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ if (MethodDescChunk::GetMethodDescFromCompactEntryPoint(pCode, TRUE) != NULL)
+ {
+ return TRUE;
+ }
+#endif // HAS_COMPACT_ENTRYPOINTS
+
+ if (!IS_ALIGNED(pCode, PRECODE_ALIGNMENT))
+ {
+ return FALSE;
+ }
+
+#ifdef HAS_FIXUP_PRECODE
+ if (*PTR_BYTE(pCode) == X86_INSTR_CALL_REL32)
+ {
+ // Note that call could have been patched to jmp in the meantime
+ pCode = rel32Decode(pCode+1);
+
+ // NGEN case
+ if (*PTR_BYTE(pCode) == X86_INSTR_JMP_REL32) {
+ pCode = rel32Decode(pCode+1);
+ }
+
+ return pCode == (TADDR)PrecodeFixupThunk;
+ }
+#endif
+
+ if (*PTR_BYTE(pCode) != X86_INSTR_MOV_EAX_IMM32 ||
+ *PTR_BYTE(pCode+5) != X86_INSTR_MOV_RM_R ||
+ *PTR_BYTE(pCode+7) != X86_INSTR_JMP_REL32)
+ {
+ return FALSE;
+ }
+ pCode = rel32Decode(pCode+8);
+
+ // NGEN case
+ if (*PTR_BYTE(pCode) == X86_INSTR_JMP_REL32) {
+ pCode = rel32Decode(pCode+1);
+ }
+
+ return pCode == GetPreStubEntryPoint();
+}
+
+//==========================================================================================
+// In an NGen image, virtual slots inherited from cross-module dependencies point to jump thunks.
+// These jump thunks initially point to VirtualMethodFixupStub, which transfers control here.
+// This method, VirtualMethodFixupWorker, patches the jump thunk to point to the actual
+// inherited method body once the precode has executed and we have a stable entry point.
+//
+EXTERN_C PVOID STDCALL VirtualMethodFixupWorker(Object * pThisPtr, CORCOMPILE_VIRTUAL_IMPORT_THUNK *pThunk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pThisPtr != NULL);
+ VALIDATEOBJECT(pThisPtr);
+
+ MethodTable * pMT = pThisPtr->GetTrueMethodTable();
+
+ WORD slotNumber = pThunk->slotNum;
+ _ASSERTE(slotNumber != (WORD)-1);
+
+ PCODE pCode = pMT->GetRestoredSlot(slotNumber);
+
+ if (!DoesSlotCallPrestub(pCode))
+ {
+ // Skip fixup precode jump for better perf
+ PCODE pDirectTarget = Precode::TryToSkipFixupPrecode(pCode);
+ if (pDirectTarget != NULL)
+ pCode = pDirectTarget;
+
+ INT64 oldValue = *(INT64*)pThunk;
+ BYTE* pOldValue = (BYTE*)&oldValue;
+
+ if (pOldValue[0] == X86_INSTR_CALL_REL32)
+ {
+ INT64 newValue = oldValue;
+ BYTE* pNewValue = (BYTE*)&newValue;
+ pNewValue[0] = X86_INSTR_JMP_REL32;
+
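+            // rel32 displacements are relative to the byte that follows the
+            // 5-byte call/jmp instruction, hence the computation below against
+            // &pThunk->callJmp[5].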
+ INT_PTR pcRelOffset = (BYTE*)pCode - &pThunk->callJmp[5];
+ *(INT32 *)(&pNewValue[1]) = (INT32) pcRelOffset;
+
+ _ASSERTE(IS_ALIGNED(pThunk, sizeof(INT64)));
+ if (EnsureWritableExecutablePagesNoThrow(pThunk, sizeof(INT64)))
+ FastInterlockCompareExchangeLong((INT64*)pThunk, newValue, oldValue);
+
+ FlushInstructionCache(GetCurrentProcess(), pThunk, 8);
+ }
+ }
+
+ return PVOID(pCode);
+}
+
+
+#ifdef FEATURE_READYTORUN
+
+//
+// Allocation of dynamic helpers
+//
+
+#define DYNAMIC_HELPER_ALIGNMENT sizeof(TADDR)
+
+#define BEGIN_DYNAMIC_HELPER_EMIT(size) \
+ SIZE_T cb = size; \
+ SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \
+ BYTE * pStart = (BYTE *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
+ BYTE * p = pStart;
+
+#define END_DYNAMIC_HELPER_EMIT() \
+ _ASSERTE(pStart + cb == p); \
+ while (p < pStart + cbAligned) *p++ = X86_INSTR_INT3; \
+ ClrFlushInstructionCache(pStart, cbAligned); \
+ return (PCODE)pStart
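+
+// Usage pattern for the helpers below: BEGIN_DYNAMIC_HELPER_EMIT reserves an
+// aligned block on the loader allocator's dynamic-helpers heap and sets up the
+// write cursor 'p'; END_DYNAMIC_HELPER_EMIT asserts that exactly 'size' bytes
+// were emitted, pads any alignment slack with int3, flushes the instruction
+// cache and returns the start of the block as a PCODE.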
+
+PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+{
+ STANDARD_VM_CONTRACT;
+
+ BEGIN_DYNAMIC_HELPER_EMIT(10);
+
+ *p++ = 0xB9; // mov ecx, XXXXXX
+ *(INT32 *)p = (INT32)arg;
+ p += 4;
+
+ *p++ = X86_INSTR_JMP_REL32; // jmp rel32
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target);
+ p += 4;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(10);
+
+ *p++ = 0xBA; // mov edx, XXXXXX
+ *(INT32 *)p = (INT32)arg;
+ p += 4;
+
+ *p++ = X86_INSTR_JMP_REL32; // jmp rel32
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target);
+ p += 4;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(15);
+
+ *p++ = 0xB9; // mov ecx, XXXXXX
+ *(INT32 *)p = (INT32)arg;
+ p += 4;
+
+ *p++ = 0xBA; // mov edx, XXXXXX
+ *(INT32 *)p = (INT32)arg2;
+ p += 4;
+
+ *p++ = X86_INSTR_JMP_REL32; // jmp rel32
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target);
+ p += 4;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelperArgMove(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(12);
+
+ *(UINT16 *)p = 0xD18B; // mov edx, ecx
+ p += 2;
+
+ *p++ = 0xB9; // mov ecx, XXXXXX
+ *(INT32 *)p = (INT32)arg;
+ p += 4;
+
+ *p++ = X86_INSTR_JMP_REL32; // jmp rel32
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target);
+ p += 4;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateReturn(LoaderAllocator * pAllocator)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(1);
+
+ *p++ = 0xC3; // ret
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateReturnConst(LoaderAllocator * pAllocator, TADDR arg)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(6);
+
+ *p++ = 0xB8; // mov eax, XXXXXX
+ *(INT32 *)p = (INT32)arg;
+ p += 4;
+
+ *p++ = 0xC3; // ret
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateReturnIndirConst(LoaderAllocator * pAllocator, TADDR arg, INT8 offset)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT((offset != 0) ? 9 : 6);
+
+ *p++ = 0xA1; // mov eax, [XXXXXX]
+ *(INT32 *)p = (INT32)arg;
+ p += 4;
+
+ if (offset != 0)
+ {
+ // add eax, <offset>
+ *p++ = 0x83;
+ *p++ = 0xC0;
+ *p++ = offset;
+ }
+
+ *p++ = 0xC3; // ret
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(12);
+
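+    // The pop/push sequence below re-inserts the caller's return address on
+    // top of the pushed constant, so 'target' sees 'arg' as one extra stack
+    // argument.
+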
+ // pop eax
+ *p++ = 0x58;
+
+ // push arg
+ *p++ = 0x68;
+ *(INT32 *)p = arg;
+ p += 4;
+
+ // push eax
+ *p++ = 0x50;
+
+ *p++ = X86_INSTR_JMP_REL32; // jmp rel32
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target);
+ p += 4;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(17);
+
+ // pop eax
+ *p++ = 0x58;
+
+ // push arg
+ *p++ = 0x68;
+ *(INT32 *)p = arg;
+ p += 4;
+
+ // push arg2
+ *p++ = 0x68;
+ *(INT32 *)p = arg2;
+ p += 4;
+
+ // push eax
+ *p++ = 0x50;
+
+ *p++ = X86_INSTR_JMP_REL32; // jmp rel32
+ *(INT32 *)p = rel32UsingJumpStub((INT32 *)p, target);
+ p += 4;
+
+ END_DYNAMIC_HELPER_EMIT();
+}
+
+#endif // FEATURE_READYTORUN
+
+
+#endif // DACCESS_COMPILE
diff --git a/src/vm/i386/excepcpu.h b/src/vm/i386/excepcpu.h
new file mode 100644
index 0000000000..0c44f89e5d
--- /dev/null
+++ b/src/vm/i386/excepcpu.h
@@ -0,0 +1,88 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// EXCEPX86.H -
+//
+// This header file is optionally included from Excep.h if the target platform is x86
+//
+
+
+#ifndef __excepx86_h__
+#define __excepx86_h__
+
+#include "corerror.h" // HResults for the COM+ Runtime
+
+#include "../dlls/mscorrc/resource.h"
+
+#define THROW_CONTROL_FOR_THREAD_FUNCTION ThrowControlForThread
+
+#define STATUS_CLR_GCCOVER_CODE STATUS_PRIVILEGED_INSTRUCTION
+
+class Thread;
+
+#if defined(_MSC_VER)
+#pragma warning(disable:4733) // Inline asm assigning to `FS:0` : handler not registered as safe handler
+ // Actually, the handler getting set is properly registered
+#endif
+
+#define INSTALL_EXCEPTION_HANDLING_RECORD(record) \
+ { \
+ PEXCEPTION_REGISTRATION_RECORD __record = (record); \
+ _ASSERTE(__record < GetCurrentSEHRecord()); \
+ __record->Next = (PEXCEPTION_REGISTRATION_RECORD)__readfsdword(0); \
+ __writefsdword(0, (DWORD)__record); \
+ }
+
+//
+// Note: this only pops a handler from the top of the stack. It will not remove a record from the middle of the
+// chain, and I can assure you that you don't want to do that anyway.
+//
+#define UNINSTALL_EXCEPTION_HANDLING_RECORD(record) \
+ { \
+ PEXCEPTION_REGISTRATION_RECORD __record = (record); \
+ _ASSERTE(__record == GetCurrentSEHRecord()); \
+ __writefsdword(0, (DWORD)__record->Next); \
+ }
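+
+// Illustrative usage of the two macros above (hypothetical handler and locals,
+// a sketch rather than code from this file):
+//
+//     EXCEPTION_REGISTRATION_RECORD record;
+//     record.Handler = (PEXCEPTION_ROUTINE)MyFrameHandler;
+//     INSTALL_EXCEPTION_HANDLING_RECORD(&record);
+//     ... code guarded by MyFrameHandler ...
+//     UNINSTALL_EXCEPTION_HANDLING_RECORD(&record); // must still be on top of FS:[0]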
+
+// stackOverwriteBarrier is used to detect overwriting of stack which will mess up handler registration
+#if defined(_DEBUG)
+#define DECLARE_CPFH_EH_RECORD(pCurThread) \
+ FrameHandlerExRecordWithBarrier *___pExRecordWithBarrier = (FrameHandlerExRecordWithBarrier *)_alloca(sizeof(FrameHandlerExRecordWithBarrier)); \
+ for (int ___i =0; ___i < STACK_OVERWRITE_BARRIER_SIZE; ___i++) \
+ ___pExRecordWithBarrier->m_StackOverwriteBarrier[___i] = STACK_OVERWRITE_BARRIER_VALUE; \
+ FrameHandlerExRecord *___pExRecord = &(___pExRecordWithBarrier->m_ExRecord); \
+ ___pExRecord->m_ExReg.Handler = (PEXCEPTION_ROUTINE)COMPlusFrameHandler; \
+ ___pExRecord->m_pEntryFrame = (pCurThread)->GetFrame();
+
+#else
+#define DECLARE_CPFH_EH_RECORD(pCurThread) \
+ FrameHandlerExRecord *___pExRecord = (FrameHandlerExRecord *)_alloca(sizeof(FrameHandlerExRecord)); \
+ ___pExRecord->m_ExReg.Handler = (PEXCEPTION_ROUTINE)COMPlusFrameHandler; \
+ ___pExRecord->m_pEntryFrame = (pCurThread)->GetFrame();
+
+#endif
+
+//
+// Retrieves the redirected CONTEXT* from the stack frame of one of the
+// RedirectedHandledJITCaseForXXX_Stub's.
+//
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(CONTEXT * pContext);
+
+PEXCEPTION_REGISTRATION_RECORD GetCurrentSEHRecord();
+PEXCEPTION_REGISTRATION_RECORD GetFirstCOMPlusSEHRecord(Thread*);
+
+// Determine the address of the instruction that made the current call.
+inline
+PCODE GetAdjustedCallAddress(PCODE returnAddress)
+{
+ LIMITED_METHOD_CONTRACT;
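+    // A near 'call rel32' (opcode E8 plus a 4-byte displacement) is 5 bytes on
+    // x86, so the call instruction starts 5 bytes before the return address it
+    // pushed.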
+ return returnAddress - 5;
+}
+
+BOOL AdjustContextForVirtualStub(EXCEPTION_RECORD *pExceptionRecord, CONTEXT *pContext);
+
+#endif // __excepx86_h__
diff --git a/src/vm/i386/excepx86.cpp b/src/vm/i386/excepx86.cpp
new file mode 100644
index 0000000000..4cad260815
--- /dev/null
+++ b/src/vm/i386/excepx86.cpp
@@ -0,0 +1,3734 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+/* EXCEP.CPP:
+ *
+ */
+#include "common.h"
+
+#include "frames.h"
+#include "excep.h"
+#include "object.h"
+#include "field.h"
+#include "dbginterface.h"
+#include "cgensys.h"
+#include "comutilnative.h"
+#include "sigformat.h"
+#include "siginfo.hpp"
+#include "gc.h"
+#include "eedbginterfaceimpl.h" //so we can clearexception in COMPlusThrow
+#include "perfcounters.h"
+#include "eventtrace.h"
+#include "eetoprofinterfacewrapper.inl"
+#include "eedbginterfaceimpl.inl"
+#include "dllimportcallback.h"
+#include "threads.h"
+#ifdef FEATURE_REMOTING
+#include "appdomainhelper.h"
+#endif
+#include "eeconfig.h"
+#include "vars.hpp"
+#include "generics.h"
+#include "securityprincipal.h"
+
+#include "asmconstants.h"
+#include "virtualcallstub.h"
+
+MethodDesc * GetUserMethodForILStub(Thread * pThread, UINT_PTR uStubSP, MethodDesc * pILStubMD, Frame ** ppFrameOut);
+
+#if !defined(DACCESS_COMPILE)
+
+#define FORMAT_MESSAGE_BUFFER_LENGTH 1024
+
+BOOL ComPlusFrameSEH(EXCEPTION_REGISTRATION_RECORD*);
+PEXCEPTION_REGISTRATION_RECORD GetPrevSEHRecord(EXCEPTION_REGISTRATION_RECORD*);
+
+extern "C" {
+// in asmhelpers.asm:
+VOID STDCALL ResumeAtJitEHHelper(EHContext *pContext);
+int STDCALL CallJitEHFilterHelper(size_t *pShadowSP, EHContext *pContext);
+VOID STDCALL CallJitEHFinallyHelper(size_t *pShadowSP, EHContext *pContext);
+
+BOOL CallRtlUnwind(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
+ void *callback,
+ EXCEPTION_RECORD *pExceptionRecord,
+ void *retval);
+
+BOOL CallRtlUnwindSafe(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
+ void *callback,
+ EXCEPTION_RECORD *pExceptionRecord,
+ void *retval);
+}
+
+static inline BOOL
+CPFH_ShouldUnwindStack(const EXCEPTION_RECORD * pCER) {
+
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pCER != NULL);
+
+ // We can only unwind those exceptions whose context/record we don't need for a
+    // rethrow. Those are COM+ exceptions and stack overflow. For all the others, we
+ // need to keep the context around for a rethrow, which means they can't
+ // be unwound.
+ if (IsComPlusException(pCER) || pCER->ExceptionCode == STATUS_STACK_OVERFLOW)
+ return TRUE;
+ else
+ return FALSE;
+}
+
+static inline BOOL IsComPlusNestedExceptionRecord(EXCEPTION_REGISTRATION_RECORD* pEHR)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (pEHR->Handler == (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler)
+ return TRUE;
+ return FALSE;
+}
+
+EXCEPTION_REGISTRATION_RECORD *TryFindNestedEstablisherFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame)
+{
+ LIMITED_METHOD_CONTRACT;
+ while (pEstablisherFrame->Handler != (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler) {
+ pEstablisherFrame = pEstablisherFrame->Next;
+ if (pEstablisherFrame == EXCEPTION_CHAIN_END) return 0;
+ }
+ return pEstablisherFrame;
+}
+
+#ifdef _DEBUG
+// Stores the last handler we went to, so that if we didn't get an endcatch and the
+// stack is corrupted, we can figure out who did it.
+static MethodDesc *gLastResumedExceptionFunc = NULL;
+static DWORD gLastResumedExceptionHandler = 0;
+#endif
+
+//---------------------------------------------------------------------
+// void RtlUnwindCallback()
+// call back function after global unwind, rtlunwind calls this function
+//---------------------------------------------------------------------
+static void RtlUnwindCallback()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Should never get here");
+}
+
+BOOL NExportSEH(EXCEPTION_REGISTRATION_RECORD* pEHR)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if ((LPVOID)pEHR->Handler == (LPVOID)UMThunkPrestubHandler)
+ {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+BOOL FastNExportSEH(EXCEPTION_REGISTRATION_RECORD* pEHR)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if ((LPVOID)pEHR->Handler == (LPVOID)FastNExportExceptHandler)
+ return TRUE;
+ return FALSE;
+}
+
+BOOL ReverseCOMSEH(EXCEPTION_REGISTRATION_RECORD* pEHR)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_COMINTEROP
+ if ((LPVOID)pEHR->Handler == (LPVOID)COMPlusFrameHandlerRevCom)
+ return TRUE;
+#endif // FEATURE_COMINTEROP
+ return FALSE;
+}
+
+
+//
+// Returns true if the given SEH handler is one of our SEH handlers that is responsible for managing exceptions in
+// regions of managed code.
+//
+BOOL IsUnmanagedToManagedSEHHandler(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame)
+{
+ WRAPPER_NO_CONTRACT;
+
+ //
+ // ComPlusFrameSEH() is for COMPlusFrameHandler & COMPlusNestedExceptionHandler.
+ // FastNExportSEH() is for FastNExportExceptHandler.
+ // NExportSEH() is for UMThunkPrestubHandler.
+ //
+ return (ComPlusFrameSEH(pEstablisherFrame) || FastNExportSEH(pEstablisherFrame) || NExportSEH(pEstablisherFrame) || ReverseCOMSEH(pEstablisherFrame));
+}
+
+Frame *GetCurrFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame)
+{
+ Frame *pFrame;
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsUnmanagedToManagedSEHHandler(pEstablisherFrame));
+ if (NExportSEH(pEstablisherFrame))
+ pFrame = ((ComToManagedExRecord *)pEstablisherFrame)->GetCurrFrame();
+ else
+ pFrame = ((FrameHandlerExRecord *)pEstablisherFrame)->GetCurrFrame();
+
+ _ASSERTE(GetThread() == NULL || GetThread()->GetFrame() <= pFrame);
+
+ return pFrame;
+}
+
+EXCEPTION_REGISTRATION_RECORD* GetNextCOMPlusSEHRecord(EXCEPTION_REGISTRATION_RECORD* pRec) {
+ WRAPPER_NO_CONTRACT;
+ if (pRec == EXCEPTION_CHAIN_END)
+ return EXCEPTION_CHAIN_END;
+
+ do {
+ _ASSERTE(pRec != 0);
+ pRec = pRec->Next;
+ } while (pRec != EXCEPTION_CHAIN_END && !IsUnmanagedToManagedSEHHandler(pRec));
+
+ _ASSERTE(pRec == EXCEPTION_CHAIN_END || IsUnmanagedToManagedSEHHandler(pRec));
+ return pRec;
+}
+
+
+/*
+ * GetClrSEHRecordServicingStackPointer
+ *
+ * This function searches all the Frame SEH records and finds the one that is
+ * currently signed up to do all exception handling for the given stack pointer
+ * on the given thread.
+ *
+ * Parameters:
+ * pThread - The thread to search on.
+ * pStackPointer - The stack location that we are finding the Frame SEH Record for.
+ *
+ * Returns
+ * A pointer to the SEH record, or EXCEPTION_CHAIN_END if none was found.
+ *
+ */
+
+PEXCEPTION_REGISTRATION_RECORD
+GetClrSEHRecordServicingStackPointer(Thread *pThread,
+ void *pStackPointer)
+{
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+
+ //
+ // We can only do this if there is a context in the pExInfo. There are cases (most notably the
+ // EEPolicy::HandleFatalError case) where we don't have that. In these cases we will return
+ // no enclosing handler since we cannot accurately determine the FS:0 entry which services
+ // this stack address.
+ //
+ // The side effect of this is that for these cases, the debugger cannot intercept
+ // the exception
+ //
+ CONTEXT* pContextRecord = pExState->GetContextRecord();
+ if (pContextRecord == NULL)
+ {
+ return EXCEPTION_CHAIN_END;
+ }
+
+ void *exceptionSP = dac_cast<PTR_VOID>(GetSP(pContextRecord));
+
+
+ //
+ // Now set the establishing frame. What this means in English is that we need to find
+ // the fs:0 entry that handles exceptions for the place on the stack given in stackPointer.
+ //
+ PEXCEPTION_REGISTRATION_RECORD pSEHRecord = GetFirstCOMPlusSEHRecord(pThread);
+
+ while (pSEHRecord != EXCEPTION_CHAIN_END)
+ {
+
+ //
+ // Skip any SEHRecord which is not a CLR record or was pushed after the exception
+ // on this thread occurred.
+ //
+ if (IsUnmanagedToManagedSEHHandler(pSEHRecord) && (exceptionSP <= (void *)pSEHRecord))
+ {
+ Frame *pFrame = GetCurrFrame(pSEHRecord);
+ //
+ // Arcane knowledge here. All Frame records are stored on the stack by the runtime
+            // at ever-decreasing addresses. So, we merely have to search back until
+ // we find the first frame record with a higher stack value to find the
+ // establishing frame for the given stack address.
+ //
+ if (((void *)pFrame) >= pStackPointer)
+ {
+ break;
+ }
+
+ }
+
+ pSEHRecord = GetNextCOMPlusSEHRecord(pSEHRecord);
+ }
+
+ return pSEHRecord;
+}
+
+#ifdef _DEBUG
+// We've determined during a stack walk that managed code is transitioning to unmanaged (EE) code. Check that the
+// state of the EH chain is correct.
+//
+// For x86, check that we do INSTALL_COMPLUS_EXCEPTION_HANDLER before calling managed code. This check should be
+// done for all managed code sites, not just transitions. But this will catch most problem cases.
+void VerifyValidTransitionFromManagedCode(Thread *pThread, CrawlFrame *pCF)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(ExecutionManager::IsManagedCode(GetControlPC(pCF->GetRegisterSet())));
+
+ // Cannot get to the TEB of other threads. So ignore them.
+ if (pThread != GetThread())
+ {
+ return;
+ }
+
+ // Find the EH record guarding the current region of managed code, based on the CrawlFrame passed in.
+ PEXCEPTION_REGISTRATION_RECORD pEHR = GetCurrentSEHRecord();
+
+ while ((pEHR != EXCEPTION_CHAIN_END) && ((ULONG_PTR)pEHR < GetRegdisplaySP(pCF->GetRegisterSet())))
+ {
+ pEHR = pEHR->Next;
+ }
+
+ // VerifyValidTransitionFromManagedCode can be called before the CrawlFrame's MethodDesc is initialized.
+ // Fix that if necessary for the consistency check.
+ MethodDesc * pFunction = pCF->GetFunction();
+ if ((!IsUnmanagedToManagedSEHHandler(pEHR)) && // Will the assert fire? If not, don't waste our time.
+ (pFunction == NULL))
+ {
+ _ASSERTE(pCF->GetRegisterSet());
+ PCODE ip = GetControlPC(pCF->GetRegisterSet());
+ pFunction = ExecutionManager::GetCodeMethodDesc(ip);
+ _ASSERTE(pFunction);
+ }
+
+ // Great, we've got the EH record that's next up the stack from the current SP (which is in managed code). That
+    // had better be a record for one of our handlers responsible for handling exceptions in managed code. If it's
+    // not, then someone made it into managed code without setting up one of our EH handlers, and that's really
+ // bad.
+ CONSISTENCY_CHECK_MSGF(IsUnmanagedToManagedSEHHandler(pEHR),
+ ("Invalid transition into managed code!\n\n"
+ "We're walking this thread's stack and we've reached a managed frame at Esp=0x%p. "
+ "(The method is %s::%s) "
+ "The very next FS:0 record (0x%p) up from this point on the stack should be one of "
+                           "our 'unmanaged to managed SEH handlers', but it's not... it's something else, and "
+ "that's very bad. It indicates that someone managed to call into managed code without "
+ "setting up the proper exception handling.\n\n"
+ "Get a good unmanaged stack trace for this thread. All FS:0 records are on the stack, "
+ "so you can see who installed the last handler. Somewhere between that function and "
+                           "where the thread is now is where the bad transition occurred.\n\n"
+ "A little extra info: FS:0 = 0x%p, pEHR->Handler = 0x%p\n",
+ GetRegdisplaySP(pCF->GetRegisterSet()),
+                           pFunction->m_pszDebugClassName,
+                           pFunction->m_pszDebugMethodName,
+ pEHR,
+ GetCurrentSEHRecord(),
+ pEHR->Handler));
+}
+
+#endif
+
+//================================================================================
+
+// There are some things that should never be true when handling an
+// exception. This function checks for them. Will assert or trap
+// if it finds an error.
+static inline void
+CPFH_VerifyThreadIsInValidState(Thread* pThread, DWORD exceptionCode, EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame) {
+ WRAPPER_NO_CONTRACT;
+
+ if ( exceptionCode == STATUS_BREAKPOINT
+ || exceptionCode == STATUS_SINGLE_STEP) {
+ return;
+ }
+
+#ifdef _DEBUG
+ // check for overwriting of stack
+ CheckStackBarrier(pEstablisherFrame);
+ // trigger check for bad fs:0 chain
+ GetCurrentSEHRecord();
+#endif
+
+ if (!g_fEEShutDown) {
+ // An exception on the GC thread, or while holding the thread store lock, will likely lock out the entire process.
+ if (::IsGCThread() || ThreadStore::HoldingThreadStore())
+ {
+ _ASSERTE(!"Exception during garbage collection or while holding thread store");
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+ }
+}
+
+
+#ifdef FEATURE_HIJACK
+void
+CPFH_AdjustContextForThreadSuspensionRace(CONTEXT *pContext, Thread *pThread)
+{
+ WRAPPER_NO_CONTRACT;
+
+ PCODE f_IP = GetIP(pContext);
+ if (Thread::IsAddrOfRedirectFunc((PVOID)f_IP)) {
+
+ // This is a very rare case where we tried to redirect a thread that was
+ // just about to dispatch an exception, and our update of EIP took, but
+ // the thread continued dispatching the exception.
+ //
+ // If this should happen (very rare) then we fix it up here.
+ //
+ _ASSERTE(pThread->GetSavedRedirectContext());
+ SetIP(pContext, GetIP(pThread->GetSavedRedirectContext()));
+ STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 1 setting IP = %x\n", pContext->Eip);
+ }
+
+ if (f_IP == GetEEFuncEntryPoint(THROW_CONTROL_FOR_THREAD_FUNCTION)) {
+
+ // This is a very rare case where we tried to redirect a thread that was
+ // just about to dispatch an exception, and our update of EIP took, but
+ // the thread continued dispatching the exception.
+ //
+ // If this should happen (very rare) then we fix it up here.
+ //
+ SetIP(pContext, GetIP(pThread->m_OSContext));
+ STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 2 setting IP = %x\n", pContext->Eip);
+ }
+
+// We have another even rarer race condition:
+// - A) On thread A, Debugger puts an int 3 in the code stream at address X
+// - A) We hit it and begin dispatching an exception. The eip will be X + 1 (int3 is special)
+// - B) Meanwhile, thread B redirects A's eip to Y. (Although A is really somewhere
+// in the kernel, it looks like it's still in user code, so it can fall under the
+// HandledJitCase and can be redirected)
+// - A) The OS, trying to be nice, expects we have a breakpoint exception at X+1,
+// but does -1 on the address since it knows int3 will leave the eip +1.
+// So the context structure it will pass to the Handler is ideally (X+1)-1 = X
+//
+// ** Here's the race: Since thread B redirected A, the eip is actually Y (not X+1),
+// but the kernel still touches it up to Y-1. So there's a window between when we hit a
+// bp and when the handler gets called that this can happen.
+// This causes an unhandled BP (since the debugger doesn't recognize the bp at Y-1)
+//
+// So what to do: If we land at Y-1 (ie, if f_IP+1 is the addr of a Redirected Func),
+// then restore the EIP back to X. This will skip the redirection.
+// Fortunately, this only occurs in cases where it's ok
+// to skip. The debugger will recognize the patch and handle it.
+
+ if (Thread::IsAddrOfRedirectFunc((PVOID)(f_IP + 1))) {
+ _ASSERTE(pThread->GetSavedRedirectContext());
+ SetIP(pContext, GetIP(pThread->GetSavedRedirectContext()) - 1);
+ STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 3 setting IP = %x\n", pContext->Eip);
+ }
+
+ if (f_IP + 1 == GetEEFuncEntryPoint(THROW_CONTROL_FOR_THREAD_FUNCTION)) {
+ SetIP(pContext, GetIP(pThread->m_OSContext) - 1);
+ STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 4 setting IP = %x\n", pContext->Eip);
+ }
+}
+#endif // FEATURE_HIJACK
+
+
+// We want to leave true null reference exceptions alone. But if we are
+// trashing memory, we don't want the application to swallow it. The 0x100
+// below will give us false positives for debugging, if the app is accessing
+// a field more than 256 bytes down an object, where the reference is null.
+//
+// Removed use of the IgnoreUnmanagedExceptions reg key...simply return false now.
+//
+static inline BOOL
+CPFH_ShouldIgnoreException(EXCEPTION_RECORD *pExceptionRecord) {
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+
+static inline void
+CPFH_UpdatePerformanceCounters() {
+ WRAPPER_NO_CONTRACT;
+ COUNTER_ONLY(GetPerfCounters().m_Excep.cThrown++);
+}
+
+
+//******************************************************************************
+EXCEPTION_DISPOSITION COMPlusAfterUnwind(
+ EXCEPTION_RECORD *pExceptionRecord,
+ EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
+ ThrowCallbackType& tct)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our
+ // cleanup and end up back in jitted code. Any more FS0 handlers pushed from this point on out will _not_ be
+ // unwound. We go ahead and assert right here that indeed there are no handlers below the establisher frame
+ // before we go any further.
+ _ASSERTE(pEstablisherFrame == GetCurrentSEHRecord());
+
+ Thread* pThread = GetThread();
+
+ _ASSERTE(tct.pCurrentExceptionRecord == pEstablisherFrame);
+
+ NestedHandlerExRecord nestedHandlerExRecord;
+ nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame));
+
+ // ... and now, put the nested record back on.
+ INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));
+
+ // We entered COMPlusAfterUnwind in PREEMP, but we need to be in COOP from here on out
+ GCX_COOP_NO_DTOR();
+
+ tct.bIsUnwind = TRUE;
+ tct.pProfilerNotify = NULL;
+
+ LOG((LF_EH, LL_INFO100, "COMPlusFrameHandler: unwinding\n"));
+
+ tct.bUnwindStack = CPFH_ShouldUnwindStack(pExceptionRecord);
+
+ LOG((LF_EH, LL_INFO1000, "COMPlusAfterUnwind: going to: pFunc:%#X, pStack:%#X\n",
+ tct.pFunc, tct.pStack));
+
+ // TODO: UnwindFrames ends up calling into StackWalkFrames which is SO_INTOLERANT
+ // as is UnwindFrames, etc... Should we make COMPlusAfterUnwind SO_INTOLERANT???
+ ANNOTATION_VIOLATION(SOToleranceViolation);
+
+ UnwindFrames(pThread, &tct);
+
+#ifdef DEBUGGING_SUPPORTED
+ ExInfo* pExInfo = pThread->GetExceptionState()->GetCurrentExceptionTracker();
+ if (pExInfo->m_ValidInterceptionContext)
+ {
+        // By now we should have all unknown FS:[0] handlers unwound along with the managed Frames until
+ // the interception point. We can now pop nested exception handlers and resume at interception context.
+ EHContext context = pExInfo->m_InterceptionContext;
+ pExInfo->m_InterceptionContext.Init();
+ pExInfo->m_ValidInterceptionContext = FALSE;
+
+ UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context);
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ _ASSERTE(!"Should not get here");
+ return ExceptionContinueSearch;
+} // EXCEPTION_DISPOSITION COMPlusAfterUnwind()
+
+#ifdef DEBUGGING_SUPPORTED
+
+//---------------------------------------------------------------------------------------
+//
+// This function is called to intercept an exception and start an unwind.
+//
+// Arguments:
+// pCurrentEstablisherFrame - the exception registration record covering the stack range
+// containing the interception point
+// pExceptionRecord - EXCEPTION_RECORD of the exception being intercepted
+//
+// Return Value:
+// ExceptionContinueSearch if the exception cannot be intercepted
+//
+// Notes:
+// If the exception is intercepted, this function never returns.
+//
+
+EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept(EXCEPTION_REGISTRATION_RECORD *pCurrentEstablisherFrame,
+ EXCEPTION_RECORD *pExceptionRecord)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!CheckThreadExceptionStateForInterception())
+ {
+ return ExceptionContinueSearch;
+ }
+
+ Thread* pThread = GetThread();
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+
+ EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame;
+ ThrowCallbackType tct;
+ tct.Init();
+
+ pExState->GetDebuggerState()->GetDebuggerInterceptInfo(&pEstablisherFrame,
+ &(tct.pFunc),
+ &(tct.dHandler),
+ &(tct.pStack),
+ NULL,
+ &(tct.pBottomFrame)
+ );
+
+ //
+ // If the handler that we've selected as the handler for the target frame of the unwind is in fact above the
+ // handler that we're currently executing in, then use the current handler instead. Why? Our handlers for
+    // nested exceptions actually process managed frames that live above them, up to the COMPlusFrameHandler that
+    // pushed the nested handler. If the user selects a frame above the nested handler, then we will have selected
+ // the COMPlusFrameHandler above the current nested handler. But we don't want to ask RtlUnwind to unwind past
+ // the nested handler that we're currently executing in.
+ //
+ if (pEstablisherFrame > pCurrentEstablisherFrame)
+ {
+ // This should only happen if we're in a COMPlusNestedExceptionHandler.
+ _ASSERTE(IsComPlusNestedExceptionRecord(pCurrentEstablisherFrame));
+
+ pEstablisherFrame = pCurrentEstablisherFrame;
+ }
+
+#ifdef _DEBUG
+ tct.pCurrentExceptionRecord = pEstablisherFrame;
+#endif
+
+ LOG((LF_EH|LF_CORDB, LL_INFO100, "ClrDebuggerDoUnwindAndIntercept: Intercepting at %s\n", tct.pFunc->m_pszDebugMethodName));
+ LOG((LF_EH|LF_CORDB, LL_INFO100, "\t\t: pFunc is 0x%X\n", tct.pFunc));
+ LOG((LF_EH|LF_CORDB, LL_INFO100, "\t\t: pStack is 0x%X\n", tct.pStack));
+
+ CallRtlUnwindSafe(pEstablisherFrame, RtlUnwindCallback, pExceptionRecord, 0);
+
+ ExInfo* pExInfo = pThread->GetExceptionState()->GetCurrentExceptionTracker();
+ if (pExInfo->m_ValidInterceptionContext)
+ {
+        // By now we should have all unknown FS:[0] handlers unwound along with the managed Frames until
+ // the interception point. We can now pop nested exception handlers and resume at interception context.
+ GCX_COOP();
+ EHContext context = pExInfo->m_InterceptionContext;
+ pExInfo->m_InterceptionContext.Init();
+ pExInfo->m_ValidInterceptionContext = FALSE;
+
+ UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context);
+ }
+
+ // on x86 at least, RtlUnwind always returns
+
+ // Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our
+ // cleanup and end up back in jitted code. Any more FS0 handlers pushed from this point on out will _not_ be
+ // unwound.
+ return COMPlusAfterUnwind(pExState->GetExceptionRecord(), pEstablisherFrame, tct);
+} // EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept()
+
+#endif // DEBUGGING_SUPPORTED
+
+// This is a wrapper around the assembly routine that invokes RtlUnwind in the OS.
+// When we invoke RtlUnwind, the OS will modify the ExceptionFlags field in the
+// exception record to reflect unwind. Since we call RtlUnwind in the first pass
+// with a valid exception record when we find an exception handler AND because RtlUnwind
+// returns on x86, the OS would have flagged the exception record for unwind.
+//
+// In case the exception is rethrown from the catch/filter-handler AND it's a non-COMPLUS
+// exception, the runtime will use the reference to the saved exception record to reraise
+// the exception, as part of rethrow fixup. Since the OS would have modified the exception record
+// to reflect unwind, this wrapper will "reset" the ExceptionFlags field when RtlUnwind returns.
+// Otherwise, the rethrow would result in a second pass, as opposed to a first, since the ExceptionFlags
+// would indicate an unwind.
+//
+// This rethrow issue does not affect COMPLUS exceptions since we always create a brand new exception
+// record for them in RaiseTheExceptionInternalOnly.
+BOOL CallRtlUnwindSafe(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
+ void *callback,
+ EXCEPTION_RECORD *pExceptionRecord,
+ void *retval)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Save the ExceptionFlags value before invoking RtlUnwind.
+ DWORD dwExceptionFlags = pExceptionRecord->ExceptionFlags;
+
+ BOOL fRetVal = CallRtlUnwind(pEstablisherFrame, callback, pExceptionRecord, retval);
+
+ // Reset ExceptionFlags field, if applicable
+ if (pExceptionRecord->ExceptionFlags != dwExceptionFlags)
+ {
+ // We would expect the 32bit OS to have set the unwind flag at this point.
+ _ASSERTE(pExceptionRecord->ExceptionFlags & EXCEPTION_UNWINDING);
+ LOG((LF_EH, LL_INFO100, "CallRtlUnwindSafe: Resetting ExceptionFlags from %lu to %lu\n", pExceptionRecord->ExceptionFlags, dwExceptionFlags));
+ pExceptionRecord->ExceptionFlags = dwExceptionFlags;
+ }
+
+ return fRetVal;
+}
+
+//******************************************************************************
+// The essence of the first pass handler (after we've decided to actually do
+// the first pass handling).
+//******************************************************************************
+inline EXCEPTION_DISPOSITION __cdecl
+CPFH_RealFirstPassHandler( // ExceptionContinueSearch, etc.
+ EXCEPTION_RECORD *pExceptionRecord, // The exception record, with exception type.
+ EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame, // Exception frame on whose behalf this is called.
+ CONTEXT *pContext, // Context from the exception.
+ void *pDispatcherContext, // @todo
+ BOOL bAsynchronousThreadStop, // @todo
+ BOOL fPGCDisabledOnEntry) // @todo
+{
+ // We don't want to use a runtime contract here since this codepath is used during
+ // the processing of a hard SO. Contracts use a significant amount of stack
+ // which we can't afford for those cases.
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#ifdef _DEBUG
+ static int breakOnFirstPass = -1;
+
+ if (breakOnFirstPass == -1)
+ breakOnFirstPass = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnFirstPass);
+
+ if (breakOnFirstPass != 0)
+ {
+ _ASSERTE(!"First pass exception handler");
+ }
+#endif
+
+ EXCEPTION_DISPOSITION retval;
+ DWORD exceptionCode = pExceptionRecord->ExceptionCode;
+ Thread *pThread = GetThread();
+
+#ifdef _DEBUG
+ static int breakOnSO = -1;
+
+ if (breakOnSO == -1)
+ breakOnSO = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BreakOnSO);
+
+ if (breakOnSO != 0 && exceptionCode == STATUS_STACK_OVERFLOW)
+ {
+ DebugBreak(); // ASSERTing will overwrite the guard region
+ }
+#endif
+
+    // We always want to be in cooperative mode when we run this function, and whenever we return
+    // from it we want to go to preemptive mode, because we are returning to the OS.
+ _ASSERTE(pThread->PreemptiveGCDisabled());
+
+ BOOL bPopNestedHandlerExRecord = FALSE;
+ LFH found = LFH_NOT_FOUND; // Result of calling LookForHandler.
+ BOOL bRethrownException = FALSE;
+ BOOL bNestedException = FALSE;
+
+#if defined(USE_FEF)
+ BOOL bPopFaultingExceptionFrame = FALSE;
+ FrameWithCookie<FaultingExceptionFrame> faultingExceptionFrame;
+#endif // USE_FEF
+ ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
+
+ ThrowCallbackType tct;
+ tct.Init();
+
+ tct.pTopFrame = GetCurrFrame(pEstablisherFrame); // highest frame to search to
+
+#ifdef _DEBUG
+ tct.pCurrentExceptionRecord = pEstablisherFrame;
+ tct.pPrevExceptionRecord = GetPrevSEHRecord(pEstablisherFrame);
+#endif // _DEBUG
+
+ BOOL fIsManagedCode = pContext ? ExecutionManager::IsManagedCode(GetIP(pContext)) : FALSE;
+
+
+    // This establishes a marker so we can determine if we are processing a nested exception.
+    // We don't want to use the current frame to limit the search, as it could have been unwound by
+    // the time we get to the nested handler (i.e. if we find an exception, unwind to the call point and
+    // then resume in the catch and then get another exception), so we make the nested handler
+    // have the same boundary as this one. If the nested handler can't find a handler, we won't
+    // end up searching this frame list twice, because the nested handler will set the search
+    // boundary in the thread, and so if we get back to this handler it will have a range that starts
+    // and ends at the same place.
+
+ NestedHandlerExRecord nestedHandlerExRecord;
+ nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame));
+
+ INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));
+ bPopNestedHandlerExRecord = TRUE;
+
+#if defined(USE_FEF)
+ // Note: don't attempt to push a FEF for an exception in managed code if we weren't in cooperative mode when
+ // the exception was received. If preemptive GC was enabled when we received the exception, then it means the
+    // exception was rethrown from unmanaged code (including EE impl), and we shouldn't push a FEF.
+ if (fIsManagedCode &&
+ fPGCDisabledOnEntry &&
+ (pThread->m_pFrame == FRAME_TOP ||
+ pThread->m_pFrame->GetVTablePtr() != FaultingExceptionFrame::GetMethodFrameVPtr() ||
+ (size_t)pThread->m_pFrame > (size_t)pEstablisherFrame))
+ {
+        // Set up the interrupted frame so that a GC during calls to init won't collect the frames.
+        // We only need it for non-COM+ exceptions in managed code when we haven't already
+        // got one on the stack (we will have one already if we have called RtlUnwind, because
+        // the instantiation that called unwind would have installed one).
+ faultingExceptionFrame.InitAndLink(pContext);
+ bPopFaultingExceptionFrame = TRUE;
+ }
+#endif // USE_FEF
+
+ OBJECTREF e;
+ e = pThread->LastThrownObject();
+
+ STRESS_LOG7(LF_EH, LL_INFO10, "CPFH_RealFirstPassHandler: code:%X, LastThrownObject:%p, MT:%pT"
+ ", IP:%p, SP:%p, pContext:%p, pEstablisherFrame:%p\n",
+ exceptionCode, OBJECTREFToObject(e), (e!=0)?e->GetMethodTable():0,
+ pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0,
+ pContext, pEstablisherFrame);
+
+#ifdef LOGGING
+ // If it is a complus exception, and there is a thrown object, get its name, for better logging.
+ if (IsComPlusException(pExceptionRecord))
+ {
+ const char * eClsName = "!EXCEPTION_COMPLUS";
+ if (e != 0)
+ {
+ eClsName = e->GetTrueMethodTable()->GetDebugClassName();
+ }
+ LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: exception: 0x%08X, class: '%s', IP: 0x%p\n",
+ exceptionCode, eClsName, pContext ? GetIP(pContext) : NULL));
+ }
+#endif
+
+ EXCEPTION_POINTERS exceptionPointers = {pExceptionRecord, pContext};
+
+ STRESS_LOG4(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: setting boundaries: Exinfo: 0x%p, BottomMostHandler:0x%p, SearchBoundary:0x%p, TopFrame:0x%p\n",
+ pExInfo, pExInfo->m_pBottomMostHandler, pExInfo->m_pSearchBoundary, tct.pTopFrame);
+
+ // Here we are trying to decide if we are coming in as:
+ // 1) first handler in a brand new exception
+ // 2) a subsequent handler in an exception
+ // 3) a nested exception
+ // m_pBottomMostHandler is the registration structure (establisher frame) for the most recent (ie lowest in
+ // memory) non-nested handler that was installed and pEstablisher frame is what the current handler
+ // was registered with.
+ // The OS calls each registered handler in the chain, passing its establisher frame to it.
+ if (pExInfo->m_pBottomMostHandler != NULL && pEstablisherFrame > pExInfo->m_pBottomMostHandler)
+ {
+ STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: detected subsequent handler. ExInfo:0x%p, BottomMost:0x%p SearchBoundary:0x%p\n",
+ pExInfo, pExInfo->m_pBottomMostHandler, pExInfo->m_pSearchBoundary);
+
+ // If the establisher frame of this handler is greater than the bottommost then it must have been
+ // installed earlier and therefore we are case 2
+ if (pThread->GetThrowable() == NULL)
+ {
+            // Bottommost didn't set up a throwable, so the exception is not for us
+ retval = ExceptionContinueSearch;
+ goto exit;
+ }
+
+ // setup search start point
+ tct.pBottomFrame = pExInfo->m_pSearchBoundary;
+
+ if (tct.pTopFrame == tct.pBottomFrame)
+ {
+ // this will happen if our nested handler already searched for us so we don't want
+ // to search again
+ retval = ExceptionContinueSearch;
+ goto exit;
+ }
+ }
+ else
+ { // we are either case 1 or case 3
+#if defined(_DEBUG_IMPL)
+ //@todo: merge frames, context, handlers
+ if (pThread->GetFrame() != FRAME_TOP)
+ pThread->GetFrame()->LogFrameChain(LF_EH, LL_INFO1000);
+#endif // _DEBUG_IMPL
+
+ // If the exception was rethrown, we'll create a new ExInfo, which will represent the rethrown exception.
+ // The original exception is not the rethrown one.
+ if (pExInfo->m_ExceptionFlags.IsRethrown() && pThread->LastThrownObject() != NULL)
+ {
+ pExInfo->m_ExceptionFlags.ResetIsRethrown();
+ bRethrownException = TRUE;
+
+#if defined(USE_FEF)
+ if (bPopFaultingExceptionFrame)
+ {
+                // If we added a FEF, it will refer to the frame at the point of the original exception, which is
+                // already unwound, so we don't want it.
+                // If we rethrew the exception, we have already added a helper frame for the rethrow, so we don't
+                // need this one. If we didn't rethrow it (i.e. the rethrow came from native code), then the topmost
+                // frame will be a transition-to-native frame, in which case we don't need it either.
+ faultingExceptionFrame.Pop();
+ bPopFaultingExceptionFrame = FALSE;
+ }
+#endif
+ }
+
+ // If the establisher frame is less than the bottommost handler, then this is nested because the
+ // establisher frame was installed after the bottommost.
+ if (pEstablisherFrame < pExInfo->m_pBottomMostHandler
+ /* || IsComPlusNestedExceptionRecord(pEstablisherFrame) */ )
+ {
+ bNestedException = TRUE;
+
+ // case 3: this is a nested exception. Need to save and restore the thread info
+ STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: ExInfo:0x%p detected nested exception 0x%p < 0x%p\n",
+ pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler);
+
+ EXCEPTION_REGISTRATION_RECORD* pNestedER = TryFindNestedEstablisherFrame(pEstablisherFrame);
+ ExInfo *pNestedExInfo;
+
+ if (!pNestedER || pNestedER >= pExInfo->m_pBottomMostHandler )
+ {
+ // RARE CASE. We've re-entered the EE from an unmanaged filter.
+ //
+ // OR
+ //
+                // We can be here if we don't find a nested exception handler. This is exemplified by the
+                // call chain of scenario 2, explained further below.
+ //
+ // Assuming __try of NativeB throws an exception E1 and it gets caught in ManagedA2, then
+ // bottom-most handler (BMH) is going to be CPFH_A. The catch will trigger an unwind
+ // and invoke __finally in NativeB. Let the __finally throw a new exception E2.
+ //
+ // Assuming ManagedB2 has a catch block to catch E2, when we enter CPFH_B looking for a
+ // handler for E2, our establisher frame will be that of CPFH_B, which will be lower
+ // in stack than current BMH (which is CPFH_A). Thus, we will come here, determining
+ // E2 to be nested exception correctly but not find a nested exception handler.
+ void *limit = (void *) GetPrevSEHRecord(pExInfo->m_pBottomMostHandler);
+
+ pNestedExInfo = new (nothrow) ExInfo(); // Very rare failure here; need robust allocator.
+ if (pNestedExInfo == NULL)
+ { // if we can't allocate memory, we can't correctly continue.
+ #if defined(_DEBUG)
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_NestedEhOom))
+ _ASSERTE(!"OOM in callback from unmanaged filter.");
+ #endif // _DEBUG
+
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_OUTOFMEMORY);
+ }
+
+ pNestedExInfo->m_StackAddress = limit; // Note: this is also the flag that tells us this
+ // ExInfo was stack allocated.
+ }
+ else
+ {
+ pNestedExInfo = &((NestedHandlerExRecord*)pNestedER)->m_handlerInfo;
+ }
+
+ LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: PushExInfo() current: 0x%p previous: 0x%p\n",
+ pExInfo->m_StackAddress, pNestedExInfo->m_StackAddress));
+
+ _ASSERTE(pNestedExInfo);
+ pNestedExInfo->m_hThrowable = NULL; // pNestedExInfo may be stack allocated, and as such full of
+ // garbage. m_hThrowable must be sane, so set it to NULL. (We could
+ // zero the entire record, but this is cheaper.)
+
+ pNestedExInfo->CopyAndClearSource(pExInfo);
+
+ pExInfo->m_pPrevNestedInfo = pNestedExInfo; // Save at head of nested info chain
+
+#if 0
+/* the following code was introduced in Whidbey as part of the Faulting Exception Frame removal (12/03).
+ However it isn't correct. If any nested exceptions occur while processing a rethrow, we would
+ incorrectly consider the nested exception to be a rethrow. See VSWhidbey 349379 for an example.
+
+ Therefore I am disabling this code until we see a failure that explains why it was added in the first
+ place. cwb 9/04.
+*/
+ // If we're here as a result of a rethrown exception, set the rethrown flag on the new ExInfo.
+ if (bRethrownException)
+ {
+ pExInfo->m_ExceptionFlags.SetIsRethrown();
+ }
+#endif
+ }
+ else
+ {
+ // At this point, either:
+ //
+ // 1) the bottom-most handler is NULL, implying this is a new exception for which we are getting ready, OR
+            // 2) the bottom-most handler is not NULL, implying that there is already an exception in progress.
+ //
+ // Scenario 1 is that of a new throw and is easy to understand. Scenario 2 is the interesting one.
+ //
+ // ManagedA1 -> ManagedA2 -> ManagedA3 -> NativeCodeA -> ManagedB1 -> ManagedB2 -> ManagedB3 -> NativeCodeB
+ //
+ // On x86, each block of managed code is protected by one COMPlusFrameHandler [CPFH] (CLR's exception handler
+ // for managed code), unlike 64bit where each frame has a personality routine attached to it. Thus,
+ // for the example above, assume CPFH_A protects ManagedA* blocks and is setup just before the call to
+ // ManagedA1. Likewise, CPFH_B protects ManagedB* blocks and is setup just before the call to ManagedB1.
+ //
+ // When ManagedB3 throws an exception, CPFH_B is invoked to look for a handler in all of the ManagedB* blocks.
+ // At this point, it is setup as the "bottom-most-handler" (BMH). If no handler is found and exception reaches
+ // ManagedA* blocks, CPFH_A is invoked to look for a handler and thus, becomes BMH.
+ //
+ // Thus, in the first pass on x86 for a given exception, a particular CPFH will be invoked only once when looking
+ // for a handler and thus, registered as BMH only once. Either the exception goes unhandled and the process will
+ // terminate or a handler will be found and second pass will commence.
+ //
+ // However, assume NativeCodeB had a __try/__finally and raised an exception [E1] within the __try. Let's assume
+ // it gets caught in ManagedB1 and thus, unwind is triggered. At this point, the active exception tracker
+ // has context about the exception thrown out of __try and CPFH_B is registered as BMH.
+ //
+ // If the __finally throws a new exception [E2], CPFH_B will be invoked again for first pass while looking for
+            // a handler for the thrown exception. Since BMH is already non-NULL, we will come here because EstablisherFrame
+            // will be the same as BMH (EstablisherFrame will be that of CPFH_B). We will proceed to overwrite the "required" parts
+ // of the existing exception tracker with the details of E2 (see setting of exception record and context below), erasing
+ // any artifact of E1.
+ //
+ // This is unlike Scenario 1 when exception tracker is completely initialized to default values. This is also
+ // unlike 64bit which will detect that E1 and E2 are different exceptions and hence, will setup a new tracker
+ // to track E2, effectively behaving like Scenario 1 above. X86 cannot do this since there is no nested exception
+ // tracker setup that gets to see the new exception.
+ //
+            // Thus, if E1 was a CSE and E2 isn't, we will come here and treat E2 as a CSE as well, since corruption severity
+            // is initialized as part of exception tracker initialization. E2 will then start to be treated as a CSE, which is
+            // incorrect. A similar argument applies to the delivery of first chance exception notifications.
+ //
+ // <QUIP> Another example why we should unify EH systems :) </QUIP>
+ //
+            // To address this issue, we need to reset the exception tracker here, in addition to overwriting the
+            // "required" parts of the exception tracker.
+
+ // If the current establisher frame is the same as the bottom-most-handler and we are here
+ // in the first pass, assert that current exception and the one tracked by active exception tracker
+ // are indeed different exceptions. In such a case, we must reset the exception tracker so that it can be
+ // setup correctly further down when CEHelper::SetupCorruptionSeverityForActiveException is invoked.
+
+ if ((pExInfo->m_pBottomMostHandler != NULL) &&
+ (pEstablisherFrame == pExInfo->m_pBottomMostHandler))
+ {
+ // Current exception should be different from the one exception tracker is already tracking.
+ _ASSERTE(pExceptionRecord != pExInfo->m_pExceptionRecord);
+
+ // This cannot be nested exceptions - they are handled earlier (see above).
+ _ASSERTE(!bNestedException);
+
+ LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Bottom-most handler (0x%p) is the same as EstablisherFrame.\n",
+ pExInfo->m_pBottomMostHandler));
+ LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Exception record in exception tracker is 0x%p, while that of new exception is 0x%p.\n",
+ pExInfo->m_pExceptionRecord, pExceptionRecord));
+ LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: Resetting exception tracker (0x%p).\n", pExInfo));
+
+ // This will reset the exception tracker state, including the corruption severity.
+ pExInfo->Init();
+ }
+ }
+
+ // If we are handling a fault from managed code, we need to set the Thread->ExInfo->pContext to
+ // the current fault context, which is used in the stack walk to get back into the managed
+ // stack with the correct registers. (Previously, this was done by linking in a FaultingExceptionFrame
+ // record.)
+ // We are about to create the managed exception object, which may trigger a GC, so set this up now.
+
+ pExInfo->m_pExceptionRecord = pExceptionRecord;
+ pExInfo->m_pContext = pContext;
+ if (pContext && ShouldHandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread))
+ { // If this was a fault in managed code, rather than create a Frame for stackwalking,
+ // we can use this exinfo (after all, it has all the register info.)
+ pExInfo->m_ExceptionFlags.SetUseExInfoForStackwalk();
+ }
+
+ // It should now be safe for a GC to happen.
+
+    // case 1 & 3: this is the first time through for a new, nested, or rethrown exception, so see if we can
+    // find a handler. Only set up the throwable if we are the bottommost handler.
+ if (IsComPlusException(pExceptionRecord) && (!bAsynchronousThreadStop))
+ {
+
+ // Update the throwable from the last thrown object. Note: this may cause OOM, in which case we replace
+ // both throwables with the preallocated OOM exception.
+ pThread->SafeSetThrowables(pThread->LastThrownObject());
+
+        // now we've got a COM+ exception, fall through to see if we handle it
+
+ STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: fall through ExInfo:0x%p setting m_pBottomMostHandler to 0x%p from 0x%p\n",
+ pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler);
+ pExInfo->m_pBottomMostHandler = pEstablisherFrame;
+ }
+ else if (bRethrownException)
+ {
+        // If it was rethrown and not COM+, it will still be the last one thrown. Either we threw it last and
+        // stashed it here, or someone else caught it and rethrew it, in which case it will still have been
+ // originally stashed here.
+
+ // Update the throwable from the last thrown object. Note: this may cause OOM, in which case we replace
+ // both throwables with the preallocated OOM exception.
+ pThread->SafeSetThrowables(pThread->LastThrownObject());
+ STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: rethrow non-COM+ ExInfo:0x%p setting m_pBottomMostHandler to 0x%p from 0x%p\n",
+ pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler);
+ pExInfo->m_pBottomMostHandler = pEstablisherFrame;
+ }
+ else
+ {
+ if (!fIsManagedCode)
+ {
+ tct.bDontCatch = false;
+ }
+
+ if (exceptionCode == STATUS_BREAKPOINT)
+ {
+ // don't catch int 3
+ retval = ExceptionContinueSearch;
+ goto exit;
+ }
+
+        // We need to set m_pBottomMostHandler here so that Thread::IsExceptionInProgress returns 1.
+ // This is a necessary part of suppressing thread abort exceptions in the constructor
+ // of any exception object we might create.
+ STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_RealFirstPassHandler: setting ExInfo:0x%p m_pBottomMostHandler for IsExceptionInProgress to 0x%p from 0x%p\n",
+ pExInfo, pEstablisherFrame, pExInfo->m_pBottomMostHandler);
+ pExInfo->m_pBottomMostHandler = pEstablisherFrame;
+
+ // Create the managed exception object.
+ OBJECTREF throwable = CreateCOMPlusExceptionObject(pThread, pExceptionRecord, bAsynchronousThreadStop);
+
+ // Set the throwables on the thread to the newly created object. If this fails, it will return a
+ // preallocated exception object instead. This also updates the last thrown exception, for rethrows.
+ throwable = pThread->SafeSetThrowables(throwable);
+
+ // Set the exception code and pointers. We set these after setting the throwables on the thread,
+ // because if the proper exception is replaced by an OOM exception, we still want the exception code
+ // and pointers set in the OOM exception.
+ EXCEPTIONREF exceptionRef = (EXCEPTIONREF)throwable;
+ exceptionRef->SetXCode(pExceptionRecord->ExceptionCode);
+ exceptionRef->SetXPtrs(&exceptionPointers);
+ }
+
+ tct.pBottomFrame = NULL;
+
+ EEToProfilerExceptionInterfaceWrapper::ExceptionThrown(pThread);
+
+ CPFH_UpdatePerformanceCounters();
+ } // End of case-1-or-3
+
+ {
+ // Allocate storage for the stack trace.
+ OBJECTREF throwable = NULL;
+ GCPROTECT_BEGIN(throwable);
+ throwable = pThread->GetThrowable();
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ {
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ // Setup the state in current exception tracker indicating the corruption severity
+ // of the active exception.
+ CEHelper::SetupCorruptionSeverityForActiveException(bRethrownException, bNestedException,
+ CEHelper::ShouldTreatActiveExceptionAsNonCorrupting());
+ END_SO_INTOLERANT_CODE;
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+#ifdef FEATURE_CORECLR
+ // Check if we are dealing with AV or not and if we are,
+ // ensure that this is a real AV and not managed AV exception
+ BOOL fIsThrownExceptionAV = FALSE;
+ if ((pExceptionRecord->ExceptionCode == STATUS_ACCESS_VIOLATION) &&
+ (MscorlibBinder::GetException(kAccessViolationException) == throwable->GetMethodTable()))
+ {
+ // Its an AV - set the flag
+ fIsThrownExceptionAV = TRUE;
+ }
+
+ // Did we get an AV?
+ if (fIsThrownExceptionAV == TRUE)
+ {
+ // Get the escalation policy action for handling AV
+ EPolicyAction actionAV = GetEEPolicy()->GetActionOnFailure(FAIL_AccessViolation);
+
+            // Valid actions are: eNoAction (default behaviour) or eRudeExitProcess
+ _ASSERTE(((actionAV == eNoAction) || (actionAV == eRudeExitProcess)));
+ if (actionAV == eRudeExitProcess)
+ {
+ LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: AccessViolation handler found and doing RudeExitProcess due to escalation policy (eRudeExitProcess)\n"));
+
+ // EEPolicy::HandleFatalError will help us RudeExit the process.
+ // RudeExitProcess due to AV is to prevent a security risk - we are ripping
+ // at the boundary, without looking for the handlers.
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY);
+ }
+ }
+#endif // FEATURE_CORECLR
+
+        // If we're out of memory, then we figure there's probably not enough memory to maintain a stack trace, so we skip it.
+        // If we've got a stack overflow, then we figure the stack will be so huge as to make tracking the stack trace
+        // impractical, so we skip it.
+ if ((throwable == CLRException::GetPreallocatedOutOfMemoryException()) ||
+ (throwable == CLRException::GetPreallocatedStackOverflowException()))
+ {
+ tct.bAllowAllocMem = FALSE;
+ }
+ else
+ {
+ pExInfo->m_StackTraceInfo.AllocateStackTrace();
+ }
+
+ GCPROTECT_END();
+ }
+
+ // Set up information for GetExceptionPointers()/GetExceptionCode() callback.
+ pExInfo->SetExceptionCode(pExceptionRecord);
+
+ pExInfo->m_pExceptionPointers = &exceptionPointers;
+
+ if (bRethrownException || bNestedException)
+ {
+ _ASSERTE(pExInfo->m_pPrevNestedInfo != NULL);
+
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ SetStateForWatsonBucketing(bRethrownException, pExInfo->GetPreviousExceptionTracker()->GetThrowableAsHandle());
+ END_SO_INTOLERANT_CODE;
+ }
+
+#ifdef DEBUGGING_SUPPORTED
+ //
+ // At this point the exception is still fresh to us, so assert that
+ // there should be nothing from the debugger on it.
+ //
+ _ASSERTE(!pExInfo->m_ExceptionFlags.DebuggerInterceptInfo());
+#endif
+
+ if (pThread->IsRudeAbort())
+ {
+ OBJECTREF rudeAbortThrowable = CLRException::GetPreallocatedRudeThreadAbortException();
+
+ if (pThread->GetThrowable() != rudeAbortThrowable)
+ {
+ // Neither of these sets will throw because the throwable that we're setting is a preallocated
+ // exception. This also updates the last thrown exception, for rethrows.
+ pThread->SafeSetThrowables(rudeAbortThrowable);
+ }
+
+ if (!pThread->IsRudeAbortInitiated())
+ {
+ pThread->PreWorkForThreadAbort();
+ }
+ }
+
+ LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: looking for handler bottom %x, top %x\n",
+ tct.pBottomFrame, tct.pTopFrame));
+ tct.bReplaceStack = pExInfo->m_pBottomMostHandler == pEstablisherFrame && !bRethrownException;
+ tct.bSkipLastElement = bRethrownException && bNestedException;
+ found = LookForHandler(&exceptionPointers,
+ pThread,
+ &tct);
+
+ // We have searched this far.
+ pExInfo->m_pSearchBoundary = tct.pTopFrame;
+ LOG((LF_EH, LL_INFO1000, "CPFH_RealFirstPassHandler: set pSearchBoundary to 0x%p\n", pExInfo->m_pSearchBoundary));
+
+ if ((found == LFH_NOT_FOUND)
+#ifdef DEBUGGING_SUPPORTED
+ && !pExInfo->m_ExceptionFlags.DebuggerInterceptInfo()
+#endif
+ )
+ {
+ LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: NOT_FOUND\n"));
+
+ if (tct.pTopFrame == FRAME_TOP)
+ {
+ LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: NOT_FOUND at FRAME_TOP\n"));
+ }
+
+ retval = ExceptionContinueSearch;
+ goto exit;
+ }
+ else
+ {
+ // so we are going to handle the exception
+
+ // Remove the nested exception record -- before calling RtlUnwind.
+ // The second-pass callback for a NestedExceptionRecord assumes that if it's
+ // being unwound, it should pop one exception from the pExInfo chain. This is
+ // true for any older NestedRecords that might be unwound -- but not for the
+ // new one we're about to add. To avoid this, we remove the new record
+ // before calling Unwind.
+ //
+ // <TODO>@NICE: This can probably be a little cleaner -- the nested record currently
+ // is also used to guard the running of the filter code. When we clean up the
+ // behaviour of exceptions within filters, we should be able to get rid of this
+ // PUSH/POP/PUSH behaviour.</TODO>
+ _ASSERTE(bPopNestedHandlerExRecord);
+
+ UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));
+
+ // Since we are going to handle the exception we switch into preemptive mode
+ GCX_PREEMP_NO_DTOR();
+
+#ifdef DEBUGGING_SUPPORTED
+ //
+ // Check if the debugger wants to intercept this frame at a different point than where we are.
+ //
+ if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo())
+ {
+ ClrDebuggerDoUnwindAndIntercept(pEstablisherFrame, pExceptionRecord);
+
+ //
+            // If this returns, then the debugger couldn't do its stuff and we default to the found handler.
+ //
+ if (found == LFH_NOT_FOUND)
+ {
+ retval = ExceptionContinueSearch;
+                // We need to be sure to switch back into Cooperative mode since we are going to
+                // jump to the exit: label and follow the normal return path (it is expected that
+                // CPFH_RealFirstPassHandler returns in COOP mode).
+ GCX_PREEMP_NO_DTOR_END();
+ goto exit;
+ }
+ }
+#endif
+
+ LOG((LF_EH, LL_INFO100, "CPFH_RealFirstPassHandler: handler found: %s\n", tct.pFunc->m_pszDebugMethodName));
+
+ CallRtlUnwindSafe(pEstablisherFrame, RtlUnwindCallback, pExceptionRecord, 0);
+ // on x86 at least, RtlUnwind always returns
+
+ // Note: we've completed the unwind pass up to the establisher frame, and we're headed off to finish our
+        // cleanup and end up back in jitted code. Any more FS:0 handlers pushed from this point on out will _not_ be
+ // unwound.
+ // Note: we are still in Preemptive mode here and that is correct, COMPlusAfterUnwind will switch us back
+ // into Cooperative mode.
+ return COMPlusAfterUnwind(pExceptionRecord, pEstablisherFrame, tct);
+ }
+
+exit:
+ {
+ // We need to be in COOP if we get here
+ GCX_ASSERT_COOP();
+ }
+
+ // If we got as far as saving pExInfo, save the context pointer so it's available for the unwind.
+ if (pExInfo)
+ {
+ pExInfo->m_pContext = pContext;
+ // pExInfo->m_pExceptionPointers points to a local structure, which is now going out of scope.
+ pExInfo->m_pExceptionPointers = NULL;
+ }
+
+#if defined(USE_FEF)
+ if (bPopFaultingExceptionFrame)
+ {
+ faultingExceptionFrame.Pop();
+ }
+#endif // USE_FEF
+
+ if (bPopNestedHandlerExRecord)
+ {
+ UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));
+ }
+ return retval;
+} // CPFH_RealFirstPassHandler()
+
+
+//******************************************************************************
+//
+void InitializeExceptionHandling()
+{
+ WRAPPER_NO_CONTRACT;
+
+ InitSavedExceptionInfo();
+
+ CLRAddVectoredHandlers();
+
+ // Initialize the lock used for synchronizing access to the stacktrace in the exception object
+ g_StackTraceArrayLock.Init(LOCK_TYPE_DEFAULT, TRUE);
+}
+
+//******************************************************************************
+static inline EXCEPTION_DISPOSITION __cdecl
+CPFH_FirstPassHandler(EXCEPTION_RECORD *pExceptionRecord,
+ EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
+ CONTEXT *pContext,
+ DISPATCHER_CONTEXT *pDispatcherContext)
+{
+ WRAPPER_NO_CONTRACT;
+ EXCEPTION_DISPOSITION retval;
+
+ _ASSERTE (!(pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)));
+
+ DWORD exceptionCode = pExceptionRecord->ExceptionCode;
+
+ Thread *pThread = GetThread();
+
+ STRESS_LOG4(LF_EH, LL_INFO100,
+ "CPFH_FirstPassHandler: pEstablisherFrame = %x EH code = %x EIP = %x with ESP = %x\n",
+ pEstablisherFrame, exceptionCode, pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0);
+
+ EXCEPTION_POINTERS ptrs = { pExceptionRecord, pContext };
+
+ // Call to the vectored handler to give other parts of the Runtime a chance to jump in and take over an
+ // exception before we do too much with it. The most important point in the vectored handler is not to toggle
+ // the GC mode.
+ DWORD filter = CLRVectoredExceptionHandler(&ptrs);
+
+ if (filter == (DWORD) EXCEPTION_CONTINUE_EXECUTION)
+ {
+ return ExceptionContinueExecution;
+ }
+ else if (filter == EXCEPTION_CONTINUE_SEARCH)
+ {
+ return ExceptionContinueSearch;
+ }
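+
+    // A note on the two vocabularies above: CLRVectoredExceptionHandler returns SEH filter
+    // values (EXCEPTION_CONTINUE_EXECUTION == -1 -- hence the (DWORD) cast -- and
+    // EXCEPTION_CONTINUE_SEARCH == 0), whereas this handler returns EXCEPTION_DISPOSITION
+    // values (ExceptionContinueExecution / ExceptionContinueSearch) to the OS dispatcher.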
+
+#if defined(STRESS_HEAP)
+ //
+ // Check to see if this exception is due to GCStress. Since the GCStress mechanism only injects these faults
+ // into managed code, we only need to check for them in CPFH_FirstPassHandler.
+ //
+ if (IsGcMarker(exceptionCode, pContext))
+ {
+ return ExceptionContinueExecution;
+ }
+#endif // STRESS_HEAP
+
+    // We always want to be in co-operative mode when we run this function, and whenever we return
+    // from it, we want to go to pre-emptive mode because we are returning to the OS.
+ BOOL disabled = pThread->PreemptiveGCDisabled();
+ GCX_COOP_NO_DTOR();
+
+ BOOL bAsynchronousThreadStop = IsThreadHijackedForThreadStop(pThread, pExceptionRecord);
+
+ if (bAsynchronousThreadStop)
+ {
+ // If we ever get here in preemptive mode, we're in trouble. We've
+ // changed the thread's IP to point at a little function that throws ... if
+        // the thread were to be in preemptive mode and a GC occurred, the stack
+        // crawl would have been all messed up (because we have no frame that points
+ // us back to the right place in managed code).
+ _ASSERTE(disabled);
+
+ AdjustContextForThreadStop(pThread, pContext);
+ LOG((LF_EH, LL_INFO100, "CPFH_FirstPassHandler is Asynchronous Thread Stop or Abort\n"));
+ }
+
+ pThread->ResetThrowControlForThread();
+
+ CPFH_VerifyThreadIsInValidState(pThread, exceptionCode, pEstablisherFrame);
+
+    // If we were in cooperative mode when we came in here, then it's okay to see if we should do HandleManagedFault
+    // and push a FaultingExceptionFrame. If we weren't in coop mode coming in here, then it means that there's no
+    // way the exception could really be from managed code. It might look like it was from managed code, but in
+    // reality it's a rethrow from unmanaged code, either unmanaged user code, or unmanaged EE implementation.
+ if (disabled && ShouldHandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread))
+ {
+#if defined(USE_FEF)
+ HandleManagedFault(pExceptionRecord, pContext, pEstablisherFrame, pThread);
+ retval = ExceptionContinueExecution;
+ goto exit;
+#else // USE_FEF
+ // Save the context pointer in the Thread's EXInfo, so that a stack crawl can recover the
+ // register values from the fault.
+
+ //@todo: I haven't yet found any case where we need to do anything here. If there are none, eliminate
+ // this entire if () {} block.
+#endif // USE_FEF
+ }
+
+ // OK. We're finally ready to start the real work. Nobody else grabbed the exception in front of us. Now we can
+ // get started.
+ retval = CPFH_RealFirstPassHandler(pExceptionRecord,
+ pEstablisherFrame,
+ pContext,
+ pDispatcherContext,
+ bAsynchronousThreadStop,
+ disabled);
+
+#if defined(USE_FEF) // This label is only used in the HandleManagedFault() case above.
+exit:
+#endif
+ if (retval != ExceptionContinueExecution || !disabled)
+ {
+ GCX_PREEMP_NO_DTOR();
+ }
+
+ STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_FirstPassHandler: exiting with retval %d\n", retval);
+ return retval;
+} // CPFH_FirstPassHandler()
+
+//******************************************************************************
+inline void
+CPFH_UnwindFrames1(Thread* pThread, EXCEPTION_REGISTRATION_RECORD* pEstablisherFrame, DWORD exceptionCode)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
+
+ // Ready to unwind the stack...
+ ThrowCallbackType tct;
+ tct.Init();
+ tct.bIsUnwind = TRUE;
+ tct.pTopFrame = GetCurrFrame(pEstablisherFrame); // highest frame to search to
+ tct.pBottomFrame = NULL;
+
+ // Set the flag indicating if the current exception represents a longjmp.
+ // See comment in COMPlusUnwindCallback for details.
+ CORRUPTING_EXCEPTIONS_ONLY(tct.m_fIsLongJump = (exceptionCode == STATUS_LONGJUMP);)
+
+ #ifdef _DEBUG
+ tct.pCurrentExceptionRecord = pEstablisherFrame;
+ tct.pPrevExceptionRecord = GetPrevSEHRecord(pEstablisherFrame);
+ #endif
+
+ #ifdef DEBUGGING_SUPPORTED
+ EXCEPTION_REGISTRATION_RECORD *pInterceptEstablisherFrame = NULL;
+
+ // If the exception is intercepted, use information stored in the DebuggerExState to unwind the stack.
+ if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo())
+ {
+ pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(&pInterceptEstablisherFrame,
+ NULL, // MethodDesc **ppFunc,
+ NULL, // int *pdHandler,
+ NULL, // BYTE **ppStack
+ NULL, // ULONG_PTR *pNativeOffset,
+ NULL // Frame **ppFrame)
+ );
+ LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: frames are Est 0x%X, Intercept 0x%X\n",
+ pEstablisherFrame, pInterceptEstablisherFrame));
+
+ //
+ // When we set up for the interception we store off the CPFH or CPNEH that we
+ // *know* will handle unwinding the destination of the intercept.
+ //
+ // However, a CPNEH with the same limiting Capital-F-rame could do the work
+ // and unwind us, so...
+ //
+ // If this is the exact frame handler we are supposed to search for, or
+ // if this frame handler services the same Capital-F-rame as the frame handler
+ // we are looking for (i.e. this frame handler may do the work that we would
+ // expect our frame handler to do),
+ // then
+ // we need to pass the interception destination during this unwind.
+ //
+ _ASSERTE(IsUnmanagedToManagedSEHHandler(pEstablisherFrame));
+
+ if ((pEstablisherFrame == pInterceptEstablisherFrame) ||
+ (GetCurrFrame(pEstablisherFrame) == GetCurrFrame(pInterceptEstablisherFrame)))
+ {
+ pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(NULL,
+ &(tct.pFunc),
+ &(tct.dHandler),
+ &(tct.pStack),
+ NULL,
+ &(tct.pBottomFrame)
+ );
+
+ LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: going to: pFunc:%#X, pStack:%#X\n",
+ tct.pFunc, tct.pStack));
+
+ }
+
+ }
+ #endif
+
+ UnwindFrames(pThread, &tct);
+
+ LOG((LF_EH, LL_INFO1000, "CPFH_UnwindFrames1: after unwind ec:%#x, tct.pTopFrame:0x%p, pSearchBndry:0x%p\n"
+ " pEstFrame:0x%p, IsC+NestExRec:%d, !Nest||Active:%d\n",
+ exceptionCode, tct.pTopFrame, pExInfo->m_pSearchBoundary, pEstablisherFrame,
+ IsComPlusNestedExceptionRecord(pEstablisherFrame),
+ (!IsComPlusNestedExceptionRecord(pEstablisherFrame) || reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame)->m_ActiveForUnwind)));
+
+ if (tct.pTopFrame >= pExInfo->m_pSearchBoundary &&
+ (!IsComPlusNestedExceptionRecord(pEstablisherFrame) ||
+ reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame)->m_ActiveForUnwind) )
+ {
+ // If this is the search boundary, and we're not a nested handler, then
+ // this is the last time we'll see this exception. Time to unwind our
+ // exinfo.
+ STRESS_LOG0(LF_EH, LL_INFO100, "CPFH_UnwindFrames1: Exception unwind -- unmanaged catcher detected\n");
+ pExInfo->UnwindExInfo((VOID*)pEstablisherFrame);
+ }
+} // CPFH_UnwindFrames1()
+
+//******************************************************************************
+inline EXCEPTION_DISPOSITION __cdecl
+CPFH_UnwindHandler(EXCEPTION_RECORD *pExceptionRecord,
+ EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
+ CONTEXT *pContext,
+ void *pDispatcherContext)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND));
+
+ #ifdef _DEBUG
+ // Note: you might be inclined to write "static int breakOnSecondPass = CLRConfig::GetConfigValue(...);", but
+ // you can't do that here. That causes C++ EH to be generated under the covers for this function, and this
+    // function isn't allowed to have any C++ EH in it because it's never going to return.
+ static int breakOnSecondPass; // = 0
+ static BOOL breakOnSecondPassSetup; // = FALSE
+ if (!breakOnSecondPassSetup)
+ {
+ breakOnSecondPass = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnSecondPass);
+ breakOnSecondPassSetup = TRUE;
+ }
+ if (breakOnSecondPass != 0)
+ {
+ _ASSERTE(!"Unwind handler");
+ }
+ #endif
+
+ DWORD exceptionCode = pExceptionRecord->ExceptionCode;
+ Thread *pThread = GetThread();
+
+ ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
+
+ STRESS_LOG4(LF_EH, LL_INFO100, "In CPFH_UnwindHandler EHCode = %x EIP = %x with ESP = %x, pEstablisherFrame = 0x%p\n", exceptionCode,
+ pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0, pEstablisherFrame);
+
+ // We always want to be in co-operative mode when we run this function. Whenever we return
+    // from it, we want to go to pre-emptive mode because we are returning to the OS.
+
+ {
+ // needs to be in its own scope to avoid polluting the namespace, since
+ // we don't do a _END then we don't revert the state
+ GCX_COOP_NO_DTOR();
+ }
+
+ CPFH_VerifyThreadIsInValidState(pThread, exceptionCode, pEstablisherFrame);
+
+ if (IsComPlusNestedExceptionRecord(pEstablisherFrame))
+ {
+ NestedHandlerExRecord *pHandler = reinterpret_cast<NestedHandlerExRecord*>(pEstablisherFrame);
+ if (pHandler->m_pCurrentExInfo != NULL)
+ {
+ // See the comment at the end of COMPlusNestedExceptionHandler about nested exception.
+ // OS is going to skip the EstablisherFrame before our NestedHandler.
+ if (pHandler->m_pCurrentExInfo->m_pBottomMostHandler <= pHandler->m_pCurrentHandler)
+ {
+ // We're unwinding -- the bottom most handler is potentially off top-of-stack now. If
+ // it is, change it to the next COM+ frame. (This one is not good, as it's about to
+ // disappear.)
+ EXCEPTION_REGISTRATION_RECORD *pNextBottomMost = GetNextCOMPlusSEHRecord(pHandler->m_pCurrentHandler);
+
+ STRESS_LOG3(LF_EH, LL_INFO10000, "COMPlusNestedExceptionHandler: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n",
+ pHandler->m_pCurrentExInfo, pHandler->m_pCurrentExInfo->m_pBottomMostHandler, pNextBottomMost);
+
+ pHandler->m_pCurrentExInfo->m_pBottomMostHandler = pNextBottomMost;
+ }
+ }
+ }
+
+    // This establishes a marker so we can determine if we are processing a nested exception.
+    // We don't want to use the current frame to limit the search, as it could have been unwound by
+    // the time we get to the nested handler (ie, if we find an exception, unwind to the call point,
+    // resume in the catch, and then get another exception), so we make the nested handler
+    // have the same boundary as this one. If the nested handler can't find a handler, we won't
+    // end up searching this frame list twice, because the nested handler will set the search
+    // boundary in the thread; so if we get back to this handler, it will have a range that starts
+    // and ends at the same place.
+ NestedHandlerExRecord nestedHandlerExRecord;
+ nestedHandlerExRecord.Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, GetCurrFrame(pEstablisherFrame));
+
+ nestedHandlerExRecord.m_ActiveForUnwind = TRUE;
+ nestedHandlerExRecord.m_pCurrentExInfo = pExInfo;
+ nestedHandlerExRecord.m_pCurrentHandler = pEstablisherFrame;
+
+ INSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));
+
+ // Unwind the stack. The establisher frame sets the boundary.
+ CPFH_UnwindFrames1(pThread, pEstablisherFrame, exceptionCode);
+
+ // We're unwinding -- the bottom most handler is potentially off top-of-stack now. If
+ // it is, change it to the next COM+ frame. (This one is not good, as it's about to
+ // disappear.)
+ if (pExInfo->m_pBottomMostHandler &&
+ pExInfo->m_pBottomMostHandler <= pEstablisherFrame)
+ {
+ EXCEPTION_REGISTRATION_RECORD *pNextBottomMost = GetNextCOMPlusSEHRecord(pEstablisherFrame);
+
+ // If there is no previous COM+ SEH handler, GetNextCOMPlusSEHRecord() will return -1. Much later, we will dereference that and AV.
+ _ASSERTE (pNextBottomMost != EXCEPTION_CHAIN_END);
+
+ STRESS_LOG3(LF_EH, LL_INFO10000, "CPFH_UnwindHandler: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n",
+ pExInfo, pExInfo->m_pBottomMostHandler, pNextBottomMost);
+
+ pExInfo->m_pBottomMostHandler = pNextBottomMost;
+ }
+
+ {
+ // needs to be in its own scope to avoid polluting the namespace, since
+ // we don't do a _END then we don't revert the state
+ GCX_PREEMP_NO_DTOR();
+ }
+ UNINSTALL_EXCEPTION_HANDLING_RECORD(&(nestedHandlerExRecord.m_ExReg));
+
+    // If we are here, then the exception was not caught in managed code protected by this
+    // ComplusFrameHandler. Hence, reset the thread abort state if this is the last personality routine
+    // for managed code on the stack.
+ ResetThreadAbortState(pThread, pEstablisherFrame);
+
+ STRESS_LOG0(LF_EH, LL_INFO100, "CPFH_UnwindHandler: Leaving with ExceptionContinueSearch\n");
+ return ExceptionContinueSearch;
+} // CPFH_UnwindHandler()
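+
+// A note on the two SEH passes as they appear in this file: in the first pass the OS walks the
+// FS:0 chain calling each registered handler to search for a catcher (CPFH_FirstPassHandler);
+// when a managed handler is found, CPFH_RealFirstPassHandler above calls RtlUnwind, which drives
+// the second pass, re-invoking the records below the target with EXCEPTION_UNWINDING set
+// (CPFH_UnwindHandler). COMPlusFrameHandler below dispatches between the two on the ExceptionFlags.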
+
+//******************************************************************************
+// This is the first handler that is called in the context of managed code
+// It is the first level of defense and tries to find a handler in the user
+// code to handle the exception
+//-------------------------------------------------------------------------
+// EXCEPTION_DISPOSITION __cdecl COMPlusFrameHandler(
+// EXCEPTION_RECORD *pExceptionRecord,
+// _EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
+// CONTEXT *pContext,
+// DISPATCHER_CONTEXT *pDispatcherContext)
+//
+// See http://www.microsoft.com/msj/0197/exception/exception.aspx for a background piece on Windows
+// unmanaged structured exception handling.
+EXCEPTION_HANDLER_IMPL(COMPlusFrameHandler)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(!DebugIsEECxxException(pExceptionRecord) && "EE C++ Exception leaked into managed code!");
+
+ STRESS_LOG5(LF_EH, LL_INFO100, "In COMPlusFrameHander EH code = %x flag = %x EIP = %x with ESP = %x, pEstablisherFrame = 0x%p\n",
+ pExceptionRecord->ExceptionCode, pExceptionRecord->ExceptionFlags,
+ pContext ? GetIP(pContext) : 0, pContext ? GetSP(pContext) : 0, pEstablisherFrame);
+
+ _ASSERTE((pContext == NULL) || ((pContext->ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL));
+
+ if (g_fNoExceptions)
+ return ExceptionContinueSearch; // No EH during EE shutdown.
+
+    // Check if the exception represents a GCStress marker. If it does,
+    // we shouldn't record its entry in the TLS, as such exceptions are
+    // continuable and can confuse the VM into treating them as CSE,
+    // since they are implemented using the illegal instruction exception.
+
+ bool fIsGCMarker = false;
+
+#ifdef HAVE_GCCOVER // This is a debug only macro
+ if (GCStress<cfg_instr_jit>::IsEnabled())
+ {
+        // UnsafeTlsGetValue trashes last error. When Complus_GCStress=4, GC is invoked
+        // on every allowable JITed instruction by means of our exception handling mechanism;
+        // it is very easy to trash the last error. For example, a p/invoke may have called a native method
+        // which sets last error. Before we get the last error in the IL stub, it would be trashed here.
+ DWORD dwLastError = GetLastError();
+ fIsGCMarker = IsGcMarker(pExceptionRecord->ExceptionCode, pContext);
+ if (!fIsGCMarker)
+ {
+ SaveCurrentExceptionInfo(pExceptionRecord, pContext);
+ }
+ SetLastError(dwLastError);
+ }
+ else
+#endif
+ {
+ // GCStress does not exist on retail builds (see IsGcMarker implementation for details).
+ SaveCurrentExceptionInfo(pExceptionRecord, pContext);
+ }
+
+ if (fIsGCMarker)
+ {
+ // If this was a GCStress marker exception, then return
+ // ExceptionContinueExecution to the OS.
+ return ExceptionContinueExecution;
+ }
+
+ EXCEPTION_DISPOSITION retVal = ExceptionContinueSearch;
+
+ Thread *pThread = GetThread();
+ if ((pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)) == 0)
+ {
+ if (IsSOExceptionCode(pExceptionRecord->ExceptionCode))
+ {
+ EEPolicy::HandleStackOverflow(SOD_ManagedFrameHandler, (void*)pEstablisherFrame);
+
+            // VC's unhandled exception filter plays with the stack. It VirtualAlloc's a new stack, and
+            // then launches Watson from the new stack. When Watson asks the CLR to save the required data,
+            // we are not able to walk the stack.
+            // Set the Context in ExInfo so that our Watson dump routine knows how to walk this stack.
+ ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
+ pExInfo->m_pContext = pContext;
+
+ // Save the reference to the topmost handler we see during first pass when an SO goes past us.
+ // When an unwind gets triggered for the exception, we will reset the frame chain when we reach
+ // the topmost handler we saw during the first pass.
+ //
+ // This unifies, behaviour-wise, 32bit with 64bit.
+ if ((pExInfo->m_pTopMostHandlerDuringSO == NULL) ||
+ (pEstablisherFrame > pExInfo->m_pTopMostHandlerDuringSO))
+ {
+ pExInfo->m_pTopMostHandlerDuringSO = pEstablisherFrame;
+ }
+
+            // Switch to preemptive mode since we are returning back to the OS.
+            // We will do the quick switch since we are short on stack.
+ FastInterlockAnd (&pThread->m_fPreemptiveGCDisabled, 0);
+
+ return ExceptionContinueSearch;
+ }
+ else
+ {
+#ifdef FEATURE_STACK_PROBE
+ if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
+ {
+ RetailStackProbe(static_cast<unsigned int>(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT)), pThread);
+ }
+#endif
+ }
+ }
+ else
+ {
+ DWORD exceptionCode = pExceptionRecord->ExceptionCode;
+
+ if (exceptionCode == STATUS_UNWIND)
+ {
+            // If exceptionCode is STATUS_UNWIND, RtlUnwind was called with a NULL ExceptionRecord,
+            // so the OS uses a faked ExceptionRecord with the STATUS_UNWIND code. In that case we need
+            // to look at our saved exception code.
+ exceptionCode = GetCurrentExceptionCode();
+ }
+
+ if (IsSOExceptionCode(exceptionCode))
+ {
+ // We saved the context during the first pass in case the stack overflow exception is
+ // unhandled and Watson dump code needs it. Now we are in the second pass, therefore
+ // either the exception is handled by user code, or we have finished unhandled exception
+ // filter process, and the OS is unwinding the stack. Either way, we don't need the
+ // context any more. It is very important to reset the context so that our code does not
+ // accidentally walk the frame using the dangling context in ExInfoWalker::WalkToPosition.
+ ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
+ pExInfo->m_pContext = NULL;
+
+ // We should have the reference to the topmost handler seen during the first pass of SO
+ _ASSERTE(pExInfo->m_pTopMostHandlerDuringSO != NULL);
+
+ // Reset frame chain till we reach the topmost establisher frame we saw in the first pass.
+ // This will ensure that if any intermediary frame calls back into managed (e.g. native frame
+ // containing a __finally that reverse pinvokes into managed), then we have the correct
+ // explicit frame on the stack. Resetting the frame chain only when we reach the topmost
+            // personality routine seen in the first pass may not result in the expected behaviour,
+            // especially during stack walks, when the crawl frame needs to be initialized from the
+            // explicit frame.
+ if (pEstablisherFrame <= pExInfo->m_pTopMostHandlerDuringSO)
+ {
+ GCX_COOP_NO_DTOR();
+
+ if (pThread->GetFrame() < GetCurrFrame(pEstablisherFrame))
+ {
+ // We are very short of stack. We avoid calling UnwindFrame which may
+ // run unknown code here.
+ pThread->SetFrame(GetCurrFrame(pEstablisherFrame));
+ }
+ }
+
+            // Switch to preemptive mode since we are returning back to the OS.
+            // We will do the quick switch since we are short on stack.
+ FastInterlockAnd(&pThread->m_fPreemptiveGCDisabled, 0);
+
+ return ExceptionContinueSearch;
+ }
+ }
+
+ // <TODO> . We need to probe here, but can't introduce destructors etc. </TODO>
+ BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
+
+ if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND))
+ {
+ retVal = CPFH_UnwindHandler(pExceptionRecord,
+ pEstablisherFrame,
+ pContext,
+ pDispatcherContext);
+ }
+ else
+ {
+
+ /* Make no assumptions about the current machine state.
+ <TODO>@PERF: Only needs to be called by the very first handler invoked by SEH </TODO>*/
+ ResetCurrentContext();
+
+ retVal = CPFH_FirstPassHandler(pExceptionRecord,
+ pEstablisherFrame,
+ pContext,
+ pDispatcherContext);
+
+ }
+
+ END_CONTRACT_VIOLATION;
+
+ return retVal;
+} // COMPlusFrameHandler()
+
+
+//-------------------------------------------------------------------------
+// This is called by the EE to restore the stack pointer if necessary.
+//-------------------------------------------------------------------------
+
+// This can't be inlined into the caller to avoid introducing EH frame
+NOINLINE LPVOID COMPlusEndCatchWorker(Thread * pThread)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch:called with "
+ "pThread:0x%x\n",pThread));
+
+ // indicate that we are out of the managed clause as early as possible
+ ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
+ pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE);
+
+ void* esp = NULL;
+
+ // @todo . We need to probe in the EH code, but can't introduce destructors etc.
+ BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
+
+ // Notify the profiler that the catcher has finished running
+ // IL stubs don't contain catch blocks so inability to perform this check does not matter.
+ // if (!pFunc->IsILStub())
+ EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherLeave();
+
+ // no need to set pExInfo->m_ClauseType = (DWORD)COR_PRF_CLAUSE_NONE now that the
+    // notification is done, because the ExInfo record is about to be popped off anyway.
+
+ LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch:pThread:0x%x\n",pThread));
+
+#ifdef _DEBUG
+ gLastResumedExceptionFunc = NULL;
+ gLastResumedExceptionHandler = 0;
+#endif
+ // Set the thrown object to NULL as no longer needed. This also sets the last thrown object to NULL.
+ pThread->SafeSetThrowables(NULL);
+
+ // reset the stashed exception info
+ pExInfo->m_pExceptionRecord = NULL;
+ pExInfo->m_pContext = NULL;
+ pExInfo->m_pExceptionPointers = NULL;
+
+ if (pExInfo->m_pShadowSP)
+ {
+ *pExInfo->m_pShadowSP = 0; // Reset the shadow SP
+ }
+
+ // pExInfo->m_dEsp was set in ResumeAtJITEH(). It is the Esp of the
+ // handler nesting level which catches the exception.
+ esp = (void*)(size_t)pExInfo->m_dEsp;
+
+ pExInfo->UnwindExInfo(esp);
+
+ // This will set the last thrown to be either null if we have handled all the exceptions in the nested chain or
+ // to whatever the current exception is.
+ //
+ // In a case when we're nested inside another catch block, the domain in which we're executing may not be the
+    // same as the domain of the throwable that was just made the current throwable above. Therefore, we
+    // make a special effort to preserve the domain of the throwable as we update the last thrown object.
+ pThread->SafeUpdateLastThrownObject();
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Since the catch clause has successfully executed and we are exiting it, reset the corruption severity
+ // in the ThreadExceptionState for the last active exception. This will ensure that when the next exception
+    // gets thrown/raised, the EH tracker won't pick up an invalid value.
+ //
+ // This function (COMPlusEndCatch) can also be called by the in-proc debugger helper thread on x86 when
+ // an attempt to SetIP takes place to set IP outside the catch clause. In such a case, managed thread object
+    // will not be available. Thus, we should reset the severity only if it's not such a thread.
+ //
+ // This behaviour (of debugger doing SetIP) is not allowed on 64bit since the catch clauses are implemented
+    // as a separate funclet and it's just not allowed to set the IP across EH scopes, such as from inside a catch
+ // clause to outside of the catch clause.
+
+ bool fIsDebuggerHelperThread = (g_pDebugInterface == NULL) ? false : g_pDebugInterface->ThisIsHelperThread();
+ if (fIsDebuggerHelperThread == false)
+ {
+ CEHelper::ResetLastActiveCorruptionSeverityPostCatchHandler();
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch: esp=%p\n", esp));
+
+ END_CONTRACT_VIOLATION;
+
+ return esp;
+}
+
+//
+// This function works in conjunction with JIT_EndCatch. On input, the parameters are set as follows:
+// ebp, ebx, edi, esi: the values of these registers at the end of the catch block
+// *pRetAddress: the next instruction after the call to JIT_EndCatch
+//
+// On output, *pRetAddress is the instruction at which to resume execution. This may be user code,
+// or it may be ThrowControlForThread (which will re-raise a pending ThreadAbortException).
+//
+// Returns the esp to set before resuming at *pRetAddress.
+//
+LPVOID STDCALL COMPlusEndCatch(LPVOID ebp, DWORD ebx, DWORD edi, DWORD esi, LPVOID* pRetAddress)
+{
+ //
+ // PopNestedExceptionRecords directly manipulates fs:[0] chain. This method can't have any EH!
+ //
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ void* esp = COMPlusEndCatchWorker(GetThread());
+
+ // We are going to resume at a handler nesting level whose esp is dEsp. Pop off any SEH records below it. This
+ // would be the COMPlusNestedExceptionHandler we had inserted.
+ PopNestedExceptionRecords(esp);
+
+ //
+ // Set up m_OSContext for the call to COMPlusCheckForAbort
+ //
+ Thread* pThread = GetThread();
+ _ASSERTE(pThread != NULL);
+
+ SetIP(pThread->m_OSContext, (PCODE)*pRetAddress);
+ SetSP(pThread->m_OSContext, (TADDR)esp);
+ SetFP(pThread->m_OSContext, (TADDR)ebp);
+ pThread->m_OSContext->Ebx = ebx;
+ pThread->m_OSContext->Edi = edi;
+ pThread->m_OSContext->Esi = esi;
+
+ LPVOID throwControl = COMPlusCheckForAbort((UINT_PTR)*pRetAddress);
+ if (throwControl)
+ *pRetAddress = throwControl;
+
+ return esp;
+}
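+
+// An illustrative sketch (not the actual JIT_EndCatch implementation) of how the values computed
+// above are meant to be consumed by the assembly helper:
+//
+//     esp' = COMPlusEndCatch(ebp, ebx, edi, esi, &retAddr);  // may redirect retAddr
+//     mov  esp, esp'     ; switch to the stack pointer of the catching nesting level
+//     jmp  retAddr       ; resume at user code, or at ThrowControlForThread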
+
+#endif // !DACCESS_COMPILE
+
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(CONTEXT * pContext)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ UINT_PTR stackSlot = pContext->Ebp + REDIRECTSTUB_EBP_OFFSET_CONTEXT;
+ PTR_PTR_CONTEXT ppContext = dac_cast<PTR_PTR_CONTEXT>((TADDR)stackSlot);
+ return *ppContext;
+}
+
+#if !defined(DACCESS_COMPILE)
+
+PEXCEPTION_REGISTRATION_RECORD GetCurrentSEHRecord()
+{
+ WRAPPER_NO_CONTRACT;
+
+ LPVOID fs0 = (LPVOID)__readfsdword(0);
+
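+    // For reference, each entry on the FS:[0] chain read above is the standard x86 SEH
+    // registration record, conceptually:
+    //
+    //     struct EXCEPTION_REGISTRATION_RECORD {
+    //         EXCEPTION_REGISTRATION_RECORD *Next;    // next (outer) record, or EXCEPTION_CHAIN_END
+    //         PEXCEPTION_ROUTINE             Handler; // e.g. COMPlusFrameHandler
+    //     };
+    //
+    // Installing a record conceptually does: record.Next = fs:[0]; fs:[0] = &record.
+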
+#if 0 // This walk is too expensive considering we hit it every time we have a CONTRACT(NOTHROW)
+#ifdef _DEBUG
+ EXCEPTION_REGISTRATION_RECORD *pEHR = (EXCEPTION_REGISTRATION_RECORD *)fs0;
+ LPVOID spVal;
+ __asm {
+ mov spVal, esp
+ }
+
+    // Check that all the EH frames are greater than the current stack value. If not, the
+    // stack has been updated somehow without unwinding the SEH chain.
+
+ // LOG((LF_EH, LL_INFO1000000, "ER Chain:\n"));
+ while (pEHR != NULL && pEHR != EXCEPTION_CHAIN_END) {
+ // LOG((LF_EH, LL_INFO1000000, "\tp: prev:p handler:%x\n", pEHR, pEHR->Next, pEHR->Handler));
+ if (pEHR < spVal) {
+ if (gLastResumedExceptionFunc != 0)
+ _ASSERTE(!"Stack is greater than start of SEH chain - possible missing leave in handler. See gLastResumedExceptionHandler & gLastResumedExceptionFunc for info");
+ else
+ _ASSERTE(!"Stack is greater than start of SEH chain (FS:0)");
+ }
+ if (pEHR->Handler == (void *)-1)
+ _ASSERTE(!"Handler value has been corrupted");
+
+ _ASSERTE(pEHR < pEHR->Next);
+
+ pEHR = pEHR->Next;
+ }
+#endif
+#endif
+
+ return (EXCEPTION_REGISTRATION_RECORD*) fs0;
+}
+
+PEXCEPTION_REGISTRATION_RECORD GetFirstCOMPlusSEHRecord(Thread *pThread) {
+ WRAPPER_NO_CONTRACT;
+ EXCEPTION_REGISTRATION_RECORD *pEHR = *(pThread->GetExceptionListPtr());
+ if (pEHR == EXCEPTION_CHAIN_END || IsUnmanagedToManagedSEHHandler(pEHR)) {
+ return pEHR;
+ } else {
+ return GetNextCOMPlusSEHRecord(pEHR);
+ }
+}
+
+
+PEXCEPTION_REGISTRATION_RECORD GetPrevSEHRecord(EXCEPTION_REGISTRATION_RECORD *next)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsUnmanagedToManagedSEHHandler(next));
+
+ EXCEPTION_REGISTRATION_RECORD *pEHR = GetCurrentSEHRecord();
+ _ASSERTE(pEHR != 0 && pEHR != EXCEPTION_CHAIN_END);
+
+ EXCEPTION_REGISTRATION_RECORD *pBest = 0;
+ while (pEHR != next) {
+ if (IsUnmanagedToManagedSEHHandler(pEHR))
+ pBest = pEHR;
+ pEHR = pEHR->Next;
+ _ASSERTE(pEHR != 0 && pEHR != EXCEPTION_CHAIN_END);
+ }
+
+ return pBest;
+}
+
+VOID SetCurrentSEHRecord(EXCEPTION_REGISTRATION_RECORD *pSEH)
+{
+ WRAPPER_NO_CONTRACT;
+ *GetThread()->GetExceptionListPtr() = pSEH;
+}
+
+
+//
+// Unwind pExinfo, pops FS:[0] handlers until the interception context SP, and
+// resumes at interception context.
+//
+VOID UnwindExceptionTrackerAndResumeInInterceptionFrame(ExInfo* pExInfo, EHContext* context)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ _ASSERTE(pExInfo && context);
+
+ pExInfo->UnwindExInfo((LPVOID)(size_t)context->Esp);
+ PopNestedExceptionRecords((LPVOID)(size_t)context->Esp);
+
+ STRESS_LOG3(LF_EH|LF_CORDB, LL_INFO100, "UnwindExceptionTrackerAndResumeInInterceptionFrame: completing intercept at EIP = %p ESP = %p EBP = %p\n", context->Eip, context->Esp, context->Ebp);
+
+ ResumeAtJitEHHelper(context);
+ UNREACHABLE_MSG("Should never return from ResumeAtJitEHHelper!");
+}
+
+//
+// Pop SEH records below the given target ESP. This is only used to pop nested exception records.
+// If bCheckForUnknownHandlers is set, it only checks for unknown FS:[0] handlers.
+//
+BOOL PopNestedExceptionRecords(LPVOID pTargetSP, BOOL bCheckForUnknownHandlers)
+{
+ // No CONTRACT here, because we can't run the risk of it pushing any SEH into the current method.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ PEXCEPTION_REGISTRATION_RECORD pEHR = GetCurrentSEHRecord();
+
+ while ((LPVOID)pEHR < pTargetSP)
+ {
+ //
+ // The only handler type we're allowed to have below the limit on the FS:0 chain in these cases is a nested
+ // exception record, so we verify that here.
+ //
+ // There is a special case, of course: for an unhandled exception, when the default handler does the exit
+ // unwind, we may have an exception that escapes a finally clause, thus replacing the original unhandled
+ // exception. If we find a catcher for that new exception, then we'll go ahead and do our own unwind, then
+        // jump to the catch. When we are called here, just before jumping to the catch, we'll pop off our nested
+        // handlers, then we'll pop off one more handler: the handler that ntdll!ExecuteHandler2 pushed before
+        // calling our nested handler. We go ahead and pop off that handler, too. It's okay; it's only there to catch
+        // exceptions from handlers and turn them into collided unwind status codes... there's no cleanup in the
+        // handler that we're removing, and that's the important point. The handler that ExecuteHandler2 pushes
+        // isn't a public export from ntdll, but it's named "UnwindHandler" and is physically shortly after
+ // ExecuteHandler2 in ntdll.
+ //
+ static HINSTANCE ExecuteHandler2Module = 0;
+ static BOOL ExecuteHandler2ModuleInited = FALSE;
+
+ // Cache the handle to the dll with the handler pushed by ExecuteHandler2.
+ if (!ExecuteHandler2ModuleInited)
+ {
+ ExecuteHandler2Module = WszGetModuleHandle(W("ntdll.dll"));
+ ExecuteHandler2ModuleInited = TRUE;
+ }
+
+ if (bCheckForUnknownHandlers)
+ {
+ if (!IsComPlusNestedExceptionRecord(pEHR) ||
+ !((ExecuteHandler2Module != NULL) && IsIPInModule(ExecuteHandler2Module, (PCODE)pEHR->Handler)))
+ {
+ return TRUE;
+ }
+ }
+#ifdef _DEBUG
+ else
+ {
+ // Note: if we can't find the module containing ExecuteHandler2, we'll just be really strict and require
+ // that we're only popping nested handlers.
+ _ASSERTE(IsComPlusNestedExceptionRecord(pEHR) ||
+ ((ExecuteHandler2Module != NULL) && IsIPInModule(ExecuteHandler2Module, (PCODE)pEHR->Handler)));
+ }
+#endif // _DEBUG
+
+ pEHR = pEHR->Next;
+ }
+
+ if (!bCheckForUnknownHandlers)
+ {
+ SetCurrentSEHRecord(pEHR);
+ }
+ return FALSE;
+}
+
+//
+// This is implemented differently from the PopNestedExceptionRecords above because it's called in the context of
+// the DebuggerRCThread to operate on the stack of another thread.
+//
+VOID PopNestedExceptionRecords(LPVOID pTargetSP, CONTEXT *pCtx, void *pSEH)
+{
+ // No CONTRACT here, because we can't run the risk of it pushing any SEH into the current method.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+#ifdef _DEBUG
+ LOG((LF_CORDB,LL_INFO1000, "\nPrintSEHRecords:\n"));
+
+ EXCEPTION_REGISTRATION_RECORD *pEHR = (EXCEPTION_REGISTRATION_RECORD *)(size_t)*(DWORD *)pSEH;
+
+    // Check that all the EH frames are greater than the current stack value. If not, the
+    // stack has been updated somehow without unwinding the SEH chain.
+ while (pEHR != NULL && pEHR != EXCEPTION_CHAIN_END)
+ {
+ LOG((LF_EH, LL_INFO1000000, "\t%08x: next:%08x handler:%x\n", pEHR, pEHR->Next, pEHR->Handler));
+ pEHR = pEHR->Next;
+ }
+#endif
+
+ DWORD dwCur = *(DWORD*)pSEH; // 'EAX' in the original routine
+ DWORD dwPrev = (DWORD)(size_t)pSEH;
+
+ while (dwCur < (DWORD)(size_t)pTargetSP)
+ {
+ // Watch for the OS handler
+ // for nested exceptions, or any C++ handlers for destructors in our call
+ // stack, or anything else.
+ if (dwCur < (DWORD)GetSP(pCtx))
+ dwPrev = dwCur;
+
+ dwCur = *(DWORD *)(size_t)dwCur;
+
+ LOG((LF_CORDB,LL_INFO10000, "dwCur: 0x%x dwPrev:0x%x pTargetSP:0x%x\n",
+ dwCur, dwPrev, pTargetSP));
+ }
+
+ *(DWORD *)(size_t)dwPrev = dwCur;
+
+#ifdef _DEBUG
+ pEHR = (EXCEPTION_REGISTRATION_RECORD *)(size_t)*(DWORD *)pSEH;
+    // Check that all the EH frames are greater than the current stack value. If not, the
+    // stack has been updated somehow without unwinding the SEH chain.
+
+ LOG((LF_CORDB,LL_INFO1000, "\nPopSEHRecords:\n"));
+ while (pEHR != NULL && pEHR != (void *)-1)
+ {
+ LOG((LF_EH, LL_INFO1000000, "\t%08x: next:%08x handler:%x\n", pEHR, pEHR->Next, pEHR->Handler));
+ pEHR = pEHR->Next;
+ }
+#endif
+}
+
+//==========================================================================
+// COMPlusThrowCallback
+//
+//==========================================================================
+
+/*
+ *
+ * COMPlusThrowCallbackHelper
+ *
+ * This function is a simple helper function for COMPlusThrowCallback. It is needed
+ * because of the EX_TRY macro. This macro does an alloca(), which allocates space
+ * off the stack, not free'ing it. Thus, doing a EX_TRY in a loop can easily result
+ * in a stack overflow error. By factoring out the EX_TRY into a separate function,
+ * we recover that stack space.
+ *
+ * Parameters:
+ * pJitManager - The JIT manager that will filter the EH.
+ * pCf - The frame to crawl.
+ * EHClausePtr
+ * nestingLevel
+ * pThread - Used to determine if the thread is throwable or not.
+ *
+ * Return:
+ * Exception status.
+ *
+ */
+int COMPlusThrowCallbackHelper(IJitManager *pJitManager,
+ CrawlFrame *pCf,
+ ThrowCallbackType* pData,
+ EE_ILEXCEPTION_CLAUSE *EHClausePtr,
+ DWORD nestingLevel,
+ OBJECTREF throwable
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ int iFilt = 0;
+ BOOL impersonating = FALSE;
+
+ EX_TRY
+ {
+ GCPROTECT_BEGIN (throwable);
+ if (pData->hCallerToken != NULL)
+ {
+ STRESS_LOG1(LF_EH, LL_INFO100, "In COMPlusThrowCallbackHelper hCallerToken = %d\n",pData->hCallerToken);
+ // CLR_ImpersonateLoggedOnUser fails fast on error
+ COMPrincipal::CLR_ImpersonateLoggedOnUser(pData->hCallerToken);
+ impersonating = TRUE;
+ }
+
+ // We want to call filters even if the thread is aborting, so suppress abort
+ // checks while the filter runs.
+ ThreadPreventAsyncHolder preventAbort;
+
+ BYTE* startAddress = (BYTE*)pCf->GetCodeInfo()->GetStartAddress();
+ iFilt = ::CallJitEHFilter(pCf, startAddress, EHClausePtr, nestingLevel, throwable);
+
+ if (impersonating)
+ {
+ STRESS_LOG1(LF_EH, LL_INFO100, "In COMPlusThrowCallbackHelper hImpersonationToken = %d\n",pData->hImpersonationToken);
+ // CLR_ImpersonateLoggedOnUser fails fast on error
+ COMPrincipal::CLR_ImpersonateLoggedOnUser(pData->hImpersonationToken);
+ impersonating = FALSE;
+ }
+ GCPROTECT_END();
+ }
+ EX_CATCH
+ {
+ if (impersonating)
+ {
+ STRESS_LOG1(LF_EH, LL_INFO100, "In COMPlusThrowCallbackHelper EX_CATCH hImpersonationToken = %d\n",pData->hImpersonationToken);
+ // CLR_ImpersonateLoggedOnUser fails fast on error
+ COMPrincipal::CLR_ImpersonateLoggedOnUser(pData->hImpersonationToken);
+ impersonating = FALSE;
+ }
+
+ //
+ // Swallow exception. Treat as exception continue search.
+ //
+ iFilt = EXCEPTION_CONTINUE_SEARCH;
+
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return iFilt;
+}
+
+//******************************************************************************
+// The stack walk callback for exception handling on x86.
+// Returns one of:
+// SWA_CONTINUE = 0, // continue walking
+// SWA_ABORT = 1, // stop walking, early out in "failure case"
+// SWA_FAILED = 2 // couldn't walk stack
+StackWalkAction COMPlusThrowCallback( // SWA value
+ CrawlFrame *pCf, // Data from StackWalkFramesEx
+ ThrowCallbackType *pData) // Context data passed through from CPFH
+{
+ // We don't want to use a runtime contract here since this codepath is used during
+ // the processing of a hard SO. Contracts use a significant amount of stack
+ // which we can't afford for those cases.
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ Frame *pFrame = pCf->GetFrame();
+ MethodDesc *pFunc = pCf->GetFunction();
+
+ #if defined(_DEBUG)
+ #define METHODNAME(pFunc) (pFunc?pFunc->m_pszDebugMethodName:"<n/a>")
+ #else
+ #define METHODNAME(pFunc) "<n/a>"
+ #endif
+ STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: STACKCRAWL method:%pM ('%s'), Frame:%p, FrameVtable = %pV\n",
+ pFunc, METHODNAME(pFunc), pFrame, pCf->IsFrameless()?0:(*(void**)pFrame));
+ #undef METHODNAME
+
+ Thread *pThread = GetThread();
+
+ if (pFrame && pData->pTopFrame == pFrame)
+ /* Don't look past limiting frame if there is one */
+ return SWA_ABORT;
+
+ if (!pFunc)
+ return SWA_CONTINUE;
+
+ if (pThread->IsRudeAbortInitiated() && !pThread->IsWithinCer(pCf))
+ {
+ return SWA_CONTINUE;
+ }
+
+ ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
+
+ _ASSERTE(!pData->bIsUnwind);
+#ifdef _DEBUG
+    // It SHOULD be the case that any frames we consider lie between this exception
+    // record and the previous one.
+ if (!pExInfo->m_pPrevNestedInfo) {
+ if (pData->pCurrentExceptionRecord) {
+ if (pFrame) _ASSERTE(pData->pCurrentExceptionRecord > pFrame);
+ if (pCf->IsFrameless()) _ASSERTE((ULONG_PTR)pData->pCurrentExceptionRecord >= GetRegdisplaySP(pCf->GetRegisterSet()));
+ }
+ if (pData->pPrevExceptionRecord) {
+        // FCALLS have an extra SEH record in debug because of the destructor
+        // associated with ForbidGC checking. This is benign, so just ignore it.
+ if (pFrame) _ASSERTE(pData->pPrevExceptionRecord < pFrame || pFrame->GetVTablePtr() == HelperMethodFrame::GetMethodFrameVPtr());
+ if (pCf->IsFrameless()) _ASSERTE((ULONG_PTR)pData->pPrevExceptionRecord <= GetRegdisplaySP(pCf->GetRegisterSet()));
+ }
+ }
+#endif
+
+ UINT_PTR currentIP = 0;
+ UINT_PTR currentSP = 0;
+
+ if (pCf->IsFrameless())
+ {
+ currentIP = (UINT_PTR)GetControlPC(pCf->GetRegisterSet());
+ currentSP = (UINT_PTR)GetRegdisplaySP(pCf->GetRegisterSet());
+ }
+ else if (InlinedCallFrame::FrameHasActiveCall(pFrame))
+ {
+ // don't have the IP, SP for native code
+ currentIP = 0;
+ currentSP = 0;
+ }
+ else
+ {
+ currentIP = (UINT_PTR)(pCf->GetFrame()->GetIP());
+ currentSP = 0; //Don't have an SP to get.
+ }
+
+ if (!pFunc->IsILStub())
+ {
+        // Append the current frame to the stack trace and save the stack trace to the managed Exception object.
+ pExInfo->m_StackTraceInfo.AppendElement(pData->bAllowAllocMem, currentIP, currentSP, pFunc, pCf);
+
+ pExInfo->m_StackTraceInfo.SaveStackTrace(pData->bAllowAllocMem,
+ pThread->GetThrowableAsHandle(),
+ pData->bReplaceStack,
+ pData->bSkipLastElement);
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO1000, "COMPlusThrowCallback: Skipping AppendElement/SaveStackTrace for IL stub MD %p\n", pFunc));
+ }
+
+ // Fire an exception thrown ETW event when an exception occurs
+ ETW::ExceptionLog::ExceptionThrown(pCf, pData->bSkipLastElement, pData->bReplaceStack);
+
+ // Reset the flags. These flags are set only once before each stack walk done by LookForHandler(), and
+ // they apply only to the first frame we append to the stack trace. Subsequent frames are always appended.
+ if (pData->bReplaceStack)
+ {
+ pData->bReplaceStack = FALSE;
+ }
+ if (pData->bSkipLastElement)
+ {
+ pData->bSkipLastElement = FALSE;
+ }
+
+ // Check for any impersonation on the frame and save that for use during EH filter callbacks
+ OBJECTREF* pRefSecDesc = pCf->GetAddrOfSecurityObject();
+ if (pRefSecDesc != NULL && *pRefSecDesc != NULL)
+ {
+ FRAMESECDESCREF fsdRef = (FRAMESECDESCREF)*pRefSecDesc;
+ if (fsdRef->GetCallerToken() != NULL)
+ {
+ // Impersonation info present on the Frame
+ pData->hCallerToken = fsdRef->GetCallerToken();
+ STRESS_LOG1(LF_EH, LL_INFO100, "In COMPlusThrowCallback. Found non-NULL callertoken on FSD:%d\n",pData->hCallerToken);
+ if (!pData->bImpersonationTokenSet)
+ {
+ pData->hImpersonationToken = fsdRef->GetImpersonationToken();
+ STRESS_LOG1(LF_EH, LL_INFO100, "In COMPlusThrowCallback. Found non-NULL impersonationtoken on FSD:%d\n",pData->hImpersonationToken);
+ pData->bImpersonationTokenSet = TRUE;
+ }
+ }
+ }
+
+    // Now that we've got the stack trace: if we aren't allowed to catch this exception and we're in the first pass, keep walking.
+ if (pData->bDontCatch)
+ return SWA_CONTINUE;
+
+ if (!pCf->IsFrameless())
+ {
+ // @todo - remove this once SIS is fully enabled.
+ extern bool g_EnableSIS;
+ if (g_EnableSIS)
+ {
+            // For the debugger, we may want to notify of first-chance exceptions if they're coming out of a stub.
+            // We recognize stubs as Frames with an M2U transition type. The debugger's stackwalker also
+            // recognizes these frames and publishes ICorDebugInternalFrames in the stackwalk. It's
+            // important to use pFrame as the stack address so that the exception callback matches up
+            // with the ICorDebugInternalFrame stack range.
+ if (CORDebuggerAttached())
+ {
+ Frame * pFrameStub = pCf->GetFrame();
+ Frame::ETransitionType t = pFrameStub->GetTransitionType();
+ if (t == Frame::TT_M2U)
+ {
+ // Use address of the frame as the stack address.
+ currentSP = (SIZE_T) ((void*) pFrameStub);
+ currentIP = 0; // no IP.
+ EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, (SIZE_T)currentIP, (SIZE_T)currentSP);
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+ // Deliver the FirstChanceNotification after the debugger, if not already delivered.
+ if (!pExInfo->DeliveredFirstChanceNotification())
+ {
+ ExceptionNotifications::DeliverFirstChanceNotification();
+ }
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+ }
+ }
+ }
+ return SWA_CONTINUE;
+ }
+
+ bool fIsILStub = pFunc->IsILStub();
+ bool fGiveDebuggerAndProfilerNotification = !fIsILStub;
+ BOOL fMethodCanHandleException = TRUE;
+
+ MethodDesc * pUserMDForILStub = NULL;
+ Frame * pILStubFrame = NULL;
+ if (fIsILStub)
+ pUserMDForILStub = GetUserMethodForILStub(pThread, currentSP, pFunc, &pILStubFrame);
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ CorruptionSeverity currentSeverity = pThread->GetExceptionState()->GetCurrentExceptionTracker()->GetCorruptionSeverity();
+ {
+ // We must defer to the MethodDesc of the user method instead of the IL stub
+ // itself because the user can specify the policy on a per-method basis and
+ // that won't be reflected via the IL stub's MethodDesc.
+ MethodDesc * pMDWithCEAttribute = fIsILStub ? pUserMDForILStub : pFunc;
+
+        // Check whether the exception can be delivered to the method: this checks whether
+        // the exception is a CE and, if it is, whether the method can process it.
+ fMethodCanHandleException = CEHelper::CanMethodHandleException(currentSeverity, pMDWithCEAttribute);
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ // Let the profiler know that we are searching for a handler within this function instance
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionEnter(pFunc);
+
+ // The following debugger notification and AppDomain::FirstChanceNotification should be scoped together
+ // since the AD notification *must* follow immediately after the debugger's notification.
+ {
+#ifdef DEBUGGING_SUPPORTED
+ //
+ // Go ahead and notify any debugger of this exception.
+ //
+ EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, (SIZE_T)currentIP, (SIZE_T)currentSP);
+
+ if (CORDebuggerAttached() && pExInfo->m_ExceptionFlags.DebuggerInterceptInfo())
+ {
+ return SWA_ABORT;
+ }
+#endif // DEBUGGING_SUPPORTED
+
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+ // Attempt to deliver the first chance notification to the AD only *AFTER* the debugger
+ // has done that, provided we have not already done that.
+ if (!pExInfo->DeliveredFirstChanceNotification())
+ {
+ ExceptionNotifications::DeliverFirstChanceNotification();
+ }
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+ }
+ IJitManager* pJitManager = pCf->GetJitManager();
+ _ASSERTE(pJitManager);
+ EH_CLAUSE_ENUMERATOR pEnumState;
+ unsigned EHCount = 0;
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+    // If the exception cannot be handled, then just bail out. We shouldn't examine the EH clauses
+    // in such a method.
+ if (!fMethodCanHandleException)
+ {
+ LOG((LF_EH, LL_INFO100, "COMPlusThrowCallback - CEHelper decided not to look for exception handlers in the method(MD:%p).\n", pFunc));
+
+ // Set the flag to skip this frame since the CE cannot be delivered
+ _ASSERTE(currentSeverity == ProcessCorrupting);
+
+ // Ensure EHClause count is zero
+ EHCount = 0;
+ }
+ else
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ {
+ EHCount = pJitManager->InitializeEHEnumeration(pCf->GetMethodToken(), &pEnumState);
+ }
+
+ if (EHCount == 0)
+ {
+ // Inform the profiler that we're leaving, and what pass we're on
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
+ return SWA_CONTINUE;
+ }
+
+ TypeHandle thrownType = TypeHandle();
+    // If we are being called on an unwind for an exception that we did not try to catch, e.g.
+    // an internal EE exception, then pThread->GetThrowable will be null.
+ {
+ OBJECTREF throwable = pThread->GetThrowable();
+ if (throwable != NULL)
+ {
+ throwable = PossiblyUnwrapThrowable(throwable, pCf->GetAssembly());
+ thrownType = TypeHandle(throwable->GetTrueMethodTable());
+ }
+ }
+
+ PREGDISPLAY regs = pCf->GetRegisterSet();
+ BYTE *pStack = (BYTE *) GetRegdisplaySP(regs);
+#ifdef DEBUGGING_SUPPORTED
+ BYTE *pHandlerEBP = (BYTE *) GetRegdisplayFP(regs);
+#endif
+
+ DWORD offs = (DWORD)pCf->GetRelOffset(); //= (BYTE*) (*regs->pPC) - (BYTE*) pCf->GetStartAddress();
+ STRESS_LOG1(LF_EH, LL_INFO10000, "COMPlusThrowCallback: offset is %d\n", offs);
+
+ EE_ILEXCEPTION_CLAUSE EHClause;
+ unsigned start_adjust, end_adjust;
+
+ start_adjust = !(pCf->HasFaulted() || pCf->IsIPadjusted());
+ end_adjust = pCf->IsActiveFunc();
+
+ for(ULONG i=0; i < EHCount; i++)
+ {
+ pJitManager->GetNextEHClause(&pEnumState, &EHClause);
+ _ASSERTE(IsValidClause(&EHClause));
+
+ STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: considering '%s' clause [%d,%d], ofs:%d\n",
+ (IsFault(&EHClause) ? "fault" : (
+ IsFinally(&EHClause) ? "finally" : (
+ IsFilterHandler(&EHClause) ? "filter" : (
+ IsTypedHandler(&EHClause) ? "typed" : "unknown")))),
+ EHClause.TryStartPC,
+ EHClause.TryEndPC,
+ offs
+ );
+
+        // Checking the exception range is a bit tricky because
+        // on CPU faults (null pointer access, div 0, ...) the IP points
+        // to the faulting instruction, but on calls, the IP points
+        // to the next instruction.
+ // This means that we should not include the start point on calls
+ // as this would be a call just preceding the try block.
+ // Also, we should include the end point on calls, but not faults.
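+        //
+        // Restated as a predicate (a hypothetical helper, for illustration only),
+        // the range check below is the negation of:
+        //
+        //     bool InTryRegion(DWORD offs, const EE_ILEXCEPTION_CLAUSE* c,
+        //                      bool startAdjust, bool endAdjust)
+        //     {
+        //         if (offs < c->TryStartPC || offs > c->TryEndPC)  return false;
+        //         if (offs == c->TryStartPC && startAdjust)        return false; // call just before the try
+        //         if (offs == c->TryEndPC && endAdjust)            return false; // exact IP at the try end
+        //         return true;
+        //     }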
+
+        // If we're in the FILTER part of a filter clause, then we
+        // want to stop crawling: the exception is going to be caught by the
+        // EX_CATCH just above us in COMPlusThrowCallbackHelper, which treats
+        // it as EXCEPTION_CONTINUE_SEARCH.
+ if ( IsFilterHandler(&EHClause)
+ && ( offs > EHClause.FilterOffset
+ || offs == EHClause.FilterOffset && !start_adjust)
+ && ( offs < EHClause.HandlerStartPC
+ || offs == EHClause.HandlerStartPC && !end_adjust)) {
+
+ STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: Fault inside filter [%d,%d] startAdj %d endAdj %d\n",
+ EHClause.FilterOffset, EHClause.HandlerStartPC, start_adjust, end_adjust);
+
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
+ return SWA_ABORT;
+ }
+
+ if ( (offs < EHClause.TryStartPC) ||
+ (offs > EHClause.TryEndPC) ||
+ (offs == EHClause.TryStartPC && start_adjust) ||
+ (offs == EHClause.TryEndPC && end_adjust))
+ continue;
+
+ BOOL typeMatch = FALSE;
+ BOOL isTypedHandler = IsTypedHandler(&EHClause);
+
+ if (isTypedHandler && !thrownType.IsNull())
+ {
+ if (EHClause.TypeHandle == (void*)(size_t)mdTypeRefNil)
+ {
+ // this is a catch(...)
+ typeMatch = TRUE;
+ }
+ else
+ {
+ TypeHandle exnType = pJitManager->ResolveEHClause(&EHClause,pCf);
+
+                // If it doesn't have a cached class, then the class wasn't loaded, so it couldn't have been thrown.
+ typeMatch = !exnType.IsNull() && ExceptionIsOfRightType(exnType, thrownType);
+ }
+ }
+
+ // <TODO>@PERF: Is this too expensive? Consider storing the nesting level
+ // instead of the HandlerEndPC.</TODO>
+
+ // Determine the nesting level of EHClause. Just walk the table
+ // again, and find out how many handlers enclose it
+ DWORD nestingLevel = 0;
+
+ if (IsFaultOrFinally(&EHClause))
+ continue;
+ if (isTypedHandler)
+ {
+ LOG((LF_EH, LL_INFO100, "COMPlusThrowCallback: %s match for typed handler.\n", typeMatch?"Found":"Did not find"));
+ if (!typeMatch)
+ {
+ continue;
+ }
+ }
+ else
+ {
+ // Must be an exception filter (__except() part of __try{}__except(){}).
+ nestingLevel = ComputeEnclosingHandlerNestingLevel(pJitManager,
+ pCf->GetMethodToken(),
+ EHClause.HandlerStartPC);
+
+ // We just need *any* address within the method. This will let the debugger
+ // resolve the EnC version of the method.
+ PCODE pMethodAddr = GetControlPC(regs);
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToDebuggerExceptionInterfaceWrapper::ExceptionFilter(pFunc, pMethodAddr, EHClause.FilterOffset, pHandlerEBP);
+
+ UINT_PTR uStartAddress = (UINT_PTR)pCf->GetCodeInfo()->GetStartAddress();
+
+ // save clause information in the exinfo
+ pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_FILTER,
+ uStartAddress + EHClause.FilterOffset,
+ StackFrame((UINT_PTR)pHandlerEBP));
+
+ // Let the profiler know we are entering a filter
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterEnter(pFunc);
+
+ COUNTER_ONLY(GetPerfCounters().m_Excep.cFiltersExecuted++);
+
+ STRESS_LOG3(LF_EH, LL_INFO10, "COMPlusThrowCallback: calling filter code, EHClausePtr:%08x, Start:%08x, End:%08x\n",
+ &EHClause, EHClause.HandlerStartPC, EHClause.HandlerEndPC);
+
+ OBJECTREF throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pCf->GetAssembly());
+
+ pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE);
+
+ int iFilt = COMPlusThrowCallbackHelper(pJitManager,
+ pCf,
+ pData,
+ &EHClause,
+ nestingLevel,
+ throwable);
+
+ pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE);
+
+ // Let the profiler know we are leaving a filter
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterLeave();
+
+ pExInfo->m_EHClauseInfo.ResetInfo();
+
+ if (pThread->IsRudeAbortInitiated() && !pThread->IsWithinCer(pCf))
+ {
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
+ return SWA_CONTINUE;
+ }
+
+ // If this filter didn't want the exception, keep looking.
+ if (EXCEPTION_EXECUTE_HANDLER != iFilt)
+ continue;
+ }
+
+        // Record this location, so we can stop the unwind phase here later.
+ pData->pFunc = pFunc;
+ pData->dHandler = i;
+ pData->pStack = pStack;
+
+ // Notify the profiler that a catcher has been found
+ if (fGiveDebuggerAndProfilerNotification)
+ {
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchCatcherFound(pFunc);
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
+ }
+
+#ifdef DEBUGGING_SUPPORTED
+ //
+ // Notify debugger that a catcher has been found.
+ //
+ if (fIsILStub)
+ {
+ EEToDebuggerExceptionInterfaceWrapper::NotifyOfCHFFilter(pExInfo->m_pExceptionPointers, pILStubFrame);
+ }
+ else
+ if (fGiveDebuggerAndProfilerNotification &&
+ CORDebuggerAttached() && !pExInfo->m_ExceptionFlags.DebuggerInterceptInfo())
+ {
+ _ASSERTE(pData);
+ // We just need *any* address within the method. This will let the debugger
+ // resolve the EnC version of the method.
+ PCODE pMethodAddr = GetControlPC(regs);
+
+ EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedExceptionCatcherFound(pThread,
+ pData->pFunc, pMethodAddr,
+ (SIZE_T)pData->pStack,
+ &EHClause);
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ return SWA_ABORT;
+ }
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pFunc);
+ return SWA_CONTINUE;
+} // StackWalkAction COMPlusThrowCallback()
+
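+// For orientation: exception handling here is two-pass. COMPlusThrowCallback
+// above runs during the first (search) pass: it builds the stack trace, runs
+// filters via COMPlusThrowCallbackHelper, and records the catching clause in
+// the ThrowCallbackType (pFunc/dHandler/pStack). COMPlusUnwindCallback below
+// runs during the second (unwind) pass: it executes fault/finally handlers
+// and, when it reaches the recorded clause again, resumes in the catch via
+// ResumeAtJitEH.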
+
+//==========================================================================
+// COMPlusUnwindCallback
+//==========================================================================
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning (disable : 4740) // There is inline asm code in this function, which disables
+ // global optimizations.
+#pragma warning (disable : 4731)
+#endif
+StackWalkAction COMPlusUnwindCallback (CrawlFrame *pCf, ThrowCallbackType *pData)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ _ASSERTE(pData->bIsUnwind);
+
+ Frame *pFrame = pCf->GetFrame();
+ MethodDesc *pFunc = pCf->GetFunction();
+
+ #if defined(_DEBUG)
+ #define METHODNAME(pFunc) (pFunc?pFunc->m_pszDebugMethodName:"<n/a>")
+ #else
+ #define METHODNAME(pFunc) "<n/a>"
+ #endif
+ STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: STACKCRAWL method:%pM ('%s'), Frame:%p, FrameVtable = %pV\n",
+ pFunc, METHODNAME(pFunc), pFrame, pCf->IsFrameless()?0:(*(void**)pFrame));
+ #undef METHODNAME
+
+ if (pFrame && pData->pTopFrame == pFrame)
+ /* Don't look past limiting frame if there is one */
+ return SWA_ABORT;
+
+ if (!pFunc)
+ return SWA_CONTINUE;
+
+ if (!pCf->IsFrameless())
+ return SWA_CONTINUE;
+
+ Thread *pThread = GetThread();
+
+    // If a rude abort is in progress on this thread, we will not run any finallys.
+ if (pThread->IsRudeAbortInitiated() && !pThread->IsWithinCer(pCf))
+ {
+ return SWA_CONTINUE;
+ }
+
+ IJitManager* pJitManager = pCf->GetJitManager();
+ _ASSERTE(pJitManager);
+
+ ExInfo *pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
+
+ PREGDISPLAY regs = pCf->GetRegisterSet();
+ BYTE *pStack = (BYTE *) GetRegdisplaySP(regs);
+
+ TypeHandle thrownType = TypeHandle();
+
+ BOOL fCanMethodHandleException = TRUE;
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+    // A MethodDesc's security information (i.e. whether it is critical or transparent) is calculated lazily.
+    // If this method's security information was not precalculated, then it would already have been computed
+    // in the first pass using Security::IsMethodCritical, which could have taken us down a path that is
+    // GC_TRIGGERS.
+    //
+    // However, this unwind callback (for x86) is GC_NOTRIGGER, and at this point the security information
+    // has been calculated already, so we won't end up on the GC_TRIGGERS path. Thus, to keep SCAN.EXE (the
+    // static contract analyzer) happy, we pass FALSE to the CanMethodHandleException call, indicating we
+    // don't need to calculate security information (and thus won't go down the GC_TRIGGERS path).
+ //
+    // Check whether the exception can be delivered to the method: this checks whether
+    // the exception is a CE and, if it is, whether the method can process it.
+ CorruptionSeverity currentSeverity = pThread->GetExceptionState()->GetCurrentExceptionTracker()->GetCorruptionSeverity();
+
+    // We have to do this check for x86 since, unlike 64-bit (which sets up a new exception tracker for
+    // longjmp), x86 only sets up new trackers in the first pass (and longjmp is a second-pass-only
+    // exception). Hence, we pass this information in the callback structure without affecting any
+    // existing exception tracker (in case the longjmp was a nested exception).
+ if (pData->m_fIsLongJump)
+ {
+        // Longjmp is not a CSE. With a CSE in progress, this can be invoked by either:
+        //
+        // 1) Managed code (e.g. finally/fault/catch), OR
+        // 2) Native code
+        //
+        // In scenario (1), managed code can invoke it only if it was attributed with the HPCSE attribute.
+        // Thus, longjmp is no different than managed code doing a "throw new Exception();".
+        //
+        // In scenario (2), longjmp is no different than any other non-CSE native exception raised.
+        //
+        // In both these cases, longjmp should be treated as non-CSE. Since x86 does not set up a tracker
+        // for it (see the comment above), we pass this information (whether the current exception is a
+        // longjmp or not) to this callback (from UnwindFrames) to set up the correct corruption severity.
+ //
+ // http://www.nynaeve.net/?p=105 has a brief description of how exception-safe setjmp/longjmp works.
+ currentSeverity = NotCorrupting;
+ }
+ {
+ MethodDesc * pFuncWithCEAttribute = pFunc;
+ Frame * pILStubFrame = NULL;
+ if (pFunc->IsILStub())
+ {
+ // We must defer to the MethodDesc of the user method instead of the IL stub
+ // itself because the user can specify the policy on a per-method basis and
+ // that won't be reflected via the IL stub's MethodDesc.
+ pFuncWithCEAttribute = GetUserMethodForILStub(pThread, (UINT_PTR)pStack, pFunc, &pILStubFrame);
+ }
+ fCanMethodHandleException = CEHelper::CanMethodHandleException(currentSeverity, pFuncWithCEAttribute, FALSE);
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+#ifdef DEBUGGING_SUPPORTED
+ LOG((LF_EH, LL_INFO1000, "COMPlusUnwindCallback: Intercept %d, pData->pFunc 0x%X, pFunc 0x%X, pData->pStack 0x%X, pStack 0x%X\n",
+ pExInfo->m_ExceptionFlags.DebuggerInterceptInfo(),
+ pData->pFunc,
+ pFunc,
+ pData->pStack,
+ pStack));
+
+ //
+ // If the debugger wants to intercept this exception here, go do that.
+ //
+ if (pExInfo->m_ExceptionFlags.DebuggerInterceptInfo() && (pData->pFunc == pFunc) && (pData->pStack == pStack))
+ {
+ goto LDoDebuggerIntercept;
+ }
+#endif
+
+ bool fGiveDebuggerAndProfilerNotification;
+ fGiveDebuggerAndProfilerNotification = !pFunc->IsILStub();
+
+ // Notify the profiler of the function we're dealing with in the unwind phase
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionEnter(pFunc);
+
+ EH_CLAUSE_ENUMERATOR pEnumState;
+ unsigned EHCount;
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ if (!fCanMethodHandleException)
+ {
+ LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback - CEHelper decided not to look for exception handlers in the method(MD:%p).\n", pFunc));
+
+ // Set the flag to skip this frame since the CE cannot be delivered
+ _ASSERTE(currentSeverity == ProcessCorrupting);
+
+ // Force EHClause count to be zero
+ EHCount = 0;
+ }
+ else
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ {
+ EHCount = pJitManager->InitializeEHEnumeration(pCf->GetMethodToken(), &pEnumState);
+ }
+
+ if (EHCount == 0)
+ {
+ // Inform the profiler that we're leaving, and what pass we're on
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc);
+
+ return SWA_CONTINUE;
+ }
+
+    // If we are being called on an unwind for an exception that we did not try to catch, e.g.
+    // an internal EE exception, then pThread->GetThrowable will be null.
+ {
+ OBJECTREF throwable = pThread->GetThrowable();
+ if (throwable != NULL)
+ {
+ throwable = PossiblyUnwrapThrowable(throwable, pCf->GetAssembly());
+ thrownType = TypeHandle(throwable->GetTrueMethodTable());
+ }
+ }
+#ifdef DEBUGGING_SUPPORTED
+ BYTE *pHandlerEBP;
+ pHandlerEBP = (BYTE *) GetRegdisplayFP(regs);
+#endif
+
+ DWORD offs;
+ offs = (DWORD)pCf->GetRelOffset(); //= (BYTE*) (*regs->pPC) - (BYTE*) pCf->GetStartAddress();
+
+ LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: current EIP offset in method 0x%x, \n", offs));
+
+ EE_ILEXCEPTION_CLAUSE EHClause;
+ unsigned start_adjust, end_adjust;
+
+ start_adjust = !(pCf->HasFaulted() || pCf->IsIPadjusted());
+ end_adjust = pCf->IsActiveFunc();
+
+ for(ULONG i=0; i < EHCount; i++)
+ {
+ pJitManager->GetNextEHClause(&pEnumState, &EHClause);
+ _ASSERTE(IsValidClause(&EHClause));
+
+ STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: considering '%s' clause [%d,%d], offs:%d\n",
+ (IsFault(&EHClause) ? "fault" : (
+ IsFinally(&EHClause) ? "finally" : (
+ IsFilterHandler(&EHClause) ? "filter" : (
+ IsTypedHandler(&EHClause) ? "typed" : "unknown")))),
+ EHClause.TryStartPC,
+ EHClause.TryEndPC,
+ offs
+ );
+
+        // Checking the exception range is a bit tricky because
+        // on CPU faults (null pointer access, div 0, ...) the IP points
+        // to the faulting instruction, but on calls, the IP points
+        // to the next instruction.
+ // This means that we should not include the start point on calls
+ // as this would be a call just preceding the try block.
+ // Also, we should include the end point on calls, but not faults.
+
+ if ( IsFilterHandler(&EHClause)
+ && ( offs > EHClause.FilterOffset
+ || offs == EHClause.FilterOffset && !start_adjust)
+ && ( offs < EHClause.HandlerStartPC
+ || offs == EHClause.HandlerStartPC && !end_adjust)
+ ) {
+ STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: Fault inside filter [%d,%d] startAdj %d endAdj %d\n",
+ EHClause.FilterOffset, EHClause.HandlerStartPC, start_adjust, end_adjust);
+
+            // Mark the filter as done. See the comment in CallJitEHFilter
+            // on why we have to do it here.
+ Frame* pFilterFrame = pThread->GetFrame();
+ _ASSERTE(pFilterFrame->GetVTablePtr() == ExceptionFilterFrame::GetMethodFrameVPtr());
+ ((ExceptionFilterFrame*)pFilterFrame)->SetFilterDone();
+
+ // Inform the profiler that we're leaving, and what pass we're on
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc);
+
+ return SWA_ABORT;
+ }
+
+ if ( (offs < EHClause.TryStartPC) ||
+ (offs > EHClause.TryEndPC) ||
+ (offs == EHClause.TryStartPC && start_adjust) ||
+ (offs == EHClause.TryEndPC && end_adjust))
+ continue;
+
+ // <TODO>@PERF : Is this too expensive? Consider storing the nesting level
+ // instead of the HandlerEndPC.</TODO>
+
+ // Determine the nesting level of EHClause. Just walk the table
+ // again, and find out how many handlers enclose it
+
+ DWORD nestingLevel = ComputeEnclosingHandlerNestingLevel(pJitManager,
+ pCf->GetMethodToken(),
+ EHClause.HandlerStartPC);
+
+ // We just need *any* address within the method. This will let the debugger
+ // resolve the EnC version of the method.
+ PCODE pMethodAddr = GetControlPC(regs);
+
+ UINT_PTR uStartAddress = (UINT_PTR)pCf->GetCodeInfo()->GetStartAddress();
+
+ if (IsFaultOrFinally(&EHClause))
+ {
+ COUNTER_ONLY(GetPerfCounters().m_Excep.cFinallysExecuted++);
+
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToDebuggerExceptionInterfaceWrapper::ExceptionHandle(pFunc, pMethodAddr, EHClause.HandlerStartPC, pHandlerEBP);
+
+ pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_FINALLY,
+ uStartAddress + EHClause.HandlerStartPC,
+ StackFrame((UINT_PTR)pHandlerEBP));
+
+ // Notify the profiler that we are about to execute the finally code
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyEnter(pFunc);
+
+ LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: finally clause [%d,%d] - call\n", EHClause.TryStartPC, EHClause.TryEndPC));
+
+ pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE);
+
+ ::CallJitEHFinally(pCf, (BYTE *)uStartAddress, &EHClause, nestingLevel);
+
+ pExInfo->m_EHClauseInfo.SetManagedCodeEntered(FALSE);
+
+ LOG((LF_EH, LL_INFO100, "COMPlusUnwindCallback: finally - returned\n"));
+
+ // Notify the profiler that we are done with the finally code
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyLeave();
+
+ pExInfo->m_EHClauseInfo.ResetInfo();
+
+ continue;
+ }
+
+ // Current is not a finally, check if it's the catching handler (or filter).
+ if (pData->pFunc != pFunc || (ULONG)(pData->dHandler) != i || pData->pStack != pStack)
+ {
+ continue;
+ }
+
+#ifdef _DEBUG
+ gLastResumedExceptionFunc = pCf->GetFunction();
+ gLastResumedExceptionHandler = i;
+#endif
+
+ // save clause information in the exinfo
+ pExInfo->m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_CATCH,
+ uStartAddress + EHClause.HandlerStartPC,
+ StackFrame((UINT_PTR)pHandlerEBP));
+
+ // Notify the profiler that we are about to resume at the catcher.
+ if (fGiveDebuggerAndProfilerNotification)
+ {
+ DACNotify::DoExceptionCatcherEnterNotification(pFunc, EHClause.HandlerStartPC);
+
+ EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherEnter(pThread, pFunc);
+
+ EEToDebuggerExceptionInterfaceWrapper::ExceptionHandle(pFunc, pMethodAddr, EHClause.HandlerStartPC, pHandlerEBP);
+ }
+
+ STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: offset 0x%x matches clause [0x%x, 0x%x) matches in method %pM\n",
+ offs, EHClause.TryStartPC, EHClause.TryEndPC, pFunc);
+
+ // ResumeAtJitEH will set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = TRUE; at the appropriate time
+ ::ResumeAtJitEH(pCf, (BYTE *)uStartAddress, &EHClause, nestingLevel, pThread, pData->bUnwindStack);
+ //UNREACHABLE_MSG("ResumeAtJitEH shouldn't have returned!");
+
+ // we do not set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = FALSE here,
+ // that happens when the catch clause calls back to COMPlusEndCatch
+
+ }
+
+ STRESS_LOG1(LF_EH, LL_INFO100, "COMPlusUnwindCallback: no handler found in method %pM\n", pFunc);
+ if (fGiveDebuggerAndProfilerNotification)
+ EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pFunc);
+
+ return SWA_CONTINUE;
+
+
+#ifdef DEBUGGING_SUPPORTED
+LDoDebuggerIntercept:
+
+ STRESS_LOG1(LF_EH|LF_CORDB, LL_INFO100, "COMPlusUnwindCallback: Intercepting in method %pM\n", pFunc);
+
+ //
+ // Setup up the easy parts of the context to restart at.
+ //
+ EHContext context;
+
+ //
+ // Note: EAX ECX EDX are scratch
+ //
+ context.Esp = (DWORD)(size_t)(GetRegdisplaySP(regs));
+ context.Ebx = *regs->pEbx;
+ context.Esi = *regs->pEsi;
+ context.Edi = *regs->pEdi;
+ context.Ebp = *regs->pEbp;
+
+ //
+ // Set scratch registers to 0 to avoid reporting incorrect values to GC in case of debugger changing the IP
+ // in the middle of a scratch register lifetime (see Dev10 754922)
+ //
+ context.Eax = 0;
+ context.Ecx = 0;
+ context.Edx = 0;
+
+ //
+ // Ok, now set the target Eip to the address the debugger requested.
+ //
+ ULONG_PTR nativeOffset;
+ pExInfo->m_DebuggerExState.GetDebuggerInterceptInfo(NULL, NULL, NULL, NULL, &nativeOffset, NULL);
+ context.Eip = GetControlPC(regs) - (pCf->GetRelOffset() - nativeOffset);
+
+ //
+ // Finally we need to get the correct Esp for this nested level
+ //
+
+ context.Esp = pCf->GetCodeManager()->GetAmbientSP(regs,
+ pCf->GetCodeInfo(),
+ nativeOffset,
+ pData->dHandler,
+ pCf->GetCodeManState()
+ );
+ //
+    // If we see unknown FS:[0] handlers, we delay the interception point until we reach the handler that protects it.
+    // This way the unknown FS:[0] handlers are popped by RtlUnwind and the managed (capital-F) Frames are unwound by the managed stackwalker.
+ //
+ BOOL fCheckForUnknownHandler = TRUE;
+ if (PopNestedExceptionRecords((LPVOID)(size_t)context.Esp, fCheckForUnknownHandler))
+ {
+        // Let the RtlUnwind in ClrDebuggerDoUnwindAndIntercept continue to unwind frames until we reach the handler protected by COMPlusNestedExceptionHandler.
+ pExInfo->m_InterceptionContext = context;
+ pExInfo->m_ValidInterceptionContext = TRUE;
+ STRESS_LOG0(LF_EH|LF_CORDB, LL_INFO100, "COMPlusUnwindCallback: Skip interception until unwinding reaches the actual handler protected by COMPlusNestedExceptionHandler\n");
+ }
+ else
+ {
+ //
+ // Pop off all the Exception information up to this point in the stack
+ //
+ UnwindExceptionTrackerAndResumeInInterceptionFrame(pExInfo, &context);
+ }
+ return SWA_ABORT;
+#endif // DEBUGGING_SUPPORTED
+} // StackWalkAction COMPlusUnwindCallback ()
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning (disable : 4740) // There is inline asm code in this function, which disables
+ // global optimizations.
+#pragma warning (disable : 4731)
+#endif
+void ResumeAtJitEH(CrawlFrame* pCf,
+ BYTE* startPC,
+ EE_ILEXCEPTION_CLAUSE *EHClausePtr,
+ DWORD nestingLevel,
+ Thread *pThread,
+ BOOL unwindStack)
+{
+ // No dynamic contract here because this function doesn't return and destructors wouldn't be executed
+ WRAPPER_NO_CONTRACT;
+
+ EHContext context;
+
+ context.Setup(PCODE(startPC + EHClausePtr->HandlerStartPC), pCf->GetRegisterSet());
+
+ size_t * pShadowSP = NULL; // Write Esp to *pShadowSP before jumping to handler
+ size_t * pHandlerEnd = NULL;
+
+ OBJECTREF throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pCf->GetAssembly());
+
+ pCf->GetCodeManager()->FixContext(ICodeManager::CATCH_CONTEXT,
+ &context,
+ pCf->GetCodeInfo(),
+ EHClausePtr->HandlerStartPC,
+ nestingLevel,
+ throwable,
+ pCf->GetCodeManState(),
+ &pShadowSP,
+ &pHandlerEnd);
+
+ if (pHandlerEnd)
+ {
+ *pHandlerEnd = EHClausePtr->HandlerEndPC;
+ }
+
+    // Save Esp so that COMPlusEndCatch can restore it (it always restores, so we want the correct value there).
+ ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
+ pExInfo->m_dEsp = (LPVOID)context.GetSP();
+ LOG((LF_EH, LL_INFO1000, "ResumeAtJitEH: current m_dEsp set to %p\n", context.GetSP()));
+
+ PVOID dEsp = GetCurrentSP();
+
+ if (!unwindStack)
+ {
+ // If we don't want to unwind the stack, then the guard page had better not be gone!
+ _ASSERTE(pThread->DetermineIfGuardPagePresent());
+
+        // so the code down below won't really update Esp
+ context.SetSP(dEsp);
+ pExInfo->m_pShadowSP = pShadowSP; // so that endcatch can zero it back
+
+ if (pShadowSP)
+ {
+ *pShadowSP = (size_t)dEsp;
+ }
+ }
+ else
+ {
+        // The shadow SP gets the real SP, as we are going to unwind the stack.
+ dEsp = (LPVOID)context.GetSP();
+
+ // BEGIN: pExInfo->UnwindExInfo(dEsp);
+ ExInfo *pPrevNestedInfo = pExInfo->m_pPrevNestedInfo;
+
+ while (pPrevNestedInfo && pPrevNestedInfo->m_StackAddress < dEsp)
+ {
+ LOG((LF_EH, LL_INFO1000, "ResumeAtJitEH: popping nested ExInfo at 0x%p\n", pPrevNestedInfo->m_StackAddress));
+
+ pPrevNestedInfo->DestroyExceptionHandle();
+ pPrevNestedInfo->m_StackTraceInfo.FreeStackTrace();
+
+#ifdef DEBUGGING_SUPPORTED
+ if (g_pDebugInterface != NULL)
+ {
+ g_pDebugInterface->DeleteInterceptContext(pPrevNestedInfo->m_DebuggerExState.GetDebuggerInterceptContext());
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ pPrevNestedInfo = pPrevNestedInfo->m_pPrevNestedInfo;
+ }
+
+ pExInfo->m_pPrevNestedInfo = pPrevNestedInfo;
+
+ _ASSERTE(pExInfo->m_pPrevNestedInfo == 0 || pExInfo->m_pPrevNestedInfo->m_StackAddress >= dEsp);
+
+ // Before we unwind the SEH records, get the Frame from the top-most nested exception record.
+ Frame* pNestedFrame = GetCurrFrame(FindNestedEstablisherFrame(GetCurrentSEHRecord()));
+
+ PopNestedExceptionRecords((LPVOID)(size_t)dEsp);
+
+ EXCEPTION_REGISTRATION_RECORD* pNewBottomMostHandler = GetCurrentSEHRecord();
+
+ pExInfo->m_pShadowSP = pShadowSP;
+
+ // The context and exception record are no longer any good.
+ _ASSERTE(pExInfo->m_pContext < dEsp); // It must be off the top of the stack.
+ pExInfo->m_pContext = 0; // Whack it.
+ pExInfo->m_pExceptionRecord = 0;
+ pExInfo->m_pExceptionPointers = 0;
+
+ // We're going to put one nested record back on the stack before we resume. This is
+ // where it goes.
+ NestedHandlerExRecord *pNestedHandlerExRecord = (NestedHandlerExRecord*)((BYTE*)dEsp - ALIGN_UP(sizeof(NestedHandlerExRecord), STACK_ALIGN_SIZE));
+
+ // The point of no return. The next statement starts scribbling on the stack. It's
+ // deep enough that we won't hit our own locals. (That's important, 'cuz we're still
+ // using them.)
+ //
+ _ASSERTE(dEsp > &pCf);
+ pNestedHandlerExRecord->m_handlerInfo.m_hThrowable=NULL; // This is random memory. Handle
+ // must be initialized to null before
+ // calling Init(), as Init() will try
+ // to free any old handle.
+ pNestedHandlerExRecord->Init((PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler, pNestedFrame);
+
+ INSTALL_EXCEPTION_HANDLING_RECORD(&(pNestedHandlerExRecord->m_ExReg));
+
+ context.SetSP(pNestedHandlerExRecord);
+
+        // We might have moved the bottom-most handler. The nested record itself is never
+        // the bottom-most handler -- it's pushed after the fact. So we have to make the
+        // bottom-most handler the one BEFORE the nested record.
+ if (pExInfo->m_pBottomMostHandler < pNewBottomMostHandler)
+ {
+ STRESS_LOG3(LF_EH, LL_INFO10000, "ResumeAtJitEH: setting ExInfo:0x%p m_pBottomMostHandler from 0x%p to 0x%p\n",
+ pExInfo, pExInfo->m_pBottomMostHandler, pNewBottomMostHandler);
+ pExInfo->m_pBottomMostHandler = pNewBottomMostHandler;
+ }
+
+ if (pShadowSP)
+ {
+ *pShadowSP = context.GetSP();
+ }
+ }
+
+ STRESS_LOG3(LF_EH, LL_INFO100, "ResumeAtJitEH: resuming at EIP = %p ESP = %p EBP = %p\n",
+ context.Eip, context.GetSP(), context.GetFP());
+
+#ifdef STACK_GUARDS_DEBUG
+ // We are transitioning back to managed code, so ensure that we are in
+ // SO-tolerant mode before we do so.
+ RestoreSOToleranceState();
+#endif
+
+    // We want this to happen as late as possible, but certainly after the notification
+    // (that the handle for the current ExInfo has been freed) has been delivered.
+ pExInfo->m_EHClauseInfo.SetManagedCodeEntered(TRUE);
+
+ ResumeAtJitEHHelper(&context);
+ UNREACHABLE_MSG("Should never return from ResumeAtJitEHHelper!");
+
+ // we do not set pExInfo->m_EHClauseInfo.m_fManagedCodeEntered = FALSE here,
+ // that happens when the catch clause calls back to COMPlusEndCatch
+ // we don't return to this point so it would be moot (see unreachable_msg above)
+
+}
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
+// Must be in a separate function because INSTALL_COMPLUS_EXCEPTION_HANDLER has a filter
+int CallJitEHFilterWorker(size_t *pShadowSP, EHContext *pContext)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ int retVal = EXCEPTION_CONTINUE_SEARCH;
+
+ BEGIN_CALL_TO_MANAGED();
+
+ retVal = CallJitEHFilterHelper(pShadowSP, pContext);
+
+ END_CALL_TO_MANAGED();
+
+ return retVal;
+}
+
+int CallJitEHFilter(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel, OBJECTREF thrownObj)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ int retVal = EXCEPTION_CONTINUE_SEARCH;
+ size_t * pShadowSP = NULL;
+ EHContext context;
+
+ context.Setup(PCODE(startPC + EHClausePtr->FilterOffset), pCf->GetRegisterSet());
+
+    size_t * pEndFilter = NULL; // Receives the slot into which the end-of-filter offset is written below
+ pCf->GetCodeManager()->FixContext(ICodeManager::FILTER_CONTEXT, &context, pCf->GetCodeInfo(),
+ EHClausePtr->FilterOffset, nestingLevel, thrownObj, pCf->GetCodeManState(),
+ &pShadowSP, &pEndFilter);
+
+ // End of the filter is the same as start of handler
+ if (pEndFilter)
+ {
+ *pEndFilter = EHClausePtr->HandlerStartPC;
+ }
+
+ // ExceptionFilterFrame serves two purposes:
+ //
+    // 1. It serves as a frame that stops the managed search for a handler
+    //    if we fault in the filter. ThrowCallbackType.pTopFrame is going to point
+    //    to this frame during the search for an exception handler inside the filter.
+    //    The search needs a frame to stop at; if we had no frame here, exceptions
+    //    in filters would not be swallowed correctly, since we would walk past the
+    //    EX_TRY/EX_CATCH block in COMPlusThrowCallbackHelper.
+    //
+    // 2. It allows the SHADOW_SP_FILTER_DONE flag to be set in UnwindFrames()
+    //    if we fault in the filter. We have to set this flag together with the
+    //    unwinding of the filter frame. Using a regular C++ holder to clear the
+    //    flag here would cause GC holes: the stack would be in an inconsistent
+    //    state if we triggered a GC just before returning from UnwindFrames.
+
+ FrameWithCookie<ExceptionFilterFrame> exceptionFilterFrame(pShadowSP);
+ retVal = CallJitEHFilterWorker(pShadowSP, &context);
+
+ exceptionFilterFrame.Pop();
+
+ return retVal;
+}
+
+void CallJitEHFinally(CrawlFrame* pCf, BYTE* startPC, EE_ILEXCEPTION_CLAUSE *EHClausePtr, DWORD nestingLevel)
+{
+ WRAPPER_NO_CONTRACT;
+
+ EHContext context;
+ context.Setup(PCODE(startPC + EHClausePtr->HandlerStartPC), pCf->GetRegisterSet());
+
+ size_t * pShadowSP = NULL; // Write Esp to *pShadowSP before jumping to handler
+
+ size_t * pFinallyEnd = NULL;
+ pCf->GetCodeManager()->FixContext(
+ ICodeManager::FINALLY_CONTEXT, &context, pCf->GetCodeInfo(),
+ EHClausePtr->HandlerStartPC, nestingLevel, ObjectToOBJECTREF((Object *) NULL), pCf->GetCodeManState(),
+ &pShadowSP, &pFinallyEnd);
+
+ if (pFinallyEnd)
+ {
+ *pFinallyEnd = EHClausePtr->HandlerEndPC;
+ }
+
+ CallJitEHFinallyHelper(pShadowSP, &context);
+
+ //
+ // Update the registers using new context
+ //
+    // This is necessary to reflect GC pointer changes made in the middle of an unwind inside a
+    // finally clause, because:
+    // 1. The GC won't see the part of the stack inside the try (which has thrown an exception) that
+    //    is already unwound, and thus won't update GC pointers for that portion of the stack -- it
+    //    only sees the call stack in the finally.
+    // 2. Upon return from the finally, the unwind process continues and unwinds the stack based on
+    //    the part inside the try, and so won't see the values updated in the finally.
+    // As a result, we need to manually update the context from the register values upon return from
+    // the finally.
+    //
+    // Note that we only update the registers for finally clauses because:
+    // 1. For filter handlers, the stack walker is able to see the whole stack (including the try
+    //    part) with the help of ExceptionFilterFrame, as filter handlers are called in the first pass.
+    // 2. For catch handlers, the current unwinding has already finished.
+ //
+ context.UpdateFrame(pCf->GetRegisterSet());
+
+    // This does not need to be guarded by a holder because the frame is dead if an exception gets thrown.
+    // Filters are different, since they run in the first pass; for them the shadowSP reset is handled in CallJitEHFilter.
+ if (pShadowSP) {
+ *pShadowSP = 0; // reset the shadowSP to 0
+ }
+}
+#if defined(_MSC_VER)
+#pragma warning (default : 4731)
+#endif
+
+//=====================================================================
+// *********************************************************************
+BOOL ComPlusFrameSEH(EXCEPTION_REGISTRATION_RECORD* pEHR)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return ((LPVOID)pEHR->Handler == (LPVOID)COMPlusFrameHandler || (LPVOID)pEHR->Handler == (LPVOID)COMPlusNestedExceptionHandler);
+}
+
+
+//
+//-------------------------------------------------------------------------
+// This is installed when we call COMPlusFrameHandler to provide a bound for
+// determining when we are within a nested exception
+//-------------------------------------------------------------------------
+EXCEPTION_HANDLER_IMPL(COMPlusNestedExceptionHandler)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND))
+ {
+ LOG((LF_EH, LL_INFO100, " COMPlusNestedHandler(unwind) with %x at %x\n", pExceptionRecord->ExceptionCode,
+ pContext ? GetIP(pContext) : 0));
+
+
+ // We're unwinding past a nested exception record, which means that we've thrown
+ // a new exception out of a region in which we're handling a previous one. The
+ // previous exception is overridden -- and needs to be unwound.
+
+        // The preceding is ALMOST true. There is one more case, where we use setjmp/longjmp
+        // from within a nested handler. We won't have a nested exception in that case -- just
+ // the unwind.
+
+ Thread* pThread = GetThread();
+ _ASSERTE(pThread);
+ ExInfo* pExInfo = &(pThread->GetExceptionState()->m_currentExInfo);
+ ExInfo* pPrevNestedInfo = pExInfo->m_pPrevNestedInfo;
+
+ if (pPrevNestedInfo == &((NestedHandlerExRecord*)pEstablisherFrame)->m_handlerInfo)
+ {
+ _ASSERTE(pPrevNestedInfo);
+
+ LOG((LF_EH, LL_INFO100, "COMPlusNestedExceptionHandler: PopExInfo(): popping nested ExInfo at 0x%p\n", pPrevNestedInfo));
+
+ pPrevNestedInfo->DestroyExceptionHandle();
+ pPrevNestedInfo->m_StackTraceInfo.FreeStackTrace();
+
+#ifdef DEBUGGING_SUPPORTED
+ if (g_pDebugInterface != NULL)
+ {
+ g_pDebugInterface->DeleteInterceptContext(pPrevNestedInfo->m_DebuggerExState.GetDebuggerInterceptContext());
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ pExInfo->m_pPrevNestedInfo = pPrevNestedInfo->m_pPrevNestedInfo;
+
+ } else {
+ // The whacky setjmp/longjmp case. Nothing to do.
+ }
+
+ } else {
+ LOG((LF_EH, LL_INFO100, " InCOMPlusNestedHandler with %x at %x\n", pExceptionRecord->ExceptionCode,
+ pContext ? GetIP(pContext) : 0));
+ }
+
+
+    // There is a nasty "gotcha" in the way exception unwinding, finallys, and nested exceptions
+    // interact. Here's the scenario ... it involves two exceptions, one normal one, and one
+    // raised in a finally.
+    //
+    // The first exception occurs, and is caught by some handler way up the stack. That handler
+    // calls RtlUnwind -- and handlers that didn't catch this first exception are called again, with
+    // the UNWIND flag set. If one of the handlers throws an exception during
+    // unwind (like a throw from a finally) -- then that same handler is not called during
+    // the unwind pass of the second exception. [ASIDE: It is called on first-pass.]
+    //
+    // What that means is that the COMPlusExceptionHandler can't count on unwinding itself correctly
+    // if an exception is thrown from a finally. Instead, it relies on the NestedExceptionHandler
+    // that it pushes for this.
+ //
+
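+    // A minimal SEH sketch of that scenario (illustrative only, not CLR code):
+    //
+    //     __try {                                  // caught by a handler way up the stack
+    //         __try {
+    //             RaiseException(1, 0, 0, NULL);   // first exception
+    //         } __finally {
+    //             RaiseException(2, 0, 0, NULL);   // thrown during the unwind; handlers
+    //         }                                    // between here and the catcher get no
+    //     } __except (EXCEPTION_EXECUTE_HANDLER) { // unwind pass for exception #2
+    //     }
+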
+ EXCEPTION_DISPOSITION retval = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler);
+ LOG((LF_EH, LL_INFO100, "Leaving COMPlusNestedExceptionHandler with %d\n", retval));
+ return retval;
+}
+
+EXCEPTION_REGISTRATION_RECORD *FindNestedEstablisherFrame(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ while (pEstablisherFrame->Handler != (PEXCEPTION_ROUTINE)COMPlusNestedExceptionHandler) {
+ pEstablisherFrame = pEstablisherFrame->Next;
+ _ASSERTE(pEstablisherFrame != EXCEPTION_CHAIN_END); // should always find one
+ }
+ return pEstablisherFrame;
+}
+
+EXCEPTION_HANDLER_IMPL(FastNExportExceptHandler)
+{
+ WRAPPER_NO_CONTRACT;
+
+    // Most of our logic is in common with COMPlusFrameHandler.
+ EXCEPTION_DISPOSITION retval = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler);
+
+#ifdef _DEBUG
+    // If the exception is escaping the last CLR personality routine on the stack,
+    // then set a flag on the thread to indicate so.
+ if (retval == ExceptionContinueSearch)
+ {
+ SetReversePInvokeEscapingUnhandledExceptionStatus(IS_UNWINDING(pExceptionRecord->ExceptionFlags), pEstablisherFrame);
+ }
+#endif // _DEBUG
+
+ return retval;
+}
+
+
+// Just like a regular NExport handler -- except it pops an extra frame on unwind. A handler
+// like this is needed by the COMMethodStubProlog code. It first pushes a frame -- and then
+// pushes a handler. When we unwind, we need to pop the extra frame to avoid corrupting the
+// frame chain in the event of an unmanaged catcher.
+//
+EXCEPTION_HANDLER_IMPL(UMThunkPrestubHandler)
+{
+    // <TODO>@todo: we'd like to have a dynamic contract here, but there's a problem. (Bug 129180) Entering the CRST used
+    // in HandleManagedFault leaves the no-trigger count incremented. The destructor of this contract will restore
+    // it to zero, then when we leave the CRST in LinkFrameAndThrow, we assert because we're trying to decrement the
+    // gc-trigger count down past zero. The solution is to fix what we're doing with this CRST.</TODO>
+ STATIC_CONTRACT_THROWS; // COMPlusFrameHandler throws
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ EXCEPTION_DISPOSITION retval = ExceptionContinueSearch;
+
+ BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
+
+ // We must forward to the COMPlusFrameHandler. This will unwind the Frame Chain up to here, and also leave the
+ // preemptive GC mode set correctly.
+ retval = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler);
+
+#ifdef _DEBUG
+    // If the exception is escaping the last CLR personality routine on the stack,
+    // then set a flag on the thread to indicate so.
+ if (retval == ExceptionContinueSearch)
+ {
+ SetReversePInvokeEscapingUnhandledExceptionStatus(IS_UNWINDING(pExceptionRecord->ExceptionFlags), pEstablisherFrame);
+ }
+#endif // _DEBUG
+
+ if (IS_UNWINDING(pExceptionRecord->ExceptionFlags))
+ {
+ // Pops an extra frame on unwind.
+
+ GCX_COOP(); // Must be cooperative to modify frame chain.
+
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+ Frame *pFrame = pThread->GetFrame();
+ pFrame->ExceptionUnwind();
+ pFrame->Pop(pThread);
+ }
+
+ END_CONTRACT_VIOLATION;
+
+ return retval;
+}
+
+
+bool IsInstrModifyFault(PEXCEPTION_POINTERS pExceptionInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // No longer implemented on x86.
+ return false;
+}
+
+LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ LONG result = EXCEPTION_CONTINUE_SEARCH;
+
+ // This function can be called during the handling of a SO
+ //BEGIN_ENTRYPOINT_VOIDRET;
+
+ result = CLRVectoredExceptionHandler(pExceptionInfo);
+
+ if (EXCEPTION_EXECUTE_HANDLER == result)
+ {
+ result = EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ //END_ENTRYPOINT_VOIDRET;
+
+ return result;
+}
+
+#ifdef FEATURE_COMINTEROP
+// The reverse COM interop path needs to be sure to pop the ComMethodFrame that is pushed, but we do not want
+// to have an additional FS:0 handler between the COM callsite and the call into managed. So we push this
+// FS:0 handler, which will defer to the usual COMPlusFrameHandler and then perform the cleanup of the
+// ComMethodFrame, if needed.
+EXCEPTION_HANDLER_IMPL(COMPlusFrameHandlerRevCom)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ // Defer to COMPlusFrameHandler
+ EXCEPTION_DISPOSITION result = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler);
+
+ if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND))
+ {
+ // Do cleanup as needed
+ ComMethodFrame::DoSecondPassHandlerCleanup(GetCurrFrame(pEstablisherFrame));
+ }
+
+ return result;
+}
+#endif // FEATURE_COMINTEROP
+
+
+// Returns TRUE if caller should resume execution.
+BOOL
+AdjustContextForVirtualStub(
+ EXCEPTION_RECORD *pExceptionRecord,
+ CONTEXT *pContext)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ Thread * pThread = GetThread();
+
+ // We may not have a managed thread object. Example is an AV on the helper thread.
+ // (perhaps during StubManager::IsStub)
+ if (pThread == NULL)
+ {
+ return FALSE;
+ }
+
+ PCODE f_IP = GetIP(pContext);
+
+ VirtualCallStubManager::StubKind sk;
+ /* VirtualCallStubManager *pMgr = */ VirtualCallStubManager::FindStubManager(f_IP, &sk);
+
+ if (sk == VirtualCallStubManager::SK_DISPATCH)
+ {
+ if (*PTR_WORD(f_IP) != X86_INSTR_CMP_IND_ECX_IMM32)
+ {
+ _ASSERTE(!"AV in DispatchStub at unknown instruction");
+ return FALSE;
+ }
+ }
+ else
+ if (sk == VirtualCallStubManager::SK_RESOLVE)
+ {
+ if (*PTR_WORD(f_IP) != X86_INSTR_MOV_EAX_ECX_IND)
+ {
+ _ASSERTE(!"AV in ResolveStub at unknown instruction");
+ return FALSE;
+ }
+
+ SetSP(pContext, dac_cast<PCODE>(dac_cast<PTR_BYTE>(GetSP(pContext)) + sizeof(void*))); // rollback push eax
+ }
+ else
+ {
+ return FALSE;
+ }
+
+ PCODE callsite = GetAdjustedCallAddress(*dac_cast<PTR_PCODE>(GetSP(pContext)));
+ pExceptionRecord->ExceptionAddress = (PVOID)callsite;
+ SetIP(pContext, callsite);
+
+ // put ESP back to what it was before the call.
+ SetSP(pContext, dac_cast<PCODE>(dac_cast<PTR_BYTE>(GetSP(pContext)) + sizeof(void*)));
+
+ return TRUE;
+}
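+
+// For reference, the rollback above assumes the following stack layout at the
+// faulting instruction (a sketch inferred from the code; the stub generators
+// are the authoritative source):
+//
+//     ResolveStub:  [esp+0] saved eax                <- undone by the first SetSP
+//                   [esp+4] return address in caller
+//     DispatchStub: [esp+0] return address in caller
+//
+// GetAdjustedCallAddress turns the return address back into the address of the
+// call instruction itself, and the final SetSP restores ESP to its pre-call
+// value.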
+
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/i386/fptext.asm b/src/vm/i386/fptext.asm
new file mode 100644
index 0000000000..621e4c6c60
--- /dev/null
+++ b/src/vm/i386/fptext.asm
@@ -0,0 +1,278 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+ .386
+ .model flat
+
+ option casemap:none
+ public _DoubleToNumber,_NumberToDouble
+
+; NUMBER structure
+
+nPrecision equ (dword ptr 0)
+nScale equ (dword ptr 4)
+nSign equ (dword ptr 8)
+nDigits equ (word ptr 12)
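+
+; A rough C view of the layout these offsets assume (illustrative only):
+;
+;   struct NUMBER {
+;       int     precision;   /* +0  */
+;       int     scale;       /* +4  */
+;       int     sign;        /* +8  */
+;       wchar_t digits[];    /* +12, NUL-terminated, one digit character per word */
+;   };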
+
+ .code
+
+; Powers of 10 from 1.0E1 to 1.0E15 increasing by 1
+
+Pow10By1 label tbyte
+
+ dt 1.0E1
+ dt 1.0E2
+ dt 1.0E3
+ dt 1.0E4
+ dt 1.0E5
+ dt 1.0E6
+ dt 1.0E7
+ dt 1.0E8
+ dt 1.0E9
+ dt 1.0E10
+ dt 1.0E11
+ dt 1.0E12
+ dt 1.0E13
+ dt 1.0E14
+ dt 1.0E15
+
+; Powers of 10 from 1.0E16 to 1.0E336 increasing by 16
+
+Pow10By16 label tbyte
+
+ dt 1.0E16
+ dt 1.0E32
+ dt 1.0E48
+ dt 1.0E64
+ dt 1.0E80
+ dt 1.0E96
+ dt 1.0E112
+ dt 1.0E128
+ dt 1.0E144
+ dt 1.0E160
+ dt 1.0E176
+ dt 1.0E192
+ dt 1.0E208
+ dt 1.0E224
+ dt 1.0E240
+ dt 1.0E256
+ dt 1.0E272
+ dt 1.0E288
+ dt 1.0E304
+ dt 1.0E320
+ dt 1.0E336
+
+; Single precision constants
+
+Single10 dd 10.0
+SingleINF dd 7F800000H
+
+g_CwStd dw 137fH ;Mask all errors, 64-bit, round near
+
+; void _cdecl DoubleToNumber(double value, int precision, NUMBER* number)
+
+_DoubleToNumber proc
+
+value equ (qword ptr [ebp+8])
+precision equ (dword ptr [ebp+16])
+number equ (dword ptr [ebp+20])
+paramSize = 16
+
+cwsave equ (word ptr [ebp-24])
+digits equ (tbyte ptr [ebp-20])
+temp equ (tbyte ptr [ebp-10])
+localSize = 24
+
+ push ebp
+ mov ebp,esp
+ sub esp,localSize
+ push edi
+ push ebx
+ fnstcw cwsave
+ fldcw g_CwStd
+ fld value
+ fstp temp
+ mov edi,number
+ mov eax,precision
+ mov nPrecision[edi],eax
+ movzx eax,word ptr temp[8]
+ mov edx,eax
+ shr edx,15
+ mov nSign[edi],edx
+ and eax,7FFFH
+ je DN1
+ cmp eax,7FFFH
+ jne DN10
+ mov eax,80000000H
+ cmp dword ptr temp[4],eax
+ jne DN1
+ cmp dword ptr temp[0],0
+ jne DN1
+ dec eax
+DN1: mov nScale[edi],eax
+ mov nDigits[edi],0
+ jmp DN30
+DN10: fld value
+ sub eax,16382+58 ;Remove bias and 58 bits
+ imul eax,19728 ;log10(2) * 2^16 = .30103 * 65536
+ add eax,0FFFFH ;Round up
+ sar eax,16 ;Only use high half
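+; The five instructions above form a fixed-point estimate of the decimal
+; exponent: remove the exponent bias (plus 58 bits of headroom), multiply by
+; log10(2) in 16.16 fixed point (19728/65536 ~= 0.30103), round up, and keep
+; the high half. The estimate sets the scale (+18, below) and drives the
+; power-of-ten scaling so that fbstp produces 18 significant BCD digits.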
+ lea edx,[eax+18]
+ mov nScale[edi],edx
+ neg eax
+ call ScaleByPow10
+ fbstp digits
+ xor eax,eax
+ xor ebx,ebx
+ mov ecx,precision
+ inc ecx
+ mov edx,8
+ mov al,byte ptr digits[8]
+ test al,0F0H
+ jne DN11
+ dec nScale[edi]
+ jmp DN12
+DN11: shr al,4
+ dec ecx
+ je DN20
+ add al,'0'
+ mov nDigits[edi+ebx*2],ax
+ inc ebx
+ mov al,byte ptr digits[edx]
+DN12: and al,0FH
+ dec ecx
+ je DN20
+ add al,'0'
+ mov nDigits[edi+ebx*2],ax
+ inc ebx
+ dec edx
+ jl DN22 ; We've run out of digits & don't have a rounding digit, so we'll skip the rounding step.
+ mov al,byte ptr digits[edx]
+ jmp DN11
+DN20: cmp al,5
+ jb DN22
+DN21: dec ebx
+ inc nDigits[edi+ebx*2]
+ cmp nDigits[edi+ebx*2],'9'
+ jbe DN23
+ or ebx,ebx
+ jne DN21
+ mov nDigits[edi+ebx*2],'1'
+ inc nScale[edi]
+ jmp DN23
+DN22: dec ebx
+ cmp nDigits[edi+ebx*2],'0'
+ je DN22
+DN23: mov nDigits[edi+ebx*2+2],0
+DN30:
+ fldcw cwsave ;;Restore original CW
+ pop ebx
+ pop edi
+ mov esp,ebp
+ pop ebp
+ ret ;made _cdecl for WinCE paramSize
+
+_DoubleToNumber endp
+
+; void _cdecl NumberToDouble(NUMBER* number, double* value)
+_NumberToDouble proc
+
+number equ (dword ptr [ebp+8])
+value equ (dword ptr [ebp+12])
+paramSize = 8
+
+cwsave equ (word ptr [ebp-8])
+temp equ (dword ptr [ebp-4])
+localSize = 8
+
+ push ebp
+ mov ebp,esp ; Save the stack ptr
+ sub esp,localSize ;
+ fnstcw cwsave
+ fldcw g_CwStd
+ fldz ; zero the register
+        mov ecx,number              ; load the NUMBER struct pointer into ecx
+ xor edx,edx ; clear edx
+ cmp dx,nDigits[ecx] ; if the first digit is 0 goto SignResult
+ je SignResult
+ mov eax,nScale[ecx] ; store the scale in eax
+ cmp eax,-330 ; if the scale is less than or equal to -330 goto Cleanup
+ jle Cleanup
+ cmp eax,310 ; if the scale is less than 310, goto ParseDigits
+ jl ParseDigits
+        fstp st(0)                  ; pop the zero off the fp stack to make room for infinity
+ fld SingleINF ; Load infinity
+ jmp SignResult ; Goto SignResult
+ParseDigits:
+ movzx eax,nDigits[ecx+edx*2]; load the character at nDigits[edx];
+ sub eax,'0' ; subtract '0'
+        jc ScaleResult              ; character below '0' (the NUL terminator): out of digits
+        mov temp,eax                ; store the current digit in temp
+ fmul Single10 ; Multiply by 10
+ fiadd temp ; Add the digit which we just found
+ inc edx ; increment the counter
+        cmp edx,18                  ; if (edx<18) goto ParseDigits
+ jb ParseDigits
+ScaleResult:
+ mov eax,nScale[ecx] ; eax = scale
+ sub eax,edx ; scale -= (number of digits)
+ call ScaleByPow10 ; multiply the result by 10^scale
+SignResult:
+ cmp nSign[ecx],0 ; If the sign is 0 already go to Cleanup, otherwise change the sign.
+ je Cleanup
+ fchs
+Cleanup:
+        mov edx,value               ; load the output pointer into edx
+        fstp qword ptr [edx]        ; pop the result off the fp stack into *value
+ fldcw cwsave ; Restore original CW
+ mov esp,ebp ; restore the stack frame & exit.
+ pop ebp
+ ret ;Made _cdecl for WinCE paramSize
+
+_NumberToDouble endp
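+
+; In C terms, the procedure above computes roughly the following (an
+; illustrative sketch, not the exact code):
+;
+;   double v = 0.0;
+;   int    i = 0;
+;   while (i < 18 && number->digits[i] >= '0')   /* the NUL terminator ends it */
+;       v = v * 10.0 + (number->digits[i++] - '0');
+;   v = ScaleByPow10(v, number->scale - i);
+;   *value = number->sign ? -v : v;
+;
+; with two early outs: a scale <= -330 yields 0.0, and a scale >= 310 yields
+; infinity, since the result would underflow or overflow a double anyway.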
+
+; Scale st(0) by 10^eax
+
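+; The two tables split the exponent so that any scale needs at most two
+; multiplies (or divides): 10^n = Pow10By1[(n & 15) - 1] * Pow10By16[(n >> 4) - 1]
+; (each factor skipped when its index part is zero). A rough C equivalent of
+; the positive-exponent path (illustrative only):
+;
+;   double Scale(double x, int n) {                   /* n > 0 */
+;       if (n & 15) x *= pow10By1[(n & 15) - 1];      /* 10^1 .. 10^15  */
+;       if (n >> 4) x *= pow10By16[(n >> 4) - 1];     /* 10^16, 10^32.. */
+;       return x;
+;   }
+;
+; The negative path (SP3 onward) is identical with fdiv in place of fmul.
+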
+ScaleByPow10 proc
+ test eax,eax
+ je SP2
+ jl SP3
+ mov edx,eax
+ and edx,0FH
+ je SP1
+ lea edx,[edx+edx*4]
+ fld Pow10By1[edx*2-10]
+ fmul
+SP1: mov edx,eax
+ shr edx,4
+ test edx, edx ; remove partial flag stall caused by shr
+ je SP2
+ lea edx,[edx+edx*4]
+ fld Pow10By16[edx*2-10]
+ fmul
+SP2: ret
+SP3: neg eax
+ mov edx,eax
+ and edx,0FH
+ je SP4
+ lea edx,[edx+edx*4]
+ fld Pow10By1[edx*2-10]
+ fdiv
+SP4: mov edx,eax
+ shr edx,4
+ test edx, edx ; remove partial flag stall caused by shr
+ je SP5
+ lea edx,[edx+edx*4]
+ fld Pow10By16[edx*2-10]
+ fdiv
+SP5: ret
+ScaleByPow10 endp
+
+ end
diff --git a/src/vm/i386/gmsasm.asm b/src/vm/i386/gmsasm.asm
new file mode 100644
index 0000000000..4b6ecbd254
--- /dev/null
+++ b/src/vm/i386/gmsasm.asm
@@ -0,0 +1,38 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+;
+; *** NOTE: If you make changes to this file, propagate the changes to
+; gmsasm.s in this directory
+
+ .586
+ .model flat
+
+include asmconstants.inc
+
+ option casemap:none
+ .code
+
+; int __fastcall LazyMachStateCaptureState(struct LazyMachState *pState);
+@LazyMachStateCaptureState@4 proc public
+ mov [ecx+MachState__pRetAddr], 0 ; marks that this is not yet valid
+ mov [ecx+MachState__edi], edi ; remember register values
+ mov [ecx+MachState__esi], esi
+ mov [ecx+MachState__ebx], ebx
+ mov [ecx+LazyMachState_captureEbp], ebp
+ mov [ecx+LazyMachState_captureEsp], esp
+
+ mov eax, [esp] ; capture return address
+ mov [ecx+LazyMachState_captureEip], eax
+ xor eax, eax
+ retn
+@LazyMachStateCaptureState@4 endp
+
+end
diff --git a/src/vm/i386/gmscpu.h b/src/vm/i386/gmscpu.h
new file mode 100644
index 0000000000..202ded593f
--- /dev/null
+++ b/src/vm/i386/gmscpu.h
@@ -0,0 +1,140 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/**************************************************************/
+/* gmscpu.h */
+/**************************************************************/
+/* HelperFrame defines the 'GET_STATE(machState)' macro, which
+   figures out what the state of the machine will be when the
+   current method returns. It then stores the state in the
+   JIT_machState structure. */
+
+/**************************************************************/
+
+#ifndef __gmsx86_h__
+#define __gmsx86_h__
+
+
+#ifdef _DEBUG
+class HelperMethodFrame;
+struct MachState;
+EXTERN_C MachState* STDCALL HelperMethodFrameConfirmState(HelperMethodFrame* frame, void* esiVal, void* ediVal, void* ebxVal, void* ebpVal);
+#endif
+
+ // A MachState indicates the register state of the processor at some point in time (usually
+ // just before or after a call is made). It can be created in one of two ways: either explicitly
+ // (when for some reason you know the values of all the registers), or implicitly using the
+ // GET_STATE macros.
+
+typedef DPTR(struct MachState) PTR_MachState;
+struct MachState {
+
+ MachState()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ INDEBUG(memset(this, 0xCC, sizeof(MachState));)
+ }
+
+ bool isValid() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(dac_cast<TADDR>(_pRetAddr) != INVALID_POINTER_CC); return(_pRetAddr != 0); }
+ TADDR* pEdi() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(dac_cast<TADDR>(_pEdi) != INVALID_POINTER_CC); return(_pEdi); }
+ TADDR* pEsi() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(dac_cast<TADDR>(_pEsi) != INVALID_POINTER_CC); return(_pEsi); }
+ TADDR* pEbx() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(dac_cast<TADDR>(_pEbx) != INVALID_POINTER_CC); return(_pEbx); }
+ TADDR* pEbp() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(dac_cast<TADDR>(_pEbp) != INVALID_POINTER_CC); return(_pEbp); }
+ TADDR esp() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(isValid()); return(_esp); }
+ PTR_TADDR pRetAddr() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(isValid()); return(_pRetAddr); }
+ TADDR GetRetAddr() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(isValid()); return *_pRetAddr; }
+#ifndef DACCESS_COMPILE
+ void SetRetAddr(TADDR* addr) { LIMITED_METHOD_CONTRACT; _ASSERTE(isValid()); _pRetAddr = addr; }
+#endif
+
+ friend class HelperMethodFrame;
+ friend class CheckAsmOffsets;
+ friend struct LazyMachState;
+#ifdef _DEBUG
+ friend MachState* STDCALL HelperMethodFrameConfirmState(HelperMethodFrame* frame, void* esiVal, void* ediVal, void* ebxVal, void* ebpVal);
+#endif
+
+
+protected:
+ // Note the fields are laid out to make generating a
+ // MachState structure from assembly code very easy
+
+ // The state of all the callee saved registers.
+ // If the register has been spilled to the stack, p<REG>
+ // points at that stack location; otherwise it points
+ // at the <REG> field itself
+ PTR_TADDR _pEdi;
+ TADDR _edi;
+ PTR_TADDR _pEsi;
+ TADDR _esi;
+ PTR_TADDR _pEbx;
+ TADDR _ebx;
+ PTR_TADDR _pEbp;
+ TADDR _ebp;
+
+ TADDR _esp; // stack pointer after the function returns
+ PTR_TADDR _pRetAddr; // The address of the stored IP address (points into the stack)
+};
+
+/********************************************************************/
+/* This allows you to defer the computation of the Machine state
+ until later. Note that we don't reuse slots, because we want
+ this to be threadsafe without locks */
+
+struct LazyMachState;
+typedef DPTR(LazyMachState) PTR_LazyMachState;
+struct LazyMachState : public MachState {
+ // compute the machine state of the processor as it will exist just
+ // after the return, after at most 'funCallDepth' functions have returned.
+ // If 'testFtn' is non-NULL, the return address is tested at each
+ // return instruction encountered. If this test returns non-NULL,
+ // then stack walking stops (thus you can walk up to the point that the
+ // return address matches some criteria).
+
+ // Normally this is called with funCallDepth=1 and testFtn = 0 so that
+ // it returns the state of the processor after the function that called 'captureState()'
+ void setLazyStateFromUnwind(MachState* copy);
+ static void unwindLazyState(LazyMachState* baseState,
+ MachState* lazyState,
+ int funCallDepth = 1,
+ HostCallPreference hostCallPreference = AllowHostCalls);
+
+ friend class HelperMethodFrame;
+ friend class CheckAsmOffsets;
+private:
+ TADDR captureEbp; // Ebp at the time of capture
+ TADDR captureEsp; // Esp at the time of capture
+ TADDR captureEip; // Eip at the time of capture
+};
+
+inline void LazyMachState::setLazyStateFromUnwind(MachState* copy)
+{
+ // _pRetAddr has to be the last thing updated when we make the copy (because it
+ // is the _pRetAddr becoming non-zero that flips this from invalid to valid).
+ // We assert that it is the last field in the struct.
+ static_assert_no_msg(offsetof(MachState, _pRetAddr) + sizeof(_pRetAddr) == sizeof(MachState));
+
+ memcpy(this, copy, offsetof(MachState, _pRetAddr));
+
+ // this has to be last
+ VolatileStore((TADDR*)&_pRetAddr, dac_cast<TADDR>(copy->_pRetAddr));
+}
+
+// Do the initial capture of the machine state. This is meant to be
+// as lightweight as possible, as we may never need the state that
+// we capture. To complete the process, call 'getMachState()',
+// which finishes the job.
+EXTERN_C int __fastcall LazyMachStateCaptureState(struct LazyMachState *pState);
+
+// CAPTURE_STATE captures just enough register state so that the state of the
+// processor can be determined just after the routine that has CAPTURE_STATE in
+// it returns.
+
+// Note that the return is never actually taken; it is there for epilog walking.
+#define CAPTURE_STATE(machState, ret) \
+ if (LazyMachStateCaptureState(machState)) ret
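+
+// Illustrative sketch only (not part of this header); names below are
+// hypothetical. A caller pairs the cheap capture with a later, optional unwind:
+//
+//     LazyMachState lazyState;
+//     CAPTURE_STATE(&lazyState, return);  // capture always returns 0, so the
+//                                         // 'return' is never actually taken
+//     ... fast path that usually completes without triggering a GC ...
+//     MachState unwound;                  // pay for the simulation only when
+//     LazyMachState::unwindLazyState(&lazyState, &unwound); // a GC can happen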
+
+#endif
diff --git a/src/vm/i386/gmsx86.cpp b/src/vm/i386/gmsx86.cpp
new file mode 100644
index 0000000000..3bc9002211
--- /dev/null
+++ b/src/vm/i386/gmsx86.cpp
@@ -0,0 +1,1238 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/**************************************************************/
+/* gmsx86.cpp */
+/**************************************************************/
+
+#include "common.h"
+#include "gmscpu.h"
+
+/***************************************************************/
+/* setMachState figures out what the state of the CPU will be
+   when the function that calls 'setMachState' returns. It stores
+   this information in 'frame'.
+
+   setMachState works by simulating the execution of the
+   instructions starting at the instruction following the
+   call to 'setMachState' and continuing until a return instruction
+   is simulated. To avoid having to process arbitrary code,
+   'setMachState' should be called as follows
+
+       if (machState.setMachState != 0) return;
+
+   setMachState is guaranteed to return 0 (so the return
+   statement will never be executed), but the expression above
+   ensures that there is a 'quick' path to the epilog
+   of the function. This ensures that setMachState will only
+   have to parse a limited number of X86 instructions. */
+
+
+/***************************************************************/
+#ifndef POISONC
+#define POISONC ((sizeof(int *) == 4)?0xCCCCCCCCU:UI64(0xCCCCCCCCCCCCCCCC))
+#endif
+
+/***************************************************************/
+/* The 'zeroFtn' and 'recursiveFtn' functions are only here to determine
+   if mscorwks itself has been instrumented by a profiler
+   that intercepts calls or epilogs of functions (see the
+   callsInstrumented and epilogInstrumented functions). */
+
+#if !defined(DACCESS_COMPILE)
+
+#pragma optimize("gsy", on ) // optimize to ensure that code generation does not have junk in it
+#pragma warning(disable:4717)
+
+static int __stdcall zeroFtn() {
+ return 0;
+}
+
+static int __stdcall recursiveFtn() {
+ return recursiveFtn()+1;
+}
+
+#pragma optimize("", on )
+
+
+/* Has mscorwks been instrumented so that calls are morphed into push XXXX call <helper> */
+static bool callsInstrumented() {
+ // Does the recursive function begin with push XXXX call <helper>?
+ PTR_BYTE ptr = PTR_BYTE(recursiveFtn);
+
+ return (ptr[0] == 0x68 && ptr[5] == 0xe8); // PUSH XXXX, call <helper>
+}
+
+/* Has mscorwks been instrumented so function prolog and epilogs are replaced with
+ jmp [XXXX] */
+
+static bool epilogInstrumented() {
+
+ PTR_BYTE ptr = PTR_BYTE(zeroFtn);
+ if (ptr[0] == 0xe8) // call <helper> (prolog instrumentation)
+ ptr += 5;
+ if (ptr[0] == 0x33 && ptr[1] == 0xc0) // xor eax eax
+ ptr += 2;
+ return (ptr[0] == 0xeb || ptr[0] == 0xe9); // jmp <XXXX>
+}
+
+#else
+
+ // Note that we have the callsInstrumented and epilogInstrumented
+ // functions so that the looser heuristics used for instrumented code
+ // can't foul up an uninstrumented mscorwks. For simplicity's sake we
+ // don't bother with this in the DAC, which means that the DAC could
+ // be misled more frequently than mscorwks itself, but I still think
+ // it will not be misled in any real scenario.
+static bool callsInstrumented() { LIMITED_METHOD_DAC_CONTRACT; return true; }
+static bool epilogInstrumented() { LIMITED_METHOD_DAC_CONTRACT; return true; }
+
+#endif // !defined(DACCESS_COMPILE)
+
+/***************************************************************/
+/* returns true if a call to 'ip' should be entered by the
+   epilog walker. Basically we are looking for things that look
+   like __SEH_epilog. In particular we look for things that
+   pop a register before doing a push. If we see something
+   that we don't recognize, we don't consider it an epilog helper
+   and return false.
+*/
+
+static bool shouldEnterCall(PTR_BYTE ip) {
+ SUPPORTS_DAC;
+
+ int datasize; // helper variable for decoding of address modes
+ int mod; // helper variable for decoding of mod r/m
+ int rm; // helper variable for decoding of mod r/m
+
+ int pushes = 0;
+
+ // we should start seeing unbalanced pops within 48 instructions. If not, it is not a special epilog function.
+ // The only reason we need as many instructions as we have below is because coreclr
+ // gets instrumented for profiling, code coverage, BBT etc, and we want these things to
+ // just work.
+ for (int i = 0; i < 48; i++) {
+ switch(*ip) {
+ case 0x68: // push 0xXXXXXXXX
+ ip += 5;
+
+ // For office profiler. They morph tail calls into push TARGET; jmp helper
+ // so if you see
+ //
+ // push XXXX
+ // jmp xxxx
+ //
+ // and we notice that coreclr has been instrumented and
+ // xxxx starts with a JMP [] then do what you would do for jmp XXXX
+ if (*ip == 0xE9 && callsInstrumented()) { // jmp helper
+ PTR_BYTE tmpIp = ip + 5;
+ PTR_BYTE target = tmpIp + (__int32)*((PTR_TADDR)(PTR_TO_TADDR(tmpIp) - 4));
+ if (target[0] == 0xFF && target[1] == 0x25) { // jmp [xxxx] (to external dll)
+ ip = PTR_BYTE(*((PTR_TADDR)(PTR_TO_TADDR(ip) - 4)));
+ }
+ }
+ else {
+ pushes++;
+ }
+ break;
+
+ case 0x50: // push EAX
+ case 0x51: // push ECX
+ case 0x52: // push EDX
+ case 0x53: // push EBX
+ case 0x55: // push EBP
+ case 0x56: // push ESI
+ case 0x57: // push EDI
+ pushes++;
+ ip++;
+ break;
+
+ case 0xE8: // call <disp32>
+ ip += 5;
+ pushes = 0; // This assumes that all of the previous pushes are arguments to this call
+ break;
+
+ case 0xFF:
+ if (ip[1] != 0x15) // call [XXXX] is OK (prolog of epilog helper is instrumented)
+ return false; // but everything else is not OK.
+ ip += 6;
+ pushes = 0; // This assumes that all of the previous pushes are arguments to this call
+ break;
+
+ case 0x9C: // pushfd
+ case 0x9D: // popfd
+ // a pushfd can never be an argument, so we model a pair of
+ // these instructions as not changing the stack so that a call
+ // that occurs between them does not consume the value of pushfd
+ ip++;
+ break;
+
+ case 0x5D: // pop EBP
+ case 0x5E: // pop ESI
+ case 0x5F: // pop EDI
+ case 0x5B: // pop EBX
+ case 0x58: // pop EAX
+ case 0x59: // pop ECX
+ case 0x5A: // pop EDX
+ if (pushes <= 0) {
+ // We now have more pops than pushes. This is our indication
+ // that we are in an EH_epilog function so we return true.
+ // This is the only way to exit this method with a retval of true.
+ return true;
+ }
+ --pushes;
+ ip++;
+ break;
+
+ case 0xA1: // MOV EAX, [XXXX]
+ ip += 5;
+ break;
+
+ case 0xC6: // MOV r/m8, imm8
+ datasize = 1;
+ goto decodeRM;
+
+ case 0x89: // MOV r/m, reg
+ if (ip[1] == 0xE5) // MOV EBP, ESP
+ return false;
+ if (ip[1] == 0xEC) // MOV ESP, EBP
+ return false;
+ goto move;
+
+ case 0x8B: // MOV reg, r/m
+ if (ip[1] == 0xE5) // MOV ESP, EBP
+ return false;
+ if (ip[1] == 0xEC) // MOV EBP, ESP
+ return false;
+ goto move;
+
+ case 0x88: // MOV reg, r/m (BYTE)
+ case 0x8A: // MOV r/m, reg (BYTE)
+
+ case 0x31: // XOR
+ case 0x32: // XOR
+ case 0x33: // XOR
+
+ move:
+ datasize = 0;
+
+ decodeRM:
+ // Note that we don't want to read from ip[] after
+ // we do ANY incrementing of ip
+
+ mod = (ip[1] & 0xC0) >> 6;
+ if (mod != 3) {
+ rm = (ip[1] & 0x07);
+ if (mod == 0) { // (mod == 0)
+ if (rm == 5)
+ ip += 4; // disp32
+ else if (rm == 4)
+ ip += 1; // [reg*K+reg]
+ // otherwise [reg]
+
+ }
+ else if (mod == 1) { // (mod == 1)
+ ip += 1; // for disp8
+ if (rm == 4)
+ ip += 1; // [reg*K+reg+disp8]
+ // otherwise [reg+disp8]
+ }
+ else { // (mod == 2)
+ ip += 4; // for disp32
+ if (rm == 4)
+ ip += 1; // [reg*K+reg+disp32]
+ // otherwise [reg+disp32]
+ }
+ }
+
+ ip += 2;
+ ip += datasize;
+ break;
+
+ case 0x64: // FS: prefix
+ ip++;
+ break;
+
+ case 0xEB: // jmp <disp8>
+ ip += (signed __int8) ip[1] + 2;
+ break;
+
+ case 0xE9: // jmp <disp32>
+ ip += (__int32)*PTR_DWORD(PTR_TO_TADDR(ip) + 1) + 5;
+ break;
+
+ case 0xF7: // test r/m32, imm32
+ // Magellan code coverage build
+ if ( (ip[1] & 0x38) == 0x00)
+ {
+ datasize = 4;
+ goto decodeRM;
+ }
+ else
+ {
+ return false;
+ }
+ break;
+
+ case 0x75: // jnz <target>
+ // Magellan code coverage build
+ // We always follow forward jump to avoid possible looping.
+ {
+ PTR_BYTE tmpIp = ip + (TADDR)(signed __int8) ip[1] + 2;
+ if (tmpIp > ip) {
+ ip = tmpIp; // follow forwards jump
+ }
+ else {
+ return false; // backwards jump implies not EH_epilog function
+ }
+ }
+ break;
+
+ case 0xC2: // ret
+ case 0xC3: // ret n
+ default:
+ return false;
+ }
+ }
+
+ return false;
+}
+
+
+/***************************************************************/
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+
+/***************************************************************/
+// A fundamental requirement of managed code is that we need to be able to enumerate all GC references on the
+// stack at GC time. To do this we need to be able to 'crawl' the stack. We know how to do this in JIT
+// compiled code (it generates additional information like the frame size etc), but we don't know how to do
+// this for unmanaged code. For PINVOKE calls, we leave a pointer to the transition boundary between managed
+// and unmanaged code and we simply ignore the lower part of the stack. However setting up this transition is
+// a bit expensive (1-2 dozen instructions), and while that is acceptable for PINVOKE, it is not acceptable
+// for high volume calls, like NEW, CAST, WriteBarrier, static field fetch and others.
+//
+// To get around this, for transitions into the runtime (which we call FCALLS), we DEFER setting up the
+// boundary variables (what we call the transition frame) until we actually need it (we are about to do an
+// operation that might cause a GC). This allows us to handle the common case (where we might find the thing
+// in a cache, or be able to service the 'new' from an allocation quantum), and only pay the cost of setting
+// up the transition frame when it will actually be used.
+//
+// The problem is that in order to set up a transition frame we need to be able to find ALL REGISTERS AT THE
+// TIME THE TRANSITION TO UNMANAGED CODE WAS MADE (because we might need to update them if they have GC
+// references). Because we have executed ordinary C++ code (which might spill the registers to the stack at
+// any time), we have a problem. LazyMachState is our 'solution' to this problem. We take advantage of the
+// fact that the C++ code MUST RESTORE the registers before returning. Thus we simulate the execution from the
+// current location to the return and 'watch' where the registers got restored from. This is what
+// unwindLazyState does (determine what the registers would be IF you had never executed any unmanaged C++
+// code).
+//
+// By design, this code does not handle all X86 instructions, but only those instructions needed in an
+// epilog. If you get a failure because of a missing instruction, it MAY simply be because the compiler
+// changed and now emits a new instruction in the epilog, but it MAY also be because the unwinder is
+// 'confused' and is trying to follow a code path that is NOT AN EPILOG, and in this case adding
+// instructions to 'fix' it is inappropriate.
+//
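+// As a concrete (illustrative) example, the kind of epilog this walker has to
+// simulate looks like:
+//
+//     mov esp, ebp        ; 8B E5
+//     pop ebp             ; 5D
+//     ret 8               ; C2 08 00
+//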
+void LazyMachState::unwindLazyState(LazyMachState* baseState,
+ MachState* lazyState,
+ int funCallDepth /* = 1 */,
+ HostCallPreference hostCallPreference /* = AllowHostCalls */)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ lazyState->_edi = baseState->_edi;
+ lazyState->_esi = baseState->_esi;
+ lazyState->_ebx = baseState->_ebx;
+ lazyState->_ebp = baseState->captureEbp;
+#ifndef DACCESS_COMPILE
+ lazyState->_pEdi = &baseState->_edi;
+ lazyState->_pEsi = &baseState->_esi;
+ lazyState->_pEbx = &baseState->_ebx;
+ lazyState->_pEbp = &baseState->_ebp;
+#endif
+
+ // We have captured the state of the registers as they exist in 'captureState';
+ // we need to simulate execution from the return address captured in 'captureState'
+ // until we return from the caller of captureState.
+
+ PTR_BYTE ip = PTR_BYTE(baseState->captureEip);
+ PTR_TADDR ESP = PTR_TADDR(baseState->captureEsp);
+ ESP++; // pop captureState's return address
+
+
+ // VC now has small helper calls that it uses in epilogs. We need to walk into these
+ // helpers if we are to decode the stack properly. After we walk the helper we need
+ // to return and continue walking the epilog. This variable remembers where to return to.
+ PTR_BYTE epilogCallRet = PTR_BYTE((TADDR)0);
+
+ // The very first conditional jump that we are going to encounter is
+ // the one testing for the return value of LazyMachStateCaptureState.
+ // The non-zero path is the one directly leading to a return statement.
+ // This variable keeps track of whether we are still looking for that
+ // first conditional jump.
+ BOOL bFirstCondJmp = TRUE;
+
+ // The general strategy is that we always try to plough forward:
+ // we follow a conditional jump if and only if it is a forward jump.
+ // However, in fcall functions that set up a HELPER_METHOD_FRAME in
+ // more than one place, gcc will have both of them share the same
+ // epilog - and the second one may actually be a backward jump.
+ // This can lead us to loop forever in destructor code. To protect
+ // against this, we remember the ip of the last conditional jump
+ // we followed, and if we encounter it again, we take the other branch.
+ PTR_BYTE lastCondJmpIp = PTR_BYTE((TADDR)0);
+
+ int datasize; // helper variable for decoding of address modes
+ int mod; // helper variable for decoding of mod r/m
+ int rm; // helper variable for decoding of mod r/m
+
+#ifdef _DEBUG
+ int count = 0;
+ const DWORD cInstructions = 1000;
+ PTR_BYTE *instructionBytes = (PTR_BYTE*)alloca(cInstructions * sizeof(PTR_BYTE));
+ memset(instructionBytes, 0, cInstructions * sizeof(PTR_BYTE));
+#endif
+ bool bset16bit=false;
+ bool b16bit=false;
+ for(;;)
+ {
+ _ASSERTE(count++ < 1000); // we should never walk more than 1000 instructions!
+ b16bit=bset16bit;
+ bset16bit=false;
+
+#ifndef DACCESS_COMPILE
+ again:
+#endif
+#ifdef _DEBUG
+ instructionBytes[count-1] = ip;
+#endif
+ switch(*ip)
+ {
+
+ case 0x64: // FS: prefix
+ bset16bit=b16bit; // In case we have just seen a 0x66 prefix
+ goto incIp1;
+
+ case 0x66:
+ bset16bit=true; // Remember that we saw the 0x66 prefix [16-bit datasize override]
+ goto incIp1;
+
+ case 0x50: // push EAX
+ case 0x51: // push ECX
+ case 0x52: // push EDX
+ case 0x53: // push EBX
+ case 0x55: // push EBP
+ case 0x56: // push ESI
+ case 0x57: // push EDI
+ case 0x9C: // pushfd
+ --ESP;
+ case 0x40: // inc EAX
+ case 0x41: // inc ECX
+ case 0x42: // inc EDX
+ case 0x43: // inc EBX
+ case 0x46: // inc ESI
+ case 0x47: // inc EDI
+ goto incIp1;
+
+ case 0x58: // pop EAX
+ case 0x59: // pop ECX
+ case 0x5A: // pop EDX
+ case 0x9D: // popfd
+ ESP++;
+ // FALL THROUGH
+
+ case 0x90: // nop
+ incIp1:
+ ip++;
+ break;
+
+ case 0x5B: // pop EBX
+ lazyState->_pEbx = ESP;
+ lazyState->_ebx = *ESP++;
+ goto incIp1;
+ case 0x5D: // pop EBP
+ lazyState->_pEbp = ESP;
+ lazyState->_ebp = *ESP++;
+ goto incIp1;
+ case 0x5E: // pop ESI
+ lazyState->_pEsi = ESP;
+ lazyState->_esi = *ESP++;
+ goto incIp1;
+ case 0x5F: // pop EDI
+ lazyState->_pEdi = ESP;
+ lazyState->_edi = *ESP++;
+ goto incIp1;
+
+ case 0xEB: // jmp <disp8>
+ ip += (signed __int8) ip[1] + 2;
+ break;
+
+ case 0x72: // jb <disp8> for gcc.
+ {
+ PTR_BYTE tmpIp = ip + (int)(signed __int8)ip[1] + 2;
+ if (tmpIp > ip)
+ ip = tmpIp;
+ else
+ ip += 2;
+ }
+ break;
+
+ case 0xE8: // call <disp32>
+ ip += 5;
+ if (epilogCallRet == 0)
+ {
+ PTR_BYTE target = ip + (__int32)*PTR_DWORD(PTR_TO_TADDR(ip) - 4); // calculate target
+
+ if (shouldEnterCall(target))
+ {
+ epilogCallRet = ip; // remember our return address
+ --ESP; // simulate pushing the return address
+ ip = target;
+ }
+ }
+ break;
+
+ case 0xE9: // jmp <disp32>
+ {
+ PTR_BYTE tmpIp = ip
+ + ((__int32)*dac_cast<PTR_DWORD>(ip + 1) + 5);
+ ip = tmpIp;
+ }
+ break;
+
+ case 0x0f: // follow non-zero jumps:
+ if (ip[1] >= 0x90 && ip[1] <= 0x9f) {
+ if ((ip[2] & 0xC0) != 0xC0) // set<cc> reg
+ goto badOpcode;
+ ip += 3;
+ break;
+ }
+ else if ((ip[1] & 0xf0) == 0x40) { //cmov mod/rm
+ ++ip;
+ datasize = 0;
+ goto decodeRM;
+ }
+ else if (ip[1] >= 0x10 && ip[1] <= 0x17) { // movups, movlps, movhps, unpcklpd, unpckhpd
+ ++ip;
+ datasize = 0;
+ goto decodeRM;
+ }
+ else if (ip[1] == 0x1f) { // nop (multi-byte)
+ ++ip;
+ datasize = 0;
+ goto decodeRM;
+ }
+ else if (ip[1] == 0x57) { // xorps
+ ++ip;
+ datasize = 0;
+ goto decodeRM;
+ }
+ else if (ip[1] == 0xb6 || ip[1] == 0xb7) { //movzx reg, r/m8
+ ++ip;
+ datasize = 0;
+ goto decodeRM;
+ }
+ else if (ip[1] == 0xbf) { //movsx reg, r/m16
+ ++ip;
+ datasize = 0;
+ goto decodeRM;
+ }
+ else if (ip[1] == 0xd6 || ip[1] == 0x7e) { // movq
+ ++ip;
+ datasize = 0;
+ goto decodeRM;
+ }
+ else if (bFirstCondJmp) {
+ bFirstCondJmp = FALSE;
+ if (ip[1] == 0x85) // jne <disp32>
+ ip += (__int32)*dac_cast<PTR_DWORD>(ip + 2) + 6;
+ else if (ip[1] >= 0x80 && ip[1] <= 0x8F) // jcc <disp32>
+ ip += 6;
+ else
+ goto badOpcode;
+ }
+ else {
+ if ((ip[1] >= 0x80) && (ip[1] <= 0x8F)) {
+ PTR_BYTE tmpIp = ip + (__int32)*dac_cast<PTR_DWORD>(ip + 2) + 6;
+
+ if ((tmpIp > ip) == (lastCondJmpIp != ip)) {
+ lastCondJmpIp = ip;
+ ip = tmpIp;
+ }
+ else {
+ lastCondJmpIp = ip;
+ ip += 6;
+ }
+ }
+ else
+ goto badOpcode;
+ }
+ break;
+
+ // This is here because VC seems to not always optimize
+ // away a test for a literal constant
+ case 0x6A: // push 0xXX
+ ip += 2;
+ --ESP;
+ break;
+
+ case 0x68: // push 0xXXXXXXXX
+ if ((ip[5] == 0xFF) && (ip[6] == 0x15)) {
+ ip += 11; // push imm32 (5 bytes) followed by call [XXXX] (6 bytes)
+ }
+ else {
+ ip += 5;
+
+ // For office profiler. They morph calls into push TARGET; call helper
+ // so if you see
+ //
+ // push XXXX
+ // call xxxx
+ //
+ // and we notice that mscorwks has been instrumented and
+ // xxxx starts with a JMP [] then do what you would do for call XXXX
+ if ((*ip & 0xFE) == 0xE8 && callsInstrumented()) { // It is a call or a jump (E8 or E9)
+ PTR_BYTE tmpIp = ip + 5;
+ PTR_BYTE target = tmpIp + (__int32)*PTR_DWORD(PTR_TO_TADDR(tmpIp) - 4);
+ if (target[0] == 0xFF && target[1] == 0x25) { // jmp [xxxx] (to external dll)
+ target = PTR_BYTE(*PTR_TADDR(PTR_TO_TADDR(ip) - 4));
+ if (*ip == 0xE9) { // Do logic for jmp
+ ip = target;
+ }
+ else if (shouldEnterCall(target)) { // Do logic for calls
+ epilogCallRet = ip; // remember our return address
+ --ESP; // simulate pushing the return address
+ ip = target;
+ }
+ }
+ }
+ }
+ break;
+
+ case 0x74: // jz <target>
+ if (bFirstCondJmp) {
+ bFirstCondJmp = FALSE;
+ ip += 2; // follow the non-zero path
+ break;
+ }
+ goto condJumpDisp8;
+
+ case 0x75: // jnz <target>
+ // Except for the first jump, we always follow forward jumps to avoid possible looping.
+ //
+ if (bFirstCondJmp) {
+ bFirstCondJmp = FALSE;
+ ip += (signed __int8) ip[1] + 2; // follow the non-zero path
+ break;
+ }
+ goto condJumpDisp8;
+
+ case 0x77: // ja <target>
+ case 0x78: // js <target>
+ case 0x79: // jns <target>
+ case 0x7d: // jge <target>
+ case 0x7c: // jl <target>
+ goto condJumpDisp8;
+
+ condJumpDisp8:
+ {
+ PTR_BYTE tmpIp = ip + (TADDR)(signed __int8) ip[1] + 2;
+ if ((tmpIp > ip) == (lastCondJmpIp != ip)) {
+ lastCondJmpIp = ip;
+ ip = tmpIp;
+ }
+ else {
+ lastCondJmpIp = ip;
+ ip += 2;
+ }
+ }
+ break;
+
+ case 0x84:
+ case 0x85:
+ mod = (ip[1] & 0xC0) >> 6;
+ if (mod != 3) // test reg1, reg2
+ goto badOpcode;
+ ip += 2;
+ break;
+
+ case 0x31:
+ case 0x32:
+ case 0x33:
+#ifdef __GNUC__
+ //there are lots of special workarounds for XOR for msvc. For GnuC
+ //just do the normal Mod/rm stuff.
+ datasize = 0;
+ goto decodeRM;
+#else
+ mod = (ip[1] & 0xC0) >> 6;
+ if (mod == 3)
+ {
+ // XOR reg1, reg2
+
+ // VC generates this sequence in some code:
+ // xor reg, reg
+ // test reg reg
+ // je <target>
+ // This is just an unconditional branch, so jump to it
+ if ((ip[1] & 7) == ((ip[1] >> 3) & 7)) { // reg1 == reg2?
+ if (ip[2] == 0x85 && ip[3] == ip[1]) { // TEST reg, reg
+ if (ip[4] == 0x74) {
+ ip += (signed __int8) ip[5] + 6; // follow the non-zero path
+ break;
+ }
+ _ASSERTE(ip[4] != 0x0f || ((ip[5] & 0xF0)!=0x80)); // If this goes off, we need the big jumps
+ }
+ else
+ {
+ if (ip[2]==0x74)
+ {
+ ip += (signed __int8) ip[3] + 4;
+ break;
+ }
+ _ASSERTE(ip[2] != 0x0f || ((ip[3] & 0xF0)!=0x80)); // If this goes off, we need the big jumps
+ }
+ }
+ ip += 2;
+ }
+ else if (mod == 1)
+ {
+ // XOR reg1, [reg+offs8]
+ // Used by the /GS flag for call to __security_check_cookie()
+ // Should only be XOR ECX,[EBP+4]
+ _ASSERTE((((ip[1] >> 3) & 0x7) == 0x1) && ((ip[1] & 0x7) == 0x5) && (ip[2] == 4));
+ ip += 3;
+ }
+ else if (mod == 2)
+ {
+ // XOR reg1, [reg+offs32]
+ // Should not happen but may occur with __security_check_cookie()
+ _ASSERTE(!"Unexpected XOR reg1, [reg+offs32]");
+ ip += 6;
+ }
+ else // (mod == 0)
+ {
+ // XOR reg1, [reg]
+ goto badOpcode;
+ }
+ break;
+#endif
+
+ case 0x05:
+ // added to handle gcc 3.3 generated code
+ // add %reg, constant
+ ip += 5;
+ break;
+
+ case 0xFF:
+ if ( (ip[1] & 0x38) == 0x30)
+ {
+ // opcode generated by Vulcan/BBT instrumentation
+ // search for push dword ptr[esp]; push imm32; call disp32 and if found ignore it
+ if ((ip[1] == 0x34) && (ip[2] == 0x24) && // push dword ptr[esp] (length 3 bytes)
+ (ip[3] == 0x68) && // push imm32 (length 5 bytes)
+ (ip[8] == 0xe8)) // call disp32 (length 5 bytes)
+ {
+ // found the magic seq emitted by Vulcan instrumentation
+ ip += 13; // (3+5+5)
+ break;
+ }
+
+ --ESP; // push r/m
+ datasize = 0;
+ goto decodeRM;
+ }
+ else if ( (ip[1] & 0x38) == 0x10)
+ {
+ // added to handle gcc 3.3 generated code
+ // This is a call *(%eax) generated by gcc for destructor calls.
+ // We can safely skip over the call
+ datasize = 0;
+ goto decodeRM;
+ }
+ else if (ip[1] == 0xe0)
+ {
+ goto badOpcode;
+#if 0
+ // Handles jmp *%eax from gcc
+ datasize = 0;
+ goto decodeRM;
+#endif
+ }
+ else if (ip[1] == 0x25 && epilogInstrumented()) // is it jmp [XXXX]
+ {
+ // this is an office profiler epilog (this jmp is acting as a return instruction)
+ PTR_BYTE epilogHelper = PTR_BYTE(*PTR_TADDR(*PTR_TADDR(PTR_TO_TADDR(ip) + 2)));
+
+ ip = PTR_BYTE(*ESP);
+ lazyState->_pRetAddr = ESP++;
+
+ if (epilogHelper[0] != 0x6A) // push <number of dwords to pop>
+ goto badOpcode;
+ unsigned disp = *PTR_BYTE(PTR_TO_TADDR(epilogHelper) + 1) * 4;
+ ESP = PTR_TADDR(PTR_TO_TADDR(ESP) + disp); // pop args
+ goto ret_with_epilogHelperCheck;
+
+ }
+ else
+ {
+ goto badOpcode;
+ }
+ break;
+
+ case 0x39: // cmp r/m, reg
+ case 0x3B: // cmp reg, r/m
+ datasize = 0;
+ goto decodeRM;
+
+ case 0xA1: // MOV EAX, [XXXX]
+ ip += 5;
+ break;
+
+ case 0x89: // MOV r/m, reg
+ if (ip[1] == 0xEC) // MOV ESP, EBP
+ goto mov_esp_ebp;
+ // FALL THROUGH
+
+ case 0x18: // SBB r/m8, r8
+ case 0x19: // SBB r/m[16|32], r[16|32]
+ case 0x1A: // SBB r8, r/m8
+ case 0x1B: // SBB r[16|32], r/m[16|32]
+
+ case 0x88: // MOV reg, r/m (BYTE)
+ case 0x8A: // MOV r/m, reg (BYTE)
+
+ move:
+ datasize = 0;
+
+ decodeRM:
+ // Note that we don't want to read from ip[]
+ // after we do ANY incrementing of ip
+
+ mod = (ip[1] & 0xC0) >> 6;
+ if (mod != 3) {
+ rm = (ip[1] & 0x07);
+ if (mod == 0) { // (mod == 0)
+ if (rm == 5) // has disp32?
+ ip += 4; // [disp32]
+ else if (rm == 4) // has SIB byte?
+ ip += 1; // [reg*K+reg]
+ }
+ else if (mod == 1) { // (mod == 1)
+ if (rm == 4) // has SIB byte?
+ ip += 1; // [reg*K+reg+disp8]
+ ip += 1; // for disp8
+ }
+ else { // (mod == 2)
+ if (rm == 4) // has SIB byte?
+ ip += 1; // [reg*K+reg+disp32]
+ ip += 4; // for disp32
+ }
+ }
+ ip += 2; // opcode and Mod R/M byte
+ ip += datasize;
+ break;
+
+ case 0x80: // OP r/m8, <imm8>
+ datasize = 1;
+ goto decodeRM;
+
+ case 0x81: // OP r/m32, <imm32>
+ if (!b16bit && ip[1] == 0xC4) { // ADD ESP, <imm32>
+ ESP = dac_cast<PTR_TADDR>(dac_cast<TADDR>(ESP) +
+ (__int32)*dac_cast<PTR_DWORD>(ip + 2));
+ ip += 6;
+ break;
+ } else if (!b16bit && ip[1] == 0xC5) { // ADD EBP, <imm32>
+ lazyState->_ebp += (__int32)*dac_cast<PTR_DWORD>(ip + 2);
+ ip += 6;
+ break;
+ }
+
+ datasize = b16bit?2:4;
+ goto decodeRM;
+
+ case 0x29: // SUB mod/rm
+ case 0x2B:
+ datasize = 0;
+ goto decodeRM;
+ case 0x83: // OP r/m32, <imm8>
+ if (ip[1] == 0xC4) { // ADD ESP, <imm8>
+ ESP = dac_cast<PTR_TADDR>(dac_cast<TADDR>(ESP) + (signed __int8)ip[2]);
+ ip += 3;
+ break;
+ }
+ if (ip[1] == 0xec) { // SUB ESP, <imm8>
+ ESP = PTR_TADDR(PTR_TO_TADDR(ESP) - (signed __int8)ip[2]);
+ ip += 3;
+ break;
+ }
+ if (ip[1] == 0xe4) { // AND ESP, <imm8>
+ ESP = PTR_TADDR(PTR_TO_TADDR(ESP) & (signed __int8)ip[2]);
+ ip += 3;
+ break;
+ }
+ if (ip[1] == 0xc5) { // ADD EBP, <imm8>
+ lazyState->_ebp += (signed __int8)ip[2];
+ ip += 3;
+ break;
+ }
+
+ datasize = 1;
+ goto decodeRM;
+
+ case 0x8B: // MOV reg, r/m
+ if (ip[1] == 0xE5) { // MOV ESP, EBP
+ mov_esp_ebp:
+ ESP = PTR_TADDR(lazyState->_ebp);
+ ip += 2;
+ break;
+ }
+
+ if ((ip[1] & 0xc7) == 0x4 && ip[2] == 0x24) // move reg, [esp]
+ {
+ if ( ip[1] == 0x1C ) { // MOV EBX, [ESP]
+ lazyState->_pEbx = ESP;
+ lazyState->_ebx = *lazyState->_pEbx;
+ }
+ else if ( ip[1] == 0x34 ) { // MOV ESI, [ESP]
+ lazyState->_pEsi = ESP;
+ lazyState->_esi = *lazyState->_pEsi;
+ }
+ else if ( ip[1] == 0x3C ) { // MOV EDI, [ESP]
+ lazyState->_pEdi = ESP;
+ lazyState->_edi = *lazyState->_pEdi;
+ }
+ else if ( ip[1] == 0x24 /*ESP*/ || ip[1] == 0x2C /*EBP*/)
+ goto badOpcode;
+
+ ip += 3;
+ break;
+ }
+
+ if ((ip[1] & 0xc7) == 0x44 && ip[2] == 0x24) // move reg, [esp+imm8]
+ {
+ if ( ip[1] == 0x5C ) { // MOV EBX, [ESP+XX]
+ lazyState->_pEbx = PTR_TADDR(PTR_TO_TADDR(ESP) + (signed __int8)ip[3]);
+ lazyState->_ebx = *lazyState->_pEbx ;
+ }
+ else if ( ip[1] == 0x74 ) { // MOV ESI, [ESP+XX]
+ lazyState->_pEsi = PTR_TADDR(PTR_TO_TADDR(ESP) + (signed __int8)ip[3]);
+ lazyState->_esi = *lazyState->_pEsi;
+ }
+ else if ( ip[1] == 0x7C ) { // MOV EDI, [ESP+XX]
+ lazyState->_pEdi = PTR_TADDR(PTR_TO_TADDR(ESP) + (signed __int8)ip[3]);
+ lazyState->_edi = *lazyState->_pEdi;
+ }
+ else if ( ip[1] == 0x64 /*ESP*/ || ip[1] == 0x6C /*EBP*/)
+ goto badOpcode;
+
+ ip += 4;
+ break;
+ }
+
+ if ((ip[1] & 0xC7) == 0x45) { // MOV reg, [EBP + imm8]
+ // gcc sometimes restores callee-preserved registers
+ // via 'mov reg, [ebp-xx]' instead of 'pop reg'
+ if ( ip[1] == 0x5D ) { // MOV EBX, [EBP+XX]
+ lazyState->_pEbx = PTR_TADDR(lazyState->_ebp + (signed __int8)ip[2]);
+ lazyState->_ebx = *lazyState->_pEbx ;
+ }
+ else if ( ip[1] == 0x75 ) { // MOV ESI, [EBP+XX]
+ lazyState->_pEsi = PTR_TADDR(lazyState->_ebp + (signed __int8)ip[2]);
+ lazyState->_esi = *lazyState->_pEsi;
+ }
+ else if ( ip[1] == 0x7D ) { // MOV EDI, [EBP+XX]
+ lazyState->_pEdi = PTR_TADDR(lazyState->_ebp + (signed __int8)ip[2]);
+ lazyState->_edi = *lazyState->_pEdi;
+ }
+ else if ( ip[1] == 0x65 /*ESP*/ || ip[1] == 0x6D /*EBP*/)
+ goto badOpcode;
+
+ // We don't track the values of EAX,ECX,EDX
+
+ ip += 3; // MOV reg, [reg + imm8]
+ break;
+ }
+
+ if ((ip[1] & 0xC7) == 0x85) { // MOV reg, [EBP+imm32]
+ // gcc sometimes restores callee-preserved registers
+ // via 'mov reg, [ebp-xx]' instead of 'pop reg'
+ if ( ip[1] == 0xDD ) { // MOV EBX, [EBP+XXXXXXXX]
+ lazyState->_pEbx = PTR_TADDR(lazyState->_ebp + (__int32)*dac_cast<PTR_DWORD>(ip + 2));
+ lazyState->_ebx = *lazyState->_pEbx ;
+ }
+ else if ( ip[1] == 0xF5 ) { // MOV ESI, [EBP+XXXXXXXX]
+ lazyState->_pEsi = PTR_TADDR(lazyState->_ebp + (__int32)*dac_cast<PTR_DWORD>(ip + 2));
+ lazyState->_esi = *lazyState->_pEsi;
+ }
+ else if ( ip[1] == 0xFD ) { // MOV EDI, [EBP+XXXXXXXX]
+ lazyState->_pEdi = PTR_TADDR(lazyState->_ebp + (__int32)*dac_cast<PTR_DWORD>(ip + 2));
+ lazyState->_edi = *lazyState->_pEdi;
+ }
+ else if ( ip[1] == 0xE5 /*ESP*/ || ip[1] == 0xED /*EBP*/)
+ goto badOpcode; // Add more registers
+
+ // We don't track the values of EAX,ECX,EDX
+
+ ip += 6; // MOV reg, [reg + imm32]
+ break;
+ }
+ goto move;
+
+ case 0x8D: // LEA
+ if ((ip[1] & 0x38) == 0x20) { // Don't allow ESP to be updated
+ if (ip[1] == 0xA5) // LEA ESP, [EBP+XXXX]
+ ESP = PTR_TADDR(lazyState->_ebp + (__int32)*dac_cast<PTR_DWORD>(ip + 2));
+ else if (ip[1] == 0x65) // LEA ESP, [EBP+XX]
+ ESP = PTR_TADDR(lazyState->_ebp + (signed __int8) ip[2]);
+ else if (ip[1] == 0x24 && ip[2] == 0x24) // LEA ESP, [ESP]
+ ;
+ else if (ip[1] == 0xa4 && ip[2] == 0x24 && *((DWORD *)(&ip[3])) == 0) // Another form of: LEA ESP, [ESP]
+ ;
+ else if (ip[1] == 0x64 && ip[2] == 0x24 && ip[3] == 0) // Yet another form of: LEA ESP, [ESP] (8 bit offset)
+ ;
+ else
+ {
+ goto badOpcode;
+ }
+ }
+
+ datasize = 0;
+ goto decodeRM;
+
+ case 0xB0: // MOV AL, imm8
+ ip += 2;
+ break;
+ case 0xB8: // MOV EAX, imm32
+ case 0xB9: // MOV ECX, imm32
+ case 0xBA: // MOV EDX, imm32
+ case 0xBB: // MOV EBX, imm32
+ case 0xBE: // MOV ESI, imm32
+ case 0xBF: // MOV EDI, imm32
+ if(b16bit)
+ ip += 3;
+ else
+ ip += 5;
+ break;
+
+ case 0xC2: // ret N
+ {
+ unsigned __int16 disp = *dac_cast<PTR_WORD>(ip + 1);
+ ip = PTR_BYTE(*ESP);
+ lazyState->_pRetAddr = ESP++;
+ _ASSERTE(disp < 64); // sanity check (although strictly speaking not impossible)
+ ESP = dac_cast<PTR_TADDR>(dac_cast<TADDR>(ESP) + disp); // pop args
+ goto ret;
+ }
+ case 0xC3: // ret
+ ip = PTR_BYTE(*ESP);
+ lazyState->_pRetAddr = ESP++;
+
+ ret_with_epilogHelperCheck:
+ if (epilogCallRet != 0) { // we are returning from a special epilog helper
+ ip = epilogCallRet;
+ epilogCallRet = 0;
+ break; // this does not count toward funCallDepth
+ }
+ ret:
+ if (funCallDepth > 0)
+ {
+ --funCallDepth;
+ if (funCallDepth == 0)
+ goto done;
+ }
+ else
+ {
+ // Determine whether given IP resides in JITted code. (It returns nonzero in that case.)
+ // Use it now to see if we've unwound to managed code yet.
+ BOOL fFailedReaderLock = FALSE;
+ BOOL fIsManagedCode = ExecutionManager::IsManagedCode(*lazyState->pRetAddr(), hostCallPreference, &fFailedReaderLock);
+ if (fFailedReaderLock)
+ {
+ // We don't know if we would have been able to find a JIT
+ // manager, because we couldn't enter the reader lock without
+ // yielding (and our caller doesn't want us to yield). So abort
+ // now.
+
+ // Invalidate the lazyState we're returning, so the caller knows
+ // we aborted before we could fully unwind
+ lazyState->_pRetAddr = NULL;
+ return;
+ }
+
+ if (fIsManagedCode)
+ goto done;
+ }
+
+ bFirstCondJmp = TRUE;
+ break;
+
+ case 0xC6: // MOV r/m8, imm8
+ datasize = 1;
+ goto decodeRM;
+
+ case 0xC7: // MOV r/m32, imm32
+ datasize = b16bit?2:4;
+ goto decodeRM;
+
+ case 0xC9: // leave
+ ESP = PTR_TADDR(lazyState->_ebp);
+ lazyState->_pEbp = ESP;
+ lazyState->_ebp = *ESP++;
+ ip++;
+ break;
+
+#ifndef DACCESS_COMPILE
+ case 0xCC:
+ if (IsDebuggerPresent())
+ {
+ OutputDebugStringA("CLR: Invalid breakpoint in a helpermethod frame epilog\n");
+ DebugBreak();
+ goto again;
+ }
+#ifndef _PREFIX_
+ *((int*) 0) = 1; // If you get this error, it is because you
+ // set a breakpoint in a helpermethod frame epilog;
+ // you can't do that, unfortunately. Just move it
+ // into the interior of the method to fix it
+#endif // !_PREFIX_
+ goto done;
+#endif //!DACCESS_COMPILE
+
+ case 0xD0: // shl REG16, 1
+ case 0xD1: // shl REG32, 1
+ if (0xE4 == ip[1] || 0xE5 == ip[1]) // shl ESP, 1 or shl EBP, 1
+ goto badOpcode; // Doesn't look like valid code
+ ip += 2;
+ break;
+
+ case 0xC1: // shl REG32, imm8
+ if (0xE4 == ip[1] || 0xE5 == ip[1]) // shl ESP, imm8 or shl EBP, imm8
+ goto badOpcode; // Doesn't look like valid code
+ ip += 3;
+ break;
+
+ case 0xD9: // single prefix
+ if (0xEE == ip[1])
+ {
+ ip += 2; // FLDZ
+ break;
+ }
+ //
+ // INTENTIONAL FALL THRU
+ //
+ case 0xDD: // double prefix
+ if ((ip[1] & 0xC0) != 0xC0)
+ {
+ datasize = 0; // floatop r/m
+ goto decodeRM;
+ }
+ else
+ {
+ goto badOpcode;
+ }
+ break;
+
+ case 0xf2: // repne prefix
+ case 0xF3: // rep prefix
+ ip += 1;
+ break;
+
+ case 0xA4: // MOVS byte
+ case 0xA5: // MOVS word/dword
+ ip += 1;
+ break;
+
+ case 0xA8: //test AL, imm8
+ ip += 2;
+ break;
+ case 0xA9: //test EAX, imm32
+ ip += 5;
+ break;
+ case 0xF6:
+ if ( (ip[1] & 0x38) == 0x00) // TEST r/m8, imm8
+ {
+ datasize = 1;
+ goto decodeRM;
+ }
+ else
+ {
+ goto badOpcode;
+ }
+ break;
+
+ case 0xF7:
+ if ( (ip[1] & 0x38) == 0x00) // TEST r/m32, imm32
+ {
+ datasize = b16bit?2:4;
+ goto decodeRM;
+ }
+ else if ((ip[1] & 0xC8) == 0xC8) //neg reg
+ {
+ ip += 2;
+ break;
+ }
+ else if ((ip[1] & 0x30) == 0x30) //div eax by mod/rm
+ {
+ datasize = 0;
+ goto decodeRM;
+ }
+ else
+ {
+ goto badOpcode;
+ }
+ break;
+
+#ifdef __GNUC__
+ case 0x2e:
+ // Group 2 instruction prefix.
+ if (ip[1] == 0x0f && ip[2] == 0x1f)
+ {
+ // Although not the recommended multi-byte sequence for 9-byte
+ // nops (the suggestion is to use 0x66 as the prefix), this shows
+ // up in GCC-optimized code.
+ ip += 2;
+ datasize = 0;
+ goto decodeRM;
+ }
+ else
+ {
+ goto badOpcode;
+ }
+ break;
+#endif // __GNUC__
+
+ default:
+ badOpcode:
+ _ASSERTE(!"Bad opcode");
+ // FIX what to do here?
+#ifndef DACCESS_COMPILE
+#ifndef _PREFIX_
+ *((unsigned __int8**) 0) = ip; // cause an access violation (Free Build assert)
+#endif // !_PREFIX_
+#else
+ DacNotImpl();
+#endif
+ goto done;
+ }
+ }
+done:
+ _ASSERTE(epilogCallRet == 0);
+
+ // At this point the fields in 'frame' correspond exactly to the register
+ // state when the helper returns to its caller.
+ lazyState->_esp = dac_cast<TADDR>(ESP);
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
diff --git a/src/vm/i386/jithelp.asm b/src/vm/i386/jithelp.asm
new file mode 100644
index 0000000000..b571b3142f
--- /dev/null
+++ b/src/vm/i386/jithelp.asm
@@ -0,0 +1,2575 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+; ***********************************************************************
+; File: JIThelp.asm
+;
+; ***********************************************************************
+;
+; *** NOTE: If you make changes to this file, propagate the changes to
+; jithelp.s in this directory
+
+; This contains JITinterface routines that are 100% x86 assembly
+
+ .586
+ .model flat
+
+ include asmconstants.inc
+
+ option casemap:none
+ .code
+;
+; <TODO>@TODO Switch to g_ephemeral_low and g_ephemeral_high
+; @TODO instead of g_lowest_address, g_highest address</TODO>
+;
+
+ARGUMENT_REG1 equ ecx
+ARGUMENT_REG2 equ edx
+g_ephemeral_low TEXTEQU <_g_ephemeral_low>
+g_ephemeral_high TEXTEQU <_g_ephemeral_high>
+g_lowest_address TEXTEQU <_g_lowest_address>
+g_highest_address TEXTEQU <_g_highest_address>
+g_card_table TEXTEQU <_g_card_table>
+WriteBarrierAssert TEXTEQU <_WriteBarrierAssert@8>
+JIT_LLsh TEXTEQU <_JIT_LLsh@0>
+JIT_LRsh TEXTEQU <_JIT_LRsh@0>
+JIT_LRsz TEXTEQU <_JIT_LRsz@0>
+JIT_LMul TEXTEQU <@JIT_LMul@16>
+JIT_Dbl2LngOvf TEXTEQU <@JIT_Dbl2LngOvf@8>
+JIT_Dbl2Lng TEXTEQU <@JIT_Dbl2Lng@8>
+JIT_Dbl2IntSSE2 TEXTEQU <@JIT_Dbl2IntSSE2@8>
+JIT_Dbl2LngP4x87 TEXTEQU <@JIT_Dbl2LngP4x87@8>
+JIT_Dbl2LngSSE3 TEXTEQU <@JIT_Dbl2LngSSE3@8>
+JIT_InternalThrowFromHelper TEXTEQU <@JIT_InternalThrowFromHelper@4>
+JIT_WriteBarrierReg_PreGrow TEXTEQU <_JIT_WriteBarrierReg_PreGrow@0>
+JIT_WriteBarrierReg_PostGrow TEXTEQU <_JIT_WriteBarrierReg_PostGrow@0>
+JIT_TailCall TEXTEQU <_JIT_TailCall@0>
+JIT_TailCallLeave TEXTEQU <_JIT_TailCallLeave@0>
+JIT_TailCallVSDLeave TEXTEQU <_JIT_TailCallVSDLeave@0>
+JIT_TailCallHelper TEXTEQU <_JIT_TailCallHelper@4>
+JIT_TailCallReturnFromVSD TEXTEQU <_JIT_TailCallReturnFromVSD@0>
+
+EXTERN g_ephemeral_low:DWORD
+EXTERN g_ephemeral_high:DWORD
+EXTERN g_lowest_address:DWORD
+EXTERN g_highest_address:DWORD
+EXTERN g_card_table:DWORD
+ifdef _DEBUG
+EXTERN WriteBarrierAssert:PROC
+endif ; _DEBUG
+EXTERN JIT_InternalThrowFromHelper:PROC
+ifdef FEATURE_HIJACK
+EXTERN JIT_TailCallHelper:PROC
+endif
+EXTERN _g_TailCallFrameVptr:DWORD
+EXTERN @JIT_FailFast@0:PROC
+EXTERN _s_gsCookie:DWORD
+EXTERN @JITutil_IsInstanceOfInterface@8:PROC
+EXTERN @JITutil_ChkCastInterface@8:PROC
+EXTERN @JITutil_IsInstanceOfAny@8:PROC
+EXTERN @JITutil_ChkCastAny@8:PROC
+ifdef FEATURE_IMPLICIT_TLS
+EXTERN _GetThread@0:PROC
+endif
+
+ifdef WRITE_BARRIER_CHECK
+; Those global variables are always defined, but should be 0 for Server GC
+g_GCShadow TEXTEQU <?g_GCShadow@@3PAEA>
+g_GCShadowEnd TEXTEQU <?g_GCShadowEnd@@3PAEA>
+EXTERN g_GCShadow:DWORD
+EXTERN g_GCShadowEnd:DWORD
+INVALIDGCVALUE equ 0CCCCCCCDh
+endif
+
+ifdef FEATURE_REMOTING
+EXTERN _TransparentProxyStub_CrossContext@0:PROC
+EXTERN _InContextTPQuickDispatchAsmStub@0:PROC
+endif
+
+.686P
+.XMM
+; The following macro is needed because of a MASM issue with the
+; movsd mnemonic
+;
+$movsd MACRO op1, op2
+ LOCAL begin_movsd, end_movsd
+begin_movsd:
+ movupd op1, op2
+end_movsd:
+ org begin_movsd
+ db 0F2h
+ org end_movsd
+ENDM
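+; (The org/db pair above backs the location counter up to the start of the
+; emitted movupd and overwrites its 66h prefix byte with 0F2h, which turns
+; the encoding into movsd.)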
+.586
+
+; The following macro is used to match the JITs
+; multi-byte NOP sequence
+$nop3 MACRO
+ db 090h
+ db 090h
+ db 090h
+ENDM
+
+
+
+;***
+;JIT_WriteBarrier* - GC write barrier helper
+;
+;Purpose:
+; Helper calls in order to assign an object to a field
+; Enables book-keeping of the GC.
+;
+;Entry:
+; EDX - address of ref-field (assigned to)
+; the resp. other reg - RHS of assignment
+;
+;Exit:
+;
+;Uses:
+; EDX is destroyed.
+;
+;Exceptions:
+;
+;*******************************************************************************
+
+; The code here is tightly coupled with AdjustContextForWriteBarrier, if you change
+; anything here, you might need to change AdjustContextForWriteBarrier as well
+WriteBarrierHelper MACRO rg
+ ALIGN 4
+
+ ;; The entry point is the fully 'safe' one in which we check if EDX (the REF
+ ;; being updated) is actually in the GC heap
+
+PUBLIC _JIT_CheckedWriteBarrier&rg&@0
+_JIT_CheckedWriteBarrier&rg&@0 PROC
+ ;; check if the REF being updated is in the GC heap
+ cmp edx, g_lowest_address
+ jb WriteBarrier_NotInHeap_&rg
+ cmp edx, g_highest_address
+ jae WriteBarrier_NotInHeap_&rg
+
+ ;; fall through to unchecked routine
+ ;; note that its entry point also happens to be aligned
+
+ifdef WRITE_BARRIER_CHECK
+ ;; This entry point is used when you know the REF pointer being updated
+ ;; is in the GC heap
+PUBLIC _JIT_DebugWriteBarrier&rg&@0
+_JIT_DebugWriteBarrier&rg&@0:
+endif
+
+ifdef _DEBUG
+ push edx
+ push ecx
+ push eax
+
+ push rg
+ push edx
+ call WriteBarrierAssert
+
+ pop eax
+ pop ecx
+ pop edx
+endif ;_DEBUG
+
+ ; in the !WRITE_BARRIER_CHECK case this will be the move for all
+ ; addresses in the GC heap; addresses outside the GC heap will get
+ ; taken care of below at WriteBarrier_NotInHeap_&rg
+
+ifndef WRITE_BARRIER_CHECK
+ mov DWORD PTR [edx], rg
+endif
+
+ifdef WRITE_BARRIER_CHECK
+ ; Test dest here so that if it is bad, the AV happens before we change register/stack
+ ; state. This makes the job of AdjustContextForWriteBarrier easier.
+ cmp [edx], 0
+ ;; ALSO update the shadow GC heap if that is enabled
+ ; Make ebp into the temporary src register. We need to do this so that we can use ecx
+ ; in the calculation of the shadow GC address, but still have access to the src register
+ push ecx
+ push ebp
+ mov ebp, rg
+
+ ; if g_GCShadow is 0, don't perform the check
+ cmp g_GCShadow, 0
+ je WriteBarrier_NoShadow_&rg
+
+ mov ecx, edx
+ sub ecx, g_lowest_address ; U/V
+ jb WriteBarrier_NoShadow_&rg
+ add ecx, [g_GCShadow]
+ cmp ecx, [g_GCShadowEnd]
+ ja WriteBarrier_NoShadow_&rg
+
+ ; TODO: In Orcas timeframe if we move to P4+ only on X86 we should enable
+ ; mfence barriers on either side of these two writes to make sure that
+ ; they stay as close together as possible
+
+ ; edx contains address in GC
+ ; ecx contains address in ShadowGC
+ ; ebp temporarily becomes the src register
+
+ ;; When we're writing to the shadow GC heap we want to be careful to minimize
+ ;; the risk of a race that can occur here where the GC and ShadowGC don't match
+ mov DWORD PTR [edx], ebp
+ mov DWORD PTR [ecx], ebp
+
+ ;; We need a scratch register to verify the shadow heap. We also need to
+ ;; construct a memory barrier so that the write to the shadow heap happens
+ ;; before the read from the GC heap. We can do both by using SUB/XCHG
+ ;; rather than PUSH.
+ ;;
+ ;; TODO: Should be changed to a push if the mfence described above is added.
+ ;;
+ sub esp, 4
+ xchg [esp], eax
+
+ ;; As part of our race avoidance (see above) we will now check whether the values
+ ;; in the GC and ShadowGC match. There is a possibility that we're wrong here but
+ ;; being overaggressive means we might mask a case where someone updates GC refs
+ ;; without going through a write barrier, but by its nature it will be indeterminate
+ ;; and we will find real bugs whereas the current implementation is indeterminate
+ ;; but only leads to investigations that find that this code is fundamentally flawed
+ mov eax, [edx]
+ cmp [ecx], eax
+ je WriteBarrier_CleanupShadowCheck_&rg
+ mov [ecx], INVALIDGCVALUE
+
+WriteBarrier_CleanupShadowCheck_&rg:
+ pop eax
+
+ jmp WriteBarrier_ShadowCheckEnd_&rg
+
+WriteBarrier_NoShadow_&rg:
+ ; If we come here then we haven't written the value to the GC and need to.
+ ; ebp contains rg
+ ; We restore ebp/ecx immediately after this, and if either of them is the src
+ ; register it will regain its value as the src register.
+ mov DWORD PTR [edx], ebp
+WriteBarrier_ShadowCheckEnd_&rg:
+ pop ebp
+ pop ecx
+endif
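+ ; Card table update: only stores of ephemeral (gen0/gen1) objects need to be
+ ; recorded. Each card byte covers 2^10 = 1024 bytes of heap, so the destination
+ ; address shifted right by 10 indexes g_card_table; writing 0FFh marks the card
+ ; so the next GC scans that 1K region for cross-generation references.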
+ cmp rg, g_ephemeral_low
+ jb WriteBarrier_NotInEphemeral_&rg
+ cmp rg, g_ephemeral_high
+ jae WriteBarrier_NotInEphemeral_&rg
+
+ shr edx, 10
+ add edx, [g_card_table]
+ cmp BYTE PTR [edx], 0FFh
+ jne WriteBarrier_UpdateCardTable_&rg
+ ret
+
+WriteBarrier_UpdateCardTable_&rg:
+ mov BYTE PTR [edx], 0FFh
+ ret
+
+WriteBarrier_NotInHeap_&rg:
+ ; If it wasn't in the heap then we haven't updated the dst in memory yet
+ mov DWORD PTR [edx], rg
+WriteBarrier_NotInEphemeral_&rg:
+ ; If it is in the GC Heap but isn't in the ephemeral range we've already
+ ; updated the Heap with the Object*.
+ ret
+_JIT_CheckedWriteBarrier&rg&@0 ENDP
+
+ENDM
+
+
+;***
+;JIT_ByRefWriteBarrier* - GC write barrier helper
+;
+;Purpose:
+; Helper calls in order to assign an object to a byref field
+; Enables book-keeping of the GC.
+;
+;Entry:
+; EDI - address of ref-field (assigned to)
+; ESI - address of the data (source)
+; ECX can be trashed
+;
+;Exit:
+;
+;Uses:
+; EDI and ESI are incremented by a DWORD
+;
+;Exceptions:
+;
+;*******************************************************************************
+
+; The code here is tightly coupled with AdjustContextForWriteBarrier, if you change
+; anything here, you might need to change AdjustContextForWriteBarrier as well
+
+ByRefWriteBarrierHelper MACRO
+ ALIGN 4
+PUBLIC _JIT_ByRefWriteBarrier@0
+_JIT_ByRefWriteBarrier@0 PROC
+ ;;test for dest in range
+ mov ecx, [esi]
+ cmp edi, g_lowest_address
+ jb ByRefWriteBarrier_NotInHeap
+ cmp edi, g_highest_address
+ jae ByRefWriteBarrier_NotInHeap
+
+ifndef WRITE_BARRIER_CHECK
+ ;;write barrier
+ mov [edi],ecx
+endif
+
+ifdef WRITE_BARRIER_CHECK
+ ; Test dest here so that if it is bad, the AV happens before we change register/stack
+ ; state. This makes the job of AdjustContextForWriteBarrier easier.
+ cmp [edi], 0
+
+ ;; ALSO update the shadow GC heap if that is enabled
+
+ ; use edx for address in GC Shadow,
+ push edx
+
+ ;if g_GCShadow is 0, don't do the update
+ cmp g_GCShadow, 0
+ je ByRefWriteBarrier_NoShadow
+
+ mov edx, edi
+ sub edx, g_lowest_address ; U/V
+ jb ByRefWriteBarrier_NoShadow
+ add edx, [g_GCShadow]
+ cmp edx, [g_GCShadowEnd]
+ ja ByRefWriteBarrier_NoShadow
+
+ ; TODO: In Orcas timeframe if we move to P4+ only on X86 we should enable
+ ; mfence barriers on either side of these two writes to make sure that
+ ; they stay as close together as possible
+
+ ; edi contains address in GC
+ ; edx contains address in ShadowGC
+ ; ecx is the value to assign
+
+ ;; When we're writing to the shadow GC heap we want to be careful to minimize
+ ;; the risk of a race that can occur here where the GC and ShadowGC don't match
+ mov DWORD PTR [edi], ecx
+ mov DWORD PTR [edx], ecx
+
+ ;; We need a scratch register to verify the shadow heap. We also need to
+ ;; construct a memory barrier so that the write to the shadow heap happens
+ ;; before the read from the GC heap. We can do both by using SUB/XCHG
+ ;; rather than PUSH.
+ ;;
+ ;; TODO: Should be changed to a push if the mfence described above is added.
+ ;;
+ sub esp, 4
+ xchg [esp], eax
+
+ ;; As part of our race avoidance (see above) we will now check whether the values
+ ;; in the GC and ShadowGC match. There is a possibility that we're wrong here but
+ ;; being overaggressive means we might mask a case where someone updates GC refs
+ ;; without going through a write barrier, but by its nature it will be indeterminate
+ ;; and we will find real bugs whereas the current implementation is indeterminate
+ ;; but only leads to investigations that find that this code is fundamentally flawed
+
+ mov eax, [edi]
+ cmp [edx], eax
+ je ByRefWriteBarrier_CleanupShadowCheck
+ mov [edx], INVALIDGCVALUE
+ByRefWriteBarrier_CleanupShadowCheck:
+ pop eax
+ jmp ByRefWriteBarrier_ShadowCheckEnd
+
+ByRefWriteBarrier_NoShadow:
+ ; If we come here then we haven't written the value to the GC and need to.
+ mov DWORD PTR [edi], ecx
+
+ByRefWriteBarrier_ShadowCheckEnd:
+ pop edx
+endif
+ ;;test for *src in ephemeral segment
+ cmp ecx, g_ephemeral_low
+ jb ByRefWriteBarrier_NotInEphemeral
+ cmp ecx, g_ephemeral_high
+ jae ByRefWriteBarrier_NotInEphemeral
+
+ mov ecx, edi
+ add esi,4
+ add edi,4
+
+ shr ecx, 10
+ add ecx, [g_card_table]
+ cmp byte ptr [ecx], 0FFh
+ jne ByRefWriteBarrier_UpdateCardTable
+ ret
+ByRefWriteBarrier_UpdateCardTable:
+ mov byte ptr [ecx], 0FFh
+ ret
+
+ByRefWriteBarrier_NotInHeap:
+ ; If it wasn't in the heap then we haven't updated the dst in memory yet
+ mov [edi],ecx
+ByRefWriteBarrier_NotInEphemeral:
+ ; If it is in the GC Heap but isn't in the ephemeral range we've already
+ ; updated the Heap with the Object*.
+ add esi,4
+ add edi,4
+ ret
+_JIT_ByRefWriteBarrier@0 ENDP
+ENDM
+
+;*******************************************************************************
+; Write barrier wrappers with fcall calling convention
+;
+UniversalWriteBarrierHelper MACRO name
+ ALIGN 4
+PUBLIC @JIT_&name&@8
+@JIT_&name&@8 PROC
+ mov eax,edx
+ mov edx,ecx
+ jmp _JIT_&name&EAX@0
+@JIT_&name&@8 ENDP
+ENDM
+
+; WriteBarrierStart and WriteBarrierEnd are used to determine bounds of
+; WriteBarrier functions so we can determine if an AV occurred in them.
+;
+PUBLIC _JIT_WriteBarrierStart@0
+_JIT_WriteBarrierStart@0 PROC
+ret
+_JIT_WriteBarrierStart@0 ENDP
+
+ifdef FEATURE_USE_ASM_GC_WRITE_BARRIERS
+; Only define these if we're using the ASM GC write barriers; if this flag is not defined,
+; we'll use C++ versions of these write barriers.
+UniversalWriteBarrierHelper <CheckedWriteBarrier>
+UniversalWriteBarrierHelper <WriteBarrier>
+endif
+
+WriteBarrierHelper <EAX>
+WriteBarrierHelper <EBX>
+WriteBarrierHelper <ECX>
+WriteBarrierHelper <ESI>
+WriteBarrierHelper <EDI>
+WriteBarrierHelper <EBP>
+
+ByRefWriteBarrierHelper
+
+PUBLIC _JIT_WriteBarrierLast@0
+_JIT_WriteBarrierLast@0 PROC
+ret
+_JIT_WriteBarrierLast@0 ENDP
+
+; This is the first function outside the "keep together range". Used by BBT scripts.
+PUBLIC _JIT_WriteBarrierEnd@0
+_JIT_WriteBarrierEnd@0 PROC
+ret
+_JIT_WriteBarrierEnd@0 ENDP
+
+;*********************************************************************/
+; In cases where we support it we have an optimized GC Poll callback. Normally (when we're not trying to
+; suspend for GC), the CORINFO_HELP_POLL_GC helper points to this nop routine. When we're ready to suspend
+; for GC, we whack the Jit Helper table entry to point to the real helper. When we're done with GC we
+; whack it back.
+PUBLIC @JIT_PollGC_Nop@0
+@JIT_PollGC_Nop@0 PROC
+ret
+@JIT_PollGC_Nop@0 ENDP
+
+;*********************************************************************/
+;llshl - long shift left
+;
+;Purpose:
+; Does a Long Shift Left (signed and unsigned are identical)
+; Shifts a long left any number of bits.
+;
+; NOTE: This routine has been adapted from the Microsoft CRTs.
+;
+;Entry:
+; EDX:EAX - long value to be shifted
+; ECX - number of bits to shift by
+;
+;Exit:
+; EDX:EAX - shifted value
+;
+ ALIGN 16
+PUBLIC JIT_LLsh
+JIT_LLsh PROC
+; Handle shifts of between bits 0 and 31
+ cmp ecx, 32
+ jae short LLshMORE32
+ shld edx,eax,cl
+ shl eax,cl
+ ret
+; Handle shifts of between bits 32 and 63
+LLshMORE32:
+ ; The x86 shift instructions only use the lower 5 bits.
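+ ; (so for 32 <= ecx < 64 the 'shl edx,cl' below shifts by ecx-32, which is
+ ; exactly what is needed once the low dword has been moved up into edx)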
+ mov edx,eax
+ xor eax,eax
+ shl edx,cl
+ ret
+JIT_LLsh ENDP
+
+
+;*********************************************************************/
+;LRsh - long shift right
+;
+;Purpose:
+; Does a signed Long Shift Right
+; Shifts a long right any number of bits.
+;
+; NOTE: This routine has been adapted from the Microsoft CRTs.
+;
+;Entry:
+; EDX:EAX - long value to be shifted
+; ECX - number of bits to shift by
+;
+;Exit:
+; EDX:EAX - shifted value
+;
+ ALIGN 16
+PUBLIC JIT_LRsh
+JIT_LRsh PROC
+; Handle shifts of between bits 0 and 31
+ cmp ecx, 32
+ jae short LRshMORE32
+ shrd eax,edx,cl
+ sar edx,cl
+ ret
+; Handle shifts of between bits 32 and 63
+LRshMORE32:
+ ; The x86 shift instructions only use the lower 5 bits.
+ mov eax,edx
+ sar edx, 31
+ sar eax,cl
+ ret
+JIT_LRsh ENDP
+
+
+;*********************************************************************/
+; LRsz:
+;Purpose:
+; Does an unsigned Long Shift Right
+; Shifts a long right any number of bits.
+;
+; NOTE: This routine has been adapted from the Microsoft CRTs.
+;
+;Entry:
+; EDX:EAX - long value to be shifted
+; ECX - number of bits to shift by
+;
+;Exit:
+; EDX:EAX - shifted value
+;
+ ALIGN 16
+PUBLIC JIT_LRsz
+JIT_LRsz PROC
+; Handle shifts of between bits 0 and 31
+ cmp ecx, 32
+ jae short LRszMORE32
+ shrd eax,edx,cl
+ shr edx,cl
+ ret
+; Handle shifts of between bits 32 and 63
+LRszMORE32:
+ ; The x86 shift instructions only use the lower 5 bits.
+ mov eax,edx
+ xor edx,edx
+ shr eax,cl
+ ret
+JIT_LRsz ENDP
+
+;*********************************************************************/
+; LMul:
+;Purpose:
+; Does a long multiply (same for signed/unsigned)
+;
+; NOTE: This routine has been adapted from the Microsoft CRTs.
+;
+;Entry:
+; Parameters are passed on the stack:
+; 1st pushed: multiplier (QWORD)
+; 2nd pushed: multiplicand (QWORD)
+;
+;Exit:
+; EDX:EAX - product of multiplier and multiplicand
+;
+ ALIGN 16
+PUBLIC JIT_LMul
+JIT_LMul PROC
+
+; AHI, BHI : upper 32 bits of A and B
+; ALO, BLO : lower 32 bits of A and B
+;
+; ALO * BLO
+; ALO * BHI
+; + BLO * AHI
+; ---------------------
+
+ mov eax,[esp + 8] ; AHI
+ mov ecx,[esp + 16] ; BHI
+ or ecx,eax ;test for both hiwords zero.
+ mov ecx,[esp + 12] ; BLO
+ jnz LMul_hard ;jump if either hiword is nonzero; otherwise just mult ALO and BLO
+
+ mov eax,[esp + 4]
+ mul ecx
+
+ ret 16 ; callee restores the stack
+
+LMul_hard:
+ push ebx
+
+ mul ecx ;eax has AHI, ecx has BLO, so AHI * BLO
+ mov ebx,eax ;save result
+
+ mov eax,[esp + 8] ; ALO
+ mul dword ptr [esp + 20] ;ALO * BHI
+ add ebx,eax ;ebx = ((ALO * BHI) + (AHI * BLO))
+
+ mov eax,[esp + 8] ; ALO ;ecx = BLO
+ mul ecx ;so edx:eax = ALO*BLO
+ add edx,ebx ;now edx has all the LO*HI stuff
+
+ pop ebx
+
+ ret 16 ; callee restores the stack
+
+JIT_LMul ENDP
+
+;*********************************************************************/
+; JIT_Dbl2LngOvf
+
+;Purpose:
+; converts a double to a long truncating toward zero (C semantics)
+; with check for overflow
+;
+; uses stdcall calling conventions
+;
+PUBLIC JIT_Dbl2LngOvf
+JIT_Dbl2LngOvf PROC
+ fnclex
+ fld qword ptr [esp+4]
+ push ecx
+ push ecx
+ fstp qword ptr [esp]
+ call JIT_Dbl2Lng
+ mov ecx,eax
+ fnstsw ax
+ test ax,01h
+ jnz Dbl2LngOvf_throw
+ mov eax,ecx
+ ret 8
+
+Dbl2LngOvf_throw:
+ mov ECX, CORINFO_OverflowException_ASM
+ call JIT_InternalThrowFromHelper
+ ret 8
+JIT_Dbl2LngOvf ENDP
+
+;*********************************************************************/
+; JIT_Dbl2Lng
+
+;Purpose:
+; converts a double to a long truncating toward zero (C semantics)
+;
+; uses stdcall calling conventions
+;
+; note that changing the rounding mode is very expensive. This
+; routine basically implements truncation semantics without changing
+; the rounding mode, resulting in a win.
+;
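+; What the routine computes, as a C sketch (llrint is a stand-in for fistp,
+; which rounds using the current mode - round-to-nearest by default):
+;
+;   int64_t Dbl2Lng(double d) {
+;       int64_t i = llrint(d);               /* the fistp/fild pair below */
+;       if (i == 0) return 0;                /* |d| < 0.5 (the QNaN-or-zero path) */
+;       float diff = (float)(d - (double)i); /* single precision carries the sign */
+;       if (i < 0) { if (diff > 0) i += 1; } /* rounded below d: pull up toward zero */
+;       else       { if (diff < 0) i -= 1; } /* rounded above d: pull down toward zero */
+;       return i;                            /* net effect: C-style truncation */
+;   }
+;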
+PUBLIC JIT_Dbl2Lng
+JIT_Dbl2Lng PROC
+ fld qword ptr[ESP+4] ; fetch arg
+ lea ecx,[esp-8]
+ sub esp,16 ; allocate frame
+ and ecx,-8 ; align pointer on boundary of 8
+ fld st(0) ; duplicate top of stack
+ fistp qword ptr[ecx] ; leave arg on stack, also save in temp
+ fild qword ptr[ecx] ; arg, round(arg) now on stack
+ mov edx,[ecx+4] ; high dword of integer
+ mov eax,[ecx] ; low dword of integer
+ test eax,eax
+ je integer_QNaN_or_zero
+
+arg_is_not_integer_QNaN:
+ fsubp st(1),st ; TOS=d-round(d),
+ ; { st(1)=st(1)-st & pop ST }
+ test edx,edx ; what's sign of integer
+ jns positive
+ ; number is negative
+ ; dead cycle
+ ; dead cycle
+ fstp dword ptr[ecx] ; result of subtraction
+ mov ecx,[ecx] ; dword of difference(single precision)
+ add esp,16
+ xor ecx,80000000h
+ add ecx,7fffffffh ; if difference>0 then increment integer
+ adc eax,0 ; inc eax (add CARRY flag)
+ adc edx,0 ; propagate carry flag to upper bits
+ ret 8
+
+positive:
+ fstp dword ptr[ecx] ; result of subtraction
+ mov ecx,[ecx] ; dword of difference (single precision)
+ add esp,16
+ add ecx,7fffffffh ; if difference<0 then decrement integer
+ sbb eax,0 ; dec eax (subtract CARRY flag)
+ sbb edx,0 ; propagate carry flag to upper bits
+ ret 8
+
+integer_QNaN_or_zero:
+ test edx,7fffffffh
+ jnz arg_is_not_integer_QNaN
+ fstp st(0) ;; pop round(arg)
+ fstp st(0) ;; arg
+ add esp,16
+ ret 8
+JIT_Dbl2Lng ENDP
+
+;*********************************************************************/
+; JIT_Dbl2LngP4x87
+
+;Purpose:
+; converts a double to a long truncating toward zero (C semantics)
+;
+; uses stdcall calling conventions
+;
+; This code is faster on a P4 than the Dbl2Lng code above, but is
+; slower on a PIII. Hence we choose this code when on a P4 or above.
+;
+PUBLIC JIT_Dbl2LngP4x87
+JIT_Dbl2LngP4x87 PROC
+arg1 equ <[esp+0Ch]>
+
+ sub esp, 8 ; get some local space
+
+ fld qword ptr arg1 ; fetch arg
+ fnstcw word ptr arg1 ; store FPCW
+ movzx eax, word ptr arg1 ; zero extend - wide
+ or ah, 0Ch ; set rounding control (FPCW bits 10-11) to truncate toward zero
+ mov dword ptr [esp], eax ; store new FPCW bits
+ fldcw word ptr [esp] ; reload FPCW with new bits
+ fistp qword ptr [esp] ; convert
+ mov eax, dword ptr [esp] ; reload FP result
+ mov edx, dword ptr [esp+4] ;
+ fldcw word ptr arg1 ; reload original FPCW value
+
+ add esp, 8 ; restore stack
+
+ ret 8
+JIT_Dbl2LngP4x87 ENDP
+
+;*********************************************************************/
+; JIT_Dbl2LngSSE3
+
+;Purpose:
+; converts a double to a long truncating toward zero (C semantics)
+;
+; uses stdcall calling conventions
+;
+; This code is faster than the P4 x87 code above on Intel processors
+; (Core2, Atom and later) that have SSE3 support; fisttp truncates
+; toward zero regardless of the current rounding mode.
+;
+.686P
+.XMM
+PUBLIC JIT_Dbl2LngSSE3
+JIT_Dbl2LngSSE3 PROC
+arg1 equ <[esp+0Ch]>
+
+ sub esp, 8 ; get some local space
+
+ fld qword ptr arg1 ; fetch arg
+ fisttp qword ptr [esp] ; convert
+ mov eax, dword ptr [esp] ; reload FP result
+ mov edx, dword ptr [esp+4]
+
+ add esp, 8 ; restore stack
+
+ ret 8
+JIT_Dbl2LngSSE3 ENDP
+.586
+
+;*********************************************************************/
+; JIT_Dbl2IntSSE2
+
+;Purpose:
+; converts a double to an int truncating toward zero (C semantics)
+;
+; uses stdcall calling conventions
+;
+; This code is even faster than the P4 x87 code in Dbl2LngP4x87,
+; but only returns a 32-bit value (only good for int).
+;
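+; In C this is just the SSE2 truncating convert, e.g. via the standard
+; intrinsic (a sketch, not part of this file):
+;
+;   #include <emmintrin.h>
+;   int Dbl2Int(double d) {
+;       return _mm_cvttsd_si32(_mm_set_sd(d)); /* cvttsd2si: truncate toward zero */
+;   }
+;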
+.686P
+.XMM
+PUBLIC JIT_Dbl2IntSSE2
+JIT_Dbl2IntSSE2 PROC
+ $movsd xmm0, [esp+4] ; $movsd macro emits the SSE2 movsd (MASM would parse plain movsd as the string move)
+ cvttsd2si eax, xmm0
+ ret 8
+JIT_Dbl2IntSSE2 ENDP
+.586
+
+
+;*********************************************************************/
+; This is the small write barrier thunk we use when we know the
+; ephemeral generation is higher in memory than older generations.
+; The 0x0F0F0F0F values are bashed by the two functions above.
+; This is the generic version - wherever the code says ECX,
+; the specific register is patched later into a copy.
+; Note: do not replace ECX with EAX - the compares have a smaller
+; encoding just for EAX, which won't work for other registers.
+;
+; READ THIS!!!!!!
+; it is imperative that the addresses of the values that we overwrite
+; (card table, ephemeral region ranges, etc) are naturally aligned since
+; there are codepaths that will overwrite these values while the EE is running.
+;
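+; The fast path below, sketched in C. The 0F0F0F0F0h placeholders stand for
+; the real card table and ephemeral-range values patched in at runtime
+; (the names here are illustrative):
+;
+;   void WriteBarrier_PreGrow(Object** dst, Object* ref) {
+;       *dst = ref;
+;       if ((size_t)ref < g_ephemeral_low) return;            /* cmp ecx / jb   */
+;       uint8_t* card = g_card_table + ((size_t)dst >> 10);   /* shr edx, 10    */
+;       if (*card != 0xFF) *card = 0xFF;                      /* dirty the card */
+;   }
+;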
+PUBLIC JIT_WriteBarrierReg_PreGrow
+JIT_WriteBarrierReg_PreGrow PROC
+ mov DWORD PTR [edx], ecx
+ cmp ecx, 0F0F0F0F0h
+ jb NoWriteBarrierPre
+
+ shr edx, 10
+ nop ; padding for alignment of constant
+ cmp byte ptr [edx+0F0F0F0F0h], 0FFh
+ jne WriteBarrierPre
+NoWriteBarrierPre:
+ ret
+ nop ; padding for alignment of constant
+ nop ; padding for alignment of constant
+WriteBarrierPre:
+ mov byte ptr [edx+0F0F0F0F0h], 0FFh
+ ret
+JIT_WriteBarrierReg_PreGrow ENDP
+
+;*********************************************************************/
+; This is the larger write barrier thunk we use when we know that older
+; generations may be higher in memory than the ephemeral generation
+; The 0x0F0F0F0F values are bashed by the two functions above.
+; This is the generic version - wherever the code says ECX,
+; the specific register is patched later into a copy.
+; Note: do not replace ECX with EAX - the compares have a smaller
+; encoding just for EAX, which won't work for other registers.
+; NOTE: we need this aligned for our validation to work properly
+ ALIGN 4
+PUBLIC JIT_WriteBarrierReg_PostGrow
+JIT_WriteBarrierReg_PostGrow PROC
+ mov DWORD PTR [edx], ecx
+ cmp ecx, 0F0F0F0F0h ; patched with the ephemeral range low bound
+ jb NoWriteBarrierPost
+ cmp ecx, 0F0F0F0F0h ; patched with the ephemeral range high bound
+ jae NoWriteBarrierPost
+
+ shr edx, 10
+ nop ; padding for alignment of constant
+ cmp byte ptr [edx+0F0F0F0F0h], 0FFh
+ jne WriteBarrierPost
+NoWriteBarrierPost:
+ ret
+ nop ; padding for alignment of constant
+ nop ; padding for alignment of constant
+WriteBarrierPost:
+ mov byte ptr [edx+0F0F0F0F0h], 0FFh
+ ret
+JIT_WriteBarrierReg_PostGrow ENDP
+
+;*********************************************************************/
+;
+
+ ; a fake virtual stub dispatch register indirect callsite
+ $nop3
+ call dword ptr [eax]
+
+
+PUBLIC JIT_TailCallReturnFromVSD
+JIT_TailCallReturnFromVSD:
+ifdef _DEBUG
+ nop ; blessed callsite
+endif
+ call VSDHelperLabel ; keep call-ret count balanced.
+VSDHelperLabel:
+
+; Stack at this point :
+; ...
+; m_ReturnAddress
+; m_regs
+; m_CallerAddress
+; m_pThread
+; vtbl
+; GSCookie
+; &VSDHelperLabel
+OffsetOfTailCallFrame = 8
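+;
+; For reference, the TailCallFrame layout implied by the offsets used below
+; (a C sketch; field names follow these comments, not an authoritative declaration):
+;
+;   struct TailCallFrame {                /* preceded on the stack by a GSCookie */
+;       void*  vptr;                      /* +0  vtbl                            */
+;       void*  m_pNext;                   /* +4  next Frame in the thread chain  */
+;       void*  m_CallerAddress;           /* +8                                  */
+;       struct { uint32_t edi, esi, ebx, ebp; } m_regs; /* +12..+24              */
+;       void*  m_ReturnAddress;           /* +28                                 */
+;   };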
+
+; ebx = pThread
+
+ifdef _DEBUG
+ mov esi, _s_gsCookie ; GetProcessGSCookie()
+ cmp dword ptr [esp+OffsetOfTailCallFrame-SIZEOF_GSCookie], esi
+ je TailCallFrameGSCookieIsValid
+ call @JIT_FailFast@0
+ TailCallFrameGSCookieIsValid:
+endif
+ ; remove the padding frame from the chain
+ mov esi, dword ptr [esp+OffsetOfTailCallFrame+4] ; esi = TailCallFrame::m_Next
+ mov dword ptr [ebx + Thread_m_pFrame], esi
+
+ ; skip the frame
+ add esp, 20 ; &VSDHelperLabel, GSCookie, vtbl, m_Next, m_CallerAddress
+
+ pop edi ; restore callee saved registers
+ pop esi
+ pop ebx
+ pop ebp
+
+ ret ; return to m_ReturnAddress
+
+;------------------------------------------------------------------------------
+;
+
+PUBLIC JIT_TailCall
+JIT_TailCall PROC
+
+; the stack layout at this point is:
+;
+; ebp+8+4*nOldStackArgs <- end of argument destination
+; ... ...
+; ebp+8+ old args (size is nOldStackArgs)
+; ... ...
+; ebp+8 <- start of argument destination
+; ebp+4 ret addr
+; ebp+0 saved ebp
+; ebp-0Ch saved ebx, esi, edi (if have callee saved regs = 1)
+;
+; other stuff (local vars) in the jitted callers' frame
+;
+; esp+20+4*nNewStackArgs <- end of argument source
+; ... ...
+; esp+20+ new args (size is nNewStackArgs) to be passed to the target of the tail-call
+; ... ...
+; esp+20 <- start of argument source
+; esp+16 nOldStackArgs
+; esp+12 nNewStackArgs
+; esp+8 flags (1 = have callee saved regs, 2 = virtual stub dispatch)
+; esp+4 target addr
+; esp+0 retaddr
+;
+; If you change this function, make sure you update code:TailCallStubManager as well.
+
+RetAddr equ 0
+TargetAddr equ 4
+nNewStackArgs equ 12
+nOldStackArgs equ 16
+NewArgs equ 20
+
+; extra space is incremented as we push things on the stack along the way
+ExtraSpace = 0
+
+ call _GetThread@0 ; eax = Thread*
+ push eax ; Thread*
+
+ ; save ArgumentRegisters
+ push ecx
+ push edx
+
+ExtraSpace = 12 ; pThread, ecx, edx
+
+ifdef FEATURE_HIJACK
+ ; If the EE has hijacked (patched) the return address, undo the hijack so it is safe to move it around.
+ test dword ptr [eax+Thread_m_State], TS_Hijacked_ASM
+ jz NoHijack
+
+ ; JIT_TailCallHelper(Thread *)
+ push eax
+ call JIT_TailCallHelper ; this is __stdcall
+
+NoHijack:
+endif
+
+ mov edx, dword ptr [esp+ExtraSpace+JIT_TailCall_StackOffsetToFlags] ; edx = flags
+
+ mov eax, dword ptr [esp+ExtraSpace+nOldStackArgs] ; eax = nOldStackArgs
+ mov ecx, dword ptr [esp+ExtraSpace+nNewStackArgs] ; ecx = nNewStackArgs
+
+ ; restore callee saved registers
+ ; <TODO>@TODO : esp based - doesn't work with localloc</TODO>
+ test edx, 1
+ jz NoCalleeSaveRegisters
+
+ mov edi, dword ptr [ebp-4] ; restore edi
+ mov esi, dword ptr [ebp-8] ; restore esi
+ mov ebx, dword ptr [ebp-12] ; restore ebx
+
+NoCalleeSaveRegisters:
+
+ push dword ptr [ebp+4] ; save the original return address for later
+ push edi
+ push esi
+
+ExtraSpace = 24 ; pThread, ecx, edx, orig retaddr, edi, esi
+CallersEsi = 0
+CallersEdi = 4
+OrigRetAddr = 8
+pThread = 20
+
+ lea edi, [ebp+8+4*eax] ; edi = the end of argument destination
+ lea esi, [esp+ExtraSpace+NewArgs+4*ecx] ; esi = the end of argument source
+
+ mov ebp, dword ptr [ebp] ; restore ebp (do not use ebp as scratch register to get a good stack trace in debugger)
+
+ test edx, 2
+ jnz VSDTailCall
+
+ ; copy the arguments to the final destination
+ test ecx, ecx
+ jz ArgumentsCopied
+ArgumentCopyLoop:
+ ; At this point, this is the value of the registers :
+ ; edi = end of argument dest
+ ; esi = end of argument source
+ ; ecx = nNewStackArgs
+ mov eax, dword ptr [esi-4]
+ sub edi, 4
+ sub esi, 4
+ mov dword ptr [edi], eax
+ dec ecx
+ jnz ArgumentCopyLoop
+ArgumentsCopied:
+
+ ; edi = the start of argument destination
+
+ mov eax, dword ptr [esp+4+4] ; original return address (OrigRetAddr)
+ mov ecx, dword ptr [esp+ExtraSpace+TargetAddr] ; target address
+
+ mov dword ptr [edi-4], eax ; return address
+ mov dword ptr [edi-8], ecx ; target address
+
+ lea eax, [edi-8] ; new value for esp
+
+ pop esi
+ pop edi
+ pop ecx ; skip original return address
+ pop edx
+ pop ecx
+
+ mov esp, eax
+
+PUBLIC JIT_TailCallLeave ; add a label here so that TailCallStubManager can access it
+JIT_TailCallLeave:
+ retn ; Will branch to targetAddr. This matches the
+ ; "call" done by JITted code, keeping the
+ ; call-ret count balanced.
+
+ ;----------------------------------------------------------------------
+VSDTailCall:
+ ;----------------------------------------------------------------------
+
+ ; For the Virtual Stub Dispatch, we create a fake callsite to fool
+ ; the callsite probes. In order to create the call site, we need to insert TailCallFrame
+ ; if we do not have one already.
+ ;
+ ; ecx = nNewStackArgs
+ ; esi = the end of argument source
+ ; edi = the end of argument destination
+ ;
+ ; The stub has pushed the following onto the stack at this point :
+ ; pThread, ecx, edx, orig retaddr, edi, esi
+
+
+ cmp dword ptr [esp+OrigRetAddr], JIT_TailCallReturnFromVSD
+ jz VSDTailCallFrameInserted_DoSlideUpArgs ; There is an existing TailCallFrame that can be reused
+
+ ; try to allocate space for the frame / check whether there is enough space
+ ; If there is sufficient space, we will setup the frame and then slide
+ ; the arguments up the stack. Else, we first need to slide the arguments
+ ; down the stack to make space for the TailCallFrame
+ sub edi, (SIZEOF_GSCookie + SIZEOF_TailCallFrame)
+ cmp edi, esi
+ jae VSDSpaceForFrameChecked
+
+ ; There is not sufficient space to wedge in the TailCallFrame without
+ ; overwriting the new arguments.
+ ; We need to allocate the extra space on the stack,
+ ; and slide down the new arguments
+
+ mov eax, esi
+ sub eax, edi
+ sub esp, eax
+
+ mov eax, ecx ; to subtract the size of arguments
+ mov edx, ecx ; for counter
+
+ neg eax
+
+ ; copy down the arguments to the final destination, need to copy all temporary storage as well
+ add edx, (ExtraSpace+NewArgs)/4
+
+ lea esi, [esi+4*eax-(ExtraSpace+NewArgs)]
+ lea edi, [edi+4*eax-(ExtraSpace+NewArgs)]
+
+VSDAllocFrameCopyLoop:
+ mov eax, dword ptr [esi]
+ mov dword ptr [edi], eax
+ add esi, 4
+ add edi, 4
+ dec edx
+ jnz VSDAllocFrameCopyLoop
+
+ ; the argument source and destination are the same now
+ mov esi, edi
+
+VSDSpaceForFrameChecked:
+
+ ; At this point, we have enough space on the stack for the TailCallFrame,
+ ; and we may already have slid the arguments down
+
+ mov eax, _s_gsCookie ; GetProcessGSCookie()
+ mov dword ptr [edi], eax ; set GSCookie
+ mov eax, _g_TailCallFrameVptr ; vptr
+ mov edx, dword ptr [esp+OrigRetAddr] ; orig return address
+ mov dword ptr [edi+SIZEOF_GSCookie], eax ; TailCallFrame::vptr
+ mov dword ptr [edi+SIZEOF_GSCookie+28], edx ; TailCallFrame::m_ReturnAddress
+
+ mov eax, dword ptr [esp+CallersEdi] ; restored edi
+ mov edx, dword ptr [esp+CallersEsi] ; restored esi
+ mov dword ptr [edi+SIZEOF_GSCookie+12], eax ; TailCallFrame::m_regs::edi
+ mov dword ptr [edi+SIZEOF_GSCookie+16], edx ; TailCallFrame::m_regs::esi
+ mov dword ptr [edi+SIZEOF_GSCookie+20], ebx ; TailCallFrame::m_regs::ebx
+ mov dword ptr [edi+SIZEOF_GSCookie+24], ebp ; TailCallFrame::m_regs::ebp
+
+ mov ebx, dword ptr [esp+pThread] ; ebx = pThread
+
+ mov eax, dword ptr [ebx+Thread_m_pFrame]
+ lea edx, [edi+SIZEOF_GSCookie]
+ mov dword ptr [edi+SIZEOF_GSCookie+4], eax ; TailCallFrame::m_pNext
+ mov dword ptr [ebx+Thread_m_pFrame], edx ; hook the new frame into the chain
+
+ ; setup ebp chain
+ lea ebp, [edi+SIZEOF_GSCookie+24] ; TailCallFrame::m_regs::ebp
+
+ ; Do not copy arguments again if they are in place already
+ ; Otherwise, we will need to slide the new arguments up the stack
+ cmp esi, edi
+ jne VSDTailCallFrameInserted_DoSlideUpArgs
+
+ ; At this point, we must have already slid the new arguments down,
+ ; or the TailCallFrame is a perfect fit
+ ; set the caller address
+ mov edx, dword ptr [esp+ExtraSpace+RetAddr] ; caller address
+ mov dword ptr [edi+SIZEOF_GSCookie+8], edx ; TailCallFrame::m_CallerAddress
+
+ ; adjust edi as if the copy had been done
+ neg ecx
+ lea edi, [edi+4*ecx]
+
+ jmp VSDArgumentsCopied
+
+VSDTailCallFrameInserted_DoSlideUpArgs:
+ ; set the caller address
+ mov edx, dword ptr [esp+ExtraSpace+RetAddr] ; caller address
+ mov dword ptr [edi+SIZEOF_GSCookie+8], edx ; TailCallFrame::m_CallerAddress
+
+ ; copy the arguments to the final destination
+ test ecx, ecx
+ jz VSDArgumentsCopied
+VSDArgumentCopyLoop:
+ mov eax, dword ptr [esi-4]
+ sub edi, 4
+ sub esi, 4
+ mov dword ptr [edi], eax
+ dec ecx
+ jnz VSDArgumentCopyLoop
+VSDArgumentsCopied:
+
+ ; edi = the start of argument destination
+
+ mov ecx, dword ptr [esp+ExtraSpace+TargetAddr] ; target address
+
+ mov dword ptr [edi-4], JIT_TailCallReturnFromVSD ; return address
+ mov dword ptr [edi-12], ecx ; address of indirection cell
+ mov ecx, [ecx]
+ mov dword ptr [edi-8], ecx ; target address
+
+ ; skip original return address and saved esi, edi
+ add esp, 12
+
+ pop edx
+ pop ecx
+
+ lea esp, [edi-12] ; new value for esp
+ pop eax
+
+PUBLIC JIT_TailCallVSDLeave ; add a label here so that TailCallStubManager can access it
+JIT_TailCallVSDLeave:
+ retn ; Will branch to targetAddr. This matches the
+ ; "call" done by JITted code, keeping the
+ ; call-ret count balanced.
+
+JIT_TailCall ENDP
+
+
+;------------------------------------------------------------------------------
+
+; HCIMPL2_VV(float, JIT_FltRem, float dividend, float divisor)
+@JIT_FltRem@8 proc public
+ fld dword ptr [esp+4] ; divisor
+ fld dword ptr [esp+8] ; dividend
+fremloop:
+ fprem
+ fstsw ax
+ fwait
+ sahf
+ jp fremloop ; Continue while the FPU status bit C2 is set
+ fxch ; swap, so divisor is on top and result is in st(1)
+ fstp ST(0) ; Pop the divisor from the FP stack
+ retn 8 ; Return value is in st(0)
+@JIT_FltRem@8 endp
+
+; HCIMPL2_VV(double, JIT_DblRem, double dividend, double divisor)
+@JIT_DblRem@16 proc public
+ fld qword ptr [esp+4] ; divisor
+ fld qword ptr [esp+12] ; dividend
+fremloopd:
+ fprem
+ fstsw ax
+ fwait
+ sahf
+ jp fremloopd ; Continue while the FPU status bit C2 is set
+ fxch ; swap, so divisor is on top and result is in st(1)
+ fstp ST(0) ; Pop the divisor from the FP stack
+ retn 16 ; Return value is in st(0)
+@JIT_DblRem@16 endp
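+
+; Both helpers implement fmod-style (truncating) remainder, i.e. in C terms
+; roughly (a sketch, not the actual HCIMPL bodies):
+;
+;   float  FltRem(float dividend, float divisor)   { return fmodf(dividend, divisor); }
+;   double DblRem(double dividend, double divisor) { return fmod(dividend, divisor); }
+;
+; fprem only reduces the exponent difference by up to 63 bits per iteration,
+; which is why the loop spins until status bit C2 clears.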
+
+;------------------------------------------------------------------------------
+
+g_SystemInfo TEXTEQU <?g_SystemInfo@@3U_SYSTEM_INFO@@A>
+g_SpinConstants TEXTEQU <?g_SpinConstants@@3USpinConstants@@A>
+g_pSyncTable TEXTEQU <?g_pSyncTable@@3PAVSyncTableEntry@@A>
+JITutil_MonEnterWorker TEXTEQU <@JITutil_MonEnterWorker@4>
+JITutil_MonReliableEnter TEXTEQU <@JITutil_MonReliableEnter@8>
+JITutil_MonTryEnter TEXTEQU <@JITutil_MonTryEnter@12>
+JITutil_MonExitWorker TEXTEQU <@JITutil_MonExitWorker@4>
+JITutil_MonContention TEXTEQU <@JITutil_MonContention@4>
+JITutil_MonReliableContention TEXTEQU <@JITutil_MonReliableContention@8>
+JITutil_MonSignal TEXTEQU <@JITutil_MonSignal@4>
+JIT_InternalThrow TEXTEQU <@JIT_InternalThrow@4>
+EXTRN g_SystemInfo:BYTE
+EXTRN g_SpinConstants:BYTE
+EXTRN g_pSyncTable:DWORD
+EXTRN JITutil_MonEnterWorker:PROC
+EXTRN JITutil_MonReliableEnter:PROC
+EXTRN JITutil_MonTryEnter:PROC
+EXTRN JITutil_MonExitWorker:PROC
+EXTRN JITutil_MonContention:PROC
+EXTRN JITutil_MonReliableContention:PROC
+EXTRN JITutil_MonSignal:PROC
+EXTRN JIT_InternalThrow:PROC
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+EnterSyncHelper TEXTEQU <_EnterSyncHelper@8>
+LeaveSyncHelper TEXTEQU <_LeaveSyncHelper@8>
+EXTRN EnterSyncHelper:PROC
+EXTRN LeaveSyncHelper:PROC
+endif ;TRACK_SYNC
+endif ;MON_DEBUG
+
+; The following macro is needed because MASM reports an
+; "instruction prefix not allowed" error for the rep nop
+; mnemonic (F3 90, i.e. the PAUSE instruction)
+$repnop MACRO
+ db 0F3h
+ db 090h
+ENDM
+
+; Safe ThreadAbort does not abort a thread if it is running a finally block or holds locks.
+; At the time we call Monitor.Enter, we initiate the abort if we can.
+; We do not need to do the same for Monitor.Leave, since most of the time Monitor.Leave is
+; called from inside a finally block.
+
+;**********************************************************************
+; This is a frameless helper for entering a monitor on an object.
+; The object is in ARGUMENT_REG1. This tries the normal case (no
+; blocking or object allocation) in line and calls a framed helper
+; for the other cases.
+; ***** NOTE: if you make any changes to this routine, build with MON_DEBUG undefined
+; to make sure you don't break the non-debug build. This is very fragile code.
+; Also, propagate the changes to jithelp.s which contains the same helper and assembly code
+; (in AT&T syntax) for gnu assembler.
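+;
+; The thin-lock fast path below, as a C sketch (helper and mask names are
+; abbreviations of the _ASM constants, not authoritative declarations):
+;
+;   bool TryEnterThinLock(Object* obj, Thread* self) {
+;       uint32_t* hdr = ObjectHeaderAddr(obj);      /* obj - SyncBlockIndexOffset */
+;       uint32_t old = *hdr;
+;       if (old & SBLK_COMBINED_MASK) return false; /* hash/syncblk, spin, owner... */
+;       uint32_t tid = self->m_ThreadId;
+;       if (tid > SBLK_MASK_LOCK_THREADID) return false; /* id too big: needs a sync block */
+;       if (!CompareExchange(hdr, old | tid, old))  /* lock cmpxchg */
+;           return false;                           /* lost a race - retry or back off */
+;       self->m_dwLockCount++;
+;       return true;                                /* locked with no sync block at all */
+;   }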
+@JIT_MonEnterWorker@4 proc public
+ ; Initialize delay value for retry with exponential backoff
+ push ebx
+ mov ebx, dword ptr g_SpinConstants+SpinConstants_dwInitialDuration
+
+ ; We need yet another register to avoid refetching the thread object
+ push esi
+
+ ; Check if the instance is NULL.
+ test ARGUMENT_REG1, ARGUMENT_REG1
+ jz MonEnterFramedLockHelper
+
+ call _GetThread@0
+ mov esi,eax
+
+ ; Check if we can abort here
+ mov eax, [esi+Thread_m_State]
+ and eax, TS_CatchAtSafePoint_ASM
+ jz MonEnterRetryThinLock
+ ; go through the slow code path to initiate ThreadAbort.
+ jmp MonEnterFramedLockHelper
+
+MonEnterRetryThinLock:
+ ; Fetch the object header dword
+ mov eax, [ARGUMENT_REG1-SyncBlockIndexOffset_ASM]
+
+ ; Check whether we have the "thin lock" layout, the lock is free and the spin lock bit not set
+ ; SBLK_COMBINED_MASK_ASM = BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL
+ test eax, SBLK_COMBINED_MASK_ASM
+ jnz MonEnterNeedMoreTests
+
+ ; Everything is fine - get the thread id to store in the lock
+ mov edx, [esi+Thread_m_ThreadId]
+
+ ; If the thread id is too large, we need a syncblock for sure
+ cmp edx, SBLK_MASK_LOCK_THREADID_ASM
+ ja MonEnterFramedLockHelper
+
+ ; We want to store a new value with the current thread id set in the low 10 bits
+ or edx,eax
+ lock cmpxchg dword ptr [ARGUMENT_REG1-SyncBlockIndexOffset_ASM], edx
+ jnz MonEnterPrepareToWaitThinLock
+
+ ; Everything went fine and we're done
+ add [esi+Thread_m_dwLockCount],1
+ pop esi
+ pop ebx
+ ret
+
+MonEnterNeedMoreTests:
+ ; Ok, it's not the simple case - find out which case it is
+ test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_ASM
+ jnz MonEnterHaveHashOrSyncBlockIndex
+
+ ; The header is transitioning (spin lock bit set) - treat this as if the lock was taken
+ test eax, BIT_SBLK_SPIN_LOCK_ASM
+ jnz MonEnterPrepareToWaitThinLock
+
+ ; Here we know we have the "thin lock" layout, but the lock is not free.
+ ; It could still be the recursion case - compare the thread id to check
+ mov edx,eax
+ and edx, SBLK_MASK_LOCK_THREADID_ASM
+ cmp edx, [esi+Thread_m_ThreadId]
+ jne MonEnterPrepareToWaitThinLock
+
+ ; Ok, the thread id matches, it's the recursion case.
+ ; Bump up the recursion level and check for overflow
+ lea edx, [eax+SBLK_LOCK_RECLEVEL_INC_ASM]
+ test edx, SBLK_MASK_LOCK_RECLEVEL_ASM
+ jz MonEnterFramedLockHelper
+
+ ; Try to put the new recursion level back. If the header was changed in the meantime,
+ ; we need a full retry, because the layout could have changed.
+ lock cmpxchg [ARGUMENT_REG1-SyncBlockIndexOffset_ASM], edx
+ jnz MonEnterRetryHelperThinLock
+
+ ; Everything went fine and we're done
+ pop esi
+ pop ebx
+ ret
+
+MonEnterPrepareToWaitThinLock:
+ ; If we are on an MP system, we try spinning for a certain number of iterations
+ cmp dword ptr g_SystemInfo+SYSTEM_INFO_dwNumberOfProcessors,1
+ jle MonEnterFramedLockHelper
+
+ ; exponential backoff: delay by approximately 2*ebx clock cycles (on a PIII)
+ mov eax, ebx
+MonEnterdelayLoopThinLock:
+ $repnop ; indicate to the CPU that we are spin waiting (useful for some Intel P4 multiprocs)
+ dec eax
+ jnz MonEnterdelayLoopThinLock
+
+ ; next time, wait a factor longer
+ imul ebx, dword ptr g_SpinConstants+SpinConstants_dwBackoffFactor
+
+ cmp ebx, dword ptr g_SpinConstants+SpinConstants_dwMaximumDuration
+ jle MonEnterRetryHelperThinLock
+
+ jmp MonEnterFramedLockHelper
+
+MonEnterRetryHelperThinLock:
+ jmp MonEnterRetryThinLock
+
+MonEnterHaveHashOrSyncBlockIndex:
+ ; If we have a hash code already, we need to create a sync block
+ test eax, BIT_SBLK_IS_HASHCODE_ASM
+ jnz MonEnterFramedLockHelper
+
+ ; Ok, we have a sync block index - just AND out the top bits and grab the syncblock index
+ and eax, MASK_SYNCBLOCKINDEX_ASM
+
+ ; Get the sync block pointer.
+ mov ARGUMENT_REG2, dword ptr g_pSyncTable
+ mov ARGUMENT_REG2, [ARGUMENT_REG2+eax*SizeOfSyncTableEntry_ASM+SyncTableEntry_m_SyncBlock]
+
+ ; Check if the sync block has been allocated.
+ test ARGUMENT_REG2, ARGUMENT_REG2
+ jz MonEnterFramedLockHelper
+
+ ; Get a pointer to the lock object.
+ lea ARGUMENT_REG2, [ARGUMENT_REG2+SyncBlock_m_Monitor]
+
+ ; Attempt to acquire the lock.
+MonEnterRetrySyncBlock:
+ mov eax, [ARGUMENT_REG2+AwareLock_m_MonitorHeld]
+ test eax,eax
+ jne MonEnterHaveWaiters
+
+ ; Common case, lock isn't held and there are no waiters. Attempt to
+ ; gain ownership ourselves.
+ mov ARGUMENT_REG1,1
+ lock cmpxchg [ARGUMENT_REG2+AwareLock_m_MonitorHeld], ARGUMENT_REG1
+ jnz MonEnterRetryHelperSyncBlock
+
+ ; Success. Save the thread object in the lock and increment the use count.
+ mov dword ptr [ARGUMENT_REG2+AwareLock_m_HoldingThread],esi
+ inc dword ptr [esi+Thread_m_dwLockCount]
+ inc dword ptr [ARGUMENT_REG2+AwareLock_m_Recursion]
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ push ARGUMENT_REG2 ; AwareLock
+ push [esp+4] ; return address
+ call EnterSyncHelper
+endif ;TRACK_SYNC
+endif ;MON_DEBUG
+ pop esi
+ pop ebx
+ ret
+
+ ; It's possible to get here with waiters but no lock held, but in this
+ ; case a signal is about to be fired which will wake up a waiter. So
+ ; for fairness sake we should wait too.
+ ; Check first for recursive lock attempts on the same thread.
+MonEnterHaveWaiters:
+ ; Is mutex already owned by current thread?
+ cmp [ARGUMENT_REG2+AwareLock_m_HoldingThread],esi
+ jne MonEnterPrepareToWait
+
+ ; Yes, bump our use count.
+ inc dword ptr [ARGUMENT_REG2+AwareLock_m_Recursion]
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ push ARGUMENT_REG2 ; AwareLock
+ push [esp+4] ; return address
+ call EnterSyncHelper
+endif ;TRACK_SYNC
+endif ;MON_DEBUG
+ pop esi
+ pop ebx
+ ret
+
+MonEnterPrepareToWait:
+ ; If we are on an MP system, we try spinning for a certain number of iterations
+ cmp dword ptr g_SystemInfo+SYSTEM_INFO_dwNumberOfProcessors,1
+ jle MonEnterHaveWaiters1
+
+ ; exponential backoff: delay by approximately 2*ebx clock cycles (on a PIII)
+ mov eax,ebx
+MonEnterdelayLoop:
+ $repnop ; indicate to the CPU that we are spin waiting (useful for some Intel P4 multiprocs)
+ dec eax
+ jnz MonEnterdelayLoop
+
+ ; next time, wait a factor longer
+ imul ebx, dword ptr g_SpinConstants+SpinConstants_dwBackoffFactor
+
+ cmp ebx, dword ptr g_SpinConstants+SpinConstants_dwMaximumDuration
+ jle MonEnterRetrySyncBlock
+
+MonEnterHaveWaiters1:
+
+ pop esi
+ pop ebx
+
+ ; Place AwareLock in arg1 then call contention helper.
+ mov ARGUMENT_REG1, ARGUMENT_REG2
+ jmp JITutil_MonContention
+
+MonEnterRetryHelperSyncBlock:
+ jmp MonEnterRetrySyncBlock
+
+ ; ECX has the object to synchronize on
+MonEnterFramedLockHelper:
+ pop esi
+ pop ebx
+ jmp JITutil_MonEnterWorker
+
+@JIT_MonEnterWorker@4 endp
+
+;**********************************************************************
+; This is a frameless helper for entering a monitor on an object, and
+; setting a flag to indicate that the lock was taken.
+; The object is in ARGUMENT_REG1. The flag is in ARGUMENT_REG2.
+; This tries the normal case (no blocking or object allocation) in line
+; and calls a framed helper for the other cases.
+; ***** NOTE: if you make any changes to this routine, build with MON_DEBUG undefined
+; to make sure you don't break the non-debug build. This is very fragile code.
+; Also, propagate the changes to jithelp.s which contains the same helper and assembly code
+; (in AT&T syntax) for gnu assembler.
+@JIT_MonReliableEnter@8 proc public
+ ; Initialize delay value for retry with exponential backoff
+ push ebx
+ mov ebx, dword ptr g_SpinConstants+SpinConstants_dwInitialDuration
+
+ ; Put pbLockTaken in edi
+ push edi
+ mov edi, ARGUMENT_REG2
+
+ ; We need yet another register to avoid refetching the thread object
+ push esi
+
+ ; Check if the instance is NULL.
+ test ARGUMENT_REG1, ARGUMENT_REG1
+ jz MonReliableEnterFramedLockHelper
+
+ call _GetThread@0
+ mov esi,eax
+
+ ; Check if we can abort here
+ mov eax, [esi+Thread_m_State]
+ and eax, TS_CatchAtSafePoint_ASM
+ jz MonReliableEnterRetryThinLock
+ ; go through the slow code path to initiate ThreadAbort.
+ jmp MonReliableEnterFramedLockHelper
+
+MonReliableEnterRetryThinLock:
+ ; Fetch the object header dword
+ mov eax, [ARGUMENT_REG1-SyncBlockIndexOffset_ASM]
+
+ ; Check whether we have the "thin lock" layout, the lock is free and the spin lock bit not set
+ ; SBLK_COMBINED_MASK_ASM = BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL
+ test eax, SBLK_COMBINED_MASK_ASM
+ jnz MonReliableEnterNeedMoreTests
+
+ ; Everything is fine - get the thread id to store in the lock
+ mov edx, [esi+Thread_m_ThreadId]
+
+ ; If the thread id is too large, we need a syncblock for sure
+ cmp edx, SBLK_MASK_LOCK_THREADID_ASM
+ ja MonReliableEnterFramedLockHelper
+
+ ; We want to store a new value with the current thread id set in the low 10 bits
+ or edx,eax
+ lock cmpxchg dword ptr [ARGUMENT_REG1-SyncBlockIndexOffset_ASM], edx
+ jnz MonReliableEnterPrepareToWaitThinLock
+
+ ; Everything went fine and we're done
+ add [esi+Thread_m_dwLockCount],1
+ ; Set *pbLockTaken=true
+ mov byte ptr [edi],1
+ pop esi
+ pop edi
+ pop ebx
+ ret
+
+MonReliableEnterNeedMoreTests:
+ ; Ok, it's not the simple case - find out which case it is
+ test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_ASM
+ jnz MonReliableEnterHaveHashOrSyncBlockIndex
+
+ ; The header is transitioning (spin lock bit set) - treat this as if the lock was taken
+ test eax, BIT_SBLK_SPIN_LOCK_ASM
+ jnz MonReliableEnterPrepareToWaitThinLock
+
+ ; Here we know we have the "thin lock" layout, but the lock is not free.
+ ; It could still be the recursion case - compare the thread id to check
+ mov edx,eax
+ and edx, SBLK_MASK_LOCK_THREADID_ASM
+ cmp edx, [esi+Thread_m_ThreadId]
+ jne MonReliableEnterPrepareToWaitThinLock
+
+ ; Ok, the thread id matches, it's the recursion case.
+ ; Bump up the recursion level and check for overflow
+ lea edx, [eax+SBLK_LOCK_RECLEVEL_INC_ASM]
+ test edx, SBLK_MASK_LOCK_RECLEVEL_ASM
+ jz MonReliableEnterFramedLockHelper
+
+ ; Try to put the new recursion level back. If the header was changed in the meantime,
+ ; we need a full retry, because the layout could have changed.
+ lock cmpxchg [ARGUMENT_REG1-SyncBlockIndexOffset_ASM], edx
+ jnz MonReliableEnterRetryHelperThinLock
+
+ ; Everything went fine and we're done
+ ; Set *pbLockTaken=true
+ mov byte ptr [edi],1
+ pop esi
+ pop edi
+ pop ebx
+ ret
+
+MonReliableEnterPrepareToWaitThinLock:
+ ; If we are on an MP system, we try spinning for a certain number of iterations
+ cmp dword ptr g_SystemInfo+SYSTEM_INFO_dwNumberOfProcessors,1
+ jle MonReliableEnterFramedLockHelper
+
+ ; exponential backoff: delay by approximately 2*ebx clock cycles (on a PIII)
+ mov eax, ebx
+MonReliableEnterdelayLoopThinLock:
+ $repnop ; indicate to the CPU that we are spin waiting (useful for some Intel P4 multiprocs)
+ dec eax
+ jnz MonReliableEnterdelayLoopThinLock
+
+ ; next time, wait a factor longer
+ imul ebx, dword ptr g_SpinConstants+SpinConstants_dwBackoffFactor
+
+ cmp ebx, dword ptr g_SpinConstants+SpinConstants_dwMaximumDuration
+ jle MonReliableEnterRetryHelperThinLock
+
+ jmp MonReliableEnterFramedLockHelper
+
+MonReliableEnterRetryHelperThinLock:
+ jmp MonReliableEnterRetryThinLock
+
+MonReliableEnterHaveHashOrSyncBlockIndex:
+ ; If we have a hash code already, we need to create a sync block
+ test eax, BIT_SBLK_IS_HASHCODE_ASM
+ jnz MonReliableEnterFramedLockHelper
+
+ ; Ok, we have a sync block index - just AND out the top bits and grab the syncblock index
+ and eax, MASK_SYNCBLOCKINDEX_ASM
+
+ ; Get the sync block pointer.
+ mov ARGUMENT_REG2, dword ptr g_pSyncTable
+ mov ARGUMENT_REG2, [ARGUMENT_REG2+eax*SizeOfSyncTableEntry_ASM+SyncTableEntry_m_SyncBlock]
+
+ ; Check if the sync block has been allocated.
+ test ARGUMENT_REG2, ARGUMENT_REG2
+ jz MonReliableEnterFramedLockHelper
+
+ ; Get a pointer to the lock object.
+ lea ARGUMENT_REG2, [ARGUMENT_REG2+SyncBlock_m_Monitor]
+
+ ; Attempt to acquire the lock.
+MonReliableEnterRetrySyncBlock:
+ mov eax, [ARGUMENT_REG2+AwareLock_m_MonitorHeld]
+ test eax,eax
+ jne MonReliableEnterHaveWaiters
+
+ ; Common case, lock isn't held and there are no waiters. Attempt to
+ ; gain ownership ourselves.
+ mov ARGUMENT_REG1,1
+ lock cmpxchg [ARGUMENT_REG2+AwareLock_m_MonitorHeld], ARGUMENT_REG1
+ jnz MonReliableEnterRetryHelperSyncBlock
+
+ ; Success. Save the thread object in the lock and increment the use count.
+ mov dword ptr [ARGUMENT_REG2+AwareLock_m_HoldingThread],esi
+ inc dword ptr [esi+Thread_m_dwLockCount]
+ inc dword ptr [ARGUMENT_REG2+AwareLock_m_Recursion]
+ ; Set *pbLockTaken=true
+ mov byte ptr [edi],1
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ push ARGUMENT_REG2 ; AwareLock
+ push [esp+4] ; return address
+ call EnterSyncHelper
+endif ;TRACK_SYNC
+endif ;MON_DEBUG
+ pop esi
+ pop edi
+ pop ebx
+ ret
+
+ ; It's possible to get here with waiters but no lock held, but in this
+ ; case a signal is about to be fired which will wake up a waiter. So
+ ; for fairness sake we should wait too.
+ ; Check first for recursive lock attempts on the same thread.
+MonReliableEnterHaveWaiters:
+ ; Is mutex already owned by current thread?
+ cmp [ARGUMENT_REG2+AwareLock_m_HoldingThread],esi
+ jne MonReliableEnterPrepareToWait
+
+ ; Yes, bump our use count.
+ inc dword ptr [ARGUMENT_REG2+AwareLock_m_Recursion]
+ ; Set *pbLockTaken=true
+ mov byte ptr [edi],1
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ push ARGUMENT_REG2 ; AwareLock
+ push [esp+4] ; return address
+ call EnterSyncHelper
+endif ;TRACK_SYNC
+endif ;MON_DEBUG
+ pop esi
+ pop edi
+ pop ebx
+ ret
+
+MonReliableEnterPrepareToWait:
+ ; If we are on an MP system, we try spinning for a certain number of iterations
+ cmp dword ptr g_SystemInfo+SYSTEM_INFO_dwNumberOfProcessors,1
+ jle MonReliableEnterHaveWaiters1
+
+ ; exponential backoff: delay by approximately 2*ebx clock cycles (on a PIII)
+ mov eax,ebx
+MonReliableEnterdelayLoop:
+ $repnop ; indicate to the CPU that we are spin waiting (useful for some Intel P4 multiprocs)
+ dec eax
+ jnz MonReliableEnterdelayLoop
+
+ ; next time, wait a factor longer
+ imul ebx, dword ptr g_SpinConstants+SpinConstants_dwBackoffFactor
+
+ cmp ebx, dword ptr g_SpinConstants+SpinConstants_dwMaximumDuration
+ jle MonReliableEnterRetrySyncBlock
+
+MonReliableEnterHaveWaiters1:
+
+ ; Place AwareLock in arg1, pbLockTaken in arg2, then call contention helper.
+ mov ARGUMENT_REG1, ARGUMENT_REG2
+ mov ARGUMENT_REG2, edi
+
+ pop esi
+ pop edi
+ pop ebx
+
+ jmp JITutil_MonReliableContention
+
+MonReliableEnterRetryHelperSyncBlock:
+ jmp MonReliableEnterRetrySyncBlock
+
+ ; ECX has the object to synchronize on
+MonReliableEnterFramedLockHelper:
+ mov ARGUMENT_REG2, edi
+ pop esi
+ pop edi
+ pop ebx
+ jmp JITutil_MonReliableEnter
+
+@JIT_MonReliableEnter@8 endp
+
+;************************************************************************
+; This is a frameless helper for trying to enter a monitor on an object.
+; The object is in ARGUMENT_REG1 and a timeout in ARGUMENT_REG2. This tries the
+; normal case (no object allocation) in line and calls a framed helper for the
+; other cases.
+; ***** NOTE: if you make any changes to this routine, build with MON_DEBUG undefined
+; to make sure you don't break the non-debug build. This is very fragile code.
+; Also, propagate the changes to jithelp.s which contains the same helper and assembly code
+; (in AT&T syntax) for gnu assembler.
+@JIT_MonTryEnter@12 proc public
+ ; Save the timeout parameter.
+ push ARGUMENT_REG2
+
+ ; Initialize delay value for retry with exponential backoff
+ push ebx
+ mov ebx, dword ptr g_SpinConstants+SpinConstants_dwInitialDuration
+
+ ; The thin lock logic needs another register to store the thread
+ push esi
+
+ ; Check if the instance is NULL.
+ test ARGUMENT_REG1, ARGUMENT_REG1
+ jz MonTryEnterFramedLockHelper
+
+ ; Check if the timeout looks valid
+ cmp ARGUMENT_REG2,-1
+ jl MonTryEnterFramedLockHelper
+
+ ; Get the thread right away, we'll need it in any case
+ call _GetThread@0
+ mov esi,eax
+
+ ; Check if we can abort here
+ mov eax, [esi+Thread_m_State]
+ and eax, TS_CatchAtSafePoint_ASM
+ jz MonTryEnterRetryThinLock
+ ; go through the slow code path to initiate ThreadAbort.
+ jmp MonTryEnterFramedLockHelper
+
+MonTryEnterRetryThinLock:
+ ; Get the header dword and check its layout
+ mov eax, [ARGUMENT_REG1-SyncBlockIndexOffset_ASM]
+
+ ; Check whether we have the "thin lock" layout, the lock is free and the spin lock bit not set
+ ; SBLK_COMBINED_MASK_ASM = BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK + SBLK_MASK_LOCK_THREADID + SBLK_MASK_LOCK_RECLEVEL
+ test eax, SBLK_COMBINED_MASK_ASM
+ jnz MonTryEnterNeedMoreTests
+
+ ; Ok, everything is fine. Fetch the thread id and make sure it's small enough for thin locks
+ mov edx, [esi+Thread_m_ThreadId]
+ cmp edx, SBLK_MASK_LOCK_THREADID_ASM
+ ja MonTryEnterFramedLockHelper
+
+ ; Try to put our thread id in there
+ or edx,eax
+ lock cmpxchg [ARGUMENT_REG1-SyncBlockIndexOffset_ASM],edx
+ jnz MonTryEnterRetryHelperThinLock
+
+ ; Got the lock - everything is fine
+ add [esi+Thread_m_dwLockCount],1
+ pop esi
+
+ ; Delay value no longer needed
+ pop ebx
+
+ ; Timeout parameter not needed, ditch it from the stack.
+ add esp,4
+
+ mov eax, [esp+4]
+ mov byte ptr [eax], 1
+ ret 4
+
+MonTryEnterNeedMoreTests:
+ ; Ok, it's not the simple case - find out which case it is
+ test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_ASM
+ jnz MonTryEnterHaveSyncBlockIndexOrHash
+
+ ; The header is transitioning or the lock is taken
+ test eax, BIT_SBLK_SPIN_LOCK_ASM
+ jnz MonTryEnterRetryHelperThinLock
+
+ mov edx, eax
+ and edx, SBLK_MASK_LOCK_THREADID_ASM
+ cmp edx, [esi+Thread_m_ThreadId]
+ jne MonTryEnterPrepareToWaitThinLock
+
+ ; Ok, the thread id matches, it's the recursion case.
+ ; Bump up the recursion level and check for overflow
+ lea edx, [eax+SBLK_LOCK_RECLEVEL_INC_ASM]
+ test edx, SBLK_MASK_LOCK_RECLEVEL_ASM
+ jz MonTryEnterFramedLockHelper
+
+ ; Try to put the new recursion level back. If the header was changed in the meantime,
+ ; we need a full retry, because the layout could have changed.
+ lock cmpxchg [ARGUMENT_REG1-SyncBlockIndexOffset_ASM],edx
+ jnz MonTryEnterRetryHelperThinLock
+
+ ; Everything went fine and we're done
+ pop esi
+ pop ebx
+
+ ; Timeout parameter not needed, ditch it from the stack.
+ add esp, 4
+ mov eax, [esp+4]
+ mov byte ptr [eax], 1
+ ret 4
+
+MonTryEnterPrepareToWaitThinLock:
+ ; If we are on an MP system, we try spinning for a certain number of iterations
+ cmp dword ptr g_SystemInfo+SYSTEM_INFO_dwNumberOfProcessors,1
+ jle MonTryEnterFramedLockHelper
+
+ ; exponential backoff: delay by approximately 2*ebx clock cycles (on a PIII)
+ mov eax, ebx
+MonTryEnterdelayLoopThinLock:
+ $repnop ; indicate to the CPU that we are spin waiting (useful for some Intel P4 multiprocs)
+ dec eax
+ jnz MonTryEnterdelayLoopThinLock
+
+ ; next time, wait a factor longer
+ imul ebx, dword ptr g_SpinConstants+SpinConstants_dwBackoffFactor
+
+ cmp ebx, dword ptr g_SpinConstants+SpinConstants_dwMaximumDuration
+ jle MonTryEnterRetryHelperThinLock
+
+ jmp MonTryEnterWouldBlock
+
+MonTryEnterRetryHelperThinLock:
+ jmp MonTryEnterRetryThinLock
+
+
+MonTryEnterHaveSyncBlockIndexOrHash:
+ ; If we have a hash code already, we need to create a sync block
+ test eax, BIT_SBLK_IS_HASHCODE_ASM
+ jnz MonTryEnterFramedLockHelper
+
+ ; Just AND out the top bits and grab the syncblock index
+ and eax, MASK_SYNCBLOCKINDEX_ASM
+
+ ; Get the sync block pointer.
+ mov ARGUMENT_REG2, dword ptr g_pSyncTable
+ mov ARGUMENT_REG2, [ARGUMENT_REG2+eax*SizeOfSyncTableEntry_ASM+SyncTableEntry_m_SyncBlock]
+
+ ; Check if the sync block has been allocated.
+ test ARGUMENT_REG2, ARGUMENT_REG2
+ jz MonTryEnterFramedLockHelper
+
+ ; Get a pointer to the lock object.
+ lea ARGUMENT_REG2, [ARGUMENT_REG2+SyncBlock_m_Monitor]
+
+MonTryEnterRetrySyncBlock:
+ ; Attempt to acquire the lock.
+ mov eax, [ARGUMENT_REG2+AwareLock_m_MonitorHeld]
+ test eax,eax
+ jne MonTryEnterHaveWaiters
+
+ ; We need another scratch register for what follows, so save EBX now so
+ ; we can use it for that purpose.
+ push ebx
+
+ ; Common case, lock isn't held and there are no waiters. Attempt to
+ ; gain ownership ourselves.
+ mov ebx,1
+ lock cmpxchg [ARGUMENT_REG2+AwareLock_m_MonitorHeld],ebx
+
+ pop ebx
+
+ jnz MonTryEnterRetryHelperSyncBlock
+
+ ; Success. Save the thread object in the lock and increment the use count.
+ mov dword ptr [ARGUMENT_REG2+AwareLock_m_HoldingThread],esi
+ inc dword ptr [ARGUMENT_REG2+AwareLock_m_Recursion]
+ inc dword ptr [esi+Thread_m_dwLockCount]
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ push ARGUMENT_REG2 ; AwareLock
+ push [esp+4] ; return address
+ call EnterSyncHelper
+endif ;TRACK_SYNC
+endif ;MON_DEBUG
+
+ pop esi
+ pop ebx
+
+ ; Timeout parameter not needed, ditch it from the stack.
+ add esp,4
+
+ mov eax, [esp+4]
+ mov byte ptr [eax], 1
+ ret 4
+
+ ; It's possible to get here with waiters but no lock held, but in this
+ ; case a signal is about to be fired which will wake up a waiter. So
+ ; for fairness sake we should wait too.
+ ; Check first for recursive lock attempts on the same thread.
+MonTryEnterHaveWaiters:
+ ; Is mutex already owned by current thread?
+ cmp [ARGUMENT_REG2+AwareLock_m_HoldingThread],esi
+ jne MonTryEnterPrepareToWait
+
+ ; Yes, bump our use count.
+ inc dword ptr [ARGUMENT_REG2+AwareLock_m_Recursion]
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ push ARGUMENT_REG2 ; AwareLock
+ push [esp+4] ; return address
+ call EnterSyncHelper
+endif ;TRACK_SYNC
+endif ;MON_DEBUG
+ pop esi
+ pop ebx
+
+ ; Timeout parameter not needed, ditch it from the stack.
+ add esp,4
+
+ mov eax, [esp+4]
+ mov byte ptr [eax], 1
+ ret 4
+
+MonTryEnterPrepareToWait:
+ ; If we are on an MP system, we try spinning for a certain number of iterations
+ cmp dword ptr g_SystemInfo+SYSTEM_INFO_dwNumberOfProcessors,1
+ jle MonTryEnterWouldBlock
+
+ ; exponential backoff: delay by approximately 2*ebx clock cycles (on a PIII)
+ mov eax, ebx
+MonTryEnterdelayLoop:
+ $repnop ; indicate to the CPU that we are spin waiting (useful for some Intel P4 multiprocs)
+ dec eax
+ jnz MonTryEnterdelayLoop
+
+ ; next time, wait a factor longer
+ imul ebx, dword ptr g_SpinConstants+SpinConstants_dwBackoffFactor
+
+ cmp ebx, dword ptr g_SpinConstants+SpinConstants_dwMaximumDuration
+ jle MonTryEnterRetrySyncBlock
+
+ ; We would need to block to enter the section. Return failure if
+ ; timeout is zero, else call the framed helper to do the blocking
+ ; form of TryEnter.
+MonTryEnterWouldBlock:
+ pop esi
+ pop ebx
+ pop ARGUMENT_REG2
+ test ARGUMENT_REG2, ARGUMENT_REG2
+ jnz MonTryEnterBlock
+ mov eax, [esp+4]
+ mov byte ptr [eax], 0
+ ret 4
+
+MonTryEnterRetryHelperSyncBlock:
+ jmp MonTryEnterRetrySyncBlock
+
+MonTryEnterFramedLockHelper:
+ ; ARGUMENT_REG1 has the object to synchronize on, must retrieve the
+ ; timeout parameter from the stack.
+ pop esi
+ pop ebx
+ pop ARGUMENT_REG2
+MonTryEnterBlock:
+ jmp JITutil_MonTryEnter
+
+@JIT_MonTryEnter@12 endp
+
+;**********************************************************************
+; This is a frameless helper for exiting a monitor on an object.
+; The object is in ARGUMENT_REG1. This tries the normal case (no
+; blocking or object allocation) in line and calls a framed helper
+; for the other cases.
+; ***** NOTE: if you make any changes to this routine, build with MON_DEBUG undefined
+; to make sure you don't break the non-debug build. This is very fragile code.
+; Also, propagate the changes to jithelp.s which contains the same helper and assembly code
+; (in AT&T syntax) for gnu assembler.
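+;
+; The thin-lock exit fast path in C form (sketch; names as in the enter helper
+; sketch above, plus the recursion-level constants):
+;
+;   bool TryExitThinLock(Object* obj, Thread* self) {
+;       uint32_t* hdr = ObjectHeaderAddr(obj);
+;       uint32_t old = *hdr;
+;       if (old & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_SPIN_LOCK))
+;           return false;                             /* sync block / transitioning */
+;       if ((old & SBLK_MASK_LOCK_THREADID) != self->m_ThreadId)
+;           return false;                             /* we do not own this lock */
+;       uint32_t nw = (old & SBLK_MASK_LOCK_RECLEVEL)
+;                   ? old - SBLK_LOCK_RECLEVEL_INC    /* recursive: drop one level */
+;                   : old & ~SBLK_MASK_LOCK_THREADID; /* last exit: clear our id */
+;       if (!CompareExchange(hdr, nw, old)) return false; /* raced: caller retries */
+;       if (!(old & SBLK_MASK_LOCK_RECLEVEL)) self->m_dwLockCount--;
+;       return true;
+;   }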
+@JIT_MonExitWorker@4 proc public
+ ; The thin lock logic needs an additional register to hold the thread, unfortunately
+ push esi
+
+ ; Check if the instance is NULL.
+ test ARGUMENT_REG1, ARGUMENT_REG1
+ jz MonExitFramedLockHelper
+
+ call _GetThread@0
+ mov esi,eax
+
+MonExitRetryThinLock:
+ ; Fetch the header dword and check its layout and the spin lock bit
+ mov eax, [ARGUMENT_REG1-SyncBlockIndexOffset_ASM]
+ ;BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_SPIN_LOCK_ASM = BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_SPIN_LOCK
+ test eax, BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX_SPIN_LOCK_ASM
+ jnz MonExitNeedMoreTests
+
+ ; Ok, we have a "thin lock" layout - check whether the thread id matches
+ mov edx,eax
+ and edx, SBLK_MASK_LOCK_THREADID_ASM
+ cmp edx, [esi+Thread_m_ThreadId]
+ jne MonExitFramedLockHelper
+
+ ; Check the recursion level
+ test eax, SBLK_MASK_LOCK_RECLEVEL_ASM
+ jne MonExitDecRecursionLevel
+
+ ; It's zero - we're leaving the lock.
+ ; So try to put back a zero thread id.
+ ; edx and eax match in the thread id bits, and edx is zero elsewhere, so the xor is sufficient
+ xor edx,eax
+ lock cmpxchg [ARGUMENT_REG1-SyncBlockIndexOffset_ASM],edx
+ jnz MonExitRetryHelperThinLock
+
+ ; We're done
+ sub [esi+Thread_m_dwLockCount],1
+ pop esi
+ ret
+
+MonExitDecRecursionLevel:
+ lea edx, [eax-SBLK_LOCK_RECLEVEL_INC_ASM]
+ lock cmpxchg [ARGUMENT_REG1-SyncBlockIndexOffset_ASM],edx
+ jnz MonExitRetryHelperThinLock
+
+ ; We're done
+ pop esi
+ ret
+
+MonExitNeedMoreTests:
+ ;Forward all special cases to the slow helper
+ ;BIT_SBLK_IS_HASHCODE_OR_SPIN_LOCK_ASM = BIT_SBLK_IS_HASHCODE + BIT_SBLK_SPIN_LOCK
+ test eax, BIT_SBLK_IS_HASHCODE_OR_SPIN_LOCK_ASM
+ jnz MonExitFramedLockHelper
+
+ ; Get the sync block index and use it to compute the sync block pointer
+ mov ARGUMENT_REG2, dword ptr g_pSyncTable
+ and eax, MASK_SYNCBLOCKINDEX_ASM
+ mov ARGUMENT_REG2, [ARGUMENT_REG2+eax*SizeOfSyncTableEntry_ASM+SyncTableEntry_m_SyncBlock]
+
+ ; was there a sync block?
+ test ARGUMENT_REG2, ARGUMENT_REG2
+ jz MonExitFramedLockHelper
+
+ ; Get a pointer to the lock object.
+ lea ARGUMENT_REG2, [ARGUMENT_REG2+SyncBlock_m_Monitor]
+
+ ; Check if lock is held.
+ cmp [ARGUMENT_REG2+AwareLock_m_HoldingThread],esi
+ jne MonExitFramedLockHelper
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ push ARGUMENT_REG1 ; preserve regs
+ push ARGUMENT_REG2
+
+ push ARGUMENT_REG2 ; AwareLock
+ push [esp+8] ; return address
+ call LeaveSyncHelper
+
+ pop ARGUMENT_REG2 ; restore regs
+ pop ARGUMENT_REG1
+endif ;TRACK_SYNC
+endif ;MON_DEBUG
+ ; Reduce our recursion count.
+ dec dword ptr [ARGUMENT_REG2+AwareLock_m_Recursion]
+ jz MonExitLastRecursion
+
+ pop esi
+ ret
+
+MonExitRetryHelperThinLock:
+ jmp MonExitRetryThinLock
+
+MonExitFramedLockHelper:
+ pop esi
+ jmp JITutil_MonExitWorker
+
+ ; This is the last count we held on this lock, so release the lock.
+MonExitLastRecursion:
+ dec dword ptr [esi+Thread_m_dwLockCount]
+ mov dword ptr [ARGUMENT_REG2+AwareLock_m_HoldingThread],0
+
+MonExitRetry:
+ mov eax, [ARGUMENT_REG2+AwareLock_m_MonitorHeld]
+ lea esi, [eax-1]
+ lock cmpxchg [ARGUMENT_REG2+AwareLock_m_MonitorHeld], esi
+ jne MonExitRetryHelper
+ pop esi
+ test eax,0FFFFFFFEh
+ jne MonExitMustSignal
+
+ ret
+
+MonExitMustSignal:
+ mov ARGUMENT_REG1, ARGUMENT_REG2
+ jmp JITutil_MonSignal
+
+MonExitRetryHelper:
+ jmp MonExitRetry
+
+@JIT_MonExitWorker@4 endp
+
+;**********************************************************************
+; This is a frameless helper for entering a static monitor on a class.
+; The methoddesc is in ARGUMENT_REG1. This tries the normal case (no
+; blocking or object allocation) in line and calls a framed helper
+; for the other cases.
+; Note we are changing the methoddesc parameter to a pointer to the
+; AwareLock.
+; ***** NOTE: if you make any changes to this routine, build with MON_DEBUG undefined
+; to make sure you don't break the non-debug build. This is very fragile code.
+; Also, propagate the changes to jithelp.s which contains the same helper and assembly code
+; (in AT&T syntax) for gnu assembler.
+@JIT_MonEnterStatic@4 proc public
+ ; We need another scratch register for what follows, so save EBX now so
+ ; we can use it for that purpose.
+ push ebx
+
+ ; Attempt to acquire the lock
+MonEnterStaticRetry:
+ mov eax, [ARGUMENT_REG1+AwareLock_m_MonitorHeld]
+ test eax,eax
+ jne MonEnterStaticHaveWaiters
+
+ ; Common case, lock isn't held and there are no waiters. Attempt to
+ ; gain ownership ourselves.
+ mov ebx,1
+ lock cmpxchg [ARGUMENT_REG1+AwareLock_m_MonitorHeld],ebx
+ jnz MonEnterStaticRetryHelper
+
+ pop ebx
+
+ ; Success. Save the thread object in the lock and increment the use count.
+ call _GetThread@0
+ mov [ARGUMENT_REG1+AwareLock_m_HoldingThread], eax
+ inc dword ptr [ARGUMENT_REG1+AwareLock_m_Recursion]
+ inc dword ptr [eax+Thread_m_dwLockCount]
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ push ARGUMENT_REG1 ; AwareLock
+ push [esp+4] ; return address
+ call EnterSyncHelper
+endif ;TRACK_SYNC
+endif ;MON_DEBUG
+ ret
+
+ ; It's possible to get here with waiters but no lock held, but in this
+ ; case a signal is about to be fired which will wake up a waiter. So
+ ; for fairness sake we should wait too.
+ ; Check first for recursive lock attempts on the same thread.
+MonEnterStaticHaveWaiters:
+ ; Get thread but preserve EAX (contains cached contents of m_MonitorHeld).
+ push eax
+ call _GetThread@0
+ mov ebx,eax
+ pop eax
+
+ ; Is mutex already owned by current thread?
+ cmp [ARGUMENT_REG1+AwareLock_m_HoldingThread],ebx
+ jne MonEnterStaticPrepareToWait
+
+ ; Yes, bump our use count.
+ inc dword ptr [ARGUMENT_REG1+AwareLock_m_Recursion]
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ push ARGUMENT_REG1 ; AwareLock
+ push [esp+4] ; return address
+ call EnterSyncHelper
+endif ;TRACK_SYNC
+endif ;MON_DEBUG
+ pop ebx
+ ret
+
+MonEnterStaticPrepareToWait:
+ pop ebx
+
+ ; ARGUMENT_REG1 should have AwareLock. Call contention helper.
+ jmp JITutil_MonContention
+
+MonEnterStaticRetryHelper:
+ jmp MonEnterStaticRetry
+@JIT_MonEnterStatic@4 endp
+
+;**********************************************************************
+; A frameless helper for exiting a static monitor on a class.
+; The methoddesc is in ARGUMENT_REG1. This tries the normal case (no
+; blocking or object allocation) in line and calls a framed helper
+; for the other cases.
+; Note we are changing the methoddesc parameter to a pointer to the
+; AwareLock.
+; ***** NOTE: if you make any changes to this routine, build with MON_DEBUG undefined
+; to make sure you don't break the non-debug build. This is very fragile code.
+; Also, propagate the changes to jithelp.s which contains the same helper and assembly code
+; (in AT&T syntax) for gnu assembler.
+@JIT_MonExitStatic@4 proc public
+
+ifdef MON_DEBUG
+ifdef TRACK_SYNC
+ push ARGUMENT_REG1 ; preserve regs
+
+ push ARGUMENT_REG1 ; AwareLock
+ push [esp+8] ; return address
+ call LeaveSyncHelper
+
+ pop ARGUMENT_REG1 ; restore regs (matches the push above)
+endif ;TRACK_SYNC
+endif ;MON_DEBUG
+
+ ; Check if lock is held.
+ call _GetThread@0
+ cmp [ARGUMENT_REG1+AwareLock_m_HoldingThread],eax
+ jne MonExitStaticLockError
+
+ ; Reduce our recursion count.
+ dec dword ptr [ARGUMENT_REG1+AwareLock_m_Recursion]
+ jz MonExitStaticLastRecursion
+
+ ret
+
+ ; This is the last count we held on this lock, so release the lock.
+MonExitStaticLastRecursion:
+ ; eax must have the thread object
+ dec dword ptr [eax+Thread_m_dwLockCount]
+ mov dword ptr [ARGUMENT_REG1+AwareLock_m_HoldingThread],0
+ push ebx
+
+MonExitStaticRetry:
+ mov eax, [ARGUMENT_REG1+AwareLock_m_MonitorHeld]
+ lea ebx, [eax-1]
+ lock cmpxchg [ARGUMENT_REG1+AwareLock_m_MonitorHeld],ebx
+ jne MonExitStaticRetryHelper
+ pop ebx
+ test eax,0FFFFFFFEh
+ jne MonExitStaticMustSignal
+
+ ret
+
+MonExitStaticMustSignal:
+ jmp JITutil_MonSignal
+
+MonExitStaticRetryHelper:
+ jmp MonExitStaticRetry
+ ; Throw a synchronization lock exception.
+MonExitStaticLockError:
+ mov ARGUMENT_REG1, CORINFO_SynchronizationLockException_ASM
+ jmp JIT_InternalThrow
+
+@JIT_MonExitStatic@4 endp
+
+; PatchedCodeStart and PatchedCodeEnd are used to determine bounds of patched code.
+;
+
+_JIT_PatchedCodeStart@0 proc public
+ret
+_JIT_PatchedCodeStart@0 endp
+
+;
+; Optimized TLS getters
+;
+
+ ALIGN 4
+
+ifndef FEATURE_IMPLICIT_TLS
+_GetThread@0 proc public
+ ; This will be overwritten at runtime with optimized GetThread implementation
+ jmp short _GetTLSDummy@0
+ ; Just allocate space that will be filled in at runtime
+ db (TLS_GETTER_MAX_SIZE_ASM - 2) DUP (0CCh)
+_GetThread@0 endp
+
+ ALIGN 4
+
+_GetAppDomain@0 proc public
+ ; This will be overwritten at runtime with optimized GetAppDomain implementation
+ jmp short _GetTLSDummy@0
+ ; Just allocate space that will be filled in at runtime
+ db (TLS_GETTER_MAX_SIZE_ASM - 2) DUP (0CCh)
+_GetAppDomain@0 endp
+
+_GetTLSDummy@0 proc public
+ xor eax,eax
+ ret
+_GetTLSDummy@0 endp
+
+ ALIGN 4
+
+_ClrFlsGetBlock@0 proc public
+ ; This will be overwritten at runtime with optimized ClrFlsGetBlock implementation
+ jmp short _GetTLSDummy@0
+ ; Just allocate space that will be filled in at runtime
+ db (TLS_GETTER_MAX_SIZE_ASM - 2) DUP (0CCh)
+_ClrFlsGetBlock@0 endp
+endif
+
+;**********************************************************************
+; Write barriers generated at runtime
+
+PUBLIC _JIT_PatchedWriteBarrierStart@0
+_JIT_PatchedWriteBarrierStart@0 PROC
+ret
+_JIT_PatchedWriteBarrierStart@0 ENDP
+
+PatchedWriteBarrierHelper MACRO rg
+ ALIGN 8
+PUBLIC _JIT_WriteBarrier&rg&@0
+_JIT_WriteBarrier&rg&@0 PROC
+ ; Just allocate space that will be filled in at runtime
+ db (48) DUP (0CCh)
+_JIT_WriteBarrier&rg&@0 ENDP
+
+ENDM
+
+PatchedWriteBarrierHelper <EAX>
+PatchedWriteBarrierHelper <EBX>
+PatchedWriteBarrierHelper <ECX>
+PatchedWriteBarrierHelper <ESI>
+PatchedWriteBarrierHelper <EDI>
+PatchedWriteBarrierHelper <EBP>
+
+PUBLIC _JIT_PatchedWriteBarrierLast@0
+_JIT_PatchedWriteBarrierLast@0 PROC
+ret
+_JIT_PatchedWriteBarrierLast@0 ENDP
+
+;**********************************************************************
+; PrecodeRemotingThunk is patched at runtime to activate it
+ifdef FEATURE_REMOTING
+ ALIGN 16
+_PrecodeRemotingThunk@0 proc public
+
+ ret ; This is going to be patched to "test ecx,ecx"
+ nop
+
+ jz RemotingDone ; predicted not taken
+
+ cmp dword ptr [ecx],11111111h ; This is going to be patched with the address of the transparent proxy MethodTable
+ je RemotingCheck ; predicted not taken
+
+RemotingDone:
+ ret
+
+RemotingCheck:
+ push eax ; save method desc
+ mov eax, dword ptr [ecx + TransparentProxyObject___stubData]
+ call [ecx + TransparentProxyObject___stub]
+ test eax, eax
+ jnz RemotingCtxMismatch
+ mov eax, [esp]
+ mov ax, [eax + MethodDesc_m_wFlags]
+ and ax, MethodDesc_mdcClassification
+ cmp ax, MethodDesc_mcComInterop
+ je ComPlusCall
+ pop eax ; throw away method desc
+ jmp RemotingDone
+
+RemotingCtxMismatch:
+ pop eax ; restore method desc
+ add esp, 4 ; pop return address into the precode
+ jmp _TransparentProxyStub_CrossContext@0
+
+ComPlusCall:
+ pop eax ; restore method desc
+ mov [esp],eax ; replace return address into the precode with method desc (argument for TP stub)
+ jmp _InContextTPQuickDispatchAsmStub@0
+
+_PrecodeRemotingThunk@0 endp
+endif ; FEATURE_REMOTING
+
+_JIT_PatchedCodeLast@0 proc public
+ret
+_JIT_PatchedCodeLast@0 endp
+
+; This is the first function outside the "keep together range". Used by BBT scripts.
+_JIT_PatchedCodeEnd@0 proc public
+ret
+_JIT_PatchedCodeEnd@0 endp
+
+; This is the ASM portion of JIT_IsInstanceOfInterface. For all the bizarre cases, it quickly
+; fails and falls back on the JITutil_IsInstanceOfInterface helper. So all failure cases take
+; the slow path, too.
+;
+; ARGUMENT_REG1 = array or interface to check for.
+; ARGUMENT_REG2 = instance to be cast.
+
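+; The scan below, in C terms (a sketch; InterfaceInfo_t holds a MethodTable*,
+; possibly behind an indirection when the image is prejitted):
+;
+;   Object* IsInstanceOfInterface(MethodTable* iface, Object* obj) {
+;       if (obj == NULL) return NULL;
+;       MethodTable* mt = obj->GetMethodTable();
+;       InterfaceInfo_t* map = mt->m_pInterfaceMap;
+;       for (unsigned i = 0; i < mt->m_wNumInterfaces; i++)
+;           if (map[i].GetMethodTable() == iface)
+;               return obj;                   /* cast succeeds */
+;       return Bizarre(iface, obj);           /* everything else: slow helper */
+;   }
+;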
+ ALIGN 16
+PUBLIC @JIT_IsInstanceOfInterface@8
+@JIT_IsInstanceOfInterface@8 PROC
+ test ARGUMENT_REG2, ARGUMENT_REG2
+ jz IsNullInst
+
+ mov eax, [ARGUMENT_REG2] ; get MethodTable
+
+ push ebx
+ push esi
+ movzx ebx, word ptr [eax+MethodTable_m_wNumInterfaces]
+
+ ; check if this MT implements any interfaces
+ test ebx, ebx
+ jz IsInstanceOfInterfaceDoBizarre
+
+ ; move Interface map ptr into eax
+ mov eax, [eax+MethodTable_m_pInterfaceMap]
+
+IsInstanceOfInterfaceTop:
+ ; eax -> current InterfaceInfo_t entry in interface map list
+ifdef FEATURE_PREJIT
+ mov esi, [eax]
+ test esi, 1
+ ; Move the dereference out of line so that this jump is correctly predicted for the case
+ ; when there is no indirection
+ jnz IsInstanceOfInterfaceIndir
+ cmp ARGUMENT_REG1, esi
+else
+ cmp ARGUMENT_REG1, [eax]
+endif
+ je IsInstanceOfInterfaceFound
+
+IsInstanceOfInterfaceNext:
+ add eax, SIZEOF_InterfaceInfo_t
+ dec ebx
+ jnz IsInstanceOfInterfaceTop
+
+ ; fall through to DoBizarre
+
+IsInstanceOfInterfaceDoBizarre:
+ pop esi
+ pop ebx
+ mov eax, [ARGUMENT_REG2] ; get MethodTable
+ test dword ptr [eax+MethodTable_m_dwFlags], NonTrivialInterfaceCastFlags
+ jnz IsInstanceOfInterfaceNonTrivialCast
+
+IsNullInst:
+ xor eax,eax
+ ret
+
+ifdef FEATURE_PREJIT
+IsInstanceOfInterfaceIndir:
+ cmp ARGUMENT_REG1,[esi-1]
+ jne IsInstanceOfInterfaceNext
+endif
+
+IsInstanceOfInterfaceFound:
+ pop esi
+ pop ebx
+ mov eax, ARGUMENT_REG2 ; the successful instance
+ ret
+
+IsInstanceOfInterfaceNonTrivialCast:
+ jmp @JITutil_IsInstanceOfInterface@8
+
+@JIT_IsInstanceOfInterface@8 endp
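+
+; For reference, the scan above is equivalent to this C-style sketch (field and
+; accessor names here are illustrative assumptions, not the real declarations):
+;
+;   MethodTable     *pMT    = *(MethodTable **)obj;            // [ARGUMENT_REG2]
+;   InterfaceInfo_t *pEntry = pMT->m_pInterfaceMap;
+;   for (unsigned i = 0; i < pMT->m_wNumInterfaces; i++, pEntry++)
+;       if (pEntry->GetMethodTable() == pInterfaceMT)          // ARGUMENT_REG1
+;           return obj;
+;   return JITutil_IsInstanceOfInterface(pInterfaceMT, obj);   // bizarre/slow cases
+;
+; JIT_ChkCastInterface below runs the same scan; it differs only in that
+; failures tail-call the throwing helper instead of returning NULL.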
+
+; This is the ASM portion of JIT_ChkCastInterface. For all the bizarre cases, it quickly
+; fails and falls back on the JITutil_ChkCastInterface helper, so all failure cases take
+; the slow path, too.
+;
+; ARGUMENT_REG1 = array or interface to check for.
+; ARGUMENT_REG2 = instance to be cast.
+
+ ALIGN 16
+PUBLIC @JIT_ChkCastInterface@8
+@JIT_ChkCastInterface@8 PROC
+ test ARGUMENT_REG2, ARGUMENT_REG2
+ jz ChkCastInterfaceIsNullInst
+
+ mov eax, [ARGUMENT_REG2] ; get MethodTable
+
+ push ebx
+ push esi
+ movzx ebx, word ptr [eax+MethodTable_m_wNumInterfaces]
+
+ ; speculatively move Interface map ptr into eax
+ mov eax, [eax+MethodTable_m_pInterfaceMap]
+
+ ; check if this MT implements any interfaces
+ test ebx, ebx
+ jz ChkCastInterfaceDoBizarre
+
+ChkCastInterfaceTop:
+ ; eax -> current InterfaceInfo_t entry in interface map list
+ifdef FEATURE_PREJIT
+ mov esi, [eax]
+ test esi, 1
+ ; Move the dereference out of line so that this jump is correctly predicted for the case
+ ; when there is no indirection
+ jnz ChkCastInterfaceIndir
+ cmp ARGUMENT_REG1, esi
+else
+ cmp ARGUMENT_REG1, [eax]
+endif
+ je ChkCastInterfaceFound
+
+ChkCastInterfaceNext:
+ add eax, SIZEOF_InterfaceInfo_t
+ dec ebx
+ jnz ChkCastInterfaceTop
+
+ ; fall through to DoBizarre
+
+ChkCastInterfaceDoBizarre:
+ pop esi
+ pop ebx
+ jmp @JITutil_ChkCastInterface@8
+
+ifdef FEATURE_PREJIT
+ChkCastInterfaceIndir:
+ cmp ARGUMENT_REG1,[esi-1]
+ jne ChkCastInterfaceNext
+endif
+
+ChkCastInterfaceFound:
+ pop esi
+ pop ebx
+
+ChkCastInterfaceIsNullInst:
+ mov eax, ARGUMENT_REG2 ; either null, or the successful instance
+ ret
+
+@JIT_ChkCastInterface@8 endp
+
+ end
diff --git a/src/vm/i386/jitinterfacex86.cpp b/src/vm/i386/jitinterfacex86.cpp
new file mode 100644
index 0000000000..b1bb97b5f5
--- /dev/null
+++ b/src/vm/i386/jitinterfacex86.cpp
@@ -0,0 +1,1918 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: JITinterfaceX86.CPP
+//
+// ===========================================================================
+
+// This contains JITinterface routines that are tailored for
+// X86 platforms. Non-X86 versions of these can be found in
+// JITinterfaceGen.cpp
+
+
+#include "common.h"
+#include "jitinterface.h"
+#include "eeconfig.h"
+#include "excep.h"
+#include "comdelegate.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h" // create context bound and remote class instances
+#endif
+#include "field.h"
+#include "ecall.h"
+#include "asmconstants.h"
+#include "virtualcallstub.h"
+#include "eventtrace.h"
+#include "threadsuspend.h"
+
+#if defined(_DEBUG) && !defined (WRITE_BARRIER_CHECK)
+#define WRITE_BARRIER_CHECK 1
+#endif
+
+// To test with MON_DEBUG off, comment out the following line. DO NOT simply define
+// it to be 0, as the checks are for #ifdef, not #if 0.
+//
+#ifdef _DEBUG
+#define MON_DEBUG 1
+#endif
+
+class generation;
+extern "C" generation generation_table[];
+
+extern "C" void STDCALL JIT_WriteBarrierReg_PreGrow();// JIThelp.asm/JIThelp.s
+extern "C" void STDCALL JIT_WriteBarrierReg_PostGrow();// JIThelp.asm/JIThelp.s
+
+#ifdef _DEBUG
+extern "C" void STDCALL WriteBarrierAssert(BYTE* ptr, Object* obj)
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ WRAPPER_NO_CONTRACT;
+
+ static BOOL fVerifyHeap = -1;
+
+ if (fVerifyHeap == -1)
+ fVerifyHeap = g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC;
+
+ if (fVerifyHeap)
+ {
+ obj->Validate(FALSE);
+ if(GCHeap::GetGCHeap()->IsHeapPointer(ptr))
+ {
+ Object* pObj = *(Object**)ptr;
+ _ASSERTE (pObj == NULL || GCHeap::GetGCHeap()->IsHeapPointer(pObj));
+ }
+ }
+ else
+ {
+ _ASSERTE((g_lowest_address <= ptr && ptr < g_highest_address) ||
+ ((size_t)ptr < MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT));
+ }
+}
+
+#endif // _DEBUG
+
+/****************************************************************************/
+/* assigns 'val' to 'array[idx]', after doing all the proper checks */
+
+/* note that we can do almost as well in portable code, but this
+   squeezes the last little bit of perf out; a portable sketch follows
+   the function below */
+
+__declspec(naked) void F_CALL_CONV JIT_Stelem_Ref(PtrArray* array, unsigned idx, Object* val)
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ enum { CanCast = TypeHandle::CanCast,
+#if CHECK_APP_DOMAIN_LEAKS
+ EEClassFlags = EEClass::AUXFLAG_APP_DOMAIN_AGILE |
+ EEClass::AUXFLAG_CHECK_APP_DOMAIN_AGILE,
+#endif // CHECK_APP_DOMAIN_LEAKS
+ };
+
+ __asm {
+ mov EAX, [ESP+4] // EAX = val
+
+ test ECX, ECX
+ je ThrowNullReferenceException
+
+ cmp EDX, [ECX+4] // test if in bounds
+ jae ThrowIndexOutOfRangeException
+
+ test EAX, EAX
+ jz Assigning0
+
+#if CHECK_APP_DOMAIN_LEAKS
+ mov EAX,[g_pConfig]
+ movzx EAX, [EAX]EEConfig.fAppDomainLeaks;
+ test EAX, EAX
+ jz NoCheck
+ // Check if the instance is agile or check agile
+ mov EAX, [ECX]
+ mov EAX, [EAX]MethodTable.m_ElementTypeHnd
+ test EAX, 2 // Check for non-MT
+ jnz NoCheck
+ // Check VMflags of element type
+ mov EAX, [EAX]MethodTable.m_pEEClass
+ mov EAX, dword ptr [EAX]EEClass.m_wAuxFlags
+ test EAX, EEClassFlags
+ jnz NeedFrame // Jump to the generic case so we can do an app domain check
+ NoCheck:
+ mov EAX, [ESP+4] // EAX = val
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+ push EDX
+ mov EDX, [ECX]
+ mov EDX, [EDX]MethodTable.m_ElementTypeHnd
+
+ cmp EDX, [EAX] // do we have an exact match
+ jne NotExactMatch
+
+DoWrite2:
+ pop EDX
+ lea EDX, [ECX + 4*EDX + 8]
+ call JIT_WriteBarrierEAX
+ ret 4
+
+Assigning0:
+ // write barrier is not necessary for assignment of NULL references
+ mov [ECX + 4*EDX + 8], EAX
+ ret 4
+
+DoWrite:
+ mov EAX, [ESP+4] // EAX = val
+ lea EDX, [ECX + 4*EDX + 8]
+ call JIT_WriteBarrierEAX
+ ret 4
+
+NotExactMatch:
+ cmp EDX, [g_pObjectClass] // are we assigning to an Array of objects?
+ je DoWrite2
+
+ // push EDX // caller-save ECX and EDX
+ push ECX
+
+ push EDX // element type handle
+ push EAX // object
+
+ call ObjIsInstanceOfNoGC
+
+ pop ECX // caller-restore ECX and EDX
+ pop EDX
+
+ cmp EAX, CanCast
+ je DoWrite
+
+#if CHECK_APP_DOMAIN_LEAKS
+NeedFrame:
+#endif
+ // Call the helper that knows how to erect a frame
+ push EDX
+ push ECX
+
+ lea ECX, [ESP+8+4] // ECX = address of object being stored
+ lea EDX, [ESP] // EDX = address of array
+
+ call ArrayStoreCheck
+
+ pop ECX // these might have been updated!
+ pop EDX
+
+ cmp EAX, EAX // set zero flag
+ jnz Epilog // This jump never happens, it keeps the epilog walker happy
+
+ jmp DoWrite
+
+ThrowNullReferenceException:
+ mov ECX, CORINFO_NullReferenceException
+ jmp Throw
+
+ThrowIndexOutOfRangeException:
+ mov ECX, CORINFO_IndexOutOfRangeException
+
+Throw:
+ call JIT_InternalThrowFromHelper
+Epilog:
+ ret 4
+ }
+}
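+
+// For reference, a portable sketch of what the fast path above does (helper and
+// field accessor names here are illustrative, not the exact runtime declarations;
+// the CHECK_APP_DOMAIN_LEAKS path is omitted):
+//
+//   void Stelem_Ref(PtrArray* array, unsigned idx, Object* val) {
+//       if (array == NULL)                     throw NullReferenceException;
+//       if (idx >= array->GetNumComponents())  throw IndexOutOfRangeException;
+//       if (val == NULL) { array->m_Array[idx] = NULL; return; }  // no barrier needed
+//       TypeHandle elemTh = array->GetArrayElementTypeHandle();
+//       if (elemTh != val->GetMethodTable() && elemTh != g_pObjectClass &&
+//           ObjIsInstanceOfNoGC(val, elemTh) != TypeHandle::CanCast)
+//           ArrayStoreCheck(&val, &array);     // erects a frame; throws on failure
+//       SetRefWithWriteBarrier(&array->m_Array[idx], val);        // JIT_WriteBarrierEAX path
+//   }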
+
+extern "C" __declspec(naked) Object* F_CALL_CONV JIT_IsInstanceOfClass(MethodTable *pMT, Object *pObject)
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+#if defined(FEATURE_TYPEEQUIVALENCE) || defined(FEATURE_REMOTING)
+ enum
+ {
+ MTEquivalenceFlags = MethodTable::public_enum_flag_HasTypeEquivalence,
+ };
+#endif
+
+ __asm
+ {
+ // Check if the instance is NULL
+ test ARGUMENT_REG2, ARGUMENT_REG2
+ je ReturnInst
+
+ // Get the method table for the instance.
+ mov eax, dword ptr [ARGUMENT_REG2]
+
+ // Check if they are the same.
+ cmp eax, ARGUMENT_REG1
+ jne CheckParent
+
+ ReturnInst:
+ // We matched the class.
+ mov eax, ARGUMENT_REG2
+ ret
+
+ // Check if the parent class matches.
+ CheckParent:
+ mov eax, dword ptr [eax]MethodTable.m_pParentMethodTable
+ cmp eax, ARGUMENT_REG1
+ je ReturnInst
+
+ // Check if we hit the top of the hierarchy.
+ test eax, eax
+ jne CheckParent
+
+ // Check if the instance is a proxy or has type equivalence.
+#if defined(FEATURE_TYPEEQUIVALENCE) || defined(FEATURE_REMOTING)
+ mov eax, [ARGUMENT_REG2]
+ test dword ptr [eax]MethodTable.m_dwFlags, MTEquivalenceFlags
+ jne SlowPath
+#endif
+ // It didn't match and it isn't a proxy and it doesn't have type equivalence
+ xor eax, eax
+ ret
+
+ // Cast didn't match, so try the worker to check for the proxy/equivalence case.
+#if defined(FEATURE_TYPEEQUIVALENCE) || defined(FEATURE_REMOTING)
+ SlowPath:
+ jmp JITutil_IsInstanceOfAny
+#endif
+ }
+}
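+
+// The register walk above corresponds to this C sketch (hedged; the slow path
+// covers proxies and type equivalence):
+//
+//   Object* IsInstanceOfClass(MethodTable* pMT, Object* obj) {
+//       if (obj == NULL) return NULL;                       // null "succeeds"
+//       for (MethodTable* mt = obj->GetMethodTable(); mt != NULL;
+//            mt = mt->m_pParentMethodTable)
+//           if (mt == pMT) return obj;
+//       // no match: proxies / type equivalence fall back to JITutil_IsInstanceOfAny,
+//       // everything else returns NULL
+//       return NULL;
+//   }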
+
+extern "C" __declspec(naked) Object* F_CALL_CONV JIT_ChkCastClass(MethodTable *pMT, Object *pObject)
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ __asm
+ {
+ // Check if the instance is NULL
+ test ARGUMENT_REG2, ARGUMENT_REG2
+ je ReturnInst
+
+ // Get the method table for the instance.
+ mov eax, dword ptr [ARGUMENT_REG2]
+
+ // Check if they are the same.
+ cmp eax, ARGUMENT_REG1
+ jne CheckParent
+
+ ReturnInst:
+ // We matched the class.
+ mov eax, ARGUMENT_REG2
+ ret
+
+ // Check if the parent class matches.
+ CheckParent:
+ mov eax, dword ptr [eax]MethodTable.m_pParentMethodTable
+ cmp eax, ARGUMENT_REG1
+ je ReturnInst
+
+ // Check if we hit the top of the hierarchy.
+ test eax, eax
+ jne CheckParent
+
+ // Call out to JITutil_ChkCastAny to handle the proxy case and throw a rich
+ // InvalidCastException in case of failure.
+ jmp JITutil_ChkCastAny
+ }
+}
+
+extern "C" __declspec(naked) Object* F_CALL_CONV JIT_ChkCastClassSpecial(MethodTable *pMT, Object *pObject)
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ // Assumes that the check for the trivial cases has been inlined by the JIT.
+
+ __asm
+ {
+ // Get the method table for the instance.
+ mov eax, dword ptr [ARGUMENT_REG2]
+
+ // Check if the parent class matches.
+ CheckParent:
+ mov eax, dword ptr [eax]MethodTable.m_pParentMethodTable
+ cmp eax, ARGUMENT_REG1
+ jne CheckNull
+
+ // We matched the class.
+ mov eax, ARGUMENT_REG2
+ ret
+
+ CheckNull:
+ // Check if we hit the top of the hierarchy.
+ test eax, eax
+ jne CheckParent
+
+ // Call out to JITutil_ChkCastAny to handle the proxy case and throw a rich
+ // InvalidCastException in case of failure.
+ jmp JITutil_ChkCastAny
+ }
+}
+
+HCIMPL1_V(INT32, JIT_Dbl2IntOvf, double val)
+{
+ FCALL_CONTRACT;
+
+ INT64 ret = HCCALL1_V(JIT_Dbl2Lng, val);
+
+ if (ret != (INT32) ret)
+ goto THROW;
+
+ return (INT32) ret;
+
+THROW:
+ FCThrow(kOverflowException);
+}
+HCIMPLEND
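+
+// The round trip through INT32 above is what detects overflow: for example,
+// val = 3e9 converts to the INT64 3000000000, but (INT32)3000000000 is
+// -1294967296, so ret != (INT32)ret and we throw kOverflowException. Any value
+// that fits in 32 bits survives the cast unchanged.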
+
+
+FCDECL1(Object*, JIT_New, CORINFO_CLASS_HANDLE typeHnd_);
+
+#ifdef FEATURE_REMOTING
+HCIMPL1(Object*, JIT_NewCrossContextHelper, CORINFO_CLASS_HANDLE typeHnd_)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ TypeHandle typeHnd(typeHnd_);
+
+ OBJECTREF newobj = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
+
+ _ASSERTE(!typeHnd.IsTypeDesc()); // we never use this helper for arrays
+ MethodTable *pMT = typeHnd.AsMethodTable();
+ pMT->CheckRestore();
+
+ // Remoting services determine whether the current context is appropriate
+ // for activation. If the current context is OK, an ordinary object is
+ // created; otherwise a proxy is created.
+ // Note: 3/20/03 Added the fIsNewObj flag to indicate that CreateProxyOrObject
+ // is being called from Jit_NewObj ... the fIsCom flag is FALSE by default,
+ // which was also the case before this change.
+ newobj = CRemotingServices::CreateProxyOrObject(pMT,FALSE /*fIsCom*/,TRUE/*fIsNewObj*/);
+
+ HELPER_METHOD_FRAME_END();
+ return(OBJECTREFToObject(newobj));
+}
+HCIMPLEND
+#endif // FEATURE_REMOTING
+
+HCIMPL1(Object*, AllocObjectWrapper, MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF newObj = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
+ newObj = AllocateObject(pMT);
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(newObj);
+}
+HCIMPLEND
+
+/*********************************************************************/
+// This is a frameless helper for allocating an object whose type derives
+// from MarshalByRefObject. We check quickly to see if it is configured to
+// have remote activation. If not, we use the superfast allocator to
+// allocate the object. Otherwise, we take the slow path of allocating
+// the object via remoting services.
+#ifdef FEATURE_REMOTING
+__declspec(naked) Object* F_CALL_CONV JIT_NewCrossContext(CORINFO_CLASS_HANDLE typeHnd_)
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ _asm
+ {
+ // Check if remoting has been configured
+ push ARGUMENT_REG1 // save registers
+ push ARGUMENT_REG1
+ call CRemotingServices::RequiresManagedActivation
+ test eax, eax
+ // Jump to the slow path
+ jne SpecialOrXCtxHelper
+#ifdef _DEBUG
+ push LL_INFO10
+ push LF_GCALLOC
+ call LoggingOn
+ test eax, eax
+ jne AllocWithLogHelper
+#endif // _DEBUG
+
+ // if the object doesn't have a finalizer and the size is small, jump to super fast asm helper
+ mov ARGUMENT_REG1, [esp]
+ call MethodTable::CannotUseSuperFastHelper
+ test eax, eax
+ jne FastHelper
+
+ pop ARGUMENT_REG1
+ // Jump to the super fast helper
+ jmp dword ptr [hlpDynamicFuncTable + DYNAMIC_CORINFO_HELP_NEWSFAST * SIZE VMHELPDEF]VMHELPDEF.pfnHelper
+
+FastHelper:
+ pop ARGUMENT_REG1
+ // Jump to the helper
+ jmp JIT_New
+
+SpecialOrXCtxHelper:
+#ifdef FEATURE_COMINTEROP
+ test eax, ComObjectType
+ jz XCtxHelper
+ pop ARGUMENT_REG1
+ // Jump to the helper
+ jmp JIT_New
+
+XCtxHelper:
+#endif // FEATURE_COMINTEROP
+
+ pop ARGUMENT_REG1
+ // Jump to the helper
+ jmp JIT_NewCrossContextHelper
+
+#ifdef _DEBUG
+AllocWithLogHelper:
+ pop ARGUMENT_REG1
+ // Jump to the helper
+ jmp AllocObjectWrapper
+#endif // _DEBUG
+ }
+}
+#endif // FEATURE_REMOTING
+
+
+/*********************************************************************/
+extern "C" void* g_TailCallFrameVptr;
+void* g_TailCallFrameVptr;
+
+#ifdef FEATURE_HIJACK
+extern "C" void STDCALL JIT_TailCallHelper(Thread * pThread);
+void STDCALL JIT_TailCallHelper(Thread * pThread)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ pThread->UnhijackThread();
+}
+#endif // FEATURE_HIJACK
+
+#if CHECK_APP_DOMAIN_LEAKS
+HCIMPL1(void *, SetObjectAppDomain, Object *pObject)
+{
+ FCALL_CONTRACT;
+ DEBUG_ONLY_FUNCTION;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(Frame::FRAME_ATTR_CAPTURE_DEPTH_2|Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_NO_THREAD_ABORT);
+ pObject->SetAppDomain();
+ HELPER_METHOD_FRAME_END();
+
+ return pObject;
+}
+HCIMPLEND
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+ // emit code that adds MIN_OBJECT_SIZE to adjReg if testAlignReg is unaligned, thus making the allocation aligned
+void JIT_TrialAlloc::EmitAlignmentRoundup(CPUSTUBLINKER *psl, X86Reg testAlignReg, X86Reg adjReg, Flags flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE((MIN_OBJECT_SIZE & 7) == 4); // want to change alignment
+
+ CodeLabel *AlreadyAligned = psl->NewCodeLabel();
+
+ // test reg, 7
+ psl->Emit16(0xC0F7 | (static_cast<unsigned short>(testAlignReg) << 8));
+ psl->Emit32(0x7);
+
+ // jz alreadyAligned
+ if (flags & ALIGN8OBJ)
+ {
+ psl->X86EmitCondJump(AlreadyAligned, X86CondCode::kJNZ);
+ }
+ else
+ {
+ psl->X86EmitCondJump(AlreadyAligned, X86CondCode::kJZ);
+ }
+
+ psl->X86EmitAddReg(adjReg, MIN_OBJECT_SIZE);
+ // AlreadyAligned:
+ psl->EmitLabel(AlreadyAligned);
+}
+
+ // if 'reg' is unaligned, then plant a dummy object at EAX and increment EAX past
+ // the dummy object
+void JIT_TrialAlloc::EmitDummyObject(CPUSTUBLINKER *psl, X86Reg alignTestReg, Flags flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ CodeLabel *AlreadyAligned = psl->NewCodeLabel();
+
+ // test reg, 7
+ psl->Emit16(0xC0F7 | (static_cast<unsigned short>(alignTestReg) << 8));
+ psl->Emit32(0x7);
+
+ // jz alreadyAligned
+ if (flags & ALIGN8OBJ)
+ {
+ psl->X86EmitCondJump(AlreadyAligned, X86CondCode::kJNZ);
+ }
+ else
+ {
+ psl->X86EmitCondJump(AlreadyAligned, X86CondCode::kJZ);
+ }
+
+ // Make the fake object
+ // mov EDX, [g_pObjectClass]
+ psl->Emit16(0x158B);
+ psl->Emit32((int)(size_t)&g_pObjectClass);
+
+ // mov [EAX], EDX
+ psl->X86EmitOffsetModRM(0x89, kEDX, kEAX, 0);
+
+#if CHECK_APP_DOMAIN_LEAKS
+ EmitSetAppDomain(psl);
+#endif
+
+ // add EAX, MIN_OBJECT_SIZE
+ psl->X86EmitAddReg(kEAX, MIN_OBJECT_SIZE);
+
+ // AlreadyAligned:
+ psl->EmitLabel(AlreadyAligned);
+}
+
+void JIT_TrialAlloc::EmitCore(CPUSTUBLINKER *psl, CodeLabel *noLock, CodeLabel *noAlloc, Flags flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Upon entry here, ecx contains the MethodTable we are to try to allocate memory for
+ // Upon exit, eax contains the allocated memory, edx is trashed, and ecx is undisturbed
+
+ if (flags & MP_ALLOCATOR)
+ {
+ if (flags & (ALIGN8 | SIZE_IN_EAX | ALIGN8OBJ))
+ {
+ if (flags & ALIGN8OBJ)
+ {
+ // mov eax, [ecx]MethodTable.m_BaseSize
+ psl->X86EmitIndexRegLoad(kEAX, kECX, offsetof(MethodTable, m_BaseSize));
+ }
+
+ psl->X86EmitPushReg(kEBX); // we need a spare register
+ }
+ else
+ {
+ // mov eax, [ecx]MethodTable.m_BaseSize
+ psl->X86EmitIndexRegLoad(kEAX, kECX, offsetof(MethodTable, m_BaseSize));
+ }
+
+ assert( ((flags & ALIGN8)==0 || // EAX loaded by the else statement
+ (flags & SIZE_IN_EAX) || // EAX already comes filled out
+ (flags & ALIGN8OBJ) ) // EAX loaded in the if (flags & ALIGN8OBJ) statement
+ && "EAX should contain the size for the allocation, and it doesn't!");
+
+ // Fetch current thread into EDX, preserving EAX and ECX
+ psl->X86EmitCurrentThreadFetch(kEDX, (1<<kEAX)|(1<<kECX));
+
+ // Try the allocation.
+
+
+ if (flags & (ALIGN8 | SIZE_IN_EAX | ALIGN8OBJ))
+ {
+ // MOV EBX, [edx]Thread.m_alloc_context.alloc_ptr
+ psl->X86EmitOffsetModRM(0x8B, kEBX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr));
+ // add EAX, EBX
+ psl->Emit16(0xC303);
+ if (flags & ALIGN8)
+ EmitAlignmentRoundup(psl, kEBX, kEAX, flags); // bump EAX up by MIN_OBJECT_SIZE if EBX is unaligned (so that the allocation is aligned)
+ }
+ else
+ {
+ // add eax, [edx]Thread.m_alloc_context.alloc_ptr
+ psl->X86EmitOffsetModRM(0x03, kEAX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr));
+ }
+
+ // cmp eax, [edx]Thread.m_alloc_context.alloc_limit
+ psl->X86EmitOffsetModRM(0x3b, kEAX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_limit));
+
+ // ja noAlloc
+ psl->X86EmitCondJump(noAlloc, X86CondCode::kJA);
+
+ // Fill in the allocation and get out.
+
+ // mov [edx]Thread.m_alloc_context.alloc_ptr, eax
+ psl->X86EmitIndexRegStore(kEDX, offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr), kEAX);
+
+ if (flags & (ALIGN8 | SIZE_IN_EAX | ALIGN8OBJ))
+ {
+ // mov EAX, EBX
+ psl->Emit16(0xC38B);
+ // pop EBX
+ psl->X86EmitPopReg(kEBX);
+
+ if (flags & ALIGN8)
+ EmitDummyObject(psl, kEAX, flags);
+ }
+ else
+ {
+ // sub eax, [ecx]MethodTable.m_BaseSize
+ psl->X86EmitOffsetModRM(0x2b, kEAX, kECX, offsetof(MethodTable, m_BaseSize));
+ }
+
+ // mov dword ptr [eax], ecx
+ psl->X86EmitIndexRegStore(kEAX, 0, kECX);
+ }
+ else
+ {
+ // Take the GC lock (there is no lock prefix required - we will use JIT_TrialAllocSFastMP on an MP System).
+ // inc dword ptr [m_GCLock]
+ psl->Emit16(0x05ff);
+ psl->Emit32((int)(size_t)&m_GCLock);
+
+ // jnz NoLock
+ psl->X86EmitCondJump(noLock, X86CondCode::kJNZ);
+
+ if (flags & SIZE_IN_EAX)
+ {
+ // mov edx, eax
+ psl->Emit16(0xd08b);
+ }
+ else
+ {
+ // mov edx, [ecx]MethodTable.m_BaseSize
+ psl->X86EmitIndexRegLoad(kEDX, kECX, offsetof(MethodTable, m_BaseSize));
+ }
+
+ // mov eax, dword ptr [generation_table]
+ psl->Emit8(0xA1);
+ psl->Emit32((int)(size_t)&generation_table);
+
+ // Try the allocation.
+ // add edx, eax
+ psl->Emit16(0xd003);
+
+ if (flags & (ALIGN8 | ALIGN8OBJ))
+ EmitAlignmentRoundup(psl, kEAX, kEDX, flags); // bump EDX up by MIN_OBJECT_SIZE if EAX is unaligned (so that the allocation is aligned)
+
+ // cmp edx, dword ptr [generation_table+4]
+ psl->Emit16(0x153b);
+ psl->Emit32((int)(size_t)&generation_table + 4);
+
+ // ja noAlloc
+ psl->X86EmitCondJump(noAlloc, X86CondCode::kJA);
+
+ // Fill in the allocation and get out.
+ // mov dword ptr [generation_table], edx
+ psl->Emit16(0x1589);
+ psl->Emit32((int)(size_t)&generation_table);
+
+ if (flags & (ALIGN8 | ALIGN8OBJ))
+ EmitDummyObject(psl, kEAX, flags);
+
+ // mov dword ptr [eax], ecx
+ psl->X86EmitIndexRegStore(kEAX, 0, kECX);
+
+ // mov dword ptr [m_GCLock], 0FFFFFFFFh
+ psl->Emit16(0x05C7);
+ psl->Emit32((int)(size_t)&m_GCLock);
+ psl->Emit32(0xFFFFFFFF);
+ }
+
+
+#ifdef INCREMENTAL_MEMCLR
+ // <TODO>We're planning to get rid of this anyhow according to Patrick</TODO>
+ _ASSERTE(!"NYI");
+#endif // INCREMENTAL_MEMCLR
+}
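+
+// Putting the MP_ALLOCATOR emissions above together, the generated fast path
+// (without ALIGN8/SIZE_IN_EAX/ALIGN8OBJ) is roughly the following; this is a
+// reading of the emissions above, not a separately maintained listing:
+//
+//       mov  eax, [ecx]MethodTable.m_BaseSize
+//       <fetch current Thread into edx>
+//       add  eax, [edx]Thread.m_alloc_context.alloc_ptr
+//       cmp  eax, [edx]Thread.m_alloc_context.alloc_limit
+//       ja   noAlloc                            ; doesn't fit -> slow helper
+//       mov  [edx]Thread.m_alloc_context.alloc_ptr, eax
+//       sub  eax, [ecx]MethodTable.m_BaseSize   ; eax -> start of the new object
+//       mov  [eax], ecx                         ; install the MethodTable pointer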
+
+#if CHECK_APP_DOMAIN_LEAKS
+void JIT_TrialAlloc::EmitSetAppDomain(CPUSTUBLINKER *psl)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!g_pConfig->AppDomainLeaks())
+ return;
+
+ // At both entry & exit, eax contains the allocated object.
+ // ecx is preserved, edx is not.
+
+ //
+ // Add in a call to SetAppDomain. (Note that this
+ // probably would have been easier to implement by just not using
+ // the generated helpers in a checked build, but we'd lose code
+ // coverage that way.)
+ //
+
+ // Save ECX over function call
+ psl->X86EmitPushReg(kECX);
+
+ // mov object to ECX
+ // mov ecx, eax
+ psl->Emit16(0xc88b);
+
+ // SetObjectAppDomain pops its arg & returns object in EAX
+ psl->X86EmitCall(psl->NewExternalCodeLabel((LPVOID)SetObjectAppDomain), 4);
+
+ psl->X86EmitPopReg(kECX);
+}
+
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+
+void JIT_TrialAlloc::EmitNoAllocCode(CPUSTUBLINKER *psl, Flags flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (flags & MP_ALLOCATOR)
+ {
+ if (flags & (ALIGN8|SIZE_IN_EAX))
+ psl->X86EmitPopReg(kEBX);
+ }
+ else
+ {
+ // mov dword ptr [m_GCLock], 0FFFFFFFFh
+ psl->Emit16(0x05c7);
+ psl->Emit32((int)(size_t)&m_GCLock);
+ psl->Emit32(0xFFFFFFFF);
+ }
+}
+
+void *JIT_TrialAlloc::GenAllocSFast(Flags flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ CPUSTUBLINKER sl;
+
+ CodeLabel *noLock = sl.NewCodeLabel();
+ CodeLabel *noAlloc = sl.NewCodeLabel();
+
+ // Emit the main body of the trial allocator, be it SP or MP
+ EmitCore(&sl, noLock, noAlloc, flags);
+
+#if CHECK_APP_DOMAIN_LEAKS
+ EmitSetAppDomain(&sl);
+#endif
+
+ // Here we are at the end of the success case - just emit a ret
+ sl.X86EmitReturn(0);
+
+ // Come here in case of no space
+ sl.EmitLabel(noAlloc);
+
+ // Release the lock in the uniprocessor case
+ EmitNoAllocCode(&sl, flags);
+
+ // Come here in case of failure to get the lock
+ sl.EmitLabel(noLock);
+
+ // Jump to the framed helper
+ sl.X86EmitNearJump(sl.NewExternalCodeLabel((LPVOID)JIT_New));
+
+ Stub *pStub = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap());
+
+ return (void *)pStub->GetEntryPoint();
+}
+
+
+void *JIT_TrialAlloc::GenBox(Flags flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ CPUSTUBLINKER sl;
+
+ CodeLabel *noLock = sl.NewCodeLabel();
+ CodeLabel *noAlloc = sl.NewCodeLabel();
+
+ // Save EBX and stash the address of the value to be boxed in it
+ sl.X86EmitPushReg(kEBX);
+ // mov ebx, edx
+ sl.Emit16(0xda8b);
+
+ // Save the MethodTable ptr
+ sl.X86EmitPushReg(kECX);
+
+ // mov ecx, [ecx]MethodTable.m_pWriteableData
+ sl.X86EmitOffsetModRM(0x8b, kECX, kECX, offsetof(MethodTable, m_pWriteableData));
+
+ // Check whether the class has not been initialized
+ // test [ecx]MethodTableWriteableData.m_dwFlags,MethodTableWriteableData::enum_flag_Unrestored
+ sl.X86EmitOffsetModRM(0xf7, (X86Reg)0x0, kECX, offsetof(MethodTableWriteableData, m_dwFlags));
+ sl.Emit32(MethodTableWriteableData::enum_flag_Unrestored);
+
+ // Restore the MethodTable ptr in ecx
+ sl.X86EmitPopReg(kECX);
+
+ // jne noAlloc
+ sl.X86EmitCondJump(noAlloc, X86CondCode::kJNE);
+
+ // Emit the main body of the trial allocator
+ EmitCore(&sl, noLock, noAlloc, flags);
+
+#if CHECK_APP_DOMAIN_LEAKS
+ EmitSetAppDomain(&sl);
+#endif
+
+ // Here we are at the end of the success case
+
+ // Check whether the object contains pointers
+ // test [ecx]MethodTable.m_dwFlags,MethodTable::enum_flag_ContainsPointers
+ sl.X86EmitOffsetModRM(0xf7, (X86Reg)0x0, kECX, offsetof(MethodTable, m_dwFlags));
+ sl.Emit32(MethodTable::enum_flag_ContainsPointers);
+
+ CodeLabel *pointerLabel = sl.NewCodeLabel();
+
+ // jne pointerLabel
+ sl.X86EmitCondJump(pointerLabel, X86CondCode::kJNE);
+
+ // We have no pointers - emit a simple inline copy loop
+
+ // mov ecx, [ecx]MethodTable.m_BaseSize
+ sl.X86EmitOffsetModRM(0x8b, kECX, kECX, offsetof(MethodTable, m_BaseSize));
+
+ // sub ecx,12
+ sl.X86EmitSubReg(kECX, 12);
+
+ CodeLabel *loopLabel = sl.NewCodeLabel();
+
+ sl.EmitLabel(loopLabel);
+
+ // mov edx,[ebx+ecx]
+ sl.X86EmitOp(0x8b, kEDX, kEBX, 0, kECX, 1);
+
+ // mov [eax+ecx+4],edx
+ sl.X86EmitOp(0x89, kEDX, kEAX, 4, kECX, 1);
+
+ // sub ecx,4
+ sl.X86EmitSubReg(kECX, 4);
+
+ // jg loopLabel
+ sl.X86EmitCondJump(loopLabel, X86CondCode::kJGE);
+
+ sl.X86EmitPopReg(kEBX);
+
+ sl.X86EmitReturn(0);
+
+ // Arrive at this label if there are pointers in the object
+ sl.EmitLabel(pointerLabel);
+
+ // Do call to CopyValueClassUnchecked(object, data, pMT)
+
+ // Pass pMT (still in ECX)
+ sl.X86EmitPushReg(kECX);
+
+ // Pass data (still in EBX)
+ sl.X86EmitPushReg(kEBX);
+
+ // Save the address of the object just allocated
+ // mov ebx,eax
+ sl.Emit16(0xD88B);
+
+
+ // Pass address of first user byte in the newly allocated object
+ sl.X86EmitAddReg(kEAX, 4);
+ sl.X86EmitPushReg(kEAX);
+
+ // call CopyValueClass
+ sl.X86EmitCall(sl.NewExternalCodeLabel((LPVOID) CopyValueClassUnchecked), 12);
+
+ // Restore the address of the newly allocated object and return it.
+ // mov eax,ebx
+ sl.Emit16(0xC38B);
+
+ sl.X86EmitPopReg(kEBX);
+
+ sl.X86EmitReturn(0);
+
+ // Come here in case of no space
+ sl.EmitLabel(noAlloc);
+
+ // Release the lock in the uniprocessor case
+ EmitNoAllocCode(&sl, flags);
+
+ // Come here in case of failure to get the lock
+ sl.EmitLabel(noLock);
+
+ // Restore the address of the value to be boxed
+ // mov edx,ebx
+ sl.Emit16(0xD38B);
+
+ // pop ebx
+ sl.X86EmitPopReg(kEBX);
+
+ // Jump to the slow version of JIT_Box
+ sl.X86EmitNearJump(sl.NewExternalCodeLabel((LPVOID) JIT_Box));
+
+ Stub *pStub = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap());
+
+ return (void *)pStub->GetEntryPoint();
+}
+
+
+HCIMPL2_RAW(Object*, UnframedAllocateObjectArray, /*TypeHandle*/PVOID ArrayType, DWORD cElements)
+{
+ // This isn't _really_ an FCALL and therefore shouldn't have the
+ // SO_TOLERANT part of the FCALL_CONTRACT because it is not entered
+ // from managed code.
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;
+ } CONTRACTL_END;
+
+ return OBJECTREFToObject(AllocateArrayEx(TypeHandle::FromPtr(ArrayType),
+ (INT32 *)(&cElements),
+ 1,
+ FALSE
+ DEBUG_ARG(FALSE)));
+}
+HCIMPLEND_RAW
+
+
+HCIMPL2_RAW(Object*, UnframedAllocatePrimitiveArray, CorElementType type, DWORD cElements)
+{
+ // This isn't _really_ an FCALL and therefore shouldn't have the
+ // SO_TOLERANT part of the FCALL_CONTRACT because it is not entered
+ // from managed code.
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;
+ } CONTRACTL_END;
+
+ return OBJECTREFToObject( AllocatePrimitiveArray(type, cElements, FALSE) );
+}
+HCIMPLEND_RAW
+
+
+void *JIT_TrialAlloc::GenAllocArray(Flags flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ CPUSTUBLINKER sl;
+
+ CodeLabel *noLock = sl.NewCodeLabel();
+ CodeLabel *noAlloc = sl.NewCodeLabel();
+
+ // We were passed a type descriptor in ECX, which contains the (shared)
+ // array method table and the element type.
+
+ // If this is the allocator for use from unmanaged code, ECX contains the
+ // element type descriptor, or the CorElementType.
+
+ // We need to save ECX for later
+
+ // push ecx
+ sl.X86EmitPushReg(kECX);
+
+ // The element count is in EDX - we need to save it for later.
+
+ // push edx
+ sl.X86EmitPushReg(kEDX);
+
+ if (flags & NO_FRAME)
+ {
+ if (flags & OBJ_ARRAY)
+ {
+ // we need to load the true method table from the type desc
+ sl.X86EmitIndexRegLoad(kECX, kECX, offsetof(ArrayTypeDesc,m_TemplateMT)-2);
+ }
+ else
+ {
+ // mov ecx,[g_pPredefinedArrayTypes+ecx*4]
+ sl.Emit8(0x8b);
+ sl.Emit16(0x8d0c);
+ sl.Emit32((int)(size_t)&g_pPredefinedArrayTypes);
+
+ // test ecx,ecx
+ sl.Emit16(0xc985);
+
+ // je noLock
+ sl.X86EmitCondJump(noLock, X86CondCode::kJZ);
+
+ // we need to load the true method table from the type desc
+ sl.X86EmitIndexRegLoad(kECX, kECX, offsetof(ArrayTypeDesc,m_TemplateMT));
+ }
+ }
+ else
+ {
+ // we need to load the true method table from the type desc
+ sl.X86EmitIndexRegLoad(kECX, kECX, offsetof(ArrayTypeDesc,m_TemplateMT)-2);
+
+#ifdef FEATURE_PREJIT
+ CodeLabel *indir = sl.NewCodeLabel();
+
+ // test cl,1
+ sl.Emit16(0xC1F6);
+ sl.Emit8(0x01);
+
+ // je indir
+ sl.X86EmitCondJump(indir, X86CondCode::kJZ);
+
+ // mov ecx, [ecx-1]
+ sl.X86EmitIndexRegLoad(kECX, kECX, -1);
+
+ sl.EmitLabel(indir);
+#endif
+ }
+
+ // Do a conservative check here. This is to avoid doing overflow checks within this function. We'll
+ // still have to do a size check before running through the body of EmitCore. The way we do the check
+ // against the allocation quantum there requires that we not overflow when adding the size to the
+ // current allocation context pointer. There is exactly LARGE_OBJECT_SIZE of headroom there, so do that
+ // check before we EmitCore.
+ //
+ // For reference types, we can just pick the correct value of maxElems and skip the second check.
+ //
+ // By the way, we use 256 as a "slack" value to ensure that we don't overflow because of the size of the
+ // array header or alignment.
+ // cmp edx, maxElems (opcode bytes here; the imm32 is emitted below once maxElems is computed)
+ sl.Emit16(0xfa81);
+
+
+ // The large object heap is 8 byte aligned, so for double arrays we
+ // want to bias toward putting things in the large object heap
+ unsigned maxElems = 0xffff - 256;
+
+ if ((flags & ALIGN8) && g_pConfig->GetDoubleArrayToLargeObjectHeapThreshold() < maxElems)
+ maxElems = g_pConfig->GetDoubleArrayToLargeObjectHeapThreshold();
+ if (flags & OBJ_ARRAY)
+ {
+ // Since we know that the array elements are sizeof(OBJECTREF), set maxElems exactly here (use the
+ // same slack from above).
+ maxElems = min(maxElems, (LARGE_OBJECT_SIZE/sizeof(OBJECTREF)) - 256);
+ }
+ sl.Emit32(maxElems);
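+
+    // Worked example of the bound (assuming LARGE_OBJECT_SIZE is 85000 here,
+    // which is this comment's assumption, not something this file defines):
+    // for an OBJ_ARRAY, maxElems = min(0xffff - 256, 85000/4 - 256)
+    // = min(65279, 20994) = 20994 elements, which keeps the element data plus
+    // header comfortably below the large object threshold.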
+
+
+ // jae noLock - seems tempting to jump to noAlloc, but we haven't taken the lock yet
+ sl.X86EmitCondJump(noLock, X86CondCode::kJAE);
+
+ if (flags & OBJ_ARRAY)
+ {
+ // In this case we know the element size is sizeof(void *), or 4 for x86
+ // This helps us in two ways - we can shift instead of multiplying, and
+ // there's no need to align the size either
+
+ _ASSERTE(sizeof(void *) == 4);
+
+ // mov eax, [ecx]MethodTable.m_BaseSize
+ sl.X86EmitIndexRegLoad(kEAX, kECX, offsetof(MethodTable, m_BaseSize));
+
+ // lea eax, [eax+edx*4]
+ sl.X86EmitOp(0x8d, kEAX, kEAX, 0, kEDX, 4);
+ }
+ else
+ {
+ // movzx eax, [ECX]MethodTable.m_dwFlags /* component size */
+ sl.Emit8(0x0f);
+ sl.X86EmitOffsetModRM(0xb7, kEAX, kECX, offsetof(MethodTable, m_dwFlags /* component size */));
+
+ // mul eax, edx
+ sl.Emit16(0xe2f7);
+
+ // add eax, [ecx]MethodTable.m_BaseSize
+ sl.X86EmitOffsetModRM(0x03, kEAX, kECX, offsetof(MethodTable, m_BaseSize));
+
+ // Since this is an array of value classes, we need an extra compare here to make sure we're still
+ // less than LARGE_OBJECT_SIZE. This is the last bit of arithmetic before we compare against the
+ // allocation context, so do it here.
+
+ // cmp eax, LARGE_OBJECT_SIZE
+ // ja noLock
+ sl.Emit8(0x3d);
+ sl.Emit32(LARGE_OBJECT_SIZE);
+ sl.X86EmitCondJump(noLock, X86CondCode::kJA);
+ }
+
+#if DATA_ALIGNMENT == 4
+ if (flags & OBJ_ARRAY)
+ {
+ // No need for rounding in this case - element size is 4, and m_BaseSize is guaranteed
+ // to be a multiple of 4.
+ }
+ else
+#endif // DATA_ALIGNMENT == 4
+ {
+ // round the size to a multiple of 4
+
+ // add eax, 3
+ sl.X86EmitAddReg(kEAX, (DATA_ALIGNMENT-1));
+
+ // and eax, ~3
+ sl.Emit16(0xe083);
+ sl.Emit8(~(DATA_ALIGNMENT-1));
+ }
+
+ flags = (Flags)(flags | SIZE_IN_EAX);
+
+ // Emit the main body of the trial allocator, be it SP or MP
+ EmitCore(&sl, noLock, noAlloc, flags);
+
+ // Here we are at the end of the success case - store element count
+ // and possibly the element type descriptor and return
+
+ // pop edx - element count
+ sl.X86EmitPopReg(kEDX);
+
+ // pop ecx - array type descriptor
+ sl.X86EmitPopReg(kECX);
+
+ // mov dword ptr [eax]ArrayBase.m_NumComponents, edx
+ sl.X86EmitIndexRegStore(kEAX, offsetof(ArrayBase,m_NumComponents), kEDX);
+
+#if CHECK_APP_DOMAIN_LEAKS
+ EmitSetAppDomain(&sl);
+#endif
+
+ // no stack parameters
+ sl.X86EmitReturn(0);
+
+ // Come here in case of no space
+ sl.EmitLabel(noAlloc);
+
+ // Release the lock in the uniprocessor case
+ EmitNoAllocCode(&sl, flags);
+
+ // Come here in case of failure to get the lock
+ sl.EmitLabel(noLock);
+
+ // pop edx - element count
+ sl.X86EmitPopReg(kEDX);
+
+ // pop ecx - array type descriptor
+ sl.X86EmitPopReg(kECX);
+
+ CodeLabel * target;
+ if (flags & NO_FRAME)
+ {
+ if (flags & OBJ_ARRAY)
+ {
+ // Jump to the unframed helper
+ target = sl.NewExternalCodeLabel((LPVOID)UnframedAllocateObjectArray);
+ _ASSERTE(target->e.m_pExternalAddress);
+ }
+ else
+ {
+ // Jump to the unframed helper
+ target = sl.NewExternalCodeLabel((LPVOID)UnframedAllocatePrimitiveArray);
+ _ASSERTE(target->e.m_pExternalAddress);
+ }
+ }
+ else
+ {
+ // Jump to the framed helper
+ target = sl.NewExternalCodeLabel((LPVOID)JIT_NewArr1);
+ _ASSERTE(target->e.m_pExternalAddress);
+ }
+ sl.X86EmitNearJump(target);
+
+ Stub *pStub = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap());
+
+ return (void *)pStub->GetEntryPoint();
+}
+
+
+void *JIT_TrialAlloc::GenAllocString(Flags flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ CPUSTUBLINKER sl;
+
+ CodeLabel *noLock = sl.NewCodeLabel();
+ CodeLabel *noAlloc = sl.NewCodeLabel();
+
+ // We were passed the number of characters in ECX
+
+ // push ecx
+ sl.X86EmitPushReg(kECX);
+
+ // mov eax, ecx
+ sl.Emit16(0xc18b);
+
+ // we need to load the method table for string from the global
+
+ // mov ecx, [g_pStringClass]
+ sl.Emit16(0x0d8b);
+ sl.Emit32((int)(size_t)&g_pStringClass);
+
+ // Instead of doing elaborate overflow checks, we just limit the number of elements
+ // to (LARGE_OBJECT_SIZE - 256)/sizeof(WCHAR) or less.
+ // This avoids all overflow problems, as well as making sure
+ // big string objects are correctly allocated in the big object heap.
+
+ _ASSERTE(sizeof(WCHAR) == 2);
+
+ // cmp eax,(LARGE_OBJECT_SIZE - 256)/sizeof(WCHAR)
+ sl.Emit16(0xf881);
+ sl.Emit32((LARGE_OBJECT_SIZE - 256)/sizeof(WCHAR));
+
+ // jae noLock - seems tempting to jump to noAlloc, but we haven't taken the lock yet
+ sl.X86EmitCondJump(noLock, X86CondCode::kJAE);
+
+ // mov edx, [ecx]MethodTable.m_BaseSize
+ sl.X86EmitIndexRegLoad(kEDX, kECX, offsetof(MethodTable,m_BaseSize));
+
+ // Calculate the final size to allocate.
+ // We need to calculate baseSize + cnt*2, then round that up by adding 3 and anding ~3.
+
+ // lea eax, [edx+eax*2+3], i.e. baseSize + cnt*2 + (DATA_ALIGNMENT-1)
+ sl.X86EmitOp(0x8d, kEAX, kEDX, (DATA_ALIGNMENT-1), kEAX, 2);
+
+ // and eax, ~3
+ sl.Emit16(0xe083);
+ sl.Emit8(~(DATA_ALIGNMENT-1));
+
+ flags = (Flags)(flags | SIZE_IN_EAX);
+
+ // Emit the main body of the trial allocator, be it SP or MP
+ EmitCore(&sl, noLock, noAlloc, flags);
+
+ // Here we are at the end of the success case - store element count
+ // and possibly the element type descriptor and return
+
+ // pop ecx - element count
+ sl.X86EmitPopReg(kECX);
+
+ // mov dword ptr [eax]StringObject.m_StringLength, ecx
+ sl.X86EmitIndexRegStore(kEAX, offsetof(StringObject,m_StringLength), kECX);
+
+#if CHECK_APP_DOMAIN_LEAKS
+ EmitSetAppDomain(&sl);
+#endif
+
+ // no stack parameters
+ sl.X86EmitReturn(0);
+
+ // Come here in case of no space
+ sl.EmitLabel(noAlloc);
+
+ // Release the lock in the uniprocessor case
+ EmitNoAllocCode(&sl, flags);
+
+ // Come here in case of failure to get the lock
+ sl.EmitLabel(noLock);
+
+ // pop ecx - element count
+ sl.X86EmitPopReg(kECX);
+
+ CodeLabel * target;
+ if (flags & NO_FRAME)
+ {
+ // Jump to the unframed helper
+ target = sl.NewExternalCodeLabel((LPVOID)UnframedAllocateString);
+ }
+ else
+ {
+ // Jump to the framed helper
+ target = sl.NewExternalCodeLabel((LPVOID)FramedAllocateString);
+ }
+ sl.X86EmitNearJump(target);
+
+ Stub *pStub = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap());
+
+ return (void *)pStub->GetEntryPoint();
+}
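+
+// Worked example of the size arithmetic above: for a 10-character string with,
+// say, m_BaseSize = 14 (an illustrative value), the lea computes
+// 14 + 10*2 + (DATA_ALIGNMENT-1) = 37, and the "and eax, ~3" rounds that down
+// to 36, i.e. baseSize + cnt*sizeof(WCHAR) rounded up to the next
+// DATA_ALIGNMENT boundary.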
+
+
+FastStringAllocatorFuncPtr fastStringAllocator = UnframedAllocateString;
+
+FastObjectArrayAllocatorFuncPtr fastObjectArrayAllocator = UnframedAllocateObjectArray;
+
+FastPrimitiveArrayAllocatorFuncPtr fastPrimitiveArrayAllocator = UnframedAllocatePrimitiveArray;
+
+// For this helper,
+// if bCCtorCheck == true:
+//     ECX contains the domain neutral module ID
+//     EDX contains the class domain ID
+// else:
+//     ECX contains the domain neutral module ID
+//     EDX is junk
+// The shared static base is returned in EAX.
+
+// "init" should be the address of a routine which takes an argument of
+// the module domain ID, the class domain ID, and returns the static base pointer
+void EmitFastGetSharedStaticBase(CPUSTUBLINKER *psl, CodeLabel *init, bool bCCtorCheck, bool bGCStatic, bool bSingleAppDomain)
+{
+ STANDARD_VM_CONTRACT;
+
+ CodeLabel *DoInit = 0;
+ if (bCCtorCheck)
+ {
+ DoInit = psl->NewCodeLabel();
+ }
+
+ // mov eax, ecx
+ psl->Emit8(0x89);
+ psl->Emit8(0xc8);
+
+ if(!bSingleAppDomain)
+ {
+ // Check tag
+ CodeLabel *cctorCheck = psl->NewCodeLabel();
+
+
+ // test eax, 1
+ psl->Emit8(0xa9);
+ psl->Emit32(1);
+
+ // jz cctorCheck
+ psl->X86EmitCondJump(cctorCheck, X86CondCode::kJZ);
+
+ // mov eax GetAppDomain()
+ psl->X86EmitCurrentAppDomainFetch(kEAX, (1<<kECX)|(1<<kEDX));
+
+ // mov eax [eax->m_sDomainLocalBlock.m_pModuleSlots]
+ psl->X86EmitIndexRegLoad(kEAX, kEAX, (__int32) AppDomain::GetOffsetOfModuleSlotsPointer());
+
+ // Note: weird address arithmetic effectively does:
+ // shift over 1 to remove tag bit (which is always 1), then multiply by 4.
+ // mov eax [eax + ecx*2 - 2]
+ psl->X86EmitOp(0x8b, kEAX, kEAX, -2, kECX, 2);
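+
+        // Worked example: the tagged module ID has the form (index << 1) | 1,
+        // so eax + ecx*2 - 2 = eax + ((2*index + 1)*2 - 2) = eax + index*4,
+        // i.e. an ordinary 4-byte indexed load from the module slots array.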
+
+ // cctorCheck:
+ psl->EmitLabel(cctorCheck);
+
+ }
+
+ if (bCCtorCheck)
+ {
+ // test [eax + edx + offsetof(DomainLocalModule, m_pDataBlob)], ClassInitFlags::INITIALIZED_FLAG // Is the class inited?
+ _ASSERTE(FitsInI1(ClassInitFlags::INITIALIZED_FLAG));
+ _ASSERTE(FitsInI1(DomainLocalModule::GetOffsetOfDataBlob()));
+
+ BYTE testClassInit[] = { 0xF6, 0x44, 0x10,
+ (BYTE) DomainLocalModule::GetOffsetOfDataBlob(), (BYTE)ClassInitFlags::INITIALIZED_FLAG };
+
+ psl->EmitBytes(testClassInit, sizeof(testClassInit));
+
+ // jz init // no, init it
+ psl->X86EmitCondJump(DoInit, X86CondCode::kJZ);
+ }
+
+ if (bGCStatic)
+ {
+ // Indirect to get the pointer to the first GC Static
+ psl->X86EmitIndexRegLoad(kEAX, kEAX, (__int32) DomainLocalModule::GetOffsetOfGCStaticPointer());
+ }
+
+ // ret
+ psl->X86EmitReturn(0);
+
+ if (bCCtorCheck)
+ {
+ // DoInit:
+ psl->EmitLabel(DoInit);
+
+ // push edx (must be preserved)
+ psl->X86EmitPushReg(kEDX);
+
+ // call init
+ psl->X86EmitCall(init, 0);
+
+ // pop edx
+ psl->X86EmitPopReg(kEDX);
+
+ // ret
+ psl->X86EmitReturn(0);
+ }
+
+}
+
+void *GenFastGetSharedStaticBase(bool bCheckCCtor, bool bGCStatic, bool bSingleAppDomain)
+{
+ STANDARD_VM_CONTRACT;
+
+ CPUSTUBLINKER sl;
+
+ CodeLabel *init;
+ if (bGCStatic)
+ {
+ init = sl.NewExternalCodeLabel((LPVOID)JIT_GetSharedGCStaticBase);
+ }
+ else
+ {
+ init = sl.NewExternalCodeLabel((LPVOID)JIT_GetSharedNonGCStaticBase);
+ }
+
+ EmitFastGetSharedStaticBase(&sl, init, bCheckCCtor, bGCStatic, bSingleAppDomain);
+
+ Stub *pStub = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap());
+
+ return (void*) pStub->GetEntryPoint();
+}
+
+
+#ifdef ENABLE_FAST_GCPOLL_HELPER
+void EnableJitGCPoll()
+{
+ SetJitHelperFunction(CORINFO_HELP_POLL_GC, (void*)JIT_PollGC);
+}
+void DisableJitGCPoll()
+{
+ SetJitHelperFunction(CORINFO_HELP_POLL_GC, (void*)JIT_PollGC_Nop);
+}
+#endif
+
+#define NUM_WRITE_BARRIERS 6
+
+static const BYTE c_rgWriteBarrierRegs[NUM_WRITE_BARRIERS] = {
+ 0, // EAX
+ 1, // ECX
+ 3, // EBX
+ 6, // ESI
+ 7, // EDI
+ 5, // EBP
+};
+
+static const void * const c_rgWriteBarriers[NUM_WRITE_BARRIERS] = {
+ (void *)JIT_WriteBarrierEAX,
+ (void *)JIT_WriteBarrierECX,
+ (void *)JIT_WriteBarrierEBX,
+ (void *)JIT_WriteBarrierESI,
+ (void *)JIT_WriteBarrierEDI,
+ (void *)JIT_WriteBarrierEBP,
+};
+
+#ifdef WRITE_BARRIER_CHECK
+static const void * const c_rgDebugWriteBarriers[NUM_WRITE_BARRIERS] = {
+ (void *)JIT_DebugWriteBarrierEAX,
+ (void *)JIT_DebugWriteBarrierECX,
+ (void *)JIT_DebugWriteBarrierEBX,
+ (void *)JIT_DebugWriteBarrierESI,
+ (void *)JIT_DebugWriteBarrierEDI,
+ (void *)JIT_DebugWriteBarrierEBP,
+};
+#endif // WRITE_BARRIER_CHECK
+
+#define DEBUG_RANDOM_BARRIER_CHECK DbgGetEXETimeStamp() % 7 == 4
+
+/*********************************************************************/
+// Initialize the part of the JIT helpers that require very little of
+// EE infrastructure to be in place.
+/*********************************************************************/
+void InitJITHelpers1()
+{
+ STANDARD_VM_CONTRACT;
+
+#define ETW_NUM_JIT_HELPERS 10
+ static const LPCWSTR pHelperNames[ETW_NUM_JIT_HELPERS] = {
+ W("@NewObject"),
+ W("@NewObjectAlign8"),
+ W("@Box"),
+ W("@NewArray1Object"),
+ W("@NewArray1ValueType"),
+ W("@NewArray1ObjectAlign8"),
+ W("@StaticBaseObject"),
+ W("@StaticBaseNonObject"),
+ W("@StaticBaseObjectNoCCtor"),
+ W("@StaticBaseNonObjectNoCCtor")
+ };
+
+ PVOID pMethodAddresses[ETW_NUM_JIT_HELPERS]={0};
+
+ _ASSERTE(g_SystemInfo.dwNumberOfProcessors != 0);
+
+ JIT_TrialAlloc::Flags flags = GCHeap::UseAllocationContexts() ?
+ JIT_TrialAlloc::MP_ALLOCATOR : JIT_TrialAlloc::NORMAL;
+
+ // Get CPU features and check for SSE2 support.
+ // This code should eventually probably be moved into codeman.cpp,
+ // where we set the cpu feature flags for the JIT based on CPU type and features.
+ DWORD dwCPUFeaturesECX;
+ DWORD dwCPUFeaturesEDX;
+
+ __asm
+ {
+ pushad
+ mov eax, 1
+ cpuid
+ mov dwCPUFeaturesECX, ecx
+ mov dwCPUFeaturesEDX, edx
+ popad
+ }
+
+ // If bit 26 (SSE2) is set, then we can use the SSE2 flavors
+ // and faster x87 implementation for the P4 of Dbl2Lng.
+ if (dwCPUFeaturesEDX & (1<<26))
+ {
+ SetJitHelperFunction(CORINFO_HELP_DBL2INT, JIT_Dbl2IntSSE2);
+ if (dwCPUFeaturesECX & 1) // check SSE3
+ {
+ SetJitHelperFunction(CORINFO_HELP_DBL2UINT, JIT_Dbl2LngSSE3);
+ SetJitHelperFunction(CORINFO_HELP_DBL2LNG, JIT_Dbl2LngSSE3);
+ }
+ else
+ {
+ SetJitHelperFunction(CORINFO_HELP_DBL2UINT, JIT_Dbl2LngP4x87); // SSE2 only for signed
+ SetJitHelperFunction(CORINFO_HELP_DBL2LNG, JIT_Dbl2LngP4x87);
+ }
+ }
+
+ if (!(TrackAllocationsEnabled()
+ || LoggingOn(LF_GCALLOC, LL_INFO10)
+#ifdef _DEBUG
+ || (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP) != 0)
+#endif
+ )
+ )
+ {
+ // Replace the slow helpers with faster version
+
+ pMethodAddresses[0] = JIT_TrialAlloc::GenAllocSFast(flags);
+ SetJitHelperFunction(CORINFO_HELP_NEWSFAST, pMethodAddresses[0]);
+ pMethodAddresses[1] = JIT_TrialAlloc::GenAllocSFast((JIT_TrialAlloc::Flags)(flags|JIT_TrialAlloc::ALIGN8 | JIT_TrialAlloc::ALIGN8OBJ));
+ SetJitHelperFunction(CORINFO_HELP_NEWSFAST_ALIGN8, pMethodAddresses[1]);
+ pMethodAddresses[2] = JIT_TrialAlloc::GenBox(flags);
+ SetJitHelperFunction(CORINFO_HELP_BOX, pMethodAddresses[2]);
+ pMethodAddresses[3] = JIT_TrialAlloc::GenAllocArray((JIT_TrialAlloc::Flags)(flags|JIT_TrialAlloc::OBJ_ARRAY));
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_OBJ, pMethodAddresses[3]);
+ pMethodAddresses[4] = JIT_TrialAlloc::GenAllocArray(flags);
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_VC, pMethodAddresses[4]);
+ pMethodAddresses[5] = JIT_TrialAlloc::GenAllocArray((JIT_TrialAlloc::Flags)(flags|JIT_TrialAlloc::ALIGN8));
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_ALIGN8, pMethodAddresses[5]);
+
+ fastObjectArrayAllocator = (FastObjectArrayAllocatorFuncPtr)JIT_TrialAlloc::GenAllocArray((JIT_TrialAlloc::Flags)(flags|JIT_TrialAlloc::NO_FRAME|JIT_TrialAlloc::OBJ_ARRAY));
+ fastPrimitiveArrayAllocator = (FastPrimitiveArrayAllocatorFuncPtr)JIT_TrialAlloc::GenAllocArray((JIT_TrialAlloc::Flags)(flags|JIT_TrialAlloc::NO_FRAME));
+
+ // If allocation logging is on, then we divert calls to FastAllocateString to an Ecall method, not this
+ // generated method. Find this workaround in Ecall::Init() in ecall.cpp.
+ ECall::DynamicallyAssignFCallImpl((PCODE) JIT_TrialAlloc::GenAllocString(flags), ECall::FastAllocateString);
+
+ // generate another allocator for use from unmanaged code (won't need a frame)
+ fastStringAllocator = (FastStringAllocatorFuncPtr) JIT_TrialAlloc::GenAllocString((JIT_TrialAlloc::Flags)(flags|JIT_TrialAlloc::NO_FRAME));
+ //UnframedAllocateString;
+ }
+
+ bool bSingleAppDomain = IsSingleAppDomain();
+
+ // Replace static helpers with faster assembly versions
+ pMethodAddresses[6] = GenFastGetSharedStaticBase(true, true, bSingleAppDomain);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE, pMethodAddresses[6]);
+ pMethodAddresses[7] = GenFastGetSharedStaticBase(true, false, bSingleAppDomain);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE, pMethodAddresses[7]);
+ pMethodAddresses[8] = GenFastGetSharedStaticBase(false, true, bSingleAppDomain);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR, pMethodAddresses[8]);
+ pMethodAddresses[9] = GenFastGetSharedStaticBase(false, false, bSingleAppDomain);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR, pMethodAddresses[9]);
+
+ ETW::MethodLog::StubsInitialized(pMethodAddresses, (PVOID *)pHelperNames, ETW_NUM_JIT_HELPERS);
+
+#ifdef ENABLE_FAST_GCPOLL_HELPER
+ // code:JIT_PollGC_Nop
+ SetJitHelperFunction(CORINFO_HELP_POLL_GC, (void*)JIT_PollGC_Nop);
+#endif //ENABLE_FAST_GCPOLL_HELPER
+
+ // All write barrier helpers should fit into one page.
+ // If you hit this assert on a retail build, there is most likely a problem with the BBT script.
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", (BYTE*)JIT_WriteBarrierLast - (BYTE*)JIT_WriteBarrierStart < PAGE_SIZE);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", (BYTE*)JIT_PatchedWriteBarrierLast - (BYTE*)JIT_PatchedWriteBarrierStart < PAGE_SIZE);
+
+ // Copy the write barriers to their final resting place.
+ for (int iBarrier = 0; iBarrier < NUM_WRITE_BARRIERS; iBarrier++)
+ {
+ BYTE * pfunc = (BYTE *) JIT_WriteBarrierReg_PreGrow;
+
+ BYTE * pBuf = (BYTE *)c_rgWriteBarriers[iBarrier];
+ int reg = c_rgWriteBarrierRegs[iBarrier];
+
+ memcpy(pBuf, pfunc, 34);
+
+ // assert the copied code ends in a ret to make sure we got the right length
+ _ASSERTE(pBuf[33] == 0xC3);
+
+ // We need to adjust registers in a couple of instructions.
+ // It would be nice to have the template contain all zeroes for
+ // the register fields (corresponding to EAX), but that doesn't
+ // work because we would then get a smaller encoding for the compares
+ // that only works for EAX and not for the other registers.
+ // So we always have to clear the register fields before updating them.
+
+ // First instruction to patch is a mov [edx], reg
+
+ _ASSERTE(pBuf[0] == 0x89);
+ // Update the reg field (bits 3..5) of the ModR/M byte of this instruction
+ pBuf[1] &= 0xc7;
+ pBuf[1] |= reg << 3;
+
+ // Second instruction to patch is cmp reg, imm32 (low bound)
+
+ _ASSERTE(pBuf[2] == 0x81);
+ // Here the lowest three bits in ModR/M field are the register
+ pBuf[3] &= 0xf8;
+ pBuf[3] |= reg;
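+
+        // Worked example: for ESI (reg == 6) these two patches turn the
+        // template's  89 02 (mov [edx], eax)  and  81 F8 imm32 (cmp eax, imm32)
+        // into        89 32 (mov [edx], esi)  and  81 FE imm32 (cmp esi, imm32).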
+
+#ifdef WRITE_BARRIER_CHECK
+ // Don't do the fancy optimization just jump to the old one
+ // Use the slow one from time to time in a debug build because
+ // there are some good asserts in the unoptimized one
+ if ((g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK) || DEBUG_RANDOM_BARRIER_CHECK) {
+ pfunc = &pBuf[0];
+ *pfunc++ = 0xE9; // JMP c_rgDebugWriteBarriers[iBarrier]
+ *((DWORD*) pfunc) = (BYTE*) c_rgDebugWriteBarriers[iBarrier] - (pfunc + sizeof(DWORD));
+ }
+#endif // WRITE_BARRIER_CHECK
+ }
+
+#ifndef CODECOVERAGE
+ ValidateWriteBarrierHelpers();
+#endif
+
+ // Leave the patched region writable for StompWriteBarrierEphemeral(), StompWriteBarrierResize()
+ // and CTPMethodTable::ActivatePrecodeRemotingThunk
+
+ // Initialize g_TailCallFrameVptr for JIT_TailCall helper
+ g_TailCallFrameVptr = (void*)TailCallFrame::GetMethodFrameVPtr();
+}
+
+// These constants are offsets into our write barrier helpers for values that get updated as the bounds of the managed heap change.
+// ephemeral region
+const int AnyGrow_EphemeralLowerBound = 4; // offset is the same for both pre and post grow functions
+const int PostGrow_EphemeralUpperBound = 12;
+
+// card table
+const int PreGrow_CardTableFirstLocation = 16;
+const int PreGrow_CardTableSecondLocation = 28;
+const int PostGrow_CardTableFirstLocation = 24;
+const int PostGrow_CardTableSecondLocation = 36;
+
+
+#ifndef CODECOVERAGE        // Deactivate alignment validation for code coverage builds
+                            // because the instrumented binaries will not preserve alignment constraints and we will fail.
+
+void ValidateWriteBarrierHelpers()
+{
+ // We have an invariant that the addresses of all the values that we update in our write barrier
+ // helpers must be naturally aligned. This is so that the update can happen atomically, since there
+ // are places where we update these values while the EE is running.
+
+#ifdef WRITE_BARRIER_CHECK
+ // write barrier checking uses the slower helpers that we don't bash so there is no need for validation
+ if ((g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK) || DEBUG_RANDOM_BARRIER_CHECK)
+ return;
+#endif // WRITE_BARRIER_CHECK
+
+ // first validate the PreGrow helper
+ BYTE* pWriteBarrierFunc = reinterpret_cast<BYTE*>(JIT_WriteBarrierEAX);
+
+ // ephemeral region
+ DWORD* pLocation = reinterpret_cast<DWORD*>(&pWriteBarrierFunc[AnyGrow_EphemeralLowerBound]);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", (reinterpret_cast<DWORD>(pLocation) & 0x3) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", *pLocation == 0xf0f0f0f0);
+
+ // card table
+ pLocation = reinterpret_cast<DWORD*>(&pWriteBarrierFunc[PreGrow_CardTableFirstLocation]);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", (reinterpret_cast<DWORD>(pLocation) & 0x3) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", *pLocation == 0xf0f0f0f0);
+ pLocation = reinterpret_cast<DWORD*>(&pWriteBarrierFunc[PreGrow_CardTableSecondLocation]);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", (reinterpret_cast<DWORD>(pLocation) & 0x3) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", *pLocation == 0xf0f0f0f0);
+
+ // now validate the PostGrow helper
+ pWriteBarrierFunc = reinterpret_cast<BYTE*>(JIT_WriteBarrierReg_PostGrow);
+
+ // ephemeral region
+ pLocation = reinterpret_cast<DWORD*>(&pWriteBarrierFunc[AnyGrow_EphemeralLowerBound]);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", (reinterpret_cast<DWORD>(pLocation) & 0x3) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", *pLocation == 0xf0f0f0f0);
+ pLocation = reinterpret_cast<DWORD*>(&pWriteBarrierFunc[PostGrow_EphemeralUpperBound]);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", (reinterpret_cast<DWORD>(pLocation) & 0x3) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", *pLocation == 0xf0f0f0f0);
+
+ // card table
+ pLocation = reinterpret_cast<DWORD*>(&pWriteBarrierFunc[PostGrow_CardTableFirstLocation]);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", (reinterpret_cast<DWORD>(pLocation) & 0x3) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", *pLocation == 0xf0f0f0f0);
+ pLocation = reinterpret_cast<DWORD*>(&pWriteBarrierFunc[PostGrow_CardTableSecondLocation]);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", (reinterpret_cast<DWORD>(pLocation) & 0x3) == 0);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/i386/JITinterfaceX86.cpp", *pLocation == 0xf0f0f0f0);
+}
+
+#endif //CODECOVERAGE
+/*********************************************************************/
+
+#define WriteBarrierIsPreGrow() (((BYTE *)JIT_WriteBarrierEAX)[10] == 0xc1)
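+
+// Why byte 10: in the pre-grow thunk the instruction at offset 10 is the
+// "shr edx, ..." card-table shift (opcode 0xc1), while in the post-grow thunk
+// that offset holds the second "cmp reg, imm32" (opcode 0x81) for the upper
+// bound; compare the asserts in StompWriteBarrierEphemeral below. (This offset
+// reading is an inference from those asserts, not separately documented.)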
+
+
+/*********************************************************************/
+// When a GC happens, the upper and lower bounds of the ephemeral
+// generation change. This routine updates the WriteBarrier thunks
+// with the new values.
+void StompWriteBarrierEphemeral()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+#ifdef WRITE_BARRIER_CHECK
+ // Don't do the fancy optimization if we are checking write barrier
+ if (((BYTE *)JIT_WriteBarrierEAX)[0] == 0xE9) // we are using slow write barrier
+ return;
+#endif // WRITE_BARRIER_CHECK
+
+ BOOL flushICache = FALSE;
+
+ // Update the lower bound.
+ for (int iBarrier = 0; iBarrier < NUM_WRITE_BARRIERS; iBarrier++)
+ {
+ BYTE * pBuf = (BYTE *)c_rgWriteBarriers[iBarrier];
+
+ // assert there is in fact a cmp r/m32, imm32 there
+ _ASSERTE(pBuf[2] == 0x81);
+
+ // Update the immediate which is the lower bound of the ephemeral generation
+ size_t *pfunc = (size_t *) &pBuf[AnyGrow_EphemeralLowerBound];
+ //avoid trivial self modifying code
+ if (*pfunc != (size_t) g_ephemeral_low)
+ {
+ flushICache = TRUE;
+ *pfunc = (size_t) g_ephemeral_low;
+ }
+ if (!WriteBarrierIsPreGrow())
+ {
+ // assert there is in fact a cmp r/m32, imm32 there
+ _ASSERTE(pBuf[10] == 0x81);
+
+ // Update the upper bound if we are using the PostGrow thunk.
+ pfunc = (size_t *) &pBuf[PostGrow_EphemeralUpperBound];
+ //avoid trivial self modifying code
+ if (*pfunc != (size_t) g_ephemeral_high)
+ {
+ flushICache = TRUE;
+ *pfunc = (size_t) g_ephemeral_high;
+ }
+ }
+ }
+
+ if (flushICache)
+ FlushInstructionCache(GetCurrentProcess(), (void *)JIT_PatchedWriteBarrierStart,
+ (BYTE*)JIT_PatchedWriteBarrierLast - (BYTE*)JIT_PatchedWriteBarrierStart);
+}
+
+/*********************************************************************/
+// When the GC heap grows, the ephemeral generation may no longer
+// be after the older generations. If this happens, we need to switch
+// to the PostGrow thunk that checks both upper and lower bounds.
+// Regardless, we need to update the thunk with the new
+// card table location.
+void StompWriteBarrierResize(BOOL bReqUpperBoundsCheck)
+{
+ CONTRACTL {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {GC_NOTRIGGER;}
+ } CONTRACTL_END;
+
+#ifdef WRITE_BARRIER_CHECK
+ // Don't do the fancy optimization if we are checking write barrier
+ if (((BYTE *)JIT_WriteBarrierEAX)[0] == 0xE9) // we are using slow write barrier
+ return;
+#endif // WRITE_BARRIER_CHECK
+
+ bool bWriteBarrierIsPreGrow = WriteBarrierIsPreGrow();
+ bool bStompWriteBarrierEphemeral = false;
+
+ BOOL bEESuspended = FALSE;
+
+ for (int iBarrier = 0; iBarrier < NUM_WRITE_BARRIERS; iBarrier++)
+ {
+ BYTE * pBuf = (BYTE *)c_rgWriteBarriers[iBarrier];
+ int reg = c_rgWriteBarrierRegs[iBarrier];
+
+ size_t *pfunc;
+
+ // Check if we are still using the pre-grow version of the write barrier.
+ if (bWriteBarrierIsPreGrow)
+ {
+ // Check if we need to use the upper bounds checking barrier stub.
+ if (bReqUpperBoundsCheck)
+ {
+ GCX_MAYBE_COOP_NO_THREAD_BROKEN((GetThread()!=NULL));
+ if( !IsGCThread() && !bEESuspended) {
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_GC_PREP);
+ bEESuspended = TRUE;
+ }
+
+ pfunc = (size_t *) JIT_WriteBarrierReg_PostGrow;
+ memcpy(pBuf, pfunc, 42);
+
+ // assert the copied code ends in a ret to make sure we got the right length
+ _ASSERTE(pBuf[41] == 0xC3);
+
+ // We need to adjust registers in a couple of instructions.
+ // It would be nice to have the template contain all zeroes for
+ // the register fields (corresponding to EAX), but that doesn't
+ // work because we would then get a smaller encoding for the compares
+ // that only works for EAX and not for the other registers.
+ // So we always have to clear the register fields before updating them.
+
+ // First instruction to patch is a mov [edx], reg
+
+ _ASSERTE(pBuf[0] == 0x89);
+ // Update the reg field (bits 3..5) of the ModR/M byte of this instruction
+ pBuf[1] &= 0xc7;
+ pBuf[1] |= reg << 3;
+
+ // Second instruction to patch is cmp reg, imm32 (low bound)
+
+ _ASSERTE(pBuf[2] == 0x81);
+ // Here the lowest three bits in ModR/M field are the register
+ pBuf[3] &= 0xf8;
+ pBuf[3] |= reg;
+
+ // Third instruction to patch is another cmp reg, imm32 (high bound)
+
+ _ASSERTE(pBuf[10] == 0x81);
+ // Here the lowest three bits in ModR/M field are the register
+ pBuf[11] &= 0xf8;
+ pBuf[11] |= reg;
+
+ bStompWriteBarrierEphemeral = true;
+ // What we're trying to update is the offset field of a
+
+ // cmp offset[edx], 0ffh instruction
+ _ASSERTE(pBuf[22] == 0x80);
+ pfunc = (size_t *) &pBuf[PostGrow_CardTableFirstLocation];
+ *pfunc = (size_t) g_card_table;
+
+ // What we're trying to update is the offset field of a
+ // mov offset[edx], 0ffh instruction
+ _ASSERTE(pBuf[34] == 0xC6);
+ pfunc = (size_t *) &pBuf[PostGrow_CardTableSecondLocation];
+
+ }
+ else
+ {
+ // What we're trying to update is the offset field of a
+ // cmp offset[edx], 0ffh instruction
+ _ASSERTE(pBuf[14] == 0x80);
+ pfunc = (size_t *) &pBuf[PreGrow_CardTableFirstLocation];
+ *pfunc = (size_t) g_card_table;
+
+ // What we're trying to update is the offset field of a
+ // mov offset[edx], 0ffh instruction
+ _ASSERTE(pBuf[26] == 0xC6);
+ pfunc = (size_t *) &pBuf[PreGrow_CardTableSecondLocation];
+ }
+ }
+ else
+ {
+ // What we're trying to update is the offset field of a
+ // cmp offset[edx], 0ffh instruction
+ _ASSERTE(pBuf[22] == 0x80);
+ pfunc = (size_t *) &pBuf[PostGrow_CardTableFirstLocation];
+ *pfunc = (size_t) g_card_table;
+
+ // What we're trying to update is the offset field of a
+ // mov offset[edx], 0ffh instruction
+ _ASSERTE(pBuf[34] == 0xC6);
+ pfunc = (size_t *) &pBuf[PostGrow_CardTableSecondLocation];
+ }
+
+ // Stick in the card table address at the second location in the thunk.
+ *pfunc = (size_t) g_card_table;
+ }
+
+ if (bStompWriteBarrierEphemeral)
+ StompWriteBarrierEphemeral();
+ else
+ FlushInstructionCache(GetCurrentProcess(), (void *)JIT_PatchedWriteBarrierStart,
+ (BYTE*)JIT_PatchedWriteBarrierLast - (BYTE*)JIT_PatchedWriteBarrierStart);
+
+ if(bEESuspended)
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+}
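+
+// For intuition, the checks the two thunk flavors perform are roughly the
+// following (a minimal sketch; the real thunks are the patched assembly
+// handled above):
+//
+//   // PreGrow: the ephemeral generation sits above all older generations,
+//   // so a single lower bound suffices.
+//   if (ref >= g_ephemeral_low) { /* mark card */ }
+//
+//   // PostGrow: the ephemeral generation may sit in the middle of the heap,
+//   // so both bounds must be checked.
+//   if (ref >= g_ephemeral_low && ref < g_ephemeral_high) { /* mark card */ }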
+
diff --git a/src/vm/i386/profiler.cpp b/src/vm/i386/profiler.cpp
new file mode 100644
index 0000000000..6611de6989
--- /dev/null
+++ b/src/vm/i386/profiler.cpp
@@ -0,0 +1,339 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// FILE: profiler.cpp
+//
+
+//
+
+//
+// ======================================================================================
+
+#include "common.h"
+
+#ifdef PROFILING_SUPPORTED
+#include "proftoeeinterfaceimpl.h"
+
+//
+// The following structure is the format on x86 builds of the data
+// being passed in platformSpecificHandle for ProfileEnter/Leave/Tailcall
+//
+typedef struct _PROFILE_PLATFORM_SPECIFIC_DATA
+{
+ FunctionID functionId;
+ DWORD doubleBuffer1;
+ DWORD doubleBuffer2;
+ DWORD floatBuffer;
+ DWORD floatingPointValuePresent;
+ UINT_PTR eax; // eax and edx must be contiguous in this structure to make getting 64-bit return values easier.
+ UINT_PTR edx;
+ UINT_PTR ecx;
+ UINT_PTR esp;
+ UINT_PTR ip;
+} PROFILE_PLATFORM_SPECIFIC_DATA, *PPROFILE_PLATFORM_SPECIFIC_DATA;
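+
+// Because eax and edx are contiguous here, a 64-bit integer return value can
+// be read back in one shot (an illustrative sketch, not used by this file):
+//
+//   PROFILE_PLATFORM_SPECIFIC_DATA *pData = (PROFILE_PLATFORM_SPECIFIC_DATA *)handle;
+//   UINT64 retVal = *(UINT64 *)&pData->eax; // low 32 bits in eax, high 32 in edx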
+
+
+/*
+ * ProfileGetIPFromPlatformSpecificHandle
+ *
+ * This routine takes the platformSpecificHandle and retrieves from it the
+ * IP value.
+ *
+ * Parameters:
+ * handle - the platformSpecificHandle passed to ProfileEnter/Leave/Tailcall
+ *
+ * Returns:
+ * The IP value stored in the handle.
+ */
+UINT_PTR ProfileGetIPFromPlatformSpecificHandle(void *handle)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return ((PROFILE_PLATFORM_SPECIFIC_DATA *)handle)->ip;
+}
+
+
+/*
+ * ProfileSetFunctionIDInPlatformSpecificHandle
+ *
+ * This routine takes the platformSpecificHandle and functionID, and assigns
+ * functionID to the functionId field of the platformSpecificHandle.
+ *
+ * Parameters:
+ * pPlatformSpecificHandle - the platformSpecificHandle passed to ProfileEnter/Leave/Tailcall
+ * functionID - the FunctionID to be assigned
+ *
+ * Returns:
+ * None
+ */
+void ProfileSetFunctionIDInPlatformSpecificHandle(void * pPlatformSpecificHandle, FunctionID functionID)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pPlatformSpecificHandle != NULL);
+ _ASSERTE(functionID != NULL);
+
+ PROFILE_PLATFORM_SPECIFIC_DATA * pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA *>(pPlatformSpecificHandle);
+ pData->functionId = functionID;
+}
+
+/*
+ * ProfileArgIterator::ProfileArgIterator
+ *
+ * Constructor. Initializes for arg iteration.
+ *
+ * Parameters:
+ * pMetaSig - The signature of the method we are going to iterate over
+ * platformSpecificHandle - the value passed to ProfileEnter/Leave/Tailcall
+ *
+ * Returns:
+ * None.
+ */
+ProfileArgIterator::ProfileArgIterator(MetaSig * pMetaSig, void * platformSpecificHandle):
+ m_argIterator(pMetaSig)
+{
+ //
+ // It would be really nice to contract this, but the underlying functions are convolutedly
+ // contracted. Basically everything should be loaded by the time the profiler gets a call
+ // back, so everything is NOTHROW/NOTRIGGER, but there is currently no mechanism
+ // for saying that the contracts in called functions should be for the best case,
+ // not the worst case.
+ //
+ WRAPPER_NO_CONTRACT;
+
+ m_handle = platformSpecificHandle;
+}
+
+/*
+ * ProfileArgIterator::~ProfileArgIterator
+ *
+ * Destructor, releases all resources.
+ *
+ */
+ProfileArgIterator::~ProfileArgIterator()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+/*
+ * ProfileArgIterator::GetNextArgAddr
+ *
+ * After initialization, this method is called repeatedly until it
+ * returns NULL to get the address of each arg. Note: this address
+ * could be anywhere on the stack.
+ *
+ * Returns:
+ * Address of the argument, or NULL if iteration is complete.
+ */
+LPVOID ProfileArgIterator::GetNextArgAddr()
+{
+ //
+ // It would be really nice to contract this, but the underlying functions are convolutedly
+ // contracted. Basically everything should be loaded by the time the profiler gets a call
+ // back, so everything is NOTHROW/NOTRIGGER, but there is currently no mechanism
+ // for saying that the contracts in called functions should be for the best case,
+ // not the worst case.
+ //
+ WRAPPER_NO_CONTRACT;
+
+ int argOffset = m_argIterator.GetNextOffset();
+
+ //
+ // Get the platform-specific data from the handle.
+ //
+ PROFILE_PLATFORM_SPECIFIC_DATA *pData = (PROFILE_PLATFORM_SPECIFIC_DATA *)m_handle;
+
+ //
+ // TransitionBlock::InvalidOffset indicates the end of the args.
+ //
+ if (argOffset == TransitionBlock::InvalidOffset)
+ {
+ return NULL;
+ }
+
+ if (pData == NULL)
+ {
+ //
+ // Something is wrong.
+ //
+ _ASSERTE(!"Why do we have a NULL data pointer here?");
+ return NULL;
+ }
+
+ //
+ // If the argument was passed on the stack, compute its address from ESP.
+ //
+ if (TransitionBlock::IsStackArgumentOffset(argOffset))
+ {
+ return ((LPBYTE)pData->esp) + (argOffset - TransitionBlock::GetOffsetOfArgs());
+ }
+
+ switch (argOffset - TransitionBlock::GetOffsetOfArgumentRegisters())
+ {
+ case offsetof(ArgumentRegisters, ECX):
+ return &(pData->ecx);
+ case offsetof(ArgumentRegisters, EDX):
+ return &(pData->edx);
+ }
+
+ _ASSERTE(!"Arg is an unsaved register!");
+ return NULL;
+}
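+
+// Typical usage (sketch, assuming a MetaSig and a handle obtained from an
+// Enter/Leave/Tailcall callback):
+//
+//   ProfileArgIterator it(&metaSig, platformSpecificHandle);
+//   for (LPVOID pArg = it.GetNextArgAddr(); pArg != NULL; pArg = it.GetNextArgAddr())
+//   {
+//       // interpret *pArg according to the corresponding signature element
+//   }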
+
+/*
+ * ProfileArgIterator::GetHiddenArgValue
+ *
+ * Called after initialization, any number of times, to retrieve any
+ * hidden argument, so that resolution for Generics can be done.
+ *
+ * Parameters:
+ * None.
+ *
+ * Returns:
+ * Value of the hidden parameter, or NULL if none exists.
+ */
+LPVOID ProfileArgIterator::GetHiddenArgValue(void)
+{
+ //
+ // It would be really nice to contract this, but the underlying functions are convolutedly
+ // contracted. Basically everything should be loaded by the time the profiler gets a call
+ // back, so everything is NOTHROW/NOTRIGGER, but there is currently no mechanism
+ // for saying that the contracts in called functions should be for the best case,
+ // not the worst case.
+ //
+ WRAPPER_NO_CONTRACT;
+
+ PROFILE_PLATFORM_SPECIFIC_DATA *pData = (PROFILE_PLATFORM_SPECIFIC_DATA *)m_handle;
+
+ MethodDesc *pMethodDesc = FunctionIdToMethodDesc(pData->functionId);
+
+ if (!pMethodDesc->RequiresInstArg())
+ {
+ return NULL;
+ }
+
+ //
+ // ArgIterator::GetParamTypeArgOffset() can only be called after GetNextOffset has been
+ // called enough times to walk the entire signature, but *before* GetNextOffset returns
+ // TransitionBlock::InvalidOffset - indicating the end.
+ //
+
+ //
+ // Get the offset of the hidden arg
+ //
+ int argOffset = m_argIterator.GetParamTypeArgOffset();
+
+ //
+ // If the hidden arg was passed on the stack, read it from there.
+ //
+ if (TransitionBlock::IsStackArgumentOffset(argOffset))
+ {
+ return *(LPVOID *)(((LPBYTE)pData->esp) + (argOffset - TransitionBlock::GetOffsetOfArgs()));
+ }
+
+ switch (argOffset - TransitionBlock::GetOffsetOfArgumentRegisters())
+ {
+ case offsetof(ArgumentRegisters, ECX):
+ return (LPVOID)(pData->ecx);
+ case offsetof(ArgumentRegisters, EDX):
+ return (LPVOID)(pData->edx);
+ }
+
+ _ASSERTE(!"Arg is an unsaved register!");
+ return NULL;
+}
+
+/*
+ * ProfileArgIterator::GetThis
+ *
+ * Called after initialization, any number of times, to retrieve the
+ * value of 'this'.
+ *
+ * Parameters:
+ * None.
+ *
+ * Returns:
+ * value of the 'this' parameter, or NULL if none exists.
+ */
+LPVOID ProfileArgIterator::GetThis(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ PROFILE_PLATFORM_SPECIFIC_DATA *pData = (PROFILE_PLATFORM_SPECIFIC_DATA *)m_handle;
+
+ if (pData->ip == 0)
+ {
+ return NULL;
+ }
+
+ if (!m_argIterator.HasThis())
+ {
+ return NULL;
+ }
+
+ switch (offsetof(ArgumentRegisters, THIS_REG))
+ {
+ case offsetof(ArgumentRegisters, ECX):
+ return (LPVOID)pData->ecx;
+
+ case offsetof(ArgumentRegisters, EDX):
+ return (LPVOID)pData->edx;
+ }
+
+ _ASSERTE(!"This is an unsaved register!");
+ return NULL;
+}
+
+
+
+/*
+ * ProfileArgIterator::GetReturnBufferAddr
+ *
+ * Called after initialization, any number of times, to retrieve the
+ * address of the return buffer. NULL indicates no return value.
+ *
+ * Parameters:
+ * None.
+ *
+ * Returns:
+ * Address of the return buffer, or NULL if none exists.
+ */
+LPVOID ProfileArgIterator::GetReturnBufferAddr(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ PROFILE_PLATFORM_SPECIFIC_DATA *pData = (PROFILE_PLATFORM_SPECIFIC_DATA *)m_handle;
+
+ if (m_argIterator.HasRetBuffArg())
+ {
+ return (void *)(pData->eax);
+ }
+
+ switch (m_argIterator.GetSig()->GetReturnType())
+ {
+ case ELEMENT_TYPE_R8:
+ _ASSERTE(pData->floatingPointValuePresent);
+ return (void *)(&(pData->doubleBuffer1));
+
+ case ELEMENT_TYPE_R4:
+ _ASSERTE(pData->floatingPointValuePresent);
+ return (void *)(&(pData->floatBuffer));
+
+ default:
+ return &(pData->eax);
+ }
+
+ _ASSERTE(!"SHOULD NOT REACH HERE!");
+}
+
+#endif // PROFILING_SUPPORTED
+
diff --git a/src/vm/i386/remotingx86.cpp b/src/vm/i386/remotingx86.cpp
new file mode 100644
index 0000000000..913e87a2b4
--- /dev/null
+++ b/src/vm/i386/remotingx86.cpp
@@ -0,0 +1,226 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+//
+//
+// File: remotingx86.cpp
+//
+
+//
+//
+// Purpose: Defines various remoting related functions for the x86 architecture
+//
+
+//
+//
+
+//
+
+#include "common.h"
+
+#ifdef FEATURE_REMOTING
+
+#include "excep.h"
+#include "comdelegate.h"
+#include "remoting.h"
+#include "field.h"
+#include "siginfo.hpp"
+#include "stackbuildersink.h"
+#include "threads.h"
+#include "method.hpp"
+#include "asmconstants.h"
+#include "interoputil.h"
+#include "virtualcallstub.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "comcallablewrapper.h"
+#include "comcache.h"
+#endif // FEATURE_COMINTEROP
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::CreateThunkForVirtualMethod private
+//
+// Synopsis: Creates the thunk that pushes the supplied slot number and jumps
+// to TP Stub
+//
+//+----------------------------------------------------------------------------
+PCODE CTPMethodTable::CreateThunkForVirtualMethod(DWORD dwSlot, BYTE *startaddr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(startaddr));
+ }
+ CONTRACTL_END;
+
+ BYTE *pCode = startaddr;
+
+ // 0000 B8 67 45 23 01 MOV EAX, dwSlot
+ // 0005 E9 ?? ?? ?? ?? JMP TransparentProxyStub
+ *pCode++ = 0xB8;
+ *((DWORD *) pCode) = dwSlot;
+ pCode += sizeof(DWORD);
+ *pCode++ = 0xE9;
+ // self-relative jump, relative to the start of the next instruction.
+ *((LONG *) pCode) = (LONG)((size_t)GetTPStubEntryPoint() - (size_t) (pCode + sizeof(LONG)));
+
+ _ASSERTE(CVirtualThunkMgr::IsThunkByASM((PCODE)startaddr));
+
+ return (PCODE)startaddr;
+}
+
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::ActivatePrecodeRemotingThunk private
+//
+// Synopsis: Patch the precode remoting thunk to begin interception
+//
+//+----------------------------------------------------------------------------
+void CTPMethodTable::ActivatePrecodeRemotingThunk()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Before activation:
+ // 0000 C3 ret
+ // 0001 90 nop
+
+ // After activation:
+ // 0000 85 C9 test ecx,ecx
+ // 0002 74 XX je RemotingDone
+ // 0004 81 39 XX XX XX XX cmp dword ptr [ecx],11111111h
+ // 000A 74 XX je RemotingCheck
+
+ // Offset of the embedded TP method table pointer and total size of the patched region.
+ BYTE* pCode = (BYTE*)PrecodeRemotingThunk;
+
+ SIZE_T mtOffset = 0x0006;
+ SIZE_T size = 0x000A;
+
+ // Patch "ret + nop" to "test ecx,ecx"
+ *(UINT16 *)pCode = 0xC985;
+
+ // Replace placeholder value with the actual address of TP method table
+ _ASSERTE(*(PVOID*)(pCode+mtOffset) == (PVOID*)0x11111111);
+ *(PVOID*)(pCode+mtOffset) = GetMethodTable();
+
+ FlushInstructionCache(GetCurrentProcess(), pCode, size);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CVirtualThunkMgr::DoTraceStub public
+//
+// Synopsis: Traces the stub given the starting address
+//
+//+----------------------------------------------------------------------------
+BOOL CVirtualThunkMgr::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(stubStartAddress != NULL);
+ PRECONDITION(CheckPointer(trace));
+ }
+ CONTRACTL_END;
+
+ BOOL bIsStub = FALSE;
+
+ // Find a thunk whose code address matches the starting address
+ LPBYTE pThunk = FindThunk((LPBYTE)stubStartAddress);
+ if(NULL != pThunk)
+ {
+ LPBYTE pbAddr = NULL;
+ LONG destAddress = 0;
+ if((LPBYTE)stubStartAddress == pThunk)
+ {
+
+ // Extract the long which gives the self relative address
+ // of the destination
+ pbAddr = pThunk + sizeof(BYTE) + sizeof(DWORD) + sizeof(BYTE);
+ destAddress = *(LONG *)pbAddr;
+
+ // Calculate the absolute address by adding the offset of the next
+ // instruction after the call instruction
+ destAddress += (LONG)(size_t)(pbAddr + sizeof(LONG));
+
+ }
+
+ // We cannot tell where the stub will end up until OnCall is reached.
+ // So we tell the debugger to run till OnCall is reached and then
+ // come back and ask us again for the actual destination address of
+ // the call
+
+ Stub *stub = Stub::RecoverStub((TADDR)destAddress);
+
+ trace->InitForFramePush(stub->GetPatchAddress());
+ bIsStub = TRUE;
+ }
+
+ return bIsStub;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CVirtualThunkMgr::IsThunkByASM public
+//
+// Synopsis: Check assembly to see if this is one of our thunks
+//
+//+----------------------------------------------------------------------------
+BOOL CVirtualThunkMgr::IsThunkByASM(PCODE startaddr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(startaddr != NULL);
+ }
+ CONTRACTL_END;
+
+ PTR_BYTE pbCode = PTR_BYTE(startaddr);
+
+ return ((pbCode[0] == 0xB8) &&
+ (pbCode[5] == 0xe9) &&
+ (rel32Decode((TADDR)(pbCode + 6)) == CTPMethodTable::GetTPStubEntryPoint()));
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CVirtualThunkMgr::GetMethodDescByASM public
+//
+// Synopsis: Parses MethodDesc out of assembly code
+//
+//+----------------------------------------------------------------------------
+MethodDesc *CVirtualThunkMgr::GetMethodDescByASM(PCODE startaddr, MethodTable *pMT)
+{
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(startaddr != NULL);
+ PRECONDITION(CheckPointer(pMT));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN (pMT->GetMethodDescForSlot(*((DWORD *) (startaddr + 1))));
+}
+
+#endif// FEATURE_REMOTING
+
diff --git a/src/vm/i386/stublinkerx86.cpp b/src/vm/i386/stublinkerx86.cpp
new file mode 100644
index 0000000000..eb744615eb
--- /dev/null
+++ b/src/vm/i386/stublinkerx86.cpp
@@ -0,0 +1,6741 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+// NOTE on Frame Size C_ASSERT usage in this file:
+// If the frame size changes, then the stubs have to be revisited for correctness.
+// Kindly revisit the logic and then update the constants so that the C_ASSERT will
+// again fire if someone changes the frame size. You are expected to keep this hard
+// coded constant up to date so that changes in the frame size trigger errors at
+// compile time if the code is not altered.
+
+// Precompiled Header
+
+#include "common.h"
+
+#include "field.h"
+#include "stublink.h"
+
+#include "tls.h"
+#include "frames.h"
+#include "excep.h"
+#include "dllimport.h"
+#include "log.h"
+#include "security.h"
+#include "comdelegate.h"
+#include "array.h"
+#include "jitinterface.h"
+#include "codeman.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "dbginterface.h"
+#include "eeprofinterfaces.h"
+#include "eeconfig.h"
+#include "securitydeclarative.h"
+#ifdef _TARGET_X86_
+#include "asmconstants.h"
+#endif // _TARGET_X86_
+#include "class.h"
+#include "stublink.inl"
+
+#ifdef FEATURE_COMINTEROP
+#include "comtoclrcall.h"
+#include "runtimecallablewrapper.h"
+#include "comcache.h"
+#include "olevariant.h"
+#include "notifyexternals.h"
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif
+
+#if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO)
+#include <psapi.h>
+#endif
+
+
+#ifndef DACCESS_COMPILE
+
+extern "C" VOID __cdecl StubRareEnable(Thread *pThread);
+#ifdef FEATURE_COMINTEROP
+extern "C" HRESULT __cdecl StubRareDisableHR(Thread *pThread);
+#endif // FEATURE_COMINTEROP
+extern "C" VOID __cdecl StubRareDisableTHROW(Thread *pThread, Frame *pFrame);
+
+extern "C" VOID __cdecl ArrayOpStubNullException(void);
+extern "C" VOID __cdecl ArrayOpStubRangeException(void);
+extern "C" VOID __cdecl ArrayOpStubTypeMismatchException(void);
+
+#if defined(_TARGET_AMD64_)
+#define EXCEPTION_HELPERS(base) \
+ extern "C" VOID __cdecl base##_RSIRDI_ScratchArea(void); \
+ extern "C" VOID __cdecl base##_ScratchArea(void); \
+ extern "C" VOID __cdecl base##_RSIRDI(void); \
+ extern "C" VOID __cdecl base(void)
+EXCEPTION_HELPERS(ArrayOpStubNullException);
+EXCEPTION_HELPERS(ArrayOpStubRangeException);
+EXCEPTION_HELPERS(ArrayOpStubTypeMismatchException);
+#undef EXCEPTION_HELPERS
+
+#if defined(_DEBUG)
+extern "C" VOID __cdecl DebugCheckStubUnwindInfo();
+#endif
+#endif // _TARGET_AMD64_
+
+// Presumably this code knows what it is doing with TLS. If we are hiding these
+// services from normal code, reveal them here.
+#ifdef TlsGetValue
+#undef TlsGetValue
+#endif
+
+#ifdef FEATURE_COMINTEROP
+Thread* __stdcall CreateThreadBlockReturnHr(ComMethodFrame *pFrame);
+#endif
+
+
+
+#ifdef _TARGET_AMD64_
+
+BOOL IsPreservedReg (X86Reg reg)
+{
+ UINT16 PreservedRegMask =
+ (1 << kRBX)
+ | (1 << kRBP)
+ | (1 << kRSI)
+ | (1 << kRDI)
+ | (1 << kR12)
+ | (1 << kR13)
+ | (1 << kR14)
+ | (1 << kR15);
+ return PreservedRegMask & (1 << reg);
+}
+
+#endif // _TARGET_AMD64_
+
+#ifdef _TARGET_AMD64_
+//-----------------------------------------------------------------------
+// InstructionFormat for near Jump and short Jump
+//-----------------------------------------------------------------------
+
+// Used by X86EmitTailcallWithESPAdjust to emit the RSP-adjusting tailcall sequence.
+class X64NearJumpSetup : public InstructionFormat
+{
+ public:
+ X64NearJumpSetup() : InstructionFormat( InstructionFormat::k8|InstructionFormat::k32
+ | InstructionFormat::k64Small | InstructionFormat::k64
+ )
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
+ {
+ LIMITED_METHOD_CONTRACT
+ switch (refsize)
+ {
+ case k8:
+ return 0;
+
+ case k32:
+ return 0;
+
+ case k64Small:
+ return 5;
+
+ case k64:
+ return 10;
+
+ default:
+ _ASSERTE(!"unexpected refsize");
+ return 0;
+
+ }
+ }
+
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ {
+ LIMITED_METHOD_CONTRACT
+ if (k8 == refsize)
+ {
+ // do nothing, X64NearJump will take care of this
+ }
+ else if (k32 == refsize)
+ {
+ // do nothing, X64NearJump will take care of this
+ }
+ else if (k64Small == refsize)
+ {
+ UINT64 TargetAddress = (INT64)pOutBuffer + fixedUpReference + GetSizeOfInstruction(refsize, variationCode);
+ _ASSERTE(FitsInU4(TargetAddress));
+
+ // mov eax, imm32 ; zero-extended
+ pOutBuffer[0] = 0xB8;
+ *((UINT32*)&pOutBuffer[1]) = (UINT32)TargetAddress;
+ }
+ else if (k64 == refsize)
+ {
+ // mov rax, imm64
+ pOutBuffer[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ pOutBuffer[1] = 0xB8;
+ *((UINT64*)&pOutBuffer[2]) = (UINT64)(((INT64)pOutBuffer) + fixedUpReference + GetSizeOfInstruction(refsize, variationCode));
+ }
+ else
+ {
+ _ASSERTE(!"unreached");
+ }
+ }
+
+ virtual BOOL CanReach(UINT refsize, UINT variationCode, BOOL fExternal, INT_PTR offset)
+ {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+
+ if (fExternal)
+ {
+ switch (refsize)
+ {
+ case InstructionFormat::k8:
+ // For external, we don't have enough info to predict
+ // the offset.
+ return FALSE;
+
+ case InstructionFormat::k32:
+ return sizeof(PVOID) <= sizeof(UINT32);
+
+ case InstructionFormat::k64Small:
+ return FitsInI4(offset);
+
+ case InstructionFormat::k64:
+ // intentional fallthru
+ case InstructionFormat::kAllowAlways:
+ return TRUE;
+
+ default:
+ _ASSERTE(0);
+ return FALSE;
+ }
+ }
+ else
+ {
+ switch (refsize)
+ {
+ case InstructionFormat::k8:
+ return FitsInI1(offset);
+
+ case InstructionFormat::k32:
+ return FitsInI4(offset);
+
+ case InstructionFormat::k64Small:
+ // EmitInstruction emits a non-relative jmp for
+ // k64Small. We don't have enough info to predict the
+ // target address. (Even if we did, this would only
+ // handle the set of unsigned offsets with bit 31 set
+ // and no higher bits set, too uncommon/hard to test.)
+ return FALSE;
+
+ case InstructionFormat::k64:
+ // intentional fallthru
+ case InstructionFormat::kAllowAlways:
+ return TRUE;
+ default:
+ _ASSERTE(0);
+ return FALSE;
+ }
+ }
+ }
+};
+
+class X64NearJumpExecute : public InstructionFormat
+{
+ public:
+ X64NearJumpExecute() : InstructionFormat( InstructionFormat::k8|InstructionFormat::k32
+ | InstructionFormat::k64Small | InstructionFormat::k64
+ )
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
+ {
+ LIMITED_METHOD_CONTRACT
+ switch (refsize)
+ {
+ case k8:
+ return 2;
+
+ case k32:
+ return 5;
+
+ case k64Small:
+ return 3;
+
+ case k64:
+ return 3;
+
+ default:
+ _ASSERTE(!"unexpected refsize");
+ return 0;
+
+ }
+ }
+
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ {
+ LIMITED_METHOD_CONTRACT
+ if (k8 == refsize)
+ {
+ pOutBuffer[0] = 0xeb;
+ *((__int8*)(pOutBuffer+1)) = (__int8)fixedUpReference;
+ }
+ else if (k32 == refsize)
+ {
+ pOutBuffer[0] = 0xe9;
+ *((__int32*)(pOutBuffer+1)) = (__int32)fixedUpReference;
+ }
+ else if (k64Small == refsize)
+ {
+ // REX.W jmp rax
+ pOutBuffer[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ pOutBuffer[1] = 0xFF;
+ pOutBuffer[2] = 0xE0;
+ }
+ else if (k64 == refsize)
+ {
+ // REX.W jmp rax
+ pOutBuffer[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ pOutBuffer[1] = 0xFF;
+ pOutBuffer[2] = 0xE0;
+ }
+ else
+ {
+ _ASSERTE(!"unreached");
+ }
+ }
+
+ virtual BOOL CanReach(UINT refsize, UINT variationCode, BOOL fExternal, INT_PTR offset)
+ {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+
+ if (fExternal)
+ {
+ switch (refsize)
+ {
+ case InstructionFormat::k8:
+ // For external, we don't have enough info to predict
+ // the offset.
+ return FALSE;
+
+ case InstructionFormat::k32:
+ return sizeof(PVOID) <= sizeof(UINT32);
+
+ case InstructionFormat::k64Small:
+ return FitsInI4(offset);
+
+ case InstructionFormat::k64:
+ // intentional fallthru
+ case InstructionFormat::kAllowAlways:
+ return TRUE;
+
+ default:
+ _ASSERTE(0);
+ return FALSE;
+ }
+ }
+ else
+ {
+ switch (refsize)
+ {
+ case InstructionFormat::k8:
+ return FitsInI1(offset);
+
+ case InstructionFormat::k32:
+ return FitsInI4(offset);
+
+ case InstructionFormat::k64Small:
+ // EmitInstruction emits a non-relative jmp for
+ // k64Small. We don't have enough info to predict the
+ // target address. (Even if we did, this would only
+ // handle the set of unsigned offsets with bit 31 set
+ // and no higher bits set, too uncommon/hard to test.)
+ return FALSE;
+
+ case InstructionFormat::k64:
+ // intentional fallthru
+ case InstructionFormat::kAllowAlways:
+ return TRUE;
+ default:
+ _ASSERTE(0);
+ return FALSE;
+ }
+ }
+ }
+};
+
+#endif
+
+//-----------------------------------------------------------------------
+// InstructionFormat for near Jump and short Jump
+//-----------------------------------------------------------------------
+class X86NearJump : public InstructionFormat
+{
+ public:
+ X86NearJump() : InstructionFormat( InstructionFormat::k8|InstructionFormat::k32
+#ifdef _TARGET_AMD64_
+ | InstructionFormat::k64Small | InstructionFormat::k64
+#endif // _TARGET_AMD64_
+ )
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
+ {
+ LIMITED_METHOD_CONTRACT
+ switch (refsize)
+ {
+ case k8:
+ return 2;
+
+ case k32:
+ return 5;
+#ifdef _TARGET_AMD64_
+ case k64Small:
+ return 5 + 2;
+
+ case k64:
+ return 12;
+#endif // _TARGET_AMD64_
+ default:
+ _ASSERTE(!"unexpected refsize");
+ return 0;
+
+ }
+ }
+
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ {
+ LIMITED_METHOD_CONTRACT
+ if (k8 == refsize)
+ {
+ pOutBuffer[0] = 0xeb;
+ *((__int8*)(pOutBuffer+1)) = (__int8)fixedUpReference;
+ }
+ else if (k32 == refsize)
+ {
+ pOutBuffer[0] = 0xe9;
+ *((__int32*)(pOutBuffer+1)) = (__int32)fixedUpReference;
+ }
+#ifdef _TARGET_AMD64_
+ else if (k64Small == refsize)
+ {
+ UINT64 TargetAddress = (INT64)pOutBuffer + fixedUpReference + GetSizeOfInstruction(refsize, variationCode);
+ _ASSERTE(FitsInU4(TargetAddress));
+
+ // mov eax, imm32 ; zero-extended
+ pOutBuffer[0] = 0xB8;
+ *((UINT32*)&pOutBuffer[1]) = (UINT32)TargetAddress;
+
+ // jmp rax
+ pOutBuffer[5] = 0xFF;
+ pOutBuffer[6] = 0xE0;
+ }
+ else if (k64 == refsize)
+ {
+ // mov rax, imm64
+ pOutBuffer[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ pOutBuffer[1] = 0xB8;
+ *((UINT64*)&pOutBuffer[2]) = (UINT64)(((INT64)pOutBuffer) + fixedUpReference + GetSizeOfInstruction(refsize, variationCode));
+
+ // jmp rax
+ pOutBuffer[10] = 0xFF;
+ pOutBuffer[11] = 0xE0;
+ }
+#endif // _TARGET_AMD64_
+ else
+ {
+ _ASSERTE(!"unreached");
+ }
+ }
+
+ virtual BOOL CanReach(UINT refsize, UINT variationCode, BOOL fExternal, INT_PTR offset)
+ {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+
+ if (fExternal)
+ {
+ switch (refsize)
+ {
+ case InstructionFormat::k8:
+ // For external, we don't have enough info to predict
+ // the offset.
+ return FALSE;
+
+ case InstructionFormat::k32:
+ return sizeof(PVOID) <= sizeof(UINT32);
+
+#ifdef _TARGET_AMD64_
+ case InstructionFormat::k64Small:
+ return FitsInI4(offset);
+
+ case InstructionFormat::k64:
+ // intentional fallthru
+#endif
+ case InstructionFormat::kAllowAlways:
+ return TRUE;
+
+ default:
+ _ASSERTE(0);
+ return FALSE;
+ }
+ }
+ else
+ {
+ switch (refsize)
+ {
+ case InstructionFormat::k8:
+ return FitsInI1(offset);
+
+ case InstructionFormat::k32:
+#ifdef _TARGET_AMD64_
+ return FitsInI4(offset);
+#else
+ return TRUE;
+#endif
+
+#ifdef _TARGET_AMD64_
+ case InstructionFormat::k64Small:
+ // EmitInstruction emits a non-relative jmp for
+ // k64Small. We don't have enough info to predict the
+ // target address. (Even if we did, this would only
+ // handle the set of unsigned offsets with bit 31 set
+ // and no higher bits set, too uncommon/hard to test.)
+ return FALSE;
+
+ case InstructionFormat::k64:
+ // intentional fallthru
+#endif
+ case InstructionFormat::kAllowAlways:
+ return TRUE;
+ default:
+ _ASSERTE(0);
+ return FALSE;
+ }
+ }
+ }
+};
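+
+// How the stub linker uses these formats (a rough sketch): for each candidate
+// refsize, CanReach() filters out encodings whose displacement cannot express
+// the (estimated) offset, the smallest surviving encoding is picked, and
+// EmitInstruction() then writes exactly GetSizeOfInstruction() bytes:
+//
+//   // hypothetical selection loop
+//   for (UINT refsize : { k8, k32, k64Small, k64 })
+//   {
+//       if (fmt.CanReach(refsize, variation, fExternal, offset))
+//       {
+//           fmt.EmitInstruction(refsize, offset, pOut, variation, pData);
+//           break;
+//       }
+//   }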
+
+
+//-----------------------------------------------------------------------
+// InstructionFormat for conditional jump. Set the variationCode
+// to members of X86CondCode.
+//-----------------------------------------------------------------------
+class X86CondJump : public InstructionFormat
+{
+ public:
+ X86CondJump(UINT allowedSizes) : InstructionFormat(allowedSizes)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
+ {
+ LIMITED_METHOD_CONTRACT
+ return (refsize == k8 ? 2 : 6);
+ }
+
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ {
+ LIMITED_METHOD_CONTRACT
+ if (refsize == k8)
+ {
+ pOutBuffer[0] = static_cast<BYTE>(0x70 | variationCode);
+ *((__int8*)(pOutBuffer+1)) = (__int8)fixedUpReference;
+ }
+ else
+ {
+ pOutBuffer[0] = 0x0f;
+ pOutBuffer[1] = static_cast<BYTE>(0x80 | variationCode);
+ *((__int32*)(pOutBuffer+2)) = (__int32)fixedUpReference;
+ }
+ }
+};
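+
+// For example, the bytes this format produces (condcode 5 = NE):
+//
+//   jne short  =>  75 <disp8>      ; 0x70 | condcode
+//   jne near   =>  0F 85 <disp32>  ; 0x0F, 0x80 | condcode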
+
+
+//-----------------------------------------------------------------------
+// InstructionFormat for near call.
+//-----------------------------------------------------------------------
+class X86Call : public InstructionFormat
+{
+ public:
+ X86Call ()
+ : InstructionFormat( InstructionFormat::k32
+#ifdef _TARGET_AMD64_
+ | InstructionFormat::k64Small | InstructionFormat::k64
+#endif // _TARGET_AMD64_
+ )
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ switch (refsize)
+ {
+ case k32:
+ return 5;
+
+#ifdef _TARGET_AMD64_
+ case k64Small:
+ return 5 + 2;
+
+ case k64:
+ return 10 + 2;
+#endif // _TARGET_AMD64_
+
+ default:
+ _ASSERTE(!"unexpected refsize");
+ return 0;
+ }
+ }
+
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ {
+ LIMITED_METHOD_CONTRACT
+
+ switch (refsize)
+ {
+ case k32:
+ pOutBuffer[0] = 0xE8;
+ *((__int32*)(1+pOutBuffer)) = (__int32)fixedUpReference;
+ break;
+
+#ifdef _TARGET_AMD64_
+ case k64Small:
+ UINT64 TargetAddress;
+
+ TargetAddress = (INT64)pOutBuffer + fixedUpReference + GetSizeOfInstruction(refsize, variationCode);
+ _ASSERTE(FitsInU4(TargetAddress));
+
+ // mov eax,<fixedUpReference> ; zero-extends
+ pOutBuffer[0] = 0xB8;
+ *((UINT32*)&pOutBuffer[1]) = (UINT32)TargetAddress;
+
+ // call rax
+ pOutBuffer[5] = 0xff;
+ pOutBuffer[6] = 0xd0;
+ break;
+
+ case k64:
+ // mov rax,<fixedUpReference>
+ pOutBuffer[0] = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ pOutBuffer[1] = 0xB8;
+ *((UINT64*)&pOutBuffer[2]) = (UINT64)(((INT64)pOutBuffer) + fixedUpReference + GetSizeOfInstruction(refsize, variationCode));
+
+ // call rax
+ pOutBuffer[10] = 0xff;
+ pOutBuffer[11] = 0xd0;
+ break;
+#endif // _TARGET_AMD64_
+
+ default:
+ _ASSERTE(!"unreached");
+ break;
+ }
+ }
+
+// For x86, the default CanReach implementation will suffice. It only needs
+// to handle k32.
+#ifdef _TARGET_AMD64_
+ virtual BOOL CanReach(UINT refsize, UINT variationCode, BOOL fExternal, INT_PTR offset)
+ {
+ if (fExternal)
+ {
+ switch (refsize)
+ {
+ case InstructionFormat::k32:
+ // For external, we don't have enough info to predict
+ // the offset.
+ return FALSE;
+
+ case InstructionFormat::k64Small:
+ return FitsInI4(offset);
+
+ case InstructionFormat::k64:
+ // intentional fallthru
+ case InstructionFormat::kAllowAlways:
+ return TRUE;
+
+ default:
+ _ASSERTE(0);
+ return FALSE;
+ }
+ }
+ else
+ {
+ switch (refsize)
+ {
+ case InstructionFormat::k32:
+ return FitsInI4(offset);
+
+ case InstructionFormat::k64Small:
+ // EmitInstruction emits a non-relative jmp for
+ // k64Small. We don't have enough info to predict the
+ // target address. (Even if we did, this would only
+ // handle the set of unsigned offsets with bit 31 set
+ // and no higher bits set, too uncommon/hard to test.)
+ return FALSE;
+
+ case InstructionFormat::k64:
+ // intentional fallthru
+ case InstructionFormat::kAllowAlways:
+ return TRUE;
+ default:
+ _ASSERTE(0);
+ return FALSE;
+ }
+ }
+ }
+#endif // _TARGET_AMD64_
+};
+
+
+//-----------------------------------------------------------------------
+// InstructionFormat for push imm32.
+//-----------------------------------------------------------------------
+class X86PushImm32 : public InstructionFormat
+{
+ public:
+ X86PushImm32(UINT allowedSizes) : InstructionFormat(allowedSizes)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return 5;
+ }
+
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ pOutBuffer[0] = 0x68;
+ // Only supports absolute pushimm32 of the label address. The fixedUpReference
+ // is the offset to the label from the current point, so add to get the address.
+ *((__int32*)(1+pOutBuffer)) = (__int32)(fixedUpReference);
+ }
+};
+
+#if defined(_TARGET_AMD64_)
+//-----------------------------------------------------------------------
+// InstructionFormat for lea reg, [RIP relative].
+//-----------------------------------------------------------------------
+class X64LeaRIP : public InstructionFormat
+{
+ public:
+ X64LeaRIP() : InstructionFormat(InstructionFormat::k64Small)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return 7;
+ }
+
+ virtual BOOL CanReach(UINT refsize, UINT variationCode, BOOL fExternal, INT_PTR offset)
+ {
+ if (fExternal)
+ {
+ switch (refsize)
+ {
+ case InstructionFormat::k64Small:
+ // For external, we don't have enough info to predict
+ // the offset.
+ return FALSE;
+
+ case InstructionFormat::k64:
+ // intentional fallthru
+ case InstructionFormat::kAllowAlways:
+ return TRUE;
+
+ default:
+ _ASSERTE(0);
+ return FALSE;
+ }
+ }
+ else
+ {
+ switch (refsize)
+ {
+ case InstructionFormat::k64Small:
+ return FitsInI4(offset);
+
+ case InstructionFormat::k64:
+ // intentional fallthru
+ case InstructionFormat::kAllowAlways:
+ return TRUE;
+
+ default:
+ _ASSERTE(0);
+ return FALSE;
+ }
+ }
+ }
+
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBuffer, UINT variationCode, BYTE *pDataBuffer)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ X86Reg reg = (X86Reg)variationCode;
+ BYTE rex = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+
+ if (reg >= kR8)
+ {
+ rex |= REX_MODRM_REG_EXT;
+ reg = X86RegFromAMD64Reg(reg);
+ }
+
+ pOutBuffer[0] = rex;
+ pOutBuffer[1] = 0x8D;
+ pOutBuffer[2] = 0x05 | (reg << 3);
+ // The fixedUpReference is the RIP-relative offset to the label from the
+ // end of this instruction, so it is stored directly as the disp32.
+ *((__int32*)(3+pOutBuffer)) = (__int32)(fixedUpReference);
+ }
+};
+
+#endif // _TARGET_AMD64_
+
+#if defined(_TARGET_AMD64_)
+static BYTE gX64NearJumpSetup[sizeof(X64NearJumpSetup)];
+static BYTE gX64NearJumpExecute[sizeof(X64NearJumpExecute)];
+static BYTE gX64LeaRIP[sizeof(X64LeaRIP)];
+#endif
+
+static BYTE gX86NearJump[sizeof(X86NearJump)];
+static BYTE gX86CondJump[sizeof(X86CondJump)];
+static BYTE gX86Call[sizeof(X86Call)];
+static BYTE gX86PushImm32[sizeof(X86PushImm32)];
+
+/* static */ void StubLinkerCPU::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+ new (gX86NearJump) X86NearJump();
+ new (gX86CondJump) X86CondJump( InstructionFormat::k8|InstructionFormat::k32);
+ new (gX86Call) X86Call();
+ new (gX86PushImm32) X86PushImm32(InstructionFormat::k32);
+
+#if defined(_TARGET_AMD64_)
+ new (gX64NearJumpSetup) X64NearJumpSetup();
+ new (gX64NearJumpExecute) X64NearJumpExecute();
+ new (gX64LeaRIP) X64LeaRIP();
+#endif
+}
+
+//---------------------------------------------------------------
+// Emits:
+// mov destReg, srcReg
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitMovRegReg(X86Reg destReg, X86Reg srcReg)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _TARGET_AMD64_
+ BYTE rex = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+
+ if (destReg >= kR8)
+ {
+ rex |= REX_MODRM_RM_EXT;
+ destReg = X86RegFromAMD64Reg(destReg);
+ }
+ if (srcReg >= kR8)
+ {
+ rex |= REX_MODRM_REG_EXT;
+ srcReg = X86RegFromAMD64Reg(srcReg);
+ }
+ Emit8(rex);
+#endif
+
+ Emit8(0x89);
+ Emit8(static_cast<UINT8>(0xC0 | (srcReg << 3) | destReg));
+}
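+
+// Worked example (AMD64, illustrative): X86EmitMovRegReg(kR8, kECX) emits
+//
+//   49 89 C8    mov r8, rcx    ; REX.W+B = 0x49, ModR/M 0xC8 = 11 001 000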
+
+//---------------------------------------------------------------
+
+VOID StubLinkerCPU::X86EmitMovSPReg(X86Reg srcReg)
+{
+ STANDARD_VM_CONTRACT;
+ const X86Reg kESP = (X86Reg)4;
+ X86EmitMovRegReg(kESP, srcReg);
+}
+
+VOID StubLinkerCPU::X86EmitMovRegSP(X86Reg destReg)
+{
+ STANDARD_VM_CONTRACT;
+ const X86Reg kESP = (X86Reg)4;
+ X86EmitMovRegReg(destReg, kESP);
+}
+
+
+//---------------------------------------------------------------
+// Emits:
+// PUSH <reg32>
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitPushReg(X86Reg reg)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ X86Reg origReg = reg;
+#endif
+
+#ifdef _TARGET_AMD64_
+ if (reg >= kR8)
+ {
+ Emit8(REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT | REX_OPCODE_REG_EXT);
+ reg = X86RegFromAMD64Reg(reg);
+ }
+#endif
+ Emit8(static_cast<UINT8>(0x50 + reg));
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ if (IsPreservedReg(origReg))
+ {
+ UnwindPushedReg(origReg);
+ }
+ else
+#endif
+ {
+ Push(sizeof(void*));
+ }
+}
+
+
+//---------------------------------------------------------------
+// Emits:
+// POP <reg32>
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitPopReg(X86Reg reg)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _TARGET_AMD64_
+ if (reg >= kR8)
+ {
+ Emit8(REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT | REX_OPCODE_REG_EXT);
+ reg = X86RegFromAMD64Reg(reg);
+ }
+#endif // _TARGET_AMD64_
+
+ Emit8(static_cast<UINT8>(0x58 + reg));
+ Pop(sizeof(void*));
+}
+
+//---------------------------------------------------------------
+// Emits:
+// PUSH <imm32>
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitPushImm32(UINT32 value)
+{
+ STANDARD_VM_CONTRACT;
+
+ Emit8(0x68);
+ Emit32(value);
+ Push(sizeof(void*));
+}
+
+
+//---------------------------------------------------------------
+// Emits:
+// PUSH <imm32>
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitPushImm32(CodeLabel &target)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLabelRef(&target, reinterpret_cast<X86PushImm32&>(gX86PushImm32), 0);
+}
+
+
+//---------------------------------------------------------------
+// Emits:
+// PUSH <imm8>
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitPushImm8(BYTE value)
+{
+ STANDARD_VM_CONTRACT;
+
+ Emit8(0x6a);
+ Emit8(value);
+ Push(sizeof(void*));
+}
+
+
+//---------------------------------------------------------------
+// Emits:
+// PUSH <ptr>
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitPushImmPtr(LPVOID value WIN64_ARG(X86Reg tmpReg /*=kR10*/))
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _TARGET_AMD64_
+ X86EmitRegLoad(tmpReg, (UINT_PTR) value);
+ X86EmitPushReg(tmpReg);
+#else
+ X86EmitPushImm32((UINT_PTR) value);
+#endif
+}
+
+//---------------------------------------------------------------
+// Emits:
+// XOR <reg32>,<reg32>
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitZeroOutReg(X86Reg reg)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _TARGET_AMD64_
+ // 32-bit results are zero-extended, so we only need the REX byte if
+ // it's an extended register.
+ if (reg >= kR8)
+ {
+ Emit8(REX_PREFIX_BASE | REX_MODRM_REG_EXT | REX_MODRM_RM_EXT);
+ reg = X86RegFromAMD64Reg(reg);
+ }
+#endif
+ Emit8(0x33);
+ Emit8(static_cast<UINT8>(0xc0 | (reg << 3) | reg));
+}
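+
+// Worked examples (illustrative):
+//
+//   X86EmitZeroOutReg(kEAX)  =>  33 C0     xor eax, eax
+//   X86EmitZeroOutReg(kR8)   =>  45 33 C0  xor r8d, r8d
+//   (the 32-bit form still clears all 64 bits via zero-extension)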
+
+//---------------------------------------------------------------
+// Emits:
+// jmp [reg]
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitJumpReg(X86Reg reg)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ Emit8(0xff);
+ Emit8(static_cast<BYTE>(0xe0) | static_cast<BYTE>(reg));
+}
+
+//---------------------------------------------------------------
+// Emits:
+// CMP <reg32>,imm32
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitCmpRegImm32(X86Reg reg, INT32 imm32)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION((int) reg < NumX86Regs);
+ }
+ CONTRACTL_END;
+
+#ifdef _TARGET_AMD64_
+ BYTE rex = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+
+ if (reg >= kR8)
+ {
+ rex |= REX_OPCODE_REG_EXT;
+ reg = X86RegFromAMD64Reg(reg);
+ }
+ Emit8(rex);
+#endif
+
+ if (FitsInI1(imm32)) {
+ Emit8(0x83);
+ Emit8(static_cast<UINT8>(0xF8 | reg));
+ Emit8((INT8)imm32);
+ } else {
+ Emit8(0x81);
+ Emit8(static_cast<UINT8>(0xF8 | reg));
+ Emit32(imm32);
+ }
+}
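+
+// Worked examples on the AMD64 path (illustrative):
+//
+//   X86EmitCmpRegImm32(kEAX, 4)      =>  48 83 F8 04           cmp rax, 4
+//   X86EmitCmpRegImm32(kEAX, 0x1000) =>  48 81 F8 00 10 00 00  cmp rax, 1000h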
+
+#ifdef _TARGET_AMD64_
+//---------------------------------------------------------------
+// Emits:
+// CMP [reg+offs], imm32
+// CMP [reg], imm32
+//---------------------------------------------------------------
+VOID StubLinkerCPU:: X86EmitCmpRegIndexImm32(X86Reg reg, INT32 offs, INT32 imm32)
+{
+ STANDARD_VM_CONTRACT;
+
+ BYTE rex = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+
+ if (reg >= kR8)
+ {
+ rex |= REX_OPCODE_REG_EXT;
+ reg = X86RegFromAMD64Reg(reg);
+ }
+ Emit8(rex);
+
+ X64EmitCmp32RegIndexImm32(reg, offs, imm32);
+}
+
+VOID StubLinkerCPU:: X64EmitCmp32RegIndexImm32(X86Reg reg, INT32 offs, INT32 imm32)
+#else // _TARGET_AMD64_
+VOID StubLinkerCPU:: X86EmitCmpRegIndexImm32(X86Reg reg, INT32 offs, INT32 imm32)
+#endif // _TARGET_AMD64_
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION((int) reg < NumX86Regs);
+ }
+ CONTRACTL_END;
+
+ //
+ // The binary representation of "cmp [mem], imm" is:
+ // 1000-00sw mod-111-r/m (the 111 opcode extension selects CMP)
+ //
+
+ unsigned wBit = (FitsInI1(imm32) ? 0 : 1);
+ Emit8(static_cast<UINT8>(0x80 | wBit));
+
+ unsigned modBits;
+ if (offs == 0)
+ modBits = 0;
+ else if (FitsInI1(offs))
+ modBits = 1;
+ else
+ modBits = 2;
+
+ Emit8(static_cast<UINT8>((modBits << 6) | 0x38 | reg));
+
+ if (offs)
+ {
+ if (FitsInI1(offs))
+ Emit8((INT8)offs);
+ else
+ Emit32(offs);
+ }
+
+ if (FitsInI1(imm32))
+ Emit8((INT8)imm32);
+ else
+ Emit32(imm32);
+}
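+
+// Worked example (illustrative): reg=kECX, offs=0, imm32=1 emits
+//
+//   80 39 01    cmp byte ptr [ecx], 1
+//
+// Note that the byte-sized form (w=0) is selected whenever the immediate
+// fits in a signed byte.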
+
+//---------------------------------------------------------------
+// Emits:
+#if defined(_TARGET_AMD64_)
+// mov rax, <target>
+// add rsp, imm32
+// jmp rax
+#else
+// add rsp, imm32
+// jmp <target>
+#endif
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitTailcallWithESPAdjust(CodeLabel *pTarget, INT32 imm32)
+{
+ STANDARD_VM_CONTRACT;
+
+#if defined(_TARGET_AMD64_)
+ EmitLabelRef(pTarget, reinterpret_cast<X64NearJumpSetup&>(gX64NearJumpSetup), 0);
+ X86EmitAddEsp(imm32);
+ EmitLabelRef(pTarget, reinterpret_cast<X64NearJumpExecute&>(gX64NearJumpExecute), 0);
+#else
+ X86EmitAddEsp(imm32);
+ X86EmitNearJump(pTarget);
+#endif
+}
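+
+// e.g. on AMD64, with a k64-reach target and imm32 = 0x28, this produces
+// roughly:
+//
+//   48 B8 <imm64>    mov rax, <target>    ; X64NearJumpSetup
+//   48 83 C4 28      add rsp, 28h
+//   48 FF E0         jmp rax              ; X64NearJumpExecute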
+
+//---------------------------------------------------------------
+// Emits:
+#if defined(_TARGET_AMD64_)
+// mov rax, <target>
+// pop reg
+// jmp rax
+#else
+// pop reg
+// jmp <target>
+#endif
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitTailcallWithSinglePop(CodeLabel *pTarget, X86Reg reg)
+{
+ STANDARD_VM_CONTRACT;
+
+#if defined(_TARGET_AMD64_)
+ EmitLabelRef(pTarget, reinterpret_cast<X64NearJumpSetup&>(gX64NearJumpSetup), 0);
+ X86EmitPopReg(reg);
+ EmitLabelRef(pTarget, reinterpret_cast<X64NearJumpExecute&>(gX64NearJumpExecute), 0);
+#else
+ X86EmitPopReg(reg);
+ X86EmitNearJump(pTarget);
+#endif
+}
+
+//---------------------------------------------------------------
+// Emits:
+// JMP <ofs8> or
+// JMP <ofs32>
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitNearJump(CodeLabel *target)
+{
+ STANDARD_VM_CONTRACT;
+ EmitLabelRef(target, reinterpret_cast<X86NearJump&>(gX86NearJump), 0);
+}
+
+
+//---------------------------------------------------------------
+// Emits:
+// Jcc <ofs8> or
+// Jcc <ofs32>
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitCondJump(CodeLabel *target, X86CondCode::cc condcode)
+{
+ STANDARD_VM_CONTRACT;
+ EmitLabelRef(target, reinterpret_cast<X86CondJump&>(gX86CondJump), condcode);
+}
+
+
+//---------------------------------------------------------------
+// Emits:
+// call <ofs32>
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitCall(CodeLabel *target, int iArgBytes)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLabelRef(target, reinterpret_cast<X86Call&>(gX86Call), 0);
+
+ INDEBUG(Emit8(0x90)); // Emit a nop after the call in debug so that
+ // we know that this is a call that can directly call
+ // managed code
+#ifndef _TARGET_AMD64_
+ Pop(iArgBytes);
+#endif // !_TARGET_AMD64_
+}
+
+
+//---------------------------------------------------------------
+// Emits:
+// ret n
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitReturn(WORD wArgBytes)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+#ifdef _TARGET_AMD64_
+ PRECONDITION(wArgBytes == 0);
+#endif
+
+ }
+ CONTRACTL_END;
+
+ if (wArgBytes == 0)
+ Emit8(0xc3);
+ else
+ {
+ Emit8(0xc2);
+ Emit16(wArgBytes);
+ }
+
+ Pop(wArgBytes);
+}
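+
+// e.g. X86EmitReturn(0) emits C3 (ret), while X86EmitReturn(8) emits
+// C2 08 00 (ret 8), popping the callee-cleaned argument bytes.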
+
+#ifdef _TARGET_AMD64_
+//---------------------------------------------------------------
+// Emits:
+// lea <reg>, [rip + <target>]
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitLeaRIP(CodeLabel *target, X86Reg reg)
+{
+ STANDARD_VM_CONTRACT;
+ EmitLabelRef(target, reinterpret_cast<X64LeaRIP&>(gX64LeaRIP), reg);
+}
+#endif // _TARGET_AMD64_
+
+
+
+VOID StubLinkerCPU::X86EmitPushRegs(unsigned regSet)
+{
+ STANDARD_VM_CONTRACT;
+
+ for (X86Reg r = kEAX; r <= NumX86Regs; r = (X86Reg)(r+1))
+ if (regSet & (1U<<r))
+ {
+ X86EmitPushReg(r);
+ }
+}
+
+
+VOID StubLinkerCPU::X86EmitPopRegs(unsigned regSet)
+{
+ STANDARD_VM_CONTRACT;
+
+ for (X86Reg r = NumX86Regs; r >= kEAX; r = (X86Reg)(r-1))
+ if (regSet & (1U<<r))
+ X86EmitPopReg(r);
+}
+
+
+//---------------------------------------------------------------
+// Emits:
+// mov <dstreg>, [<srcreg> + <ofs>]
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitIndexRegLoad(X86Reg dstreg,
+ X86Reg srcreg,
+ __int32 ofs)
+{
+ STANDARD_VM_CONTRACT;
+ X86EmitOffsetModRM(0x8b, dstreg, srcreg, ofs);
+}
+
+
+//---------------------------------------------------------------
+// Emits:
+// mov [<dstreg> + <ofs>],<srcreg>
+//
+// Note: If you intend to use this to perform 64-bit moves to an
+// RSP-based offset, then this method may not work. Consider
+// using X86EmitIndexRegStoreRSP instead.
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitIndexRegStore(X86Reg dstreg,
+ __int32 ofs,
+ X86Reg srcreg)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (dstreg != kESP_Unsafe)
+ X86EmitOffsetModRM(0x89, srcreg, dstreg, ofs);
+ else
+ X86EmitOp(0x89, srcreg, (X86Reg)kESP_Unsafe, ofs);
+}
+
+#if defined(_TARGET_AMD64_)
+//---------------------------------------------------------------
+// Emits:
+// mov [RSP + <ofs>],<srcreg>
+//
+// It marks the instruction as 64-bit so that the processor
+// performs an 8-byte data move to an RSP-based stack location.
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitIndexRegStoreRSP(__int32 ofs,
+ X86Reg srcreg)
+{
+ STANDARD_VM_CONTRACT;
+
+ X86EmitOp(0x89, srcreg, (X86Reg)kESP_Unsafe, ofs, (X86Reg)0, 0, k64BitOp);
+}
+
+//---------------------------------------------------------------
+// Emits:
+// mov [R12 + <ofs>],<srcreg>
+//
+// It marks the instruction as 64-bit so that the processor
+// performs an 8-byte data move to an R12-based stack location.
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitIndexRegStoreR12(__int32 ofs,
+ X86Reg srcreg)
+{
+ STANDARD_VM_CONTRACT;
+
+ X86EmitOp(0x89, srcreg, (X86Reg)kR12, ofs, (X86Reg)0, 0, k64BitOp);
+}
+#endif // defined(_TARGET_AMD64_)
+
+//---------------------------------------------------------------
+// Emits:
+// push dword ptr [<srcreg> + <ofs>]
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitIndexPush(X86Reg srcreg, __int32 ofs)
+{
+ STANDARD_VM_CONTRACT;
+
+ if(srcreg != kESP_Unsafe)
+ X86EmitOffsetModRM(0xff, (X86Reg)0x6, srcreg, ofs);
+ else
+ X86EmitOp(0xff,(X86Reg)0x6, srcreg, ofs);
+
+ Push(sizeof(void*));
+}
+
+//---------------------------------------------------------------
+// Emits:
+// push dword ptr [<baseReg> + <indexReg>*<scale> + <ofs>]
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitBaseIndexPush(
+ X86Reg baseReg,
+ X86Reg indexReg,
+ __int32 scale,
+ __int32 ofs)
+{
+ STANDARD_VM_CONTRACT;
+
+ X86EmitOffsetModRmSIB(0xff, (X86Reg)0x6, baseReg, indexReg, scale, ofs);
+ Push(sizeof(void*));
+}
+
+//---------------------------------------------------------------
+// Emits:
+// push dword ptr [ESP + <ofs>]
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitSPIndexPush(__int32 ofs)
+{
+ STANDARD_VM_CONTRACT;
+
+ __int8 ofs8 = (__int8) ofs;
+ if (ofs == (__int32) ofs8)
+ {
+ // The offset can be expressed in a byte (can use the byte
+ // form of the push esp instruction)
+
+ BYTE code[] = {0xff, 0x74, 0x24, ofs8};
+ EmitBytes(code, sizeof(code));
+ }
+ else
+ {
+ // The offset requires 4 bytes (need to use the long form
+ // of the push esp instruction)
+
+ BYTE code[] = {0xff, 0xb4, 0x24, 0x0, 0x0, 0x0, 0x0};
+ *(__int32 *)(&code[3]) = ofs;
+ EmitBytes(code, sizeof(code));
+ }
+
+ Push(sizeof(void*));
+}
+
+
+//---------------------------------------------------------------
+// Emits:
+// pop dword ptr [<srcreg> + <ofs>]
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitIndexPop(X86Reg srcreg, __int32 ofs)
+{
+ STANDARD_VM_CONTRACT;
+
+ if(srcreg != kESP_Unsafe)
+ X86EmitOffsetModRM(0x8f, (X86Reg)0x0, srcreg, ofs);
+ else
+ X86EmitOp(0x8f,(X86Reg)0x0, srcreg, ofs);
+
+ Pop(sizeof(void*));
+}
+
+//---------------------------------------------------------------
+// Emits:
+// lea <dstreg>, [<srcreg> + <ofs>]
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitIndexLea(X86Reg dstreg, X86Reg srcreg, __int32 ofs)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION((int) dstreg < NumX86Regs);
+ PRECONDITION((int) srcreg < NumX86Regs);
+ }
+ CONTRACTL_END;
+
+ X86EmitOffsetModRM(0x8d, dstreg, srcreg, ofs);
+}
+
+#if defined(_TARGET_AMD64_)
+VOID StubLinkerCPU::X86EmitIndexLeaRSP(X86Reg dstreg, X86Reg srcreg, __int32 ofs)
+{
+ STANDARD_VM_CONTRACT;
+
+ X86EmitOp(0x8d, dstreg, (X86Reg)kESP_Unsafe, ofs, (X86Reg)0, 0, k64BitOp);
+}
+#endif // defined(_TARGET_AMD64_)
+
+//---------------------------------------------------------------
+// Emits:
+// sub esp, IMM
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitSubEsp(INT32 imm32)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (imm32 < 0x1000-100)
+ {
+ // As long as the adjustment is less than one page (minus a small
+ // safety fudge factor), we can just bump esp.
+ X86EmitSubEspWorker(imm32);
+ }
+ else
+ {
+ // Otherwise, must touch at least one byte for each page.
+ while (imm32 >= 0x1000)
+ {
+
+ X86EmitSubEspWorker(0x1000-4);
+ X86EmitPushReg(kEAX);
+
+ imm32 -= 0x1000;
+ }
+ if (imm32 < 500)
+ {
+ X86EmitSubEspWorker(imm32);
+ }
+ else
+ {
+ // If the remainder is large, touch the last byte - again,
+ // as a fudge factor.
+ X86EmitSubEspWorker(imm32-4);
+ X86EmitPushReg(kEAX);
+ }
+ }
+}
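+
+// e.g. (illustrative) X86EmitSubEsp(0x2100) expands to:
+//
+//   sub esp, 0FFCh / push eax   ; commits the first page
+//   sub esp, 0FFCh / push eax   ; commits the second page
+//   sub esp, 100h               ; small remainder, no touch needed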
+
+
+//---------------------------------------------------------------
+// Emits:
+// sub esp, IMM
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitSubEspWorker(INT32 imm32)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ // On Win32, stacks must be faulted in one page at a time.
+ PRECONDITION(imm32 < 0x1000);
+ }
+ CONTRACTL_END;
+
+ if (!imm32)
+ {
+ // nop
+ }
+ else
+ {
+ X86_64BitOperands();
+
+ if (FitsInI1(imm32))
+ {
+ Emit16(0xec83);
+ Emit8((INT8)imm32);
+ }
+ else
+ {
+ Emit16(0xec81);
+ Emit32(imm32);
+ }
+
+ Push(imm32);
+ }
+}
+
+
+//---------------------------------------------------------------
+// Emits:
+// add esp, IMM
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitAddEsp(INT32 imm32)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!imm32)
+ {
+ // nop
+ }
+ else
+ {
+ X86_64BitOperands();
+
+ if (FitsInI1(imm32))
+ {
+ Emit16(0xc483);
+ Emit8((INT8)imm32);
+ }
+ else
+ {
+ Emit16(0xc481);
+ Emit32(imm32);
+ }
+ }
+ Pop(imm32);
+}
+
+VOID StubLinkerCPU::X86EmitAddReg(X86Reg reg, INT32 imm32)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION((int) reg < NumX86Regs);
+ }
+ CONTRACTL_END;
+
+ if (imm32 == 0)
+ return;
+
+#ifdef _TARGET_AMD64_
+ BYTE rex = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+
+ if (reg >= kR8)
+ {
+ rex |= REX_OPCODE_REG_EXT;
+ reg = X86RegFromAMD64Reg(reg);
+ }
+ Emit8(rex);
+#endif
+
+ if (FitsInI1(imm32)) {
+ Emit8(0x83);
+ Emit8(static_cast<UINT8>(0xC0 | reg));
+ Emit8(static_cast<UINT8>(imm32));
+ } else {
+ Emit8(0x81);
+ Emit8(static_cast<UINT8>(0xC0 | reg));
+ Emit32(imm32);
+ }
+}
+
+//---------------------------------------------------------------
+// Emits: add destReg, srcReg
+//---------------------------------------------------------------
+
+VOID StubLinkerCPU::X86EmitAddRegReg(X86Reg destReg, X86Reg srcReg)
+{
+ STANDARD_VM_CONTRACT;
+
+ X86EmitR2ROp(0x01, srcReg, destReg);
+}
+
+
+
+
+VOID StubLinkerCPU::X86EmitSubReg(X86Reg reg, INT32 imm32)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION((int) reg < NumX86Regs);
+ }
+ CONTRACTL_END;
+
+#ifdef _TARGET_AMD64_
+ BYTE rex = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+
+ if (reg >= kR8)
+ {
+ rex |= REX_OPCODE_REG_EXT;
+ reg = X86RegFromAMD64Reg(reg);
+ }
+ Emit8(rex);
+#endif
+
+ if (FitsInI1(imm32)) {
+ Emit8(0x83);
+ Emit8(static_cast<UINT8>(0xE8 | reg));
+ Emit8(static_cast<UINT8>(imm32));
+ } else {
+ Emit8(0x81);
+ Emit8(static_cast<UINT8>(0xE8 | reg));
+ Emit32(imm32);
+ }
+}
+
+//---------------------------------------------------------------
+// Emits: sub destReg, srcReg
+//---------------------------------------------------------------
+
+VOID StubLinkerCPU::X86EmitSubRegReg(X86Reg destReg, X86Reg srcReg)
+{
+ STANDARD_VM_CONTRACT;
+
+ X86EmitR2ROp(0x29, srcReg, destReg);
+}
+
+#if defined(_TARGET_AMD64_)
+
+//---------------------------------------------------------------
+// movaps destXmmreg, srcXmmReg
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X64EmitMovXmmXmm(X86Reg destXmmreg, X86Reg srcXmmReg)
+{
+ STANDARD_VM_CONTRACT;
+ // There are several instructions that could be used to move xmm registers.
+ // MOVAPS is what the C++ compiler uses, so let's use it here too.
+ X86EmitR2ROp(X86_INSTR_MOVAPS_R_RM, destXmmreg, srcXmmReg, k32BitOp);
+}
+
+//---------------------------------------------------------------
+// movdqa XmmN, [baseReg + offset]
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X64EmitMovdqaFromMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs)
+{
+ STANDARD_VM_CONTRACT;
+ X64EmitMovXmmWorker(0x66, 0x6F, Xmmreg, baseReg, ofs);
+}
+
+//---------------------------------------------------------------
+// movdqa [baseReg + offset], XmmN
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X64EmitMovdqaToMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs)
+{
+ STANDARD_VM_CONTRACT;
+ X64EmitMovXmmWorker(0x66, 0x7F, Xmmreg, baseReg, ofs);
+}
+
+//---------------------------------------------------------------
+// movsd XmmN, [baseReg + offset]
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X64EmitMovSDFromMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs)
+{
+ STANDARD_VM_CONTRACT;
+ X64EmitMovXmmWorker(0xF2, 0x10, Xmmreg, baseReg, ofs);
+}
+
+//---------------------------------------------------------------
+// movsd [baseReg + offset], XmmN
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X64EmitMovSDToMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs)
+{
+ STANDARD_VM_CONTRACT;
+ X64EmitMovXmmWorker(0xF2, 0x11, Xmmreg, baseReg, ofs);
+}
+
+//---------------------------------------------------------------
+// movss XmmN, [baseReg + offset]
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X64EmitMovSSFromMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs)
+{
+ STANDARD_VM_CONTRACT;
+ X64EmitMovXmmWorker(0xF3, 0x10, Xmmreg, baseReg, ofs);
+}
+
+//---------------------------------------------------------------
+// movss [baseReg + offset], XmmN
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X64EmitMovSSToMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs)
+{
+ STANDARD_VM_CONTRACT;
+ X64EmitMovXmmWorker(0xF3, 0x11, Xmmreg, baseReg, ofs);
+}
+
+//---------------------------------------------------------------
+// Helper method for emitting of XMM from/to memory moves
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X64EmitMovXmmWorker(BYTE prefix, BYTE opcode, X86Reg Xmmreg, X86Reg baseReg, __int32 ofs)
+{
+ STANDARD_VM_CONTRACT;
+
+ BYTE codeBuffer[10];
+ unsigned int nBytes = 0;
+
+ // Set up the legacy prefix (0x66/0xF2/0xF3 selects movdqa/movsd/movss)
+ codeBuffer[nBytes++] = prefix;
+
+ // By default, assume we don't have to emit the REX byte.
+ bool fEmitRex = false;
+
+ BYTE rex = REX_PREFIX_BASE;
+
+ if (baseReg >= kR8)
+ {
+ rex |= REX_MODRM_RM_EXT;
+ baseReg = X86RegFromAMD64Reg(baseReg);
+ fEmitRex = true;
+ }
+ if (Xmmreg >= kXMM8)
+ {
+ rex |= REX_MODRM_REG_EXT;
+ Xmmreg = X86RegFromAMD64Reg(Xmmreg);
+ fEmitRex = true;
+ }
+
+ if (fEmitRex == true)
+ {
+ codeBuffer[nBytes++] = rex;
+ }
+
+ // Next, specify the two-byte opcode; the first byte is always 0x0F.
+ codeBuffer[nBytes++] = 0x0F;
+ codeBuffer[nBytes++] = opcode;
+
+ BYTE modrm = static_cast<BYTE>((Xmmreg << 3) | baseReg);
+ bool fOffsetFitsInSignedByte = FitsInI1(ofs)?true:false;
+
+ if (fOffsetFitsInSignedByte)
+ codeBuffer[nBytes++] = 0x40|modrm;
+ else
+ codeBuffer[nBytes++] = 0x80|modrm;
+
+ // If we are dealing with RSP or R12 as the baseReg, we need to emit the SIB byte.
+ if ((baseReg == (X86Reg)4 /*kRSP*/) || (baseReg == kR12))
+ {
+ codeBuffer[nBytes++] = 0x24;
+ }
+
+ // Finally, specify the offset
+ if (fOffsetFitsInSignedByte)
+ {
+ codeBuffer[nBytes++] = (BYTE)ofs;
+ }
+ else
+ {
+ *((__int32*)(codeBuffer+nBytes)) = ofs;
+ nBytes += 4;
+ }
+
+ _ASSERTE(nBytes <= _countof(codeBuffer));
+
+ // Lastly, emit the encoded bytes
+ EmitBytes(codeBuffer, nBytes);
+}
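+
+// Worked example (illustrative only): X64EmitMovSDFromMem(kXMM0, (X86Reg)4 /*kRSP*/, 8)
+// flows through this worker with prefix=0xF2 and opcode=0x10 and emits
+//     F2 0F 10 44 24 08      ; movsd xmm0, qword ptr [rsp+8]
+// where 44 is the modrm byte (disp8 form), 24 is the SIB byte required for
+// RSP-based addressing, and 08 is the offset.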
+
+#endif // defined(_TARGET_AMD64_)
+
+//---------------------------------------------------------------
+// Emits a MOD/RM for accessing a dword at [<indexreg> + ofs32]
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitOffsetModRM(BYTE opcode, X86Reg opcodereg, X86Reg indexreg, __int32 ofs)
+{
+ STANDARD_VM_CONTRACT;
+
+ BYTE codeBuffer[7];
+ BYTE* code = codeBuffer;
+ int nBytes = 0;
+#ifdef _TARGET_AMD64_
+ code++;
+ //
+ // code points to base X86 instruction,
+ // codeBuffer points to full AMD64 instruction
+ //
+ BYTE rex = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+
+ if (indexreg >= kR8)
+ {
+ rex |= REX_MODRM_RM_EXT;
+ indexreg = X86RegFromAMD64Reg(indexreg);
+ }
+ if (opcodereg >= kR8)
+ {
+ rex |= REX_MODRM_REG_EXT;
+ opcodereg = X86RegFromAMD64Reg(opcodereg);
+ }
+
+ nBytes++;
+ code[-1] = rex;
+#endif
+ code[0] = opcode;
+ nBytes++;
+ BYTE modrm = static_cast<BYTE>((opcodereg << 3) | indexreg);
+ if (ofs == 0 && indexreg != kEBP)
+ {
+ code[1] = modrm;
+ nBytes++;
+ EmitBytes(codeBuffer, nBytes);
+ }
+ else if (FitsInI1(ofs))
+ {
+ code[1] = 0x40|modrm;
+ code[2] = (BYTE)ofs;
+ nBytes += 2;
+ EmitBytes(codeBuffer, nBytes);
+ }
+ else
+ {
+ code[1] = 0x80|modrm;
+ *((__int32*)(2+code)) = ofs;
+ nBytes += 5;
+ EmitBytes(codeBuffer, nBytes);
+ }
+}
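+
+// Worked example (illustrative only): X86EmitOffsetModRM(0x8b, kEAX, kECX, 0x10)
+// emits 8B 41 10 (mov eax, [ecx+10h]) on x86 and, with the always-emitted REX.W
+// prefix above, 48 8B 41 10 (mov rax, [rcx+10h]) on AMD64; 41 packs
+// mod=01/reg=eax/rm=ecx.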
+
+//---------------------------------------------------------------
+// Emits a MOD/RM for accessing a dword at [<baseReg> + <indexReg>*<scale> + ofs32]
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitOffsetModRmSIB(BYTE opcode, X86Reg opcodeOrReg, X86Reg baseReg, X86Reg indexReg, __int32 scale, __int32 ofs)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(scale == 1 || scale == 2 || scale == 4 || scale == 8);
+ PRECONDITION(indexReg != kESP_Unsafe);
+ }
+ CONTRACTL_END;
+
+ BYTE codeBuffer[8];
+ BYTE* code = codeBuffer;
+ int nBytes = 0;
+
+#ifdef _TARGET_AMD64_
+ _ASSERTE(!"NYI");
+#endif
+ code[0] = opcode;
+ nBytes++;
+
+ BYTE scaleEnc = 0;
+ switch(scale)
+ {
+ case 1: scaleEnc = 0; break;
+ case 2: scaleEnc = 1; break;
+ case 4: scaleEnc = 2; break;
+ case 8: scaleEnc = 3; break;
+ default: _ASSERTE(!"Unexpected");
+ }
+
+ BYTE sib = static_cast<BYTE>((scaleEnc << 6) | (indexReg << 3) | baseReg);
+
+ if (FitsInI1(ofs))
+ {
+ code[1] = static_cast<BYTE>(0x44 | (opcodeOrReg << 3));
+ code[2] = sib;
+ code[3] = (BYTE)ofs;
+ nBytes += 3;
+ EmitBytes(codeBuffer, nBytes);
+ }
+ else
+ {
+ code[1] = static_cast<BYTE>(0x84 | (opcodeOrReg << 3));
+ code[2] = sib;
+ *(__int32*)(&code[3]) = ofs;
+ nBytes += 6;
+ EmitBytes(codeBuffer, nBytes);
+ }
+}
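+
+// Worked example (illustrative only; x86 only, since AMD64 is NYI above):
+// X86EmitOffsetModRmSIB(0x8b, kEAX, kECX, kEDX, 4, 8) emits
+//     8B 44 91 08            ; mov eax, [ecx+edx*4+8]
+// where 44 selects the SIB+disp8 form and 91 packs scale=4/index=edx/base=ecx.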
+
+VOID StubLinkerCPU::X86EmitRegLoad(X86Reg reg, UINT_PTR imm)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!imm)
+ {
+ X86EmitZeroOutReg(reg);
+ return;
+ }
+
+ UINT cbimm = sizeof(void*);
+
+#ifdef _TARGET_AMD64_
+ // amd64 zero-extends all 32-bit operations. If the immediate will fit in
+ // 32 bits, use the smaller encoding.
+
+ if (reg >= kR8 || !FitsInU4(imm))
+ {
+ BYTE rex = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+ if (reg >= kR8)
+ {
+ rex |= REX_MODRM_RM_EXT;
+ reg = X86RegFromAMD64Reg(reg);
+ }
+ Emit8(rex);
+ }
+ else
+ {
+ // amd64 is little endian, so the &imm below will correctly read off
+ // the low 4 bytes.
+ cbimm = sizeof(UINT32);
+ }
+#endif // _TARGET_AMD64_
+ Emit8(0xB8 | (BYTE)reg);
+ EmitBytes((BYTE*)&imm, cbimm);
+}
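+
+// Worked example (illustrative only): X86EmitRegLoad(kEAX, 0x12345678) emits the
+// five bytes B8 78 56 34 12 (mov eax, 12345678h) on both targets, while a full
+// 64-bit immediate on AMD64 gets the REX.W form 48 B8 followed by 8 bytes.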
+
+
+//---------------------------------------------------------------
+// Emits the most efficient form of the operation:
+//
+// opcode altreg, [basereg + scaledreg*scale + ofs]
+//
+// or
+//
+// opcode [basereg + scaledreg*scale + ofs], altreg
+//
+// (the opcode determines which comes first.)
+//
+//
+// Limitations:
+//
+// scale must be 0,1,2,4 or 8.
+// if scale == 0, scaledreg is ignored.
+// basereg and altreg may be equal to 4 (ESP) but scaledreg cannot
+// for some opcodes, "altreg" may actually select an operation
+// rather than a second register argument.
+// if basereg is EBP, scale must be 0.
+//
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitOp(WORD opcode,
+ X86Reg altreg,
+ X86Reg basereg,
+ __int32 ofs /*=0*/,
+ X86Reg scaledreg /*=0*/,
+ BYTE scale /*=0*/
+ AMD64_ARG(X86OperandSize OperandSize /*= k32BitOp*/))
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ // All 2-byte opcodes start with 0x0f.
+ PRECONDITION(!(opcode >> 8) || (opcode & 0xff) == 0x0f);
+
+ PRECONDITION(scale == 0 || scale == 1 || scale == 2 || scale == 4 || scale == 8);
+ PRECONDITION(scaledreg != (X86Reg)4);
+ PRECONDITION(!(basereg == kEBP && scale != 0));
+
+ PRECONDITION( ((UINT)basereg) < NumX86Regs );
+ PRECONDITION( ((UINT)scaledreg) < NumX86Regs );
+ PRECONDITION( ((UINT)altreg) < NumX86Regs );
+ }
+ CONTRACTL_END;
+
+#ifdef _TARGET_AMD64_
+ if ( k64BitOp == OperandSize
+ || altreg >= kR8
+ || basereg >= kR8
+ || scaledreg >= kR8)
+ {
+ BYTE rex = REX_PREFIX_BASE;
+
+ if (k64BitOp == OperandSize)
+ rex |= REX_OPERAND_SIZE_64BIT;
+
+ if (altreg >= kR8)
+ {
+ rex |= REX_MODRM_REG_EXT;
+ altreg = X86RegFromAMD64Reg(altreg);
+ }
+
+ if (basereg >= kR8)
+ {
+ // basereg might be in the modrm or sib fields. This will be
+ // decided below, but the encodings are the same either way.
+ _ASSERTE(REX_SIB_BASE_EXT == REX_MODRM_RM_EXT);
+ rex |= REX_SIB_BASE_EXT;
+ basereg = X86RegFromAMD64Reg(basereg);
+ }
+
+ if (scaledreg >= kR8)
+ {
+ rex |= REX_SIB_INDEX_EXT;
+ scaledreg = X86RegFromAMD64Reg(scaledreg);
+ }
+
+ Emit8(rex);
+ }
+#endif // _TARGET_AMD64_
+
+ BYTE modrmbyte = static_cast<BYTE>(altreg << 3);
+ BOOL fNeedSIB = FALSE;
+ BYTE SIBbyte = 0;
+ BYTE ofssize;
+ BYTE scaleselect= 0;
+
+ if (ofs == 0 && basereg != kEBP)
+ {
+ ofssize = 0; // Don't change this constant!
+ }
+ else if (FitsInI1(ofs))
+ {
+ ofssize = 1; // Don't change this constant!
+ }
+ else
+ {
+ ofssize = 2; // Don't change this constant!
+ }
+
+ switch (scale)
+ {
+ case 1: scaleselect = 0; break;
+ case 2: scaleselect = 1; break;
+ case 4: scaleselect = 2; break;
+ case 8: scaleselect = 3; break;
+ }
+
+ if (scale == 0 && basereg != (X86Reg)4 /*ESP*/)
+ {
+ // [basereg + ofs]
+ modrmbyte |= basereg | (ofssize << 6);
+ }
+ else if (scale == 0)
+ {
+ // [esp + ofs]
+ _ASSERTE(basereg == (X86Reg)4);
+ fNeedSIB = TRUE;
+ SIBbyte = 0044;
+
+ modrmbyte |= 4 | (ofssize << 6);
+ }
+ else
+ {
+
+ //[basereg + scaledreg*scale + ofs]
+
+ modrmbyte |= 0004 | (ofssize << 6);
+ fNeedSIB = TRUE;
+ SIBbyte = static_cast<BYTE>((scaleselect << 6) | (scaledreg << 3) | basereg);
+
+ }
+
+ // Some sanity checks:
+ _ASSERTE(!(fNeedSIB && basereg == kEBP)); // EBP not valid as a SIB base register.
+ _ASSERTE(!(!fNeedSIB && basereg == (X86Reg)4)); // ESP addressing requires SIB byte
+
+ Emit8((BYTE)opcode);
+
+ if (opcode >> 8)
+ Emit8(opcode >> 8);
+
+ Emit8(modrmbyte);
+ if (fNeedSIB)
+ {
+ Emit8(SIBbyte);
+ }
+ switch (ofssize)
+ {
+ case 0: break;
+ case 1: Emit8( (__int8)ofs ); break;
+ case 2: Emit32( ofs ); break;
+ default: _ASSERTE(!"Can't get here.");
+ }
+}
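+
+// Worked examples (illustrative only, using the default 32-bit operand size):
+//     X86EmitOp(0x8b, kEAX, kESI, 0x10)       -> 8B 46 10    ; mov eax, [esi+10h]
+//     X86EmitOp(0x89, kEAX, kEBX, 4, kECX, 2) -> 89 44 4B 04 ; mov [ebx+ecx*2+4], eax
+// In the second form, 44 selects SIB+disp8 and 4B packs scale=2/index=ecx/base=ebx.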
+
+
+// Emits
+//
+// opcode altreg, modrmreg
+//
+// or
+//
+// opcode modrmreg, altreg
+//
+// (the opcode determines which one comes first)
+//
+// For single-operand opcodes, "altreg" actually selects
+// an operation rather than a register.
+
+VOID StubLinkerCPU::X86EmitR2ROp (WORD opcode,
+ X86Reg altreg,
+ X86Reg modrmreg
+ AMD64_ARG(X86OperandSize OperandSize /*= k64BitOp*/)
+ )
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ // All 2-byte opcodes start with 0x0f.
+ PRECONDITION(!(opcode >> 8) || (opcode & 0xff) == 0x0f);
+
+ PRECONDITION( ((UINT)altreg) < NumX86Regs );
+ PRECONDITION( ((UINT)modrmreg) < NumX86Regs );
+ }
+ CONTRACTL_END;
+
+#ifdef _TARGET_AMD64_
+ BYTE rex = 0;
+
+ if (modrmreg >= kR8)
+ {
+ rex |= REX_MODRM_RM_EXT;
+ modrmreg = X86RegFromAMD64Reg(modrmreg);
+ }
+
+ if (altreg >= kR8)
+ {
+ rex |= REX_MODRM_REG_EXT;
+ altreg = X86RegFromAMD64Reg(altreg);
+ }
+
+ if (k64BitOp == OperandSize)
+ rex |= REX_OPERAND_SIZE_64BIT;
+
+ if (rex)
+ Emit8(REX_PREFIX_BASE | rex);
+#endif // _TARGET_AMD64_
+
+ Emit8((BYTE)opcode);
+
+ if (opcode >> 8)
+ Emit8(opcode >> 8);
+
+ Emit8(static_cast<UINT8>(0300 | (altreg << 3) | modrmreg));
+}
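+
+// Worked example (illustrative only): X86EmitR2ROp(0x8b, kEAX, kECX) emits
+// 8B C1 (mov eax, ecx) on x86; on AMD64 the default k64BitOp operand size adds
+// REX.W, giving 48 8B C1 (mov rax, rcx).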
+
+
+//---------------------------------------------------------------
+// Emits:
+// op altreg, [esp+ofs]
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitEspOffset(BYTE opcode,
+ X86Reg altreg,
+ __int32 ofs
+ AMD64_ARG(X86OperandSize OperandSize /*= k64BitOp*/)
+ )
+{
+ STANDARD_VM_CONTRACT;
+
+ BYTE codeBuffer[8];
+ BYTE *code = codeBuffer;
+ int nBytes;
+
+#ifdef _TARGET_AMD64_
+ BYTE rex = 0;
+
+ if (k64BitOp == OperandSize)
+ rex |= REX_OPERAND_SIZE_64BIT;
+
+ if (altreg >= kR8)
+ {
+ rex |= REX_MODRM_REG_EXT;
+ altreg = X86RegFromAMD64Reg(altreg);
+ }
+
+ if (rex)
+ {
+ *code = (REX_PREFIX_BASE | rex);
+ code++;
+ nBytes = 1;
+ }
+ else
+#endif // _TARGET_AMD64_
+ {
+ nBytes = 0;
+ }
+
+ code[0] = opcode;
+ BYTE modrm = static_cast<BYTE>((altreg << 3) | 004);
+ if (ofs == 0)
+ {
+ code[1] = modrm;
+ code[2] = 0044;
+ EmitBytes(codeBuffer, 3 + nBytes);
+ }
+ else if (FitsInI1(ofs))
+ {
+ code[1] = 0x40|modrm;
+ code[2] = 0044;
+ code[3] = (BYTE)ofs;
+ EmitBytes(codeBuffer, 4 + nBytes);
+ }
+ else
+ {
+ code[1] = 0x80|modrm;
+ code[2] = 0044;
+ *((__int32*)(3+code)) = ofs;
+ EmitBytes(codeBuffer, 7 + nBytes);
+ }
+
+}
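+
+// Worked example (illustrative only): X86EmitEspOffset(0x8b, kEAX, 8) emits
+// 8B 44 24 08 (mov eax, [esp+8]) on x86; 24 is the mandatory SIB byte for
+// ESP-relative addressing. On AMD64 the default k64BitOp adds REX.W (48).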
+
+//---------------------------------------------------------------
+
+VOID StubLinkerCPU::X86EmitPushEBPframe()
+{
+ STANDARD_VM_CONTRACT;
+
+ // push ebp
+ X86EmitPushReg(kEBP);
+ // mov ebp,esp
+ X86EmitMovRegSP(kEBP);
+}
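+
+// The resulting frame layout (illustrative):
+//     [ebp+4]  return address
+//     [ebp+0]  saved ebp      <- ebp (= esp right after the prolog)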
+
+#ifdef _DEBUG
+//---------------------------------------------------------------
+// Emits:
+// mov <reg32>,0xcccccccc
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitDebugTrashReg(X86Reg reg)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _TARGET_AMD64_
+ BYTE rex = REX_PREFIX_BASE | REX_OPERAND_SIZE_64BIT;
+
+ if (reg >= kR8)
+ {
+ rex |= REX_OPCODE_REG_EXT;
+ reg = X86RegFromAMD64Reg(reg);
+ }
+ Emit8(rex);
+ Emit8(0xb8|reg);
+ Emit64(0xcccccccccccccccc);
+#else
+ Emit8(static_cast<UINT8>(0xb8 | reg));
+ Emit32(0xcccccccc);
+#endif
+}
+#endif //_DEBUG
+
+
+// Get X86Reg indexes of argument registers based on offset into ArgumentRegister
+X86Reg GetX86ArgumentRegisterFromOffset(size_t ofs)
+{
+ CONTRACT(X86Reg)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ }
+ CONTRACT_END;
+
+ #define ARGUMENT_REGISTER(reg) if (ofs == offsetof(ArgumentRegisters, reg)) RETURN k##reg ;
+ ENUM_ARGUMENT_REGISTERS();
+ #undef ARGUMENT_REGISTER
+
+ _ASSERTE(0); // Can't get here.
+ RETURN kEBP;
+}
+
+
+#ifdef _TARGET_AMD64_
+static const X86Reg c_argRegs[] = {
+ #define ARGUMENT_REGISTER(regname) k##regname,
+ ENUM_ARGUMENT_REGISTERS()
+ #undef ARGUMENT_REGISTER
+};
+#endif
+
+
+#ifndef CROSSGEN_COMPILE
+
+#if defined(_DEBUG) && (defined(_TARGET_AMD64_) || defined(_TARGET_X86_)) && !defined(FEATURE_PAL)
+void StubLinkerCPU::EmitJITHelperLoggingThunk(PCODE pJitHelper, LPVOID helperFuncCount)
+{
+ STANDARD_VM_CONTRACT;
+
+ VMHELPCOUNTDEF* pHelperFuncCount = (VMHELPCOUNTDEF*)helperFuncCount;
+/*
+ push rcx
+ mov rcx, &(pHelperFuncCount->count)
+ lock inc [rcx]
+ pop rcx
+#ifdef _TARGET_AMD64_
+ mov rax, <pJitHelper>
+ jmp rax
+#else
+ jmp <pJitHelper>
+#endif
+*/
+
+ // push rcx
+ // mov rcx, &(pHelperFuncCount->count)
+ X86EmitPushReg(kECX);
+ X86EmitRegLoad(kECX, (UINT_PTR)(&(pHelperFuncCount->count)));
+
+ // lock inc [rcx]
+ BYTE lock_inc_RCX[] = { 0xf0, 0xff, 0x01 };
+ EmitBytes(lock_inc_RCX, sizeof(lock_inc_RCX));
+
+#if defined(_TARGET_AMD64_)
+ // mov rax, <pJitHelper>
+ // pop rcx
+ // jmp rax
+#else
+ // pop rcx
+ // jmp <pJitHelper>
+#endif
+ X86EmitTailcallWithSinglePop(NewExternalCodeLabel(pJitHelper), kECX);
+}
+#endif // _DEBUG && (_TARGET_AMD64_ || _TARGET_X86_) && !FEATURE_PAL
+
+#ifndef FEATURE_IMPLICIT_TLS
+//---------------------------------------------------------------
+// Emit code to store the current Thread structure in dstreg
+// preservedRegSet is a set of registers to be preserved
+// TRASHES EAX, EDX, ECX unless they are in preservedRegSet.
+// RESULTS dstreg = current Thread
+//---------------------------------------------------------------
+VOID StubLinkerCPU::X86EmitTLSFetch(DWORD idx, X86Reg dstreg, unsigned preservedRegSet)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ // It doesn't make sense to have the destination register be preserved
+ PRECONDITION((preservedRegSet & (1<<dstreg)) == 0);
+ AMD64_ONLY(PRECONDITION(dstreg < 8)); // code below doesn't support high registers
+ }
+ CONTRACTL_END;
+
+ TLSACCESSMODE mode = GetTLSAccessMode(idx);
+
+#ifdef _DEBUG
+ {
+ static BOOL f = TRUE;
+ f = !f;
+ if (f)
+ {
+ mode = TLSACCESS_GENERIC;
+ }
+ }
+#endif
+
+ switch (mode)
+ {
+ case TLSACCESS_WNT:
+ {
+ unsigned __int32 tlsofs = offsetof(TEB, TlsSlots) + (idx * sizeof(void*));
+#ifdef _TARGET_AMD64_
+ BYTE code[] = {0x65,0x48,0x8b,0x04,0x25}; // mov dstreg, qword ptr gs:[IMM32]
+ static const int regByteIndex = 3;
+#elif defined(_TARGET_X86_)
+ BYTE code[] = {0x64,0x8b,0x05}; // mov dstreg, dword ptr fs:[IMM32]
+ static const int regByteIndex = 2;
+#endif
+ code[regByteIndex] |= (dstreg << 3);
+
+ EmitBytes(code, sizeof(code));
+ Emit32(tlsofs);
+ }
+ break;
+
+ case TLSACCESS_GENERIC:
+
+ X86EmitPushRegs(preservedRegSet & ((1<<kEAX)|(1<<kEDX)|(1<<kECX)));
+
+ X86EmitPushImm32(idx);
+#ifdef _TARGET_AMD64_
+ X86EmitPopReg (kECX); // arg in reg
+#endif
+
+ // call TLSGetValue
+ X86EmitCall(NewExternalCodeLabel((LPVOID) TlsGetValue), sizeof(void*));
+
+ // mov dstreg, eax
+ X86EmitMovRegReg(dstreg, kEAX);
+
+ X86EmitPopRegs(preservedRegSet & ((1<<kEAX)|(1<<kEDX)|(1<<kECX)));
+
+ break;
+
+ default:
+ _ASSERTE(0);
+ }
+
+#ifdef _DEBUG
+ // Trash caller saved regs that we were not told to preserve, and that aren't the dstreg.
+ preservedRegSet |= 1<<dstreg;
+ if (!(preservedRegSet & (1<<kEAX)))
+ X86EmitDebugTrashReg(kEAX);
+ if (!(preservedRegSet & (1<<kEDX)))
+ X86EmitDebugTrashReg(kEDX);
+ if (!(preservedRegSet & (1<<kECX)))
+ X86EmitDebugTrashReg(kECX);
+#endif
+
+}
+#endif // FEATURE_IMPLICIT_TLS
+
+VOID StubLinkerCPU::X86EmitCurrentThreadFetch(X86Reg dstreg, unsigned preservedRegSet)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ // It doesn't make sense to have the destination register be preserved
+ PRECONDITION((preservedRegSet & (1<<dstreg)) == 0);
+ AMD64_ONLY(PRECONDITION(dstreg < 8)); // code below doesn't support high registers
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_IMPLICIT_TLS
+
+ X86EmitPushRegs(preservedRegSet & ((1<<kEAX)|(1<<kEDX)|(1<<kECX)));
+
+ //TODO: Inline the instruction instead of a call
+ // call GetThread
+ X86EmitCall(NewExternalCodeLabel((LPVOID) GetThread), sizeof(void*));
+
+ // mov dstreg, eax
+ X86EmitMovRegReg(dstreg, kEAX);
+
+ X86EmitPopRegs(preservedRegSet & ((1<<kEAX)|(1<<kEDX)|(1<<kECX)));
+
+#ifdef _DEBUG
+ // Trash caller saved regs that we were not told to preserve, and that aren't the dstreg.
+ preservedRegSet |= 1<<dstreg;
+ if (!(preservedRegSet & (1<<kEAX)))
+ X86EmitDebugTrashReg(kEAX);
+ if (!(preservedRegSet & (1<<kEDX)))
+ X86EmitDebugTrashReg(kEDX);
+ if (!(preservedRegSet & (1<<kECX)))
+ X86EmitDebugTrashReg(kECX);
+#endif // _DEBUG
+
+#else // FEATURE_IMPLICIT_TLS
+
+ X86EmitTLSFetch(GetThreadTLSIndex(), dstreg, preservedRegSet);
+
+#endif // FEATURE_IMPLICIT_TLS
+
+}
+
+VOID StubLinkerCPU::X86EmitCurrentAppDomainFetch(X86Reg dstreg, unsigned preservedRegSet)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ // It doesn't make sense to have the destination register be preserved
+ PRECONDITION((preservedRegSet & (1<<dstreg)) == 0);
+ AMD64_ONLY(PRECONDITION(dstreg < 8)); // code below doesn't support high registers
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_IMPLICIT_TLS
+ X86EmitPushRegs(preservedRegSet & ((1<<kEAX)|(1<<kEDX)|(1<<kECX)));
+
+ //TODO: Inline the instruction instead of a call
+ // call GetAppDomain
+ X86EmitCall(NewExternalCodeLabel((LPVOID) GetAppDomain), sizeof(void*));
+
+ // mov dstreg, eax
+ X86EmitMovRegReg(dstreg, kEAX);
+
+ X86EmitPopRegs(preservedRegSet & ((1<<kEAX)|(1<<kEDX)|(1<<kECX)));
+
+#ifdef _DEBUG
+ // Trash caller saved regs that we were not told to preserve, and that aren't the dstreg.
+ preservedRegSet |= 1<<dstreg;
+ if (!(preservedRegSet & (1<<kEAX)))
+ X86EmitDebugTrashReg(kEAX);
+ if (!(preservedRegSet & (1<<kEDX)))
+ X86EmitDebugTrashReg(kEDX);
+ if (!(preservedRegSet & (1<<kECX)))
+ X86EmitDebugTrashReg(kECX);
+#endif
+
+#else // FEATURE_IMPLICIT_TLS
+
+ X86EmitTLSFetch(GetAppDomainTLSIndex(), dstreg, preservedRegSet);
+
+#endif // FEATURE_IMPLICIT_TLS
+}
+
+#ifdef _TARGET_X86_
+
+#ifdef PROFILING_SUPPORTED
+VOID StubLinkerCPU::EmitProfilerComCallProlog(TADDR pFrameVptr, X86Reg regFrame)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pFrameVptr == UMThkCallFrame::GetMethodFrameVPtr())
+ {
+ // Load the methoddesc into ECX (UMThkCallFrame->m_pvDatum->m_pMD)
+ X86EmitIndexRegLoad(kECX, regFrame, UMThkCallFrame::GetOffsetOfDatum());
+ X86EmitIndexRegLoad(kECX, kECX, UMEntryThunk::GetOffsetOfMethodDesc());
+
+ // Push arguments and notify profiler
+ X86EmitPushImm32(COR_PRF_TRANSITION_CALL); // Reason
+ X86EmitPushReg(kECX); // MethodDesc*
+ X86EmitCall(NewExternalCodeLabel((LPVOID) ProfilerUnmanagedToManagedTransitionMD), 2*sizeof(void*));
+ }
+
+#ifdef FEATURE_COMINTEROP
+ else if (pFrameVptr == ComMethodFrame::GetMethodFrameVPtr())
+ {
+ // Load the methoddesc into ECX (Frame->m_pvDatum->m_pMD)
+ X86EmitIndexRegLoad(kECX, regFrame, ComMethodFrame::GetOffsetOfDatum());
+ X86EmitIndexRegLoad(kECX, kECX, ComCallMethodDesc::GetOffsetOfMethodDesc());
+
+ // Push arguments and notify profiler
+ X86EmitPushImm32(COR_PRF_TRANSITION_CALL); // Reason
+ X86EmitPushReg(kECX); // MethodDesc*
+ X86EmitCall(NewExternalCodeLabel((LPVOID) ProfilerUnmanagedToManagedTransitionMD), 2*sizeof(void*));
+ }
+#endif // FEATURE_COMINTEROP
+
+ // Unrecognized frame vtbl
+ else
+ {
+ _ASSERTE(!"Unrecognized vtble passed to EmitComMethodStubProlog with profiling turned on.");
+ }
+}
+
+
+VOID StubLinkerCPU::EmitProfilerComCallEpilog(TADDR pFrameVptr, X86Reg regFrame)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+#ifdef FEATURE_COMINTEROP
+ PRECONDITION(pFrameVptr == UMThkCallFrame::GetMethodFrameVPtr() || pFrameVptr == ComMethodFrame::GetMethodFrameVPtr());
+#else
+ PRECONDITION(pFrameVptr == UMThkCallFrame::GetMethodFrameVPtr());
+#endif // FEATURE_COMINTEROP
+ }
+ CONTRACTL_END;
+
+ if (pFrameVptr == UMThkCallFrame::GetMethodFrameVPtr())
+ {
+ // Load the methoddesc into ECX (UMThkCallFrame->m_pvDatum->m_pMD)
+ X86EmitIndexRegLoad(kECX, regFrame, UMThkCallFrame::GetOffsetOfDatum());
+ X86EmitIndexRegLoad(kECX, kECX, UMEntryThunk::GetOffsetOfMethodDesc());
+
+ // Push arguments and notify profiler
+ X86EmitPushImm32(COR_PRF_TRANSITION_RETURN); // Reason
+ X86EmitPushReg(kECX); // MethodDesc*
+ X86EmitCall(NewExternalCodeLabel((LPVOID) ProfilerManagedToUnmanagedTransitionMD), 2*sizeof(void*));
+ }
+
+#ifdef FEATURE_COMINTEROP
+ else if (pFrameVptr == ComMethodFrame::GetMethodFrameVPtr())
+ {
+ // Load the methoddesc into ECX (Frame->m_pvDatum->m_pMD)
+ X86EmitIndexRegLoad(kECX, regFrame, ComMethodFrame::GetOffsetOfDatum());
+ X86EmitIndexRegLoad(kECX, kECX, ComCallMethodDesc::GetOffsetOfMethodDesc());
+
+ // Push arguments and notify profiler
+ X86EmitPushImm32(COR_PRF_TRANSITION_RETURN); // Reason
+ X86EmitPushReg(kECX); // MethodDesc*
+ X86EmitCall(NewExternalCodeLabel((LPVOID) ProfilerManagedToUnmanagedTransitionMD), 2*sizeof(void*));
+ }
+#endif // FEATURE_COMINTEROP
+
+ // Unrecognized frame vtbl
+ else
+ {
+ _ASSERTE(!"Unrecognized vtble passed to EmitComMethodStubEpilog with profiling turned on.");
+ }
+}
+#endif // PROFILING_SUPPORTED
+
+
+//========================================================================
+// Prolog for entering managed code from COM.
+// Pushes the appropriate frame ptr and sets up the Thread. The rare-path
+// labels passed in must be emitted by the caller (see EmitComMethodStubEpilog).
+// At the end:
+// ESI will hold the pointer to the ComMethodFrame or UMThkCallFrame
+// EBX will hold the result of GetThread()
+// EDI will hold the previous Frame ptr
+
+void StubLinkerCPU::EmitComMethodStubProlog(TADDR pFrameVptr,
+ CodeLabel** rgRareLabels,
+ CodeLabel** rgRejoinLabels,
+ BOOL bShouldProfile)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(rgRareLabels != NULL);
+ PRECONDITION(rgRareLabels[0] != NULL && rgRareLabels[1] != NULL && rgRareLabels[2] != NULL);
+ PRECONDITION(rgRejoinLabels != NULL);
+ PRECONDITION(rgRejoinLabels[0] != NULL && rgRejoinLabels[1] != NULL && rgRejoinLabels[2] != NULL);
+ }
+ CONTRACTL_END;
+
+ // push ebp ;; save callee-saved register
+ // mov ebp,esp
+ // push ebx ;; save callee-saved register
+ // push esi ;; save callee-saved register
+ // push edi ;; save callee-saved register
+ X86EmitPushEBPframe();
+
+ X86EmitPushReg(kEBX);
+ X86EmitPushReg(kESI);
+ X86EmitPushReg(kEDI);
+
+ // push eax ; datum
+ X86EmitPushReg(kEAX);
+
+ // push edx ;leave room for m_next (edx is an arbitrary choice)
+ X86EmitPushReg(kEDX);
+
+ // push IMM32 ; push Frame vptr
+ X86EmitPushImmPtr((LPVOID) pFrameVptr);
+
+ X86EmitPushImmPtr((LPVOID)GetProcessGSCookie());
+
+ // lea esi, [esp+4] ;; set ESI -> new frame
+ X86EmitEspOffset(0x8d, kESI, 4); // lea ESI, [ESP+4]
+
+ if (pFrameVptr == UMThkCallFrame::GetMethodFrameVPtr())
+ {
+ // Preserve argument registers for thiscall/fastcall
+ X86EmitPushReg(kECX);
+ X86EmitPushReg(kEDX);
+ }
+
+ // Emit Setup thread
+ EmitSetup(rgRareLabels[0]); // rareLabel for rare setup
+ EmitLabel(rgRejoinLabels[0]); // rejoin label for rare setup
+
+#ifdef PROFILING_SUPPORTED
+ // If profiling is active, emit code to notify profiler of transition
+ // Must do this before preemptive GC is disabled, so no problem if the
+ // profiler blocks.
+ if (CORProfilerTrackTransitions() && bShouldProfile)
+ {
+ EmitProfilerComCallProlog(pFrameVptr, /*Frame*/ kESI);
+ }
+#endif // PROFILING_SUPPORTED
+
+ //-----------------------------------------------------------------------
+ // Generate the inline part of disabling preemptive GC. It is critical
+ // that this part happen before we link in the frame. That's because
+ // we won't be able to unlink the frame from preemptive mode. And during
+ // shutdown, we cannot switch to cooperative mode under some circumstances
+ //-----------------------------------------------------------------------
+ EmitDisable(rgRareLabels[1], /*fCallIn=*/TRUE, kEBX); // rare disable gc
+ EmitLabel(rgRejoinLabels[1]); // rejoin for rare disable gc
+
+ // If we take a stack overflow (SO) after installing the new frame but before getting
+ // the exception handlers in place, we will have a corrupt frame stack. So probe-by-touch
+ // first for sufficient stack space to erect the handler. Because we know we will be
+ // touching that stack right away when we install the handler, this probe-by-touch will
+ // not incur unnecessary cache misses. And this allows us to do the probe with one
+ // instruction.
+
+ // Note that for Win64, the personality routine will handle unlinking the frame, so
+ // we don't need to probe in the Win64 stubs. The exception is ComToCLRWorker
+ // where we don't setup a personality routine. However, we push the frame inside
+ // that function and it is probe-protected with an entry point probe first, so we are
+ // OK there too.
+
+ // We push two registers to setup the EH handler and none to setup the frame
+ // so probe for double that to give ourselves a small margin for error.
+ // mov eax, [esp+n] ;; probe for sufficient stack to setup EH
+ X86EmitEspOffset(0x8B, kEAX, -0x20);
+ // mov edi,[ebx + Thread.GetFrame()] ;; get previous frame
+ X86EmitIndexRegLoad(kEDI, kEBX, Thread::GetOffsetOfCurrentFrame());
+
+ // mov [esi + Frame.m_next], edi
+ X86EmitIndexRegStore(kESI, Frame::GetOffsetOfNextLink(), kEDI);
+
+ // mov [ebx + Thread.GetFrame()], esi
+ X86EmitIndexRegStore(kEBX, Thread::GetOffsetOfCurrentFrame(), kESI);
+
+ if (pFrameVptr == UMThkCallFrame::GetMethodFrameVPtr())
+ {
+ // push UnmanagedToManagedExceptHandler
+ X86EmitPushImmPtr((LPVOID)UMThunkPrestubHandler);
+
+ // mov eax, fs:[0]
+ static const BYTE codeSEH1[] = { 0x64, 0xA1, 0x0, 0x0, 0x0, 0x0};
+ EmitBytes(codeSEH1, sizeof(codeSEH1));
+
+ // push eax
+ X86EmitPushReg(kEAX);
+
+ // mov dword ptr fs:[0], esp
+ static const BYTE codeSEH2[] = { 0x64, 0x89, 0x25, 0x0, 0x0, 0x0, 0x0};
+ EmitBytes(codeSEH2, sizeof(codeSEH2));
+ }
+
+#if _DEBUG
+ if (Frame::ShouldLogTransitions())
+ {
+ // call LogTransition
+ X86EmitPushReg(kESI);
+ X86EmitCall(NewExternalCodeLabel((LPVOID) Frame::LogTransition), sizeof(void*));
+ }
+#endif
+}
+
+//========================================================================
+// Epilog for stubs that enter managed code from COM
+//
+// At this point of the stub, the state should be as follows:
+// ESI holds the ComMethodFrame or UMThkCallFrame ptr
+// EBX holds the result of GetThread()
+// EDI holds the previous Frame ptr
+//
+void StubLinkerCPU::EmitComMethodStubEpilog(TADDR pFrameVptr,
+ CodeLabel** rgRareLabels,
+ CodeLabel** rgRejoinLabels,
+ BOOL bShouldProfile)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(rgRareLabels != NULL);
+ PRECONDITION(rgRareLabels[0] != NULL && rgRareLabels[1] != NULL && rgRareLabels[2] != NULL);
+ PRECONDITION(rgRejoinLabels != NULL);
+ PRECONDITION(rgRejoinLabels[0] != NULL && rgRejoinLabels[1] != NULL && rgRejoinLabels[2] != NULL);
+ }
+ CONTRACTL_END;
+
+ EmitCheckGSCookie(kESI, UnmanagedToManagedFrame::GetOffsetOfGSCookie());
+
+ if (pFrameVptr == UMThkCallFrame::GetMethodFrameVPtr())
+ {
+ // if we are using exceptions, unlink the SEH
+ // mov ecx,[esp] ;;pointer to the next exception record
+ X86EmitEspOffset(0x8b, kECX, 0);
+
+ // mov dword ptr fs:[0], ecx
+ static const BYTE codeSEH[] = { 0x64, 0x89, 0x0D, 0x0, 0x0, 0x0, 0x0 };
+ EmitBytes(codeSEH, sizeof(codeSEH));
+
+ X86EmitAddEsp(sizeof(EXCEPTION_REGISTRATION_RECORD));
+ }
+
+ // mov [ebx + Thread.GetFrame()], edi ;; restore previous frame
+ X86EmitIndexRegStore(kEBX, Thread::GetOffsetOfCurrentFrame(), kEDI);
+
+ //-----------------------------------------------------------------------
+ // Generate the inline part of enabling preemptive GC
+ //-----------------------------------------------------------------------
+ EmitEnable(rgRareLabels[2]); // rare enable gc
+ EmitLabel(rgRejoinLabels[2]); // rejoin for rare enable gc
+
+ if (pFrameVptr == UMThkCallFrame::GetMethodFrameVPtr())
+ {
+ // Restore argument registers for thiscall/fastcall
+ X86EmitPopReg(kEDX);
+ X86EmitPopReg(kECX);
+ }
+
+ // add esp, popstack
+ X86EmitAddEsp(sizeof(GSCookie) + UnmanagedToManagedFrame::GetOffsetOfCalleeSavedRegisters());
+
+ // pop edi ; restore callee-saved registers
+ // pop esi
+ // pop ebx
+ // pop ebp
+ X86EmitPopReg(kEDI);
+ X86EmitPopReg(kESI);
+ X86EmitPopReg(kEBX);
+ X86EmitPopReg(kEBP);
+
+ // jmp eax //reexecute!
+ X86EmitR2ROp(0xff, (X86Reg)4, kEAX);
+
+ // ret
+ // This will never be executed. It is just to help out stack-walking logic
+ // which disassembles the epilog to unwind the stack. A "ret" instruction
+ // indicates that no more code needs to be disassembled if the stack-walker
+ // keeps going past the previous "jmp eax".
+ X86EmitReturn(0);
+
+ //-----------------------------------------------------------------------
+ // The out-of-line portion of enabling preemptive GC - rarely executed
+ //-----------------------------------------------------------------------
+ EmitLabel(rgRareLabels[2]); // label for rare enable gc
+ EmitRareEnable(rgRejoinLabels[2]); // emit rare enable gc
+
+ //-----------------------------------------------------------------------
+ // The out-of-line portion of disabling preemptive GC - rarely executed
+ //-----------------------------------------------------------------------
+ EmitLabel(rgRareLabels[1]); // label for rare disable gc
+ EmitRareDisable(rgRejoinLabels[1]); // emit rare disable gc
+
+ //-----------------------------------------------------------------------
+ // The out-of-line portion of setup thread - rarely executed
+ //-----------------------------------------------------------------------
+ EmitLabel(rgRareLabels[0]); // label for rare setup thread
+ EmitRareSetup(rgRejoinLabels[0], /*fThrow*/ TRUE); // emit rare setup thread
+}
+
+//---------------------------------------------------------------
+// Emit code to set up the current Thread structure and leave it in ebx.
+// TRASHES eax, ecx and edx.
+// RESULTS ebx = current Thread
+//---------------------------------------------------------------
+VOID StubLinkerCPU::EmitSetup(CodeLabel *pForwardRef)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_IMPLICIT_TLS
+ DWORD idx = 0;
+ TLSACCESSMODE mode = TLSACCESS_GENERIC;
+#else
+ DWORD idx = GetThreadTLSIndex();
+ TLSACCESSMODE mode = GetTLSAccessMode(idx);
+#endif
+
+#ifdef _DEBUG
+ {
+ static BOOL f = TRUE;
+ f = !f;
+ if (f)
+ {
+ mode = TLSACCESS_GENERIC;
+ }
+ }
+#endif
+
+ switch (mode)
+ {
+ case TLSACCESS_WNT:
+ {
+ unsigned __int32 tlsofs = offsetof(TEB, TlsSlots) + (idx * sizeof(void*));
+
+ static const BYTE code[] = {0x64,0x8b,0x1d}; // mov ebx, dword ptr fs:[IMM32]
+ EmitBytes(code, sizeof(code));
+ Emit32(tlsofs);
+ }
+ break;
+
+ case TLSACCESS_GENERIC:
+#ifdef FEATURE_IMPLICIT_TLS
+ X86EmitCall(NewExternalCodeLabel((LPVOID) GetThread), sizeof(void*));
+#else
+ X86EmitPushImm32(idx);
+
+ // call TLSGetValue
+ X86EmitCall(NewExternalCodeLabel((LPVOID) TlsGetValue), sizeof(void*));
+#endif
+ // mov ebx,eax
+ Emit16(0xc389);
+ break;
+ default:
+ _ASSERTE(0);
+ }
+
+ // cmp ebx, 0
+ static const BYTE b[] = { 0x83, 0xFB, 0x0};
+
+ EmitBytes(b, sizeof(b));
+
+ // jz RarePath
+ X86EmitCondJump(pForwardRef, X86CondCode::kJZ);
+
+#ifdef _DEBUG
+ X86EmitDebugTrashReg(kECX);
+ X86EmitDebugTrashReg(kEDX);
+#endif
+
+}
+
+VOID StubLinkerCPU::EmitRareSetup(CodeLabel *pRejoinPoint, BOOL fThrow)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifndef FEATURE_COMINTEROP
+ _ASSERTE(fThrow);
+#else // !FEATURE_COMINTEROP
+ if (!fThrow)
+ {
+ X86EmitPushReg(kESI);
+ X86EmitCall(NewExternalCodeLabel((LPVOID) CreateThreadBlockReturnHr), sizeof(void*));
+ }
+ else
+#endif // !FEATURE_COMINTEROP
+ {
+ X86EmitCall(NewExternalCodeLabel((LPVOID) CreateThreadBlockThrow), 0);
+ }
+
+ // mov ebx,eax
+ Emit16(0xc389);
+ X86EmitNearJump(pRejoinPoint);
+}
+
+//========================================================================
+#endif // _TARGET_X86_
+//========================================================================
+#if defined(FEATURE_COMINTEROP) && defined(_TARGET_X86_)
+//========================================================================
+// Epilog for stubs that enter managed code from COM
+//
+// On entry, ESI points to the Frame
+// ESP points to below FramedMethodFrame::m_vc5Frame
+// EBX holds GetThread()
+// EDI holds the previous Frame
+
+void StubLinkerCPU::EmitSharedComMethodStubEpilog(TADDR pFrameVptr,
+ CodeLabel** rgRareLabels,
+ CodeLabel** rgRejoinLabels,
+ unsigned offsetRetThunk,
+ BOOL bShouldProfile)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(rgRareLabels != NULL);
+ PRECONDITION(rgRareLabels[0] != NULL && rgRareLabels[1] != NULL && rgRareLabels[2] != NULL);
+ PRECONDITION(rgRejoinLabels != NULL);
+ PRECONDITION(rgRejoinLabels[0] != NULL && rgRejoinLabels[1] != NULL && rgRejoinLabels[2] != NULL);
+ }
+ CONTRACTL_END;
+
+ CodeLabel *NoEntryLabel;
+ NoEntryLabel = NewCodeLabel();
+
+ EmitCheckGSCookie(kESI, UnmanagedToManagedFrame::GetOffsetOfGSCookie());
+
+ // mov [ebx + Thread.GetFrame()], edi ;; restore previous frame
+ X86EmitIndexRegStore(kEBX, Thread::GetOffsetOfCurrentFrame(), kEDI);
+
+ //-----------------------------------------------------------------------
+ // Generate the inline part of enabling preemptive GC
+ //-----------------------------------------------------------------------
+ EmitLabel(NoEntryLabel); // need to enable preemptive mode even when we fail the disable, as rare disable will return in coop mode
+
+ EmitEnable(rgRareLabels[2]); // rare enable gc
+ EmitLabel(rgRejoinLabels[2]); // rejoin for rare enable gc
+
+#ifdef PROFILING_SUPPORTED
+ // If profiling is active, emit code to notify profiler of transition
+ if (CORProfilerTrackTransitions() && bShouldProfile)
+ {
+ // Save return value
+ X86EmitPushReg(kEAX);
+ X86EmitPushReg(kEDX);
+
+ EmitProfilerComCallEpilog(pFrameVptr, kESI);
+
+ // Restore return value
+ X86EmitPopReg(kEDX);
+ X86EmitPopReg(kEAX);
+ }
+#endif // PROFILING_SUPPORTED
+
+ X86EmitAddEsp(sizeof(GSCookie) + UnmanagedToManagedFrame::GetOffsetOfDatum());
+
+ // pop ecx
+ X86EmitPopReg(kECX); // pop the MethodDesc*
+
+ // pop edi ; restore callee-saved registers
+ // pop esi
+ // pop ebx
+ // pop ebp
+ X86EmitPopReg(kEDI);
+ X86EmitPopReg(kESI);
+ X86EmitPopReg(kEBX);
+ X86EmitPopReg(kEBP);
+
+ // add ecx, offsetRetThunk
+ X86EmitAddReg(kECX, offsetRetThunk);
+
+ // jmp ecx
+ // This will jump to the "ret cbStackArgs" instruction in COMMETHOD_PREPAD.
+ static const BYTE bjmpecx[] = { 0xff, 0xe1 };
+ EmitBytes(bjmpecx, sizeof(bjmpecx));
+
+ // ret
+ // This will never be executed. It is just to help out stack-walking logic
+ // which disassembles the epilog to unwind the stack. A "ret" instruction
+ // indicates that no more code needs to be disassembled if the stack-walker
+ // keeps going past the previous "jmp ecx".
+ X86EmitReturn(0);
+
+ //-----------------------------------------------------------------------
+ // The out-of-line portion of enabling preemptive GC - rarely executed
+ //-----------------------------------------------------------------------
+ EmitLabel(rgRareLabels[2]); // label for rare enable gc
+ EmitRareEnable(rgRejoinLabels[2]); // emit rare enable gc
+
+ //-----------------------------------------------------------------------
+ // The out-of-line portion of disabling preemptive GC - rarely executed
+ //-----------------------------------------------------------------------
+ EmitLabel(rgRareLabels[1]); // label for rare disable gc
+ EmitRareDisableHRESULT(rgRejoinLabels[1], NoEntryLabel);
+
+ //-----------------------------------------------------------------------
+ // The out-of-line portion of setup thread - rarely executed
+ //-----------------------------------------------------------------------
+ EmitLabel(rgRareLabels[0]); // label for rare setup thread
+ EmitRareSetup(rgRejoinLabels[0],/*fThrow*/ FALSE); // emit rare setup thread
+}
+
+//========================================================================
+#endif // defined(FEATURE_COMINTEROP) && defined(_TARGET_X86_)
+
+#ifndef FEATURE_STUBS_AS_IL
+/*==============================================================================
+ Pushes a TransitionFrame on the stack.
+ If you make any changes to the prolog instruction sequence, be sure
+ to update UpdateRegdisplay, too!! This service should only be called from
+ within the runtime. It should not be called for any unmanaged -> managed calls.
+
+ At the end of the generated prolog stub code:
+ pFrame is in ESI/RSI.
+ the previous pFrame is in EDI/RDI
+ The current Thread* is in EBX/RBX.
+ For x86, ESP points to TransitionFrame
+ For amd64, ESP points to the space reserved for the outgoing argument registers
+*/
+
+VOID StubLinkerCPU::EmitMethodStubProlog(TADDR pFrameVptr, int transitionBlockOffset)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _TARGET_AMD64_
+ X86EmitPushReg(kR15); // CalleeSavedRegisters
+ X86EmitPushReg(kR14);
+ X86EmitPushReg(kR13);
+ X86EmitPushReg(kR12);
+ X86EmitPushReg(kRBP);
+ X86EmitPushReg(kRBX);
+ X86EmitPushReg(kRSI);
+ X86EmitPushReg(kRDI);
+
+ // Push m_datum
+ X86EmitPushReg(SCRATCH_REGISTER_X86REG);
+
+ // push edx ;leave room for m_next (edx is an arbitrary choice)
+ X86EmitPushReg(kEDX);
+
+ // push Frame vptr
+ X86EmitPushImmPtr((LPVOID) pFrameVptr);
+
+ // mov rsi, rsp
+ X86EmitR2ROp(0x8b, kRSI, (X86Reg)4 /*kESP*/);
+ UnwindSetFramePointer(kRSI);
+
+ // Save ArgumentRegisters
+ #define ARGUMENT_REGISTER(regname) X86EmitRegSave(k##regname, SecureDelegateFrame::GetOffsetOfTransitionBlock() + \
+ sizeof(TransitionBlock) + offsetof(ArgumentRegisters, regname));
+ ENUM_ARGUMENT_REGISTERS();
+ #undef ARGUMENT_REGISTER
+
+ _ASSERTE(((Frame*)&pFrameVptr)->GetGSCookiePtr() == PTR_GSCookie(PBYTE(&pFrameVptr) - sizeof(GSCookie)));
+ X86EmitPushImmPtr((LPVOID)GetProcessGSCookie());
+
+ // sub rsp, 4*sizeof(void*) ;; allocate callee scratch area and ensure rsp is 16-byte-aligned
+ const INT32 padding = sizeof(ArgumentRegisters) + ((sizeof(FramedMethodFrame) % (2 * sizeof(LPVOID))) ? 0 : sizeof(LPVOID));
+ X86EmitSubEsp(padding);
+#endif // _TARGET_AMD64_
+
+#ifdef _TARGET_X86_
+ // push ebp ;; save callee-saved register
+ // mov ebp,esp
+ // push ebx ;; save callee-saved register
+ // push esi ;; save callee-saved register
+ // push edi ;; save callee-saved register
+ X86EmitPushEBPframe();
+
+ X86EmitPushReg(kEBX);
+ X86EmitPushReg(kESI);
+ X86EmitPushReg(kEDI);
+
+ // Push & initialize ArgumentRegisters
+ #define ARGUMENT_REGISTER(regname) X86EmitPushReg(k##regname);
+ ENUM_ARGUMENT_REGISTERS();
+ #undef ARGUMENT_REGISTER
+
+ // Push m_datum
+ X86EmitPushReg(kEAX);
+
+ // push edx ;leave room for m_next (edx is an arbitrary choice)
+ X86EmitPushReg(kEDX);
+
+ // push Frame vptr
+ X86EmitPushImmPtr((LPVOID) pFrameVptr);
+
+ // mov esi,esp
+ X86EmitMovRegSP(kESI);
+
+ X86EmitPushImmPtr((LPVOID)GetProcessGSCookie());
+#endif // _TARGET_X86_
+
+ // ebx <-- GetThread()
+ // Trashes X86TLSFetch_TRASHABLE_REGS
+ X86EmitCurrentThreadFetch(kEBX, 0);
+
+#if _DEBUG
+
+ // call ObjectRefFlush
+#ifdef _TARGET_AMD64_
+
+ // mov rcx, rbx
+ X86EmitR2ROp(0x8b, kECX, kEBX); // arg in reg
+
+#else // !_TARGET_AMD64_
+ X86EmitPushReg(kEBX); // arg on stack
+#endif // _TARGET_AMD64_
+
+ // Make the call
+ X86EmitCall(NewExternalCodeLabel((LPVOID) Thread::ObjectRefFlush), sizeof(void*));
+
+#endif // _DEBUG
+
+ // mov edi,[ebx + Thread.GetFrame()] ;; get previous frame
+ X86EmitIndexRegLoad(kEDI, kEBX, Thread::GetOffsetOfCurrentFrame());
+
+ // mov [esi + Frame.m_next], edi
+ X86EmitIndexRegStore(kESI, Frame::GetOffsetOfNextLink(), kEDI);
+
+ // mov [ebx + Thread.GetFrame()], esi
+ X86EmitIndexRegStore(kEBX, Thread::GetOffsetOfCurrentFrame(), kESI);
+
+#if _DEBUG
+
+ if (Frame::ShouldLogTransitions())
+ {
+ // call LogTransition
+#ifdef _TARGET_AMD64_
+
+ // mov rcx, rsi
+ X86EmitR2ROp(0x8b, kECX, kESI); // arg in reg
+
+#else // !_TARGET_AMD64_
+ X86EmitPushReg(kESI); // arg on stack
+#endif // _TARGET_AMD64_
+
+ X86EmitCall(NewExternalCodeLabel((LPVOID) Frame::LogTransition), sizeof(void*));
+
+#ifdef _TARGET_AMD64_
+ // Reload parameter registers
+ // mov r, [esp+offs]
+ #define ARGUMENT_REGISTER(regname) X86EmitEspOffset(0x8b, k##regname, sizeof(ArgumentRegisters) + \
+ sizeof(TransitionFrame) + offsetof(ArgumentRegisters, regname));
+ ENUM_ARGUMENT_REGISTERS();
+ #undef ARGUMENT_REGISTER
+
+#endif // _TARGET_AMD64_
+ }
+
+#endif // _DEBUG
+
+
+#ifdef _TARGET_AMD64_
+ // OK for the debugger to examine the new frame now
+ // (Note that if it's not OK yet for some stub, another patch label
+ // can be emitted later which will override this one.)
+ EmitPatchLabel();
+#else
+ // For x86, the patch label can be specified only after the GSCookie is pushed
+ // Otherwise the debugger will see a Frame without a valid GSCookie
+#endif
+}
+
+/*==============================================================================
+ EmitMethodStubEpilog generates the part of the stub that will pop off the
+ Frame.
+
+ numArgBytes - number of argument bytes for the stub to pop on return
+ (x86 only; on AMD64 the caller deallocates the argument space)
+
+ At this point of the stub:
+ pFrame is in ESI/RSI.
+ the previous pFrame is in EDI/RDI
+ The current Thread* is in EBX/RBX.
+ For x86, ESP points to the FramedMethodFrame::NegInfo
+*/
+
+VOID StubLinkerCPU::EmitMethodStubEpilog(WORD numArgBytes, int transitionBlockOffset)
+{
+ STANDARD_VM_CONTRACT;
+
+ // mov [ebx + Thread.GetFrame()], edi ;; restore previous frame
+ X86EmitIndexRegStore(kEBX, Thread::GetOffsetOfCurrentFrame(), kEDI);
+
+#ifdef _TARGET_X86_
+ // deallocate Frame
+ X86EmitAddEsp(sizeof(GSCookie) + transitionBlockOffset + TransitionBlock::GetOffsetOfCalleeSavedRegisters());
+
+#elif defined(_TARGET_AMD64_)
+ // lea rsp, [rsi + <offset of preserved registers>]
+ X86EmitOffsetModRM(0x8d, (X86Reg)4 /*kRSP*/, kRSI, transitionBlockOffset + TransitionBlock::GetOffsetOfCalleeSavedRegisters());
+#endif // _TARGET_AMD64_
+
+ // pop edi ; restore callee-saved registers
+ // pop esi
+ // pop ebx
+ // pop ebp
+ X86EmitPopReg(kEDI);
+ X86EmitPopReg(kESI);
+ X86EmitPopReg(kEBX);
+ X86EmitPopReg(kEBP);
+
+#ifdef _TARGET_AMD64_
+ X86EmitPopReg(kR12);
+ X86EmitPopReg(kR13);
+ X86EmitPopReg(kR14);
+ X86EmitPopReg(kR15);
+#endif
+
+#ifdef _TARGET_AMD64_
+ // Caller deallocates argument space. (Bypasses ASSERT in
+ // X86EmitReturn.)
+ numArgBytes = 0;
+#endif
+
+ X86EmitReturn(numArgBytes);
+}
+
+
+// On entry, frameReg (typically ESI) should be pointing to the Frame
+
+VOID StubLinkerCPU::EmitCheckGSCookie(X86Reg frameReg, int gsCookieOffset)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _DEBUG
+ // cmp dword ptr [frameReg + gsCookieOffset], gsCookie
+#ifdef _TARGET_X86_
+ X86EmitCmpRegIndexImm32(frameReg, gsCookieOffset, GetProcessGSCookie());
+#else
+ X64EmitCmp32RegIndexImm32(frameReg, gsCookieOffset, (INT32)GetProcessGSCookie());
+#endif
+
+ CodeLabel * pLabel = NewCodeLabel();
+ X86EmitCondJump(pLabel, X86CondCode::kJE);
+
+ X86EmitCall(NewExternalCodeLabel((LPVOID) JIT_FailFast), 0);
+
+ EmitLabel(pLabel);
+#endif
+}
+#endif // !FEATURE_STUBS_AS_IL
+
+
+// This method unboxes the THIS pointer and then calls pUnboxMD
+// If it's shared code for a method in a generic value class, then also extract the vtable pointer
+// and pass it as an extra argument. Thus this stub generator really covers both
+// - Unboxing, non-instantiating stubs
+// - Unboxing, method-table-instantiating stubs
+VOID StubLinkerCPU::EmitUnboxMethodStub(MethodDesc* pUnboxMD)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!pUnboxMD->IsStatic());
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_STUBS_AS_IL
+ _ASSERTE(!pUnboxMD->RequiresInstMethodTableArg());
+#else
+ if (pUnboxMD->RequiresInstMethodTableArg())
+ {
+ EmitInstantiatingMethodStub(pUnboxMD, NULL);
+ return;
+ }
+#endif
+
+ //
+ // unboxing a value class simply means adding sizeof(void*) to the THIS pointer
+ //
+#ifdef _TARGET_AMD64_
+ X86EmitAddReg(THIS_kREG, sizeof(void*));
+
+ // Use direct call if possible
+ if (pUnboxMD->HasStableEntryPoint())
+ {
+ X86EmitRegLoad(kRAX, pUnboxMD->GetStableEntryPoint());// MOV RAX, DWORD
+ }
+ else
+ {
+ X86EmitRegLoad(kRAX, (UINT_PTR)pUnboxMD->GetAddrOfSlot()); // MOV RAX, DWORD
+
+ X86EmitIndexRegLoad(kRAX, kRAX); // MOV RAX, [RAX]
+ }
+
+ Emit16(X86_INSTR_JMP_EAX); // JMP EAX
+#else // _TARGET_AMD64_
+ X86EmitAddReg(THIS_kREG, sizeof(void*));
+
+ // Use direct call if possible
+ if (pUnboxMD->HasStableEntryPoint())
+ {
+ X86EmitNearJump(NewExternalCodeLabel((LPVOID) pUnboxMD->GetStableEntryPoint()));
+ }
+ else
+ {
+ // jmp [slot]
+ Emit16(0x25ff);
+ Emit32((DWORD)(size_t)pUnboxMD->GetAddrOfSlot());
+ }
+#endif //_TARGET_AMD64_
+}
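+
+// Worked example (illustrative only, assuming THIS_kREG is ECX on x86): for a
+// non-generic unboxing stub whose target has a stable entry point, the
+// generated code is just
+//     add ecx, 4              ; step over the MethodTable* to the boxed data
+//     jmp <stable entry point>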
+
+
+#if defined(FEATURE_SHARE_GENERIC_CODE) && !defined(FEATURE_STUBS_AS_IL)
+// The stub generated by this method passes an extra dictionary argument before jumping to
+// shared-instantiation generic code.
+//
+// pMD is either
+// * An InstantiatedMethodDesc for a generic method whose code is shared across instantiations.
+// In this case, the extra argument is the InstantiatedMethodDesc for the instantiation-specific stub itself.
+// or * A MethodDesc for a static method in a generic class whose code is shared across instantiations.
+// In this case, the extra argument is the MethodTable pointer of the instantiated type.
+// or * A MethodDesc for an unboxing stub. In this case, the extra argument is null.
+VOID StubLinkerCPU::EmitInstantiatingMethodStub(MethodDesc* pMD, void* extra)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(pMD->RequiresInstArg());
+ }
+ CONTRACTL_END;
+
+ MetaSig msig(pMD);
+ ArgIterator argit(&msig);
+
+#ifdef _TARGET_AMD64_
+ int paramTypeArgOffset = argit.GetParamTypeArgOffset();
+ int paramTypeArgIndex = TransitionBlock::GetArgumentIndexFromOffset(paramTypeArgOffset);
+
+ CorElementType argTypes[5];
+
+ int firstRealArg = paramTypeArgIndex + 1;
+ int argNum = firstRealArg;
+
+ //
+ // Compute types of the 4 register args and first stack arg
+ //
+
+ CorElementType sigType;
+ while ((sigType = msig.NextArgNormalized()) != ELEMENT_TYPE_END)
+ {
+ argTypes[argNum++] = sigType;
+ if (argNum > 4)
+ break;
+ }
+ msig.Reset();
+
+ BOOL fUseInstantiatingMethodStubWorker = FALSE;
+
+ if (argNum > 4)
+ {
+ //
+ // We will need to go through assembly helper.
+ //
+ fUseInstantiatingMethodStubWorker = TRUE;
+
+ // Allocate space for frame before pushing the arguments for the assembly helper
+ X86EmitSubEsp((INT32)(AlignUp(sizeof(void *) /* extra stack param */ + sizeof(GSCookie) + sizeof(StubHelperFrame), 16) - sizeof(void *) /* return address */));
+
+ //
+ // Store extra arg stack arg param for the helper.
+ //
+ CorElementType argType = argTypes[--argNum];
+ switch (argType)
+ {
+ case ELEMENT_TYPE_R4:
+ // movss dword ptr [rsp], xmm?
+ X64EmitMovSSToMem(kXMM3, (X86Reg)4 /*kRSP*/);
+ break;
+ case ELEMENT_TYPE_R8:
+ // movsd qword ptr [rsp], xmm?
+ X64EmitMovSDToMem(kXMM3, (X86Reg)4 /*kRSP*/);
+ break;
+ default:
+ X86EmitIndexRegStoreRSP(0, kR9);
+ break;
+ }
+ }
+
+ //
+ // Shuffle the register arguments
+ //
+ while (argNum > firstRealArg)
+ {
+ CorElementType argType = argTypes[--argNum];
+
+ switch (argType)
+ {
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ // mov xmm#, xmm#-1
+ X64EmitMovXmmXmm((X86Reg)argNum, (X86Reg)(argNum - 1));
+ break;
+ default:
+ //mov reg#, reg#-1
+ X86EmitMovRegReg(c_argRegs[argNum], c_argRegs[argNum-1]);
+ break;
+ }
+ }
+
+ //
+ // Setup the hidden instantiation argument
+ //
+ if (extra != NULL)
+ {
+ X86EmitRegLoad(c_argRegs[paramTypeArgIndex], (UINT_PTR)extra);
+ }
+ else
+ {
+ X86EmitIndexRegLoad(c_argRegs[paramTypeArgIndex], THIS_kREG);
+
+ X86EmitAddReg(THIS_kREG, sizeof(void*));
+ }
+
+ // Use direct call if possible
+ if (pMD->HasStableEntryPoint())
+ {
+ X86EmitRegLoad(kRAX, pMD->GetStableEntryPoint());// MOV RAX, DWORD
+ }
+ else
+ {
+ X86EmitRegLoad(kRAX, (UINT_PTR)pMD->GetAddrOfSlot()); // MOV RAX, DWORD
+
+ X86EmitIndexRegLoad(kRAX, kRAX); // MOV RAX, [RAX]
+ }
+
+ if (fUseInstantiatingMethodStubWorker)
+ {
+ X86EmitPushReg(kRAX);
+
+ UINT cbStack = argit.SizeOfArgStack();
+ _ASSERTE(cbStack > 0);
+
+ X86EmitPushImm32((AlignUp(cbStack, 16) / sizeof(void*)) - 1); // -1 for extra stack arg
+
+ X86EmitRegLoad(kRAX, GetEEFuncEntryPoint(InstantiatingMethodStubWorker));// MOV RAX, DWORD
+ }
+ else
+ {
+ _ASSERTE(argit.SizeOfArgStack() == 0);
+ }
+
+ Emit16(X86_INSTR_JMP_EAX);
+
+#else
+ int paramTypeArgOffset = argit.GetParamTypeArgOffset();
+
+ // It's on the stack
+ if (TransitionBlock::IsStackArgumentOffset(paramTypeArgOffset))
+ {
+ // Pop return address into AX
+ X86EmitPopReg(kEAX);
+
+ if (extra != NULL)
+ {
+ // Push extra dictionary argument
+ X86EmitPushImmPtr(extra);
+ }
+ else
+ {
+ // Push the vtable pointer from "this"
+ X86EmitIndexPush(THIS_kREG, 0);
+ }
+
+ // Put return address back
+ X86EmitPushReg(kEAX);
+ }
+ // It's in a register
+ else
+ {
+ X86Reg paramReg = GetX86ArgumentRegisterFromOffset(paramTypeArgOffset - TransitionBlock::GetOffsetOfArgumentRegisters());
+
+ if (extra != NULL)
+ {
+ X86EmitRegLoad(paramReg, (UINT_PTR)extra);
+ }
+ else
+ {
+ // Just extract the vtable pointer from "this"
+ X86EmitIndexRegLoad(paramReg, THIS_kREG);
+ }
+ }
+
+ if (extra == NULL)
+ {
+ // Unboxing stub case.
+ X86EmitAddReg(THIS_kREG, sizeof(void*));
+ }
+
+ // Use direct call if possible
+ if (pMD->HasStableEntryPoint())
+ {
+ X86EmitNearJump(NewExternalCodeLabel((LPVOID) pMD->GetStableEntryPoint()));
+ }
+ else
+ {
+ // jmp [slot]
+ Emit16(0x25ff);
+ Emit32((DWORD)(size_t)pMD->GetAddrOfSlot());
+ }
+#endif // _TARGET_AMD64_
+}
+#endif // FEATURE_SHARE_GENERIC_CODE && !FEATURE_STUBS_AS_IL
+
+
+#if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO)
+
+typedef BOOL GetModuleInformationProc(
+ HANDLE hProcess,
+ HMODULE hModule,
+ LPMODULEINFO lpmodinfo,
+ DWORD cb
+);
+
+GetModuleInformationProc *g_pfnGetModuleInformation = NULL;
+
+extern "C" VOID __cdecl DebugCheckStubUnwindInfoWorker (CONTEXT *pStubContext)
+{
+ BEGIN_ENTRYPOINT_VOIDRET;
+
+ LOG((LF_STUBS, LL_INFO1000000, "checking stub unwind info:\n"));
+
+ //
+ // Make a copy of the CONTEXT. RtlVirtualUnwind will modify this copy.
+ // DebugCheckStubUnwindInfo will need to restore registers from the
+ // original CONTEXT.
+ //
+ CONTEXT ctx = *pStubContext;
+ ctx.ContextFlags = (CONTEXT_CONTROL | CONTEXT_INTEGER);
+
+ //
+ // Find the upper bound of the stack and address range of KERNEL32. This
+ // is where we expect the unwind to stop.
+ //
+ void *pvStackTop = GetThread()->GetCachedStackBase();
+
+ if (!g_pfnGetModuleInformation)
+ {
+ HMODULE hmodPSAPI = WszGetModuleHandle(W("PSAPI.DLL"));
+
+ if (!hmodPSAPI)
+ {
+ hmodPSAPI = WszLoadLibrary(W("PSAPI.DLL"));
+ if (!hmodPSAPI)
+ {
+ _ASSERTE(!"unable to load PSAPI.DLL");
+ goto ErrExit;
+ }
+ }
+
+ g_pfnGetModuleInformation = (GetModuleInformationProc*)GetProcAddress(hmodPSAPI, "GetModuleInformation");
+ if (!g_pfnGetModuleInformation)
+ {
+ _ASSERTE(!"can't find PSAPI!GetModuleInformation");
+ goto ErrExit;
+ }
+
+ // Intentionally leak hmodPSAPI. We don't want to
+ // LoadLibrary/FreeLibrary every time; that is slow and produces lots of
+ // debugger spew. This is just debugging code, after all...
+ }
+
+ HMODULE hmodKERNEL32 = WszGetModuleHandle(W("KERNEL32"));
+ _ASSERTE(hmodKERNEL32);
+
+ MODULEINFO modinfoKERNEL32;
+ if (!g_pfnGetModuleInformation(GetCurrentProcess(), hmodKERNEL32, &modinfoKERNEL32, sizeof(modinfoKERNEL32)))
+ {
+ _ASSERTE(!"unable to get bounds of KERNEL32");
+ goto ErrExit;
+ }
+
+ //
+ // Unwind until IP is 0, sp is at the stack top, and callee IP is in kernel32.
+ //
+
+ for (;;)
+ {
+ ULONG64 ControlPc = (ULONG64)GetIP(&ctx);
+
+ LOG((LF_STUBS, LL_INFO1000000, "pc %p, sp %p\n", ControlPc, GetSP(&ctx)));
+
+ ULONG64 ImageBase;
+ RUNTIME_FUNCTION *pFunctionEntry = RtlLookupFunctionEntry(
+ ControlPc,
+ &ImageBase,
+ NULL);
+ if (pFunctionEntry)
+ {
+ PVOID HandlerData;
+ ULONG64 EstablisherFrame;
+
+ RtlVirtualUnwind(
+ 0,
+ ImageBase,
+ ControlPc,
+ pFunctionEntry,
+ &ctx,
+ &HandlerData,
+ &EstablisherFrame,
+ NULL);
+
+ ULONG64 NewControlPc = (ULONG64)GetIP(&ctx);
+
+ LOG((LF_STUBS, LL_INFO1000000, "function %p, image %p, new pc %p, new sp %p\n", pFunctionEntry, ImageBase, NewControlPc, GetSP(&ctx)));
+
+ if (!NewControlPc)
+ {
+ if (dac_cast<PTR_BYTE>(GetSP(&ctx)) < (BYTE*)pvStackTop - 0x100)
+ {
+ _ASSERTE(!"SP did not end up at top of stack");
+ goto ErrExit;
+ }
+
+ if (!( ControlPc > (ULONG64)modinfoKERNEL32.lpBaseOfDll
+ && ControlPc < (ULONG64)modinfoKERNEL32.lpBaseOfDll + modinfoKERNEL32.SizeOfImage))
+ {
+ _ASSERTE(!"PC did not end up in KERNEL32");
+ goto ErrExit;
+ }
+
+ break;
+ }
+ }
+ else
+ {
+ // Leaf functions that do not use any stack space or nonvolatile
+ // registers are not required to have unwind info (ex.
+ // USER32!ZwUserCreateWindowEx).
+ ctx.Rip = *(ULONG64*)(ctx.Rsp);
+ ctx.Rsp += sizeof(ULONG64);
+ }
+ }
+ErrExit:
+
+ END_ENTRYPOINT_VOIDRET;
+ return;
+}
+
+//virtual
+VOID StubLinkerCPU::EmitUnwindInfoCheckWorker (CodeLabel *pCheckLabel)
+{
+ STANDARD_VM_CONTRACT;
+ X86EmitCall(pCheckLabel, 0);
+}
+
+//virtual
+VOID StubLinkerCPU::EmitUnwindInfoCheckSubfunction()
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _TARGET_AMD64_
+ // X86EmitCall will generate "mov rax, target/jmp rax", so we have to save
+ // rax on the stack. DO NOT use X86EmitPushReg. That will induce infinite
+ // recursion, since the push may require more unwind info. This "push rax"
+ // will be accounted for by DebugCheckStubUnwindInfo's unwind info
+ // (considered part of its locals), so there doesn't have to be unwind
+ // info for it.
+ Emit8(0x50);
+#endif
+
+ X86EmitNearJump(NewExternalCodeLabel(DebugCheckStubUnwindInfo));
+}
+
+#endif // defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO)
+
+
+#ifdef _TARGET_X86_
+
+//-----------------------------------------------------------------------
+// Generates the inline portion of the code to enable preemptive GC. Hopefully,
+// the inline code is all that will execute most of the time. If this code
+// path is entered at certain times, however, it will need to jump out to
+// a separate out-of-line path which is more expensive. The "pForwardRef"
+// label indicates the start of the out-of-line path.
+//
+// Assumptions:
+// ebx = Thread
+// Preserves
+// all registers except ecx.
+//
+//-----------------------------------------------------------------------
+VOID StubLinkerCPU::EmitEnable(CodeLabel *pForwardRef)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(4 == sizeof( ((Thread*)0)->m_State ));
+ PRECONDITION(4 == sizeof( ((Thread*)0)->m_fPreemptiveGCDisabled ));
+ }
+ CONTRACTL_END;
+
+ // mov byte ptr [ebx + Thread.m_fPreemptiveGCDisabled], 0
+ X86EmitOffsetModRM(0xc6, (X86Reg)0, kEBX, Thread::GetOffsetOfGCFlag());
+ Emit8(0);
+
+ _ASSERTE(FitsInI1(Thread::TS_CatchAtSafePoint));
+
+ // test byte ptr [ebx + Thread.m_State], TS_CatchAtSafePoint
+ X86EmitOffsetModRM(0xf6, (X86Reg)0, kEBX, Thread::GetOffsetOfState());
+ Emit8(Thread::TS_CatchAtSafePoint);
+
+ // jnz RarePath
+ X86EmitCondJump(pForwardRef, X86CondCode::kJNZ);
+
+#ifdef _DEBUG
+ X86EmitDebugTrashReg(kECX);
+#endif
+
+}
+
+
+//-----------------------------------------------------------------------
+// Generates the out-of-line portion of the code to enable preemptive GC.
+// After the work is done, the code jumps back to the "pRejoinPoint"
+// which should be emitted right after the inline part is generated.
+//
+// Assumptions:
+// ebx = Thread
+// Preserves
+// all registers except ecx.
+//
+//-----------------------------------------------------------------------
+VOID StubLinkerCPU::EmitRareEnable(CodeLabel *pRejoinPoint)
+{
+ STANDARD_VM_CONTRACT;
+
+ X86EmitCall(NewExternalCodeLabel((LPVOID) StubRareEnable), 0);
+#ifdef _DEBUG
+ X86EmitDebugTrashReg(kECX);
+#endif
+ if (pRejoinPoint)
+ {
+ X86EmitNearJump(pRejoinPoint);
+ }
+
+}
+
+
+//-----------------------------------------------------------------------
+// Generates the inline portion of the code to disable preemptive GC. Hopefully,
+// the inline code is all that will execute most of the time. If this code
+// path is entered at certain times, however, it will need to jump out to
+// a separate out-of-line path which is more expensive. The "pForwardRef"
+// label indicates the start of the out-of-line path.
+//
+// Assumptions:
+// ebx = Thread
+// Preserves
+// all registers except ecx.
+//
+//-----------------------------------------------------------------------
+VOID StubLinkerCPU::EmitDisable(CodeLabel *pForwardRef, BOOL fCallIn, X86Reg ThreadReg)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(4 == sizeof( ((Thread*)0)->m_fPreemptiveGCDisabled ));
+ PRECONDITION(4 == sizeof(g_TrapReturningThreads));
+ }
+ CONTRACTL_END;
+
+#if defined(FEATURE_COMINTEROP) && defined(MDA_SUPPORTED)
+ // If the reentrancy MDA is active and this is a call-in, detect the case
+ // where the thread is already in cooperative GC mode (an illegal
+ // reentrancy) and fire the probe inline below.
+ if (fCallIn && (NULL != MDA_GET_ASSISTANT(Reentrancy)))
+ {
+ CodeLabel *pNotReentrantLabel = NewCodeLabel();
+
+ // test byte ptr [ebx + Thread.m_fPreemptiveGCDisabled],1
+ X86EmitOffsetModRM(0xf6, (X86Reg)0, ThreadReg, Thread::GetOffsetOfGCFlag());
+ Emit8(1);
+
+ // jz NotReentrant
+ X86EmitCondJump(pNotReentrantLabel, X86CondCode::kJZ);
+
+ X86EmitPushReg(kEAX);
+ X86EmitPushReg(kEDX);
+ X86EmitPushReg(kECX);
+
+ X86EmitCall(NewExternalCodeLabel((LPVOID) HasIllegalReentrancy), 0);
+
+ // If the probe fires, we go ahead and allow the call anyway. At this point
+ // the GC heap could already be corrupted, so the probe detects the illegal
+ // case but does not prevent it.
+
+ X86EmitPopReg(kECX);
+ X86EmitPopReg(kEDX);
+ X86EmitPopReg(kEAX);
+
+ EmitLabel(pNotReentrantLabel);
+ }
+#endif
+
+ // mov byte ptr [ebx + Thread.m_fPreemptiveGCDisabled],1
+ X86EmitOffsetModRM(0xc6, (X86Reg)0, ThreadReg, Thread::GetOffsetOfGCFlag());
+ Emit8(1);
+
+ // cmp dword ptr g_TrapReturningThreads, 0
+ Emit16(0x3d83);
+ EmitPtr((void *)&g_TrapReturningThreads);
+ Emit8(0);
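+ // Encoding note: Emit16(0x3d83) writes the little-endian byte pair 83 3D,
+ // i.e. "cmp dword ptr [disp32], imm8"; the address and the 0 immediate
+ // follow.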
+
+ // jnz RarePath
+ X86EmitCondJump(pForwardRef, X86CondCode::kJNZ);
+
+#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORESYSTEM)
+ // If we are checking whether the current thread holds the loader lock, vector
+ // such cases to the rare disable pathway, where we can check again.
+ if (fCallIn && ShouldCheckLoaderLock())
+ {
+ X86EmitPushReg(kEAX);
+ X86EmitPushReg(kEDX);
+
+ if (ThreadReg == kECX)
+ X86EmitPushReg(kECX);
+
+ // BOOL AuxUlibIsDLLSynchronizationHeld(BOOL *IsHeld)
+ //
+ // We need the return value and the returned BOOL to both be TRUE. If
+ // either is FALSE, the call failed or the lock is not held; either way,
+ // the probe should not fire.
+
+ X86EmitPushReg(kEDX); // BOOL temp
+ Emit8(0x54); // push ESP because arg is &temp
+ X86EmitCall(NewExternalCodeLabel((LPVOID) AuxUlibIsDLLSynchronizationHeld), 0);
+
+ // callee has popped.
+ X86EmitPopReg(kEDX); // recover temp
+
+ CodeLabel *pPopLabel = NewCodeLabel();
+
+ Emit16(0xc085); // test eax, eax
+ X86EmitCondJump(pPopLabel, X86CondCode::kJZ);
+
+ Emit16(0xd285); // test edx, edx
+
+ EmitLabel(pPopLabel); // retain the conditional flags across the pops
+
+ if (ThreadReg == kECX)
+ X86EmitPopReg(kECX);
+
+ X86EmitPopReg(kEDX);
+ X86EmitPopReg(kEAX);
+
+ X86EmitCondJump(pForwardRef, X86CondCode::kJNZ);
+ }
+#endif
+
+#ifdef _DEBUG
+ if (ThreadReg != kECX)
+ X86EmitDebugTrashReg(kECX);
+#endif
+
+}
+
+
+//-----------------------------------------------------------------------
+// Generates the out-of-line portion of the code to disable preemptive GC.
+// After the work is done, the code jumps back to the "pRejoinPoint"
+// which should be emitted right after the inline part is generated. However,
+// if we cannot execute managed code at this time, an exception is thrown
+// which cannot be caught by managed code.
+//
+// Assumptions:
+// ebx = Thread
+// Preserves
+// all registers except ecx, eax.
+//
+//-----------------------------------------------------------------------
+VOID StubLinkerCPU::EmitRareDisable(CodeLabel *pRejoinPoint)
+{
+ STANDARD_VM_CONTRACT;
+
+ X86EmitCall(NewExternalCodeLabel((LPVOID) StubRareDisableTHROW), 0);
+
+#ifdef _DEBUG
+ X86EmitDebugTrashReg(kECX);
+#endif
+ X86EmitNearJump(pRejoinPoint);
+}
+
+#ifdef FEATURE_COMINTEROP
+//-----------------------------------------------------------------------
+// Generates the out-of-line portion of the code to disable preemptive GC.
+// After the work is done, the code normally jumps back to the "pRejoinPoint"
+// which should be emitted right after the inline part is generated. However,
+// if we cannot execute managed code at this time, an HRESULT is returned
+// via the ExitPoint.
+//
+// Assumptions:
+// ebx = Thread
+// Preserves
+// all registers except ecx, eax.
+//
+//-----------------------------------------------------------------------
+VOID StubLinkerCPU::EmitRareDisableHRESULT(CodeLabel *pRejoinPoint, CodeLabel *pExitPoint)
+{
+ STANDARD_VM_CONTRACT;
+
+ X86EmitCall(NewExternalCodeLabel((LPVOID) StubRareDisableHR), 0);
+
+#ifdef _DEBUG
+ X86EmitDebugTrashReg(kECX);
+#endif
+
+ // test eax, eax ;; test the result of StubRareDisableHR
+ Emit16(0xc085);
+
+ // JZ pRejoinPoint
+ X86EmitCondJump(pRejoinPoint, X86CondCode::kJZ);
+
+ X86EmitNearJump(pExitPoint);
+}
+#endif // FEATURE_COMINTEROP
+
+#endif // _TARGET_X86_
+
+#endif // CROSSGEN_COMPILE
+
+
+VOID StubLinkerCPU::EmitShuffleThunk(ShuffleEntry *pShuffleEntryArray)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _TARGET_AMD64_
+
+ // mov SCRATCHREG,rsp
+ X86_64BitOperands();
+ Emit8(0x8b);
+ Emit8(0304 | (SCRATCH_REGISTER_X86REG << 3));
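+ // ModRM is written in octal since its fields are 3 bits each: 0304 is
+ // mod=11 (register-direct), rm=100 (rsp), with the reg field filled in by
+ // SCRATCH_REGISTER_X86REG << 3.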
+
+ // Save the delegate object ("this") in r11; the real target is loaded
+ // from it just before the final jump. r10 is used as scratch below.
+ // Windows: mov r11, rcx
+ // Unix: mov r11, rdi
+ X86EmitMovRegReg(kR11, THIS_kREG);
+
+#ifdef UNIX_AMD64_ABI
+ for (ShuffleEntry* pEntry = pShuffleEntryArray; pEntry->srcofs != ShuffleEntry::SENTINEL; pEntry++)
+ {
+ if (pEntry->srcofs & ShuffleEntry::REGMASK)
+ {
+ // If source is present in register then destination must also be a register
+ _ASSERTE(pEntry->dstofs & ShuffleEntry::REGMASK);
+
+ X86EmitMovRegReg(c_argRegs[pEntry->dstofs & ShuffleEntry::OFSMASK], c_argRegs[pEntry->srcofs & ShuffleEntry::OFSMASK]);
+ }
+ else if (pEntry->dstofs & ShuffleEntry::REGMASK)
+ {
+ // source must be on the stack
+ _ASSERTE(!(pEntry->srcofs & ShuffleEntry::REGMASK));
+
+ // mov dstreg, [rax + src]
+ X86EmitIndexRegLoad(c_argRegs[pEntry->dstofs & ShuffleEntry::OFSMASK], SCRATCH_REGISTER_X86REG, (pEntry->srcofs + 1) * sizeof(void*));
+ }
+ else
+ {
+ // source must be on the stack
+ _ASSERTE(!(pEntry->srcofs & ShuffleEntry::REGMASK));
+
+ // dest must be on the stack
+ _ASSERTE(!(pEntry->dstofs & ShuffleEntry::REGMASK));
+
+ // mov r10, [rax + src]
+ X86EmitIndexRegLoad (kR10, SCRATCH_REGISTER_X86REG, (pEntry->srcofs + 1) * sizeof(void*));
+
+ // mov [rax + dst], r10
+ X86EmitIndexRegStore (SCRATCH_REGISTER_X86REG, (pEntry->dstofs + 1) * sizeof(void*), kR10);
+ }
+ }
+#else // UNIX_AMD64_ABI
+ UINT step = 1;
+
+ if (pShuffleEntryArray->argtype == ELEMENT_TYPE_END)
+ {
+ // Special handling of open instance methods with return buffer. Move "this"
+ // by two slots, and leave the "retbufptr" between the two slots intact.
+
+ // mov rcx, r8
+ X86EmitMovRegReg(kRCX, kR8);
+
+ // Skip the "this" entry
+ pShuffleEntryArray++;
+
+ // Advance past "this" and the retbufptr slot, leaving retbufptr in place
+ step += 2;
+ }
+
+ // Now shuffle the args by one position:
+ // steps 1-3 : reg args (rcx, rdx, r8)
+ // step 4 : stack->reg arg (r9)
+ // step >4 : stack args
+
+ for(;
+ pShuffleEntryArray->srcofs != ShuffleEntry::SENTINEL;
+ step++, pShuffleEntryArray++)
+ {
+ switch (step)
+ {
+ case 1:
+ case 2:
+ case 3:
+ switch (pShuffleEntryArray->argtype)
+ {
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ // mov xmm-1#, xmm#
+ X64EmitMovXmmXmm((X86Reg)(step - 1), (X86Reg)(step));
+ break;
+ default:
+ // mov argRegs[step-1], argRegs[step]
+ X86EmitMovRegReg(c_argRegs[step-1], c_argRegs[step]);
+ break;
+ }
+ break;
+
+ case 4:
+ {
+ switch (pShuffleEntryArray->argtype)
+ {
+ case ELEMENT_TYPE_R4:
+ X64EmitMovSSFromMem(kXMM3, kRAX, 0x28);
+ break;
+
+ case ELEMENT_TYPE_R8:
+ X64EmitMovSDFromMem(kXMM3, kRAX, 0x28);
+ break;
+
+ default:
+ // mov r9, [rax + 28h]
+ X86EmitIndexRegLoad (kR9, SCRATCH_REGISTER_X86REG, 5*sizeof(void*));
+ }
+ break;
+ }
+ default:
+
+ // mov r10, [rax + (step+1)*sizeof(void*)]
+ X86EmitIndexRegLoad (kR10, SCRATCH_REGISTER_X86REG, (step+1)*sizeof(void*));
+
+ // mov [rax + step*sizeof(void*)], r10
+ X86EmitIndexRegStore (SCRATCH_REGISTER_X86REG, step*sizeof(void*), kR10);
+ }
+ }
+#endif // UNIX_AMD64_ABI
+
+ // mov r10, [r11 + Delegate._methodptraux]
+ X86EmitIndexRegLoad(kR10, kR11, DelegateObject::GetOffsetOfMethodPtrAux());
+ // add r11, DelegateObject::GetOffsetOfMethodPtrAux() - load the indirection cell into r11
+ X86EmitAddReg(kR11, DelegateObject::GetOffsetOfMethodPtrAux());
+ // Now jump to real target
+ // jmp r10
+ X86EmitR2ROp(0xff, (X86Reg)4, kR10);
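+ // Encoding note: opcode FF with reg field 4 (the /4 extension) is the
+ // near indirect jump, so this emits "jmp r10".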
+
+#else // _TARGET_AMD64_
+
+ UINT espadjust = 0;
+ BOOL haveMemMemMove = FALSE;
+
+ ShuffleEntry *pWalk = NULL;
+ for (pWalk = pShuffleEntryArray; pWalk->srcofs != ShuffleEntry::SENTINEL; pWalk++)
+ {
+ if (!(pWalk->dstofs & ShuffleEntry::REGMASK) &&
+ !(pWalk->srcofs & ShuffleEntry::REGMASK) &&
+ pWalk->srcofs != pWalk->dstofs)
+ {
+ haveMemMemMove = TRUE;
+ espadjust = sizeof(void*);
+ break;
+ }
+ }
+
+ if (haveMemMemMove)
+ {
+ // push ecx
+ X86EmitPushReg(THIS_kREG);
+ }
+ else
+ {
+ // mov eax, ecx
+ Emit8(0x8b);
+ Emit8(0300 | SCRATCH_REGISTER_X86REG << 3 | THIS_kREG);
+ }
+
+ UINT16 emptySpot = 0x4 | ShuffleEntry::REGMASK;
+
+ while (true)
+ {
+ for (pWalk = pShuffleEntryArray; pWalk->srcofs != ShuffleEntry::SENTINEL; pWalk++)
+ if (pWalk->dstofs == emptySpot)
+ break;
+
+ if (pWalk->srcofs == ShuffleEntry::SENTINEL)
+ break;
+
+ if ((pWalk->dstofs & ShuffleEntry::REGMASK))
+ {
+ if (pWalk->srcofs & ShuffleEntry::REGMASK)
+ {
+ // mov <dstReg>,<srcReg>
+ Emit8(0x8b);
+ Emit8(static_cast<UINT8>(0300 |
+ (GetX86ArgumentRegisterFromOffset( pWalk->dstofs & ShuffleEntry::OFSMASK ) << 3) |
+ (GetX86ArgumentRegisterFromOffset( pWalk->srcofs & ShuffleEntry::OFSMASK ))));
+ }
+ else
+ {
+ X86EmitEspOffset(0x8b, GetX86ArgumentRegisterFromOffset( pWalk->dstofs & ShuffleEntry::OFSMASK ), pWalk->srcofs+espadjust);
+ }
+ }
+ else
+ {
+ // if the destination is not a register, the source shouldn't be either.
+ _ASSERTE(!(pWalk->srcofs & ShuffleEntry::REGMASK));
+ if (pWalk->srcofs != pWalk->dstofs)
+ {
+ X86EmitEspOffset(0x8b, kEAX, pWalk->srcofs+espadjust);
+ X86EmitEspOffset(0x89, kEAX, pWalk->dstofs+espadjust);
+ }
+ }
+ emptySpot = pWalk->srcofs;
+ }
+
+ // Capture the stacksizedelta while we're at the end of the list.
+ _ASSERTE(pWalk->srcofs == ShuffleEntry::SENTINEL);
+
+ if (haveMemMemMove)
+ X86EmitPopReg(SCRATCH_REGISTER_X86REG);
+
+ if (pWalk->stacksizedelta)
+ X86EmitAddEsp(pWalk->stacksizedelta);
+
+ // Now jump to real target
+ // JMP [SCRATCHREG]
+ // we need to jump indirect so that for virtual delegates eax contains a pointer to the indirection cell
+ X86EmitAddReg(SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfMethodPtrAux());
+ static const BYTE bjmpeax[] = { 0xff, 0x20 };
+ EmitBytes(bjmpeax, sizeof(bjmpeax));
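+ // Encoding note: FF 20 is "jmp dword ptr [eax]" (FF /4 with a [eax]
+ // memory operand), performing the indirect jump through the cell.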
+
+#endif // _TARGET_AMD64_
+}
+
+
+#if !defined(CROSSGEN_COMPILE) && !defined(FEATURE_STUBS_AS_IL)
+
+//===========================================================================
+// Computes hash code for MulticastDelegate.Invoke()
+UINT_PTR StubLinkerCPU::HashMulticastInvoke(MetaSig* pSig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ ArgIterator argit(pSig);
+
+ UINT numStackBytes = argit.SizeOfArgStack();
+
+ if (numStackBytes > 0x7FFF)
+ COMPlusThrow(kNotSupportedException, W("NotSupported_TooManyArgs"));
+
+#ifdef _TARGET_AMD64_
+ // Generate a hash key as follows:
+ // UINT Arg0Type:2; // R4 (1), R8 (2), other (3)
+ // UINT Arg1Type:2; // R4 (1), R8 (2), other (3)
+ // UINT Arg2Type:2; // R4 (1), R8 (2), other (3)
+ // UINT Arg3Type:2; // R4 (1), R8 (2), other (3)
+ // UINT NumArgs:24; // number of arguments
+ // (This should cover all the prestub variations)
+
+ _ASSERTE(!(numStackBytes & 7));
+ UINT hash = (numStackBytes / sizeof(void*)) << 8;
+
+ UINT argNum = 0;
+
+ // NextArg() doesn't take into account the "this" pointer.
+ // That's why we have to special case it here.
+ if (argit.HasThis())
+ {
+ hash |= 3 << (2*argNum);
+ argNum++;
+ }
+
+ if (argit.HasRetBuffArg())
+ {
+ hash |= 3 << (2*argNum);
+ argNum++;
+ }
+
+ for (; argNum < 4; argNum++)
+ {
+ switch (pSig->NextArgNormalized())
+ {
+ case ELEMENT_TYPE_END:
+ argNum = 4;
+ break;
+ case ELEMENT_TYPE_R4:
+ hash |= 1 << (2*argNum);
+ break;
+ case ELEMENT_TYPE_R8:
+ hash |= 2 << (2*argNum);
+ break;
+ default:
+ hash |= 3 << (2*argNum);
+ break;
+ }
+ }
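+ // Worked example (a sketch): for an instance method taking (float, int)
+ // with no return buffer and no stack args, numStackBytes == 0 so the
+ // NumArgs field is 0; "this" sets bits 1:0 to 3, the float sets bits 3:2
+ // to 1, and the int sets bits 5:4 to 3, giving hash == 0x37.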
+
+#else // _TARGET_AMD64_
+
+ // Check whether the function returns a float; if so, the stub has to pop
+ // the x87 floating point stack after every invocation except the last.
+
+ _ASSERTE(!(numStackBytes & 3));
+
+ UINT hash = numStackBytes;
+
+ if (CorTypeInfo::IsFloat(pSig->GetReturnType()))
+ {
+ hash |= 2;
+ }
+#endif // _TARGET_AMD64_
+
+ return hash;
+}
+
+#ifdef _TARGET_X86_
+//===========================================================================
+// Emits code for MulticastDelegate.Invoke()
+VOID StubLinkerCPU::EmitDelegateInvoke()
+{
+ STANDARD_VM_CONTRACT;
+
+ CodeLabel *pNullLabel = NewCodeLabel();
+
+ // test THISREG, THISREG
+ X86EmitR2ROp(0x85, THIS_kREG, THIS_kREG);
+
+ // jz null
+ X86EmitCondJump(pNullLabel, X86CondCode::kJZ);
+
+ // mov SCRATCHREG, [THISREG + Delegate.FP] ; Save target stub in register
+ X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfMethodPtr());
+
+ // mov THISREG, [THISREG + Delegate.OR] ; replace "this" pointer
+ X86EmitIndexRegLoad(THIS_kREG, THIS_kREG, DelegateObject::GetOffsetOfTarget());
+
+ // jmp SCRATCHREG
+ Emit16(0xe0ff | (SCRATCH_REGISTER_X86REG<<8));
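+ // Encoding note: Emit16 writes FF first, then E0|SCRATCHREG; FF /4 with a
+ // register-direct ModRM is "jmp reg".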
+
+ // Do a null throw
+ EmitLabel(pNullLabel);
+
+ // mov ECX, CORINFO_NullReferenceException
+ Emit8(0xb8+kECX);
+ Emit32(CORINFO_NullReferenceException);
+
+ X86EmitCall(NewExternalCodeLabel(GetEEFuncEntryPoint(JIT_InternalThrowFromHelper)), 0);
+
+ X86EmitReturn(0);
+}
+#endif // _TARGET_X86_
+
+VOID StubLinkerCPU::EmitMulticastInvoke(UINT_PTR hash)
+{
+ STANDARD_VM_CONTRACT;
+
+ int thisRegOffset = MulticastFrame::GetOffsetOfTransitionBlock() +
+ TransitionBlock::GetOffsetOfArgumentRegisters() + offsetof(ArgumentRegisters, THIS_REG);
+
+ // push the methoddesc on the stack
+ // mov eax, [ecx + offsetof(_methodAuxPtr)]
+ X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfMethodPtrAux());
+
+ // Push a MulticastFrame on the stack.
+ EmitMethodStubProlog(MulticastFrame::GetMethodFrameVPtr(), MulticastFrame::GetOffsetOfTransitionBlock());
+
+#ifdef _TARGET_X86_
+ // Frame is ready to be inspected by debugger for patch location
+ EmitPatchLabel();
+#else // _TARGET_AMD64_
+
+ // Save register arguments in their home locations.
+ // Non-FP registers are already saved by EmitMethodStubProlog.
+ // (Assumes Sig.NextArg() does not enum RetBuffArg or "this".)
+
+ int argNum = 0;
+ __int32 argOfs = MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs();
+ CorElementType argTypes[4];
+ CorElementType argType;
+
+ // 'this'
+ argOfs += sizeof(void*);
+ argTypes[argNum] = ELEMENT_TYPE_I8;
+ argNum++;
+
+ do
+ {
+ argType = ELEMENT_TYPE_END;
+
+ switch ((hash >> (2 * argNum)) & 3)
+ {
+ case 0:
+ argType = ELEMENT_TYPE_END;
+ break;
+ case 1:
+ argType = ELEMENT_TYPE_R4;
+
+ // movss dword ptr [rsi + argOfs], xmm?
+ X64EmitMovSSToMem((X86Reg)argNum, kRSI, argOfs);
+ break;
+ case 2:
+ argType = ELEMENT_TYPE_R8;
+
+ // movsd qword ptr [rsi + argOfs], xmm?
+ X64EmitMovSDToMem((X86Reg)argNum, kRSI, argOfs);
+ break;
+ default:
+ argType = ELEMENT_TYPE_I;
+ break;
+ }
+
+ argOfs += sizeof(void*);
+ argTypes[argNum] = argType;
+ argNum++;
+ }
+ while (argNum < 4 && ELEMENT_TYPE_END != argType);
+
+ _ASSERTE(4 == argNum || ELEMENT_TYPE_END == argTypes[argNum-1]);
+
+#endif // _TARGET_AMD64_
+
+ // TODO: on AMD64, pick different regs for locals so we don't need the pushes
+
+ // push edi ;; Save EDI (want to use it as loop index)
+ X86EmitPushReg(kEDI);
+
+ // xor edi,edi ;; Loop counter: EDI=0,1,2...
+ X86EmitZeroOutReg(kEDI);
+
+ CodeLabel *pLoopLabel = NewCodeLabel();
+ CodeLabel *pEndLoopLabel = NewCodeLabel();
+
+ EmitLabel(pLoopLabel);
+
+ // Entry:
+ // EDI == iteration counter
+
+ // mov ecx, [esi + this] ;; get delegate
+ X86EmitIndexRegLoad(THIS_kREG, kESI, thisRegOffset);
+
+ // cmp edi,[ecx]._invocationCount
+ X86EmitOp(0x3b, kEDI, THIS_kREG, DelegateObject::GetOffsetOfInvocationCount());
+
+ // je ENDLOOP
+ X86EmitCondJump(pEndLoopLabel, X86CondCode::kJZ);
+
+#ifdef _TARGET_AMD64_
+
+ INT32 numStackBytes = (INT32)((hash >> 8) * sizeof(void *));
+
+ INT32 stackUsed, numStackArgs, ofs;
+
+ // Push any stack args, plus an extra location
+ // for rsp alignment if needed
+
+ numStackArgs = numStackBytes / sizeof(void*);
+
+ // 1 push above, so stack is currently misaligned
+ const unsigned STACK_ALIGN_ADJUST = 8;
+
+ if (!numStackArgs)
+ {
+ // sub rsp, 28h ;; 4 reg arg home locs + rsp alignment
+ stackUsed = 0x20 + STACK_ALIGN_ADJUST;
+ X86EmitSubEsp(stackUsed);
+ }
+ else
+ {
+ stackUsed = numStackArgs * sizeof(void*);
+
+ // If the stack is misaligned, then an odd number of arguments
+ // will naturally align the stack.
+ if ( ((numStackArgs & 1) == 0)
+ != (STACK_ALIGN_ADJUST == 0))
+ {
+ X86EmitPushReg(kRAX);
+ stackUsed += sizeof(void*);
+ }
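+ // E.g. with two stack args (even), the "push rax" above restores 16-byte
+ // alignment; with three (odd), the arg pushes themselves realign the stack.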
+
+ ofs = MulticastFrame::GetOffsetOfTransitionBlock() +
+ TransitionBlock::GetOffsetOfArgs() + sizeof(ArgumentRegisters) + numStackBytes;
+
+ while (numStackArgs--)
+ {
+ ofs -= sizeof(void*);
+
+ // push [rsi + ofs] ;; Push stack args
+ X86EmitIndexPush(kESI, ofs);
+ }
+
+ // sub rsp, 20h ;; Create 4 reg arg home locations
+ X86EmitSubEsp(0x20);
+
+ stackUsed += 0x20;
+ }
+
+ for(
+ argNum = 0, argOfs = MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs();
+ argNum < 4 && argTypes[argNum] != ELEMENT_TYPE_END;
+ argNum++, argOfs += sizeof(void*)
+ )
+ {
+ switch (argTypes[argNum])
+ {
+ case ELEMENT_TYPE_R4:
+ // movss xmm?, dword ptr [rsi + argOfs]
+ X64EmitMovSSFromMem((X86Reg)argNum, kRSI, argOfs);
+ break;
+ case ELEMENT_TYPE_R8:
+ // movsd xmm?, qword ptr [rsi + argOfs]
+ X64EmitMovSDFromMem((X86Reg)argNum, kRSI, argOfs);
+ break;
+ default:
+ if (c_argRegs[argNum] != THIS_kREG)
+ {
+ // mov r*, [rsi + dstOfs]
+ X86EmitIndexRegLoad(c_argRegs[argNum], kESI,argOfs);
+ }
+ break;
+ } // switch
+ }
+
+ // mov SCRATCHREG, [rcx+Delegate._invocationList] ;;fetch invocation list
+ X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfInvocationList());
+
+ // mov SCRATCHREG, [SCRATCHREG+m_Array+rdi*8] ;; index into invocation list
+ X86EmitOp(0x8b, kEAX, SCRATCH_REGISTER_X86REG, static_cast<int>(PtrArray::GetDataOffset()), kEDI, sizeof(void*), k64BitOp);
+
+ // mov THISREG, [SCRATCHREG+Delegate.object] ;;replace "this" pointer
+ X86EmitIndexRegLoad(THIS_kREG, SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfTarget());
+
+ // call [SCRATCHREG+Delegate.target] ;; call current subscriber
+ X86EmitOffsetModRM(0xff, (X86Reg)2, SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfMethodPtr());
+
+ // add rsp, stackUsed ;; Clean up stack
+ X86EmitAddEsp(stackUsed);
+
+ // inc edi
+ Emit16(0xC7FF);
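+ // Encoding note: Emit16(0xC7FF) writes FF C7, the two-byte "inc edi"
+ // (FF /0); the one-byte 0x47 form used on x86 is a REX prefix on AMD64.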
+
+#else // _TARGET_AMD64_
+
+ UINT16 numStackBytes = static_cast<UINT16>(hash & ~3);
+
+ // ..repush & reenregister args..
+ INT32 ofs = numStackBytes + MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs();
+ while (ofs != MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs())
+ {
+ ofs -= sizeof(void*);
+ X86EmitIndexPush(kESI, ofs);
+ }
+
+ #define ARGUMENT_REGISTER(regname) if (k##regname != THIS_kREG) { X86EmitIndexRegLoad(k##regname, kESI, \
+ offsetof(ArgumentRegisters, regname) + MulticastFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters()); }
+
+ ENUM_ARGUMENT_REGISTERS_BACKWARD();
+
+ #undef ARGUMENT_REGISTER
+
+ // mov SCRATCHREG, [ecx+Delegate._invocationList] ;;fetch invocation list
+ X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfInvocationList());
+
+ // mov SCRATCHREG, [SCRATCHREG+m_Array+edi*4] ;; index into invocation list
+ X86EmitOp(0x8b, kEAX, SCRATCH_REGISTER_X86REG, PtrArray::GetDataOffset(), kEDI, sizeof(void*));
+
+ // mov THISREG, [SCRATCHREG+Delegate.object] ;;replace "this" pointer
+ X86EmitIndexRegLoad(THIS_kREG, SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfTarget());
+
+ // call [SCRATCHREG+Delegate.target] ;; call current subscriber
+ X86EmitOffsetModRM(0xff, (X86Reg)2, SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfMethodPtr());
+ INDEBUG(Emit8(0x90)); // Emit a nop after the call in debug so that
+ // we know that this is a call that can directly call
+ // managed code
+
+ // inc edi
+ Emit8(0x47);
+
+ if (hash & 2) // CorTypeInfo::IsFloat(pSig->GetReturnType())
+ {
+ // if the return value is a float/double check if we just did the last call - if not,
+ // emit the pop of the float stack
+
+ // mov SCRATCHREG, [esi + this] ;; get delegate
+ X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, kESI, thisRegOffset);
+
+ // cmp edi,[SCRATCHREG]._invocationCount
+ X86EmitOffsetModRM(0x3b, kEDI, SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfInvocationCount());
+
+ CodeLabel *pNoFloatStackPopLabel = NewCodeLabel();
+
+ // je NOFLOATSTACKPOP
+ X86EmitCondJump(pNoFloatStackPopLabel, X86CondCode::kJZ);
+
+ // fstp 0
+ Emit16(0xd8dd);
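+ // Encoding note: DD D8 is "fstp st(0)", which discards the top of the x87
+ // stack -- the return value of every invocation except the last.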
+
+ // NoFloatStackPopLabel:
+ EmitLabel(pNoFloatStackPopLabel);
+ }
+
+#endif // _TARGET_AMD64_
+
+ // The debugger may need to stop here, so grab the offset of this code.
+ EmitPatchLabel();
+
+ // jmp LOOP
+ X86EmitNearJump(pLoopLabel);
+
+ //ENDLOOP:
+ EmitLabel(pEndLoopLabel);
+
+ // pop edi ;; Restore edi
+ X86EmitPopReg(kEDI);
+
+ EmitCheckGSCookie(kESI, MulticastFrame::GetOffsetOfGSCookie());
+
+ // Epilog
+ EmitMethodStubEpilog(numStackBytes, MulticastFrame::GetOffsetOfTransitionBlock());
+}
+
+VOID StubLinkerCPU::EmitSecureDelegateInvoke(UINT_PTR hash)
+{
+ STANDARD_VM_CONTRACT;
+
+ int thisRegOffset = SecureDelegateFrame::GetOffsetOfTransitionBlock() +
+ TransitionBlock::GetOffsetOfArgumentRegisters() + offsetof(ArgumentRegisters, THIS_REG);
+
+ // push the methoddesc on the stack
+ // mov eax, [ecx + offsetof(_invocationCount)]
+ X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfInvocationCount());
+
+ // Push a SecureDelegateFrame on the stack.
+ EmitMethodStubProlog(SecureDelegateFrame::GetMethodFrameVPtr(), SecureDelegateFrame::GetOffsetOfTransitionBlock());
+
+#ifdef _TARGET_X86_
+ // Frame is ready to be inspected by debugger for patch location
+ EmitPatchLabel();
+#else // _TARGET_AMD64_
+
+ // Save register arguments in their home locations.
+ // Non-FP registers are already saved by EmitMethodStubProlog.
+ // (Assumes Sig.NextArg() does not enum RetBuffArg or "this".)
+
+ int argNum = 0;
+ __int32 argOfs = SecureDelegateFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs();
+ CorElementType argTypes[4];
+ CorElementType argType;
+
+ // 'this'
+ argOfs += sizeof(void*);
+ argTypes[argNum] = ELEMENT_TYPE_I8;
+ argNum++;
+
+ do
+ {
+ argType = ELEMENT_TYPE_END;
+
+ switch ((hash >> (2 * argNum)) & 3)
+ {
+ case 0:
+ argType = ELEMENT_TYPE_END;
+ break;
+ case 1:
+ argType = ELEMENT_TYPE_R4;
+
+ // movss dword ptr [rsi + argOfs], xmm?
+ X64EmitMovSSToMem((X86Reg)argNum, kRSI, argOfs);
+ break;
+ case 2:
+ argType = ELEMENT_TYPE_R8;
+
+ // movsd qword ptr [rsi + argOfs], xmm?
+ X64EmitMovSDToMem((X86Reg)argNum, kRSI, argOfs);
+ break;
+ default:
+ argType = ELEMENT_TYPE_I;
+ break;
+ }
+
+ argOfs += sizeof(void*);
+ argTypes[argNum] = argType;
+ argNum++;
+ }
+ while (argNum < 4 && ELEMENT_TYPE_END != argType);
+
+ _ASSERTE(4 == argNum || ELEMENT_TYPE_END == argTypes[argNum-1]);
+
+#endif // _TARGET_AMD64_
+
+ // mov ecx, [esi + this] ;; get delegate
+ X86EmitIndexRegLoad(THIS_kREG, kESI, thisRegOffset);
+
+#ifdef _TARGET_AMD64_
+
+ INT32 numStackBytes = (INT32)((hash >> 8) * sizeof(void *));
+
+ INT32 stackUsed, numStackArgs, ofs;
+
+ // Push any stack args, plus an extra location
+ // for rsp alignment if needed
+
+ numStackArgs = numStackBytes / sizeof(void*);
+
+ // Unlike the multicast stub, no extra push above, so the stack is
+ // currently aligned
+ const unsigned STACK_ALIGN_ADJUST = 0;
+
+ if (!numStackArgs)
+ {
+ // sub rsp, 20h ;; 4 reg arg home locs
+ stackUsed = 0x20 + STACK_ALIGN_ADJUST;
+ X86EmitSubEsp(stackUsed);
+ }
+ else
+ {
+ stackUsed = numStackArgs * sizeof(void*);
+
+ // If the stack is misaligned, then an odd number of arguments
+ // will naturally align the stack.
+ if ( ((numStackArgs & 1) == 0)
+ != (STACK_ALIGN_ADJUST == 0))
+ {
+ X86EmitPushReg(kRAX);
+ stackUsed += sizeof(void*);
+ }
+
+ ofs = SecureDelegateFrame::GetOffsetOfTransitionBlock() +
+ TransitionBlock::GetOffsetOfArgs() + sizeof(ArgumentRegisters) + numStackBytes;
+
+ while (numStackArgs--)
+ {
+ ofs -= sizeof(void*);
+
+ // push [rsi + ofs] ;; Push stack args
+ X86EmitIndexPush(kESI, ofs);
+ }
+
+ // sub rsp, 20h ;; Create 4 reg arg home locations
+ X86EmitSubEsp(0x20);
+
+ stackUsed += 0x20;
+ }
+
+ int thisArgNum = 0;
+
+ for(
+ argNum = 0, argOfs = SecureDelegateFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs();
+ argNum < 4 && argTypes[argNum] != ELEMENT_TYPE_END;
+ argNum++, argOfs += sizeof(void*)
+ )
+ {
+ switch (argTypes[argNum])
+ {
+ case ELEMENT_TYPE_R4:
+ // movss xmm?, dword ptr [rsi + argOfs]
+ X64EmitMovSSFromMem((X86Reg)argNum, kRSI, argOfs);
+ break;
+ case ELEMENT_TYPE_R8:
+ // movsd xmm?, qword ptr [rsi + argOfs]
+ X64EmitMovSDFromMem((X86Reg)argNum, kRSI, argOfs);
+ break;
+ default:
+ if (c_argRegs[argNum] != THIS_kREG)
+ {
+ // mov r*, [rsi + dstOfs]
+ X86EmitIndexRegLoad(c_argRegs[argNum], kESI,argOfs);
+ }
+ break;
+ } // switch
+ }
+
+ // mov SCRATCHREG, [rcx+Delegate._invocationList] ;;fetch the inner delegate
+ X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfInvocationList());
+
+ // mov THISREG, [SCRATCHREG+Delegate.object] ;;replace "this" pointer
+ X86EmitIndexRegLoad(c_argRegs[thisArgNum], SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfTarget());
+
+ // call [SCRATCHREG+Delegate.target] ;; call current subscriber
+ X86EmitOffsetModRM(0xff, (X86Reg)2, SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfMethodPtr());
+
+ // add rsp, stackUsed ;; Clean up stack
+ X86EmitAddEsp(stackUsed);
+
+#else // _TARGET_AMD64_
+
+ UINT16 numStackBytes = static_cast<UINT16>(hash & ~3);
+
+ // ..repush & reenregister args..
+ INT32 ofs = numStackBytes + SecureDelegateFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs();
+ while (ofs != SecureDelegateFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgs())
+ {
+ ofs -= sizeof(void*);
+ X86EmitIndexPush(kESI, ofs);
+ }
+
+ #define ARGUMENT_REGISTER(regname) if (k##regname != THIS_kREG) { X86EmitIndexRegLoad(k##regname, kESI, \
+ offsetof(ArgumentRegisters, regname) + SecureDelegateFrame::GetOffsetOfTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters()); }
+
+ ENUM_ARGUMENT_REGISTERS_BACKWARD();
+
+ #undef ARGUMENT_REGISTER
+
+ // mov SCRATCHREG, [ecx+Delegate._invocationList] ;;fetch the inner delegate
+ X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfInvocationList());
+
+ // mov THISREG, [SCRATCHREG+Delegate.object] ;;replace "this" pointer
+ X86EmitIndexRegLoad(THIS_kREG, SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfTarget());
+
+ // call [SCRATCHREG+Delegate.target] ;; call current subscriber
+ X86EmitOffsetModRM(0xff, (X86Reg)2, SCRATCH_REGISTER_X86REG, DelegateObject::GetOffsetOfMethodPtr());
+ INDEBUG(Emit8(0x90)); // Emit a nop after the call in debug so that
+ // we know that this is a call that can directly call
+ // managed code
+
+#endif // _TARGET_AMD64_
+
+ // The debugger may need to stop here, so grab the offset of this code.
+ EmitPatchLabel();
+
+ EmitCheckGSCookie(kESI, SecureDelegateFrame::GetOffsetOfGSCookie());
+
+ // Epilog
+ EmitMethodStubEpilog(numStackBytes, SecureDelegateFrame::GetOffsetOfTransitionBlock());
+}
+
+#ifndef FEATURE_ARRAYSTUB_AS_IL
+
+// Little helper to generate code to move nbytes bytes of non-ref memory
+
+void generate_noref_copy (unsigned nbytes, StubLinkerCPU* sl)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // If the size is pointer-aligned, we'll use movsd
+ if (IS_ALIGNED(nbytes, sizeof(void*)))
+ {
+ // If there are fewer than 4 pointers to copy, "unroll" the "rep movsd"
+ if (nbytes <= 3*sizeof(void*))
+ {
+ while (nbytes > 0)
+ {
+ // movsd
+ sl->X86_64BitOperands();
+ sl->Emit8(0xa5);
+
+ nbytes -= sizeof(void*);
+ }
+ }
+ else
+ {
+ // mov ECX, size / 4
+ sl->Emit8(0xb8+kECX);
+ sl->Emit32(nbytes / sizeof(void*));
+
+ // rep movsd
+ sl->Emit8(0xf3);
+ sl->X86_64BitOperands();
+ sl->Emit8(0xa5);
+ }
+ }
+ else
+ {
+ // mov ECX, size
+ sl->Emit8(0xb8+kECX);
+ sl->Emit32(nbytes);
+
+ // rep movsb
+ sl->Emit16(0xa4f3);
+ }
+}
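+// Example behavior of the helper above: a 12-byte copy on x86 is
+// pointer-aligned and unrolls into three "movsd" instructions, while a
+// 6-byte copy falls back to "mov ecx, 6 / rep movsb".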
+
+
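+// Loads an array-op argument: if it lives in a register, that register is
+// returned and no code is emitted; otherwise this emits
+// "mov kRegIfFromMem, [esp + idxloc + ofsadjust]" and returns kRegIfFromMem.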
+X86Reg LoadArrayOpArg (
+ UINT32 idxloc,
+ StubLinkerCPU *psl,
+ X86Reg kRegIfFromMem,
+ UINT ofsadjust
+ AMD64_ARG(StubLinkerCPU::X86OperandSize OperandSize = StubLinkerCPU::k64BitOp)
+ )
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!TransitionBlock::IsStackArgumentOffset(idxloc))
+ return GetX86ArgumentRegisterFromOffset(idxloc - TransitionBlock::GetOffsetOfArgumentRegisters());
+
+ psl->X86EmitEspOffset(0x8b, kRegIfFromMem, idxloc + ofsadjust AMD64_ARG(OperandSize));
+ return kRegIfFromMem;
+}
+
+VOID StubLinkerCPU::EmitArrayOpStubThrow(unsigned exConst, unsigned cbRetArg)
+{
+ STANDARD_VM_CONTRACT;
+
+ //ArrayOpStub*Exception
+ X86EmitPopReg(kESI);
+ X86EmitPopReg(kEDI);
+
+ // mov ecx, exConst ;; the exception code to throw
+ Emit8(0xb8 | kECX);
+ Emit32(exConst);
+ //InternalExceptionWorker
+
+ X86EmitPopReg(kEDX);
+ // add esp, cbRetArg ;; callee-pop of the stub's stack args (pArrayOpScript->m_cbretpop)
+ Emit8(0x81);
+ Emit8(0xc0 | 0x4);
+ Emit32(cbRetArg);
+ X86EmitPushReg(kEDX);
+ X86EmitNearJump(NewExternalCodeLabel((PVOID)JIT_InternalThrow));
+}
+
+//===========================================================================
+// Emits code to do an array operation.
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+VOID StubLinkerCPU::EmitArrayOpStub(const ArrayOpScript* pArrayOpScript)
+{
+ STANDARD_VM_CONTRACT;
+
+ // This is the offset to the parameters/what's already pushed on the stack:
+ // return address.
+ const INT locsize = sizeof(void*);
+
+ // ArrayOpScript's stack offsets are built using ArgIterator, which
+ // assumes a TransitionBlock has been pushed, which is not the case
+ // here. rsp + ofsadjust should point at the first argument. Any further
+ // stack modifications below need to adjust ofsadjust appropriately.
+ // baseofsadjust needs to be the stack adjustment at the entry point -
+ // this is used further below to compute how much stack space was used.
+
+ INT ofsadjust = locsize - (INT)sizeof(TransitionBlock);
+
+ // Register usage
+ //
+ //                                           x86                AMD64
+ // Inputs:
+ //  managed array                            THIS_kREG (ecx)    THIS_kREG (rcx)
+ //  index 0                                  edx                rdx
+ //  index 1/value                            <stack>            r8
+ //  index 2/value                            <stack>            r9
+ //  expected element type for LOADADDR       eax                rax rdx
+ // Working registers:
+ //  total (accumulates unscaled offset)      edi                r10
+ //  factor (accumulates the slice factor)    esi                r11
+ X86Reg kArrayRefReg = THIS_kREG;
+#ifdef _TARGET_AMD64_
+ const X86Reg kArrayMTReg = kR10;
+ const X86Reg kTotalReg = kR10;
+ const X86Reg kFactorReg = kR11;
+#else
+ const X86Reg kArrayMTReg = kESI;
+ const X86Reg kTotalReg = kEDI;
+ const X86Reg kFactorReg = kESI;
+#endif
+
+#ifdef _TARGET_AMD64_
+ // Simplifying assumption for fNeedPrologue.
+ _ASSERTE(!pArrayOpScript->m_gcDesc || (pArrayOpScript->m_flags & ArrayOpScript::NEEDSWRITEBARRIER));
+ // Simplifying assumption for saving rsi and rdi.
+ _ASSERTE(!(pArrayOpScript->m_flags & ArrayOpScript::HASRETVALBUFFER) || ArgIterator::IsArgPassedByRef(pArrayOpScript->m_elemsize));
+
+ // Cases where we need to make calls
+ BOOL fNeedScratchArea = ( (pArrayOpScript->m_flags & (ArrayOpScript::NEEDSTYPECHECK | ArrayOpScript::NEEDSWRITEBARRIER))
+ && ( pArrayOpScript->m_op == ArrayOpScript::STORE
+ || ( pArrayOpScript->m_op == ArrayOpScript::LOAD
+ && (pArrayOpScript->m_flags & ArrayOpScript::HASRETVALBUFFER))));
+
+ // Cases where we need to copy large values
+ BOOL fNeedRSIRDI = ( ArgIterator::IsArgPassedByRef(pArrayOpScript->m_elemsize)
+ && ArrayOpScript::LOADADDR != pArrayOpScript->m_op);
+
+ BOOL fNeedPrologue = ( fNeedScratchArea
+ || fNeedRSIRDI);
+#endif
+
+ X86Reg kValueReg;
+
+ CodeLabel *Epilog = NewCodeLabel();
+ CodeLabel *Inner_nullexception = NewCodeLabel();
+ CodeLabel *Inner_rangeexception = NewCodeLabel();
+ CodeLabel *Inner_typeMismatchexception = NULL;
+
+ //
+ // Set up the stack frame.
+ //
+ //
+ // x86:
+ // value
+ // <index n-1>
+ // ...
+ // <index 1>
+ // return address
+ // saved edi
+ // esp -> saved esi
+ //
+ //
+ // AMD64:
+ // value, if rank > 2
+ // ...
+ // + 0x48 more indices
+ // + 0x40 r9 home
+ // + 0x38 r8 home
+ // + 0x30 rdx home
+ // + 0x28 rcx home
+ // + 0x20 return address
+ // + 0x18 scratch area (callee's r9)
+ // + 0x10 scratch area (callee's r8)
+ // + 8 scratch area (callee's rdx)
+ // rsp -> scratch area (callee's rcx)
+ //
+ // If the element type is a value class w/ object references, then rsi
+ // and rdi will also be saved above the scratch area:
+ //
+ // ...
+ // + 0x28 saved rsi
+ // + 0x20 saved rdi
+ // + 0x18 scratch area (callee's r9)
+ // + 0x10 scratch area (callee's r8)
+ // + 8 scratch area (callee's rdx)
+ // rsp -> scratch area (callee's rcx)
+ //
+ // And if no call or movsb is necessary, then the scratch area sits
+ // directly under the MethodDesc*.
+
+ BOOL fSavedESI = FALSE;
+ BOOL fSavedEDI = FALSE;
+
+#ifdef _TARGET_AMD64_
+ if (fNeedPrologue)
+ {
+ // Save argument registers if we'll be making a call before using
+ // them. Note that in this case the element value will always be an
+ // object type, and never be in an xmm register.
+
+ if ( (pArrayOpScript->m_flags & ArrayOpScript::NEEDSTYPECHECK)
+ && ArrayOpScript::STORE == pArrayOpScript->m_op)
+ {
+ // mov [rsp+0x08], rcx
+ X86EmitEspOffset(0x89, kRCX, 0x08);
+ X86EmitEspOffset(0x89, kRDX, 0x10);
+ X86EmitEspOffset(0x89, kR8, 0x18);
+
+ if (pArrayOpScript->m_rank >= 2)
+ X86EmitEspOffset(0x89, kR9, 0x20);
+ }
+
+ if (fNeedRSIRDI)
+ {
+ X86EmitPushReg(kRSI);
+ X86EmitPushReg(kRDI);
+
+ fSavedESI = fSavedEDI = TRUE;
+
+ ofsadjust += 0x10;
+ }
+
+ if (fNeedScratchArea)
+ {
+ // Callee scratch area (0x8 for aligned esp)
+ X86EmitSubEsp(sizeof(ArgumentRegisters) + 0x8);
+ ofsadjust += sizeof(ArgumentRegisters) + 0x8;
+ }
+ }
+#else
+ // Preserve the callee-saved registers
+ // NOTE: if you change the sequence of these pushes, you must also update:
+ // ArrayOpStubNullException
+ // ArrayOpStubRangeException
+ // ArrayOpStubTypeMismatchException
+ _ASSERTE( kTotalReg == kEDI);
+ X86EmitPushReg(kTotalReg);
+ _ASSERTE( kFactorReg == kESI);
+ X86EmitPushReg(kFactorReg);
+
+ fSavedESI = fSavedEDI = TRUE;
+
+ ofsadjust += 2*sizeof(void*);
+#endif
+
+ // Check for null.
+ X86EmitR2ROp(0x85, kArrayRefReg, kArrayRefReg); // TEST ECX, ECX
+ X86EmitCondJump(Inner_nullexception, X86CondCode::kJZ); // jz Inner_nullexception
+
+ // Do Type Check if needed
+ if (pArrayOpScript->m_flags & ArrayOpScript::NEEDSTYPECHECK)
+ {
+ if (pArrayOpScript->m_op == ArrayOpScript::STORE)
+ {
+ // Get the value to be stored.
+ kValueReg = LoadArrayOpArg(pArrayOpScript->m_fValLoc, this, kEAX, ofsadjust);
+
+ X86EmitR2ROp(0x85, kValueReg, kValueReg); // TEST kValueReg, kValueReg
+ CodeLabel *CheckPassed = NewCodeLabel();
+ X86EmitCondJump(CheckPassed, X86CondCode::kJZ); // storing NULL is OK
+
+ // mov EAX, element type ; possibly trashes kValueReg
+ X86EmitOp(0x8b, kArrayMTReg, kArrayRefReg, 0 AMD64_ARG(k64BitOp)); // mov ESI/R10, [kArrayRefReg]
+
+ X86EmitOp(0x8b, kEAX, kValueReg, 0 AMD64_ARG(k64BitOp)); // mov EAX, [kValueReg] ; possibly trashes kValueReg
+ // cmp EAX, [ESI/R10+m_ElementType]
+ X86_64BitOperands();
+ X86EmitOp(0x3b, kEAX, kArrayMTReg, MethodTable::GetOffsetOfArrayElementTypeHandle());
+ X86EmitCondJump(CheckPassed, X86CondCode::kJZ); // Exact match is OK
+
+ X86EmitRegLoad(kEAX, (UINT_PTR)g_pObjectClass); // mov EAX, g_pObjectMethodTable
+ // cmp EAX, [ESI/R10+m_ElementType]
+ X86_64BitOperands();
+ X86EmitOp(0x3b, kEAX, kArrayMTReg, MethodTable::GetOffsetOfArrayElementTypeHandle());
+ X86EmitCondJump(CheckPassed, X86CondCode::kJZ); // Assigning to array of object is OK
+
+ // Try to call the fast helper first ( ObjIsInstanceOfNoGC ).
+ // If that fails we will fall back to calling the slow helper ( ArrayStoreCheck ) that erects a frame.
+ // See also JitInterfaceX86::JIT_Stelem_Ref
+
+#ifdef _TARGET_AMD64_
+ // RCX contains pointer to object to check (Object*)
+ // RDX contains array type handle
+
+ // mov RCX, [rsp+offsetToObject] ; RCX = Object*
+ X86EmitEspOffset(0x8b, kRCX, ofsadjust + pArrayOpScript->m_fValLoc);
+
+ // get Array TypeHandle
+ // mov RDX, [RSP+offsetOfTypeHandle]
+
+ X86EmitEspOffset(0x8b, kRDX, ofsadjust
+ + TransitionBlock::GetOffsetOfArgumentRegisters()
+ + FIELD_OFFSET(ArgumentRegisters, THIS_REG));
+
+ // mov RDX, [kArrayMTReg+offsetof(MethodTable, m_ElementType)]
+ X86EmitIndexRegLoad(kRDX, kArrayMTReg, MethodTable::GetOffsetOfArrayElementTypeHandle());
+
+#else
+ X86EmitPushReg(kEDX); // Save EDX
+ X86EmitPushReg(kECX); // Pass array object
+
+ X86EmitIndexPush(kArrayMTReg, MethodTable::GetOffsetOfArrayElementTypeHandle()); // push [kArrayMTReg + m_ElementType] ; Array element type handle
+
+ // get address of value to store
+ _ASSERTE(TransitionBlock::IsStackArgumentOffset(pArrayOpScript->m_fValLoc)); // on x86, value will never get a register
+ X86EmitSPIndexPush(pArrayOpScript->m_fValLoc + ofsadjust + 3*sizeof(void*)); // push [ESP+offset] ; the object pointer
+
+#endif // _TARGET_AMD64_
+
+
+ // emit a call to the fast helper
+ // One side effect of this is that we are going to generate a "jnz Epilog" and we DON'T need it
+ // in the fast path, however there are no side effects in emitting
+ // it in the fast path anyway. the reason for that is that it makes
+ // the cleanup code much easier ( we have only 1 place to cleanup the stack and
+ // restore it to the original state )
+ X86EmitCall(NewExternalCodeLabel((LPVOID)ObjIsInstanceOfNoGC), 0);
+ X86EmitCmpRegImm32( kEAX, TypeHandle::CanCast); // CMP EAX, CanCast ; if ObjIsInstanceOfNoGC returns CanCast, we will go the fast path
+ CodeLabel * Cleanup = NewCodeLabel();
+ X86EmitCondJump(Cleanup, X86CondCode::kJZ);
+
+#ifdef _TARGET_AMD64_
+ // get address of value to store
+ // lea rcx, [rsp+offs]
+ X86EmitEspOffset(0x8d, kRCX, ofsadjust + pArrayOpScript->m_fValLoc);
+
+ // get address of 'this'/rcx
+ // lea rdx, [rsp+offs]
+ X86EmitEspOffset(0x8d, kRDX, ofsadjust
+ + TransitionBlock::GetOffsetOfArgumentRegisters()
+ + FIELD_OFFSET(ArgumentRegisters, THIS_REG));
+
+#else
+ // The stack is already setup correctly for the slow helper.
+ _ASSERTE(TransitionBlock::IsStackArgumentOffset(pArrayOpScript->m_fValLoc)); // on x86, value will never get a register
+ X86EmitEspOffset(0x8d, kECX, pArrayOpScript->m_fValLoc + ofsadjust + 2*sizeof(void*)); // lea ECX, [ESP+offset]
+
+ // get address of 'this'
+ X86EmitEspOffset(0x8d, kEDX, 0); // lea EDX, [ESP] ; (address of ECX)
+
+
+#endif
+ AMD64_ONLY(_ASSERTE(fNeedScratchArea));
+ X86EmitCall(NewExternalCodeLabel((LPVOID)ArrayStoreCheck), 0);
+
+ EmitLabel(Cleanup);
+#ifdef _TARGET_AMD64_
+ X86EmitEspOffset(0x8b, kRCX, 0x00 + ofsadjust + TransitionBlock::GetOffsetOfArgumentRegisters());
+ X86EmitEspOffset(0x8b, kRDX, 0x08 + ofsadjust + TransitionBlock::GetOffsetOfArgumentRegisters());
+ X86EmitEspOffset(0x8b, kR8, 0x10 + ofsadjust + TransitionBlock::GetOffsetOfArgumentRegisters());
+
+ if (pArrayOpScript->m_rank >= 2)
+ X86EmitEspOffset(0x8b, kR9, 0x18 + ofsadjust + TransitionBlock::GetOffsetOfArgumentRegisters());
+#else
+ X86EmitPopReg(kECX); // restore regs
+ X86EmitPopReg(kEDX);
+
+
+ X86EmitR2ROp(0x3B, kEAX, kEAX); // CMP EAX, EAX
+ X86EmitCondJump(Epilog, X86CondCode::kJNZ); // This branch never taken, but epilog walker uses it
+#endif
+
+ EmitLabel(CheckPassed);
+ }
+ else
+ {
+ _ASSERTE(pArrayOpScript->m_op == ArrayOpScript::LOADADDR);
+
+ // Load up the hidden type parameter into 'typeReg'
+ X86Reg typeReg = LoadArrayOpArg(pArrayOpScript->m_typeParamOffs, this, kEAX, ofsadjust);
+
+ // 'typeReg' holds the typeHandle for the ARRAY. This must be a ArrayTypeDesc*, so
+ // mask off the low two bits to get the TypeDesc*
+ X86EmitR2ROp(0x83, (X86Reg)4, typeReg); // AND typeReg, 0xFFFFFFFC
+ Emit8(0xFC);
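+ // Encoding note: 0x83 /4 sign-extends its imm8, so 0xFC becomes
+ // 0xFFFFFFFC (and ...FFFFFFFC under REX.W), clearing the two low tag bits
+ // of the type handle.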
+
+ // If 'typeReg' is NULL then we're executing the readonly ::Address and no type check is
+ // needed.
+ CodeLabel *Inner_passedTypeCheck = NewCodeLabel();
+
+ X86EmitCondJump(Inner_passedTypeCheck, X86CondCode::kJZ);
+
+ // Get the parameter of the parameterize type
+ // mov typeReg, [typeReg.m_Arg]
+ X86EmitOp(0x8b, typeReg, typeReg, offsetof(ParamTypeDesc, m_Arg) AMD64_ARG(k64BitOp));
+
+ // Compare this against the element type of the array.
+ // mov ESI/R10, [kArrayRefReg]
+ X86EmitOp(0x8b, kArrayMTReg, kArrayRefReg, 0 AMD64_ARG(k64BitOp));
+ // cmp typeReg, [ESI/R10+m_ElementType];
+ X86EmitOp(0x3b, typeReg, kArrayMTReg, MethodTable::GetOffsetOfArrayElementTypeHandle() AMD64_ARG(k64BitOp));
+
+ // Throw error if not equal
+ Inner_typeMismatchexception = NewCodeLabel();
+ X86EmitCondJump(Inner_typeMismatchexception, X86CondCode::kJNZ);
+ EmitLabel(Inner_passedTypeCheck);
+ }
+ }
+
+ CodeLabel* DoneCheckLabel = 0;
+ if (pArrayOpScript->m_rank == 1 && pArrayOpScript->m_fHasLowerBounds)
+ {
+ DoneCheckLabel = NewCodeLabel();
+ CodeLabel* NotSZArrayLabel = NewCodeLabel();
+
+ // for rank1 arrays, we might actually have two different layouts depending on
+ // if we are ELEMENT_TYPE_ARRAY or ELEMENT_TYPE_SZARRAY.
+
+ // mov EAX, [ARRAY] // EAX holds the method table
+ X86_64BitOperands();
+ X86EmitOp(0x8b, kEAX, kArrayRefReg);
+
+ // test [EAX + m_dwFlags], enum_flag_Category_IfArrayThenSzArray
+ X86_64BitOperands();
+ X86EmitOffsetModRM(0xf7, (X86Reg)0, kEAX, MethodTable::GetOffsetOfFlags());
+ Emit32(MethodTable::GetIfArrayThenSzArrayFlag());
+
+ // jz NotSZArrayLabel
+ X86EmitCondJump(NotSZArrayLabel, X86CondCode::kJZ);
+
+ //Load the passed-in index into the scratch register.
+ const ArrayOpIndexSpec *pai = pArrayOpScript->GetArrayOpIndexSpecs();
+ X86Reg idxReg = LoadArrayOpArg(pai->m_idxloc, this, SCRATCH_REGISTER_X86REG, ofsadjust);
+
+ // cmp idxReg, [kArrayRefReg + LENGTH]
+ X86EmitOp(0x3b, idxReg, kArrayRefReg, ArrayBase::GetOffsetOfNumComponents());
+
+ // jae Inner_rangeexception
+ X86EmitCondJump(Inner_rangeexception, X86CondCode::kJAE);
+
+ // <TODO> if we cared about the efficiency of this, this move could be optimized</TODO>
+ X86EmitR2ROp(0x8b, kTotalReg, idxReg AMD64_ARG(k32BitOp));
+
+ // sub ARRAY, 8 ;; 8 accounts for the lower bound and dim count in the ARRAY
+ X86EmitSubReg(kArrayRefReg, 8); // adjust this pointer so that indexing works out for SZARRAY
+
+ X86EmitNearJump(DoneCheckLabel);
+ EmitLabel(NotSZArrayLabel);
+ }
+
+ // For each index, range-check and mix into accumulated total.
+ UINT idx = pArrayOpScript->m_rank;
+ BOOL firstTime = TRUE;
+ while (idx--)
+ {
+ const ArrayOpIndexSpec *pai = pArrayOpScript->GetArrayOpIndexSpecs() + idx;
+
+ //Load the passed-in index into the scratch register.
+ X86Reg srcreg = LoadArrayOpArg(pai->m_idxloc, this, SCRATCH_REGISTER_X86REG, ofsadjust AMD64_ARG(k32BitOp));
+ if (SCRATCH_REGISTER_X86REG != srcreg)
+ X86EmitR2ROp(0x8b, SCRATCH_REGISTER_X86REG, srcreg AMD64_ARG(k32BitOp));
+
+ // sub SCRATCH, dword ptr [kArrayRefReg + LOWERBOUND]
+ if (pArrayOpScript->m_fHasLowerBounds)
+ {
+ X86EmitOp(0x2b, SCRATCH_REGISTER_X86REG, kArrayRefReg, pai->m_lboundofs);
+ }
+
+ // cmp SCRATCH, dword ptr [kArrayRefReg + LENGTH]
+ X86EmitOp(0x3b, SCRATCH_REGISTER_X86REG, kArrayRefReg, pai->m_lengthofs);
+
+ // jae Inner_rangeexception
+ X86EmitCondJump(Inner_rangeexception, X86CondCode::kJAE);
+
+
+ // SCRATCH == idx - LOWERBOUND
+ //
+ // imul SCRATCH, FACTOR
+ if (!firstTime)
+ {
+ //Can skip the first time since FACTOR==1
+ X86EmitR2ROp(0xaf0f, SCRATCH_REGISTER_X86REG, kFactorReg AMD64_ARG(k32BitOp));
+ }
+
+ // TOTAL += SCRATCH
+ if (firstTime)
+ {
+ // First time, we must zero-init TOTAL. Since
+ // zero-initing and then adding is just equivalent to a
+ // "mov", emit a "mov"
+ // mov TOTAL, SCRATCH
+ X86EmitR2ROp(0x8b, kTotalReg, SCRATCH_REGISTER_X86REG AMD64_ARG(k32BitOp));
+ }
+ else
+ {
+ // add TOTAL, SCRATCH
+ X86EmitR2ROp(0x03, kTotalReg, SCRATCH_REGISTER_X86REG AMD64_ARG(k32BitOp));
+ }
+
+ // FACTOR *= [kArrayRefReg + LENGTH]
+ if (idx != 0)
+ {
+ // No need to update FACTOR on the last iteration
+ // since we won't use it again
+
+ if (firstTime)
+ {
+ // must init FACTOR to 1 first: hence,
+ // the "imul" becomes a "mov"
+ // mov FACTOR, [kArrayRefReg + LENGTH]
+ X86EmitOp(0x8b, kFactorReg, kArrayRefReg, pai->m_lengthofs);
+ }
+ else
+ {
+ // imul FACTOR, [kArrayRefReg + LENGTH]
+ X86EmitOp(0xaf0f, kFactorReg, kArrayRefReg, pai->m_lengthofs);
+ }
+ }
+
+ firstTime = FALSE;
+ }
+
+ if (DoneCheckLabel != 0)
+ EmitLabel(DoneCheckLabel);
+
+ // Pass these values to X86EmitArrayOp() to generate the element address.
+ X86Reg elemBaseReg = kArrayRefReg;
+ X86Reg elemScaledReg = kTotalReg;
+ UINT32 elemSize = pArrayOpScript->m_elemsize;
+ UINT32 elemOfs = pArrayOpScript->m_ofsoffirst;
+
+ if (!(elemSize == 1 || elemSize == 2 || elemSize == 4 || elemSize == 8))
+ {
+ switch (elemSize)
+ {
+ // No way to express this as a SIB byte. Fold the scale
+ // into TOTAL.
+
+ case 16:
+ // shl TOTAL,4
+ X86EmitR2ROp(0xc1, (X86Reg)4, kTotalReg AMD64_ARG(k32BitOp));
+ Emit8(4);
+ break;
+
+ case 32:
+ // shl TOTAL,5
+ X86EmitR2ROp(0xc1, (X86Reg)4, kTotalReg AMD64_ARG(k32BitOp));
+ Emit8(5);
+ break;
+
+ case 64:
+ // shl TOTAL,6
+ X86EmitR2ROp(0xc1, (X86Reg)4, kTotalReg AMD64_ARG(k32BitOp));
+ Emit8(6);
+ break;
+
+ default:
+ // imul TOTAL, elemScale
+ X86EmitR2ROp(0x69, kTotalReg, kTotalReg AMD64_ARG(k32BitOp));
+ Emit32(elemSize);
+ break;
+ }
+ elemSize = 1;
+ }
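+ // For example, a 24-byte element takes the "imul TOTAL, 24" path above,
+ // after which ELEMADDR is formed with a scale of 1.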
+
+ _ASSERTE(FitsInU1(elemSize));
+ BYTE elemScale = static_cast<BYTE>(elemSize);
+
+ // Now, do the operation:
+
+ switch (pArrayOpScript->m_op)
+ {
+ case ArrayOpScript::LOADADDR:
+ // lea eax, ELEMADDR
+ X86EmitOp(0x8d, kEAX, elemBaseReg, elemOfs, elemScaledReg, elemScale AMD64_ARG(k64BitOp));
+ break;
+
+ case ArrayOpScript::LOAD:
+ if (pArrayOpScript->m_flags & ArrayOpScript::HASRETVALBUFFER)
+ {
+ // Ensure that these registers have been saved!
+ _ASSERTE(fSavedESI && fSavedEDI);
+
+ //lea esi, ELEMADDR
+ X86EmitOp(0x8d, kESI, elemBaseReg, elemOfs, elemScaledReg, elemScale AMD64_ARG(k64BitOp));
+
+ _ASSERTE(!TransitionBlock::IsStackArgumentOffset(pArrayOpScript->m_fRetBufLoc));
+ // mov edi, retbufptr
+ X86EmitR2ROp(0x8b, kEDI, GetX86ArgumentRegisterFromOffset(pArrayOpScript->m_fRetBufLoc - TransitionBlock::GetOffsetOfArgumentRegisters()));
+
+COPY_VALUE_CLASS:
+ {
+ size_t size = pArrayOpScript->m_elemsize;
+ size_t total = 0;
+ if(pArrayOpScript->m_gcDesc)
+ {
+ CGCDescSeries* cur = pArrayOpScript->m_gcDesc->GetHighestSeries();
+ if ((cur->startoffset-elemOfs) > 0)
+ generate_noref_copy ((unsigned) (cur->startoffset - elemOfs), this);
+ total += cur->startoffset - elemOfs;
+
+ SSIZE_T cnt = (SSIZE_T) pArrayOpScript->m_gcDesc->GetNumSeries();
+ // special array encoding
+ _ASSERTE(cnt < 0);
+
+ for (SSIZE_T __i = 0; __i > cnt; __i--)
+ {
+ HALF_SIZE_T skip = cur->val_serie[__i].skip;
+ HALF_SIZE_T nptrs = cur->val_serie[__i].nptrs;
+ total += nptrs*sizeof (DWORD*);
+ do
+ {
+ AMD64_ONLY(_ASSERTE(fNeedScratchArea));
+
+ X86EmitCall(NewExternalCodeLabel((LPVOID) JIT_ByRefWriteBarrier), 0);
+ } while (--nptrs);
+ if (skip > 0)
+ {
+ //check if we are at the end of the series
+ if (__i == (cnt + 1))
+ skip = skip - (HALF_SIZE_T)(cur->startoffset - elemOfs);
+ if (skip > 0)
+ generate_noref_copy (skip, this);
+ }
+ total += skip;
+ }
+
+ _ASSERTE (size == total);
+ }
+ else
+ {
+ // no ref anywhere, just copy the bytes.
+ _ASSERTE (size);
+ generate_noref_copy ((unsigned)size, this);
+ }
+ }
+ }
+ else
+ {
+ switch (pArrayOpScript->m_elemsize)
+ {
+ case 1:
+ // mov[zs]x eax, byte ptr ELEMADDR
+ X86EmitOp(pArrayOpScript->m_signed ? 0xbe0f : 0xb60f, kEAX, elemBaseReg, elemOfs, elemScaledReg, elemScale);
+ break;
+
+ case 2:
+ // mov[zs]x eax, word ptr ELEMADDR
+ X86EmitOp(pArrayOpScript->m_signed ? 0xbf0f : 0xb70f, kEAX, elemBaseReg, elemOfs, elemScaledReg, elemScale);
+ break;
+
+ case 4:
+ if (pArrayOpScript->m_flags & ArrayOpScript::ISFPUTYPE)
+ {
+#ifdef _TARGET_AMD64_
+ // movss xmm0, dword ptr ELEMADDR
+ Emit8(0xf3);
+ X86EmitOp(0x100f, (X86Reg)0, elemBaseReg, elemOfs, elemScaledReg, elemScale);
+#else // !_TARGET_AMD64_
+ // fld dword ptr ELEMADDR
+ X86EmitOp(0xd9, (X86Reg)0, elemBaseReg, elemOfs, elemScaledReg, elemScale);
+#endif // !_TARGET_AMD64_
+ }
+ else
+ {
+ // mov eax, ELEMADDR
+ X86EmitOp(0x8b, kEAX, elemBaseReg, elemOfs, elemScaledReg, elemScale);
+ }
+ break;
+
+ case 8:
+ if (pArrayOpScript->m_flags & ArrayOpScript::ISFPUTYPE)
+ {
+#ifdef _TARGET_AMD64_
+ // movsd xmm0, qword ptr ELEMADDR
+ Emit8(0xf2);
+ X86EmitOp(0x100f, (X86Reg)0, elemBaseReg, elemOfs, elemScaledReg, elemScale);
+#else // !_TARGET_AMD64_
+ // fld qword ptr ELEMADDR
+ X86EmitOp(0xdd, (X86Reg)0, elemBaseReg, elemOfs, elemScaledReg, elemScale);
+#endif // !_TARGET_AMD64_
+ }
+ else
+ {
+ // mov eax, ELEMADDR
+ X86EmitOp(0x8b, kEAX, elemBaseReg, elemOfs, elemScaledReg, elemScale AMD64_ARG(k64BitOp));
+#ifdef _TARGET_X86_
+ // mov edx, ELEMADDR + 4
+ X86EmitOp(0x8b, kEDX, elemBaseReg, elemOfs + 4, elemScaledReg, elemScale);
+#endif
+ }
+ break;
+
+ default:
+ _ASSERTE(0);
+ }
+ }
+
+ break;
+
+ case ArrayOpScript::STORE:
+
+ switch (pArrayOpScript->m_elemsize)
+ {
+ case 1:
+ // mov SCRATCH, [esp + valoffset]
+ kValueReg = LoadArrayOpArg(pArrayOpScript->m_fValLoc, this, SCRATCH_REGISTER_X86REG, ofsadjust);
+ // mov byte ptr ELEMADDR, SCRATCH.b
+ X86EmitOp(0x88, kValueReg, elemBaseReg, elemOfs, elemScaledReg, elemScale);
+ break;
+ case 2:
+ // mov SCRATCH, [esp + valoffset]
+ kValueReg = LoadArrayOpArg(pArrayOpScript->m_fValLoc, this, SCRATCH_REGISTER_X86REG, ofsadjust);
+ // mov word ptr ELEMADDR, SCRATCH.w
+ Emit8(0x66);
+ X86EmitOp(0x89, kValueReg, elemBaseReg, elemOfs, elemScaledReg, elemScale);
+ break;
+ case 4:
+#ifndef _TARGET_AMD64_
+ if (pArrayOpScript->m_flags & ArrayOpScript::NEEDSWRITEBARRIER)
+ {
+ // mov SCRATCH, [esp + valoffset]
+ kValueReg = LoadArrayOpArg(pArrayOpScript->m_fValLoc, this, SCRATCH_REGISTER_X86REG, ofsadjust);
+
+ _ASSERTE(SCRATCH_REGISTER_X86REG == kEAX); // value to store is already in EAX where we want it.
+ // lea edx, ELEMADDR
+ X86EmitOp(0x8d, kEDX, elemBaseReg, elemOfs, elemScaledReg, elemScale);
+
+ // call JIT_Writeable_Thunks_Buf.WriteBarrierReg[0] (== EAX)
+ X86EmitCall(NewExternalCodeLabel((LPVOID) &JIT_WriteBarrierEAX), 0);
+ }
+ else
+#else // _TARGET_AMD64_
+ if (pArrayOpScript->m_flags & ArrayOpScript::ISFPUTYPE)
+ {
+ if (!TransitionBlock::IsStackArgumentOffset(pArrayOpScript->m_fValLoc))
+ {
+ kValueReg = (X86Reg)TransitionBlock::GetArgumentIndexFromOffset(pArrayOpScript->m_fValLoc);
+ }
+ else
+ {
+ kValueReg = (X86Reg)0; // xmm0
+
+ // movss xmm0, dword ptr [rsp+??]
+ Emit8(0xf3);
+ X86EmitOp(0x100f, kValueReg, (X86Reg)4 /*rsp*/, ofsadjust + pArrayOpScript->m_fValLoc);
+ }
+
+ // movss dword ptr ELEMADDR, xmm?
+ Emit8(0xf3);
+ X86EmitOp(0x110f, kValueReg, elemBaseReg, elemOfs, elemScaledReg, elemScale);
+ }
+ else
+#endif // _TARGET_AMD64_
+ {
+ // mov SCRATCH, [esp + valoffset]
+ kValueReg = LoadArrayOpArg(pArrayOpScript->m_fValLoc, this, SCRATCH_REGISTER_X86REG, ofsadjust AMD64_ARG(k32BitOp));
+
+ // mov ELEMADDR, SCRATCH
+ X86EmitOp(0x89, kValueReg, elemBaseReg, elemOfs, elemScaledReg, elemScale);
+ }
+ break;
+
+ case 8:
+
+ if (!(pArrayOpScript->m_flags & ArrayOpScript::NEEDSWRITEBARRIER))
+ {
+#ifdef _TARGET_AMD64_
+ if (pArrayOpScript->m_flags & ArrayOpScript::ISFPUTYPE)
+ {
+ if (!TransitionBlock::IsStackArgumentOffset(pArrayOpScript->m_fValLoc))
+ {
+ kValueReg = (X86Reg)TransitionBlock::GetArgumentIndexFromOffset(pArrayOpScript->m_fValLoc);
+ }
+ else
+ {
+ kValueReg = (X86Reg)0; // xmm0
+
+ // movsd xmm0, qword ptr [rsp+??]
+ Emit8(0xf2);
+ X86EmitOp(0x100f, kValueReg, (X86Reg)4 /*rsp*/, ofsadjust + pArrayOpScript->m_fValLoc);
+ }
+
+ // movsd qword ptr ELEMADDR, xmm?
+ Emit8(0xf2);
+ X86EmitOp(0x110f, kValueReg, elemBaseReg, elemOfs, elemScaledReg, elemScale);
+ }
+ else
+ {
+ // mov SCRATCH, [esp + valoffset]
+ kValueReg = LoadArrayOpArg(pArrayOpScript->m_fValLoc, this, SCRATCH_REGISTER_X86REG, ofsadjust);
+
+ // mov ELEMADDR, SCRATCH
+ X86EmitOp(0x89, kValueReg, elemBaseReg, elemOfs, elemScaledReg, elemScale, k64BitOp);
+ }
+#else // !_TARGET_AMD64_
+ _ASSERTE(TransitionBlock::IsStackArgumentOffset(pArrayOpScript->m_fValLoc)); // on x86, the value never gets a register, so that case is not implemented
+ // mov SCRATCH, [esp + valoffset]
+ X86EmitEspOffset(0x8b, SCRATCH_REGISTER_X86REG, pArrayOpScript->m_fValLoc + ofsadjust);
+ // mov ELEMADDR, SCRATCH
+ X86EmitOp(0x89, SCRATCH_REGISTER_X86REG, elemBaseReg, elemOfs, elemScaledReg, elemScale);
+
+ _ASSERTE(TransitionBlock::IsStackArgumentOffset(pArrayOpScript->m_fValLoc)); // on x86, the value never gets a register, so that case is not implemented
+ // mov SCRATCH, [esp + valoffset + 4]
+ X86EmitEspOffset(0x8b, SCRATCH_REGISTER_X86REG, pArrayOpScript->m_fValLoc + ofsadjust + 4);
+ // mov ELEMADDR+4, SCRATCH
+ X86EmitOp(0x89, SCRATCH_REGISTER_X86REG, elemBaseReg, elemOfs+4, elemScaledReg, elemScale);
+#endif // !_TARGET_AMD64_
+ break;
+ }
+#ifdef _TARGET_AMD64_
+ else
+ {
+ _ASSERTE(SCRATCH_REGISTER_X86REG == kEAX); // value to store is already in EAX where we want it.
+ // lea rcx, ELEMADDR
+ X86EmitOp(0x8d, kRCX, elemBaseReg, elemOfs, elemScaledReg, elemScale, k64BitOp);
+
+ // mov rdx, [rsp + valoffset]
+ kValueReg = LoadArrayOpArg(pArrayOpScript->m_fValLoc, this, kRDX, ofsadjust);
+ _ASSERT(kRCX != kValueReg);
+ if (kRDX != kValueReg)
+ X86EmitR2ROp(0x8b, kRDX, kValueReg);
+
+ _ASSERTE(fNeedScratchArea);
+ X86EmitCall(NewExternalCodeLabel((PVOID)JIT_WriteBarrier), 0);
+ break;
+ }
+#endif // _TARGET_AMD64_
+ // FALL THROUGH (on x86)
+ default:
+ // Ensure that these registers have been saved!
+ _ASSERTE(fSavedESI && fSavedEDI);
+
+#ifdef _TARGET_AMD64_
+ // mov rsi, [rsp + valoffset]
+ kValueReg = LoadArrayOpArg(pArrayOpScript->m_fValLoc, this, kRSI, ofsadjust);
+ if (kRSI != kValueReg)
+ X86EmitR2ROp(0x8b, kRSI, kValueReg);
+#else // !_TARGET_AMD64_
+ _ASSERTE(TransitionBlock::IsStackArgumentOffset(pArrayOpScript->m_fValLoc));
+ // lea esi, [esp + valoffset]
+ X86EmitEspOffset(0x8d, kESI, pArrayOpScript->m_fValLoc + ofsadjust);
+#endif // !_TARGET_AMD64_
+
+ // lea edi, ELEMADDR
+ X86EmitOp(0x8d, kEDI, elemBaseReg, elemOfs, elemScaledReg, elemScale AMD64_ARG(k64BitOp));
+ goto COPY_VALUE_CLASS;
+ }
+ break;
+
+ default:
+ _ASSERTE(0);
+ }
+
+ EmitLabel(Epilog);
+
+#ifdef _TARGET_AMD64_
+ if (fNeedPrologue)
+ {
+ if (fNeedScratchArea)
+ {
+ // Throw away scratch area
+ X86EmitAddEsp(sizeof(ArgumentRegisters) + 0x8);
+ }
+
+ if (fSavedEDI)
+ X86EmitPopReg(kRDI);
+
+ if (fSavedESI)
+ X86EmitPopReg(kRSI);
+ }
+
+ X86EmitReturn(0);
+#else // !_TARGET_AMD64_
+ // Restore the callee-saved registers
+ X86EmitPopReg(kFactorReg);
+ X86EmitPopReg(kTotalReg);
+
+ // ret N
+ X86EmitReturn(pArrayOpScript->m_cbretpop);
+#endif // !_TARGET_AMD64_
+
+ // Exception points must clean up the stack for all those extra args.
+ // kFactorReg and kTotalReg will be popped by the jump targets.
+
+ void *pvExceptionThrowFn;
+
+#if defined(_TARGET_AMD64_)
+#define ARRAYOP_EXCEPTION_HELPERS(base) { (PVOID)base, (PVOID)base##_RSIRDI, (PVOID)base##_ScratchArea, (PVOID)base##_RSIRDI_ScratchArea }
+ static void *rgNullExceptionHelpers[] = ARRAYOP_EXCEPTION_HELPERS(ArrayOpStubNullException);
+ static void *rgRangeExceptionHelpers[] = ARRAYOP_EXCEPTION_HELPERS(ArrayOpStubRangeException);
+ static void *rgTypeMismatchExceptionHelpers[] = ARRAYOP_EXCEPTION_HELPERS(ArrayOpStubTypeMismatchException);
+#undef ARRAYOP_EXCEPTION_HELPERS
+
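+ // Each exception has four helper variants, indexed by the cleanup the
+ // epilog needs: bit 0 selects a variant that restores RSI/RDI, bit 1 a
+ // variant that discards the scratch area.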
+ UINT iExceptionHelper = (fNeedRSIRDI ? 1 : 0) + (fNeedScratchArea ? 2 : 0);
+#endif // defined(_TARGET_AMD64_)
+
+ EmitLabel(Inner_nullexception);
+
+#ifndef _TARGET_AMD64_
+ pvExceptionThrowFn = (LPVOID)ArrayOpStubNullException;
+
+ Emit8(0xb8); // mov EAX, <stack cleanup>
+ Emit32(pArrayOpScript->m_cbretpop);
+#else //_TARGET_AMD64_
+ pvExceptionThrowFn = rgNullExceptionHelpers[iExceptionHelper];
+#endif //!_TARGET_AMD64_
+ X86EmitNearJump(NewExternalCodeLabel(pvExceptionThrowFn));
+
+ EmitLabel(Inner_rangeexception);
+#ifndef _TARGET_AMD64_
+ pvExceptionThrowFn = (LPVOID)ArrayOpStubRangeException;
+ Emit8(0xb8); // mov EAX, <stack cleanup>
+ Emit32(pArrayOpScript->m_cbretpop);
+#else //_TARGET_AMD64_
+ pvExceptionThrowFn = rgRangeExceptionHelpers[iExceptionHelper];
+#endif //!_TARGET_AMD64_
+ X86EmitNearJump(NewExternalCodeLabel(pvExceptionThrowFn));
+
+ if (Inner_typeMismatchexception != NULL)
+ {
+ EmitLabel(Inner_typeMismatchexception);
+#ifndef _TARGET_AMD64_
+ pvExceptionThrowFn = (LPVOID)ArrayOpStubTypeMismatchException;
+ Emit8(0xb8); // mov EAX, <stack cleanup>
+ Emit32(pArrayOpScript->m_cbretpop);
+#else //_TARGET_AMD64_
+ pvExceptionThrowFn = rgTypeMismatchExceptionHelpers[iExceptionHelper];
+#endif //!_TARGET_AMD64_
+ X86EmitNearJump(NewExternalCodeLabel(pvExceptionThrowFn));
+ }
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+#endif // !FEATURE_ARRAYSTUB_AS_IL
+
+//===========================================================================
+// Emits code to break into debugger
+VOID StubLinkerCPU::EmitDebugBreak()
+{
+ STANDARD_VM_CONTRACT;
+
+ // int3
+ Emit8(0xCC);
+}
+
+#if defined(FEATURE_COMINTEROP) && defined(_TARGET_X86_)
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning (disable : 4740) // There is inline asm code in this function, which disables
+ // global optimizations.
+#pragma warning (disable : 4731) // frame pointer register modified by inline assembly code
+#endif // _MSC_VER
+Thread* __stdcall CreateThreadBlockReturnHr(ComMethodFrame *pFrame)
+{
+
+ WRAPPER_NO_CONTRACT;
+
+ Thread *pThread = NULL;
+
+ HRESULT hr = S_OK;
+
+ // This means that a thread is FIRST coming in from outside the EE.
+ BEGIN_ENTRYPOINT_THROWS;
+ pThread = SetupThreadNoThrow(&hr);
+ END_ENTRYPOINT_THROWS;
+
+ if (pThread == NULL) {
+ // Unwind stack, and return hr
+ // NOTE: assumes __stdcall
+ // Note that this code does not handle the rare COM signatures that do not return HRESULT
+ // compute the callee pop stack bytes
+ UINT numArgStackBytes = pFrame->GetNumCallerStackBytes();
+ unsigned frameSize = sizeof(Frame) + sizeof(LPVOID);
+ LPBYTE iEsp = ((LPBYTE)pFrame) + ComMethodFrame::GetOffsetOfCalleeSavedRegisters();
+ __asm
+ {
+ mov eax, hr
+ mov edx, numArgStackBytes
+ //*****************************************
+ // reset the stack pointer
+ // none of the locals above can be used in the asm below
+ // if we whack the stack pointer
+ mov esp, iEsp
+ // pop callee saved registers
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+ pop ecx ; //return address
+ // pop the callee cleanup stack args
+ add esp, edx ;// callee cleanup of args
+ jmp ecx; // jump to the address to continue execution
+
+ // We will never get here. This "ret" is just so that code-disassembling
+ // profilers know to stop disassembling any further
+ ret
+ }
+ }
+
+ return pThread;
+}
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
+#endif // defined(FEATURE_COMINTEROP) && defined(_TARGET_X86_)
+
+#endif // !defined(CROSSGEN_COMPILE) && !defined(FEATURE_STUBS_AS_IL)
+
+#endif // !DACCESS_COMPILE
+
+
+#ifdef _TARGET_AMD64_
+
+//
+// TailCallFrame Object Scanning
+//
+// This handles scanning/promotion of GC objects that were
+// protected by the TailCallHelper routine. Note that the objects
+// being protected is somewhat dynamic and is dependent upon the
+// the callee...
+//
+
+void TailCallFrame::GcScanRoots(promote_func *fn, ScanContext* sc)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pGCLayout != NULL)
+ {
+ struct FrameOffsetDecoder {
+ private:
+ TADDR prevOffset;
+ TADDR rangeEnd;
+ BOOL maybeInterior;
+ BOOL atEnd;
+ PTR_SBYTE pbOffsets;
+
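+ // The GC layout stream is a sequence of variable-length numbers
+ // (emitted by EncodeOneGCOffset below): bytes with the high bit
+ // clear each carry 7 payload bits; a byte with the high bit set
+ // carries the final 7 bits and terminates the number.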
+ DWORD ReadNumber() {
+ signed char i;
+ DWORD offset = 0;
+ while ((i = *pbOffsets++) >= 0)
+ {
+ offset = (offset << 7) | i;
+ }
+ offset = (offset << 7) | (i & 0x7F);
+ return offset;
+ }
+
+ public:
+ FrameOffsetDecoder(PTR_GSCookie _base, TADDR offsets)
+ : prevOffset(dac_cast<TADDR>(_base)), rangeEnd(~0LL), atEnd(FALSE), pbOffsets(dac_cast<PTR_SBYTE>(offsets)) { maybeInterior = FALSE;}
+
+ bool MoveNext() {
+ LIMITED_METHOD_CONTRACT;
+
+ if (rangeEnd < prevOffset)
+ {
+ prevOffset -= sizeof(void*);
+ return true;
+ }
+ if (atEnd) return false;
+ DWORD offset = ReadNumber();
+ atEnd = (offset & 1);
+ BOOL range = (offset & 2);
+ maybeInterior = (offset & 0x80000000);
+
+ offset &= 0x7FFFFFFC;
+
+#ifdef _WIN64
+ offset <<= 1;
+#endif
+ offset += sizeof(void*);
+ _ASSERTE(prevOffset > offset);
+ prevOffset -= offset;
+
+ if (range)
+ {
+ _ASSERTE(!atEnd);
+ _ASSERTE(!maybeInterior);
+ DWORD offsetEnd = ReadNumber();
+ atEnd = (offsetEnd & 1);
+ offsetEnd = (offsetEnd & ~1) << 1;
+ // range encoding starts at a run of 3 (a run of 2 is smaller
+ // encoded as 2 separate offsets), so an encoded 0 means 3
+ offsetEnd += sizeof(void*) * 3;
+ rangeEnd = prevOffset - offsetEnd;
+ }
+
+ return true;
+ }
+
+ BOOL MaybeInterior() const { return maybeInterior; }
+
+ PTR_PTR_Object Current() const { return PTR_PTR_Object(prevOffset); }
+
+ } decoder(GetGSCookiePtr(), m_pGCLayout);
+
+ while (decoder.MoveNext())
+ {
+ PTR_PTR_Object ppRef = decoder.Current();
+
+ LOG((LF_GC, INFO3, "Tail Call Frame Promoting" FMT_ADDR "to",
+ DBG_ADDR(OBJECTREF_TO_UNCHECKED_OBJECTREF(*ppRef)) ));
+ if (decoder.MaybeInterior())
+ PromoteCarefully(fn, ppRef, sc, GC_CALL_INTERIOR|CHECK_APP_DOMAIN);
+ else
+ (*fn)(ppRef, sc, 0);
+ LOG((LF_GC, INFO3, FMT_ADDR "\n", DBG_ADDR(OBJECTREF_TO_UNCHECKED_OBJECTREF(*ppRef)) ));
+ }
+ }
+}
+
+#ifndef DACCESS_COMPILE
+static void EncodeOneGCOffset(CPUSTUBLINKER *pSl, ULONG delta, BOOL maybeInterior, BOOL range, BOOL last)
+{
+ CONTRACTL
+ {
+ THROWS; // From the stublinker
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Everything should be pointer aligned
+ // but we use a high bit for interior, and the 0 bit to denote the end of the list
+ // we use the 1 bit to denote a range
+ _ASSERTE((delta % sizeof(void*)) == 0);
+
+#if defined(_WIN64)
+ // For 64-bit, we have 3 bits of alignment, so we allow larger frames
+ // by shifting and gaining a free high-bit.
+ ULONG encodedDelta = delta >> 1;
+#else
+ // For 32-bit, we just limit our frame size to <2GB. (I know, such a bummer!)
+ ULONG encodedDelta = delta;
+#endif
+ _ASSERTE((encodedDelta & 0x80000003) == 0);
+ if (last)
+ {
+ encodedDelta |= 1;
+ }
+
+ if (range)
+ {
+ encodedDelta |= 2;
+ }
+ else if (maybeInterior)
+ {
+ _ASSERTE(!range);
+ encodedDelta |= 0x80000000;
+ }
+
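+ // Emit encodedDelta as a big-endian base-128 number: most significant
+ // 7-bit group first, with the high bit set on the final byte as the
+ // terminator. Illustrative example: encodedDelta = 0x1234 emits the two
+ // bytes 0x24, 0xB4, which ReadNumber reassembles as (0x24 << 7) | 0x34.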
+ BYTE bytes[5];
+ UINT index = 5;
+ bytes[--index] = (BYTE)((encodedDelta & 0x7F) | 0x80);
+ encodedDelta >>= 7;
+ while (encodedDelta > 0)
+ {
+ bytes[--index] = (BYTE)(encodedDelta & 0x7F);
+ encodedDelta >>= 7;
+ }
+ pSl->EmitBytes(&bytes[index], 5 - index);
+}
+
+static void EncodeGCOffsets(CPUSTUBLINKER *pSl, /* const */ ULONGARRAY & gcOffsets)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(gcOffsets.Count() > 0);
+
+ ULONG prevOffset = 0;
+ int i = 0;
+ BOOL last = FALSE;
+ do {
+ ULONG offset = gcOffsets[i];
+ // Everything should be pointer aligned
+ // but we use the 0-bit to mean maybeInterior, for byrefs.
+ _ASSERTE(((offset % sizeof(void*)) == 0) || ((offset % sizeof(void*)) == 1));
+ BOOL maybeInterior = (offset & 1);
+ offset &= ~1;
+
+ // Encode just deltas because they're smaller (and the list should be sorted)
+ _ASSERTE(offset >= (prevOffset + sizeof(void*)));
+ ULONG delta = offset - (prevOffset + sizeof(void*));
+ if (!maybeInterior && gcOffsets.Count() > i + 2)
+ {
+ // Check for a potential range.
+ // Only do it if we have 3 or more pointers in a row
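+ // A range is emitted as two numbers: the delta to the first slot with
+ // the range bit set, then the extra length of the run beyond the
+ // implicit minimum of 3 further slots (e.g. 5 consecutive slots emit
+ // a start entry plus an extension of one pointer-size).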
+ ULONG rangeOffset = offset;
+ int j = i + 1;
+ do {
+ ULONG nextOffset = gcOffsets[j];
+ // interior pointers can't be in ranges
+ if (nextOffset & 1)
+ break;
+ // ranges must be saturated
+ if (nextOffset != (rangeOffset + sizeof(void*)))
+ break;
+ j++;
+ rangeOffset = nextOffset;
+ } while(j < gcOffsets.Count());
+
+ if (j > (i + 2))
+ {
+ EncodeOneGCOffset(pSl, delta, FALSE, TRUE, last);
+ i = j - 1;
+ _ASSERTE(rangeOffset >= (offset + (sizeof(void*) * 3)));
+ delta = rangeOffset - (offset + (sizeof(void*) * 3));
+ offset = rangeOffset;
+ }
+ }
+ last = (++i == gcOffsets.Count());
+
+
+ EncodeOneGCOffset(pSl, delta, maybeInterior, FALSE, last);
+
+ prevOffset = offset;
+ } while (!last);
+}
+
+static void AppendGCLayout(ULONGARRAY &gcLayout, size_t baseOffset, BOOL fIsTypedRef, TypeHandle VMClsHnd)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE((baseOffset % 16) == 0);
+ _ASSERTE(FitsInU4(baseOffset));
+
+ if (fIsTypedRef)
+ {
+ *gcLayout.AppendThrowing() = (ULONG)(baseOffset | 1); // "| 1" to mark it as an interior pointer
+ }
+ else if (!VMClsHnd.IsNativeValueType())
+ {
+ MethodTable* pMT = VMClsHnd.GetMethodTable();
+ _ASSERTE(pMT);
+ _ASSERTE(pMT->IsValueType());
+
+ // walk the GC descriptors, reporting the correct offsets
+ if (pMT->ContainsPointers())
+ {
+ // size of instance when unboxed must be adjusted for the syncblock
+ // index and the VTable pointer.
+ DWORD size = pMT->GetBaseSize();
+
+ // we don't include this term in our 'ppstop' calculation below.
+ _ASSERTE(pMT->GetComponentSize() == 0);
+
+ CGCDesc* map = CGCDesc::GetCGCDescFromMT(pMT);
+ CGCDescSeries* cur = map->GetLowestSeries();
+ CGCDescSeries* last = map->GetHighestSeries();
+
+ _ASSERTE(cur <= last);
+ do
+ {
+ // offset to embedded references in this series must be
+ // adjusted by the VTable pointer, when in the unboxed state.
+ size_t adjustOffset = cur->GetSeriesOffset() - sizeof(void *);
+
+ _ASSERTE(baseOffset >= adjustOffset);
+ size_t start = baseOffset - adjustOffset;
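+ // GetSeriesSize() is stored biased by -GetBaseSize(), so adding
+ // 'size' back recovers the actual byte length of this series.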
+ size_t stop = start - (cur->GetSeriesSize() + size);
+ for (size_t off = stop + sizeof(void*); off <= start; off += sizeof(void*))
+ {
+ _ASSERTE(gcLayout.Count() == 0 || off > gcLayout[gcLayout.Count() - 1]);
+ _ASSERTE(FitsInU4(off));
+ *gcLayout.AppendThrowing() = (ULONG)off;
+ }
+ cur++;
+
+ } while (cur <= last);
+ }
+ }
+}
+
+Stub * StubLinkerCPU::CreateTailCallCopyArgsThunk(CORINFO_SIG_INFO * pSig,
+ CorInfoHelperTailCallSpecialHandling flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ CPUSTUBLINKER sl;
+ CPUSTUBLINKER* pSl = &sl;
+
+ // Generates a function that looks like this:
+ // size_t CopyArguments(va_list args, (RCX)
+ // CONTEXT *pCtx, (RDX)
+ // DWORD64 *pvStack, (R8)
+ // size_t cbStack) (R9)
+ // {
+ // if (pCtx != NULL) {
+ // foreach (arg in args) {
+ // copy into pCtx or pvStack
+ // }
+ // }
+ // return <size of stack needed>;
+ // }
+ //
+
+ CodeLabel *pNullLabel = pSl->NewCodeLabel();
+
+ // test rdx, rdx
+ pSl->X86EmitR2ROp(0x85, kRDX, kRDX);
+
+ // jz NullLabel
+ pSl->X86EmitCondJump(pNullLabel, X86CondCode::kJZ);
+
+ UINT nArgSlot = 0;
+ UINT totalArgs = pSig->totalILArgs() + ((pSig->isVarArg() || pSig->hasTypeArg()) ? 1 : 0);
+ bool fR10Loaded = false;
+ UINT cbArg;
+ static const UINT rgcbArgRegCtxtOffsets[4] = { offsetof(CONTEXT, Rcx), offsetof(CONTEXT, Rdx),
+ offsetof(CONTEXT, R8), offsetof(CONTEXT, R9) };
+ static const UINT rgcbFpArgRegCtxtOffsets[4] = { offsetof(CONTEXT, Xmm0.Low), offsetof(CONTEXT, Xmm1.Low),
+ offsetof(CONTEXT, Xmm2.Low), offsetof(CONTEXT, Xmm3.Low) };
+
+ ULONGARRAY gcLayout;
+
+ // On input to the function R9 contains the size of the buffer
+ // The first time this macro runs, R10 is loaded with the 'top' of the Frame
+ // and R9 is changed to point to the 'top' of the copy buffer.
+ // Then both R9 and R10 are decremented by the size of the struct we're copying
+ // So R10 is the value to put in the argument slot, and R9 is where the data
+ // should be copied to (or zeroed out in the case of the return buffer).
+#define LOAD_STRUCT_OFFSET_IF_NEEDED(cbSize) \
+ { \
+ _ASSERTE(cbSize > 0); \
+ _ASSERTE(FitsInI4(cbSize)); \
+ __int32 offset = (__int32)cbSize; \
+ if (!fR10Loaded) { \
+ /* mov r10, [rdx + offset of RSP] */ \
+ pSl->X86EmitIndexRegLoad(kR10, kRDX, offsetof(CONTEXT, Rsp)); \
+ /* add an extra 8 because RSP is pointing at the return address */ \
+ offset -= 8; \
+ /* add r10, r9 */ \
+ pSl->X86EmitAddRegReg(kR10, kR9); \
+ /* add r9, r8 */ \
+ pSl->X86EmitAddRegReg(kR9, kR8); \
+ fR10Loaded = true; \
+ } \
+ /* sub r10, offset */ \
+ pSl->X86EmitSubReg(kR10, offset); \
+ /* sub r9, cbSize */ \
+ pSl->X86EmitSubReg(kR9, cbSize); \
+ }
+
+
+ if (flags & CORINFO_TAILCALL_STUB_DISPATCH_ARG) {
+ // This is set for stub dispatch
+ // The JIT placed an extra argument in the list that needs to
+ // get shoved into R11, and not counted.
+ // pCtx->R11 = va_arg(args, DWORD64);
+
+ // mov rax, [rcx]
+ pSl->X86EmitIndexRegLoad(kRAX, kRCX, 0);
+ // add rcx, 8
+ pSl->X86EmitAddReg(kRCX, 8);
+ // mov [rdx + offset of R11], rax
+ pSl->X86EmitIndexRegStore(kRDX, offsetof(CONTEXT, R11), kRAX);
+ }
+
+ ULONG cbStructOffset = 0;
+
+ // First comes the 'this' pointer
+ if (pSig->hasThis()) {
+ // mov rax, [rcx]
+ pSl->X86EmitIndexRegLoad(kRAX, kRCX, 0);
+ // add rcx, 8
+ pSl->X86EmitAddReg(kRCX, 8);
+ // mov [rdx + offset of RCX/RDX], rax
+ pSl->X86EmitIndexRegStore(kRDX, rgcbArgRegCtxtOffsets[nArgSlot++], kRAX);
+ }
+
+ // Next the return buffer
+ cbArg = 0;
+ TypeHandle th(pSig->retTypeClass);
+ if ((pSig->retType == CORINFO_TYPE_REFANY) || (pSig->retType == CORINFO_TYPE_VALUECLASS)) {
+ cbArg = th.GetSize();
+ }
+
+ if (ArgIterator::IsArgPassedByRef(cbArg)) {
+ totalArgs++;
+
+ // We always reserve space for the return buffer, and we always zero it out,
+ // so the GC won't complain, but if it's already pointing above the frame,
+ // then we need to pass it in (so it will get passed out).
+ // Otherwise we assume the caller is returning void, so we just pass in
+ // dummy space to be overwritten.
+ UINT cbUsed = (cbArg + 0xF) & ~0xF;
+ LOAD_STRUCT_OFFSET_IF_NEEDED(cbUsed);
+ // now emit a 'memset(r9, 0, cbUsed)'
+ {
+ // xorps xmm0, xmm0
+ pSl->X86EmitR2ROp(X86_INSTR_XORPS, kXMM0, kXMM0);
+ if (cbUsed <= 4 * 16) {
+ // movaps [r9], xmm0
+ pSl->X86EmitOp(X86_INSTR_MOVAPS_RM_R, kXMM0, kR9, 0);
+ if (16 < cbUsed) {
+ // movaps [r9 + 16], xmm0
+ pSl->X86EmitOp(X86_INSTR_MOVAPS_RM_R, kXMM0, kR9, 16);
+ if (32 < cbUsed) {
+ // movaps [r9 + 32], xmm0
+ pSl->X86EmitOp(X86_INSTR_MOVAPS_RM_R, kXMM0, kR9, 32);
+ if (48 < cbUsed) {
+ // movaps [r9 + 48], xmm0
+ pSl->X86EmitOp(X86_INSTR_MOVAPS_RM_R, kXMM0, kR9, 48);
+ }
+ }
+ }
+ }
+ else {
+ // a loop (one double-quadword at a time)
+ pSl->X86EmitZeroOutReg(kR11);
+ // LoopLabel:
+ CodeLabel *pLoopLabel = pSl->NewCodeLabel();
+ pSl->EmitLabel(pLoopLabel);
+ // movaps [r9 + r11], xmm0
+ pSl->X86EmitOp(X86_INSTR_MOVAPS_RM_R, kXMM0, kR9, 0, kR11, 1);
+ // add r11, 16
+ pSl->X86EmitAddReg(kR11, 16);
+ // cmp r11, cbUsed
+ pSl->X86EmitCmpRegImm32(kR11, cbUsed);
+ // jl LoopLabel
+ pSl->X86EmitCondJump(pLoopLabel, X86CondCode::kJL);
+ }
+ }
+ cbStructOffset += cbUsed;
+ AppendGCLayout(gcLayout, cbStructOffset, pSig->retType == CORINFO_TYPE_REFANY, th);
+
+ // mov rax, [rcx]
+ pSl->X86EmitIndexRegLoad(kRAX, kRCX, 0);
+ // add rcx, 8
+ pSl->X86EmitAddReg(kRCX, 8);
+ // cmp rax, [rdx + offset of R12]
+ pSl->X86EmitOffsetModRM(0x3B, kRAX, kRDX, offsetof(CONTEXT, R12));
+
+ CodeLabel *pSkipLabel = pSl->NewCodeLabel();
+ // jnb SkipLabel
+ pSl->X86EmitCondJump(pSkipLabel, X86CondCode::kJNB);
+
+ // Also check the lower bound of the stack in case the return buffer is on the GC heap
+ // and the GC heap is below the stack
+ // cmp rax, rsp
+ pSl->X86EmitR2ROp(0x3B, kRAX, (X86Reg)4 /*kRSP*/);
+ // jb SkipLabel
+ pSl->X86EmitCondJump(pSkipLabel, X86CondCode::kJB);
+ // mov rax, r10
+ pSl->X86EmitMovRegReg(kRAX, kR10);
+ // SkipLabel:
+ pSl->EmitLabel(pSkipLabel);
+ // mov [rdx + offset of RCX], rax
+ pSl->X86EmitIndexRegStore(kRDX, rgcbArgRegCtxtOffsets[nArgSlot++], kRAX);
+ }
+
+ // VarArgs Cookie *or* Generics Instantiation Parameter
+ if (pSig->hasTypeArg() || pSig->isVarArg()) {
+ // mov rax, [rcx]
+ pSl->X86EmitIndexRegLoad(kRAX, kRCX, 0);
+ // add rcx, 8
+ pSl->X86EmitAddReg(kRCX, 8);
+ // mov [rdx + offset of RCX/RDX], rax
+ pSl->X86EmitIndexRegStore(kRDX, rgcbArgRegCtxtOffsets[nArgSlot++], kRAX);
+ }
+
+ _ASSERTE(nArgSlot <= 4);
+
+ // Now for *all* the 'real' arguments
+ SigPointer ptr((PCCOR_SIGNATURE)pSig->args);
+ Module * module = GetModule(pSig->scope);
+ Instantiation classInst((TypeHandle*)pSig->sigInst.classInst, pSig->sigInst.classInstCount);
+ Instantiation methodInst((TypeHandle*)pSig->sigInst.methInst, pSig->sigInst.methInstCount);
+ SigTypeContext typeCtxt(classInst, methodInst);
+
+ for( ;nArgSlot < totalArgs; ptr.SkipExactlyOne()) {
+ CorElementType et = ptr.PeekElemTypeNormalized(module, &typeCtxt);
+ if (et == ELEMENT_TYPE_SENTINEL)
+ continue;
+
+ // mov rax, [rcx]
+ pSl->X86EmitIndexRegLoad(kRAX, kRCX, 0);
+ // add rcx, 8
+ pSl->X86EmitAddReg(kRCX, 8);
+ switch (et) {
+ case ELEMENT_TYPE_INTERNAL:
+ // TODO
+ _ASSERTE(!"Shouldn't see ELEMENT_TYPE_INTERNAL");
+ break;
+ case ELEMENT_TYPE_TYPEDBYREF:
+ case ELEMENT_TYPE_VALUETYPE:
+ th = ptr.GetTypeHandleThrowing(module, &typeCtxt, ClassLoader::LoadTypes, CLASS_LOAD_UNRESTOREDTYPEKEY);
+ _ASSERTE(!th.IsNull());
+ g_IBCLogger.LogEEClassAndMethodTableAccess(th.GetMethodTable());
+ cbArg = (UINT)th.GetSize();
+ if (ArgIterator::IsArgPassedByRef(cbArg)) {
+ UINT cbUsed = (cbArg + 0xF) & ~0xF;
+ LOAD_STRUCT_OFFSET_IF_NEEDED(cbUsed);
+ // rax has the source pointer
+ // r9 has the intermediate copy location
+ // r10 has the final destination
+ if (nArgSlot < 4) {
+ pSl->X86EmitIndexRegStore(kRDX, rgcbArgRegCtxtOffsets[nArgSlot++], kR10);
+ }
+ else {
+ pSl->X86EmitIndexRegStore(kR8, 8 * nArgSlot++, kR10);
+ }
+ // now emit a 'memcpy(rax, r9, cbUsed)'
+ // These structs are supposed to be 16-byte aligned, but
+ // Reflection puts them on the GC heap, which is only 8-byte
+ // aligned. It also means we have to be careful about not
+ // copying too much (because we might cross a page boundary)
+ UINT cbUsed16 = (cbArg + 7) & ~0xF;
+ _ASSERTE((cbUsed16 == cbUsed) || ((cbUsed16 + 16) == cbUsed));
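+ // cbUsed16 is cbArg rounded to a multiple of 16 that never overshoots
+ // cbArg by 8 or more, so at most one trailing 8-byte movlps copy is
+ // needed after the 16-byte chunks.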
+
+ if (cbArg <= 192) {
+ // Unrolled version (6 x 16 bytes in parallel)
+ UINT offset = 0;
+ while (offset < cbUsed16) {
+ // movups xmm0, [rax + offset]
+ pSl->X86EmitOp(X86_INSTR_MOVUPS_R_RM, kXMM0, kRAX, offset);
+ if (offset + 16 < cbUsed16) {
+ // movups xmm1, [rax + offset + 16]
+ pSl->X86EmitOp(X86_INSTR_MOVUPS_R_RM, kXMM1, kRAX, offset + 16);
+ if (offset + 32 < cbUsed16) {
+ // movups xmm2, [rax + offset + 32]
+ pSl->X86EmitOp(X86_INSTR_MOVUPS_R_RM, kXMM2, kRAX, offset + 32);
+ if (offset + 48 < cbUsed16) {
+ // movups xmm3, [rax + offset + 48]
+ pSl->X86EmitOp(X86_INSTR_MOVUPS_R_RM, kXMM3, kRAX, offset + 48);
+ if (offset + 64 < cbUsed16) {
+ // movups xmm4, [rax + offset + 64]
+ pSl->X86EmitOp(X86_INSTR_MOVUPS_R_RM, kXMM4, kRAX, offset + 64);
+ if (offset + 80 < cbUsed16) {
+ // movups xmm5, [rax + offset + 80]
+ pSl->X86EmitOp(X86_INSTR_MOVUPS_R_RM, kXMM5, kRAX, offset + 80);
+ }
+ }
+ }
+ }
+ }
+ // movaps [r9 + offset], xmm0
+ pSl->X86EmitOp(X86_INSTR_MOVAPS_RM_R, kXMM0, kR9, offset);
+ offset += 16;
+ if (offset < cbUsed16) {
+ // movaps [r9 + 16], xmm1
+ pSl->X86EmitOp(X86_INSTR_MOVAPS_RM_R, kXMM1, kR9, offset);
+ offset += 16;
+ if (offset < cbUsed16) {
+ // movaps [r9 + 32], xmm2
+ pSl->X86EmitOp(X86_INSTR_MOVAPS_RM_R, kXMM2, kR9, offset);
+ offset += 16;
+ if (offset < cbUsed16) {
+ // movaps [r9 + 48], xmm3
+ pSl->X86EmitOp(X86_INSTR_MOVAPS_RM_R, kXMM3, kR9, offset);
+ offset += 16;
+ if (offset < cbUsed16) {
+ // movaps [r9 + 64], xmm4
+ pSl->X86EmitOp(X86_INSTR_MOVAPS_RM_R, kXMM4, kR9, offset);
+ offset += 16;
+ if (offset < cbUsed16) {
+ // movaps [r9 + 80], xmm5
+ pSl->X86EmitOp(X86_INSTR_MOVAPS_RM_R, kXMM5, kR9, offset);
+ offset += 16;
+ }
+ }
+ }
+ }
+ }
+ }
+ // Copy the last 8 bytes if needed
+ if (cbUsed > cbUsed16) {
+ _ASSERTE(cbUsed16 < cbArg);
+ // movlps xmm0, [rax + offset]
+ pSl->X86EmitOp(X86_INSTR_MOVLPS_R_RM, kXMM0, kRAX, offset);
+ // movlps [r9 + offset], xmm0
+ pSl->X86EmitOp(X86_INSTR_MOVLPS_RM_R, kXMM0, kR9, offset);
+ }
+ }
+ else {
+ // a loop (one double-quadword at a time)
+ pSl->X86EmitZeroOutReg(kR11);
+ // LoopLabel:
+ CodeLabel *pLoopLabel = pSl->NewCodeLabel();
+ pSl->EmitLabel(pLoopLabel);
+ // movups xmm0, [rax + r11]
+ pSl->X86EmitOp(X86_INSTR_MOVUPS_R_RM, kXMM0, kRAX, 0, kR11, 1);
+ // movaps [r9 + r11], xmm0
+ pSl->X86EmitOp(X86_INSTR_MOVAPS_RM_R, kXMM0, kR9, 0, kR11, 1);
+ // add r11, 16
+ pSl->X86EmitAddReg(kR11, 16);
+ // cmp r11, cbUsed16
+ pSl->X86EmitCmpRegImm32(kR11, cbUsed16);
+ // jl LoopLabel
+ pSl->X86EmitCondJump(pLoopLabel, X86CondCode::kJL);
+ if (cbArg > cbUsed16) {
+ _ASSERTE(cbUsed16 + 8 >= cbArg);
+ // movlps xmm0, [rax + r11]
+ pSl->X86EmitOp(X86_INSTR_MOVLPS_R_RM, kXMM0, kRAX, 0, kR11, 1);
+ // movlps [r9 + r11], xmm0
+ pSl->X86EmitOp(X86_INSTR_MOVLPS_RM_R, kXMM0, kR9, 0, kR11, 1);
+ }
+ }
+ cbStructOffset += cbUsed;
+ AppendGCLayout(gcLayout, cbStructOffset, et == ELEMENT_TYPE_TYPEDBYREF, th);
+ break;
+ }
+
+ //
+ // Explicit Fall-Through for non-IsArgPassedByRef
+ //
+
+ default:
+ if (nArgSlot < 4) {
+ pSl->X86EmitIndexRegStore(kRDX, rgcbArgRegCtxtOffsets[nArgSlot], kRAX);
+ if ((et == ELEMENT_TYPE_R4) || (et == ELEMENT_TYPE_R8)) {
+ pSl->X86EmitIndexRegStore(kRDX, rgcbFpArgRegCtxtOffsets[nArgSlot], kRAX);
+ }
+ }
+ else {
+ pSl->X86EmitIndexRegStore(kR8, 8 * nArgSlot, kRAX);
+ }
+ nArgSlot++;
+ break;
+ }
+ }
+
+#undef LOAD_STRUCT_OFFSET_IF_NEEDED
+
+ // Keep our 4 shadow slots and an even number of slots (to stay 16-byte aligned)
+ if (nArgSlot < 4)
+ nArgSlot = 4;
+ else if (nArgSlot & 1)
+ nArgSlot++;
+
+ _ASSERTE((cbStructOffset % 16) == 0);
+
+ // xor eax, eax
+ pSl->X86EmitZeroOutReg(kRAX);
+ // ret
+ pSl->X86EmitReturn(0);
+
+ // NullLabel:
+ pSl->EmitLabel(pNullLabel);
+
+ CodeLabel *pGCLayoutLabel = NULL;
+ if (gcLayout.Count() == 0) {
+ // xor eax, eax
+ pSl->X86EmitZeroOutReg(kRAX);
+ }
+ else {
+ // lea rax, [rip + offset to gclayout]
+ pGCLayoutLabel = pSl->NewCodeLabel();
+ pSl->X86EmitLeaRIP(pGCLayoutLabel, kRAX);
+ }
+ // mov [r9], rax
+ pSl->X86EmitIndexRegStore(kR9, 0, kRAX);
+ // mov rax, cbStackNeeded
+ pSl->X86EmitRegLoad(kRAX, cbStructOffset + nArgSlot * 8);
+ // ret
+ pSl->X86EmitReturn(0);
+
+ if (gcLayout.Count() > 0) {
+ // GCLayout:
+ pSl->EmitLabel(pGCLayoutLabel);
+ EncodeGCOffsets(pSl, gcLayout);
+ }
+
+ return pSl->Link();
+}
+#endif // !DACCESS_COMPILE
+
+#endif // _TARGET_AMD64_
+
+
+#ifdef HAS_FIXUP_PRECODE
+
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+TADDR FixupPrecode::GetMethodDesc()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ // This lookup is also manually inlined in PrecodeFixupThunk assembly code
+ TADDR base = *PTR_TADDR(GetBase());
+ if (base == NULL)
+ return NULL;
+ return base + (m_MethodDescChunkIndex * MethodDesc::ALIGNMENT);
+}
+#endif
+
+#ifdef DACCESS_COMPILE
+void FixupPrecode::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ DacEnumMemoryRegion(dac_cast<TADDR>(this), sizeof(FixupPrecode));
+
+ DacEnumMemoryRegion(GetBase(), sizeof(TADDR));
+}
+#endif // DACCESS_COMPILE
+
+#endif // HAS_FIXUP_PRECODE
+
+#ifndef DACCESS_COMPILE
+
+BOOL rel32SetInterlocked(/*PINT32*/ PVOID pRel32, TADDR target, TADDR expected, MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ THROWS; // Creating a JumpStub could throw OutOfMemory
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
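+ // A rel32 operand is relative to the first byte after the 4-byte
+ // displacement field, so the raw value we expect to find is
+ // (expected target) - (pRel32 + 4).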
+ BYTE* callAddrAdj = (BYTE*)pRel32 + 4;
+ INT32 expectedRel32 = static_cast<INT32>((BYTE*)expected - callAddrAdj);
+
+ INT32 targetRel32 = rel32UsingJumpStub((INT32*)pRel32, target, pMD);
+
+ _ASSERTE(IS_ALIGNED(pRel32, sizeof(INT32)));
+ return FastInterlockCompareExchange((LONG*)pRel32, (LONG)targetRel32, (LONG)expectedRel32) == (LONG)expectedRel32;
+}
+
+void StubPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator /* = NULL */,
+ BYTE type /* = StubPrecode::Type */, TADDR target /* = NULL */)
+{
+ WRAPPER_NO_CONTRACT;
+
+ IN_WIN64(m_movR10 = X86_INSTR_MOV_R10_IMM64); // mov r10, pMethodDesc
+ IN_WIN32(m_movEAX = X86_INSTR_MOV_EAX_IMM32); // mov eax, pMethodDesc
+ m_pMethodDesc = (TADDR)pMD;
+ IN_WIN32(m_mov_rm_r = X86_INSTR_MOV_RM_R); // mov reg,reg
+ m_type = type;
+ m_jmp = X86_INSTR_JMP_REL32; // jmp rel32
+
+ if (pLoaderAllocator != NULL)
+ {
+ // Use pMD == NULL in all precode initialization methods to allocate the initial jump stub in a non-dynamic heap
+ // that has the same lifetime as the precode itself
+ if (target == NULL)
+ target = GetPreStubEntryPoint();
+ m_rel32 = rel32UsingJumpStub(&m_rel32, target, NULL /* pMD */, pLoaderAllocator);
+ }
+}
+
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+
+void NDirectImportPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+{
+ WRAPPER_NO_CONTRACT;
+ StubPrecode::Init(pMD, pLoaderAllocator, NDirectImportPrecode::Type, GetEEFuncEntryPoint(NDirectImportThunk));
+}
+
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+
+
+#ifdef HAS_REMOTING_PRECODE
+
+void RemotingPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator /* = NULL */)
+{
+ WRAPPER_NO_CONTRACT;
+
+ IN_WIN64(m_movR10 = X86_INSTR_MOV_R10_IMM64); // mov r10, pMethodDesc
+ IN_WIN32(m_movEAX = X86_INSTR_MOV_EAX_IMM32); // mov eax, pMethodDesc
+ m_pMethodDesc = (TADDR)pMD;
+ m_type = PRECODE_REMOTING; // nop
+ m_call = X86_INSTR_CALL_REL32;
+ m_jmp = X86_INSTR_JMP_REL32; // jmp rel32
+
+ if (pLoaderAllocator != NULL)
+ {
+ m_callRel32 = rel32UsingJumpStub(&m_callRel32,
+ GetEEFuncEntryPoint(PrecodeRemotingThunk), NULL /* pMD */, pLoaderAllocator);
+ m_rel32 = rel32UsingJumpStub(&m_rel32,
+ GetPreStubEntryPoint(), NULL /* pMD */, pLoaderAllocator);
+ }
+}
+
+#endif // HAS_REMOTING_PRECODE
+
+
+#ifdef HAS_FIXUP_PRECODE
+void FixupPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex /*=0*/, int iPrecodeChunkIndex /*=0*/)
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_op = X86_INSTR_CALL_REL32; // call PrecodeFixupThunk
+ m_type = FixupPrecode::TypePrestub;
+
+ // Initialize chunk indices only if they are not initialized yet. This is necessary to make MethodDesc::Reset work.
+ if (m_PrecodeChunkIndex == 0)
+ {
+ _ASSERTE(FitsInU1(iPrecodeChunkIndex));
+ m_PrecodeChunkIndex = static_cast<BYTE>(iPrecodeChunkIndex);
+ }
+
+ if (iMethodDescChunkIndex != -1)
+ {
+ if (m_MethodDescChunkIndex == 0)
+ {
+ _ASSERTE(FitsInU1(iMethodDescChunkIndex));
+ m_MethodDescChunkIndex = static_cast<BYTE>(iMethodDescChunkIndex);
+ }
+
+ if (*(void**)GetBase() == NULL)
+ *(void**)GetBase() = (BYTE*)pMD - (iMethodDescChunkIndex * MethodDesc::ALIGNMENT);
+ }
+
+ _ASSERTE(GetMethodDesc() == (TADDR)pMD);
+
+ if (pLoaderAllocator != NULL)
+ {
+ m_rel32 = rel32UsingJumpStub(&m_rel32,
+ GetEEFuncEntryPoint(PrecodeFixupThunk), NULL /* pMD */, pLoaderAllocator);
+ }
+}
+
+BOOL FixupPrecode::SetTargetInterlocked(TADDR target, TADDR expected)
+{
+ CONTRACTL
+ {
+ THROWS; // Creating a JumpStub could throw OutOfMemory
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ INT64 oldValue = *(INT64*)this;
+ BYTE* pOldValue = (BYTE*)&oldValue;
+
+ if (pOldValue[OFFSETOF_PRECODE_TYPE_CALL_OR_JMP] != FixupPrecode::TypePrestub)
+ return FALSE;
+
+ MethodDesc * pMD = (MethodDesc*)GetMethodDesc();
+ g_IBCLogger.LogMethodPrecodeWriteAccess(pMD);
+
+ INT64 newValue = oldValue;
+ BYTE* pNewValue = (BYTE*)&newValue;
+
+ pNewValue[OFFSETOF_PRECODE_TYPE_CALL_OR_JMP] = FixupPrecode::Type;
+
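+ // Patch the whole 8-byte precode with one interlocked compare-exchange:
+ // the compare value keeps the "call PrecodeFixupThunk" encoding, while
+ // the new value swaps in "jmp <target>" and the patched type byte.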
+ pOldValue[offsetof(FixupPrecode,m_op)] = X86_INSTR_CALL_REL32;
+ pNewValue[offsetof(FixupPrecode,m_op)] = X86_INSTR_JMP_REL32;
+
+ *(INT32*)(&pNewValue[offsetof(FixupPrecode,m_rel32)]) = rel32UsingJumpStub(&m_rel32, target, pMD);
+
+ _ASSERTE(IS_ALIGNED(this, sizeof(INT64)));
+ EnsureWritableExecutablePages(this, sizeof(INT64));
+ return FastInterlockCompareExchangeLong((INT64*) this, newValue, oldValue) == oldValue;
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+// Partial initialization. Used to save regrouped chunks.
+void FixupPrecode::InitForSave(int iPrecodeChunkIndex)
+{
+ m_op = X86_INSTR_CALL_REL32; // call PrecodeFixupThunk
+ m_type = FixupPrecode::TypePrestub;
+
+ _ASSERTE(FitsInU1(iPrecodeChunkIndex));
+ m_PrecodeChunkIndex = static_cast<BYTE>(iPrecodeChunkIndex);
+
+ // The rest is initialized in code:FixupPrecode::Fixup
+}
+
+void FixupPrecode::Fixup(DataImage *image, MethodDesc * pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Note that GetMethodDesc() does not return the correct value because of
+ // regrouping of MethodDescs into hot and cold blocks. That's why the caller
+ // has to supply the actual MethodDesc
+
+ SSIZE_T mdChunkOffset;
+ ZapNode * pMDChunkNode = image->GetNodeForStructure(pMD, &mdChunkOffset);
+ ZapNode * pHelperThunk = image->GetHelperThunk(CORINFO_HELP_EE_PRECODE_FIXUP);
+
+ image->FixupFieldToNode(this, offsetof(FixupPrecode, m_rel32),
+ pHelperThunk, 0, IMAGE_REL_BASED_REL32);
+
+ // Set the actual chunk index
+ FixupPrecode * pNewPrecode = (FixupPrecode *)image->GetImagePointer(this);
+
+ size_t mdOffset = mdChunkOffset - sizeof(MethodDescChunk);
+ size_t chunkIndex = mdOffset / MethodDesc::ALIGNMENT;
+ _ASSERTE(FitsInU1(chunkIndex));
+ pNewPrecode->m_MethodDescChunkIndex = (BYTE) chunkIndex;
+
+ // Fixup the base of MethodDescChunk
+ if (m_PrecodeChunkIndex == 0)
+ {
+ image->FixupFieldToNode(this, (BYTE *)GetBase() - (BYTE *)this,
+ pMDChunkNode, sizeof(MethodDescChunk));
+ }
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#endif // HAS_FIXUP_PRECODE
+
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+
+void ThisPtrRetBufPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+{
+ WRAPPER_NO_CONTRACT;
+
+ IN_WIN64(m_nop1 = X86_INSTR_NOP;) // nop
+#ifdef UNIX_AMD64_ABI
+ m_prefix1 = 0x48;
+ m_movScratchArg0 = 0xC78B; // mov rax,rdi
+ m_prefix2 = 0x48;
+ m_movArg0Arg1 = 0xFE8B; // mov rdi,rsi
+ m_prefix3 = 0x48;
+ m_movArg1Scratch = 0xF08B; // mov rsi,rax
+#else
+ IN_WIN64(m_prefix1 = 0x48;)
+ m_movScratchArg0 = 0xC889; // mov r/eax,r/ecx
+ IN_WIN64(m_prefix2 = 0x48;)
+ m_movArg0Arg1 = 0xD189; // mov r/ecx,r/edx
+ IN_WIN64(m_prefix3 = 0x48;)
+ m_movArg1Scratch = 0xC289; // mov r/edx,r/eax
+#endif
+ m_nop2 = X86_INSTR_NOP; // nop
+ m_jmp = X86_INSTR_JMP_REL32; // jmp rel32
+ m_pMethodDesc = (TADDR)pMD;
+
+ if (pLoaderAllocator != NULL)
+ {
+ m_rel32 = rel32UsingJumpStub(&m_rel32,
+ GetPreStubEntryPoint(), NULL /* pMD */, pLoaderAllocator);
+ }
+}
+
+#endif // HAS_THISPTR_RETBUF_PRECODE
+
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/i386/stublinkerx86.h b/src/vm/i386/stublinkerx86.h
new file mode 100644
index 0000000000..5ed300357c
--- /dev/null
+++ b/src/vm/i386/stublinkerx86.h
@@ -0,0 +1,802 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#ifndef STUBLINKERX86_H_
+#define STUBLINKERX86_H_
+
+#ifndef CLR_STANDALONE_BINDER
+#include "stublink.h"
+#endif // !CLR_STANDALONE_BINDER
+
+struct ArrayOpScript;
+class MetaSig;
+
+//=======================================================================
+
+#define X86_INSTR_CALL_REL32 0xE8 // call rel32
+#define X86_INSTR_CALL_IND 0x15FF // call dword ptr[addr32]
+#define X86_INSTR_CALL_IND_EAX 0x10FF // call dword ptr[eax]
+#define X86_INSTR_CALL_IND_EAX_OFFSET 0x50FF // call dword ptr[eax + offset] ; where offset follows these 2 bytes
+#define X86_INSTR_CALL_EAX 0xD0FF // call eax
+#define X86_INSTR_JMP_REL32 0xE9 // jmp rel32
+#define X86_INSTR_JMP_IND 0x25FF // jmp dword ptr[addr32]
+#define X86_INSTR_JMP_EAX 0xE0FF // jmp eax
+#define X86_INSTR_MOV_EAX_IMM32 0xB8 // mov eax, imm32
+#define X86_INSTR_MOV_EAX_ECX_IND 0x018b // mov eax, [ecx]
+#define X86_INSTR_CMP_IND_ECX_IMM32 0x3981 // cmp [ecx], imm32
+#define X86_INSTR_MOV_RM_R 0x89 // mov r/m,reg
+
+#define X86_INSTR_MOV_AL 0xB0 // mov al, imm8
+#define X86_INSTR_JMP_REL8 0xEB // jmp short rel8
+
+#define X86_INSTR_NOP 0x90 // nop
+#define X86_INSTR_NOP3_1 0x9090 // 1st word of 3-byte nop
+#define X86_INSTR_NOP3_3 0x90 // 3rd byte of 3-byte nop
+#define X86_INSTR_INT3 0xCC // int 3
+#define X86_INSTR_HLT 0xF4 // hlt
+
+#define X86_INSTR_MOVAPS_R_RM 0x280F // movaps xmm1, xmm2/mem128
+#define X86_INSTR_MOVAPS_RM_R 0x290F // movaps xmm1/mem128, xmm2
+#define X86_INSTR_MOVLPS_R_RM 0x120F // movlps xmm1, xmm2/mem128
+#define X86_INSTR_MOVLPS_RM_R 0x130F // movlps xmm1/mem128, xmm2
+#define X86_INSTR_MOVUPS_R_RM 0x100F // movups xmm1, xmm2/mem128
+#define X86_INSTR_MOVUPS_RM_R 0x110F // movups xmm1/mem128, xmm2
+#define X86_INSTR_XORPS 0x570F // xorps xmm1, xmm2/mem128
+
+#ifdef _TARGET_AMD64_
+#define X86_INSTR_MOV_R10_IMM64 0xBA49 // mov r10, imm64
+#endif
+
+//----------------------------------------------------------------------
+// Encodes X86 registers. The numbers are chosen to match Intel's opcode
+// encoding.
+//----------------------------------------------------------------------
+enum X86Reg
+{
+ kEAX = 0,
+ kECX = 1,
+ kEDX = 2,
+ kEBX = 3,
+ // kESP intentionally omitted because of its irregular treatment in MOD/RM
+ kEBP = 5,
+ kESI = 6,
+ kEDI = 7,
+
+#ifdef _TARGET_X86_
+ NumX86Regs = 8,
+#endif // _TARGET_X86_
+
+ kXMM0 = 0,
+ kXMM1 = 1,
+ kXMM2 = 2,
+ kXMM3 = 3,
+ kXMM4 = 4,
+ kXMM5 = 5,
+#if defined(_TARGET_AMD64_)
+ kXMM6 = 6,
+ kXMM7 = 7,
+ kXMM8 = 8,
+ kXMM9 = 9,
+ kXMM10 = 10,
+ kXMM11 = 11,
+ kXMM12 = 12,
+ kXMM13 = 13,
+ kXMM14 = 14,
+ kXMM15 = 15,
+ // Integer registers commence here
+ kRAX = 0,
+ kRCX = 1,
+ kRDX = 2,
+ kRBX = 3,
+ // kRSP intentionally omitted because of its irregular treatment in MOD/RM
+ kRBP = 5,
+ kRSI = 6,
+ kRDI = 7,
+ kR8 = 8,
+ kR9 = 9,
+ kR10 = 10,
+ kR11 = 11,
+ kR12 = 12,
+ kR13 = 13,
+ kR14 = 14,
+ kR15 = 15,
+ NumX86Regs = 16,
+
+#endif // _TARGET_AMD64_
+
+ // We use "push ecx" instead of "sub esp, sizeof(LPVOID)"
+ kDummyPushReg = kECX
+};
+
+
+// Use this only if you are absolutely sure that the instruction format
+// handles it. This is not declared as X86Reg so that users are forced
+// to add a cast and think about what exactly they are doing.
+const int kESP_Unsafe = 4;
+
+//----------------------------------------------------------------------
+// Encodes X86 conditional jumps. The numbers are chosen to match
+// Intel's opcode encoding.
+//----------------------------------------------------------------------
+class X86CondCode {
+ public:
+ enum cc {
+ kJA = 0x7,
+ kJAE = 0x3,
+ kJB = 0x2,
+ kJBE = 0x6,
+ kJC = 0x2,
+ kJE = 0x4,
+ kJZ = 0x4,
+ kJG = 0xf,
+ kJGE = 0xd,
+ kJL = 0xc,
+ kJLE = 0xe,
+ kJNA = 0x6,
+ kJNAE = 0x2,
+ kJNB = 0x3,
+ kJNBE = 0x7,
+ kJNC = 0x3,
+ kJNE = 0x5,
+ kJNG = 0xe,
+ kJNGE = 0xc,
+ kJNL = 0xd,
+ kJNLE = 0xf,
+ kJNO = 0x1,
+ kJNP = 0xb,
+ kJNS = 0x9,
+ kJNZ = 0x5,
+ kJO = 0x0,
+ kJP = 0xa,
+ kJPE = 0xa,
+ kJPO = 0xb,
+ kJS = 0x8,
+ };
+};
+
+//----------------------------------------------------------------------
+// StubLinker with extensions for generating X86 code.
+//----------------------------------------------------------------------
+#ifndef CLR_STANDALONE_BINDER
+class StubLinkerCPU : public StubLinker
+{
+ public:
+
+#ifdef _TARGET_AMD64_
+ enum X86OperandSize
+ {
+ k32BitOp,
+ k64BitOp,
+ };
+#endif
+
+ VOID X86EmitAddReg(X86Reg reg, INT32 imm32);
+ VOID X86EmitAddRegReg(X86Reg destreg, X86Reg srcReg);
+ VOID X86EmitSubReg(X86Reg reg, INT32 imm32);
+ VOID X86EmitSubRegReg(X86Reg destreg, X86Reg srcReg);
+
+ VOID X86EmitMovRegReg(X86Reg destReg, X86Reg srcReg);
+ VOID X86EmitMovSPReg(X86Reg srcReg);
+ VOID X86EmitMovRegSP(X86Reg destReg);
+
+ VOID X86EmitPushReg(X86Reg reg);
+ VOID X86EmitPopReg(X86Reg reg);
+ VOID X86EmitPushRegs(unsigned regSet);
+ VOID X86EmitPopRegs(unsigned regSet);
+ VOID X86EmitPushImm32(UINT value);
+ VOID X86EmitPushImm32(CodeLabel &pTarget);
+ VOID X86EmitPushImm8(BYTE value);
+ VOID X86EmitPushImmPtr(LPVOID value WIN64_ARG(X86Reg tmpReg = kR10));
+
+ VOID X86EmitCmpRegImm32(X86Reg reg, INT32 imm32); // cmp reg, imm32
+ VOID X86EmitCmpRegIndexImm32(X86Reg reg, INT32 offs, INT32 imm32); // cmp [reg+offs], imm32
+#ifdef _TARGET_AMD64_
+ VOID X64EmitCmp32RegIndexImm32(X86Reg reg, INT32 offs, INT32 imm32); // cmp dword ptr [reg+offs], imm32
+
+ VOID X64EmitMovXmmXmm(X86Reg destXmmreg, X86Reg srcXmmReg);
+ VOID X64EmitMovdqaFromMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs = 0);
+ VOID X64EmitMovdqaToMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs = 0);
+ VOID X64EmitMovSDFromMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs = 0);
+ VOID X64EmitMovSDToMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs = 0);
+ VOID X64EmitMovSSFromMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs = 0);
+ VOID X64EmitMovSSToMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs = 0);
+
+ VOID X64EmitMovXmmWorker(BYTE prefix, BYTE opcode, X86Reg Xmmreg, X86Reg baseReg, __int32 ofs = 0);
+#endif
+
+ VOID X86EmitZeroOutReg(X86Reg reg);
+ VOID X86EmitJumpReg(X86Reg reg);
+
+ VOID X86EmitOffsetModRM(BYTE opcode, X86Reg altreg, X86Reg indexreg, __int32 ofs);
+ VOID X86EmitOffsetModRmSIB(BYTE opcode, X86Reg opcodeOrReg, X86Reg baseReg, X86Reg indexReg, __int32 scale, __int32 ofs);
+
+ VOID X86EmitTailcallWithESPAdjust(CodeLabel *pTarget, INT32 imm32);
+ VOID X86EmitTailcallWithSinglePop(CodeLabel *pTarget, X86Reg reg);
+
+ VOID X86EmitNearJump(CodeLabel *pTarget);
+ VOID X86EmitCondJump(CodeLabel *pTarget, X86CondCode::cc condcode);
+ VOID X86EmitCall(CodeLabel *target, int iArgBytes);
+ VOID X86EmitReturn(WORD wArgBytes);
+#ifdef _TARGET_AMD64_
+ VOID X86EmitLeaRIP(CodeLabel *target, X86Reg reg);
+#endif
+
+ static const unsigned X86TLSFetch_TRASHABLE_REGS = (1<<kEAX) | (1<<kEDX) | (1<<kECX);
+ VOID X86EmitTLSFetch(DWORD idx, X86Reg dstreg, unsigned preservedRegSet);
+
+ VOID X86EmitCurrentThreadFetch(X86Reg dstreg, unsigned preservedRegSet);
+ VOID X86EmitCurrentAppDomainFetch(X86Reg dstreg, unsigned preservedRegSet);
+
+ VOID X86EmitIndexRegLoad(X86Reg dstreg, X86Reg srcreg, __int32 ofs = 0);
+ VOID X86EmitIndexRegStore(X86Reg dstreg, __int32 ofs, X86Reg srcreg);
+#if defined(_TARGET_AMD64_)
+ VOID X86EmitIndexRegStoreRSP(__int32 ofs, X86Reg srcreg);
+ VOID X86EmitIndexRegStoreR12(__int32 ofs, X86Reg srcreg);
+#endif // defined(_TARGET_AMD64_)
+
+ VOID X86EmitIndexPush(X86Reg srcreg, __int32 ofs);
+ VOID X86EmitBaseIndexPush(X86Reg baseReg, X86Reg indexReg, __int32 scale, __int32 ofs);
+ VOID X86EmitIndexPop(X86Reg srcreg, __int32 ofs);
+ VOID X86EmitIndexLea(X86Reg dstreg, X86Reg srcreg, __int32 ofs);
+#if defined(_TARGET_AMD64_)
+ VOID X86EmitIndexLeaRSP(X86Reg dstreg, X86Reg srcreg, __int32 ofs);
+#endif // defined(_TARGET_AMD64_)
+
+ VOID X86EmitSPIndexPush(__int32 ofs);
+ VOID X86EmitSubEsp(INT32 imm32);
+ VOID X86EmitAddEsp(INT32 imm32);
+ VOID X86EmitEspOffset(BYTE opcode,
+ X86Reg altreg,
+ __int32 ofs
+ AMD64_ARG(X86OperandSize OperandSize = k64BitOp)
+ );
+ VOID X86EmitPushEBPframe();
+
+ // These are used to emit calls to notify the profiler of transitions in and out of
+ // managed code through COM->COM+ interop or N/Direct
+ VOID EmitProfilerComCallProlog(TADDR pFrameVptr, X86Reg regFrame);
+ VOID EmitProfilerComCallEpilog(TADDR pFrameVptr, X86Reg regFrame);
+
+
+
+ // Emits the most efficient form of the operation:
+ //
+ // opcode altreg, [basereg + scaledreg*scale + ofs]
+ //
+ // or
+ //
+ // opcode [basereg + scaledreg*scale + ofs], altreg
+ //
+ // (the opcode determines which comes first.)
+ //
+ //
+ // Limitations:
+ //
+ // scale must be 0,1,2,4 or 8.
+ // if scale == 0, scaledreg is ignored.
+ // basereg and altreg may be equal to 4 (ESP) but scaledreg cannot
+ // for some opcodes, "altreg" may actually select an operation
+ // rather than a second register argument.
+ //
+
+ VOID X86EmitOp(WORD opcode,
+ X86Reg altreg,
+ X86Reg basereg,
+ __int32 ofs = 0,
+ X86Reg scaledreg = (X86Reg)0,
+ BYTE scale = 0
+ AMD64_ARG(X86OperandSize OperandSize = k32BitOp)
+ );
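+ // Illustrative example: X86EmitOp(0x8b, kEAX, kECX, 8) emits "mov eax, [ecx + 8]".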
+
+#ifdef _TARGET_AMD64_
+ FORCEINLINE
+ VOID X86EmitOp(WORD opcode,
+ X86Reg altreg,
+ X86Reg basereg,
+ __int32 ofs,
+ X86OperandSize OperandSize
+ )
+ {
+ X86EmitOp(opcode, altreg, basereg, ofs, (X86Reg)0, 0, OperandSize);
+ }
+#endif // _TARGET_AMD64_
+
+ // Emits
+ //
+ // opcode altreg, modrmreg
+ //
+ // or
+ //
+ // opcode modrmreg, altreg
+ //
+ // (the opcode determines which one comes first)
+ //
+ // For single-operand opcodes, "altreg" actually selects
+ // an operation rather than a register.
+
+ VOID X86EmitR2ROp(WORD opcode,
+ X86Reg altreg,
+ X86Reg modrmreg
+ AMD64_ARG(X86OperandSize OperandSize = k64BitOp)
+ );
+
+ VOID X86EmitRegLoad(X86Reg reg, UINT_PTR imm);
+
+ VOID X86EmitRegSave(X86Reg altreg, __int32 ofs)
+ {
+ LIMITED_METHOD_CONTRACT;
+ X86EmitEspOffset(0x89, altreg, ofs);
+ // X86Reg values never are outside a byte.
+ UnwindSavedReg(static_cast<UCHAR>(altreg), ofs);
+ }
+
+ VOID X86_64BitOperands ()
+ {
+ WRAPPER_NO_CONTRACT;
+#ifdef _TARGET_AMD64_
+ Emit8(0x48);
+#endif
+ }
+
+ VOID EmitEnable(CodeLabel *pForwardRef);
+ VOID EmitRareEnable(CodeLabel *pRejoinPoint);
+
+ VOID EmitDisable(CodeLabel *pForwardRef, BOOL fCallIn, X86Reg ThreadReg);
+ VOID EmitRareDisable(CodeLabel *pRejoinPoint);
+ VOID EmitRareDisableHRESULT(CodeLabel *pRejoinPoint, CodeLabel *pExitPoint);
+
+ VOID EmitSetup(CodeLabel *pForwardRef);
+ VOID EmitRareSetup(CodeLabel* pRejoinPoint, BOOL fThrow);
+ VOID EmitCheckGSCookie(X86Reg frameReg, int gsCookieOffset);
+
+#ifdef _TARGET_X86_
+ void EmitComMethodStubProlog(TADDR pFrameVptr, CodeLabel** rgRareLabels,
+ CodeLabel** rgRejoinLabels, BOOL bShouldProfile);
+
+ void EmitComMethodStubEpilog(TADDR pFrameVptr, CodeLabel** rgRareLabels,
+ CodeLabel** rgRejoinLabels, BOOL bShouldProfile);
+#endif
+
+ VOID EmitMethodStubProlog(TADDR pFrameVptr, int transitionBlockOffset);
+ VOID EmitMethodStubEpilog(WORD numArgBytes, int transitionBlockOffset);
+
+ VOID EmitUnboxMethodStub(MethodDesc* pRealMD);
+#if defined(FEATURE_SHARE_GENERIC_CODE)
+ VOID EmitInstantiatingMethodStub(MethodDesc* pSharedMD, void* extra);
+#endif // FEATURE_SHARE_GENERIC_CODE
+
+#if defined(FEATURE_COMINTEROP) && defined(_TARGET_X86_)
+ //========================================================================
+ // shared Epilog for stubs that enter managed code from COM
+ // uses a return thunk within the method desc
+ void EmitSharedComMethodStubEpilog(TADDR pFrameVptr,
+ CodeLabel** rgRareLabels,
+ CodeLabel** rgRejoinLabels,
+ unsigned offsetReturnThunk,
+ BOOL bShouldProfile);
+#endif // FEATURE_COMINTEROP && _TARGET_X86_
+
+ //===========================================================================
+ // Computes hash code for MulticastDelegate.Invoke()
+ static UINT_PTR HashMulticastInvoke(MetaSig* pSig);
+
+ //===========================================================================
+ // Emits code for Delegate.Invoke() any delegate type
+ VOID EmitDelegateInvoke();
+
+ //===========================================================================
+ // Emits code for MulticastDelegate.Invoke() - sig specific
+ VOID EmitMulticastInvoke(UINT_PTR hash);
+
+ //===========================================================================
+ // Emits code for Delegate.Invoke() on delegates that recorded creator assembly
+ VOID EmitSecureDelegateInvoke(UINT_PTR hash);
+
+ //===========================================================================
+ // Emits code to adjust for a static delegate target.
+ VOID EmitShuffleThunk(struct ShuffleEntry *pShuffleEntryArray);
+
+
+ //===========================================================================
+ // Emits code to do an array operation.
+ VOID EmitArrayOpStub(const ArrayOpScript*);
+
+ //Worker function to emit throw helpers for array ops.
+ VOID EmitArrayOpStubThrow(unsigned exConst, unsigned cbRetArg);
+
+ //===========================================================================
+ // Emits code to break into debugger
+ VOID EmitDebugBreak();
+
+#if defined(_DEBUG) && (defined(_TARGET_AMD64_) || defined(_TARGET_X86_)) && !defined(FEATURE_PAL)
+ //===========================================================================
+ // Emits code to log JITHelper access
+ void EmitJITHelperLoggingThunk(PCODE pJitHelper, LPVOID helperFuncCount);
+#endif
+
+#ifdef _DEBUG
+ VOID X86EmitDebugTrashReg(X86Reg reg);
+#endif
+
+#if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO) && !defined(CROSSGEN_COMPILE)
+ virtual VOID EmitUnwindInfoCheckWorker (CodeLabel *pCheckLabel);
+ virtual VOID EmitUnwindInfoCheckSubfunction();
+#endif
+
+#ifdef _TARGET_AMD64_
+
+ static Stub * CreateTailCallCopyArgsThunk(CORINFO_SIG_INFO * pSig,
+ CorInfoHelperTailCallSpecialHandling flags);
+
+#endif // _TARGET_AMD64_
+
+ private:
+ VOID X86EmitSubEspWorker(INT32 imm32);
+
+ public:
+ static void Init();
+
+};
+#endif // !CLR_STANDALONE_BINDER
+
+inline TADDR rel32Decode(/*PTR_INT32*/ TADDR pRel32)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return pRel32 + 4 + *PTR_INT32(pRel32);
+}
+
+BOOL rel32SetInterlocked(/*PINT32*/ PVOID pRel32, TADDR target, TADDR expected, MethodDesc* pMD);
+
+//------------------------------------------------------------------------
+//
+// Precode definitions
+//
+//------------------------------------------------------------------------
+
+EXTERN_C VOID STDCALL PrecodeFixupThunk();
+
+#ifdef _WIN64
+
+#define OFFSETOF_PRECODE_TYPE 0
+#define OFFSETOF_PRECODE_TYPE_CALL_OR_JMP 5
+#define OFFSETOF_PRECODE_TYPE_MOV_R10 10
+
+#define SIZEOF_PRECODE_BASE 16
+
+#else
+
+EXTERN_C VOID STDCALL PrecodeRemotingThunk();
+
+#define OFFSETOF_PRECODE_TYPE 5
+#define OFFSETOF_PRECODE_TYPE_CALL_OR_JMP 5
+#define OFFSETOF_PRECODE_TYPE_MOV_RM_R 6
+
+#define SIZEOF_PRECODE_BASE 8
+
+#endif // _WIN64
+
+
+#include <pshpack1.h>
+
+// Invalid precode type
+struct InvalidPrecode {
+ // int3
+ static const int Type = 0xCC;
+};
+
+
+// Regular precode
+struct StubPrecode {
+
+#ifdef _WIN64
+ static const BYTE Type = 0x40;
+ // mov r10,pMethodDesc
+ // inc eax
+ // jmp Stub
+#else
+ static const BYTE Type = 0xED;
+ // mov eax,pMethodDesc
+ // mov ebp,ebp
+ // jmp Stub
+#endif // _WIN64
+
+ IN_WIN64(USHORT m_movR10;)
+ IN_WIN32(BYTE m_movEAX;)
+ TADDR m_pMethodDesc;
+ IN_WIN32(BYTE m_mov_rm_r;)
+ BYTE m_type;
+ BYTE m_jmp;
+ INT32 m_rel32;
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator = NULL, BYTE type = StubPrecode::Type, TADDR target = NULL);
+
+ TADDR GetMethodDesc()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_pMethodDesc;
+ }
+
+ PCODE GetTarget()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return rel32Decode(PTR_HOST_MEMBER_TADDR(StubPrecode, this, m_rel32));
+ }
+
+ BOOL SetTargetInterlocked(TADDR target, TADDR expected)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ EnsureWritableExecutablePages(&m_rel32);
+ return rel32SetInterlocked(&m_rel32, target, expected, (MethodDesc*)GetMethodDesc());
+ }
+};
+IN_WIN64(static_assert_no_msg(offsetof(StubPrecode, m_movR10) == OFFSETOF_PRECODE_TYPE);)
+IN_WIN64(static_assert_no_msg(offsetof(StubPrecode, m_type) == OFFSETOF_PRECODE_TYPE_MOV_R10);)
+IN_WIN32(static_assert_no_msg(offsetof(StubPrecode, m_mov_rm_r) == OFFSETOF_PRECODE_TYPE);)
+IN_WIN32(static_assert_no_msg(offsetof(StubPrecode, m_type) == OFFSETOF_PRECODE_TYPE_MOV_RM_R);)
+typedef DPTR(StubPrecode) PTR_StubPrecode;
+
+
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+
+// NDirect import precode
+// (This is a fake precode; the VTable slot does not point to it.)
+struct NDirectImportPrecode : StubPrecode {
+
+#ifdef _WIN64
+ static const int Type = 0x48;
+ // mov r10,pMethodDesc
+ // dec eax
+ // jmp NDirectImportThunk
+#else
+ static const int Type = 0xC0;
+ // mov eax,pMethodDesc
+ // mov eax,eax
+ // jmp NDirectImportThunk
+#endif // _WIN64
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+
+ LPVOID GetEntrypoint()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return this;
+ }
+};
+typedef DPTR(NDirectImportPrecode) PTR_NDirectImportPrecode;
+
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+
+
+#ifdef HAS_REMOTING_PRECODE
+
+// Precode with embedded remoting interceptor
+struct RemotingPrecode {
+
+#ifdef _WIN64
+ static const int Type = XXX; // NYI
+ // mov r10,pMethodDesc
+ // call PrecodeRemotingThunk
+ // jmp Prestub/Stub/NativeCode
+#else
+ static const int Type = 0x90;
+ // mov eax,pMethodDesc
+ // nop
+ // call PrecodeRemotingThunk
+ // jmp Prestub/Stub/NativeCode
+#endif // _WIN64
+
+ IN_WIN64(USHORT m_movR10;)
+ IN_WIN32(BYTE m_movEAX;)
+ TADDR m_pMethodDesc;
+ BYTE m_type;
+ BYTE m_call;
+ INT32 m_callRel32;
+ BYTE m_jmp;
+ INT32 m_rel32;
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator = NULL);
+
+ TADDR GetMethodDesc()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return m_pMethodDesc;
+ }
+
+ PCODE GetTarget()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return rel32Decode(PTR_HOST_MEMBER_TADDR(RemotingPrecode, this, m_rel32));
+ }
+
+ BOOL SetTargetInterlocked(TADDR target, TADDR expected)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ EnsureWritableExecutablePages(&m_rel32);
+ return rel32SetInterlocked(&m_rel32, target, expected, (MethodDesc*)GetMethodDesc());
+ }
+};
+IN_WIN64(static_assert_no_msg(offsetof(RemotingPrecode, m_movR10) == OFFSETOF_PRECODE_TYPE);)
+IN_WIN64(static_assert_no_msg(offsetof(RemotingPrecode, m_type) == OFFSETOF_PRECODE_TYPE_MOV_R10);)
+IN_WIN32(static_assert_no_msg(offsetof(RemotingPrecode, m_type) == OFFSETOF_PRECODE_TYPE);)
+typedef DPTR(RemotingPrecode) PTR_RemotingPrecode;
+
+#endif // HAS_REMOTING_PRECODE
+
+
+#ifdef HAS_FIXUP_PRECODE
+
+// Fixup precode is used in ngen images when the prestub does just a one-time fixup.
+// Once patched, the fixup precode is a simple jump; it avoids the two-instruction overhead of the regular precode.
+struct FixupPrecode {
+
+ static const int TypePrestub = 0x5E;
+ // The entrypoint has to be 8-byte aligned so that the "call PrecodeFixupThunk" can be patched to "jmp NativeCode" atomically.
+ // call PrecodeFixupThunk
+ // db TypePrestub (pop esi)
+ // db MethodDescChunkIndex
+ // db PrecodeChunkIndex
+
+ static const int Type = 0x5F;
+ // After it has been patched to point to native code
+ // jmp NativeCode
+ // db Type (pop edi)
+
+ BYTE m_op;
+ INT32 m_rel32;
+ BYTE m_type;
+ BYTE m_MethodDescChunkIndex;
+ BYTE m_PrecodeChunkIndex;
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ // Fixup precode chunk is associated with MethodDescChunk. The layout of the fixup precode chunk is:
+ //
+ // FixupPrecode Entrypoint PrecodeChunkIndex = 2
+ // FixupPrecode Entrypoint PrecodeChunkIndex = 1
+ // FixupPrecode Entrypoint PrecodeChunkIndex = 0
+ // TADDR Base of MethodDescChunk
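+ // GetBase() recovers the trailing base pointer from any precode in the
+ // chunk: a precode with PrecodeChunkIndex == i starts (i + 1) *
+ // sizeof(FixupPrecode) bytes before the stored base.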
+#else
+ TADDR m_pMethodDesc;
+#endif
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex = 0, int iPrecodeChunkIndex = 0);
+
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ TADDR GetBase()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return dac_cast<TADDR>(this) + (m_PrecodeChunkIndex + 1) * sizeof(FixupPrecode);
+ }
+
+ TADDR GetMethodDesc();
+#else // HAS_FIXUP_PRECODE_CHUNKS
+ TADDR GetMethodDesc()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pMethodDesc;
+ }
+#endif // HAS_FIXUP_PRECODE_CHUNKS
+
+ PCODE GetTarget()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return rel32Decode(PTR_HOST_MEMBER_TADDR(FixupPrecode, this, m_rel32));
+ }
+
+ BOOL SetTargetInterlocked(TADDR target, TADDR expected);
+
+ static BOOL IsFixupPrecodeByASM(TADDR addr)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return *dac_cast<PTR_BYTE>(addr) == X86_INSTR_JMP_REL32;
+ }
+
+#ifdef FEATURE_PREJIT
+ // Partial initialization. Used to save regrouped chunks.
+ void InitForSave(int iPrecodeChunkIndex);
+
+ void Fixup(DataImage *image, MethodDesc * pMD);
+#endif
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+};
+IN_WIN32(static_assert_no_msg(offsetof(FixupPrecode, m_type) == OFFSETOF_PRECODE_TYPE));
+IN_WIN64(static_assert_no_msg(offsetof(FixupPrecode, m_op) == OFFSETOF_PRECODE_TYPE);)
+IN_WIN64(static_assert_no_msg(offsetof(FixupPrecode, m_type) == OFFSETOF_PRECODE_TYPE_CALL_OR_JMP);)
+
+typedef DPTR(FixupPrecode) PTR_FixupPrecode;
+
+#endif // HAS_FIXUP_PRECODE
+
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+
+// Precode to shuffle 'this' and retbuf for closed delegates over static methods with a return buffer
+struct ThisPtrRetBufPrecode {
+
+#ifdef _WIN64
+ static const int Type = 0x90;
+#else
+ static const int Type = 0xC2;
+#endif // _WIN64
+
+ // mov regScratch,regArg0
+ // mov regArg0,regArg1
+ // mov regArg1,regScratch
+ // nop
+ // jmp EntryPoint
+ // dw pMethodDesc
+
+ IN_WIN64(BYTE m_nop1;)
+ IN_WIN64(BYTE m_prefix1;)
+ WORD m_movScratchArg0;
+ IN_WIN64(BYTE m_prefix2;)
+ WORD m_movArg0Arg1;
+ IN_WIN64(BYTE m_prefix3;)
+ WORD m_movArg1Scratch;
+ BYTE m_nop2;
+ BYTE m_jmp;
+ INT32 m_rel32;
+ TADDR m_pMethodDesc;
+
+ void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+
+ TADDR GetMethodDesc()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return m_pMethodDesc;
+ }
+
+ PCODE GetTarget()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return rel32Decode(PTR_HOST_MEMBER_TADDR(ThisPtrRetBufPrecode, this, m_rel32));
+ }
+
+ BOOL SetTargetInterlocked(TADDR target, TADDR expected)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ EnsureWritableExecutablePages(&m_rel32);
+ return rel32SetInterlocked(&m_rel32, target, expected, (MethodDesc*)GetMethodDesc());
+ }
+};
+IN_WIN32(static_assert_no_msg(offsetof(ThisPtrRetBufPrecode, m_movArg1Scratch) + 1 == OFFSETOF_PRECODE_TYPE);)
+typedef DPTR(ThisPtrRetBufPrecode) PTR_ThisPtrRetBufPrecode;
+
+#endif // HAS_THISPTR_RETBUF_PRECODE
+
+#include <poppack.h>
+
+#endif // STUBLINKERX86_H_
diff --git a/src/vm/i386/virtualcallstubcpu.hpp b/src/vm/i386/virtualcallstubcpu.hpp
new file mode 100644
index 0000000000..345cda5b30
--- /dev/null
+++ b/src/vm/i386/virtualcallstubcpu.hpp
@@ -0,0 +1,1078 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: virtualcallstubcpu.hpp
+//
+
+
+//
+
+//
+// ============================================================================
+
+#ifndef _VIRTUAL_CALL_STUB_X86_H
+#define _VIRTUAL_CALL_STUB_X86_H
+
+#ifdef DECLARE_DATA
+#include "asmconstants.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#endif
+
+#include <pshpack1.h> // Since we are placing code, we want byte packing of the structs
+
+#define USES_LOOKUP_STUBS 1
+
+/*********************************************************************************************
+Stubs that contain code are all part of larger structs called Holders. There is a
+Holder for each kind of stub, i.e. XXXStub is contained within XXXHolder. Holders are
+essentially an implementation trick that allowed rearranging the code sequences more
+easily while trying out different alternatives, and for dealing with any alignment
+issues in a way that was mostly immune to the actual code sequences. These Holders
+should be revisited when the stub code sequences are fixed, since in many cases they
+add extra space to a stub that is not really needed.
+
+Stubs are placed in cache and hash tables. Since unaligned access of data in memory
+is very slow, the keys used in those tables should be aligned. The things used as keys
+typically also occur in the generated code, e.g. a token as an immediate part of an instruction.
+For now, to avoid alignment computations as different code strategies are tried out, the key
+fields are all in the Holders. Eventually, many of these fields should be dropped, and the instruction
+streams aligned so that the immediate fields fall on aligned boundaries.
+*/
+
+#if USES_LOOKUP_STUBS
+
+struct LookupStub;
+struct LookupHolder;
+
+/*LookupStub**************************************************************************************
+Virtual and interface call sites are initially set up to point at LookupStubs.
+This is because the runtime type of the <this> pointer is not yet known,
+so the target cannot be resolved. Note: if the jit is able to determine the runtime type
+of the <this> pointer, it should be generating a direct call, not a virtual or interface call.
+This stub pushes a lookup token onto the stack to identify the sought-after method, and then
+jumps into the EE (VirtualCallStubManager::ResolveWorkerStub) to effectuate the lookup and
+transfer of control to the appropriate target method implementation, perhaps patching the call site
+along the way to point to a more appropriate stub. Hence callsites that point to LookupStubs
+get quickly changed to point to another kind of stub.
+*/
+struct LookupStub
+{
+ inline PCODE entryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_entryPoint[0]; }
+ inline size_t token() { LIMITED_METHOD_CONTRACT; return _token; }
+ inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(LookupStub); }
+
+private:
+ friend struct LookupHolder;
+
+    // LookupStub::_entryPoint expects:
+ // ecx: object (the "this" pointer)
+ // eax: siteAddrForRegisterIndirect if this is a RegisterIndirect dispatch call
+ BYTE _entryPoint [2]; // 50 push eax ;save siteAddrForRegisterIndirect - this may be an indirect call
+ // 68 push
+ size_t _token; // xx xx xx xx 32-bit constant
+#ifdef STUB_LOGGING
+ BYTE cntr2[2]; // ff 05 inc
+ size_t* c_lookup; // xx xx xx xx [call_lookup_counter]
+#endif //STUB_LOGGING
+ BYTE part2 [1]; // e9 jmp
+ DISPL _resolveWorkerDispl;// xx xx xx xx pc-rel displ
+};
+
+/* LookupHolders are the containers for LookupStubs, they provide for any alignment of
+stubs as necessary. In the case of LookupStubs, alignment is necessary since
+LookupStubs are placed in a hash table keyed by token. */
+struct LookupHolder
+{
+ static void InitializeStatic();
+
+ void Initialize(PCODE resolveWorkerTarget, size_t dispatchToken);
+
+ LookupStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
+
+ static LookupHolder* FromLookupEntry(PCODE lookupEntry);
+
+private:
+ friend struct LookupStub;
+
+ BYTE align[(sizeof(void*)-(offsetof(LookupStub,_token)%sizeof(void*)))%sizeof(void*)];
+ LookupStub _stub;
+ BYTE pad[sizeof(void*) -
+ ((sizeof(void*)-(offsetof(LookupStub,_token)%sizeof(void*))) +
+ (sizeof(LookupStub))
+ ) % sizeof(void*)]; //complete DWORD
+
+ static_assert_no_msg((sizeof(void*) -
+ ((sizeof(void*)-(offsetof(LookupStub,_token)%sizeof(void*))) +
+ (sizeof(LookupStub))
+ ) % sizeof(void*)) != 0);
+};
+
+#endif // USES_LOOKUP_STUBS
+
+struct DispatchStub;
+struct DispatchHolder;
+
+/*DispatchStub**************************************************************************************
+Monomorphic and mostly monomorphic call sites eventually point to DispatchStubs.
+A dispatch stub has an expected type (expectedMT), target address (target) and fail address (failure).
+If the <this> object in the calling frame is in fact of the expected type, then
+control is transferred to the target address, the method implementation. If not,
+then control is transferred to the fail address, a fail stub (see below) where a polymorphic
+lookup is done to find the correct address to go to.
+
+Implementation note: Order, choice of instructions, and branch directions
+should be carefully tuned since they can have an inordinate effect on performance. Particular
+attention needs to be paid to the effects on the BTB and branch prediction, both in the small
+and in the large, i.e. it needs to run well in the face of BTB overflow--using static predictions.
+Note that since this stub is only used for mostly monomorphic callsites (ones that are not get patched
+to something else), the conditional jump "jne failure" is mostly not taken, and hence it is important
+that branch prediction statically predict this, which means it must be a forward jump. The alternative
+is to reverse the order of the jumps and make sure that the resulting conditional jump "je implTarget"
+is statically predicted as taken, i.e. a backward jump. The current choice was taken since it was easier
+to control the placement of the stubs than the placement of the jitted code. */
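+/* In C-like pseudocode the stub is roughly the following (a sketch only; the
+byte-accurate template is laid out in the struct below):
+
+    if (*(size_t*)thisPtr == expectedMT)   // cmp [ecx],expectedMT - faults on a null this
+        goto implTarget;                   // the method implementation
+    else
+        goto failTarget;                   // the ResolveStub's failEntryPoint
+*/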
+struct DispatchStub
+{
+ inline PCODE entryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_entryPoint[0]; }
+
+ inline size_t expectedMT() { LIMITED_METHOD_CONTRACT; return _expectedMT; }
+ inline PCODE implTarget() { LIMITED_METHOD_CONTRACT; return (PCODE) &_implDispl + sizeof(DISPL) + _implDispl; }
+ inline PCODE failTarget() { LIMITED_METHOD_CONTRACT; return (PCODE) &_failDispl + sizeof(DISPL) + _failDispl; }
+ inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(DispatchStub); }
+
+private:
+ friend struct DispatchHolder;
+
+ // DispatchStub:: _entryPoint expects:
+ // ecx: object (the "this" pointer)
+ // eax: siteAddrForRegisterIndirect if this is a RegisterIndirect dispatch call
+#ifndef STUB_LOGGING
+ BYTE _entryPoint [2]; // 81 39 cmp [ecx], ; This is the place where we are going to fault on null this.
+ size_t _expectedMT; // xx xx xx xx expectedMT ; If you change it, change also AdjustContextForVirtualStub in excep.cpp!!!
+ BYTE jmpOp1[2]; // 0f 85 jne
+ DISPL _failDispl; // xx xx xx xx failEntry ;must be forward jmp for perf reasons
+ BYTE jmpOp2; // e9 jmp
+ DISPL _implDispl; // xx xx xx xx implTarget
+#else //STUB_LOGGING
+ BYTE _entryPoint [2]; // ff 05 inc
+ size_t* d_call; // xx xx xx xx [call_mono_counter]
+ BYTE cmpOp [2]; // 81 39 cmp [ecx],
+ size_t _expectedMT; // xx xx xx xx expectedMT
+ BYTE jmpOp1[2]; // 0f 84 je
+ DISPL _implDispl; // xx xx xx xx implTarget ;during logging, perf is not so important
+ BYTE fail [2]; // ff 05 inc
+ size_t* d_miss; // xx xx xx xx [miss_mono_counter]
+ BYTE jmpFail; // e9 jmp
+ DISPL _failDispl; // xx xx xx xx failEntry
+#endif //STUB_LOGGING
+};
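+
+// The DISPL fields above hold x86 pc-relative displacements, measured from the
+// end of the 4-byte displacement field (i.e. from the start of the next
+// instruction). As a sketch with made-up addresses: if _implDispl sits at
+// 0x1000 and holds 0x42, implTarget() returns 0x1000 + 4 + 0x42 = 0x1046.
+// DispatchHolder::Initialize below stores the inverse:
+// displ = target - ((PCODE)&displ + sizeof(DISPL)).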
+
+/* DispatchHolders are the containers for DispatchStubs, they provide for any alignment of
+stubs as necessary. DispatchStubs are placed in a hashtable and in a cache. The keys for both
+are the pair expectedMT and token. Efficiency of the hash table is not a big issue,
+since lookups in it are fairly rare. Efficiency of the cache is paramount since it is accessed frequently
+(see ResolveStub below). Currently we are storing both of these fields in the DispatchHolder to simplify
+alignment issues. If inlineMT in the stub itself was aligned, then it could be the expectedMT field.
+While the token field can be logically gotten by following the failure target to the failEntryPoint
+of the ResolveStub and then to the token over there, for perf reasons of cache access, it is duplicated here.
+This allows us to use DispatchStubs in the cache. The alternative is to provide some other immutable struct
+for the cache composed of the triplet (expectedMT, token, target) and some sort of reclamation scheme when
+they are thrown out of the cache via overwrites (since concurrency will make the obvious approaches invalid).
+*/
+
+/* @workaround for ee resolution - Since the EE does not currently have a resolver function that
+does what we want, see notes in implementation of VirtualCallStubManager::Resolver, we are
+using dispatch stubs to simulate what we want. That means that inlineTarget, which should be immutable,
+is in fact written. Hence we have moved target out into the holder and aligned it so we can
+atomically update it. When we get a resolver function that does what we want, we can drop this field,
+and live with just the inlineTarget field in the stub itself, since immutability will hold.*/
+struct DispatchHolder
+{
+ static void InitializeStatic();
+
+ void Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT);
+
+ DispatchStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
+
+ static DispatchHolder* FromDispatchEntry(PCODE dispatchEntry);
+
+private:
+ //force expectedMT to be aligned since used as key in hash tables.
+#ifndef STUB_LOGGING
+ BYTE align[(sizeof(void*)-(offsetof(DispatchStub,_expectedMT)%sizeof(void*)))%sizeof(void*)];
+#endif
+ DispatchStub _stub;
+ BYTE pad[(sizeof(void*)-(sizeof(DispatchStub)%sizeof(void*))+offsetof(DispatchStub,_expectedMT))%sizeof(void*)]; //complete DWORD
+};
+
+struct ResolveStub;
+struct ResolveHolder;
+
+/*ResolveStub**************************************************************************************
+Polymorphic call sites and monomorphic calls that fail end up in a ResolveStub. There is only
+one resolver stub built for any given token, even though there may be many call sites that
+use that token and many distinct <this> types that are used in the calling call frames. A resolver stub
+actually has two entry points, one for polymorphic call sites and one for dispatch stubs that fail on their
+expectedMT test. There is a third part of the resolver stub that enters the ee when a decision should
+be made about changing the callsite. Therefore, we have defined the resolver stub as three distinct pieces,
+even though they are actually allocated as a single contiguous block of memory. These pieces are
+described below.
+
+A ResolveStub has two entry points:
+
+FailEntry - where the dispatch stub goes if the expected MT test fails. This piece of the stub does
+a check to see how often we are actually failing. If failures are frequent, control transfers to the
+patch piece to cause the call site to be changed from a mostly monomorphic callsite
+(calls dispatch stub) to a polymorphic callsite (calls resolve stub). If failures are rare, control
+transfers to the resolve piece (see ResolveStub). The failEntryPoint decrements a counter
+every time it is entered. The ee at various times will add a large chunk to the counter.
+
+ResolveEntry - does a lookup in a cache by hashing the actual type of the calling frame's
+<this> and the token identifying the (contract,method) pair desired. If found, control is transferred
+to the method implementation. If not found in the cache, the token is pushed and the ee is entered via
+the ResolveWorkerStub to do a full lookup and eventual transfer to the correct method implementation. Since
+there is a different resolve stub for every token, the token can be inlined and the token can be pre-hashed.
+The effectiveness of this approach is highly sensitive to the effectiveness of the hashing algorithm used,
+as well as its speed. It turns out it is very important to make the hash function sensitive to all
+of the bits of the method table, as method tables are laid out in memory in a very non-random way. Before
+making any changes to the code sequences here, it is very important to measure and tune them as perf
+can vary greatly, in unexpected ways, with seeming minor changes.
+
+Implementation note - Order, choice of instructions, and branch directions
+should be carefully tuned since it can have an inordinate effect on performance. Particular
+attention needs to be paid to the effects on the BTB and branch prediction, both in the small
+and in the large, i.e. it needs to run well in the face of BTB overflow--using static predictions.
+Note that this stub is called in highly polymorphic cases, but the cache should have been sized
+and the hash function chosen to maximize the cache hit case. Hence the cmp/jcc instructions should
+mostly be going down the cache hit route, and it is important that this be statically predicted as so.
+Hence the 3 jcc instrs need to be forward jumps. As structured, there is only one jmp/jcc that typically
+gets put in the BTB since all the others typically fall straight thru. Minimizing potential BTB entries
+is important. */
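+
+/* A C-like sketch of the probe performed by _resolveEntryPoint (illustrative
+only; the real code is the byte template in the struct below, and note that
+mask and _hashedToken are already scaled to pointer-sized cache slots):
+
+    size_t mt    = *(size_t*)thisPtr;                            // mov eax,[ecx]
+    size_t index = (((mt >> CALL_STUB_CACHE_NUM_BITS) + mt) ^ _hashedToken) & mask;
+    ResolveCacheElem *e = *(ResolveCacheElem**)(_cacheAddress + index);
+    if (e->pMT == mt && e->token == _token)
+        return e->target;                                        // cache hit - jmp eax
+    // otherwise fall into _slowEntryPoint, which pushes _token and enters the EE
+*/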
+
+struct ResolveStub
+{
+ inline PCODE failEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_failEntryPoint[0]; }
+ inline PCODE resolveEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_resolveEntryPoint[0]; }
+ inline PCODE slowEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_slowEntryPoint[0]; }
+
+ inline INT32* pCounter() { LIMITED_METHOD_CONTRACT; return _pCounter; }
+ inline UINT32 hashedToken() { LIMITED_METHOD_CONTRACT; return _hashedToken >> LOG2_PTRSIZE; }
+ inline size_t cacheAddress() { LIMITED_METHOD_CONTRACT; return _cacheAddress; }
+ inline size_t token() { LIMITED_METHOD_CONTRACT; return _token; }
+ inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(ResolveStub); }
+
+private:
+ friend struct ResolveHolder;
+
+ // ResolveStub::_failEntryPoint expects:
+ // ecx: object (the "this" pointer)
+ // eax: siteAddrForRegisterIndirect if this is a RegisterIndirect dispatch call
+ BYTE _failEntryPoint [2]; // 83 2d sub
+ INT32* _pCounter; // xx xx xx xx [counter],
+ BYTE part0 [2]; // 01 01
+ // 7c jl
+ BYTE toPatcher; // xx backpatcher ;must be forward jump, for perf reasons
+ // ;fall into the resolver stub
+
+ // ResolveStub::_resolveEntryPoint expects:
+ // ecx: object (the "this" pointer)
+ // eax: siteAddrForRegisterIndirect if this is a RegisterIndirect dispatch call
+ BYTE _resolveEntryPoint[6]; // 50 push eax ;save siteAddrForRegisterIndirect - this may be an indirect call
+ // 8b 01 mov eax,[ecx] ;get the method table from the "this" pointer. This is the place
+ // ; where we are going to fault on null this. If you change it,
+ // ; change also AdjustContextForVirtualStub in excep.cpp!!!
+ // 52 push edx
+ // 8b d0 mov edx, eax
+ BYTE part1 [6]; // c1 e8 0C shr eax,12 ;we are adding upper bits into lower bits of mt
+ // 03 c2 add eax,edx
+ // 35 xor eax,
+ UINT32 _hashedToken; // xx xx xx xx hashedToken ;along with pre-hashed token
+ BYTE part2 [1]; // 25 and eax,
+ size_t mask; // xx xx xx xx cache_mask
+ BYTE part3 [2]; // 8b 80 mov eax, [eax+
+ size_t _cacheAddress; // xx xx xx xx lookupCache]
+#ifdef STUB_LOGGING
+ BYTE cntr1[2]; // ff 05 inc
+ size_t* c_call; // xx xx xx xx [call_cache_counter]
+#endif //STUB_LOGGING
+ BYTE part4 [2]; // 3b 10 cmp edx,[eax+
+    // BYTE mtOffset;          //                  ResolveCacheElem.pMT]
+ BYTE part5 [1]; // 75 jne
+ BYTE toMiss1; // xx miss ;must be forward jump, for perf reasons
+ BYTE part6 [2]; // 81 78 cmp [eax+
+    BYTE tokenOffset;          // xx               ResolveCacheElem.token],
+ size_t _token; // xx xx xx xx token
+ BYTE part7 [1]; // 75 jne
+ BYTE toMiss2; // xx miss ;must be forward jump, for perf reasons
+ BYTE part8 [2]; // 8B 40 xx mov eax,[eax+
+    BYTE targetOffset;         //                  ResolveCacheElem.target]
+ BYTE part9 [6]; // 5a pop edx
+ // 83 c4 04 add esp,4 ;throw away siteAddrForRegisterIndirect - we don't need it now
+ // ff e0 jmp eax
+ // miss:
+ BYTE miss [1]; // 5a pop edx ; don't pop siteAddrForRegisterIndirect - leave it on the stack for use by ResolveWorkerChainLookupAsmStub and/or ResolveWorkerAsmStub
+ BYTE _slowEntryPoint[1]; // 68 push
+ size_t _tokenPush; // xx xx xx xx token
+#ifdef STUB_LOGGING
+ BYTE cntr2[2]; // ff 05 inc
+ size_t* c_miss; // xx xx xx xx [miss_cache_counter]
+#endif //STUB_LOGGING
+ BYTE part10 [1]; // e9 jmp
+ DISPL _resolveWorkerDispl; // xx xx xx xx resolveWorker == ResolveWorkerChainLookupAsmStub or ResolveWorkerAsmStub
+ BYTE patch[1]; // e8 call
+ DISPL _backpatcherDispl; // xx xx xx xx backpatcherWorker == BackPatchWorkerAsmStub
+ BYTE part11 [1]; // eb jmp
+ BYTE toResolveStub; // xx resolveStub, i.e. go back to _resolveEntryPoint
+};
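+// Note: _hashedToken is stored pre-shifted left by LOG2_PTRSIZE (see
+// ResolveHolder::Initialize), so xor-ing it into the shifted-and-summed method
+// table bits directly yields a byte offset into the pointer-sized cache slots;
+// the hashedToken() accessor above undoes that shift.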
+
+/* ResolveHolders are the containers for ResolveStubs; they provide
+for any alignment of the stubs as necessary. The stubs are placed in a hash table keyed by
+the token for which they are built. Efficiency of access requires that this token be aligned.
+For now, we have copied that field into the ResolveHolder itself; if the resolve stub is arranged such that
+any of its inlined tokens (non-prehashed) is aligned, then the token field in the ResolveHolder
+is not needed. */
+struct ResolveHolder
+{
+ static void InitializeStatic();
+
+ void Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+ size_t dispatchToken, UINT32 hashedToken,
+ void * cacheAddr, INT32 * counterAddr);
+
+ ResolveStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
+
+ static ResolveHolder* FromFailEntry(PCODE failEntry);
+ static ResolveHolder* FromResolveEntry(PCODE resolveEntry);
+
+private:
+ //align _token in resolve stub
+
+ BYTE align[(sizeof(void*)-((offsetof(ResolveStub,_token))%sizeof(void*)))%sizeof(void*)
+#ifdef STUB_LOGGING // This turns out to be zero-sized in stub_logging case, and is an error. So round up.
+ +sizeof(void*)
+#endif
+ ];
+
+ ResolveStub _stub;
+
+//#ifdef STUB_LOGGING // This turns out to be zero-sized in non stub_logging case, and is an error. So remove
+ BYTE pad[(sizeof(void*)-((sizeof(ResolveStub))%sizeof(void*))+offsetof(ResolveStub,_token))%sizeof(void*)]; //fill out DWORD
+//#endif
+};
+#include <poppack.h>
+
+
+#ifdef DECLARE_DATA
+
+#ifndef DACCESS_COMPILE
+
+#ifdef _MSC_VER
+
+#ifdef CHAIN_LOOKUP
+/* This will perform a chained lookup of the entry if the initial cache lookup fails
+
+ Entry stack:
+ dispatch token
+ siteAddrForRegisterIndirect (used only if this is a RegisterIndirect dispatch call)
+ return address of caller to stub
+ Also, EAX contains the pointer to the first ResolveCacheElem pointer for the calculated
+ bucket in the cache table.
+*/
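+/* A C-like sketch of the loop below (illustrative only; "first" is the element
+   the resolve stub already checked, and mt/token are the spilled arguments):
+
+    for (ResolveCacheElem *e = first->pNext; e != NULL; e = e->pNext)
+    {
+        if (e->pMT == mt && e->token == token)
+            return e->target;  // hit - occasionally promote e to the chain head first
+    }
+    // NULL terminator reached: fall back to ResolveWorkerAsmStub
+*/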
+__declspec (naked) void ResolveWorkerChainLookupAsmStub()
+{
+ enum
+ {
+ e_token_size = 4,
+ e_indirect_addr_size = 4,
+ e_caller_ret_addr_size = 4,
+ };
+ enum
+ {
+ // this is the part of the stack that is present as we enter this function:
+ e_token = 0,
+ e_indirect_addr = e_token + e_token_size,
+ e_caller_ret_addr = e_indirect_addr + e_indirect_addr_size,
+ e_ret_esp = e_caller_ret_addr + e_caller_ret_addr_size,
+ };
+ enum
+ {
+ e_spilled_reg_size = 8,
+ };
+
+ // main loop setup
+ __asm {
+#ifdef STUB_LOGGING
+ inc g_chained_lookup_call_counter
+#endif
+ // spill regs
+ push edx
+ push ecx
+ // move the token into edx
+ mov edx,[esp+e_spilled_reg_size+e_token]
+ // move the MT into ecx
+ mov ecx,[ecx]
+ }
+ main_loop:
+ __asm {
+ // get the next entry in the chain (don't bother checking the first entry again)
+ mov eax,[eax+e_resolveCacheElem_offset_next]
+ // test if we hit a terminating NULL
+ test eax,eax
+ jz fail
+ // compare the MT of the ResolveCacheElem
+ cmp ecx,[eax+e_resolveCacheElem_offset_mt]
+ jne main_loop
+ // compare the token of the ResolveCacheElem
+ cmp edx,[eax+e_resolveCacheElem_offset_token]
+ jne main_loop
+ // success
+ // decrement success counter and move entry to start if necessary
+ sub g_dispatch_cache_chain_success_counter,1
+ //@TODO: Perhaps this should be a jl for better branch prediction?
+ jge nopromote
+ // be quick to reset the counter so we don't get a bunch of contending threads
+ add g_dispatch_cache_chain_success_counter,CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT
+ // promote the entry to the beginning of the chain
+ mov ecx,eax
+ call VirtualCallStubManager::PromoteChainEntry
+ }
+ nopromote:
+ __asm {
+ // clean up the stack and jump to the target
+ pop ecx
+ pop edx
+ add esp,(e_caller_ret_addr - e_token)
+ mov eax,[eax+e_resolveCacheElem_offset_target]
+ jmp eax
+ }
+ fail:
+ __asm {
+#ifdef STUB_LOGGING
+ inc g_chained_lookup_miss_counter
+#endif
+ // restore registers
+ pop ecx
+ pop edx
+ jmp ResolveWorkerAsmStub
+ }
+}
+#endif
+
+/* Call the resolver, it will return where we are supposed to go.
+ There is a little stack magic here, in that we are entered with one
+ of the arguments for the resolver (the token) on the stack already.
+ We just push the other arguments, <this> in the call frame and the call site pointer,
+ and call the resolver.
+
+ On return we have the stack frame restored to the way it was when the ResolveStub
+ was called, i.e. as it was at the actual call site. The return value from
+ the resolver is the address we need to transfer control to, simulating a direct
+ call from the original call site. If we get passed back NULL, it means that the
+   resolution failed, an unimplemented method is being called.
+
+ Entry stack:
+ dispatch token
+ siteAddrForRegisterIndirect (used only if this is a RegisterIndirect dispatch call)
+ return address of caller to stub
+
+ Call stack:
+ pointer to TransitionBlock
+ call site
+ dispatch token
+ TransitionBlock
+ ArgumentRegisters (ecx, edx)
+ CalleeSavedRegisters (ebp, ebx, esi, edi)
+ return address of caller to stub
+ */
+__declspec (naked) void ResolveWorkerAsmStub()
+{
+ CANNOT_HAVE_CONTRACT;
+
+ __asm {
+ //
+ // The stub arguments are where we want to setup the TransitionBlock. We will
+ // setup the TransitionBlock later once we can trash them
+ //
+ // push ebp-frame
+ // push ebp
+ // mov ebp,esp
+
+ // save CalleeSavedRegisters
+ // push ebx
+
+ push esi
+ push edi
+
+ // push ArgumentRegisters
+ push ecx
+ push edx
+
+ mov esi, esp
+
+ push [esi + 4*4] // dispatch token
+ push [esi + 5*4] // siteAddrForRegisterIndirect
+ push esi // pTransitionBlock
+
+ // Setup up proper EBP frame now that the stub arguments can be trashed
+ mov [esi + 4*4],ebx
+ mov [esi + 5*4],ebp
+ lea ebp, [esi + 5*4]
+
+ // Make the call
+ call VSD_ResolveWorker
+
+ // From here on, mustn't trash eax
+
+ // pop ArgumentRegisters
+ pop edx
+ pop ecx
+
+ // pop CalleeSavedRegisters
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+
+ // Now jump to the target
+ jmp eax // continue on into the method
+ }
+}
+
+#ifdef FEATURE_REMOTING
+/* For an in-context dispatch, we will find the target. This
+ is the slow path, and erects a MachState structure for
+ creating a HelperMethodFrame
+
+ Entry stack:
+ dispatch token
+ return address of caller to stub
+
+ Call stack:
+ pointer to StubDispatchFrame
+ call site
+ dispatch token
+ StubDispatchFrame
+ GSCookie
+ negspace
+ vptr
+ datum
+ ArgumentRegisters (ecx, edx)
+ CalleeSavedRegisters (ebp, ebx, esi, edi)
+ return address of caller to stub
+*/
+__declspec (naked) void InContextTPDispatchAsmStub()
+{
+ CANNOT_HAVE_CONTRACT;
+
+ __asm {
+ // Pop dispatch token
+ pop eax
+
+ // push ebp-frame
+ push ebp
+ mov ebp,esp
+
+ // save CalleeSavedRegisters
+ push ebx
+ push esi
+ push edi
+
+ // push ArgumentRegisters
+ push ecx
+ push edx
+
+ mov esi, esp
+
+ push eax // token
+ push esi // pTransitionContext
+
+ // Make the call
+ call VSD_GetTargetForTPWorker
+
+ // From here on, mustn't trash eax
+
+ // pop ArgumentRegisters
+ pop edx
+ pop ecx
+
+ // pop CalleeSavedRegisters
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+
+ // Now jump to the target
+ jmp eax // continue on into the method
+ }
+}
+
+/* For an in-context dispatch, we will try to find the target in
+ the resolve cache. If this fails, we will jump to the full
+ version of InContextTPDispatchAsmStub
+
+ Entry stack:
+ dispatch slot number of interface MD
+ caller return address
+ ECX: this object
+*/
+__declspec (naked) void InContextTPQuickDispatchAsmStub()
+{
+ CANNOT_HAVE_CONTRACT;
+
+ __asm {
+ // Spill registers
+ push ecx
+ push edx
+
+ // Arg 2 - token
+ mov eax, [esp + 8]
+ push eax
+
+ // Arg 1 - this
+ push ecx
+
+ // Make the call
+ call VSD_GetTargetForTPWorkerQuick
+
+ // Restore registers
+ pop edx
+ pop ecx
+
+ // Test to see if we found a target
+ test eax, eax
+ jnz TargetFound
+
+ // If no target, jump to the slow worker
+ jmp InContextTPDispatchAsmStub
+
+ TargetFound:
+ // We got a target, so pop off the token and jump to it
+ add esp,4
+ jmp eax
+ }
+}
+#endif // FEATURE_REMOTING
+
+/* Call the callsite back patcher. The fail stub piece of the resolver is being
+called too often, i.e. dispatch stubs are failing the expected MT test too often.
+This stub wraps the call to the BackPatchWorker to take care of any stack magic
+needed.
+*/
+__declspec (naked) void BackPatchWorkerAsmStub()
+{
+ CANNOT_HAVE_CONTRACT;
+
+ __asm {
+ push EBP
+ mov ebp,esp
+ push EAX // it may contain siteAddrForRegisterIndirect
+ push ECX
+ push EDX
+ push EAX // push any indirect call address as the second arg to BackPatchWorker
+ push [EBP+8] // and push return address as the first arg to BackPatchWorker
+ call VirtualCallStubManager::BackPatchWorkerStatic
+ pop EDX
+ pop ECX
+ pop EAX
+ mov esp,ebp
+ pop ebp
+ ret
+ }
+}
+
+#endif // _MSC_VER
+
+#ifdef _DEBUG
+//
+// This function verifies that a pointer to an indirection cell lives inside a delegate object.
+// In the delegate case the indirection cell is held by the delegate itself in _methodPtrAux, when the delegate Invoke is
+// called the shuffle thunk is first invoked and that will call into the virtual dispatch stub.
+// Before control is given to the virtual dispatch stub a pointer to the indirection cell (thus an interior pointer to the delegate)
+// is pushed in EAX
+//
+BOOL isDelegateCall(BYTE *interiorPtr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (GCHeap::GetGCHeap()->IsHeapPointer((void*)interiorPtr))
+ {
+ Object *delegate = (Object*)(interiorPtr - DelegateObject::GetOffsetOfMethodPtrAux());
+ VALIDATEOBJECTREF(ObjectToOBJECTREF(delegate));
+ _ASSERTE(delegate->GetMethodTable()->IsDelegate());
+
+ return TRUE;
+ }
+ return FALSE;
+}
+#endif
+
+StubCallSite::StubCallSite(TADDR siteAddrForRegisterIndirect, PCODE returnAddr)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Not used
+ // if (isCallRelative(returnAddr))
+ // {
+ // m_siteAddr = returnAddr - sizeof(DISPL);
+ // }
+ // else
+ if (isCallRelativeIndirect((BYTE *)returnAddr))
+ {
+ m_siteAddr = *dac_cast<PTR_PTR_PCODE>(returnAddr - sizeof(PCODE));
+ }
+ else
+ {
+ _ASSERTE(isCallRegisterIndirect((BYTE *)returnAddr) || isDelegateCall((BYTE *)siteAddrForRegisterIndirect));
+ m_siteAddr = dac_cast<PTR_PCODE>(siteAddrForRegisterIndirect);
+ }
+}
+
+// the special return address for VSD tailcalls
+extern "C" void STDCALL JIT_TailCallReturnFromVSD();
+
+PCODE StubCallSite::GetCallerAddress()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_returnAddr != (PCODE)JIT_TailCallReturnFromVSD)
+ return m_returnAddr;
+
+ // Find the tailcallframe in the frame chain and get the actual caller from the first TailCallFrame
+ return TailCallFrame::FindTailCallFrame(GetThread()->GetFrame())->GetCallerAddress();
+}
+
+#ifdef STUB_LOGGING
+extern size_t g_lookup_inline_counter;
+extern size_t g_mono_call_counter;
+extern size_t g_mono_miss_counter;
+extern size_t g_poly_call_counter;
+extern size_t g_poly_miss_counter;
+#endif
+
+/* Template used to generate the stub. We generate a stub by allocating a block of
+   memory, copying the template over it, and then updating the specific fields that need
+   to be changed.
+*/
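+/* A sketch of how the template is consumed (the allocation itself lives in the
+   VirtualCallStubManager, not in this file; "stubHeap" is hypothetical):
+
+    LookupHolder *holder = (LookupHolder*)stubHeap->Alloc(sizeof(LookupHolder));
+    holder->Initialize(resolveWorkerTarget, token); // _stub = lookupInit, then
+                                                    // patch _token and the displacement
+*/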
+LookupStub lookupInit;
+
+void LookupHolder::InitializeStatic()
+{
+ static_assert_no_msg(((offsetof(LookupStub, _token)+offsetof(LookupHolder, _stub)) % sizeof(void*)) == 0);
+ static_assert_no_msg((sizeof(LookupHolder) % sizeof(void*)) == 0);
+
+ lookupInit._entryPoint [0] = 0x50;
+ lookupInit._entryPoint [1] = 0x68;
+ static_assert_no_msg(sizeof(lookupInit._entryPoint) == 2);
+ lookupInit._token = 0xcccccccc;
+#ifdef STUB_LOGGING
+ lookupInit.cntr2 [0] = 0xff;
+ lookupInit.cntr2 [1] = 0x05;
+ static_assert_no_msg(sizeof(lookupInit.cntr2) == 2);
+ lookupInit.c_lookup = &g_call_lookup_counter;
+#endif //STUB_LOGGING
+ lookupInit.part2 [0] = 0xe9;
+ static_assert_no_msg(sizeof(lookupInit.part2) == 1);
+ lookupInit._resolveWorkerDispl = 0xcccccccc;
+}
+
+void LookupHolder::Initialize(PCODE resolveWorkerTarget, size_t dispatchToken)
+{
+ _stub = lookupInit;
+
+ //fill in the stub specific fields
+ //@TODO: Get rid of this duplication of data.
+ _stub._token = dispatchToken;
+ _stub._resolveWorkerDispl = resolveWorkerTarget - ((PCODE) &_stub._resolveWorkerDispl + sizeof(DISPL));
+}
+
+LookupHolder* LookupHolder::FromLookupEntry(PCODE lookupEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+ LookupHolder* lookupHolder = (LookupHolder*) ( lookupEntry - offsetof(LookupHolder, _stub) - offsetof(LookupStub, _entryPoint) );
+ // _ASSERTE(lookupHolder->_stub._entryPoint[0] == lookupInit._entryPoint[0]);
+ return lookupHolder;
+}
+
+
+/* Template used to generate the stub. We generate a stub by allocating a block of
+   memory, copying the template over it, and then updating the specific fields that need
+   to be changed.
+*/
+DispatchStub dispatchInit;
+
+void DispatchHolder::InitializeStatic()
+{
+ // Check that _expectedMT is aligned in the DispatchHolder
+ static_assert_no_msg(((offsetof(DispatchHolder, _stub) + offsetof(DispatchStub,_expectedMT)) % sizeof(void*)) == 0);
+ static_assert_no_msg((sizeof(DispatchHolder) % sizeof(void*)) == 0);
+
+#ifndef STUB_LOGGING
+ dispatchInit._entryPoint [0] = 0x81;
+ dispatchInit._entryPoint [1] = 0x39;
+ static_assert_no_msg(sizeof(dispatchInit._entryPoint) == 2);
+
+ dispatchInit._expectedMT = 0xcccccccc;
+ dispatchInit.jmpOp1 [0] = 0x0f;
+ dispatchInit.jmpOp1 [1] = 0x85;
+ static_assert_no_msg(sizeof(dispatchInit.jmpOp1) == 2);
+
+ dispatchInit._failDispl = 0xcccccccc;
+ dispatchInit.jmpOp2 = 0xe9;
+ dispatchInit._implDispl = 0xcccccccc;
+#else //STUB_LOGGING
+ dispatchInit._entryPoint [0] = 0xff;
+ dispatchInit._entryPoint [1] = 0x05;
+ static_assert_no_msg(sizeof(dispatchInit._entryPoint) == 2);
+
+ dispatchInit.d_call = &g_mono_call_counter;
+ dispatchInit.cmpOp [0] = 0x81;
+ dispatchInit.cmpOp [1] = 0x39;
+ static_assert_no_msg(sizeof(dispatchInit.cmpOp) == 2);
+
+ dispatchInit._expectedMT = 0xcccccccc;
+ dispatchInit.jmpOp1 [0] = 0x0f;
+ dispatchInit.jmpOp1 [1] = 0x84;
+ static_assert_no_msg(sizeof(dispatchInit.jmpOp1) == 2);
+
+ dispatchInit._implDispl = 0xcccccccc;
+ dispatchInit.fail [0] = 0xff;
+ dispatchInit.fail [1] = 0x05;
+ static_assert_no_msg(sizeof(dispatchInit.fail) == 2);
+
+ dispatchInit.d_miss = &g_mono_miss_counter;
+ dispatchInit.jmpFail = 0xe9;
+ dispatchInit._failDispl = 0xcccccccc;
+#endif //STUB_LOGGING
+}
+
+void DispatchHolder::Initialize(PCODE implTarget, PCODE failTarget, size_t expectedMT)
+{
+ _stub = dispatchInit;
+
+ //fill in the stub specific fields
+ _stub._expectedMT = (size_t) expectedMT;
+ _stub._failDispl = failTarget - ((PCODE) &_stub._failDispl + sizeof(DISPL));
+ _stub._implDispl = implTarget - ((PCODE) &_stub._implDispl + sizeof(DISPL));
+}
+
+DispatchHolder* DispatchHolder::FromDispatchEntry(PCODE dispatchEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+ DispatchHolder* dispatchHolder = (DispatchHolder*) ( dispatchEntry - offsetof(DispatchHolder, _stub) - offsetof(DispatchStub, _entryPoint) );
+ // _ASSERTE(dispatchHolder->_stub._entryPoint[0] == dispatchInit._entryPoint[0]);
+ return dispatchHolder;
+}
+
+
+/* Template used to generate the stub. We generate a stub by allocating a block of
+   memory, copying the template over it, and then updating the specific fields that need
+   to be changed.
+*/
+
+ResolveStub resolveInit;
+
+void ResolveHolder::InitializeStatic()
+{
+ //Check that _token is aligned in ResolveHolder
+ static_assert_no_msg(((offsetof(ResolveHolder, _stub) + offsetof(ResolveStub, _token)) % sizeof(void*)) == 0);
+ static_assert_no_msg((sizeof(ResolveHolder) % sizeof(void*)) == 0);
+
+ resolveInit._failEntryPoint [0] = 0x83;
+ resolveInit._failEntryPoint [1] = 0x2d;
+ static_assert_no_msg(sizeof(resolveInit._failEntryPoint) == 2);
+
+ resolveInit._pCounter = (INT32 *) (size_t) 0xcccccccc;
+ resolveInit.part0 [0] = 0x01;
+ resolveInit.part0 [1] = 0x7c;
+ static_assert_no_msg(sizeof(resolveInit.part0) == 2);
+
+ resolveInit.toPatcher = (offsetof(ResolveStub, patch) - (offsetof(ResolveStub, toPatcher) + 1)) & 0xFF;
+
+ resolveInit._resolveEntryPoint [0] = 0x50;
+ resolveInit._resolveEntryPoint [1] = 0x8b;
+ resolveInit._resolveEntryPoint [2] = 0x01;
+ resolveInit._resolveEntryPoint [3] = 0x52;
+ resolveInit._resolveEntryPoint [4] = 0x8b;
+ resolveInit._resolveEntryPoint [5] = 0xd0;
+ static_assert_no_msg(sizeof(resolveInit._resolveEntryPoint) == 6);
+
+ resolveInit.part1 [0] = 0xc1;
+ resolveInit.part1 [1] = 0xe8;
+ resolveInit.part1 [2] = CALL_STUB_CACHE_NUM_BITS;
+ resolveInit.part1 [3] = 0x03;
+ resolveInit.part1 [4] = 0xc2;
+ resolveInit.part1 [5] = 0x35;
+ static_assert_no_msg(sizeof(resolveInit.part1) == 6);
+
+ resolveInit._hashedToken = 0xcccccccc;
+ resolveInit.part2 [0] = 0x25;
+ static_assert_no_msg(sizeof(resolveInit.part2) == 1);
+
+ resolveInit.mask = (CALL_STUB_CACHE_MASK << LOG2_PTRSIZE);
+ resolveInit.part3 [0] = 0x8b;
+    resolveInit.part3 [1] = 0x80;
+ static_assert_no_msg(sizeof(resolveInit.part3) == 2);
+
+ resolveInit._cacheAddress = 0xcccccccc;
+#ifdef STUB_LOGGING
+ resolveInit.cntr1 [0] = 0xff;
+ resolveInit.cntr1 [1] = 0x05;
+ static_assert_no_msg(sizeof(resolveInit.cntr1) == 2);
+
+ resolveInit.c_call = &g_poly_call_counter;
+#endif //STUB_LOGGING
+ resolveInit.part4 [0] = 0x3b;
+ resolveInit.part4 [1] = 0x10;
+ static_assert_no_msg(sizeof(resolveInit.part4) == 2);
+
+ // resolveInit.mtOffset = offsetof(ResolveCacheElem,pMT) & 0xFF;
+ static_assert_no_msg(offsetof(ResolveCacheElem,pMT) == 0);
+
+ resolveInit.part5 [0] = 0x75;
+ static_assert_no_msg(sizeof(resolveInit.part5) == 1);
+
+ resolveInit.toMiss1 = offsetof(ResolveStub,miss)-(offsetof(ResolveStub,toMiss1)+1);
+
+ resolveInit.part6 [0] = 0x81;
+ resolveInit.part6 [1] = 0x78;
+ static_assert_no_msg(sizeof(resolveInit.part6) == 2);
+
+ resolveInit.tokenOffset = offsetof(ResolveCacheElem,token) & 0xFF;
+
+ resolveInit._token = 0xcccccccc;
+
+ resolveInit.part7 [0] = 0x75;
+ static_assert_no_msg(sizeof(resolveInit.part7) == 1);
+
+ resolveInit.part8 [0] = 0x8b;
+ resolveInit.part8 [1] = 0x40;
+ static_assert_no_msg(sizeof(resolveInit.part8) == 2);
+
+ resolveInit.targetOffset = offsetof(ResolveCacheElem,target) & 0xFF;
+
+ resolveInit.toMiss2 = offsetof(ResolveStub,miss)-(offsetof(ResolveStub,toMiss2)+1);
+
+ resolveInit.part9 [0] = 0x5a;
+ resolveInit.part9 [1] = 0x83;
+ resolveInit.part9 [2] = 0xc4;
+ resolveInit.part9 [3] = 0x04;
+ resolveInit.part9 [4] = 0xff;
+ resolveInit.part9 [5] = 0xe0;
+ static_assert_no_msg(sizeof(resolveInit.part9) == 6);
+
+ resolveInit.miss [0] = 0x5a;
+// resolveInit.miss [1] = 0xb8;
+// resolveInit._hashedTokenMov = 0xcccccccc;
+ resolveInit._slowEntryPoint [0] = 0x68;
+ resolveInit._tokenPush = 0xcccccccc;
+#ifdef STUB_LOGGING
+ resolveInit.cntr2 [0] = 0xff;
+ resolveInit.cntr2 [1] = 0x05;
+ resolveInit.c_miss = &g_poly_miss_counter;
+#endif //STUB_LOGGING
+ resolveInit.part10 [0] = 0xe9;
+ resolveInit._resolveWorkerDispl = 0xcccccccc;
+
+ resolveInit.patch [0] = 0xe8;
+ resolveInit._backpatcherDispl = 0xcccccccc;
+ resolveInit.part11 [0] = 0xeb;
+ resolveInit.toResolveStub = (offsetof(ResolveStub, _resolveEntryPoint) - (offsetof(ResolveStub, toResolveStub) + 1)) & 0xFF;
+}
+
+void ResolveHolder::Initialize(PCODE resolveWorkerTarget, PCODE patcherTarget,
+ size_t dispatchToken, UINT32 hashedToken,
+ void * cacheAddr, INT32 * counterAddr)
+{
+ _stub = resolveInit;
+
+ //fill in the stub specific fields
+ _stub._pCounter = counterAddr;
+ _stub._hashedToken = hashedToken << LOG2_PTRSIZE;
+ _stub._cacheAddress = (size_t) cacheAddr;
+ _stub._token = dispatchToken;
+// _stub._hashedTokenMov = hashedToken;
+ _stub._tokenPush = dispatchToken;
+ _stub._resolveWorkerDispl = resolveWorkerTarget - ((PCODE) &_stub._resolveWorkerDispl + sizeof(DISPL));
+ _stub._backpatcherDispl = patcherTarget - ((PCODE) &_stub._backpatcherDispl + sizeof(DISPL));
+}
+
+ResolveHolder* ResolveHolder::FromFailEntry(PCODE failEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+ ResolveHolder* resolveHolder = (ResolveHolder*) ( failEntry - offsetof(ResolveHolder, _stub) - offsetof(ResolveStub, _failEntryPoint) );
+ // _ASSERTE(resolveHolder->_stub._resolveEntryPoint[0] == resolveInit._resolveEntryPoint[0]);
+ return resolveHolder;
+}
+
+ResolveHolder* ResolveHolder::FromResolveEntry(PCODE resolveEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+ ResolveHolder* resolveHolder = (ResolveHolder*) ( resolveEntry - offsetof(ResolveHolder, _stub) - offsetof(ResolveStub, _resolveEntryPoint) );
+ // _ASSERTE(resolveHolder->_stub._resolveEntryPoint[0] == resolveInit._resolveEntryPoint[0]);
+ return resolveHolder;
+}
+
+#endif // DACCESS_COMPILE
+
+VirtualCallStubManager::StubKind VirtualCallStubManager::predictStubKind(PCODE stubStartAddress)
+{
+ SUPPORTS_DAC;
+#ifdef DACCESS_COMPILE
+
+ return SK_BREAKPOINT; // Dac always uses the slower lookup
+
+#else
+
+ StubKind stubKind = SK_UNKNOWN;
+
+ EX_TRY
+ {
+ // If stubStartAddress is completely bogus, then this might AV,
+ // so we protect it with SEH. An AV here is OK.
+ AVInRuntimeImplOkayHolder AVOkay;
+
+ WORD firstWord = *((WORD*) stubStartAddress);
+
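+        // The first two stub bytes, read as a little-endian WORD, serve as a
+        // signature: 0x3981 is "81 39" (cmp [ecx],imm32) from DispatchStub
+        // (under STUB_LOGGING the dispatch stub instead starts "ff 05" = 0x05ff),
+        // 0x6850 is "50 68" (push eax; push imm32) from LookupStub, and
+        // 0x8b50 is "50 8b" (push eax; mov eax,[ecx]) from ResolveStub.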
+#ifndef STUB_LOGGING
+ if (firstWord == 0x3981)
+#else //STUB_LOGGING
+ if (firstWord == 0x05ff)
+#endif
+ {
+ stubKind = SK_DISPATCH;
+ }
+ else if (firstWord == 0x6850)
+ {
+ stubKind = SK_LOOKUP;
+ }
+ else if (firstWord == 0x8b50)
+ {
+ stubKind = SK_RESOLVE;
+ }
+ else
+ {
+ BYTE firstByte = ((BYTE*) stubStartAddress)[0];
+ BYTE secondByte = ((BYTE*) stubStartAddress)[1];
+
+ if ((firstByte == X86_INSTR_INT3) ||
+ (secondByte == X86_INSTR_INT3))
+ {
+ stubKind = SK_BREAKPOINT;
+ }
+ }
+ }
+ EX_CATCH
+ {
+ stubKind = SK_UNKNOWN;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return stubKind;
+
+#endif // DACCESS_COMPILE
+}
+
+#endif //DECLARE_DATA
+
+#endif // _VIRTUAL_CALL_STUB_X86_H
diff --git a/src/vm/ibclogger.cpp b/src/vm/ibclogger.cpp
new file mode 100644
index 0000000000..7bfe8b945b
--- /dev/null
+++ b/src/vm/ibclogger.cpp
@@ -0,0 +1,1198 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ibclogger.cpp
+//
+// Infrastructure for recording touches of EE data structures
+//
+
+
+#include "common.h"
+#ifdef IBCLOGGER_ENABLED
+#include "method.hpp"
+#include "corbbtprof.h"
+#include "metadatatracker.h"
+#include "field.h"
+#include "typekey.h"
+#include "ibclogger.h"
+
+//#ifdef _DEBUG
+//#define DEBUG_IBCLOGGER
+//#endif
+
+#ifdef DEBUG_IBCLOGGER
+
+#define DEBUG_PRINTF1(a) printf(a)
+#define DEBUG_PRINTF2(a,b) printf(a,b)
+#define DEBUG_PRINTF3(a,b,c) printf(a,b,c)
+#define DEBUG_PRINTF4(a,b,c,d) printf(a,b,c,d)
+#define DEBUG_PRINTF5(a,b,c,d,e) printf(a,b,c,d,e)
+#else
+#define DEBUG_PRINTF1(a)
+#define DEBUG_PRINTF2(a,b)
+#define DEBUG_PRINTF3(a,b,c)
+#define DEBUG_PRINTF4(a,b,c,d)
+#define DEBUG_PRINTF5(a,b,c,d,e)
+#endif
+
+DWORD dwIBCLogCount = 0;
+
+#ifdef _DEBUG
+/*static*/ unsigned IbcCallback::s_highestId = 0;
+#endif
+
+IBCLoggingDisabler::IBCLoggingDisabler()
+{
+ m_pInfo = NULL;
+ m_fDisabled = false;
+
+ if (g_IBCLogger.InstrEnabled())
+ {
+ m_pInfo = GetThread()->GetIBCInfo();
+ if (m_pInfo != NULL)
+ {
+ m_fDisabled = m_pInfo->DisableLogging();
+ }
+ }
+}
+
+IBCLoggingDisabler::IBCLoggingDisabler(bool ignore)
+{
+ m_pInfo = NULL;
+ m_fDisabled = false;
+
+ if (ignore == false)
+ {
+ if (g_IBCLogger.InstrEnabled())
+ {
+ m_pInfo = GetThread()->GetIBCInfo();
+ if (m_pInfo != NULL)
+ {
+ m_fDisabled = m_pInfo->DisableLogging();
+ }
+ }
+ }
+}
+
+IBCLoggingDisabler::IBCLoggingDisabler(ThreadLocalIBCInfo* pInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pInfo = pInfo;
+
+ if (m_pInfo != NULL)
+ {
+ m_fDisabled = m_pInfo->DisableLogging();
+ }
+ else
+ {
+ m_fDisabled = false;
+ }
+}
+
+IBCLoggingDisabler::~IBCLoggingDisabler()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_fDisabled)
+ m_pInfo->EnableLogging();
+}
+
+IBCLoggerAwareAllocMemTracker::~IBCLoggerAwareAllocMemTracker()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!m_fReleased)
+ {
+ GetThread()->FlushIBCInfo();
+ }
+}
+
+IBCLogger::IBCLogger()
+ : dwInstrEnabled(0)
+ , m_sync(NULL)
+{ LIMITED_METHOD_CONTRACT;}
+
+IBCLogger::~IBCLogger()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_sync)
+ {
+ delete m_sync;
+ m_sync = NULL;
+ }
+}
+
+void IBCLogger::LogAccessThreadSafeHelperStatic(const void * p, pfnIBCAccessCallback callback)
+{
+ WRAPPER_NO_CONTRACT;
+ /* To make the logging callsite as small as possible keep the part that passes extra */
+ /* argument to LogAccessThreadSafeHelper in separate non-inlined function */
+ g_IBCLogger.LogAccessThreadSafeHelper(p, callback);
+}
+
+void IBCLogger::LogAccessThreadSafeHelper(const void * p, pfnIBCAccessCallback callback)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+ CONTRACT_VIOLATION( HostViolation );
+
+ /* For the Global Class we may see p == NULL */
+ if (p == NULL)
+ return;
+
+ Thread * pThread = GetThread();
+
+ /* This could be called by the concurrent GC thread*/
+ /* where GetThread() returns NULL. In such cases,*/
+ /* we want to log data accessed by the GC, but we will just ignore it for now.*/
+ if (pThread == NULL)
+ return;
+
+ ThreadLocalIBCInfo* pInfo = pThread->GetIBCInfo();
+ if (pInfo == NULL)
+ {
+ CONTRACT_VIOLATION( ThrowsViolation | FaultViolation);
+ pInfo = new ThreadLocalIBCInfo();
+ pThread->SetIBCInfo(pInfo);
+ }
+
+ //
+ // During certain events we disable IBC logging.
+ // This may be to prevent deadlocks or we might
+ // not want to have IBC logging during these events.
+ //
+ if ( !pInfo->IsLoggingDisabled() )
+ {
+ CONTRACT_VIOLATION( ThrowsViolation | TakesLockViolation | FaultViolation);
+ pInfo->CallbackHelper(p, callback);
+ }
+}
+
+Crst * IBCLogger::GetSync()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ if (!m_sync)
+ {
+ Crst * pNewSync = new Crst(CrstIbcProfile, CrstFlags(CRST_UNSAFE_ANYMODE | CRST_REENTRANCY | CRST_DEBUGGER_THREAD));
+ if (FastInterlockCompareExchangePointer(m_sync.GetPointer(), pNewSync, NULL) != NULL)
+ {
+ // We lost the race
+ delete pNewSync;
+ }
+ }
+ MemoryBarrier();
+
+ return m_sync;
+}
+
+void IBCLogger::DelayedCallbackPtr(pfnIBCAccessCallback callback, const void * pValue1, const void * pValue2 /*=NULL*/)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ThreadLocalIBCInfo* pInfo = GetThread()->GetIBCInfo();
+
+ // record that we could not currently resolve this callback
+ pInfo->SetCallbackFailed();
+
+ // If we are processing the delayed list then we don't want or need to
+ // add this pair <callback, pValue> to the delay list.
+ if (pInfo->ProcessingDelayedList())
+ {
+ return;
+ }
+
+ // We could throw an out of memory exception
+ CONTRACT_VIOLATION( ThrowsViolation );
+
+ // Get our thread local hashtable
+ DelayCallbackTable * pTable = pInfo->GetPtrDelayList();
+
+ // Create IbcCallback in our stack frame to use as a key for the Lookup
+ IbcCallback key(callback, pValue1, pValue2);
+
+ // Perform lookup of this key in our hashtable
+ IbcCallback * pEntry = pTable->Lookup(&key);
+
+ // If we already have this pair <callback, pValue> in our table
+ // then just return, because we don't need to add a duplicate
+ if (pEntry != NULL)
+ {
+ // Print out a debug message if we are debugging this
+ DEBUG_PRINTF4("Did not add duplicate delayed ptr callback: pfn=0x%08x, pValue1=0x%8p, pValue2=0x%8p\n",
+ pEntry->GetPfn(), pEntry->GetValue1(), pEntry->GetValue2());
+ return;
+ }
+ // Now that we know that we will add a new entry into our hashtable
+ // We create a new IbcCallback in the heap to use as a persisted key
+ pEntry = new IbcCallback(callback, pValue1, pValue2);
+
+ // Mark this key as new valid IbcCallback
+ pEntry->SetValid();
+
+ // Add the entry into our hashtable.
+ pTable->Add(pEntry);
+
+ // Print out a debug message if we are debugging this
+ DEBUG_PRINTF4("Added a new delayed ptr callback: pfn=0x%08x, pValue1=0x%8p, pValue2=0x%8p\n",
+ key.GetPfn(), key.GetValue1(), key.GetValue2());
+}
+
+// Some IBC probes never complete successfully at all,
+// and there is no point in leaving them in the delay list forever,
+// because that significantly slows down the IBC instrumentation.
+// c_maxRetries: the maximum number of times an unsuccessful IBC probe is retried
+// c_minCount: the minimum number of entries in the delay list that we
+// need before we will call ProcessDelayedCallbacks()
+// c_minCountIncr: the minimum number of entries in the delay list that we
+// need to add before we will call ProcessDelayedCallbacks() again
+//
+static const int c_maxRetries = 10;
+static const int c_minCount = 8;
+static const int c_minCountIncr = 8;
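+
+// For example, with the defaults above: the first ProcessDelayedCallbacks()
+// pass runs once at least 8 delayed probes have accumulated. If it removes,
+// say, 3 of them, m_iMinCountToProcess is reset to the 5 that remain and then
+// bumped by c_minCountIncr, so roughly 8 more entries must accumulate before
+// the next pass (see CallbackHelper below).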
+
+ThreadLocalIBCInfo::ThreadLocalIBCInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ m_fCallbackFailed = false;
+ m_fProcessingDelayedList = false;
+ m_fLoggingDisabled = false;
+ m_iMinCountToProcess = c_minCount;
+ m_pDelayList = NULL;
+}
+
+ThreadLocalIBCInfo:: ~ThreadLocalIBCInfo()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pDelayList != NULL)
+ {
+ // We have one last call to the CallbackHelper to
+ // flush out any remaining items on our delay list
+ //
+ // CONTRACT_VIOLATION( ThrowsViolation | TakesLockViolation );
+ // CallbackHelper(NULL, NULL);
+
+ DeleteDelayedCallbacks();
+ }
+}
+
+void ThreadLocalIBCInfo::DeleteDelayedCallbacks()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ for (DelayCallbackTable::Iterator elem = m_pDelayList->Begin(),
+ end = m_pDelayList->End();
+ (elem != end); elem++)
+ {
+ IbcCallback * pCallback = const_cast<IbcCallback *>(*elem);
+
+ _ASSERTE(pCallback->IsValid());
+
+ // free up each of the IbcCallback pointers that we allocated
+ pCallback->Invalidate();
+ delete pCallback;
+ }
+
+ delete m_pDelayList;
+ m_pDelayList = NULL;
+}
+
+void ThreadLocalIBCInfo::FlushDelayedCallbacks()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ if (m_pDelayList != NULL)
+ {
+ CONTRACT_VIOLATION( ThrowsViolation );
+ CallbackHelper(NULL, NULL);
+
+ DeleteDelayedCallbacks();
+ }
+}
+
+DelayCallbackTable * ThreadLocalIBCInfo::GetPtrDelayList()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ if (m_pDelayList == NULL)
+ {
+ m_pDelayList = new DelayCallbackTable;
+ }
+
+ return m_pDelayList;
+}
+
+int ThreadLocalIBCInfo::ProcessDelayedCallbacks()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ int removedCount = 0; // Our return result
+
+ _ASSERTE(m_pDelayList != NULL);
+ _ASSERTE(m_fProcessingDelayedList == false);
+
+ m_fProcessingDelayedList = true;
+
+ // Processing Delayed Callback list
+ DEBUG_PRINTF2("Processing Delayed Callback list: GetCount()=%d\n", m_pDelayList->GetCount());
+
+ // try callbacks in the list
+ for (DelayCallbackTable::Iterator elem = m_pDelayList->Begin(),
+ end = m_pDelayList->End();
+ (elem != end); elem++)
+ {
+ IbcCallback * pCallback = const_cast<IbcCallback *>(*elem);
+
+ _ASSERTE(pCallback->IsValid());
+
+ // For each callback that we process we use the
+        // field m_fCallbackFailed to record whether we
+ // failed or succeeded in resolving the callback
+ //
+ m_fCallbackFailed = false;
+
+ pCallback->Invoke();
+
+ if (m_fCallbackFailed == false)
+ {
+            // Successfully processed a delayed callback
+ DEBUG_PRINTF5("Successfully processed a delayed callback: pfn=0x%08x, value1=0x%8p, value2=0x%8p, retries=%d\n",
+ pCallback->GetPfn(), pCallback->GetValue1(), pCallback->GetValue2(), pCallback->GetTryCount());
+
+ m_pDelayList->Remove(pCallback);
+ pCallback->Invalidate();
+ delete pCallback;
+ removedCount++;
+ }
+ else if (pCallback->IncrementTryCount() > c_maxRetries)
+ {
+ // Failed a delayed callback by hitting c_maxRetries
+ DEBUG_PRINTF4("Failed a delayed callback by hitting c_maxRetries: pfn=0x%08x, value1=0x%8p, value2=0x%8p\n",
+ pCallback->GetPfn(), pCallback->GetValue1(), pCallback->GetValue2());
+
+ m_pDelayList->Remove(pCallback);
+ pCallback->Invalidate();
+ delete pCallback;
+ removedCount++;
+ }
+ }
+
+ // Done Processing Delayed Callback list
+ DEBUG_PRINTF3("Done Processing Delayed Callback list: removed %d items, %d remain\n",
+ removedCount, m_pDelayList->GetCount());
+
+ _ASSERTE(m_fProcessingDelayedList == true);
+ m_fProcessingDelayedList = false;
+
+ return removedCount;
+}
+
+void ThreadLocalIBCInfo::CallbackHelper(const void * p, pfnIBCAccessCallback callback)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // Acquire the Crst lock before creating the IBCLoggingDisabler object.
+ // Only one thread at a time can be processing an IBC logging event.
+ CrstHolder lock(g_IBCLogger.GetSync());
+ {
+ // @ToDo: methods called from here should assert that they have the lock that we just took
+
+ IBCLoggingDisabler disableLogging( this ); // runs IBCLoggingDisabler::DisableLogging
+
+ // Just in case the processing of delayed list was terminated with exception
+ m_fProcessingDelayedList = false;
+
+ if (callback != NULL)
+ {
+ _ASSERTE(p != NULL);
+
+ // For each callback that we process we use the
+ // field m_fCallbackFailed to record whether we
+ // failed or succeeded in resolving the callback
+ //
+ m_fCallbackFailed = false;
+
+ callback(&g_IBCLogger, p, NULL);
+
+ if (m_fCallbackFailed == false)
+ {
+ // If we were able to successfully process this ibc probe then
+ // the chances are good that the delayed probes will succeed too.
+                // Thus it may be worth processing the delayed callback list.
+ // We will process this list if it currently has at least
+ // MinCountToProcess items in the delay list.
+ //
+ int delayListAfter = (m_pDelayList == NULL) ? 0 : m_pDelayList->GetCount();
+ if (delayListAfter >= GetMinCountToProcess())
+ {
+ int numRemoved = ProcessDelayedCallbacks();
+ if (numRemoved > 0)
+ {
+ // Reset the min count back down to the number that we still have remaining
+ m_iMinCountToProcess = m_pDelayList->GetCount();
+ }
+
+ // we increase the minCount by the min count increment so
+ // that we have to add a few new items to the delay list
+ // before we retry ProcessDelayedCallbacks() again.
+ IncMinCountToProcess(c_minCountIncr);
+ }
+ }
+ }
+ else // (callback == NULL) -- This is a special case
+ {
+ _ASSERTE(p == NULL);
+
+ // We just need to call ProcessDelayedCallbacks() unconditionally
+ if (m_pDelayList->GetCount() > 0)
+ {
+ ProcessDelayedCallbacks();
+ }
+ }
+
+ // runs IBCLoggingDisabler::~IBCLoggingDisabler
+ // which runs IBCLoggingDisabler::EnableLogging
+ }
+}
+
+
+void IBCLogger::LogMethodAccessHelper(const MethodDesc* pMD, ULONG flagNum)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ PRECONDITION(g_IBCLogger.InstrEnabled());
+ }
+ CONTRACTL_END;
+
+ {
+ // Don't set the ReadMethodCode flag for EE implemented methods such as Invoke
+ if ((flagNum == ReadMethodCode) && pMD->IsEEImpl())
+ return;
+
+ // we cannot log before the ObjectClass or StringClass are loaded
+ if (g_pObjectClass == NULL || g_pStringClass == NULL)
+ goto DelayCallback;
+
+ RelativeFixupPointer<PTR_MethodTable> * ppMT = pMD->GetMethodTablePtr();
+ if (ppMT->IsNull())
+ goto DelayCallback;
+
+ TADDR pMaybeTaggedMT = ppMT->GetValueMaybeTagged((TADDR)ppMT);
+ if (CORCOMPILE_IS_POINTER_TAGGED(pMaybeTaggedMT))
+ goto DelayCallback;
+
+ MethodTable *pMT = (MethodTable *)pMaybeTaggedMT;
+ if (!pMT->IsRestored_NoLogging())
+ goto DelayCallback;
+
+ LogMethodTableAccessHelper(pMT);
+
+ Module *pModule = pMT->GetModule();
+
+ if (MethodDescAccessInstrEnabled())
+ {
+ mdToken token;
+ if ( pMD->HasClassOrMethodInstantiation_NoLogging() )
+ {
+ // We will need to defer the Logging if we cannot compute the PreferredZapModule
+
+ //
+ // If we are creating a generic type or method we can have null TypeHandle args
+ // TFS: 749998
+ // We can also have unrestored MethodTables in our Instantiation args during FixupNativeEntry
+ //
+ Instantiation classInst = pMD->GetClassInstantiation();
+ Instantiation methodInst = pMD->GetMethodInstantiation();
+ for (DWORD i = 0; i < classInst.GetNumArgs(); i++)
+ {
+ TypeHandle thArg = classInst[i];
+ if (thArg.IsNull() || thArg.IsEncodedFixup() || !thArg.IsRestored_NoLogging())
+ goto DelayCallback;
+ }
+ for (DWORD i = 0; i < methodInst.GetNumArgs(); i++)
+ {
+ TypeHandle thArg = methodInst[i];
+ if (thArg.IsNull() || thArg.IsEncodedFixup() || !thArg.IsRestored_NoLogging())
+ goto DelayCallback;
+ }
+
+ Module *pPZModule = Module::GetPreferredZapModuleForMethodDesc(pMD);
+ token = pPZModule->LogInstantiatedMethod(pMD, flagNum);
+ if (!IsNilToken(token))
+ {
+ pPZModule->LogTokenAccess(token, MethodProfilingData, flagNum);
+ }
+ }
+ else
+ {
+ token = pMD->GetMemberDef_NoLogging();
+ pModule->LogTokenAccess(token, MethodProfilingData, flagNum);
+ }
+ }
+ return;
+ }
+
+DelayCallback:
+ DelayedCallbackPtr(LogMethodAccessWrapper, pMD, (void *)(SIZE_T)flagNum);
+}
+
+void IBCLogger::LogMethodAccessWrapper(IBCLogger* pLogger, const void * pValue1, const void * pValue2)
+{
+ WRAPPER_NO_CONTRACT;
+ pLogger->LogMethodAccessHelper((MethodDesc *)pValue1, (ULONG)(SIZE_T)pValue2);
+}
+
+void IBCLogger::LogMethodDescAccessHelper(const MethodDesc *pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LogMethodAccessHelper(pMD, ReadMethodDesc);
+}
+
+void IBCLogger::LogMethodDescWriteAccessHelper(MethodDesc *pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LogMethodAccessHelper(pMD, ReadMethodDesc);
+ LogMethodAccessHelper(pMD, WriteMethodDesc);
+}
+
+void IBCLogger::LogMethodPrecodeAccessHelper(MethodDesc *pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LogMethodAccessHelper(pMD, ReadMethodPrecode);
+}
+
+void IBCLogger::LogMethodPrecodeWriteAccessHelper(MethodDesc *pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LogMethodAccessHelper(pMD, ReadMethodPrecode);
+ LogMethodAccessHelper(pMD, WriteMethodPrecode);
+}
+
+// Log access to method code or method header
+void IBCLogger::LogMethodCodeAccessHelper(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ PRECONDITION(g_IBCLogger.InstrEnabled());
+ }
+ CONTRACTL_END;
+
+ LogMethodAccessHelper(pMD, ReadMethodCode);
+}
+
+// Log access to the method code and method header for NDirect calls
+void IBCLogger::LogNDirectCodeAccessHelper(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ PRECONDITION(g_IBCLogger.InstrEnabled());
+ }
+ CONTRACTL_END;
+
+ LogMethodAccessHelper(pMD, ReadMethodDesc);
+ LogMethodAccessHelper(pMD, ReadMethodCode);
+}
+
+
+// Log access to method gc info
+void IBCLogger::LogMethodGCInfoAccessHelper(MethodDesc *pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ _ASSERTE(InstrEnabled());
+
+ LogMethodAccessHelper(pMD, ReadGCInfo);
+ LogMethodAccessHelper(pMD, CommonReadGCInfo);
+}
+
+// Log access to method table
+void IBCLogger::LogMethodTableAccessHelper(MethodTable const * pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LogTypeAccessHelper(pMT, ReadMethodTable);
+}
+
+// Log access to method table
+void IBCLogger::LogTypeMethodTableAccessHelper(const TypeHandle *th)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LogTypeAccessHelper(*th, ReadMethodTable);
+}
+
+// Log write access to method table
+void IBCLogger::LogTypeMethodTableWriteableAccessHelper(const TypeHandle *th)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LogTypeAccessHelper(*th, ReadTypeDesc);
+ LogTypeAccessHelper(*th, WriteTypeDesc);
+}
+
+// Log access via method table, to a token-based type or an instantiated type.
+void IBCLogger::LogTypeAccessHelper(TypeHandle th, ULONG flagNum)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ PRECONDITION(g_IBCLogger.InstrEnabled());
+ }
+ CONTRACTL_END;
+
+ CONTRACT_VIOLATION( ThrowsViolation );
+
+ idTypeSpec token = idTypeSpecNil;
+ Module* pPreferredZapModule = NULL;
+
+ if (th.IsNull() || th.IsEncodedFixup())
+ return;
+
+ // we cannot do any logging before the ObjectClass and StringClass are loaded
+ if (g_pObjectClass == NULL || g_pStringClass == NULL)
+ goto DelayCallback;
+
+ if (!th.IsRestored_NoLogging())
+ goto DelayCallback;
+
+ //
+ // We assign the pPreferredZapModule and the token, then fall out to the LogTokenAccess
+ //
+ // Logging accesses to TypeDescs is done by blob and we create a special IBC token for the blob
+ if (th.IsTypeDesc())
+ {
+ pPreferredZapModule = Module::GetPreferredZapModuleForTypeHandle(th);
+
+ token = pPreferredZapModule->LogInstantiatedType(th, flagNum);
+ }
+ else
+ {
+ MethodTable *pMT = th.AsMethodTable();
+
+ if (pMT->IsArray())
+ {
+ pPreferredZapModule = Module::GetPreferredZapModuleForMethodTable(pMT);
+
+ token = pPreferredZapModule->LogInstantiatedType(th, flagNum);
+ }
+ else
+ {
+ Module* pModule = pMT->GetModule();
+
+ // Instantiations of generic types (like other parameterized types like arrays)
+ // need to be handled specially. Generic instantiations do not have a ready-made token
+ // in the loader module and need special handling
+ //
+ if (pMT->HasInstantiation() && // Is this any of List<T>, List<Blah<T>>, or List<String>?
+ !pMT->IsGenericTypeDefinition() && // Ignore the type definition (List<T>) as it corresponds to the typeDef token
+ !pMT->ContainsGenericVariables()) // We more or less don't save these anyway, apart from the GenericTypeDefinition
+ {
+ Instantiation inst = pMT->GetInstantiation();
+
+ // This function can get called from BuildMethodTableThrowing(). The instantiation info is not yet set then
+ if (!inst.IsEmpty() && !inst[0].IsNull())
+ {
+ pPreferredZapModule = Module::GetPreferredZapModuleForMethodTable(pMT);
+
+ token = pPreferredZapModule->LogInstantiatedType(th, flagNum);
+ }
+ }
+ else
+ {
+ pPreferredZapModule = pModule;
+ token = pMT->GetCl_NoLogging();
+ }
+ }
+ }
+
+ if (!IsNilToken(token))
+ pPreferredZapModule->LogTokenAccess(token, TypeProfilingData, flagNum);
+
+ return;
+
+DelayCallback:
+ DelayedCallbackPtr(LogTypeAccessWrapper, th.AsPtr(), (void *)(SIZE_T)flagNum);
+}
+
+void IBCLogger::LogTypeAccessWrapper(IBCLogger* pLogger, const void * pValue, const void * pValue2)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ pLogger->LogTypeAccessHelper(TypeHandle::FromPtr((void *)pValue), (ULONG)(SIZE_T)pValue2);
+}
+
+// Log access to method tables which are private (i.e. methodtables that are updated in the ngen image)
+void IBCLogger::LogMethodTableWriteableDataAccessHelper(MethodTable const * pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LogTypeAccessHelper(pMT, ReadMethodTable);
+ LogTypeAccessHelper(pMT, ReadMethodTableWriteableData);
+}
+
+// Log write access to method tables which are private (i.e. method tables that are updated in the ngen image)
+void IBCLogger::LogMethodTableWriteableDataWriteAccessHelper(MethodTable *pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LogTypeAccessHelper(pMT, ReadMethodTable);
+ LogTypeAccessHelper(pMT, WriteMethodTableWriteableData);
+}
+
+void IBCLogger::LogMethodTableNonVirtualSlotsAccessHelper(MethodTable const * pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LogTypeAccessHelper(pMT, ReadMethodTable);
+ LogTypeAccessHelper(pMT, ReadNonVirtualSlots);
+}
+
+// Log access to EEClass
+void IBCLogger::LogEEClassAndMethodTableAccessHelper(MethodTable * pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ if (pMT == NULL)
+ return;
+
+ LogTypeAccessHelper(pMT, ReadMethodTable);
+
+ if (!pMT->IsCanonicalMethodTable()) {
+ pMT = pMT->GetCanonicalMethodTable();
+ LogTypeAccessHelper(pMT, ReadMethodTable);
+ }
+
+ LogTypeAccessHelper(pMT, ReadEEClass);
+}
+
+// Log write to EEClass
+void IBCLogger::LogEEClassCOWTableAccessHelper(MethodTable * pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ if (pMT == NULL)
+ return;
+
+ LogTypeAccessHelper(pMT, ReadMethodTable);
+
+ if (!pMT->IsCanonicalMethodTable()) {
+ pMT = pMT->GetCanonicalMethodTable();
+ LogTypeAccessHelper(pMT, ReadMethodTable);
+ }
+
+ LogTypeAccessHelper(pMT, ReadEEClass);
+ LogTypeAccessHelper(pMT, WriteEEClass);
+}
+
+// Log access to FieldDescs list in EEClass
+void IBCLogger::LogFieldDescsAccessHelper(FieldDesc * pFD)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ MethodTable * pMT = pFD->GetApproxEnclosingMethodTable_NoLogging();
+
+ LogTypeAccessHelper(pMT, ReadMethodTable);
+
+ if (!pMT->IsCanonicalMethodTable()) {
+ pMT = pMT->GetCanonicalMethodTable();
+ LogTypeAccessHelper(pMT, ReadMethodTable);
+ }
+
+ LogTypeAccessHelper(pMT, ReadFieldDescs);
+}
+
+void IBCLogger::LogDispatchMapAccessHelper(MethodTable *pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LogTypeAccessHelper(pMT, ReadMethodTable);
+ LogTypeAccessHelper(pMT, ReadDispatchMap);
+}
+
+void IBCLogger::LogDispatchTableAccessHelper(MethodTable *pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LogTypeAccessHelper(pMT, ReadMethodTable);
+ LogTypeAccessHelper(pMT, ReadDispatchMap);
+ LogTypeAccessHelper(pMT, ReadDispatchTable);
+}
+
+void IBCLogger::LogDispatchTableSlotAccessHelper(DispatchSlot *pDS)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ if (pDS->IsNull())
+ return;
+
+ MethodDesc *pMD = MethodTable::GetMethodDescForSlotAddress(pDS->GetTarget());
+ MethodTable *pMT = pMD->GetMethodTable_NoLogging();
+ LogDispatchTableAccessHelper(pMT);
+}
+
+// Log read access to the field marshalers stored in the EEClass
+void IBCLogger::LogFieldMarshalersReadAccessHelper(MethodTable * pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ if (pMT == NULL)
+ return;
+
+ LogTypeAccessHelper(pMT, ReadMethodTable);
+
+ if (!pMT->IsCanonicalMethodTable()) {
+ pMT = pMT->GetCanonicalMethodTable();
+ LogTypeAccessHelper(pMT, ReadMethodTable);
+ }
+
+ LogTypeAccessHelper(pMT, ReadEEClass);
+ LogTypeAccessHelper(pMT, ReadFieldMarshalers);
+}
+
+// Log access to cctor info table
+void IBCLogger::LogCCtorInfoReadAccessHelper(MethodTable *pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+ LogTypeAccessHelper(pMT, ReadCCtorInfo);
+}
+
+
+void IBCLogger::LogTypeHashTableAccessHelper(const TypeHandle *th)
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ LogTypeAccessHelper(*th, ReadTypeHashTable);
+}
+
+// Log access to class hash table
+void IBCLogger::LogClassHashTableAccessHelper(EEClassHashEntry *pEntry)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ PRECONDITION(g_IBCLogger.InstrEnabled());
+ }
+ CONTRACTL_END;
+
+ // ExecutionManager::FindZapModule may enter the host (if we were hosted), but it's
+ // ok since we're just logging IBC data.
+ CONTRACT_VIOLATION( HostViolation );
+
+ Module *pModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(pEntry));
+ if (pModule == NULL)
+ {
+ // if FindZapModule returns NULL once, it will always return NULL,
+ // so there is no point in adding a DelayedCallback here.
+ return;
+ }
+
+ // we cannot log before the ObjectClass or StringClass are loaded
+ if (g_pObjectClass == NULL || g_pStringClass == NULL)
+ goto DelayCallback;
+
+ HashDatum datum;
+ datum = pEntry->GetData();
+ mdToken token;
+ if ((((ULONG_PTR) datum) & EECLASSHASH_TYPEHANDLE_DISCR) == 0)
+ {
+ TypeHandle t = TypeHandle::FromPtr(datum);
+ _ASSERTE(!t.IsNull());
+ MethodTable *pMT = t.GetMethodTable();
+ if (pMT == NULL)
+ goto DelayCallback;
+
+ token = pMT->GetCl_NoLogging();
+ }
+ else if (((ULONG_PTR)datum & EECLASSHASH_MDEXPORT_DISCR) == 0)
+ {
+ DWORD dwDatum = (DWORD)(DWORD_PTR)(datum); // <TODO> WIN64 - Pointer Truncation</TODO>
+ token = ((dwDatum >> 1) & 0x00ffffff) | mdtTypeDef;
+ }
+ else
+ return;
+
+ pModule->LogTokenAccess(token, TypeProfilingData, ReadClassHashTable);
+ return;
+
+DelayCallback:
+ DelayedCallbackPtr(LogClassHashTableAccessWrapper, pEntry);
+}
+
+// Log access to meta data
+void IBCLogger::LogMetaDataAccessHelper(const void * addr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ PRECONDITION(g_IBCLogger.InstrEnabled());
+ }
+ CONTRACTL_END;
+
+ // ExecutionManager::FindZapModule may enter the host (if we were hosted), but it's
+ // ok since we're just logging IBC data.
+ CONTRACT_VIOLATION( HostViolation );
+
+#if METADATATRACKER_ENABLED
+ if (Module *pModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(addr)))
+ {
+ mdToken token = MetaDataTracker::MapAddrToToken(addr);
+
+ pModule->LogTokenAccess(token, ProfilingFlags_MetaData);
+ pModule->LogTokenAccess(token, CommonMetaData);
+ return;
+ }
+#endif //METADATATRACKER_ENABLED
+
+ // if FindZapModule returns NULL once, it will always return NULL,
+ // so there is no point in adding a DelayedCallback here.
+}
+
+// Log a search to meta data
+// See the comment above CMiniMdRW::GetHotMetadataTokensSearchAware
+void IBCLogger::LogMetaDataSearchAccessHelper(const void * result)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ PRECONDITION(g_IBCLogger.InstrEnabled());
+ }
+ CONTRACTL_END;
+
+ // ExecutionManager::FindZapModule may enter the host (if we were hosted), but it's
+ // ok since we're just logging IBC data.
+ CONTRACT_VIOLATION( HostViolation );
+
+#if METADATATRACKER_ENABLED
+ if (Module *pModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(result)))
+ {
+ mdToken token = MetaDataTracker::MapAddrToToken(result);
+
+ pModule->LogTokenAccess(token, ProfilingFlags_MetaData);
+ pModule->LogTokenAccess(token, CommonMetaData);
+ pModule->LogTokenAccess(token, ProfilingFlags_MetaDataSearch);
+ return;
+ }
+#endif //METADATATRACKER_ENABLED
+
+ // if FindZapModule returns NULL once, it will always return NULL,
+ // so there is no point in adding a DelayedCallback here.
+}
+
+// Log access to method list associated with a CER
+void IBCLogger::LogCerMethodListReadAccessHelper(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ PRECONDITION(g_IBCLogger.InstrEnabled());
+ }
+ CONTRACTL_END;
+
+ LogMethodAccessHelper(pMD, ReadCerMethodList);
+}
+
+void IBCLogger::LogRidMapAccessHelper( RidMapLogData data )
+{
+ WRAPPER_NO_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ data.First()->LogTokenAccess( data.Second(), RidMap );
+}
+
+// Log access to RVA data
+void IBCLogger::LogRVADataAccessHelper(FieldDesc *pFD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ PRECONDITION(g_IBCLogger.InstrEnabled());
+ }
+ CONTRACTL_END;
+
+ // we cannot log before the ObjectClass or StringClass are loaded
+ if (g_pObjectClass == NULL || g_pStringClass == NULL)
+ goto DelayCallback;
+
+ if (CORCOMPILE_IS_POINTER_TAGGED(SIZE_T(pFD)))
+ return;
+
+ MethodTable * pMT;
+ pMT = pFD->GetApproxEnclosingMethodTable();
+
+ if (!pMT->IsRestored_NoLogging())
+ goto DelayCallback;
+
+ if (pMT->HasInstantiation())
+ return;
+
+ pMT->GetModule()->LogTokenAccess(pFD->GetMemberDef(), TypeProfilingData, RVAFieldData);
+ return;
+
+DelayCallback:
+ DelayedCallbackPtr(LogRVADataAccessWrapper, pFD);
+}
+
+
+#define LOADORDER_INSTR 0x00000001
+#define RID_ACCESSORDER_INSTR 0x00000002
+#define METHODDESC_ACCESS_INSTR 0x00000004
+#define ALL_INSTR (LOADORDER_INSTR | RID_ACCESSORDER_INSTR | METHODDESC_ACCESS_INSTR)
+
+void IBCLogger::EnableAllInstr()
+{
+ LIMITED_METHOD_CONTRACT;
+#if METADATATRACKER_ENABLED
+ MetaDataTracker::Enable();
+ MetaDataTracker::s_IBCLogMetaDataAccess = IBCLogger::LogMetaDataAccessStatic;
+ MetaDataTracker::s_IBCLogMetaDataSearch = IBCLogger::LogMetaDataSearchAccessStatic;
+#endif //METADATATRACKER_ENABLED
+ dwInstrEnabled = ALL_INSTR;
+}
+
+void IBCLogger::DisableAllInstr()
+{
+ LIMITED_METHOD_CONTRACT;
+ dwInstrEnabled = 0;
+}
+
+void IBCLogger::DisableRidAccessOrderInstr()
+{
+ LIMITED_METHOD_CONTRACT;
+ dwInstrEnabled &= (~RID_ACCESSORDER_INSTR);
+}
+
+void IBCLogger::DisableMethodDescAccessInstr()
+{
+ LIMITED_METHOD_CONTRACT;
+ dwInstrEnabled &= (~METHODDESC_ACCESS_INSTR);
+}
+
+BOOL IBCLogger::MethodDescAccessInstrEnabled()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (dwInstrEnabled & METHODDESC_ACCESS_INSTR);
+}
+
+BOOL IBCLogger::RidAccessInstrEnabled()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (dwInstrEnabled & RID_ACCESSORDER_INSTR);
+}
+
+#endif // IBCLOGGER_ENABLED
diff --git a/src/vm/ibclogger.h b/src/vm/ibclogger.h
new file mode 100644
index 0000000000..30d166d78f
--- /dev/null
+++ b/src/vm/ibclogger.h
@@ -0,0 +1,622 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// IBClogger.H
+//
+
+//
+// Infrastructure for recording touches of EE data structures
+//
+//
+
+
+#ifndef IBCLOGGER_H
+#define IBCLOGGER_H
+
+#include <holder.h>
+#include <sarray.h>
+#include <crst.h>
+#include <synch.h>
+#include <shash.h>
+
+// The IBCLogger class records touches of EE data structures. It is important to
+// minimize the overhead of IBC recording on non-recording scenarios. Our goal is
+// for all public methods to be inlined, and that the cost of doing the instrumentation
+// check does not exceed one comparison and one branch.
+//
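+// Editorial sketch (illustrative only; "Foo" is a placeholder) of the intended
+// call-site shape produced by the LOGACCESS macros below:
+//
+//   __forceinline void LogFooAccess(Foo *p)
+//   {
+//       if (InstrEnabled())          // one compare and one branch on the idle path
+//           LogFooAccessStatic(p);   // out-of-line slow path
+//   }
+//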
+
+class MethodDesc;
+class MethodTable;
+class EEClass;
+class TypeHandle;
+struct DispatchSlot;
+class Module;
+struct EEClassHashEntry;
+class IBCLogger;
+
+extern IBCLogger g_IBCLogger;
+
+typedef PTR_VOID HashDatum;
+
+typedef Pair< Module*, mdToken > RidMapLogData;
+
+#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+#define IBCLOGGER_ENABLED
+#endif
+
+#ifdef IBCLOGGER_ENABLED
+//
+// Function pointer type for the IBC probe callbacks
+//
+typedef void (* const pfnIBCAccessCallback)(IBCLogger* pLogger, const void * pValue, const void * pValue2);
+
+class IbcCallback
+{
+public:
+ IbcCallback(pfnIBCAccessCallback pCallback, const void * pValue1, const void * pValue2)
+ : m_pCallback(pCallback),
+ m_pValue1(pValue1),
+ m_pValue2(pValue2),
+ m_tryCount(0)
+#ifdef _DEBUG
+ , m_id(0)
+#endif
+ { LIMITED_METHOD_CONTRACT; }
+
+ void Invoke() const
+ {
+ WRAPPER_NO_CONTRACT;
+
+ m_pCallback(&g_IBCLogger, m_pValue1, m_pValue2);
+ }
+
+ SIZE_T GetPfn() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (SIZE_T) m_pCallback;
+ }
+
+ pfnIBCAccessCallback GetCallback() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pCallback;
+ }
+
+ const void * GetValue1() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pValue1;
+ }
+
+ const void * GetValue2() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pValue2;
+ }
+
+ void SetValid()
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef _DEBUG
+ m_id = ++s_highestId;
+#endif
+ }
+
+ void Invalidate()
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef _DEBUG
+ m_id = 0;
+#endif
+ }
+
+ bool IsValid() const
+ {
+ WRAPPER_NO_CONTRACT;
+
+#ifdef _DEBUG
+ return (m_id > 0) && (m_id <= s_highestId);
+#else
+ return true;
+#endif
+ }
+
+ int IncrementTryCount()
+ {
+ return ++m_tryCount;
+ }
+
+ int GetTryCount() const
+ {
+ return m_tryCount;
+ }
+
+private:
+ pfnIBCAccessCallback m_pCallback;
+ const void * m_pValue1;
+ const void * m_pValue2;
+
+ int m_tryCount;
+
+#ifdef _DEBUG
+ unsigned m_id;
+ static unsigned s_highestId;
+#endif
+};
+
+class DelayCallbackTableTraits : public DefaultSHashTraits< IbcCallback * >
+{
+public:
+ typedef IbcCallback * key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e;
+ }
+
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (k1->GetCallback() == k2->GetCallback()) &&
+ (k1->GetValue1() == k2->GetValue1()) &&
+ (k1->GetValue2() == k2->GetValue2());
+ }
+
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ SIZE_T hashLarge = (SIZE_T)k->GetCallback() ^
+ (SIZE_T)k->GetValue1() ^
+ (SIZE_T)k->GetValue2();
+
+#if POINTER_BITS == 32
+ // sizeof(SIZE_T) == sizeof(COUNT_T)
+ return hashLarge;
+#else
+ // xor in the upper half as well.
+ count_t hash = *(count_t *)(&hashLarge);
+ for (unsigned int i = 1; i < POINTER_BITS / 32; i++)
+ {
+ hash ^= ((count_t *)&hashLarge)[i];
+ }
+
+ return hash;
+#endif // POINTER_BITS
+ }
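+
+ // Editorial example (illustrative): on a 64-bit host a value such as
+ // hashLarge = 0x00007fff12345678 folds to hash = 0x12345678 ^ 0x00007fff,
+ // so entropy in the upper pointer bits still reaches the 32-bit hash.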
+
+ static const element_t Null()
+ {
+ WRAPPER_NO_CONTRACT;
+ return NULL;
+ }
+
+ static bool IsNull(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e == NULL;
+ }
+
+ static const element_t Deleted()
+ {
+ WRAPPER_NO_CONTRACT;
+ return (element_t)-1;
+ }
+
+ static bool IsDeleted(const element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e == (element_t)-1;
+ }
+};
+
+typedef SHash< DelayCallbackTableTraits > DelayCallbackTable;
+
+class ThreadLocalIBCInfo
+{
+public:
+ ThreadLocalIBCInfo();
+ ~ThreadLocalIBCInfo();
+
+ // BOOL IsLoggingDisabled()
+ // This indicates that logging is currently disabled for this thread.
+ // It is used to prevent the logging functionality from
+ // triggering more logging (and thus causing a deadlock).
+ // It is also used to prevent IBC logging whenever an IBCLoggingDisabler
+ // object is used. For example, we use this to disable IBC profiling
+ // whenever a thread starts a JIT compile event, because we
+ // don't want to "pollute" the IBC data gathering with the things
+ // that the JIT compiler touches.
+ // Finally, since our IBC logging needs to allocate unmanaged memory,
+ // we also disable IBC logging when we are inside a "can't alloc region",
+ // which typically occurs when a thread is performing a GC.
+ BOOL IsLoggingDisabled()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fLoggingDisabled || IsInCantAllocRegion();
+ }
+
+ // We want to disable IBC logging; any further log calls are ignored until
+ // we call EnableLogging().
+ //
+ // This method returns true if it changed the value of m_fLoggingDisabled from false to true;
+ // it returns false if m_fLoggingDisabled was already true.
+ // After this method executes, m_fLoggingDisabled is true.
+ bool DisableLogging()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ bool result = (m_fLoggingDisabled == false);
+ m_fLoggingDisabled = true;
+
+ return result;
+ }
+
+ // We want to re-enable IBC logging
+ void EnableLogging()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(m_fLoggingDisabled == true);
+
+ m_fLoggingDisabled = false;
+ }
+
+ bool ProcessingDelayedList()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fProcessingDelayedList;
+ }
+
+ void SetCallbackFailed()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_fCallbackFailed = true;
+ }
+
+ int GetMinCountToProcess()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_iMinCountToProcess;
+ }
+
+ void IncMinCountToProcess(int increment)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_iMinCountToProcess += increment;
+ }
+
+ DelayCallbackTable * GetPtrDelayList();
+
+ void DeleteDelayedCallbacks();
+
+ void FlushDelayedCallbacks();
+
+ int ProcessDelayedCallbacks();
+
+ void CallbackHelper(const void * p, pfnIBCAccessCallback callback);
+
+private:
+ bool m_fProcessingDelayedList;
+ bool m_fCallbackFailed;
+ bool m_fLoggingDisabled;
+
+ int m_iMinCountToProcess;
+
+ DelayCallbackTable * m_pDelayList;
+};
+
+class IBCLoggingDisabler
+{
+public:
+ IBCLoggingDisabler();
+ IBCLoggingDisabler(bool ignore); // When ignore is true we treat this as a nop
+ IBCLoggingDisabler(ThreadLocalIBCInfo* pInfo);
+ ~IBCLoggingDisabler();
+
+private:
+ ThreadLocalIBCInfo* m_pInfo;
+ bool m_fDisabled; // true if this holder actually disabled the logging;
+ // false when this is a nested occurrence and logging was already disabled
+};
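+
+// Editorial usage sketch (illustrative; the function name is hypothetical):
+//
+//   void JitCompileMethod(MethodDesc *pMD)
+//   {
+//       IBCLoggingDisabler disableLogging;   // suppress IBC logging in this scope
+//       ... compile pMD; touches made here are not recorded ...
+//   }   // destructor re-enables logging if this holder actually disabled it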
+
+//
+// IBCLoggerAwareAllocMemTracker should be used for allocation of IBC tracked structures during type loading.
+//
+// If type loading fails, the delayed IBC callbacks may contain pointers to the failed type or method.
+// IBCLoggerAwareAllocMemTracker will ensure that the delayed IBC callbacks are flushed before the memory of
+// the failed type or method is reclaimed. Otherwise, there would be stale pointers in the delayed IBC callbacks
+// that would cause crashes during IBC logging.
+//
+class IBCLoggerAwareAllocMemTracker : public AllocMemTracker
+{
+public:
+ IBCLoggerAwareAllocMemTracker()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ ~IBCLoggerAwareAllocMemTracker();
+};
+
+#else // IBCLOGGER_ENABLED
+
+typedef const void * pfnIBCAccessCallback;
+
+class IBCLoggingDisabler
+{
+public:
+ IBCLoggingDisabler()
+ {
+ }
+
+ ~IBCLoggingDisabler()
+ {
+ }
+};
+
+class ThreadLocalIBCInfo
+{
+public:
+ ThreadLocalIBCInfo()
+ {
+ }
+
+ ~ThreadLocalIBCInfo()
+ {
+ }
+};
+
+class IBCLoggerAwareAllocMemTracker : public AllocMemTracker
+{
+public:
+ IBCLoggerAwareAllocMemTracker()
+ {
+ }
+
+ ~IBCLoggerAwareAllocMemTracker()
+ {
+ }
+};
+
+#endif // IBCLOGGER_ENABLED
+
+
+// IBCLogger is responsible for collecting profile data. Logging is turned on by the
+// COMPLUS_ZapBBInstr environment variable, and the actual writing to the file
+// occurs in code:Module.WriteMethodProfileDataLogFile
+class IBCLogger
+{
+ //
+ // Methods for logging EE data structure accesses. All methods should be defined
+ // using the LOGACCESS macros, which create the wrapper method that calls the
+ // helper when instrumentation is enabled. The public name of these methods should
+ // be of the form Log##name##Access where name describes the type of access to be
+ // logged. The private helpers are implemented in IBClogger.cpp.
+ //
+
+#ifdef IBCLOGGER_ENABLED
+
+#define LOGACCESS_PTR(name, type) \
+ LOGACCESS(name, type*, (type*), (const void *));
+
+#define LOGACCESS_VALUE(name, type) \
+ LOGACCESS(name, type, *(type*), (const void *)&);
+
+#define LOGACCESS(name, type, totype, toptr) \
+public: \
+ __forceinline void Log##name##Access(type p) \
+ { \
+ WRAPPER_NO_CONTRACT; \
+ /* We expect this to get inlined, so that it */ \
+ /* has low overhead when not instrumenting. */ \
+ /* So keep the function really small */ \
+ if ( InstrEnabled() ) \
+ Log##name##AccessStatic(toptr p); \
+ } \
+ \
+private: \
+ __declspec(noinline) static void Log##name##AccessStatic(const void * p) \
+ { \
+ WRAPPER_NO_CONTRACT; \
+ /* To make the logging callsite as small as */ \
+ /* possible keep the part that passes extra */ \
+ /* argument to LogAccessThreadSafeHelper */ \
+ /* in separate non-inlined static functions */ \
+ LogAccessThreadSafeHelperStatic(p, Log##name##AccessWrapper); \
+ } \
+ \
+ static void Log##name##AccessWrapper(IBCLogger* pLogger, const void * pValue1, const void * pValue2) \
+ { \
+ WRAPPER_NO_CONTRACT; \
+ return pLogger->Log##name##AccessHelper(totype pValue1); \
+ } \
+ void Log##name##AccessHelper(type p); \
+
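+// Editorial sketch: for example, LOGACCESS_PTR(MethodTable, MethodTable const)
+// expands (roughly) to
+//
+//   public:  __forceinline void LogMethodTableAccess(MethodTable const * p)
+//            { if (InstrEnabled()) LogMethodTableAccessStatic((const void *) p); }
+//   private: static void LogMethodTableAccessStatic(const void * p);   // noinline
+//            static void LogMethodTableAccessWrapper(IBCLogger *, const void *, const void *);
+//            void LogMethodTableAccessHelper(MethodTable const * p);
+//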
+private:
+ static void LogAccessThreadSafeHelperStatic( const void * p, pfnIBCAccessCallback callback);
+ void LogAccessThreadSafeHelper( const void * p, pfnIBCAccessCallback callback);
+
+ void DelayedCallbackPtr(pfnIBCAccessCallback callback, const void * pValue1, const void * pValue2 = NULL);
+
+#else // IBCLOGGER_ENABLED
+
+#define LOGACCESS_PTR(name,type) \
+public: \
+ void Log##name##Access(type* p) { SUPPORTS_DAC; } \
+
+#define LOGACCESS_VALUE(name, type) \
+public: \
+ void Log##name##Access(type p) { SUPPORTS_DAC; } \
+
+#endif // IBCLOGGER_ENABLED
+
+ // Log access to method desc (which adds the method desc to the required list)
+ // Implemented by : code:IBCLogger.LogMethodDescAccessHelper
+ LOGACCESS_PTR(MethodDesc, const MethodDesc)
+
+ // Log access to method code or method header
+ // Implemented by : code:IBCLogger.LogMethodCodeAccessHelper
+ LOGACCESS_PTR(MethodCode, MethodDesc)
+
+ // Log access to the NDirect data stored for a MethodDesc
+ // also implies that the IL_STUB for the NDirect method is executed
+ // Implemented by : code:IBCLogger.LogNDirectCodeAccessHelper
+ LOGACCESS_PTR(NDirectCode,MethodDesc)
+
+ // Log access to method desc (which adds the method desc to the required list)
+ // Implemented by : code:IBCLogger.LogMethodDescWriteAccessHelper
+ LOGACCESS_PTR(MethodDescWrite,MethodDesc)
+
+ // Log access to method desc (which adds the method desc to the required list)
+ // Implemented by : code:IBCLogger.LogMethodPrecodeAccessHelper
+ LOGACCESS_PTR(MethodPrecode, MethodDesc)
+
+ // Log access to method desc (which adds the method desc to the required list)
+ // Implemented by : code:IBCLogger.LogMethodPrecodeWriteAccessHelper
+ LOGACCESS_PTR(MethodPrecodeWrite,MethodDesc)
+
+ // Log access to gc info
+ // Implemented by : code:IBCLogger.LogMethodGCInfoAccessHelper
+ LOGACCESS_PTR(MethodGCInfo, MethodDesc)
+
+ // Log access to method table
+ // Implemented by : code:IBCLogger.LogMethodTableAccessHelper
+ LOGACCESS_PTR(MethodTable, MethodTable const)
+
+ // Log access to method table
+ // Implemented by : code:IBCLogger.LogTypeMethodTableAccessHelper
+ LOGACCESS_PTR(TypeMethodTable, TypeHandle const)
+
+ // Log write access to method table
+ // Implemented by : code:IBCLogger.LogTypeMethodTableWriteableAccessHelper
+ LOGACCESS_PTR(TypeMethodTableWriteable, TypeHandle const)
+
+ // Log read access to private (written to) method table area
+ // Macro expands to : code:LogMethodTableWriteableDataAccessHelper
+ LOGACCESS_PTR(MethodTableWriteableData, MethodTable const)
+
+ // Log write access to private (written to) method table area
+ // Implemented by : code:IBCLogger.LogMethodTableWriteableDataWriteAccessHelper
+ LOGACCESS_PTR(MethodTableWriteableDataWrite,MethodTable)
+
+ // Log access to method table's NonVirtualSlotsArray
+ // Implemented by : code:IBCLogger.LogMethodTableNonVirtualSlotsAccessHelper
+ LOGACCESS_PTR(MethodTableNonVirtualSlots, MethodTable const)
+
+ // Log access to EEClass
+ // Implemented by : code:IBCLogger.LogEEClassAndMethodTableAccessHelper
+ LOGACCESS_PTR(EEClassAndMethodTable, MethodTable)
+
+ // Log access to EEClass COW table
+ // Implemented by : code:IBCLogger.LogEEClassCOWTableAccessHelper
+ LOGACCESS_PTR(EEClassCOWTable, MethodTable)
+
+ // Log access to the FieldDescs list in the EEClass
+ // Implemented by : code:IBCLogger.LogFieldDescsAccessHelper
+ LOGACCESS_PTR(FieldDescs, FieldDesc)
+
+ // Log access to the MTs dispatch map
+ // Implemented by : code:IBCLogger.LogDispatchMapAccessHelper
+ LOGACCESS_PTR(DispatchMap,MethodTable)
+
+ // Log read access to the MTs dispatch implementation table
+ // Implemented by : code:IBCLogger.LogDispatchTableAccessHelper
+ LOGACCESS_PTR(DispatchTable,MethodTable)
+
+ // Log read access to the MTs dispatch implementation table
+ // Implemented by : code:IBCLogger.LogDispatchTableAccessHelper
+ LOGACCESS_PTR(DispatchTableSlot,DispatchSlot)
+
+ // Log an update to the field marshalers
+ // Implemented by : code:IBCLogger.LogFieldMarshalersReadAccessHelper
+ LOGACCESS_PTR(FieldMarshalersRead,MethodTable)
+
+ // Log a lookup in the cctor info table
+ // Implemented by : code:IBCLogger.LogCCtorInfoReadAccessHelper
+ LOGACCESS_PTR(CCtorInfoRead,MethodTable)
+
+ // Log a lookup in the class hash table
+ // Implemented by : code:IBCLogger.LogClassHashTableAccessHelper
+ LOGACCESS_PTR(ClassHashTable,EEClassHashEntry)
+
+ // Log a lookup of the method list for a CER
+ // Implemented by : code:IBCLogger.LogCerMethodListReadAccessHelper
+ LOGACCESS_PTR(CerMethodListRead,MethodDesc)
+
+ // Log a metadata access
+ // Implemented by : code:IBCLogger.LogMetaDataAccessHelper
+ LOGACCESS_PTR(MetaData,const void)
+
+ // Log a metadata search
+ // Implemented by : code:IBCLogger.LogMetaDataSearchAccessHelper
+ LOGACCESS_PTR(MetaDataSearch,const void)
+
+ // Log an RVA FieldDesc access
+ // Implemented by : code:IBCLogger.LogRVADataAccessHelper
+ LOGACCESS_PTR(RVAData,FieldDesc)
+
+ // Log a lookup in the type hash table
+ // Implemented by : code:IBCLogger.LogTypeHashTableAccessHelper
+ LOGACCESS_PTR(TypeHashTable,TypeHandle const)
+
+ // Log a lookup in the Rid map
+ // Implemented by : code:IBCLogger.LogRidMapAccessHelper
+ LOGACCESS_VALUE( RidMap, RidMapLogData );
+
+public:
+
+#ifdef IBCLOGGER_ENABLED
+ IBCLogger();
+ ~IBCLogger();
+
+ // Methods for enabling/disabling instrumentation.
+ void EnableAllInstr();
+ void DisableAllInstr();
+#else // IBCLOGGER_ENABLED
+ void EnableAllInstr()
+ {
+ }
+
+ void DisableAllInstr()
+ {
+ }
+#endif // IBCLOGGER_ENABLED
+
+#ifndef DACCESS_COMPILE
+ void DisableRidAccessOrderInstr();
+ void DisableMethodDescAccessInstr();
+
+ inline BOOL InstrEnabled()
+ {
+ SUPPORTS_DAC;
+ return (dwInstrEnabled != 0);
+ }
+
+ Crst * GetSync();
+
+private:
+ void LogMethodAccessHelper(const MethodDesc* pMD, ULONG flagNum);
+ static void LogMethodAccessWrapper(IBCLogger* pLogger, const void * pValue1, const void * pValue2);
+
+ void LogTypeAccessHelper(TypeHandle th, ULONG flagNum);
+ static void LogTypeAccessWrapper(IBCLogger* pLogger, const void * pValue1, const void * pValue2);
+
+ BOOL MethodDescAccessInstrEnabled();
+ BOOL RidAccessInstrEnabled();
+
+private:
+ DWORD dwInstrEnabled;
+
+ Volatile<Crst*> m_sync;
+#endif // DACCESS_COMPILE
+};
+
+#endif // IBCLOGGER_H
diff --git a/src/vm/ildump.h b/src/vm/ildump.h
new file mode 100644
index 0000000000..195a9f523c
--- /dev/null
+++ b/src/vm/ildump.h
@@ -0,0 +1,223 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#define ILDUMP_VOID BYTE
+
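+// Editorial note: this file is an X-macro table; a consumer defines IL_OPCODE and
+// IL_OPCODE_EXT before including it. A hypothetical consumer might look like:
+//
+//   #define IL_OPCODE(op, name, cbArg, argType, fmt, isToken)           case op: return name;
+//   #define IL_OPCODE_EXT(pfx, op, name, cbArg, argType, fmt, isToken)  /* 0xFE-prefixed */
+//   #include "ildump.h"
+//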
+IL_OPCODE(0x00, "nop ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x01, "break ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x02, "ldarg.0 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x03, "ldarg.1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x04, "ldarg.2 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x05, "ldarg.3 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x06, "ldloc.0 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x07, "ldloc.1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x08, "ldloc.2 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x09, "ldloc.3 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x0a, "stloc.0 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x0b, "stloc.1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x0c, "stloc.2 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x0d, "stloc.3 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x0e, "ldarg.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x0f, "ldarga.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x10, "starg.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x11, "ldloc.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x12, "ldloca.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x13, "stloc.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x14, "ldnull ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x15, "ldc.i4.m1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x16, "ldc.i4.0 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x17, "ldc.i4.1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x18, "ldc.i4.2 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x19, "ldc.i4.3 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x1a, "ldc.i4.4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x1b, "ldc.i4.5 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x1c, "ldc.i4.6 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x1d, "ldc.i4.7 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x1e, "ldc.i4.8 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x1f, "ldc.i4.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x20, "ldc.i4 ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x21, "ldc.i8 ", 8, UNALIGNED INT64, "0x%I64x", 0)
+IL_OPCODE(0x22, "ldc.r4 ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x23, "ldc.r8 ", 8, UNALIGNED INT64, "0x%I64x", 0)
+IL_OPCODE(0x25, "dup ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x26, "pop ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x27, "jmp ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x28, "call ", 4, UNALIGNED INT32, "0x%08x", 1)
+IL_OPCODE(0x29, "calli ", 4, UNALIGNED INT32, "0x%08x", 1)
+IL_OPCODE(0x2a, "ret ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x2b, "br.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x2c, "brfalse.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x2d, "brtrue.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x2e, "beq.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x2f, "bge.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x30, "bgt.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x31, "ble.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x32, "blt.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x33, "bne.un.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x34, "bge.un.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x35, "bgt.un.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x36, "ble.un.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x37, "blt.un.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0x38, "br ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x39, "brfalse ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x3a, "brtrue ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x3b, "beq ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x3c, "bge ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x3d, "bgt ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x3e, "ble ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x3f, "blt ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x40, "bne.un ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x41, "bge.un ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x42, "bgt.un ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x43, "ble.un ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x44, "blt.un ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x46, "ldind.i1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x47, "ldind.u1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x48, "ldind.i2 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x49, "ldind.u2 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x4a, "ldind.i4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x4b, "ldind.u4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x4c, "ldind.i8 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x4d, "ldind.i ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x4e, "ldind.r4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x4f, "ldind.r8 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x50, "ldind.ref ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x51, "stind.ref ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x52, "stind.i1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x53, "stind.i2 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x54, "stind.i4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x55, "stind.i8 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x56, "stind.r4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x57, "stind.r8 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x58, "add ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x59, "sub ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x5a, "mul ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x5b, "div ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x5c, "div.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x5d, "rem ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x5e, "rem.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x5f, "and ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x60, "or ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x61, "xor ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x62, "shl ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x63, "shr ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x64, "shr.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x65, "neg ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x66, "not ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x67, "conv.i1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x68, "conv.i2 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x69, "conv.i4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x6a, "conv.i8 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x6b, "conv.r4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x6c, "conv.r8 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x6d, "conv.u4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x6e, "conv.u8 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x6f, "callvirt ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x70, "cpobj ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x71, "ldobj ", 4, UNALIGNED INT32, "0x%08x", 1)
+IL_OPCODE(0x72, "ldstr ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x73, "newobj ", 4, UNALIGNED INT32, "0x%08x", 1)
+IL_OPCODE(0x74, "castclass ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x75, "isinst ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x76, "conv.r.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x79, "unbox ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x7a, "throw ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x7b, "ldfld ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x7c, "ldflda ", 4, UNALIGNED INT32, "0x%08x", 1)
+IL_OPCODE(0x7d, "stfld ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x7e, "ldsfld ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x7f, "ldsflda ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x80, "stsfld ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x81, "stobj ", 4, UNALIGNED INT32, "0x%08x", 1)
+IL_OPCODE(0x82, "conv.ovf.i1.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x83, "conv.ovf.i2.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x84, "conv.ovf.i4.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x85, "conv.ovf.i8.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x86, "conv.ovf.u1.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x87, "conv.ovf.u2.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x88, "conv.ovf.u4.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x89, "conv.ovf.u8.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x8a, "conv.ovf.i.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x8b, "conv.ovf.u.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x8c, "box ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x8d, "newarr ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x8e, "ldlen ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x8f, "ldelema ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0x90, "ldelem.i1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x91, "ldelem.u1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x92, "ldelem.i2 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x93, "ldelem.u2 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x94, "ldelem.i4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x95, "ldelem.u4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x96, "ldelem.i8 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x97, "ldelem.i ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x98, "ldelem.r4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x99, "ldelem.r8 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x9a, "ldelem.ref ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x9b, "stelem.i ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x9c, "stelem.i1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x9d, "stelem.i2 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x9e, "stelem.i4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0x9f, "stelem.i8 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xa0, "stelem.r4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xa1, "stelem.r8 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xa2, "stelem.ref ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xb3, "conv.ovf.i1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xb4, "conv.ovf.u1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xb5, "conv.ovf.i2 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xb6, "conv.ovf.u2 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xb7, "conv.ovf.i4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xb8, "conv.ovf.u4 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xb9, "conv.ovf.i8 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xba, "conv.ovf.u8 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xc2, "refanyval ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0xc3, "ckfinite ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xc6, "mkrefany ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0xd0, "ldtoken ", 4, UNALIGNED INT32, "0x%08x", 1)
+IL_OPCODE(0xd1, "conv.u2 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xd2, "conv.u1 ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xd3, "conv.i ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xd4, "conv.ovf.i ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xd5, "conv.ovf.u ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xd6, "add.ovf ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xd7, "add.ovf.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xd8, "mul.ovf ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xd9, "mul.ovf.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xda, "sub.ovf ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xdb, "sub.ovf.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xdc, "endfinally ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xdd, "leave ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE(0xde, "leave.s ", 1, BYTE, "%d", 0)
+IL_OPCODE(0xdf, "stind.i ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE(0xe0, "conv.u ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x00, "arglist ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x01, "ceq ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x02, "cgt ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x03, "cgt.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x04, "clt ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x05, "clt.un ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x06, "ldftn ", 4, UNALIGNED INT32, "0x%08x", 1)
+IL_OPCODE_EXT(0xfe, 0x07, "ldvirtftn ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE_EXT(0xfe, 0x09, "ldarg ", 2, UNALIGNED INT16, "0x%04x", 0)
+IL_OPCODE_EXT(0xfe, 0x0a, "ldarga ", 2, UNALIGNED INT16, "0x%04x", 0)
+IL_OPCODE_EXT(0xfe, 0x0b, "starg ", 2, UNALIGNED INT16, "0x%04x", 0)
+IL_OPCODE_EXT(0xfe, 0x0c, "ldloc ", 2, UNALIGNED INT16, "0x%04x", 0)
+IL_OPCODE_EXT(0xfe, 0x0d, "ldloca ", 2, UNALIGNED INT16, "0x%04x", 0)
+IL_OPCODE_EXT(0xfe, 0x0e, "stloc ", 2, UNALIGNED INT16, "0x%04x", 0)
+IL_OPCODE_EXT(0xfe, 0x0f, "localloc ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x11, "endfilter ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x12, "unaligned. ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x13, "volatile. ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x14, "tail. ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x15, "initobj ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE_EXT(0xfe, 0x17, "cpblk ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x18, "initblk ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x1a, "rethrow ", 0, ILDUMP_VOID, "", 0)
+IL_OPCODE_EXT(0xfe, 0x1c, "sizeof ", 4, UNALIGNED INT32, "0x%08x", 0)
+IL_OPCODE_EXT(0xfe, 0x1d, "refanytype ", 0, ILDUMP_VOID, "", 0)
+
+#undef ILDUMP_VOID
+
diff --git a/src/vm/ilmarshalers.cpp b/src/vm/ilmarshalers.cpp
new file mode 100644
index 0000000000..af45e83a9c
--- /dev/null
+++ b/src/vm/ilmarshalers.cpp
@@ -0,0 +1,6140 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: ILMarshalers.cpp
+//
+
+//
+
+
+#include "common.h"
+#include "dllimport.h"
+#include "mlinfo.h"
+#include "ilmarshalers.h"
+#include "olevariant.h"
+#include "comdatetime.h"
+#include "fieldmarshaler.h"
+
+LocalDesc ILReflectionObjectMarshaler::GetManagedType()
+{
+ STANDARD_VM_CONTRACT;
+
+ return LocalDesc(MscorlibBinder::GetClass(GetManagedTypeBinderID()));
+}
+
+LocalDesc ILReflectionObjectMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+void ILReflectionObjectMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ int tokObject__m_handle = pslILEmit->GetToken(MscorlibBinder::GetField(GetObjectFieldID()));
+ int tokStruct__m_object = 0;
+ BinderFieldID structField = GetStructureFieldID();
+
+ // This marshaler can generate code for marshaling an object containing a handle, and for
+ // marshaling a struct referring to an object containing a handle.
+ if (structField != 0)
+ {
+ tokStruct__m_object = pslILEmit->GetToken(MscorlibBinder::GetField(structField));
+ }
+
+ ILCodeLabel* pNullLabel = pslILEmit->NewCodeLabel();
+
+ pslILEmit->EmitLoadNullPtr();
+ EmitStoreNativeValue(pslILEmit);
+
+ if (tokStruct__m_object != 0)
+ {
+ EmitLoadManagedHomeAddr(pslILEmit);
+ pslILEmit->EmitLDFLD(tokStruct__m_object);
+ }
+ else
+ {
+ EmitLoadManagedValue(pslILEmit);
+ }
+ pslILEmit->EmitBRFALSE(pNullLabel);
+
+ if (tokStruct__m_object != 0)
+ {
+ EmitLoadManagedHomeAddr(pslILEmit);
+ pslILEmit->EmitLDFLD(tokStruct__m_object);
+ }
+ else
+ {
+ EmitLoadManagedValue(pslILEmit);
+ }
+
+ pslILEmit->EmitLDFLD(tokObject__m_handle);
+ EmitStoreNativeValue(pslILEmit);
+
+ pslILEmit->EmitLabel(pNullLabel);
+
+ if (IsCLRToNative(m_dwMarshalFlags))
+ {
+ // keep the object alive across the call-out to native
+ if (tokStruct__m_object != 0)
+ {
+ EmitLoadManagedHomeAddr(m_pcsUnmarshal);
+ m_pcsUnmarshal->EmitLDFLD(tokStruct__m_object);
+ }
+ else
+ {
+ EmitLoadManagedValue(m_pcsUnmarshal);
+ }
+ m_pcsUnmarshal->EmitCALL(METHOD__GC__KEEP_ALIVE, 1, 0);
+ }
+}
+
+void ILReflectionObjectMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ COMPlusThrow(kTypeLoadException, IDS_EE_COM_UNSUPPORTED_SIG);
+}
+
+LocalDesc ILDelegateMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+LocalDesc ILDelegateMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(m_pargs->m_pMT);
+}
+
+void ILDelegateMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullLabel = pslILEmit->NewCodeLabel();
+
+ pslILEmit->EmitLoadNullPtr();
+ EmitStoreNativeValue(pslILEmit);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__MARSHAL__GET_FUNCTION_POINTER_FOR_DELEGATE, 1, 1);
+ EmitStoreNativeValue(pslILEmit);
+
+ pslILEmit->EmitLabel(pNullLabel);
+
+ //
+ // @TODO: is there a better way to do this?
+ //
+ if (IsCLRToNative(m_dwMarshalFlags))
+ {
+ // keep the delegate ref alive across the call-out to native
+ EmitLoadManagedValue(m_pcsUnmarshal);
+ m_pcsUnmarshal->EmitCALL(METHOD__GC__KEEP_ALIVE, 1, 0);
+ }
+}
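+
+// Editorial sketch (illustrative pseudo-code, not part of the original source) of the
+// managed-code equivalent of the IL emitted above:
+//
+//   native = IntPtr.Zero;
+//   if (managed != null)
+//       native = Marshal.GetFunctionPointerForDelegate(managed);
+//   ... call out to native ...
+//   GC.KeepAlive(managed);   // CLR->native only: keep the delegate alive across the call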
+
+void ILDelegateMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullLabel = pslILEmit->NewCodeLabel();
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullLabel);
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitLDTOKEN(pslILEmit->GetToken(m_pargs->m_pMT));
+ pslILEmit->EmitCALL(METHOD__TYPE__GET_TYPE_FROM_HANDLE, 1, 1); // Type System.Type.GetTypeFromHandle(RuntimeTypeHandle handle)
+ pslILEmit->EmitCALL(METHOD__MARSHAL__GET_DELEGATE_FOR_FUNCTION_POINTER, 2, 1); // Delegate System.Marshal.GetDelegateForFunctionPointer(IntPtr p, Type t)
+ EmitStoreManagedValue(pslILEmit);
+
+ pslILEmit->EmitLabel(pNullLabel);
+}
+
+
+LocalDesc ILBoolMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(GetNativeBoolElementType());
+}
+
+LocalDesc ILBoolMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_BOOLEAN);
+}
+
+void ILBoolMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pLoadFalseLabel = pslILEmit->NewCodeLabel();
+ ILCodeLabel* pDoneLabel = pslILEmit->NewCodeLabel();
+
+
+ int trueValue = GetNativeTrueValue();
+ int falseValue = GetNativeFalseValue();
+
+ EmitLoadManagedValue(pslILEmit);
+
+ if (falseValue == 0 && trueValue == 1)
+ {
+ // this can be done without jumps
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitCEQ();
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitCEQ();
+ }
+ else
+ {
+ pslILEmit->EmitBRFALSE(pLoadFalseLabel);
+ pslILEmit->EmitLDC(trueValue);
+ pslILEmit->EmitBR(pDoneLabel);
+#ifdef _DEBUG
+ pslILEmit->EmitPOP(); // keep the simple stack level calculator happy
+#endif // _DEBUG
+ pslILEmit->EmitLabel(pLoadFalseLabel);
+ pslILEmit->EmitLDC(falseValue);
+ pslILEmit->EmitLabel(pDoneLabel);
+ }
+
+ EmitStoreNativeValue(pslILEmit);
+}
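+
+// Editorial note: for trueValue == 1 and falseValue == 0, the two CEQs above compute
+//   native = ((managed == 0) == 0)   // 0 -> 0, anything else -> 1
+// normalizing the boolean without any branches.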
+
+void ILBoolMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ int falseValue = GetNativeFalseValue();
+
+ EmitLoadNativeValue(pslILEmit);
+
+ pslILEmit->EmitLDC(falseValue);
+ pslILEmit->EmitCEQ();
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitCEQ();
+
+ EmitStoreManagedValue(pslILEmit);
+}
+
+
+LocalDesc ILWSTRMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ //
+ // native pointer to the string buffer
+ //
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+LocalDesc ILWSTRMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ //
+ // managed string
+ //
+ return LocalDesc(ELEMENT_TYPE_STRING);
+}
+
+void ILWSTRMarshaler::EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE_MSG("Should be in-only and all other paths are covered by the EmitConvertSpaceAndContents* paths");
+}
+
+void ILWSTRMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE_MSG("Should be in-only and all other paths are covered by the EmitConvertSpaceAndContents* paths");
+}
+
+void ILWSTRMarshaler::EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE_MSG("Should be in-only and all other paths are covered by the EmitConvertSpaceAndContents* paths");
+}
+
+void ILWSTRMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE_MSG("Should be in-only and all other paths are covered by the EmitConvertSpaceAndContents* paths");
+}
+
+bool ILWSTRMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // will evaluate to true iff there is something CoTaskMemAlloc'ed that we need to free
+ bool needsClear = (IsByref(m_dwMarshalFlags) && IsOut(m_dwMarshalFlags)) || IsRetval(m_dwMarshalFlags);
+
+ // m_fCoMemoryAllocated => needsClear
+ // (if we allocated the memory, we will free it; for byref [out] and retval we free memory allocated by the callee)
+ _ASSERTE(!m_fCoMemoryAllocated || needsClear);
+
+ return needsClear;
+}
+
+void ILWSTRMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeValue(pslILEmit);
+ // static void CoTaskMemFree(IntPtr ptr)
+ pslILEmit->EmitCALL(METHOD__WIN32NATIVE__COTASKMEMFREE, 1, 0);
+}
+
+void ILWSTRMarshaler::EmitClearNativeTemp(ILCodeStream* pslILEmit)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE_MSG("The string is either pinned or a copy is stack-allocated, NeedsClearNative should have returned false");
+}
+
+bool ILWSTRMarshaler::CanUsePinnedManagedString(DWORD dwMarshalFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return IsCLRToNative(dwMarshalFlags) && !IsByref(dwMarshalFlags) && IsIn(dwMarshalFlags) && !IsOut(dwMarshalFlags);
+}
+
+//
+// input stack: 0: managed string
+// output stack: 0: (string_length+1) * sizeof(WCHAR)
+//
+void ILWSTRMarshaler::EmitCheckManagedStringLength(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ pslILEmit->EmitCALL(METHOD__STRING__GET_LENGTH, 1, 1);
+ pslILEmit->EmitLDC(1);
+ pslILEmit->EmitADD();
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__CHECK_STRING_LENGTH, 1, 0);
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitADD(); // (length+1) * sizeof(WCHAR)
+}
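+
+// Editorial sketch of the computation above (illustrative pseudo-code):
+//   int cch = str.Length + 1;              // include the null terminator
+//   StubHelpers.CheckStringLength(cch);    // rejects overly long strings
+//   int cb  = cch + cch;                   // cch * sizeof(WCHAR)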
+
+void ILWSTRMarshaler::EmitConvertSpaceAndContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ INDEBUG(m_fCoMemoryAllocated = true);
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+ DWORD dwLengthLocalNum = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+
+ pslILEmit->EmitLoadNullPtr();
+ EmitStoreNativeValue(pslILEmit);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ EmitCheckManagedStringLength(pslILEmit);
+
+ // cb
+
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitSTLOC(dwLengthLocalNum);
+
+ // cb
+
+ // static IntPtr AllocCoTaskMem(int cb)
+ pslILEmit->EmitCALL(METHOD__MARSHAL__ALLOC_CO_TASK_MEM, 1, 1);
+ EmitStoreNativeValue(pslILEmit);
+
+ EmitLoadManagedValue(pslILEmit);
+ EmitLoadNativeValue(pslILEmit);
+
+ // src, dst
+
+ pslILEmit->EmitLDLOC(dwLengthLocalNum); // length
+
+ // static void System.String.InternalCopy(String src, IntPtr dest,int len)
+ pslILEmit->EmitCALL(METHOD__STRING__INTERNAL_COPY, 3, 0);
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
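+
+// Editorial sketch (illustrative pseudo-code) of the managed-code equivalent of the
+// IL emitted above:
+//
+//   native = IntPtr.Zero;
+//   if (managed != null)
+//   {
+//       int cb = (managed.Length + 1) * sizeof(WCHAR);   // length checked above
+//       native = Marshal.AllocCoTaskMem(cb);
+//       String.InternalCopy(managed, native, cb);
+//   }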
+
+void ILWSTRMarshaler::EmitConvertSpaceAndContentsCLRToNativeTemp(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (CanUsePinnedManagedString(m_dwMarshalFlags))
+ {
+ LocalDesc locDesc = GetManagedType();
+ locDesc.MakePinned();
+ DWORD dwPinnedLocal = pslILEmit->NewLocal(locDesc);
+ int fieldDef = pslILEmit->GetToken(MscorlibBinder::GetField(FIELD__STRING__M_FIRST_CHAR));
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ pslILEmit->EmitLoadNullPtr();
+ EmitStoreNativeValue(pslILEmit);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitSTLOC(dwPinnedLocal);
+ pslILEmit->EmitLDLOC(dwPinnedLocal);
+ pslILEmit->EmitLDFLDA(fieldDef);
+ EmitStoreNativeValue(pslILEmit);
+
+ if (g_pConfig->InteropLogArguments())
+ {
+ m_pslNDirect->EmitLogNativeArgument(pslILEmit, dwPinnedLocal);
+ }
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+
+ }
+ else
+ {
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+ DWORD dwLengthLocalNum = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+
+ pslILEmit->EmitLoadNullPtr();
+ EmitStoreNativeValue(pslILEmit);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ EmitCheckManagedStringLength(pslILEmit);
+
+ // cb
+
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitSTLOC(dwLengthLocalNum);
+
+ // cb
+
+ pslILEmit->EmitLOCALLOC(); // @TODO: add a non-localloc path for large strings
+ EmitStoreNativeValue(pslILEmit);
+
+ EmitLoadManagedValue(pslILEmit);
+ EmitLoadNativeValue(pslILEmit);
+
+ // src, dst
+
+ pslILEmit->EmitLDLOC(dwLengthLocalNum); // length
+
+ // static void System.String.InternalCopy(String src, IntPtr dest,int len)
+ pslILEmit->EmitCALL(METHOD__STRING__INTERNAL_COPY, 3, 0);
+ pslILEmit->EmitLabel(pNullRefLabel);
+ }
+}
+
+//
+// input stack: 0: native string
+// output stack: 0: num chars, no null
+//
+void ILWSTRMarshaler::EmitCheckNativeStringLength(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ pslILEmit->EmitCALL(METHOD__STRING__WCSLEN, 1, 1);
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__CHECK_STRING_LENGTH, 1, 0);
+}
+
+void ILWSTRMarshaler::EmitConvertSpaceAndContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pIsNullLabelByref = pslILEmit->NewCodeLabel();
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pIsNullLabelByref);
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitDUP();
+ EmitCheckNativeStringLength(pslILEmit);
+ pslILEmit->EmitPOP(); // pop num chars
+
+ pslILEmit->EmitNEWOBJ(METHOD__STRING__CTOR_CHARPTR, 1);
+ EmitStoreManagedValue(pslILEmit);
+
+ pslILEmit->EmitLabel(pIsNullLabelByref);
+}
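+
+// Editorial sketch (illustrative pseudo-code): the IL above is roughly
+//
+//   if (native != IntPtr.Zero)
+//   {
+//       StubHelpers.CheckStringLength(String.wcslen((char *)native));
+//       managed = new string((char *)native);
+//   }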
+
+
+LocalDesc ILOptimizedAllocMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+bool ILOptimizedAllocMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+void ILOptimizedAllocMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel *pOptimize = NULL;
+
+ if (m_dwLocalBuffer != LOCAL_NUM_UNUSED)
+ {
+ pOptimize = pslILEmit->NewCodeLabel();
+
+ // if (m_dwLocalBuffer) goto Optimize
+ pslILEmit->EmitLDLOC(m_dwLocalBuffer);
+ pslILEmit->EmitBRTRUE(pOptimize);
+ }
+
+ EmitLoadNativeValue(pslILEmit);
+ // static void m_idClearNative(IntPtr ptr)
+ pslILEmit->EmitCALL(m_idClearNative, 1, 0);
+
+ // Optimize:
+ if (m_dwLocalBuffer != LOCAL_NUM_UNUSED)
+ {
+ pslILEmit->EmitLabel(pOptimize);
+ }
+}
+
+LocalDesc ILWSTRBufferMarshaler::GetManagedType()
+{
+ STANDARD_VM_CONTRACT;
+
+ return LocalDesc(MscorlibBinder::GetClass(CLASS__STRING_BUILDER));
+}
+
+void ILWSTRBufferMarshaler::EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ pslILEmit->EmitLoadNullPtr();
+ EmitStoreNativeValue(pslILEmit);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ // int System.Text.StringBuilder.get_Capacity()
+ pslILEmit->EmitCALL(METHOD__STRING_BUILDER__GET_CAPACITY, 1, 1);
+ pslILEmit->EmitDUP();
+
+ // static void StubHelpers.CheckStringLength(int length)
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__CHECK_STRING_LENGTH, 1, 0);
+
+ // stack: capacity
+
+ pslILEmit->EmitLDC(2);
+ pslILEmit->EmitMUL();
+
+ // stack: capacity_in_bytes
+
+ pslILEmit->EmitLDC(2);
+ pslILEmit->EmitADD();
+
+ // stack: offset_of_secret_null
+
+ DWORD dwTmpOffsetOfSecretNull = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitSTLOC(dwTmpOffsetOfSecretNull); // make sure the stack is empty for localloc
+
+ pslILEmit->EmitLDC(2);
+ pslILEmit->EmitADD();
+
+ // stack: alloc_size_in_bytes
+ ILCodeLabel *pAllocRejoin = pslILEmit->NewCodeLabel();
+ if (IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags))
+ {
+ ILCodeLabel *pNoOptimize = pslILEmit->NewCodeLabel();
+ m_dwLocalBuffer = pslILEmit->NewLocal(ELEMENT_TYPE_I);
+
+ // LocalBuffer = 0
+ pslILEmit->EmitLoadNullPtr();
+ pslILEmit->EmitSTLOC(m_dwLocalBuffer);
+
+ // if (alloc_size_in_bytes > MAX_LOCAL_BUFFER_LENGTH) goto NoOptimize
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitLDC(MAX_LOCAL_BUFFER_LENGTH);
+ pslILEmit->EmitCGT_UN();
+ pslILEmit->EmitBRTRUE(pNoOptimize);
+
+ pslILEmit->EmitLOCALLOC();
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitSTLOC(m_dwLocalBuffer);
+ pslILEmit->EmitBR(pAllocRejoin);
+
+ pslILEmit->EmitLabel(pNoOptimize);
+ }
+
+ // static IntPtr AllocCoTaskMem(int cb)
+ pslILEmit->EmitCALL(METHOD__MARSHAL__ALLOC_CO_TASK_MEM, 1, 1);
+
+ pslILEmit->EmitLabel(pAllocRejoin);
+
+ // stack: native_addr
+
+ pslILEmit->EmitDUP();
+ EmitStoreNativeValue(pslILEmit);
+
+ pslILEmit->EmitLDLOC(dwTmpOffsetOfSecretNull);
+
+ // stack: offset_of_secret_null native_addr
+
+ pslILEmit->EmitADD();
+
+ // stack: addr_of_secret_null
+
+ pslILEmit->EmitLDC(0);
+
+ // stack: addr_of_secret_null 0
+
+ pslILEmit->EmitSTIND_I2();
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
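+
+// Editorial note on the buffer layout produced above (derived from the IL sequence):
+//
+//   bytes [0 .. 2*cap-1]        : cap WCHARs of character data
+//   bytes [2*cap .. 2*cap+1]    : room for the normal null terminator
+//   bytes [2*cap+2 .. 2*cap+3]  : the "secret null" WCHAR, zeroed here
+//
+// so alloc_size_in_bytes == 2*cap + 4.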
+
+void ILWSTRBufferMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD dwTempNumBytesLocal = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitDUP();
+
+ // stack: StringBuilder StringBuilder
+
+ // int System.Text.StringBuilder.get_Length()
+ pslILEmit->EmitCALL(METHOD__STRING_BUILDER__GET_LENGTH, 1, 1);
+
+ // stack: StringBuilder length
+
+ // if (!fConvertSpaceJustCalled)
+ {
+ // we don't need to double-check the length because the length
+ // must be smaller than the capacity and the capacity was already
+ // checked by EmitConvertSpaceCLRToNative
+
+ pslILEmit->EmitDUP();
+ // static void StubHelpers.CheckStringLength(int length)
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__CHECK_STRING_LENGTH, 1, 0);
+ }
+
+ // stack: StringBuilder length
+
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitADD();
+
+ // stack: StringBuilder cb
+
+ pslILEmit->EmitSTLOC(dwTempNumBytesLocal);
+
+ // stack: StringBuilder
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitLDLOC(dwTempNumBytesLocal);
+
+ // stack: stringbuilder native_buffer cb
+
+ // void System.Text.StringBuilder.InternalCopy(IntPtr dest,int len)
+ pslILEmit->EmitCALL(METHOD__STRING_BUILDER__INTERNAL_COPY, 3, 0);
+
+ //
+ // null-terminate the native string
+ //
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitLDLOC(dwTempNumBytesLocal);
+ pslILEmit->EmitADD();
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitSTIND_I2();
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+void ILWSTRBufferMarshaler::EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ if (IsIn(m_dwMarshalFlags) || IsCLRToNative(m_dwMarshalFlags))
+ {
+ EmitLoadNativeValue(pslILEmit);
+ // static int System.String.wcslen(char *ptr)
+ pslILEmit->EmitCALL(METHOD__STRING__WCSLEN, 1, 1);
+ }
+ else
+ {
+ // don't touch the native buffer in the native->CLR out-only case
+ pslILEmit->EmitLDC(0);
+ }
+
+ // System.Text.StringBuilder..ctor(int capacity)
+ pslILEmit->EmitNEWOBJ(METHOD__STRING_BUILDER__CTOR_INT, 1);
+ EmitStoreManagedValue(pslILEmit);
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+void ILWSTRBufferMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ EmitLoadNativeValue(pslILEmit);
+
+ pslILEmit->EmitDUP();
+ // static int System.String.wcslen(char *ptr)
+ pslILEmit->EmitCALL(METHOD__STRING__WCSLEN, 1, 1);
+
+ // void System.Text.StringBuilder.ReplaceBuffer(char* newBuffer, int newLength);
+ pslILEmit->EmitCALL(METHOD__STRING_BUILDER__REPLACE_BUFFER_INTERNAL, 3, 0);
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+LocalDesc ILCSTRBufferMarshaler::GetManagedType()
+{
+ STANDARD_VM_CONTRACT;
+
+ return LocalDesc(MscorlibBinder::GetClass(CLASS__STRING_BUILDER));
+}
+
+void ILCSTRBufferMarshaler::EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
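+ // A sketch of the emitted allocation logic (pseudo-C#; names are illustrative):
+ //
+ //   pNative = null;
+ //   if (sb != null)
+ //   {
+ //       int capacity = sb.Capacity;
+ //       StubHelpers.CheckStringLength(capacity);
+ //       int offsetOfSecretNull = capacity * Marshal.SystemMaxDBCSCharSize + 1;
+ //       int cb = offsetOfSecretNull + 3;
+ //       // localloc fast path applies only to by-value CLR->native arguments
+ //       pNative = (cb <= MAX_LOCAL_BUFFER_LENGTH) ? localloc(cb)
+ //                                                 : Marshal.AllocCoTaskMem(cb);
+ //       pNative[offsetOfSecretNull + 0] = 0;   // terminating zeros past the usable buffer
+ //       pNative[offsetOfSecretNull + 1] = 0;   // keep the data null-terminated even if
+ //       pNative[offsetOfSecretNull + 2] = 0;   // the callee fills the whole capacity
+ //   }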
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ pslILEmit->EmitLoadNullPtr();
+ EmitStoreNativeValue(pslILEmit);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ // int System.Text.StringBuilder.get_Capacity()
+ pslILEmit->EmitCALL(METHOD__STRING_BUILDER__GET_CAPACITY, 1, 1);
+ pslILEmit->EmitDUP();
+
+ // static void StubHelpers.CheckStringLength(int length)
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__CHECK_STRING_LENGTH, 1, 0);
+
+ // stack: capacity
+
+ pslILEmit->EmitLDSFLD(pslILEmit->GetToken(MscorlibBinder::GetField(FIELD__MARSHAL__SYSTEM_MAX_DBCS_CHAR_SIZE)));
+ pslILEmit->EmitMUL();
+
+ // stack: capacity_in_bytes
+
+ pslILEmit->EmitLDC(1);
+ pslILEmit->EmitADD();
+
+ // stack: offset_of_secret_null
+
+ DWORD dwTmpOffsetOfSecretNull = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitSTLOC(dwTmpOffsetOfSecretNull); // make sure the stack is empty for localloc
+
+ pslILEmit->EmitLDC(3);
+ pslILEmit->EmitADD();
+
+ // stack: alloc_size_in_bytes
+ ILCodeLabel *pAllocRejoin = pslILEmit->NewCodeLabel();
+ if (IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags))
+ {
+ ILCodeLabel *pNoOptimize = pslILEmit->NewCodeLabel();
+ m_dwLocalBuffer = pslILEmit->NewLocal(ELEMENT_TYPE_I);
+
+ // LocalBuffer = 0
+ pslILEmit->EmitLoadNullPtr();
+ pslILEmit->EmitSTLOC(m_dwLocalBuffer);
+
+ // if (alloc_size_in_bytes > MAX_LOCAL_BUFFER_LENGTH) goto NoOptimize
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitLDC(MAX_LOCAL_BUFFER_LENGTH);
+ pslILEmit->EmitCGT_UN();
+ pslILEmit->EmitBRTRUE(pNoOptimize);
+
+ pslILEmit->EmitLOCALLOC();
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitSTLOC(m_dwLocalBuffer);
+ pslILEmit->EmitBR(pAllocRejoin);
+
+ pslILEmit->EmitLabel(pNoOptimize);
+ }
+
+ // static IntPtr AllocCoTaskMem(int cb)
+ pslILEmit->EmitCALL(METHOD__MARSHAL__ALLOC_CO_TASK_MEM, 1, 1);
+
+ pslILEmit->EmitLabel(pAllocRejoin);
+
+ // stack: native_addr
+
+ pslILEmit->EmitDUP();
+ EmitStoreNativeValue(pslILEmit);
+
+ pslILEmit->EmitLDLOC(dwTmpOffsetOfSecretNull);
+
+ // stack: native_addr offset_of_secret_null
+
+ pslILEmit->EmitADD();
+
+ // stack: addr_of_secret_null0
+
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitSTIND_I1();
+
+ // stack: addr_of_secret_null0
+
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitLDC(1);
+ pslILEmit->EmitADD();
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitSTIND_I1();
+
+ // stack: addr_of_secret_null0
+
+ pslILEmit->EmitLDC(2);
+ pslILEmit->EmitADD();
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitSTIND_I1();
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+void ILCSTRBufferMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+ DWORD dwNumBytesLocalNum = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+ DWORD dwSrcLocal = pslILEmit->NewLocal(ELEMENT_TYPE_OBJECT);
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ // int System.Text.StringBuilder.get_Length()
+ pslILEmit->EmitCALL(METHOD__STRING_BUILDER__GET_LENGTH, 1, 1);
+ // static void StubHelpers.CheckStringLength(int length)
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__CHECK_STRING_LENGTH, 1, 0);
+
+ EmitLoadManagedValue(pslILEmit);
+ // String System.Text.StringBuilder.ToString()
+ pslILEmit->EmitCALL(METHOD__STRING_BUILDER__TO_STRING, 1, 1);
+ pslILEmit->EmitLDC(m_pargs->m_pMarshalInfo->GetBestFitMapping());
+ pslILEmit->EmitLDC(m_pargs->m_pMarshalInfo->GetThrowOnUnmappableChar());
+ pslILEmit->EmitLDLOCA(dwNumBytesLocalNum);
+
+ // static byte[] DoAnsiConversion(string str, bool fBestFit, bool fThrowOnUnmappableChar, out int cbLength)
+ pslILEmit->EmitCALL(METHOD__ANSICHARMARSHALER__DO_ANSI_CONVERSION, 4, 1);
+ pslILEmit->EmitSTLOC(dwSrcLocal);
+ EmitLoadNativeValue(pslILEmit); // pDest
+ pslILEmit->EmitLDC(0); // destIndex
+ pslILEmit->EmitLDLOC(dwSrcLocal); // src[]
+ pslILEmit->EmitLDC(0); // srcIndex
+ pslILEmit->EmitLDLOC(dwNumBytesLocalNum); // len
+
+ // static void Memcpy(byte* pDest, int destIndex, byte[] src, int srcIndex, int len)
+ pslILEmit->EmitCALL(METHOD__BUFFER__MEMCPY_PTRBYTE_ARRBYTE, 5, 0);
+
+ // null terminate the string
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitLDLOC(dwNumBytesLocalNum);
+ pslILEmit->EmitADD();
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitSTIND_I1();
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+void ILCSTRBufferMarshaler::EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ if (IsIn(m_dwMarshalFlags) || IsCLRToNative(m_dwMarshalFlags))
+ {
+ EmitLoadNativeValue(pslILEmit);
+ // static int System.StubHelpers.StubHelpers.strlen(sbyte* ptr)
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__STRLEN, 1, 1);
+ }
+ else
+ {
+ // don't touch the native buffer in the native->CLR out-only case
+ pslILEmit->EmitLDC(0);
+ }
+
+ // System.Text.StringBuilder..ctor(int capacity)
+ pslILEmit->EmitNEWOBJ(METHOD__STRING_BUILDER__CTOR_INT, 1);
+ EmitStoreManagedValue(pslILEmit);
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+void ILCSTRBufferMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ EmitLoadNativeValue(pslILEmit);
+
+ pslILEmit->EmitDUP();
+ // static int System.StubHelpers.StubHelpers.strlen(sbyte* ptr)
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__STRLEN, 1, 1);
+
+ // void System.Text.StringBuilder.ReplaceBuffer(sbyte* newBuffer, int newLength);
+ pslILEmit->EmitCALL(METHOD__STRING_BUILDER__REPLACE_BUFFER_ANSI_INTERNAL, 3, 0);
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+
+
+LocalDesc ILValueClassMarshaler::GetNativeType()
+{
+ STANDARD_VM_CONTRACT;
+
+ return LocalDesc(TypeHandle(m_pargs->m_pMT).MakeNativeValueType());
+}
+
+LocalDesc ILValueClassMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(m_pargs->m_pMT);
+}
+
+void ILValueClassMarshaler::EmitReInitNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitINITOBJ(pslILEmit->GetToken(TypeHandle(m_pargs->m_pMT).MakeNativeValueType()));
+}
+
+bool ILValueClassMarshaler::NeedsClearNative()
+{
+ return true;
+}
+
+void ILValueClassMarshaler::EmitClearNative(ILCodeStream * pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ mdToken managedVCToken = pslILEmit->GetToken(m_pargs->m_pMT);
+
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitLDTOKEN(managedVCToken); // pMT
+ pslILEmit->EmitCALL(METHOD__RT_TYPE_HANDLE__GETVALUEINTERNAL, 1, 1);
+ pslILEmit->EmitCALL(METHOD__VALUECLASSMARSHALER__CLEAR_NATIVE, 2, 0);
+}
+
+
+void ILValueClassMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ mdToken managedVCToken = pslILEmit->GetToken(m_pargs->m_pMT);
+
+ EmitLoadNativeHomeAddr(pslILEmit); // dst
+ EmitLoadManagedHomeAddr(pslILEmit); // src
+ pslILEmit->EmitLDTOKEN(managedVCToken); // pMT
+ pslILEmit->EmitCALL(METHOD__RT_TYPE_HANDLE__GETVALUEINTERNAL, 1, 1); // Convert RTH to IntPtr
+
+ if (IsCLRToNative(m_dwMarshalFlags))
+ {
+ // this should only be needed in CLR-to-native scenarios for the SafeHandle field marshaler
+ m_pslNDirect->LoadCleanupWorkList(pslILEmit);
+ }
+ else
+ {
+ pslILEmit->EmitLoadNullPtr();
+ }
+
+ pslILEmit->EmitCALL(METHOD__VALUECLASSMARSHALER__CONVERT_TO_NATIVE, 4, 0); // void ConvertToNative(IntPtr dst, IntPtr src, IntPtr pMT, ref CleanupWorkList pCleanupWorkList)
+}
+
+void ILValueClassMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ mdToken managedVCToken = pslILEmit->GetToken(m_pargs->m_pMT);
+
+ EmitLoadManagedHomeAddr(pslILEmit); // dst
+ EmitLoadNativeHomeAddr(pslILEmit); // src
+ pslILEmit->EmitLDTOKEN(managedVCToken); // pMT
+ pslILEmit->EmitCALL(METHOD__RT_TYPE_HANDLE__GETVALUEINTERNAL, 1, 1);
+ pslILEmit->EmitCALL(METHOD__VALUECLASSMARSHALER__CONVERT_TO_MANAGED, 3, 0); // void ConvertToManaged(IntPtr dst, IntPtr src, IntPtr pMT)
+}
+
+
+#ifdef FEATURE_COMINTEROP
+LocalDesc ILObjectMarshaler::GetNativeType()
+{
+ STANDARD_VM_CONTRACT;
+
+ return LocalDesc(TypeHandle(MscorlibBinder::GetClass(CLASS__NATIVEVARIANT)));
+}
+
+LocalDesc ILObjectMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_OBJECT);
+}
+
+void ILObjectMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!IsCLRToNative(m_dwMarshalFlags) && IsByref(m_dwMarshalFlags) && IsIn(m_dwMarshalFlags))
+ {
+ // Keep the VARIANT as it is - the stubhelper will do a VT_BYREF check on it.
+ }
+ else
+ {
+ // V_VT(pDest) = VT_EMPTY
+ EmitReInitNative(pslILEmit);
+ }
+
+ EmitLoadManagedValue(pslILEmit); // load src
+ EmitLoadNativeHomeAddr(pslILEmit); // load dst
+ pslILEmit->EmitCALL(METHOD__OBJECTMARSHALER__CONVERT_TO_NATIVE, 2, 0); // void ConvertToNative(object objSrc, IntPtr pDstVariant)
+}
+
+void ILObjectMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__OBJECTMARSHALER__CONVERT_TO_MANAGED, 1, 1); // object ConvertToManaged(IntPtr pSrcVariant);
+ EmitStoreManagedValue(pslILEmit);
+}
+
+bool ILObjectMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+void ILObjectMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!IsCLRToNative(m_dwMarshalFlags) && IsByref(m_dwMarshalFlags) && IsIn(m_dwMarshalFlags))
+ {
+ // We don't want to clear variants passed from native by-ref here as we
+ // want to be able to detect the VT_BYREF case during backpropagation.
+
+ // @TODO: We shouldn't be skipping the call if pslILEmit is ILStubLinker::kExceptionCleanup
+ // because we always want to do real cleanup in this stream.
+ }
+ else
+ {
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__OBJECTMARSHALER__CLEAR_NATIVE, 1, 0);
+ }
+}
+
+void ILObjectMarshaler::EmitReInitNative(ILCodeStream* pslILEmit)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ CONSISTENCY_CHECK(offsetof(VARIANT, vt) == 0);
+ }
+ CONTRACTL_END;
+
+ if (!IsCLRToNative(m_dwMarshalFlags) && IsByref(m_dwMarshalFlags) && IsIn(m_dwMarshalFlags))
+ {
+ // We don't want to clear variants passed from native by-ref here as we
+ // want to be able to detect the VT_BYREF case during backpropagation.
+ }
+ else
+ {
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitLDC(VT_EMPTY);
+ pslILEmit->EmitSTIND_I2();
+ }
+}
+#endif // FEATURE_COMINTEROP
+
+LocalDesc ILDateMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_R8);
+}
+
+LocalDesc ILDateMarshaler::GetManagedType()
+{
+ STANDARD_VM_CONTRACT;
+
+ return LocalDesc(MscorlibBinder::GetClass(CLASS__DATE_TIME));
+}
+
+void ILDateMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadManagedValue(pslILEmit);
+ // double ConvertToNative(INT64 managedDate)
+ pslILEmit->EmitCALL(METHOD__DATEMARSHALER__CONVERT_TO_NATIVE, 1, 1);
+ EmitStoreNativeValue(pslILEmit);
+}
+
+void ILDateMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ // will call DateTime constructor on managed home
+ EmitLoadManagedHomeAddr(pslILEmit);
+
+ EmitLoadNativeValue(pslILEmit);
+ // static long ConvertToManaged(double nativeData)
+ pslILEmit->EmitCALL(METHOD__DATEMARSHALER__CONVERT_TO_MANAGED, 1, 1);
+
+ pslILEmit->EmitCALL(METHOD__DATE_TIME__LONG_CTOR, 2, 0);
+}
+
+void ILDateMarshaler::EmitReInitNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ // ldc.i4.0, conv.r8 is shorter than ldc.r8 0.0
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitCONV_R8();
+ EmitStoreNativeValue(pslILEmit);
+}
+
+LocalDesc ILCurrencyMarshaler::GetNativeType()
+{
+ STANDARD_VM_CONTRACT;
+
+ return LocalDesc(TypeHandle(MscorlibBinder::GetClass(CLASS__CURRENCY)));
+}
+
+LocalDesc ILCurrencyMarshaler::GetManagedType()
+{
+ STANDARD_VM_CONTRACT;
+
+ return LocalDesc(TypeHandle(MscorlibBinder::GetClass(CLASS__DECIMAL)));
+}
+
+
+void ILCurrencyMarshaler::EmitReInitNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitINITOBJ(pslILEmit->GetToken(TypeHandle(MscorlibBinder::GetClass(CLASS__CURRENCY))));
+}
+
+void ILCurrencyMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeHomeAddr(pslILEmit);
+ EmitLoadManagedValue(pslILEmit);
+
+ pslILEmit->EmitCALL(METHOD__CURRENCY__DECIMAL_CTOR, 2, 0);
+}
+
+void ILCurrencyMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadManagedHomeAddr(pslILEmit);
+ EmitLoadNativeValue(pslILEmit);
+
+ pslILEmit->EmitCALL(METHOD__DECIMAL__CURRENCY_CTOR, 2, 0);
+
+ EmitLoadManagedHomeAddr(pslILEmit);
+
+ // static void System.StubHelpers.DecimalCanonicalizeInternal(ref Decimal dec);
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__DECIMAL_CANONICALIZE_INTERNAL, 1, 0);
+}
+
+
+#ifdef FEATURE_COMINTEROP
+LocalDesc ILInterfaceMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+LocalDesc ILInterfaceMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_OBJECT);
+}
+
+void ILInterfaceMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ItfMarshalInfo itfInfo;
+ m_pargs->m_pMarshalInfo->GetItfMarshalInfo(&itfInfo);
+
+ EmitLoadManagedValue(pslILEmit);
+
+ if (itfInfo.thNativeItf.GetMethodTable())
+ {
+ pslILEmit->EmitLDTOKEN(pslILEmit->GetToken(itfInfo.thNativeItf.GetMethodTable()));
+ pslILEmit->EmitCALL(METHOD__RT_TYPE_HANDLE__GETVALUEINTERNAL, 1, 1);
+ }
+ else
+ {
+ pslILEmit->EmitLoadNullPtr();
+ }
+
+ if (itfInfo.thClass.GetMethodTable())
+ {
+ pslILEmit->EmitLDTOKEN(pslILEmit->GetToken(itfInfo.thClass.GetMethodTable()));
+ pslILEmit->EmitCALL(METHOD__RT_TYPE_HANDLE__GETVALUEINTERNAL, 1, 1);
+ }
+ else
+ {
+ pslILEmit->EmitLoadNullPtr();
+ }
+ pslILEmit->EmitLDC(itfInfo.dwFlags);
+
+ // static IntPtr ConvertToNative(object objSrc, IntPtr itfMT, IntPtr classMT, int flags);
+ pslILEmit->EmitCALL(METHOD__INTERFACEMARSHALER__CONVERT_TO_NATIVE, 4, 1);
+
+ EmitStoreNativeValue(pslILEmit);
+
+ if (IsCLRToNative(m_dwMarshalFlags) &&
+ m_pargs->m_pMarshalInfo->IsWinRTScenario())
+ {
+ // If we are calling from the CLR into WinRT and passing an interface to WinRT, we need to
+ // keep the object alive across the unmanaged call, because Jupiter might need to add this
+ // RCW into its live tree, and any CCWs referenced by this RCW could otherwise get collected
+ // before the call to native. For example:
+ //
+ // Button btn = new Button();
+ // btn.OnClick += ...
+ // m_grid.Children.Add(btn)
+ //
+ // Here, btn could be collected, taking the delegate CCW with it, before the native
+ // Children.Add method is called; as a result Jupiter would add the neutered CCW into the tree.
+ //
+ // The fix is to extend the lifetime of the argument across the call to native by emitting
+ // a GC.KeepAlive, keeping the delegate ref alive across the call-out to native.
+ EmitLoadManagedValue(m_pcsUnmarshal);
+ m_pcsUnmarshal->EmitCALL(METHOD__GC__KEEP_ALIVE, 1, 0);
+ }
+}
+
+void ILInterfaceMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ItfMarshalInfo itfInfo;
+ m_pargs->m_pMarshalInfo->GetItfMarshalInfo(&itfInfo);
+
+ // the helper may assign NULL to the home (see below)
+ EmitLoadNativeHomeAddr(pslILEmit);
+
+ if (IsCLRToNative(m_dwMarshalFlags) && m_pargs->m_pMarshalInfo->IsWinRTScenario())
+ {
+ // We are converting an interface pointer to object in a CLR->native stub which means
+ // that the interface pointer has been AddRef'ed for us by the callee. If we end up
+ // wrapping it with a new RCW, we can omit another AddRef/Release pair. Note that if
+ // a new RCW is created the native home will be zeroed out by the helper so the call
+ // to InterfaceMarshaler__ClearNative will become a no-op.
+
+ // Note that we are only doing this for WinRT scenarios to reduce the risk of this change
+ itfInfo.dwFlags |= ItfMarshalInfo::ITF_MARSHAL_SUPPRESS_ADDREF;
+ }
+
+ if (itfInfo.thItf.GetMethodTable())
+ {
+ pslILEmit->EmitLDTOKEN(pslILEmit->GetToken(itfInfo.thItf.GetMethodTable()));
+ pslILEmit->EmitCALL(METHOD__RT_TYPE_HANDLE__GETVALUEINTERNAL, 1, 1);
+ }
+ else
+ {
+ pslILEmit->EmitLoadNullPtr();
+ }
+
+ if (itfInfo.thClass.GetMethodTable())
+ {
+ pslILEmit->EmitLDTOKEN(pslILEmit->GetToken(itfInfo.thClass.GetMethodTable()));
+ pslILEmit->EmitCALL(METHOD__RT_TYPE_HANDLE__GETVALUEINTERNAL, 1, 1);
+ }
+ else
+ {
+ pslILEmit->EmitLoadNullPtr();
+ }
+ pslILEmit->EmitLDC(itfInfo.dwFlags);
+
+ // static object ConvertToManaged(IntPtr pUnk, IntPtr itfMT, IntPtr classMT, int flags);
+ pslILEmit->EmitCALL(METHOD__INTERFACEMARSHALER__CONVERT_TO_MANAGED, 4, 1);
+
+ EmitStoreManagedValue(pslILEmit);
+}
+
+bool ILInterfaceMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+void ILMarshaler::EmitInterfaceClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel *pSkipClearNativeLabel = pslILEmit->NewCodeLabel();
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pSkipClearNativeLabel);
+ EmitLoadNativeValue(pslILEmit);
+ // static void ClearNative(IntPtr pUnk);
+ pslILEmit->EmitCALL(METHOD__INTERFACEMARSHALER__CLEAR_NATIVE, 1, 0);
+ pslILEmit->EmitLabel(pSkipClearNativeLabel);
+}
+
+void ILInterfaceMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+ EmitInterfaceClearNative(pslILEmit);
+}
+#endif // FEATURE_COMINTEROP
+
+
+LocalDesc ILAnsiCharMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_U1);
+}
+
+LocalDesc ILAnsiCharMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_CHAR);
+}
+
+void ILAnsiCharMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDC(m_pargs->m_pMarshalInfo->GetBestFitMapping());
+ pslILEmit->EmitLDC(m_pargs->m_pMarshalInfo->GetThrowOnUnmappableChar());
+ pslILEmit->EmitCALL(METHOD__ANSICHARMARSHALER__CONVERT_TO_NATIVE, 3, 1);
+ EmitStoreNativeValue(pslILEmit);
+}
+
+void ILAnsiCharMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__ANSICHARMARSHALER__CONVERT_TO_MANAGED, 1, 1);
+ EmitStoreManagedValue(pslILEmit);
+}
+
+#ifdef FEATURE_COMINTEROP
+LocalDesc ILOleColorMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_I4);
+}
+
+LocalDesc ILOleColorMarshaler::GetManagedType()
+{
+ STANDARD_VM_CONTRACT;
+
+ BaseDomain* pDomain = m_pargs->m_pMarshalInfo->GetModule()->GetDomain();
+ TypeHandle hndColorType = pDomain->GetMarshalingData()->GetOleColorMarshalingInfo()->GetColorType();
+
+ //
+ // value class
+ //
+ return LocalDesc(hndColorType); // System.Drawing.Color
+}
+
+void ILOleColorMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ BaseDomain* pDomain = m_pargs->m_pMarshalInfo->GetModule()->GetDomain();
+ MethodDesc* pConvertMD = pDomain->GetMarshalingData()->GetOleColorMarshalingInfo()->GetSystemColorToOleColorMD();
+
+ EmitLoadManagedValue(pslILEmit);
+ // int System.Drawing.ColorTranslator.ToOle(System.Drawing.Color c)
+ pslILEmit->EmitCALL(pslILEmit->GetToken(pConvertMD), 1, 1);
+ EmitStoreNativeValue(pslILEmit);
+}
+
+void ILOleColorMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ BaseDomain* pDomain = m_pargs->m_pMarshalInfo->GetModule()->GetDomain();
+ MethodDesc* pConvertMD = pDomain->GetMarshalingData()->GetOleColorMarshalingInfo()->GetOleColorToSystemColorMD();
+
+ EmitLoadNativeValue(pslILEmit);
+ // System.Drawing.Color System.Drawing.ColorTranslator.FromOle(int oleColor)
+ pslILEmit->EmitCALL(pslILEmit->GetToken(pConvertMD), 1, 1);
+ EmitStoreManagedValue(pslILEmit);
+}
+
+bool ILVBByValStrWMarshaler::SupportsArgumentMarshal(DWORD dwMarshalFlags, UINT* pErrorResID)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (IsCLRToNative(dwMarshalFlags) && IsByref(dwMarshalFlags) && IsIn(dwMarshalFlags) && IsOut(dwMarshalFlags))
+ {
+ return true;
+ }
+
+ *pErrorResID = IDS_EE_BADMARSHAL_VBBYVALSTRRESTRICTION;
+ return false;
+}
+
+bool ILVBByValStrWMarshaler::SupportsReturnMarshal(DWORD dwMarshalFlags, UINT* pErrorResID)
+{
+ LIMITED_METHOD_CONTRACT;
+ *pErrorResID = IDS_EE_BADMARSHAL_VBBYVALSTRRESTRICTION;
+ return false;
+}
+
+LocalDesc ILVBByValStrWMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_I); // BSTR
+}
+
+LocalDesc ILVBByValStrWMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_STRING);
+}
+
+bool ILVBByValStrWMarshaler::IsNativePassedByRef()
+{
+ LIMITED_METHOD_CONTRACT;
+ return false;
+}
+
+void ILVBByValStrWMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
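+ // The native format is a DWORD character count immediately followed by the WCHAR data;
+ // the stored native value points just past the length prefix. A sketch of the emitted
+ // code (pseudo-C#; names are illustrative):
+ //
+ //   pNative = null;
+ //   if (str != null)
+ //   {
+ //       int cch = str.Length;
+ //       int cbString = (cch + 1) * 2;        // chars + terminator, excluding the prefix
+ //       byte* p = alloc(cbString + 4);       // localloc or Marshal.AllocCoTaskMem
+ //       *(int*)p = cch;
+ //       pNative = p + 4;
+ //       String.InternalCopy(str, pNative, cbString);
+ //   }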
+ ILCodeStream *pcsSetup = m_pslNDirect->GetSetupCodeStream();
+ m_dwLocalBuffer = pcsSetup->NewLocal(ELEMENT_TYPE_I);
+ pcsSetup->EmitLoadNullPtr();
+ pcsSetup->EmitSTLOC(m_dwLocalBuffer);
+
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+ m_dwCCHLocal = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+ DWORD dwNumBytesLocal = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+
+ pslILEmit->EmitLoadNullPtr();
+ EmitStoreNativeValue(pslILEmit);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__STRING__GET_LENGTH, 1, 1);
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitSTLOC(m_dwCCHLocal);
+
+ // cch
+
+ pslILEmit->EmitLDC(1);
+ pslILEmit->EmitADD();
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__CHECK_STRING_LENGTH, 1, 0);
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitADD(); // (length+1) * sizeof(WCHAR)
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitSTLOC(dwNumBytesLocal); // len <- doesn't include size of the DWORD preceding the string
+ pslILEmit->EmitLDC(sizeof(DWORD));
+ pslILEmit->EmitADD(); // (length+1) * sizeof(WCHAR) + sizeof(DWORD)
+
+ // cb
+
+ ILCodeLabel* pNoOptimizeLabel = pslILEmit->NewCodeLabel();
+ ILCodeLabel* pAllocRejoinLabel = pslILEmit->NewCodeLabel();
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitLDC(MAX_LOCAL_BUFFER_LENGTH);
+ pslILEmit->EmitCGT_UN();
+ pslILEmit->EmitBRTRUE(pNoOptimizeLabel);
+
+ pslILEmit->EmitLOCALLOC();
+ pslILEmit->EmitBR(pAllocRejoinLabel);
+
+ pslILEmit->EmitLabel(pNoOptimizeLabel);
+ pslILEmit->EmitCALL(METHOD__MARSHAL__ALLOC_CO_TASK_MEM, 1, 1);
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitSTLOC(m_dwLocalBuffer);
+
+ pslILEmit->EmitLabel(pAllocRejoinLabel);
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitLDLOC(m_dwCCHLocal);
+ pslILEmit->EmitSTIND_I4();
+ pslILEmit->EmitLDC(sizeof(DWORD));
+ pslILEmit->EmitADD();
+ EmitStoreNativeValue(pslILEmit);
+
+ // <empty>
+
+ EmitLoadManagedValue(pslILEmit); // src
+ EmitLoadNativeValue(pslILEmit); // dest
+ pslILEmit->EmitLDLOC(dwNumBytesLocal); // len
+
+ // static void System.String.InternalCopy(String src, IntPtr dest, int len)
+ pslILEmit->EmitCALL(METHOD__STRING__INTERNAL_COPY, 3, 0);
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+void ILVBByValStrWMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ pslILEmit->EmitLDNULL(); // this
+ EmitLoadNativeValue(pslILEmit); // ptr
+ pslILEmit->EmitLDC(0); // startIndex
+ pslILEmit->EmitLDLOC(m_dwCCHLocal); // length
+
+ // String CtorCharPtrStartLength(char *ptr, int startIndex, int length)
+ // TODO Phase5: Why do we call this weirdo?
+ pslILEmit->EmitCALL(METHOD__STRING__CTORF_CHARPTR_START_LEN, 4, 1);
+
+ EmitStoreManagedValue(pslILEmit);
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+
+bool ILVBByValStrWMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+void ILVBByValStrWMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pExitLabel = pslILEmit->NewCodeLabel();
+ pslILEmit->EmitLDLOC(m_dwLocalBuffer);
+ pslILEmit->EmitBRFALSE(pExitLabel);
+ pslILEmit->EmitLDLOC(m_dwLocalBuffer);
+ pslILEmit->EmitCALL(METHOD__WIN32NATIVE__COTASKMEMFREE, 1, 0);
+ pslILEmit->EmitLabel(pExitLabel);
+}
+
+
+bool ILVBByValStrMarshaler::SupportsArgumentMarshal(DWORD dwMarshalFlags, UINT* pErrorResID)
+{
+ if (IsCLRToNative(dwMarshalFlags) && IsByref(dwMarshalFlags) && IsIn(dwMarshalFlags) && IsOut(dwMarshalFlags))
+ {
+ return true;
+ }
+
+ *pErrorResID = IDS_EE_BADMARSHAL_VBBYVALSTRRESTRICTION;
+ return false;
+}
+
+bool ILVBByValStrMarshaler::SupportsReturnMarshal(DWORD dwMarshalFlags, UINT* pErrorResID)
+{
+ *pErrorResID = IDS_EE_BADMARSHAL_VBBYVALSTRRESTRICTION;
+ return false;
+}
+
+LocalDesc ILVBByValStrMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_I); // BSTR
+}
+
+LocalDesc ILVBByValStrMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_STRING);
+}
+
+bool ILVBByValStrMarshaler::IsNativePassedByRef()
+{
+ LIMITED_METHOD_CONTRACT;
+ return false;
+}
+
+void ILVBByValStrMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ m_dwCCHLocal = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDC(m_pargs->m_pMarshalInfo->GetBestFitMapping());
+ pslILEmit->EmitLDC(m_pargs->m_pMarshalInfo->GetThrowOnUnmappableChar());
+ pslILEmit->EmitLDLOCA(m_dwCCHLocal);
+
+ // static IntPtr ConvertToNative(string strManaged, bool fBestFit, bool fThrowOnUnmappableChar, ref int cch)
+ pslILEmit->EmitCALL(METHOD__VBBYVALSTRMARSHALER__CONVERT_TO_NATIVE, 4, 1);
+
+ EmitStoreNativeValue(pslILEmit);
+}
+
+void ILVBByValStrMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeValue(pslILEmit); // pNative
+ pslILEmit->EmitLDLOC(m_dwCCHLocal); // cch
+
+ // static string ConvertToManaged(IntPtr pNative, int cch)
+ pslILEmit->EmitCALL(METHOD__VBBYVALSTRMARSHALER__CONVERT_TO_MANAGED, 2, 1);
+
+ EmitStoreManagedValue(pslILEmit);
+}
+
+bool ILVBByValStrMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+void ILVBByValStrMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeValue(pslILEmit); // pNative
+
+ // static void ClearNative(IntPtr pNative);
+ pslILEmit->EmitCALL(METHOD__VBBYVALSTRMARSHALER__CLEAR_NATIVE, 1, 0);
+}
+
+LocalDesc ILBSTRMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_STRING);
+}
+
+void ILBSTRMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
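+ // Fast-path sketch (pseudo-C#; by-value CLR->native only, names are illustrative):
+ //
+ //   byte* buf = null;
+ //   if (str != null && str.Length <= (MAX_LOCAL_BUFFER_LENGTH - 6) / 2)
+ //       buf = localloc(str.Length * 2 + 7);   // length prefix + chars + terminator
+ //   pNative = BSTRMarshaler.ConvertToNative(str, buf);  // buf == null => heap-allocate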
+ ILCodeLabel* pRejoinLabel = pslILEmit->NewCodeLabel();
+
+ EmitLoadManagedValue(pslILEmit);
+
+ if (IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags))
+ {
+ ILCodeLabel *pNoOptimizeLabel = pslILEmit->NewCodeLabel();
+ m_dwLocalBuffer = pslILEmit->NewLocal(ELEMENT_TYPE_I);
+
+ // LocalBuffer = 0
+ pslILEmit->EmitLoadNullPtr();
+ pslILEmit->EmitSTLOC(m_dwLocalBuffer);
+
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitBRFALSE(pNoOptimizeLabel);
+
+ // String.Length
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitCALL(METHOD__STRING__GET_LENGTH, 1, 1);
+
+ // if (length > (MAX_LOCAL_BUFFER_LENGTH - 6) / 2) goto NoOptimize
+ pslILEmit->EmitLDC((MAX_LOCAL_BUFFER_LENGTH - 6) / 2); // number of Unicode characters - terminator - length dword
+ pslILEmit->EmitCGT_UN();
+ pslILEmit->EmitBRTRUE(pNoOptimizeLabel);
+
+ // LocalBuffer = localloc[(String.Length * 2) + 7]
+ pslILEmit->EmitCALL(METHOD__STRING__GET_LENGTH, 1, 1);
+ pslILEmit->EmitLDC(2);
+ pslILEmit->EmitMUL();
+ pslILEmit->EmitLDC(7); // + length (4B) + terminator (2B) + possible trailing byte (1B)
+ pslILEmit->EmitADD();
+
+#ifdef _DEBUG
+ // Save the buffer length
+ DWORD dwTmpAllocSize = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitSTLOC(dwTmpAllocSize);
+#endif // _DEBUG
+
+ pslILEmit->EmitLOCALLOC();
+
+#ifdef _DEBUG
+ // Pass buffer length in the first DWORD so the helper is able to assert that
+ // the buffer is large enough.
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitLDLOC(dwTmpAllocSize);
+ pslILEmit->EmitSTIND_I4();
+#endif // _DEBUG
+
+ pslILEmit->EmitSTLOC(m_dwLocalBuffer);
+
+ // load string and LocalBuffer
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDLOC(m_dwLocalBuffer);
+ pslILEmit->EmitBR(pRejoinLabel);
+
+ pslILEmit->EmitLabel(pNoOptimizeLabel);
+ }
+ pslILEmit->EmitLoadNullPtr();
+
+ pslILEmit->EmitLabel(pRejoinLabel);
+ pslILEmit->EmitCALL(METHOD__BSTRMARSHALER__CONVERT_TO_NATIVE, 2, 1);
+ EmitStoreNativeValue(pslILEmit);
+}
+
+void ILBSTRMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__BSTRMARSHALER__CONVERT_TO_MANAGED, 1, 1);
+ EmitStoreManagedValue(pslILEmit);
+}
+
+
+LocalDesc ILAnsiBSTRMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_I); // BSTR
+}
+
+LocalDesc ILAnsiBSTRMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_STRING);
+}
+
+void ILAnsiBSTRMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
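+ // Pack both ANSI conversion options into a single i4: bits 0-7 carry the
+ // best-fit-mapping flag, bits 8-15 the throw-on-unmappable-char flag.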
+ DWORD dwAnsiMarshalFlags =
+ (m_pargs->m_pMarshalInfo->GetBestFitMapping() & 0xFF) |
+ (m_pargs->m_pMarshalInfo->GetThrowOnUnmappableChar() << 8);
+
+ pslILEmit->EmitLDC(dwAnsiMarshalFlags);
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__ANSIBSTRMARSHALER__CONVERT_TO_NATIVE, 2, 1);
+ EmitStoreNativeValue(pslILEmit);
+}
+
+void ILAnsiBSTRMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__ANSIBSTRMARSHALER__CONVERT_TO_MANAGED, 1, 1);
+ EmitStoreManagedValue(pslILEmit);
+}
+
+bool ILAnsiBSTRMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+void ILAnsiBSTRMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__ANSIBSTRMARSHALER__CLEAR_NATIVE, 1, 0);
+}
+
+LocalDesc ILHSTRINGMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc(ELEMENT_TYPE_I); // HSTRING
+}
+
+LocalDesc ILHSTRINGMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc(ELEMENT_TYPE_STRING);
+}
+
+bool ILHSTRINGMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+void ILHSTRINGMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pslILEmit));
+ }
+ CONTRACTL_END;
+
+ // If we're only going into native code, then we can optimize and create an HSTRING reference
+ // over the pinned System.String. However, if the parameter will remain in native code as an out
+ // value, then we need to create a real HSTRING.
+ if (!IsOut(m_dwMarshalFlags) && !IsRetval(m_dwMarshalFlags))
+ {
+ EmitConvertCLRToHSTRINGReference(pslILEmit);
+ }
+ else
+ {
+ EmitConvertCLRToHSTRING(pslILEmit);
+ }
+}
+
+void ILHSTRINGMarshaler::EmitConvertCLRToHSTRINGReference(ILCodeStream* pslILEmit)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pslILEmit));
+ PRECONDITION(!IsOut(m_dwMarshalFlags));
+ PRECONDITION(!IsRetval(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ //
+ // The general strategy for fast path marshaling a short lived System.String -> HSTRING is:
+ // 1. Pin the System.String
+ // 2. Create an HSTRING Reference over the pinned string
+ // 3. Pass that reference to native code
+ //
+
+ // Local to hold the HSTRING_HEADER of the HSTRING reference
+ MethodTable *pHStringHeaderMT = MscorlibBinder::GetClass(CLASS__HSTRING_HEADER_MANAGED);
+ DWORD dwHStringHeaderLocal = pslILEmit->NewLocal(pHStringHeaderMT);
+
+ // Local to hold the pinned input string
+ LocalDesc pinnedStringDesc = GetManagedType();
+ pinnedStringDesc.MakePinned();
+ DWORD dwPinnedStringLocal = pslILEmit->NewLocal(pinnedStringDesc);
+
+ // pinnedString = managed
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitSTLOC(dwPinnedStringLocal);
+
+ // hstring = HSTRINGMarshaler.ConvertManagedToNativeReference(pinnedString, out HStringHeader)
+ pslILEmit->EmitLDLOC(dwPinnedStringLocal);
+ pslILEmit->EmitLDLOCA(dwHStringHeaderLocal);
+ pslILEmit->EmitCALL(METHOD__HSTRINGMARSHALER__CONVERT_TO_NATIVE_REFERENCE, 2, 1);
+
+ if (g_pConfig->InteropLogArguments())
+ {
+ m_pslNDirect->EmitLogNativeArgument(pslILEmit, dwPinnedStringLocal);
+ }
+
+ EmitStoreNativeValue(pslILEmit);
+}
+
+void ILHSTRINGMarshaler::EmitConvertCLRToHSTRING(ILCodeStream* pslILEmit)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pslILEmit));
+ }
+ CONTRACTL_END;
+
+ // hstring = HSTRINGMarshaler.ConvertManagedToNative(managed);
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__HSTRINGMARSHALER__CONVERT_TO_NATIVE, 1, 1);
+ EmitStoreNativeValue(pslILEmit);
+}
+
+void ILHSTRINGMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+ // To convert an HSTRING to a CLR String:
+ // 1. WindowsGetStringRawBuffer() to get the raw string data
+ // 2. WindowsGetStringLen() to get the string length
+ // 3. Construct a System.String from these parameters
+ // 4. Release the HSTRING
+ //
+
+ // string = HSTRINGMarshaler.ConvertNativeToManaged(native);
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__HSTRINGMARSHALER__CONVERT_TO_MANAGED, 1, 1);
+ EmitStoreManagedValue(pslILEmit);
+}
+
+
+void ILHSTRINGMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ // HStringMarshaler.ClearNative(hstring)
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__HSTRINGMARSHALER__CLEAR_NATIVE, 1, 0);
+}
+
+#endif // FEATURE_COMINTEROP
+
+
+LocalDesc ILCSTRMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_STRING);
+}
+
+void ILCSTRMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
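+ // Pack the two ANSI conversion options into one i4 argument: bits 0-7 carry the
+ // best-fit-mapping flag, bits 8-15 the throw-on-unmappable-char flag.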
+ DWORD dwAnsiMarshalFlags =
+ (m_pargs->m_pMarshalInfo->GetBestFitMapping() & 0xFF) |
+ (m_pargs->m_pMarshalInfo->GetThrowOnUnmappableChar() << 8);
+
+ bool bPassByValueInOnly = IsIn(m_dwMarshalFlags) && !IsOut(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags);
+ if (bPassByValueInOnly)
+ {
+ DWORD dwBufSize = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+ m_dwLocalBuffer = pslILEmit->NewLocal(ELEMENT_TYPE_I);
+
+ // LocalBuffer = 0
+ pslILEmit->EmitLoadNullPtr();
+ pslILEmit->EmitSTLOC(m_dwLocalBuffer);
+
+ ILCodeLabel* pNoOptimize = pslILEmit->NewCodeLabel();
+
+ // if (pManaged == NULL) goto NoOptimize
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNoOptimize);
+
+ // String.Length + 2
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__STRING__GET_LENGTH, 1, 1);
+ pslILEmit->EmitLDC(2);
+ pslILEmit->EmitADD();
+
+ // (String.Length + 2) * GetMaxDBCSCharByteSize()
+ pslILEmit->EmitLDSFLD(pslILEmit->GetToken(MscorlibBinder::GetField(FIELD__MARSHAL__SYSTEM_MAX_DBCS_CHAR_SIZE)));
+ pslILEmit->EmitMUL();
+
+ // BufSize = (String.Length + 2) * GetMaxDBCSCharByteSize()
+ pslILEmit->EmitSTLOC(dwBufSize);
+
+ // if (MAX_LOCAL_BUFFER_LENGTH < BufSize ) goto NoOptimize
+ pslILEmit->EmitLDC(MAX_LOCAL_BUFFER_LENGTH);
+ pslILEmit->EmitLDLOC(dwBufSize);
+ pslILEmit->EmitCLT();
+ pslILEmit->EmitBRTRUE(pNoOptimize);
+
+ // LocalBuffer = localloc(BufSize);
+ pslILEmit->EmitLDLOC(dwBufSize);
+ pslILEmit->EmitLOCALLOC();
+ pslILEmit->EmitSTLOC(m_dwLocalBuffer);
+
+ // NoOptimize:
+ pslILEmit->EmitLabel(pNoOptimize);
+ }
+
+ // pNative = CSTRMarshaler.ConvertToNative(dwAnsiMarshalFlags, pManaged, pLocalBuffer)
+ pslILEmit->EmitLDC(dwAnsiMarshalFlags);
+ EmitLoadManagedValue(pslILEmit);
+
+ if (m_dwLocalBuffer != LOCAL_NUM_UNUSED)
+ {
+ pslILEmit->EmitLDLOC(m_dwLocalBuffer);
+ }
+ else
+ {
+ pslILEmit->EmitLoadNullPtr();
+ }
+
+ pslILEmit->EmitCALL(METHOD__CSTRMARSHALER__CONVERT_TO_NATIVE, 3, 1);
+
+ EmitStoreNativeValue(pslILEmit);
+}
+
+void ILCSTRMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__CSTRMARSHALER__CONVERT_TO_MANAGED, 1, 1);
+ EmitStoreManagedValue(pslILEmit);
+}
+
+LocalDesc ILLayoutClassPtrMarshalerBase::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_I); // ptr to struct
+}
+
+LocalDesc ILLayoutClassPtrMarshalerBase::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(m_pargs->m_pMT);
+}
+
+void ILLayoutClassPtrMarshalerBase::EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
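+ // A sketch of the emitted code (pseudo-C#; names are illustrative):
+ //
+ //   pNative = null;
+ //   if (managed != null)
+ //   {
+ //       pNative = Marshal.AllocCoTaskMem(uNativeSize);
+ //       initblk(pNative, 0, uNativeSize);    // zero the block before marshaling into it
+ //   }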
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+ UINT uNativeSize = m_pargs->m_pMT->GetNativeSize();
+
+ pslILEmit->EmitLoadNullPtr();
+ EmitStoreNativeValue(pslILEmit);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+ pslILEmit->EmitLDC(uNativeSize);
+ pslILEmit->EmitCALL(METHOD__MARSHAL__ALLOC_CO_TASK_MEM, 1, 1);
+ pslILEmit->EmitDUP(); // for INITBLK
+ EmitStoreNativeValue(pslILEmit);
+
+ // zero-initialize the native block we just allocated
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitLDC(uNativeSize);
+ pslILEmit->EmitINITBLK();
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+void ILLayoutClassPtrMarshalerBase::EmitConvertSpaceCLRToNativeTemp(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ UINT uNativeSize = m_pargs->m_pMT->GetNativeSize();
+ if (uNativeSize > s_cbStackAllocThreshold)
+ {
+ EmitConvertSpaceCLRToNative(pslILEmit);
+ }
+ else
+ {
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ pslILEmit->EmitLoadNullPtr();
+ EmitStoreNativeValue(pslILEmit);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ pslILEmit->EmitLDC(uNativeSize);
+ pslILEmit->EmitLOCALLOC();
+ pslILEmit->EmitDUP(); // for INITBLK
+ EmitStoreNativeValue(pslILEmit);
+
+ // initialize local block we just allocated
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitLDC(uNativeSize);
+ pslILEmit->EmitINITBLK();
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+ }
+}
+
+void ILLayoutClassPtrMarshalerBase::EmitConvertSpaceAndContentsCLRToNativeTemp(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitConvertSpaceCLRToNativeTemp(pslILEmit);
+ EmitConvertContentsCLRToNative(pslILEmit);
+}
+
+void ILLayoutClassPtrMarshalerBase::EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ pslILEmit->EmitLDTOKEN(pslILEmit->GetToken(m_pargs->m_pMT));
+ pslILEmit->EmitCALL(METHOD__RT_TYPE_HANDLE__GETVALUEINTERNAL, 1, 1);
+ // static object AllocateInternal(IntPtr typeHandle);
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__ALLOCATE_INTERNAL, 1, 1);
+ EmitStoreManagedValue(pslILEmit);
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+
+bool ILLayoutClassPtrMarshalerBase::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+void ILLayoutClassPtrMarshalerBase::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitClearNativeContents(pslILEmit);
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__WIN32NATIVE__COTASKMEMFREE, 1, 0);
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+void ILLayoutClassPtrMarshalerBase::EmitClearNativeTemp(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ UINT uNativeSize = m_pargs->m_pMT->GetNativeSize();
+ if (uNativeSize > s_cbStackAllocThreshold)
+ {
+ EmitClearNative(pslILEmit);
+ }
+ else
+ {
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitClearNativeContents(pslILEmit);
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+ }
+}
+
+
+
+void ILLayoutClassPtrMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+ UINT uNativeSize = m_pargs->m_pMT->GetNativeSize();
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitLDC(uNativeSize);
+ pslILEmit->EmitINITBLK();
+
+ EmitLoadManagedValue(pslILEmit);
+ EmitLoadNativeValue(pslILEmit);
+
+ if (IsCLRToNative(m_dwMarshalFlags))
+ {
+ m_pslNDirect->LoadCleanupWorkList(pslILEmit);
+ }
+ else
+ {
+ //
+ // The assertion here is as follows:
+ // 1) the only field marshaler that requires the CleanupWorkList is FieldMarshaler_SafeHandle
+ // 2) SafeHandle marshaling is disallowed in the native-to-CLR direction, so we'll never see it.
+ //
+ pslILEmit->EmitLDNULL(); // pass a NULL CleanupWorkList in the native-to-CLR case
+ }
+
+ // static void FmtClassUpdateNativeInternal(object obj, byte* pNative, IntPtr pOptionalCleanupList);
+
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__FMT_CLASS_UPDATE_NATIVE_INTERNAL, 3, 0);
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+void ILLayoutClassPtrMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ EmitLoadNativeValue(pslILEmit);
+
+ // static void FmtClassUpdateCLRInternal(object obj, byte* pNative);
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__FMT_CLASS_UPDATE_CLR_INTERNAL, 2, 0);
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+void ILLayoutClassPtrMarshaler::EmitClearNativeContents(ILCodeStream * pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ int tokManagedType = pslILEmit->GetToken(m_pargs->m_pMT);
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitLDTOKEN(tokManagedType);
+ pslILEmit->EmitCALL(METHOD__RT_TYPE_HANDLE__GETVALUEINTERNAL, 1, 1);
+
+ // static void LayoutDestroyNativeInternal(byte* pNative, IntPtr pMT);
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__LAYOUT_DESTROY_NATIVE_INTERNAL, 2, 0);
+}
+
+
+void ILBlittablePtrMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+ UINT uNativeSize = m_pargs->m_pMT->GetNativeSize();
+ int fieldDef = pslILEmit->GetToken(MscorlibBinder::GetField(FIELD__PINNING_HELPER__M_DATA));
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadNativeValue(pslILEmit); // dest
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDFLDA(fieldDef); // src
+
+ pslILEmit->EmitLDC(uNativeSize); // size
+
+ pslILEmit->EmitCPBLK();
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+void ILBlittablePtrMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+ UINT uNativeSize = m_pargs->m_pMT->GetNativeSize();
+ int fieldDef = pslILEmit->GetToken(MscorlibBinder::GetField(FIELD__PINNING_HELPER__M_DATA));
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDFLDA(fieldDef); // dest
+
+ EmitLoadNativeValue(pslILEmit); // src
+
+ pslILEmit->EmitLDC(uNativeSize); // size
+
+ pslILEmit->EmitCPBLK();
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+void ILBlittablePtrMarshaler::EmitMarshalArgumentCLRToNative()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ EmitSetupSigAndDefaultHomesCLRToNative();
+
+ //
+ // marshal
+ //
+
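+ // Pin the object and pass a pointer to its first field; a sketch (pseudo-C#):
+ //
+ //   pinned object p = managed;               // pinned local keeps the object from moving
+ //   byte* pNative = (byte*)p;
+ //   if (pNative != null)
+ //       pNative += Object.GetOffsetOfFirstField();   // skip the object header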
+ ILCodeLabel* pSkipAddLabel = m_pcsMarshal->NewCodeLabel();
+ LocalDesc managedTypePinned = GetManagedType();
+ managedTypePinned.MakePinned();
+ DWORD dwPinnedLocal = m_pcsMarshal->NewLocal(managedTypePinned);
+
+ EmitLoadManagedValue(m_pcsMarshal);
+
+ m_pcsMarshal->EmitSTLOC(dwPinnedLocal);
+ m_pcsMarshal->EmitLDLOC(dwPinnedLocal);
+ m_pcsMarshal->EmitCONV_U();
+ m_pcsMarshal->EmitDUP();
+ m_pcsMarshal->EmitBRFALSE(pSkipAddLabel);
+ m_pcsMarshal->EmitLDC(Object::GetOffsetOfFirstField());
+ m_pcsMarshal->EmitADD();
+ m_pcsMarshal->EmitLabel(pSkipAddLabel);
+
+ if (g_pConfig->InteropLogArguments())
+ {
+ m_pslNDirect->EmitLogNativeArgument(m_pcsMarshal, dwPinnedLocal);
+ }
+
+ EmitStoreNativeValue(m_pcsMarshal);
+}
+
+
+
+
+MarshalerOverrideStatus ILHandleRefMarshaler::ArgumentOverride(NDirectStubLinker* psl,
+ BOOL byref,
+ BOOL fin,
+ BOOL fout,
+ BOOL fManagedToNative,
+ OverrideProcArgs* pargs,
+ UINT* pResID,
+ UINT argidx,
+ UINT nativeStackOffset)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ILCodeStream* pcsMarshal = psl->GetMarshalCodeStream();
+ ILCodeStream* pcsDispatch = psl->GetDispatchCodeStream();
+
+ if (fManagedToNative && !byref)
+ {
+ pcsMarshal->SetStubTargetArgType(ELEMENT_TYPE_I);
+
+
+ // HandleRefs are valuetypes, so pinning is not needed.
+ // The argument address is on the stack and will not move.
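+ // The dispatch code below passes the embedded handle directly, roughly:
+ //   native arg = *(IntPtr*)((byte*)&arg + offsetof(HANDLEREF, m_handle))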
+ pcsDispatch->EmitLDARGA(argidx);
+ pcsDispatch->EmitLDC(offsetof(HANDLEREF, m_handle));
+ pcsDispatch->EmitADD();
+ pcsDispatch->EmitLDIND_I();
+ return OVERRIDDEN;
+ }
+ else
+ {
+ *pResID = IDS_EE_BADMARSHAL_HANDLEREFRESTRICTION;
+ return DISALLOWED;
+ }
+}
+
+MarshalerOverrideStatus ILHandleRefMarshaler::ReturnOverride(NDirectStubLinker* psl,
+ BOOL fManagedToNative,
+ BOOL fHresultSwap,
+ OverrideProcArgs* pargs,
+ UINT* pResID)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ *pResID = IDS_EE_BADMARSHAL_HANDLEREFRESTRICTION;
+ return DISALLOWED;
+}
+
+LocalDesc ILSafeHandleMarshaler::GetManagedType()
+{
+ STANDARD_VM_CONTRACT;
+
+ return LocalDesc(MscorlibBinder::GetClass(CLASS__SAFE_HANDLE));
+}
+
+LocalDesc ILSafeHandleMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+bool ILSafeHandleMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+void ILSafeHandleMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags));
+
+ // call StubHelpers::SafeHandleRelease
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__SAFE_HANDLE_RELEASE, 1, 0);
+}
+
+void ILSafeHandleMarshaler::EmitMarshalArgumentCLRToNative()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ EmitSetupSigAndDefaultHomesCLRToNative();
+
+ // by-value CLR-to-native SafeHandle is always passed in-only regardless of [In], [Out]
+ // marshal and cleanup communicate via an extra local and are both emitted in this method
+
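+ // A sketch of the emitted code (pseudo-C#; names are illustrative):
+ //
+ //   bool addRefed = false;
+ //   IntPtr nativeHandle = StubHelpers.SafeHandleAddRef(managedSH, ref addRefed);
+ //   ... native call ...
+ //   if (addRefed)                            // emitted into the cleanup stream
+ //       StubHelpers.SafeHandleRelease(managedSH);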
+ // bool <dwHandleAddRefedLocalNum> = false
+ ILCodeStream *pcsSetup = m_pslNDirect->GetSetupCodeStream();
+ DWORD dwHandleAddRefedLocalNum = pcsSetup->NewLocal(ELEMENT_TYPE_BOOLEAN);
+
+ pcsSetup->EmitLDC(0);
+ pcsSetup->EmitSTLOC(dwHandleAddRefedLocalNum);
+
+ // <nativeHandle> = StubHelpers::SafeHandleAddRef(<managedSH>, ref <dwHandleAddRefedLocalNum>)
+ EmitLoadManagedValue(m_pcsMarshal);
+ m_pcsMarshal->EmitLDLOCA(dwHandleAddRefedLocalNum);
+ m_pcsMarshal->EmitCALL(METHOD__STUBHELPERS__SAFE_HANDLE_ADD_REF, 2, 1);
+ EmitStoreNativeValue(m_pcsMarshal);
+
+ // cleanup:
+ // if (<dwHandleAddRefedLocalNum>) StubHelpers.SafeHandleRelease(<managedSH>)
+ ILCodeStream *pcsCleanup = m_pslNDirect->GetCleanupCodeStream();
+ ILCodeLabel *pSkipClearNativeLabel = pcsCleanup->NewCodeLabel();
+
+ pcsCleanup->EmitLDLOC(dwHandleAddRefedLocalNum);
+ pcsCleanup->EmitBRFALSE(pSkipClearNativeLabel);
+
+ EmitClearNativeTemp(pcsCleanup);
+ m_pslNDirect->SetCleanupNeeded();
+
+ pcsCleanup->EmitLabel(pSkipClearNativeLabel);
+}
+
+MarshalerOverrideStatus ILSafeHandleMarshaler::ArgumentOverride(NDirectStubLinker* psl,
+ BOOL byref,
+ BOOL fin,
+ BOOL fout,
+ BOOL fManagedToNative,
+ OverrideProcArgs* pargs,
+ UINT* pResID,
+ UINT argidx,
+ UINT nativeStackOffset)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ILCodeStream* pslIL = psl->GetMarshalCodeStream();
+ ILCodeStream* pslILDispatch = psl->GetDispatchCodeStream();
+
+ if (fManagedToNative)
+ {
+ if (byref)
+ {
+ pslIL->SetStubTargetArgType(ELEMENT_TYPE_I);
+
+ // The specific SafeHandle subtype we're dealing with here.
+ MethodTable *pHandleType = pargs->m_pMT;
+
+ // Out SafeHandle parameters must not be abstract.
+ if (fout && pHandleType->IsAbstract())
+ {
+ *pResID = IDS_EE_BADMARSHAL_ABSTRACTOUTSAFEHANDLE;
+ return DISALLOWED;
+ }
+
+ // We rely on the SafeHandle having a default constructor.
+ if (!pHandleType->HasDefaultConstructor())
+ {
+ MAKE_WIDEPTR_FROMUTF8(wzMethodName, COR_CTOR_METHOD_NAME);
+ COMPlusThrowNonLocalized(kMissingMethodException, wzMethodName);
+ }
+
+ // Grab the token for the native handle field embedded inside the SafeHandle. We'll be using it to direct access the
+ // native handle later.
+ mdToken tkNativeHandleField = pslIL->GetToken(MscorlibBinder::GetField(FIELD__SAFE_HANDLE__HANDLE));
+
+ // The high level logic (note that the parameter may be in, out or both):
+ // 1) If this is an input parameter we need to AddRef the SafeHandle and schedule a Release cleanup item.
+ // 2) If this is an output parameter we need to preallocate a SafeHandle to wrap the new native handle value. We
+ // must allocate this before the native call to avoid a failure point when we already have a native resource
+ // allocated. We must allocate a new SafeHandle even if we have one on input since both input and output native
+ // handles need to be tracked and released by a SafeHandle.
+ // 3) Initialize a local IntPtr that will be passed to the native call. If we have an input SafeHandle the value
+ // comes from there; otherwise we get it from the new SafeHandle (which is guaranteed to be initialized to an
+ // invalid handle value).
+ // 4) If this is an out parameter we also store the original handle value (that we just computed above) in a local
+ // variable.
+ // 5) After the native call, if this is an output parameter and the handle value we passed to native differs from
+ // the local copy we made then the new handle value is written into the output SafeHandle and that SafeHandle
+ // is propagated back to the caller.
+
+ // Locals:
+ DWORD dwInputHandleLocal = 0; // The input safe handle (in only)
+ DWORD dwOutputHandleLocal = 0; // The output safe handle (out only)
+ DWORD dwOldNativeHandleLocal = 0; // The original native handle value for comparison (out only)
+ DWORD dwNativeHandleLocal; // The input (and possibly updated) native handle value
+
+ if (fin)
+ {
+ LocalDesc locInputHandle(pHandleType);
+ dwInputHandleLocal = pslIL->NewLocal(locInputHandle);
+ }
+ if (fout)
+ {
+ LocalDesc locOutputHandle(pHandleType);
+ dwOutputHandleLocal = pslIL->NewLocal(locOutputHandle);
+
+ dwOldNativeHandleLocal = pslIL->NewLocal(ELEMENT_TYPE_I);
+ }
+
+ dwNativeHandleLocal = pslIL->NewLocal(ELEMENT_TYPE_I);
+
+ // Call StubHelpers.AddToCleanupList to atomically AddRef incoming SafeHandle and schedule a cleanup work item to
+ // perform Release after the call. The helper also returns the native handle value to us so take the opportunity
+ // to store this in the NativeHandle local we've allocated.
+ if (fin)
+ {
+ pslIL->EmitLDARG(argidx);
+ pslIL->EmitLDIND_REF();
+
+ pslIL->EmitSTLOC(dwInputHandleLocal);
+
+ // Release the original input SafeHandle after the call.
+ psl->LoadCleanupWorkList(pslIL);
+ pslIL->EmitLDLOC(dwInputHandleLocal);
+
+ // This is reliable, i.e. the cleanup will happen if and only if the SH was actually AddRef'ed.
+ pslIL->EmitCALL(METHOD__STUBHELPERS__ADD_TO_CLEANUP_LIST, 2, 1);
+
+ pslIL->EmitSTLOC(dwNativeHandleLocal);
+
+ }
+
+ // For output parameters we need to allocate a new SafeHandle to hold the result.
+ if (fout)
+ {
+ MethodDesc* pMDCtor = pHandleType->GetDefaultConstructor();
+ pslIL->EmitNEWOBJ(pslIL->GetToken(pMDCtor), 0);
+ pslIL->EmitSTLOC(dwOutputHandleLocal);
+
+ // If we didn't provide an input handle then we initialize the NativeHandle local with the (initially invalid)
+ // handle field set up inside the output handle by the constructor.
+ if (!fin)
+ {
+ pslIL->EmitLDLOC(dwOutputHandleLocal);
+ pslIL->EmitLDFLD(tkNativeHandleField);
+ pslIL->EmitSTLOC(dwNativeHandleLocal);
+ }
+
+ // Remember the handle value we start out with so we know whether to back propagate after the native call.
+ pslIL->EmitLDLOC(dwNativeHandleLocal);
+ pslIL->EmitSTLOC(dwOldNativeHandleLocal);
+ }
+
+ // Leave the address of the native handle local as the argument to the native method.
+ pslILDispatch->EmitLDLOCA(dwNativeHandleLocal);
+
+ // On the output side we only backpropagate the native handle into the output SafeHandle and the output SafeHandle
+ // to the caller if the native handle actually changed (otherwise we can end up with two SafeHandles wrapping the
+ // same native handle, which is bad).
+ if (fout)
+ {
+ // We will use cleanup stream to avoid leaking the handle on thread abort.
+ psl->EmitSetArgMarshalIndex(pslIL, NDirectStubLinker::CLEANUP_INDEX_ARG0_MARSHAL + argidx);
+
+ psl->SetCleanupNeeded();
+ ILCodeStream *pslCleanupIL = psl->GetCleanupCodeStream();
+
+ ILCodeLabel *pDoneLabel = pslCleanupIL->NewCodeLabel();
+
+ psl->EmitCheckForArgCleanup(pslCleanupIL,
+ NDirectStubLinker::CLEANUP_INDEX_ARG0_MARSHAL + argidx,
+ NDirectStubLinker::BranchIfNotMarshaled,
+ pDoneLabel);
+
+ // If this is an [in, out] handle check if the native handles have changed. If not we're finished.
+ if (fin)
+ {
+ pslCleanupIL->EmitLDLOC(dwNativeHandleLocal);
+ pslCleanupIL->EmitLDLOC(dwOldNativeHandleLocal);
+ pslCleanupIL->EmitCEQ();
+ pslCleanupIL->EmitBRTRUE(pDoneLabel);
+ }
+
+ // Propagate the native handle into the output SafeHandle.
+ pslCleanupIL->EmitLDLOC(dwOutputHandleLocal);
+ pslCleanupIL->EmitLDLOC(dwNativeHandleLocal);
+ pslCleanupIL->EmitSTFLD(tkNativeHandleField);
+
+ // Propagate the output SafeHandle back to the caller.
+ pslCleanupIL->EmitLDARG(argidx);
+ pslCleanupIL->EmitLDLOC(dwOutputHandleLocal);
+ pslCleanupIL->EmitSTIND_REF();
+
+ pslCleanupIL->EmitLabel(pDoneLabel);
+ }
+ }
+ else
+ {
+ // Avoid using the cleanup list in this common case for perf reasons (cleanup list is
+ // unmanaged and destroying it means excessive managed<->native transitions; in addition,
+ // as X86 IL stubs do not use interop frames, there's nothing protecting the cleanup list
+ // and the SafeHandle references must be GC handles which does not help perf either).
+ //
+ // This code path generates calls to StubHelpers.SafeHandleAddRef and SafeHandleRelease.
+ // NICE: Could SafeHandle.DangerousAddRef and DangerousRelease be implemented in managed?
+ return HANDLEASNORMAL;
+ }
+
+ return OVERRIDDEN;
+ }
+ else
+ {
+ *pResID = IDS_EE_BADMARSHAL_SAFEHANDLENATIVETOCOM;
+ return DISALLOWED;
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+MarshalerOverrideStatus
+ILSafeHandleMarshaler::ReturnOverride(
+ NDirectStubLinker * psl,
+ BOOL fManagedToNative,
+ BOOL fHresultSwap,
+ OverrideProcArgs * pargs,
+ UINT * pResID)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(psl));
+ PRECONDITION(CheckPointer(pargs));
+ PRECONDITION(CheckPointer(pResID));
+ }
+ CONTRACTL_END;
+
+ ILCodeStream * pslIL = psl->GetMarshalCodeStream();
+ ILCodeStream * pslPostIL = psl->GetReturnUnmarshalCodeStream();
+ ILCodeStream * pslILDispatch = psl->GetDispatchCodeStream();
+
+ if (!fManagedToNative)
+ {
+ *pResID = IDS_EE_BADMARSHAL_RETURNSHCOMTONATIVE;
+ return DISALLOWED;
+ }
+
+ // Returned SafeHandle parameters must not be abstract.
+ if (pargs->m_pMT->IsAbstract())
+ {
+ *pResID = IDS_EE_BADMARSHAL_ABSTRACTRETSAFEHANDLE;
+ return DISALLOWED;
+ }
+
+ // 1) create local for new safehandle
+ // 2) prealloc a safehandle
+ // 3) create local to hold returned handle
+ // 4) [byref] add byref IntPtr to native sig
+ // 5) [byref] pass address of local as last arg
+ // 6) store return value in safehandle
+
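+    // A C#-style sketch of what the emitted IL does in the HRESULT-swap case
+    // (names are illustrative only; the actual logic below is emitted IL):
+    //
+    //   TSafeHandle ret = new TSafeHandle();     // 2) preallocated before the call
+    //   IntPtr hNative = ret.handle;             // 3) local for the returned handle
+    //   int hr = NativeMethod(..., &hNative);    // 4)+5) byref IntPtr appended to the sig
+    //   ret.handle = hNative;                    // 6) done in the cleanup stream
+    //   return ret;
+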
+ // 1) create local for new safehandle
+ MethodTable * pMT = pargs->m_pMT;
+ LocalDesc locDescReturnHandle(pMT);
+ DWORD dwReturnHandleLocal;
+
+ dwReturnHandleLocal = pslIL->NewLocal(locDescReturnHandle);
+
+ if (!pMT->HasDefaultConstructor())
+ {
+ MAKE_WIDEPTR_FROMUTF8(wzMethodName, COR_CTOR_METHOD_NAME);
+ COMPlusThrowNonLocalized(kMissingMethodException, wzMethodName);
+ }
+
+ // 2) prealloc a safehandle
+ MethodDesc* pMDCtor = pMT->GetDefaultConstructor();
+ pslIL->EmitNEWOBJ(pslIL->GetToken(pMDCtor), 0);
+ pslIL->EmitSTLOC(dwReturnHandleLocal);
+
+ mdToken tkNativeHandleField = pslPostIL->GetToken(MscorlibBinder::GetField(FIELD__SAFE_HANDLE__HANDLE));
+
+ // 3) create local to hold returned handle
+ DWORD dwReturnNativeHandleLocal = pslIL->NewLocal(ELEMENT_TYPE_I);
+
+ if (fHresultSwap)
+ {
+ // initialize the native handle
+ pslIL->EmitLDLOC(dwReturnHandleLocal);
+ pslIL->EmitLDFLD(tkNativeHandleField);
+ pslIL->EmitSTLOC(dwReturnNativeHandleLocal);
+
+ pslIL->SetStubTargetReturnType(ELEMENT_TYPE_I4); // native method returns an HRESULT
+
+ // 4) [byref] add byref IntPtr to native sig
+ locDescReturnHandle.ElementType[0] = ELEMENT_TYPE_BYREF;
+ locDescReturnHandle.ElementType[1] = ELEMENT_TYPE_I;
+ locDescReturnHandle.cbType = 2;
+ pslIL->SetStubTargetArgType(&locDescReturnHandle, false); // extra arg is a byref IntPtr
+
+ // 5) [byref] pass address of local as last arg
+ pslILDispatch->EmitLDLOCA(dwReturnNativeHandleLocal);
+
+ // We will use cleanup stream to avoid leaking the handle on thread abort.
+ psl->EmitSetArgMarshalIndex(pslIL, NDirectStubLinker::CLEANUP_INDEX_RETVAL_UNMARSHAL);
+
+ psl->SetCleanupNeeded();
+ ILCodeStream *pslCleanupIL = psl->GetCleanupCodeStream();
+ ILCodeLabel *pDoneLabel = pslCleanupIL->NewCodeLabel();
+
+ psl->EmitCheckForArgCleanup(pslCleanupIL,
+ NDirectStubLinker::CLEANUP_INDEX_RETVAL_UNMARSHAL,
+ NDirectStubLinker::BranchIfNotMarshaled,
+ pDoneLabel);
+
+ // 6) store return value in safehandle
+ pslCleanupIL->EmitLDLOC(dwReturnHandleLocal);
+ pslCleanupIL->EmitLDLOC(dwReturnNativeHandleLocal);
+ pslCleanupIL->EmitSTFLD(tkNativeHandleField);
+ pslCleanupIL->EmitLabel(pDoneLabel);
+
+ pslPostIL->EmitLDLOC(dwReturnHandleLocal);
+ }
+ else
+ {
+ pslIL->SetStubTargetReturnType(ELEMENT_TYPE_I);
+ pslPostIL->EmitSTLOC(dwReturnNativeHandleLocal);
+
+ // 6) store return value in safehandle
+ // The thread abort logic knows that it must not interrupt the stub so we will
+ // always be able to execute this sequence after returning from the call.
+ pslPostIL->EmitLDLOC(dwReturnHandleLocal);
+ pslPostIL->EmitLDLOC(dwReturnNativeHandleLocal);
+ pslPostIL->EmitSTFLD(tkNativeHandleField);
+ pslPostIL->EmitLDLOC(dwReturnHandleLocal);
+ }
+
+ return OVERRIDDEN;
+} // ILSafeHandleMarshaler::ReturnOverride
+
+
+//---------------------------------------------------------------------------------------
+//
+MarshalerOverrideStatus ILCriticalHandleMarshaler::ArgumentOverride(NDirectStubLinker* psl,
+ BOOL byref,
+ BOOL fin,
+ BOOL fout,
+ BOOL fManagedToNative,
+ OverrideProcArgs* pargs,
+ UINT* pResID,
+ UINT argidx,
+ UINT nativeStackOffset)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ILCodeStream* pslIL = psl->GetMarshalCodeStream();
+ ILCodeStream* pslPostIL = psl->GetUnmarshalCodeStream();
+ ILCodeStream* pslILDispatch = psl->GetDispatchCodeStream();
+
+ if (fManagedToNative)
+ {
+ pslIL->SetStubTargetArgType(ELEMENT_TYPE_I);
+
+        // Grab the token for the native handle field embedded inside the CriticalHandle. We'll
+        // use it to access the native handle directly later.
+ mdToken tkNativeHandleField = pslIL->GetToken(MscorlibBinder::GetField(FIELD__CRITICAL_HANDLE__HANDLE));
+
+ if (byref)
+ {
+ // The specific CriticalHandle subtype we're dealing with here.
+ MethodTable *pHandleType = pargs->m_pMT;
+
+ // Out CriticalHandle parameters must not be abstract.
+ if (fout && pHandleType->IsAbstract())
+ {
+ *pResID = IDS_EE_BADMARSHAL_ABSTRACTOUTCRITICALHANDLE;
+ return DISALLOWED;
+ }
+
+ // We rely on the CriticalHandle having a default constructor.
+ if (!pHandleType->HasDefaultConstructor())
+ {
+ MAKE_WIDEPTR_FROMUTF8(wzMethodName, COR_CTOR_METHOD_NAME);
+ COMPlusThrowNonLocalized(kMissingMethodException, wzMethodName);
+ }
+
+ // The high level logic (note that the parameter may be in, out or both):
+ // 1) If this is an output parameter we need to preallocate a CriticalHandle to wrap the new native handle value. We
+ // must allocate this before the native call to avoid a failure point when we already have a native resource
+ // allocated. We must allocate a new CriticalHandle even if we have one on input since both input and output native
+ // handles need to be tracked and released by a CriticalHandle.
+ // 2) Initialize a local IntPtr that will be passed to the native call. If we have an input CriticalHandle the value
+ // comes from there otherwise we get it from the new CriticalHandle (which is guaranteed to be initialized to an
+ // invalid handle value).
+            // 3) If this is an out parameter we also store the original handle value (that we just computed above) in a
+            //    local variable.
+ // 4) After the native call, if this is an output parameter and the handle value we passed to native differs from
+ // the local copy we made then the new handle value is written into the output CriticalHandle and that
+ // CriticalHandle is propagated back to the caller.
+
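+            // A rough C#-style sketch of the [in, out] case described above
+            // (illustrative names; the actual logic below is emitted IL):
+            //
+            //   IntPtr h = handle.handle;                            // step 2 (input value)
+            //   TCriticalHandle outHandle = new TCriticalHandle();   // step 1
+            //   IntPtr hOld = h;                                     // step 3
+            //   NativeMethod(..., &h, ...);
+            //   if (h != hOld)                                       // step 4, in the cleanup stream
+            //   {
+            //       outHandle.handle = h;
+            //       handle = outHandle;
+            //   }
+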
+ // Locals:
+ LocalDesc locOutputHandle;
+ DWORD dwOutputHandleLocal = 0; // The output critical handle (out only)
+ DWORD dwOldNativeHandleLocal = 0; // The original native handle value for comparison (out only)
+ DWORD dwNativeHandleLocal; // The input (and possibly updated) native handle value
+
+ if (fout)
+ {
+ locOutputHandle.ElementType[0] = ELEMENT_TYPE_INTERNAL;
+ locOutputHandle.cbType = 1;
+ locOutputHandle.InternalToken = pHandleType;
+
+ dwOutputHandleLocal = pslIL->NewLocal(locOutputHandle);
+
+ dwOldNativeHandleLocal = pslIL->NewLocal(ELEMENT_TYPE_I);
+ }
+
+ dwNativeHandleLocal = pslIL->NewLocal(ELEMENT_TYPE_I);
+
+
+ // If we have an input CriticalHandle then initialize our NativeHandle local with it.
+ if (fin)
+ {
+ pslIL->EmitLDARG(argidx);
+ pslIL->EmitLDIND_REF();
+ pslIL->EmitLDFLD(tkNativeHandleField);
+ pslIL->EmitSTLOC(dwNativeHandleLocal);
+ }
+
+ // For output parameters we need to allocate a new CriticalHandle to hold the result.
+ if (fout)
+ {
+ MethodDesc* pMDCtor = pHandleType->GetDefaultConstructor();
+ pslIL->EmitNEWOBJ(pslIL->GetToken(pMDCtor), 0);
+ pslIL->EmitSTLOC(dwOutputHandleLocal);
+
+ // If we didn't provide an input handle then we initialize the NativeHandle local with the (initially invalid)
+ // handle field set up inside the output handle by the constructor.
+ if (!fin)
+ {
+ pslIL->EmitLDLOC(dwOutputHandleLocal);
+ pslIL->EmitLDFLD(tkNativeHandleField);
+ pslIL->EmitSTLOC(dwNativeHandleLocal);
+ }
+
+ // Remember the handle value we start out with so we know whether to back propagate after the native call.
+ pslIL->EmitLDLOC(dwNativeHandleLocal);
+ pslIL->EmitSTLOC(dwOldNativeHandleLocal);
+ }
+
+ // Leave the address of the native handle local as the argument to the native method.
+ pslILDispatch->EmitLDLOCA(dwNativeHandleLocal);
+
+ if (fin)
+ {
+ // prevent the CriticalHandle from being finalized during the call-out to native
+ pslPostIL->EmitLDARG(argidx);
+ pslPostIL->EmitLDIND_REF();
+ pslPostIL->EmitCALL(METHOD__GC__KEEP_ALIVE, 1, 0);
+ }
+
+ // On the output side we only backpropagate the native handle into the output CriticalHandle and the output
+ // CriticalHandle to the caller if the native handle actually changed (otherwise we can end up with two
+ // CriticalHandles wrapping the same native handle, which is bad).
+ if (fout)
+ {
+ // We will use cleanup stream to avoid leaking the handle on thread abort.
+ psl->EmitSetArgMarshalIndex(pslIL, NDirectStubLinker::CLEANUP_INDEX_ARG0_MARSHAL + argidx);
+
+ psl->SetCleanupNeeded();
+ ILCodeStream *pslCleanupIL = psl->GetCleanupCodeStream();
+
+ ILCodeLabel *pDoneLabel = pslCleanupIL->NewCodeLabel();
+
+ psl->EmitCheckForArgCleanup(pslCleanupIL,
+ NDirectStubLinker::CLEANUP_INDEX_ARG0_MARSHAL + argidx,
+ NDirectStubLinker::BranchIfNotMarshaled,
+ pDoneLabel);
+
+                // If this is an [in, out] handle, check whether the native handle has changed. If not, we're finished.
+ if (fin)
+ {
+ pslCleanupIL->EmitLDLOC(dwNativeHandleLocal);
+ pslCleanupIL->EmitLDLOC(dwOldNativeHandleLocal);
+ pslCleanupIL->EmitCEQ();
+ pslCleanupIL->EmitBRTRUE(pDoneLabel);
+ }
+
+ // Propagate the native handle into the output CriticalHandle.
+ pslCleanupIL->EmitLDLOC(dwOutputHandleLocal);
+ pslCleanupIL->EmitLDLOC(dwNativeHandleLocal);
+ pslCleanupIL->EmitSTFLD(tkNativeHandleField);
+
+ // Propagate the output CriticalHandle back to the caller.
+ pslCleanupIL->EmitLDARG(argidx);
+ pslCleanupIL->EmitLDLOC(dwOutputHandleLocal);
+ pslCleanupIL->EmitSTIND_REF();
+
+ pslCleanupIL->EmitLabel(pDoneLabel);
+ }
+ }
+ else
+ {
+ pslILDispatch->EmitLDARG(argidx);
+ pslILDispatch->EmitLDFLD(tkNativeHandleField);
+
+ // prevent the CriticalHandle from being finalized during the call-out to native
+ pslPostIL->EmitLDARG(argidx);
+ pslPostIL->EmitCALL(METHOD__GC__KEEP_ALIVE, 1, 0);
+ }
+
+ return OVERRIDDEN;
+ }
+ else
+ {
+ *pResID = IDS_EE_BADMARSHAL_CRITICALHANDLENATIVETOCOM;
+ return DISALLOWED;
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+MarshalerOverrideStatus
+ILCriticalHandleMarshaler::ReturnOverride(
+ NDirectStubLinker * psl,
+ BOOL fManagedToNative,
+ BOOL fHresultSwap,
+ OverrideProcArgs * pargs,
+ UINT * pResID)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(psl));
+ PRECONDITION(CheckPointer(pargs));
+ PRECONDITION(CheckPointer(pResID));
+ }
+ CONTRACTL_END;
+
+ if (!fManagedToNative)
+ {
+ *pResID = IDS_EE_BADMARSHAL_RETURNSHCOMTONATIVE;
+ return DISALLOWED;
+ }
+
+ // Returned CriticalHandle parameters must not be abstract.
+ if (pargs->m_pMT->IsAbstract())
+ {
+ *pResID = IDS_EE_BADMARSHAL_ABSTRACTRETCRITICALHANDLE;
+ return DISALLOWED;
+ }
+
+ ILCodeStream * pslIL = psl->GetMarshalCodeStream();
+ ILCodeStream * pslPostIL = psl->GetReturnUnmarshalCodeStream();
+ ILCodeStream * pslILDispatch = psl->GetDispatchCodeStream();
+
+ // 1) create local for new criticalhandle
+ // 2) prealloc a criticalhandle
+ // 3) create local to hold returned handle
+ // 4) [byref] add byref IntPtr to native sig
+ // 5) [byref] pass address of local as last arg
+ // 6) store return value in criticalhandle
+
+ // 1) create local for new criticalhandle
+ MethodTable * pMT = pargs->m_pMT;
+ LocalDesc locDescReturnHandle(pMT);
+ DWORD dwReturnHandleLocal;
+
+ dwReturnHandleLocal = pslIL->NewLocal(locDescReturnHandle);
+
+ if (!pMT->HasDefaultConstructor())
+ {
+ MAKE_WIDEPTR_FROMUTF8(wzMethodName, COR_CTOR_METHOD_NAME);
+ COMPlusThrowNonLocalized(kMissingMethodException, wzMethodName);
+ }
+
+ // 2) prealloc a criticalhandle
+ MethodDesc * pMDCtor = pMT->GetDefaultConstructor();
+ pslIL->EmitNEWOBJ(pslIL->GetToken(pMDCtor), 0);
+ pslIL->EmitSTLOC(dwReturnHandleLocal);
+
+ mdToken tkNativeHandleField = pslPostIL->GetToken(MscorlibBinder::GetField(FIELD__CRITICAL_HANDLE__HANDLE));
+
+ // 3) create local to hold returned handle
+ DWORD dwReturnNativeHandleLocal = pslIL->NewLocal(ELEMENT_TYPE_I);
+
+ if (fHresultSwap)
+ {
+ // initialize the native handle
+ pslIL->EmitLDLOC(dwReturnHandleLocal);
+ pslIL->EmitLDFLD(tkNativeHandleField);
+ pslIL->EmitSTLOC(dwReturnNativeHandleLocal);
+
+ pslIL->SetStubTargetReturnType(ELEMENT_TYPE_I4); // native method returns an HRESULT
+
+ // 4) [byref] add byref IntPtr to native sig
+ locDescReturnHandle.ElementType[0] = ELEMENT_TYPE_BYREF;
+ locDescReturnHandle.ElementType[1] = ELEMENT_TYPE_I;
+ locDescReturnHandle.cbType = 2;
+ pslIL->SetStubTargetArgType(&locDescReturnHandle, false); // extra arg is a byref IntPtr
+
+ // 5) [byref] pass address of local as last arg
+ pslILDispatch->EmitLDLOCA(dwReturnNativeHandleLocal);
+
+ // We will use cleanup stream to avoid leaking the handle on thread abort.
+ psl->EmitSetArgMarshalIndex(pslIL, NDirectStubLinker::CLEANUP_INDEX_RETVAL_UNMARSHAL);
+
+ psl->SetCleanupNeeded();
+ ILCodeStream *pslCleanupIL = psl->GetCleanupCodeStream();
+ ILCodeLabel *pDoneLabel = pslCleanupIL->NewCodeLabel();
+
+ // 6) store return value in criticalhandle
+ psl->EmitCheckForArgCleanup(pslCleanupIL,
+ NDirectStubLinker::CLEANUP_INDEX_RETVAL_UNMARSHAL,
+ NDirectStubLinker::BranchIfNotMarshaled,
+ pDoneLabel);
+
+ pslCleanupIL->EmitLDLOC(dwReturnHandleLocal);
+ pslCleanupIL->EmitLDLOC(dwReturnNativeHandleLocal);
+ pslCleanupIL->EmitSTFLD(tkNativeHandleField);
+ pslCleanupIL->EmitLabel(pDoneLabel);
+
+ pslPostIL->EmitLDLOC(dwReturnHandleLocal);
+ }
+ else
+ {
+ pslIL->SetStubTargetReturnType(ELEMENT_TYPE_I);
+ pslPostIL->EmitSTLOC(dwReturnNativeHandleLocal);
+
+ // 6) store return value in criticalhandle
+ // The thread abort logic knows that it must not interrupt the stub so we will
+ // always be able to execute this sequence after returning from the call.
+ pslPostIL->EmitLDLOC(dwReturnHandleLocal);
+ pslPostIL->EmitLDLOC(dwReturnNativeHandleLocal);
+ pslPostIL->EmitSTFLD(tkNativeHandleField);
+ pslPostIL->EmitLDLOC(dwReturnHandleLocal);
+ }
+
+ return OVERRIDDEN;
+} // ILCriticalHandleMarshaler::ReturnOverride
+
+#ifndef FEATURE_CORECLR
+//---------------------------------------------------------------------------------------
+//
+MarshalerOverrideStatus ILBlittableValueClassWithCopyCtorMarshaler::ArgumentOverride(NDirectStubLinker* psl,
+ BOOL byref,
+ BOOL fin,
+ BOOL fout,
+ BOOL fManagedToNative,
+ OverrideProcArgs* pargs,
+ UINT* pResID,
+ UINT argidx,
+ UINT nativeStackOffset)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ILCodeStream* pslIL = psl->GetMarshalCodeStream();
+ ILCodeStream* pslILDispatch = psl->GetDispatchCodeStream();
+
+ if (byref)
+ {
+ *pResID = IDS_EE_BADMARSHAL_COPYCTORRESTRICTION;
+ return DISALLOWED;
+ }
+
+ if (fManagedToNative)
+ {
+#ifdef _TARGET_X86_
+ _ASSERTE(nativeStackOffset != (UINT)-1);
+
+ // get a new copy ctor cookie
+ DWORD dwCookieLocalNum = psl->CreateCopyCtorCookie(pslIL);
+
+ // and initialize it with our values
+ pslIL->EmitLDLOCA(dwCookieLocalNum);
+ pslIL->EmitLDARG(argidx);
+ pslIL->EmitLDC(nativeStackOffset);
+
+ // SetData takes pointers to managed methods although code:CopyCtorCallStubWorker
+ // currently calls them via reverse P/Invokes
+ if (pargs->mm.m_pCopyCtor)
+ {
+ pslIL->EmitLDFTN(pslIL->GetToken(pargs->mm.m_pCopyCtor));
+ }
+ else
+ {
+ pslIL->EmitLoadNullPtr();
+ }
+
+ if (pargs->mm.m_pDtor)
+ {
+ pslIL->EmitLDFTN(pslIL->GetToken(pargs->mm.m_pDtor));
+ }
+ else
+ {
+ pslIL->EmitLoadNullPtr();
+ }
+
+ // <dwCookieLocalNum>.SetData(<argidx>, <nativeStackOffset>, ctorPtr, dtorPtr)
+ pslIL->EmitCALL(METHOD__COPYCTORSTUBCOOKIE__SET_DATA, 5, 0);
+
+ LocalDesc locDesc(pargs->mm.m_pMT);
+ pslIL->SetStubTargetArgType(&locDesc); // native type is the value type
+
+ pslILDispatch->EmitLDARG(argidx); // we load the argument directly
+ pslILDispatch->EmitLDOBJ(pslILDispatch->GetToken(pargs->mm.m_pMT));
+#else // _TARGET_X86_
+ // On WIN64 platforms, copy-constructed arguments are actually passed by reference.
+ // This is the same calling convention as used by managed code, but to maintain parity,
+ // we mimic the x86 behaviour:
+ //
+ // 1) create new native value type local
+ // 2) run new->CopyCtor(old)
+ // 3) run old->Dtor()
+
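+            // Roughly, in pseudocode (T is the copy-constructed value type; names are
+            // illustrative):
+            //
+            //   T local;
+            //   CopyCtor(&local, pArg);   // or a bitwise copy when there is no copy ctor
+            //   Dtor(pArg);               // only if a destructor exists
+            //   <pass &local as the native argument>
+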
+ LocalDesc locDesc(pargs->mm.m_pMT);
+
+ DWORD dwNewValueTypeLocal;
+
+ // Step 1
+ dwNewValueTypeLocal = pslIL->NewLocal(locDesc);
+
+ // Step 2
+ if (pargs->mm.m_pCopyCtor)
+ {
+ pslIL->EmitLDLOCA(dwNewValueTypeLocal);
+ pslIL->EmitLDARG(argidx);
+ pslIL->EmitCALL(pslIL->GetToken(pargs->mm.m_pCopyCtor), 2, 0);
+ }
+ else
+ {
+ pslIL->EmitLDARG(argidx);
+ pslIL->EmitLDOBJ(pslIL->GetToken(pargs->mm.m_pMT));
+ pslIL->EmitSTLOC(dwNewValueTypeLocal);
+ }
+
+ // Step 3
+ if (pargs->mm.m_pDtor)
+ {
+ pslIL->EmitLDARG(argidx);
+ pslIL->EmitCALL(pslIL->GetToken(pargs->mm.m_pDtor), 1, 0);
+ }
+
+ pslIL->SetStubTargetArgType(ELEMENT_TYPE_I); // native type is a pointer
+ pslILDispatch->EmitLDLOCA(dwNewValueTypeLocal);
+#endif // _TARGET_X86_
+
+ return OVERRIDDEN;
+ }
+ else
+ {
+        // Nothing to do but pass the value along. Note that on x86 the argument arrives by value
+        // but is converted to a pointer by the UM thunk, so that we don't make copies that the
+        // copy constructors would not account for.
+ LocalDesc locDesc(pargs->mm.m_pMT);
+ locDesc.MakeCopyConstructedPointer();
+
+ pslIL->SetStubTargetArgType(&locDesc); // native type is a pointer
+ pslILDispatch->EmitLDARG(argidx);
+
+ return OVERRIDDEN;
+ }
+}
+#endif // FEATURE_CORECLR
+
+LocalDesc ILArgIteratorMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_I); // va_list
+}
+
+LocalDesc ILArgIteratorMarshaler::GetManagedType()
+{
+ STANDARD_VM_CONTRACT;
+
+ return LocalDesc(MscorlibBinder::GetClass(CLASS__ARG_ITERATOR));
+}
+
+bool ILArgIteratorMarshaler::SupportsArgumentMarshal(DWORD dwMarshalFlags, UINT* pErrorResID)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (IsByref(dwMarshalFlags))
+ {
+ *pErrorResID = IDS_EE_BADMARSHAL_ARGITERATORRESTRICTION;
+ return false;
+ }
+
+ return true;
+}
+
+void ILArgIteratorMarshaler::EmitMarshalArgumentCLRToNative()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ EmitSetupSigAndDefaultHomesCLRToNative();
+
+ //
+ // marshal
+ //
+
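+    // Sketch of the emitted sequence (pseudocode; names are illustrative):
+    //   uint cb = StubHelpers.CalcVaListSize(&argIterator);
+    //   byte* vaList = stackalloc byte[cb];
+    //   StubHelpers.MarshalToUnmanagedVaListInternal(vaList, cb, &argIterator);
+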
+ // Allocate enough memory for va_list
+ DWORD dwVaListSizeLocal = m_pcsMarshal->NewLocal(LocalDesc(ELEMENT_TYPE_U4));
+ EmitLoadManagedHomeAddr(m_pcsMarshal);
+ m_pcsMarshal->EmitCALL(METHOD__STUBHELPERS__CALC_VA_LIST_SIZE, 1, 1);
+ m_pcsMarshal->EmitSTLOC(dwVaListSizeLocal);
+ m_pcsMarshal->EmitLDLOC(dwVaListSizeLocal);
+ m_pcsMarshal->EmitLOCALLOC();
+ EmitStoreNativeValue(m_pcsMarshal);
+
+    // void MarshalToUnmanagedVaListInternal(va_list, cbVaListSize, VARARGS* data)
+ EmitLoadNativeValue(m_pcsMarshal);
+ m_pcsMarshal->EmitLDLOC(dwVaListSizeLocal);
+ EmitLoadManagedHomeAddr(m_pcsMarshal);
+ m_pcsMarshal->EmitCALL(METHOD__STUBHELPERS__MARSHAL_TO_UNMANAGED_VA_LIST_INTERNAL, 3, 0);
+}
+
+void ILArgIteratorMarshaler::EmitMarshalArgumentNativeToCLR()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ EmitSetupSigAndDefaultHomesNativeToCLR();
+
+ EmitLoadNativeValue(m_pcsMarshal);
+ EmitLoadManagedHomeAddr(m_pcsMarshal);
+
+ // void MarshalToManagedVaList(va_list va, VARARGS *dataout)
+ m_pcsMarshal->EmitCALL(METHOD__STUBHELPERS__MARSHAL_TO_MANAGED_VA_LIST_INTERNAL, 2, 0);
+}
+
+
+LocalDesc ILArrayWithOffsetMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+LocalDesc ILArrayWithOffsetMarshaler::GetManagedType()
+{
+ STANDARD_VM_CONTRACT;
+
+ return LocalDesc(MscorlibBinder::GetClass(CLASS__ARRAY_WITH_OFFSET));
+}
+
+bool ILArrayWithOffsetMarshaler::SupportsArgumentMarshal(DWORD dwMarshalFlags, UINT* pErrorResID)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (IsCLRToNative(dwMarshalFlags) && !IsByref(dwMarshalFlags) && IsIn(dwMarshalFlags) && IsOut(dwMarshalFlags))
+ {
+ return true;
+ }
+
+ *pErrorResID = IDS_EE_BADMARSHAL_AWORESTRICTION;
+
+ return false;
+}
+
+void ILArrayWithOffsetMarshaler::EmitConvertSpaceAndContentsCLRToNativeTemp(ILCodeStream* pslILEmit)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ CONSISTENCY_CHECK(LOCAL_NUM_UNUSED == m_dwCountLocalNum);
+ CONSISTENCY_CHECK(LOCAL_NUM_UNUSED == m_dwOffsetLocalNum);
+ CONSISTENCY_CHECK(LOCAL_NUM_UNUSED == m_dwPinnedLocalNum);
+ }
+ CONTRACTL_END;
+
+ int tokArrayWithOffset_m_array = pslILEmit->GetToken(MscorlibBinder::GetField(FIELD__ARRAY_WITH_OFFSET__M_ARRAY));
+ int tokArrayWithOffset_m_count = pslILEmit->GetToken(MscorlibBinder::GetField(FIELD__ARRAY_WITH_OFFSET__M_COUNT));
+
+ ILCodeLabel* pNonNullLabel = pslILEmit->NewCodeLabel();
+ ILCodeLabel* pSlowAllocPathLabel = pslILEmit->NewCodeLabel();
+ ILCodeLabel* pDoneLabel = pslILEmit->NewCodeLabel();
+
+ m_dwCountLocalNum = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+
+ //
+ // Convert the space
+ //
+
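+    // In pseudocode, the space conversion below amounts to:
+    //   native = (m_array == null) ? null
+    //          : (m_count > s_cbStackAllocThreshold) ? Marshal.AllocCoTaskMem(m_count)
+    //                                                : stackalloc byte[m_count];
+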
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDFLD(tokArrayWithOffset_m_array);
+ pslILEmit->EmitBRTRUE(pNonNullLabel);
+
+ pslILEmit->EmitLoadNullPtr();
+ pslILEmit->EmitBR(pDoneLabel);
+ pslILEmit->EmitLabel(pNonNullLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDFLD(tokArrayWithOffset_m_count);
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitSTLOC(m_dwCountLocalNum);
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitLDC(s_cbStackAllocThreshold);
+ pslILEmit->EmitCGT_UN();
+ pslILEmit->EmitBRTRUE(pSlowAllocPathLabel);
+
+ // localloc
+ pslILEmit->EmitLOCALLOC();
+
+ pslILEmit->EmitBR(pDoneLabel);
+ pslILEmit->EmitLabel(pSlowAllocPathLabel);
+
+ // AllocCoTaskMem
+ pslILEmit->EmitCALL(METHOD__MARSHAL__ALLOC_CO_TASK_MEM, 1, 1);
+
+ pslILEmit->EmitLabel(pDoneLabel);
+ EmitStoreNativeValue(pslILEmit);
+
+ //
+ // Convert the contents
+ //
+
+ int tokArrayWithOffset_m_offset = pslILEmit->GetToken(MscorlibBinder::GetField(FIELD__ARRAY_WITH_OFFSET__M_OFFSET));
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ LocalDesc locDescPinned;
+ locDescPinned.cbType = 2;
+ locDescPinned.ElementType[0] = ELEMENT_TYPE_PINNED;
+ locDescPinned.ElementType[1] = ELEMENT_TYPE_OBJECT;
+ m_dwPinnedLocalNum = pslILEmit->NewLocal(locDescPinned);
+ m_dwOffsetLocalNum = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDFLD(tokArrayWithOffset_m_array);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDFLD(tokArrayWithOffset_m_array);
+ pslILEmit->EmitSTLOC(m_dwPinnedLocalNum);
+
+ EmitLoadNativeValue(pslILEmit); // dest
+
+ pslILEmit->EmitLDLOC(m_dwPinnedLocalNum);
+ pslILEmit->EmitCONV_I();
+ pslILEmit->EmitLDLOC(m_dwPinnedLocalNum);
+ pslILEmit->EmitCALL(METHOD__ARRAY__GET_DATA_PTR_OFFSET_INTERNAL, 1, 1);
+ pslILEmit->EmitADD(); // TODO Phase5: Use UnsafeAddrOfPinnedArrayElement
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDFLD(tokArrayWithOffset_m_offset);
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitSTLOC(m_dwOffsetLocalNum);
+ pslILEmit->EmitADD(); // src
+ pslILEmit->EmitLDLOC(m_dwCountLocalNum); // len
+
+ // static void Memcpy(byte* dest, byte* src, int len)
+ pslILEmit->EmitCALL(METHOD__BUFFER__MEMCPY, 3, 0);
+
+ pslILEmit->EmitLDNULL();
+ pslILEmit->EmitSTLOC(m_dwPinnedLocalNum);
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+void ILArrayWithOffsetMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ CONSISTENCY_CHECK(LOCAL_NUM_UNUSED != m_dwCountLocalNum);
+ CONSISTENCY_CHECK(LOCAL_NUM_UNUSED != m_dwOffsetLocalNum);
+ CONSISTENCY_CHECK(LOCAL_NUM_UNUSED != m_dwPinnedLocalNum);
+ }
+ CONTRACTL_END;
+
+ int tokArrayWithOffset_m_array = pslILEmit->GetToken(MscorlibBinder::GetField(FIELD__ARRAY_WITH_OFFSET__M_ARRAY));
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDFLD(tokArrayWithOffset_m_array);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDFLD(tokArrayWithOffset_m_array);
+ pslILEmit->EmitSTLOC(m_dwPinnedLocalNum);
+
+ pslILEmit->EmitLDLOC(m_dwPinnedLocalNum);
+ pslILEmit->EmitCONV_I();
+ pslILEmit->EmitLDLOC(m_dwPinnedLocalNum);
+ pslILEmit->EmitCALL(METHOD__ARRAY__GET_DATA_PTR_OFFSET_INTERNAL, 1, 1);
+ pslILEmit->EmitADD(); // TODO Phase5: Use UnsafeAddrOfPinnedArrayElement
+
+ pslILEmit->EmitLDLOC(m_dwOffsetLocalNum);
+ pslILEmit->EmitADD(); // dest
+
+ EmitLoadNativeValue(pslILEmit); // src
+
+ pslILEmit->EmitLDLOC(m_dwCountLocalNum); // len
+
+ // static void Memcpy(byte* dest, byte* src, int len)
+ pslILEmit->EmitCALL(METHOD__BUFFER__MEMCPY, 3, 0);
+
+ pslILEmit->EmitLDNULL();
+ pslILEmit->EmitSTLOC(m_dwPinnedLocalNum);
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+}
+
+void ILArrayWithOffsetMarshaler::EmitClearNativeTemp(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pDoneLabel = pslILEmit->NewCodeLabel();
+
+ pslILEmit->EmitLDLOC(m_dwCountLocalNum);
+ pslILEmit->EmitLDC(s_cbStackAllocThreshold);
+ pslILEmit->EmitCGT_UN();
+ pslILEmit->EmitBRFALSE(pDoneLabel);
+
+ // CoTaskMemFree
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__WIN32NATIVE__COTASKMEMFREE, 1, 0);
+
+ pslILEmit->EmitLabel(pDoneLabel);
+}
+
+LocalDesc ILAsAnyMarshalerBase::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+LocalDesc ILAsAnyMarshalerBase::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_OBJECT);
+}
+
+bool ILAsAnyMarshalerBase::SupportsArgumentMarshal(DWORD dwMarshalFlags, UINT* pErrorResID)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (IsCLRToNative(dwMarshalFlags) && !IsByref(dwMarshalFlags))
+ {
+ return true;
+ }
+
+ *pErrorResID = IDS_EE_BADMARSHAL_ASANYRESTRICTION;
+ return false;
+}
+
+bool ILAsAnyMarshalerBase::SupportsReturnMarshal(DWORD dwMarshalFlags, UINT* pErrorResID)
+{
+ LIMITED_METHOD_CONTRACT;
+ *pErrorResID = IDS_EE_BADMARSHAL_ASANYRESTRICTION;
+ return false;
+}
+
+void ILAsAnyMarshalerBase::EmitMarshalArgumentCLRToNative()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags));
+ CONSISTENCY_CHECK(LOCAL_NUM_UNUSED == m_dwMarshalerLocalNum);
+ }
+ CONTRACTL_END;
+
+ EmitSetupSigAndDefaultHomesCLRToNative();
+
+ BYTE inout = (IsIn(m_dwMarshalFlags) ? ML_IN : 0) | (IsOut(m_dwMarshalFlags) ? ML_OUT : 0);
+ BYTE fIsAnsi = IsAnsi() ? 1 : 0;
+ BYTE fBestFit = m_pargs->m_pMarshalInfo->GetBestFitMapping();
+ BYTE fThrow = m_pargs->m_pMarshalInfo->GetThrowOnUnmappableChar();
+
+ DWORD dwFlags = 0;
+
+ dwFlags |= inout << 24;
+ dwFlags |= fIsAnsi << 16;
+ dwFlags |= fThrow << 8;
+ dwFlags |= fBestFit << 0;
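+    // Resulting layout, one byte per field (decoded by the managed AsAnyMarshaler):
+    //   dwFlags = (inout << 24) | (fIsAnsi << 16) | (fThrow << 8) | fBestFit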
+
+ //
+ // marshal
+ //
+
+ LocalDesc marshalerType(MscorlibBinder::GetClass(CLASS__ASANY_MARSHALER));
+ m_dwMarshalerLocalNum = m_pcsMarshal->NewLocal(marshalerType);
+ DWORD dwTmpLocalNum = m_pcsMarshal->NewLocal(ELEMENT_TYPE_I);
+
+ m_pcsMarshal->EmitLDC(sizeof(MngdNativeArrayMarshaler));
+ m_pcsMarshal->EmitLOCALLOC();
+ m_pcsMarshal->EmitSTLOC(dwTmpLocalNum);
+
+ // marshaler = new AsAnyMarshaler(local_buffer)
+ m_pcsMarshal->EmitLDLOCA(m_dwMarshalerLocalNum);
+ m_pcsMarshal->EmitINITOBJ(m_pcsMarshal->GetToken(marshalerType.InternalToken));
+
+ m_pcsMarshal->EmitLDLOCA(m_dwMarshalerLocalNum);
+ m_pcsMarshal->EmitLDLOC(dwTmpLocalNum);
+ m_pcsMarshal->EmitCALL(METHOD__ASANY_MARSHALER__CTOR, 2, 0);
+
+ // nativeValue = marshaler.ConvertToNative(managedValue, flags);
+ m_pcsMarshal->EmitLDLOCA(m_dwMarshalerLocalNum);
+ EmitLoadManagedValue(m_pcsMarshal);
+ m_pcsMarshal->EmitLDC(dwFlags);
+ m_pcsMarshal->EmitCALL(METHOD__ASANY_MARSHALER__CONVERT_TO_NATIVE, 3, 1);
+ EmitStoreNativeValue(m_pcsMarshal);
+
+ //
+ // unmarshal
+ //
+ if (IsOut(m_dwMarshalFlags))
+ {
+ // marshaler.ConvertToManaged(managedValue, nativeValue)
+ m_pcsUnmarshal->EmitLDLOCA(m_dwMarshalerLocalNum);
+ EmitLoadManagedValue(m_pcsUnmarshal);
+ EmitLoadNativeValue(m_pcsUnmarshal);
+ m_pcsUnmarshal->EmitCALL(METHOD__ASANY_MARSHALER__CONVERT_TO_MANAGED, 3, 0);
+ }
+
+ //
+ // cleanup
+ //
+ EmitCleanupCLRToNativeTemp();
+}
+
+bool ILAsAnyMarshalerBase::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+void ILAsAnyMarshalerBase::EmitClearNativeTemp(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ // marshaler.ClearNative(nativeHome)
+ pslILEmit->EmitLDLOCA(m_dwMarshalerLocalNum);
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__ASANY_MARSHALER__CLEAR_NATIVE, 2, 0);
+}
+
+// We can get away with putting GetManagedType and GetNativeType on ILMngdMarshaler because
+// it is currently used only for reference marshaling, where this is appropriate. If it were
+// used for something else, we would want to move these down the inheritance tree.
+LocalDesc ILMngdMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+LocalDesc ILMngdMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(ELEMENT_TYPE_OBJECT);
+}
+
+void ILMngdMarshaler::EmitCallMngdMarshalerMethod(ILCodeStream* pslILEmit, MethodDesc *pMD)
+{
+ STANDARD_VM_CONTRACT;
+
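+    // The managed marshaler methods come in two shapes (signatures are illustrative):
+    //   static void Method(IntPtr pMarshaler, ref object managedHome, IntPtr pNativeHome)  // 3 args
+    //   static void Method(ref object managedHome, IntPtr pNativeHome)                     // 2 args
+    // Only the 3-argument form needs the marshaler instance pushed first.
+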
+ if (pMD != NULL)
+ {
+ MetaSig sig(pMD);
+ UINT numArgs = sig.NumFixedArgs();
+
+ if (numArgs == 3)
+ {
+ EmitLoadMngdMarshaler(pslILEmit);
+ }
+ else
+ {
+ _ASSERTE(numArgs == 2);
+ }
+
+ EmitLoadManagedHomeAddr(pslILEmit);
+ EmitLoadNativeHomeAddr(pslILEmit);
+
+ pslILEmit->EmitCALL(pslILEmit->GetToken(pMD), numArgs, 0);
+ }
+}
+
+bool ILNativeArrayMarshaler::UsePinnedArraySpecialCase()
+{
+ if (IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags) && (NULL == OleVariant::GetMarshalerForVarType(m_pargs->na.m_vt, TRUE)))
+ {
+ return true;
+ }
+
+ return false;
+}
+
+void ILNativeArrayMarshaler::EmitCreateMngdMarshaler(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (UsePinnedArraySpecialCase())
+ {
+ return;
+ }
+
+ m_dwMngdMarshalerLocalNum = pslILEmit->NewLocal(ELEMENT_TYPE_I);
+
+ pslILEmit->EmitLDC(sizeof(MngdNativeArrayMarshaler));
+ pslILEmit->EmitLOCALLOC();
+ pslILEmit->EmitSTLOC(m_dwMngdMarshalerLocalNum);
+
+ CREATE_MARSHALER_CARRAY_OPERANDS mops;
+ m_pargs->m_pMarshalInfo->GetMops(&mops);
+
+ pslILEmit->EmitLDLOC(m_dwMngdMarshalerLocalNum);
+
+ pslILEmit->EmitLDTOKEN(pslILEmit->GetToken(mops.methodTable));
+ pslILEmit->EmitCALL(METHOD__RT_TYPE_HANDLE__GETVALUEINTERNAL, 1, 1);
+
+ DWORD dwFlags = mops.elementType;
+ dwFlags |= (((DWORD)mops.bestfitmapping) << 16);
+ dwFlags |= (((DWORD)mops.throwonunmappablechar) << 24);
+
+ if (!IsCLRToNative(m_dwMarshalFlags) && IsOut(m_dwMarshalFlags) && IsIn(m_dwMarshalFlags))
+ {
+ // Unmanaged->managed in/out is the only case where we expect the native buffer to contain valid data.
+ _ASSERTE((dwFlags & MngdNativeArrayMarshaler::FLAG_NATIVE_DATA_VALID) == 0);
+ dwFlags |= MngdNativeArrayMarshaler::FLAG_NATIVE_DATA_VALID;
+ }
+
+ pslILEmit->EmitLDC(dwFlags);
+
+ pslILEmit->EmitCALL(METHOD__MNGD_NATIVE_ARRAY_MARSHALER__CREATE_MARSHALER, 3, 0);
+}
+
+
+void ILNativeArrayMarshaler::EmitMarshalArgumentCLRToNative()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ if (UsePinnedArraySpecialCase())
+ {
+ //
+ // Replicate ML_PINNEDISOMORPHICARRAY_C2N_EXPRESS behavior -- note that this
+ // gives in/out semantics "for free" even if the app doesn't specify one or
+ // the other. Since there is no enforcement of this, apps blithely depend
+ // on it.
+ //
+
+ // The base offset should only be 0 for System.Array parameters for which
+ // OleVariant::GetMarshalerForVarType(vt) should never return NULL.
+ _ASSERTE(m_pargs->na.m_optionalbaseoffset != 0);
+
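+        // In pseudocode, the emitted IL below does:
+        //   native = null;
+        //   if (managed != null)
+        //   {
+        //       pinnedLocal = managed;                       // pins the array
+        //       native = (IntPtr)pinnedLocal + baseOffset;   // address of the first element
+        //   }
+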
+ EmitSetupSigAndDefaultHomesCLRToNative();
+
+ LocalDesc managedType = GetManagedType();
+ managedType.MakePinned();
+
+ DWORD dwPinnedLocal = m_pcsMarshal->NewLocal(managedType);
+ ILCodeLabel* pNullRefLabel = m_pcsMarshal->NewCodeLabel();
+
+ m_pcsMarshal->EmitLoadNullPtr();
+ EmitStoreNativeValue(m_pcsMarshal);
+
+ EmitLoadManagedValue(m_pcsMarshal);
+ m_pcsMarshal->EmitBRFALSE(pNullRefLabel);
+
+ EmitLoadManagedValue(m_pcsMarshal);
+ m_pcsMarshal->EmitSTLOC(dwPinnedLocal);
+ m_pcsMarshal->EmitLDLOC(dwPinnedLocal);
+ m_pcsMarshal->EmitCONV_I();
+ m_pcsMarshal->EmitLDC(m_pargs->na.m_optionalbaseoffset);
+ m_pcsMarshal->EmitADD();
+ EmitStoreNativeValue(m_pcsMarshal);
+
+ if (g_pConfig->InteropLogArguments())
+ {
+ m_pslNDirect->EmitLogNativeArgument(m_pcsMarshal, dwPinnedLocal);
+ }
+
+ m_pcsMarshal->EmitLabel(pNullRefLabel);
+ }
+ else
+ {
+ ILMngdMarshaler::EmitMarshalArgumentCLRToNative();
+ }
+}
+
+//
+// Peek at the SizeParamIndex argument
+// 1) See if the SizeParamIndex argument is being passed by ref
+// 2) Get the element type of the SizeParamIndex argument
+//
+BOOL ILNativeArrayMarshaler::CheckSizeParamIndexArg(
+ const CREATE_MARSHALER_CARRAY_OPERANDS &mops,
+ CorElementType *pElementType)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_pargs != NULL);
+ PRECONDITION(m_pargs->m_pMarshalInfo != NULL);
+ }
+ CONTRACTL_END;
+
+ MethodDesc *pMD = m_pargs->m_pMarshalInfo->GetMethodDesc();
+ _ASSERT(pMD);
+
+ Module *pModule = m_pargs->m_pMarshalInfo->GetModule();
+ _ASSERT(pModule);
+
+    SigTypeContext emptyTypeContext; // this is an empty type context: ndirect and COM calls are guaranteed not to be generic.
+ MetaSig msig(pMD->GetSignature(),
+ pModule,
+ &emptyTypeContext);
+
+ //
+ // Go to the SizeParamIndex argument
+    // Note that we already have a check in place to make sure SizeParamIndex is within range
+ //
+ if (msig.HasExplicitThis())
+ msig.SkipArg();
+
+ for (int i = 0; i < mops.countParamIdx; ++i)
+ msig.SkipArg();
+
+ msig.NextArg();
+
+ SigPointer sigPointer = msig.GetArgProps();
+
+ // Peek into the SizeParamIndex argument
+ CorElementType elementType;
+ IfFailThrow(sigPointer.PeekElemType(&elementType));
+
+ if (elementType != ELEMENT_TYPE_BYREF)
+ {
+ if (elementType == ELEMENT_TYPE_STRING ||
+ elementType == ELEMENT_TYPE_ARRAY ||
+ elementType == ELEMENT_TYPE_FNPTR ||
+ elementType == ELEMENT_TYPE_OBJECT ||
+ elementType == ELEMENT_TYPE_SZARRAY ||
+ elementType == ELEMENT_TYPE_TYPEDBYREF)
+ {
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIZECONTROLBADTYPE);
+ }
+
+ *pElementType = elementType;
+ return FALSE;
+ }
+
+ // Get the real type
+ IfFailThrow(sigPointer.GetElemType(NULL));
+ IfFailThrow(sigPointer.PeekElemType(&elementType));
+
+ // All the integral types are supported
+ switch(elementType)
+ {
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ break;
+
+ default :
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIZECONTROLBADTYPE);
+ }
+
+ *pElementType = elementType;
+ return TRUE;
+}
+
+//
+// Calculate the number of elements and load it onto the stack
+//
+void ILNativeArrayMarshaler::EmitLoadElementCount(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+    // Determine the element count and load it onto the evaluation stack
+ //
+ CREATE_MARSHALER_CARRAY_OPERANDS mops;
+ m_pargs->m_pMarshalInfo->GetMops(&mops);
+
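+    // The element count is computed as
+    //   count = checked((int)sizeParamValue * multiplier + additive)
+    // where multiplier and additive come from the size-controlling marshaling metadata;
+    // when there is no size-controlling parameter (multiplier == 0), the additive alone
+    // supplies the count.
+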
+ if (mops.multiplier != 0)
+ {
+ //
+ // SizeParamIndex arg fix up for LCID
+ //
+ unsigned countParamIdx = mops.countParamIdx;
+ if (!IsCLRToNative(m_dwMarshalFlags))
+ {
+ int lcidParamIdx = m_pslNDirect->GetLCIDParamIdx();
+
+ if (lcidParamIdx >= 0 && (unsigned)lcidParamIdx <= countParamIdx)
+ {
+ // the LCID is injected before the count parameter so the index
+ // has to be incremented to get the unmanaged parameter number
+ countParamIdx++;
+ }
+ }
+
+ //
+ // Load SizeParamIndex argument
+ //
+ pslILEmit->EmitLDARG(countParamIdx);
+
+ //
+ // By-Ref support
+ //
+
+        // Does the SizeParamIndex point to a by-ref parameter?
+ CorElementType sizeParamIndexArgType;
+ if (CheckSizeParamIndexArg(mops, &sizeParamIndexArgType))
+ {
+ // Load the by-ref parameter
+ switch (sizeParamIndexArgType)
+ {
+ case ELEMENT_TYPE_I1:
+ pslILEmit->EmitLDIND_I1();
+ break;
+
+ case ELEMENT_TYPE_U1:
+ pslILEmit->EmitLDIND_U1();
+ break;
+
+ case ELEMENT_TYPE_I2:
+ pslILEmit->EmitLDIND_I2();
+ break;
+
+ case ELEMENT_TYPE_U2:
+ pslILEmit->EmitLDIND_U2();
+ break;
+
+ case ELEMENT_TYPE_I4:
+ pslILEmit->EmitLDIND_I4();
+ break;
+
+ case ELEMENT_TYPE_U4:
+ pslILEmit->EmitLDIND_U4();
+ break;
+
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_I8:
+ pslILEmit->EmitLDIND_I8();
+ break;
+
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ pslILEmit->EmitLDIND_I();
+ break;
+
+ default :
+                    // Should not get here; we should have thrown an exception above.
+ _ASSERT(FALSE);
+ }
+
+ }
+
+ pslILEmit->EmitCONV_OVF_I4();
+
+ // multiplier * arg + additive
+ pslILEmit->EmitLDC(mops.multiplier);
+ pslILEmit->EmitMUL_OVF();
+ pslILEmit->EmitLDC(mops.additive);
+ pslILEmit->EmitADD_OVF();
+ }
+ else
+ {
+ pslILEmit->EmitLDC((int)mops.additive);
+ }
+}
+
+void ILNativeArrayMarshaler::EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadMngdMarshaler(pslILEmit);
+ EmitLoadManagedHomeAddr(pslILEmit);
+ EmitLoadNativeHomeAddr(pslILEmit);
+
+ if (IsByref(m_dwMarshalFlags))
+ {
+ //
+        // Reset the element count just in case an exception is thrown in the code emitted by
+ // EmitLoadElementCount. The best thing we can do here is to avoid a crash.
+ //
+ _ASSERTE(m_dwSavedSizeArg != LOCAL_NUM_UNUSED);
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitSTLOC(m_dwSavedSizeArg);
+ }
+
+ // Dynamically calculate element count using SizeParamIndex argument
+ EmitLoadElementCount(pslILEmit);
+
+ if (IsByref(m_dwMarshalFlags))
+ {
+ //
+ // Save the native array size before converting it to managed and load it again
+ //
+ _ASSERTE(m_dwSavedSizeArg != LOCAL_NUM_UNUSED);
+ pslILEmit->EmitSTLOC(m_dwSavedSizeArg);
+ pslILEmit->EmitLDLOC(m_dwSavedSizeArg);
+ }
+
+ // MngdNativeArrayMarshaler::ConvertSpaceToManaged
+ pslILEmit->EmitCALL(pslILEmit->GetToken(GetConvertSpaceToManagedMethod()), 4, 0);
+}
+
+void ILNativeArrayMarshaler::EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsByref(m_dwMarshalFlags))
+ {
+ _ASSERTE(m_dwSavedSizeArg != LOCAL_NUM_UNUSED);
+
+ //
+ // Save the array size before converting it to native
+ //
+ EmitLoadManagedValue(pslILEmit);
+ ILCodeLabel *pManagedHomeIsNull = pslILEmit->NewCodeLabel();
+ pslILEmit->EmitBRFALSE(pManagedHomeIsNull);
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDLEN();
+ pslILEmit->EmitSTLOC(m_dwSavedSizeArg);
+ pslILEmit->EmitLabel(pManagedHomeIsNull);
+ }
+
+
+ ILMngdMarshaler::EmitConvertSpaceCLRToNative(pslILEmit);
+}
+
+void ILNativeArrayMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadMngdMarshaler(pslILEmit);
+ EmitLoadNativeHomeAddr(pslILEmit);
+ EmitLoadNativeSize(pslILEmit);
+
+ pslILEmit->EmitCALL(pslILEmit->GetToken(GetClearNativeMethod()), 3, 0);
+}
+
+void ILNativeArrayMarshaler::EmitLoadNativeSize(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsByref(m_dwMarshalFlags))
+ {
+ _ASSERT(m_dwSavedSizeArg != LOCAL_NUM_UNUSED);
+ pslILEmit->EmitLDLOC(m_dwSavedSizeArg);
+ }
+ else
+ {
+ pslILEmit->EmitLDC(0);
+ EmitLoadManagedValue(pslILEmit);
+ ILCodeLabel *pManagedHomeIsNull = pslILEmit->NewCodeLabel();
+ pslILEmit->EmitBRFALSE(pManagedHomeIsNull);
+ pslILEmit->EmitPOP(); // Pop the 0 on the stack
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDLEN();
+ pslILEmit->EmitCONV_OVF_I4();
+ pslILEmit->EmitLabel(pManagedHomeIsNull); // Keep the 0 on the stack
+ }
+}
+
+void ILNativeArrayMarshaler::EmitClearNativeContents(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadMngdMarshaler(pslILEmit);
+ EmitLoadNativeHomeAddr(pslILEmit);
+ EmitLoadNativeSize(pslILEmit);
+
+ pslILEmit->EmitCALL(pslILEmit->GetToken(GetClearNativeContentsMethod()), 3, 0);
+}
+
+void ILNativeArrayMarshaler::EmitNewSavedSizeArgLocal()
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(m_dwSavedSizeArg == LOCAL_NUM_UNUSED);
+ ILCodeStream *pcsSetup = m_pslNDirect->GetSetupCodeStream();
+ m_dwSavedSizeArg = pcsSetup->NewLocal(ELEMENT_TYPE_I4);
+ pcsSetup->EmitLDC(0);
+ pcsSetup->EmitSTLOC(m_dwSavedSizeArg);
+}
+
+void ILNativeArrayMarshaler::EmitMarshalArgumentNativeToCLRByref()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsByref(m_dwMarshalFlags))
+ {
+ EmitNewSavedSizeArgLocal();
+ }
+
+ ILMngdMarshaler::EmitMarshalArgumentNativeToCLRByref();
+}
+
+void ILNativeArrayMarshaler::EmitMarshalArgumentCLRToNativeByref()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsByref(m_dwMarshalFlags))
+ {
+ EmitNewSavedSizeArgLocal();
+ }
+
+ ILMngdMarshaler::EmitMarshalArgumentCLRToNativeByref();
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+FCIMPL3(void, MngdNativeArrayMarshaler::CreateMarshaler, MngdNativeArrayMarshaler* pThis, MethodTable* pMT, UINT32 dwFlags)
+{
+ FCALL_CONTRACT;
+
+ // Don't check whether the input values are negative - passing negative size-controlling
+ // arguments and compensating them with a positive SizeConst has always worked.
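+    //
+    // dwFlags mirrors the packing in ILNativeArrayMarshaler::EmitCreateMngdMarshaler:
+    // the VARTYPE in the low bits, best-fit mapping in byte 2, throw-on-unmappable-char
+    // in byte 3, plus the FLAG_NATIVE_DATA_VALID bit, which is masked off below before
+    // the byte fields are extracted.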
+ pThis->m_pElementMT = pMT;
+ pThis->m_vt = (VARTYPE)(dwFlags);
+ pThis->m_NativeDataValid = (BYTE)((dwFlags & FLAG_NATIVE_DATA_VALID) != 0);
+ dwFlags &= ~FLAG_NATIVE_DATA_VALID;
+ pThis->m_BestFitMap = (BYTE)(dwFlags >> 16);
+ pThis->m_ThrowOnUnmappableChar = (BYTE)(dwFlags >> 24);
+ pThis->m_Array = TypeHandle();
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdNativeArrayMarshaler::ConvertSpaceToNative, MngdNativeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ BASEARRAYREF arrayRef = (BASEARRAYREF) *pManagedHome;
+
+ if (arrayRef == NULL)
+ {
+ *pNativeHome = NULL;
+ }
+ else
+ {
+ SIZE_T cElements = arrayRef->GetNumComponents();
+ SIZE_T cbElement = OleVariant::GetElementSizeForVarType(pThis->m_vt, pThis->m_pElementMT);
+
+ if (cbElement == 0)
+ COMPlusThrow(kArgumentException, IDS_EE_COM_UNSUPPORTED_SIG);
+
+ SIZE_T cbArray = cElements;
+ if ( (!SafeMulSIZE_T(&cbArray, cbElement)) || cbArray > MAX_SIZE_FOR_INTEROP)
+ COMPlusThrow(kArgumentException, IDS_EE_STRUCTARRAYTOOLARGE);
+
+ *pNativeHome = CoTaskMemAlloc(cbArray);
+ if (*pNativeHome == NULL)
+ ThrowOutOfMemory();
+
+ // initialize the array
+ FillMemory(*pNativeHome, cbArray, 0);
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdNativeArrayMarshaler::ConvertContentsToNative, MngdNativeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ BASEARRAYREF* pArrayRef = (BASEARRAYREF *) pManagedHome;
+
+ if (*pArrayRef != NULL)
+ {
+ const OleVariant::Marshaler* pMarshaler = OleVariant::GetMarshalerForVarType(pThis->m_vt, TRUE);
+
+ if (pMarshaler == NULL || pMarshaler->ComToOleArray == NULL)
+ {
+ SIZE_T cElements = (*pArrayRef)->GetNumComponents();
+ SIZE_T cbArray = cElements;
+ if ( (!SafeMulSIZE_T(&cbArray, OleVariant::GetElementSizeForVarType(pThis->m_vt, pThis->m_pElementMT))) || cbArray > MAX_SIZE_FOR_INTEROP)
+ COMPlusThrow(kArgumentException, IDS_EE_STRUCTARRAYTOOLARGE);
+
+ _ASSERTE(!GetTypeHandleForCVType(OleVariant::GetCVTypeForVarType(pThis->m_vt)).GetMethodTable()->ContainsPointers());
+ memcpyNoGCRefs(*pNativeHome, (*pArrayRef)->GetDataPtr(), cbArray);
+ }
+ else
+ {
+ pMarshaler->ComToOleArray(pArrayRef, *pNativeHome, pThis->m_pElementMT, pThis->m_BestFitMap, pThis->m_ThrowOnUnmappableChar, pThis->m_NativeDataValid);
+ }
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL4(void, MngdNativeArrayMarshaler::ConvertSpaceToManaged, MngdNativeArrayMarshaler* pThis,
+ OBJECTREF* pManagedHome, void** pNativeHome, INT32 cElements)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ if (*pNativeHome == NULL)
+ {
+ SetObjectReference(pManagedHome, NULL, GetAppDomain());
+ }
+ else
+ {
+ // <TODO>@todo: lookup this class before marshal time</TODO>
+ if (pThis->m_Array.IsNull())
+ {
+ // Get proper array class name & type
+ pThis->m_Array = OleVariant::GetArrayForVarType(pThis->m_vt, TypeHandle(pThis->m_pElementMT));
+ if (pThis->m_Array.IsNull())
+ COMPlusThrow(kTypeLoadException);
+ }
+ //
+ // Allocate array
+ //
+ SetObjectReference(pManagedHome, AllocateArrayEx(pThis->m_Array, &cElements, 1), GetAppDomain());
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdNativeArrayMarshaler::ConvertContentsToManaged, MngdNativeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ if (*pNativeHome != NULL)
+ {
+ const OleVariant::Marshaler *pMarshaler = OleVariant::GetMarshalerForVarType(pThis->m_vt, TRUE);
+
+ BASEARRAYREF* pArrayRef = (BASEARRAYREF*) pManagedHome;
+
+ if (pMarshaler == NULL || pMarshaler->OleToComArray == NULL)
+ {
+ SIZE_T cElements = (*pArrayRef)->GetNumComponents();
+ SIZE_T cbArray = cElements;
+ if ( (!SafeMulSIZE_T(&cbArray, OleVariant::GetElementSizeForVarType(pThis->m_vt, pThis->m_pElementMT))) || cbArray > MAX_SIZE_FOR_INTEROP)
+ COMPlusThrow(kArgumentException, IDS_EE_STRUCTARRAYTOOLARGE);
+
+ // If we are copying variants, strings, etc, we need to use write barrier
+ _ASSERTE(!GetTypeHandleForCVType(OleVariant::GetCVTypeForVarType(pThis->m_vt)).GetMethodTable()->ContainsPointers());
+ memcpyNoGCRefs((*pArrayRef)->GetDataPtr(), *pNativeHome, cbArray );
+ }
+ else
+ {
+ pMarshaler->OleToComArray(*pNativeHome, pArrayRef, pThis->m_pElementMT);
+ }
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdNativeArrayMarshaler::ClearNative, MngdNativeArrayMarshaler* pThis, void** pNativeHome, INT32 cElements)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ if (*pNativeHome != NULL)
+ {
+ DoClearNativeContents(pThis, pNativeHome, cElements);
+ CoTaskMemFree(*pNativeHome);
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdNativeArrayMarshaler::ClearNativeContents, MngdNativeArrayMarshaler* pThis, void** pNativeHome, INT32 cElements)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ DoClearNativeContents(pThis, pNativeHome, cElements);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+void MngdNativeArrayMarshaler::DoClearNativeContents(MngdNativeArrayMarshaler* pThis, void** pNativeHome, INT32 cElements)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (*pNativeHome != NULL)
+ {
+ const OleVariant::Marshaler *pMarshaler = OleVariant::GetMarshalerForVarType(pThis->m_vt, FALSE);
+
+ if (pMarshaler != NULL && pMarshaler->ClearOleArray != NULL)
+ {
+ pMarshaler->ClearOleArray(*pNativeHome, cElements, pThis->m_pElementMT);
+ }
+ }
+}
+
+#endif // CROSSGEN_COMPILE
+
+
+#ifdef FEATURE_COMINTEROP
+void ILSafeArrayMarshaler::EmitCreateMngdMarshaler(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ m_dwMngdMarshalerLocalNum = pslILEmit->NewLocal(ELEMENT_TYPE_I);
+
+ pslILEmit->EmitLDC(sizeof(MngdSafeArrayMarshaler));
+ pslILEmit->EmitLOCALLOC();
+ pslILEmit->EmitSTLOC(m_dwMngdMarshalerLocalNum);
+
+ CREATE_MARSHALER_CARRAY_OPERANDS mops;
+ m_pargs->m_pMarshalInfo->GetMops(&mops);
+
+ DWORD dwFlags = mops.elementType;
+ BYTE fStatic = 0;
+
+ if (NeedsCheckForStatic())
+ {
+ fStatic |= MngdSafeArrayMarshaler::SCSF_CheckForStatic;
+ }
+
+ if (!IsCLRToNative(m_dwMarshalFlags) && IsOut(m_dwMarshalFlags) && IsIn(m_dwMarshalFlags))
+ {
+ // Unmanaged->managed in/out is the only case where we expect the native buffer to contain valid data.
+ fStatic |= MngdSafeArrayMarshaler::SCSF_NativeDataValid;
+ }
+
+ dwFlags |= fStatic << 16;
+ dwFlags |= ((BYTE)!!m_pargs->m_pMarshalInfo->GetNoLowerBounds()) << 24;
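+    // dwFlags layout: the VARTYPE in the low bits, the SCSF_* flags in byte 2, and the
+    // no-lower-bounds flag in byte 3 (decoded in MngdSafeArrayMarshaler::CreateMarshaler).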
+
+ pslILEmit->EmitLDLOC(m_dwMngdMarshalerLocalNum);
+ pslILEmit->EmitLDTOKEN(pslILEmit->GetToken(mops.methodTable));
+ pslILEmit->EmitCALL(METHOD__RT_TYPE_HANDLE__GETVALUEINTERNAL, 1, 1);
+ pslILEmit->EmitLDC(m_pargs->m_pMarshalInfo->GetArrayRank());
+ pslILEmit->EmitLDC(dwFlags);
+
+ pslILEmit->EmitCALL(METHOD__MNGD_SAFE_ARRAY_MARSHALER__CREATE_MARSHALER, 4, 0);
+}
+
+void ILSafeArrayMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILMngdMarshaler::EmitConvertContentsNativeToCLR(pslILEmit);
+
+ if (NeedsCheckForStatic())
+ {
+ CONSISTENCY_CHECK(-1 == m_dwOriginalManagedLocalNum);
+ m_dwOriginalManagedLocalNum = pslILEmit->NewLocal(ELEMENT_TYPE_OBJECT);
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitSTLOC(m_dwOriginalManagedLocalNum);
+ }
+}
+
+void ILSafeArrayMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadMngdMarshaler(pslILEmit);
+ EmitLoadManagedHomeAddr(pslILEmit);
+ EmitLoadNativeHomeAddr(pslILEmit);
+ if (NeedsCheckForStatic())
+ {
+ CONSISTENCY_CHECK(-1 != m_dwOriginalManagedLocalNum);
+ pslILEmit->EmitLDLOC(m_dwOriginalManagedLocalNum);
+ }
+ else
+ {
+ pslILEmit->EmitLDNULL();
+ }
+ pslILEmit->EmitCALL(METHOD__MNGD_SAFE_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_NATIVE, 4, 0);
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+FCIMPL4(void, MngdSafeArrayMarshaler::CreateMarshaler, MngdSafeArrayMarshaler* pThis, MethodTable* pMT, UINT32 iRank, UINT32 dwFlags)
+{
+ FCALL_CONTRACT;
+
+ pThis->m_pElementMT = pMT;
+ pThis->m_iRank = iRank;
+ pThis->m_vt = (VARTYPE)dwFlags;
+ pThis->m_fStatic = (BYTE)(dwFlags >> 16);
+ pThis->m_nolowerbounds = (BYTE)(dwFlags >> 24);
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdSafeArrayMarshaler::ConvertSpaceToNative, MngdSafeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome)
+{
+ FCALL_CONTRACT;
+
+ if (pThis->m_fStatic & SCSF_IsStatic)
+ return;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pThis->m_vt != VT_EMPTY);
+ PRECONDITION(CheckPointer(pThis->m_pElementMT));
+ }
+ CONTRACTL_END;
+
+ if (*pManagedHome != NULL)
+ {
+ *pNativeHome = (void *) OleVariant::CreateSafeArrayForArrayRef((BASEARRAYREF*) pManagedHome, pThis->m_vt, pThis->m_pElementMT);
+ }
+ else
+ {
+ *pNativeHome = NULL;
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL4(void, MngdSafeArrayMarshaler::ConvertContentsToNative, MngdSafeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome, Object* pOriginalManagedUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pThis->m_vt != VT_EMPTY);
+ PRECONDITION(CheckPointer(pThis->m_pElementMT));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pOriginalManaged = ObjectToOBJECTREF(pOriginalManagedUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_1(pOriginalManaged);
+
+ if ((pThis->m_fStatic & SCSF_IsStatic) &&
+ (*pManagedHome != pOriginalManaged))
+ {
+ COMPlusThrow(kInvalidOperationException, IDS_INVALID_REDIM);
+ }
+
+ if (*pManagedHome != NULL)
+ {
+ OleVariant::MarshalSafeArrayForArrayRef((BASEARRAYREF *) pManagedHome,
+ (SAFEARRAY*)*pNativeHome,
+ pThis->m_vt,
+ pThis->m_pElementMT,
+ (pThis->m_fStatic & SCSF_NativeDataValid));
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdSafeArrayMarshaler::ConvertSpaceToManaged, MngdSafeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pThis->m_vt != VT_EMPTY);
+ PRECONDITION(CheckPointer(pThis->m_pElementMT));
+ }
+ CONTRACTL_END;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ if (*pNativeHome != NULL)
+ {
+ // If the managed array has a rank defined then make sure the rank of the
+ // SafeArray matches the defined rank.
+ if (pThis->m_iRank != -1)
+ {
+ int iSafeArrayRank = SafeArrayGetDim((SAFEARRAY*) *pNativeHome);
+ if (pThis->m_iRank != iSafeArrayRank)
+ {
+ WCHAR strExpectedRank[64];
+ WCHAR strActualRank[64];
+ _ltow_s(pThis->m_iRank, strExpectedRank, COUNTOF(strExpectedRank), 10);
+ _ltow_s(iSafeArrayRank, strActualRank, COUNTOF(strActualRank), 10);
+ COMPlusThrow(kSafeArrayRankMismatchException, IDS_EE_SAFEARRAYRANKMISMATCH, strActualRank, strExpectedRank);
+ }
+ }
+
+ if (pThis->m_nolowerbounds)
+ {
+ LONG lowerbound;
+ if ( (SafeArrayGetDim( (SAFEARRAY*)*pNativeHome ) != 1) ||
+ (FAILED(SafeArrayGetLBound( (SAFEARRAY*)*pNativeHome, 1, &lowerbound))) ||
+ lowerbound != 0 )
+ {
+ COMPlusThrow(kSafeArrayRankMismatchException, IDS_EE_SAFEARRAYSZARRAYMISMATCH);
+ }
+ }
+
+ SetObjectReference(pManagedHome,
+ (OBJECTREF) OleVariant::CreateArrayRefForSafeArray((SAFEARRAY*) *pNativeHome,
+ pThis->m_vt,
+ pThis->m_pElementMT), GetAppDomain());
+ }
+ else
+ {
+ SetObjectReference(pManagedHome, NULL, GetAppDomain());
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdSafeArrayMarshaler::ConvertContentsToManaged, MngdSafeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pThis->m_vt != VT_EMPTY);
+ PRECONDITION(CheckPointer(pThis->m_pElementMT));
+ }
+ CONTRACTL_END;
+
+ SAFEARRAY* pNative = *(SAFEARRAY**)pNativeHome;
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ if (pNative && pNative->fFeatures & FADF_STATIC)
+ {
+ pThis->m_fStatic |= SCSF_IsStatic;
+ }
+
+ if (*pNativeHome != NULL)
+ {
+ OleVariant::MarshalArrayRefForSafeArray((SAFEARRAY*)*pNativeHome,
+ (BASEARRAYREF *) pManagedHome,
+ pThis->m_vt,
+ pThis->m_pElementMT);
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdSafeArrayMarshaler::ClearNative, MngdSafeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome)
+{
+ FCALL_CONTRACT;
+
+ if (pThis->m_fStatic & SCSF_IsStatic)
+ return;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ if (*pNativeHome != NULL)
+ {
+ GCX_PREEMP();
+ _ASSERTE(GetModuleHandleA("oleaut32.dll") != NULL);
+ // SafeArray has been created. Oleaut32.dll must have been loaded.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ SafeArrayDestroy((SAFEARRAY*)*pNativeHome);
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+#endif // CROSSGEN_COMPILE
+
+
+LocalDesc ILHiddenLengthArrayMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+LocalDesc ILHiddenLengthArrayMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc(ELEMENT_TYPE_OBJECT);
+}
+
+void ILHiddenLengthArrayMarshaler::EmitCreateMngdMarshaler(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!CanUsePinnedArray())
+ {
+ m_dwMngdMarshalerLocalNum = pslILEmit->NewLocal(ELEMENT_TYPE_I);
+
+ pslILEmit->EmitLDC(sizeof(MngdHiddenLengthArrayMarshaler));
+ pslILEmit->EmitLOCALLOC();
+ pslILEmit->EmitSTLOC(m_dwMngdMarshalerLocalNum);
+
+ MethodTable *pElementMT = m_pargs->m_pMarshalInfo->GetArrayElementTypeHandle().GetMethodTable();
+ pslILEmit->EmitLDLOC(m_dwMngdMarshalerLocalNum);
+ pslILEmit->EmitLDTOKEN(pslILEmit->GetToken(pElementMT));
+ pslILEmit->EmitCALL(METHOD__RT_TYPE_HANDLE__GETVALUEINTERNAL, 1, 1);
+
+ pslILEmit->EmitLDC(m_pargs->na.m_cbElementSize);
+ pslILEmit->EmitLDC(m_pargs->na.m_vt);
+
+ pslILEmit->EmitCALL(METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CREATE_MARSHALER, 4, 0);
+ }
+}
+
+void ILHiddenLengthArrayMarshaler::EmitMarshalArgumentCLRToNative()
+{
+ STANDARD_VM_CONTRACT;
+
+    // If we can pin the array, do that rather than marshaling it in a more heavyweight way.
+    // Otherwise, fall back to doing a full marshal.
+ if (CanUsePinnedArray())
+ {
+ EmitSetupSigAndDefaultHomesCLRToNative();
+
+ LocalDesc managedType = GetManagedType();
+ managedType.MakePinned();
+ DWORD dwPinnedLocal = m_pcsMarshal->NewLocal(managedType);
+
+ ILCodeLabel* pMarshalDoneLabel = m_pcsMarshal->NewCodeLabel();
+
+ // native = NULL
+ m_pcsMarshal->EmitLoadNullPtr();
+ EmitStoreNativeValue(m_pcsMarshal);
+
+ // if (managed == null) goto MarshalDone
+ EmitLoadManagedValue(m_pcsMarshal);
+ m_pcsMarshal->EmitBRFALSE(pMarshalDoneLabel);
+
+ // pinnedLocal = managed;
+ EmitLoadManagedValue(m_pcsMarshal);
+ m_pcsMarshal->EmitSTLOC(dwPinnedLocal);
+
+ // native = pinnedLocal + dataOffset
+ m_pcsMarshal->EmitLDLOC(dwPinnedLocal);
+ m_pcsMarshal->EmitCONV_I();
+ m_pcsMarshal->EmitLDC(m_pargs->na.m_optionalbaseoffset);
+ m_pcsMarshal->EmitADD();
+ EmitStoreNativeValue(m_pcsMarshal);
+
+ if (g_pConfig->InteropLogArguments())
+ {
+ m_pslNDirect->EmitLogNativeArgument(m_pcsMarshal, dwPinnedLocal);
+ }
+
+ // MarshalDone:
+ m_pcsMarshal->EmitLabel(pMarshalDoneLabel);
+ }
+ else
+ {
+ ILMngdMarshaler::EmitMarshalArgumentCLRToNative();
+ }
+
+}
+
+void ILHiddenLengthArrayMarshaler::EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!CanUsePinnedArray())
+ {
+ EmitLoadMngdMarshaler(pslILEmit);
+ EmitLoadManagedHomeAddr(pslILEmit);
+ EmitLoadNativeHomeAddr(pslILEmit);
+ EmitLoadNativeArrayLength(pslILEmit);
+
+ // MngdHiddenLengthArrayMarshaler::ConvertSpaceToManaged
+ pslILEmit->EmitCALL(pslILEmit->GetToken(GetConvertSpaceToManagedMethod()), 4, 0);
+ }
+}
+
+void ILHiddenLengthArrayMarshaler::EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ // If we're marshaling out to native code, then we need to set the length out parameter
+ if (!IsCLRToNative(m_dwMarshalFlags))
+ {
+ if (IsByref(m_dwMarshalFlags) || IsRetval(m_dwMarshalFlags) || IsOut(m_dwMarshalFlags))
+ {
+ ILCodeLabel *pSkipGetLengthLabel = m_pcsMarshal->NewCodeLabel();
+
+ // nativeLen = 0
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitCONV_T(m_pargs->m_pMarshalInfo->GetHiddenLengthParamElementType());
+ pslILEmit->EmitSTLOC(m_pargs->m_pMarshalInfo->GetHiddenLengthNativeHome());
+
+ // if (array == null) goto SkipGetLength
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pSkipGetLengthLabel);
+
+ // nativeLen = array.Length
+ // SkipGetLength:
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDLEN();
+ pslILEmit->EmitCONV_T(m_pargs->m_pMarshalInfo->GetHiddenLengthParamElementType());
+ pslILEmit->EmitSTLOC(m_pargs->m_pMarshalInfo->GetHiddenLengthNativeHome());
+ pslILEmit->EmitLabel(pSkipGetLengthLabel);
+
+ // nativeLenParam = nativeLen
+ LocalDesc nativeParamType(m_pargs->m_pMarshalInfo->GetHiddenLengthParamElementType());
+ pslILEmit->EmitLDARG(m_pargs->m_pMarshalInfo->HiddenLengthParamIndex());
+ pslILEmit->EmitLDLOC(m_pargs->m_pMarshalInfo->GetHiddenLengthNativeHome());
+ pslILEmit->EmitSTIND_T(&nativeParamType);
+ }
+ }
+
+ if (!CanUsePinnedArray())
+ {
+ ILMngdMarshaler::EmitConvertSpaceCLRToNative(pslILEmit);
+ }
+}
+
+void ILHiddenLengthArrayMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!CanUsePinnedArray())
+ {
+ if (m_pargs->na.m_vt == VTHACK_REDIRECTEDTYPE &&
+ (m_pargs->na.m_redirectedTypeIndex == WinMDAdapter::RedirectedTypeIndex_System_Uri ||
+ m_pargs->na.m_redirectedTypeIndex == WinMDAdapter::RedirectedTypeIndex_System_Collections_Specialized_NotifyCollectionChangedEventArgs ||
+ m_pargs->na.m_redirectedTypeIndex == WinMDAdapter::RedirectedTypeIndex_System_ComponentModel_PropertyChangedEventArgs))
+ {
+ // System.Uri, NotifyCollectionChangedEventArgs, and PropertyChangedEventArgs don't live in mscorlib, so there's no marshaling helper to call - inline the loop
+ DWORD dwLoopCounterLocalNum = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+ DWORD dwNativePtrLocalNum = pslILEmit->NewLocal(ELEMENT_TYPE_I);
+ ILCodeLabel *pConditionLabel = pslILEmit->NewCodeLabel();
+ ILCodeLabel *pLoopBodyLabel = pslILEmit->NewCodeLabel();
+
+ // for (IntPtr ptr = pNative, int i = 0; ...
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitSTLOC(dwLoopCounterLocalNum);
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitSTLOC(dwNativePtrLocalNum);
+ pslILEmit->EmitBR(pConditionLabel);
+
+ // *ptr = EmitConvertCLR*ToWinRT*(pManaged[i]);
+ pslILEmit->EmitLabel(pLoopBodyLabel);
+ pslILEmit->EmitLDLOC(dwNativePtrLocalNum);
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDLOC(dwLoopCounterLocalNum);
+ pslILEmit->EmitLDELEM_REF();
+
+ switch (m_pargs->na.m_redirectedTypeIndex)
+ {
+ case WinMDAdapter::RedirectedTypeIndex_System_Uri:
+ ILUriMarshaler::EmitConvertCLRUriToWinRTUri(pslILEmit, m_pargs->m_pMarshalInfo->GetModule()->GetDomain());
+ break;
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Specialized_NotifyCollectionChangedEventArgs:
+ ILNCCEventArgsMarshaler::EmitConvertCLREventArgsToWinRTEventArgs(pslILEmit, m_pargs->m_pMarshalInfo->GetModule()->GetDomain());
+ break;
+
+ case WinMDAdapter::RedirectedTypeIndex_System_ComponentModel_PropertyChangedEventArgs:
+ ILPCEventArgsMarshaler::EmitConvertCLREventArgsToWinRTEventArgs(pslILEmit, m_pargs->m_pMarshalInfo->GetModule()->GetDomain());
+ break;
+
+ default: UNREACHABLE();
+ }
+
+ pslILEmit->EmitSTIND_I();
+
+ // ... i++, ptr += IntPtr.Size ...
+ pslILEmit->EmitLDLOC(dwLoopCounterLocalNum);
+ pslILEmit->EmitLDC(1);
+ pslILEmit->EmitADD();
+ pslILEmit->EmitSTLOC(dwLoopCounterLocalNum);
+ pslILEmit->EmitLDLOC(dwNativePtrLocalNum);
+ pslILEmit->EmitLDC(sizeof(LPVOID));
+ pslILEmit->EmitADD();
+ pslILEmit->EmitSTLOC(dwNativePtrLocalNum);
+
+ // ... i < pManaged.Length; ...
+ pslILEmit->EmitLabel(pConditionLabel);
+ pslILEmit->EmitLDLOC(dwLoopCounterLocalNum);
+ EmitLoadNativeArrayLength(pslILEmit);
+ pslILEmit->EmitBLT(pLoopBodyLabel);
+ }
+ else
+ {
+ ILMngdMarshaler::EmitConvertContentsCLRToNative(pslILEmit);
+ }
+ }
+}
+
+void ILHiddenLengthArrayMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!CanUsePinnedArray())
+ {
+ if (m_pargs->na.m_vt == VTHACK_REDIRECTEDTYPE &&
+ (m_pargs->na.m_redirectedTypeIndex == WinMDAdapter::RedirectedTypeIndex_System_Uri ||
+ m_pargs->na.m_redirectedTypeIndex == WinMDAdapter::RedirectedTypeIndex_System_Collections_Specialized_NotifyCollectionChangedEventArgs ||
+ m_pargs->na.m_redirectedTypeIndex == WinMDAdapter::RedirectedTypeIndex_System_ComponentModel_PropertyChangedEventArgs))
+ {
+ // System.Uri, NotifyCollectionChangedEventArgs, and PropertyChangedEventArgs don't live in mscorlib, so there's no marshaling helper to call - inline the loop
+ DWORD dwLoopCounterLocalNum = pslILEmit->NewLocal(ELEMENT_TYPE_I4);
+ DWORD dwNativePtrLocalNum = pslILEmit->NewLocal(ELEMENT_TYPE_I);
+ ILCodeLabel *pConditionLabel = pslILEmit->NewCodeLabel();
+ ILCodeLabel *pLoopBodyLabel = pslILEmit->NewCodeLabel();
+
+ // for (IntPtr ptr = pNative, int i = 0; ...
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitSTLOC(dwLoopCounterLocalNum);
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitSTLOC(dwNativePtrLocalNum);
+ pslILEmit->EmitBR(pConditionLabel);
+
+ // pManaged[i] = EmitConvertWinRT*ToCLR*(*ptr);
+ pslILEmit->EmitLabel(pLoopBodyLabel);
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitLDLOC(dwLoopCounterLocalNum);
+ pslILEmit->EmitLDLOC(dwNativePtrLocalNum);
+ pslILEmit->EmitLDIND_I();
+
+ switch (m_pargs->na.m_redirectedTypeIndex)
+ {
+ case WinMDAdapter::RedirectedTypeIndex_System_Uri:
+ ILUriMarshaler::EmitConvertWinRTUriToCLRUri(pslILEmit, m_pargs->m_pMarshalInfo->GetModule()->GetDomain());
+ break;
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Specialized_NotifyCollectionChangedEventArgs:
+ ILNCCEventArgsMarshaler::EmitConvertWinRTEventArgsToCLREventArgs(pslILEmit, m_pargs->m_pMarshalInfo->GetModule()->GetDomain());
+ break;
+
+ case WinMDAdapter::RedirectedTypeIndex_System_ComponentModel_PropertyChangedEventArgs:
+ ILPCEventArgsMarshaler::EmitConvertWinRTEventArgsToCLREventArgs(pslILEmit, m_pargs->m_pMarshalInfo->GetModule()->GetDomain());
+ break;
+
+ default: UNREACHABLE();
+ }
+
+ pslILEmit->EmitSTELEM_REF();
+
+ // ... i++, ptr += IntPtr.Size)
+ pslILEmit->EmitLDLOC(dwLoopCounterLocalNum);
+ pslILEmit->EmitLDC(1);
+ pslILEmit->EmitADD();
+ pslILEmit->EmitSTLOC(dwLoopCounterLocalNum);
+ pslILEmit->EmitLDLOC(dwNativePtrLocalNum);
+ pslILEmit->EmitLDC(sizeof(LPVOID));
+ pslILEmit->EmitADD();
+ pslILEmit->EmitSTLOC(dwNativePtrLocalNum);
+
+ // ... i < pManaged.Length; ...
+ pslILEmit->EmitLabel(pConditionLabel);
+ pslILEmit->EmitLDLOC(dwLoopCounterLocalNum);
+ EmitLoadNativeArrayLength(pslILEmit);
+ pslILEmit->EmitBLT(pLoopBodyLabel);
+ }
+ else
+ {
+ ILMngdMarshaler::EmitConvertContentsNativeToCLR(pslILEmit);
+ }
+ }
+}
+
+void ILHiddenLengthArrayMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitClearNativeContents(pslILEmit);
+
+ if (!CanUsePinnedArray())
+ {
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitCALL(pslILEmit->GetToken(GetClearNativeMethod()), 1, 0);
+ }
+}
+
+void ILHiddenLengthArrayMarshaler::EmitClearNativeContents(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!CanUsePinnedArray())
+ {
+ MethodDesc *pMD = GetClearNativeContentsMethod();
+ if (pMD != NULL)
+ {
+ MetaSig sig(pMD);
+ UINT numArgs = sig.NumFixedArgs();
+
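+ // Some cleanup helpers take the marshaler as an extra first argument (3 fixed
+ // args); others need only the native array address and element count.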
+ if (numArgs == 3)
+ {
+ EmitLoadMngdMarshaler(pslILEmit);
+ }
+ else
+ {
+ _ASSERTE(numArgs == 2);
+ }
+
+ EmitLoadNativeHomeAddr(pslILEmit);
+ EmitLoadNativeArrayLength(pslILEmit);
+ pslILEmit->EmitCALL(pslILEmit->GetToken(pMD), numArgs, 0);
+ }
+ }
+}
+
+// Determine if we can simply pin the managed array, rather than doing a full marshal
+bool ILHiddenLengthArrayMarshaler::CanUsePinnedArray()
+{
+ STANDARD_VM_CONTRACT;
+
+ // If the array is only going from managed to native, contains only blittable data, and
+ // we know where that data is located within the array, then we can take the fast path
+ if (!IsCLRToNative(m_dwMarshalFlags))
+ {
+ return false;
+ }
+
+ if (m_pargs->na.m_vt != VTHACK_BLITTABLERECORD)
+ {
+ return false;
+ }
+
+ if (IsByref(m_dwMarshalFlags))
+ {
+ return false;
+ }
+
+ if (!IsIn(m_dwMarshalFlags))
+ {
+ return false;
+ }
+
+ if (IsRetval(m_dwMarshalFlags))
+ {
+ return false;
+ }
+
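+ // m_optionalbaseoffset is the offset from the array object to its data; zero
+ // means the offset is not statically known, so we cannot form the pinned pointer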
+ if (m_pargs->na.m_optionalbaseoffset == 0)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+void ILHiddenLengthArrayMarshaler::EmitLoadNativeArrayLength(ILCodeStream *pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ // For return values, the native length won't yet have been marshaled back to its managed
+ // home, so it needs to be read directly
+ if (IsRetval(m_dwMarshalFlags))
+ {
+ pslILEmit->EmitLDLOC(m_pargs->m_pMarshalInfo->GetHiddenLengthNativeHome());
+ }
+ else
+ {
+ pslILEmit->EmitLDLOC(m_pargs->m_pMarshalInfo->GetHiddenLengthManagedHome());
+ }
+
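+ // The hidden length may be unsigned or wider than 32 bits; convert to int32
+ // with an overflow check before using it as a managed array length.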
+ pslILEmit->EmitCONV_OVF_I4();
+}
+
+MethodDesc *ILHiddenLengthArrayMarshaler::GetConvertContentsToManagedMethod()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (m_pargs->na.m_vt == VTHACK_REDIRECTEDTYPE)
+ {
+ switch (m_pargs->na.m_redirectedTypeIndex)
+ {
+ case WinMDAdapter::RedirectedTypeIndex_System_DateTimeOffset:
+ return MscorlibBinder::GetMethod(METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_MANAGED_DATETIME);
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Type:
+ return MscorlibBinder::GetMethod(METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_MANAGED_TYPE);
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Exception:
+ return MscorlibBinder::GetMethod(METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_MANAGED_EXCEPTION);
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Nullable:
+ {
+ MethodDesc *pMD = MscorlibBinder::GetMethod(METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_MANAGED_NULLABLE);
+ return GetExactMarshalerMethod(pMD);
+ }
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_KeyValuePair:
+ {
+ MethodDesc *pMD = MscorlibBinder::GetMethod(METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_MANAGED_KEYVALUEPAIR);
+ return GetExactMarshalerMethod(pMD);
+ }
+
+ default:
+ UNREACHABLE_MSG("Unrecognized redirected type.");
+ }
+ }
+ return ILMngdMarshaler::GetConvertContentsToManagedMethod();
+}
+
+MethodDesc *ILHiddenLengthArrayMarshaler::GetConvertContentsToNativeMethod()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (m_pargs->na.m_vt == VTHACK_REDIRECTEDTYPE)
+ {
+ switch (m_pargs->na.m_redirectedTypeIndex)
+ {
+ case WinMDAdapter::RedirectedTypeIndex_System_DateTimeOffset:
+ return MscorlibBinder::GetMethod(METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_NATIVE_DATETIME);
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Type:
+ return MscorlibBinder::GetMethod(METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_NATIVE_TYPE);
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Exception:
+ return MscorlibBinder::GetMethod(METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_NATIVE_EXCEPTION);
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Nullable:
+ {
+ MethodDesc *pMD = MscorlibBinder::GetMethod(METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_NATIVE_NULLABLE);
+ return GetExactMarshalerMethod(pMD);
+ }
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_KeyValuePair:
+ {
+ MethodDesc *pMD = MscorlibBinder::GetMethod(METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_NATIVE_KEYVALUEPAIR);
+ return GetExactMarshalerMethod(pMD);
+ }
+
+ default:
+ UNREACHABLE_MSG("Unrecognized redirected type.");
+ }
+ }
+ return ILMngdMarshaler::GetConvertContentsToNativeMethod();
+}
+
+MethodDesc *ILHiddenLengthArrayMarshaler::GetClearNativeContentsMethod()
+{
+ switch (m_pargs->na.m_vt)
+ {
+ // HSTRINGs, interface pointers, and non-blittable structs need contents cleanup
+ case VTHACK_HSTRING:
+ case VTHACK_INSPECTABLE:
+ case VTHACK_NONBLITTABLERECORD:
+ break;
+
+ // blittable structs don't need contents cleanup
+ case VTHACK_BLITTABLERECORD:
+ return NULL;
+
+ case VTHACK_REDIRECTEDTYPE:
+ {
+ switch (m_pargs->na.m_redirectedTypeIndex)
+ {
+ // System.Type, Uri, Nullable, KeyValuePair, NCCEventArgs, and PCEventArgs need cleanup
+ case WinMDAdapter::RedirectedTypeIndex_System_Type:
+ return MscorlibBinder::GetMethod(METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CLEAR_NATIVE_CONTENTS_TYPE);
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Uri:
+ case WinMDAdapter::RedirectedTypeIndex_System_Nullable:
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_KeyValuePair:
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Specialized_NotifyCollectionChangedEventArgs:
+ case WinMDAdapter::RedirectedTypeIndex_System_ComponentModel_PropertyChangedEventArgs:
+ break;
+
+ // other redirected types don't
+ default:
+ return NULL;
+ }
+ break;
+ }
+
+ default:
+ UNREACHABLE_MSG("Unexpected hidden-length array element VT");
+ }
+
+ return ILMngdMarshaler::GetClearNativeContentsMethod();
+}
+
+MethodDesc *ILHiddenLengthArrayMarshaler::GetExactMarshalerMethod(MethodDesc *pGenericMD)
+{
+ STANDARD_VM_CONTRACT;
+
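+ // MngdHiddenLengthArrayMarshaler helper methods are generic - find/create the exact method.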
+ return MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pGenericMD,
+ pGenericMD->GetMethodTable(),
+ FALSE, // forceBoxedEntryPoint
+ m_pargs->na.m_pMT->GetInstantiation(), // methodInst
+ FALSE, // allowInstParam
+ TRUE); // forceRemotableMethod
+}
+
+#ifndef CROSSGEN_COMPILE
+
+FCIMPL4(void, MngdHiddenLengthArrayMarshaler::CreateMarshaler, MngdHiddenLengthArrayMarshaler* pThis, MethodTable* pMT, SIZE_T cbElementSize, UINT16 vt)
+{
+ FCALL_CONTRACT;
+
+ pThis->m_pElementMT = pMT;
+ pThis->m_cbElementSize = cbElementSize;
+ pThis->m_vt = (VARTYPE)vt;
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdHiddenLengthArrayMarshaler::ConvertSpaceToNative, MngdHiddenLengthArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome)
+{
+ FCALL_CONTRACT;
+
+ BASEARRAYREF arrayRef = (BASEARRAYREF) *pManagedHome;
+ HELPER_METHOD_FRAME_BEGIN_1(arrayRef);
+
+ if (arrayRef == NULL)
+ {
+ *pNativeHome = NULL;
+ }
+ else
+ {
+ SIZE_T cbArray = pThis->GetArraySize(arrayRef->GetNumComponents());
+
+ *pNativeHome = CoTaskMemAlloc(cbArray);
+ if (*pNativeHome == NULL)
+ {
+ ThrowOutOfMemory();
+ }
+
+ // zero-initialize the array so that cleanup of a partially marshaled array
+ // sees only NULL entries, which the cleanup helpers skip
+ FillMemory(*pNativeHome, cbArray, 0);
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdHiddenLengthArrayMarshaler::ConvertContentsToNative, MngdHiddenLengthArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome)
+{
+ FCALL_CONTRACT;
+
+ struct
+ {
+ PTRARRAYREF arrayRef;
+ STRINGREF currentStringRef;
+ OBJECTREF currentObjectRef;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+ gc.arrayRef = (PTRARRAYREF)*pManagedHome;
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ if (gc.arrayRef != NULL)
+ {
+ // There are four cases:
+ // * the array is made up entirely of blittable data, in which case we can directly copy it,
+ // * it is an array of strings that need to be marshaled as HSTRINGs,
+ // * it is an array of non-blittable structures, or
+ // * it is an array of interface pointers (interface, runtime class, delegate, System.Object)
+ switch (pThis->m_vt)
+ {
+ case VTHACK_BLITTABLERECORD:
+ {
+ // Just do a raw memcpy into the array
+ SIZE_T cbArray = pThis->GetArraySize(gc.arrayRef->GetNumComponents());
+ memcpyNoGCRefs(*pNativeHome, gc.arrayRef->GetDataPtr(), cbArray);
+ break;
+ }
+
+ case VTHACK_HSTRING:
+ {
+ // Marshal a string array as an array of HSTRINGs
+ if (!WinRTSupported())
+ {
+ COMPlusThrow(kPlatformNotSupportedException, W("PlatformNotSupported_WinRT"));
+ }
+
+ HSTRING *pDestinationStrings = reinterpret_cast<HSTRING *>(*pNativeHome);
+
+ for (SIZE_T i = 0; i < gc.arrayRef->GetNumComponents(); ++i)
+ {
+ gc.currentStringRef = (STRINGREF)gc.arrayRef->GetAt(i);
+ if (gc.currentStringRef == NULL)
+ {
+ StackSString ssIndex;
+ ssIndex.Printf(W("%d"), i);
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_BADMARSHALARRAY_NULL_HSTRING, ssIndex.GetUnicode());
+ }
+
+ IfFailThrow(WindowsCreateString(gc.currentStringRef->GetBuffer(), gc.currentStringRef->GetStringLength(), &(pDestinationStrings[i])));
+ }
+ break;
+ }
+
+ case VTHACK_NONBLITTABLERECORD:
+ {
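+ // Defer to the field marshalers to convert each structure in place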
+ BYTE *pNativeStart = reinterpret_cast<BYTE *>(*pNativeHome);
+ SIZE_T managedOffset = ArrayBase::GetDataPtrOffset(gc.arrayRef->GetMethodTable());
+ SIZE_T nativeOffset = 0;
+ SIZE_T managedSize = gc.arrayRef->GetComponentSize();
+ SIZE_T nativeSize = pThis->m_pElementMT->GetNativeSize();
+ for (SIZE_T i = 0; i < gc.arrayRef->GetNumComponents(); ++i)
+ {
+ LayoutUpdateNative(reinterpret_cast<LPVOID *>(&gc.arrayRef), managedOffset, pThis->m_pElementMT, pNativeStart + nativeOffset, NULL);
+ managedOffset += managedSize;
+ nativeOffset += nativeSize;
+ }
+ break;
+ }
+
+ case VTHACK_INSPECTABLE:
+ {
+ // interface pointers
+ IUnknown **pDestinationIPs = reinterpret_cast<IUnknown **>(*pNativeHome);
+
+ // If this turns out to be a perf issue, we can precompute the ItfMarshalInfo
+ // and generate code that passes it to the marshaler at creation time.
+ ItfMarshalInfo itfInfo;
+ MarshalInfo::GetItfMarshalInfo(TypeHandle(pThis->m_pElementMT), TypeHandle(), FALSE, TRUE, MarshalInfo::MARSHAL_SCENARIO_WINRT, &itfInfo);
+
+ for (SIZE_T i = 0; i < gc.arrayRef->GetNumComponents(); ++i)
+ {
+ gc.currentObjectRef = gc.arrayRef->GetAt(i);
+ pDestinationIPs[i] = MarshalObjectToInterface(
+ &gc.currentObjectRef,
+ itfInfo.thNativeItf.GetMethodTable(),
+ itfInfo.thClass.GetMethodTable(),
+ itfInfo.dwFlags);
+ }
+ break;
+ }
+
+ default:
+ UNREACHABLE_MSG("Unrecognized array element VARTYPE");
+
+ }
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL4(void, MngdHiddenLengthArrayMarshaler::ConvertSpaceToManaged, MngdHiddenLengthArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome, INT32 cElements)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ if (*pNativeHome == NULL)
+ {
+ SetObjectReference(pManagedHome, NULL, GetAppDomain());
+ }
+ else
+ {
+ TypeHandle elementType(pThis->m_pElementMT);
+ TypeHandle arrayType = ClassLoader::LoadArrayTypeThrowing(elementType);
+ SetObjectReference(pManagedHome, AllocateArrayEx(arrayType, &cElements, 1), GetAppDomain());
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdHiddenLengthArrayMarshaler::ConvertContentsToManaged, MngdHiddenLengthArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome)
+{
+ FCALL_CONTRACT;
+
+ struct
+ {
+ PTRARRAYREF arrayRef;
+ STRINGREF stringRef;
+ OBJECTREF objectRef;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+ gc.arrayRef = (PTRARRAYREF)*pManagedHome;
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ if (*pNativeHome != NULL)
+ {
+ // There are four cases:
+ // * the array is made up entirely of blittable data, in which case we can directly copy it,
+ // * it is an array of strings that need to be marshaled as HSTRINGs,
+ // * it is an array of non-blittable structures, or
+ // * it is an array of interface pointers (interface, runtime class, delegate, System.Object)
+ switch (pThis->m_vt)
+ {
+ case VTHACK_BLITTABLERECORD:
+ {
+ // Just do a raw memcpy into the array
+ SIZE_T cbArray = pThis->GetArraySize(gc.arrayRef->GetNumComponents());
+ memcpyNoGCRefs(gc.arrayRef->GetDataPtr(), *pNativeHome, cbArray);
+ break;
+ }
+
+ case VTHACK_HSTRING:
+ {
+ // Strings are in HSTRING format on the native side
+ if (!WinRTSupported())
+ {
+ COMPlusThrow(kPlatformNotSupportedException, W("PlatformNotSupported_WinRT"));
+ }
+
+ HSTRING *pSourceStrings = reinterpret_cast<HSTRING *>(*pNativeHome);
+
+ for (SIZE_T i = 0; i < gc.arrayRef->GetNumComponents(); ++i)
+ {
+ // NULL HSTRINGs are equivalent to empty strings
+ UINT32 cchString = 0;
+ LPCWSTR pwszString = W("");
+
+ if (pSourceStrings[i] != NULL)
+ {
+ pwszString = WindowsGetStringRawBuffer(pSourceStrings[i], &cchString);
+ }
+
+ gc.stringRef = StringObject::NewString(pwszString, cchString);
+ gc.arrayRef->SetAt(i, gc.stringRef);
+ }
+ break;
+ }
+
+ case VTHACK_NONBLITTABLERECORD:
+ {
+ // Defer to the field marshaler to handle structures
+ BYTE *pNativeStart = reinterpret_cast<BYTE *>(*pNativeHome);
+ SIZE_T managedOffset = ArrayBase::GetDataPtrOffset(gc.arrayRef->GetMethodTable());
+ SIZE_T nativeOffset = 0;
+ SIZE_T managedSize = gc.arrayRef->GetComponentSize();
+ SIZE_T nativeSize = pThis->m_pElementMT->GetNativeSize();
+ for (SIZE_T i = 0; i < gc.arrayRef->GetNumComponents(); ++i)
+ {
+ LayoutUpdateCLR(reinterpret_cast<LPVOID *>(&gc.arrayRef), managedOffset, pThis->m_pElementMT, pNativeStart + nativeOffset);
+ managedOffset += managedSize;
+ nativeOffset += nativeSize;
+ }
+ break;
+ }
+
+ case VTHACK_INSPECTABLE:
+ {
+ // interface pointers
+ IUnknown **pSourceIPs = reinterpret_cast<IUnknown **>(*pNativeHome);
+
+ // If this turns out to be a perf issue, we can precompute the ItfMarshalInfo
+ // and generate code that passes it to the marshaler at creation time.
+ ItfMarshalInfo itfInfo;
+ MarshalInfo::GetItfMarshalInfo(TypeHandle(pThis->m_pElementMT), TypeHandle(), FALSE, TRUE, MarshalInfo::MARSHAL_SCENARIO_WINRT, &itfInfo);
+
+ for (SIZE_T i = 0; i < gc.arrayRef->GetNumComponents(); ++i)
+ {
+ gc.objectRef = gc.arrayRef->GetAt(i);
+ UnmarshalObjectFromInterface(
+ &gc.objectRef,
+ &pSourceIPs[i],
+ itfInfo.thItf.GetMethodTable(),
+ itfInfo.thClass.GetMethodTable(),
+ itfInfo.dwFlags);
+ gc.arrayRef->SetAt(i, gc.objectRef);
+ }
+ break;
+ }
+
+ default:
+ UNREACHABLE_MSG("Unrecognized array element VARTYPE");
+ }
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdHiddenLengthArrayMarshaler::ClearNativeContents, MngdHiddenLengthArrayMarshaler* pThis, void** pNativeHome, INT32 cElements)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ if (*pNativeHome != NULL)
+ {
+ pThis->DoClearNativeContents(pNativeHome, cElements);
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+#endif // CROSSGEN_COMPILE
+
+
+SIZE_T MngdHiddenLengthArrayMarshaler::GetArraySize(SIZE_T elements)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE_MSG(m_cbElementSize != 0, "You have to set the native size for your array element type");
+
+ SIZE_T cbArray;
+
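+ // Overflow-checked multiply: e.g. on a 32-bit host, 0x40000000 elements of size 8
+ // would wrap SIZE_T, so the computation is rejected rather than undersizing the buffer.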
+ if (!ClrSafeInt<SIZE_T>::multiply(elements, m_cbElementSize, cbArray))
+ {
+ COMPlusThrow(kArgumentException, IDS_EE_STRUCTARRAYTOOLARGE);
+ }
+
+ // This array size limit is carried over from the equivalent limit for other array marshaling code
+ if (cbArray > MAX_SIZE_FOR_INTEROP)
+ {
+ COMPlusThrow(kArgumentException, IDS_EE_STRUCTARRAYTOOLARGE);
+ }
+
+ return cbArray;
+}
+
+#ifndef CROSSGEN_COMPILE
+void MngdHiddenLengthArrayMarshaler::DoClearNativeContents(void** pNativeHome, INT32 cElements)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pNativeHome != NULL);
+ }
+ CONTRACTL_END;
+
+ VARTYPE vt = m_vt;
+ if (vt == VTHACK_REDIRECTEDTYPE)
+ {
+ // the redirected types that use this helper are interface pointers on the WinRT side
+ vt = VTHACK_INSPECTABLE;
+ }
+
+ switch (vt)
+ {
+ case VTHACK_HSTRING:
+ {
+ if (WinRTSupported())
+ {
+ HSTRING *pStrings = reinterpret_cast<HSTRING *>(*pNativeHome);
+ for (INT32 i = 0; i < cElements; ++i)
+ {
+ if (pStrings[i] != NULL)
+ {
+ WindowsDeleteString(pStrings[i]);
+ }
+ }
+ }
+ break;
+ }
+
+ case VTHACK_NONBLITTABLERECORD:
+ {
+ SIZE_T cbArray = GetArraySize(cElements);
+ BYTE *pNativeCurrent = reinterpret_cast<BYTE *>(*pNativeHome);
+ BYTE *pNativeEnd = pNativeCurrent + cbArray;
+
+ while (pNativeCurrent < pNativeEnd)
+ {
+ LayoutDestroyNative(pNativeCurrent, m_pElementMT);
+ pNativeCurrent += m_pElementMT->GetNativeSize();
+ }
+ break;
+ }
+
+ case VTHACK_INSPECTABLE:
+ {
+ IInspectable **pIPs = reinterpret_cast<IInspectable **>(*pNativeHome);
+ for (INT32 i = 0; i < cElements; ++i)
+ {
+ if (pIPs[i] != NULL)
+ {
+ SafeRelease(pIPs[i]);
+ }
+ }
+ break;
+ }
+
+ default:
+ UNREACHABLE_MSG("Unexpected hidden-length array element VT");
+ }
+}
+#endif //CROSSGEN_COMPILE
+#endif // FEATURE_COMINTEROP
+
+void ILReferenceCustomMarshaler::EmitCreateMngdMarshaler(ILCodeStream* pslILEmit)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(-1 == m_dwMngdMarshalerLocalNum);
+ }
+ CONTRACTL_END;
+
+ //
+ // allocate space for marshaler
+ //
+
+ m_dwMngdMarshalerLocalNum = pslILEmit->NewLocal(ELEMENT_TYPE_I);
+
+ pslILEmit->EmitLDC(sizeof(MngdRefCustomMarshaler));
+ pslILEmit->EmitLOCALLOC();
+ pslILEmit->EmitSTLOC(m_dwMngdMarshalerLocalNum);
+
+ pslILEmit->EmitLDLOC(m_dwMngdMarshalerLocalNum); // arg to CreateMarshaler
+
+ //
+ // call CreateCustomMarshalerHelper
+ //
+
+ pslILEmit->EmitLDTOKEN(pslILEmit->GetToken(m_pargs->rcm.m_pMD));
+ pslILEmit->EmitCALL(METHOD__METHOD_HANDLE__GETVALUEINTERNAL, 1, 1);
+
+ pslILEmit->EmitLDC(m_pargs->rcm.m_paramToken);
+
+ pslILEmit->EmitLDTOKEN(pslILEmit->GetToken(TypeHandle::FromPtr(m_pargs->rcm.m_hndManagedType)));
+ pslILEmit->EmitCALL(METHOD__RT_TYPE_HANDLE__GETVALUEINTERNAL, 1, 1);
+
+ pslILEmit->EmitCALL(METHOD__STUBHELPERS__CREATE_CUSTOM_MARSHALER_HELPER, 3, 1); // arg to CreateMarshaler
+
+ //
+ // call MngdRefCustomMarshaler::CreateMarshaler
+ //
+
+ pslILEmit->EmitCALL(METHOD__MNGD_REF_CUSTOM_MARSHALER__CREATE_MARSHALER, 2, 0);
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+FCIMPL2(void, MngdRefCustomMarshaler::CreateMarshaler, MngdRefCustomMarshaler* pThis, void* pCMHelper)
+{
+ FCALL_CONTRACT;
+
+ pThis->m_pCMHelper = (CustomMarshalerHelper*)pCMHelper;
+}
+FCIMPLEND
+
+
+FCIMPL3(void, MngdRefCustomMarshaler::ConvertContentsToNative, MngdRefCustomMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pManagedHome));
+ }
+ CONTRACTL_END;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ *pNativeHome = pThis->m_pCMHelper->InvokeMarshalManagedToNativeMeth(*pManagedHome);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+FCIMPL3(void, MngdRefCustomMarshaler::ConvertContentsToManaged, MngdRefCustomMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pManagedHome));
+ }
+ CONTRACTL_END;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ SetObjectReference(pManagedHome, pThis->m_pCMHelper->InvokeMarshalNativeToManagedMeth(*pNativeHome), GetAppDomain());
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdRefCustomMarshaler::ClearNative, MngdRefCustomMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ pThis->m_pCMHelper->InvokeCleanUpNativeMeth(*pNativeHome);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, MngdRefCustomMarshaler::ClearManaged, MngdRefCustomMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pManagedHome));
+ }
+ CONTRACTL_END;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ pThis->m_pCMHelper->InvokeCleanUpManagedMeth(*pManagedHome);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+#endif // CROSSGEN_COMPILE
+
+
+#ifdef FEATURE_COMINTEROP
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// ILUriMarshaler implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+LocalDesc ILUriMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+LocalDesc ILUriMarshaler::GetManagedType()
+{
+ STANDARD_VM_CONTRACT;
+ BaseDomain* pDomain = m_pargs->m_pMarshalInfo->GetModule()->GetDomain();
+ TypeHandle hndUriType = pDomain->GetMarshalingData()->GetUriMarshalingInfo()->GetSystemUriType();
+
+ return LocalDesc(hndUriType); // System.Uri
+}
+
+bool ILUriMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+// Note that this method expects the CLR Uri on top of the evaluation stack and leaves the WinRT Uri there.
+//static
+void ILUriMarshaler::EmitConvertCLRUriToWinRTUri(ILCodeStream* pslILEmit, BaseDomain* pDomain)
+{
+ STANDARD_VM_CONTRACT;
+
+ UriMarshalingInfo* marshalingInfo = pDomain->GetMarshalingData()->GetUriMarshalingInfo();
+
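+ // DUP/BRTRUE null check: if the Uri on the stack is null, replace it with a null
+ // native pointer; otherwise convert it via its OriginalString.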
+ ILCodeLabel *pNotNullLabel = pslILEmit->NewCodeLabel();
+ ILCodeLabel *pDoneLabel = pslILEmit->NewCodeLabel();
+
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitBRTRUE(pNotNullLabel);
+
+ pslILEmit->EmitPOP();
+ pslILEmit->EmitLoadNullPtr();
+ pslILEmit->EmitBR(pDoneLabel);
+
+ pslILEmit->EmitLabel(pNotNullLabel);
+
+ // System.Uri.get_OriginalString()
+ MethodDesc* pSystemUriOriginalStringMD = marshalingInfo->GetSystemUriOriginalStringMD();
+ pslILEmit->EmitCALL(pslILEmit->GetToken(pSystemUriOriginalStringMD), 1, 1);
+
+ pslILEmit->EmitCALL(METHOD__URIMARSHALER__CREATE_NATIVE_URI_INSTANCE, 1, 1);
+
+ pslILEmit->EmitLabel(pDoneLabel);
+}
+
+void ILUriMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadManagedValue(pslILEmit);
+ EmitConvertCLRUriToWinRTUri(pslILEmit, m_pargs->m_pMarshalInfo->GetModule()->GetDomain());
+ EmitStoreNativeValue(pslILEmit);
+}
+
+// Note that this method expects the WinRT Uri on top of the evaluation stack and leaves the CLR Uri there.
+//static
+void ILUriMarshaler::EmitConvertWinRTUriToCLRUri(ILCodeStream* pslILEmit, BaseDomain* pDomain)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc* pSystemUriCtorMD = pDomain->GetMarshalingData()->GetUriMarshalingInfo()->GetSystemUriCtorMD();
+
+ ILCodeLabel *pNotNullLabel = pslILEmit->NewCodeLabel();
+ ILCodeLabel *pDoneLabel = pslILEmit->NewCodeLabel();
+
+ pslILEmit->EmitDUP();
+ pslILEmit->EmitBRTRUE(pNotNullLabel);
+
+ pslILEmit->EmitPOP();
+ pslILEmit->EmitLDNULL();
+ pslILEmit->EmitBR(pDoneLabel);
+
+ pslILEmit->EmitLabel(pNotNullLabel);
+
+ // string UriMarshaler.GetRawUriFromNative(IntPtr)
+ pslILEmit->EmitCALL(METHOD__URIMARSHALER__GET_RAWURI_FROM_NATIVE, 1, 1);
+
+ // System.Uri..ctor(string)
+ pslILEmit->EmitNEWOBJ(pslILEmit->GetToken(pSystemUriCtorMD), 1);
+
+ pslILEmit->EmitLabel(pDoneLabel);
+}
+
+void ILUriMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeValue(pslILEmit);
+ EmitConvertWinRTUriToCLRUri(pslILEmit, m_pargs->m_pMarshalInfo->GetModule()->GetDomain());
+ EmitStoreManagedValue(pslILEmit);
+}
+
+void ILUriMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+ EmitInterfaceClearNative(pslILEmit);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// ILNCCEventArgsMarshaler implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+LocalDesc ILNCCEventArgsMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+LocalDesc ILNCCEventArgsMarshaler::GetManagedType()
+{
+ STANDARD_VM_CONTRACT;
+
+ BaseDomain *pDomain = m_pargs->m_pMarshalInfo->GetModule()->GetDomain();
+ TypeHandle hndNCCEventArgType = pDomain->GetMarshalingData()->GetEventArgsMarshalingInfo()->GetSystemNCCEventArgsType();
+
+ return LocalDesc(hndNCCEventArgType); // System.Collections.Specialized.NotifyCollectionChangedEventArgs
+}
+
+bool ILNCCEventArgsMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+// Note that this method expects the CLR NotifyCollectionChangedEventArgs on top of the evaluation stack and
+// leaves the WinRT NotifyCollectionChangedEventArgs IP there.
+//static
+void ILNCCEventArgsMarshaler::EmitConvertCLREventArgsToWinRTEventArgs(ILCodeStream *pslILEmit, BaseDomain *pDomain)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *pConvertMD = pDomain->GetMarshalingData()->GetEventArgsMarshalingInfo()->GetSystemNCCEventArgsToWinRTNCCEventArgsMD();
+
+ // IntPtr System.Runtime.InteropServices.WindowsRuntime.NotifyCollectionChangedEventArgsMarshaler.ConvertToNative(NotifyCollectionChangedEventArgs)
+ pslILEmit->EmitCALL(pslILEmit->GetToken(pConvertMD), 1, 1);
+}
+
+void ILNCCEventArgsMarshaler::EmitConvertContentsCLRToNative(ILCodeStream *pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadManagedValue(pslILEmit);
+ EmitConvertCLREventArgsToWinRTEventArgs(pslILEmit, m_pargs->m_pMarshalInfo->GetModule()->GetDomain());
+ EmitStoreNativeValue(pslILEmit);
+}
+
+// Note that this method expects the WinRT NotifyCollectionChangedEventArgs on top of the evaluation stack and
+// leaves the CLR NotifyCollectionChangedEventArgs there.
+//static
+void ILNCCEventArgsMarshaler::EmitConvertWinRTEventArgsToCLREventArgs(ILCodeStream* pslILEmit, BaseDomain* pDomain)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *pConvertMD = pDomain->GetMarshalingData()->GetEventArgsMarshalingInfo()->GetWinRTNCCEventArgsToSystemNCCEventArgsMD();
+
+ // NotifyCollectionChangedEventArgs System.Runtime.InteropServices.WindowsRuntime.NotifyCollectionChangedEventArgsMarshaler.ConvertToManaged(IntPtr)
+ pslILEmit->EmitCALL(pslILEmit->GetToken(pConvertMD), 1, 1);
+}
+
+void ILNCCEventArgsMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeValue(pslILEmit);
+ EmitConvertWinRTEventArgsToCLREventArgs(pslILEmit, m_pargs->m_pMarshalInfo->GetModule()->GetDomain());
+ EmitStoreManagedValue(pslILEmit);
+}
+
+void ILNCCEventArgsMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+ EmitInterfaceClearNative(pslILEmit);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// ILPCEventArgsMarshaler implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+LocalDesc ILPCEventArgsMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+LocalDesc ILPCEventArgsMarshaler::GetManagedType()
+{
+ STANDARD_VM_CONTRACT;
+
+ BaseDomain *pDomain = m_pargs->m_pMarshalInfo->GetModule()->GetDomain();
+ TypeHandle hndPCEventArgType = pDomain->GetMarshalingData()->GetEventArgsMarshalingInfo()->GetSystemPCEventArgsType();
+
+ return LocalDesc(hndPCEventArgType); // System.ComponentModel.PropertyChangedEventArgs
+}
+
+bool ILPCEventArgsMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+// Note that this method expects the CLR PropertyChangedEventArgs on top of the evaluation stack and
+// leaves the WinRT PropertyChangedEventArgs IP there.
+//static
+void ILPCEventArgsMarshaler::EmitConvertCLREventArgsToWinRTEventArgs(ILCodeStream *pslILEmit, BaseDomain *pDomain)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *pConvertMD = pDomain->GetMarshalingData()->GetEventArgsMarshalingInfo()->GetSystemPCEventArgsToWinRTPCEventArgsMD();
+
+ // IntPtr System.Runtime.InteropServices.WindowsRuntime.PropertyChangedEventArgsMarshaler.ConvertToNative(PropertyChangedEventArgs)
+ pslILEmit->EmitCALL(pslILEmit->GetToken(pConvertMD), 1, 1);
+}
+
+void ILPCEventArgsMarshaler::EmitConvertContentsCLRToNative(ILCodeStream *pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadManagedValue(pslILEmit);
+ EmitConvertCLREventArgsToWinRTEventArgs(pslILEmit, m_pargs->m_pMarshalInfo->GetModule()->GetDomain());
+ EmitStoreNativeValue(pslILEmit);
+}
+
+// Note that this method expects the WinRT PropertyChangedEventArgs on top of the evaluation stack and
+// leaves the CLR PropertyChangedEventArgs there.
+//static
+void ILPCEventArgsMarshaler::EmitConvertWinRTEventArgsToCLREventArgs(ILCodeStream* pslILEmit, BaseDomain* pDomain)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *pConvertMD = pDomain->GetMarshalingData()->GetEventArgsMarshalingInfo()->GetWinRTPCEventArgsToSystemPCEventArgsMD();
+
+ // PropertyChangedEventArgs System.Runtime.InteropServices.WindowsRuntime.PropertyChangedEventArgsMarshaler.ConvertToManaged(IntPtr)
+ pslILEmit->EmitCALL(pslILEmit->GetToken(pConvertMD), 1, 1);
+}
+
+void ILPCEventArgsMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeValue(pslILEmit);
+ EmitConvertWinRTEventArgsToCLREventArgs(pslILEmit, m_pargs->m_pMarshalInfo->GetModule()->GetDomain());
+ EmitStoreManagedValue(pslILEmit);
+}
+
+void ILPCEventArgsMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+ EmitInterfaceClearNative(pslILEmit);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// ILDateTimeMarshaler implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+LocalDesc ILDateTimeMarshaler::GetNativeType()
+{
+ STANDARD_VM_CONTRACT;
+ return LocalDesc(MscorlibBinder::GetClass(CLASS__DATETIMENATIVE));
+}
+
+LocalDesc ILDateTimeMarshaler::GetManagedType()
+{
+ STANDARD_VM_CONTRACT;
+ return LocalDesc(MscorlibBinder::GetClass(CLASS__DATE_TIME_OFFSET));
+}
+
+bool ILDateTimeMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return false;
+}
+
+void ILDateTimeMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pslILEmit));
+ }
+ CONTRACTL_END;
+
+ // DateTimeOffsetMarshaler.ConvertToNative(ref managedDTO, out nativeTicks);
+ EmitLoadManagedHomeAddr(pslILEmit);
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__DATETIMEOFFSETMARSHALER__CONVERT_TO_NATIVE, 2, 0);
+}
+
+void ILDateTimeMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ // DateTimeOffsetMarshaler.ConvertToManaged(out managedLocalDTO, ref nativeTicks);
+ EmitLoadManagedHomeAddr(pslILEmit);
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__DATETIMEOFFSETMARSHALER__CONVERT_TO_MANAGED, 2, 0);
+}
+
+void ILDateTimeMarshaler::EmitReInitNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitINITOBJ(pslILEmit->GetToken(MscorlibBinder::GetClass(CLASS__DATETIMENATIVE)));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// ILNullableMarshaler implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+LocalDesc ILNullableMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+LocalDesc ILNullableMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc(m_pargs->m_pMT);
+}
+
+bool ILNullableMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+void ILNullableMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pslILEmit));
+ }
+ CONTRACTL_END;
+
+ // pNative = NullableMarshaler<T>.ConvertToNative(ref pManaged);
+ EmitLoadManagedHomeAddr(pslILEmit);
+
+ MethodDesc *pMD = GetExactMarshalerMethod(MscorlibBinder::GetMethod(METHOD__NULLABLEMARSHALER__CONVERT_TO_NATIVE));
+ pslILEmit->EmitCALL(pslILEmit->GetToken(pMD), 1, 1);
+
+ EmitStoreNativeValue(pslILEmit);
+}
+
+void ILNullableMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ // pManaged = NullableMarshaler.ConvertToManaged(pNative);
+ EmitLoadNativeValue(pslILEmit);
+
+ MethodDesc *pMD = GetExactMarshalerMethod(MscorlibBinder::GetMethod(METHOD__NULLABLEMARSHALER__CONVERT_TO_MANAGED));
+ pslILEmit->EmitCALL(pslILEmit->GetToken(pMD), 1, 1);
+
+ EmitStoreManagedValue(pslILEmit);
+}
+
+void ILNullableMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+ EmitInterfaceClearNative(pslILEmit);
+}
+
+MethodDesc *ILNullableMarshaler::GetExactMarshalerMethod(MethodDesc *pGenericMD)
+{
+ STANDARD_VM_CONTRACT;
+
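+ // NullableMarshaler methods are generic - find/create the exact method.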
+ return MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pGenericMD,
+ pGenericMD->GetMethodTable(),
+ FALSE, // forceBoxedEntryPoint
+ m_pargs->m_pMT->GetInstantiation(), // methodInst
+ FALSE, // allowInstParam
+ TRUE); // forceRemotableMethod
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// ILSystemTypeMarshaler implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+LocalDesc ILSystemTypeMarshaler::GetNativeType()
+{
+ STANDARD_VM_CONTRACT;
+
+ return LocalDesc(MscorlibBinder::GetClass(CLASS__TYPENAMENATIVE));
+}
+
+LocalDesc ILSystemTypeMarshaler::GetManagedType()
+{
+ STANDARD_VM_CONTRACT;
+
+ return LocalDesc(MscorlibBinder::GetClass(CLASS__TYPE));
+}
+
+bool ILSystemTypeMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+void ILSystemTypeMarshaler::EmitConvertContentsCLRToNative(ILCodeStream * pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ // SystemTypeMarshaler.ConvertToNative(Type, pTypeName);
+ EmitLoadManagedValue(pslILEmit);
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__SYSTEMTYPEMARSHALER__CONVERT_TO_NATIVE, 2, 0);
+}
+
+void ILSystemTypeMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream * pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ // SystemTypeMarshaler.ConvertToManaged(pTypeName, ref type);
+ EmitLoadNativeHomeAddr(pslILEmit);
+ EmitLoadManagedHomeAddr(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__SYSTEMTYPEMARSHALER__CONVERT_TO_MANAGED, 2, 0);
+}
+
+
+void ILSystemTypeMarshaler::EmitClearNative(ILCodeStream * pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ // SystemTypeMarshaler.ClearNative(pTypeName)
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__SYSTEMTYPEMARSHALER__CLEAR_NATIVE, 1, 0);
+}
+
+void ILSystemTypeMarshaler::EmitReInitNative(ILCodeStream * pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitINITOBJ(pslILEmit->GetToken(MscorlibBinder::GetClass(CLASS__TYPENAMENATIVE)));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// ILHResultExceptionMarshaler implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+LocalDesc ILHResultExceptionMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc(ELEMENT_TYPE_I4);
+}
+
+LocalDesc ILHResultExceptionMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pargs->m_pMT != NULL);
+ return LocalDesc(m_pargs->m_pMT);
+}
+
+bool ILHResultExceptionMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return false;
+}
+
+void ILHResultExceptionMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pslILEmit));
+ }
+ CONTRACTL_END;
+
+ // int HResultExceptionMarshaler.ConvertToNative(Exception);
+ EmitLoadManagedValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__HRESULTEXCEPTIONMARSHALER__CONVERT_TO_NATIVE, 1, 1);
+ EmitStoreNativeValue(pslILEmit);
+}
+
+void ILHResultExceptionMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pslILEmit));
+ }
+ CONTRACTL_END;
+
+ // Exception HResultExceptionMarshaler.ConvertToManaged(int hr);
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitCALL(METHOD__HRESULTEXCEPTIONMARSHALER__CONVERT_TO_MANAGED, 1, 1);
+ EmitStoreManagedValue(pslILEmit);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// ILKeyValuePairMarshaler implementation
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+LocalDesc ILKeyValuePairMarshaler::GetNativeType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc(ELEMENT_TYPE_I);
+}
+
+LocalDesc ILKeyValuePairMarshaler::GetManagedType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc(m_pargs->m_pMT);
+}
+
+bool ILKeyValuePairMarshaler::NeedsClearNative()
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+void ILKeyValuePairMarshaler::EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Native = KeyValueMarshaler<K, V>.ConvertToNative([In] ref Managed);
+ EmitLoadManagedHomeAddr(pslILEmit);
+
+ MethodDesc *pMD = GetExactMarshalerMethod(MscorlibBinder::GetMethod(METHOD__KEYVALUEPAIRMARSHALER__CONVERT_TO_NATIVE));
+ pslILEmit->EmitCALL(pslILEmit->GetToken(pMD), 1, 1);
+
+ EmitStoreNativeValue(pslILEmit);
+}
+
+void ILKeyValuePairMarshaler::EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Managed = KeyValuePairMarshaler<K, V>.ConvertToManaged(Native);
+ EmitLoadNativeValue(pslILEmit);
+
+ MethodDesc *pMD = GetExactMarshalerMethod(MscorlibBinder::GetMethod(METHOD__KEYVALUEPAIRMARSHALER__CONVERT_TO_MANAGED));
+ pslILEmit->EmitCALL(pslILEmit->GetToken(pMD), 1, 1);
+
+ EmitStoreManagedValue(pslILEmit);
+}
+
+void ILKeyValuePairMarshaler::EmitClearNative(ILCodeStream* pslILEmit)
+{
+ STANDARD_VM_CONTRACT;
+ EmitInterfaceClearNative(pslILEmit);
+}
+
+MethodDesc *ILKeyValuePairMarshaler::GetExactMarshalerMethod(MethodDesc *pGenericMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ // KeyValuePairMarshaler methods are generic - find/create the exact method.
+ return MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pGenericMD,
+ pGenericMD->GetMethodTable(),
+ FALSE, // forceBoxedEntryPoint
+ m_pargs->m_pMT->GetInstantiation(), // methodInst
+ FALSE, // allowInstParam
+ TRUE); // forceRemotableMethod
+}
+
+#endif // FEATURE_COMINTEROP
diff --git a/src/vm/ilmarshalers.h b/src/vm/ilmarshalers.h
new file mode 100644
index 0000000000..e78a54f658
--- /dev/null
+++ b/src/vm/ilmarshalers.h
@@ -0,0 +1,3367 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: ILMarshalers.h
+//
+
+//
+
+
+#include "common.h"
+#ifdef FEATURE_COMINTEROP
+#include "winstring.h"
+#endif //FEATURE_COMINTEROP
+#include "stubgen.h"
+#include "binder.h"
+#include "marshalnative.h"
+#include "clrvarargs.h"
+#ifdef FEATURE_COMINTEROP
+#include "stdinterfaces.h"
+#endif
+
+#define LOCAL_NUM_UNUSED ((DWORD)-1)
+
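+// A marshal "home" is the IL location - a local, an argument, or a byref held in a
+// local or argument - where the managed or native copy of a value lives while the
+// stub runs.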
+class ILStubMarshalHome
+{
+public:
+ typedef enum
+ {
+ HomeType_Unspecified = 0,
+ HomeType_ILLocal = 1,
+ HomeType_ILArgument = 2,
+ HomeType_ILByrefLocal = 3,
+ HomeType_ILByrefArgument = 4
+ } MarshalHomeType;
+
+private:
+ MarshalHomeType m_homeType;
+ DWORD m_dwHomeIndex;
+
+public:
+ void InitHome(MarshalHomeType homeType, DWORD dwHomeIndex)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_homeType = homeType;
+ m_dwHomeIndex = dwHomeIndex;
+ }
+
+ void EmitLoadHome(ILCodeStream* pslILEmit)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ switch (m_homeType)
+ {
+ case HomeType_ILLocal: pslILEmit->EmitLDLOC(m_dwHomeIndex); break;
+ case HomeType_ILArgument: pslILEmit->EmitLDARG(m_dwHomeIndex); break;
+
+ default:
+ UNREACHABLE_MSG("unexpected homeType passed to EmitLoadHome");
+ break;
+ }
+ }
+
+ void EmitLoadHomeAddr(ILCodeStream* pslILEmit)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ switch (m_homeType)
+ {
+ case HomeType_ILLocal: pslILEmit->EmitLDLOCA(m_dwHomeIndex); break;
+ case HomeType_ILArgument: pslILEmit->EmitLDARGA(m_dwHomeIndex); break;
+ case HomeType_ILByrefLocal: pslILEmit->EmitLDLOC(m_dwHomeIndex); break;
+ case HomeType_ILByrefArgument: pslILEmit->EmitLDARG(m_dwHomeIndex); break;
+
+ default:
+ UNREACHABLE_MSG("unexpected homeType passed to EmitLoadHomeAddr");
+ break;
+ }
+ }
+
+ void EmitStoreHome(ILCodeStream* pslILEmit)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ switch (m_homeType)
+ {
+ case HomeType_ILLocal: pslILEmit->EmitSTLOC(m_dwHomeIndex); break;
+ case HomeType_ILArgument: pslILEmit->EmitSTARG(m_dwHomeIndex); break;
+
+ default:
+ UNREACHABLE_MSG("unexpected homeType passed to EmitStoreHome");
+ break;
+ }
+ }
+
+ void EmitStoreHomeAddr(ILCodeStream* pslILEmit)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ switch (m_homeType)
+ {
+ case HomeType_ILByrefLocal: pslILEmit->EmitSTLOC(m_dwHomeIndex); break;
+ case HomeType_ILByrefArgument: pslILEmit->EmitSTARG(m_dwHomeIndex); break;
+
+ default:
+ UNREACHABLE_MSG("unexpected homeType passed to EmitStoreHomeAddr");
+ break;
+ }
+ }
+
+ void EmitCopyFromByrefArg(ILCodeStream* pslILEmit, LocalDesc* pManagedType, DWORD argidx)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CONSISTENCY_CHECK(pManagedType->cbType == 1);
+ if (pManagedType->IsValueClass())
+ {
+ EmitLoadHomeAddr(pslILEmit); // dest
+ pslILEmit->EmitLDARG(argidx); // src
+ pslILEmit->EmitCPOBJ(pslILEmit->GetToken(pManagedType->InternalToken));
+ }
+ else
+ {
+ pslILEmit->EmitLDARG(argidx);
+ pslILEmit->EmitLDIND_T(pManagedType);
+ EmitStoreHome(pslILEmit);
+ }
+ }
+
+ void EmitCopyToByrefArg(ILCodeStream* pslILEmit, LocalDesc* pManagedType, DWORD argidx)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pManagedType->IsValueClass())
+ {
+ pslILEmit->EmitLDARG(argidx); // dest
+ EmitLoadHomeAddr(pslILEmit); // src
+ pslILEmit->EmitCPOBJ(pslILEmit->GetToken(pManagedType->InternalToken));
+ }
+ else
+ {
+ pslILEmit->EmitLDARG(argidx);
+ EmitLoadHome(pslILEmit);
+ pslILEmit->EmitSTIND_T(pManagedType);
+ }
+ }
+
+ void EmitCopyToByrefArgWithNullCheck(ILCodeStream* pslILEmit, LocalDesc* pManagedType, DWORD argidx)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ILCodeLabel* pNullRefLabel = pslILEmit->NewCodeLabel();
+
+ // prevent a null-reference exception with an explicit check
+ pslILEmit->EmitLDARG(argidx);
+ pslILEmit->EmitBRFALSE(pNullRefLabel);
+
+ EmitCopyToByrefArg(pslILEmit, pManagedType, argidx);
+
+ pslILEmit->EmitLabel(pNullRefLabel);
+ }
+};
+
+
+class ILMarshaler
+{
+protected:
+
+#ifdef _DEBUG
+ const static UINT s_cbStackAllocThreshold = 128;
+#else
+ const static UINT s_cbStackAllocThreshold = 2048;
+#endif // _DEBUG
+
+ OverrideProcArgs* m_pargs;
+ NDirectStubLinker* m_pslNDirect;
+ ILCodeStream* m_pcsMarshal;
+ ILCodeStream* m_pcsUnmarshal;
+ UINT m_argidx;
+
+ DWORD m_dwMarshalFlags;
+
+ ILStubMarshalHome m_nativeHome;
+ ILStubMarshalHome m_managedHome;
+
+ DWORD m_dwMngdMarshalerLocalNum;
+
+public:
+
+ ILMarshaler() :
+ m_pslNDirect(NULL)
+ {
+ }
+
+ void SetNDirectStubLinker(NDirectStubLinker* pslNDirect)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(NULL == m_pslNDirect);
+ m_pslNDirect = pslNDirect;
+ }
+
+ void Init(ILCodeStream* pcsMarshal,
+ ILCodeStream* pcsUnmarshal,
+ UINT argidx,
+ DWORD dwMarshalFlags,
+ OverrideProcArgs* pargs)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK_MSG(m_pslNDirect != NULL, "please call SetNDirectStubLinker() before EmitMarshalArgument or EmitMarshalReturnValue");
+ m_pcsMarshal = pcsMarshal;
+ m_pcsUnmarshal = pcsUnmarshal;
+ m_pargs = pargs;
+ m_dwMarshalFlags = dwMarshalFlags;
+ m_argidx = argidx;
+ m_dwMngdMarshalerLocalNum = LOCAL_NUM_UNUSED;
+ }
+
+protected:
+ static inline bool IsCLRToNative(DWORD dwMarshalFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (0 != (dwMarshalFlags & MARSHAL_FLAG_CLR_TO_NATIVE));
+ }
+
+ static inline bool IsIn(DWORD dwMarshalFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (0 != (dwMarshalFlags & MARSHAL_FLAG_IN));
+ }
+
+ static inline bool IsOut(DWORD dwMarshalFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (0 != (dwMarshalFlags & MARSHAL_FLAG_OUT));
+ }
+
+ static inline bool IsByref(DWORD dwMarshalFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (0 != (dwMarshalFlags & MARSHAL_FLAG_BYREF));
+ }
+
+ static inline bool IsHresultSwap(DWORD dwMarshalFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (0 != (dwMarshalFlags & MARSHAL_FLAG_HRESULT_SWAP));
+ }
+
+ static inline bool IsRetval(DWORD dwMarshalFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (0 != (dwMarshalFlags & MARSHAL_FLAG_RETVAL));
+ }
+
+ static inline bool IsHiddenLengthParam(DWORD dwMarshalFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (0 != (dwMarshalFlags & MARSHAL_FLAG_HIDDENLENPARAM));
+ }
+
+ void EmitLoadManagedValue(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_managedHome.EmitLoadHome(pslILEmit);
+ }
+
+ void EmitLoadNativeValue(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_nativeHome.EmitLoadHome(pslILEmit);
+ }
+
+ void EmitLoadManagedHomeAddr(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_managedHome.EmitLoadHomeAddr(pslILEmit);
+ }
+
+ void EmitLoadNativeHomeAddr(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_nativeHome.EmitLoadHomeAddr(pslILEmit);
+ }
+
+ void EmitStoreManagedValue(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_managedHome.EmitStoreHome(pslILEmit);
+ }
+
+ void EmitStoreManagedHomeAddr(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_managedHome.EmitStoreHomeAddr(pslILEmit);
+ }
+
+ void EmitStoreNativeValue(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_nativeHome.EmitStoreHome(pslILEmit);
+ }
+
+ void EmitStoreNativeHomeAddr(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_nativeHome.EmitStoreHomeAddr(pslILEmit);
+ }
+
+public:
+
+ virtual bool SupportsArgumentMarshal(DWORD dwMarshalFlags, UINT* pErrorResID)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return true;
+ }
+
+ virtual bool SupportsReturnMarshal(DWORD dwMarshalFlags, UINT* pErrorResID)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return true;
+ }
+
+ // True if marshaling creates data that could need cleanup.
+ bool NeedsMarshalCleanupIndex()
+ {
+ WRAPPER_NO_CONTRACT;
+ return (NeedsClearNative() || NeedsClearCLR());
+ }
+
+ // True if unmarshaling creates data that could need exception cleanup ("rollback").
+ bool NeedsUnmarshalCleanupIndex()
+ {
+ WRAPPER_NO_CONTRACT;
+ return (NeedsClearNative() && !IsCLRToNative(m_dwMarshalFlags));
+ }
+
+ void EmitMarshalArgument(
+ ILCodeStream* pcsMarshal,
+ ILCodeStream* pcsUnmarshal,
+ UINT argidx,
+ DWORD dwMarshalFlags,
+ OverrideProcArgs* pargs)
+ {
+ STANDARD_VM_CONTRACT;
+
+ Init(pcsMarshal, pcsUnmarshal, argidx, dwMarshalFlags, pargs);
+
+ // We could create the marshaler in the marshal stream right before it's needed (i.e. within the try
+ // block), or in the setup stream (outside of the try block). For managed-to-unmanaged marshaling it
+ // makes little difference, except that using the setup stream saves us from cleaning up already-marshaled
+ // arguments in case of an exception. For unmanaged-to-managed, we may need to clean up the incoming
+ // arguments before we have been able to marshal them, so marshaler creation must not happen within the
+ // try block; otherwise cleanup could run against marshalers that have not been initialized. Potentially
+ // leaking unmanaged resources is by design, and there's not much we can do about it (we cannot do
+ // cleanup if we cannot create the marshaler).
+ EmitCreateMngdMarshaler(m_pslNDirect->GetSetupCodeStream());
+
+ if (IsCLRToNative(dwMarshalFlags))
+ {
+ if (IsByref(dwMarshalFlags))
+ {
+ EmitMarshalArgumentCLRToNativeByref();
+ }
+ else
+ {
+ EmitMarshalArgumentCLRToNative();
+ }
+ }
+ else
+ {
+ if (IsByref(dwMarshalFlags))
+ {
+ EmitMarshalArgumentNativeToCLRByref();
+ }
+ else
+ {
+ EmitMarshalArgumentNativeToCLR();
+ }
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ void EmitMarshalHiddenLengthArgument(ILCodeStream *pcsMarshal,
+ ILCodeStream *pcsUnmarshal,
+ MarshalInfo *pArrayInfo,
+ UINT arrayIndex,
+ DWORD dwMarshalFlags,
+ UINT hiddenArgIndex,
+ OverrideProcArgs *pargs,
+ __out DWORD *pdwHiddenLengthManagedHomeLocal,
+ __out DWORD *pdwHiddenLengthNativeHomeLocal)
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsHiddenLengthParam(dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ Init(pcsMarshal, pcsUnmarshal, hiddenArgIndex, dwMarshalFlags, pargs);
+ EmitCreateMngdMarshaler(m_pslNDirect->GetSetupCodeStream());
+
+ // Create a local to be the home of the length parameter
+ DWORD dwManagedLocalHome = m_pcsMarshal->NewLocal(GetManagedType());
+ m_managedHome.InitHome(ILStubMarshalHome::HomeType_ILLocal, dwManagedLocalHome);
+ *pdwHiddenLengthManagedHomeLocal = dwManagedLocalHome;
+
+ // managed length = 0
+ m_pcsMarshal->EmitLDC(0);
+ m_pcsMarshal->EmitCONV_T(pArrayInfo->GetHiddenLengthParamElementType());
+ m_pcsMarshal->EmitSTLOC(dwManagedLocalHome);
+
+ // And a local to be the home of the marshaled length
+ LocalDesc nativeArgType(GetNativeType());
+ DWORD dwNativeHomeLocal = m_pcsMarshal->NewLocal(nativeArgType);
+ if (IsByref(dwMarshalFlags))
+ {
+ nativeArgType.MakeByRef();
+ }
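+ // Note that the local home holds the length value itself; only the stub's target
+ // signature (set below) carries the byref.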
+ m_nativeHome.InitHome(ILStubMarshalHome::HomeType_ILLocal, dwNativeHomeLocal);
+ *pdwHiddenLengthNativeHomeLocal = dwNativeHomeLocal;
+
+ // Update the native signature to contain the new native parameter
+ m_pcsMarshal->SetStubTargetArgType(&nativeArgType, false);
+
+ if (IsCLRToNative(dwMarshalFlags))
+ {
+ // Load the length of the array into the local
+ if (IsIn(dwMarshalFlags))
+ {
+ ILCodeLabel *pSkipGetLengthLabel = m_pcsMarshal->NewCodeLabel();
+ m_pcsMarshal->EmitLDARG(arrayIndex);
+ m_pcsMarshal->EmitBRFALSE(pSkipGetLengthLabel);
+
+ m_pcsMarshal->EmitLDARG(arrayIndex);
+
+ if (IsByref(dwMarshalFlags))
+ {
+ // if (*array == null) goto pSkipGetLengthLabel
+ m_pcsMarshal->EmitLDIND_REF();
+ m_pcsMarshal->EmitBRFALSE(pSkipGetLengthLabel);
+
+ // array = *array
+ m_pcsMarshal->EmitLDARG(arrayIndex);
+ m_pcsMarshal->EmitLDIND_REF();
+ }
+
+ m_pcsMarshal->EmitLDLEN();
+ m_pcsMarshal->EmitCONV_T(pArrayInfo->GetHiddenLengthParamElementType());
+ m_pcsMarshal->EmitSTLOC(dwManagedLocalHome);
+ m_pcsMarshal->EmitLabel(pSkipGetLengthLabel);
+ }
+
+ if (IsByref(dwMarshalFlags))
+ {
+ EmitMarshalArgumentContentsCLRToNativeByref(true);
+ }
+ else
+ {
+ EmitMarshalArgumentContentsCLRToNative();
+ }
+ }
+ else
+ {
+ // Load the length of the array into the local
+ if (IsIn(dwMarshalFlags))
+ {
+ m_pcsMarshal->EmitLDARG(hiddenArgIndex);
+ if (IsByref(dwMarshalFlags))
+ {
+ LocalDesc nativeParamType(GetNativeType());
+ m_pcsMarshal->EmitLDIND_T(&nativeParamType);
+ }
+ m_pcsMarshal->EmitSTLOC(dwNativeHomeLocal);
+ }
+
+ if (IsByref(dwMarshalFlags))
+ {
+ EmitMarshalArgumentContentsNativeToCLRByref(true);
+ }
+ else
+ {
+ EmitMarshalArgumentContentsNativeToCLR();
+ }
+
+ // We can't copy the final length back to the parameter just yet, since we don't know what
+ // local the array lives in. Instead, we rely on the hidden length array marshaler to copy
+ // the value into the out parameter for us.
+ }
+ }
+
+#endif // FEATURE_COMINTEROP
+
+ virtual void EmitSetupArgument(ILCodeStream* pslILEmit)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (IsCLRToNative(m_dwMarshalFlags))
+ {
+ if (IsNativePassedByRef())
+ {
+ EmitLoadNativeHomeAddr(pslILEmit);
+ }
+ else
+ {
+ EmitLoadNativeValue(pslILEmit);
+ }
+ }
+ else
+ {
+ if (IsManagedPassedByRef())
+ {
+ EmitLoadManagedHomeAddr(pslILEmit);
+ }
+ else
+ {
+ EmitLoadManagedValue(pslILEmit);
+ }
+ }
+ }
+
+ virtual void EmitMarshalReturnValue(
+ ILCodeStream* pcsMarshal,
+ ILCodeStream* pcsUnmarshal,
+ ILCodeStream* pcsDispatch,
+ UINT argidx,
+ UINT16 wNativeSize,
+ DWORD dwMarshalFlags,
+ OverrideProcArgs* pargs)
+ {
+ STANDARD_VM_CONTRACT;
+
+ Init(pcsMarshal, pcsUnmarshal, argidx, dwMarshalFlags, pargs);
+
+ LocalDesc nativeType = GetNativeType();
+ LocalDesc managedType = GetManagedType();
+
+ bool byrefNativeReturn = false;
+ CorElementType typ = ELEMENT_TYPE_VOID;
+ UINT32 nativeSize = 0;
+
+        // we need to convert value type return types to primitives as the
+        // JIT does not inline P/Invoke calls that return structures
+ if (nativeType.IsValueClass())
+ {
+ if (wNativeSize == VARIABLESIZE)
+ {
+ // the unmanaged type size is variable
+ nativeSize = m_pargs->m_pMT->GetNativeSize();
+ }
+ else
+ {
+ // the unmanaged type size is fixed
+ nativeSize = wNativeSize;
+ }
+
+#ifndef _TARGET_ARM_
+ switch (nativeSize)
+ {
+ case 1: typ = ELEMENT_TYPE_U1; break;
+ case 2: typ = ELEMENT_TYPE_U2; break;
+ case 4: typ = ELEMENT_TYPE_U4; break;
+ case 8: typ = ELEMENT_TYPE_U8; break;
+ default: byrefNativeReturn = true; break;
+ }
+#endif
+ }
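+        // Illustrative example (hypothetical struct): a blittable 8-byte struct such as
+        //     struct POINT { INT32 x; INT32 y; };
+        // is returned from the target as ELEMENT_TYPE_U8 and cpblk'd into the structure's
+        // native home below; sizes other than 1/2/4/8 take the byrefNativeReturn path
+        // instead (and on ARM this conversion is skipped entirely).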
+
+ if (IsHresultSwap(dwMarshalFlags) || (byrefNativeReturn && IsCLRToNative(dwMarshalFlags)))
+ {
+ LocalDesc extraParamType = nativeType;
+ extraParamType.MakeByRef();
+
+ m_pcsMarshal->SetStubTargetArgType(&extraParamType, false);
+
+ if (IsHresultSwap(dwMarshalFlags))
+ {
+ // HRESULT swapping: the original return value is transformed into an extra
+ // byref parameter and the target is expected to return an HRESULT
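+                //
+                // Illustrative mapping (assumed managed signature): a method declared as
+                //     int GetCount();
+                // becomes, under HRESULT swapping, the native call
+                //     HRESULT GetCount(/* out */ int* pRetVal);
+                // where pRetVal is the extra byref parameter added above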
+ m_pcsMarshal->SetStubTargetReturnType(ELEMENT_TYPE_I4); // native method returns an HRESULT
+ }
+ else
+ {
+ // byref structure return: the original return value is transformed into an
+ // extra byref parameter and the target is not expected to return anything
+ //
+ // note: we do this only for forward calls because [unmanaged calling conv.
+ // uses byref return] implies [managed calling conv. uses byref return]
+ m_pcsMarshal->SetStubTargetReturnType(ELEMENT_TYPE_VOID);
+ }
+ }
+ else
+ {
+ if (typ != ELEMENT_TYPE_VOID)
+ {
+ // small structure return: the original return value is transformed into
+ // ELEMENT_TYPE_U1, ELEMENT_TYPE_U2, ELEMENT_TYPE_U4, or ELEMENT_TYPE_U8
+ m_pcsMarshal->SetStubTargetReturnType(typ);
+ }
+ else
+ {
+ m_pcsMarshal->SetStubTargetReturnType(&nativeType);
+ }
+ }
+
+ m_managedHome.InitHome(ILStubMarshalHome::HomeType_ILLocal, m_pcsMarshal->NewLocal(managedType));
+ m_nativeHome.InitHome(ILStubMarshalHome::HomeType_ILLocal, m_pcsMarshal->NewLocal(nativeType));
+
+ EmitCreateMngdMarshaler(m_pcsMarshal);
+
+ if (IsCLRToNative(dwMarshalFlags))
+ {
+ if (IsHresultSwap(dwMarshalFlags) || byrefNativeReturn)
+ {
+ EmitReInitNative(m_pcsMarshal);
+ EmitLoadNativeHomeAddr(pcsDispatch); // load up the byref native type as an extra arg
+ }
+ else
+ {
+ if (typ != ELEMENT_TYPE_VOID)
+ {
+ // small structure forward: the returned integer is memcpy'd into native home
+ // of the structure
+
+ DWORD dwTempLocalNum = m_pcsUnmarshal->NewLocal(typ);
+ m_pcsUnmarshal->EmitSTLOC(dwTempLocalNum);
+
+ // cpblk
+ m_nativeHome.EmitLoadHomeAddr(m_pcsUnmarshal);
+ m_pcsUnmarshal->EmitLDLOCA(dwTempLocalNum);
+ m_pcsUnmarshal->EmitLDC(nativeSize);
+ m_pcsUnmarshal->EmitCPBLK();
+ }
+ else
+ {
+ EmitStoreNativeValue(m_pcsUnmarshal);
+ }
+ }
+
+ if (NeedsMarshalCleanupIndex())
+ {
+ m_pslNDirect->EmitSetArgMarshalIndex(m_pcsUnmarshal, NDirectStubLinker::CLEANUP_INDEX_ARG0_MARSHAL + m_argidx);
+ }
+
+ EmitConvertSpaceAndContentsNativeToCLR(m_pcsUnmarshal);
+
+ EmitCleanupCLRToNative();
+
+ EmitLoadManagedValue(m_pcsUnmarshal);
+ }
+ else
+ {
+ EmitStoreManagedValue(m_pcsUnmarshal);
+
+ if (NeedsMarshalCleanupIndex())
+ {
+ m_pslNDirect->EmitSetArgMarshalIndex(m_pcsUnmarshal, NDirectStubLinker::CLEANUP_INDEX_ARG0_MARSHAL + m_argidx);
+ }
+
+ if (IsHresultSwap(dwMarshalFlags))
+ {
+ // we have to skip unmarshaling return value into the HRESULT-swapped argument
+ // if the argument came as NULL (otherwise we would leak unmanaged resources as
+ // we have no way of passing them back to the caller)
+ ILCodeLabel *pSkipConversionLabel = m_pcsUnmarshal->NewCodeLabel();
+
+ m_pcsUnmarshal->EmitLDARG(argidx);
+ m_pcsUnmarshal->EmitBRFALSE(pSkipConversionLabel);
+ EmitConvertSpaceAndContentsCLRToNative(m_pcsUnmarshal);
+ m_pcsUnmarshal->EmitLabel(pSkipConversionLabel);
+ }
+ else
+ {
+ EmitConvertSpaceAndContentsCLRToNative(m_pcsUnmarshal);
+ }
+
+ if (NeedsUnmarshalCleanupIndex())
+ {
+ // if an exception is thrown after this point, we will clean up the unmarshaled retval
+ m_pslNDirect->EmitSetArgMarshalIndex(m_pcsUnmarshal, NDirectStubLinker::CLEANUP_INDEX_RETVAL_UNMARSHAL);
+ }
+
+ EmitCleanupNativeToCLR();
+
+ if (IsHresultSwap(dwMarshalFlags))
+ {
+ // we tolerate NULL here mainly for backward compatibility reasons
+ m_nativeHome.EmitCopyToByrefArgWithNullCheck(m_pcsUnmarshal, &nativeType, argidx);
+ m_pcsUnmarshal->EmitLDC(S_OK);
+ }
+ else
+ {
+ if (typ != ELEMENT_TYPE_VOID)
+ {
+ // small structure return (reverse): native home of the structure is memcpy'd
+ // into the integer to be returned from the stub
+
+ DWORD dwTempLocalNum = m_pcsUnmarshal->NewLocal(typ);
+
+ // cpblk
+ m_pcsUnmarshal->EmitLDLOCA(dwTempLocalNum);
+ m_nativeHome.EmitLoadHomeAddr(m_pcsUnmarshal);
+ m_pcsUnmarshal->EmitLDC(nativeSize);
+ m_pcsUnmarshal->EmitCPBLK();
+
+ m_pcsUnmarshal->EmitLDLOC(dwTempLocalNum);
+ }
+ else
+ {
+ EmitLoadNativeValue(m_pcsUnmarshal);
+ }
+ }
+
+ // make sure we free (and zero) the return value if an exception is thrown
+ EmitExceptionCleanupNativeToCLR();
+ }
+ }
+
+
+protected:
+
+ virtual void EmitCreateMngdMarshaler(ILCodeStream* pslILEmit)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual void EmitLoadMngdMarshaler(ILCodeStream* pslILEmit)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CONSISTENCY_CHECK((DWORD)-1 != m_dwMngdMarshalerLocalNum);
+ pslILEmit->EmitLDLOC(m_dwMngdMarshalerLocalNum);
+ }
+
+ void EmitSetupSigAndDefaultHomesCLRToNative()
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ LocalDesc nativeArgType = GetNativeType();
+ DWORD dwNativeHomeLocalNum = m_pcsMarshal->NewLocal(nativeArgType);
+ m_pcsMarshal->SetStubTargetArgType(&nativeArgType);
+
+ m_managedHome.InitHome(ILStubMarshalHome::HomeType_ILArgument, m_argidx);
+ m_nativeHome.InitHome(ILStubMarshalHome::HomeType_ILLocal, dwNativeHomeLocalNum);
+ }
+
+ void EmitCleanupCLRToNativeTemp()
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (NeedsClearNative())
+ {
+ CONSISTENCY_CHECK(NeedsMarshalCleanupIndex());
+
+ ILCodeStream* pcsCleanup = m_pslNDirect->GetCleanupCodeStream();
+ ILCodeLabel* pSkipClearNativeLabel = pcsCleanup->NewCodeLabel();
+
+ m_pslNDirect->EmitCheckForArgCleanup(pcsCleanup,
+ NDirectStubLinker::CLEANUP_INDEX_ARG0_MARSHAL + m_argidx,
+ NDirectStubLinker::BranchIfNotMarshaled,
+ pSkipClearNativeLabel);
+
+ EmitClearNativeTemp(pcsCleanup);
+ pcsCleanup->EmitLabel(pSkipClearNativeLabel);
+ }
+ }
+
+ void EmitCleanupCLRToNative()
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (NeedsClearNative())
+ {
+ CONSISTENCY_CHECK(NeedsMarshalCleanupIndex());
+
+ ILCodeStream* pcsCleanup = m_pslNDirect->GetCleanupCodeStream();
+ ILCodeLabel* pSkipClearNativeLabel = pcsCleanup->NewCodeLabel();
+
+ m_pslNDirect->EmitCheckForArgCleanup(pcsCleanup,
+ NDirectStubLinker::CLEANUP_INDEX_ARG0_MARSHAL + m_argidx,
+ NDirectStubLinker::BranchIfNotMarshaled,
+ pSkipClearNativeLabel);
+
+ EmitClearNative(pcsCleanup);
+ pcsCleanup->EmitLabel(pSkipClearNativeLabel);
+ }
+ }
+
+ virtual void EmitMarshalArgumentCLRToNative()
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ EmitSetupSigAndDefaultHomesCLRToNative();
+ EmitMarshalArgumentContentsCLRToNative();
+ }
+
+ void EmitMarshalArgumentContentsCLRToNative()
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ //
+ // marshal
+ //
+ if (IsIn(m_dwMarshalFlags))
+ {
+ EmitConvertSpaceAndContentsCLRToNativeTemp(m_pcsMarshal);
+ }
+ else
+ {
+ EmitConvertSpaceCLRToNativeTemp(m_pcsMarshal);
+ }
+
+ //
+ // unmarshal
+ //
+ if (IsOut(m_dwMarshalFlags))
+ {
+ if (IsIn(m_dwMarshalFlags))
+ {
+ EmitClearCLRContents(m_pcsUnmarshal);
+ }
+ EmitConvertContentsNativeToCLR(m_pcsUnmarshal);
+ }
+
+ EmitCleanupCLRToNativeTemp();
+ }
+
+ void EmitSetupSigAndDefaultHomesCLRToNativeByref(bool fBlittable = false)
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCLRToNative(m_dwMarshalFlags) && IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ LocalDesc nativeType = GetNativeType();
+ LocalDesc managedType = GetManagedType();
+
+ LocalDesc nativeArgType = nativeType;
+ nativeArgType.MakeByRef();
+ m_pcsMarshal->SetStubTargetArgType(&nativeArgType);
+
+ if (fBlittable)
+ {
+ // we will not work with the actual data but only with a pointer to that data
+ // (the managed and native type had better be the same if it's blittable)
+ _ASSERTE(nativeType.ElementType[0] == managedType.ElementType[0]);
+
+ // native home will keep the containing object pinned
+ nativeType.MakeByRef();
+ nativeType.MakePinned();
+
+ m_managedHome.InitHome(ILStubMarshalHome::HomeType_ILByrefArgument, m_argidx);
+ m_nativeHome.InitHome(ILStubMarshalHome::HomeType_ILByrefLocal, m_pcsMarshal->NewLocal(nativeType));
+ }
+ else
+ {
+ m_managedHome.InitHome(ILStubMarshalHome::HomeType_ILLocal, m_pcsMarshal->NewLocal(managedType));
+ m_nativeHome.InitHome(ILStubMarshalHome::HomeType_ILLocal, m_pcsMarshal->NewLocal(nativeType));
+ }
+ }
+
+ virtual void EmitMarshalArgumentCLRToNativeByref()
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCLRToNative(m_dwMarshalFlags) && IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ EmitSetupSigAndDefaultHomesCLRToNativeByref();
+ EmitMarshalArgumentContentsCLRToNativeByref(false);
+ }
+
+ void EmitMarshalArgumentContentsCLRToNativeByref(bool managedHomeAlreadyInitialized)
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCLRToNative(m_dwMarshalFlags) && IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ LocalDesc managedType = GetManagedType();
+
+ //
+ // marshal
+ //
+        if (IsIn(m_dwMarshalFlags) && !IsOut(m_dwMarshalFlags))
+ {
+ if (!managedHomeAlreadyInitialized)
+ {
+ m_managedHome.EmitCopyFromByrefArg(m_pcsMarshal, &managedType, m_argidx);
+ }
+
+ EmitConvertSpaceAndContentsCLRToNativeTemp(m_pcsMarshal);
+ }
+ else if (IsIn(m_dwMarshalFlags) && IsOut(m_dwMarshalFlags))
+ {
+ if (!managedHomeAlreadyInitialized)
+ {
+ m_managedHome.EmitCopyFromByrefArg(m_pcsMarshal, &managedType, m_argidx);
+ }
+
+ EmitConvertSpaceAndContentsCLRToNative(m_pcsMarshal);
+ }
+ else
+ {
+ EmitReInitNative(m_pcsMarshal);
+ }
+
+ //
+ // unmarshal
+ //
+ if (IsOut(m_dwMarshalFlags))
+ {
+ EmitClearCLR(m_pcsUnmarshal);
+
+ EmitConvertSpaceAndContentsNativeToCLR(m_pcsUnmarshal);
+
+ if (!managedHomeAlreadyInitialized)
+ {
+ m_managedHome.EmitCopyToByrefArg(m_pcsUnmarshal, &managedType, m_argidx);
+ }
+
+ EmitCleanupCLRToNative();
+ }
+ else
+ {
+ EmitCleanupCLRToNativeTemp();
+ }
+ //
+ // @TODO: ensure ReInitNative is called on [in,out] byref args when an exception occurs
+ //
+ }
+
+ void EmitSetupSigAndDefaultHomesNativeToCLR()
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ LocalDesc nativeArgType = GetNativeType();
+ m_pcsMarshal->SetStubTargetArgType(&nativeArgType);
+
+ m_managedHome.InitHome(ILStubMarshalHome::HomeType_ILLocal, m_pcsMarshal->NewLocal(GetManagedType()));
+ m_nativeHome.InitHome(ILStubMarshalHome::HomeType_ILArgument, m_argidx);
+ }
+
+ void EmitCleanupNativeToCLR()
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (NeedsClearCLR())
+ {
+ CONSISTENCY_CHECK(NeedsMarshalCleanupIndex());
+
+ ILCodeStream* pcsCleanup = m_pslNDirect->GetCleanupCodeStream();
+ ILCodeLabel* pSkipClearCLRLabel = pcsCleanup->NewCodeLabel();
+
+ m_pslNDirect->EmitCheckForArgCleanup(pcsCleanup,
+ NDirectStubLinker::CLEANUP_INDEX_ARG0_MARSHAL + m_argidx,
+ NDirectStubLinker::BranchIfNotMarshaled,
+ pSkipClearCLRLabel);
+
+ EmitClearCLR(pcsCleanup);
+ pcsCleanup->EmitLabel(pSkipClearCLRLabel);
+ }
+ }
+
+ // Emits cleanup code that runs only if an exception is thrown during execution of an IL stub (its try
+ // block to be precise). The goal is to roll back allocations of native resources that may have already
+ // happened to prevent leaks, and also clear output arguments to prevent passing out invalid data - most
+ // importantly dangling pointers. The canonical example is an exception thrown during unmarshaling of
+ // an argument at which point other arguments have already been unmarshaled.
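+    //
+    // Illustrative scenario (hypothetical managed target): in a reverse stub for
+    //     void Callback(out string a, out string b)
+    // an exception thrown while converting 'b' must free whatever was already
+    // allocated for 'a' and zero the native home of 'a', so the unmanaged caller
+    // never observes a pointer to freed memory.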
+ void EmitExceptionCleanupNativeToCLR()
+ {
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(IsRetval(m_dwMarshalFlags) || IsOut(m_dwMarshalFlags));
+
+ LocalDesc nativeType = GetNativeType();
+ ILCodeStream *pcsCleanup = m_pslNDirect->GetExceptionCleanupCodeStream();
+
+ if (NeedsClearNative())
+ {
+ m_pslNDirect->SetExceptionCleanupNeeded();
+
+ ILCodeLabel *pSkipCleanupLabel = pcsCleanup->NewCodeLabel();
+
+ // if this is byref in/out and we have not marshaled this argument
+ // yet, we need to populate the native home with the incoming value
+ if (IsIn(m_dwMarshalFlags) && IsByref(m_dwMarshalFlags))
+ {
+ ILCodeLabel *pSkipCopyLabel = pcsCleanup->NewCodeLabel();
+
+ CONSISTENCY_CHECK(NeedsMarshalCleanupIndex());
+ m_pslNDirect->EmitCheckForArgCleanup(pcsCleanup,
+ NDirectStubLinker::CLEANUP_INDEX_ARG0_MARSHAL + m_argidx,
+ NDirectStubLinker::BranchIfMarshaled,
+ pSkipCopyLabel);
+
+ pcsCleanup->EmitLDARG(m_argidx);
+ pcsCleanup->EmitBRFALSE(pSkipCleanupLabel); // if the argument is NULL, skip cleanup completely
+
+ m_nativeHome.EmitCopyFromByrefArg(pcsCleanup, &nativeType, m_argidx);
+
+ pcsCleanup->EmitLabel(pSkipCopyLabel);
+ }
+
+ // if this is retval or out-only, the native home does not get initialized until we unmarshal it
+ if (IsRetval(m_dwMarshalFlags) || !IsIn(m_dwMarshalFlags))
+ {
+ CONSISTENCY_CHECK(NeedsUnmarshalCleanupIndex());
+
+ UINT uArgIdx = (IsRetval(m_dwMarshalFlags) ?
+ NDirectStubLinker::CLEANUP_INDEX_RETVAL_UNMARSHAL :
+ NDirectStubLinker::CLEANUP_INDEX_ARG0_UNMARSHAL + m_argidx);
+
+ m_pslNDirect->EmitCheckForArgCleanup(pcsCleanup,
+ uArgIdx,
+ NDirectStubLinker::BranchIfNotMarshaled,
+ pSkipCleanupLabel);
+ }
+
+ // we know that native home needs to be cleaned up at this point
+ if (IsRetval(m_dwMarshalFlags) || (IsOut(m_dwMarshalFlags) && IsByref(m_dwMarshalFlags)))
+ {
+ // we own the buffer - clear everything
+ EmitClearNative(pcsCleanup);
+ }
+ else
+ {
+ // this is a caller supplied buffer - clear only its contents
+ EmitClearNativeContents(pcsCleanup);
+ }
+
+ pcsCleanup->EmitLabel(pSkipCleanupLabel);
+ }
+
+        // if there is an output buffer, zero it out so the caller does not get a pointer to already-freed data
+ if (!IsHiddenLengthParam(m_dwMarshalFlags))
+ {
+ if (IsRetval(m_dwMarshalFlags) || (IsOut(m_dwMarshalFlags) && IsByref(m_dwMarshalFlags)))
+ {
+ m_pslNDirect->SetExceptionCleanupNeeded();
+
+ EmitReInitNative(pcsCleanup);
+ if (IsHresultSwap(m_dwMarshalFlags) || IsOut(m_dwMarshalFlags))
+ {
+ m_nativeHome.EmitCopyToByrefArgWithNullCheck(pcsCleanup, &nativeType, m_argidx);
+ }
+ }
+ }
+ }
+
+ virtual void EmitMarshalArgumentNativeToCLR()
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ EmitSetupSigAndDefaultHomesNativeToCLR();
+ EmitMarshalArgumentContentsNativeToCLR();
+ }
+
+ void EmitMarshalArgumentContentsNativeToCLR()
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!IsCLRToNative(m_dwMarshalFlags) && !IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ //
+ // marshal
+ //
+ if (IsIn(m_dwMarshalFlags))
+ {
+ EmitConvertSpaceAndContentsNativeToCLR(m_pcsMarshal);
+ }
+ else
+ {
+ EmitConvertSpaceNativeToCLR(m_pcsMarshal);
+ }
+
+ //
+ // unmarshal
+ //
+ if (IsOut(m_dwMarshalFlags))
+ {
+ if (IsIn(m_dwMarshalFlags))
+ {
+ EmitClearNativeContents(m_pcsUnmarshal);
+ }
+ EmitConvertContentsCLRToNative(m_pcsUnmarshal);
+
+ // make sure we free the argument if an exception is thrown
+ EmitExceptionCleanupNativeToCLR();
+ }
+ EmitCleanupNativeToCLR();
+ }
+
+ void EmitSetupSigAndDefaultHomesNativeToCLRByref(bool fBlittable = false)
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!IsCLRToNative(m_dwMarshalFlags) && IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ LocalDesc nativeType = GetNativeType();
+ LocalDesc managedType = GetManagedType();
+ LocalDesc nativeArgType = nativeType;
+ nativeArgType.MakeByRef();
+ m_pcsMarshal->SetStubTargetArgType(&nativeArgType);
+
+ if (fBlittable)
+ {
+ // we will not work with the actual data but only with a pointer to that data
+ // (the managed and native type had better be the same if it's blittable)
+ _ASSERTE(nativeType.ElementType[0] == managedType.ElementType[0]);
+
+ managedType.MakeByRef();
+
+ m_managedHome.InitHome(ILStubMarshalHome::HomeType_ILByrefLocal, m_pcsMarshal->NewLocal(managedType));
+ m_nativeHome.InitHome(ILStubMarshalHome::HomeType_ILByrefArgument, m_argidx);
+ }
+ else
+ {
+ m_managedHome.InitHome(ILStubMarshalHome::HomeType_ILLocal, m_pcsMarshal->NewLocal(managedType));
+ m_nativeHome.InitHome(ILStubMarshalHome::HomeType_ILLocal, m_pcsMarshal->NewLocal(nativeType));
+ }
+ }
+
+ virtual void EmitMarshalArgumentNativeToCLRByref()
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!IsCLRToNative(m_dwMarshalFlags) && IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ EmitSetupSigAndDefaultHomesNativeToCLRByref();
+ EmitMarshalArgumentContentsNativeToCLRByref(false);
+ }
+
+ void EmitMarshalArgumentContentsNativeToCLRByref(bool nativeHomeAlreadyInitialized)
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!IsCLRToNative(m_dwMarshalFlags) && IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ LocalDesc nativeType = GetNativeType();
+
+ //
+ // marshal
+ //
+ if (IsIn(m_dwMarshalFlags))
+ {
+ if (!nativeHomeAlreadyInitialized)
+ {
+ m_nativeHome.EmitCopyFromByrefArg(m_pcsMarshal, &nativeType, m_argidx);
+ }
+
+ EmitConvertSpaceAndContentsNativeToCLR(m_pcsMarshal);
+ }
+ else
+ {
+            // dereference the argument so we throw before calling the managed target - this is the fastest way
+            // to check for NULL (we can still throw later if the pointer is invalid yet non-NULL, but we cannot
+            // detect that reliably - the memory may get unmapped etc.; a NULL check is the best we can do here)
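+            // (the emitted IL is simply: ldarg <argidx> / ldind.i1 / pop)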
+ m_pcsMarshal->EmitLDARG(m_argidx);
+ m_pcsMarshal->EmitLDIND_I1();
+ m_pcsMarshal->EmitPOP();
+ }
+
+ //
+ // unmarshal
+ //
+ if (IsOut(m_dwMarshalFlags))
+ {
+ if (IsIn(m_dwMarshalFlags))
+ {
+ EmitClearNative(m_pcsUnmarshal);
+ EmitReInitNative(m_pcsUnmarshal);
+ }
+
+ EmitConvertSpaceAndContentsCLRToNative(m_pcsUnmarshal);
+
+ if (!nativeHomeAlreadyInitialized)
+ {
+ m_nativeHome.EmitCopyToByrefArg(m_pcsUnmarshal, &nativeType, m_argidx);
+ }
+
+ // make sure we free and zero the by-ref argument if an exception is thrown
+ EmitExceptionCleanupNativeToCLR();
+ }
+
+ EmitCleanupNativeToCLR();
+ }
+
+ virtual LocalDesc GetNativeType() = 0;
+ virtual LocalDesc GetManagedType() = 0;
+
+ //
+ // Native-to-CLR
+ //
+ virtual void EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual void EmitConvertSpaceAndContentsNativeToCLR(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ EmitConvertSpaceNativeToCLR(pslILEmit);
+ EmitConvertContentsNativeToCLR(pslILEmit);
+ }
+
+
+ //
+ // CLR-to-Native
+ //
+ virtual void EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual void EmitConvertSpaceCLRToNativeTemp(ILCodeStream* pslILEmit)
+ {
+ LIMITED_METHOD_CONTRACT;
+ EmitConvertSpaceCLRToNative(pslILEmit);
+ }
+
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual void EmitConvertSpaceAndContentsCLRToNative(ILCodeStream* pslILEmit)
+ {
+ STANDARD_VM_CONTRACT;
+ EmitConvertSpaceCLRToNative(pslILEmit);
+ EmitConvertContentsCLRToNative(pslILEmit);
+ }
+
+ virtual void EmitConvertSpaceAndContentsCLRToNativeTemp(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ EmitConvertSpaceAndContentsCLRToNative(pslILEmit);
+ }
+
+ //
+ // Misc
+ //
+ virtual void EmitClearCLRContents(ILCodeStream* pslILEmit)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual bool NeedsClearNative()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return false;
+ }
+
+ virtual void EmitClearNative(ILCodeStream* pslILEmit)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual void EmitClearNativeTemp(ILCodeStream* pslILEmit)
+ {
+ LIMITED_METHOD_CONTRACT;
+ EmitClearNative(pslILEmit);
+ }
+
+ virtual void EmitClearNativeContents(ILCodeStream* pslILEmit)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual bool NeedsClearCLR()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return false;
+ }
+
+ virtual void EmitClearCLR(ILCodeStream* pslILEmit)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual void EmitReInitNative(ILCodeStream* pslILEmit)
+ {
+ STANDARD_VM_CONTRACT;
+
+        // Friendly reminder:
+        // You should implement your own EmitReInitNative if your native type is a struct,
+        // as the following instructions won't work on value types and will trigger
+        // an assert in the JIT.
+ _ASSERTE(!GetNativeType().IsValueClass());
+
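+        // For an 8-byte native type, for example, this emits (illustrative):
+        //   ldc.i4.0
+        //   conv.i8
+        //   <store to the native home>    // local or argument, per InitHome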
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitCONV_T(static_cast<CorElementType>(GetNativeType().ElementType[0]));
+
+ EmitStoreNativeValue(pslILEmit);
+ }
+
+ virtual bool IsManagedPassedByRef()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return IsByref(m_dwMarshalFlags);
+ }
+
+ virtual bool IsNativePassedByRef()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return IsByref(m_dwMarshalFlags);
+ }
+
+ void EmitInterfaceClearNative(ILCodeStream* pslILEmit);
+
+public:
+ static MarshalerOverrideStatus ArgumentOverride(NDirectStubLinker* psl,
+ BOOL byref,
+ BOOL fin,
+ BOOL fout,
+ BOOL fManagedToNative,
+ OverrideProcArgs* pargs,
+ UINT* pResID,
+ UINT argidx,
+ UINT nativeStackOffset)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return HANDLEASNORMAL;
+ }
+
+ static MarshalerOverrideStatus ReturnOverride(NDirectStubLinker* psl,
+ BOOL fManagedToNative,
+ BOOL fHresultSwap,
+ OverrideProcArgs* pargs,
+ UINT* pResID)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return HANDLEASNORMAL;
+ }
+};
+
+
+class ILCopyMarshalerBase : public ILMarshaler
+{
+ virtual LocalDesc GetManagedType()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetNativeType();
+ }
+
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+ {
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadManagedValue(pslILEmit);
+ EmitStoreNativeValue(pslILEmit);
+ }
+
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+ {
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeValue(pslILEmit);
+ EmitStoreManagedValue(pslILEmit);
+ }
+
+ //
+    // It's very unfortunate that x86 used ML_COPYPINNEDGCREF for byref args.
+    // The result is that developers can get away with being lazy about their
+    // in/out semantics; oftentimes in/out byref params are marked out-only,
+    // but because of ML_COPYPINNEDGCREF, they get in/out behavior.
+    //
+    // There are even lazier developers who use byref params to pass arrays.
+    // Pinning ensures that the native side 'sees' the entire array even when
+    // only a reference to one element was passed.
+ //
+ // This method was changed to pin instead of copy in Dev10 in order
+ // to match the original ML behavior.
+ //
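+    // Illustrative example (hypothetical signature): for
+    //     [DllImport("native")] static extern void Increment(ref int value);
+    // the forward stub pins 'value' through the byref local set up below and passes
+    // its address straight to native code - no copy-in/copy-out is emitted.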
+ virtual void EmitMarshalArgumentCLRToNativeByref()
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCLRToNative(m_dwMarshalFlags) && IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ EmitSetupSigAndDefaultHomesCLRToNativeByref(true);
+
+ //
+ // marshal
+ //
+ EmitLoadManagedHomeAddr(m_pcsMarshal);
+ EmitStoreNativeHomeAddr(m_pcsMarshal);
+
+ //
+        // no unmarshaling is necessary since we passed the pinned byref directly to native;
+        // the argument is therefore automatically in/out
+ //
+ }
+
+ //
+ // Similarly to the other direction, ML used ML_COPYPINNEDGCREF on x86 to
+ // directly pass the unmanaged pointer as a byref argument to managed code.
+    // This also makes an observable difference (it allows passing NULL, and changes
+    // made to the original value during the call are visible in managed code).
+    //
+    // This method was changed to pass a pointer instead of a copy in Dev10 in order
+    // to match the original ML behavior. Note that in this direction we don't
+    // need to pin the pointer - if it points to the GC heap, it must have been
+ // pinned on the way to unmanaged.
+ //
+ virtual void EmitMarshalArgumentNativeToCLRByref()
+ {
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!IsCLRToNative(m_dwMarshalFlags) && IsByref(m_dwMarshalFlags));
+ }
+ CONTRACTL_END;
+
+ EmitSetupSigAndDefaultHomesNativeToCLRByref(true);
+
+ //
+ // marshal
+ //
+ EmitLoadNativeHomeAddr(m_pcsMarshal);
+ EmitStoreManagedHomeAddr(m_pcsMarshal);
+
+ //
+        // no unmarshaling is necessary since we passed the pointer directly to managed
+        // code as a byref; the argument is therefore automatically in/out
+ //
+ }
+};
+
+template <CorElementType ELEMENT_TYPE, class PROMOTED_ELEMENT>
+class ILCopyMarshalerSimple : public ILCopyMarshalerBase
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(PROMOTED_ELEMENT),
+ c_CLRSize = sizeof(PROMOTED_ELEMENT),
+ };
+
+ bool IsSmallValueTypeSpecialCase()
+ {
+ //
+ // Special case for small value types that get
+ // mapped to MARSHAL_TYPE_GENERIC_8 -- use the
+        // value type itself so the JIT is happy.
+ //
+
+ return (ELEMENT_TYPE ==
+#ifdef _WIN64
+ ELEMENT_TYPE_I8
+#else // _WIN64
+ ELEMENT_TYPE_I4
+#endif // _WIN64
+ ) && (NULL != m_pargs->m_pMT);
+ }
+
+ bool NeedToPromoteTo8Bytes()
+ {
+ WRAPPER_NO_CONTRACT;
+
+#if defined(_TARGET_AMD64_)
+ // If the argument is passed by value,
+ if (!IsByref(m_dwMarshalFlags) && !IsRetval(m_dwMarshalFlags))
+ {
+ // and it is an I4 or an U4,
+ if ( (ELEMENT_TYPE == ELEMENT_TYPE_I4) ||
+ (ELEMENT_TYPE == ELEMENT_TYPE_U4) )
+ {
+ // and we are doing a managed-to-unmanaged call,
+ if (IsCLRToNative(m_dwMarshalFlags))
+ {
+ // then we have to promote the native argument type to an I8 or an U8.
+ return true;
+ }
+ }
+ }
+#endif // _TARGET_AMD64_
+
+ return false;
+ }
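+    // Illustrative effect (AMD64 forward call, assumed signature): for
+    //     [DllImport("native")] static extern void SetCount(int count);
+    // the stub widens 'count' with conv.i8 (see EmitTypePromotion below) so the
+    // native target always sees a full 8-byte slot.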
+
+ CorElementType GetConversionType(CorElementType type)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // I4 <-> I8; U4 <-> U8
+ if (type == ELEMENT_TYPE_I4)
+ {
+ return ELEMENT_TYPE_I8;
+ }
+ else if (type == ELEMENT_TYPE_U4)
+ {
+ return ELEMENT_TYPE_U8;
+ }
+ else
+ {
+ return ELEMENT_TYPE_END;
+ }
+ }
+
+ void EmitTypePromotion(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ CorElementType promotedType = GetConversionType(ELEMENT_TYPE);
+ if (promotedType == ELEMENT_TYPE_I8)
+ {
+ pslILEmit->EmitCONV_I8();
+ }
+ else if (promotedType == ELEMENT_TYPE_U8)
+ {
+ pslILEmit->EmitCONV_U8();
+ }
+ }
+
+ virtual LocalDesc GetNativeType()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (NeedToPromoteTo8Bytes())
+ {
+ return LocalDesc(GetConversionType(ELEMENT_TYPE));
+ }
+ else
+ {
+ return GetManagedType();
+ }
+ }
+
+ virtual LocalDesc GetManagedType()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (IsSmallValueTypeSpecialCase())
+ {
+ return LocalDesc(m_pargs->m_pMT);
+ }
+ else
+ {
+ return LocalDesc(ELEMENT_TYPE);
+ }
+ }
+
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+ {
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadManagedValue(pslILEmit);
+ if (NeedToPromoteTo8Bytes())
+ {
+ EmitTypePromotion(pslILEmit);
+ }
+ EmitStoreNativeValue(pslILEmit);
+ }
+
+ virtual void EmitReInitNative(ILCodeStream* pslILEmit)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (IsSmallValueTypeSpecialCase())
+ {
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitINITOBJ(pslILEmit->GetToken(m_pargs->m_pMT));
+ }
+ else
+ {
+ // ldc.i4.0, conv.i8/u8/r4/r8 is shorter than ldc.i8/r4/r8 0
+ pslILEmit->EmitLDC(0);
+ pslILEmit->EmitCONV_T(ELEMENT_TYPE);
+
+ EmitStoreNativeValue(pslILEmit);
+ }
+ }
+};
+
+typedef ILCopyMarshalerSimple<ELEMENT_TYPE_I1, INT_PTR> ILCopyMarshaler1;
+typedef ILCopyMarshalerSimple<ELEMENT_TYPE_U1, UINT_PTR> ILCopyMarshalerU1;
+typedef ILCopyMarshalerSimple<ELEMENT_TYPE_I2, INT_PTR> ILCopyMarshaler2;
+typedef ILCopyMarshalerSimple<ELEMENT_TYPE_U2, UINT_PTR> ILCopyMarshalerU2;
+typedef ILCopyMarshalerSimple<ELEMENT_TYPE_I4, INT_PTR> ILCopyMarshaler4;
+typedef ILCopyMarshalerSimple<ELEMENT_TYPE_U4, UINT_PTR> ILCopyMarshalerU4;
+typedef ILCopyMarshalerSimple<ELEMENT_TYPE_I8, INT64> ILCopyMarshaler8;
+typedef ILCopyMarshalerSimple<ELEMENT_TYPE_R4, float> ILFloatMarshaler;
+typedef ILCopyMarshalerSimple<ELEMENT_TYPE_R8, double> ILDoubleMarshaler;
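+
+// Illustrative: ILCopyMarshaler4 instantiates the template with ELEMENT_TYPE_I4, so
+// on AMD64 a by-value forward 'int' argument reports ELEMENT_TYPE_I8 as its native
+// type (NeedToPromoteTo8Bytes above); on other targets the value is copied as-is.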
+
+template <BinderClassID CLASS__ID, class PROMOTED_ELEMENT>
+class ILCopyMarshalerKnownStruct : public ILCopyMarshalerBase
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(PROMOTED_ELEMENT),
+ c_CLRSize = sizeof(PROMOTED_ELEMENT),
+ };
+
+ virtual void EmitReInitNative(ILCodeStream* pslILEmit)
+ {
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitINITOBJ(pslILEmit->GetToken(MscorlibBinder::GetClass(CLASS__ID)));
+ }
+
+ virtual LocalDesc GetNativeType()
+ {
+ STANDARD_VM_CONTRACT;
+
+ return LocalDesc(MscorlibBinder::GetClass(CLASS__ID));
+ }
+};
+
+typedef ILCopyMarshalerKnownStruct<CLASS__DECIMAL, DECIMAL> ILDecimalMarshaler;
+typedef ILCopyMarshalerKnownStruct<CLASS__GUID, GUID> ILGuidMarshaler;
+
+class ILBlittableValueClassMarshaler : public ILCopyMarshalerBase
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = VARIABLESIZE,
+ c_CLRSize = VARIABLESIZE,
+ };
+
+ virtual void EmitReInitNative(ILCodeStream* pslILEmit)
+ {
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeHomeAddr(pslILEmit);
+ pslILEmit->EmitINITOBJ(pslILEmit->GetToken(m_pargs->m_pMT));
+ }
+
+ virtual LocalDesc GetNativeType()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return LocalDesc(m_pargs->m_pMT);
+ }
+};
+
+
+class ILDelegateMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(void *),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+};
+
+class ILReflectionObjectMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(void *),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+protected:
+ virtual LocalDesc GetManagedType();
+ virtual LocalDesc GetNativeType();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+    virtual BinderFieldID GetStructureFieldID() { LIMITED_METHOD_CONTRACT; return (BinderFieldID)0; }
+ virtual BinderFieldID GetObjectFieldID() = 0;
+ virtual BinderClassID GetManagedTypeBinderID() = 0;
+};
+
+class ILIRuntimeMethodInfoMarshaler : public ILReflectionObjectMarshaler
+{
+protected:
+ virtual BinderFieldID GetObjectFieldID() { LIMITED_METHOD_CONTRACT; return FIELD__STUBMETHODINFO__HANDLE; }
+ virtual BinderClassID GetManagedTypeBinderID() { LIMITED_METHOD_CONTRACT; return CLASS__STUBMETHODINFO; }
+};
+
+class ILRuntimeModuleMarshaler : public ILReflectionObjectMarshaler
+{
+protected:
+ virtual BinderFieldID GetObjectFieldID() { LIMITED_METHOD_CONTRACT; return FIELD__MODULE__DATA; }
+ virtual BinderClassID GetManagedTypeBinderID() { LIMITED_METHOD_CONTRACT; return CLASS__MODULE; }
+};
+
+class ILRuntimeAssemblyMarshaler : public ILReflectionObjectMarshaler
+{
+protected:
+ virtual BinderFieldID GetObjectFieldID() { LIMITED_METHOD_CONTRACT; return FIELD__ASSEMBLY__HANDLE; }
+ virtual BinderClassID GetManagedTypeBinderID() { LIMITED_METHOD_CONTRACT; return CLASS__ASSEMBLY; }
+};
+
+class ILRuntimeTypeHandleMarshaler : public ILReflectionObjectMarshaler
+{
+protected:
+ virtual BinderFieldID GetStructureFieldID() { LIMITED_METHOD_CONTRACT; return FIELD__RT_TYPE_HANDLE__M_TYPE; }
+ virtual BinderFieldID GetObjectFieldID() { LIMITED_METHOD_CONTRACT; return FIELD__CLASS__TYPEHANDLE; }
+ virtual BinderClassID GetManagedTypeBinderID() { LIMITED_METHOD_CONTRACT; return CLASS__RT_TYPE_HANDLE; }
+};
+
+class ILRuntimeMethodHandleMarshaler : public ILReflectionObjectMarshaler
+{
+protected:
+ virtual BinderFieldID GetStructureFieldID() { LIMITED_METHOD_CONTRACT; return FIELD__METHOD_HANDLE__METHOD; }
+ virtual BinderFieldID GetObjectFieldID() { LIMITED_METHOD_CONTRACT; return FIELD__STUBMETHODINFO__HANDLE; }
+ virtual BinderClassID GetManagedTypeBinderID() { LIMITED_METHOD_CONTRACT; return CLASS__METHOD_HANDLE; }
+};
+
+class ILRuntimeFieldHandleMarshaler : public ILReflectionObjectMarshaler
+{
+protected:
+ virtual BinderFieldID GetStructureFieldID() { LIMITED_METHOD_CONTRACT; return FIELD__FIELD_HANDLE__M_FIELD; }
+ virtual BinderFieldID GetObjectFieldID() { LIMITED_METHOD_CONTRACT; return FIELD__RT_FIELD_INFO__HANDLE; }
+ virtual BinderClassID GetManagedTypeBinderID() { LIMITED_METHOD_CONTRACT; return CLASS__FIELD_HANDLE; }
+};
+
+class ILBoolMarshaler : public ILMarshaler
+{
+public:
+
+ virtual CorElementType GetNativeBoolElementType() = 0;
+ virtual int GetNativeTrueValue() = 0;
+ virtual int GetNativeFalseValue() = 0;
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+};
+
+class ILWinBoolMarshaler : public ILBoolMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(BOOL),
+ c_CLRSize = sizeof(INT8),
+ };
+
+protected:
+ virtual CorElementType GetNativeBoolElementType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ELEMENT_TYPE_I4;
+ }
+
+ virtual int GetNativeTrueValue()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+ virtual int GetNativeFalseValue()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 0;
+ }
+};
+
+class ILCBoolMarshaler : public ILBoolMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(BYTE),
+ c_CLRSize = sizeof(INT8),
+ };
+
+protected:
+ virtual CorElementType GetNativeBoolElementType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ELEMENT_TYPE_I1;
+ }
+
+ virtual int GetNativeTrueValue()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ }
+
+ virtual int GetNativeFalseValue()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 0;
+ }
+};
+
+#ifdef FEATURE_COMINTEROP
+class ILVtBoolMarshaler : public ILBoolMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(VARIANT_BOOL),
+ c_CLRSize = sizeof(INT8),
+ };
+
+protected:
+ virtual CorElementType GetNativeBoolElementType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ELEMENT_TYPE_I2;
+ }
+
+ virtual int GetNativeTrueValue()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return VARIANT_TRUE;
+ }
+
+ virtual int GetNativeFalseValue()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return VARIANT_FALSE;
+ }
+};
+#endif // FEATURE_COMINTEROP
+
+class ILWSTRMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(void *),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+#ifdef _DEBUG
+ bool m_fCoMemoryAllocated;
+
+ ILWSTRMarshaler()
+ {
+ m_fCoMemoryAllocated = false;
+ }
+#endif // _DEBUG
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+
+ virtual void EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertSpaceAndContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertSpaceAndContentsCLRToNativeTemp(ILCodeStream* pslILEmit);
+
+ virtual void EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+ virtual void EmitConvertSpaceAndContentsNativeToCLR(ILCodeStream* pslILEmit);
+
+ virtual bool NeedsClearNative();
+ virtual void EmitClearNative(ILCodeStream* pslILEmit);
+ virtual void EmitClearNativeTemp(ILCodeStream* pslILEmit);
+
+ static bool CanUsePinnedManagedString(DWORD dwMarshalFlags);
+ static void EmitCheckManagedStringLength(ILCodeStream* pslILEmit);
+ static void EmitCheckNativeStringLength(ILCodeStream* pslILEmit);
+};
+
+// A marshaler that makes a run-time decision, based on the argument size, whether native space
+// will be allocated using localloc or on the heap. The ctor argument is the binder ID of the
+// function used to free the heap allocation.
+class ILOptimizedAllocMarshaler : public ILMarshaler
+{
+public:
+ ILOptimizedAllocMarshaler(BinderMethodID clearNat) :
+ m_idClearNative(clearNat),
+ m_dwLocalBuffer((DWORD)-1)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual LocalDesc GetNativeType();
+ virtual bool NeedsClearNative();
+ virtual void EmitClearNative(ILCodeStream* pslILEmit);
+
+protected:
+ const BinderMethodID m_idClearNative;
+ DWORD m_dwLocalBuffer; // localloc'ed temp buffer variable or -1 if not used
+};
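+
+// Illustrative pseudocode (not actual emitted code) of the run-time decision,
+// assuming a derived class that defines MAX_LOCAL_BUFFER_LENGTH:
+//
+//   if (requiredBytes > MAX_LOCAL_BUFFER_LENGTH)
+//       buffer = AllocCoTaskMem(requiredBytes);   // freed later via m_idClearNative
+//   else
+//       buffer = localloc(requiredBytes);         // tracked via m_dwLocalBuffer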
+
+class ILWSTRBufferMarshaler : public ILOptimizedAllocMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ c_nativeSize = sizeof(void *),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+ enum
+ {
+ // If required buffer length > MAX_LOCAL_BUFFER_LENGTH, don't optimize by allocating memory on stack
+ MAX_LOCAL_BUFFER_LENGTH = (MAX_PATH + 1) * 2
+ };
+
+ ILWSTRBufferMarshaler() :
+ ILOptimizedAllocMarshaler(METHOD__WIN32NATIVE__COTASKMEMFREE)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+};
+
+class ILCSTRBufferMarshaler : public ILOptimizedAllocMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ c_nativeSize = sizeof(void *),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+ enum
+ {
+ // If required buffer length > MAX_LOCAL_BUFFER_LENGTH, don't optimize by allocating memory on stack
+ MAX_LOCAL_BUFFER_LENGTH = MAX_PATH + 1
+ };
+
+ ILCSTRBufferMarshaler() :
+ ILOptimizedAllocMarshaler(METHOD__WIN32NATIVE__COTASKMEMFREE)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+};
+
+
+class ILHandleRefMarshaler : public ILMarshaler
+{
+    // Managed layout of the System.Runtime.InteropServices.HandleRef class
+ struct HANDLEREF
+ {
+ OBJECTREF m_wrapper;
+ LPVOID m_handle;
+ };
+
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ c_nativeSize = sizeof(LPVOID),
+ c_CLRSize = sizeof(HANDLEREF),
+ };
+
+ LocalDesc GetManagedType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc();
+ }
+
+ LocalDesc GetNativeType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc();
+ }
+
+ static MarshalerOverrideStatus ArgumentOverride(NDirectStubLinker* psl,
+ BOOL byref,
+ BOOL fin,
+ BOOL fout,
+ BOOL fManagedToNative,
+ OverrideProcArgs* pargs,
+ UINT* pResID,
+ UINT argidx,
+ UINT nativeStackOffset);
+
+ static MarshalerOverrideStatus ReturnOverride(NDirectStubLinker* psl,
+ BOOL fManagedToNative,
+ BOOL fHresultSwap,
+ OverrideProcArgs* pargs,
+ UINT* pResID);
+};
+
+class ILSafeHandleMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ c_nativeSize = sizeof(LPVOID),
+ c_CLRSize = sizeof(SAFEHANDLE),
+ };
+
+ virtual LocalDesc GetManagedType();
+ virtual LocalDesc GetNativeType();
+
+ virtual bool NeedsClearNative();
+ virtual void EmitClearNative(ILCodeStream* pslILEmit);
+
+ virtual void EmitMarshalArgumentCLRToNative();
+
+ static MarshalerOverrideStatus ArgumentOverride(NDirectStubLinker* psl,
+ BOOL byref,
+ BOOL fin,
+ BOOL fout,
+ BOOL fManagedToNative,
+ OverrideProcArgs* pargs,
+ UINT* pResID,
+ UINT argidx,
+ UINT nativeStackOffset);
+
+ static MarshalerOverrideStatus ReturnOverride(NDirectStubLinker *psl,
+ BOOL fManagedToNative,
+ BOOL fHresultSwap,
+ OverrideProcArgs *pargs,
+ UINT *pResID);
+};
+
+
+class ILCriticalHandleMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ c_nativeSize = sizeof(LPVOID),
+ c_CLRSize = sizeof(CRITICALHANDLE),
+ };
+
+public:
+
+ LocalDesc GetManagedType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc();
+ }
+
+ LocalDesc GetNativeType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc();
+ }
+
+ static MarshalerOverrideStatus ArgumentOverride(NDirectStubLinker* psl,
+ BOOL byref,
+ BOOL fin,
+ BOOL fout,
+ BOOL fManagedToNative,
+ OverrideProcArgs* pargs,
+ UINT* pResID,
+ UINT argidx,
+ UINT nativeStackOffset);
+
+ static MarshalerOverrideStatus ReturnOverride(NDirectStubLinker *psl,
+ BOOL fManagedToNative,
+ BOOL fHresultSwap,
+ OverrideProcArgs *pargs,
+ UINT *pResID);
+};
+
+
+class ILValueClassMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = VARIABLESIZE,
+ c_CLRSize = VARIABLESIZE,
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual void EmitReInitNative(ILCodeStream* pslILEmit);
+ virtual bool NeedsClearNative();
+ virtual void EmitClearNative(ILCodeStream * pslILEmit);
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+};
+
+#ifdef FEATURE_COMINTEROP
+class ILObjectMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_CLRSize = sizeof(OBJECTREF),
+ c_nativeSize = sizeof(VARIANT),
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+ virtual bool NeedsClearNative();
+ virtual void EmitClearNative(ILCodeStream* pslILEmit);
+ virtual void EmitReInitNative(ILCodeStream* pslILEmit);
+};
+#endif // FEATURE_COMINTEROP
+
+class ILDateMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(DATE),
+ c_CLRSize = sizeof(INT64),
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+ virtual void EmitReInitNative(ILCodeStream* pslILEmit);
+};
+
+
+class ILCurrencyMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(CURRENCY),
+ c_CLRSize = sizeof(DECIMAL),
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual void EmitReInitNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+};
+
+
+#ifdef FEATURE_COMINTEROP
+class ILInterfaceMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(void *),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+ virtual bool NeedsClearNative();
+ virtual void EmitClearNative(ILCodeStream* pslILEmit);
+};
+#endif // FEATURE_COMINTEROP
+
+
+class ILAnsiCharMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(UINT8),
+ c_CLRSize = sizeof(UINT16),
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+};
+
+
+template <BinderClassID CLASS__ID, class ELEMENT>
+class ILValueClassPtrMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(ELEMENT *),
+ c_CLRSize = sizeof(ELEMENT),
+ };
+
+protected:
+ virtual LocalDesc GetNativeType()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ //
+ // pointer to value class
+ //
+ return LocalDesc(ELEMENT_TYPE_I);
+ }
+
+ virtual LocalDesc GetManagedType()
+ {
+ STANDARD_VM_CONTRACT;
+
+ //
+ // value class
+ //
+ return LocalDesc(MscorlibBinder::GetClass(CLASS__ID));
+ }
+
+ virtual bool NeedsClearNative()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (IsByref(m_dwMarshalFlags) && IsOut(m_dwMarshalFlags));
+ }
+
+ virtual void EmitClearNative(ILCodeStream* pslILEmit)
+ {
+ STANDARD_VM_CONTRACT;
+
+ EmitLoadNativeValue(pslILEmit);
+ // static void CoTaskMemFree(IntPtr ptr)
+ pslILEmit->EmitCALL(METHOD__WIN32NATIVE__COTASKMEMFREE, 1, 0);
+ }
+
+ virtual void EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (NeedsClearNative())
+ {
+ pslILEmit->EmitLDC(sizeof(ELEMENT));
+ pslILEmit->EmitCONV_U();
+ // static IntPtr CoTaskMemAlloc(UIntPtr cb)
+ pslILEmit->EmitCALL(METHOD__WIN32NATIVE__COTASKMEMALLOC, 1, 1);
+ EmitStoreNativeValue(pslILEmit);
+ }
+ }
+
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+ {
+ STANDARD_VM_CONTRACT;
+
+ if (NeedsClearNative())
+ {
+ EmitLoadNativeValue(pslILEmit); // dest
+ EmitLoadManagedHomeAddr(pslILEmit); // src
+ pslILEmit->EmitCPOBJ(pslILEmit->GetToken(MscorlibBinder::GetClass(CLASS__ID)));
+ }
+ else
+ {
+ EmitLoadManagedHomeAddr(pslILEmit);
+ EmitStoreNativeValue(pslILEmit);
+ }
+ }
+
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+ {
+ STANDARD_VM_CONTRACT;
+
+ int tokType = pslILEmit->GetToken(MscorlibBinder::GetClass(CLASS__ID));
+ ILCodeLabel *pNullLabel = pslILEmit->NewCodeLabel();
+ ILCodeLabel *pJoinLabel = pslILEmit->NewCodeLabel();
+
+ EmitLoadNativeValue(pslILEmit);
+ pslILEmit->EmitBRFALSE(pNullLabel);
+
+ // the incoming pointer is non-null -> dereference it and copy the struct
+ EmitLoadManagedHomeAddr(pslILEmit); // dest
+ EmitLoadNativeValue(pslILEmit); // src
+ pslILEmit->EmitCPOBJ(tokType);
+
+ pslILEmit->EmitBR(pJoinLabel);
+
+ // the incoming pointer is null -> just initobj (i.e. zero) the struct
+ pslILEmit->EmitLabel(pNullLabel);
+
+ EmitLoadManagedHomeAddr(pslILEmit);
+ pslILEmit->EmitINITOBJ(tokType);
+
+ pslILEmit->EmitLabel(pJoinLabel);
+ }
+};
+
+typedef ILValueClassPtrMarshaler<CLASS__GUID, GUID> ILGuidPtrMarshaler;
+typedef ILValueClassPtrMarshaler<CLASS__DECIMAL, DECIMAL> ILDecimalPtrMarshaler;
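+
+// Illustrative mapping (assumed managed signature): a parameter declared as
+//     [MarshalAs(UnmanagedType.LPStruct)] Guid id
+// is passed to native code as GUID* by ILGuidPtrMarshaler; byref [Out] arguments
+// additionally get a CoTaskMemAlloc'd native copy (see NeedsClearNative above).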
+
+#ifdef FEATURE_COMINTEROP
+class ILOleColorMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(OLE_COLOR),
+ c_CLRSize = sizeof(SYSTEMCOLOR),
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+};
+
+class ILVBByValStrWMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ c_nativeSize = sizeof(BSTR),
+ c_CLRSize = sizeof(OBJECTREF*),
+ };
+
+ enum
+ {
+ // If required buffer length > MAX_LOCAL_BUFFER_LENGTH, don't optimize by allocating memory on stack
+ MAX_LOCAL_BUFFER_LENGTH = (MAX_PATH + 1) * 2 + sizeof(DWORD)
+ };
+
+
+ ILVBByValStrWMarshaler() :
+        m_dwCCHLocal(-1),
+        m_dwLocalBuffer(-1)
+ {
+ }
+
+ virtual bool SupportsArgumentMarshal(DWORD dwMarshalFlags, UINT* pErrorResID);
+ virtual bool SupportsReturnMarshal(DWORD dwMarshalFlags, UINT* pErrorResID);
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+ virtual bool NeedsClearNative();
+ virtual void EmitClearNative(ILCodeStream* pslILEmit);
+ virtual bool IsNativePassedByRef();
+
+ DWORD m_dwCCHLocal;
+ DWORD m_dwLocalBuffer;
+};
+
+class ILVBByValStrMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ c_nativeSize = sizeof(LPSTR),
+ c_CLRSize = sizeof(OBJECTREF *),
+ };
+
+ ILVBByValStrMarshaler() :
+ m_dwCCHLocal(-1)
+ {
+ }
+
+ virtual bool SupportsArgumentMarshal(DWORD dwMarshalFlags, UINT* pErrorResID);
+ virtual bool SupportsReturnMarshal(DWORD dwMarshalFlags, UINT* pErrorResID);
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+ virtual bool NeedsClearNative();
+ virtual void EmitClearNative(ILCodeStream* pslILEmit);
+ virtual bool IsNativePassedByRef();
+
+ DWORD m_dwCCHLocal;
+};
+
+class ILHSTRINGMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ c_nativeSize = sizeof(HSTRING),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ void EmitConvertCLRToHSTRINGReference(ILCodeStream* pslILEmit);
+ void EmitConvertCLRToHSTRING(ILCodeStream* pslILEmit);
+
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+
+ virtual bool NeedsClearNative();
+ virtual void EmitClearNative(ILCodeStream* pslILEmit);
+};
+#endif // FEATURE_COMINTEROP
+
+class ILCSTRMarshaler : public ILOptimizedAllocMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(void *),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+ enum
+ {
+ // If required buffer length > MAX_LOCAL_BUFFER_LENGTH, don't optimize by allocating memory on stack
+ MAX_LOCAL_BUFFER_LENGTH = MAX_PATH + 1
+ };
+
+ ILCSTRMarshaler() :
+ ILOptimizedAllocMarshaler(METHOD__CSTRMARSHALER__CLEAR_NATIVE)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+protected:
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+};
+
+#ifdef FEATURE_COMINTEROP
+class ILBSTRMarshaler : public ILOptimizedAllocMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(void *),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+ enum
+ {
+ // If required buffer length > MAX_LOCAL_BUFFER_LENGTH, don't optimize by allocating memory on stack
+ MAX_LOCAL_BUFFER_LENGTH = (MAX_PATH + 1) * 2 + 4
+ };
+
+ ILBSTRMarshaler() :
+ ILOptimizedAllocMarshaler(METHOD__BSTRMARSHALER__CLEAR_NATIVE)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+protected:
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+};
+
+
+class ILAnsiBSTRMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(void *),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+ virtual bool NeedsClearNative();
+ virtual void EmitClearNative(ILCodeStream* pslILEmit);
+};
+#endif // FEATURE_COMINTEROP
+
+class ILLayoutClassPtrMarshalerBase : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_nativeSize = sizeof(void *),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual void EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertSpaceCLRToNativeTemp(ILCodeStream* pslILEmit);
+ virtual void EmitConvertSpaceAndContentsCLRToNativeTemp(ILCodeStream* pslILEmit);
+ virtual void EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit);
+ virtual bool NeedsClearNative();
+ virtual void EmitClearNative(ILCodeStream* pslILEmit);
+ virtual void EmitClearNativeTemp(ILCodeStream* pslILEmit);
+};
+
+class ILLayoutClassPtrMarshaler : public ILLayoutClassPtrMarshalerBase
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ };
+
+protected:
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+ virtual void EmitClearNativeContents(ILCodeStream * pslILEmit);
+};
+
+class ILBlittablePtrMarshaler : public ILLayoutClassPtrMarshalerBase
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ };
+
+protected:
+ virtual void EmitMarshalArgumentCLRToNative();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+};
+
+
+#ifndef FEATURE_CORECLR
+class ILBlittableValueClassWithCopyCtorMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = VARIABLESIZE,
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+ LocalDesc GetManagedType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc();
+ }
+
+ LocalDesc GetNativeType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return LocalDesc();
+ }
+
+ static MarshalerOverrideStatus ArgumentOverride(NDirectStubLinker* psl,
+ BOOL byref,
+ BOOL fin,
+ BOOL fout,
+ BOOL fManagedToNative,
+ OverrideProcArgs* pargs,
+ UINT* pResID,
+ UINT argidx,
+ UINT nativeStackOffset);
+
+
+};
+#endif // !FEATURE_CORECLR
+
+
+class ILArgIteratorMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(va_list),
+ c_CLRSize = sizeof(VARARGS),
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual bool SupportsArgumentMarshal(DWORD dwMarshalFlags, UINT* pErrorResID);
+ virtual void EmitMarshalArgumentCLRToNative();
+ virtual void EmitMarshalArgumentNativeToCLR();
+};
+
+class ILArrayWithOffsetMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ c_nativeSize = sizeof(LPVOID),
+ c_CLRSize = sizeof(ArrayWithOffsetData),
+ };
+
+ ILArrayWithOffsetMarshaler() :
+ m_dwCountLocalNum(-1),
+ m_dwOffsetLocalNum(-1),
+ m_dwPinnedLocalNum(-1)
+ {
+ }
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual bool SupportsArgumentMarshal(DWORD dwMarshalFlags, UINT* pErrorResID);
+
+ virtual void EmitConvertSpaceAndContentsCLRToNativeTemp(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+ virtual void EmitClearNativeTemp(ILCodeStream* pslILEmit);
+
+
+ DWORD m_dwCountLocalNum;
+ DWORD m_dwOffsetLocalNum;
+ DWORD m_dwPinnedLocalNum;
+};
+
+class ILAsAnyMarshalerBase : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_nativeSize = sizeof(void *),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+ ILAsAnyMarshalerBase() :
+ m_dwMarshalerLocalNum(-1)
+ {
+ }
+
+protected:
+ static const BYTE ML_IN = 0x10;
+ static const BYTE ML_OUT = 0x20;
+
+ virtual bool IsAnsi() = 0;
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual bool SupportsArgumentMarshal(DWORD dwMarshalFlags, UINT* pErrorResID);
+ virtual bool SupportsReturnMarshal(DWORD dwMarshalFlags, UINT* pErrorResID);
+ virtual void EmitMarshalArgumentCLRToNative();
+ virtual bool NeedsClearNative();
+ virtual void EmitClearNativeTemp(ILCodeStream* pslILEmit);
+
+ DWORD m_dwMarshalerLocalNum;
+};
+
+class ILAsAnyWMarshaler : public ILAsAnyMarshalerBase
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ };
+
+protected:
+ virtual bool IsAnsi()
+ {
+ return false;
+ }
+};
+
+class ILAsAnyAMarshaler : public ILAsAnyMarshalerBase
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ };
+
+protected:
+ virtual bool IsAnsi()
+ {
+ return true;
+ }
+};
+
+
+class ILMngdMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_nativeSize = sizeof(void *),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+ ILMngdMarshaler(BinderMethodID space2Man,
+ BinderMethodID contents2Man,
+ BinderMethodID space2Nat,
+ BinderMethodID contents2Nat,
+ BinderMethodID clearNat,
+ BinderMethodID clearNatContents,
+ BinderMethodID clearMan) :
+ m_idConvertSpaceToManaged(space2Man),
+ m_idConvertContentsToManaged(contents2Man),
+ m_idConvertSpaceToNative(space2Nat),
+ m_idConvertContentsToNative(contents2Nat),
+ m_idClearNative(clearNat),
+ m_idClearNativeContents(clearNatContents),
+ m_idClearManaged(clearMan)
+ {
+ }
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+
+ virtual void EmitCreateMngdMarshaler(ILCodeStream* pslILEmit) = 0;
+
+ virtual void EmitCallMngdMarshalerMethod(ILCodeStream* pslILEmit, MethodDesc *pMD);
+
+ virtual void EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ EmitCallMngdMarshalerMethod(pslILEmit, GetConvertSpaceToManagedMethod());
+ }
+
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ EmitCallMngdMarshalerMethod(pslILEmit, GetConvertContentsToManagedMethod());
+ }
+
+ virtual void EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ EmitCallMngdMarshalerMethod(pslILEmit, GetConvertSpaceToNativeMethod());
+ }
+
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ EmitCallMngdMarshalerMethod(pslILEmit, GetConvertContentsToNativeMethod());
+ }
+
+ virtual bool NeedsClearNative()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (NULL != GetClearNativeMethod())
+ {
+ return true;
+ }
+
+ return false;
+ }
+
+ virtual void EmitClearNative(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ EmitCallMngdMarshalerMethod(pslILEmit, GetClearNativeMethod());
+ }
+
+ virtual void EmitClearNativeContents(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ EmitCallMngdMarshalerMethod(pslILEmit, GetClearNativeContentsMethod());
+ }
+
+
+ virtual bool NeedsClearCLR()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (NULL != GetClearManagedMethod())
+ {
+ return true;
+ }
+
+ return false;
+ }
+
+ virtual void EmitClearCLR(ILCodeStream* pslILEmit)
+ {
+ WRAPPER_NO_CONTRACT;
+ EmitCallMngdMarshalerMethod(pslILEmit, GetClearManagedMethod());
+ }
+
+ virtual MethodDesc *GetConvertSpaceToManagedMethod() { WRAPPER_NO_CONTRACT; return (m_idConvertSpaceToManaged == METHOD__NIL ? NULL : MscorlibBinder::GetMethod(m_idConvertSpaceToManaged)); }
+ virtual MethodDesc *GetConvertContentsToManagedMethod() { WRAPPER_NO_CONTRACT; return (m_idConvertContentsToManaged == METHOD__NIL ? NULL : MscorlibBinder::GetMethod(m_idConvertContentsToManaged)); }
+ virtual MethodDesc *GetConvertSpaceToNativeMethod() { WRAPPER_NO_CONTRACT; return (m_idConvertSpaceToNative == METHOD__NIL ? NULL : MscorlibBinder::GetMethod(m_idConvertSpaceToNative)); }
+ virtual MethodDesc *GetConvertContentsToNativeMethod() { WRAPPER_NO_CONTRACT; return (m_idConvertContentsToNative == METHOD__NIL ? NULL : MscorlibBinder::GetMethod(m_idConvertContentsToNative)); }
+ virtual MethodDesc *GetClearNativeMethod() { WRAPPER_NO_CONTRACT; return (m_idClearNative == METHOD__NIL ? NULL : MscorlibBinder::GetMethod(m_idClearNative)); }
+ virtual MethodDesc *GetClearNativeContentsMethod() { WRAPPER_NO_CONTRACT; return (m_idClearNativeContents == METHOD__NIL ? NULL : MscorlibBinder::GetMethod(m_idClearNativeContents)); }
+ virtual MethodDesc *GetClearManagedMethod() { WRAPPER_NO_CONTRACT; return (m_idClearManaged == METHOD__NIL ? NULL : MscorlibBinder::GetMethod(m_idClearManaged)); }
+
+ const BinderMethodID m_idConvertSpaceToManaged;
+ const BinderMethodID m_idConvertContentsToManaged;
+ const BinderMethodID m_idConvertSpaceToNative;
+ const BinderMethodID m_idConvertContentsToNative;
+ const BinderMethodID m_idClearNative;
+ const BinderMethodID m_idClearNativeContents;
+ const BinderMethodID m_idClearManaged;
+};
+
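+// A concrete subclass supplies the MscorlibBinder IDs of its managed marshaler
+// methods and emits the "create marshaler" prolog; the base class emits calls to
+// everything else. METHOD__NIL marks a step as absent (e.g. NeedsClearNative /
+// NeedsClearCLR then return false). A hypothetical minimal subclass, for
+// illustration only (the METHOD__MY_MARSHALER__* binder IDs are made up):
+//
+//     class ILMyMarshaler : public ILMngdMarshaler
+//     {
+//     public:
+//         enum { c_fInOnly = FALSE };
+//         ILMyMarshaler() :
+//             ILMngdMarshaler(METHOD__NIL,
+//                             METHOD__MY_MARSHALER__CONVERT_CONTENTS_TO_MANAGED,
+//                             METHOD__NIL,
+//                             METHOD__MY_MARSHALER__CONVERT_CONTENTS_TO_NATIVE,
+//                             METHOD__MY_MARSHALER__CLEAR_NATIVE,
+//                             METHOD__NIL,
+//                             METHOD__NIL) {}
+//     protected:
+//         virtual void EmitCreateMngdMarshaler(ILCodeStream* pslILEmit);
+//     };
+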
+class ILNativeArrayMarshaler : public ILMngdMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ };
+
+ ILNativeArrayMarshaler() :
+ ILMngdMarshaler(
+ METHOD__MNGD_NATIVE_ARRAY_MARSHALER__CONVERT_SPACE_TO_MANAGED,
+ METHOD__MNGD_NATIVE_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_MANAGED,
+ METHOD__MNGD_NATIVE_ARRAY_MARSHALER__CONVERT_SPACE_TO_NATIVE,
+ METHOD__MNGD_NATIVE_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_NATIVE,
+ METHOD__MNGD_NATIVE_ARRAY_MARSHALER__CLEAR_NATIVE,
+ METHOD__MNGD_NATIVE_ARRAY_MARSHALER__CLEAR_NATIVE_CONTENTS,
+ METHOD__NIL
+ )
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_dwSavedSizeArg = LOCAL_NUM_UNUSED;
+ }
+
+ virtual void EmitMarshalArgumentCLRToNative();
+ virtual void EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit);
+ virtual void EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitClearNative(ILCodeStream* pslILEmit);
+ virtual void EmitClearNativeContents(ILCodeStream* pslILEmit);
+ virtual void EmitMarshalArgumentNativeToCLRByref();
+ virtual void EmitMarshalArgumentCLRToNativeByref();
+
+protected:
+
+ bool UsePinnedArraySpecialCase();
+
+ BOOL CheckSizeParamIndexArg(const CREATE_MARSHALER_CARRAY_OPERANDS &mops, CorElementType *pElementType);
+
+ // Calculate element count and load it on evaluation stack
+ void EmitLoadElementCount(ILCodeStream* pslILEmit);
+
+ virtual void EmitCreateMngdMarshaler(ILCodeStream* pslILEmit);
+
+ void EmitLoadNativeSize(ILCodeStream* pslILEmit);
+ void EmitNewSavedSizeArgLocal();
+
+private :
+ DWORD m_dwSavedSizeArg;
+};
+
+class MngdNativeArrayMarshaler
+{
+public:
+ static FCDECL3(void, CreateMarshaler, MngdNativeArrayMarshaler* pThis, MethodTable* pMT, UINT32 dwFlags);
+ static FCDECL3(void, ConvertSpaceToNative, MngdNativeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+ static FCDECL3(void, ConvertContentsToNative, MngdNativeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+ static FCDECL4(void, ConvertSpaceToManaged, MngdNativeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome, INT32 cElements);
+ static FCDECL3(void, ConvertContentsToManaged, MngdNativeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+ static FCDECL3(void, ClearNative, MngdNativeArrayMarshaler* pThis, void** pNativeHome, INT32 cElements);
+ static FCDECL3(void, ClearNativeContents, MngdNativeArrayMarshaler* pThis, void** pNativeHome, INT32 cElements);
+
+ static void DoClearNativeContents(MngdNativeArrayMarshaler* pThis, void** pNativeHome, INT32 cElements);
+
+ enum
+ {
+ FLAG_NATIVE_DATA_VALID = 0x40000000
+ };
+
+ MethodTable* m_pElementMT;
+ TypeHandle m_Array;
+ BOOL m_NativeDataValid;
+ BOOL m_BestFitMap;
+ BOOL m_ThrowOnUnmappableChar;
+ VARTYPE m_vt;
+};
+
+
+#ifdef FEATURE_COMINTEROP
+class ILSafeArrayMarshaler : public ILMngdMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ };
+
+ ILSafeArrayMarshaler() :
+ ILMngdMarshaler(
+ METHOD__MNGD_SAFE_ARRAY_MARSHALER__CONVERT_SPACE_TO_MANAGED,
+ METHOD__MNGD_SAFE_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_MANAGED,
+ METHOD__MNGD_SAFE_ARRAY_MARSHALER__CONVERT_SPACE_TO_NATIVE,
+ METHOD__MNGD_SAFE_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_NATIVE,
+ METHOD__MNGD_SAFE_ARRAY_MARSHALER__CLEAR_NATIVE,
+ METHOD__NIL,
+ METHOD__NIL
+ ),
+ m_dwOriginalManagedLocalNum(-1)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+protected:
+
+ virtual void EmitCreateMngdMarshaler(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+
+ virtual void EmitReInitNative(ILCodeStream* pslILEmit)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ if (NeedsCheckForStatic() && pslILEmit->GetStreamType() != ILStubLinker::kExceptionCleanup)
+ {
+ // Keep the original value in the native home as we are not going to allocate a
+ // new one. If we cleared it here, we wouldn't be able to ConvertContentsToNative.
+ // Always perform the real re-init in the ExceptionCleanup stream so the caller
+ // doesn't get back garbage.
+ }
+ else
+ {
+ ILMngdMarshaler::EmitReInitNative(pslILEmit);
+ }
+ }
+
+ bool NeedsCheckForStatic()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsByref(m_dwMarshalFlags) && !IsCLRToNative(m_dwMarshalFlags) && IsIn(m_dwMarshalFlags) && IsOut(m_dwMarshalFlags);
+ }
+
+ DWORD m_dwOriginalManagedLocalNum;
+};
+
+class MngdSafeArrayMarshaler
+{
+public:
+ static FCDECL4(void, CreateMarshaler, MngdSafeArrayMarshaler* pThis, MethodTable* pMT, UINT32 iRank, UINT32 dwFlags);
+ static FCDECL3(void, ConvertSpaceToNative, MngdSafeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+ static FCDECL4(void, ConvertContentsToNative, MngdSafeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome, Object* pOriginalManagedUNSAFE);
+ static FCDECL3(void, ConvertSpaceToManaged, MngdSafeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+ static FCDECL3(void, ConvertContentsToManaged, MngdSafeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+ static FCDECL3(void, ClearNative, MngdSafeArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+
+ enum StaticCheckStateFlags
+ {
+ SCSF_CheckForStatic = 1,
+ SCSF_IsStatic = 2,
+ SCSF_NativeDataValid = 4
+ };
+
+ MethodTable* m_pElementMT;
+ int m_iRank;
+ VARTYPE m_vt;
+ BYTE m_fStatic; // StaticCheckStateFlags
+ BYTE m_nolowerbounds;
+};
+
+class ILHiddenLengthArrayMarshaler : public ILMngdMarshaler
+{
+ friend class MngdHiddenLengthArrayMarshaler;
+
+public:
+ enum
+ {
+ c_nativeSize = sizeof(LPVOID),
+ c_CLRSize = sizeof(OBJECTREF),
+ c_fInOnly = FALSE,
+ };
+
+ ILHiddenLengthArrayMarshaler() :
+ ILMngdMarshaler(METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CONVERT_SPACE_TO_MANAGED,
+ METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_MANAGED,
+ METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CONVERT_SPACE_TO_NATIVE,
+ METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CONVERT_CONTENTS_TO_NATIVE,
+ METHOD__WIN32NATIVE__COTASKMEMFREE,
+ METHOD__MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER__CLEAR_NATIVE_CONTENTS,
+ METHOD__NIL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_dwMngdMarshalerLocalNum = -1;
+ }
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+
+ virtual void EmitCreateMngdMarshaler(ILCodeStream* pslILEmit);
+ virtual void EmitMarshalArgumentCLRToNative();
+ virtual void EmitConvertSpaceCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+ virtual void EmitClearNative(ILCodeStream* pslILEmit);
+ virtual void EmitClearNativeContents(ILCodeStream* pslILEmit);
+
+private:
+ bool CanUsePinnedArray();
+ void EmitLoadNativeArrayLength(ILCodeStream *pslILEmit);
+
+ virtual MethodDesc *GetConvertContentsToManagedMethod();
+ virtual MethodDesc *GetConvertContentsToNativeMethod();
+ virtual MethodDesc *GetClearNativeContentsMethod();
+
+ MethodDesc *GetExactMarshalerMethod(MethodDesc *pGenericMD);
+};
+
+class MngdHiddenLengthArrayMarshaler
+{
+public:
+ static FCDECL4(void, CreateMarshaler, MngdHiddenLengthArrayMarshaler* pThis, MethodTable* pMT, SIZE_T cbElement, UINT16 vt);
+ static FCDECL3(void, ConvertSpaceToNative, MngdHiddenLengthArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+ static FCDECL3(void, ConvertContentsToNative, MngdHiddenLengthArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+ static FCDECL4(void, ConvertSpaceToManaged, MngdHiddenLengthArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome, INT32 cElements);
+ static FCDECL3(void, ConvertContentsToManaged, MngdHiddenLengthArrayMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+ static FCDECL3(void, ClearNativeContents, MngdHiddenLengthArrayMarshaler* pThis, void** pNativeHome, INT32 cElements);
+
+
+private:
+ SIZE_T GetArraySize(SIZE_T elements);
+ void DoClearNativeContents(void** pNativeHome, INT32 cElements);
+
+private:
+ MethodTable *m_pElementMT;
+ SIZE_T m_cbElementSize;
+ VARTYPE m_vt;
+};
+#endif // FEATURE_COMINTEROP
+
+
+class ILReferenceCustomMarshaler : public ILMngdMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ };
+
+ ILReferenceCustomMarshaler() :
+ ILMngdMarshaler(
+ METHOD__NIL,
+ METHOD__MNGD_REF_CUSTOM_MARSHALER__CONVERT_CONTENTS_TO_MANAGED,
+ METHOD__NIL,
+ METHOD__MNGD_REF_CUSTOM_MARSHALER__CONVERT_CONTENTS_TO_NATIVE,
+ METHOD__MNGD_REF_CUSTOM_MARSHALER__CLEAR_NATIVE,
+ METHOD__NIL,
+ METHOD__MNGD_REF_CUSTOM_MARSHALER__CLEAR_MANAGED
+ )
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+protected:
+ virtual void EmitCreateMngdMarshaler(ILCodeStream* pslILEmit);
+};
+
+class MngdRefCustomMarshaler
+{
+public:
+ static FCDECL2(void, CreateMarshaler, MngdRefCustomMarshaler* pThis, void* pCMHelper);
+ static FCDECL3(void, ConvertContentsToNative, MngdRefCustomMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+ static FCDECL3(void, ConvertContentsToManaged, MngdRefCustomMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+ static FCDECL3(void, ClearNative, MngdRefCustomMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+ static FCDECL3(void, ClearManaged, MngdRefCustomMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+
+ static void DoClearNativeContents(MngdRefCustomMarshaler* pThis, OBJECTREF* pManagedHome, void** pNativeHome);
+
+ CustomMarshalerHelper* m_pCMHelper;
+};
+
+
+#ifdef FEATURE_COMINTEROP
+class ILUriMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(LPVOID),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+ static void EmitConvertCLRUriToWinRTUri(ILCodeStream* pslILEmit, BaseDomain* pDomain);
+ static void EmitConvertWinRTUriToCLRUri(ILCodeStream* pslILEmit, BaseDomain* pDomain);
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+
+ virtual bool NeedsClearNative();
+ void EmitClearNative(ILCodeStream* pslILEmit);
+};
+
+class ILNCCEventArgsMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(LPVOID),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+ static void EmitConvertCLREventArgsToWinRTEventArgs(ILCodeStream* pslILEmit, BaseDomain* pDomain);
+ static void EmitConvertWinRTEventArgsToCLREventArgs(ILCodeStream* pslILEmit, BaseDomain* pDomain);
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+
+ virtual bool NeedsClearNative();
+ void EmitClearNative(ILCodeStream* pslILEmit);
+};
+
+class ILPCEventArgsMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = TRUE,
+ c_nativeSize = sizeof(LPVOID),
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+ static void EmitConvertCLREventArgsToWinRTEventArgs(ILCodeStream* pslILEmit, BaseDomain* pDomain);
+ static void EmitConvertWinRTEventArgsToCLREventArgs(ILCodeStream* pslILEmit, BaseDomain* pDomain);
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+
+ virtual bool NeedsClearNative();
+ void EmitClearNative(ILCodeStream* pslILEmit);
+};
+
+class ILDateTimeMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ c_nativeSize = sizeof(INT64), // = sizeof(Windows::Foundation::DateTime)
+ c_CLRSize = VARIABLESIZE,
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+
+ virtual bool NeedsClearNative();
+ virtual void EmitReInitNative(ILCodeStream* pslILEmit);
+};
+
+class ILNullableMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ c_nativeSize = sizeof(LPVOID),
+ c_CLRSize = VARIABLESIZE,
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual bool NeedsClearNative();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+ virtual void EmitClearNative(ILCodeStream* pslILEmit);
+
+private:
+ MethodDesc *GetExactMarshalerMethod(MethodDesc *pGenericMD);
+};
+
+class ILSystemTypeMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ c_nativeSize = sizeof(TypeNameNative),
+ c_CLRSize = sizeof(OBJECTREF)
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream * pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream * pslILEmit);
+
+ virtual bool NeedsClearNative();
+ virtual void EmitClearNative(ILCodeStream * pslILEmit);
+ virtual void EmitReInitNative(ILCodeStream * pslILEmit);
+};
+
+class ILHResultExceptionMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ c_nativeSize = sizeof(INT32), // = sizeof(Windows::Foundation::HResult)
+ c_CLRSize = sizeof(OBJECTREF),
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+
+ virtual bool NeedsClearNative();
+};
+
+class ILKeyValuePairMarshaler : public ILMarshaler
+{
+public:
+ enum
+ {
+ c_fInOnly = FALSE,
+ c_nativeSize = sizeof(LPVOID),
+ c_CLRSize = VARIABLESIZE,
+ };
+
+protected:
+ virtual LocalDesc GetNativeType();
+ virtual LocalDesc GetManagedType();
+ virtual bool NeedsClearNative();
+ virtual void EmitConvertContentsCLRToNative(ILCodeStream* pslILEmit);
+ virtual void EmitConvertContentsNativeToCLR(ILCodeStream* pslILEmit);
+ virtual void EmitClearNative(ILCodeStream* pslILEmit);
+
+private:
+ MethodDesc *GetExactMarshalerMethod(MethodDesc *pGenericMD);
+};
+
+#endif // FEATURE_COMINTEROP
diff --git a/src/vm/ilstubcache.cpp b/src/vm/ilstubcache.cpp
new file mode 100644
index 0000000000..3f2e38b24a
--- /dev/null
+++ b/src/vm/ilstubcache.cpp
@@ -0,0 +1,971 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: ILStubCache.cpp
+//
+
+//
+
+
+#include "common.h"
+#include "ilstubcache.h"
+#include "dllimport.h"
+#include <formattype.h>
+#include "jitinterface.h"
+#include "sigbuilder.h"
+#include "ngenhash.inl"
+#include "compile.h"
+
+#include "eventtrace.h"
+
+const char* FormatSig(MethodDesc* pMD, LoaderHeap *pHeap, AllocMemTracker *pamTracker);
+
+ILStubCache::ILStubCache(LoaderHeap *pHeap) :
+ CClosedHashBase(
+#ifdef _DEBUG
+ 3,
+#else
+ 17, // CClosedHashTable will grow as necessary
+#endif
+
+ sizeof(ILCHASHENTRY),
+ FALSE
+ ),
+ m_crst(CrstStubCache, CRST_UNSAFE_ANYMODE),
+ m_heap(pHeap),
+ m_pStubMT(NULL)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+void ILStubCache::Init(LoaderHeap* pHeap)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ CONSISTENCY_CHECK(NULL == m_heap);
+ m_heap = pHeap;
+}
+
+
+#ifndef DACCESS_COMPILE
+
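+// Converts a signature that may reference types relative to pSigModule (and the
+// supplied generic context) into a self-contained, module-independent form
+// allocated on pCreationHeap, so an IL stub MethodDesc can carry it across modules.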
+void CreateModuleIndependentSignature(LoaderHeap* pCreationHeap,
+ AllocMemTracker* pamTracker,
+ Module* pSigModule,
+ PCCOR_SIGNATURE pSig, DWORD cbSig,
+ SigTypeContext *pTypeContext,
+ PCCOR_SIGNATURE* ppNewSig, DWORD* pcbNewSig)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pSigModule, NULL_NOT_OK));
+ PRECONDITION(CheckPointer(ppNewSig, NULL_NOT_OK));
+ PRECONDITION(CheckPointer(pcbNewSig, NULL_NOT_OK));
+ }
+ CONTRACTL_END;
+
+ SigPointer sigPtr(pSig, cbSig);
+
+ SigBuilder sigBuilder;
+ sigPtr.ConvertToInternalSignature(pSigModule, pTypeContext, &sigBuilder);
+
+ DWORD cbNewSig;
+ PVOID pConvertedSig = sigBuilder.GetSignature(&cbNewSig);
+
+ PVOID pNewSig = pamTracker->Track(pCreationHeap->AllocMem(S_SIZE_T(cbNewSig)));
+ memcpy(pNewSig, pConvertedSig, cbNewSig);
+
+ *ppNewSig = (PCCOR_SIGNATURE)pNewSig;
+ *pcbNewSig = cbNewSig;
+}
+
+#ifndef CLR_STANDALONE_BINDER
+// static
+MethodDesc* ILStubCache::CreateAndLinkNewILStubMethodDesc(LoaderAllocator* pAllocator, MethodTable* pMT, DWORD dwStubFlags,
+ Module* pSigModule, PCCOR_SIGNATURE pSig, DWORD cbSig, SigTypeContext *pTypeContext,
+ ILStubLinker* pStubLinker)
+{
+ CONTRACT (MethodDesc*)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMT, NULL_NOT_OK));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+ AllocMemTracker amTracker;
+
+ MethodDesc *pStubMD = ILStubCache::CreateNewMethodDesc(pAllocator->GetHighFrequencyHeap(),
+ pMT,
+ dwStubFlags,
+ pSigModule,
+ pSig, cbSig,
+ pTypeContext,
+ &amTracker);
+
+ amTracker.SuppressRelease();
+
+ ILStubResolver *pResolver = pStubMD->AsDynamicMethodDesc()->GetILStubResolver();
+
+ pResolver->SetStubMethodDesc(pStubMD);
+
+
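+ // Link the generated IL: compute the code size and max stack depth, have the
+ // resolver allocate the IL header, then copy in the code and local signature.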
+ {
+ UINT maxStack;
+ size_t cbCode;
+ DWORD cbSig;
+ BYTE * pbBuffer;
+ BYTE * pbLocalSig;
+
+ cbCode = pStubLinker->Link(&maxStack);
+ cbSig = pStubLinker->GetLocalSigSize();
+
+ COR_ILMETHOD_DECODER * pILHeader = pResolver->AllocGeneratedIL(cbCode, cbSig, maxStack);
+ pbBuffer = (BYTE *)pILHeader->Code;
+ pbLocalSig = (BYTE *)pILHeader->LocalVarSig;
+ _ASSERTE(cbSig == pILHeader->cbLocalVarSig);
+
+ pStubLinker->GenerateCode(pbBuffer, cbCode);
+ pStubLinker->GetLocalSig(pbLocalSig, cbSig);
+
+ pResolver->SetJitFlags(CORJIT_FLG_IL_STUB);
+ }
+
+ pResolver->SetTokenLookupMap(pStubLinker->GetTokenLookupMap());
+
+ RETURN pStubMD;
+
+}
+#endif
+
+// static
+MethodDesc* ILStubCache::CreateNewMethodDesc(LoaderHeap* pCreationHeap, MethodTable* pMT, DWORD dwStubFlags,
+ Module* pSigModule, PCCOR_SIGNATURE pSig, DWORD cbSig, SigTypeContext *pTypeContext,
+ AllocMemTracker* pamTracker)
+{
+ CONTRACT (MethodDesc*)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMT, NULL_NOT_OK));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // @TODO: reuse the same chunk for multiple methods
+ MethodDescChunk* pChunk = MethodDescChunk::CreateChunk(pCreationHeap,
+ 1,
+ mcDynamic,
+ TRUE /* fNonVtableSlot */,
+ TRUE /* fNativeCodeSlot */,
+ FALSE /* fComPlusCallInfo */,
+ pMT,
+ pamTracker);
+
+ // Note: The method desc memory is zero initialized
+
+ DynamicMethodDesc* pMD = (DynamicMethodDesc*)pChunk->GetFirstMethodDesc();
+
+ pMD->SetMemberDef(0);
+ pMD->SetSlot(MethodTable::NO_SLOT); // we can't ever use the slot for dynamic methods
+ // the no metadata part of the method desc
+ pMD->m_pszMethodName = (PTR_CUTF8)"IL_STUB";
+ pMD->m_dwExtendedFlags = mdPublic | DynamicMethodDesc::nomdILStub;
+
+ pMD->SetTemporaryEntryPoint(pMT->GetLoaderAllocator(), pamTracker);
+
+ //
+ // convert signature to a compatible signature if needed
+ //
+ PCCOR_SIGNATURE pNewSig;
+ DWORD cbNewSig;
+
+ // If we are in the same module and don't have any generics, we can use the incoming signature.
+ // Note that pTypeContext may be non-empty and the signature can still have no E_T_(M)VAR in it.
+ // We could do a more precise check if we cared.
+ if (pMT->GetModule() == pSigModule && (pTypeContext == NULL || pTypeContext->IsEmpty()))
+ {
+ pNewSig = pSig;
+ cbNewSig = cbSig;
+ }
+ else
+ {
+ CreateModuleIndependentSignature(pCreationHeap, pamTracker, pSigModule, pSig, cbSig, pTypeContext, &pNewSig, &cbNewSig);
+ }
+ pMD->SetStoredMethodSig(pNewSig, cbNewSig);
+
+ SigPointer sigPtr(pNewSig, cbNewSig);
+ ULONG callConvInfo;
+ IfFailThrow(sigPtr.GetCallingConvInfo(&callConvInfo));
+
+ if (!(callConvInfo & CORINFO_CALLCONV_HASTHIS))
+ {
+ pMD->m_dwExtendedFlags |= mdStatic;
+ pMD->SetStatic();
+ }
+
+ pMD->m_pResolver = (ILStubResolver*)pamTracker->Track(pCreationHeap->AllocMem(S_SIZE_T(sizeof(ILStubResolver))));
+#ifdef _DEBUG
+ memset(pMD->m_pResolver, 0xCC, sizeof(ILStubResolver));
+#endif // _DEBUG
+ pMD->m_pResolver = new (pMD->m_pResolver) ILStubResolver();
+
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+ if (SF_IsArrayOpStub(dwStubFlags))
+ {
+ pMD->GetILStubResolver()->SetStubType(ILStubResolver::ArrayOpStub);
+ }
+ else
+#endif
+#ifdef FEATURE_STUBS_AS_IL
+ if (SF_IsMulticastDelegateStub(dwStubFlags))
+ {
+ pMD->m_dwExtendedFlags |= DynamicMethodDesc::nomdMulticastStub;
+ pMD->GetILStubResolver()->SetStubType(ILStubResolver::MulticastDelegateStub);
+ }
+ else
+ if (SF_IsUnboxingILStub(dwStubFlags))
+ {
+ pMD->m_dwExtendedFlags |= DynamicMethodDesc::nomdUnboxingILStub;
+ pMD->GetILStubResolver()->SetStubType(ILStubResolver::UnboxingILStub);
+ }
+ else
+ if (SF_IsInstantiatingStub(dwStubFlags))
+ {
+ pMD->GetILStubResolver()->SetStubType(ILStubResolver::InstantiatingStub);
+ }
+ else
+#endif
+#ifdef FEATURE_COMINTEROP
+ if (SF_IsCOMStub(dwStubFlags))
+ {
+ // mark certain types of stub MDs with special flags so that ILStubManager recognizes them
+ if (SF_IsReverseStub(dwStubFlags))
+ {
+ pMD->m_dwExtendedFlags |= DynamicMethodDesc::nomdReverseStub;
+
+ ILStubResolver::ILStubType type = (SF_IsWinRTStub(dwStubFlags) ? ILStubResolver::WinRTToCLRInteropStub : ILStubResolver::COMToCLRInteropStub);
+ pMD->GetILStubResolver()->SetStubType(type);
+ }
+ else
+ {
+ ILStubResolver::ILStubType type = (SF_IsWinRTStub(dwStubFlags) ? ILStubResolver::CLRToWinRTInteropStub : ILStubResolver::CLRToCOMInteropStub);
+ pMD->GetILStubResolver()->SetStubType(type);
+ }
+
+ if (SF_IsWinRTDelegateStub(dwStubFlags))
+ {
+ pMD->m_dwExtendedFlags |= DynamicMethodDesc::nomdDelegateCOMStub;
+ }
+ }
+ else
+#endif
+ {
+ // mark certain types of stub MDs with special flags so that ILStubManager recognizes them
+ if (SF_IsReverseStub(dwStubFlags))
+ {
+ pMD->m_dwExtendedFlags |= DynamicMethodDesc::nomdReverseStub;
+ pMD->GetILStubResolver()->SetStubType(ILStubResolver::NativeToCLRInteropStub);
+ }
+ else
+ {
+ if (SF_IsDelegateStub(dwStubFlags))
+ {
+ pMD->m_dwExtendedFlags |= DynamicMethodDesc::nomdDelegateStub;
+ }
+ else if (SF_IsCALLIStub(dwStubFlags))
+ {
+ pMD->m_dwExtendedFlags |= DynamicMethodDesc::nomdCALLIStub;
+ }
+ pMD->GetILStubResolver()->SetStubType(ILStubResolver::CLRToNativeInteropStub);
+ }
+ }
+
+// if we made it this far, we can set a more descriptive stub name
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+ if (SF_IsArrayOpStub(dwStubFlags))
+ {
+ switch(dwStubFlags)
+ {
+ case ILSTUB_ARRAYOP_GET: pMD->m_pszMethodName = (PTR_CUTF8)"IL_STUB_Array_Get";
+ break;
+ case ILSTUB_ARRAYOP_SET: pMD->m_pszMethodName = (PTR_CUTF8)"IL_STUB_Array_Set";
+ break;
+ case ILSTUB_ARRAYOP_ADDRESS: pMD->m_pszMethodName = (PTR_CUTF8)"IL_STUB_Array_Address";
+ break;
+ default: _ASSERTE(!"Unknown array il stub");
+ }
+ }
+ else
+#endif
+ {
+ pMD->m_pszMethodName = pMD->GetILStubResolver()->GetStubMethodName();
+ }
+
+
+#ifdef _DEBUG
+ pMD->m_pszDebugMethodName = pMD->m_pszMethodName;
+ pMD->m_pszDebugClassName = ILStubResolver::GetStubClassName(pMD); // must be called after type is set
+ pMD->m_pszDebugMethodSignature = FormatSig(pMD, pCreationHeap, pamTracker);
+ pMD->m_pDebugMethodTable.SetValue(pMT);
+#endif // _DEBUG
+
+ RETURN pMD;
+}
+
+//
+// This will get or create a MethodTable in the Module/AppDomain on which
+// we can place a new IL stub MethodDesc.
+//
+MethodTable* ILStubCache::GetOrCreateStubMethodTable(Module* pModule)
+{
+ CONTRACT (MethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+#ifdef _DEBUG
+ if (pModule->GetDomain()->IsSharedDomain() || pModule->GetDomain()->AsAppDomain()->IsCompilationDomain())
+ {
+ // in the shared domain and compilation AD we are associated with the module
+ CONSISTENCY_CHECK(pModule->GetILStubCache() == this);
+ }
+ else
+ {
+ // otherwise we are associated with the AD
+ AppDomain* pStubCacheDomain = AppDomain::GetDomain(this);
+ CONSISTENCY_CHECK(pStubCacheDomain == pModule->GetDomain()->AsAppDomain());
+ }
+#endif // _DEBUG
+
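+ // Lazily create the stub MethodTable using double-checked locking; the
+ // VolatileStore below publishes the fully constructed MethodTable so racing
+ // readers never observe a partially initialized one.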
+ if (NULL == m_pStubMT)
+ {
+ CrstHolder ch(&m_crst);
+
+ if (NULL == m_pStubMT)
+ {
+ AllocMemTracker amt;
+ MethodTable* pNewMT = CreateMinimalMethodTable(pModule, m_heap, &amt);
+ amt.SuppressRelease();
+ VolatileStore<MethodTable*>(&m_pStubMT, pNewMT);
+ }
+ }
+
+ RETURN m_pStubMT;
+}
+
+#endif // DACCESS_COMPILE
+
+//
+// NGEN'ed IL stubs
+//
+// - We will never NGEN a CALLI pinvoke or vararg pinvoke
+//
+// - We will always place the IL stub MethodDesc on the same MethodTable that the
+// PInvoke or COM Interop call declaration lives on.
+//
+// - We will not pre-populate our runtime ILStubCache with compile-time
+// information (i.e. NGENed stubs are only reachable from the same NGEN image.)
+//
+// JIT'ed IL stubs
+//
+// - The ILStubCache is per-BaseDomain
+//
+// - Each BaseDomain's ILStubCache will lazily create a "minimal MethodTable" to
+// serve as the home for IL stub MethodDescs
+//
+// - The created MethodTables will use the Module belonging to one of the
+// following, based on what type of interop stub we need to create first.
+//
+// - If that stub is for a static-sig-based pinvoke, we will use the
+// Module belonging to that pinvoke's MethodDesc.
+//
+// - If that stub is for a CALLI or vararg pinvoke, we will use the
+// Module belonging to the VASigCookie that the caller supplied to us.
+//
+// It's important to point out that the Module we latch onto here has no knowledge
+// of the MethodTable that we've just "added" to it. There only exists a "back
+// pointer" to the Module from the MethodTable itself. So we're really only using
+// that module to answer the question of what BaseDomain the MethodTable lives in.
+// So as long as the BaseDomain for that module is the same as the BaseDomain the
+// ILStubCache lives in, the overall picture stays consistent.
+//
+// We're relying on the fact that a VASigCookie may only mention types within the
+// corresponding module used to qualify the signature and the fact that interop
+// stubs may only reference mscorlib code or code related to a type mentioned in
+// the signature. Both of these are true unless the sig is allowed to contain
+// ELEMENT_TYPE_INTERNAL, which may refer to any type.
+//
+// We can only access E_T_INTERNAL through LCG, which does not permit referring
+// to types in other BaseDomains.
+//
+//
+// Places for improvement:
+//
+// - allow NGEN'ing of CALLI pinvoke and vararg pinvoke
+//
+// - pre-populate the per-BaseDomain cache with IL stubs from NGEN'ed image
+//
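+// Illustrative usage sketch (hypothetical caller, not product code): interop
+// drives this cache roughly as follows, where pParams is an ILStubHashBlob key
+// built from the stub flags and signature:
+//
+//     AllocMemTracker amTracker;
+//     bool fILStubCreator = false;
+//     MethodDesc* pStubMD = pCache->GetStubMethodDesc(
+//         pTargetMD, pParams, dwStubFlags, pSigModule, pSig, cbSig,
+//         &amTracker, fILStubCreator, NULL /* pLastMD */);
+//     if (fILStubCreator)
+//         amTracker.SuppressRelease(); // the cache now owns the new MethodDesc
+//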
+
+MethodDesc* ILStubCache::GetStubMethodDesc(
+ MethodDesc *pTargetMD,
+ ILStubHashBlob* pParams,
+ DWORD dwStubFlags,
+ Module* pSigModule,
+ PCCOR_SIGNATURE pSig,
+ DWORD cbSig,
+ AllocMemTracker* pamTracker,
+ bool& bILStubCreator,
+ MethodDesc *pLastMD)
+{
+ CONTRACT (MethodDesc*)
+ {
+ STANDARD_VM_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ MethodDesc* pMD = NULL;
+ bool bFireETWCacheHitEvent = true;
+
+#ifndef DACCESS_COMPILE
+ ILStubHashBlob* pBlob = NULL;
+
+ INDEBUG(LPCSTR pszResult = "[hit cache]");
+
+
+ if (SF_IsSharedStub(dwStubFlags))
+ {
+ CrstHolder ch(&m_crst);
+
+ // Try to find the stub
+ ILCHASHENTRY* phe = NULL;
+
+ phe = (ILCHASHENTRY*)Find((LPVOID)pParams);
+ if (phe)
+ {
+ pMD = phe->m_pMethodDesc;
+ if (pMD == pLastMD)
+ bFireETWCacheHitEvent = false;
+ }
+ }
+
+ if (!pMD)
+ {
+ size_t cbSizeOfBlob = pParams->m_cbSizeOfBlob;
+ AllocMemHolder<ILStubHashBlob> pBlobHolder( m_heap->AllocMem(S_SIZE_T(cbSizeOfBlob)) );
+
+
+ //
+ // Couldn't find it, let's make a new one.
+ //
+
+ Module *pContainingModule = pSigModule;
+ if (pTargetMD != NULL)
+ {
+ // loader module may be different from signature module for generic targets
+ pContainingModule = pTargetMD->GetLoaderModule();
+ }
+
+ MethodTable *pStubMT = GetOrCreateStubMethodTable(pContainingModule);
+
+ SigTypeContext typeContext;
+ if (pTargetMD != NULL)
+ {
+ SigTypeContext::InitTypeContext(pTargetMD, &typeContext);
+ }
+
+ pMD = ILStubCache::CreateNewMethodDesc(m_heap, pStubMT, dwStubFlags, pSigModule, pSig, cbSig, &typeContext, pamTracker);
+
+ if (SF_IsSharedStub(dwStubFlags))
+ {
+
+ CrstHolder ch(&m_crst);
+
+ ILCHASHENTRY* phe = NULL;
+
+ bool bNew;
+ phe = (ILCHASHENTRY*)FindOrAdd((LPVOID)pParams, bNew);
+ bILStubCreator |= bNew;
+
+ if (NULL != phe)
+ {
+ if (bNew)
+ {
+ pBlobHolder.SuppressRelease();
+
+ phe->m_pMethodDesc = pMD;
+ pBlob = pBlobHolder;
+ phe->m_pBlob = pBlob;
+
+ _ASSERTE(pParams->m_cbSizeOfBlob == cbSizeOfBlob);
+ memcpy(pBlob, pParams, cbSizeOfBlob);
+
+ INDEBUG(pszResult = "[missed cache]");
+ bFireETWCacheHitEvent = false;
+ }
+ else
+ {
+ INDEBUG(pszResult = "[hit cache][wasted new MethodDesc due to race]");
+ }
+ pMD = phe->m_pMethodDesc;
+ }
+ else
+ {
+ pMD = NULL;
+ }
+ }
+ else
+ {
+ INDEBUG(pszResult = "[cache disabled for COM->CLR field access stubs]");
+ }
+ }
+
+#ifndef FEATURE_CORECLR
+ //
+ // Publish ETW events for IL stubs
+ //
+ if (bFireETWCacheHitEvent)
+ {
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ILStubCacheHit))
+ {
+
+ SString strNamespaceOrClassName, strMethodName, strMethodSignature;
+ UINT64 uModuleId = 0;
+
+ if (pTargetMD)
+ {
+ pTargetMD->GetMethodInfoWithNewSig(strNamespaceOrClassName, strMethodName, strMethodSignature);
+ uModuleId = (UINT64)pTargetMD->GetModule()->GetAddrModuleID();
+ }
+
+ DWORD dwToken = 0;
+ if (pTargetMD)
+ dwToken = pTargetMD->GetMemberDef();
+
+ //
+ // Truncate string fields. Make sure the whole event is less than 64KB
+ //
+ TruncateUnicodeString(strNamespaceOrClassName, ETW_IL_STUB_EVENT_STRING_FIELD_MAXSIZE);
+ TruncateUnicodeString(strMethodName, ETW_IL_STUB_EVENT_STRING_FIELD_MAXSIZE);
+ TruncateUnicodeString(strMethodSignature, ETW_IL_STUB_EVENT_STRING_FIELD_MAXSIZE);
+
+ FireEtwILStubCacheHit(
+ GetClrInstanceId(), // ClrInstanceId
+ uModuleId, // ModuleIdentifier
+ (UINT64)pMD, // StubMethodIdentifier
+ dwToken, // ManagedInteropMethodToken
+ strNamespaceOrClassName.GetUnicode(), // ManagedInteropMethodNamespace
+ strMethodName.GetUnicode(), // ManagedInteropMethodName
+ strMethodSignature.GetUnicode() // ManagedInteropMethodSignature
+ );
+ }
+ }
+#endif // !FEATURE_CORECLR
+
+ if (!pMD)
+ {
+ // Couldn't grow hash table due to lack of memory.
+ COMPlusThrowOM();
+ }
+
+#ifdef _DEBUG
+ CQuickBytes qbManaged;
+ PrettyPrintSig(pSig, cbSig, "*", &qbManaged, pSigModule->GetMDImport(), NULL);
+ LOG((LF_STUBS, LL_INFO1000, "ILSTUBCACHE: ILStubCache::GetStubMethodDesc %s StubMD: %p module: %p blob: %p sig: %s\n", pszResult, pMD, pSigModule, pBlob, qbManaged.Ptr()));
+#endif // _DEBUG
+#endif // DACCESS_COMPILE
+
+ RETURN pMD;
+}
+
+void ILStubCache::DeleteEntry(void* pParams)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ CrstHolder ch(&m_crst);
+
+ ILCHASHENTRY* phe = NULL;
+
+ phe = (ILCHASHENTRY*)Find((LPVOID)pParams);
+ if (phe)
+ {
+#ifdef _DEBUG
+ LOG((LF_STUBS, LL_INFO1000, "ILSTUBCACHE: ILStubCache::DeleteEntry StubMD: %p\n", phe->m_pMethodDesc));
+#endif
+
+ Delete(pParams);
+ }
+}
+
+void ILStubCache::AddMethodDescChunkWithLockTaken(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ CrstHolder ch(&m_crst);
+
+ pMD->GetMethodTable()->GetClass()->AddChunkIfItHasNotBeenAdded(pMD->GetMethodDescChunk());
+#endif // DACCESS_COMPILE
+}
+
+//---------------------------------------------------------
+// Destructor
+//---------------------------------------------------------
+ILStubCache::~ILStubCache()
+{
+}
+
+
+//*****************************************************************************
+// Hash is called with a pointer to an element in the table. You must override
+// this method and provide a hash algorithm for your element type.
+//*****************************************************************************
+unsigned int ILStubCache::Hash( // The key value.
+ void const* pData) // Raw data to hash.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ const ILStubHashBlob* pBlob = (const ILStubHashBlob *)pData;
+
+ size_t cb = pBlob->m_cbSizeOfBlob - sizeof(ILStubHashBlobBase);
+ int hash = 0;
+
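+ // Rotate-and-add combine over the payload bytes only; the size prefix is not
+ // hashed, but Compare still checks blob sizes for equality.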
+ for (size_t i = 0; i < cb; i++)
+ {
+ hash = _rotl(hash,1) + pBlob->m_rgbBlobData[i];
+ }
+
+ return hash;
+}
+
+//*****************************************************************************
+// Compare is used in the typical memcmp way: 0 means equality, -1/1 indicate the
+// direction of miscompare. In this system entries are either equal or not equal.
+//*****************************************************************************
+unsigned int ILStubCache::Compare( // 0, -1, or 1.
+ void const* pData, // Raw key data on lookup.
+ BYTE* pElement) // The element to compare data against.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ const ILStubHashBlob* pBlob1 = (const ILStubHashBlob*)pData;
+ const ILStubHashBlob* pBlob2 = (const ILStubHashBlob*)GetKey(pElement);
+ size_t cb1 = pBlob1->m_cbSizeOfBlob - sizeof(ILStubHashBlobBase);
+ size_t cb2 = pBlob2->m_cbSizeOfBlob - sizeof(ILStubHashBlobBase);
+
+ if (cb1 != cb2)
+ {
+ return 1; // not equal
+ }
+ else
+ {
+ // @TODO: use memcmp
+ for (size_t i = 0; i < cb1; i++)
+ {
+ if (pBlob1->m_rgbBlobData[i] != pBlob2->m_rgbBlobData[i])
+ {
+ return 1; // not equal
+ }
+ }
+ return 0; // equal
+ }
+}
+
+//*****************************************************************************
+// Return true if the element is free to be used.
+//*****************************************************************************
+CClosedHashBase::ELEMENTSTATUS ILStubCache::Status( // The status of the entry.
+ BYTE* pElement) // The element to check.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodDesc* pMD = ((ILCHASHENTRY*)pElement)->m_pMethodDesc;
+
+ if (pMD == NULL)
+ {
+ return FREE;
+ }
+ else if (pMD == (MethodDesc*)(-((INT_PTR)1)))
+ {
+ return DELETED;
+ }
+ else
+ {
+ return USED;
+ }
+}
+
+//*****************************************************************************
+// Sets the status of the given element.
+//*****************************************************************************
+void ILStubCache::SetStatus(
+ BYTE* pElement, // The element to set status for.
+ CClosedHashBase::ELEMENTSTATUS eStatus) // New status.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ILCHASHENTRY* phe = (ILCHASHENTRY*)pElement;
+
+ switch (eStatus)
+ {
+ case FREE: phe->m_pMethodDesc = NULL; break;
+ case DELETED: phe->m_pMethodDesc = (MethodDesc*)(-((INT_PTR)1)); break;
+ default:
+ _ASSERTE(!"MLCacheEntry::SetStatus(): Bad argument.");
+ }
+}
+
+//*****************************************************************************
+// Returns the internal key value for an element.
+//*****************************************************************************
+void* ILStubCache::GetKey( // The data to hash on.
+ BYTE* pElement) // The element to return data ptr for.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ILCHASHENTRY* phe = (ILCHASHENTRY*)pElement;
+ return (void *)(phe->m_pBlob);
+}
+
+#ifdef FEATURE_PREJIT
+
+// ============================================================================
+// Stub method hash entry methods
+// ============================================================================
+PTR_MethodDesc StubMethodHashEntry::GetMethod()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return pMD;
+}
+
+PTR_MethodDesc StubMethodHashEntry::GetStubMethod()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return pStubMD;
+}
+
+#ifndef DACCESS_COMPILE
+
+void StubMethodHashEntry::SetMethodAndStub(MethodDesc *pMD, MethodDesc *pStubMD)
+{
+ LIMITED_METHOD_CONTRACT;
+ this->pMD = pMD;
+ this->pStubMD = pStubMD;
+}
+
+// ============================================================================
+// Stub method hash table methods
+// ============================================================================
+/* static */ StubMethodHashTable *StubMethodHashTable::Create(LoaderAllocator *pAllocator, Module *pModule, DWORD dwNumBuckets, AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ LoaderHeap *pHeap = pAllocator->GetLowFrequencyHeap();
+ StubMethodHashTable *pThis = (StubMethodHashTable *)pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(StubMethodHashTable)));
+
+ new (pThis) StubMethodHashTable(pModule, pHeap, dwNumBuckets);
+
+ return pThis;
+}
+
+// Calculate a hash value for a key
+static DWORD Hash(MethodDesc *pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD dwHash = 0x87654321;
+#define INST_HASH_ADD(_value) dwHash = ((dwHash << 5) + dwHash) ^ (_value)
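+// (dwHash << 5) + dwHash is dwHash * 33: a djb2-style multiply-and-xor combine
+// over the member token and up to two levels of the class instantiation.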
+
+ INST_HASH_ADD(pMD->GetMemberDef());
+
+ Instantiation inst = pMD->GetClassInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle thArg = inst[i];
+
+ if (thArg.GetMethodTable())
+ {
+ INST_HASH_ADD(thArg.GetCl());
+
+ Instantiation sArgInst = thArg.GetInstantiation();
+ for (DWORD j = 0; j < sArgInst.GetNumArgs(); j++)
+ {
+ TypeHandle thSubArg = sArgInst[j];
+ if (thSubArg.GetMethodTable())
+ INST_HASH_ADD(thSubArg.GetCl());
+ else
+ INST_HASH_ADD(thSubArg.GetSignatureCorElementType());
+ }
+ }
+ else
+ INST_HASH_ADD(thArg.GetSignatureCorElementType());
+ }
+
+ return dwHash;
+}
+
+MethodDesc *StubMethodHashTable::FindMethodDesc(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ MethodDesc *pMDResult = NULL;
+
+ DWORD dwHash = Hash(pMD);
+ StubMethodHashEntry_t* pSearch;
+ LookupContext sContext;
+
+ for (pSearch = BaseFindFirstEntryByHash(dwHash, &sContext);
+ pSearch != NULL;
+ pSearch = BaseFindNextEntryByHash(&sContext))
+ {
+ if (pSearch->GetMethod() == pMD)
+ {
+ pMDResult = pSearch->GetStubMethod();
+ break;
+ }
+ }
+
+ return pMDResult;
+}
+
+// Add method desc to the hash table; must not be present already
+void StubMethodHashTable::InsertMethodDesc(MethodDesc *pMD, MethodDesc *pStubMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(CheckPointer(pStubMD));
+ }
+ CONTRACTL_END
+
+ StubMethodHashEntry_t *pNewEntry = (StubMethodHashEntry_t *)BaseAllocateEntry(NULL);
+ pNewEntry->SetMethodAndStub(pMD, pStubMD);
+
+ DWORD dwHash = Hash(pMD);
+ BaseInsertEntry(dwHash, pNewEntry);
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+// Save the hash table and any method descriptors referenced by it
+void StubMethodHashTable::Save(DataImage *image, CorProfileData *pProfileData)
+{
+ WRAPPER_NO_CONTRACT;
+ BaseSave(image, pProfileData);
+}
+
+void StubMethodHashTable::Fixup(DataImage *image)
+{
+ WRAPPER_NO_CONTRACT;
+ BaseFixup(image);
+}
+
+void StubMethodHashTable::FixupEntry(DataImage *pImage, StubMethodHashEntry_t *pEntry, void *pFixupBase, DWORD cbFixupOffset)
+{
+ WRAPPER_NO_CONTRACT;
+ pImage->FixupField(pFixupBase, cbFixupOffset + offsetof(StubMethodHashEntry_t, pMD), pEntry->GetMethod());
+ pImage->FixupField(pFixupBase, cbFixupOffset + offsetof(StubMethodHashEntry_t, pStubMD), pEntry->GetStubMethod());
+}
+
+bool StubMethodHashTable::ShouldSave(DataImage *pImage, StubMethodHashEntry_t *pEntry)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *pMD = pEntry->GetMethod();
+ if (pMD->GetClassification() == mcInstantiated)
+ {
+ // save entries only for "accepted" methods
+ if (!pImage->GetPreloader()->IsMethodInTransitiveClosureOfInstantiations(CORINFO_METHOD_HANDLE(pMD)))
+ return false;
+ }
+
+ // Save the entry only if the native code was successfully generated for the stub
+ if (pImage->GetCodeAddress(pEntry->GetStubMethod()) == NULL)
+ return false;
+
+ return true;
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#endif // !DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void StubMethodHashTable::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ BaseEnumMemoryRegions(flags);
+}
+
+void StubMethodHashTable::EnumMemoryRegionsForEntry(StubMethodHashEntry_t *pEntry, CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ if (pEntry->GetMethod().IsValid())
+ pEntry->GetMethod()->EnumMemoryRegions(flags);
+}
+
+#endif // DACCESS_COMPILE
+
+#endif // FEATURE_PREJIT
diff --git a/src/vm/ilstubcache.h b/src/vm/ilstubcache.h
new file mode 100644
index 0000000000..1f9556fc71
--- /dev/null
+++ b/src/vm/ilstubcache.h
@@ -0,0 +1,244 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: ILStubCache.h
+//
+
+//
+
+
+#ifdef _MSC_VER
+#pragma once
+#endif // _MSC_VER
+#ifndef _ILSTUBCACHE_H
+#define _ILSTUBCACHE_H
+
+
+#include "vars.hpp"
+#include "util.hpp"
+#include "crst.h"
+#include "ngenhash.h"
+#ifndef CLR_STANDALONE_BINDER
+#include "stubgen.h"
+#endif
+
+class ILStubHashBlobBase
+{
+public:
+ size_t m_cbSizeOfBlob; // size of the entire blob, including this header and the trailing data
+};
+
+class ILStubHashBlob : public ILStubHashBlobBase
+{
+public:
+ BYTE m_rgbBlobData[1];
+};
+
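+// m_cbSizeOfBlob counts the whole variable-size allocation: the header above
+// plus the trailing key bytes. An illustrative (hypothetical) construction,
+// assuming pData/cbData describe the caller's raw key bytes:
+//
+//     size_t cbBlob = sizeof(ILStubHashBlobBase) + cbData;
+//     NewArrayHolder<BYTE> pBuf = new BYTE[cbBlob];
+//     ILStubHashBlob* pBlob = (ILStubHashBlob*)(BYTE*)pBuf;
+//     pBlob->m_cbSizeOfBlob = cbBlob;
+//     memcpy(pBlob->m_rgbBlobData, pData, cbData);
+//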
+
+//
+// This class caches MethodDescs for dynamically generated IL stubs; it is not
+// persisted in NGEN images.
+//
+class ILStubCache : private CClosedHashBase
+{
+private:
+ //---------------------------------------------------------
+ // Hash entry for CClosedHashBase.
+ //---------------------------------------------------------
+ struct ILCHASHENTRY
+ {
+ // Values:
+ // NULL = free
+ // -1 = deleted
+ // other = used
+ MethodDesc* m_pMethodDesc;
+ ILStubHashBlob* m_pBlob;
+ };
+
+public:
+
+ //---------------------------------------------------------
+ // Constructor
+ //---------------------------------------------------------
+ ILStubCache(LoaderHeap* heap = NULL);
+
+ //---------------------------------------------------------
+ // Destructor
+ //---------------------------------------------------------
+ ~ILStubCache();
+
+ void Init(LoaderHeap* pHeap);
+
+ MethodDesc* GetStubMethodDesc(
+ MethodDesc *pTargetMD,
+ ILStubHashBlob* pParams,
+ DWORD dwStubFlags, // bitmask of NDirectStubFlags
+ Module* pSigModule,
+ PCCOR_SIGNATURE pSig,
+ DWORD cbSig,
+ AllocMemTracker* pamTracker,
+ bool& bILStubCreator,
+ MethodDesc* pLastMD);
+
+ void DeleteEntry(void *pParams);
+
+ void AddMethodDescChunkWithLockTaken(MethodDesc *pMD);
+
+#ifndef CLR_STANDALONE_BINDER
+ static MethodDesc* CreateAndLinkNewILStubMethodDesc(
+ LoaderAllocator* pAllocator,
+ MethodTable* pMT,
+ DWORD dwStubFlags, // bitmask of NDirectStubFlags
+ Module* pSigModule,
+ PCCOR_SIGNATURE pSig,
+ DWORD cbSig,
+ SigTypeContext *pTypeContext,
+ ILStubLinker* pStubLinker);
+#endif
+ MethodTable * GetStubMethodTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pStubMT;
+ }
+
+ MethodTable* GetOrCreateStubMethodTable(Module* pLoaderModule);
+
+private:
+
+ static MethodDesc* CreateNewMethodDesc(
+ LoaderHeap* pCreationHeap,
+ MethodTable* pMT,
+ DWORD dwStubFlags, // bitmask of NDirectStubFlags
+ Module* pSigModule,
+ PCCOR_SIGNATURE pSig,
+ DWORD cbSig,
+ SigTypeContext *pTypeContext,
+ AllocMemTracker* pamTracker);
+
+ // *** OVERRIDES FOR CClosedHashBase ***/
+
+ //*****************************************************************************
+ // Hash is called with a pointer to an element in the table. You must override
+ // this method and provide a hash algorithm for your element type.
+ //*****************************************************************************
+ virtual unsigned int Hash( // The key value.
+ void const* pData); // Raw data to hash.
+
+ //*****************************************************************************
+ // Compare is used in the typical memcmp way: 0 means equality, -1/1 indicate the
+ // direction of miscompare. In this system entries are either equal or not equal.
+ //*****************************************************************************
+ virtual unsigned int Compare( // 0, -1, or 1.
+ void const* pData, // Raw key data on lookup.
+ BYTE* pElement); // The element to compare data against.
+
+ //*****************************************************************************
+ // Return true if the element is free to be used.
+ //*****************************************************************************
+ virtual ELEMENTSTATUS Status( // The status of the entry.
+ BYTE* pElement); // The element to check.
+
+ //*****************************************************************************
+ // Sets the status of the given element.
+ //*****************************************************************************
+ virtual void SetStatus(
+ BYTE* pElement, // The element to set status for.
+ ELEMENTSTATUS eStatus); // New status.
+
+ //*****************************************************************************
+ // Returns the internal key value for an element.
+ //*****************************************************************************
+ virtual void* GetKey( // The data to hash on.
+ BYTE* pElement); // The element to return data ptr for.
+
+private:
+ Crst m_crst;
+ LoaderHeap* m_heap;
+ MethodTable* m_pStubMT;
+};
+
+
+#ifdef FEATURE_PREJIT
+//========================================================================================
+//
+// This hash table is used by interop to look up NGENed marshaling stubs for methods
+// in cases where the MethodDesc cannot point to the stub directly.
+//
+// Keys are arbitrary MethodDesc's, values are IL stub MethodDescs.
+//
+//========================================================================================
+
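+// Illustrative lookup pattern (hypothetical caller, not product code):
+//
+//     MethodDesc* pStubMD = pStubMethodHashTable->FindMethodDesc(pMD);
+//     if (pStubMD == NULL)
+//     {
+//         // no NGENed stub available; generate an IL stub at runtime instead
+//     }
+//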
+typedef DPTR(struct StubMethodHashEntry) PTR_StubMethodHashEntry;
+typedef struct StubMethodHashEntry
+{
+ PTR_MethodDesc GetMethod();
+ PTR_MethodDesc GetStubMethod();
+#ifndef DACCESS_COMPILE
+ void SetMethodAndStub(MethodDesc *pMD, MethodDesc *pStubMD);
+#endif // !DACCESS_COMPILE
+
+private:
+ friend class StubMethodHashTable;
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+ PTR_MethodDesc pMD;
+ PTR_MethodDesc pStubMD;
+
+#ifdef BINDER
+ friend class MdilModule;
+#endif
+} StubMethodHashEntry_t;
+
+
+// The hash table itself
+typedef DPTR(class StubMethodHashTable) PTR_StubMethodHashTable;
+class StubMethodHashTable : public NgenHashTable<StubMethodHashTable, StubMethodHashEntry, 2>
+{
+#ifndef DACCESS_COMPILE
+ StubMethodHashTable();
+
+ StubMethodHashTable(Module *pModule, LoaderHeap *pHeap, DWORD cInitialBuckets) :
+ NgenHashTable<StubMethodHashTable, StubMethodHashEntry, 2>(pModule, pHeap, cInitialBuckets) {}
+
+ ~StubMethodHashTable();
+#endif
+public:
+ static StubMethodHashTable *Create(LoaderAllocator *pAllocator, Module *pModule, DWORD dwNumBuckets, AllocMemTracker *pamTracker);
+
+private:
+ void operator delete(void *p);
+
+public:
+ // Looks up a stub MethodDesc in the hash table, returns NULL if not found
+ MethodDesc *FindMethodDesc(MethodDesc *pMD);
+
+#ifndef DACCESS_COMPILE
+ // Inserts a method-stub pair into the hash table
+ VOID InsertMethodDesc(MethodDesc *pMD, MethodDesc *pStubMD);
+
+ void Save(DataImage *image, CorProfileData *profileData);
+ void Fixup(DataImage *image);
+
+ bool ShouldSave(DataImage *pImage, StubMethodHashEntry_t *pEntry);
+
+ bool IsHotEntry(StubMethodHashEntry_t *pEntry, CorProfileData *pProfileData)
+ { LIMITED_METHOD_CONTRACT; return true; }
+
+ bool SaveEntry(DataImage *pImage, CorProfileData *pProfileData, StubMethodHashEntry_t *pOldEntry, StubMethodHashEntry_t *pNewEntry, EntryMappingTable *pMap)
+ { LIMITED_METHOD_CONTRACT; return false; }
+
+ void FixupEntry(DataImage *pImage, StubMethodHashEntry_t *pEntry, void *pFixupBase, DWORD cbFixupOffset);
+#endif // !DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ void EnumMemoryRegionsForEntry(StubMethodHashEntry_t *pEntry, CLRDataEnumMemoryFlags flags);
+#endif
+};
+#endif // FEATURE_PREJIT
+
+#endif //_ILSTUBCACHE_H
diff --git a/src/vm/ilstubresolver.cpp b/src/vm/ilstubresolver.cpp
new file mode 100644
index 0000000000..5c717d7805
--- /dev/null
+++ b/src/vm/ilstubresolver.cpp
@@ -0,0 +1,521 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: ILStubResolver.cpp
+//
+
+//
+
+
+#include "common.h"
+
+#include "field.h"
+
+// returns pointer to IL code
+BYTE* ILStubResolver::GetCodeInfo(unsigned* pCodeSize, unsigned* pStackSize, CorInfoOptions* pOptions, unsigned* pEHSize)
+{
+ CONTRACT(BYTE*)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pCodeSize));
+ PRECONDITION(CheckPointer(pStackSize));
+ PRECONDITION(CheckPointer(pOptions));
+ PRECONDITION(CheckPointer(pEHSize));
+ PRECONDITION(CheckPointer(m_pCompileTimeState));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+#ifndef DACCESS_COMPILE
+ CORINFO_METHOD_INFO methodInfo;
+ getMethodInfoILMethodHeaderHelper(&m_pCompileTimeState->m_ILHeader, &methodInfo);
+
+ *pCodeSize = methodInfo.ILCodeSize;
+ *pStackSize = methodInfo.maxStack;
+ *pOptions = methodInfo.options;
+ *pEHSize = methodInfo.EHcount;
+
+ RETURN methodInfo.ILCode;
+#else // DACCESS_COMPILE
+ DacNotImpl();
+ RETURN NULL;
+#endif // DACCESS_COMPILE
+}
+
+// static
+LPCUTF8 ILStubResolver::GetStubClassName(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ SUPPORTS_DAC;
+ PRECONDITION(pMD->IsILStub());
+ }
+ CONTRACTL_END;
+
+ if (pMD->GetDomain()->IsSharedDomain())
+ {
+ return "DomainNeutralILStubClass";
+ }
+ else
+ {
+ return "DomainBoundILStubClass";
+ }
+}
+
+LPCUTF8 ILStubResolver::GetStubMethodName()
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ switch (m_type)
+ {
+ case CLRToNativeInteropStub: return "IL_STUB_PInvoke";
+ case CLRToCOMInteropStub: return "IL_STUB_CLRtoCOM";
+ case CLRToWinRTInteropStub: return "IL_STUB_CLRtoWinRT";
+ case NativeToCLRInteropStub: return "IL_STUB_ReversePInvoke";
+ case COMToCLRInteropStub: return "IL_STUB_COMtoCLR";
+ case WinRTToCLRInteropStub: return "IL_STUB_WinRTtoCLR";
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+ case ArrayOpStub: return "IL_STUB_Array";
+#endif
+#ifdef FEATURE_STUBS_AS_IL
+ case MulticastDelegateStub: return "IL_STUB_MulticastDelegate_Invoke";
+ case UnboxingILStub: return "IL_STUB_UnboxingStub";
+ case InstantiatingStub: return "IL_STUB_InstantiatingStub";
+#endif
+ default:
+ UNREACHABLE_MSG("Unknown stub type");
+ }
+}
+
+void ILStubResolver::GetJitContext(SecurityControlFlags* pSecurityControlFlags,
+ TypeHandle* pTypeOwner)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pSecurityControlFlags));
+ PRECONDITION(CheckPointer(pTypeOwner));
+ }
+ CONTRACTL_END;
+
+ *pSecurityControlFlags = DynamicResolver::SkipVisibilityChecks;
+ *pTypeOwner = TypeHandle();
+}
+
+ChunkAllocator* ILStubResolver::GetJitMetaHeap()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(FALSE);
+ return NULL;
+}
+
+SigPointer
+ILStubResolver::GetLocalSig()
+{
+ STANDARD_VM_CONTRACT;
+
+ return SigPointer(
+ m_pCompileTimeState->m_ILHeader.LocalVarSig,
+ m_pCompileTimeState->m_ILHeader.cbLocalVarSig);
+}
+
+OBJECTHANDLE ILStubResolver::ConstructStringLiteral(mdToken token)
+{
+ STANDARD_VM_CONTRACT;
+ _ASSERTE(FALSE);
+ return NULL;
+}
+
+BOOL ILStubResolver::IsValidStringRef(mdToken metaTok)
+{
+ STANDARD_VM_CONTRACT;
+ _ASSERTE(FALSE);
+ return FALSE;
+}
+
+void ILStubResolver::ResolveToken(mdToken token, TypeHandle * pTH, MethodDesc ** ppMD, FieldDesc ** ppFD)
+{
+ STANDARD_VM_CONTRACT;
+
+ *pTH = NULL;
+ *ppMD = NULL;
+ *ppFD = NULL;
+
+ switch (TypeFromToken(token))
+ {
+ case mdtMethodDef:
+ {
+ MethodDesc* pMD = m_pCompileTimeState->m_tokenLookupMap.LookupMethodDef(token);
+ _ASSERTE(pMD);
+ *ppMD = pMD;
+ *pTH = TypeHandle(pMD->GetMethodTable());
+ }
+ break;
+
+ case mdtTypeDef:
+ {
+ TypeHandle typeHnd = m_pCompileTimeState->m_tokenLookupMap.LookupTypeDef(token);
+ _ASSERTE(!typeHnd.IsNull());
+ *pTH = typeHnd;
+ }
+ break;
+
+ case mdtFieldDef:
+ {
+ FieldDesc* pFD = m_pCompileTimeState->m_tokenLookupMap.LookupFieldDef(token);
+ _ASSERTE(pFD);
+ *ppFD = pFD;
+ *pTH = TypeHandle(pFD->GetEnclosingMethodTable());
+ }
+ break;
+
+ default:
+ UNREACHABLE_MSG("unexpected metadata token type");
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+SigPointer
+ILStubResolver::ResolveSignature(
+ mdToken token)
+{
+ STANDARD_VM_CONTRACT;
+ CONSISTENCY_CHECK_MSG(token == TOKEN_ILSTUB_TARGET_SIG, "IL stubs do not support any other signature tokens!");
+
+ return m_pCompileTimeState->m_StubTargetMethodSig;
+}
+
+//---------------------------------------------------------------------------------------
+//
+SigPointer
+ILStubResolver::ResolveSignatureForVarArg(
+ mdToken token)
+{
+ STANDARD_VM_CONTRACT;
+ _ASSERTE(FALSE);
+ return SigPointer();
+}
+
+//---------------------------------------------------------------------------------------
+//
+void ILStubResolver::GetEHInfo(unsigned EHnumber, CORINFO_EH_CLAUSE* clause)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(m_pCompileTimeState));
+ PRECONDITION(CheckPointer(m_pCompileTimeState->m_ILHeader.EH));
+ PRECONDITION(EHnumber < m_pCompileTimeState->m_ILHeader.EH->EHCount());
+ }
+ CONTRACTL_END;
+
+ COR_ILMETHOD_SECT_EH_CLAUSE_FAT ehClause;
+ const COR_ILMETHOD_SECT_EH_CLAUSE_FAT* ehInfo;
+ ehInfo = (COR_ILMETHOD_SECT_EH_CLAUSE_FAT*)m_pCompileTimeState->m_ILHeader.EH->EHClause(EHnumber, &ehClause);
+ clause->Flags = (CORINFO_EH_CLAUSE_FLAGS)ehInfo->GetFlags();
+ clause->TryOffset = ehInfo->GetTryOffset();
+ clause->TryLength = ehInfo->GetTryLength();
+ clause->HandlerOffset = ehInfo->GetHandlerOffset();
+ clause->HandlerLength = ehInfo->GetHandlerLength();
+ clause->ClassToken = ehInfo->GetClassToken();
+ clause->FilterOffset = ehInfo->GetFilterOffset();
+}
+
+bool ILStubResolver::IsNativeToCLRInteropStub()
+{
+ return (m_type == NativeToCLRInteropStub);
+}
+
+void ILStubResolver::SetStubType(ILStubType stubType)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_type = stubType;
+}
+
+void ILStubResolver::SetStubMethodDesc(MethodDesc* pStubMD)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pStubMD = PTR_MethodDesc(pStubMD);
+}
+
+void ILStubResolver::SetStubTargetMethodDesc(MethodDesc* pStubTargetMD)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pStubTargetMD = PTR_MethodDesc(pStubTargetMD);
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+void
+ILStubResolver::SetStubTargetMethodSig(
+ PCCOR_SIGNATURE pStubTargetMethodSig,
+ DWORD cbStubTargetSigLength)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(m_pCompileTimeState));
+ }
+ CONTRACTL_END;
+
+ NewHolder<BYTE> pNewSig = new BYTE[cbStubTargetSigLength];
+
+ memcpyNoGCRefs((void *)pNewSig, pStubTargetMethodSig, cbStubTargetSigLength);
+
+ m_pCompileTimeState->m_StubTargetMethodSig = SigPointer(pNewSig, cbStubTargetSigLength);
+ pNewSig.SuppressRelease();
+}
+
+//---------------------------------------------------------------------------------------
+//
+MethodDesc *
+ILStubResolver::GetStubTargetMethodDesc()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pStubTargetMD;
+}
+
+MethodDesc* ILStubResolver::GetStubMethodDesc()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pStubMD;
+}
+
+ILStubResolver::ILStubResolver() :
+ m_pCompileTimeState(dac_cast<PTR_CompileTimeState>(ILNotYetGenerated)),
+ m_pStubMD(dac_cast<PTR_MethodDesc>(NULL)),
+ m_pStubTargetMD(dac_cast<PTR_MethodDesc>(NULL)),
+ m_type(Unassigned),
+ m_dwJitFlags(0)
+{
+ LIMITED_METHOD_CONTRACT;
+
+}
+
+//---------------------------------------------------------------------------------------
+//
+COR_ILMETHOD_DECODER *
+ILStubResolver::AllocGeneratedIL(
+ size_t cbCode,
+ DWORD cbLocalSig,
+ UINT maxStack)
+{
+ STANDARD_VM_CONTRACT;
+
+#if !defined(DACCESS_COMPILE)
+ _ASSERTE(0 != cbCode);
+
+ NewHolder<BYTE> pNewILCodeBuffer = NULL;
+ NewHolder<BYTE> pNewLocalSig = NULL;
+ NewHolder<CompileTimeState> pNewCompileTimeState = NULL;
+
+ pNewCompileTimeState = (CompileTimeState *)new BYTE[sizeof(CompileTimeState)];
+ memset(pNewCompileTimeState, 0, sizeof(CompileTimeState));
+
+ pNewILCodeBuffer = new BYTE[cbCode];
+
+ if (0 != cbLocalSig)
+ {
+ pNewLocalSig = new BYTE[cbLocalSig];
+ }
+
+ COR_ILMETHOD_DECODER* pILHeader = &pNewCompileTimeState->m_ILHeader;
+
+ pILHeader->Flags = 0;
+ pILHeader->CodeSize = (DWORD)cbCode;
+ pILHeader->MaxStack = maxStack;
+ pILHeader->EH = 0;
+ pILHeader->Sect = 0;
+ pILHeader->Code = pNewILCodeBuffer;
+ pILHeader->LocalVarSig = pNewLocalSig;
+ pILHeader->cbLocalVarSig = cbLocalSig;
+
+#ifdef _DEBUG
+ LPVOID pPrevCompileTimeState =
+#endif // _DEBUG
+ FastInterlockExchangePointer(&m_pCompileTimeState, pNewCompileTimeState.GetValue());
+ CONSISTENCY_CHECK(ILNotYetGenerated == (UINT_PTR)pPrevCompileTimeState);
+
+ pNewLocalSig.SuppressRelease();
+ pNewILCodeBuffer.SuppressRelease();
+ pNewCompileTimeState.SuppressRelease();
+
+ return pILHeader;
+
+#else // DACCESS_COMPILE
+ DacNotImpl();
+ return NULL;
+
+#endif // DACCESS_COMPILE
+} // ILStubResolver::AllocGeneratedIL
+
+//---------------------------------------------------------------------------------------
+//
+COR_ILMETHOD_DECODER* ILStubResolver::GetILHeader()
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(m_pCompileTimeState));
+ }
+ CONTRACTL_END;
+
+ return &m_pCompileTimeState->m_ILHeader;
+}
+
+COR_ILMETHOD_SECT_EH* ILStubResolver::AllocEHSect(size_t nClauses)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (nClauses >= 1)
+ {
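+        // COR_ILMETHOD_SECT_EH already embeds one fat clause, so subtract it from the
+        // base size before adding room for all nClauses clauses.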
+ size_t cbSize = sizeof(COR_ILMETHOD_SECT_EH)
+ - sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT)
+ + (nClauses * sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT));
+ m_pCompileTimeState->m_pEHSect = (COR_ILMETHOD_SECT_EH*) new BYTE[cbSize];
+ CONSISTENCY_CHECK(NULL == m_pCompileTimeState->m_ILHeader.EH);
+ m_pCompileTimeState->m_ILHeader.EH = m_pCompileTimeState->m_pEHSect;
+ return m_pCompileTimeState->m_pEHSect;
+ }
+ else
+ {
+ return NULL;
+ }
+}
+
+
+void ILStubResolver::FreeCompileTimeState()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if ((ILNotYetGenerated == dac_cast<TADDR>(m_pCompileTimeState)) ||
+ (ILGeneratedAndFreed == dac_cast<TADDR>(m_pCompileTimeState)))
+ {
+ return;
+ }
+
+ ClearCompileTimeState(ILGeneratedAndFreed);
+}
+
+//---------------------------------------------------------------------------------------
+//
+void
+ILStubResolver::ClearCompileTimeState(CompileTimeStatePtrSpecialValues newState)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //
+ // See allocations in AllocGeneratedIL and SetStubTargetMethodSig
+ //
+
+ COR_ILMETHOD_DECODER * pILHeader = &m_pCompileTimeState->m_ILHeader;
+
+ CONSISTENCY_CHECK(NULL != pILHeader->Code);
+ delete[] pILHeader->Code;
+
+ if (NULL != pILHeader->LocalVarSig)
+ {
+ delete[] pILHeader->LocalVarSig;
+ }
+
+ if (!m_pCompileTimeState->m_StubTargetMethodSig.IsNull())
+ {
+ delete[] m_pCompileTimeState->m_StubTargetMethodSig.GetPtr();
+ }
+
+ if (NULL != m_pCompileTimeState->m_pEHSect)
+ {
+ delete[] m_pCompileTimeState->m_pEHSect;
+ }
+
+ delete m_pCompileTimeState;
+
+ FastInterlockExchangePointer(&m_pCompileTimeState, dac_cast<PTR_CompileTimeState>((TADDR)newState));
+} // ILStubResolver::ClearCompileTimeState
+
+//---------------------------------------------------------------------------------------
+//
+void
+ILStubResolver::SetTokenLookupMap(
+ TokenLookupMap * pMap)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(m_pCompileTimeState));
+ }
+ CONTRACTL_END;
+
+ // run copy ctor
+ new (&m_pCompileTimeState->m_tokenLookupMap) TokenLookupMap(pMap);
+}
+
+bool ILStubResolver::IsCompiled()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (dac_cast<TADDR>(m_pCompileTimeState) == ILGeneratedAndFreed);
+}
+
+bool ILStubResolver::IsILGenerated()
+{
+ return (dac_cast<TADDR>(m_pCompileTimeState) != ILNotYetGenerated);
+}
+
+void ILStubResolver::SetJitFlags(DWORD dwFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_dwJitFlags = dwFlags;
+}
+
+DWORD ILStubResolver::GetJitFlags()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_dwJitFlags;
+}
+
+// static
+void ILStubResolver::StubGenFailed(ILStubResolver* pResolver)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if ((ILNotYetGenerated == dac_cast<TADDR>(pResolver->m_pCompileTimeState)) ||
+ (ILGeneratedAndFreed == dac_cast<TADDR>(pResolver->m_pCompileTimeState)))
+ {
+ return;
+ }
+
+ pResolver->ClearCompileTimeState(ILNotYetGenerated);
+}
diff --git a/src/vm/ilstubresolver.h b/src/vm/ilstubresolver.h
new file mode 100644
index 0000000000..5741be915a
--- /dev/null
+++ b/src/vm/ilstubresolver.h
@@ -0,0 +1,125 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: ILStubResolver.h
+//
+
+//
+
+
+#ifndef __ILSTUBRESOLVER_H__
+#define __ILSTUBRESOLVER_H__
+
+#include "stubgen.h"
+class ILStubResolver : DynamicResolver
+{
+ friend class ILStubCache;
+ friend class ILStubLinker;
+
+public:
+
+ // -----------------------------------
+ // DynamicResolver interface methods
+ // -----------------------------------
+
+ void FreeCompileTimeState();
+ void GetJitContext(SecurityControlFlags* pSecurityControlFlags,
+ TypeHandle* pTypeOwner);
+ ChunkAllocator* GetJitMetaHeap();
+
+ BYTE* GetCodeInfo(unsigned* pCodeSize, unsigned* pStackSize, CorInfoOptions* pOptions, unsigned* pEHSize);
+ SigPointer GetLocalSig();
+
+ OBJECTHANDLE ConstructStringLiteral(mdToken metaTok);
+ BOOL IsValidStringRef(mdToken metaTok);
+ void ResolveToken(mdToken token, TypeHandle * pTH, MethodDesc ** ppMD, FieldDesc ** ppFD);
+ SigPointer ResolveSignature(mdToken token);
+ SigPointer ResolveSignatureForVarArg(mdToken token);
+ void GetEHInfo(unsigned EHnumber, CORINFO_EH_CLAUSE* clause);
+
+ static LPCUTF8 GetStubClassName(MethodDesc* pMD);
+ LPCUTF8 GetStubMethodName();
+
+ MethodDesc* GetDynamicMethod() { LIMITED_METHOD_CONTRACT; return m_pStubMD; }
+
+ // -----------------------------------
+ // ILStubResolver-specific methods
+ // -----------------------------------
+ bool IsNativeToCLRInteropStub();
+ MethodDesc* GetStubMethodDesc();
+ MethodDesc* GetStubTargetMethodDesc();
+ void SetStubTargetMethodDesc(MethodDesc* pStubTargetMD);
+ void SetStubTargetMethodSig(PCCOR_SIGNATURE pStubTargetMethodSig, DWORD cbStubTargetSigLength);
+ void SetStubMethodDesc(MethodDesc* pStubMD);
+
+ COR_ILMETHOD_DECODER * AllocGeneratedIL(size_t cbCode, DWORD cbLocalSig, UINT maxStack);
+ COR_ILMETHOD_DECODER * GetILHeader();
+ COR_ILMETHOD_SECT_EH* AllocEHSect(size_t nClauses);
+
+ bool IsCompiled();
+ bool IsILGenerated();
+
+ ILStubResolver();
+
+ void SetTokenLookupMap(TokenLookupMap* pMap);
+
+ void SetJitFlags(DWORD dwJitFlags);
+ DWORD GetJitFlags();
+
+ static void StubGenFailed(ILStubResolver* pResolver);
+
+protected:
+ enum ILStubType
+ {
+ Unassigned = 0,
+ CLRToNativeInteropStub,
+ CLRToCOMInteropStub,
+ CLRToWinRTInteropStub,
+ NativeToCLRInteropStub,
+ COMToCLRInteropStub,
+ WinRTToCLRInteropStub,
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+ ArrayOpStub,
+#endif
+#ifdef FEATURE_STUBS_AS_IL
+ MulticastDelegateStub,
+ UnboxingILStub,
+ InstantiatingStub,
+#endif
+ };
+
+ enum CompileTimeStatePtrSpecialValues
+ {
+ ILNotYetGenerated = NULL,
+ ILGeneratedAndFreed = 1,
+ };
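+
+    // Note: m_pCompileTimeState is effectively tri-state: ILNotYetGenerated (NULL) until
+    // AllocGeneratedIL runs, a real CompileTimeState* while the generated IL is alive, and
+    // ILGeneratedAndFreed (1) once FreeCompileTimeState has released it (StubGenFailed
+    // resets it back to ILNotYetGenerated).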
+
+ void ClearCompileTimeState(CompileTimeStatePtrSpecialValues newState);
+ void SetStubType(ILStubType stubType);
+
+ //
+ // This stuff is only needed during JIT
+ //
+ struct CompileTimeState
+ {
+ COR_ILMETHOD_DECODER m_ILHeader;
+ COR_ILMETHOD_SECT_EH * m_pEHSect;
+ SigPointer m_StubTargetMethodSig;
+ TokenLookupMap m_tokenLookupMap;
+ };
+ typedef DPTR(struct CompileTimeState) PTR_CompileTimeState;
+
+ PTR_CompileTimeState m_pCompileTimeState;
+
+ PTR_MethodDesc m_pStubMD;
+ PTR_MethodDesc m_pStubTargetMD;
+ ILStubType m_type;
+ DWORD m_dwJitFlags;
+};
+
+typedef Holder<ILStubResolver*, DoNothing<ILStubResolver*>, ILStubResolver::StubGenFailed, NULL> ILStubGenHolder;
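+
+// Rough (hypothetical) usage sketch: guard stub IL generation so that a failure path
+// resets the resolver via StubGenFailed instead of leaking half-built compile-time state.
+//
+//   ILStubGenHolder sgh(pResolver);
+//   /* ... generate the stub IL; this may throw ... */
+//   sgh.SuppressRelease(); // success: keep the generated state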
+
+
+#endif // __ILSTUBRESOLVER_H__
diff --git a/src/vm/inlinetracking.cpp b/src/vm/inlinetracking.cpp
new file mode 100644
index 0000000000..d9d91abcb3
--- /dev/null
+++ b/src/vm/inlinetracking.cpp
@@ -0,0 +1,430 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// =============================================================================================
+// Code for tracking method inlinings in NGen images.
+// The only information stored is "who" got inlined "where", no offsets or inlining depth tracking.
+// (No good for debugger yet.)
+// This information is later exposed to profilers and can be useful for ReJIT.
+// Runtime inlining is not being tracked because profilers can deduce it via callbacks anyway.
+// =============================================================================================
+#include "common.h"
+#include "inlinetracking.h"
+#include "ceeload.h"
+
+bool MethodInModule::operator <(const MethodInModule& other) const
+{
+ STANDARD_VM_CONTRACT;
+ if (m_module == other.m_module)
+ {
+ return m_methodDef < other.m_methodDef;
+ }
+ else
+ {
+        // Since NGen images are supposed to be deterministic,
+        // we need a stable sort order that doesn't change between different runs.
+        // That's why we use names and GUIDs instead of just doing m_module < other.m_module.
+
+ // First we try to compare simple names (should be fast enough)
+ LPCUTF8 simpleName = m_module ? m_module->GetSimpleName() : "";
+ LPCUTF8 otherSimpleName = other.m_module ? other.m_module->GetSimpleName() : "";
+ int nameCmpResult = strcmp(simpleName, otherSimpleName);
+
+ if (nameCmpResult == 0)
+ {
+            // Names are equal but module addresses aren't; that's suspicious,
+            // so fall back to module GUIDs
+ GUID thisGuid, otherGuid;
+ if (m_module == NULL)
+ {
+ memset(&thisGuid, 0, sizeof(GUID));
+ }
+ else
+ {
+ m_module->GetFile()->GetMVID(&thisGuid);
+ }
+
+ if (other.m_module == NULL)
+ {
+ memset(&otherGuid, 0, sizeof(GUID));
+ }
+ else
+ {
+ other.m_module->GetFile()->GetMVID(&otherGuid);
+ }
+
+ return memcmp(&thisGuid, &otherGuid, sizeof(GUID)) < 0;
+ }
+ else
+ {
+ return nameCmpResult < 0;
+ }
+ }
+}
+
+bool MethodInModule::operator ==(const MethodInModule& other) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_methodDef == other.m_methodDef &&
+ m_module == other.m_module;
+}
+
+bool MethodInModule::operator !=(const MethodInModule& other) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_methodDef != other.m_methodDef ||
+ m_module != other.m_module;
+}
+
+
+void InlineTrackingEntry::SortAndDeduplicate()
+{
+ STANDARD_VM_CONTRACT;
+
+ //Sort
+ MethodInModule *begin = &m_inliners[0];
+ MethodInModule *end = begin + m_inliners.GetCount();
+ util::sort(begin, end);
+
+ //Deduplicate
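+    // (Worked example with hypothetical values: sorted input {A, A, B, B, C} collapses
+    // to {A, B, C} - 'left' marks the end of the deduplicated prefix, 'right' scans ahead.)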
+ MethodInModule *left = begin;
+ MethodInModule *right = left + 1;
+ while (right < end)
+ {
+ auto rvalue = *right;
+ if (*left != rvalue)
+ {
+ left++;
+ if (left != right)
+ {
+ *left = rvalue;
+ }
+ }
+ right++;
+ }
+
+ //Shrink
+ int newCount = (int)(left - begin + 1);
+ m_inliners.SetCount(newCount);
+}
+
+InlineTrackingEntry::InlineTrackingEntry(const InlineTrackingEntry& other)
+ :m_inlinee(other.m_inlinee)
+{
+ STANDARD_VM_CONTRACT;
+ m_inliners.Set(other.m_inliners);
+}
+
+InlineTrackingEntry & InlineTrackingEntry::operator = (const InlineTrackingEntry &other)
+{
+ STANDARD_VM_CONTRACT;
+ m_inlinee = other.m_inlinee;
+ m_inliners.Set(other.m_inliners);
+ return *this;
+}
+
+
+#ifndef DACCESS_COMPILE
+COUNT_T PersistentInlineTrackingMap::GetInliners(PTR_Module inlineeOwnerMod, mdMethodDef inlineeTkn, COUNT_T inlinersSize, MethodInModule inliners[], BOOL *incompleteData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(inlineeOwnerMod);
+ _ASSERTE(inliners);
+
+ if (incompleteData)
+ {
+ *incompleteData = FALSE;
+ }
+ if (m_inlineeIndex == NULL || m_inlinersBuffer == NULL)
+ {
+ //No inlines saved in this image.
+ return 0;
+ }
+
+ // Binary search to find all records matching (inlineeTkn/inlineeOwnerMod)
+ InlineeRecord probeRecord(RidFromToken(inlineeTkn), inlineeOwnerMod->GetSimpleName());
+ InlineeRecord *begin = m_inlineeIndex;
+ InlineeRecord *end = m_inlineeIndex + m_inlineeIndexSize;
+ InlineeRecord *foundRecord = util::lower_bound(begin, end, probeRecord);
+ DWORD result = 0;
+ DWORD outputIndex = 0;
+
+ // Go through all matching records
+ for (; foundRecord < end && *foundRecord == probeRecord; foundRecord++)
+ {
+ DWORD offset = foundRecord->m_offset;
+ NibbleReader stream(m_inlinersBuffer + offset, m_inlinersBufferSize - offset);
+
+ DWORD inlineeModuleZapIndex = stream.ReadEncodedU32();
+ Module *decodedInlineeModule = GetModuleByIndex(inlineeModuleZapIndex);
+
+ // Check if this is just token/method name hash collision
+ if (decodedInlineeModule == inlineeOwnerMod)
+ {
+ // We found the token and the module we were looking for!
+            DWORD inlinerModuleZapIndex = stream.ReadEncodedU32(); // read the inliner module; it is the same for all inliners in this chunk
+ Module *inlinerModule = GetModuleByIndex(inlinerModuleZapIndex);
+
+ if (inlinerModule != NULL)
+ {
+ DWORD inlinersCount = stream.ReadEncodedU32();
+ _ASSERTE(inlinersCount > 0);
+
+ RID inlinerRid = 0;
+                // Read inliner RIDs one by one; each RID is represented as an adjustment (diff) to the previous one.
+                // Add the inliner module and copy to the output buffer
+ for (DWORD i = 0; i < inlinersCount && outputIndex < inlinersSize; i++)
+ {
+ inlinerRid += stream.ReadEncodedU32();
+ mdMethodDef inlinerTkn = TokenFromRid(inlinerRid, mdtMethodDef);
+ inliners[outputIndex++] = MethodInModule(inlinerModule, inlinerTkn);
+ }
+ result += inlinersCount;
+ }
+ else
+ {
+                // We can't find the module for this inlinerModuleZapIndex, which means it hasn't been loaded yet
+                // (maybe it never will be); we just report it to the profiler.
+                // The profiler might want to try again later when more modules are loaded.
+ if (incompleteData)
+ {
+ *incompleteData = TRUE;
+ }
+ }
+ }
+ }
+
+ return result;
+}
+
+Module *PersistentInlineTrackingMap::GetModuleByIndex(DWORD index)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // This "black magic spell" has in fact nothing to do with GenericInstantiationCompare per se, but just sets a thread flag
+ // that later activates more thorough search inside Module::GetAssemblyIfLoaded, which is indirectly called from GetModuleFromIndexIfLoaded.
+ // This is useful when ngen image was compiler against a different assembly version than the one loaded now.
+ ClrFlsThreadTypeSwitch genericInstantionCompareHolder(ThreadType_GenericInstantiationCompare);
+
+ return m_module->GetModuleFromIndexIfLoaded(index);
+}
+
+PersistentInlineTrackingMap::InlineeRecord::InlineeRecord(RID rid, LPCUTF8 simpleName)
+{
+ LIMITED_METHOD_CONTRACT;
+    // XOR of up to the first 24 bytes of the module name
+ DWORD hash = 0;
+ for (int i = 0; simpleName[i] && i < 24; i++)
+ hash ^= (BYTE)simpleName[i];
+
+    // This key contains 24 bits of RID and 8 bits from the module name.
+    // Since a RID can't be longer than 24 bits, we can't have method RID collisions;
+    // that's why PersistentInlineTrackingMap::GetInliners only deals with module collisions.
+ m_key = (hash << 24) | rid;
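+    // Worked example (hypothetical values): simpleName "abc" hashes to 0x61 ^ 0x62 ^ 0x63 == 0x60,
+    // so with rid == 0x001234 the key becomes (0x60 << 24) | 0x001234 == 0x60001234.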
+}
+
+InlineTrackingMap::InlineTrackingMap()
+ : m_mapCrst(CrstInlineTrackingMap)
+{
+ STANDARD_VM_CONTRACT;
+}
+
+void InlineTrackingMap::AddInlining(MethodDesc *inliner, MethodDesc *inlinee)
+{
+ STANDARD_VM_CONTRACT;
+ _ASSERTE(inliner != NULL);
+ _ASSERTE(inlinee != NULL);
+
+ MethodInModule inlineeMnM(inlinee->GetModule(), inlinee->GetMemberDef());
+
+ if (RidFromToken(inlineeMnM.m_methodDef) == 0 || RidFromToken(inliner->GetMemberDef()) == 0)
+ {
+        // Sometimes we do see methods that don't have valid tokens (stubs etc.);
+        // we just ignore them.
+ return;
+ }
+
+ CrstHolder lock(&m_mapCrst);
+ InlineTrackingEntry *existingEntry = const_cast<InlineTrackingEntry *>(LookupPtr(inlineeMnM));
+ if (existingEntry)
+ {
+ // We saw this inlinee before, just add one more inliner
+ existingEntry->Add(inliner);
+ }
+ else
+ {
+ // We haven't seen this inlinee before, create a new record in the hashtable
+ // and add a first inliner to it.
+ InlineTrackingEntry newEntry;
+ newEntry.m_inlinee = inlineeMnM;
+ newEntry.Add(inliner);
+ Add(newEntry);
+ }
+}
+
+void InlineTrackingEntry::Add(PTR_MethodDesc inliner)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodInModule method(inliner->GetModule(), inliner->GetMemberDef());
+
+    // Go through the last 10 inliners to check whether the given inliner has recently been registered.
+    // This lets us filter out most duplicates without having to scan through hundreds of inliners
+    // for methods like Object.ctor or Monitor.Enter.
+    // We are OK with keeping occasional duplicates in m_inliners; we'll get rid of them
+    // in SortAndDeduplicate() anyway.
+ int count = static_cast<int>(m_inliners.GetCount());
+ int start = max(0, count - 10);
+ for (int i = count - 1; i >= start; i--)
+ {
+ if (m_inliners[i] == method)
+ return;
+ }
+
+    // Looks like we're seeing this inliner for the first time; add it to the collection
+ m_inliners.Append(method);
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+
+void PersistentInlineTrackingMap::ProcessInlineTrackingEntry(DataImage *image, SBuffer *inlinersBuffer, SArray<InlineeRecord> *inlineeIndex, InlineTrackingEntry *entry)
+{
+ STANDARD_VM_CONTRACT;
+ // This call removes duplicates from inliners and makes sure they are sorted by module
+ entry->SortAndDeduplicate();
+ MethodInModule inlinee = entry->m_inlinee;
+ DWORD inlineeModuleZapIndex = image->GetModuleImportIndex(inlinee.m_module);
+ InlineSArray<MethodInModule, 3> &inliners = entry->m_inliners;
+    COUNT_T totalInlinersCount = inliners.GetCount();
+    _ASSERTE(totalInlinersCount > 0);
+
+ COUNT_T sameModuleCount;
+    // Go through all inliners, grouping them by module; for each module we'll create an
+    // InlineeRecord and encode its inliners as bytes in inlinersBuffer.
+    for (COUNT_T thisModuleBegin = 0; thisModuleBegin < totalInlinersCount; thisModuleBegin += sameModuleCount)
+ {
+ Module *lastInlinerModule = inliners[thisModuleBegin].m_module;
+ DWORD lastInlinerModuleZapIndex = image->GetModuleImportIndex(lastInlinerModule);
+
+ // Counting how many inliners belong to this module
+ sameModuleCount = 1;
+        while (thisModuleBegin + sameModuleCount < totalInlinersCount &&
+ inliners[thisModuleBegin + sameModuleCount].m_module == lastInlinerModule)
+ {
+ sameModuleCount++;
+ }
+
+ // Saving module indexes and number of inliners
+ NibbleWriter inlinersStream;
+ inlinersStream.WriteEncodedU32(inlineeModuleZapIndex);
+ inlinersStream.WriteEncodedU32(lastInlinerModuleZapIndex);
+ inlinersStream.WriteEncodedU32(sameModuleCount);
+
+ // Saving inliners RIDs, each new RID is represented as an adjustment (diff) to the previous one
+ RID prevMethodRid = 0;
+ for (COUNT_T i = thisModuleBegin; i < thisModuleBegin + sameModuleCount; i++)
+ {
+ RID methodRid = RidFromToken(inliners[i].m_methodDef);
+ _ASSERTE(methodRid >= prevMethodRid);
+ inlinersStream.WriteEncodedU32(methodRid - prevMethodRid);
+ prevMethodRid = methodRid;
+ }
+ inlinersStream.Flush();
+
+        // Copy the NibbleWriter's output for this module's inliners into the big buffer (inlinersBuffer)
+        // and create an InlineeRecord with the correct offset
+ InlineeRecord record(RidFromToken(inlinee.m_methodDef), inlinee.m_module->GetSimpleName());
+ DWORD inlinersStreamSize;
+ const BYTE *inlinersStreamPtr = (const BYTE *)inlinersStream.GetBlob(&inlinersStreamSize);
+ record.m_offset = inlinersBuffer->GetSize();
+ inlinersBuffer->Insert(inlinersBuffer->End(), SBuffer(SBuffer::Immutable, inlinersStreamPtr, inlinersStreamSize));
+
+ inlineeIndex->Append(record);
+ }
+}
+
+bool compare_entry(const InlineTrackingEntry* first, const InlineTrackingEntry* second)
+{
+ return first->m_inlinee < second->m_inlinee;
+}
+
+void PersistentInlineTrackingMap::Save(DataImage *image, InlineTrackingMap* runtimeMap)
+{
+ STANDARD_VM_CONTRACT;
+ _ASSERTE(image != NULL);
+ _ASSERTE(runtimeMap != NULL);
+
+ SArray<InlineeRecord> inlineeIndex;
+ SBuffer inlinersBuffer;
+
+ // Sort records from runtimeMap, because we need to make sure
+ // we save everything in deterministic order. Hashtable iteration is not deterministic.
+ COUNT_T runtimeMapCount = runtimeMap->GetCount();
+ InlineTrackingEntry **inlinees = new InlineTrackingEntry *[runtimeMapCount];
+ NewArrayHolder<InlineTrackingEntry *>inlineesHolder(inlinees);
+ int index = 0;
+ for (auto iter = runtimeMap->Begin(), end = runtimeMap->End(); iter != end; ++iter)
+ {
+ inlinees[index++] = const_cast<InlineTrackingEntry *>(&*iter);
+ }
+ util::sort(inlinees, inlinees + runtimeMapCount, compare_entry);
+
+
+    // Iterate through each inlinee record from the InlineTrackingMap
+ // and write corresponding records into inlineeIndex and inlinersBuffer
+ for (COUNT_T i = 0; i < runtimeMapCount; i++)
+ {
+ ProcessInlineTrackingEntry(image, &inlinersBuffer, &inlineeIndex, inlinees[i]);
+ }
+
+ m_inlineeIndexSize = inlineeIndex.GetCount();
+ m_inlinersBufferSize = inlinersBuffer.GetSize();
+ _ASSERTE((m_inlineeIndexSize == 0) == (m_inlinersBufferSize == 0));
+
+ if (m_inlineeIndexSize != 0 && m_inlinersBufferSize != 0)
+ {
+        // Copy everything to the class fields; we didn't use the class fields during accumulation
+        // because we want to make sure we don't waste memory on the buffers' amortized growth
+ m_inlineeIndex = new (image->GetHeap()) InlineeRecord[m_inlineeIndexSize];
+ inlineeIndex.Copy(m_inlineeIndex, inlineeIndex.Begin(), m_inlineeIndexSize);
+
+ m_inlinersBuffer = new (image->GetHeap()) BYTE[m_inlinersBufferSize];
+ inlinersBuffer.Copy(m_inlinersBuffer, inlinersBuffer.Begin(), m_inlinersBufferSize);
+
+ //Sort m_inlineeIndex so we can later use binary search
+ util::sort(m_inlineeIndex, m_inlineeIndex + m_inlineeIndexSize);
+
+ //Making sure all this memory actually gets saved into NGEN image
+ image->StoreStructure(m_inlineeIndex, m_inlineeIndexSize * sizeof(m_inlineeIndex[0]), DataImage::ITEM_INLINING_DATA);
+ image->StoreStructure(m_inlinersBuffer, m_inlinersBufferSize, DataImage::ITEM_INLINING_DATA);
+ }
+
+ image->StoreStructure(this, sizeof(*this), DataImage::ITEM_INLINING_DATA);
+ LOG((LF_ZAP, LL_INFO100000,
+ "PersistentInlineTrackingMap saved. InlineeIndexSize: %d bytes, InlinersBufferSize: %d bytes\n",
+ m_inlineeIndexSize * sizeof(m_inlineeIndex[0]), m_inlinersBufferSize));
+}
+
+void PersistentInlineTrackingMap::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+ image->FixupPointerField(this, offsetof(PersistentInlineTrackingMap, m_module));
+ image->FixupPointerField(this, offsetof(PersistentInlineTrackingMap, m_inlineeIndex));
+ image->FixupPointerField(this, offsetof(PersistentInlineTrackingMap, m_inlinersBuffer));
+}
+#endif //FEATURE_NATIVE_IMAGE_GENERATION
+#endif //!DACCESS_COMPILE
\ No newline at end of file
diff --git a/src/vm/inlinetracking.h b/src/vm/inlinetracking.h
new file mode 100644
index 0000000000..768095e3f7
--- /dev/null
+++ b/src/vm/inlinetracking.h
@@ -0,0 +1,233 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// =============================================================================================
+// Definitions for tracking method inlinings in NGen images.
+// The only information stored is "who" got inlined "where", no offsets or inlining depth tracking.
+// (No good for debugger yet.)
+// This information is later exposed to profilers and can be useful for ReJIT.
+// Runtime inlining is not being tracked because profilers can deduce it via callbacks anyway.
+// =============================================================================================
+#ifndef INLINETRACKING_H_
+#define INLINETRACKING_H_
+#include "corhdr.h"
+#include "shash.h"
+#include "sarray.h"
+#include "crsttypes.h"
+#include "daccess.h"
+
+class MethodDesc;
+typedef DPTR(class MethodDesc) PTR_MethodDesc;
+
+struct MethodInModule
+{
+ Module *m_module;
+ mdMethodDef m_methodDef;
+
+ bool operator <(const MethodInModule& other) const;
+
+ bool operator ==(const MethodInModule& other) const;
+
+ bool operator !=(const MethodInModule& other) const;
+
+ MethodInModule(Module * module, mdMethodDef methodDef)
+ :m_module(module), m_methodDef(methodDef)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ }
+
+ MethodInModule()
+ :m_module(NULL), m_methodDef(0)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ }
+
+};
+
+struct InlineTrackingEntry
+{
+ MethodInModule m_inlinee;
+
+    //Our research shows that 70% of methods are inlined less than 4 times,
+    //so it's probably worth inlining enough storage for 3 inliners.
+ InlineSArray<MethodInModule, 3> m_inliners;
+
+
+ // SArray and SBuffer don't have sane implementations for operator=
+ // but SHash uses operator= for moving values, so we have to provide
+ // implementations that don't corrupt memory.
+ InlineTrackingEntry(const InlineTrackingEntry& other);
+ InlineTrackingEntry &operator=(const InlineTrackingEntry &other);
+
+ InlineTrackingEntry()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ void Add(PTR_MethodDesc inliner);
+ void SortAndDeduplicate();
+};
+
+class InlineTrackingMapTraits : public NoRemoveSHashTraits <DefaultSHashTraits<InlineTrackingEntry> >
+{
+public:
+ typedef MethodInModule key_t;
+
+ static key_t GetKey(const element_t &e)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return e.m_inlinee;
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (k1 == k2);
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return ((count_t)k.m_methodDef ^ (count_t)k.m_module);
+ }
+ static const element_t Null()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ InlineTrackingEntry e;
+ return e;
+ }
+ static bool IsNull(const element_t &e)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return !e.m_inlinee.m_module;
+ }
+
+ static const bool s_NoThrow = false;
+};
+
+// This is a hashtable that is used by each module to track inlines in the code inside this module.
+// For each key (MethodInModule) it stores an array of methods (MethodInModule), each of which
+// directly or indirectly inlined code from the MethodInModule specified by the key.
+//
+// It is important to understand that even though each module has its own instance of the map,
+// the map can have methods from other modules both as keys and values.
+// - If a module has code inlined from other modules, we naturally get methods from other modules as keys in the map.
+// - During the NGen process, modules can generate code for generic classes and methods from other modules and
+//   embed them into the image (like List<MyStruct>.FindAll() might get embedded into the module of MyStruct).
+//   In such cases the values of the map can belong to other modules.
+//
+// Currently this map is created and updated by modules only during native image generation
+// and later saved as PersistentInlineTrackingMap.
+class InlineTrackingMap : public SHash < InlineTrackingMapTraits >
+{
+private:
+ Crst m_mapCrst;
+
+public:
+ InlineTrackingMap();
+ void AddInlining(MethodDesc *inliner, MethodDesc *inlinee);
+};
+
+typedef DPTR(InlineTrackingMap) PTR_InlineTrackingMap;
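+
+// A rough write-side usage sketch, as during NGen compilation (the accessor
+// name below is assumed for illustration):
+//
+//   InlineTrackingMap *pMap = pModule->GetInlineTrackingMap();
+//   pMap->AddInlining(pInlinerMD, pInlineeMD); // thread-safe; takes m_mapCrst internally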
+
+// This is a persistent map that is stored inside each NGen-ed module image and is used to track
+// inlines in the NGEN-ed code inside this module.
+// At runtime this map is used by the profiler to track methods that inline a given method,
+// thus answering the question "give me all methods from this native image that have code from this method".
+// It doesn't require any load-time unpacking and serves requests directly from the NGEN image.
+//
+// It is composed of two arrays:
+// m_inlineeIndex - an array of InlineeRecords sorted by InlineeRecord.m_key (i.e. by module name hash, then token); given an inlinee module name hash (8 bits)
+// and a method token (24 bits), we use binary search to find out whether this method has ever been inlined in the NGen-ed code of this image.
+// Each record has m_offset, an offset inside m_inlinersBuffer pointing at more data about where the method got inlined.
+//
+// It is entirely possible to have more than one InlineeRecord with the same key, not only due to hash collisions, but also due to
+// the fact that we create one record for each (inlinee module / inliner module) pair.
+// For example: we have MyModule!MyType that uses mscorlib!List<T>. Let's say List<T>.ctor got inlined into
+// MyType.GetAllThings() and into List<MyType>.FindAll. In this case we'll have two InlineeRecords for mscorlib!List<T>.ctor:
+// one for MyModule and another one for mscorlib.
+// PersistentInlineTrackingMap.GetInliners() always reads all InlineeRecords as long as they have the same key; a few of them are filtered out as hash collisions,
+// the others provide legitimate inlining information for methods from different modules.
+//
+// m_inlinersBuffer - byte array compressed by NibbleWriter. At any valid offset taken from InlineeRecord from m_inlineeIndex, there is a compressed chunk
+// of this format:
+// [InlineeModuleZapIndex][InlinerModuleZapIndex] [N - # of following inliners] [#1 inliner method RID] ... [#N inliner method RID]
+// [InlineeModuleZapIndex] is used to verify that we actually found a desired inlinee module (not just a name hash collision).
+// [InlinerModuleZapIndex] is an index of a module that owns following method tokens (inliners)
+// [1..N inliner RID] are the sorted diff compressed method RIDs from the module specified by InlinerModuleZapIndex,
+// these methods directly or indirectly inlined code from the inlinee method specified by the InlineeRecord.
+// Since all the RIDs are sorted, we're actually able to save some space by using diffs instead of absolute values, because NibbleWriter
+// is good at saving small numbers.
+// For example, for RIDs 5, 6, 19, 25, 30 we'll write: 5, 1 (=6-5), 13 (=19-6), 6 (=25-19), 5 (=30-25)
+//
+// m_inlineeIndex
+// +-----+-----+--------------------------------------------------+-----+-----+
+// |  -  |  -  | m_key (module name hash, method token); m_offset |  -  |  -  |
+// +-----+-----+--------------------------------------------|-----+-----+-----+
+// |
+// +-----------------------------------+
+// |
+// m_inlinersBuffer \-/
+// +-----------------+-----------------------+------------------------+------------------------+------+------+--------+------+-------------+
+// | - - - | InlineeModuleZapIndex | InlinerModuleZapIndex | SavedInlinersCount (N) | rid1 | rid2 | ...... | ridN | - - - |
+// +-----------------+-----------------------+------------------------+------------------------+------+------+--------+------+-------------+
+//
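+// A minimal decoding sketch for one chunk, mirroring GetInliners in inlinetracking.cpp
+// ('record' is a hypothetical InlineeRecord located via the binary search described above):
+//
+//   NibbleReader stream(m_inlinersBuffer + record.m_offset, m_inlinersBufferSize - record.m_offset);
+//   DWORD inlineeModuleZapIndex = stream.ReadEncodedU32(); // verify against the module we probed for
+//   DWORD inlinerModuleZapIndex = stream.ReadEncodedU32();
+//   DWORD inlinersCount = stream.ReadEncodedU32();
+//   RID rid = 0;
+//   for (DWORD i = 0; i < inlinersCount; i++)
+//   {
+//       rid += stream.ReadEncodedU32(); // RIDs are diff-encoded, so accumulate
+//       mdMethodDef inlinerTkn = TokenFromRid(rid, mdtMethodDef);
+//   }
+//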
+class PersistentInlineTrackingMap
+{
+private:
+ struct InlineeRecord
+ {
+ DWORD m_key;
+ DWORD m_offset;
+
+ InlineeRecord()
+ : m_key(0)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ InlineeRecord(RID rid, LPCUTF8 simpleName);
+
+ bool operator <(const InlineeRecord& other) const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_key < other.m_key;
+ }
+
+ bool operator ==(const InlineeRecord& other) const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_key == other.m_key;
+ }
+ };
+ typedef DPTR(InlineeRecord) PTR_InlineeRecord;
+
+ PTR_Module m_module;
+
+ PTR_InlineeRecord m_inlineeIndex;
+ DWORD m_inlineeIndexSize;
+
+ PTR_BYTE m_inlinersBuffer;
+ DWORD m_inlinersBufferSize;
+
+public:
+
+ PersistentInlineTrackingMap(Module *module)
+ : m_module(dac_cast<PTR_Module>(module))
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(module != NULL);
+ }
+
+ void Save(DataImage *image, InlineTrackingMap* runtimeMap);
+ void Fixup(DataImage *image);
+
+ COUNT_T GetInliners(PTR_Module inlineeOwnerMod, mdMethodDef inlineeTkn, COUNT_T inlinersSize, MethodInModule inliners[], BOOL *incompleteData);
+
+private:
+ void ProcessInlineTrackingEntry(DataImage *image, SBuffer *inlinersBuffer, SArray<InlineeRecord> *inlineeIndex, InlineTrackingEntry *entry);
+ Module *GetModuleByIndex(DWORD index);
+};
+
+typedef DPTR(PersistentInlineTrackingMap) PTR_PersistentInlineTrackingMap;
+
+#endif //INLINETRACKING_H_
\ No newline at end of file
diff --git a/src/vm/instmethhash.cpp b/src/vm/instmethhash.cpp
new file mode 100644
index 0000000000..60ab0b9e9c
--- /dev/null
+++ b/src/vm/instmethhash.cpp
@@ -0,0 +1,441 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: instmethhash.cpp
+//
+
+
+//
+
+//
+// ============================================================================
+
+#include "common.h"
+#include "excep.h"
+#include "instmethhash.h"
+#include "eeconfig.h"
+#include "generics.h"
+#include "typestring.h"
+#ifdef FEATURE_PREJIT
+#include "zapsig.h"
+#include "compile.h"
+#endif
+#include "ngenhash.inl"
+
+PTR_MethodDesc InstMethodHashEntry::GetMethod()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return dac_cast<PTR_MethodDesc>(dac_cast<TADDR>(data) & ~0x3);
+}
+
+DWORD InstMethodHashEntry::GetFlags()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (DWORD)(dac_cast<TADDR>(data) & 0x3);
+}
+
+#ifndef DACCESS_COMPILE
+
+void InstMethodHashEntry::SetMethodAndFlags(MethodDesc *pMethod, DWORD dwFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(dwFlags <= 0x3);
+ _ASSERTE(((TADDR)pMethod & 0x3) == 0);
+
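+    // MethodDesc pointers are at least 4-byte aligned (asserted above), so the two low
+    // bits of 'data' are free to carry the UnboxingStub (0x1) and RequiresInstArg (0x2)
+    // flags; e.g. (pMD | 0x2) encodes a shared-code method needing an instantiation argument.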
+ data = (MethodDesc*)((TADDR)pMethod | dwFlags);
+}
+
+// ============================================================================
+// Instantiated method hash table methods
+// ============================================================================
+/* static */ InstMethodHashTable *InstMethodHashTable::Create(LoaderAllocator *pAllocator, Module *pModule, DWORD dwNumBuckets, AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ LoaderHeap *pHeap = pAllocator->GetLowFrequencyHeap();
+ InstMethodHashTable *pThis = (InstMethodHashTable*)pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(InstMethodHashTable)));
+
+ new (pThis) InstMethodHashTable(pModule, pHeap, dwNumBuckets);
+
+#ifdef _DEBUG
+ pThis->InitUnseal();
+#endif
+
+ pThis->m_pLoaderAllocator = pAllocator;
+
+ return pThis;
+}
+
+PTR_LoaderAllocator InstMethodHashTable::GetLoaderAllocator()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pLoaderAllocator)
+ {
+ return m_pLoaderAllocator;
+ }
+ else
+ {
+ _ASSERTE(m_pModule != NULL);
+ return m_pModule->GetLoaderAllocator();
+ }
+}
+
+
+// Calculate a hash value for a method-desc key
+static DWORD Hash(TypeHandle declaringType, mdMethodDef token, Instantiation inst)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ DWORD dwHash = 0x87654321;
+#define INST_HASH_ADD(_value) dwHash = ((dwHash << 5) + dwHash) ^ (_value)
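+// (This is the classic "hash * 33 ^ value" combine: (dwHash << 5) + dwHash == dwHash * 33.)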
+
+ INST_HASH_ADD(declaringType.GetCl());
+ INST_HASH_ADD(token);
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle thArg = inst[i];
+
+ if (thArg.GetMethodTable())
+ {
+ INST_HASH_ADD(thArg.GetCl());
+
+ Instantiation sArgInst = thArg.GetInstantiation();
+ for (DWORD j = 0; j < sArgInst.GetNumArgs(); j++)
+ {
+ TypeHandle thSubArg = sArgInst[j];
+ if (thSubArg.GetMethodTable())
+ INST_HASH_ADD(thSubArg.GetCl());
+ else
+ INST_HASH_ADD(thSubArg.GetSignatureCorElementType());
+ }
+ }
+ else
+ INST_HASH_ADD(thArg.GetSignatureCorElementType());
+ }
+
+ return dwHash;
+}
+
+MethodDesc* InstMethodHashTable::FindMethodDesc(TypeHandle declaringType,
+ mdMethodDef token,
+ BOOL unboxingStub,
+ Instantiation inst,
+ BOOL getSharedNotStub)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(declaringType));
+ }
+ CONTRACTL_END
+
+    // We temporarily disable IBC logging here
+    // because the pMD that we search through may not be restored,
+    // and ComputePreferredZapModule will assert on finding an
+    // encoded fixup pointer
+ //
+ IBCLoggingDisabler disableIbcLogging;
+
+ MethodDesc *pMDResult = NULL;
+
+ DWORD dwHash = Hash(declaringType, token, inst);
+ InstMethodHashEntry_t* pSearch;
+ LookupContext sContext;
+
+ for (pSearch = BaseFindFirstEntryByHash(dwHash, &sContext);
+ pSearch != NULL;
+ pSearch = BaseFindNextEntryByHash(&sContext))
+ {
+#ifdef FEATURE_PREJIT
+ // This ensures that GetAssemblyIfLoaded operations that may be triggered by signature walks will succeed if at all possible.
+ ClrFlsThreadTypeSwitch genericInstantionCompareHolder(ThreadType_GenericInstantiationCompare);
+#endif
+
+ MethodDesc *pMD = pSearch->GetMethod();
+
+ if (pMD->GetMemberDef() != token)
+ continue; // Next iteration of the for loop
+
+ if (pMD->GetNumGenericMethodArgs() != inst.GetNumArgs())
+ continue; // Next iteration of the for loop
+
+ DWORD dwKeyFlags = pSearch->GetFlags();
+
+ if ( ((dwKeyFlags & InstMethodHashEntry::RequiresInstArg) == 0) != (getSharedNotStub == 0) )
+ continue;
+
+ if ( ((dwKeyFlags & InstMethodHashEntry::UnboxingStub) == 0) != (unboxingStub == 0) )
+ continue;
+
+ // Note pMD->GetMethodTable() might not be restored at this point.
+
+ RelativeFixupPointer<PTR_MethodTable> * ppMT = pMD->GetMethodTablePtr();
+ TADDR pMT = ppMT->GetValueMaybeTagged((TADDR)ppMT);
+
+ if (!ZapSig::CompareTaggedPointerToTypeHandle(m_pModule, pMT, declaringType))
+ {
+ continue; // Next iteration of the for loop
+ }
+
+ if (!inst.IsEmpty())
+ {
+ Instantiation candidateInst = pMD->GetMethodInstantiation();
+
+ // We have matched the method already, thus the number of arguments in the instantiation should match too.
+ _ASSERTE(inst.GetNumArgs() == candidateInst.GetNumArgs());
+
+ bool match = true; // This is true when all instantiation arguments match
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+                // Fetch the type handle as a TADDR. It may be an encoded fixup - TypeHandle debug-only validation
+ // asserts on encoded fixups.
+ TADDR candidateArg = ((FixupPointer<TADDR> *)candidateInst.GetRawArgs())[i].GetValue();
+
+ if (!ZapSig::CompareTaggedPointerToTypeHandle(m_pModule, candidateArg, inst[i]))
+ {
+ match = false;
+ break;
+ }
+ }
+ if (!match)
+                continue; // Next iteration of the pSearch for loop
+ }
+ //
+ // Success, we found a pMD that matches
+
+ pMDResult = pMD;
+ break; // Exit the for loop and jump to the return pMDResult
+ }
+
+ return pMDResult;
+}
+
+BOOL InstMethodHashTable::ContainsMethodDesc(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return FindMethodDesc(
+ pMD->GetMethodTable(), pMD->GetMemberDef(), pMD->IsUnboxingStub(),
+ pMD->GetMethodInstantiation(), pMD->RequiresInstArg()) != NULL;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+void InstMethodHashTable::Iterator::Reset()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pTable)
+ {
+#ifdef _DEBUG
+ m_pTable->Unseal();
+#endif
+ m_pTable = NULL;
+ }
+
+ Init();
+}
+
+void InstMethodHashTable::Iterator::Init()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef _DEBUG
+ if (m_pTable)
+ m_pTable->Seal(); // The table cannot be changing while it is being iterated
+#endif
+
+ m_fIterating = false;
+}
+
+InstMethodHashTable::Iterator::Iterator()
+{
+ WRAPPER_NO_CONTRACT;
+ m_pTable = NULL;
+ Init();
+}
+
+InstMethodHashTable::Iterator::Iterator(InstMethodHashTable * pTable)
+{
+ WRAPPER_NO_CONTRACT;
+ m_pTable = pTable;
+ Init();
+}
+
+InstMethodHashTable::Iterator::~Iterator()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef _DEBUG
+ if (m_pTable)
+ m_pTable->Unseal(); // Done with the iterator so we unseal
+#endif
+}
+
+BOOL InstMethodHashTable::FindNext(Iterator *it, InstMethodHashEntry **ppEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!it->m_fIterating)
+ {
+ BaseInitIterator(&it->m_sIterator);
+ it->m_fIterating = true;
+ }
+
+ *ppEntry = it->m_sIterator.Next();
+ return *ppEntry ? TRUE : FALSE;
+}
+
+DWORD InstMethodHashTable::GetCount()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return BaseGetElementCount();
+}
+
+#ifndef DACCESS_COMPILE
+
+// Add method desc to the hash table; must not be present already
+void InstMethodHashTable::InsertMethodDesc(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(IsUnsealed()); // If we are sealed then we should not be adding to this hashtable
+ PRECONDITION(CheckPointer(pMD));
+
+ // Generic method definitions (e.g. D.m<U> or C<int>.m<U>) belong in method tables, not here
+ PRECONDITION(!pMD->IsGenericMethodDefinition());
+ }
+ CONTRACTL_END
+
+ InstMethodHashEntry_t * pNewEntry = (InstMethodHashEntry_t*)BaseAllocateEntry(NULL);
+
+ DWORD dwKeyFlags = 0;
+ if (pMD->RequiresInstArg())
+ dwKeyFlags |= InstMethodHashEntry::RequiresInstArg;
+ if (pMD->IsUnboxingStub())
+ dwKeyFlags |= InstMethodHashEntry::UnboxingStub;
+ pNewEntry->SetMethodAndFlags(pMD, dwKeyFlags);
+
+ DWORD dwHash = Hash(pMD->GetMethodTable(), pMD->GetMemberDef(), pMD->GetMethodInstantiation());
+ BaseInsertEntry(dwHash, pNewEntry);
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+
+#ifdef _DEBUG
+void InstMethodHashTableSeal(InstMethodHashTable * pTable) { WRAPPER_NO_CONTRACT; pTable->Seal(); }
+void InstMethodHashTableUnseal(InstMethodHashTable * pTable) { WRAPPER_NO_CONTRACT; pTable->Unseal(); }
+typedef Wrapper<InstMethodHashTable *, InstMethodHashTableSeal, InstMethodHashTableUnseal> InstMethodHashTableSealHolder;
+#endif
+
+// Save the hash table and any method descriptors referenced by it
+void InstMethodHashTable::Save(DataImage *image, CorProfileData *pProfileData)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(image->GetModule() == m_pModule);
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ // The table should not change while we are walking the buckets
+ InstMethodHashTableSealHolder h(this);
+#endif
+
+ BaseSave(image, pProfileData);
+}
+
+bool InstMethodHashTable::ShouldSave(DataImage *pImage, InstMethodHashEntry_t *pEntry)
+{
+ STANDARD_VM_CONTRACT;
+
+ return !!pImage->GetPreloader()->IsMethodInTransitiveClosureOfInstantiations(CORINFO_METHOD_HANDLE(pEntry->GetMethod()));
+}
+
+bool InstMethodHashTable::IsHotEntry(InstMethodHashEntry_t *pEntry, CorProfileData *pProfileData)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return true;
+}
+
+bool InstMethodHashTable::SaveEntry(DataImage *pImage, CorProfileData *pProfileData, InstMethodHashEntry_t *pOldEntry, InstMethodHashEntry_t *pNewEntry, EntryMappingTable *pMap)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return false;
+}
+
+void InstMethodHashTable::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ BaseFixup(image);
+
+ image->ZeroPointerField(this, offsetof(InstMethodHashTable, m_pLoaderAllocator));
+
+#ifdef _DEBUG
+ // The persisted table should be unsealed.
+ InstMethodHashTable *pNewTable = (InstMethodHashTable*) image->GetImagePointer(this);
+ pNewTable->InitUnseal();
+#endif
+}
+
+void InstMethodHashTable::FixupEntry(DataImage *pImage, InstMethodHashEntry_t *pEntry, void *pFixupBase, DWORD cbFixupOffset)
+{
+ STANDARD_VM_CONTRACT;
+
+ pImage->FixupField(pFixupBase, cbFixupOffset + offsetof(InstMethodHashEntry_t, data), pEntry->GetMethod(), pEntry->GetFlags());
+
+ pEntry->GetMethod()->Fixup(pImage);
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+void
+InstMethodHashTable::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ BaseEnumMemoryRegions(flags);
+}
+
+void InstMethodHashTable::EnumMemoryRegionsForEntry(InstMethodHashEntry_t *pEntry, CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ if (pEntry->GetMethod().IsValid())
+ pEntry->GetMethod()->EnumMemoryRegions(flags);
+}
+#endif // #ifdef DACCESS_COMPILE
diff --git a/src/vm/instmethhash.h b/src/vm/instmethhash.h
new file mode 100644
index 0000000000..429ba30b5f
--- /dev/null
+++ b/src/vm/instmethhash.h
@@ -0,0 +1,177 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: instmethhash.h
+//
+
+
+//
+
+//
+// ============================================================================
+
+#ifndef _INSTMETHHASH_H
+#define _INSTMETHHASH_H
+
+#include "ngenhash.h"
+
+class AllocMemTracker;
+
+//========================================================================================
+// The hash table types defined in this header file are used by the loader to
+// look up instantiation-specific methods:
+// - per-instantiation static method stubs, e.g. ArrayList<string>.HelperMeth
+// - instantiated methods e.g. Array.Sort<string>
+//
+// Each persisted Module has an InstMethodHashTable used for such methods that
+// were ngen'ed into that module. See ceeload.hpp for more information about ngen modules.
+//
+// Methods created at runtime are placed in an InstMethodHashTable in BaseDomain.
+//
+// Keys are always derivable from the data stored in the table (MethodDesc),
+// with the exception of some flag values that cannot be computed for unrestored MDs
+// (we need to be able to look up entries without restoring other entries along
+// the way!)
+//
+// The table is safe for multiple readers and a single writer i.e. only one thread
+// can be in InsertMethodDesc but multiple threads can be in FindMethodDesc.
+//========================================================================================
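+//
+// A rough lookup/insert sketch (hypothetical caller; see FindMethodDesc below for the
+// real signature):
+//
+//   MethodDesc *pMD = pTable->FindMethodDesc(thDeclaring, tkMethodDef,
+//                                            FALSE /* unboxingStub */,
+//                                            inst, FALSE /* getSharedNotStub */);
+//   if (pMD == NULL)
+//   {
+//       pMD = /* create the instantiated MethodDesc elsewhere */;
+//       pTable->InsertMethodDesc(pMD); // single writer; concurrent readers are OK
+//   }
+//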
+
+class InstMethodHashTable;
+
+// One of these is present for each element in the table
+// It simply chains together (hash,data) pairs
+typedef DPTR(struct InstMethodHashEntry) PTR_InstMethodHashEntry;
+typedef struct InstMethodHashEntry
+{
+ PTR_MethodDesc GetMethod();
+ DWORD GetFlags();
+#ifndef DACCESS_COMPILE
+ void SetMethodAndFlags(MethodDesc *pMethod, DWORD dwFlags);
+#endif // !DACCESS_COMPILE
+
+ enum
+ {
+ UnboxingStub = 0x01,
+ RequiresInstArg = 0x02
+ };
+
+private:
+ friend class InstMethodHashTable;
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+#ifdef BINDER
+ friend class MdilModule;
+#endif
+
+ PTR_MethodDesc data;
+} InstMethodHashEntry_t;
+
+
+// The method-desc hash table itself
+typedef DPTR(class InstMethodHashTable) PTR_InstMethodHashTable;
+class InstMethodHashTable : public NgenHashTable<InstMethodHashTable, InstMethodHashEntry, 4>
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+public:
+ // This is the allocator
+ PTR_LoaderAllocator m_pLoaderAllocator;
+
+#ifdef _DEBUG
+private:
+    Volatile<LONG> m_dwSealCount; // Can more entries be added to the table?
+
+public:
+ void InitUnseal() { LIMITED_METHOD_CONTRACT; m_dwSealCount = 0; }
+ bool IsUnsealed() { LIMITED_METHOD_CONTRACT; return (m_dwSealCount == 0); }
+ void Seal() { LIMITED_METHOD_CONTRACT; FastInterlockIncrement(&m_dwSealCount); }
+ void Unseal() { LIMITED_METHOD_CONTRACT; FastInterlockDecrement(&m_dwSealCount); }
+#endif // _DEBUG
+
+private:
+ InstMethodHashTable();
+ ~InstMethodHashTable();
+
+public:
+ static InstMethodHashTable* Create(LoaderAllocator *pAllocator, Module *pModule, DWORD dwNumBuckets, AllocMemTracker *pamTracker);
+
+private:
+ friend class NgenHashTable<InstMethodHashTable, InstMethodHashEntry, 4>;
+
+#ifndef DACCESS_COMPILE
+ InstMethodHashTable(Module *pModule, LoaderHeap *pHeap, DWORD cInitialBuckets) :
+ NgenHashTable<InstMethodHashTable, InstMethodHashEntry, 4>(pModule, pHeap, cInitialBuckets) {}
+#endif
+ void operator delete(void *p);
+
+public:
+ // Add a method desc to the hash table
+ void InsertMethodDesc(MethodDesc *pMD);
+
+ // Look up a method in the hash table
+ MethodDesc *FindMethodDesc(TypeHandle declaringType,
+ mdMethodDef token,
+ BOOL unboxingStub,
+ Instantiation inst,
+ BOOL getSharedNotStub);
+
+ BOOL ContainsMethodDesc(MethodDesc* pMD);
+
+#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
+ // Save the hash table and any method descriptors referenced by it
+ void Save(DataImage *image, CorProfileData *pProfileData);
+
+ // Record fixups required on the hash table
+ // Recurse into method descriptors referenced by it
+ void Fixup(DataImage *image);
+
+ bool ShouldSave(DataImage *pImage, InstMethodHashEntry_t *pEntry);
+ bool IsHotEntry(InstMethodHashEntry_t *pEntry, CorProfileData *pProfileData);
+ bool SaveEntry(DataImage *pImage, CorProfileData *pProfileData, InstMethodHashEntry_t *pOldEntry, InstMethodHashEntry_t *pNewEntry, EntryMappingTable *pMap);
+ void FixupEntry(DataImage *pImage, InstMethodHashEntry_t *pEntry, void *pFixupBase, DWORD cbFixupOffset);
+#endif // FEATURE_PREJIT && !DACCESS_COMPILE
+
+ // An iterator for the table, currently used only by Module::Save
+ struct Iterator
+ {
+ public:
+ // This iterator can be reused for walking different tables
+ void Reset();
+ Iterator();
+
+ Iterator(InstMethodHashTable * pTable);
+ ~Iterator();
+
+ private:
+ friend class InstMethodHashTable;
+
+ void Init();
+
+        InstMethodHashTable *m_pTable;
+ BaseIterator m_sIterator;
+ bool m_fIterating;
+ };
+
+ BOOL FindNext(Iterator *it, InstMethodHashEntry **ppEntry);
+
+ DWORD GetCount();
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ void EnumMemoryRegionsForEntry(InstMethodHashEntry_t *pEntry, CLRDataEnumMemoryFlags flags);
+#endif
+
+private:
+ LoaderAllocator* GetLoaderAllocator();
+};
+
+#endif /* _INSTMETHHASH_H */
diff --git a/src/vm/interopconverter.cpp b/src/vm/interopconverter.cpp
new file mode 100644
index 0000000000..4ec41d03b5
--- /dev/null
+++ b/src/vm/interopconverter.cpp
@@ -0,0 +1,985 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#include "vars.hpp"
+#include "excep.h"
+#include "interoputil.h"
+#include "interopconverter.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "olevariant.h"
+#include "comcallablewrapper.h"
+
+#ifdef FEATURE_COMINTEROP
+
+#include "stdinterfaces.h"
+#include "runtimecallablewrapper.h"
+#include "cominterfacemarshaler.h"
+#include "mdaassistants.h"
+#include "binder.h"
+#include "winrttypenameconverter.h"
+#include "typestring.h"
+
+struct MshlPacket
+{
+ DWORD size;
+};
+
+// If the object we are creating is a proxy to another appdomain, we want to create the wrapper for the
+// new object in the appdomain of the proxy target.
+IUnknown* GetIUnknownForMarshalByRefInServerDomain(OBJECTREF* poref)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(poref));
+ PRECONDITION((*poref)->GetTrueMethodTable()->IsMarshaledByRef());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ Context *pContext = NULL;
+
+#ifdef FEATURE_REMOTING
+    // so this is a proxy type;
+    // now get its underlying appdomain, which will be null if non-local
+ if ((*poref)->IsTransparentProxy())
+ pContext = CRemotingServices::GetServerContextForProxy(*poref);
+#endif
+
+ if (pContext == NULL)
+ pContext = GetCurrentContext();
+
+ _ASSERTE(pContext->GetDomain() == GetCurrentContext()->GetDomain());
+
+ CCWHolder pWrap = ComCallWrapper::InlineGetWrapper(poref);
+
+ IUnknown* pUnk = ComCallWrapper::GetComIPFromCCW(pWrap, IID_IUnknown, NULL);
+
+ RETURN pUnk;
+}
+
+#ifdef FEATURE_REMOTING
+//+----------------------------------------------------------------------------
+// IUnknown* GetIUnknownForTransparentProxy(OBJECTREF otp)
+//+----------------------------------------------------------------------------
+
+IUnknown* GetIUnknownForTransparentProxy(OBJECTREF* poref, BOOL fIsBeingMarshalled)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(poref));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ GCX_COOP();
+
+ IUnknown* pUnk;
+
+ OBJECTREF realProxy = ObjectToOBJECTREF(CRemotingServices::GetRealProxy(OBJECTREFToObject(*poref)));
+ _ASSERTE(realProxy != NULL);
+
+ GCPROTECT_BEGIN(realProxy);
+
+ MethodDescCallSite getDCOMProxy(METHOD__REAL_PROXY__GETDCOMPROXY, &realProxy);
+
+ ARG_SLOT args[] = {
+ ObjToArgSlot(realProxy),
+ BoolToArgSlot(fIsBeingMarshalled),
+ };
+
+ ARG_SLOT ret = getDCOMProxy.Call_RetArgSlot(args);
+
+ pUnk = (IUnknown*)ret;
+
+ GCPROTECT_END();
+
+ RETURN pUnk;
+}
+#endif // FEATURE_REMOTING
+
+//--------------------------------------------------------------------------------
+// IUnknown *GetComIPFromObjectRef(OBJECTREF *poref, MethodTable *pMT, ...)
+// Convert ObjectRef to a COM IP, based on MethodTable* pMT.
+IUnknown *GetComIPFromObjectRef(OBJECTREF *poref, MethodTable *pMT, BOOL bSecurityCheck, BOOL bEnableCustomizedQueryInterface)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(poref));
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(g_fComStarted && "COM has not been started up, make sure EnsureComStarted is called before any COM objects are used!");
+ POSTCONDITION((*poref) != NULL ? CheckPointer(RETVAL) : CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ BOOL fReleaseWrapper = false;
+ HRESULT hr = E_NOINTERFACE;
+ SafeComHolder<IUnknown> pUnk = NULL;
+ size_t ul = 0;
+
+ if (*poref == NULL)
+ RETURN NULL;
+
+#ifdef FEATURE_REMOTING
+ if ((*poref)->IsTransparentProxy())
+ {
+ // Retrieve the IID of the interface to QI for.
+ IID iid;
+ if (pMT->IsInterface())
+ {
+ pMT->GetGuid(&iid, TRUE);
+ }
+ else
+ {
+ ComCallWrapperTemplate *pTemplate = ComCallWrapperTemplate::GetTemplate(TypeHandle(pMT));
+ if (pTemplate->SupportsIClassX())
+ {
+ ComMethodTable *pComMT = pTemplate->GetClassComMT();
+ iid = pComMT->GetIID();
+ }
+ else
+ {
+ // if IClassX is not supported, we try the default interface of the class
+ MethodTable *pDefItfMT = pMT->GetDefaultWinRTInterface();
+ if (pDefItfMT != NULL)
+ {
+ pDefItfMT->GetGuid(&iid, TRUE);
+ }
+ else
+ {
+ // else we fail because the class has no IID associated with it
+ IfFailThrow(E_NOINTERFACE);
+ }
+ }
+ }
+
+ // Retrieve an IUnknown for the TP.
+ SafeComHolder<IUnknown> pProxyUnk = GetIUnknownForTransparentProxy(poref, FALSE);
+
+ // QI for the requested interface.
+ IfFailThrow(SafeQueryInterface(pProxyUnk, iid, &pUnk));
+ goto LExit;
+ }
+#endif // FEATURE_REMOTING
+
+ SyncBlock* pBlock = (*poref)->GetSyncBlock();
+
+ InteropSyncBlockInfo* pInteropInfo = pBlock->GetInteropInfo();
+
+ // If we have a CCW, or a NULL CCW but the RCW field was never used,
+ // get the pUnk from the ComCallWrapper, otherwise from the RCW
+ if ((NULL != pInteropInfo->GetCCW()) || (!pInteropInfo->RCWWasUsed()))
+ {
+ CCWHolder pCCWHold = ComCallWrapper::InlineGetWrapper(poref);
+
+ GetComIPFromCCW::flags flags = GetComIPFromCCW::None;
+ if (!bSecurityCheck) { flags |= GetComIPFromCCW::SuppressSecurityCheck; }
+ if (!bEnableCustomizedQueryInterface) { flags |= GetComIPFromCCW::SuppressCustomizedQueryInterface; }
+
+ pUnk = ComCallWrapper::GetComIPFromCCW(pCCWHold, GUID_NULL, pMT, flags);
+ }
+ else
+ {
+ RCWHolder pRCW(GetThread());
+ RCWPROTECT_BEGIN(pRCW, pBlock);
+
+ // The interface will be returned addref'ed.
+ pUnk = pRCW->GetComIPFromRCW(pMT);
+
+ RCWPROTECT_END(pRCW);
+ }
+
+#ifdef FEATURE_REMOTING
+LExit:
+#endif
+ // If we failed to retrieve an IP then throw an exception.
+ if (pUnk == NULL)
+ COMPlusThrowHR(hr);
+
+ pUnk.SuppressRelease();
+ RETURN pUnk;
+}
+
+
+//--------------------------------------------------------------------------------
+// IUnknown *GetComIPFromObjectRef(OBJECTREF *poref, ComIpType ReqIpType, ComIpType *pFetchedIpType);
+// Convert ObjectRef to a COM IP of the requested type.
+IUnknown *GetComIPFromObjectRef(OBJECTREF *poref, ComIpType ReqIpType, ComIpType *pFetchedIpType)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION((ReqIpType & (ComIpType_Dispatch | ComIpType_Unknown | ComIpType_Inspectable)) != 0);
+ PRECONDITION(CheckPointer(poref));
+ PRECONDITION(ReqIpType != 0);
+ POSTCONDITION((*poref) != NULL ? CheckPointer(RETVAL) : CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // COM had better be started up at this point.
+ _ASSERTE(g_fComStarted && "COM has not been started up, make sure EnsureComStarted is called before any COM objects are used!");
+
+ BOOL fReleaseWrapper = false;
+ HRESULT hr = E_NOINTERFACE;
+ IUnknown* pUnk = NULL;
+ size_t ul = 0;
+ ComIpType FetchedIpType = ComIpType_None;
+
+ if (*poref == NULL)
+ RETURN NULL;
+
+ MethodTable *pMT = (*poref)->GetMethodTable();
+#ifdef FEATURE_REMOTING
+ if (pMT->IsTransparentProxy())
+ {
+ SafeComHolder<IUnknown> pProxyUnk = GetIUnknownForTransparentProxy(poref, FALSE);
+
+ if (ReqIpType & ComIpType_Dispatch)
+ {
+ hr = SafeQueryInterface(pProxyUnk, IID_IDispatch, &pUnk);
+ if (SUCCEEDED(hr))
+ {
+ // In Whidbey we used to return ComIpType_Unknown here to maintain backward compatibility with
+ // previous releases where we had mistakenly returned ComIpType_None (which was interpreted as
+ // ComIpType_Unknown by the callers of this method).
+ FetchedIpType = ComIpType_Dispatch;
+ goto LExit;
+ }
+ }
+
+ if (ReqIpType & ComIpType_Inspectable)
+ {
+ hr = SafeQueryInterface(pProxyUnk, IID_IInspectable, &pUnk);
+ if (SUCCEEDED(hr))
+ {
+ FetchedIpType = ComIpType_Inspectable;
+ goto LExit;
+ }
+ }
+
+ if (ReqIpType & ComIpType_Unknown)
+ {
+ hr = SafeQueryInterface(pProxyUnk, IID_IUnknown, &pUnk);
+ if (SUCCEEDED(hr))
+ {
+ FetchedIpType = ComIpType_Unknown;
+ goto LExit;
+ }
+ }
+
+ goto LExit;
+ }
+#endif // FEATURE_REMOTING
+
+ SyncBlock* pBlock = (*poref)->GetSyncBlock();
+
+ InteropSyncBlockInfo* pInteropInfo = pBlock->GetInteropInfo();
+
+ if ( (NULL != pInteropInfo->GetCCW()) || (!pInteropInfo->RCWWasUsed()) )
+ {
+ CCWHolder pCCWHold = ComCallWrapper::InlineGetWrapper(poref);
+
+ // If the user requested IDispatch, then check for IDispatch first.
+ if (ReqIpType & ComIpType_Dispatch)
+ {
+ pUnk = ComCallWrapper::GetComIPFromCCW(pCCWHold, IID_IDispatch, NULL);
+ if (pUnk)
+ FetchedIpType = ComIpType_Dispatch;
+ }
+
+ if (ReqIpType & ComIpType_Inspectable)
+ {
+ WinMDAdapter::RedirectedTypeIndex redirectedTypeIndex;
+
+ MethodTable * pMT = (*poref)->GetMethodTable();
+
+ //
+ // Check whether this object is of a legal WinRT type (including array)
+ //
+ // Note that System.RuntimeType is a weird case - we only redirect System.Type at the type
+ // level, but when boxing an actual instance, we expect it to be a System.RuntimeType
+ // instance, which is not redirected and not a legal WinRT type
+ //
+ // Therefore, special case for System.RuntimeType and treat it as a legal WinRT type
+ // only for boxing
+ //
+ if (pMT->IsLegalWinRTType(poref) ||
+ MscorlibBinder::IsClass(pMT, CLASS__CLASS))
+ {
+ // The managed signature contains Object, and native signature is IInspectable.
+ // "Box" value types by allocating an IReference<T> and storing them inside it.
+ // Similarly, String must be an IReference<HSTRING>. Delegates get wrapped too.
+ // Arrays must be stored in an IReferenceArray<T>.
+ // System.Type instances are in fact of the internal type System.RuntimeType (CLASS__CLASS), which inherits from System.Type.
+ // Note: We do not allow System.ReflectionOnlyType that inherits from System.RuntimeType.
+ // KeyValuePair`2 must be exposed as CLRIKeyValuePair.
+ if (pMT->HasInstantiation() && pMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__KEYVALUEPAIRGENERIC)))
+ {
+ TypeHandle th = TypeHandle(MscorlibBinder::GetClass(CLASS__CLRIKEYVALUEPAIRIMPL)).Instantiate(pMT->GetInstantiation());
+
+ MethodDesc *method = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ MscorlibBinder::GetMethod(METHOD__CLRIKEYVALUEPAIRIMPL__BOXHELPER),
+ th.GetMethodTable(),
+ FALSE,
+ Instantiation(),
+ FALSE);
+ _ASSERTE(method != NULL);
+
+ MethodDescCallSite boxHelper(method);
+
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(*poref),
+ };
+ OBJECTREF orCLRKeyValuePair = boxHelper.Call_RetOBJECTREF(Args);
+
+ GCPROTECT_BEGIN(orCLRKeyValuePair);
+ CCWHolder pCCWHoldBoxed = ComCallWrapper::InlineGetWrapper(&orCLRKeyValuePair);
+ pUnk = ComCallWrapper::GetComIPFromCCW(pCCWHoldBoxed, IID_IInspectable, NULL);
+ GCPROTECT_END();
+ }
+ else if ((pMT->IsValueType() ||
+ pMT->IsStringOrArray() ||
+ pMT->IsDelegate() ||
+ MscorlibBinder::IsClass(pMT, CLASS__CLASS)))
+ {
+ OBJECTREF orBoxedIReference = NULL;
+ MethodDescCallSite createIReference(METHOD__FACTORYFORIREFERENCE__CREATE_IREFERENCE);
+
+ ARG_SLOT Args[] =
+ {
+ ObjToArgSlot(*poref),
+ };
+
+ // Call FactoryForIReference::CreateIReference(Object) for an IReference<T> or IReferenceArray<T>.
+ orBoxedIReference = createIReference.Call_RetOBJECTREF(Args);
+
+ GCPROTECT_BEGIN(orBoxedIReference);
+ CCWHolder pCCWHoldBoxed = ComCallWrapper::InlineGetWrapper(&orBoxedIReference);
+ pUnk = ComCallWrapper::GetComIPFromCCW(pCCWHoldBoxed, IID_IInspectable, NULL);
+ GCPROTECT_END();
+ }
+ else if (WinRTTypeNameConverter::ResolveRedirectedType(pMT, &redirectedTypeIndex))
+ {
+ // This is a redirected type - see if we need to manually marshal it
+ if (redirectedTypeIndex == WinMDAdapter::RedirectedTypeIndex_System_Uri)
+ {
+ UriMarshalingInfo *pUriMarshalInfo = GetAppDomain()->GetMarshalingData()->GetUriMarshalingInfo();
+ struct
+ {
+ OBJECTREF ref;
+ STRINGREF refRawUri;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ gc.ref = *poref;
+
+ MethodDescCallSite getRawURI(pUriMarshalInfo->GetSystemUriOriginalStringMD());
+ ARG_SLOT getRawURIArgs[] =
+ {
+ ObjToArgSlot(gc.ref)
+ };
+
+ gc.refRawUri = (STRINGREF)getRawURI.Call_RetOBJECTREF(getRawURIArgs);
+
+ DWORD cchRawUri = gc.refRawUri->GetStringLength();
+ LPCWSTR wszRawUri = gc.refRawUri->GetBuffer();
+
+ {
+ GCX_PREEMP();
+ pUnk = CreateWinRTUri(wszRawUri, static_cast<INT32>(cchRawUri));
+ }
+
+ GCPROTECT_END();
+ }
+ else if (redirectedTypeIndex == WinMDAdapter::RedirectedTypeIndex_System_Collections_Specialized_NotifyCollectionChangedEventArgs ||
+ redirectedTypeIndex == WinMDAdapter::RedirectedTypeIndex_System_ComponentModel_PropertyChangedEventArgs)
+ {
+ MethodDesc *pMD;
+ EventArgsMarshalingInfo *pInfo = GetAppDomain()->GetMarshalingData()->GetEventArgsMarshalingInfo();
+
+ if (redirectedTypeIndex == WinMDAdapter::RedirectedTypeIndex_System_Collections_Specialized_NotifyCollectionChangedEventArgs)
+ pMD = pInfo->GetSystemNCCEventArgsToWinRTNCCEventArgsMD();
+ else
+ pMD = pInfo->GetSystemPCEventArgsToWinRTPCEventArgsMD();
+
+ MethodDescCallSite marshalMethod(pMD);
+ ARG_SLOT methodArgs[] =
+ {
+ ObjToArgSlot(*poref)
+ };
+ pUnk = (IUnknown *)marshalMethod.Call_RetLPVOID(methodArgs);
+ }
+ else
+ {
+ _ASSERTE(!W("Unexpected redirected type seen in GetComIPFromObjectRef"));
+ }
+ }
+ else
+ {
+ //
+ // WinRT reference type - marshal as IInspectable
+ //
+ pUnk = ComCallWrapper::GetComIPFromCCW(pCCWHold, IID_IInspectable, /* pIntfMT = */ NULL);
+ }
+ }
+ else
+ {
+ //
+ // Marshal non-WinRT types as IInspectable* to enable round-tripping (for example, TextBox.Tag property)
+ // By default, this returns ICustomPropertyProvider.
+ //
+ pUnk = ComCallWrapper::GetComIPFromCCW(pCCWHold, IID_IInspectable, /* pIntfMT = */ NULL);
+ }
+
+ if (pUnk)
+ FetchedIpType = ComIpType_Inspectable;
+ }
+
+ // If the ObjectRef doesn't support IDispatch and the caller also accepts
+ // an IUnknown pointer, then check for IUnknown.
+ if (!pUnk && (ReqIpType & ComIpType_Unknown))
+ {
+ if (ReqIpType & ComIpType_OuterUnknown)
+ {
+ // check if the object is aggregated
+ SimpleComCallWrapper* pSimpleWrap = pCCWHold->GetSimpleWrapper();
+ if (pSimpleWrap)
+ {
+ pUnk = pSimpleWrap->GetOuter();
+ if (pUnk)
+ SafeAddRef(pUnk);
+ }
+ }
+ if (!pUnk)
+ pUnk = ComCallWrapper::GetComIPFromCCW(pCCWHold, IID_IUnknown, NULL);
+ if (pUnk)
+ FetchedIpType = ComIpType_Unknown;
+ }
+ }
+ else
+ {
+ RCWHolder pRCW(GetThread());
+
+ // This code is hot, use a simple RCWHolder check (i.e. don't increment the use count on the RCW).
+ // @TODO: Cache IInspectable & IDispatch so we don't have to QI every time we come here.
+ pRCW.InitFastCheck(pBlock);
+
+ // If the user requested IDispatch, then check for IDispatch first.
+ if (ReqIpType & ComIpType_Dispatch)
+ {
+ pUnk = pRCW->GetIDispatch();
+ if (pUnk)
+ FetchedIpType = ComIpType_Dispatch;
+ }
+
+ if (ReqIpType & ComIpType_Inspectable)
+ {
+ pUnk = pRCW->GetIInspectable();
+ if (pUnk)
+ FetchedIpType = ComIpType_Inspectable;
+ }
+
+ // If the ObjectRef doesn't support IDispatch and the caller also accepts
+ // an IUnknown pointer, then check for IUnknown.
+ if (!pUnk && (ReqIpType & ComIpType_Unknown))
+ {
+ pUnk = pRCW->GetIUnknown();
+ if (pUnk)
+ FetchedIpType = ComIpType_Unknown;
+ }
+ }
+
+#ifdef FEATURE_REMOTING
+LExit:
+#endif
+ // If we failed to retrieve an IP then throw an exception.
+ if (pUnk == NULL)
+ COMPlusThrowHR(hr);
+
+ // If the caller wants to know the fetched IP type, then set pFetchedIpType
+ // to the type of the IP.
+ if (pFetchedIpType)
+ *pFetchedIpType = FetchedIpType;
+
+ RETURN pUnk;
+}
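+
+// Illustrative (hypothetical) use of the overload above: request either IDispatch
+// or IUnknown and learn which one was actually fetched:
+//
+//     ComIpType fetched = ComIpType_None;
+//     SafeComHolder<IUnknown> pIP = GetComIPFromObjectRef(&oref, ComIpType_Both, &fetched);
+//     if (fetched == ComIpType_Dispatch)
+//         // the object supplied an IDispatch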
+
+
+//+----------------------------------------------------------------------------
+// IUnknown *GetComIPFromObjectRef(OBJECTREF *poref, REFIID iid);
+// Convert ObjectRef to a COM IP, based on riid.
+//+----------------------------------------------------------------------------
+IUnknown *GetComIPFromObjectRef(OBJECTREF *poref, REFIID iid, bool throwIfNoComIP /* = true */)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(poref));
+ POSTCONDITION((*poref) != NULL ? CheckPointer(RETVAL, throwIfNoComIP ? NULL_NOT_OK : NULL_OK) : CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ ASSERT_PROTECTED(poref);
+
+ // COM had better be started up at this point.
+ _ASSERTE(g_fComStarted && "COM has not been started up, make sure EnsureComStarted is called before any COM objects are used!");
+
+ BOOL fReleaseWrapper = false;
+ HRESULT hr = E_NOINTERFACE;
+ IUnknown* pUnk = NULL;
+ size_t ul = 0;
+
+ if (*poref == NULL)
+ RETURN NULL;
+
+ MethodTable *pMT = (*poref)->GetMethodTable();
+#ifdef FEATURE_REMOTING
+ if (pMT->IsTransparentProxy())
+ {
+ SafeComHolder<IUnknown> pProxyUnk = GetIUnknownForTransparentProxy(poref, FALSE);
+ IfFailThrow(SafeQueryInterface(pProxyUnk, iid, &pUnk));
+ goto LExit;
+ }
+#endif
+
+ SyncBlock* pBlock = (*poref)->GetSyncBlock();
+
+ InteropSyncBlockInfo* pInteropInfo = pBlock->GetInteropInfo();
+
+ if ((NULL != pInteropInfo->GetCCW()) || (!pInteropInfo->RCWWasUsed()))
+ {
+ CCWHolder pCCWHold = ComCallWrapper::InlineGetWrapper(poref);
+ pUnk = ComCallWrapper::GetComIPFromCCW(pCCWHold, iid, NULL);
+ }
+ else
+ {
+ SafeComHolder<IUnknown> pUnkHolder;
+
+ RCWHolder pRCW(GetThread());
+ RCWPROTECT_BEGIN(pRCW, pBlock);
+
+ // The interface will be returned addref'ed.
+ pUnkHolder = pRCW->GetComIPFromRCW(iid);
+
+ RCWPROTECT_END(pRCW);
+
+ pUnk = pUnkHolder.Extract();
+ }
+
+#ifdef FEATURE_REMOTING
+LExit:
+#endif
+ if (throwIfNoComIP && pUnk == NULL)
+ COMPlusThrowHR(hr);
+
+ RETURN pUnk;
+}
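+
+// Illustrative (hypothetical) non-throwing probe using the overload above:
+//
+//     SafeComHolder<IUnknown> pErrInfo = GetComIPFromObjectRef(&oref, IID_IErrorInfo, /* throwIfNoComIP */ false);
+//     if (pErrInfo == NULL)
+//         // the object does not expose IErrorInfo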
+
+#ifdef FEATURE_REMOTING
+OBJECTREF GetObjectRefFromComIP_CrossDomain(ADID objDomainId, ComCallWrapper* pWrap)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF oref = NULL;
+
+ EX_TRY
+ {
+ // The CCW belongs to a different domain;
+ // unmarshal the object to the current domain.
+ if (!UnMarshalObjectForCurrentDomain(objDomainId, pWrap, &oref))
+ oref = NULL;
+ }
+ EX_CATCH
+ {
+ // fall back to creating an RCW if we were unable to
+ // marshal the object (most commonly because the object
+ // graph is not serializable)
+ oref = NULL;
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return oref;
+}
+#endif //#ifdef FEATURE_REMOTING
+
+//+----------------------------------------------------------------------------
+// GetObjectRefFromComIP
+// pUnk : input IUnknown
+// pMTClass : specifies the type of instance to be returned
+// NOTE: As per COM rules, the IUnknown passed in shouldn't be AddRef'ed.
+//+----------------------------------------------------------------------------
+void GetObjectRefFromComIP(OBJECTREF* pObjOut, IUnknown **ppUnk, MethodTable *pMTClass, MethodTable *pItfMT, DWORD dwFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(ppUnk));
+ PRECONDITION(CheckPointer(*ppUnk, NULL_OK));
+ PRECONDITION(CheckPointer(pMTClass, NULL_OK));
+ PRECONDITION(IsProtectedByGCFrame(pObjOut));
+ PRECONDITION(pItfMT == NULL || pItfMT->IsInterface());
+ }
+ CONTRACTL_END;
+
+ // COM had better be started up at this point.
+ _ASSERTE(g_fComStarted && "COM has not been started up, make sure EnsureComStarted is called before any COM objects are used!");
+
+ IUnknown *pUnk = *ppUnk;
+ Thread * pThread = GetThread();
+
+#ifdef MDA_SUPPORTED
+ MdaInvalidIUnknown* mda = MDA_GET_ASSISTANT(InvalidIUnknown);
+ if (mda && pUnk)
+ {
+ // Test pUnk
+ SafeComHolder<IUnknown> pTemp;
+ HRESULT hr = SafeQueryInterface(pUnk, IID_IUnknown, &pTemp);
+ if (hr != S_OK)
+ mda->ReportViolation();
+ }
+#endif
+
+ *pObjOut = NULL;
+ IUnknown* pOuter = pUnk;
+ SafeComHolder<IUnknown> pAutoOuterUnk = NULL;
+
+ if (pUnk != NULL)
+ {
+ // get CCW for IUnknown
+ ComCallWrapper* pWrap = GetCCWFromIUnknown(pUnk);
+ if (pWrap == NULL)
+ {
+ // could be aggregated scenario
+ HRESULT hr = SafeQueryInterface(pUnk, IID_IUnknown, &pOuter);
+ LogInteropQI(pUnk, IID_IUnknown, hr, "GetObjectRefFromComIP: QI for Outer");
+ IfFailThrow(hr);
+
+ // store the outer in the auto pointer
+ pAutoOuterUnk = pOuter;
+ pWrap = GetCCWFromIUnknown(pOuter);
+ }
+
+ if (pWrap != NULL)
+ { // our tear-off
+ _ASSERTE(pWrap != NULL);
+ AppDomain* pCurrDomain = pThread->GetDomain();
+ ADID pObjDomain = pWrap->GetDomainID();
+#ifdef FEATURE_REMOTING
+ if (pObjDomain == pCurrDomain->GetId())
+ *pObjOut = pWrap->GetObjectRef();
+ else
+ *pObjOut = GetObjectRefFromComIP_CrossDomain(pObjDomain, pWrap);
+#else
+ _ASSERTE(pObjDomain == pCurrDomain->GetId());
+ *pObjOut = pWrap->GetObjectRef();
+#endif
+ }
+
+ if (*pObjOut != NULL)
+ {
+ if (!(dwFlags & ObjFromComIP::IGNORE_WINRT_AND_SKIP_UNBOXING))
+ {
+ // Unbox objects from a CLRIReferenceImpl<T> or CLRIReferenceArrayImpl<T>.
+ MethodTable *pMT = (*pObjOut)->GetMethodTable();
+ if (pMT->HasInstantiation())
+ {
+ DWORD nGenericArgs = pMT->GetNumGenericArgs();
+ if (nGenericArgs == 1)
+ {
+ // See if this type C<SomeType> is a G<T>.
+ if (pMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__CLRIREFERENCEIMPL)))
+ {
+ TypeHandle thType = pMT->GetInstantiation()[0];
+ COMInterfaceMarshaler::IReferenceOrIReferenceArrayUnboxWorker(*pObjOut, thType, FALSE, pObjOut);
+ }
+ else if (pMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__CLRIREFERENCEARRAYIMPL)))
+ {
+ TypeHandle thArrayElementType = pMT->GetInstantiation()[0];
+ COMInterfaceMarshaler::IReferenceOrIReferenceArrayUnboxWorker(*pObjOut, thArrayElementType, TRUE, pObjOut);
+ }
+ }
+ else if ((nGenericArgs == 2) && pMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__CLRIKEYVALUEPAIRIMPL)))
+ {
+ // Unbox IKeyValuePair from CLRIKeyValuePairImpl
+ COMInterfaceMarshaler::IKeyValuePairUnboxWorker(*pObjOut, pObjOut);
+ }
+ }
+ }
+ }
+ else
+ {
+ // Only pass in the class method table to the interface marshaler if
+ // it is a COM import (or COM import derived) class or a WinRT delegate.
+ MethodTable *pComClassMT = NULL;
+ if (pMTClass)
+ {
+ if (pMTClass->IsComObjectType() ||
+ (pMTClass->IsDelegate() && (pMTClass->IsProjectedFromWinRT() || WinRTTypeNameConverter::IsRedirectedType(pMTClass))))
+ {
+ pComClassMT = pMTClass;
+ }
+ }
+
+ DWORD flags = RCW::CreationFlagsFromObjForComIPFlags((ObjFromComIP::flags)dwFlags);
+
+ // Convert the IP to an OBJECTREF.
+ COMInterfaceMarshaler marshaler;
+
+ marshaler.Init(pOuter, pComClassMT, pThread, flags);
+
+ if (flags & ObjFromComIP::SUPPRESS_ADDREF)
+ {
+ // We can swallow the reference in ppUnk
+ // This only happens in WinRT
+ *pObjOut = marshaler.FindOrCreateObjectRef(ppUnk, pItfMT);
+ }
+ else
+ {
+ *pObjOut = marshaler.FindOrCreateObjectRef(pUnk, pItfMT);
+ }
+ }
+ }
+
+
+ if ((0 == (dwFlags & ObjFromComIP::CLASS_IS_HINT)) && (*pObjOut != NULL))
+ {
+ // make sure we can cast to the specified class
+ if (pMTClass != NULL)
+ {
+ FAULT_NOT_FATAL();
+
+ // Bad format exception thrown for backward compatibility
+ THROW_BAD_FORMAT_MAYBE(pMTClass->IsArray() == FALSE, BFA_UNEXPECTED_ARRAY_TYPE, pMTClass);
+
+ if (!CanCastComObject(*pObjOut, pMTClass))
+ {
+ StackSString ssObjClsName;
+ StackSString ssDestClsName;
+
+ (*pObjOut)->GetTrueMethodTable()->_GetFullyQualifiedNameForClass(ssObjClsName);
+ pMTClass->_GetFullyQualifiedNameForClass(ssDestClsName);
+
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOTCAST,
+ ssObjClsName.GetUnicode(), ssDestClsName.GetUnicode());
+ }
+ }
+ else if (dwFlags & ObjFromComIP::REQUIRE_IINSPECTABLE)
+ {
+ MethodTable *pMT = (*pObjOut)->GetMethodTable();
+ if (pMT->IsDelegate() && pMT->IsProjectedFromWinRT())
+ {
+ // This is a WinRT delegate - WinRT delegates don't implement IInspectable but we allow unboxing a WinRT delegate
+ // from an IReference<T>
+ }
+ else
+ {
+ // Just call GetComIPFromObjectRef. We could be more efficient here but the code would get complicated
+ // which doesn't seem to be worth it. The function throws an exception if the QI/cast fails.
+ SafeComHolder<IUnknown> pInsp = GetComIPFromObjectRef(pObjOut, ComIpType_Inspectable, NULL);
+ _ASSERTE(pInsp != NULL);
+ }
+ }
+ }
+}
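+
+// Illustrative (hypothetical) unmarshaling sketch for the converter above; the
+// destination OBJECTREF must be GC-protected, per the PRECONDITION:
+//
+//     OBJECTREF oref = NULL;
+//     GCPROTECT_BEGIN(oref);
+//     GetObjectRefFromComIP(&oref, &pUnk, /* pMTClass */ NULL, /* pItfMT */ NULL, ObjFromComIP::NONE);
+//     // ... use oref ...
+//     GCPROTECT_END();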
+#endif // FEATURE_COMINTEROP
+
+
+#ifdef FEATURE_REMOTING
+//--------------------------------------------------------------------------------
+// ConvertObjectToBSTR
+// Serializes an object to a BSTR; the caller needs to SysFreeString the BSTR
+// and GCPROTECT the oref parameter.
+//--------------------------------------------------------------------------------
+BOOL ConvertObjectToBSTR(OBJECTREF* oref, BOOL fCrossRuntime, BSTR* pBStr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pBStr));
+ PRECONDITION(IsProtectedByGCFrame (oref));
+ }
+ CONTRACTL_END;
+
+ *pBStr = NULL;
+
+ MethodTable *pMT = (*oref)->GetMethodTable();
+ if (!pMT->IsTransparentProxy() && !pMT->IsMarshaledByRef() && !pMT->IsSerializable())
+ {
+ // The object is not serializable - don't waste time calling to managed and trying to
+ // serialize it with a formatter. This is an optimization so we don't throw and catch
+ // SerializationException unnecessarily.
+ return FALSE;
+ }
+
+ // We will be using the remoting services so make sure remoting is started up.
+ CRemotingServices::EnsureRemotingStarted();
+
+ MethodDescCallSite marshalToBuffer(METHOD__REMOTING_SERVICES__MARSHAL_TO_BUFFER);
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(*oref),
+ BoolToArgSlot(fCrossRuntime)
+ };
+
+ BASEARRAYREF aref = (BASEARRAYREF) marshalToBuffer.Call_RetOBJECTREF(args);
+
+ if (aref != NULL)
+ {
+ _ASSERTE(!aref->IsMultiDimArray());
+ //@todo ASSERTE that the array is a byte array
+
+ ULONG cbSize = aref->GetNumComponents();
+ BYTE* pBuf = (BYTE *)aref->GetDataPtr();
+
+ BSTR bstr = SysAllocStringByteLen(NULL, cbSize);
+ if (bstr == NULL)
+ COMPlusThrowOM();
+
+ CopyMemory(bstr, pBuf, cbSize);
+ *pBStr = bstr;
+ }
+
+ return TRUE;
+}
+
+//--------------------------------------------------------------------------------
+// ConvertBSTRToObject
+// Deserializes a BSTR created using ConvertObjectToBSTR; this API SysFreeString's the BSTR.
+//--------------------------------------------------------------------------------
+OBJECTREF ConvertBSTRToObject(BSTR bstr, BOOL fCrossRuntime)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ BSTRHolder localBstr(bstr);
+
+ OBJECTREF oref = NULL;
+
+ // We will be using the remoting services so make sure remoting is started up.
+ CRemotingServices::EnsureRemotingStarted();
+
+ MethodDescCallSite unmarshalFromBuffer(METHOD__REMOTING_SERVICES__UNMARSHAL_FROM_BUFFER);
+
+ // convert BSTR to a byte array
+
+ // allocate a byte array
+ INT32 elementCount = SysStringByteLen(bstr);
+ TypeHandle t = OleVariant::GetArrayForVarType(VT_UI1, TypeHandle((MethodTable *)NULL));
+ BASEARRAYREF aref = (BASEARRAYREF) AllocateArrayEx(t, &elementCount, 1);
+ // copy the bstr data into the managed byte array
+ memcpyNoGCRefs(aref->GetDataPtr(), bstr, elementCount);
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot((OBJECTREF)aref),
+ BoolToArgSlot(fCrossRuntime)
+ };
+
+ oref = unmarshalFromBuffer.Call_RetOBJECTREF(args);
+
+ return oref;
+}
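+
+// Illustrative (hypothetical) round trip through the two helpers above; note that
+// ConvertBSTRToObject frees the BSTR on all paths via BSTRHolder:
+//
+//     BSTR bstr = NULL;
+//     GCPROTECT_BEGIN(oref);
+//     if (ConvertObjectToBSTR(&oref, /* fCrossRuntime */ FALSE, &bstr))
+//         copy = ConvertBSTRToObject(bstr, FALSE);   // 'copy' is a protected OBJECTREF
+//     GCPROTECT_END();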
+
+//--------------------------------------------------------------------------------
+// UnMarshalObjectForCurrentDomain
+// unmarshal the managed object for the current domain
+//--------------------------------------------------------------------------------
+struct ConvertObjectToBSTR_Args
+{
+ OBJECTREF* oref;
+ BOOL fCrossRuntime;
+ BSTR *pBStr;
+ BOOL fResult;
+};
+
+void ConvertObjectToBSTR_Wrapper(LPVOID ptr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ConvertObjectToBSTR_Args *args = (ConvertObjectToBSTR_Args *)ptr;
+ args->fResult = ConvertObjectToBSTR(args->oref, args->fCrossRuntime, args->pBStr);
+}
+
+
+BOOL UnMarshalObjectForCurrentDomain(ADID pObjDomain, ComCallWrapper* pWrap, OBJECTREF* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pWrap));
+ PRECONDITION(CheckPointer(pResult));
+ }
+ CONTRACTL_END;
+
+ Thread* pThread = GetThread();
+ _ASSERTE(pThread);
+
+ _ASSERTE(pThread->GetDomain() != NULL);
+ _ASSERTE(pThread->GetDomain()->GetId()!= pObjDomain);
+
+ BSTR bstr = NULL;
+ ConvertObjectToBSTR_Args args;
+ args.fCrossRuntime = FALSE;
+ args.pBStr = &bstr;
+
+ OBJECTREF oref = pWrap->GetObjectRef();
+
+ GCPROTECT_BEGIN(oref);
+ {
+ args.oref = &oref;
+ pThread->DoADCallBack(pObjDomain, ConvertObjectToBSTR_Wrapper, &args);
+ }
+ GCPROTECT_END();
+
+ if (args.fResult)
+ {
+ _ASSERTE(bstr != NULL);
+ *pResult = ConvertBSTRToObject(bstr, FALSE);
+ }
+ else
+ {
+ *pResult = NULL;
+ }
+
+ return args.fResult;
+}
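+
+// In summary, the cross-domain path above transitions into the object's home
+// domain with DoADCallBack, serializes the object to a BSTR there
+// (ConvertObjectToBSTR), and then deserializes it into the current domain with
+// ConvertBSTRToObject.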
+#endif //FEATURE_REMOTING
diff --git a/src/vm/interopconverter.h b/src/vm/interopconverter.h
new file mode 100644
index 0000000000..16ff78dcd6
--- /dev/null
+++ b/src/vm/interopconverter.h
@@ -0,0 +1,184 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _H_INTEROPCONVERTER_
+#define _H_INTEROPCONVERTER_
+
+#include "debugmacros.h"
+
+
+struct ItfMarshalInfo
+{
+ enum ItfMarshalFlags
+ {
+ ITF_MARSHAL_INSP_ITF = 0x01, // IInspectable-based interface
+ ITF_MARSHAL_SUPPRESS_ADDREF = 0x02,
+ ITF_MARSHAL_CLASS_IS_HINT = 0x04,
+ ITF_MARSHAL_DISP_ITF = 0x08,
+ ITF_MARSHAL_USE_BASIC_ITF = 0x10,
+ ITF_MARSHAL_WINRT_SCENARIO = 0x20, // WinRT scenario only
+ };
+
+ TypeHandle thClass;
+ TypeHandle thItf;
+ TypeHandle thNativeItf;
+ DWORD dwFlags;
+};
+
+/*
+ enum CreationFlags // member of RCW struct
+ {
+ CF_None = 0x00,
+ CF_SupportsIInspectable = 0x01, // the underlying object supports IInspectable
+ CF_SuppressAddRef = 0x02, // do not AddRef the underlying interface pointer
+ CF_IsWeakReference = 0x04, // mark the RCW as "weak"
+ CF_NeedUniqueObject = 0x08, // always create a new RCW/object even if we have one cached already
+ CF_DontResolveClass = 0x10, // don't attempt to create a strongly typed RCW
+ };
+*/
+
+
+/*
+01 REQUIRE_IINSPECTABLE 01 ITF_MARSHAL_INSP_ITF 01 CF_SupportsIInspectable
+02 SUPPRESS_ADDREF 02 ITF_MARSHAL_SUPPRESS_ADDREF
+ 04 CF_IsWeakReference
+04 CLASS_IS_HINT 04 ITF_MARSHAL_CLASS_IS_HINT
+08 UNIQUE_OBJECT 08 CF_NeedUniqueObject
+ 08 ITF_MARSHAL_DISP_ITF
+10 IGNORE_WINRT_AND_SKIP_UNBOXING 10 CF_DontResolveClass
+ 10 ITF_MARSHAL_USE_BASIC_ITF
+ 20 ITF_MARSHAL_WINRT_SCENARIO
+*/
+
+struct ObjFromComIP
+{
+ enum flags
+ {
+ NONE = 0x00,
+ REQUIRE_IINSPECTABLE = 0x01, // ITF_MARSHAL_INSP_ITF = 0x01 // CF_SupportsIInspectable = 0x01
+ SUPPRESS_ADDREF = 0x02, // ITF_MARSHAL_SUPPRESS_ADDREF = 0x02 // CF_SuppressAddRef = 0x02
+ CLASS_IS_HINT = 0x04, // ITF_MARSHAL_CLASS_IS_HINT = 0x04
+ UNIQUE_OBJECT = 0x08, // CF_NeedUniqueObject = 0x08
+ IGNORE_WINRT_AND_SKIP_UNBOXING = 0x10, // CF_DontResolveClass = 0x10
+ };
+
+ static flags FromItfMarshalInfoFlags(DWORD dwFlags)
+ {
+ static_assert_no_msg(((DWORD)CLASS_IS_HINT) == ((DWORD)ItfMarshalInfo::ITF_MARSHAL_CLASS_IS_HINT));
+ static_assert_no_msg(((DWORD)REQUIRE_IINSPECTABLE) == ((DWORD)ItfMarshalInfo::ITF_MARSHAL_INSP_ITF));
+ static_assert_no_msg(((DWORD)SUPPRESS_ADDREF) == ((DWORD)ItfMarshalInfo::ITF_MARSHAL_SUPPRESS_ADDREF));
+
+ DWORD dwResult = (dwFlags &
+ (ItfMarshalInfo::ITF_MARSHAL_CLASS_IS_HINT|
+ ItfMarshalInfo::ITF_MARSHAL_INSP_ITF|
+ ItfMarshalInfo::ITF_MARSHAL_SUPPRESS_ADDREF));
+ return (flags)dwResult;
+ }
+};
+
+inline ObjFromComIP::flags operator|(ObjFromComIP::flags lhs, ObjFromComIP::flags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<ObjFromComIP::flags>(static_cast<DWORD>(lhs) | static_cast<DWORD>(rhs));
+}
+inline ObjFromComIP::flags operator|=(ObjFromComIP::flags & lhs, ObjFromComIP::flags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<ObjFromComIP::flags>(static_cast<DWORD>(lhs) | static_cast<DWORD>(rhs));
+ return lhs;
+}
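+
+// Illustrative (hypothetical) composition using the operators above; 'fClassIsHint'
+// is a made-up local standing in for whatever condition the caller tests:
+//
+//     ObjFromComIP::flags flags = ObjFromComIP::NONE;
+//     if (fClassIsHint)
+//         flags |= ObjFromComIP::CLASS_IS_HINT;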
+
+
+//
+// THE FOLLOWING ARE THE MAIN APIS USED BY EVERYONE TO CONVERT BETWEEN
+// OBJECTREFs AND COM IPs
+
+#ifdef FEATURE_COMINTEROP
+
+//--------------------------------------------------------------------------------
+// The type of COM IP to convert the OBJECTREF to.
+enum ComIpType
+{
+ ComIpType_None = 0x0,
+ ComIpType_Unknown = 0x1,
+ ComIpType_Dispatch = 0x2,
+ ComIpType_Both = 0x3,
+ ComIpType_OuterUnknown = 0x5,
+ ComIpType_Inspectable = 0x8,
+};
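+
+// The values above combine as bit flags: ComIpType_Both is
+// (ComIpType_Unknown | ComIpType_Dispatch), and ComIpType_OuterUnknown sets the
+// 0x4 modifier bit on top of ComIpType_Unknown.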
+
+
+//--------------------------------------------------------------------------------
+// GetIUnknownForMarshalByRefInServerDomain
+// Sets up a CCW for a transparent proxy/MarshalByRef object in the server domain;
+// either the object is in-proc and the domains match, or it's out-of-proc
+// and we don't care about appdomains.
+IUnknown* GetIUnknownForMarshalByRefInServerDomain(OBJECTREF* poref);
+
+//--------------------------------------------------------------------------------
+// GetIUnknownForTransparentProxy
+// delegates the call to the managed implementation in the real proxy
+
+IUnknown* GetIUnknownForTransparentProxy(OBJECTREF* poref, BOOL fIsBeingMarshalled);
+
+//--------------------------------------------------------------------------------
+// IUnknown *GetComIPFromObjectRef(OBJECTREF *poref, MethodTable *pMT, ...);
+// Convert ObjectRef to a COM IP, based on MethodTable* pMT.
+IUnknown *GetComIPFromObjectRef(OBJECTREF *poref, MethodTable *pMT, BOOL bSecurityCheck = TRUE, BOOL bEnableCustomizedQueryInterface = TRUE);
+
+
+//--------------------------------------------------------------------------------
+// IUnknown *GetComIPFromObjectRef(OBJECTREF *poref, MethodTable *pMT);
+// Convert ObjectRef to a COM IP of the requested type.
+IUnknown *GetComIPFromObjectRef(OBJECTREF *poref,
+ ComIpType ReqIpType = ComIpType_Unknown, ComIpType *pFetchedIpType = NULL);
+
+
+//--------------------------------------------------------------------------------
+// IUnknown *GetComIPFromObjectRef(OBJECTREF *poref, REFIID iid);
+// Convert ObjectRef to a COM IP, based on riid.
+IUnknown *GetComIPFromObjectRef(OBJECTREF *poref, REFIID iid, bool throwIfNoComIP = true);
+
+
+//--------------------------------------------------------------------------------
+// GetObjectRefFromComIP(IUnknown **ppUnk, MethodTable *pMTClass, ...)
+// Converts a COM IP to ObjectRef; pMTClass is the desired RCW type. If the SUPPRESS_ADDREF flag is set and a new
+// RCW is created, *ppUnk will be assigned NULL to signal to the caller that it is no longer responsible for releasing the IP.
+void GetObjectRefFromComIP(OBJECTREF* pObjOut, IUnknown **ppUnk, MethodTable *pMTClass, MethodTable *pItfMT, DWORD dwFlags); // ObjFromComIP::flags
+
+//--------------------------------------------------------------------------------
+// GetObjectRefFromComIP(IUnknown *pUnk, MethodTable *pMTClass, ...)
+// Converts a COM IP to ObjectRef, pMTClass is the desired RCW type.
+inline void GetObjectRefFromComIP(OBJECTREF* pObjOut, IUnknown *pUnk, MethodTable *pMTClass = NULL, MethodTable *pItfMT = NULL, DWORD dwFlags = ObjFromComIP::NONE)
+{
+ WRAPPER_NO_CONTRACT;
+ return GetObjectRefFromComIP(pObjOut, &pUnk, pMTClass, pItfMT, dwFlags);
+}
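+
+// Note: because the inline overload above takes pUnk by value and passes the
+// address of its local copy, a SUPPRESS_ADDREF NULL-out of *ppUnk cannot reach
+// the caller's pointer; callers that rely on that signal must use the
+// IUnknown** overload directly.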
+
+#ifdef FEATURE_REMOTING // used only by remoting
+//--------------------------------------------------------
+// managed serialization helpers
+//--------------------------------------------------------
+// ConvertObjectToBSTR
+// Serializes an object to a BSTR; the caller needs to SysFreeString the BSTR
+// and GCPROTECT the oref parameter.
+BOOL ConvertObjectToBSTR(OBJECTREF* oref, BOOL fCrossRuntime, BSTR* pBStr);
+
+
+//--------------------------------------------------------------------------------
+// ConvertBSTRToObject
+// Deserializes a BSTR created using ConvertObjectToBSTR; this API SysFreeString's the BSTR.
+OBJECTREF ConvertBSTRToObject(BSTR bstr, BOOL fCrossRuntime);
+#endif
+
+//--------------------------------------------------------------------------------
+// UnMarshalObjectForCurrentDomain
+// unmarshal the managed object for the current domain
+BOOL UnMarshalObjectForCurrentDomain(ADID pObjDomain, ComCallWrapper* pWrap, OBJECTREF* pResult);
+
+#endif // FEATURE_COMINTEROP
+
+#endif // #ifndef _H_INTEROPCONVERTER_
diff --git a/src/vm/interoputil.cpp b/src/vm/interoputil.cpp
new file mode 100644
index 0000000000..682bf53218
--- /dev/null
+++ b/src/vm/interoputil.cpp
@@ -0,0 +1,7225 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#include "vars.hpp"
+#include "excep.h"
+#include "interoputil.h"
+#include "cachelinealloc.h"
+#include "comutilnative.h"
+#include "field.h"
+#include "guidfromname.h"
+#include "eeconfig.h"
+#include "mlinfo.h"
+#include "comdelegate.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "appdomain.hpp"
+#include "prettyprintsig.h"
+#include "util.hpp"
+#include "interopconverter.h"
+#include "wrappers.h"
+#include "invokeutil.h"
+#include "mdaassistants.h"
+#include "comcallablewrapper.h"
+#include "../md/compiler/custattr.h"
+#include "siginfo.hpp"
+#include "eemessagebox.h"
+#include "finalizerthread.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "cominterfacemarshaler.h"
+#include <roerrorapi.h>
+#endif
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+#include "olecontexthelpers.h"
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+#ifdef FEATURE_COMINTEROP
+#include "dispex.h"
+#include "runtimecallablewrapper.h"
+#include "comtoclrcall.h"
+#include "clrtocomcall.h"
+#include "comcache.h"
+#include "commtmemberinfomap.h"
+#ifdef FEATURE_COMINTEROP_TLB_SUPPORT
+#include "comtypelibconverter.h"
+#endif
+#include "olevariant.h"
+#include "stdinterfaces.h"
+#include "notifyexternals.h"
+#include "typeparse.h"
+#include "..\md\winmd\inc\adapter.h"
+#include "winrttypenameconverter.h"
+#include "interoputil.inl"
+#include "typestring.h"
+
+#ifndef __ILanguageExceptionErrorInfo_INTERFACE_DEFINED__
+#define __ILanguageExceptionErrorInfo_INTERFACE_DEFINED__
+ EXTERN_C const IID IID_ILanguageExceptionErrorInfo;
+
+ MIDL_INTERFACE("04a2dbf3-df83-116c-0946-0812abf6e07d")
+ ILanguageExceptionErrorInfo : public IUnknown
+ {
+ public:
+ virtual HRESULT STDMETHODCALLTYPE GetLanguageException(
+ /* [out] */ __RPC__deref_out_opt IUnknown **languageException) = 0;
+
+ };
+#endif // !__ILanguageExceptionErrorInfo_INTERFACE_DEFINED__
+
+#define STANDARD_DISPID_PREFIX W("[DISPID")
+#define STANDARD_DISPID_PREFIX_LENGTH 7
+#define GET_ENUMERATOR_METHOD_NAME W("GetEnumerator")
+
+// Note: All of the methods below must appear in the order in which the interfaces are defined in IL.
+// WinRT -> CLR adapters
+static const BinderMethodID s_stubsIterableToEnumerable[] =
+{
+ METHOD__ITERABLE_TO_ENUMERABLE_ADAPTER__GET_ENUMERATOR_STUB
+};
+static const BinderMethodID s_stubsVectorToList[] =
+{
+ METHOD__VECTOR_TO_LIST_ADAPTER__INDEXER_GET,
+ METHOD__VECTOR_TO_LIST_ADAPTER__INDEXER_SET,
+ METHOD__VECTOR_TO_LIST_ADAPTER__INDEX_OF,
+ METHOD__VECTOR_TO_LIST_ADAPTER__INSERT,
+ METHOD__VECTOR_TO_LIST_ADAPTER__REMOVE_AT
+};
+static const BinderMethodID s_stubsVectorToCollection[] =
+{
+ METHOD__VECTOR_TO_COLLECTION_ADAPTER__COUNT,
+ METHOD__VECTOR_TO_COLLECTION_ADAPTER__IS_READ_ONLY,
+ METHOD__VECTOR_TO_COLLECTION_ADAPTER__ADD,
+ METHOD__VECTOR_TO_COLLECTION_ADAPTER__CLEAR,
+ METHOD__VECTOR_TO_COLLECTION_ADAPTER__CONTAINS,
+ METHOD__VECTOR_TO_COLLECTION_ADAPTER__COPY_TO,
+ METHOD__VECTOR_TO_COLLECTION_ADAPTER__REMOVE
+};
+static const BinderMethodID s_stubsMapToDictionary[] =
+{
+ METHOD__MAP_TO_DICTIONARY_ADAPTER__INDEXER_GET,
+ METHOD__MAP_TO_DICTIONARY_ADAPTER__INDEXER_SET,
+ METHOD__MAP_TO_DICTIONARY_ADAPTER__KEYS,
+ METHOD__MAP_TO_DICTIONARY_ADAPTER__VALUES,
+ METHOD__MAP_TO_DICTIONARY_ADAPTER__CONTAINS_KEY,
+ METHOD__MAP_TO_DICTIONARY_ADAPTER__ADD,
+ METHOD__MAP_TO_DICTIONARY_ADAPTER__REMOVE,
+ METHOD__MAP_TO_DICTIONARY_ADAPTER__TRY_GET_VALUE
+};
+static const BinderMethodID s_stubsMapToCollection[] =
+{
+ METHOD__MAP_TO_COLLECTION_ADAPTER__COUNT,
+ METHOD__MAP_TO_COLLECTION_ADAPTER__IS_READ_ONLY,
+ METHOD__MAP_TO_COLLECTION_ADAPTER__ADD,
+ METHOD__MAP_TO_COLLECTION_ADAPTER__CLEAR,
+ METHOD__MAP_TO_COLLECTION_ADAPTER__CONTAINS,
+ METHOD__MAP_TO_COLLECTION_ADAPTER__COPY_TO,
+ METHOD__MAP_TO_COLLECTION_ADAPTER__REMOVE
+};
+static const BinderMethodID s_stubsIVectorViewToIReadOnlyCollection[] =
+{
+ METHOD__IVECTORVIEW_TO_IREADONLYCOLLECTION_ADAPTER__COUNT,
+};
+static const BinderMethodID s_stubsIVectorViewToIReadOnlyList[] =
+{
+ METHOD__IVECTORVIEW_TO_IREADONLYLIST_ADAPTER__INDEXER_GET,
+};
+static const BinderMethodID s_stubsIMapViewToIReadOnlyCollection[] =
+{
+ METHOD__IMAPVIEW_TO_IREADONLYCOLLECTION_ADAPTER__COUNT,
+};
+static const BinderMethodID s_stubsIMapViewToIReadOnlyDictionary[] =
+{
+ METHOD__IMAPVIEW_TO_IREADONLYDICTIONARY_ADAPTER__CONTAINSKEY,
+ METHOD__IMAPVIEW_TO_IREADONLYDICTIONARY_ADAPTER__TRYGETVALUE,
+ METHOD__IMAPVIEW_TO_IREADONLYDICTIONARY_ADAPTER__INDEXER_GET,
+ METHOD__IMAPVIEW_TO_IREADONLYDICTIONARY_ADAPTER__KEYS,
+ METHOD__IMAPVIEW_TO_IREADONLYDICTIONARY_ADAPTER__VALUES
+};
+static const BinderMethodID s_stubsBindableIterableToEnumerable[] =
+{
+ METHOD__BINDABLEITERABLE_TO_ENUMERABLE_ADAPTER__GET_ENUMERATOR_STUB
+};
+static const BinderMethodID s_stubsBindableVectorToList[] =
+{
+ METHOD__BINDABLEVECTOR_TO_LIST_ADAPTER__INDEXER_GET,
+ METHOD__BINDABLEVECTOR_TO_LIST_ADAPTER__INDEXER_SET,
+ METHOD__BINDABLEVECTOR_TO_LIST_ADAPTER__ADD,
+ METHOD__BINDABLEVECTOR_TO_LIST_ADAPTER__CONTAINS,
+ METHOD__BINDABLEVECTOR_TO_LIST_ADAPTER__CLEAR,
+ METHOD__BINDABLEVECTOR_TO_LIST_ADAPTER__IS_READ_ONLY,
+ METHOD__BINDABLEVECTOR_TO_LIST_ADAPTER__IS_FIXED_SIZE,
+ METHOD__BINDABLEVECTOR_TO_LIST_ADAPTER__INDEX_OF,
+ METHOD__BINDABLEVECTOR_TO_LIST_ADAPTER__INSERT,
+ METHOD__BINDABLEVECTOR_TO_LIST_ADAPTER__REMOVE,
+ METHOD__BINDABLEVECTOR_TO_LIST_ADAPTER__REMOVE_AT
+};
+static const BinderMethodID s_stubsBindableVectorToCollection[] =
+{
+ METHOD__BINDABLEVECTOR_TO_COLLECTION_ADAPTER__COPY_TO,
+ METHOD__BINDABLEVECTOR_TO_COLLECTION_ADAPTER__COUNT,
+ METHOD__BINDABLEVECTOR_TO_COLLECTION_ADAPTER__SYNC_ROOT,
+ METHOD__BINDABLEVECTOR_TO_COLLECTION_ADAPTER__IS_SYNCHRONIZED
+};
+static const BinderMethodID s_stubsNotifyCollectionChangedToManaged[] =
+{
+ (BinderMethodID)0, // add_CollectionChanged
+ (BinderMethodID)1, // remove_CollectionChanged
+};
+static const BinderMethodID s_stubsNotifyPropertyChangedToManaged[] =
+{
+ (BinderMethodID)0, // add_PropertyChanged
+ (BinderMethodID)1, // remove_PropertyChanged
+};
+static const BinderMethodID s_stubsICommandToManaged[] =
+{
+ (BinderMethodID)0, // add_CanExecuteChanged
+ (BinderMethodID)1, // remove_CanExecuteChanged
+ (BinderMethodID)2, // CanExecute
+ (BinderMethodID)3, // Execute
+};
+
+static const BinderMethodID s_stubsClosableToDisposable[] =
+{
+ METHOD__ICLOSABLE_TO_IDISPOSABLE_ADAPTER__DISPOSE
+};
+
+// CLR -> WinRT adapters
+static const BinderMethodID s_stubsEnumerableToIterable[] =
+{
+ METHOD__ENUMERABLE_TO_ITERABLE_ADAPTER__FIRST_STUB
+};
+static const BinderMethodID s_stubsListToVector[] =
+{
+ METHOD__LIST_TO_VECTOR_ADAPTER__GET_AT,
+ METHOD__LIST_TO_VECTOR_ADAPTER__SIZE,
+ METHOD__LIST_TO_VECTOR_ADAPTER__GET_VIEW,
+ METHOD__LIST_TO_VECTOR_ADAPTER__INDEX_OF,
+ METHOD__LIST_TO_VECTOR_ADAPTER__SET_AT,
+ METHOD__LIST_TO_VECTOR_ADAPTER__INSERT_AT,
+ METHOD__LIST_TO_VECTOR_ADAPTER__REMOVE_AT,
+ METHOD__LIST_TO_VECTOR_ADAPTER__APPEND,
+ METHOD__LIST_TO_VECTOR_ADAPTER__REMOVE_AT_END,
+ METHOD__LIST_TO_VECTOR_ADAPTER__CLEAR,
+ METHOD__LIST_TO_VECTOR_ADAPTER__GET_MANY,
+ METHOD__LIST_TO_VECTOR_ADAPTER__REPLACE_ALL,
+};
+static const BinderMethodID s_stubsDictionaryToMap[] =
+{
+ METHOD__DICTIONARY_TO_MAP_ADAPTER__LOOKUP,
+ METHOD__DICTIONARY_TO_MAP_ADAPTER__SIZE,
+ METHOD__DICTIONARY_TO_MAP_ADAPTER__HAS_KEY,
+ METHOD__DICTIONARY_TO_MAP_ADAPTER__GET_VIEW,
+ METHOD__DICTIONARY_TO_MAP_ADAPTER__INSERT,
+ METHOD__DICTIONARY_TO_MAP_ADAPTER__REMOVE,
+ METHOD__DICTIONARY_TO_MAP_ADAPTER__CLEAR,
+};
+static const BinderMethodID s_stubsIReadOnlyListToIVectorView[] =
+{
+ METHOD__IREADONLYLIST_TO_IVECTORVIEW_ADAPTER__GETAT,
+ METHOD__IREADONLYLIST_TO_IVECTORVIEW_ADAPTER__SIZE,
+ METHOD__IREADONLYLIST_TO_IVECTORVIEW_ADAPTER__INDEXOF,
+ METHOD__IREADONLYLIST_TO_IVECTORVIEW_ADAPTER__GETMANY,
+};
+static const BinderMethodID s_stubsIReadOnlyDictionaryToIMapView[] =
+{
+ METHOD__IREADONLYDICTIONARY_TO_IMAPVIEW_ADAPTER__LOOKUP,
+ METHOD__IREADONLYDICTIONARY_TO_IMAPVIEW_ADAPTER__SIZE,
+ METHOD__IREADONLYDICTIONARY_TO_IMAPVIEW_ADAPTER__HASKEY,
+ METHOD__IREADONLYDICTIONARY_TO_IMAPVIEW_ADAPTER__SPLIT,
+};
+static const BinderMethodID s_stubsEnumerableToBindableIterable[] =
+{
+ METHOD__ENUMERABLE_TO_BINDABLEITERABLE_ADAPTER__FIRST_STUB
+};
+static const BinderMethodID s_stubsListToBindableVector[] =
+{
+ METHOD__LIST_TO_BINDABLEVECTOR_ADAPTER__GET_AT,
+ METHOD__LIST_TO_BINDABLEVECTOR_ADAPTER__SIZE,
+ METHOD__LIST_TO_BINDABLEVECTOR_ADAPTER__GET_VIEW,
+ METHOD__LIST_TO_BINDABLEVECTOR_ADAPTER__INDEX_OF,
+ METHOD__LIST_TO_BINDABLEVECTOR_ADAPTER__SET_AT,
+ METHOD__LIST_TO_BINDABLEVECTOR_ADAPTER__INSERT_AT,
+ METHOD__LIST_TO_BINDABLEVECTOR_ADAPTER__REMOVE_AT,
+ METHOD__LIST_TO_BINDABLEVECTOR_ADAPTER__APPEND,
+ METHOD__LIST_TO_BINDABLEVECTOR_ADAPTER__REMOVE_AT_END,
+ METHOD__LIST_TO_BINDABLEVECTOR_ADAPTER__CLEAR
+};
+static const BinderMethodID s_stubsNotifyCollectionChangedToWinRT[] =
+{
+ (BinderMethodID)0, // add_CollectionChanged
+ (BinderMethodID)1, // remove_CollectionChanged
+};
+static const BinderMethodID s_stubsNotifyPropertyChangedToWinRT[] =
+{
+ (BinderMethodID)0, // add_PropertyChanged
+ (BinderMethodID)1, // remove_PropertyChanged
+};
+static const BinderMethodID s_stubsICommandToWinRT[] =
+{
+ (BinderMethodID)0, // add_CanExecuteChanged
+ (BinderMethodID)1, // remove_CanExecuteChanged
+ (BinderMethodID)2, // CanExecute
+ (BinderMethodID)3, // Execute
+};
+
+
+static const LPCUTF8 s_stubNamesNotifyCollectionChanged[] =
+{
+ "add_CollectionChanged", // 0
+ "remove_CollectionChanged", // 1
+};
+
+static const LPCUTF8 s_stubNamesNotifyPropertyChanged[] =
+{
+ "add_PropertyChanged", // 0
+ "remove_PropertyChanged", // 1
+};
+
+static const LPCUTF8 s_stubNamesICommand[] =
+{
+ "add_CanExecuteChanged", // 0
+ "remove_CanExecuteChanged", // 1
+ "CanExecute", // 2
+ "Execute", // 3
+};
+
+static const BinderMethodID s_stubsDisposableToClosable[] =
+{
+ METHOD__IDISPOSABLE_TO_ICLOSABLE_ADAPTER__CLOSE
+};
+
+const WinRTInterfaceRedirector::NonMscorlibRedirectedInterfaceInfo WinRTInterfaceRedirector::s_rNonMscorlibInterfaceInfos[3] =
+{
+ { WinMDAdapter::FrameworkAssembly_System, W("System.Runtime.InteropServices.WindowsRuntime.INotifyCollectionChanged_WinRT"),
+ W("System.Runtime.InteropServices.WindowsRuntime.NotifyCollectionChangedToManagedAdapter"),
+ W("System.Runtime.InteropServices.WindowsRuntime.NotifyCollectionChangedToWinRTAdapter"),
+ s_stubNamesNotifyCollectionChanged },
+ { WinMDAdapter::FrameworkAssembly_System, W("System.Runtime.InteropServices.WindowsRuntime.INotifyPropertyChanged_WinRT"),
+ W("System.Runtime.InteropServices.WindowsRuntime.NotifyPropertyChangedToManagedAdapter"),
+ W("System.Runtime.InteropServices.WindowsRuntime.NotifyPropertyChangedToWinRTAdapter"),
+ s_stubNamesNotifyPropertyChanged },
+ { WinMDAdapter::FrameworkAssembly_System, W("System.Runtime.InteropServices.WindowsRuntime.ICommand_WinRT"),
+ W("System.Runtime.InteropServices.WindowsRuntime.ICommandToManagedAdapter"),
+ W("System.Runtime.InteropServices.WindowsRuntime.ICommandToWinRTAdapter"),
+ s_stubNamesICommand },
+};
+
+#define SYSTEMDLL__INOTIFYCOLLECTIONCHANGED ((BinderClassID)(WinRTInterfaceRedirector::NON_MSCORLIB_MARKER | 0))
+#define SYSTEMDLL__INOTIFYPROPERTYCHANGED ((BinderClassID)(WinRTInterfaceRedirector::NON_MSCORLIB_MARKER | 1))
+#define SYSTEMDLL__ICOMMAND ((BinderClassID)(WinRTInterfaceRedirector::NON_MSCORLIB_MARKER | 2))
+
+const WinRTInterfaceRedirector::RedirectedInterfaceStubInfo WinRTInterfaceRedirector::s_rInterfaceStubInfos[2 * s_NumRedirectedInterfaces] =
+{
+ { CLASS__IITERABLE, _countof(s_stubsIterableToEnumerable), s_stubsIterableToEnumerable, _countof(s_stubsEnumerableToIterable), s_stubsEnumerableToIterable },
+ { CLASS__IVECTOR, _countof(s_stubsVectorToList), s_stubsVectorToList, _countof(s_stubsListToVector), s_stubsListToVector },
+ { CLASS__IMAP, _countof(s_stubsMapToDictionary), s_stubsMapToDictionary, _countof(s_stubsDictionaryToMap), s_stubsDictionaryToMap },
+ { CLASS__IVECTORVIEW, _countof(s_stubsIVectorViewToIReadOnlyList), s_stubsIVectorViewToIReadOnlyList, _countof(s_stubsIReadOnlyListToIVectorView), s_stubsIReadOnlyListToIVectorView },
+ { CLASS__IMAPVIEW, _countof(s_stubsIMapViewToIReadOnlyDictionary), s_stubsIMapViewToIReadOnlyDictionary, _countof(s_stubsIReadOnlyDictionaryToIMapView), s_stubsIReadOnlyDictionaryToIMapView },
+ { CLASS__IBINDABLEITERABLE, _countof(s_stubsBindableIterableToEnumerable), s_stubsBindableIterableToEnumerable, _countof(s_stubsEnumerableToBindableIterable), s_stubsEnumerableToBindableIterable },
+ { CLASS__IBINDABLEVECTOR, _countof(s_stubsBindableVectorToList), s_stubsBindableVectorToList, _countof(s_stubsListToBindableVector), s_stubsListToBindableVector },
+ { SYSTEMDLL__INOTIFYCOLLECTIONCHANGED, _countof(s_stubsNotifyCollectionChangedToManaged), s_stubsNotifyCollectionChangedToManaged, _countof(s_stubsNotifyCollectionChangedToWinRT), s_stubsNotifyCollectionChangedToWinRT },
+ { SYSTEMDLL__INOTIFYPROPERTYCHANGED, _countof(s_stubsNotifyPropertyChangedToManaged), s_stubsNotifyPropertyChangedToManaged, _countof(s_stubsNotifyPropertyChangedToWinRT), s_stubsNotifyPropertyChangedToWinRT },
+ { SYSTEMDLL__ICOMMAND, _countof(s_stubsICommandToManaged), s_stubsICommandToManaged, _countof(s_stubsICommandToWinRT), s_stubsICommandToWinRT },
+ { CLASS__ICLOSABLE, _countof(s_stubsClosableToDisposable), s_stubsClosableToDisposable, _countof(s_stubsClosableToDisposable), s_stubsDisposableToClosable },
+
+ // ICollection/ICollection<> stubs:
+ { (BinderClassID)0, 0, NULL, 0, NULL },
+ { CLASS__IVECTOR, _countof(s_stubsVectorToCollection), s_stubsVectorToCollection, 0, NULL },
+ { CLASS__IMAP, _countof(s_stubsMapToCollection), s_stubsMapToCollection, 0, NULL },
+ { CLASS__IVECTORVIEW, _countof(s_stubsIVectorViewToIReadOnlyCollection), s_stubsIVectorViewToIReadOnlyCollection, 0, NULL },
+ { CLASS__IMAPVIEW, _countof(s_stubsIMapViewToIReadOnlyCollection), s_stubsIMapViewToIReadOnlyCollection, 0, NULL },
+ { (BinderClassID)0, 0, NULL, 0, NULL },
+ { CLASS__IBINDABLEVECTOR, _countof(s_stubsBindableVectorToCollection), s_stubsBindableVectorToCollection, 0, NULL },
+ { (BinderClassID)0, 0, NULL, 0, NULL },
+ { (BinderClassID)0, 0, NULL, 0, NULL },
+ { (BinderClassID)0, 0, NULL, 0, NULL },
+ { (BinderClassID)0, 0, NULL, 0, NULL },
+};
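+
+// The table above holds 2 * s_NumRedirectedInterfaces rows: the first half covers
+// the redirected interfaces themselves, and the second half (following the
+// "ICollection/ICollection<> stubs" marker) holds the ICollection-flavored stubs
+// for the same slots, with zeroed rows where no such stubs exist.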
+
+#ifdef _DEBUG
+ VOID IntializeInteropLogging();
+#endif
+
+struct ByrefArgumentInfo
+{
+ BOOL m_bByref;
+ VARIANT m_Val;
+};
+
+// Flag indicating if COM support has been initialized.
+BOOL g_fComStarted = FALSE;
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+void AllocateComClassObject(ComClassFactory* pComClsFac, OBJECTREF* pComObj);
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+#endif // FEATURE_COMINTEROP
+
+
+#ifndef CROSSGEN_COMPILE
+//------------------------------------------------------------------
+// setup error info for exception object
+//
+#ifdef FEATURE_COMINTEROP
+HRESULT SetupErrorInfo(OBJECTREF pThrownObject, ComCallMethodDesc *pCMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pCMD));
+ }
+ CONTRACTL_END;
+
+ return SetupErrorInfo(pThrownObject, pCMD->IsWinRTCall());
+}
+
+typedef BOOL (*pfnRoOriginateLanguageException)(HRESULT error,
+ HSTRING message,
+ IUnknown* languageException);
+typedef HRESULT (*pfnGetRestrictedErrorInfo)(IRestrictedErrorInfo ** ppRestrictedErrorInfo);
+typedef HRESULT (*pfnSetRestrictedErrorInfo)(IRestrictedErrorInfo * pRestrictedErrorInfo);
+
+pfnRoOriginateLanguageException g_pfnRoOriginateLanguageException = nullptr;
+pfnGetRestrictedErrorInfo g_pfnGetRestrictedErrorInfo = nullptr;
+pfnSetRestrictedErrorInfo g_pfnSetRestrictedErrorInfo = nullptr;
+
+Volatile<bool> g_bCheckedWinRTErrorDllPresent = false;
+
+//--------------------------------------------------------------------------------
+// Attempts to load WinRT error API functions from the appropriate system library,
+// and populates g_pfnRoOriginateLanguageException, g_pfnGetRestrictedErrorInfo,
+// and g_pfnSetRestrictedErrorInfo.
+//
+// This is shared logic for loading the WinRT error libraries that should not be
+// called directly.
+void LoadProcAddressForWinRTErrorAPIs_Internal()
+{
+ WRAPPER_NO_CONTRACT;
+
+ GCX_PREEMP();
+
+ if (!g_bCheckedWinRTErrorDllPresent)
+ {
+ HMODULE hModWinRTError11Dll = WszLoadLibraryEx(W("api-ms-win-core-winrt-error-l1-1-1.dll"), NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
+
+ // We never release the library since we can only do it at AppDomain shutdown and there is no good way to release it then.
+ if (hModWinRTError11Dll)
+ {
+ g_pfnRoOriginateLanguageException = (pfnRoOriginateLanguageException)GetProcAddress(hModWinRTError11Dll, "RoOriginateLanguageException");
+ g_pfnSetRestrictedErrorInfo = (pfnSetRestrictedErrorInfo)GetProcAddress(hModWinRTError11Dll, "SetRestrictedErrorInfo");
+ g_pfnGetRestrictedErrorInfo = (pfnGetRestrictedErrorInfo)GetProcAddress(hModWinRTError11Dll, "GetRestrictedErrorInfo");
+ }
+ else
+ {
+ // Downlevel versions of WinRT that do not have the language-projected exceptions will still have
+ // APIs for IRestrictedErrorInfo, so we should still try to load those.
+ HMODULE hModWinRTError10Dll = WszLoadLibraryEx(W("api-ms-win-core-winrt-error-l1-1-0.dll"), NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
+
+ if (hModWinRTError10Dll)
+ {
+ g_pfnSetRestrictedErrorInfo = (pfnSetRestrictedErrorInfo)GetProcAddress(hModWinRTError10Dll, "SetRestrictedErrorInfo");
+ g_pfnGetRestrictedErrorInfo = (pfnGetRestrictedErrorInfo)GetProcAddress(hModWinRTError10Dll, "GetRestrictedErrorInfo");
+ }
+ }
+
+ g_bCheckedWinRTErrorDllPresent = true;
+ }
+}
+
+//--------------------------------------------------------------------------------
+// Attempts to load the IRestrictedErrorInfo APIs into the function pointers
+// g_pfnGetRestrictedErrorInfo and g_pfnSetRestrictedErrorInfo. This is used for
+// WinRT scenarios where we don't care about support for language-projected exception
+// support. Returns S_OK if both of these functions could be loaded, and E_FAIL
+// otherwise.
+HRESULT LoadProcAddressForRestrictedErrorInfoAPIs()
+{
+ WRAPPER_NO_CONTRACT;
+
+ LoadProcAddressForWinRTErrorAPIs_Internal();
+
+ if (g_pfnSetRestrictedErrorInfo != NULL && g_pfnGetRestrictedErrorInfo != NULL)
+ return S_OK;
+ else
+ return E_FAIL;
+}
+
+//--------------------------------------------------------------------------------
+// Attempts to load the RoOriginateLanguageException API for language-projected
+// exceptions into the function pointer g_pfnRoOriginateLanguageException. Returns
+// S_OK if this function could be loaded, and E_FAIL otherwise.
+HRESULT LoadProcAddressForRoOriginateLanguageExceptionAPI()
+{
+ WRAPPER_NO_CONTRACT;
+
+ LoadProcAddressForWinRTErrorAPIs_Internal();
+
+ if (g_pfnRoOriginateLanguageException != NULL)
+ return S_OK;
+ else
+ return E_FAIL;
+}
+
+//--------------------------------------------------------------------------------
+// GetRestrictedErrorInfo helper, enables and disables GC during call-outs
+HRESULT SafeGetRestrictedErrorInfo(IRestrictedErrorInfo **ppIRestrictedErrInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ppIRestrictedErrInfo));
+ }
+ CONTRACTL_END;
+
+ *ppIRestrictedErrInfo = NULL;
+ HRESULT hr = S_OK;
+
+ if(SUCCEEDED(LoadProcAddressForRestrictedErrorInfoAPIs()))
+ {
+ GCX_PREEMP();
+
+ EX_TRY
+ {
+ hr = (*g_pfnGetRestrictedErrorInfo)(ppIRestrictedErrInfo);
+ }
+ EX_CATCH
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ return hr;
+}
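+
+// Illustrative (hypothetical) use of the helper above:
+//
+//     SafeComHolder<IRestrictedErrorInfo> pREI;
+//     if (SUCCEEDED(SafeGetRestrictedErrorInfo(&pREI)) && pREI != NULL)
+//         // ... inspect or propagate the restricted error info ...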
+
+// This method checks whether the given IErrorInfo is actually a managed CLR object.
+BOOL IsManagedObject(IUnknown *pIUnknown)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pIUnknown));
+ }
+ CONTRACTL_END;
+
+ if (AppX::IsAppXProcess())
+ {
+ // In AppX we don't support IManagedObject, so we'll do the check based on
+ // IUnknown slots, i.e. we'll see whether the IP maps to a CCW.
+ if (MapIUnknownToWrapper(pIUnknown) != NULL)
+ {
+ // We found an existing CCW hence this is a managed exception.
+ return TRUE;
+ }
+ }
+ else
+ {
+ SafeComHolder<IManagedObject> pManagedObject = NULL;
+ HRESULT hrLocal = SafeQueryInterface(pIUnknown, IID_IManagedObject, (IUnknown**)&pManagedObject);
+ LogInteropQI(pIUnknown, IID_IManagedObject, hrLocal, "QI to determine if IErrorInfo is a managed exception");
+ if(SUCCEEDED(hrLocal))
+ {
+ return TRUE;
+ }
+
+ }
+ return FALSE;
+}
+
+// This method returns the IErrorInfo associated with the IRestrictedErrorInfo.
+// Return Value - a. IErrorInfo which corresponds to a managed exception object, where *bHasNonCLRLanguageErrorObject = FALSE
+// b. IErrorInfo corresponding to a non-CLR exception object, where *bHasNonCLRLanguageErrorObject = TRUE
+// c. NULL in case the current hr value is different from the one associated with IRestrictedErrorInfo.
+IErrorInfo *GetCorrepondingErrorInfo_WinRT(HRESULT hr, IRestrictedErrorInfo *pResErrInfo, BOOL* bHasNonCLRLanguageErrorObject)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pResErrInfo));
+ }
+ CONTRACTL_END;
+
+ *bHasNonCLRLanguageErrorObject = FALSE;
+ // This function must run in preemptive GC mode.
+ {
+ GCX_PREEMP();
+ HRESULT hrLocal = S_OK;
+
+ SafeComHolderPreemp<ILanguageExceptionErrorInfo> pLangException;
+
+ // 1. Check whether the given IRestrictedErrorInfo supports ILanguageExceptionErrorInfo
+ // 2. If so, retrieve the language specific IInspectable by calling GetLanguageException.
+ // 3. Check whether the IInspectable is CLR specific.
+ // 4. If so, return the IInspectable as it is also the IErrorInfo.
+ // 5. If not, check whether the HRESULT returned by the API is the same as the one stored in IRestrictedErrorInfo.
+ // 6. If so simply QI for IErrorInfo
+ // 7. If QI succeeds return IErrorInfo else return NULL.
+
+ hrLocal = SafeQueryInterfacePreemp(pResErrInfo, IID_ILanguageExceptionErrorInfo, (IUnknown **) &pLangException);
+ LogInteropQI(pResErrInfo, IID_ILanguageExceptionErrorInfo, hrLocal, "ILanguageExceptionErrorInfo");
+ if (SUCCEEDED(hrLocal))
+ {
+ IUnknown* pUnk;
+ if(pLangException != NULL && SUCCEEDED(pLangException->GetLanguageException((IUnknown**) &pUnk)) && pUnk != NULL)
+ {
+ if(IsManagedObject(pUnk))
+ {
+ // Since this represent a managed CCW, this is our exception object and will always be an IErrorInfo.
+ // Hence type casting to IErrorInfo is safe.
+ return (IErrorInfo*)pUnk;
+ }
+ else
+ {
+ // pUnk represents an exception object of a different language.
+ // We simply need to store that the exception object represents a non-CLR exception and can release the actual exception object.
+ SafeReleasePreemp(pUnk);
+ *bHasNonCLRLanguageErrorObject = TRUE;
+ }
+ }
+ }
+ if(SUCCEEDED(GetRestrictedErrorDetails(pResErrInfo, NULL, NULL, &hrLocal, NULL)))
+ {
+ if(hr == hrLocal)
+ {
+ IErrorInfo *pErrInfo = NULL ;
+ hrLocal = SafeQueryInterfacePreemp(pResErrInfo, IID_IErrorInfo, (IUnknown **) &pErrInfo);
+ LogInteropQI(pResErrInfo, IID_IErrorInfo, hrLocal, "IErrorInfo");
+ if(SUCCEEDED(hrLocal))
+ {
+ return pErrInfo;
+ }
+ }
+ }
+ }
+
+ return NULL;
+}
+
+HRESULT GetRestrictedErrorDetails(IRestrictedErrorInfo *pRestrictedErrorInfo, BSTR *perrorDescription, BSTR *pErrorRestrictedDescription, HRESULT *pHr, BSTR *pErrorCapabilitySid)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pRestrictedErrorInfo));
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ BSTR errDesc;
+ BSTR errResDesc;
+ BSTR errCapSid;
+ HRESULT hrLocal;
+
+ if(SUCCEEDED(pRestrictedErrorInfo->GetErrorDetails(&errDesc, &hrLocal, &errResDesc, &errCapSid)))
+ {
+ if(perrorDescription)
+ *perrorDescription = errDesc;
+ else
+ ::SysFreeString(errDesc);
+
+ if(pErrorRestrictedDescription)
+ *pErrorRestrictedDescription = errResDesc;
+ else
+ ::SysFreeString(errResDesc);
+
+ if(pErrorCapabilitySid)
+ *pErrorCapabilitySid = errCapSid;
+ else
+ ::SysFreeString(errCapSid);
+ if(pHr)
+ *pHr = hrLocal;
+
+ return S_OK;
+ }
+
+ return E_FAIL;
+}
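+
+// Illustrative (hypothetical) call to the helper above, fetching only the stored
+// HRESULT (the pattern also used elsewhere in this file):
+//
+//     HRESULT hrStored = S_OK;
+//     if (SUCCEEDED(GetRestrictedErrorDetails(pRestrictedErrorInfo, NULL, NULL, &hrStored, NULL)))
+//         // ... compare hrStored against the HRESULT being propagated ...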
+#endif // FEATURE_COMINTEROP
+
+HRESULT SetupErrorInfo(OBJECTREF pThrownObject, BOOL bIsWinRTScenario /* = FALSE */)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+
+#ifdef FEATURE_COMINTEROP
+ Exception* pException = NULL;
+#endif
+
+ GCPROTECT_BEGIN(pThrownObject)
+ {
+ EX_TRY
+ {
+ // Calls to COM up ahead.
+ hr = EnsureComStartedNoThrow();
+ if (SUCCEEDED(hr) && pThrownObject != NULL)
+ {
+#ifdef _DEBUG
+ EX_TRY
+ {
+ StackSString message;
+ GetExceptionMessage(pThrownObject, message);
+
+ if (g_pConfig->ShouldExposeExceptionsInCOMToConsole())
+ {
+ PrintToStdOutW(W(".NET exception in COM\n"));
+ if (!message.IsEmpty())
+ PrintToStdOutW(message.GetUnicode());
+ else
+ PrintToStdOutW(W("No exception info available"));
+ }
+
+ if (g_pConfig->ShouldExposeExceptionsInCOMToMsgBox())
+ {
+ GCX_PREEMP();
+ if (!message.IsEmpty())
+ EEMessageBoxNonLocalizedDebugOnly((LPWSTR)message.GetUnicode(), W(".NET exception in COM"), MB_ICONSTOP | MB_OK);
+ else
+ EEMessageBoxNonLocalizedDebugOnly(W("No exception information available"), W(".NET exception in COM"),MB_ICONSTOP | MB_OK);
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH (SwallowAllExceptions);
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ IErrorInfo* pErr = NULL;
+ EX_TRY
+ {
+ // This handles a special case for a newer subset of WinRT scenarios, starting in Windows
+ // 8.1, where we have support for language-projected extensions. In this case, we can use
+ // the thrown object to set up a projected IErrorInfo that we'll send back to native code.
+ //
+ // In all other scenarios (including WinRT prior to Windows 8.1), we just use the legacy
+ // IErrorInfo COM APIs.
+ if (bIsWinRTScenario &&
+ SUCCEEDED(LoadProcAddressForRestrictedErrorInfoAPIs()) &&
+ SUCCEEDED(LoadProcAddressForRoOriginateLanguageExceptionAPI()))
+ {
+ // In the WinRT case we check whether an uncaught language exception already exists.
+ // If so, we simply call SetRestrictedErrorInfo on it, ensuring that the other language
+ // can catch the exception, and we do not RoOriginateError from our side.
+ IRestrictedErrorInfo *pRestrictedErrorInfo = GetRestrictedErrorInfoFromErrorObject(pThrownObject);
+ if(pRestrictedErrorInfo != NULL)
+ {
+ (*g_pfnSetRestrictedErrorInfo)(pRestrictedErrorInfo);
+ GetRestrictedErrorDetails(pRestrictedErrorInfo, NULL, NULL, &hr, NULL);
+ }
+ else
+ {
+ // If there is no existing language exception, we save the error info on the current thread by storing the following information:
+ // 1. HResult
+ // 2. ErrorMsg
+ // 3. The managed exception object, which can be later retrieved if needed.
+
+ pErr = (IErrorInfo *)GetComIPFromObjectRef(&pThrownObject, IID_IErrorInfo);
+
+ StackSString message;
+ HRESULT errorHr;
+ HSTRING errorMsgString;
+
+ GetExceptionMessage(pThrownObject, message);
+ errorHr = GetHRFromThrowable(pThrownObject);
+
+ if(FAILED(WindowsCreateString(message.GetUnicode(), message.GetCount(), &errorMsgString)))
+ errorMsgString = NULL;
+
+ //
+ // WinRT change to convert ObjectDisposedException into RO_E_CLOSED
+ // if we are calling into a WinRT managed object
+ //
+ if (errorHr == COR_E_OBJECTDISPOSED)
+ errorHr = RO_E_CLOSED;
+
+ // Set the managed exception
+ {
+ GCX_PREEMP();
+ // This Windows API call will store the pErr as the LanguageException and
+ // construct an IRestrictedErrorInfo from the errorHr and errorMsgString
+ // which can then be later retrieved using GetRestrictedErrorInfo.
+ (*g_pfnRoOriginateLanguageException)(errorHr, errorMsgString, pErr);
+ }
+ }
+ }
+ else
+ {
+ // set the error info object for the exception that was thrown.
+ pErr = (IErrorInfo *)GetComIPFromObjectRef(&pThrownObject, IID_IErrorInfo);
+ {
+ GCX_PREEMP();
+ LeaveRuntimeHolder lrh((size_t)SetErrorInfo);
+ SetErrorInfo(0, pErr);
+ }
+ }
+
+ // Release the pErr in case it exists.
+ if (pErr)
+ {
+ hr = GetHRFromCLRErrorInfo(pErr);
+ ULONG cbRef = SafeRelease(pErr);
+ LogInteropRelease(pErr, cbRef, "IErrorInfo");
+ }
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ //
+ // WinRT change to convert ObjectDisposedException into RO_E_CLOSED
+ // if we are calling into a WinRT managed object
+ //
+ if (hr == COR_E_OBJECTDISPOSED && bIsWinRTScenario)
+ hr = RO_E_CLOSED;
+#endif // FEATURE_COMINTEROP
+ }
+ }
+ EX_CATCH
+ {
+ if (SUCCEEDED(hr))
+ hr = E_FAIL;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+ GCPROTECT_END();
+ return hr;
+}
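+
+// Usage sketch (illustrative only): a typical caller sits in a managed-to-COM transition,
+// catches the managed exception in cooperative mode, and forwards it to the unmanaged
+// caller as an HRESULT plus per-thread error info.
+//
+//     OBJECTREF oThrowable = GET_THROWABLE();   // assumes an enclosing EX_CATCH block
+//     HRESULT hrReturn = SetupErrorInfo(oThrowable);
+//     // hrReturn is what the unmanaged caller ultimately sees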
+
+//-------------------------------------------------------------------
+// Fills the ExceptionData structure from the given IErrorInfo (and, when
+// available, IRestrictedErrorInfo).
+//-------------------------------------------------------------------
+void FillExceptionData(ExceptionData* pedata, IErrorInfo* pErrInfo, IRestrictedErrorInfo* pRestrictedErrorInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pedata));
+ }
+ CONTRACTL_END;
+
+ if (pErrInfo != NULL)
+ {
+ Thread* pThread = GetThread();
+ if (pThread != NULL)
+ {
+ GCX_PREEMP();
+
+ pErrInfo->GetSource (&pedata->bstrSource);
+ pErrInfo->GetDescription (&pedata->bstrDescription);
+ pErrInfo->GetHelpFile (&pedata->bstrHelpFile);
+ pErrInfo->GetHelpContext (&pedata->dwHelpContext );
+ pErrInfo->GetGUID(&pedata->guid);
+
+#ifdef FEATURE_COMINTEROP
+ HRESULT hr = S_OK;
+ if(pRestrictedErrorInfo == NULL)
+ {
+ hr = SafeQueryInterfacePreemp(pErrInfo, IID_IRestrictedErrorInfo, (IUnknown **) &pRestrictedErrorInfo);
+ LogInteropQI(pErrInfo, IID_IRestrictedErrorInfo, hr, "IRestrictedErrorInfo");
+ }
+
+ if (SUCCEEDED(hr) && pRestrictedErrorInfo != NULL)
+ {
+ // Keep an AddRef-ed IRestrictedErrorInfo*
+ pedata->pRestrictedErrorInfo = pRestrictedErrorInfo;
+
+ // Retrieve restricted error information
+ BSTR bstrDescription = NULL;
+ HRESULT hrError;
+ if (SUCCEEDED(GetRestrictedErrorDetails(pRestrictedErrorInfo, &bstrDescription, &pedata->bstrRestrictedError, &hrError, &pedata->bstrCapabilitySid)))
+ {
+ if (bstrDescription != NULL)
+ {
+ ::SysFreeString(pedata->bstrDescription);
+ pedata->bstrDescription = bstrDescription;
+ }
+
+ _ASSERTE(hrError == pedata->hr);
+ }
+
+ // Retrieve reference string and ignore error
+ pRestrictedErrorInfo->GetReference(&pedata->bstrReference);
+ }
+#endif
+ ULONG cbRef = SafeRelease(pErrInfo); // release the IErrorInfo interface pointer
+ LogInteropRelease(pErrInfo, cbRef, "IErrorInfo");
+ }
+ }
+}
+#endif // CROSSGEN_COMPILE
+
+#ifndef FEATURE_CORECLR
+//---------------------------------------------------------------------------
+// Returns TRUE if pImport has the DefaultDllImportSearchPathsAttribute.
+// If TRUE, also returns the flag value through pDllImportSearchPathFlag.
+BOOL GetDefaultDllImportSearchPathsAttributeValue(IMDInternalImport *pImport, mdToken token, DWORD * pDllImportSearchPathFlag)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pImport));
+ }
+ CONTRACTL_END;
+
+ BYTE* pData = NULL;
+ LONG cData = 0;
+
+ HRESULT hr = pImport->GetCustomAttributeByName(token,
+ g_DefaultDllImportSearchPathsAttribute,
+ (const VOID **)(&pData),
+ (ULONG *)&cData);
+
+ IfFailThrow(hr);
+ if (cData == 0)
+ {
+ return FALSE;
+ }
+
+ CustomAttributeParser ca(pData, cData);
+ CaArg args[1];
+ args[0].InitEnum(SERIALIZATION_TYPE_U4, (ULONG)0);
+
+ ParseKnownCaArgs(ca, args, lengthof(args));
+ *pDllImportSearchPathFlag = args[0].val.u4;
+ return TRUE;
+}
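+
+// For illustration (a sketch of the blob layout this parser expects): the custom
+// attribute blob for [DefaultDllImportSearchPaths(DllImportSearchPath.System32)]
+// is ten bytes:
+//     01 00          prolog
+//     00 08 00 00    little-endian UInt32 0x00000800 (DllImportSearchPath.System32)
+//     00 00          zero named arguments
+// ParseKnownCaArgs above reads the UInt32 into args[0].val.u4.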
+#endif // !FEATURE_CORECLR
+
+
+//---------------------------------------------------------------------------
+// Returns the index of the LCID parameter if one exists and -1 otherwise.
+int GetLCIDParameterIndex(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ int iLCIDParam = -1;
+ HRESULT hr;
+ const BYTE * pVal;
+ ULONG cbVal;
+
+ if (!pMD->GetMethodTable()->IsProjectedFromWinRT()) // ignore LCIDConversionAttribute on WinRT methods
+ {
+ // Check to see if the method has the LCIDConversionAttribute.
+ hr = pMD->GetMDImport()->GetCustomAttributeByName(pMD->GetMemberDef(), INTEROP_LCIDCONVERSION_TYPE, (const void**)&pVal, &cbVal);
+ if (hr == S_OK)
+ {
+ CustomAttributeParser caLCID(pVal, cbVal);
+ CaArg args[1];
+ args[0].Init(SERIALIZATION_TYPE_I4, 0);
+ IfFailGo(ParseKnownCaArgs(caLCID, args, lengthof(args)));
+ iLCIDParam = args[0].val.i4;
+ }
+ }
+
+ErrExit:
+ return iLCIDParam;
+}
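+
+// Usage sketch (illustrative only): marshalers use the returned index to decide where
+// an extra LCID argument sits in the unmanaged signature; -1 means there is none.
+//
+//     int iLCID = GetLCIDParameterIndex(pMD);
+//     if (iLCID != -1)
+//     {
+//         // e.g. [LCIDConversion(0)] places the LCID before all managed parameters
+//     }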
+
+#ifndef CROSSGEN_COMPILE
+//---------------------------------------------------------------------------
+// Transforms an LCID into a CultureInfo.
+void GetCultureInfoForLCID(LCID lcid, OBJECTREF *pCultureObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pCultureObj));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_USE_LCID
+ OBJECTREF CultureObj = NULL;
+ GCPROTECT_BEGIN(CultureObj)
+ {
+ // Allocate a CultureInfo with the specified LCID.
+ CultureObj = AllocateObject(MscorlibBinder::GetClass(CLASS__CULTURE_INFO));
+
+ MethodDescCallSite cultureInfoCtor(METHOD__CULTURE_INFO__INT_CTOR, &CultureObj);
+
+ // Call the CultureInfo(int culture) constructor.
+ ARG_SLOT pNewArgs[] = {
+ ObjToArgSlot(CultureObj),
+ (ARG_SLOT)lcid
+ };
+ cultureInfoCtor.Call(pNewArgs);
+
+ // Set the returned culture object.
+ *pCultureObj = CultureObj;
+ }
+ GCPROTECT_END();
+#else
+ COMPlusThrow(kNotSupportedException);
+#endif
+}
+#endif // CROSSGEN_COMPILE
+
+//---------------------------------------------------------------------------
+// This method determines if a member is visible from COM.
+BOOL IsMemberVisibleFromCom(MethodTable *pDeclaringMT, mdToken tk, mdMethodDef mdAssociate)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDeclaringMT));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ const BYTE * pVal;
+ ULONG cbVal;
+ DWORD dwFlags;
+
+ IMDInternalImport *pInternalImport = pDeclaringMT->GetMDImport();
+
+ // Check to see if the member is public.
+ switch (TypeFromToken(tk))
+ {
+ case mdtFieldDef:
+ _ASSERTE(IsNilToken(mdAssociate));
+ if (FAILED(pInternalImport->GetFieldDefProps(tk, &dwFlags)))
+ {
+ return FALSE;
+ }
+ if (!IsFdPublic(dwFlags))
+ return FALSE;
+ break;
+
+ case mdtMethodDef:
+ _ASSERTE(IsNilToken(mdAssociate));
+ if (FAILED(pInternalImport->GetMethodDefProps(tk, &dwFlags)))
+ {
+ return FALSE;
+ }
+ if (!IsMdPublic(dwFlags))
+ {
+ return FALSE;
+ }
+ {
+ // Generic Methods are not visible from COM
+ MDEnumHolder hEnumTyPars(pInternalImport);
+ if (FAILED(pInternalImport->EnumInit(mdtGenericParam, tk, &hEnumTyPars)))
+ return FALSE;
+
+ if (pInternalImport->EnumGetCount(&hEnumTyPars) != 0)
+ return FALSE;
+ }
+ break;
+
+ case mdtProperty:
+ _ASSERTE(!IsNilToken(mdAssociate));
+ if (FAILED(pInternalImport->GetMethodDefProps(mdAssociate, &dwFlags)))
+ {
+ return FALSE;
+ }
+ if (!IsMdPublic(dwFlags))
+ return FALSE;
+
+ if (!pDeclaringMT->IsProjectedFromWinRT() && !pDeclaringMT->IsExportedToWinRT() && !pDeclaringMT->IsWinRTObjectType())
+ {
+ // Check to see if the associate has the ComVisible attribute set (non-WinRT members only).
+ hr = pInternalImport->GetCustomAttributeByName(mdAssociate, INTEROP_COMVISIBLE_TYPE, (const void**)&pVal, &cbVal);
+ if (hr == S_OK)
+ {
+ CustomAttributeParser cap(pVal, cbVal);
+ if (FAILED(cap.SkipProlog()))
+ return FALSE;
+
+ UINT8 u1;
+ if (FAILED(cap.GetU1(&u1)))
+ return FALSE;
+
+ return (BOOL)u1;
+ }
+ }
+ break;
+
+ default:
+ _ASSERTE(!"The type of the specified member is not handled by IsMemberVisibleFromCom");
+ break;
+ }
+
+ if (!pDeclaringMT->IsProjectedFromWinRT() && !pDeclaringMT->IsExportedToWinRT() && !pDeclaringMT->IsWinRTObjectType())
+ {
+ // Check to see if the member has the ComVisible attribute set (non-WinRT members only).
+ hr = pInternalImport->GetCustomAttributeByName(tk, INTEROP_COMVISIBLE_TYPE, (const void**)&pVal, &cbVal);
+ if (hr == S_OK)
+ {
+ CustomAttributeParser cap(pVal, cbVal);
+ if (FAILED(cap.SkipProlog()))
+ return FALSE;
+
+ UINT8 u1;
+ if (FAILED(cap.GetU1(&u1)))
+ return FALSE;
+
+ return (BOOL)u1;
+ }
+ }
+
+ // The member is visible.
+ return TRUE;
+}
+
+
+ULONG GetStringizedMethodDef(MethodTable *pDeclaringMT, mdToken tkMb, CQuickArray<BYTE> &rDef, ULONG cbCur)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDeclaringMT));
+ }
+ CONTRACTL_END;
+
+ IMDInternalImport *pMDImport = pDeclaringMT->GetMDImport();
+ CQuickBytes rSig;
+ MDEnumHolder ePm(pMDImport); // For enumerating params.
+ mdParamDef tkPm; // A param token.
+ DWORD dwFlags; // Param flags.
+ USHORT usSeq; // Sequence of a parameter.
+ ULONG cPm; // Count of params.
+ PCCOR_SIGNATURE pSig;
+ ULONG cbSig;
+
+ // Don't count invisible members.
+ if (!IsMemberVisibleFromCom(pDeclaringMT, tkMb, mdMethodDefNil))
+ return cbCur;
+
+ // accumulate the signatures.
+ IfFailThrow(pMDImport->GetSigOfMethodDef(tkMb, &cbSig, &pSig));
+ IfFailThrow(::PrettyPrintSigInternalLegacy(pSig, cbSig, "", &rSig, pMDImport));
+
+ // Get the parameter flags.
+ IfFailThrow(pMDImport->EnumInit(mdtParamDef, tkMb, &ePm));
+ cPm = pMDImport->EnumGetCount(&ePm);
+
+ // Resize for sig and params. Just use 1 byte per param.
+ rDef.ReSizeThrows(cbCur + rSig.Size() + cPm);
+ memcpy(rDef.Ptr() + cbCur, rSig.Ptr(), rSig.Size());
+ cbCur += (ULONG)(rSig.Size()-1);
+
+ // Enumerate through the params and get the flags.
+ while (pMDImport->EnumNext(&ePm, &tkPm))
+ {
+ LPCSTR szParamName_Ignore;
+ IfFailThrow(pMDImport->GetParamDefProps(tkPm, &usSeq, &dwFlags, &szParamName_Ignore));
+ if (usSeq == 0) // Skip return type flags.
+ continue;
+ rDef[cbCur++] = (BYTE)dwFlags;
+ }
+
+ // Return the number of bytes.
+ return cbCur;
+} // void GetStringizedMethodDef()
+
+
+ULONG GetStringizedFieldDef(MethodTable *pDeclaringMT, mdToken tkMb, CQuickArray<BYTE> &rDef, ULONG cbCur)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDeclaringMT));
+ }
+ CONTRACTL_END;
+
+ CQuickBytes rSig;
+ PCCOR_SIGNATURE pSig;
+ ULONG cbSig;
+
+ // Don't count invisible members.
+ if (!IsMemberVisibleFromCom(pDeclaringMT, tkMb, mdMethodDefNil))
+ return cbCur;
+
+ IMDInternalImport *pMDImport = pDeclaringMT->GetMDImport();
+
+ // accumulate the signatures.
+ IfFailThrow(pMDImport->GetSigOfFieldDef(tkMb, &cbSig, &pSig));
+ IfFailThrow(::PrettyPrintSigInternalLegacy(pSig, cbSig, "", &rSig, pMDImport));
+ rDef.ReSizeThrows(cbCur + rSig.Size());
+ memcpy(rDef.Ptr() + cbCur, rSig.Ptr(), rSig.Size());
+ cbCur += (ULONG)(rSig.Size()-1);
+
+ // Return the number of bytes.
+ return cbCur;
+} // void GetStringizedFieldDef()
+
+//--------------------------------------------------------------------------------
+// This method generates a stringized version of an interface that contains the
+// name of the interface along with the signatures of all its methods and fields.
+SIZE_T GetStringizedItfDef(TypeHandle InterfaceType, CQuickArray<BYTE> &rDef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodTable* pIntfMT = InterfaceType.GetMethodTable();
+ PREFIX_ASSUME(pIntfMT != NULL);
+
+ IMDInternalImport* pMDImport = pIntfMT->GetMDImport();
+ PREFIX_ASSUME(pMDImport != NULL);
+
+ LPCWSTR szName;
+ ULONG cchName;
+ MDEnumHolder eMb(pMDImport); // For enumerating methods and fields.
+ mdToken tkMb; // A method or field token.
+ SIZE_T cbCur;
+
+ // Make sure the specified type is an interface with a valid token.
+ _ASSERTE(!IsNilToken(pIntfMT->GetCl()) && pIntfMT->IsInterface());
+
+ // Get the name of the class.
+ DefineFullyQualifiedNameForClassW();
+ szName = GetFullyQualifiedNameForClassNestedAwareW(pIntfMT);
+
+ cchName = (ULONG)wcslen(szName);
+
+ // Start with the interface name.
+ cbCur = cchName * sizeof(WCHAR);
+ rDef.ReSizeThrows(cbCur + sizeof(WCHAR));
+ wcscpy_s(reinterpret_cast<LPWSTR>(rDef.Ptr()), rDef.Size()/sizeof(WCHAR), szName);
+
+ // Enumerate the methods...
+ IfFailThrow(pMDImport->EnumInit(mdtMethodDef, pIntfMT->GetCl(), &eMb));
+ while(pMDImport->EnumNext(&eMb, &tkMb))
+ { // accumulate the signatures.
+ cbCur = GetStringizedMethodDef(pIntfMT, tkMb, rDef, (ULONG)cbCur);
+ }
+ pMDImport->EnumClose(&eMb);
+
+ // Enumerate the fields...
+ IfFailThrow(pMDImport->EnumInit(mdtFieldDef, pIntfMT->GetCl(), &eMb));
+ while(pMDImport->EnumNext(&eMb, &tkMb))
+ { // accumulate the signatures.
+ cbCur = GetStringizedFieldDef(pIntfMT, tkMb, rDef, (ULONG)cbCur);
+ }
+
+ // Return the number of bytes.
+ return cbCur;
+} // ULONG GetStringizedItfDef()
+
+//--------------------------------------------------------------------------------
+// Helper to get the stringized form of typelib guid.
+HRESULT GetStringizedTypeLibGuidForAssembly(Assembly *pAssembly, CQuickArray<BYTE> &rDef, ULONG cbCur, ULONG *pcbFetched)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(CheckPointer(pcbFetched));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ LPCUTF8 pszName = NULL; // Library name in UTF8.
+ ULONG cbName; // Length of name, UTF8 characters.
+ LPWSTR pName; // Pointer to library name.
+ ULONG cchName; // Length of name, wide chars.
+ LPWSTR pch=0; // Pointer into lib name.
+ const void *pSN=NULL; // Pointer to public key.
+ DWORD cbSN=0; // Size of public key.
+ USHORT usMajorVersion; // The major version number.
+ USHORT usMinorVersion; // The minor version number.
+ USHORT usBuildNumber; // The build number.
+ USHORT usRevisionNumber; // The revision number.
+ const BYTE *pbData = NULL; // Pointer to a custom attribute data.
+ ULONG cbData = 0; // Size of custom attribute data.
+ static char szTypeLibKeyName[] = {"TypeLib"};
+
+ // Get the name, and determine its length.
+ pszName = pAssembly->GetSimpleName();
+ cbName=(ULONG)strlen(pszName);
+ cchName = WszMultiByteToWideChar(CP_ACP,0, pszName,cbName+1, 0,0);
+
+ // See if there is a public key.
+ EX_TRY
+ {
+ pSN = pAssembly->GetPublicKey(&cbSN);
+ }
+ EX_CATCH
+ {
+ IfFailGo(COR_E_BADIMAGEFORMAT);
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+
+
+#ifdef FEATURE_COMINTEROP
+ if (pAssembly->IsWinMD())
+ {
+ // ignore classic COM interop CA on .winmd
+ hr = S_FALSE;
+ }
+ else
+ {
+ // If the ComCompatibleVersionAttribute is set, then use the version
+ // number in the attribute when generating the GUID.
+ IfFailGo(pAssembly->GetManifestImport()->GetCustomAttributeByName(TokenFromRid(1, mdtAssembly), INTEROP_COMCOMPATIBLEVERSION_TYPE, (const void**)&pbData, &cbData));
+ }
+
+ if (hr == S_OK && cbData >= (2 + 4 * sizeof(INT32)))
+ {
+ CustomAttributeParser cap(pbData, cbData);
+ IfFailRet(cap.SkipProlog());
+
+ // Retrieve the major and minor version from the attribute.
+ UINT32 u4;
+
+ IfFailRet(cap.GetU4(&u4));
+ usMajorVersion = GET_VERSION_USHORT_FROM_INT(u4);
+ IfFailRet(cap.GetU4(&u4));
+ usMinorVersion = GET_VERSION_USHORT_FROM_INT(u4);
+ IfFailRet(cap.GetU4(&u4));
+ usBuildNumber = GET_VERSION_USHORT_FROM_INT(u4);
+ IfFailRet(cap.GetU4(&u4));
+ usRevisionNumber = GET_VERSION_USHORT_FROM_INT(u4);
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ pAssembly->GetVersion(&usMajorVersion, &usMinorVersion, &usBuildNumber, &usRevisionNumber);
+ }
+
+ // Get the version information.
+ struct versioninfo
+ {
+ USHORT usMajorVersion; // Major Version.
+ USHORT usMinorVersion; // Minor Version.
+ USHORT usBuildNumber; // Build Number.
+ USHORT usRevisionNumber; // Revision Number.
+ } ver;
+
+ // <REVISIT_TODO> An issue here is that usMajor is used twice and usMinor not at all.
+ // We're not fixing that because everyone has a major version, so all the
+ // generated guids would change, which is breaking. To compensate, if
+ // the minor is non-zero, we add it separately, below.</REVISIT_TODO>
+ ver.usMajorVersion = usMajorVersion;
+ ver.usMinorVersion = usMajorVersion; // Don't fix this line!
+ ver.usBuildNumber = usBuildNumber;
+ ver.usRevisionNumber = usRevisionNumber;
+
+ // Resize the output buffer.
+ IfFailGo(rDef.ReSizeNoThrow(cbCur + cchName*sizeof(WCHAR) + sizeof(szTypeLibKeyName)-1 + cbSN + sizeof(ver)+sizeof(USHORT)));
+
+ // Put it all together. Name first.
+ WszMultiByteToWideChar(CP_ACP,0, pszName,cbName+1, (LPWSTR)(&rDef[cbCur]),cchName);
+ pName = (LPWSTR)(&rDef[cbCur]);
+ for (pch=pName; *pch; ++pch)
+ if (*pch == '.' || *pch == ' ')
+ *pch = '_';
+ else
+ if (iswupper(*pch))
+ *pch = towlower(*pch);
+ cbCur += (cchName-1)*sizeof(WCHAR);
+ memcpy(&rDef[cbCur], szTypeLibKeyName, sizeof(szTypeLibKeyName)-1);
+ cbCur += sizeof(szTypeLibKeyName)-1;
+
+ // Version.
+ memcpy(&rDef[cbCur], &ver, sizeof(ver));
+ cbCur += sizeof(ver);
+
+ // If minor version is non-zero, add it to the hash. It should have been in the ver struct,
+ // but due to a bug, it was omitted there, and fixing it "right" would have been
+ // breaking. So if it isn't zero, add it; if it is zero, don't add it. Any
+ // possible value of minor thus generates a different guid, and a value of 0 still generates
+ // the guid that the original, buggy, code generated.
+ if (usMinorVersion != 0)
+ {
+ SET_UNALIGNED_16(&rDef[cbCur], usMinorVersion);
+ cbCur += sizeof(USHORT);
+ }
+
+ // Public key.
+ memcpy(&rDef[cbCur], pSN, cbSN);
+ cbCur += cbSN;
+
+ if (pcbFetched)
+ *pcbFetched = cbCur;
+
+ErrExit:
+ return hr;
+}
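+
+// Worked example (illustrative only): for assembly version 1.2.3.4 the hashed struct is
+//     ver = { usMajorVersion = 1, usMinorVersion = 1 /* major again, by design */,
+//             usBuildNumber = 3, usRevisionNumber = 4 }
+// and, because the real minor version (2) is non-zero, a trailing USHORT 2 is appended.
+// Version 1.0.3.4 appends nothing, reproducing the original buggy hash exactly.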
+
+void SafeRelease_OnException(IUnknown* pUnk, RCW* pRCW
+#ifdef MDA_SUPPORTED
+ , MdaReportAvOnComRelease* pProbe
+#endif // MDA_SUPPORTED
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifndef CROSSGEN_COMPILE
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return;)
+
+#ifdef MDA_SUPPORTED
+ // Report the exception that was thrown.
+ if (pProbe)
+ pProbe->ReportHandledException(pRCW);
+#endif // MDA_SUPPORTED
+
+#ifdef FEATURE_COMINTEROP
+ LogInterop(W("An exception occured during release"));
+ LogInteropLeak(pUnk);
+#endif // FEATURE_COMINTEROP
+
+ END_SO_INTOLERANT_CODE;
+#endif // CROSSGEN_COMPILE
+}
+
+#include <optsmallperfcritical.h>
+//--------------------------------------------------------------------------------
+// Release helper, must be called in preemptive mode. Only use this variant if
+// you already know you're in preemptive mode for other reasons.
+ULONG SafeReleasePreemp(IUnknown * pUnk, RCW * pRCW)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk, NULL_OK));
+ } CONTRACTL_END;
+
+ if (pUnk == NULL)
+ return 0;
+
+ ULONG res = 0;
+ Thread * const pThread = GetThreadNULLOk();
+
+ // Message pump could happen, so arbitrary managed code could run.
+ CONTRACT_VIOLATION(ThrowsViolation | FaultViolation);
+
+#ifdef MDA_SUPPORTED
+ // Mode where we just let the fault occur.
+ MdaReportAvOnComRelease* pProbe = MDA_GET_ASSISTANT_EX(ReportAvOnComRelease);
+ if (pProbe && pProbe->AllowAV())
+ {
+ LeaveRuntimeHolderNoThrow lrh(*((*(size_t**)pUnk)+2));
+ if (FAILED(lrh.GetHR()))
+ return -1;
+ return pUnk->Release();
+ }
+#endif // MDA_SUPPORTED
+
+ bool fException = false;
+
+ SCAN_EHMARKER();
+ PAL_CPP_TRY
+ {
+ SCAN_EHMARKER_TRY();
+ // This is a holder to tell the contract system that we're catching all exceptions.
+ CLR_TRY_MARKER();
+
+ // It's very possible that the pUnk has gone bad before we could release it. This is a common application
+ // error. We may AV trying to call Release, and that AV will show up as an AV in mscorwks, so we'll take
+ // down the Runtime. Mark that an AV is alright, and handled, in this scope using this holder.
+ AVInRuntimeImplOkayHolder AVOkay(pThread);
+
+ if (CLRTaskHosted()) // Check hoisted out of LeaveRuntimeHolder to
+ { // keep LeaveRuntimeHolder off of common path.
+ LeaveRuntimeHolder lrh(*((*(size_t**)pUnk)+2));
+ res = pUnk->Release();
+ }
+ else
+ {
+ res = pUnk->Release();
+ }
+ SCAN_EHMARKER_END_TRY();
+ }
+ PAL_CPP_CATCH_ALL
+ {
+ SCAN_EHMARKER_CATCH();
+#if defined(STACK_GUARDS_DEBUG)
+ // Catching and just swallowing an exception means we need to tell
+ // the SO code that it should go back to normal operation, as it
+ // currently thinks that the exception is still on the fly.
+ pThread->GetCurrentStackGuard()->RestoreCurrentGuard();
+#endif
+ fException = true;
+ SCAN_EHMARKER_END_CATCH();
+ }
+ PAL_CPP_ENDTRY;
+
+ if (fException)
+ {
+ SafeRelease_OnException(pUnk, pRCW
+#ifdef MDA_SUPPORTED
+ , pProbe
+#endif // MDA_SUPPORTED
+ );
+ }
+
+ return res;
+}
+
+#ifdef _TARGET_AMD64_
+// A codegen bug on amd64 causes BBT to fail for the following function. As a
+// workaround, optimizations are disabled for it until we get an updated toolset.
+#pragma optimize( "", off )
+#endif
+//--------------------------------------------------------------------------------
+// Release helper, enables and disables GC during call-outs
+ULONG SafeRelease(IUnknown* pUnk, RCW* pRCW)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk, NULL_OK));
+ } CONTRACTL_END;
+
+ if (pUnk == NULL)
+ return 0;
+
+ ULONG res = 0;
+ Thread * const pThread = GetThreadNULLOk();
+ GCX_PREEMP_NO_DTOR_HAVE_THREAD(pThread);
+
+ // Message pump could happen, so arbitrary managed code could run.
+ CONTRACT_VIOLATION(ThrowsViolation | FaultViolation);
+
+#ifdef MDA_SUPPORTED
+ // Mode where we just let the fault occur.
+ MdaReportAvOnComRelease* pProbe = MDA_GET_ASSISTANT_EX(ReportAvOnComRelease);
+ if (pProbe && pProbe->AllowAV())
+ {
+ LeaveRuntimeHolderNoThrow lrh(*((*(size_t**)pUnk)+2));
+ if (FAILED(lrh.GetHR()))
+ return -1;
+ return pUnk->Release();
+ }
+#endif // MDA_SUPPORTED
+
+ bool fException = false;
+
+ SCAN_EHMARKER();
+ PAL_CPP_TRY
+ {
+ SCAN_EHMARKER_TRY();
+ // This is a holder to tell the contract system that we're catching all exceptions.
+ CLR_TRY_MARKER();
+
+ // It's very possible that the pUnk has gone bad before we could release it. This is a common application
+ // error. We may AV trying to call Release, and that AV will show up as an AV in mscorwks, so we'll take
+ // down the Runtime. Mark that an AV is alright, and handled, in this scope using this holder.
+ AVInRuntimeImplOkayHolder AVOkay(pThread);
+
+ if (CLRTaskHosted()) // Check hoisted out of LeaveRuntimeHolder to
+ { // keep LeaveRuntimeHolder off of common path.
+ LeaveRuntimeHolder lrh(*((*(size_t**)pUnk)+2));
+ res = pUnk->Release();
+ }
+ else
+ {
+ res = pUnk->Release();
+ }
+ SCAN_EHMARKER_END_TRY();
+ }
+ PAL_CPP_CATCH_ALL
+ {
+ SCAN_EHMARKER_CATCH();
+#if defined(STACK_GUARDS_DEBUG)
+ // Catching and just swallowing an exception means we need to tell
+ // the SO code that it should go back to normal operation, as it
+ // currently thinks that the exception is still on the fly.
+ pThread->GetCurrentStackGuard()->RestoreCurrentGuard();
+#endif
+ fException = true;
+ SCAN_EHMARKER_END_CATCH();
+ }
+ PAL_CPP_ENDTRY;
+
+ if (fException)
+ {
+ SafeRelease_OnException(pUnk, pRCW
+#ifdef MDA_SUPPORTED
+ , pProbe
+#endif // MDA_SUPPORTED
+ );
+ }
+
+ GCX_PREEMP_NO_DTOR_END();
+
+ return res;
+}
+#ifdef _TARGET_AMD64_
+// turn optimizations back on
+#pragma optimize( "", on )
+#endif
+
+#include <optdefault.h>
+
+//--------------------------------------------------------------------------------
+// Determines if a COM object can be cast to the specified type.
+BOOL CanCastComObject(OBJECTREF obj, MethodTable * pTargetMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (!obj)
+ return TRUE;
+
+ if (pTargetMT->IsInterface())
+ {
+ return Object::SupportsInterface(obj, pTargetMT);
+ }
+ else
+ {
+ return obj->GetTrueMethodTable()->CanCastToClass(pTargetMT);
+ }
+}
+
+VOID
+ReadBestFitCustomAttribute(MethodDesc* pMD, BOOL* BestFit, BOOL* ThrowOnUnmappableChar)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ReadBestFitCustomAttribute(pMD->GetMDImport(),
+ pMD->GetMethodTable()->GetCl(),
+ BestFit, ThrowOnUnmappableChar);
+}
+
+VOID
+ReadBestFitCustomAttribute(IMDInternalImport* pInternalImport, mdTypeDef cl, BOOL* BestFit, BOOL* ThrowOnUnmappableChar)
+{
+ // Set the attributes to their defaults, just to be safe.
+ *BestFit = TRUE;
+ *ThrowOnUnmappableChar = FALSE;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pInternalImport));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ BYTE* pData;
+ ULONG cbCount;
+
+ // A well-formed BestFitMapping attribute will have at least 5 bytes
+ // 1,2 for the prolog (should be 0x1, 0x0)
+ // 3 for the BestFitMapping bool
+ // 4,5 for the number of named parameters (will be 0 if ThrowOnUnmappableChar doesn't exist)
+ // 6 - 29 for the description of ThrowOnUnmappableChar
+ // 30 for the ThrowOnUnmappableChar bool
+
+ // Try the assembly first
+ hr = pInternalImport->GetCustomAttributeByName(TokenFromRid(1, mdtAssembly), INTEROP_BESTFITMAPPING_TYPE, (const VOID**)(&pData), &cbCount);
+ if ((hr == S_OK) && (pData) && (cbCount > 4) && (pData[0] == 1) && (pData[1] == 0))
+ {
+ _ASSERTE((cbCount == 5) || (cbCount == 30));
+
+ // index to 2 to skip prolog
+ *BestFit = pData[2] != 0;
+
+ // If this parameter exists,
+ if (cbCount == 30)
+ // index to end of data to skip description of named argument
+ *ThrowOnUnmappableChar = pData[29] != 0;
+ }
+
+ // Now try the interface/class/struct
+ if (IsNilToken(cl))
+ return;
+ hr = pInternalImport->GetCustomAttributeByName(cl, INTEROP_BESTFITMAPPING_TYPE, (const VOID**)(&pData), &cbCount);
+ if ((hr == S_OK) && (pData) && (cbCount > 4) && (pData[0] == 1) && (pData[1] == 0))
+ {
+ _ASSERTE((cbCount == 5) || (cbCount == 30));
+
+ // index to 2 to skip prolog
+ *BestFit = pData[2] != 0;
+
+ // If this parameter exists,
+ if (cbCount == 30)
+ // index to end of data to skip description of named argument
+ *ThrowOnUnmappableChar = pData[29] != 0;
+ }
+}
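+
+// For illustration (a sketch of the blob layout, matching the byte offsets used above):
+// [BestFitMapping(false, ThrowOnUnmappableChar = true)] produces the 30-byte case:
+//     bytes 0-1    01 00    prolog
+//     byte  2      00       BestFitMapping = false          (pData[2])
+//     bytes 3-4    01 00    one named argument
+//     bytes 5-28   named-argument descriptor: kind byte, ELEMENT_TYPE_BOOLEAN,
+//                  counted name "ThrowOnUnmappableChar"
+//     byte  29     01       ThrowOnUnmappableChar = true    (pData[29])
+// The 5-byte case is the same blob with zero named arguments.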
+
+
+int InternalWideToAnsi(__in_ecount(iNumWideChars) LPCWSTR szWideString, int iNumWideChars, __out_ecount_opt(cbAnsiBufferSize) LPSTR szAnsiString, int cbAnsiBufferSize, BOOL fBestFit, BOOL fThrowOnUnmappableChar)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ if ((szWideString == 0) || (iNumWideChars == 0) || (szAnsiString == 0) || (cbAnsiBufferSize == 0))
+ return 0;
+
+ DWORD flags = 0;
+ int retval;
+
+ if (fBestFit == FALSE)
+ flags = WC_NO_BEST_FIT_CHARS;
+
+ if (fThrowOnUnmappableChar)
+ {
+ BOOL DefaultCharUsed = FALSE;
+ retval = WszWideCharToMultiByte(CP_ACP,
+ flags,
+ szWideString,
+ iNumWideChars,
+ szAnsiString,
+ cbAnsiBufferSize,
+ NULL,
+ &DefaultCharUsed);
+ DWORD lastError = GetLastError();
+
+ if (retval == 0)
+ {
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ COMPlusThrowHR(HRESULT_FROM_WIN32(lastError));
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ }
+
+ if (DefaultCharUsed)
+ {
+ struct HelperThrow
+ {
+ static void Throw()
+ {
+ COMPlusThrow( kArgumentException, IDS_EE_MARSHAL_UNMAPPABLE_CHAR );
+ }
+ };
+
+ ENCLOSE_IN_EXCEPTION_HANDLER( HelperThrow::Throw );
+ }
+
+ }
+ else
+ {
+ retval = WszWideCharToMultiByte(CP_ACP,
+ flags,
+ szWideString,
+ iNumWideChars,
+ szAnsiString,
+ cbAnsiBufferSize,
+ NULL,
+ NULL);
+ DWORD lastError = GetLastError();
+
+ if (retval == 0)
+ {
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ COMPlusThrowHR(HRESULT_FROM_WIN32(lastError));
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ }
+ }
+
+ return retval;
+}
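+
+// Usage sketch (illustrative only, assuming the caller tolerates best-fit mapping):
+//
+//     CHAR szAnsi[64];
+//     int cb = InternalWideToAnsi(W("plain ASCII"), 11, szAnsi, sizeof(szAnsi),
+//                                 TRUE /* fBestFit */, FALSE /* fThrowOnUnmappableChar */);
+//     // cb is the number of bytes written; on conversion failure the function
+//     // throws rather than returning 0 (0 is returned only for trivial inputs)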
+
+//---------------------------------------------------------
+// Read the ClassInterfaceType custom attribute info from
+// both assembly level and interface level
+//---------------------------------------------------------
+CorClassIfaceAttr ReadClassInterfaceTypeCustomAttribute(TypeHandle type)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(!type.IsInterface());
+ }
+ CONTRACTL_END
+
+ const BYTE *pVal;
+ ULONG cbVal;
+
+ if (!type.GetMethodTable()->IsWinRTObjectType() && !type.GetMethodTable()->IsExportedToWinRT()) // ignore classic COM interop CA on WinRT types
+ {
+ // First look for the class interface attribute at the class level.
+ HRESULT hr = type.GetMethodTable()->GetMDImport()->GetCustomAttributeByName(type.GetCl(), INTEROP_CLASSINTERFACE_TYPE, (const void**)&pVal, &cbVal);
+ if (hr == S_OK)
+ {
+ CustomAttributeParser cap(pVal, cbVal);
+ U1 u1;
+
+ if (FAILED(cap.ValidateProlog()))
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_CLASS_INT_CA_FORMAT);
+ }
+ if (FAILED(cap.GetU1(&u1)))
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_CLASS_INT_CA_FORMAT);
+ }
+ if ((CorClassIfaceAttr)(u1) < clsIfLast)
+ {
+ return (CorClassIfaceAttr)(u1);
+ }
+ }
+
+ // If we haven't found the class interface attribute at the class level then look at the
+ // assembly level.
+ Assembly *pAssembly = type.GetAssembly();
+ hr = pAssembly->GetManifestImport()->GetCustomAttributeByName(pAssembly->GetManifestToken(), INTEROP_CLASSINTERFACE_TYPE, (const void**)&pVal, &cbVal);
+ IfFailThrow(hr);
+ if (hr == S_OK)
+ {
+ CustomAttributeParser cap(pVal, cbVal);
+ U1 u1;
+
+ if (FAILED(cap.ValidateProlog()))
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_CLASS_INT_CA_FORMAT);
+ }
+ if (FAILED(cap.GetU1(&u1)))
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_CLASS_INT_CA_FORMAT);
+ }
+ if ((CorClassIfaceAttr)(u1) < clsIfLast)
+ {
+ return (CorClassIfaceAttr)(u1);
+ }
+ }
+ }
+
+ return DEFAULT_CLASS_INTERFACE_TYPE;
+}
+
+//--------------------------------------------------------------------------------
+// GetErrorInfo helper, enables and disables GC during call-outs
+HRESULT SafeGetErrorInfo(IErrorInfo **ppIErrInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ppIErrInfo));
+ }
+ CONTRACTL_END;
+
+ *ppIErrInfo = NULL;
+
+#ifdef FEATURE_COMINTEROP
+ GCX_PREEMP();
+
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ LeaveRuntimeHolder lrh((size_t)GetErrorInfo);
+ hr = GetErrorInfo(0, ppIErrInfo);
+ }
+ EX_CATCH
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return hr;
+#else // FEATURE_COMINTEROP
+ // Indicate no error object
+ return S_FALSE;
+#endif
+}
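+
+// Usage sketch (illustrative only): retrieving the per-thread error object after a
+// failed COM call; S_FALSE means no error object was set.
+//
+//     IErrorInfo *pErrInfo = NULL;
+//     if (SafeGetErrorInfo(&pErrInfo) == S_OK && pErrInfo != NULL)
+//     {
+//         // ... read the details (e.g. hand it to FillExceptionData, which
+//         // releases it) or release it explicitly with SafeRelease ...
+//     }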
+
+HRESULT SafeQueryInterfaceHosted(IUnknown* pUnk, REFIID riid, IUnknown** pResUnk)
+{
+ CONTRACTL
+ {
+ THROWS; // message pump could happen, so arbitrary managed code could run
+ GC_TRIGGERS;
+ DISABLED(MODE_PREEMPTIVE); // disabled because I couldn't figure out how to tell SCAN about my
+ // manual mode change in SafeQueryInterface
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ LeaveRuntimeHolder lrh(*((*(size_t**)pUnk)+0));
+ return pUnk->QueryInterface(riid, (void**) pResUnk);
+}
+
+
+#include <optsmallperfcritical.h>
+//--------------------------------------------------------------------------------
+// QI helper, enables and disables GC during call-outs
+HRESULT SafeQueryInterface(IUnknown* pUnk, REFIID riid, IUnknown** pResUnk)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_TOLERANT;
+ _ASSERTE(pUnk);
+ _ASSERTE(pResUnk);
+
+ Thread * const pThread = GetThreadNULLOk();
+
+ *pResUnk = NULL;
+ HRESULT hr = E_FAIL;
+
+ GCX_PREEMP_NO_DTOR_HAVE_THREAD(pThread);
+
+ BEGIN_CONTRACT_VIOLATION(ThrowsViolation); // message pump could happen, so arbitrary managed code could run
+ BEGIN_SO_TOLERANT_CODE(pThread);
+
+ struct Param { HRESULT * const hr; IUnknown** const pUnk; REFIID riid; IUnknown*** const pResUnk; } param = { &hr, &pUnk, riid, &pResUnk };
+#define PAL_TRY_ARG(argName) (*(pParam->argName))
+#define PAL_TRY_REFARG(argName) (pParam->argName)
+ PAL_TRY(Param * const, pParam, &param)
+ {
+ if (CLRTaskHosted())
+ {
+ PAL_TRY_ARG(hr) = SafeQueryInterfaceHosted(PAL_TRY_ARG(pUnk), PAL_TRY_REFARG(riid), PAL_TRY_ARG(pResUnk));
+ }
+ else
+ {
+ PAL_TRY_ARG(hr) = PAL_TRY_ARG(pUnk)->QueryInterface(PAL_TRY_REFARG(riid), (void**) PAL_TRY_ARG(pResUnk));
+ }
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+#if defined(STACK_GUARDS_DEBUG)
+ // Catching and just swallowing an exception means we need to tell
+ // the SO code that it should go back to normal operation, as it
+ // currently thinks that the exception is still on the fly.
+ GetThread()->GetCurrentStackGuard()->RestoreCurrentGuard();
+#endif
+ }
+ PAL_ENDTRY;
+#undef PAL_TRY_ARG
+#undef PAL_TRY_REFARG
+
+ END_SO_TOLERANT_CODE;
+ END_CONTRACT_VIOLATION;
+
+ LOG((LF_INTEROP, LL_EVERYTHING, hr == S_OK ? "QI Succeeded\n" : "QI Failed\n"));
+
+ // Ensure if the QI returned ok that it actually set a pointer.
+ if (hr == S_OK)
+ {
+ if (*pResUnk == NULL)
+ hr = E_NOINTERFACE;
+ }
+
+ GCX_PREEMP_NO_DTOR_END();
+
+ return hr;
+}
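+
+// Usage sketch (illustrative only, assuming pUnk is a valid interface pointer obtained
+// elsewhere): pair the QI with SafeRelease and the interop logging helpers, as the
+// callers in this file do.
+//
+//     IUnknown *pItf = NULL;
+//     HRESULT hr = SafeQueryInterface(pUnk, IID_IDispatch, &pItf);
+//     LogInteropQI(pUnk, IID_IDispatch, hr, "IDispatch");
+//     if (SUCCEEDED(hr))
+//     {
+//         // ... use pItf ...
+//         ULONG cbRef = SafeRelease(pItf);
+//         LogInteropRelease(pItf, cbRef, "IDispatch");
+//     }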
+
+
+//--------------------------------------------------------------------------------
+// QI helper, must be called in preemptive mode. Faster than the MODE_ANY version
+// because it doesn't need to toggle the mode. Use this version only if you already
+// know that you're in preemptive mode for other reasons.
+HRESULT SafeQueryInterfacePreemp(IUnknown* pUnk, REFIID riid, IUnknown** pResUnk)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+ STATIC_CONTRACT_SO_TOLERANT;
+ _ASSERTE(pUnk);
+ _ASSERTE(pResUnk);
+
+ Thread * const pThread = GetThreadNULLOk();
+
+ *pResUnk = NULL;
+ HRESULT hr = E_FAIL;
+
+ BEGIN_CONTRACT_VIOLATION(ThrowsViolation); // message pump could happen, so arbitrary managed code could run
+ BEGIN_SO_TOLERANT_CODE(pThread);
+
+ struct Param { HRESULT * const hr; IUnknown** const pUnk; REFIID riid; IUnknown*** const pResUnk; } param = { &hr, &pUnk, riid, &pResUnk };
+#define PAL_TRY_ARG(argName) (*(pParam->argName))
+#define PAL_TRY_REFARG(argName) (pParam->argName)
+ PAL_TRY(Param * const, pParam, &param)
+ {
+ if (CLRTaskHosted())
+ {
+ PAL_TRY_ARG(hr) = SafeQueryInterfaceHosted(PAL_TRY_ARG(pUnk), PAL_TRY_REFARG(riid), PAL_TRY_ARG(pResUnk));
+ }
+ else
+ {
+ PAL_TRY_ARG(hr) = PAL_TRY_ARG(pUnk)->QueryInterface(PAL_TRY_REFARG(riid), (void**) PAL_TRY_ARG(pResUnk));
+ }
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+#if defined(STACK_GUARDS_DEBUG)
+ // Catching and just swallowing an exception means we need to tell
+ // the SO code that it should go back to normal operation, as it
+ // currently thinks that the exception is still on the fly.
+ GetThread()->GetCurrentStackGuard()->RestoreCurrentGuard();
+#endif
+ }
+ PAL_ENDTRY;
+#undef PAL_TRY_ARG
+#undef PAL_TRY_REFARG
+
+ END_SO_TOLERANT_CODE;
+ END_CONTRACT_VIOLATION;
+
+
+ LOG((LF_INTEROP, LL_EVERYTHING, hr == S_OK ? "QI Succeeded\n" : "QI Failed\n"));
+
+ // Ensure if the QI returned ok that it actually set a pointer.
+ if (hr == S_OK)
+ {
+ if (*pResUnk == NULL)
+ hr = E_NOINTERFACE;
+ }
+
+ return hr;
+}
+#include <optdefault.h>
+
+#ifdef FEATURE_COMINTEROP
+
+// Process-unique GUID, created lazily on first request.
+static Volatile<BSTR> bstrProcessGUID = NULL;
+
+// Global process GUID to identify the process
+BSTR GetProcessGUID()
+{
+ CONTRACT (BSTR)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // See if we were beaten to it.
+ if (bstrProcessGUID.Load() == NULL)
+ {
+ // setup a process unique GUID
+ GUID processGUID = GUID_NULL;
+ HRESULT hr = CoCreateGuid(&processGUID);
+ _ASSERTE(hr == S_OK);
+ if (hr != S_OK)
+ RETURN NULL;
+
+ // This is a global memory alloc that will live as long as the process.
+ NewArrayHolder<WCHAR> guidstr = new (nothrow) WCHAR[48];
+ if (!guidstr)
+ RETURN NULL;
+
+ int cbLen = GuidToLPWSTR (processGUID, guidstr, 46);
+ _ASSERTE(cbLen <= 46);
+
+ // Publish the GUID string; if another thread won the race, the holder frees our copy.
+ if (FastInterlockCompareExchangePointer(bstrProcessGUID.GetPointer(), guidstr.GetValue(), NULL ) == NULL)
+ {
+ guidstr.SuppressRelease();
+ }
+
+ }
+
+ RETURN bstrProcessGUID;
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+//--------------------------------------------------------------------------------
+// Cleanup helpers
+//--------------------------------------------------------------------------------
+void MinorCleanupSyncBlockComData(InteropSyncBlockInfo* pInteropInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION( GCHeap::IsGCInProgress() || ( (g_fEEShutDown & ShutDown_SyncBlock) && g_fProcessDetach ) );
+ }
+ CONTRACTL_END;
+
+ // No need to notify the thread that the RCW is in use here.
+ // This is a privileged function called during GC or shutdown.
+ RCW* pRCW = pInteropInfo->GetRawRCW();
+ if (pRCW)
+ pRCW->MinorCleanup();
+}
+
+void CleanupSyncBlockComData(InteropSyncBlockInfo* pInteropInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if ((g_fEEShutDown & ShutDown_SyncBlock) && g_fProcessDetach )
+ MinorCleanupSyncBlockComData(pInteropInfo);
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ ComClassFactory* pComClassFactory = pInteropInfo->GetComClassFactory();
+ if (pComClassFactory)
+ {
+ delete pComClassFactory;
+ pInteropInfo->SetComClassFactory(NULL);
+ }
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+ // No need to notify the thread that the RCW is in use here.
+ // This is only called during finalization of a __ComObject so no one
+ // else could have a reference to this object.
+ RCW* pRCW = pInteropInfo->GetRawRCW();
+ if (pRCW)
+ {
+ pInteropInfo->SetRawRCW(NULL);
+ pRCW->Cleanup();
+ }
+
+ ComCallWrapper* pCCW = pInteropInfo->GetCCW();
+ if (pCCW)
+ {
+ pInteropInfo->SetCCW(NULL);
+ pCCW->Cleanup();
+ }
+}
+
+void ReleaseRCWsInCachesNoThrow(LPVOID pCtxCookie)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCtxCookie, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ ReleaseRCWsInCaches(pCtxCookie);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+//--------------------------------------------------------------------------------
+// Helper to release all of the RCWs in the specified context across all caches.
+// If pCtxCookie is NULL, release all RCWs
+void ReleaseRCWsInCaches(LPVOID pCtxCookie)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCtxCookie, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Go through all the app domains and for each one release all the
+ // RCW's that live in the current context.
+ AppDomainIterator i(TRUE);
+ while (i.Next())
+ i.GetDomain()->ReleaseRCWs(pCtxCookie);
+
+ if (!g_fEEShutDown)
+ {
+ GCX_COOP();
+
+ // If the finalizer thread has sync blocks to clean up or if it is in the process
+ // of cleaning up the sync blocks, we need to wait for it to finish.
+ if (FinalizerThread::GetFinalizerThread()->RequireSyncBlockCleanup() || SyncBlockCache::GetSyncBlockCache()->IsSyncBlockCleanupInProgress())
+ FinalizerThread::FinalizerThreadWait();
+
+ // If more sync blocks were added while the finalizer thread was calling the finalizers
+ // or while it was transitioning into a context to clean up the IP's, we need to wake
+ // it up again to have it clean up the newly added sync blocks.
+ if (FinalizerThread::GetFinalizerThread()->RequireSyncBlockCleanup() || SyncBlockCache::GetSyncBlockCache()->IsSyncBlockCleanupInProgress())
+ FinalizerThread::FinalizerThreadWait();
+ }
+}
+
+// Out-of-line to keep SafeComHolder's required C++ prolog/epilog out of GetCCWfromIUnknown
+NOINLINE ComCallWrapper* GetCCWFromIUnknown_CrossDomain(IUnknown* pUnk, ComCallWrapper* pWrap, AppDomain* pDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // We ignore PreferComInsteadOfManagedRemoting/ICustomQueryInterface if the CCW is from
+ // the current domain (we never create RCWs pointing to CCWs in the same domain).
+
+ if (pDomain && pDomain->GetPreferComInsteadOfManagedRemoting())
+ {
+ return NULL;
+ }
+
+ // If the object customizes QueryInterface and refuses IManagedObject, don't do this bypass
+ if (pWrap && pWrap->GetSimpleWrapper() && pWrap->GetSimpleWrapper()->SupportsICustomQueryInterface())
+ {
+ SafeComHolder<IUnknown> pUnk = ComCallWrapper::GetComIPFromCCWNoThrow(
+ pWrap, IID_IManagedObject, NULL,
+ GetComIPFromCCW::CheckVisibility);
+ if(!pUnk)
+ {
+ // They bypassed QueryInterface so don't collapse this IUnknown to the underlying managed object
+ pWrap = NULL;
+ }
+ }
+ return pWrap;
+}
+
+//--------------------------------------------------------------------------------
+// Marshalling Helpers
+//--------------------------------------------------------------------------------
+
+
+// Converts an IUnknown to a CCW. Returns NULL if the pUnk is not on a
+// managed tear-off, or if it is on a managed tear-off that has been
+// aggregated.
+ComCallWrapper* GetCCWFromIUnknown(IUnknown* pUnk, BOOL bEnableCustomization)
+{
+ CONTRACT (ComCallWrapper*)
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pUnk));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pUnk);
+ if (pWrap != NULL)
+ {
+ // check if this wrapper is aggregated
+ if (pWrap->GetOuter() != NULL)
+ {
+ pWrap = NULL;
+ }
+
+ // Only check for an interface if caller set bEnableCustomization to TRUE
+ if (bEnableCustomization)
+ {
+ AppDomain *pDomain = GetAppDomain();
+ if (pDomain == NULL || pWrap == NULL || pDomain->GetId() != pWrap->GetDomainID())
+ {
+ pWrap = GetCCWFromIUnknown_CrossDomain(pUnk, pWrap, pDomain);
+ }
+ }
+ }
+
+ RETURN pWrap;
+}
+
+
+HRESULT LoadRegTypeLibWithFlags(REFGUID guid,
+ unsigned short wVerMajor,
+ unsigned short wVerMinor,
+ int flags,
+ ITypeLib** pptlib)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ *pptlib = NULL;
+
+ GCX_PREEMP();
+
+ BSTRHolder wzPath;
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ LeaveRuntimeHolder lrh((size_t)QueryPathOfRegTypeLib);
+ hr = QueryPathOfRegTypeLib(guid, wVerMajor, wVerMinor, LOCALE_USER_DEFAULT, &wzPath);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (FAILED(hr))
+ return hr;
+
+ hr = LoadTypeLibExWithFlags(wzPath, flags, pptlib);
+
+ return hr;
+}
+
+
+HRESULT LoadTypeLibExWithFlags(LPCOLESTR szFile,
+ int flags,
+ ITypeLib** pptlib)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_COMINTEROP_TLB_SUPPORT
+ return E_FAIL;
+#else //FEATURE_COMINTEROP_TLB_SUPPORT
+
+ *pptlib = NULL;
+
+ GCX_PREEMP();
+
+ REGKIND rk = REGKIND_NONE;
+
+ if ((flags & TlbExporter_ExportAs64Bit) == TlbExporter_ExportAs64Bit)
+ {
+ rk = (REGKIND)(REGKIND_NONE | LOAD_TLB_AS_64BIT);
+ }
+ else if ((flags & TlbExporter_ExportAs32Bit) == TlbExporter_ExportAs32Bit)
+ {
+ rk = (REGKIND)(REGKIND_NONE | LOAD_TLB_AS_32BIT);
+ }
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ LeaveRuntimeHolder lrh((size_t)LoadTypeLibEx);
+
+ hr = LoadTypeLibEx(szFile, rk, pptlib);
+
+ // If we fail with E_INVALIDARG, it's probably because we're on a downlevel
+ // platform that doesn't support loading type libraries by bitness.
+ if (hr == E_INVALIDARG)
+ {
+ hr = LoadTypeLibEx(szFile, REGKIND_NONE, pptlib);
+ }
+ }
+ EX_CATCH
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return hr;
+#endif //FEATURE_COMINTEROP_TLB_SUPPORT
+}
+
+
+
+// The HRESULT for CLR-created IErrorInfo pointers is accessible
+// from the enclosing simple wrapper.
+// This is in-proc only.
+HRESULT GetHRFromCLRErrorInfo(IErrorInfo* pErr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pErr));
+ PRECONDITION(IsInProcCCWTearOff(pErr));
+ PRECONDITION(IsSimpleTearOff(pErr));
+ }
+ CONTRACTL_END;
+
+ SimpleComCallWrapper* pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pErr);
+ return pSimpleWrap->IErrorInfo_hr();
+}
+
+VOID EnsureComStarted(BOOL fCoInitCurrentThread)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(GetThread() || !fCoInitCurrentThread);
+ PRECONDITION(g_fEEStarted);
+ }
+ CONTRACTL_END;
+
+ if (g_fComStarted == FALSE)
+ {
+ FinalizerThread::GetFinalizerThread()->SetRequiresCoInitialize();
+
+ // Attempt to set the thread's apartment model (to MTA by default). May not
+ // succeed (if someone beat us to the punch). That doesn't matter (since
+ // COM+ objects are now apartment agile), we only care that a CoInitializeEx
+ // has been performed on this thread by us.
+ if (fCoInitCurrentThread)
+ GetThread()->SetApartment(Thread::AS_InMTA, FALSE);
+
+ // set the finalizer event
+ FinalizerThread::EnableFinalization();
+
+ g_fComStarted = TRUE;
+ }
+}
+
+HRESULT EnsureComStartedNoThrow(BOOL fCoInitCurrentThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(g_fEEStarted);
+ PRECONDITION(GetThread() != NULL); // Should always be inside BEGIN_EXTERNAL_ENTRYPOINT
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (!g_fComStarted)
+ {
+ GCX_COOP();
+ EX_TRY
+ {
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+
+ EnsureComStarted(fCoInitCurrentThread);
+
+ END_SO_INTOLERANT_CODE;
+ }
+ EX_CATCH_HRESULT(hr);
+ }
+
+ return hr;
+}
+
+//--------------------------------------------------------------------------------
+// BOOL ExtendsComImport(MethodTable* pMT);
+// Checks whether the class is, or extends, a COM imported class.
+BOOL ExtendsComImport(MethodTable* pMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ while (pMT != NULL && !pMT->IsComImport())
+ {
+ pMT = pMT->GetParentMethodTable();
+ }
+ return pMT != NULL;
+}
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+//--------------------------------------------------------------------------------
+// Gets the CLSID from the specified Prog ID.
+
+HRESULT GetCLSIDFromProgID(__in_z WCHAR *strProgId, GUID *pGuid)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+#ifdef FEATURE_CORESYSTEM
+ LeaveRuntimeHolderNoThrow lrh((size_t)CLSIDFromProgID);
+#else
+ LeaveRuntimeHolderNoThrow lrh((size_t)CLSIDFromProgIDEx);
+#endif
+ hr = lrh.GetHR();
+ if (FAILED(hr))
+ return hr;
+
+#ifdef FEATURE_CORESYSTEM
+ return CLSIDFromProgID(strProgId, pGuid);
+#else
+ return CLSIDFromProgIDEx(strProgId, pGuid);
+#endif
+}
+#endif // FEATURE_CLASSIC_COMINTEROP
+
+NOINLINE ULONG SafeAddRefHosted(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ THROWS; // arbitrary managed code could run
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ LeaveRuntimeHolderNoThrow lrh(*((*(size_t**)pUnk)+1));
+ if (FAILED(lrh.GetHR()))
+ return ~0;
+
+ return pUnk->AddRef();
+}
+
+#include <optsmallperfcritical.h>
+//--------------------------------------------------------------------------------
+// AddRef helper, enables and disables GC during call-outs
+ULONG SafeAddRef(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ ULONG res = ~0;
+ if (pUnk == NULL)
+ return res;
+
+ GCX_PREEMP_NO_DTOR();
+
+ // @TODO: Consider special-casing this when we know it's one of ours so
+ // that we can avoid having to 'leave' and then 'enter'.
+
+ CONTRACT_VIOLATION(ThrowsViolation); // arbitrary managed code could run
+
+ if (CLRTaskHosted()) // Check hoisted out of LeaveRuntimeHolder to
+ { // keep LeaveRuntimeHolder off of common path.
+ res = SafeAddRefHosted(pUnk);
+ }
+ else
+ {
+ res = pUnk->AddRef();
+ }
+
+ GCX_PREEMP_NO_DTOR_END();
+
+ return res;
+}
+
+//--------------------------------------------------------------------------------
+// AddRef helper, must be called in preemptive mode. Only use this variant if
+// you already know you're in preemptive mode for other reasons.
+ULONG SafeAddRefPreemp(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ ULONG res = ~0;
+ if (pUnk == NULL)
+ return res;
+
+ // @TODO: Consider special-casing this when we know it's one of ours so
+ // that we can avoid having to 'leave' and then 'enter'.
+
+ CONTRACT_VIOLATION(ThrowsViolation); // arbitrary managed code could run
+
+ if (CLRTaskHosted()) // Check hoisted out of LeaveRuntimeHolder to
+ { // keep LeaveRuntimeHolder off of common path.
+ res = SafeAddRefHosted(pUnk);
+ }
+ else
+ {
+ res = pUnk->AddRef();
+ }
+
+ return res;
+}
+#include <optdefault.h>
+
+//--------------------------------------------------------------------------------
+// Ole RPC seems to return an inconsistent SafeArray for arrays created with
+// SafeArrayCreateVector(VT_BSTR, ...). OleAut's SafeArrayGetVartype() doesn't notice
+// the inconsistency and returns a valid-seeming (but wrong) vartype.
+// Our version is more discriminating. This should only be used for
+// marshaling scenarios where we can assume unmanaged code permissions
+// (and hence are already in a position of trusting unmanaged data.)
+
+HRESULT ClrSafeArrayGetVartype(SAFEARRAY *psa, VARTYPE *pvt)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(psa));
+ PRECONDITION(CheckPointer(pvt));
+ }
+ CONTRACTL_END;
+
+ if (pvt == NULL || psa == NULL)
+ {
+ // with both args null this is what oleaut would return on call to SafeArrayGetVarType
+ return E_INVALIDARG;
+ }
+
+ USHORT fFeatures = psa->fFeatures;
+ USHORT hardwiredType = (fFeatures & (FADF_BSTR|FADF_UNKNOWN|FADF_DISPATCH|FADF_VARIANT));
+
+ if (hardwiredType == FADF_BSTR && psa->cbElements == sizeof(BSTR))
+ {
+ *pvt = VT_BSTR;
+ return S_OK;
+ }
+ else if (hardwiredType == FADF_UNKNOWN && psa->cbElements == sizeof(IUnknown*))
+ {
+ *pvt = VT_UNKNOWN;
+ return S_OK;
+ }
+ else if (hardwiredType == FADF_DISPATCH && psa->cbElements == sizeof(IDispatch*))
+ {
+ *pvt = VT_DISPATCH;
+ return S_OK;
+ }
+ else if (hardwiredType == FADF_VARIANT && psa->cbElements == sizeof(VARIANT))
+ {
+ *pvt = VT_VARIANT;
+ return S_OK;
+ }
+ else
+ {
+ // We've got a SAFEARRAY, so oleaut32.dll should already be loaded.
+ _ASSERTE(GetModuleHandleA("oleaut32.dll") != NULL);
+ CONTRACT_VIOLATION(ThrowsViolation);
+ return ::SafeArrayGetVartype(psa, pvt);
+ }
+}
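+
+// Usage sketch (illustrative only, assuming psa arrived from unmanaged code):
+//
+//     VARTYPE vt;
+//     if (SUCCEEDED(ClrSafeArrayGetVartype(psa, &vt)) && vt == VT_BSTR)
+//     {
+//         // the elements can safely be treated as BSTRs
+//     }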
+
+//--------------------------------------------------------------------------------
+// Safe VariantChangeType helper, enables and disables GC during call-outs
+HRESULT SafeVariantChangeType(VARIANT* pVarRes, VARIANT* pVarSrc,
+ unsigned short wFlags, VARTYPE vt)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pVarRes));
+ PRECONDITION(CheckPointer(pVarSrc));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ if (pVarRes)
+ {
+ GCX_PREEMP();
+ EX_TRY
+ {
+ LeaveRuntimeHolderNoThrow lrh((size_t)VariantChangeType);
+ hr = lrh.GetHR();
+
+ if (!FAILED(hr))
+ hr = VariantChangeType(pVarRes, pVarSrc, wFlags, vt);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ return hr;
+}
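+
+// Usage sketch (illustrative only): coercing an I4 variant to a BSTR.
+//
+//     VARIANT varSrc, varDst;
+//     SafeVariantInit(&varSrc);
+//     SafeVariantInit(&varDst);
+//     V_VT(&varSrc) = VT_I4;
+//     V_I4(&varSrc) = 42;
+//     if (SUCCEEDED(SafeVariantChangeType(&varDst, &varSrc, 0, VT_BSTR)))
+//     {
+//         // V_BSTR(&varDst) now holds L"42"; clean up with VariantClear
+//     }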
+
+//--------------------------------------------------------------------------------
+HRESULT SafeVariantChangeTypeEx(VARIANT* pVarRes, VARIANT* pVarSrc,
+ LCID lcid, unsigned short wFlags, VARTYPE vt)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pVarRes));
+ PRECONDITION(CheckPointer(pVarSrc));
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+ _ASSERTE(GetModuleHandleA("oleaut32.dll") != NULL);
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ HRESULT hr = S_OK;
+ LeaveRuntimeHolderNoThrow lrh((size_t)VariantChangeTypeEx);
+ hr = lrh.GetHR();
+
+ if (!FAILED(hr))
+ hr = VariantChangeTypeEx (pVarRes, pVarSrc,lcid,wFlags,vt);
+
+ return hr;
+}
+
+//--------------------------------------------------------------------------------
+void SafeVariantInit(VARIANT* pVar)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pVar));
+ }
+ CONTRACTL_END;
+
+ // From the oa sources
+ V_VT(pVar) = VT_EMPTY;
+}
+
+//--------------------------------------------------------------------------------
+// void SafeReleaseStream(IStream *pStream)
+void SafeReleaseStream(IStream *pStream)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pStream));
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ {
+ HRESULT hr = S_OK;
+
+ LeaveRuntimeHolderNoThrow lrh((size_t)CoReleaseMarshalData);
+ hr = lrh.GetHR();
+
+ if (!FAILED(hr))
+ {
+ hr = CoReleaseMarshalData(pStream);
+
+#ifdef _DEBUG
+ wchar_t logStr[200];
+ swprintf_s(logStr, NumItems(logStr), W("Object gone: CoReleaseMarshalData returned %x, file %s, line %d\n"), hr, __FILE__, __LINE__);
+ LogInterop(logStr);
+ if (hr != S_OK)
+ {
+ // Reset the stream to the beginning
+ LARGE_INTEGER li;
+ LISet32(li, 0);
+ ULARGE_INTEGER li2;
+ pStream->Seek(li, STREAM_SEEK_SET, &li2);
+ hr = CoReleaseMarshalData(pStream);
+ swprintf_s(logStr, NumItems(logStr), W("Object gone: CoReleaseMarshalData returned %x, file %s, line %d\n"), hr, __FILE__, __LINE__);
+ LogInterop(logStr);
+ }
+#endif
+ }
+ }
+
+ ULONG cbRef = SafeReleasePreemp(pStream);
+ LogInteropRelease(pStream, cbRef, "Release marshal Stream");
+}
+
+//---------------------------------------------------------------------------
+// Returns TRUE if the IID represents an IClassX for this class.
+BOOL IsIClassX(MethodTable *pMT, REFIID riid, ComMethodTable **ppComMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(ppComMT));
+ }
+ CONTRACTL_END;
+
+ // Walk up the hierarchy starting at the specified method table and compare
+ // the IID's of the IClassX's against the specified IID.
+ while (pMT != NULL)
+ {
+ ComCallWrapperTemplate *pTemplate = ComCallWrapperTemplate::GetTemplate(pMT);
+ if (pTemplate->SupportsIClassX())
+ {
+ ComMethodTable *pComMT =
+ ComCallWrapperTemplate::SetupComMethodTableForClass(pMT, FALSE);
+ _ASSERTE(pComMT);
+
+ if (IsEqualIID(riid, pComMT->GetIID()))
+ {
+ *ppComMT = pComMT;
+ return TRUE;
+ }
+ }
+
+ pMT = pMT->GetComPlusParentMethodTable();
+ }
+
+ return FALSE;
+}
+
+#endif //#ifndef CROSSGEN_COMPILE
+
+
+//---------------------------------------------------------------------------
+// Returns TRUE if we support IClassX (the auto-generated class interface)
+// for the given class.
+BOOL ClassSupportsIClassX(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // WinRT delegates use IClassX
+ if (pMT->IsWinRTDelegate())
+ return TRUE;
+
+ if (pMT->IsWinRTObjectType() || pMT->IsExportedToWinRT())
+ {
+ // Other than that, WinRT does not need IClassX, so the goal is to return FALSE
+ // for anything that is guaranteed not to be a legacy classic COM interop scenario.
+ return FALSE;
+ }
+
+ // If the class is decorated with an explicit ClassInterfaceAttribute, we're going to say yes.
+ if (S_OK == pMT->GetMDImport()->GetCustomAttributeByName(pMT->GetCl(), INTEROP_CLASSINTERFACE_TYPE, NULL, NULL))
+ return TRUE;
+
+ MethodTable::InterfaceMapIterator it = pMT->IterateInterfaceMap();
+ while (it.Next())
+ {
+ MethodTable *pItfMT = it.GetInterfaceInfo()->GetApproxMethodTable(pMT->GetLoaderModule());
+ if (pItfMT->IsProjectedFromWinRT())
+ return FALSE;
+ }
+
+ return TRUE;
+}
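+
+// For illustration only (hypothetical managed types, not part of the original sources):
+// a class marked [ClassInterface(ClassInterfaceType.AutoDual)] reports TRUE here through
+// the explicit-attribute check, a WinRT runtime class reports FALSE, and a plain managed
+// class implementing a projected WinRT interface reports FALSE via the interface-map walk.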
+
+
+#ifndef CROSSGEN_COMPILE
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+//---------------------------------------------------------------------------
+// OBJECTREF AllocateComObject_ForManaged(MethodTable* pMT)
+OBJECTREF AllocateComObject_ForManaged(MethodTable* pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(pMT->IsComObjectType());
+ PRECONDITION(!pMT->IsProjectedFromWinRT());
+ }
+ CONTRACTL_END;
+
+ // Calls to COM up ahead.
+ HRESULT hr = S_OK;
+ EnsureComStarted();
+
+ ComClassFactory *pComClsFac = (ComClassFactory *)GetComClassFactory(pMT);
+ return pComClsFac->CreateInstance(pMT, TRUE);
+}
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+
+//---------------------------------------------------------------------------
+// Gets or loads the type for a given CLSID, caching the result per AppDomain.
+MethodTable* GetTypeForCLSID(REFCLSID rclsid, BOOL* pfAssemblyInReg)
+{
+ CONTRACT (MethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ AppDomain* pDomain = GetAppDomain();
+ _ASSERTE(pDomain);
+
+ // check to see if we have this class cached
+ MethodTable *pMT = pDomain->LookupClass(rclsid);
+ if (pMT == NULL)
+ {
+ pMT = pDomain->LoadCOMClass(rclsid, FALSE, pfAssemblyInReg);
+ if (pMT != NULL)
+ pDomain->InsertClassForCLSID(pMT, TRUE);
+ }
+ RETURN pMT;
+}
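+
+// For illustration only (hypothetical caller, not part of the original sources):
+//
+// BOOL fAssemblyInReg = FALSE;
+// MethodTable *pMT = GetTypeForCLSID(someClsid, &fAssemblyInReg);
+// if (pMT == NULL) { /* no managed type is registered; callers fall back to __ComObject */ }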
+
+
+//---------------------------------------------------------------------------
+// Gets or loads the value class for a given GUID.
+MethodTable* GetValueTypeForGUID(REFCLSID guid)
+{
+ CONTRACT (MethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ AppDomain* pDomain = GetAppDomain();
+ _ASSERTE(pDomain);
+
+ // Check to see if we have this value class cached
+ MethodTable *pMT = pDomain->LookupClass(guid);
+ if (pMT == NULL)
+ pMT = pDomain->LoadCOMClass(guid, TRUE, NULL);
+
+ if (pMT)
+ {
+ // Make sure the class is a value class.
+ if (!pMT->IsValueType())
+ {
+ DefineFullyQualifiedNameForClassW();
+ COMPlusThrow(kArgumentException, IDS_EE_GUID_REPRESENTS_NON_VC,
+ GetFullyQualifiedNameForClassNestedAwareW(pMT));
+ }
+
+ // Insert the type in our map from CLSID to method table.
+ pDomain->InsertClassForCLSID(pMT, TRUE);
+ }
+
+ RETURN pMT;
+}
+
+#endif // FEATURE_CLASSIC_COMINTEROP
+
+#endif //#ifndef CROSSGEN_COMPILE
+
+
+//---------------------------------------------------------------------------
+// This method returns the default interface for the class.
+DefaultInterfaceType GetDefaultInterfaceForClassInternal(TypeHandle hndClass, TypeHandle *pHndDefClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(!hndClass.IsNull());
+ PRECONDITION(CheckPointer(pHndDefClass));
+ PRECONDITION(!hndClass.GetMethodTable()->IsInterface());
+ }
+ CONTRACTL_END;
+
+ // Clear *pHndDefClass before we start.
+ *pHndDefClass = TypeHandle();
+
+ HRESULT hr = S_FALSE;
+ MethodTable* pClassMT = hndClass.GetMethodTable();
+ const void* pvData;
+ ULONG cbData;
+ CorClassIfaceAttr ClassItfType;
+ BOOL bComVisible;
+
+ PREFIX_ASSUME(pClassMT != NULL);
+
+ if (pClassMT->IsWinRTObjectType() || pClassMT->IsExportedToWinRT())
+ {
+ // there's no point executing the rest of the function for WinRT classes
+ return DefaultInterfaceType_IUnknown;
+ }
+
+ if (pClassMT->IsComImport())
+ {
+ ClassItfType = clsIfNone;
+ bComVisible = TRUE;
+ }
+ else
+ {
+ ClassItfType = pClassMT->GetComClassInterfaceType();
+ bComVisible = IsTypeVisibleFromCom(hndClass);
+ }
+
+ // If the class is not COM visible, then its default interface is IUnknown.
+ if (!bComVisible)
+ return DefaultInterfaceType_IUnknown;
+
+ // Start by checking for the ComDefaultInterface attribute.
+ hr = pClassMT->GetMDImport()->GetCustomAttributeByName(pClassMT->GetCl(), INTEROP_COMDEFAULTINTERFACE_TYPE, &pvData, &cbData);
+ IfFailThrow(hr);
+ if (hr == S_OK && cbData > 2)
+ {
+ TypeHandle DefItfType;
+ AppDomain *pCurrDomain = SystemDomain::GetCurrentDomain();
+
+ CustomAttributeParser cap(pvData, cbData);
+ IfFailThrow(cap.SkipProlog());
+
+ LPCUTF8 szStr;
+ ULONG cbStr;
+ IfFailThrow(cap.GetNonNullString(&szStr, &cbStr));
+
+ // Allocate a new buffer that will contain the name of the default COM interface.
+ StackSString defItf(SString::Utf8, szStr, cbStr);
+
+ // Load the default COM interface specified in the CA.
+ {
+ GCX_COOP();
+
+ DefItfType = TypeName::GetTypeUsingCASearchRules(defItf.GetUnicode(), pClassMT->GetAssembly());
+
+ // If the type handle isn't a named type, then throw an exception using
+ // the name of the type obtained from the custom attribute.
+ if (!DefItfType.GetMethodTable())
+ {
+ // This should only occur for TypeDesc's.
+ StackSString ssClassName;
+ DefineFullyQualifiedNameForClassW()
+ COMPlusThrow(kTypeLoadException, IDS_EE_INVALIDCOMDEFITF,
+ GetFullyQualifiedNameForClassW(pClassMT),
+ defItf.GetUnicode());
+ }
+
+ // Otherwise, if the type is not an interface, throw an exception using the actual
+ // name of the type.
+ if (!DefItfType.IsInterface())
+ {
+ StackSString ssClassName;
+ StackSString ssInvalidItfName;
+ pClassMT->_GetFullyQualifiedNameForClass(ssClassName);
+ DefItfType.GetMethodTable()->_GetFullyQualifiedNameForClass(ssInvalidItfName);
+ COMPlusThrow(kTypeLoadException, IDS_EE_INVALIDCOMDEFITF,
+ ssClassName.GetUnicode(), ssInvalidItfName.GetUnicode());
+ }
+
+ // Make sure the class implements the interface.
+ if (!pClassMT->CanCastToNonVariantInterface(DefItfType.GetMethodTable()))
+ {
+ StackSString ssClassName;
+ StackSString ssInvalidItfName;
+ pClassMT->_GetFullyQualifiedNameForClass(ssClassName);
+ DefItfType.GetMethodTable()->_GetFullyQualifiedNameForClass(ssInvalidItfName);
+ COMPlusThrow(kTypeLoadException, IDS_EE_COMDEFITFNOTSUPPORTED,
+ ssClassName.GetUnicode(), ssInvalidItfName.GetUnicode());
+ }
+ }
+
+ // The default interface is valid so return it.
+ *pHndDefClass = DefItfType;
+ return DefaultInterfaceType_Explicit;
+ }
+
+ // If the class's interface type is AutoDispatch or AutoDual then return either the
+ // IClassX for the current class or IDispatch.
+ if (ClassItfType != clsIfNone)
+ {
+ *pHndDefClass = hndClass;
+ return ClassItfType == clsIfAutoDisp ? DefaultInterfaceType_AutoDispatch : DefaultInterfaceType_AutoDual;
+ }
+
+ // The class interface is set to NONE for this level of the hierarchy. So we need to check
+ // to see if this class implements an interface.
+
+ // Search for the first COM visible implemented interface. We start with the most
+ // derived class and work our way up the hierarchy.
+ for (MethodTable *pParentMT = pClassMT->GetParentMethodTable(); pParentMT; pParentMT = pParentMT->GetParentMethodTable())
+ {
+ MethodTable::InterfaceMapIterator it = pClassMT->IterateInterfaceMap();
+ while (it.Next())
+ {
+ MethodTable *pItfMT = it.GetInterfaceInfo()->GetApproxMethodTable(pClassMT->GetLoaderModule());
+
+ // Skip generic interfaces. Classic COM interop does not support these and we don't
+ // use the result of this function in WinRT scenarios. WinRT parameter marshaling
+ // doesn't come here at all because the default interface is always specified using
+ // the DefaultAttribute. Field marshaling does come here but WinRT does not support
+ // fields of reference types other than string.
+ if (!pItfMT->HasInstantiation())
+ {
+ // If the interface is visible from COM and not implemented by our parent,
+ // then use it as the default.
+ if (IsTypeVisibleFromCom(TypeHandle(pItfMT)) && !pParentMT->ImplementsInterface(pItfMT))
+ {
+ *pHndDefClass = TypeHandle(pItfMT);
+ return DefaultInterfaceType_Explicit;
+ }
+ }
+ }
+ }
+
+ // If the class is a COM import with no interfaces, then its default interface will
+ // be IUnknown.
+ if (pClassMT->IsComImport())
+ return DefaultInterfaceType_IUnknown;
+
+ // If we have a managed parent class then return its default interface.
+ MethodTable *pParentClass = pClassMT->GetComPlusParentMethodTable();
+ if (pParentClass)
+ return GetDefaultInterfaceForClassWrapper(TypeHandle(pParentClass), pHndDefClass);
+
+ // Check to see if the class is an extensible RCW.
+ if (pClassMT->IsComObjectType())
+ return DefaultInterfaceType_BaseComClass;
+
+ // The class has no interfaces and is marked as ClassInterfaceType.None.
+ return DefaultInterfaceType_IUnknown;
+}
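+
+// To summarize the lookup above: WinRT and COM-invisible classes short-circuit to
+// IUnknown; an explicit ComDefaultInterfaceAttribute wins after validation; otherwise
+// an AutoDual/AutoDispatch class interface is returned; otherwise the first COM-visible,
+// non-generic interface not implemented by a parent; otherwise a COM import with no
+// interfaces maps to IUnknown; otherwise the managed parent's default interface is used
+// recursively; extensible RCWs fall back to the base COM class; everything else gets
+// IUnknown.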
+
+
+DefaultInterfaceType GetDefaultInterfaceForClassWrapper(TypeHandle hndClass, TypeHandle *pHndDefClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(!hndClass.IsNull());
+ }
+ CONTRACTL_END;
+
+#ifndef CROSSGEN_COMPILE
+ if (!hndClass.IsTypeDesc())
+ {
+ ComCallWrapperTemplate *pTemplate = hndClass.AsMethodTable()->GetComCallWrapperTemplate();
+ if (pTemplate != NULL)
+ {
+ // if CCW template is available, use its cache
+ MethodTable *pDefaultItf;
+ DefaultInterfaceType itfType = pTemplate->GetDefaultInterface(&pDefaultItf);
+
+ *pHndDefClass = TypeHandle(pDefaultItf);
+ return itfType;
+ }
+ }
+#endif // CROSSGEN_COMPILE
+
+ return GetDefaultInterfaceForClassInternal(hndClass, pHndDefClass);
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+HRESULT TryGetDefaultInterfaceForClass(TypeHandle hndClass, TypeHandle *pHndDefClass, DefaultInterfaceType *pDefItfType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(!hndClass.IsNull());
+ PRECONDITION(CheckPointer(pHndDefClass));
+ PRECONDITION(CheckPointer(pDefItfType));
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ HRESULT hr = S_OK;
+ OBJECTREF pThrowable = NULL;
+
+ GCPROTECT_BEGIN(pThrowable)
+ {
+ EX_TRY
+ {
+ *pDefItfType = GetDefaultInterfaceForClassWrapper(hndClass, pHndDefClass);
+ }
+ EX_CATCH
+ {
+ pThrowable = GET_THROWABLE();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (pThrowable != NULL)
+ hr = SetupErrorInfo(pThrowable);
+ }
+ GCPROTECT_END();
+ return hr;
+}
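+
+// TryGetDefaultInterfaceForClass shows the Try* pattern used throughout this file
+// (see also TryGenerateClassItfGuid below): run the throwing worker under EX_TRY,
+// swallow the managed exception, and convert it into a failure HRESULT plus IErrorInfo
+// via SetupErrorInfo so callers on the COM side see an HRESULT instead of an exception.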
+
+// Returns the default interface for a class if it's an explicit interface or the AutoDual
+// class interface. Sets *pbDispatch otherwise. This is the logic used by array marshaling
+// in code:OleVariant::MarshalInterfaceArrayComToOleHelper.
+MethodTable *GetDefaultInterfaceMTForClass(MethodTable *pMT, BOOL *pbDispatch)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(!pMT->IsInterface());
+ PRECONDITION(CheckPointer(pbDispatch));
+ }
+ CONTRACTL_END;
+
+ TypeHandle hndDefItfClass;
+ DefaultInterfaceType DefItfType = GetDefaultInterfaceForClassWrapper(TypeHandle(pMT), &hndDefItfClass);
+
+ switch (DefItfType)
+ {
+ case DefaultInterfaceType_Explicit:
+ case DefaultInterfaceType_AutoDual:
+ {
+ return hndDefItfClass.GetMethodTable();
+ }
+
+ case DefaultInterfaceType_IUnknown:
+ case DefaultInterfaceType_BaseComClass:
+ {
+ *pbDispatch = FALSE;
+ return NULL;
+ }
+
+ case DefaultInterfaceType_AutoDispatch:
+ {
+ *pbDispatch = TRUE;
+ return NULL;
+ }
+
+ default:
+ {
+ _ASSERTE(!"Invalid default interface type!");
+ return NULL;
+ }
+ }
+}
+
+//---------------------------------------------------------------------------
+// This method retrieves the list of source interfaces for a given class.
+void GetComSourceInterfacesForClass(MethodTable *pMT, CQuickArray<MethodTable *> &rItfList)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ const void* pvData;
+ ULONG cbData;
+ CQuickArray<CHAR> qbCurrInterfaces;
+
+ GCX_COOP();
+
+ // Reset the size of the interface list to 0.
+ rItfList.Shrink(0);
+
+ if (pMT->IsWinRTObjectType() || pMT->IsExportedToWinRT())
+ {
+ // classic COM eventing is not supported in WinRT
+ return;
+ }
+
+ // Starting at the specified class MT, retrieve the COM source interfaces
+ // declared at every level of the hierarchy.
+ for (; pMT != NULL; pMT = pMT->GetParentMethodTable())
+ {
+ // See if there is any [source] interface at this level of the hierarchy.
+ hr = pMT->GetMDImport()->GetCustomAttributeByName(pMT->GetCl(), INTEROP_COMSOURCEINTERFACES_TYPE, &pvData, &cbData);
+ IfFailThrow(hr);
+ if (hr == S_OK && cbData > 2)
+ {
+ AppDomain *pCurrDomain = SystemDomain::GetCurrentDomain();
+
+ CustomAttributeParser cap(pvData, cbData);
+ IfFailThrow(cap.SkipProlog());
+
+ while (cap.BytesLeft() != 0)
+ {
+ // Uncompress the current string of source interfaces.
+ BYTE const *pbStr;
+ ULONG cbStr;
+ IfFailThrow(cap.GetData(&pbStr, &cbStr));
+
+ // Allocate a new buffer that will contain the current list of source interfaces.
+ qbCurrInterfaces.ReSizeThrows(cbStr + 1);
+ LPUTF8 strCurrInterfaces = qbCurrInterfaces.Ptr();
+ memcpyNoGCRefs(strCurrInterfaces, pbStr, cbStr);
+ strCurrInterfaces[cbStr] = 0;
+ LPUTF8 pCurrInterfaces = strCurrInterfaces;
+ LPUTF8 pCurrInterfacesEnd = pCurrInterfaces + cbStr + 1;
+
+ while (pCurrInterfaces < pCurrInterfacesEnd && *pCurrInterfaces != 0)
+ {
+ // Load the COM source interface specified in the CA.
+ TypeHandle ItfType;
+ ItfType = TypeName::GetTypeUsingCASearchRules(pCurrInterfaces, pMT->GetAssembly());
+
+ // If the type handle isn't a named type, then throw an exception using
+ // the name of the type obtained from pCurrInterfaces.
+ if (!ItfType.GetMethodTable())
+ {
+ // This should only occur for TypeDesc's.
+ StackSString ssInvalidItfName(SString::Utf8, pCurrInterfaces);
+ DefineFullyQualifiedNameForClassW()
+ COMPlusThrow(kTypeLoadException, IDS_EE_INVALIDCOMSOURCEITF,
+ GetFullyQualifiedNameForClassW(pMT),
+ ssInvalidItfName.GetUnicode());
+ }
+
+ // Otherwise, if the type is not an interface, throw an exception using the actual
+ // name of the type.
+ if (!ItfType.IsInterface())
+ {
+ StackSString ssClassName;
+ StackSString ssInvalidItfName;
+ pMT->_GetFullyQualifiedNameForClass(ssClassName);
+ ItfType.GetMethodTable()->_GetFullyQualifiedNameForClass(ssInvalidItfName);
+ COMPlusThrow(kTypeLoadException, IDS_EE_INVALIDCOMSOURCEITF,
+ ssClassName.GetUnicode(), ssInvalidItfName.GetUnicode());
+ }
+
+ // Ensure the source interface is not generic.
+ if (ItfType.HasInstantiation())
+ {
+ StackSString ssClassName;
+ StackSString ssInvalidItfName;
+ pMT->_GetFullyQualifiedNameForClass(ssClassName);
+ ItfType.GetMethodTable()->_GetFullyQualifiedNameForClass(ssInvalidItfName);
+ COMPlusThrow(kTypeLoadException, IDS_EE_INVALIDCOMSOURCEITF,
+ ssClassName.GetUnicode(), ssInvalidItfName.GetUnicode());
+ }
+
+
+ // Retrieve the IID of the COM source interface.
+ IID ItfIID;
+ ItfType.GetMethodTable()->GetGuid(&ItfIID, TRUE);
+
+ // Go through the list of source interfaces and check to see if the new one is a duplicate.
+ // It can be a duplicate either if it is the same interface or if it has the same IID.
+ BOOL bItfInList = FALSE;
+ for (UINT i = 0; i < rItfList.Size(); i++)
+ {
+ if (rItfList[i] == ItfType.GetMethodTable())
+ {
+ bItfInList = TRUE;
+ break;
+ }
+
+ IID ItfIID2;
+ rItfList[i]->GetGuid(&ItfIID2, TRUE);
+ if (IsEqualIID(ItfIID, ItfIID2))
+ {
+ bItfInList = TRUE;
+ break;
+ }
+ }
+
+ // If the COM source interface is not in the list then add it.
+ if (!bItfInList)
+ {
+ size_t OldSize = rItfList.Size();
+ rItfList.ReSizeThrows(OldSize + 1);
+ rItfList[OldSize] = ItfType.GetMethodTable();
+ }
+
+ // Advance to the next COM source interface in the CA.
+ pCurrInterfaces += strlen(pCurrInterfaces) + 1;
+ }
+ }
+ }
+ }
+}
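+
+// For illustration only (hypothetical attribute use, not part of the original sources):
+// the ComSourceInterfaces blob parsed above is a sequence of counted UTF-8 strings, each
+// holding one or more NUL-terminated type names; e.g. [ComSourceInterfaces(typeof(IFooEvents))]
+// on a class whose interface lives in namespace Ns encodes roughly "Ns.IFooEvents\0".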
+
+
+//--------------------------------------------------------------------------------
+// This method converts a native IEnumVARIANT to a managed IEnumerator.
+OBJECTREF ConvertEnumVariantToMngEnum(IEnumVARIANT *pNativeEnum)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF MngEnum = NULL;
+ OBJECTREF EnumeratorToEnumVariantMarshaler = NULL;
+ GCPROTECT_BEGIN(EnumeratorToEnumVariantMarshaler)
+ {
+ // Retrieve the custom marshaler and the MD to use to convert the IEnumVARIANT.
+ StdMngIEnumerator *pStdMngIEnumInfo = SystemDomain::GetCurrentDomain()->GetMngStdInterfacesInfo()->GetStdMngIEnumerator();
+ MethodDesc *pEnumNativeToManagedMD = pStdMngIEnumInfo->GetCustomMarshalerMD(CustomMarshalerMethods_MarshalNativeToManaged);
+ EnumeratorToEnumVariantMarshaler = pStdMngIEnumInfo->GetCustomMarshaler();
+ MethodDescCallSite enumNativeToManaged(pEnumNativeToManagedMD, &EnumeratorToEnumVariantMarshaler);
+
+ // Prepare the arguments that will be passed to MarshalNativeToManaged.
+ ARG_SLOT MarshalNativeToManagedArgs[] = {
+ ObjToArgSlot(EnumeratorToEnumVariantMarshaler),
+ (ARG_SLOT)pNativeEnum
+ };
+
+ // Retrieve the managed view for the current native interface pointer.
+ MngEnum = enumNativeToManaged.Call_RetOBJECTREF(MarshalNativeToManagedArgs);
+ }
+ GCPROTECT_END();
+
+ return MngEnum;
+}
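+
+// This conversion is what IUInvokeDispMethod below relies on when a DISPID_NEWENUM
+// invocation returns an IEnumVARIANT wrapped in a VT_UNKNOWN variant.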
+
+//--------------------------------------------------------------------------------
+// This method converts an OLE_COLOR to a System.Color.
+void ConvertOleColorToSystemColor(OLE_COLOR SrcOleColor, SYSTEMCOLOR *pDestSysColor)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Retrieve the method desc to use for the current AD.
+ MethodDesc *pOleColorToSystemColorMD =
+ GetAppDomain()->GetMarshalingData()->GetOleColorMarshalingInfo()->GetOleColorToSystemColorMD();
+
+ MethodDescCallSite oleColorToSystemColor(pOleColorToSystemColorMD);
+
+ _ASSERTE(pOleColorToSystemColorMD->HasRetBuffArg());
+
+ ARG_SLOT Args[] =
+ {
+ PtrToArgSlot(pDestSysColor),
+ PtrToArgSlot(SrcOleColor)
+ };
+
+ oleColorToSystemColor.Call(Args);
+}
+
+//--------------------------------------------------------------------------------
+// This method converts a System.Color to an OLE_COLOR.
+OLE_COLOR ConvertSystemColorToOleColor(OBJECTREF *pSrcObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Retrieve the method desc to use for the current AD.
+ MethodDesc *pSystemColorToOleColorMD =
+ GetAppDomain()->GetMarshalingData()->GetOleColorMarshalingInfo()->GetSystemColorToOleColorMD();
+ MethodDescCallSite systemColorToOleColor(pSystemColorToOleColorMD);
+
+ // Set up the args and call the method.
+ SYSTEMCOLOR *pSrcSysColor = (SYSTEMCOLOR *)(*pSrcObj)->UnBox();
+ return systemColorToOleColor.CallWithValueTypes_RetOleColor((const ARG_SLOT *)&pSrcSysColor);
+}
+
+//--------------------------------------------------------------------------------
+// This method generates a stringized version of a class interface that contains
+// the signatures of all the methods and fields.
+ULONG GetStringizedClassItfDef(TypeHandle InterfaceType, CQuickArray<BYTE> &rDef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(!InterfaceType.IsNull());
+ }
+ CONTRACTL_END;
+
+ LPCWSTR szName;
+ ULONG cchName;
+ MethodTable* pIntfMT = InterfaceType.GetMethodTable();
+ PREFIX_ASSUME(pIntfMT != NULL);
+
+ MethodTable* pDeclaringMT = NULL;
+ DWORD nSlots; // Slots on the pseudo interface.
+ mdToken tkMb; // A method or field token.
+ ULONG cbCur;
+ HRESULT hr = S_OK;
+ ULONG i;
+
+ // Should be an actual class.
+ _ASSERTE(!pIntfMT->IsInterface());
+
+ // See what sort of IClassX this class gets.
+ TypeHandle thDefItf;
+ BOOL bGenerateMethods = FALSE;
+ DefaultInterfaceType DefItfType = GetDefaultInterfaceForClassWrapper(TypeHandle(pIntfMT), &thDefItf);
+
+ // The results apply to this class if the thDefItf is this class itself, not a parent class.
+ // A side effect is that [ComVisible(false)] types' guids are generated without members.
+ if (thDefItf.GetMethodTable() == pIntfMT && DefItfType == DefaultInterfaceType_AutoDual)
+ bGenerateMethods = TRUE;
+
+ // Get the name of the class.
+ DefineFullyQualifiedNameForClassW();
+ szName = GetFullyQualifiedNameForClassNestedAwareW(pIntfMT);
+ cchName = (ULONG)wcslen(szName);
+
+ // Start with the interface name.
+ cbCur = cchName * sizeof(WCHAR);
+ rDef.ReSizeThrows(cbCur + sizeof(WCHAR));
+ wcscpy_s(reinterpret_cast<LPWSTR>(rDef.Ptr()), rDef.Size()/sizeof(WCHAR), szName);
+
+ if (bGenerateMethods)
+ {
+ ComMTMemberInfoMap MemberMap(pIntfMT); // The map of members.
+
+ // Retrieve the method properties.
+ MemberMap.Init(sizeof(void*));
+
+ CQuickArray<ComMTMethodProps> &rProps = MemberMap.GetMethods();
+ nSlots = (DWORD)rProps.Size();
+
+ // Now add the methods to the TypeInfo.
+ for (i=0; i<nSlots; ++i)
+ {
+ ComMTMethodProps *pProps = &rProps[i];
+ if (pProps->bMemberVisible)
+ {
+ if (pProps->semantic < FieldSemanticOffset)
+ {
+ pDeclaringMT = pProps->pMeth->GetMethodTable();
+ tkMb = pProps->pMeth->GetMemberDef();
+ cbCur = GetStringizedMethodDef(pDeclaringMT, tkMb, rDef, cbCur);
+ }
+ else
+ {
+ ComCallMethodDesc *pFieldMeth; // A MethodDesc for a field call.
+ FieldDesc *pField; // A FieldDesc.
+ pFieldMeth = reinterpret_cast<ComCallMethodDesc*>(pProps->pMeth);
+ pField = pFieldMeth->GetFieldDesc();
+ pDeclaringMT = pField->GetApproxEnclosingMethodTable();
+ tkMb = pField->GetMemberDef();
+ cbCur = GetStringizedFieldDef(pDeclaringMT, tkMb, rDef, cbCur);
+ }
+ }
+ }
+ }
+
+ // Return the number of bytes.
+ return cbCur;
+} // ULONG GetStringizedClassItfDef()
+
+//--------------------------------------------------------------------------------
+// Helper to get the GUID of a class interface.
+void GenerateClassItfGuid(TypeHandle InterfaceType, GUID *pGuid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(!InterfaceType.IsNull());
+ PRECONDITION(CheckPointer(pGuid));
+ }
+ CONTRACTL_END;
+
+ LPWSTR szName; // Name to turn to a guid.
+ ULONG cchName; // Length of the name (possibly after decoration).
+ CQuickArray<BYTE> rName; // Buffer to accumulate signatures.
+ ULONG cbCur; // Current offset.
+ HRESULT hr = S_OK; // A result.
+
+ cbCur = GetStringizedClassItfDef(InterfaceType, rName);
+
+ // Pad up to a whole WCHAR.
+ if (cbCur % sizeof(WCHAR))
+ {
+ int cbDelta = sizeof(WCHAR) - (cbCur % sizeof(WCHAR));
+ rName.ReSizeThrows(cbCur + cbDelta);
+ memset(rName.Ptr() + cbCur, 0, cbDelta);
+ cbCur += cbDelta;
+ }
+
+ // Point to the new buffer.
+ cchName = cbCur / sizeof(WCHAR);
+ szName = reinterpret_cast<LPWSTR>(rName.Ptr());
+
+ // Generate guid from name.
+ CorGuidFromNameW(pGuid, szName, cchName);
+} // void GenerateClassItfGuid()
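+
+// For illustration only (hypothetical signatures, not part of the original sources):
+// because the GUID is hashed from the stringized member signatures above via
+// CorGuidFromNameW, changing any COM-visible member of an AutoDual class, say from
+// "int Foo()" to "long Foo()", yields a different generated IClassX IID.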
+
+HRESULT TryGenerateClassItfGuid(TypeHandle InterfaceType, GUID *pGuid)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(!InterfaceType.IsNull());
+ PRECONDITION(CheckPointer(pGuid));
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ HRESULT hr = S_OK;
+ OBJECTREF pThrowable = NULL;
+
+ GCPROTECT_BEGIN(pThrowable)
+ {
+ EX_TRY
+ {
+ GenerateClassItfGuid(InterfaceType, pGuid);
+ }
+ EX_CATCH
+ {
+ pThrowable = GET_THROWABLE();
+ }
+ EX_END_CATCH (SwallowAllExceptions);
+
+ if (pThrowable != NULL)
+ hr = SetupErrorInfo(pThrowable);
+ }
+ GCPROTECT_END();
+
+ return hr;
+}
+
+//--------------------------------------------------------------------------------
+// Helper to get the GUID of the typelib that is created from an assembly.
+HRESULT GetTypeLibGuidForAssembly(Assembly *pAssembly, GUID *pGuid)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(CheckPointer(pGuid));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ CQuickArray<BYTE> rName; // String for guid.
+ ULONG cbData; // Size of the string in bytes.
+
+ // Get the GUID from the assembly; if none is set, generate one from the stringized assembly definition.
+ hr = pAssembly->GetManifestImport()->GetItemGuid(TokenFromRid(1, mdtAssembly), pGuid);
+
+ if (*pGuid == GUID_NULL)
+ {
+ // Get the string.
+ IfFailGo(GetStringizedTypeLibGuidForAssembly(pAssembly, rName, 0, &cbData));
+
+ // Pad to a whole WCHAR.
+ if (cbData % sizeof(WCHAR))
+ {
+ IfFailGo(rName.ReSizeNoThrow(cbData + sizeof(WCHAR)-(cbData%sizeof(WCHAR))));
+ while (cbData % sizeof(WCHAR))
+ rName[cbData++] = 0;
+ }
+
+ // Turn into guid
+ CorGuidFromNameW(pGuid, (LPWSTR)rName.Ptr(), cbData/sizeof(WCHAR));
+ }
+
+ErrExit:
+ return hr;
+} // HRESULT GetTypeLibGuidForAssembly()
+
+//--------------------------------------------------------------------------------
+// Helper to get the version of the typelib that is created from an assembly.
+HRESULT GetTypeLibVersionForAssembly(
+ Assembly *pAssembly,
+ USHORT *pMajorVersion,
+ USHORT *pMinorVersion)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(CheckPointer(pMajorVersion));
+ PRECONDITION(CheckPointer(pMinorVersion));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ const BYTE *pbData = NULL; // Pointer to a custom attribute data.
+ ULONG cbData = 0; // Size of custom attribute data.
+
+ if (pAssembly->IsWinMD())
+ {
+ // ignore classic COM interop CA on .winmd
+ hr = S_FALSE;
+ }
+ else
+ {
+ // Check to see if the TypeLibVersionAttribute is set.
+ IfFailGo(pAssembly->GetManifestImport()->GetCustomAttributeByName(TokenFromRid(1, mdtAssembly), INTEROP_TYPELIBVERSION_TYPE, (const void**)&pbData, &cbData));
+ }
+
+ if (hr == S_OK && cbData >= (2 + 2 * sizeof(INT32)))
+ {
+ CustomAttributeParser cap(pbData, cbData);
+ IfFailRet(cap.SkipProlog());
+
+ // Retrieve the major and minor version from the attribute.
+ UINT32 u4;
+ IfFailRet(cap.GetU4(&u4));
+ *pMajorVersion = GET_VERSION_USHORT_FROM_INT(u4);
+ IfFailRet(cap.GetU4(&u4));
+ *pMinorVersion = GET_VERSION_USHORT_FROM_INT(u4);
+ }
+ else
+ {
+ // Use the assembly's major and minor version number.
+ hr = S_OK;
+ pAssembly->GetVersion(pMajorVersion, pMinorVersion, NULL, NULL);
+ }
+
+ // VB6 doesn't deal very well with a typelib version of 0.0, so if that happens
+ // we change it to 1.0.
+ if (*pMajorVersion == 0 && *pMinorVersion == 0)
+ *pMajorVersion = 1;
+
+ErrExit:
+ return hr;
+} // HRESULT GetTypeLibVersionForAssembly()
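+
+// For illustration only (hypothetical values, not part of the original sources):
+// a [TypeLibVersion(2, 5)] attribute stores two UINT32s in the blob; each one is
+// truncated through GET_VERSION_USHORT_FROM_INT, giving major = 2 and minor = 5.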
+
+#endif //#ifndef CROSSGEN_COMPILE
+
+
+//---------------------------------------------------------------------------
+// This method determines if a member is visible from COM.
+BOOL IsMethodVisibleFromCom(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ mdProperty pd;
+ LPCUTF8 pPropName;
+ ULONG uSemantic;
+ mdMethodDef md = pMD->GetMemberDef();
+
+ // See if there is property information for this member.
+ hr = pMD->GetModule()->GetPropertyInfoForMethodDef(md, &pd, &pPropName, &uSemantic);
+ IfFailThrow(hr);
+
+ if (hr == S_OK)
+ {
+ return IsMemberVisibleFromCom(pMD->GetMethodTable(), pd, md);
+ }
+ else
+ {
+ return IsMemberVisibleFromCom(pMD->GetMethodTable(), md, mdTokenNil);
+ }
+}
+
+//---------------------------------------------------------------------------
+// This method determines if a type is visible from COM or not based on
+// its visibility. This version of the method works with a type handle.
+// This version will ignore a type's generic attributes.
+//
+// This API should *never* be called directly!!!
+BOOL SpecialIsGenericTypeVisibleFromCom(TypeHandle hndType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(!hndType.IsNull());
+ }
+ CONTRACTL_END;
+
+ DWORD dwFlags;
+ mdTypeDef tdEnclosingType;
+ HRESULT hr;
+ const BYTE * pVal;
+ ULONG cbVal;
+ MethodTable * pMT = hndType.GetMethodTable();
+ _ASSERTE(pMT);
+
+ mdTypeDef mdType = pMT->GetCl();
+ IMDInternalImport * pInternalImport = pMT->GetMDImport();
+ Assembly * pAssembly = pMT->GetAssembly();
+
+ // If the type is a COM imported interface then it is visible from COM.
+ if (pMT->IsInterface() && pMT->IsComImport())
+ return TRUE;
+
+ // If the type is imported from WinRT (has the tdWindowsRuntime flag set), then it is visible from COM.
+ if (pMT->IsProjectedFromWinRT())
+ return TRUE;
+
+ // If the type is an array, then it is not visible from COM.
+ if (pMT->IsArray())
+ return FALSE;
+
+ // Retrieve the flags for the current type.
+ tdEnclosingType = mdType;
+ if (FAILED(pInternalImport->GetTypeDefProps(tdEnclosingType, &dwFlags, 0)))
+ {
+ return FALSE;
+ }
+
+ // Handle nested types.
+ while (IsTdNestedPublic(dwFlags))
+ {
+ hr = pInternalImport->GetNestedClassProps(tdEnclosingType, &tdEnclosingType);
+ if (FAILED(hr))
+ {
+ return FALSE;
+ }
+
+ // Retrieve the flags for the enclosing type.
+ if (FAILED(pInternalImport->GetTypeDefProps(tdEnclosingType, &dwFlags, 0)))
+ {
+ return FALSE;
+ }
+ }
+
+ // If the outermost type is not visible then the specified type is not visible.
+ if (!IsTdPublic(dwFlags))
+ return FALSE;
+
+ // Check to see if the type has the ComVisible attribute set.
+ hr = pInternalImport->GetCustomAttributeByName(mdType, INTEROP_COMVISIBLE_TYPE, (const void**)&pVal, &cbVal);
+ if (hr == S_OK)
+ {
+ CustomAttributeParser cap(pVal, cbVal);
+ if (FAILED(cap.SkipProlog()))
+ return FALSE;
+
+ UINT8 u1;
+ if (FAILED(cap.GetU1(&u1)))
+ return FALSE;
+
+ return (BOOL)u1;
+ }
+
+ // Check to see if the assembly has the ComVisible attribute set.
+ hr = pAssembly->GetManifestImport()->GetCustomAttributeByName(pAssembly->GetManifestToken(), INTEROP_COMVISIBLE_TYPE, (const void**)&pVal, &cbVal);
+ if (hr == S_OK)
+ {
+ CustomAttributeParser cap(pVal, cbVal);
+ if (FAILED(cap.SkipProlog()))
+ return FALSE;
+
+ UINT8 u1;
+ if (FAILED(cap.GetU1(&u1)))
+ return FALSE;
+
+ return (BOOL)u1;
+ }
+
+ // The type is visible.
+ return TRUE;
+}
+
+
+//---------------------------------------------------------------------------
+// This method determines if a type is visible from COM or not based on
+// its visibility. This version of the method works with a type handle.
+BOOL IsTypeVisibleFromCom(TypeHandle hndType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(!hndType.IsNull());
+ }
+ CONTRACTL_END;
+
+ if (!hndType.SupportsGenericInterop(TypeHandle::Interop_NativeToManaged))
+ {
+ // If the type is a generic type, then it is not visible from COM.
+ if (hndType.HasInstantiation() || hndType.IsGenericVariable())
+ return FALSE;
+ }
+
+ // If the type is collectible, then it is not visible from COM.
+ if (hndType.GetLoaderAllocator()->IsCollectible())
+ return FALSE;
+
+ return SpecialIsGenericTypeVisibleFromCom(hndType);
+}
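+
+// Note that generic instantiations pass through only when the type supports generic
+// WinRT interop (the SupportsGenericInterop check above); all other generics, and any
+// type owned by a collectible LoaderAllocator, report as not visible from COM.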
+
+//---------------------------------------------------------------------------
+// Determines if a method is likely to be used for forward COM/WinRT interop.
+BOOL MethodNeedsForwardComStub(MethodDesc *pMD, DataImage *pImage)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = pMD->GetMethodTable();
+
+ if (pMT->HasInstantiation() && !pMT->SupportsGenericInterop(TypeHandle::Interop_ManagedToNative))
+ {
+ // method is declared on an unsupported generic type -> stub not needed
+ return FALSE;
+ }
+
+ if (pMT->IsProjectedFromWinRT() && pMT->IsComImport() && pMD->IsPrivate())
+ {
+ // private WinRT method -> stub not needed
+ return FALSE;
+ }
+
+ if (pMT->IsWinRTObjectType())
+ {
+ // WinRT runtime class -> stub needed
+ return TRUE;
+ }
+
+ if (pMT->IsWinRTRedirectedInterface(TypeHandle::Interop_ManagedToNative))
+ {
+ if (!pMT->HasInstantiation())
+ {
+ // non-generic redirected interface -> stub needed
+ return TRUE;
+ }
+
+ // Generating stubs for generic redirected interfaces into all assemblies would grow NetFX native images
+ // by several per cent. See BCL\System\Internal.cs if you need to add specific instantiations in mscorlib.
+ DWORD assemblyFlags = pImage->GetModule()->GetAssembly()->GetFlags();
+ if (IsAfContentType_WindowsRuntime(assemblyFlags))
+ {
+ // generic redirected interface while NGENing a .winmd -> stub needed
+ return TRUE;
+ }
+ }
+
+ GUID guid;
+ pMT->GetGuid(&guid, FALSE);
+
+ if (guid != GUID_NULL)
+ {
+ // explicit GUID defined in metadata -> stub needed
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+//---------------------------------------------------------------------------
+// Determines if a method is visible from COM in a way that requires a marshaling
+// stub, i.e. it allows early binding.
+BOOL MethodNeedsReverseComStub(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ BOOL fIsAllowedCtorOrStatic = FALSE;
+ MethodTable *pMT = pMD->GetMethodTable();
+
+ if (pMT->IsInterface())
+ {
+ if (!pMT->IsComImport() && !IsTypeVisibleFromCom(TypeHandle(pMT)))
+ return FALSE;
+
+ if (pMT->HasInstantiation() && !pMT->SupportsGenericInterop(TypeHandle::Interop_NativeToManaged))
+ return FALSE;
+
+ // declaring interface must be InterfaceIsIUnknown or InterfaceIsDual
+ if (pMT->GetComInterfaceType() == ifDispatch)
+ return FALSE;
+
+ Assembly * pAssembly = pMT->GetAssembly();
+ if (pAssembly->IsWinMD() && !pAssembly->IsManagedWinMD())
+ {
+ //
+ // Internal interfaces defined in native winmds can only ever be implemented by native components.
+ // Managed classes won't be able to implement the internal interfaces, and so the reverse COM stubs
+ // are not needed for them.
+ //
+ if (IsTdNotPublic(pMT->GetClass()->GetProtection()))
+ {
+ //
+ // However, we do need CCWs for internal interfaces that define protected members of inheritable classes
+ // (for example, Windows.UI.Xaml.Application implements both IApplication, which we don't need
+ // a CCW for and IApplicationOverrides, which we do need).
+ //
+ if (!pMT->GetWriteableData()->IsOverridingInterface())
+ {
+ return FALSE;
+ }
+ }
+ }
+ }
+ else
+ {
+ if (!IsTypeVisibleFromCom(TypeHandle(pMT)))
+ return FALSE;
+
+ if (pMT->IsDelegate())
+ {
+ // the 'Invoke' method of a WinRT delegate needs stub
+ return ((pMT->IsProjectedFromWinRT() || WinRTTypeNameConverter::IsRedirectedType(pMT)) &&
+ pMD->HasSameMethodDefAs(COMDelegate::FindDelegateInvokeMethod(pMT)));
+ }
+
+ if (pMT->IsExportedToWinRT() && (pMD->IsCtor() || pMD->IsStatic()))
+ {
+ fIsAllowedCtorOrStatic = TRUE;
+ }
+ else
+ {
+ // declaring class must be AutoDual
+ if (pMT->GetComClassInterfaceType() != clsIfAutoDual)
+ return FALSE;
+ }
+ }
+
+ // static methods and ctors are not exposed to COM except for WinRT
+ if (!fIsAllowedCtorOrStatic && (pMD->IsCtor() || pMD->IsStatic()))
+ return FALSE;
+
+ // NGen can't compile stubs for var arg methods
+ if (pMD->IsVarArg())
+ return FALSE;
+
+ return IsMethodVisibleFromCom(pMD);
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+//--------------------------------------------------------------------------------
+// Validate that the given target is valid for the specified type.
+BOOL IsComTargetValidForType(REFLECTCLASSBASEREF* pRefClassObj, OBJECTREF* pTarget)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pRefClassObj));
+ PRECONDITION(CheckPointer(pTarget));
+ }
+ CONTRACTL_END;
+
+ MethodTable* pInvokedMT = (*pRefClassObj)->GetType().GetMethodTable();
+
+ MethodTable* pTargetMT = (*pTarget)->GetTrueMethodTable();
+ _ASSERTE(pTargetMT);
+ PREFIX_ASSUME(pInvokedMT != NULL);
+
+ // If the target class and the invoke class are identical then the invoke is valid.
+ if (pTargetMT == pInvokedMT)
+ return TRUE;
+
+ // We always allow calling InvokeMember on a __ComObject type regardless of the type
+ // of the target object.
+ if (IsComObjectClass((*pRefClassObj)->GetType()))
+ return TRUE;
+
+ // If the class that is being invoked on is an interface then check to see if the
+ // target class supports that interface.
+ if (pInvokedMT->IsInterface())
+ return Object::SupportsInterface(*pTarget, pInvokedMT);
+
+ // Check to see if the target class inherits from the invoked class.
+ while (pTargetMT)
+ {
+ pTargetMT = pTargetMT->GetParentMethodTable();
+ if (pTargetMT == pInvokedMT)
+ {
+ // The target class inherits from the invoked class.
+ return TRUE;
+ }
+ }
+
+ // There is no valid relationship between the invoked and the target classes.
+ return FALSE;
+}
+
+DISPID ExtractStandardDispId(__in_z LPWSTR strStdDispIdMemberName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Find the '=' in the standard DISPID member name; the DISPID digits follow it.
+ // Note: check for NULL before advancing past the '=', otherwise a missing '='
+ // would produce the non-NULL pointer 0x1 and the validation would never fire.
+ LPWSTR strDispId = wcsstr(&strStdDispIdMemberName[STANDARD_DISPID_PREFIX_LENGTH], W("="));
+ if (!strDispId)
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_STD_DISPID_NAME);
+ strDispId++;
+
+ // Validate that the last character of the standard member name is a ].
+ LPWSTR strClosingBracket = wcsstr(strDispId, W("]"));
+ if (!strClosingBracket || (strClosingBracket[1] != 0))
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_STD_DISPID_NAME);
+
+ // Extract the number from the standard DISPID member name.
+ return _wtoi(strDispId);
+}
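+
+// For illustration only (hypothetical input, not part of the original sources):
+// a standard DISPID member name of the form W("[DISPID=-4]") parses to -4
+// (DISPID_NEWENUM); a missing '=' or any text after the closing ']' throws
+// kArgumentException.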
+
+static HRESULT InvokeExHelper(
+ IDispatchEx * pDispEx,
+ DISPID MemberID,
+ LCID lcid,
+ WORD flags,
+ DISPPARAMS * pDispParams,
+ VARIANT* pVarResult,
+ EXCEPINFO * pExcepInfo,
+ IServiceProvider *pspCaller)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ _ASSERTE(pDispEx != NULL);
+
+ struct Param : CallOutFilterParam {
+ HRESULT hr;
+ IDispatchEx * pDispEx;
+ DISPID MemberID;
+ LCID lcid;
+ WORD flags;
+ DISPPARAMS * pDispParams;
+ VARIANT* pVarResult;
+ EXCEPINFO * pExcepInfo;
+ IServiceProvider * pspCaller;
+ }; Param param;
+
+ param.OneShot = TRUE; // Inherited from CallOutFilterParam
+ param.hr = S_OK;
+ param.pDispEx = pDispEx;
+ param.MemberID = MemberID;
+ param.lcid = lcid;
+ param.flags = flags;
+ param.pDispParams = pDispParams;
+ param.pVarResult = pVarResult;
+ param.pExcepInfo = pExcepInfo;
+ param.pspCaller = pspCaller;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ BEGIN_SO_TOLERANT_CODE(GetThread());
+
+ pParam->hr = pParam->pDispEx->InvokeEx(pParam->MemberID,
+ pParam->lcid,
+ pParam->flags,
+ pParam->pDispParams,
+ pParam->pVarResult,
+ pParam->pExcepInfo,
+ pParam->pspCaller);
+
+ END_SO_TOLERANT_CODE;
+ }
+ PAL_EXCEPT_FILTER(CallOutFilter)
+ {
+ _ASSERTE(!"CallOutFilter returned EXECUTE_HANDLER.");
+ }
+ PAL_ENDTRY;
+
+ return param.hr;
+}
+
+static HRESULT InvokeHelper(
+ IDispatch * pDisp,
+ DISPID MemberID,
+ REFIID riid,
+ LCID lcid,
+ WORD flags,
+ DISPPARAMS * pDispParams,
+ VARIANT* pVarResult,
+ EXCEPINFO * pExcepInfo,
+ UINT *piArgErr)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ _ASSERTE(pDisp != NULL);
+
+ struct Param : CallOutFilterParam {
+ HRESULT hr;
+ IDispatch * pDisp;
+ DISPID MemberID;
+ REFIID riid;
+ LCID lcid;
+ WORD flags;
+ DISPPARAMS * pDispParams;
+ VARIANT * pVarResult;
+ EXCEPINFO * pExcepInfo;
+ UINT * piArgErr;
+
+ Param(REFIID _riid) : riid(_riid) {}
+ }; Param param(riid);
+
+ param.OneShot = TRUE; // Inherited from CallOutFilterParam
+ param.hr = S_OK;
+ param.pDisp = pDisp;
+ param.MemberID = MemberID;
+ // param.riid is initialized by the Param constructor, since a REFIID member cannot be reassigned.
+ param.lcid = lcid;
+ param.flags = flags;
+ param.pDispParams = pDispParams;
+ param.pVarResult = pVarResult;
+ param.pExcepInfo = pExcepInfo;
+ param.piArgErr = piArgErr;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ BEGIN_SO_TOLERANT_CODE(GetThread());
+
+ pParam->hr = pParam->pDisp->Invoke(pParam->MemberID,
+ pParam->riid,
+ pParam->lcid,
+ pParam->flags,
+ pParam->pDispParams,
+ pParam->pVarResult,
+ pParam->pExcepInfo,
+ pParam->piArgErr);
+
+ END_SO_TOLERANT_CODE;
+ }
+ PAL_EXCEPT_FILTER(CallOutFilter)
+ {
+ _ASSERTE(!"CallOutFilter returned EXECUTE_HANDLER.");
+ }
+ PAL_ENDTRY;
+
+ return param.hr;
+}
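+
+// Both helpers above share one shape: the arguments are captured in a Param struct,
+// the raw COM call runs inside PAL_TRY guarded by CallOutFilter so that unmanaged
+// exceptions raised by the COM server are filtered rather than unwinding through
+// runtime frames, and the HRESULT travels back through param.hr.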
+
+
+void DispInvokeConvertObjectToVariant(OBJECTREF *pSrcObj, VARIANT *pDestVar, ByrefArgumentInfo *pByrefArgInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pSrcObj));
+ PRECONDITION(IsProtectedByGCFrame (pSrcObj));
+ PRECONDITION(CheckPointer(pDestVar));
+ PRECONDITION(CheckPointer(pByrefArgInfo));
+ }
+ CONTRACTL_END;
+
+ if (pByrefArgInfo->m_bByref)
+ {
+ if (*pSrcObj == NULL)
+ {
+ V_VT(pDestVar) = VT_VARIANT | VT_BYREF;
+ pDestVar->pvarVal = &pByrefArgInfo->m_Val;
+ }
+ else if (MscorlibBinder::IsClass((*pSrcObj)->GetMethodTable(), CLASS__VARIANT_WRAPPER))
+ {
+ OBJECTREF WrappedObj = (*((VARIANTWRAPPEROBJECTREF*)pSrcObj))->GetWrappedObject();
+ GCPROTECT_BEGIN(WrappedObj)
+ {
+ OleVariant::MarshalOleVariantForObject(&WrappedObj, &pByrefArgInfo->m_Val);
+ V_VT(pDestVar) = VT_VARIANT | VT_BYREF;
+ pDestVar->pvarVal = &pByrefArgInfo->m_Val;
+ }
+ GCPROTECT_END();
+ }
+ else
+ {
+ OleVariant::MarshalOleVariantForObject(pSrcObj, &pByrefArgInfo->m_Val);
+ OleVariant::CreateByrefVariantForVariant(&pByrefArgInfo->m_Val, pDestVar);
+ }
+ }
+ else
+ {
+ OleVariant::MarshalOleVariantForObject(pSrcObj, pDestVar);
+ }
+}
+
+static void DoIUInvokeDispMethod(IDispatchEx* pDispEx, IDispatch* pDisp, DISPID MemberID, LCID lcid,
+ WORD flags, DISPPARAMS* pDispParams, VARIANT* pVarResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ UINT iArgErr;
+ EXCEPINFO ExcepInfo;
+ HRESULT hr;
+
+ memset(&ExcepInfo, 0, sizeof(EXCEPINFO));
+
+#ifdef MDA_SUPPORTED
+ MDA_TRIGGER_ASSISTANT(GcManagedToUnmanaged, TriggerGC());
+#endif
+
+ GCX_COOP();
+ OBJECTREF pThrowable = NULL;
+ GCPROTECT_BEGIN(pThrowable);
+ {
+ // Call the method
+ EX_TRY
+ {
+ {
+ // We are about to make calls to COM so switch to preemptive GC.
+ GCX_PREEMP();
+
+ if (pDispEx)
+ {
+ LeaveRuntimeHolder holder(**(size_t**)pDispEx);
+ hr = InvokeExHelper(pDispEx, MemberID, lcid, flags, pDispParams,
+ pVarResult, &ExcepInfo, NULL);
+ }
+ else
+ {
+ LeaveRuntimeHolder holder(**(size_t**)pDisp);
+ hr = InvokeHelper( pDisp, MemberID, IID_NULL, lcid, flags,
+ pDispParams, pVarResult, &ExcepInfo, &iArgErr);
+ }
+ }
+
+#ifdef MDA_SUPPORTED
+ EX_TRY
+ {
+ MDA_TRIGGER_ASSISTANT(GcUnmanagedToManaged, TriggerGC());
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+#endif
+
+ // If the invoke call failed then throw an exception based on the EXCEPINFO.
+ if (FAILED(hr))
+ {
+ if (hr == DISP_E_EXCEPTION)
+ {
+ // This method will free the BSTR's in the EXCEPINFO.
+ COMPlusThrowHR(&ExcepInfo);
+ }
+ else
+ {
+ COMPlusThrowHR(hr);
+ }
+ }
+ }
+ EX_CATCH
+ {
+ // If we get here we need to throw a TargetInvocationException.
+ pThrowable = GET_THROWABLE();
+ _ASSERTE(pThrowable != NULL);
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ if (pThrowable != NULL)
+ {
+ COMPlusThrow(InvokeUtil::CreateTargetExcept(&pThrowable));
+ }
+ }
+ GCPROTECT_END();
+}
+
+
+FORCEINLINE void DispParamHolderRelease(VARIANT* value)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (value)
+ {
+ if (V_VT(value) & VT_BYREF)
+ {
+ VariantHolder TmpVar;
+ OleVariant::ExtractContentsFromByrefVariant(value, &TmpVar);
+ }
+
+ SafeVariantClear(value);
+ }
+}
+
+class DispParamHolder : public Wrapper<VARIANT*, DispParamHolderDoNothing, DispParamHolderRelease, NULL>
+{
+public:
+ DispParamHolder(VARIANT* p = NULL)
+ : Wrapper<VARIANT*, DispParamHolderDoNothing, DispParamHolderRelease, NULL>(p)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ FORCEINLINE void operator=(VARIANT* p)
+ {
+ WRAPPER_NO_CONTRACT;
+ Wrapper<VARIANT*, DispParamHolderDoNothing, DispParamHolderRelease, NULL>::operator=(p);
+ }
+};
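+
+// DispParamHolder is used below in IUInvokeDispMethod: one holder wraps each VARIANTARG
+// in DISPPARAMS so that byref contents and variant resources are released even when
+// marshaling or the Invoke call itself throws.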
+
+
+
+//--------------------------------------------------------------------------------
+// IUInvokeDispMethod converts a set of managed objects to OLE variants and calls
+// IDispatch::Invoke. The result is marshaled back into the managed object pointed
+// to by pRetVal.
+void IUInvokeDispMethod(REFLECTCLASSBASEREF* pRefClassObj, OBJECTREF* pTarget, OBJECTREF* pName, DISPID *pMemberID,
+ OBJECTREF* pArgs, OBJECTREF* pByrefModifiers, OBJECTREF* pNamedArgs, OBJECTREF* pRetVal, LCID lcid, WORD flags, BOOL bIgnoreReturn, BOOL bIgnoreCase)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pTarget));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ UINT i;
+ UINT iSrcArg;
+ UINT iDestArg;
+ VARIANT VarResult;
+ UINT cArgs = 0;
+ UINT cNamedArgs = 0;
+ DISPPARAMS DispParams = {0};
+ DISPID* aDispID = NULL;
+ DISPID MemberID = 0;
+ ByrefArgumentInfo* aByrefArgInfos = NULL;
+ BOOL bSomeArgsAreByref = FALSE;
+ SafeComHolder<IDispatch> pDisp = NULL;
+ SafeComHolder<IDispatchEx> pDispEx = NULL;
+ VariantPtrHolder pVarResult = NULL;
+ NewArrayHolder<DispParamHolder> params = NULL;
+
+ //
+ // Function initialization.
+ //
+
+ SafeVariantInit(&VarResult);
+
+
+ // InteropUtil.h does not know about anything other than OBJECTREF so
+ // convert the OBJECTREF's to their real type.
+
+ STRINGREF* pStrName = (STRINGREF*) pName;
+ PTRARRAYREF* pArrArgs = (PTRARRAYREF*) pArgs;
+ PTRARRAYREF* pArrByrefModifiers = (PTRARRAYREF*) pByrefModifiers;
+ PTRARRAYREF* pArrNamedArgs = (PTRARRAYREF*) pNamedArgs;
+ MethodTable* pInvokedMT = (*pRefClassObj)->GetType().GetMethodTable();
+ PREFIX_ASSUME(pInvokedMT != NULL);
+
+ // Retrieve the total count of arguments.
+ if (*pArrArgs != NULL)
+ cArgs = (*pArrArgs)->GetNumComponents();
+
+ // Retrieve the count of named arguments.
+ if (*pArrNamedArgs != NULL)
+ cNamedArgs = (*pArrNamedArgs)->GetNumComponents();
+
+ // Validate that the target is valid for the specified type.
+ if (!IsComTargetValidForType(pRefClassObj, pTarget))
+ COMPlusThrow(kTargetException, W("RFLCT.Targ_ITargMismatch"));
+
+ // If the invoked type is an interface, make sure it is IDispatch based.
+ if (pInvokedMT->IsInterface())
+ {
+ CorIfaceAttr ifaceType = pInvokedMT->GetComInterfaceType();
+ if (!IsDispatchBasedItf(ifaceType))
+ COMPlusThrow(kTargetInvocationException, IDS_EE_INTERFACE_NOT_DISPATCH_BASED);
+ }
+
+ // Validate that the target is a COM object.
+ _ASSERTE((*pTarget)->GetMethodTable()->IsComObjectType());
+
+ //
+ // Initialize the DISPPARAMS structure.
+ //
+ if (cArgs > 0)
+ {
+ UINT cPositionalArgs = cArgs - cNamedArgs;
+
+ DispParams.cArgs = cArgs;
+ DispParams.rgvarg = (VARIANTARG *)_alloca(cArgs * sizeof(VARIANTARG));
+ params = new DispParamHolder[cArgs];
+
+ // Initialize all the variants.
+ GCX_PREEMP();
+ for (i = 0; i < cArgs; i++)
+ {
+ SafeVariantInit(&DispParams.rgvarg[i]);
+ params[i] = &DispParams.rgvarg[i];
+ }
+ }
+
+
+ //
+ // Retrieve the IDispatch interface that will be invoked on.
+ //
+
+ if (pInvokedMT->IsInterface())
+ {
+ // The invoked type is a dispatch or dual interface so we will make the
+ // invocation on it.
+ pDisp = (IDispatch *)ComObject::GetComIPFromRCWThrowing(pTarget, pInvokedMT);
+ }
+ else
+ {
+ // A class was passed in so we will make the invocation on the default
+ // IDispatch for the COM component.
+
+ RCWHolder pRCW(GetThread());
+ RCWPROTECT_BEGIN(pRCW, *pTarget);
+
+ // Retrieve the IDispatch pointer from the wrapper.
+ pDisp = (IDispatch*)pRCW->GetIDispatch();
+ if (!pDisp)
+ COMPlusThrow(kTargetInvocationException, IDS_EE_NO_IDISPATCH_ON_TARGET);
+
+ // If we aren't ignoring case, then we need to try to QI for IDispatchEx to
+ // be able to use IDispatchEx::GetDispID(), which has a flag to control case
+ // sensitivity.
+ if (!bIgnoreCase && cNamedArgs == 0)
+ {
+ RCW_VTABLEPTR(pRCW);
+ hr = SafeQueryInterface(pDisp, IID_IDispatchEx, (IUnknown**)&pDispEx);
+ if (FAILED(hr))
+ pDispEx = NULL;
+ }
+
+ RCWPROTECT_END(pRCW);
+ }
+ _ASSERTE((IUnknown*)pDisp != NULL);
+
+
+ //
+ // Prepare the DISPID's that will be passed to invoke.
+ //
+
+ if (pMemberID && (*pMemberID != DISPID_UNKNOWN) && (cNamedArgs == 0))
+ {
+ // The caller specified a member ID and we don't have any named arguments so
+ // we can simply use the member ID the caller specified.
+ MemberID = *pMemberID;
+ }
+ else
+ {
+ int strNameLength = (*pStrName)->GetStringLength();
+
+ // Check if we are invoking on the default member.
+ if (strNameLength == 0)
+ {
+ // Set the DISPID to 0 (default member).
+ MemberID = 0;
+
+ _ASSERTE(cNamedArgs == 0);
+ if (cNamedArgs != 0)
+ COMPlusThrow(kNotSupportedException,W("NotSupported_IDispInvokeDefaultMemberWithNamedArgs"));
+ }
+ else
+ {
+ //
+ // Create an array of strings that will be passed to GetIDsOfNames().
+ //
+
+ UINT cNamesToConvert = cNamedArgs + 1;
+ LPWSTR strTmpName = NULL;
+
+ // Allocate the array of strings to convert, the array of pinned handles and the
+ // array of converted DISPIDs.
+ size_t allocSize = cNamesToConvert * sizeof(LPWSTR);
+ if (allocSize < cNamesToConvert)
+ COMPlusThrowArgumentOutOfRange(W("namedParameters"), W("ArgumentOutOfRange_Capacity"));
+ LPWSTR *aNamesToConvert = (LPWSTR *)_alloca(allocSize);
+
+ allocSize = cNamesToConvert * sizeof(DISPID);
+ if (allocSize < cNamesToConvert)
+ COMPlusThrowArgumentOutOfRange(W("namedParameters"), W("ArgumentOutOfRange_Capacity"));
+ aDispID = (DISPID *)_alloca(allocSize);
+
+ // The first name to convert is the name of the method itself.
+ aNamesToConvert[0] = (*pStrName)->GetBuffer();
+
+ // Check to see if the name is for a standard DISPID.
+ if (SString::_wcsnicmp(aNamesToConvert[0], STANDARD_DISPID_PREFIX, STANDARD_DISPID_PREFIX_LENGTH) == 0)
+ {
+ // The name is for a standard DISPID so extract it from the name.
+ MemberID = ExtractStandardDispId(aNamesToConvert[0]);
+
+ // Make sure there are no named arguments to convert.
+ if (cNamedArgs > 0)
+ {
+ STRINGREF *pNamedArgsData = (STRINGREF *)(*pArrNamedArgs)->GetDataPtr();
+
+ for (i = 0; i < cNamedArgs; i++)
+ {
+ // Get the buffer of the next named argument.
+ strTmpName = pNamedArgsData[i]->GetBuffer();
+
+ // Check to see if the name is for a standard DISPID.
+ if (SString::_wcsnicmp(strTmpName, STANDARD_DISPID_PREFIX, STANDARD_DISPID_PREFIX_LENGTH) != 0)
+ COMPlusThrow(kArgumentException, IDS_EE_NON_STD_NAME_WITH_STD_DISPID);
+
+ // The name is for a standard DISPID so extract it from the name.
+ aDispID[i + 1] = ExtractStandardDispId(strTmpName);
+ }
+ }
+ }
+ else
+ {
+ BOOL fGotIt = FALSE;
+ BOOL fIsNonGenericComObject = pInvokedMT->IsInterface() || (pInvokedMT != g_pBaseCOMObject && pInvokedMT->IsComObjectType());
+ BOOL fUseCache = fIsNonGenericComObject && !(IUnknown*)pDispEx && strNameLength <= ReflectionMaxCachedNameLength && cNamedArgs == 0;
+ DispIDCacheElement vDispIDElement;
+
+ // If the object is not a generic COM object and the member meets the criteria to be
+ // in the cache then look up the DISPID in the cache.
+ if (fUseCache)
+ {
+ vDispIDElement.pMT = pInvokedMT;
+ vDispIDElement.strNameLength = strNameLength;
+ vDispIDElement.lcid = lcid;
+ wcscpy_s(vDispIDElement.strName, COUNTOF(vDispIDElement.strName), aNamesToConvert[0]);
+
+ // Only look up if the cache has already been created.
+ DispIDCache* pDispIDCache = GetAppDomain()->GetRefDispIDCache();
+ fGotIt = pDispIDCache->GetFromCache (&vDispIDElement, MemberID);
+ }
+
+ if (!fGotIt)
+ {
+ NewArrayHolder<PinningHandleHolder> ahndPinnedObjs = new PinningHandleHolder[cNamesToConvert];
+ ahndPinnedObjs[0] = GetAppDomain()->CreatePinningHandle((OBJECTREF)*pStrName);
+
+ // Copy the named arguments into the array of names to convert.
+ if (cNamedArgs > 0)
+ {
+ STRINGREF *pNamedArgsData = (STRINGREF *)(*pArrNamedArgs)->GetDataPtr();
+
+ for (i = 0; i < cNamedArgs; i++)
+ {
+ // Pin the string object and retrieve a pointer to its data.
+ ahndPinnedObjs[i + 1] = GetAppDomain()->CreatePinningHandle((OBJECTREF)pNamedArgsData[i]);
+ aNamesToConvert[i + 1] = pNamedArgsData[i]->GetBuffer();
+ }
+ }
+
+ //
+ // Call GetIDsOfNames to convert the names to DISPID's
+ //
+
+ {
+ // We are about to make calls to COM so switch to preemptive GC.
+ GCX_PREEMP();
+
+ if ((IUnknown*)pDispEx)
+ {
+ // We should only get here if we are doing a case sensitive lookup and
+ // we don't have any named arguments.
+ _ASSERTE(cNamedArgs == 0);
+ _ASSERTE(!bIgnoreCase);
+
+ // We managed to retrieve an IDispatchEx IP so we will use it to
+ // retrieve the DISPID.
+ BSTRHolder bstrTmpName = SysAllocString(aNamesToConvert[0]);
+ if (!bstrTmpName)
+ COMPlusThrowOM();
+
+ LeaveRuntimeHolder lrh(**(size_t**)(IUnknown*)pDispEx);
+ hr = pDispEx->GetDispID(bstrTmpName, fdexNameCaseSensitive, aDispID);
+ }
+ else
+ {
+ // Call GetIDsOfNames() to retrieve the DISPIDs of the method and of the arguments.
+ LeaveRuntimeHolder lrh(**(size_t**)(IUnknown*)pDisp);
+ hr = pDisp->GetIDsOfNames(
+ IID_NULL,
+ aNamesToConvert,
+ cNamesToConvert,
+ lcid,
+ aDispID
+ );
+ }
+ }
+
+ if (FAILED(hr))
+ {
+ // Check to see if the user wants to invoke the new enum member.
+ if (cNamesToConvert == 1 && SString::_wcsicmp(aNamesToConvert[0], GET_ENUMERATOR_METHOD_NAME) == 0)
+ {
+ // Invoke the new enum member.
+ MemberID = DISPID_NEWENUM;
+ }
+ else
+ {
+ // The name is unknown.
+ COMPlusThrowHR(hr);
+ }
+ }
+ else
+ {
+ // The member ID is the first element of the array we got back from GetIDsOfNames.
+ MemberID = aDispID[0];
+ }
+
+ // If the object is not a generic COM object and the member meets the criteria to be
+ // in the cache then insert the member in the cache.
+ if (fUseCache)
+ {
+ DispIDCache *pDispIDCache = GetAppDomain()->GetRefDispIDCache();
+ pDispIDCache->AddToCache (&vDispIDElement, MemberID);
+ }
+ }
+ }
+ }
+
+ // Store the member ID if the caller passed in a place to store it.
+ if (pMemberID)
+ *pMemberID = MemberID;
+ }
+
+
+ //
+ // Fill in the DISPPARAMS structure.
+ //
+
+ if (cArgs > 0)
+ {
+ // Allocate the byref argument information.
+ aByrefArgInfos = (ByrefArgumentInfo*)_alloca(cArgs * sizeof(ByrefArgumentInfo));
+ memset(aByrefArgInfos, 0, cArgs * sizeof(ByrefArgumentInfo));
+
+ // Set the byref bit on the arguments that have the byref modifier.
+ if (*pArrByrefModifiers != NULL)
+ {
+ BYTE *aByrefModifiers = (BYTE*)(*pArrByrefModifiers)->GetDataPtr();
+ for (i = 0; i < cArgs; i++)
+ {
+ if (aByrefModifiers[i])
+ {
+ aByrefArgInfos[i].m_bByref = TRUE;
+ bSomeArgsAreByref = TRUE;
+ }
+ }
+ }
+
+ // We need to protect the temporary object that will be used to convert from
+ // the managed objects to OLE variants.
+ OBJECTREF TmpObj = NULL;
+ GCPROTECT_BEGIN(TmpObj)
+ {
+ if (!(flags & (DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF)))
+ {
+ // For anything other than a put or a putref we just use the specified
+ // named arguments.
+ DispParams.cNamedArgs = cNamedArgs;
+ DispParams.rgdispidNamedArgs = (cNamedArgs == 0) ? NULL : &aDispID[1];
+
+ // Convert the named arguments from COM+ to OLE. These arguments are in the same order
+ // on both sides.
+ for (i = 0; i < cNamedArgs; i++)
+ {
+ iSrcArg = i;
+ iDestArg = i;
+ TmpObj = ((OBJECTREF*)(*pArrArgs)->GetDataPtr())[iSrcArg];
+ DispInvokeConvertObjectToVariant(&TmpObj, &DispParams.rgvarg[iDestArg], &aByrefArgInfos[iSrcArg]);
+ }
+
+ // Convert the unnamed arguments. These need to be presented in reverse order to IDispatch::Invoke().
+ for (iSrcArg = cNamedArgs, iDestArg = cArgs - 1; iSrcArg < cArgs; iSrcArg++, iDestArg--)
+ {
+ TmpObj = ((OBJECTREF*)(*pArrArgs)->GetDataPtr())[iSrcArg];
+ DispInvokeConvertObjectToVariant(&TmpObj, &DispParams.rgvarg[iDestArg], &aByrefArgInfos[iSrcArg]);
+ }
+ }
+ else
+ {
+ // If we are doing a property put then we need to set the DISPID of the
+ // argument to DISP_PROPERTYPUT if there is at least one argument.
+ DispParams.cNamedArgs = cNamedArgs + 1;
+ DispParams.rgdispidNamedArgs = (DISPID*)_alloca((cNamedArgs + 1) * sizeof(DISPID));
+
+ // Fill in the array of named arguments.
+ DispParams.rgdispidNamedArgs[0] = DISPID_PROPERTYPUT;
+ for (i = 1; i < cNamedArgs; i++)
+ DispParams.rgdispidNamedArgs[i] = aDispID[i];
+
+ // The last argument from reflection becomes the first argument that must be passed to IDispatch.
+ iSrcArg = cArgs - 1;
+ iDestArg = 0;
+ TmpObj = ((OBJECTREF*)(*pArrArgs)->GetDataPtr())[iSrcArg];
+ DispInvokeConvertObjectToVariant(&TmpObj, &DispParams.rgvarg[iDestArg], &aByrefArgInfos[iSrcArg]);
+
+ // Convert the named arguments from COM+ to OLE. These arguments are in the same order
+ // on both sides.
+ for (i = 0; i < cNamedArgs; i++)
+ {
+ iSrcArg = i;
+ iDestArg = i + 1;
+ TmpObj = ((OBJECTREF*)(*pArrArgs)->GetDataPtr())[iSrcArg];
+ DispInvokeConvertObjectToVariant(&TmpObj, &DispParams.rgvarg[iDestArg], &aByrefArgInfos[iSrcArg]);
+ }
+
+ // Convert the unnamed arguments. These need to be presented in reverse order to IDispatch::Invoke().
+ for (iSrcArg = cNamedArgs, iDestArg = cArgs - 1; iSrcArg < cArgs - 1; iSrcArg++, iDestArg--)
+ {
+ TmpObj = ((OBJECTREF*)(*pArrArgs)->GetDataPtr())[iSrcArg];
+ DispInvokeConvertObjectToVariant(&TmpObj, &DispParams.rgvarg[iDestArg], &aByrefArgInfos[iSrcArg]);
+ }
+ }
+ }
+ GCPROTECT_END();
+ }
+ else
+ {
+ // There are no arguments.
+ DispParams.cArgs = cArgs;
+ DispParams.cNamedArgs = 0;
+ DispParams.rgdispidNamedArgs = NULL;
+ DispParams.rgvarg = NULL;
+ }
+
+ // If we're calling on DISPID=-4, then pass both METHOD and PROPERTYGET
+ if (MemberID == DISPID_NEWENUM)
+ {
+ _ASSERTE((flags & DISPATCH_METHOD) && "Expected DISPATCH_METHOD to be set.");
+ flags |= DISPATCH_METHOD | DISPATCH_PROPERTYGET;
+ }
+
+ //
+ // Call invoke on the target's IDispatch.
+ //
+
+ if (!bIgnoreReturn)
+ pVarResult = &VarResult;
+
+ DoIUInvokeDispMethod(pDispEx, pDisp, MemberID, lcid, flags, &DispParams, pVarResult);
+
+
+ //
+ // Return value handling and cleanup.
+ //
+
+ // Back propagate any byref args.
+ if (bSomeArgsAreByref)
+ {
+ OBJECTREF TmpObj = NULL;
+ GCPROTECT_BEGIN(TmpObj)
+ {
+ for (i = 0; i < cArgs; i++)
+ {
+ if (aByrefArgInfos[i].m_bByref)
+ {
+ // Convert the variant back to an object.
+ OleVariant::MarshalObjectForOleVariant(&aByrefArgInfos[i].m_Val, &TmpObj);
+ (*pArrArgs)->SetAt(i, TmpObj);
+ }
+ }
+ }
+ GCPROTECT_END();
+ }
+
+ if (!bIgnoreReturn)
+ {
+ if (MemberID == DISPID_NEWENUM)
+ {
+ //
+ // Use a custom marshaler to convert the IEnumVARIANT to an IEnumerator.
+ //
+
+ // Start by making sure that the variant we got back contains an IP.
+ if ((VarResult.vt != VT_UNKNOWN) || !VarResult.punkVal)
+ COMPlusThrow(kInvalidCastException, IDS_EE_INVOKE_NEW_ENUM_INVALID_RETURN);
+
+ // Have the custom marshaler do the conversion.
+ *pRetVal = ConvertEnumVariantToMngEnum((IEnumVARIANT *)VarResult.punkVal);
+ }
+ else
+ {
+ // Convert the return variant to a COR variant.
+ OleVariant::MarshalObjectForOleVariant(&VarResult, pRetVal);
+ }
+ }
+}
+
+#if defined(FEATURE_COMINTEROP_UNMANAGED_ACTIVATION) && defined(FEATURE_CLASSIC_COMINTEROP)
+//-------------------------------------------------------------
+// Returns a ComClass reflection class that wraps the IClassFactory.
+void GetComClassFromProgID(STRINGREF srefProgID, STRINGREF srefServer, OBJECTREF *pRef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(srefProgID != NULL);
+ PRECONDITION(pRef != NULL);
+ }
+ CONTRACTL_END;
+
+ NewArrayHolder<WCHAR> wszProgID;
+ NewArrayHolder<WCHAR> wszServer;
+ HRESULT hr = S_OK;
+ MethodTable* pMT = NULL;
+ CLSID clsid = {0};
+ BOOL bServerIsLocal = (srefServer == NULL);
+
+ //
+ // Allocate strings for the ProgID and the server.
+ //
+
+ int len = srefProgID->GetStringLength();
+
+ wszProgID = new WCHAR[len+1];
+
+ if (len)
+        memcpy(wszProgID, srefProgID->GetBuffer(), len * sizeof(WCHAR));
+ wszProgID[len] = W('\0');
+
+ if (srefServer != NULL)
+ {
+ len = srefServer->GetStringLength();
+
+ wszServer = new WCHAR[len+1];
+
+ if (len)
+            memcpy(wszServer, srefServer->GetBuffer(), len * sizeof(WCHAR));
+ wszServer[len] = W('\0');
+ }
+
+
+ //
+ // Call GetCLSIDFromProgID() to convert the ProgID to a CLSID.
+ //
+
+ EnsureComStarted();
+
+ {
+ GCX_PREEMP();
+ hr = GetCLSIDFromProgID(wszProgID, &clsid);
+ }
+
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+
+ //
+ // If no server name has been specified, see if we can find the well known
+ // managed class for this CLSID.
+ //
+
+ if (bServerIsLocal)
+ {
+ BOOL fAssemblyInReg = FALSE;
+        // @TODO(DM): Do we really need to be this forgiving? We should
+ // look into letting the type load exceptions percolate
+ // up to the user instead of swallowing them and using __ComObject.
+ EX_TRY
+ {
+ pMT = GetTypeForCLSID(clsid, &fAssemblyInReg);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+ }
+
+ if (pMT != NULL)
+ {
+ //
+ // There is a managed class for this ProgID.
+ //
+
+ *pRef = pMT->GetManagedClassObject();
+ }
+ else
+ {
+        // Check if we already have it in the hash.
+ OBJECTHANDLE hRef;
+ ClassFactoryInfo ClassFactInfo;
+ ClassFactInfo.m_clsid = clsid;
+ ClassFactInfo.m_strServerName = wszServer;
+ EEClassFactoryInfoHashTable *pClassFactHash = GetAppDomain()->GetClassFactHash();
+
+ if (pClassFactHash->GetValue(&ClassFactInfo, (HashDatum *)&hRef))
+ {
+ *pRef = ObjectFromHandle(hRef);
+ }
+ else
+ {
+ GetComClassHelper(pRef, pClassFactHash, &ClassFactInfo, wszProgID);
+ }
+ }
+
+ // If we made it this far *pRef better be set.
+ _ASSERTE(*pRef != NULL);
+}
+
+//-------------------------------------------------------------
+// Returns a ComClass reflection class that wraps the IClassFactory.
+void GetComClassFromCLSID(REFCLSID clsid, STRINGREF srefServer, OBJECTREF *pRef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(pRef != NULL);
+ }
+ CONTRACTL_END;
+
+ NewArrayHolder<WCHAR> wszServer;
+ HRESULT hr = S_OK;
+ MethodTable* pMT = NULL;
+ BOOL bServerIsLocal = (srefServer == NULL);
+
+ //
+    // Allocate a string for the server.
+ //
+
+ if (srefServer != NULL)
+ {
+ int len = srefServer->GetStringLength();
+
+ wszServer = new WCHAR[len+1];
+
+ if (len)
+            memcpy(wszServer, srefServer->GetBuffer(), len * sizeof(WCHAR));
+
+ wszServer[len] = W('\0');
+ }
+
+
+ //
+ // If no server name has been specified, see if we can find the well known
+ // managed class for this CLSID.
+ //
+
+ if (bServerIsLocal)
+ {
+        // @TODO(DM): Do we really need to be this forgiving? We should
+ // look into letting the type load exceptions percolate
+ // up to the user instead of swallowing them and using __ComObject.
+ EX_TRY
+ {
+ pMT = GetTypeForCLSID(clsid);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+ }
+
+ if (pMT != NULL)
+ {
+ //
+ // There is a managed class for this CLSID.
+ //
+
+ *pRef = pMT->GetManagedClassObject();
+ }
+ else
+ {
+        // Check if we already have it in the hash.
+ OBJECTHANDLE hRef;
+ ClassFactoryInfo ClassFactInfo;
+ ClassFactInfo.m_clsid = clsid;
+ ClassFactInfo.m_strServerName = wszServer;
+ EEClassFactoryInfoHashTable *pClassFactHash = GetAppDomain()->GetClassFactHash();
+
+ if (pClassFactHash->GetValue(&ClassFactInfo, (HashDatum*) &hRef))
+ {
+ *pRef = ObjectFromHandle(hRef);
+ }
+ else
+ {
+ GetComClassHelper(pRef, pClassFactHash, &ClassFactInfo, NULL);
+ }
+ }
+
+ // If we made it this far *pRef better be set.
+ _ASSERTE(*pRef != NULL);
+}
+
+void GetComClassHelper(OBJECTREF *pRef, EEClassFactoryInfoHashTable *pClassFactHash, ClassFactoryInfo *pClassFactInfo, __in_opt __in_z WCHAR *wszProgID)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(ThrowOutOfMemory());
+ PRECONDITION(CheckPointer(pRef));
+ PRECONDITION(CheckPointer(pClassFactHash));
+ PRECONDITION(CheckPointer(pClassFactInfo));
+ PRECONDITION(CheckPointer(wszProgID, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ OBJECTHANDLE hRef;
+ AppDomain *pDomain = GetAppDomain();
+
+ CrstHolder ch(pDomain->GetRefClassFactCrst());
+
+    // Check again now that we hold the lock; another thread may have inserted the entry.
+ if (pClassFactHash->GetValue(pClassFactInfo, (HashDatum *)&hRef))
+ {
+ *pRef = ObjectFromHandle(hRef);
+ }
+ else
+ {
+ //
+ // There is no managed class for this CLSID
+ // so we will create a ComClassFactory to
+ // represent it.
+ //
+
+ NewHolder<ComClassFactory> pComClsFac = ComClassFactoryCreator::Create(pClassFactInfo->m_clsid);
+ pComClsFac->SetManagedVersion();
+
+ NewArrayHolder<WCHAR> wszRefProgID = NULL;
+ if (wszProgID)
+ {
+ size_t len = wcslen(wszProgID)+1;
+ wszRefProgID = new WCHAR[len];
+ wcscpy_s(wszRefProgID, len, wszProgID);
+ }
+
+ NewArrayHolder<WCHAR> wszRefServer = NULL;
+ if (pClassFactInfo->m_strServerName)
+ {
+ size_t len = wcslen(pClassFactInfo->m_strServerName)+1;
+ wszRefServer = new WCHAR[len];
+ wcscpy_s(wszRefServer, len, pClassFactInfo->m_strServerName);
+ }
+
+ pComClsFac->Init(wszRefProgID, wszRefServer, NULL);
+ AllocateComClassObject(pComClsFac, pRef);
+
+ // Insert to hash.
+ hRef = pDomain->CreateHandle(*pRef);
+ pClassFactHash->InsertValue(pClassFactInfo, (LPVOID)hRef);
+
+ // Make sure the hash code is working.
+ _ASSERTE (pClassFactHash->GetValue(pClassFactInfo, (HashDatum *)&hRef));
+
+ wszRefProgID.SuppressRelease();
+ wszRefServer.SuppressRelease();
+ pComClsFac.SuppressRelease();
+ }
+}
+
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION && FEATURE_CLASSIC_COMINTEROP
+
+#endif //#ifndef CROSSGEN_COMPILE
+
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+//-------------------------------------------------------------
+// Check if a ComClassFactory has been set up for this class;
+// if not, set one up.
+ClassFactoryBase *GetComClassFactory(MethodTable* pClassMT)
+{
+ CONTRACT (ClassFactoryBase*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(ThrowOutOfMemory());
+ PRECONDITION(CheckPointer(pClassMT));
+ PRECONDITION(pClassMT->IsComObjectType() || pClassMT->IsExportedToWinRT());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ if (!pClassMT->IsExportedToWinRT())
+ {
+        // Work our way up the hierarchy until we find the first COM import type.
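+        // Illustrative example: for a managed class C deriving from a typelib-imported
+        // coclass wrapper, this walk stops at that ComImport base, whose CLSID is then
+        // used to create the class factory below.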
+ while (!pClassMT->IsComImport())
+ {
+ pClassMT = pClassMT->GetParentMethodTable();
+ _ASSERTE(pClassMT != NULL);
+ _ASSERTE(pClassMT->IsComObjectType());
+ }
+ }
+
+    // Check if a COM class factory has already been set up for this class.
+ ClassFactoryBase *pClsFac = pClassMT->GetComClassFactory();
+
+ if (pClsFac == NULL)
+ {
+ //
+ // Collectible types do not support com interop
+ //
+ if (pClassMT->Collectible())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleCOM"));
+ }
+
+ NewHolder<ClassFactoryBase> pNewFactory;
+
+ if (pClassMT->IsExportedToWinRT())
+ {
+ WinRTManagedClassFactory *pWinRTMngClsFac = new WinRTManagedClassFactory(pClassMT);
+ pNewFactory = pWinRTMngClsFac;
+
+ pWinRTMngClsFac->Init();
+ }
+ else if (pClassMT->IsProjectedFromWinRT())
+ {
+ WinRTClassFactory *pWinRTClsFac = new WinRTClassFactory(pClassMT);
+ pNewFactory = pWinRTClsFac;
+
+ pWinRTClsFac->Init();
+ }
+ else
+ {
+#ifdef FEATURE_WINDOWSPHONE
+ //
+ // On the phone, anyone can activate WinRT objects, but only platform code can do legacy COM interop.
+ // (Hosts can override this.)
+ //
+ if (!pClassMT->GetModule()->GetFile()->GetAssembly()->IsProfileAssembly() &&
+ !GetAppDomain()->EnablePInvokeAndClassicComInterop())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_UserCOM"));
+ }
+#endif //FEATURE_WINDOWSPHONE
+
+ GUID guid;
+ pClassMT->GetGuid(&guid, TRUE);
+
+ ComClassFactory *pComClsFac = ComClassFactoryCreator::Create(guid);
+
+ pNewFactory = pComClsFac;
+
+ pComClsFac->Init(NULL, NULL, pClassMT);
+ }
+
+ // store the class factory in EE Class
+ if (!pClassMT->SetComClassFactory(pNewFactory))
+ {
+ // another thread beat us to it
+ pNewFactory = pClassMT->GetComClassFactory();
+ }
+
+ pClsFac = pNewFactory.Extract();
+ }
+
+ RETURN pClsFac;
+}
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+
+//-------------------------------------------------------------------
+// void InitializeComInterop()
+// Called from EEStartup to initialize COM interop specific data
+// structures.
+//-------------------------------------------------------------------
+void InitializeComInterop()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ InitializeSListHead(&RCW::s_RCWStandbyList);
+ ComCall::Init();
+#ifdef _TARGET_X86_
+ ComPlusCall::Init();
+#endif
+#ifndef CROSSGEN_COMPILE
+ CtxEntryCache::Init();
+ ComCallWrapperTemplate::Init();
+#ifdef _DEBUG
+ IntializeInteropLogging();
+#endif //_DEBUG
+#endif //CROSSGEN_COMPILE
+}
+
+// Try to load a WinRT type.
+TypeHandle GetWinRTType(SString* ssTypeName, BOOL bThrowIfNotFound)
+{
+ CONTRACT (TypeHandle)
+ {
+ MODE_ANY;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACT_END;
+
+ TypeHandle typeHandle;
+
+ SString ssAssemblyName(SString::Utf8Literal, "WindowsRuntimeAssemblyName, ContentType=WindowsRuntime");
+ DomainAssembly *pAssembly = LoadDomainAssembly(&ssAssemblyName, NULL,
+#ifdef FEATURE_HOSTED_BINDER
+ NULL,
+#endif
+ bThrowIfNotFound, FALSE, ssTypeName);
+ if (pAssembly != NULL)
+ {
+ typeHandle = TypeName::GetTypeFromAssembly(*ssTypeName, pAssembly->GetAssembly(), bThrowIfNotFound);
+ }
+
+ RETURN typeHandle;
+}
+
+// Makes an IRoSimpleMetaDataBuilder callback for a runtime class.
+// static
+HRESULT WinRTGuidGenerator::MetaDataLocator::LocateTypeWithDefaultInterface(MethodTable *pMT, LPCWSTR pszName, IRoSimpleMetaDataBuilder &metaDataDestination)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(!pMT->IsInterface());
+ PRECONDITION(CheckPointer(pszName));
+ }
+ CONTRACTL_END;
+
+ MethodTable *pDefItfMT = pMT->GetDefaultWinRTInterface();
+ if (pDefItfMT == NULL)
+ {
+ StackSString ss;
+ TypeString::AppendType(ss, TypeHandle(pMT));
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, IDS_EE_WINRT_IID_NODEFAULTINTERFACE, ss.GetUnicode());
+ }
+
+ DefineFullyQualifiedNameForClassW();
+
+ GUID iid;
+ pDefItfMT->GetGuid(&iid, FALSE);
+
+ if (pDefItfMT->HasInstantiation())
+ {
+ SArray<BYTE> namesBuf;
+ PCWSTR *pNamePointers;
+ COUNT_T cNames;
+ PopulateNames(pDefItfMT, namesBuf, pNamePointers, cNames);
+
+ // runtime class with generic default interface
+ return metaDataDestination.SetRuntimeClassParameterizedDefault(
+ pszName,
+ cNames,
+ pNamePointers);
+ }
+ else
+ {
+ LPCWSTR pszDefItfName = GetFullyQualifiedNameForClassW_WinRT(pDefItfMT);
+
+ // runtime class with non-generic default interface
+ return metaDataDestination.SetRuntimeClassSimpleDefault(
+ pszName,
+ pszDefItfName,
+ &iid);
+ }
+}
+
+// Makes an IRoSimpleMetaDataBuilder callback for a structure.
+// static
+HRESULT WinRTGuidGenerator::MetaDataLocator::LocateStructure(MethodTable *pMT, LPCWSTR pszName, IRoSimpleMetaDataBuilder &metaDataDestination)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(pszName));
+ }
+ CONTRACTL_END;
+
+ SArray<BYTE> namesBuf;
+ COUNT_T cNames = 0;
+
+ ApproxFieldDescIterator fieldIterator(pMT, ApproxFieldDescIterator::INSTANCE_FIELDS);
+ for (FieldDesc *pFD = fieldIterator.Next(); pFD != NULL; pFD = fieldIterator.Next())
+ {
+ TypeHandle th = pFD->GetApproxFieldTypeHandleThrowing();
+ if (th.IsTypeDesc())
+ {
+ // WinRT structures should not have TypeDesc fields
+ IfFailThrowBF(E_FAIL, BFA_BAD_SIGNATURE, pMT->GetModule());
+ }
+
+ PopulateNamesAppendTypeName(th.AsMethodTable(), namesBuf, cNames);
+ }
+
+ PCWSTR *pNamePointers;
+ PopulateNamesAppendNamePointers(pMT, namesBuf, pNamePointers, cNames);
+
+ return metaDataDestination.SetStruct(
+ pszName,
+ cNames,
+ pNamePointers);
+}
+
+
+//
+// Tables of information about the redirected types used to set up GUID marshaling
+//
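+// Each table below is built by defining a subset of the DEFINE_PROJECTED_* macros and
+// re-including winrtprojectedtypes.h (the X-macro pattern), which keeps every table
+// index-aligned with WinMDAdapter::RedirectedTypeIndex.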
+
+struct RedirectedEnumInfo
+{
+ LPCWSTR wszBackingField;
+};
+
+#define DEFINE_PROJECTED_TYPE(szWinRTNS, szWinRTName, szClrNS, szClrName, nClrAsmIdx, nContractAsmIdx, nWinRTIndex, nClrIndex, nWinMDTypeKind) \
+ { nullptr },
+#define DEFINE_PROJECTED_ENUM(szWinRTNamespace, szWinRTName, szClrNamespace, szClrName, nClrAssemblyIndex, nContractAsmIdx, WinRTRedirectedTypeIndex, ClrRedirectedTypeIndex, szBackingFieldSize) \
+ { L ## szBackingFieldSize },
+
+static const RedirectedEnumInfo g_redirectedEnumInfo[WinMDAdapter::RedirectedTypeIndex_Count] =
+{
+#include "winrtprojectedtypes.h"
+};
+
+#undef DEFINE_PROJECTED_TYPE
+#undef DEFINE_PROJECTED_ENUM
+
+struct RedirectedPInterfaceInfo
+{
+ DWORD cGenericParameters;
+ const GUID IID;
+};
+
+#define DEFINE_PROJECTED_TYPE(szWinRTNS, szWinRTName, szClrNS, szClrName, nClrAsmIdx, nContractAsmIdx, nWinRTIndex, nClrIndex, nWinMDTypeKind) \
+ { 0, { 0 } },
+#define DEFINE_PROJECTED_INTERFACE(szWinRTNamespace, szWinRTName, szClrNamespace, szClrName, nClrAssemblyIndex, nContractAsmIdx, WinRTRedirectedTypeIndex, ClrRedirectedTypeIndex, PIID) \
+ { 0, PIID },
+#define DEFINE_PROJECTED_PINTERFACE(szWinRTNamespace, szWinRTName, szClrNamespace, szClrName, nClrAssemblyIndex, nContractAsmIdx, WinRTRedirectedTypeIndex, ClrRedirectedTypeIndex, GenericTypeParameterCount, PIID) \
+ { GenericTypeParameterCount, PIID },
+
+static const RedirectedPInterfaceInfo g_redirectedPInterfaceInfo[] =
+{
+#include "winrtprojectedtypes.h"
+};
+
+#undef DEFINE_PROJECTED_TYPE
+#undef DEFINE_PROJECTED_INTERFACE
+#undef DEFINE_PROJECTED_PINTERFACE
+
+struct RedirectedPDelegateInfo
+{
+ const DWORD cGenericParameters;
+ const GUID IID;
+};
+
+#define DEFINE_PROJECTED_TYPE(szWinRTNS, szWinRTName, szClrNS, szClrName, nClrAsmIdx, nContractAsmIdx, nWinRTIndex, nClrIndex, nWinMDTypeKind) \
+ { 0, { 0 } },
+#define DEFINE_PROJECTED_DELEGATE(szWinRTNamespace, szWinRTName, szClrNamespace, szClrName, nClrAssemblyIndex, nContractAsmIdx, WinRTRedirectedTypeIndex, ClrRedirectedTypeIndex, PIID) \
+ { 0, PIID },
+#define DEFINE_PROJECTED_PDELEGATE(szWinRTNamespace, szWinRTName, szClrNamespace, szClrName, nClrAssemblyIndex, nContractAsmIdx, WinRTRedirectedTypeIndex, ClrRedirectedTypeIndex, GenericTypeParameterCount, PIID) \
+ { GenericTypeParameterCount, PIID },
+
+static const RedirectedPDelegateInfo g_redirectedPDelegateInfo[] =
+{
+#include "winrtprojectedtypes.h"
+};
+
+#undef DEFINE_PROJECTED_TYPE
+#undef DEFINE_PROJECTED_DELEGATE
+#undef DEFINE_PROJECTED_PDELEGATE
+
+struct RedirectedRuntimeclassInfo
+{
+    LPCWSTR wszDefaultInterfaceName;
+ const GUID IID;
+};
+
+#define DEFINE_PROJECTED_TYPE(szWinRTNS, szWinRTName, szClrNS, szClrName, nClrAsmIdx, nContractAsmIdx, nWinRTIndex, nClrIndex, nWinMDTypeKind) \
+ { nullptr, { 0 } },
+#define DEFINE_PROJECTED_RUNTIMECLASS(szWinRTNamespace, szWinRTName, szClrNamespace, szClrName, nClrAssemblyIndex, nContractAsmIdx, WinRTRedirectedTypeIndex, ClrRedirectedTypeIndex, szDefaultInterfaceName, DefaultInterfaceIID) \
+ { L ## szDefaultInterfaceName, DefaultInterfaceIID },
+
+static RedirectedRuntimeclassInfo const g_redirectedRuntimeclassInfo[] =
+{
+#include "winrtprojectedtypes.h"
+};
+
+#undef DEFINE_PROJECTED_TYPE
+#undef DEFINE_PROJECTED_RUNTIMECLASS
+
+struct RedirectedStructInfo
+{
+ const DWORD cFields;
+ const LPCWSTR *pwzFields;
+};
+
+#define DEFINE_PROJECTED_TYPE(szWinRTNS, szWinRTName, szClrNS, szClrName, nClrAsmIdx, nContractAsmIdx, nWinRTIndex, nClrIndex, nWinMDTypeKind)
+#define DEFINE_PROJECTED_STRUCT(szWinRTNamespace, szWinRTName, szClrNamespace, szClrName, nClrAssemblyIndex, nContractAsmIdx, WinRTRedirectedTypeIndex, ClrRedirectedTypeIndex, FieldSizes) \
+static const LPCWSTR g_ ## WinRTRedirectedTypeIndex ## _Fields[] = { FieldSizes };
+#define DEFINE_PROJECTED_JUPITER_STRUCT(szWinRTNamespace, szWinRTName, szClrNamespace, szClrName, nClrAssemblyIndex, nContractAsmIdx, WinRTRedirectedTypeIndex, ClrRedirectedTypeIndex, FieldSizes) \
+static const LPCWSTR g_ ## WinRTRedirectedTypeIndex ## _Fields[] = { FieldSizes };
+#include "winrtprojectedtypes.h"
+#undef DEFINE_PROJECTED_TYPE
+#undef DEFINE_PROJECTED_STRUCT
+#undef DEFINE_PROJECTED_JUPITER_STRUCT
+
+#define DEFINE_PROJECTED_TYPE(szWinRTNS, szWinRTName, szClrNS, szClrName, nClrAsmIdx, nContractAsmIdx, nWinRTIndex, nClrIndex, nWinMDTypeKind) \
+ { 0, nullptr },
+#define DEFINE_PROJECTED_STRUCT(szWinRTNamespace, szWinRTName, szClrNamespace, szClrName, nClrAssemblyIndex, nContractAsmIdx, WinRTRedirectedTypeIndex, ClrRedirectedTypeIndex, FieldSizes) \
+ { COUNTOF(g_ ## WinRTRedirectedTypeIndex ## _Fields), g_ ## WinRTRedirectedTypeIndex ## _Fields },
+#define DEFINE_PROJECTED_JUPITER_STRUCT(szWinRTNamespace, szWinRTName, szClrNamespace, szClrName, nClrAssemblyIndex, nContractAsmIdx, WinRTRedirectedTypeIndex, ClrRedirectedTypeIndex, FieldSizes) \
+ { COUNTOF(g_ ## WinRTRedirectedTypeIndex ## _Fields), g_ ## WinRTRedirectedTypeIndex ## _Fields },
+
+static const RedirectedStructInfo g_redirectedStructInfo[WinMDAdapter::RedirectedTypeIndex_Count] =
+{
+#include "winrtprojectedtypes.h"
+};
+
+#undef DEFINE_PROJECTED_TYPE
+#undef DEFINE_PROJECTED_STRUCT
+#undef DEFINE_PROJECTED_JUPITER_STRUCT
+
+// Makes an IRoSimpleMetaDataBuilder callback for a redirected type, or returns S_FALSE.
+// static
+HRESULT WinRTGuidGenerator::MetaDataLocator::LocateRedirectedType(
+ MethodTable * pMT,
+ IRoSimpleMetaDataBuilder & metaDataDestination)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ WinMDAdapter::RedirectedTypeIndex nRedirectedTypeIndex;
+ if (!WinRTTypeNameConverter::ResolveRedirectedType(pMT, &nRedirectedTypeIndex))
+ {
+ // this is not a redirected type
+ return S_FALSE;
+ }
+
+ WinMDAdapter::WinMDTypeKind typeKind;
+ WinMDAdapter::GetRedirectedTypeInfo(nRedirectedTypeIndex, nullptr, nullptr, nullptr, nullptr, nullptr, &typeKind);
+ switch (typeKind)
+ {
+ case WinMDAdapter::WinMDTypeKind_Attribute:
+ {
+ // not a runtime class -> throw
+ StackSString ss;
+ TypeString::AppendType(ss, TypeHandle(pMT));
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, IDS_EE_WINRT_IID_NODEFAULTINTERFACE, ss.GetUnicode());
+ }
+
+ case WinMDAdapter::WinMDTypeKind_Enum:
+ return metaDataDestination.SetEnum(WinMDAdapter::GetRedirectedTypeFullWinRTName(nRedirectedTypeIndex),
+ g_redirectedEnumInfo[nRedirectedTypeIndex].wszBackingField);
+
+ case WinMDAdapter::WinMDTypeKind_Delegate:
+ return metaDataDestination.SetDelegate(g_redirectedPDelegateInfo[nRedirectedTypeIndex].IID);
+
+ case WinMDAdapter::WinMDTypeKind_PDelegate:
+ return metaDataDestination.SetParameterizedDelegate(g_redirectedPDelegateInfo[nRedirectedTypeIndex].IID,
+ g_redirectedPDelegateInfo[nRedirectedTypeIndex].cGenericParameters);
+
+ case WinMDAdapter::WinMDTypeKind_Interface:
+ return metaDataDestination.SetWinRtInterface(g_redirectedPInterfaceInfo[nRedirectedTypeIndex].IID);
+
+ case WinMDAdapter::WinMDTypeKind_PInterface:
+ return metaDataDestination.SetParameterizedInterface(g_redirectedPInterfaceInfo[nRedirectedTypeIndex].IID,
+ g_redirectedPInterfaceInfo[nRedirectedTypeIndex].cGenericParameters);
+
+ case WinMDAdapter::WinMDTypeKind_Runtimeclass:
+ return metaDataDestination.SetRuntimeClassSimpleDefault(WinMDAdapter::GetRedirectedTypeFullWinRTName(nRedirectedTypeIndex),
+                                                              g_redirectedRuntimeclassInfo[nRedirectedTypeIndex].wszDefaultInterfaceName,
+ &g_redirectedRuntimeclassInfo[nRedirectedTypeIndex].IID);
+
+ case WinMDAdapter::WinMDTypeKind_Struct:
+ return metaDataDestination.SetStruct(WinMDAdapter::GetRedirectedTypeFullWinRTName(nRedirectedTypeIndex),
+ g_redirectedStructInfo[nRedirectedTypeIndex].cFields,
+ g_redirectedStructInfo[nRedirectedTypeIndex].pwzFields);
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+HRESULT STDMETHODCALLTYPE WinRTGuidGenerator::MetaDataLocator::Locate(PCWSTR nameElement, IRoSimpleMetaDataBuilder &metaDataDestination) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(nameElement));
+ }
+ CONTRACTL_END;
+
+ // All names that we feed to RoGetParameterizedTypeInstanceIID begin with 'T'
+ // which is followed by the MethodTable address printed as a pointer (in hex).
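+    // Illustrative example: "T00007FFA12345678" names the open generic type whose
+    // MethodTable lives at that address; its type arguments follow as separate name elements.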
+ MethodTable *pMT;
+ if (swscanf_s(nameElement, W("T%p"), (LPVOID *)&pMT) != 1)
+ {
+ // Except that it could be a field from a redirected structure
+ if (wcscmp(nameElement, W("Windows.Foundation.TimeSpan")) == 0)
+ {
+ LPCWSTR pwszFields[] = { W("Int64") };
+ return metaDataDestination.SetStruct(nameElement, COUNTOF(pwszFields), pwszFields);
+ }
+ else if (wcscmp(nameElement, W("Windows.UI.Xaml.DurationType")) == 0)
+ {
+ return metaDataDestination.SetEnum(nameElement, W("Int32"));
+ }
+ else if (wcscmp(nameElement, W("Windows.UI.Xaml.GridUnitType")) == 0)
+ {
+ return metaDataDestination.SetEnum(nameElement, W("Int32"));
+ }
+ else if (wcscmp(nameElement, W("Windows.UI.Xaml.Interop.TypeKind")) == 0)
+ {
+ return metaDataDestination.SetEnum(nameElement, W("Int32"));
+ }
+ else if (wcscmp(nameElement, W("Windows.UI.Xaml.Media.Animation.RepeatBehaviorType")) == 0)
+ {
+ return metaDataDestination.SetEnum(nameElement, W("Int32"));
+ }
+ else if (wcscmp(nameElement, W("Windows.Foundation.Numerics.Vector3")) == 0)
+ {
+ LPCWSTR pwszFields[] = { W("Single"), W("Single"), W("Single") };
+ return metaDataDestination.SetStruct(nameElement, COUNTOF(pwszFields), pwszFields);
+ }
+
+ return E_FAIL;
+ }
+
+ // do a check for a redirected type first
+ HRESULT hr = LocateRedirectedType(pMT, metaDataDestination);
+ if (hr == S_OK || FAILED(hr))
+ {
+ // already handled by LocateRedirectedType
+ return hr;
+ }
+
+ GUID iid;
+ DefineFullyQualifiedNameForClassW();
+
+ if (pMT->IsValueType())
+ {
+ if (pMT->IsEnum())
+ {
+ // enum
+ StackSString ssBaseType;
+ VERIFY(WinRTTypeNameConverter::AppendWinRTNameForPrimitiveType(MscorlibBinder::GetElementType(pMT->GetInternalCorElementType()), ssBaseType));
+
+ return metaDataDestination.SetEnum(
+ GetFullyQualifiedNameForClassW_WinRT(pMT),
+ ssBaseType.GetUnicode());
+ }
+ else
+ {
+ // struct
+ return LocateStructure(
+ pMT,
+ GetFullyQualifiedNameForClassW_WinRT(pMT),
+ metaDataDestination);
+ }
+ }
+ else
+ {
+ if (pMT->IsInterface())
+ {
+ pMT->GetGuid(&iid, FALSE);
+ if (pMT->HasInstantiation())
+ {
+ // generic interface
+ return metaDataDestination.SetParameterizedInterface(
+ iid,
+ pMT->GetNumGenericArgs());
+ }
+ else
+ {
+ // non-generic interface
+ return metaDataDestination.SetWinRtInterface(iid);
+ }
+ }
+ else
+ {
+ if (pMT->IsDelegate())
+ {
+ pMT->GetGuid(&iid, FALSE);
+ if (pMT->HasInstantiation())
+ {
+ // generic delegate
+ return metaDataDestination.SetParameterizedDelegate(
+ iid,
+ pMT->GetNumGenericArgs());
+ }
+ else
+ {
+ // non-generic delegate
+ return metaDataDestination.SetDelegate(iid);
+ }
+ }
+ else
+ {
+ // runtime class
+ return LocateTypeWithDefaultInterface(
+ pMT,
+ GetFullyQualifiedNameForClassW_WinRT(pMT),
+ metaDataDestination);
+ }
+ }
+ }
+}
+
+
+void WinRTGuidGenerator::PopulateNames(MethodTable *pMT, SArray<BYTE> &namesBuf, PCWSTR* &pszNames, COUNT_T &cNames)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ cNames = 0;
+
+ // Fill namesBuf with a pile of strings.
+ PopulateNamesAppendTypeName(pMT, namesBuf, cNames);
+ PopulateNamesAppendNamePointers(pMT, namesBuf, pszNames, cNames);
+}
+
+void WinRTGuidGenerator::PopulateNamesAppendNamePointers(MethodTable *pMT, SArray<BYTE> &namesBuf, PCWSTR* &pszNames, COUNT_T cNames)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ // Get pointers to internal strings
+    COUNT_T cbNamesOld = (COUNT_T)ALIGN_UP(namesBuf.GetCount(), sizeof(PWSTR)); // End of strings is not necessarily pointer-aligned; align so that the follow-on pointers are aligned.
+ COUNT_T cbNamePointers = cNames * sizeof(PWSTR); // How much space do we need for the pointers to the names?
+ COUNT_T cbNamesNew = cbNamesOld + cbNamePointers; // Total space.
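+    // Resulting buffer layout (illustrative):
+    //   [ name0\0 name1\0 ... | padding to pointer alignment | pszNames[0] ... pszNames[cNames-1] ]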
+
+ BYTE *pBuffer = namesBuf.OpenRawBuffer(cbNamesNew);
+
+    // Scan through the strings and build a list of pointers to them. This assumes that the strings are separated by a single null character.
+ PWSTR pszName = (PWSTR)pBuffer;
+ pszNames = (PCWSTR*)(pBuffer + cbNamesOld);
+    for (COUNT_T i = 0; i < cNames; i++)
+ {
+ pszNames[i] = pszName;
+ pszName += wcslen(pszName) + 1;
+ }
+
+ namesBuf.CloseRawBuffer(cbNamesNew);
+}
+
+void WinRTGuidGenerator::PopulateNamesAppendTypeName(MethodTable *pMT, SArray<BYTE> &namesBuf, COUNT_T &cNames)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ SmallStackSString name;
+
+#ifdef _DEBUG
+ pMT->CheckLoadLevel(CLASS_LOAD_EXACTPARENTS);
+#endif // _DEBUG
+
+ if (!WinRTTypeNameConverter::AppendWinRTNameForPrimitiveType(pMT, name))
+ {
+ if (pMT->HasInstantiation())
+ {
+ // get the typical instantiation
+ TypeHandle typicalInst = ClassLoader::LoadTypeDefThrowing(pMT->GetModule(),
+ pMT->GetCl(),
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef
+ , tdNoTypes
+ , CLASS_LOAD_EXACTPARENTS
+ );
+
+ name.Printf(W("T%p"), typicalInst.AsPtr());
+ }
+ else
+ {
+ name.Printf(W("T%p"), (void *)pMT);
+ }
+ }
+
+ COUNT_T cbNamesOld = namesBuf.GetCount();
+ COUNT_T cbNewName = (COUNT_T)(name.GetCount() + 1) * 2;
+ COUNT_T cbNamesNew = cbNamesOld + cbNewName;
+ memcpy(namesBuf.OpenRawBuffer(cbNamesNew) + cbNamesOld, name.GetUnicode(), cbNewName);
+ namesBuf.CloseRawBuffer(cbNamesNew);
+ cNames++;
+
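+    // Illustrative example: a generic instantiation recurses below, so a type such as
+    // SomeItf`2<String, Int32> contributes { "T<open-type MethodTable address>", "String", "Int32" }.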
+ if (pMT->HasInstantiation())
+ {
+ Instantiation inst = pMT->GetInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ PopulateNamesAppendTypeName(inst[i].GetMethodTable(), namesBuf, cNames);
+ }
+ }
+}
+
+// We need to be able to compute IIDs of generic interfaces for WinRT interop even on Win7 when we NGEN.
+// Otherwise Framework assemblies that contain projected WinRT types would end up with different native
+// images depending on the OS they were compiled on.
+namespace ParamInstanceAPI_StaticallyLinked
+{
+ // make sure that paraminstanceapi.h can be dropped in without extensive modifications
+ namespace std
+ {
+ static const NoThrow nothrow = ::nothrow;
+ }
+
+#pragma warning(push)
+#pragma warning (disable: 4640)
+ #include "paraminstanceapi.h"
+#pragma warning(pop)
+}
+
+// Although the IRoMetaDataLocator and IRoSimpleMetaDataBuilder interfaces may currently be structurally
+// equivalent, use proper wrappers instead of dirty casts. This will trigger compile errors if the two
+// implementations diverge from each other in the future.
+class MetaDataLocatorWrapper : public ParamInstanceAPI_StaticallyLinked::IRoMetaDataLocator
+{
+ class SimpleMetaDataBuilderWrapper : public ::IRoSimpleMetaDataBuilder
+ {
+ ParamInstanceAPI_StaticallyLinked::IRoSimpleMetaDataBuilder &m_destination;
+
+ public:
+ SimpleMetaDataBuilderWrapper(ParamInstanceAPI_StaticallyLinked::IRoSimpleMetaDataBuilder &destination)
+ : m_destination(destination)
+ { }
+
+ STDMETHOD(SetWinRtInterface)(GUID iid)
+ { WRAPPER_NO_CONTRACT; return m_destination.SetWinRtInterface(iid); }
+
+ STDMETHOD(SetDelegate)(GUID iid)
+ { WRAPPER_NO_CONTRACT; return m_destination.SetDelegate(iid); }
+
+ STDMETHOD(SetInterfaceGroupSimpleDefault)(PCWSTR name, PCWSTR defaultInterfaceName, const GUID *defaultInterfaceIID)
+ { WRAPPER_NO_CONTRACT; return m_destination.SetInterfaceGroupSimpleDefault(name, defaultInterfaceName, defaultInterfaceIID); }
+
+ STDMETHOD(SetInterfaceGroupParameterizedDefault)(PCWSTR name, UINT32 elementCount, PCWSTR *defaultInterfaceNameElements)
+ { WRAPPER_NO_CONTRACT; return m_destination.SetInterfaceGroupParameterizedDefault(name, elementCount, defaultInterfaceNameElements); }
+
+ STDMETHOD(SetRuntimeClassSimpleDefault)(PCWSTR name, PCWSTR defaultInterfaceName, const GUID *defaultInterfaceIID)
+ { WRAPPER_NO_CONTRACT; return m_destination.SetRuntimeClassSimpleDefault(name, defaultInterfaceName, defaultInterfaceIID); }
+
+ STDMETHOD(SetRuntimeClassParameterizedDefault)(PCWSTR name, UINT32 elementCount, const PCWSTR *defaultInterfaceNameElements)
+ { WRAPPER_NO_CONTRACT; return m_destination.SetRuntimeClassParameterizedDefault(name, elementCount, const_cast<PCWSTR *>(defaultInterfaceNameElements)); }
+
+ STDMETHOD(SetStruct)(PCWSTR name, UINT32 numFields, const PCWSTR *fieldTypeNames)
+ { WRAPPER_NO_CONTRACT; return m_destination.SetStruct(name, numFields, const_cast<PCWSTR *>(fieldTypeNames)); }
+
+ STDMETHOD(SetEnum)(PCWSTR name, PCWSTR baseType)
+ { WRAPPER_NO_CONTRACT; return m_destination.SetEnum(name, baseType); }
+
+ STDMETHOD(SetParameterizedInterface)(GUID piid, UINT32 numArgs)
+ { WRAPPER_NO_CONTRACT; return m_destination.SetParameterizedInterface(piid, numArgs); }
+
+ STDMETHOD(SetParameterizedDelegate)(GUID piid, UINT32 numArgs)
+ { WRAPPER_NO_CONTRACT; return m_destination.SetParameterizedDelegate(piid, numArgs); }
+ };
+
+ ::IRoMetaDataLocator &m_locator;
+
+public:
+ MetaDataLocatorWrapper(::IRoMetaDataLocator &locator)
+ : m_locator(locator)
+ { }
+
+ STDMETHOD(Locate)(PCWSTR nameElement, ParamInstanceAPI_StaticallyLinked::IRoSimpleMetaDataBuilder &destination) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SimpleMetaDataBuilderWrapper destinationWrapper(destination);
+ return m_locator.Locate(nameElement, destinationWrapper);
+ }
+};
+
+
+//--------------------------------------------------------------------------
+// pGuid is filled with the constructed IID by the function.
+// static
+void WinRTGuidGenerator::ComputeGuidForGenericType(MethodTable *pMT, GUID *pGuid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(pMT->SupportsGenericInterop(TypeHandle::Interop_NativeToManaged));
+ }
+ CONTRACTL_END;
+
+ // throw a nice exception if the instantiation is not WinRT-legal
+ if (!pMT->IsLegalNonArrayWinRTType())
+ {
+ StackSString ss;
+ TypeString::AppendType(ss, TypeHandle(pMT));
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, IDS_EE_WINRT_IID_ILLEGALTYPE, ss.GetUnicode());
+ }
+
+ // create an array of name elements describing the type
+ SArray<BYTE> namesBuf;
+ PCWSTR *pNamePointers;
+ COUNT_T cNames;
+ PopulateNames(pMT, namesBuf, pNamePointers, cNames);
+
+ // pass the array to the REX API
+ MetaDataLocator metadataLocator;
+#ifndef CROSSGEN_COMPILE
+ if (WinRTSupported())
+ {
+ IfFailThrow(RoGetParameterizedTypeInstanceIID(
+ cNames,
+ pNamePointers,
+ metadataLocator,
+ pGuid,
+ NULL));
+
+#ifdef _DEBUG
+ // assert that the two implementations computed the same Guid
+ GUID pGuidForAssert;
+
+ MetaDataLocatorWrapper metadataLocatorWrapper(metadataLocator);
+ IfFailThrow(ParamInstanceAPI_StaticallyLinked::RoGetParameterizedTypeInstanceIID(
+ cNames,
+ pNamePointers,
+ metadataLocatorWrapper,
+ &pGuidForAssert,
+ NULL));
+
+ _ASSERTE_MSG(*pGuid == pGuidForAssert, "Guid computed by Win8 API does not match the one computed by statically linked RoGetParameterizedTypeInstanceIID");
+#endif // _DEBUG
+ }
+ else
+#endif //#ifndef CROSSGEN_COMPILE
+ {
+        // we should not be calling this on downlevel platforms outside of NGEN
+ _ASSERTE(GetAppDomain()->IsCompilationDomain());
+
+ MetaDataLocatorWrapper metadataLocatorWrapper(metadataLocator);
+ IfFailThrow(ParamInstanceAPI_StaticallyLinked::RoGetParameterizedTypeInstanceIID(
+ cNames,
+ pNamePointers,
+ metadataLocatorWrapper,
+ pGuid,
+ NULL));
+ }
+}
+
+// Returns MethodTable (typical instantiation) of the mscorlib copy of the specified redirected WinRT interface.
+MethodTable *WinRTInterfaceRedirector::GetWinRTTypeForRedirectedInterfaceIndex(WinMDAdapter::RedirectedTypeIndex index)
+{
+ CONTRACT(MethodTable *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ BinderClassID id = s_rInterfaceStubInfos[GetStubInfoIndex(index)].m_WinRTInterface;
+
+ if ((id & NON_MSCORLIB_MARKER) == 0)
+ {
+ // the redirected interface lives in mscorlib
+ RETURN MscorlibBinder::GetClass(id);
+ }
+ else
+ {
+ // the redirected interface lives in some other Framework assembly
+ const NonMscorlibRedirectedInterfaceInfo *pInfo = &s_rNonMscorlibInterfaceInfos[id & ~NON_MSCORLIB_MARKER];
+ RETURN LoadTypeFromRedirectedAssembly(pInfo->m_AssemblyIndex, pInfo->m_wzWinRTInterfaceTypeName);
+ }
+}
+
+// Loads the type with the given name from the specified redirected Framework assembly; returns NULL if the assembly is not loaded.
+MethodTable *WinRTInterfaceRedirector::LoadTypeFromRedirectedAssembly(WinMDAdapter::FrameworkAssemblyIndex index, LPCWSTR wzTypeName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Assembly *pAssembly;
+
+ if (!::GetAppDomain()->FindRedirectedAssemblyFromIndexIfLoaded(index, &pAssembly))
+ return NULL;
+
+ return TypeName::GetTypeFromAssembly(wzTypeName, pAssembly).GetMethodTable();
+}
+
+// Loads the method with the given name from the specified type in a redirected Framework assembly.
+MethodDesc *WinRTInterfaceRedirector::LoadMethodFromRedirectedAssembly(WinMDAdapter::FrameworkAssemblyIndex index, LPCWSTR wzTypeName, LPCUTF8 szMethodName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = LoadTypeFromRedirectedAssembly(index, wzTypeName);
+ return MemberLoader::FindMethodByName(pMT, szMethodName);
+}
+
+#ifdef _DEBUG
+void WinRTInterfaceRedirector::VerifyRedirectedInterfaceStubs()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Verify signatures of all stub methods by calling GetStubMethodForRedirectedInterface with all valid
+ // combination of arguments.
+ for (int i = 0; i < WinMDAdapter::RedirectedTypeIndex_Count; i++)
+ {
+ if (i == WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IEnumerable ||
+ i == WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IList ||
+ i == WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IDictionary ||
+ i == WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IReadOnlyList ||
+ i == WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IReadOnlyDictionary ||
+ i == WinMDAdapter::RedirectedTypeIndex_System_IDisposable)
+ {
+ int stubInfoIndex = GetStubInfoIndex((WinMDAdapter::RedirectedTypeIndex)i);
+
+ // WinRT -> CLR
+ for (int slot = 0; slot < s_rInterfaceStubInfos[stubInfoIndex].m_iCLRMethodCount; slot++)
+ {
+ GetStubMethodForRedirectedInterface((WinMDAdapter::RedirectedTypeIndex)i, slot, TypeHandle::Interop_ManagedToNative, FALSE);
+ }
+ if (i == WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IList ||
+ i == WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IDictionary)
+ {
+ // WinRT -> CLR ICollection
+ for (int slot = 0; slot < s_rInterfaceStubInfos[stubInfoIndex + s_NumRedirectedInterfaces].m_iCLRMethodCount; slot++)
+ {
+ GetStubMethodForRedirectedInterface((WinMDAdapter::RedirectedTypeIndex)i, slot, TypeHandle::Interop_ManagedToNative, TRUE);
+ }
+ }
+
+ // CLR -> WinRT
+ for (int slot = 0; slot < s_rInterfaceStubInfos[stubInfoIndex].m_iWinRTMethodCount; slot++)
+ {
+ GetStubMethodForRedirectedInterface((WinMDAdapter::RedirectedTypeIndex)i, slot, TypeHandle::Interop_NativeToManaged, FALSE);
+ }
+ }
+ }
+}
+#endif // _DEBUG
+
+// Returns a MethodDesc to be used as an interop stub for the given redirected interface/slot/direction.
+MethodDesc *WinRTInterfaceRedirector::GetStubMethodForRedirectedInterface(WinMDAdapter::RedirectedTypeIndex interfaceIndex,
+ int slot,
+ TypeHandle::InteropKind interopKind,
+ BOOL fICollectionStub,
+ Instantiation methodInst /*= Instantiation()*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(!(fICollectionStub && interopKind == TypeHandle::Interop_NativeToManaged));
+ }
+ CONTRACTL_END;
+
+ int stubInfoIndex = GetStubInfoIndex(interfaceIndex);
+ _ASSERTE(stubInfoIndex < s_NumRedirectedInterfaces);
+ _ASSERTE(stubInfoIndex < _countof(s_rInterfaceStubInfos));
+
+ const RedirectedInterfaceStubInfo *pStubInfo;
+ pStubInfo = &s_rInterfaceStubInfos[fICollectionStub ? stubInfoIndex + s_NumRedirectedInterfaces : stubInfoIndex];
+
+ BinderMethodID method;
+ if (interopKind == TypeHandle::Interop_NativeToManaged)
+ {
+ _ASSERTE(slot < pStubInfo->m_iWinRTMethodCount);
+ method = pStubInfo->m_rWinRTStubMethods[slot];
+ }
+ else
+ {
+ _ASSERTE(slot < pStubInfo->m_iCLRMethodCount);
+ method = pStubInfo->m_rCLRStubMethods[slot];
+ }
+
+ MethodDesc *pMD;
+ if ((pStubInfo->m_WinRTInterface & NON_MSCORLIB_MARKER) == 0)
+ {
+ if (!methodInst.IsEmpty() &&
+ (method == METHOD__ITERABLE_TO_ENUMERABLE_ADAPTER__GET_ENUMERATOR_STUB ||
+ method == METHOD__IVECTORVIEW_TO_IREADONLYLIST_ADAPTER__INDEXER_GET))
+ {
+ if (GetStructureBaseType(methodInst) != BaseType_None)
+ {
+ // This instantiation has ambiguous run-time behavior because it can be assigned by co-variance
+ // from another instantiation in which the type argument is not an interface pointer in the WinRT
+ // world. We have to use a special stub for these which performs a run-time check to see how to
+ // marshal the argument.
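+                // Illustrative example: an IEnumerable<object> reference can hold an
+                // IEnumerable<string> via covariance, and string marshals to WinRT as HSTRING
+                // rather than as an interface pointer, so the element marshaling cannot be
+                // decided at compile time.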
+
+ method = (method == METHOD__ITERABLE_TO_ENUMERABLE_ADAPTER__GET_ENUMERATOR_STUB) ?
+ METHOD__ITERABLE_TO_ENUMERABLE_ADAPTER__GET_ENUMERATOR_VARIANCE_STUB :
+ METHOD__IVECTORVIEW_TO_IREADONLYLIST_ADAPTER__INDEXER_GET_VARIANCE;
+ }
+ }
+
+ pMD = MscorlibBinder::GetMethod(method);
+ }
+ else
+ {
+ // the stub method does not live in mscorlib
+ const NonMscorlibRedirectedInterfaceInfo *pInfo = &s_rNonMscorlibInterfaceInfos[pStubInfo->m_WinRTInterface & ~NON_MSCORLIB_MARKER];
+
+ pMD = LoadMethodFromRedirectedAssembly(
+ pInfo->m_AssemblyIndex,
+ (interopKind == TypeHandle::Interop_NativeToManaged) ? pInfo->m_wzWinRTStubClassTypeName : pInfo->m_wzCLRStubClassTypeName,
+ pInfo->m_rszMethodNames[method]);
+ }
+
+#ifdef _DEBUG
+ // Verify that the signature of the stub method matches the corresponding interface method.
+ MethodTable *pItfMT = NULL;
+ Instantiation inst = pMD->GetMethodInstantiation();
+
+ if (interopKind == TypeHandle::Interop_NativeToManaged)
+ {
+ // we are interested in the WinRT interface method
+ pItfMT = GetWinRTTypeForRedirectedInterfaceIndex(interfaceIndex);
+ }
+ else
+ {
+ // we are interested in the CLR interface method
+ if (fICollectionStub)
+ {
+ if (pMD->HasMethodInstantiation())
+ {
+ if (interfaceIndex == WinMDAdapter::RedirectedTypeIndex_Windows_Foundation_Collections_IVectorView ||
+ interfaceIndex == WinMDAdapter::RedirectedTypeIndex_Windows_Foundation_Collections_IMapView)
+ pItfMT = MscorlibBinder::GetExistingClass(CLASS__IREADONLYCOLLECTIONGENERIC);
+ else
+ pItfMT = MscorlibBinder::GetExistingClass(CLASS__ICOLLECTIONGENERIC);
+
+ if (interfaceIndex == WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IDictionary ||
+ interfaceIndex == WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IReadOnlyDictionary)
+ {
+ TypeHandle thKvPair = TypeHandle(MscorlibBinder::GetClass(CLASS__KEYVALUEPAIRGENERIC)).Instantiate(inst);
+ inst = Instantiation(&thKvPair, 1);
+ }
+ }
+ else
+ {
+ pItfMT = MscorlibBinder::GetExistingClass(CLASS__ICOLLECTION);
+ }
+ }
+ else
+ {
+ pItfMT = GetAppDomain()->GetRedirectedType(interfaceIndex);
+ }
+ }
+
+ // get signature of the stub method
+ PCCOR_SIGNATURE pSig1;
+ DWORD cSig1;
+
+ pMD->GetSig(&pSig1, &cSig1);
+ SigTypeContext typeContext1;
+ SigTypeContext::InitTypeContext(Instantiation(), pMD->GetMethodInstantiation(), &typeContext1);
+ MetaSig sig1(pSig1, cSig1, pMD->GetModule(), &typeContext1);
+
+ // get signature of the interface method
+ PCCOR_SIGNATURE pSig2;
+ DWORD cSig2;
+
+ MethodDesc *pItfMD = pItfMT->GetMethodDescForSlot(slot);
+ pItfMD->GetSig(&pSig2, &cSig2);
+ SigTypeContext typeContext2;
+ SigTypeContext::InitTypeContext(inst, Instantiation(), &typeContext2);
+ MetaSig sig2(pSig2, cSig2, pItfMD->GetModule(), &typeContext2);
+
+ _ASSERTE_MSG(MetaSig::CompareMethodSigs(sig1, sig2, FALSE), "Stub method signature does not match the corresponding interface method.");
+#endif // _DEBUG
+
+ if (!methodInst.IsEmpty())
+ {
+ _ASSERTE(pMD->HasMethodInstantiation());
+ _ASSERTE(pMD->GetNumGenericMethodArgs() == methodInst.GetNumArgs());
+
+ pMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pMD,
+ pMD->GetMethodTable(),
+ FALSE, // forceBoxedEntryPoint
+ methodInst, // methodInst
+ FALSE, // allowInstParam
+ TRUE); // forceRemotableMethod
+ }
+
+ return pMD;
+}
+
+// static
+MethodDesc *WinRTInterfaceRedirector::GetStubMethodForRedirectedInterfaceMethod(MethodDesc *pMD, TypeHandle::InteropKind interopKind)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = pMD->GetMethodTable();
+
+ //
+    // If we are calling into a class method instead of an interface method,
+    // convert it to the first implemented interface method (we are always calling into the
+    // first one - see code:ComPlusCall::PopulateComPlusCallMethodDesc for more details).
+ //
+ if (!pMT->IsInterface())
+ {
+ pMD = pMD->GetInterfaceMD();
+ pMT = pMD->GetMethodTable();
+ }
+
+ bool fICollectionStub = false;
+ if (interopKind == TypeHandle::Interop_ManagedToNative)
+ {
+ MethodTable *pResolvedMT = RCW::ResolveICollectionInterface(pMT, TRUE /* fPreferIDictionary */, NULL);
+ if (pResolvedMT != NULL)
+ {
+ fICollectionStub = true;
+ pMT = pResolvedMT;
+ }
+ }
+
+ WinMDAdapter::RedirectedTypeIndex index;
+ if (WinRTInterfaceRedirector::ResolveRedirectedInterface(pMT, &index))
+ {
+ // make sure we return an exact MD that takes no extra instantiating arguments
+ return WinRTInterfaceRedirector::GetStubMethodForRedirectedInterface(
+ index,
+ pMD->GetSlot(),
+ interopKind,
+ fICollectionStub,
+ pMT->GetInstantiation());
+ }
+
+ return NULL;
+}
+
+// static
+MethodTable *WinRTDelegateRedirector::GetWinRTTypeForRedirectedDelegateIndex(WinMDAdapter::RedirectedTypeIndex index)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ switch (index)
+ {
+ case WinMDAdapter::RedirectedTypeIndex_System_EventHandlerGeneric:
+ return MscorlibBinder::GetClass(CLASS__WINDOWS_FOUNDATION_EVENTHANDLER);
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Specialized_NotifyCollectionChangedEventHandler:
+ {
+ Assembly *pAssembly;
+ VERIFY(::GetAppDomain()->FindRedirectedAssemblyFromIndexIfLoaded(WinMDAdapter::FrameworkAssembly_System, &pAssembly));
+ return TypeName::GetTypeFromAssembly(W("System.Runtime.InteropServices.WindowsRuntime.NotifyCollectionChangedEventHandler_WinRT"), pAssembly).GetMethodTable();
+ }
+
+ case WinMDAdapter::RedirectedTypeIndex_System_ComponentModel_PropertyChangedEventHandler:
+ {
+ Assembly *pAssembly;
+ VERIFY(::GetAppDomain()->FindRedirectedAssemblyFromIndexIfLoaded(WinMDAdapter::FrameworkAssembly_System, &pAssembly));
+ return TypeName::GetTypeFromAssembly(W("System.Runtime.InteropServices.WindowsRuntime.PropertyChangedEventHandler_WinRT"), pAssembly).GetMethodTable();
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+// Returns TRUE iff the argument represents the "__ComObject" type or
+// any type derived from it (i.e. typelib-imported RCWs).
+BOOL IsComWrapperClass(TypeHandle type)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodTable* pMT = type.GetMethodTable();
+ if (pMT == NULL)
+ return FALSE;
+
+ return pMT->IsComObjectType();
+}
+
+// Returns TRUE iff the argument represents the "__ComObject" type.
+BOOL IsComObjectClass(TypeHandle type)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (!type.IsTypeDesc())
+ {
+ MethodTable *pMT = type.AsMethodTable();
+
+ if (pMT->IsComObjectType())
+ {
+ // May be __ComObject or typed RCW. __ComObject must have already been loaded
+ // if we see an MT marked like this so calling the *NoInit method is sufficient.
+ return (pMT == g_pBaseCOMObject);
+ }
+ }
+
+ return FALSE;
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+#ifdef _DEBUG
+//-------------------------------------------------------------------
+// LOGGING APIS
+//-------------------------------------------------------------------
+
+static int g_TraceCount = 0;
+static IUnknown* g_pTraceIUnknown = 0;
+
+VOID IntializeInteropLogging()
+{
+ WRAPPER_NO_CONTRACT;
+
+ g_pTraceIUnknown = g_pConfig->GetTraceIUnknown();
+ g_TraceCount = g_pConfig->GetTraceWrapper();
+}
+
+VOID LogInterop(__in_z LPSTR szMsg)
+{
+ LIMITED_METHOD_CONTRACT;
+ LOG( (LF_INTEROP, LL_INFO10, "%s\n",szMsg) );
+}
+
+VOID LogInterop(__in_z LPWSTR wszMsg)
+{
+ LIMITED_METHOD_CONTRACT;
+ LOG( (LF_INTEROP, LL_INFO10, "%S\n", wszMsg) );
+}
+
+//-------------------------------------------------------------------
+// VOID LogRCWCreate(RCW* pWrap, IUnknown* pUnk)
+// log wrapper create
+//-------------------------------------------------------------------
+VOID LogRCWCreate(RCW* pWrap, IUnknown* pUnk)
+{
+ if (!LoggingOn(LF_INTEROP, LL_ALWAYS))
+ return;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ static int count = 0;
+ LPVOID pCurrCtx = GetCurrentCtxCookie();
+
+ // pre-increment the count, so it can never be zero
+ count++;
+
+ if (count == g_TraceCount)
+ {
+ g_pTraceIUnknown = pUnk;
+ }
+
+ if (g_pTraceIUnknown == 0 || g_pTraceIUnknown == pUnk)
+ {
+ LOG( (LF_INTEROP,
+ LL_INFO10,
+ "Create RCW: Wrapper %p #%d IUnknown:%p Context %p\n",
+ pWrap, count,
+ pUnk,
+ pCurrCtx) );
+ }
+}
+
+//-------------------------------------------------------------------
+// VOID LogRCWMinorCleanup(RCW* pWrap)
+// log wrapper minor cleanup
+//-------------------------------------------------------------------
+VOID LogRCWMinorCleanup(RCW* pWrap)
+{
+ if (!LoggingOn(LF_INTEROP, LL_ALWAYS))
+ return;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrap));
+ }
+ CONTRACTL_END;
+
+ static int dest_count = 0;
+ dest_count++;
+
+ IUnknown *pUnk = pWrap->GetRawIUnknown_NoAddRef_NoThrow();
+
+ if (g_pTraceIUnknown == 0 || g_pTraceIUnknown == pUnk)
+ {
+ LPVOID pCurrCtx = GetCurrentCtxCookie();
+ LOG( (LF_INTEROP,
+ LL_INFO10,
+ "Minor Cleanup RCW: Wrapper %p #%d IUnknown %p Context: %p\n",
+ pWrap, dest_count,
+ pUnk,
+ pCurrCtx) );
+ }
+}
+
+//-------------------------------------------------------------------
+// VOID LogRCWDestroy(RCW* pWrap)
+// log wrapper destroy
+//-------------------------------------------------------------------
+VOID LogRCWDestroy(RCW* pWrap)
+{
+ if (!LoggingOn(LF_INTEROP, LL_ALWAYS))
+ return;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrap));
+ }
+ CONTRACTL_END;
+
+ static int dest_count = 0;
+ dest_count++;
+
+ IUnknown *pUnk = pWrap->GetRawIUnknown_NoAddRef_NoThrow();
+
+ if (g_pTraceIUnknown == 0 || g_pTraceIUnknown == pUnk)
+ {
+ LPVOID pCurrCtx = GetCurrentCtxCookie();
+ STRESS_LOG4(
+ LF_INTEROP,
+ LL_INFO10,
+ "Destroy RCW: Wrapper %p #%d IUnknown %p Context: %p\n",
+ pWrap, dest_count,
+ pUnk,
+ pCurrCtx);
+ }
+}
+
+//-------------------------------------------------------------------
+// VOID LogInteropLeak(IUnkEntry * pEntry)
+//-------------------------------------------------------------------
+VOID LogInteropLeak(IUnkEntry * pEntry)
+{
+ if (!LoggingOn(LF_INTEROP, LL_ALWAYS))
+ return;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pEntry));
+ }
+ CONTRACTL_END;
+
+ IUnknown *pUnk = pEntry->GetRawIUnknown_NoAddRef_NoThrow();
+
+ if (g_pTraceIUnknown == 0 || g_pTraceIUnknown == pUnk)
+ {
+ LOG( (LF_INTEROP,
+ LL_INFO10,
+ "IUnkEntry Leak: %p Context: %p\n",
+ pUnk,
+ pEntry->GetCtxCookie()) );
+ }
+}
+
+//-------------------------------------------------------------------
+// VOID LogInteropLeak(IUnknown* pItf)
+//-------------------------------------------------------------------
+VOID LogInteropLeak(IUnknown* pItf)
+{
+ if (!LoggingOn(LF_INTEROP, LL_ALWAYS))
+ return;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LPVOID pCurrCtx = NULL;
+
+ if (g_pTraceIUnknown == 0 || g_pTraceIUnknown == pItf)
+ {
+ pCurrCtx = GetCurrentCtxCookie();
+ LOG((LF_INTEROP,
+ LL_EVERYTHING,
+ "Leak: Itf = %p, CurrCtx = %p\n",
+ pItf, pCurrCtx));
+ }
+}
+
+//-------------------------------------------------------------------
+// VOID LogInteropQI(IUnknown* pItf, REFIID iid, HRESULT hrArg, LPCSTR szMsg)
+//-------------------------------------------------------------------
+VOID LogInteropQI(IUnknown* pItf, REFIID iid, HRESULT hrArg, __in_z LPCSTR szMsg)
+{
+ if (!LoggingOn(LF_INTEROP, LL_ALWAYS))
+ return;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pItf));
+ }
+ CONTRACTL_END;
+
+ LPVOID pCurrCtx = NULL;
+ HRESULT hr = S_OK;
+ SafeComHolder<IUnknown> pUnk = NULL;
+ int cch = 0;
+ WCHAR wszIID[64];
+
+ hr = SafeQueryInterface(pItf, IID_IUnknown, &pUnk);
+
+ if (g_pTraceIUnknown == 0 || g_pTraceIUnknown == pUnk)
+ {
+ pCurrCtx = GetCurrentCtxCookie();
+
+ cch = StringFromGUID2(iid, wszIID, sizeof(wszIID) / sizeof(WCHAR));
+ _ASSERTE(cch > 0);
+
+ if (SUCCEEDED(hrArg))
+ {
+ LOG((LF_INTEROP,
+ LL_EVERYTHING,
+ "Succeeded QI: Unk = %p, Itf = %p, CurrCtx = %p, IID = %S, Msg: %s\n",
+ (IUnknown*)pUnk, pItf, pCurrCtx, wszIID, szMsg));
+ }
+ else
+ {
+ LOG((LF_INTEROP,
+ LL_EVERYTHING,
+ "Failed QI: Unk = %p, Itf = %p, CurrCtx = %p, IID = %S, HR = %p, Msg: %s\n",
+ (IUnknown*)pUnk, pItf, pCurrCtx, wszIID, hrArg, szMsg));
+ }
+ }
+}
+
+//-------------------------------------------------------------------
+// VOID LogInteropAddRef(IUnknown* pItf, ULONG cbRef, LPCSTR szMsg)
+//-------------------------------------------------------------------
+VOID LogInteropAddRef(IUnknown* pItf, ULONG cbRef, __in_z LPCSTR szMsg)
+{
+ if (!LoggingOn(LF_INTEROP, LL_ALWAYS))
+ return;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pItf));
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ LPVOID pCurrCtx = NULL;
+ HRESULT hr = S_OK;
+ SafeComHolder<IUnknown> pUnk = NULL;
+
+ hr = SafeQueryInterface(pItf, IID_IUnknown, &pUnk);
+
+ if (g_pTraceIUnknown == 0 || g_pTraceIUnknown == pUnk)
+ {
+ pCurrCtx = GetCurrentCtxCookie();
+ LOG((LF_INTEROP,
+ LL_EVERYTHING,
+ "AddRef: Unk = %p, Itf = %p, CurrCtx = %p, RefCount = %d, Msg: %s\n",
+ (IUnknown*)pUnk, pItf, pCurrCtx, cbRef, szMsg));
+ }
+}
+
+//-------------------------------------------------------------------
+// VOID LogInteropRelease(IUnknown* pItf, ULONG cbRef, LPCSTR szMsg)
+//-------------------------------------------------------------------
+VOID LogInteropRelease(IUnknown* pItf, ULONG cbRef, __in_z LPCSTR szMsg)
+{
+ if (!LoggingOn(LF_INTEROP, LL_ALWAYS))
+ return;
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pItf, NULL_OK));
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ LPVOID pCurrCtx = NULL;
+
+ if (g_pTraceIUnknown == 0 || g_pTraceIUnknown == pItf)
+ {
+ pCurrCtx = GetCurrentCtxCookie();
+ LOG((LF_INTEROP,
+ LL_EVERYTHING,
+ "Release: Itf = %p, CurrCtx = %p, RefCount = %d, Msg: %s\n",
+ pItf, pCurrCtx, cbRef, szMsg));
+ }
+}
+
+#endif // _DEBUG
+
+IUnknown* MarshalObjectToInterface(OBJECTREF* ppObject, MethodTable* pItfMT, MethodTable* pClassMT, DWORD dwFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+    // When an interface method table is specified, the ITF_MARSHAL_DISP_ITF flag must be
+    // consistent with the interface type.
+ BOOL bDispatch = (dwFlags & ItfMarshalInfo::ITF_MARSHAL_DISP_ITF);
+ BOOL bInspectable = (dwFlags & ItfMarshalInfo::ITF_MARSHAL_INSP_ITF);
+ BOOL bUseBasicItf = (dwFlags & ItfMarshalInfo::ITF_MARSHAL_USE_BASIC_ITF);
+
+ _ASSERTE(!pItfMT || (!pItfMT->IsInterface() && bDispatch) ||
+ (!!bDispatch == IsDispatchBasedItf(pItfMT->GetComInterfaceType())) ||
+ (!!bInspectable == (pItfMT->GetComInterfaceType() == ifInspectable) || pItfMT->IsWinRTRedirectedInterface(TypeHandle::Interop_ManagedToNative)));
+
+ if (pItfMT)
+ {
+ return GetComIPFromObjectRef(ppObject, pItfMT);
+ }
+ else if (!bUseBasicItf)
+ {
+ return GetComIPFromObjectRef(ppObject, pClassMT);
+ }
+ else
+ {
+ ComIpType ReqIpType = bDispatch ? ComIpType_Dispatch : (bInspectable ? ComIpType_Inspectable : ComIpType_Unknown);
+ return GetComIPFromObjectRef(ppObject, ReqIpType, NULL);
+ }
+}
+
+void UnmarshalObjectFromInterface(OBJECTREF *ppObjectDest, IUnknown **ppUnkSrc, MethodTable *pItfMT, MethodTable *pClassMT, DWORD dwFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ PRECONDITION(IsProtectedByGCFrame(ppObjectDest));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!pClassMT || !pClassMT->IsInterface());
+
+ bool fIsInterface = (pItfMT != NULL && pItfMT->IsInterface());
+
+ DWORD dwObjFromComIPFlags = ObjFromComIP::FromItfMarshalInfoFlags(dwFlags);
+ GetObjectRefFromComIP(
+ ppObjectDest, // Object
+ ppUnkSrc, // Interface pointer
+ pClassMT, // Class type
+ fIsInterface ? pItfMT : NULL, // Interface type - used to cache the incoming interface pointer
+ dwObjFromComIPFlags // Flags
+ );
+
+ // Make sure the interface is supported.
+ _ASSERTE(!pItfMT || pItfMT->IsInterface() || pItfMT->GetComClassInterfaceType() != clsIfNone);
+
+ if (fIsInterface)
+ {
+ if ((dwFlags & ItfMarshalInfo::ITF_MARSHAL_WINRT_SCENARIO) == 0)
+ {
+ // We only verify that the object supports the interface for non-WinRT scenarios because we
+ // believe that the likelihood of improperly constructed programs is significantly lower
+ // with WinRT and the Object::SupportsInterface check is very expensive.
+ if (!(*ppObjectDest)->IsTransparentProxy() && !Object::SupportsInterface(*ppObjectDest, pItfMT))
+ {
+ COMPlusThrowInvalidCastException(ppObjectDest, TypeHandle(pItfMT));
+ }
+ }
+ }
+}
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+
+//--------------------------------------------------------------------------------
+// Check if the pUnk implements IProvideClassInfo and try to figure
+// out the class from there
+MethodTable* GetClassFromIProvideClassInfo(IUnknown* pUnk)
+{
+ CONTRACT (MethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pUnk));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ MethodTable* pClassMT = NULL;
+ SafeComHolder<ITypeInfo> pTypeInfo = NULL;
+ SafeComHolder<IProvideClassInfo> pclsInfo = NULL;
+
+ // Use IProvideClassInfo to detect the appropriate class to use for wrapping
+ HRESULT hr = SafeQueryInterface(pUnk, IID_IProvideClassInfo, (IUnknown **)&pclsInfo);
+    LogInteropQI(pUnk, IID_IProvideClassInfo, hr, "GetClassFromIProvideClassInfo: QIing for IProvideClassInfo");
+ if (hr == S_OK && pclsInfo)
+ {
+ hr = E_FAIL;
+
+ // Make sure the class info is not our own
+ if (!IsSimpleTearOff(pclsInfo))
+ {
+ GCX_PREEMP();
+
+ LeaveRuntimeHolder lrh(**(size_t**)(IUnknown*)pclsInfo);
+ hr = pclsInfo->GetClassInfo(&pTypeInfo);
+ }
+
+        // If we succeeded in retrieving the type information then keep going.
+ TYPEATTRHolder ptattr(pTypeInfo);
+ if (hr == S_OK && pTypeInfo)
+ {
+ {
+ GCX_PREEMP();
+ LeaveRuntimeHolder lrh(**(size_t**)(IUnknown*)pTypeInfo);
+ hr = pTypeInfo->GetTypeAttr(&ptattr);
+ }
+
+ // If we succeeded in retrieving the attributes and they represent
+ // a CoClass, then look up the class from the CLSID.
+ if (hr == S_OK && ptattr->typekind == TKIND_COCLASS)
+ {
+ GCX_ASSERT_COOP();
+ pClassMT = GetTypeForCLSID(ptattr->guid);
+ }
+ }
+ }
+
+ RETURN pClassMT;
+}
+
+#endif // FEATURE_CLASSIC_COMINTEROP
+
+
+enum IInspectableQueryResults {
+ IInspectableQueryResults_SupportsIReference = 0x1,
+ IInspectableQueryResults_SupportsIReferenceArray = 0x2,
+};
+
+//--------------------------------------------------------------------------------
+// Try to get the class from IInspectable. If *pfSupportsIInspectable is true, pUnk
+// is assumed to be an IInspectable-derived interface. Otherwise, this function will
+// QI for IInspectable and set *pfSupportsIInspectable accordingly.
+
+TypeHandle GetClassFromIInspectable(IUnknown* pUnk, bool *pfSupportsIInspectable, bool *pfSupportsIReference, bool *pfSupportsIReferenceArray)
+{
+ CONTRACT (TypeHandle)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(pfSupportsIInspectable));
+ PRECONDITION(CheckPointer(pfSupportsIReference));
+ PRECONDITION(CheckPointer(pfSupportsIReferenceArray));
+ }
+ CONTRACT_END;
+
+ *pfSupportsIReference = false;
+ *pfSupportsIReferenceArray = false;
+
+ HRESULT hr = S_OK;
+
+ SafeComHolder<IInspectable> pInsp = NULL;
+ if (*pfSupportsIInspectable)
+ {
+ // we know that pUnk is an IInspectable
+ pInsp = static_cast<IInspectable *>(pUnk);
+ pInsp.SuppressRelease();
+ }
+ else
+ {
+ hr = SafeQueryInterface(pUnk, IID_IInspectable, (IUnknown **)&pInsp);
+ LogInteropQI(pUnk, IID_IInspectable, hr, "GetClassFromIInspectable: QIing for IInspectable");
+
+ if (SUCCEEDED(hr))
+ {
+ *pfSupportsIInspectable = true;
+ }
+ else
+ {
+ RETURN TypeHandle();
+ }
+ }
+
+ WinRtString winrtClassName;
+ {
+ GCX_PREEMP();
+ LeaveRuntimeHolder lrh(**(size_t**)(IUnknown*)pInsp);
+ if (FAILED(pInsp->GetRuntimeClassName(winrtClassName.Address())))
+ {
+ RETURN TypeHandle();
+ }
+ }
+
+ // Early return if the class name is NULL
+ if (winrtClassName == NULL)
+ RETURN TypeHandle();
+
+ // we have a class name
+ UINT32 cchClassName;
+ LPCWSTR pwszClassName = winrtClassName.GetRawBuffer(&cchClassName);
+ SString ssClassName(SString::Literal, pwszClassName, cchClassName);
+
+#ifndef FEATURE_CORECLR
+ if (ETW_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_PRIVATE_DYNAMICTYPEUSAGE_KEYWORD))
+ {
+ FireEtwIInspectableRuntimeClassName(pwszClassName, GetClrInstanceId());
+ }
+#endif
+
+ // Check a cache to see if this has already been looked up.
+ AppDomain *pDomain = GetAppDomain();
+ UINT vCacheVersion = 0;
+ BYTE bFlags;
+ TypeHandle classTypeHandle = pDomain->LookupTypeByName(ssClassName, &vCacheVersion, &bFlags);
+
+ if (!classTypeHandle.IsNull())
+ {
+ *pfSupportsIReference = ((bFlags & IInspectableQueryResults_SupportsIReference) != 0);
+ *pfSupportsIReferenceArray = ((bFlags & IInspectableQueryResults_SupportsIReferenceArray) != 0);
+ }
+ else
+ {
+ // use a copy of the original class name in case we peel off IReference/IReferenceArray below
+ StackSString ssTmpClassName;
+
+ // Check whether this is a value type, String, or T[] "boxed" in a IReference<T> or IReferenceArray<T>.
+ if (ssClassName.BeginsWith(W("Windows.Foundation.IReference`1<")) && ssClassName.EndsWith(W(">")))
+ {
+ ssTmpClassName.Set(ssClassName);
+ ssTmpClassName.Delete(ssTmpClassName.Begin(), _countof(W("Windows.Foundation.IReference`1<")) - 1);
+ ssTmpClassName.Delete(ssTmpClassName.End() - 1, 1);
+ *pfSupportsIReference = true;
+ }
+ else if (ssClassName.BeginsWith(W("Windows.Foundation.IReferenceArray`1<")) && ssClassName.EndsWith(W(">")))
+ {
+ ssTmpClassName.Set(ssClassName);
+ ssTmpClassName.Delete(ssTmpClassName.Begin(), _countof(W("Windows.Foundation.IReferenceArray`1<")) - 1);
+ ssTmpClassName.Delete(ssTmpClassName.End() - 1, 1);
+ *pfSupportsIReferenceArray = true;
+ }
+
+ EX_TRY
+ {
+ LPCWSTR pszWinRTTypeName = (ssTmpClassName.IsEmpty() ? ssClassName : ssTmpClassName);
+ classTypeHandle = WinRTTypeNameConverter::GetManagedTypeFromWinRTTypeName(pszWinRTTypeName, /*pbIsPrimitive = */ NULL);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+
+ if (!classTypeHandle.IsNull())
+ {
+ // cache the (positive) result
+ BYTE bFlags = 0;
+ if (*pfSupportsIReference)
+ bFlags |= IInspectableQueryResults_SupportsIReference;
+ if (*pfSupportsIReferenceArray)
+ bFlags |= IInspectableQueryResults_SupportsIReferenceArray;
+ pDomain->CacheTypeByName(ssClassName, vCacheVersion, classTypeHandle, bFlags);
+ }
+ }
+
+ RETURN classTypeHandle;
+}
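+
+// Worked example of the name peeling above (illustrative): a runtime class name
+// of L"Windows.Foundation.IReference`1<Int32>" has the prefix and the trailing
+// '>' stripped, leaving L"Int32"; *pfSupportsIReference is set to true, and the
+// type name converter then maps the remaining WinRT name to the managed type.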
+
+//--------------------------------------------------------------------------
+// Switch objects for this wrapper.
+// Used by JIT & object pooling to ensure a deactivated CCW can point to a new
+// object during reactivation.
+//--------------------------------------------------------------------------
+BOOL ReconnectWrapper(OBJECTREF* pOldRef, OBJECTREF* pNewRef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pOldRef));
+ PRECONDITION(CheckPointer(pNewRef));
+ }
+ CONTRACTL_END;
+
+ if (!(*pOldRef)->IsTransparentProxy())
+ {
+ COMPlusThrowArgumentException(W("oldtp"), W("Argument_NotATP"));
+ }
+ else if (!(*pNewRef)->IsTransparentProxy())
+ {
+ COMPlusThrowArgumentException(W("newtp"), W("Argument_NotATP"));
+ }
+
+ _ASSERTE((*pOldRef)->GetTrueMethodTable() == (*pNewRef)->GetTrueMethodTable());
+
+ // grab the sync block for the current object
+ SyncBlock* pOldSyncBlock = (*pOldRef)->GetSyncBlock();
+ _ASSERTE(pOldSyncBlock);
+
+ // get the wrapper for the old object
+ InteropSyncBlockInfo* pInteropInfo = pOldSyncBlock->GetInteropInfo();
+ ComCallWrapper* pCCW = pInteropInfo->GetCCW();
+ if (pCCW == NULL)
+ COMPlusThrowArgumentException(W("oldtp"), W("Argument_NoUnderlyingCCW"));
+
+ // get the syncblock for the new object and allocate an InteropSyncBlockInfo structure
+ SyncBlock* pNewSyncBlock = (*pNewRef)->GetSyncBlock();
+ _ASSERTE(pNewSyncBlock != NULL);
+
+ NewHolder<InteropSyncBlockInfo> pNewInteropInfo = new InteropSyncBlockInfo();
+ bool check = pNewSyncBlock->SetInteropInfo(pNewInteropInfo);
+
+ //
+ // Now we switch
+ //
+
+ // First, prevent the old object from getting to the CCW
+ pInteropInfo->SetCCW(NULL);
+
+ // Next, point the CCW at the new object
+ StoreObjectInHandle(pCCW->GetObjectHandle(), (*pNewRef));
+
+ // Finally, point the new object at the CCW
+ pNewSyncBlock->GetInteropInfo()->SetCCW(pCCW);
+
+ // store other information about the new server
+ SimpleComCallWrapper* pSimpleWrap = pCCW->GetSimpleWrapper();
+ _ASSERTE(pSimpleWrap);
+ pSimpleWrap->ReInit(pNewSyncBlock);
+
+ if (check)
+ pNewInteropInfo.SuppressRelease();
+
+ return TRUE;
+}
+
+ABI::Windows::Foundation::IUriRuntimeClass *CreateWinRTUri(LPCWSTR wszUri, INT32 cchUri)
+{
+ STANDARD_VM_CONTRACT;
+
+ UriMarshalingInfo* marshalingInfo = GetAppDomain()->GetMarshalingData()->GetUriMarshalingInfo();
+
+ // Get the cached factory from the UriMarshalingInfo object of the current appdomain
+ ABI::Windows::Foundation::IUriRuntimeClassFactory* pFactory = marshalingInfo->GetUriFactory();
+
+ SafeComHolder<ABI::Windows::Foundation::IUriRuntimeClass> pIUriRC;
+ LeaveRuntimeHolder lrh(**(size_t**)(IUnknown*)pFactory);
+ HRESULT hrCreate = pFactory->CreateUri(WinRtStringRef(wszUri, cchUri), &pIUriRC);
+ if (FAILED(hrCreate))
+ {
+ if (hrCreate == E_INVALIDARG)
+ {
+ COMPlusThrow(kArgumentException, IDS_EE_INVALIDARG_WINRT_INVALIDURI);
+ }
+ else
+ {
+ ThrowHR(hrCreate);
+ }
+ }
+
+ return pIUriRC.Extract();
+}
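+
+// Illustrative usage (assumption, not original code): building a WinRT Uri from
+// a literal. The returned pointer carries a reference that the caller owns.
+//
+//     LPCWSTR wszUri = W("http://example.com/");
+//     SafeComHolder<ABI::Windows::Foundation::IUriRuntimeClass> pUri;
+//     pUri = CreateWinRTUri(wszUri, (INT32)wcslen(wszUri));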
+
+
+// static
+void DECLSPEC_NORETURN ThrowTypeLoadExceptionWithInner(MethodTable *pClassMT, LPCWSTR pwzName, HRESULT hr, unsigned resID)
+{
+ CONTRACTL
+ {
+ THROWS;
+ DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString simpleName(SString::Utf8, pClassMT->GetAssembly()->GetSimpleName());
+
+ EEMessageException ex(hr);
+ EX_THROW_WITH_INNER(EETypeLoadException, (pwzName, simpleName.GetUnicode(), nullptr, resID), &ex);
+}
+
+//
+// Creates activation factory and wraps it with a RCW
+//
+void GetNativeWinRTFactoryObject(MethodTable *pMT, Thread *pThread, MethodTable *pFactoryIntfMT, BOOL bNeedUniqueRCW, ICOMInterfaceMarshalerCallback *pCallback, OBJECTREF *prefFactory)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(pThread));
+ PRECONDITION(CheckPointer(pFactoryIntfMT, NULL_OK));
+ PRECONDITION(CheckPointer(pCallback, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (!WinRTSupported())
+ {
+ COMPlusThrow(kPlatformNotSupportedException, W("PlatformNotSupported_WinRT"));
+ }
+
+ HSTRING hName = GetComClassFactory(pMT)->AsWinRTClassFactory()->GetClassName();
+
+ HRESULT hr;
+ SafeComHolder<IInspectable> pFactory;
+ {
+ GCX_PREEMP();
+ hr = clr::winrt::GetActivationFactory<IInspectable>(hName, &pFactory);
+ }
+
+ // There are a few particular failures that we'd like to map to specific exception types:
+ // - the factory interface is for a WinRT type which is not registered => TypeLoadException
+ // - the factory interface is not a factory for the WinRT type => ArgumentException
+ if (hr == REGDB_E_CLASSNOTREG)
+ {
+ ThrowTypeLoadExceptionWithInner(pMT, WindowsGetStringRawBuffer(hName, nullptr), hr, IDS_EE_WINRT_TYPE_NOT_REGISTERED);
+ }
+ else if (hr == E_NOINTERFACE)
+ {
+ LPCWSTR wzTN = WindowsGetStringRawBuffer(hName, nullptr);
+ if (pFactoryIntfMT)
+ {
+ InlineSString<DEFAULT_NONSTACK_CLASSNAME_SIZE> ssFactoryName;
+ pFactoryIntfMT->_GetFullyQualifiedNameForClass(ssFactoryName);
+ EEMessageException ex(hr);
+ EX_THROW_WITH_INNER(EEMessageException, (kArgumentException, IDS_EE_WINRT_NOT_FACTORY_FOR_TYPE, ssFactoryName.GetUnicode(), wzTN), &ex);
+ }
+ else
+ {
+ EEMessageException ex(hr);
+ EX_THROW_WITH_INNER(EEMessageException, (kArgumentException, IDS_EE_WINRT_INVALID_FACTORY_FOR_TYPE, wzTN), &ex);
+ }
+ }
+ else
+ {
+ IfFailThrow(hr);
+ }
+
+ DWORD flags =
+ RCW::CF_SupportsIInspectable | // Returns a WinRT RCW
+ RCW::CF_DontResolveClass; // Don't care about the exact type
+
+#ifdef FEATURE_WINDOWSPHONE
+ flags |= RCW::CF_DetectDCOMProxy; // Attempt to detect that the factory is a DCOM proxy in order to suppress caching
+#endif // FEATURE_WINDOWSPHONE
+
+ if (bNeedUniqueRCW)
+ flags |= RCW::CF_NeedUniqueObject; // Returns a unique RCW
+
+ COMInterfaceMarshaler marshaler;
+ marshaler.Init(
+ pFactory,
+ g_pBaseCOMObject, // Always System.__ComObject
+ pThread,
+ flags
+ );
+
+ if (pCallback)
+ marshaler.SetCallback(pCallback);
+
+ // Find an existing RCW or create a new RCW
+ *prefFactory = marshaler.FindOrCreateObjectRef(pFactory);
+
+ return;
+}
+
+#endif //#ifndef CROSSGEN_COMPILE
+
+
+#endif // FEATURE_COMINTEROP
diff --git a/src/vm/interoputil.h b/src/vm/interoputil.h
new file mode 100644
index 0000000000..72224a8797
--- /dev/null
+++ b/src/vm/interoputil.h
@@ -0,0 +1,531 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _H_INTEROP_UTIL
+#define _H_INTEROP_UTIL
+
+#include "debugmacros.h"
+#include "interopconverter.h"
+
+struct VariantData;
+
+// Out of memory helper.
+#define IfNullThrow(EXPR) \
+do {if ((EXPR) == 0) {ThrowOutOfMemory();} } while (0)
+
+
+// Helper to determine the version number from an int.
+#define GET_VERSION_USHORT_FROM_INT(x) ((x < 0) || (x > (INT)((USHORT)-1))) ? 0 : static_cast<USHORT>(x)
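+// For example (illustrative): GET_VERSION_USHORT_FROM_INT(-1) and
+// GET_VERSION_USHORT_FROM_INT(0x10000) both clamp to 0, while
+// GET_VERSION_USHORT_FROM_INT(2) yields (USHORT)2.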
+
+#ifdef FEATURE_COMINTEROP
+#include "winrttypenameconverter.h"
+#include "roparameterizediid.h"
+#include "../md/winmd/inc/adapter.h"
+#include <windows.foundation.h>
+
+// The format string to use to format unknown members to be passed to
+// invoke member
+#define DISPID_NAME_FORMAT_STRING W("[DISPID=%i]")
+
+//---------------------------------------------------------------------------
+// This method returns the default interface for the class as well as the
+// type of default interface we are dealing with.
+enum DefaultInterfaceType
+{
+ DefaultInterfaceType_Explicit = 0,
+ DefaultInterfaceType_IUnknown = 1,
+ DefaultInterfaceType_AutoDual = 2,
+ DefaultInterfaceType_AutoDispatch = 3,
+ DefaultInterfaceType_BaseComClass = 4
+};
+
+// System.Drawing.Color struct definition. The field order differs between
+// 32-bit and 64-bit builds so that the native layout matches the managed
+// layout of System.Drawing.Color.
+
+struct SYSTEMCOLOR
+{
+#ifdef _WIN64
+ STRINGREF name;
+ INT64 value;
+#else
+ INT64 value;
+ STRINGREF name;
+#endif
+ short knownColor;
+ short state;
+};
+
+struct ComMethodTable;
+struct IUnkEntry;
+interface IStream;
+class ComCallWrapper;
+class InteropSyncBlockInfo;
+
+#endif //FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP
+#include <restrictederrorInfo.h>
+#endif
+#ifndef __IRestrictedErrorInfo_INTERFACE_DEFINED__
+DEFINE_GUID(IID_IRestrictedErrorInfo, 0x82BA7092,0x4C88,0x427D,0xA7,0xBC,0x16,0xDD,0x93,0xFE,0xB6,0x7E);
+MIDL_INTERFACE("82BA7092-4C88-427D-A7BC-16DD93FEB67E")
+IRestrictedErrorInfo : public IUnknown
+{
+public:
+};
+#endif // !__IRestrictedErrorInfo_INTERFACE_DEFINED__
+
+class FieldDesc;
+struct ExceptionData;
+
+//------------------------------------------------------------------
+// Setup error info for exception object.
+//
+#ifdef FEATURE_COMINTEROP
+HRESULT SetupErrorInfo(OBJECTREF pThrownObject, ComCallMethodDesc *pCMD);
+HRESULT SafeGetRestrictedErrorInfo(IRestrictedErrorInfo **ppIErrInfo);
+BOOL IsManagedObject(IUnknown *pErrInfo);
+IErrorInfo *GetCorrepondingErrorInfo_WinRT(HRESULT hr, IRestrictedErrorInfo *pResErrInfo, BOOL* bHasLangRestrictedErrInfo);
+HRESULT GetRestrictedErrorDetails(IRestrictedErrorInfo *pRestrictedErrorInfo, BSTR *perrorDescription, BSTR *pErrorRestrictedDescription, HRESULT *hr, BSTR *pErrorCapabilitySid);
+
+#endif // FEATURE_COMINTEROP
+
+HRESULT SetupErrorInfo(OBJECTREF pThrownObject, BOOL bIsWinRTScenario = FALSE);
+
+//--------------------------------------------------------------------------------
+// Release helper, enables and disables GC during call-outs.
+ULONG SafeRelease(IUnknown* pUnk, RCW* pRCW = NULL);
+
+//--------------------------------------------------------------------------------
+// Release helper, must be called in preemptive mode. Only use this variant if
+// you already know you're in preemptive mode for other reasons.
+ULONG SafeReleasePreemp(IUnknown* pUnk, RCW* pRCW = NULL);
+
+//--------------------------------------------------------------------------------
+// Determines if a COM object can be cast to the specified type.
+BOOL CanCastComObject(OBJECTREF obj, MethodTable * pTargetMT);
+
+//---------------------------------------------------------
+// Read the BestFit custom attribute info from
+// both assembly level and interface level
+//---------------------------------------------------------
+VOID ReadBestFitCustomAttribute(MethodDesc* pMD, BOOL* BestFit, BOOL* ThrowOnUnmappableChar);
+VOID ReadBestFitCustomAttribute(IMDInternalImport* pInternalImport, mdTypeDef cl, BOOL* BestFit, BOOL* ThrowOnUnmappableChar);
+int InternalWideToAnsi(__in_ecount(iNumWideChars) LPCWSTR szWideString, int iNumWideChars, __out_ecount_opt(cbAnsiBufferSize) LPSTR szAnsiString, int cbAnsiBufferSize, BOOL fBestFit, BOOL fThrowOnUnmappableChar);
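+
+// Illustrative call (assumption, not original code): converting four wide
+// characters with best-fit mapping enabled and no throw on unmappable chars.
+//
+//     CHAR szAnsi[16];
+//     int cb = InternalWideToAnsi(W("text"), 4, szAnsi, sizeof(szAnsi),
+//                                 TRUE /*fBestFit*/, FALSE /*fThrowOnUnmappableChar*/);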
+
+//---------------------------------------------------------
+// Read the ClassInterfaceType custom attribute info from
+// both assembly level and interface level
+//---------------------------------------------------------
+CorClassIfaceAttr ReadClassInterfaceTypeCustomAttribute(TypeHandle type);
+
+//-------------------------------------------------------------------
+// Helper to fill an ExceptionData struct from IErrorInfo / IRestrictedErrorInfo.
+//-------------------------------------------------------------------
+void FillExceptionData(ExceptionData* pedata, IErrorInfo* pErrInfo, IRestrictedErrorInfo* pRestrictedErrInfo);
+
+//------------------------------------------------------------------------------
+// Helper to access fields from an object.
+INT64 FieldAccessor(FieldDesc* pFD, OBJECTREF oref, INT64 val, BOOL isGetter, U1 cbSize);
+
+#ifndef FEATURE_CORECLR
+//---------------------------------------------------------------------------
+// Returns true if pImport has DefaultDllImportSearchPathsAttribute.
+// If true, also returns the dllImportSearchPathFlag value.
+BOOL GetDefaultDllImportSearchPathsAttributeValue(IMDInternalImport *pImport, mdToken token, DWORD * pDllImportSearchPathFlag);
+#endif // !FEATURE_CORECLR
+
+//---------------------------------------------------------------------------
+// Returns the index of the LCID parameter if one exists and -1 otherwise.
+int GetLCIDParameterIndex(MethodDesc *pMD);
+
+//---------------------------------------------------------------------------
+// Transforms an LCID into a CultureInfo.
+void GetCultureInfoForLCID(LCID lcid, OBJECTREF *pCultureObj);
+
+//---------------------------------------------------------------------------
+// This method determines if a member is visible from COM.
+BOOL IsMemberVisibleFromCom(MethodTable *pDeclaringMT, mdToken tk, mdMethodDef mdAssociate);
+
+//--------------------------------------------------------------------------------
+// This method generates a stringized version of an interface that contains the
+// name of the interface along with the signature of all the methods.
+SIZE_T GetStringizedItfDef(TypeHandle InterfaceType, CQuickArray<BYTE> &rDef);
+
+//--------------------------------------------------------------------------------
+// Helper to get the stringized form of typelib guid.
+HRESULT GetStringizedTypeLibGuidForAssembly(Assembly *pAssembly, CQuickArray<BYTE> &rDef, ULONG cbCur, ULONG *pcbFetched);
+
+//--------------------------------------------------------------------------------
+// GetErrorInfo helper, enables and disables GC during call-outs
+HRESULT SafeGetErrorInfo(IErrorInfo **ppIErrInfo);
+
+//--------------------------------------------------------------------------------
+// QI helper, enables and disables GC during call-outs
+HRESULT SafeQueryInterface(IUnknown* pUnk, REFIID riid, IUnknown** pResUnk);
+
+//--------------------------------------------------------------------------------
+// QI helper, must be called in preemptive mode. Faster than the MODE_ANY version
+// because it doesn't need to toggle the mode. Use this version only if you already
+// know that you're in preemptive mode for other reasons.
+HRESULT SafeQueryInterfacePreemp(IUnknown* pUnk, REFIID riid, IUnknown** pResUnk);
+
+#ifdef FEATURE_COMINTEROP
+
+// Convert an IUnknown to CCW, does not handle aggregation and ICustomQI.
+ComCallWrapper* MapIUnknownToWrapper(IUnknown* pUnk);
+
+// Convert an IUnknown to a CCW. Returns NULL if pUnk is not a managed
+// tear-off, or if it is a managed tear-off that has been aggregated.
+ComCallWrapper* GetCCWFromIUnknown(IUnknown* pUnk, BOOL bEnableCustomization = TRUE);
+
+// A version of LoadRegTypeLib that loads based on bitness and platform support
+// and loads with LCID == LOCALE_USER_DEFAULT
+HRESULT LoadRegTypeLibWithFlags(REFGUID rguid,
+ unsigned short wVerMajor,
+ unsigned short wVerMinor,
+ int flags,
+ ITypeLib FAR* FAR* pptlib);
+
+// A version of LoadTypeLibEx that loads based on bitness and platform support.
+HRESULT LoadTypeLibExWithFlags(LPCOLESTR szFile,
+ int flags,
+ ITypeLib** pptlib);
+
+//-------------------------------------------------------------------------------
+// Given an IErrorInfo pointer created on a COM exception object,
+// obtain the HRESULT stored in the exception object.
+HRESULT GetHRFromCLRErrorInfo(IErrorInfo* pErr);
+
+//--------------------------------------------------------------------------------
+// Called from EEStartup, to initialize com Interop specific data structures.
+void InitializeComInterop();
+
+//--------------------------------------------------------------------------------
+// Clean up Helpers
+//--------------------------------------------------------------------------------
+// called by syncblock, on the finalizer thread to do major cleanup
+void CleanupSyncBlockComData(InteropSyncBlockInfo* pInteropInfo);
+
+// called by syncblock, during GC, do only minimal work
+void MinorCleanupSyncBlockComData(InteropSyncBlockInfo* pInteropInfo);
+
+// Helper to release all of the RCWs in the specified context, across all caches.
+// If context is null, release all RCWs, otherwise release RCWs created in the
+// given context, including Jupiter RCWs
+void ReleaseRCWsInCaches(LPVOID pCtxCookie);
+
+// A wrapper that catches all exceptions - used in the OnThreadTerminate case.
+void ReleaseRCWsInCachesNoThrow(LPVOID pCtxCookie);
+
+
+//--------------------------------------------------------------------------------
+// AddRef helper, enables and disables GC during call-outs
+ULONG SafeAddRef(IUnknown* pUnk);
+//--------------------------------------------------------------------------------
+// AddRef helper, must be called in preemptive mode. Only use this variant if
+// you already know you're in preemptive mode for other reasons.
+ULONG SafeAddRefPreemp(IUnknown* pUnk);
+
+//--------------------------------------------------------------------------------
+// VariantChangeType helper, enables and disables GC during call-outs.
+HRESULT SafeVariantChangeType(VARIANT* pVarRes, VARIANT* pVarSrc,
+ unsigned short wFlags, VARTYPE vt);
+
+//--------------------------------------------------------------------------------
+// VariantChangeTypeEx helper, enables and disables GC during call-outs.
+HRESULT SafeVariantChangeTypeEx(VARIANT* pVarRes, VARIANT* pVarSrc,
+ LCID lcid, unsigned short wFlags, VARTYPE vt);
+
+//--------------------------------------------------------------------------------
+// Init helper, enables and disables GC during call-outs
+void SafeVariantInit(VARIANT* pVar);
+
+//--------------------------------------------------------------------------------
+// Releases the data in the stream and then releases the stream itself.
+void SafeReleaseStream(IStream *pStream);
+
+//--------------------------------------------------------------------------------
+// Ole RPC seems to return an inconsistent SafeArray for arrays created with
+// SafeArrayVector(VT_BSTR). OleAut's SafeArrayGetVartype() doesn't notice
+// the inconsistency and returns a valid-seeming (but wrong) vartype.
+// Our version is more discriminating. This should only be used for
+// marshaling scenarios where we can assume unmanaged code permissions
+// (and hence are already in a position of trusting unmanaged data).
+HRESULT ClrSafeArrayGetVartype(SAFEARRAY *psa, VARTYPE *vt);
+
+// Helpers
+
+//
+// Macros that define how to recognize a tear-off
+//
+#define TEAR_OFF_SLOT 1
+#define TEAR_OFF_STANDARD Unknown_AddRef
+#define TEAR_OFF_SIMPLE_INNER Unknown_AddRefInner
+#define TEAR_OFF_SIMPLE Unknown_AddRefSpecial
+
+BOOL ComInterfaceSlotIs(IUnknown* pUnk, int slot, LPVOID pvFunction);
+
+// Is the tear-off a CLR created tear-off
+BOOL IsInProcCCWTearOff(IUnknown* pUnk);
+
+// Does the tear-off represent one of the standard interfaces such as IProvideClassInfo, IErrorInfo, etc.?
+BOOL IsSimpleTearOff(IUnknown* pUnk);
+
+// Does the tear-off represent the inner unknown or the original unknown for the object?
+BOOL IsInnerUnknown(IUnknown* pUnk);
+
+// Is this one of our "standard" ComCallWrappers
+BOOL IsStandardTearOff(IUnknown* pUnk);
+
+//---------------------------------------------------------------------------
+// Does the IID represent an IClassX for this class?
+BOOL IsIClassX(MethodTable *pMT, REFIID riid, ComMethodTable **ppComMT);
+
+// Returns TRUE if we support IClassX for the given class.
+BOOL ClassSupportsIClassX(MethodTable *pMT);
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+//---------------------------------------------------------------------------
+// Calls the COM class factory and instantiates a new RCW.
+OBJECTREF AllocateComObject_ForManaged(MethodTable* pMT);
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+//---------------------------------------------------------------------------
+// Get/load data for a given CLSID.
+MethodTable* GetTypeForCLSID(REFCLSID rclsid, BOOL* pfAssemblyInReg = NULL);
+
+
+//---------------------------------------------------------------------------
+// Get/load a value class for a given GUID.
+#ifdef FEATURE_CLASSIC_COMINTEROP
+MethodTable* GetValueTypeForGUID(REFCLSID guid);
+#endif
+
+DefaultInterfaceType GetDefaultInterfaceForClassInternal(TypeHandle hndClass, TypeHandle *pHndDefClass);
+DefaultInterfaceType GetDefaultInterfaceForClassWrapper(TypeHandle hndClass, TypeHandle *pHndDefClass);
+
+HRESULT TryGetDefaultInterfaceForClass(TypeHandle hndClass, TypeHandle *pHndDefClass, DefaultInterfaceType *pDefItfType);
+
+MethodTable *GetDefaultInterfaceMTForClass(MethodTable *pMT, BOOL *pbDispatch);
+
+//---------------------------------------------------------------------------
+// This method retrieves the list of source interfaces for a given class.
+void GetComSourceInterfacesForClass(MethodTable *pClassMT, CQuickArray<MethodTable *> &rItfList);
+
+//--------------------------------------------------------------------------------
+// This methods converts an IEnumVARIANT to a managed IEnumerator.
+OBJECTREF ConvertEnumVariantToMngEnum(IEnumVARIANT *pNativeEnum);
+
+//--------------------------------------------------------------------------------
+// These methods convert an OLE_COLOR to a System.Color and vice versa.
+void ConvertOleColorToSystemColor(OLE_COLOR SrcOleColor, SYSTEMCOLOR *pDestSysColor);
+OLE_COLOR ConvertSystemColorToOleColor(SYSTEMCOLOR *pSrcSysColor);
+OLE_COLOR ConvertSystemColorToOleColor(OBJECTREF *pSrcObj);
+
+//--------------------------------------------------------------------------------
+// This method generates a stringized version of a class interface that contains
+// the signatures of all the methods and fields.
+ULONG GetStringizedClassItfDef(TypeHandle InterfaceType, CQuickArray<BYTE> &rDef);
+
+//--------------------------------------------------------------------------------
+// Helper to get the GUID of a class interface.
+void GenerateClassItfGuid(TypeHandle InterfaceType, GUID *pGuid);
+
+// Try/Catch wrapped version of the method.
+HRESULT TryGenerateClassItfGuid(TypeHandle InterfaceType, GUID *pGuid);
+
+//--------------------------------------------------------------------------------
+// Helper to get the GUID of the typelib that is created from an assembly.
+HRESULT GetTypeLibGuidForAssembly(Assembly *pAssembly, GUID *pGuid);
+
+//--------------------------------------------------------------------------------
+// Helper to get the version of the typelib that is created from an assembly.
+HRESULT GetTypeLibVersionForAssembly(Assembly *pAssembly, USHORT *pMajorVersion, USHORT *pMinorVersion);
+
+//---------------------------------------------------------------------------
+// This method determines if a member is visible from COM.
+BOOL IsMethodVisibleFromCom(MethodDesc *pMD);
+
+//---------------------------------------------------------------------------
+// This method determines if a type is visible from COM or not based on
+// its visibility. This version of the method works with a type handle.
+// This version will ignore a type's generic attributes.
+//
+// This API should *never* be called directly!!!
+BOOL SpecialIsGenericTypeVisibleFromCom(TypeHandle hndType);
+
+//---------------------------------------------------------------------------
+// This method determines if a type is visible from COM or not based on
+// its visibility. This version of the method works with a type handle.
+BOOL IsTypeVisibleFromCom(TypeHandle hndType);
+
+//---------------------------------------------------------------------------
+// Determines if a method is likely to be used for forward COM/WinRT interop.
+BOOL MethodNeedsForwardComStub(MethodDesc *pMD, DataImage *pImage);
+
+//---------------------------------------------------------------------------
+// Determines if a method is visible from COM in a way that requires a marshaling stub.
+BOOL MethodNeedsReverseComStub(MethodDesc *pMD);
+
+//--------------------------------------------------------------------------------
+// InvokeDispMethod will convert a set of managed objects and call IDispatch. The
+// result will be returned as a COM+ Variant pointed to by pRetVal.
+void IUInvokeDispMethod(REFLECTCLASSBASEREF* pRefClassObj, OBJECTREF* pTarget,OBJECTREF* pName, DISPID *pMemberID, OBJECTREF* pArgs, OBJECTREF* pModifiers,
+ OBJECTREF* pNamedArgs, OBJECTREF* pRetVal, LCID lcid, WORD flags, BOOL bIgnoreReturn, BOOL bIgnoreCase);
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+// Class Factory helpers
+
+//--------------------------------------------------------------------------
+// GetComClassFromProgID is used by the reflection class to set up a Class based on a ProgID.
+void GetComClassFromProgID(STRINGREF srefProgID, STRINGREF srefServer, OBJECTREF* pRef);
+
+//--------------------------------------------------------------------------
+// GetComClassFromCLSID is used by the reflection class to set up a Class based on a CLSID.
+void GetComClassFromCLSID(REFCLSID clsid, STRINGREF srefServer, OBJECTREF* pRef);
+
+// Helper used by GetComClassFromProgID and GetComClassFromCLSID
+void GetComClassHelper(OBJECTREF *pRef,
+ EEClassFactoryInfoHashTable *pClassFactHash,
+ ClassFactoryInfo *pClassFactInfo,
+ __in_opt __in_z WCHAR *wszProgID);
+
+//-------------------------------------------------------------
+// Check if a ComClassFactory/WinRTClassFactory has been set up for this class;
+// if not, set one up.
+ClassFactoryBase *GetComClassFactory(MethodTable* pClassMT);
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+
+// logging APIs
+
+#ifdef _DEBUG
+
+VOID LogInterop(__in_z LPSTR szMsg);
+VOID LogInterop(__in_z LPWSTR szMsg);
+
+VOID LogInteropLeak(IUnkEntry * pEntry);
+VOID LogInteropLeak(IUnknown* pItf);
+VOID LogInteropQI(IUnknown* pItf, REFIID riid, HRESULT hr, __in_z LPCSTR szMsg);
+VOID LogInteropAddRef(IUnknown* pItf, ULONG cbRef, __in_z LPCSTR szMsg);
+VOID LogInteropRelease(IUnknown* pItf, ULONG cbRef, __in_z LPCSTR szMsg);
+
+VOID LogRCWCreate(RCW* pWrap, IUnknown* pUnk);
+VOID LogRCWMinorCleanup(RCW* pWrap);
+VOID LogRCWDestroy(RCW* pWrap);
+
+#else
+
+#define LogInterop(x)
+#define LogInteropLeak(x)
+#define LogInteropQI(x, y, z, w)
+#define LogInteropAddRef(x, y, z)
+#define LogInteropRelease(x, y, z)
+#define LogRCWCreate(x, y)
+#define LogRCWMinorCleanup(x)
+#define LogRCWDestroy(x)
+
+#endif
+
+//--------------------------------------------------------------------------------
+// Ensure COM is started up.
+HRESULT EnsureComStartedNoThrow(BOOL fCoInitCurrentThread = TRUE);
+VOID EnsureComStarted(BOOL fCoInitCurrentThread = TRUE);
+
+//--------------------------------------------------------------------------------
+// Check if the class is, or extends, a COM imported class.
+BOOL ExtendsComImport(MethodTable* pMT);
+
+//--------------------------------------------------------------------------------
+// Gets the CLSID from the specified Prog ID.
+HRESULT GetCLSIDFromProgID(__in_z WCHAR *strProgId, GUID *pGuid);
+
+//--------------------------------------------------------------------------------
+// Check if the pUnk implements IProvideClassInfo and try to figure
+// out the class from there
+MethodTable* GetClassFromIProvideClassInfo(IUnknown* pUnk);
+
+//--------------------------------------------------------------------------------
+// Try to load a WinRT type.
+TypeHandle GetWinRTType(SString* ssTypeName, BOOL bThrowIfNotFound);
+
+//--------------------------------------------------------------------------------
+// Try to get the class from IInspectable.
+TypeHandle GetClassFromIInspectable(IUnknown* pUnk, bool *pfSupportsIInspectable, bool *pfSupportsIReference, bool *pfSupportsIReferenceArray);
+
+//--------------------------------------------------------------------------------
+// Build a WinRT URI for a given raw URI
+ABI::Windows::Foundation::IUriRuntimeClass *CreateWinRTUri(LPCWSTR wszUri, INT32 cchUri);
+
+// Global process GUID to identify the process
+BSTR GetProcessGUID();
+
+//--------------------------------------------------------------------------
+// Switch objects for this wrapper.
+// Used by JIT & object pooling to ensure a deactivated CCW can point to a new
+// object during reactivation.
+//--------------------------------------------------------------------------
+BOOL ReconnectWrapper(OBJECTREF* pOldRef, OBJECTREF* pNewRef);
+
+// Generates GUIDs for parameterized WinRT types.
+class WinRTGuidGenerator
+{
+ class MetaDataLocator : public IRoMetaDataLocator
+ {
+ // IRoMetaDataLocator implementation:
+ STDMETHOD(Locate)(PCWSTR nameElement, IRoSimpleMetaDataBuilder &metaDataDestination) const;
+
+ // helper methods:
+ static HRESULT LocateTypeWithDefaultInterface(MethodTable *pMT, LPCWSTR pszName, IRoSimpleMetaDataBuilder &metaDataDestination);
+ static HRESULT LocateStructure(MethodTable *pMT, LPCWSTR pszName, IRoSimpleMetaDataBuilder &metaDataDestination);
+ static HRESULT LocateRedirectedType(MethodTable *pMT, IRoSimpleMetaDataBuilder &metaDataDestination);
+ };
+
+ static void PopulateNames(MethodTable *pMT, SArray<BYTE> &namesBuf, PCWSTR* &pszNames, COUNT_T &cNames);
+ static void PopulateNamesAppendNamePointers(MethodTable *pMT, SArray<BYTE> &namesBuf, PCWSTR* &pszNames, COUNT_T cNames);
+ static void PopulateNamesAppendTypeName(MethodTable *pMT, SArray<BYTE> &namesBuf, COUNT_T &cNames);
+public:
+ //--------------------------------------------------------------------------
+ // pGuid is filled with the constructed IID by the function.
+ static void ComputeGuidForGenericType(MethodTable *pMT, GUID *pGuid);
+}; // class WinRTGuidGenerator
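+
+// Illustrative note (assumption): the GUID constructed here follows the WinRT
+// parameterized-type ("pinterface") IID algorithm, so the result for a generic
+// instantiation such as IIterable`1<String> matches the IID WinRT itself
+// assigns to Windows.Foundation.Collections.IIterable<String>. A hypothetical
+// caller:
+//
+//     GUID iid;
+//     WinRTGuidGenerator::ComputeGuidForGenericType(pMT, &iid);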
+
+
+// Includes types which hold a "ComObject" class
+// and types which are imported through a typelib.
+BOOL IsComWrapperClass(TypeHandle type);
+
+// Includes types which hold a "__ComObject" class.
+BOOL IsComObjectClass(TypeHandle type);
+
+IUnknown* MarshalObjectToInterface(OBJECTREF* ppObject, MethodTable* pItfMT, MethodTable* pClassMT, DWORD dwFlags);
+void UnmarshalObjectFromInterface(OBJECTREF *ppObjectDest, IUnknown **ppUnkSrc, MethodTable *pItfMT, MethodTable *pClassMT, DWORD dwFlags);
+
+#define DEFINE_ASM_QUAL_TYPE_NAME(varname, typename, asmname, version, publickeytoken) static const char varname##[] = { typename##", "##asmname##", Culture=neutral, PublicKeyToken="##publickeytoken##", Version="##version };
+
+class ICOMInterfaceMarshalerCallback;
+void GetNativeWinRTFactoryObject(MethodTable *pMT, Thread *pThread, MethodTable *pFactoryIntfMT, BOOL bNeedUniqueRCW, ICOMInterfaceMarshalerCallback *pCallback, OBJECTREF *prefFactory);
+
+#else // FEATURE_COMINTEROP
+
+inline HRESULT EnsureComStartedNoThrow()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return S_OK;
+}
+
+inline VOID EnsureComStarted()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+#define LogInteropRelease(x, y, z)
+
+#endif // FEATURE_COMINTEROP
+
+#endif // _H_INTEROP_UTIL
diff --git a/src/vm/interoputil.inl b/src/vm/interoputil.inl
new file mode 100644
index 0000000000..ed0ab029c2
--- /dev/null
+++ b/src/vm/interoputil.inl
@@ -0,0 +1,80 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+#include "comcallablewrapper.h"
+
+#ifndef DACCESS_COMPILE
+inline BOOL ComInterfaceSlotIs(IUnknown* pUnk, int slot, LPVOID pvFunction)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ }
+ CONTRACTL_END;
+
+ LPVOID pvRetVal = (*((LPVOID**)pUnk))[slot];
+
+ return (pvRetVal == (LPVOID)GetEEFuncEntryPoint(pvFunction));
+}
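+
+// Illustrative example: the helpers below use ComInterfaceSlotIs with
+// TEAR_OFF_SLOT (slot 1, the AddRef slot) to classify an interface pointer by
+// which AddRef implementation its vtable carries, e.g. Unknown_AddRefSpecial
+// marks a "simple" tear-off.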
+
+// Helpers
+// Is the tear-off a CLR created tear-off
+inline BOOL IsInProcCCWTearOff(IUnknown* pUnk)
+{
+ WRAPPER_NO_CONTRACT;
+ return ComInterfaceSlotIs(pUnk, 0, Unknown_QueryInterface) ||
+ ComInterfaceSlotIs(pUnk, 0, Unknown_QueryInterface_IErrorInfo) ||
+ ComInterfaceSlotIs(pUnk, 0, Unknown_QueryInterface_ICCW);
+}
+
+// Does the tear-off represent one of the standard interfaces such as IProvideClassInfo, IErrorInfo, etc.?
+inline BOOL IsSimpleTearOff(IUnknown* pUnk)
+{
+ WRAPPER_NO_CONTRACT;
+ return ComInterfaceSlotIs(pUnk, TEAR_OFF_SLOT, TEAR_OFF_SIMPLE);
+}
+
+// Does the tear-off represent the inner unknown or the original unknown for the object?
+inline BOOL IsInnerUnknown(IUnknown* pUnk)
+{
+ WRAPPER_NO_CONTRACT;
+ return ComInterfaceSlotIs(pUnk, TEAR_OFF_SLOT, TEAR_OFF_SIMPLE_INNER);
+}
+
+// Is this one of our "standard" ComCallWrappers
+inline BOOL IsStandardTearOff(IUnknown* pUnk)
+{
+ WRAPPER_NO_CONTRACT;
+ return ComInterfaceSlotIs(pUnk, TEAR_OFF_SLOT, TEAR_OFF_STANDARD);
+}
+
+// Convert an IUnknown to a CCW; does not handle aggregation or ICustomQI.
+FORCEINLINE ComCallWrapper* MapIUnknownToWrapper(IUnknown* pUnk)
+{
+ CONTRACT (ComCallWrapper*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk, NULL_OK));
+ }
+ CONTRACT_END;
+
+ if (IsStandardTearOff(pUnk))
+ RETURN ComCallWrapper::GetWrapperFromIP(pUnk);
+
+ if (IsSimpleTearOff(pUnk) || IsInnerUnknown(pUnk))
+ RETURN SimpleComCallWrapper::GetWrapperFromIP(pUnk)->GetMainWrapper();
+
+ RETURN NULL;
+}
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/interpreter.cpp b/src/vm/interpreter.cpp
new file mode 100644
index 0000000000..4d6a47d298
--- /dev/null
+++ b/src/vm/interpreter.cpp
@@ -0,0 +1,12272 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+#include "common.h"
+
+#ifdef FEATURE_INTERPRETER
+
+#include "interpreter.h"
+#include "interpreter.hpp"
+#include "cgencpu.h"
+#include "stublink.h"
+#include "openum.h"
+#include "fcall.h"
+#include "frames.h"
+#include "gc.h"
+#include <float.h>
+#include "jitinterface.h"
+#include "safemath.h"
+#include "exceptmacros.h"
+#include "runtimeexceptionkind.h"
+#include "runtimehandles.h"
+#include "vars.hpp"
+#include "cycletimer.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+
+inline CORINFO_CALLINFO_FLAGS combine(CORINFO_CALLINFO_FLAGS flag1, CORINFO_CALLINFO_FLAGS flag2)
+{
+ return (CORINFO_CALLINFO_FLAGS) (flag1 | flag2);
+}
+
+static CorInfoType asCorInfoType(CORINFO_CLASS_HANDLE clsHnd)
+{
+ TypeHandle typeHnd(clsHnd);
+ return CEEInfo::asCorInfoType(typeHnd.GetInternalCorElementType(), typeHnd, NULL);
+}
+
+InterpreterMethodInfo::InterpreterMethodInfo(CEEInfo* comp, CORINFO_METHOD_INFO* methInfo)
+ : m_method(methInfo->ftn),
+ m_module(methInfo->scope),
+ m_ILCode(methInfo->ILCode),
+ m_ILCodeEnd(methInfo->ILCode + methInfo->ILCodeSize),
+ m_maxStack(methInfo->maxStack),
+ m_numArgs(methInfo->args.numArgs),
+ m_flags(0),
+ m_argDescs(NULL),
+ m_numLocals(methInfo->locals.numArgs),
+ m_returnType(methInfo->args.retType),
+ m_invocations(0),
+ m_jittedCode(0),
+#if INTERP_PROFILE
+ m_totIlInstructionsExeced(0),
+ m_maxIlInstructionsExeced(0),
+#endif
+ m_ehClauseCount(methInfo->EHcount),
+ m_varArgHandleArgNum(NO_VA_ARGNUM),
+ m_methodCache(NULL)
+{
+ // Overflow sanity check. (Can ILCodeSize ever be zero?)
+ assert(m_ILCode <= m_ILCodeEnd);
+
+ // Does the calling convention indicate an implicit "this" (first arg) or generic type context arg (last arg)?
+ SetFlag<Flag_hasThisArg>((methInfo->args.callConv & CORINFO_CALLCONV_HASTHIS) != 0);
+ if (GetFlag<Flag_hasThisArg>())
+ {
+ GCX_PREEMP();
+ CORINFO_CLASS_HANDLE methClass = comp->getMethodClass(methInfo->ftn);
+ DWORD attribs = comp->getClassAttribs(methClass);
+ SetFlag<Flag_thisArgIsObjPtr>((attribs & CORINFO_FLG_VALUECLASS) == 0);
+ }
+
+#if INTERP_PROFILE || defined(_DEBUG)
+ {
+ const char* clsName;
+#if defined(_DEBUG)
+ m_methName = ::eeGetMethodFullName(comp, methInfo->ftn, &clsName);
+#else
+ m_methName = comp->getMethodName(methInfo->ftn, &clsName);
+#endif
+ char* myClsName = new char[strlen(clsName) + 1];
+ strcpy(myClsName, clsName);
+ m_clsName = myClsName;
+ }
+#endif // INTERP_PROFILE
+
+ // Do we have a ret buff? If it's a struct or refany, then *maybe*, depending on architecture...
+ bool hasRetBuff = (methInfo->args.retType == CORINFO_TYPE_VALUECLASS || methInfo->args.retType == CORINFO_TYPE_REFANY);
+#if defined(FEATURE_HFA)
+ // ... unless it's an HFA type (and not varargs)...
+ if (hasRetBuff && CorInfoTypeIsFloatingPoint(comp->getHFAType(methInfo->args.retTypeClass)) && methInfo->args.getCallConv() != CORINFO_CALLCONV_VARARG)
+ {
+ hasRetBuff = false;
+ }
+#endif
+#if defined(_ARM_) || defined(_AMD64_) || defined(_ARM64_)
+ // ...or it fits into one register.
+ if (hasRetBuff && getClassSize(methInfo->args.retTypeClass) <= sizeof(void*))
+ {
+ hasRetBuff = false;
+ }
+#endif
+ SetFlag<Flag_hasRetBuffArg>(hasRetBuff);
+
+ MetaSig sig(reinterpret_cast<MethodDesc*>(methInfo->ftn));
+ SetFlag<Flag_hasGenericsContextArg>((methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0);
+ SetFlag<Flag_isVarArg>((methInfo->args.callConv & CORINFO_CALLCONV_VARARG) != 0);
+ SetFlag<Flag_typeHasGenericArgs>(methInfo->args.sigInst.classInstCount > 0);
+ SetFlag<Flag_methHasGenericArgs>(methInfo->args.sigInst.methInstCount > 0);
+ _ASSERTE_MSG(!GetFlag<Flag_hasGenericsContextArg>()
+ || ((GetFlag<Flag_typeHasGenericArgs>() & !(GetFlag<Flag_hasThisArg>() && GetFlag<Flag_thisArgIsObjPtr>())) || GetFlag<Flag_methHasGenericArgs>()),
+ "If the method takes a generic parameter, is a static method of generic class (or meth of a value class), and/or itself takes generic parameters");
+
+ if (GetFlag<Flag_hasThisArg>())
+ {
+ m_numArgs++;
+ }
+ if (GetFlag<Flag_hasRetBuffArg>())
+ {
+ m_numArgs++;
+ }
+ if (GetFlag<Flag_isVarArg>())
+ {
+ m_numArgs++;
+ }
+ if (GetFlag<Flag_hasGenericsContextArg>())
+ {
+ m_numArgs++;
+ }
+ if (m_numArgs == 0)
+ {
+ m_argDescs = NULL;
+ }
+ else
+ {
+ m_argDescs = new ArgDesc[m_numArgs];
+ }
+
+ // Now we'll do the locals.
+ m_localDescs = new LocalDesc[m_numLocals];
+ // Allocate space for the pinning reference bits (lazily).
+ m_localIsPinningRefBits = NULL;
+
+ // Now look at each local.
+ CORINFO_ARG_LIST_HANDLE localsPtr = methInfo->locals.args;
+ CORINFO_CLASS_HANDLE vcTypeRet;
+ unsigned curLargeStructOffset = 0;
+ for (unsigned k = 0; k < methInfo->locals.numArgs; k++)
+ {
+ // TODO: if this optimization succeeds, the switch below on localType
+ // can become much simpler.
+ m_localDescs[k].m_offset = 0;
+#ifdef _DEBUG
+ vcTypeRet = NULL;
+#endif
+ CorInfoTypeWithMod localTypWithMod = comp->getArgType(&methInfo->locals, localsPtr, &vcTypeRet);
+ // If the local var is a pinning reference, set the bit to indicate this.
+ if ((localTypWithMod & CORINFO_TYPE_MOD_PINNED) != 0)
+ {
+ SetPinningBit(k);
+ }
+
+ CorInfoType localType = strip(localTypWithMod);
+ switch (localType)
+ {
+ case CORINFO_TYPE_VALUECLASS:
+ case CORINFO_TYPE_REFANY: // Just a special case: vcTypeRet is handle for TypedReference in this case...
+ {
+ InterpreterType tp = InterpreterType(comp, vcTypeRet);
+ unsigned size = static_cast<unsigned>(tp.Size(comp));
+ size = max(size, sizeof(void*));
+ m_localDescs[k].m_type = tp;
+ if (tp.IsLargeStruct(comp))
+ {
+ m_localDescs[k].m_offset = curLargeStructOffset;
+ curLargeStructOffset += size;
+ }
+ }
+ break;
+
+ case CORINFO_TYPE_VAR:
+ NYI_INTERP("argument of generic parameter type"); // Should not happen;
+ break;
+
+ default:
+ m_localDescs[k].m_type = InterpreterType(localType);
+ break;
+ }
+ m_localDescs[k].m_typeStackNormal = m_localDescs[k].m_type.StackNormalize();
+ localsPtr = comp->getArgNext(localsPtr);
+ }
+ m_largeStructLocalSize = curLargeStructOffset;
+}
+
+void InterpreterMethodInfo::InitArgInfo(CEEInfo* comp, CORINFO_METHOD_INFO* methInfo, short* argOffsets_)
+{
+ unsigned numSigArgsPlusThis = methInfo->args.numArgs;
+ if (GetFlag<Flag_hasThisArg>())
+ {
+ numSigArgsPlusThis++;
+ }
+
+ // The m_argDescs array is constructed in the following "canonical" order:
+ // 1. 'this' pointer
+ // 2. signature arguments
+ // 3. return buffer
+ // 4. type parameter -or- vararg cookie
+ //
+ // argOffsets_ is passed in this order, and serves to establish the offsets to arguments
+ // when the interpreter is invoked using the native calling convention (i.e., not directly).
+ //
+ // When the interpreter is invoked directly, the arguments will appear in the same order
+ // and form as arguments passed to MethodDesc::CallDescr(). This ordering is as follows:
+ // 1. 'this' pointer
+ // 2. return buffer
+ // 3. signature arguments
+ //
+ // MethodDesc::CallDescr() does not support generic parameters or varargs functions.
+
+ _ASSERTE_MSG((methInfo->args.callConv & (CORINFO_CALLCONV_EXPLICITTHIS)) == 0,
+ "Don't yet handle EXPLICITTHIS calling convention modifier.");
+ switch (methInfo->args.callConv & CORINFO_CALLCONV_MASK)
+ {
+ case CORINFO_CALLCONV_DEFAULT:
+ case CORINFO_CALLCONV_VARARG:
+ {
+ unsigned k = 0;
+ ARG_SLOT* directOffset = NULL;
+ short directRetBuffOffset = 0;
+ short directVarArgOffset = 0;
+ short directTypeParamOffset = 0;
+
+ // If there's a "this" argument, handle it.
+ if (GetFlag<Flag_hasThisArg>())
+ {
+ m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_UNDEF);
+#ifdef FEATURE_STUBS_AS_IL
+ MethodDesc *pMD = reinterpret_cast<MethodDesc*>(methInfo->ftn);
+ // The signatures of IL stubs may be misleading.
+ // If a StubTarget is ever set, we'll find the correct type by inspecting the
+ // target, rather than the stub.
+ if (pMD->IsILStub())
+ {
+
+ if (pMD->AsDynamicMethodDesc()->IsUnboxingILStub())
+ {
+ // This is an unboxing stub where the thisptr is passed as a boxed VT.
+ m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_CLASS);
+ }
+ else
+ {
+ MethodDesc *pTargetMD = pMD->AsDynamicMethodDesc()->GetILStubResolver()->GetStubTargetMethodDesc();
+ if (pTargetMD != NULL)
+ {
+ if (pTargetMD->GetMethodTable()->IsValueType())
+ {
+ m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_BYREF);
+ }
+ else
+ {
+ m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_CLASS);
+ }
+
+ }
+ }
+ }
+
+#endif // FEATURE_STUBS_AS_IL
+ if (m_argDescs[k].m_type == InterpreterType(CORINFO_TYPE_UNDEF))
+ {
+ CORINFO_CLASS_HANDLE cls = comp->getMethodClass(methInfo->ftn);
+ DWORD attribs = comp->getClassAttribs(cls);
+ if (attribs & CORINFO_FLG_VALUECLASS)
+ {
+ m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_BYREF);
+ }
+ else
+ {
+ m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_CLASS);
+ }
+ }
+ m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
+ m_argDescs[k].m_nativeOffset = argOffsets_[k];
+ m_argDescs[k].m_directOffset = reinterpret_cast<short>(ArgSlotEndianessFixup(directOffset, sizeof(void*)));
+ directOffset++;
+ k++;
+ }
+
+ // If there is a return buffer, it will appear next in the arguments list for a direct call.
+ // Reserve its offset now, for use after the explicit arguments.
+#if defined(_ARM_)
+ // On ARM, for direct calls we always treat HFA return types as having ret buffs.
+ // So figure out if we have an HFA return type.
+ bool hasHFARetType =
+ methInfo->args.retType == CORINFO_TYPE_VALUECLASS
+ && CorInfoTypeIsFloatingPoint(comp->getHFAType(methInfo->args.retTypeClass))
+ && methInfo->args.getCallConv() != CORINFO_CALLCONV_VARARG;
+#endif // defined(_ARM_)
+
+ if (GetFlag<Flag_hasRetBuffArg>()
+#if defined(_ARM_)
+ // On ARM, for direct calls we always treat HFA return types as having ret buffs.
+ || hasHFARetType
+#endif // defined(_ARM_)
+ )
+ {
+ directRetBuffOffset = reinterpret_cast<short>(ArgSlotEndianessFixup(directOffset, sizeof(void*)));
+ directOffset++;
+ }
+#if defined(_AMD64_)
+ if (GetFlag<Flag_isVarArg>())
+ {
+ directVarArgOffset = reinterpret_cast<short>(ArgSlotEndianessFixup(directOffset, sizeof(void*)));
+ directOffset++;
+ }
+ if (GetFlag<Flag_hasGenericsContextArg>())
+ {
+ directTypeParamOffset = reinterpret_cast<short>(ArgSlotEndianessFixup(directOffset, sizeof(void*)));
+ directOffset++;
+ }
+#endif
+
+ // Now record the argument types for the rest of the arguments.
+ InterpreterType it;
+ CORINFO_CLASS_HANDLE vcTypeRet;
+ CORINFO_ARG_LIST_HANDLE argPtr = methInfo->args.args;
+ for (; k < numSigArgsPlusThis; k++)
+ {
+ CorInfoTypeWithMod argTypWithMod = comp->getArgType(&methInfo->args, argPtr, &vcTypeRet);
+ CorInfoType argType = strip(argTypWithMod);
+ switch (argType)
+ {
+ case CORINFO_TYPE_VALUECLASS:
+ case CORINFO_TYPE_REFANY: // Just a special case: vcTypeRet is handle for TypedReference in this case...
+ it = InterpreterType(comp, vcTypeRet);
+ break;
+ default:
+ // Everything else is just encoded as a shifted CorInfoType.
+ it = InterpreterType(argType);
+ break;
+ }
+ m_argDescs[k].m_type = it;
+ m_argDescs[k].m_typeStackNormal = it.StackNormalize();
+ m_argDescs[k].m_nativeOffset = argOffsets_[k];
+ // When invoking the interpreter directly, large value types are always passed by reference.
+ if (it.IsLargeStruct(comp))
+ {
+ m_argDescs[k].m_directOffset = reinterpret_cast<short>(ArgSlotEndianessFixup(directOffset, sizeof(void*)));
+ }
+ else
+ {
+ m_argDescs[k].m_directOffset = reinterpret_cast<short>(ArgSlotEndianessFixup(directOffset, it.Size(comp)));
+ }
+ argPtr = comp->getArgNext(argPtr);
+ directOffset++;
+ }
+
+ if (GetFlag<Flag_hasRetBuffArg>())
+ {
+ // The return buffer is a managed pointer (byref).
+ m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_BYREF);
+ m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
+ m_argDescs[k].m_nativeOffset = argOffsets_[k];
+ m_argDescs[k].m_directOffset = directRetBuffOffset;
+ k++;
+ }
+
+ if (GetFlag<Flag_hasGenericsContextArg>())
+ {
+ // The generic type context is an unmanaged pointer (native int).
+ m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_NATIVEINT);
+ m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
+ m_argDescs[k].m_nativeOffset = argOffsets_[k];
+ m_argDescs[k].m_directOffset = directTypeParamOffset;
+ directOffset++;
+ k++;
+ }
+ if (GetFlag<Flag_isVarArg>())
+ {
+ // The vararg cookie is an unmanaged pointer (native int).
+ m_argDescs[k].m_type = InterpreterType(CORINFO_TYPE_NATIVEINT);
+ m_argDescs[k].m_typeStackNormal = m_argDescs[k].m_type;
+ m_argDescs[k].m_nativeOffset = argOffsets_[k];
+ m_argDescs[k].m_directOffset = directVarArgOffset;
+ k++;
+ }
+ }
+ break;
+
+ case CORINFO_CALLCONV_C:
+ NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_C");
+ break;
+
+ case CORINFO_CALLCONV_STDCALL:
+ NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_STDCALL");
+ break;
+
+ case CORINFO_CALLCONV_THISCALL:
+ NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_THISCALL");
+ break;
+
+ case CORINFO_CALLCONV_FASTCALL:
+ NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_FASTCALL");
+ break;
+
+ case CORINFO_CALLCONV_FIELD:
+ NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_FIELD");
+ break;
+
+ case CORINFO_CALLCONV_LOCAL_SIG:
+ NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_LOCAL_SIG");
+ break;
+
+ case CORINFO_CALLCONV_PROPERTY:
+ NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_PROPERTY");
+ break;
+
+ case CORINFO_CALLCONV_NATIVEVARARG:
+ NYI_INTERP("InterpreterMethodInfo::InitArgInfo -- CORINFO_CALLCONV_NATIVEVARARG");
+ break;
+
+ default:
+ _ASSERTE_ALL_BUILDS(__FILE__, false); // shouldn't get here
+ }
+}
+
+InterpreterMethodInfo::~InterpreterMethodInfo()
+{
+ if (m_methodCache != NULL)
+ {
+ delete m_methodCache;
+ }
+}
+
+void InterpreterMethodInfo::AllocPinningBitsIfNeeded()
+{
+ if (m_localIsPinningRefBits != NULL)
+ return;
+
+ unsigned numChars = (m_numLocals + 7) / 8;
+ m_localIsPinningRefBits = new char[numChars];
+ for (unsigned i = 0; i < numChars; i++)
+ {
+ m_localIsPinningRefBits[i] = char(0);
+ }
+}
+
+
+void InterpreterMethodInfo::SetPinningBit(unsigned locNum)
+{
+ _ASSERTE_MSG(locNum < m_numLocals, "Precondition");
+ AllocPinningBitsIfNeeded();
+
+ unsigned ind = locNum / 8;
+ unsigned bitNum = locNum - (ind * 8);
+ m_localIsPinningRefBits[ind] |= (1 << bitNum);
+}
+
+bool InterpreterMethodInfo::GetPinningBit(unsigned locNum)
+{
+ _ASSERTE_MSG(locNum < m_numLocals, "Precondition");
+ if (m_localIsPinningRefBits == NULL)
+ return false;
+
+ unsigned ind = locNum / 8;
+ unsigned bitNum = locNum - (ind * 8);
+ return (m_localIsPinningRefBits[ind] & (1 << bitNum)) != 0;
+}
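+
+// Worked example of the bit addressing above (illustrative): for locNum == 11,
+// ind == 11 / 8 == 1 and bitNum == 11 - 8 == 3, so the pinning flag for local 11
+// lives in bit 3 of byte 1 of m_localIsPinningRefBits.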
+
+void Interpreter::ArgState::AddArg(unsigned canonIndex, short numSlots, bool noReg, bool twoSlotAlign)
+{
+#if defined(_AMD64_)
+ assert(!noReg);
+ assert(!twoSlotAlign);
+ AddArgAmd64(canonIndex, numSlots, /*isFloatingType*/false);
+#else // !_AMD64_
+#if defined(_X86_) || defined(_ARM64_)
+ assert(!twoSlotAlign); // Shouldn't use this flag on x86 (it wouldn't work right on the stack, at least).
+#endif
+ // If the argument requires two-slot alignment, make sure we have it. This is the
+ // ARM model: both in regs and on the stack.
+ if (twoSlotAlign)
+ {
+ if (!noReg && numRegArgs < NumberOfIntegerRegArgs())
+ {
+ if ((numRegArgs % 2) != 0)
+ {
+ numRegArgs++;
+ }
+ }
+ else
+ {
+ if ((callerArgStackSlots % 2) != 0)
+ {
+ callerArgStackSlots++;
+ }
+ }
+ }
+
+#if defined(_ARM64_)
+ // On ARM64 we're not going to place an argument 'partially' on the stack:
+ // if all of its slots fit into registers, they go into registers; otherwise they all go onto the stack.
+ if (!noReg && numRegArgs+numSlots <= NumberOfIntegerRegArgs())
+#else
+ if (!noReg && numRegArgs < NumberOfIntegerRegArgs())
+#endif
+ {
+ argIsReg[canonIndex] = ARS_IntReg;
+ argOffsets[canonIndex] = numRegArgs * sizeof(void*);
+ numRegArgs += numSlots;
+ // If we overflowed the regs, we consume some stack arg space.
+ if (numRegArgs > NumberOfIntegerRegArgs())
+ {
+ callerArgStackSlots += (numRegArgs - NumberOfIntegerRegArgs());
+ }
+ }
+ else
+ {
+#if defined(_X86_)
+ // On X86, stack args are pushed in order. We will add the total size of the arguments to this offset,
+ // so we set this to a negative number relative to the SP before the first arg push.
+ callerArgStackSlots += numSlots;
+ ClrSafeInt<short> offset(-callerArgStackSlots);
+#elif defined(_ARM_) || defined(_ARM64_)
+ // On ARM, args are pushed in *reverse* order. So we will create an offset relative to the address
+ // of the first stack arg; later, we will add the size of the non-stack arguments.
+ ClrSafeInt<short> offset(callerArgStackSlots);
+#endif
+ offset *= static_cast<short>(sizeof(void*));
+ assert(!offset.IsOverflow());
+ argOffsets[canonIndex] = offset.Value();
+#if defined(_ARM_) || defined(_ARM64_)
+ callerArgStackSlots += numSlots;
+#endif // defined(_ARM_) || defined(_ARM64_)
+ }
+#endif // !_AMD64_
+}
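+
+// Worked example (ARM, illustrative): for a signature like f(int a, __int64 b),
+// 'a' lands in r0; 'b' requires two-slot alignment, so numRegArgs is bumped past
+// r1 and 'b' occupies the r2/r3 pair, matching the AAPCS doubleword rule.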
+
+#if defined(_AMD64_)
+// The AMD64 calling convention allows any type that fits in 64 bits to be passed in registers;
+// types that don't fit, or whose size is not a power of 2, are passed by reference on the stack.
+// RCX, RDX, R8, R9 are the int arg registers. XMM0-3 overlap with the integer registers and are used
+// for floating point arguments.
+void Interpreter::ArgState::AddArgAmd64(unsigned canonIndex, unsigned short numSlots, bool isFloatingType)
+{
+ // If floating type and there are slots use a float reg slot.
+ if (isFloatingType && (numFPRegArgSlots < MaxNumFPRegArgSlots))
+ {
+ assert(numSlots == 1);
+ argIsReg[canonIndex] = ARS_FloatReg;
+ argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
+ fpArgsUsed |= (0x1 << (numFPRegArgSlots + 1));
+ numFPRegArgSlots += 1;
+ numRegArgs += 1; // Increment int reg count due to shadowing.
+ return;
+ }
+
+ // If we have an integer/aligned-struct arg, or a reference to a struct that got copied
+ // onto the stack, it goes into a register or a stack slot.
+ if (numRegArgs != NumberOfIntegerRegArgs())
+ {
+ argIsReg[canonIndex] = ARS_IntReg;
+ argOffsets[canonIndex] = numRegArgs * sizeof(void*);
+ numRegArgs += 1;
+ numFPRegArgSlots += 1; // Increment FP reg count due to shadowing.
+ }
+ else
+ {
+ argIsReg[canonIndex] = ARS_NotReg;
+ ClrSafeInt<short> offset(callerArgStackSlots * sizeof(void*));
+ assert(!offset.IsOverflow());
+ argOffsets[canonIndex] = offset.Value();
+ callerArgStackSlots += 1;
+ }
+}
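+
+// Worked example (illustrative): for f(int a, double b, int c) on Windows AMD64,
+// 'a' takes argument slot 0 (RCX), 'b' takes slot 1 as XMM1 (shadowing RDX), and
+// 'c' takes slot 2 (R8); each argument consumes both an integer and an FP slot,
+// which is why both counters are incremented above.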
+#endif
+
+void Interpreter::ArgState::AddFPArg(unsigned canonIndex, unsigned short numSlots, bool twoSlotAlign)
+{
+#if defined(_AMD64_)
+ assert(!twoSlotAlign);
+ assert(numSlots == 1);
+ AddArgAmd64(canonIndex, numSlots, /*isFloatingType*/ true);
+#elif defined(_X86_)
+ assert(false); // Don't call this on x86; we pass all FP on the stack.
+#elif defined(_ARM_)
+ // We require "numSlots" alignment.
+ assert(numFPRegArgSlots + numSlots <= MaxNumFPRegArgSlots);
+ argIsReg[canonIndex] = ARS_FloatReg;
+
+ if (twoSlotAlign)
+ {
+ // If we require two slot alignment, the number of slots must be a multiple of two.
+ assert((numSlots % 2) == 0);
+
+ // Skip a slot if necessary.
+ if ((numFPRegArgSlots % 2) != 0)
+ {
+ numFPRegArgSlots++;
+ }
+ // We always use new slots for two-slot-aligned (double-precision) args...
+ argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
+ for (unsigned short i = 0; i < numSlots/2; i++)
+ {
+ fpArgsUsed |= (0x3 << (numFPRegArgSlots + i));
+ }
+ numFPRegArgSlots += numSlots;
+ }
+ else
+ {
+ if (numSlots == 1)
+ {
+ // A single-precision (float) argument. We must do "back-filling" where possible, searching
+ // for previous unused registers.
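+ // Illustrative trace (assumed, not from the original comment): after passing
+ // (float, double, float), s0 is used, the double takes d1 (= s2/s3) because of
+ // two-slot alignment, and the second float back-fills the free s1 rather than s4.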
+ unsigned slot = 0;
+ while (slot < 32 && (fpArgsUsed & (1 << slot))) slot++;
+ assert(slot < 32); // Search succeeded.
+ assert(slot <= numFPRegArgSlots); // No bits at or above numFPRegArgSlots are set (regs used).
+ argOffsets[canonIndex] = slot * sizeof(void*);
+ fpArgsUsed |= (0x1 << slot);
+ if (slot == numFPRegArgSlots)
+ numFPRegArgSlots += numSlots;
+ }
+ else
+ {
+ // We can always allocate after the last used slot.
+ argOffsets[numFPRegArgSlots] = numFPRegArgSlots * sizeof(void*);
+ for (unsigned i = 0; i < numSlots; i++)
+ {
+ fpArgsUsed |= (0x1 << (numFPRegArgSlots + i));
+ }
+ numFPRegArgSlots += numSlots;
+ }
+ }
+#elif defined(_ARM64_)
+
+ assert(numFPRegArgSlots + numSlots <= MaxNumFPRegArgSlots);
+ assert(!twoSlotAlign);
+ argIsReg[canonIndex] = ARS_FloatReg;
+
+ argOffsets[canonIndex] = numFPRegArgSlots * sizeof(void*);
+ for (unsigned i = 0; i < numSlots; i++)
+ {
+ fpArgsUsed |= (0x1 << (numFPRegArgSlots + i));
+ }
+ numFPRegArgSlots += numSlots;
+
+#else
+#error "Unsupported architecture"
+#endif
+}
+
+
+// static
+CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
+ CORINFO_METHOD_INFO* info,
+ /*OUT*/ BYTE **nativeEntry,
+ /*OUT*/ ULONG *nativeSizeOfCode,
+ InterpreterMethodInfo** ppInterpMethodInfo,
+ bool jmpCall)
+{
+ //
+ // First, ensure that the compiler-specific statics are initialized.
+ //
+
+ InitializeCompilerStatics(comp);
+
+ //
+ // Next, use switches and IL scanning to determine whether to interpret this method.
+ //
+
+#if INTERP_TRACING
+#define TRACE_SKIPPED(cls, meth, reason) \
+ if (s_DumpInterpreterStubsFlag.val(CLRConfig::INTERNAL_DumpInterpreterStubs)) { \
+ fprintf(GetLogFile(), "Skipping %s:%s (%s).\n", cls, meth, reason); \
+ }
+#else
+#define TRACE_SKIPPED(cls, meth, reason)
+#endif
+
+
+ // If jmpCall, we only need to do computations involving method info.
+ if (!jmpCall)
+ {
+ const char* clsName;
+ const char* methName = comp->getMethodName(info->ftn, &clsName);
+ if ( !s_InterpretMeths.contains(methName, clsName, info->args.pSig)
+ || s_InterpretMethsExclude.contains(methName, clsName, info->args.pSig))
+ {
+ TRACE_SKIPPED(clsName, methName, "not in set of methods to interpret");
+ return CORJIT_SKIPPED;
+ }
+
+ unsigned methHash = comp->getMethodHash(info->ftn);
+ if ( methHash < s_InterpretMethHashMin.val(CLRConfig::INTERNAL_InterpreterMethHashMin)
+ || methHash > s_InterpretMethHashMax.val(CLRConfig::INTERNAL_InterpreterMethHashMax))
+ {
+ TRACE_SKIPPED(clsName, methName, "hash not within range to interpret");
+ return CORJIT_SKIPPED;
+ }
+
+ MethodDesc* pMD = reinterpret_cast<MethodDesc*>(info->ftn);
+
+#if !INTERP_ILSTUBS
+ if (pMD->IsILStub())
+ {
+ TRACE_SKIPPED(clsName, methName, "interop stubs not supported");
+ return CORJIT_SKIPPED;
+ }
+ else
+#endif // !INTERP_ILSTUBS
+
+ if (!s_InterpreterDoLoopMethods && MethodMayHaveLoop(info->ILCode, info->ILCodeSize))
+ {
+ TRACE_SKIPPED(clsName, methName, "has loop, not interpreting loop methods.");
+ return CORJIT_SKIPPED;
+ }
+
+ s_interpreterStubNum++;
+
+#if INTERP_TRACING
+ if (s_interpreterStubNum < s_InterpreterStubMin.val(CLRConfig::INTERNAL_InterpreterStubMin)
+ || s_interpreterStubNum > s_InterpreterStubMax.val(CLRConfig::INTERNAL_InterpreterStubMax))
+ {
+ TRACE_SKIPPED(clsName, methName, "stub num not in range, not interpreting.");
+ return CORJIT_SKIPPED;
+ }
+
+ if (s_DumpInterpreterStubsFlag.val(CLRConfig::INTERNAL_DumpInterpreterStubs))
+ {
+ unsigned hash = comp->getMethodHash(info->ftn);
+ fprintf(GetLogFile(), "Generating interpretation stub (# %d = 0x%x, hash = 0x%x) for %s:%s.\n",
+ s_interpreterStubNum, s_interpreterStubNum, hash, clsName, methName);
+ fflush(GetLogFile());
+ }
+#endif
+ }
+
+ //
+ // Finally, generate an interpreter entry-point stub.
+ //
+
+ // @TODO: this structure clearly needs some sort of lifetime management. It is the moral equivalent
+ // of compiled code, and should be associated with an app domain. In addition, when I get to it, we should
+ // delete it when/if we actually compile the method. (Actually, that's complicated, since there may be
+ // VSD stubs still bound to the interpreter stub. The check there will get to the jitted code, but we want
+ // to eventually clean those up at some safe point...)
+ InterpreterMethodInfo* interpMethInfo = new InterpreterMethodInfo(comp, info);
+ if (ppInterpMethodInfo != nullptr)
+ {
+ *ppInterpMethodInfo = interpMethInfo;
+ }
+ interpMethInfo->m_stubNum = s_interpreterStubNum;
+ MethodDesc* methodDesc = reinterpret_cast<MethodDesc*>(info->ftn);
+ if (!jmpCall)
+ {
+ interpMethInfo = RecordInterpreterMethodInfoForMethodHandle(info->ftn, interpMethInfo);
+ }
+
+#if FEATURE_INTERPRETER_DEADSIMPLE_OPT
+ unsigned offsetOfLd;
+ if (IsDeadSimpleGetter(comp, methodDesc, &offsetOfLd))
+ {
+ interpMethInfo->SetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetter>(true);
+ if (offsetOfLd == ILOffsetOfLdFldInDeadSimpleInstanceGetterDbg)
+ {
+ interpMethInfo->SetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetterIsDbgForm>(true);
+ }
+ else
+ {
+ assert(offsetOfLd == ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt);
+ }
+ }
+#endif // FEATURE_INTERPRETER_DEADSIMPLE_OPT
+
+ // Used to initialize the arg offset information.
+ Stub* stub = NULL;
+
+ // We assume that the stack contains (with addresses growing upwards, assuming a downwards-growing stack):
+ //
+ // [Non-reg arg N-1]
+ // ...
+ // [Non-reg arg <# of reg args>]
+ // [return PC]
+ //
+ // Then push the register args to get:
+ //
+ // [Non-reg arg N-1]
+ // ...
+ // [Non-reg arg <# of reg args>]
+ // [return PC]
+ // [reg arg <# of reg args>-1]
+ // ...
+ // [reg arg 0]
+ //
+ // Pass the address of this argument array, and the MethodDesc pointer for the method, as arguments to
+ // Interpret.
+ //
+ // So the structure of the code will look like this (in the non-ILstub case):
+ //
+#if defined(_X86_) || defined(_AMD64_)
+ // First do "short-circuiting" if the method has JITted code, and we couldn't find/update the call site:
+ // eax = &interpMethInfo
+ // eax = [eax + offsetof(m_jittedCode)]
+ // if (eax == zero) goto doInterpret:
+ // /*else*/ jmp [eax]
+ // doInterpret:
+ // push ebp
+ // mov ebp, esp
+ // [if there are register arguments in ecx or edx, push them]
+ // ecx := addr of InterpreterMethodInfo for the method to be interpreted.
+ // edx = esp /*pointer to argument structure*/
+ // call to Interpreter::InterpretMethod
+ // [if we pushed register arguments, increment esp by the right amount.]
+ // pop ebp
+ // ret <n> ; where <n> is the number of argument stack slots in the call to the stub.
+#elif defined (_ARM_)
+ // TODO.
+#endif
+
+ // The IL stub case is hard. The portion of the interpreter stub that short-circuits
+ // to JITted code requires an extra "scratch" volatile register, not an argument register;
+ // in the IL stub case, it too is using such a register, as an extra argument, to hold the stub context.
+ // On x86 and ARM, there is only one such extra volatile register, and we've got a conundrum.
+ // The cases where this short-circuiting is important is when the address of an interpreter stub
+ // becomes "embedded" in other code. The examples I know of are VSD stubs and delegates.
+ // The first of these is not a problem for IL stubs -- methods invoked via p/Invoke (the ones that
+ // [I think!] use IL stubs) are static, and cannot be invoked via VSD stubs. Delegates, on the
+ // other hand, remain a problem [I believe].
+ // For the short term, we'll ignore this issue, and never do short-circuiting for IL stubs.
+ // So interpreter stubs embedded in delegates will continue to interpret the IL stub, even after
+ // the stub has been JITted.
+ // The long-term intention is that when we JIT a method with an interpreter stub, we keep a mapping
+ // from interpreter stub address to corresponding native code address. If this mapping is non-empty,
+ // at GC time we would visit the locations in which interpreter stub addresses might be located, like
+ // VSD stubs and delegate objects, and update them to point to new addresses. This would be a necessary
+ // part of any scheme to GC interpreter stubs, and InterpreterMethodInfos.
+
+ // If we *really* wanted to make short-circuiting work for the IL stub case, we would have to
+ // (in the x86 case, which should be sufficiently illustrative):
+ // push eax
+ // <get the address of JITted code, if any, into eax>
+ // if there is JITted code in eax, we'd have to
+ // push 2 non-volatile registers, say esi and edi.
+ // copy the JITted code address from eax into esi.
+ // copy the method arguments (without the return address) down the stack, using edi
+ // as a scratch register.
+ // restore the original stub context value into eax from the stack
+ // call (not jmp) to the JITted code address in esi
+ // pop esi and edi from the stack.
+ // now the stack has original args, followed by original return address. Do a "ret"
+ // that returns to the return address, and also pops the original args from the stack.
+ // If we did this, we'd have to give this portion of the stub proper unwind info.
+ // Also, we'd have to adjust the rest of the stub to pop eax from the stack.
+
+ // TODO: much of the interpreter stub code should be shareable. In the non-IL stub case,
+ // at least, we could have a small per-method stub that puts the address of the method-specific
+ // InterpreterMethodInfo into eax, and then branches to a shared part. Probably we would want to
+ // always push all integer args on x86, as we do already on ARM. On ARM, we'd need several versions
+ // of the shared stub, for different numbers of floating point register args, crossed with
+ // different kinds of HFA return values. But these could still be shared, and the
+ // per-method stub would decide which of
+ // these to target.
+ //
+ // In the IL stub case, which uses eax, it would be problematic to do this sharing.
+
+ StubLinkerCPU sl;
+ MethodDesc* pMD = reinterpret_cast<MethodDesc*>(info->ftn);
+ if (!jmpCall)
+ {
+ sl.Init();
+#if defined(_X86_) || defined(_AMD64_)
+ // First we do "short-circuiting" if the method has JITted code.
+#if INTERP_ILSTUBS
+ if (!pMD->IsILStub()) // As discussed above, we don't do short-circuiting for IL stubs.
+#endif
+ {
+ // First read the m_jittedCode field.
+ sl.X86EmitRegLoad(kEAX, UINT_PTR(interpMethInfo));
+ sl.X86EmitOffsetModRM(0x8b, kEAX, kEAX, offsetof(InterpreterMethodInfo, m_jittedCode));
+ // If it is still zero, then go on to do the interpretation.
+ sl.X86EmitCmpRegImm32(kEAX, 0);
+ CodeLabel* doInterpret = sl.NewCodeLabel();
+ sl.X86EmitCondJump(doInterpret, X86CondCode::kJE);
+ // Otherwise...
+ sl.X86EmitJumpReg(kEAX); // tail call to JITted code.
+ sl.EmitLabel(doInterpret);
+ }
+#if defined(_X86_)
+ // Start regular interpretation
+ sl.X86EmitPushReg(kEBP);
+ sl.X86EmitMovRegReg(kEBP, static_cast<X86Reg>(kESP_Unsafe));
+#endif
+#elif defined(_ARM_)
+ // On ARM we use R12 as a "scratch" register -- callee-trashed, not used
+ // for arguments.
+ ThumbReg r11 = ThumbReg(11);
+ ThumbReg r12 = ThumbReg(12);
+
+#if INTERP_ILSTUBS
+ if (!pMD->IsILStub()) // As discussed above, we don't do short-circuiting for IL stubs.
+#endif
+ {
+ // Load the InterpreterMethodInfo into r12 and test its m_jittedCode field.
+ sl.ThumbEmitMovConstant(r12, UINT_PTR(interpMethInfo));
+ sl.ThumbEmitLoadRegIndirect(r12, r12, offsetof(InterpreterMethodInfo, m_jittedCode));
+ sl.ThumbEmitCmpImm(r12, 0); // Set condition codes.
+ // If r12 is zero, then go on to do the interpretation.
+ CodeLabel* doInterpret = sl.NewCodeLabel();
+ sl.ThumbEmitCondFlagJump(doInterpret, thumbCondEq.cond);
+ sl.ThumbEmitJumpRegister(r12); // If non-zero, tail call to JITted code.
+ sl.EmitLabel(doInterpret);
+ }
+
+ // Start regular interpretation
+
+#elif defined(_ARM64_)
+ // x8 through x15 are scratch registers on ARM64.
+ IntReg x8 = IntReg(8);
+ IntReg x9 = IntReg(9);
+
+#if INTERP_ILSTUBS
+ if (!pMD->IsILStub()) // As discussed above, we don't do short-circuiting for IL stubs.
+#endif
+ {
+ sl.EmitMovConstant(x8, UINT64(interpMethInfo));
+ sl.EmitLoadStoreRegImm(StubLinkerCPU::eLOAD, x9, x8, offsetof(InterpreterMethodInfo, m_jittedCode));
+ sl.EmitCmpImm(x9, 0);
+ CodeLabel* doInterpret = sl.NewCodeLabel();
+ sl.EmitCondFlagJump(doInterpret, CondEq.cond);
+ sl.EmitJumpRegister(x9);
+ sl.EmitLabel(doInterpret);
+ }
+
+ // Start regular interpretation
+#else
+#error unsupported platform
+#endif
+ }
+
+ MetaSig sig(methodDesc);
+
+ unsigned totalArgs = info->args.numArgs;
+ unsigned sigArgsPlusThis = totalArgs;
+ bool hasThis = false;
+ bool hasRetBuff = false;
+ bool isVarArg = false;
+ bool hasGenericsContextArg = false;
+
+ // Below, we will increment "totalArgs" for any of the "this" argument,
+ // a ret buff argument, and/or a generics context argument.
+ //
+ // There will be four arrays allocated below, each with this increased "totalArgs" elements:
+ // argOffsets, argIsReg, argPerm, and, later, m_argDescs.
+ //
+ // They will be indexed in the order (0-based, [] indicating optional)
+ //
+ // [this] sigArgs [retBuff] [VASigCookie] [genCtxt]
+ //
+ // We will call this "canonical order". It is architecture-independent, and
+ // does not necessarily correspond to the architecture-dependent physical order
+ // in which the registers are actually passed. (That's actually the purpose of
+ // "argPerm": to record the correspondence between canonical order and physical
+ // order.) We could have chosen any order for the first three of these, but it's
+ // simplest to let m_argDescs have all the passed IL arguments laid out contiguously
+ // at the beginning, allowing it to be indexed by IL argument number.
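+ //
+ // A hypothetical example (for illustration only): for an instance method int M(int a, long b)
+ // that also needs a generics-context arg, the canonical order is [this, a, b, genCtxt], with
+ // canonical indices 0..3 -- even though x86 passes genCtxt physically last and ARM passes it
+ // before the signature args.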
+
+ int genericsContextArgIndex = 0;
+ int retBuffArgIndex = 0;
+ int vaSigCookieIndex = 0;
+
+ if (sig.HasThis())
+ {
+ assert(info->args.callConv & CORINFO_CALLCONV_HASTHIS);
+ hasThis = true;
+ totalArgs++; sigArgsPlusThis++;
+ }
+
+ if (methodDesc->HasRetBuffArg())
+ {
+ hasRetBuff = true;
+ retBuffArgIndex = totalArgs;
+ totalArgs++;
+ }
+
+ if (sig.GetCallingConventionInfo() & CORINFO_CALLCONV_VARARG)
+ {
+ isVarArg = true;
+ vaSigCookieIndex = totalArgs;
+ totalArgs++;
+ }
+
+ if (sig.GetCallingConventionInfo() & CORINFO_CALLCONV_PARAMTYPE)
+ {
+ assert(info->args.callConv & CORINFO_CALLCONV_PARAMTYPE);
+ hasGenericsContextArg = true;
+ genericsContextArgIndex = totalArgs;
+ totalArgs++;
+ }
+
+ // The non-this sig args have indices starting after these.
+
+ // We will first encode the arg offsets as *negative* offsets from the address above the first
+ // stack arg, and later add in the total size of the stack args to get a positive offset.
+ // The first sigArgsPlusThis elements are the offsets of the IL-addressable arguments. After that,
+ // there may be up to two more: generics context arg, if present, and return buff pointer, if present.
+ // (Note that the latter is actually passed after the "this" pointer, or else first if no "this" pointer
+ // is present. We re-arrange to preserve the easy IL-addressability.)
+ ArgState argState(totalArgs);
+
+ // This is the permutation that translates from an index in the argOffsets/argIsReg arrays to
+ // the platform-specific order in which the arguments are passed.
+ unsigned* argPerm = new unsigned[totalArgs];
+
+ // The number of register argument slots we end up pushing.
+ unsigned short regArgsFound = 0;
+
+ unsigned physArgIndex = 0;
+
+#if defined(_ARM_)
+ // The stub linker has a weird little limitation: all stubs it's used
+ // for on ARM push some callee-saved register, so the unwind info
+ // code was written assuming at least one would be pushed. I don't know how to
+ // fix that, so I meet the requirement by pushing one callee-saved register.
+#define STUB_LINK_EMIT_PROLOG_REQUIRES_CALLEE_SAVE_PUSH 1
+
+#if STUB_LINK_EMIT_PROLOG_REQUIRES_CALLEE_SAVE_PUSH
+ const int NumberOfCalleeSaveRegsToPush = 1;
+#else
+ const int NumberOfCalleeSaveRegsToPush = 0;
+#endif
+ // The "1" here is for the return address.
+ const int NumberOfFixedPushes = 1 + NumberOfCalleeSaveRegsToPush;
+#elif defined(_ARM64_)
+ // FP, LR
+ const int NumberOfFixedPushes = 2;
+#endif
+
+#if defined(FEATURE_HFA)
+#if defined(_ARM_) || defined(_ARM64_)
+ // On ARM and ARM64, a non-retBuffArg method that returns a struct type might have an HFA
+ // return. Figure that out here.
+ unsigned HFARetTypeSize = 0;
+#endif
+#if defined(_ARM64_)
+ unsigned cHFAVars = 0;
+#endif
+ if (info->args.retType == CORINFO_TYPE_VALUECLASS
+ && CorInfoTypeIsFloatingPoint(comp->getHFAType(info->args.retTypeClass))
+ && info->args.getCallConv() != CORINFO_CALLCONV_VARARG)
+ {
+ HFARetTypeSize = getClassSize(info->args.retTypeClass);
+#if defined(_ARM_)
+ // Round up to a double boundary.
+ HFARetTypeSize = ((HFARetTypeSize+ sizeof(double) - 1) / sizeof(double)) * sizeof(double);
+#elif defined(_ARM64_)
+ // We don't need to round up to a double boundary. Unlike ARM, each field occupies one
+ // slot whether it's a float or a double; we'll handle the stack alignment in the prolog,
+ // where we have all the information about what is going to be pushed on the stack.
+ // What we do need on ARM64 is the number of slots: for instance, a VT with two float
+ // fields has the same *size* as a VT with one double field, but needs two slots rather
+ // than one. (ARM64TODO: Verify this.) Relying on size works on ARM because of the
+ // overlapping layout of the floating point registers, but it won't work on ARM64.
+ cHFAVars = (comp->getHFAType(info->args.retTypeClass) == ELEMENT_TYPE_R4) ? HFARetTypeSize/sizeof(float) : HFARetTypeSize/sizeof(double);
+#endif
+ }
+
+#endif // defined(FEATURE_HFA)
+
+ _ASSERTE_MSG((info->args.callConv & (CORINFO_CALLCONV_EXPLICITTHIS)) == 0,
+ "Don't yet handle EXPLICITTHIS calling convention modifier.");
+
+ switch (info->args.callConv & CORINFO_CALLCONV_MASK)
+ {
+ case CORINFO_CALLCONV_DEFAULT:
+ case CORINFO_CALLCONV_VARARG:
+ {
+ unsigned firstSigArgIndex = 0;
+ if (hasThis)
+ {
+ argPerm[0] = physArgIndex; physArgIndex++;
+ argState.AddArg(0);
+ firstSigArgIndex++;
+ }
+
+ if (hasRetBuff)
+ {
+ argPerm[retBuffArgIndex] = physArgIndex; physArgIndex++;
+ argState.AddArg(retBuffArgIndex);
+ }
+
+ if (isVarArg)
+ {
+ argPerm[vaSigCookieIndex] = physArgIndex; physArgIndex++;
+ interpMethInfo->m_varArgHandleArgNum = vaSigCookieIndex;
+ argState.AddArg(vaSigCookieIndex);
+ }
+
+#if defined(_ARM_) || defined(_AMD64_) || defined(_ARM64_)
+ // The generics context comes before the signature args on ARM, AMD64, and ARM64. It would
+ // be better to factor this out as a call, to avoid large swaths of duplicate code.
+ if (hasGenericsContextArg)
+ {
+ argPerm[genericsContextArgIndex] = physArgIndex; physArgIndex++;
+ argState.AddArg(genericsContextArgIndex);
+ }
+#endif // _ARM_ || _AMD64_ || _ARM64_
+
+ CORINFO_ARG_LIST_HANDLE argPtr = info->args.args;
+ // Some arguments have been passed in registers, some in memory. We must generate code that
+ // moves the register arguments to memory, and determines a pointer into the stack from which all
+ // the arguments can be accessed, according to the offsets in "argOffsets."
+ //
+ // In the first pass over the arguments, we will label and count the register arguments, and
+ // initialize entries in "argOffsets" for the non-register arguments -- relative to the SP at the
+ // time of the call. Then when we have counted the number of register arguments, we will adjust
+ // the offsets for the non-register arguments to account for those. Then, in the second pass, we
+ // will push the register arguments on the stack, and capture the final stack pointer value as
+ // the argument vector pointer.
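+ // (A concrete x86 sketch, assumed for illustration: for static int M(int a, int b, int c),
+ // pass 1 labels a and b as ECX/EDX register args and records a negative offset for c;
+ // pass 2 emits "push ecx; push edx", and the offsets are then fixed up so that a, b, and c
+ // are all addressable from the final ESP -- c sitting above the saved EBP and return PC.)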
+ CORINFO_CLASS_HANDLE vcTypeRet;
+ // This iteration starts at the first signature argument, and iterates over all the
+ // canonical indices for the signature arguments.
+ for (unsigned k = firstSigArgIndex; k < sigArgsPlusThis; k++)
+ {
+ argPerm[k] = physArgIndex; physArgIndex++;
+
+ CorInfoTypeWithMod argTypWithMod = comp->getArgType(&info->args, argPtr, &vcTypeRet);
+ CorInfoType argType = strip(argTypWithMod);
+ switch (argType)
+ {
+ case CORINFO_TYPE_UNDEF:
+ case CORINFO_TYPE_VOID:
+ case CORINFO_TYPE_VAR:
+ _ASSERTE_ALL_BUILDS(__FILE__, false); // Should not happen.
+ break;
+
+ // One-integer-slot arguments:
+ case CORINFO_TYPE_BOOL:
+ case CORINFO_TYPE_CHAR:
+ case CORINFO_TYPE_BYTE:
+ case CORINFO_TYPE_UBYTE:
+ case CORINFO_TYPE_SHORT:
+ case CORINFO_TYPE_USHORT:
+ case CORINFO_TYPE_INT:
+ case CORINFO_TYPE_UINT:
+ case CORINFO_TYPE_NATIVEINT:
+ case CORINFO_TYPE_NATIVEUINT:
+ case CORINFO_TYPE_BYREF:
+ case CORINFO_TYPE_CLASS:
+ case CORINFO_TYPE_STRING:
+ case CORINFO_TYPE_PTR:
+ argState.AddArg(k);
+ break;
+
+ // Two-integer-slot arguments:
+ case CORINFO_TYPE_LONG:
+ case CORINFO_TYPE_ULONG:
+#if defined(_X86_)
+ // Longs are always passed on the stack -- with no obvious alignment.
+ argState.AddArg(k, 2, /*noReg*/true);
+#elif defined(_ARM_)
+ // LONGS have 2-reg alignment; inc reg if necessary.
+ argState.AddArg(k, 2, /*noReg*/false, /*twoSlotAlign*/true);
+#elif defined(_AMD64_) || defined(_ARM64_)
+ argState.AddArg(k);
+#else
+#error unknown platform
+#endif
+ break;
+
+ // One-float-slot args:
+ case CORINFO_TYPE_FLOAT:
+#if defined(_X86_)
+ argState.AddArg(k, 1, /*noReg*/true);
+#elif defined(_ARM_)
+ argState.AddFPArg(k, 1, /*twoSlotAlign*/false);
+#elif defined(_AMD64_) || defined(_ARM64_)
+ argState.AddFPArg(k, 1, false);
+#else
+#error unknown platform
+#endif
+ break;
+
+ // Two-float-slot args:
+ case CORINFO_TYPE_DOUBLE:
+#if defined(_X86_)
+ argState.AddArg(k, 2, /*noReg*/true);
+#elif defined(_ARM_)
+ argState.AddFPArg(k, 2, /*twoSlotAlign*/true);
+#elif defined(_AMD64_) || defined(_ARM64_)
+ argState.AddFPArg(k, 1, false);
+#else
+#error unknown platform
+#endif
+ break;
+
+ // Value class args:
+ case CORINFO_TYPE_VALUECLASS:
+ case CORINFO_TYPE_REFANY:
+ {
+ unsigned sz = getClassSize(vcTypeRet);
+ unsigned szSlots = max(1, sz / sizeof(void*));
+#if defined(_X86_)
+ argState.AddArg(k, static_cast<short>(szSlots), /*noReg*/true);
+#elif defined(_AMD64_)
+ argState.AddArg(k, static_cast<short>(szSlots));
+#elif defined(_ARM_) || defined(_ARM64_)
+ CorInfoType hfaType = comp->getHFAType(vcTypeRet);
+ if (CorInfoTypeIsFloatingPoint(hfaType))
+ {
+ argState.AddFPArg(k, szSlots,
+#if defined(_ARM_)
+ /*twoSlotAlign*/ (hfaType == CORINFO_TYPE_DOUBLE)
+#elif defined(_ARM64_)
+ /*twoSlotAlign*/ false // unlike ARM32, FP args always consume 1 slot on ARM64
+#endif
+ );
+ }
+ else
+ {
+ unsigned align = comp->getClassAlignmentRequirement(vcTypeRet, FALSE);
+ argState.AddArg(k, static_cast<short>(szSlots), /*noReg*/false,
+#if defined(_ARM_)
+ /*twoSlotAlign*/ (align == 8)
+#elif defined(_ARM64_)
+ /*twoSlotAlign*/ false
+#endif
+ );
+ }
+#else
+#error unknown platform
+#endif
+ }
+ break;
+
+
+ default:
+ _ASSERTE_MSG(false, "should not reach here, unknown arg type");
+ }
+ argPtr = comp->getArgNext(argPtr);
+ }
+
+#if defined(_X86_)
+ // The generics context comes last on _X86_. It would be better to factor this out as a call,
+ // to avoid large swaths of duplicate code.
+ if (hasGenericsContextArg)
+ {
+ argPerm[genericsContextArgIndex] = physArgIndex; physArgIndex++;
+ argState.AddArg(genericsContextArgIndex);
+ }
+
+ // Now we have counted the number of register arguments, so we can update the offsets for the
+ // non-register arguments. "+ 2" below is to account for the return address from the call, and
+ // pushing of EBP.
+ unsigned short stackArgBaseOffset = (argState.numRegArgs + 2 + argState.callerArgStackSlots) * sizeof(void*);
+ unsigned intRegArgBaseOffset = 0;
+
+#elif defined(_ARM_)
+
+ // We're choosing to always push all arg regs on ARM -- this is the only option
+ // that ThumbEmitProlog currently gives.
+ argState.numRegArgs = 4;
+
+ // On ARM, we push the (integer) arg regs before we push the return address, so we don't add an
+ // extra constant. And the offset is the address of the last pushed argument, which is the first
+ // stack argument in signature order.
+
+ // Round up to a double boundary...
+ unsigned fpStackSlots = ((argState.numFPRegArgSlots + 1) / 2) * 2;
+ unsigned intRegArgBaseOffset = (fpStackSlots + NumberOfFixedPushes) * sizeof(void*);
+ unsigned short stackArgBaseOffset = intRegArgBaseOffset + (argState.numRegArgs) * sizeof(void*);
+#elif defined(_ARM64_)
+
+ // See StubLinkerCPU::EmitProlog for the layout of the stack
+ unsigned intRegArgBaseOffset = (argState.numFPRegArgSlots) * sizeof(void*);
+ unsigned short stackArgBaseOffset = (unsigned short) ((argState.numRegArgs + argState.numFPRegArgSlots) * sizeof(void*));
+#elif defined(_AMD64_)
+ unsigned short stackArgBaseOffset = (argState.numRegArgs) * sizeof(void*);
+#else
+#error unsupported platform
+#endif
+
+#if defined(_ARM_)
+ WORD regArgMask = 0;
+#endif // defined(_ARM_)
+ // argPerm maps from an index into the argOffsets/argIsReg arrays to
+ // the order that the arguments are passed.
+ unsigned* argPermInverse = new unsigned[totalArgs];
+ for (unsigned t = 0; t < totalArgs; t++)
+ {
+ argPermInverse[argPerm[t]] = t;
+ }
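+ // Illustration with hypothetical values: if the canonical order is [this, a, genCtxt]
+ // (indices 0, 1, 2) and the ARM physical order is [this, genCtxt, a], then
+ // argPerm == {0, 2, 1} (canonical index -> physical position) and argPermInverse == {0, 2, 1},
+ // so the loop below can walk physical positions kk and use argPermInverse[kk] to recover
+ // the canonical index k.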
+
+ for (unsigned kk = 0; kk < totalArgs; kk++)
+ {
+ // Let "k" be the index of the kk'th input in the argOffsets and argIsReg arrays.
+ // To compute "k" we need to invert argPerm permutation -- determine the "k" such
+ // that argPerm[k] == kk.
+ unsigned k = argPermInverse[kk];
+
+ assert(k < totalArgs);
+
+ if (argState.argIsReg[k] == ArgState::ARS_IntReg)
+ {
+ regArgsFound++;
+ // If any int reg args are used on ARM, we push them all (in ThumbEmitProlog)
+#if defined(_X86_)
+ if (regArgsFound == 1)
+ {
+ if (!jmpCall) { sl.X86EmitPushReg(kECX); }
+ argState.argOffsets[k] = (argState.numRegArgs - regArgsFound)*sizeof(void*); // General form, good for general # of reg args.
+ }
+ else
+ {
+ assert(regArgsFound == 2);
+ if (!jmpCall) { sl.X86EmitPushReg(kEDX); }
+ argState.argOffsets[k] = (argState.numRegArgs - regArgsFound)*sizeof(void*);
+ }
+#elif defined(_ARM_) || defined(_ARM64_)
+ argState.argOffsets[k] += intRegArgBaseOffset;
+#elif defined(_AMD64_)
+ // First home the register arguments in the stack space allocated by the caller.
+ // Refer to Stack Allocation on x64 [http://msdn.microsoft.com/en-US/library/ew5tede7(v=vs.80).aspx]
+ X86Reg argRegs[] = { kECX, kEDX, kR8, kR9 };
+ if (!jmpCall) { sl.X86EmitIndexRegStoreRSP(regArgsFound * sizeof(void*), argRegs[regArgsFound - 1]); }
+ argState.argOffsets[k] = (regArgsFound - 1) * sizeof(void*);
+#else
+#error unsupported platform
+#endif
+ }
+#if defined(_AMD64_)
+ else if (argState.argIsReg[k] == ArgState::ARS_FloatReg)
+ {
+ // Increment regArgsFound since float/int arguments have overlapping registers.
+ regArgsFound++;
+ // Home the float arguments.
+ X86Reg argRegs[] = { kXMM0, kXMM1, kXMM2, kXMM3 };
+ if (!jmpCall) { sl.X64EmitMovSDToMem(argRegs[regArgsFound - 1], static_cast<X86Reg>(kESP_Unsafe), regArgsFound * sizeof(void*)); }
+ argState.argOffsets[k] = (regArgsFound - 1) * sizeof(void*);
+ }
+#endif
+ else if (argState.argIsReg[k] == ArgState::ARS_NotReg)
+ {
+ argState.argOffsets[k] += stackArgBaseOffset;
+ }
+ // So far, x86 doesn't have any FP reg args, and ARM and ARM64 put them at offset 0, so no
+ // adjustment is necessary (yet) for arguments passed in those registers.
+ }
+ delete[] argPermInverse;
+ }
+ break;
+
+ case CORINFO_CALLCONV_C:
+ NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_C");
+ break;
+
+ case CORINFO_CALLCONV_STDCALL:
+ NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_STDCALL");
+ break;
+
+ case CORINFO_CALLCONV_THISCALL:
+ NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_THISCALL");
+ break;
+
+ case CORINFO_CALLCONV_FASTCALL:
+ NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_FASTCALL");
+ break;
+
+ case CORINFO_CALLCONV_FIELD:
+ NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_FIELD");
+ break;
+
+ case CORINFO_CALLCONV_LOCAL_SIG:
+ NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_LOCAL_SIG");
+ break;
+
+ case CORINFO_CALLCONV_PROPERTY:
+ NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_PROPERTY");
+ break;
+
+ case CORINFO_CALLCONV_NATIVEVARARG:
+ NYI_INTERP("GenerateInterpreterStub -- CORINFO_CALLCONV_NATIVEVARARG");
+ break;
+
+ default:
+ _ASSERTE_ALL_BUILDS(__FILE__, false); // shouldn't get here
+ }
+
+ delete[] argPerm;
+
+ PCODE interpretMethodFunc;
+ if (!jmpCall)
+ {
+ switch (info->args.retType)
+ {
+ case CORINFO_TYPE_FLOAT:
+ interpretMethodFunc = reinterpret_cast<PCODE>(&InterpretMethodFloat);
+ break;
+ case CORINFO_TYPE_DOUBLE:
+ interpretMethodFunc = reinterpret_cast<PCODE>(&InterpretMethodDouble);
+ break;
+ default:
+ interpretMethodFunc = reinterpret_cast<PCODE>(&InterpretMethod);
+ break;
+ }
+ // The argument registers have been pushed by now, so we can use them.
+#if defined(_X86_)
+ // First arg is pointer to the base of the ILargs arr -- i.e., the current stack value.
+ sl.X86EmitMovRegReg(kEDX, static_cast<X86Reg>(kESP_Unsafe));
+ // InterpretMethod uses F_CALL_CONV == __fastcall; pass 2 args in regs.
+#if INTERP_ILSTUBS
+ if (pMD->IsILStub())
+ {
+ // Third argument is stubcontext, in eax.
+ sl.X86EmitPushReg(kEAX);
+ }
+ else
+#endif
+ {
+ // For a non-ILStub method, push NULL as the StubContext argument.
+ sl.X86EmitZeroOutReg(kECX);
+ sl.X86EmitPushReg(kECX);
+ }
+ sl.X86EmitRegLoad(kECX, reinterpret_cast<UINT>(interpMethInfo));
+ sl.X86EmitCall(sl.NewExternalCodeLabel(interpretMethodFunc), 0);
+ // Now we will deallocate the stack slots we pushed to hold register arguments.
+ if (argState.numRegArgs > 0)
+ {
+ sl.X86EmitAddEsp(argState.numRegArgs * sizeof(void*));
+ }
+ sl.X86EmitPopReg(kEBP);
+ sl.X86EmitReturn(static_cast<WORD>(argState.callerArgStackSlots * sizeof(void*)));
+#elif defined(_AMD64_)
+ // RDX = "ilArgs", i.e., the point where the register args were homed (RSP + 8, just past the return address).
+ sl.X86EmitIndexLeaRSP(kEDX, static_cast<X86Reg>(kESP_Unsafe), 8);
+
+ // Allocate space for homing callee's (InterpretMethod's) arguments.
+ // The calling convention requires a default allocation space of 4 slots,
+ // but to keep the stack frame 16-byte aligned, we allocate 5.
+ int interpMethodArgSize = 5 * sizeof(void*);
+ sl.X86EmitSubEsp(interpMethodArgSize);
+
+ // If we have IL stubs pass the stub context in R10 or else pass NULL.
+#if INTERP_ILSTUBS
+ if (pMD->IsILStub())
+ {
+ sl.X86EmitMovRegReg(kR8, kR10);
+ }
+ else
+#endif
+ {
+ // For a non-ILStub method, push NULL as the StubContext argument.
+ sl.X86EmitZeroOutReg(kRCX);
+ sl.X86EmitMovRegReg(kR8, kRCX);
+ }
+ sl.X86EmitRegLoad(kRCX, reinterpret_cast<UINT_PTR>(interpMethInfo));
+ sl.X86EmitCall(sl.NewExternalCodeLabel(interpretMethodFunc), 0);
+ sl.X86EmitAddEsp(interpMethodArgSize);
+ sl.X86EmitReturn(0);
+#elif defined(_ARM_)
+
+ // We have to maintain 8-byte stack alignment, so if the number of
+ // slots we would normally push is not a multiple of two, push one extra
+ // register. (We will not pop this register; rather, we increment
+ // sp by an amount that includes it.)
+ bool oddPushes = (((argState.numRegArgs + NumberOfFixedPushes) % 2) != 0);
+
+ UINT stackFrameSize = 0;
+ if (oddPushes) stackFrameSize = sizeof(void*);
+ // Now, if any FP regs are used as arguments, we will copy those to the stack; reserve space for that here.
+ // (We push doubles to keep the stack aligned...)
+ unsigned short doublesToPush = (argState.numFPRegArgSlots + 1)/2;
+ stackFrameSize += (doublesToPush*2*sizeof(void*));
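+ // Worked example (illustrative): with numRegArgs == 4, one callee-save push, and the return
+ // address, 4 + 2 = 6 slots are pushed -- an even count, so no pad word is needed; and with
+ // numFPRegArgSlots == 3, doublesToPush == 2, reserving 16 bytes so d0-d1 (s0-s3) can be
+ // stored into the frame below.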
+
+ // The last argument here causes this to generate code to push all int arg regs.
+ sl.ThumbEmitProlog(/*cCalleeSavedRegs*/NumberOfCalleeSaveRegsToPush, /*cbStackFrame*/stackFrameSize, /*fPushArgRegs*/TRUE);
+
+ // Now we will generate code to copy the floating point registers to the stack frame.
+ if (doublesToPush > 0)
+ {
+ sl.ThumbEmitStoreMultipleVFPDoubleReg(ThumbVFPDoubleReg(0), thumbRegSp, doublesToPush*2);
+ }
+
+#if INTERP_ILSTUBS
+ if (pMD->IsILStub())
+ {
+ // Third argument is stubcontext, in r12.
+ sl.ThumbEmitMovRegReg(ThumbReg(2), ThumbReg(12));
+ }
+ else
+#endif
+ {
+ // For a non-ILStub method, push NULL as the third StubContext argument.
+ sl.ThumbEmitMovConstant(ThumbReg(2), 0);
+ }
+ // Second arg is pointer to the base of the ILargs arr -- i.e., the current stack value.
+ sl.ThumbEmitMovRegReg(ThumbReg(1), thumbRegSp);
+
+ // First arg is the pointer to the interpMethInfo structure.
+ sl.ThumbEmitMovConstant(ThumbReg(0), reinterpret_cast<int>(interpMethInfo));
+
+ // If there's an HFA return, add space for that.
+ if (HFARetTypeSize > 0)
+ {
+ sl.ThumbEmitSubSp(HFARetTypeSize);
+ }
+
+ // Now we can call the right method.
+ // No "direct call" instruction, so load into register first. Can use R3.
+ sl.ThumbEmitMovConstant(ThumbReg(3), static_cast<int>(interpretMethodFunc));
+ sl.ThumbEmitCallRegister(ThumbReg(3));
+
+ // If there's an HFA return, copy to FP regs, and deallocate the stack space.
+ if (HFARetTypeSize > 0)
+ {
+ sl.ThumbEmitLoadMultipleVFPDoubleReg(ThumbVFPDoubleReg(0), thumbRegSp, HFARetTypeSize/sizeof(void*));
+ sl.ThumbEmitAddSp(HFARetTypeSize);
+ }
+
+ sl.ThumbEmitEpilog();
+
+#elif defined(_ARM64_)
+
+ UINT stackFrameSize = argState.numFPRegArgSlots;
+
+ sl.EmitProlog(argState.numRegArgs, argState.numFPRegArgSlots, 0 /*cCalleeSavedRegs*/, static_cast<unsigned short>(cHFAVars*sizeof(void*)));
+
+#if INTERP_ILSTUBS
+ if (pMD->IsILStub())
+ {
+ // Third argument is stubcontext, in x12 (METHODDESC_REGISTER)
+ sl.EmitMovReg(IntReg(2), IntReg(12));
+ }
+ else
+#endif
+ {
+ // For a non-ILStub method, push NULL as the third stubContext argument
+ sl.EmitMovConstant(IntReg(2), 0);
+ }
+
+ // Second arg is the pointer to the base of the ILArgs -- i.e., the current stack value
+ sl.EmitAddImm(IntReg(1), RegSp, sl.GetSavedRegArgsOffset());
+
+ // First arg is the pointer to the interpMethodInfo structure
+#if INTERP_ILSTUBS
+ if (!pMD->IsILStub())
+#endif
+ {
+ // interpMethodInfo is already in x8, so copy it from x8
+ sl.EmitMovReg(IntReg(0), IntReg(8));
+ }
+#if INTERP_ILSTUBS
+ else
+ {
+ // We didn't do the short-circuiting, so interpMethInfo was not
+ // loaded into a register (x8) above; do it now.
+ sl.EmitMovConstant(IntReg(0), reinterpret_cast<UINT64>(interpMethInfo));
+ }
+#endif
+
+ sl.EmitCallLabel(sl.NewExternalCodeLabel((LPVOID)interpretMethodFunc), FALSE, FALSE);
+
+ // If there's an HFA return, copy to FP regs
+ if (cHFAVars > 0)
+ {
+ for (unsigned i=0; i<=(cHFAVars/2)*2;i+=2)
+ sl.EmitLoadStoreRegPairImm(StubLinkerCPU::eLOAD, VecReg(i), VecReg(i+1), RegSp, i*sizeof(void*));
+ if ((cHFAVars % 2) == 1)
+ sl.EmitLoadStoreRegImm(StubLinkerCPU::eLOAD,VecReg(cHFAVars-1), RegSp, cHFAVars*sizeof(void*));
+
+ }
+
+ sl.EmitEpilog();
+
+
+#else
+#error unsupported platform
+#endif
+ stub = sl.Link();
+
+ *nativeSizeOfCode = static_cast<ULONG>(stub->GetNumCodeBytes());
+ // TODO: manage reference count of interpreter stubs. Look for examples...
+ *nativeEntry = dac_cast<BYTE*>(stub->GetEntryPoint());
+ }
+
+ // Initialize the arg offset information.
+ interpMethInfo->InitArgInfo(comp, info, argState.argOffsets);
+
+#ifdef _DEBUG
+ AddInterpMethInfo(interpMethInfo);
+#endif // _DEBUG
+ if (!jmpCall)
+ {
+ // Remember the mapping between code address and MethodDesc*.
+ RecordInterpreterStubForMethodDesc(info->ftn, *nativeEntry);
+ }
+
+ return CORJIT_OK;
+#undef TRACE_SKIPPED
+}
+
+size_t Interpreter::GetFrameSize(InterpreterMethodInfo* interpMethInfo)
+{
+ size_t sz = interpMethInfo->LocalMemSize();
+#if COMBINE_OPSTACK_VAL_TYPE
+ sz += (interpMethInfo->m_maxStack * sizeof(OpStackValAndType));
+#else
+ sz += (interpMethInfo->m_maxStack * (sizeof(INT64) + sizeof(InterpreterType*)));
+#endif
+ return sz;
+}
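+
+// An illustrative sizing (with assumed numbers): a method whose locals occupy 64 bytes and
+// whose m_maxStack is 4 would get 64 + 4 * sizeof(OpStackValAndType) bytes here (under
+// COMBINE_OPSTACK_VAL_TYPE), with the caller adding room for a GSCookie on top (see
+// ExecuteMethodWrapper below).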
+
+// static
+ARG_SLOT Interpreter::ExecuteMethodWrapper(struct InterpreterMethodInfo* interpMethInfo, bool directCall, BYTE* ilArgs, void* stubContext, __out bool* pDoJmpCall, CORINFO_RESOLVED_TOKEN* pResolvedToken)
+{
+#define INTERP_DYNAMIC_CONTRACTS 1
+#if INTERP_DYNAMIC_CONTRACTS
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+#else
+ // Dynamic contract occupies too much stack.
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+#endif
+
+ size_t sizeWithGS = GetFrameSize(interpMethInfo) + sizeof(GSCookie);
+ BYTE* frameMemoryGS = static_cast<BYTE*>(_alloca(sizeWithGS));
+
+ ARG_SLOT retVal = 0;
+ unsigned jmpCallToken = 0;
+
+ Interpreter interp(interpMethInfo, directCall, ilArgs, stubContext, frameMemoryGS);
+
+ // Make sure we can do a GC Scan properly.
+ FrameWithCookie<InterpreterFrame> interpFrame(&interp);
+
+ // Update the interpretation count.
+ InterlockedIncrement(reinterpret_cast<LONG *>(&interpMethInfo->m_invocations));
+
+ // Need to wait until this point to do this JITting, since it may trigger a GC.
+ JitMethodIfAppropriate(interpMethInfo);
+
+ // Pass buffers to get jmpCall flag and the token, if necessary.
+ interp.ExecuteMethod(&retVal, pDoJmpCall, &jmpCallToken);
+
+ if (*pDoJmpCall)
+ {
+ GCX_PREEMP();
+ interp.ResolveToken(pResolvedToken, jmpCallToken, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_Call));
+ }
+
+ interpFrame.Pop();
+ return retVal;
+}
+
+// TODO: Add GSCookie checks
+
+// static
+inline ARG_SLOT Interpreter::InterpretMethodBody(struct InterpreterMethodInfo* interpMethInfo, bool directCall, BYTE* ilArgs, void* stubContext)
+{
+#if INTERP_DYNAMIC_CONTRACTS
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+#else
+ // Dynamic contract occupies too much stack.
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+#endif
+
+ CEEInfo* jitInfo = NULL;
+ for (bool doJmpCall = true; doJmpCall; )
+ {
+ unsigned jmpCallToken = 0;
+ CORINFO_RESOLVED_TOKEN methTokPtr;
+ ARG_SLOT retVal = ExecuteMethodWrapper(interpMethInfo, directCall, ilArgs, stubContext, &doJmpCall, &methTokPtr);
+ // Clear any allocated jitInfo.
+ delete jitInfo;
+
+ // Nothing to do if the recent method asks not to do a jmpCall.
+ if (!doJmpCall)
+ {
+ return retVal;
+ }
+
+ // The recently executed method wants us to perform a jmpCall.
+ MethodDesc* pMD = GetMethod(methTokPtr.hMethod);
+ interpMethInfo = MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE(pMD));
+
+ // Allocate a new jitInfo and also a new interpMethInfo.
+ if (interpMethInfo == NULL)
+ {
+ assert(doJmpCall);
+ jitInfo = new CEEInfo(pMD, true);
+
+ CORINFO_METHOD_INFO methInfo;
+
+ GCX_PREEMP();
+ jitInfo->getMethodInfo(CORINFO_METHOD_HANDLE(pMD), &methInfo);
+ GenerateInterpreterStub(jitInfo, &methInfo, NULL, 0, &interpMethInfo, true);
+ }
+ }
+ UNREACHABLE();
+}
+
+void Interpreter::JitMethodIfAppropriate(InterpreterMethodInfo* interpMethInfo, bool force)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ unsigned int MaxInterpretCount = s_InterpreterJITThreshold.val(CLRConfig::INTERNAL_InterpreterJITThreshold);
+
+ if (force || interpMethInfo->m_invocations > MaxInterpretCount)
+ {
+ GCX_PREEMP();
+ MethodDesc *md = reinterpret_cast<MethodDesc *>(interpMethInfo->m_method);
+ PCODE stub = md->GetNativeCode();
+
+ if (InterpretationStubToMethodInfo(stub) == md)
+ {
+#ifdef _DEBUG
+ if (s_TraceInterpreterJITTransitionFlag.val(CLRConfig::INTERNAL_TraceInterpreterJITTransition))
+ {
+ fprintf(GetLogFile(), "JITting method %s:%s.\n", md->m_pszDebugClassName, md->m_pszDebugMethodName);
+ }
+#endif // _DEBUG
+ DWORD dwFlags = CORJIT_FLG_MAKEFINALCODE;
+ NewHolder<COR_ILMETHOD_DECODER> pDecoder(NULL);
+ // Dynamic methods (e.g., IL stubs) do not have an IL decoder but may
+ // require additional flags; ordinary methods need the decoder but not the extra flags.
+ if (md->IsDynamicMethod())
+ {
+ dwFlags |= md->AsDynamicMethodDesc()->GetILStubResolver()->GetJitFlags();
+ }
+ else
+ {
+ COR_ILMETHOD_DECODER::DecoderStatus status;
+ pDecoder = new COR_ILMETHOD_DECODER(md->GetILHeader(TRUE),
+ md->GetMDImport(),
+ &status);
+ }
+ PCODE res = md->MakeJitWorker(pDecoder, dwFlags, 0);
+ interpMethInfo->m_jittedCode = res;
+ }
+ }
+}
+
+// static
+HCIMPL3(float, InterpretMethodFloat, struct InterpreterMethodInfo* interpMethInfo, BYTE* ilArgs, void* stubContext)
+{
+ FCALL_CONTRACT;
+
+ ARG_SLOT retVal = 0;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+ retVal = (ARG_SLOT)Interpreter::InterpretMethodBody(interpMethInfo, false, ilArgs, stubContext);
+ HELPER_METHOD_FRAME_END();
+
+ return *reinterpret_cast<float*>(ArgSlotEndianessFixup(&retVal, sizeof(float)));
+}
+HCIMPLEND
+
+// static
+HCIMPL3(double, InterpretMethodDouble, struct InterpreterMethodInfo* interpMethInfo, BYTE* ilArgs, void* stubContext)
+{
+ FCALL_CONTRACT;
+
+ ARG_SLOT retVal = 0;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+ retVal = Interpreter::InterpretMethodBody(interpMethInfo, false, ilArgs, stubContext);
+ HELPER_METHOD_FRAME_END();
+
+ return *reinterpret_cast<double*>(ArgSlotEndianessFixup(&retVal, sizeof(double)));
+}
+HCIMPLEND
+
+// static
+HCIMPL3(INT64, InterpretMethod, struct InterpreterMethodInfo* interpMethInfo, BYTE* ilArgs, void* stubContext)
+{
+ FCALL_CONTRACT;
+
+ ARG_SLOT retVal = 0;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+ retVal = Interpreter::InterpretMethodBody(interpMethInfo, false, ilArgs, stubContext);
+ HELPER_METHOD_FRAME_END();
+
+ return static_cast<INT64>(retVal);
+}
+HCIMPLEND
+
+bool Interpreter::IsInCalleesFrames(void* stackPtr)
+{
+ // We assume a downwards_growing stack.
+ return stackPtr < (m_localVarMemory - sizeof(GSCookie));
+}
+
+// I want an enumeration with values for the second byte of 2-byte opcodes.
+enum OPCODE_2BYTE {
+#define OPDEF(c,s,pop,push,args,type,l,s1,s2,ctrl) TWOBYTE_##c = unsigned(s2),
+#include "opcode.def"
+#undef OPDEF
+};
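+
+// For example (an assumed illustration of the expansion): the opcode.def entry for CEE_CEQ,
+// whose encoding is 0xFE 0x01, yields the enumerator TWOBYTE_CEE_CEQ = unsigned(0x01).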
+
+#ifdef _DEBUG
+static const char* getMethodName(CEEInfo* info, CORINFO_METHOD_HANDLE meth, const char** pClsName)
+{
+ GCX_PREEMP();
+ return info->getMethodName(meth, pClsName);
+}
+#endif // _DEBUG
+
+// Optimize the interpreter loop for speed.
+#ifdef _MSC_VER
+#pragma optimize("t", on)
+#endif
+
+// Duplicating code from JitHelpers for MonEnter, MonExit, MonEnter_Static, and
+// MonExit_Static, because those versions set up a helper frame for the JIT.
+static void MonitorEnter(Object* obj, BYTE* pbLockTaken)
+{
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+
+ if (objRef == NULL)
+ COMPlusThrow(kArgumentNullException);
+
+ GCPROTECT_BEGININTERIOR(pbLockTaken);
+
+#ifdef _DEBUG
+ Thread *pThread = GetThread();
+ DWORD lockCount = pThread->m_dwLockCount;
+#endif
+ if (GET_THREAD()->CatchAtSafePointOpportunistic())
+ {
+ GET_THREAD()->PulseGCMode();
+ }
+ objRef->EnterObjMonitor();
+ _ASSERTE ((objRef->GetSyncBlock()->GetMonitor()->m_Recursion == 1 && pThread->m_dwLockCount == lockCount + 1) ||
+ pThread->m_dwLockCount == lockCount);
+ if (pbLockTaken != 0) *pbLockTaken = 1;
+
+ GCPROTECT_END();
+}
+
+static void MonitorExit(Object* obj, BYTE* pbLockTaken)
+{
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ if (objRef == NULL)
+ COMPlusThrow(kArgumentNullException);
+
+ if (!objRef->LeaveObjMonitor())
+ COMPlusThrow(kSynchronizationLockException);
+
+ if (pbLockTaken != 0) *pbLockTaken = 0;
+
+ TESTHOOKCALL(AppDomainCanBeUnloaded(GET_THREAD()->GetDomain()->GetId().m_dwId,FALSE));
+
+ if (GET_THREAD()->IsAbortRequested()) {
+ GET_THREAD()->HandleThreadAbort();
+ }
+}
+
+static void MonitorEnterStatic(AwareLock *lock, BYTE* pbLockTaken)
+{
+ lock->Enter();
+ MONHELPER_STATE(*pbLockTaken = 1;)
+}
+
+static void MonitorExitStatic(AwareLock *lock, BYTE* pbLockTaken)
+{
+ // Error, yield or contention
+ if (!lock->Leave())
+ COMPlusThrow(kSynchronizationLockException);
+
+ TESTHOOKCALL(AppDomainCanBeUnloaded(GET_THREAD()->GetDomain()->GetId().m_dwId,FALSE));
+ if (GET_THREAD()->IsAbortRequested()) {
+ GET_THREAD()->HandleThreadAbort();
+ }
+}
+
+
+AwareLock* Interpreter::GetMonitorForStaticMethod()
+{
+ MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
+ CORINFO_LOOKUP_KIND kind;
+ {
+ GCX_PREEMP();
+ kind = m_interpCeeInfo.getLocationOfThisType(m_methInfo->m_method);
+ }
+ if (!kind.needsRuntimeLookup)
+ {
+ OBJECTREF ref = pMD->GetMethodTable()->GetManagedClassObject();
+ return (AwareLock*) ref->GetSyncBlock()->GetMonitor();
+ }
+ else
+ {
+ CORINFO_CLASS_HANDLE classHnd = nullptr;
+ switch (kind.runtimeLookupKind)
+ {
+ case CORINFO_LOOKUP_CLASSPARAM:
+ {
+ classHnd = (CORINFO_CLASS_HANDLE) GetPreciseGenericsContext();
+ }
+ break;
+ case CORINFO_LOOKUP_METHODPARAM:
+ {
+ MethodDesc* pMD = (MethodDesc*) GetPreciseGenericsContext();
+ classHnd = (CORINFO_CLASS_HANDLE) pMD->GetMethodTable();
+ }
+ break;
+ default:
+ NYI_INTERP("Unknown lookup for synchronized methods");
+ break;
+ }
+ MethodTable* pMT = GetMethodTableFromClsHnd(classHnd);
+ OBJECTREF ref = pMT->GetManagedClassObject();
+ ASSERT(ref);
+ return (AwareLock*) ref->GetSyncBlock()->GetMonitor();
+ }
+}
+
+void Interpreter::DoMonitorEnterWork()
+{
+ MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
+ if (pMD->IsSynchronized())
+ {
+ if (pMD->IsStatic())
+ {
+ AwareLock* lock = GetMonitorForStaticMethod();
+ MonitorEnterStatic(lock, &m_monAcquired);
+ }
+ else
+ {
+ MonitorEnter((Object*) m_thisArg, &m_monAcquired);
+ }
+ }
+}
+
+void Interpreter::DoMonitorExitWork()
+{
+ MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
+ if (pMD->IsSynchronized())
+ {
+ if (pMD->IsStatic())
+ {
+ AwareLock* lock = GetMonitorForStaticMethod();
+ MonitorExitStatic(lock, &m_monAcquired);
+ }
+ else
+ {
+ MonitorExit((Object*) m_thisArg, &m_monAcquired);
+ }
+ }
+}
+
+
+void Interpreter::ExecuteMethod(ARG_SLOT* retVal, __out bool* pDoJmpCall, __out unsigned* pJmpCallToken)
+{
+#if INTERP_DYNAMIC_CONTRACTS
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+#else
+ // Dynamic contract occupies too much stack.
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+#endif
+
+ *pDoJmpCall = false;
+
+ // Normally I'd prefer to declare these in small case-block scopes, but most C++ compilers
+ // do not realize that their lifetimes do not overlap, which makes for a large stack frame.
+ // So I avoid that by declaring them outside the switch (sigh).
+ char offsetc, valc;
+ unsigned char argNumc;
+ unsigned short argNums;
+ INT32 vali;
+ INT64 vall;
+ InterpreterType it;
+ size_t sz;
+
+ unsigned short ops;
+
+ // Make sure that the .cctor for the current method's class has been run.
+ MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
+ EnsureClassInit(pMD->GetMethodTable());
+
+#if INTERP_TRACING
+ const char* methName = eeGetMethodFullName(m_methInfo->m_method);
+ unsigned ilOffset = 0;
+
+ unsigned curInvocation = InterlockedIncrement(&s_totalInvocations);
+ if (s_TraceInterpreterEntriesFlag.val(CLRConfig::INTERNAL_TraceInterpreterEntries))
+ {
+ fprintf(GetLogFile(), "Entering method #%d (= 0x%x): %s.\n", curInvocation, curInvocation, methName);
+ fprintf(GetLogFile(), " arguments:\n");
+ PrintArgs();
+ }
+#endif // INTERP_TRACING
+
+#if LOOPS_VIA_INSTRS
+ unsigned instrs = 0;
+#else
+#if INTERP_PROFILE
+ unsigned instrs = 0;
+#endif
+#endif
+
+EvalLoop:
+ GCX_ASSERT_COOP();
+ // Catch any exceptions raised.
+ EX_TRY {
+ // Optional features...
+#define INTERPRETER_CHECK_LARGE_STRUCT_STACK_HEIGHT 1
+
+#if INTERP_ILCYCLE_PROFILE
+ m_instr = CEE_COUNT; // Flag to indicate first instruction.
+ m_exemptCycles = 0;
+#endif // INTERP_ILCYCLE_PROFILE
+
+ DoMonitorEnterWork();
+
+ INTERPLOG("START %d, %s\n", m_methInfo->m_stubNum, methName);
+ for (;;)
+ {
+ // TODO: verify that m_ILCodePtr is legal, and we haven't walked off the end of the IL array? (i.e., bad IL).
+ // Note that ExecuteBranch() should be called for every branch. That checks that we aren't either before or
+ // after the IL range. Here, we would only need to check that we haven't gone past the end (not before the beginning)
+ // because everything that doesn't call ExecuteBranch() should only add to m_ILCodePtr.
+
+#if INTERP_TRACING
+ ilOffset = CurOffset();
+#endif // INTERP_TRACING
+#if INTERP_TRACING
+ if (s_TraceInterpreterOstackFlag.val(CLRConfig::INTERNAL_TraceInterpreterOstack))
+ {
+ PrintOStack();
+ }
+#if INTERPRETER_CHECK_LARGE_STRUCT_STACK_HEIGHT
+ _ASSERTE_MSG(LargeStructStackHeightIsValid(), "Large structure stack height invariant violated."); // Check the large struct stack invariant.
+#endif
+ if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
+ {
+ fprintf(GetLogFile(), " %#4x: %s\n", ilOffset, ILOp(m_ILCodePtr));
+ fflush(GetLogFile());
+ }
+#endif // INTERP_TRACING
+#if LOOPS_VIA_INSTRS
+ instrs++;
+#else
+#if INTERP_PROFILE
+ instrs++;
+#endif
+#endif
+
+#if INTERP_ILINSTR_PROFILE
+#if INTERP_ILCYCLE_PROFILE
+ UpdateCycleCount();
+#endif // INTERP_ILCYCLE_PROFILE
+
+ InterlockedIncrement(&s_ILInstrExecs[*m_ILCodePtr]);
+#endif // INTERP_ILINSTR_PROFILE
+
+ switch (*m_ILCodePtr)
+ {
+ case CEE_NOP:
+ m_ILCodePtr++;
+ continue;
+ case CEE_BREAK: // TODO: interact with the debugger?
+ m_ILCodePtr++;
+ continue;
+ case CEE_LDARG_0:
+ LdArg(0);
+ break;
+ case CEE_LDARG_1:
+ LdArg(1);
+ break;
+ case CEE_LDARG_2:
+ LdArg(2);
+ break;
+ case CEE_LDARG_3:
+ LdArg(3);
+ break;
+ case CEE_LDLOC_0:
+ LdLoc(0);
+ m_ILCodePtr++;
+ continue;
+ case CEE_LDLOC_1:
+ LdLoc(1);
+ break;
+ case CEE_LDLOC_2:
+ LdLoc(2);
+ break;
+ case CEE_LDLOC_3:
+ LdLoc(3);
+ break;
+ case CEE_STLOC_0:
+ StLoc(0);
+ break;
+ case CEE_STLOC_1:
+ StLoc(1);
+ break;
+ case CEE_STLOC_2:
+ StLoc(2);
+ break;
+ case CEE_STLOC_3:
+ StLoc(3);
+ break;
+ case CEE_LDARG_S:
+ m_ILCodePtr++;
+ argNumc = *m_ILCodePtr;
+ LdArg(argNumc);
+ break;
+ case CEE_LDARGA_S:
+ m_ILCodePtr++;
+ argNumc = *m_ILCodePtr;
+ LdArgA(argNumc);
+ break;
+ case CEE_STARG_S:
+ m_ILCodePtr++;
+ argNumc = *m_ILCodePtr;
+ StArg(argNumc);
+ break;
+ case CEE_LDLOC_S:
+ argNumc = *(m_ILCodePtr + 1);
+ LdLoc(argNumc);
+ m_ILCodePtr += 2;
+ continue;
+ case CEE_LDLOCA_S:
+ m_ILCodePtr++;
+ argNumc = *m_ILCodePtr;
+ LdLocA(argNumc);
+ break;
+ case CEE_STLOC_S:
+ argNumc = *(m_ILCodePtr + 1);
+ StLoc(argNumc);
+ m_ILCodePtr += 2;
+ continue;
+ case CEE_LDNULL:
+ LdNull();
+ break;
+ case CEE_LDC_I4_M1:
+ LdIcon(-1);
+ break;
+ case CEE_LDC_I4_0:
+ LdIcon(0);
+ break;
+ case CEE_LDC_I4_1:
+ LdIcon(1);
+ m_ILCodePtr++;
+ continue;
+ case CEE_LDC_I4_2:
+ LdIcon(2);
+ break;
+ case CEE_LDC_I4_3:
+ LdIcon(3);
+ break;
+ case CEE_LDC_I4_4:
+ LdIcon(4);
+ break;
+ case CEE_LDC_I4_5:
+ LdIcon(5);
+ break;
+ case CEE_LDC_I4_6:
+ LdIcon(6);
+ break;
+ case CEE_LDC_I4_7:
+ LdIcon(7);
+ break;
+ case CEE_LDC_I4_8:
+ LdIcon(8);
+ break;
+ case CEE_LDC_I4_S:
+ valc = getI1(m_ILCodePtr + 1);
+ LdIcon(valc);
+ m_ILCodePtr += 2;
+ continue;
+ case CEE_LDC_I4:
+ vali = getI4LittleEndian(m_ILCodePtr + 1);
+ LdIcon(vali);
+ m_ILCodePtr += 5;
+ continue;
+ case CEE_LDC_I8:
+ vall = getI8LittleEndian(m_ILCodePtr + 1);
+ LdLcon(vall);
+ m_ILCodePtr += 9;
+ continue;
+ case CEE_LDC_R4:
+ // We use I4 here because we just care about the bit pattern.
+ // LdR4Con will push the right InterpreterType.
+ vali = getI4LittleEndian(m_ILCodePtr + 1);
+ LdR4con(vali);
+ m_ILCodePtr += 5;
+ continue;
+ case CEE_LDC_R8:
+ // We use I4 here because we just care about the bit pattern.
+ // LdR8Con will push the right InterpreterType.
+ vall = getI8LittleEndian(m_ILCodePtr + 1);
+ LdR8con(vall);
+ m_ILCodePtr += 9;
+ continue;
+ case CEE_DUP:
+ assert(m_curStackHt > 0);
+ it = OpStackTypeGet(m_curStackHt - 1);
+ OpStackTypeSet(m_curStackHt, it);
+ if (it.IsLargeStruct(&m_interpCeeInfo))
+ {
+ sz = it.Size(&m_interpCeeInfo);
+ void* dest = LargeStructOperandStackPush(sz);
+ memcpy(dest, OpStackGet<void*>(m_curStackHt - 1), sz);
+ OpStackSet<void*>(m_curStackHt, dest);
+ }
+ else
+ {
+ OpStackSet<INT64>(m_curStackHt, OpStackGet<INT64>(m_curStackHt - 1));
+ }
+ m_curStackHt++;
+ break;
+ case CEE_POP:
+ assert(m_curStackHt > 0);
+ m_curStackHt--;
+ it = OpStackTypeGet(m_curStackHt);
+ if (it.IsLargeStruct(&m_interpCeeInfo))
+ {
+ LargeStructOperandStackPop(it.Size(&m_interpCeeInfo), OpStackGet<void*>(m_curStackHt));
+ }
+ break;
+
+ case CEE_JMP:
+ *pJmpCallToken = getU4LittleEndian(m_ILCodePtr + sizeof(byte));
+ *pDoJmpCall = true;
+ goto ExitEvalLoop;
+
+ case CEE_CALL:
+ DoCall(/*virtualCall*/false);
+#ifdef _DEBUG
+ if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
+ {
+ fprintf(GetLogFile(), " Returning to method %s, stub num %d.\n", methName, m_methInfo->m_stubNum);
+ }
+#endif // _DEBUG
+ continue;
+
+ case CEE_CALLVIRT:
+ DoCall(/*virtualCall*/true);
+#ifdef _DEBUG
+ if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
+ {
+ fprintf(GetLogFile(), " Returning to method %s, stub num %d.\n", methName, m_methInfo->m_stubNum);
+ }
+#endif // _DEBUG
+ continue;
+
+ // HARD
+ case CEE_CALLI:
+ CallI();
+ continue;
+
+ case CEE_RET:
+ if (m_methInfo->m_returnType == CORINFO_TYPE_VOID)
+ {
+ assert(m_curStackHt == 0);
+ }
+ else
+ {
+ assert(m_curStackHt == 1);
+ InterpreterType retValIt = OpStackTypeGet(0);
+ bool looseInt = s_InterpreterLooseRules &&
+ CorInfoTypeIsIntegral(m_methInfo->m_returnType) &&
+ (CorInfoTypeIsIntegral(retValIt.ToCorInfoType()) || CorInfoTypeIsPointer(retValIt.ToCorInfoType())) &&
+ (m_methInfo->m_returnType != retValIt.ToCorInfoType());
+
+ bool looseFloat = s_InterpreterLooseRules &&
+ CorInfoTypeIsFloatingPoint(m_methInfo->m_returnType) &&
+ CorInfoTypeIsFloatingPoint(retValIt.ToCorInfoType()) &&
+ (m_methInfo->m_returnType != retValIt.ToCorInfoType());
+
+ // Make sure that the return value "matches" (which allows certain relaxations) the declared return type.
+ assert((m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
+ (m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
+ (m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_REFANY) ||
+ (looseInt || looseFloat) ||
+ InterpreterType(m_methInfo->m_returnType).StackNormalize().Matches(retValIt, &m_interpCeeInfo));
+
+ size_t sz = retValIt.Size(&m_interpCeeInfo);
+#if defined(FEATURE_HFA)
+ CorInfoType cit = CORINFO_TYPE_UNDEF;
+ {
+ GCX_PREEMP();
+ if(m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS)
+ cit = m_interpCeeInfo.getHFAType(retValIt.ToClassHandle());
+ }
+#endif
+ if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_hasRetBuffArg>())
+ {
+ assert((m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
+ (m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_VALUECLASS) ||
+ (m_methInfo->m_returnType == CORINFO_TYPE_REFANY && retValIt.ToCorInfoType() == CORINFO_TYPE_REFANY));
+ if (retValIt.ToCorInfoType() == CORINFO_TYPE_REFANY)
+ {
+ InterpreterType typedRefIT = GetTypedRefIT(&m_interpCeeInfo);
+ TypedByRef* ptr = OpStackGet<TypedByRef*>(0);
+ *((TypedByRef*) m_retBufArg) = *ptr;
+ }
+ else if (retValIt.IsLargeStruct(&m_interpCeeInfo))
+ {
+ MethodTable* clsMt = GetMethodTableFromClsHnd(retValIt.ToClassHandle());
+ // The ostack value is a pointer to the struct value.
+ CopyValueClassUnchecked(m_retBufArg, OpStackGet<void*>(0), clsMt);
+ }
+ else
+ {
+ MethodTable* clsMt = GetMethodTableFromClsHnd(retValIt.ToClassHandle());
+ // The ostack value *is* the struct value.
+ CopyValueClassUnchecked(m_retBufArg, OpStackGetAddr(0, sz), clsMt);
+ }
+ }
+#if defined(FEATURE_HFA)
+ // Is it an HFA?
+ else if (m_methInfo->m_returnType == CORINFO_TYPE_VALUECLASS
+ && CorInfoTypeIsFloatingPoint(cit)
+ && (MetaSig(reinterpret_cast<MethodDesc*>(m_methInfo->m_method)).GetCallingConventionInfo() & CORINFO_CALLCONV_VARARG) == 0)
+ {
+ if (retValIt.IsLargeStruct(&m_interpCeeInfo))
+ {
+ // The ostack value is a pointer to the struct value.
+ memcpy(GetHFARetBuffAddr(static_cast<unsigned>(sz)), OpStackGet<void*>(0), sz);
+ }
+ else
+ {
+ // The ostack value *is* the struct value.
+ memcpy(GetHFARetBuffAddr(static_cast<unsigned>(sz)), OpStackGetAddr(0, sz), sz);
+ }
+ }
+#endif
+ else if (CorInfoTypeIsFloatingPoint(m_methInfo->m_returnType) &&
+ CorInfoTypeIsFloatingPoint(retValIt.ToCorInfoType()))
+ {
+ double val = (sz <= sizeof(INT32)) ? OpStackGet<float>(0) : OpStackGet<double>(0);
+ if (m_methInfo->m_returnType == CORINFO_TYPE_DOUBLE)
+ {
+ memcpy(retVal, &val, sizeof(double));
+ }
+ else
+ {
+ float val2 = (float) val;
+ memcpy(retVal, &val2, sizeof(float));
+ }
+ }
+ else
+ {
+ if (sz <= sizeof(INT32))
+ {
+ *retVal = OpStackGet<INT32>(0);
+ }
+ else
+ {
+ // If looseInt is true, we would be relying on an automatic downcast if *retVal
+ // were smaller than 64 bits, but by definition an ARG_SLOT is 64 bits, so that cannot happen.
+ assert(sz == sizeof(INT64));
+ *retVal = OpStackGet<INT64>(0);
+ }
+ }
+ }
+
+
+#if INTERP_PROFILE
+ // We're not capturing instructions executed in a method that terminates via exception,
+ // but that's OK...
+ m_methInfo->RecordExecInstrs(instrs);
+#endif
+#if INTERP_TRACING
+ // We kept this live until now; we're about to leave, so free it.
+ delete methName;
+#endif // INTERP_TRACING
+
+#if INTERP_ILCYCLE_PROFILE
+ // Finish off accounting for the "RET" before we return
+ UpdateCycleCount();
+#endif // INTERP_ILCYCLE_PROFILE
+
+ goto ExitEvalLoop;
+
+ case CEE_BR_S:
+ m_ILCodePtr++;
+ offsetc = *m_ILCodePtr;
+ // The offset is wrt the beginning of the following instruction, so the +1 is to get to that
+ // m_ILCodePtr value before adding the offset.
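+ // E.g. (hypothetical layout): if the br.s opcode byte is at IL offset 10, m_ILCodePtr now
+ // points at the operand (offset 11) and the next instruction starts at offset 12, so an
+ // operand byte of -2 branches back to IL offset 10.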
+ ExecuteBranch(m_ILCodePtr + offsetc + 1);
+ continue; // Skip the default m_ILCodePtr++ at bottom of loop.
+
+ case CEE_LEAVE_S:
+ // LEAVE empties the operand stack.
+ m_curStackHt = 0;
+ m_largeStructOperandStackHt = 0;
+ offsetc = getI1(m_ILCodePtr + 1);
+
+ {
+ // The offset is wrt the beginning of the following instruction, so the +2 is to get to that
+ // m_ILCodePtr value before adding the offset.
+ BYTE* leaveTarget = m_ILCodePtr + offsetc + 2;
+ unsigned leaveOffset = CurOffset();
+ m_leaveInfoStack.Push(LeaveInfo(leaveOffset, leaveTarget));
+ if (!SearchForCoveringFinally())
+ {
+ m_leaveInfoStack.Pop();
+ ExecuteBranch(leaveTarget);
+ }
+ }
+ continue; // Skip the default m_ILCodePtr++ at bottom of loop.
+
+ // Abstract the next pair out to something common with templates.
+ case CEE_BRFALSE_S:
+ BrOnValue<false, 1>();
+ continue;
+
+ case CEE_BRTRUE_S:
+ BrOnValue<true, 1>();
+ continue;
+
+ case CEE_BEQ_S:
+ BrOnComparison<CO_EQ, false, 1>();
+ continue;
+ case CEE_BGE_S:
+ assert(m_curStackHt >= 2);
+ // ECMA spec gives different semantics for different operand types:
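+ // Per ECMA-335, bge on floats is "not blt.un" (so comparisons involving NaN fall through),
+ // while on integers it is "not blt"; hence the negated BrOnComparison<CO_LT*, true, 1> forms.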
+ switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
+ {
+ case CORINFO_TYPE_FLOAT:
+ case CORINFO_TYPE_DOUBLE:
+ BrOnComparison<CO_LT_UN, true, 1>();
+ break;
+ default:
+ BrOnComparison<CO_LT, true, 1>();
+ break;
+ }
+ continue;
+ case CEE_BGT_S:
+ BrOnComparison<CO_GT, false, 1>();
+ continue;
+ case CEE_BLE_S:
+ assert(m_curStackHt >= 2);
+ // ECMA spec gives different semantics for different operand types:
+ switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
+ {
+ case CORINFO_TYPE_FLOAT:
+ case CORINFO_TYPE_DOUBLE:
+ BrOnComparison<CO_GT_UN, true, 1>();
+ break;
+ default:
+ BrOnComparison<CO_GT, true, 1>();
+ break;
+ }
+ continue;
+ case CEE_BLT_S:
+ BrOnComparison<CO_LT, false, 1>();
+ continue;
+ case CEE_BNE_UN_S:
+ BrOnComparison<CO_EQ, true, 1>();
+ continue;
+ case CEE_BGE_UN_S:
+ assert(m_curStackHt >= 2);
+ // ECMA spec gives different semantics for different operand types:
+ switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
+ {
+ case CORINFO_TYPE_FLOAT:
+ case CORINFO_TYPE_DOUBLE:
+ BrOnComparison<CO_LT, true, 1>();
+ break;
+ default:
+ BrOnComparison<CO_LT_UN, true, 1>();
+ break;
+ }
+ continue;
+ case CEE_BGT_UN_S:
+ BrOnComparison<CO_GT_UN, false, 1>();
+ continue;
+ case CEE_BLE_UN_S:
+ assert(m_curStackHt >= 2);
+ // ECMA spec gives different semantics for different operand types:
+ switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
+ {
+ case CORINFO_TYPE_FLOAT:
+ case CORINFO_TYPE_DOUBLE:
+ BrOnComparison<CO_GT, true, 1>();
+ break;
+ default:
+ BrOnComparison<CO_GT_UN, true, 1>();
+ break;
+ }
+ continue;
+ case CEE_BLT_UN_S:
+ BrOnComparison<CO_LT_UN, false, 1>();
+ continue;
+
+ case CEE_BR:
+ m_ILCodePtr++;
+ vali = getI4LittleEndian(m_ILCodePtr);
+ vali += 4; // +4 for the length of the offset.
+ ExecuteBranch(m_ILCodePtr + vali);
+ if (vali < 0)
+ {
+ // Backwards branch -- enable caching.
+ BackwardsBranchActions(vali);
+ }
+
+ continue;
+
+ case CEE_LEAVE:
+ // LEAVE empties the operand stack.
+ m_curStackHt = 0;
+ m_largeStructOperandStackHt = 0;
+ vali = getI4LittleEndian(m_ILCodePtr + 1);
+
+ {
+ // The offset is wrt the beginning of the following instruction, so the +5 is to get to that
+ // m_ILCodePtr value before adding the offset.
+ BYTE* leaveTarget = m_ILCodePtr + (vali + 5);
+ unsigned leaveOffset = CurOffset();
+ m_leaveInfoStack.Push(LeaveInfo(leaveOffset, leaveTarget));
+ if (!SearchForCoveringFinally())
+ {
+ (void)m_leaveInfoStack.Pop();
+ if (vali < 0)
+ {
+ // Backwards branch -- enable caching.
+ BackwardsBranchActions(vali);
+ }
+ ExecuteBranch(leaveTarget);
+ }
+ }
+ continue; // Skip the default m_ILCodePtr++ at bottom of loop.
+
+ case CEE_BRFALSE:
+ BrOnValue<false, 4>();
+ continue;
+ case CEE_BRTRUE:
+ BrOnValue<true, 4>();
+ continue;
+
+ case CEE_BEQ:
+ BrOnComparison<CO_EQ, false, 4>();
+ continue;
+ case CEE_BGE:
+ assert(m_curStackHt >= 2);
+ // ECMA spec gives different semantics for different operand types:
+ switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
+ {
+ case CORINFO_TYPE_FLOAT:
+ case CORINFO_TYPE_DOUBLE:
+ BrOnComparison<CO_LT_UN, true, 4>();
+ break;
+ default:
+ BrOnComparison<CO_LT, true, 4>();
+ break;
+ }
+ continue;
+ case CEE_BGT:
+ BrOnComparison<CO_GT, false, 4>();
+ continue;
+ case CEE_BLE:
+ assert(m_curStackHt >= 2);
+ // ECMA spec gives different semantics for different operand types:
+ switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
+ {
+ case CORINFO_TYPE_FLOAT:
+ case CORINFO_TYPE_DOUBLE:
+ BrOnComparison<CO_GT_UN, true, 4>();
+ break;
+ default:
+ BrOnComparison<CO_GT, true, 4>();
+ break;
+ }
+ continue;
+ case CEE_BLT:
+ BrOnComparison<CO_LT, false, 4>();
+ continue;
+ case CEE_BNE_UN:
+ BrOnComparison<CO_EQ, true, 4>();
+ continue;
+ case CEE_BGE_UN:
+ assert(m_curStackHt >= 2);
+ // ECMA spec gives different semantics for different operand types:
+ switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
+ {
+ case CORINFO_TYPE_FLOAT:
+ case CORINFO_TYPE_DOUBLE:
+ BrOnComparison<CO_LT, true, 4>();
+ break;
+ default:
+ BrOnComparison<CO_LT_UN, true, 4>();
+ break;
+ }
+ continue;
+ case CEE_BGT_UN:
+ BrOnComparison<CO_GT_UN, false, 4>();
+ continue;
+ case CEE_BLE_UN:
+ assert(m_curStackHt >= 2);
+ // ECMA spec gives different semantics for different operand types:
+ switch (OpStackTypeGet(m_curStackHt-1).ToCorInfoType())
+ {
+ case CORINFO_TYPE_FLOAT:
+ case CORINFO_TYPE_DOUBLE:
+ BrOnComparison<CO_GT, true, 4>();
+ break;
+ default:
+ BrOnComparison<CO_GT_UN, true, 4>();
+ break;
+ }
+ continue;
+ case CEE_BLT_UN:
+ BrOnComparison<CO_LT_UN, false, 4>();
+ continue;
+
+ case CEE_SWITCH:
+ {
+ assert(m_curStackHt > 0);
+ m_curStackHt--;
+#if defined(_DEBUG) || defined(_AMD64_)
+ // "cit" is also consumed by the _AMD64_ code below, so don't restrict it to _DEBUG builds.
+ CorInfoType cit = OpStackTypeGet(m_curStackHt).ToCorInfoType();
+#endif
+#ifdef _DEBUG
+ assert(cit == CORINFO_TYPE_INT || cit == CORINFO_TYPE_UINT || cit == CORINFO_TYPE_NATIVEINT);
+#endif // _DEBUG
+#if defined(_AMD64_)
+ UINT32 val = (cit == CORINFO_TYPE_NATIVEINT) ? (INT32) OpStackGet<NativeInt>(m_curStackHt)
+ : OpStackGet<INT32>(m_curStackHt);
+#else
+ UINT32 val = OpStackGet<INT32>(m_curStackHt);
+#endif
+ UINT32 n = getU4LittleEndian(m_ILCodePtr + 1);
+ UINT32 instrSize = 1 + (n + 1)*4;
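+ // E.g. (illustrative): for n == 2 the encoding is 1 opcode byte, 4 bytes for n, and
+ // 2*4 bytes of targets, so instrSize == 13; each target is relative to the first byte
+ // after the entire switch instruction.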
+ if (val < n)
+ {
+ vali = getI4LittleEndian(m_ILCodePtr + (5 + val * 4));
+ ExecuteBranch(m_ILCodePtr + instrSize + vali);
+ }
+ else
+ {
+ m_ILCodePtr += instrSize;
+ }
+ }
+ continue;
+
+ case CEE_LDIND_I1:
+ LdIndShort<INT8, /*isUnsigned*/false>();
+ break;
+ case CEE_LDIND_U1:
+ LdIndShort<UINT8, /*isUnsigned*/true>();
+ break;
+ case CEE_LDIND_I2:
+ LdIndShort<INT16, /*isUnsigned*/false>();
+ break;
+ case CEE_LDIND_U2:
+ LdIndShort<UINT16, /*isUnsigned*/true>();
+ break;
+ case CEE_LDIND_I4:
+ LdInd<INT32, CORINFO_TYPE_INT>();
+ break;
+ case CEE_LDIND_U4:
+ LdInd<UINT32, CORINFO_TYPE_INT>();
+ break;
+ case CEE_LDIND_I8:
+ LdInd<INT64, CORINFO_TYPE_LONG>();
+ break;
+ case CEE_LDIND_I:
+ LdInd<NativeInt, CORINFO_TYPE_NATIVEINT>();
+ break;
+ case CEE_LDIND_R4:
+ LdInd<float, CORINFO_TYPE_FLOAT>();
+ break;
+ case CEE_LDIND_R8:
+ LdInd<double, CORINFO_TYPE_DOUBLE>();
+ break;
+ case CEE_LDIND_REF:
+ LdInd<Object*, CORINFO_TYPE_CLASS>();
+ break;
+ case CEE_STIND_REF:
+ StInd_Ref();
+ break;
+ case CEE_STIND_I1:
+ StInd<INT8>();
+ break;
+ case CEE_STIND_I2:
+ StInd<INT16>();
+ break;
+ case CEE_STIND_I4:
+ StInd<INT32>();
+ break;
+ case CEE_STIND_I8:
+ StInd<INT64>();
+ break;
+ case CEE_STIND_R4:
+ StInd<float>();
+ break;
+ case CEE_STIND_R8:
+ StInd<double>();
+ break;
+ case CEE_ADD:
+ BinaryArithOp<BA_Add>();
+ m_ILCodePtr++;
+ continue;
+ case CEE_SUB:
+ BinaryArithOp<BA_Sub>();
+ break;
+ case CEE_MUL:
+ BinaryArithOp<BA_Mul>();
+ break;
+ case CEE_DIV:
+ BinaryArithOp<BA_Div>();
+ break;
+ case CEE_DIV_UN:
+ BinaryIntOp<BIO_DivUn>();
+ break;
+ case CEE_REM:
+ BinaryArithOp<BA_Rem>();
+ break;
+ case CEE_REM_UN:
+ BinaryIntOp<BIO_RemUn>();
+ break;
+ case CEE_AND:
+ BinaryIntOp<BIO_And>();
+ break;
+ case CEE_OR:
+ BinaryIntOp<BIO_Or>();
+ break;
+ case CEE_XOR:
+ BinaryIntOp<BIO_Xor>();
+ break;
+ case CEE_SHL:
+ ShiftOp<CEE_SHL>();
+ break;
+ case CEE_SHR:
+ ShiftOp<CEE_SHR>();
+ break;
+ case CEE_SHR_UN:
+ ShiftOp<CEE_SHR_UN>();
+ break;
+ case CEE_NEG:
+ Neg();
+ break;
+ case CEE_NOT:
+ Not();
+ break;
+ case CEE_CONV_I1:
+ Conv<INT8, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_I2:
+ Conv<INT16, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_I4:
+ Conv<INT32, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_I8:
+ Conv<INT64, /*TIsUnsigned*/false, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_LONG>();
+ break;
+ case CEE_CONV_R4:
+ Conv<float, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_FLOAT>();
+ break;
+ case CEE_CONV_R8:
+ Conv<double, /*TIsUnsigned*/false, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_DOUBLE>();
+ break;
+ case CEE_CONV_U4:
+ Conv<UINT32, /*TIsUnsigned*/true, /*TCanHoldPtr*/false, /*TIsShort*/false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_U8:
+ Conv<UINT64, /*TIsUnsigned*/true, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_LONG>();
+ break;
+
+ case CEE_CPOBJ:
+ CpObj();
+ continue;
+ case CEE_LDOBJ:
+ LdObj();
+ continue;
+ case CEE_LDSTR:
+ LdStr();
+ continue;
+ case CEE_NEWOBJ:
+ NewObj();
+#ifdef _DEBUG
+ if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
+ {
+ fprintf(GetLogFile(), " Returning to method %s, stub num %d.\n", methName, m_methInfo->m_stubNum);
+ }
+#endif // _DEBUG
+ continue;
+ case CEE_CASTCLASS:
+ CastClass();
+ continue;
+ case CEE_ISINST:
+ IsInst();
+ continue;
+ case CEE_CONV_R_UN:
+ ConvRUn();
+ break;
+ case CEE_UNBOX:
+ Unbox();
+ continue;
+ case CEE_THROW:
+ Throw();
+ break;
+ case CEE_LDFLD:
+ LdFld();
+ continue;
+ case CEE_LDFLDA:
+ LdFldA();
+ continue;
+ case CEE_STFLD:
+ StFld();
+ continue;
+ case CEE_LDSFLD:
+ LdSFld();
+ continue;
+ case CEE_LDSFLDA:
+ LdSFldA();
+ continue;
+ case CEE_STSFLD:
+ StSFld();
+ continue;
+ case CEE_STOBJ:
+ StObj();
+ continue;
+ case CEE_CONV_OVF_I1_UN:
+ ConvOvfUn<INT8, SCHAR_MIN, SCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_OVF_I2_UN:
+ ConvOvfUn<INT16, SHRT_MIN, SHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_OVF_I4_UN:
+ ConvOvfUn<INT32, INT_MIN, INT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_OVF_I8_UN:
+ ConvOvfUn<INT64, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
+ break;
+ case CEE_CONV_OVF_U1_UN:
+ ConvOvfUn<UINT8, 0, UCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_OVF_U2_UN:
+ ConvOvfUn<UINT16, 0, USHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_OVF_U4_UN:
+ ConvOvfUn<UINT32, 0, UINT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_OVF_U8_UN:
+ ConvOvfUn<UINT64, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
+ break;
+ case CEE_CONV_OVF_I_UN:
+ if (sizeof(NativeInt) == 4)
+ {
+ ConvOvfUn<NativeInt, INT_MIN, INT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
+ }
+ else
+ {
+ assert(sizeof(NativeInt) == 8);
+ ConvOvfUn<NativeInt, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
+ }
+ break;
+ case CEE_CONV_OVF_U_UN:
+ if (sizeof(NativeUInt) == 4)
+ {
+ ConvOvfUn<NativeUInt, 0, UINT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
+ }
+ else
+ {
+ assert(sizeof(NativeUInt) == 8);
+ ConvOvfUn<NativeUInt, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
+ }
+ break;
+ case CEE_BOX:
+ Box();
+ continue;
+ case CEE_NEWARR:
+ NewArr();
+ continue;
+ case CEE_LDLEN:
+ LdLen();
+ break;
+ case CEE_LDELEMA:
+ LdElem</*takeAddr*/true>();
+ continue;
+ case CEE_LDELEM_I1:
+ LdElemWithType<INT8, false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_LDELEM_U1:
+ LdElemWithType<UINT8, false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_LDELEM_I2:
+ LdElemWithType<INT16, false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_LDELEM_U2:
+ LdElemWithType<UINT16, false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_LDELEM_I4:
+ LdElemWithType<INT32, false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_LDELEM_U4:
+ LdElemWithType<UINT32, false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_LDELEM_I8:
+ LdElemWithType<INT64, false, CORINFO_TYPE_LONG>();
+ break;
+ // Note that the ECMA spec defines a "LDELEM_U8", but it is the same instruction number as LDELEM_I8 (since
+ // when loading to the widest width, signed/unsigned doesn't matter).
+ case CEE_LDELEM_I:
+ LdElemWithType<NativeInt, false, CORINFO_TYPE_NATIVEINT>();
+ break;
+ case CEE_LDELEM_R4:
+ LdElemWithType<float, false, CORINFO_TYPE_FLOAT>();
+ break;
+ case CEE_LDELEM_R8:
+ LdElemWithType<double, false, CORINFO_TYPE_DOUBLE>();
+ break;
+ case CEE_LDELEM_REF:
+ LdElemWithType<Object*, true, CORINFO_TYPE_CLASS>();
+ break;
+ case CEE_STELEM_I:
+ StElemWithType<NativeInt, false>();
+ break;
+ case CEE_STELEM_I1:
+ StElemWithType<INT8, false>();
+ break;
+ case CEE_STELEM_I2:
+ StElemWithType<INT16, false>();
+ break;
+ case CEE_STELEM_I4:
+ StElemWithType<INT32, false>();
+ break;
+ case CEE_STELEM_I8:
+ StElemWithType<INT64, false>();
+ break;
+ case CEE_STELEM_R4:
+ StElemWithType<float, false>();
+ break;
+ case CEE_STELEM_R8:
+ StElemWithType<double, false>();
+ break;
+ case CEE_STELEM_REF:
+ StElemWithType<Object*, true>();
+ break;
+ case CEE_LDELEM:
+ LdElem</*takeAddr*/false>();
+ continue;
+ case CEE_STELEM:
+ StElem();
+ continue;
+ case CEE_UNBOX_ANY:
+ UnboxAny();
+ continue;
+ case CEE_CONV_OVF_I1:
+ ConvOvf<INT8, SCHAR_MIN, SCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_OVF_U1:
+ ConvOvf<UINT8, 0, UCHAR_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_OVF_I2:
+ ConvOvf<INT16, SHRT_MIN, SHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_OVF_U2:
+ ConvOvf<UINT16, 0, USHRT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_OVF_I4:
+ ConvOvf<INT32, INT_MIN, INT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_OVF_U4:
+ ConvOvf<UINT32, 0, UINT_MAX, /*TCanHoldPtr*/false, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_OVF_I8:
+ ConvOvf<INT64, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
+ break;
+ case CEE_CONV_OVF_U8:
+ ConvOvf<UINT64, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_LONG>();
+ break;
+ case CEE_REFANYVAL:
+ RefanyVal();
+ continue;
+ case CEE_CKFINITE:
+ CkFinite();
+ break;
+ case CEE_MKREFANY:
+ MkRefany();
+ continue;
+ case CEE_LDTOKEN:
+ LdToken();
+ continue;
+ case CEE_CONV_U2:
+ Conv<UINT16, /*TIsUnsigned*/true, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_U1:
+ Conv<UINT8, /*TIsUnsigned*/true, /*TCanHoldPtr*/false, /*TIsShort*/true, CORINFO_TYPE_INT>();
+ break;
+ case CEE_CONV_I:
+ Conv<NativeInt, /*TIsUnsigned*/false, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_NATIVEINT>();
+ break;
+ case CEE_CONV_OVF_I:
+ if (sizeof(NativeInt) == 4)
+ {
+ ConvOvf<NativeInt, INT_MIN, INT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
+ }
+ else
+ {
+ assert(sizeof(NativeInt) == 8);
+ ConvOvf<NativeInt, _I64_MIN, _I64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
+ }
+ break;
+ case CEE_CONV_OVF_U:
+ if (sizeof(NativeUInt) == 4)
+ {
+ ConvOvf<NativeUInt, 0, UINT_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
+ }
+ else
+ {
+ assert(sizeof(NativeUInt) == 8);
+ ConvOvf<NativeUInt, 0, _UI64_MAX, /*TCanHoldPtr*/true, CORINFO_TYPE_NATIVEINT>();
+ }
+ break;
+ case CEE_ADD_OVF:
+ BinaryArithOvfOp<BA_Add, /*asUnsigned*/false>();
+ break;
+ case CEE_ADD_OVF_UN:
+ BinaryArithOvfOp<BA_Add, /*asUnsigned*/true>();
+ break;
+ case CEE_MUL_OVF:
+ BinaryArithOvfOp<BA_Mul, /*asUnsigned*/false>();
+ break;
+ case CEE_MUL_OVF_UN:
+ BinaryArithOvfOp<BA_Mul, /*asUnsigned*/true>();
+ break;
+ case CEE_SUB_OVF:
+ BinaryArithOvfOp<BA_Sub, /*asUnsigned*/false>();
+ break;
+ case CEE_SUB_OVF_UN:
+ BinaryArithOvfOp<BA_Sub, /*asUnsigned*/true>();
+ break;
+ case CEE_ENDFINALLY:
+ // We have just ended a finally.
+ // If we were called during exception dispatch,
+ // rethrow the exception on our way out.
+ if (m_leaveInfoStack.IsEmpty())
+ {
+ Object* finallyException = NULL;
+
+ {
+ GCX_FORBID();
+ assert(m_inFlightException != NULL);
+ finallyException = m_inFlightException;
+ INTERPLOG("endfinally handling for %s, %p, %p\n", methName, m_methInfo, finallyException);
+ m_inFlightException = NULL;
+ }
+
+ COMPlusThrow(ObjectToOBJECTREF(finallyException));
+ UNREACHABLE();
+ }
+ // Otherwise, see if there's another finally block to
+ // execute as part of processing the current LEAVE...
+ else if (!SearchForCoveringFinally())
+ {
+ // No, there isn't -- go to the leave target.
+ assert(!m_leaveInfoStack.IsEmpty());
+ LeaveInfo li = m_leaveInfoStack.Pop();
+ ExecuteBranch(li.m_target);
+ }
+ // Yes, there is, and SearchForCoveringFinally set us up to start executing it.
+ continue; // Skip the default m_ILCodePtr++ at bottom of loop.
+
+ case CEE_STIND_I:
+ StInd<NativeInt>();
+ break;
+ case CEE_CONV_U:
+ Conv<NativeUInt, /*TIsUnsigned*/true, /*TCanHoldPtr*/true, /*TIsShort*/false, CORINFO_TYPE_NATIVEINT>();
+ break;
+ case CEE_PREFIX7:
+ NYI_INTERP("Unimplemented opcode: CEE_PREFIX7");
+ break;
+ case CEE_PREFIX6:
+ NYI_INTERP("Unimplemented opcode: CEE_PREFIX6");
+ break;
+ case CEE_PREFIX5:
+ NYI_INTERP("Unimplemented opcode: CEE_PREFIX5");
+ break;
+ case CEE_PREFIX4:
+ NYI_INTERP("Unimplemented opcode: CEE_PREFIX4");
+ break;
+ case CEE_PREFIX3:
+ NYI_INTERP("Unimplemented opcode: CEE_PREFIX3");
+ break;
+ case CEE_PREFIX2:
+ NYI_INTERP("Unimplemented opcode: CEE_PREFIX2");
+ break;
+ case CEE_PREFIX1:
+ // This is the prefix for all the 2-byte opcodes.
+ // Figure out the second byte of the 2-byte opcode.
+ ops = *(m_ILCodePtr + 1);
+#if INTERP_ILINSTR_PROFILE
+ // Take one away from PREFIX1, which we won't count.
+ InterlockedDecrement(&s_ILInstrExecs[CEE_PREFIX1]);
+ // Credit instead to the 2-byte instruction index.
+ InterlockedIncrement(&s_ILInstr2ByteExecs[ops]);
+#endif // INTERP_ILINSTR_PROFILE
+ switch (ops)
+ {
+ case TWOBYTE_CEE_ARGLIST:
+ // NYI_INTERP("Unimplemented opcode: TWOBYTE_CEE_ARGLIST");
+ assert(m_methInfo->m_varArgHandleArgNum != NO_VA_ARGNUM);
+ LdArgA(m_methInfo->m_varArgHandleArgNum);
+ m_ILCodePtr += 2;
+ break;
+
+ case TWOBYTE_CEE_CEQ:
+ CompareOp<CO_EQ>();
+ m_ILCodePtr += 2;
+ break;
+ case TWOBYTE_CEE_CGT:
+ CompareOp<CO_GT>();
+ m_ILCodePtr += 2;
+ break;
+ case TWOBYTE_CEE_CGT_UN:
+ CompareOp<CO_GT_UN>();
+ m_ILCodePtr += 2;
+ break;
+ case TWOBYTE_CEE_CLT:
+ CompareOp<CO_LT>();
+ m_ILCodePtr += 2;
+ break;
+ case TWOBYTE_CEE_CLT_UN:
+ CompareOp<CO_LT_UN>();
+ m_ILCodePtr += 2;
+ break;
+
+ case TWOBYTE_CEE_LDARG:
+ m_ILCodePtr += 2;
+ argNums = getU2LittleEndian(m_ILCodePtr);
+ LdArg(argNums);
+ m_ILCodePtr += 2;
+ break;
+ case TWOBYTE_CEE_LDARGA:
+ m_ILCodePtr += 2;
+ argNums = getU2LittleEndian(m_ILCodePtr);
+ LdArgA(argNums);
+ m_ILCodePtr += 2;
+ break;
+ case TWOBYTE_CEE_STARG:
+ m_ILCodePtr += 2;
+ argNums = getU2LittleEndian(m_ILCodePtr);
+ StArg(argNums);
+ m_ILCodePtr += 2;
+ break;
+
+ case TWOBYTE_CEE_LDLOC:
+ m_ILCodePtr += 2;
+ argNums = getU2LittleEndian(m_ILCodePtr);
+ LdLoc(argNums);
+ m_ILCodePtr += 2;
+ break;
+ case TWOBYTE_CEE_LDLOCA:
+ m_ILCodePtr += 2;
+ argNums = getU2LittleEndian(m_ILCodePtr);
+ LdLocA(argNums);
+ m_ILCodePtr += 2;
+ break;
+ case TWOBYTE_CEE_STLOC:
+ m_ILCodePtr += 2;
+ argNums = getU2LittleEndian(m_ILCodePtr);
+ StLoc(argNums);
+ m_ILCodePtr += 2;
+ break;
+
+ case TWOBYTE_CEE_CONSTRAINED:
+ RecordConstrainedCall();
+ break;
+
+ case TWOBYTE_CEE_VOLATILE:
+ // Set a flag that causes a memory barrier to be associated with the next load or store.
+ m_volatileFlag = true;
+ m_ILCodePtr += 2;
+ break;
+
+ case TWOBYTE_CEE_LDFTN:
+ LdFtn();
+ break;
+
+ case TWOBYTE_CEE_INITOBJ:
+ InitObj();
+ break;
+
+ case TWOBYTE_CEE_LOCALLOC:
+ LocAlloc();
+ m_ILCodePtr += 2;
+ break;
+
+ case TWOBYTE_CEE_LDVIRTFTN:
+ LdVirtFtn();
+ break;
+
+ case TWOBYTE_CEE_SIZEOF:
+ Sizeof();
+ break;
+
+ case TWOBYTE_CEE_RETHROW:
+ Rethrow();
+ break;
+
+ case TWOBYTE_CEE_READONLY:
+ m_readonlyFlag = true;
+ m_ILCodePtr += 2;
+ // A comment in importer.cpp indicates that READONLY may also apply to calls. We'll see.
+ _ASSERTE_MSG(*m_ILCodePtr == CEE_LDELEMA, "According to the ECMA spec, READONLY may only precede LDELEMA");
+ break;
+
+ case TWOBYTE_CEE_INITBLK:
+ InitBlk();
+ break;
+
+ case TWOBYTE_CEE_CPBLK:
+ CpBlk();
+ break;
+
+ case TWOBYTE_CEE_ENDFILTER:
+ EndFilter();
+ break;
+
+ case TWOBYTE_CEE_UNALIGNED:
+ // Nothing to do here.
+ m_ILCodePtr += 3;
+ break;
+
+ case TWOBYTE_CEE_TAILCALL:
+ // TODO: Needs revisiting when implementing tail call.
+ // NYI_INTERP("Unimplemented opcode: TWOBYTE_CEE_TAILCALL");
+ m_ILCodePtr += 2;
+ break;
+
+ case TWOBYTE_CEE_REFANYTYPE:
+ RefanyType();
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ continue;
+
+ case CEE_PREFIXREF:
+ NYI_INTERP("Unimplemented opcode: CEE_PREFIXREF");
+ m_ILCodePtr++;
+ continue;
+
+ default:
+ UNREACHABLE();
+ continue;
+ }
+ m_ILCodePtr++;
+ }
+ExitEvalLoop:;
+ INTERPLOG("DONE %d, %s\n", m_methInfo->m_stubNum, m_methInfo->m_methName);
+ }
+ EX_CATCH
+ {
+ INTERPLOG("EXCEPTION %d (throw), %s\n", m_methInfo->m_stubNum, m_methInfo->m_methName);
+
+ bool handleException = false;
+ OBJECTREF orThrowable = NULL;
+ GCX_COOP_NO_DTOR();
+
+ orThrowable = GET_THROWABLE();
+
+ if (m_filterNextScan != 0)
+ {
+ // We are in the middle of a filter scan and an exception is thrown inside
+ // a filter. We are supposed to swallow it and assume the filter did not
+ // handle the exception.
+ m_curStackHt = 0;
+ m_largeStructOperandStackHt = 0;
+ LdIcon(0);
+ EndFilter();
+ handleException = true;
+ }
+ else
+ {
+ // orThrowable must be protected. MethodHandlesException() will place orThrowable
+ // onto the operand stack (a permanently protected area) if it returns true.
+ GCPROTECT_BEGIN(orThrowable);
+ handleException = MethodHandlesException(orThrowable);
+ GCPROTECT_END();
+ }
+
+ if (handleException)
+ {
+ GetThread()->SafeSetThrowables(orThrowable
+ DEBUG_ARG(ThreadExceptionState::STEC_CurrentTrackerEqualNullOkForInterpreter));
+ goto EvalLoop;
+ }
+ else
+ {
+ INTERPLOG("EXCEPTION %d (rethrow), %s\n", m_methInfo->m_stubNum, m_methInfo->m_methName);
+ EX_RETHROW;
+ }
+ }
+ EX_END_CATCH(RethrowTransientExceptions)
+}
+
+#ifdef _MSC_VER
+#pragma optimize("", on)
+#endif
+
+void Interpreter::EndFilter()
+{
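+ // Per ECMA-335, endfilter consumes an int32 from the operand stack: 1
+ // (EXCEPTION_EXECUTE_HANDLER) routes control to the associated handler, and 0
+ // (EXCEPTION_CONTINUE_SEARCH) continues the search for a handler.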
+ unsigned handles = OpStackGet<unsigned>(0);
+ // If the filter decides to handle the exception, then go to the handler offset.
+ if (handles)
+ {
+ // We decided to handle the exception, so give all EH entries a chance to
+ // handle future exceptions: clear the filter-scan state.
+ m_filterNextScan = 0;
+ ExecuteBranch(m_methInfo->m_ILCode + m_filterHandlerOffset);
+ }
+ // The filter decided not to handle the exception; ask whether some other filter is
+ // lined up to try to handle it, or whether some catch/finally handler will handle it.
+ // If nothing handles the exception, rethrow and be done with it.
+ else
+ {
+ bool handlesEx = false;
+ {
+ OBJECTREF orThrowable = ObjectToOBJECTREF(m_inFlightException);
+ GCPROTECT_BEGIN(orThrowable);
+ handlesEx = MethodHandlesException(orThrowable);
+ GCPROTECT_END();
+ }
+ if (!handlesEx)
+ {
+ // Just clear the filter-scan state before rethrowing, to give every EH entry a
+ // chance to handle the "rethrow".
+ m_filterNextScan = 0;
+ Object* filterException = NULL;
+ {
+ GCX_FORBID();
+ assert(m_inFlightException != NULL);
+ filterException = m_inFlightException;
+ INTERPLOG("endfilter handling for %s, %p, %p\n", m_methInfo->m_methName, m_methInfo, filterException);
+ m_inFlightException = NULL;
+ }
+
+ COMPlusThrow(ObjectToOBJECTREF(filterException));
+ UNREACHABLE();
+ }
+ else
+ {
+ // Let it do another round of filter:end-filter or handler block.
+ // During the next endfilter, we will reuse m_filterNextScan and continue
+ // searching where we left off. Note, however, that while searching, any of
+ // the filters could throw an exception; such an exception is supposed to be
+ // swallowed, with endfilter then invoked with a value of 0 on the stack.
+ }
+ }
+}
+
+bool Interpreter::MethodHandlesException(OBJECTREF orThrowable)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ bool handlesEx = false;
+
+ if (orThrowable != NULL)
+ {
+ PTR_Thread pCurThread = GetThread();
+
+ // Don't catch ThreadAbort and other uncatchable exceptions
+ if (!IsUncatchable(&orThrowable))
+ {
+ // Does the current method catch this? The clauses are defined by IL offsets, so get the
+ // current offset. However, if we are in the middle of a filter scan, make sure we use the
+ // offset of the excepting code rather than the offset of the filter body.
+ DWORD curOffset = (m_filterNextScan != 0) ? m_filterExcILOffset : CurOffset();
+ TypeHandle orThrowableTH = TypeHandle(orThrowable->GetMethodTable());
+
+ GCPROTECT_BEGIN(orThrowable);
+ GCX_PREEMP();
+
+ // Perform a filter scan or regular walk of the EH Table. Filter scan is performed when
+ // we are evaluating a series of filters to handle the exception until the first handler
+ // (filter's or otherwise) that will handle the exception.
+ for (unsigned XTnum = m_filterNextScan; XTnum < m_methInfo->m_ehClauseCount; XTnum++)
+ {
+ CORINFO_EH_CLAUSE clause;
+ m_interpCeeInfo.getEHinfo(m_methInfo->m_method, XTnum, &clause);
+ assert(clause.HandlerLength != (unsigned)-1); // @DEPRECATED
+
+ // First, is the current offset in the try block?
+ if (clause.TryOffset <= curOffset && curOffset < clause.TryOffset + clause.TryLength)
+ {
+ unsigned handlerOffset = 0;
+ // CORINFO_EH_CLAUSE_NONE represents 'catch' blocks
+ if (clause.Flags == CORINFO_EH_CLAUSE_NONE)
+ {
+ // Now, does the catch block handle the thrown exception type?
+ CORINFO_CLASS_HANDLE excType = FindClass(clause.ClassToken InterpTracingArg(RTK_CheckHandlesException));
+ if (ExceptionIsOfRightType(TypeHandle::FromPtr(excType), orThrowableTH))
+ {
+ GCX_COOP();
+ // Push the exception object onto the operand stack.
+ OpStackSet<OBJECTREF>(0, orThrowable);
+ OpStackTypeSet(0, InterpreterType(CORINFO_TYPE_CLASS));
+ m_curStackHt = 1;
+ m_largeStructOperandStackHt = 0;
+ handlerOffset = clause.HandlerOffset;
+ handlesEx = true;
+ m_filterNextScan = 0;
+ }
+ else
+ {
+ GCX_COOP();
+ // Handle a wrapped exception.
+ OBJECTREF orUnwrapped = PossiblyUnwrapThrowable(orThrowable, GetMethodDesc()->GetAssembly());
+ if (ExceptionIsOfRightType(TypeHandle::FromPtr(excType), orUnwrapped->GetTrueTypeHandle()))
+ {
+ // Push the exception object onto the operand stack.
+ OpStackSet<OBJECTREF>(0, orUnwrapped);
+ OpStackTypeSet(0, InterpreterType(CORINFO_TYPE_CLASS));
+ m_curStackHt = 1;
+ m_largeStructOperandStackHt = 0;
+ handlerOffset = clause.HandlerOffset;
+ handlesEx = true;
+ m_filterNextScan = 0;
+ }
+ }
+ }
+ else if (clause.Flags == CORINFO_EH_CLAUSE_FILTER)
+ {
+ GCX_COOP();
+ // Push the exception object onto the operand stack.
+ OpStackSet<OBJECTREF>(0, orThrowable);
+ OpStackTypeSet(0, InterpreterType(CORINFO_TYPE_CLASS));
+ m_curStackHt = 1;
+ m_largeStructOperandStackHt = 0;
+ handlerOffset = clause.FilterOffset;
+ m_inFlightException = OBJECTREFToObject(orThrowable);
+ handlesEx = true;
+ m_filterHandlerOffset = clause.HandlerOffset;
+ m_filterNextScan = XTnum + 1;
+ m_filterExcILOffset = curOffset;
+ }
+ else if (clause.Flags == CORINFO_EH_CLAUSE_FAULT ||
+ clause.Flags == CORINFO_EH_CLAUSE_FINALLY)
+ {
+ GCX_COOP();
+ // Save the exception object to rethrow.
+ m_inFlightException = OBJECTREFToObject(orThrowable);
+ // Empty the operand stack.
+ m_curStackHt = 0;
+ m_largeStructOperandStackHt = 0;
+ handlerOffset = clause.HandlerOffset;
+ handlesEx = true;
+ m_filterNextScan = 0;
+ }
+
+ // Reset the interpreter loop in preparation of calling the handler.
+ if (handlesEx)
+ {
+ // Set the IL offset of the handler.
+ ExecuteBranch(m_methInfo->m_ILCode + handlerOffset);
+
+ // If an exception occurs while attempting to leave a protected scope,
+ // we empty the 'leave' info stack upon entering the handler.
+ while (!m_leaveInfoStack.IsEmpty())
+ {
+ m_leaveInfoStack.Pop();
+ }
+
+ // Some things are set up before a call, and must be cleared on an exception caught by the caller.
+ // A method that returns a struct allocates local space for the return value, and "registers" that
+ // space and the type so that it's scanned if a GC happens. "Unregister" it if we throw an exception
+ // in the call and handle it in the caller. (If it's not handled by the caller, the Interpreter is
+ // deallocated, so its value doesn't matter.)
+ m_structRetValITPtr = NULL;
+ m_callThisArg = NULL;
+ m_argsSize = 0;
+
+ break;
+ }
+ }
+ }
+ GCPROTECT_END();
+ }
+ if (!handlesEx)
+ {
+ DoMonitorExitWork();
+ }
+ }
+ return handlesEx;
+}
+
+static unsigned OpFormatExtraSize(opcode_format_t format) {
+ switch (format)
+ {
+ case InlineNone:
+ return 0;
+ case InlineVar:
+ return 2;
+ case InlineI:
+ case InlineBrTarget:
+ case InlineMethod:
+ case InlineField:
+ case InlineType:
+ case InlineString:
+ case InlineSig:
+ case InlineRVA:
+ case InlineTok:
+ case ShortInlineR:
+ return 4;
+
+ case InlineR:
+ case InlineI8:
+ return 8;
+
+ case InlineSwitch:
+ return 0; // We'll handle this specially.
+
+ case ShortInlineVar:
+ case ShortInlineI:
+ case ShortInlineBrTarget:
+ return 1;
+
+ default:
+ assert(false);
+ return 0;
+ }
+}
+
+
+
+static unsigned opSizes1Byte[CEE_COUNT];
+static bool opSizes1ByteInit = false;
+
+static void OpSizes1ByteInit()
+{
+ if (opSizes1ByteInit) return;
+#define OPDEF(name, stringname, stackpop, stackpush, params, kind, len, byte1, byte2, ctrl) \
+ opSizes1Byte[name] = len + OpFormatExtraSize(params);
+#include "opcode.def"
+#undef OPDEF
+ opSizes1ByteInit = true;
+}
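+// E.g. (illustrative): the one-byte opcode ldc.i4.s has len 1 and a ShortInlineI operand,
+// so opSizes1Byte[CEE_LDC_I4_S] == 2 -- one opcode byte plus a one-byte immediate.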
+
+// static
+bool Interpreter::MethodMayHaveLoop(BYTE* ilCode, unsigned codeSize)
+{
+ OpSizes1ByteInit();
+ int delta;
+ BYTE* ilCodeLim = ilCode + codeSize;
+ while (ilCode < ilCodeLim)
+ {
+ unsigned op = *ilCode;
+ switch (op)
+ {
+ case CEE_BR_S: case CEE_BRFALSE_S: case CEE_BRTRUE_S:
+ case CEE_BEQ_S: case CEE_BGE_S: case CEE_BGT_S: case CEE_BLE_S: case CEE_BLT_S:
+ case CEE_BNE_UN_S: case CEE_BGE_UN_S: case CEE_BGT_UN_S: case CEE_BLE_UN_S: case CEE_BLT_UN_S:
+ case CEE_LEAVE_S:
+ delta = getI1(ilCode + 1);
+ if (delta < 0) return true;
+ ilCode += 2;
+ break;
+
+ case CEE_BR: case CEE_BRFALSE: case CEE_BRTRUE:
+ case CEE_BEQ: case CEE_BGE: case CEE_BGT: case CEE_BLE: case CEE_BLT:
+ case CEE_BNE_UN: case CEE_BGE_UN: case CEE_BGT_UN: case CEE_BLE_UN: case CEE_BLT_UN:
+ case CEE_LEAVE:
+ delta = getI4LittleEndian(ilCode + 1);
+ if (delta < 0) return true;
+ ilCode += 5;
+ break;
+
+ case CEE_SWITCH:
+ {
+ UINT32 n = getU4LittleEndian(ilCode + 1);
+ UINT32 instrSize = 1 + (n + 1)*4;
+ for (unsigned i = 0; i < n; i++) {
+ delta = getI4LittleEndian(ilCode + (5 + i * 4));
+ if (delta < 0) return true;
+ }
+ ilCode += instrSize;
+ break;
+ }
+
+ case CEE_PREFIX1:
+ op = *(ilCode + 1) + 0x100;
+ assert(op < CEE_COUNT); // Bounds check for below.
+ // deliberate fall-through here.
+ default:
+ // For the rest of the 1-byte instructions, we'll use a table-driven approach.
+ ilCode += opSizes1Byte[op];
+ break;
+ }
+ }
+ return false;
+
+}
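+// (MethodMayHaveLoop is a cheap static pre-pass: any negative branch, leave, or switch target
+// delta conservatively reports a possible loop; a forward-only method returns false.)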
+
+void Interpreter::BackwardsBranchActions(int offset)
+{
+ // TODO: Figure out how to do a GC poll.
+}
+
+bool Interpreter::SearchForCoveringFinally()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ _ASSERTE_MSG(!m_leaveInfoStack.IsEmpty(), "precondition");
+
+ LeaveInfo& li = m_leaveInfoStack.PeekRef();
+
+ GCX_PREEMP();
+
+ for (unsigned XTnum = li.m_nextEHIndex; XTnum < m_methInfo->m_ehClauseCount; XTnum++)
+ {
+ CORINFO_EH_CLAUSE clause;
+ m_interpCeeInfo.getEHinfo(m_methInfo->m_method, XTnum, &clause);
+ assert(clause.HandlerLength != (unsigned)-1); // @DEPRECATED
+
+ // First, is the offset of the leave instruction in the try block?
+ unsigned tryEndOffset = clause.TryOffset + clause.TryLength;
+ if (clause.TryOffset <= li.m_offset && li.m_offset < tryEndOffset)
+ {
+ // Yes: is it a finally, and is its target outside the try block?
+ size_t targOffset = (li.m_target - m_methInfo->m_ILCode);
+ if (clause.Flags == CORINFO_EH_CLAUSE_FINALLY
+ && !(clause.TryOffset <= targOffset && targOffset < tryEndOffset))
+ {
+ m_ILCodePtr = m_methInfo->m_ILCode + clause.HandlerOffset;
+ li.m_nextEHIndex = XTnum + 1;
+ return true;
+ }
+ }
+ }
+
+ // Caller will handle popping the leave info stack.
+ return false;
+}
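+// (li.m_nextEHIndex makes the scan resumable: each endfinally comes back through here, and
+// the search resumes just past the clause whose finally last ran, so nested finallys execute
+// from the innermost outward.)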
+
+// static
+void Interpreter::GCScanRoots(promote_func* pf, ScanContext* sc, void* interp0)
+{
+ Interpreter* interp = reinterpret_cast<Interpreter*>(interp0);
+ interp->GCScanRoots(pf, sc);
+}
+
+void Interpreter::GCScanRoots(promote_func* pf, ScanContext* sc)
+{
+ // Report inbound arguments, if the interpreter has not been invoked directly.
+ // (In the latter case, the arguments are reported by the calling method.)
+ if (!m_directCall)
+ {
+ for (unsigned i = 0; i < m_methInfo->m_numArgs; i++)
+ {
+ GCScanRootAtLoc(reinterpret_cast<Object**>(GetArgAddr(i)), GetArgType(i), pf, sc);
+ }
+ }
+
+ if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_hasThisArg>())
+ {
+ if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_thisArgIsObjPtr>())
+ {
+ GCScanRootAtLoc(&m_thisArg, InterpreterType(CORINFO_TYPE_CLASS), pf, sc);
+ }
+ else
+ {
+ GCScanRootAtLoc(&m_thisArg, InterpreterType(CORINFO_TYPE_BYREF), pf, sc);
+ }
+ }
+
+ // This is the "this" argument passed in to DoCallWork. (Note that we treat this as a byref; it
+ // might be, for a struct instance method, and this covers the object pointer case as well.)
+ GCScanRootAtLoc(reinterpret_cast<Object**>(&m_callThisArg), InterpreterType(CORINFO_TYPE_BYREF), pf, sc);
+
+ // Scan the exception object that we'll rethrow at the end of the finally block.
+ GCScanRootAtLoc(reinterpret_cast<Object**>(&m_inFlightException), InterpreterType(CORINFO_TYPE_CLASS), pf, sc);
+
+ // A retBufArg may, in some cases, be a byref into the heap.
+ if (m_retBufArg != NULL)
+ {
+ GCScanRootAtLoc(reinterpret_cast<Object**>(&m_retBufArg), InterpreterType(CORINFO_TYPE_BYREF), pf, sc);
+ }
+
+ if (m_structRetValITPtr != NULL)
+ {
+ GCScanRootAtLoc(reinterpret_cast<Object**>(m_structRetValTempSpace), *m_structRetValITPtr, pf, sc);
+ }
+
+ // We'll conservatively assume that we might have a security object.
+ GCScanRootAtLoc(reinterpret_cast<Object**>(&m_securityObject), InterpreterType(CORINFO_TYPE_CLASS), pf, sc);
+
+ // Do locals.
+ for (unsigned i = 0; i < m_methInfo->m_numLocals; i++)
+ {
+ InterpreterType it = m_methInfo->m_localDescs[i].m_type;
+ void* localPtr = NULL;
+ if (it.IsLargeStruct(&m_interpCeeInfo))
+ {
+ void* structPtr = ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), sizeof(void**));
+ localPtr = *reinterpret_cast<void**>(structPtr);
+ }
+ else
+ {
+ localPtr = ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), it.Size(&m_interpCeeInfo));
+ }
+ GCScanRootAtLoc(reinterpret_cast<Object**>(localPtr), it, pf, sc, m_methInfo->GetPinningBit(i));
+ }
+
+ // Do current ostack.
+ for (unsigned i = 0; i < m_curStackHt; i++)
+ {
+ InterpreterType it = OpStackTypeGet(i);
+ if (it.IsLargeStruct(&m_interpCeeInfo))
+ {
+ Object** structPtr = reinterpret_cast<Object**>(OpStackGet<void*>(i));
+ // If the ostack value is a pointer to a local var value, don't scan, since we already
+ // scanned the variable value above.
+ if (!IsInLargeStructLocalArea(structPtr))
+ {
+ GCScanRootAtLoc(structPtr, it, pf, sc);
+ }
+ }
+ else
+ {
+ void* stackPtr = OpStackGetAddr(i, it.Size(&m_interpCeeInfo));
+ GCScanRootAtLoc(reinterpret_cast<Object**>(stackPtr), it, pf, sc);
+ }
+ }
+
+ // Any outgoing arguments for a call in progress.
+ for (unsigned i = 0; i < m_argsSize; i++)
+ {
+ // If a call has a large struct argument, we'll have pushed a pointer to the entry for that argument on the
+ // largeStructStack of the current Interpreter. That will be scanned by the code above, so just skip it.
+ InterpreterType undef(CORINFO_TYPE_UNDEF);
+ InterpreterType it = m_argTypes[i];
+ if (it != undef && !it.IsLargeStruct(&m_interpCeeInfo))
+ {
+ BYTE* argPtr = ArgSlotEndianessFixup(&m_args[i], it.Size(&m_interpCeeInfo));
+ GCScanRootAtLoc(reinterpret_cast<Object**>(argPtr), it, pf, sc);
+ }
+ }
+}
+
+void Interpreter::GCScanRootAtLoc(Object** loc, InterpreterType it, promote_func* pf, ScanContext* sc, bool pinningRef)
+{
+ switch (it.ToCorInfoType())
+ {
+ case CORINFO_TYPE_CLASS:
+ case CORINFO_TYPE_STRING:
+ {
+ DWORD flags = 0;
+ if (pinningRef) flags |= GC_CALL_PINNED;
+ (*pf)(loc, sc, flags);
+ }
+ break;
+
+ case CORINFO_TYPE_BYREF:
+ case CORINFO_TYPE_REFANY:
+ {
+ DWORD flags = GC_CALL_INTERIOR;
+ if (pinningRef) flags |= GC_CALL_PINNED;
+ (*pf)(loc, sc, flags);
+ }
+ break;
+
+ case CORINFO_TYPE_VALUECLASS:
+ assert(!pinningRef);
+ GCScanValueClassRootAtLoc(loc, it.ToClassHandle(), pf, sc);
+ break;
+
+ default:
+ assert(!pinningRef);
+ break;
+ }
+}
+
+void Interpreter::GCScanValueClassRootAtLoc(Object** loc, CORINFO_CLASS_HANDLE valueClsHnd, promote_func* pf, ScanContext* sc)
+{
+ MethodTable* valClsMT = GetMethodTableFromClsHnd(valueClsHnd);
+ ReportPointersFromValueType(pf, sc, valClsMT, loc);
+}
+
+// Returns "true" iff "cit" is "stack-normal": all integer types with byte size less than 4
+// are folded to CORINFO_TYPE_INT; all remaining unsigned types are folded to their signed counterparts.
+bool IsStackNormalType(CorInfoType cit)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch (cit)
+ {
+ case CORINFO_TYPE_UNDEF:
+ case CORINFO_TYPE_VOID:
+ case CORINFO_TYPE_BOOL:
+ case CORINFO_TYPE_CHAR:
+ case CORINFO_TYPE_BYTE:
+ case CORINFO_TYPE_UBYTE:
+ case CORINFO_TYPE_SHORT:
+ case CORINFO_TYPE_USHORT:
+ case CORINFO_TYPE_UINT:
+ case CORINFO_TYPE_NATIVEUINT:
+ case CORINFO_TYPE_ULONG:
+ case CORINFO_TYPE_VAR:
+ case CORINFO_TYPE_STRING:
+ case CORINFO_TYPE_PTR:
+ return false;
+
+ case CORINFO_TYPE_INT:
+ case CORINFO_TYPE_NATIVEINT:
+ case CORINFO_TYPE_BYREF:
+ case CORINFO_TYPE_CLASS:
+ case CORINFO_TYPE_LONG:
+ case CORINFO_TYPE_VALUECLASS:
+ case CORINFO_TYPE_REFANY:
+ // I chose to consider both float and double stack-normal; together these comprise
+ // the "F" type of the ECMA spec. This means I have to allow them to freely
+ // interconvert.
+ case CORINFO_TYPE_FLOAT:
+ case CORINFO_TYPE_DOUBLE:
+ return true;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+CorInfoType CorInfoTypeStackNormalize(CorInfoType cit)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch (cit)
+ {
+ case CORINFO_TYPE_UNDEF:
+ return CORINFO_TYPE_UNDEF;
+
+ case CORINFO_TYPE_VOID:
+ case CORINFO_TYPE_VAR:
+ _ASSERTE_MSG(false, "Type that cannot be on the ostack.");
+ return CORINFO_TYPE_UNDEF;
+
+ case CORINFO_TYPE_BOOL:
+ case CORINFO_TYPE_CHAR:
+ case CORINFO_TYPE_BYTE:
+ case CORINFO_TYPE_UBYTE:
+ case CORINFO_TYPE_SHORT:
+ case CORINFO_TYPE_USHORT:
+ case CORINFO_TYPE_UINT:
+ return CORINFO_TYPE_INT;
+
+ case CORINFO_TYPE_NATIVEUINT:
+ case CORINFO_TYPE_PTR:
+ return CORINFO_TYPE_NATIVEINT;
+
+ case CORINFO_TYPE_ULONG:
+ return CORINFO_TYPE_LONG;
+
+ case CORINFO_TYPE_STRING:
+ return CORINFO_TYPE_CLASS;
+
+ case CORINFO_TYPE_INT:
+ case CORINFO_TYPE_NATIVEINT:
+ case CORINFO_TYPE_BYREF:
+ case CORINFO_TYPE_CLASS:
+ case CORINFO_TYPE_LONG:
+ case CORINFO_TYPE_VALUECLASS:
+ case CORINFO_TYPE_REFANY:
+ // I chose to consider both float and double stack-normal; together these comprise
+ // the "F" type of the ECMA spec. This means I have to allow them to freely
+ // interconvert.
+ case CORINFO_TYPE_FLOAT:
+ case CORINFO_TYPE_DOUBLE:
+ assert(IsStackNormalType(cit));
+ return cit;
+
+ default:
+ UNREACHABLE();
+ }
+}
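+// For example, CorInfoTypeStackNormalize(CORINFO_TYPE_SHORT) == CORINFO_TYPE_INT,
+// CorInfoTypeStackNormalize(CORINFO_TYPE_ULONG) == CORINFO_TYPE_LONG, and
+// CorInfoTypeStackNormalize(CORINFO_TYPE_PTR) == CORINFO_TYPE_NATIVEINT, matching the
+// ECMA-335 evaluation-stack types (int32, int64, native int, F, O, &).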
+
+InterpreterType InterpreterType::StackNormalize() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch (ToCorInfoType())
+ {
+ case CORINFO_TYPE_BOOL:
+ case CORINFO_TYPE_CHAR:
+ case CORINFO_TYPE_BYTE:
+ case CORINFO_TYPE_UBYTE:
+ case CORINFO_TYPE_SHORT:
+ case CORINFO_TYPE_USHORT:
+ case CORINFO_TYPE_UINT:
+ return InterpreterType(CORINFO_TYPE_INT);
+
+ case CORINFO_TYPE_NATIVEUINT:
+ case CORINFO_TYPE_PTR:
+ return InterpreterType(CORINFO_TYPE_NATIVEINT);
+
+ case CORINFO_TYPE_ULONG:
+ return InterpreterType(CORINFO_TYPE_LONG);
+
+ case CORINFO_TYPE_STRING:
+ return InterpreterType(CORINFO_TYPE_CLASS);
+
+ case CORINFO_TYPE_INT:
+ case CORINFO_TYPE_NATIVEINT:
+ case CORINFO_TYPE_BYREF:
+ case CORINFO_TYPE_CLASS:
+ case CORINFO_TYPE_LONG:
+ case CORINFO_TYPE_VALUECLASS:
+ case CORINFO_TYPE_REFANY:
+ case CORINFO_TYPE_FLOAT:
+ case CORINFO_TYPE_DOUBLE:
+ return *const_cast<InterpreterType*>(this);
+
+ case CORINFO_TYPE_UNDEF:
+ case CORINFO_TYPE_VOID:
+ case CORINFO_TYPE_VAR:
+ default:
+ _ASSERTE_MSG(false, "should not reach here");
+ return *const_cast<InterpreterType*>(this);
+ }
+}
+
+#ifdef _DEBUG
+bool InterpreterType::MatchesWork(const InterpreterType it2, CEEInfo* info) const
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ if (*this == it2) return true;
+
+ // Otherwise...
+ CorInfoType cit1 = ToCorInfoType();
+ CorInfoType cit2 = it2.ToCorInfoType();
+
+ GCX_PREEMP();
+
+ // An approximation: valueclasses of the same size match.
+ if (cit1 == CORINFO_TYPE_VALUECLASS &&
+ cit2 == CORINFO_TYPE_VALUECLASS &&
+ Size(info) == it2.Size(info))
+ {
+ return true;
+ }
+
+ // NativeInt matches byref. (In unsafe code).
+ if ((cit1 == CORINFO_TYPE_BYREF && cit2 == CORINFO_TYPE_NATIVEINT))
+ return true;
+
+ // Apparently the VM may perform the optimization of reporting the return type of a method that
+ // returns a struct with a single nativeint field *as* nativeint, and similarly for at least some
+ // other primitive types. So weaken this check to allow that.
+ // (The check is actually a little weaker still, since I don't want to crack the return type and
+ // verify that it has only a single nativeint member -- I just ensure that the total size is correct.)
+ switch (cit1)
+ {
+ case CORINFO_TYPE_NATIVEINT:
+ case CORINFO_TYPE_NATIVEUINT:
+ assert(sizeof(NativeInt) == sizeof(NativeUInt));
+ if (it2.Size(info) == sizeof(NativeInt))
+ return true;
+ break;
+
+ case CORINFO_TYPE_INT:
+ case CORINFO_TYPE_UINT:
+ assert(sizeof(INT32) == sizeof(UINT32));
+ if (it2.Size(info) == sizeof(INT32))
+ return true;
+ break;
+
+ default:
+ break;
+ }
+
+ // See if the second is a value type synonym for a primitive.
+ if (cit2 == CORINFO_TYPE_VALUECLASS)
+ {
+ CorInfoType cit2prim = info->getTypeForPrimitiveValueClass(it2.ToClassHandle());
+ if (cit2prim != CORINFO_TYPE_UNDEF)
+ {
+ InterpreterType it2prim(cit2prim);
+ if (*this == it2prim.StackNormalize())
+ return true;
+ }
+ }
+
+ // Otherwise...
+ return false;
+}
+#endif // _DEBUG
+
+// Static
+size_t CorInfoTypeSizeArray[] =
+{
+ /*CORINFO_TYPE_UNDEF = 0x0*/0,
+ /*CORINFO_TYPE_VOID = 0x1*/0,
+ /*CORINFO_TYPE_BOOL = 0x2*/1,
+ /*CORINFO_TYPE_CHAR = 0x3*/2,
+ /*CORINFO_TYPE_BYTE = 0x4*/1,
+ /*CORINFO_TYPE_UBYTE = 0x5*/1,
+ /*CORINFO_TYPE_SHORT = 0x6*/2,
+ /*CORINFO_TYPE_USHORT = 0x7*/2,
+ /*CORINFO_TYPE_INT = 0x8*/4,
+ /*CORINFO_TYPE_UINT = 0x9*/4,
+ /*CORINFO_TYPE_LONG = 0xa*/8,
+ /*CORINFO_TYPE_ULONG = 0xb*/8,
+ /*CORINFO_TYPE_NATIVEINT = 0xc*/sizeof(void*),
+ /*CORINFO_TYPE_NATIVEUINT = 0xd*/sizeof(void*),
+ /*CORINFO_TYPE_FLOAT = 0xe*/4,
+ /*CORINFO_TYPE_DOUBLE = 0xf*/8,
+ /*CORINFO_TYPE_STRING = 0x10*/sizeof(void*),
+ /*CORINFO_TYPE_PTR = 0x11*/sizeof(void*),
+ /*CORINFO_TYPE_BYREF = 0x12*/sizeof(void*),
+ /*CORINFO_TYPE_VALUECLASS = 0x13*/0,
+ /*CORINFO_TYPE_CLASS = 0x14*/sizeof(void*),
+ /*CORINFO_TYPE_REFANY = 0x15*/sizeof(void*)*2,
+ /*CORINFO_TYPE_VAR = 0x16*/0,
+};
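+// (VALUECLASS and VAR report 0 above because their sizes are not fixed; a value class's size
+// must instead be obtained via its class handle.)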
+
+bool CorInfoTypeIsUnsigned(CorInfoType cit)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch (cit)
+ {
+ case CORINFO_TYPE_UINT:
+ case CORINFO_TYPE_NATIVEUINT:
+ case CORINFO_TYPE_ULONG:
+ case CORINFO_TYPE_UBYTE:
+ case CORINFO_TYPE_USHORT:
+ case CORINFO_TYPE_CHAR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool CorInfoTypeIsIntegral(CorInfoType cit)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch (cit)
+ {
+ case CORINFO_TYPE_UINT:
+ case CORINFO_TYPE_NATIVEUINT:
+ case CORINFO_TYPE_ULONG:
+ case CORINFO_TYPE_UBYTE:
+ case CORINFO_TYPE_USHORT:
+ case CORINFO_TYPE_INT:
+ case CORINFO_TYPE_NATIVEINT:
+ case CORINFO_TYPE_LONG:
+ case CORINFO_TYPE_BYTE:
+ case CORINFO_TYPE_BOOL:
+ case CORINFO_TYPE_SHORT:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool CorInfoTypeIsFloatingPoint(CorInfoType cit)
+{
+ return cit == CORINFO_TYPE_FLOAT || cit == CORINFO_TYPE_DOUBLE;
+}
+
+
+bool CorElemTypeIsUnsigned(CorElementType cet)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch (cet)
+ {
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_U:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool CorInfoTypeIsPointer(CorInfoType cit)
+{
+ LIMITED_METHOD_CONTRACT;
+ switch (cit)
+ {
+ case CORINFO_TYPE_PTR:
+ case CORINFO_TYPE_BYREF:
+ case CORINFO_TYPE_NATIVEINT:
+ case CORINFO_TYPE_NATIVEUINT:
+ return true;
+
+ // It seems like the ECMA spec doesn't allow this, but (at least) the managed C++ compiler
+ // expects the explicitly-sized integer type matching the platform pointer size to work as a pointer:
+ case CORINFO_TYPE_INT:
+ case CORINFO_TYPE_UINT:
+ return sizeof(NativeInt) == sizeof(INT32);
+ case CORINFO_TYPE_LONG:
+ case CORINFO_TYPE_ULONG:
+ return sizeof(NativeInt) == sizeof(INT64);
+
+ default:
+ return false;
+ }
+}
+
+void Interpreter::LdArg(int argNum)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ LdFromMemAddr(GetArgAddr(argNum), GetArgType(argNum));
+}
+
+void Interpreter::LdArgA(int argNum)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_BYREF));
+ OpStackSet<void*>(m_curStackHt, reinterpret_cast<void*>(GetArgAddr(argNum)));
+ m_curStackHt++;
+}
+
+void Interpreter::StArg(int argNum)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ StToLocalMemAddr(GetArgAddr(argNum), GetArgType(argNum));
+}
+
+
+void Interpreter::LdLocA(int locNum)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ InterpreterType tp = m_methInfo->m_localDescs[locNum].m_type;
+ void* addr;
+ if (tp.IsLargeStruct(&m_interpCeeInfo))
+ {
+ void* structPtr = ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(locNum)), sizeof(void**));
+ addr = *reinterpret_cast<void**>(structPtr);
+ }
+ else
+ {
+ addr = ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(locNum)), tp.Size(&m_interpCeeInfo));
+ }
+ // The "addr" above, while a byref, is never a heap pointer, so we're robust if
+ // any of these were to cause a GC.
+ OpStackSet<void*>(m_curStackHt, addr);
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_BYREF));
+ m_curStackHt++;
+}
+
+void Interpreter::LdIcon(INT32 c)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_INT));
+ OpStackSet<INT32>(m_curStackHt, c);
+ m_curStackHt++;
+}
+
+void Interpreter::LdR4con(INT32 c)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_FLOAT));
+ OpStackSet<INT32>(m_curStackHt, c);
+ m_curStackHt++;
+}
+
+void Interpreter::LdLcon(INT64 c)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_LONG));
+ OpStackSet<INT64>(m_curStackHt, c);
+ m_curStackHt++;
+}
+
+void Interpreter::LdR8con(INT64 c)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_DOUBLE));
+ OpStackSet<INT64>(m_curStackHt, c);
+ m_curStackHt++;
+}
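+// Note that LdR4con/LdR8con receive the raw IL bit pattern as an integer and store it
+// unconverted; the FLOAT/DOUBLE stack type tags the slot, so no integer-to-floating-point
+// conversion happens here.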
+
+void Interpreter::LdNull()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
+ OpStackSet<void*>(m_curStackHt, NULL);
+ m_curStackHt++;
+}
+
+template<typename T, CorInfoType cit>
+void Interpreter::LdInd()
+{
+ assert(TOSIsPtr());
+ assert(IsStackNormalType(cit));
+ unsigned curStackInd = m_curStackHt-1;
+ T* ptr = OpStackGet<T*>(curStackInd);
+ ThrowOnInvalidPointer(ptr);
+ OpStackSet<T>(curStackInd, *ptr);
+ OpStackTypeSet(curStackInd, InterpreterType(cit));
+ BarrierIfVolatile();
+}
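+// (Note the barrier placement: BarrierIfVolatile follows the read in the load cases, giving a
+// volatile load acquire-style ordering, while in the store cases below it precedes the write,
+// giving a volatile store release-style ordering.)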
+
+template<typename T, bool isUnsigned>
+void Interpreter::LdIndShort()
+{
+ assert(TOSIsPtr());
+ assert(sizeof(T) < 4);
+ unsigned curStackInd = m_curStackHt-1;
+ T* ptr = OpStackGet<T*>(curStackInd);
+ ThrowOnInvalidPointer(ptr);
+ if (isUnsigned)
+ {
+ OpStackSet<UINT32>(curStackInd, *ptr);
+ }
+ else
+ {
+ OpStackSet<INT32>(curStackInd, *ptr);
+ }
+ // All short integers are normalized to INT as their stack type.
+ OpStackTypeSet(curStackInd, InterpreterType(CORINFO_TYPE_INT));
+ BarrierIfVolatile();
+}
+
+template<typename T>
+void Interpreter::StInd()
+{
+ assert(m_curStackHt >= 2);
+ assert(CorInfoTypeIsPointer(OpStackTypeGet(m_curStackHt-2).ToCorInfoType()));
+ BarrierIfVolatile();
+ unsigned stackInd0 = m_curStackHt-2;
+ unsigned stackInd1 = m_curStackHt-1;
+ T val = OpStackGet<T>(stackInd1);
+ T* ptr = OpStackGet<T*>(stackInd0);
+ ThrowOnInvalidPointer(ptr);
+ *ptr = val;
+ m_curStackHt -= 2;
+
+#ifdef _DEBUG
+ if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL) &&
+ IsInLocalArea(ptr))
+ {
+ PrintLocals();
+ }
+#endif // _DEBUG
+}
+
+void Interpreter::StInd_Ref()
+{
+ assert(m_curStackHt >= 2);
+ assert(CorInfoTypeIsPointer(OpStackTypeGet(m_curStackHt-2).ToCorInfoType()));
+ BarrierIfVolatile();
+ unsigned stackInd0 = m_curStackHt-2;
+ unsigned stackInd1 = m_curStackHt-1;
+ OBJECTREF val = ObjectToOBJECTREF(OpStackGet<Object*>(stackInd1));
+ OBJECTREF* ptr = OpStackGet<OBJECTREF*>(stackInd0);
+ ThrowOnInvalidPointer(ptr);
+ SetObjectReferenceUnchecked(ptr, val);
+ m_curStackHt -= 2;
+
+#ifdef _DEBUG
+ if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL) &&
+ IsInLocalArea(ptr))
+ {
+ PrintLocals();
+ }
+#endif // _DEBUG
+}
+
+
+template<int op>
+void Interpreter::BinaryArithOp()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 2);
+ unsigned op1idx = m_curStackHt - 2;
+ unsigned op2idx = m_curStackHt - 1;
+ InterpreterType t1 = OpStackTypeGet(op1idx);
+ assert(IsStackNormalType(t1.ToCorInfoType()));
+ // Looking at the generated code, it does seem to save some instructions to use the "shifted
+ // types," though the effect on end-to-end time is variable. So I'll leave it set.
+ InterpreterType t2 = OpStackTypeGet(op2idx);
+ assert(IsStackNormalType(t2.ToCorInfoType()));
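+ // Result-type summary (ECMA-335 III.1.5, plus this interpreter's loose-rule extensions):
+ // Int op Int = Int; any Int/NativeInt mix = NativeInt; Long op Long = Long;
+ // Float op Float = Float; any Float/Double mix = Double; ByRef +/- integer = ByRef;
+ // ByRef - ByRef = NativeInt.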
+
+ // In all cases below, since "op" is a compile-time constant, "if" chains on it should fold away.
+ switch (t1.ToCorInfoTypeShifted())
+ {
+ case CORINFO_TYPE_SHIFTED_INT:
+ if (t1 == t2)
+ {
+ // Int op Int = Int
+ INT32 val1 = OpStackGet<INT32>(op1idx);
+ INT32 val2 = OpStackGet<INT32>(op2idx);
+ BinaryArithOpWork<op, INT32, /*IsIntType*/true, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
+ if (cits2 == CORINFO_TYPE_SHIFTED_NATIVEINT)
+ {
+ // Int op NativeInt = NativeInt
+ NativeInt val1 = static_cast<NativeInt>(OpStackGet<INT32>(op1idx));
+ NativeInt val2 = OpStackGet<NativeInt>(op2idx);
+ BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
+ }
+ else if (s_InterpreterLooseRules && cits2 == CORINFO_TYPE_SHIFTED_LONG)
+ {
+ // Int op Long = Long
+ INT64 val1 = static_cast<INT64>(OpStackGet<INT32>(op1idx));
+ INT64 val2 = OpStackGet<INT64>(op2idx);
+ BinaryArithOpWork<op, INT64, /*IsIntType*/true, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/false>(val1, val2);
+ }
+ else if (cits2 == CORINFO_TYPE_SHIFTED_BYREF)
+ {
+ if (op == BA_Add || (s_InterpreterLooseRules && op == BA_Sub))
+ {
+ // Int + ByRef = ByRef
+ NativeInt val1 = static_cast<NativeInt>(OpStackGet<INT32>(op1idx));
+ NativeInt val2 = OpStackGet<NativeInt>(op2idx);
+ BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
+ }
+ else
+ {
+ VerificationError("Operation not permitted on int and managed pointer.");
+ }
+ }
+ else
+ {
+ VerificationError("Binary arithmetic operation type mismatch (int and ?)");
+ }
+ }
+ break;
+
+ case CORINFO_TYPE_SHIFTED_NATIVEINT:
+ {
+ NativeInt val1 = OpStackGet<NativeInt>(op1idx);
+ if (t1 == t2)
+ {
+ // NativeInt op NativeInt = NativeInt
+ NativeInt val2 = OpStackGet<NativeInt>(op2idx);
+ BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
+ if (cits2 == CORINFO_TYPE_SHIFTED_INT)
+ {
+ // NativeInt op Int = NativeInt
+ NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
+ BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ // CLI spec does not allow adding a native int and an int64. So use loose rules.
+ else if (s_InterpreterLooseRules && cits2 == CORINFO_TYPE_SHIFTED_LONG)
+ {
+ // NativeInt op Long = NativeInt
+ NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT64>(op2idx));
+ BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else if (cits2 == CORINFO_TYPE_SHIFTED_BYREF)
+ {
+ if (op == BA_Add || (s_InterpreterLooseRules && op == BA_Sub))
+ {
+ // NativeInt + ByRef = ByRef
+ NativeInt val2 = OpStackGet<NativeInt>(op2idx);
+ BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
+ }
+ else
+ {
+ VerificationError("Operation not permitted on native int and managed pointer.");
+ }
+ }
+ else
+ {
+ VerificationError("Binary arithmetic operation type mismatch (native int and ?)");
+ }
+ }
+ }
+ break;
+
+ case CORINFO_TYPE_SHIFTED_LONG:
+ {
+ bool looseLong = false;
+#if defined(_AMD64_)
+ looseLong = (s_InterpreterLooseRules && (t2.ToCorInfoType() == CORINFO_TYPE_NATIVEINT ||
+ t2.ToCorInfoType() == CORINFO_TYPE_BYREF));
+#endif
+ if (t1 == t2 || looseLong)
+ {
+ // Long op Long = Long
+ INT64 val1 = OpStackGet<INT64>(op1idx);
+ INT64 val2 = OpStackGet<INT64>(op2idx);
+ BinaryArithOpWork<op, INT64, /*IsIntType*/true, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ VerificationError("Binary arithmetic operation type mismatch (long and ?)");
+ }
+ }
+ break;
+
+ case CORINFO_TYPE_SHIFTED_FLOAT:
+ {
+ if (t1 == t2)
+ {
+ // Float op Float = Float
+ float val1 = OpStackGet<float>(op1idx);
+ float val2 = OpStackGet<float>(op2idx);
+ BinaryArithOpWork<op, float, /*IsIntType*/false, CORINFO_TYPE_FLOAT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
+ if (cits2 == CORINFO_TYPE_SHIFTED_DOUBLE)
+ {
+ // Float op Double = Double
+ double val1 = static_cast<double>(OpStackGet<float>(op1idx));
+ double val2 = OpStackGet<double>(op2idx);
+ BinaryArithOpWork<op, double, /*IsIntType*/false, CORINFO_TYPE_DOUBLE, /*TypeIsUnchanged*/false>(val1, val2);
+ }
+ else
+ {
+ VerificationError("Binary arithmetic operation type mismatch (float and ?)");
+ }
+ }
+ }
+ break;
+
+ case CORINFO_TYPE_SHIFTED_DOUBLE:
+ {
+ if (t1 == t2)
+ {
+ // Double op Double = Double
+ double val1 = OpStackGet<double>(op1idx);
+ double val2 = OpStackGet<double>(op2idx);
+ BinaryArithOpWork<op, double, /*IsIntType*/false, CORINFO_TYPE_DOUBLE, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
+ if (cits2 == CORINFO_TYPE_SHIFTED_FLOAT)
+ {
+ // Double op Float = Double
+ double val1 = OpStackGet<double>(op1idx);
+ double val2 = static_cast<double>(OpStackGet<float>(op2idx));
+ BinaryArithOpWork<op, double, /*IsIntType*/false, CORINFO_TYPE_DOUBLE, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ VerificationError("Binary arithmetic operation type mismatch (double and ?)");
+ }
+ }
+ }
+ break;
+
+ case CORINFO_TYPE_SHIFTED_BYREF:
+ {
+ NativeInt val1 = OpStackGet<NativeInt>(op1idx);
+ CorInfoTypeShifted cits2 = t2.ToCorInfoTypeShifted();
+ if (cits2 == CORINFO_TYPE_SHIFTED_INT)
+ {
+ if (op == BA_Add || op == BA_Sub)
+ {
+ // ByRef +- Int = ByRef
+ NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
+ BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ VerificationError("May only add/subtract managed pointer and integral value.");
+ }
+ }
+ else if (cits2 == CORINFO_TYPE_SHIFTED_NATIVEINT)
+ {
+ if (op == BA_Add || op == BA_Sub)
+ {
+ // ByRef +- NativeInt = ByRef
+ NativeInt val2 = OpStackGet<NativeInt>(op2idx);
+ BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ VerificationError("May only add/subtract managed pointer and integral value.");
+ }
+ }
+ else if (cits2 == CORINFO_TYPE_SHIFTED_BYREF)
+ {
+ if (op == BA_Sub)
+ {
+ // ByRef - ByRef = NativeInt
+ NativeInt val2 = OpStackGet<NativeInt>(op2idx);
+ BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
+ }
+ else
+ {
+ VerificationError("May only subtract managed pointer values.");
+ }
+ }
+ // CLI spec does not allow adding a native int and an int64. So use loose rules.
+ else if (s_InterpreterLooseRules && cits2 == CORINFO_TYPE_SHIFTED_LONG)
+ {
+            // ByRef op Long = ByRef (loose rules; the result slot keeps its byref type)
+ NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT64>(op2idx));
+ BinaryArithOpWork<op, NativeInt, /*IsIntType*/true, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ VerificationError("Binary arithmetic operation not permitted on byref");
+ }
+ }
+ break;
+
+ case CORINFO_TYPE_SHIFTED_CLASS:
+ VerificationError("Can't do binary arithmetic on object references.");
+ break;
+
+ default:
+ _ASSERTE_MSG(false, "Non-stack-normal type on stack.");
+ }
+
+ // In all cases:
+ m_curStackHt--;
+}
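+
+// Illustration (informal, not compiled): the dispatch above follows the
+// ECMA-335 binary numeric result table. E.g. "add" with an int32 and a byref
+// on the stack widens the int32 and retypes the result slot:
+//
+//   NativeInt v1 = static_cast<NativeInt>(OpStackGet<INT32>(op1idx)); // offset, e.g. 8
+//   NativeInt v2 = OpStackGet<NativeInt>(op2idx);                     // byref address
+//   BinaryArithOpWork<BA_Add, NativeInt, /*IsIntType*/true,
+//                     CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(v1, v2);
+//   // The slot at op1idx now holds the advanced address, typed CORINFO_TYPE_BYREF.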
+
+template<int op, bool asUnsigned>
+void Interpreter::BinaryArithOvfOp()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 2);
+ unsigned op1idx = m_curStackHt - 2;
+ unsigned op2idx = m_curStackHt - 1;
+
+ InterpreterType t1 = OpStackTypeGet(op1idx);
+ CorInfoType cit1 = t1.ToCorInfoType();
+ assert(IsStackNormalType(cit1));
+
+ InterpreterType t2 = OpStackTypeGet(op2idx);
+ CorInfoType cit2 = t2.ToCorInfoType();
+ assert(IsStackNormalType(cit2));
+
+    // In all cases below, since "op" is a compile-time constant, "if" chains on it should fold away.
+ switch (cit1)
+ {
+ case CORINFO_TYPE_INT:
+ if (cit2 == CORINFO_TYPE_INT)
+ {
+ if (asUnsigned)
+ {
+ // UnsignedInt op UnsignedInt = UnsignedInt
+ UINT32 val1 = OpStackGet<UINT32>(op1idx);
+ UINT32 val2 = OpStackGet<UINT32>(op2idx);
+ BinaryArithOvfOpWork<op, UINT32, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ // Int op Int = Int
+ INT32 val1 = OpStackGet<INT32>(op1idx);
+ INT32 val2 = OpStackGet<INT32>(op2idx);
+ BinaryArithOvfOpWork<op, INT32, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ }
+ else if (cit2 == CORINFO_TYPE_NATIVEINT)
+ {
+ if (asUnsigned)
+ {
+ // UnsignedInt op UnsignedNativeInt = UnsignedNativeInt
+ NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<UINT32>(op1idx));
+ NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
+ BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
+ }
+ else
+ {
+ // Int op NativeInt = NativeInt
+ NativeInt val1 = static_cast<NativeInt>(OpStackGet<INT32>(op1idx));
+ NativeInt val2 = OpStackGet<NativeInt>(op2idx);
+ BinaryArithOvfOpWork<op, NativeInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
+ }
+ }
+ else if (cit2 == CORINFO_TYPE_BYREF)
+ {
+ if (asUnsigned && op == BA_Add)
+ {
+ // UnsignedInt + ByRef = ByRef
+ NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<UINT32>(op1idx));
+ NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
+ BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
+ }
+ else
+ {
+ VerificationError("Illegal arithmetic overflow operation for int and byref.");
+ }
+ }
+ else
+ {
+ VerificationError("Binary arithmetic overflow operation type mismatch (int and ?)");
+ }
+ break;
+
+ case CORINFO_TYPE_NATIVEINT:
+ if (cit2 == CORINFO_TYPE_INT)
+ {
+ if (asUnsigned)
+ {
+ // UnsignedNativeInt op UnsignedInt = UnsignedNativeInt
+ NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
+ NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<UINT32>(op2idx));
+ BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ // NativeInt op Int = NativeInt
+ NativeInt val1 = OpStackGet<NativeInt>(op1idx);
+ NativeInt val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
+ BinaryArithOvfOpWork<op, NativeInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ }
+ else if (cit2 == CORINFO_TYPE_NATIVEINT)
+ {
+ if (asUnsigned)
+ {
+ // UnsignedNativeInt op UnsignedNativeInt = UnsignedNativeInt
+ NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
+ NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
+ BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ // NativeInt op NativeInt = NativeInt
+ NativeInt val1 = OpStackGet<NativeInt>(op1idx);
+ NativeInt val2 = OpStackGet<NativeInt>(op2idx);
+ BinaryArithOvfOpWork<op, NativeInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ }
+ else if (cit2 == CORINFO_TYPE_BYREF)
+ {
+ if (asUnsigned && op == BA_Add)
+ {
+ // UnsignedNativeInt op ByRef = ByRef
+                NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);  // Full native width; <UINT32> would truncate on 64-bit.
+ NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
+ BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
+ }
+ else
+ {
+ VerificationError("Illegal arithmetic overflow operation for native int and byref.");
+ }
+ }
+ else
+ {
+ VerificationError("Binary arithmetic overflow operation type mismatch (native int and ?)");
+ }
+ break;
+
+ case CORINFO_TYPE_LONG:
+ if (cit2 == CORINFO_TYPE_LONG || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT))
+ {
+ if (asUnsigned)
+ {
+ // UnsignedLong op UnsignedLong = UnsignedLong
+ UINT64 val1 = OpStackGet<UINT64>(op1idx);
+ UINT64 val2 = OpStackGet<UINT64>(op2idx);
+ BinaryArithOvfOpWork<op, UINT64, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ // Long op Long = Long
+ INT64 val1 = OpStackGet<INT64>(op1idx);
+ INT64 val2 = OpStackGet<INT64>(op2idx);
+ BinaryArithOvfOpWork<op, INT64, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ }
+ else
+ {
+ VerificationError("Binary arithmetic overflow operation type mismatch (long and ?)");
+ }
+ break;
+
+ case CORINFO_TYPE_BYREF:
+ if (asUnsigned && (op == BA_Add || op == BA_Sub))
+ {
+ NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
+ if (cit2 == CORINFO_TYPE_INT)
+ {
+ // ByRef +- UnsignedInt = ByRef
+ NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<INT32>(op2idx));
+ BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else if (cit2 == CORINFO_TYPE_NATIVEINT)
+ {
+ // ByRef +- UnsignedNativeInt = ByRef
+ NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
+ BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else if (cit2 == CORINFO_TYPE_BYREF)
+ {
+ if (op == BA_Sub)
+ {
+ // ByRef - ByRef = UnsignedNativeInt
+ NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
+ BinaryArithOvfOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
+ }
+ else
+ {
+ VerificationError("Illegal arithmetic overflow operation for byref and byref: may only subtract managed pointer values.");
+ }
+ }
+ else
+ {
+ VerificationError("Binary arithmetic overflow operation not permitted on byref");
+ }
+ }
+ else
+ {
+ if (!asUnsigned)
+ {
+ VerificationError("Signed binary arithmetic overflow operation not permitted on managed pointer values.");
+ }
+ else
+ {
+ _ASSERTE_MSG(op == BA_Mul, "Must be an overflow operation; tested for Add || Sub above.");
+ VerificationError("Cannot multiply managed pointer values.");
+ }
+ }
+ break;
+
+ case CORINFO_TYPE_SHIFTED_CLASS:
+ VerificationError("Can't do binary arithmetic overflow operation on object references.");
+ break;
+
+ default:
+ _ASSERTE_MSG(false, "Non-stack-normal type on stack.");
+ }
+
+ // In all cases:
+ m_curStackHt--;
+}
+
+template<int op, typename T, CorInfoType cit, bool TypeIsUnchanged>
+void Interpreter::BinaryArithOvfOpWork(T val1, T val2)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ ClrSafeInt<T> res;
+ ClrSafeInt<T> safeV1(val1);
+ ClrSafeInt<T> safeV2(val2);
+ if (op == BA_Add)
+ {
+ res = safeV1 + safeV2;
+ }
+ else if (op == BA_Sub)
+ {
+ res = safeV1 - safeV2;
+ }
+ else if (op == BA_Mul)
+ {
+ res = safeV1 * safeV2;
+ }
+ else
+ {
+ _ASSERTE_MSG(false, "op should be one of the overflow ops...");
+ }
+
+ if (res.IsOverflow())
+ {
+ ThrowOverflowException();
+ }
+
+ unsigned residx = m_curStackHt - 2;
+ OpStackSet<T>(residx, res.Value());
+ if (!TypeIsUnchanged)
+ {
+ OpStackTypeSet(residx, InterpreterType(cit));
+ }
+}
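+
+// Illustration (informal): ClrSafeInt catches the wrap rather than letting it
+// happen silently. For "add.ovf" on int32 operands:
+//
+//   ClrSafeInt<INT32> a(0x7FFFFFFF), b(1);
+//   ClrSafeInt<INT32> r = a + b;
+//   // r.IsOverflow() is true, so ThrowOverflowException() is reached; a plain
+//   // "add" through BinaryArithOpWork would instead wrap to 0x80000000.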
+
+template<int op>
+void Interpreter::BinaryIntOp()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 2);
+ unsigned op1idx = m_curStackHt - 2;
+ unsigned op2idx = m_curStackHt - 1;
+
+ InterpreterType t1 = OpStackTypeGet(op1idx);
+ CorInfoType cit1 = t1.ToCorInfoType();
+ assert(IsStackNormalType(cit1));
+
+ InterpreterType t2 = OpStackTypeGet(op2idx);
+ CorInfoType cit2 = t2.ToCorInfoType();
+ assert(IsStackNormalType(cit2));
+
+    // In all cases below, since "op" is a compile-time constant, "if" chains on it should fold away.
+ switch (cit1)
+ {
+ case CORINFO_TYPE_INT:
+ if (cit2 == CORINFO_TYPE_INT)
+ {
+ // Int op Int = Int
+ UINT32 val1 = OpStackGet<UINT32>(op1idx);
+ UINT32 val2 = OpStackGet<UINT32>(op2idx);
+ BinaryIntOpWork<op, UINT32, CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else if (cit2 == CORINFO_TYPE_NATIVEINT)
+ {
+ // Int op NativeInt = NativeInt
+ NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<INT32>(op1idx));
+ NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
+ BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/false>(val1, val2);
+ }
+ else if (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF)
+ {
+            // Int op ByRef = ByRef (loose rules)
+ NativeUInt val1 = static_cast<NativeUInt>(OpStackGet<INT32>(op1idx));
+ NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
+ BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_BYREF, /*TypeIsUnchanged*/false>(val1, val2);
+ }
+ else
+ {
+ VerificationError("Binary arithmetic operation type mismatch (int and ?)");
+ }
+ break;
+
+ case CORINFO_TYPE_NATIVEINT:
+ if (cit2 == CORINFO_TYPE_NATIVEINT)
+ {
+ // NativeInt op NativeInt = NativeInt
+ NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
+ NativeUInt val2 = OpStackGet<NativeUInt>(op2idx);
+ BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else if (cit2 == CORINFO_TYPE_INT)
+ {
+ // NativeInt op Int = NativeInt
+ NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
+ NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<INT32>(op2idx));
+ BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ // CLI spec does not allow adding a native int and an int64. So use loose rules.
+ else if (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_LONG)
+ {
+            // NativeInt op Long = NativeInt (loose rules)
+ NativeUInt val1 = OpStackGet<NativeUInt>(op1idx);
+ NativeUInt val2 = static_cast<NativeUInt>(OpStackGet<INT64>(op2idx));
+ BinaryIntOpWork<op, NativeUInt, CORINFO_TYPE_NATIVEINT, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ VerificationError("Binary arithmetic operation type mismatch (native int and ?)");
+ }
+ break;
+
+ case CORINFO_TYPE_LONG:
+ if (cit2 == CORINFO_TYPE_LONG || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT))
+ {
+ // Long op Long = Long
+ UINT64 val1 = OpStackGet<UINT64>(op1idx);
+ UINT64 val2 = OpStackGet<UINT64>(op2idx);
+ BinaryIntOpWork<op, UINT64, CORINFO_TYPE_LONG, /*TypeIsUnchanged*/true>(val1, val2);
+ }
+ else
+ {
+ VerificationError("Binary arithmetic operation type mismatch (long and ?)");
+ }
+ break;
+
+ default:
+ VerificationError("Illegal operation for non-integral data type.");
+ }
+
+ // In all cases:
+ m_curStackHt--;
+}
+
+template<int op, typename T, CorInfoType cit, bool TypeIsUnchanged>
+void Interpreter::BinaryIntOpWork(T val1, T val2)
+{
+ T res;
+ if (op == BIO_And)
+ {
+ res = val1 & val2;
+ }
+ else if (op == BIO_Or)
+ {
+ res = val1 | val2;
+ }
+ else if (op == BIO_Xor)
+ {
+ res = val1 ^ val2;
+ }
+ else
+ {
+ assert(op == BIO_DivUn || op == BIO_RemUn);
+ if (val2 == 0)
+ {
+ ThrowDivideByZero();
+ }
+        // The operands here are unsigned (see the assert above), so the signed
+        // "min int / -1" overflow case cannot arise; division by zero is the
+        // only exceptional case for div.un/rem.un.
+ // Otherwise...
+ if (op == BIO_DivUn)
+ {
+ res = val1 / val2;
+ }
+ else
+ {
+ res = val1 % val2;
+ }
+ }
+
+ unsigned residx = m_curStackHt - 2;
+ OpStackSet<T>(residx, res);
+ if (!TypeIsUnchanged)
+ {
+ OpStackTypeSet(residx, InterpreterType(cit));
+ }
+}
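+
+// Illustration (informal): the unsigned ops simply reinterpret the bit
+// pattern. E.g. "div.un" on int32 operands -2 and 2:
+//
+//   UINT32 val1 = 0xFFFFFFFE;  // -2 viewed as 4294967294
+//   UINT32 val2 = 2;
+//   // res = val1 / val2 == 0x7FFFFFFF, pushed back typed CORINFO_TYPE_INT.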
+
+template<int op>
+void Interpreter::ShiftOp()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 2);
+ unsigned op1idx = m_curStackHt - 2;
+ unsigned op2idx = m_curStackHt - 1;
+
+ InterpreterType t1 = OpStackTypeGet(op1idx);
+ CorInfoType cit1 = t1.ToCorInfoType();
+ assert(IsStackNormalType(cit1));
+
+ InterpreterType t2 = OpStackTypeGet(op2idx);
+ CorInfoType cit2 = t2.ToCorInfoType();
+ assert(IsStackNormalType(cit2));
+
+    // In all cases below, since "op" is a compile-time constant, "if" chains on it should fold away.
+ switch (cit1)
+ {
+ case CORINFO_TYPE_INT:
+ ShiftOpWork<op, INT32, UINT32>(op1idx, cit2);
+ break;
+
+ case CORINFO_TYPE_NATIVEINT:
+ ShiftOpWork<op, NativeInt, NativeUInt>(op1idx, cit2);
+ break;
+
+ case CORINFO_TYPE_LONG:
+ ShiftOpWork<op, INT64, UINT64>(op1idx, cit2);
+ break;
+
+ default:
+ VerificationError("Illegal value type for shift operation.");
+ break;
+ }
+
+ m_curStackHt--;
+}
+
+template<int op, typename T, typename UT>
+void Interpreter::ShiftOpWork(unsigned op1idx, CorInfoType cit2)
+{
+ T val = OpStackGet<T>(op1idx);
+ unsigned op2idx = op1idx + 1;
+ T res = 0;
+
+ if (cit2 == CORINFO_TYPE_INT)
+ {
+ INT32 shiftAmt = OpStackGet<INT32>(op2idx);
+ if (op == CEE_SHL)
+ {
+ res = val << shiftAmt; // TODO: Check that C++ semantics matches IL.
+ }
+ else if (op == CEE_SHR)
+ {
+ res = val >> shiftAmt;
+ }
+ else
+ {
+ assert(op == CEE_SHR_UN);
+ res = (static_cast<UT>(val)) >> shiftAmt;
+ }
+ }
+    else if (cit2 == CORINFO_TYPE_NATIVEINT)
+ {
+ NativeInt shiftAmt = OpStackGet<NativeInt>(op2idx);
+ if (op == CEE_SHL)
+ {
+ res = val << shiftAmt; // TODO: Check that C++ semantics matches IL.
+ }
+ else if (op == CEE_SHR)
+ {
+ res = val >> shiftAmt;
+ }
+ else
+ {
+ assert(op == CEE_SHR_UN);
+ res = (static_cast<UT>(val)) >> shiftAmt;
+ }
+ }
+ else
+ {
+ VerificationError("Operand type mismatch for shift operator.");
+ }
+ OpStackSet<T>(op1idx, res);
+}
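+
+// Note (informal), re the TODOs above: C++ and IL agree for in-range shift
+// amounts on the compilers the CLR targets -- e.g. ((INT32)-8) >> 1 == -4
+// (arithmetic shift for "shr") and (static_cast<UINT32>(0xFFFFFFF8u)) >> 1
+// == 0x7FFFFFFC for "shr.un" -- and for amounts >= the operand width,
+// ECMA-335 leaves the result unspecified, so whatever the C++ shift
+// produces in practice is permissible.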
+
+
+void Interpreter::Neg()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 1);
+ unsigned opidx = m_curStackHt - 1;
+
+ InterpreterType t1 = OpStackTypeGet(opidx);
+ CorInfoType cit1 = t1.ToCorInfoType();
+ assert(IsStackNormalType(cit1));
+
+ switch (cit1)
+ {
+ case CORINFO_TYPE_INT:
+ OpStackSet<INT32>(opidx, -OpStackGet<INT32>(opidx));
+ break;
+
+ case CORINFO_TYPE_NATIVEINT:
+ OpStackSet<NativeInt>(opidx, -OpStackGet<NativeInt>(opidx));
+ break;
+
+ case CORINFO_TYPE_LONG:
+ OpStackSet<INT64>(opidx, -OpStackGet<INT64>(opidx));
+ break;
+
+ case CORINFO_TYPE_FLOAT:
+ OpStackSet<float>(opidx, -OpStackGet<float>(opidx));
+ break;
+
+ case CORINFO_TYPE_DOUBLE:
+ OpStackSet<double>(opidx, -OpStackGet<double>(opidx));
+ break;
+
+ default:
+ VerificationError("Illegal operand type for Neg operation.");
+ }
+}
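+
+// Note (informal): IL "neg" detects no overflow; negating the minimum value
+// of a type yields that same value (e.g. neg of 0x80000000 as an int32 is
+// 0x80000000). The C++ negations above rely on the usual two's-complement
+// wrap for that case.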
+
+void Interpreter::Not()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 1);
+ unsigned opidx = m_curStackHt - 1;
+
+ InterpreterType t1 = OpStackTypeGet(opidx);
+ CorInfoType cit1 = t1.ToCorInfoType();
+ assert(IsStackNormalType(cit1));
+
+ switch (cit1)
+ {
+ case CORINFO_TYPE_INT:
+ OpStackSet<INT32>(opidx, ~OpStackGet<INT32>(opidx));
+ break;
+
+ case CORINFO_TYPE_NATIVEINT:
+ OpStackSet<NativeInt>(opidx, ~OpStackGet<NativeInt>(opidx));
+ break;
+
+ case CORINFO_TYPE_LONG:
+ OpStackSet<INT64>(opidx, ~OpStackGet<INT64>(opidx));
+ break;
+
+ default:
+ VerificationError("Illegal operand type for Not operation.");
+ }
+}
+
+template<typename T, bool TIsUnsigned, bool TCanHoldPtr, bool TIsShort, CorInfoType cit>
+void Interpreter::Conv()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 1);
+ unsigned opidx = m_curStackHt - 1;
+
+ InterpreterType t1 = OpStackTypeGet(opidx);
+ CorInfoType cit1 = t1.ToCorInfoType();
+ assert(IsStackNormalType(cit1));
+
+ T val;
+ switch (cit1)
+ {
+ case CORINFO_TYPE_INT:
+ if (TIsUnsigned)
+ {
+ // Must convert the 32 bit value to unsigned first, so that we zero-extend if necessary.
+ val = static_cast<T>(static_cast<UINT32>(OpStackGet<INT32>(opidx)));
+ }
+ else
+ {
+ val = static_cast<T>(OpStackGet<INT32>(opidx));
+ }
+ break;
+
+ case CORINFO_TYPE_NATIVEINT:
+ if (TIsUnsigned)
+ {
+ // NativeInt might be 32 bits, so convert to unsigned before possibly widening.
+ val = static_cast<T>(static_cast<NativeUInt>(OpStackGet<NativeInt>(opidx)));
+ }
+ else
+ {
+ val = static_cast<T>(OpStackGet<NativeInt>(opidx));
+ }
+ break;
+
+ case CORINFO_TYPE_LONG:
+ val = static_cast<T>(OpStackGet<INT64>(opidx));
+ break;
+
+ // TODO: Make sure that the C++ conversions do the right thing (truncate to zero...)
+ case CORINFO_TYPE_FLOAT:
+ val = static_cast<T>(OpStackGet<float>(opidx));
+ break;
+
+ case CORINFO_TYPE_DOUBLE:
+ val = static_cast<T>(OpStackGet<double>(opidx));
+ break;
+
+ case CORINFO_TYPE_BYREF:
+ case CORINFO_TYPE_CLASS:
+ case CORINFO_TYPE_STRING:
+ if (!TCanHoldPtr && !s_InterpreterLooseRules)
+ {
+ VerificationError("Conversion of pointer value to type that can't hold its value.");
+ }
+
+ // Otherwise...
+ // (Must first convert to NativeInt, because the compiler believes this might be applied for T =
+ // float or double. It won't, by the test above, and the extra cast shouldn't generate any code...)
+ val = static_cast<T>(reinterpret_cast<NativeInt>(OpStackGet<void*>(opidx)));
+ break;
+
+ default:
+ VerificationError("Illegal operand type for conv.* operation.");
+ UNREACHABLE();
+ }
+
+ if (TIsShort)
+ {
+ OpStackSet<INT32>(opidx, static_cast<INT32>(val));
+ }
+ else
+ {
+ OpStackSet<T>(opidx, val);
+ }
+
+ OpStackTypeSet(opidx, InterpreterType(cit));
+}
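+
+// Illustration (informal): for a "short" target such as conv.u1, the value is
+// truncated through T and then re-widened into an INT32 stack slot. E.g. with
+// int32 -1 (0xFFFFFFFF) on the stack, T == UINT8 gives val == 0xFF, and the
+// slot is rewritten as INT32 255 (assuming conv.u1 instantiates with
+// cit == CORINFO_TYPE_INT, the stack-normal type).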
+
+
+void Interpreter::ConvRUn()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 1);
+ unsigned opidx = m_curStackHt - 1;
+
+ InterpreterType t1 = OpStackTypeGet(opidx);
+ CorInfoType cit1 = t1.ToCorInfoType();
+ assert(IsStackNormalType(cit1));
+
+ switch (cit1)
+ {
+ case CORINFO_TYPE_INT:
+ OpStackSet<double>(opidx, static_cast<double>(OpStackGet<UINT32>(opidx)));
+ break;
+
+ case CORINFO_TYPE_NATIVEINT:
+ OpStackSet<double>(opidx, static_cast<double>(OpStackGet<NativeUInt>(opidx)));
+ break;
+
+ case CORINFO_TYPE_LONG:
+ OpStackSet<double>(opidx, static_cast<double>(OpStackGet<UINT64>(opidx)));
+ break;
+
+ case CORINFO_TYPE_DOUBLE:
+ return;
+
+ default:
+ VerificationError("Illegal operand type for conv.r.un operation.");
+ }
+
+ OpStackTypeSet(opidx, InterpreterType(CORINFO_TYPE_DOUBLE));
+}
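+
+// Illustration (informal): conv.r.un reinterprets the integer bits as
+// unsigned before converting, so an int32 slot holding -1 (0xFFFFFFFF)
+// becomes 4294967295.0, not -1.0.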
+
+template<typename T, INT64 TMin, UINT64 TMax, bool TCanHoldPtr, CorInfoType cit>
+void Interpreter::ConvOvf()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 1);
+ unsigned opidx = m_curStackHt - 1;
+
+ InterpreterType t1 = OpStackTypeGet(opidx);
+ CorInfoType cit1 = t1.ToCorInfoType();
+ assert(IsStackNormalType(cit1));
+
+ switch (cit1)
+ {
+ case CORINFO_TYPE_INT:
+ {
+ INT32 i4 = OpStackGet<INT32>(opidx);
+ if (!FitsIn<T>(i4))
+ {
+ ThrowOverflowException();
+ }
+ OpStackSet<T>(opidx, static_cast<T>(i4));
+ }
+ break;
+
+ case CORINFO_TYPE_NATIVEINT:
+ {
+ NativeInt i = OpStackGet<NativeInt>(opidx);
+ if (!FitsIn<T>(i))
+ {
+ ThrowOverflowException();
+ }
+ OpStackSet<T>(opidx, static_cast<T>(i));
+ }
+ break;
+
+ case CORINFO_TYPE_LONG:
+ {
+ INT64 i8 = OpStackGet<INT64>(opidx);
+ if (!FitsIn<T>(i8))
+ {
+ ThrowOverflowException();
+ }
+ OpStackSet<T>(opidx, static_cast<T>(i8));
+ }
+ break;
+
+ // Make sure that the C++ conversions do the right thing (truncate to zero...)
+ case CORINFO_TYPE_FLOAT:
+ {
+ float f = OpStackGet<float>(opidx);
+ if (!FloatFitsInIntType<TMin, TMax>(f))
+ {
+ ThrowOverflowException();
+ }
+ OpStackSet<T>(opidx, static_cast<T>(f));
+ }
+ break;
+
+ case CORINFO_TYPE_DOUBLE:
+ {
+ double d = OpStackGet<double>(opidx);
+ if (!DoubleFitsInIntType<TMin, TMax>(d))
+ {
+ ThrowOverflowException();
+ }
+ OpStackSet<T>(opidx, static_cast<T>(d));
+ }
+ break;
+
+ case CORINFO_TYPE_BYREF:
+ case CORINFO_TYPE_CLASS:
+ case CORINFO_TYPE_STRING:
+ if (!TCanHoldPtr)
+ {
+ VerificationError("Conversion of pointer value to type that can't hold its value.");
+ }
+
+ // Otherwise...
+ // (Must first convert to NativeInt, because the compiler believes this might be applied for T =
+    // float or double. It won't, by the test above, and the extra cast shouldn't generate any code...)
+ OpStackSet<T>(opidx, static_cast<T>(reinterpret_cast<NativeInt>(OpStackGet<void*>(opidx))));
+ break;
+
+ default:
+ VerificationError("Illegal operand type for conv.ovf.* operation.");
+ }
+
+ _ASSERTE_MSG(IsStackNormalType(cit), "Precondition.");
+ OpStackTypeSet(opidx, InterpreterType(cit));
+}
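+
+// Illustration (informal): conv.ovf.i1 with 200 on the stack fails
+// FitsIn<INT8> (range -128..127) and throws OverflowException, where a plain
+// conv.i1 would silently truncate 200 to -56.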
+
+template<typename T, INT64 TMin, UINT64 TMax, bool TCanHoldPtr, CorInfoType cit>
+void Interpreter::ConvOvfUn()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 1);
+ unsigned opidx = m_curStackHt - 1;
+
+ InterpreterType t1 = OpStackTypeGet(opidx);
+ CorInfoType cit1 = t1.ToCorInfoType();
+ assert(IsStackNormalType(cit1));
+
+ switch (cit1)
+ {
+ case CORINFO_TYPE_INT:
+ {
+ UINT32 ui4 = OpStackGet<UINT32>(opidx);
+ if (!FitsIn<T>(ui4))
+ {
+ ThrowOverflowException();
+ }
+ OpStackSet<T>(opidx, static_cast<T>(ui4));
+ }
+ break;
+
+ case CORINFO_TYPE_NATIVEINT:
+ {
+ NativeUInt ui = OpStackGet<NativeUInt>(opidx);
+ if (!FitsIn<T>(ui))
+ {
+ ThrowOverflowException();
+ }
+ OpStackSet<T>(opidx, static_cast<T>(ui));
+ }
+ break;
+
+ case CORINFO_TYPE_LONG:
+ {
+ UINT64 ui8 = OpStackGet<UINT64>(opidx);
+ if (!FitsIn<T>(ui8))
+ {
+ ThrowOverflowException();
+ }
+ OpStackSet<T>(opidx, static_cast<T>(ui8));
+ }
+ break;
+
+ // Make sure that the C++ conversions do the right thing (truncate to zero...)
+ case CORINFO_TYPE_FLOAT:
+ {
+ float f = OpStackGet<float>(opidx);
+ if (!FloatFitsInIntType<TMin, TMax>(f))
+ {
+ ThrowOverflowException();
+ }
+ OpStackSet<T>(opidx, static_cast<T>(f));
+ }
+ break;
+
+ case CORINFO_TYPE_DOUBLE:
+ {
+ double d = OpStackGet<double>(opidx);
+ if (!DoubleFitsInIntType<TMin, TMax>(d))
+ {
+ ThrowOverflowException();
+ }
+ OpStackSet<T>(opidx, static_cast<T>(d));
+ }
+ break;
+
+ case CORINFO_TYPE_BYREF:
+ case CORINFO_TYPE_CLASS:
+ case CORINFO_TYPE_STRING:
+ if (!TCanHoldPtr)
+ {
+ VerificationError("Conversion of pointer value to type that can't hold its value.");
+ }
+
+ // Otherwise...
+ // (Must first convert to NativeInt, because the compiler believes this might be applied for T =
+        // float or double. It won't, by the test above, and the extra cast shouldn't generate any code...)
+ OpStackSet<T>(opidx, static_cast<T>(reinterpret_cast<NativeInt>(OpStackGet<void*>(opidx))));
+ break;
+
+ default:
+ VerificationError("Illegal operand type for conv.ovf.*.un operation.");
+ }
+
+ _ASSERTE_MSG(IsStackNormalType(cit), "Precondition.");
+ OpStackTypeSet(opidx, InterpreterType(cit));
+}
+
+void Interpreter::LdObj()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ BarrierIfVolatile();
+
+ assert(m_curStackHt > 0);
+ unsigned ind = m_curStackHt - 1;
+
+#ifdef _DEBUG
+ CorInfoType cit = OpStackTypeGet(ind).ToCorInfoType();
+ _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack");
+#endif // _DEBUG
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdObj]);
+#endif // INTERP_TRACING
+
+ // TODO: GetTypeFromToken also uses GCX_PREEMP(); can we merge it with the getClassAttribs() block below, and do it just once?
+ CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_LdObj));
+ DWORD clsAttribs;
+ {
+ GCX_PREEMP();
+ clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
+ }
+
+ void* src = OpStackGet<void*>(ind);
+ ThrowOnInvalidPointer(src);
+
+ if (clsAttribs & CORINFO_FLG_VALUECLASS)
+ {
+ LdObjValueClassWork(clsHnd, ind, src);
+ }
+ else
+ {
+ OpStackSet<void*>(ind, *reinterpret_cast<void**>(src));
+ OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
+ }
+ m_ILCodePtr += 5;
+}
+
+void Interpreter::LdObjValueClassWork(CORINFO_CLASS_HANDLE valueClsHnd, unsigned ind, void* src)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ // "src" is a byref, which may be into an object. GCPROTECT for the call below.
+ GCPROTECT_BEGININTERIOR(src);
+
+ InterpreterType it = InterpreterType(&m_interpCeeInfo, valueClsHnd);
+ size_t sz = it.Size(&m_interpCeeInfo);
+ // Note that the memcpy's below are permissible because the destination is in the operand stack.
+ if (sz > sizeof(INT64))
+ {
+ void* dest = LargeStructOperandStackPush(sz);
+ memcpy(dest, src, sz);
+ OpStackSet<void*>(ind, dest);
+ }
+ else
+ {
+ OpStackSet<INT64>(ind, GetSmallStructValue(src, sz));
+ }
+
+ OpStackTypeSet(ind, it.StackNormalize());
+
+ GCPROTECT_END();
+}
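+
+// Note (informal): value classes have two operand-stack representations here.
+// A struct of at most sizeof(INT64) bytes lives directly in its 8-byte stack
+// slot (GetSmallStructValue); anything larger lives on the large-struct
+// operand stack, with the slot holding a pointer to that storage.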
+
+CORINFO_CLASS_HANDLE Interpreter::GetTypeFromToken(BYTE* codePtr, CorInfoTokenKind tokKind InterpTracingArg(ResolveTokenKind rtk))
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ CORINFO_GENERICHANDLE_RESULT embedInfo;
+ CORINFO_RESOLVED_TOKEN typeTok;
+ ResolveToken(&typeTok, getU4LittleEndian(codePtr), tokKind InterpTracingArg(rtk));
+ return typeTok.hClass;
+}
+
+bool Interpreter::IsValidPointerType(CorInfoType cit)
+{
+ bool isValid = (cit == CORINFO_TYPE_NATIVEINT || cit == CORINFO_TYPE_BYREF);
+#if defined(_AMD64_)
+ isValid = isValid || (s_InterpreterLooseRules && cit == CORINFO_TYPE_LONG);
+#endif
+ return isValid;
+}
+
+void Interpreter::CpObj()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 2);
+ unsigned destInd = m_curStackHt - 2;
+ unsigned srcInd = m_curStackHt - 1;
+
+#ifdef _DEBUG
+ // Check that src and dest are both pointer types.
+ CorInfoType cit = OpStackTypeGet(destInd).ToCorInfoType();
+ _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack for dest of cpobj");
+
+ cit = OpStackTypeGet(srcInd).ToCorInfoType();
+ _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack for src of cpobj");
+#endif // _DEBUG
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_CpObj]);
+#endif // INTERP_TRACING
+
+ CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_CpObj));
+ DWORD clsAttribs;
+ {
+ GCX_PREEMP();
+ clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
+ }
+
+ void* dest = OpStackGet<void*>(destInd);
+ void* src = OpStackGet<void*>(srcInd);
+
+ ThrowOnInvalidPointer(dest);
+ ThrowOnInvalidPointer(src);
+
+ // dest and src are vulnerable byrefs.
+ GCX_FORBID();
+
+ if (clsAttribs & CORINFO_FLG_VALUECLASS)
+ {
+ CopyValueClassUnchecked(dest, src, GetMethodTableFromClsHnd(clsHnd));
+ }
+ else
+ {
+ OBJECTREF val = *reinterpret_cast<OBJECTREF*>(src);
+ SetObjectReferenceUnchecked(reinterpret_cast<OBJECTREF*>(dest), val);
+ }
+ m_curStackHt -= 2;
+ m_ILCodePtr += 5;
+}
+
+void Interpreter::StObj()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 2);
+ unsigned destInd = m_curStackHt - 2;
+ unsigned valInd = m_curStackHt - 1;
+
+#ifdef _DEBUG
+ // Check that dest is a pointer type.
+ CorInfoType cit = OpStackTypeGet(destInd).ToCorInfoType();
+ _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack for dest of stobj");
+#endif // _DEBUG
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_StObj]);
+#endif // INTERP_TRACING
+
+ CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_StObj));
+ DWORD clsAttribs;
+ {
+ GCX_PREEMP();
+ clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
+ }
+
+ if (clsAttribs & CORINFO_FLG_VALUECLASS)
+ {
+ MethodTable* clsMT = GetMethodTableFromClsHnd(clsHnd);
+ size_t sz;
+ {
+ GCX_PREEMP();
+ sz = getClassSize(clsHnd);
+ }
+
+ // Note that "dest" might be a pointer into the heap. It is therefore important
+ // to calculate it *after* any PREEMP transitions at which we might do a GC.
+ void* dest = OpStackGet<void*>(destInd);
+ ThrowOnInvalidPointer(dest);
+
+ assert( (OpStackTypeGet(valInd).ToCorInfoType() == CORINFO_TYPE_VALUECLASS &&
+ OpStackTypeGet(valInd).ToClassHandle() == clsHnd)
+ ||
+ (OpStackTypeGet(valInd).ToCorInfoType() ==
+ CorInfoTypeStackNormalize(GetTypeForPrimitiveValueClass(clsHnd)))
+ || (s_InterpreterLooseRules && sz <= sizeof(dest)));
+
+ GCX_FORBID();
+
+ if (sz > sizeof(INT64))
+ {
+ // Large struct case -- ostack entry is pointer.
+ void* src = OpStackGet<void*>(valInd);
+ CopyValueClassUnchecked(dest, src, clsMT);
+ LargeStructOperandStackPop(sz, src);
+ }
+ else
+ {
+ // The ostack entry contains the struct value.
+ CopyValueClassUnchecked(dest, OpStackGetAddr(valInd, sz), clsMT);
+ }
+ }
+ else
+ {
+ // The ostack entry is an object reference.
+ assert(OpStackTypeGet(valInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
+
+ // Note that "dest" might be a pointer into the heap. It is therefore important
+        // to calculate it *after* any PREEMP transitions at which we might do a GC.
+        // (Thus, we have to duplicate this code from the case above.)
+ void* dest = OpStackGet<void*>(destInd);
+ ThrowOnInvalidPointer(dest);
+
+ GCX_FORBID();
+
+ OBJECTREF val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
+ SetObjectReferenceUnchecked(reinterpret_cast<OBJECTREF*>(dest), val);
+ }
+
+ m_curStackHt -= 2;
+ m_ILCodePtr += 5;
+
+ BarrierIfVolatile();
+}
+
+void Interpreter::InitObj()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 1);
+ unsigned destInd = m_curStackHt - 1;
+#ifdef _DEBUG
+    // Check that dest is a pointer type.
+ CorInfoType cit = OpStackTypeGet(destInd).ToCorInfoType();
+ _ASSERTE_MSG(IsValidPointerType(cit), "Expect pointer on stack");
+#endif // _DEBUG
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_InitObj]);
+#endif // INTERP_TRACING
+
+ CORINFO_CLASS_HANDLE clsHnd = GetTypeFromToken(m_ILCodePtr + 2, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_InitObj));
+ size_t valueClassSz = 0;
+
+ DWORD clsAttribs;
+ {
+ GCX_PREEMP();
+ clsAttribs = m_interpCeeInfo.getClassAttribs(clsHnd);
+ if (clsAttribs & CORINFO_FLG_VALUECLASS)
+ {
+ valueClassSz = getClassSize(clsHnd);
+ }
+ }
+
+ void* dest = OpStackGet<void*>(destInd);
+ ThrowOnInvalidPointer(dest);
+
+ // dest is a vulnerable byref.
+ GCX_FORBID();
+
+ if (clsAttribs & CORINFO_FLG_VALUECLASS)
+ {
+ memset(dest, 0, valueClassSz);
+ }
+ else
+ {
+ // The ostack entry is an object reference.
+ SetObjectReferenceUnchecked(reinterpret_cast<OBJECTREF*>(dest), NULL);
+ }
+ m_curStackHt -= 1;
+ m_ILCodePtr += 6;
+}
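+
+// Illustration (informal, hypothetical type): for a value class such as
+// "struct Point { int x; int y; }", initobj zeroes getClassSize(clsHnd) == 8
+// bytes at dest; for a reference-type location it instead stores a null
+// object reference via SetObjectReferenceUnchecked.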
+
+void Interpreter::LdStr()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ OBJECTHANDLE res = ConstructStringLiteral(m_methInfo->m_module, getU4LittleEndian(m_ILCodePtr + 1));
+ {
+ GCX_FORBID();
+ OpStackSet<Object*>(m_curStackHt, *reinterpret_cast<Object**>(res));
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS)); // Stack-normal type for "string"
+ m_curStackHt++;
+ }
+ m_ILCodePtr += 5;
+}
+
+void Interpreter::NewObj()
+{
+#if INTERP_DYNAMIC_CONTRACTS
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+#else
+ // Dynamic contract occupies too much stack.
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+#endif
+
+ unsigned ctorTok = getU4LittleEndian(m_ILCodePtr + 1);
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_NewObj]);
+#endif // INTERP_TRACING
+
+ CORINFO_CALL_INFO callInfo;
+ CORINFO_RESOLVED_TOKEN methTok;
+
+ {
+ GCX_PREEMP();
+ ResolveToken(&methTok, ctorTok, CORINFO_TOKENKIND_Ldtoken InterpTracingArg(RTK_NewObj));
+ m_interpCeeInfo.getCallInfo(&methTok, NULL,
+ m_methInfo->m_method,
+ CORINFO_CALLINFO_FLAGS(0),
+ &callInfo);
+ }
+
+ unsigned mflags = callInfo.methodFlags;
+
+ if ((mflags & (CORINFO_FLG_STATIC|CORINFO_FLG_ABSTRACT)) != 0)
+ {
+ VerificationError("newobj on static or abstract method");
+ }
+
+ unsigned clsFlags = callInfo.classFlags;
+
+#ifdef _DEBUG
+ // What class are we allocating?
+ const char* clsName;
+
+ {
+ GCX_PREEMP();
+ clsName = m_interpCeeInfo.getClassName(methTok.hClass);
+ }
+#endif // _DEBUG
+
+ // There are four cases:
+ // 1) Value types (ordinary constructor, resulting VALUECLASS pushed)
+ // 2) String (var-args constructor, result automatically pushed)
+ // 3) MDArray (var-args constructor, resulting OBJECTREF pushed)
+ // 4) Reference types (ordinary constructor, resulting OBJECTREF pushed)
+ if (clsFlags & CORINFO_FLG_VALUECLASS)
+ {
+ void* tempDest;
+ INT64 smallTempDest = 0;
+ size_t sz = 0;
+ {
+ GCX_PREEMP();
+ sz = getClassSize(methTok.hClass);
+ }
+ if (sz > sizeof(INT64))
+ {
+ // TODO: Make sure this is deleted in the face of exceptions.
+ tempDest = new BYTE[sz];
+ }
+ else
+ {
+ tempDest = &smallTempDest;
+ }
+ memset(tempDest, 0, sz);
+ InterpreterType structValRetIT(&m_interpCeeInfo, methTok.hClass);
+ m_structRetValITPtr = &structValRetIT;
+ m_structRetValTempSpace = tempDest;
+
+ DoCallWork(/*virtCall*/false, tempDest, &methTok, &callInfo);
+
+ if (sz > sizeof(INT64))
+ {
+ void* dest = LargeStructOperandStackPush(sz);
+ memcpy(dest, tempDest, sz);
+ delete[] tempDest;
+ OpStackSet<void*>(m_curStackHt, dest);
+ }
+ else
+ {
+ OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(tempDest, sz));
+ }
+ if (m_structRetValITPtr->IsStruct())
+ {
+ OpStackTypeSet(m_curStackHt, *m_structRetValITPtr);
+ }
+ else
+ {
+ // Must stack-normalize primitive types.
+ OpStackTypeSet(m_curStackHt, m_structRetValITPtr->StackNormalize());
+ }
+ // "Unregister" the temp space for GC scanning...
+ m_structRetValITPtr = NULL;
+ m_curStackHt++;
+ }
+ else if ((clsFlags & CORINFO_FLG_VAROBJSIZE) && !(clsFlags & CORINFO_FLG_ARRAY))
+ {
+ // For a VAROBJSIZE class (currently == String), pass NULL as this to "pseudo-constructor."
+ void* specialFlagArg = reinterpret_cast<void*>(0x1); // Special value for "thisArg" argument of "DoCallWork": push NULL that's not on op stack.
+ DoCallWork(/*virtCall*/false, specialFlagArg, &methTok, &callInfo); // pushes result automatically
+ }
+ else
+ {
+ OBJECTREF thisArgObj = NULL;
+ GCPROTECT_BEGIN(thisArgObj);
+
+ if (clsFlags & CORINFO_FLG_ARRAY)
+ {
+ assert(clsFlags & CORINFO_FLG_VAROBJSIZE);
+
+ MethodDesc* methDesc = GetMethod(methTok.hMethod);
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cbSigSize;
+ methDesc->GetSig(&pSig, &cbSigSize);
+ MetaSig msig(pSig, cbSigSize, methDesc->GetModule(), NULL);
+
+ unsigned dwNumArgs = msig.NumFixedArgs();
+ assert(m_curStackHt >= dwNumArgs);
+ m_curStackHt -= dwNumArgs;
+
+ INT32* args = (INT32*)_alloca(dwNumArgs * sizeof(INT32));
+
+ unsigned dwArg;
+ for (dwArg = 0; dwArg < dwNumArgs; dwArg++)
+ {
+ unsigned stkInd = m_curStackHt + dwArg;
+ bool loose = s_InterpreterLooseRules && (OpStackTypeGet(stkInd).ToCorInfoType() == CORINFO_TYPE_NATIVEINT);
+ if (OpStackTypeGet(stkInd).ToCorInfoType() != CORINFO_TYPE_INT && !loose)
+ {
+ VerificationError("MD array dimension bounds and sizes must be int.");
+ }
+ args[dwArg] = loose ? (INT32) OpStackGet<NativeInt>(stkInd) : OpStackGet<INT32>(stkInd);
+ }
+
+ thisArgObj = AllocateArrayEx(TypeHandle(methTok.hClass), args, dwNumArgs);
+ }
+ else
+ {
+ CorInfoHelpFunc newHelper;
+ {
+ GCX_PREEMP();
+ newHelper = m_interpCeeInfo.getNewHelper(&methTok, m_methInfo->m_method);
+ }
+
+ MethodTable * pNewObjMT = GetMethodTableFromClsHnd(methTok.hClass);
+ switch (newHelper)
+ {
+#ifdef FEATURE_REMOTING
+ case CORINFO_HELP_NEW_CROSSCONTEXT:
+ {
+ if (CRemotingServices::RequiresManagedActivation(pNewObjMT) && !pNewObjMT->IsComObjectType())
+ {
+ thisArgObj = CRemotingServices::CreateProxyOrObject(pNewObjMT);
+ }
+ else
+ {
+ thisArgObj = AllocateObject(pNewObjMT);
+ }
+ }
+ break;
+#endif // FEATURE_REMOTING
+ case CORINFO_HELP_NEWFAST:
+ default:
+ thisArgObj = AllocateObject(pNewObjMT);
+ break;
+ }
+
+ DoCallWork(/*virtCall*/false, OBJECTREFToObject(thisArgObj), &methTok, &callInfo);
+ }
+
+ {
+ GCX_FORBID();
+ OpStackSet<Object*>(m_curStackHt, OBJECTREFToObject(thisArgObj));
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
+ m_curStackHt++;
+ }
+ GCPROTECT_END(); // For "thisArgObj"
+ }
+
+ m_ILCodePtr += 5;
+}
+
+void Interpreter::NewArr()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt > 0);
+ unsigned stkInd = m_curStackHt-1;
+ CorInfoType cit = OpStackTypeGet(stkInd).ToCorInfoType();
+ NativeInt sz = 0;
+ switch (cit)
+ {
+ case CORINFO_TYPE_INT:
+ sz = static_cast<NativeInt>(OpStackGet<INT32>(stkInd));
+ break;
+ case CORINFO_TYPE_NATIVEINT:
+ sz = OpStackGet<NativeInt>(stkInd);
+ break;
+ default:
+ VerificationError("Size operand of 'newarr' must be int or native int.");
+ }
+
+ unsigned elemTypeTok = getU4LittleEndian(m_ILCodePtr + 1);
+
+ CORINFO_CLASS_HANDLE elemClsHnd;
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_NewArr]);
+#endif // INTERP_TRACING
+
+ CORINFO_RESOLVED_TOKEN elemTypeResolvedTok;
+
+ {
+ GCX_PREEMP();
+ ResolveToken(&elemTypeResolvedTok, elemTypeTok, CORINFO_TOKENKIND_Newarr InterpTracingArg(RTK_NewArr));
+ elemClsHnd = elemTypeResolvedTok.hClass;
+ }
+
+ {
+ if (sz < 0)
+ {
+ COMPlusThrow(kOverflowException);
+ }
+
+#ifdef _WIN64
+ // Even though ECMA allows using a native int as the argument to newarr instruction
+ // (therefore size is INT_PTR), ArrayBase::m_NumComponents is 32-bit, so even on 64-bit
+ // platforms we can't create an array whose size exceeds 32 bits.
+ if (sz > INT_MAX)
+ {
+ EX_THROW(EEMessageException, (kOverflowException, IDS_EE_ARRAY_DIMENSIONS_EXCEEDED));
+ }
+#endif
+
+ TypeHandle typeHnd(elemClsHnd);
+ ArrayTypeDesc* pArrayClassRef = typeHnd.AsArray();
+
+ pArrayClassRef->GetMethodTable()->CheckRunClassInitThrowing();
+
+ INT32 size32 = (INT32)sz;
+ Object* newarray = OBJECTREFToObject(AllocateArrayEx(typeHnd, &size32, 1));
+
+ GCX_FORBID();
+ OpStackTypeSet(stkInd, InterpreterType(CORINFO_TYPE_CLASS));
+ OpStackSet<Object*>(stkInd, newarray);
+ }
+
+ m_ILCodePtr += 5;
+}
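+
+// Illustration (informal): on 64-bit platforms, "newarr" with a native int
+// size of 0x100000000 passes the sz < 0 check but trips the sz > INT_MAX
+// guard above (ArrayBase::m_NumComponents is only 32 bits), throwing
+// kOverflowException with IDS_EE_ARRAY_DIMENSIONS_EXCEEDED.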
+
+void Interpreter::IsInst()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_IsInst]);
+#endif // INTERP_TRACING
+
+ CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Casting InterpTracingArg(RTK_IsInst));
+
+ assert(m_curStackHt >= 1);
+ unsigned idx = m_curStackHt - 1;
+#ifdef _DEBUG
+ CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
+ assert(cit == CORINFO_TYPE_CLASS || cit == CORINFO_TYPE_STRING);
+#endif // _DEBUG
+
+ Object * pObj = OpStackGet<Object*>(idx);
+ if (pObj != NULL)
+ {
+ if (!ObjIsInstanceOf(pObj, TypeHandle(cls)))
+ OpStackSet<Object*>(idx, NULL);
+ }
+
+ // Type stack stays unmodified.
+
+ m_ILCodePtr += 5;
+}
+
+void Interpreter::CastClass()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_CastClass]);
+#endif // INTERP_TRACING
+
+ CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Casting InterpTracingArg(RTK_CastClass));
+
+ assert(m_curStackHt >= 1);
+ unsigned idx = m_curStackHt - 1;
+#ifdef _DEBUG
+ CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
+ assert(cit == CORINFO_TYPE_CLASS || cit == CORINFO_TYPE_STRING);
+#endif // _DEBUG
+
+ Object * pObj = OpStackGet<Object*>(idx);
+ if (pObj != NULL)
+ {
+ if (!ObjIsInstanceOf(pObj, TypeHandle(cls)))
+ {
+ OBJECTREF oref = ObjectToOBJECTREF(OpStackGet<Object*>(idx));
+ COMPlusThrowInvalidCastException(&oref, TypeHandle(cls));
+ }
+ }
+
+
+ // Type stack stays unmodified.
+
+ m_ILCodePtr += 5;
+}
+
+void Interpreter::LocAlloc()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 1);
+ unsigned idx = m_curStackHt - 1;
+ CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
+ NativeUInt sz = 0;
+ if (cit == CORINFO_TYPE_INT || cit == CORINFO_TYPE_UINT)
+ {
+ sz = static_cast<NativeUInt>(OpStackGet<UINT32>(idx));
+ }
+ else if (cit == CORINFO_TYPE_NATIVEINT || cit == CORINFO_TYPE_NATIVEUINT)
+ {
+ sz = OpStackGet<NativeUInt>(idx);
+ }
+ else if (s_InterpreterLooseRules && cit == CORINFO_TYPE_LONG)
+ {
+ sz = (NativeUInt) OpStackGet<INT64>(idx);
+ }
+ else
+ {
+ VerificationError("localloc requires int or nativeint argument.");
+ }
+ if (sz == 0)
+ {
+ OpStackSet<void*>(idx, NULL);
+ }
+ else
+ {
+ void* res = GetLocAllocData()->Alloc(sz);
+ if (res == NULL) ThrowStackOverflow();
+ OpStackSet<void*>(idx, res);
+ }
+ OpStackTypeSet(idx, InterpreterType(CORINFO_TYPE_NATIVEINT));
+}
+
+void Interpreter::MkRefany()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_MkRefAny]);
+#endif // INTERP_TRACING
+
+ CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_MkRefAny));
+ assert(m_curStackHt >= 1);
+ unsigned idx = m_curStackHt - 1;
+
+ CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
+ if (!(cit == CORINFO_TYPE_BYREF || cit == CORINFO_TYPE_NATIVEINT))
+ VerificationError("MkRefany requires byref or native int (pointer) on the stack.");
+
+ void* ptr = OpStackGet<void*>(idx);
+
+ InterpreterType typedRefIT = GetTypedRefIT(&m_interpCeeInfo);
+ TypedByRef* tbr;
+#if defined(_AMD64_)
+ assert(typedRefIT.IsLargeStruct(&m_interpCeeInfo));
+ tbr = (TypedByRef*) LargeStructOperandStackPush(GetTypedRefSize(&m_interpCeeInfo));
+ OpStackSet<void*>(idx, tbr);
+#elif defined(_X86_) || defined(_ARM_)
+ assert(!typedRefIT.IsLargeStruct(&m_interpCeeInfo));
+ tbr = OpStackGetAddr<TypedByRef>(idx);
+#elif defined(_ARM64_)
+ tbr = NULL;
+ NYI_INTERP("Unimplemented code: MkRefAny");
+#else
+#error "unsupported platform"
+#endif
+ tbr->data = ptr;
+ tbr->type = TypeHandle(cls);
+ OpStackTypeSet(idx, typedRefIT);
+
+ m_ILCodePtr += 5;
+}
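+
+// Note (informal): mkrefany packages a (pointer, type) pair: after the code
+// above, tbr->data == ptr and tbr->type == TypeHandle(cls). RefanyVal later
+// reverses this, handing back the original byref and throwing
+// InvalidCastException if its type token does not match tbr->type.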
+
+void Interpreter::RefanyType()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt > 0);
+ unsigned idx = m_curStackHt - 1;
+
+ if (OpStackTypeGet(idx) != GetTypedRefIT(&m_interpCeeInfo))
+        VerificationError("RefAnyType requires a TypedRef on the stack.");
+
+ TypedByRef* ptbr = OpStackGet<TypedByRef*>(idx);
+ LargeStructOperandStackPop(sizeof(TypedByRef), ptbr);
+
+ TypeHandle* pth = &ptbr->type;
+
+ {
+ OBJECTREF classobj = TypeHandleToTypeRef(pth);
+ GCX_FORBID();
+ OpStackSet<Object*>(idx, OBJECTREFToObject(classobj));
+ OpStackTypeSet(idx, InterpreterType(CORINFO_TYPE_CLASS));
+ }
+ m_ILCodePtr += 2;
+}
+
+// This (unfortunately) duplicates code in JIT_GetRuntimeTypeHandle, which
+// isn't callable because it sets up a Helper Method Frame.
+OBJECTREF Interpreter::TypeHandleToTypeRef(TypeHandle* pth)
+{
+ OBJECTREF typePtr = NULL;
+ if (!pth->IsTypeDesc())
+ {
+ // Most common... and fastest case
+ typePtr = pth->AsMethodTable()->GetManagedClassObjectIfExists();
+ if (typePtr == NULL)
+ {
+ typePtr = pth->GetManagedClassObject();
+ }
+ }
+ else
+ {
+ typePtr = pth->GetManagedClassObject();
+ }
+ return typePtr;
+}
+
+CorInfoType Interpreter::GetTypeForPrimitiveValueClass(CORINFO_CLASS_HANDLE clsHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ return m_interpCeeInfo.getTypeForPrimitiveValueClass(clsHnd);
+}
+
+void Interpreter::RefanyVal()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt > 0);
+ unsigned idx = m_curStackHt - 1;
+
+ if (OpStackTypeGet(idx) != GetTypedRefIT(&m_interpCeeInfo))
+ VerificationError("RefAnyVal requires a TypedRef on the stack.");
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_RefAnyVal]);
+#endif // INTERP_TRACING
+
+ CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_RefAnyVal));
+ TypeHandle expected(cls);
+
+ TypedByRef* ptbr = OpStackGet<TypedByRef*>(idx);
+ LargeStructOperandStackPop(sizeof(TypedByRef), ptbr);
+ if (expected != ptbr->type) ThrowInvalidCastException();
+
+ OpStackSet<void*>(idx, static_cast<void*>(ptbr->data));
+ OpStackTypeSet(idx, InterpreterType(CORINFO_TYPE_BYREF));
+
+ m_ILCodePtr += 5;
+}
+
+void Interpreter::CkFinite()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt > 0);
+ unsigned idx = m_curStackHt - 1;
+
+ CorInfoType cit = OpStackTypeGet(idx).ToCorInfoType();
+ double val = 0.0;
+
+ switch (cit)
+ {
+ case CORINFO_TYPE_FLOAT:
+ val = (double)OpStackGet<float>(idx);
+ break;
+ case CORINFO_TYPE_DOUBLE:
+ val = OpStackGet<double>(idx);
+ break;
+ default:
+ VerificationError("CkFinite requires a floating-point value on the stack.");
+ break;
+ }
+
+ if (!_finite(val))
+ ThrowSysArithException();
+}
+
+void Interpreter::LdToken()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ unsigned tokVal = getU4LittleEndian(m_ILCodePtr + 1);
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdToken]);
+#endif // INTERP_TRACING
+
+
+ CORINFO_RESOLVED_TOKEN tok;
+ {
+ GCX_PREEMP();
+ ResolveToken(&tok, tokVal, CORINFO_TOKENKIND_Ldtoken InterpTracingArg(RTK_LdToken));
+ }
+
+ // To save duplication of the factored code at the bottom, I don't do GCX_FORBID for
+ // these Object* values, but this comment documents the intent.
+ if (tok.hMethod != NULL)
+ {
+ MethodDesc* pMethod = (MethodDesc*)tok.hMethod;
+ Object* objPtr = OBJECTREFToObject((OBJECTREF)pMethod->GetStubMethodInfo());
+ OpStackSet<Object*>(m_curStackHt, objPtr);
+ }
+ else if (tok.hField != NULL)
+ {
+ FieldDesc * pField = (FieldDesc *)tok.hField;
+ Object* objPtr = OBJECTREFToObject((OBJECTREF)pField->GetStubFieldInfo());
+ OpStackSet<Object*>(m_curStackHt, objPtr);
+ }
+ else
+ {
+ TypeHandle th(tok.hClass);
+ Object* objPtr = OBJECTREFToObject(th.GetManagedClassObject());
+ OpStackSet<Object*>(m_curStackHt, objPtr);
+ }
+
+ {
+ GCX_FORBID();
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
+ m_curStackHt++;
+ }
+
+ m_ILCodePtr += 5;
+}
+
+void Interpreter::LdFtn()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ unsigned tokVal = getU4LittleEndian(m_ILCodePtr + 2);
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdFtn]);
+#endif // INTERP_TRACING
+
+ CORINFO_RESOLVED_TOKEN tok;
+ CORINFO_CALL_INFO callInfo;
+ {
+ GCX_PREEMP();
+ ResolveToken(&tok, tokVal, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_LdFtn));
+ m_interpCeeInfo.getCallInfo(&tok, NULL, m_methInfo->m_method,
+ combine(CORINFO_CALLINFO_SECURITYCHECKS,CORINFO_CALLINFO_LDFTN),
+ &callInfo);
+ }
+
+ switch (callInfo.kind)
+ {
+ case CORINFO_CALL:
+ {
+ PCODE pCode = ((MethodDesc *)callInfo.hMethod)->GetMultiCallableAddrOfCode();
+ OpStackSet<void*>(m_curStackHt, (void *)pCode);
+ GetFunctionPointerStack()[m_curStackHt] = callInfo.hMethod;
+ }
+ break;
+ case CORINFO_CALL_CODE_POINTER:
+ NYI_INTERP("Indirect code pointer.");
+ break;
+ default:
+ _ASSERTE_MSG(false, "Should not reach here: unknown call kind.");
+ break;
+ }
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
+ m_curStackHt++;
+ m_ILCodePtr += 6;
+}
+
+void Interpreter::LdVirtFtn()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 1);
+ unsigned ind = m_curStackHt - 1;
+
+ unsigned tokVal = getU4LittleEndian(m_ILCodePtr + 2);
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdVirtFtn]);
+#endif // INTERP_TRACING
+
+ CORINFO_RESOLVED_TOKEN tok;
+ CORINFO_CALL_INFO callInfo;
+ CORINFO_CLASS_HANDLE classHnd;
+ CORINFO_METHOD_HANDLE methodHnd;
+ {
+ GCX_PREEMP();
+ ResolveToken(&tok, tokVal, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_LdVirtFtn));
+ m_interpCeeInfo.getCallInfo(&tok, NULL, m_methInfo->m_method,
+ combine(CORINFO_CALLINFO_SECURITYCHECKS,CORINFO_CALLINFO_LDFTN),
+ &callInfo);
+
+
+ classHnd = tok.hClass;
+ methodHnd = tok.hMethod;
+ }
+
+ MethodDesc * pMD = (MethodDesc *)methodHnd;
+ PCODE pCode;
+ if (pMD->IsVtableMethod())
+ {
+ Object* obj = OpStackGet<Object*>(ind);
+ ThrowOnInvalidPointer(obj);
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+ GCPROTECT_BEGIN(objRef);
+ pCode = pMD->GetMultiCallableAddrOfVirtualizedCode(&objRef, TypeHandle(classHnd));
+ GCPROTECT_END();
+
+ pMD = Entry2MethodDesc(pCode, TypeHandle(classHnd).GetMethodTable());
+ }
+ else
+ {
+ pCode = pMD->GetMultiCallableAddrOfCode();
+ }
+ OpStackSet<void*>(ind, (void *)pCode);
+ GetFunctionPointerStack()[ind] = (CORINFO_METHOD_HANDLE)pMD;
+
+ OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_NATIVEINT));
+ m_ILCodePtr += 6;
+}
+
+void Interpreter::Sizeof()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Sizeof]);
+#endif // INTERP_TRACING
+
+ CORINFO_CLASS_HANDLE cls = GetTypeFromToken(m_ILCodePtr + 2, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_Sizeof));
+ unsigned sz;
+ {
+ GCX_PREEMP();
+ CorInfoType cit = ::asCorInfoType(cls);
+ // For class types, the ECMA spec says to return the size of the object reference, not the referent
+ // object. Everything else should be a value type, for which we can just return the size as reported
+ // by the EE.
+ switch (cit)
+ {
+ case CORINFO_TYPE_CLASS:
+ sz = sizeof(Object*);
+ break;
+ default:
+ sz = getClassSize(cls);
+ break;
+ }
+ }
+
+ OpStackSet<UINT32>(m_curStackHt, sz);
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_INT));
+ m_curStackHt++;
+ m_ILCodePtr += 6;
+}
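+
+// Illustration (informal): "sizeof" on any reference type pushes
+// sizeof(Object*) -- 4 on x86, 8 on AMD64 -- while on a value class it pushes
+// the EE-reported instance size, e.g. 16 for a struct of two doubles.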
+
+
+// static:
+bool Interpreter::s_initialized = false;
+bool Interpreter::s_compilerStaticsInitialized = false;
+size_t Interpreter::s_TypedRefSize;
+CORINFO_CLASS_HANDLE Interpreter::s_TypedRefClsHnd;
+InterpreterType Interpreter::s_TypedRefIT;
+
+// InitializeCompilerStatics must have been called before using these accessors.
+size_t Interpreter::GetTypedRefSize(CEEInfo* info)
+{
+ _ASSERTE_MSG(s_compilerStaticsInitialized, "Precondition");
+ return s_TypedRefSize;
+}
+
+InterpreterType Interpreter::GetTypedRefIT(CEEInfo* info)
+{
+ _ASSERTE_MSG(s_compilerStaticsInitialized, "Precondition");
+ return s_TypedRefIT;
+}
+
+CORINFO_CLASS_HANDLE Interpreter::GetTypedRefClsHnd(CEEInfo* info)
+{
+ _ASSERTE_MSG(s_compilerStaticsInitialized, "Precondition");
+ return s_TypedRefClsHnd;
+}
+
+void Interpreter::Initialize()
+{
+ assert(!s_initialized);
+
+ s_InterpretMeths.ensureInit(CLRConfig::INTERNAL_Interpret);
+ s_InterpretMethsExclude.ensureInit(CLRConfig::INTERNAL_InterpretExclude);
+ s_InterpreterUseCaching = (s_InterpreterUseCachingFlag.val(CLRConfig::INTERNAL_InterpreterUseCaching) != 0);
+ s_InterpreterLooseRules = (s_InterpreterLooseRulesFlag.val(CLRConfig::INTERNAL_InterpreterLooseRules) != 0);
+ s_InterpreterDoLoopMethods = (s_InterpreterDoLoopMethodsFlag.val(CLRConfig::INTERNAL_InterpreterDoLoopMethods) != 0);
+
+    // Initialize the lock used to protect the interpreter method cache.
+ // TODO: it would be better if this were a reader/writer lock.
+ s_methodCacheLock.Init(CrstLeafLock, CRST_DEFAULT);
+
+ // Similarly, initialize the lock used to protect the map from
+ // interpreter stub addresses to their method descs.
+ s_interpStubToMDMapLock.Init(CrstLeafLock, CRST_DEFAULT);
+
+ s_initialized = true;
+
+#if INTERP_ILINSTR_PROFILE
+ SetILInstrCategories();
+#endif // INTERP_ILINSTR_PROFILE
+}
+
+void Interpreter::InitializeCompilerStatics(CEEInfo* info)
+{
+ if (!s_compilerStaticsInitialized)
+ {
+ // TODO: I believe I need no synchronization around this on x86, but I do
+ // on more permissive memory models. (Why it's OK on x86: each thread executes this
+ // before any access to the initialized static variables; if several threads do
+        // so, they perform idempotent initializing writes to the statics.)
+ GCX_PREEMP();
+ s_TypedRefClsHnd = info->getBuiltinClass(CLASSID_TYPED_BYREF);
+ s_TypedRefIT = InterpreterType(info, s_TypedRefClsHnd);
+ s_TypedRefSize = getClassSize(s_TypedRefClsHnd);
+ s_compilerStaticsInitialized = true;
+ // TODO: Need store-store memory barrier here.
+ }
+}
+
+void Interpreter::Terminate()
+{
+ if (s_initialized)
+ {
+ s_methodCacheLock.Destroy();
+ s_interpStubToMDMapLock.Destroy();
+ s_initialized = false;
+ }
+}
+
+#if INTERP_ILINSTR_PROFILE
+void Interpreter::SetILInstrCategories()
+{
+    // Start with the identity map.
+ for (unsigned short instr = 0; instr < 512; instr++) s_ILInstrCategories[instr] = instr;
+ // Now make exceptions.
+ for (unsigned instr = CEE_LDARG_0; instr <= CEE_LDARG_3; instr++) s_ILInstrCategories[instr] = CEE_LDARG;
+ s_ILInstrCategories[CEE_LDARG_S] = CEE_LDARG;
+
+ for (unsigned instr = CEE_LDLOC_0; instr <= CEE_LDLOC_3; instr++) s_ILInstrCategories[instr] = CEE_LDLOC;
+ s_ILInstrCategories[CEE_LDLOC_S] = CEE_LDLOC;
+
+ for (unsigned instr = CEE_STLOC_0; instr <= CEE_STLOC_3; instr++) s_ILInstrCategories[instr] = CEE_STLOC;
+ s_ILInstrCategories[CEE_STLOC_S] = CEE_STLOC;
+
+ s_ILInstrCategories[CEE_LDLOCA_S] = CEE_LDLOCA;
+
+ for (unsigned instr = CEE_LDC_I4_M1; instr <= CEE_LDC_I4_S; instr++) s_ILInstrCategories[instr] = CEE_LDC_I4;
+
+ for (unsigned instr = CEE_BR_S; instr <= CEE_BLT_UN; instr++) s_ILInstrCategories[instr] = CEE_BR;
+
+ for (unsigned instr = CEE_LDIND_I1; instr <= CEE_LDIND_REF; instr++) s_ILInstrCategories[instr] = CEE_LDIND_I;
+
+ for (unsigned instr = CEE_STIND_REF; instr <= CEE_STIND_R8; instr++) s_ILInstrCategories[instr] = CEE_STIND_I;
+
+ for (unsigned instr = CEE_ADD; instr <= CEE_REM_UN; instr++) s_ILInstrCategories[instr] = CEE_ADD;
+
+ for (unsigned instr = CEE_AND; instr <= CEE_NOT; instr++) s_ILInstrCategories[instr] = CEE_AND;
+
+ for (unsigned instr = CEE_CONV_I1; instr <= CEE_CONV_U8; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
+ for (unsigned instr = CEE_CONV_OVF_I1_UN; instr <= CEE_CONV_OVF_U_UN; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
+
+ for (unsigned instr = CEE_LDELEM_I1; instr <= CEE_LDELEM_REF; instr++) s_ILInstrCategories[instr] = CEE_LDELEM;
+ for (unsigned instr = CEE_STELEM_I; instr <= CEE_STELEM_REF; instr++) s_ILInstrCategories[instr] = CEE_STELEM;
+
+ for (unsigned instr = CEE_CONV_OVF_I1; instr <= CEE_CONV_OVF_U8; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
+ for (unsigned instr = CEE_CONV_U2; instr <= CEE_CONV_U1; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
+ for (unsigned instr = CEE_CONV_OVF_I; instr <= CEE_CONV_OVF_U; instr++) s_ILInstrCategories[instr] = CEE_CONV_I;
+
+ for (unsigned instr = CEE_ADD_OVF; instr <= CEE_SUB_OVF; instr++) s_ILInstrCategories[instr] = CEE_ADD_OVF;
+
+ s_ILInstrCategories[CEE_LEAVE_S] = CEE_LEAVE;
+ s_ILInstrCategories[CEE_CONV_U] = CEE_CONV_I;
+}
+#endif // INTERP_ILINSTR_PROFILE
+
+
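+// Implements the IL comparison instructions (ceq/cgt/cgt.un/clt/clt.un). The template
+// argument selects the comparison (CO_EQ, CO_GT, CO_GT_UN, CO_LT, or CO_LT_UN); the two
+// operands on the top of the stack are replaced by an INT32 result of 0 or 1.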
+template<int op>
+void Interpreter::CompareOp()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 2);
+ unsigned op1idx = m_curStackHt - 2;
+ INT32 res = CompareOpRes<op>(op1idx);
+ OpStackSet<INT32>(op1idx, res);
+ OpStackTypeSet(op1idx, InterpreterType(CORINFO_TYPE_INT));
+ m_curStackHt--;
+}
+
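+// Computes the 0/1 result of comparing the operands at "op1idx" and "op1idx + 1" without
+// altering the operand stack; shared by CompareOp and BrOnComparison.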
+template<int op>
+INT32 Interpreter::CompareOpRes(unsigned op1idx)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= op1idx + 2);
+ unsigned op2idx = op1idx + 1;
+ InterpreterType t1 = OpStackTypeGet(op1idx);
+ CorInfoType cit1 = t1.ToCorInfoType();
+ assert(IsStackNormalType(cit1));
+ InterpreterType t2 = OpStackTypeGet(op2idx);
+ CorInfoType cit2 = t2.ToCorInfoType();
+ assert(IsStackNormalType(cit2));
+ INT32 res = 0;
+
+ switch (cit1)
+ {
+ case CORINFO_TYPE_INT:
+ if (cit2 == CORINFO_TYPE_INT)
+ {
+ INT32 val1 = OpStackGet<INT32>(op1idx);
+ INT32 val2 = OpStackGet<INT32>(op2idx);
+ if (op == CO_EQ)
+ {
+ if (val1 == val2) res = 1;
+ }
+ else if (op == CO_GT)
+ {
+ if (val1 > val2) res = 1;
+ }
+ else if (op == CO_GT_UN)
+ {
+ if (static_cast<UINT32>(val1) > static_cast<UINT32>(val2)) res = 1;
+ }
+ else if (op == CO_LT)
+ {
+ if (val1 < val2) res = 1;
+ }
+ else
+ {
+ assert(op == CO_LT_UN);
+ if (static_cast<UINT32>(val1) < static_cast<UINT32>(val2)) res = 1;
+ }
+ }
+ else if (cit2 == CORINFO_TYPE_NATIVEINT ||
+ (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF) ||
+ (cit2 == CORINFO_TYPE_VALUECLASS
+ && CorInfoTypeStackNormalize(GetTypeForPrimitiveValueClass(t2.ToClassHandle())) == CORINFO_TYPE_NATIVEINT))
+ {
+ NativeInt val1 = OpStackGet<NativeInt>(op1idx);
+ NativeInt val2 = OpStackGet<NativeInt>(op2idx);
+ if (op == CO_EQ)
+ {
+ if (val1 == val2) res = 1;
+ }
+ else if (op == CO_GT)
+ {
+ if (val1 > val2) res = 1;
+ }
+ else if (op == CO_GT_UN)
+ {
+ if (static_cast<NativeUInt>(val1) > static_cast<NativeUInt>(val2)) res = 1;
+ }
+ else if (op == CO_LT)
+ {
+ if (val1 < val2) res = 1;
+ }
+ else
+ {
+ assert(op == CO_LT_UN);
+ if (static_cast<NativeUInt>(val1) < static_cast<NativeUInt>(val2)) res = 1;
+ }
+ }
+ else if (cit2 == CORINFO_TYPE_VALUECLASS)
+ {
+ cit2 = GetTypeForPrimitiveValueClass(t2.ToClassHandle());
+ INT32 val1 = OpStackGet<INT32>(op1idx);
+ INT32 val2 = 0;
+ if (CorInfoTypeStackNormalize(cit2) == CORINFO_TYPE_INT)
+ {
+
+ size_t sz = t2.Size(&m_interpCeeInfo);
+ switch (sz)
+ {
+ case 1:
+ if (CorInfoTypeIsUnsigned(cit2))
+ {
+ val2 = OpStackGet<UINT8>(op2idx);
+ }
+ else
+ {
+ val2 = OpStackGet<INT8>(op2idx);
+ }
+ break;
+ case 2:
+ if (CorInfoTypeIsUnsigned(cit2))
+ {
+ val2 = OpStackGet<UINT16>(op2idx);
+ }
+ else
+ {
+ val2 = OpStackGet<INT16>(op2idx);
+ }
+ break;
+ case 4:
+ val2 = OpStackGet<INT32>(op2idx);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ else
+ {
+ VerificationError("Can't compare with struct type.");
+ }
+ if (op == CO_EQ)
+ {
+ if (val1 == val2) res = 1;
+ }
+ else if (op == CO_GT)
+ {
+ if (val1 > val2) res = 1;
+ }
+ else if (op == CO_GT_UN)
+ {
+ if (static_cast<UINT32>(val1) > static_cast<UINT32>(val2)) res = 1;
+ }
+ else if (op == CO_LT)
+ {
+ if (val1 < val2) res = 1;
+ }
+ else
+ {
+ assert(op == CO_LT_UN);
+ if (static_cast<UINT32>(val1) < static_cast<UINT32>(val2)) res = 1;
+ }
+ }
+ else
+ {
+ VerificationError("Binary comparision operation: type mismatch.");
+ }
+ break;
+ case CORINFO_TYPE_NATIVEINT:
+ if (cit2 == CORINFO_TYPE_NATIVEINT || cit2 == CORINFO_TYPE_INT
+ || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_LONG)
+ || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF)
+ || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_CLASS && OpStackGet<void*>(op2idx) == 0))
+ {
+ NativeInt val1 = OpStackGet<NativeInt>(op1idx);
+ NativeInt val2;
+ if (cit2 == CORINFO_TYPE_NATIVEINT)
+ {
+ val2 = OpStackGet<NativeInt>(op2idx);
+ }
+ else if (cit2 == CORINFO_TYPE_INT)
+ {
+ val2 = static_cast<NativeInt>(OpStackGet<INT32>(op2idx));
+ }
+ else if (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_LONG)
+ {
+ val2 = static_cast<NativeInt>(OpStackGet<INT64>(op2idx));
+ }
+ else if (cit2 == CORINFO_TYPE_CLASS)
+ {
+ assert(OpStackGet<void*>(op2idx) == 0);
+ val2 = 0;
+ }
+ else
+ {
+ assert(s_InterpreterLooseRules && cit2 == CORINFO_TYPE_BYREF);
+ val2 = reinterpret_cast<NativeInt>(OpStackGet<void*>(op2idx));
+ }
+ if (op == CO_EQ)
+ {
+ if (val1 == val2) res = 1;
+ }
+ else if (op == CO_GT)
+ {
+ if (val1 > val2) res = 1;
+ }
+ else if (op == CO_GT_UN)
+ {
+ if (static_cast<NativeUInt>(val1) > static_cast<NativeUInt>(val2)) res = 1;
+ }
+ else if (op == CO_LT)
+ {
+ if (val1 < val2) res = 1;
+ }
+ else
+ {
+ assert(op == CO_LT_UN);
+ if (static_cast<NativeUInt>(val1) < static_cast<NativeUInt>(val2)) res = 1;
+ }
+ }
+ else
+ {
+ VerificationError("Binary comparision operation: type mismatch.");
+ }
+ break;
+ case CORINFO_TYPE_LONG:
+ {
+ bool looseLong = false;
+#if defined(_AMD64_)
+ looseLong = s_InterpreterLooseRules && (cit2 == CORINFO_TYPE_NATIVEINT || cit2 == CORINFO_TYPE_BYREF);
+#endif
+ if (cit2 == CORINFO_TYPE_LONG || looseLong)
+ {
+ INT64 val1 = OpStackGet<INT64>(op1idx);
+ INT64 val2 = OpStackGet<INT64>(op2idx);
+ if (op == CO_EQ)
+ {
+ if (val1 == val2) res = 1;
+ }
+ else if (op == CO_GT)
+ {
+ if (val1 > val2) res = 1;
+ }
+ else if (op == CO_GT_UN)
+ {
+ if (static_cast<UINT64>(val1) > static_cast<UINT64>(val2)) res = 1;
+ }
+ else if (op == CO_LT)
+ {
+ if (val1 < val2) res = 1;
+ }
+ else
+ {
+ assert(op == CO_LT_UN);
+ if (static_cast<UINT64>(val1) < static_cast<UINT64>(val2)) res = 1;
+ }
+ }
+ else
+ {
+ VerificationError("Binary comparision operation: type mismatch.");
+ }
+ }
+ break;
+
+ case CORINFO_TYPE_CLASS:
+ case CORINFO_TYPE_STRING:
+ if (cit2 == CORINFO_TYPE_CLASS || cit2 == CORINFO_TYPE_STRING)
+ {
+ GCX_FORBID();
+ Object* val1 = OpStackGet<Object*>(op1idx);
+ Object* val2 = OpStackGet<Object*>(op2idx);
+ if (op == CO_EQ)
+ {
+ if (val1 == val2) res = 1;
+ }
+ else if (op == CO_GT_UN)
+ {
+ if (val1 != val2) res = 1;
+ }
+ else
+ {
+ VerificationError("Binary comparision operation: type mismatch.");
+ }
+ }
+ else
+ {
+ VerificationError("Binary comparision operation: type mismatch.");
+ }
+ break;
+
+
+ case CORINFO_TYPE_FLOAT:
+ {
+ bool isDouble = (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_DOUBLE);
+ if (cit2 == CORINFO_TYPE_FLOAT || isDouble)
+ {
+ float val1 = OpStackGet<float>(op1idx);
+ float val2 = (isDouble) ? (float) OpStackGet<double>(op2idx) : OpStackGet<float>(op2idx);
+ if (op == CO_EQ)
+ {
+ // I'm assuming IEEE math here, so that if at least one is a NAN, the comparison will fail...
+ if (val1 == val2) res = 1;
+ }
+ else if (op == CO_GT)
+ {
+ // I'm assuming that C++ arithmetic does the right thing here with infinities and NANs.
+ if (val1 > val2) res = 1;
+ }
+ else if (op == CO_GT_UN)
+ {
+ // Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
+ if (_isnan(val1) || _isnan(val2)) res = 1;
+ else if (val1 > val2) res = 1;
+ }
+ else if (op == CO_LT)
+ {
+ if (val1 < val2) res = 1;
+ }
+ else
+ {
+ assert(op == CO_LT_UN);
+ // Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
+ if (_isnan(val1) || _isnan(val2)) res = 1;
+ else if (val1 < val2) res = 1;
+ }
+ }
+ else
+ {
+ VerificationError("Binary comparision operation: type mismatch.");
+ }
+ }
+ break;
+
+ case CORINFO_TYPE_DOUBLE:
+ {
+ bool isFloat = (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_FLOAT);
+ if (cit2 == CORINFO_TYPE_DOUBLE || isFloat)
+ {
+ double val1 = OpStackGet<double>(op1idx);
+ double val2 = (isFloat) ? (double) OpStackGet<float>(op2idx) : OpStackGet<double>(op2idx);
+ if (op == CO_EQ)
+ {
+ // I'm assuming IEEE math here, so that if at least one is a NAN, the comparison will fail...
+ if (val1 == val2) res = 1;
+ }
+ else if (op == CO_GT)
+ {
+ // I'm assuming that C++ arithmetic does the right thing here with infinities and NANs.
+ if (val1 > val2) res = 1;
+ }
+ else if (op == CO_GT_UN)
+ {
+ // Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
+ if (_isnan(val1) || _isnan(val2)) res = 1;
+ else if (val1 > val2) res = 1;
+ }
+ else if (op == CO_LT)
+ {
+ if (val1 < val2) res = 1;
+ }
+ else
+ {
+ assert(op == CO_LT_UN);
+ // Check for NAN's here: if either is a NAN, they're unordered, so this comparison returns true.
+ if (_isnan(val1) || _isnan(val2)) res = 1;
+ else if (val1 < val2) res = 1;
+ }
+ }
+ else
+ {
+ VerificationError("Binary comparision operation: type mismatch.");
+ }
+ }
+ break;
+
+ case CORINFO_TYPE_BYREF:
+ if (cit2 == CORINFO_TYPE_BYREF || (s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT))
+ {
+ NativeInt val1 = reinterpret_cast<NativeInt>(OpStackGet<void*>(op1idx));
+ NativeInt val2;
+ if (cit2 == CORINFO_TYPE_BYREF)
+ {
+ val2 = reinterpret_cast<NativeInt>(OpStackGet<void*>(op2idx));
+ }
+ else
+ {
+ assert(s_InterpreterLooseRules && cit2 == CORINFO_TYPE_NATIVEINT);
+ val2 = OpStackGet<NativeInt>(op2idx);
+ }
+ if (op == CO_EQ)
+ {
+ if (val1 == val2) res = 1;
+ }
+ else if (op == CO_GT)
+ {
+ if (val1 > val2) res = 1;
+ }
+ else if (op == CO_GT_UN)
+ {
+ if (static_cast<NativeUInt>(val1) > static_cast<NativeUInt>(val2)) res = 1;
+ }
+ else if (op == CO_LT)
+ {
+ if (val1 < val2) res = 1;
+ }
+ else
+ {
+ assert(op == CO_LT_UN);
+ if (static_cast<NativeUInt>(val1) < static_cast<NativeUInt>(val2)) res = 1;
+ }
+ }
+ else
+ {
+ VerificationError("Binary comparision operation: type mismatch.");
+ }
+ break;
+
+ case CORINFO_TYPE_VALUECLASS:
+ {
+ CorInfoType newCit1 = GetTypeForPrimitiveValueClass(t1.ToClassHandle());
+ if (newCit1 == CORINFO_TYPE_UNDEF)
+ {
+ VerificationError("Can't compare a value class.");
+ }
+ else
+ {
+ NYI_INTERP("Must eliminate 'punning' value classes from the ostack.");
+ }
+ }
+ break;
+
+ default:
+ assert(false); // Should not be here if the type is stack-normal.
+ }
+
+ return res;
+}
+
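+// Implements brtrue/brfalse and their short forms. "val" is the sense of the branch, and
+// "targetLen" is the width of the branch offset in the IL stream (1 for the .s forms, else 4).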
+template<bool val, int targetLen>
+void Interpreter::BrOnValue()
+{
+ assert(targetLen == 1 || targetLen == 4);
+ assert(m_curStackHt > 0);
+ unsigned stackInd = m_curStackHt - 1;
+ InterpreterType it = OpStackTypeGet(stackInd);
+
+ // It shouldn't be a value class, unless it's a punning name for a primitive integral type.
+ if (it.ToCorInfoType() == CORINFO_TYPE_VALUECLASS)
+ {
+ GCX_PREEMP();
+ CorInfoType cit = m_interpCeeInfo.getTypeForPrimitiveValueClass(it.ToClassHandle());
+ if (CorInfoTypeIsIntegral(cit))
+ {
+ it = InterpreterType(cit);
+ }
+ else
+ {
+ VerificationError("Can't branch on the value of a value type that is not a primitive type.");
+ }
+ }
+
+#ifdef _DEBUG
+ switch (it.ToCorInfoType())
+ {
+ case CORINFO_TYPE_FLOAT:
+ case CORINFO_TYPE_DOUBLE:
+ VerificationError("Can't branch on the value of a float or double.");
+ break;
+ default:
+ break;
+ }
+#endif // _DEBUG
+
+ switch (it.SizeNotStruct())
+ {
+ case 4:
+ {
+ INT32 branchVal = OpStackGet<INT32>(stackInd);
+ BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
+ }
+ break;
+ case 8:
+ {
+ INT64 branchVal = OpStackGet<INT64>(stackInd);
+ BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
+ }
+ break;
+
+ // The value-class case handled above makes sizes 1 and 2 possible.
+ case 1:
+ {
+ INT8 branchVal = OpStackGet<INT8>(stackInd);
+ BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
+ }
+ break;
+ case 2:
+ {
+ INT16 branchVal = OpStackGet<INT16>(stackInd);
+ BrOnValueTakeBranch((branchVal != 0) == val, targetLen);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ m_curStackHt = stackInd;
+}
+
+// compOp is a member of the BranchComparisonOp enumeration.
+template<int compOp, bool reverse, int targetLen>
+void Interpreter::BrOnComparison()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(targetLen == 1 || targetLen == 4);
+ assert(m_curStackHt >= 2);
+ unsigned v1Ind = m_curStackHt - 2;
+
+ INT32 res = CompareOpRes<compOp>(v1Ind);
+ if (reverse)
+ {
+ res = (res == 0) ? 1 : 0;
+ }
+
+ if (res)
+ {
+ int offset;
+ if (targetLen == 1)
+ {
+ // The one-byte branch offset is signed (BYTE is unsigned), so use getI1 to sign-extend it.
+ offset = getI1(m_ILCodePtr + 1);
+ }
+ else
+ {
+ offset = getI4LittleEndian(m_ILCodePtr + 1);
+ }
+ // 1 is the size of the current instruction; offset is relative to start of next.
+ if (offset < 0)
+ {
+ // Backwards branch; enable caching.
+ BackwardsBranchActions(offset);
+ }
+ ExecuteBranch(m_ILCodePtr + 1 + targetLen + offset);
+ }
+ else
+ {
+ m_ILCodePtr += targetLen + 1;
+ }
+ m_curStackHt -= 2;
+}
+
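+// Implements ldfld: replaces the object reference, byref, pointer, or value-type instance on
+// the top of the operand stack with the value of the designated instance field.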
+void Interpreter::LdFld(FieldDesc* fldIn)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ BarrierIfVolatile();
+
+ FieldDesc* fld = fldIn;
+ CORINFO_CLASS_HANDLE valClsHnd = NULL;
+ DWORD fldOffset;
+ {
+ GCX_PREEMP();
+ unsigned ilOffset = CurOffset();
+ if (fld == NULL && s_InterpreterUseCaching)
+ {
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdFld]);
+#endif // INTERP_TRACING
+ fld = GetCachedInstanceField(ilOffset);
+ }
+ if (fld == NULL)
+ {
+ unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(byte));
+ fld = FindField(tok InterpTracingArg(RTK_LdFld));
+ assert(fld != NULL);
+
+ fldOffset = fld->GetOffset();
+ if (s_InterpreterUseCaching && fldOffset < FIELD_OFFSET_LAST_REAL_OFFSET)
+ CacheInstanceField(ilOffset, fld);
+ }
+ else
+ {
+ fldOffset = fld->GetOffset();
+ }
+ }
+ CorInfoType valCit = CEEInfo::asCorInfoType(fld->GetFieldType());
+
+ // If "fldIn" is non-NULL, it's not a "real" LdFld -- the caller should handle updating the instruction pointer.
+ if (fldIn == NULL)
+ m_ILCodePtr += 5; // Last use above, so update now.
+
+ // We need to construct the interpreter type for a struct type before we try to do coordinated
+ // pushes of the value and type on the opstacks -- these must be atomic wrt GC, and constructing
+ // a struct InterpreterType transitions to preemptive mode.
+ InterpreterType structValIT;
+ if (valCit == CORINFO_TYPE_VALUECLASS)
+ {
+ GCX_PREEMP();
+ valCit = m_interpCeeInfo.getFieldType(CORINFO_FIELD_HANDLE(fld), &valClsHnd);
+ structValIT = InterpreterType(&m_interpCeeInfo, valClsHnd);
+ }
+
+ UINT sz = fld->GetSize();
+
+ // Live vars: valCit, structValIT
+ assert(m_curStackHt > 0);
+ unsigned stackInd = m_curStackHt - 1;
+ InterpreterType addrIt = OpStackTypeGet(stackInd);
+ CorInfoType addrCit = addrIt.ToCorInfoType();
+ bool isUnsigned;
+
+ if (addrCit == CORINFO_TYPE_CLASS)
+ {
+ OBJECTREF obj = OBJECTREF(OpStackGet<Object*>(stackInd));
+ ThrowOnInvalidPointer(OBJECTREFToObject(obj));
+#ifdef FEATURE_REMOTING
+ if (obj->IsTransparentProxy())
+ {
+ NYI_INTERP("Thunking objects not supported");
+ }
+#endif
+ if (valCit == CORINFO_TYPE_VALUECLASS)
+ {
+ void* srcPtr = fld->GetInstanceAddress(obj);
+
+ // srcPtr is now vulnerable.
+ GCX_FORBID();
+
+ MethodTable* valClsMT = GetMethodTableFromClsHnd(valClsHnd);
+ if (sz > sizeof(INT64))
+ {
+ // Large struct case: allocate space on the large struct operand stack.
+ void* destPtr = LargeStructOperandStackPush(sz);
+ OpStackSet<void*>(stackInd, destPtr);
+ CopyValueClass(destPtr, srcPtr, valClsMT, obj->GetAppDomain());
+ }
+ else
+ {
+ // Small struct case -- the value is stored inline in the operand stack.
+ OpStackSet<INT64>(stackInd, GetSmallStructValue(srcPtr, sz));
+ }
+ }
+ else
+ {
+ BYTE* fldStart = dac_cast<PTR_BYTE>(OBJECTREFToObject(obj)) + sizeof(Object) + fldOffset;
+ // fldStart is now a vulnerable byref
+ GCX_FORBID();
+
+ switch (sz)
+ {
+ case 1:
+ isUnsigned = CorInfoTypeIsUnsigned(valCit);
+ if (isUnsigned)
+ {
+ OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT8*>(fldStart));
+ }
+ else
+ {
+ OpStackSet<INT32>(stackInd, *reinterpret_cast<INT8*>(fldStart));
+ }
+ break;
+ case 2:
+ isUnsigned = CorInfoTypeIsUnsigned(valCit);
+ if (isUnsigned)
+ {
+ OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT16*>(fldStart));
+ }
+ else
+ {
+ OpStackSet<INT32>(stackInd, *reinterpret_cast<INT16*>(fldStart));
+ }
+ break;
+ case 4:
+ OpStackSet<INT32>(stackInd, *reinterpret_cast<INT32*>(fldStart));
+ break;
+ case 8:
+ OpStackSet<INT64>(stackInd, *reinterpret_cast<INT64*>(fldStart));
+ break;
+ default:
+ _ASSERTE_MSG(false, "Should not reach here.");
+ break;
+ }
+ }
+ }
+ else
+ {
+ INT8* ptr = NULL;
+ if (addrCit == CORINFO_TYPE_VALUECLASS)
+ {
+ size_t addrSize = addrIt.Size(&m_interpCeeInfo);
+ // The ECMA spec allows ldfld to be applied to "an instance of a value type."
+ // We will take the address of the ostack entry.
+ if (addrIt.IsLargeStruct(&m_interpCeeInfo))
+ {
+ ptr = reinterpret_cast<INT8*>(OpStackGet<void*>(stackInd));
+ // This is delicate. I'm going to pop the large struct off the large-struct stack
+ // now, even though the field value we push may go back on the large object stack.
+ // We rely on the fact that this instruction doesn't do any other pushing, and
+ // we assume that LargeStructOperandStackPop does not actually deallocate any memory,
+ // and we rely on memcpy properly handling possibly-overlapping regions being copied.
+ // Finally (wow, this really *is* delicate), we rely on the property that the large-struct
+ // stack pop operation doesn't deallocate memory (the size of the allocated memory for the
+ // large-struct stack only grows in a method execution), and that if we push the field value
+ // on the large struct stack below, the size of the pushed item is at most the size of the
+ // popped item, so the stack won't grow (which would allow a dealloc/realloc).
+ // (All in all, maybe it would be better to just copy the value elsewhere then pop...but
+ // that wouldn't be very aggressive.)
+ LargeStructOperandStackPop(addrSize, ptr);
+ }
+ else
+ {
+ ptr = reinterpret_cast<INT8*>(OpStackGetAddr(stackInd, addrSize));
+ }
+ }
+ else
+ {
+ assert(CorInfoTypeIsPointer(addrCit));
+ ptr = OpStackGet<INT8*>(stackInd);
+ ThrowOnInvalidPointer(ptr);
+ }
+
+ assert(ptr != NULL);
+ ptr += fldOffset;
+
+ if (valCit == CORINFO_TYPE_VALUECLASS)
+ {
+ if (sz > sizeof(INT64))
+ {
+ // Large struct case.
+ void* dstPtr = LargeStructOperandStackPush(sz);
+ memcpy(dstPtr, ptr, sz);
+ OpStackSet<void*>(stackInd, dstPtr);
+ }
+ else
+ {
+ // Small struct case -- the value is stored inline in the operand stack.
+ OpStackSet<INT64>(stackInd, GetSmallStructValue(ptr, sz));
+ }
+ OpStackTypeSet(stackInd, structValIT.StackNormalize());
+ return;
+ }
+ // Otherwise...
+ switch (sz)
+ {
+ case 1:
+ isUnsigned = CorInfoTypeIsUnsigned(valCit);
+ if (isUnsigned)
+ {
+ OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT8*>(ptr));
+ }
+ else
+ {
+ OpStackSet<INT32>(stackInd, *reinterpret_cast<INT8*>(ptr));
+ }
+ break;
+ case 2:
+ isUnsigned = CorInfoTypeIsUnsigned(valCit);
+ if (isUnsigned)
+ {
+ OpStackSet<UINT32>(stackInd, *reinterpret_cast<UINT16*>(ptr));
+ }
+ else
+ {
+ OpStackSet<INT32>(stackInd, *reinterpret_cast<INT16*>(ptr));
+ }
+ break;
+ case 4:
+ OpStackSet<INT32>(stackInd, *reinterpret_cast<INT32*>(ptr));
+ break;
+ case 8:
+ OpStackSet<INT64>(stackInd, *reinterpret_cast<INT64*>(ptr));
+ break;
+ }
+ }
+ if (valCit == CORINFO_TYPE_VALUECLASS)
+ {
+ OpStackTypeSet(stackInd, structValIT.StackNormalize());
+ }
+ else
+ {
+ OpStackTypeSet(stackInd, InterpreterType(valCit).StackNormalize());
+ }
+}
+
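+// Implements ldflda: replaces the object reference or pointer on the top of the stack with
+// the address of the designated instance field (a byref, or a native int if the input was one).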
+void Interpreter::LdFldA()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(byte));
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdFldA]);
+#endif // INTERP_TRACING
+
+ unsigned offset = CurOffset();
+ m_ILCodePtr += 5; // Last use above, so update now.
+
+ FieldDesc* fld = NULL;
+ if (s_InterpreterUseCaching) fld = GetCachedInstanceField(offset);
+ if (fld == NULL)
+ {
+ GCX_PREEMP();
+ fld = FindField(tok InterpTracingArg(RTK_LdFldA));
+ if (s_InterpreterUseCaching) CacheInstanceField(offset, fld);
+ }
+ assert(m_curStackHt > 0);
+ unsigned stackInd = m_curStackHt - 1;
+ CorInfoType addrCit = OpStackTypeGet(stackInd).ToCorInfoType();
+ if (addrCit == CORINFO_TYPE_BYREF || addrCit == CORINFO_TYPE_CLASS || addrCit == CORINFO_TYPE_NATIVEINT)
+ {
+ NativeInt ptr = OpStackGet<NativeInt>(stackInd);
+ ThrowOnInvalidPointer((void*)ptr);
+ // The "offset" below does not include the Object (i.e., the MethodTable pointer) for object pointers, so add that in first.
+ if (addrCit == CORINFO_TYPE_CLASS) ptr += sizeof(Object);
+ // Now add the offset.
+ ptr += fld->GetOffset();
+ OpStackSet<NativeInt>(stackInd, ptr);
+ if (addrCit == CORINFO_TYPE_NATIVEINT)
+ {
+ OpStackTypeSet(stackInd, InterpreterType(CORINFO_TYPE_NATIVEINT));
+ }
+ else
+ {
+ OpStackTypeSet(stackInd, InterpreterType(CORINFO_TYPE_BYREF));
+ }
+ }
+ else
+ {
+ VerificationError("LdfldA requires object reference, managed or unmanaged pointer type.");
+ }
+}
+
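+// Implements stfld: pops a value and an object reference (or byref/native int address), and
+// stores the value into the designated instance field, using write barriers for GC references.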
+void Interpreter::StFld()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_StFld]);
+#endif // INTERP_TRACING
+
+ FieldDesc* fld = NULL;
+ DWORD fldOffset;
+ {
+ unsigned ilOffset = CurOffset();
+ if (s_InterpreterUseCaching) fld = GetCachedInstanceField(ilOffset);
+ if (fld == NULL)
+ {
+ unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(byte));
+ GCX_PREEMP();
+ fld = FindField(tok InterpTracingArg(RTK_StFld));
+ assert(fld != NULL);
+ fldOffset = fld->GetOffset();
+ if (s_InterpreterUseCaching && fldOffset < FIELD_OFFSET_LAST_REAL_OFFSET)
+ CacheInstanceField(ilOffset, fld);
+ }
+ else
+ {
+ fldOffset = fld->GetOffset();
+ }
+ }
+ m_ILCodePtr += 5; // Last use above, so update now.
+
+ UINT sz = fld->GetSize();
+ assert(m_curStackHt >= 2);
+ unsigned addrInd = m_curStackHt - 2;
+ CorInfoType addrCit = OpStackTypeGet(addrInd).ToCorInfoType();
+ unsigned valInd = m_curStackHt - 1;
+ CorInfoType valCit = OpStackTypeGet(valInd).ToCorInfoType();
+ assert(IsStackNormalType(addrCit) && IsStackNormalType(valCit));
+
+ m_curStackHt -= 2;
+
+ if (addrCit == CORINFO_TYPE_CLASS)
+ {
+ OBJECTREF obj = OBJECTREF(OpStackGet<Object*>(addrInd));
+ ThrowOnInvalidPointer(OBJECTREFToObject(obj));
+
+ if (valCit == CORINFO_TYPE_CLASS)
+ {
+ fld->SetRefValue(obj, ObjectToOBJECTREF(OpStackGet<Object*>(valInd)));
+ }
+ else if (valCit == CORINFO_TYPE_VALUECLASS)
+ {
+ MethodTable* valClsMT = GetMethodTableFromClsHnd(OpStackTypeGet(valInd).ToClassHandle());
+ void* destPtr = fld->GetInstanceAddress(obj);
+
+ // destPtr is now a vulnerable byref, so can't do GC.
+ GCX_FORBID();
+
+ // CopyValueClassUnchecked is used below to ensure that write barriers happen for the case in
+ // which the value class contains GC pointers. We could do better...
+ if (sz > sizeof(INT64))
+ {
+ // Large struct case: stack slot contains pointer...
+ void* srcPtr = OpStackGet<void*>(valInd);
+ CopyValueClassUnchecked(destPtr, srcPtr, valClsMT);
+ LargeStructOperandStackPop(sz, srcPtr);
+ }
+ else
+ {
+ // Small struct case -- the value is stored inline in the operand stack.
+ CopyValueClassUnchecked(destPtr, OpStackGetAddr(valInd, sz), valClsMT);
+ }
+ BarrierIfVolatile();
+ return;
+ }
+ else
+ {
+#ifdef _DEBUG
+ if (obj->IsTransparentProxy()) NYI_INTERP("Stores to thunking objects.");
+#endif
+ BYTE* fldStart = dac_cast<PTR_BYTE>(OBJECTREFToObject(obj)) + sizeof(Object) + fldOffset;
+ // fldStart is now a vulnerable byref
+ GCX_FORBID();
+
+ switch (sz)
+ {
+ case 1:
+ *reinterpret_cast<INT8*>(fldStart) = OpStackGet<INT8>(valInd);
+ break;
+ case 2:
+ *reinterpret_cast<INT16*>(fldStart) = OpStackGet<INT16>(valInd);
+ break;
+ case 4:
+ *reinterpret_cast<INT32*>(fldStart) = OpStackGet<INT32>(valInd);
+ break;
+ case 8:
+ *reinterpret_cast<INT64*>(fldStart) = OpStackGet<INT64>(valInd);
+ break;
+ }
+ }
+ }
+ else
+ {
+ assert(addrCit == CORINFO_TYPE_BYREF || addrCit == CORINFO_TYPE_NATIVEINT);
+
+ INT8* destPtr = OpStackGet<INT8*>(addrInd);
+ ThrowOnInvalidPointer(destPtr);
+ destPtr += fldOffset;
+
+ if (valCit == CORINFO_TYPE_VALUECLASS)
+ {
+ MethodTable* valClsMT = GetMethodTableFromClsHnd(OpStackTypeGet(valInd).ToClassHandle());
+ // CopyValueClassUnchecked is used below to ensure that write barriers happen for the case in
+ // which the value class contains GC pointers. We could do better...
+ if (sz > sizeof(INT64))
+ {
+ // Large struct case: stack slot contains pointer...
+ void* srcPtr = OpStackGet<void*>(valInd);
+ CopyValueClassUnchecked(destPtr, srcPtr, valClsMT);
+ LargeStructOperandStackPop(sz, srcPtr);
+ }
+ else
+ {
+ // Small struct case -- the value is stored inline in the operand stack.
+ CopyValueClassUnchecked(destPtr, OpStackGetAddr(valInd, sz), valClsMT);
+ }
+ BarrierIfVolatile();
+ return;
+ }
+ else if (valCit == CORINFO_TYPE_CLASS)
+ {
+ OBJECTREF val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
+ SetObjectReferenceUnchecked(reinterpret_cast<OBJECTREF*>(destPtr), val);
+ }
+ else
+ {
+ switch (sz)
+ {
+ case 1:
+ *reinterpret_cast<INT8*>(destPtr) = OpStackGet<INT8>(valInd);
+ break;
+ case 2:
+ *reinterpret_cast<INT16*>(destPtr) = OpStackGet<INT16>(valInd);
+ break;
+ case 4:
+ *reinterpret_cast<INT32*>(destPtr) = OpStackGet<INT32>(valInd);
+ break;
+ case 8:
+ *reinterpret_cast<INT64*>(destPtr) = OpStackGet<INT64>(valInd);
+ break;
+ }
+ }
+ }
+ BarrierIfVolatile();
+}
+
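+// Resolves the static field token at the current IL offset, ensures the declaring class is
+// initialized, and returns the field's address, type, and size through the out parameters.
+// Returns true if the resolved address may be cached; helper-based statics and value types
+// boxed in the managed heap yield addresses that cannot be cached.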
+bool Interpreter::StaticFldAddrWork(CORINFO_ACCESS_FLAGS accessFlgs, /*out (byref)*/void** pStaticFieldAddr, /*out*/InterpreterType* pit, /*out*/UINT* pFldSize, /*out*/bool* pManagedMem)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ bool isCacheable = true;
+ *pManagedMem = true; // Default result.
+
+ unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(byte));
+ m_ILCodePtr += 5; // Above is last use of m_ILCodePtr in this method, so update now.
+
+ FieldDesc* fld;
+ CORINFO_FIELD_INFO fldInfo;
+ CORINFO_RESOLVED_TOKEN fldTok;
+
+ void* pFldAddr = NULL;
+ {
+ {
+ GCX_PREEMP();
+
+ ResolveToken(&fldTok, tok, CORINFO_TOKENKIND_Field InterpTracingArg(RTK_SFldAddr));
+ fld = reinterpret_cast<FieldDesc*>(fldTok.hField);
+
+ m_interpCeeInfo.getFieldInfo(&fldTok, m_methInfo->m_method, accessFlgs, &fldInfo);
+ }
+
+ EnsureClassInit(GetMethodTableFromClsHnd(fldTok.hClass));
+
+ if (fldInfo.fieldAccessor == CORINFO_FIELD_STATIC_TLS)
+ {
+ NYI_INTERP("Thread-local static.");
+ }
+ else if (fldInfo.fieldAccessor == CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER
+ || fldInfo.fieldAccessor == CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER)
+ {
+ *pStaticFieldAddr = fld->GetCurrentStaticAddress();
+ isCacheable = false;
+ }
+ else
+ {
+ *pStaticFieldAddr = fld->GetCurrentStaticAddress();
+ }
+ }
+ if (fldInfo.structType != NULL && fldInfo.fieldType != CORINFO_TYPE_CLASS && fldInfo.fieldType != CORINFO_TYPE_PTR)
+ {
+ *pit = InterpreterType(&m_interpCeeInfo, fldInfo.structType);
+
+ if ((fldInfo.fieldFlags & CORINFO_FLG_FIELD_UNMANAGED) == 0)
+ {
+ // For valuetypes in managed memory, the address returned contains a pointer into the heap, to a boxed version of the
+ // static variable; return a pointer to the boxed struct.
+ isCacheable = false;
+ }
+ else
+ {
+ *pManagedMem = false;
+ }
+ }
+ else
+ {
+ *pit = InterpreterType(fldInfo.fieldType);
+ }
+ *pFldSize = fld->GetSize();
+
+ return isCacheable;
+}
+
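+// Implements ldsfld: pushes the value of a static field, keeping the field address
+// GC-protected (it may be an interior pointer into a boxed static) while copying.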
+void Interpreter::LdSFld()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ InterpreterType fldIt;
+ UINT sz;
+ bool managedMem;
+ void* srcPtr = NULL;
+
+ BarrierIfVolatile();
+
+ GCPROTECT_BEGININTERIOR(srcPtr);
+
+ StaticFldAddr(CORINFO_ACCESS_GET, &srcPtr, &fldIt, &sz, &managedMem);
+
+ bool isUnsigned;
+
+ if (fldIt.IsStruct())
+ {
+ // Large struct case.
+ CORINFO_CLASS_HANDLE sh = fldIt.ToClassHandle();
+ // This call is GC_TRIGGERS, so do it before we copy the value: no GC after this,
+ // until the op stacks and ht are consistent.
+ OpStackTypeSet(m_curStackHt, InterpreterType(&m_interpCeeInfo, sh).StackNormalize());
+ if (fldIt.IsLargeStruct(&m_interpCeeInfo))
+ {
+ void* dstPtr = LargeStructOperandStackPush(sz);
+ memcpy(dstPtr, srcPtr, sz);
+ OpStackSet<void*>(m_curStackHt, dstPtr);
+ }
+ else
+ {
+ OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(srcPtr, sz));
+ }
+ }
+ else
+ {
+ CorInfoType valCit = fldIt.ToCorInfoType();
+ switch (sz)
+ {
+ case 1:
+ isUnsigned = CorInfoTypeIsUnsigned(valCit);
+ if (isUnsigned)
+ {
+ OpStackSet<UINT32>(m_curStackHt, *reinterpret_cast<UINT8*>(srcPtr));
+ }
+ else
+ {
+ OpStackSet<INT32>(m_curStackHt, *reinterpret_cast<INT8*>(srcPtr));
+ }
+ break;
+ case 2:
+ isUnsigned = CorInfoTypeIsUnsigned(valCit);
+ if (isUnsigned)
+ {
+ OpStackSet<UINT32>(m_curStackHt, *reinterpret_cast<UINT16*>(srcPtr));
+ }
+ else
+ {
+ OpStackSet<INT32>(m_curStackHt, *reinterpret_cast<INT16*>(srcPtr));
+ }
+ break;
+ case 4:
+ OpStackSet<INT32>(m_curStackHt, *reinterpret_cast<INT32*>(srcPtr));
+ break;
+ case 8:
+ OpStackSet<INT64>(m_curStackHt, *reinterpret_cast<INT64*>(srcPtr));
+ break;
+ default:
+ _ASSERTE_MSG(false, "LdSFld: this should have exhausted all the possible sizes.");
+ break;
+ }
+ OpStackTypeSet(m_curStackHt, fldIt.StackNormalize());
+ }
+ m_curStackHt++;
+ GCPROTECT_END();
+}
+
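+// Runs the class initializer for "pMT" if it has not already run. The time spent is exempted
+// from the IL cycle profile, since this is tantamount to a call.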
+void Interpreter::EnsureClassInit(MethodTable* pMT)
+{
+ if (!pMT->IsClassInited())
+ {
+ pMT->CheckRestore();
+ // This is tantamount to a call, so exempt it from the cycle count.
+#if INTERP_ILCYCLE_PROFILE
+ unsigned __int64 startCycles;
+ bool b = CycleTimer::GetThreadCyclesS(&startCycles); assert(b);
+#endif // INTERP_ILCYCLE_PROFILE
+
+ pMT->CheckRunClassInitThrowing();
+
+#if INTERP_ILCYCLE_PROFILE
+ unsigned __int64 endCycles;
+ b = CycleTimer::GetThreadCyclesS(&endCycles); assert(b);
+ m_exemptCycles += (endCycles - startCycles);
+#endif // INTERP_ILCYCLE_PROFILE
+ }
+}
+
+void Interpreter::LdSFldA()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ InterpreterType fldIt;
+ UINT fldSz;
+ bool managedMem;
+ void* srcPtr = NULL;
+ GCPROTECT_BEGININTERIOR(srcPtr);
+
+ StaticFldAddr(CORINFO_ACCESS_ADDRESS, &srcPtr, &fldIt, &fldSz, &managedMem);
+
+ OpStackSet<void*>(m_curStackHt, srcPtr);
+ if (managedMem)
+ {
+ // Static variable in managed memory...
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_BYREF));
+ }
+ else
+ {
+ // RVA is in unmanaged memory.
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
+ }
+ m_curStackHt++;
+
+ GCPROTECT_END();
+}
+
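+// Implements stsfld: pops a value from the operand stack and stores it to a static field.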
+void Interpreter::StSFld()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+ InterpreterType fldIt;
+ UINT sz;
+ bool managedMem;
+ void* dstPtr = NULL;
+ GCPROTECT_BEGININTERIOR(dstPtr);
+
+ StaticFldAddr(CORINFO_ACCESS_SET, &dstPtr, &fldIt, &sz, &managedMem);
+
+ m_curStackHt--;
+ InterpreterType valIt = OpStackTypeGet(m_curStackHt);
+ CorInfoType valCit = valIt.ToCorInfoType();
+
+ if (valCit == CORINFO_TYPE_VALUECLASS)
+ {
+ MethodTable* valClsMT = GetMethodTableFromClsHnd(valIt.ToClassHandle());
+ if (sz > sizeof(INT64))
+ {
+ // Large struct case: value in operand stack is indirect pointer.
+ void* srcPtr = OpStackGet<void*>(m_curStackHt);
+ CopyValueClassUnchecked(dstPtr, srcPtr, valClsMT);
+ LargeStructOperandStackPop(sz, srcPtr);
+ }
+ else
+ {
+ // Struct value is inline in the operand stack.
+ CopyValueClassUnchecked(dstPtr, OpStackGetAddr(m_curStackHt, sz), valClsMT);
+ }
+ }
+ else if (valCit == CORINFO_TYPE_CLASS)
+ {
+ SetObjectReferenceUnchecked(reinterpret_cast<OBJECTREF*>(dstPtr), ObjectToOBJECTREF(OpStackGet<Object*>(m_curStackHt)));
+ }
+ else
+ {
+ switch (sz)
+ {
+ case 1:
+ *reinterpret_cast<UINT8*>(dstPtr) = OpStackGet<UINT8>(m_curStackHt);
+ break;
+ case 2:
+ *reinterpret_cast<UINT16*>(dstPtr) = OpStackGet<UINT16>(m_curStackHt);
+ break;
+ case 4:
+ *reinterpret_cast<UINT32*>(dstPtr) = OpStackGet<UINT32>(m_curStackHt);
+ break;
+ case 8:
+ *reinterpret_cast<UINT64*>(dstPtr) = OpStackGet<UINT64>(m_curStackHt);
+ break;
+ default:
+ _ASSERTE_MSG(false, "This should have exhausted all the possible sizes.");
+ break;
+ }
+ }
+ GCPROTECT_END();
+
+ BarrierIfVolatile();
+}
+
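+// Implements the type-specific ldelem.* instructions. "T" is the element representation
+// type, "IsObjType" selects the object-reference path (ldelem.ref), and "cit" is the
+// stack-normal type pushed for the loaded element.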
+template<typename T, bool IsObjType, CorInfoType cit>
+void Interpreter::LdElemWithType()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 2);
+ unsigned arrInd = m_curStackHt - 2;
+ unsigned indexInd = m_curStackHt - 1;
+
+ assert(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
+
+ ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
+ ThrowOnInvalidPointer(a);
+ int len = a->GetNumComponents();
+
+ CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
+ if (indexCit == CORINFO_TYPE_INT)
+ {
+ int index = OpStackGet<INT32>(indexInd);
+ if (index < 0 || index >= len) ThrowArrayBoundsException();
+
+ GCX_FORBID();
+
+ if (IsObjType)
+ {
+ OBJECTREF res = reinterpret_cast<PtrArray*>(a)->GetAt(index);
+ OpStackSet<OBJECTREF>(arrInd, res);
+ }
+ else
+ {
+ T res = reinterpret_cast<Array<T>*>(a)->GetDirectConstPointerToNonObjectElements()[index];
+ if (cit == CORINFO_TYPE_INT)
+ {
+ // Widen narrow types.
+ int ires = (int)res;
+ OpStackSet<int>(arrInd, ires);
+ }
+ else
+ {
+ OpStackSet<T>(arrInd, res);
+ }
+ }
+ }
+ else
+ {
+ assert(indexCit == CORINFO_TYPE_NATIVEINT);
+ NativeInt index = OpStackGet<NativeInt>(indexInd);
+ if (index < 0 || index >= NativeInt(len)) ThrowArrayBoundsException();
+
+ GCX_FORBID();
+
+ if (IsObjType)
+ {
+ OBJECTREF res = reinterpret_cast<PtrArray*>(a)->GetAt(index);
+ OpStackSet<OBJECTREF>(arrInd, res);
+ }
+ else
+ {
+ T res = reinterpret_cast<Array<T>*>(a)->GetDirectConstPointerToNonObjectElements()[index];
+ OpStackSet<T>(arrInd, res);
+ }
+ }
+
+ OpStackTypeSet(arrInd, InterpreterType(cit));
+ m_curStackHt--;
+}
+
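+// Implements the type-specific stelem.* instructions. For stelem.ref ("IsObjType"), the
+// array-covariance type check is performed before the store.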
+template<typename T, bool IsObjType>
+void Interpreter::StElemWithType()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+
+ assert(m_curStackHt >= 3);
+ unsigned arrInd = m_curStackHt - 3;
+ unsigned indexInd = m_curStackHt - 2;
+ unsigned valInd = m_curStackHt - 1;
+
+ assert(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
+
+ ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
+ ThrowOnInvalidPointer(a);
+ int len = a->GetNumComponents();
+
+ CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
+ if (indexCit == CORINFO_TYPE_INT)
+ {
+ int index = OpStackGet<INT32>(indexInd);
+ if (index < 0 || index >= len) ThrowArrayBoundsException();
+ if (IsObjType)
+ {
+ struct _gc {
+ OBJECTREF val;
+ OBJECTREF a;
+ } gc;
+ gc.val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
+ gc.a = ObjectToOBJECTREF(a);
+ GCPROTECT_BEGIN(gc);
+ if (gc.val != NULL &&
+ !ObjIsInstanceOf(OBJECTREFToObject(gc.val), reinterpret_cast<PtrArray*>(a)->GetArrayElementTypeHandle()))
+ COMPlusThrow(kArrayTypeMismatchException);
+ reinterpret_cast<PtrArray*>(OBJECTREFToObject(gc.a))->SetAt(index, gc.val);
+ GCPROTECT_END();
+ }
+ else
+ {
+ GCX_FORBID();
+ T val = OpStackGet<T>(valInd);
+ reinterpret_cast<Array<T>*>(a)->GetDirectPointerToNonObjectElements()[index] = val;
+ }
+ }
+ else
+ {
+ assert(indexCit == CORINFO_TYPE_NATIVEINT);
+ NativeInt index = OpStackGet<NativeInt>(indexInd);
+ if (index < 0 || index >= NativeInt(len)) ThrowArrayBoundsException();
+ if (IsObjType)
+ {
+ struct _gc {
+ OBJECTREF val;
+ OBJECTREF a;
+ } gc;
+ gc.val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
+ gc.a = ObjectToOBJECTREF(a);
+ GCPROTECT_BEGIN(gc);
+ if (gc.val != NULL &&
+ !ObjIsInstanceOf(OBJECTREFToObject(gc.val), reinterpret_cast<PtrArray*>(a)->GetArrayElementTypeHandle()))
+ COMPlusThrow(kArrayTypeMismatchException);
+ reinterpret_cast<PtrArray*>(OBJECTREFToObject(gc.a))->SetAt(index, gc.val);
+ GCPROTECT_END();
+ }
+ else
+ {
+ GCX_FORBID();
+ T val = OpStackGet<T>(valInd);
+ reinterpret_cast<Array<T>*>(a)->GetDirectPointerToNonObjectElements()[index] = val;
+ }
+ }
+
+ m_curStackHt -= 3;
+}
+
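+// Implements ldelem (and, when "takeAddress" is true, ldelema) for a token-specified element
+// type. For ldelema of a class-typed element, a dynamic type check is required unless a
+// "readonly." prefix preceded the instruction.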
+template<bool takeAddress>
+void Interpreter::LdElem()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 2);
+ unsigned arrInd = m_curStackHt - 2;
+ unsigned indexInd = m_curStackHt - 1;
+
+ unsigned elemTypeTok = getU4LittleEndian(m_ILCodePtr + 1);
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_LdElem]);
+#endif // INTERP_TRACING
+
+ unsigned ilOffset = CurOffset();
+ CORINFO_CLASS_HANDLE clsHnd = NULL;
+ if (s_InterpreterUseCaching) clsHnd = GetCachedClassHandle(ilOffset);
+
+ if (clsHnd == NULL)
+ {
+
+ CORINFO_RESOLVED_TOKEN elemTypeResolvedTok;
+ {
+ GCX_PREEMP();
+ ResolveToken(&elemTypeResolvedTok, elemTypeTok, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_LdElem));
+ clsHnd = elemTypeResolvedTok.hClass;
+ }
+ if (s_InterpreterUseCaching) CacheClassHandle(ilOffset, clsHnd);
+ }
+
+ CorInfoType elemCit = ::asCorInfoType(clsHnd);
+
+ m_ILCodePtr += 5;
+
+
+ InterpreterType elemIt;
+ if (elemCit == CORINFO_TYPE_VALUECLASS)
+ {
+ elemIt = InterpreterType(&m_interpCeeInfo, clsHnd);
+ }
+ else
+ {
+ elemIt = InterpreterType(elemCit);
+ }
+
+ assert(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
+
+
+ ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
+ ThrowOnInvalidPointer(a);
+ int len = a->GetNumComponents();
+
+ NativeInt index;
+ {
+ GCX_FORBID();
+
+ CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
+ if (indexCit == CORINFO_TYPE_INT)
+ {
+ index = static_cast<NativeInt>(OpStackGet<INT32>(indexInd));
+ }
+ else
+ {
+ assert(indexCit == CORINFO_TYPE_NATIVEINT);
+ index = OpStackGet<NativeInt>(indexInd);
+ }
+ }
+ if (index < 0 || index >= len) ThrowArrayBoundsException();
+
+ bool throwTypeMismatch = false;
+ {
+ void* elemPtr = a->GetDataPtr() + a->GetComponentSize() * index;
+ // elemPtr is now a vulnerable byref.
+ GCX_FORBID();
+
+ if (takeAddress)
+ {
+ // If the element type is a class type, may have to do a type check.
+ if (elemCit == CORINFO_TYPE_CLASS)
+ {
+ // Unless there was a readonly prefix, which removes the need to
+ // do the (dynamic) type check.
+ if (m_readonlyFlag)
+ {
+ // Consume the readonly prefix, and don't do the type check below.
+ m_readonlyFlag = false;
+ }
+ else
+ {
+ PtrArray* pa = reinterpret_cast<PtrArray*>(a);
+ // The element array type must be exactly the referent type of the managed
+ // pointer we'll be creating.
+ if (pa->GetArrayElementTypeHandle() != TypeHandle(clsHnd))
+ {
+ throwTypeMismatch = true;
+ }
+ }
+ }
+ if (!throwTypeMismatch)
+ {
+ // If we're not going to throw the exception, we can take the address.
+ OpStackSet<void*>(arrInd, elemPtr);
+ OpStackTypeSet(arrInd, InterpreterType(CORINFO_TYPE_BYREF));
+ m_curStackHt--;
+ }
+ }
+ else
+ {
+ m_curStackHt -= 2;
+ LdFromMemAddr(elemPtr, elemIt);
+ return;
+ }
+ }
+
+ // If we're going to throw, we do the throw outside the GCX_FORBID region above, since it requires GC_TRIGGERS.
+ if (throwTypeMismatch)
+ {
+ COMPlusThrow(kArrayTypeMismatchException);
+ }
+}
+
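+// Implements stelem with a token-specified element type, performing bounds and (for object
+// references) covariance checks before the store.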
+void Interpreter::StElem()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 3);
+ unsigned arrInd = m_curStackHt - 3;
+ unsigned indexInd = m_curStackHt - 2;
+ unsigned valInd = m_curStackHt - 1;
+
+ CorInfoType valCit = OpStackTypeGet(valInd).ToCorInfoType();
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_StElem]);
+#endif // INTERP_TRACING
+
+ CORINFO_CLASS_HANDLE typeFromTok = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_StElem));
+
+ m_ILCodePtr += 5;
+
+ CorInfoType typeFromTokCit;
+ {
+ GCX_PREEMP();
+ typeFromTokCit = ::asCorInfoType(typeFromTok);
+ }
+ size_t sz;
+
+#ifdef _DEBUG
+ InterpreterType typeFromTokIt;
+#endif // _DEBUG
+
+ if (typeFromTokCit == CORINFO_TYPE_VALUECLASS)
+ {
+ GCX_PREEMP();
+ sz = getClassSize(typeFromTok);
+#ifdef _DEBUG
+ typeFromTokIt = InterpreterType(&m_interpCeeInfo, typeFromTok);
+#endif // _DEBUG
+ }
+ else
+ {
+ sz = CorInfoTypeSize(typeFromTokCit);
+#ifdef _DEBUG
+ typeFromTokIt = InterpreterType(typeFromTokCit);
+#endif // _DEBUG
+ }
+
+#ifdef _DEBUG
+ // Instead of debug, I need to parameterize the interpreter at the top level over whether
+ // to do checks corresponding to verification.
+ if (typeFromTokIt.StackNormalize().ToCorInfoType() != valCit)
+ {
+ // This is obviously only a partial test of the required condition.
+ VerificationError("Value in stelem does not have the required type.");
+ }
+#endif // _DEBUG
+
+ assert(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
+
+ ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
+ ThrowOnInvalidPointer(a);
+ int len = a->GetNumComponents();
+
+ CorInfoType indexCit = OpStackTypeGet(indexInd).ToCorInfoType();
+ NativeInt index = 0;
+ if (indexCit == CORINFO_TYPE_INT)
+ {
+ index = static_cast<NativeInt>(OpStackGet<INT32>(indexInd));
+ }
+ else
+ {
+ index = OpStackGet<NativeInt>(indexInd);
+ }
+
+ if (index < 0 || index >= len) ThrowArrayBoundsException();
+
+ if (typeFromTokCit == CORINFO_TYPE_CLASS)
+ {
+ struct _gc {
+ OBJECTREF val;
+ OBJECTREF a;
+ } gc;
+ gc.val = ObjectToOBJECTREF(OpStackGet<Object*>(valInd));
+ gc.a = ObjectToOBJECTREF(a);
+ GCPROTECT_BEGIN(gc);
+ if (gc.val != NULL &&
+ !ObjIsInstanceOf(OBJECTREFToObject(gc.val), reinterpret_cast<PtrArray*>(a)->GetArrayElementTypeHandle()))
+ COMPlusThrow(kArrayTypeMismatchException);
+ reinterpret_cast<PtrArray*>(OBJECTREFToObject(gc.a))->SetAt(index, gc.val);
+ GCPROTECT_END();
+ }
+ else
+ {
+ GCX_FORBID();
+
+ void* destPtr = a->GetDataPtr() + index * sz;
+
+ if (typeFromTokCit == CORINFO_TYPE_VALUECLASS)
+ {
+ MethodTable* valClsMT = GetMethodTableFromClsHnd(OpStackTypeGet(valInd).ToClassHandle());
+ // CopyValueClassUnchecked is used below to ensure that write barriers happen for the case in
+ // which the value class contains GC pointers. We could do better...
+ if (sz > sizeof(UINT64))
+ {
+ // Large struct case: stack slot contains pointer...
+ void* src = OpStackGet<void*>(valInd);
+ CopyValueClassUnchecked(destPtr, src, valClsMT);
+ LargeStructOperandStackPop(sz, src);
+ }
+ else
+ {
+ // Small struct case -- the value is stored inline in the operand stack.
+ CopyValueClassUnchecked(destPtr, OpStackGetAddr(valInd, sz), valClsMT);
+ }
+ }
+ else
+ {
+ switch (sz)
+ {
+ case 1:
+ *reinterpret_cast<INT8*>(destPtr) = OpStackGet<INT8>(valInd);
+ break;
+ case 2:
+ *reinterpret_cast<INT16*>(destPtr) = OpStackGet<INT16>(valInd);
+ break;
+ case 4:
+ *reinterpret_cast<INT32*>(destPtr) = OpStackGet<INT32>(valInd);
+ break;
+ case 8:
+ *reinterpret_cast<INT64*>(destPtr) = OpStackGet<INT64>(valInd);
+ break;
+ }
+ }
+ }
+
+ m_curStackHt -= 3;
+}
+
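+// Implements initblk: pops an address, a byte value, and a size, and sets "size" bytes at the
+// address to the value.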
+void Interpreter::InitBlk()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 3);
+ unsigned addrInd = m_curStackHt - 3;
+ unsigned valInd = m_curStackHt - 2;
+ unsigned sizeInd = m_curStackHt - 1;
+
+#ifdef _DEBUG
+ CorInfoType addrCIT = OpStackTypeGet(addrInd).ToCorInfoType();
+ bool addrValidType = (addrCIT == CORINFO_TYPE_NATIVEINT || addrCIT == CORINFO_TYPE_BYREF);
+#if defined(_AMD64_)
+ if (s_InterpreterLooseRules && addrCIT == CORINFO_TYPE_LONG)
+ addrValidType = true;
+#endif
+ if (!addrValidType)
+ VerificationError("Addr of InitBlk must be native int or &.");
+
+ CorInfoType valCIT = OpStackTypeGet(valInd).ToCorInfoType();
+ if (valCIT != CORINFO_TYPE_INT)
+ VerificationError("Value of InitBlk must be int");
+
+#endif // _DEBUG
+
+ CorInfoType sizeCIT = OpStackTypeGet(sizeInd).ToCorInfoType();
+ bool isLong = s_InterpreterLooseRules && (sizeCIT == CORINFO_TYPE_LONG);
+
+#ifdef _DEBUG
+ if (sizeCIT != CORINFO_TYPE_INT && !isLong)
+ VerificationError("Size of InitBlk must be int");
+#endif // _DEBUG
+
+ void* addr = OpStackGet<void*>(addrInd);
+ ThrowOnInvalidPointer(addr);
+ GCX_FORBID(); // addr is a potentially vulnerable byref.
+ INT8 val = OpStackGet<INT8>(valInd);
+ size_t size = (size_t) ((isLong) ? OpStackGet<UINT64>(sizeInd) : OpStackGet<UINT32>(sizeInd));
+ memset(addr, val, size);
+
+ m_curStackHt = addrInd;
+ m_ILCodePtr += 2;
+
+ BarrierIfVolatile();
+}
+
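+// Implements cpblk: pops a destination address, a source address, and a size, and copies
+// "size" bytes. The regions are assumed to contain no GC references (memcpyNoGCRefs).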
+void Interpreter::CpBlk()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 3);
+ unsigned destInd = m_curStackHt - 3;
+ unsigned srcInd = m_curStackHt - 2;
+ unsigned sizeInd = m_curStackHt - 1;
+
+#ifdef _DEBUG
+ CorInfoType destCIT = OpStackTypeGet(destInd).ToCorInfoType();
+ bool destValidType = (destCIT == CORINFO_TYPE_NATIVEINT || destCIT == CORINFO_TYPE_BYREF);
+#if defined(_AMD64_)
+ if (s_InterpreterLooseRules && destCIT == CORINFO_TYPE_LONG)
+ destValidType = true;
+#endif
+ if (!destValidType)
+ {
+ VerificationError("Dest addr of CpBlk must be native int or &.");
+ }
+ CorInfoType srcCIT = OpStackTypeGet(srcInd).ToCorInfoType();
+ bool srcValidType = (srcCIT == CORINFO_TYPE_NATIVEINT || srcCIT == CORINFO_TYPE_BYREF);
+#if defined(_AMD64_)
+ if (s_InterpreterLooseRules && srcCIT == CORINFO_TYPE_LONG)
+ srcValidType = true;
+#endif
+ if (!srcValidType)
+ VerificationError("Src addr of CpBlk must be native int or &.");
+#endif // _DEBUG
+
+ CorInfoType sizeCIT = OpStackTypeGet(sizeInd).ToCorInfoType();
+ bool isLong = s_InterpreterLooseRules && (sizeCIT == CORINFO_TYPE_LONG);
+
+#ifdef _DEBUG
+ if (sizeCIT != CORINFO_TYPE_INT && !isLong)
+ VerificationError("Size of CpBlk must be int");
+#endif // _DEBUG
+
+
+ void* destAddr = OpStackGet<void*>(destInd);
+ void* srcAddr = OpStackGet<void*>(srcInd);
+ ThrowOnInvalidPointer(destAddr);
+ ThrowOnInvalidPointer(srcAddr);
+ GCX_FORBID(); // destAddr & srcAddr are potentially vulnerable byrefs.
+ size_t size = (size_t)((isLong) ? OpStackGet<UINT64>(sizeInd) : OpStackGet<UINT32>(sizeInd));
+ memcpyNoGCRefs(destAddr, srcAddr, size);
+
+ m_curStackHt = destInd;
+ m_ILCodePtr += 2;
+
+ BarrierIfVolatile();
+}
+
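+// Implements box. Only value classes require work: the value (inline or on the large-struct
+// stack) is copied into a newly allocated heap object, which replaces it on the operand stack.
+// For reference types box is a no-op, so the operand stack is left unchanged.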
+void Interpreter::Box()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 1);
+ unsigned ind = m_curStackHt - 1;
+
+ DWORD boxTypeAttribs = 0;
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Box]);
+#endif // INTERP_TRACING
+
+ CORINFO_CLASS_HANDLE boxTypeClsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_Box));
+
+ {
+ GCX_PREEMP();
+ boxTypeAttribs = m_interpCeeInfo.getClassAttribs(boxTypeClsHnd);
+ }
+
+ m_ILCodePtr += 5;
+
+ if (boxTypeAttribs & CORINFO_FLG_VALUECLASS)
+ {
+ InterpreterType valIt = OpStackTypeGet(ind);
+
+ void* valPtr;
+ if (valIt.IsLargeStruct(&m_interpCeeInfo))
+ {
+ // Operand stack entry is pointer to the data.
+ valPtr = OpStackGet<void*>(ind);
+ }
+ else
+ {
+ // Operand stack entry *is* the data.
+ size_t classSize = getClassSize(boxTypeClsHnd);
+ valPtr = OpStackGetAddr(ind, classSize);
+ }
+
+ TypeHandle th(boxTypeClsHnd);
+ if (th.IsTypeDesc())
+ {
+ COMPlusThrow(kInvalidOperationException, W("InvalidOperation_TypeCannotBeBoxed"));
+ }
+
+ MethodTable* pMT = th.AsMethodTable();
+ if (pMT->ContainsStackPtr()) // TODO: the call to MethodTable::Box() below also calls ContainsStackPtr(), and throws kInvalidOperationException if it returns true. Should we not call it here?
+ {
+ COMPlusThrow(kInvalidProgramException);
+ }
+
+ {
+ Object* res = OBJECTREFToObject(pMT->Box(valPtr));
+
+ GCX_FORBID();
+
+ // If we're popping a large struct off the operand stack, make sure we clean up.
+ if (valIt.IsLargeStruct(&m_interpCeeInfo))
+ {
+ LargeStructOperandStackPop(valIt.Size(&m_interpCeeInfo), valPtr);
+ }
+ OpStackSet<Object*>(ind, res);
+ OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
+ }
+ }
+}
+
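+// Boxes the value class pointed to by the byref at operand-stack index "ind", replacing the
+// byref with the resulting object reference.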
+void Interpreter::BoxStructRefAt(unsigned ind, CORINFO_CLASS_HANDLE valCls)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ _ASSERTE_MSG(ind < m_curStackHt, "Precondition");
+ {
+ GCX_PREEMP();
+ _ASSERTE_MSG(m_interpCeeInfo.getClassAttribs(valCls) & CORINFO_FLG_VALUECLASS, "Precondition");
+ }
+ _ASSERTE_MSG(OpStackTypeGet(ind).ToCorInfoType() == CORINFO_TYPE_BYREF, "Precondition");
+
+ InterpreterType valIt = InterpreterType(&m_interpCeeInfo, valCls);
+
+ void* valPtr = OpStackGet<void*>(ind);
+
+ TypeHandle th(valCls);
+ if (th.IsTypeDesc())
+ COMPlusThrow(kInvalidOperationException,W("InvalidOperation_TypeCannotBeBoxed"));
+
+ MethodTable * pMT = th.AsMethodTable();
+ if (pMT->ContainsStackPtr())
+ COMPlusThrow(kInvalidProgramException);
+
+ {
+ Object* res = OBJECTREFToObject(pMT->Box(valPtr));
+
+ GCX_FORBID();
+
+ OpStackSet<Object*>(ind, res);
+ OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
+ }
+}
+
+
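+// Implements unbox: replaces the boxed object reference on the top of the stack with a byref
+// to its unboxed payload, throwing InvalidCastException when the types are not compatible.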
+void Interpreter::Unbox()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt > 0);
+ unsigned tos = m_curStackHt - 1;
+
+#ifdef _DEBUG
+ CorInfoType tosCIT = OpStackTypeGet(tos).ToCorInfoType();
+ if (tosCIT != CORINFO_TYPE_CLASS)
+ VerificationError("Unbox requires that TOS is an object pointer.");
+#endif // _DEBUG
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Unbox]);
+#endif // INTERP_TRACING
+
+ CORINFO_CLASS_HANDLE boxTypeClsHnd = GetTypeFromToken(m_ILCodePtr + 1, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_Unbox));
+
+ CorInfoHelpFunc unboxHelper;
+
+ {
+ GCX_PREEMP();
+ unboxHelper = m_interpCeeInfo.getUnBoxHelper(boxTypeClsHnd);
+ }
+
+ void* res = NULL;
+ Object* obj = OpStackGet<Object*>(tos);
+
+ switch (unboxHelper)
+ {
+ case CORINFO_HELP_UNBOX:
+ {
+ ThrowOnInvalidPointer(obj);
+
+ MethodTable* pMT1 = (MethodTable*)boxTypeClsHnd;
+ MethodTable* pMT2 = obj->GetMethodTable();
+
+ if (pMT1->IsEquivalentTo(pMT2))
+ {
+ res = OpStackGet<Object*>(tos)->UnBox();
+ }
+ else
+ {
+ CorElementType type1 = pMT1->GetInternalCorElementType();
+ CorElementType type2 = pMT2->GetInternalCorElementType();
+
+ // We allow enums and their primitive type to be interchangeable.
+ if (type1 == type2)
+ {
+ if ((pMT1->IsEnum() || pMT1->IsTruePrimitive()) &&
+ (pMT2->IsEnum() || pMT2->IsTruePrimitive()))
+ {
+ res = OpStackGet<Object*>(tos)->UnBox();
+ }
+ }
+ }
+
+ if (res == NULL)
+ {
+ COMPlusThrow(kInvalidCastException);
+ }
+ }
+ break;
+
+ case CORINFO_HELP_UNBOX_NULLABLE:
+ {
+ // For "unbox Nullable<T>", we need to create a new object (maybe in some temporary local
+ // space (that we reuse every time we hit this IL instruction?), that gets reported to the GC,
+ // maybe in the GC heap itself). That object will contain an embedded Nullable<T>. Then, we need to
+ // get a byref to the data within the object.
+
+ NYI_INTERP("Unhandled 'unbox' of Nullable<T>.");
+ }
+ break;
+
+ default:
+ NYI_INTERP("Unhandled 'unbox' helper.");
+ }
+
+ {
+ GCX_FORBID();
+ OpStackSet<void*>(tos, res);
+ OpStackTypeSet(tos, InterpreterType(CORINFO_TYPE_BYREF));
+ }
+
+ m_ILCodePtr += 5;
+}
+
+
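+// Implements throw: wraps thrown objects that are not System.Exception-derived (for CLS
+// compliance) and raises the managed exception.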
+void Interpreter::Throw()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 1);
+
+ // Note that we can't decrement the stack height here, since the operand stack
+ // protects the thrown object. Nor do we need to, since the ostack will be cleared on
+ // any catch within this method.
+ unsigned exInd = m_curStackHt - 1;
+
+#ifdef _DEBUG
+ CorInfoType exCIT = OpStackTypeGet(exInd).ToCorInfoType();
+ if (exCIT != CORINFO_TYPE_CLASS)
+ {
+ VerificationError("Can only throw an object.");
+ }
+#endif // _DEBUG
+
+ Object* obj = OpStackGet<Object*>(exInd);
+ ThrowOnInvalidPointer(obj);
+
+ OBJECTREF oref = ObjectToOBJECTREF(obj);
+ if (!IsException(oref->GetMethodTable()))
+ {
+ GCPROTECT_BEGIN(oref);
+ WrapNonCompliantException(&oref);
+ GCPROTECT_END();
+ }
+ COMPlusThrow(oref);
+}
+
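+// Implements rethrow: re-raises the current thread's last thrown object.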
+void Interpreter::Rethrow()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ OBJECTREF throwable = GetThread()->LastThrownObject();
+ COMPlusThrow(throwable);
+}
+
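+// Implements unbox.any: for reference types this is equivalent to castclass; for value
+// classes it is an unbox followed by a ldobj, with Nullable<T> handled by its own helper.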
+void Interpreter::UnboxAny()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt > 0);
+ unsigned tos = m_curStackHt - 1;
+
+ unsigned boxTypeTok = getU4LittleEndian(m_ILCodePtr + 1);
+ m_ILCodePtr += 5;
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_UnboxAny]);
+#endif // INTERP_TRACING
+
+ CORINFO_RESOLVED_TOKEN boxTypeResolvedTok;
+ CORINFO_CLASS_HANDLE boxTypeClsHnd;
+ DWORD boxTypeAttribs = 0;
+
+ {
+ GCX_PREEMP();
+ ResolveToken(&boxTypeResolvedTok, boxTypeTok, CORINFO_TOKENKIND_Class InterpTracingArg(RTK_UnboxAny));
+ boxTypeClsHnd = boxTypeResolvedTok.hClass;
+ boxTypeAttribs = m_interpCeeInfo.getClassAttribs(boxTypeClsHnd);
+ }
+
+ CorInfoType unboxCIT = OpStackTypeGet(tos).ToCorInfoType();
+ if (unboxCIT != CORINFO_TYPE_CLASS)
+ VerificationError("Type mismatch in UNBOXANY.");
+
+ if ((boxTypeAttribs & CORINFO_FLG_VALUECLASS) == 0)
+ {
+ Object* obj = OpStackGet<Object*>(tos);
+ if (obj != NULL && !ObjIsInstanceOf(obj, TypeHandle(boxTypeClsHnd)))
+ COMPlusThrowInvalidCastException(&ObjectToOBJECTREF(obj), TypeHandle(boxTypeClsHnd));
+ }
+ else
+ {
+ CorInfoHelpFunc unboxHelper;
+
+ {
+ GCX_PREEMP();
+ unboxHelper = m_interpCeeInfo.getUnBoxHelper(boxTypeClsHnd);
+ }
+
+ // Important that this *not* be factored out with the identical statement in the "if" branch:
+ // delay read from GC-protected operand stack until after COOP-->PREEMP transition above.
+ Object* obj = OpStackGet<Object*>(tos);
+
+ switch (unboxHelper)
+ {
+ case CORINFO_HELP_UNBOX:
+ {
+ ThrowOnInvalidPointer(obj);
+
+ MethodTable* pMT1 = (MethodTable*)boxTypeClsHnd;
+ MethodTable* pMT2 = obj->GetMethodTable();
+
+ void* res = NULL;
+ if (pMT1->IsEquivalentTo(pMT2))
+ {
+ res = OpStackGet<Object*>(tos)->UnBox();
+ }
+ else
+ {
+ CorElementType type1 = pMT1->GetInternalCorElementType();
+ CorElementType type2 = pMT2->GetInternalCorElementType();
+
+ // We allow enums and their primitive type to be interchangeable.
+ if (type1 == type2)
+ {
+ if ((pMT1->IsEnum() || pMT1->IsTruePrimitive()) &&
+ (pMT2->IsEnum() || pMT2->IsTruePrimitive()))
+ {
+ res = OpStackGet<Object*>(tos)->UnBox();
+ }
+ }
+ }
+
+ if (res == NULL)
+ {
+ COMPlusThrow(kInvalidCastException);
+ }
+
+ // As the ECMA spec says, the rest is like a "ldobj".
+ LdObjValueClassWork(boxTypeClsHnd, tos, res);
+ }
+ break;
+
+ case CORINFO_HELP_UNBOX_NULLABLE:
+ {
+ InterpreterType it = InterpreterType(&m_interpCeeInfo, boxTypeClsHnd);
+ size_t sz = it.Size(&m_interpCeeInfo);
+ if (sz > sizeof(INT64))
+ {
+ void* destPtr = LargeStructOperandStackPush(sz);
+ if (!Nullable::UnBox(destPtr, ObjectToOBJECTREF(obj), (MethodTable*)boxTypeClsHnd))
+ {
+ COMPlusThrow(kInvalidCastException);
+ }
+ OpStackSet<void*>(tos, destPtr);
+ }
+ else
+ {
+ INT64 dest = 0;
+ if (!Nullable::UnBox(&dest, ObjectToOBJECTREF(obj), (MethodTable*)boxTypeClsHnd))
+ {
+ COMPlusThrow(kInvalidCastException);
+ }
+ OpStackSet<INT64>(tos, dest);
+ }
+ OpStackTypeSet(tos, it.StackNormalize());
+ }
+ break;
+
+ default:
+ NYI_INTERP("Unhandled 'unbox.any' helper.");
+ }
+ }
+}
+
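+// Implements ldlen: replaces the array reference on the top of the stack with its length.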
+void Interpreter::LdLen()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 1);
+ unsigned arrInd = m_curStackHt - 1;
+
+ assert(OpStackTypeGet(arrInd).ToCorInfoType() == CORINFO_TYPE_CLASS);
+
+ GCX_FORBID();
+
+ ArrayBase* a = OpStackGet<ArrayBase*>(arrInd);
+ ThrowOnInvalidPointer(a);
+ int len = a->GetNumComponents();
+
+ OpStackSet<NativeUInt>(arrInd, NativeUInt(len));
+ // The ECMA spec says that the type of the length value is NATIVEUINT, but this
+ // doesn't make any sense -- unsigned types are not stack-normalized. So I'm
+ // using NATIVEINT, to get the width right.
+ OpStackTypeSet(arrInd, InterpreterType(CORINFO_TYPE_NATIVEINT));
+}
+
+
+void Interpreter::DoCall(bool virtualCall)
+{
+#if INTERP_DYNAMIC_CONTRACTS
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+#else
+ // Dynamic contract occupies too much stack.
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+#endif
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Call]);
+#endif // INTERP_TRACING
+
+ DoCallWork(virtualCall);
+
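+ // Skip the 1-byte call/callvirt opcode and its 4-byte method token.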
+ m_ILCodePtr += 5;
+}
+
+CORINFO_CONTEXT_HANDLE InterpreterMethodInfo::GetPreciseGenericsContext(Object* thisArg, void* genericsCtxtArg)
+{
+ // If the caller has a generic argument, then we need to get the exact methodContext.
+ // There are several possibilities that lead to a generic argument:
+ // 1) Static method of generic class: generic argument is the method table of the class.
+ // 2) generic method of a class: generic argument is the precise MethodDesc* of the method.
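+ // For illustration (hypothetical methods): for "static void C<T>::M()" the generics context is
+ // the method table of the instantiated C<T> (case 1); for "void C::M<U>()" it is the exact
+ // MethodDesc* of the instantiated M<U> (case 2).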
+ if (GetFlag<InterpreterMethodInfo::Flag_hasGenericsContextArg>())
+ {
+ assert(GetFlag<InterpreterMethodInfo::Flag_methHasGenericArgs>() || GetFlag<InterpreterMethodInfo::Flag_typeHasGenericArgs>());
+ if (GetFlag<InterpreterMethodInfo::Flag_methHasGenericArgs>())
+ {
+ return MAKE_METHODCONTEXT(reinterpret_cast<CORINFO_METHOD_HANDLE>(genericsCtxtArg));
+ }
+ else
+ {
+ MethodTable* methodClass = reinterpret_cast<MethodDesc*>(m_method)->GetMethodTable();
+ MethodTable* contextClass = reinterpret_cast<MethodTable*>(genericsCtxtArg)->GetMethodTableMatchingParentClass(methodClass);
+ return MAKE_CLASSCONTEXT(contextClass);
+ }
+ }
+ // TODO: This condition isn't quite right. If the actual class is a subtype of the declaring type of the method,
+ // then it might be in another module, and the scope and context won't agree.
+ else if (GetFlag<InterpreterMethodInfo::Flag_typeHasGenericArgs>()
+ && !GetFlag<InterpreterMethodInfo::Flag_methHasGenericArgs>()
+ && GetFlag<InterpreterMethodInfo::Flag_hasThisArg>()
+ && GetFlag<InterpreterMethodInfo::Flag_thisArgIsObjPtr>() && thisArg != NULL)
+ {
+ MethodTable* methodClass = reinterpret_cast<MethodDesc*>(m_method)->GetMethodTable();
+ MethodTable* contextClass = thisArg->GetMethodTable()->GetMethodTableMatchingParentClass(methodClass);
+ return MAKE_CLASSCONTEXT(contextClass);
+ }
+ else
+ {
+ return MAKE_METHODCONTEXT(m_method);
+ }
+}
+
+void Interpreter::DoCallWork(bool virtualCall, void* thisArg, CORINFO_RESOLVED_TOKEN* methTokPtr, CORINFO_CALL_INFO* callInfoPtr)
+{
+#if INTERP_DYNAMIC_CONTRACTS
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+#else
+ // Dynamic contract occupies too much stack.
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+#endif
+
+#if INTERP_ILCYCLE_PROFILE
+#if 0
+ // XXX
+ unsigned __int64 callStartCycles;
+ bool b = CycleTimer::GetThreadCyclesS(&callStartCycles); assert(b);
+ unsigned __int64 callStartExemptCycles = m_exemptCycles;
+#endif
+#endif // INTERP_ILCYCLE_PROFILE
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_totalInterpCalls);
+#endif // INTERP_TRACING
+ unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(byte));
+
+ // It's possible for an IL method to push a capital-F Frame. If so, we pop it and save it;
+ // we'll push it back on after our GCPROTECT frame is popped.
+ Frame* ilPushedFrame = NULL;
+
+ // We can't protect "thisArg" with a GCPROTECT, because this pushes a Frame, and there
+ // exist managed methods that push (and pop) Frames -- so that the Frame chain does not return
+ // to its original state after a call. Therefore, we can't have a Frame on the stack over the duration
+ // of a call. (I assume that any method that calls a Frame-pushing IL method performs a matching
+ // call to pop that Frame before the caller method completes. If this were not true, if one method could push
+ // a Frame, but defer the pop to its caller, then we could *never* use a Frame in the interpreter, and
+ // our implementation plan would be doomed.)
+ assert(m_callThisArg == NULL);
+ m_callThisArg = thisArg;
+
+ // Have we already cached a MethodDescCallSite for this call? (We cache only for call sites
+ // in loops, within the current execution.)
+ unsigned iloffset = CurOffset();
+ CallSiteCacheData* pCscd = NULL;
+ if (s_InterpreterUseCaching) pCscd = GetCachedCallInfo(iloffset);
+
+ // If this is true, then we should not cache this call site.
+ bool doNotCache;
+
+ CORINFO_RESOLVED_TOKEN methTok;
+ CORINFO_CALL_INFO callInfo;
+ MethodDesc* methToCall = NULL;
+ CORINFO_CLASS_HANDLE exactClass = NULL;
+ CORINFO_SIG_INFO_SMALL sigInfo;
+ if (pCscd != NULL)
+ {
+ GCX_PREEMP();
+ methToCall = pCscd->m_pMD;
+ sigInfo = pCscd->m_sigInfo;
+
+ doNotCache = true; // We already have a cache entry.
+ }
+ else
+ {
+ doNotCache = false; // Until we determine otherwise.
+ if (callInfoPtr == NULL)
+ {
+ GCX_PREEMP();
+
+ // callInfoPtr and methTokPtr must either both be NULL, or neither.
+ assert(methTokPtr == NULL);
+
+ methTokPtr = &methTok;
+ ResolveToken(methTokPtr, tok, CORINFO_TOKENKIND_Method InterpTracingArg(RTK_Call));
+ OPCODE opcode = (OPCODE)(*m_ILCodePtr);
+
+ m_interpCeeInfo.getCallInfo(methTokPtr,
+ m_constrainedFlag ? & m_constrainedResolvedToken : NULL,
+ m_methInfo->m_method,
+ //this is how impImportCall invokes getCallInfo
+ combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM,
+ CORINFO_CALLINFO_SECURITYCHECKS),
+ (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
+ : CORINFO_CALLINFO_NONE),
+ &callInfo);
+#if INTERP_ILCYCLE_PROFILE
+#if 0
+ if (virtualCall)
+ {
+ unsigned __int64 callEndCycles;
+ b = CycleTimer::GetThreadCyclesS(&callEndCycles); assert(b);
+ unsigned __int64 delta = (callEndCycles - callStartCycles);
+ delta -= (m_exemptCycles - callStartExemptCycles);
+ s_callCycles += delta;
+ s_calls++;
+ }
+#endif
+#endif // INTERP_ILCYCLE_PROFILE
+
+ callInfoPtr = &callInfo;
+
+ assert(!callInfoPtr->exactContextNeedsRuntimeLookup);
+
+ methToCall = reinterpret_cast<MethodDesc*>(methTok.hMethod);
+ exactClass = methTok.hClass;
+ }
+ else
+ {
+ // callInfoPtr and methTokPtr must either both be NULL, or neither.
+ assert(methTokPtr != NULL);
+
+ assert(!callInfoPtr->exactContextNeedsRuntimeLookup);
+
+ methToCall = reinterpret_cast<MethodDesc*>(callInfoPtr->hMethod);
+ exactClass = methTokPtr->hClass;
+ }
+
+ // We used to take the sigInfo from the callInfo here, but that isn't precise, since
+ // we may have made "methToCall" more precise wrt generics than the method handle in
+ // the callinfo. So look up the more precise signature.
+ GCX_PREEMP();
+
+ CORINFO_SIG_INFO sigInfoFull;
+ m_interpCeeInfo.getMethodSig(CORINFO_METHOD_HANDLE(methToCall), &sigInfoFull);
+ sigInfo.retTypeClass = sigInfoFull.retTypeClass;
+ sigInfo.numArgs = sigInfoFull.numArgs;
+ sigInfo.callConv = sigInfoFull.callConv;
+ sigInfo.retType = sigInfoFull.retType;
+ }
+
+ // Point A in our cycle count.
+
+
+ // Is the method an intrinsic? If so, and if it's one we've written special-case code for,
+ // handle it intrinsically.
+ CorInfoIntrinsics intrinsicId;
+ {
+ GCX_PREEMP();
+ intrinsicId = m_interpCeeInfo.getIntrinsicID(CORINFO_METHOD_HANDLE(methToCall));
+ }
+
+#if INTERP_TRACING
+ if (intrinsicId != CORINFO_INTRINSIC_Illegal)
+ InterlockedIncrement(&s_totalInterpCallsToIntrinsics);
+#endif // INTERP_TRACING
+ bool didIntrinsic = false;
+ if (!m_constrainedFlag)
+ {
+ switch (intrinsicId)
+ {
+ case CORINFO_INTRINSIC_StringLength:
+ DoStringLength(); didIntrinsic = true;
+ break;
+ case CORINFO_INTRINSIC_StringGetChar:
+ DoStringGetChar(); didIntrinsic = true;
+ break;
+ case CORINFO_INTRINSIC_GetTypeFromHandle:
+ // This is an identity transformation. (At least until I change LdToken to
+ // return a RuntimeTypeHandle struct...which is a TODO.)
+ DoGetTypeFromHandle();
+ didIntrinsic = true;
+ break;
+#if INTERP_ILSTUBS
+ case CORINFO_INTRINSIC_StubHelpers_GetStubContext:
+ OpStackSet<void*>(m_curStackHt, GetStubContext());
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
+ m_curStackHt++; didIntrinsic = true;
+ break;
+ case CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr:
+ OpStackSet<void*>(m_curStackHt, GetStubContextAddr());
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_NATIVEINT));
+ m_curStackHt++; didIntrinsic = true;
+ break;
+#endif // INTERP_ILSTUBS
+ default:
+#if INTERP_TRACING
+ InterlockedIncrement(&s_totalInterpCallsToIntrinsicsUnhandled);
+#endif // INTERP_TRACING
+ break;
+ }
+
+ // Plus some other calls that we're going to treat "like" intrinsics...
+ if (methToCall == MscorlibBinder::GetMethod(METHOD__STUBHELPERS__SET_LAST_ERROR))
+ {
+ // If we're interpreting a method that calls "SetLastError", it's very likely that the call(i) whose
+ // error we're trying to capture was performed with MethodDescCallSite machinery that itself trashes
+ // the last error. We solve this by saving the last error in a special interpreter-specific field of
+ // "Thread" in that case, and essentially implement SetLastError here, taking that field as the
+ // source for the last error.
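+ // (Assumption in this sketch: m_dwLastErrorInterp was captured right after the preceding native
+ // call returned, before any MethodDescCallSite machinery could trash GetLastError(); the line
+ // below simply publishes that saved value as the thread's last error.)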
+ Thread* thrd = GetThread();
+ thrd->m_dwLastError = thrd->m_dwLastErrorInterp;
+ didIntrinsic = true;
+ }
+ }
+
+ if (didIntrinsic)
+ {
+ if (s_InterpreterUseCaching && !doNotCache)
+ {
+ // Cache the token resolution result...
+ pCscd = new CallSiteCacheData(methToCall, sigInfo);
+ CacheCallInfo(iloffset, pCscd);
+ }
+ // Now we can return.
+ return;
+ }
+
+ // Handle other simple special cases:
+
+#if FEATURE_INTERPRETER_DEADSIMPLE_OPT
+#ifndef DACCESS_COMPILE
+ // Dead simple static getters.
+ InterpreterMethodInfo* calleeInterpMethInfo;
+ if (GetMethodHandleToInterpMethInfoPtrMap()->Lookup(CORINFO_METHOD_HANDLE(methToCall), &calleeInterpMethInfo))
+ {
+ if (calleeInterpMethInfo->GetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetter>())
+ {
+ if (methToCall->IsStatic())
+ {
+ // TODO
+ }
+ else
+ {
+ ILOffsetToItemCache* calleeCache;
+ {
+ Object* thisArg = OpStackGet<Object*>(m_curStackHt-1);
+ GCX_FORBID();
+ // We pass NULL for the generic context arg, because a dead simple getter takes none, by definition.
+ calleeCache = calleeInterpMethInfo->GetCacheForCall(thisArg, /*genericsContextArg*/NULL);
+ }
+ // We've interpreted the getter at least once, so the cache for *some* generics context is populated -- but maybe not
+ // this one. We're hoping that it usually is.
+ if (calleeCache != NULL)
+ {
+ CachedItem cachedItem;
+ unsigned offsetOfLd;
+ if (calleeInterpMethInfo->GetFlag<InterpreterMethodInfo::Flag_methIsDeadSimpleGetterIsDbgForm>())
+ offsetOfLd = ILOffsetOfLdFldInDeadSimpleInstanceGetterDbg;
+ else
+ offsetOfLd = ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt;
+
+ bool b = calleeCache->GetItem(offsetOfLd, cachedItem);
+ _ASSERTE_MSG(b, "If the cache exists for this generic context, it should have an entry for the LdFld.");
+ _ASSERTE_MSG(cachedItem.m_tag == CIK_InstanceField, "If it's there, it should be an instance field cache.");
+ LdFld(cachedItem.m_value.m_instanceField);
+#if INTERP_TRACING
+ InterlockedIncrement(&s_totalInterpCallsToDeadSimpleGetters);
+ InterlockedIncrement(&s_totalInterpCallsToDeadSimpleGettersShortCircuited);
+#endif // INTERP_TRACING
+ return;
+ }
+ }
+ }
+ }
+#endif // DACCESS_COMPILE
+#endif // FEATURE_INTERPRETER_DEADSIMPLE_OPT
+
+ unsigned totalSigArgs;
+ CORINFO_VARARGS_HANDLE vaSigCookie = nullptr;
+ if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
+ (sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
+ {
+ GCX_PREEMP();
+ CORINFO_SIG_INFO sig;
+ m_interpCeeInfo.findCallSiteSig(m_methInfo->m_module, methTokPtr->token, MAKE_METHODCONTEXT(m_methInfo->m_method), &sig);
+ sigInfo.retTypeClass = sig.retTypeClass;
+ sigInfo.numArgs = sig.numArgs;
+ sigInfo.callConv = sig.callConv;
+ sigInfo.retType = sig.retType;
+ // Add one for the 'this' pointer, since numArgs doesn't include it.
+ totalSigArgs = sigInfo.numArgs + sigInfo.hasThis();
+
+ if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
+ {
+ Module* module = GetModule(sig.scope);
+ vaSigCookie = CORINFO_VARARGS_HANDLE(module->GetVASigCookie(Signature(sig.pSig, sig.cbSig)));
+ }
+ doNotCache = true;
+ }
+ else
+ {
+ totalSigArgs = sigInfo.totalILArgs();
+ }
+
+ // Note that "totalNativeArgs()" includes space for ret buff arg.
+ unsigned nSlots = totalSigArgs + 1;
+ if (sigInfo.hasTypeArg()) nSlots++;
+ if (sigInfo.isVarArg()) nSlots++;
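+ // Illustrative count (hypothetical signature): for an instance method "int F(int, double)",
+ // totalSigArgs is 3 (two explicit args plus "this"), so nSlots starts at 4 -- one extra slot
+ // is always reserved for a possible ret buff; a type arg or a vararg cookie adds one more each.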
+
+ DelegateCtorArgs ctorData;
+ // If any of these are non-null, they will be pushed as extra arguments (see the code below).
+ ctorData.pArg3 = NULL;
+ ctorData.pArg4 = NULL;
+ ctorData.pArg5 = NULL;
+
+ // Since we make "doNotCache" true below, we'll never have a non-null "pCscd" for a delegate
+ // constructor. But we have to check for a cached method first, since callInfoPtr may be null in the cached case.
+ if (pCscd == NULL && (callInfoPtr->classFlags & CORINFO_FLG_DELEGATE) && (callInfoPtr->methodFlags & CORINFO_FLG_CONSTRUCTOR))
+ {
+ // We won't cache this case.
+ doNotCache = true;
+
+ _ASSERTE_MSG(!sigInfo.hasTypeArg(), "I assume that this isn't possible.");
+ GCX_PREEMP();
+
+ ctorData.pMethod = methToCall;
+
+ // Second argument to delegate constructor will be code address of the function the delegate wraps.
+ assert(TOSIsPtr() && OpStackTypeGet(m_curStackHt-1).ToCorInfoType() != CORINFO_TYPE_BYREF);
+ CORINFO_METHOD_HANDLE targetMethodHnd = GetFunctionPointerStack()[m_curStackHt-1];
+ assert(targetMethodHnd != NULL);
+ CORINFO_METHOD_HANDLE alternateCtorHnd = m_interpCeeInfo.GetDelegateCtor(reinterpret_cast<CORINFO_METHOD_HANDLE>(methToCall), methTokPtr->hClass, targetMethodHnd, &ctorData);
+ MethodDesc* alternateCtor = reinterpret_cast<MethodDesc*>(alternateCtorHnd);
+ if (alternateCtor != methToCall)
+ {
+ methToCall = alternateCtor;
+
+ // Translate the method address argument from a method handle to the actual callable code address.
+ void* val = (void *)((MethodDesc *)targetMethodHnd)->GetMultiCallableAddrOfCode();
+ // Change the method argument to the code pointer.
+ OpStackSet<void*>(m_curStackHt-1, val);
+
+ // Now if there are extra arguments, add them to the number of slots; we'll push them on the
+ // arg list later.
+ if (ctorData.pArg3) nSlots++;
+ if (ctorData.pArg4) nSlots++;
+ if (ctorData.pArg5) nSlots++;
+ }
+ }
+
+ // Make sure that the operand stack has the required number of arguments.
+ // (Note that this is IL args, not native.)
+ //
+
+ // The total number of arguments on the IL stack. Initially we assume that all the IL arguments
+ // the callee expects are on the stack, but may be adjusted downwards if the "this" argument
+ // is provided by an allocation (the call is to a constructor).
+ unsigned totalArgsOnILStack = totalSigArgs;
+ if (m_callThisArg != NULL)
+ {
+ assert(totalArgsOnILStack > 0);
+ totalArgsOnILStack--;
+ }
+
+#if defined(FEATURE_HFA)
+ // Does the callee have an HFA return type?
+ unsigned HFAReturnArgSlots = 0;
+ {
+ GCX_PREEMP();
+
+ if (sigInfo.retType == CORINFO_TYPE_VALUECLASS
+ && CorInfoTypeIsFloatingPoint(m_interpCeeInfo.getHFAType(sigInfo.retTypeClass))
+ && (sigInfo.getCallConv() & CORINFO_CALLCONV_VARARG) == 0)
+ {
+ HFAReturnArgSlots = getClassSize(sigInfo.retTypeClass);
+ // Convert the byte size to a count of ARG_SLOTs, rounding up.
+ HFAReturnArgSlots = (HFAReturnArgSlots + sizeof(ARG_SLOT) - 1) / sizeof(ARG_SLOT);
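+ // For example (assuming a four-float HFA of 16 bytes): getClassSize returns 16, which the line
+ // above converts to 2 ARG_SLOTs of return-buffer space.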
+ }
+ }
+#endif
+
+ // Point B
+
+ const unsigned LOCAL_ARG_SLOTS = 8;
+ ARG_SLOT localArgs[LOCAL_ARG_SLOTS];
+ InterpreterType localArgTypes[LOCAL_ARG_SLOTS];
+
+ ARG_SLOT* args;
+ InterpreterType* argTypes;
+#if defined(_X86_)
+ unsigned totalArgSlots = nSlots;
+#elif defined(_ARM_) || defined(_ARM64_)
+ // ARM64TODO: Verify that the following statement is correct for ARM64.
+ unsigned totalArgSlots = nSlots + HFAReturnArgSlots;
+#elif defined(_AMD64_)
+ unsigned totalArgSlots = nSlots;
+#else
+#error "unsupported platform"
+#endif
+
+ if (totalArgSlots <= LOCAL_ARG_SLOTS)
+ {
+ args = &localArgs[0];
+ argTypes = &localArgTypes[0];
+ }
+ else
+ {
+ args = (ARG_SLOT*)_alloca(totalArgSlots * sizeof(ARG_SLOT));
+#if defined(_ARM_)
+ // The HFA return buffer, if any, is assumed to be at a negative
+ // offset from the IL arg pointer, so adjust that pointer upward.
+ args = args + HFAReturnArgSlots;
+#endif // defined(_ARM_)
+ argTypes = (InterpreterType*)_alloca(nSlots * sizeof(InterpreterType));
+ }
+ // Make sure that we don't scan any of these until we overwrite them with
+ // the real types of the arguments.
+ InterpreterType undefIt(CORINFO_TYPE_UNDEF);
+ for (unsigned i = 0; i < nSlots; i++) argTypes[i] = undefIt;
+
+ // GC-protect the argument array (as byrefs).
+ m_args = args; m_argsSize = nSlots; m_argTypes = argTypes;
+
+ // This is the index into the "args" array (where we copy the value to).
+ int curArgSlot = 0;
+
+ // The operand stack index of the first IL argument.
+ assert(m_curStackHt >= totalArgsOnILStack);
+ int argsBase = m_curStackHt - totalArgsOnILStack;
+
+ // Current on-stack argument index.
+ unsigned arg = 0;
+
+ // We do "this" -- in the case of a constructor, we "shuffle" the "m_callThisArg" argument in as the first
+ // argument -- it isn't on the IL operand stack.
+
+ if (m_constrainedFlag)
+ {
+ _ASSERT(m_callThisArg == NULL); // "m_callThisArg" non-null only for .ctor, which are not callvirts.
+
+ CorInfoType argCIT = OpStackTypeGet(argsBase + arg).ToCorInfoType();
+ if (argCIT != CORINFO_TYPE_BYREF)
+ VerificationError("This arg of constrained call must be managed pointer.");
+
+ // We only cache for the CORINFO_NO_THIS_TRANSFORM case, so we may assume that if we have a cached call site,
+ // there's no thisTransform to perform.
+ if (pCscd == NULL)
+ {
+ switch (callInfoPtr->thisTransform)
+ {
+ case CORINFO_NO_THIS_TRANSFORM:
+ // It is a constrained call on a method implemented by a value type; this is already the proper managed pointer.
+ break;
+
+ case CORINFO_DEREF_THIS:
+#ifdef _DEBUG
+ {
+ GCX_PREEMP();
+ DWORD clsAttribs = m_interpCeeInfo.getClassAttribs(m_constrainedResolvedToken.hClass);
+ assert((clsAttribs & CORINFO_FLG_VALUECLASS) == 0);
+ }
+#endif // _DEBUG
+ {
+ // As per the spec, dereference the byref to the "this" pointer, and substitute it as the new "this" pointer.
+ GCX_FORBID();
+ Object** objPtrPtr = OpStackGet<Object**>(argsBase + arg);
+ OpStackSet<Object*>(argsBase + arg, *objPtrPtr);
+ OpStackTypeSet(argsBase + arg, InterpreterType(CORINFO_TYPE_CLASS));
+ }
+ doNotCache = true;
+ break;
+
+ case CORINFO_BOX_THIS:
+ // This is the case where the call is to a virtual method of Object the given
+ // struct class does not override -- the struct must be boxed, so that the
+ // method can be invoked as a virtual.
+ BoxStructRefAt(argsBase + arg, m_constrainedResolvedToken.hClass);
+ doNotCache = true;
+ break;
+ }
+
+ exactClass = m_constrainedResolvedToken.hClass;
+ {
+ GCX_PREEMP();
+ DWORD exactClassAttribs = m_interpCeeInfo.getClassAttribs(exactClass);
+ // If the constraint type is a value class, then it is the exact class (which will be the
+ // "owner type" in the MDCS below.) If it is not, leave it as the (precise) interface method.
+ if (exactClassAttribs & CORINFO_FLG_VALUECLASS)
+ {
+ MethodTable* exactClassMT = GetMethodTableFromClsHnd(exactClass);
+ // Find the method on exactClass corresponding to methToCall.
+ methToCall = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ reinterpret_cast<MethodDesc*>(callInfoPtr->hMethod), // pPrimaryMD
+ exactClassMT, // pExactMT
+ FALSE, // forceBoxedEntryPoint
+ methToCall->GetMethodInstantiation(), // methodInst
+ FALSE); // allowInstParam
+ }
+ else
+ {
+ exactClass = methTokPtr->hClass;
+ }
+ }
+ }
+
+ // We've consumed the constraint, so reset the flag.
+ m_constrainedFlag = false;
+ }
+
+ if (pCscd == NULL)
+ {
+ if (callInfoPtr->methodFlags & CORINFO_FLG_STATIC)
+ {
+ MethodDesc* pMD = reinterpret_cast<MethodDesc*>(callInfoPtr->hMethod);
+ EnsureClassInit(pMD->GetMethodTable());
+ }
+ }
+
+ // Point C
+
+ // We must do anything that might make a COOP->PREEMP transition before copying arguments out of the
+ // operand stack (where they are GC-protected) into the args array (where they are not).
+#ifdef _DEBUG
+ const char* clsOfMethToCallName;
+ const char* methToCallName = NULL;
+ {
+ GCX_PREEMP();
+ methToCallName = m_interpCeeInfo.getMethodName(CORINFO_METHOD_HANDLE(methToCall), &clsOfMethToCallName);
+ }
+#if INTERP_TRACING
+ if (strncmp(methToCallName, "get_", 4) == 0)
+ {
+ InterlockedIncrement(&s_totalInterpCallsToGetters);
+ size_t offsetOfLd;
+ if (IsDeadSimpleGetter(&m_interpCeeInfo, methToCall, &offsetOfLd))
+ {
+ InterlockedIncrement(&s_totalInterpCallsToDeadSimpleGetters);
+ }
+ }
+ else if (strncmp(methToCallName, "set_", 4) == 0)
+ {
+ InterlockedIncrement(&s_totalInterpCallsToSetters);
+ }
+#endif // INTERP_TRACING
+
+ // Only do this check on the first call, since it should be the same each time.
+ if (pCscd == NULL)
+ {
+ // Ensure that any value types used as argument types are loaded. This property is checked
+ // by the MethodDescCall site mechanisms. Since enums are freely convertible with their underlying
+ // integer type, this is at least one case where a caller may push a value convertible to a value type
+ // without any code having caused the value type to be loaded. This is DEBUG-only because if the callee
+ // treats the integer-typed value as the enum value type, it will already have loaded the value type.
+ MetaSig ms(methToCall);
+ CorElementType argType;
+ while ((argType = ms.NextArg()) != ELEMENT_TYPE_END)
+ {
+ if (argType == ELEMENT_TYPE_VALUETYPE)
+ {
+ TypeHandle th = ms.GetLastTypeHandleThrowing(ClassLoader::LoadTypes);
+ CONSISTENCY_CHECK(th.CheckFullyLoaded());
+ CONSISTENCY_CHECK(th.IsRestored_NoLogging());
+ }
+ }
+ }
+#endif
+
+ // CYCLE PROFILE: BEFORE ARG PROCESSING.
+
+ if (sigInfo.hasThis())
+ {
+ if (m_callThisArg != NULL)
+ {
+ if (size_t(m_callThisArg) == 0x1)
+ {
+ args[curArgSlot] = NULL;
+ }
+ else
+ {
+ args[curArgSlot] = PtrToArgSlot(m_callThisArg);
+ }
+ argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_BYREF);
+ }
+ else
+ {
+ args[curArgSlot] = PtrToArgSlot(OpStackGet<void*>(argsBase + arg));
+ argTypes[curArgSlot] = OpStackTypeGet(argsBase + arg);
+ arg++;
+ }
+ // AV -> NullRef translation is NYI for the interpreter,
+ // so we should manually check and throw the correct exception.
+ if (args[curArgSlot] == NULL)
+ {
+ // If we're calling a constructor, we bypass this check since the runtime
+ // should have thrown OOM if it was unable to allocate an instance.
+ if (m_callThisArg == NULL)
+ {
+ assert(!methToCall->IsStatic());
+ ThrowNullPointerException();
+ }
+ // ...except in the case of strings, which are both
+ // allocated and initialized by their special constructor.
+ else
+ {
+ assert(methToCall->IsCtor() && methToCall->GetMethodTable()->IsString());
+ }
+ }
+ curArgSlot++;
+ }
+
+ // This is the argument slot that will be used to hold the return value.
+ ARG_SLOT retVal = 0;
+
+ // If the return type is a structure, then these will be initialized.
+ CORINFO_CLASS_HANDLE retTypeClsHnd = NULL;
+ InterpreterType retTypeIt;
+ size_t retTypeSz = 0;
+
+ // If non-null, space allocated to hold a large struct return value. It is _alloca'd, so it is
+ // released automatically when this frame exits.
+ // (I could probably optimize this by popping all the arguments first, then allocating space for the
+ // return value on the large structure operand stack, and passing a pointer directly to that space,
+ // avoiding the extra copy we have below. But this seemed more expedient, and this should be a pretty rare case.)
+ byte* pLargeStructRetVal = NULL;
+
+ // If there's a "GetFlag<Flag_hasRetBuffArg>()" struct return value, it will be stored in this variable if it fits,
+ // otherwise, we'll dynamically allocate memory for it.
+ ARG_SLOT smallStructRetVal = 0;
+
+ // We should have no return buffer temp space registered here...unless this is a constructor, in which
+ // case it will return void. In particular, if the return type is VALUE_CLASS, then this should be NULL.
+ _ASSERTE_MSG((pCscd != NULL) || sigInfo.retType == CORINFO_TYPE_VOID || m_structRetValITPtr == NULL, "Invariant.");
+
+ // Is the return value a struct with a ret buff?
+ _ASSERTE_MSG(methToCall != NULL, "assumption");
+ bool hasRetBuffArg = false;
+ if (sigInfo.retType == CORINFO_TYPE_VALUECLASS || sigInfo.retType == CORINFO_TYPE_REFANY)
+ {
+ hasRetBuffArg = !!methToCall->HasRetBuffArg();
+ retTypeClsHnd = sigInfo.retTypeClass;
+
+ MetaSig ms(methToCall);
+
+
+ // On ARM, if there's an HFA return type, we must also allocate a return buffer, since the
+ // MDCS calling convention requires it.
+ if (hasRetBuffArg
+#if defined(_ARM_)
+ || HFAReturnArgSlots > 0
+#endif // defined(_ARM_)
+ )
+ {
+ assert(retTypeClsHnd != NULL);
+ retTypeIt = InterpreterType(&m_interpCeeInfo, retTypeClsHnd);
+ retTypeSz = retTypeIt.Size(&m_interpCeeInfo);
+
+#if defined(_ARM_)
+ if (HFAReturnArgSlots > 0)
+ {
+ args[curArgSlot] = PtrToArgSlot(args - HFAReturnArgSlots);
+ }
+ else
+#endif // defined(_ARM_)
+
+ if (retTypeIt.IsLargeStruct(&m_interpCeeInfo))
+ {
+ size_t retBuffSize = retTypeSz;
+ // If the target architecture can sometimes return a struct in several registers,
+ // MethodDescCallSite will reserve a return value array big enough to hold the maximum.
+ // It will then copy *all* of this into the return buffer area we allocate. So make sure
+ // we allocate at least that much.
+#ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
+ retBuffSize = max(retTypeSz, ENREGISTERED_RETURNTYPE_MAXSIZE);
+#endif // ENREGISTERED_RETURNTYPE_MAXSIZE
+ pLargeStructRetVal = (byte*)_alloca(retBuffSize);
+ // Clear this in case a GC happens.
+ for (unsigned i = 0; i < retTypeSz; i++) pLargeStructRetVal[i] = 0;
+ // Register this as location needing GC.
+ m_structRetValTempSpace = pLargeStructRetVal;
+ // Set it as the return buffer.
+ args[curArgSlot] = PtrToArgSlot(pLargeStructRetVal);
+ }
+ else
+ {
+ // Clear this in case a GC happens.
+ smallStructRetVal = 0;
+ // Register this as location needing GC.
+ m_structRetValTempSpace = &smallStructRetVal;
+ // Set it as the return buffer.
+ args[curArgSlot] = PtrToArgSlot(&smallStructRetVal);
+ }
+ m_structRetValITPtr = &retTypeIt;
+ argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
+ curArgSlot++;
+ }
+ else
+ {
+ // The struct type might "normalize" to a primitive type.
+ if (retTypeClsHnd == NULL)
+ {
+ retTypeIt = InterpreterType(CEEInfo::asCorInfoType(ms.GetReturnTypeNormalized()));
+ }
+ else
+ {
+ retTypeIt = InterpreterType(&m_interpCeeInfo, retTypeClsHnd);
+ }
+ }
+ }
+
+ if (((sigInfo.callConv & CORINFO_CALLCONV_VARARG) != 0) && sigInfo.isVarArg())
+ {
+ assert(vaSigCookie != nullptr);
+ args[curArgSlot] = PtrToArgSlot(vaSigCookie);
+ argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
+ curArgSlot++;
+ }
+
+ if (pCscd == NULL)
+ {
+ if (sigInfo.hasTypeArg())
+ {
+ GCX_PREEMP();
+ // We will find the instantiating stub for the method, and call that instead.
+ CORINFO_SIG_INFO sigInfoFull;
+ Instantiation methodInst = methToCall->GetMethodInstantiation();
+ BOOL fNeedUnboxingStub = virtualCall && TypeHandle(exactClass).IsValueType() && methToCall->IsVirtual();
+ methToCall = MethodDesc::FindOrCreateAssociatedMethodDesc(methToCall,
+ TypeHandle(exactClass).GetMethodTable(), fNeedUnboxingStub, methodInst, FALSE, TRUE);
+ m_interpCeeInfo.getMethodSig(CORINFO_METHOD_HANDLE(methToCall), &sigInfoFull);
+ sigInfo.retTypeClass = sigInfoFull.retTypeClass;
+ sigInfo.numArgs = sigInfoFull.numArgs;
+ sigInfo.callConv = sigInfoFull.callConv;
+ sigInfo.retType = sigInfoFull.retType;
+ }
+
+ if (sigInfo.hasTypeArg())
+ {
+ // If we still have a type argument, we're calling an ArrayOpStub and need to pass the array TypeHandle.
+ assert(methToCall->IsArray());
+ doNotCache = true;
+ args[curArgSlot] = PtrToArgSlot(exactClass);
+ argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
+ curArgSlot++;
+ }
+ }
+
+ // Now we do the non-this arguments.
+ size_t largeStructSpaceToPop = 0;
+ for (; arg < totalArgsOnILStack; arg++)
+ {
+ InterpreterType argIt = OpStackTypeGet(argsBase + arg);
+ size_t sz = OpStackTypeGet(argsBase + arg).Size(&m_interpCeeInfo);
+ switch (sz)
+ {
+ case 1:
+ args[curArgSlot] = OpStackGet<INT8>(argsBase + arg);
+ break;
+ case 2:
+ args[curArgSlot] = OpStackGet<INT16>(argsBase + arg);
+ break;
+ case 4:
+ args[curArgSlot] = OpStackGet<INT32>(argsBase + arg);
+ break;
+ case 8:
+ default:
+ if (sz > 8)
+ {
+ void* srcPtr = OpStackGet<void*>(argsBase + arg);
+ args[curArgSlot] = PtrToArgSlot(srcPtr);
+ if (!IsInLargeStructLocalArea(srcPtr))
+ largeStructSpaceToPop += sz;
+ }
+ else
+ {
+ args[curArgSlot] = OpStackGet<INT64>(argsBase + arg);
+ }
+ break;
+ }
+ argTypes[curArgSlot] = argIt;
+ curArgSlot++;
+ }
+
+ if (ctorData.pArg3)
+ {
+ args[curArgSlot] = PtrToArgSlot(ctorData.pArg3);
+ argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
+ curArgSlot++;
+ }
+ if (ctorData.pArg4)
+ {
+ args[curArgSlot] = PtrToArgSlot(ctorData.pArg4);
+ argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
+ curArgSlot++;
+ }
+ if (ctorData.pArg5)
+ {
+ args[curArgSlot] = PtrToArgSlot(ctorData.pArg5);
+ argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
+ curArgSlot++;
+ }
+
+ // CYCLE PROFILE: AFTER ARG PROCESSING.
+ {
+ Thread* thr = GetThread();
+
+ Object** thisArgHnd = NULL;
+ ARG_SLOT nullThisArg = NULL;
+ if (sigInfo.hasThis())
+ {
+ if (m_callThisArg != NULL)
+ {
+ if (size_t(m_callThisArg) == 0x1)
+ {
+ thisArgHnd = reinterpret_cast<Object**>(&nullThisArg);
+ }
+ else
+ {
+ thisArgHnd = reinterpret_cast<Object**>(&m_callThisArg);
+ }
+ }
+ else
+ {
+ thisArgHnd = OpStackGetAddr<Object*>(argsBase);
+ }
+ }
+
+ Frame* topFrameBefore = thr->GetFrame();
+
+#if INTERP_ILCYCLE_PROFILE
+ unsigned __int64 startCycles;
+#endif // INTERP_ILCYCLE_PROFILE
+
+ // CYCLE PROFILE: BEFORE MDCS CREATION.
+
+ PCODE target = NULL;
+ MethodDesc *exactMethToCall = methToCall;
+
+ // Determine the target of virtual calls.
+ if (virtualCall && methToCall->IsVtableMethod())
+ {
+ PCODE pCode;
+
+ assert(thisArgHnd != NULL);
+ OBJECTREF objRef = ObjectToOBJECTREF(*thisArgHnd);
+ GCPROTECT_BEGIN(objRef);
+ pCode = methToCall->GetMultiCallableAddrOfVirtualizedCode(&objRef, methToCall->GetMethodTable());
+ GCPROTECT_END();
+
+ exactMethToCall = Entry2MethodDesc(pCode, objRef->GetTrueMethodTable());
+ }
+
+ // Compile the target in advance of calling.
+ if (exactMethToCall->IsPointingToPrestub())
+ {
+ MethodTable* dispatchingMT = NULL;
+ if (exactMethToCall->IsVtableMethod())
+ {
+ assert(thisArgHnd != NULL);
+ dispatchingMT = (*thisArgHnd)->GetMethodTable();
+ }
+ GCX_PREEMP();
+ target = exactMethToCall->DoPrestub(dispatchingMT);
+ }
+ else
+ {
+ target = exactMethToCall->GetMethodEntryPoint();
+ }
+
+ // If we're interpreting the method, simply call it directly.
+ if (InterpretationStubToMethodInfo(target) == exactMethToCall)
+ {
+ assert(!exactMethToCall->IsILStub());
+ InterpreterMethodInfo* methInfo = MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE(exactMethToCall));
+ assert(methInfo != NULL);
+#if INTERP_ILCYCLE_PROFILE
+ bool b = CycleTimer::GetThreadCyclesS(&startCycles); assert(b);
+#endif // INTERP_ILCYCLE_PROFILE
+ retVal = InterpretMethodBody(methInfo, true, reinterpret_cast<BYTE*>(args), NULL);
+ pCscd = NULL; // Nothing to cache.
+ }
+ else
+ {
+ MetaSig msig(exactMethToCall);
+ // We've already resolved the virtual call target above, so there is no need to do it again.
+ MethodDescCallSite mdcs(exactMethToCall, &msig, target);
+#if INTERP_ILCYCLE_PROFILE
+ bool b = CycleTimer::GetThreadCyclesS(&startCycles); assert(b);
+#endif // INTERP_ILCYCLE_PROFILE
+ retVal = mdcs.CallTargetWorker(args);
+
+ if (pCscd != NULL)
+ {
+ // There is a check at the end that caches pCscd if it is non-null; this call site was
+ // already cached, so set pCscd to NULL here to make sure we don't cache it again.
+ pCscd = NULL;
+ }
+ else
+ {
+ // For now, we won't cache virtual calls to virtual methods.
+ // TODO: fix this somehow.
+ if (virtualCall && (callInfoPtr->methodFlags & CORINFO_FLG_VIRTUAL)) doNotCache = true;
+
+ if (s_InterpreterUseCaching && !doNotCache)
+ {
+ // We will add this to the cache later; the locking provokes a GC,
+ // and "retVal" is vulnerable.
+ pCscd = new CallSiteCacheData(exactMethToCall, sigInfo);
+ }
+ }
+ }
+#if INTERP_ILCYCLE_PROFILE
+ unsigned __int64 endCycles;
+ bool b = CycleTimer::GetThreadCyclesS(&endCycles); assert(b);
+ m_exemptCycles += (endCycles - startCycles);
+#endif // INTERP_ILCYCLE_PROFILE
+
+ // retVal is now vulnerable.
+ GCX_FORBID();
+
+ // Some managed methods, believe it or not, can push capital-F Frames on the Frame chain.
+ // The example I've found involves SecurityContextFrame.Push/Pop.
+ // If this happens, executing the EX_CATCH below will pop it, which is bad.
+ // So detect that case, pop the explicitly-pushed frame, and push it again after the EX_CATCH.
+ // (Asserting that there is only 1 such frame!)
+ if (thr->GetFrame() != topFrameBefore)
+ {
+ ilPushedFrame = thr->GetFrame();
+ if (ilPushedFrame != NULL)
+ {
+ ilPushedFrame->Pop(thr);
+ if (thr->GetFrame() != topFrameBefore)
+ {
+ // This wasn't an IL-pushed frame, so restore.
+ ilPushedFrame->Push(thr);
+ ilPushedFrame = NULL;
+ }
+ }
+ }
+ }
+
+ // retVal is still vulnerable.
+ {
+ GCX_FORBID();
+ m_argsSize = 0;
+
+ // At this point, the call has happened successfully. We can delete the arguments from the operand stack.
+ m_curStackHt -= totalArgsOnILStack;
+ // We've already checked that "largeStructSpaceToPop" is consistent with the large-struct operand stack height.
+ LargeStructOperandStackPop(largeStructSpaceToPop, NULL);
+
+ if (size_t(m_callThisArg) == 0x1)
+ {
+ _ASSERTE_MSG(sigInfo.retType == CORINFO_TYPE_VOID, "Constructor for var-sized object becomes factory method that returns result.");
+ OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVal));
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
+ m_curStackHt++;
+ }
+ else if (sigInfo.retType != CORINFO_TYPE_VOID)
+ {
+ switch (sigInfo.retType)
+ {
+ case CORINFO_TYPE_BOOL:
+ case CORINFO_TYPE_BYTE:
+ OpStackSet<INT32>(m_curStackHt, static_cast<INT8>(retVal));
+ break;
+ case CORINFO_TYPE_UBYTE:
+ OpStackSet<UINT32>(m_curStackHt, static_cast<UINT8>(retVal));
+ break;
+ case CORINFO_TYPE_SHORT:
+ OpStackSet<INT32>(m_curStackHt, static_cast<INT16>(retVal));
+ break;
+ case CORINFO_TYPE_USHORT:
+ case CORINFO_TYPE_CHAR:
+ OpStackSet<UINT32>(m_curStackHt, static_cast<UINT16>(retVal));
+ break;
+ case CORINFO_TYPE_INT:
+ case CORINFO_TYPE_UINT:
+ case CORINFO_TYPE_FLOAT:
+ OpStackSet<INT32>(m_curStackHt, static_cast<INT32>(retVal));
+ break;
+ case CORINFO_TYPE_LONG:
+ case CORINFO_TYPE_ULONG:
+ case CORINFO_TYPE_DOUBLE:
+ OpStackSet<INT64>(m_curStackHt, static_cast<INT64>(retVal));
+ break;
+ case CORINFO_TYPE_NATIVEINT:
+ case CORINFO_TYPE_NATIVEUINT:
+ case CORINFO_TYPE_PTR:
+ OpStackSet<NativeInt>(m_curStackHt, static_cast<NativeInt>(retVal));
+ break;
+ case CORINFO_TYPE_CLASS:
+ OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVal));
+ break;
+ case CORINFO_TYPE_BYREF:
+ OpStackSet<void*>(m_curStackHt, reinterpret_cast<void*>(retVal));
+ break;
+ case CORINFO_TYPE_VALUECLASS:
+ case CORINFO_TYPE_REFANY:
+ {
+ // We must be careful here to write the value, the type, and update the stack height in one
+ // sequence that has no COOP->PREEMP transitions in it, so no GC's happen until the value
+ // is protected by being fully "on" the operandStack.
+#if defined(_ARM_)
+ // Is the return type an HFA?
+ if (HFAReturnArgSlots > 0)
+ {
+ ARG_SLOT* hfaRetBuff = args - HFAReturnArgSlots;
+ if (retTypeIt.IsLargeStruct(&m_interpCeeInfo))
+ {
+ void* dst = LargeStructOperandStackPush(retTypeSz);
+ memcpy(dst, hfaRetBuff, retTypeSz);
+ OpStackSet<void*>(m_curStackHt, dst);
+ }
+ else
+ {
+ memcpy(OpStackGetAddr<UINT64>(m_curStackHt), hfaRetBuff, retTypeSz);
+ }
+ }
+ else
+#endif // defined(_ARM_)
+ if (pLargeStructRetVal != NULL)
+ {
+ assert(hasRetBuffArg);
+ void* dst = LargeStructOperandStackPush(retTypeSz);
+ CopyValueClassUnchecked(dst, pLargeStructRetVal, GetMethodTableFromClsHnd(retTypeClsHnd));
+ OpStackSet<void*>(m_curStackHt, dst);
+ }
+ else if (hasRetBuffArg)
+ {
+ OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(&smallStructRetVal, retTypeSz));
+ }
+ else
+ {
+ OpStackSet<UINT64>(m_curStackHt, retVal);
+ }
+ // We already created this interpreter type, so use it.
+ OpStackTypeSet(m_curStackHt, retTypeIt.StackNormalize());
+ m_curStackHt++;
+
+
+ // In the value-class case, the call might have used a ret buff, which we would have registered for GC scanning.
+ // Make sure it's unregistered.
+ m_structRetValITPtr = NULL;
+ }
+ break;
+ default:
+ NYI_INTERP("Unhandled return type");
+ break;
+ }
+ _ASSERTE_MSG(m_structRetValITPtr == NULL, "Invariant.");
+
+ // The valueclass case is handled fully in the switch above.
+ if (sigInfo.retType != CORINFO_TYPE_VALUECLASS &&
+ sigInfo.retType != CORINFO_TYPE_REFANY)
+ {
+ OpStackTypeSet(m_curStackHt, InterpreterType(sigInfo.retType).StackNormalize());
+ m_curStackHt++;
+ }
+ }
+ }
+
+ // Originally, this assertion was in the ValueClass case above, but it does a COOP->PREEMP
+ // transition, and therefore causes a GC, and we're GCX_FORBIDden from doing a GC while retVal
+ // is vulnerable. So, for completeness, do it here.
+ assert(sigInfo.retType != CORINFO_TYPE_VALUECLASS || retTypeIt == InterpreterType(&m_interpCeeInfo, retTypeClsHnd));
+
+ // If we created a cached call site, cache it now (when it's safe to take a GC).
+ if (pCscd != NULL && !doNotCache)
+ {
+ CacheCallInfo(iloffset, pCscd);
+ }
+
+ m_callThisArg = NULL;
+
+ // If the call we just made pushed a Frame, we popped it above, so re-push it.
+ if (ilPushedFrame != NULL) ilPushedFrame->Push();
+}
+
+#include "metadata.h"
+
+void Interpreter::CallI()
+{
+#if INTERP_DYNAMIC_CONTRACTS
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+#else
+ // Dynamic contract occupies too much stack.
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+#endif
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_totalInterpCalls);
+#endif // INTERP_TRACING
+
+ unsigned tok = getU4LittleEndian(m_ILCodePtr + sizeof(byte));
+
+ CORINFO_SIG_INFO sigInfo;
+
+ {
+ GCX_PREEMP();
+ m_interpCeeInfo.findSig(m_methInfo->m_module, tok, GetPreciseGenericsContext(), &sigInfo);
+ }
+
+ // I'm assuming that a calli can't depend on the generics context, so the simple form of type
+ // context should suffice?
+ MethodDesc* pMD = reinterpret_cast<MethodDesc*>(m_methInfo->m_method);
+ SigTypeContext sigTypeCtxt(pMD);
+ MetaSig mSig(sigInfo.pSig, sigInfo.cbSig, GetModule(sigInfo.scope), &sigTypeCtxt);
+
+ unsigned totalSigArgs = sigInfo.totalILArgs();
+
+ // Note that "totalNativeArgs()" includes space for ret buff arg.
+ unsigned nSlots = totalSigArgs + 1;
+ if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
+ {
+ nSlots++;
+ }
+
+ // Make sure that the operand stack has the required number of arguments.
+ // (Note that this is IL args, not native.)
+ //
+
+ // The total number of arguments on the IL stack. Initially we assume that all the IL arguments
+ // the callee expects are on the stack, but may be adjusted downwards if the "this" argument
+ // is provided by an allocation (the call is to a constructor).
+ unsigned totalArgsOnILStack = totalSigArgs;
+
+ const unsigned LOCAL_ARG_SLOTS = 8;
+ ARG_SLOT localArgs[LOCAL_ARG_SLOTS];
+ InterpreterType localArgTypes[LOCAL_ARG_SLOTS];
+
+ ARG_SLOT* args;
+ InterpreterType* argTypes;
+ if (nSlots <= LOCAL_ARG_SLOTS)
+ {
+ args = &localArgs[0];
+ argTypes = &localArgTypes[0];
+ }
+ else
+ {
+ args = (ARG_SLOT*)_alloca(nSlots * sizeof(ARG_SLOT));
+ argTypes = (InterpreterType*)_alloca(nSlots * sizeof(InterpreterType));
+ }
+ // Make sure that we don't scan any of these until we overwrite them with
+ // the real types of the arguments.
+ InterpreterType undefIt(CORINFO_TYPE_UNDEF);
+ for (unsigned i = 0; i < nSlots; i++)
+ {
+ argTypes[i] = undefIt;
+ }
+
+ // GC-protect the argument array (as byrefs).
+ m_args = args;
+ m_argsSize = nSlots;
+ m_argTypes = argTypes;
+
+ // This is the index into the "args" array (where we copy the value to).
+ int curArgSlot = 0;
+
+ // The operand stack index of the first IL argument.
+ unsigned totalArgPositions = totalArgsOnILStack + 1; // + 1 for the ftn argument.
+ assert(m_curStackHt >= totalArgPositions);
+ int argsBase = m_curStackHt - totalArgPositions;
+
+ // Current on-stack argument index.
+ unsigned arg = 0;
+
+ if (sigInfo.hasThis())
+ {
+ args[curArgSlot] = PtrToArgSlot(OpStackGet<void*>(argsBase + arg));
+ argTypes[curArgSlot] = OpStackTypeGet(argsBase + arg);
+ // AV -> NullRef translation is NYI for the interpreter,
+ // so we should manually check and throw the correct exception.
+ ThrowOnInvalidPointer((void*)args[curArgSlot]);
+ arg++;
+ curArgSlot++;
+ }
+
+ // This is the argument slot that will be used to hold the return value.
+ ARG_SLOT retVal = 0;
+
+ // If the return type is a structure, then these will be initialized.
+ CORINFO_CLASS_HANDLE retTypeClsHnd = NULL;
+ InterpreterType retTypeIt;
+ size_t retTypeSz = 0;
+
+ // If non-null, space allocated to hold a large struct return value. It is _alloca'd, so it is
+ // released automatically when this frame exits.
+ // (I could probably optimize this by popping all the arguments first, then allocating space for the
+ // return value on the large structure operand stack, and passing a pointer directly to that space,
+ // avoiding the extra copy we have below. But this seemed more expedient, and this should be a pretty rare case.)
+ byte* pLargeStructRetVal = NULL;
+
+ // If there's a "GetFlag<Flag_hasRetBuffArg>()" struct return value, it will be stored in this variable if it fits,
+ // otherwise, we'll dynamically allocate memory for it.
+ ARG_SLOT smallStructRetVal = 0;
+
+ // We should have no return buffer temp space registered here...unless this is a constructor, in which
+ // case it will return void. In particular, if the return type is VALUE_CLASS, then this should be NULL.
+ _ASSERTE_MSG(sigInfo.retType == CORINFO_TYPE_VOID || m_structRetValITPtr == NULL, "Invariant.");
+
+ // Is the return value a struct with a ret buff?
+ bool hasRetBuffArg = false;
+ if (sigInfo.retType == CORINFO_TYPE_VALUECLASS)
+ {
+ retTypeClsHnd = sigInfo.retTypeClass;
+ retTypeIt = InterpreterType(&m_interpCeeInfo, retTypeClsHnd);
+ retTypeSz = retTypeIt.Size(&m_interpCeeInfo);
+#if defined(_AMD64_)
+ // TODO: Investigate why HasRetBuffArg can't be used. pMD is a hacked up MD for the
+ // calli because it belongs to the current method. Doing what the JIT does.
+ hasRetBuffArg = (retTypeSz > sizeof(void*)) || ((retTypeSz & (retTypeSz - 1)) != 0);
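+ // (This mirrors the x64 convention: a struct comes back in a register only if its size is a
+ // power of two no larger than a pointer; anything else needs a ret buff.)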
+#else
+ hasRetBuffArg = !!pMD->HasRetBuffArg();
+#endif
+ if (hasRetBuffArg)
+ {
+ if (retTypeIt.IsLargeStruct(&m_interpCeeInfo))
+ {
+ size_t retBuffSize = retTypeSz;
+ // If the target architecture can sometimes return a struct in several registers,
+ // MethodDescCallSite will reserve a return value array big enough to hold the maximum.
+ // It will then copy *all* of this into the return buffer area we allocate. So make sure
+ // we allocate at least that much.
+#ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
+ retBuffSize = max(retTypeSz, ENREGISTERED_RETURNTYPE_MAXSIZE);
+#endif // ENREGISTERED_RETURNTYPE_MAXSIZE
+ pLargeStructRetVal = (byte*)_alloca(retBuffSize);
+
+ // Clear this in case a GC happens.
+ for (unsigned i = 0; i < retTypeSz; i++)
+ {
+ pLargeStructRetVal[i] = 0;
+ }
+
+ // Register this as location needing GC.
+ m_structRetValTempSpace = pLargeStructRetVal;
+
+ // Set it as the return buffer.
+ args[curArgSlot] = PtrToArgSlot(pLargeStructRetVal);
+ }
+ else
+ {
+ // Clear this in case a GC happens.
+ smallStructRetVal = 0;
+
+ // Register this as location needing GC.
+ m_structRetValTempSpace = &smallStructRetVal;
+
+ // Set it as the return buffer.
+ args[curArgSlot] = PtrToArgSlot(&smallStructRetVal);
+ }
+ m_structRetValITPtr = &retTypeIt;
+ argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
+ curArgSlot++;
+ }
+ }
+
+ if ((sigInfo.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
+ {
+ Module* module = GetModule(sigInfo.scope);
+ CORINFO_VARARGS_HANDLE handle = CORINFO_VARARGS_HANDLE(module->GetVASigCookie(Signature(sigInfo.pSig, sigInfo.cbSig)));
+ args[curArgSlot] = PtrToArgSlot(handle);
+ argTypes[curArgSlot] = InterpreterType(CORINFO_TYPE_NATIVEINT);
+ curArgSlot++;
+ }
+
+ // Now we do the non-this arguments.
+ size_t largeStructSpaceToPop = 0;
+ for (; arg < totalArgsOnILStack; arg++)
+ {
+ InterpreterType argIt = OpStackTypeGet(argsBase + arg);
+ size_t sz = OpStackTypeGet(argsBase + arg).Size(&m_interpCeeInfo);
+ switch (sz)
+ {
+ case 1:
+ args[curArgSlot] = OpStackGet<INT8>(argsBase + arg);
+ break;
+ case 2:
+ args[curArgSlot] = OpStackGet<INT16>(argsBase + arg);
+ break;
+ case 4:
+ args[curArgSlot] = OpStackGet<INT32>(argsBase + arg);
+ break;
+ case 8:
+ default:
+ if (sz > 8)
+ {
+ void* srcPtr = OpStackGet<void*>(argsBase + arg);
+ args[curArgSlot] = PtrToArgSlot(srcPtr);
+ if (!IsInLargeStructLocalArea(srcPtr))
+ {
+ largeStructSpaceToPop += sz;
+ }
+ }
+ else
+ {
+ args[curArgSlot] = OpStackGet<INT64>(argsBase + arg);
+ }
+ break;
+ }
+ argTypes[curArgSlot] = argIt;
+ curArgSlot++;
+ }
+
+ // Finally, we get the code pointer.
+ unsigned ftnInd = m_curStackHt - 1;
+#ifdef _DEBUG
+ CorInfoType ftnType = OpStackTypeGet(ftnInd).ToCorInfoType();
+ assert(ftnType == CORINFO_TYPE_NATIVEINT
+ || ftnType == CORINFO_TYPE_INT
+ || ftnType == CORINFO_TYPE_LONG);
+#endif // _DEBUG
+
+ PCODE ftnPtr = OpStackGet<PCODE>(ftnInd);
+
+ {
+ MethodDesc* methToCall;
+ // If we're interpreting the target, simply call it directly.
+ if ((methToCall = InterpretationStubToMethodInfo((PCODE)ftnPtr)) != NULL)
+ {
+ InterpreterMethodInfo* methInfo = MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE(methToCall));
+ assert(methInfo != NULL);
+#if INTERP_ILCYCLE_PROFILE
+ unsigned __int64 startCycles;
+ bool b = CycleTimer::GetThreadCyclesS(&startCycles); assert(b);
+#endif // INTERP_ILCYCLE_PROFILE
+ retVal = InterpretMethodBody(methInfo, true, reinterpret_cast<BYTE*>(args), NULL);
+ }
+ else
+ {
+ // This is not a great workaround. For the most part, we really don't care what method desc we're using, since
+ // we're providing the signature and function pointer -- other than that it's well-formed and "activated."
+ // The one thing we do care about is whether it is static or not, which is actually determined by the signature.
+ // So we query the signature we have to determine whether we need a static or instance MethodDesc, and then
+ // use a MethodDesc of the appropriate staticness that happens to be sitting around in global variables. For static
+ // we use "RuntimeHelpers.PrepareConstrainedRegions", for instance we use the default constructor of "Object."
+ // TODO: make this cleaner -- maybe invent a couple of empty methods with instructive names, just for this purpose.
+ MethodDesc* pMD;
+ if (mSig.HasThis())
+ {
+ pMD = g_pObjectCtorMD;
+ }
+ else
+ {
+ pMD = g_pPrepareConstrainedRegionsMethod; // A random static method.
+ }
+ MethodDescCallSite mdcs(pMD, &mSig, ftnPtr);
+ // If the current method being interpreted is an IL stub, we're calling native code, so
+ // change the GC mode. (We'll only do this at the call if the calling convention turns out
+ // to be a managed calling convention.)
+ MethodDesc* pStubContextMD = reinterpret_cast<MethodDesc*>(m_stubContext);
+ bool transitionToPreemptive = (pStubContextMD != NULL && !pStubContextMD->IsIL());
+ retVal = mdcs.CallTargetWorker(args, transitionToPreemptive);
+ }
+ // retVal is now vulnerable.
+ GCX_FORBID();
+ }
+
+ // retVal is still vulnerable.
+ {
+ GCX_FORBID();
+ m_argsSize = 0;
+
+ // At this point, the call has happened successfully. We can delete the arguments from the operand stack.
+ m_curStackHt -= totalArgPositions;
+
+ // We've already checked that "largeStructSpaceToPop" is consistent with the large-struct operand stack height.
+ LargeStructOperandStackPop(largeStructSpaceToPop, NULL);
+
+ if (size_t(m_callThisArg) == 0x1)
+ {
+ _ASSERTE_MSG(sigInfo.retType == CORINFO_TYPE_VOID, "Constructor for var-sized object becomes factory method that returns result.");
+ OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVal));
+ OpStackTypeSet(m_curStackHt, InterpreterType(CORINFO_TYPE_CLASS));
+ m_curStackHt++;
+ }
+ else if (sigInfo.retType != CORINFO_TYPE_VOID)
+ {
+ switch (sigInfo.retType)
+ {
+ case CORINFO_TYPE_BOOL:
+ case CORINFO_TYPE_BYTE:
+ OpStackSet<INT32>(m_curStackHt, static_cast<INT8>(retVal));
+ break;
+ case CORINFO_TYPE_UBYTE:
+ OpStackSet<UINT32>(m_curStackHt, static_cast<UINT8>(retVal));
+ break;
+ case CORINFO_TYPE_SHORT:
+ OpStackSet<INT32>(m_curStackHt, static_cast<INT16>(retVal));
+ break;
+ case CORINFO_TYPE_USHORT:
+ case CORINFO_TYPE_CHAR:
+ OpStackSet<UINT32>(m_curStackHt, static_cast<UINT16>(retVal));
+ break;
+ case CORINFO_TYPE_INT:
+ case CORINFO_TYPE_UINT:
+ case CORINFO_TYPE_FLOAT:
+ OpStackSet<INT32>(m_curStackHt, static_cast<INT32>(retVal));
+ break;
+ case CORINFO_TYPE_LONG:
+ case CORINFO_TYPE_ULONG:
+ case CORINFO_TYPE_DOUBLE:
+ OpStackSet<INT64>(m_curStackHt, static_cast<INT64>(retVal));
+ break;
+ case CORINFO_TYPE_NATIVEINT:
+ case CORINFO_TYPE_NATIVEUINT:
+ case CORINFO_TYPE_PTR:
+ OpStackSet<NativeInt>(m_curStackHt, static_cast<NativeInt>(retVal));
+ break;
+ case CORINFO_TYPE_CLASS:
+ OpStackSet<Object*>(m_curStackHt, reinterpret_cast<Object*>(retVal));
+ break;
+ case CORINFO_TYPE_VALUECLASS:
+ {
+ // We must be careful here to write the value, the type, and update the stack height in one
+ // sequence that has no COOP->PREEMP transitions in it, so no GC's happen until the value
+ // is protected by being fully "on" the operandStack.
+ if (pLargeStructRetVal != NULL)
+ {
+ assert(hasRetBuffArg);
+ void* dst = LargeStructOperandStackPush(retTypeSz);
+ CopyValueClassUnchecked(dst, pLargeStructRetVal, GetMethodTableFromClsHnd(retTypeClsHnd));
+ OpStackSet<void*>(m_curStackHt, dst);
+ }
+ else if (hasRetBuffArg)
+ {
+ OpStackSet<INT64>(m_curStackHt, GetSmallStructValue(&smallStructRetVal, retTypeSz));
+ }
+ else
+ {
+ OpStackSet<UINT64>(m_curStackHt, retVal);
+ }
+ // We already created this interpreter type, so use it.
+ OpStackTypeSet(m_curStackHt, retTypeIt.StackNormalize());
+ m_curStackHt++;
+
+ // In the value-class case, the call might have used a ret buff, which we would have registered for GC scanning.
+ // Make sure it's unregistered.
+ m_structRetValITPtr = NULL;
+ }
+ break;
+ default:
+ NYI_INTERP("Unhandled return type");
+ break;
+ }
+ _ASSERTE_MSG(m_structRetValITPtr == NULL, "Invariant.");
+
+ // The valueclass case is handled fully in the switch above.
+ if (sigInfo.retType != CORINFO_TYPE_VALUECLASS)
+ {
+ OpStackTypeSet(m_curStackHt, InterpreterType(sigInfo.retType).StackNormalize());
+ m_curStackHt++;
+ }
+ }
+ }
+
+ // Originally, this assertion was in the ValueClass case above, but it does a COOP->PREEMP
+ // transition, and therefore causes a GC, and we're GCX_FORBIDden from doing a GC while retVal
+ // is vulnerable. So, for completeness, do it here.
+ assert(sigInfo.retType != CORINFO_TYPE_VALUECLASS || retTypeIt == InterpreterType(&m_interpCeeInfo, retTypeClsHnd));
+
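+ // Skip the 1-byte calli opcode and its 4-byte standalone-signature token.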
+ m_ILCodePtr += 5;
+}
+
+// static
+bool Interpreter::IsDeadSimpleGetter(CEEInfo* info, MethodDesc* pMD, size_t* offsetOfLd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ DWORD flags = pMD->GetAttrs();
+ CORINFO_METHOD_INFO methInfo;
+ {
+ GCX_PREEMP();
+ bool b = info->getMethodInfo(CORINFO_METHOD_HANDLE(pMD), &methInfo);
+ if (!b) return false;
+ }
+
+ // If the method takes a generic type argument, it's not dead simple...
+ if (methInfo.args.callConv & CORINFO_CALLCONV_PARAMTYPE) return false;
+
+ BYTE* codePtr = methInfo.ILCode;
+
+ if (flags & CORINFO_FLG_STATIC)
+ {
+ if (methInfo.ILCodeSize != 6)
+ return false;
+ if (*codePtr != CEE_LDSFLD)
+ return false;
+ assert(ILOffsetOfLdSFldInDeadSimpleStaticGetter == 0);
+ *offsetOfLd = 0;
+ codePtr += 5;
+ return (*codePtr == CEE_RET);
+ }
+ else
+ {
+ // We handle two forms, one for DBG IL, and one for OPT IL.
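+ // Concretely, the two shapes checked for below are (field token elided):
+ //   OPT form: ldarg.0; ldfld <fld>; ret
+ //   DBG form: nop; ldarg.0; ldfld <fld>; stloc.0; br <+0>; ldloc.0; ret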
+ bool dbg = false;
+ if (methInfo.ILCodeSize == 0xc)
+ dbg = true;
+ else if (methInfo.ILCodeSize != 7)
+ return false;
+
+ if (dbg)
+ {
+ if (*codePtr != CEE_NOP)
+ return false;
+ codePtr += 1;
+ }
+ if (*codePtr != CEE_LDARG_0)
+ return false;
+ codePtr += 1;
+ if (*codePtr != CEE_LDFLD)
+ return false;
+ *offsetOfLd = codePtr - methInfo.ILCode;
+ assert((dbg && ILOffsetOfLdFldInDeadSimpleInstanceGetterDbg == *offsetOfLd)
+ || (!dbg && ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt == *offsetOfLd));
+ codePtr += 5;
+ if (dbg)
+ {
+ if (*codePtr != CEE_STLOC_0)
+ return false;
+ codePtr += 1;
+ if (*codePtr != CEE_BR)
+ return false;
+ if (getU4LittleEndian(codePtr + 1) != 0)
+ return false;
+ codePtr += 5;
+ if (*codePtr != CEE_LDLOC_0)
+ return false;
+ }
+ return (*codePtr == CEE_RET);
+ }
+}
+
+void Interpreter::DoStringLength()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt > 0);
+ unsigned ind = m_curStackHt - 1;
+
+#ifdef _DEBUG
+ CorInfoType stringCIT = OpStackTypeGet(ind).ToCorInfoType();
+ if (stringCIT != CORINFO_TYPE_CLASS)
+ {
+ VerificationError("StringLength called on non-string.");
+ }
+#endif // _DEBUG
+
+ Object* obj = OpStackGet<Object*>(ind);
+
+#ifdef _DEBUG
+ if (obj->GetMethodTable() != g_pStringClass)
+ {
+ VerificationError("StringLength called on non-string.");
+ }
+#endif // _DEBUG
+
+ StringObject* str = reinterpret_cast<StringObject*>(obj);
+ INT32 len = str->GetStringLength();
+ OpStackSet<INT32>(ind, len);
+ OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_INT));
+}
+
+void Interpreter::DoStringGetChar()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 2);
+ unsigned strInd = m_curStackHt - 2;
+ unsigned indexInd = strInd + 1;
+
+#ifdef _DEBUG
+ CorInfoType stringCIT = OpStackTypeGet(strInd).ToCorInfoType();
+ if (stringCIT != CORINFO_TYPE_CLASS)
+ {
+ VerificationError("StringGetChar called on non-string.");
+ }
+#endif // _DEBUG
+
+ Object* obj = OpStackGet<Object*>(strInd);
+
+#ifdef _DEBUG
+ if (obj->GetMethodTable() != g_pStringClass)
+ {
+ VerificationError("StringGetChar called on non-string.");
+ }
+#endif // _DEBUG
+
+ StringObject* str = reinterpret_cast<StringObject*>(obj);
+
+#ifdef _DEBUG
+ CorInfoType indexCIT = OpStackTypeGet(indexInd).ToCorInfoType();
+ if (indexCIT != CORINFO_TYPE_INT)
+ {
+ VerificationError("StringGetChar needs integer index.");
+ }
+#endif // _DEBUG
+
+ INT32 ind = OpStackGet<INT32>(indexInd);
+ if (ind < 0)
+ ThrowArrayBoundsException();
+ UINT32 uind = static_cast<UINT32>(ind);
+ if (uind >= str->GetStringLength())
+ ThrowArrayBoundsException();
+
+ // Otherwise...
+ GCX_FORBID(); // str is vulnerable.
+ UINT16* dataPtr = reinterpret_cast<UINT16*>(reinterpret_cast<INT8*>(str) + StringObject::GetBufferOffset());
+ UINT32 filledChar = dataPtr[ind];
+ OpStackSet<UINT32>(strInd, filledChar);
+ OpStackTypeSet(strInd, InterpreterType(CORINFO_TYPE_INT));
+ m_curStackHt = indexInd;
+}
+
+void Interpreter::DoGetTypeFromHandle()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt > 0);
+ unsigned ind = m_curStackHt - 1;
+
+#ifdef _DEBUG
+ CorInfoType handleCIT = OpStackTypeGet(ind).ToCorInfoType();
+ if (handleCIT != CORINFO_TYPE_VALUECLASS && handleCIT != CORINFO_TYPE_CLASS)
+ {
+ VerificationError("HandleGetTypeFromHandle called on non-RuntimeTypeHandle/non-RuntimeType.");
+ }
+ Object* obj = OpStackGet<Object*>(ind);
+ if (obj->GetMethodTable() != g_pRuntimeTypeClass)
+ {
+ VerificationError("HandleGetTypeFromHandle called on non-RuntimeTypeHandle/non-RuntimeType.");
+ }
+#endif // _DEBUG
+
+ OpStackTypeSet(ind, InterpreterType(CORINFO_TYPE_CLASS));
+}
+
+void Interpreter::RecordConstrainedCall()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_Constrained]);
+#endif // INTERP_TRACING
+
+ {
+ GCX_PREEMP();
+ ResolveToken(&m_constrainedResolvedToken, getU4LittleEndian(m_ILCodePtr + 2), CORINFO_TOKENKIND_Constrained InterpTracingArg(RTK_Constrained));
+ }
+
+ m_constrainedFlag = true;
+
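+ // Skip the 2-byte "constrained." prefix opcode and its 4-byte type token (the token itself
+ // was read at m_ILCodePtr + 2 above).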
+ m_ILCodePtr += 6;
+}
+
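+ // Growth policy sketch: on overflow, the buffer grows to max(current + 4*sz, 2*current) bytes
+ // and live contents are copied over. For example, pushing a 24-byte struct onto an empty
+ // (zero-capacity) stack allocates 96 bytes up front.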
+void Interpreter::LargeStructOperandStackEnsureCanPush(size_t sz)
+{
+ size_t remaining = m_largeStructOperandStackAllocSize - m_largeStructOperandStackHt;
+ if (remaining < sz)
+ {
+ size_t newAllocSize = max(m_largeStructOperandStackAllocSize + sz * 4, m_largeStructOperandStackAllocSize * 2);
+ BYTE* newStack = new BYTE[newAllocSize];
+ m_largeStructOperandStackAllocSize = newAllocSize;
+ if (m_largeStructOperandStack != NULL)
+ {
+ memcpy(newStack, m_largeStructOperandStack, m_largeStructOperandStackHt);
+ delete[] m_largeStructOperandStack;
+ }
+ m_largeStructOperandStack = newStack;
+ }
+}
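+// For example (illustrative numbers): with m_largeStructOperandStackAllocSize == 16 and
+// m_largeStructOperandStackHt == 8, pushing a 24-byte struct finds remaining == 8 < 24, so
+// newAllocSize == max(16 + 24*4, 16*2) == 112; the 8 live bytes are copied over and the old
+// buffer is freed.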
+
+void* Interpreter::LargeStructOperandStackPush(size_t sz)
+{
+ LargeStructOperandStackEnsureCanPush(sz);
+ assert(m_largeStructOperandStackAllocSize >= m_largeStructOperandStackHt + sz);
+ void* res = &m_largeStructOperandStack[m_largeStructOperandStackHt];
+ m_largeStructOperandStackHt += sz;
+ return res;
+}
+
+void Interpreter::LargeStructOperandStackPop(size_t sz, void* fromAddr)
+{
+ if (!IsInLargeStructLocalArea(fromAddr))
+ {
+ assert(m_largeStructOperandStackHt >= sz);
+ m_largeStructOperandStackHt -= sz;
+ }
+}
+
+#ifdef _DEBUG
+bool Interpreter::LargeStructStackHeightIsValid()
+{
+ size_t sz2 = 0;
+ for (unsigned k = 0; k < m_curStackHt; k++)
+ {
+ if (OpStackTypeGet(k).IsLargeStruct(&m_interpCeeInfo) && !IsInLargeStructLocalArea(OpStackGet<void*>(k)))
+ {
+ sz2 += OpStackTypeGet(k).Size(&m_interpCeeInfo);
+ }
+ }
+ assert(sz2 == m_largeStructOperandStackHt);
+ return sz2 == m_largeStructOperandStackHt;
+}
+#endif // _DEBUG
+
+void Interpreter::VerificationError(const char* msg)
+{
+ // TODO: Should raise an exception eventually; for now:
+ const char* const msgPrefix = "Verification Error: ";
+ size_t len = strlen(msgPrefix) + strlen(msg) + 1;
+ char* msgFinal = (char*)_alloca(len);
+ strcpy_s(msgFinal, len, msgPrefix);
+ strcat_s(msgFinal, len, msg);
+ _ASSERTE_MSG(false, msgFinal);
+}
+
+void Interpreter::ThrowDivideByZero()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ COMPlusThrow(kDivideByZeroException);
+}
+
+void Interpreter::ThrowSysArithException()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ // According to the ECMA spec, this should be an ArithmeticException; however,
+ // the JITs throw an OverflowException, and consistency with them takes priority...
+ COMPlusThrow(kOverflowException);
+}
+
+void Interpreter::ThrowNullPointerException()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ COMPlusThrow(kNullReferenceException);
+}
+
+void Interpreter::ThrowOverflowException()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ COMPlusThrow(kOverflowException);
+}
+
+void Interpreter::ThrowArrayBoundsException()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ COMPlusThrow(kIndexOutOfRangeException);
+}
+
+void Interpreter::ThrowInvalidCastException()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ COMPlusThrow(kInvalidCastException);
+}
+
+void Interpreter::ThrowStackOverflow()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ COMPlusThrow(kStackOverflowException);
+}
+
+float Interpreter::RemFunc(float v1, float v2)
+{
+ return fmodf(v1, v2);
+}
+
+double Interpreter::RemFunc(double v1, double v2)
+{
+ return fmod(v1, v2);
+}
+
+// Static members and methods.
+Interpreter::AddrToMDMap* Interpreter::s_addrToMDMap = NULL;
+
+unsigned Interpreter::s_interpreterStubNum = 0;
+
+// TODO: contracts and synchronization for the AddrToMDMap methods.
+// Requires caller to hold "s_interpStubToMDMapLock".
+Interpreter::AddrToMDMap* Interpreter::GetAddrToMdMap()
+{
+#if 0
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+#endif
+
+ if (s_addrToMDMap == NULL)
+ {
+ s_addrToMDMap = new AddrToMDMap(/* use default allocator */ NULL);
+ }
+ return s_addrToMDMap;
+}
+
+void Interpreter::RecordInterpreterStubForMethodDesc(CORINFO_METHOD_HANDLE md, void* addr)
+{
+#if 0
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+#endif
+
+ CrstHolder ch(&s_interpStubToMDMapLock);
+
+ AddrToMDMap* map = Interpreter::GetAddrToMdMap();
+#ifdef _DEBUG
+ CORINFO_METHOD_HANDLE dummy;
+ assert(!map->Lookup(addr, &dummy));
+#endif // _DEBUG
+ map->Set(addr, md);
+}
+
+MethodDesc* Interpreter::InterpretationStubToMethodInfo(PCODE addr)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+
+ // This query function will never allocate the table...
+ if (s_addrToMDMap == NULL)
+ return NULL;
+
+ // Otherwise...if we observe s_addrToMDMap non-null, the lock below must be initialized.
+ // CrstHolder ch(&s_interpStubToMDMapLock);
+
+ AddrToMDMap* map = Interpreter::GetAddrToMdMap();
+ CORINFO_METHOD_HANDLE result = NULL;
+ (void)map->Lookup((void*)addr, &result);
+ return (MethodDesc*)result;
+}
+
+Interpreter::MethodHandleToInterpMethInfoPtrMap* Interpreter::s_methodHandleToInterpMethInfoPtrMap = NULL;
+
+// Requires caller to hold "s_interpStubToMDMapLock".
+Interpreter::MethodHandleToInterpMethInfoPtrMap* Interpreter::GetMethodHandleToInterpMethInfoPtrMap()
+{
+#if 0
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+#endif
+
+ if (s_methodHandleToInterpMethInfoPtrMap == NULL)
+ {
+ s_methodHandleToInterpMethInfoPtrMap = new MethodHandleToInterpMethInfoPtrMap(/* use default allocator */ NULL);
+ }
+ return s_methodHandleToInterpMethInfoPtrMap;
+}
+
+InterpreterMethodInfo* Interpreter::RecordInterpreterMethodInfoForMethodHandle(CORINFO_METHOD_HANDLE md, InterpreterMethodInfo* methInfo)
+{
+#if 0
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+#endif
+
+ CrstHolder ch(&s_interpStubToMDMapLock);
+
+ MethodHandleToInterpMethInfoPtrMap* map = Interpreter::GetMethodHandleToInterpMethInfoPtrMap();
+
+ MethInfo mi;
+ if (map->Lookup(md, &mi))
+ {
+ // If there's already an entry, make sure it was created by another thread -- the same thread shouldn't create two
+ // of these.
+ _ASSERTE_MSG(mi.m_thread != GetThread(), "Two InterpMethInfo's for same meth by same thread.");
+ // If we were creating an interpreter stub at the same time as another thread, and we lost the race to
+ // insert it, use the already-existing one, and delete this one.
+ delete methInfo;
+ return mi.m_info;
+ }
+
+ mi.m_info = methInfo;
+#ifdef _DEBUG
+ mi.m_thread = GetThread();
+#endif
+
+ bool b = map->Set(md, mi);
+ _ASSERTE_MSG(!b, "Multiple InterpMethInfos for method desc.");
+ return methInfo;
+}
+
+InterpreterMethodInfo* Interpreter::MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE md)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ // This query function will never allocate the table...
+ if (s_methodHandleToInterpMethInfoPtrMap == NULL)
+ return NULL;
+
+ // Otherwise...if we observe s_methodHandleToInterpMethInfoPtrMap non-null, the lock below must be initialized.
+ CrstHolder ch(&s_interpStubToMDMapLock);
+
+ MethodHandleToInterpMethInfoPtrMap* map = Interpreter::GetMethodHandleToInterpMethInfoPtrMap();
+
+ MethInfo mi;
+ mi.m_info = NULL;
+ (void)map->Lookup(md, &mi);
+ return mi.m_info;
+}
+
+
+#ifndef DACCESS_COMPILE
+
+// Requires that the current thread holds "s_methodCacheLock."
+ILOffsetToItemCache* InterpreterMethodInfo::GetCacheForCall(Object* thisArg, void* genericsCtxtArg, bool alloc)
+{
+ // First, does the current method have dynamic generic information, and, if so,
+ // what kind?
+ CORINFO_CONTEXT_HANDLE context = GetPreciseGenericsContext(thisArg, genericsCtxtArg);
+ if (context == MAKE_METHODCONTEXT(m_method))
+ {
+ // No dynamic generics context information. The caching field in "m_methInfo" is the
+ // ILoffset->Item cache directly.
+ // First, ensure that it's allocated.
+ if (m_methodCache == NULL && alloc)
+ {
+ // Lazy init via compare-exchange.
+ ILOffsetToItemCache* cache = new ILOffsetToItemCache();
+ void* prev = InterlockedCompareExchangeT<void*>(&m_methodCache, cache, NULL);
+ if (prev != NULL) delete cache;
+ }
+ return reinterpret_cast<ILOffsetToItemCache*>(m_methodCache);
+ }
+ else
+ {
+ // Otherwise, it does have generic info, so find the right cache.
+ // First ensure that the top-level generics-context --> cache cache exists.
+ GenericContextToInnerCache* outerCache = reinterpret_cast<GenericContextToInnerCache*>(m_methodCache);
+ if (outerCache == NULL)
+ {
+ if (alloc)
+ {
+ // Lazy init via compare-exchange.
+ outerCache = new GenericContextToInnerCache();
+ void* prev = InterlockedCompareExchangeT<void*>(&m_methodCache, outerCache, NULL);
+ if (prev != NULL)
+ {
+ delete outerCache;
+ outerCache = reinterpret_cast<GenericContextToInnerCache*>(prev);
+ }
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+ // Does the outerCache already have an entry for this instantiation?
+ ILOffsetToItemCache* innerCache = NULL;
+ if (!outerCache->GetItem(size_t(context), innerCache) && alloc)
+ {
+ innerCache = new ILOffsetToItemCache();
+ outerCache->AddItem(size_t(context), innerCache);
+ }
+ return innerCache;
+ }
+}
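+// The lazy-init-by-compare-exchange pattern used above, reduced to a minimal sketch.
+// (Illustrative only; "Cache" and "m_slot" are stand-ins, not real members.)
+#if 0
+ Cache* fresh = new Cache();
+ void* prev = InterlockedCompareExchangeT<void*>(&m_slot, fresh, NULL);
+ if (prev != NULL)
+ {
+ delete fresh; // another thread won the race...
+ fresh = reinterpret_cast<Cache*>(prev); // ...so adopt its object instead.
+ }
+#endif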
+
+void Interpreter::CacheCallInfo(unsigned iloffset, CallSiteCacheData* callInfo)
+{
+ CrstHolder ch(&s_methodCacheLock);
+
+ ILOffsetToItemCache* cache = GetThisExecCache(true);
+ // Insert, but if the item is already there, delete "mdcs" (which would have been owned
+ // by the cache).
+ // (Duplicate entries can happen because of recursive calls -- F makes a recursive call to F, and when it
+ // returns wants to cache it, but the recursive call makes a further recursive call, and caches that, so the
+ // first call finds the iloffset already occupied.)
+ if (!cache->AddItem(iloffset, CachedItem(callInfo)))
+ {
+ delete callInfo;
+ }
+}
+
+CallSiteCacheData* Interpreter::GetCachedCallInfo(unsigned iloffset)
+{
+ CrstHolder ch(&s_methodCacheLock);
+
+ ILOffsetToItemCache* cache = GetThisExecCache(false);
+ if (cache == NULL) return NULL;
+ // Otherwise...
+ CachedItem item;
+ if (cache->GetItem(iloffset, item))
+ {
+ _ASSERTE_MSG(item.m_tag == CIK_CallSite, "Wrong cached item tag.");
+ return item.m_value.m_callSiteInfo;
+ }
+ else
+ {
+ return NULL;
+ }
+}
+
+void Interpreter::CacheInstanceField(unsigned iloffset, FieldDesc* fld)
+{
+ CrstHolder ch(&s_methodCacheLock);
+
+ ILOffsetToItemCache* cache = GetThisExecCache(true);
+ cache->AddItem(iloffset, CachedItem(fld));
+}
+
+FieldDesc* Interpreter::GetCachedInstanceField(unsigned iloffset)
+{
+ CrstHolder ch(&s_methodCacheLock);
+
+ ILOffsetToItemCache* cache = GetThisExecCache(false);
+ if (cache == NULL) return NULL;
+ // Otherwise...
+ CachedItem item;
+ if (cache->GetItem(iloffset, item))
+ {
+ _ASSERTE_MSG(item.m_tag == CIK_InstanceField, "Wrong cached item tag.");
+ return item.m_value.m_instanceField;
+ }
+ else
+ {
+ return NULL;
+ }
+}
+
+void Interpreter::CacheStaticField(unsigned iloffset, StaticFieldCacheEntry* pEntry)
+{
+ CrstHolder ch(&s_methodCacheLock);
+
+ ILOffsetToItemCache* cache = GetThisExecCache(true);
+ // If (say) a concurrent thread has beaten us to this, delete the entry (which otherwise would have
+ // been owned by the cache).
+ if (!cache->AddItem(iloffset, CachedItem(pEntry)))
+ {
+ delete pEntry;
+ }
+}
+
+StaticFieldCacheEntry* Interpreter::GetCachedStaticField(unsigned iloffset)
+{
+ CrstHolder ch(&s_methodCacheLock);
+
+ ILOffsetToItemCache* cache = GetThisExecCache(false);
+ if (cache == NULL)
+ return NULL;
+
+ // Otherwise...
+ CachedItem item;
+ if (cache->GetItem(iloffset, item))
+ {
+ _ASSERTE_MSG(item.m_tag == CIK_StaticField, "Wrong cached item tag.");
+ return item.m_value.m_staticFieldAddr;
+ }
+ else
+ {
+ return NULL;
+ }
+}
+
+
+void Interpreter::CacheClassHandle(unsigned iloffset, CORINFO_CLASS_HANDLE clsHnd)
+{
+ CrstHolder ch(&s_methodCacheLock);
+
+ ILOffsetToItemCache* cache = GetThisExecCache(true);
+ cache->AddItem(iloffset, CachedItem(clsHnd));
+}
+
+CORINFO_CLASS_HANDLE Interpreter::GetCachedClassHandle(unsigned iloffset)
+{
+ CrstHolder ch(&s_methodCacheLock);
+
+ ILOffsetToItemCache* cache = GetThisExecCache(false);
+ if (cache == NULL)
+ return NULL;
+
+ // Otherwise...
+ CachedItem item;
+ if (cache->GetItem(iloffset, item))
+ {
+ _ASSERTE_MSG(item.m_tag == CIK_ClassHandle, "Wrong cached item tag.");
+ return item.m_value.m_clsHnd;
+ }
+ else
+ {
+ return NULL;
+ }
+}
+#endif // DACCESS_COMPILE
+
+// Statics
+
+// These are not debug-only.
+ConfigMethodSet Interpreter::s_InterpretMeths;
+ConfigMethodSet Interpreter::s_InterpretMethsExclude;
+ConfigDWORD Interpreter::s_InterpretMethHashMin;
+ConfigDWORD Interpreter::s_InterpretMethHashMax;
+ConfigDWORD Interpreter::s_InterpreterJITThreshold;
+ConfigDWORD Interpreter::s_InterpreterDoLoopMethodsFlag;
+ConfigDWORD Interpreter::s_InterpreterUseCachingFlag;
+ConfigDWORD Interpreter::s_InterpreterLooseRulesFlag;
+
+bool Interpreter::s_InterpreterDoLoopMethods;
+bool Interpreter::s_InterpreterUseCaching;
+bool Interpreter::s_InterpreterLooseRules;
+
+CrstExplicitInit Interpreter::s_methodCacheLock;
+CrstExplicitInit Interpreter::s_interpStubToMDMapLock;
+
+// The static variables below are debug-only.
+#if INTERP_TRACING
+LONG Interpreter::s_totalInvocations = 0;
+LONG Interpreter::s_totalInterpCalls = 0;
+LONG Interpreter::s_totalInterpCallsToGetters = 0;
+LONG Interpreter::s_totalInterpCallsToDeadSimpleGetters = 0;
+LONG Interpreter::s_totalInterpCallsToDeadSimpleGettersShortCircuited = 0;
+LONG Interpreter::s_totalInterpCallsToSetters = 0;
+LONG Interpreter::s_totalInterpCallsToIntrinsics = 0;
+LONG Interpreter::s_totalInterpCallsToIntrinsicsUnhandled = 0;
+
+LONG Interpreter::s_tokenResolutionOpportunities[RTK_Count] = {0, };
+LONG Interpreter::s_tokenResolutionCalls[RTK_Count] = {0, };
+const char* Interpreter::s_tokenResolutionKindNames[RTK_Count] =
+{
+ "Undefined",
+ "Constrained",
+ "NewObj",
+ "NewArr",
+ "LdToken",
+ "LdFtn",
+ "LdVirtFtn",
+ "SFldAddr",
+ "LdElem",
+ "Call",
+ "LdObj",
+ "StObj",
+ "CpObj",
+ "InitObj",
+ "IsInst",
+ "CastClass",
+ "MkRefAny",
+ "RefAnyVal",
+ "Sizeof",
+ "StElem",
+ "Box",
+ "Unbox",
+ "UnboxAny",
+ "LdFld",
+ "LdFldA",
+ "StFld",
+ "FindClass",
+ "Exception",
+};
+
+FILE* Interpreter::s_InterpreterLogFile = NULL;
+ConfigDWORD Interpreter::s_DumpInterpreterStubsFlag;
+ConfigDWORD Interpreter::s_TraceInterpreterEntriesFlag;
+ConfigDWORD Interpreter::s_TraceInterpreterILFlag;
+ConfigDWORD Interpreter::s_TraceInterpreterOstackFlag;
+ConfigDWORD Interpreter::s_TraceInterpreterVerboseFlag;
+ConfigDWORD Interpreter::s_TraceInterpreterJITTransitionFlag;
+ConfigDWORD Interpreter::s_InterpreterStubMin;
+ConfigDWORD Interpreter::s_InterpreterStubMax;
+#endif // INTERP_TRACING
+
+#if INTERP_ILINSTR_PROFILE
+unsigned short Interpreter::s_ILInstrCategories[512];
+
+int Interpreter::s_ILInstrExecs[256] = {0, };
+int Interpreter::s_ILInstrExecsByCategory[512] = {0, };
+int Interpreter::s_ILInstr2ByteExecs[Interpreter::CountIlInstr2Byte] = {0, };
+#if INTERP_ILCYCLE_PROFILE
+unsigned __int64 Interpreter::s_ILInstrCycles[512] = { 0, };
+unsigned __int64 Interpreter::s_ILInstrCyclesByCategory[512] = { 0, };
+// XXX
+unsigned __int64 Interpreter::s_callCycles = 0;
+unsigned Interpreter::s_calls = 0;
+
+void Interpreter::UpdateCycleCount()
+{
+ unsigned __int64 endCycles;
+ bool b = CycleTimer::GetThreadCyclesS(&endCycles); assert(b);
+ if (m_instr != CEE_COUNT)
+ {
+ unsigned __int64 delta = (endCycles - m_startCycles);
+ if (m_exemptCycles > 0)
+ {
+ delta = delta - m_exemptCycles;
+ m_exemptCycles = 0;
+ }
+ CycleTimer::InterlockedAddU64(&s_ILInstrCycles[m_instr], delta);
+ }
+ // In any case, set the instruction to the current one, and record its start time.
+ m_instr = (*m_ILCodePtr);
+ if (m_instr == CEE_PREFIX1) {
+ m_instr = *(m_ILCodePtr + 1) + 0x100;
+ }
+ b = CycleTimer::GetThreadCyclesS(&m_startCycles); assert(b);
+}
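+// For example, "ceq" is encoded as 0xFE 0x01, so m_instr becomes 0x101 and its cycles
+// accumulate in the 2-byte-opcode half of s_ILInstrCycles (indices 0x100 and up).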
+
+#endif // INTERP_ILCYCLE_PROFILE
+#endif // INTERP_ILINSTR_PROFILE
+
+#ifdef _DEBUG
+InterpreterMethodInfo** Interpreter::s_interpMethInfos = NULL;
+unsigned Interpreter::s_interpMethInfosAllocSize = 0;
+unsigned Interpreter::s_interpMethInfosCount = 0;
+
+bool Interpreter::TOSIsPtr()
+{
+ if (m_curStackHt == 0)
+ return false;
+
+ return CorInfoTypeIsPointer(OpStackTypeGet(m_curStackHt - 1).ToCorInfoType());
+}
+#endif // DEBUG
+
+ConfigDWORD Interpreter::s_PrintPostMortemFlag;
+
+// InterpreterCache.
+template<typename Key, typename Val>
+InterpreterCache<Key,Val>::InterpreterCache() : m_pairs(NULL), m_allocSize(0), m_count(0)
+{
+#ifdef _DEBUG
+ AddAllocBytes(sizeof(*this));
+#endif
+}
+
+#ifdef _DEBUG
+// static
+static unsigned InterpreterCacheAllocBytes = 0;
+const unsigned KBYTE = 1024;
+const unsigned MBYTE = KBYTE*KBYTE;
+const unsigned InterpreterCacheAllocBytesIncrement = 16*KBYTE;
+static unsigned InterpreterCacheAllocBytesNextTarget = InterpreterCacheAllocBytesIncrement;
+
+template<typename Key, typename Val>
+void InterpreterCache<Key,Val>::AddAllocBytes(unsigned bytes)
+{
+ // Reinstate this code if you want to track bytes attributable to caching.
+#if 0
+ InterpreterCacheAllocBytes += bytes;
+ if (InterpreterCacheAllocBytes > InterpreterCacheAllocBytesNextTarget)
+ {
+ printf("Total cache alloc = %d bytes.\n", InterpreterCacheAllocBytes);
+ fflush(stdout);
+ InterpreterCacheAllocBytesNextTarget += InterpreterCacheAllocBytesIncrement;
+ }
+#endif
+}
+#endif // _DEBUG
+
+template<typename Key, typename Val>
+void InterpreterCache<Key,Val>::EnsureCanInsert()
+{
+ if (m_count < m_allocSize)
+ return;
+
+ // Otherwise, must make room.
+ if (m_allocSize == 0)
+ {
+ assert(m_count == 0);
+ m_pairs = new KeyValPair[InitSize];
+ m_allocSize = InitSize;
+#ifdef _DEBUG
+ AddAllocBytes(m_allocSize * sizeof(KeyValPair));
+#endif
+ }
+ else
+ {
+ unsigned short newSize = min(m_allocSize * 2, USHRT_MAX);
+
+ KeyValPair* newPairs = new KeyValPair[newSize];
+ memcpy(newPairs, m_pairs, m_count * sizeof(KeyValPair));
+ delete[] m_pairs;
+ m_pairs = newPairs;
+#ifdef _DEBUG
+ AddAllocBytes((newSize - m_allocSize) * sizeof(KeyValPair));
+#endif
+ m_allocSize = newSize;
+ }
+}
+
+template<typename Key, typename Val>
+bool InterpreterCache<Key,Val>::AddItem(Key key, Val val)
+{
+ EnsureCanInsert();
+ // Find the index to insert before.
+ unsigned firstGreaterOrEqual = 0;
+ for (; firstGreaterOrEqual < m_count; firstGreaterOrEqual++)
+ {
+ if (m_pairs[firstGreaterOrEqual].m_key >= key)
+ break;
+ }
+ if (firstGreaterOrEqual < m_count && m_pairs[firstGreaterOrEqual].m_key == key)
+ {
+ assert(m_pairs[firstGreaterOrEqual].m_val == val);
+ return false;
+ }
+ // Move everything starting at firstGreater up one index (if necessary)
+ if (m_count > 0)
+ {
+ for (unsigned k = m_count-1; k >= firstGreaterOrEqual; k--)
+ {
+ m_pairs[k + 1] = m_pairs[k];
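+ // "k" is unsigned, so break at 0 explicitly; the loop condition alone would
+ // wrap around when firstGreaterOrEqual == 0.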
+ if (k == 0)
+ break;
+ }
+ }
+ // Now we can insert the new element.
+ m_pairs[firstGreaterOrEqual].m_key = key;
+ m_pairs[firstGreaterOrEqual].m_val = val;
+ m_count++;
+ return true;
+}
+
+template<typename Key, typename Val>
+bool InterpreterCache<Key,Val>::GetItem(Key key, Val& v)
+{
+ unsigned lo = 0;
+ unsigned hi = m_count;
+ // Invariant: we've determined that the pair for "key", if present,
+ // is in the index interval [lo, hi).
+ while (lo < hi)
+ {
+ unsigned mid = (hi + lo)/2;
+ Key midKey = m_pairs[mid].m_key;
+ if (key == midKey)
+ {
+ v = m_pairs[mid].m_val;
+ return true;
+ }
+ else if (key < midKey)
+ {
+ hi = mid;
+ }
+ else
+ {
+ assert(key > midKey);
+ lo = mid + 1;
+ }
+ }
+ // If we reach here without returning, it's not here.
+ return false;
+}
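+// A minimal usage sketch (illustrative only, not compiled): keys are kept sorted, so
+// AddItem is O(n) per insert while GetItem is an O(log n) binary search.
+#if 0
+ InterpreterCache<unsigned, int> c;
+ c.AddItem(30, 3); // pairs: (30,3)
+ c.AddItem(10, 1); // pairs: (10,1) (30,3)
+ c.AddItem(30, 3); // returns false: key already present (debug builds assert the value matches)
+ int v;
+ bool found = c.GetItem(30, v); // binary search; found == true, v == 3
+#endif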
+
+// Ensures that the operand stack contains no pointers into the large-struct local area:
+// any large-struct value that currently lives there is copied onto the large-struct
+// operand stack (in stack order), and its operand-stack slot is repointed at the copy.
+void Interpreter::OpStackNormalize()
+{
+ size_t largeStructStackOffset = 0;
+ // Yes, I've written a quadratic algorithm here. I don't think it will matter in practice.
+ for (unsigned i = 0; i < m_curStackHt; i++)
+ {
+ InterpreterType tp = OpStackTypeGet(i);
+ if (tp.IsLargeStruct(&m_interpCeeInfo))
+ {
+ size_t sz = tp.Size(&m_interpCeeInfo);
+
+ void* addr = OpStackGet<void*>(i);
+ if (IsInLargeStructLocalArea(addr))
+ {
+ // We're going to allocate space at the top for the new value, then copy everything above the current slot
+ // up into that new space, then copy the value into the vacated space.
+ // How much will we have to copy?
+ size_t toCopy = m_largeStructOperandStackHt - largeStructStackOffset;
+
+ // Allocate space for the new value.
+ void* dummy = LargeStructOperandStackPush(sz);
+
+ // Remember where we're going to write to.
+ BYTE* fromAddr = m_largeStructOperandStack + largeStructStackOffset;
+ BYTE* toAddr = fromAddr + sz;
+ memcpy(toAddr, fromAddr, toCopy);
+
+ // Now copy the local variable value.
+ memcpy(fromAddr, addr, sz);
+ OpStackSet<void*>(i, fromAddr);
+ }
+ largeStructStackOffset += sz;
+ }
+ }
+ // When we've normalized the stack, it contains no pointers to locals.
+ m_orOfPushedInterpreterTypes = 0;
+}
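+// For example, if operand-stack slot 1 holds a 32-byte struct whose bytes live in the
+// large-struct *local* area, normalization pushes 32 bytes on the large-struct operand
+// stack, slides any operand-stack bytes logically above that position up by 32, copies
+// the struct into the vacated space, and repoints slot 1 at that copy.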
+
+#if INTERP_TRACING
+
+// Code copied from eeinterface.cpp in "compiler". Should be common...
+
+static const char* CorInfoTypeNames[] = {
+ "undef",
+ "void",
+ "bool",
+ "char",
+ "byte",
+ "ubyte",
+ "short",
+ "ushort",
+ "int",
+ "uint",
+ "long",
+ "ulong",
+ "nativeint",
+ "nativeuint",
+ "float",
+ "double",
+ "string",
+ "ptr",
+ "byref",
+ "valueclass",
+ "class",
+ "refany",
+ "var"
+};
+
+const char* eeGetMethodFullName(CEEInfo* info, CORINFO_METHOD_HANDLE hnd, const char** clsName)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ const char* returnType = NULL;
+
+ const char* className;
+ const char* methodName = info->getMethodName(hnd, &className);
+ if (clsName != NULL)
+ {
+ *clsName = className;
+ }
+
+ size_t length = 0;
+ unsigned i;
+
+ /* Generating the full signature is a two-pass process. First we have to walk
+ the components in order to assess the total size, then we allocate the buffer
+ and copy the elements into it.
+ */
+
+ /* Right now there is a race-condition in the EE, className can be NULL */
+
+ /* initialize length with length of className and '.' */
+
+ if (className)
+ {
+ length = strlen(className) + 1;
+ }
+ else
+ {
+ assert(strlen("<NULL>.") == 7);
+ length = 7;
+ }
+
+ /* add length of methodName and opening bracket */
+ length += strlen(methodName) + 1;
+
+ CORINFO_SIG_INFO sig;
+ info->getMethodSig(hnd, &sig);
+ CORINFO_ARG_LIST_HANDLE argLst = sig.args;
+
+ CORINFO_CLASS_HANDLE dummyCls;
+ for (i = 0; i < sig.numArgs; i++)
+ {
+ CorInfoType type = strip(info->getArgType(&sig, argLst, &dummyCls));
+
+ length += strlen(CorInfoTypeNames[type]);
+ argLst = info->getArgNext(argLst);
+ }
+
+ /* add ',' if there is more than one argument */
+
+ if (sig.numArgs > 1)
+ {
+ length += (sig.numArgs - 1);
+ }
+
+ if (sig.retType != CORINFO_TYPE_VOID)
+ {
+ returnType = CorInfoTypeNames[sig.retType];
+ length += strlen(returnType) + 1; // don't forget the delimiter ':'
+ }
+
+ /* add closing bracket and null terminator */
+
+ length += 2;
+
+ char* retName = new char[length];
+
+ /* Now generate the full signature string in the allocated buffer */
+
+ if (className)
+ {
+ strcpy_s(retName, length, className);
+ strcat_s(retName, length, ":");
+ }
+ else
+ {
+ strcpy_s(retName, length, "<NULL>.");
+ }
+
+ strcat_s(retName, length, methodName);
+
+ // append the signature
+ strcat_s(retName, length, "(");
+
+ argLst = sig.args;
+
+ for (i = 0; i < sig.numArgs; i++)
+ {
+ CorInfoType type = strip(info->getArgType(&sig, argLst, &dummyCls));
+ strcat_s(retName, length, CorInfoTypeNames[type]);
+
+ argLst = info->getArgNext(argLst);
+ if (i + 1 < sig.numArgs)
+ {
+ strcat_s(retName, length, ",");
+ }
+ }
+
+ strcat_s(retName, length, ")");
+
+ if (returnType)
+ {
+ strcat_s(retName, length, ":");
+ strcat_s(retName, length, returnType);
+ }
+
+ assert(strlen(retName) == length - 1);
+
+ return(retName);
+}
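+// The result looks like, e.g. (hypothetical method): "System.String:Concat(string,string):string".
+// The ":retType" suffix is omitted for void methods, and "<NULL>." replaces a missing class name.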
+
+const char* Interpreter::eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd)
+{
+ return ::eeGetMethodFullName(&m_interpCeeInfo, hnd);
+}
+
+const char* ILOpNames[256*2];
+bool ILOpNamesInited = false;
+
+void InitILOpNames()
+{
+ if (!ILOpNamesInited)
+ {
+ // Initialize the array.
+#define OPDEF(c,s,pop,push,args,type,l,s1,s2,ctrl) if (s1 == 0xfe || s1 == 0xff) { int ind ((unsigned(s1) << 8) + unsigned(s2)); ind -= 0xfe00; ILOpNames[ind] = s; }
+#include "opcode.def"
+#undef OPDEF
+ ILOpNamesInited = true;
+ }
+}
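+// Index layout of ILOpNames: 0xFE-prefixed (2-byte) opcodes occupy [0x000, 0x0FF]
+// (OPDEF gives them s1 == 0xfe), and 1-byte opcodes (s1 == 0xff) land at [0x100, 0x1FF],
+// which is why the 1-byte lookups below add (0x1 << 8).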
+const char* Interpreter::ILOp(BYTE* ilCodePtr)
+{
+ InitILOpNames();
+ BYTE b = *ilCodePtr;
+ if (b == 0xfe)
+ {
+ return ILOpNames[*(ilCodePtr + 1)];
+ }
+ else
+ {
+ return ILOpNames[(0x1 << 8) + b];
+ }
+}
+const char* Interpreter::ILOp1Byte(unsigned short ilInstrVal)
+{
+ InitILOpNames();
+ return ILOpNames[(0x1 << 8) + ilInstrVal];
+}
+const char* Interpreter::ILOp2Byte(unsigned short ilInstrVal)
+{
+ InitILOpNames();
+ return ILOpNames[ilInstrVal];
+}
+
+void Interpreter::PrintOStack()
+{
+ if (m_curStackHt == 0)
+ {
+ fprintf(GetLogFile(), " <empty>\n");
+ }
+ else
+ {
+ for (unsigned k = 0; k < m_curStackHt; k++)
+ {
+ CorInfoType cit = OpStackTypeGet(k).ToCorInfoType();
+ assert(IsStackNormalType(cit));
+ fprintf(GetLogFile(), " %4d: %10s: ", k, CorInfoTypeNames[cit]);
+ PrintOStackValue(k);
+ fprintf(GetLogFile(), "\n");
+ }
+ }
+ fflush(GetLogFile());
+}
+
+void Interpreter::PrintOStackValue(unsigned index)
+{
+ _ASSERTE_MSG(index < m_curStackHt, "precondition");
+ InterpreterType it = OpStackTypeGet(index);
+ if (it.IsLargeStruct(&m_interpCeeInfo))
+ {
+ PrintValue(it, OpStackGet<BYTE*>(index));
+ }
+ else
+ {
+ PrintValue(it, reinterpret_cast<BYTE*>(OpStackGetAddr(index, it.Size(&m_interpCeeInfo))));
+ }
+}
+
+void Interpreter::PrintLocals()
+{
+ if (m_methInfo->m_numLocals == 0)
+ {
+ fprintf(GetLogFile(), " <no locals>\n");
+ }
+ else
+ {
+ for (unsigned i = 0; i < m_methInfo->m_numLocals; i++)
+ {
+ InterpreterType it = m_methInfo->m_localDescs[i].m_type;
+ CorInfoType cit = it.ToCorInfoType();
+ void* localPtr = NULL;
+ if (it.IsLargeStruct(&m_interpCeeInfo))
+ {
+ void* structPtr = ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), sizeof(void**));
+ localPtr = *reinterpret_cast<void**>(structPtr);
+ }
+ else
+ {
+ localPtr = ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), it.Size(&m_interpCeeInfo));
+ }
+ fprintf(GetLogFile(), " loc%-4d: %10s: ", i, CorInfoTypeNames[cit]);
+ PrintValue(it, reinterpret_cast<BYTE*>(localPtr));
+ fprintf(GetLogFile(), "\n");
+ }
+ }
+ fflush(GetLogFile());
+}
+
+void Interpreter::PrintArgs()
+{
+ for (unsigned k = 0; k < m_methInfo->m_numArgs; k++)
+ {
+ CorInfoType cit = GetArgType(k).ToCorInfoType();
+ fprintf(GetLogFile(), " %4d: %10s: ", k, CorInfoTypeNames[cit]);
+ PrintArgValue(k);
+ fprintf(GetLogFile(), "\n");
+ }
+ fprintf(GetLogFile(), "\n");
+ fflush(GetLogFile());
+}
+
+void Interpreter::PrintArgValue(unsigned argNum)
+{
+ _ASSERTE_MSG(argNum < m_methInfo->m_numArgs, "precondition");
+ InterpreterType it = GetArgType(argNum);
+ PrintValue(it, GetArgAddr(argNum));
+}
+
+// Note that this is used to print non-stack-normal values, so
+// it must handle all cases.
+void Interpreter::PrintValue(InterpreterType it, BYTE* valAddr)
+{
+ switch (it.ToCorInfoType())
+ {
+ case CORINFO_TYPE_BOOL:
+ fprintf(GetLogFile(), "%s", ((*reinterpret_cast<INT8*>(valAddr)) ? "true" : "false"));
+ break;
+ case CORINFO_TYPE_BYTE:
+ fprintf(GetLogFile(), "%d", *reinterpret_cast<INT8*>(valAddr));
+ break;
+ case CORINFO_TYPE_UBYTE:
+ fprintf(GetLogFile(), "%u", *reinterpret_cast<UINT8*>(valAddr));
+ break;
+
+ case CORINFO_TYPE_SHORT:
+ fprintf(GetLogFile(), "%d", *reinterpret_cast<INT16*>(valAddr));
+ break;
+ case CORINFO_TYPE_USHORT: case CORINFO_TYPE_CHAR:
+ fprintf(GetLogFile(), "%u", *reinterpret_cast<UINT16*>(valAddr));
+ break;
+
+ case CORINFO_TYPE_INT:
+ fprintf(GetLogFile(), "%d", *reinterpret_cast<INT32*>(valAddr));
+ break;
+ case CORINFO_TYPE_UINT:
+ fprintf(GetLogFile(), "%u", *reinterpret_cast<UINT32*>(valAddr));
+ break;
+
+ case CORINFO_TYPE_NATIVEINT:
+ {
+ INT64 val = static_cast<INT64>(*reinterpret_cast<NativeInt*>(valAddr));
+ fprintf(GetLogFile(), "%lld (= 0x%llx)", val, val);
+ }
+ break;
+ case CORINFO_TYPE_NATIVEUINT:
+ {
+ UINT64 val = static_cast<UINT64>(*reinterpret_cast<NativeUInt*>(valAddr));
+ fprintf(GetLogFile(), "%lld (= 0x%llx)", val, val);
+ }
+ break;
+
+ case CORINFO_TYPE_BYREF:
+ fprintf(GetLogFile(), "0x%p", *reinterpret_cast<void**>(valAddr));
+ break;
+
+ case CORINFO_TYPE_LONG:
+ {
+ INT64 val = *reinterpret_cast<INT64*>(valAddr);
+ fprintf(GetLogFile(), "%lld (= 0x%llx)", val, val);
+ }
+ break;
+ case CORINFO_TYPE_ULONG:
+ fprintf(GetLogFile(), "%lld", *reinterpret_cast<UINT64*>(valAddr));
+ break;
+
+ case CORINFO_TYPE_CLASS:
+ {
+ Object* obj = *reinterpret_cast<Object**>(valAddr);
+ if (obj == NULL)
+ {
+ fprintf(GetLogFile(), "null");
+ }
+ else
+ {
+#ifdef _DEBUG
+ fprintf(GetLogFile(), "0x%p (%s) [", obj, obj->GetMethodTable()->GetDebugClassName());
+#else
+ fprintf(GetLogFile(), "0x%p (MT=0x%p) [", obj, obj->GetMethodTable());
+#endif
+ unsigned sz = obj->GetMethodTable()->GetBaseSize();
+ BYTE* objBytes = reinterpret_cast<BYTE*>(obj);
+ for (unsigned i = 0; i < sz; i++)
+ {
+ if (i > 0)
+ {
+ fprintf(GetLogFile(), " ");
+ }
+ fprintf(GetLogFile(), "0x%x", objBytes[i]);
+ }
+ fprintf(GetLogFile(), "]");
+ }
+ }
+ break;
+ case CORINFO_TYPE_VALUECLASS:
+ {
+ GCX_PREEMP();
+ fprintf(GetLogFile(), "<%s>: [", m_interpCeeInfo.getClassName(it.ToClassHandle()));
+ unsigned sz = getClassSize(it.ToClassHandle());
+ for (unsigned i = 0; i < sz; i++)
+ {
+ if (i > 0)
+ {
+ fprintf(GetLogFile(), " ");
+ }
+ fprintf(GetLogFile(), "0x%p", valAddr[i]);
+ }
+ fprintf(GetLogFile(), "]");
+ }
+ break;
+ case CORINFO_TYPE_REFANY:
+ fprintf(GetLogFile(), "<refany>");
+ break;
+ case CORINFO_TYPE_FLOAT:
+ fprintf(GetLogFile(), "%f", *reinterpret_cast<float*>(valAddr));
+ break;
+ case CORINFO_TYPE_DOUBLE:
+ fprintf(GetLogFile(), "%g", *reinterpret_cast<double*>(valAddr));
+ break;
+ case CORINFO_TYPE_PTR:
+ fprintf(GetLogFile(), "0x%p", *reinterpret_cast<void**>(valAddr));
+ break;
+ default:
+ _ASSERTE_MSG(false, "Unknown type in PrintValue.");
+ break;
+ }
+}
+#endif // INTERP_TRACING
+
+#ifdef _DEBUG
+void Interpreter::AddInterpMethInfo(InterpreterMethodInfo* methInfo)
+{
+ typedef InterpreterMethodInfo* InterpreterMethodInfoPtr;
+ // TODO: this requires synchronization.
+ const unsigned InitSize = 128;
+ if (s_interpMethInfos == NULL)
+ {
+ s_interpMethInfos = new InterpreterMethodInfoPtr[InitSize];
+ s_interpMethInfosAllocSize = InitSize;
+ }
+ if (s_interpMethInfosAllocSize == s_interpMethInfosCount)
+ {
+ unsigned newSize = s_interpMethInfosAllocSize * 2;
+ InterpreterMethodInfoPtr* tmp = new InterpreterMethodInfoPtr[newSize];
+ memcpy(tmp, s_interpMethInfos, s_interpMethInfosCount * sizeof(InterpreterMethodInfoPtr));
+ delete[] s_interpMethInfos;
+ s_interpMethInfos = tmp;
+ s_interpMethInfosAllocSize = newSize;
+ }
+ s_interpMethInfos[s_interpMethInfosCount] = methInfo;
+ s_interpMethInfosCount++;
+}
+
+int _cdecl Interpreter::CompareMethInfosByInvocations(const void* mi0in, const void* mi1in)
+{
+ const InterpreterMethodInfo* mi0 = *((const InterpreterMethodInfo**)mi0in);
+ const InterpreterMethodInfo* mi1 = *((const InterpreterMethodInfo**)mi1in);
+ if (mi0->m_invocations < mi1->m_invocations)
+ {
+ return -1;
+ }
+ else if (mi0->m_invocations == mi1->m_invocations)
+ {
+ return 0;
+ }
+ else
+ {
+ assert(mi0->m_invocations > mi1->m_invocations);
+ return 1;
+ }
+}
+
+#if INTERP_PROFILE
+int _cdecl Interpreter::CompareMethInfosByILInstrs(const void* mi0in, const void* mi1in)
+{
+ const InterpreterMethodInfo* mi0 = *((const InterpreterMethodInfo**)mi0in);
+ const InterpreterMethodInfo* mi1 = *((const InterpreterMethodInfo**)mi1in);
+ if (mi0->m_totIlInstructionsExeced < mi1->m_totIlInstructionsExeced) return 1;
+ else if (mi0->m_totIlInstructionsExeced == mi1->m_totIlInstructionsExeced) return 0;
+ else
+ {
+ assert(mi0->m_totIlInstructionsExeced > mi1->m_totIlInstructionsExeced);
+ return -1;
+ }
+}
+#endif // INTERP_PROFILE
+#endif // _DEBUG
+
+const int MIL = 1000000;
+
+// Leaving this disabled for now.
+#if 0
+unsigned __int64 ForceSigWalkCycles = 0;
+#endif
+
+void Interpreter::PrintPostMortemData()
+{
+ if (s_PrintPostMortemFlag.val(CLRConfig::INTERNAL_InterpreterPrintPostMortem) == 0)
+ return;
+
+ // Otherwise...
+
+#ifdef _DEBUG
+ // Let's print two things: a histogram of methods by invocation count (0, 1, ..., 10-or-more),
+ // and, for each 10% of methods, the cumulative % of invocations they represent (by 1% for the last 10%).
+
+ // First one doesn't require any sorting.
+ const unsigned HistoMax = 11;
+ unsigned histo[HistoMax];
+ unsigned numExecs[HistoMax];
+ for (unsigned k = 0; k < HistoMax; k++)
+ {
+ histo[k] = 0; numExecs[k] = 0;
+ }
+ for (unsigned k = 0; k < s_interpMethInfosCount; k++)
+ {
+ unsigned invokes = s_interpMethInfos[k]->m_invocations;
+ if (invokes > HistoMax - 1)
+ {
+ invokes = HistoMax - 1;
+ }
+ histo[invokes]++;
+ numExecs[invokes] += s_interpMethInfos[k]->m_invocations;
+ }
+
+ fprintf(GetLogFile(), "Histogram of method executions:\n");
+ fprintf(GetLogFile(), " # of execs | # meths (%%) | cum %% | %% cum execs\n");
+ fprintf(GetLogFile(), " -------------------------------------------------------\n");
+ float fTotMeths = float(s_interpMethInfosCount);
+ float fTotExecs = float(s_totalInvocations);
+ float numPct = 0.0f;
+ float numExecPct = 0.0f;
+ for (unsigned k = 0; k < HistoMax; k++)
+ {
+ fprintf(GetLogFile(), " %10d", k);
+ if (k == HistoMax - 1) // the last bucket aggregates methods with HistoMax-1 or more invocations
+ {
+ fprintf(GetLogFile(), "+ ");
+ }
+ else
+ {
+ fprintf(GetLogFile(), " ");
+ }
+ float pct = float(histo[k])*100.0f/fTotMeths;
+ numPct += pct;
+ float execPct = float(numExecs[k])*100.0f/fTotExecs;
+ numExecPct += execPct;
+ fprintf(GetLogFile(), "| %7d (%5.2f%%) | %6.2f%% | %6.2f%%\n", histo[k], pct, numPct, numExecPct);
+ }
+
+ // This sorts them in ascending order of number of invocations.
+ qsort(&s_interpMethInfos[0], s_interpMethInfosCount, sizeof(InterpreterMethodInfo*), &CompareMethInfosByInvocations);
+
+ fprintf(GetLogFile(), "\nFor methods sorted in ascending # of executions order, cumulative %% of executions:\n");
+ if (s_totalInvocations > 0)
+ {
+ fprintf(GetLogFile(), " %% of methods | max execs | cum %% of execs\n");
+ fprintf(GetLogFile(), " ------------------------------------------\n");
+ unsigned methNum = 0;
+ unsigned nNumExecs = 0;
+ float totExecsF = float(s_totalInvocations);
+ for (unsigned k = 10; k < 100; k += 10)
+ {
+ unsigned targ = unsigned((float(k)/100.0f)*float(s_interpMethInfosCount));
+ unsigned targLess1 = (targ > 0 ? targ - 1 : 0);
+ while (methNum < targ)
+ {
+ nNumExecs += s_interpMethInfos[methNum]->m_invocations;
+ methNum++;
+ }
+ float pctExecs = float(nNumExecs) * 100.0f / totExecsF;
+
+ fprintf(GetLogFile(), " %8d%% | %9d | %8.2f%%\n", k, s_interpMethInfos[targLess1]->m_invocations, pctExecs);
+
+ if (k == 90)
+ {
+ k++;
+ for (; k < 100; k++)
+ {
+ unsigned targ = unsigned((float(k)/100.0f)*float(s_interpMethInfosCount));
+ while (methNum < targ)
+ {
+ nNumExecs += s_interpMethInfos[methNum]->m_invocations;
+ methNum++;
+ }
+ pctExecs = float(nNumExecs) * 100.0f / totExecsF;
+
+ fprintf(GetLogFile(), " %8d%% | %9d | %8.2f%%\n", k, s_interpMethInfos[targLess1]->m_invocations, pctExecs);
+ }
+
+ // Now do 100%.
+ targ = s_interpMethInfosCount;
+ while (methNum < targ)
+ {
+ nNumExecs += s_interpMethInfos[methNum]->m_invocations;
+ methNum++;
+ }
+ pctExecs = float(nNumExecs) * 100.0f / totExecsF;
+ fprintf(GetLogFile(), " %8d%% | %9d | %8.2f%%\n", k, s_interpMethInfos[targLess1]->m_invocations, pctExecs);
+ }
+ }
+ }
+
+ fprintf(GetLogFile(), "\nTotal number of calls from interpreted code: %d.\n", s_totalInterpCalls);
+ fprintf(GetLogFile(), " Also, %d are intrinsics; %d of these are not currently handled intrinsically.\n",
+ s_totalInterpCallsToIntrinsics, s_totalInterpCallsToIntrinsicsUnhandled);
+ fprintf(GetLogFile(), " Of these, %d to potential property getters (%d of these dead simple), %d to setters.\n",
+ s_totalInterpCallsToGetters, s_totalInterpCallsToDeadSimpleGetters, s_totalInterpCallsToSetters);
+ fprintf(GetLogFile(), " Of the dead simple getter calls, %d have been short-circuited.\n",
+ s_totalInterpCallsToDeadSimpleGettersShortCircuited);
+
+ fprintf(GetLogFile(), "\nToken resolutions by category:\n");
+ fprintf(GetLogFile(), "Category | opportunities | calls | %%\n");
+ fprintf(GetLogFile(), "---------------------------------------------------\n");
+ for (unsigned i = RTK_Undefined; i < RTK_Count; i++)
+ {
+ float pct = 0.0;
+ if (s_tokenResolutionOpportunities[i] > 0)
+ pct = 100.0f * float(s_tokenResolutionCalls[i]) / float(s_tokenResolutionOpportunities[i]);
+ fprintf(GetLogFile(), "%12s | %15d | %9d | %6.2f%%\n",
+ s_tokenResolutionKindNames[i], s_tokenResolutionOpportunities[i], s_tokenResolutionCalls[i], pct);
+ }
+
+#if INTERP_PROFILE
+ fprintf(GetLogFile(), "Information on num of execs:\n");
+
+ UINT64 totILInstrs = 0;
+ for (unsigned i = 0; i < s_interpMethInfosCount; i++) totILInstrs += s_interpMethInfos[i]->m_totIlInstructionsExeced;
+
+ float totILInstrsF = float(totILInstrs);
+
+ fprintf(GetLogFile(), "\nTotal instructions = %lld.\n", totILInstrs);
+ fprintf(GetLogFile(), "\nTop <=10 methods by # of IL instructions executed.\n");
+ fprintf(GetLogFile(), "%10s | %9s | %10s | %10s | %8s | %s\n", "tot execs", "# invokes", "code size", "ratio", "% of tot", "Method");
+ fprintf(GetLogFile(), "----------------------------------------------------------------------------\n");
+
+ qsort(&s_interpMethInfos[0], s_interpMethInfosCount, sizeof(InterpreterMethodInfo*), &CompareMethInfosByILInstrs);
+
+ for (unsigned i = 0; i < min(10, s_interpMethInfosCount); i++)
+ {
+ unsigned ilCodeSize = unsigned(s_interpMethInfos[i]->m_ILCodeEnd - s_interpMethInfos[i]->m_ILCode);
+ fprintf(GetLogFile(), "%10lld | %9d | %10d | %10.2f | %8.2f%% | %s:%s\n",
+ s_interpMethInfos[i]->m_totIlInstructionsExeced,
+ s_interpMethInfos[i]->m_invocations,
+ ilCodeSize,
+ float(s_interpMethInfos[i]->m_totIlInstructionsExeced) / float(ilCodeSize),
+ float(s_interpMethInfos[i]->m_totIlInstructionsExeced) * 100.0f / totILInstrsF,
+ s_interpMethInfos[i]->m_clsName,
+ s_interpMethInfos[i]->m_methName);
+ }
+#endif // INTERP_PROFILE
+#endif // _DEBUG
+
+#if INTERP_ILINSTR_PROFILE
+ fprintf(GetLogFile(), "\nIL instruction profiling:\n");
+ // First, classify by categories.
+ unsigned totInstrs = 0;
+#if INTERP_ILCYCLE_PROFILE
+ unsigned __int64 totCycles = 0;
+ unsigned __int64 perMeasurementOverhead = CycleTimer::QueryOverhead();
+#endif // INTERP_ILCYCLE_PROFILE
+ for (unsigned i = 0; i < 256; i++)
+ {
+ s_ILInstrExecsByCategory[s_ILInstrCategories[i]] += s_ILInstrExecs[i];
+ totInstrs += s_ILInstrExecs[i];
+#if INTERP_ILCYCLE_PROFILE
+ unsigned __int64 cycles = s_ILInstrCycles[i];
+ if (cycles > s_ILInstrExecs[i] * perMeasurementOverhead) cycles -= s_ILInstrExecs[i] * perMeasurementOverhead;
+ else cycles = 0;
+ s_ILInstrCycles[i] = cycles;
+ s_ILInstrCyclesByCategory[s_ILInstrCategories[i]] += cycles;
+ totCycles += cycles;
+#endif // INTERP_ILCYCLE_PROFILE
+ }
+ unsigned totInstrs2Byte = 0;
+#if INTERP_ILCYCLE_PROFILE
+ unsigned __int64 totCycles2Byte = 0;
+#endif // INTERP_ILCYCLE_PROFILE
+ for (unsigned i = 0; i < CountIlInstr2Byte; i++)
+ {
+ unsigned ind = 0x100 + i;
+ s_ILInstrExecsByCategory[s_ILInstrCategories[ind]] += s_ILInstr2ByteExecs[i];
+ totInstrs += s_ILInstr2ByteExecs[i];
+ totInstrs2Byte += s_ILInstr2ByteExecs[i];
+#if INTERP_ILCYCLE_PROFILE
+ unsigned __int64 cycles = s_ILInstrCycles[ind];
+ if (cycles > s_ILInstrExecs[ind] * perMeasurementOverhead) cycles -= s_ILInstrExecs[ind] * perMeasurementOverhead;
+ else cycles = 0;
+ s_ILInstrCycles[ind] = cycles; // note: "ind", not "i" -- the 2-byte entries live at 0x100 and up
+ s_ILInstrCyclesByCategory[s_ILInstrCategories[ind]] += cycles;
+ totCycles += cycles;
+ totCycles2Byte += cycles;
+#endif // INTERP_ILCYCLE_PROFILE
+ }
+
+ // Now sort the categories by # of occurrences.
+
+ InstrExecRecord ieps[256 + CountIlInstr2Byte];
+ for (unsigned short i = 0; i < 256; i++)
+ {
+ ieps[i].m_instr = i; ieps[i].m_is2byte = false; ieps[i].m_execs = s_ILInstrExecs[i];
+#if INTERP_ILCYCLE_PROFILE
+ if (i == CEE_BREAK)
+ {
+ ieps[i].m_cycles = 0;
+ continue; // Don't count these if they occur...
+ }
+ ieps[i].m_cycles = s_ILInstrCycles[i];
+ assert((ieps[i].m_execs != 0) || (ieps[i].m_cycles == 0)); // Cycles can be zero for non-zero execs because of measurement correction.
+#endif // INTERP_ILCYCLE_PROFILE
+ }
+ for (unsigned short i = 0; i < CountIlInstr2Byte; i++)
+ {
+ int ind = 256 + i;
+ ieps[ind].m_instr = i; ieps[ind].m_is2byte = true; ieps[ind].m_execs = s_ILInstr2ByteExecs[i];
+#if INTERP_ILCYCLE_PROFILE
+ ieps[ind].m_cycles = s_ILInstrCycles[ind];
+ assert((ieps[ind].m_execs != 0) || (ieps[ind].m_cycles == 0)); // Cycles can be zero for non-zero execs because of measurement correction.
+#endif // INTERP_ILCYCLE_PROFILE
+ }
+
+ qsort(&ieps[0], 256 + CountIlInstr2Byte, sizeof(InstrExecRecord), &InstrExecRecord::Compare);
+
+ fprintf(GetLogFile(), "\nInstructions (%d total, %d 1-byte):\n", totInstrs, totInstrs - totInstrs2Byte);
+#if INTERP_ILCYCLE_PROFILE
+ if (s_callCycles > s_calls * perMeasurementOverhead) s_callCycles -= s_calls * perMeasurementOverhead;
+ else s_callCycles = 0;
+ fprintf(GetLogFile(), " MCycles (%lld total, %lld 1-byte, %lld calls (%d calls, %10.2f cyc/call):\n",
+ totCycles/MIL, (totCycles - totCycles2Byte)/MIL, s_callCycles/MIL, s_calls, float(s_callCycles)/float(s_calls));
+#if 0
+ extern unsigned __int64 MetaSigCtor1Cycles;
+ fprintf(GetLogFile(), " MetaSig(MethodDesc, TypeHandle) ctor: %lld MCycles.\n",
+ MetaSigCtor1Cycles/MIL);
+ fprintf(GetLogFile(), " ForceSigWalk: %lld MCycles.\n",
+ ForceSigWalkCycles/MIL);
+#endif
+#endif // INTERP_ILCYCLE_PROFILE
+
+ PrintILProfile(&ieps[0], totInstrs
+#if INTERP_ILCYCLE_PROFILE
+ , totCycles
+#endif // INTERP_ILCYCLE_PROFILE
+ );
+
+ fprintf(GetLogFile(), "\nInstructions grouped by category: (%d total, %d 1-byte):\n", totInstrs, totInstrs - totInstrs2Byte);
+#if INTERP_ILCYCLE_PROFILE
+ fprintf(GetLogFile(), " MCycles (%lld total, %lld 1-byte):\n",
+ totCycles/MIL, (totCycles - totCycles2Byte)/MIL);
+#endif // INTERP_ILCYCLE_PROFILE
+ for (unsigned short i = 0; i < 256 + CountIlInstr2Byte; i++)
+ {
+ if (i < 256)
+ {
+ ieps[i].m_instr = i; ieps[i].m_is2byte = false;
+ }
+ else
+ {
+ ieps[i].m_instr = i - 256; ieps[i].m_is2byte = true;
+ }
+ ieps[i].m_execs = s_ILInstrExecsByCategory[i];
+#if INTERP_ILCYCLE_PROFILE
+ ieps[i].m_cycles = s_ILInstrCyclesByCategory[i];
+#endif // INTERP_ILCYCLE_PROFILE
+ }
+ qsort(&ieps[0], 256 + CountIlInstr2Byte, sizeof(InstrExecRecord), &InstrExecRecord::Compare);
+ PrintILProfile(&ieps[0], totInstrs
+#if INTERP_ILCYCLE_PROFILE
+ , totCycles
+#endif // INTERP_ILCYCLE_PROFILE
+ );
+
+#if 0
+ // Early debugging code.
+ fprintf(GetLogFile(), "\nInstructions grouped category mapping:\n", totInstrs, totInstrs - totInstrs2Byte);
+ for (unsigned short i = 0; i < 256; i++)
+ {
+ unsigned short cat = s_ILInstrCategories[i];
+ if (cat < 256) {
+ fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp1Byte(i), ILOp1Byte(cat));
+ } else {
+ fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp1Byte(i), ILOp2Byte(cat - 256));
+ }
+ }
+ for (unsigned short i = 0; i < CountIlInstr2Byte; i++)
+ {
+ unsigned ind = 256 + i;
+ unsigned short cat = s_ILInstrCategories[ind];
+ if (cat < 256) {
+ fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp2Byte(i), ILOp1Byte(cat));
+ } else {
+ fprintf(GetLogFile(), "Instr: %12s ==> %12s.\n", ILOp2Byte(i), ILOp2Byte(cat - 256));
+ }
+ }
+#endif
+#endif // INTERP_ILINSTR_PROFILE
+}
+
+#if INTERP_ILINSTR_PROFILE
+
+const int K = 1000;
+
+// static
+void Interpreter::PrintILProfile(Interpreter::InstrExecRecord *recs, unsigned int totInstrs
+#if INTERP_ILCYCLE_PROFILE
+ , unsigned __int64 totCycles
+#endif // INTERP_ILCYCLE_PROFILE
+ )
+{
+ float fTotInstrs = float(totInstrs);
+ fprintf(GetLogFile(), "Instruction | execs | %% | cum %%");
+#if INTERP_ILCYCLE_PROFILE
+ float fTotCycles = float(totCycles);
+ fprintf(GetLogFile(), "| KCycles | %% | cum %% | cyc/inst\n");
+ fprintf(GetLogFile(), "--------------------------------------------------"
+ "-----------------------------------------\n");
+#else
+ fprintf(GetLogFile(), "\n-------------------------------------------\n");
+#endif
+ float numPct = 0.0f;
+#if INTERP_ILCYCLE_PROFILE
+ float numCyclePct = 0.0f;
+#endif // INTERP_ILCYCLE_PROFILE
+ for (unsigned i = 0; i < 256 + CountIlInstr2Byte; i++)
+ {
+ float pct = 0.0f;
+ if (totInstrs > 0) pct = float(recs[i].m_execs) * 100.0f / fTotInstrs;
+ numPct += pct;
+ if (recs[i].m_execs > 0)
+ {
+ fprintf(GetLogFile(), "%12s | %9d | %6.2f%% | %6.2f%%",
+ (recs[i].m_is2byte ? ILOp2Byte(recs[i].m_instr) : ILOp1Byte(recs[i].m_instr)), recs[i].m_execs,
+ pct, numPct);
+#if INTERP_ILCYCLE_PROFILE
+ pct = 0.0f;
+ if (totCycles > 0) pct = float(recs[i].m_cycles) * 100.0f / fTotCycles;
+ numCyclePct += pct;
+ float cyclesPerInst = float(recs[i].m_cycles) / float(recs[i].m_execs);
+ fprintf(GetLogFile(), "| %12llu | %6.2f%% | %6.2f%% | %11.2f",
+ recs[i].m_cycles/K, pct, numCyclePct, cyclesPerInst);
+#endif // INTERP_ILCYCLE_PROFILE
+ fprintf(GetLogFile(), "\n");
+ }
+ }
+}
+#endif // INTERP_ILINSTR_PROFILE
+
+#endif // FEATURE_INTERPRETER
diff --git a/src/vm/interpreter.h b/src/vm/interpreter.h
new file mode 100644
index 0000000000..6ecaded9e3
--- /dev/null
+++ b/src/vm/interpreter.h
@@ -0,0 +1,2053 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef INTERPRETER_H_DEFINED
+#define INTERPRETER_H_DEFINED 1
+
+#include "corjit.h"
+#include "corinfo.h"
+#include "codeman.h"
+#include "jitinterface.h"
+#include "simplerhash.h"
+#include "stack.h"
+#include "crst.h"
+#include "callhelpers.h"
+
+typedef SSIZE_T NativeInt;
+typedef SIZE_T NativeUInt;
+typedef SIZE_T NativePtr;
+
+// Determines whether we interpret IL stubs. (We might disable this selectively
+// for some architectures.)
+#define INTERP_ILSTUBS 1
+
+// If this is set, we keep track of extra information about IL instructions executed per-method.
+#define INTERP_PROFILE 0
+
+// If this is set, we track the distribution of IL instructions.
+#define INTERP_ILINSTR_PROFILE 0
+
+#define INTERP_ILCYCLE_PROFILE 0
+#if INTERP_ILCYCLE_PROFILE
+#if !INTERP_ILINSTR_PROFILE
+#error INTERP_ILCYCLE_PROFILE may only be set if INTERP_ILINSTR_PROFILE is also set.
+#endif
+#endif
+#if defined(_DEBUG)
+#define INTERPLOG(...) if (s_TraceInterpreterVerboseFlag.val(CLRConfig::INTERNAL_TraceInterpreterVerbose)) { fprintf(GetLogFile(), __VA_ARGS__); }
+#else
+#define INTERPLOG(...)
+#endif
+#if defined(_DEBUG) || INTERP_ILINSTR_PROFILE
+// I define "INTERP_TRACING", rather than just using _DEBUG, so that I can easily make a build
+// in which tracing is enabled in retail.
+#define INTERP_TRACING 1
+#else
+#define INTERP_TRACING 0
+#endif // defined(_DEBUG) || INTERP_ILINSTR_PROFILE
+
+#if INTERP_TRACING
+#define InterpTracingArg(x) ,x
+#else
+#define InterpTracingArg(x)
+#endif
+
+#define FEATURE_INTERPRETER_DEADSIMPLE_OPT 0
+
+#define NYI_INTERP(msg) _ASSERTE_MSG(false, msg)
+// I wanted to define NYI_INTERP as the following in retail:
+// #define NYI_INTERP(msg) _ASSERTE_ALL_BUILDS(__FILE__, false)
+// but doing so gave a very odd unreachable code error.
+
+
+// To allow keeping a pointer (index) to the vararg cookie argument to implement arglist.
+// Use sentinel value of NO_VA_ARGNUM.
+#define NO_VA_ARGNUM UINT_MAX
+
+// First, a set of utility routines on CorInfoTypes.
+
+// Returns "true" iff "cit" is "stack-normal": all integer types with byte size less than 4
+// are folded to CORINFO_TYPE_INT; all remaining unsigned types are folded to their signed counterparts.
+bool IsStackNormalType(CorInfoType cit);
+
+// Returns the stack-normal CorInfoType that contains "cit".
+CorInfoType CorInfoTypeStackNormalize(CorInfoType cit);
+
+// Returns the (byte) size of "cit". Requires that "cit" is not a CORINFO_TYPE_VALUECLASS.
+inline size_t CorInfoTypeSize(CorInfoType cit);
+
+// Returns true iff "cit" is an unsigned integral type.
+bool CorInfoTypeIsUnsigned(CorInfoType cit);
+
+// Returns true iff "cit" is an integral type.
+bool CorInfoTypeIsIntegral(CorInfoType cit);
+
+// Returns true iff "cet" is an unsigned integral type.
+bool CorElemTypeIsUnsigned(CorElementType cet);
+
+// Returns true iff "cit" is an integral type.
+bool CorInfoTypeIsFloatingPoint(CorInfoType cit);
+
+// Returns true iff "cit" is a pointer type (mgd/unmgd pointer, or native int).
+bool CorInfoTypeIsPointer(CorInfoType cit);
+
+// Requires that "cit" is stack-normal; returns its (byte) size.
+inline size_t CorInfoTypeStackNormalSize(CorInfoType cit)
+{
+ assert(IsStackNormalType(cit));
+ return CorInfoTypeSize(cit);
+}
+
+inline unsigned getClassSize(CORINFO_CLASS_HANDLE clsHnd)
+{
+ TypeHandle VMClsHnd(clsHnd);
+ return VMClsHnd.GetSize();
+}
+
+// The values of this enumeration are in one-to-one correspondence with CorInfoType --
+// just shifted so that they're the value stored in an interpreter type for non-value-class
+// CorinfoTypes.
+enum CorInfoTypeShifted
+{
+ CORINFO_TYPE_SHIFTED_UNDEF = unsigned(CORINFO_TYPE_UNDEF) << 2, //0x0 << 2 = 0x0
+ CORINFO_TYPE_SHIFTED_VOID = unsigned(CORINFO_TYPE_VOID) << 2, //0x1 << 2 = 0x4
+ CORINFO_TYPE_SHIFTED_BOOL = unsigned(CORINFO_TYPE_BOOL) << 2, //0x2 << 2 = 0x8
+ CORINFO_TYPE_SHIFTED_CHAR = unsigned(CORINFO_TYPE_CHAR) << 2, //0x3 << 2 = 0xC
+ CORINFO_TYPE_SHIFTED_BYTE = unsigned(CORINFO_TYPE_BYTE) << 2, //0x4 << 2 = 0x10
+ CORINFO_TYPE_SHIFTED_UBYTE = unsigned(CORINFO_TYPE_UBYTE) << 2, //0x5 << 2 = 0x14
+ CORINFO_TYPE_SHIFTED_SHORT = unsigned(CORINFO_TYPE_SHORT) << 2, //0x6 << 2 = 0x18
+ CORINFO_TYPE_SHIFTED_USHORT = unsigned(CORINFO_TYPE_USHORT) << 2, //0x7 << 2 = 0x1C
+ CORINFO_TYPE_SHIFTED_INT = unsigned(CORINFO_TYPE_INT) << 2, //0x8 << 2 = 0x20
+ CORINFO_TYPE_SHIFTED_UINT = unsigned(CORINFO_TYPE_UINT) << 2, //0x9 << 2 = 0x24
+ CORINFO_TYPE_SHIFTED_LONG = unsigned(CORINFO_TYPE_LONG) << 2, //0xa << 2 = 0x28
+ CORINFO_TYPE_SHIFTED_ULONG = unsigned(CORINFO_TYPE_ULONG) << 2, //0xb << 2 = 0x2C
+ CORINFO_TYPE_SHIFTED_NATIVEINT = unsigned(CORINFO_TYPE_NATIVEINT) << 2, //0xc << 2 = 0x30
+ CORINFO_TYPE_SHIFTED_NATIVEUINT = unsigned(CORINFO_TYPE_NATIVEUINT) << 2, //0xd << 2 = 0x34
+ CORINFO_TYPE_SHIFTED_FLOAT = unsigned(CORINFO_TYPE_FLOAT) << 2, //0xe << 2 = 0x38
+ CORINFO_TYPE_SHIFTED_DOUBLE = unsigned(CORINFO_TYPE_DOUBLE) << 2, //0xf << 2 = 0x3C
+ CORINFO_TYPE_SHIFTED_STRING = unsigned(CORINFO_TYPE_STRING) << 2, //0x10 << 2 = 0x40
+ CORINFO_TYPE_SHIFTED_PTR = unsigned(CORINFO_TYPE_PTR) << 2, //0x11 << 2 = 0x44
+ CORINFO_TYPE_SHIFTED_BYREF = unsigned(CORINFO_TYPE_BYREF) << 2, //0x12 << 2 = 0x48
+ CORINFO_TYPE_SHIFTED_VALUECLASS = unsigned(CORINFO_TYPE_VALUECLASS) << 2, //0x13 << 2 = 0x4C
+ CORINFO_TYPE_SHIFTED_CLASS = unsigned(CORINFO_TYPE_CLASS) << 2, //0x14 << 2 = 0x50
+ CORINFO_TYPE_SHIFTED_REFANY = unsigned(CORINFO_TYPE_REFANY) << 2, //0x15 << 2 = 0x54
+ CORINFO_TYPE_SHIFTED_VAR = unsigned(CORINFO_TYPE_VAR) << 2, //0x16 << 2 = 0x58
+};
+
+class InterpreterType
+{
+ // m_tp is declared as a CORINFO_CLASS_HANDLE, but its value is actually an encoding.
+ // We assume that the two low-order bits of a "real" CORINFO_CLASS_HANDLE are zero, then
+ // use them as follows:
+ // 0x0 ==> if "ci" is a non-struct CORINFO_TYPE_* value, m_tp contents are (ci << 2).
+ // 0x1, 0x3 ==> is a CORINFO_CLASS_HANDLE "sh" for a struct type, or'd with 0x1 and possibly 0x2.
+ // 0x2 is added to indicate that an instance does not fit in an INT64 stack slot on the platform,
+ // and should be referenced via a level of indirection.
+ // 0x2 (exactly) indicates that it is a "native struct type".
+ //
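+ // Worked examples (illustrative): CORINFO_TYPE_INT is stored as 0x8 << 2 == 0x20; a
+ // small struct with handle H is stored as (H | 0x1); a struct larger than an INT64
+ // slot as (H | 0x3); a native value type as (H | 0x2), the bit the VM already set in H.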
+ CORINFO_CLASS_HANDLE m_tp;
+
+public:
+ // Default ==> undefined.
+ InterpreterType()
+ : m_tp(reinterpret_cast<CORINFO_CLASS_HANDLE>((static_cast<intptr_t>(CORINFO_TYPE_UNDEF) << 2)))
+ {}
+
+ // Requires that "cit" is not CORINFO_TYPE_VALUECLASS.
+ InterpreterType(CorInfoType cit)
+ : m_tp(reinterpret_cast<CORINFO_CLASS_HANDLE>((static_cast<intptr_t>(cit) << 2)))
+ {
+ assert(cit != CORINFO_TYPE_VALUECLASS);
+ }
+
+ // Requires that "cet" is not ELEMENT_TYPE_VALUETYPE.
+ InterpreterType(CorElementType cet)
+ : m_tp(reinterpret_cast<CORINFO_CLASS_HANDLE>((static_cast<intptr_t>(CEEInfo::asCorInfoType(cet)) << 2)))
+ {
+ assert(cet != ELEMENT_TYPE_VALUETYPE);
+ }
+
+ InterpreterType(CEEInfo* comp, CORINFO_CLASS_HANDLE sh)
+ {
+ GCX_PREEMP();
+
+ // TODO: might wish to make a different constructor, for the cases where this is possible...
+ TypeHandle typHnd(sh);
+ if (typHnd.IsNativeValueType())
+ {
+ intptr_t shAsInt = reinterpret_cast<intptr_t>(sh);
+ assert((shAsInt & 0x1) == 0); // The 0x2 bit might already be set by the VM! This is ok, because it's only set for native value types. This is a bit slimey...
+ m_tp = reinterpret_cast<CORINFO_CLASS_HANDLE>(shAsInt | 0x2);
+ }
+ else
+ {
+ CorInfoType cit = comp->getTypeForPrimitiveValueClass(sh);
+ if (cit != CORINFO_TYPE_UNDEF)
+ {
+ m_tp = reinterpret_cast<CORINFO_CLASS_HANDLE>(static_cast<intptr_t>(cit) << 2);
+ }
+ else
+ {
+ assert((comp->getClassAttribs(sh) & CORINFO_FLG_VALUECLASS) != 0);
+ intptr_t shAsInt = reinterpret_cast<intptr_t>(sh);
+ assert((shAsInt & 0x3) == 0);
+ intptr_t bits = 0x1; // All value classes (structs) get 0x1 set.
+ if (getClassSize(sh) > sizeof(INT64))
+ {
+ bits |= 0x2; // "Large" structs get 0x2 set, also.
+ }
+ m_tp = reinterpret_cast<CORINFO_CLASS_HANDLE>(shAsInt | bits);
+ }
+ }
+ }
+
+ bool operator==(const InterpreterType& it2) const { return m_tp == it2.m_tp; }
+ bool operator!=(const InterpreterType& it2) const { return m_tp != it2.m_tp; }
+
+ CorInfoType ToCorInfoType() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ intptr_t iTypeAsInt = reinterpret_cast<intptr_t>(m_tp);
+ if ((iTypeAsInt & 0x3) == 0x0)
+ {
+ return static_cast<CorInfoType>(iTypeAsInt >> 2);
+ }
+ // Is a class or struct (or refany?).
+ else
+ {
+ return CORINFO_TYPE_VALUECLASS;
+ }
+ }
+
+ CorInfoType ToCorInfoTypeNotStruct() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE_MSG((reinterpret_cast<intptr_t>(m_tp) & 0x3) == 0x0, "precondition: not a struct type.");
+
+ intptr_t iTypeAsInt = reinterpret_cast<intptr_t>(m_tp);
+ return static_cast<CorInfoType>(iTypeAsInt >> 2);
+ }
+
+ CorInfoTypeShifted ToCorInfoTypeShifted() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE_MSG((reinterpret_cast<intptr_t>(m_tp) & 0x3) == 0x0, "precondition: not a struct type.");
+
+ return static_cast<CorInfoTypeShifted>(reinterpret_cast<size_t>(m_tp));
+ }
+
+ CORINFO_CLASS_HANDLE ToClassHandle() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ intptr_t asInt = reinterpret_cast<intptr_t>(m_tp);
+ assert((asInt & 0x3) != 0);
+ return reinterpret_cast<CORINFO_CLASS_HANDLE>(asInt & (~0x3));
+ }
+
+ size_t AsRaw() const // Just hand out the raw bits. Be careful using this! Use something else if you can!
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return reinterpret_cast<size_t>(m_tp);
+ }
+
+ // Returns the stack-normalized type for "this".
+ InterpreterType StackNormalize() const;
+
+ // Returns the (byte) size of "this". Requires "ceeInfo" for the struct case.
+ __forceinline size_t Size(CEEInfo* ceeInfo) const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ intptr_t asInt = reinterpret_cast<intptr_t>(m_tp);
+ intptr_t asIntBits = (asInt & 0x3);
+ if (asIntBits == 0)
+ {
+ return CorInfoTypeSize(ToCorInfoType());
+ }
+ else if (asIntBits == 0x2)
+ {
+ // Here we're breaking abstraction, and taking advantage of the fact that 0x2
+ // is the low-bit encoding of "native struct type" both for InterpreterType and for
+ // TypeHandle.
+ TypeHandle typHnd(m_tp);
+ assert(typHnd.IsNativeValueType());
+ return typHnd.AsNativeValueType()->GetNativeSize();
+ }
+ else
+ {
+ return getClassSize(ToClassHandle());
+ }
+ }
+
+ __forceinline size_t SizeNotStruct() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE_MSG((reinterpret_cast<intptr_t>(m_tp) & 0x3) == 0, "Precondition: is not a struct type!");
+ return CorInfoTypeSize(ToCorInfoTypeNotStruct());
+ }
+
+ // Requires that "it" is stack-normal; returns its (byte) size.
+ size_t StackNormalSize() const
+ {
+ CorInfoType cit = ToCorInfoType();
+ assert(IsStackNormalType(cit)); // Precondition.
+ return CorInfoTypeStackNormalSize(cit);
+ }
+
+ // Is it a struct? (But don't include "native struct type").
+ bool IsStruct() const
+ {
+ intptr_t asInt = reinterpret_cast<intptr_t>(m_tp);
+ return (asInt & 0x1) == 0x1 || (asInt == CORINFO_TYPE_SHIFTED_REFANY);
+ }
+
+ // Returns "true" iff represents a large (> INT64 size) struct.
+ bool IsLargeStruct(CEEInfo* ceeInfo) const
+ {
+ intptr_t asInt = reinterpret_cast<intptr_t>(m_tp);
+#ifdef _TARGET_AMD64_
+ if (asInt == CORINFO_TYPE_SHIFTED_REFANY)
+ {
+ return true;
+ }
+#endif
+ return (asInt & 0x3) == 0x3
+ || ((asInt & 0x3) == 0x2 && Size(ceeInfo) > sizeof(INT64));
+ }
+
+#ifdef _DEBUG
+ bool MatchesWork(const InterpreterType it2, CEEInfo* info) const;
+
+ bool Matches(const InterpreterType it2, CEEInfo* info) const
+ {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ return MatchesWork(it2, info) || it2.MatchesWork(*this, info);
+ }
+#endif // _DEBUG
+};
+
+#ifndef DACCESS_COMPILE
+// This class does whatever "global" caching (applicable to all executions after the first, as opposed
+// to caching within a single execution) we do. It is parameterized over the "Key" type (which is required
+// to be an integral type, to allow binary search), and the "Val" type of things cached.
+template<typename Key, typename Val>
+class InterpreterCache
+{
+public:
+ InterpreterCache();
+
+ // Returns "false" if "k" is already present, otherwise "true". Requires that "v" == current mapping
+ // if "k" is already present.
+ bool AddItem(Key k, Val v);
+ bool GetItem(Key k, Val& v);
+
+private:
+ struct KeyValPair
+ {
+ Key m_key;
+ Val m_val;
+ };
+
+ // This is kept ordered by m_key, to enable binary search.
+ KeyValPair* m_pairs;
+ unsigned short m_allocSize;
+ unsigned short m_count;
+
+ static const unsigned InitSize = 8;
+
+ void EnsureCanInsert();
+
+#ifdef _DEBUG
+ static void AddAllocBytes(unsigned bytes);
+#endif // _DEBUG
+};
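+
+// A minimal usage sketch (hypothetical key/value types; the real instantiations
+// appear in the typedefs further below):
+//
+//   InterpreterCache<unsigned, int> cache;
+//   cache.AddItem(32, 7);       // "true": key 32 was not yet present
+//   int v;
+//   if (cache.GetItem(32, v))   // binary search over the sorted m_pairs array
+//   {
+//       assert(v == 7);
+//   }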
+
+#ifdef _DEBUG
+enum CachedItemKind
+{
+ CIK_Undefined,
+ CIK_CallSite,
+ CIK_StaticField,
+ CIK_InstanceField,
+ CIK_ClassHandle,
+};
+#endif // _DEBUG
+
+struct StaticFieldCacheEntry
+{
+ void* m_srcPtr;
+ UINT m_sz;
+ InterpreterType m_it;
+
+ StaticFieldCacheEntry(void* srcPtr, UINT sz, InterpreterType it) : m_srcPtr(srcPtr), m_sz(sz), m_it(it) {}
+
+#ifdef _DEBUG
+ bool operator==(const StaticFieldCacheEntry& entry) const
+ {
+ return m_srcPtr == entry.m_srcPtr && m_sz == entry.m_sz && m_it == entry.m_it;
+ }
+#endif // _DEBUG
+};
+
+// "small" part of CORINFO_SIG_INFO, sufficient for the interpreter to call the method so decribed
+struct CORINFO_SIG_INFO_SMALL
+{
+ CORINFO_CLASS_HANDLE retTypeClass; // if the return type is a value class, this is its handle (enums are normalized)
+ unsigned numArgs : 16;
+ CorInfoCallConv callConv: 8;
+ CorInfoType retType : 8;
+
+ CorInfoCallConv getCallConv() { return CorInfoCallConv((callConv & CORINFO_CALLCONV_MASK)); }
+ bool hasThis() { return ((callConv & CORINFO_CALLCONV_HASTHIS) != 0); }
+ bool hasExplicitThis() { return ((callConv & CORINFO_CALLCONV_EXPLICITTHIS) != 0); }
+ unsigned totalILArgs() { return (numArgs + hasThis()); }
+ bool isVarArg() { return ((getCallConv() == CORINFO_CALLCONV_VARARG) || (getCallConv() == CORINFO_CALLCONV_NATIVEVARARG)); }
+ bool hasTypeArg() { return ((callConv & CORINFO_CALLCONV_PARAMTYPE) != 0); }
+
+#ifdef _DEBUG
+ bool operator==(const CORINFO_SIG_INFO_SMALL& csis) const
+ {
+ return retTypeClass == csis.retTypeClass
+ && numArgs == csis.numArgs
+ && callConv == csis.callConv
+ && retType == csis.retType;
+ }
+#endif // _DEBUG
+};
+
+struct CallSiteCacheData
+{
+ MethodDesc* m_pMD;
+
+ CORINFO_SIG_INFO_SMALL m_sigInfo;
+
+ CallSiteCacheData(MethodDesc* pMD, const CORINFO_SIG_INFO_SMALL& sigInfo)
+ : m_pMD(pMD), m_sigInfo(sigInfo)
+ {}
+
+#ifdef _DEBUG
+ bool operator==(const CallSiteCacheData& cscd) const
+ {
+ return m_pMD == cscd.m_pMD
+ && m_sigInfo == cscd.m_sigInfo;
+ }
+#endif // _DEBUG
+};
+
+struct CachedItem
+{
+#ifdef _DEBUG
+ CachedItemKind m_tag;
+#endif // _DEBUG
+ union
+ {
+ // m_tag == CIK_CallSite
+ CallSiteCacheData* m_callSiteInfo;
+ // m_tag == CIK_StaticField
+ StaticFieldCacheEntry* m_staticFieldAddr;
+ // m_tag == CIK_InstanceField
+ FieldDesc* m_instanceField;
+ // m_tag == CIK_ClassHandle
+ CORINFO_CLASS_HANDLE m_clsHnd;
+ } m_value;
+
+ CachedItem()
+#ifdef _DEBUG
+ : m_tag(CIK_Undefined)
+#endif
+ {}
+
+#ifdef _DEBUG
+ bool operator==(const CachedItem& ci)
+ {
+ if (m_tag != ci.m_tag) return false;
+ switch (m_tag)
+ {
+ case CIK_CallSite:
+ return *m_value.m_callSiteInfo == *ci.m_value.m_callSiteInfo;
+ case CIK_StaticField:
+ return *m_value.m_staticFieldAddr == *ci.m_value.m_staticFieldAddr;
+ case CIK_InstanceField:
+ return m_value.m_instanceField == ci.m_value.m_instanceField;
+ case CIK_ClassHandle:
+ return m_value.m_clsHnd == ci.m_value.m_clsHnd;
+ default:
+ return true;
+ }
+ }
+#endif
+
+ CachedItem(CallSiteCacheData* callSiteInfo)
+#ifdef _DEBUG
+ : m_tag(CIK_CallSite)
+#endif
+ {
+ m_value.m_callSiteInfo = callSiteInfo;
+ }
+
+ CachedItem(StaticFieldCacheEntry* staticFieldAddr)
+#ifdef _DEBUG
+ : m_tag(CIK_StaticField)
+#endif
+ {
+ m_value.m_staticFieldAddr = staticFieldAddr;
+ }
+
+ CachedItem(FieldDesc* instanceField)
+#ifdef _DEBUG
+ : m_tag(CIK_InstanceField)
+#endif
+ {
+ m_value.m_instanceField = instanceField;
+ }
+
+ CachedItem(CORINFO_CLASS_HANDLE m_clsHnd)
+#ifdef _DEBUG
+ : m_tag(CIK_ClassHandle)
+#endif
+ {
+ m_value.m_clsHnd = m_clsHnd;
+ }
+};
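+
+// A sketch of the union's tag discipline: the constructor used determines which
+// member is valid, and in debug builds m_tag records that choice so mismatched
+// reads can be caught. For a hypothetical FieldDesc* "pFld":
+//
+//   CachedItem item(pFld);                         // m_tag == CIK_InstanceField
+//   FieldDesc* f = item.m_value.m_instanceField;   // valid
+//   // item.m_value.m_clsHnd would reinterpret the same bits incorrectly.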
+
+
+const char* eeGetMethodFullName(CEEInfo* info, CORINFO_METHOD_HANDLE hnd, const char** clsName = NULL);
+
+// The per-InterpMethodInfo cache may map generic instantiation information to the
+// cache for the current instantiation; when we find the right one the first time, we copy it
+// into here, so we only have to do the instantiation->cache lookup once.
+typedef InterpreterCache<unsigned, CachedItem> ILOffsetToItemCache;
+typedef InterpreterCache<size_t, ILOffsetToItemCache*> GenericContextToInnerCache;
+
+#endif // DACCESS_COMPILE
+
+// This is the information that the interpreter stub provides to the
+// interpreter about the method being interpreted.
+struct InterpreterMethodInfo
+{
+#if INTERP_PROFILE || defined(_DEBUG)
+ const char* m_clsName;
+ const char* m_methName;
+#endif
+
+ // Stub num for the current method under interpretation.
+ int m_stubNum;
+
+ // The method this info is relevant to.
+ CORINFO_METHOD_HANDLE m_method;
+
+ // The module containing the method.
+ CORINFO_MODULE_HANDLE m_module;
+
+ // If the method has been JITted, its JITted code (for indirection).
+ PCODE m_jittedCode;
+
+ // Code pointer, size, and max stack usage.
+ BYTE* m_ILCode;
+ BYTE* m_ILCodeEnd; // One byte past the last byte of IL. IL Code Size = m_ILCodeEnd - m_ILCode.
+
+ // The CLR transforms delegate constructors, and may add up to this many
+ // extra arguments. This amount will be added to the IL's reported MaxStack to
+ // get the "maxStack" value below, so we can use a uniform calling convention for
+ // "DoCall".
+ unsigned m_maxStack;
+
+ unsigned m_ehClauseCount;
+
+ // Used to implement arglist: the index into the ilArgs array at which the VA sig cookie argument lives.
+ unsigned m_varArgHandleArgNum;
+
+ // The number of arguments.
+ unsigned short m_numArgs;
+
+ // The number of local variables.
+ unsigned short m_numLocals;
+
+ enum Flags
+ {
+ // Is the first argument a "this" pointer?
+ Flag_hasThisArg,
+ // If "m_hasThisArg" is true, indicates whether the type of this is an object pointer
+ // or a byref.
+ Flag_thisArgIsObjPtr,
+ // Is there a return buffer argument?
+ Flag_hasRetBuffArg,
+ // Is the method a var arg method?
+ Flag_isVarArg,
+ // Is the last argument a generic type context?
+ Flag_hasGenericsContextArg,
+ // Does the type have generic args?
+ Flag_typeHasGenericArgs,
+ // Does the method have generic args?
+ Flag_methHasGenericArgs,
+ // Is the method a "dead simple" getter (one that just reads a field?)
+ Flag_methIsDeadSimpleGetter,
+ // We recognize two forms of dead simple getters, one for "opt" and one for "dbg". If it is
+ // dead simple, is it dbg or opt?
+ Flag_methIsDeadSimpleGetterIsDbgForm,
+ Flag_Count,
+ };
+
+ typedef UINT16 FlagGroup;
+
+ // The bitmask for a set of InterpreterMethodInfo::Flags.
+ FlagGroup m_flags;
+
+ template<int Flg>
+ FlagGroup GetFlagBit() {
+ // This works as long as FlagGroup has at least Flag_Count bits.
+ static_assert(sizeof(FlagGroup) * 8 >= Flag_Count, "error: bitset not large enough");
+ return (1 << Flg);
+ }
+
+ // Get and set the value of a flag.
+ template<int Flg>
+ bool GetFlag() { return (m_flags & GetFlagBit<Flg>()) != 0; }
+ template<int Flg>
+ void SetFlag(bool b)
+ {
+ if (b) m_flags |= GetFlagBit<Flg>();
+ else m_flags &= (~GetFlagBit<Flg>());
+ }
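+
+ // A usage sketch, assuming an InterpreterMethodInfo* "mi":
+ //
+ //   mi->SetFlag<Flag_hasThisArg>(true);
+ //   if (mi->GetFlag<Flag_hasThisArg>()) { ... }
+ //
+ // Each Flags enumerator indexes one bit of the UINT16 "m_flags" mask, via
+ // GetFlagBit<Flg>() == (1 << Flg).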
+
+ // This structure describes a local: its type and its offset.
+ struct LocalDesc
+ {
+ InterpreterType m_typeStackNormal;
+ InterpreterType m_type;
+ unsigned m_offset;
+ };
+
+ // This structure describes an argument. Much like a LocalDesc, but
+ // "m_nativeOffset" contains the offset if the argument was passed using the system's native calling convention
+ // (e.g., the calling convention for a JIT -> Interpreter call) whereas "m_directOffset" describes arguments passed
+ // via a direct Interpreter -> Interpreter call.
+ struct ArgDesc
+ {
+ InterpreterType m_typeStackNormal;
+ InterpreterType m_type;
+ short m_nativeOffset;
+ short m_directOffset;
+ };
+
+
+ // This is an array of size at least "m_numArgs", such that entry "i" describes the "i'th"
+ // arg in the "m_ilArgs" array passed to the interpreter: that is, the ArgDesc contains the type, stack-normal type,
+ // and offset in the "m_ilArgs" array of that argument. In addition, it has extra entries if "m_hasGenericsContextArg"
+ // and/or "m_hasRetBuffArg" are true, giving the offsets of those arguments -- the offsets appear
+ // in that order in the array. (The corresponding types should be NativeInt.)
+ ArgDesc* m_argDescs;
+
+ // This is an array of size "m_numLocals", such that entry "i" describes the "i'th"
+ // local : that is, the LocalDesc contains the type, stack-normal type, and, if the type
+ // is a large struct type, the offset in the local variable large-struct memory array.
+ LocalDesc* m_localDescs;
+
+ // A bit map, with 1 bit per local, indicating whether it contains a pinning reference.
+ char* m_localIsPinningRefBits;
+
+ unsigned m_largeStructLocalSize;
+ unsigned LocalMemSize()
+ {
+ return m_largeStructLocalSize + m_numLocals * sizeof(INT64);
+ }
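+
+ // For example (ignoring any alignment padding), a method with 3 locals, one of
+ // them a 24-byte struct (a "large" struct, being bigger than an INT64), would
+ // have m_largeStructLocalSize == 24 and LocalMemSize() == 24 + 3 * sizeof(INT64)
+ // == 48: every local gets a fixed 8-byte slot, and large-struct locals
+ // additionally get full-sized storage that the fixed slot points to (see the
+ // Interpreter constructor).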
+
+ // I will probably need more information about the return value, but for now...
+ CorInfoType m_returnType;
+
+ // The number of times this method has been interpreted.
+ unsigned int m_invocations;
+
+#if INTERP_PROFILE
+ UINT64 m_totIlInstructionsExeced;
+ unsigned m_maxIlInstructionsExeced;
+
+ void RecordExecInstrs(unsigned instrs)
+ {
+ m_totIlInstructionsExeced += instrs;
+ if (instrs > m_maxIlInstructionsExeced)
+ {
+ m_maxIlInstructionsExeced = instrs;
+ }
+ }
+#endif
+
+// #ifndef DACCESS_COMPILE
+ // Caching information. Currently the only thing we cache is saved formats of MethodDescCallSites
+ // at call instructions.
+ // We use a "void*", because the actual type depends on the whether the method has
+ // a dynamic generics context. If so, this is a cache from the generic parameter to an
+ // ILoffset->item cache; if not, it's a the ILoffset->item cache directly.
+ void* m_methodCache;
+// #endif // DACCESS_COMPILE
+
+ InterpreterMethodInfo(CEEInfo* comp, CORINFO_METHOD_INFO* methInfo);
+
+ void InitArgInfo(CEEInfo* comp, CORINFO_METHOD_INFO* methInfo, short* argOffsets_);
+
+ void AllocPinningBitsIfNeeded();
+
+ void SetPinningBit(unsigned locNum);
+ bool GetPinningBit(unsigned locNum);
+
+ CORINFO_METHOD_HANDLE GetPreciseGenericsContext(Object* thisArg, void* genericsCtxtArg);
+
+#ifndef DACCESS_COMPILE
+ // Gets the proper cache for a call to a method with the current InterpreterMethodInfo, with the given
+ // "thisArg" and "genericsCtxtArg". If "alloc" is true, will allocate the cache if necessary.
+ ILOffsetToItemCache* GetCacheForCall(Object* thisArg, void* genericsCtxtArg, bool alloc = false);
+#endif // DACCESS_COMPILE
+
+ ~InterpreterMethodInfo();
+};
+
+
+// Expose some protected methods of CEEInfo.
+class InterpreterCEEInfo: public CEEInfo
+{
+ CEEJitInfo m_jitInfo;
+public:
+ InterpreterCEEInfo(CORINFO_METHOD_HANDLE meth): CEEInfo((MethodDesc*)meth), m_jitInfo((MethodDesc*)meth, NULL, NULL, CorJitFlag(0)) { m_pOverride = this; }
+
+ // Certain methods are unimplemented by CEEInfo (they hit an assert). They are implemented by CEEJitInfo, yet
+ // don't seem to require any of the CEEJitInfo state we can't provide. For those cases, delegate to the "partial"
+ // CEEJitInfo m_jitInfo.
+ void addActiveDependency(CORINFO_MODULE_HANDLE moduleFrom,CORINFO_MODULE_HANDLE moduleTo)
+ {
+ m_jitInfo.addActiveDependency(moduleFrom, moduleTo);
+ }
+};
+
+extern INT64 F_CALL_CONV InterpretMethod(InterpreterMethodInfo* methInfo, BYTE* ilArgs, void* stubContext);
+extern float F_CALL_CONV InterpretMethodFloat(InterpreterMethodInfo* methInfo, BYTE* ilArgs, void* stubContext);
+extern double F_CALL_CONV InterpretMethodDouble(InterpreterMethodInfo* methInfo, BYTE* ilArgs, void* stubContext);
+
+class Interpreter
+{
+ friend INT64 F_CALL_CONV InterpretMethod(InterpreterMethodInfo* methInfo, BYTE* ilArgs, void* stubContext);
+ friend float F_CALL_CONV InterpretMethodFloat(InterpreterMethodInfo* methInfo, BYTE* ilArgs, void* stubContext);
+ friend double F_CALL_CONV InterpretMethodDouble(InterpreterMethodInfo* methInfo, BYTE* ilArgs, void* stubContext);
+
+ // This will be inlined into the bodies of the methods above
+ static inline ARG_SLOT InterpretMethodBody(InterpreterMethodInfo* interpMethInfo, bool directCall, BYTE* ilArgs, void* stubContext);
+
+ // The local frame size of the method being interpreted.
+ static size_t GetFrameSize(InterpreterMethodInfo* interpMethInfo);
+
+ // JIT the method if we've passed the threshold, or if "force" is true.
+ static void JitMethodIfAppropriate(InterpreterMethodInfo* interpMethInfo, bool force = false);
+
+ friend class InterpreterFrame;
+
+public:
+ // Return an interpreter stub for the given method. That is, a stub that transforms the arguments from the native
+ // calling convention to the interpreter convention, and provides the method descriptor, then calls the interpreter.
+ // If "jmpCall" setting is true, then "ppInterpreterMethodInfo" must be provided and the GenerateInterpreterStub
+ // will NOT generate a stub. Instead it will provide a MethodInfo that is initialized correctly after computing
+ // arg descs.
+ static CorJitResult GenerateInterpreterStub(CEEInfo* comp,
+ CORINFO_METHOD_INFO* info,
+ /*OUT*/ BYTE **nativeEntry,
+ /*OUT*/ ULONG *nativeSizeOfCode,
+ InterpreterMethodInfo** ppInterpMethodInfo = NULL,
+ bool jmpCall = false);
+
+ // If "addr" is the start address of an interpreter stub, return the corresponding MethodDesc*,
+ // else "NULL".
+ static class MethodDesc* InterpretationStubToMethodInfo(PCODE addr);
+
+ // A value to indicate that the cache has not been initialized (to distinguish it from NULL --
+ // meaning we've looked, and it doesn't yet have a cache).
+#define UninitExecCache reinterpret_cast<ILOffsetToItemCache*>(0x1)
+
+ // The "frameMemory" should be a pointer to a locally-allocated memory block
+ // whose size is sufficient to hold the m_localVarMemory, the operand stack, and the
+ // operand type stack.
+ Interpreter(InterpreterMethodInfo* methInfo_, bool directCall_, BYTE* ilArgs_, void* stubContext_, BYTE* frameMemory)
+ : m_methInfo(methInfo_),
+ m_directCall(directCall_),
+ m_ilArgs(ilArgs_),
+ m_stubContext(stubContext_),
+ m_ILCodePtr(methInfo_->m_ILCode),
+ m_curStackHt(0),
+ m_interpCeeInfo(methInfo_->m_method),
+ m_largeStructOperandStack(NULL),
+ m_largeStructOperandStackHt(0),
+ m_largeStructOperandStackAllocSize(0),
+ m_thisArg(NULL),
+ m_securityObject(TADDR(NULL)),
+ m_args(NULL),
+ m_argsSize(0),
+ m_structRetValITPtr(NULL),
+ m_callThisArg(NULL),
+ m_orOfPushedInterpreterTypes(0),
+ m_preciseGenericsContext(NULL),
+ m_functionPointerStack(NULL),
+ m_genericsCtxtArg(NULL),
+ m_inFlightException(NULL),
+ m_filterNextScan(0),
+ m_filterHandlerOffset(0),
+ m_filterExcILOffset(0),
+#ifdef USE_CHECKED_OBJECTREFS
+ m_retBufArg(NULL), // Initialize to NULL so we can safely declare protected.
+#endif // USE_CHECKED_OBJECTREFS
+#ifndef DACCESS_COMPILE
+ // Means "uninitialized"
+ m_thisExecCache(UninitExecCache),
+#endif
+ m_constrainedFlag(false),
+ m_readonlyFlag(false),
+ m_locAllocData(NULL),
+ m_leaveInfoStack()
+ {
+ // We must zero the locals.
+ memset(frameMemory, 0, methInfo_->LocalMemSize() + sizeof(GSCookie));
+
+ // m_localVarMemory is below the fixed size slots, above the large struct slots.
+ m_localVarMemory = frameMemory + methInfo_->m_largeStructLocalSize + sizeof(GSCookie);
+ m_gsCookieAddr = (GSCookie*) (m_localVarMemory - sizeof(GSCookie));
+
+ // Having zeroed, for large struct locals, we must initialize the fixed-size local slot to point to the
+ // corresponding large-struct local slot.
+ for (unsigned i = 0; i < methInfo_->m_numLocals; i++)
+ {
+ if (methInfo_->m_localDescs[i].m_type.IsLargeStruct(&m_interpCeeInfo))
+ {
+ void* structPtr = ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(FixedSizeLocalSlot(i)), sizeof(void**));
+ *reinterpret_cast<void**>(structPtr) = LargeStructLocalSlot(i);
+ }
+ }
+ frameMemory += methInfo_->LocalMemSize();
+ frameMemory += sizeof(GSCookie);
+
+#define COMBINE_OPSTACK_VAL_TYPE 0
+
+#if COMBINE_OPSTACK_VAL_TYPE
+ m_operandStackX = reinterpret_cast<OpStackValAndType*>(frameMemory);
+ frameMemory += (methInfo_->m_maxStack * sizeof(OpStackValAndType));
+#else
+ m_operandStack = reinterpret_cast<INT64*>(frameMemory);
+ frameMemory += (methInfo_->m_maxStack * sizeof(INT64));
+ m_operandStackTypes = reinterpret_cast<InterpreterType*>(frameMemory);
+#endif
+
+ // If we have a "this" arg, save it in case we need it later. (So we can
+ // reliably get it even if the IL updates arg 0...)
+ if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_hasThisArg>())
+ {
+ m_thisArg = *reinterpret_cast<Object**>(GetArgAddr(0));
+ }
+
+ unsigned extraArgInd = methInfo_->m_numArgs - 1;
+ // We do these in the *reverse* of the order they appear in the array, so that we can conditionally process
+ // the ones that are used.
+ if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_hasGenericsContextArg>())
+ {
+ m_genericsCtxtArg = *reinterpret_cast<Object**>(GetArgAddr(extraArgInd));
+ extraArgInd--;
+ }
+ if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_isVarArg>())
+ {
+ extraArgInd--;
+ }
+ if (m_methInfo->GetFlag<InterpreterMethodInfo::Flag_hasRetBuffArg>())
+ {
+ m_retBufArg = *reinterpret_cast<void**>(GetArgAddr(extraArgInd));
+ extraArgInd--;
+ }
+ }
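+
+ // A sketch of the frame-size computation the constructor's layout implies the
+ // caller must perform when allocating "frameMemory" (illustrative, for the
+ // non-COMBINE_OPSTACK_VAL_TYPE configuration; "frameBytes" is not an actual member):
+ //
+ //   size_t frameBytes = sizeof(GSCookie)
+ //       + methInfo->LocalMemSize()                          // locals
+ //       + methInfo->m_maxStack * sizeof(INT64)              // operand stack
+ //       + methInfo->m_maxStack * sizeof(InterpreterType);   // operand type stack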
+
+ ~Interpreter()
+ {
+ if (m_largeStructOperandStack != NULL)
+ {
+ delete[] m_largeStructOperandStack;
+ }
+
+ if (m_locAllocData != NULL)
+ {
+ delete m_locAllocData;
+ }
+
+ if (m_functionPointerStack != NULL)
+ {
+ delete[] m_functionPointerStack;
+ }
+ }
+
+ // Called during EE startup to initialize locks and other generic resources.
+ static void Initialize();
+
+ // Called during stub generation to initialize compiler-specific resources.
+ static void InitializeCompilerStatics(CEEInfo* info);
+
+ // Called during EE shutdown to destroy locks and release generic resources.
+ static void Terminate();
+
+ // Returns true iff "stackPtr" can only be in younger frames than "this". (On a downwards-
+ // growing stack, it is less than the smallest local address of "this".)
+ bool IsInCalleesFrames(void* stackPtr);
+
+ MethodDesc* GetMethodDesc() { return reinterpret_cast<MethodDesc*>(m_methInfo->m_method); }
+
+#if INTERP_ILSTUBS
+ void* GetStubContext() { return m_stubContext; }
+ void* GetStubContextAddr() { return &m_stubContext; }
+#endif
+
+ OBJECTREF* GetAddressOfSecurityObject() { return &m_securityObject; }
+
+ void* GetParamTypeArg() { return m_genericsCtxtArg; }
+
+private:
+ // Architecture-dependent helpers.
+ inline static unsigned short NumberOfIntegerRegArgs();
+
+ // Wrapper for ExecuteMethod to do an O(1) alloca when performing a jmpCall or a normal call. If doJmpCall is true, this method also resolves the call token into pResolvedToken.
+ static
+ ARG_SLOT ExecuteMethodWrapper(struct InterpreterMethodInfo* interpMethInfo, bool directCall, BYTE* ilArgs, void* stubContext, bool* pDoJmpCall, CORINFO_RESOLVED_TOKEN* pResolvedCallToken);
+
+ // Execute the current method, and set *retVal to the return value, if any.
+ void ExecuteMethod(ARG_SLOT* retVal, bool* pDoJmpCall, unsigned* pJumpCallToken);
+
+ // Fetches the monitor for static methods by asking cee info. Returns the monitor
+ // object.
+ AwareLock* GetMonitorForStaticMethod();
+
+ // Synchronized methods have to call monitor enter and exit at the entry and exits of the
+ // method.
+ void DoMonitorEnterWork();
+ void DoMonitorExitWork();
+
+ // Determines if the current exception is handled by the current method. If so,
+ // returns true and sets the interpreter state to start executing in the appropriate handler.
+ bool MethodHandlesException(OBJECTREF orThrowable);
+
+ // Assumes that "ilCode" is the first instruction in a method, whose code is of size "codeSize".
+ // Returns "false" if this method has no loops; if it returns "true", it might have a loop.
+ static bool MethodMayHaveLoop(BYTE* ilCode, unsigned codeSize);
+
+ // Do anything that needs to be done on a backwards branch (e.g., GC poll).
+ // Assumes that "offset" is the delta between the current code pointer and the post-branch pointer;
+ // obviously, it will be negative.
+ void BackwardsBranchActions(int offset);
+
+ // Expects "interp0" to be the address of the interpreter object being scanned.
+ static void GCScanRoots(promote_func* pf, ScanContext* sc, void* interp0);
+
+ // The above calls this instance method.
+ void GCScanRoots(promote_func* pf, ScanContext* sc);
+
+ // Scan the root at "loc", whose type is "it", using "pf" and "sc".
+ void GCScanRootAtLoc(Object** loc, InterpreterType it, promote_func* pf, ScanContext* sc,
+ bool pinningRef = false);
+
+ // Scan the root at "loc", whose type is the value class "valueCls", using "pf" and "sc".
+ void GCScanValueClassRootAtLoc(Object** loc, CORINFO_CLASS_HANDLE valueClsHnd, promote_func* pf, ScanContext* sc);
+
+ // Asserts that "addr" is the start of the interpretation stub for "md". Records this in a table,
+ // to satisfy later calls to "InterpretationStubToMethodInfo."
+ static void RecordInterpreterStubForMethodDesc(CORINFO_METHOD_HANDLE md, void* addr);
+
+ struct ArgState
+ {
+ unsigned short numRegArgs;
+ unsigned short numFPRegArgSlots;
+ unsigned fpArgsUsed; // Bit per single-precision fp arg accounted for.
+ short callerArgStackSlots;
+ short* argOffsets;
+ enum ArgRegStatus
+ {
+ ARS_IntReg,
+ ARS_FloatReg,
+ ARS_NotReg
+ };
+ ArgRegStatus* argIsReg;
+
+ ArgState(unsigned totalArgs) :
+ numRegArgs(0),
+ numFPRegArgSlots(0), fpArgsUsed(0),
+ callerArgStackSlots(0),
+ argOffsets(new short[totalArgs]),
+ argIsReg(new ArgRegStatus[totalArgs])
+ {
+ for (unsigned i = 0; i < totalArgs; i++)
+ {
+ argIsReg[i] = ARS_NotReg;
+ argOffsets[i] = 0;
+ }
+ }
+
+#if defined(_ARM_)
+ static const int MaxNumFPRegArgSlots = 16;
+#elif defined(_ARM64_)
+ static const int MaxNumFPRegArgSlots = 8;
+#elif defined(_AMD64_)
+ static const int MaxNumFPRegArgSlots = 4;
+#endif
+
+ ~ArgState()
+ {
+ delete[] argOffsets;
+ delete[] argIsReg;
+ }
+
+ void AddArg(unsigned canonIndex, short numSlots = 1, bool noReg = false, bool twoSlotAlign = false);
+
+ // By this call, argument "canonIndex" is declared to be a floating point argument, taking the given #
+ // of slots. Important that this be called in argument order.
+ void AddFPArg(unsigned canonIndex, unsigned short numSlots, bool doubleAlign);
+
+#if defined(_AMD64_)
+ // We have a special function for AMD64 because the integer and floating-point argument registers overlap. However, all
+ // callers are expected to call AddArg/AddFPArg directly.
+ void AddArgAmd64(unsigned canonIndex, unsigned short numSlots, bool isFloatingType);
+#endif
+ };
+
+ typedef SimplerHashTable<void*, PtrKeyFuncs<void>, CORINFO_METHOD_HANDLE, DefaultSimplerHashBehavior> AddrToMDMap;
+ static AddrToMDMap* s_addrToMDMap;
+ static AddrToMDMap* GetAddrToMdMap();
+
+ // In debug, we map to a pair, containing the Thread that inserted it, so we can assert that any given thread only
+ // inserts one stub for a CORINFO_METHOD_HANDLE.
+ struct MethInfo
+ {
+ InterpreterMethodInfo* m_info;
+#ifdef _DEBUG
+ Thread* m_thread;
+#endif // _DEBUG
+ };
+ typedef SimplerHashTable<CORINFO_METHOD_HANDLE, PtrKeyFuncs<CORINFO_METHOD_STRUCT_>, MethInfo, DefaultSimplerHashBehavior> MethodHandleToInterpMethInfoPtrMap;
+ static MethodHandleToInterpMethInfoPtrMap* s_methodHandleToInterpMethInfoPtrMap;
+ static MethodHandleToInterpMethInfoPtrMap* GetMethodHandleToInterpMethInfoPtrMap();
+
+ static InterpreterMethodInfo* RecordInterpreterMethodInfoForMethodHandle(CORINFO_METHOD_HANDLE md, InterpreterMethodInfo* methInfo);
+ static InterpreterMethodInfo* MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE md);
+
+public:
+ static unsigned s_interpreterStubNum;
+private:
+ unsigned CurOffset()
+ {
+ assert(m_methInfo->m_ILCode <= m_ILCodePtr &&
+ m_ILCodePtr < m_methInfo->m_ILCodeEnd);
+ unsigned res = static_cast<unsigned>(m_ILCodePtr - m_methInfo->m_ILCode);
+ return res;
+ }
+
+ // We've computed a branch target. Is the target in range? If not, throw an InvalidProgramException.
+ // Otherwise, execute the branch by changing m_ILCodePtr.
+ void ExecuteBranch(BYTE* ilTargetPtr)
+ {
+ if (m_methInfo->m_ILCode <= ilTargetPtr &&
+ ilTargetPtr < m_methInfo->m_ILCodeEnd)
+ {
+ m_ILCodePtr = ilTargetPtr;
+ }
+ else
+ {
+ COMPlusThrow(kInvalidProgramException);
+ }
+ }
+
+ // Private fields:
+ //
+ InterpreterMethodInfo* m_methInfo;
+ InterpreterCEEInfo m_interpCeeInfo;
+
+ BYTE* m_ILCodePtr;
+
+ bool m_directCall;
+ BYTE* m_ilArgs;
+
+ __forceinline InterpreterType GetArgType(unsigned argNum)
+ {
+ return m_methInfo->m_argDescs[argNum].m_type;
+ }
+
+ __forceinline InterpreterType GetArgTypeNormal(unsigned argNum)
+ {
+ return m_methInfo->m_argDescs[argNum].m_typeStackNormal;
+ }
+
+ __forceinline BYTE* GetArgAddr(unsigned argNum)
+ {
+ if (!m_directCall)
+ {
+#if defined(_AMD64_)
+ // In AMD64, a reference to the struct is passed if its size exceeds the word size.
+ // Dereference the arg to get to the ref of the struct.
+ if (GetArgType(argNum).IsLargeStruct(&m_interpCeeInfo))
+ {
+ return *reinterpret_cast<BYTE**>(&m_ilArgs[m_methInfo->m_argDescs[argNum].m_nativeOffset]);
+ }
+#endif
+ return &m_ilArgs[m_methInfo->m_argDescs[argNum].m_nativeOffset];
+ }
+ else
+ {
+ if (GetArgType(argNum).IsLargeStruct(&m_interpCeeInfo))
+ {
+ return *reinterpret_cast<BYTE**>(&m_ilArgs[m_methInfo->m_argDescs[argNum].m_directOffset]);
+ }
+ else
+ {
+ return &m_ilArgs[m_methInfo->m_argDescs[argNum].m_directOffset];
+ }
+ }
+ }
+
+ __forceinline MethodTable* GetMethodTableFromClsHnd(CORINFO_CLASS_HANDLE hnd)
+ {
+ TypeHandle th(hnd);
+ return th.GetMethodTable();
+ }
+
+#ifdef FEATURE_HFA
+ __forceinline BYTE* GetHFARetBuffAddr(unsigned sz)
+ {
+ // Round up to a double boundary:
+ sz = ((sz + sizeof(double) - 1) / sizeof(double)) * sizeof(double);
+ // We rely on the interpreter stub to have pushed "sz" bytes on its stack frame,
+ // below m_ilArgs;
+ return m_ilArgs - sz;
+ }
+#endif // FEATURE_HFA
+
+ void* m_stubContext;
+
+
+ // Address of the GSCookie value in the current method's frame.
+ GSCookie* m_gsCookieAddr;
+
+ BYTE* GetFrameBase()
+ {
+ return (m_localVarMemory - sizeof(GSCookie) - m_methInfo->m_largeStructLocalSize);
+ }
+ // m_localVarMemory points to the boundary between the fixed-size slots for the locals
+ // (positive offsets), and the full-sized slots for large struct locals (negative offsets).
+ BYTE* m_localVarMemory;
+ INT64* FixedSizeLocalSlot(unsigned locNum)
+ {
+ return reinterpret_cast<INT64*>(m_localVarMemory) + locNum;
+ }
+
+ BYTE* LargeStructLocalSlot(unsigned locNum)
+ {
+ BYTE* base = GetFrameBase();
+ BYTE* addr = base + m_methInfo->m_localDescs[locNum].m_offset;
+ assert(IsInLargeStructLocalArea(addr));
+ return addr;
+ }
+
+ bool IsInLargeStructLocalArea(void* addr)
+ {
+ void* base = GetFrameBase();
+ return (base <= addr) && (addr < (static_cast<void*>(m_localVarMemory - sizeof(GSCookie))));
+ }
+
+ bool IsInLocalArea(void* addr)
+ {
+ void* base = GetFrameBase();
+ return (base <= addr) && (addr < static_cast<void*>(reinterpret_cast<INT64*>(m_localVarMemory) + m_methInfo->m_numLocals));
+ }
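+
+ // In picture form, the frame layout the three methods above assume
+ // (addresses grow to the right):
+ //
+ //   GetFrameBase()                        m_localVarMemory
+ //   |  large-struct local storage  | GSCookie |  fixed INT64 slot per local  |
+ //
+ // The fixed slot of a large-struct local holds a pointer into the left-hand
+ // region (set up by the constructor above).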
+
+ // Ensures that the operand stack contains no pointers to large struct local slots (by
+ // copying the values out to locations allocated on the large struct stack).
+ void OpStackNormalize();
+
+ // The defining property of this word is: if the bottom two bits are not 0x3, then the current operand
+ // stack contains no pointers to large-struct slots for locals. Operationally, we achieve this by taking
+ // the "OR" of the interpreter types of local variables that have been loaded onto the operand stack --
+ // if any have been large structs, they will have 0x3 as the low order bits of their interpreter type,
+ // and this will be "sticky". We may sometimes determine that no large struct local pointers are
+ // currently on the stack, and reset this word to zero.
+ size_t m_orOfPushedInterpreterTypes;
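+
+ // For example, loading a large-struct local (whose interpreter type carries 0x3
+ // in its low bits) ORs 0x3 into this word; any number of later primitive pushes
+ // (tag bits 0x0) leave the 0x3 in place, so
+ // (m_orOfPushedInterpreterTypes & 0x3) == 0x3 conservatively reports that a
+ // pointer to a large-struct local slot may still be on the stack.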
+
+#if COMBINE_OPSTACK_VAL_TYPE
+ struct OpStackValAndType
+ {
+ INT64 m_val;
+ InterpreterType m_type;
+ INT32 m_pad;
+ };
+
+ OpStackValAndType* m_operandStackX;
+#else
+ INT64* m_operandStack;
+#endif
+
+ template<typename T>
+ __forceinline T OpStackGet(unsigned ind)
+ {
+ return *OpStackGetAddr<T>(ind);
+ }
+
+ template<typename T>
+ __forceinline void OpStackSet(unsigned ind, T val)
+ {
+ *OpStackGetAddr<T>(ind) = val;
+ }
+
+#if COMBINE_OPSTACK_VAL_TYPE
+ template<typename T>
+ __forceinline T* OpStackGetAddr(unsigned ind)
+ {
+ return reinterpret_cast<T*>(ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(&m_operandStackX[ind].m_val), sizeof(T)));
+ }
+
+ __forceinline void* OpStackGetAddr(unsigned ind, size_t sz)
+ {
+ return ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(&m_operandStackX[ind].m_val), sz);
+ }
+#else
+ template<typename T>
+ __forceinline T* OpStackGetAddr(unsigned ind)
+ {
+ return reinterpret_cast<T*>(ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(&m_operandStack[ind]), sizeof(T)));
+ }
+
+ __forceinline void* OpStackGetAddr(unsigned ind, size_t sz)
+ {
+ return ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(&m_operandStack[ind]), sz);
+ }
+#endif
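+
+ // A usage sketch for the accessors above: a 4-byte value is read and written
+ // through ArgSlotEndianessFixup so that on big-endian targets it lands in the
+ // correct half of the 8-byte operand-stack slot:
+ //
+ //   OpStackSet<INT32>(ind, 42);
+ //   INT32 v = OpStackGet<INT32>(ind);   // v == 42 on either endianness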
+
+ __forceinline INT64 GetSmallStructValue(void* src, size_t sz)
+ {
+ assert(sz <= sizeof(INT64));
+
+ INT64 ret = 0;
+ memcpy(ArgSlotEndianessFixup(reinterpret_cast<ARG_SLOT*>(&ret), sz), src, sz);
+ return ret;
+ }
+
+ BYTE* m_largeStructOperandStack;
+ size_t m_largeStructOperandStackHt;
+ size_t m_largeStructOperandStackAllocSize;
+
+ // Allocate "sz" bytes on the large struct operand stack, and return a pointer to where
+ // the structure should be copied.
+ void* LargeStructOperandStackPush(size_t sz);
+
+ // Deallocate "sz" bytes from the large struct operand stack, unless the corresponding
+ // operand stack value "fromAddr" is a pointer to a local variable.
+ void LargeStructOperandStackPop(size_t sz, void* fromAddr);
+
+ // Ensures that we can push a struct of size "sz" on the large struct operand stack.
+ void LargeStructOperandStackEnsureCanPush(size_t sz);
+
+#ifdef _DEBUG
+ // Returns "true" iff the sum of sizes of large structures on the operand stack
+ // equals "m_largeStructOperandStackHt", which should be an invariant.
+ bool LargeStructStackHeightIsValid();
+#endif // _DEBUG
+
+ // Returns "true" iff the "cit" is 'considered' a valid pointer type for the
+ // architecture. For ex: nativeint/byref and for amd64 longs with loose rules.
+ bool IsValidPointerType(CorInfoType cit);
+
+#if !COMBINE_OPSTACK_VAL_TYPE
+ InterpreterType* m_operandStackTypes;
+#endif
+
+#if COMBINE_OPSTACK_VAL_TYPE
+#if USE_MACRO_FOR_OPSTACKACCESS
+#define OpStackTypeGet(ind) m_operandStackX[ind].m_type
+#define OpStackTypeSet(ind, it) m_operandStackX[ind].m_type = it
+#else
+ __forceinline InterpreterType OpStackTypeGet(unsigned ind)
+ {
+ return m_operandStackX[ind].m_type;
+ }
+
+ __forceinline void OpStackTypeSet(unsigned ind, InterpreterType it)
+ {
+ assert(IsStackNormalType(it.ToCorInfoType()));
+ m_operandStackX[ind].m_type = it;
+ }
+#endif
+#else
+ __forceinline InterpreterType OpStackTypeGet(unsigned ind)
+ {
+ return m_operandStackTypes[ind];
+ }
+
+ __forceinline void OpStackTypeSet(unsigned ind, InterpreterType it)
+ {
+ assert(IsStackNormalType(it.ToCorInfoType()));
+ m_operandStackTypes[ind] = it;
+ }
+#endif
+ unsigned m_curStackHt;
+
+ // These are used in searching for finally clauses when we 'leave' a try block:
+
+ struct LeaveInfo
+ {
+ unsigned m_offset; // The offset of "leave" instructions in try blocks whose finally blocks are being executed.
+ BYTE* m_target; // The location the 'leave' was jumping to -- where execution should resume after all finally's have been executed.
+ unsigned m_nextEHIndex; // The index in the EH table at which the search for the next finally for "lastLeaveOffset" should resume.
+
+ LeaveInfo(unsigned offset = 0, BYTE* target = NULL) : m_offset(offset), m_target(target), m_nextEHIndex(0) {}
+ };
+ // This is a stack of the currently in-force "leaves". (Multiple leaves can be in progress when a
+ // try-finally occurs within a finally.)
+ Stack<LeaveInfo> m_leaveInfoStack;
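+
+ // For example, if a "leave" exits a try whose finally itself contains a
+ // try-finally, a second LeaveInfo is pushed; the inner finally runs to
+ // completion (and its entry is popped) before the search for the outer
+ // leave's remaining finally clauses resumes at its m_nextEHIndex.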
+
+ // Used to track the next filter to scan in case the current
+ // filter doesn't handle the exception.
+ unsigned m_filterNextScan;
+
+ // Used to record the handler offset for the current filter so it can be used during endfilter.
+ unsigned m_filterHandlerOffset;
+
+ // The actual offset at which the exception occurred for a filter that might possibly handle it.
+ unsigned m_filterExcILOffset;
+
+ // This is the exception to rethrow upon exiting the last finally.
+ Object* m_inFlightException; // This must be scanned by GC.
+
+ // Storing "this" and "typeCtxt" args if necessary.
+ Object* m_thisArg; // This must be scanned by GC.
+ void* m_retBufArg; // This must be scanned by GC:
+ // if the caller is JITted, o.f = Foo(), for o.f a value type, retBuf may be ref o.f.
+ void* m_genericsCtxtArg;
+
+ // Acquired variable for synchronized methods.
+ unsigned char m_monAcquired;
+
+ // Holds the security object, for frames that require it.
+ OBJECTREF m_securityObject;
+
+ ARG_SLOT* m_args;
+ InterpreterType* m_argTypes;
+ unsigned m_argsSize;
+
+ void* m_callThisArg;
+
+ // If "m_structRetValITPtr" is non-NULL, then "*m_structRetValITPtr" represents a struct type, and
+ // "m_structRetValTempSpace" is a pointer to a value of that struct type, which must be scanned during GC.
+ InterpreterType* m_structRetValITPtr;
+ void* m_structRetValTempSpace;
+
+#ifdef DACCESS_COMPILE
+ void* m_thisExecCache;
+#else // DACCESS_COMPILE
+
+ // The proper cache for the current method execution (or else UninitExecCache).
+ ILOffsetToItemCache* m_thisExecCache;
+
+ // Retrieve the ILoffset->Item cache for the generic instantiation (if any) of the
+ // currently-executing method. If "alloc" is true, allocate one if its not there.
+ ILOffsetToItemCache* GetThisExecCache(bool alloc)
+ {
+ if (m_thisExecCache == UninitExecCache ||
+ (m_thisExecCache == NULL && alloc))
+ {
+ m_thisExecCache = m_methInfo->GetCacheForCall(m_thisArg, m_genericsCtxtArg, alloc);
+ }
+ assert(!alloc || m_thisExecCache != NULL);
+ return m_thisExecCache;
+ }
+
+ // Cache that a call at "iloffset" has the given CallSiteCacheData "callInfo".
+ void CacheCallInfo(unsigned iloffset, CallSiteCacheData* callInfo);
+
+ // If there's a cached CallSiteCacheData for the call at the given IL offset, return it, else NULL.
+ CallSiteCacheData* GetCachedCallInfo(unsigned iloffset);
+
+ void CacheInstanceField(unsigned iloffset, FieldDesc* fld);
+ FieldDesc* GetCachedInstanceField(unsigned iloffset);
+
+ void CacheStaticField(unsigned iloffset, StaticFieldCacheEntry* pEntry);
+ StaticFieldCacheEntry* GetCachedStaticField(unsigned iloffset);
+
+ void CacheClassHandle(unsigned ilOffset, CORINFO_CLASS_HANDLE clsHnd);
+ CORINFO_CLASS_HANDLE GetCachedClassHandle(unsigned iloffset);
+#endif // DACCESS_COMPILE
+
+#if INTERP_ILCYCLE_PROFILE
+ // Cycles we want to delete from the current instruction's cycle count; e.g.,
+ // cycles spent in a callee.
+ unsigned __int64 m_exemptCycles;
+ unsigned __int64 m_startCycles;
+ unsigned short m_instr;
+
+ void UpdateCycleCount();
+#endif // INTERP_ILCYCLE_PROFILE
+
+#ifdef _DEBUG
+
+ // These collectively record all the interpreter method infos we've created.
+ static InterpreterMethodInfo** s_interpMethInfos;
+ static unsigned s_interpMethInfosAllocSize;
+ static unsigned s_interpMethInfosCount;
+
+ static void AddInterpMethInfo(InterpreterMethodInfo* methInfo);
+
+ // Print any end-of-run summary information we've collected, and want
+ // printed.
+
+ // Both methods below require that "mi0" and "mi1" are actually "InterpreterMethodInfo*"s.
+
+ // Returns -1, 0, or 1, depending on whether "mi0->m_invocations" is less than,
+ // equal to, or greater than "mi1->m_invocations".
+ static int _cdecl CompareMethInfosByInvocations(const void* mi0, const void* mi1);
+#if INTERP_PROFILE
+ // Returns 1, 0, or -1, depending on whether "mi0->m_totIlInstructionsExeced" is less than,
+ // equal to, or greater than "mi1->m_totIlInstructionsExeced". (Note that this enables a descending sort.)
+ static int _cdecl CompareMethInfosByILInstrs(const void* mi0, const void* mi1);
+#endif // INTERP_PROFILE
+#endif // _DEBUG
+
+ private:
+ static ConfigDWORD s_PrintPostMortemFlag;
+
+ public:
+ static void PrintPostMortemData();
+
+#if INTERP_TRACING
+ private:
+ // Returns a string name of the il operation at "ILCodePtr".
+ static const char* ILOp(BYTE* ilCodePtr);
+ static const char* ILOp1Byte(unsigned short ilInstrVal);
+ static const char* ILOp2Byte(unsigned short ilInstrVal);
+
+ // Prints a representation of the operand stack.
+ void PrintOStack();
+
+ // Prints a representation of the arguments.
+ void PrintArgs();
+
+ // Prints a representation of the locals.
+ void PrintLocals();
+
+ // Helper functions for the above:
+ // Print the value at ostack position "index".
+ void PrintOStackValue(unsigned index);
+
+ // Print the value of the argument number "argNum".
+ void PrintArgValue(unsigned argNum);
+
+ // Requires that "valAddr" point to a location containing a value of type
+ // "cit", and prints that value.
+ void PrintValue(InterpreterType cit, BYTE* valAddr);
+
+ public:
+ static inline FILE* GetLogFile();
+ private:
+ static FILE* s_InterpreterLogFile;
+ static ConfigDWORD s_DumpInterpreterStubsFlag;
+ static ConfigDWORD s_TraceInterpreterEntriesFlag;
+ static ConfigDWORD s_TraceInterpreterILFlag;
+ static ConfigDWORD s_TraceInterpreterOstackFlag;
+ static ConfigDWORD s_TraceInterpreterVerboseFlag;
+ static ConfigDWORD s_TraceInterpreterJITTransitionFlag;
+ static ConfigDWORD s_InterpreterStubMin;
+ static ConfigDWORD s_InterpreterStubMax;
+
+ // The total number of method invocations.
+ static LONG s_totalInvocations;
+ // The total number of calls made by interpreted code.
+ static LONG s_totalInterpCalls;
+ static LONG s_totalInterpCallsToGetters;
+ static LONG s_totalInterpCallsToDeadSimpleGetters;
+ static LONG s_totalInterpCallsToDeadSimpleGettersShortCircuited;
+ static LONG s_totalInterpCallsToSetters;
+ static LONG s_totalInterpCallsToIntrinsics;
+ static LONG s_totalInterpCallsToIntrinsicsUnhandled;
+
+ enum ResolveTokenKind {
+ RTK_Undefined,
+ RTK_Constrained,
+ RTK_NewObj,
+ RTK_NewArr,
+ RTK_LdToken,
+ RTK_LdFtn,
+ RTK_LdVirtFtn,
+ RTK_SFldAddr,
+ RTK_LdElem,
+ RTK_Call,
+ RTK_LdObj,
+ RTK_StObj,
+ RTK_CpObj,
+ RTK_InitObj,
+ RTK_IsInst,
+ RTK_CastClass,
+ RTK_MkRefAny,
+ RTK_RefAnyVal,
+ RTK_Sizeof,
+ RTK_StElem,
+ RTK_Box,
+ RTK_Unbox,
+ RTK_UnboxAny,
+ RTK_LdFld,
+ RTK_LdFldA,
+ RTK_StFld,
+ RTK_FindClass,
+ RTK_CheckHandlesException,
+ RTK_Count
+ };
+ static const char* s_tokenResolutionKindNames[RTK_Count];
+
+ static LONG s_tokenResolutionOpportunities[RTK_Count];
+ static LONG s_tokenResolutionCalls[RTK_Count];
+#endif // INTERP_TRACING
+
+#if INTERP_ILINSTR_PROFILE
+ static unsigned short s_ILInstrCategories[512];
+
+ static int s_ILInstrExecs[256];
+ static int s_ILInstrExecsByCategory[512];
+#if INTERP_ILCYCLE_PROFILE
+ static unsigned __int64 s_ILInstrCyclesByCategory[512];
+#endif // INTERP_ILCYCLE_PROFILE
+
+ static const unsigned CountIlInstr2Byte = 0x22;
+ static int s_ILInstr2ByteExecs[CountIlInstr2Byte];
+
+#if INTERP_ILCYCLE_PROFILE
+ static unsigned __int64 s_ILInstrCycles[512];
+ // XXX
+ static unsigned __int64 s_callCycles;
+ static unsigned s_calls;
+#endif // INTERP_ILCYCLE_PROFILE
+#endif // INTERP_ILINSTR_PROFILE
+
+ // Non-debug-only statics.
+ static ConfigMethodSet s_InterpretMeths;
+ static ConfigMethodSet s_InterpretMethsExclude;
+ static ConfigDWORD s_InterpretMethHashMin;
+ static ConfigDWORD s_InterpretMethHashMax;
+ static ConfigDWORD s_InterpreterJITThreshold;
+ static ConfigDWORD s_InterpreterDoLoopMethodsFlag;
+ static bool s_InterpreterDoLoopMethods;
+ static ConfigDWORD s_InterpreterUseCachingFlag;
+ static bool s_InterpreterUseCaching;
+ static ConfigDWORD s_InterpreterLooseRulesFlag;
+ static bool s_InterpreterLooseRules;
+ static CrstExplicitInit s_methodCacheLock;
+ static CrstExplicitInit s_interpStubToMDMapLock;
+
+ // True iff a "constrained" prefix has preceded a call.
+ bool m_constrainedFlag;
+ // True iff a "volatile" prefixe precedes a memory reference.
+ bool m_volatileFlag;
+ // If there has been a "constrained" prefix, this is initialized
+ // with the token of the constraint class.
+ CORINFO_RESOLVED_TOKEN m_constrainedResolvedToken;
+ // True iff a "readonly" prefix has preceded a ldelema.
+ bool m_readonlyFlag;
+
+ // Data structures related to localloc.
+ class LocAllocData
+ {
+ typedef void* PVoid;
+
+ unsigned m_locAllocSize; // The currently allocated # elements in m_locAllocs
+ unsigned m_locAllocCurIdx; // Number of elements of m_locAllocs in use; 0 <= m_locAllocCurIdx < m_locAllocSize
+ void** m_locAllocs; // Always non-null in a constructed LocAllocData.
+ static const unsigned DefaultAllocs = 1;
+
+ unsigned EnsureIdx()
+ {
+ if (m_locAllocCurIdx == m_locAllocSize)
+ {
+ unsigned newSize = m_locAllocSize * 2;
+ void** newLocAllocs = new PVoid[newSize];
+ for (unsigned j = 0; j < m_locAllocCurIdx; j++)
+ {
+ newLocAllocs[j] = m_locAllocs[j];
+ }
+ m_locAllocSize = newSize;
+ delete[] m_locAllocs;
+ m_locAllocs = newLocAllocs;
+ }
+ return m_locAllocCurIdx++; // Note that we're returning the value before post-increment.
+ }
+
+ public:
+ LocAllocData() :
+ m_locAllocSize(DefaultAllocs),
+ m_locAllocCurIdx(0)
+ {
+ m_locAllocs = new PVoid[DefaultAllocs];
+ memset(m_locAllocs, 0, DefaultAllocs * sizeof(void*));
+ }
+
+ void* Alloc(NativeUInt sz)
+ {
+ unsigned idx = EnsureIdx();
+ void* res = new char[sz];
+ // We only *have* to do this if initlocals is set, but no harm in always doing it.
+ memset(res, 0, sz);
+ m_locAllocs[idx] = res;
+ return res;
+ }
+
+ ~LocAllocData()
+ {
+ if (m_locAllocs != NULL)
+ {
+ for (unsigned i = 0; i < m_locAllocCurIdx; i++)
+ {
+ delete[] m_locAllocs[i];
+ }
+ }
+ delete[] m_locAllocs;
+ }
+ };
+
+ LocAllocData* m_locAllocData;
+
+ LocAllocData* GetLocAllocData()
+ {
+ if (m_locAllocData == NULL)
+ {
+ m_locAllocData = new LocAllocData();
+ }
+ return m_locAllocData;
+ }
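+
+ // A usage sketch (illustrative size): each interpreted "localloc" becomes one
+ // heap block that the frame's LocAllocData tracks and frees on method exit:
+ //
+ //   void* p = GetLocAllocData()->Alloc(64);   // zeroed 64-byte block,
+ //                                             // deleted by ~LocAllocData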
+
+ // Search the current method's exception table, starting at "leaveEHIndex", for the first finally clause
+ // for a try block that covers "lastLeaveOffset". If one is found, sets m_ILCodePtr to the start of that
+ // finally clause, updates "leaveEHIndex" to be the next index after the found clause in the exception
+ // table, and returns true. Otherwise, if no applicable finally clause is found, returns false.
+ bool SearchForCoveringFinally();
+
+ void LdIcon(INT32 c);
+ void LdLcon(INT64 c);
+ void LdR4con(INT32 c);
+ void LdR8con(INT64 c);
+
+ void LdArg(int argNum);
+ void LdArgA(int argNum);
+ void StArg(int argNum);
+
+ __forceinline void LdLoc(int locNum);
+ void LdLocA(int locNum);
+ __forceinline void StLoc(int locNum);
+
+ // Requires that "*addr" contain a value of type "tp"; reads that value and
+ // pushes it on the operand stack.
+ __forceinline void LdFromMemAddr(void* addr, InterpreterType tp);
+
+ // Requires that "addr" is the address of a local var or argument location.
+ // Pops the value on the operand stack, assumed to be of the given "tp", and stores
+ // in "*addr".
+ __forceinline void StToLocalMemAddr(void* addr, InterpreterType tp);
+
+ void LdNull();
+
+ // This requires that the width of "T" is at least 4 bytes.
+ template<typename T, CorInfoType cit>
+ void LdInd();
+
+ // This requires that the width of "T" is less than 4 bytes (and loads it as an INT32).
+ template<typename T, bool isUnsigned>
+ void LdIndShort();
+
+ void LdIndFloat();
+
+ // Use this for non-object-ref types, and StInd_Ref for object refs.
+ template<typename T>
+ void StInd();
+
+ void StInd_Ref();
+
+ // Load/store instance/static fields.
+
+ // If non-NULL, we've determined the field to be loaded by other means (e.g., we've identified a
+ // "dead simple" property getter). In this case, use this FieldDesc*, otherwise, look up via token
+ // or cache.
+ void LdFld(FieldDesc* fld = NULL);
+
+ void LdFldA();
+ void LdSFld();
+ void LdSFldA();
+ void StFld();
+ void StSFld();
+
+ // Helper method used by the static field methods above.
+ // Requires that the code stream be pointing to a LDSFLD, LDSFLDA, or STSFLD.
+ // The "accessFlgs" variable should indicate which, via which of the CORINFO_ACCESS_GET,
+ // CORINFO_ACCESS_SET, and CORINFO_ACCESS_ADDRESS bits is set.
+ // Sets "*pStaticFieldAddr" (which must be a pointer to memory protected as a byref) to the address of the static field,
+ // sets "*pit" to the InterpreterType of the field,
+ // sets "*pFldSize" to the size of the field, and sets "*pManagedMem" to true iff the address is in managed memory (this is
+ // false only if the static variable is an "RVA" static). (Increments the m_ILCodePtr of 'this' by 5, the
+ // assumed size of all the listed instructions.)
+ __forceinline void StaticFldAddr(CORINFO_ACCESS_FLAGS accessFlgs,
+ /*out (byref)*/void** pStaticFieldAddr,
+ /*out*/InterpreterType* pit, /*out*/UINT* pFldSize, /*out*/bool* pManagedMem);
+
+ // We give out the address of this as the address for an "intrinsic static Zero".
+ static INT64 IntrinsicStaticZero;
+
+ // The version above does caching; this version always does the work. Returns "true" iff the results
+ // are cacheable.
+ bool StaticFldAddrWork(CORINFO_ACCESS_FLAGS accessFlgs,
+ /*out (byref)*/void** pStaticFieldAddr,
+ /*out*/InterpreterType* pit, /*out*/UINT* pFldSize, /*out*/bool* pManagedMem);
+
+ // Ensure that pMT has been initialized (including running its .cctor).
+ static void EnsureClassInit(MethodTable* pMT);
+
+ // Load/store array elements, get length. "T" should be the element
+ // type of the array (as indicated by a LDELEM opcode with a type); "IsObjType" should
+ // be true iff T is an object type, and "cit" should be the stack-normal CorInfoType
+ // to push on the type stack.
+ template<typename T, bool IsObjType, CorInfoType cit>
+ void LdElemWithType();
+
+ // Load the address of an array element.
+
+ template<typename T, bool IsObjType>
+ void StElemWithType();
+
+ template<bool takeAddr>
+ void LdElem();
+ void StElem();
+
+ void InitBlk();
+ void CpBlk();
+
+ void Box();
+ void UnboxAny();
+ void Unbox();
+
+ // Requires that operand stack location "i" contain a byref to a value of the struct type
+ // "valCls". Boxes the referent of that byref, and substitutes the resulting object pointer
+ // at opstack location "i."
+ void BoxStructRefAt(unsigned ind, CORINFO_CLASS_HANDLE valCls);
+
+ void Throw();
+ void Rethrow();
+ void EndFilter();
+
+ void LdLen();
+
+ // Perform a normal (non-constructor) call. The "virtualCall" argument indicates whether the
+ // call should be virtual.
+ void DoCall(bool virtualCall);
+
+ // Perform a call. For normal (non-constructor) calls, all optional args should be
+ // NULL (the default). For constructors, "thisArg" should be a this pointer (that is not on the operand stack),
+ // and "callInfoPtr" should be the callInfo describing the constructor. There's a special case here: for "VAROBJSIZE" constructors
+ // (which currently are defined for String), we want to explicitly pass NULL to the (pseudo) constructor. So passing
+ // the special value "0x1" as "thisArg" will cause NULL to be pushed.
+ void DoCallWork(bool virtualCall, void* thisArg = NULL, CORINFO_RESOLVED_TOKEN* methTokPtr = NULL, CORINFO_CALL_INFO* callInfoPtr = NULL);
+
+ // Do the call-indirect operation.
+ void CallI();
+
+ // Analyze the given method to see if it is a "dead simple" property getter:
+ // * if instance, ldarg.0, ldfld, ret.
+ // * if static, ldsfld, ret.
+ // More complicated forms in DBG. Sets "*offsetOfLd" to the offset of the ldfld or ldsfld instruction.
+ static bool IsDeadSimpleGetter(CEEInfo* info, MethodDesc* pMD, size_t* offsetOfLd);
+ static const unsigned ILOffsetOfLdFldInDeadSimpleInstanceGetterDbg = 2;
+ static const unsigned ILOffsetOfLdFldInDeadSimpleInstanceGetterOpt = 1;
+ static const unsigned ILOffsetOfLdSFldInDeadSimpleStaticGetter = 0;
+
+ // Here we handle a few intrinsic calls directly.
+ void DoStringLength();
+ void DoStringGetChar();
+ void DoGetTypeFromHandle();
+
+ // Returns the proper generics context for use in resolving tokens ("precise" in the sense of including generic instantiation
+ // information).
+ CORINFO_METHOD_HANDLE m_preciseGenericsContext;
+
+ CORINFO_METHOD_HANDLE GetPreciseGenericsContext()
+ {
+ if (m_preciseGenericsContext == NULL)
+ {
+ m_preciseGenericsContext = m_methInfo->GetPreciseGenericsContext(m_thisArg, m_genericsCtxtArg);
+ }
+ return m_preciseGenericsContext;
+ }
+
+ // Process the "CONSTRAINED" prefix, recording the constraint on the "this" parameter.
+ void RecordConstrainedCall();
+
+ // Emit a barrier if the m_volatile flag is set, and reset the flag.
+ void BarrierIfVolatile()
+ {
+ if (m_volatileFlag)
+ {
+ MemoryBarrier();
+ m_volatileFlag = false;
+ }
+ }
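+
+ // For example, interpreting "volatile. ldind.i4": decoding the prefix sets
+ // m_volatileFlag, the load executes, and its call to BarrierIfVolatile()
+ // issues the fence and clears the flag again (a sketch of the intended flow;
+ // the opcode dispatch itself lives in the main interpreter loop).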
+
+ enum BinaryArithOpEnum
+ {
+ BA_Add, BA_Sub, BA_Mul, BA_Div, BA_Rem
+ };
+ template<int op>
+ __forceinline void BinaryArithOp();
+
+ // "IsIntType" must be true iff "T" is an integral type, and "cit" must correspond to
+ // "T". "TypeIsUnchanged" implies that the proper type is already on the operand type stack.
+ template<int op, typename T, bool IsIntType, CorInfoType cit, bool TypeIsUnchanged>
+ __forceinline void BinaryArithOpWork(T val1, T val2);
+
+ // "op" is a BinaryArithOpEnum above; actually, must be one "BA_Add", "BA_Sub", "BA_Mul".
+ template<int op, bool asUnsigned>
+ void BinaryArithOvfOp();
+
+ template<int op, typename T, CorInfoType cit, bool TypeIsUnchanged>
+ void BinaryArithOvfOpWork(T val1, T val2);
+
+ INT32 RemFunc(INT32 v1, INT32 v2) { return v1 % v2; }
+ INT64 RemFunc(INT64 v1, INT64 v2) { return v1 % v2; }
+ float RemFunc(float v1, float v2);
+ double RemFunc(double v1, double v2);
+
+ enum BinaryIntOpEnum
+ {
+ BIO_And, BIO_DivUn, BIO_Or, BIO_RemUn, BIO_Xor
+ };
+ template<int op>
+ void BinaryIntOp();
+
+ template<int op, typename T, CorInfoType cit, bool TypeIsUnchanged>
+ void BinaryIntOpWork(T val1, T val2);
+
+ template<int op>
+ void ShiftOp();
+
+ template<int op, typename T, typename UT>
+ void ShiftOpWork(unsigned op1idx, CorInfoType cit2);
+
+ void Neg();
+ void Not();
+
+ // "T" should be the type indicated by the opcode.
+ // "TIsUnsigned" should be true if "T" is an unsigned type.
+ // "TCanHoldPtr" should be true if the type can hold a pointer (true for NativeInt and Long).
+ // "TIsShort" should be true if "T" is less wide than Int32.
+ // "cit" should be the *stack-normal* type of the converted value; even if "TIsShort", "cit" should be CORINFO_TYPE_INT.
+ template<typename T, bool TIsUnsigned, bool TCanHoldPtr, bool TIsShort, CorInfoType cit>
+ void Conv();
+
+ void ConvRUn();
+
+ // This version is for conversion to integral types.
+ template<typename T, INT64 TMin, UINT64 TMax, bool TCanHoldPtr, CorInfoType cit>
+ void ConvOvf();
+
+ // This version is for conversion to integral types.
+ template<typename T, INT64 TMin, UINT64 TMax, bool TCanHoldPtr, CorInfoType cit>
+ void ConvOvfUn();
+
+ void LdObj();
+ void LdObjValueClassWork(CORINFO_CLASS_HANDLE valueClsHnd, unsigned ind, void* src);
+ void CpObj();
+ void StObj();
+ void InitObj();
+
+ void LdStr();
+ void NewObj();
+ void NewArr();
+ void IsInst();
+ void CastClass();
+
+ void MkRefany();
+ void RefanyType();
+ void RefanyVal();
+
+ void CkFinite();
+
+ void LdToken();
+ void LdFtn();
+ void LdVirtFtn();
+
+ // The JIT/EE machinery for transforming delegate constructor calls requires the
+ // CORINFO_METHOD_HANDLE of a method. Usually, the method will be provided by a previous LDFTN/LDVIRTFTN.
+ // In the JIT, we fold that previous instruction and the delegate constructor into a single tree, before morphing.
+ // At this time, the loaded function is still in the form of a CORINFO_METHOD_HANDLE. At morph time, the delegate
+ // constructor is transformed, looking into the argument trees to find this handle. LDFTN's that are not removed
+ // this way are morphed to have actual native code addresses. To support both of these needs, LDFTN pushes the
+ // native code address of the method (which uses that actually invoke it or store it in data structures require),
+ // but also ensures that this parallel stack is allocated, and sets the corresponding index to hold the method
+ // handle. When we call a delegate constructor, we find the method handle on this stack.
+ CORINFO_METHOD_HANDLE* m_functionPointerStack;
+ CORINFO_METHOD_HANDLE* GetFunctionPointerStack()
+ {
+ if (m_functionPointerStack == NULL)
+ {
+ m_functionPointerStack = new CORINFO_METHOD_HANDLE[m_methInfo->m_maxStack];
+ for (unsigned i = 0; i < m_methInfo->m_maxStack; i++)
+ {
+ m_functionPointerStack[i] = NULL;
+ }
+ }
+ return m_functionPointerStack;
+ }
+
+ void Sizeof();
+
+ void LocAlloc();
+
+#if INTERP_ILINSTR_PROFILE
+ static void SetILInstrCategories();
+
+ // This type is used in sorting il instructions in a profile.
+ struct InstrExecRecord
+ {
+ unsigned short m_instr;
+ bool m_is2byte;
+ unsigned m_execs;
+#if INTERP_ILCYCLE_PROFILE
+ unsigned __int64 m_cycles;
+#endif // INTERP_ILCYCLE_PROFILE
+
+ static int _cdecl Compare(const void* v0, const void* v1)
+ {
+ InstrExecRecord* iep0 = (InstrExecRecord*)v0;
+ InstrExecRecord* iep1 = (InstrExecRecord*)v1;
+#if INTERP_ILCYCLE_PROFILE
+ if (iep0->m_cycles > iep1->m_cycles) return -1;
+ else if (iep0->m_cycles == iep1->m_cycles) return 0;
+ else return 1;
+#else
+ if (iep0->m_execs > iep1->m_execs) return -1;
+ else if (iep0->m_execs == iep1->m_execs) return 0;
+ else return 1;
+#endif // INTERP_ILCYCLE_PROFILE
+ }
+ };
+ // Prints the given array "recs", assumed to already be sorted.
+ static void PrintILProfile(InstrExecRecord* recs, unsigned totInstrs
+#if INTERP_ILCYCLE_PROFILE
+ , unsigned __int64 totCycles
+#endif // INTERP_ILCYCLE_PROFILE
+ );
+#endif // INTERP_ILINSTR_PROFILE
+
+ static size_t GetTypedRefSize(CEEInfo* info);
+ static CORINFO_CLASS_HANDLE GetTypedRefClsHnd(CEEInfo* info);
+ static InterpreterType GetTypedRefIT(CEEInfo* info);
+
+ OBJECTREF TypeHandleToTypeRef(TypeHandle* pth);
+
+ CorInfoType GetTypeForPrimitiveValueClass(CORINFO_CLASS_HANDLE clsHnd);
+
+ static bool s_initialized;
+ static bool s_compilerStaticsInitialized;
+
+ // This is the class handle for the struct type TypedRef (aka "Refany").
+ static CORINFO_CLASS_HANDLE s_TypedRefClsHnd;
+ // This is the InterpreterType for the struct type TypedRef (aka "Refany").
+ static InterpreterType s_TypedRefIT;
+ // And this is the size of that struct.
+ static size_t s_TypedRefSize;
+
+ // This returns the class corresponding to the token, of kind "tokKind", at "codePtr". If this
+ // includes any runtime lookup via a generics context parameter, does that.
+ CORINFO_CLASS_HANDLE GetTypeFromToken(BYTE* codePtr, CorInfoTokenKind tokKind InterpTracingArg(ResolveTokenKind rtk));
+
+
+ // Calls m_interpCeeInfo.resolveToken
+ inline void ResolveToken(CORINFO_RESOLVED_TOKEN* resTok, mdToken token, CorInfoTokenKind tokenType InterpTracingArg(ResolveTokenKind rtk));
+
+ inline FieldDesc* FindField(unsigned metaTok InterpTracingArg(ResolveTokenKind rtk));
+ inline CORINFO_CLASS_HANDLE FindClass(unsigned metaTok InterpTracingArg(ResolveTokenKind rtk));
+
+ enum CompareOpEnum
+ {
+ CO_EQ, CO_GT, CO_GT_UN, CO_LT, CO_LT_UN
+ };
+
+ // Making these next two functions inline (taking the
+ // template arg as a "real" arg) does not help.
+ template<int compOp>
+ void CompareOp();
+
+ // Requires that the m_curStackHt is at least op1Idx+2.
+ // Returns the result (0 or 1) of the comparison "opStack[op1Idx] op opStack[op1Idx + 1]".
+ template<int compOp>
+ INT32 CompareOpRes(unsigned op1Idx);
+
+ // Making this inline, by making its arguments real arguments,
+ // and using __forceinline, didn't result in a material difference.
+ template<bool val, int targetLen>
+ void BrOnValue();
+
+ // A worker function for BrOnValue. Assumes that "shouldBranch" indicates whether
+ // a branch should be taken, and that "targetLen" is the length of the branch offset (1 or 4).
+ // Updates "m_ILCodePtr" to the branch target if "shouldBranch" is true, or else
+ // he next instruction (+ 1 + targetLength).
+ __forceinline void BrOnValueTakeBranch(bool shouldBranch, int targetLen);
+
+ template<int compOp, bool reverse, int targetLen>
+ void BrOnComparison();
+
+ inline static
+ INT8 getI1(const BYTE * ptr)
+ { return *(INT8*)ptr; }
+
+ inline static
+ UINT16 getU2LittleEndian(const BYTE * ptr)
+ { return VAL16(*(UNALIGNED UINT16*)ptr); }
+
+ inline static
+ UINT32 getU4LittleEndian(const BYTE * ptr)
+ { return VAL32(*(UNALIGNED UINT32*)ptr); }
+
+ inline static
+ INT32 getI4LittleEndian(const BYTE * ptr)
+ { return VAL32(*(UNALIGNED INT32*)ptr); }
+
+ inline static
+ INT64 getI8LittleEndian(const BYTE * ptr)
+ { return VAL64(*(UNALIGNED INT64*)ptr); }
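+
+ // Illustrative note (not in the original source): these getters read
+ // possibly-unaligned little-endian values out of the IL stream; VAL16/VAL32/VAL64
+ // byte-swap on big-endian hosts, so decoding is endian-correct either way.
+ // A minimal usage sketch, assuming "code" points at an ldc.i4 opcode:
+ //
+ //   BYTE code[] = { 0x20, 0x78, 0x56, 0x34, 0x12 }; // ldc.i4 0x12345678
+ //   INT32 operand = getI4LittleEndian(code + 1);    // operand == 0x12345678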
+
+ void VerificationError(const char* msg);
+
+ void ThrowDivideByZero();
+ void ThrowSysArithException();
+ void ThrowNullPointerException();
+ void ThrowOverflowException();
+ void ThrowArrayBoundsException();
+ void ThrowInvalidCastException();
+ void ThrowStackOverflow();
+ void ThrowOnInvalidPointer(void* ptr);
+
+#ifdef _DEBUG
+ bool TOSIsPtr();
+#endif
+
+#if INTERP_TRACING
+ // Code copied from eeinterface.cpp in "compiler". Should be common...
+ const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd);
+#endif // INTERP_TRACING
+};
+
+#if defined(_X86_)
+inline
+unsigned short Interpreter::NumberOfIntegerRegArgs() { return 2; }
+#elif defined(_AMD64_)
+inline
+unsigned short Interpreter::NumberOfIntegerRegArgs() { return 4; }
+#elif defined(_ARM_)
+inline
+unsigned short Interpreter::NumberOfIntegerRegArgs() { return 4; }
+#elif defined(_ARM64_)
+inline
+unsigned short Interpreter::NumberOfIntegerRegArgs() { return 8; }
+#else
+#error Unsupported architecture.
+#endif
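+
+// (Illustrative note, not in the original source: these counts mirror the
+// managed calling conventions -- ecx/edx on x86, rcx/rdx/r8/r9 on Win-x64,
+// r0-r3 on ARM, and x0-x7 on ARM64.)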
+
+#endif // INTERPRETER_H_DEFINED
diff --git a/src/vm/interpreter.hpp b/src/vm/interpreter.hpp
new file mode 100644
index 0000000000..2567f0ac79
--- /dev/null
+++ b/src/vm/interpreter.hpp
@@ -0,0 +1,482 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+// This file contains bodies of inline methods.
+
+#ifndef INTERPRETER_HPP_DEFINED
+#define INTERPRETER_HPP_DEFINED 1
+
+#include "interpreter.h"
+#if INTERP_ILCYCLE_PROFILE
+#include "cycletimer.h"
+#endif // INTERP_ILCYCLE_PROFILE
+
+#if INTERP_TRACING
+// static
+FILE* Interpreter::GetLogFile()
+{
+ if (s_InterpreterLogFile == NULL)
+ {
+ static ConfigString fileName;
+ LPWSTR fn = fileName.val(CLRConfig::INTERNAL_InterpreterLogFile);
+ if (fn == NULL)
+ {
+ s_InterpreterLogFile = stdout;
+ }
+ else
+ {
+ s_InterpreterLogFile = _wfopen(fn, W("a"));
+ }
+ }
+ return s_InterpreterLogFile;
+}
+#endif // INTERP_TRACING
+
+inline void Interpreter::LdFromMemAddr(void* addr, InterpreterType tp)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ unsigned stackHt = m_curStackHt;
+
+ OpStackTypeSet(stackHt, tp.StackNormalize());
+
+ size_t sz = tp.Size(&m_interpCeeInfo);
+ if (tp.IsStruct())
+ {
+ if (tp.IsLargeStruct(&m_interpCeeInfo))
+ {
+ // Large struct case.
+ void* ptr = LargeStructOperandStackPush(sz);
+ memcpy(ptr, addr, sz);
+ OpStackSet<void*>(stackHt, ptr);
+ }
+ else
+ {
+ OpStackSet<INT64>(stackHt, GetSmallStructValue(addr, sz));
+ }
+ m_curStackHt = stackHt + 1;
+ return;
+ }
+
+ // Otherwise...
+
+ // The compiler seems to compile this switch statement into an "if
+ // cascade" anyway, but in a non-optimal order (one that's good for
+ // code density, but doesn't match the actual usage frequency,
+ // which, in fairness, it would have no clue about). So we might
+ // as well do our own "if cascade" in the order we believe is
+ // likely to be optimal (at least on 32-bit systems).
+ if (sz == 4)
+ {
+ OpStackSet<INT32>(stackHt, *reinterpret_cast<INT32*>(addr));
+ }
+ else if (sz == 1)
+ {
+ CorInfoType cit = tp.ToCorInfoType();
+ if (CorInfoTypeIsUnsigned(cit))
+ {
+ OpStackSet<UINT32>(stackHt, *reinterpret_cast<UINT8*>(addr));
+ }
+ else
+ {
+ OpStackSet<INT32>(stackHt, *reinterpret_cast<INT8*>(addr));
+ }
+ }
+ else if (sz == 8)
+ {
+ OpStackSet<INT64>(stackHt, *reinterpret_cast<INT64*>(addr));
+ }
+ else
+ {
+ assert(sz == 2); // only remaining case.
+ CorInfoType cit = tp.ToCorInfoType();
+ if (CorInfoTypeIsUnsigned(cit))
+ {
+ OpStackSet<UINT32>(stackHt, *reinterpret_cast<UINT16*>(addr));
+ }
+ else
+ {
+ OpStackSet<INT32>(stackHt, *reinterpret_cast<INT16*>(addr));
+ }
+ }
+ m_curStackHt = stackHt + 1;
+}
+
+inline void Interpreter::LdLoc(int locNum)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ if (locNum >= m_methInfo->m_numLocals)
+ {
+ COMPlusThrow(kVerificationException);
+ }
+
+ unsigned stackHt = m_curStackHt;
+ GCX_FORBID();
+ OpStackSet<INT64>(stackHt, *FixedSizeLocalSlot(locNum));
+ InterpreterType tp = m_methInfo->m_localDescs[locNum].m_typeStackNormal;
+ OpStackTypeSet(stackHt, tp);
+ m_curStackHt = stackHt + 1;
+ m_orOfPushedInterpreterTypes |= static_cast<size_t>(tp.AsRaw());
+}
+
+void Interpreter::StLoc(int locNum)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 1);
+
+ if (locNum >= m_methInfo->m_numLocals)
+ {
+ COMPlusThrow(kVerificationException);
+ }
+
+ // Don't decrement "m_curStackHt" early -- if we do, then we'll have a potential GC hole, if
+ // the top-of-stack value is a GC ref.
+ unsigned ind = m_curStackHt - 1;
+ InterpreterType tp = m_methInfo->m_localDescs[locNum].m_typeStackNormal;
+
+#ifdef _DEBUG
+ if (!tp.Matches(OpStackTypeGet(ind), &m_interpCeeInfo))
+ {
+ if (!s_InterpreterLooseRules ||
+ // We copy a 64-bit value; otherwise some of the conditions below would need casts.
+ !((tp.ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_NATIVEINT && OpStackTypeGet(ind).ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_INT) ||
+ (tp.ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_INT && OpStackTypeGet(ind).ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_NATIVEINT) ||
+ (tp.ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_INT && OpStackTypeGet(ind).ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_LONG) ||
+ (tp.ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_LONG && OpStackTypeGet(ind).ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_INT) ||
+ (tp.ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_LONG && OpStackTypeGet(ind).ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_BYREF) ||
+ (tp.ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_INT && OpStackTypeGet(ind).ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_BYREF) ||
+ (tp.ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_NATIVEINT && OpStackTypeGet(ind).ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_BYREF) ||
+ (tp.ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_BYREF && OpStackTypeGet(ind).ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_LONG) ||
+ (tp.ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_BYREF && OpStackTypeGet(ind).ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_CLASS) ||
+ (tp.ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_FLOAT && OpStackTypeGet(ind).ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_DOUBLE) ||
+ (tp.ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_DOUBLE && OpStackTypeGet(ind).ToCorInfoTypeShifted() == CORINFO_TYPE_SHIFTED_FLOAT)))
+ {
+ VerificationError("StLoc requires types to match.");
+ }
+ }
+#endif
+
+ if (tp.IsLargeStruct(&m_interpCeeInfo))
+ {
+ size_t sz = tp.Size(&m_interpCeeInfo); // TODO: note that tp.IsLargeStruct() above just called tp.Size(), so this is duplicate work unless the optimizer inlines and CSEs the calls.
+
+ // The operand stack entry is a pointer to a corresponding entry in the large struct stack.
+ // There will be a large struct location for the local as well.
+ BYTE* addr = LargeStructLocalSlot(locNum);
+
+ // Now, before we copy from the large struct stack to "addr", we have a problem.
+ // We've optimized "ldloc" to just copy the fixed size entry for the local onto the ostack.
+ // But this might mean that there are pointers to "addr" already on the stack, as stand-ins for
+ // the value they point to. If we overwrite that value, we've inadvertently modified the ostack.
+ // So we first "normalize" the ostack wrt "addr", ensuring that any entries containing addr
+ // have large-struct slots allocated for them, and the values are copied there.
+ OpStackNormalize();
+
+ // Now we can do the copy.
+ void* srcAddr = OpStackGet<void*>(ind);
+ memcpy(addr, srcAddr, sz);
+ LargeStructOperandStackPop(sz, srcAddr);
+ }
+ else
+ {
+ // Otherwise, we just copy the full stack entry.
+ *FixedSizeLocalSlot(locNum) = OpStackGet<INT64>(ind);
+ }
+
+ m_curStackHt = ind;
+
+#ifdef _DEBUG
+ // The value of the locals has changed; print them.
+ if (s_TraceInterpreterILFlag.val(CLRConfig::INTERNAL_TraceInterpreterIL))
+ {
+ PrintLocals();
+ }
+#endif // _DEBUG
+}
+
+void Interpreter::StToLocalMemAddr(void* addr, InterpreterType tp)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ assert(m_curStackHt >= 1);
+ m_curStackHt--;
+
+ size_t sz = tp.Size(&m_interpCeeInfo);
+ if (tp.IsStruct())
+ {
+ if (tp.IsLargeStruct(&m_interpCeeInfo))
+ {
+ // Large struct case.
+ void* srcAddr = OpStackGet<void*>(m_curStackHt);
+ memcpy(addr, srcAddr, sz);
+ LargeStructOperandStackPop(sz, srcAddr);
+ }
+ else
+ {
+ memcpy(addr, OpStackGetAddr(m_curStackHt, sz), sz);
+ }
+ return;
+ }
+
+ // Note: this implementation assumes a little-endian architecture.
+ if (sz == 4)
+ {
+ *reinterpret_cast<INT32*>(addr) = OpStackGet<INT32>(m_curStackHt);
+ }
+ else if (sz == 1)
+ {
+ *reinterpret_cast<INT8*>(addr) = OpStackGet<INT8>(m_curStackHt);
+ }
+ else if (sz == 8)
+ {
+ *reinterpret_cast<INT64*>(addr) = OpStackGet<INT64>(m_curStackHt);
+ }
+ else
+ {
+ assert(sz == 2);
+ *reinterpret_cast<INT16*>(addr) = OpStackGet<INT16>(m_curStackHt);
+ }
+}
+
+template<int op, typename T, bool IsIntType, CorInfoType cit, bool TypeIsUnchanged>
+void Interpreter::BinaryArithOpWork(T val1, T val2)
+{
+ T res;
+ if (op == BA_Add)
+ {
+ res = val1 + val2;
+ }
+ else if (op == BA_Sub)
+ {
+ res = val1 - val2;
+ }
+ else if (op == BA_Mul)
+ {
+ res = val1 * val2;
+ }
+ else
+ {
+ assert(op == BA_Div || op == BA_Rem);
+ if (IsIntType)
+ {
+ if (val2 == 0)
+ {
+ ThrowDivideByZero();
+ }
+ else if (val2 == -1 && val1 == static_cast<T>(((UINT64)1) << (sizeof(T)*8 - 1))) // min int / -1 is not representable.
+ {
+ ThrowSysArithException();
+ }
+ }
+ // Otherwise...
+ if (op == BA_Div)
+ {
+ res = val1 / val2;
+ }
+ else
+ {
+ res = RemFunc(val1, val2);
+ }
+ }
+
+ unsigned residx = m_curStackHt - 2;
+ OpStackSet<T>(residx, res);
+ if (!TypeIsUnchanged)
+ {
+ OpStackTypeSet(residx, InterpreterType(cit));
+ }
+}
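+
+// Illustrative note (not in the original source): because "op" is a template
+// parameter, each instantiation of BinaryArithOpWork folds the if/else chain
+// above away at compile time, leaving a single arithmetic operation. A hedged
+// sketch of an instantiation for 32-bit addition, assuming the opcode handler
+// already fetched the two operands into val1/val2:
+//
+//   BinaryArithOpWork<BA_Add, INT32, /*IsIntType*/true,
+//                     CORINFO_TYPE_INT, /*TypeIsUnchanged*/true>(val1, val2);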
+
+void Interpreter::BrOnValueTakeBranch(bool shouldBranch, int targetLen)
+{
+ if (shouldBranch)
+ {
+ int offset;
+ if (targetLen == 1)
+ {
+ // BYTE is unsigned...
+ offset = getI1(m_ILCodePtr + 1);
+ }
+ else
+ {
+ offset = getI4LittleEndian(m_ILCodePtr + 1);
+ }
+ // 1 is the size of the current instruction; offset is relative to start of next.
+ if (offset < 0)
+ {
+ // Backwards branch; enable caching.
+ BackwardsBranchActions(offset);
+ }
+ ExecuteBranch(m_ILCodePtr + 1 + targetLen + offset);
+ }
+ else
+ {
+ m_ILCodePtr += targetLen + 1;
+ }
+}
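+
+// Worked example (illustrative, not in the original source): for a short-form
+// branch such as brtrue.s, targetLen == 1. With m_ILCodePtr at the opcode and
+// an encoded offset of -3, the taken-branch target is
+// m_ILCodePtr + 1 + 1 + (-3), i.e. one byte before the opcode -- the offset is
+// relative to the start of the *next* instruction, exactly as computed above.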
+
+extern size_t CorInfoTypeSizeArray[];
+
+size_t CorInfoTypeSize(CorInfoType cit)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE_MSG(cit != CORINFO_TYPE_VALUECLASS, "Precondition");
+
+ size_t res = CorInfoTypeSizeArray[cit];
+
+ _ASSERTE_MSG(res != 0, "Other illegal input");
+
+ return res;
+}
+
+void Interpreter::StaticFldAddr(CORINFO_ACCESS_FLAGS accessFlgs,
+ /*out (byref)*/void** pStaticFieldAddr,
+ /*out*/InterpreterType* pit, /*out*/UINT* pFldSize, /*out*/bool* pManagedMem)
+{
+ unsigned ilOffset = CurOffset();
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionOpportunities[RTK_SFldAddr]);
+#endif // INTERP_TRACING
+
+ StaticFieldCacheEntry* cacheEntry = NULL;
+ if (s_InterpreterUseCaching) cacheEntry = GetCachedStaticField(ilOffset);
+ if (cacheEntry == NULL)
+ {
+ bool doCaching = StaticFldAddrWork(accessFlgs, pStaticFieldAddr, pit, pFldSize, pManagedMem);
+ if (s_InterpreterUseCaching && doCaching)
+ {
+ cacheEntry = new StaticFieldCacheEntry(*pStaticFieldAddr, *pFldSize, *pit);
+ CacheStaticField(ilOffset, cacheEntry);
+ }
+ }
+ else
+ {
+ // Re-enable this if you want to check this (#ifdef _DEBUG). It was interfering with some statistics
+ // gathering I was doing in debug builds.
+#if 0
+ // Make sure the caching works correctly.
+ StaticFldAddrWork(accessFlgs, pStaticFieldAddr, pit, pFldSize, pManagedMem);
+ assert(*pStaticFieldAddr == cacheEntry->m_srcPtr && *pit == cacheEntry->m_it && *pFldSize == cacheEntry->m_sz);
+#else
+ // If we do the call above, it takes care of this.
+ m_ILCodePtr += 5; // In the case above, the call to StaticFldAddrWork increments the code pointer.
+#endif
+ *pStaticFieldAddr = cacheEntry->m_srcPtr;
+ *pit = cacheEntry->m_it;
+ *pFldSize = cacheEntry->m_sz;
+ *pManagedMem = true; // Or else it wouldn't have been cached.
+ }
+}
+
+void Interpreter::ResolveToken(CORINFO_RESOLVED_TOKEN* resTok, mdToken token, CorInfoTokenKind tokenType InterpTracingArg(ResolveTokenKind rtk))
+{
+ resTok->tokenContext = GetPreciseGenericsContext();
+ resTok->tokenScope = m_methInfo->m_module;
+ resTok->token = token;
+ resTok->tokenType = tokenType;
+#if INTERP_ILCYCLE_PROFILE
+ unsigned __int64 startCycles;
+ bool b = CycleTimer::GetThreadCyclesS(&startCycles); assert(b);
+#endif // INTERP_ILCYCLE_PROFILE
+ m_interpCeeInfo.resolveToken(resTok);
+#if 1
+ if (resTok->tokenType == CORINFO_TOKENKIND_Method)
+ {
+ MethodDesc* pMD = reinterpret_cast<MethodDesc*>(resTok->hMethod);
+ MethodTable* pMT = GetMethodTableFromClsHnd(resTok->hClass);
+
+ if (pMD->GetMethodTable() != pMT)
+ {
+ // Find the method on the exact class (pMT) corresponding to the resolved method (pMD).
+ pMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pMD, // pPrimaryMD
+ pMT, // pExactMT
+ FALSE, // forceBoxedEntryPoint
+ pMD->GetMethodInstantiation(), // methodInst
+ FALSE, // allowInstParam
+ TRUE); // forceRemotableMethod (to get maximally specific).
+ resTok->hMethod = reinterpret_cast<CORINFO_METHOD_HANDLE>(pMD);
+ }
+ }
+#endif
+#if INTERP_ILCYCLE_PROFILE
+ unsigned __int64 endCycles;
+ b = CycleTimer::GetThreadCyclesS(&endCycles); assert(b);
+ m_exemptCycles += (endCycles - startCycles);
+#endif // INTERP_ILCYCLE_PROFILE
+
+#if INTERP_TRACING
+ InterlockedIncrement(&s_tokenResolutionCalls[rtk]);
+#endif // INTERP_TRACING
+}
+
+FieldDesc* Interpreter::FindField(unsigned metaTok InterpTracingArg(ResolveTokenKind rtk))
+{
+ CORINFO_RESOLVED_TOKEN fldTok;
+ ResolveToken(&fldTok, metaTok, CORINFO_TOKENKIND_Field InterpTracingArg(rtk));
+ return (FieldDesc*)fldTok.hField;
+}
+
+CORINFO_CLASS_HANDLE Interpreter::FindClass(unsigned metaTok InterpTracingArg(ResolveTokenKind rtk))
+{
+ CORINFO_RESOLVED_TOKEN clsTok;
+ ResolveToken(&clsTok, metaTok, CORINFO_TOKENKIND_Class InterpTracingArg(rtk));
+ return clsTok.hClass;
+}
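+
+// Usage sketch (illustrative, not in the original source): an opcode handler
+// with a 4-byte inline field token at m_ILCodePtr + 1 could resolve it via
+//
+//   unsigned tok = getU4LittleEndian(m_ILCodePtr + 1);
+//   FieldDesc* pFld = FindField(tok InterpTracingArg(RTK_SFldAddr));
+//
+// where the InterpTracingArg macro presumably drops the tracing argument
+// when INTERP_TRACING is disabled.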
+
+void Interpreter::ThrowOnInvalidPointer(void* ptr)
+{
+ if (ptr == NULL)
+ ThrowNullPointerException();
+
+ BOOL good = TRUE;
+
+ EX_TRY
+ {
+ AVInRuntimeImplOkayHolder AVOkay;
+ good = *(BOOL*)ptr;
+
+ // This conditional forces the dereference to occur; it also
+ // ensures that good == TRUE if the dereference succeeds.
+ if (!good)
+ good = TRUE;
+ }
+ EX_CATCH
+ {
+ good = FALSE;
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ if (!good)
+ ThrowNullPointerException();
+}
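+
+// Usage sketch (illustrative, not in the original source): probing a pointer
+// popped from the operand stack before trusting it:
+//
+//   void* p = OpStackGet<void*>(m_curStackHt - 1);
+//   ThrowOnInvalidPointer(p); // throws a managed exception instead of AV'ing
+//
+// The EX_TRY/AVInRuntimeImplOkayHolder dance above turns an access violation
+// from the probe read into a managed null-reference exception.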
+
+#endif // INTERPRETER_HPP_DEFINED
diff --git a/src/vm/invalidoverlappedwrappers.h b/src/vm/invalidoverlappedwrappers.h
new file mode 100644
index 0000000000..bc8cb131f0
--- /dev/null
+++ b/src/vm/invalidoverlappedwrappers.h
@@ -0,0 +1,71 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// InvalidOverlappedWrappers.h
+//
+
+//
+
+
+CREATE_WRAPPER_FUNCTION(HttpApi, ULONG, WINAPI, HttpReceiveHttpRequest,
+ ( HANDLE ReqQueueHandle, ULONGLONG RequestId, ULONG Flags, LPVOID pRequestBuffer, ULONG RequestBufferLength, PULONG pBytesReceived, LPOVERLAPPED overlapped),
+ ( ReqQueueHandle, RequestId, Flags, pRequestBuffer, RequestBufferLength, pBytesReceived, overlapped))
+
+CREATE_WRAPPER_FUNCTION(IpHlpApi, DWORD, WINAPI, NotifyAddrChange,
+ (PHANDLE Handle,LPOVERLAPPED overlapped),
+ (Handle, overlapped))
+
+CREATE_WRAPPER_FUNCTION(IpHlpApi, DWORD, WINAPI, NotifyRouteChange,
+ (PHANDLE Handle,LPOVERLAPPED overlapped),
+ (Handle, overlapped))
+
+CREATE_WRAPPER_FUNCTION(kernel32, BOOL, WINAPI, ReadFile,
+ (HANDLE hFile, LPVOID lpBuffer, DWORD nNumberOfBytesToRead, LPDWORD lpNumberOfBytesRead, LPOVERLAPPED overlapped),
+ (hFile, lpBuffer, nNumberOfBytesToRead, lpNumberOfBytesRead, overlapped))
+
+CREATE_WRAPPER_FUNCTION(kernel32, BOOL, WINAPI, ReadFileEx,
+ (HANDLE hFile, LPVOID lpBuffer, DWORD nNumberOfBytesToRead, LPOVERLAPPED overlapped, LPOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine),
+ (hFile, lpBuffer, nNumberOfBytesToRead, overlapped, lpCompletionRoutine))
+
+CREATE_WRAPPER_FUNCTION(kernel32, BOOL, WINAPI, WriteFile,
+ (HANDLE hFile, LPCVOID lpBuffer, DWORD nNumberOfBytesToWrite, LPDWORD lpNumberOfBytesWritten, LPOVERLAPPED overlapped),
+ (hFile, lpBuffer, nNumberOfBytesToWrite, lpNumberOfBytesWritten, overlapped))
+
+CREATE_WRAPPER_FUNCTION(kernel32, BOOL, WINAPI, WriteFileEx,
+ (HANDLE hFile, LPCVOID lpBuffer, DWORD nNumberOfBytesToWrite, LPOVERLAPPED overlapped, LPOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine),
+ (hFile, lpBuffer, nNumberOfBytesToWrite, overlapped, lpCompletionRoutine))
+
+CREATE_WRAPPER_FUNCTION(kernel32, BOOL, WINAPI, ReadDirectoryChangesW,
+ (HANDLE hDirectory, LPVOID lpBuffer, DWORD nBufferLength, BOOL bWatchSubtree, DWORD dwNotifyFilter, LPDWORD lpBytesReturned, LPOVERLAPPED overlapped, LPOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine),
+ (hDirectory, lpBuffer, nBufferLength, bWatchSubtree, dwNotifyFilter, lpBytesReturned, overlapped, lpCompletionRoutine))
+
+CREATE_WRAPPER_FUNCTION(kernel32, BOOL, WINAPI, PostQueuedCompletionStatus,
+ (HANDLE CompletionPort, DWORD dwNumberOfBytesTransferred, ULONG_PTR dwCompletionKey, LPOVERLAPPED overlapped),
+ (CompletionPort, dwNumberOfBytesTransferred, dwCompletionKey, overlapped))
+
+CREATE_WRAPPER_FUNCTION(MSWSock, BOOL, PASCAL, ConnectEx,
+ (UINT_PTR s, LPVOID name, int namelen, PVOID lpSendBuffer, DWORD dwSendDataLength, LPDWORD lpdwBytesSent, LPOVERLAPPED overlapped),
+ (s, name, namelen, lpSendBuffer, dwSendDataLength, lpdwBytesSent, overlapped))
+
+CREATE_WRAPPER_FUNCTION(WS2_32, int, PASCAL, WSASend,
+ (UINT_PTR s, LPVOID lpBuffers, DWORD dwBufferCount, LPDWORD lpNumberOfBytesSent, DWORD dwFlags, LPOVERLAPPED overlapped, LPVOID lpCompletionRoutine),
+ (s, lpBuffers, dwBufferCount, lpNumberOfBytesSent, dwFlags, overlapped, lpCompletionRoutine))
+
+CREATE_WRAPPER_FUNCTION(WS2_32, int, PASCAL, WSASendTo,
+ (UINT_PTR s, LPVOID lpBuffers, DWORD dwBufferCount, LPDWORD lpNumberOfBytesSent, DWORD dwFlags, LPVOID lpTo, int iToLen, LPOVERLAPPED overlapped, LPVOID lpCompletionRoutine),
+ (s, lpBuffers, dwBufferCount, lpNumberOfBytesSent, dwFlags, lpTo, iToLen, overlapped, lpCompletionRoutine))
+
+CREATE_WRAPPER_FUNCTION(WS2_32, int, PASCAL, WSARecv,
+ (UINT_PTR s, LPVOID lpBuffers, DWORD dwBufferCount, LPDWORD lpNumberOfBytesRecvd, LPDWORD lpFlags, LPOVERLAPPED overlapped, LPVOID lpCompletionRoutine),
+ (s, lpBuffers, dwBufferCount, lpNumberOfBytesRecvd, lpFlags, overlapped, lpCompletionRoutine))
+
+CREATE_WRAPPER_FUNCTION(WS2_32, int, PASCAL, WSARecvFrom,
+ (UINT_PTR s, LPVOID lpBuffers, DWORD dwBufferCount, LPDWORD lpNumberOfBytesRecvd, LPDWORD lpFlags, LPVOID lpFrom, LPINT lpFromlen, LPOVERLAPPED overlapped, LPVOID lpCompletionRoutine),
+ (s, lpBuffers, dwBufferCount, lpNumberOfBytesRecvd, lpFlags, lpFrom, lpFromlen, overlapped, lpCompletionRoutine))
+
+CREATE_WRAPPER_FUNCTION(MQRT, int, PASCAL, MQReceiveMessage,
+ (HANDLE hSource, DWORD dwTimeout, DWORD dwAction, LPVOID pMessageProps, LPOVERLAPPED overlapped, LPVOID fnReceiveCallback, HANDLE hCursor, LPVOID pTransaction),
+ (hSource, dwTimeout, dwAction, pMessageProps, overlapped, fnReceiveCallback, hCursor, pTransaction))
+
diff --git a/src/vm/invokeutil.cpp b/src/vm/invokeutil.cpp
new file mode 100644
index 0000000000..4c623f6981
--- /dev/null
+++ b/src/vm/invokeutil.cpp
@@ -0,0 +1,2128 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+////////////////////////////////////////////////////////////////////////////////
+// This module defines a Utility Class used by reflection
+//
+//
+
+////////////////////////////////////////////////////////////////////////////////
+
+
+#include "common.h"
+#include "invokeutil.h"
+#include "corpriv.h"
+#include "method.hpp"
+#include "threads.h"
+#include "excep.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "security.h"
+#include "field.h"
+#include "customattribute.h"
+#include "eeconfig.h"
+#include "generics.h"
+#include "runtimehandles.h"
+
+#ifndef CROSSGEN_COMPILE
+
+// The Attributes Table
+// 20 bits for built-in types and 12 bits for Properties.
+// The properties are followed by the widening mask. All types widen to themselves.
+const DWORD InvokeUtil::PrimitiveAttributes[PRIMITIVE_TABLE_SIZE] = {
+ 0x00, // ELEMENT_TYPE_END
+ 0x00, // ELEMENT_TYPE_VOID
+ PT_Primitive | 0x0004, // ELEMENT_TYPE_BOOLEAN
+ PT_Primitive | 0x3F88, // ELEMENT_TYPE_CHAR (W = U2, CHAR, I4, U4, I8, U8, R4, R8) (U2 == Char)
+ PT_Primitive | 0x3550, // ELEMENT_TYPE_I1 (W = I1, I2, I4, I8, R4, R8)
+ PT_Primitive | 0x3FE8, // ELEMENT_TYPE_U1 (W = CHAR, U1, I2, U2, I4, U4, I8, U8, R4, R8)
+ PT_Primitive | 0x3540, // ELEMENT_TYPE_I2 (W = I2, I4, I8, R4, R8)
+ PT_Primitive | 0x3F88, // ELEMENT_TYPE_U2 (W = U2, CHAR, I4, U4, I8, U8, R4, R8)
+ PT_Primitive | 0x3500, // ELEMENT_TYPE_I4 (W = I4, I8, R4, R8)
+ PT_Primitive | 0x3E00, // ELEMENT_TYPE_U4 (W = U4, I8, R4, R8)
+ PT_Primitive | 0x3400, // ELEMENT_TYPE_I8 (W = I8, R4, R8)
+ PT_Primitive | 0x3800, // ELEMENT_TYPE_U8 (W = U8, R4, R8)
+ PT_Primitive | 0x3000, // ELEMENT_TYPE_R4 (W = R4, R8)
+ PT_Primitive | 0x2000, // ELEMENT_TYPE_R8 (W = R8)
+};
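+
+// Illustrative note (not in the original source): the low bits of each entry
+// form the widening mask, indexed by the destination CorElementType. For
+// example, ELEMENT_TYPE_I1 carries mask 0x3550, whose set bits are positions
+// 4 (I1), 6 (I2), 8 (I4), 10 (I8), 12 (R4), and 13 (R8) -- so an I1 value may
+// widen to an I4 parameter, but not to a U4 one (bit 9 is clear).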
+
+BOOL InvokeUtil::IsVoidPtr(TypeHandle th)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!th.IsPointer())
+ return FALSE;
+
+ return th.AsTypeDesc()->GetTypeParam() == MscorlibBinder::GetElementType(ELEMENT_TYPE_VOID);
+}
+
+OBJECTREF InvokeUtil::CreatePointer(TypeHandle th, void * p)
+{
+ CONTRACT(OBJECTREF) {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(!th.IsNull());
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+ OBJECTREF refObj = NULL;
+ GCPROTECT_BEGIN(refObj);
+
+ refObj = AllocateObject(MscorlibBinder::GetClass(CLASS__POINTER));
+
+ ((ReflectionPointer *)OBJECTREFToObject(refObj))->_ptr = p;
+
+ OBJECTREF refType = th.GetManagedClassObject();
+ SetObjectReference(&(((ReflectionPointer *)OBJECTREFToObject(refObj))->_ptrType), refType, GetAppDomain());
+
+ GCPROTECT_END();
+ RETURN refObj;
+}
+
+TypeHandle InvokeUtil::GetPointerType(OBJECTREF pObj) {
+ CONTRACT(TypeHandle) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(pObj != NULL);
+ POSTCONDITION(!RETVAL.IsNull());
+ }
+ CONTRACT_END;
+
+ ReflectionPointer * pReflectionPointer = (ReflectionPointer *)OBJECTREFToObject(pObj);
+ REFLECTCLASSBASEREF o = (REFLECTCLASSBASEREF)pReflectionPointer->_ptrType;
+ TypeHandle typeHandle = o->GetType();
+ RETURN typeHandle;
+}
+
+void* InvokeUtil::GetPointerValue(OBJECTREF pObj) {
+ CONTRACT(void*) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(pObj != NULL);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ ReflectionPointer * pReflectionPointer = (ReflectionPointer *)OBJECTREFToObject(pObj);
+ void *value = pReflectionPointer->_ptr;
+ RETURN value;
+}
+
+void *InvokeUtil::GetIntPtrValue(OBJECTREF pObj) {
+ CONTRACT(void*) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(pObj != NULL);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN *(void **)((pObj)->UnBox());
+}
+
+void InvokeUtil::CopyArg(TypeHandle th, OBJECTREF *pObjUNSAFE, void *pArgDst) {
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER; // Caller does not protect object references
+ MODE_COOPERATIVE;
+ PRECONDITION(!th.IsNull());
+ PRECONDITION(CheckPointer(pObjUNSAFE));
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ OBJECTREF rObj = *pObjUNSAFE;
+ MethodTable* pMT;
+ CorElementType oType;
+ CorElementType type;
+
+ if (rObj != 0) {
+ pMT = rObj->GetMethodTable();
+ oType = pMT->GetInternalCorElementType();
+ }
+ else {
+ pMT = 0;
+ oType = ELEMENT_TYPE_OBJECT;
+ }
+ type = th.GetVerifierCorElementType();
+
+ // This basically maps the Signature type to our type and calls the CreatePrimitiveValue
+ // method. We can omit this if we get alignment on these types.
+ switch (type) {
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_R4:
+ IN_WIN32(case ELEMENT_TYPE_I:)
+ IN_WIN32(case ELEMENT_TYPE_U:)
+ {
+ // If we got the universal zero... then assign it and exit.
+ if (rObj == 0)
+ *(PVOID *)pArgDst = 0;
+ else
+ {
+ ARG_SLOT slot;
+ CreatePrimitiveValue(type, oType, rObj, &slot);
+ *(PVOID *)pArgDst = (PVOID)slot;
+ }
+ break;
+ }
+
+
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R8:
+ IN_WIN64(case ELEMENT_TYPE_I:)
+ IN_WIN64(case ELEMENT_TYPE_U:)
+ {
+ // If we got the universal zero... then assign it and exit.
+ if (rObj == 0)
+ *(INT64 *)pArgDst = 0;
+ else
+ {
+ ARG_SLOT slot;
+ CreatePrimitiveValue(type, oType, rObj, &slot);
+ *(INT64 *)pArgDst = (INT64)slot;
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ // If we got the universal zero... then assign it and exit.
+ if (rObj == 0) {
+ InitValueClass(pArgDst, th.AsMethodTable());
+ }
+ else {
+ if (!th.AsMethodTable()->UnBoxInto(pArgDst, rObj))
+ COMPlusThrow(kArgumentException, W("Arg_ObjObj"));
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_SZARRAY: // Single Dim
+ case ELEMENT_TYPE_ARRAY: // General Array
+ case ELEMENT_TYPE_CLASS: // Class
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_STRING: // System.String
+ case ELEMENT_TYPE_VAR:
+ {
+ if (rObj == 0)
+ *(PVOID *)pArgDst = 0;
+ else
+ *(PVOID *)pArgDst = OBJECTREFToObject(rObj);
+ break;
+ }
+
+ case ELEMENT_TYPE_BYREF:
+ {
+ //
+ // (obj is the parameter passed to MethodInfo.Invoke, by the caller)
+ // if argument is a primitive
+ // {
+ // if incoming argument, obj, is null
+ // Allocate a boxed object and place ref to it in 'obj'
+ // Unbox 'obj' and pass it to callee
+ // }
+ // if argument is a value class
+ // {
+ // if incoming argument, obj, is null
+ // Allocate an object of that valueclass, and place ref to it in 'obj'
+ // Unbox 'obj' and pass it to callee
+ // }
+ // if argument is an objectref
+ // {
+ // pass obj to callee
+ // }
+ //
+ TypeHandle thBaseType = th.AsTypeDesc()->GetTypeParam();
+
+ // We should never get here for nullable types. Instead, Invoke
+ // heads these off and morphs the type handle so it is no longer byref.
+ _ASSERTE(!Nullable::IsNullableType(thBaseType));
+
+ TypeHandle srcTH = TypeHandle();
+ if (rObj == 0)
+ oType = thBaseType.GetSignatureCorElementType();
+ else
+ srcTH = rObj->GetTypeHandle();
+
+ // CreateByRef only triggers GC in the throw path, so it's OK to use the raw unsafe pointer.
+ *(PVOID *)pArgDst = CreateByRef(thBaseType, oType, srcTH, rObj, pObjUNSAFE);
+ break;
+ }
+
+ case ELEMENT_TYPE_TYPEDBYREF:
+ {
+ TypedByRef* ptr = (TypedByRef*) pArgDst;
+ TypeHandle srcTH;
+ BOOL bIsZero = FALSE;
+
+ // If we got the universal zero... then assign it and exit.
+ if (rObj == 0) {
+ bIsZero = TRUE;
+ ptr->data = 0;
+ ptr->type = TypeHandle();
+ }
+ else {
+ bIsZero = FALSE;
+ srcTH = rObj->GetTypeHandle();
+ ptr->type = rObj->GetTypeHandle();
+ }
+
+ if (!bIsZero)
+ {
+ // CreateByRef only triggers GC in the throw path.
+ ptr->data = CreateByRef(srcTH, oType, srcTH, rObj, pObjUNSAFE);
+ }
+
+ break;
+ }
+
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_FNPTR:
+ {
+ // If we got the universal zero... then assign it and exit.
+ if (rObj == 0) {
+ *(PVOID *)pArgDst = 0;
+ }
+ else {
+ if (rObj->GetMethodTable() == MscorlibBinder::GetClassIfExist(CLASS__POINTER) && type == ELEMENT_TYPE_PTR)
+ *(PVOID *)pArgDst = GetPointerValue(rObj);
+ else if (rObj->GetTypeHandle().AsMethodTable() == MscorlibBinder::GetElementType(ELEMENT_TYPE_I))
+ {
+ ARG_SLOT slot;
+ CreatePrimitiveValue(oType, oType, rObj, &slot);
+ *(PVOID *)pArgDst = (PVOID)slot;
+ }
+ else
+ COMPlusThrow(kArgumentException,W("Arg_ObjObj"));
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_VOID:
+ default:
+ _ASSERTE(!"Unknown Type");
+ COMPlusThrow(kNotSupportedException);
+ }
+}
+
+// CreatePrimitiveValue
+// This routine will validate the object and then place the value into
+// the destination
+// dstType -- The type of the destination
+// srcType -- The type of the source
+// srcObj -- The Object containing the primitive value.
+// pDst -- pointer to the destination
+void InvokeUtil::CreatePrimitiveValue(CorElementType dstType,
+ CorElementType srcType,
+ OBJECTREF srcObj,
+ ARG_SLOT *pDst) {
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(srcObj != NULL);
+ PRECONDITION(CheckPointer(pDst));
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+ CreatePrimitiveValue(dstType, srcType, srcObj->UnBox(), srcObj->GetMethodTable(), pDst);
+}
+
+void InvokeUtil::CreatePrimitiveValue(CorElementType dstType,CorElementType srcType,
+ void *pSrc, MethodTable *pSrcMT, ARG_SLOT* pDst)
+{
+
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDst));
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ if (!IsPrimitiveType(srcType) || !CanPrimitiveWiden(dstType, srcType))
+ COMPlusThrow(kArgumentException, W("Arg_PrimWiden"));
+
+ ARG_SLOT data = 0;
+
+ switch (srcType) {
+ case ELEMENT_TYPE_I1:
+ data = *(INT8*)pSrc;
+ break;
+ case ELEMENT_TYPE_I2:
+ data = *(INT16*)pSrc;
+ break;
+ IN_WIN32(case ELEMENT_TYPE_I:)
+ case ELEMENT_TYPE_I4:
+ data = *(INT32 *)pSrc;
+ break;
+ IN_WIN64(case ELEMENT_TYPE_I:)
+ case ELEMENT_TYPE_I8:
+ data = *(INT64 *)pSrc;
+ break;
+ default:
+ switch (pSrcMT->GetNumInstanceFieldBytes())
+ {
+ case 1:
+ data = *(UINT8 *)pSrc;
+ break;
+ case 2:
+ data = *(UINT16 *)pSrc;
+ break;
+ case 4:
+ data = *(UINT32 *)pSrc;
+ break;
+ case 8:
+ data = *(UINT64 *)pSrc;
+ break;
+ default:
+ _ASSERTE(!"Unknown conversion");
+ // this is really an impossible condition
+ COMPlusThrow(kNotSupportedException);
+ break;
+ }
+ }
+
+ if (srcType == dstType) {
+ // shortcut
+ *pDst = data;
+ return;
+ }
+
+ // Copy the data and return
+ switch (dstType) {
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ switch (srcType) {
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ *pDst = data;
+ break;
+ case ELEMENT_TYPE_R4:
+ *pDst = (I8)(*(R4*)pSrc);
+ break;
+ case ELEMENT_TYPE_R8:
+ *pDst = (I8)(*(R8*)pSrc);
+ break;
+ default:
+ _ASSERTE(!"Unknown conversion");
+ // this is really an impossible condition
+ COMPlusThrow(kNotSupportedException);
+ }
+ break;
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ {
+ R8 r8 = 0;
+ switch (srcType) {
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_I4:
+ IN_WIN32(case ELEMENT_TYPE_I:)
+ r8 = (R8)((INT32)data);
+ break;
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_U4:
+ IN_WIN32(case ELEMENT_TYPE_U:)
+ r8 = (R8)((UINT32)data);
+ break;
+ case ELEMENT_TYPE_U8:
+ IN_WIN64(case ELEMENT_TYPE_U:)
+ r8 = (R8)((UINT64)data);
+ break;
+ case ELEMENT_TYPE_I8:
+ IN_WIN64(case ELEMENT_TYPE_I:)
+ r8 = (R8)((INT64)data);
+ break;
+ case ELEMENT_TYPE_R4:
+ r8 = *(R4*)pSrc;
+ break;
+ case ELEMENT_TYPE_R8:
+ r8 = *(R8*)pSrc;
+ break;
+ default:
+ _ASSERTE(!"Unknown R4 or R8 conversion");
+ // this is really an impossible condition
+ COMPlusThrow(kNotSupportedException);
+ }
+
+ if (dstType == ELEMENT_TYPE_R4) {
+ R4 r4 = (R4)r8;
+ *pDst = (UINT32&)r4;
+ }
+ else {
+ *pDst = (UINT64&)r8;
+ }
+
+ }
+ break;
+ default:
+ _ASSERTE(!"Unknown conversion");
+ }
+}
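+
+// Usage sketch (illustrative, not in the original source): widening a boxed
+// short into a double-typed slot, assuming "boxedShort" is an OBJECTREF to a
+// boxed INT16:
+//
+//   ARG_SLOT slot;
+//   CreatePrimitiveValue(ELEMENT_TYPE_R8, ELEMENT_TYPE_I2, boxedShort, &slot);
+//   // slot now holds the IEEE-754 bits of (double)value, as an ARG_SLOT.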
+
+void* InvokeUtil::CreateByRef(TypeHandle dstTh,
+ CorElementType srcType,
+ TypeHandle srcTH,
+ OBJECTREF srcObj,
+ OBJECTREF *pIncomingObj) {
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(!dstTh.IsNull());
+ PRECONDITION(CheckPointer(pIncomingObj));
+
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ CorElementType dstType = dstTh.GetSignatureCorElementType();
+ if (IsPrimitiveType(srcType) && IsPrimitiveType(dstType)) {
+ if (dstType != srcType)
+ {
+ CONTRACT_VIOLATION (GCViolation);
+ COMPlusThrow(kArgumentException,W("Arg_PrimWiden"));
+ }
+
+ return srcObj->UnBox();
+ }
+
+ if (srcTH.IsNull()) {
+ return pIncomingObj;
+ }
+
+ _ASSERTE(srcObj != NULL);
+
+ if (dstType == ELEMENT_TYPE_VALUETYPE) {
+ return srcObj->UnBox();
+ }
+ else
+ return pIncomingObj;
+}
+
+// GetBoxedObject
+// Given an address of a primitive type, this will box that data...
+// <TODO>@TODO: We need to handle all value classes?</TODO>
+OBJECTREF InvokeUtil::GetBoxedObject(TypeHandle th, void* pData) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(!th.IsNull());
+ PRECONDITION(CheckPointer(pData));
+
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMethTable = th.GetMethodTable();
+ PREFIX_ASSUME(pMethTable != NULL);
+ // Save off the data. We are going to create an object
+ // which may cause GC to occur.
+ int size = pMethTable->GetNumInstanceFieldBytes();
+ void *p = _alloca(size);
+ memcpy(p, pData, size);
+ OBJECTREF retO = pMethTable->Box(p);
+ return retO;
+}
+
+// ValidField
+// This method checks that the object can be widened to the proper type.
+void InvokeUtil::ValidField(TypeHandle th, OBJECTREF* value)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(!th.IsNull());
+ PRECONDITION(CheckPointer(value));
+ PRECONDITION(IsProtectedByGCFrame (value));
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ if ((*value) == 0)
+ return;
+
+ MethodTable* pMT;
+ CorElementType oType;
+ CorElementType type = th.GetSignatureCorElementType();
+ pMT = (*value)->GetMethodTable();
+ oType = TypeHandle(pMT).GetSignatureCorElementType();
+
+ // handle pointers
+ if (type == ELEMENT_TYPE_PTR || type == ELEMENT_TYPE_FNPTR) {
+ if (MscorlibBinder::IsClass((*value)->GetMethodTable(), CLASS__POINTER) && type == ELEMENT_TYPE_PTR) {
+ TypeHandle srcTH = GetPointerType(*value);
+
+ if (!IsVoidPtr(th)) {
+ if (!srcTH.CanCastTo(th))
+ COMPlusThrow(kArgumentException,W("Arg_ObjObj"));
+ }
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
+ return;
+ }
+ else if (MscorlibBinder::IsClass((*value)->GetMethodTable(), CLASS__INTPTR)) {
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
+ return;
+ }
+
+ COMPlusThrow(kArgumentException,W("Arg_ObjObj"));
+ }
+
+ // Need to handle Object special
+ if (type == ELEMENT_TYPE_CLASS || type == ELEMENT_TYPE_VALUETYPE ||
+ type == ELEMENT_TYPE_OBJECT || type == ELEMENT_TYPE_STRING ||
+ type == ELEMENT_TYPE_ARRAY || type == ELEMENT_TYPE_SZARRAY)
+ {
+
+ if (th.GetMethodTable() == g_pObjectClass)
+ return;
+ if (IsPrimitiveType(oType)) {
+ if (type != ELEMENT_TYPE_VALUETYPE)
+ COMPlusThrow(kArgumentException,W("Arg_ObjObj"));
+
+ // Legacy behavior: The following if disallows assigning primitives to enums.
+ if (th.IsEnum())
+ COMPlusThrow(kArgumentException,W("Arg_ObjObj"));
+
+ type = th.GetVerifierCorElementType();
+ if (IsPrimitiveType(type)) {
+ if (CanPrimitiveWiden(type, oType))
+ return;
+ else
+ COMPlusThrow(kArgumentException, W("Arg_ObjObj"));
+ }
+ }
+
+ if (!ObjIsInstanceOf(OBJECTREFToObject(*value), th)) {
+ COMPlusThrow(kArgumentException,W("Arg_ObjObj"));
+ }
+ return;
+ }
+
+
+ if (!IsPrimitiveType(oType))
+ COMPlusThrow(kArgumentException,W("Arg_ObjObj"));
+ // Now make sure we can widen into the proper type -- CanWiden may run GC...
+ if (!CanPrimitiveWiden(type,oType))
+ COMPlusThrow(kArgumentException,W("Arg_ObjObj"));
+}
+
+// CreateObject
+// This routine will create the specified object from the value
+OBJECTREF InvokeUtil::CreateObject(TypeHandle th, void * pValue) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(!th.IsNull());
+
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ CorElementType type = th.GetSignatureCorElementType();
+ MethodTable *pMT = NULL;
+ OBJECTREF obj = NULL;
+
+ // Handle the non-table types
+ switch (type) {
+ case ELEMENT_TYPE_VOID:
+ break;
+
+ case ELEMENT_TYPE_PTR:
+ {
+ obj = CreatePointer(th, *(void **)pValue);
+ break;
+ }
+
+ case ELEMENT_TYPE_FNPTR:
+ pMT = MscorlibBinder::GetElementType(ELEMENT_TYPE_I);
+ goto PrimitiveType;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ _ASSERTE(!th.IsTypeDesc());
+ pMT = th.AsMethodTable();
+ obj = pMT->Box(pValue);
+ break;
+ }
+
+ case ELEMENT_TYPE_CLASS: // Class
+ case ELEMENT_TYPE_SZARRAY: // Single Dim, Zero
+ case ELEMENT_TYPE_ARRAY: // General Array
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_VAR:
+ obj = *(OBJECTREF *)pValue;
+ break;
+
+ case ELEMENT_TYPE_BOOLEAN: // boolean
+ case ELEMENT_TYPE_I1: // byte
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2: // short
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_CHAR: // char
+ case ELEMENT_TYPE_I4: // int
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8: // long
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4: // float
+ case ELEMENT_TYPE_R8: // double
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ _ASSERTE(!th.IsTypeDesc());
+ pMT = th.AsMethodTable();
+ PrimitiveType:
+ {
+ // Don't use MethodTable::Box here for perf reasons
+ PREFIX_ASSUME(pMT != NULL);
+ obj = AllocateObject(pMT);
+ DWORD size = pMT->GetNumInstanceFieldBytes();
+ memcpyNoGCRefs(obj->UnBox(), pValue, size);
+ }
+ break;
+
+ case ELEMENT_TYPE_BYREF:
+ COMPlusThrow(kNotSupportedException, W("NotSupported_ByRefReturn"));
+ case ELEMENT_TYPE_END:
+ default:
+ _ASSERTE(!"Unknown Type");
+ COMPlusThrow(kNotSupportedException);
+ }
+
+ return obj;
+}
+
+// This is a special purpose Exception creation function. It
+// creates the ReflectionTypeLoadException placing the passed
+// classes array and exception array into it.
+OBJECTREF InvokeUtil::CreateClassLoadExcept(OBJECTREF* classes, OBJECTREF* except) {
+ CONTRACT(OBJECTREF) {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(classes));
+ PRECONDITION(CheckPointer(except));
+ PRECONDITION(IsProtectedByGCFrame (classes));
+ PRECONDITION(IsProtectedByGCFrame (except));
+
+ POSTCONDITION(RETVAL != NULL);
+
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACT_END;
+
+ OBJECTREF oRet = 0;
+
+ struct {
+ OBJECTREF o;
+ STRINGREF str;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ MethodTable *pVMClassLoadExcept = MscorlibBinder::GetException(kReflectionTypeLoadException);
+ gc.o = AllocateObject(pVMClassLoadExcept);
+ GCPROTECT_BEGIN(gc);
+ ARG_SLOT args[4];
+
+ // Retrieve the resource string.
+ ResMgrGetString(W("ReflectionTypeLoad_LoadFailed"), &gc.str);
+
+ MethodDesc* pMD = MemberLoader::FindMethod(gc.o->GetTrueMethodTable(),
+ COR_CTOR_METHOD_NAME, &gsig_IM_ArrType_ArrException_Str_RetVoid);
+
+ if (!pMD)
+ {
+ MAKE_WIDEPTR_FROMUTF8(wzMethodName, COR_CTOR_METHOD_NAME);
+ COMPlusThrowNonLocalized(kMissingMethodException, wzMethodName);
+ }
+
+ MethodDescCallSite ctor(pMD);
+
+ // Call the constructor
+ args[0] = ObjToArgSlot(gc.o);
+ args[1] = ObjToArgSlot(*classes);
+ args[2] = ObjToArgSlot(*except);
+ args[3] = ObjToArgSlot((OBJECTREF)gc.str);
+
+ ctor.Call(args);
+
+ oRet = gc.o;
+
+ GCPROTECT_END();
+ RETURN oRet;
+}
+
+OBJECTREF InvokeUtil::CreateTargetExcept(OBJECTREF* except) {
+ CONTRACT(OBJECTREF) {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(except));
+ PRECONDITION(IsProtectedByGCFrame (except));
+
+ POSTCONDITION(RETVAL != NULL);
+
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACT_END;
+
+ OBJECTREF o;
+ OBJECTREF oRet = 0;
+
+ MethodTable *pVMTargetExcept = MscorlibBinder::GetException(kTargetInvocationException);
+ o = AllocateObject(pVMTargetExcept);
+ GCPROTECT_BEGIN(o);
+ ARG_SLOT args[2];
+
+ MethodDesc* pMD = MemberLoader::FindMethod(o->GetTrueMethodTable(),
+ COR_CTOR_METHOD_NAME, &gsig_IM_Exception_RetVoid);
+
+ if (!pMD)
+ {
+ MAKE_WIDEPTR_FROMUTF8(wzMethodName, COR_CTOR_METHOD_NAME);
+ COMPlusThrowNonLocalized(kMissingMethodException, wzMethodName);
+ }
+
+ MethodDescCallSite ctor(pMD);
+
+ // Call the constructor
+ args[0] = ObjToArgSlot(o);
+ // For security, don't allow a non-exception object to be spoofed as an exception object. We cast
+ // later without checking, and that could cause us grief.
+ _ASSERTE(!except || IsException((*except)->GetMethodTable())); // how do we get non-exceptions?
+ if (except && IsException((*except)->GetMethodTable()))
+ {
+ args[1] = ObjToArgSlot(*except);
+ }
+ else
+ {
+ args[1] = NULL;
+ }
+
+ ctor.Call(args);
+
+ oRet = o;
+
+ GCPROTECT_END();
+ RETURN oRet;
+}
+
+// ChangeType
+// This method will invoke the Binder change type method on the object
+// binder -- The Binder object
+// srcObj -- The source object to be changed
+// th -- The TypeHandle of the target type
+// locale -- The locale passed to the class.
+OBJECTREF InvokeUtil::ChangeType(OBJECTREF binder, OBJECTREF srcObj, TypeHandle th, OBJECTREF locale) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(binder != NULL);
+ PRECONDITION(srcObj != NULL);
+
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ OBJECTREF typeClass = NULL;
+ OBJECTREF o;
+
+ struct _gc {
+ OBJECTREF binder;
+ OBJECTREF srcObj;
+ OBJECTREF locale;
+ OBJECTREF typeClass;
+ } gc;
+
+ gc.binder = binder;
+ gc.srcObj = srcObj;
+ gc.locale = locale;
+ gc.typeClass = NULL;
+
+ GCPROTECT_BEGIN(gc);
+
+ MethodDescCallSite changeType(METHOD__BINDER__CHANGE_TYPE, &gc.binder);
+
+ // Now call this method on this object.
+ typeClass = th.GetManagedClassObject();
+
+ ARG_SLOT pNewArgs[] = {
+ ObjToArgSlot(gc.binder),
+ ObjToArgSlot(gc.srcObj),
+ ObjToArgSlot(gc.typeClass),
+ ObjToArgSlot(gc.locale),
+ };
+
+ o = changeType.Call_RetOBJECTREF(pNewArgs);
+
+ GCPROTECT_END();
+
+ return o;
+}
+
+// Ensure that the field is declared on the type or subtype of the type to which the typed reference refers.
+// Note that a typed reference is a reference to an object and is not a field on that object (as in C# ref).
+// Ensure that if the field is an instance field that the typed reference is not null.
+void InvokeUtil::ValidateObjectTarget(FieldDesc *pField, TypeHandle enclosingType, OBJECTREF *target) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pField));
+ PRECONDITION(!enclosingType.IsNull() || pField->IsStatic());
+ PRECONDITION(CheckPointer(target));
+
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ if (pField->IsStatic() && (enclosingType.IsNull() || !*target))
+ return;
+
+ if (!pField->IsStatic() && !*target)
+ COMPlusThrow(kTargetException,W("RFLCT.Targ_StatFldReqTarg"));
+
+ // Verify that the object is of the proper type...
+ TypeHandle ty = (*target)->GetTrueTypeHandle();
+ while (!ty.IsNull() && ty != enclosingType)
+ ty = ty.GetParent();
+
+ // Give a second chance to thunking classes to do the
+ // correct cast
+ if (ty.IsNull()) {
+#ifdef FEATURE_REMOTING
+ BOOL fCastOK = FALSE;
+ if ((*target)->IsTransparentProxy()) {
+ fCastOK = CRemotingServices::CheckCast(*target, enclosingType);
+ }
+ if(!fCastOK)
+#endif
+ {
+ COMPlusThrow(kArgumentException,W("Arg_ObjObj"));
+ }
+ }
+}
+
+// SetValidField
+// Given a target object, a value object, and a field, this method will set the field
+// on the target object. The field must be validated before calling this.
+void InvokeUtil::SetValidField(CorElementType fldType,
+ TypeHandle fldTH,
+ FieldDesc *pField,
+ OBJECTREF *target,
+ OBJECTREF *valueObj,
+ TypeHandle declaringType,
+ CLR_BOOL *pDomainInitialized) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(!fldTH.IsNull());
+ PRECONDITION(CheckPointer(pField));
+ PRECONDITION(CheckPointer(target));
+ PRECONDITION(CheckPointer(valueObj));
+ PRECONDITION(IsProtectedByGCFrame (target));
+ PRECONDITION(IsProtectedByGCFrame (valueObj));
+ PRECONDITION(declaringType.IsNull () || !declaringType.IsTypeDesc());
+
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ // We don't allow setting the fields of Nullable<T> (hasValue and value)
+ // because you can't independently set them for this type.
+ if (!declaringType.IsNull() && Nullable::IsNullableType(declaringType.GetMethodTable()))
+ COMPlusThrow(kNotSupportedException);
+
+ // call the .cctor
+ OBJECTREF Throwable = NULL;
+
+ MethodTable * pDeclMT = NULL;
+ if (!declaringType.IsNull())
+ {
+ pDeclMT = declaringType.GetMethodTable();
+
+ if (pDeclMT->IsSharedByGenericInstantiations())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_Type"));
+ }
+
+ if (*pDomainInitialized == FALSE)
+ {
+ EX_TRY
+ {
+ if (declaringType.IsNull())
+ {
+ pField->GetModule()->GetGlobalMethodTable()->EnsureInstanceActive();
+ pField->GetModule()->GetGlobalMethodTable()->CheckRunClassInitThrowing();
+ }
+ else
+ {
+ pDeclMT->EnsureInstanceActive();
+ pDeclMT->CheckRunClassInitThrowing();
+
+ if (declaringType.IsDomainNeutral() == FALSE)
+ *pDomainInitialized = TRUE;
+ }
+ }
+ EX_CATCH_THROWABLE(&Throwable);
+ }
+#ifdef _DEBUG
+ else if (*pDomainInitialized == TRUE && !declaringType.IsNull())
+ CONSISTENCY_CHECK(declaringType.GetMethodTable()->CheckActivated());
+#endif
+
+ if(Throwable != NULL)
+ {
+ GCPROTECT_BEGIN(Throwable);
+ OBJECTREF except = CreateTargetExcept(&Throwable);
+ COMPlusThrow(except);
+ GCPROTECT_END();
+ }
+
+ // Set the field
+ ARG_SLOT value;
+
+ void* valueptr;
+ switch (fldType) {
+ case ELEMENT_TYPE_VOID:
+ _ASSERTE(!"Void used as Field Type!");
+ COMPlusThrow(kNotSupportedException);
+
+ case ELEMENT_TYPE_BOOLEAN: // boolean
+ case ELEMENT_TYPE_I1: // byte
+ case ELEMENT_TYPE_U1: // unsigned byte
+ value = 0;
+ if (*valueObj != 0) {
+ MethodTable *p = (*valueObj)->GetMethodTable();
+ CorElementType oType = p->GetInternalCorElementType();
+ CreatePrimitiveValue(fldType, oType, *valueObj, &value);
+ }
+
+ if (pField->IsStatic())
+ pField->SetStaticValue8((unsigned char)value);
+ else
+ pField->SetValue8(*target,(unsigned char)value);
+ break;
+
+ case ELEMENT_TYPE_I2: // short
+ case ELEMENT_TYPE_U2: // unsigned short
+ case ELEMENT_TYPE_CHAR: // char
+ value = 0;
+ if (*valueObj != 0) {
+ MethodTable *p = (*valueObj)->GetMethodTable();
+ CorElementType oType = p->GetInternalCorElementType();
+ CreatePrimitiveValue(fldType, oType, *valueObj, &value);
+ }
+
+ if (pField->IsStatic())
+ pField->SetStaticValue16((short)value);
+ else
+ pField->SetValue16(*target, (short)value);
+ break;
+
+ case ELEMENT_TYPE_I:
+ valueptr = *valueObj != 0 ? GetIntPtrValue(*valueObj) : NULL;
+ if (pField->IsStatic())
+ pField->SetStaticValuePtr(valueptr);
+ else
+ pField->SetValuePtr(*target,valueptr);
+ break;
+
+ case ELEMENT_TYPE_U:
+ valueptr = *valueObj != 0 ? GetIntPtrValue(*valueObj) : NULL;
+ if (pField->IsStatic())
+ pField->SetStaticValuePtr(valueptr);
+ else
+ pField->SetValuePtr(*target,valueptr);
+ break;
+
+ case ELEMENT_TYPE_PTR: // pointers
+ if (*valueObj != 0 && MscorlibBinder::IsClass((*valueObj)->GetMethodTable(), CLASS__POINTER)) {
+ valueptr = GetPointerValue(*valueObj);
+ if (pField->IsStatic())
+ pField->SetStaticValuePtr(valueptr);
+ else
+ pField->SetValuePtr(*target,valueptr);
+ break;
+ }
+ // drop through
+ case ELEMENT_TYPE_FNPTR:
+ valueptr = *valueObj != 0 ? GetIntPtrValue(*valueObj) : NULL;
+ if (pField->IsStatic())
+ pField->SetStaticValuePtr(valueptr);
+ else
+ pField->SetValuePtr(*target,valueptr);
+ break;
+
+ case ELEMENT_TYPE_I4: // int
+ case ELEMENT_TYPE_U4: // unsigned int
+ case ELEMENT_TYPE_R4: // float
+ value = 0;
+ if (*valueObj != 0) {
+ MethodTable *p = (*valueObj)->GetMethodTable();
+ CorElementType oType = p->GetInternalCorElementType();
+ CreatePrimitiveValue(fldType, oType, *valueObj, &value);
+ }
+
+ if (pField->IsStatic())
+ pField->SetStaticValue32((int)value);
+ else
+ pField->SetValue32(*target, (int)value);
+ break;
+
+ case ELEMENT_TYPE_I8: // long
+ case ELEMENT_TYPE_U8: // unsigned long
+ case ELEMENT_TYPE_R8: // double
+ value = 0;
+ if (*valueObj != 0) {
+ MethodTable *p = (*valueObj)->GetMethodTable();
+ CorElementType oType = p->GetInternalCorElementType();
+ CreatePrimitiveValue(fldType, oType, *valueObj, &value);
+ }
+
+ if (pField->IsStatic())
+ pField->SetStaticValue64(value);
+ else
+ pField->SetValue64(*target,value);
+ break;
+
+ case ELEMENT_TYPE_SZARRAY: // Single Dim, Zero
+ case ELEMENT_TYPE_ARRAY: // General Array
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_VAR:
+ if (pField->IsStatic())
+ pField->SetStaticOBJECTREF(*valueObj);
+ else
+ pField->SetRefValue(*target, *valueObj);
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ _ASSERTE(!fldTH.IsTypeDesc());
+ MethodTable *pMT = fldTH.AsMethodTable();
+#ifdef FEATURE_REMOTING
+ if((*target) != NULL && (*target)->IsTransparentProxy()) {
+ OBJECTREF val = *valueObj;
+ GCPROTECT_BEGIN(val)
+
+ void* valueData;
+ if (Nullable::IsNullableType(fldTH)) {
+ // Special case for Nullable<T>, we need a true nullable that is gc protected. The easiest
+ // way to make one is to allocate an object on the heap
+ OBJECTREF trueNullable = fldTH.AsMethodTable()->Allocate();
+ BOOL typesChecked;
+ typesChecked = Nullable::UnBox(trueNullable->GetData(), val, fldTH.AsMethodTable());
+ _ASSERTE(typesChecked);
+ val = trueNullable;
+ valueData = val->GetData();
+ }
+ else if (val == NULL) {
+ // Null is the universal null object. (Is this a good idea?)
+ int size = pMT->GetNumInstanceFieldBytes();
+ valueData = _alloca(size);
+ memset(valueData, 0, size);
+ }
+ else
+ valueData = val->GetData();
+
+ OBJECTREF unwrapped = CRemotingServices::GetObjectFromProxy(*target);
+ CRemotingServices::FieldAccessor(pField, unwrapped, valueData, FALSE);
+ GCPROTECT_END();
+ }
+ else
+#endif
+ {
+ void* pFieldData;
+ if (pField->IsStatic())
+ pFieldData = pField->GetCurrentStaticAddress();
+ else
+ pFieldData = (*((BYTE**)target)) + pField->GetOffset() + sizeof(Object);
+
+ if (*valueObj == NULL)
+ InitValueClass(pFieldData, pMT);
+ else
+ pMT->UnBoxIntoUnchecked(pFieldData, *valueObj);
+ }
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Unknown Type");
+ // this is really an impossible condition
+ COMPlusThrow(kNotSupportedException);
+ }
+}
+
+// GetFieldValue
+// This method will return an OBJECTREF containing the value of the field.
+OBJECTREF InvokeUtil::GetFieldValue(FieldDesc* pField, TypeHandle fieldType, OBJECTREF* target, TypeHandle declaringType, CLR_BOOL *pDomainInitialized) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pField));
+ PRECONDITION(!fieldType.IsNull());
+ PRECONDITION(CheckPointer(target));
+ PRECONDITION(declaringType.IsNull () || !declaringType.IsTypeDesc());
+
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ OBJECTREF obj = NULL;
+
+ // call the .cctor
+ OBJECTREF Throwable = NULL;
+
+ MethodTable * pDeclMT = NULL;
+ if (!declaringType.IsNull())
+ {
+ pDeclMT = declaringType.GetMethodTable();
+
+ if (pDeclMT->IsSharedByGenericInstantiations())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_Type"));
+ }
+
+ if (*pDomainInitialized == FALSE)
+ {
+ EX_TRY
+ {
+ if (declaringType.IsNull())
+ {
+ pField->GetModule()->GetGlobalMethodTable()->EnsureInstanceActive();
+ pField->GetModule()->GetGlobalMethodTable()->CheckRunClassInitThrowing();
+ }
+ else
+ {
+ pDeclMT->EnsureInstanceActive();
+ pDeclMT->CheckRunClassInitThrowing();
+
+ if (!declaringType.IsDomainNeutral())
+ *pDomainInitialized = TRUE;
+ }
+ }
+ EX_CATCH_THROWABLE(&Throwable);
+ }
+#ifdef _DEBUG
+ else if (*pDomainInitialized == TRUE && !declaringType.IsNull())
+ CONSISTENCY_CHECK(declaringType.GetMethodTable()->CheckActivated());
+#endif
+
+
+ if(Throwable != NULL)
+ {
+ GCPROTECT_BEGIN(Throwable);
+ OBJECTREF except = CreateTargetExcept(&Throwable);
+ COMPlusThrow(except);
+ GCPROTECT_END();
+ }
+
+ // We don't allow getting the field just so we don't have more special
+ // cases than we need to. We need at least the throw check to ensure
+ // we don't allow data corruption.
+ if (!declaringType.IsNull() && Nullable::IsNullableType(pDeclMT))
+ COMPlusThrow(kNotSupportedException);
+
+ CorElementType fieldElementType = pField->GetFieldType();
+
+ switch (fieldElementType) {
+
+ case ELEMENT_TYPE_BOOLEAN: // boolean
+ case ELEMENT_TYPE_I1: // byte
+ case ELEMENT_TYPE_U1: // unsigned byte
+ case ELEMENT_TYPE_I2: // short
+ case ELEMENT_TYPE_U2: // unsigned short
+ case ELEMENT_TYPE_CHAR: // char
+ case ELEMENT_TYPE_I4: // int
+ case ELEMENT_TYPE_U4: // unsigned int
+ case ELEMENT_TYPE_R4: // float
+ case ELEMENT_TYPE_I8: // long
+ case ELEMENT_TYPE_U8: // unsigned long
+ case ELEMENT_TYPE_R8: // double
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ {
+ // create the object and copy
+ fieldType.AsMethodTable()->EnsureActive();
+ obj = AllocateObject(fieldType.AsMethodTable());
+ GCPROTECT_BEGIN(obj);
+ if (pField->IsStatic())
+ CopyValueClass(obj->UnBox(),
+ pField->GetCurrentStaticAddress(),
+ fieldType.AsMethodTable(),
+ obj->GetAppDomain());
+ else
+ pField->GetInstanceField(*target, obj->UnBox());
+ GCPROTECT_END();
+ break;
+ }
+
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_SZARRAY: // Single Dim, Zero
+ case ELEMENT_TYPE_ARRAY: // general array
+ case ELEMENT_TYPE_VAR:
+ if (pField->IsStatic())
+ obj = pField->GetStaticOBJECTREF();
+ else
+ obj = pField->GetRefValue(*target);
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ // Value classes require creating a boxed version of the field and then
+ // copying from the source...
+ // Allocate an object to return...
+ _ASSERTE(!fieldType.IsTypeDesc());
+
+ void *p = NULL;
+ fieldType.AsMethodTable()->EnsureActive();
+ obj = fieldType.AsMethodTable()->Allocate();
+ GCPROTECT_BEGIN(obj);
+ // calculate the offset to the field...
+ if (pField->IsStatic())
+ p = pField->GetCurrentStaticAddress();
+ else {
+#ifdef FEATURE_REMOTING
+ OBJECTREF o = *target;
+ if(o->IsTransparentProxy()) {
+ OBJECTREF unwrapped = CRemotingServices::GetObjectFromProxy(o);
+ CRemotingServices::FieldAccessor(pField, unwrapped, (void*)obj->GetData(), TRUE);
+ }
+ else
+#endif
+ p = (*((BYTE**)target)) + pField->GetOffset() + sizeof(Object);
+ }
+ GCPROTECT_END();
+
+ // copy the field to the unboxed object.
+ // note: this will be done only for the non-remoting case
+ if (p) {
+ CopyValueClass(obj->GetData(), p, fieldType.AsMethodTable(), obj->GetAppDomain());
+ }
+
+ // If it is a Nullable<T>, box it using Nullable<T> conventions.
+ // TODO: this double allocates on construction, which is wasteful
+ obj = Nullable::NormalizeBox(obj);
+ break;
+ }
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+ void *value = NULL;
+ if (pField->IsStatic())
+ value = pField->GetStaticValuePtr();
+ else
+ value = pField->GetValuePtr(*target);
+
+ MethodTable *pIntPtrMT = MscorlibBinder::GetClass(CLASS__INTPTR);
+ obj = AllocateObject(pIntPtrMT);
+ CopyValueClass(obj->UnBox(), &value, pIntPtrMT, obj->GetAppDomain());
+ break;
+ }
+
+ case ELEMENT_TYPE_PTR:
+ {
+ void *value = NULL;
+ if (pField->IsStatic())
+ value = pField->GetStaticValuePtr();
+ else
+ value = pField->GetValuePtr(*target);
+ obj = CreatePointer(fieldType, value);
+ break;
+ }
+
+ default:
+ _ASSERTE(!"Unknown Type");
+ // this is really an impossible condition
+ COMPlusThrow(kNotSupportedException);
+ }
+
+ return obj;
+}
+
+void RefSecContext::FindCaller()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!m_fCheckedCaller)
+ {
+ m_pCaller = SystemDomain::GetCallersMethod(NULL, &m_pCallerDomain);
+
+ // If we didn't find a caller, we were called through interop. In this
+ // case we know we're going to get full permissions.
+ if (m_pCaller == NULL && !m_fCheckedPerm) {
+ m_fCallerHasPerm = true;
+
+ m_fCheckedPerm = true;
+ }
+ m_fCheckedCaller = true;
+ }
+}
+
+MethodDesc *RefSecContext::GetCallerMethod() {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ FindCaller();
+ return m_pCaller;
+}
+
+AppDomain *RefSecContext::GetCallerDomain() {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ FindCaller();
+ return m_pCallerDomain;
+}
+
+MethodTable *RefSecContext::GetCallerMT() {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodDesc *pCaller = GetCallerMethod();
+ return pCaller ? pCaller->GetMethodTable() : NULL;
+}
+
+Assembly *RefSecContext::GetCallerAssembly() {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = GetCallerMT();
+ return pMT ? pMT->GetAssembly() : NULL;
+}
+
+bool RefSecContext::IsCalledFromInterop()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodDesc *pCaller = GetCallerMethod();
+ return (pCaller == NULL);
+}
+
+BOOL InvokeUtil::IsCriticalWithConversionToFullDemand(MethodTable* pMT)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return Security::TypeRequiresTransparencyCheck(pMT, true);
+}
+
+BOOL InvokeUtil::IsCriticalWithConversionToFullDemand(MethodDesc* pMD, MethodTable* pInstanceMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (Security::IsMethodCritical(pMD) && !Security::IsMethodSafeCritical(pMD)
+ && pMD->GetAssembly()->GetSecurityTransparencyBehavior()->CanCriticalMembersBeConvertedToLinkDemand())
+ return TRUE;
+
+ if (pMD->HasMethodInstantiation())
+ {
+ Instantiation inst = pMD->GetMethodInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle th = inst[i];
+ if (InvokeUtil::IsCriticalWithConversionToFullDemand(th.GetMethodTableOfElementType()))
+ return TRUE;
+ }
+ }
+
+ if (pInstanceMT && InvokeUtil::IsCriticalWithConversionToFullDemand(pInstanceMT))
+ return TRUE;
+
+ return FALSE;
+}
+
+BOOL InvokeUtil::IsCriticalWithConversionToFullDemand(FieldDesc* pFD, MethodTable* pInstanceMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (Security::IsFieldCritical(pFD) && !Security::IsFieldSafeCritical(pFD)
+ && pFD->GetModule()->GetAssembly()->GetSecurityTransparencyBehavior()->CanCriticalMembersBeConvertedToLinkDemand())
+ return TRUE;
+
+ if (pInstanceMT && InvokeUtil::IsCriticalWithConversionToFullDemand(pInstanceMT))
+ return TRUE;
+
+ return FALSE;
+}
+
+void InvokeUtil::CanAccessClass(RefSecContext* pCtx,
+ MethodTable* pClass,
+ BOOL checkAccessForImplicitValueTypeCtor /*= FALSE*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ InvokeUtil::CheckAccessClass(pCtx, pClass, checkAccessForImplicitValueTypeCtor);
+
+#ifndef FEATURE_CORECLR
+ // Reflection invocation should turn critical method access into a full demand of full trust
+ // for level 2 assemblies.
+ if (InvokeUtil::IsCriticalWithConversionToFullDemand(pClass))
+ {
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_FULL_TRUST);
+ }
+#endif //FEATURE_CORECLR
+}
+
+#ifndef DACCESS_COMPILE
+void InvokeUtil::CanAccessMethod(MethodDesc* pMeth,
+ MethodTable* pParentMT,
+ MethodTable* pInstanceMT,
+ RefSecContext* pSCtx,
+ BOOL fCriticalToFullDemand,
+ BOOL checkSkipVer /*= FALSE*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMeth));
+ PRECONDITION(CheckPointer(pSCtx));
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_CORECLR
+ // Security checks are expensive as they involve stack walking. Avoid them if we can.
+ // In immersive we don't allow private reflection to framework code. So we need to perform
+ // the access check even if all the domains on the stack are fully trusted.
+ if (Security::AllDomainsOnStackFullyTrusted() && !AppX::IsAppXProcess() && !pParentMT->GetAssembly()->IsDisabledPrivateReflection())
+ return;
+#endif // FEATURE_CORECLR
+
+ InvokeUtil::CheckAccessMethod(pSCtx,
+ pParentMT,
+ pInstanceMT,
+ pMeth);
+
+#ifndef FEATURE_CORECLR
+ // Reflection invocation should turn critical method access into a full demand of full trust
+ // for level 2 assemblies.
+ if (fCriticalToFullDemand && InvokeUtil::IsCriticalWithConversionToFullDemand(pMeth, pParentMT))
+ {
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_FULL_TRUST);
+
+ // No need to do any more checks if a full trust full demand has succeeded.
+ return;
+ }
+#endif //FEATURE_CORECLR
+
+ if (pMeth->RequiresLinktimeCheck())
+ {
+ // The following logic turns link demands on the target method into full
+ // stack walks in order to close security holes in poorly written
+ // reflection users.
+
+#ifdef FEATURE_APTCA
+ if (Security::IsUntrustedCallerCheckNeeded(pMeth))
+ {
+ if (pSCtx->GetCallerMT())
+ {
+ // Check for untrusted caller
+                // It is possible that fully trusted wrappers, such as the VB
+                // helper libraries, make calls to public methods that do not
+                // have the safe-for-untrusted-callers custom attribute set.
+                // Like all other link demands that get transformed into full
+                // stack walks for reflection, calls to such public methods
+                // also get converted to full stack walks.
+
+ Security::DoUntrustedCallerChecks(
+ pSCtx->GetCallerMT()->GetAssembly(), pMeth,
+ TRUE);
+ }
+ }
+#endif // FEATURE_APTCA
+
+ struct _gc
+ {
+ OBJECTREF refClassNonCasDemands;
+ OBJECTREF refClassCasDemands;
+ OBJECTREF refMethodNonCasDemands;
+ OBJECTREF refMethodCasDemands;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ // Fetch link demand sets from all the places in metadata where we might
+ // find them (class and method). These might be split into CAS and non-CAS
+ // sets as well.
+ Security::RetrieveLinktimeDemands(pMeth,
+ &gc.refClassCasDemands,
+ &gc.refClassNonCasDemands,
+ &gc.refMethodCasDemands,
+ &gc.refMethodNonCasDemands);
+
+ // CAS Link Demands
+ if (gc.refClassCasDemands != NULL)
+ Security::DemandSet(SSWT_LATEBOUND_LINKDEMAND, gc.refClassCasDemands);
+
+ if (gc.refMethodCasDemands != NULL)
+ Security::DemandSet(SSWT_LATEBOUND_LINKDEMAND, gc.refMethodCasDemands);
+
+ // Non-CAS demands are not applied against a grant
+ // set, they're standalone.
+ if (gc.refClassNonCasDemands != NULL)
+ Security::CheckNonCasDemand(&gc.refClassNonCasDemands);
+
+ if (gc.refMethodNonCasDemands != NULL)
+ Security::CheckNonCasDemand(&gc.refMethodNonCasDemands);
+
+ GCPROTECT_END();
+
+#ifdef FEATURE_CORECLR
+ if (pMeth->IsNDirect() ||
+ (pMeth->IsComPlusCall() && !pMeth->IsInterface()))
+ {
+ MethodDesc* pmdCaller = pSCtx->GetCallerMethod();
+
+ if (pmdCaller != NULL &&
+ Security::IsMethodTransparent(pmdCaller))
+ {
+ ThrowMethodAccessException(pSCtx, pMeth, IDS_E_TRANSPARENT_CALL_NATIVE);
+ }
+
+ }
+
+#else // FEATURE_CORECLR
+ // We perform automatic linktime checks for UnmanagedCode in three cases:
+ // o P/Invoke calls.
+ // o Calls through an interface that have a suppress runtime check
+ // attribute on them (these are almost certainly interop calls).
+ // o Interop calls made through method impls.
+ if (pMeth->IsNDirect() ||
+ (pMeth->IsInterface() &&
+ (pMeth->GetMDImport()->GetCustomAttributeByName(pParentMT->GetCl(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_OK ||
+ pMeth->GetMDImport()->GetCustomAttributeByName(pMeth->GetMemberDef(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_OK) ) ||
+ (pMeth->IsComPlusCall() && !pMeth->IsInterface()))
+ {
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_UNMANAGED_CODE);
+ }
+#endif // FEATURE_CORECLR
+ }
+
+ // @todo:
+ //if (checkSkipVer && !Security::CanSkipVerification(pSCtx->GetCallerMethod()->GetModule()))
+ //Security::ThrowSecurityException(g_SecurityPermissionClassName, SPFLAGSSKIPVERIFICATION);
+    //checkSkipVer is set only when the user tries to invoke a constructor on an existing object.
+ if (checkSkipVer)
+ {
+ MethodDesc *pCallerMD = pSCtx->GetCallerMethod();
+
+ // Interop (NULL) caller should be able to skip verification
+ if (pCallerMD != NULL &&
+ Security::IsMethodTransparent(pCallerMD) &&
+ !pCallerMD->GetAssembly()->GetSecurityTransparencyBehavior()->CanTransparentCodeSkipVerification())
+ {
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ SecurityTransparent::LogTransparencyError(pMeth, "Attempt by a transparent method to use unverifiable code");
+ }
+ if (!g_pConfig->DisableTransparencyEnforcement())
+#endif // _DEBUG
+ {
+ ThrowMethodAccessException(pCallerMD, pMeth, FALSE, IDS_E_TRANSPARENT_REFLECTION);
+ }
+ }
+
+#ifndef FEATURE_CORECLR
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
+#endif // !FEATURE_CORECLR
+ }
+}
+#endif // #ifndef DACCESS_COMPILE
+
+void InvokeUtil::CanAccessField(RefSecContext* pCtx,
+ MethodTable* pTargetMT,
+ MethodTable* pInstanceMT,
+ FieldDesc* pTargetField)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ InvokeUtil::CheckAccessField(pCtx, pTargetMT, pInstanceMT, pTargetField);
+
+#ifndef FEATURE_CORECLR
+ // Reflection invocation should turn critical method access into a full demand of full trust
+ // for level 2 assemblies.
+ if (InvokeUtil::IsCriticalWithConversionToFullDemand(pTargetField, pInstanceMT))
+ {
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_FULL_TRUST);
+ }
+#endif //FEATURE_CORECLR
+}
+
+//
+// Ensure that a type is accessible, throwing a TypeLoadException if not
+//
+// Arguments:
+// pCtx - current reflection context
+// pTargetMT - class to check access to
+// checkAccessForImplicitValueTypeCtor - ValueTypes always have an implicit constructor.
+//        If a user tries to do "new ValueType()", we support it even if there is no
+//        explicit constructor. However, we want to throw MethodAccessException in this case,
+// though the accessibility check is done against the type.
+//
+// Return Value:
+// Nothing - throws an exception if access is not allowed
+//
+
+// static
+void InvokeUtil::CheckAccessClass(RefSecContext *pCtx,
+ MethodTable *pClassMT,
+ BOOL checkAccessForImplicitValueTypeCtor /* = FALSE */)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pCtx));
+ PRECONDITION(CheckPointer(pClassMT));
+ }
+ CONTRACTL_END;
+
+ // Walking the stack is expensive so don't try to get the caller here.
+ // ClassLoader::CanAccessClass will retrieve the caller when necessary
+ // and it will give an interop (NULL) caller a pass.
+
+ AccessCheckOptions accessCheckOptions(pCtx->GetAccessCheckType(),
+ NULL,
+ !checkAccessForImplicitValueTypeCtor,
+ pClassMT);
+
+ BOOL canAccess;
+ canAccess = ClassLoader::CanAccessClass(pCtx,
+ pClassMT,
+ pClassMT->GetAssembly(),
+ accessCheckOptions);
+
+ if (!canAccess)
+ {
+ _ASSERTE(checkAccessForImplicitValueTypeCtor);
+ COMPlusThrow(kMethodAccessException, W("Arg_MethodAccessException"));
+ }
+}
+
+//
+// Ensure that a method is accessible, throwing a MethodAccessException if not
+//
+// Arguments:
+// pCtx - current reflection context
+// pTargetMT - class containing the method being checked
+// pInstanceMT - instance being accessed if the method is not static
+// pTargetMethod - method to check access to
+//
+// Return Value:
+// Nothing - throws an exception if access is not allowed
+//
+
+// static
+void InvokeUtil::CheckAccessMethod(RefSecContext *pCtx,
+ MethodTable *pTargetMT,
+ MethodTable *pInstanceMT,
+ MethodDesc *pTargetMethod)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTargetMT));
+ PRECONDITION(CheckPointer(pTargetMethod));
+ }
+ CONTRACTL_END;
+
+ AccessCheckOptions accessCheckOptions(pCtx->GetAccessCheckType(),
+ NULL,
+ TRUE,
+ pTargetMethod);
+ InvokeUtil::CheckAccess(pCtx,
+ pTargetMT,
+ pInstanceMT,
+ pTargetMethod,
+ NULL,
+ accessCheckOptions);
+}
+
+//
+// Ensure that a field is accessible, throwing a FieldAccessException if not
+//
+// Arguments:
+// pCtx - current reflection context
+// pTargetMT - class containing the field being checked
+// pInstanceMT - instance being accessed if the field is not static
+// pTargetField - field to check access to
+//
+// Return Value:
+// Nothing - throws an exception if access is not allowed
+//
+
+// static
+void InvokeUtil::CheckAccessField(RefSecContext *pCtx,
+ MethodTable *pTargetMT,
+ MethodTable *pInstanceMT,
+ FieldDesc *pTargetField)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTargetMT));
+ PRECONDITION(CheckPointer(pTargetField));
+ }
+ CONTRACTL_END;
+
+ AccessCheckOptions accessCheckOptions(pCtx->GetAccessCheckType(),
+ NULL,
+ TRUE,
+ pTargetField);
+
+ InvokeUtil::CheckAccess(pCtx,
+ pTargetMT,
+ pInstanceMT,
+ NULL,
+ pTargetField,
+ accessCheckOptions);
+}
+
+
+//
+// Check accessibility of a field or method.
+//
+// Arguments:
+// pCtx - current reflection context
+// pTargetMT - class containing the target being checked
+//    pInstanceMT - instance being accessed, if the field or method is non-static
+// pTargetMethod - if checking access to a method, its MethodDesc
+// pTargetField - if checking access to a field, its FieldDesc
+// accessCheckOptions - CanAccess flags indicating how security demands should be done
+//
+// Return Value:
+// Nothing - throws an exception if access is not allowed
+//
+// Notes:
+//    accessCheckOptions is required to be set up to throw if the target is inaccessible
+
+// static
+void InvokeUtil::CheckAccess(RefSecContext *pCtx,
+ MethodTable *pTargetMT,
+ MethodTable *pInstanceMT,
+ MethodDesc *pTargetMethod,
+ FieldDesc *pTargetField,
+ const AccessCheckOptions &accessCheckOptions)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTargetMT));
+ PRECONDITION((pTargetMethod != NULL) ^ (pTargetField != NULL));
+ }
+ CONTRACTL_END;
+
+ DWORD dwAttr = pTargetMethod != NULL ? pTargetMethod->GetAttrs() : pTargetField->GetAttributes();
+
+ // Walking the stack is expensive so don't try to get the caller here.
+ // ClassLoader::CanAccess will retrieve the caller when necessary
+ // and it will give an interop (NULL) caller a pass.
+
+ BOOL canAccess;
+
+ canAccess = ClassLoader::CanAccess(pCtx,
+ pTargetMT,
+ pTargetMT->GetAssembly(),
+ dwAttr,
+ pTargetMethod,
+ pTargetField,
+ accessCheckOptions);
+ if (pInstanceMT && canAccess)
+ {
+ if (pTargetMethod != NULL ? IsMdFamily(dwAttr) : IsFdFamily(dwAttr))
+ {
+ MethodTable* pCallerMT = pCtx->GetCallerMT();
+
+ if (pCallerMT != NULL &&
+ !ClassLoader::CanAccessFamilyVerification(pCallerMT, pInstanceMT))
+ {
+ canAccess = accessCheckOptions.DemandMemberAccessOrFail(pCtx,
+ pInstanceMT,
+ TRUE /*visibilityCheck*/);
+ }
+ }
+ }
+
+    // If this assert fires, ensure that accessCheckOptions was set up to throw if the target was inaccessible.
+ _ASSERTE(canAccess);
+}
+
+// If a method has a linktime demand attached, perform it.
+
+// static
+void InvokeUtil::CheckLinktimeDemand(RefSecContext *pCtx, MethodDesc *pCalleeMD) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ if (pCalleeMD->RequiresLinktimeCheck())
+ {
+ MethodDesc* pCallerMD = pCtx->GetCallerMethod();
+
+ if (pCallerMD)
+ {
+ Security::LinktimeCheckMethod(pCallerMD->GetAssembly(), pCalleeMD);
+
+ // perform transparency checks as well
+ if (Security::RequiresTransparentAssemblyChecks(pCallerMD, pCalleeMD, NULL))
+ {
+ Security::EnforceTransparentAssemblyChecks(pCallerMD, pCalleeMD);
+ }
+ }
+ }
+}
+
+/*static*/
+AccessCheckOptions::AccessCheckType InvokeUtil::GetInvocationAccessCheckType(BOOL targetRemoted /*= FALSE*/)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (targetRemoted)
+ return AccessCheckOptions::kMemberAccess;
+
+ AppDomain * pAppDomain = GetAppDomain();
+
+#ifdef FEATURE_CORECLR
+
+ if (pAppDomain->GetSecurityDescriptor()->IsFullyTrusted())
+        // Ignore transparency so that reflection invocation is consistent with LCG.
+ // There is no security concern because we are in Full Trust.
+ return AccessCheckOptions::kRestrictedMemberAccessNoTransparency;
+
+#ifdef FEATURE_LEGACYNETCF
+ if (pAppDomain->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ return AccessCheckOptions::kRestrictedMemberAccess;
+#endif // FEATURE_LEGACYNETCF
+
+ return AccessCheckOptions::kMemberAccess;
+
+#else // !FEATURE_CORECLR
+ return
+ AppX::IsAppXProcess() ?
+ (Security::AllDomainsOnStackFullyTrusted() ?
+ AccessCheckOptions::kUserCodeOnlyRestrictedMemberAccessNoTransparency :
+ AccessCheckOptions::kUserCodeOnlyRestrictedMemberAccess) :
+ AccessCheckOptions::kRestrictedMemberAccess;
+#endif //FEATURE_CORECLR
+}
+
+#endif // CROSSGEN_COMPILE
+
+struct DangerousAPIEntry
+{
+ BinderClassID classID;
+ const LPCSTR *pszAPINames;
+ DWORD cAPINames;
+};
+
+#define DEFINE_DANGEROUS_API(classID, szAPINames) static const LPCSTR g__ ## classID ## __DangerousAPIs[] = { szAPINames };
+#include "dangerousapis.h"
+#undef DEFINE_DANGEROUS_API
+
+#define DEFINE_DANGEROUS_API(classID, szAPINames) { CLASS__ ## classID, g__ ## classID ## __DangerousAPIs, NumItems(g__ ## classID ## __DangerousAPIs)},
+static const DangerousAPIEntry DangerousAPIs[] =
+{
+#include "dangerousapis.h"
+};
+#undef DEFINE_DANGEROUS_API
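+
+// A sketch of how the two inclusions of dangerousapis.h expand, assuming a
+// hypothetical entry such as DEFINE_DANGEROUS_API(APP_DOMAIN, "CreateDomain")
+// (the class and method names here are illustrative only). The first
+// inclusion generates the per-class name table:
+//
+//   static const LPCSTR g__APP_DOMAIN__DangerousAPIs[] = { "CreateDomain" };
+//
+// and the second generates the matching row in DangerousAPIs:
+//
+//   { CLASS__APP_DOMAIN, g__APP_DOMAIN__DangerousAPIs,
+//     NumItems(g__APP_DOMAIN__DangerousAPIs) },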
+
+/*static*/
+bool InvokeUtil::IsDangerousMethod(MethodDesc *pMD)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = pMD->GetMethodTable();
+
+ if (pMT->GetModule()->IsSystem())
+ {
+ // All methods on these types are considered dangerous
+ static const BinderClassID dangerousTypes[] = {
+#ifdef FEATURE_METHOD_RENTAL
+ CLASS__METHOD_RENTAL,
+#endif // FEATURE_METHOD_RENTAL
+#ifdef FEATURE_ISOSTORE
+ CLASS__ISS_STORE_FILE,
+#endif
+ CLASS__TYPE_HANDLE,
+ CLASS__METHOD_HANDLE,
+ CLASS__FIELD_HANDLE,
+ CLASS__ACTIVATOR,
+ CLASS__DELEGATE,
+ CLASS__MULTICAST_DELEGATE,
+ CLASS__RUNTIME_HELPERS
+ };
+
+
+ static bool fInited = false;
+
+ if (!VolatileLoad(&fInited))
+ {
+ // Make sure all types are loaded so that we can use faster GetExistingClass()
+ for (unsigned i = 0; i < NumItems(dangerousTypes); i++)
+ {
+ MscorlibBinder::GetClass(dangerousTypes[i]);
+ }
+
+ for (unsigned i = 0; i < NumItems(DangerousAPIs); i++)
+ {
+ MscorlibBinder::GetClass(DangerousAPIs[i].classID);
+ }
+
+ VolatileStore(&fInited, true);
+ }
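+
+        // A note on the flag above: VolatileStore publishes fInited only
+        // after every class in both tables has been loaded, and the paired
+        // VolatileLoad keeps the read from being reordered, so once a thread
+        // observes fInited == true the GetExistingClass() lookups below are
+        // guaranteed to find already-loaded classes.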
+
+ for (unsigned i = 0; i < NumItems(dangerousTypes); i++)
+ {
+ if (MscorlibBinder::GetExistingClass(dangerousTypes[i]) == pMT)
+ return true;
+ }
+
+ for (unsigned i = 0; i < NumItems(DangerousAPIs); i++)
+ {
+ DangerousAPIEntry entry = DangerousAPIs[i];
+ if (MscorlibBinder::GetExistingClass(entry.classID) == pMT)
+ {
+ LPCUTF8 szMethodName = pMD->GetName();
+ for (unsigned j = 0; j < entry.cAPINames; j++)
+ {
+ if (strcmp(szMethodName, entry.pszAPINames[j]) == 0)
+ return true;
+ }
+
+ break;
+ }
+ }
+ }
+
+    // To reduce compat risks we treat non-ctors on DynamicMethod as safe.
+ if (pMT->IsDelegate() && pMD->IsCtor())
+ return true;
+
+ return false;
+}
diff --git a/src/vm/invokeutil.h b/src/vm/invokeutil.h
new file mode 100644
index 0000000000..f2acb61f9e
--- /dev/null
+++ b/src/vm/invokeutil.h
@@ -0,0 +1,335 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+////////////////////////////////////////////////////////////////////////////////
+// This module defines a Utility Class used by reflection
+//
+//
+
+////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef __INVOKEUTIL_H__
+#define __INVOKEUTIL_H__
+
+// The following class represents the value class
+#include <pshpack1.h>
+
+struct InterfaceMapData
+{
+ REFLECTCLASSBASEREF m_targetType;
+ REFLECTCLASSBASEREF m_interfaceType;
+ PTRARRAYREF m_targetMethods;
+ PTRARRAYREF m_interfaceMethods;
+};
+
+// Calling Conventions
+// NOTE: These are defined in CallingConventions.cs They must match up.
+#define Standard_CC 0x0001
+#define VarArgs_CC 0x0002
+#define Any_CC (Standard_CC | VarArgs_CC)
+
+#define PRIMITIVE_TABLE_SIZE ELEMENT_TYPE_STRING
+#define PT_Primitive 0x01000000
+
+// Define the copy back constants.
+#define COPYBACK_PRIMITIVE 1
+#define COPYBACK_OBJECTREF 2
+#define COPYBACK_VALUECLASS 3
+
+#include <poppack.h>
+
+class ReflectMethodList;
+
+// Structure used to track security access checks efficiently when applied
+// across a range of methods, fields etc.
+//
+class RefSecContext : public AccessCheckContext
+{
+public:
+ RefSecContext(AccessCheckOptions::AccessCheckType accessCheckType)
+ : m_fCheckedCaller(false),
+ m_fCheckedPerm(false),
+ m_fCallerHasPerm(false),
+#ifdef FEATURE_REMOTING
+ m_fSkippingRemoting(false),
+#endif
+ m_pCaller(NULL),
+ m_pCallerDomain(NULL),
+ m_accessCheckType(accessCheckType)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual MethodTable* GetCallerMT();
+ virtual MethodDesc* GetCallerMethod();
+ virtual Assembly* GetCallerAssembly();
+ virtual bool IsCalledFromInterop();
+
+ // The caller will be computed lazily by the reflection system.
+ virtual bool IsCallerCritical()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return false;
+ }
+
+ AccessCheckOptions::AccessCheckType GetAccessCheckType() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_accessCheckType;
+ }
+
+ AppDomain* GetCallerDomain();
+
+private:
+ bool m_fCheckedCaller;
+ bool m_fCheckedPerm;
+ bool m_fCallerHasPerm;
+
+#ifdef FEATURE_REMOTING
+ bool m_fSkippingRemoting;
+#endif
+
+ // @review GENERICS:
+ // These method descriptors may be shared between compatible instantiations
+ // Check that this does not open any security holes
+ MethodDesc* m_pCaller;
+ AppDomain* m_pCallerDomain;
+
+ AccessCheckOptions::AccessCheckType m_accessCheckType;
+
+ void FindCaller();
+};
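+
+// Typical usage (a sketch; the pointer names are illustrative): construct one
+// RefSecContext per reflection invocation and pass it through the access
+// checks below, so the stack walk that locates the caller runs at most once:
+//
+//   RefSecContext sCtx(InvokeUtil::GetInvocationAccessCheckType());
+//   InvokeUtil::CanAccessClass(&sCtx, pClassMT);
+//   InvokeUtil::CanAccessField(&sCtx, pTargetMT, pInstanceMT, pFD);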
+
+// This class abstracts the functionality which creates the
+// parameters on the call stack and deals with the return type
+// inside reflection.
+//
+class InvokeUtil
+{
+
+public:
+ static void CopyArg(TypeHandle th, OBJECTREF *obj, void *pArgDst);
+
+    // Given a type, this routine will convert a return value representing that
+ // type into an ObjectReference. If the type is a primitive, the
+ // value is wrapped in one of the Value classes.
+ static OBJECTREF CreateObject(TypeHandle th, void * pValue);
+
+ // This is a special purpose Exception creation function. It
+    // creates the TargetInvocationException, placing the passed
+ // exception into it.
+ static OBJECTREF CreateTargetExcept(OBJECTREF* except);
+
+ // This is a special purpose Exception creation function. It
+    // creates the ReflectionClassLoadException, placing the passed
+ // classes array and exception array into it.
+ static OBJECTREF CreateClassLoadExcept(OBJECTREF* classes,OBJECTREF* except);
+
+ // Validate that the field can be widened for Set
+ static void ValidField(TypeHandle th, OBJECTREF* value);
+
+ // ChangeType
+ // This method will invoke the Binder change type method on the object
+ // binder -- The Binder object
+ // srcObj -- The source object to be changed
+    //  th -- The TypeHandle of the target type
+ // locale -- The locale passed to the class.
+ static OBJECTREF ChangeType(OBJECTREF binder,OBJECTREF srcObj,TypeHandle th,OBJECTREF locale);
+
+ // CreatePrimitiveValue
+ // This routine will validate the object and then place the value into
+ // the destination
+ // dstType -- The type of the destination
+ // srcType -- The type of the source
+ // srcObj -- The Object containing the primitive value.
+    //  pDst -- pointer to the destination
+ static void CreatePrimitiveValue(CorElementType dstType, CorElementType srcType, OBJECTREF srcObj, ARG_SLOT* pDst);
+
+ // CreatePrimitiveValue
+ // This routine will validate the object and then place the value into
+ // the destination
+ // dstType -- The type of the destination
+ // srcType -- The type of the source
+ // pSrc -- pointer to source data.
+ // pSrcMT - MethodTable of source type
+    //  pDst -- pointer to the destination
+ static void CreatePrimitiveValue(CorElementType dstType,CorElementType srcType,
+ void *pSrc, MethodTable *pSrcMT, ARG_SLOT* pDst);
+
+ // IsPrimitiveType
+ // This method will verify the passed in type is a primitive or not
+ // type -- the CorElementType to check for
+ inline static DWORD IsPrimitiveType(const CorElementType type)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (type >= PRIMITIVE_TABLE_SIZE)
+ {
+ if (ELEMENT_TYPE_I==type || ELEMENT_TYPE_U==type)
+ {
+ return TRUE;
+ }
+ return 0;
+ }
+
+ return (PT_Primitive & PrimitiveAttributes[type]);
+ }
+
+ static BOOL IsVoidPtr(TypeHandle th);
+
+ // CanPrimitiveWiden
+    // This method determines if the srcType can be widened without loss to the destType
+ // destType -- The target type
+ // srcType -- The source type.
+ inline static DWORD CanPrimitiveWiden(const CorElementType destType, const CorElementType srcType)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (destType >= PRIMITIVE_TABLE_SIZE || srcType >= PRIMITIVE_TABLE_SIZE)
+ {
+ if ((ELEMENT_TYPE_I==destType && ELEMENT_TYPE_I==srcType) ||
+ (ELEMENT_TYPE_U==destType && ELEMENT_TYPE_U==srcType))
+ {
+ return TRUE;
+ }
+ return 0;
+ }
+ return ((1 << destType) & PrimitiveAttributes[srcType]);
+ }
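+
+    // For example, if widening I1 -> I4 is legal, the table row
+    // PrimitiveAttributes[ELEMENT_TYPE_I1] has bit (1 << ELEMENT_TYPE_I4)
+    // set, so CanPrimitiveWiden(ELEMENT_TYPE_I4, ELEMENT_TYPE_I1) returns a
+    // nonzero value.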
+
+ // Field Stuff. The following stuff deals with fields making it possible
+ // to set/get field values on objects
+
+ // SetValidField
+    // Given a target object, a value object and a field, this method will set the field
+    //  on the target object. The field must be validated before calling this.
+ static void SetValidField(CorElementType fldType, TypeHandle fldTH, FieldDesc* pField, OBJECTREF* target, OBJECTREF* value, TypeHandle declaringType, CLR_BOOL *pDomainInitialized);
+
+ static OBJECTREF GetFieldValue(FieldDesc* pField, TypeHandle fieldType, OBJECTREF* target, TypeHandle declaringType, CLR_BOOL *pDomainInitialized);
+
+ // ValidateObjectTarget
+ // This method will validate the Object/Target relationship
+ // is correct. It throws an exception if this is not the case.
+ static void ValidateObjectTarget(FieldDesc* pField,TypeHandle fldType,OBJECTREF *target);
+
+ // Create reflection pointer wrapper
+ static OBJECTREF CreatePointer(TypeHandle th, void * p);
+
+ static TypeHandle GetPointerType(OBJECTREF pObj);
+ static void* GetPointerValue(OBJECTREF pObj);
+ static void* GetIntPtrValue(OBJECTREF pObj);
+
+    // Check accessibility of a type or nested type.
+ static void CanAccessClass(RefSecContext* pCtx,
+ MethodTable* pClass,
+ BOOL checkAccessForImplicitValueTypeCtor = FALSE);
+
+ static void CanAccessMethod(MethodDesc* pMeth,
+ MethodTable* pParentMT,
+ MethodTable* pInstanceMT,
+ RefSecContext* pSCtx,
+ BOOL fCriticalToFullDemand = TRUE,
+ BOOL checkSkipVer = FALSE);
+
+    // Check accessibility of a field
+ static void CanAccessField(RefSecContext* pCtx,
+ MethodTable* pTargetMT,
+ MethodTable* pInstanceMT,
+ FieldDesc* pTargetField);
+
+ // If a method has a linktime demand attached, perform it.
+ static void CheckLinktimeDemand(RefSecContext *pCtx, MethodDesc *pMeth);
+
+ //
+ // Check to see if the target of a reflection operation is on a remote object
+ //
+ // Arguments:
+ // pDesc - item being accessed (MethodDesc, FieldDesc, etc)
+ // pTargetMT - object being reflected on
+
+ template <typename T>
+ static bool IsTargetRemoted(T *pDesc, MethodTable *pTargetMT)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDesc));
+ }
+ CONTRACTL_END;
+
+ if (pDesc->IsStatic())
+ return FALSE;
+
+ if (pTargetMT == NULL)
+ return FALSE;
+
+#ifdef FEATURE_REMOTING
+ if (pTargetMT->IsTransparentProxy())
+ return TRUE;
+#endif
+ return FALSE;
+ }
+
+ static BOOL IsCriticalWithConversionToFullDemand(MethodTable* pMT);
+ static BOOL IsCriticalWithConversionToFullDemand(MethodDesc* pMD, MethodTable* pInstanceMT);
+ static BOOL IsCriticalWithConversionToFullDemand(FieldDesc* pFD, MethodTable* pInstanceMT);
+
+ static AccessCheckOptions::AccessCheckType GetInvocationAccessCheckType(BOOL targetRemoted = FALSE);
+
+ static bool IsDangerousMethod(MethodDesc *pMD);
+
+private:
+    // Check accessibility of a type or nested type.
+ static void CheckAccessClass(RefSecContext *pCtx,
+ MethodTable *pClass,
+ BOOL checkAccessForImplicitValueTypeCtor = FALSE);
+
+public:
+    // Check accessibility of a method
+ static void CheckAccessMethod(RefSecContext *pCtx,
+ MethodTable *pTargetMT,
+ MethodTable *pInstanceMT,
+ MethodDesc *pTargetMethod);
+
+private:
+    // Check accessibility of a field
+ static void CheckAccessField(RefSecContext *pCtx,
+ MethodTable *pTargetMT,
+ MethodTable *pInstanceMT,
+ FieldDesc *pTargetField);
+
+    // Check accessibility of a field or method.
+    // pTargetMD should be NULL for a field, and may be NULL for a method.
+    // If checking a generic method with a method instantiation,
+    // the method should be passed in as pTargetMD so
+    // that the accessibility of its type arguments is checked too.
+ static void CheckAccess(RefSecContext *pCtx,
+ MethodTable *pTargetMT,
+ MethodTable *pInstanceMT,
+ MethodDesc *pTargetMD,
+ FieldDesc *pTargetField,
+ const AccessCheckOptions &accessCheckOptions);
+
+ static void* CreateByRef(TypeHandle dstTh,CorElementType srcType, TypeHandle srcTH,OBJECTREF srcObj, OBJECTREF *pIncomingObj);
+
+ // GetBoxedObject
+    // Given an address of a primitive type, this will box that data...
+ static OBJECTREF GetBoxedObject(TypeHandle th,void* pData);
+
+private:
+ // The Attributes Table
+ // This constructs a table of legal widening operations
+ // for the primitive types.
+ static DWORD const PrimitiveAttributes[PRIMITIVE_TABLE_SIZE];
+};
+
+
+#endif // __INVOKEUTIL_H__
diff --git a/src/vm/iterator_util.h b/src/vm/iterator_util.h
new file mode 100644
index 0000000000..9746b85c05
--- /dev/null
+++ b/src/vm/iterator_util.h
@@ -0,0 +1,334 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: iterator_util.h
+//
+
+//
+
+//
+// ============================================================================
+
+#ifndef _ITERATOR_UTIL_H_
+#define _ITERATOR_UTIL_H_
+
+namespace IteratorUtil
+{
+
+// **************************************************************************************
+template <typename ElementType>
+class ArrayIteratorBase
+{
+public:
+ typedef DPTR(ElementType) PTR_ElementType;
+ typedef ArrayIteratorBase<ElementType> MyType;
+
+ // ----------------------------------------------------------------------------------
+ ArrayIteratorBase(
+ PTR_ElementType pStart,
+ size_t cEntries)
+ : m_pCur(pStart),
+ m_pStart(pStart),
+ m_pEnd(pStart + cEntries)
+ { LIMITED_METHOD_CONTRACT; }
+
+ // ----------------------------------------------------------------------------------
+ ArrayIteratorBase(
+ PTR_ElementType pStart,
+ PTR_ElementType pEnd)
+ : m_pCur(pStart),
+ m_pStart(pStart),
+ m_pEnd(pEnd)
+ { LIMITED_METHOD_CONTRACT; }
+
+ // ----------------------------------------------------------------------------------
+ ArrayIteratorBase(
+ const MyType &it)
+ : m_pCur(it.m_pCur),
+ m_pStart(it.m_pStart),
+ m_pEnd(it.m_pEnd)
+ { LIMITED_METHOD_CONTRACT; }
+
+ // ----------------------------------------------------------------------------------
+ bool
+ AtStart() const
+ { LIMITED_METHOD_CONTRACT; return m_pCur == m_pStart; }
+
+ // ----------------------------------------------------------------------------------
+ bool
+ AtEnd() const
+ { LIMITED_METHOD_CONTRACT; return m_pCur == m_pEnd; }
+
+ // ----------------------------------------------------------------------------------
+ void
+ ResetToStart()
+ { LIMITED_METHOD_CONTRACT; m_pCur = m_pStart; }
+
+ // ----------------------------------------------------------------------------------
+ void
+ ResetToEnd()
+ { LIMITED_METHOD_CONTRACT; m_pCur = m_pEnd; }
+
+ // ----------------------------------------------------------------------------------
+ ElementType &
+ Value()
+ { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(!AtEnd()); return *m_pCur; }
+
+ // ----------------------------------------------------------------------------------
+ ElementType &
+ operator*()
+ { WRAPPER_NO_CONTRACT; return Value(); }
+
+ // ----------------------------------------------------------------------------------
+ ElementType &
+ operator[](size_t idx)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(m_pStart + idx < m_pEnd);
+ return m_pStart[idx];
+ }
+
+ // ----------------------------------------------------------------------------------
+ size_t
+ CurrentIndex()
+ { LIMITED_METHOD_CONTRACT; return m_pCur - m_pStart; }
+
+ // ----------------------------------------------------------------------------------
+ void
+ MoveTo(size_t idx)
+ { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(m_pStart + idx < m_pEnd); m_pCur = m_pStart + idx; }
+
+ // ----------------------------------------------------------------------------------
+ bool
+ Next()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (AtEnd())
+ {
+ return false;
+ }
+
+ m_pCur++;
+
+ return !AtEnd();
+ }
+
+ // ----------------------------------------------------------------------------------
+ bool
+ Prev()
+ {
+ if (AtStart())
+ {
+ return false;
+ }
+
+ m_pCur--;
+
+ return true;
+ }
+
+ MyType &
+ operator++()
+ { WRAPPER_NO_CONTRACT; CONSISTENCY_CHECK(!AtEnd()); Next(); return *this; }
+
+protected:
+ // ----------------------------------------------------------------------------------
+ PTR_ElementType m_pCur;
+ PTR_ElementType m_pStart;
+ PTR_ElementType m_pEnd;
+
+ // ----------------------------------------------------------------------------------
+ // Do not allow address to be taken of what should be a by-val or by-ref.
+ ArrayIteratorBase<ElementType> *
+ operator&()
+ { LIMITED_METHOD_CONTRACT; }
+};
+
+// **************************************************************************************
+template <typename ElementType>
+class ArrayIterator
+ : public ArrayIteratorBase<ElementType>
+{
+public:
+ typedef ArrayIteratorBase<ElementType> _BaseTy;
+ typedef typename _BaseTy::PTR_ElementType PTR_ElementType;
+
+ // ----------------------------------------------------------------------------------
+ ArrayIterator(
+ PTR_ElementType pStart,
+ size_t cEntries)
+ : _BaseTy(pStart, cEntries)
+ { LIMITED_METHOD_CONTRACT; }
+
+ // ----------------------------------------------------------------------------------
+ ArrayIterator(
+ PTR_ElementType pStart,
+ PTR_ElementType pEnd)
+ : _BaseTy(pStart, pEnd)
+ { LIMITED_METHOD_CONTRACT; }
+
+ // ----------------------------------------------------------------------------------
+ ArrayIterator(
+ const ArrayIterator &it)
+ : _BaseTy(it)
+ { LIMITED_METHOD_CONTRACT; }
+
+ // ----------------------------------------------------------------------------------
+ PTR_ElementType
+ operator->()
+ { WRAPPER_NO_CONTRACT; return &this->Value(); }
+
+#ifdef DACCESS_COMPILE
+private:
+ // ----------------------------------------------------------------------------------
+ // You are trying to instantiate the iterator over a non DACized array.
+ // Make sure you pass in "DPTR(ElementType)" or "PTR_ElementType" and
+ // not "ElementType *" as the argument type.
+ ArrayIterator(
+ ElementType * pStart,
+ size_t cEntries);
+
+ // ----------------------------------------------------------------------------------
+ // You are trying to instantiate the iterator over a non DACized array.
+ // Make sure you pass in "DPTR(ElementType)" or "PTR_ElementType" and
+ // not "ElementType *" as the argument type.
+ ArrayIterator(
+ ElementType * pStart,
+ ElementType * pEnd);
+#endif
+};
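+
+// A minimal usage sketch (pData and cElems are illustrative names; in
+// non-DAC builds DPTR(int) is simply int*):
+//
+//   typedef DPTR(int) PTR_int;
+//   ArrayIterator<int> it(PTR_int(pData), cElems);
+//   while (!it.AtEnd())
+//   {
+//       Process(it.Value());   // Value() asserts the iterator is not at end
+//       it.Next();
+//   }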
+
+// **************************************************************************************
+template <typename ValueType>
+class ArrayIterator<ValueType *>
+ : public ArrayIteratorBase<DPTR(ValueType)>
+{
+public:
+ typedef ArrayIteratorBase<DPTR(ValueType)> _BaseTy;
+ typedef typename _BaseTy::PTR_ElementType PTR_ElementType;
+
+ // ----------------------------------------------------------------------------------
+ ArrayIterator(
+ PTR_ElementType pStart,
+ size_t cEntries)
+ : _BaseTy(pStart, cEntries)
+ { LIMITED_METHOD_CONTRACT; }
+
+ // ----------------------------------------------------------------------------------
+ ArrayIterator(
+ PTR_ElementType pStart,
+ PTR_ElementType pEnd)
+ : _BaseTy(pStart, pEnd)
+ { LIMITED_METHOD_CONTRACT; }
+
+ // ----------------------------------------------------------------------------------
+ ArrayIterator(
+ const ArrayIterator &it)
+ : _BaseTy(it)
+ { LIMITED_METHOD_CONTRACT; }
+
+ // ----------------------------------------------------------------------------------
+ DPTR(ValueType)
+ operator->()
+ { WRAPPER_NO_CONTRACT; return this->Value(); }
+
+#ifdef DACCESS_COMPILE
+private:
+ // ----------------------------------------------------------------------------------
+ // You are trying to instantiate the iterator over a non DACized array.
+ // Make sure you pass in "DPTR(ElementType)" or "PTR_ElementType" and
+ // not "ElementType *" as the argument type.
+ ArrayIterator(
+ ValueType ** pStart,
+ size_t cEntries);
+
+ // ----------------------------------------------------------------------------------
+ // You are trying to instantiate the iterator over a non DACized array.
+ // Make sure you pass in "DPTR(ElementType)" or "PTR_ElementType" and
+ // not "ElementType *" as the argument type.
+ ArrayIterator(
+ ValueType ** pStart,
+ ValueType ** pEnd);
+#endif
+};
+
+#if 0
+// **************************************************************************************
+// It's important to note that ElemType is expected to have a public instance method:
+// ElemType * GetNext();
+
+template <typename ElemType>
+class SListIterator
+{
+public:
+ // ----------------------------------------------------------------------------------
+ SListIterator(
+ ElemType * pHead)
+ : m_pCur(pHead),
+ m_pHead(pHead)
+ { LIMITED_METHOD_CONTRACT; }
+
+ // ----------------------------------------------------------------------------------
+ bool
+ AtStart() const
+ { LIMITED_METHOD_CONTRACT; return m_pCur == m_pHead; }
+
+ // ----------------------------------------------------------------------------------
+ bool
+ AtEnd() const
+ { LIMITED_METHOD_CONTRACT; return m_pCur == NULL; }
+
+ // ----------------------------------------------------------------------------------
+ void
+ ResetToStart()
+ { LIMITED_METHOD_CONTRACT; m_pCur = m_pHead; }
+
+ // ----------------------------------------------------------------------------------
+ ElemType &
+ Value()
+ { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(!AtEnd()); return *m_pCur; }
+
+ // ----------------------------------------------------------------------------------
+ ElemType &
+ operator*()
+ { WRAPPER_NO_CONTRACT; return Value(); }
+
+ // ----------------------------------------------------------------------------------
+ ElemType *
+ operator->()
+ { WRAPPER_NO_CONTRACT; return &Value(); }
+
+ // ----------------------------------------------------------------------------------
+ bool
+ Next()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (AtEnd())
+ {
+ return false;
+ }
+
+ m_pCur = m_pCur->GetNext();
+
+ return !AtEnd();
+ }
+
+protected:
+ // ----------------------------------------------------------------------------------
+ ElemType * m_pCur;
+ ElemType * m_pHead;
+
+ // ----------------------------------------------------------------------------------
+ // Do not allow address to be taken of what should be a by-val or by-ref.
+ SListIterator<ElemType> *
+ operator&()
+ { LIMITED_METHOD_CONTRACT; }
+};
+#endif
+
+} // IteratorUtil
+
+
+#endif // _ITERATOR_UTIL_H_
diff --git a/src/vm/jithelpers.cpp b/src/vm/jithelpers.cpp
new file mode 100644
index 0000000000..6dc88d85c1
--- /dev/null
+++ b/src/vm/jithelpers.cpp
@@ -0,0 +1,6764 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#include "common.h"
+#include "jitinterface.h"
+#include "codeman.h"
+#include "method.hpp"
+#include "class.h"
+#include "object.h"
+#include "field.h"
+#include "stublink.h"
+#include "virtualcallstub.h"
+#include "corjit.h"
+#include "eeconfig.h"
+#include "excep.h"
+#include "log.h"
+#include "excep.h"
+#include "float.h" // for isnan
+#include "dbginterface.h"
+#include "security.h"
+#include "securitymeta.h"
+#include "dllimport.h"
+#include "gc.h"
+#include "comdelegate.h"
+#include "jitperf.h" // to track jit perf
+#include "corprof.h"
+#include "eeprofinterfaces.h"
+
+#ifndef FEATURE_PAL
+// Included for referencing __report_gsfailure
+#include "process.h"
+#endif // !FEATURE_PAL
+
+#ifdef FEATURE_REMOTING
+#include "remoting.h" // create context bound and remote class instances
+#endif
+#include "perfcounters.h"
+#ifdef PROFILING_SUPPORTED
+#include "proftoeeinterfaceimpl.h"
+#endif
+#include "tls.h"
+#include "ecall.h"
+#include "generics.h"
+#include "typestring.h"
+#include "stackprobe.h"
+#include "typedesc.h"
+#include "genericdict.h"
+#include "array.h"
+#include "debuginfostore.h"
+#include "constrainedexecutionregion.h"
+#include "security.h"
+#include "safemath.h"
+#include "threadstatics.h"
+
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif
+
+#ifdef HAVE_GCCOVER
+#include "gccover.h"
+#endif // HAVE_GCCOVER
+
+#ifdef FEATURE_CORECLR
+#include "runtimehandles.h"
+#endif
+
+//========================================================================
+//
+// This file contains implementation of all JIT helpers. The helpers are
+// divided into following categories:
+//
+// INTEGER ARITHMETIC HELPERS
+// FLOATING POINT HELPERS
+// INSTANCE FIELD HELPERS
+// STATIC FIELD HELPERS
+// SHARED STATIC FIELD HELPERS
+// CASTING HELPERS
+// ALLOCATION HELPERS
+// STRING HELPERS
+// ARRAY HELPERS
+// VALUETYPE/BYREF HELPERS
+// GENERICS HELPERS
+// EXCEPTION HELPERS
+// SECURITY HELPERS
+// DEBUGGER/PROFILER HELPERS
+// GC HELPERS
+// INTEROP HELPERS
+//
+//========================================================================
+
+
+
+//========================================================================
+//
+// INTEGER ARITHMETIC HELPERS
+//
+//========================================================================
+
+#include <optsmallperfcritical.h>
+
+//
+// helper macro to multiply two 32-bit uints
+//
+#define Mul32x32To64(a, b) ((UINT64)((UINT32)(a)) * (UINT64)((UINT32)(b)))
+
+//
+// helper macro to get high 32-bit of 64-bit int
+//
+#define Hi32Bits(a) ((UINT32)((UINT64)(a) >> 32))
+
+//
+// helper macro to check whether 64-bit signed int fits into 32-bit signed (compiles into one 32-bit compare)
+//
+#define Is32BitSigned(a) (Hi32Bits(a) == Hi32Bits((INT64)(INT32)(a)))
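+
+// For example (a sketch of why the identity holds): truncating to INT32 and
+// sign-extending back to INT64 is lossless exactly when the value fits in the
+// signed 32-bit range, so for a = -5 the two high halves match (both are
+// 0xFFFFFFFF), while for a = 0x100000000 they differ (1 vs. 0).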
+
+//
+// helper function to shift the result by 32-bits
+//
+inline UINT64 ShiftToHi32Bits(UINT32 x)
+{
+ // The shift compiles into slow multiplication by 2^32! VSWhidbey 360736
+ // return ((UINT64)x) << 32;
+
+ ULARGE_INTEGER ret;
+ ret.u.HighPart = x;
+ ret.u.LowPart = 0;
+ return ret.QuadPart;
+}
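+
+// ShiftToHi32Bits(x) yields the same value as ((UINT64)x) << 32 (for example,
+// ShiftToHi32Bits(0x12345678) == UI64(0x1234567800000000)), but the two
+// 32-bit field stores avoid the slow multiplication noted above.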
+
+#if !defined(_TARGET_X86_)
+/*********************************************************************/
+HCIMPL2_VV(INT64, JIT_LMul, INT64 val1, INT64 val2)
+{
+ FCALL_CONTRACT;
+
+ UINT32 val1High = Hi32Bits(val1);
+ UINT32 val2High = Hi32Bits(val2);
+
+ if ((val1High == 0) && (val2High == 0))
+ return Mul32x32To64(val1, val2);
+
+ return (val1 * val2);
+}
+HCIMPLEND
+#endif // !defined(_TARGET_X86_)
+
+/*********************************************************************/
+HCIMPL2_VV(INT64, JIT_LMulOvf, INT64 val1, INT64 val2)
+{
+ FCALL_CONTRACT;
+
+ // This short-cut does not actually help since the multiplication
+ // of two 32-bit signed ints compiles into the call to a slow helper
+ // if (Is32BitSigned(val1) && Is32BitSigned(val2))
+ // return (INT64)(INT32)val1 * (INT64)(INT32)val2;
+
+ INDEBUG(INT64 expected = val1 * val2;)
+ INT64 ret;
+
+ // Remember the sign of the result
+ INT32 sign = Hi32Bits(val1) ^ Hi32Bits(val2);
+
+ // Convert to unsigned multiplication
+ if (val1 < 0) val1 = -val1;
+ if (val2 < 0) val2 = -val2;
+
+ // Get the upper 32 bits of the numbers
+ UINT32 val1High = Hi32Bits(val1);
+ UINT32 val2High = Hi32Bits(val2);
+
+ UINT64 valMid;
+
+ if (val1High == 0) {
+ // Compute the 'middle' bits of the long multiplication
+ valMid = Mul32x32To64(val2High, val1);
+ }
+ else {
+ if (val2High != 0)
+ goto ThrowExcep;
+ // Compute the 'middle' bits of the long multiplication
+ valMid = Mul32x32To64(val1High, val2);
+ }
+
+ // See if any bits after bit 32 are set
+ if (Hi32Bits(valMid) != 0)
+ goto ThrowExcep;
+
+ ret = Mul32x32To64(val1, val2) + ShiftToHi32Bits((UINT32)(valMid));
+
+ // check for overflow
+ if (Hi32Bits(ret) < (UINT32)valMid)
+ goto ThrowExcep;
+
+ if (sign >= 0) {
+ // have we spilled into the sign bit?
+ if (ret < 0)
+ goto ThrowExcep;
+ }
+ else {
+ ret = -ret;
+ // have we spilled into the sign bit?
+ if (ret > 0)
+ goto ThrowExcep;
+ }
+ _ASSERTE(ret == expected);
+ return ret;
+
+ThrowExcep:
+ FCThrow(kOverflowException);
+}
+HCIMPLEND
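+
+// A note on the decomposition used above: writing each operand as
+// valHigh * 2^32 + valLow, with at most one of the two high halves nonzero,
+//
+//   |val1| * |val2| = valMid * 2^32 + val1Low * val2Low
+//       where valMid = val1High * val2Low + val2High * val1Low
+//
+// The magnitude product fits in 64 bits only if valMid fits in 32 bits (the
+// first check) and adding ShiftToHi32Bits(valMid) to the low product does
+// not wrap past 2^64 (the Hi32Bits(ret) < (UINT32)valMid check); the sign
+// tests afterwards catch spills into bit 63.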
+
+/*********************************************************************/
+HCIMPL2_VV(UINT64, JIT_ULMulOvf, UINT64 val1, UINT64 val2)
+{
+ FCALL_CONTRACT;
+
+ INDEBUG(UINT64 expected = val1 * val2;)
+ UINT64 ret;
+
+ // Get the upper 32 bits of the numbers
+ UINT32 val1High = Hi32Bits(val1);
+ UINT32 val2High = Hi32Bits(val2);
+
+ UINT64 valMid;
+
+ if (val1High == 0) {
+ if (val2High == 0)
+ return Mul32x32To64(val1, val2);
+ // Compute the 'middle' bits of the long multiplication
+ valMid = Mul32x32To64(val2High, val1);
+ }
+ else {
+ if (val2High != 0)
+ goto ThrowExcep;
+ // Compute the 'middle' bits of the long multiplication
+ valMid = Mul32x32To64(val1High, val2);
+ }
+
+ // See if any bits after bit 32 are set
+ if (Hi32Bits(valMid) != 0)
+ goto ThrowExcep;
+
+ ret = Mul32x32To64(val1, val2) + ShiftToHi32Bits((UINT32)(valMid));
+
+ // check for overflow
+ if (Hi32Bits(ret) < (UINT32)valMid)
+ goto ThrowExcep;
+
+ _ASSERTE(ret == expected);
+ return ret;
+
+ThrowExcep:
+ FCThrow(kOverflowException);
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL2(INT32, JIT_Div, INT32 dividend, INT32 divisor)
+{
+ FCALL_CONTRACT;
+
+ RuntimeExceptionKind ehKind;
+
+ if (((UINT32) (divisor + 1)) <= 1) // Unsigned test for divisor in [-1 .. 0]
+ {
+ if (divisor == 0)
+ {
+ ehKind = kDivideByZeroException;
+ goto ThrowExcep;
+ }
+ else if (divisor == -1)
+ {
+ if (dividend == _I32_MIN)
+ {
+ ehKind = kOverflowException;
+ goto ThrowExcep;
+ }
+ return -dividend;
+ }
+ }
+
+ return(dividend / divisor);
+
+ThrowExcep:
+ FCThrow(ehKind);
+}
+HCIMPLEND
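+
+// A note on the guard above: divisor + 1 maps -1 to 0 and 0 to 1, and every
+// other divisor to a value outside [0, 1] once reinterpreted as UINT32, so
+// the single unsigned compare ((UINT32)(divisor + 1)) <= 1 isolates exactly
+// the two divisors that need special handling. JIT_Mod below uses the same
+// trick.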
+
+/*********************************************************************/
+HCIMPL2(INT32, JIT_Mod, INT32 dividend, INT32 divisor)
+{
+ FCALL_CONTRACT;
+
+ RuntimeExceptionKind ehKind;
+
+ if (((UINT32) (divisor + 1)) <= 1) // Unsigned test for divisor in [-1 .. 0]
+ {
+ if (divisor == 0)
+ {
+ ehKind = kDivideByZeroException;
+ goto ThrowExcep;
+ }
+ else if (divisor == -1)
+ {
+ if (dividend == _I32_MIN)
+ {
+ ehKind = kOverflowException;
+ goto ThrowExcep;
+ }
+ return 0;
+ }
+ }
+
+ return(dividend % divisor);
+
+ThrowExcep:
+ FCThrow(ehKind);
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL2(UINT32, JIT_UDiv, UINT32 dividend, UINT32 divisor)
+{
+ FCALL_CONTRACT;
+
+ if (divisor == 0)
+ FCThrow(kDivideByZeroException);
+
+ return(dividend / divisor);
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL2(UINT32, JIT_UMod, UINT32 dividend, UINT32 divisor)
+{
+ FCALL_CONTRACT;
+
+ if (divisor == 0)
+ FCThrow(kDivideByZeroException);
+
+ return(dividend % divisor);
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL2_VV(INT64, JIT_LDiv, INT64 dividend, INT64 divisor)
+{
+ FCALL_CONTRACT;
+
+ RuntimeExceptionKind ehKind;
+
+ if (Is32BitSigned(divisor))
+ {
+ if ((INT32)divisor == 0)
+ {
+ ehKind = kDivideByZeroException;
+ goto ThrowExcep;
+ }
+
+ if ((INT32)divisor == -1)
+ {
+ if ((UINT64) dividend == UI64(0x8000000000000000))
+ {
+ ehKind = kOverflowException;
+ goto ThrowExcep;
+ }
+ return -dividend;
+ }
+
+ // Check for -ive or +ive numbers in the range -2**31 to 2**31
+ if (Is32BitSigned(dividend))
+ return((INT32)dividend / (INT32)divisor);
+ }
+
+ // For all other combinations fallback to int64 div.
+ return(dividend / divisor);
+
+ThrowExcep:
+ FCThrow(ehKind);
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL2_VV(INT64, JIT_LMod, INT64 dividend, INT64 divisor)
+{
+ FCALL_CONTRACT;
+
+ RuntimeExceptionKind ehKind;
+
+ if (Is32BitSigned(divisor))
+ {
+ if ((INT32)divisor == 0)
+ {
+ ehKind = kDivideByZeroException;
+ goto ThrowExcep;
+ }
+
+ if ((INT32)divisor == -1)
+ {
+ // <TODO>TODO, we really should remove this as it lengthens the code path
+ // and the spec really says that it should not throw an exception. </TODO>
+ if ((UINT64) dividend == UI64(0x8000000000000000))
+ {
+ ehKind = kOverflowException;
+ goto ThrowExcep;
+ }
+ return 0;
+ }
+
+ // Check for -ive or +ive numbers in the range -2**31 to 2**31
+ if (Is32BitSigned(dividend))
+ return((INT32)dividend % (INT32)divisor);
+ }
+
+ // For all other combinations fallback to int64 div.
+ return(dividend % divisor);
+
+ThrowExcep:
+ FCThrow(ehKind);
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL2_VV(UINT64, JIT_ULDiv, UINT64 dividend, UINT64 divisor)
+{
+ FCALL_CONTRACT;
+
+ if (Hi32Bits(divisor) == 0)
+ {
+ if ((UINT32)(divisor) == 0)
+ FCThrow(kDivideByZeroException);
+
+ if (Hi32Bits(dividend) == 0)
+ return((UINT32)dividend / (UINT32)divisor);
+ }
+
+ return(dividend / divisor);
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL2_VV(UINT64, JIT_ULMod, UINT64 dividend, UINT64 divisor)
+{
+ FCALL_CONTRACT;
+
+ if (Hi32Bits(divisor) == 0)
+ {
+ if ((UINT32)(divisor) == 0)
+ FCThrow(kDivideByZeroException);
+
+ if (Hi32Bits(dividend) == 0)
+ return((UINT32)dividend % (UINT32)divisor);
+ }
+
+ return(dividend % divisor);
+}
+HCIMPLEND
+
+#if !defined(_WIN64) && !defined(_TARGET_X86_)
+/*********************************************************************/
+HCIMPL2_VV(UINT64, JIT_LLsh, UINT64 num, int shift)
+{
+ FCALL_CONTRACT;
+ return num << shift;
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL2_VV(INT64, JIT_LRsh, INT64 num, int shift)
+{
+ FCALL_CONTRACT;
+ return num >> shift;
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL2_VV(UINT64, JIT_LRsz, UINT64 num, int shift)
+{
+ FCALL_CONTRACT;
+ return num >> shift;
+}
+HCIMPLEND
+
+#endif
+
+#include <optdefault.h>
+
+
+//========================================================================
+//
+// FLOATING POINT HELPERS
+//
+//========================================================================
+
+#include <optsmallperfcritical.h>
+
+/*********************************************************************/
+//
+HCIMPL1_V(double, JIT_ULng2Dbl, UINT64 val)
+{
+ FCALL_CONTRACT;
+
+ double conv = (double) ((INT64) val);
+ if (conv < 0)
+ conv += (4294967296.0 * 4294967296.0); // add 2^64
+ _ASSERTE(conv >= 0);
+ return(conv);
+}
+HCIMPLEND
+
+/*********************************************************************/
+// needed for ARM
+HCIMPL1_V(double, JIT_Lng2Dbl, INT64 val)
+{
+ FCALL_CONTRACT;
+ return double(val);
+}
+HCIMPLEND
+
+//--------------------------------------------------------------------------
+template <class ftype>
+ftype modftype(ftype value, ftype *iptr);
+template <> float modftype(float value, float *iptr) { return modff(value, iptr); }
+template <> double modftype(double value, double *iptr) { return modf(value, iptr); }
+
+// round to nearest, round to even if tied
+template <class ftype>
+ftype BankersRound(ftype value)
+{
+ if (value < 0.0) return -BankersRound <ftype> (-value);
+
+ ftype integerPart;
+ modftype( value, &integerPart );
+
+ // if decimal part is exactly .5
+ if ((value -(integerPart +0.5)) == 0.0)
+ {
+ // round to even
+#if defined(_TARGET_ARM_) && defined(FEATURE_CORESYSTEM)
+ // @ARMTODO: On ARM when building on CoreSystem (where we link against the system CRT) an attempt to
+ // use fmod(float, float) fails to link (apparently this is converted to a reference to fmodf, which
+ // is not included in the system CRT). Use the double version instead.
+ if (fmod(double(integerPart), double(2.0)) == 0.0)
+ return integerPart;
+#else
+ if (fmod(ftype(integerPart), ftype(2.0)) == 0.0)
+ return integerPart;
+#endif
+
+ // Else return the nearest even integer
+ return (ftype)_copysign(ceil(fabs(value+0.5)),
+ value);
+ }
+
+ // Otherwise round to closest
+ return (ftype)_copysign(floor(fabs(value)+0.5),
+ value);
+}
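+
+// A few illustrative values (halfway cases round to the nearest even
+// integer, everything else to the nearest integer):
+//
+//   BankersRound(0.5)  == 0.0      BankersRound(1.5)  == 2.0
+//   BankersRound(2.5)  == 2.0      BankersRound(-2.5) == -2.0
+//   BankersRound(2.6)  == 3.0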
+
+
+/*********************************************************************/
+// round double to nearest int (as double)
+HCIMPL1_V(double, JIT_DoubleRound, double val)
+{
+ FCALL_CONTRACT;
+ return BankersRound(val);
+}
+HCIMPLEND
+
+/*********************************************************************/
+// round float to nearest int (as float)
+HCIMPL1_V(float, JIT_FloatRound, float val)
+{
+ FCALL_CONTRACT;
+ return BankersRound(val);
+}
+HCIMPLEND
+
+/*********************************************************************/
+// Call fast Dbl2Lng conversion - used by functions below
+FORCEINLINE INT64 FastDbl2Lng(double val)
+{
+#ifdef _TARGET_X86_
+ FCALL_CONTRACT;
+ return HCCALL1_V(JIT_Dbl2Lng, val);
+#else
+ FCALL_CONTRACT;
+ return((__int64) val);
+#endif
+}
+
+/*********************************************************************/
+HCIMPL1_V(UINT32, JIT_Dbl2UIntOvf, double val)
+{
+ FCALL_CONTRACT;
+
+ // Note that this expression also works properly for val = NaN case
+ if (val > -1.0 && val < 4294967296.0)
+ return((UINT32)FastDbl2Lng(val));
+
+ FCThrow(kOverflowException);
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL1_V(UINT64, JIT_Dbl2ULng, double val)
+{
+ FCALL_CONTRACT;
+
+ const double two63 = 2147483648.0 * 4294967296.0;
+ UINT64 ret;
+ if (val < two63) {
+ ret = FastDbl2Lng(val);
+ }
+ else {
+ // subtract 0x8000000000000000, do the convert then add it back again
+ ret = FastDbl2Lng(val - two63) + I64(0x8000000000000000);
+    }
+ return ret;
+}
+HCIMPLEND
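+
+// A note on the two63 adjustment above: a double-to-signed-int64 conversion
+// can only represent values below 2^63, so for val >= 2^63 the helper
+// converts val - 2^63 (now in signed range) and then restores the top bit by
+// adding 0x8000000000000000. JIT_Dbl2ULngOvf below repeats the same trick
+// inside its range check.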
+
+/*********************************************************************/
+HCIMPL1_V(UINT64, JIT_Dbl2ULngOvf, double val)
+{
+ FCALL_CONTRACT;
+
+ const double two64 = 4294967296.0 * 4294967296.0;
+ // Note that this expression also works properly for val = NaN case
+ if (val > -1.0 && val < two64) {
+ const double two63 = 2147483648.0 * 4294967296.0;
+ UINT64 ret;
+ if (val < two63) {
+ ret = FastDbl2Lng(val);
+ }
+ else {
+ // subtract 0x8000000000000000, do the convert then add it back again
+ ret = FastDbl2Lng(val - two63) + I64(0x8000000000000000);
+ }
+#ifdef _DEBUG
+ // since no overflow can occur, the value always has to be within 1
+ double roundTripVal = HCCALL1_V(JIT_ULng2Dbl, ret);
+ _ASSERTE(val - 1.0 <= roundTripVal && roundTripVal <= val + 1.0);
+#endif // _DEBUG
+ return ret;
+ }
+
+ FCThrow(kOverflowException);
+}
+HCIMPLEND
+
+
+#if !defined(_TARGET_X86_)
+
+HCIMPL1_V(INT64, JIT_Dbl2Lng, double val)
+{
+ FCALL_CONTRACT;
+
+ return((INT64)val);
+}
+HCIMPLEND
+
+HCIMPL1_V(int, JIT_Dbl2IntOvf, double val)
+{
+ FCALL_CONTRACT;
+
+ const double two31 = 2147483648.0;
+
+ // Note that this expression also works properly for val = NaN case
+ if (val > -two31 - 1 && val < two31)
+ return((INT32)val);
+
+ FCThrow(kOverflowException);
+}
+HCIMPLEND
+
+HCIMPL1_V(INT64, JIT_Dbl2LngOvf, double val)
+{
+ FCALL_CONTRACT;
+
+ const double two63 = 2147483648.0 * 4294967296.0;
+
+ // Note that this expression also works properly for val = NaN case
+ // We need to compare with the very next double to two63. 0x402 is epsilon to get us there.
+ if (val > -two63 - 0x402 && val < two63)
+ return((INT64)val);
+
+ FCThrow(kOverflowException);
+}
+HCIMPLEND
+
+HCIMPL2_VV(float, JIT_FltRem, float dividend, float divisor)
+{
+ FCALL_CONTRACT;
+
+ //
+ // From the ECMA standard:
+ //
+ // If [divisor] is zero or [dividend] is infinity
+ // the result is NaN.
+ // If [divisor] is infinity,
+ // the result is [dividend] (negated for -infinity***).
+ //
+ // ***"negated for -infinity" has been removed from the spec
+ //
+
+ if (divisor==0 || !_finite(dividend))
+ {
+ UINT32 NaN = CLR_NAN_32;
+ return *(float *)(&NaN);
+ }
+ else if (!_finite(divisor) && !_isnan(divisor))
+ {
+ return dividend;
+ }
+ // else...
+#if 0
+ // COMPILER BUG WITH FMODF() + /Oi, USE FMOD() INSTEAD
+ return fmodf(dividend,divisor);
+#else
+ return (float)fmod((double)dividend,(double)divisor);
+#endif
+}
+HCIMPLEND
+
+HCIMPL2_VV(double, JIT_DblRem, double dividend, double divisor)
+{
+ FCALL_CONTRACT;
+
+ //
+ // From the ECMA standard:
+ //
+ // If [divisor] is zero or [dividend] is infinity
+ // the result is NaN.
+ // If [divisor] is infinity,
+ // the result is [dividend] (negated for -infinity***).
+ //
+ // ***"negated for -infinity" has been removed from the spec
+ //
+ if (divisor==0 || !_finite(dividend))
+ {
+ UINT64 NaN = CLR_NAN_64;
+ return *(double *)(&NaN);
+ }
+ else if (!_finite(divisor) && !_isnan(divisor))
+ {
+ return dividend;
+ }
+ // else...
+ return(fmod(dividend,divisor));
+}
+HCIMPLEND
+
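+// Behavior sketch for the two remainder helpers above (illustrative values
+// only): JIT_DblRem(5.0, 0.0) and JIT_DblRem(INFINITY, 2.0) both yield NaN;
+// JIT_DblRem(5.0, INFINITY) yields 5.0; and the finite/finite cases defer to
+// fmod, e.g. JIT_DblRem(5.5, 2.0) == 1.5.
+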
+#endif // !defined(_TARGET_X86_)
+
+#include <optdefault.h>
+
+
+//========================================================================
+//
+// INSTANCE FIELD HELPERS
+//
+//========================================================================
+
+/*********************************************************************/
+// Returns the address of the field in the object (This is an interior
+// pointer and the caller has to use it appropriately). obj can be
+// either a reference or a byref
+HCIMPL2(void*, JIT_GetFieldAddr_Framed, Object *obj, FieldDesc* pFD)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pFD));
+ } CONTRACTL_END;
+
+ void * fldAddr = NULL;
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(objRef);
+
+ if (objRef == NULL)
+ COMPlusThrow(kNullReferenceException);
+
+#ifdef FEATURE_REMOTING
+ if(objRef->IsTransparentProxy())
+ {
+ objRef = CRemotingServices::GetObjectFromProxy(objRef);
+ if (objRef->IsTransparentProxy())
+ COMPlusThrow(kInvalidOperationException, W("Remoting_InvalidValueTypeFieldAccess"));
+ }
+#endif // FEATURE_REMOTING
+
+ fldAddr = pFD->GetAddress(OBJECTREFToObject(objRef));
+
+ HELPER_METHOD_FRAME_END();
+
+ return fldAddr;
+}
+HCIMPLEND
+
+#include <optsmallperfcritical.h>
+HCIMPL2(void*, JIT_GetFieldAddr, Object *obj, FieldDesc* pFD)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pFD));
+ } CONTRACTL_END;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL2(JIT_GetFieldAddr_Framed, obj, pFD);
+ }
+
+ return pFD->GetAddressGuaranteedInHeap(obj);
+}
+HCIMPLEND
+#include <optdefault.h>
+
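+// The fast/framed split above is the pattern every instance field helper in
+// this section follows: a frameless fast path handles the common case
+// (non-null, non-proxy object, no IBC instrumentation, no EnC-added field)
+// with a direct address computation, and everything else tail-calls a framed
+// helper that is allowed to throw and to trigger a GC. A schematic of the
+// shape (pseudo-C++; needsSlowPath and the helper names are illustrative):
+//
+//     if (obj == NULL || needsSlowPath(obj, pFD))
+//     {
+//         ENDFORBIDGC();                         // leaving the no-GC region
+//         return HCCALL2(SlowFramedHelper, obj, pFD);   // sets up a frame
+//     }
+//     return FastComputation(obj, pFD);          // no frame, no GC
+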
+/*********************************************************************/
+#define HCallAssert(cache, target) // suppressed to avoid ambiguous cast errors caused by use of template
+template <typename FIELDTYPE>
+NOINLINE HCIMPL2(FIELDTYPE, JIT_GetField_Framed, Object *obj, FieldDesc *pFD)
+#undef HCallAssert
+{
+ FCALL_CONTRACT;
+
+ FIELDTYPE value = 0;
+
+ // This is an instance field helper
+ _ASSERTE(!pFD->IsStatic());
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(objRef);
+ if (objRef == NULL)
+ COMPlusThrow(kNullReferenceException);
+ pFD->GetInstanceField(objRef, &value);
+ HELPER_METHOD_POLL();
+ HELPER_METHOD_FRAME_END();
+
+ return value;
+}
+HCIMPLEND
+
+/*********************************************************************/
+#include <optsmallperfcritical.h>
+
+HCIMPL2(INT8, JIT_GetField8, Object *obj, FieldDesc *pFD)
+{
+ FCALL_CONTRACT;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL2(JIT_GetField_Framed<INT8>, obj, pFD);
+ }
+
+ INT8 val = VolatileLoad<INT8>((INT8*)pFD->GetAddressGuaranteedInHeap(obj));
+ FC_GC_POLL_RET();
+ return val;
+}
+HCIMPLEND
+
+HCIMPL2(INT16, JIT_GetField16, Object *obj, FieldDesc *pFD)
+{
+ FCALL_CONTRACT;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL2(JIT_GetField_Framed<INT16>, obj, pFD);
+ }
+
+ INT16 val = VolatileLoad<INT16>((INT16*)pFD->GetAddressGuaranteedInHeap(obj));
+ FC_GC_POLL_RET();
+ return val;
+}
+HCIMPLEND
+
+HCIMPL2(INT32, JIT_GetField32, Object *obj, FieldDesc *pFD)
+{
+ FCALL_CONTRACT;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL2(JIT_GetField_Framed<INT32>, obj, pFD);
+ }
+
+ INT32 val = VolatileLoad<INT32>((INT32*)pFD->GetAddressGuaranteedInHeap(obj));
+ FC_GC_POLL_RET();
+ return val;
+}
+HCIMPLEND
+
+HCIMPL2(INT64, JIT_GetField64, Object *obj, FieldDesc *pFD)
+{
+ FCALL_CONTRACT;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL2(JIT_GetField_Framed<INT64>, obj, pFD);
+ }
+
+ INT64 val = VolatileLoad<INT64>((INT64*)pFD->GetAddressGuaranteedInHeap(obj));
+ FC_GC_POLL_RET();
+ return val;
+}
+HCIMPLEND
+
+HCIMPL2(FLOAT, JIT_GetFieldFloat, Object *obj, FieldDesc *pFD)
+{
+ FCALL_CONTRACT;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL2(JIT_GetField_Framed<FLOAT>, obj, pFD);
+ }
+
+ FLOAT val;
+ (INT32&)val = VolatileLoad<INT32>((INT32*)pFD->GetAddressGuaranteedInHeap(obj));
+ FC_GC_POLL_RET();
+ return val;
+}
+HCIMPLEND
+
+HCIMPL2(DOUBLE, JIT_GetFieldDouble, Object *obj, FieldDesc *pFD)
+{
+ FCALL_CONTRACT;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL2(JIT_GetField_Framed<DOUBLE>, obj, pFD);
+ }
+
+ DOUBLE val;
+ (INT64&)val = VolatileLoad<INT64>((INT64*)pFD->GetAddressGuaranteedInHeap(obj));
+ FC_GC_POLL_RET();
+ return val;
+}
+HCIMPLEND
+
+#include <optdefault.h>
+
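+// Note on the float/double accessors above: the field is read or written
+// through an integer VolatileLoad/VolatileStore of the same width, and the
+// bits are reinterpreted via the (INT32&)/(INT64&) casts. This reuses the
+// integer volatile path unchanged; one plausible reason to avoid a raw
+// floating-point load here is that an x87 load can silently alter signaling
+// NaN bit patterns, whereas the integer load preserves the field's bits
+// exactly (this rationale is an assumption, not stated in the source).
+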
+/*********************************************************************/
+#define HCallAssert(cache, target) // suppressed to avoid ambiguous cast errors caused by use of template
+template <typename FIELDTYPE>
+NOINLINE HCIMPL3(VOID, JIT_SetField_Framed, Object *obj, FieldDesc* pFD, FIELDTYPE val)
+#undef HCallAssert
+{
+ FCALL_CONTRACT;
+
+ // This is an instance field helper
+ _ASSERTE(!pFD->IsStatic());
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ HELPER_METHOD_FRAME_BEGIN_1(objRef);
+ if (objRef == NULL)
+ COMPlusThrow(kNullReferenceException);
+ pFD->SetInstanceField(objRef, &val);
+ HELPER_METHOD_POLL();
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+/*********************************************************************/
+#include <optsmallperfcritical.h>
+
+HCIMPL3(VOID, JIT_SetField8, Object *obj, FieldDesc *pFD, INT8 val)
+{
+ FCALL_CONTRACT;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL3(JIT_SetField_Framed<INT8>, obj, pFD, val);
+ }
+
+ VolatileStore<INT8>((INT8*)pFD->GetAddressGuaranteedInHeap(obj), val);
+ FC_GC_POLL();
+}
+HCIMPLEND
+
+HCIMPL3(VOID, JIT_SetField16, Object *obj, FieldDesc *pFD, INT16 val)
+{
+ FCALL_CONTRACT;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL3(JIT_SetField_Framed<INT16>, obj, pFD, val);
+ }
+
+ VolatileStore<INT16>((INT16*)pFD->GetAddressGuaranteedInHeap(obj), val);
+ FC_GC_POLL();
+}
+HCIMPLEND
+
+HCIMPL3(VOID, JIT_SetField32, Object *obj, FieldDesc *pFD, INT32 val)
+{
+ FCALL_CONTRACT;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL3(JIT_SetField_Framed<INT32>, obj, pFD, val);
+ }
+
+ VolatileStore<INT32>((INT32*)pFD->GetAddressGuaranteedInHeap(obj), val);
+ FC_GC_POLL();
+}
+HCIMPLEND
+
+HCIMPL3(VOID, JIT_SetField64, Object *obj, FieldDesc *pFD, INT64 val)
+{
+ FCALL_CONTRACT;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL3(JIT_SetField_Framed<INT64>, obj, pFD, val);
+ }
+
+ VolatileStore<INT64>((INT64*)pFD->GetAddressGuaranteedInHeap(obj), val);
+ FC_GC_POLL();
+}
+HCIMPLEND
+
+HCIMPL3(VOID, JIT_SetFieldFloat, Object *obj, FieldDesc *pFD, FLOAT val)
+{
+ FCALL_CONTRACT;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL3(JIT_SetField_Framed<FLOAT>, obj, pFD, val);
+ }
+
+ VolatileStore<INT32>((INT32*)pFD->GetAddressGuaranteedInHeap(obj), (INT32&)val);
+ FC_GC_POLL();
+}
+HCIMPLEND
+
+HCIMPL3(VOID, JIT_SetFieldDouble, Object *obj, FieldDesc *pFD, DOUBLE val)
+{
+ FCALL_CONTRACT;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL3(JIT_SetField_Framed<DOUBLE>, obj, pFD, val);
+ }
+
+ VolatileStore<INT64>((INT64*)pFD->GetAddressGuaranteedInHeap(obj), (INT64&)val);
+ FC_GC_POLL();
+}
+HCIMPLEND
+
+#include <optdefault.h>
+
+/*********************************************************************/
+HCIMPL2(Object*, JIT_GetFieldObj_Framed, Object *obj, FieldDesc *pFD)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(!pFD->IsStatic());
+ PRECONDITION(!pFD->IsPrimitive() && !pFD->IsByValue()); // Assert that we are called only for objects
+ } CONTRACTL_END;
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+ OBJECTREF val = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(objRef, val); // Set up a frame
+ if (objRef == NULL)
+ COMPlusThrow(kNullReferenceException);
+ pFD->GetInstanceField(objRef, &val);
+ HELPER_METHOD_POLL();
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(val);
+}
+HCIMPLEND
+
+#include <optsmallperfcritical.h>
+HCIMPL2(Object*, JIT_GetFieldObj, Object *obj, FieldDesc *pFD)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(!pFD->IsStatic());
+ PRECONDITION(!pFD->IsPrimitive() && !pFD->IsByValue()); // Assert that we are called only for objects
+ } CONTRACTL_END;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL2(JIT_GetFieldObj_Framed, obj, pFD);
+ }
+
+ void * address = pFD->GetAddressGuaranteedInHeap(obj);
+ OBJECTREF val = ObjectToOBJECTREF(VolatileLoad((Object **)address));
+
+ FC_GC_POLL_AND_RETURN_OBJREF(val);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*********************************************************************/
+HCIMPL3(VOID, JIT_SetFieldObj_Framed, Object *obj, FieldDesc *pFD, Object *value)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(!pFD->IsStatic());
+ PRECONDITION(!pFD->IsPrimitive() && !pFD->IsByValue()); // Assert that we are called only for objects
+ } CONTRACTL_END;
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+ OBJECTREF val = ObjectToOBJECTREF(value);
+
+ HELPER_METHOD_FRAME_BEGIN_2(objRef, val);
+ if (objRef == NULL)
+ COMPlusThrow(kNullReferenceException);
+ pFD->SetInstanceField(objRef, &val);
+ HELPER_METHOD_POLL();
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+#include <optsmallperfcritical.h>
+HCIMPL3(VOID, JIT_SetFieldObj, Object *obj, FieldDesc *pFD, Object *value)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(!pFD->IsStatic());
+ PRECONDITION(!pFD->IsPrimitive() && !pFD->IsByValue()); // Assert that we are called only for objects
+ } CONTRACTL_END;
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL3(JIT_SetFieldObj_Framed, obj, pFD, value);
+ }
+
+ void * address = pFD->GetAddressGuaranteedInHeap(obj);
+ SetObjectReference((OBJECTREF*)address, ObjectToOBJECTREF(value), GetAppDomain());
+ FC_GC_POLL();
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*********************************************************************/
+HCIMPL4(VOID, JIT_GetFieldStruct_Framed, LPVOID retBuff, Object *obj, FieldDesc *pFD, MethodTable *pFieldMT)
+{
+ FCALL_CONTRACT;
+
+ // This may be a cross-context field access. Set up a frame as we will
+ // transition to managed code later.
+
+ // This is an instance field helper
+ _ASSERTE(!pFD->IsStatic());
+
+ // Assert that we are not called for objects or primitive types
+ _ASSERTE(!pFD->IsPrimitive());
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ HELPER_METHOD_FRAME_BEGIN_1(objRef); // Set up a frame
+
+ if (objRef == NULL)
+ COMPlusThrow(kNullReferenceException);
+
+ // Try an unwrap operation in case we are not being called
+ // in the same context as the server.
+ // If that is the case, GetObjectFromProxy will return
+ // the server object.
+ BOOL fRemoted = FALSE;
+
+#ifdef FEATURE_REMOTING
+ if (objRef->IsTransparentProxy())
+ {
+ objRef = CRemotingServices::GetObjectFromProxy(objRef);
+ if (objRef->IsTransparentProxy())
+ {
+ CRemotingServices::FieldAccessor(pFD, objRef, retBuff, TRUE);
+ fRemoted = TRUE;
+ }
+ }
+#endif
+
+ if (!fRemoted)
+ {
+ void * pAddr = pFD->GetAddress(OBJECTREFToObject(objRef));
+ CopyValueClass(retBuff, pAddr, pFieldMT, objRef->GetAppDomain());
+ }
+
+ HELPER_METHOD_FRAME_END(); // Tear down the frame
+}
+HCIMPLEND
+
+#include <optsmallperfcritical.h>
+HCIMPL4(VOID, JIT_GetFieldStruct, LPVOID retBuff, Object *obj, FieldDesc *pFD, MethodTable *pFieldMT)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(pFieldMT->IsValueType());
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL4(JIT_GetFieldStruct_Framed, retBuff, obj, pFD, pFieldMT);
+ }
+
+ void * pAddr = pFD->GetAddressGuaranteedInHeap(obj);
+ CopyValueClass(retBuff, pAddr, pFieldMT, obj->GetAppDomain());
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*********************************************************************/
+HCIMPL4(VOID, JIT_SetFieldStruct_Framed, Object *obj, FieldDesc *pFD, MethodTable *pFieldMT, LPVOID valuePtr)
+{
+ FCALL_CONTRACT;
+
+ // Assert that we are not called for objects or primitive types
+ _ASSERTE(!pFD->IsPrimitive());
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ // This may be a cross-context field access. Set up a frame as we will
+ // transition to managed code later.
+
+ HELPER_METHOD_FRAME_BEGIN_1(objRef); // Set up a frame
+
+ if (objRef == NULL)
+ COMPlusThrow(kNullReferenceException);
+
+ // Try an unwrap operation in case we are not being called
+ // in the same context as the server.
+ // If that is the case, GetObjectFromProxy will return
+ // the server object.
+ BOOL fRemoted = FALSE;
+
+#ifdef FEATURE_REMOTING
+ if(objRef->IsTransparentProxy())
+ {
+ objRef = CRemotingServices::GetObjectFromProxy(objRef);
+
+ if(objRef->IsTransparentProxy())
+ {
+ CRemotingServices::FieldAccessor(pFD, objRef, valuePtr, FALSE);
+ fRemoted = TRUE;
+ }
+ }
+#endif
+
+ if (!fRemoted)
+ {
+ void * pAddr = pFD->GetAddress(OBJECTREFToObject(objRef));
+ CopyValueClass(pAddr, valuePtr, pFieldMT, objRef->GetAppDomain());
+ }
+
+ HELPER_METHOD_FRAME_END(); // Tear down the frame
+}
+HCIMPLEND
+
+#include <optsmallperfcritical.h>
+HCIMPL4(VOID, JIT_SetFieldStruct, Object *obj, FieldDesc *pFD, MethodTable *pFieldMT, LPVOID valuePtr)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(pFieldMT->IsValueType());
+
+ if (obj == NULL || obj->IsTransparentProxy() || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
+ {
+ ENDFORBIDGC();
+ return HCCALL4(JIT_SetFieldStruct_Framed, obj, pFD, pFieldMT, valuePtr);
+ }
+
+ void * pAddr = pFD->GetAddressGuaranteedInHeap(obj);
+ CopyValueClass(pAddr, valuePtr, pFieldMT, obj->GetAppDomain());
+}
+HCIMPLEND
+#include <optdefault.h>
+
+
+
+//========================================================================
+//
+// STATIC FIELD HELPERS
+//
+//========================================================================
+
+#ifdef FEATURE_MIXEDMODE
+HCIMPL1(void*, JIT_GetStaticFieldAddr_Tls, FieldDesc *pFD)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pFD));
+ PRECONDITION(pFD->IsStatic());
+ PRECONDITION(pFD->IsRVA() && pFD->GetModule()->IsRvaFieldTls(pFD->GetOffset()));
+ } CONTRACTL_END;
+
+ void *addr = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ Module* pModule = pFD->GetModule();
+
+ // Get the ThreadLocalStoragePointer in the TEB.
+ LPVOID pTlsPtr = ClrTeb::GetLegacyThreadLocalStoragePointer();
+
+ // pTlsPtr is pointing at an array of pointers, each of which points to
+ // the TLS block of a dll. So here, we need to get the TLS index for
+ // the dll, add that to pTlsPtr, and dereference it to get the TLS
+ // block of the dll.
+ DWORD tlsIndex = pModule->GetTlsIndex();
+ LPVOID pDllTlsBase = (LPVOID)*((UINT_PTR*)pTlsPtr + tlsIndex);
+
+ // Finally, we need to find the field offset into the TLS block.
+ addr = (LPVOID)((PBYTE)pDllTlsBase + pModule->GetFieldTlsOffset(pFD->GetOffset()));
+
+ HELPER_METHOD_FRAME_END();
+
+ return addr;
+}
+HCIMPLEND
+#endif // FEATURE_MIXEDMODE
+
+#ifdef FEATURE_REMOTING
+HCIMPL1(void*, JIT_GetStaticFieldAddr_Context, FieldDesc *pFD)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pFD));
+ PRECONDITION(pFD->IsStatic());
+ PRECONDITION(pFD->IsContextStatic());
+ } CONTRACTL_END;
+
+ void *addr = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ MethodTable *pMT = pFD->GetEnclosingMethodTable();
+ pMT->CheckRestore();
+ pMT->CheckRunClassInitThrowing();
+
+ addr = Context::GetStaticFieldAddress(pFD);
+
+ HELPER_METHOD_FRAME_END();
+
+ return addr;
+}
+HCIMPLEND
+#endif
+
+// Slow helper to tailcall from the fast one
+NOINLINE HCIMPL1(void, JIT_InitClass_Framed, MethodTable* pMT)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ // We don't want to be calling JIT_InitClass at all for perf reasons
+ // on the Global Class <Module>, as the class loading logic ensures that we
+ // have already initialized the Global Class <Module>
+ CONSISTENCY_CHECK(!pMT->IsGlobalClass());
+
+ pMT->CheckRestore();
+ pMT->CheckRunClassInitThrowing();
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+
+/*************************************************************/
+#include <optsmallperfcritical.h>
+HCIMPL1(void, JIT_InitClass, CORINFO_CLASS_HANDLE typeHnd_)
+{
+ FCALL_CONTRACT;
+
+ TypeHandle typeHnd(typeHnd_);
+ MethodTable *pMT = typeHnd.AsMethodTable();
+ _ASSERTE(!pMT->IsClassPreInited());
+
+ if (pMT->GetDomainLocalModule()->IsClassInitialized(pMT))
+ return;
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ HCCALL1(JIT_InitClass_Framed, pMT);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*************************************************************/
+HCIMPL2(void, JIT_InitInstantiatedClass, CORINFO_CLASS_HANDLE typeHnd_, CORINFO_METHOD_HANDLE methHnd_)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(methHnd_ != NULL);
+ } CONTRACTL_END;
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL(); // Set up a frame
+
+ MethodTable * pMT = (MethodTable*) typeHnd_;
+ MethodDesc * pMD = (MethodDesc*) methHnd_;
+
+ MethodTable * pTemplateMT = pMD->GetMethodTable();
+ if (pTemplateMT->IsSharedByGenericInstantiations())
+ {
+ pMT = ClassLoader::LoadGenericInstantiationThrowing(pTemplateMT->GetModule(),
+ pTemplateMT->GetCl(),
+ pMD->GetExactClassInstantiation(pMT)).AsMethodTable();
+ }
+ else
+ {
+ pMT = pTemplateMT;
+ }
+
+ pMT->CheckRestore();
+ pMT->EnsureInstanceActive();
+ pMT->CheckRunClassInitThrowing();
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
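+// Illustrative example for the instantiation fix-up above: for a method on a
+// generic class called with exact instantiation List<string>, the method's
+// own method table may be the canonical one shared across reference-type
+// instantiations; LoadGenericInstantiationThrowing rebuilds the exact
+// List<string> method table from the module, token, and exact class
+// instantiation before the cctor is run (List<string> is an illustrative
+// type, not special-cased here).
+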
+
+//========================================================================
+//
+// SHARED STATIC FIELD HELPERS
+//
+//========================================================================
+
+#include <optsmallperfcritical.h>
+
+HCIMPL2(void*, JIT_GetSharedNonGCStaticBase_Portable, SIZE_T moduleDomainID, DWORD dwClassDomainID)
+{
+ FCALL_CONTRACT;
+
+ DomainLocalModule *pLocalModule = NULL;
+
+ if (!Module::IsEncodedModuleIndex(moduleDomainID))
+ pLocalModule = (DomainLocalModule *) moduleDomainID;
+ else
+ {
+ DomainLocalBlock *pLocalBlock = GetAppDomain()->GetDomainLocalBlock();
+ pLocalModule = pLocalBlock->GetModuleSlot(Module::IDToIndex(moduleDomainID));
+ }
+
+ // If the type doesn't have a class constructor, the contents of this if statement may
+ // still get executed. JIT_GetSharedNonGCStaticBaseNoCtor should be used in this case.
+ if (pLocalModule->IsPrecomputedClassInitialized(dwClassDomainID))
+ {
+ return (void*)pLocalModule->GetPrecomputedNonGCStaticsBasePointer();
+ }
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL2(JIT_GetSharedNonGCStaticBase_Helper, pLocalModule, dwClassDomainID);
+}
+HCIMPLEND
+
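+// moduleDomainID decoding, as used by the helper above and repeated by every
+// shared-statics helper below (sketch; ResolveLocalModule is a hypothetical
+// name, not a VM function):
+//
+//     DomainLocalModule * ResolveLocalModule(SIZE_T moduleDomainID)
+//     {
+//         if (!Module::IsEncodedModuleIndex(moduleDomainID))
+//             return (DomainLocalModule *) moduleDomainID;  // direct pointer
+//         // otherwise look the module up in the current domain's block
+//         return GetAppDomain()->GetDomainLocalBlock()->
+//                    GetModuleSlot(Module::IDToIndex(moduleDomainID));
+//     }
+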
+// No constructor version of JIT_GetSharedNonGCStaticBase. Does not check if class has
+// been initialized.
+HCIMPL1(void*, JIT_GetSharedNonGCStaticBaseNoCtor_Portable, SIZE_T moduleDomainID)
+{
+ FCALL_CONTRACT;
+
+ DomainLocalModule *pLocalModule = NULL;
+
+ if (!Module::IsEncodedModuleIndex(moduleDomainID))
+ pLocalModule = (DomainLocalModule *) moduleDomainID;
+ else
+ {
+ DomainLocalBlock *pLocalBlock = GetAppDomain()->GetDomainLocalBlock();
+ pLocalModule = pLocalBlock->GetModuleSlot(Module::IDToIndex(moduleDomainID));
+ }
+
+ return (void*)pLocalModule->GetPrecomputedNonGCStaticsBasePointer();
+}
+HCIMPLEND
+
+HCIMPL2(void*, JIT_GetSharedGCStaticBase_Portable, SIZE_T moduleDomainID, DWORD dwClassDomainID)
+{
+ FCALL_CONTRACT;
+
+ DomainLocalModule *pLocalModule = NULL;
+
+ if (!Module::IsEncodedModuleIndex(moduleDomainID))
+ pLocalModule = (DomainLocalModule *) moduleDomainID;
+ else
+ {
+ DomainLocalBlock *pLocalBlock = GetAppDomain()->GetDomainLocalBlock();
+ pLocalModule = pLocalBlock->GetModuleSlot(Module::IDToIndex(moduleDomainID));
+ }
+
+ // If the type doesn't have a class constructor, the contents of this if statement may
+ // still get executed. JIT_GetSharedGCStaticBaseNoCtor should be used in this case.
+ if (pLocalModule->IsPrecomputedClassInitialized(dwClassDomainID))
+ {
+ return (void*)pLocalModule->GetPrecomputedGCStaticsBasePointer();
+ }
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL2(JIT_GetSharedGCStaticBase_Helper, pLocalModule, dwClassDomainID);
+}
+HCIMPLEND
+
+// No constructor version of JIT_GetSharedGCStaticBase. Does not check if class has been
+// initialized.
+HCIMPL1(void*, JIT_GetSharedGCStaticBaseNoCtor_Portable, SIZE_T moduleDomainID)
+{
+ FCALL_CONTRACT;
+
+ DomainLocalModule *pLocalModule = NULL;
+
+ if (!Module::IsEncodedModuleIndex(moduleDomainID))
+ pLocalModule = (DomainLocalModule *) moduleDomainID;
+ else
+ {
+ DomainLocalBlock *pLocalBlock = GetAppDomain()->GetDomainLocalBlock();
+ pLocalModule = pLocalBlock->GetModuleSlot(Module::IDToIndex(moduleDomainID));
+ }
+
+ return (void*)pLocalModule->GetPrecomputedGCStaticsBasePointer();
+}
+HCIMPLEND
+
+#include <optdefault.h>
+
+
+// The following two functions can be tail called from platform-dependent versions of
+// JIT_GetSharedGCStaticBase and JIT_GetSharedNonGCStaticBase
+HCIMPL2(void*, JIT_GetSharedNonGCStaticBase_Helper, DomainLocalModule *pLocalModule, DWORD dwClassDomainID)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ // Obtain Method table
+ MethodTable * pMT = pLocalModule->GetMethodTableFromClassDomainID(dwClassDomainID);
+
+ PREFIX_ASSUME(pMT != NULL);
+ pMT->CheckRunClassInitThrowing();
+ HELPER_METHOD_FRAME_END();
+
+ return (void*)pLocalModule->GetPrecomputedNonGCStaticsBasePointer();
+}
+HCIMPLEND
+
+HCIMPL2(void*, JIT_GetSharedGCStaticBase_Helper, DomainLocalModule *pLocalModule, DWORD dwClassDomainID)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ // Obtain Method table
+ MethodTable * pMT = pLocalModule->GetMethodTableFromClassDomainID(dwClassDomainID);
+
+ PREFIX_ASSUME(pMT != NULL);
+ pMT->CheckRunClassInitThrowing();
+ HELPER_METHOD_FRAME_END();
+
+ return (void*)pLocalModule->GetPrecomputedGCStaticsBasePointer();
+}
+HCIMPLEND
+
+/*********************************************************************/
+// Slow helper to tail call from the fast one
+HCIMPL2(void*, JIT_GetSharedNonGCStaticBaseDynamicClass_Helper, DomainLocalModule *pLocalModule, DWORD dwDynamicClassDomainID)
+{
+ FCALL_CONTRACT;
+
+ void* result = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ MethodTable *pMT = pLocalModule->GetDomainFile()->GetModule()->GetDynamicClassMT(dwDynamicClassDomainID);
+ _ASSERTE(pMT);
+
+ pMT->CheckRunClassInitThrowing();
+
+ result = (void*)pLocalModule->GetDynamicEntryNonGCStaticsBasePointer(dwDynamicClassDomainID, pMT->GetLoaderAllocator());
+ HELPER_METHOD_FRAME_END();
+
+ return result;
+}
+HCIMPLEND
+
+/*************************************************************/
+#include <optsmallperfcritical.h>
+HCIMPL2(void*, JIT_GetSharedNonGCStaticBaseDynamicClass, SIZE_T moduleDomainID, DWORD dwDynamicClassDomainID)
+{
+ FCALL_CONTRACT;
+
+ DomainLocalModule *pLocalModule;
+
+ if (!Module::IsEncodedModuleIndex(moduleDomainID))
+ pLocalModule = (DomainLocalModule *) moduleDomainID;
+ else
+ {
+ DomainLocalBlock *pLocalBlock = GetAppDomain()->GetDomainLocalBlock();
+ pLocalModule = pLocalBlock->GetModuleSlot(Module::IDToIndex(moduleDomainID));
+ }
+
+ DomainLocalModule::PTR_DynamicClassInfo pLocalInfo = pLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
+ if (pLocalInfo != NULL)
+ {
+ PTR_BYTE retval;
+ GET_DYNAMICENTRY_NONGCSTATICS_BASEPOINTER(pLocalModule->GetDomainFile()->GetModule()->GetLoaderAllocator(),
+ pLocalInfo,
+ &retval);
+
+ return retval;
+ }
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL2(JIT_GetSharedNonGCStaticBaseDynamicClass_Helper, pLocalModule, dwDynamicClassDomainID);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*************************************************************/
+// Slow helper to tail call from the fast one
+HCIMPL2(void, JIT_ClassInitDynamicClass_Helper, DomainLocalModule *pLocalModule, DWORD dwDynamicClassDomainID)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ MethodTable *pMT = pLocalModule->GetDomainFile()->GetModule()->GetDynamicClassMT(dwDynamicClassDomainID);
+ _ASSERTE(pMT);
+
+ pMT->CheckRunClassInitThrowing();
+
+ HELPER_METHOD_FRAME_END();
+
+ return;
+}
+HCIMPLEND
+
+#include <optsmallperfcritical.h>
+HCIMPL2(void, JIT_ClassInitDynamicClass, SIZE_T moduleDomainID, DWORD dwDynamicClassDomainID)
+{
+ FCALL_CONTRACT;
+
+ DomainLocalModule *pLocalModule;
+
+ if (!Module::IsEncodedModuleIndex(moduleDomainID))
+ pLocalModule = (DomainLocalModule *) moduleDomainID;
+ else
+ {
+ DomainLocalBlock *pLocalBlock = GetAppDomain()->GetDomainLocalBlock();
+ pLocalModule = pLocalBlock->GetModuleSlot(Module::IDToIndex(moduleDomainID));
+ }
+
+ DomainLocalModule::PTR_DynamicClassInfo pLocalInfo = pLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
+ if (pLocalInfo != NULL)
+ {
+ return;
+ }
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL2(JIT_ClassInitDynamicClass_Helper, pLocalModule, dwDynamicClassDomainID);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*************************************************************/
+// Slow helper to tail call from the fast one
+HCIMPL2(void*, JIT_GetSharedGCStaticBaseDynamicClass_Helper, DomainLocalModule *pLocalModule, DWORD dwDynamicClassDomainID)
+{
+ FCALL_CONTRACT;
+
+ void* result = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ MethodTable *pMT = pLocalModule->GetDomainFile()->GetModule()->GetDynamicClassMT(dwDynamicClassDomainID);
+ _ASSERTE(pMT);
+
+ pMT->CheckRunClassInitThrowing();
+
+ result = (void*)pLocalModule->GetDynamicEntryGCStaticsBasePointer(dwDynamicClassDomainID, pMT->GetLoaderAllocator());
+ HELPER_METHOD_FRAME_END();
+
+ return result;
+}
+HCIMPLEND
+
+/*************************************************************/
+#include <optsmallperfcritical.h>
+HCIMPL2(void*, JIT_GetSharedGCStaticBaseDynamicClass, SIZE_T moduleDomainID, DWORD dwDynamicClassDomainID)
+{
+ FCALL_CONTRACT;
+
+ DomainLocalModule *pLocalModule;
+
+ if (!Module::IsEncodedModuleIndex(moduleDomainID))
+ pLocalModule = (DomainLocalModule *) moduleDomainID;
+ else
+ {
+ DomainLocalBlock *pLocalBlock = GetAppDomain()->GetDomainLocalBlock();
+ pLocalModule = pLocalBlock->GetModuleSlot(Module::IDToIndex(moduleDomainID));
+ }
+
+ DomainLocalModule::PTR_DynamicClassInfo pLocalInfo = pLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
+ if (pLocalInfo != NULL)
+ {
+ PTR_BYTE retval;
+ GET_DYNAMICENTRY_GCSTATICS_BASEPOINTER(pLocalModule->GetDomainFile()->GetModule()->GetLoaderAllocator(),
+ pLocalInfo,
+ &retval);
+
+ return retval;
+ }
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL2(JIT_GetSharedGCStaticBaseDynamicClass_Helper, pLocalModule, dwDynamicClassDomainID);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*********************************************************************/
+// Slow helper to tail call from the fast one
+NOINLINE HCIMPL1(void*, JIT_GetGenericsGCStaticBase_Framed, MethodTable *pMT)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(pMT->HasGenericsStaticsInfo());
+ } CONTRACTL_END;
+
+ void* base = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ pMT->CheckRestore();
+
+ pMT->CheckRunClassInitThrowing();
+
+ base = (void*) pMT->GetGCStaticsBasePointer();
+ CONSISTENCY_CHECK(base != NULL);
+
+ HELPER_METHOD_FRAME_END();
+
+ return base;
+}
+HCIMPLEND
+
+/*********************************************************************/
+#include <optsmallperfcritical.h>
+HCIMPL1(void*, JIT_GetGenericsGCStaticBase, MethodTable *pMT)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(pMT->HasGenericsStaticsInfo());
+ } CONTRACTL_END;
+
+ DWORD dwDynamicClassDomainID;
+ PTR_Module pModuleForStatics = pMT->GetGenericsStaticsModuleAndID(&dwDynamicClassDomainID);
+
+ DomainLocalModule *pLocalModule = pModuleForStatics->GetDomainLocalModule();
+ _ASSERTE(pLocalModule);
+
+ DomainLocalModule::PTR_DynamicClassInfo pLocalInfo = pLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
+ if (pLocalInfo != NULL)
+ {
+ PTR_BYTE retval;
+ GET_DYNAMICENTRY_GCSTATICS_BASEPOINTER(pMT->GetLoaderAllocator(),
+ pLocalInfo,
+ &retval);
+
+ return retval;
+ }
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL1(JIT_GetGenericsGCStaticBase_Framed, pMT);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*********************************************************************/
+// Slow helper to tail call from the fast one
+NOINLINE HCIMPL1(void*, JIT_GetGenericsNonGCStaticBase_Framed, MethodTable *pMT)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(pMT->HasGenericsStaticsInfo());
+ } CONTRACTL_END;
+
+ void* base = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ pMT->CheckRestore();
+
+ // If pMT refers to a method table that requires some initialization work,
+ // then pMT cannot refer to a method table that is shared by generic instantiations,
+ // because method tables that are shared by generic instantiations do not have
+ // a base for statics to live in.
+ _ASSERTE(pMT->IsClassPreInited() || !pMT->IsSharedByGenericInstantiations());
+
+ pMT->CheckRunClassInitThrowing();
+
+ // We could just return null here instead of returning base when this helper is called just to trigger the cctor
+ base = (void*) pMT->GetNonGCStaticsBasePointer();
+
+ HELPER_METHOD_FRAME_END();
+
+ return base;
+}
+HCIMPLEND
+
+/*********************************************************************/
+#include <optsmallperfcritical.h>
+HCIMPL1(void*, JIT_GetGenericsNonGCStaticBase, MethodTable *pMT)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(pMT->HasGenericsStaticsInfo());
+ } CONTRACTL_END;
+
+ // This fast path will typically always be taken once the slow framed path below
+ // has executed once. Sometimes the slow path will be executed more than once,
+ // e.g. if static fields are accessed during the call to CheckRunClassInitThrowing()
+ // in the slow path.
+
+ DWORD dwDynamicClassDomainID;
+ PTR_Module pModuleForStatics = pMT->GetGenericsStaticsModuleAndID(&dwDynamicClassDomainID);
+
+ DomainLocalModule *pLocalModule = pModuleForStatics->GetDomainLocalModule();
+ _ASSERTE(pLocalModule);
+
+ DomainLocalModule::PTR_DynamicClassInfo pLocalInfo = pLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
+ if (pLocalInfo != NULL)
+ {
+ PTR_BYTE retval;
+ GET_DYNAMICENTRY_NONGCSTATICS_BASEPOINTER(pMT->GetLoaderAllocator(),
+ pLocalInfo,
+ &retval);
+
+ return retval;
+ }
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL1(JIT_GetGenericsNonGCStaticBase_Framed, pMT);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+
+//========================================================================
+//
+// THREAD STATIC FIELD HELPERS
+//
+//========================================================================
+
+
+// *** These framed helpers get called if allocation needs to occur or
+// if the class constructor needs to run
+
+HCIMPL1(void*, JIT_GetNonGCThreadStaticBase_Helper, MethodTable * pMT)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ } CONTRACTL_END;
+
+ void* base = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ // For generics, we need to call CheckRestore() to make sure the method table is fully restored
+ if (pMT->HasGenericsStaticsInfo())
+ pMT->CheckRestore();
+
+ // Get the TLM
+ ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLM(pMT);
+ _ASSERTE(pThreadLocalModule != NULL);
+
+ // Check if the class constructor needs to be run
+ pThreadLocalModule->CheckRunClassInitThrowing(pMT);
+
+ // Lookup the non-GC statics base pointer
+ base = (void*) pMT->GetNonGCThreadStaticsBasePointer();
+ CONSISTENCY_CHECK(base != NULL);
+
+ HELPER_METHOD_FRAME_END();
+
+ return base;
+}
+HCIMPLEND
+
+HCIMPL1(void*, JIT_GetGCThreadStaticBase_Helper, MethodTable * pMT)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ } CONTRACTL_END;
+
+ void* base = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ // For generics, we need to call CheckRestore() to make sure the method table is fully restored
+ if (pMT->HasGenericsStaticsInfo())
+ pMT->CheckRestore();
+
+ // Get the TLM
+ ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLM(pMT);
+ _ASSERTE(pThreadLocalModule != NULL);
+
+ // Check if the class constructor needs to be run
+ pThreadLocalModule->CheckRunClassInitThrowing(pMT);
+
+ // Lookup the GC statics base pointer
+ base = (void*) pMT->GetGCThreadStaticsBasePointer();
+ CONSISTENCY_CHECK(base != NULL);
+
+ HELPER_METHOD_FRAME_END();
+
+ return base;
+}
+HCIMPLEND
+
+
+// *** This helper corresponds to both CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE and
+// CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR. Even though we always check
+// if the class constructor has been run, we have a separate helper ID for the "no ctor"
+// version because it allows the JIT to do some reordering that otherwise wouldn't be
+// possible.
+
+#include <optsmallperfcritical.h>
+HCIMPL2(void*, JIT_GetSharedNonGCThreadStaticBase, SIZE_T moduleDomainID, DWORD dwClassDomainID)
+{
+ FCALL_CONTRACT;
+
+ // Get the ModuleIndex
+ ModuleIndex index =
+ (Module::IsEncodedModuleIndex(moduleDomainID)) ?
+ Module::IDToIndex(moduleDomainID) :
+ ((DomainLocalModule *)moduleDomainID)->GetModuleIndex();
+
+ // Get the relevant ThreadLocalModule
+ ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLMIfExists(index);
+
+ // If the TLM has been allocated and the class has been marked as initialized,
+ // get the pointer to the non-GC statics base and return
+ if (pThreadLocalModule != NULL && pThreadLocalModule->IsPrecomputedClassInitialized(dwClassDomainID))
+ return (void*)pThreadLocalModule->GetPrecomputedNonGCStaticsBasePointer();
+
+ // If the TLM was not allocated or if the class was not marked as initialized
+ // then we have to go through the slow path
+
+ // Get the DomainLocalModule
+ DomainLocalModule *pDomainLocalModule =
+ (Module::IsEncodedModuleIndex(moduleDomainID)) ?
+ GetAppDomain()->GetDomainLocalBlock()->GetModuleSlot(Module::IDToIndex(moduleDomainID)) :
+ (DomainLocalModule *) moduleDomainID;
+
+ // Obtain the MethodTable
+ MethodTable * pMT = pDomainLocalModule->GetMethodTableFromClassDomainID(dwClassDomainID);
+ _ASSERTE(!pMT->HasGenericsStaticsInfo());
+
+ ENDFORBIDGC();
+ return HCCALL1(JIT_GetNonGCThreadStaticBase_Helper, pMT);
+}
+HCIMPLEND
+#include <optdefault.h>
+
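+// Shape of the thread-static fast path above (illustrative summary): resolve
+// the ModuleIndex, find this thread's ThreadLocalModule, and return the
+// precomputed statics base if the class is already initialized:
+//
+//     ThreadLocalModule * pTLM = ThreadStatics::GetTLMIfExists(index);
+//     if (pTLM != NULL && pTLM->IsPrecomputedClassInitialized(dwClassDomainID))
+//         return (void*)pTLM->GetPrecomputedNonGCStaticsBasePointer();
+//
+// Only when the TLM is missing or the class is uninitialized do we resolve
+// the MethodTable and take the framed helper, which allocates the TLM and
+// runs the class constructor as needed.
+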
+// *** This helper corresponds to both CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE and
+// CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR. Even though we always check
+// if the class constructor has been run, we have a separate helper ID for the "no ctor"
+// version because it allows the JIT to do some reordering that otherwise wouldn't be
+// possible.
+
+#include <optsmallperfcritical.h>
+HCIMPL2(void*, JIT_GetSharedGCThreadStaticBase, SIZE_T moduleDomainID, DWORD dwClassDomainID)
+{
+ FCALL_CONTRACT;
+
+ // Get the ModuleIndex
+ ModuleIndex index =
+ (Module::IsEncodedModuleIndex(moduleDomainID)) ?
+ Module::IDToIndex(moduleDomainID) :
+ ((DomainLocalModule *)moduleDomainID)->GetModuleIndex();
+
+ // Get the relevant ThreadLocalModule
+ ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLMIfExists(index);
+
+ // If the TLM has been allocated and the class has been marked as initialized,
+ // get the pointer to the GC statics base and return
+ if (pThreadLocalModule != NULL && pThreadLocalModule->IsPrecomputedClassInitialized(dwClassDomainID))
+ return (void*)pThreadLocalModule->GetPrecomputedGCStaticsBasePointer();
+
+ // If the TLM was not allocated or if the class was not marked as initialized
+ // then we have to go through the slow path
+
+ // Get the DomainLocalModule
+ DomainLocalModule *pDomainLocalModule =
+ (Module::IsEncodedModuleIndex(moduleDomainID)) ?
+ GetAppDomain()->GetDomainLocalBlock()->GetModuleSlot(Module::IDToIndex(moduleDomainID)) :
+ (DomainLocalModule *) moduleDomainID;
+
+ // Obtain the MethodTable
+ MethodTable * pMT = pDomainLocalModule->GetMethodTableFromClassDomainID(dwClassDomainID);
+ _ASSERTE(!pMT->HasGenericsStaticsInfo());
+
+ ENDFORBIDGC();
+ return HCCALL1(JIT_GetGCThreadStaticBase_Helper, pMT);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+// *** This helper corresponds to CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS
+
+#include <optsmallperfcritical.h>
+HCIMPL2(void*, JIT_GetSharedNonGCThreadStaticBaseDynamicClass, SIZE_T moduleDomainID, DWORD dwDynamicClassDomainID)
+{
+ FCALL_CONTRACT;
+
+ // Get the ModuleIndex
+ ModuleIndex index =
+ (Module::IsEncodedModuleIndex(moduleDomainID)) ?
+ Module::IDToIndex(moduleDomainID) :
+ ((DomainLocalModule *)moduleDomainID)->GetModuleIndex();
+
+ // Get the relevant ThreadLocalModule
+ ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLMIfExists(index);
+
+ // If the TLM has been allocated and the class has been marked as initialized,
+ // get the pointer to the non-GC statics base and return
+ if (pThreadLocalModule != NULL)
+ {
+ ThreadLocalModule::PTR_DynamicClassInfo pLocalInfo = pThreadLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
+ if (pLocalInfo != NULL)
+ return (void*)pLocalInfo->m_pDynamicEntry->GetNonGCStaticsBasePointer();
+ }
+
+ // If the TLM was not allocated or if the class was not marked as initialized
+ // then we have to go through the slow path
+
+ // Obtain the DomainLocalModule
+ DomainLocalModule *pDomainLocalModule =
+ (Module::IsEncodedModuleIndex(moduleDomainID)) ?
+ GetAppDomain()->GetDomainLocalBlock()->GetModuleSlot(Module::IDToIndex(moduleDomainID)) :
+ (DomainLocalModule *) moduleDomainID;
+
+ // Obtain the Module
+ Module * pModule = pDomainLocalModule->GetDomainFile()->GetModule();
+
+ // Obtain the MethodTable
+ MethodTable * pMT = pModule->GetDynamicClassMT(dwDynamicClassDomainID);
+ _ASSERTE(pMT != NULL);
+ _ASSERTE(!pMT->IsSharedByGenericInstantiations());
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+
+ return HCCALL1(JIT_GetNonGCThreadStaticBase_Helper, pMT);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+// *** This helper corresponds to CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS
+
+#include <optsmallperfcritical.h>
+HCIMPL2(void*, JIT_GetSharedGCThreadStaticBaseDynamicClass, SIZE_T moduleDomainID, DWORD dwDynamicClassDomainID)
+{
+ FCALL_CONTRACT;
+
+ // Get the ModuleIndex
+ ModuleIndex index =
+ (Module::IsEncodedModuleIndex(moduleDomainID)) ?
+ Module::IDToIndex(moduleDomainID) :
+ ((DomainLocalModule *)moduleDomainID)->GetModuleIndex();
+
+ // Get the relevant ThreadLocalModule
+ ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLMIfExists(index);
+
+ // If the TLM has been allocated and the class has been marked as initialized,
+ // get the pointer to the GC statics base and return
+ if (pThreadLocalModule != NULL)
+ {
+ ThreadLocalModule::PTR_DynamicClassInfo pLocalInfo = pThreadLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
+ if (pLocalInfo != NULL)
+ return (void*)pLocalInfo->m_pDynamicEntry->GetGCStaticsBasePointer();
+ }
+
+ // If the TLM was not allocated or if the class was not marked as initialized
+ // then we have to go through the slow path
+
+ // Obtain the DomainLocalModule
+ DomainLocalModule *pDomainLocalModule =
+ (Module::IsEncodedModuleIndex(moduleDomainID)) ?
+ GetAppDomain()->GetDomainLocalBlock()->GetModuleSlot(Module::IDToIndex(moduleDomainID)) :
+ (DomainLocalModule *) moduleDomainID;
+
+ // Obtain the Module
+ Module * pModule = pDomainLocalModule->GetDomainFile()->GetModule();
+
+ // Obtain the MethodTable
+ MethodTable * pMT = pModule->GetDynamicClassMT(dwDynamicClassDomainID);
+ _ASSERTE(pMT != NULL);
+ _ASSERTE(!pMT->IsSharedByGenericInstantiations());
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL1(JIT_GetGCThreadStaticBase_Helper, pMT);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+// *** This helper corresponds to CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE
+
+#include <optsmallperfcritical.h>
+HCIMPL1(void*, JIT_GetGenericsNonGCThreadStaticBase, MethodTable *pMT)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(pMT->HasGenericsStaticsInfo());
+ } CONTRACTL_END;
+
+ // This fast path will typically always be taken once the slow framed path below
+ // has executed once. Sometimes the slow path will be executed more than once,
+ // e.g. if static fields are accessed during the call to CheckRunClassInitThrowing()
+ // in the slow path.
+
+ // Get the Module and dynamic class ID
+ DWORD dwDynamicClassDomainID;
+ PTR_Module pModule = pMT->GetGenericsStaticsModuleAndID(&dwDynamicClassDomainID);
+
+ // Get ModuleIndex
+ ModuleIndex index = pModule->GetModuleIndex();
+
+ // Get the relevant ThreadLocalModule
+ ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLMIfExists(index);
+
+ // If the TLM has been allocated and the class has been marked as initialized,
+ // get the pointer to the non-GC statics base and return
+ if (pThreadLocalModule != NULL)
+ {
+ ThreadLocalModule::PTR_DynamicClassInfo pLocalInfo = pThreadLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
+ if (pLocalInfo != NULL)
+ return (void*)pLocalInfo->m_pDynamicEntry->GetNonGCStaticsBasePointer();
+ }
+
+ // If the TLM was not allocated or if the class was not marked as initialized
+ // then we have to go through the slow path
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL1(JIT_GetNonGCThreadStaticBase_Helper, pMT);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+// *** This helper corresponds to CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE
+
+#include <optsmallperfcritical.h>
+HCIMPL1(void*, JIT_GetGenericsGCThreadStaticBase, MethodTable *pMT)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(pMT->HasGenericsStaticsInfo());
+ } CONTRACTL_END;
+
+ // This fast path will typically always be taken once the slow framed path below
+ // has executed once. Sometimes the slow path will be executed more than once,
+ // e.g. if static fields are accessed during the call to CheckRunClassInitThrowing()
+ // in the slow path.
+
+ // Get the Module and dynamic class ID
+ DWORD dwDynamicClassDomainID;
+ PTR_Module pModule = pMT->GetGenericsStaticsModuleAndID(&dwDynamicClassDomainID);
+
+ // Get ModuleIndex
+ ModuleIndex index = pModule->GetModuleIndex();
+
+ // Get the relevant ThreadLocalModule
+ ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLMIfExists(index);
+
+ // If the TLM has been allocated and the class has been marked as initialized,
+ // get the pointer to the GC statics base and return
+ if (pThreadLocalModule != NULL)
+ {
+ ThreadLocalModule::PTR_DynamicClassInfo pLocalInfo = pThreadLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
+ if (pLocalInfo != NULL)
+ return (void*)pLocalInfo->m_pDynamicEntry->GetGCStaticsBasePointer();
+ }
+
+ // If the TLM was not allocated or if the class was not marked as initialized
+ // then we have to go through the slow path
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL1(JIT_GetGCThreadStaticBase_Helper, pMT);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+//========================================================================
+//
+// STATIC FIELD DYNAMIC HELPERS
+//
+//========================================================================
+
+#include <optsmallperfcritical.h>
+HCIMPL1_RAW(TADDR, JIT_StaticFieldAddress_Dynamic, StaticFieldAddressArgs * pArgs)
+{
+ FCALL_CONTRACT;
+
+ TADDR base = HCCALL2(pArgs->staticBaseHelper, pArgs->arg0, pArgs->arg1);
+ return base + pArgs->offset;
+}
+HCIMPLEND_RAW
+#include <optdefault.h>
+
+#include <optsmallperfcritical.h>
+HCIMPL1_RAW(TADDR, JIT_StaticFieldAddressUnbox_Dynamic, StaticFieldAddressArgs * pArgs)
+{
+ FCALL_CONTRACT;
+
+ TADDR base = HCCALL2(pArgs->staticBaseHelper, pArgs->arg0, pArgs->arg1);
+ return *(TADDR *)(base + pArgs->offset) + Object::GetOffsetOfFirstField();
+}
+HCIMPLEND_RAW
+#include <optdefault.h>
+
+//========================================================================
+//
+// CASTING HELPERS
+//
+//========================================================================
+
+// pObject MUST be an instance of an array.
+TypeHandle::CastResult ArrayIsInstanceOfNoGC(Object *pObject, TypeHandle toTypeHnd)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pObject));
+ PRECONDITION(pObject->GetMethodTable()->IsArray());
+ PRECONDITION(toTypeHnd.IsArray());
+ } CONTRACTL_END;
+
+ ArrayBase *pArray = (ArrayBase*) pObject;
+ ArrayTypeDesc *toArrayType = toTypeHnd.AsArray();
+
+ // GetRank touches EEClass. Try to avoid it for SZArrays.
+ if (toArrayType->GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY)
+ {
+ if (pArray->GetMethodTable()->IsMultiDimArray())
+ return TypeHandle::CannotCast;
+ }
+ else
+ {
+ if (pArray->GetRank() != toArrayType->GetRank())
+ return TypeHandle::CannotCast;
+ }
+ _ASSERTE(pArray->GetRank() == toArrayType->GetRank());
+
+ // ArrayBase::GetTypeHandle consults the loader tables to find the
+ // exact type handle for an array object. This can be disproportionately slow - but after
+ // all, why should we need to go looking up hash tables just to do a cast test?
+ //
+ // Thus we special-case the casting logic to avoid fetching this
+ // exact type handle wherever we can. So far we have only done so for one
+ // particular case, i.e. when we are trying to cast to an array type where
+ // there is an exact match between the rank, kind and element type of the two
+ // array types. This happens when, for example, assigning an int32[] into an int32[][].
+ //
+
+ TypeHandle elementTypeHandle = pArray->GetArrayElementTypeHandle();
+ TypeHandle toElementTypeHandle = toArrayType->GetArrayElementTypeHandle();
+
+ if (elementTypeHandle == toElementTypeHandle)
+ return TypeHandle::CanCast;
+
+ // By this point we know that toArrayType->GetInternalCorElementType matches the element type of the Array object
+ // so we can use a faster constructor to create the TypeDesc. (It so happens that ArrayTypeDesc derives from ParamTypeDesc
+ // and can be created as identical in a slightly faster way with the following set of parameters.)
+ ParamTypeDesc arrayType(toArrayType->GetInternalCorElementType(), pArray->GetMethodTable(), elementTypeHandle);
+ return arrayType.CanCastToNoGC(toTypeHnd);
+}
+
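+// Illustrative cases for ArrayIsInstanceOfNoGC: casting an int32[] to
+// int32[] hits the exact-element fast path; casting a string[] to object[]
+// has different element type handles, so it falls through to the
+// stack-allocated ParamTypeDesc and the array covariance rules in
+// CanCastToNoGC.
+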
+// pObject MUST be an instance of an array.
+TypeHandle::CastResult ArrayObjSupportsBizarreInterfaceNoGC(Object *pObject, MethodTable * pInterfaceMT)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pObject));
+ PRECONDITION(pObject->GetMethodTable()->IsArray());
+ PRECONDITION(pInterfaceMT->IsInterface());
+ } CONTRACTL_END;
+
+ ArrayBase *pArray = (ArrayBase*) pObject;
+
+ // IList<T> & IReadOnlyList<T> only supported for SZ_ARRAYS
+ if (pArray->GetMethodTable()->IsMultiDimArray())
+ return TypeHandle::CannotCast;
+
+ if (pInterfaceMT->GetLoadLevel() < CLASS_DEPENDENCIES_LOADED)
+ {
+ if (!pInterfaceMT->HasInstantiation())
+ return TypeHandle::CannotCast;
+ // The slow path will take care of restoring the interface
+ return TypeHandle::MaybeCast;
+ }
+
+ if (!IsImplicitInterfaceOfSZArray(pInterfaceMT))
+ return TypeHandle::CannotCast;
+
+ return TypeDesc::CanCastParamNoGC(pArray->GetArrayElementTypeHandle(), pInterfaceMT->GetInstantiation()[0]);
+}
+
+TypeHandle::CastResult STDCALL ObjIsInstanceOfNoGC(Object *pObject, TypeHandle toTypeHnd)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pObject));
+ } CONTRACTL_END;
+
+
+ MethodTable *pMT = pObject->GetMethodTable();
+
+ // Quick exact match first
+ if (TypeHandle(pMT) == toTypeHnd)
+ return TypeHandle::CanCast;
+
+ if (pMT->IsTransparentProxy() ||
+ (pMT->IsComObjectType() && toTypeHnd.IsInterface()))
+ return TypeHandle::MaybeCast;
+
+ if (pMT->IsArray())
+ {
+ if (toTypeHnd.IsArray())
+ return ArrayIsInstanceOfNoGC(pObject, toTypeHnd);
+
+ if (toTypeHnd.IsInterface())
+ {
+ MethodTable * pInterfaceMT = toTypeHnd.AsMethodTable();
+ if (pInterfaceMT->HasInstantiation())
+ return ArrayObjSupportsBizarreInterfaceNoGC(pObject, pInterfaceMT);
+ return pMT->ImplementsInterface(pInterfaceMT) ? TypeHandle::CanCast : TypeHandle::CannotCast;
+ }
+
+ if (toTypeHnd == TypeHandle(g_pObjectClass) || toTypeHnd == TypeHandle(g_pArrayClass))
+ return TypeHandle::CanCast;
+
+ return TypeHandle::CannotCast;
+ }
+
+ if (toTypeHnd.IsTypeDesc())
+ return TypeHandle::CannotCast;
+
+ // allow an object of type T to be cast to Nullable<T> (they have the same representation)
+ if (Nullable::IsNullableForTypeNoGC(toTypeHnd, pMT))
+ return TypeHandle::CanCast;
+
+ return pMT->CanCastToClassOrInterfaceNoGC(toTypeHnd.AsMethodTable());
+}
+
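+// Callers treat the tri-state result of ObjIsInstanceOfNoGC as "definitely
+// castable / definitely not / cannot tell without triggering a GC". A typical
+// frameless caller looks like this (sketch mirroring JIT_IsInstanceOfAny
+// further below):
+//
+//     switch (ObjIsInstanceOfNoGC(obj, TypeHandle(type))) {
+//     case TypeHandle::CanCast:
+//         return obj;
+//     case TypeHandle::CannotCast:
+//         return NULL;
+//     default:
+//         break;   // MaybeCast: fall through to the framed slow path
+//     }
+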
+BOOL ObjIsInstanceOf(Object *pObject, TypeHandle toTypeHnd)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pObject));
+ } CONTRACTL_END;
+
+ BOOL fCast = FALSE;
+
+ OBJECTREF obj = ObjectToOBJECTREF(pObject);
+
+ GCPROTECT_BEGIN(obj);
+
+ TypeHandle fromTypeHnd = obj->GetTypeHandle();
+
+ // If we are trying to cast a proxy we need to delegate to remoting
+ // services which will determine whether the proxy and the type are compatible.
+#ifdef FEATURE_REMOTING
+ if (fromTypeHnd.IsTransparentProxy())
+ {
+ fCast = CRemotingServices::CheckCast(obj, toTypeHnd);
+ }
+ else
+#endif
+ // Start by doing a quick static cast check to see if the type information captured in
+ // the metadata indicates that the cast is legal.
+ if (fromTypeHnd.CanCastTo(toTypeHnd))
+ {
+ fCast = TRUE;
+ }
+ else
+#ifdef FEATURE_COMINTEROP
+ // If we are casting a COM object from interface then we need to do a check to see
+ // if it implements the interface.
+ if (toTypeHnd.IsInterface() && fromTypeHnd.GetMethodTable()->IsComObjectType())
+ {
+ fCast = ComObject::SupportsInterface(obj, toTypeHnd.AsMethodTable());
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ if (Nullable::IsNullableForType(toTypeHnd, obj->GetMethodTable()))
+ {
+ // allow an object of type T to be cast to Nullable<T> (they have the same representation)
+ fCast = TRUE;
+ }
+
+ GCPROTECT_END();
+
+ return(fCast);
+}
+
+//
+// This optimization is intended for all non-framed casting helpers
+//
+
+#include <optsmallperfcritical.h>
+
+HCIMPL2(Object*, JIT_ChkCastClass_Portable, MethodTable* pTargetMT, Object* pObject)
+{
+ FCALL_CONTRACT;
+
+ //
+ // casts pObject to type pMT
+ //
+
+ if (NULL == pObject)
+ {
+ return NULL;
+ }
+
+ PTR_VOID pMT = pObject->GetMethodTable();
+
+ do {
+ if (pMT == pTargetMT)
+ return pObject;
+
+ pMT = MethodTable::GetParentMethodTableOrIndirection(pMT);
+ } while (pMT);
+
+ ENDFORBIDGC();
+ return HCCALL2(JITutil_ChkCastAny, CORINFO_CLASS_HANDLE(pTargetMT), pObject);
+}
+HCIMPLEND
+
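+// The loop above resolves non-interface class casts by walking the parent
+// chain: for an object of runtime type Derived and pTargetMT == Base, it
+// compares Derived, then Base (a match), without any cast cache. Anything the
+// loop cannot prove (interfaces, proxies, type equivalence) is delegated to
+// JITutil_ChkCastAny (Derived/Base are illustrative names).
+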
+//
+// This helper assumes that the check for the trivial cases has been inlined by the JIT.
+//
+HCIMPL2(Object*, JIT_ChkCastClassSpecial_Portable, MethodTable* pTargetMT, Object* pObject)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ // This assumes that the check for the trivial cases has been inlined by the JIT.
+ PRECONDITION(pObject != NULL);
+ PRECONDITION(pObject->GetMethodTable() != pTargetMT);
+ } CONTRACTL_END;
+
+ PTR_VOID pMT = MethodTable::GetParentMethodTableOrIndirection(pObject->GetMethodTable());
+
+ while (pMT)
+ {
+ if (pMT == pTargetMT)
+ return pObject;
+
+ pMT = MethodTable::GetParentMethodTableOrIndirection(pMT);
+ }
+
+ ENDFORBIDGC();
+ return HCCALL2(JITutil_ChkCastAny, CORINFO_CLASS_HANDLE(pTargetMT), pObject);
+}
+HCIMPLEND
+
+HCIMPL2(Object*, JIT_IsInstanceOfClass_Portable, MethodTable* pTargetMT, Object* pObject)
+{
+ FCALL_CONTRACT;
+
+ //
+ // casts pObject to type pMT
+ //
+
+ if (NULL == pObject)
+ {
+ return NULL;
+ }
+
+ PTR_VOID pMT = pObject->GetMethodTable();
+
+ do {
+ if (pMT == pTargetMT)
+ return pObject;
+
+ pMT = MethodTable::GetParentMethodTableOrIndirection(pMT);
+ } while (pMT);
+
+ if (!pObject->GetMethodTable()->HasTypeEquivalence())
+ {
+ return NULL;
+ }
+
+ ENDFORBIDGC();
+ return HCCALL2(JITutil_IsInstanceOfAny, CORINFO_CLASS_HANDLE(pTargetMT), pObject);
+}
+HCIMPLEND
+
+HCIMPL2(Object*, JIT_ChkCastInterface_Portable, MethodTable *pInterfaceMT, Object* pObject)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(pInterfaceMT->IsInterface());
+ } CONTRACTL_END;
+
+ if (NULL == pObject)
+ {
+ return pObject;
+ }
+
+ if (pObject->GetMethodTable()->ImplementsInterfaceInline(pInterfaceMT))
+ {
+ return pObject;
+ }
+
+ ENDFORBIDGC();
+ return HCCALL2(JITutil_ChkCastInterface, pInterfaceMT, pObject);
+}
+HCIMPLEND
+
+HCIMPL2(Object*, JIT_IsInstanceOfInterface_Portable, MethodTable *pInterfaceMT, Object* pObject)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(pInterfaceMT->IsInterface());
+ } CONTRACTL_END;
+
+ if (NULL == pObject)
+ {
+ return NULL;
+ }
+
+ if (pObject->GetMethodTable()->ImplementsInterfaceInline(pInterfaceMT))
+ {
+ return pObject;
+ }
+
+ if (!pObject->GetMethodTable()->InstanceRequiresNonTrivialInterfaceCast())
+ {
+ return NULL;
+ }
+
+ ENDFORBIDGC();
+ return HCCALL2(JITutil_IsInstanceOfInterface, pInterfaceMT, pObject);
+}
+HCIMPLEND
+
+HCIMPL2(Object *, JIT_ChkCastArray, CORINFO_CLASS_HANDLE type, Object *pObject)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(TypeHandle(type).IsArray());
+ } CONTRACTL_END;
+
+ if (pObject == NULL)
+ {
+ return NULL;
+ }
+
+ OBJECTREF refObj = ObjectToOBJECTREF(pObject);
+ VALIDATEOBJECTREF(refObj);
+
+ TypeHandle::CastResult result = refObj->GetMethodTable()->IsArray() ?
+ ArrayIsInstanceOfNoGC(pObject, TypeHandle(type)) : TypeHandle::CannotCast;
+
+ if (result == TypeHandle::CanCast)
+ {
+ return pObject;
+ }
+
+ ENDFORBIDGC();
+ Object* pRet = HCCALL2(JITutil_ChkCastAny, type, pObject);
+ // Make sure that the fast helper has not lied
+ _ASSERTE(result != TypeHandle::CannotCast);
+ return pRet;
+}
+HCIMPLEND
+
+
+HCIMPL2(Object *, JIT_IsInstanceOfArray, CORINFO_CLASS_HANDLE type, Object *pObject)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(TypeHandle(type).IsArray());
+ } CONTRACTL_END;
+
+ if (pObject == NULL)
+ {
+ return NULL;
+ }
+
+ OBJECTREF refObj = ObjectToOBJECTREF(pObject);
+ VALIDATEOBJECTREF(refObj);
+ MethodTable *pMT = refObj->GetMethodTable();
+
+ if (!pMT->IsArray())
+ {
+ // We know that the clsHnd is an array, so check the object. If it is not an array, return null.
+ return NULL;
+ }
+ else
+ {
+ switch (ArrayIsInstanceOfNoGC(pObject, TypeHandle(type))) {
+ case TypeHandle::CanCast:
+ return pObject;
+ case TypeHandle::CannotCast:
+ return NULL;
+ default:
+ // fall through to the slow helper
+ break;
+ }
+ }
+
+ ENDFORBIDGC();
+ return HCCALL2(JITutil_IsInstanceOfAny, type, pObject);
+}
+HCIMPLEND
+
+/*********************************************************************/
+// IsInstanceOf test used for unusual cases (naked type parameters, variant generic types)
+// Unlike the IsInstanceOfInterface, IsInstanceOfClass, and IsInstanceOfArray functions,
+// this test must deal with all kinds of type tests
+HCIMPL2(Object *, JIT_IsInstanceOfAny, CORINFO_CLASS_HANDLE type, Object* obj)
+{
+ FCALL_CONTRACT;
+
+ if (NULL == obj)
+ {
+ return NULL;
+ }
+
+ switch (ObjIsInstanceOfNoGC(obj, TypeHandle(type))) {
+ case TypeHandle::CanCast:
+ return obj;
+ case TypeHandle::CannotCast:
+ return NULL;
+ default:
+ // fall through to the slow helper
+ break;
+ }
+
+ ENDFORBIDGC();
+ return HCCALL2(JITutil_IsInstanceOfAny, type, obj);
+}
+HCIMPLEND
+
+// ChkCast test used for unusual cases (naked type parameters, variant generic types)
+// Unlike the ChkCastInterface, ChkCastClass, and ChkCastArray functions,
+// this test must deal with all kinds of type tests
+HCIMPL2(Object *, JIT_ChkCastAny, CORINFO_CLASS_HANDLE type, Object *obj)
+{
+ FCALL_CONTRACT;
+
+ if (NULL == obj)
+ {
+ return NULL;
+ }
+
+ TypeHandle::CastResult result = ObjIsInstanceOfNoGC(obj, TypeHandle(type));
+
+ if (result == TypeHandle::CanCast)
+ {
+ return obj;
+ }
+
+ ENDFORBIDGC();
+ Object* pRet = HCCALL2(JITutil_ChkCastAny, type, obj);
+ // Make sure that the fast helper has not lied
+ _ASSERTE(result != TypeHandle::CannotCast);
+ return pRet;
+}
+HCIMPLEND
+
+
+NOINLINE HCIMPL2(Object *, JITutil_IsInstanceOfInterface, MethodTable *pInterfaceMT, Object* obj)
+{
+ FCALL_CONTRACT;
+
+ if (obj->GetMethodTable()->IsArray())
+ {
+ switch (ArrayObjSupportsBizarreInterfaceNoGC(obj, pInterfaceMT)) {
+ case TypeHandle::CanCast:
+ return obj;
+ case TypeHandle::CannotCast:
+ return NULL;
+ default:
+ // fall through to the slow helper
+ break;
+ }
+ }
+
+ ENDFORBIDGC();
+ return HCCALL2(JITutil_IsInstanceOfAny, CORINFO_CLASS_HANDLE(pInterfaceMT), obj);
+
+}
+HCIMPLEND
+
+NOINLINE HCIMPL2(Object *, JITutil_ChkCastInterface, MethodTable *pInterfaceMT, Object *obj)
+{
+ FCALL_CONTRACT;
+
+ if (obj->GetMethodTable()->IsArray())
+ {
+ if (ArrayObjSupportsBizarreInterfaceNoGC(obj, pInterfaceMT) == TypeHandle::CanCast)
+ {
+ return obj;
+ }
+ }
+
+ ENDFORBIDGC();
+ return HCCALL2(JITutil_ChkCastAny, CORINFO_CLASS_HANDLE(pInterfaceMT), obj);
+}
+HCIMPLEND
+
+
+#include <optdefault.h>
+
+
+//
+// Framed helpers
+//
+NOINLINE HCIMPL2(Object *, JITutil_ChkCastAny, CORINFO_CLASS_HANDLE type, Object *obj)
+{
+ FCALL_CONTRACT;
+
+ // This case should be handled by the frameless helper
+ _ASSERTE(obj != NULL);
+
+ OBJECTREF oref = ObjectToOBJECTREF (obj);
+ VALIDATEOBJECTREF(oref);
+
+ TypeHandle clsHnd(type);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(oref);
+ if (!ObjIsInstanceOf(OBJECTREFToObject(oref), clsHnd))
+ COMPlusThrowInvalidCastException(&oref, clsHnd);
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(oref);
+}
+HCIMPLEND
+
+NOINLINE HCIMPL2(Object *, JITutil_IsInstanceOfAny, CORINFO_CLASS_HANDLE type, Object *obj)
+{
+ FCALL_CONTRACT;
+
+ // This case should be handled by the frameless helper
+ _ASSERTE(obj != NULL);
+
+ OBJECTREF oref = ObjectToOBJECTREF (obj);
+ VALIDATEOBJECTREF(oref);
+
+ TypeHandle clsHnd(type);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(oref);
+ if (!ObjIsInstanceOf(OBJECTREFToObject(oref), clsHnd))
+ oref = NULL;
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(oref);
+}
+HCIMPLEND
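+
+// Note: in both framed helpers above, HELPER_METHOD_FRAME_BEGIN_RET_1(oref)
+// reports oref to the GC, so even if the object moves while ObjIsInstanceOf
+// runs, the reference returned via OBJECTREFToObject(oref) stays valid.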
+
+
+
+//========================================================================
+//
+// ALLOCATION HELPERS
+//
+//========================================================================
+
+/*************************************************************/
+HCIMPL1(Object*, JIT_New, CORINFO_CLASS_HANDLE typeHnd_)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF newobj = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
+
+ TypeHandle typeHnd(typeHnd_);
+
+ _ASSERTE(!typeHnd.IsTypeDesc()); // we never use this helper for arrays
+ MethodTable *pMT = typeHnd.AsMethodTable();
+ _ASSERTE(pMT->IsRestored_NoLogging());
+
+#ifdef _DEBUG
+ if (g_pConfig->FastGCStressLevel()) {
+ GetThread()->DisableStressHeap();
+ }
+#endif // _DEBUG
+
+ newobj = AllocateObject(pMT);
+
+ HELPER_METHOD_FRAME_END();
+ return(OBJECTREFToObject(newobj));
+}
+HCIMPLEND
+
+#ifdef FEATURE_REMOTING
+/*************************************************************/
+HCIMPL1(Object*, JIT_NewCrossContext_Portable, CORINFO_CLASS_HANDLE typeHnd_)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF newobj = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
+
+ TypeHandle typeHnd(typeHnd_);
+
+ _ASSERTE(!typeHnd.IsTypeDesc()); // we never use this helper for arrays
+ MethodTable *pMT = typeHnd.AsMethodTable();
+
+ // Don't bother to restore the method table; assume that the prestub of the
+ // constructor will do that check.
+
+#ifdef _DEBUG
+ if (g_pConfig->FastGCStressLevel()) {
+ GetThread()->DisableStressHeap();
+ }
+#endif // _DEBUG
+
+ if (CRemotingServices::RequiresManagedActivation(typeHnd))
+ {
+ if (pMT->IsComObjectType())
+ {
+ newobj = AllocateObject(pMT);
+ }
+ else
+ {
+ // The remoting services determine whether the current context is
+ // appropriate for activation. If the current context is OK, an object
+ // is created; otherwise a proxy is created.
+ newobj = CRemotingServices::CreateProxyOrObject(pMT);
+ }
+ }
+ else
+ {
+ newobj = AllocateObject(pMT);
+ }
+
+ HELPER_METHOD_FRAME_END();
+ return(OBJECTREFToObject(newobj));
+}
+HCIMPLEND
+#endif // FEATURE_REMOTING
+
+
+//========================================================================
+//
+// STRING HELPERS
+//
+//========================================================================
+
+/*********************************************************************/
+/* We don't use HCIMPL macros because this is not a real helper call */
+/* This function just needs mangled arguments like a helper call */
+
+HCIMPL1_RAW(StringObject*, UnframedAllocateString, DWORD stringLength)
+{
+ // This isn't _really_ an FCALL and therefore shouldn't have the
+ // SO_TOLERANT part of the FCALL_CONTRACT, because it is not entered
+ // from managed code.
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;
+ } CONTRACTL_END;
+
+ STRINGREF result;
+ result = SlowAllocateString(stringLength);
+
+ return((StringObject*) OBJECTREFToObject(result));
+}
+HCIMPLEND_RAW
+
+HCIMPL1(StringObject*, FramedAllocateString, DWORD stringLength)
+{
+ FCALL_CONTRACT;
+
+ STRINGREF result = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
+
+ result = SlowAllocateString(stringLength);
+
+ HELPER_METHOD_FRAME_END();
+ return((StringObject*) OBJECTREFToObject(result));
+}
+HCIMPLEND
+
+/*********************************************************************/
+OBJECTHANDLE ConstructStringLiteral(CORINFO_MODULE_HANDLE scopeHnd, mdToken metaTok)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ _ASSERTE(TypeFromToken(metaTok) == mdtString);
+
+ Module* module = GetModule(scopeHnd);
+
+
+ // If our module is ngenned and we're calling this API, it means that we're not going through
+ // the fixup mechanism for strings. This can happen in two ways:
+ //
+ // a) Lazy string object construction: This happens when the JIT decides that initializing a
+ // string via fixup on method entry is very expensive. This is normally done for strings
+ // that appear in rarely executed blocks, such as throw blocks.
+ //
+ // b) The ngen image isn't complete (it's missing classes), therefore we're jitting methods.
+ //
+ // If we went ahead and called ResolveStringRef directly, we would be breaking the per-module
+ // interning we're guaranteeing, so we have to detect this case and handle it appropriately.
+#ifdef FEATURE_PREJIT
+ if (module->HasNativeImage() && module->IsNoStringInterning())
+ {
+ return module->ResolveStringRef(metaTok, module->GetAssembly()->Parent(), true);
+ }
+#endif
+ return module->ResolveStringRef(metaTok, module->GetAssembly()->Parent(), false);
+}
+
+/*********************************************************************/
+HCIMPL2(Object *, JIT_StrCns, unsigned rid, CORINFO_MODULE_HANDLE scopeHnd)
+{
+ FCALL_CONTRACT;
+
+ OBJECTHANDLE hndStr = 0;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ // Retrieve the handle to the COM+ string object.
+ hndStr = ConstructStringLiteral(scopeHnd, RidToToken(rid, mdtString));
+ HELPER_METHOD_FRAME_END();
+
+ // Don't use ObjectFromHandle; this isn't a real handle
+ return *(Object**)hndStr;
+}
+HCIMPLEND
+
+
+
+//========================================================================
+//
+// ARRAY HELPERS
+//
+//========================================================================
+
+/*************************************************************/
+HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF newArray = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
+
+ TypeHandle typeHnd(arrayTypeHnd_);
+
+ _ASSERTE(typeHnd.GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY);
+ typeHnd.CheckRestore();
+ ArrayTypeDesc* pArrayClassRef = typeHnd.AsArray();
+
+ if (size < 0)
+ COMPlusThrow(kOverflowException);
+
+#ifdef _WIN64
+ // Even though ECMA allows using a native int as the argument to the newarr instruction
+ // (therefore size is INT_PTR), ArrayBase::m_NumComponents is 32-bit, so even on 64-bit
+ // platforms we can't create an array whose size exceeds 32 bits.
+ if (size > INT_MAX)
+ EX_THROW(EEMessageException, (kOverflowException, IDS_EE_ARRAY_DIMENSIONS_EXCEEDED));
+#endif
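+
+ // Example (illustrative): on a 64-bit platform, a 'newarr' with a native int
+ // count of 0x100000000 reaches the check above and throws, because the
+ // 32-bit m_NumComponents field cannot represent the count.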
+
+ //
+ // is this a primitive type?
+ //
+
+ CorElementType elemType = pArrayClassRef->GetArrayElementTypeHandle().GetSignatureCorElementType();
+
+ if (CorTypeInfo::IsPrimitiveType(elemType)
+#ifdef FEATURE_64BIT_ALIGNMENT
+ // On platforms where 64-bit types require 64-bit alignment and don't obtain it naturally,
+ // force us through the slow path, where this will be handled.
+ && (elemType != ELEMENT_TYPE_I8)
+ && (elemType != ELEMENT_TYPE_U8)
+ && (elemType != ELEMENT_TYPE_R8)
+#endif
+ ) {
+#ifdef _DEBUG
+ if (g_pConfig->FastGCStressLevel()) {
+ GetThread()->DisableStressHeap();
+ }
+#endif // _DEBUG
+
+ // Disallow the creation of void[] (an array of System.Void)
+ if (elemType == ELEMENT_TYPE_VOID)
+ COMPlusThrow(kArgumentException);
+
+ BOOL bAllocateInLargeHeap = FALSE;
+#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
+ if ((elemType == ELEMENT_TYPE_R8) &&
+ (static_cast<DWORD>(size) >= g_pConfig->GetDoubleArrayToLargeObjectHeapThreshold()))
+ {
+ STRESS_LOG1(LF_GC, LL_INFO10, "Allocating double array of size %d to large object heap\n", size);
+ bAllocateInLargeHeap = TRUE;
+ }
+#endif
+
+ if (g_pPredefinedArrayTypes[elemType] == NULL)
+ g_pPredefinedArrayTypes[elemType] = pArrayClassRef;
+
+ newArray = FastAllocatePrimitiveArray(pArrayClassRef->GetMethodTable(), static_cast<DWORD>(size), bAllocateInLargeHeap);
+ }
+ else
+ {
+#ifdef _DEBUG
+ if (g_pConfig->FastGCStressLevel()) {
+ GetThread()->DisableStressHeap();
+ }
+#endif // _DEBUG
+ INT32 size32 = (INT32)size;
+ newArray = AllocateArrayEx(typeHnd, &size32, 1);
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ return(OBJECTREFToObject(newArray));
+}
+HCIMPLEND
+
+/*********************************************************************
+// Allocate a multi-dimensional array
+*/
+OBJECTREF allocNewMDArr(TypeHandle typeHnd, unsigned dwNumArgs, PVOID args)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(dwNumArgs > 0);
+ } CONTRACTL_END;
+
+ // Get the arguments in the right order
+
+ INT32* fwdArgList;
+
+#ifdef _TARGET_X86_
+ fwdArgList = (INT32*)args;
+
+ // reverse the order
+ INT32* p = fwdArgList;
+ INT32* q = fwdArgList + (dwNumArgs-1);
+ while (p < q)
+ {
+ INT32 t = *p; *p = *q; *q = t;
+ p++; q--;
+ }
+#elif defined(_WIN64)
+ INT64* pArgs = (INT64 *)args;
+ // create an array where fwdArgList[0] == arg[0] ...
+ fwdArgList = (INT32*) _alloca(dwNumArgs * sizeof(INT32));
+ for (unsigned i = 0; i <dwNumArgs; i++)
+ {
+ fwdArgList[i] = (INT32)*(pArgs + i);
+ }
+#else
+ fwdArgList = (INT32*) args;
+#endif
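+
+ // Worked example (illustrative): for a declared int[2,3], AllocateArrayEx
+ // must see the dimensions as {2, 3}. On x86 they arrive reversed and are
+ // swapped in place above; on 64-bit targets they arrive as 64-bit slots and
+ // are narrowed into fwdArgList; elsewhere they are already in order.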
+
+ return AllocateArrayEx(typeHnd, fwdArgList, dwNumArgs);
+}
+
+/*********************************************************************
+// Allocate a multi-dimensional array with lower bounds specified.
+// The caller pushes sizes, or sizes and lower bounds, for every dimension
+*/
+
+HCIMPL2VA(Object*, JIT_NewMDArr, CORINFO_CLASS_HANDLE classHnd, unsigned dwNumArgs)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF ret = 0;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(ret); // Set up a frame
+
+ TypeHandle typeHnd(classHnd);
+ typeHnd.CheckRestore();
+ _ASSERTE(typeHnd.GetMethodTable()->IsArray());
+
+ va_list dimsAndBounds;
+ va_start(dimsAndBounds, dwNumArgs);
+
+ ret = allocNewMDArr(typeHnd, dwNumArgs, (PVOID)dimsAndBounds);
+ va_end(dimsAndBounds);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(ret);
+}
+HCIMPLEND
+
+/*************************************************************/
+/* returns '&array[idx]' after doing all the proper checks */
+
+#include <optsmallperfcritical.h>
+HCIMPL3(void*, JIT_Ldelema_Ref, PtrArray* array, unsigned idx, CORINFO_CLASS_HANDLE type)
+{
+ FCALL_CONTRACT;
+
+ RuntimeExceptionKind except;
+ // This has been carefully arranged to ensure that in the common
+ // case the branches are predicted properly (fall through)
+ // and that we don't spill registers unnecessarily, etc.
+ if (array != 0)
+ if (idx < array->GetNumComponents())
+ if (array->GetArrayElementTypeHandle() == TypeHandle(type))
+ return(&array->m_Array[idx]);
+ else
+ except = kArrayTypeMismatchException;
+ else
+ except = kIndexOutOfRangeException;
+ else
+ except = kNullReferenceException;
+
+ FCThrow(except);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+//===========================================================================
+// This routine is called if the Array store needs a frame constructed
+// in order to do the array check. It should only be called from
+// the array store check helpers.
+
+HCIMPL2(LPVOID, ArrayStoreCheck, Object** pElement, PtrArray** pArray)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, *pElement, *pArray);
+
+ GCStress<cfg_any, EeconfigFastGcSPolicy>::MaybeTrigger();
+
+#if CHECK_APP_DOMAIN_LEAKS
+ if (g_pConfig->AppDomainLeaks())
+ (*pElement)->AssignAppDomain((*pArray)->GetAppDomain());
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+ if (!ObjIsInstanceOf(*pElement, (*pArray)->GetArrayElementTypeHandle()))
+ COMPlusThrow(kArrayTypeMismatchException);
+
+ HELPER_METHOD_FRAME_END();
+
+ return (LPVOID)0; // Used to aid epilog walker
+}
+HCIMPLEND
+
+/****************************************************************************/
+/* assigns 'val' to 'array[idx]' after doing all the proper checks */
+
+HCIMPL3(void, JIT_Stelem_Ref_Portable, PtrArray* array, unsigned idx, Object *val)
+{
+ FCALL_CONTRACT;
+
+ if (!array)
+ {
+ FCThrowVoid(kNullReferenceException);
+ }
+ if (idx >= array->GetNumComponents())
+ {
+ FCThrowVoid(kIndexOutOfRangeException);
+ }
+
+ if (val)
+ {
+ MethodTable *valMT = val->GetMethodTable();
+ TypeHandle arrayElemTH = array->GetArrayElementTypeHandle();
+
+#if CHECK_APP_DOMAIN_LEAKS
+ // If the instance is agile or check agile
+ if (g_pConfig->AppDomainLeaks() && !arrayElemTH.IsAppDomainAgile() && !arrayElemTH.IsCheckAppDomainAgile())
+ {
+ // FCALL_CONTRACT increases the ForbidGC count. Normally, HELPER_METHOD_FRAME macros decrease the count.
+ // But to avoid a perf hit, we manually decrease the count here before calling another HCCALL.
+ ENDFORBIDGC();
+
+ if (HCCALL2(ArrayStoreCheck,(Object**)&val, (PtrArray**)&array) != NULL)
+ {
+ // This return is never executed. It helps epilog walker to find its way out.
+ return;
+ }
+ }
+ else
+#endif
+ if (arrayElemTH != TypeHandle(valMT) && arrayElemTH != TypeHandle(g_pObjectClass))
+ {
+ TypeHandle::CastResult result = ObjIsInstanceOfNoGC(val, arrayElemTH);
+ if (result != TypeHandle::CanCast)
+ {
+ // FCALL_CONTRACT increases the ForbidGC count. Normally, HELPER_METHOD_FRAME macros decrease the count.
+ // But to avoid a perf hit, we manually decrease the count here before calling another HCCALL.
+ ENDFORBIDGC();
+
+ if (HCCALL2(ArrayStoreCheck,(Object**)&val, (PtrArray**)&array) != NULL)
+ {
+ // This return is never executed. It helps epilog walker to find its way out.
+ return;
+ }
+ }
+ }
+
+#ifdef _TARGET_ARM64_
+ SetObjectReferenceUnchecked((OBJECTREF*)&array->m_Array[idx], ObjectToOBJECTREF(val));
+#else
+ // The performance gain of the optimized JIT_Stelem_Ref in
+ // jitinterfacex86.cpp is mainly due to calling JIT_WriteBarrier
+ // By calling write barrier directly here,
+ // we can avoid translating in-line assembly from MSVC to gcc
+ // while keeping most of the performance gain.
+ HCCALL2(JIT_WriteBarrier, (Object **)&array->m_Array[idx], val);
+#endif
+
+ }
+ else
+ {
+ // no need to go through write-barrier for NULL
+ ClearObjectReference(&array->m_Array[idx]);
+ }
+}
+HCIMPLEND
+
+
+
+//========================================================================
+//
+// VALUETYPE/BYREF HELPERS
+//
+//========================================================================
+
+/*************************************************************/
+HCIMPL2(Object*, JIT_Box, CORINFO_CLASS_HANDLE type, void* unboxedData)
+{
+ FCALL_CONTRACT;
+
+ // <TODO>TODO: if we care, we could do a fast trial allocation
+ // and avoid building the frame most of the time</TODO>
+ OBJECTREF newobj = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL(); // Set up a frame
+ GCPROTECT_BEGININTERIOR(unboxedData);
+ HELPER_METHOD_POLL();
+
+ TypeHandle clsHnd(type);
+
+ _ASSERTE(!clsHnd.IsTypeDesc()); // we never use this helper for arrays
+
+ MethodTable *pMT = clsHnd.AsMethodTable();
+
+ pMT->CheckRestore();
+
+ // You can only box valuetypes
+ if (!pMT->IsValueType())
+ COMPlusThrow(kInvalidCastException, W("Arg_ObjObj"));
+
+#ifdef _DEBUG
+ if (g_pConfig->FastGCStressLevel()) {
+ GetThread()->DisableStressHeap();
+ }
+#endif // _DEBUG
+
+ newobj = pMT->FastBox(&unboxedData);
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+ return(OBJECTREFToObject(newobj));
+}
+HCIMPLEND
+
+/*************************************************************/
+NOINLINE HCIMPL3(VOID, JIT_Unbox_Nullable_Framed, void * destPtr, MethodTable* typeMT, OBJECTREF objRef)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_1(objRef);
+ if (!Nullable::UnBox(destPtr, objRef, typeMT))
+ {
+ COMPlusThrow(kInvalidCastException);
+ }
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+/*************************************************************/
+HCIMPL3(VOID, JIT_Unbox_Nullable, void * destPtr, CORINFO_CLASS_HANDLE type, Object* obj)
+{
+ FCALL_CONTRACT;
+
+ TypeHandle typeHnd(type);
+ _ASSERTE(Nullable::IsNullableType(typeHnd));
+
+ MethodTable* typeMT = typeHnd.AsMethodTable();
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ if (Nullable::UnBoxNoGC(destPtr, objRef, typeMT))
+ {
+ // exact match (type equivalence not needed)
+ return;
+ }
+
+ // Fall back to a framed helper that handles type equivalence.
+ ENDFORBIDGC();
+ HCCALL3(JIT_Unbox_Nullable_Framed, destPtr, typeMT, objRef);
+}
+HCIMPLEND
+
+/*************************************************************/
+/* framed helper that handles full-blown type equivalence */
+NOINLINE HCIMPL2(LPVOID, JIT_Unbox_Helper_Framed, CORINFO_CLASS_HANDLE type, Object* obj)
+{
+ FCALL_CONTRACT;
+
+ LPVOID result = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+ if (TypeHandle(type).IsEquivalentTo(obj->GetTypeHandle()))
+ {
+ // the structures are equivalent
+ result = obj->GetData();
+ }
+ else
+ {
+ COMPlusThrow(kInvalidCastException);
+ }
+ HELPER_METHOD_FRAME_END();
+
+ return result;
+}
+HCIMPLEND
+
+/*************************************************************/
+/* the uncommon case for the helper below (allowing enums to be unboxed
+ as their underlying primitive type, and vice versa) */
+LPVOID __fastcall JIT_Unbox_Helper(CORINFO_CLASS_HANDLE type, Object* obj)
+{
+ FCALL_CONTRACT;
+
+ TypeHandle typeHnd(type);
+
+ CorElementType type1 = typeHnd.GetInternalCorElementType();
+
+ // we allow enums and their primitive type to be interchangeable
+
+ MethodTable* pMT2 = obj->GetMethodTable();
+ CorElementType type2 = pMT2->GetInternalCorElementType();
+ if (type1 == type2)
+ {
+ MethodTable* pMT1 = typeHnd.GetMethodTable();
+ if (pMT1 && (pMT1->IsEnum() || pMT1->IsTruePrimitive()) &&
+ (pMT2->IsEnum() || pMT2->IsTruePrimitive()))
+ {
+ _ASSERTE(CorTypeInfo::IsPrimitiveType_NoThrow(type1));
+ return(obj->GetData());
+ }
+ }
+
+ // Even less common cases (type equivalence) go to a framed helper.
+ ENDFORBIDGC();
+ return HCCALL2(JIT_Unbox_Helper_Framed, type, obj);
+}
+
+/*************************************************************/
+HCIMPL2(LPVOID, JIT_Unbox, CORINFO_CLASS_HANDLE type, Object* obj)
+{
+ FCALL_CONTRACT;
+
+ TypeHandle typeHnd(type);
+ VALIDATEOBJECT(obj);
+ _ASSERTE(!typeHnd.IsTypeDesc()); // value classes are always unshared
+
+ // This has been tuned so that branch predictions are good
+ // (fall through for forward branches) for the common case
+ if (obj != NULL) {
+ if (obj->GetMethodTable() == typeHnd.AsMethodTable())
+ return(obj->GetData());
+ else {
+ // Stuff the uncommon case into a helper so that
+ // its register needs don't cause spills that affect
+ // the common case above.
+ return JIT_Unbox_Helper(type, obj);
+ }
+ }
+
+ FCThrow(kNullReferenceException);
+}
+HCIMPLEND
+
+/*************************************************************/
+HCIMPL2_IV(LPVOID, JIT_GetRefAny, CORINFO_CLASS_HANDLE type, TypedByRef typedByRef)
+{
+ FCALL_CONTRACT;
+
+ TypeHandle clsHnd(type);
+
+ // <TODO>@TODO right now we check for precisely the correct type.
+ // Do we want to allow inheritance? (Watch out, since value
+ // classes inherit from object but do not have the normal object layout.)</TODO>
+ if (clsHnd != typedByRef.type) {
+ FCThrow(kInvalidCastException);
+ }
+
+ return(typedByRef.data);
+}
+HCIMPLEND
+
+
+//========================================================================
+//
+// GENERICS HELPERS
+//
+//========================================================================
+
+/***********************************************************************/
+// JIT_GenericHandle and its cache
+//
+// Perform a "polytypic" operation related to shared generic code at runtime, possibly filling in an entry in
+// either a generic dictionary cache assocaited with a descriptor or placing an entry in the global
+// JitGenericHandle cache.
+//
+// A polytypic operation is one such as
+// * new List<T>
+// * castclass List<T>
+// where the code being executed is shared generic code. In these cases the outcome of the operation depends
+// on the exact value for T, which is acquired from a dynamic parameter.
+//
+// The actual operation always boils down to finding a "handle" (TypeHandle, MethodDesc, call address,
+// dispatch stub address etc.) based on some static information (passed as tokens) and on the exact runtime
+// type context (passed as one or two parameters classHnd and methodHnd).
+//
+// The static information specifies which polytypic operation (and thus which kind of handle) we're
+// interested in.
+//
+// The dynamic information (the type context, i.e. the exact instantiation of class and method type
+// parameters) is specified in one of two ways:
+// * If classHnd is null then the methodHnd should be an exact method descriptor wrapping shared code that
+// satisfies SharedByGenericMethodInstantiations().
+//
+// For example:
+// * We may be running the shared code for a generic method instantiation C::m<object>. The methodHnd
+// will carry the exact instantiation, e.g. C::m<string>
+//
+// * If classHnd is non-null (e.g. a type D<exact>) then:
+// * methodHnd will indicate the representative code being run (which will be
+// !SharedByGenericMethodInstantiations but will be SharedByGenericClassInstantiations). Let's say
+// this code is C<repr>::m().
+// * the type D will be a descendant of type C. In particular D<exact> will relate to some type C<exact'>
+// where C<repr> is the representative instantiation of C<exact'>
+// * the relevant dictionary will be the one attached to C<exact'>.
+//
+// The JitGenericHandleCache is a global data structure shared across all application domains. It is only
+// used if generic dictionaries have overflowed. It is flushed each time an application domain is unloaded.
+
+struct JitGenericHandleCacheKey
+{
+ JitGenericHandleCacheKey(CORINFO_CLASS_HANDLE classHnd, CORINFO_METHOD_HANDLE methodHnd, void *signature, BaseDomain* pDomain=NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_Data1 = (size_t)classHnd;
+ m_Data2 = (size_t)methodHnd;
+ m_Data3 = (size_t)signature;
+ m_pDomainAndType = 0 | (size_t)pDomain;
+ }
+
+ JitGenericHandleCacheKey(MethodTable* pMT, CORINFO_CLASS_HANDLE classHnd, CORINFO_METHOD_HANDLE methodHnd, BaseDomain* pDomain=NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_Data1 = (size_t)pMT;
+ m_Data2 = (size_t)classHnd;
+ m_Data3 = (size_t)methodHnd;
+ m_pDomainAndType = 1 | (size_t)pDomain;
+ }
+
+ size_t GetType() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_pDomainAndType & 1);
+ }
+
+ BaseDomain* GetDomain() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (BaseDomain*)(m_pDomainAndType & ~1);
+ }
+
+ size_t m_Data1;
+ size_t m_Data2;
+ size_t m_Data3;
+
+ size_t m_pDomainAndType; // Which domain the entry belongs to. Not actually part of the key.
+ // Used only so we can scrape the table on AppDomain termination.
+ // NULL appdomain means that the entry should be scratched
+ // on any appdomain unload.
+ //
+ // The lowest bit is used to indicate the type of the entry:
+ // 0 - JIT_GenericHandle entry
+ // 1 - JIT_VirtualFunctionPointer entry
+};
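+
+// Illustrative only (assumed usage, mirroring the two constructors above):
+//
+//   // JIT_GenericHandle entry - low bit of m_pDomainAndType is 0:
+//   JitGenericHandleCacheKey k1(classHnd, methodHnd, signature, pDomain);
+//
+//   // JIT_VirtualFunctionPointer entry - low bit of m_pDomainAndType is 1:
+//   JitGenericHandleCacheKey k2(pObjectMT, classHnd, methodHnd, pDomain);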
+
+class JitGenericHandleCacheTraits
+{
+public:
+ static EEHashEntry_t *AllocateEntry(const JitGenericHandleCacheKey *pKey, BOOL bDeepCopy, AllocationHeap pHeap = 0)
+ {
+ LIMITED_METHOD_CONTRACT;
+ EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(JitGenericHandleCacheKey)];
+ if (!pEntry)
+ return NULL;
+ *((JitGenericHandleCacheKey*)pEntry->Key) = *pKey;
+ return pEntry;
+ }
+
+ static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap pHeap = 0)
+ {
+ LIMITED_METHOD_CONTRACT;
+ delete [] (BYTE*)pEntry;
+ }
+
+ static BOOL CompareKeys(EEHashEntry_t *pEntry, const JitGenericHandleCacheKey *e2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ const JitGenericHandleCacheKey *e1 = (const JitGenericHandleCacheKey*)&pEntry->Key;
+ return (e1->m_Data1 == e2->m_Data1) && (e1->m_Data2 == e2->m_Data2) && (e1->m_Data3 == e2->m_Data3) &&
+ (e1->GetType() == e2->GetType()) &&
+ // Any domain will work if the lookup key does not specify it
+ ((e2->GetDomain() == NULL) || (e1->GetDomain() == e2->GetDomain()));
+ }
+
+ static DWORD Hash(const JitGenericHandleCacheKey *k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (DWORD)k->m_Data1 + _rotl((DWORD)k->m_Data2,5) + _rotr((DWORD)k->m_Data3,5);
+ }
+
+ static const JitGenericHandleCacheKey *GetKey(EEHashEntry_t *pEntry)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (const JitGenericHandleCacheKey*)&pEntry->Key;
+ }
+};
+
+typedef EEHashTable<const JitGenericHandleCacheKey *, JitGenericHandleCacheTraits, FALSE> JitGenericHandleCache;
+
+JitGenericHandleCache *g_pJitGenericHandleCache = NULL; //cache of calls to JIT_GenericHandle
+CrstStatic g_pJitGenericHandleCacheCrst;
+
+void AddToGenericHandleCache(JitGenericHandleCacheKey* pKey, HashDatum datum)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pKey));
+ PRECONDITION(CheckPointer(datum));
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ GCX_COOP();
+
+ CrstHolder lock(&g_pJitGenericHandleCacheCrst);
+
+ HashDatum entry;
+ if (!g_pJitGenericHandleCache->GetValue(pKey,&entry))
+ g_pJitGenericHandleCache->InsertValue(pKey,datum);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions) // Swallow OOM
+}
+
+/* static */
+void ClearJitGenericHandleCache(AppDomain *pDomain)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+
+ // We call this on every AppDomain unload, because entries in the cache might include
+ // pointers into the AppDomain being unloaded. We would prefer to
+ // only flush entries that are no longer valid, but the entries don't yet contain
+ // enough information to do that. However everything in the cache can be found again by calling
+ // loader functions, and the total number of entries in the cache is typically very small (indeed
+ // normally the cache is not used at all - it is only used when the generic dictionaries overflow).
+ if (g_pJitGenericHandleCache)
+ {
+ // It's not necessary to take the lock here because this function should only be called when the EE is
+ // suspended; the lock is only taken to fulfill the thread-safety check and to be consistent. If the lock
+ // becomes a problem, we could put it in an "#ifdef _DEBUG" block.
+ CrstHolder lock(&g_pJitGenericHandleCacheCrst);
+ EEHashTableIteration iter;
+ g_pJitGenericHandleCache->IterateStart(&iter);
+ BOOL keepGoing = g_pJitGenericHandleCache->IterateNext(&iter);
+ while(keepGoing)
+ {
+ const JitGenericHandleCacheKey *key = g_pJitGenericHandleCache->IterateGetKey(&iter);
+ BaseDomain* pKeyDomain = key->GetDomain();
+ if (pKeyDomain == pDomain || pKeyDomain == NULL
+ // We compute fake domain for types during NGen (see code:ClassLoader::ComputeLoaderModule).
+ // To avoid stale handles, we need to clear the cache unconditionally during NGen.
+ || IsCompilationProcess())
+ {
+ // Advance the iterator before we delete!! See notes in EEHash.h
+ keepGoing = g_pJitGenericHandleCache->IterateNext(&iter);
+ g_pJitGenericHandleCache->DeleteValue(key);
+ }
+ else
+ {
+ keepGoing = g_pJitGenericHandleCache->IterateNext(&iter);
+ }
+ }
+ }
+}
+
+// Factored out most of the body of JIT_GenericHandle so it could be called easily from the CER reliability code to pre-populate the
+// cache.
+CORINFO_GENERIC_HANDLE
+JIT_GenericHandleWorker(
+ MethodDesc * pMD,
+ MethodTable * pMT,
+ LPVOID signature)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ MethodTable * pDeclaringMT = NULL;
+
+ if (pMT != NULL)
+ {
+ SigPointer ptr((PCCOR_SIGNATURE)signature);
+
+ ULONG kind; // DictionaryEntryKind
+ IfFailThrow(ptr.GetData(&kind));
+
+ // We need to normalize the class passed in (if any) for reliability purposes. That's because preparation of a code region that
+ // contains these handle lookups depends on being able to predict exactly which lookups are required (so we can pre-cache the
+ // answers and remove any possibility of failure at runtime). This is hard to do if the lookup (in this case the lookup of the
+ // dictionary overflow cache) is keyed off the somewhat arbitrary type of the instance on which the call is made (we'd need to
+ // prepare for every possible derived type of the type containing the method). So instead we have to locate the exactly
+ // instantiated (non-shared) super-type of the class passed in.
+
+ ULONG dictionaryIndex = 0;
+ IfFailThrow(ptr.GetData(&dictionaryIndex));
+
+ pDeclaringMT = pMT;
+ for (;;)
+ {
+ MethodTable * pParentMT = pDeclaringMT->GetParentMethodTable();
+ if (pParentMT->GetNumDicts() <= dictionaryIndex)
+ break;
+ pDeclaringMT = pParentMT;
+ }
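+
+ // Example (illustrative): if pMT is D<string> with D<T> : C<T>, and
+ // dictionaryIndex identifies the dictionary layer introduced by C,
+ // the walk above stops at C<string>, so every type derived from
+ // C<string> normalizes to the same cache key.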
+
+ if (pDeclaringMT != pMT)
+ {
+ JitGenericHandleCacheKey key((CORINFO_CLASS_HANDLE)pDeclaringMT, NULL, signature);
+ HashDatum res;
+ if (g_pJitGenericHandleCache->GetValue(&key,&res))
+ {
+ // Add the denormalized key for faster lookup next time. This is not a critical entry - no need
+ // to specify appdomain affinity.
+ AddToGenericHandleCache(&key, res);
+ return (CORINFO_GENERIC_HANDLE) (DictionaryEntry) res;
+ }
+ }
+ }
+
+ DictionaryEntry * pSlot;
+ CORINFO_GENERIC_HANDLE result = (CORINFO_GENERIC_HANDLE)Dictionary::PopulateEntry(pMD, pDeclaringMT, signature, FALSE, &pSlot);
+
+ if (pSlot == NULL)
+ {
+ // If we've overflowed the dictionary, write the result to the cache.
+ BaseDomain *pDictDomain = NULL;
+
+ if (pMT != NULL)
+ {
+ pDictDomain = pDeclaringMT->GetDomain();
+ }
+ else
+ {
+ pDictDomain = pMD->GetDomain();
+ }
+
+ JitGenericHandleCacheKey key((CORINFO_CLASS_HANDLE)pDeclaringMT, (CORINFO_METHOD_HANDLE)pMD, signature, pDictDomain);
+ AddToGenericHandleCache(&key, (HashDatum)result);
+ }
+
+ return result;
+} // JIT_GenericHandleWorker
+
+/*********************************************************************/
+// slow helper to tail call from the fast one
+NOINLINE HCIMPL3(CORINFO_GENERIC_HANDLE, JIT_GenericHandle_Framed,
+ CORINFO_CLASS_HANDLE classHnd,
+ CORINFO_METHOD_HANDLE methodHnd,
+ LPVOID signature)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(classHnd != NULL || methodHnd != NULL);
+ PRECONDITION(classHnd == NULL || methodHnd == NULL);
+ } CONTRACTL_END;
+
+ // Result is a generic handle (in fact, a CORINFO_CLASS_HANDLE, CORINFO_METHOD_HANDLE, or a code pointer)
+ CORINFO_GENERIC_HANDLE result = NULL;
+
+ MethodDesc * pMD = GetMethod(methodHnd);
+ MethodTable * pMT = TypeHandle(classHnd).AsMethodTable();
+
+ // Set up a frame
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ result = JIT_GenericHandleWorker(pMD, pMT, signature);
+
+ HELPER_METHOD_FRAME_END();
+
+ _ASSERTE(result != NULL);
+
+ // Return the handle
+ return result;
+}
+HCIMPLEND
+
+/*********************************************************************/
+#include <optsmallperfcritical.h>
+HCIMPL2(CORINFO_GENERIC_HANDLE, JIT_GenericHandleMethod, CORINFO_METHOD_HANDLE methodHnd, LPVOID signature)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(methodHnd));
+ PRECONDITION(GetMethod(methodHnd)->IsRestored());
+ PRECONDITION(CheckPointer(signature));
+ } CONTRACTL_END;
+
+ JitGenericHandleCacheKey key(NULL, methodHnd, signature);
+ HashDatum res;
+ if (g_pJitGenericHandleCache->GetValueSpeculative(&key,&res))
+ return (CORINFO_GENERIC_HANDLE) (DictionaryEntry) res;
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL3(JIT_GenericHandle_Framed, NULL, methodHnd, signature);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*********************************************************************/
+HCIMPL2(CORINFO_GENERIC_HANDLE, JIT_GenericHandleMethodLogging, CORINFO_METHOD_HANDLE methodHnd, LPVOID signature)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(methodHnd));
+ PRECONDITION(GetMethod(methodHnd)->IsRestored());
+ PRECONDITION(CheckPointer(signature));
+ } CONTRACTL_END;
+
+ g_IBCLogger.LogMethodDescAccess(GetMethod(methodHnd));
+
+ JitGenericHandleCacheKey key(NULL, methodHnd, signature);
+ HashDatum res;
+ if (g_pJitGenericHandleCache->GetValueSpeculative(&key,&res))
+ return (CORINFO_GENERIC_HANDLE) (DictionaryEntry) res;
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL3(JIT_GenericHandle_Framed, NULL, methodHnd, signature);
+}
+HCIMPLEND
+
+/*********************************************************************/
+#include <optsmallperfcritical.h>
+HCIMPL2(CORINFO_GENERIC_HANDLE, JIT_GenericHandleClass, CORINFO_CLASS_HANDLE classHnd, LPVOID signature)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(classHnd));
+ PRECONDITION(TypeHandle(classHnd).IsRestored());
+ PRECONDITION(CheckPointer(signature));
+ } CONTRACTL_END;
+
+ JitGenericHandleCacheKey key(classHnd, NULL, signature);
+ HashDatum res;
+ if (g_pJitGenericHandleCache->GetValueSpeculative(&key,&res))
+ return (CORINFO_GENERIC_HANDLE) (DictionaryEntry) res;
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL3(JIT_GenericHandle_Framed, classHnd, NULL, signature);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*********************************************************************/
+HCIMPL2(CORINFO_GENERIC_HANDLE, JIT_GenericHandleClassLogging, CORINFO_CLASS_HANDLE classHnd, LPVOID signature)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(classHnd));
+ PRECONDITION(TypeHandle(classHnd).IsRestored());
+ PRECONDITION(CheckPointer(signature));
+ } CONTRACTL_END;
+
+ g_IBCLogger.LogMethodTableAccess((MethodTable *)classHnd);
+
+ JitGenericHandleCacheKey key(classHnd, NULL, signature);
+ HashDatum res;
+ if (g_pJitGenericHandleCache->GetValueSpeculative(&key,&res))
+ return (CORINFO_GENERIC_HANDLE) (DictionaryEntry) res;
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL3(JIT_GenericHandle_Framed, classHnd, NULL, signature);
+}
+HCIMPLEND
+
+/*********************************************************************/
+// Resolve a virtual method at run-time, either because of
+// aggressive backpatching or because the call is to a generic
+// method which is itself virtual.
+//
+// classHnd is the actual run-time type on which the call is made.
+// methodHnd is the exact (instantiated) method descriptor corresponding to the
+// static method signature (i.e. might be for a superclass of classHnd)
+
+// slow helper to tail call from the fast one
+NOINLINE HCIMPL3(CORINFO_MethodPtr, JIT_VirtualFunctionPointer_Framed, Object * objectUNSAFE,
+ CORINFO_CLASS_HANDLE classHnd,
+ CORINFO_METHOD_HANDLE methodHnd)
+{
+ FCALL_CONTRACT;
+
+ // The address of the method that's returned.
+ CORINFO_MethodPtr addr = NULL;
+
+ OBJECTREF objRef = ObjectToOBJECTREF(objectUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(objRef); // Set up a frame
+
+ if (objRef == NULL)
+ COMPlusThrow(kNullReferenceException);
+
+ // This is the static method descriptor describing the call.
+ // It is not the destination of the call, which we must compute.
+ MethodDesc* pStaticMD = (MethodDesc*) methodHnd;
+ TypeHandle staticTH(classHnd);
+
+ pStaticMD->CheckRestore();
+
+ // MDIL: If IL specifies callvirt/ldvirtftn it remains a "virtual" instruction
+ // even if the target is an instance method at MDIL generation time because
+ // we want to keep MDIL as resilient as IL. Right now we can end up here with
+ // non-virtual generic methods called from shared generic code.
+ // As soon as this deficiency is fixed in the binder we can get rid of this test.
+ if (!pStaticMD->IsVtableMethod())
+ {
+ addr = (CORINFO_MethodPtr) pStaticMD->GetMultiCallableAddrOfCode();
+ _ASSERTE(addr);
+ }
+ else
+ {
+ // This is the new way of resolving a virtual call, including generic virtual methods.
+ // The code is now also used by reflection, remoting etc.
+ addr = (CORINFO_MethodPtr) pStaticMD->GetMultiCallableAddrOfVirtualizedCode(&objRef, staticTH);
+ _ASSERTE(addr);
+
+ // The cache can be used only if MethodTable is a real one
+ if (!objRef->IsTransparentProxy())
+ {
+ // This is not a critical entry - no need to specify appdomain affinity
+ JitGenericHandleCacheKey key(objRef->GetMethodTable(), classHnd, methodHnd);
+ AddToGenericHandleCache(&key, (HashDatum)addr);
+ }
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ return addr;
+}
+HCIMPLEND
+
+HCIMPL2(VOID, JIT_GetRuntimeFieldHandle, Object ** destPtr, CORINFO_FIELD_HANDLE field)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ FieldDesc *pField = (FieldDesc *)field;
+ SetObjectReference((OBJECTREF*) destPtr,
+ pField->GetStubFieldInfo(), GetAppDomain());
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+HCIMPL1(Object*, JIT_GetRuntimeFieldStub, CORINFO_FIELD_HANDLE field)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF stubRuntimeField = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
+
+ FieldDesc *pField = (FieldDesc *)field;
+ stubRuntimeField = (OBJECTREF)pField->GetStubFieldInfo();
+
+ HELPER_METHOD_FRAME_END();
+
+ return (OBJECTREFToObject(stubRuntimeField));
+}
+HCIMPLEND
+
+HCIMPL2(VOID, JIT_GetRuntimeMethodHandle, Object ** destPtr, CORINFO_METHOD_HANDLE method)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ MethodDesc *pMethod = (MethodDesc *)method;
+ SetObjectReference((OBJECTREF*) destPtr,
+ pMethod->GetStubMethodInfo(), GetAppDomain());
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+HCIMPL1(Object*, JIT_GetRuntimeMethodStub, CORINFO_METHOD_HANDLE method)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF stubRuntimeMethod = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
+
+ MethodDesc *pMethod = (MethodDesc *)method;
+ stubRuntimeMethod = (OBJECTREF)pMethod->GetStubMethodInfo();
+
+ HELPER_METHOD_FRAME_END();
+
+ return (OBJECTREFToObject(stubRuntimeMethod));
+}
+HCIMPLEND
+
+HCIMPL2(VOID, JIT_GetRuntimeTypeHandle, Object ** destPtr, CORINFO_CLASS_HANDLE type)
+{
+ FCALL_CONTRACT;
+
+ TypeHandle typeHnd(type);
+
+ if (!typeHnd.IsTypeDesc())
+ {
+ // Most common... and fastest case
+ OBJECTREF typePtr = typeHnd.AsMethodTable()->GetManagedClassObjectIfExists();
+ if (typePtr != NULL)
+ {
+ SetObjectReference((OBJECTREF*) destPtr,
+ typePtr, GetAppDomain());
+ return;
+ }
+ }
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ SetObjectReference((OBJECTREF*) destPtr,
+ typeHnd.GetManagedClassObject(), GetAppDomain());
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+
+NOINLINE HCIMPL1(Object*, JIT_GetRuntimeType_Framed, CORINFO_CLASS_HANDLE type)
+{
+ FCALL_CONTRACT;
+
+ TypeHandle typeHandle(type);
+
+ // Array/other type handle case.
+ OBJECTREF refType = typeHandle.GetManagedClassObjectFast();
+ if (refType == NULL)
+ {
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refType);
+ refType = typeHandle.GetManagedClassObject();
+ HELPER_METHOD_FRAME_END();
+ }
+
+ return OBJECTREFToObject(refType);
+}
+HCIMPLEND
+
+#include <optsmallperfcritical.h>
+HCIMPL1(Object*, JIT_GetRuntimeType, CORINFO_CLASS_HANDLE type)
+{
+ FCALL_CONTRACT;
+
+ TypeHandle typeHnd(type);
+
+ if (!typeHnd.IsTypeDesc())
+ {
+ // Most common... and fastest case
+ OBJECTREF typePtr = typeHnd.AsMethodTable()->GetManagedClassObjectIfExists();
+ if (typePtr != NULL)
+ {
+ return OBJECTREFToObject(typePtr);
+ }
+ }
+
+ ENDFORBIDGC();
+ return HCCALL1(JIT_GetRuntimeType_Framed, type);
+}
+HCIMPLEND
+
+HCIMPL1(Object*, JIT_GetRuntimeType_MaybeNull, CORINFO_CLASS_HANDLE type)
+{
+ FCALL_CONTRACT;
+
+ if (type == NULL)
+ return NULL;
+
+ ENDFORBIDGC();
+ return HCCALL1(JIT_GetRuntimeType, type);
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*********************************************************************/
+#include <optsmallperfcritical.h>
+HCIMPL3(CORINFO_MethodPtr, JIT_VirtualFunctionPointer, Object * objectUNSAFE,
+ CORINFO_CLASS_HANDLE classHnd,
+ CORINFO_METHOD_HANDLE methodHnd)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF objRef = ObjectToOBJECTREF(objectUNSAFE);
+
+ if (objRef != NULL)
+ {
+ JitGenericHandleCacheKey key(objRef->GetMethodTable(), classHnd, methodHnd);
+ HashDatum res;
+ if (g_pJitGenericHandleCache->GetValueSpeculative(&key,&res))
+ return (CORINFO_GENERIC_HANDLE)res;
+ }
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL3(JIT_VirtualFunctionPointer_Framed, OBJECTREFToObject(objRef), classHnd, methodHnd);
+}
+HCIMPLEND
+
+HCIMPL2(CORINFO_MethodPtr, JIT_VirtualFunctionPointer_Dynamic, Object * objectUNSAFE, VirtualFunctionPointerArgs * pArgs)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF objRef = ObjectToOBJECTREF(objectUNSAFE);
+
+ if (objRef != NULL)
+ {
+ JitGenericHandleCacheKey key(objRef->GetMethodTable(), pArgs->classHnd, pArgs->methodHnd);
+ HashDatum res;
+ if (g_pJitGenericHandleCache->GetValueSpeculative(&key,&res))
+ return (CORINFO_GENERIC_HANDLE)res;
+ }
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ return HCCALL3(JIT_VirtualFunctionPointer_Framed, OBJECTREFToObject(objRef), pArgs->classHnd, pArgs->methodHnd);
+}
+HCIMPLEND
+
+#include <optdefault.h>
+
+// Helper for synchronized static methods in shared generics code
+#include <optsmallperfcritical.h>
+HCIMPL1(CORINFO_CLASS_HANDLE, JIT_GetClassFromMethodParam, CORINFO_METHOD_HANDLE methHnd_)
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(methHnd_ != NULL);
+ } CONTRACTL_END;
+
+ MethodDesc * pMD = (MethodDesc*) methHnd_;
+
+ MethodTable * pMT = pMD->GetMethodTable();
+ _ASSERTE(!pMT->IsSharedByGenericInstantiations());
+
+ return((CORINFO_CLASS_HANDLE)pMT);
+HCIMPLEND
+#include <optdefault.h>
+
+
+
+//========================================================================
+//
+// MONITOR HELPERS
+//
+//========================================================================
+
+/*********************************************************************/
+NOINLINE static void JIT_MonEnter_Helper(Object* obj, BYTE* pbLockTaken, LPVOID __me)
+{
+ FC_INNER_PROLOG_NO_ME_SETUP();
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, objRef);
+
+ if (objRef == NULL)
+ COMPlusThrow(kArgumentNullException);
+
+ GCPROTECT_BEGININTERIOR(pbLockTaken);
+
+#ifdef _DEBUG
+ Thread *pThread = GetThread();
+ DWORD lockCount = pThread->m_dwLockCount;
+#endif
+ if (GET_THREAD()->CatchAtSafePointOpportunistic())
+ {
+ GET_THREAD()->PulseGCMode();
+ }
+ objRef->EnterObjMonitor();
+ _ASSERTE ((objRef->GetSyncBlock()->GetMonitor()->m_Recursion == 1 && pThread->m_dwLockCount == lockCount + 1) ||
+ pThread->m_dwLockCount == lockCount);
+ if (pbLockTaken != 0) *pbLockTaken = 1;
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+}
+
+/*********************************************************************/
+NOINLINE static void JIT_MonContention_Helper(Object* obj, BYTE* pbLockTaken, LPVOID __me)
+{
+ FC_INNER_PROLOG_NO_ME_SETUP();
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, objRef);
+
+ GCPROTECT_BEGININTERIOR(pbLockTaken);
+
+ objRef->GetSyncBlock()->QuickGetMonitor()->Contention();
+ if (pbLockTaken != 0) *pbLockTaken = 1;
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+}
+
+/*********************************************************************/
+#include <optsmallperfcritical.h>
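+
+// The portable enter helpers below share one shape: try the inline
+// acquisition (EnterObjMonitorHelper), then a bounded spin
+// (EnterObjMonitorHelperSpin), and only when both report contention fall
+// back to the framed contention/enter helpers above, which can block,
+// poll for GC, and throw.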
+
+HCIMPL_MONHELPER(JIT_MonEnterWorker_Portable, Object* obj)
+{
+ FCALL_CONTRACT;
+
+ AwareLock::EnterHelperResult result;
+ Thread * pCurThread;
+
+ if (obj == NULL)
+ {
+ goto FramedLockHelper;
+ }
+
+ pCurThread = GetThread();
+
+ if (pCurThread->CatchAtSafePointOpportunistic())
+ {
+ goto FramedLockHelper;
+ }
+
+ result = obj->EnterObjMonitorHelper(pCurThread);
+ if (result == AwareLock::EnterHelperResult_Entered)
+ {
+ MONHELPER_STATE(*pbLockTaken = 1;)
+ return;
+ }
+ else
+ if (result == AwareLock::EnterHelperResult_Contention)
+ {
+ AwareLock::EnterHelperResult resultSpin = obj->EnterObjMonitorHelperSpin(pCurThread);
+ if (resultSpin == AwareLock::EnterHelperResult_Entered)
+ {
+ MONHELPER_STATE(*pbLockTaken = 1;)
+ return;
+ }
+ if (resultSpin == AwareLock::EnterHelperResult_Contention)
+ {
+ FC_INNER_RETURN_VOID(JIT_MonContention_Helper(obj, MONHELPER_ARG, GetEEFuncEntryPointMacro(JIT_MonEnter)));
+ }
+ }
+
+FramedLockHelper:
+ FC_INNER_RETURN_VOID(JIT_MonEnter_Helper(obj, MONHELPER_ARG, GetEEFuncEntryPointMacro(JIT_MonEnter)));
+}
+HCIMPLEND
+
+HCIMPL1(void, JIT_MonEnter_Portable, Object* obj)
+{
+ FCALL_CONTRACT;
+
+ Thread * pCurThread;
+ AwareLock::EnterHelperResult result;
+
+ if (obj == NULL)
+ {
+ goto FramedLockHelper;
+ }
+
+ pCurThread = GetThread();
+
+ if (pCurThread->CatchAtSafePointOpportunistic())
+ {
+ goto FramedLockHelper;
+ }
+
+ result = obj->EnterObjMonitorHelper(pCurThread);
+ if (result == AwareLock::EnterHelperResult_Entered)
+ {
+ return;
+ }
+ else
+ if (result == AwareLock::EnterHelperResult_Contention)
+ {
+ AwareLock::EnterHelperResult resultSpin = obj->EnterObjMonitorHelperSpin(pCurThread);
+ if (resultSpin == AwareLock::EnterHelperResult_Entered)
+ {
+ return;
+ }
+ if (resultSpin == AwareLock::EnterHelperResult_Contention)
+ {
+ FC_INNER_RETURN_VOID(JIT_MonContention_Helper(obj, NULL, GetEEFuncEntryPointMacro(JIT_MonEnter)));
+ }
+ }
+
+FramedLockHelper:
+ FC_INNER_RETURN_VOID(JIT_MonEnter_Helper(obj, NULL, GetEEFuncEntryPointMacro(JIT_MonEnter)));
+}
+HCIMPLEND
+
+HCIMPL2(void, JIT_MonReliableEnter_Portable, Object* obj, BYTE* pbLockTaken)
+{
+ FCALL_CONTRACT;
+
+ Thread * pCurThread;
+ AwareLock::EnterHelperResult result;
+
+ if (obj == NULL)
+ {
+ goto FramedLockHelper;
+ }
+
+ pCurThread = GetThread();
+
+ if (pCurThread->CatchAtSafePointOpportunistic())
+ {
+ goto FramedLockHelper;
+ }
+
+ result = obj->EnterObjMonitorHelper(pCurThread);
+ if (result == AwareLock::EnterHelperResult_Entered)
+ {
+ *pbLockTaken = 1;
+ return;
+ }
+ else
+ if (result == AwareLock::EnterHelperResult_Contention)
+ {
+ AwareLock::EnterHelperResult resultSpin = obj->EnterObjMonitorHelperSpin(pCurThread);
+ if (resultSpin == AwareLock::EnterHelperResult_Entered)
+ {
+ *pbLockTaken = 1;
+ return;
+ }
+ if (resultSpin == AwareLock::EnterHelperResult_Contention)
+ {
+ FC_INNER_RETURN_VOID(JIT_MonContention_Helper(obj, pbLockTaken, GetEEFuncEntryPointMacro(JIT_MonReliableEnter)));
+ }
+ }
+
+FramedLockHelper:
+ FC_INNER_RETURN_VOID(JIT_MonEnter_Helper(obj, pbLockTaken, GetEEFuncEntryPointMacro(JIT_MonReliableEnter)));
+}
+HCIMPLEND
+
+#include <optdefault.h>
+
+
+/*********************************************************************/
+NOINLINE static void JIT_MonTryEnter_Helper(Object* obj, INT32 timeOut, BYTE* pbLockTaken)
+{
+ FC_INNER_PROLOG(JIT_MonTryEnter);
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, objRef);
+
+ if (objRef == NULL)
+ COMPlusThrow(kArgumentNullException);
+
+ if (timeOut < -1)
+ COMPlusThrow(kArgumentOutOfRangeException);
+
+ GCPROTECT_BEGININTERIOR(pbLockTaken);
+
+ if (GET_THREAD()->CatchAtSafePointOpportunistic())
+ {
+ GET_THREAD()->PulseGCMode();
+ }
+
+ BOOL result = objRef->TryEnterObjMonitor(timeOut);
+ *pbLockTaken = result != FALSE;
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+}
+
+#include <optsmallperfcritical.h>
+HCIMPL3(void, JIT_MonTryEnter_Portable, Object* obj, INT32 timeOut, BYTE* pbLockTaken)
+{
+ FCALL_CONTRACT;
+
+ AwareLock::EnterHelperResult result;
+ Thread * pCurThread;
+
+ if (obj == NULL)
+ {
+ goto FramedLockHelper;
+ }
+
+ if (timeOut < -1)
+ {
+ goto FramedLockHelper;
+ }
+
+ pCurThread = GetThread();
+
+ if (pCurThread->CatchAtSafePointOpportunistic())
+ {
+ goto FramedLockHelper;
+ }
+
+ result = obj->EnterObjMonitorHelper(pCurThread);
+ if (result == AwareLock::EnterHelperResult_Entered)
+ {
+ *pbLockTaken = 1;
+ return;
+ }
+ else
+ if (result == AwareLock::EnterHelperResult_Contention)
+ {
+ if (timeOut == 0)
+ return;
+
+ AwareLock::EnterHelperResult resultSpin = obj->EnterObjMonitorHelperSpin(pCurThread);
+ if (resultSpin == AwareLock::EnterHelperResult_Entered)
+ {
+ *pbLockTaken = 1;
+ return;
+ }
+ }
+
+FramedLockHelper:
+ FC_INNER_RETURN_VOID(JIT_MonTryEnter_Helper(obj, timeOut, pbLockTaken));
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*********************************************************************/
+NOINLINE static void JIT_MonExit_Helper(Object* obj, BYTE* pbLockTaken)
+{
+ FC_INNER_PROLOG(JIT_MonExit);
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT|Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, objRef);
+
+ if (objRef == NULL)
+ COMPlusThrow(kArgumentNullException);
+
+ if (!objRef->LeaveObjMonitor())
+ COMPlusThrow(kSynchronizationLockException);
+
+ if (pbLockTaken != 0) *pbLockTaken = 0;
+
+ TESTHOOKCALL(AppDomainCanBeUnloaded(GET_THREAD()->GetDomain()->GetId().m_dwId,FALSE));
+
+ if (GET_THREAD()->IsAbortRequested()) {
+ GET_THREAD()->HandleThreadAbort();
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+}
+
+NOINLINE static void JIT_MonExit_Signal(Object* obj)
+{
+ FC_INNER_PROLOG(JIT_MonExit);
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT|Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, objRef);
+
+ // Signal the event
+ SyncBlock *psb = objRef->PassiveGetSyncBlock();
+ if (psb != NULL)
+ psb->QuickGetMonitor()->Signal();
+
+ TESTHOOKCALL(AppDomainCanBeUnloaded(GET_THREAD()->GetDomain()->GetId().m_dwId,FALSE));
+
+ if (GET_THREAD()->IsAbortRequested()) {
+ GET_THREAD()->HandleThreadAbort();
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+}
+
+#include <optsmallperfcritical.h>
+FCIMPL1(void, JIT_MonExit_Portable, Object* obj)
+{
+ FCALL_CONTRACT;
+
+ AwareLock::LeaveHelperAction action;
+
+ if (obj == NULL)
+ {
+ goto FramedLockHelper;
+ }
+
+ // Handle the simple case without erecting helper frame
+ action = obj->LeaveObjMonitorHelper(GetThread());
+ if (action == AwareLock::LeaveHelperAction_None)
+ {
+ return;
+ }
+ else
+ if (action == AwareLock::LeaveHelperAction_Signal)
+ {
+ FC_INNER_RETURN_VOID(JIT_MonExit_Signal(obj));
+ }
+
+FramedLockHelper:
+ FC_INNER_RETURN_VOID(JIT_MonExit_Helper(obj, NULL));
+}
+HCIMPLEND
+
+HCIMPL_MONHELPER(JIT_MonExitWorker_Portable, Object* obj)
+{
+ FCALL_CONTRACT;
+
+ MONHELPER_STATE(_ASSERTE(pbLockTaken != NULL));
+ MONHELPER_STATE(if (*pbLockTaken == 0) return;)
+
+ AwareLock::LeaveHelperAction action;
+
+ if (obj == NULL)
+ {
+ goto FramedLockHelper;
+ }
+
+ // Handle the simple case without erecting helper frame
+ action = obj->LeaveObjMonitorHelper(GetThread());
+ if (action == AwareLock::LeaveHelperAction_None)
+ {
+ MONHELPER_STATE(*pbLockTaken = 0;)
+ return;
+ }
+ else
+ if (action == AwareLock::LeaveHelperAction_Signal)
+ {
+ MONHELPER_STATE(*pbLockTaken = 0;)
+ FC_INNER_RETURN_VOID(JIT_MonExit_Signal(obj));
+ }
+
+FramedLockHelper:
+ FC_INNER_RETURN_VOID(JIT_MonExit_Helper(obj, MONHELPER_ARG));
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*********************************************************************/
+NOINLINE static void JIT_MonEnterStatic_Helper(AwareLock *lock, BYTE* pbLockTaken)
+{
+ // The following makes sure that Monitor.Enter shows up on thread abort
+ // stack walks (otherwise Monitor.Enter called within a CER can block a
+ // thread abort indefinitely). Setting the __me internal variable (normally
+ // only set for fcalls) will cause the helper frame below to be able to
+ // backtranslate into the method desc for the Monitor.Enter fcall.
+ FC_INNER_PROLOG(JIT_MonEnter);
+
+ // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+ lock->Enter();
+ MONHELPER_STATE(*pbLockTaken = 1;)
+ HELPER_METHOD_FRAME_END_POLL();
+
+ FC_INNER_EPILOG();
+}
+
+#include <optsmallperfcritical.h>
+HCIMPL_MONHELPER(JIT_MonEnterStatic_Portable, AwareLock *lock)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(lock);
+
+ MONHELPER_STATE(_ASSERTE(pbLockTaken != NULL && *pbLockTaken == 0));
+
+ Thread *pCurThread = GetThread();
+
+ if (pCurThread->CatchAtSafePointOpportunistic())
+ {
+ goto FramedLockHelper;
+ }
+
+ if (lock->EnterHelper(pCurThread) == AwareLock::EnterHelperResult_Entered)
+ {
+#if defined(_DEBUG) && defined(TRACK_SYNC)
+ // The best place to grab this is from the ECall frame
+ Frame * pFrame = pCurThread->GetFrame();
+ int caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
+ pCurThread->m_pTrackSync->EnterSync(caller, lock);
+#endif
+
+ MONHELPER_STATE(*pbLockTaken = 1;)
+ return;
+ }
+
+FramedLockHelper:
+ FC_INNER_RETURN_VOID(JIT_MonEnterStatic_Helper(lock, MONHELPER_ARG));
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*********************************************************************/
+NOINLINE static void JIT_MonExitStatic_Helper(AwareLock *lock, BYTE* pbLockTaken)
+{
+ FC_INNER_PROLOG(JIT_MonExit);
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_NO_THREAD_ABORT|Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+
+ // Error, yield or contention
+ if (!lock->Leave())
+ COMPlusThrow(kSynchronizationLockException);
+ MONHELPER_STATE(*pbLockTaken = 0;)
+
+ TESTHOOKCALL(AppDomainCanBeUnloaded(GET_THREAD()->GetDomain()->GetId().m_dwId,FALSE));
+ if (GET_THREAD()->IsAbortRequested()) {
+ GET_THREAD()->HandleThreadAbort();
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+}
+
+NOINLINE static void JIT_MonExitStatic_Signal(AwareLock *lock)
+{
+ FC_INNER_PROLOG(JIT_MonExit);
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_NO_THREAD_ABORT|Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+
+ lock->Signal();
+
+ TESTHOOKCALL(AppDomainCanBeUnloaded(GET_THREAD()->GetDomain()->GetId().m_dwId,FALSE));
+ if (GET_THREAD()->IsAbortRequested()) {
+ GET_THREAD()->HandleThreadAbort();
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+}
+
+#include <optsmallperfcritical.h>
+HCIMPL_MONHELPER(JIT_MonExitStatic_Portable, AwareLock *lock)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(lock);
+
+ MONHELPER_STATE(_ASSERTE(pbLockTaken != NULL));
+ MONHELPER_STATE(if (*pbLockTaken == 0) return;)
+
+ // Handle the simple case without erecting helper frame
+ AwareLock::LeaveHelperAction action = lock->LeaveHelper(GetThread());
+ if (action == AwareLock::LeaveHelperAction_None)
+ {
+ MONHELPER_STATE(*pbLockTaken = 0;)
+ return;
+ }
+ else
+ if (action == AwareLock::LeaveHelperAction_Signal)
+ {
+ MONHELPER_STATE(*pbLockTaken = 0;)
+ FC_INNER_RETURN_VOID(JIT_MonExitStatic_Signal(lock));
+ }
+
+ FC_INNER_RETURN_VOID(JIT_MonExitStatic_Helper(lock, MONHELPER_ARG));
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*********************************************************************/
+// JITutil_Mon* are helpers that handle slow paths for JIT_Mon* methods
+// implemented in assembly. They do no spinning, unlike the
+// full-fledged portable implementations above.
+/*********************************************************************/
+
+/*********************************************************************/
+HCIMPL_MONHELPER(JITutil_MonEnterWorker, Object* obj)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ // The following makes sure that Monitor.Enter shows up on thread abort
+ // stack walks (otherwise Monitor.Enter called within a CER can block a
+ // thread abort indefinitely). Setting the __me internal variable (normally
+ // only set for fcalls) will cause the helper frame below to be able to
+ // backtranslate into the method desc for the Monitor.Enter fcall.
+ //
+    // Note that we need to explicitly initialize the Monitor.Enter fcall in
+    // code:SystemDomain::LoadBaseSystemClasses to make this work in the case
+    // where the first call ever to Monitor.Enter is done as a JIT helper
+    // for a synchronized method.
+ __me = GetEEFuncEntryPointMacro(JIT_MonEnter);
+
+ // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH, objRef);
+
+ if (objRef == NULL)
+ COMPlusThrow(kArgumentNullException);
+
+ MONHELPER_STATE(GCPROTECT_BEGININTERIOR(pbLockTaken);)
+
+#ifdef _DEBUG
+ Thread *pThread = GetThread();
+ DWORD lockCount = pThread->m_dwLockCount;
+#endif
+ if (GET_THREAD()->CatchAtSafePointOpportunistic())
+ {
+ GET_THREAD()->PulseGCMode();
+ }
+ objRef->EnterObjMonitor();
+ _ASSERTE ((objRef->GetSyncBlock()->GetMonitor()->m_Recursion == 1 && pThread->m_dwLockCount == lockCount + 1) ||
+ pThread->m_dwLockCount == lockCount);
+ MONHELPER_STATE(if (pbLockTaken != 0) *pbLockTaken = 1;)
+
+ MONHELPER_STATE(GCPROTECT_END();)
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+/*********************************************************************/
+
+// This helper is only ever used as part of FCall, but it is implemented using HCIMPL macro
+// so that it can be tail called from assembly helper without triggering asserts in debug.
+HCIMPL2(void, JITutil_MonReliableEnter, Object* obj, BYTE* pbLockTaken)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ // The following makes sure that Monitor.Enter shows up on thread abort
+ // stack walks (otherwise Monitor.Enter called within a CER can block a
+ // thread abort indefinitely). Setting the __me internal variable (normally
+ // only set for fcalls) will cause the helper frame below to be able to
+ // backtranslate into the method desc for the Monitor.Enter fcall.
+ __me = GetEEFuncEntryPointMacro(JIT_MonReliableEnter);
+
+ // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH, objRef);
+
+ if (objRef == NULL)
+ COMPlusThrow(kArgumentNullException);
+
+ GCPROTECT_BEGININTERIOR(pbLockTaken);
+
+#ifdef _DEBUG
+ Thread *pThread = GetThread();
+ DWORD lockCount = pThread->m_dwLockCount;
+#endif
+ if (GET_THREAD()->CatchAtSafePointOpportunistic())
+ {
+ GET_THREAD()->PulseGCMode();
+ }
+ objRef->EnterObjMonitor();
+ _ASSERTE ((objRef->GetSyncBlock()->GetMonitor()->m_Recursion == 1 && pThread->m_dwLockCount == lockCount + 1) ||
+ pThread->m_dwLockCount == lockCount);
+ *pbLockTaken = 1;
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+
+/*********************************************************************/
+
+// This helper is only ever used as part of FCall, but it is implemented using HCIMPL macro
+// so that it can be tail called from assembly helper without triggering asserts in debug.
+HCIMPL3(void, JITutil_MonTryEnter, Object* obj, INT32 timeOut, BYTE* pbLockTaken)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ BOOL result = FALSE;
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ // The following makes sure that Monitor.TryEnter shows up on thread
+ // abort stack walks (otherwise Monitor.TryEnter called within a CER can
+ // block a thread abort for long periods of time). Setting the __me internal
+ // variable (normally only set for fcalls) will cause the helper frame below
+ // to be able to backtranslate into the method desc for the Monitor.TryEnter
+ // fcall.
+ __me = GetEEFuncEntryPointMacro(JIT_MonTryEnter);
+
+ // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH, objRef);
+
+ if (objRef == NULL)
+ COMPlusThrow(kArgumentNullException);
+
+ if (timeOut < -1)
+ COMPlusThrow(kArgumentOutOfRangeException);
+
+ GCPROTECT_BEGININTERIOR(pbLockTaken);
+
+ if (GET_THREAD()->CatchAtSafePointOpportunistic())
+ {
+ GET_THREAD()->PulseGCMode();
+ }
+
+ result = objRef->TryEnterObjMonitor(timeOut);
+ *pbLockTaken = result != FALSE;
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
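+
+// For reference (semantics of the managed Monitor.TryEnter surface this
+// backs, matching the checks above): a timeOut of -1 (Timeout.Infinite)
+// waits indefinitely, 0 probes the lock exactly once, and any value below
+// -1 is rejected with ArgumentOutOfRangeException.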
+
+/*********************************************************************/
+HCIMPL_MONHELPER(JITutil_MonExitWorker, Object* obj)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ MONHELPER_STATE(if (pbLockTaken != NULL && *pbLockTaken == 0) return;)
+
+ OBJECTREF objRef = ObjectToOBJECTREF(obj);
+
+ // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT|Frame::FRAME_ATTR_EXACT_DEPTH, objRef);
+
+ if (objRef == NULL)
+ COMPlusThrow(kArgumentNullException);
+
+ if (!objRef->LeaveObjMonitor())
+ COMPlusThrow(kSynchronizationLockException);
+
+ MONHELPER_STATE(if (pbLockTaken != 0) *pbLockTaken = 0;)
+
+ TESTHOOKCALL(AppDomainCanBeUnloaded(GET_THREAD()->GetDomain()->GetId().m_dwId,FALSE));
+
+ if (GET_THREAD()->IsAbortRequested()) {
+ GET_THREAD()->HandleThreadAbort();
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+/*********************************************************************/
+// A helper for JIT_MonEnter that is on the callee side of an ecall
+// frame and handles the contention case.
+
+HCIMPL_MONHELPER(JITutil_MonContention, AwareLock* lock)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ // The following makes sure that Monitor.Enter shows up on thread abort
+ // stack walks (otherwise Monitor.Enter called within a CER can block a
+ // thread abort indefinitely). Setting the __me internal variable (normally
+ // only set for fcalls) will cause the helper frame below to be able to
+ // backtranslate into the method desc for the Monitor.Enter fcall.
+ __me = GetEEFuncEntryPointMacro(JIT_MonEnter);
+
+ // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH);
+ MONHELPER_STATE(GCPROTECT_BEGININTERIOR(pbLockTaken);)
+
+#ifdef _DEBUG
+ Thread *pThread = GetThread();
+ DWORD lockCount = pThread->m_dwLockCount;
+#endif
+ lock->Contention();
+ _ASSERTE (pThread->m_dwLockCount == lockCount + 1);
+ MONHELPER_STATE(if (pbLockTaken != 0) *pbLockTaken = 1;)
+
+ MONHELPER_STATE(GCPROTECT_END();)
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+// This helper is only ever used as part of FCall, but it is implemented using HCIMPL macro
+// so that it can be tail called from assembly helper without triggering asserts in debug.
+HCIMPL2(void, JITutil_MonReliableContention, AwareLock* lock, BYTE* pbLockTaken)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ // The following makes sure that Monitor.Enter shows up on thread abort
+ // stack walks (otherwise Monitor.Enter called within a CER can block a
+ // thread abort indefinitely). Setting the __me internal variable (normally
+ // only set for fcalls) will cause the helper frame below to be able to
+ // backtranslate into the method desc for the Monitor.Enter fcall.
+ __me = GetEEFuncEntryPointMacro(JIT_MonReliableEnter);
+
+ // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH);
+ GCPROTECT_BEGININTERIOR(pbLockTaken);
+
+#ifdef _DEBUG
+ Thread *pThread = GetThread();
+ DWORD lockCount = pThread->m_dwLockCount;
+#endif
+ lock->Contention();
+ _ASSERTE (pThread->m_dwLockCount == lockCount + 1);
+ *pbLockTaken = 1;
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+/*********************************************************************/
+// A helper for JIT_MonExit and JIT_MonExitStatic that is on the
+// callee side of an ecall frame and handles cases that might allocate,
+// throw or block.
+HCIMPL_MONHELPER(JITutil_MonSignal, AwareLock* lock)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH | Frame::FRAME_ATTR_NO_THREAD_ABORT);
+
+ lock->Signal();
+ MONHELPER_STATE(if (pbLockTaken != 0) *pbLockTaken = 0;)
+
+ TESTHOOKCALL(AppDomainCanBeUnloaded(GET_THREAD()->GetDomain()->GetId().m_dwId,FALSE));
+
+ if (GET_THREAD()->IsAbortRequested()) {
+ GET_THREAD()->HandleThreadAbort();
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+HCIMPL1(void *, JIT_GetSyncFromClassHandle, CORINFO_CLASS_HANDLE typeHnd_)
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(typeHnd_ != NULL);
+ } CONTRACTL_END;
+
+ void * result = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL(); // Set up a frame
+
+ TypeHandle typeHnd(typeHnd_);
+ MethodTable *pMT = typeHnd.AsMethodTable();
+
+ OBJECTREF ref = pMT->GetManagedClassObject();
+ _ASSERTE(ref);
+
+ result = (void*)ref->GetSyncBlock()->GetMonitor();
+
+ HELPER_METHOD_FRAME_END();
+
+ return(result);
+
+HCIMPLEND
+
+
+//========================================================================
+//
+// EXCEPTION HELPERS
+//
+//========================================================================
+
+// In general, we want to use COMPlusThrow to throw exceptions. However,
+// the IL_Throw helper is a special case. Here, we're called from
+// managed code. We have a guarantee that the first FS:0 handler
+// is our COMPlusFrameHandler. We could call COMPlusThrow(), which pushes
+// another handler, but there is a significant (10% on JGFExceptionBench)
+// performance gain if we avoid this by calling RaiseTheException()
+// directly.
+//
+
+/*************************************************************/
+
+HCIMPL1(void, IL_Throw, Object* obj)
+{
+ FCALL_CONTRACT;
+
+ // This "violation" isn't a really a violation.
+ // We are calling a assembly helper that can't have an SO Tolerance contract
+ CONTRACT_VIOLATION(SOToleranceViolation);
+ /* Make no assumptions about the current machine state */
+ ResetCurrentContext();
+
+ FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
+
+ OBJECTREF oref = ObjectToOBJECTREF(obj);
+
+#if defined(_DEBUG) && defined(_TARGET_X86_)
+ __helperframe.InsureInit(false, NULL);
+ g_ExceptionEIP = (LPVOID)__helperframe.GetReturnAddress();
+#endif // defined(_DEBUG) && defined(_TARGET_X86_)
+
+
+ if (oref == 0)
+ COMPlusThrow(kNullReferenceException);
+ else
+ if (!IsException(oref->GetMethodTable()))
+ {
+ GCPROTECT_BEGIN(oref);
+
+ WrapNonCompliantException(&oref);
+
+ GCPROTECT_END();
+ }
+ else
+ { // We know that the object derives from System.Exception
+ if (g_CLRPolicyRequested &&
+ oref->GetMethodTable() == g_pOutOfMemoryExceptionClass)
+ {
+ EEPolicy::HandleOutOfMemory();
+ }
+
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ // If the flag indicating ForeignExceptionRaise has been set,
+ // then do not clear the "_stackTrace" field of the exception object.
+ if (GetThread()->GetExceptionState()->IsRaisingForeignException())
+ {
+ ((EXCEPTIONREF)oref)->SetStackTraceString(NULL);
+ }
+ else
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+ {
+ ((EXCEPTIONREF)oref)->ClearStackTracePreservingRemoteStackTrace();
+ }
+ }
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ if (!g_pConfig->LegacyCorruptedStateExceptionsPolicy())
+ {
+        // Within the VM, we could have thrown and caught a managed exception. This is done by
+        // RaiseTheException, which will flag that exception's corruption severity to be used
+        // in case it leaks out to managed code.
+ //
+ // If it does not leak out, but ends up calling into managed code that throws,
+ // we will come here. In such a case, simply reset the corruption-severity
+ // since we want the exception being thrown to have its correct severity set
+ // when CLR's managed code exception handler sets it.
+
+ ThreadExceptionState *pExState = GetThread()->GetExceptionState();
+ pExState->SetLastActiveExceptionCorruptionSeverity(NotSet);
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ RaiseTheExceptionInternalOnly(oref, FALSE);
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+/*************************************************************/
+
+HCIMPL0(void, IL_Rethrow)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
+
+ OBJECTREF throwable = GetThread()->GetThrowable();
+ if (throwable != NULL)
+ {
+ if (g_CLRPolicyRequested &&
+ throwable->GetMethodTable() == g_pOutOfMemoryExceptionClass)
+ {
+ EEPolicy::HandleOutOfMemory();
+ }
+
+ RaiseTheExceptionInternalOnly(throwable, TRUE);
+ }
+ else
+ {
+ // This can only be the result of bad IL (or some internal EE failure).
+ _ASSERTE(!"No throwable on rethrow");
+ RealCOMPlusThrow(kInvalidProgramException, (UINT)IDS_EE_RETHROW_NOT_ALLOWED);
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL0(void, JIT_RngChkFail)
+{
+ FCALL_CONTRACT;
+
+ /* Make no assumptions about the current machine state */
+ ResetCurrentContext();
+
+ FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
+
+ COMPlusThrow(kIndexOutOfRangeException);
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL0(void, JIT_Overflow)
+{
+ FCALL_CONTRACT;
+
+ /* Make no assumptions about the current machine state */
+ ResetCurrentContext();
+
+ FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
+
+ COMPlusThrow(kOverflowException);
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL0(void, JIT_ThrowDivZero)
+{
+ FCALL_CONTRACT;
+
+ /* Make no assumptions about the current machine state */
+ ResetCurrentContext();
+
+ FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
+
+ COMPlusThrow(kDivideByZeroException);
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL1(void, IL_VerificationError, int ilOffset)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
+
+ COMPlusThrow(kVerificationException);
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL1(void, JIT_SecurityUnmanagedCodeException, CORINFO_CLASS_HANDLE typeHnd_)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
+
+ Security::ThrowSecurityException(g_SecurityPermissionClassName, SPFLAGSUNMANAGEDCODE);
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+/*********************************************************************/
+static RuntimeExceptionKind MapCorInfoExceptionToRuntimeExceptionKind(unsigned exceptNum)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ static const RuntimeExceptionKind map[CORINFO_Exception_Count] =
+ {
+ kNullReferenceException,
+ kDivideByZeroException,
+ kInvalidCastException,
+ kIndexOutOfRangeException,
+ kOverflowException,
+ kSynchronizationLockException,
+ kArrayTypeMismatchException,
+ kRankException,
+ kArgumentNullException,
+ kArgumentException,
+ };
+
+ // spot check of the array above
+ _ASSERTE(map[CORINFO_NullReferenceException] == kNullReferenceException);
+ _ASSERTE(map[CORINFO_DivideByZeroException] == kDivideByZeroException);
+ _ASSERTE(map[CORINFO_IndexOutOfRangeException] == kIndexOutOfRangeException);
+ _ASSERTE(map[CORINFO_OverflowException] == kOverflowException);
+ _ASSERTE(map[CORINFO_SynchronizationLockException] == kSynchronizationLockException);
+ _ASSERTE(map[CORINFO_ArrayTypeMismatchException] == kArrayTypeMismatchException);
+ _ASSERTE(map[CORINFO_RankException] == kRankException);
+ _ASSERTE(map[CORINFO_ArgumentNullException] == kArgumentNullException);
+ _ASSERTE(map[CORINFO_ArgumentException] == kArgumentException);
+
+ PREFIX_ASSUME(exceptNum < CORINFO_Exception_Count);
+ return map[exceptNum];
+}
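+
+// Illustrative use (an assumption about JIT codegen): when the JIT statically
+// proves an operation must fail, it can emit a direct call such as
+//
+//   JIT_InternalThrow(CORINFO_NullReferenceException);
+//
+// and the table above maps that CorInfo code onto the matching
+// RuntimeExceptionKind before throwing.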
+
+/*********************************************************************/
+HCIMPL1(void, JIT_InternalThrow, unsigned exceptNum)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXACT_DEPTH);
+ COMPlusThrow(MapCorInfoExceptionToRuntimeExceptionKind(exceptNum));
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+/*********************************************************************/
+HCIMPL1(void*, JIT_InternalThrowFromHelper, unsigned exceptNum)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(Frame::FRAME_ATTR_CAPTURE_DEPTH_2|Frame::FRAME_ATTR_EXACT_DEPTH);
+ COMPlusThrow(MapCorInfoExceptionToRuntimeExceptionKind(exceptNum));
+ HELPER_METHOD_FRAME_END();
+ return NULL;
+}
+HCIMPLEND
+
+#ifndef STATUS_STACK_BUFFER_OVERRUN // Not defined yet in CESDK includes
+# define STATUS_STACK_BUFFER_OVERRUN ((NTSTATUS)0xC0000409L)
+#endif
+
+/*********************************************************************
+ * Kill process without using any potentially corrupted data:
+ * o Do not throw an exception
+ * o Do not call any indirect/virtual functions
+ * o Do not depend on any global data
+ *
+ * This function is used by the security checks for unsafe buffers (VC's -GS checks)
+ */
+
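+// Conceptually (a hedged sketch of the GS-cookie epilogue check that reaches
+// this helper; the names are illustrative, not code from this change):
+//
+//   if (stackCookie != __security_cookie)   // cookie was overwritten
+//       DoJITFailFast();                    // never returns
+//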
+void DoJITFailFast ()
+{
+ CONTRACTL {
+ MODE_ANY;
+ WRAPPER(GC_TRIGGERS);
+ WRAPPER(THROWS);
+ SO_NOT_MAINLINE; // If process is coming down, SO probe is not going to do much good
+ } CONTRACTL_END;
+
+ LOG((LF_ALWAYS, LL_FATALERROR, "Unsafe buffer security check failure: Buffer overrun detected"));
+
+#ifdef _DEBUG
+ if (g_pConfig->fAssertOnFailFast())
+ _ASSERTE(!"About to FailFast. set ComPlus_AssertOnFailFast=0 if this is expected");
+#endif
+
+#ifndef FEATURE_PAL
+ // Use the function provided by the C runtime.
+ //
+ // Ideally, this function is called directly from managed code so
+ // that the address of the managed function will be included in the
+ // error log. However, this function is also used by the stackwalker.
+ // To keep things simple, we just call it from here.
+#if defined(_TARGET_X86_)
+ __report_gsfailure();
+#else // !defined(_TARGET_X86_)
+ // On AMD64/IA64/ARM, we need to pass a stack cookie, which will be saved in the context record
+ // that is used to raise the buffer-overrun exception by __report_gsfailure.
+ __report_gsfailure((ULONG_PTR)0);
+#endif // defined(_TARGET_X86_)
+#else // FEATURE_PAL
+ if(ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, FailFast))
+ {
+ // Fire an ETW FailFast event
+ FireEtwFailFast(L"Unsafe buffer security check failure: Buffer overrun detected",
+ (const PVOID)GetThread()->GetFrame()->GetIP(),
+ STATUS_STACK_BUFFER_OVERRUN,
+ COR_E_EXECUTIONENGINE,
+ GetClrInstanceId());
+ }
+
+ TerminateProcess(GetCurrentProcess(), STATUS_STACK_BUFFER_OVERRUN);
+#endif // !FEATURE_PAL
+}
+
+HCIMPL0(void, JIT_FailFast)
+{
+ FCALL_CONTRACT;
+ DoJITFailFast ();
+}
+HCIMPLEND
+
+HCIMPL2(void, JIT_ThrowMethodAccessException, CORINFO_METHOD_HANDLE caller, CORINFO_METHOD_HANDLE callee)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
+
+ MethodDesc* pCallerMD = GetMethod(caller);
+
+ _ASSERTE(pCallerMD != NULL);
+ StaticAccessCheckContext accessContext(pCallerMD);
+
+ ThrowMethodAccessException(&accessContext, GetMethod(callee));
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND
+
+HCIMPL2(void, JIT_ThrowFieldAccessException, CORINFO_METHOD_HANDLE caller, CORINFO_FIELD_HANDLE callee)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
+
+ MethodDesc* pCallerMD = GetMethod(caller);
+
+ _ASSERTE(pCallerMD != NULL);
+ StaticAccessCheckContext accessContext(pCallerMD);
+
+ ThrowFieldAccessException(&accessContext, reinterpret_cast<FieldDesc *>(callee));
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND;
+
+HCIMPL2(void, JIT_ThrowClassAccessException, CORINFO_METHOD_HANDLE caller, CORINFO_CLASS_HANDLE callee)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
+
+ MethodDesc* pCallerMD = GetMethod(caller);
+
+ _ASSERTE(pCallerMD != NULL);
+ StaticAccessCheckContext accessContext(pCallerMD);
+
+ ThrowTypeAccessException(&accessContext, TypeHandle(callee).GetMethodTable());
+
+ HELPER_METHOD_FRAME_END();
+}
+HCIMPLEND;
+
+//========================================================================
+//
+// SECURITY HELPERS
+//
+//========================================================================
+
+HCIMPL2(void, JIT_DelegateSecurityCheck_Internal, CORINFO_CLASS_HANDLE delegateHnd, CORINFO_METHOD_HANDLE calleeMethodHnd)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ TypeHandle delegateType(delegateHnd);
+ MethodDesc* pCallee = GetMethod(calleeMethodHnd);
+
+ Security::EnforceTransparentDelegateChecks(delegateType.AsMethodTable(), pCallee);
+
+ HELPER_METHOD_FRAME_END_POLL();
+}
+HCIMPLEND
+
+/*************************************************************/
+HCIMPL2(void, JIT_DelegateSecurityCheck, CORINFO_CLASS_HANDLE delegateHnd, CORINFO_METHOD_HANDLE calleeMethodHnd)
+{
+ FCALL_CONTRACT;
+
+#ifdef FEATURE_CORECLR
+ // If we're in full trust, then we don't enforce the delegate binding rules
+ if (GetAppDomain()->GetSecurityDescriptor()->IsFullyTrusted())
+ {
+ return;
+ }
+#endif // FEATURE_CORECLR
+
+ // Tailcall to the real implementation
+ ENDFORBIDGC();
+ HCCALL2(JIT_DelegateSecurityCheck_Internal, delegateHnd, calleeMethodHnd);
+}
+HCIMPLEND
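+
+// Pattern note (applies to the other quick-check/_Internal pairs below as
+// well): the outer fcall performs only checks that are safe inside the
+// implicit FORBID GC region; ENDFORBIDGC() closes that region so HCCALLn can
+// tail-call the framed _Internal helper, which is free to throw or trigger GC.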
+
+
+/*************************************************************/
+// Make sure to allow a check of 0 for COMPlus_Security_AlwaysInsertCallout
+HCIMPL4(void, JIT_MethodAccessCheck_Internal, CORINFO_METHOD_HANDLE callerMethodHnd, CORINFO_METHOD_HANDLE calleeMethodHnd, CORINFO_CLASS_HANDLE calleeTypeHnd, CorInfoSecurityRuntimeChecks check)
+{
+ FCALL_CONTRACT;
+
+ //
+ // Verify with the security at runtime whether call is allowed.
+ // Throws an exception if the call is not allowed, returns if it is allowed.
+ //
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ MethodDesc *pCaller = GetMethod(callerMethodHnd);
+ MethodDesc *pCallee = GetMethod(calleeMethodHnd);
+ // If we're being called because of a transparency violation (either a standard violation, or an attempt
+ // to call a conditional APTCA protected method from transparent code), process that now.
+ if (check & CORINFO_ACCESS_SECURITY_TRANSPARENCY)
+ {
+ Security::EnforceTransparentAssemblyChecks(pCaller, pCallee);
+ }
+
+ // Also make sure that we have access to the type that the method lives on
+ TypeHandle calleeTH(calleeTypeHnd);
+ Security::DoSecurityClassAccessChecks(pCaller, calleeTH, check);
+
+ // If the method has a generic instantiation, then we also need to do checks on its generic parameters
+ if (pCallee->HasMethodInstantiation())
+ {
+ Instantiation instantiation = pCallee->GetMethodInstantiation();
+ for (DWORD i = 0; i < instantiation.GetNumArgs(); i++)
+ {
+ TypeHandle argTH = instantiation[i];
+ if (!argTH.IsGenericVariable())
+ {
+ Security::DoSecurityClassAccessChecks(pCaller, argTH, check);
+ }
+ }
+ }
+
+ HELPER_METHOD_FRAME_END_POLL();
+}
+HCIMPLEND
+
+
+/*************************************************************/
+// Make sure to allow a check of 0 for COMPlus_Security_AlwaysInsertCallout
+HCIMPL4(void, JIT_MethodAccessCheck, CORINFO_METHOD_HANDLE callerMethodHnd, CORINFO_METHOD_HANDLE calleeMethodHnd, CORINFO_CLASS_HANDLE calleeTypeHnd, CorInfoSecurityRuntimeChecks check)
+{
+ FCALL_CONTRACT;
+
+ MethodDesc *pCallerMD = GetMethod(callerMethodHnd);
+ _ASSERTE(GetMethod(callerMethodHnd)->IsRestored());
+ _ASSERTE(GetMethod(calleeMethodHnd)->IsRestored());
+
+
+ // If we don't need to process this callout, then exit early
+ if (Security::SecurityCalloutQuickCheck(pCallerMD))
+ {
+ return;
+ }
+
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ HCCALL4(JIT_MethodAccessCheck_Internal, callerMethodHnd, calleeMethodHnd, calleeTypeHnd, check);
+}
+HCIMPLEND
+
+
+// Slower checks (including failure paths) for determining if a method has runtime access to a field
+HCIMPL3(void, JIT_FieldAccessCheck_Internal, CORINFO_METHOD_HANDLE callerMethodHnd, CORINFO_FIELD_HANDLE calleeFieldHnd, CorInfoSecurityRuntimeChecks check)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ MethodDesc *pCallerMD = GetMethod(callerMethodHnd);
+ FieldDesc *pFD = reinterpret_cast<FieldDesc *>(calleeFieldHnd);
+
+ // We can get caller checks of 0 if we're in AlwaysInsertCallout mode, so make sure to do all of our
+ // work under checks for specific flags
+
+ if (check & CORINFO_ACCESS_SECURITY_TRANSPARENCY)
+ {
+ _ASSERTE(pCallerMD != NULL);
+ StaticAccessCheckContext accessContext(pCallerMD);
+
+ if (!Security::CheckCriticalAccess(&accessContext, NULL, pFD, NULL))
+ {
+ ThrowFieldAccessException(pCallerMD, pFD, TRUE, IDS_E_CRITICAL_FIELD_ACCESS_DENIED);
+ }
+ }
+
+ // Also make sure that we have access to the type that the field lives on
+ TypeHandle fieldTH(pFD->GetApproxEnclosingMethodTable());
+ Security::DoSecurityClassAccessChecks(pCallerMD, fieldTH, check);
+
+ HELPER_METHOD_FRAME_END_POLL();
+}
+HCIMPLEND
+
+// Check to see if a method has runtime access to a field
+HCIMPL3(void, JIT_FieldAccessCheck, CORINFO_METHOD_HANDLE callerMethodHnd, CORINFO_FIELD_HANDLE calleeFieldHnd, CorInfoSecurityRuntimeChecks check)
+{
+ FCALL_CONTRACT;
+ _ASSERTE(GetMethod(callerMethodHnd)->IsRestored());
+ _ASSERTE(((FieldDesc*)calleeFieldHnd)->GetEnclosingMethodTable()->IsRestored_NoLogging());
+
+ // We want to try to exit JIT_FieldAccessCheck as soon as possible, preferably without
+ // entering JIT_FieldAccessCheck_Internal. This method contains only quick checks to see if
+    // the access is definitely allowed. More complete checks are done in the Internal method.
+
+ MethodDesc *pCallerMD = GetMethod(callerMethodHnd);
+
+ // If we don't need to process this callout at all, exit early
+ if (Security::SecurityCalloutQuickCheck(pCallerMD))
+ {
+ return;
+ }
+
+ // If the callout is for conditional APTCA only and we know the target is enabled, then we can also exit
+ // early
+
+ // We couldn't quickly determine that this access is legal, so tailcall to the slower helper to do some
+ // more work to process the access.
+ ENDFORBIDGC();
+ HCCALL3(JIT_FieldAccessCheck_Internal, callerMethodHnd, calleeFieldHnd, check);
+}
+HCIMPLEND
+
+// Slower checks (including failure paths) for determining if a method has runtime access to a type
+HCIMPL3(void, JIT_ClassAccessCheck_Internal, CORINFO_METHOD_HANDLE callerMethodHnd, CORINFO_CLASS_HANDLE calleeClassHnd, CorInfoSecurityRuntimeChecks check)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ MethodDesc *pCallerMD = GetMethod(callerMethodHnd);
+ TypeHandle calleeClassTH(calleeClassHnd);
+
+ Security::DoSecurityClassAccessChecks(pCallerMD, calleeClassTH, check);
+
+ HELPER_METHOD_FRAME_END_POLL();
+}
+HCIMPLEND
+
+// Check to see if a method has runtime access to a type
+HCIMPL3(void, JIT_ClassAccessCheck, CORINFO_METHOD_HANDLE callerMethodHnd, CORINFO_CLASS_HANDLE calleeClassHnd, CorInfoSecurityRuntimeChecks check)
+{
+ FCALL_CONTRACT;
+ _ASSERTE(GetMethod(callerMethodHnd)->IsRestored());
+ _ASSERTE(TypeHandle(calleeClassHnd).IsRestored());
+
+ // We want to try to exit JIT_ClassAccessCheck as soon as possible, preferably without
+ // entering JIT_ClassAccessCheck_Internal. This method contains only quick checks to see if
+    // the access is definitely allowed. More complete checks are done in the Internal method.
+
+ MethodDesc *pCallerMD = GetMethod(callerMethodHnd);
+
+    // If we don't need to process the callout at all, exit early
+ if (Security::SecurityCalloutQuickCheck(pCallerMD))
+ {
+ return;
+ }
+
+ // If the callout is for conditional APTCA only, and we know the target is enabled, then we can also
+ // exit early
+
+ // We couldn't quickly determine that this access is legal, so tailcall to the slower helper to do some
+ // more work processing the access.
+ ENDFORBIDGC();
+ HCCALL3(JIT_ClassAccessCheck_Internal, callerMethodHnd, calleeClassHnd, check);
+}
+HCIMPLEND
+
+NOINLINE HCIMPL2(void, JIT_Security_Prolog_Framed, CORINFO_METHOD_HANDLE methHnd_, OBJECTREF* ppFrameSecDesc)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+ {
+ ASSUME_BYREF_FROM_JIT_STACK_BEGIN(ppFrameSecDesc);
+
+ MethodDesc *pCurrent = GetMethod(methHnd_);
+
+ g_IBCLogger.LogMethodDescAccess(pCurrent);
+
+ // Note: This check is replicated in JIT_Security_Prolog
+ if ((pCurrent->IsInterceptedForDeclSecurity() &&
+ !(pCurrent->IsInterceptedForDeclSecurityCASDemandsOnly() &&
+ SecurityStackWalk::HasFlagsOrFullyTrusted(0)))
+#ifdef FEATURE_COMPRESSEDSTACK
+ || SecurityStackWalk::MethodIsAnonymouslyHostedDynamicMethodWithCSToEvaluate(pCurrent)
+#endif //FEATURE_COMPRESSEDSTACK
+ )
+ {
+ MethodSecurityDescriptor MDSecDesc(pCurrent);
+ MethodSecurityDescriptor::LookupOrCreateMethodSecurityDescriptor(&MDSecDesc);
+
+ // Do the Declarative CAS actions check
+ DeclActionInfo* pRuntimeDeclActionInfo = MDSecDesc.GetRuntimeDeclActionInfo();
+ if (pRuntimeDeclActionInfo != NULL || pCurrent->IsLCGMethod())
+ {
+ // Tell the debugger not to start on any managed code that we call in this method
+ FrameWithCookie<DebuggerSecurityCodeMarkFrame> __dbgSecFrame;
+
+ Security::DoDeclarativeActions(pCurrent, pRuntimeDeclActionInfo, ppFrameSecDesc, &MDSecDesc);
+
+ // Pop the debugger frame
+ __dbgSecFrame.Pop();
+ }
+ }
+
+ ASSUME_BYREF_FROM_JIT_STACK_END();
+ }
+ HELPER_METHOD_FRAME_END_POLL();
+}
+HCIMPLEND
+
+/*************************************************************/
+#include <optsmallperfcritical.h>
+HCIMPL2(void, JIT_Security_Prolog, CORINFO_METHOD_HANDLE methHnd_, OBJECTREF* ppFrameSecDesc)
+{
+ FCALL_CONTRACT;
+
+ //
+ // do the security prolog work
+ //
+
+ MethodDesc *pCurrent = GetMethod(methHnd_);
+
+ // Note: This check is replicated in JIT_Security_Prolog_Framed
+ if ((pCurrent->IsInterceptedForDeclSecurity() &&
+ !(pCurrent->IsInterceptedForDeclSecurityCASDemandsOnly() &&
+ SecurityStackWalk::HasFlagsOrFullyTrusted(0)))
+ // We don't necessarily need to do work for LCG methods, but we need a frame
+ // to find out for sure
+ || pCurrent->IsLCGMethod())
+ {
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ HCCALL2(JIT_Security_Prolog_Framed, methHnd_, ppFrameSecDesc);
+ }
+}
+HCIMPLEND
+#include <optdefault.h>
+
+/*************************************************************/
+HCIMPL1(void, JIT_VerificationRuntimeCheck_Internal, CORINFO_METHOD_HANDLE methHnd_)
+{
+ FCALL_CONTRACT;
+
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+ {
+#ifdef FEATURE_CORECLR
+        // Transparent methods that contain unverifiable code are not allowed.
+ MethodDesc *pMethod = GetMethod(methHnd_);
+
+#if defined(FEATURE_CORECLR_COVERAGE_BUILD) && defined(FEATURE_STRONGNAME_DELAY_SIGNING_ALLOWED)
+        // For code coverage builds we have an issue where the inserted IL is not verifiable.
+        // This means that transparent methods in platform assemblies will throw verification exceptions.
+        // The temporary fix is to allow transparent methods in platform assemblies to be unverifiable, but only on coverage builds.
+        // Paranoia: allow this only on non-RET builds - every build type except RET has
+        // FEATURE_STRONGNAME_DELAY_SIGNING_ALLOWED defined, so we can use that to detect a RET build
+        // on which someone is trying to relax this constraint, and refuse to allow that.
+ if (!pMethod->GetModule()->GetFile()->GetAssembly()->IsProfileAssembly())
+ {
+ // Only throw if pMethod is not in any platform assembly.
+ SecurityTransparent::ThrowMethodAccessException(pMethod);
+ }
+#else // defined(FEATURE_CORECLR_COVERAGE_BUILD) && defined(FEATURE_STRONGNAME_DELAY_SIGNING_ALLOWED)
+
+ SecurityTransparent::ThrowMethodAccessException(pMethod);
+#endif // defined(FEATURE_CORECLR_COVERAGE_BUILD) && defined(FEATURE_STRONGNAME_DELAY_SIGNING_ALLOWED)
+
+#else // FEATURE_CORECLR
+ //
+    // inject a full demand for unmanaged code permission at runtime
+    // around methods in a transparent assembly that contains unverifiable code
+ Security::SpecialDemand(SSWT_DECLARATIVE_DEMAND, SECURITY_UNMANAGED_CODE);
+#endif // FEATURE_CORECLR
+ }
+ HELPER_METHOD_FRAME_END_POLL();
+}
+HCIMPLEND
+
+/*************************************************************/
+HCIMPL1(void, JIT_VerificationRuntimeCheck, CORINFO_METHOD_HANDLE methHnd_)
+{
+ FCALL_CONTRACT;
+
+ if (SecurityStackWalk::HasFlagsOrFullyTrustedIgnoreMode(0))
+ return;
+ //
+    // inject a full demand for unmanaged code permission at runtime
+    // around methods in a transparent assembly that contains unverifiable code
+ {
+ // Tailcall to the slow helper
+ ENDFORBIDGC();
+ HCCALL1(JIT_VerificationRuntimeCheck_Internal, methHnd_);
+ }
+
+}
+HCIMPLEND
+
+
+
+//========================================================================
+//
+// DEBUGGER/PROFILER HELPERS
+//
+//========================================================================
+
+/*********************************************************************/
+// JIT_UserBreakpoint
+// Called by the JIT whenever a cee_break instruction should be executed.
+// This ensures that enough info will be pushed onto the stack so that
+// we can continue from the exception w/o having special code elsewhere.
+// Body of function is written by debugger team
+// Args: None
+//
+// <TODO> make sure this actually gets called by all JITters</TODO>
+// Note: this code is duplicated in the ecall in VM\DebugDebugger:Break,
+// so propagate changes there
+
+HCIMPL0(void, JIT_UserBreakpoint)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL(); // Set up a frame
+
+#ifdef DEBUGGING_SUPPORTED
+ FrameWithCookie<DebuggerExitFrame> __def;
+
+ MethodDescCallSite breakCanThrow(METHOD__DEBUGGER__BREAK_CAN_THROW);
+
+ // Call Diagnostic.Debugger.BreakCanThrow instead. This will make us demand
+    // UnmanagedCode permission if a debugger is not attached.
+ //
+ breakCanThrow.Call((ARG_SLOT*)NULL);
+
+ __def.Pop();
+#else // !DEBUGGING_SUPPORTED
+ _ASSERTE(!"JIT_UserBreakpoint called, but debugging support is not available in this build.");
+#endif // !DEBUGGING_SUPPORTED
+
+ HELPER_METHOD_FRAME_END_POLL();
+}
+HCIMPLEND
+
+#if defined(_MSC_VER)
+// VC++ Compiler intrinsic.
+extern "C" void * _ReturnAddress(void);
+#endif
+
+/*********************************************************************/
+// Callback for Just-My-Code probe
+// Probe looks like:
+// if (*pFlag != 0) call JIT_DbgIsJustMyCode
+// So this is only called if the flag (obtained by GetJMCFlagAddr) is
+// non-zero.
+// Body of this function is maintained by the debugger people.
+HCIMPL0(void, JIT_DbgIsJustMyCode)
+{
+ FCALL_CONTRACT;
+ SO_NOT_MAINLINE_FUNCTION;
+
+ // We need to get both the ip of the managed function this probe is in
+ // (which will be our return address) and the frame pointer for that
+ // function (since we can't get it later because we're pushing unmanaged
+ // frames on the stack).
+ void * ip = NULL;
+
+ // <NOTE>
+ // In order for the return address to be correct, we must NOT call any
+ // function before calling _ReturnAddress().
+ // </NOTE>
+ ip = _ReturnAddress();
+
+ _ASSERTE(ip != NULL);
+
+ // Call into debugger proper
+ g_pDebugInterface->OnMethodEnter(ip);
+
+ return;
+}
+HCIMPLEND
+
+#if !(defined(_TARGET_X86_) || defined(_WIN64))
+void JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle)
+{
+ return;
+}
+#endif // !(_TARGET_X86_ || _WIN64)
+
+#ifdef PROFILING_SUPPORTED
+
+//---------------------------------------------------------------------------------------
+//
+// Sets the profiler's enter/leave/tailcall hooks into the JIT's dynamic helper
+// function table.
+//
+// Arguments:
+// pFuncEnter - Enter hook
+// pFuncLeave - Leave hook
+// pFuncTailcall - Tailcall hook
+//
+// For each hook parameter, if NULL is passed in, that will cause the JIT
+// to insert calls to its default stub replacement for that hook, which
+// just does a ret.
+//
+// Return Value:
+// HRESULT indicating success or failure
+//
+// Notes:
+// On IA64, this will allocate space for stubs to update GP, and that
+// allocation may take locks and may throw on failure. Callers be warned.
+//
+
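+// Hypothetical usage sketch (caller and hook names assumed, not taken from
+// this change): a profiler attach path might wire its hooks up as
+//
+//   hr = pEEToProf->SetEnterLeaveFunctionHooksForJit(MyEnter3, MyLeave3, NULL);
+//
+// passing NULL for the tailcall hook so the JIT binds that slot to the
+// default ret-only stub instead.
+//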
+HRESULT EEToProfInterfaceImpl::SetEnterLeaveFunctionHooksForJit(FunctionEnter3 * pFuncEnter,
+ FunctionLeave3 * pFuncLeave,
+ FunctionTailcall3 * pFuncTailcall)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ SetJitHelperFunction(
+ CORINFO_HELP_PROF_FCN_ENTER,
+ (pFuncEnter == NULL) ?
+ reinterpret_cast<void *>(JIT_ProfilerEnterLeaveTailcallStub) :
+ reinterpret_cast<void *>(pFuncEnter));
+
+ SetJitHelperFunction(
+ CORINFO_HELP_PROF_FCN_LEAVE,
+ (pFuncLeave == NULL) ?
+ reinterpret_cast<void *>(JIT_ProfilerEnterLeaveTailcallStub) :
+ reinterpret_cast<void *>(pFuncLeave));
+
+ SetJitHelperFunction(
+ CORINFO_HELP_PROF_FCN_TAILCALL,
+ (pFuncTailcall == NULL) ?
+ reinterpret_cast<void *>(JIT_ProfilerEnterLeaveTailcallStub) :
+ reinterpret_cast<void *>(pFuncTailcall));
+
+ return (S_OK);
+}
+#endif // PROFILING_SUPPORTED
+
+/*************************************************************/
+HCIMPL1(void, JIT_LogMethodEnter, CORINFO_METHOD_HANDLE methHnd_)
+ FCALL_CONTRACT;
+
+ //
+ // Record an access to this method desc
+ //
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ g_IBCLogger.LogMethodCodeAccess(GetMethod(methHnd_));
+
+ HELPER_METHOD_FRAME_END_POLL();
+
+HCIMPLEND
+
+
+
+//========================================================================
+//
+// GC HELPERS
+//
+//========================================================================
+
+/*************************************************************/
+HCIMPL3(VOID, JIT_StructWriteBarrier, void *dest, void* src, CORINFO_CLASS_HANDLE typeHnd_)
+{
+ FCALL_CONTRACT;
+
+ TypeHandle typeHnd(typeHnd_);
+ MethodTable *pMT = typeHnd.AsMethodTable();
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL(); // Set up a frame
+ CopyValueClassUnchecked(dest, src, pMT);
+ HELPER_METHOD_FRAME_END_POLL();
+
+}
+HCIMPLEND
+
+/*************************************************************/
+HCIMPL0(VOID, JIT_PollGC)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED();
+
+ Thread *thread = GetThread();
+ if (thread->CatchAtSafePointOpportunistic()) // Does someone want this thread stopped?
+ {
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL(); // Set up a frame
+#ifdef _DEBUG
+ BOOL GCOnTransition = FALSE;
+ if (g_pConfig->FastGCStressLevel()) {
+ GCOnTransition = GC_ON_TRANSITIONS (FALSE);
+ }
+#endif // _DEBUG
+ CommonTripThread(); // Indicate we are at a GC safe point
+#ifdef _DEBUG
+ if (g_pConfig->FastGCStressLevel()) {
+ GC_ON_TRANSITIONS (GCOnTransition);
+ }
+#endif // _DEBUG
+ HELPER_METHOD_FRAME_END();
+ }
+}
+HCIMPLEND
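+
+// Illustrative shape of the poll the JIT emits at loop back-edges (an
+// assumption about codegen; the inline test keeps the common case cheap):
+//
+//   if (g_TrapReturningThreads)   // anyone asking threads to stop?
+//       JIT_PollGC();             // rare path: erect a frame, trip to a safe point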
+
+/*************************************************************/
+// For an inlined N/Direct call (and possibly for other places that need this service)
+// we have noticed that the returning thread should trap for one reason or another.
+// ECall sets up the frame.
+
+#if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_)
+// The JIT expects this helper to preserve the return value on AMD64 and ARM. We should eventually
+// switch other platforms to the same convention since it produces smaller code.
+extern "C" FCDECL0(VOID, JIT_RareDisableHelper);
+extern "C" FCDECL0(VOID, JIT_RareDisableHelperWorker);
+
+HCIMPL0(void, JIT_RareDisableHelperWorker)
+#else
+HCIMPL0(void, JIT_RareDisableHelper)
+#endif
+{
+    // We do this here (before we set up a frame) because of the following scenario:
+    // We are in the process of doing an inlined pinvoke. Since we are in preemptive
+    // mode, the thread is allowed to continue. The thread continues and gets a context
+    // switch just after it has cleared the preemptive mode bit but before it gets
+    // to this helper. When we do our stack crawl now, we think this thread is
+    // in cooperative mode (and believe that it was suspended in the SuspendEE), so
+    // we do a getthreadcontext (on the unsuspended thread!), get an EIP in jitted code,
+    // and proceed. Assume the crawl of jitted frames is proceeding on the other thread
+    // when this thread wakes up and sets up a frame. Eventually the other thread
+    // runs out of jitted frames and sees the frame we just established. This causes
+    // an assert in the stack crawling code. If this assert is ignored, however, we
+    // will end up scanning the jitted frames twice, which will lead to GC holes.
+ //
+ // <TODO>TODO: It would be MUCH more robust if we should remember which threads
+ // we suspended in the SuspendEE, and only even consider using EIP if it was suspended
+ // in the first phase.
+ // </TODO>
+
+ BEGIN_PRESERVE_LAST_ERROR;
+
+ FCALL_CONTRACT;
+
+ Thread *thread = GetThread();
+
+ // We need to disable the implicit FORBID GC region that exists inside an FCALL
+ // in order to call RareDisablePreemptiveGC().
+ FC_CAN_TRIGGER_GC();
+ thread->RareDisablePreemptiveGC();
+ FC_CAN_TRIGGER_GC_END();
+
+ FC_GC_POLL_NOT_NEEDED();
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL(); // Set up a frame
+ thread->HandleThreadAbort();
+ HELPER_METHOD_FRAME_END();
+
+ END_PRESERVE_LAST_ERROR;
+}
+HCIMPLEND
+
+/*********************************************************************/
+// This is called by the JIT after every instruction in fully interruptible
+// code to make certain our GC tracking is OK
+HCIMPL0(VOID, JIT_StressGC_NOP)
+{
+ FCALL_CONTRACT;
+}
+HCIMPLEND
+
+
+HCIMPL0(VOID, JIT_StressGC)
+{
+ FCALL_CONTRACT;
+
+#ifdef _DEBUG
+ HELPER_METHOD_FRAME_BEGIN_0(); // Set up a frame
+
+ bool fSkipGC = false;
+
+ if (!fSkipGC)
+ GCHeap::GetGCHeap()->GarbageCollect();
+
+// <TODO>@TODO: the following ifdef is in error, but if it is corrected, the
+// compiler complains about the *__ms->pRetAddr(), saying the machine state
+// doesn't allow -></TODO>
+#ifdef _X86
+    // Get the machine state (from HELPER_METHOD_FRAME_BEGIN)
+    // and whack our return address to point at a nop function
+    BYTE* retInstrs = ((BYTE*) *__ms->pRetAddr()) - 4;
+    _ASSERTE(retInstrs[-1] == 0xE8);        // it is a call instruction
+        // Whack it to point to JIT_StressGC_NOP instead
+    FastInterlockExchange((LONG*) retInstrs, (LONG) JIT_StressGC_NOP);
+#endif // _X86
+
+ HELPER_METHOD_FRAME_END();
+#endif // _DEBUG
+}
+HCIMPLEND
+
+
+
+HCIMPL0(INT32, JIT_GetCurrentManagedThreadId)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED();
+
+ Thread * pThread = GetThread();
+ return pThread->GetThreadId();
+}
+HCIMPLEND
+
+
+/*********************************************************************/
+/* we don't use HCIMPL macros because we don't want the overhead even in debug mode */
+
+HCIMPL1_RAW(Object*, JIT_CheckObj, Object* obj)
+{
+ FCALL_CONTRACT;
+
+ if (obj != 0) {
+ MethodTable* pMT = obj->GetMethodTable();
+ if (!pMT->ValidateWithPossibleAV()) {
+ _ASSERTE(!"Bad Method Table");
+ FreeBuildDebugBreak();
+ }
+ }
+ return obj;
+}
+HCIMPLEND_RAW
+
+static int loopChoice = 0;
+
+// This function supports a JIT mode in which we're debugging the mechanism for loop cloning.
+// We want to clone loops, then make a semi-random choice, on each execution of the loop,
+// whether to run the original loop or the cloned copy. We do this by incrementing the contents
+// of a memory location, and testing whether the result is odd or even. The "loopChoice" variable
+// above provides that memory location, and this JIT helper merely informs the JIT of the address of
+// "loopChoice".
+HCIMPL0(void*, JIT_LoopCloneChoiceAddr)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ } CONTRACTL_END;
+
+ return &loopChoice;
+}
+HCIMPLEND
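+
+// Illustrative shape of the cloned code under this debug mode (an assumption
+// about the JIT's expansion):
+//
+//   static int* s_pChoice = (int*)JIT_LoopCloneChoiceAddr(); // fetched once
+//   if ((++(*s_pChoice)) & 1)
+//       { /* run the original loop */ }
+//   else
+//       { /* run the cloned loop   */ }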
+
+// Prints a message that loop cloning optimization has occurred.
+HCIMPL0(void, JIT_DebugLogLoopCloning)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ } CONTRACTL_END;
+
+#ifdef _DEBUG
+ printf(">> Logging loop cloning optimization\n");
+#endif
+}
+HCIMPLEND
+
+//========================================================================
+//
+// INTEROP HELPERS
+//
+//========================================================================
+
+#ifdef _WIN64
+
+/**********************************************************************/
+/* Fills out portions of an InlinedCallFrame for JIT64 */
+/* The idea here is to allocate and initialize the frame only once,  */
+/* regardless of how many PInvokes there are in the method */
+Thread * __stdcall JIT_InitPInvokeFrame(InlinedCallFrame *pFrame, PTR_VOID StubSecretArg)
+{
+ CONTRACTL
+ {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ Thread *pThread = GetThread();
+
+ // The JIT messed up and is initializing a frame that is already live on the stack?!?!?!?!
+ _ASSERTE(pFrame != pThread->GetFrame());
+
+ pFrame->Init();
+ pFrame->m_StubSecretArg = StubSecretArg;
+ pFrame->m_Next = pThread->GetFrame();
+
+ return pThread;
+}
+
+#endif // _WIN64
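+
+// Hedged sketch of how JIT64-generated code uses this (names assumed): the
+// method prolog stack-allocates one frame and initializes it exactly once,
+//
+//   InlinedCallFrame frame;
+//   Thread* pThread = JIT_InitPInvokeFrame(&frame, secretArg);
+//
+// after which each inlined PInvoke in the method merely links/unlinks the
+// same frame via the cached pThread.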
+
+//========================================================================
+//
+// JIT HELPERS IMPLEMENTED AS FCALLS
+//
+//========================================================================
+
+FCIMPL3(void, JitHelpers::UnsafeSetArrayElement, PtrArray* pPtrArrayUNSAFE, INT32 index, Object* objectUNSAFE) {
+ FCALL_CONTRACT;
+
+ PTRARRAYREF pPtrArray = (PTRARRAYREF)pPtrArrayUNSAFE;
+ OBJECTREF object = (OBJECTREF)objectUNSAFE;
+
+ _ASSERTE(index < (INT32)pPtrArray->GetNumComponents());
+
+ pPtrArray->SetAt(index, object);
+}
+FCIMPLEND
+
+#ifdef _TARGET_ARM_
+// This function is used from the FCallMemcpy for GC polling
+EXTERN_C VOID FCallMemCpy_GCPoll()
+{
+ FC_INNER_PROLOG(FCallMemcpy);
+
+ Thread *thread = GetThread();
+    // CommonTripThread does this check too, but we check here first to avoid erecting the frames unnecessarily
+ if (thread->CatchAtSafePointOpportunistic())
+ {
+ HELPER_METHOD_FRAME_BEGIN_0();
+ CommonTripThread();
+ HELPER_METHOD_FRAME_END();
+ }
+
+ FC_INNER_EPILOG();
+}
+#endif // _TARGET_ARM_
+
+//========================================================================
+//
+// JIT HELPERS INITIALIZATION
+//
+//========================================================================
+
+// verify consistency of jithelpers.h and corinfo.h
+enum __CorInfoHelpFunc {
+#define JITHELPER(code, pfnHelper, sig) __##code,
+#include "jithelpers.h"
+};
+#define JITHELPER(code, pfnHelper, sig) C_ASSERT((int)__##code == (int)code);
+#include "jithelpers.h"
+
+#ifdef _DEBUG
+#define HELPERDEF(code, lpv, sig) { (LPVOID)(lpv), #code },
+#else // !_DEBUG
+#define HELPERDEF(code, lpv, sig) { (LPVOID)(lpv) },
+#endif // !_DEBUG
+
+// static helpers - constant array
+const VMHELPDEF hlpFuncTable[CORINFO_HELP_COUNT] =
+{
+#define JITHELPER(code, pfnHelper, sig) HELPERDEF(code, pfnHelper,sig)
+#define DYNAMICJITHELPER(code, pfnHelper,sig) HELPERDEF(code, 1 + DYNAMIC_##code, sig)
+#include "jithelpers.h"
+};
+
+// dynamic helpers - filled in at runtime
+VMHELPDEF hlpDynamicFuncTable[DYNAMIC_CORINFO_HELP_COUNT] =
+{
+#define JITHELPER(code, pfnHelper, sig)
+#define DYNAMICJITHELPER(code, pfnHelper, sig) HELPERDEF(DYNAMIC_ ## code, pfnHelper, sig)
+#include "jithelpers.h"
+};
+
+#if defined(_DEBUG) && (defined(_TARGET_AMD64_) || defined(_TARGET_X86_)) && !defined(FEATURE_PAL)
+#define HELPERCOUNTDEF(lpv) { (LPVOID)(lpv), NULL, 0 },
+
+VMHELPCOUNTDEF hlpFuncCountTable[CORINFO_HELP_COUNT+1] =
+{
+#define JITHELPER(code, pfnHelper, sig) HELPERCOUNTDEF(pfnHelper)
+#define DYNAMICJITHELPER(code, pfnHelper, sig) HELPERCOUNTDEF(1 + DYNAMIC_##code)
+#include "jithelpers.h"
+};
+#endif
+
+// Set the JIT helper function in the helper table
+// Handles the case where the function does not reside in mscorwks.dll
+
+void _SetJitHelperFunction(DynamicCorInfoHelpFunc ftnNum, void * pFunc)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ _ASSERTE(ftnNum < DYNAMIC_CORINFO_HELP_COUNT);
+
+ LOG((LF_JIT, LL_INFO1000000, "Setting JIT dynamic helper %3d (%s) to %p\n",
+ ftnNum, hlpDynamicFuncTable[ftnNum].name, pFunc));
+
+ hlpDynamicFuncTable[ftnNum].pfnHelper = (void *) pFunc;
+}
+
+/*********************************************************************/
+// Initialize the part of the JIT helpers that require much of the
+// EE infrastructure to be in place.
+/*********************************************************************/
+void InitJITHelpers2()
+{
+ STANDARD_VM_CONTRACT;
+
+#if defined(_TARGET_X86_) || defined(_TARGET_ARM_)
+ SetJitHelperFunction(CORINFO_HELP_INIT_PINVOKE_FRAME, (void *)GenerateInitPInvokeFrameHelper()->GetEntryPoint());
+#endif // _TARGET_X86_ || _TARGET_ARM_
+
+ ECall::DynamicallyAssignFCallImpl(GetEEFuncEntryPoint(GetThread), ECall::InternalGetCurrentThread);
+
+ InitJitHelperLogging();
+
+ g_pJitGenericHandleCacheCrst.Init(CrstJitGenericHandleCache, CRST_UNSAFE_COOPGC);
+
+ // Allocate and initialize the table
+ NewHolder <JitGenericHandleCache> tempGenericHandleCache (new JitGenericHandleCache());
+ LockOwner sLock = {&g_pJitGenericHandleCacheCrst, IsOwnerOfCrst};
+ if (!tempGenericHandleCache->Init(59, &sLock))
+ COMPlusThrowOM();
+ g_pJitGenericHandleCache = tempGenericHandleCache.Extract();
+}
+
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
+
+NOINLINE void DoCopy(CONTEXT * ctx, void * pvTempStack, size_t cbTempStack, Thread * pThread, Frame * pNewFrame)
+{
+ // We need to ensure that copying pvTempStack onto our stack will not in
+ // *ANY* way trash the context record (or our pointer to it) that we need
+ // in order to restore context
+ _ASSERTE((DWORD_PTR)&ctx + sizeof(ctx) < (DWORD_PTR)GetSP(ctx));
+
+ CONTEXT ctx2;
+ if ((DWORD_PTR)ctx + sizeof(*ctx) > (DWORD_PTR)GetSP(ctx))
+ {
+ // The context record is in danger, copy it down
+ _ASSERTE((DWORD_PTR)&ctx2 + sizeof(ctx2) < (DWORD_PTR)GetSP(ctx));
+ ctx2 = *ctx;
+
+ // Clear any context that we didn't copy...
+ ctx2.ContextFlags &= CONTEXT_ALL;
+ ctx = &ctx2;
+ }
+
+ _ASSERTE((DWORD_PTR)ctx + sizeof(*ctx) <= (DWORD_PTR)GetSP(ctx));
+
+ // DevDiv 189140 - use memmove because source and dest might overlap.
+ memmove((void*)GetSP(ctx), pvTempStack, cbTempStack);
+
+ if (pNewFrame != NULL)
+ {
+ // Now that the memmove above is complete, pNewFrame is actually pointing at a
+ // TailCallFrame, and not garbage. So it's safe to add pNewFrame to the Frame
+ // chain.
+ _ASSERTE(pThread != NULL);
+ pThread->SetFrame(pNewFrame);
+ }
+
+ RtlRestoreContext(ctx, NULL);
+}
+
+//
+// Mostly Architecture-agnostic RtlVirtualUnwind-based tail call helper...
+//
+// Can't use HCIMPL macro because it requires unwind, and this method *NEVER* unwinds.
+//
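+// High-level flow of the helper below (a summary, not new behavior):
+//   1. RtlCaptureContext plus two RtlVirtualUnwind calls recover the context
+//      of our caller's caller (the frame being tail-called out of).
+//   2. The copyArgs thunk sizes the outgoing argument area and reports the
+//      GC layout of the arguments.
+//   3. A TailCallFrame is reused (if the caller was itself tail-called) or
+//      fabricated in a local buffer.
+//   4. The new frame image is built in a temporary buffer, then DoCopy (or a
+//      direct build in place) moves it over the current frame and
+//      RtlRestoreContext jumps to 'target'.
+//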
+
+#define INVOKE_COPY_ARGS_HELPER(helperFunc, arg1, arg2, arg3, arg4) ((pfnCopyArgs)helperFunc)(arg1, arg2, arg3, arg4)
+void F_CALL_VA_CONV JIT_TailCall(PCODE copyArgs, PCODE target, ...)
+{
+ // Can't have a regular contract because we would never pop it
+ // We only throw a stack overflow if needed, and we can't handle
+ // a GC because the incoming parameters are totally unprotected.
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+    STATIC_CONTRACT_MODE_COOPERATIVE;
+
+#ifndef FEATURE_PAL
+
+ Thread *pThread = GetThread();
+
+#ifdef FEATURE_HIJACK
+ // We can't crawl the stack of a thread that currently has a hijack pending
+ // (since the hijack routine won't be recognized by any code manager). So we
+ // undo any hijack, the EE will re-attempt it later.
+ pThread->UnhijackThread();
+#endif
+
+ ULONG_PTR establisherFrame = 0;
+ PVOID handlerData = NULL;
+ CONTEXT ctx;
+
+ // Unwind back to our caller in managed code
+ static PRUNTIME_FUNCTION my_pdata;
+ static ULONG_PTR my_imagebase;
+
+ ctx.ContextFlags = CONTEXT_ALL;
+ RtlCaptureContext(&ctx);
+
+ if (!VolatileLoadWithoutBarrier(&my_imagebase)) {
+ ULONG_PTR imagebase = 0;
+ my_pdata = RtlLookupFunctionEntry(GetIP(&ctx), &imagebase, NULL);
+ InterlockedExchangeT(&my_imagebase, imagebase);
+ }
+
+ RtlVirtualUnwind(UNW_FLAG_NHANDLER, my_imagebase, GetIP(&ctx), my_pdata, &ctx, &handlerData,
+ &establisherFrame, NULL);
+
+ EECodeInfo codeInfo(GetIP(&ctx));
+
+ // Now unwind back to our caller's caller
+ establisherFrame = 0;
+ RtlVirtualUnwind(UNW_FLAG_NHANDLER, codeInfo.GetModuleBase(), GetIP(&ctx), codeInfo.GetFunctionEntry(), &ctx, &handlerData,
+ &establisherFrame, NULL);
+
+ va_list args;
+
+ // Compute the space needed for arguments
+ va_start(args, target);
+
+ ULONG_PTR pGCLayout = 0;
+ size_t cbArgArea = INVOKE_COPY_ARGS_HELPER(copyArgs, args, NULL, NULL, (size_t)&pGCLayout);
+
+ // reset (in case the helper walked them)
+ va_start(args, target);
+
+ // Fake call frame (if needed)
+ size_t cbCopyFrame = 0;
+ bool fCopyDown = false;
+ BYTE rgFrameBuffer[sizeof(FrameWithCookie<TailCallFrame>)];
+ Frame * pNewFrame = NULL;
+
+#if defined(_TARGET_AMD64_)
+# define STACK_ADJUST_FOR_RETURN_ADDRESS (sizeof(void*))
+# define STACK_ALIGN_MASK (0xF)
+#elif defined(_TARGET_ARM_)
+# define STACK_ADJUST_FOR_RETURN_ADDRESS (0)
+# define STACK_ALIGN_MASK (0x7)
+#else
+#error "Unknown tail call architecture"
+#endif
+
+ // figure out if we can re-use an existing TailCallHelperStub
+ // or if we need to create a new one.
+ if ((void*)GetIP(&ctx) == JIT_TailCallHelperStub_ReturnAddress) {
+ TailCallFrame * pCurrentFrame = TailCallFrame::GetFrameFromContext(&ctx);
+ _ASSERTE(pThread->GetFrame() == pCurrentFrame);
+ // The caller was tail called, so we can re-use that frame
+ // See if we need to enlarge the ArgArea
+ // This can potentially enlarge cbArgArea to the size of the
+ // existing TailCallFrame.
+ const size_t endOfFrame = (size_t)pCurrentFrame - (size_t)sizeof(GSCookie);
+ size_t cbOldArgArea = (endOfFrame - GetSP(&ctx));
+ if (cbOldArgArea >= cbArgArea) {
+ cbArgArea = cbOldArgArea;
+ }
+ else {
+ SetSP(&ctx, (endOfFrame - cbArgArea));
+ fCopyDown = true;
+ }
+
+ // Reset the GCLayout
+ pCurrentFrame->SetGCLayout((TADDR)pGCLayout);
+
+ // We're jumping to the new method, not calling it
+ // so make room for the return address that the 'call'
+ // would have pushed.
+ SetSP(&ctx, GetSP(&ctx) - STACK_ADJUST_FOR_RETURN_ADDRESS);
+ }
+ else {
+ // Create a fake fixed frame as if the new method was called by
+ // TailCallHelperStub asm stub and did an
+ // alloca, then called the target method.
+ cbCopyFrame = sizeof(rgFrameBuffer);
+ FrameWithCookie<TailCallFrame> * CookieFrame = new (rgFrameBuffer) FrameWithCookie<TailCallFrame>(&ctx, pThread);
+ TailCallFrame * tailCallFrame = &*CookieFrame;
+
+ tailCallFrame->SetGCLayout((TADDR)pGCLayout);
+ pNewFrame = TailCallFrame::AdjustContextForTailCallHelperStub(&ctx, cbArgArea, pThread);
+ fCopyDown = true;
+
+ // Eventually, we'll add pNewFrame to our frame chain, but don't do it yet. It's
+ // pointing to the place on the stack where the TailCallFrame contents WILL be,
+ // but aren't there yet. In order to keep the stack walkable by profilers, wait
+ // until the contents are moved over properly (inside DoCopy), and then add
+ // pNewFrame onto the frame chain.
+ }
+
+ // The stack should be properly aligned, modulo the pushed return
+ // address (at least on x64)
+ _ASSERTE((GetSP(&ctx) & STACK_ALIGN_MASK) == STACK_ADJUST_FOR_RETURN_ADDRESS);
+
+ // Set the target pointer so we land there when we restore the context
+ SetIP(&ctx, (PCODE)target);
+
+ // Begin creating the new stack frame and copying arguments
+ size_t cbTempStack = cbCopyFrame + cbArgArea + STACK_ADJUST_FOR_RETURN_ADDRESS;
+
+ // If we're going to have to overwrite some of our incoming argument slots
+ // then do a double-copy, first to temporary copy below us on the stack and
+ // then back up to the real stack.
+ void * pvTempStack;
+ if (!fCopyDown && (((ULONG_PTR)args + cbArgArea) < GetSP(&ctx))) {
+
+ //
+ // After this our stack may no longer be walkable by the debugger!!!
+ //
+
+ pvTempStack = (void*)GetSP(&ctx);
+ }
+ else {
+ fCopyDown = true;
+
+ // Need to align properly for a return address (if it goes on the stack)
+ //
+ // AMD64 ONLY:
+ // _alloca produces 16-byte aligned buffers, but the return address,
+ // where our buffer 'starts' is off by 8, so make sure our buffer is
+ // off by 8.
+ //
+ pvTempStack = (BYTE*)_alloca(cbTempStack + STACK_ADJUST_FOR_RETURN_ADDRESS) + STACK_ADJUST_FOR_RETURN_ADDRESS;
+ }
+
+ _ASSERTE(((size_t)pvTempStack & STACK_ALIGN_MASK) == STACK_ADJUST_FOR_RETURN_ADDRESS);
+
+ // Start creating the new stack (bottom up)
+ BYTE * pbTempStackFill = (BYTE*)pvTempStack;
+ // Return address
+ if (STACK_ADJUST_FOR_RETURN_ADDRESS > 0) {
+ *((PVOID*)pbTempStackFill) = (PVOID)JIT_TailCallHelperStub_ReturnAddress; // return address
+ pbTempStackFill += STACK_ADJUST_FOR_RETURN_ADDRESS;
+ }
+
+ // arguments
+ INVOKE_COPY_ARGS_HELPER(copyArgs, args, &ctx, (DWORD_PTR*)pbTempStackFill, cbArgArea);
+
+ pbTempStackFill += cbArgArea;
+
+ // frame (includes TailCallFrame)
+ if (cbCopyFrame > 0) {
+ _ASSERTE(cbCopyFrame == sizeof(rgFrameBuffer));
+ memcpy(pbTempStackFill, rgFrameBuffer, cbCopyFrame);
+ pbTempStackFill += cbCopyFrame;
+ }
+
+ // If this fires, check the math above, because we copied more than we should have
+ _ASSERTE((size_t)((pbTempStackFill - (BYTE*)pvTempStack)) == cbTempStack);
+
+ // If this fires, it means we messed up the math and we're about to overwrite
+ // some of our locals which would be bad because we still need them to call
+ // RtlRestoreContext and pop the contract...
+ _ASSERTE(fCopyDown || ((DWORD_PTR)&ctx + sizeof(ctx) < (DWORD_PTR)GetSP(&ctx)));
+
+ if (fCopyDown) {
+ // We've created a dummy stack below our frame and now we overwrite
+ // our own real stack.
+
+ //
+ // After this our stack may no longer be walkable by the debugger!!!
+ //
+
+ // This does the copy, adds pNewFrame to the frame chain, and calls RtlRestoreContext
+ DoCopy(&ctx, pvTempStack, cbTempStack, pThread, pNewFrame);
+ }
+
+ RtlRestoreContext(&ctx, NULL);
+
+#undef STACK_ADJUST_FOR_RETURN_ADDRESS
+#undef STACK_ALIGN_MASK
+
+#else // !FEATURE_PAL
+ PORTABILITY_ASSERT("TODO: Implement JIT_TailCall for PAL");
+#endif // !FEATURE_PAL
+
+}
+
+#endif // _TARGET_AMD64_ || _TARGET_ARM_
+
+//========================================================================
+//
+// JIT HELPERS LOGGING
+//
+//========================================================================
+
+#if defined(_DEBUG) && (defined(_TARGET_AMD64_) || defined(_TARGET_X86_)) && !defined(FEATURE_PAL)
+// *****************************************************************************
+// JitHelperLogging usage:
+// 1) Ngen using:
+// COMPLUS_HardPrejitEnabled=0
+//
+// This allows us to instrument even ngen'd image calls to JIT helpers.
+// Remember to clear the key after ngen-ing and before actually running
+// the app you want to log.
+//
+// 2) Then set:
+// COMPLUS_JitHelperLogging=1
+// COMPLUS_LogEnable=1
+// COMPLUS_LogLevel=1
+// COMPLUS_LogToFile=1
+//
+// 3) Run the app that you want to log; Results will be in COMPLUS.LOG(.X)
+//
+// 4) JitHelperLogging=2 and JitHelperLogging=3 result in different output
+// as per code in WriteJitHelperCountToSTRESSLOG() below.
+// *****************************************************************************
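+// As an illustrative example (the exact shell syntax depends on the
+// environment; 'MyApp.exe' below is a hypothetical placeholder), a typical
+// logging session on Windows might look like:
+//
+//     set COMPLUS_JitHelperLogging=1
+//     set COMPLUS_LogEnable=1
+//     set COMPLUS_LogLevel=1
+//     set COMPLUS_LogToFile=1
+//     MyApp.exe
+//     type COMPLUS.LOG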
+void WriteJitHelperCountToSTRESSLOG()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ int jitHelperLoggingLevel = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitHelperLogging);
+ if (jitHelperLoggingLevel != 0)
+ {
+ DWORD logFacility, logLevel;
+
+        logFacility = LF_ALL;   // LF_ALL/LL_ALWAYS is okay here only because this logging
+        logLevel = LL_ALWAYS;   // would normally never be turned on at all (used only for performance measurements)
+
+ const int countPos = 60;
+
+ STRESS_LOG0(logFacility, logLevel, "Writing Jit Helper COUNT table to log\n");
+
+ VMHELPCOUNTDEF* hlpFuncCount = hlpFuncCountTable;
+ while(hlpFuncCount < (hlpFuncCountTable + CORINFO_HELP_COUNT))
+ {
+ const char* name;
+ LONG count;
+
+ name = hlpFuncCount->helperName;
+ count = hlpFuncCount->count;
+
+ int nameLen = 0;
+ switch (jitHelperLoggingLevel)
+ {
+ case 1:
+            // This will print a comma-separated list:
+ // CORINFO_XXX_HELPER, 10
+ // CORINFO_YYYY_HELPER, 11
+ STRESS_LOG2(logFacility, logLevel, "%s, %d\n", name, count);
+ break;
+
+ case 2:
+ // This will print a table like:
+ // CORINFO_XXX_HELPER 10
+ // CORINFO_YYYY_HELPER 11
+ if (hlpFuncCount->helperName != NULL)
+ nameLen = (int)strlen(name);
+ else
+ nameLen = (int)strlen("(null)");
+
+ if (nameLen < countPos)
+ {
+ char* buffer = new char[(countPos - nameLen) + 1];
+ memset(buffer, (int)' ', (countPos-nameLen));
+ buffer[(countPos - nameLen)] = '\0';
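+                    // NOTE: 'buffer' is not freed here. StressLog records
+                    // argument pointers and may format them only when the log
+                    // is dumped, so freeing the padding string could leave a
+                    // dangling pointer in the log. Since this is debug-only
+                    // logging code, the small leak is tolerable.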
+ STRESS_LOG3(logFacility, logLevel, "%s%s %d\n", name, buffer, count);
+ }
+ else
+ {
+ STRESS_LOG2(logFacility, logLevel, "%s %d\n", name, count);
+ }
+ break;
+
+ case 3:
+ // This will print out the counts and the address range of the helper (if we know it)
+ // CORINFO_XXX_HELPER, 10, (0x12345678 -> 0x12345778)
+ // CORINFO_YYYY_HELPER, 11, (0x00011234 -> 0x00012234)
+ STRESS_LOG4(logFacility, logLevel, "%s, %d, (0x%p -> 0x%p)\n", name, count, hlpFuncCount->pfnRealHelper, ((LPBYTE)hlpFuncCount->pfnRealHelper + hlpFuncCount->helperSize));
+ break;
+
+ default:
+ STRESS_LOG1(logFacility, logLevel, "Unsupported JitHelperLogging mode (%d)\n", jitHelperLoggingLevel);
+ break;
+ }
+
+ hlpFuncCount++;
+ }
+ }
+}
+// This will do the work to instrument the JIT helper table.
+void InitJitHelperLogging()
+{
+ STANDARD_VM_CONTRACT;
+
+ if ((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitHelperLogging) != 0))
+ {
+
+#ifdef _TARGET_X86_
+ IMAGE_DOS_HEADER *pDOS = (IMAGE_DOS_HEADER *)g_pMSCorEE;
+ _ASSERTE(pDOS->e_magic == VAL16(IMAGE_DOS_SIGNATURE) && pDOS->e_lfanew != 0);
+
+ IMAGE_NT_HEADERS *pNT = (IMAGE_NT_HEADERS*)((LPBYTE)g_pMSCorEE + VAL32(pDOS->e_lfanew));
+#ifdef _WIN64
+ _ASSERTE(pNT->Signature == VAL32(IMAGE_NT_SIGNATURE)
+ && pNT->FileHeader.SizeOfOptionalHeader == VAL16(sizeof(IMAGE_OPTIONAL_HEADER64))
+ && pNT->OptionalHeader.Magic == VAL16(IMAGE_NT_OPTIONAL_HDR_MAGIC) );
+#else
+ _ASSERTE(pNT->Signature == VAL32(IMAGE_NT_SIGNATURE)
+ && pNT->FileHeader.SizeOfOptionalHeader == VAL16(sizeof(IMAGE_OPTIONAL_HEADER32))
+ && pNT->OptionalHeader.Magic == VAL16(IMAGE_NT_OPTIONAL_HDR_MAGIC) );
+#endif
+#endif // _TARGET_X86_
+
+ if (g_pConfig->NgenHardBind() == EEConfig::NGEN_HARD_BIND_NONE)
+ {
+ _ASSERTE(g_pConfig->NgenHardBind() != EEConfig::NGEN_HARD_BIND_NONE && "You are "
+ "trying to log JIT helper method calls while you have NGEN HARD BINDING "
+ "set to 0. This probably means you're really trying to NGEN something for "
+ "logging purposes, NGEN breaks with JitHelperLogging turned on!!!! Please "
+ "set JitHelperLogging=0 while you NGEN, or unset HardPrejitEnabled while "
+ "running managed code.");
+ return;
+ }
+
+ // Make the static hlpFuncTable read/write for purposes of writing the logging thunks
+ DWORD dwOldProtect;
+ if (!ClrVirtualProtect((LPVOID)hlpFuncTable, (sizeof(VMHELPDEF) * CORINFO_HELP_COUNT), PAGE_EXECUTE_READWRITE, &dwOldProtect))
+ {
+ ThrowLastError();
+ }
+
+ // iterate through the jit helper tables replacing helpers with logging thunks
+ //
+ // NOTE: if NGEN'd images were NGEN'd with hard binding on then static helper
+ // calls will NOT be instrumented.
+ VMHELPDEF* hlpFunc = const_cast<VMHELPDEF*>(hlpFuncTable);
+ VMHELPCOUNTDEF* hlpFuncCount = hlpFuncCountTable;
+ while(hlpFunc < (hlpFuncTable + CORINFO_HELP_COUNT))
+ {
+ if (hlpFunc->pfnHelper != NULL)
+ {
+ CPUSTUBLINKER sl;
+ CPUSTUBLINKER* pSl = &sl;
+
+ if (((size_t)hlpFunc->pfnHelper - 1) > DYNAMIC_CORINFO_HELP_COUNT)
+ {
+ // While we're here initialize the table of VMHELPCOUNTDEF
+ // guys with info about this helper
+ hlpFuncCount->pfnRealHelper = hlpFunc->pfnHelper;
+ hlpFuncCount->helperName = hlpFunc->name;
+ hlpFuncCount->count = 0;
+#ifdef _TARGET_AMD64_
+ ULONGLONG uImageBase;
+ PRUNTIME_FUNCTION pFunctionEntry;
+ pFunctionEntry = RtlLookupFunctionEntry((ULONGLONG)hlpFunc->pfnHelper, &uImageBase, NULL);
+
+ if (pFunctionEntry != NULL)
+ {
+ _ASSERTE((uImageBase + pFunctionEntry->BeginAddress) == (ULONGLONG)hlpFunc->pfnHelper);
+ hlpFuncCount->helperSize = pFunctionEntry->EndAddress - pFunctionEntry->BeginAddress;
+ }
+ else
+ {
+ hlpFuncCount->helperSize = 0;
+ }
+#else // _TARGET_X86_
+ // How do I get this for x86?
+ hlpFuncCount->helperSize = 0;
+#endif // _TARGET_AMD64_
+
+ pSl->EmitJITHelperLoggingThunk(GetEEFuncEntryPoint(hlpFunc->pfnHelper), (LPVOID)hlpFuncCount);
+ Stub* pStub = pSl->Link();
+ hlpFunc->pfnHelper = (void*)pStub->GetEntryPoint();
+ }
+ else
+ {
+ _ASSERTE(((size_t)hlpFunc->pfnHelper - 1) >= 0 &&
+ ((size_t)hlpFunc->pfnHelper - 1) < COUNTOF(hlpDynamicFuncTable));
+ VMHELPDEF* dynamicHlpFunc = &hlpDynamicFuncTable[((size_t)hlpFunc->pfnHelper - 1)];
+
+ // While we're here initialize the table of VMHELPCOUNTDEF
+ // guys with info about this helper. There is only one table
+ // for the count dudes that contains info about both dynamic
+ // and static helpers.
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:26001) // "Bounds checked above"
+#endif /*_PREFAST_ */
+ hlpFuncCount->pfnRealHelper = dynamicHlpFunc->pfnHelper;
+ hlpFuncCount->helperName = dynamicHlpFunc->name;
+ hlpFuncCount->count = 0;
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif /*_PREFAST_*/
+
+#ifdef _TARGET_AMD64_
+ ULONGLONG uImageBase;
+ PRUNTIME_FUNCTION pFunctionEntry;
+ pFunctionEntry = RtlLookupFunctionEntry((ULONGLONG)hlpFunc->pfnHelper, &uImageBase, NULL);
+
+ if (pFunctionEntry != NULL)
+ {
+ _ASSERTE((uImageBase + pFunctionEntry->BeginAddress) == (ULONGLONG)hlpFunc->pfnHelper);
+ hlpFuncCount->helperSize = pFunctionEntry->EndAddress - pFunctionEntry->BeginAddress;
+ }
+ else
+ {
+ // if we can't get a function entry for this we'll just pretend the size is 0
+ hlpFuncCount->helperSize = 0;
+ }
+#else // _TARGET_X86_
+ // Is the address in mscoree.dll at all? (All helpers are in
+ // mscoree.dll)
+ if (dynamicHlpFunc->pfnHelper >= (LPBYTE*)g_pMSCorEE && dynamicHlpFunc->pfnHelper < (LPBYTE*)g_pMSCorEE + VAL32(pNT->OptionalHeader.SizeOfImage))
+ {
+ // See note above. How do I get the size on x86 for a static method?
+ hlpFuncCount->helperSize = 0;
+ }
+ else
+ {
+ Stub::RecoverStubAndSize((TADDR)dynamicHlpFunc->pfnHelper, (DWORD*)&hlpFuncCount->helperSize);
+ hlpFuncCount->helperSize -= sizeof(Stub);
+ }
+
+#endif // _TARGET_AMD64_
+
+ pSl->EmitJITHelperLoggingThunk(GetEEFuncEntryPoint(dynamicHlpFunc->pfnHelper), (LPVOID)hlpFuncCount);
+ Stub* pStub = pSl->Link();
+ dynamicHlpFunc->pfnHelper = (void*)pStub->GetEntryPoint();
+ }
+ }
+
+ hlpFunc++;
+ hlpFuncCount++;
+ }
+
+ // Restore original access rights to the static hlpFuncTable
+ ClrVirtualProtect((LPVOID)hlpFuncTable, (sizeof(VMHELPDEF) * CORINFO_HELP_COUNT), dwOldProtect, &dwOldProtect);
+ }
+
+ return;
+}
+#endif // _DEBUG && (_TARGET_AMD64_ || _TARGET_X86_) && !FEATURE_PAL
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
new file mode 100644
index 0000000000..1a92bfd639
--- /dev/null
+++ b/src/vm/jitinterface.cpp
@@ -0,0 +1,14178 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: JITinterface.CPP
+//
+
+// ===========================================================================
+
+
+#include "common.h"
+#include "jitinterface.h"
+#include "codeman.h"
+#include "method.hpp"
+#include "class.h"
+#include "object.h"
+#include "field.h"
+#include "stublink.h"
+#include "virtualcallstub.h"
+#include "corjit.h"
+#include "eeconfig.h"
+#include "excep.h"
+#include "log.h"
+#include "excep.h"
+#include "float.h" // for isnan
+#include "dbginterface.h"
+#include "security.h"
+#include "securitymeta.h"
+#include "dllimport.h"
+#include "gc.h"
+#include "comdelegate.h"
+#include "jitperf.h" // to track jit perf
+#include "corprof.h"
+#include "eeprofinterfaces.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h" // create context bound and remote class instances
+#endif
+#include "perfcounters.h"
+#ifdef PROFILING_SUPPORTED
+#include "proftoeeinterfaceimpl.h"
+#include "eetoprofinterfaceimpl.h"
+#include "eetoprofinterfaceimpl.inl"
+#include "profilepriv.h"
+#endif
+#include "tls.h"
+#include "ecall.h"
+#include "generics.h"
+#include "typestring.h"
+#include "stackprobe.h"
+#include "typedesc.h"
+#include "genericdict.h"
+#include "array.h"
+#include "debuginfostore.h"
+#include "constrainedexecutionregion.h"
+#include "security.h"
+#include "safemath.h"
+#include "runtimehandles.h"
+#include "sigbuilder.h"
+#include "openum.h"
+
+#ifdef HAVE_GCCOVER
+#include "gccover.h"
+#endif // HAVE_GCCOVER
+
+#include "mdaassistants.h"
+
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#include "corcompile.h"
+#endif // FEATURE_PREJIT
+
+
+#ifdef FEATURE_INTERPRETER
+#include "interpreter.h"
+#endif // FEATURE_INTERPRETER
+
+// The Stack Overflow probe takes place in the COOPERATIVE_TRANSITION_BEGIN() macro
+//
+
+#define JIT_TO_EE_TRANSITION() MAKE_CURRENT_THREAD_AVAILABLE_EX(m_pThread); \
+ _ASSERTE(CURRENT_THREAD == GetThread()); \
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE; \
+ COOPERATIVE_TRANSITION_BEGIN(); \
+ START_NON_JIT_PERF();
+
+#define EE_TO_JIT_TRANSITION() STOP_NON_JIT_PERF(); \
+ COOPERATIVE_TRANSITION_END(); \
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE;
+
+#define JIT_TO_EE_TRANSITION_LEAF()
+#define EE_TO_JIT_TRANSITION_LEAF()
+
+
+#if defined(CROSSGEN_COMPILE)
+static const PCHAR hlpNameTable[CORINFO_HELP_COUNT] = {
+#define JITHELPER(code, pfnHelper, sig) #code,
+#include "jithelpers.h"
+};
+#endif
+
+#ifdef DACCESS_COMPILE
+
+// The real definitions are in jithelpers.cpp. However, those files are not included in the DAC build.
+// Hence, we add them here.
+GARY_IMPL(VMHELPDEF, hlpFuncTable, CORINFO_HELP_COUNT);
+GARY_IMPL(VMHELPDEF, hlpDynamicFuncTable, DYNAMIC_CORINFO_HELP_COUNT);
+
+#else // DACCESS_COMPILE
+
+/*********************************************************************/
+
+#if defined(ENABLE_PERF_COUNTERS)
+LARGE_INTEGER g_lastTimeInJitCompilation;
+#endif
+
+BOOL canReplaceMethodOnStack(MethodDesc* pReplaced, MethodDesc* pDeclaredReplacer, MethodDesc* pExactReplacer);
+
+/*********************************************************************/
+
+inline CORINFO_MODULE_HANDLE GetScopeHandle(MethodDesc* method)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (method->IsDynamicMethod())
+ {
+ return MakeDynamicScope(method->AsDynamicMethodDesc()->GetResolver());
+ }
+ else
+ {
+ return GetScopeHandle(method->GetModule());
+ }
+}
+
+// This is common code refactored out of several of the access check functions.
+BOOL ModifyCheckForDynamicMethod(DynamicResolver *pResolver,
+ TypeHandle *pOwnerTypeForSecurity,
+ AccessCheckOptions::AccessCheckType *pAccessCheckType,
+ DynamicResolver** ppAccessContext)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pResolver));
+ PRECONDITION(CheckPointer(pOwnerTypeForSecurity));
+ PRECONDITION(CheckPointer(pAccessCheckType));
+ PRECONDITION(CheckPointer(ppAccessContext));
+ PRECONDITION(*pAccessCheckType == AccessCheckOptions::kNormalAccessibilityChecks);
+ } CONTRACTL_END;
+
+ BOOL doAccessCheck = TRUE;
+
+ //Do not blindly initialize fields, since they've already got important values.
+ DynamicResolver::SecurityControlFlags dwSecurityFlags = DynamicResolver::Default;
+
+ TypeHandle dynamicOwner;
+ pResolver->GetJitContext(&dwSecurityFlags, &dynamicOwner);
+ if (!dynamicOwner.IsNull())
+ *pOwnerTypeForSecurity = dynamicOwner;
+
+ if (dwSecurityFlags & DynamicResolver::SkipVisibilityChecks)
+ {
+ doAccessCheck = FALSE;
+ }
+ else if (dwSecurityFlags & DynamicResolver::RestrictedSkipVisibilityChecks)
+ {
+ *pAccessCheckType = AccessCheckOptions::kRestrictedMemberAccess;
+
+#ifdef FEATURE_CORECLR
+ // For compatibility, don't do transparency checks from dynamic methods in FT CoreCLR.
+ if (GetAppDomain()->GetSecurityDescriptor()->IsFullyTrusted())
+ *pAccessCheckType = AccessCheckOptions::kRestrictedMemberAccessNoTransparency;
+#endif // FEATURE_CORECLR
+
+#ifdef FEATURE_COMPRESSEDSTACK
+ if (dwSecurityFlags & DynamicResolver::HasCreationContext)
+ *ppAccessContext = pResolver;
+#endif // FEATURE_COMPRESSEDSTACK
+ }
+ else
+ {
+#ifdef FEATURE_CORECLR
+ // For compatibility, don't do transparency checks from dynamic methods in FT CoreCLR.
+ if (GetAppDomain()->GetSecurityDescriptor()->IsFullyTrusted())
+ *pAccessCheckType = AccessCheckOptions::kNormalAccessNoTransparency;
+#endif // FEATURE_CORECLR
+ }
+
+ return doAccessCheck;
+}
+
+/*****************************************************************************/
+
+// Initialize from data we passed across to the JIT
+inline static void GetTypeContext(const CORINFO_SIG_INST *info, SigTypeContext *pTypeContext)
+{
+ LIMITED_METHOD_CONTRACT;
+ SigTypeContext::InitTypeContext(
+ Instantiation((TypeHandle *) info->classInst, info->classInstCount),
+ Instantiation((TypeHandle *) info->methInst, info->methInstCount),
+ pTypeContext);
+}
+
+static MethodDesc* GetMethodFromContext(CORINFO_CONTEXT_HANDLE context)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (((size_t) context & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS)
+ {
+ return NULL;
+ }
+ else
+ {
+ return GetMethod((CORINFO_METHOD_HANDLE)((size_t) context & ~CORINFO_CONTEXTFLAGS_MASK));
+ }
+}
+
+static TypeHandle GetTypeFromContext(CORINFO_CONTEXT_HANDLE context)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (((size_t) context & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS)
+ {
+ return TypeHandle((CORINFO_CLASS_HANDLE) ((size_t) context & ~CORINFO_CONTEXTFLAGS_MASK));
+ }
+ else
+ {
+ MethodTable * pMT = GetMethodFromContext(context)->GetMethodTable();
+ return TypeHandle(pMT);
+ }
+}
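+
+// Illustrative sketch of the tagging scheme used above: a
+// CORINFO_CONTEXT_HANDLE is a MethodDesc* or a TypeHandle with the kind of
+// context encoded in the low bits. Assuming 'pMT' is a suitably aligned
+// MethodTable*, a class context would be built and decoded like this:
+//
+//   CORINFO_CONTEXT_HANDLE ctx =
+//       (CORINFO_CONTEXT_HANDLE)((size_t)pMT | CORINFO_CONTEXTFLAGS_CLASS);
+//   TypeHandle th = GetTypeFromContext(ctx);      // TypeHandle(pMT)
+//   MethodDesc * pMD = GetMethodFromContext(ctx); // NULL for a class context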
+
+// Initialize from a context parameter passed to the JIT and back. This is a parameter
+// that indicates which method is being jitted.
+
+inline static void GetTypeContext(CORINFO_CONTEXT_HANDLE context, SigTypeContext *pTypeContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ PRECONDITION(context != NULL);
+ }
+ CONTRACTL_END;
+ if (GetMethodFromContext(context))
+ {
+ SigTypeContext::InitTypeContext(GetMethodFromContext(context), pTypeContext);
+ }
+ else
+ {
+ SigTypeContext::InitTypeContext(GetTypeFromContext(context), pTypeContext);
+ }
+}
+
+static BOOL ContextIsShared(CORINFO_CONTEXT_HANDLE context)
+{
+ LIMITED_METHOD_CONTRACT;
+ MethodDesc *pContextMD = GetMethodFromContext(context);
+ if (pContextMD != NULL)
+ {
+ return pContextMD->IsSharedByGenericInstantiations();
+ }
+ else
+ {
+ // Type handle contexts are non-shared and are used for inlining of
+ // non-generic methods in generic classes
+ return FALSE;
+ }
+}
+
+// Returns true if context is providing any generic variables
+static BOOL ContextIsInstantiated(CORINFO_CONTEXT_HANDLE context)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (GetMethodFromContext(context))
+ {
+ return GetMethodFromContext(context)->HasClassOrMethodInstantiation();
+ }
+ else
+ {
+ return GetTypeFromContext(context).HasInstantiation();
+ }
+}
+
+/*********************************************************************/
+// This normalizes EE type information into the form expected by the JIT.
+//
+// If typeHnd contains exact type information, then *clsRet will contain
+// the normalized CORINFO_CLASS_HANDLE information on return.
+
+// Static
+CorInfoType CEEInfo::asCorInfoType(CorElementType eeType,
+ TypeHandle typeHnd, /* optional in */
+ CORINFO_CLASS_HANDLE *clsRet/* optional out */ ) {
+ CONTRACT(CorInfoType) {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION((CorTypeInfo::IsGenericVariable(eeType)) ==
+ (!typeHnd.IsNull() && typeHnd.IsGenericVariable()));
+ PRECONDITION(eeType != ELEMENT_TYPE_GENERICINST);
+ } CONTRACT_END;
+
+ TypeHandle typeHndUpdated = typeHnd;
+
+ if (!typeHnd.IsNull())
+ {
+ CorElementType normType = typeHnd.GetInternalCorElementType();
+ // If we have a type handle, then it has the better type
+ // in some cases
+ if (eeType == ELEMENT_TYPE_VALUETYPE && !CorTypeInfo::IsObjRef(normType))
+ eeType = normType;
+
+ // Zap the typeHnd when the type _really_ is a primitive
+ // as far as verification is concerned. Returning a null class
+        // handle means it is a primitive.
+ //
+ // Enums are exactly like primitives, even from a verification standpoint,
+ // so we zap the type handle in this case.
+ //
+ // However RuntimeTypeHandle etc. are reported as E_T_INT (or something like that)
+ // but don't count as primitives as far as verification is concerned...
+ //
+ // To make things stranger, TypedReference returns true for "IsTruePrimitive".
+ // However the JIT likes us to report the type handle in that case.
+ if (!typeHnd.IsTypeDesc() && (
+ (typeHnd.AsMethodTable()->IsTruePrimitive() && typeHnd != TypeHandle(g_TypedReferenceMT))
+ || typeHnd.AsMethodTable()->IsEnum()) )
+ {
+ typeHndUpdated = TypeHandle();
+ }
+
+ }
+
+ static const BYTE map[] = {
+ CORINFO_TYPE_UNDEF,
+ CORINFO_TYPE_VOID,
+ CORINFO_TYPE_BOOL,
+ CORINFO_TYPE_CHAR,
+ CORINFO_TYPE_BYTE,
+ CORINFO_TYPE_UBYTE,
+ CORINFO_TYPE_SHORT,
+ CORINFO_TYPE_USHORT,
+ CORINFO_TYPE_INT,
+ CORINFO_TYPE_UINT,
+ CORINFO_TYPE_LONG,
+ CORINFO_TYPE_ULONG,
+ CORINFO_TYPE_FLOAT,
+ CORINFO_TYPE_DOUBLE,
+ CORINFO_TYPE_STRING,
+ CORINFO_TYPE_PTR, // PTR
+ CORINFO_TYPE_BYREF,
+ CORINFO_TYPE_VALUECLASS,
+ CORINFO_TYPE_CLASS,
+ CORINFO_TYPE_VAR, // VAR (type variable)
+ CORINFO_TYPE_CLASS, // ARRAY
+ CORINFO_TYPE_CLASS, // WITH
+ CORINFO_TYPE_REFANY,
+ CORINFO_TYPE_UNDEF, // VALUEARRAY_UNSUPPORTED
+ CORINFO_TYPE_NATIVEINT, // I
+ CORINFO_TYPE_NATIVEUINT, // U
+ CORINFO_TYPE_UNDEF, // R_UNSUPPORTED
+
+ // put the correct type when we know our implementation
+ CORINFO_TYPE_PTR, // FNPTR
+ CORINFO_TYPE_CLASS, // OBJECT
+ CORINFO_TYPE_CLASS, // SZARRAY
+ CORINFO_TYPE_VAR, // MVAR
+
+ CORINFO_TYPE_UNDEF, // CMOD_REQD
+ CORINFO_TYPE_UNDEF, // CMOD_OPT
+ CORINFO_TYPE_UNDEF, // INTERNAL
+ };
+
+ _ASSERTE(sizeof(map) == ELEMENT_TYPE_MAX);
+ _ASSERTE(eeType < (CorElementType) sizeof(map));
+ // spot check of the map
+ _ASSERTE((CorInfoType) map[ELEMENT_TYPE_I4] == CORINFO_TYPE_INT);
+ _ASSERTE((CorInfoType) map[ELEMENT_TYPE_PTR] == CORINFO_TYPE_PTR);
+ _ASSERTE((CorInfoType) map[ELEMENT_TYPE_TYPEDBYREF] == CORINFO_TYPE_REFANY);
+
+ CorInfoType res = (eeType < ELEMENT_TYPE_MAX) ? ((CorInfoType) map[eeType]) : CORINFO_TYPE_UNDEF;
+
+ if (clsRet)
+ *clsRet = CORINFO_CLASS_HANDLE(typeHndUpdated.AsPtr());
+
+ RETURN res;
+}
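+
+// A few mappings implied by the table and the zapping logic above, for
+// illustration:
+//   - ELEMENT_TYPE_I4 maps to CORINFO_TYPE_INT and, being a true primitive,
+//     reports a null class handle through *clsRet;
+//   - an enum type is likewise normalized to its underlying primitive
+//     CorInfoType with the type handle zapped;
+//   - TypedReference maps to CORINFO_TYPE_REFANY but keeps its type handle.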
+
+
+inline static CorInfoType toJitType(TypeHandle typeHnd, CORINFO_CLASS_HANDLE *clsRet = NULL)
+{
+ WRAPPER_NO_CONTRACT;
+ return CEEInfo::asCorInfoType(typeHnd.GetInternalCorElementType(), typeHnd, clsRet);
+}
+
+#ifdef _DEBUG
+void DebugSecurityCalloutStress(CORINFO_METHOD_HANDLE methodBeingCompiledHnd,
+ CorInfoIsAccessAllowedResult& currentAnswer,
+ CorInfoSecurityRuntimeChecks& currentRuntimeChecks)
+{
+ WRAPPER_NO_CONTRACT;
+ if (currentAnswer != CORINFO_ACCESS_ALLOWED)
+ {
+ return;
+ }
+ static ConfigDWORD AlwaysInsertCallout;
+ switch (AlwaysInsertCallout.val(CLRConfig::INTERNAL_Security_AlwaysInsertCallout))
+ {
+ case 0: //No stress
+ return;
+ case 1: //Always
+ break;
+ default: //2 (or anything else), do so half the time
+ if (((size_t(methodBeingCompiledHnd) / sizeof(void*)) % 64) < 32)
+ return;
+ }
+ //Do the stress
+ currentAnswer = CORINFO_ACCESS_RUNTIME_CHECK;
+ currentRuntimeChecks = CORINFO_ACCESS_SECURITY_NONE;
+}
+#else
+#define DebugSecurityCalloutStress(a, b, c) do {} while(0)
+#endif //_DEBUG
+
+void CheckForEquivalenceAndLoadTypeBeforeCodeIsRun(Module *pModule, mdToken token, Module *pDefModule, mdToken defToken, const SigParser *ptr, SigTypeContext *pTypeContext, void *pData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (IsTypeDefEquivalent(defToken, pDefModule))
+ {
+ SigPointer sigPtr(*ptr);
+ TypeHandle th = sigPtr.GetTypeHandleThrowing(pModule, pTypeContext);
+ ((ICorDynamicInfo *)pData)->classMustBeLoadedBeforeCodeIsRun(CORINFO_CLASS_HANDLE(th.AsPtr()));
+ }
+}
+
+inline static void TypeEquivalenceFixupSpecificationHelper(ICorDynamicInfo * pCorInfo, MethodDesc *pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ // A fixup is necessary to ensure that the parameters to the method are loaded before the method
+ // is called. In these cases we will not perform the appropriate loading when we load parameter
+ // types because with type equivalence, the parameter types at the call site do not necessarily
+ // match that those in the actual function. (They must be equivalent, but not necessarily the same.)
+ // In non-ngen scenarios this code here will force the types to be loaded directly by the call to
+ // HasTypeEquivalentStructParameters.
+ if (!pMD->IsVirtual())
+ {
+ if (pMD->HasTypeEquivalentStructParameters())
+ {
+ if (IsCompilationProcess())
+ pMD->WalkValueTypeParameters(pMD->GetMethodTable(), CheckForEquivalenceAndLoadTypeBeforeCodeIsRun, pCorInfo);
+ }
+ }
+ else
+ {
+ if (pMD->GetMethodTable()->DependsOnEquivalentOrForwardedStructs())
+ {
+ if (pMD->HasTypeEquivalentStructParameters())
+ pCorInfo->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)pMD->GetMethodTable());
+ }
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+//@GENERICS:
+// The method handle is used to instantiate method and class type parameters
+// It's also used to determine whether an extra dictionary parameter is required
+//
+// sig - Input metadata signature
+// scopeHnd - The signature is to be interpreted in the context of this scope (module)
+// token - Metadata token used to refer to the signature (may be mdTokenNil for dynamic methods)
+// sigRet - Resulting output signature in a format that is understood by native compilers
+// pContextMD - The method with any instantiation information (may be NULL)
+// localSig - Is it a local variables declaration, or a method signature (with return type, etc).
+//    contextType - The type with any instantiation information
+//
+//static
+void
+CEEInfo::ConvToJitSig(
+ PCCOR_SIGNATURE pSig,
+ DWORD cbSig,
+ CORINFO_MODULE_HANDLE scopeHnd,
+ mdToken token,
+ CORINFO_SIG_INFO * sigRet,
+ MethodDesc * pContextMD,
+ bool localSig,
+ TypeHandle contextType)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ SigTypeContext typeContext;
+
+ if (pContextMD)
+ {
+ SigTypeContext::InitTypeContext(pContextMD, contextType, &typeContext);
+ }
+ else
+ {
+ SigTypeContext::InitTypeContext(contextType, &typeContext);
+ }
+
+ _ASSERTE(CORINFO_CALLCONV_DEFAULT == (CorInfoCallConv) IMAGE_CEE_CS_CALLCONV_DEFAULT);
+ _ASSERTE(CORINFO_CALLCONV_VARARG == (CorInfoCallConv) IMAGE_CEE_CS_CALLCONV_VARARG);
+ _ASSERTE(CORINFO_CALLCONV_MASK == (CorInfoCallConv) IMAGE_CEE_CS_CALLCONV_MASK);
+ _ASSERTE(CORINFO_CALLCONV_HASTHIS == (CorInfoCallConv) IMAGE_CEE_CS_CALLCONV_HASTHIS);
+
+ TypeHandle typeHnd = TypeHandle();
+
+ sigRet->pSig = pSig;
+ sigRet->cbSig = cbSig;
+ sigRet->retTypeClass = 0;
+ sigRet->retTypeSigClass = 0;
+ sigRet->scope = scopeHnd;
+ sigRet->token = token;
+ sigRet->sigInst.classInst = (CORINFO_CLASS_HANDLE *) typeContext.m_classInst.GetRawArgs();
+ sigRet->sigInst.classInstCount = (unsigned) typeContext.m_classInst.GetNumArgs();
+ sigRet->sigInst.methInst = (CORINFO_CLASS_HANDLE *) typeContext.m_methodInst.GetRawArgs();
+ sigRet->sigInst.methInstCount = (unsigned) typeContext.m_methodInst.GetNumArgs();
+
+ SigPointer sig(pSig, cbSig);
+
+ if (!localSig)
+ {
+ // This is a method signature which includes calling convention, return type,
+ // arguments, etc
+
+ _ASSERTE(!sig.IsNull());
+ Module * module = GetModule(scopeHnd);
+ sigRet->flags = 0;
+
+ ULONG data;
+ IfFailThrow(sig.GetCallingConvInfo(&data));
+ sigRet->callConv = (CorInfoCallConv) data;
+ // Skip number of type arguments
+ if (sigRet->callConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ IfFailThrow(sig.GetData(NULL));
+
+ ULONG numArgs;
+ IfFailThrow(sig.GetData(&numArgs));
+ if (numArgs != (unsigned short) numArgs)
+ COMPlusThrowHR(COR_E_INVALIDPROGRAM);
+
+ sigRet->numArgs = (unsigned short) numArgs;
+
+ CorElementType type = sig.PeekElemTypeClosed(module, &typeContext);
+
+ if (!CorTypeInfo::IsPrimitiveType(type))
+ {
+ typeHnd = sig.GetTypeHandleThrowing(module, &typeContext);
+ _ASSERTE(!typeHnd.IsNull());
+
+            // I believe it doesn't make any difference whether this is
+            // GetInternalCorElementType or GetSignatureCorElementType
+ type = typeHnd.GetSignatureCorElementType();
+
+ }
+ sigRet->retType = CEEInfo::asCorInfoType(type, typeHnd, &sigRet->retTypeClass);
+ sigRet->retTypeSigClass = CORINFO_CLASS_HANDLE(typeHnd.AsPtr());
+
+        IfFailThrow(sig.SkipExactlyOne());  // must do a skip so we skip any class tokens associated with the return type
+ _ASSERTE(sigRet->retType < CORINFO_TYPE_COUNT);
+
+ sigRet->args = (CORINFO_ARG_LIST_HANDLE)sig.GetPtr();
+ }
+ else
+ {
+        // This is a local variables declaration
+
+ sigRet->callConv = CORINFO_CALLCONV_DEFAULT;
+ sigRet->retType = CORINFO_TYPE_VOID;
+ sigRet->flags = CORINFO_SIGFLAG_IS_LOCAL_SIG;
+ sigRet->numArgs = 0;
+ if (!sig.IsNull())
+ {
+ ULONG callConv;
+ IfFailThrow(sig.GetCallingConvInfo(&callConv));
+ if (callConv != IMAGE_CEE_CS_CALLCONV_LOCAL_SIG)
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_CALLCONV_NOT_LOCAL_SIG);
+ }
+
+ ULONG numArgs;
+ IfFailThrow(sig.GetData(&numArgs));
+
+ if (numArgs != (unsigned short) numArgs)
+ COMPlusThrowHR(COR_E_INVALIDPROGRAM);
+
+ sigRet->numArgs = (unsigned short) numArgs;
+ }
+
+ sigRet->args = (CORINFO_ARG_LIST_HANDLE)sig.GetPtr();
+ }
+
+ _ASSERTE(SigInfoFlagsAreValid(sigRet));
+} // CEEInfo::ConvToJitSig
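+
+// Illustrative example, assuming the standard ECMA-335 signature encodings:
+// for a static method 'int32 Add(int32, int32)' the raw signature bytes are
+// roughly
+//
+//   0x00        // IMAGE_CEE_CS_CALLCONV_DEFAULT
+//   0x02        // numArgs = 2
+//   0x08        // ELEMENT_TYPE_I4 (return type)
+//   0x08, 0x08  // ELEMENT_TYPE_I4 (the two arguments)
+//
+// and ConvToJitSig would report callConv = CORINFO_CALLCONV_DEFAULT,
+// numArgs = 2, retType = CORINFO_TYPE_INT, with 'args' left pointing at the
+// first argument byte.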
+
+//---------------------------------------------------------------------------------------
+//
+CORINFO_CLASS_HANDLE CEEInfo::getTokenTypeAsHandle (CORINFO_RESOLVED_TOKEN * pResolvedToken)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_CLASS_HANDLE tokenType = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ _ASSERTE((pResolvedToken->hMethod == NULL) || (pResolvedToken->hField == NULL));
+
+ BinderClassID classID = CLASS__TYPE_HANDLE;
+
+ if (pResolvedToken->hMethod != NULL)
+ {
+ classID = CLASS__METHOD_HANDLE;
+ }
+ else
+ if (pResolvedToken->hField != NULL)
+ {
+ classID = CLASS__FIELD_HANDLE;
+ }
+
+ tokenType = CORINFO_CLASS_HANDLE(MscorlibBinder::GetClass(classID));
+
+ EE_TO_JIT_TRANSITION();
+
+ return tokenType;
+}
+
+/*********************************************************************/
+size_t CEEInfo::findNameOfToken (
+ CORINFO_MODULE_HANDLE scopeHnd,
+ mdToken metaTOK,
+ __out_ecount (FQNameCapacity) char * szFQName,
+ size_t FQNameCapacity)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ size_t NameLen = 0;
+
+ JIT_TO_EE_TRANSITION();
+
+ if (IsDynamicScope(scopeHnd))
+ {
+ strncpy_s (szFQName, FQNameCapacity, "DynamicToken", FQNameCapacity - 1);
+ NameLen = strlen (szFQName);
+ }
+ else
+ {
+ Module* module = (Module *)scopeHnd;
+ NameLen = findNameOfToken(module, metaTOK, szFQName, FQNameCapacity);
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return NameLen;
+}
+
+CorInfoCanSkipVerificationResult CEEInfo::canSkipMethodVerification(CORINFO_METHOD_HANDLE ftnHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoCanSkipVerificationResult canSkipVerif = CORINFO_VERIFICATION_CANNOT_SKIP;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc* pMD = GetMethod(ftnHnd);
+
+
+#ifdef _DEBUG
+ if (g_pConfig->IsVerifierOff())
+ {
+ canSkipVerif = CORINFO_VERIFICATION_CAN_SKIP;
+ }
+ else
+#endif // _DEBUG
+ {
+ canSkipVerif = Security::JITCanSkipVerification(pMD);
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return canSkipVerif;
+
+}
+
+/*********************************************************************/
+BOOL CEEInfo::shouldEnforceCallvirtRestriction(
+ CORINFO_MODULE_HANDLE scopeHnd)
+{
+ LIMITED_METHOD_CONTRACT;
+ // see vsw 599197
+ // verification rule added in whidbey requiring virtual methods
+    // to be called via callvirt unless certain other rules are
+    // obeyed.
+
+ if (g_pConfig->LegacyVirtualMethodCallVerification())
+ return false;
+ else
+ return true;
+
+}
+
+#ifdef MDIL
+unsigned CEEInfo::getStructTypeToken(InlineContext *inlineContext, CORINFO_ARG_LIST_HANDLE argList)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"should not be reachable");
+ return 0;
+}
+
+unsigned CEEInfo::getTypeTokenForFieldOrMethod(
+ unsigned fieldOrMethodToken)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"should not be reachable");
+ return 0;
+}
+
+unsigned CEEInfo::getTokenForType(CORINFO_CLASS_HANDLE cls)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"should not be reachable");
+ return 0;
+}
+
+
+unsigned CEEInfo::getEnclosingClassToken(InlineContext *inlineContext, CORINFO_METHOD_HANDLE method)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"should not be reachable");
+ return 0;
+}
+
+InlineContext * CEEInfo::computeInlineContext(InlineContext *outerContext, unsigned inlinedMethodToken, unsigned constraintTypeToken, CORINFO_METHOD_HANDLE method)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"should not be reachable");
+ return NULL;
+}
+
+unsigned CEEInfo::translateToken(InlineContext *inlineContext, CORINFO_MODULE_HANDLE scopeHnd, unsigned token)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"should not be reachable");
+ return token;
+}
+
+CorInfoType CEEInfo::getFieldElementType(unsigned fieldToken, CORINFO_MODULE_HANDLE scope, CORINFO_METHOD_HANDLE methHnd)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"should not be reachable");
+ return CORINFO_TYPE_INT;
+}
+
+unsigned CEEInfo::getCurrentMethodToken(InlineContext *inlineContext, CORINFO_METHOD_HANDLE method)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"should not be reachable");
+ return 0;
+}
+
+unsigned CEEInfo::getStubMethodFlags(CORINFO_METHOD_HANDLE method)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ MethodDesc *pMD = GetMethod(method);
+ assert(pMD->IsILStub());
+
+ DynamicMethodDesc *pDMD = pMD->AsDynamicMethodDesc();
+
+ unsigned flags;
+
+ if (pDMD->IsReverseStub())
+ {
+ if (pDMD->IsStatic())
+ flags = ICompactLayoutWriter::SF_REVERSE_PINVOKE;
+ else
+ flags = ICompactLayoutWriter::SF_COM_TO_CLR;
+ }
+ else
+ {
+ if (pDMD->IsDelegateStub())
+ flags = ICompactLayoutWriter::SF_DELEGATE_PINVOKE;
+ else if (pDMD->IsCALLIStub())
+ flags = ICompactLayoutWriter::SF_CALLI_PINVOKE;
+ else if (pDMD->IsCLRToCOMStub())
+ flags = ICompactLayoutWriter::SF_CLR_TO_COM;
+ else
+ flags = ICompactLayoutWriter::SF_PINVOKE;
+ }
+
+ // We save the signature with all stubs for now although we strictly need it only for reverse
+ // P/Invoke and COM->CLR. The size increase is negligible and it is a bit cleaner (i.e. all
+ // DynamicMethodDesc's will have their signature just as they do when we JIT).
+ flags |= ICompactLayoutWriter::SF_NEEDS_STUB_SIGNATURE;
+
+ if (pDMD->HasCopyCtorArgs())
+ flags |= ICompactLayoutWriter::SF_HAS_COPY_CONSTRUCTED_ARGS;
+
+ flags |= (pDMD->GetNativeStackArgSize() << 16);
+
+ return flags;
+}
+
+#endif // MDIL
+
+#if defined(MDIL) || defined(FEATURE_READYTORUN_COMPILER)
+
+// Returns true if assemblies are in the same version bubble
+// Right now each assembly is in its own version bubble.
+// If the need arises (i.e. performance issues) we will define sets of assemblies (e.g. all app assemblies)
+// The main point is that all this logic is concentrated in one place.
+
+bool IsInSameVersionBubble(Assembly * current, Assembly * target)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // trivial case: current and target are identical
+ if (current == target)
+ return true;
+
+ return false;
+}
+
+// Returns true if the assemblies defining current and target are in the same version bubble
+static bool IsInSameVersionBubble(MethodDesc* pCurMD, MethodDesc *pTargetMD)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (IsInSameVersionBubble(pCurMD->GetModule()->GetAssembly(),
+ pTargetMD->GetModule()->GetAssembly()))
+ {
+ return true;
+ }
+ if (IsReadyToRunCompilation())
+ {
+ if (pTargetMD->GetModule()->GetMDImport()->GetCustomAttributeByName(pTargetMD->GetMemberDef(),
+ NONVERSIONABLE_TYPE, NULL, NULL) == S_OK)
+ {
+ return true;
+ }
+ }
+ return false;
+
+}
+
+static bool IsVersionResilientCompilation()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef MDIL
+ if (SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain())
+ return true;
+#endif
+
+ return IsReadyToRunCompilation();
+}
+
+#endif // MDIL || FEATURE_READYTORUN_COMPILER
+
+
+/*********************************************************************/
+CorInfoCanSkipVerificationResult CEEInfo::canSkipVerification(
+ CORINFO_MODULE_HANDLE moduleHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoCanSkipVerificationResult canSkipVerif = CORINFO_VERIFICATION_CANNOT_SKIP;
+
+ JIT_TO_EE_TRANSITION();
+
+#ifdef MDIL
+ // We always want to verify IL when generating MDIL
+ // The return value is already initialized to CORINFO_VERIFICATION_CANNOT_SKIP.
+ if (!SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain())
+#endif
+ {
+ Assembly * pAssem = GetModule(moduleHnd)->GetAssembly();
+
+#ifdef _DEBUG
+ if (g_pConfig->IsVerifierOff())
+ {
+ canSkipVerif = CORINFO_VERIFICATION_CAN_SKIP;
+ }
+ else
+#endif // _DEBUG
+ {
+ //
+ // fQuickCheckOnly is set only by calls from Zapper::CompileAssembly
+            // because that allows us to make a determination for the most
+ // common full trust scenarios (local machine) without actually
+ // resolving policy and bringing in a whole list of assembly
+ // dependencies.
+ //
+            // The scenario of interest here is determining whether or not an
+ // assembly MVID comparison is enough when loading an NGEN'd
+ // assembly or if a full binary hash comparison must be done.
+ //
+
+ DomainAssembly * pAssembly = pAssem->GetDomainAssembly();
+ canSkipVerif = Security::JITCanSkipVerification(pAssembly);
+ }
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return canSkipVerif;
+}
+
+/*********************************************************************/
+// Checks if the given metadata token is valid
+BOOL CEEInfo::isValidToken (
+ CORINFO_MODULE_HANDLE module,
+ mdToken metaTOK)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ BOOL result = FALSE;
+
+ JIT_TO_EE_TRANSITION();
+
+ if (IsDynamicScope(module))
+ {
+        // No explicit token validation for dynamic code. Validation is a
+        // side-effect of token resolution.
+ result = TRUE;
+ }
+ else
+ {
+ result = ((Module *)module)->GetMDImport()->IsValidToken(metaTOK);
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+// Checks if the given metadata token is valid StringRef
+BOOL CEEInfo::isValidStringRef (
+ CORINFO_MODULE_HANDLE module,
+ mdToken metaTOK)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ BOOL result = FALSE;
+
+ JIT_TO_EE_TRANSITION();
+
+ if (IsDynamicScope(module))
+ {
+ result = GetDynamicResolver(module)->IsValidStringRef(metaTOK);
+ }
+ else
+ {
+ result = ((Module *)module)->CheckStringRef(metaTOK);
+ if (result)
+ {
+ DWORD dwCharCount;
+ LPCWSTR pString;
+ result = (!FAILED(((Module *)module)->GetMDImport()->GetUserString(metaTOK, &dwCharCount, NULL, &pString)) &&
+ pString != NULL);
+ }
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/* static */
+size_t CEEInfo::findNameOfToken (Module* module,
+ mdToken metaTOK,
+ __out_ecount (FQNameCapacity) char * szFQName,
+ size_t FQNameCapacity)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+#ifdef _DEBUG
+ PCCOR_SIGNATURE sig = NULL;
+ DWORD cSig;
+ LPCUTF8 pszNamespace = NULL;
+ LPCUTF8 pszClassName = NULL;
+
+ mdToken tokType = TypeFromToken(metaTOK);
+ switch(tokType)
+ {
+ case mdtTypeRef:
+ {
+ if (FAILED(module->GetMDImport()->GetNameOfTypeRef(metaTOK, &pszNamespace, &pszClassName)))
+ {
+ pszNamespace = pszClassName = "Invalid TypeRef record";
+ }
+ ns::MakePath(szFQName, (int)FQNameCapacity, pszNamespace, pszClassName);
+ break;
+ }
+ case mdtTypeDef:
+ {
+ if (FAILED(module->GetMDImport()->GetNameOfTypeDef(metaTOK, &pszClassName, &pszNamespace)))
+ {
+ pszClassName = pszNamespace = "Invalid TypeDef record";
+ }
+ ns::MakePath(szFQName, (int)FQNameCapacity, pszNamespace, pszClassName);
+ break;
+ }
+ case mdtFieldDef:
+ {
+ LPCSTR szFieldName;
+ if (FAILED(module->GetMDImport()->GetNameOfFieldDef(metaTOK, &szFieldName)))
+ {
+ szFieldName = "Invalid FieldDef record";
+ }
+ strncpy_s(szFQName, FQNameCapacity, (char*)szFieldName, FQNameCapacity - 1);
+ break;
+ }
+ case mdtMethodDef:
+ {
+ LPCSTR szMethodName;
+ if (FAILED(module->GetMDImport()->GetNameOfMethodDef(metaTOK, &szMethodName)))
+ {
+ szMethodName = "Invalid MethodDef record";
+ }
+ strncpy_s(szFQName, FQNameCapacity, (char*)szMethodName, FQNameCapacity - 1);
+ break;
+ }
+ case mdtMemberRef:
+ {
+ LPCSTR szName;
+ if (FAILED(module->GetMDImport()->GetNameAndSigOfMemberRef((mdMemberRef)metaTOK, &sig, &cSig, &szName)))
+ {
+ szName = "Invalid MemberRef record";
+ }
+ strncpy_s(szFQName, FQNameCapacity, (char *)szName, FQNameCapacity - 1);
+ break;
+ }
+ default:
+ sprintf_s(szFQName, FQNameCapacity, "!TK_%x", metaTOK);
+ break;
+ }
+
+#else // !_DEBUG
+ strncpy_s (szFQName, FQNameCapacity, "<UNKNOWN>", FQNameCapacity - 1);
+#endif // _DEBUG
+
+
+ return strlen (szFQName);
+}
+
+CorInfoHelpFunc CEEInfo::getLazyStringLiteralHelper(CORINFO_MODULE_HANDLE handle)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoHelpFunc result = CORINFO_HELP_UNDEF;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ result = IsDynamicScope(handle) ? CORINFO_HELP_UNDEF : CORINFO_HELP_STRCNS;
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+
+CHECK CheckContext(CORINFO_MODULE_HANDLE scopeHnd, CORINFO_CONTEXT_HANDLE context)
+{
+ CHECK_MSG(scopeHnd != NULL, "Illegal null scope");
+ CHECK_MSG(((size_t) context & ~CORINFO_CONTEXTFLAGS_MASK) != NULL, "Illegal null context");
+ if (((size_t) context & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS)
+ {
+ TypeHandle handle((CORINFO_CLASS_HANDLE) ((size_t) context & ~CORINFO_CONTEXTFLAGS_MASK));
+ CHECK_MSG(handle.GetModule() == GetModule(scopeHnd), "Inconsistent scope and context");
+ }
+ else
+ {
+ MethodDesc* handle = (MethodDesc*) ((size_t) context & ~CORINFO_CONTEXTFLAGS_MASK);
+ CHECK_MSG(handle->GetModule() == GetModule(scopeHnd), "Inconsistent scope and context");
+ }
+
+ CHECK_OK;
+}
+
+
+static DECLSPEC_NORETURN void ThrowBadTokenException(CORINFO_RESOLVED_TOKEN * pResolvedToken)
+{
+ switch (pResolvedToken->tokenType & CORINFO_TOKENKIND_Mask)
+ {
+ case CORINFO_TOKENKIND_Class:
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_CLASS_TOKEN);
+ case CORINFO_TOKENKIND_Method:
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_METHOD_TOKEN);
+ case CORINFO_TOKENKIND_Field:
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_FIELD_TOKEN);
+ default:
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+}
+
+/*********************************************************************/
+void CEEInfo::resolveToken(/* IN, OUT */ CORINFO_RESOLVED_TOKEN * pResolvedToken)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ _ASSERTE(CheckContext(pResolvedToken->tokenScope, pResolvedToken->tokenContext));
+
+ pResolvedToken->pTypeSpec = NULL;
+ pResolvedToken->cbTypeSpec = NULL;
+ pResolvedToken->pMethodSpec = NULL;
+ pResolvedToken->cbMethodSpec = NULL;
+
+ TypeHandle th;
+ MethodDesc * pMD = NULL;
+ FieldDesc * pFD = NULL;
+
+ CorInfoTokenKind tokenType = pResolvedToken->tokenType;
+
+ if (IsDynamicScope(pResolvedToken->tokenScope))
+ {
+ GetDynamicResolver(pResolvedToken->tokenScope)->ResolveToken(pResolvedToken->token, &th, &pMD, &pFD);
+
+ //
+ // Check that we got the expected handles and fill in missing data if necessary
+ //
+
+ CorTokenType tkType = (CorTokenType)TypeFromToken(pResolvedToken->token);
+
+ if (pMD != NULL)
+ {
+ if ((tkType != mdtMethodDef) && (tkType != mdtMemberRef))
+ ThrowBadTokenException(pResolvedToken);
+ if ((tokenType & CORINFO_TOKENKIND_Method) == 0)
+ ThrowBadTokenException(pResolvedToken);
+ if (th.IsNull())
+ th = pMD->GetMethodTable();
+
+ // "PermitUninstDefOrRef" check
+ if ((tokenType != CORINFO_TOKENKIND_Ldtoken) && pMD->ContainsGenericVariables())
+ {
+ COMPlusThrow(kInvalidProgramException);
+ }
+
+ // if this is a BoxedEntryPointStub get the UnboxedEntryPoint one
+ if (pMD->IsUnboxingStub())
+ {
+ pMD = pMD->GetMethodTable()->GetUnboxedEntryPointMD(pMD);
+ }
+
+ // Activate target if required
+ if (tokenType != CORINFO_TOKENKIND_Ldtoken)
+ {
+ ScanTokenForDynamicScope(pResolvedToken, th, pMD);
+ }
+ }
+ else
+ if (pFD != NULL)
+ {
+ if ((tkType != mdtFieldDef) && (tkType != mdtMemberRef))
+ ThrowBadTokenException(pResolvedToken);
+ if ((tokenType & CORINFO_TOKENKIND_Field) == 0)
+ ThrowBadTokenException(pResolvedToken);
+ if (th.IsNull())
+ th = pFD->GetApproxEnclosingMethodTable();
+
+ if (pFD->IsStatic() && (tokenType != CORINFO_TOKENKIND_Ldtoken))
+ {
+ ScanTokenForDynamicScope(pResolvedToken, th);
+ }
+ }
+ else
+ {
+ if ((tkType != mdtTypeDef) && (tkType != mdtTypeRef))
+ ThrowBadTokenException(pResolvedToken);
+ if ((tokenType & CORINFO_TOKENKIND_Class) == 0)
+ ThrowBadTokenException(pResolvedToken);
+ if (th.IsNull())
+ ThrowBadTokenException(pResolvedToken);
+
+ if (tokenType == CORINFO_TOKENKIND_Box || tokenType == CORINFO_TOKENKIND_Constrained)
+ {
+ ScanTokenForDynamicScope(pResolvedToken, th);
+ }
+ }
+
+ _ASSERTE((pMD == NULL) || (pFD == NULL));
+ _ASSERTE(!th.IsNull());
+
+ // "PermitUninstDefOrRef" check
+ if ((tokenType != CORINFO_TOKENKIND_Ldtoken) && th.ContainsGenericVariables())
+ {
+ COMPlusThrow(kInvalidProgramException);
+ }
+
+ // The JIT always wants to see normalized typedescs for arrays
+ if (!th.IsTypeDesc() && th.AsMethodTable()->IsArray())
+ {
+ MethodTable * pMT = th.AsMethodTable();
+
+ // Load the TypeDesc for the array type.
+ DWORD rank = pMT->GetRank();
+ TypeHandle elemType = pMT->GetApproxArrayElementTypeHandle();
+ th = ClassLoader::LoadArrayTypeThrowing(elemType, pMT->GetInternalCorElementType(), rank);
+ }
+ }
+ else
+ {
+ unsigned metaTOK = pResolvedToken->token;
+ Module * pModule = (Module *)pResolvedToken->tokenScope;
+
+ switch (TypeFromToken(metaTOK))
+ {
+ case mdtModuleRef:
+ if ((tokenType & CORINFO_TOKENKIND_Class) == 0)
+ ThrowBadTokenException(pResolvedToken);
+
+ {
+ DomainFile *pTargetModule = pModule->LoadModule(GetAppDomain(), metaTOK, FALSE /* loadResources */);
+ if (pTargetModule == NULL)
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ th = TypeHandle(pTargetModule->GetModule()->GetGlobalMethodTable());
+ if (th.IsNull())
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ break;
+
+ case mdtTypeDef:
+ case mdtTypeRef:
+ if ((tokenType & CORINFO_TOKENKIND_Class) == 0)
+ ThrowBadTokenException(pResolvedToken);
+
+ th = ClassLoader::LoadTypeDefOrRefThrowing(pModule, metaTOK,
+ ClassLoader::ThrowIfNotFound,
+ (tokenType == CORINFO_TOKENKIND_Ldtoken) ?
+ ClassLoader::PermitUninstDefOrRef : ClassLoader::FailIfUninstDefOrRef);
+ break;
+
+ case mdtTypeSpec:
+ {
+ if ((tokenType & CORINFO_TOKENKIND_Class) == 0)
+ ThrowBadTokenException(pResolvedToken);
+
+ IfFailThrow(pModule->GetMDImport()->GetTypeSpecFromToken(metaTOK, &pResolvedToken->pTypeSpec, &pResolvedToken->cbTypeSpec));
+
+ SigTypeContext typeContext;
+ GetTypeContext(pResolvedToken->tokenContext, &typeContext);
+
+ SigPointer sigptr(pResolvedToken->pTypeSpec, pResolvedToken->cbTypeSpec);
+ th = sigptr.GetTypeHandleThrowing(pModule, &typeContext);
+ }
+ break;
+
+ case mdtMethodDef:
+ if ((tokenType & CORINFO_TOKENKIND_Method) == 0)
+ ThrowBadTokenException(pResolvedToken);
+
+ pMD = MemberLoader::GetMethodDescFromMethodDef(pModule, metaTOK, (tokenType != CORINFO_TOKENKIND_Ldtoken));
+
+ th = pMD->GetMethodTable();
+ break;
+
+ case mdtFieldDef:
+ if ((tokenType & CORINFO_TOKENKIND_Field) == 0)
+ ThrowBadTokenException(pResolvedToken);
+
+ pFD = MemberLoader::GetFieldDescFromFieldDef(pModule, metaTOK, (tokenType != CORINFO_TOKENKIND_Ldtoken));
+
+ th = pFD->GetEnclosingMethodTable();
+ break;
+
+ case mdtMemberRef:
+ {
+ SigTypeContext typeContext;
+ GetTypeContext(pResolvedToken->tokenContext, &typeContext);
+
+ MemberLoader::GetDescFromMemberRef(pModule, metaTOK, &pMD, &pFD, &typeContext, (tokenType != CORINFO_TOKENKIND_Ldtoken),
+ &th, TRUE, &pResolvedToken->pTypeSpec, &pResolvedToken->cbTypeSpec);
+
+ _ASSERTE((pMD != NULL) ^ (pFD != NULL));
+ _ASSERTE(!th.IsNull());
+
+ if (pMD != NULL)
+ {
+ if ((tokenType & CORINFO_TOKENKIND_Method) == 0)
+ ThrowBadTokenException(pResolvedToken);
+ }
+ else
+ {
+ if ((tokenType & CORINFO_TOKENKIND_Field) == 0)
+ ThrowBadTokenException(pResolvedToken);
+ }
+ }
+ break;
+
+ case mdtMethodSpec:
+ {
+ if ((tokenType & CORINFO_TOKENKIND_Method) == 0)
+ ThrowBadTokenException(pResolvedToken);
+
+ SigTypeContext typeContext;
+ GetTypeContext(pResolvedToken->tokenContext, &typeContext);
+
+ // We need the method desc to carry exact instantiation, thus allowInstParam == FALSE.
+ pMD = MemberLoader::GetMethodDescFromMethodSpec(pModule, metaTOK, &typeContext, (tokenType != CORINFO_TOKENKIND_Ldtoken), FALSE /* allowInstParam */,
+ &th, TRUE, &pResolvedToken->pTypeSpec, &pResolvedToken->cbTypeSpec, &pResolvedToken->pMethodSpec, &pResolvedToken->cbMethodSpec);
+ }
+ break;
+
+ default:
+ ThrowBadTokenException(pResolvedToken);
+ }
+
+ //
+ // Module dependency tracking
+ //
+ if (pMD != NULL)
+ {
+ ScanToken(pModule, pResolvedToken, th, pMD);
+ }
+ else
+ if (pFD != NULL)
+ {
+ if (pFD->IsStatic())
+ ScanToken(pModule, pResolvedToken, th);
+ }
+ else
+ {
+            // It should not be required to trigger the module's cctors for ldtoken; it is done for backward compatibility only.
+ if (tokenType == CORINFO_TOKENKIND_Box || tokenType == CORINFO_TOKENKIND_Constrained || tokenType == CORINFO_TOKENKIND_Ldtoken)
+ ScanToken(pModule, pResolvedToken, th);
+ }
+ }
+
+ //
+ // tokenType specific verification and transformations
+ //
+ CorElementType et = th.GetInternalCorElementType();
+ switch (tokenType)
+ {
+ case CORINFO_TOKENKIND_Ldtoken:
+ // Allow everything.
+ break;
+
+ case CORINFO_TOKENKIND_Newarr:
+ // Disallow ELEMENT_TYPE_BYREF and ELEMENT_TYPE_VOID
+ if (et == ELEMENT_TYPE_BYREF || et == ELEMENT_TYPE_VOID)
+ COMPlusThrow(kInvalidProgramException);
+
+ th = ClassLoader::LoadArrayTypeThrowing(th);
+ break;
+
+ default:
+ // Disallow ELEMENT_TYPE_BYREF and ELEMENT_TYPE_VOID
+ if (et == ELEMENT_TYPE_BYREF || et == ELEMENT_TYPE_VOID)
+ COMPlusThrow(kInvalidProgramException);
+ break;
+ }
+
+ // The JIT interface should always return fully loaded types
+ _ASSERTE(th.IsFullyLoaded());
+
+ pResolvedToken->hClass = CORINFO_CLASS_HANDLE(th.AsPtr());
+ pResolvedToken->hMethod = CORINFO_METHOD_HANDLE(pMD);
+ pResolvedToken->hField = CORINFO_FIELD_HANDLE(pFD);
+
+ EE_TO_JIT_TRANSITION();
+}
+
+/*********************************************************************/
+// We have a few frequently used constants in mscorlib that are defined as
+// readonly static fields for historical reasons. Check for them here and
+// allow them to be treated as actual constants by the JIT.
+static CORINFO_FIELD_ACCESSOR getFieldIntrinsic(FieldDesc * field)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (MscorlibBinder::GetField(FIELD__STRING__EMPTY) == field)
+ {
+ return CORINFO_FIELD_INTRINSIC_EMPTY_STRING;
+ }
+ else
+ if ((MscorlibBinder::GetField(FIELD__INTPTR__ZERO) == field) ||
+ (MscorlibBinder::GetField(FIELD__UINTPTR__ZERO) == field))
+ {
+ return CORINFO_FIELD_INTRINSIC_ZERO;
+ }
+
+ return (CORINFO_FIELD_ACCESSOR)-1;
+}
+
+static CorInfoHelpFunc getGenericStaticsHelper(FieldDesc * pField)
+{
+ STANDARD_VM_CONTRACT;
+
+ int helper = CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE;
+
+ if (pField->GetFieldType() == ELEMENT_TYPE_CLASS ||
+ pField->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ {
+ helper = CORINFO_HELP_GETGENERICS_GCSTATIC_BASE;
+ }
+
+ if (pField->IsThreadStatic())
+ {
+ const int delta = CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE - CORINFO_HELP_GETGENERICS_GCSTATIC_BASE;
+
+ static_assert_no_msg(CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE
+ == CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE + delta);
+
+ helper += (CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE - CORINFO_HELP_GETGENERICS_GCSTATIC_BASE);
+ }
+
+ return (CorInfoHelpFunc)helper;
+}
+
+CorInfoHelpFunc CEEInfo::getSharedStaticsHelper(FieldDesc * pField, MethodTable * pFieldMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ int helper = CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE;
+
+ if (pField->GetFieldType() == ELEMENT_TYPE_CLASS ||
+ pField->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ {
+ helper = CORINFO_HELP_GETSHARED_GCSTATIC_BASE;
+ }
+
+ if (pFieldMT->IsDynamicStatics())
+ {
+ const int delta = CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS - CORINFO_HELP_GETSHARED_GCSTATIC_BASE;
+
+ static_assert_no_msg(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS
+ == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE + delta);
+
+ helper += delta;
+ }
+ else
+ if (!pFieldMT->HasClassConstructor() && !pFieldMT->HasBoxedRegularStatics())
+ {
+ const int delta = CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR - CORINFO_HELP_GETSHARED_GCSTATIC_BASE;
+
+ static_assert_no_msg(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR
+ == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE + delta);
+
+ helper += delta;
+ }
+
+ if (pField->IsThreadStatic())
+ {
+ const int delta = CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE - CORINFO_HELP_GETSHARED_GCSTATIC_BASE;
+
+ static_assert_no_msg(CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE
+ == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE + delta);
+ static_assert_no_msg(CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR
+ == CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR + delta);
+ static_assert_no_msg(CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR
+ == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR + delta);
+ static_assert_no_msg(CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS
+ == CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS + delta);
+ static_assert_no_msg(CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS
+ == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS + delta);
+
+ helper += delta;
+ }
+
+ return (CorInfoHelpFunc)helper;
+}
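+
+// Worked example of the delta arithmetic above (illustrative): for a
+// thread-static GC field on a type with dynamic statics, the helper is
+// computed as
+//
+//   CORINFO_HELP_GETSHARED_GCSTATIC_BASE         // GC field type
+//     + (DYNAMICCLASS delta)                     // pFieldMT->IsDynamicStatics()
+//     + (THREADSTATIC delta)                     // pField->IsThreadStatic()
+//   = CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS
+//
+// which is exactly the relationship the static_asserts above guarantee.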
+
+static CorInfoHelpFunc getInstanceFieldHelper(FieldDesc * pField, CORINFO_ACCESS_FLAGS flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ int helper;
+
+ CorElementType type = pField->GetFieldType();
+
+ if (CorTypeInfo::IsObjRef(type))
+ helper = CORINFO_HELP_GETFIELDOBJ;
+ else
+ switch (type)
+ {
+ case ELEMENT_TYPE_VALUETYPE:
+ helper = CORINFO_HELP_GETFIELDSTRUCT;
+ break;
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_U1:
+ helper = CORINFO_HELP_GETFIELD8;
+ break;
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_U2:
+ helper = CORINFO_HELP_GETFIELD16;
+ break;
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ IN_WIN32(default:)
+ helper = CORINFO_HELP_GETFIELD32;
+ break;
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ IN_WIN64(default:)
+ helper = CORINFO_HELP_GETFIELD64;
+ break;
+ case ELEMENT_TYPE_R4:
+ helper = CORINFO_HELP_GETFIELDFLOAT;
+ break;
+ case ELEMENT_TYPE_R8:
+ helper = CORINFO_HELP_GETFIELDDOUBLE;
+ break;
+ }
+
+ if (flags & CORINFO_ACCESS_SET)
+ {
+ const int delta = CORINFO_HELP_SETFIELDOBJ - CORINFO_HELP_GETFIELDOBJ;
+
+ static_assert_no_msg(CORINFO_HELP_SETFIELD8 == CORINFO_HELP_GETFIELD8 + delta);
+ static_assert_no_msg(CORINFO_HELP_SETFIELD16 == CORINFO_HELP_GETFIELD16 + delta);
+ static_assert_no_msg(CORINFO_HELP_SETFIELD32 == CORINFO_HELP_GETFIELD32 + delta);
+ static_assert_no_msg(CORINFO_HELP_SETFIELD64 == CORINFO_HELP_GETFIELD64 + delta);
+ static_assert_no_msg(CORINFO_HELP_SETFIELDSTRUCT == CORINFO_HELP_GETFIELDSTRUCT + delta);
+ static_assert_no_msg(CORINFO_HELP_SETFIELDFLOAT == CORINFO_HELP_GETFIELDFLOAT + delta);
+ static_assert_no_msg(CORINFO_HELP_SETFIELDDOUBLE == CORINFO_HELP_GETFIELDDOUBLE + delta);
+
+ helper += delta;
+ }
+
+ return (CorInfoHelpFunc)helper;
+}
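+
+// For illustration: a store to an int32 instance field (flags containing
+// CORINFO_ACCESS_SET) first selects CORINFO_HELP_GETFIELD32 in the switch
+// above and then adds the get->set delta, yielding CORINFO_HELP_SETFIELD32,
+// per the static_asserts above.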
+
+#ifdef FEATURE_LEGACYNETCF
+void CheckValidTypeOnNetCF(MethodTable * pMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Do this quirk for application assemblies only
+ if (pMT->GetAssembly()->GetManifestFile()->IsProfileAssembly())
+ return;
+
+ if (pMT->GetClass()->IsTypeValidOnNetCF())
+ return;
+
+ DWORD dwStaticsSizeOnNetCF = 0;
+
+ //
+    // NetCF had a 64k limit on the total size of statics per class. This limit
+    // is easy to reach with initialized data in C#. Apps took a dependency
+    // on type load exceptions being thrown in this case.
+ //
+ ApproxFieldDescIterator fieldIterator(pMT, ApproxFieldDescIterator::STATIC_FIELDS);
+ for (FieldDesc *pFD = fieldIterator.Next(); pFD != NULL; pFD = fieldIterator.Next())
+ {
+ DWORD fldSize = pFD->LoadSize();
+
+ // Simulate NetCF behaviour that caused size to wrap around
+ dwStaticsSizeOnNetCF += (UINT16)fldSize;
+
+ if (dwStaticsSizeOnNetCF > 0xFFFF)
+ COMPlusThrow(kTypeLoadException);
+ }
+
+ // Cache the result of the check
+ pMT->GetClass()->SetTypeValidOnNetCF();
+}
+#endif // FEATURE_LEGACYNETCF
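+
+// Illustrative example (editorial note): the (UINT16) cast above truncates each
+// field's size the way NetCF did. A single static field of, say, 0x10004 bytes
+// contributes only 0x0004 to the running total, so it does not push the total
+// past 0xFFFF and no TypeLoadException is thrown -- matching the NetCF behavior
+// that apps took a dependency on.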
+
+
+/*********************************************************************/
+void CEEInfo::getFieldInfo (CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_METHOD_HANDLE callerHandle,
+ CORINFO_ACCESS_FLAGS flags,
+ CORINFO_FIELD_INFO *pResult
+ )
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ _ASSERTE((flags & (CORINFO_ACCESS_GET | CORINFO_ACCESS_SET | CORINFO_ACCESS_ADDRESS | CORINFO_ACCESS_INIT_ARRAY)) != 0);
+
+ INDEBUG(memset(pResult, 0xCC, sizeof(*pResult)));
+
+ FieldDesc * pField = (FieldDesc*)pResolvedToken->hField;
+ MethodTable * pFieldMT = pField->GetApproxEnclosingMethodTable();
+
+ // Helper to use if the field access requires it
+ CORINFO_FIELD_ACCESSOR fieldAccessor = (CORINFO_FIELD_ACCESSOR)-1;
+ DWORD fieldFlags = 0;
+
+ pResult->offset = pField->GetOffset();
+
+ if (pField->IsStatic())
+ {
+#ifdef FEATURE_LEGACYNETCF
+ if (pFieldMT->GetDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ CheckValidTypeOnNetCF(pFieldMT);
+#endif
+
+ fieldFlags |= CORINFO_FLG_FIELD_STATIC;
+
+ if (pField->IsRVA())
+ {
+ fieldFlags |= CORINFO_FLG_FIELD_UNMANAGED;
+
+ Module* module = pFieldMT->GetModule();
+ if (module->IsRvaFieldTls(pResult->offset))
+ {
+ fieldAccessor = CORINFO_FIELD_STATIC_TLS;
+
+ // Provide a helper to use if the JIT is not able to emit the TLS access
+ // as an intrinsic
+ pResult->helper = CORINFO_HELP_GETSTATICFIELDADDR_TLS;
+
+ pResult->offset = module->GetFieldTlsOffset(pResult->offset);
+ }
+ else
+ {
+ fieldAccessor = CORINFO_FIELD_STATIC_RVA_ADDRESS;
+ }
+
+ // We are not going through a helper. The constructor has to be triggered explicitly.
+ if (!pFieldMT->IsClassPreInited())
+ fieldFlags |= CORINFO_FLG_FIELD_INITCLASS;
+ }
+ else
+ if (pField->IsContextStatic())
+ {
+ fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
+
+ pResult->helper = CORINFO_HELP_GETSTATICFIELDADDR_CONTEXT;
+ }
+ else
+ {
+ // Regular or thread static
+ CORINFO_FIELD_ACCESSOR intrinsicAccessor;
+
+ if (pField->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ fieldFlags |= CORINFO_FLG_FIELD_STATIC_IN_HEAP;
+
+ if (pFieldMT->IsSharedByGenericInstantiations())
+ {
+ fieldAccessor = CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER;
+
+ pResult->helper = getGenericStaticsHelper(pField);
+ }
+ else
+ if (pFieldMT->GetModule()->IsSystem() && (flags & CORINFO_ACCESS_GET) &&
+ (intrinsicAccessor = getFieldIntrinsic(pField)) != (CORINFO_FIELD_ACCESSOR)-1)
+ {
+ // Intrinsics
+ fieldAccessor = intrinsicAccessor;
+ }
+ else
+ if (// Domain neutral access.
+ m_pMethodBeingCompiled->IsDomainNeutral() || m_pMethodBeingCompiled->IsZapped() || IsCompilingForNGen() ||
+ // Static fields are not pinned in collectible types. We will always access
+ // them using a helper since the address cannot be embedded into the code.
+ pFieldMT->Collectible() ||
+ // We always treat accessing thread statics as if we are in domain neutral code.
+ pField->IsThreadStatic()
+ )
+ {
+ fieldAccessor = CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER;
+
+ pResult->helper = getSharedStaticsHelper(pField, pFieldMT);
+ }
+ else
+ {
+ fieldAccessor = CORINFO_FIELD_STATIC_ADDRESS;
+
+ // We are not going through a helper. The constructor has to be triggered explicitly.
+ if (!pFieldMT->IsClassPreInited())
+ fieldFlags |= CORINFO_FLG_FIELD_INITCLASS;
+ }
+ }
+
+ //
+ // Currently, we only use this optimization for regular statics, but it
+ // looks like it may be permissible to do this optimization for
+ // thread statics as well.
+ //
+ if ((flags & CORINFO_ACCESS_ADDRESS) &&
+ !pField->IsThreadStatic() &&
+ !pField->IsContextStatic() &&
+ (fieldAccessor != CORINFO_FIELD_STATIC_TLS))
+ {
+ fieldFlags |= CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN;
+ }
+ }
+ else
+ {
+ BOOL fInstanceHelper = FALSE;
+
+#if CHECK_APP_DOMAIN_LEAKS
+ if (g_pConfig->EnableFullDebug()
+ && pField->IsDangerousAppDomainAgileField()
+ && CorTypeInfo::IsObjRef(pField->GetFieldType()))
+ {
+ //
+ // In a checked field with all checks turned on, we use a helper to enforce the app domain
+ // agile invariant.
+ //
+ // <REVISIT_TODO>@todo: we'd like to check this for value type fields as well - we
+ // just need to add some code to iterate through the fields for
+ // references during the assignment.
+ // </REVISIT_TODO>
+ fInstanceHelper = TRUE;
+ }
+ else
+#endif // CHECK_APP_DOMAIN_LEAKS
+#ifdef FEATURE_REMOTING
+ // are we a contextful class? (approxMT is OK to use here)
+ if (pFieldMT->IsContextful())
+ {
+ // Allow the JIT to optimize special cases
+
+ // If the caller states that we have a 'this' reference
+ // and is also willing to unwrap it itself,
+ // then we won't require a helper call.
+ if (!(flags & CORINFO_ACCESS_THIS ) ||
+ !(flags & CORINFO_ACCESS_UNWRAP))
+ {
+ // Normally a helper call is required.
+ fInstanceHelper = TRUE;
+ }
+ }
+ // are we a marshaled by ref class? (approxMT is OK to use here)
+ else if (pFieldMT->IsMarshaledByRef())
+ {
+ // Allow the JIT to optimize special cases
+
+ // If the caller states that we have a 'this' reference,
+ // then we won't require a helper call.
+ if (!(flags & CORINFO_ACCESS_THIS))
+ {
+ // Normally a helper call is required.
+ fInstanceHelper = TRUE;
+ }
+ }
+#endif // FEATURE_REMOTING
+
+ if (fInstanceHelper)
+ {
+ if (flags & CORINFO_ACCESS_ADDRESS)
+ {
+ fieldAccessor = CORINFO_FIELD_INSTANCE_ADDR_HELPER;
+
+ pResult->helper = CORINFO_HELP_GETFIELDADDR;
+ }
+ else
+ {
+ fieldAccessor = CORINFO_FIELD_INSTANCE_HELPER;
+
+ pResult->helper = getInstanceFieldHelper(pField, flags);
+ }
+ }
+ else
+ if (pField->IsEnCNew())
+ {
+ fieldAccessor = CORINFO_FIELD_INSTANCE_ADDR_HELPER;
+
+ pResult->helper = CORINFO_HELP_GETFIELDADDR;
+ }
+ else
+ {
+ fieldAccessor = CORINFO_FIELD_INSTANCE;
+ }
+
+ // FieldDesc::GetOffset() does not include the size of Object
+ if (!pFieldMT->IsValueType())
+ {
+ pResult->offset += sizeof(Object);
+ }
+ }
+
+ // TODO: This is touching metadata. Can we avoid it?
+ DWORD fieldAttribs = pField->GetAttributes();
+
+ if (IsFdFamily(fieldAttribs))
+ fieldFlags |= CORINFO_FLG_FIELD_PROTECTED;
+
+ if (IsFdInitOnly(fieldAttribs))
+ fieldFlags |= CORINFO_FLG_FIELD_FINAL;
+
+ pResult->fieldAccessor = fieldAccessor;
+ pResult->fieldFlags = fieldFlags;
+
+ if (!(flags & CORINFO_ACCESS_INLINECHECK))
+ {
+
+ //get the field's type. Grab the class for structs.
+ pResult->fieldType = getFieldTypeInternal(pResolvedToken->hField, &pResult->structType, pResolvedToken->hClass);
+
+
+ MethodDesc * pCallerForSecurity = GetMethodForSecurity(callerHandle);
+
+ //
+ //Since we can't get the special verify-only instantiated FD like we can with MDs, go back to the parent
+ //of the memberRef and load that one. That should give us the open instantiation.
+ //
+ //If the field we found is owned by a generic type, we have to go back to the signature and reload.
+ //Otherwise we filled in !0.
+ TypeHandle fieldTypeForSecurity = TypeHandle(pResolvedToken->hClass);
+ if (pResolvedToken->pTypeSpec != NULL)
+ {
+ SigTypeContext typeContext;
+ SigTypeContext::InitTypeContext(pCallerForSecurity, &typeContext);
+
+ SigPointer sigptr(pResolvedToken->pTypeSpec, pResolvedToken->cbTypeSpec);
+ fieldTypeForSecurity = sigptr.GetTypeHandleThrowing((Module *)pResolvedToken->tokenScope, &typeContext);
+
+ // typeHnd can be a variable type
+ if (fieldTypeForSecurity.GetMethodTable() == NULL)
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_METHODDEF_PARENT_NO_MEMBERS);
+ }
+ }
+
+ BOOL doAccessCheck = TRUE;
+ AccessCheckOptions::AccessCheckType accessCheckType = AccessCheckOptions::kNormalAccessibilityChecks;
+
+ DynamicResolver * pAccessContext = NULL;
+
+ //More in code:CEEInfo::getCallInfo, but the short version is that the caller and callee Descs do
+ //not completely describe the type.
+ TypeHandle callerTypeForSecurity = TypeHandle(pCallerForSecurity->GetMethodTable());
+ if (IsDynamicScope(pResolvedToken->tokenScope))
+ {
+ doAccessCheck = ModifyCheckForDynamicMethod(GetDynamicResolver(pResolvedToken->tokenScope), &callerTypeForSecurity,
+ &accessCheckType, &pAccessContext);
+ }
+
+ //Now for some link time checks.
+ //Um... where are the field link demands?
+
+ pResult->accessAllowed = CORINFO_ACCESS_ALLOWED;
+
+ if (doAccessCheck)
+ {
+ //Well, let's check some visibility at least.
+ AccessCheckOptions accessCheckOptions(accessCheckType,
+ pAccessContext,
+ FALSE,
+ pField);
+
+ _ASSERTE(pCallerForSecurity != NULL && callerTypeForSecurity != NULL);
+ StaticAccessCheckContext accessContext(pCallerForSecurity, callerTypeForSecurity.GetMethodTable());
+
+ BOOL canAccess = ClassLoader::CanAccess(
+ &accessContext,
+ fieldTypeForSecurity.GetMethodTable(),
+ fieldTypeForSecurity.GetAssembly(),
+ fieldAttribs,
+ NULL,
+ (flags & CORINFO_ACCESS_INIT_ARRAY) ? NULL : pField, // For InitializeArray, we don't need to check the type of the field.
+ accessCheckOptions,
+ FALSE /*checkTargetMethodTransparency*/,
+ TRUE /*checkTargetTypeTransparency*/);
+
+ if (!canAccess)
+ {
+ //Set up the throw helper
+ pResult->accessAllowed = CORINFO_ACCESS_ILLEGAL;
+
+ pResult->accessCalloutHelper.helperNum = CORINFO_HELP_FIELD_ACCESS_EXCEPTION;
+ pResult->accessCalloutHelper.numArgs = 2;
+
+ pResult->accessCalloutHelper.args[0].Set(CORINFO_METHOD_HANDLE(pCallerForSecurity));
+#ifdef MDIL
+ pResult->accessCalloutHelper.args[0].token = 0;
+#endif
+
+ pResult->accessCalloutHelper.args[1].Set(CORINFO_FIELD_HANDLE(pField));
+#ifdef MDIL
+ pResult->accessCalloutHelper.args[1].token = pResolvedToken->token;
+#endif
+
+ if (IsCompilingForNGen())
+ {
+ //see code:CEEInfo::getCallInfo for more information.
+ if (pCallerForSecurity->ContainsGenericVariables())
+ COMPlusThrowNonLocalized(kNotSupportedException, W("Cannot embed generic MethodDesc"));
+ }
+ }
+ else
+ {
+ CorInfoIsAccessAllowedResult isAccessAllowed = CORINFO_ACCESS_ALLOWED;
+ CorInfoSecurityRuntimeChecks runtimeChecks = CORINFO_ACCESS_SECURITY_NONE;
+
+ DebugSecurityCalloutStress(getMethodBeingCompiled(), isAccessAllowed, runtimeChecks);
+ if (isAccessAllowed == CORINFO_ACCESS_RUNTIME_CHECK)
+ {
+ pResult->accessAllowed = isAccessAllowed;
+ //Explain the callback to the JIT.
+ pResult->accessCalloutHelper.helperNum = CORINFO_HELP_FIELD_ACCESS_CHECK;
+ pResult->accessCalloutHelper.numArgs = 3;
+
+ pResult->accessCalloutHelper.args[0].Set(CORINFO_METHOD_HANDLE(pCallerForSecurity));
+#ifdef MDIL
+ pResult->accessCalloutHelper.args[0].token = 0;
+#endif
+
+
+ /* REVISIT_TODO Wed 4/8/2009
+ * This field handle is not useful on its own. We also need to embed the enclosing class
+ * handle.
+ */
+ pResult->accessCalloutHelper.args[1].Set(CORINFO_FIELD_HANDLE(pField));
+#ifdef MDIL
+ pResult->accessCalloutHelper.args[1].token = pResolvedToken->token;
+#endif
+
+ pResult->accessCalloutHelper.args[2].Set(runtimeChecks);
+
+ if (IsCompilingForNGen())
+ {
+ //see code:CEEInfo::getCallInfo for more information.
+ if (pCallerForSecurity->ContainsGenericVariables())
+ COMPlusThrowNonLocalized(kNotSupportedException, W("Cannot embed generic MethodDesc"));
+ }
+ }
+ }
+ }
+
+ }
+
+#ifdef MDIL
+ if (SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain() && !isVerifyOnly())
+ {
+ pResult->offset += (GetRandomInt(1) == 0) ? 4 : 8;
+
+ // we want to disallow the rare cases for MDIL because
+ // - they are not as well tested
+ // - we are not as sure about versioning w.r.t. VM or library changes
+ // - and they are supposed to be very rare in application code
+ // for those cases where they do occur, we'll fall back to jitting
+ switch (pResult->fieldAccessor)
+ {
+ // these are the common cases that we want to allow
+ case CORINFO_FIELD_INSTANCE: // regular instance field at given offset from this-ptr
+ case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: // static field accessed using the "shared static" helper (arguments are ModuleID + ClassID)
+ case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: // static field access using the "generic static" helper (argument is MethodTable *)
+ case CORINFO_FIELD_INTRINSIC_ZERO: // intrinsic zero (IntPtr.Zero, UIntPtr.Zero)
+ case CORINFO_FIELD_INTRINSIC_EMPTY_STRING: // intrinsic empty string (String.Empty)
+ break;
+
+ // these are the rare cases that we want to disallow
+ case CORINFO_FIELD_STATIC_RVA_ADDRESS: // RVA field at given address
+ case CORINFO_FIELD_INSTANCE_HELPER: // instance field accessed using helper (arguments are this, FieldDesc * and the value)
+ case CORINFO_FIELD_INSTANCE_ADDR_HELPER: // instance field accessed using address-of helper (arguments are this and FieldDesc *)
+ case CORINFO_FIELD_STATIC_ADDRESS: // field at given address
+ case CORINFO_FIELD_STATIC_ADDR_HELPER: // static field accessed using address-of helper (argument is FieldDesc *)
+ case CORINFO_FIELD_STATIC_TLS: // unmanaged TLS access
+ default:
+ pResult->accessAllowed = CORINFO_ACCESS_ILLEGAL;
+ break;
+ }
+ }
+#endif
+ EE_TO_JIT_TRANSITION();
+}
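+
+// Editorial sketch (not part of the original change): a JIT consuming
+// CORINFO_FIELD_INFO typically branches on pResult->fieldAccessor, per the
+// accessor descriptions in the MDIL switch above:
+//
+//   switch (fieldInfo.fieldAccessor)
+//   {
+//   case CORINFO_FIELD_INSTANCE:       // load/store at [obj + fieldInfo.offset]
+//   case CORINFO_FIELD_STATIC_ADDRESS: // load/store at a known static address
+//   case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
+//       // call fieldInfo.helper to get the statics base, then add fieldInfo.offset
+//   case CORINFO_FIELD_INSTANCE_HELPER:
+//       // call fieldInfo.helper with (obj, FieldDesc *[, value])
+//   default:
+//       break;
+//   }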
+
+#ifdef MDIL
+DWORD CEEInfo::getFieldOrdinal(CORINFO_MODULE_HANDLE tokenScope,
+ unsigned fieldToken)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"should not be reachable");
+ return 0;
+}
+
+// Given a field or method token metaTOK, return its parent token
+unsigned GetMemberParent(Module * module, unsigned metaTOK)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+
+ if (TypeFromToken(metaTOK) == mdtMethodSpec)
+ {
+ PCCOR_SIGNATURE pSig;
+ ULONG cSig;
+ mdMemberRef GenericMemberRef;
+ IfFailThrow(module->GetMDImport()->GetMethodSpecProps(metaTOK, &GenericMemberRef, &pSig, &cSig));
+
+ _ASSERTE(TypeFromToken(GenericMemberRef) == mdtMethodDef || TypeFromToken(GenericMemberRef) == mdtMemberRef);
+ metaTOK = GenericMemberRef;
+ }
+
+
+ // Extended to extract parent class of a member-ref, method-def or field-def
+
+ if (TypeFromToken(metaTOK) == mdtMemberRef)
+ {
+ IfFailThrow(module->GetMDImport()->GetParentOfMemberRef(metaTOK, &metaTOK));
+
+ // For varargs, a memberref can point to a methodDef, so we need to take
+ // one more step to reach the methodDef's parent type
+ if (TypeFromToken(metaTOK) == mdtMethodDef)
+ {
+ IfFailThrow(module->GetMDImport()->GetParentToken(metaTOK, &metaTOK));
+ }
+ }
+ else if (TypeFromToken(metaTOK) == mdtMethodDef || TypeFromToken(metaTOK) == mdtFieldDef)
+ {
+ IfFailThrow(module->GetMDImport()->GetParentToken(metaTOK,&metaTOK));
+ }
+
+ if ((TypeFromToken(metaTOK) == mdtTypeDef ||
+ TypeFromToken(metaTOK) == mdtTypeRef ||
+ TypeFromToken(metaTOK) == mdtTypeSpec ||
+ TypeFromToken(metaTOK) == mdtModuleRef))
+ {
+ return metaTOK;
+ }
+ else
+ {
+ COMPlusThrowHR(COR_E_INVALIDPROGRAM);
+ }
+}
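+
+// Editorial example (hypothetical token values): for a mdMemberRef 0x0A00000F
+// whose parent record is the typeRef 0x01000002, GetMemberParent returns
+// 0x01000002. A methodSpec is first unwrapped to its generic memberRef or
+// methodDef before the same parent walk is applied.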
+
+// Given a field or method token metaTOK, return its parent token.
+// We still need this in MDIL; for example, for static field access we need the
+// token of the enclosing type.
+unsigned CEEInfo::getMemberParent(CORINFO_MODULE_HANDLE scopeHnd, unsigned metaTOK)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ unsigned result = 0;
+
+ JIT_TO_EE_TRANSITION();
+
+ if (IsDynamicScope(scopeHnd))
+ {
+ // this shouldn't happen in an MDIL compile
+ _ASSERTE(!"NYI");
+ COMPlusThrowHR(COR_E_INVALIDPROGRAM);
+ }
+ else
+ {
+ Module* module = GetModule(scopeHnd);
+ result = GetMemberParent(module, metaTOK);
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+// given a token representing an MD array of structs, get the element type token
+unsigned CEEInfo::getArrayElementToken(CORINFO_MODULE_HANDLE scopeHnd, unsigned metaTOK)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // this shouldn't happen in an MDIL compile
+ _ASSERTE(!"NYI");
+ return 0;
+}
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+bool CEEInfo::isFieldStatic(CORINFO_FIELD_HANDLE fldHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ bool res = false;
+ JIT_TO_EE_TRANSITION_LEAF();
+ FieldDesc* field = (FieldDesc*)fldHnd;
+ res = (field->IsStatic() != 0);
+ EE_TO_JIT_TRANSITION_LEAF();
+ return res;
+}
+
+//---------------------------------------------------------------------------------------
+//
+void
+CEEInfo::findCallSiteSig(
+ CORINFO_MODULE_HANDLE scopeHnd,
+ unsigned sigMethTok,
+ CORINFO_CONTEXT_HANDLE context,
+ CORINFO_SIG_INFO * sigRet)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ PCCOR_SIGNATURE pSig = NULL;
+ DWORD cbSig = 0;
+
+ if (IsDynamicScope(scopeHnd))
+ {
+ DynamicResolver * pResolver = GetDynamicResolver(scopeHnd);
+ SigPointer sig;
+
+ if (TypeFromToken(sigMethTok) == mdtMemberRef)
+ {
+ sig = pResolver->ResolveSignatureForVarArg(sigMethTok);
+ }
+ else
+ {
+ _ASSERTE(TypeFromToken(sigMethTok) == mdtMethodDef);
+
+ TypeHandle classHandle;
+ MethodDesc * pMD = NULL;
+ FieldDesc * pFD = NULL;
+
+ // in this case a method is asked for its sig. Resolve the method token and get the sig
+ pResolver->ResolveToken(sigMethTok, &classHandle, &pMD, &pFD);
+ if (pMD == NULL)
+ COMPlusThrow(kInvalidProgramException);
+
+ PCCOR_SIGNATURE pSig = NULL;
+ DWORD cbSig;
+ pMD->GetSig(&pSig, &cbSig);
+ sig = SigPointer(pSig, cbSig);
+
+ context = MAKE_METHODCONTEXT(pMD);
+ scopeHnd = GetScopeHandle(pMD->GetModule());
+ }
+
+ sig.GetSignature(&pSig, &cbSig);
+ sigMethTok = mdTokenNil;
+ }
+ else
+ {
+ Module * module = (Module *)scopeHnd;
+ LPCUTF8 szName;
+
+ if (TypeFromToken(sigMethTok) == mdtMemberRef)
+ {
+ IfFailThrow(module->GetMDImport()->GetNameAndSigOfMemberRef(sigMethTok, &pSig, &cbSig, &szName));
+
+ // Defs have already been checked by the loader for validity.
+ // However, refs need to be checked.
+ if (!Security::CanSkipVerification(module->GetDomainAssembly()))
+ {
+ // Can pass 0 for the flags, since it is only used for defs.
+ IfFailThrow(validateTokenSig(sigMethTok, pSig, cbSig, 0, module->GetMDImport()));
+ }
+ }
+ else if (TypeFromToken(sigMethTok) == mdtMethodDef)
+ {
+ IfFailThrow(module->GetMDImport()->GetSigOfMethodDef(sigMethTok, &cbSig, &pSig));
+ }
+ }
+
+ CEEInfo::ConvToJitSig(
+ pSig,
+ cbSig,
+ scopeHnd,
+ sigMethTok,
+ sigRet,
+ GetMethodFromContext(context),
+ false,
+ GetTypeFromContext(context));
+ EE_TO_JIT_TRANSITION();
+} // CEEInfo::findCallSiteSig
+
+//---------------------------------------------------------------------------------------
+//
+void
+CEEInfo::findSig(
+ CORINFO_MODULE_HANDLE scopeHnd,
+ unsigned sigTok,
+ CORINFO_CONTEXT_HANDLE context,
+ CORINFO_SIG_INFO * sigRet)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ PCCOR_SIGNATURE pSig = NULL;
+ DWORD cbSig = 0;
+
+ if (IsDynamicScope(scopeHnd))
+ {
+ SigPointer sig = GetDynamicResolver(scopeHnd)->ResolveSignature(sigTok);
+ sig.GetSignature(&pSig, &cbSig);
+ sigTok = mdTokenNil;
+ }
+ else
+ {
+ Module * module = (Module *)scopeHnd;
+
+ // We need to resolve this stand alone sig
+ IfFailThrow(module->GetMDImport()->GetSigFromToken(
+ (mdSignature)sigTok,
+ &cbSig,
+ &pSig));
+ }
+
+ CEEInfo::ConvToJitSig(
+ pSig,
+ cbSig,
+ scopeHnd,
+ sigTok,
+ sigRet,
+ GetMethodFromContext(context),
+ false,
+ GetTypeFromContext(context));
+
+ EE_TO_JIT_TRANSITION();
+} // CEEInfo::findSig
+
+//---------------------------------------------------------------------------------------
+//
+unsigned
+CEEInfo::getClassSize(
+ CORINFO_CLASS_HANDLE clsHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ unsigned result = 0;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ TypeHandle VMClsHnd(clsHnd);
+ result = VMClsHnd.GetSize();
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+unsigned CEEInfo::getClassAlignmentRequirement(CORINFO_CLASS_HANDLE type, BOOL fDoubleAlignHint)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ // Default alignment is sizeof(void*)
+ unsigned result = sizeof(void*);
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ TypeHandle clsHnd(type);
+
+#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
+ if (fDoubleAlignHint)
+ {
+ MethodTable* pMT = clsHnd.GetMethodTable();
+ if (pMT != NULL)
+ {
+ // Return the size of the double-align hint. Ignore the actual alignment info
+ // so that structs with 64-bit integer fields do not trigger double-aligned frames on x86.
+ if (pMT->GetClass()->IsAlign8Candidate())
+ result = 8;
+ }
+ }
+ else
+#endif
+ {
+ result = getClassAlignmentRequirementStatic(clsHnd);
+ }
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+unsigned CEEInfo::getClassAlignmentRequirementStatic(TypeHandle clsHnd)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Default alignment is sizeof(void*)
+ unsigned result = sizeof(void*);
+
+ MethodTable * pMT = clsHnd.GetMethodTable();
+ if (pMT == NULL)
+ return result;
+
+ if (pMT->HasLayout())
+ {
+ EEClassLayoutInfo* pInfo = pMT->GetLayoutInfo();
+
+ if (clsHnd.IsNativeValueType())
+ {
+ // if it's the unmanaged view of the managed type, we always use the unmanaged alignment requirement
+ result = pInfo->m_LargestAlignmentRequirementOfAllMembers;
+ }
+ else
+ if (pInfo->IsManagedSequential())
+ {
+ _ASSERTE(!pMT->ContainsPointers());
+
+ // if it's managed sequential, we use the managed alignment requirement
+ result = pInfo->m_ManagedLargestAlignmentRequirementOfAllMembers;
+ }
+ else if (pInfo->IsBlittable())
+ {
+ _ASSERTE(!pMT->ContainsPointers());
+
+ // if it's blittable, we use the unmanaged alignment requirement
+ result = pInfo->m_LargestAlignmentRequirementOfAllMembers;
+ }
+ }
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+ if (result < 8 && pMT->RequiresAlign8())
+ {
+ // If the structure contains 64-bit primitive fields and the platform requires 8-byte alignment for
+ // such fields then make sure we return at least 8-byte alignment. Note that it's technically possible
+ // to create unmanaged APIs that take unaligned structures containing such fields and this
+ // unconditional alignment bump would cause us to get the calling convention wrong on platforms such
+ // as ARM. If we see such cases in the future we'd need to add another control (such as an alignment
+ // property for the StructLayout attribute or a marshaling directive attribute for p/invoke arguments)
+ // that allows more precise control. For now we'll go with the likely scenario.
+ result = 8;
+ }
+#endif // FEATURE_64BIT_ALIGNMENT
+
+ return result;
+}
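+
+// Editorial example: on a 32-bit platform that defines FEATURE_64BIT_ALIGNMENT
+// (e.g. ARM), a value type containing an int64 or double field reports
+// RequiresAlign8(), so any alignment computed above that is below 8 is bumped
+// to 8 here, keeping the managed layout consistent with the platform ABI.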
+
+CORINFO_FIELD_HANDLE
+CEEInfo::getFieldInClass(CORINFO_CLASS_HANDLE clsHnd, INT num)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_FIELD_HANDLE result = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ TypeHandle VMClsHnd(clsHnd);
+
+ MethodTable* pMT= VMClsHnd.AsMethodTable();
+
+ result = (CORINFO_FIELD_HANDLE) ((pMT->GetApproxFieldDescListRaw()) + num);
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+mdMethodDef
+CEEInfo::getMethodDefFromMethod(CORINFO_METHOD_HANDLE hMethod)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ mdMethodDef result = 0;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ MethodDesc* pMD = GetMethod(hMethod);
+
+ if (pMD->IsDynamicMethod())
+ {
+ // Dynamic methods do not have tokens
+ result = mdMethodDefNil;
+ }
+ else
+ {
+ result = pMD->GetMemberDef();
+ }
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+BOOL CEEInfo::checkMethodModifier(CORINFO_METHOD_HANDLE hMethod,
+ LPCSTR modifier,
+ BOOL fOptional)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ BOOL result = FALSE;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc* pMD = GetMethod(hMethod);
+ Module* pModule = pMD->GetModule();
+ MetaSig sig(pMD);
+ CorElementType eeType = fOptional ? ELEMENT_TYPE_CMOD_OPT : ELEMENT_TYPE_CMOD_REQD;
+
+ // modopts/modreqs for the method are by convention stored on the return type
+ result = sig.GetReturnProps().HasCustomModifier(pModule, modifier, eeType);
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+unsigned CEEInfo::getClassGClayout (CORINFO_CLASS_HANDLE clsHnd, BYTE* gcPtrs)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ unsigned result = 0;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle VMClsHnd(clsHnd);
+
+ MethodTable* pMT = VMClsHnd.GetMethodTable();
+
+#ifdef MDIL
+ _ASSERTE(!SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain() || isVerifyOnly());
+#endif
+
+ if (pMT == g_TypedReferenceMT)
+ {
+ gcPtrs[0] = TYPE_GC_BYREF;
+ gcPtrs[1] = TYPE_GC_NONE;
+ result = 1;
+ }
+ else if (VMClsHnd.IsNativeValueType())
+ {
+ // native value types have no GC pointers
+ result = 0;
+ memset(gcPtrs, TYPE_GC_NONE,
+ (VMClsHnd.GetSize() + sizeof(void*) -1)/ sizeof(void*));
+ }
+ else
+ {
+ _ASSERTE(pMT->IsValueType());
+ _ASSERTE(sizeof(BYTE) == 1);
+
+ // assume no GC pointers at first
+ result = 0;
+ memset(gcPtrs, TYPE_GC_NONE,
+ (VMClsHnd.GetSize() + sizeof(void*) -1)/ sizeof(void*));
+
+ // walk the GC descriptors, turning on the correct bits
+ if (pMT->ContainsPointers())
+ {
+ CGCDesc* map = CGCDesc::GetCGCDescFromMT(pMT);
+ CGCDescSeries * pByValueSeries = map->GetLowestSeries();
+
+ for (SIZE_T i = 0; i < map->GetNumSeries(); i++)
+ {
+ // Get offset into the value class of the first pointer field (includes a +Object)
+ size_t cbSeriesSize = pByValueSeries->GetSeriesSize() + pMT->GetBaseSize();
+ size_t cbOffset = pByValueSeries->GetSeriesOffset() - sizeof(Object);
+
+ _ASSERTE (cbOffset % sizeof(void*) == 0);
+ _ASSERTE (cbSeriesSize % sizeof(void*) == 0);
+
+ result += (unsigned) (cbSeriesSize / sizeof(void*));
+ memset(&gcPtrs[cbOffset/sizeof(void*)], TYPE_GC_REF, cbSeriesSize / sizeof(void*));
+
+ pByValueSeries++;
+ }
+ }
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
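+
+// Editorial example: for a 64-bit value type laid out as
+// { Object* o1; Object* o2; int i; }, the single GC series covers the first two
+// pointer-sized slots, so gcPtrs becomes { TYPE_GC_REF, TYPE_GC_REF,
+// TYPE_GC_NONE } and the function returns 2.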
+/*********************************************************************/
+unsigned CEEInfo::getClassNumInstanceFields (CORINFO_CLASS_HANDLE clsHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ unsigned result = 0;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ TypeHandle th(clsHnd);
+
+ if (!th.IsTypeDesc())
+ {
+ result = th.AsMethodTable()->GetNumInstanceFields();
+ }
+ else
+ {
+ // native value types are opaque aggregates with explicit size
+ result = 0;
+ }
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+
+CorInfoType CEEInfo::asCorInfoType (CORINFO_CLASS_HANDLE clsHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoType result = CORINFO_TYPE_UNDEF;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle VMClsHnd(clsHnd);
+ result = toJitType(VMClsHnd);
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+
+CORINFO_LOOKUP_KIND CEEInfo::getLocationOfThisType(CORINFO_METHOD_HANDLE context)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_LOOKUP_KIND result;
+
+ /* Initialize fields of result for debug build warning */
+ result.needsRuntimeLookup = false;
+ result.runtimeLookupKind = CORINFO_LOOKUP_THISOBJ;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc *pContextMD = GetMethod(context);
+
+ // If the method table is not shared, then return CONST
+ if (!pContextMD->GetMethodTable()->IsSharedByGenericInstantiations())
+ {
+ result.needsRuntimeLookup = false;
+ }
+ else
+ {
+ result.needsRuntimeLookup = true;
+
+ // If we've got a vtable extra argument, go through that
+ if (pContextMD->RequiresInstMethodTableArg())
+ {
+ result.runtimeLookupKind = CORINFO_LOOKUP_CLASSPARAM;
+ }
+ // If we've got an object, go through its vtable
+ else if (pContextMD->AcquiresInstMethodTableFromThis())
+ {
+ result.runtimeLookupKind = CORINFO_LOOKUP_THISOBJ;
+ }
+ // Otherwise go through the method-desc argument
+ else
+ {
+ _ASSERTE(pContextMD->RequiresInstMethodDescArg());
+ result.runtimeLookupKind = CORINFO_LOOKUP_METHODPARAM;
+ }
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+CORINFO_METHOD_HANDLE CEEInfo::GetDelegateCtor(
+ CORINFO_METHOD_HANDLE methHnd,
+ CORINFO_CLASS_HANDLE clsHnd,
+ CORINFO_METHOD_HANDLE targetMethodHnd,
+ DelegateCtorArgs *pCtorData)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ if (isVerifyOnly())
+ {
+ // No sense going through the optimized case just for verification, and it can cause issues parsing
+ // uninstantiated generic signatures.
+ return methHnd;
+ }
+
+ CORINFO_METHOD_HANDLE result = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc *pCurrentCtor = (MethodDesc*)methHnd;
+ if (!pCurrentCtor->IsFCall())
+ {
+ result = methHnd;
+ }
+ else
+ {
+ MethodDesc *pTargetMethod = (MethodDesc*)targetMethodHnd;
+ TypeHandle delegateType = (TypeHandle)clsHnd;
+
+ MethodDesc *pDelegateCtor = COMDelegate::GetDelegateCtor(delegateType, pTargetMethod, pCtorData);
+ if (!pDelegateCtor)
+ pDelegateCtor = pCurrentCtor;
+ result = (CORINFO_METHOD_HANDLE)pDelegateCtor;
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+void CEEInfo::MethodCompileComplete(CORINFO_METHOD_HANDLE methHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc* pMD = GetMethod(methHnd);
+
+ if (pMD->IsDynamicMethod())
+ {
+ pMD->AsDynamicMethodDesc()->GetResolver()->FreeCompileTimeState();
+ }
+
+ EE_TO_JIT_TRANSITION();
+}
+
+// Given a module scope (scopeHnd), a method handle (context) and a metadata token,
+// attempt to load the handle (type, field or method) associated with the token.
+// If this is not possible at compile-time (because the method code is shared and the token contains type parameters)
+// then indicate how the handle should be looked up at run-time.
+//
+// See corinfo.h for more details
+//
+void CEEInfo::embedGenericHandle(
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ BOOL fEmbedParent,
+ CORINFO_GENERICHANDLE_RESULT *pResult)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ INDEBUG(memset(pResult, 0xCC, sizeof(*pResult)));
+
+ JIT_TO_EE_TRANSITION();
+
+ BOOL fRuntimeLookup;
+ MethodDesc * pTemplateMD = NULL;
+
+ if (!fEmbedParent && pResolvedToken->hMethod != NULL)
+ {
+ MethodDesc * pMD = (MethodDesc *)pResolvedToken->hMethod;
+ TypeHandle th(pResolvedToken->hClass);
+
+ pResult->handleType = CORINFO_HANDLETYPE_METHOD;
+
+ Instantiation methodInst = pMD->GetMethodInstantiation();
+
+ pMD = MethodDesc::FindOrCreateAssociatedMethodDesc(pMD, th.GetMethodTable(), FALSE, methodInst, FALSE);
+
+ // Normalize the method handle for reflection
+ if (pResolvedToken->tokenType == CORINFO_TOKENKIND_Ldtoken)
+ pMD = MethodDesc::FindOrCreateAssociatedMethodDescForReflection(pMD, th, methodInst);
+
+ pResult->compileTimeHandle = (CORINFO_GENERIC_HANDLE)pMD;
+ pTemplateMD = pMD;
+
+ // Runtime lookup is only required for stubs. Regular entrypoints are always the same shared MethodDescs.
+ fRuntimeLookup = pMD->IsWrapperStub() &&
+ (pMD->GetMethodTable()->IsSharedByGenericInstantiations() || TypeHandle::IsCanonicalSubtypeInstantiation(methodInst));
+ }
+ else
+ if (!fEmbedParent && pResolvedToken->hField != NULL)
+ {
+ FieldDesc * pFD = (FieldDesc *)pResolvedToken->hField;
+ TypeHandle th(pResolvedToken->hClass);
+
+ pResult->handleType = CORINFO_HANDLETYPE_FIELD;
+
+ pResult->compileTimeHandle = (CORINFO_GENERIC_HANDLE)pFD;
+
+ fRuntimeLookup = th.IsSharedByGenericInstantiations() && pFD->IsStatic();
+ }
+ else
+ {
+ TypeHandle th(pResolvedToken->hClass);
+
+ pResult->handleType = CORINFO_HANDLETYPE_CLASS;
+
+ pResult->compileTimeHandle = (CORINFO_GENERIC_HANDLE)th.AsPtr();
+
+ if (fEmbedParent && pResolvedToken->hMethod != NULL)
+ {
+ MethodDesc * pDeclaringMD = (MethodDesc *)pResolvedToken->hMethod;
+
+ if (!pDeclaringMD->GetMethodTable()->HasSameTypeDefAs(th.GetMethodTable()))
+ {
+ //
+ // The method type may point to a sub-class of the actual class that declares the method.
+ // It is important to embed the declaring type in this case.
+ //
+
+ pTemplateMD = pDeclaringMD;
+
+ pResult->compileTimeHandle = (CORINFO_GENERIC_HANDLE)pDeclaringMD->GetMethodTable();
+ }
+ }
+
+ // IsSharedByGenericInstantiations would not work here. The runtime lookup is required
+ // even for standalone generic variables that show up as __Canon here.
+ fRuntimeLookup = th.IsCanonicalSubtype();
+ }
+
+ _ASSERTE(pResult->compileTimeHandle);
+
+ if (fRuntimeLookup
+ // Handle invalid IL - see comment in code:CEEInfo::ComputeRuntimeLookupForSharedGenericToken
+ && ContextIsShared(pResolvedToken->tokenContext))
+ {
+ DictionaryEntryKind entryKind = EmptySlot;
+ switch (pResult->handleType)
+ {
+ case CORINFO_HANDLETYPE_CLASS:
+ entryKind = (pTemplateMD != NULL) ? DeclaringTypeHandleSlot : TypeHandleSlot;
+ break;
+ case CORINFO_HANDLETYPE_METHOD:
+ entryKind = MethodDescSlot;
+ break;
+ case CORINFO_HANDLETYPE_FIELD:
+ entryKind = FieldDescSlot;
+ break;
+ default:
+ _ASSERTE(false);
+ }
+
+ ComputeRuntimeLookupForSharedGenericToken(entryKind,
+ pResolvedToken,
+ NULL,
+ pTemplateMD,
+ &pResult->lookup);
+ }
+ else
+ {
+ // If the target is not shared then we've already got our result and
+ // can simply do a static look up
+ pResult->lookup.lookupKind.needsRuntimeLookup = false;
+
+ pResult->lookup.constLookup.handle = pResult->compileTimeHandle;
+ pResult->lookup.constLookup.accessType = IAT_VALUE;
+#ifdef MDIL
+ MethodDesc *pContextMD = GetMethodFromContext(pResolvedToken->tokenContext);
+ if (pContextMD != NULL)
+ {
+ if (pContextMD->HasMethodInstantiation())
+ {
+ pResult->lookup.lookupKind.runtimeLookupKind = CORINFO_LOOKUP_METHODPARAM;
+ }
+ // If we've got a vtable extra argument, go through that
+ else if (pContextMD->IsStatic() || pContextMD->GetMethodTable()->IsValueType())
+ {
+ pResult->lookup.lookupKind.runtimeLookupKind = CORINFO_LOOKUP_CLASSPARAM;
+ }
+ // If we've got an object, go through its vtable
+ else
+ {
+ _ASSERTE(!pContextMD->IsStatic() && !pContextMD->GetMethodTable()->IsValueType());
+ pResult->lookup.lookupKind.runtimeLookupKind = CORINFO_LOOKUP_THISOBJ;
+ }
+ }
+#endif
+ }
+
+ EE_TO_JIT_TRANSITION();
+}
+
+void CEEInfo::ScanForModuleDependencies(Module* pModule, SigPointer psig)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(pModule && !pModule->IsSystem());
+
+ CorElementType eType;
+ IfFailThrow(psig.GetElemType(&eType));
+
+ switch (eType)
+ {
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ ScanForModuleDependencies(pModule,psig);
+ IfFailThrow(psig.SkipExactlyOne());
+
+ ULONG ntypars;
+ IfFailThrow(psig.GetData(&ntypars));
+ for (ULONG i = 0; i < ntypars; i++)
+ {
+ ScanForModuleDependencies(pModule,psig);
+ IfFailThrow(psig.SkipExactlyOne());
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_CLASS:
+ {
+ mdToken tk;
+ IfFailThrow(psig.GetToken(&tk));
+ if (TypeFromToken(tk) == mdtTypeRef)
+ {
+ Module * pTypeDefModule;
+ mdToken tkTypeDef;
+
+ if (ClassLoader::ResolveTokenToTypeDefThrowing(pModule, tk, &pTypeDefModule, &tkTypeDef))
+ break;
+
+ if (!pTypeDefModule->IsSystem() && (pModule != pTypeDefModule))
+ {
+ m_pOverride->addActiveDependency((CORINFO_MODULE_HANDLE)pModule, (CORINFO_MODULE_HANDLE)pTypeDefModule);
+ }
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+}
+
+void CEEInfo::ScanMethodSpec(Module * pModule, PCCOR_SIGNATURE pMethodSpec, ULONG cbMethodSpec)
+{
+ STANDARD_VM_CONTRACT;
+
+ SigPointer sp(pMethodSpec, cbMethodSpec);
+
+ BYTE etype;
+ IfFailThrow(sp.GetByte(&etype));
+
+ _ASSERT(etype == (BYTE)IMAGE_CEE_CS_CALLCONV_GENERICINST);
+
+ ULONG nGenericMethodArgs;
+ IfFailThrow(sp.GetData(&nGenericMethodArgs));
+
+ for (ULONG i = 0; i < nGenericMethodArgs; i++)
+ {
+ ScanForModuleDependencies(pModule,sp);
+ IfFailThrow(sp.SkipExactlyOne());
+ }
+}
+
+BOOL CEEInfo::ScanTypeSpec(Module * pModule, PCCOR_SIGNATURE pTypeSpec, ULONG cbTypeSpec)
+{
+ STANDARD_VM_CONTRACT;
+
+ SigPointer sp(pTypeSpec, cbTypeSpec);
+
+ CorElementType eType;
+ IfFailThrow(sp.GetElemType(&eType));
+
+ // Filter out non-instantiated types and typedescs (typevars, arrays, ...)
+ if (eType != ELEMENT_TYPE_GENERICINST)
+ {
+ // Scanning of the parent chain is required for reference types only.
+ // Note that the parent chain MUST NOT be scanned for instantiated
+ // generic variables because they are not real dependencies.
+ return (eType == ELEMENT_TYPE_CLASS);
+ }
+
+ IfFailThrow(sp.SkipExactlyOne());
+
+ ULONG ntypars;
+ IfFailThrow(sp.GetData(&ntypars));
+
+ for (ULONG i = 0; i < ntypars; i++)
+ {
+ ScanForModuleDependencies(pModule,sp);
+ IfFailThrow(sp.SkipExactlyOne());
+ }
+
+ return TRUE;
+}
+
+void CEEInfo::ScanInstantiation(Module * pModule, Instantiation inst)
+{
+ STANDARD_VM_CONTRACT;
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle th = inst[i];
+ if (th.IsTypeDesc())
+ continue;
+
+ MethodTable * pMT = th.AsMethodTable();
+
+ Module * pDefModule = pMT->GetModule();
+
+ if (!pDefModule->IsSystem() && (pModule != pDefModule))
+ {
+ m_pOverride->addActiveDependency((CORINFO_MODULE_HANDLE)pModule, (CORINFO_MODULE_HANDLE)pDefModule);
+ }
+
+ if (pMT->HasInstantiation())
+ {
+ ScanInstantiation(pModule, pMT->GetInstantiation());
+ }
+ }
+}
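+
+// Editorial example: scanning the instantiation of List<Foo> from module M,
+// where Foo lives in a non-system module N (N != M), records an active
+// dependency M -> N via addActiveDependency, and then recurses into Foo's own
+// instantiation if Foo is itself generic.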
+
+//
+// ScanToken is used to track triggers for the creation of per-AppDomain state, including allocations required for statics and
+// triggering of module cctors.
+//
+// The basic rule is: There should be no possibility of a shared module that is "active" to have a direct call into a module that
+// is not "active". And we don't want to intercept every call during runtime, so during compile time we track static calls and
+// everything that can result in new virtual calls.
+//
+// The current algorithm (scan the parent type chain and instantiation variables) is more than enough to maintain this invariant.
+// One could come up with a more efficient algorithm that still maintains the invariant, but it may introduce backward compatibility
+// issues.
+//
+// For efficiency, the implementation leverages the loaded types as much as possible. Unfortunately, we still have to go back to
+// metadata when the generic variables could have been substituted via generic context.
+//
+void CEEInfo::ScanToken(Module * pModule, CORINFO_RESOLVED_TOKEN * pResolvedToken, TypeHandle th, MethodDesc * pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pModule->IsSystem())
+ return;
+
+ if (isVerifyOnly())
+ return;
+
+ //
+ // Scan method instantiation
+ //
+ if (pMD != NULL && pResolvedToken->pMethodSpec != NULL)
+ {
+ if (ContextIsInstantiated(pResolvedToken->tokenContext))
+ {
+ ScanMethodSpec(pModule, pResolvedToken->pMethodSpec, pResolvedToken->cbMethodSpec);
+ }
+ else
+ {
+ ScanInstantiation(pModule, pMD->GetMethodInstantiation());
+ }
+ }
+
+ if (th.IsTypeDesc())
+ return;
+
+ MethodTable * pMT = th.AsMethodTable();
+
+ //
+ // Scan type instantiation
+ //
+ if (pResolvedToken->pTypeSpec != NULL)
+ {
+ if (ContextIsInstantiated(pResolvedToken->tokenContext))
+ {
+ if (!ScanTypeSpec(pModule, pResolvedToken->pTypeSpec, pResolvedToken->cbTypeSpec))
+ return;
+ }
+ else
+ {
+ ScanInstantiation(pModule, pMT->GetInstantiation());
+ }
+ }
+
+ //
+ // Scan chain of parent types
+ //
+ for (;;)
+ {
+ Module * pDefModule = pMT->GetModule();
+ if (pDefModule->IsSystem())
+ break;
+
+ if (pModule != pDefModule)
+ {
+ m_pOverride->addActiveDependency((CORINFO_MODULE_HANDLE)pModule, (CORINFO_MODULE_HANDLE)pDefModule);
+ }
+
+ MethodTable * pParentMT = pMT->GetParentMethodTable();
+ if (pParentMT == NULL)
+ break;
+
+ if (pParentMT->HasInstantiation())
+ {
+ IMDInternalImport* pInternalImport = pDefModule->GetMDImport();
+
+ mdToken tkParent;
+ IfFailThrow(pInternalImport->GetTypeDefProps(pMT->GetCl(), NULL, &tkParent));
+
+ if (TypeFromToken(tkParent) == mdtTypeSpec)
+ {
+ PCCOR_SIGNATURE pTypeSpec;
+ ULONG cbTypeSpec;
+ IfFailThrow(pInternalImport->GetTypeSpecFromToken(tkParent, &pTypeSpec, &cbTypeSpec));
+
+ ScanTypeSpec(pDefModule, pTypeSpec, cbTypeSpec);
+ }
+ }
+
+ pMT = pParentMT;
+ }
+}
+
+void CEEInfo::ScanTokenForDynamicScope(CORINFO_RESOLVED_TOKEN * pResolvedToken, TypeHandle th, MethodDesc * pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (m_pMethodBeingCompiled->IsLCGMethod())
+ {
+ // The dependency tracking for LCG is irrelevant. Perform immediate activation.
+ if (pMD != NULL && pMD->HasMethodInstantiation())
+ pMD->EnsureActive();
+ if (!th.IsTypeDesc())
+ th.AsMethodTable()->EnsureInstanceActive();
+ return;
+ }
+
+ // Stubs-as-IL have to do regular dependency tracking because they can be shared cross-domain.
+ Module * pModule = GetDynamicResolver(pResolvedToken->tokenScope)->GetDynamicMethod()->GetModule();
+ ScanToken(pModule, pResolvedToken, th, pMD);
+}
+
+MethodDesc * CEEInfo::GetMethodForSecurity(CORINFO_METHOD_HANDLE callerHandle)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Cache the cast lookup
+ if (callerHandle == m_hMethodForSecurity_Key)
+ {
+ return m_pMethodForSecurity_Value;
+ }
+
+ //If the caller is generic, load the open type and then load the field again. This allows us to
+ //differentiate between BadGeneric<T> containing a memberRef for a field of type InaccessibleClass and
+ //GoodGeneric<T> containing a memberRef for a field of type T instantiated over InaccessibleClass.
+ MethodDesc * pMethodForSecurity = ((MethodDesc *)callerHandle)->LoadTypicalMethodDefinition();
+
+ m_hMethodForSecurity_Key = callerHandle;
+ m_pMethodForSecurity_Value = pMethodForSecurity;
+
+ return pMethodForSecurity;
+}
+
+// Check that the instantiation is <!/!!0, ..., !/!!(n-1)>
+static BOOL IsSignatureForTypicalInstantiation(SigPointer sigptr, CorElementType varType, ULONG ntypars)
+{
+ STANDARD_VM_CONTRACT;
+
+ for (ULONG i = 0; i < ntypars; i++)
+ {
+ CorElementType type;
+ IfFailThrow(sigptr.GetElemType(&type));
+ if (type != varType)
+ return FALSE;
+
+ ULONG data;
+ IfFailThrow(sigptr.GetData(&data));
+
+ if (data != i)
+ return FALSE;
+ }
+
+ return TRUE;
+}
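+
+// Editorial example: a signature encoding <!!0, !!1> walks this loop with
+// varType == ELEMENT_TYPE_MVAR and ntypars == 2; each element matches both the
+// expected type and its ordinal, so the function returns TRUE. By contrast,
+// <!!1, !!0> fails the ordinal check (data != i) on the first element.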
+
+// Check that methodSpec instantiation is <!!0, ..., !!(n-1)>
+static BOOL IsMethodSpecForTypicalInstantation(SigPointer sigptr)
+{
+ STANDARD_VM_CONTRACT;
+
+ BYTE etype;
+ IfFailThrow(sigptr.GetByte(&etype));
+ _ASSERTE(etype == (BYTE)IMAGE_CEE_CS_CALLCONV_GENERICINST);
+
+ ULONG ntypars;
+ IfFailThrow(sigptr.GetData(&ntypars));
+
+ return IsSignatureForTypicalInstantiation(sigptr, ELEMENT_TYPE_MVAR, ntypars);
+}
+
+// Check that typeSpec instantiation is <!0, ..., !(n-1)>
+static BOOL IsTypeSpecForTypicalInstantiation(SigPointer sigptr)
+{
+ STANDARD_VM_CONTRACT;
+
+ CorElementType type;
+ IfFailThrow(sigptr.GetElemType(&type));
+ if (type != ELEMENT_TYPE_GENERICINST)
+ return FALSE;
+
+ IfFailThrow(sigptr.SkipExactlyOne());
+
+ ULONG ntypars;
+ IfFailThrow(sigptr.GetData(&ntypars));
+
+ return IsSignatureForTypicalInstantiation(sigptr, ELEMENT_TYPE_VAR, ntypars);
+}
+
+void CEEInfo::ComputeRuntimeLookupForSharedGenericToken(DictionaryEntryKind entryKind,
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken /* for ConstrainedMethodEntrySlot */,
+ MethodDesc * pTemplateMD /* for method-based slots */,
+ CORINFO_LOOKUP *pResultLookup)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pResultLookup));
+ } CONTRACTL_END;
+
+ // We should never get here when we are only verifying
+ _ASSERTE(!isVerifyOnly());
+
+ pResultLookup->lookupKind.needsRuntimeLookup = true;
+
+ CORINFO_RUNTIME_LOOKUP *pResult = &pResultLookup->runtimeLookup;
+ pResult->signature = NULL;
+
+ // Unless we decide otherwise, just do the lookup via a helper function
+ pResult->indirections = CORINFO_USEHELPER;
+
+ MethodDesc *pContextMD = GetMethodFromContext(pResolvedToken->tokenContext);
+ MethodTable *pContextMT = pContextMD->GetMethodTable();
+
+ // Do not bother computing the runtime lookup if we are inlining. The JIT is going
+ // to abort the inlining attempt anyway.
+ if (pContextMD != m_pMethodBeingCompiled)
+ {
+ return;
+ }
+
+ // There is a pathological case where invalid IL references the __Canon type directly, but there is no dictionary available to store the lookup.
+ // All callers of ComputeRuntimeLookupForSharedGenericToken have to filter out this case. We can't do much about it here.
+ _ASSERTE(pContextMD->IsSharedByGenericInstantiations());
+
+ BOOL fInstrument = FALSE;
+
+#ifdef FEATURE_PREJIT
+ // This will make sure that when IBC logging is turned on we will go through a version
+ // of JIT_GenericHandle which logs the access. Note that we still want the dictionaries
+ // to be populated so that the types are prepopulated at NGen time.
+ if (IsCompilingForNGen() &&
+ GetAppDomain()->ToCompilationDomain()->m_fForceInstrument)
+ {
+ fInstrument = TRUE;
+ }
+#endif // FEATURE_PREJIT
+
+ // If we've got a method type parameter of any kind then we must look in the method desc arg
+ if (pContextMD->RequiresInstMethodDescArg())
+ {
+ pResultLookup->lookupKind.runtimeLookupKind = CORINFO_LOOKUP_METHODPARAM;
+ pResult->helper = fInstrument ? CORINFO_HELP_RUNTIMEHANDLE_METHOD_LOG : CORINFO_HELP_RUNTIMEHANDLE_METHOD;
+
+ if (fInstrument)
+ goto NoSpecialCase;
+
+ // Special cases:
+ // (1) Naked method type variable: look up directly in instantiation hanging off runtime md
+ // (2) Reference to method-spec of the current method (e.g. a recursive call), i.e. currentmeth<!!0,...,!!(n-1)>
+ if ((entryKind == TypeHandleSlot) && (pResolvedToken->tokenType != CORINFO_TOKENKIND_Newarr))
+ {
+ SigPointer sigptr(pResolvedToken->pTypeSpec, pResolvedToken->cbTypeSpec);
+ CorElementType type;
+ IfFailThrow(sigptr.GetElemType(&type));
+ if (type == ELEMENT_TYPE_MVAR)
+ {
+ pResult->indirections = 2;
+ pResult->testForNull = 0;
+#ifdef FEATURE_PREJIT
+ pResult->testForFixup = 1;
+#else
+ pResult->testForFixup = 0;
+#endif
+ pResult->offsets[0] = offsetof(InstantiatedMethodDesc, m_pPerInstInfo);
+
+ ULONG data;
+ IfFailThrow(sigptr.GetData(&data));
+ pResult->offsets[1] = sizeof(TypeHandle) * data;
+
+ return;
+ }
+ }
+ else if (entryKind == MethodDescSlot)
+ {
+ // It's the context itself (i.e. a recursive call)
+ if (!pTemplateMD->HasSameMethodDefAs(pContextMD))
+ goto NoSpecialCase;
+
+ // Now just check that the instantiation is (!!0, ..., !!(n-1))
+ if (!IsMethodSpecForTypicalInstantation(SigPointer(pResolvedToken->pMethodSpec, pResolvedToken->cbMethodSpec)))
+ goto NoSpecialCase;
+
+ // Type instantiation has to match too if there is one
+ if (pContextMT->HasInstantiation())
+ {
+ TypeHandle thTemplate(pResolvedToken->hClass);
+
+ if (thTemplate.IsTypeDesc() || !thTemplate.AsMethodTable()->HasSameTypeDefAs(pContextMT))
+ goto NoSpecialCase;
+
+ // This check filters out method instantiation on a generic type definition, like G::M<!!0>().
+ // We may never get it here; filter it out just to be sure...
+ if (pResolvedToken->pTypeSpec == NULL)
+ goto NoSpecialCase;
+
+ if (!IsTypeSpecForTypicalInstantiation(SigPointer(pResolvedToken->pTypeSpec, pResolvedToken->cbTypeSpec)))
+ goto NoSpecialCase;
+ }
+
+ // Just use the method descriptor that was passed in!
+ pResult->indirections = 0;
+ pResult->testForNull = 0;
+ pResult->testForFixup = 0;
+
+ return;
+ }
+ }
+ // Otherwise we must just have class type variables
+ else
+ {
+ _ASSERTE(pContextMT->GetNumGenericArgs() > 0);
+
+ if (pContextMD->RequiresInstMethodTableArg())
+ {
+ // If we've got a vtable extra argument, go through that
+ pResultLookup->lookupKind.runtimeLookupKind = CORINFO_LOOKUP_CLASSPARAM;
+ pResult->helper = fInstrument ? CORINFO_HELP_RUNTIMEHANDLE_CLASS_LOG : CORINFO_HELP_RUNTIMEHANDLE_CLASS;
+ }
+ // If we've got an object, go through its vtable
+ else
+ {
+ _ASSERTE(pContextMD->AcquiresInstMethodTableFromThis());
+ pResultLookup->lookupKind.runtimeLookupKind = CORINFO_LOOKUP_THISOBJ;
+ pResult->helper = fInstrument ? CORINFO_HELP_RUNTIMEHANDLE_CLASS_LOG : CORINFO_HELP_RUNTIMEHANDLE_CLASS;
+ }
+
+ if (fInstrument)
+ goto NoSpecialCase;
+
+ // Special cases:
+ // (1) Naked class type variable: look up directly in instantiation hanging off vtable
+ // (2) C<!0,...,!(n-1)> where C is the context's class and C is sealed: just return vtable ptr
+ if ((entryKind == TypeHandleSlot) && (pResolvedToken->tokenType != CORINFO_TOKENKIND_Newarr))
+ {
+ SigPointer sigptr(pResolvedToken->pTypeSpec, pResolvedToken->cbTypeSpec);
+ CorElementType type;
+ IfFailThrow(sigptr.GetElemType(&type));
+ if (type == ELEMENT_TYPE_VAR)
+ {
+ pResult->indirections = 3;
+ pResult->testForNull = 0;
+#ifdef FEATURE_PREJIT
+ pResult->testForFixup = 1;
+#else
+ pResult->testForFixup = 0;
+#endif
+ pResult->offsets[0] = MethodTable::GetOffsetOfPerInstInfo();
+ pResult->offsets[1] = sizeof(TypeHandle*) * (pContextMT->GetNumDicts()-1);
+ ULONG data;
+ IfFailThrow(sigptr.GetData(&data));
+ pResult->offsets[2] = sizeof(TypeHandle) * data;
+
+ return;
+ }
+ else if (type == ELEMENT_TYPE_GENERICINST &&
+ (pContextMT->IsSealed() || pResultLookup->lookupKind.runtimeLookupKind == CORINFO_LOOKUP_CLASSPARAM))
+ {
+ TypeHandle thTemplate(pResolvedToken->hClass);
+
+ if (thTemplate.IsTypeDesc() || !thTemplate.AsMethodTable()->HasSameTypeDefAs(pContextMT))
+ goto NoSpecialCase;
+
+ if (!IsTypeSpecForTypicalInstantiation(SigPointer(pResolvedToken->pTypeSpec, pResolvedToken->cbTypeSpec)))
+ goto NoSpecialCase;
+
+ // Just use the vtable pointer itself!
+ pResult->indirections = 0;
+ pResult->testForNull = 0;
+ pResult->testForFixup = 0;
+
+ return;
+ }
+ }
+ }
+
+NoSpecialCase:
+
+ SigBuilder sigBuilder;
+
+ sigBuilder.AppendData(entryKind);
+
+ if (pResultLookup->lookupKind.runtimeLookupKind != CORINFO_LOOKUP_METHODPARAM)
+ {
+ _ASSERTE(pContextMT->GetNumDicts() > 0);
+ sigBuilder.AppendData(pContextMT->GetNumDicts() - 1);
+ }
+
+ Module * pModule = (Module *)pResolvedToken->tokenScope;
+
+ switch (entryKind)
+ {
+ case DeclaringTypeHandleSlot:
+ _ASSERTE(pTemplateMD != NULL);
+ sigBuilder.AppendElementType(ELEMENT_TYPE_INTERNAL);
+ sigBuilder.AppendPointer(pTemplateMD->GetMethodTable());
+ // fall through
+
+ case TypeHandleSlot:
+ {
+ if (pResolvedToken->tokenType == CORINFO_TOKENKIND_Newarr)
+ sigBuilder.AppendElementType(ELEMENT_TYPE_SZARRAY);
+
+ // Note that we can come here with pResolvedToken->pTypeSpec == NULL for invalid IL that
+ // directly references __Canon
+ if (pResolvedToken->pTypeSpec != NULL)
+ {
+ SigPointer sigptr(pResolvedToken->pTypeSpec, pResolvedToken->cbTypeSpec);
+ sigptr.ConvertToInternalExactlyOne(pModule, NULL, &sigBuilder);
+ }
+ else
+ {
+ sigBuilder.AppendElementType(ELEMENT_TYPE_INTERNAL);
+ sigBuilder.AppendPointer(pResolvedToken->hClass);
+ }
+ }
+ break;
+
+ case ConstrainedMethodEntrySlot:
+ // Encode constrained type token
+ if (pConstrainedResolvedToken->pTypeSpec != NULL)
+ {
+ SigPointer sigptr(pConstrainedResolvedToken->pTypeSpec, pConstrainedResolvedToken->cbTypeSpec);
+ sigptr.ConvertToInternalExactlyOne(pModule, NULL, &sigBuilder);
+ }
+ else
+ {
+ sigBuilder.AppendElementType(ELEMENT_TYPE_INTERNAL);
+ sigBuilder.AppendPointer(pConstrainedResolvedToken->hClass);
+ }
+ // fall through
+
+ case MethodDescSlot:
+ case MethodEntrySlot:
+ case DispatchStubAddrSlot:
+ {
+ // Encode containing type
+ if (pResolvedToken->pTypeSpec != NULL)
+ {
+ SigPointer sigptr(pResolvedToken->pTypeSpec, pResolvedToken->cbTypeSpec);
+ sigptr.ConvertToInternalExactlyOne(pModule, NULL, &sigBuilder);
+ }
+ else
+ {
+ sigBuilder.AppendElementType(ELEMENT_TYPE_INTERNAL);
+ sigBuilder.AppendPointer(pResolvedToken->hClass);
+ }
+
+ // Encode method
+ _ASSERTE(pTemplateMD != NULL);
+
+ mdMethodDef methodToken = pTemplateMD->GetMemberDef_NoLogging();
+ DWORD methodFlags = 0;
+
+ // Check for a non-NULL method spec first. We can encode the method instantiation only if we have one in the method spec to start with. Note that there are weird cases,
+ // like an instantiating stub for a generic method definition, that do not have a method spec but won't be caught by the later conditions either.
+ BOOL fMethodNeedsInstantiation = (pResolvedToken->pMethodSpec != NULL) && pTemplateMD->HasMethodInstantiation() && !pTemplateMD->IsGenericMethodDefinition();
+
+ if (pTemplateMD->IsUnboxingStub())
+ methodFlags |= ENCODE_METHOD_SIG_UnboxingStub;
+ // Always create an instantiating stub for method entry points even if the template does not ask for it. It saves the caller
+ // from creating a throw-away instantiating stub.
+ if (pTemplateMD->IsInstantiatingStub() || (entryKind == MethodEntrySlot))
+ methodFlags |= ENCODE_METHOD_SIG_InstantiatingStub;
+ if (fMethodNeedsInstantiation)
+ methodFlags |= ENCODE_METHOD_SIG_MethodInstantiation;
+ if (IsNilToken(methodToken))
+ {
+ methodFlags |= ENCODE_METHOD_SIG_SlotInsteadOfToken;
+ }
+ else
+ if (entryKind == DispatchStubAddrSlot)
+ {
+ // Encode the method for dispatch stub using slot to avoid touching the interface method MethodDesc at runtime
+
+ // There should be no other flags set if we are encoding the method using slot for virtual stub dispatch
+ _ASSERTE(methodFlags == 0);
+
+ methodFlags |= ENCODE_METHOD_SIG_SlotInsteadOfToken;
+ }
+ else
+ if (!pTemplateMD->GetModule()->IsInCurrentVersionBubble())
+ {
+ // Using a method defined in another version bubble. We can assume the slot number is stable only for real interface methods.
+ if (!pTemplateMD->GetMethodTable()->IsInterface() || pTemplateMD->IsStatic() || pTemplateMD->HasMethodInstantiation())
+ {
+ _ASSERTE(!"References to non-interface methods not yet supported in version resilient images");
+ IfFailThrow(E_FAIL);
+ }
+ methodFlags |= ENCODE_METHOD_SIG_SlotInsteadOfToken;
+ }
+
+ sigBuilder.AppendData(methodFlags);
+
+ if ((methodFlags & ENCODE_METHOD_SIG_SlotInsteadOfToken) == 0)
+ {
+ // Encode method token and its module context (as method's type)
+ sigBuilder.AppendElementType(ELEMENT_TYPE_INTERNAL);
+ sigBuilder.AppendPointer(pTemplateMD->GetMethodTable());
+
+ sigBuilder.AppendData(RidFromToken(methodToken));
+ }
+ else
+ {
+ sigBuilder.AppendData(pTemplateMD->GetSlot());
+ }
+
+ if (fMethodNeedsInstantiation)
+ {
+ SigPointer sigptr(pResolvedToken->pMethodSpec, pResolvedToken->cbMethodSpec);
+
+ BYTE etype;
+ IfFailThrow(sigptr.GetByte(&etype));
+
+ // Load the generic method instantiation
+ THROW_BAD_FORMAT_MAYBE(etype == (BYTE)IMAGE_CEE_CS_CALLCONV_GENERICINST, 0, pModule);
+
+ DWORD nGenericMethodArgs;
+ IfFailThrow(sigptr.GetData(&nGenericMethodArgs));
+ sigBuilder.AppendData(nGenericMethodArgs);
+
+ _ASSERTE(nGenericMethodArgs == pTemplateMD->GetNumGenericMethodArgs());
+
+ for (DWORD i = 0; i < nGenericMethodArgs; i++)
+ {
+ sigptr.ConvertToInternalExactlyOne(pModule, NULL, &sigBuilder);
+ }
+ }
+ }
+ break;
+
+ case FieldDescSlot:
+ {
+ if (pResolvedToken->pTypeSpec != NULL)
+ {
+ // Encode containing type
+ SigPointer sigptr(pResolvedToken->pTypeSpec, pResolvedToken->cbTypeSpec);
+ sigptr.ConvertToInternalExactlyOne(pModule, NULL, &sigBuilder);
+ }
+ else
+ {
+ sigBuilder.AppendElementType(ELEMENT_TYPE_INTERNAL);
+ sigBuilder.AppendPointer(pResolvedToken->hClass);
+ }
+
+ FieldDesc * pField = (FieldDesc *)pResolvedToken->hField;
+ _ASSERTE(pField != NULL);
+
+ DWORD fieldIndex = pField->GetApproxEnclosingMethodTable()->GetIndexForFieldDesc(pField);
+ sigBuilder.AppendData(fieldIndex);
+ }
+ break;
+
+ default:
+ _ASSERTE(false);
+ }
+
+ // It's a method dictionary lookup
+ if (pResultLookup->lookupKind.runtimeLookupKind == CORINFO_LOOKUP_METHODPARAM)
+ {
+ _ASSERTE(pContextMD != NULL);
+ _ASSERTE(pContextMD->HasMethodInstantiation());
+
+ if (DictionaryLayout::FindToken(pContextMD->GetLoaderAllocator(), pContextMD->GetNumGenericMethodArgs(), pContextMD->GetDictionaryLayout(), pResult, &sigBuilder, 1))
+ {
+ pResult->testForNull = 1;
+ pResult->testForFixup = 0;
+
+ // Indirect through dictionary table pointer in InstantiatedMethodDesc
+ pResult->offsets[0] = offsetof(InstantiatedMethodDesc, m_pPerInstInfo);
+ }
+ }
+
+ // It's a class dictionary lookup (CORINFO_LOOKUP_CLASSPARAM or CORINFO_LOOKUP_THISOBJ)
+ else
+ {
+ if (DictionaryLayout::FindToken(pContextMT->GetLoaderAllocator(), pContextMT->GetNumGenericArgs(), pContextMT->GetClass()->GetDictionaryLayout(), pResult, &sigBuilder, 2))
+ {
+ pResult->testForNull = 1;
+ pResult->testForFixup = 0;
+
+ // Indirect through dictionary table pointer in vtable
+ pResult->offsets[0] = MethodTable::GetOffsetOfPerInstInfo();
+
+ // Next indirect through the dictionary appropriate to this instantiated type
+ pResult->offsets[1] = sizeof(TypeHandle*) * (pContextMT->GetNumDicts()-1);
+ }
+ }
+}
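+
+// Editorial sketch (not part of the original change): the (indirections,
+// offsets[]) pair describes a pointer walk the JIT can emit inline. The "naked
+// method type variable" special case above (indirections == 2) corresponds
+// roughly to:
+//
+//   BYTE * p = (BYTE *)pInstMD;                // pInstMD is a stand-in name for
+//                                              // the runtime generic context arg
+//   p = *(BYTE **)(p + offsets[0]);            // InstantiatedMethodDesc::m_pPerInstInfo
+//   TypeHandle th = *(TypeHandle *)(p + offsets[1]);  // sizeof(TypeHandle) * index
+//
+// with CORINFO_USEHELPER left in place as the fallback when no such compact
+// sequence exists.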
+
+
+
+/*********************************************************************/
+const char* CEEInfo::getClassName (CORINFO_CLASS_HANDLE clsHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ const char* result = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle VMClsHnd(clsHnd);
+ MethodTable* pMT = VMClsHnd.GetMethodTable();
+ if (pMT == NULL)
+ {
+ result = "";
+ }
+ else
+ {
+#ifdef _DEBUG
+ result = pMT->GetDebugClassName();
+#else // !_DEBUG
+ // since this is for diagnostic purposes only,
+ // give up on the namespace, as we don't have a buffer to concat it
+ // also note this won't show array class names.
+ LPCUTF8 nameSpace;
+ result = pMT->GetFullyQualifiedNameInfo(&nameSpace);
+#endif
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/***********************************************************************/
+const char* CEEInfo::getHelperName (CorInfoHelpFunc ftnNum)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(ftnNum >= 0 && ftnNum < CORINFO_HELP_COUNT);
+ } CONTRACTL_END;
+
+ const char* result = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+#ifdef CROSSGEN_COMPILE
+ result = hlpNameTable[ftnNum];
+#else
+#ifdef _DEBUG
+ result = hlpFuncTable[ftnNum].name;
+#else
+ result = "AnyJITHelper";
+#endif
+#endif
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+
+/*********************************************************************/
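+// Appends the requested class name to *ppBuf, advancing *ppBuf and decrementing
+// *pnBufLen past the copied text, and returns the length of the full name.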
+int CEEInfo::appendClassName(__deref_inout_ecount(*pnBufLen) WCHAR** ppBuf,
+ int* pnBufLen,
+ CORINFO_CLASS_HANDLE clsHnd,
+ BOOL fNamespace,
+ BOOL fFullInst,
+ BOOL fAssembly)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ MODE_PREEMPTIVE;
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ int nLen = 0;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle th(clsHnd);
+ StackSString ss;
+ TypeString::AppendType(ss,th,
+ (fNamespace ? TypeString::FormatNamespace : 0) |
+ (fFullInst ? TypeString::FormatFullInst : 0) |
+ (fAssembly ? TypeString::FormatAssembly : 0));
+ const WCHAR* szString = ss.GetUnicode();
+ nLen = (int)wcslen(szString);
+ if (*pnBufLen > 0)
+ {
+ wcscpy_s(*ppBuf, *pnBufLen, szString );
+ (*ppBuf)[(*pnBufLen) - 1] = W('\0');
+ (*ppBuf) += nLen;
+ (*pnBufLen) -= nLen;
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return nLen;
+}
+
+/*********************************************************************/
+CORINFO_MODULE_HANDLE CEEInfo::getClassModule(CORINFO_CLASS_HANDLE clsHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_MODULE_HANDLE result = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ TypeHandle VMClsHnd(clsHnd);
+
+ result = CORINFO_MODULE_HANDLE(VMClsHnd.GetModule());
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+/*********************************************************************/
+CORINFO_ASSEMBLY_HANDLE CEEInfo::getModuleAssembly(CORINFO_MODULE_HANDLE modHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_ASSEMBLY_HANDLE result = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ result = CORINFO_ASSEMBLY_HANDLE(GetModule(modHnd)->GetAssembly());
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+/*********************************************************************/
+const char* CEEInfo::getAssemblyName(CORINFO_ASSEMBLY_HANDLE asmHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ const char* result = NULL;
+
+ JIT_TO_EE_TRANSITION();
+ result = ((Assembly*)asmHnd)->GetSimpleName();
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+void* CEEInfo::LongLifetimeMalloc(size_t sz)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ void* result = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+ result = new (nothrow) char[sz];
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+/*********************************************************************/
+void CEEInfo::LongLifetimeFree(void* obj)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+ (operator delete)(obj);
+ EE_TO_JIT_TRANSITION_LEAF();
+}
+
+/*********************************************************************/
+size_t CEEInfo::getClassModuleIdForStatics(CORINFO_CLASS_HANDLE clsHnd, CORINFO_MODULE_HANDLE *pModuleHandle, void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+#ifdef MDIL
+ _ASSERTE(!SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain() || isVerifyOnly());
+#endif
+
+ size_t result = 0;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ TypeHandle VMClsHnd(clsHnd);
+ Module *pModule = VMClsHnd.AsMethodTable()->GetModuleForStatics();
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ // The zapper needs the module handle. The jit should not use it at all.
+ if (pModuleHandle)
+ *pModuleHandle = CORINFO_MODULE_HANDLE(pModule);
+
+ result = pModule->GetModuleID();
+
+ _ASSERTE(result);
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+/*********************************************************************/
+BOOL CEEInfo::isValueClass(CORINFO_CLASS_HANDLE clsHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ BOOL ret = FALSE;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ _ASSERTE(clsHnd);
+
+ // Note that clsHnd.IsValueType() would not return what the JIT expects
+ // for corner cases like ELEMENT_TYPE_FNPTR
+ TypeHandle VMClsHnd(clsHnd);
+ MethodTable * pMT = VMClsHnd.GetMethodTable();
+ ret = (pMT != NULL) ? pMT->IsValueType() : 0;
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return ret;
+}
+
+/*********************************************************************/
+// If this method returns true, JIT will do optimization to inline the check for
+// GetClassFromHandle(handle) == obj.GetType()
+//
+// This enables the JIT to use the typehandle directly instead of going through getClassByHandle
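+//
+// For example, C# code such as
+//     if (obj.GetType() == typeof(MyClass)) { ... }
+// can then be lowered to a single comparison of obj's methodtable pointer against
+// MyClass's methodtable.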
+BOOL CEEInfo::canInlineTypeCheckWithObjectVTable (CORINFO_CLASS_HANDLE clsHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ BOOL ret = FALSE;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ _ASSERTE(clsHnd);
+
+ TypeHandle VMClsHnd(clsHnd);
+
+ if (VMClsHnd.IsTypeDesc())
+ {
+ // We can't do this optimization for arrays because the object's methodtable is the template methodtable
+ ret = FALSE;
+ }
+ else
+ if (VMClsHnd.AsMethodTable()->IsMarshaledByRef())
+ {
+ // We can't do this optimization for marshalbyrefs because the object's methodtable can be a transparent proxy
+ ret = FALSE;
+ }
+ else
+ if (VMClsHnd.AsMethodTable()->IsInterface())
+ {
+ // Object.GetType() should never return an interface. However, the WCF custom remoting proxy does it. Disable this
+ // optimization for interfaces so that (autogenerated) code that compares Object.GetType() with interface type works
+ // as expected for WCF custom remoting proxy. Note that this optimization is still not going to work well for custom
+ // remoting proxies that are even more broken than the WCF one, e.g. returning random non-marshalbyref types
+ // from Object.GetType().
+ ret = FALSE;
+ }
+ else
+ if (VMClsHnd == TypeHandle(g_pCanonMethodTableClass))
+ {
+ // We can't do this optimization in shared generics code because we do not know what the actual type is going to be.
+ // (It can be array, marshalbyref, etc.)
+ ret = FALSE;
+ }
+ else
+ {
+ // It is safe to perform this optimization
+ ret = TRUE;
+ }
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return(ret);
+}
+
+/*********************************************************************/
+DWORD CEEInfo::getClassAttribs (CORINFO_CLASS_HANDLE clsHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ // <REVISIT_TODO>@todo FIX need to really fetch the class attributes. At present
+ // we don't need to because the JIT only cares in the case of COM classes</REVISIT_TODO>
+ DWORD ret = 0;
+
+ JIT_TO_EE_TRANSITION();
+
+ ret = getClassAttribsInternal(clsHnd);
+
+ EE_TO_JIT_TRANSITION();
+
+ return ret;
+}
+
+
+/*********************************************************************/
+BOOL CEEInfo::isStructRequiringStackAllocRetBuf(CORINFO_CLASS_HANDLE clsHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ BOOL ret = 0;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ TypeHandle VMClsHnd(clsHnd);
+ MethodTable * pMT = VMClsHnd.GetMethodTable();
+ ret = (pMT != NULL && pMT->IsStructRequiringStackAllocRetBuf());
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return ret;
+}
+
+/*********************************************************************/
+DWORD CEEInfo::getClassAttribsInternal (CORINFO_CLASS_HANDLE clsHnd)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD ret = 0;
+
+ _ASSERTE(clsHnd);
+
+ TypeHandle VMClsHnd(clsHnd);
+
+ // Byrefs should only occur in method and local signatures, which are accessed
+ // using ICorClassInfo and ICorClassInfo.getChildType.
+ // So getClassAttribs() should not be called for byrefs
+
+ if (VMClsHnd.IsByRef())
+ {
+ _ASSERTE(!"Did findClass() return a Byref?");
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ else if (VMClsHnd.IsGenericVariable())
+ {
+ //@GENERICSVER: for now, type variables simply report "variable".
+ ret |= CORINFO_FLG_GENERIC_TYPE_VARIABLE;
+ }
+ else
+ {
+ MethodTable *pMT = VMClsHnd.GetMethodTable();
+
+ if (!pMT)
+ {
+ _ASSERTE(!"Did findClass() return a Byref?");
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ EEClass * pClass = pMT->GetClass();
+
+ // The array flag is used to identify the faked-up methods on
+ // array types, i.e. .ctor, Get, Set and Address
+ if (pMT->IsArray())
+ ret |= CORINFO_FLG_ARRAY;
+
+ if (pMT->IsInterface())
+ ret |= CORINFO_FLG_INTERFACE;
+
+ if (pMT->HasComponentSize())
+ ret |= CORINFO_FLG_VAROBJSIZE;
+
+ if (pMT->IsValueType())
+ {
+ ret |= CORINFO_FLG_VALUECLASS;
+
+ if (pMT->ContainsStackPtr())
+ ret |= CORINFO_FLG_CONTAINS_STACK_PTR;
+
+ if (pClass->IsNotTightlyPacked() && (!pClass->IsManagedSequential() || pClass->HasExplicitSize()) ||
+ pMT == g_TypedReferenceMT ||
+ VMClsHnd.IsNativeValueType())
+ {
+ ret |= CORINFO_FLG_CUSTOMLAYOUT;
+ }
+
+ if (pClass->IsUnsafeValueClass())
+ ret |= CORINFO_FLG_UNSAFE_VALUECLASS;
+ }
+ if (pClass->HasExplicitFieldOffsetLayout() && pClass->HasOverLayedField())
+ ret |= CORINFO_FLG_OVERLAPPING_FIELDS;
+ if (VMClsHnd.IsCanonicalSubtype())
+ ret |= CORINFO_FLG_SHAREDINST;
+
+ if (pMT->HasVariance())
+ ret |= CORINFO_FLG_VARIANCE;
+
+ if (pMT->IsContextful())
+ ret |= CORINFO_FLG_CONTEXTFUL;
+
+ if (pMT->IsMarshaledByRef())
+ ret |= CORINFO_FLG_MARSHAL_BYREF;
+
+ if (pMT->ContainsPointers())
+ ret |= CORINFO_FLG_CONTAINS_GC_PTR;
+
+ if (pMT->IsDelegate())
+ ret |= CORINFO_FLG_DELEGATE;
+
+ if (pClass->IsBeforeFieldInit())
+ {
+ if (IsReadyToRunCompilation() && !pMT->GetModule()->IsInCurrentVersionBubble())
+ {
+ // For version resiliency do not allow hoisting static constructors out of loops
+ }
+ else
+ {
+ ret |= CORINFO_FLG_BEFOREFIELDINIT;
+ }
+ }
+
+ if (pClass->IsAbstract())
+ ret |= CORINFO_FLG_ABSTRACT;
+
+ if (pClass->IsSealed())
+ ret |= CORINFO_FLG_FINAL;
+ }
+
+ return ret;
+}
+
+/*********************************************************************/
+//
+// See code:CorInfoFlag#ClassConstructionFlags for details.
+//
+CorInfoInitClassResult CEEInfo::initClass(
+ CORINFO_FIELD_HANDLE field,
+ CORINFO_METHOD_HANDLE method,
+ CORINFO_CONTEXT_HANDLE context,
+ BOOL speculative)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ DWORD result = CORINFO_INITCLASS_NOT_REQUIRED;
+
+ JIT_TO_EE_TRANSITION();
+ {
+
+ // Do not bother figuring out the initialization if we are only verifying the method.
+ if (isVerifyOnly())
+ {
+ result = CORINFO_INITCLASS_NOT_REQUIRED;
+ goto exit;
+ }
+
+ FieldDesc * pFD = (FieldDesc *)field;
+ _ASSERTE(pFD == NULL || pFD->IsStatic());
+
+ MethodDesc * pMD = (MethodDesc *)method;
+
+ TypeHandle typeToInitTH = (pFD != NULL) ? pFD->GetEnclosingMethodTable() : GetTypeFromContext(context);
+
+#ifdef MDIL
+ Assembly *pInitializationTargetAssembly = typeToInitTH.GetModule()->GetAssembly();
+ bool initAssemblyIsCurrentlyCompilingAssembly = false;
+
+ // ASSERT that initClass is only being called for types in
+ // 1. Application assemblies
+ // 2. Types in framework assemblies which have non-versionable methods.
+ // This is done by observing that the MDIL Compiler JIT only calls initClass with pMD specified
+ // and never with pFD specified, and that either pMD is an application assembly method, or a method that
+ // is itself marked as non-versionable. To allow this assert to work when tests are compiling
+ // framework assemblies to MDIL, the assembly currently being compiled is also permitted to have
+ // types on which initClass is called.
+ if (SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain())
+ {
+ _ASSERTE(pFD == NULL);
+
+ CompilationDomain *compDomain = GetAppDomain()->ToCompilationDomain();
+ initAssemblyIsCurrentlyCompilingAssembly = IsInSameVersionBubble(compDomain->GetTargetAssembly(),pInitializationTargetAssembly);
+ _ASSERTE(initAssemblyIsCurrentlyCompilingAssembly);
+ }
+#endif // MDIL
+
+ MethodDesc *methodBeingCompiled = m_pMethodBeingCompiled;
+
+ BOOL fMethodDomainNeutral = methodBeingCompiled->IsDomainNeutral() || methodBeingCompiled->IsZapped() || IsCompilingForNGen();
+
+ MethodTable *pTypeToInitMT = typeToInitTH.AsMethodTable();
+
+ // This should be the most common early-out case.
+ if (fMethodDomainNeutral)
+ {
+#ifdef MDIL
+ // Only check to see if a class is PreInited if running initClass on something within the same versioning bubble as
+ // the assembly being compiled when working with MDIL. If not compiling for MDIL, checking for pre-initialization is always safe.
+ if ((!SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain() || initAssemblyIsCurrentlyCompilingAssembly) && pTypeToInitMT->IsClassPreInited())
+#else
+ if (pTypeToInitMT->IsClassPreInited())
+#endif
+ {
+ result = CORINFO_INITCLASS_NOT_REQUIRED;
+ goto exit;
+ }
+ }
+ else
+ {
+#ifdef MDIL
+ _ASSERTE(!SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain());
+#endif
+#ifdef CROSSGEN_COMPILE
+ _ASSERTE(FALSE);
+#else // CROSSGEN_COMPILE
+ if (pTypeToInitMT->IsClassInited())
+ {
+ // If the type is initialized there really is nothing to do.
+ result = CORINFO_INITCLASS_INITIALIZED;
+ goto exit;
+ }
+#endif // CROSSGEN_COMPILE
+ }
+
+ if (pTypeToInitMT->IsGlobalClass())
+ {
+ // For both jitted and ngen code the global class is always considered initialized
+ result = CORINFO_INITCLASS_NOT_REQUIRED;
+ goto exit;
+ }
+
+ bool fIgnoreBeforeFieldInit = false;
+
+ if (pFD == NULL)
+ {
+#ifdef FEATURE_LEGACYNETCF
+ // For methods, NetCF always triggers the static constructor as a side-effect of JITing, essentially ignoring beforefieldinit.
+ if (pTypeToInitMT->GetDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ {
+ // This quirk assumes that RunCCTorAsIfNGenImageExists() is TRUE. It would need to be replicated in more places
+ // if it was not the case.
+ _ASSERTE(pTypeToInitMT->RunCCTorAsIfNGenImageExists());
+
+ fIgnoreBeforeFieldInit = true;
+ }
+#endif
+
+ if (!fIgnoreBeforeFieldInit && pTypeToInitMT->GetClass()->IsBeforeFieldInit())
+ {
+ // We can wait for field accesses to run .cctor
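+ // (C# types with static field initializers but no explicit static constructor are
+ // emitted with the beforefieldinit flag, which is what permits this lazy behavior.)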
+ result = CORINFO_INITCLASS_NOT_REQUIRED;
+ goto exit;
+ }
+
+ // Run .cctor on statics & constructors
+ if (pMD->IsStatic())
+ {
+ // Except don't class construct on .cctor - it would be circular
+ if (pMD->IsClassConstructor())
+ {
+ result = CORINFO_INITCLASS_NOT_REQUIRED;
+ goto exit;
+ }
+ }
+ else
+ // According to the spec, we should be able to do this optimization for both reference and valuetypes.
+ // To maintain backward compatibility, we are doing it for reference types only.
+ if (!pMD->IsCtor() && !pTypeToInitMT->IsValueType())
+ {
+ // For instance methods of types with precise-initialization
+ // semantics, we can assume that the .ctor triggered the
+ // type initialization.
+ // This does not hold for NULL "this" object. However, the spec does
+ // not require that case to work.
+ result = CORINFO_INITCLASS_NOT_REQUIRED;
+ goto exit;
+ }
+ }
+
+ if (pTypeToInitMT->IsSharedByGenericInstantiations())
+ {
+ // Shared generic code has to use the helper. Moreover, tell the JIT not to inline, since
+ // inlining of generic dictionary lookups is not supported.
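+ // (For example, List<string> and List<object> share one native code body, so the
+ // exact type whose .cctor must run is only known at runtime.)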
+ result = CORINFO_INITCLASS_USE_HELPER | CORINFO_INITCLASS_DONT_INLINE;
+ goto exit;
+ }
+
+ //
+ // Try to prove that the initialization is not necessary because of nesting
+ //
+
+ if (pFD == NULL)
+ {
+ // Handled above
+ _ASSERTE(fIgnoreBeforeFieldInit || !pTypeToInitMT->GetClass()->IsBeforeFieldInit());
+
+ // Note that the JIT passes the method being compiled as the context when asking whether
+ // to emit the cctor check for that method's own code (as opposed to inlining codegen).
+ if (context != MAKE_METHODCONTEXT(methodBeingCompiled) && pTypeToInitMT == methodBeingCompiled->GetMethodTable())
+ {
+ // If we're inlining a call to a method in our own type, then we should already
+ // have triggered the .cctor when the caller was itself called.
+ result = CORINFO_INITCLASS_NOT_REQUIRED;
+ goto exit;
+ }
+ }
+ else
+ {
+ // This optimization may cause static fields in reference types to be accessed without cctor being triggered
+ // for NULL "this" object. It does not conform with what the spec says. However, we have been historically
+ // doing it for perf reasons.
+ if (!pTypeToInitMT->IsValueType() && !pTypeToInitMT->GetClass()->IsBeforeFieldInit())
+ {
+ if (pTypeToInitMT == GetTypeFromContext(context).AsMethodTable() || pTypeToInitMT == methodBeingCompiled->GetMethodTable())
+ {
+ // The class will be initialized by the time we access the field.
+ result = CORINFO_INITCLASS_NOT_REQUIRED;
+ goto exit;
+ }
+ }
+
+ // If we are currently compiling the class constructor for this static field access then we can skip the initClass
+ if (methodBeingCompiled->GetMethodTable() == pTypeToInitMT && methodBeingCompiled->IsStatic() && methodBeingCompiled->IsClassConstructor())
+ {
+ // The class will be initialized by the time we access the field.
+ result = CORINFO_INITCLASS_NOT_REQUIRED;
+ goto exit;
+ }
+ }
+
+ if (fMethodDomainNeutral)
+ {
+ // Well, because of code sharing we can't do anything at code generation time.
+ // We have to do it at runtime.
+ result = CORINFO_INITCLASS_USE_HELPER;
+ goto exit;
+ }
+
+#ifndef CROSSGEN_COMPILE
+ //
+ // Optimizations for domain specific code
+ //
+
+ // Allocate space for the local class if necessary, but don't trigger
+ // class construction.
+ DomainLocalModule *pModule = pTypeToInitMT->GetDomainLocalModule();
+ pModule->PopulateClass(pTypeToInitMT);
+
+ if (pTypeToInitMT->IsClassInited())
+ {
+ result = CORINFO_INITCLASS_INITIALIZED;
+ goto exit;
+ }
+
+#ifdef FEATURE_MULTICOREJIT
+ // Once multicore JIT is enabled in an AppDomain by calling SetProfileRoot, always use the helper function to call class init, for consistency
+ if (! GetAppDomain()->GetMulticoreJitManager().AllowCCtorsToRunDuringJITing())
+ {
+ result = CORINFO_INITCLASS_USE_HELPER;
+
+ goto exit;
+ }
+#endif
+
+ // To preserve consistent behavior between ngen and not-ngenned states, do not eagerly
+ // run class constructors for autongennable code.
+ if (pTypeToInitMT->RunCCTorAsIfNGenImageExists())
+ {
+ result = CORINFO_INITCLASS_USE_HELPER;
+ goto exit;
+ }
+
+ if (!pTypeToInitMT->GetClass()->IsBeforeFieldInit())
+ {
+ // Do not inline the access if we cannot initialize the class. Chances are that the class will get
+ // initialized by the time the access is jitted.
+ result = CORINFO_INITCLASS_USE_HELPER | CORINFO_INITCLASS_DONT_INLINE;
+ goto exit;
+ }
+
+ if (speculative)
+ {
+ // Tell the JIT that we may be able to initialize the class when asked to.
+ result = CORINFO_INITCLASS_SPECULATIVE;
+ goto exit;
+ }
+
+ //
+ // We cannot run the class constructor without first activating the
+ // module containing the class. However, since the current module
+ // we are compiling inside is not active, we don't want to do this.
+ //
+ // This should be an unusual case since normally the method's module should
+ // be active during jitting.
+ //
+ // @TODO: We should check IsActivating() instead of IsActive() since we may
+ // be running the Module::.cctor(). The assembly is not marked as active
+ // until then.
+ if (!methodBeingCompiled->GetLoaderModule()->GetDomainFile()->IsActive())
+ {
+ result = CORINFO_INITCLASS_USE_HELPER;
+ goto exit;
+ }
+
+ //
+ // Run the .cctor
+ //
+
+ EX_TRY
+ {
+ pTypeToInitMT->CheckRunClassInitThrowing();
+ }
+ EX_CATCH
+ {
+ } EX_END_CATCH(SwallowAllExceptions);
+
+ if (pTypeToInitMT->IsClassInited())
+ {
+ result = CORINFO_INITCLASS_INITIALIZED;
+ goto exit;
+ }
+#endif // CROSSGEN_COMPILE
+
+ // Do not inline the access if we were unable to initialize the class. Chances are that the class will get
+ // initialized by the time the access is jitted.
+ result = (CORINFO_INITCLASS_USE_HELPER | CORINFO_INITCLASS_DONT_INLINE);
+
+ }
+exit: ;
+ EE_TO_JIT_TRANSITION();
+
+ return (CorInfoInitClassResult)result;
+}
+
+
+
+void CEEInfo::classMustBeLoadedBeforeCodeIsRun (CORINFO_CLASS_HANDLE typeToLoadHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ TypeHandle th = TypeHandle(typeToLoadHnd);
+
+ // Type handles returned to JIT at runtime are always fully loaded. Verify that it is the case.
+ _ASSERTE(th.IsFullyLoaded());
+
+ EE_TO_JIT_TRANSITION_LEAF();
+}
+
+/*********************************************************************/
+void CEEInfo::methodMustBeLoadedBeforeCodeIsRun (CORINFO_METHOD_HANDLE methHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ MethodDesc *pMD = (MethodDesc*) methHnd;
+
+ // MethodDescs returned to JIT at runtime are always fully loaded. Verify that it is the case.
+ _ASSERTE(pMD->IsRestored() && pMD->GetMethodTable()->IsFullyLoaded());
+
+ EE_TO_JIT_TRANSITION_LEAF();
+}
+
+/*********************************************************************/
+CORINFO_METHOD_HANDLE CEEInfo::mapMethodDeclToMethodImpl(CORINFO_METHOD_HANDLE methHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_METHOD_HANDLE result = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc *pMD = GetMethod(methHnd);
+ pMD = MethodTable::MapMethodDeclToMethodImpl(pMD);
+ result = (CORINFO_METHOD_HANDLE) pMD;
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+#ifdef MDIL
+/*********************************************************************/
+unsigned CEEInfo::getNumTypeParameters(CORINFO_METHOD_HANDLE methHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ unsigned result = 0;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc *pMD = GetMethod(methHnd);
+ result = pMD->GetNumGenericClassArgs() + pMD->GetNumGenericMethodArgs();
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+CorElementType CEEInfo::getTypeOfTypeParameter(CORINFO_METHOD_HANDLE methHnd, unsigned index)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorElementType result = ELEMENT_TYPE_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc *pMD = GetMethod(methHnd);
+ MethodTable *pMT = pMD->GetMethodTable();
+ TypeHandle th = TypeHandle();
+ if (index < pMD->GetNumGenericClassArgs())
+ {
+ th = pMT->GetInstantiation()[index];
+ }
+ else
+ {
+ InstantiatedMethodDesc *pIMD = pMD->AsInstantiatedMethodDesc();
+ th = pIMD->GetMethodInstantiation()[index-pMD->GetNumGenericClassArgs()];
+ }
+
+ result = th.GetInternalCorElementType();
+
+ switch (result)
+ {
+ case ELEMENT_TYPE_VALUETYPE:
+ if (Nullable::IsNullableType(th))
+ {
+ result = (CorElementType)0x17;
+ if (th.IsCanonicalSubtype())
+ result = (CorElementType)0x1f;
+ }
+ else if (th.IsCanonicalSubtype())
+ result = (CorElementType)0x1e;
+ break;
+
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+#if defined(_WIN64)
+ result = (CorElementType)(result - (ELEMENT_TYPE_I - ELEMENT_TYPE_I8));
+#else
+ result = (CorElementType)(result - (ELEMENT_TYPE_I - ELEMENT_TYPE_I4));
+#endif
+ break;
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+/*********************************************************************/
+CORINFO_CLASS_HANDLE CEEInfo::getTypeParameter(CORINFO_METHOD_HANDLE methHnd, bool classTypeParameter, unsigned index)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ TypeHandle th = TypeHandle();
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc *pMD = GetMethod(methHnd);
+ MethodTable *pMT = pMD->GetMethodTable();
+ if (classTypeParameter && index >= pMD->GetNumGenericClassArgs())
+ {
+ classTypeParameter = false;
+ index -= pMD->GetNumGenericClassArgs();
+ }
+ if (classTypeParameter)
+ {
+ _ASSERTE(index < pMD->GetNumGenericClassArgs());
+ th = pMT->GetInstantiation()[index];
+ }
+ else
+ {
+ _ASSERTE(index < pMD->GetNumGenericMethodArgs());
+ InstantiatedMethodDesc *pIMD = pMD->AsInstantiatedMethodDesc();
+ th = pIMD->GetMethodInstantiation()[index];
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return CORINFO_CLASS_HANDLE(th.AsPtr());
+}
+#endif
+
+/*********************************************************************/
+CORINFO_CLASS_HANDLE CEEInfo::getBuiltinClass(CorInfoClassId classId)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_CLASS_HANDLE result = (CORINFO_CLASS_HANDLE) 0;
+
+ JIT_TO_EE_TRANSITION();
+
+ switch (classId)
+ {
+ case CLASSID_SYSTEM_OBJECT:
+ result = CORINFO_CLASS_HANDLE(g_pObjectClass);
+ break;
+ case CLASSID_TYPED_BYREF:
+ result = CORINFO_CLASS_HANDLE(g_TypedReferenceMT);
+ break;
+ case CLASSID_TYPE_HANDLE:
+ result = CORINFO_CLASS_HANDLE(MscorlibBinder::GetClass(CLASS__TYPE_HANDLE));
+ break;
+ case CLASSID_FIELD_HANDLE:
+ result = CORINFO_CLASS_HANDLE(MscorlibBinder::GetClass(CLASS__FIELD_HANDLE));
+ break;
+ case CLASSID_METHOD_HANDLE:
+ result = CORINFO_CLASS_HANDLE(MscorlibBinder::GetClass(CLASS__METHOD_HANDLE));
+ break;
+ case CLASSID_ARGUMENT_HANDLE:
+ result = CORINFO_CLASS_HANDLE(g_ArgumentHandleMT);
+ break;
+ case CLASSID_STRING:
+ result = CORINFO_CLASS_HANDLE(g_pStringClass);
+ break;
+ case CLASSID_RUNTIME_TYPE:
+ result = CORINFO_CLASS_HANDLE(g_pRuntimeTypeClass);
+ break;
+ default:
+ _ASSERTE(!"NYI: unknown classId");
+ break;
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+
+
+/*********************************************************************/
+CorInfoType CEEInfo::getTypeForPrimitiveValueClass(
+ CORINFO_CLASS_HANDLE clsHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoType result = CORINFO_TYPE_UNDEF;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle th(clsHnd);
+ _ASSERTE (!th.IsGenericVariable());
+
+ MethodTable *pMT = th.GetMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+
+ // Is it a non-primitive struct such as
+ // RuntimeTypeHandle, RuntimeMethodHandle, RuntimeArgHandle?
+ if (pMT->IsValueType() &&
+ !pMT->IsTruePrimitive() &&
+ !pMT->IsEnum())
+ {
+ // default value CORINFO_TYPE_UNDEF is what we want
+ }
+ else
+ {
+ switch (th.GetInternalCorElementType())
+ {
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_BOOLEAN:
+ result = asCorInfoType(ELEMENT_TYPE_I1);
+ break;
+
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_CHAR:
+ result = asCorInfoType(ELEMENT_TYPE_I2);
+ break;
+
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ result = asCorInfoType(ELEMENT_TYPE_I4);
+ break;
+
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ result = asCorInfoType(ELEMENT_TYPE_I8);
+ break;
+
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ result = asCorInfoType(ELEMENT_TYPE_I);
+ break;
+
+ case ELEMENT_TYPE_R4:
+ result = asCorInfoType(ELEMENT_TYPE_R4);
+ break;
+
+ case ELEMENT_TYPE_R8:
+ result = asCorInfoType(ELEMENT_TYPE_R8);
+ break;
+
+ case ELEMENT_TYPE_VOID:
+ result = asCorInfoType(ELEMENT_TYPE_VOID);
+ break;
+
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_FNPTR:
+ result = asCorInfoType(ELEMENT_TYPE_PTR);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+
+void CEEInfo::getGSCookie(GSCookie * pCookieVal, GSCookie ** ppCookieVal)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ if (pCookieVal)
+ {
+ *pCookieVal = GetProcessGSCookie();
+ *ppCookieVal = NULL;
+ }
+ else
+ {
+ *ppCookieVal = GetProcessGSCookiePtr();
+ }
+
+ EE_TO_JIT_TRANSITION();
+}
+
+
+/*********************************************************************/
+// TRUE if child is a subtype of parent
+// if parent is an interface, then does child implement / extend parent
+BOOL CEEInfo::canCast(
+ CORINFO_CLASS_HANDLE child,
+ CORINFO_CLASS_HANDLE parent)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ BOOL result = FALSE;
+
+ JIT_TO_EE_TRANSITION();
+
+ result = ((TypeHandle)child).CanCastTo((TypeHandle)parent);
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+// TRUE if cls1 and cls2 are considered equivalent types.
+BOOL CEEInfo::areTypesEquivalent(
+ CORINFO_CLASS_HANDLE cls1,
+ CORINFO_CLASS_HANDLE cls2)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ BOOL result = FALSE;
+
+ JIT_TO_EE_TRANSITION();
+
+ result = ((TypeHandle)cls1).IsEquivalentTo((TypeHandle)cls2);
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+// Returns the intersection of cls1 and cls2, i.e. their closest common parent type.
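+// For example, merging System.String and System.Exception yields System.Object.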
+CORINFO_CLASS_HANDLE CEEInfo::mergeClasses(
+ CORINFO_CLASS_HANDLE cls1,
+ CORINFO_CLASS_HANDLE cls2)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_CLASS_HANDLE result = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle merged = TypeHandle::MergeTypeHandlesToCommonParent(TypeHandle(cls1), TypeHandle(cls2));
+#ifdef _DEBUG
+ {
+ //Make sure the merge is reflexive in the cases we "support".
+ TypeHandle hnd1 = TypeHandle(cls1);
+ TypeHandle hnd2 = TypeHandle(cls2);
+ TypeHandle reflexive = TypeHandle::MergeTypeHandlesToCommonParent(hnd2, hnd1);
+
+ //If both sides are classes then either they have a common non-interface parent (in which case it is
+ //reflexive)
+ //OR they share a common interface, and it can be order dependent (if they share multiple interfaces
+ //in common)
+ if (!hnd1.IsInterface() && !hnd2.IsInterface())
+ {
+ if (merged.IsInterface())
+ {
+ _ASSERTE(reflexive.IsInterface());
+ }
+ else
+ {
+ _ASSERTE(merged == reflexive);
+ }
+ }
+ //Both results must either be interfaces or classes. They cannot be mixed.
+ _ASSERTE((!!merged.IsInterface()) == (!!reflexive.IsInterface()));
+
+ //If the result of the merge was a class, then the result of the reflexive merge was the same class.
+ if (!merged.IsInterface())
+ {
+ _ASSERTE(merged == reflexive);
+ }
+
+ //If both sides are arrays, then the result is either an array or g_pArrayClass. The above is
+ //actually true about the element type for reference types, but I think that is a little
+ //excessive for sanity.
+ if (hnd1.IsArray() && hnd2.IsArray())
+ {
+ _ASSERTE((merged.IsArray() && reflexive.IsArray())
+ || ((merged == g_pArrayClass) && (reflexive == g_pArrayClass)));
+ }
+
+ //Can I assert anything about generic variables?
+
+ //The results must always be assignable
+ _ASSERTE(hnd1.CanCastTo(merged) && hnd2.CanCastTo(merged) && hnd1.CanCastTo(reflexive)
+ && hnd2.CanCastTo(reflexive));
+ }
+#endif
+ result = CORINFO_CLASS_HANDLE(merged.AsPtr());
+
+ EE_TO_JIT_TRANSITION();
+ return result;
+}
+
+/*********************************************************************/
+// Given a class handle, returns the Parent type.
+// For COMObjectType, it returns Class Handle of System.Object.
+// Returns 0 if System.Object is passed in.
+CORINFO_CLASS_HANDLE CEEInfo::getParentType(
+ CORINFO_CLASS_HANDLE cls)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_CLASS_HANDLE result = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle th(cls);
+
+ _ASSERTE(!th.IsNull());
+ _ASSERTE(!th.IsGenericVariable());
+
+ TypeHandle thParent = th.GetParent();
+
+#ifdef FEATURE_COMINTEROP
+ // If we encounter __ComObject in the hierarchy, we need to skip it
+ // since this hierarchy is introduced by the EE, but won't be present
+ // in the metadata.
+ if (IsComObjectClass(thParent))
+ {
+ result = (CORINFO_CLASS_HANDLE) g_pObjectClass;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ result = CORINFO_CLASS_HANDLE(thParent.AsPtr());
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+
+/*********************************************************************/
+// Returns the CorInfoType of the "child type". If the child type is
+// not a primitive type, *clsRet will be set.
+// Given an Array of Type Foo, returns Foo.
+// Given BYREF Foo, returns Foo
+CorInfoType CEEInfo::getChildType (
+ CORINFO_CLASS_HANDLE clsHnd,
+ CORINFO_CLASS_HANDLE *clsRet
+ )
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoType ret = CORINFO_TYPE_UNDEF;
+ *clsRet = 0;
+ TypeHandle retType = TypeHandle();
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle th(clsHnd);
+
+ _ASSERTE(!th.IsNull());
+
+ // BYREF, ARRAY types
+ if (th.IsTypeDesc())
+ {
+ retType = th.AsTypeDesc()->GetTypeParam();
+ }
+ else
+ {
+ // <REVISIT_TODO> we really should not have this case. Array type handles
+ // used in the JIT interface should never be ordinary method tables,
+ // indeed array type handles should really never be ordinary MTs
+ // at all. Perhaps we should assert !th.IsTypeDesc() && th.AsMethodTable().IsArray()? </REVISIT_TODO>
+ MethodTable* pMT= th.AsMethodTable();
+ if (pMT->IsArray())
+ retType = pMT->GetApproxArrayElementTypeHandle();
+ }
+
+ if (!retType.IsNull()) {
+ CorElementType type = retType.GetInternalCorElementType();
+ ret = CEEInfo::asCorInfoType(type,retType, clsRet);
+
+ // <REVISIT_TODO>What if this one is a value array ?</REVISIT_TODO>
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return ret;
+}
+
+/*********************************************************************/
+// Check any constraints on class type arguments
+BOOL CEEInfo::satisfiesClassConstraints(CORINFO_CLASS_HANDLE cls)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ BOOL result = FALSE;
+
+ JIT_TO_EE_TRANSITION();
+
+ _ASSERTE(cls != NULL);
+ result = TypeHandle(cls).SatisfiesClassConstraints();
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+// Check if this is a single dimensional array type
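+// (e.g. int[] is an SZARRAY and qualifies; int[,] and other multi-dimensional arrays do not)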
+BOOL CEEInfo::isSDArray(CORINFO_CLASS_HANDLE cls)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ BOOL result = FALSE;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle th(cls);
+
+ _ASSERTE(!th.IsNull());
+
+ if (th.IsArrayType())
+ {
+ // Lots of code used to think that System.Array's methodtable returns TRUE for IsArray(). It doesn't.
+ _ASSERTE(th != TypeHandle(g_pArrayClass));
+
+ result = (th.GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY);
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+// Get the number of dimensions in an array
+unsigned CEEInfo::getArrayRank(CORINFO_CLASS_HANDLE cls)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ unsigned result = 0;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle th(cls);
+
+ _ASSERTE(!th.IsNull());
+
+ if (th.IsArrayType())
+ {
+ // Lots of code used to think that System.Array's methodtable returns TRUE for IsArray(). It doesn't.
+ _ASSERTE(th != TypeHandle(g_pArrayClass));
+
+ result = th.GetPossiblySharedArrayMethodTable()->GetRank();
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+// Get static field data for an array
+// Note that it's OK to return NULL from this method. This will cause
+// the JIT to make a runtime call to InitializeArray instead of doing
+// the inline optimization (thus preserving the original behavior).
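+//
+// This feeds the RuntimeHelpers.InitializeArray fast path: for C# such as
+//     static readonly int[] Primes = { 2, 3, 5, 7 };
+// the initializer bits live in an RVA static field, and the JIT can copy them
+// inline from the address returned here.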
+void * CEEInfo::getArrayInitializationData(
+ CORINFO_FIELD_HANDLE field,
+ DWORD size
+ )
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ void * result = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ FieldDesc* pField = (FieldDesc*) field;
+
+ if (!pField ||
+ !pField->IsRVA() ||
+ (pField->LoadSize() < size)
+#ifdef FEATURE_PREJIT
+ // This will make sure that when IBC logging is on, the array initialization happens through
+ // COMArrayInfo::InitializeArray. This gives a place to put the IBC probe that can help
+ // separate hot and cold RVA blobs.
+ || (IsCompilingForNGen() &&
+ GetAppDomain()->ToCompilationDomain()->m_fForceInstrument)
+#endif // FEATURE_PREJIT
+ )
+ {
+ result = NULL;
+ }
+ else
+ {
+ result = pField->GetStaticAddressHandle(NULL);
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+CorInfoIsAccessAllowedResult CEEInfo::canAccessClass(
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_METHOD_HANDLE callerHandle,
+ CORINFO_HELPER_DESC *pAccessHelper
+ )
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoIsAccessAllowedResult isAccessAllowed = CORINFO_ACCESS_ALLOWED;
+
+ JIT_TO_EE_TRANSITION();
+
+ INDEBUG(memset(pAccessHelper, 0xCC, sizeof(*pAccessHelper)));
+
+ BOOL doAccessCheck = TRUE;
+ AccessCheckOptions::AccessCheckType accessCheckType = AccessCheckOptions::kNormalAccessibilityChecks;
+ DynamicResolver * pAccessContext = NULL;
+
+ //All access checks must be done on the open instantiation.
+ MethodDesc * pCallerForSecurity = GetMethodForSecurity(callerHandle);
+ TypeHandle callerTypeForSecurity = TypeHandle(pCallerForSecurity->GetMethodTable());
+
+ TypeHandle pCalleeForSecurity = TypeHandle(pResolvedToken->hClass);
+ if (pResolvedToken->pTypeSpec != NULL)
+ {
+ SigTypeContext typeContext;
+ SigTypeContext::InitTypeContext(pCallerForSecurity, &typeContext);
+
+ SigPointer sigptr(pResolvedToken->pTypeSpec, pResolvedToken->cbTypeSpec);
+ pCalleeForSecurity = sigptr.GetTypeHandleThrowing((Module *)pResolvedToken->tokenScope, &typeContext);
+ }
+
+ while (pCalleeForSecurity.HasTypeParam())
+ {
+ pCalleeForSecurity = pCalleeForSecurity.GetTypeParam();
+ }
+
+ if (IsDynamicScope(pResolvedToken->tokenScope))
+ {
+ doAccessCheck = ModifyCheckForDynamicMethod(GetDynamicResolver(pResolvedToken->tokenScope),
+ &callerTypeForSecurity, &accessCheckType,
+ &pAccessContext);
+ }
+
+ //Since this is a check against a TypeHandle, there are some things we can stick in a TypeHandle that
+ //don't require access checks.
+ if (pCalleeForSecurity.IsGenericVariable())
+ {
+ //I don't need to check for access against !!0.
+ doAccessCheck = FALSE;
+ }
+
+ //Now do the visibility checks
+ if (doAccessCheck)
+ {
+ AccessCheckOptions accessCheckOptions(accessCheckType,
+ pAccessContext,
+ FALSE /*throw on error*/,
+ pCalleeForSecurity.GetMethodTable());
+
+ _ASSERTE(pCallerForSecurity != NULL && callerTypeForSecurity != NULL);
+ StaticAccessCheckContext accessContext(pCallerForSecurity, callerTypeForSecurity.GetMethodTable());
+
+ BOOL canAccessType = ClassLoader::CanAccessClass(&accessContext,
+ pCalleeForSecurity.GetMethodTable(),
+ pCalleeForSecurity.GetAssembly(),
+ accessCheckOptions);
+
+ isAccessAllowed = canAccessType ? CORINFO_ACCESS_ALLOWED : CORINFO_ACCESS_ILLEGAL;
+ }
+
+
+ if (isAccessAllowed != CORINFO_ACCESS_ALLOWED)
+ {
+ //These all get the throw helper
+ pAccessHelper->helperNum = CORINFO_HELP_CLASS_ACCESS_EXCEPTION;
+ pAccessHelper->numArgs = 2;
+
+ pAccessHelper->args[0].Set(CORINFO_METHOD_HANDLE(pCallerForSecurity));
+#ifdef MDIL
+ pAccessHelper->args[0].token = 0;
+#endif
+
+ pAccessHelper->args[1].Set(CORINFO_CLASS_HANDLE(pCalleeForSecurity.AsPtr()));
+#ifdef MDIL
+ pAccessHelper->args[1].token = pResolvedToken->token;
+#endif
+
+ if (IsCompilingForNGen())
+ {
+ //see code:CEEInfo::getCallInfo for more information.
+ if (pCallerForSecurity->ContainsGenericVariables() || pCalleeForSecurity.ContainsGenericVariables())
+ COMPlusThrowNonLocalized(kNotSupportedException, W("Cannot embed generic TypeHandle"));
+ }
+ }
+
+ if (isAccessAllowed == CORINFO_ACCESS_ALLOWED)
+ {
+ //Finally let's get me some transparency checks.
+ CorInfoSecurityRuntimeChecks runtimeChecks = CORINFO_ACCESS_SECURITY_NONE;
+
+
+ DebugSecurityCalloutStress(getMethodBeingCompiled(), isAccessAllowed,
+ runtimeChecks);
+
+ if (isAccessAllowed != CORINFO_ACCESS_ALLOWED)
+ {
+ _ASSERTE(isAccessAllowed == CORINFO_ACCESS_RUNTIME_CHECK);
+ //Well, time for the runtime helper
+ pAccessHelper->helperNum = CORINFO_HELP_CLASS_ACCESS_CHECK;
+ pAccessHelper->numArgs = 3;
+
+ pAccessHelper->args[0].Set(CORINFO_METHOD_HANDLE(pCallerForSecurity));
+#ifdef MDIL
+ pAccessHelper->args[0].token = 0;
+#endif
+
+ pAccessHelper->args[1].Set(CORINFO_CLASS_HANDLE(pCalleeForSecurity.AsPtr()));
+#ifdef MDIL
+ pAccessHelper->args[1].token = pResolvedToken->token;
+#endif
+ pAccessHelper->args[2].Set(runtimeChecks);
+
+ if (IsCompilingForNGen())
+ {
+ //see code:CEEInfo::getCallInfo for more information.
+ if (pCallerForSecurity->ContainsGenericVariables() || pCalleeForSecurity.ContainsGenericVariables())
+ COMPlusThrowNonLocalized(kNotSupportedException, W("Cannot embed generic TypeHandle"));
+ }
+ }
+ }
+
+ EE_TO_JIT_TRANSITION();
+ return isAccessAllowed;
+}
+
+/***********************************************************************/
+// return the address of a pointer to a callable stub that will do the
+// virtual or interface call
+void CEEInfo::getCallInfo(
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken,
+ CORINFO_METHOD_HANDLE callerHandle,
+ CORINFO_CALLINFO_FLAGS flags,
+ CORINFO_CALL_INFO *pResult /*out */)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ _ASSERTE(CheckPointer(pResult));
+
+ INDEBUG(memset(pResult, 0xCC, sizeof(*pResult)));
+
+ MethodDesc* pMD = (MethodDesc *)pResolvedToken->hMethod;
+ TypeHandle th(pResolvedToken->hClass);
+
+ _ASSERTE(pMD);
+ _ASSERTE((size_t(pMD) & 0x1) == 0);
+
+ // Spec says that a callvirt lookup ignores static methods. Since static methods
+ // can't have the exact same signature as instance methods, a lookup that found
+ // a static method would have never found an instance method.
+ if (pMD->IsStatic() && (flags & CORINFO_CALLINFO_CALLVIRT))
+ {
+ EX_THROW(EEMessageException, (kMissingMethodException, IDS_EE_MISSING_METHOD, W("?")));
+ }
+
+
+ if (pMD == g_pPrepareConstrainedRegionsMethod && !isVerifyOnly())
+ {
+ MethodDesc * methodFromContext = GetMethodFromContext(pResolvedToken->tokenContext);
+
+#ifdef MDIL
+ // This shouldn't occur in WP8 applications. However, as a safety valve
+ // fall back to jitting the current method if this assumption is wrong ....
+ // Also, we let CER in mscorlib slip through; that way we have less noise when comparing
+ // native images generated by the binder and coregen/crossgen.
+ if (SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain() &&
+ (methodFromContext == NULL || !methodFromContext->GetModule()->IsSystem()) )
+ {
+ COMPlusThrowNonLocalized(kNotSupportedException, W("MDIL does not support CER regions"));
+ }
+#endif
+
+ if (methodFromContext != NULL && methodFromContext->IsIL())
+ {
+ SigTypeContext typeContext;
+ GetTypeContext(pResolvedToken->tokenContext, &typeContext);
+
+ // If the method whose context we're in is attempting a call to PrepareConstrainedRegions() then we've found the root
+ // method in a Constrained Execution Region (CER). Prepare the call graph of the critical parts of that method now so
+ // they won't fail because of us at runtime.
+ MethodCallGraphPreparer mcgp(methodFromContext, &typeContext, false, false);
+ bool fMethodHasCallsWithinExplicitCer = mcgp.Run();
+ if (! g_pConfig->ProbeForStackOverflow() || ! fMethodHasCallsWithinExplicitCer)
+ {
+ // if the method does not contain any CERs that call out, we can optimize the probe away
+ pMD = MscorlibBinder::GetMethod(METHOD__RUNTIME_HELPERS__PREPARE_CONSTRAINED_REGIONS_NOOP);
+ }
+ }
+ }
+
+ TypeHandle exactType = TypeHandle(pResolvedToken->hClass);
+
+ TypeHandle constrainedType;
+ if ((flags & CORINFO_CALLINFO_CALLVIRT) && (pConstrainedResolvedToken != NULL))
+ {
+ constrainedType = TypeHandle(pConstrainedResolvedToken->hClass);
+ }
+
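+ // A "constrained." prefix arises from IL such as
+ //     constrained. !!T
+ //     callvirt instance string System.Object::ToString()
+ // which C# emits for generic code like t.ToString() on an unconstrained T; the
+ // thisTransform set below tells the JIT how to adjust 'this' accordingly.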
+ BOOL fResolvedConstraint = FALSE;
+ BOOL fForceUseRuntimeLookup = FALSE;
+
+ MethodDesc * pMDAfterConstraintResolution = pMD;
+ if (constrainedType.IsNull())
+ {
+ pResult->thisTransform = CORINFO_NO_THIS_TRANSFORM;
+ }
+ // <NICE> Things go wrong when this code path is used when verifying generic code.
+ // It would be nice if we didn't go down this sort of code path when verifying but
+ // not generating code. </NICE>
+ else if (constrainedType.ContainsGenericVariables() || exactType.ContainsGenericVariables())
+ {
+ // <NICE> It shouldn't really matter what we do here - but the x86 JIT is annoyingly sensitive
+ // about what we do, since it pretends generic variables are reference types and generates
+ // an internal JIT tree even when just verifying generic code. </NICE>
+ if (constrainedType.IsGenericVariable())
+ {
+ pResult->thisTransform = CORINFO_DEREF_THIS; // convert 'this' of type &T --> T
+ }
+ else if (constrainedType.IsValueType())
+ {
+ pResult->thisTransform = CORINFO_BOX_THIS; // convert 'this' of type &VC<T> --> boxed(VC<T>)
+ }
+ else
+ {
+ pResult->thisTransform = CORINFO_DEREF_THIS; // convert 'this' of type &C<T> --> C<T>
+ }
+ }
+ else
+ {
+ // We have a "constrained." call. Try a partial resolve of the constraint call. Note that this
+ // will not necessarily resolve the call exactly, since we might be compiling
+ // shared generic code - it may just resolve it to a candidate suitable for
+ // JIT compilation, and require a runtime lookup for the actual code pointer
+ // to call.
+ MethodDesc * directMethod = constrainedType.GetMethodTable()->TryResolveConstraintMethodApprox(
+ exactType,
+ pMD,
+ &fForceUseRuntimeLookup);
+ if (directMethod)
+ {
+ // Either
+ // 1. no constraint resolution at compile time (!directMethod)
+ // OR 2. no code sharing lookup in call
+ // OR 3. we have resolved to an instantiating stub
+
+ pMDAfterConstraintResolution = directMethod;
+ _ASSERTE(!pMDAfterConstraintResolution->IsInterface());
+ fResolvedConstraint = TRUE;
+ pResult->thisTransform = CORINFO_NO_THIS_TRANSFORM;
+
+ exactType = constrainedType;
+ }
+ else if (constrainedType.IsValueType())
+ {
+ pResult->thisTransform = CORINFO_BOX_THIS;
+ }
+ else
+ {
+ pResult->thisTransform = CORINFO_DEREF_THIS;
+ }
+ }
+
+ //
+ // Initialize callee context used for inlining and instantiation arguments
+ //
+
+ MethodDesc * pTargetMD = pMDAfterConstraintResolution;
+
+ if (pTargetMD->HasMethodInstantiation())
+ {
+ pResult->contextHandle = MAKE_METHODCONTEXT(pTargetMD);
+ pResult->exactContextNeedsRuntimeLookup = pTargetMD->GetMethodTable()->IsSharedByGenericInstantiations() || TypeHandle::IsCanonicalSubtypeInstantiation(pTargetMD->GetMethodInstantiation());
+ }
+ else
+ {
+ if (!exactType.IsTypeDesc())
+ {
+ // Because of .NET's notion of base calls, exactType may point to a sub-class
+ // of the actual class that defines pTargetMD. If the JIT decides to inline, it is
+ // important that they 'match', so we fix exactType here.
+#if defined(MDIL) || defined(FEATURE_READYTORUN_COMPILER)
+ if (IsVersionResilientCompilation() &&
+ !isVerifyOnly() &&
+ !IsInSameVersionBubble((MethodDesc*)callerHandle, pTargetMD))
+ {
+ // For version resilient code we can only inline within the same version bubble;
+ // we "repair" the precise types only for those callees.
+ // The above condition needs to stay in sync with CEEInfo::canInline
+ }
+ else
+#endif
+ {
+
+ exactType = pTargetMD->GetExactDeclaringType(exactType.AsMethodTable());
+ _ASSERTE(!exactType.IsNull());
+ }
+ }
+
+ pResult->contextHandle = MAKE_CLASSCONTEXT(exactType.AsPtr());
+ pResult->exactContextNeedsRuntimeLookup = exactType.IsSharedByGenericInstantiations();
+ }
+
+ //
+ // Determine whether to perform direct call
+ //
+
+ bool directCall = false;
+ bool resolvedCallVirt = false;
+ bool callVirtCrossingVersionBubble = false;
+
+
+ // Delegate targets are always treated as direct calls here. (It would be nice to clean it up...).
+ if (flags & CORINFO_CALLINFO_LDFTN)
+ {
+ TypeEquivalenceFixupSpecificationHelper(m_pOverride, pTargetMD);
+ directCall = true;
+ }
+ else
+ // Static methods are always direct calls
+ if (pTargetMD->IsStatic())
+ {
+ directCall = true;
+ }
+ else
+ // Force all interface calls to be interpreted as if they are virtual.
+ if (pTargetMD->GetMethodTable()->IsInterface())
+ {
+ directCall = false;
+ }
+ else
+ if (!(flags & CORINFO_CALLINFO_CALLVIRT) || fResolvedConstraint)
+ {
+ directCall = true;
+ }
+ else
+ {
+ bool devirt;
+
+#if defined(MDIL) || defined(FEATURE_READYTORUN_COMPILER)
+
+ // if we are generating version resilient code
+ // AND
+ // caller/callee are in different version bubbles
+ // we have to apply more restrictive rules
+ // These rules are related to the "inlining rules" as far as the
+ // boundaries of a version bubble are concerned.
+
+ if (IsVersionResilientCompilation() &&
+ !isVerifyOnly() &&
+ !IsInSameVersionBubble((MethodDesc*)callerHandle, pTargetMD)
+ )
+ {
+ // For version resiliency we won't de-virtualize all final/sealed method calls. Because during a
+ // servicing event it is legal to unseal a method or type.
+ //
+ // Note that it is safe to devirtualize in the following cases, since a servicing event cannot later modify it
+ // 1) Callvirt on a virtual final method of a value type - since value types are sealed types as per ECMA spec
+ // 2) Delegate.Invoke() - since a Delegate is a sealed class as per ECMA spec
+ devirt = pTargetMD->GetMethodTable()->IsValueType() ||
+ (pTargetMD->GetMethodTable()->IsDelegate() && ((DelegateEEClass*)(pTargetMD->GetMethodTable()->GetClass()))->m_pInvokeMethod == pMD);
+
+ callVirtCrossingVersionBubble = true;
+ }
+ else
+#endif
+ {
+ DWORD dwMethodAttrs = pTargetMD->GetAttrs();
+ devirt = !IsMdVirtual(dwMethodAttrs) || IsMdFinal(dwMethodAttrs) || pTargetMD->GetMethodTable()->IsSealed();
+ }
+
+ if (devirt)
+ {
+ // We can't allow generic remotable methods to be considered resolved; it leads to a non-instantiating method desc being
+ // passed to the remoting stub. The easiest way to deal with these is to force them through the virtual code path.
+ // It is actually good to do this deoptimization for all remotable methods, since remoting interception via vtable dispatch
+ // is faster than remoting interception via thunk.
+ if (!pTargetMD->IsRemotingInterceptedViaVirtualDispatch() /* || !pTargetMD->HasMethodInstantiation() */)
+ {
+ resolvedCallVirt = true;
+ directCall = true;
+ }
+ }
+ }
+
+ if (directCall)
+ {
+ bool allowInstParam = (flags & CORINFO_CALLINFO_ALLOWINSTPARAM)
+ // See code:IsRemotingInterceptedViaPrestub on why we need to disallow inst param for remoting.
+ && !( pTargetMD->MayBeRemotingIntercepted() && !pTargetMD->IsVtableMethod() );
+
+ // Create an instantiating stub if necessary
+ if (!allowInstParam && pTargetMD->RequiresInstArg())
+ {
+ pTargetMD = MethodDesc::FindOrCreateAssociatedMethodDesc(pTargetMD,
+ exactType.AsMethodTable(),
+ FALSE /* forceBoxedEntryPoint */,
+ pTargetMD->GetMethodInstantiation(),
+ FALSE /* allowInstParam */);
+ }
+
+ // We don't allow a JIT to call the code directly if a runtime lookup is
+ // needed. This is the case if
+ // 1. the scan of the call token indicated that it involves code sharing
+ // AND 2. the method is an instantiating stub
+ //
+ // In these cases the correct instantiating stub is only found via a runtime lookup.
+ //
+ // Note that most JITs don't call instantiating stubs directly if they can help it -
+ // they call the underlying shared code and provide the type context parameter
+ // explicitly. However
+ // (a) some JITs may call instantiating stubs (it makes the JIT simpler) and
+ // (b) if the method is a remote stub then the EE will force the
+ // call through an instantiating stub and
+ // (c) constraint calls that require runtime context lookup are never resolved
+ // to underlying shared generic code
+
+ if (((pResult->exactContextNeedsRuntimeLookup && pTargetMD->IsInstantiatingStub() && (!allowInstParam || fResolvedConstraint)) || fForceUseRuntimeLookup)
+ // Handle invalid IL - see comment in code:CEEInfo::ComputeRuntimeLookupForSharedGenericToken
+ && ContextIsShared(pResolvedToken->tokenContext))
+ {
+ _ASSERTE(!m_pMethodBeingCompiled->IsDynamicMethod());
+ pResult->kind = CORINFO_CALL_CODE_POINTER;
+
+ // For reference types, the constrained type does not affect method resolution
+ DictionaryEntryKind entryKind = (!constrainedType.IsNull() && constrainedType.IsValueType()) ? ConstrainedMethodEntrySlot : MethodEntrySlot;
+
+ ComputeRuntimeLookupForSharedGenericToken(entryKind,
+ pResolvedToken,
+ pConstrainedResolvedToken,
+ pMD,
+ &pResult->codePointerLookup);
+ }
+ else
+ {
+ if (allowInstParam && pTargetMD->IsInstantiatingStub())
+ {
+ pTargetMD = pTargetMD->GetWrappedMethodDesc();
+ }
+
+ pResult->kind = CORINFO_CALL;
+
+ if (IsReadyToRunCompilation())
+ {
+ // Compensate for always treating delegates as direct calls above
+ if ((flags & CORINFO_CALLINFO_LDFTN) && (flags & CORINFO_CALLINFO_CALLVIRT) && !resolvedCallVirt)
+ {
+ pResult->kind = CORINFO_VIRTUALCALL_LDVIRTFTN;
+ }
+ }
+ }
+ pResult->nullInstanceCheck = resolvedCallVirt;
+ }
+ // All virtual calls which take method instantiations must
+ // currently be implemented by an indirect call via a runtime-lookup
+ // function pointer
+ else if (pTargetMD->HasMethodInstantiation())
+ {
+ pResult->kind = CORINFO_VIRTUALCALL_LDVIRTFTN; // stub dispatch can't handle generic method calls yet
+ pResult->nullInstanceCheck = TRUE;
+ }
+ // Non-interface dispatches go through the vtable
+ else if (!pTargetMD->IsInterface() && !IsReadyToRunCompilation())
+ {
+ pResult->kind = CORINFO_VIRTUALCALL_VTABLE;
+ pResult->nullInstanceCheck = TRUE;
+ }
+ else
+ {
+ if (IsReadyToRunCompilation())
+ {
+ // Insert explicit null checks for cross-version bubble non-interface calls.
+ // It is required to handle null checks properly for non-virtual <-> virtual change between versions
+ pResult->nullInstanceCheck = !!(callVirtCrossingVersionBubble && !pTargetMD->IsInterface());
+ }
+ else
+ {
+ // No need to null check - the dispatch code will deal with null this.
+ pResult->nullInstanceCheck = FALSE;
+ }
+#ifdef STUB_DISPATCH_PORTABLE
+ pResult->kind = CORINFO_VIRTUALCALL_LDVIRTFTN;
+#else // STUB_DISPATCH_PORTABLE
+ pResult->kind = CORINFO_VIRTUALCALL_STUB;
+
+ // We can't make stub calls when we need exact information
+ // for interface calls from shared code.
+
+ if (// If the token is not shared then we don't need a runtime lookup
+ pResult->exactContextNeedsRuntimeLookup
+ // Handle invalid IL - see comment in code:CEEInfo::ComputeRuntimeLookupForSharedGenericToken
+ && ContextIsShared(pResolvedToken->tokenContext))
+ {
+ _ASSERTE(!m_pMethodBeingCompiled->IsDynamicMethod());
+
+ ComputeRuntimeLookupForSharedGenericToken(DispatchStubAddrSlot,
+ pResolvedToken,
+ NULL,
+ pMD,
+ &pResult->stubLookup);
+ }
+ else
+ {
+ pResult->stubLookup.lookupKind.needsRuntimeLookup = false;
+
+ BYTE * indcell = NULL;
+
+ if (!(flags & CORINFO_CALLINFO_KINDONLY) && !isVerifyOnly())
+ {
+#ifndef CROSSGEN_COMPILE
+ // We shouldn't be using GetLoaderAllocator here because for LCG, we need to get the
+ // VirtualCallStubManager from where the stub will be used.
+ // For normal methods there is no difference.
+ LoaderAllocator *pLoaderAllocator = m_pMethodBeingCompiled->GetLoaderAllocatorForCode();
+ VirtualCallStubManager *pMgr = pLoaderAllocator->GetVirtualCallStubManager();
+
+ PCODE addr = pMgr->GetCallStub(exactType, pTargetMD);
+ _ASSERTE(pMgr->isStub(addr));
+
+ // Now we want to indirect through a cell so that updates can take place atomically.
+ if (m_pMethodBeingCompiled->IsLCGMethod())
+ {
+ // LCG methods should use recycled indcells to prevent leaks.
+ indcell = pMgr->GenerateStubIndirection(addr, TRUE);
+
+ // Add it to the per DM list so that we can recycle them when the resolver is finalized
+ LCGMethodResolver *pResolver = m_pMethodBeingCompiled->AsDynamicMethodDesc()->GetLCGMethodResolver();
+ pResolver->AddToUsedIndCellList(indcell);
+ }
+ else
+ {
+ // Normal methods should avoid recycled cells to preserve the locality of all indcells
+ // used by one method.
+ indcell = pMgr->GenerateStubIndirection(addr, FALSE);
+ }
+#else // CROSSGEN_COMPILE
+ // This path should be unreachable during crossgen
+ _ASSERTE(false);
+#endif // CROSSGEN_COMPILE
+ }
+
+ // We use an indirect call
+ pResult->stubLookup.constLookup.accessType = IAT_PVALUE;
+ pResult->stubLookup.constLookup.addr = indcell;
+ }
+#endif // STUB_DISPATCH_PORTABLE
+ }
+
+ pResult->hMethod = CORINFO_METHOD_HANDLE(pTargetMD);
+
+ pResult->accessAllowed = CORINFO_ACCESS_ALLOWED;
+ if ((flags & CORINFO_CALLINFO_SECURITYCHECKS) &&
+ !((MethodDesc *)callerHandle)->IsILStub()) // IL stubs can access everything, don't bother doing access checks
+ {
+ //Our type system doesn't always represent the target exactly with the MethodDesc. In all cases,
+ //carry around the parent MethodTable for both Caller and Callee.
+ TypeHandle calleeTypeForSecurity = TypeHandle(pResolvedToken->hClass);
+ MethodDesc * pCalleeForSecurity = pMD;
+
+ MethodDesc * pCallerForSecurity = GetMethodForSecurity(callerHandle); //Should this be the open MD?
+
+ if (pCallerForSecurity->HasClassOrMethodInstantiation())
+ {
+ _ASSERTE(!IsDynamicScope(pResolvedToken->tokenScope));
+
+ SigTypeContext typeContext;
+ SigTypeContext::InitTypeContext(pCallerForSecurity, &typeContext);
+ _ASSERTE(!typeContext.IsEmpty());
+
+ //If the caller is generic, load the open type and resolve the token again. Use that for the access
+ //checks. If we don't do this then we can't tell the difference between:
+ //
+ //BadGeneric<T> containing a methodspec for InaccessibleType::member (illegal)
+ //and
+ //BadGeneric<T> containing a methodspec for !!0::member instantiated over InaccessibleType (legal)
+
+ if (pResolvedToken->pTypeSpec != NULL)
+ {
+ SigPointer sigptr(pResolvedToken->pTypeSpec, pResolvedToken->cbTypeSpec);
+ calleeTypeForSecurity = sigptr.GetTypeHandleThrowing((Module *)pResolvedToken->tokenScope, &typeContext);
+
+                // calleeTypeForSecurity can be a type variable, which has no exact MethodTable
+ if (calleeTypeForSecurity.GetMethodTable() == NULL)
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_METHODDEF_PARENT_NO_MEMBERS);
+ }
+ }
+
+ if (pCalleeForSecurity->IsArray())
+ {
+                // FindOrCreateAssociatedMethodDesc won't remap an array method desc because the array
+                // base type is not part of the instantiation. We have to special case it.
+ pCalleeForSecurity = calleeTypeForSecurity.GetMethodTable()->GetParallelMethodDesc(pCalleeForSecurity);
+ }
+ else
+ if (pResolvedToken->pMethodSpec != NULL)
+ {
+ DWORD nGenericMethodArgs = 0;
+ CQuickBytes qbGenericMethodArgs;
+ TypeHandle *genericMethodArgs = NULL;
+
+ SigPointer sp(pResolvedToken->pMethodSpec, pResolvedToken->cbMethodSpec);
+
+ BYTE etype;
+ IfFailThrow(sp.GetByte(&etype));
+
+ // Load the generic method instantiation
+ THROW_BAD_FORMAT_MAYBE(etype == (BYTE)IMAGE_CEE_CS_CALLCONV_GENERICINST, 0, (Module *)pResolvedToken->tokenScope);
+
+ IfFailThrow(sp.GetData(&nGenericMethodArgs));
+
+ DWORD cbAllocSize = 0;
+ if (!ClrSafeInt<DWORD>::multiply(nGenericMethodArgs, sizeof(TypeHandle), cbAllocSize))
+ {
+ COMPlusThrowHR(COR_E_OVERFLOW);
+ }
+
+ genericMethodArgs = reinterpret_cast<TypeHandle *>(qbGenericMethodArgs.AllocThrows(cbAllocSize));
+
+ for (DWORD i = 0; i < nGenericMethodArgs; i++)
+ {
+ genericMethodArgs[i] = sp.GetTypeHandleThrowing((Module *)pResolvedToken->tokenScope, &typeContext);
+ _ASSERTE (!genericMethodArgs[i].IsNull());
+ IfFailThrow(sp.SkipExactlyOne());
+ }
+
+ pCalleeForSecurity = MethodDesc::FindOrCreateAssociatedMethodDesc(pMD, calleeTypeForSecurity.GetMethodTable(), FALSE, Instantiation(genericMethodArgs, nGenericMethodArgs), FALSE);
+ }
+ else
+ if (pResolvedToken->pTypeSpec != NULL)
+ {
+ pCalleeForSecurity = MethodDesc::FindOrCreateAssociatedMethodDesc(pMD, calleeTypeForSecurity.GetMethodTable(), FALSE, Instantiation(), TRUE);
+ }
+ }
+
+ TypeHandle callerTypeForSecurity = TypeHandle(pCallerForSecurity->GetMethodTable());
+
+        //This check throws on failure rather than returning a status.
+ if (pCalleeForSecurity->RequiresLinktimeCheck())
+ {
+#ifdef FEATURE_CORECLR
+            //The HostProtection attribute (HPA) can be removed for the CoreCLR mscorlib.dll.
+            //So if the call to LinktimeCheckMethod() is needed only because of HPA, skip it.
+ if (!pCalleeForSecurity->RequiresLinkTimeCheckHostProtectionOnly())
+#endif
+ Security::LinktimeCheckMethod(pCallerForSecurity->GetAssembly(), pCalleeForSecurity);
+ }
+
+ //Passed various link-time checks. Now do access checks.
+
+ BOOL doAccessCheck = TRUE;
+ BOOL canAccessMethod = TRUE;
+ AccessCheckOptions::AccessCheckType accessCheckType = AccessCheckOptions::kNormalAccessibilityChecks;
+ DynamicResolver * pAccessContext = NULL;
+
+ callerTypeForSecurity = TypeHandle(pCallerForSecurity->GetMethodTable());
+ if (pCallerForSecurity->IsDynamicMethod())
+ {
+ doAccessCheck = ModifyCheckForDynamicMethod(pCallerForSecurity->AsDynamicMethodDesc()->GetResolver(),
+ &callerTypeForSecurity,
+ &accessCheckType, &pAccessContext);
+ }
+
+ pResult->accessAllowed = CORINFO_ACCESS_ALLOWED;
+
+ if (doAccessCheck)
+ {
+ AccessCheckOptions accessCheckOptions(accessCheckType,
+ pAccessContext,
+ FALSE,
+ pCalleeForSecurity);
+
+ _ASSERTE(pCallerForSecurity != NULL && callerTypeForSecurity != NULL);
+ StaticAccessCheckContext accessContext(pCallerForSecurity, callerTypeForSecurity.GetMethodTable());
+
+ canAccessMethod = ClassLoader::CanAccess(&accessContext,
+ calleeTypeForSecurity.GetMethodTable(),
+ calleeTypeForSecurity.GetAssembly(),
+ pCalleeForSecurity->GetAttrs(),
+ pCalleeForSecurity,
+ NULL,
+ accessCheckOptions,
+#ifdef FEATURE_CORECLR
+ FALSE,
+#else
+ TRUE,
+#endif //FEATURE_CORECLR
+ TRUE
+ );
+
+ // If we were allowed access to the exact method, but it is on a type that has a type parameter
+ // (for instance an array), we need to ensure that we also have access to the type parameter.
+ if (canAccessMethod && calleeTypeForSecurity.HasTypeParam())
+ {
+ TypeHandle typeParam = calleeTypeForSecurity.GetTypeParam();
+ while (typeParam.HasTypeParam())
+ {
+ typeParam = typeParam.GetTypeParam();
+ }
+
+ _ASSERTE(pCallerForSecurity != NULL && callerTypeForSecurity != NULL);
+ StaticAccessCheckContext accessContext(pCallerForSecurity, callerTypeForSecurity.GetMethodTable());
+
+ MethodTable* pTypeParamMT = typeParam.GetMethodTable();
+
+                // No access check is needed for Var, MVar, or FnPtr.
+ if (pTypeParamMT != NULL)
+ canAccessMethod = ClassLoader::CanAccessClassForExtraChecks(&accessContext,
+ pTypeParamMT,
+ typeParam.GetAssembly(),
+ accessCheckOptions,
+ TRUE);
+ }
+
+ pResult->accessAllowed = canAccessMethod ? CORINFO_ACCESS_ALLOWED : CORINFO_ACCESS_ILLEGAL;
+ if (!canAccessMethod)
+ {
+ //Check failed, fill in the throw exception helper.
+ pResult->callsiteCalloutHelper.helperNum = CORINFO_HELP_METHOD_ACCESS_EXCEPTION;
+ pResult->callsiteCalloutHelper.numArgs = 2;
+
+ pResult->callsiteCalloutHelper.args[0].Set(CORINFO_METHOD_HANDLE(pCallerForSecurity));
+#ifdef MDIL
+ pResult->callsiteCalloutHelper.args[0].token = 0;
+#endif
+ pResult->callsiteCalloutHelper.args[1].Set(CORINFO_METHOD_HANDLE(pCalleeForSecurity));
+#ifdef MDIL
+ pResult->callsiteCalloutHelper.args[1].token = pResolvedToken->token;
+#endif
+
+ //We now embed open instantiations in a few places for security callouts (since you can only
+ //do the security check on the open instantiation). We throw these methods out in
+ //TriageMethodForZap. In addition, NGen has problems referencing them properly. Just throw out the whole
+ //method and rejit at runtime.
+ if (IsCompilingForNGen())
+ {
+ if (pCallerForSecurity->ContainsGenericVariables()
+ || pCalleeForSecurity->ContainsGenericVariables())
+ {
+ COMPlusThrowNonLocalized(kNotSupportedException, W("Cannot embed generic MethodDesc"));
+ }
+ }
+ }
+
+ //Only do this if we're allowed to access the method under any circumstance.
+ if (canAccessMethod)
+ {
+ BOOL fNeedsTransparencyCheck = TRUE;
+
+#ifdef FEATURE_CORECLR
+ // All LCG methods are transparent in CoreCLR. When we switch from PT
+ // to FT most user assemblies will become opportunistically critical.
+ // If a LCG method calls a method in such an assembly it will stop working.
+ // To avoid this we allow LCG methods to call user critical code in FT.
+ // There is no security concern because the domain is fully trusted anyway.
+ // There is nothing the LCG method can do that user code cannot do directly.
+ // This is also consistent with the desktop where a transparent->critical
+ // access will be converted to a demand and succeed in FT if the caller is
+ // level1 and the target is level2.
+ // See also AccessCheckOptions::DemandMemberAccess.
+ if (GetAppDomain()->GetSecurityDescriptor()->IsFullyTrusted() && pCallerForSecurity->IsLCGMethod())
+ fNeedsTransparencyCheck = FALSE;
+#endif // FEATURE_CORECLR
+
+ if (fNeedsTransparencyCheck)
+ {
+ CorInfoSecurityRuntimeChecks runtimeChecks = CORINFO_ACCESS_SECURITY_NONE;
+
+ // See if transparency requires the runtime check too
+ CorInfoIsAccessAllowedResult isCallAllowedResult =
+ Security::RequiresTransparentAssemblyChecks(pCallerForSecurity, pCalleeForSecurity, NULL);
+
+ if (isCallAllowedResult != CORINFO_ACCESS_ALLOWED)
+ runtimeChecks = CORINFO_ACCESS_SECURITY_TRANSPARENCY;
+
+ DebugSecurityCalloutStress(getMethodBeingCompiled(), isCallAllowedResult, runtimeChecks);
+
+ if (isCallAllowedResult == CORINFO_ACCESS_RUNTIME_CHECK)
+ {
+ pResult->accessAllowed = CORINFO_ACCESS_RUNTIME_CHECK;
+ //Explain the callback to the JIT.
+ pResult->callsiteCalloutHelper.helperNum = CORINFO_HELP_METHOD_ACCESS_CHECK;
+ pResult->callsiteCalloutHelper.numArgs = 4;
+
+ pResult->callsiteCalloutHelper.args[0].Set(CORINFO_METHOD_HANDLE(pCallerForSecurity));
+ pResult->callsiteCalloutHelper.args[1].Set(CORINFO_METHOD_HANDLE(pCalleeForSecurity));
+ pResult->callsiteCalloutHelper.args[2].Set(CORINFO_CLASS_HANDLE(calleeTypeForSecurity.AsPtr()));
+ pResult->callsiteCalloutHelper.args[3].Set(runtimeChecks);
+
+#ifdef MDIL
+ pResult->callsiteCalloutHelper.args[0].token = 0;
+ pResult->callsiteCalloutHelper.args[1].token = pResolvedToken->token;
+
+ if (flags & CORJIT_FLG_MDIL)
+ {
+ // Only bother with the lookup if the JIT really needs it
+ pResult->callsiteCalloutHelper.args[2].token = getMemberParent(pResolvedToken->tokenScope, pResolvedToken->token);
+ }
+ else
+ {
+ pResult->callsiteCalloutHelper.args[2].token = 0;
+ }
+
+#endif
+
+ if (IsCompilingForNGen())
+ {
+ //see code:CEEInfo::getCallInfo for more information.
+ if (pCallerForSecurity->ContainsGenericVariables()
+ || pCalleeForSecurity->ContainsGenericVariables())
+ {
+ COMPlusThrowNonLocalized(kNotSupportedException, W("Cannot embed generic MethodDesc"));
+ }
+ }
+ }
+ else
+ {
+ _ASSERTE(pResult->accessAllowed == CORINFO_ACCESS_ALLOWED);
+ _ASSERTE(isCallAllowedResult == CORINFO_ACCESS_ALLOWED);
+ }
+ }
+ }
+ }
+
+ }
+
+ //We're pretty much done at this point. Let's grab the rest of the information that the jit is going to
+ //need.
+ pResult->classFlags = getClassAttribsInternal(pResolvedToken->hClass);
+
+ pResult->methodFlags = getMethodAttribsInternal(pResult->hMethod);
+ getMethodSigInternal(pResult->hMethod, &pResult->sig, (pResult->hMethod == pResolvedToken->hMethod) ? pResolvedToken->hClass : NULL);
+
+ if (flags & CORINFO_CALLINFO_VERIFICATION)
+ {
+ if (pResult->hMethod != pResolvedToken->hMethod)
+ {
+ pResult->verMethodFlags = getMethodAttribsInternal(pResolvedToken->hMethod);
+ getMethodSigInternal(pResolvedToken->hMethod, &pResult->verSig, pResolvedToken->hClass);
+ }
+ else
+ {
+ pResult->verMethodFlags = pResult->methodFlags;
+ pResult->verSig = pResult->sig;
+ }
+ }
+
+ EE_TO_JIT_TRANSITION();
+}
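+
+// For reference, the dispatch kinds that getCallInfo produces above are:
+//
+//   CORINFO_CALL                  - direct call (possibly after unwrapping an
+//                                   instantiating stub)
+//   CORINFO_VIRTUALCALL_LDVIRTFTN - indirect call through a function pointer;
+//                                   used for generic virtual methods, for
+//                                   ldftn/callvirt patterns, and when
+//                                   STUB_DISPATCH_PORTABLE is defined
+//   CORINFO_VIRTUALCALL_VTABLE    - classic vtable dispatch
+//   CORINFO_VIRTUALCALL_STUB      - virtual stub dispatch through an
+//                                   indirection cell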
+
+BOOL CEEInfo::canAccessFamily(CORINFO_METHOD_HANDLE hCaller,
+ CORINFO_CLASS_HANDLE hInstanceType)
+{
+ WRAPPER_NO_CONTRACT;
+
+ BOOL ret = FALSE;
+
+ //Since this is only for verification, I don't need to do the demand.
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle targetType = TypeHandle(hInstanceType);
+ TypeHandle accessingType = TypeHandle(GetMethod(hCaller)->GetMethodTable());
+ AccessCheckOptions::AccessCheckType accessCheckOptions = AccessCheckOptions::kNormalAccessibilityChecks;
+ DynamicResolver* pIgnored;
+ BOOL doCheck = TRUE;
+ if (GetMethod(hCaller)->IsDynamicMethod())
+ {
+ //If this is a DynamicMethod, perform the check from the type to which the DynamicMethod was
+ //attached.
+ //
+ //If this is a dynamic method, don't do this check. If they specified SkipVisibilityChecks
+ //(ModifyCheckForDynamicMethod returned false), we should obviously skip the check for the C++
+ //protected rule (since we skipped all the other visibility checks). If they specified
+ //RestrictedSkipVisibilityChecks, then they're a "free" DynamicMethod. This check is meaningless
+ //(i.e. it would always fail). We've already done a demand for access to the member. Let that be
+ //enough.
+ doCheck = ModifyCheckForDynamicMethod(GetMethod(hCaller)->AsDynamicMethodDesc()->GetResolver(),
+ &accessingType, &accessCheckOptions, &pIgnored);
+ if (accessCheckOptions == AccessCheckOptions::kRestrictedMemberAccess
+#ifdef FEATURE_CORECLR
+ || accessCheckOptions == AccessCheckOptions::kRestrictedMemberAccessNoTransparency
+#endif //FEATURE_CORECLR
+ )
+ doCheck = FALSE;
+ }
+
+ if (doCheck)
+ {
+ ret = ClassLoader::CanAccessFamilyVerification(accessingType, targetType);
+ }
+ else
+ {
+ ret = TRUE;
+ }
+
+ EE_TO_JIT_TRANSITION();
+ return ret;
+}
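+
+// For illustration, the family-access (C++ "protected") rule verified above,
+// sketched in C#-like pseudocode:
+//
+//   class A { protected void M() { } }
+//   class B : A
+//   {
+//       void F(A a, B b) { b.M(); /* allowed */ a.M(); /* rejected */ }
+//   }
+//
+// A protected member may only be accessed through a reference whose static
+// type is the accessing class or one of its subclasses.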
+void CEEInfo::ThrowExceptionForHelper(const CORINFO_HELPER_DESC * throwHelper)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ _ASSERTE(throwHelper->args[0].argType == CORINFO_HELPER_ARG_TYPE_Method);
+ MethodDesc *pCallerMD = GetMethod(throwHelper->args[0].methodHandle);
+
+ StaticAccessCheckContext accessContext(pCallerMD);
+
+ switch (throwHelper->helperNum)
+ {
+ case CORINFO_HELP_METHOD_ACCESS_EXCEPTION:
+ {
+ _ASSERTE(throwHelper->args[1].argType == CORINFO_HELPER_ARG_TYPE_Method);
+ ThrowMethodAccessException(&accessContext, GetMethod(throwHelper->args[1].methodHandle));
+ }
+ break;
+ case CORINFO_HELP_FIELD_ACCESS_EXCEPTION:
+ {
+ _ASSERTE(throwHelper->args[1].argType == CORINFO_HELPER_ARG_TYPE_Field);
+ ThrowFieldAccessException(&accessContext, reinterpret_cast<FieldDesc *>(throwHelper->args[1].fieldHandle));
+ }
+ break;
+ case CORINFO_HELP_CLASS_ACCESS_EXCEPTION:
+ {
+ _ASSERTE(throwHelper->args[1].argType == CORINFO_HELPER_ARG_TYPE_Class);
+ TypeHandle typeHnd(throwHelper->args[1].classHandle);
+ ThrowTypeAccessException(&accessContext, typeHnd.GetMethodTable());
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Unknown access exception type");
+ }
+ EE_TO_JIT_TRANSITION();
+}
+
+
+BOOL CEEInfo::isRIDClassDomainID(CORINFO_CLASS_HANDLE cls)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ BOOL result = FALSE;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle VMClsHnd(cls);
+
+ result = !VMClsHnd.AsMethodTable()->IsDynamicStatics();
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+
+/***********************************************************************/
+unsigned CEEInfo::getClassDomainID (CORINFO_CLASS_HANDLE clsHnd,
+ void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+#ifdef MDIL
+ _ASSERTE(!SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain() || isVerifyOnly());
+#endif
+
+ unsigned result = 0;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle VMClsHnd(clsHnd);
+
+ if (VMClsHnd.AsMethodTable()->IsDynamicStatics())
+ {
+ result = (unsigned)VMClsHnd.AsMethodTable()->GetModuleDynamicEntryID();
+ }
+ else
+ {
+ result = (unsigned)VMClsHnd.AsMethodTable()->GetClassIndex();
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Used by the JIT to determine whether the profiler or IBC is tracking object
+// allocations
+//
+// Return Value:
+// bool indicating whether the profiler or IBC is tracking object allocations
+//
+// Notes:
+// Normally, a profiler would just directly call the inline helper to determine
+// whether the profiler set the relevant event flag (e.g.,
+// CORProfilerTrackAllocationsEnabled). However, this wrapper also asks whether we're
+// running for IBC instrumentation or enabling the object allocated ETW event. If so,
+// we treat that the same as if the profiler requested allocation information, so that
+//     the JIT will still use the profiling-friendly object allocation jit helper, and the
+//     allocations can be tracked.
+//
+
+bool __stdcall TrackAllocationsEnabled()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return (
+ (g_IBCLogger.InstrEnabled() != FALSE)
+#ifdef PROFILING_SUPPORTED
+ || CORProfilerTrackAllocationsEnabled()
+#endif // PROFILING_SUPPORTED
+#ifdef FEATURE_EVENT_TRACE
+ || ETW::TypeSystemLog::IsHeapAllocEventEnabled()
+#endif // FEATURE_EVENT_TRACE
+ );
+}
+
+/***********************************************************************/
+CorInfoHelpFunc CEEInfo::getNewHelper(CORINFO_RESOLVED_TOKEN * pResolvedToken, CORINFO_METHOD_HANDLE callerHandle)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoHelpFunc result = CORINFO_HELP_UNDEF;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle VMClsHnd(pResolvedToken->hClass);
+
+ if(VMClsHnd.IsTypeDesc())
+ {
+ COMPlusThrow(kInvalidOperationException,W("InvalidOperation_CantInstantiateFunctionPointer"));
+ }
+
+ if(VMClsHnd.IsAbstract())
+ {
+ COMPlusThrow(kInvalidOperationException,W("InvalidOperation_CantInstantiateAbstractClass"));
+ }
+
+ MethodTable* pMT = VMClsHnd.AsMethodTable();
+#ifdef FEATURE_COMINTEROP
+ if (pMT->IsComObjectType() && !GetMethod(callerHandle)->GetModule()->GetSecurityDescriptor()->CanCallUnmanagedCode())
+ {
+ // Caller does not have permission to make interop calls. Generate a
+ // special helper that will throw a security exception when called.
+ result = CORINFO_HELP_SEC_UNMGDCODE_EXCPT;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+
+#ifdef MDIL
+ if (SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain())
+ {
+ // always return the standard slow helper for MDIL compiles
+ result = CORINFO_HELP_NEWFAST;
+ }
+ else
+#endif
+ {
+ result = getNewHelperStatic(pMT);
+ }
+
+ _ASSERTE(result != CORINFO_HELP_UNDEF);
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/***********************************************************************/
+CorInfoHelpFunc CEEInfo::getNewHelperStatic(MethodTable * pMT)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_REMOTING
+ if (pMT->MayRequireManagedActivation())
+ {
+ return CORINFO_HELP_NEW_CROSSCONTEXT;
+ }
+#endif
+
+ // Slow helper is the default
+ CorInfoHelpFunc helper = CORINFO_HELP_NEWFAST;
+
+#ifdef FEATURE_REMOTING
+ // We shouldn't get here with a COM object (they're all potentially
+ // remotable, so they're covered by the case above).
+ _ASSERTE(!pMT->IsComObjectType() || pMT->IsWinRTObjectType());
+#endif
+
+ if (pMT->IsComObjectType())
+ {
+ // Use slow helper
+ _ASSERTE(helper == CORINFO_HELP_NEWFAST);
+ }
+ else
+ if (GCHeap::IsLargeObject(pMT) ||
+ pMT->HasFinalizer())
+ {
+ // Use slow helper
+ _ASSERTE(helper == CORINFO_HELP_NEWFAST);
+ }
+ else
+ // don't call the super-optimized one since that does not check
+ // for GCStress
+ if (GCStress<cfg_alloc>::IsEnabled())
+ {
+ // Use slow helper
+ _ASSERTE(helper == CORINFO_HELP_NEWFAST);
+ }
+ else
+#ifdef _LOGALLOC
+#ifdef LOGGING
+ // Super fast version doesn't do logging
+ if (LoggingOn(LF_GCALLOC, LL_INFO10))
+ {
+ // Use slow helper
+ _ASSERTE(helper == CORINFO_HELP_NEWFAST);
+ }
+ else
+#endif // LOGGING
+#endif // _LOGALLOC
+ // Don't use the SFAST allocator when tracking object allocations,
+ // so we don't have to instrument it.
+ if (TrackAllocationsEnabled())
+ {
+ // Use slow helper
+ _ASSERTE(helper == CORINFO_HELP_NEWFAST);
+ }
+ else
+#ifdef FEATURE_64BIT_ALIGNMENT
+ // @ARMTODO: Force all 8-byte alignment requiring allocations down one slow path. As performance
+ // measurements dictate we can spread these out to faster, more specialized helpers later.
+ if (pMT->RequiresAlign8())
+ {
+ // Use slow helper
+ _ASSERTE(helper == CORINFO_HELP_NEWFAST);
+ }
+ else
+#endif
+ {
+ // Use the fast helper when all conditions are met
+ helper = CORINFO_HELP_NEWSFAST;
+ }
+
+#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
+    // If we use the fast allocator we may also need the
+    // specialized version for align8
+ if (pMT->GetClass()->IsAlign8Candidate() &&
+ (helper == CORINFO_HELP_NEWSFAST))
+ {
+ helper = CORINFO_HELP_NEWSFAST_ALIGN8;
+ }
+#endif // FEATURE_DOUBLE_ALIGNMENT_HINT
+
+ return helper;
+}
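+
+// In short, the chain above selects the fast helper only when every slow-path
+// condition is absent: a small, finalizer-free, non-COM class with no special
+// alignment, logging, or allocation-tracking requirements gets
+// CORINFO_HELP_NEWSFAST; everything else stays on CORINFO_HELP_NEWFAST.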
+
+/***********************************************************************/
+// <REVIEW> this only works for shared generic code because all the
+// helpers are actually the same. If they were different then things might
+// break because the same helper would end up getting used for different but
+// representation-compatible arrays (e.g. one with a default constructor
+// and one without) </REVIEW>
+CorInfoHelpFunc CEEInfo::getNewArrHelper (CORINFO_CLASS_HANDLE arrayClsHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoHelpFunc result = CORINFO_HELP_UNDEF;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle arrayType(arrayClsHnd);
+
+#ifdef MDIL
+ if (SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain())
+ {
+ // always return the same helper for MDIL compiles
+ result = CORINFO_HELP_NEWARR_1_DIRECT;
+ }
+ else
+#endif
+ {
+ result = getNewArrHelperStatic(arrayType);
+ }
+
+ _ASSERTE(result != CORINFO_HELP_UNDEF);
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/***********************************************************************/
+CorInfoHelpFunc CEEInfo::getNewArrHelperStatic(TypeHandle clsHnd)
+{
+ STANDARD_VM_CONTRACT;
+
+ ArrayTypeDesc* arrayTypeDesc = clsHnd.AsArray();
+ _ASSERTE(arrayTypeDesc->GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY);
+
+ if (GCStress<cfg_alloc>::IsEnabled())
+ {
+ return CORINFO_HELP_NEWARR_1_DIRECT;
+ }
+
+ CorInfoHelpFunc result = CORINFO_HELP_UNDEF;
+
+ TypeHandle thElemType = arrayTypeDesc->GetTypeParam();
+ CorElementType elemType = thElemType.GetInternalCorElementType();
+
+    // This handles being asked for newarr !0 when verifying generic code.
+    // Of course, ideally you wouldn't even be generating code when
+    // simply doing verification (we run the JIT importer in import-only
+    // mode), but importing does more than one would like, so we try to be
+    // tolerant when asked for nonsensical helpers.
+ if (CorTypeInfo::IsGenericVariable(elemType))
+ {
+ result = CORINFO_HELP_NEWARR_1_OBJ;
+ }
+ else if (CorTypeInfo::IsObjRef(elemType))
+ {
+ // It is an array of object refs
+ result = CORINFO_HELP_NEWARR_1_OBJ;
+ }
+ else
+ {
+ // These cases always must use the slow helper
+ if (
+#ifdef FEATURE_64BIT_ALIGNMENT
+ thElemType.RequiresAlign8() ||
+#endif
+ (elemType == ELEMENT_TYPE_VOID) ||
+ LoggingOn(LF_GCALLOC, LL_INFO10) ||
+ TrackAllocationsEnabled())
+ {
+ // Use the slow helper
+ result = CORINFO_HELP_NEWARR_1_DIRECT;
+ }
+#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
+ else if (elemType == ELEMENT_TYPE_R8)
+ {
+ // Use the Align8 fast helper
+ result = CORINFO_HELP_NEWARR_1_ALIGN8;
+ }
+#endif
+ else
+ {
+ // Yea, we can do it the fast way!
+ result = CORINFO_HELP_NEWARR_1_VC;
+ }
+ }
+
+ return result;
+}
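+
+// For illustration, the selection above maps roughly as follows:
+//
+//   new string[n] -> CORINFO_HELP_NEWARR_1_OBJ    (object-ref elements)
+//   new int[n]    -> CORINFO_HELP_NEWARR_1_VC     (fast value-class path)
+//   new double[n] -> CORINFO_HELP_NEWARR_1_ALIGN8 (FEATURE_DOUBLE_ALIGNMENT_HINT)
+//   GC stress, logging, or allocation tracking -> CORINFO_HELP_NEWARR_1_DIRECT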
+
+/***********************************************************************/
+CorInfoHelpFunc CEEInfo::getCastingHelper(CORINFO_RESOLVED_TOKEN * pResolvedToken, bool fThrowing)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ if (isVerifyOnly())
+ return fThrowing ? CORINFO_HELP_CHKCASTANY : CORINFO_HELP_ISINSTANCEOFANY;
+
+ #ifdef MDIL
+ // we should never be called for an MDIL compile
+ _ASSERTE(!SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain());
+#endif
+
+ CorInfoHelpFunc result = CORINFO_HELP_UNDEF;
+
+ JIT_TO_EE_TRANSITION();
+
+ bool fClassMustBeRestored;
+ result = getCastingHelperStatic(TypeHandle(pResolvedToken->hClass), fThrowing, &fClassMustBeRestored);
+ if (fClassMustBeRestored)
+ m_pOverride->classMustBeLoadedBeforeCodeIsRun(pResolvedToken->hClass);
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/***********************************************************************/
+CorInfoHelpFunc CEEInfo::getCastingHelperStatic(TypeHandle clsHnd, bool fThrowing, bool * pfClassMustBeRestored)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Slow helper is the default
+ int helper = CORINFO_HELP_ISINSTANCEOFANY;
+
+ *pfClassMustBeRestored = false;
+
+ if (clsHnd == TypeHandle(g_pCanonMethodTableClass))
+ {
+ // In shared code just use the catch-all helper for type variables, as the same
+ // code may be used for interface/array/class instantiations
+ //
+ // We may be able to take advantage of constraints to select a specialized helper.
+        // This optimization does not seem to be warranted at the moment.
+ _ASSERTE(helper == CORINFO_HELP_ISINSTANCEOFANY);
+ }
+ else
+ if (!clsHnd.IsTypeDesc() && clsHnd.AsMethodTable()->HasVariance())
+ {
+ // Casting to variant type requires the type to be fully loaded
+ *pfClassMustBeRestored = true;
+
+ _ASSERTE(helper == CORINFO_HELP_ISINSTANCEOFANY);
+ }
+ else
+ if (!clsHnd.IsTypeDesc() && clsHnd.AsMethodTable()->HasTypeEquivalence())
+ {
+ // If the type can be equivalent with something, use the slow helper
+ // Note: if the type of the instance is the one marked as equivalent, it will be
+ // caught by the fast helpers in the same way as they catch transparent proxies.
+ _ASSERTE(helper == CORINFO_HELP_ISINSTANCEOFANY);
+ }
+ else
+ if (clsHnd.IsInterface())
+ {
+ // If it is a non-variant interface, use the fast interface helper
+ helper = CORINFO_HELP_ISINSTANCEOFINTERFACE;
+ }
+ else
+ if (clsHnd.IsArray())
+ {
+ if (clsHnd.AsArray()->GetInternalCorElementType() != ELEMENT_TYPE_SZARRAY)
+ {
+ // Casting to multidimensional array type requires restored pointer to EEClass to fetch rank
+ *pfClassMustBeRestored = true;
+ }
+
+ // If it is an array, use the fast array helper
+ helper = CORINFO_HELP_ISINSTANCEOFARRAY;
+ }
+ else
+ if (!clsHnd.IsTypeDesc() && !Nullable::IsNullableType(clsHnd))
+ {
+ // If it is a non-variant class, use the fast class helper
+ helper = CORINFO_HELP_ISINSTANCEOFCLASS;
+ }
+ else
+ {
+ // Otherwise, use the slow helper
+ _ASSERTE(helper == CORINFO_HELP_ISINSTANCEOFANY);
+ }
+
+#ifdef FEATURE_PREJIT
+ BOOL t1, t2, forceInstr;
+ SystemDomain::GetCompilationOverrides(&t1, &t2, &forceInstr);
+ if (forceInstr)
+ {
+ // If we're compiling for instrumentation, use the slowest but instrumented cast helper
+ helper = CORINFO_HELP_ISINSTANCEOFANY;
+ }
+#endif
+
+ if (fThrowing)
+ {
+ const int delta = CORINFO_HELP_CHKCASTANY - CORINFO_HELP_ISINSTANCEOFANY;
+
+ static_assert_no_msg(CORINFO_HELP_CHKCASTINTERFACE
+ == CORINFO_HELP_ISINSTANCEOFINTERFACE + delta);
+ static_assert_no_msg(CORINFO_HELP_CHKCASTARRAY
+ == CORINFO_HELP_ISINSTANCEOFARRAY + delta);
+ static_assert_no_msg(CORINFO_HELP_CHKCASTCLASS
+ == CORINFO_HELP_ISINSTANCEOFCLASS + delta);
+
+ helper += delta;
+ }
+
+ return (CorInfoHelpFunc)helper;
+}
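+
+// The static asserts above guarantee that each throwing (castclass) helper sits
+// at a fixed offset from its non-throwing (isinst) counterpart, so the single
+// "helper += delta" converts, for example, CORINFO_HELP_ISINSTANCEOFCLASS into
+// CORINFO_HELP_CHKCASTCLASS.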
+
+/***********************************************************************/
+CorInfoHelpFunc CEEInfo::getSharedCCtorHelper(CORINFO_CLASS_HANDLE clsHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoHelpFunc result = CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ TypeHandle cls(clsHnd);
+ MethodTable* pMT = cls.AsMethodTable();
+
+ if (pMT->IsDynamicStatics())
+ {
+ _ASSERTE(!cls.ContainsGenericVariables());
+ _ASSERTE(pMT->GetModuleDynamicEntryID() != (unsigned) -1);
+
+ result = CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS;
+ }
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+/***********************************************************************/
+CorInfoHelpFunc CEEInfo::getUnBoxHelper(CORINFO_CLASS_HANDLE clsHnd)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!isVerifyOnly())
+ m_pOverride->classMustBeLoadedBeforeCodeIsRun(clsHnd);
+
+ TypeHandle VMClsHnd(clsHnd);
+ if (Nullable::IsNullableType(VMClsHnd))
+ return CORINFO_HELP_UNBOX_NULLABLE;
+
+ return CORINFO_HELP_UNBOX;
+}
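+
+// Nullable<T> needs its own unbox helper because unboxing must accept either a
+// boxed T or a null reference (which becomes a Nullable<T> with HasValue set
+// to false), whereas CORINFO_HELP_UNBOX can simply hand back an interior
+// pointer into the boxed object.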
+
+/***********************************************************************/
+void CEEInfo::getReadyToRunHelper(
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CorInfoHelpFunc id,
+ CORINFO_CONST_LOOKUP * pLookup
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called during NGen
+}
+
+/***********************************************************************/
+// see code:Nullable#NullableVerification
+
+CORINFO_CLASS_HANDLE CEEInfo::getTypeForBox(CORINFO_CLASS_HANDLE cls)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ TypeHandle VMClsHnd(cls);
+ if (Nullable::IsNullableType(VMClsHnd)) {
+ VMClsHnd = VMClsHnd.AsMethodTable()->GetInstantiation()[0];
+ }
+ return static_cast<CORINFO_CLASS_HANDLE>(VMClsHnd.AsPtr());
+}
+
+/***********************************************************************/
+// see code:Nullable#NullableVerification
+CorInfoHelpFunc CEEInfo::getBoxHelper(CORINFO_CLASS_HANDLE clsHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoHelpFunc result = CORINFO_HELP_UNDEF;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle VMClsHnd(clsHnd);
+ if (Nullable::IsNullableType(VMClsHnd))
+ {
+ result = CORINFO_HELP_BOX_NULLABLE;
+ }
+ else
+ {
+        // Dev10 718281 - This has been functionally broken for a very long time (at least 2.0).
+ // The recent addition of the check for stack pointers has caused it to now AV instead
+ // of gracefully failing with an InvalidOperationException. Since nobody has noticed
+ // it being broken, we are choosing not to invest to fix it, and instead explicitly
+ // breaking it and failing early and consistently.
+ if(VMClsHnd.IsTypeDesc())
+ {
+ COMPlusThrow(kInvalidOperationException,W("InvalidOperation_TypeCannotBeBoxed"));
+ }
+
+ // we shouldn't allow boxing of types that contains stack pointers
+ // csc and vbc already disallow it.
+ if (VMClsHnd.AsMethodTable()->ContainsStackPtr())
+ COMPlusThrow(kInvalidProgramException);
+
+ result = CORINFO_HELP_BOX;
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/***********************************************************************/
+CorInfoHelpFunc CEEInfo::getSecurityPrologHelper(CORINFO_METHOD_HANDLE ftn)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoHelpFunc result = CORINFO_HELP_UNDEF;
+
+ JIT_TO_EE_TRANSITION();
+
+#ifdef FEATURE_PREJIT
+ // This will make sure that when IBC logging is on, we call the slow helper with IBC probe
+ if (IsCompilingForNGen() &&
+ GetAppDomain()->ToCompilationDomain()->m_fForceInstrument)
+ {
+ result = CORINFO_HELP_SECURITY_PROLOG_FRAMED;
+ }
+#endif // FEATURE_PREJIT
+
+ if (result == CORINFO_HELP_UNDEF)
+ {
+ result = CORINFO_HELP_SECURITY_PROLOG;
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/***********************************************************************/
+// registers a vararg sig & returns a class-specific cookie for it.
+
+CORINFO_VARARGS_HANDLE CEEInfo::getVarArgsHandle(CORINFO_SIG_INFO *sig,
+ void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_VARARGS_HANDLE result = NULL;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ Module* module = GetModule(sig->scope);
+
+ result = CORINFO_VARARGS_HANDLE(module->GetVASigCookie(Signature(sig->pSig, sig->cbSig)));
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+bool CEEInfo::canGetVarArgsHandle(CORINFO_SIG_INFO *sig)
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+/***********************************************************************/
+unsigned CEEInfo::getMethodHash (CORINFO_METHOD_HANDLE ftnHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ unsigned result = 0;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc* ftn = GetMethod(ftnHnd);
+
+ result = (unsigned) ftn->GetStableHash();
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/***********************************************************************/
+const char* CEEInfo::getMethodName (CORINFO_METHOD_HANDLE ftnHnd, const char** scopeName)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ const char* result = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc *ftn;
+
+ ftn = GetMethod(ftnHnd);
+
+ if (scopeName != 0)
+ {
+ if (ftn->IsLCGMethod())
+ {
+ *scopeName = "DynamicClass";
+ }
+ else if (ftn->IsILStub())
+ {
+ *scopeName = ILStubResolver::GetStubClassName(ftn);
+ }
+ else
+ {
+ MethodTable * pMT = ftn->GetMethodTable();
+#if defined(_DEBUG)
+#ifdef FEATURE_SYMDIFF
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SymDiffDump))
+ {
+ if (pMT->IsArray())
+ {
+ ssClsNameBuff.Clear();
+ ssClsNameBuff.SetUTF8(pMT->GetDebugClassName());
+ }
+ else
+ pMT->_GetFullyQualifiedNameForClassNestedAware(ssClsNameBuff);
+ }
+ else
+ {
+#endif
+            // Calling _GetFullyQualifiedNameForClass in a chk build is very expensive
+            // since it constructs the class name every time we call this method. In chk
+            // builds we already have a cheaper way to get the class name -
+            // GetDebugClassName - which doesn't recalculate the class name every time.
+            // This results in huge savings in NGen time for checked builds.
+ ssClsNameBuff.Clear();
+ ssClsNameBuff.SetUTF8(pMT->GetDebugClassName());
+
+#ifdef FEATURE_SYMDIFF
+ }
+#endif
+ // Append generic instantiation at the end
+ Instantiation inst = pMT->GetInstantiation();
+ if (!inst.IsEmpty())
+ TypeString::AppendInst(ssClsNameBuff, inst);
+
+ *scopeName = ssClsNameBuff.GetUTF8(ssClsNameBuffScratch);
+#else // !_DEBUG
+            // Since this is for diagnostic purposes only,
+            // give up on the namespace, as we don't have a buffer to concat it.
+            // Also note this won't show array class names.
+ LPCUTF8 nameSpace;
+ *scopeName= pMT->GetFullyQualifiedNameInfo(&nameSpace);
+#endif // !_DEBUG
+ }
+ }
+
+ result = ftn->GetName();
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+DWORD CEEInfo::getMethodAttribs (CORINFO_METHOD_HANDLE ftn)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ DWORD result = 0;
+
+ JIT_TO_EE_TRANSITION();
+
+ result = getMethodAttribsInternal(ftn);
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+DWORD CEEInfo::getMethodAttribsInternal (CORINFO_METHOD_HANDLE ftn)
+{
+ STANDARD_VM_CONTRACT;
+
+/*
+ returns method attribute flags (defined in corhdr.h)
+
+ NOTE: This doesn't return certain method flags
+ (mdAssem, mdFamANDAssem, mdFamORAssem, mdPrivateScope)
+*/
+
+ MethodDesc* pMD = GetMethod(ftn);
+
+ if (pMD->IsLCGMethod())
+ {
+#ifndef CROSSGEN_COMPILE
+#ifdef FEATURE_COMPRESSEDSTACK
+ if(SecurityStackWalk::MethodIsAnonymouslyHostedDynamicMethodWithCSToEvaluate(pMD))
+ {
+ return CORINFO_FLG_STATIC | CORINFO_FLG_DONT_INLINE | CORINFO_FLG_SECURITYCHECK;
+ }
+#endif // FEATURE_COMPRESSEDSTACK
+#endif // !CROSSGEN_COMPILE
+
+ return CORINFO_FLG_STATIC | CORINFO_FLG_DONT_INLINE | CORINFO_FLG_NOSECURITYWRAP;
+ }
+
+ DWORD result = 0;
+
+    // <REVISIT_TODO>@todo: can we get rid of CORINFO_FLG_ stuff and just include cor.h?</REVISIT_TODO>
+
+ DWORD attribs = pMD->GetAttrs();
+
+ if (IsMdFamily(attribs))
+ result |= CORINFO_FLG_PROTECTED;
+ if (IsMdStatic(attribs))
+ result |= CORINFO_FLG_STATIC;
+ if (pMD->IsSynchronized())
+ result |= CORINFO_FLG_SYNCH;
+ if (pMD->IsFCallOrIntrinsic())
+ result |= CORINFO_FLG_NOGCCHECK | CORINFO_FLG_INTRINSIC;
+ if (IsMdVirtual(attribs))
+ result |= CORINFO_FLG_VIRTUAL;
+ if (IsMdAbstract(attribs))
+ result |= CORINFO_FLG_ABSTRACT;
+ if (IsMdRTSpecialName(attribs))
+ {
+ LPCUTF8 pName = pMD->GetName();
+ if (IsMdInstanceInitializer(attribs, pName) ||
+ IsMdClassConstructor(attribs, pName))
+ result |= CORINFO_FLG_CONSTRUCTOR;
+ }
+
+ //
+ // See if we need to embed a .cctor call at the head of the
+ // method body.
+ //
+
+ MethodTable* pMT = pMD->GetMethodTable();
+
+ // method or class might have the final bit
+ if (IsMdFinal(attribs) || pMT->IsSealed())
+ {
+ result |= CORINFO_FLG_FINAL;
+ }
+
+ if (pMD->IsEnCAddedMethod())
+ {
+ result |= CORINFO_FLG_EnC;
+ }
+
+ if (pMD->IsSharedByGenericInstantiations())
+ {
+ result |= CORINFO_FLG_SHAREDINST;
+ }
+
+ if (pMD->IsNDirect())
+ {
+ result |= CORINFO_FLG_PINVOKE;
+ }
+
+ if (!pMD->IsInterceptedForDeclSecurity())
+ {
+ result |= CORINFO_FLG_NOSECURITYWRAP;
+ }
+
+
+ // Check for an inlining directive.
+ if (pMD->IsNotInline())
+ {
+ /* Function marked as not inlineable */
+ result |= CORINFO_FLG_DONT_INLINE;
+
+ if (pMD->IsIL() && (IsMdRequireSecObject(attribs) ||
+ (pMD->GetModule()->IsSystem() && IsMiNoInlining(pMD->GetImplAttrs()))))
+ {
+ // Assume all methods marked as NoInline inside mscorlib are
+ // marked that way because they use StackCrawlMark to identify
+ // the caller (not just the security info).
+ // See comments in canInline or canTailCall
+ result |= CORINFO_FLG_DONT_INLINE_CALLER;
+ }
+ }
+
+ // AggressiveInlining only makes sense for IL methods.
+ else if (pMD->IsIL() && IsMiAggressiveInlining(pMD->GetImplAttrs()))
+ {
+ result |= CORINFO_FLG_FORCEINLINE;
+ }
+
+
+ if (!pMD->IsRuntimeSupplied())
+ {
+ if (IsMdRequireSecObject(attribs))
+ {
+ result |= CORINFO_FLG_SECURITYCHECK;
+ }
+ }
+
+ if (pMT->IsDelegate() && ((DelegateEEClass*)(pMT->GetClass()))->m_pInvokeMethod == pMD)
+ {
+ // This is now used to emit efficient invoke code for any delegate invoke,
+ // including multicast.
+ result |= CORINFO_FLG_DELEGATE_INVOKE;
+ }
+
+ return result;
+}
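+
+// As an example of the mapping above, a sealed static fcall in mscorlib would
+// come back with CORINFO_FLG_STATIC | CORINFO_FLG_FINAL | CORINFO_FLG_INTRINSIC
+// | CORINFO_FLG_NOGCCHECK, plus CORINFO_FLG_NOSECURITYWRAP if it is not
+// intercepted for declarative security.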
+
+/*********************************************************************/
+void CEEInfo::setMethodAttribs (
+ CORINFO_METHOD_HANDLE ftnHnd,
+ CorInfoMethodRuntimeFlags attribs)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc* ftn = GetMethod(ftnHnd);
+
+ if (attribs & CORINFO_FLG_BAD_INLINEE)
+ {
+ BOOL fCacheInliningHint = TRUE;
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ if (IsCompilationProcess())
+ {
+ // Since we are running managed code during NGen the inlining hint may be
+        // changing underneath us as the code is JITed. We need to prevent the inlining
+ // hints from changing once we start to use them to place IL in the image.
+ if (!g_pCEECompileInfo->IsCachingOfInliningHintsEnabled())
+ {
+ fCacheInliningHint = FALSE;
+ }
+ else
+ {
+ // Don't cache inlining hints inside mscorlib during NGen of other assemblies,
+ // since mscorlib is loaded domain neutral and will survive worker process recycling,
+ // causing determinism problems.
+ Module * pModule = ftn->GetModule();
+ if (pModule->IsSystem() && pModule->HasNativeImage())
+ {
+ fCacheInliningHint = FALSE;
+ }
+ }
+ }
+#endif
+
+ if (fCacheInliningHint)
+ {
+ ftn->SetNotInline(true);
+ }
+ }
+
+    // CORINFO_FLG_UNVERIFIABLE and CORINFO_FLG_VERIFIABLE cannot both be set
+ _ASSERTE(!(attribs & CORINFO_FLG_UNVERIFIABLE) ||
+ !(attribs & CORINFO_FLG_VERIFIABLE ));
+
+ if (attribs & CORINFO_FLG_VERIFIABLE)
+ ftn->SetIsVerified(TRUE);
+ else if (attribs & CORINFO_FLG_UNVERIFIABLE)
+ ftn->SetIsVerified(FALSE);
+
+ EE_TO_JIT_TRANSITION();
+}
+
+/*********************************************************************/
+
+void getMethodInfoILMethodHeaderHelper(
+ COR_ILMETHOD_DECODER* header,
+ CORINFO_METHOD_INFO* methInfo
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+
+ methInfo->ILCode = const_cast<BYTE*>(header->Code);
+ methInfo->ILCodeSize = header->GetCodeSize();
+ methInfo->maxStack = static_cast<unsigned short>(header->GetMaxStack());
+ methInfo->EHcount = static_cast<unsigned short>(header->EHCount());
+
+ methInfo->options =
+ (CorInfoOptions)((header->GetFlags() & CorILMethod_InitLocals) ? CORINFO_OPT_INIT_LOCALS : 0) ;
+}
+
+/*********************************************************************
+
+IL is the most efficient and portable way to implement certain low level methods
+in mscorlib.dll. Unfortunately, there is no good way to link IL into mscorlib.dll today.
+Until we find a good way to link IL into mscorlib.dll, we will provide the IL implementation here.
+
+- All IL intrinsics are members of the System.Runtime.CompilerServices.JitHelpers class.
+- All IL intrinsics should be kept very simple. Implement the minimal reusable version of the
+unsafe construct and depend on inlining to do the rest.
+- The C# implementation of the IL intrinsic should be good enough for functionality. Everything should work
+correctly (but slower) if the IL intrinsics are removed.
+
+*********************************************************************/
+
+bool getILIntrinsicImplementation(MethodDesc * ftn,
+ CORINFO_METHOD_INFO * methInfo)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Precondition: ftn is a method in mscorlib
+ _ASSERTE(ftn->GetModule()->IsSystem());
+
+ mdMethodDef tk = ftn->GetMemberDef();
+
+ // Compare tokens to cover all generic instantiations
+    // The body of the first method is simply "ldarg.0; ret". The second one first converts the arg to a native int (conv.i).
+
+ if (tk == MscorlibBinder::GetMethod(METHOD__JIT_HELPERS__UNSAFE_CAST)->GetMemberDef())
+ {
+ // Return the argument that was passed in.
+ static const BYTE ilcode[] = { CEE_LDARG_0, CEE_RET };
+ methInfo->ILCode = const_cast<BYTE*>(ilcode);
+ methInfo->ILCodeSize = sizeof(ilcode);
+ methInfo->maxStack = 1;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+ return true;
+ }
+ else if (tk == MscorlibBinder::GetMethod(METHOD__JIT_HELPERS__UNSAFE_CAST_TO_STACKPTR)->GetMemberDef())
+ {
+ // Return the argument that was passed in converted to IntPtr
+ static const BYTE ilcode[] = { CEE_LDARG_0, CEE_CONV_I, CEE_RET };
+ methInfo->ILCode = const_cast<BYTE*>(ilcode);
+ methInfo->ILCodeSize = sizeof(ilcode);
+ methInfo->maxStack = 1;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+ return true;
+ }
+ else if (tk == MscorlibBinder::GetMethod(METHOD__JIT_HELPERS__UNSAFE_ENUM_CAST)->GetMemberDef())
+ {
+ // Normally we would follow the above pattern and unconditionally replace the IL,
+ // relying on generic type constraints to guarantee that it will only ever be instantiated
+ // on the type/size of argument we expect.
+ //
+ // However C#/CLR does not support restricting a generic type to be an Enum, so the best
+ // we can do is constrain it to be a value type. This is fine for run time, since we only
+ // ever create instantiations on 4 byte or less Enums. But during NGen we may compile instantiations
+        // on other value types (to be specific, every value type instantiation of EqualityComparer
+ // because of its TypeDependencyAttribute; here again we would like to restrict this to
+ // 4 byte or less Enums but cannot).
+ //
+ // This IL is invalid for those instantiations, and replacing it would lead to all sorts of
+ // errors at NGen time. So we only replace it for instantiations where it would be valid,
+ // leaving the others, which we should never execute, with the C# implementation of throwing.
+
+ _ASSERTE(ftn->HasMethodInstantiation());
+ Instantiation inst = ftn->GetMethodInstantiation();
+
+ _ASSERTE(inst.GetNumArgs() == 1);
+ CorElementType et = inst[0].GetVerifierCorElementType();
+ if (et == ELEMENT_TYPE_I4 ||
+ et == ELEMENT_TYPE_U4 ||
+ et == ELEMENT_TYPE_I2 ||
+ et == ELEMENT_TYPE_U2 ||
+ et == ELEMENT_TYPE_I1 ||
+ et == ELEMENT_TYPE_U1)
+ {
+ // Cast to I4 and return the argument that was passed in.
+ static const BYTE ilcode[] = { CEE_LDARG_0, CEE_CONV_I4, CEE_RET };
+ methInfo->ILCode = const_cast<BYTE*>(ilcode);
+ methInfo->ILCodeSize = sizeof(ilcode);
+ methInfo->maxStack = 1;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+ return true;
+ }
+ }
+ else if (tk == MscorlibBinder::GetMethod(METHOD__JIT_HELPERS__UNSAFE_ENUM_CAST_LONG)->GetMemberDef())
+ {
+        // See the comment above on why this is not an unconditional replacement. This case handles
+        // Enums backed by 8 byte values.
+
+ _ASSERTE(ftn->HasMethodInstantiation());
+ Instantiation inst = ftn->GetMethodInstantiation();
+
+ _ASSERTE(inst.GetNumArgs() == 1);
+ CorElementType et = inst[0].GetVerifierCorElementType();
+ if (et == ELEMENT_TYPE_I8 ||
+ et == ELEMENT_TYPE_U8)
+ {
+ // Cast to I8 and return the argument that was passed in.
+ static const BYTE ilcode[] = { CEE_LDARG_0, CEE_CONV_I8, CEE_RET };
+ methInfo->ILCode = const_cast<BYTE*>(ilcode);
+ methInfo->ILCodeSize = sizeof(ilcode);
+ methInfo->maxStack = 1;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+ return true;
+ }
+ }
+
+ return false;
+}
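+
+// Going by the binder IDs above, the managed counterparts are the
+// JitHelpers.UnsafeCast<T>, UnsafeCastToStackPointer, UnsafeEnumCast, and
+// UnsafeEnumCastLong methods; each keeps a plain C# body that works correctly,
+// just more slowly, if the substitution above does not happen.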
+
+bool getILIntrinsicImplementationForVolatile(MethodDesc * ftn,
+ CORINFO_METHOD_INFO * methInfo)
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+ // This replaces the implementations of Volatile.* in mscorlib with more efficient ones.
+ // We do this because we cannot otherwise express these in C#. What we *want* to do is
+ // to treat the byref args to these methods as "volatile." In pseudo-C#, this would look
+ // like:
+ //
+ // int Read(ref volatile int location)
+ // {
+ // return location;
+ // }
+ //
+ // However, C# does not yet provide a way to declare a byref as "volatile." So instead,
+ // we substitute raw IL bodies for these methods that use the correct volatile instructions.
+ //
+
+ // Precondition: ftn is a method in mscorlib in the System.Threading.Volatile class
+ _ASSERTE(ftn->GetModule()->IsSystem());
+ _ASSERTE(MscorlibBinder::IsClass(ftn->GetMethodTable(), CLASS__VOLATILE));
+ _ASSERTE(strcmp(ftn->GetMethodTable()->GetClass()->GetDebugClassName(), "System.Threading.Volatile") == 0);
+
+ const size_t VolatileMethodBodySize = 6;
+
+ struct VolatileMethodImpl
+ {
+ BinderMethodID methodId;
+ BYTE body[VolatileMethodBodySize];
+ };
+
+#define VOLATILE_IMPL(type, loadinst, storeinst) \
+ { \
+ METHOD__VOLATILE__READ_##type, \
+ { \
+ CEE_LDARG_0, \
+ CEE_PREFIX1, (CEE_VOLATILE & 0xFF), \
+ loadinst, \
+ CEE_NOP, /*pad to VolatileMethodBodySize bytes*/ \
+ CEE_RET \
+ } \
+ }, \
+ { \
+ METHOD__VOLATILE__WRITE_##type, \
+ { \
+ CEE_LDARG_0, \
+ CEE_LDARG_1, \
+ CEE_PREFIX1, (CEE_VOLATILE & 0xFF), \
+ storeinst, \
+ CEE_RET \
+ } \
+ },
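+
+// For example, VOLATILE_IMPL(Int, CEE_LDIND_I4, CEE_STIND_I4) expands to the
+// table entries for Volatile.Read(ref int) / Volatile.Write(ref int, int):
+//
+//   Read:  ldarg.0; volatile.; ldind.i4; nop; ret
+//   Write: ldarg.0; ldarg.1; volatile.; stind.i4; ret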
+
+ static const VolatileMethodImpl volatileImpls[] =
+ {
+ VOLATILE_IMPL(T, CEE_LDIND_REF, CEE_STIND_REF)
+ VOLATILE_IMPL(Bool, CEE_LDIND_I1, CEE_STIND_I1)
+ VOLATILE_IMPL(Int, CEE_LDIND_I4, CEE_STIND_I4)
+ VOLATILE_IMPL(IntPtr, CEE_LDIND_I, CEE_STIND_I)
+ VOLATILE_IMPL(UInt, CEE_LDIND_U4, CEE_STIND_I4)
+ VOLATILE_IMPL(UIntPtr, CEE_LDIND_I, CEE_STIND_I)
+ VOLATILE_IMPL(SByt, CEE_LDIND_I1, CEE_STIND_I1)
+ VOLATILE_IMPL(Byte, CEE_LDIND_U1, CEE_STIND_I1)
+ VOLATILE_IMPL(Shrt, CEE_LDIND_I2, CEE_STIND_I2)
+ VOLATILE_IMPL(UShrt, CEE_LDIND_U2, CEE_STIND_I2)
+ VOLATILE_IMPL(Flt, CEE_LDIND_R4, CEE_STIND_R4)
+
+ //
+ // Ordinary volatile loads and stores only guarantee atomicity for pointer-sized (or smaller) data.
+ // So, on 32-bit platforms we must use Interlocked operations instead for the 64-bit types.
+ // The implementation in mscorlib already does this, so we will only substitute a new
+ // IL body if we're running on a 64-bit platform.
+ //
+ IN_WIN64(VOLATILE_IMPL(Long, CEE_LDIND_I8, CEE_STIND_I8))
+ IN_WIN64(VOLATILE_IMPL(ULong, CEE_LDIND_I8, CEE_STIND_I8))
+ IN_WIN64(VOLATILE_IMPL(Dbl, CEE_LDIND_R8, CEE_STIND_R8))
+ };
+
+ mdMethodDef md = ftn->GetMemberDef();
+ for (unsigned i = 0; i < NumItems(volatileImpls); i++)
+ {
+ if (md == MscorlibBinder::GetMethod(volatileImpls[i].methodId)->GetMemberDef())
+ {
+ methInfo->ILCode = const_cast<BYTE*>(volatileImpls[i].body);
+ methInfo->ILCodeSize = VolatileMethodBodySize;
+ methInfo->maxStack = 2;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool getILIntrinsicImplementationForInterlocked(MethodDesc * ftn,
+ CORINFO_METHOD_INFO * methInfo)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Precondition: ftn is a method in mscorlib in the System.Threading.Interlocked class
+ _ASSERTE(ftn->GetModule()->IsSystem());
+ _ASSERTE(MscorlibBinder::IsClass(ftn->GetMethodTable(), CLASS__INTERLOCKED));
+#ifndef MDIL
+ //binder doesn't have class names available currently
+ _ASSERTE(strcmp(ftn->GetMethodTable()->GetClass()->GetDebugClassName(), "System.Threading.Interlocked") == 0);
+#endif
+
+
+ // We are only interested if ftn's token and CompareExchange<T> token match
+ if (ftn->GetMemberDef() != MscorlibBinder::GetMethod(METHOD__INTERLOCKED__COMPARE_EXCHANGE_T)->GetMemberDef())
+ return false;
+
+    // Get the MethodDesc for the object overload of System.Threading.Interlocked.CompareExchange()
+ MethodDesc* cmpxchgFast = MscorlibBinder::GetMethod(METHOD__INTERLOCKED__COMPARE_EXCHANGE_OBJECT);
+
+    // The MethodDesc lookup must not fail, and it should have the name "CompareExchange"
+ _ASSERTE(cmpxchgFast != NULL);
+ _ASSERTE(strcmp(cmpxchgFast->GetName(), "CompareExchange") == 0);
+
+    // Set up the body of the method
+ static BYTE il[] = {
+ CEE_LDARG_0,
+ CEE_LDARG_1,
+ CEE_LDARG_2,
+ CEE_CALL,0,0,0,0,
+ CEE_RET
+ };
+
+    // Get the token for the object overload of Interlocked.CompareExchange(), and patch it into the call instruction
+ mdMethodDef cmpxchgFastToken = cmpxchgFast->GetMemberDef();
+ il[4] = (BYTE)((int)cmpxchgFastToken >> 0);
+ il[5] = (BYTE)((int)cmpxchgFastToken >> 8);
+ il[6] = (BYTE)((int)cmpxchgFastToken >> 16);
+ il[7] = (BYTE)((int)cmpxchgFastToken >> 24);
+
+ // Initialize methInfo
+ methInfo->ILCode = const_cast<BYTE*>(il);
+ methInfo->ILCodeSize = sizeof(il);
+ methInfo->maxStack = 3;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+
+ return true;
+}
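+
+// The net effect is that the generic CompareExchange<T>(ref T, T, T) body is
+// replaced with IL equivalent to:
+//
+//   ldarg.0; ldarg.1; ldarg.2
+//   call Interlocked.CompareExchange(ref object, object, object)
+//   ret
+//
+// where the 4-byte token of the call target was patched into the call
+// instruction above in little-endian order.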
+
+//---------------------------------------------------------------------------------------
+//
+//static
+void
+getMethodInfoHelper(
+ MethodDesc * ftn,
+ CORINFO_METHOD_HANDLE ftnHnd,
+ COR_ILMETHOD_DECODER * header,
+ CORINFO_METHOD_INFO * methInfo)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(ftn == GetMethod(ftnHnd));
+
+ methInfo->ftn = ftnHnd;
+ methInfo->scope = GetScopeHandle(ftn);
+ methInfo->regionKind = CORINFO_REGION_JIT;
+ //
+ // For Jitted code the regionKind is JIT;
+ // For Ngen-ed code the zapper will set this to HOT or COLD, if we
+ // are using IBC data to partition methods into Hot/Cold regions
+
+ /* Grab information from the IL header */
+
+ PCCOR_SIGNATURE pLocalSig = NULL;
+ DWORD cbLocalSig = 0;
+
+ if (NULL != header)
+ {
+ bool fILIntrinsic = false;
+
+ MethodTable * pMT = ftn->GetMethodTable();
+
+ if (MscorlibBinder::IsClass(pMT, CLASS__JIT_HELPERS))
+ {
+ fILIntrinsic = getILIntrinsicImplementation(ftn, methInfo);
+ }
+ else if (MscorlibBinder::IsClass(pMT, CLASS__INTERLOCKED))
+ {
+ fILIntrinsic = getILIntrinsicImplementationForInterlocked(ftn, methInfo);
+ }
+ else if (MscorlibBinder::IsClass(pMT, CLASS__VOLATILE))
+ {
+ fILIntrinsic = getILIntrinsicImplementationForVolatile(ftn, methInfo);
+ }
+
+ if (!fILIntrinsic)
+ {
+ getMethodInfoILMethodHeaderHelper(header, methInfo);
+ pLocalSig = header->LocalVarSig;
+ cbLocalSig = header->cbLocalVarSig;
+ }
+ }
+ else
+ {
+ _ASSERTE(ftn->IsDynamicMethod());
+
+ DynamicResolver * pResolver = ftn->AsDynamicMethodDesc()->GetResolver();
+ unsigned int EHCount;
+ methInfo->ILCode = pResolver->GetCodeInfo(&methInfo->ILCodeSize,
+ &methInfo->maxStack,
+ &methInfo->options,
+ &EHCount);
+ methInfo->EHcount = (unsigned short)EHCount;
+ SigPointer localSig = pResolver->GetLocalSig();
+ localSig.GetSignature(&pLocalSig, &cbLocalSig);
+ }
+
+ methInfo->options = (CorInfoOptions)(((UINT32)methInfo->options) |
+ ((ftn->AcquiresInstMethodTableFromThis() ? CORINFO_GENERICS_CTXT_FROM_THIS : 0) |
+ (ftn->RequiresInstMethodTableArg() ? CORINFO_GENERICS_CTXT_FROM_METHODTABLE : 0) |
+ (ftn->RequiresInstMethodDescArg() ? CORINFO_GENERICS_CTXT_FROM_METHODDESC : 0)));
+
+ // EEJitManager::ResolveEHClause and CrawlFrame::GetExactGenericInstantiations
+ // need to be able to get to CORINFO_GENERICS_CTXT_MASK if there are any
+ // catch clauses like "try {} catch(MyException<T> e) {}".
+    // Such constructs are rare, and having to extend the lifetime of the variable
+    // for such cases is reasonable.
+
+ if (methInfo->options & CORINFO_GENERICS_CTXT_MASK)
+ {
+#if defined(PROFILING_SUPPORTED)
+ BOOL fProfilerRequiresGenericsContextForEnterLeave = FALSE;
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ if (g_profControlBlock.pProfInterface->RequiresGenericsContextForEnterLeave())
+ {
+ fProfilerRequiresGenericsContextForEnterLeave = TRUE;
+ }
+ END_PIN_PROFILER();
+ }
+ if (fProfilerRequiresGenericsContextForEnterLeave)
+ {
+ methInfo->options = CorInfoOptions(methInfo->options|CORINFO_GENERICS_CTXT_KEEP_ALIVE);
+ }
+ else
+#endif // defined(PROFILING_SUPPORTED)
+ {
+ // Check all the exception clauses
+
+ if (ftn->IsDynamicMethod())
+ {
+ // @TODO: how do we detect the need to mark this flag?
+ }
+ else
+ {
+ COR_ILMETHOD_SECT_EH_CLAUSE_FAT ehClause;
+
+ for (unsigned i = 0; i < methInfo->EHcount; i++)
+ {
+ const COR_ILMETHOD_SECT_EH_CLAUSE_FAT* ehInfo =
+ (COR_ILMETHOD_SECT_EH_CLAUSE_FAT*)header->EH->EHClause(i, &ehClause);
+
+ // Is it a typed catch clause?
+ if (ehInfo->GetFlags() != COR_ILEXCEPTION_CLAUSE_NONE)
+ continue;
+
+ // Check if we catch "C<T>" ?
+
+ DWORD catchTypeToken = ehInfo->GetClassToken();
+ if (TypeFromToken(catchTypeToken) != mdtTypeSpec)
+ continue;
+
+ PCCOR_SIGNATURE pSig;
+ ULONG cSig;
+ IfFailThrow(ftn->GetMDImport()->GetTypeSpecFromToken(catchTypeToken, &pSig, &cSig));
+
+ SigPointer psig(pSig, cSig);
+
+ SigTypeContext sigTypeContext(ftn);
+ if (psig.IsPolyType(&sigTypeContext) & hasSharableVarsMask)
+ {
+ methInfo->options = CorInfoOptions(methInfo->options|CORINFO_GENERICS_CTXT_KEEP_ALIVE);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ PCCOR_SIGNATURE pSig = NULL;
+ DWORD cbSig = 0;
+ ftn->GetSig(&pSig, &cbSig);
+
+ /* Fetch the method signature */
+ // Type parameters in the signature should be instantiated according to the
+ // class/method/array instantiation of ftnHnd
+ CEEInfo::ConvToJitSig(
+ pSig,
+ cbSig,
+ GetScopeHandle(ftn),
+ mdTokenNil,
+ &methInfo->args,
+ ftn,
+ false);
+
+ // Shared generic or static per-inst methods and shared methods on generic structs
+ // take an extra argument representing their instantiation
+ if (ftn->RequiresInstArg())
+ methInfo->args.callConv = (CorInfoCallConv)(methInfo->args.callConv | CORINFO_CALLCONV_PARAMTYPE);
+
+ _ASSERTE((IsMdStatic(ftn->GetAttrs()) == 0) == ((methInfo->args.callConv & CORINFO_CALLCONV_HASTHIS) != 0));
+
+ /* And its local variables */
+ // Type parameters in the signature should be instantiated according to the
+ // class/method/array instantiation of ftnHnd
+ CEEInfo::ConvToJitSig(
+ pLocalSig,
+ cbLocalSig,
+ GetScopeHandle(ftn),
+ mdTokenNil,
+ &methInfo->locals,
+ ftn,
+ true);
+} // getMethodInfoHelper
+
+//---------------------------------------------------------------------------------------
+//
+bool
+CEEInfo::getMethodInfo(
+ CORINFO_METHOD_HANDLE ftnHnd,
+ CORINFO_METHOD_INFO * methInfo)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ bool result = false;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc * ftn = GetMethod(ftnHnd);
+
+ if (!ftn->IsDynamicMethod() && (!ftn->IsIL() || !ftn->GetRVA() || ftn->IsWrapperStub()))
+ {
+ /* Return false if not IL or has no code */
+ result = false;
+ }
+ else
+ {
+ /* Get the IL header */
+ /* <REVISIT_TODO>TODO: canInline already did validation, however, we do it again
+ here because NGEN uses this function without calling canInline
+ It would be nice to avoid this redundancy </REVISIT_TODO>*/
+ Module* pModule = ftn->GetModule();
+
+ bool verify = !Security::CanSkipVerification(ftn);
+
+ if (ftn->IsDynamicMethod())
+ {
+ getMethodInfoHelper(ftn, ftnHnd, NULL, methInfo);
+ }
+ else
+ {
+ COR_ILMETHOD_DECODER::DecoderStatus status = COR_ILMETHOD_DECODER::SUCCESS;
+ COR_ILMETHOD_DECODER header(ftn->GetILHeader(TRUE), ftn->GetMDImport(), verify ? &status : NULL);
+
+ // If we get a verification error then we try to demand SkipVerification for the module
+ if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR &&
+ Security::CanSkipVerification(pModule->GetDomainAssembly()))
+ {
+ status = COR_ILMETHOD_DECODER::SUCCESS;
+ }
+
+ if (status != COR_ILMETHOD_DECODER::SUCCESS)
+ {
+ if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR)
+ {
+ // Throw a verification HR
+ COMPlusThrowHR(COR_E_VERIFICATION);
+ }
+ else
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
+ }
+ }
+
+ getMethodInfoHelper(ftn, ftnHnd, &header, methInfo);
+ }
+
+ LOG((LF_JIT, LL_INFO100000, "Getting method info (possible inline) %s::%s%s\n",
+ ftn->m_pszDebugClassName, ftn->m_pszDebugMethodName, ftn->m_pszDebugMethodSignature));
+
+ result = true;
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+#ifdef _DEBUG
+
+/************************************************************************
+ Return true when ftn contains a local of type CLASS__STACKCRAWMARK
+*/
+
+bool containsStackCrawlMarkLocal(MethodDesc* ftn)
+{
+ STANDARD_VM_CONTRACT;
+
+ COR_ILMETHOD* ilHeader = ftn->GetILHeader();
+ _ASSERTE(ilHeader);
+
+ COR_ILMETHOD_DECODER header(ilHeader, ftn->GetMDImport(), NULL);
+
+ if (header.LocalVarSig == NULL)
+ return false;
+
+ SigPointer ptr(header.LocalVarSig, header.cbLocalVarSig);
+
+ IfFailThrow(ptr.GetData(NULL)); // IMAGE_CEE_CS_CALLCONV_LOCAL_SIG
+
+ ULONG numLocals;
+ IfFailThrow(ptr.GetData(&numLocals));
+
+ for(ULONG i = 0; i < numLocals; i++)
+ {
+ CorElementType eType;
+ IfFailThrow(ptr.PeekElemType(&eType));
+ if (eType != ELEMENT_TYPE_VALUETYPE)
+ {
+ IfFailThrow(ptr.SkipExactlyOne());
+ continue;
+ }
+
+ IfFailThrow(ptr.GetElemType(NULL));
+
+ mdToken token;
+ IfFailThrow(ptr.GetToken(&token));
+
+ // We are inside mscorlib - simple token match is sufficient
+ if (token == MscorlibBinder::GetClass(CLASS__STACKCRAWMARK)->GetCl())
+ return true;
+ }
+
+ return false;
+}
+
+#endif
+
+/*************************************************************
+ * Check if the caller and callee are in the same assembly
+ * i.e. do not inline across assemblies
+ *************************************************************/
+
+CorInfoInline CEEInfo::canInline (CORINFO_METHOD_HANDLE hCaller,
+ CORINFO_METHOD_HANDLE hCallee,
+ DWORD* pRestrictions)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoInline result = INLINE_PASS; // By default we pass.
+ // Do not set pass in the rest of the method.
+ DWORD dwRestrictions = 0; // By default, no restrictions
+ const char * szFailReason = NULL; // for reportInlineDecision
+
+ JIT_TO_EE_TRANSITION();
+
+ // This does not work in the multi-threaded case
+#if 0
+ // Caller should check this condition first
+ _ASSERTE(!(CORINFO_FLG_DONT_INLINE & getMethodAttribsInternal(hCallee)));
+#endif
+
+ // Returns TRUE if the caller and callee are from the same assembly or the callee
+ // is part of the system assembly.
+ //
+ // If the caller and callee have the same Critical state and the same Grant (and refuse) sets, then the
+ // callee may always be inlined into the caller.
+ //
+ // If they differ, then the callee is marked as INLINE_RESPECT_BOUNDARY. The Jit may only inline the
+ // callee when any of the following are true.
+ // 1) the callee is a leaf method.
+ // 2) the callee does not call any Boundary Methods.
+ //
+ // Conceptually, a Boundary method is a method that needs to accurately find the permissions of its
+ // caller. Boundary methods are:
+ //
+ // 1) A method that calls anything that creates a StackCrawlMark to look for its caller. In this code
+ // this is approximated as "in mscorlib and is marked as NoInlining".
+ // 2) A method that calls a method which calls Demand. These methods must be marked as
+ // IsMdRequireSecObject.
+ // 3) Calls anything that is virtual. This is because the virtual method could be #1 or #2.
+ //
+ // In CoreCLR, all public Critical methods of mscorlib are considered Boundary Methods.
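+ //
+ // For illustration only, the hypothetical C# shape of case #1 is a
+ // caller-sensitive mscorlib method such as:
+ //
+ // [MethodImpl(MethodImplOptions.NoInlining)]
+ // public static Assembly GetCallingAssembly() { /* uses a StackCrawlMark */ }
+ //
+ // Inlining such a method would change the stack it inspects.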
+
+ MethodDesc* pCaller = GetMethod(hCaller);
+ MethodDesc* pCallee = GetMethod(hCallee);
+
+ if (pCallee->IsNoMetadata())
+ {
+ result = INLINE_FAIL;
+ szFailReason = "Inlinee is NoMetadata";
+ goto exit;
+ }
+
+#ifdef DEBUGGING_SUPPORTED
+
+ // If the callee wants debuggable code, don't allow it to be inlined
+
+ if (GetDebuggerCompileFlags(pCallee->GetModule(), 0) & CORJIT_FLG_DEBUG_CODE)
+ {
+ result = INLINE_NEVER;
+ szFailReason = "Inlinee is debuggable";
+ goto exit;
+ }
+#endif
+
+ // The original caller is the current method
+ MethodDesc * pOrigCaller;
+ pOrigCaller = m_pMethodBeingCompiled;
+ Module * pOrigCallerModule;
+ pOrigCallerModule = pOrigCaller->GetLoaderModule();
+
+ // Prevent recursive compiling/inlining/verifying
+ if (pOrigCaller != pCallee)
+ {
+ // The Inliner may not do code verification.
+ // So never inline anything that is unverifiable / bad code.
+ if (!Security::CanSkipVerification(pCallee))
+ {
+ // Inlinee needs to be verifiable
+ if (!pCallee->IsVerifiable())
+ {
+ result = INLINE_NEVER;
+ szFailReason = "Inlinee is not verifiable";
+ goto exit;
+ }
+ }
+ }
+
+ // We check this here as the call to MethodDesc::IsVerifiable()
+ // may set CORINFO_FLG_DONT_INLINE.
+ if (pCallee->IsNotInline())
+ {
+ result = INLINE_NEVER;
+ szFailReason = "Inlinee is marked as no inline";
+ goto exit;
+ }
+
+ // Also check to see if the method requires a security object. This means they call demand and
+ // shouldn't be inlined.
+ if (IsMdRequireSecObject(pCallee->GetAttrs()))
+ {
+ result = INLINE_NEVER;
+ szFailReason = "Inlinee requires a security object (calls Demand/Assert/Deny)";
+ goto exit;
+ }
+
+ // If the method is MethodImpl'd by another method within the same type, then we have
+ // an issue that the importer will import the wrong body. In this case, we'll just
+ // disallow inlining because getFunctionEntryPoint will do the right thing.
+ {
+ MethodDesc *pMDDecl = pCallee;
+ MethodTable *pMT = pMDDecl->GetMethodTable();
+ MethodDesc *pMDImpl = pMT->MapMethodDeclToMethodImpl(pMDDecl);
+
+ if (pMDDecl != pMDImpl)
+ {
+ result = INLINE_NEVER;
+ szFailReason = "Inlinee is MethodImpl'd by another method within the same type";
+ goto exit;
+ }
+ }
+
+ //
+ // Perform the Cross-Assembly inlining checks
+ //
+ {
+ Module * pCalleeModule = pCallee->GetModule();
+
+#ifdef FEATURE_LEGACYNETCF
+ if (m_pMethodBeingCompiled->GetDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ {
+ // NetCF did not allow cross-assembly inlining (except for mscorlib
+ // and leaf methods)
+ Assembly * pCalleeAssembly = pCalleeModule->GetAssembly();
+ Assembly * pOrigCallerAssembly = pOrigCallerModule->GetAssembly();
+ if ((pCalleeAssembly != pOrigCallerAssembly) && !pCalleeAssembly->IsSystem())
+ {
+ dwRestrictions |= INLINE_RESPECT_BOUNDARY;
+ }
+ }
+#endif // FEATURE_LEGACYNETCF
+
+#ifdef FEATURE_PREJIT
+ Assembly * pCalleeAssembly = pCalleeModule->GetAssembly();
+
+#ifdef _DEBUG
+ //
+ // Make sure that all methods with StackCrawlMark are marked as non-inlineable
+ //
+ if (pCalleeAssembly->IsSystem())
+ {
+ _ASSERTE(!containsStackCrawlMarkLocal(pCallee));
+ }
+#endif
+
+ // To allow for servicing of Ngen images we want to disable most
+ // Cross-Assembly inlining except for the cases that we explicitly allow.
+ //
+ if (IsCompilingForNGen())
+ {
+ // This is a canInline call at NGen time.
+ //
+ Assembly * pOrigCallerAssembly = pOrigCallerModule->GetAssembly();
+
+ if (pCalleeAssembly == pOrigCallerAssembly)
+ {
+ // Within the same assembly
+ // we can freely inline with no restrictions
+ }
+ else
+ {
+#if defined(MDIL) || defined(FEATURE_READYTORUN_COMPILER)
+ // No inlining for version-resilient code unless within the same version bubble.
+ // If this condition changes, please make the corresponding change
+ // in getCallInfo, too.
+ if (IsVersionResilientCompilation() &&
+ !isVerifyOnly() &&
+ !IsInSameVersionBubble(pCaller, pCallee)
+ )
+ {
+ result = INLINE_NEVER;
+ szFailReason = "Cross-module inlining in version resilient code";
+ goto exit;
+ }
+#endif
+ }
+ }
+#endif // FEATURE_PREJIT
+
+ if (!canReplaceMethodOnStack(pCallee, NULL, pCaller))
+ {
+ dwRestrictions |= INLINE_RESPECT_BOUNDARY;
+ }
+
+ // TODO: We could probably be smarter here if the caller is jitted, as we would
+ // know for sure whether the inlinee really has no string interning active (currently
+ // it is only on in the NGen case, besides requiring the attribute), but this is
+ // getting too subtle. We will only do it if somebody screams about it, as bugs
+ // here are going to be tough to find.
+ if ((pOrigCallerModule != pCalleeModule) && pCalleeModule->IsNoStringInterning())
+ {
+ dwRestrictions |= INLINE_NO_CALLEE_LDSTR;
+ }
+
+ // The remoting interception can be skipped only if the call is on the same this pointer
+ if (pCallee->MayBeRemotingIntercepted())
+ {
+ dwRestrictions |= INLINE_SAME_THIS;
+ }
+ }
+
+
+#ifdef PROFILING_SUPPORTED
+ if (CORProfilerPresent())
+ {
+ // #rejit
+ //
+ // See if rejit-specific flags for the caller disable inlining
+ if ((ReJitManager::GetCurrentReJitFlags(pCaller) &
+ COR_PRF_CODEGEN_DISABLE_INLINING) != 0)
+ {
+ result = INLINE_FAIL;
+ szFailReason = "ReJIT request disabled inlining from caller";
+ goto exit;
+ }
+
+ // If the profiler has set a mask preventing inlining, always return
+ // false to the jit.
+ if (CORProfilerDisableInlining())
+ {
+ result = INLINE_FAIL;
+ szFailReason = "Profiler disabled inlining globally";
+ goto exit;
+ }
+
+ // If the profiler wishes to be notified of JIT events and the result from
+ // the above tests will cause a function to be inlined, we need to tell the
+ // profiler that this inlining is going to take place, and give them a
+ // chance to prevent it.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+ if (pCaller->IsILStub() || pCallee->IsILStub())
+ {
+ // do nothing
+ }
+ else
+ {
+ BOOL fShouldInline;
+
+ HRESULT hr = g_profControlBlock.pProfInterface->JITInlining(
+ (FunctionID)pCaller,
+ (FunctionID)pCallee,
+ &fShouldInline);
+
+ if (SUCCEEDED(hr) && !fShouldInline)
+ {
+ result = INLINE_FAIL;
+ szFailReason = "Profiler disabled inlining locally";
+ goto exit;
+ }
+ }
+ END_PIN_PROFILER();
+ }
+ }
+#endif // PROFILING_SUPPORTED
+
+exit: ;
+
+ EE_TO_JIT_TRANSITION();
+
+ if (result == INLINE_PASS && dwRestrictions)
+ {
+ if (pRestrictions)
+ {
+ *pRestrictions = dwRestrictions;
+ }
+ else
+ {
+ // If the jitter didn't want to know about restrictions, it shouldn't be inlining
+ result = INLINE_FAIL;
+ szFailReason = "Inlinee has restrictions the JIT doesn't want";
+ }
+ }
+ else
+ {
+ if (pRestrictions)
+ {
+ // Inlining was denied; it makes no sense to pass out restrictions.
+ *pRestrictions = 0;
+ }
+ }
+
+ if (dontInline(result))
+ {
+ // If you hit this assert, it means you added a new way to prevent inlining
+ // without documenting it for ETW!
+ _ASSERTE(szFailReason != NULL);
+ reportInliningDecision(hCaller, hCallee, result, szFailReason);
+ }
+
+ return result;
+}
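+
+// A minimal sketch (illustrative, not actual JIT code) of how a caller of
+// canInline is expected to consume the restriction mask returned above
+// (pEEInfo stands in for the JIT's EE interface pointer):
+//
+// DWORD restrictions = 0;
+// CorInfoInline verdict = pEEInfo->canInline(hCaller, hCallee, &restrictions);
+// if (verdict == INLINE_PASS && (restrictions & INLINE_NO_CALLEE_LDSTR))
+// {
+// // reject inline candidates whose IL contains ldstr
+// }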
+
+void CEEInfo::reportInliningDecision (CORINFO_METHOD_HANDLE inlinerHnd,
+ CORINFO_METHOD_HANDLE inlineeHnd,
+ CorInfoInline inlineResult,
+ const char * reason)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ JIT_TO_EE_TRANSITION();
+
+#ifdef _DEBUG
+ if (LoggingOn(LF_JIT, LL_INFO100000))
+ {
+ SString currentMethodName;
+ currentMethodName.AppendUTF8(m_pMethodBeingCompiled->GetModule_NoLogging()->GetFile()->GetSimpleName());
+ currentMethodName.Append(L'/');
+ TypeString::AppendMethodInternal(currentMethodName, m_pMethodBeingCompiled, TypeString::FormatBasic);
+
+ SString inlineeMethodName;
+ if (GetMethod(inlineeHnd))
+ {
+ inlineeMethodName.AppendUTF8(GetMethod(inlineeHnd)->GetModule_NoLogging()->GetFile()->GetSimpleName());
+ inlineeMethodName.Append(L'/');
+ TypeString::AppendMethodInternal(inlineeMethodName, GetMethod(inlineeHnd), TypeString::FormatBasic);
+ }
+ else
+ {
+ inlineeMethodName.AppendASCII( "<null>" );
+ }
+
+ SString inlinerMethodName;
+ if (GetMethod(inlinerHnd))
+ {
+ inlinerMethodName.AppendUTF8(GetMethod(inlinerHnd)->GetModule_NoLogging()->GetFile()->GetSimpleName());
+ inlinerMethodName.Append(L'/');
+ TypeString::AppendMethodInternal(inlinerMethodName, GetMethod(inlinerHnd), TypeString::FormatBasic);
+ }
+ else
+ {
+ inlinerMethodName.AppendASCII("<null>");
+ }
+
+ if (dontInline(inlineResult))
+ {
+ LOG((LF_JIT, LL_INFO100000,
+ "While compiling '%S', inline of '%S' into '%S' failed because: '%s'.\n",
+ currentMethodName.GetUnicode(), inlineeMethodName.GetUnicode(),
+ inlinerMethodName.GetUnicode(), reason));
+ }
+ else
+ {
+ LOG((LF_JIT, LL_INFO100000, "While compiling '%S', inline of '%S' into '%S' succeeded.\n",
+ currentMethodName.GetUnicode(), inlineeMethodName.GetUnicode(),
+ inlinerMethodName.GetUnicode()));
+
+ }
+ }
+#endif //_DEBUG
+
+ // This code is duplicated because the ETW format is slightly different, and LoggingOn is debug-only.
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_VERBOSE,
+ CLR_JITTRACING_KEYWORD))
+ {
+ SString methodBeingCompiledNames[3];
+ SString inlinerNames[3];
+ SString inlineeNames[3];
+ MethodDesc * methodBeingCompiled = m_pMethodBeingCompiled;
+#define GMI(pMD, strArray) \
+ do { \
+ if (pMD) { \
+ (pMD)->GetMethodInfo((strArray)[0], (strArray)[1], (strArray)[2]); \
+ } else { \
+ (strArray)[0].Set(W("<null>")); \
+ (strArray)[1].Set(W("<null>")); \
+ (strArray)[2].Set(W("<null>")); \
+ } } while (0)
+
+ GMI(methodBeingCompiled, methodBeingCompiledNames);
+ GMI(GetMethod(inlinerHnd), inlinerNames);
+ GMI(GetMethod(inlineeHnd), inlineeNames);
+#undef GMI
+ if (dontInline(inlineResult))
+ {
+ const char * str = (reason ? reason : "");
+
+ FireEtwMethodJitInliningFailed(methodBeingCompiledNames[0].GetUnicode(),
+ methodBeingCompiledNames[1].GetUnicode(),
+ methodBeingCompiledNames[2].GetUnicode(),
+ inlinerNames[0].GetUnicode(),
+ inlinerNames[1].GetUnicode(),
+ inlinerNames[2].GetUnicode(),
+ inlineeNames[0].GetUnicode(),
+ inlineeNames[1].GetUnicode(),
+ inlineeNames[2].GetUnicode(),
+ inlineResult == INLINE_NEVER,
+ str,
+ GetClrInstanceId());
+ }
+ else
+ {
+ FireEtwMethodJitInliningSucceeded(methodBeingCompiledNames[0].GetUnicode(),
+ methodBeingCompiledNames[1].GetUnicode(),
+ methodBeingCompiledNames[2].GetUnicode(),
+ inlinerNames[0].GetUnicode(),
+ inlinerNames[1].GetUnicode(),
+ inlinerNames[2].GetUnicode(),
+ inlineeNames[0].GetUnicode(),
+ inlineeNames[1].GetUnicode(),
+ inlineeNames[2].GetUnicode(),
+ GetClrInstanceId());
+ }
+
+ }
+
+ EE_TO_JIT_TRANSITION();
+}
+
+
+/*************************************************************
+This loads the (formal) declared constraints on the class and method type parameters,
+and detects (but does not itself reject) circularities among the class type parameters
+and (separately) method type parameters.
+
+It must be called whenever we verify a typical method, ie any method (generic or
+nongeneric) in a typical class. It must be called for non-generic methods too,
+because their bodies may still mention class type parameters which will need to
+have their formal constraints loaded in order to perform type compatibility tests.
+
+We have to rule out cycles like "C<U,T> where T:U, U:T" only to avoid looping
+in the verifier (ie T.CanCastTo(A) would loop calling U.CanCastTo(A) which then
+calls T.CanCastTo(A) etc.). Since the JIT only tries to walk the hierarchy from a
+type parameter when verifying, it should be safe to JIT unverified, but trusted,
+instantiations even in the presence of cyclic constraints.
+@TODO: It should be possible (and easy) to detect cycles much earlier on by
+directly inspecting the metadata. All you have to do is check that, for each
+of the n type parameters to a class or method there is no path of length n
+obtained by following naked type parameter constraints of the same kind.
+This can be detected by looking directly at metadata, without actually loading
+the typehandles for the naked type parameters.
+ *************************************************************/
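+
+// For instance (hypothetical C#), the class-level circularity described above is
+//
+// class C<T, U> where T : U where U : T { }
+//
+// and the analogous pattern on method type parameters is detected separately.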
+
+void CEEInfo::initConstraintsForVerification(CORINFO_METHOD_HANDLE hMethod,
+ BOOL *pfHasCircularClassConstraints,
+ BOOL *pfHasCircularMethodConstraints)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pfHasCircularClassConstraints));
+ PRECONDITION(CheckPointer(pfHasCircularMethodConstraints));
+ } CONTRACTL_END;
+
+ *pfHasCircularClassConstraints = FALSE;
+ *pfHasCircularMethodConstraints = FALSE;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc* pMethod = GetMethod(hMethod);
+ if (pMethod->IsTypicalMethodDefinition())
+ {
+ // Force a load of the constraints on the type parameters, detecting cyclic bounds
+ pMethod->LoadConstraintsForTypicalMethodDefinition(pfHasCircularClassConstraints,pfHasCircularMethodConstraints);
+ }
+
+ EE_TO_JIT_TRANSITION();
+}
+
+/*************************************************************
+ * Check if a method to be compiled is an instantiation
+ * of generic code that has already been verified.
+ * Three possible return values (see corinfo.h)
+ *************************************************************/
+
+CorInfoInstantiationVerification
+ CEEInfo::isInstantiationOfVerifiedGeneric(CORINFO_METHOD_HANDLE hMethod)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoInstantiationVerification result = INSTVER_NOT_INSTANTIATION;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc * pMethod = GetMethod(hMethod);
+
+ if (!(pMethod->HasClassOrMethodInstantiation()))
+ {
+ result = INSTVER_NOT_INSTANTIATION;
+ goto exit;
+ }
+
+ if (pMethod->IsTypicalMethodDefinition())
+ {
+ result = INSTVER_NOT_INSTANTIATION;
+ goto exit;
+ }
+
+#ifdef FEATURE_CORECLR
+ // Skip verification of all methods in profile assemblies. We will ensure that they are all verifiable.
+ if (pMethod->GetModule()->GetFile()->GetAssembly()->IsProfileAssembly())
+ {
+ result = INSTVER_GENERIC_PASSED_VERIFICATION;
+ goto exit;
+ }
+#endif
+
+ result = pMethod->IsVerifiable() ? INSTVER_GENERIC_PASSED_VERIFICATION
+ : INSTVER_GENERIC_FAILED_VERIFICATION;
+
+ exit: ;
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+// This function returns true if we can replace pReplaced on the stack with
+// pReplacer. In the case of inlining this means that pReplaced is the inlinee
+// and pReplacer is the inliner. In the case of tail calling, pReplacer is the
+// tail callee and pReplaced is the tail caller.
+//
+// It's possible for pReplacer to be NULL. This means that it's an unresolved
+// callvirt for a tail call. This is legal, but we make the static decision
+// based on pReplaced only (assuming the worst: that pReplacer is from a
+// different assembly with a different partial-trust grant set).
+//
+// The general logic is this:
+// 1) You can replace anything that is full trust (since full trust doesn't
+// cause a demand to fail).
+// 2) You can coalesce all stack frames that have the same permission set
+// down to a single stack frame.
+//
+// You'll see three patterns in the code below:
+// 1) There is only one permission set per assembly
+// 2) Comparing grant sets is prohibitively expensive. Therefore we use the
+// fact that "a homogeneous app domain has only one partial trust
+// permission set" to infer that two grant sets are equal.
+// 3) Refuse sets are rarely used and too complex to handle correctly, so
+// they generally just torpedo all of the logic in here.
+//
+BOOL canReplaceMethodOnStack(MethodDesc* pReplaced, MethodDesc* pDeclaredReplacer, MethodDesc* pExactReplacer)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE; //Called from PREEMPTIVE functions
+ } CONTRACTL_END;
+
+ OBJECTREF refused = NULL;
+ Assembly * pReplacedAssembly = pReplaced->GetAssembly();
+
+ _ASSERTE(Security::IsResolved(pReplacedAssembly));
+
+ // The goal of this code is to ensure that we never allow a unique non-full
+ // trust grant set to be eliminated from the stack.
+
+ Assembly * pReplacerAssembly = NULL;
+ if (pExactReplacer != NULL)
+ {
+ pReplacerAssembly = pExactReplacer->GetAssembly();
+ _ASSERTE(Security::IsResolved(pReplacerAssembly));
+
+ // If two methods are from the same assembly, they must have the same grant set.
+ if (pReplacerAssembly == pReplacedAssembly)
+ {
+ // When both methods are in the same assembly, then it is always safe to
+ // coalesce them for the purposes of security.
+ return TRUE;
+ }
+ }
+
+ if ( pDeclaredReplacer != NULL &&
+ pReplacedAssembly->GetDomainAssembly() == GetAppDomain()->GetAnonymouslyHostedDynamicMethodsAssembly() &&
+ SystemDomain::IsReflectionInvocationMethod(pDeclaredReplacer) )
+ {
+ // When an anonymously hosted dynamic method invokes a method through reflection invocation,
+ // the dynamic method is the true caller. If we replace it on the stack we would be doing
+ // the security check against its caller rather than the dynamic method itself.
+ // We should do this check against pDeclaredReplacer rather than pExactReplacer because the
+ // latter is NULL if the former is virtual, e.g. MethodInfo.Invoke(...).
+ return FALSE;
+ }
+
+ // It is always safe to remove a full trust stack frame from the stack.
+ IAssemblySecurityDescriptor * pReplacedDesc = pReplacedAssembly->GetSecurityDescriptor();
+
+#ifdef FEATURE_APTCA
+ if (GetAppDomain()->IsCompilationDomain())
+ {
+ // If we're NGENing assemblies, we don't want to inline code out of a conditionally APTCA assembly,
+ // since we need to ensure that the dependency is loaded and checked to verify that it is
+ // conditionally APTCA. We only need to do this if the replaced caller is transparent, since a
+ // critical caller will be allowed to use the conditional APTCA disabled assembly anyway.
+ if (pReplacedAssembly != pReplacerAssembly && Security::IsMethodTransparent(pReplaced))
+ {
+ ModuleSecurityDescriptor *pReplacedMSD = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pReplacedAssembly);
+ if (pReplacedMSD->GetTokenFlags() & TokenSecurityDescriptorFlags_ConditionalAPTCA)
+ {
+ return FALSE;
+ }
+ }
+ }
+#endif // FEATURE_APTCA
+
+ if (pReplacedDesc->IsFullyTrusted())
+ {
+ GCX_COOP(); // Required for GetGrantedPermissionSet
+ (void)pReplacedDesc->GetGrantedPermissionSet(&refused);
+ if (refused != NULL)
+ {
+ // This is full trust with a Refused set. That means that it is partial
+ // trust. However, even in a homogeneous app domain, it could be a
+ // different partial trust from any other partial trust, and since
+ // pExactReplacer is either unknown or from a different assembly, we assume
+ // the worst: that it is a different partial trust.
+ return FALSE;
+ }
+ return TRUE;
+ }
+
+ // pReplaced is partial trust and pExactReplacer is either unknown or from a
+ // different assembly than pReplaced.
+
+ if (pExactReplacer == NULL)
+ {
+ // This is the unresolved callvirt case. Since we're partial trust,
+ // we can't tail call.
+ return FALSE;
+ }
+
+ // We're replacing a partial trust stack frame. We can only do this with a
+ // matching grant set. We know pReplaced is partial trust. Make sure both
+ // pExactReplacer and pReplaced are the same partial trust.
+ IAssemblySecurityDescriptor * pReplacerDesc = pReplacerAssembly->GetSecurityDescriptor();
+ if (pReplacerDesc->IsFullyTrusted())
+ {
+ return FALSE; // Replacing partial trust with full trust.
+ }
+
+ // At this point both pExactReplacer and pReplaced are partial trust. We can
+ // only do this if the grant sets are equal. Since comparing grant sets
+ // requires calling up into managed code, we will infer that the two grant
+ // sets are equal if the domain is homogeneous.
+ IApplicationSecurityDescriptor * adSec = GetAppDomain()->GetSecurityDescriptor();
+ if (adSec->IsHomogeneous())
+ {
+ // We're homogeneous, but the two descriptors could have refused sets.
+ // Bail if they do.
+ GCX_COOP(); // Required for GetGrantedPermissionSet
+ (void)pReplacedDesc->GetGrantedPermissionSet(&refused);
+ if (refused != NULL)
+ {
+ return FALSE;
+ }
+
+ (void)pReplacerDesc->GetGrantedPermissionSet(&refused);
+ if (refused != NULL)
+ return FALSE;
+
+ return TRUE;
+ }
+
+ // pExactReplacer and pReplaced are from 2 different assemblies. Both are partial
+ // trust, and the app domain is not homogeneous, so we just have to
+ // assume that they have different grant or refuse sets, and thus cannot
+ // safely be replaced.
+ return FALSE;
+}
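+
+// Abridged summary of the decision above (FT = full trust, PT = partial trust):
+//
+// replacer in the same assembly as replaced -> TRUE
+// anonymously hosted dynamic method invoking via reflection -> FALSE
+// replaced is FT with no refused set -> TRUE
+// replaced is PT, replacer unknown (unresolved callvirt) -> FALSE
+// replaced is PT, replacer is FT -> FALSE
+// both PT, homogeneous domain, no refused sets -> TRUE
+// anything else -> FALSE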
+
+/*************************************************************
+ * Similar to above, but perform check for tail call
+ * eligibility. The callee can be passed as NULL if not known
+ * (calli and callvirt).
+ *************************************************************/
+
+bool CEEInfo::canTailCall (CORINFO_METHOD_HANDLE hCaller,
+ CORINFO_METHOD_HANDLE hDeclaredCallee,
+ CORINFO_METHOD_HANDLE hExactCallee,
+ bool fIsTailPrefix)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ bool result = false;
+ const char * szFailReason = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ // See comments in canInline above.
+
+ MethodDesc* pCaller = GetMethod(hCaller);
+ MethodDesc* pDeclaredCallee = GetMethod(hDeclaredCallee);
+ MethodDesc* pExactCallee = GetMethod(hExactCallee);
+
+ _ASSERTE(pCaller->GetModule());
+ _ASSERTE(pCaller->GetModule()->GetClassLoader());
+
+ _ASSERTE((pExactCallee == NULL) || pExactCallee->GetModule());
+ _ASSERTE((pExactCallee == NULL) || pExactCallee->GetModule()->GetClassLoader());
+
+#ifdef FEATURE_LEGACYNETCF
+ // NetCF did not implement tail calls
+ if (m_pMethodBeingCompiled->GetDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ {
+
+ result = false;
+ szFailReason = "Windows Phone OS 7 compatibility";
+ goto exit;
+ }
+#endif // FEATURE_LEGACYNETCF
+
+ // If the caller is the static constructor (.cctor) of a class which has a ComImport base class
+ // somewhere up the class hierarchy, then we cannot make the call into a tailcall. See
+ // RegisterObjectCreationCallback() in ExtensibleClassFactory.cpp for more information.
+ if (pCaller->IsClassConstructor() &&
+ pCaller->GetMethodTable()->IsComObjectType())
+ {
+ result = false;
+ szFailReason = "Caller is ComImport .cctor";
+ goto exit;
+ }
+
+ // TailCalls will throw off security stackwalking logic when there is a declarative Assert
+ // Note that this check will also include declarative demands. It's OK to do a tailcall in
+ // those cases, but we currently don't have a way to check only for declarative Asserts.
+ if (pCaller->IsInterceptedForDeclSecurity())
+ {
+ result = false;
+ szFailReason = "Caller has declarative security";
+ goto exit;
+ }
+
+ // The jit already checks and doesn't allow the tail caller to use imperative security.
+ _ASSERTE(pCaller->IsRuntimeSupplied() || !IsMdRequireSecObject(pCaller->GetAttrs()));
+
+ if (!canReplaceMethodOnStack(pCaller, pDeclaredCallee, pExactCallee))
+ {
+ result = false;
+ szFailReason = "Different security";
+ goto exit;
+ }
+
+ if (!fIsTailPrefix)
+ {
+ mdMethodDef callerToken = pCaller->GetMemberDef();
+
+ // We don't want to tailcall the entrypoint for an application; JIT64 will sometimes
+ // do this for simple entrypoints and it results in a rather confusing debugging
+ // experience.
+ if (callerToken == pCaller->GetModule()->GetEntryPointToken())
+ {
+ result = false;
+ szFailReason = "Caller is the entry point";
+ goto exit;
+ }
+
+ if (!pCaller->IsNoMetadata())
+ {
+ // Do not tailcall from methods that are marked as noinline (people often use no-inline
+ // to mean "I want to always see this method in stacktrace")
+ DWORD dwImplFlags = 0;
+ IfFailThrow(pCaller->GetMDImport()->GetMethodImplProps(callerToken, NULL, &dwImplFlags));
+
+ if (IsMiNoInlining(dwImplFlags))
+ {
+ result = false;
+ szFailReason = "Caller is marked as no inline";
+ goto exit;
+ }
+ }
+
+ // Methods with StackCrawlMark depend on finding their caller on the stack.
+ // If we tail call one of these guys, they get confused. For lack of
+ // a better way of identifying them, we look for methods marked as NoInlining
+ // inside mscorlib (StackCrawlMark is private), and assume it is one of these
+ // methods. We have an assert in canInline that ensures all StackCrawlMark
+ // methods are appropriately marked.
+ //
+ // NOTE that this is *NOT* a security issue because we check to ensure that
+ // the callee has the *SAME* security properties as the caller; it just might
+ // be from a different assembly, which messes up APIs like Type.GetType, which
+ // for back-compat uses the assembly of its caller to resolve unqualified
+ // typenames.
+ if ((pExactCallee != NULL) && pExactCallee->GetModule()->IsSystem() && pExactCallee->IsIL())
+ {
+ if (IsMiNoInlining(pExactCallee->GetImplAttrs()))
+ {
+ result = false;
+ szFailReason = "Callee might have a StackCrawlMark.LookForMyCaller";
+ goto exit;
+ }
+ }
+ }
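+
+ // Example of the StackCrawlMark hazard above (hypothetical C#):
+ //
+ // static Type F() { return Type.GetType("Foo"); } // tail called
+ //
+ // If F's frame were removed by a tail call, GetType would resolve "Foo"
+ // against the wrong calling assembly.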
+
+ // We cannot tail call from a root CER method, the thread abort algorithm to
+ // detect CERs depends on seeing such methods on the stack.
+ if (IsCerRootMethod(pCaller))
+ {
+ result = false;
+ szFailReason = "Caller is a CER root";
+ goto exit;
+ }
+
+ result = true;
+
+exit: ;
+
+ EE_TO_JIT_TRANSITION();
+
+ if (!result)
+ {
+ // If you hit this assert, it means you added a new way to prevent tail calls
+ // without documenting it for ETW!
+ _ASSERTE(szFailReason != NULL);
+ reportTailCallDecision(hCaller, hExactCallee, fIsTailPrefix, TAILCALL_FAIL, szFailReason);
+ }
+
+ return result;
+}
+
+void CEEInfo::reportTailCallDecision (CORINFO_METHOD_HANDLE callerHnd,
+ CORINFO_METHOD_HANDLE calleeHnd,
+ bool fIsTailPrefix,
+ CorInfoTailCall tailCallResult,
+ const char * reason)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ JIT_TO_EE_TRANSITION();
+
+ // Report the method being compiled in addition to the caller and callee.
+#ifdef _DEBUG
+ if (LoggingOn(LF_JIT, LL_INFO100000))
+ {
+ SString currentMethodName;
+ TypeString::AppendMethodInternal(currentMethodName, m_pMethodBeingCompiled,
+ TypeString::FormatBasic);
+
+ SString calleeMethodName;
+ if (GetMethod(calleeHnd))
+ {
+ TypeString::AppendMethodInternal(calleeMethodName, GetMethod(calleeHnd),
+ TypeString::FormatBasic);
+ }
+ else
+ {
+ calleeMethodName.AppendASCII( "<null>" );
+ }
+
+ SString callerMethodName;
+ if (GetMethod(callerHnd))
+ {
+ TypeString::AppendMethodInternal(callerMethodName, GetMethod(callerHnd),
+ TypeString::FormatBasic);
+ }
+ else
+ {
+ callerMethodName.AppendASCII( "<null>" );
+ }
+ if (tailCallResult == TAILCALL_FAIL)
+ {
+ LOG((LF_JIT, LL_INFO100000,
+ "While compiling '%S', %Splicit tail call from '%S' to '%S' failed because: '%s'.\n",
+ currentMethodName.GetUnicode(), fIsTailPrefix ? W("ex") : W("im"),
+ callerMethodName.GetUnicode(), calleeMethodName.GetUnicode(), reason));
+ }
+ else
+ {
+ static const char * const tailCallType[] = {
+ "optimized tail call", "recursive loop", "helper assisted tailcall"
+ };
+ _ASSERTE(tailCallResult >= 0 && (size_t)tailCallResult < sizeof(tailCallType) / sizeof(tailCallType[0]));
+ LOG((LF_JIT, LL_INFO100000,
+ "While compiling '%S', %Splicit tail call from '%S' to '%S' generated as a %s.\n",
+ currentMethodName.GetUnicode(), fIsTailPrefix ? W("ex") : W("im"),
+ callerMethodName.GetUnicode(), calleeMethodName.GetUnicode(), tailCallType[tailCallResult]));
+
+ }
+ }
+#endif //_DEBUG
+
+ // This code is duplicated because the ETW format is slightly different, and LoggingOn is debug-only.
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_VERBOSE,
+ CLR_JITTRACING_KEYWORD))
+ {
+ SString methodBeingCompiledNames[3];
+ SString callerNames[3];
+ SString calleeNames[3];
+ MethodDesc * methodBeingCompiled = m_pMethodBeingCompiled;
+#define GMI(pMD, strArray) \
+ do { \
+ if (pMD) { \
+ (pMD)->GetMethodInfo((strArray)[0], (strArray)[1], (strArray)[2]); \
+ } else { \
+ (strArray)[0].Set(W("<null>")); \
+ (strArray)[1].Set(W("<null>")); \
+ (strArray)[2].Set(W("<null>")); \
+ } } while (0)
+
+ GMI(methodBeingCompiled, methodBeingCompiledNames);
+ GMI(GetMethod(callerHnd), callerNames);
+ GMI(GetMethod(calleeHnd), calleeNames);
+#undef GMI
+ if (tailCallResult == TAILCALL_FAIL)
+ {
+ const char * str = (reason ? reason : "");
+
+ FireEtwMethodJitTailCallFailed(methodBeingCompiledNames[0].GetUnicode(),
+ methodBeingCompiledNames[1].GetUnicode(),
+ methodBeingCompiledNames[2].GetUnicode(),
+ callerNames[0].GetUnicode(),
+ callerNames[1].GetUnicode(),
+ callerNames[2].GetUnicode(),
+ calleeNames[0].GetUnicode(),
+ calleeNames[1].GetUnicode(),
+ calleeNames[2].GetUnicode(),
+ fIsTailPrefix,
+ str,
+ GetClrInstanceId());
+ }
+ else
+ {
+ FireEtwMethodJitTailCallSucceeded(methodBeingCompiledNames[0].GetUnicode(),
+ methodBeingCompiledNames[1].GetUnicode(),
+ methodBeingCompiledNames[2].GetUnicode(),
+ callerNames[0].GetUnicode(),
+ callerNames[1].GetUnicode(),
+ callerNames[2].GetUnicode(),
+ calleeNames[0].GetUnicode(),
+ calleeNames[1].GetUnicode(),
+ calleeNames[2].GetUnicode(),
+ fIsTailPrefix,
+ tailCallResult,
+ GetClrInstanceId());
+ }
+
+ }
+
+
+ EE_TO_JIT_TRANSITION();
+}
+
+void CEEInfo::getEHinfoHelper(
+ CORINFO_METHOD_HANDLE ftnHnd,
+ unsigned EHnumber,
+ CORINFO_EH_CLAUSE* clause,
+ COR_ILMETHOD_DECODER* pILHeader)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(CheckPointer(pILHeader->EH));
+ _ASSERTE(EHnumber < pILHeader->EH->EHCount());
+
+ COR_ILMETHOD_SECT_EH_CLAUSE_FAT ehClause;
+ const COR_ILMETHOD_SECT_EH_CLAUSE_FAT* ehInfo;
+ ehInfo = (COR_ILMETHOD_SECT_EH_CLAUSE_FAT*)pILHeader->EH->EHClause(EHnumber, &ehClause);
+
+ clause->Flags = (CORINFO_EH_CLAUSE_FLAGS)ehInfo->GetFlags();
+ clause->TryOffset = ehInfo->GetTryOffset();
+ clause->TryLength = ehInfo->GetTryLength();
+ clause->HandlerOffset = ehInfo->GetHandlerOffset();
+ clause->HandlerLength = ehInfo->GetHandlerLength();
+ if ((clause->Flags & CORINFO_EH_CLAUSE_FILTER) == 0)
+ clause->ClassToken = ehInfo->GetClassToken();
+ else
+ clause->FilterOffset = ehInfo->GetFilterOffset();
+}
+
+/*********************************************************************/
+// get individual exception handler
+void CEEInfo::getEHinfo(
+ CORINFO_METHOD_HANDLE ftnHnd,
+ unsigned EHnumber,
+ CORINFO_EH_CLAUSE* clause)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc * ftn = GetMethod(ftnHnd);
+
+ if (IsDynamicMethodHandle(ftnHnd))
+ {
+ GetMethod(ftnHnd)->AsDynamicMethodDesc()->GetResolver()->GetEHInfo(EHnumber, clause);
+ }
+ else
+ {
+ COR_ILMETHOD_DECODER header(ftn->GetILHeader(TRUE), ftn->GetMDImport(), NULL);
+ getEHinfoHelper(ftnHnd, EHnumber, clause, &header);
+ }
+
+ EE_TO_JIT_TRANSITION();
+}
+
+//---------------------------------------------------------------------------------------
+//
+void
+CEEInfo::getMethodSig(
+ CORINFO_METHOD_HANDLE ftnHnd,
+ CORINFO_SIG_INFO * sigRet,
+ CORINFO_CLASS_HANDLE owner)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ getMethodSigInternal(ftnHnd, sigRet, owner);
+
+ EE_TO_JIT_TRANSITION();
+}
+
+//---------------------------------------------------------------------------------------
+//
+void
+CEEInfo::getMethodSigInternal(
+ CORINFO_METHOD_HANDLE ftnHnd,
+ CORINFO_SIG_INFO * sigRet,
+ CORINFO_CLASS_HANDLE owner)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc * ftn = GetMethod(ftnHnd);
+
+ PCCOR_SIGNATURE pSig = NULL;
+ DWORD cbSig = 0;
+ ftn->GetSig(&pSig, &cbSig);
+
+ // Type parameters in the signature are instantiated
+ // according to the class/method/array instantiation of ftnHnd and owner
+ CEEInfo::ConvToJitSig(
+ pSig,
+ cbSig,
+ GetScopeHandle(ftn),
+ mdTokenNil,
+ sigRet,
+ ftn,
+ false,
+ (TypeHandle)owner);
+
+ //@GENERICS:
+ // Shared generic methods and shared methods on generic structs take an extra argument representing their instantiation
+ if (ftn->RequiresInstArg())
+ {
+ sigRet->callConv = (CorInfoCallConv) (sigRet->callConv | CORINFO_CALLCONV_PARAMTYPE);
+ }
+
+ // We want the calling convention bit to be consistent with the method attribute bit
+ _ASSERTE( (IsMdStatic(ftn->GetAttrs()) == 0) == ((sigRet->callConv & CORINFO_CALLCONV_HASTHIS) != 0) );
+}
+
+//---------------------------------------------------------------------------------------
+//
+//@GENERICSVER: for a method desc in a typical instantiation of a generic class,
+// this will return the typical instantiation of the generic class,
+// but only provided that type variables are never shared.
+// The JIT verifier relies on this behaviour to extract the typical class from an instantiated method's typical method handle.
+//
+CORINFO_CLASS_HANDLE
+CEEInfo::getMethodClass(
+ CORINFO_METHOD_HANDLE methodHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_CLASS_HANDLE result = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc* method = GetMethod(methodHnd);
+
+ if (method->IsDynamicMethod())
+ {
+ DynamicResolver::SecurityControlFlags securityControlFlags = DynamicResolver::Default;
+ TypeHandle typeOwner;
+
+ DynamicResolver* pResolver = method->AsDynamicMethodDesc()->GetResolver();
+ pResolver->GetJitContext(&securityControlFlags, &typeOwner);
+
+ if (!typeOwner.IsNull() && (method == pResolver->GetDynamicMethod()))
+ {
+ result = CORINFO_CLASS_HANDLE(typeOwner.AsPtr());
+ }
+ }
+
+ if (result == NULL)
+ {
+ TypeHandle th = TypeHandle(method->GetMethodTable());
+
+ result = CORINFO_CLASS_HANDLE(th.AsPtr());
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/***********************************************************************/
+CORINFO_MODULE_HANDLE CEEInfo::getMethodModule (CORINFO_METHOD_HANDLE methodHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_MODULE_HANDLE result = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ MethodDesc* method = GetMethod(methodHnd);
+
+ if (method->IsDynamicMethod())
+ {
+ // This should never be called (hence the UNREACHABLE); it is not clear whether
+ // the (non-existent) caller would expect the Module or the scope.
+ UNREACHABLE();
+ }
+ else
+ {
+ result = (CORINFO_MODULE_HANDLE) method->GetModule();
+ }
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+/*********************************************************************/
+CorInfoIntrinsics CEEInfo::getIntrinsicID(CORINFO_METHOD_HANDLE methodHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoIntrinsics result = CORINFO_INTRINSIC_Illegal;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc* method = GetMethod(methodHnd);
+
+ if (method->IsArray())
+ {
+ ArrayMethodDesc * arrMethod = (ArrayMethodDesc *)method;
+ result = arrMethod->GetIntrinsicID();
+ }
+ else
+ if (method->IsFCall())
+ {
+ result = ECall::GetIntrinsicID(method);
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+bool CEEInfo::isInSIMDModule(CORINFO_CLASS_HANDLE classHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ bool result = false;
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ TypeHandle VMClsHnd(classHnd);
+ if (VMClsHnd.GetMethodTable()->GetAssembly()->IsSIMDVectorAssembly())
+ {
+ result = true;
+ }
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+/*********************************************************************/
+void CEEInfo::getMethodVTableOffset (CORINFO_METHOD_HANDLE methodHnd,
+ unsigned * pOffsetOfIndirection,
+ unsigned * pOffsetAfterIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ MethodDesc* method = GetMethod(methodHnd);
+
+ //@GENERICS: shouldn't be doing this for instantiated methods as they live elsewhere
+ _ASSERTE(!method->HasMethodInstantiation());
+
+ _ASSERTE(MethodTable::GetVtableOffset() < 256); // a rough sanity check
+
+#ifdef MDIL
+ // We could get here when we don't de-virtualize a call-virt to a non-virtual method (while generating MDIL).
+ // However, when generating MDIL the JIT should never ask for slot numbers; instead it should just
+ // emit the proper MDIL symbol and leave it to the binder to resolve it to a method table slot.
+ _ASSERTE(!SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain() || isVerifyOnly());
+#endif
+ // better be in the vtable
+ _ASSERTE(method->GetSlot() < method->GetMethodTable()->GetNumVirtuals());
+
+ *pOffsetOfIndirection = MethodTable::GetVtableOffset() + MethodTable::GetIndexOfVtableIndirection(method->GetSlot()) * sizeof(PTR_PCODE);
+ *pOffsetAfterIndirection = MethodTable::GetIndexAfterVtableIndirection(method->GetSlot()) * sizeof(PCODE);
+
+ EE_TO_JIT_TRANSITION_LEAF();
+}
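+
+// Worked example (illustrative; assumes 8 vtable slots per indirection chunk
+// and 8-byte pointers): for a method in slot 10,
+//
+// *pOffsetOfIndirection = vtableOffset + (10 / 8) * sizeof(PTR_PCODE) // 2nd chunk pointer
+// *pOffsetAfterIndirection = (10 % 8) * sizeof(PCODE) // 3rd entry in that chunk
+//
+// so a virtual call loads the chunk pointer first, then the code address at
+// the second-level offset.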
+
+/*********************************************************************/
+void CEEInfo::getFunctionEntryPoint(CORINFO_METHOD_HANDLE ftnHnd,
+ CORINFO_CONST_LOOKUP * pResult,
+ CORINFO_ACCESS_FLAGS accessFlags)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ void* ret = NULL;
+ InfoAccessType accessType = IAT_VALUE;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc * ftn = GetMethod(ftnHnd);
+
+ // Resolve methodImpl.
+ ftn = ftn->GetMethodTable()->MapMethodDeclToMethodImpl(ftn);
+
+ ret = (void *)ftn->TryGetMultiCallableAddrOfCode(accessFlags);
+
+ // TryGetMultiCallableAddrOfCode returns NULL if indirect access is desired
+ if (ret == NULL)
+ {
+ // should never get here for EnC methods or if interception via remoting stub is required
+ _ASSERTE(!ftn->IsEnCMethod());
+
+ _ASSERTE((accessFlags & CORINFO_ACCESS_THIS) || !ftn->IsRemotingInterceptedViaVirtualDispatch());
+
+ ret = ftn->GetAddrOfSlot();
+ accessType = IAT_PVALUE;
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ _ASSERTE(ret != NULL);
+
+ pResult->accessType = accessType;
+ pResult->addr = ret;
+}
+
+/*********************************************************************/
+void CEEInfo::getFunctionFixedEntryPoint(CORINFO_METHOD_HANDLE ftn,
+ CORINFO_CONST_LOOKUP * pResult)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc * pMD = GetMethod(ftn);
+
+ pResult->accessType = IAT_VALUE;
+ pResult->addr = (void *) pMD->GetMultiCallableAddrOfCode();
+
+ EE_TO_JIT_TRANSITION();
+}
+
+/*********************************************************************/
+const char* CEEInfo::getFieldName (CORINFO_FIELD_HANDLE fieldHnd, const char** scopeName)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ const char* result = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ FieldDesc* field = (FieldDesc*) fieldHnd;
+ if (scopeName != 0)
+ {
+ TypeHandle t = TypeHandle(field->GetApproxEnclosingMethodTable());
+ *scopeName = "";
+ if (!t.IsNull())
+ {
+#ifdef _DEBUG
+ t.GetName(ssClsNameBuff);
+ *scopeName = ssClsNameBuff.GetUTF8(ssClsNameBuffScratch);
+#else // !_DEBUG
+ // Since this is for diagnostic purposes only, give up on the
+ // namespace, as we don't have a buffer to concatenate it.
+ // Also note this won't show array class names.
+ LPCUTF8 nameSpace;
+ *scopeName= t.GetMethodTable()->GetFullyQualifiedNameInfo(&nameSpace);
+#endif // !_DEBUG
+ }
+ }
+
+ result = field->GetName();
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+// Get the type that declares the field
+CORINFO_CLASS_HANDLE CEEInfo::getFieldClass (CORINFO_FIELD_HANDLE fieldHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_CLASS_HANDLE result = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ FieldDesc* field = (FieldDesc*) fieldHnd;
+ result = CORINFO_CLASS_HANDLE(field->GetApproxEnclosingMethodTable());
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+/*********************************************************************/
+// Returns the basic type of the field (not the type that declares the field)
+//
+// pTypeHnd - On return, for reference and value types, *pTypeHnd will contain
+// the normalized type of the field.
+// owner - Optional. For resolving in a generic context
+
+CorInfoType CEEInfo::getFieldType (CORINFO_FIELD_HANDLE fieldHnd,
+ CORINFO_CLASS_HANDLE* pTypeHnd,
+ CORINFO_CLASS_HANDLE owner)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoType result = CORINFO_TYPE_UNDEF;
+
+ JIT_TO_EE_TRANSITION();
+
+ result = getFieldTypeInternal(fieldHnd, pTypeHnd, owner);
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+CorInfoType CEEInfo::getFieldTypeInternal (CORINFO_FIELD_HANDLE fieldHnd,
+ CORINFO_CLASS_HANDLE* pTypeHnd,
+ CORINFO_CLASS_HANDLE owner)
+{
+ STANDARD_VM_CONTRACT;
+
+ *pTypeHnd = 0;
+
+ TypeHandle clsHnd = TypeHandle();
+ FieldDesc* field = (FieldDesc*) fieldHnd;
+ CorElementType type = field->GetFieldType();
+
+ // <REVISIT_TODO>TODO should not burn the time to do this for anything but Value Classes</REVISIT_TODO>
+ _ASSERTE(type != ELEMENT_TYPE_BYREF);
+
+ // For verifying code involving generics, use the class instantiation
+ // of the optional owner (to provide exact, not representative,
+ // type information)
+ SigTypeContext typeContext(field, (TypeHandle) owner);
+
+ if (!CorTypeInfo::IsPrimitiveType(type))
+ {
+ PCCOR_SIGNATURE sig;
+ DWORD sigCount;
+ CorCallingConvention conv;
+
+ field->GetSig(&sig, &sigCount);
+
+ conv = (CorCallingConvention) CorSigUncompressCallingConv(sig);
+ _ASSERTE(isCallConv(conv, IMAGE_CEE_CS_CALLCONV_FIELD));
+
+ SigPointer ptr(sig, sigCount);
+
+ clsHnd = ptr.GetTypeHandleThrowing(field->GetModule(), &typeContext);
+ _ASSERTE(!clsHnd.IsNull());
+
+ // I believe it makes no difference whether this is GetInternalCorElementType
+ // or GetSignatureCorElementType.
+ type = clsHnd.GetSignatureCorElementType();
+ }
+
+ return CEEInfo::asCorInfoType(type, clsHnd, pTypeHnd);
+}
+
+/*********************************************************************/
+unsigned CEEInfo::getFieldOffset (CORINFO_FIELD_HANDLE fieldHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ unsigned result = (unsigned) -1;
+
+ JIT_TO_EE_TRANSITION();
+
+ FieldDesc* field = (FieldDesc*) fieldHnd;
+
+ // GetOffset() does not include the size of Object
+ result = field->GetOffset();
+
+ // So for instance fields of reference types, add sizeof(Object) to it
+ if (field->IsStatic())
+ {
+ Module* pModule = field->GetModule();
+ if (field->IsRVA() && pModule->IsRvaFieldTls(field->GetOffset()))
+ {
+ result = pModule->GetFieldTlsOffset(field->GetOffset());
+ }
+ }
+ else if (!field->GetApproxEnclosingMethodTable()->IsValueType())
+ {
+ result += sizeof(Object);
+ }
+
+#ifdef MDIL
+ if (SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain() && !isVerifyOnly())
+ {
+ result += (GetRandomInt(1) == 0) ? 4 : 8;
+ }
+#endif
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
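+
+// Example (illustrative, 64-bit): an instance field of a reference type whose
+// metadata offset is 4 reports 4 + sizeof(Object), i.e. the returned offset is
+// measured from the start of the object, including the method table pointer.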
+
+/*********************************************************************/
+bool CEEInfo::isWriteBarrierHelperRequired(CORINFO_FIELD_HANDLE field)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ bool fHelperRequired = false;
+
+ JIT_TO_EE_TRANSITION();
+
+ FieldDesc * pField = (FieldDesc *)field;
+
+ // TODO: jit64 should be switched to the same plan as the i386 jits - use
+ // getClassGClayout to figure out the need for writebarrier helper, and inline the copying.
+ // Once this happens, USE_WRITE_BARRIER_HELPERS and CORINFO_FLG_WRITE_BARRIER_HELPER can be removed.
+ CorElementType type = pField->GetFieldType();
+
+ if(CorTypeInfo::IsObjRef(type))
+ fHelperRequired = true;
+ else if (type == ELEMENT_TYPE_VALUETYPE)
+ {
+ TypeHandle th = pField->GetFieldTypeHandleThrowing();
+ _ASSERTE(!th.IsNull());
+ if(th.GetMethodTable()->ContainsPointers())
+ fHelperRequired = true;
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return fHelperRequired;
+}
+
+/*********************************************************************/
+DWORD CEEInfo::getFieldThreadLocalStoreID(CORINFO_FIELD_HANDLE fieldHnd, void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ DWORD result = 0;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ FieldDesc* field = (FieldDesc*) fieldHnd;
+ Module* module = field->GetModule();
+
+ _ASSERTE(field->IsRVA()); // Only RVA statics can be thread local
+ _ASSERTE(module->IsRvaFieldTls(field->GetOffset()));
+
+ result = module->GetTlsIndex();
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+void *CEEInfo::allocateArray(ULONG cBytes)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ void * result = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ result = new BYTE [cBytes];
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+void CEEInfo::freeArray(void *array)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ delete [] ((BYTE*) array);
+
+ EE_TO_JIT_TRANSITION();
+}
+
+void CEEInfo::getBoundaries(CORINFO_METHOD_HANDLE ftn,
+ unsigned int *cILOffsets, DWORD **pILOffsets,
+ ICorDebugInfo::BoundaryTypes *implicitBoundaries)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+#ifdef DEBUGGING_SUPPORTED
+ if (g_pDebugInterface && !IsCompilationProcess())
+ {
+ g_pDebugInterface->getBoundaries(GetMethod(ftn), cILOffsets, pILOffsets,
+ implicitBoundaries);
+ }
+ else
+ {
+ *cILOffsets = 0;
+ *pILOffsets = NULL;
+ *implicitBoundaries = ICorDebugInfo::DEFAULT_BOUNDARIES;
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ EE_TO_JIT_TRANSITION();
+}
+
+void CEEInfo::getVars(CORINFO_METHOD_HANDLE ftn, ULONG32 *cVars, ICorDebugInfo::ILVarInfo **vars,
+ bool *extendOthers)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+#ifdef DEBUGGING_SUPPORTED
+ if (g_pDebugInterface && !IsCompilationProcess())
+ {
+ g_pDebugInterface->getVars(GetMethod(ftn), cVars, vars, extendOthers);
+ }
+ else
+ {
+ *cVars = 0;
+ *vars = NULL;
+
+ // Just tell the JIT to extend everything.
+ *extendOthers = true;
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ EE_TO_JIT_TRANSITION();
+}
+
+/*********************************************************************/
+CORINFO_ARG_LIST_HANDLE CEEInfo::getArgNext(CORINFO_ARG_LIST_HANDLE args)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_ARG_LIST_HANDLE result = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ SigPointer ptr((unsigned __int8*) args);
+ IfFailThrow(ptr.SkipExactlyOne());
+
+ result = (CORINFO_ARG_LIST_HANDLE) ptr.GetPtr();
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
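+
+// Usage sketch (illustrative, not actual JIT code): a signature's arguments are
+// walked by starting at sig.args and advancing with getArgNext once per argument
+// (pEEInfo stands in for the JIT's EE interface pointer):
+//
+// CORINFO_ARG_LIST_HANDLE arg = sig.args;
+// for (unsigned i = 0; i < sig.numArgs; i++)
+// {
+// CORINFO_CLASS_HANDLE vcHnd;
+// CorInfoTypeWithMod argType = pEEInfo->getArgType(&sig, arg, &vcHnd);
+// arg = pEEInfo->getArgNext(arg);
+// }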
+
+
+/*********************************************************************/
+
+CorInfoTypeWithMod CEEInfo::getArgType (
+ CORINFO_SIG_INFO* sig,
+ CORINFO_ARG_LIST_HANDLE args,
+ CORINFO_CLASS_HANDLE* vcTypeRet
+ )
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoTypeWithMod result = CorInfoTypeWithMod(CORINFO_TYPE_UNDEF);
+
+ JIT_TO_EE_TRANSITION();
+
+ _ASSERTE((BYTE*) sig->pSig <= (BYTE*) sig->args && (BYTE*) args < (BYTE*) sig->pSig + sig->cbSig);
+ _ASSERTE((BYTE*) sig->args <= (BYTE*) args);
+ INDEBUG(*vcTypeRet = CORINFO_CLASS_HANDLE((size_t)INVALID_POINTER_CC));
+
+ SigPointer ptr((unsigned __int8*) args);
+ CorElementType eType;
+ IfFailThrow(ptr.PeekElemType(&eType));
+ while (eType == ELEMENT_TYPE_PINNED)
+ {
+ result = CORINFO_TYPE_MOD_PINNED;
+ IfFailThrow(ptr.GetElemType(NULL));
+ IfFailThrow(ptr.PeekElemType(&eType));
+ }
+
+ // Now read off the "real" element type after taking any instantiations into consideration
+ SigTypeContext typeContext;
+ GetTypeContext(&sig->sigInst,&typeContext);
+
+ Module* pModule = GetModule(sig->scope);
+
+ CorElementType type = ptr.PeekElemTypeClosed(pModule, &typeContext);
+
+ TypeHandle typeHnd = TypeHandle();
+ switch (type) {
+ case ELEMENT_TYPE_VAR :
+ case ELEMENT_TYPE_MVAR :
+ case ELEMENT_TYPE_VALUETYPE :
+ case ELEMENT_TYPE_TYPEDBYREF :
+ case ELEMENT_TYPE_INTERNAL :
+ {
+ typeHnd = ptr.GetTypeHandleThrowing(pModule, &typeContext);
+ _ASSERTE(!typeHnd.IsNull());
+
+ CorElementType normType = typeHnd.GetInternalCorElementType();
+
+ // If we are looking up a value class, don't morph it to a reference type
+ // (this can only happen in illegal IL).
+ if (!CorTypeInfo::IsObjRef(normType) || type != ELEMENT_TYPE_VALUETYPE)
+ {
+ type = normType;
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_PTR:
+ // Load the type eagerly under debugger to make the eval work
+ if (!isVerifyOnly() && CORDisableJITOptimizations(pModule->GetDebuggerInfoBits()))
+ {
+ // NOTE: in some IJW cases, when the type pointed at is unmanaged,
+ // GetTypeHandle may fail because there is no TypeDef for such a type.
+ // Using GetTypeHandleThrowing would lead to a class-load exception.
+ TypeHandle thPtr = ptr.GetTypeHandleNT(pModule, &typeContext);
+ if(!thPtr.IsNull())
+ {
+ m_pOverride->classMustBeLoadedBeforeCodeIsRun(CORINFO_CLASS_HANDLE(thPtr.AsPtr()));
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_VOID:
+ // void is not valid in local sigs
+ if (sig->flags & CORINFO_SIGFLAG_IS_LOCAL_SIG)
+ COMPlusThrowHR(COR_E_INVALIDPROGRAM);
+ break;
+
+ case ELEMENT_TYPE_END:
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ break;
+
+ default:
+ break;
+ }
+
+ result = CorInfoTypeWithMod(result | CEEInfo::asCorInfoType(type, typeHnd, vcTypeRet));
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+
+CORINFO_CLASS_HANDLE CEEInfo::getArgClass (
+ CORINFO_SIG_INFO* sig,
+ CORINFO_ARG_LIST_HANDLE args
+ )
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_CLASS_HANDLE result = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ // Make certain we don't have a completely whacked-out sig pointer
+ _ASSERTE((BYTE*) sig->pSig <= (BYTE*) sig->args);
+ _ASSERTE((BYTE*) sig->args <= (BYTE*) args && (BYTE*) args < &((BYTE*) sig->args)[0x10000*5]);
+
+ Module* pModule = GetModule(sig->scope);
+
+ SigPointer ptr((unsigned __int8*) args);
+
+ CorElementType eType;
+ IfFailThrow(ptr.PeekElemType(&eType));
+
+ while (eType == ELEMENT_TYPE_PINNED)
+ {
+ IfFailThrow(ptr.GetElemType(NULL));
+ IfFailThrow(ptr.PeekElemType(&eType));
+ }
+ // Now read off the "real" element type after taking any instantiations into consideration
+ SigTypeContext typeContext;
+ GetTypeContext(&sig->sigInst, &typeContext);
+ CorElementType type = ptr.PeekElemTypeClosed(pModule, &typeContext);
+
+ if (!CorTypeInfo::IsPrimitiveType(type)) {
+ TypeHandle th = ptr.GetTypeHandleThrowing(pModule, &typeContext);
+ result = CORINFO_CLASS_HANDLE(th.AsPtr());
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+
+CorInfoType CEEInfo::getHFAType(CORINFO_CLASS_HANDLE hClass)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoType result = CORINFO_TYPE_UNDEF;
+
+#ifdef MDIL
+ if (SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain() && !isVerifyOnly())
+ return result;
+#endif
+
+#ifdef FEATURE_HFA
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle VMClsHnd(hClass);
+
+ result = asCorInfoType(VMClsHnd.GetHFAType());
+
+ EE_TO_JIT_TRANSITION();
+#endif
+
+ return result;
+}
+
+/*********************************************************************/
+
+ // return the unmanaged calling convention for a PInvoke
+CorInfoUnmanagedCallConv CEEInfo::getUnmanagedCallConv(CORINFO_METHOD_HANDLE method)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CorInfoUnmanagedCallConv result = CORINFO_UNMANAGED_CALLCONV_UNKNOWN;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc* pMD = NULL;
+ pMD = GetMethod(method);
+ _ASSERTE(pMD->IsNDirect());
+
+#ifdef _TARGET_X86_
+ EX_TRY
+ {
+ PInvokeStaticSigInfo sigInfo(pMD, PInvokeStaticSigInfo::NO_THROW_ON_ERROR);
+
+ switch (sigInfo.GetCallConv()) {
+ case pmCallConvCdecl:
+ result = CORINFO_UNMANAGED_CALLCONV_C;
+ break;
+ case pmCallConvStdcall:
+ result = CORINFO_UNMANAGED_CALLCONV_STDCALL;
+ break;
+ case pmCallConvThiscall:
+ result = CORINFO_UNMANAGED_CALLCONV_THISCALL;
+ break;
+ default:
+ result = CORINFO_UNMANAGED_CALLCONV_UNKNOWN;
+ }
+ }
+ EX_CATCH
+ {
+ result = CORINFO_UNMANAGED_CALLCONV_UNKNOWN;
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+#else // !_TARGET_X86_
+ //
+ // we have only one calling convention
+ //
+ result = CORINFO_UNMANAGED_CALLCONV_STDCALL;
+#endif // !_TARGET_X86_
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
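+
+// For reference (hypothetical C#), the x86 mapping above corresponds to:
+//
+// [DllImport("m.dll", CallingConvention = CallingConvention.Cdecl)] -> CORINFO_UNMANAGED_CALLCONV_C
+// [DllImport("m.dll", CallingConvention = CallingConvention.StdCall)] -> CORINFO_UNMANAGED_CALLCONV_STDCALL
+// [DllImport("m.dll", CallingConvention = CallingConvention.ThisCall)] -> CORINFO_UNMANAGED_CALLCONV_THISCALL
+//
+// On non-x86 targets everything is reported as STDCALL, as there is only one
+// calling convention.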
+
+/*********************************************************************/
+BOOL NDirectMethodDesc::ComputeMarshalingRequired()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return NDirect::MarshalingRequired(this);
+}
+
+/*********************************************************************/
+BOOL CEEInfo::pInvokeMarshalingRequired(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* callSiteSig)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+#ifdef MDIL
+ _ASSERTE(!SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain() || isVerifyOnly());
+#endif
+
+ BOOL result = FALSE;
+
+ JIT_TO_EE_TRANSITION();
+
+ if (method != 0)
+ {
+ MethodDesc* ftn = GetMethod(method);
+ _ASSERTE(ftn->IsNDirect());
+ NDirectMethodDesc *pMD = (NDirectMethodDesc*)ftn;
+
+#if defined(HAS_NDIRECT_IMPORT_PRECODE)
+ if (pMD->IsVarArg())
+ {
+            // Vararg P/Invoke must not be inlined because its NDirectMethodDesc
+ // does not contain a meaningful stack size (it is call site specific).
+ // See code:InlinedCallFrame.UpdateRegDisplay where this is needed.
+ result = TRUE;
+ }
+ else if (pMD->MarshalingRequired())
+ {
+ // This is not a no-marshal signature.
+ result = TRUE;
+ }
+ else
+ {
+ // This is a no-marshal non-vararg signature.
+ result = FALSE;
+ }
+#else
+        // Marshalling is required to lazily initialize the indirection cell
+ // without NDirectImportPrecode.
+ result = TRUE;
+#endif
+ }
+ else
+ {
+ // check the call site signature
+ result = NDirect::MarshalingRequired(
+ GetMethod(method),
+ callSiteSig->pSig,
+ GetModule(callSiteSig->scope));
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+// Generate a cookie based on the signature that needs to be passed
+// to CORINFO_HELP_PINVOKE_CALLI
+LPVOID CEEInfo::GetCookieForPInvokeCalliSig(CORINFO_SIG_INFO* szMetaSig,
+ void **ppIndirection)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return getVarArgsHandle(szMetaSig, ppIndirection);
+}
+
+bool CEEInfo::canGetCookieForPInvokeCalliSig(CORINFO_SIG_INFO* szMetaSig)
+{
+ LIMITED_METHOD_CONTRACT;
+ return true;
+}
+
+
+// Check any constraints on method type arguments
+BOOL CEEInfo::satisfiesMethodConstraints(
+ CORINFO_CLASS_HANDLE parent,
+ CORINFO_METHOD_HANDLE method)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ BOOL result = FALSE;
+
+ JIT_TO_EE_TRANSITION();
+
+ _ASSERTE(parent != NULL);
+ _ASSERTE(method != NULL);
+ result = GetMethod(method)->SatisfiesMethodConstraints(TypeHandle(parent));
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+
+
+/*********************************************************************/
+// Given a delegate target class, a target method parent class, a target method,
+// a delegate class, check if the method signature is compatible with the Invoke method of the delegate
+// (under the typical instantiation of any free type variables in the memberref signatures).
+//
+// objCls should be NULL if the target object is NULL
+//@GENERICSVER: new (suitable for generics)
+BOOL CEEInfo::isCompatibleDelegate(
+ CORINFO_CLASS_HANDLE objCls,
+ CORINFO_CLASS_HANDLE methodParentCls,
+ CORINFO_METHOD_HANDLE method,
+ CORINFO_CLASS_HANDLE delegateCls,
+ BOOL* pfIsOpenDelegate)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ BOOL result = FALSE;
+
+ JIT_TO_EE_TRANSITION();
+
+ _ASSERTE(method != NULL);
+ _ASSERTE(delegateCls != NULL);
+
+ TypeHandle delegateClsHnd = (TypeHandle) delegateCls;
+
+ _ASSERTE(delegateClsHnd.GetMethodTable()->IsDelegate());
+
+ TypeHandle methodParentHnd = (TypeHandle) (methodParentCls);
+ MethodDesc* pMDFtn = GetMethod(method);
+ TypeHandle objClsHnd(objCls);
+
+ EX_TRY
+ {
+ result = COMDelegate::ValidateCtor(objClsHnd, methodParentHnd, pMDFtn, delegateClsHnd, pfIsOpenDelegate);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+// Determines whether the delegate creation obeys security transparency rules
+BOOL CEEInfo::isDelegateCreationAllowed (
+ CORINFO_CLASS_HANDLE delegateHnd,
+ CORINFO_METHOD_HANDLE calleeHnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ BOOL isCallAllowed = FALSE;
+
+ JIT_TO_EE_TRANSITION();
+
+ TypeHandle delegateType(delegateHnd);
+ MethodDesc* pCallee = GetMethod(calleeHnd);
+
+ isCallAllowed = COMDelegate::ValidateSecurityTransparency(pCallee, delegateType.AsMethodTable());
+
+ EE_TO_JIT_TRANSITION();
+
+ return isCallAllowed;
+}
+
+/*********************************************************************/
+ // return the unmanaged target *if the method has already been prelinked*.
+void* CEEInfo::getPInvokeUnmanagedTarget(CORINFO_METHOD_HANDLE method,
+ void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ void* result = NULL;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+#ifndef CROSSGEN_COMPILE
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc* ftn = GetMethod(method);
+ _ASSERTE(ftn->IsNDirect());
+ NDirectMethodDesc *pMD = (NDirectMethodDesc*)ftn;
+
+ if (pMD->NDirectTargetIsImportThunk())
+ {
+#ifdef FEATURE_MIXEDMODE // IJW
+ if (pMD->IsEarlyBound()
+#ifdef FEATURE_MULTICOREJIT
+            // Bug 126723: when on the multicore JIT background thread, skip running ClassInit and return NULL.
+            // When multicore JIT is enabled (StartProfile called), calling managed code is not allowed in the background thread.
+ && GetAppDomain()->GetMulticoreJitManager().AllowCCtorsToRunDuringJITing()
+#endif
+ )
+ {
+ EX_TRY
+ {
+ pMD->InitEarlyBoundNDirectTarget();
+ _ASSERTE(!pMD->NDirectTargetIsImportThunk());
+ result = pMD->GetNDirectTarget();
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+#endif // FEATURE_MIXEDMODE
+ }
+ else
+ {
+ result = pMD->GetNDirectTarget();
+ }
+
+ EE_TO_JIT_TRANSITION();
+#endif // CROSSGEN_COMPILE
+
+ return result;
+}
+
+/*********************************************************************/
+ // return the address of the fixup area for late-bound N/Direct calls.
+void* CEEInfo::getAddressOfPInvokeFixup(CORINFO_METHOD_HANDLE method,
+ void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ void * result = NULL;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ MethodDesc* ftn = GetMethod(method);
+ _ASSERTE(ftn->IsNDirect());
+ NDirectMethodDesc *pMD = (NDirectMethodDesc*)ftn;
+
+ result = (LPVOID)&(pMD->GetWriteableData()->m_pNDirectTarget);
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+
+/*********************************************************************/
+CORINFO_JUST_MY_CODE_HANDLE CEEInfo::getJustMyCodeHandle(
+ CORINFO_METHOD_HANDLE method,
+ CORINFO_JUST_MY_CODE_HANDLE**ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ CORINFO_JUST_MY_CODE_HANDLE result = NULL;
+
+ if (ppIndirection)
+ *ppIndirection = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ // Get the flag from the debugger.
+ MethodDesc* ftn = GetMethod(method);
+ DWORD * pFlagAddr = NULL;
+
+ if (g_pDebugInterface)
+ {
+ pFlagAddr = g_pDebugInterface->GetJMCFlagAddr(ftn->GetModule());
+ }
+
+ result = (CORINFO_JUST_MY_CODE_HANDLE) pFlagAddr;
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+/*********************************************************************/
+void InlinedCallFrame::GetEEInfo(CORINFO_EE_INFO::InlinedCallFrameInfo *pInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ pInfo->size = sizeof(GSCookie) + sizeof(InlinedCallFrame);
+
+ pInfo->offsetOfGSCookie = 0;
+ pInfo->offsetOfFrameVptr = sizeof(GSCookie);
+ pInfo->offsetOfFrameLink = sizeof(GSCookie) + Frame::GetOffsetOfNextLink();
+ pInfo->offsetOfCallSiteSP = sizeof(GSCookie) + offsetof(InlinedCallFrame, m_pCallSiteSP);
+ pInfo->offsetOfCalleeSavedFP = sizeof(GSCookie) + offsetof(InlinedCallFrame, m_pCalleeSavedFP);
+ pInfo->offsetOfCallTarget = sizeof(GSCookie) + offsetof(InlinedCallFrame, m_Datum);
+ pInfo->offsetOfReturnAddress = sizeof(GSCookie) + offsetof(InlinedCallFrame, m_pCallerReturnAddress);
+}
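+
+// Note: the frame area described above is a GSCookie immediately followed by the
+// InlinedCallFrame itself, so each offset reported to the JIT is simply the field's
+// offsetof() within the frame biased by sizeof(GSCookie).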
+
+/*********************************************************************/
+// Return details about EE internal data structures
+void CEEInfo::getEEInfo(CORINFO_EE_INFO *pEEInfoOut)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ INDEBUG(memset(pEEInfoOut, 0xCC, sizeof(*pEEInfoOut)));
+
+ JIT_TO_EE_TRANSITION();
+
+ InlinedCallFrame::GetEEInfo(&pEEInfoOut->inlinedCallFrameInfo);
+
+ // Offsets into the Thread structure
+ pEEInfoOut->offsetOfThreadFrame = Thread::GetOffsetOfCurrentFrame();
+ pEEInfoOut->offsetOfGCState = Thread::GetOffsetOfGCFlag();
+
+ // Delegate offsets
+ pEEInfoOut->offsetOfDelegateInstance = DelegateObject::GetOffsetOfTarget();
+ pEEInfoOut->offsetOfDelegateFirstTarget = DelegateObject::GetOffsetOfMethodPtr();
+
+ // Remoting offsets
+ pEEInfoOut->offsetOfTransparentProxyRP = TransparentProxyObject::GetOffsetOfRP();
+ pEEInfoOut->offsetOfRealProxyServer = RealProxyObject::GetOffsetOfServerObject();
+
+ pEEInfoOut->offsetOfObjArrayData = (DWORD)PtrArray::GetDataOffset();
+
+ OSVERSIONINFO sVerInfo;
+ sVerInfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+ GetOSVersion(&sVerInfo);
+
+ pEEInfoOut->osType = CORINFO_WINNT;
+
+ pEEInfoOut->osMajor = sVerInfo.dwMajorVersion;
+ pEEInfoOut->osMinor = sVerInfo.dwMinorVersion;
+ pEEInfoOut->osBuild = sVerInfo.dwBuildNumber;
+
+ EE_TO_JIT_TRANSITION();
+}
+
+LPCWSTR CEEInfo::getJitTimeLogFilename()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ LPCWSTR result = NULL;
+
+ JIT_TO_EE_TRANSITION();
+ result = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitTimeLogFile);
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+
+
+ // Return the TLS index the JIT can use to access the Thread structure, or -1 if optimized TLS access is not available
+DWORD CEEInfo::getThreadTLSIndex(void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ DWORD result = (DWORD)-1;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+#if !defined(CROSSGEN_COMPILE) && !defined(FEATURE_IMPLICIT_TLS)
+ result = GetThreadTLSIndex();
+
+ // The JIT can use the optimized TLS access only if the runtime is using it as well.
+    // (This is necessary to make managed code work well under appverifier.)
+ if (GetTLSAccessMode(result) == TLSACCESS_GENERIC)
+ result = (DWORD)-1;
+#endif
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+const void * CEEInfo::getInlinedCallFrameVptr(void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ void * result = NULL;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+#ifndef CROSSGEN_COMPILE
+ result = (void*)InlinedCallFrame::GetMethodFrameVPtr();
+#else
+ result = (void*)0x43210;
+#endif
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+
+SIZE_T * CEEInfo::getAddrModuleDomainID(CORINFO_MODULE_HANDLE module)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ SIZE_T * result = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ Module* pModule = GetModule(module);
+
+ result = pModule->GetAddrModuleID();
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+LONG * CEEInfo::getAddrOfCaptureThreadGlobal(void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ LONG * result = NULL;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ result = (LONG *)&g_TrapReturningThreads;
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+
+
+HRESULT CEEInfo::GetErrorHRESULT(struct _EXCEPTION_POINTERS *pExceptionPointers)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ //This function is called from the JIT64 exception filter during PEVerify. Because it is a filter, it
+ //can be "called" from a NOTHROW region in the case of StackOverflow. Security::MapToHR throws
+ //internally, but it catches all exceptions. Therefore, none of the children can cause an exception to
+ //percolate out of this function (except for Stack Overflow). Obviously I can't explain most of this to
+ //the Contracts system, and I can't add this CONTRACT_VIOLATION to the filter in Jit64.
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ JIT_TO_EE_TRANSITION();
+
+ GCX_COOP();
+
+ OBJECTREF throwable = GetThread()->LastThrownObject();
+ hr = GetExceptionHResult(throwable);
+
+ EE_TO_JIT_TRANSITION();
+
+ return hr;
+}
+
+
+ULONG CEEInfo::GetErrorMessage(__inout_ecount(bufferLength) LPWSTR buffer, ULONG bufferLength)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ ULONG result = 0;
+
+#ifndef CROSSGEN_COMPILE
+ JIT_TO_EE_TRANSITION();
+
+ GCX_COOP();
+
+ OBJECTREF throwable = GetThread()->LastThrownObject();
+
+ if (throwable != NULL)
+ {
+ EX_TRY
+ {
+ result = GetExceptionMessage(throwable, buffer, bufferLength);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+
+ EE_TO_JIT_TRANSITION();
+#endif
+
+ return result;
+}
+
+// This method is called from CEEInfo::FilterException, which
+// is run as part of the SEH filter clause for the JIT.
+// It is fatal to throw an exception while running a SEH filter clause,
+// so our contract is NOTHROW, NOTRIGGER.
+//
+int CEEInfo::FilterException(struct _EXCEPTION_POINTERS *pExceptionPointers)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ int result = 0;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ VALIDATE_BACKOUT_STACK_CONSUMPTION;
+
+ unsigned code = pExceptionPointers->ExceptionRecord->ExceptionCode;
+
+#ifdef _DEBUG
+ if (code == EXCEPTION_ACCESS_VIOLATION)
+ {
+ static int hit = 0;
+ if (hit++ == 0)
+ {
+ _ASSERTE(!"Access violation while Jitting!");
+ // If you set the debugger to catch access violations and 'go'
+            // you will get back to the point at which the access violation occurred
+ result = EXCEPTION_CONTINUE_EXECUTION;
+ }
+ else
+ {
+ result = EXCEPTION_CONTINUE_SEARCH;
+ }
+ }
+ else
+#endif // _DEBUG
+    // No one should be catching breakpoints or single steps.
+    // Similarly, the JIT doesn't know how to reset the guard page, so it shouldn't
+    // be catching a hard stack overflow either.
+ if (code == EXCEPTION_BREAKPOINT || code == EXCEPTION_SINGLE_STEP || code == EXCEPTION_STACK_OVERFLOW)
+ {
+ result = EXCEPTION_CONTINUE_SEARCH;
+ }
+#ifdef CROSSGEN_COMPILE
+ else
+ {
+ result = EXCEPTION_EXECUTE_HANDLER;
+ }
+#else
+ else if (!IsComPlusException(pExceptionPointers->ExceptionRecord))
+ {
+ result = EXCEPTION_EXECUTE_HANDLER;
+ }
+ else
+ {
+ GCX_COOP();
+
+ // This is actually the LastThrown exception object.
+ OBJECTREF throwable = CLRException::GetThrowableFromExceptionRecord(pExceptionPointers->ExceptionRecord);
+
+ if (throwable != NULL)
+ {
+ struct
+ {
+ OBJECTREF oLastThrownObject;
+ } _gc;
+
+ ZeroMemory(&_gc, sizeof(_gc));
+
+            // Set up the throwables
+ _gc.oLastThrownObject = throwable;
+
+ GCPROTECT_BEGIN(_gc);
+
+ // Don't catch ThreadAbort and other uncatchable exceptions
+ if (IsUncatchable(&_gc.oLastThrownObject))
+ result = EXCEPTION_CONTINUE_SEARCH;
+ else
+ result = EXCEPTION_EXECUTE_HANDLER;
+
+ GCPROTECT_END();
+ }
+ }
+#endif
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+// This code is called if FilterException chose to handle the exception.
+void CEEInfo::HandleException(struct _EXCEPTION_POINTERS *pExceptionPointers)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+#ifndef CROSSGEN_COMPILE
+ if (IsComPlusException(pExceptionPointers->ExceptionRecord))
+ {
+ GCX_COOP();
+
+ // This is actually the LastThrown exception object.
+ OBJECTREF throwable = CLRException::GetThrowableFromExceptionRecord(pExceptionPointers->ExceptionRecord);
+
+ if (throwable != NULL)
+ {
+ struct
+ {
+ OBJECTREF oLastThrownObject;
+ OBJECTREF oCurrentThrowable;
+ } _gc;
+
+ ZeroMemory(&_gc, sizeof(_gc));
+
+ PTR_Thread pCurThread = GetThread();
+
+            // Set up the throwables
+ _gc.oLastThrownObject = throwable;
+
+ // This will be NULL if no managed exception is active. Otherwise,
+ // it will reference the active throwable.
+ _gc.oCurrentThrowable = pCurThread->GetThrowable();
+
+ GCPROTECT_BEGIN(_gc);
+
+            // The JIT does not use or reference managed exceptions at all; it simply swallows them,
+ // or lets them fly through so that they will either get caught in managed code, the VM
+ // or will go unhandled.
+ //
+            // Blind swallowing of managed exceptions can break the semantics of which exception
+            // handler gets to process the managed exception first. The expected handler is the
+            // managed code exception handler (e.g. COMPlusFrameHandler on x86 and
+            // ProcessCLRException on 64-bit), which will set up the exception tracker for the
+            // exception and thereby enable the expected sync between the LastThrownObject (LTO),
+            // set up in RaiseTheExceptionInternalOnly, and the exception tracker.
+            //
+            // However, the JIT can break this by swallowing the managed exception before the
+            // managed code exception handler gets a chance to set up an exception tracker for it.
+            // Since no cleanup is done for the swallowed exception as part of the unwind (because
+            // no exception tracker may have been set up), we need to reset the LTO if it is out
+            // of sync with the active throwable.
+ //
+ // Hence, check if the LastThrownObject and active-exception throwable are in sync or not.
+ // If not, bring them in sync.
+ //
+ // Example
+ // -------
+            // It is possible that an exception was already in progress and, while processing it
+            // (e.g. invoking a finally block), we invoked the JIT, which raised another managed
+            // exception at the JIT-EE transition boundary that was swallowed by the JIT before
+            // the managed code exception handler saw it. This breaks the sync between the LTO
+            // and the active exception in the exception tracker.
+ if (_gc.oCurrentThrowable != _gc.oLastThrownObject)
+ {
+ // Update the LTO.
+ //
+                // Note: in case of OOM, this will get set to the OOM instance.
+ pCurThread->SafeSetLastThrownObject(_gc.oCurrentThrowable);
+ }
+
+ GCPROTECT_END();
+ }
+ }
+#endif
+
+ EE_TO_JIT_TRANSITION_LEAF();
+}
+
+void ThrowExceptionForJit(HRESULT res);
+
+void CEEInfo::ThrowExceptionForJitResult(
+ HRESULT result)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ if (!SUCCEEDED(result))
+ ThrowExceptionForJit(result);
+
+ EE_TO_JIT_TRANSITION();
+}
+
+
+CORINFO_MODULE_HANDLE CEEInfo::embedModuleHandle(CORINFO_MODULE_HANDLE handle,
+ void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(!IsDynamicScope(handle));
+ }
+ CONTRACTL_END;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return handle;
+}
+
+CORINFO_CLASS_HANDLE CEEInfo::embedClassHandle(CORINFO_CLASS_HANDLE handle,
+ void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return handle;
+}
+
+CORINFO_FIELD_HANDLE CEEInfo::embedFieldHandle(CORINFO_FIELD_HANDLE handle,
+ void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return handle;
+}
+
+CORINFO_METHOD_HANDLE CEEInfo::embedMethodHandle(CORINFO_METHOD_HANDLE handle,
+ void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return handle;
+}
+
+/*********************************************************************/
+IEEMemoryManager* CEEInfo::getMemoryManager()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ IEEMemoryManager* result = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ result = GetEEMemoryManager();
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+/*********************************************************************/
+int CEEInfo::doAssert(const char* szFile, int iLine, const char* szExpr)
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+ STATIC_CONTRACT_DEBUG_ONLY;
+
+ int result = 0;
+
+ JIT_TO_EE_TRANSITION();
+
+#ifdef CROSSGEN_COMPILE
+ ThrowHR(COR_E_INVALIDPROGRAM);
+#else
+
+#ifdef _DEBUG
+ BEGIN_DEBUG_ONLY_CODE;
+ result = _DbgBreakCheck(szFile, iLine, szExpr);
+ END_DEBUG_ONLY_CODE;
+#else // !_DEBUG
+ result = 1; // break into debugger
+#endif // !_DEBUG
+
+#endif
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+void CEEInfo::reportFatalError(CorJitResult result)
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ STRESS_LOG2(LF_JIT,LL_ERROR, "Jit reported error 0x%x while compiling 0x%p\n",
+ (int)result, (INT_PTR)getMethodBeingCompiled());
+
+ EE_TO_JIT_TRANSITION_LEAF();
+}
+
+BOOL CEEInfo::logMsg(unsigned level, const char* fmt, va_list args)
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+ STATIC_CONTRACT_DEBUG_ONLY;
+
+ BOOL result = FALSE;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+#ifdef LOGGING
+ if (LoggingOn(LF_JIT, level))
+ {
+ LogSpewValist(LF_JIT, level, (char*) fmt, args);
+ result = TRUE;
+ }
+#endif // LOGGING
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+void CEEInfo::yieldExecution()
+{
+ WRAPPER_NO_CONTRACT;
+    // DDR: 17066 - Performance degradation
+    // The JIT should not give up its time slice when we are not hosted
+ if (CLRTaskHosted())
+ {
+        // SwitchToTask forces the current thread to give up its quantum, while a host can decide what
+        // to do with Sleep if the current thread has not run out of its quantum yet.
+ ClrSleepEx(0, FALSE);
+ }
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+/*********************************************************************/
+
+void* CEEJitInfo::getHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
+ void ** ppIndirection) /* OUT */
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ void* result = NULL;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ _ASSERTE(ftnNum < CORINFO_HELP_COUNT);
+
+ void* pfnHelper = hlpFuncTable[ftnNum].pfnHelper;
+
+ size_t dynamicFtnNum = ((size_t)pfnHelper - 1);
+ if (dynamicFtnNum < DYNAMIC_CORINFO_HELP_COUNT)
+ {
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:26001) // "Bounds checked above using the underflow trick"
+#endif /*_PREFAST_ */
+
+#if defined(_TARGET_AMD64_)
+ // Always call profiler helpers indirectly to avoid going through jump stubs.
+        // Jump stubs corrupt RAX, which has to be preserved for profiler probes.
+ if (dynamicFtnNum == DYNAMIC_CORINFO_HELP_PROF_FCN_ENTER ||
+ dynamicFtnNum == DYNAMIC_CORINFO_HELP_PROF_FCN_LEAVE ||
+ dynamicFtnNum == DYNAMIC_CORINFO_HELP_PROF_FCN_TAILCALL)
+ {
+ _ASSERTE(ppIndirection != NULL);
+ *ppIndirection = &hlpDynamicFuncTable[dynamicFtnNum].pfnHelper;
+ return NULL;
+ }
+#endif
+
+#if defined(ENABLE_FAST_GCPOLL_HELPER)
+        // Always call this indirectly so that we can swap GC Poll helpers.
+ if (dynamicFtnNum == DYNAMIC_CORINFO_HELP_POLL_GC)
+ {
+ _ASSERTE(ppIndirection != NULL);
+ *ppIndirection = &hlpDynamicFuncTable[dynamicFtnNum].pfnHelper;
+ return NULL;
+ }
+#endif
+
+ pfnHelper = hlpDynamicFuncTable[dynamicFtnNum].pfnHelper;
+
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif /*_PREFAST_*/
+ }
+
+ _ASSERTE(pfnHelper);
+
+ result = (LPVOID)GetEEFuncEntryPoint(pfnHelper);
+
+ EE_TO_JIT_TRANSITION_LEAF();
+
+ return result;
+}
+
+PCODE CEEJitInfo::getHelperFtnStatic(CorInfoHelpFunc ftnNum)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ void* pfnHelper = hlpFuncTable[ftnNum].pfnHelper;
+
+    // If pfnHelper is an index into the dynamic helper table, it should be less
+    // than DYNAMIC_CORINFO_HELP_COUNT. In this case we need to find the actual pfnHelper
+    // using an extra indirection. Note the special case where pfnHelper == 0:
+    // pfnHelper - 1 underflows, so we avoid the indirection.
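+    //
+    // Illustrative arithmetic (example values only): a table entry of pfnHelper == (void*)3
+    // yields (size_t)pfnHelper - 1 == 2, a valid dynamic table index; a real helper address
+    // is large enough that the subtraction still fails the bounds test below; and
+    // pfnHelper == 0 wraps around to SIZE_MAX, which also fails the test.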
+ if (((size_t)pfnHelper - 1) < DYNAMIC_CORINFO_HELP_COUNT)
+ {
+ pfnHelper = hlpDynamicFuncTable[((size_t)pfnHelper - 1)].pfnHelper;
+ }
+
+ _ASSERTE(pfnHelper != NULL);
+
+ return GetEEFuncEntryPoint(pfnHelper);
+}
+
+void CEEJitInfo::addActiveDependency(CORINFO_MODULE_HANDLE moduleFrom,CORINFO_MODULE_HANDLE moduleTo)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(moduleFrom));
+ PRECONDITION(!IsDynamicScope(moduleFrom));
+ PRECONDITION(CheckPointer(moduleTo));
+ PRECONDITION(!IsDynamicScope(moduleTo));
+ PRECONDITION(moduleFrom != moduleTo);
+ }
+ CONTRACTL_END;
+
+    // This is only called internally. JIT-EE transition is not needed.
+ // JIT_TO_EE_TRANSITION();
+
+ Module *dependency = (Module *)moduleTo;
+ _ASSERTE(!dependency->IsSystem());
+
+ if (m_pMethodBeingCompiled->IsLCGMethod())
+ {
+ // The context module of the m_pMethodBeingCompiled is irrelevant. Rather than tracking
+ // the dependency, we just do immediate activation.
+ dependency->EnsureActive();
+ }
+ else
+ {
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ Module *context = (Module *)moduleFrom;
+
+ // Record active dependency for loader.
+ context->AddActiveDependency(dependency, FALSE);
+#else
+ dependency->EnsureActive();
+#endif
+ }
+
+ // EE_TO_JIT_TRANSITION();
+}
+
+
+// Wrapper around CEEInfo::GetProfilingHandle. The first time this is called for a
+// method desc, it calls through to EEToProfInterfaceImpl::EEFunctionIDMapper and caches the
+// result in CEEJitInfo::GetProfilingHandleCache. Thereafter, this wrapper returns the cached values
+// rather than calling into CEEInfo::GetProfilingHandle each time. This avoids
+// making duplicate calls into the profiler's FunctionIDMapper callback.
+void CEEJitInfo::GetProfilingHandle(BOOL *pbHookFunction,
+ void **pProfilerHandle,
+ BOOL *pbIndirectedHandles)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ _ASSERTE(pbHookFunction != NULL);
+ _ASSERTE(pProfilerHandle != NULL);
+ _ASSERTE(pbIndirectedHandles != NULL);
+
+ if (!m_gphCache.m_bGphIsCacheValid)
+ {
+#ifdef PROFILING_SUPPORTED
+ JIT_TO_EE_TRANSITION();
+
+ // Cache not filled in, so make our first and only call to CEEInfo::GetProfilingHandle here
+
+        // Methods with no metadata behind them cannot be exposed to tools expecting metadata
+        // (profiler, debugger...); they should never come here, as they are called out in GetCompileFlag.
+ _ASSERTE(!m_pMethodBeingCompiled->IsNoMetadata());
+
+ // We pass in the typical method definition to the function mapper because in
+ // Whidbey all the profiling API transactions are done in terms of typical
+ // method definitions not instantiations.
+ BOOL bHookFunction = TRUE;
+ void * profilerHandle = m_pMethodBeingCompiled;
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerFunctionIDMapperEnabled());
+ profilerHandle = (void *)g_profControlBlock.pProfInterface->EEFunctionIDMapper((FunctionID) m_pMethodBeingCompiled, &bHookFunction);
+ END_PIN_PROFILER();
+ }
+
+ m_gphCache.m_pvGphProfilerHandle = profilerHandle;
+ m_gphCache.m_bGphHookFunction = (bHookFunction != FALSE);
+ m_gphCache.m_bGphIsCacheValid = true;
+
+ EE_TO_JIT_TRANSITION();
+#endif //PROFILING_SUPPORTED
+ }
+
+    // Our cache of these values uses bitfield bools, but the interface requires
+    // BOOL. So to avoid setting aside a staging area on the stack for these
+    // values, we fill them in directly in the if (not cached yet) case.
+ *pbHookFunction = (m_gphCache.m_bGphHookFunction != false);
+
+    // The remaining values must be in the cache by now, so use them.
+ *pProfilerHandle = m_gphCache.m_pvGphProfilerHandle;
+
+ //
+ // This is the JIT case, which is never indirected.
+ //
+ *pbIndirectedHandles = FALSE;
+}
+
+/*********************************************************************/
+void CEEJitInfo::BackoutJitData(EEJitManager * jitMgr)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ CodeHeader* pCodeHeader = GetCodeHeader();
+ if (pCodeHeader)
+ jitMgr->RemoveJitData(pCodeHeader, m_GCinfo_len, m_EHinfo_len);
+}
+
+/*********************************************************************/
+// Route jit information to the Jit Debug store.
+void CEEJitInfo::setBoundaries(CORINFO_METHOD_HANDLE ftn, ULONG32 cMap,
+ ICorDebugInfo::OffsetMapping *pMap)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ // We receive ownership of the array
+ _ASSERTE(m_pOffsetMapping == NULL && m_iOffsetMapping == 0);
+ m_iOffsetMapping = cMap;
+ m_pOffsetMapping = pMap;
+
+ EE_TO_JIT_TRANSITION();
+}
+
+void CEEJitInfo::setVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo *vars)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ // We receive ownership of the array
+ _ASSERTE(m_pNativeVarInfo == NULL && m_iNativeVarInfo == 0);
+ m_iNativeVarInfo = cVars;
+ m_pNativeVarInfo = vars;
+
+ EE_TO_JIT_TRANSITION();
+}
+
+void CEEJitInfo::CompressDebugInfo()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ // Don't track JIT info for DynamicMethods.
+ if (m_pMethodBeingCompiled->IsDynamicMethod())
+ return;
+
+ if (m_iOffsetMapping == 0 && m_iNativeVarInfo == 0)
+ return;
+
+ JIT_TO_EE_TRANSITION();
+
+ EX_TRY
+ {
+ PTR_BYTE pDebugInfo = CompressDebugInfo::CompressBoundariesAndVars(
+ m_pOffsetMapping, m_iOffsetMapping,
+ m_pNativeVarInfo, m_iNativeVarInfo,
+ NULL,
+ m_pMethodBeingCompiled->GetLoaderAllocator()->GetLowFrequencyHeap());
+
+ GetCodeHeader()->SetDebugInfo(pDebugInfo);
+ }
+ EX_CATCH
+ {
+ // Just ignore exceptions here. The debugger's structures will still be in a consistent state.
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ EE_TO_JIT_TRANSITION();
+}
+
+// Reserve memory for the method/funclet's unwind information.
+// Note that this must be called before allocMem. It should be
+// called once for the main method, once for every funclet, and
+// once for every block of cold code for which allocUnwindInfo
+// will be called.
+//
+// This is necessary because jitted code must allocate all the
+// memory needed for the unwindInfo at the allocMem call.
+// For prejitted code we split up the unwinding information into
+// separate sections .rdata and .pdata.
+//
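+// A sketch of the expected call order for a method with one funclet (illustrative
+// only; CORJIT_FUNC_HANDLER stands in for whichever funclet kind the JIT emits):
+//
+//   reserveUnwindInfo(FALSE, FALSE, mainUnwindSize);    // main method
+//   reserveUnwindInfo(TRUE,  FALSE, funcletUnwindSize); // once per funclet
+//   allocMem(...);                                      // carves out m_theUnwindBlock
+//   allocUnwindInfo(..., CORJIT_FUNC_ROOT);             // main method first
+//   allocUnwindInfo(..., CORJIT_FUNC_HANDLER);          // then each funclet
+//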
+void CEEJitInfo::reserveUnwindInfo(BOOL isFunclet, BOOL isColdCode, ULONG unwindSize)
+{
+#ifdef WIN64EXCEPTIONS
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ CONSISTENCY_CHECK_MSG(!isColdCode, "Hot/Cold splitting is not supported in jitted code");
+ _ASSERTE_MSG(m_theUnwindBlock == NULL,
+ "reserveUnwindInfo() can only be called before allocMem(), but allocMem() has already been called. "
+ "This may indicate the JIT has hit a NO_WAY assert after calling allocMem(), and is re-JITting. "
+ "Set COMPLUS_JitBreakOnBadCode=1 and rerun to get the real error.");
+
+ ULONG currentSize = unwindSize;
+
+#if defined(_TARGET_AMD64_)
+    // Add space for the personality routine; it must be 4-byte aligned.
+ // Everything in the UNWIND_INFO up to the variable-sized UnwindCodes
+ // array has already had its size included in unwindSize by the caller.
+ currentSize += sizeof(ULONG);
+
+    // Note that the count of unwind codes (2 bytes each) is stored as a UBYTE,
+    // so the largest size could be 510 bytes, plus the header and
+    // language-specific stuff. This can't overflow.
+
+ _ASSERTE(FitsInU4(currentSize + sizeof(ULONG)));
+ currentSize = (ULONG)(ALIGN_UP(currentSize, sizeof(ULONG)));
+#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ // The JIT passes in a 4-byte aligned block of unwind data.
+ _ASSERTE(IS_ALIGNED(currentSize, sizeof(ULONG)));
+
+    // Add space for the personality routine; it must be 4-byte aligned.
+ currentSize += sizeof(ULONG);
+#else
+ PORTABILITY_ASSERT("CEEJitInfo::reserveUnwindInfo");
+#endif // !defined(_TARGET_AMD64_)
+
+ m_totalUnwindSize += currentSize;
+
+ m_totalUnwindInfos++;
+
+ EE_TO_JIT_TRANSITION_LEAF();
+#else // WIN64EXCEPTIONS
+ LIMITED_METHOD_CONTRACT;
+ // Dummy implementation to make cross-platform altjit work
+#endif // WIN64EXCEPTIONS
+}
+
+// Allocate and initialize the .rdata and .pdata for this method or
+// funclet and get the block of memory needed for the machine specific
+// unwind information (the info for crawling the stack frame).
+// Note that allocMem must be called first.
+//
+// The pHotCode parameter points at the first byte of the code of the method
+// The startOffset and endOffset are the region (main or funclet) that
+// we are to allocate and create .rdata and .pdata for.
+// The pUnwindBlock is copied and contains the .pdata unwind area
+//
+// Parameters:
+//
+// pHotCode main method code buffer, always filled in
+// pColdCode always NULL for jitted code
+// startOffset start of code block, relative to pHotCode
+// endOffset end of code block, relative to pHotCode
+// unwindSize size of unwind info pointed to by pUnwindBlock
+// pUnwindBlock pointer to unwind info
+// funcKind type of funclet (main method code, handler, filter)
+//
+void CEEJitInfo::allocUnwindInfo (
+ BYTE * pHotCode, /* IN */
+ BYTE * pColdCode, /* IN */
+ ULONG startOffset, /* IN */
+ ULONG endOffset, /* IN */
+ ULONG unwindSize, /* IN */
+ BYTE * pUnwindBlock, /* IN */
+ CorJitFuncKind funcKind /* IN */
+ )
+{
+#ifdef WIN64EXCEPTIONS
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(m_theUnwindBlock != NULL);
+ PRECONDITION(m_usedUnwindSize < m_totalUnwindSize);
+ PRECONDITION(m_usedUnwindInfos < m_totalUnwindInfos);
+ PRECONDITION(endOffset <= m_codeSize);
+ } CONTRACTL_END;
+
+ CONSISTENCY_CHECK_MSG(pColdCode == NULL, "Hot/Cold code splitting not supported for jitted code");
+
+ JIT_TO_EE_TRANSITION();
+
+ //
+ // We add one callback-type dynamic function table per range section.
+ // Therefore, the RUNTIME_FUNCTION info is always relative to the
+ // image base contained in the dynamic function table, which happens
+ // to be the LowAddress of the range section. The JIT has no
+ // knowledge of the range section, so it gives us offsets that are
+ // relative to the beginning of the method (pHotCode) and we allocate
+ // and initialize the RUNTIME_FUNCTION data and record its location
+ // in this function.
+ //
+
+ if (funcKind != CORJIT_FUNC_ROOT)
+ {
+ // The main method should be emitted before funclets
+ _ASSERTE(m_usedUnwindInfos > 0);
+ }
+
+ PRUNTIME_FUNCTION pRuntimeFunction = m_CodeHeader->GetUnwindInfo(m_usedUnwindInfos);
+ m_usedUnwindInfos++;
+
+ // Make sure that the RUNTIME_FUNCTION is aligned on a DWORD sized boundary
+ _ASSERTE(IS_ALIGNED(pRuntimeFunction, sizeof(DWORD)));
+
+ UNWIND_INFO * pUnwindInfo = (UNWIND_INFO *) &(m_theUnwindBlock[m_usedUnwindSize]);
+ m_usedUnwindSize += unwindSize;
+
+#if defined(_TARGET_AMD64_)
+    // Add space for the personality routine; it must be 4-byte aligned.
+ // Everything in the UNWIND_INFO up to the variable-sized UnwindCodes
+ // array has already had its size included in unwindSize by the caller.
+ m_usedUnwindSize += sizeof(ULONG);
+
+    // Note that the count of unwind codes (2 bytes each) is stored as a UBYTE,
+    // so the largest size could be 510 bytes, plus the header and
+    // language-specific stuff. This can't overflow.
+
+ _ASSERTE(FitsInU4(m_usedUnwindSize + sizeof(ULONG)));
+ m_usedUnwindSize = (ULONG)(ALIGN_UP(m_usedUnwindSize,sizeof(ULONG)));
+#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ // The JIT passes in a 4-byte aligned block of unwind data.
+ _ASSERTE(IS_ALIGNED(m_usedUnwindSize, sizeof(ULONG)));
+
+    // Add space for the personality routine; it must be 4-byte aligned.
+ m_usedUnwindSize += sizeof(ULONG);
+#else
+    PORTABILITY_ASSERT("CEEJitInfo::allocUnwindInfo");
+#endif
+
+ _ASSERTE(m_usedUnwindSize <= m_totalUnwindSize);
+
+ // Make sure that the UnwindInfo is aligned
+ _ASSERTE(IS_ALIGNED(pUnwindInfo, sizeof(ULONG)));
+
+ /* Calculate Image Relative offset to add to the jit generated unwind offsets */
+
+ TADDR baseAddress = m_moduleBase;
+
+ size_t currentCodeSizeT = (size_t)pHotCode - baseAddress;
+
+ /* Check if currentCodeSizeT offset fits in 32-bits */
+ if (!FitsInU4(currentCodeSizeT))
+ {
+ _ASSERTE(!"Bad currentCodeSizeT");
+ COMPlusThrowHR(E_FAIL);
+ }
+
+ /* Check if EndAddress offset fits in 32-bit */
+ if (!FitsInU4(currentCodeSizeT + endOffset))
+ {
+ _ASSERTE(!"Bad currentCodeSizeT");
+ COMPlusThrowHR(E_FAIL);
+ }
+
+ unsigned currentCodeOffset = (unsigned) currentCodeSizeT;
+
+ /* Calculate Unwind Info delta */
+ size_t unwindInfoDeltaT = (size_t) pUnwindInfo - baseAddress;
+
+ /* Check if unwindDeltaT offset fits in 32-bits */
+ if (!FitsInU4(unwindInfoDeltaT))
+ {
+ _ASSERTE(!"Bad unwindInfoDeltaT");
+ COMPlusThrowHR(E_FAIL);
+ }
+
+ unsigned unwindInfoDelta = (unsigned) unwindInfoDeltaT;
+
+ RUNTIME_FUNCTION__SetBeginAddress(pRuntimeFunction, currentCodeOffset + startOffset);
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ pRuntimeFunction->EndAddress = currentCodeOffset + endOffset;
+#endif
+
+ RUNTIME_FUNCTION__SetUnwindInfoAddress(pRuntimeFunction, unwindInfoDelta);
+
+#ifdef _DEBUG
+ if (funcKind != CORJIT_FUNC_ROOT)
+ {
+        // Check that the new funclet doesn't overlap any existing funclet.
+
+ for (ULONG iUnwindInfo = 0; iUnwindInfo < m_usedUnwindInfos - 1; iUnwindInfo++)
+ {
+ PRUNTIME_FUNCTION pOtherFunction = m_CodeHeader->GetUnwindInfo(iUnwindInfo);
+ _ASSERTE( RUNTIME_FUNCTION__BeginAddress(pOtherFunction) >= RUNTIME_FUNCTION__EndAddress(pRuntimeFunction, baseAddress)
+ || RUNTIME_FUNCTION__EndAddress(pOtherFunction, baseAddress) <= RUNTIME_FUNCTION__BeginAddress(pRuntimeFunction));
+ }
+ }
+#endif // _DEBUG
+
+#if defined(_TARGET_AMD64_)
+
+ /* Copy the UnwindBlock */
+ memcpy(pUnwindInfo, pUnwindBlock, unwindSize);
+
+ pUnwindInfo->Flags = UNW_FLAG_EHANDLER | UNW_FLAG_UHANDLER;
+
+ ULONG * pPersonalityRoutine = (ULONG*)ALIGN_UP(&(pUnwindInfo->UnwindCode[pUnwindInfo->CountOfUnwindCodes]), sizeof(ULONG));
+ *pPersonalityRoutine = ExecutionManager::GetCLRPersonalityRoutineValue();
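+
+    // That is, the personality routine value lands in the first 4-byte-aligned slot
+    // after the variable-length UnwindCode array - the extra ULONG that
+    // reserveUnwindInfo accounted for above.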
+
+#elif defined(_TARGET_ARM64_)
+
+ /* Copy the UnwindBlock */
+ memcpy(pUnwindInfo, pUnwindBlock, unwindSize);
+
+ *(LONG *)pUnwindInfo |= (1 << 20); // X bit
+
+ ULONG * pPersonalityRoutine = (ULONG*)((BYTE *)pUnwindInfo + ALIGN_UP(unwindSize, sizeof(ULONG)));
+ *pPersonalityRoutine = ExecutionManager::GetCLRPersonalityRoutineValue();
+
+#elif defined(_TARGET_ARM_)
+
+ /* Copy the UnwindBlock */
+ memcpy(pUnwindInfo, pUnwindBlock, unwindSize);
+
+ *(LONG *)pUnwindInfo |= (1 << 20); // X bit
+
+ ULONG * pPersonalityRoutine = (ULONG*)((BYTE *)pUnwindInfo + ALIGN_UP(unwindSize, sizeof(ULONG)));
+ *pPersonalityRoutine = (TADDR)ProcessCLRException - baseAddress;
+#endif
+
+#if defined(_TARGET_AMD64_)
+    // Publish the new unwind information in a way that the ETW stack crawler can find it
+ if (m_usedUnwindInfos == m_totalUnwindInfos)
+ UnwindInfoTable::PublishUnwindInfoForMethod(baseAddress, m_CodeHeader->GetUnwindInfo(0), m_totalUnwindInfos);
+#endif // defined(_TARGET_AMD64_)
+
+ EE_TO_JIT_TRANSITION();
+#else // WIN64EXCEPTIONS
+ LIMITED_METHOD_CONTRACT;
+ // Dummy implementation to make cross-platform altjit work
+#endif // WIN64EXCEPTIONS
+}
+
+void CEEJitInfo::recordCallSite(ULONG instrOffset,
+ CORINFO_SIG_INFO * callSig,
+ CORINFO_METHOD_HANDLE methodHandle)
+{
+    // Currently, only testing tools use this method. The EE itself doesn't need to record this information.
+ // N.B. The memory that callSig points to is managed by the JIT and isn't guaranteed to be around after
+ // this function returns, so future implementations should copy the sig info if they want it to persist.
+ LIMITED_METHOD_CONTRACT;
+}
+
+// This is a variant for AMD64 or other machines that
+// cannot always hold the destination address in a 32-bit location
+// A relocation is recorded if we are pre-jitting.
+// A jump thunk may be inserted if we are jitting
+
+void CEEJitInfo::recordRelocation(void * location,
+ void * target,
+ WORD fRelocType,
+ WORD slot,
+ INT32 addlDelta)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+#ifdef _WIN64
+ JIT_TO_EE_TRANSITION();
+
+ INT64 delta;
+
+ switch (fRelocType)
+ {
+ case IMAGE_REL_BASED_DIR64:
+ // Write 64-bits into location
+ *((UINT64 *) ((BYTE *) location + slot)) = (UINT64) target;
+ break;
+
+#ifdef _TARGET_AMD64_
+ case IMAGE_REL_BASED_REL32:
+ {
+ target = (BYTE *)target + addlDelta;
+
+ INT32 * fixupLocation = (INT32 *) ((BYTE *) location + slot);
+ BYTE * baseAddr = (BYTE *)fixupLocation + sizeof(INT32);
+
+ delta = (INT64)((BYTE *)target - baseAddr);
+
+ //
+ // Do we need to insert a jump stub to make the source reach the target?
+ //
+ // Note that we cannot stress insertion of jump stub by inserting it unconditionally. JIT records the relocations
+ // for intra-module jumps and calls. It does not expect the register used by the jump stub to be trashed.
+ //
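+            // Worked example (illustrative addresses): with fixupLocation at 0x7FF000001000
+            // and target at 0x7FF000002000, baseAddr is 0x7FF000001004 and delta is 0xFFC,
+            // which easily fits in 32 bits; only a distance beyond +/-2GB takes the retry
+            // or jump-stub paths below.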
+ if (!FitsInI4(delta))
+ {
+ if (m_fAllowRel32)
+ {
+ //
+ // When m_fAllowRel32 == TRUE, the JIT will use REL32s for both data addresses and direct code targets.
+ // Since we cannot tell what the relocation is for, we have to defensively retry.
+ //
+ m_fRel32Overflow = TRUE;
+ delta = 0;
+ }
+ else
+ {
+ //
+                    // When m_fAllowRel32 == FALSE, the JIT will use REL32s for direct code targets only.
+ // Use jump stub.
+ //
+ delta = rel32UsingJumpStub(fixupLocation, (PCODE)target, m_pMethodBeingCompiled);
+ }
+ }
+
+ LOG((LF_JIT, LL_INFO100000, "Encoded a PCREL32 at" FMT_ADDR "to" FMT_ADDR "+%d, delta is 0x%04x\n",
+ DBG_ADDR(fixupLocation), DBG_ADDR(target), addlDelta, delta));
+
+ // Write the 32-bits pc-relative delta into location
+ *fixupLocation = (INT32) delta;
+ }
+ break;
+#endif // _TARGET_AMD64_
+
+#ifdef _TARGET_ARM64_
+ case IMAGE_REL_ARM64_BRANCH26: // 26 bit offset << 2 & sign ext, for B and BL
+ {
+ _ASSERTE(slot == 0);
+ _ASSERTE(addlDelta == 0);
+
+ PCODE branchTarget = (PCODE) target;
+ _ASSERTE((branchTarget & 0x3) == 0); // the low two bits must be zero
+
+ PCODE fixupLocation = (PCODE) location;
+ _ASSERTE((fixupLocation & 0x3) == 0); // the low two bits must be zero
+
+ delta = (INT64)(branchTarget - fixupLocation);
+ _ASSERTE((delta & 0x3) == 0); // the low two bits must be zero
+
+ UINT32 branchInstr = *((UINT32*) fixupLocation);
+ branchInstr &= 0xFC000000; // keep bits 31-26
+ _ASSERTE((branchInstr & 0x7FFFFFFF) == 0x14000000); // Must be B or BL
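+
+            // The B/BL immediate is a signed 26-bit word offset - a 28-bit byte offset
+            // once the two implied low zero bits are counted - so the directly reachable
+            // range is +/-2^27 bytes (about +/-128MB); that is what FitsInRel28 checks below.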
+
+ //
+            //
+ //
+ if (!FitsInRel28(delta))
+ {
+ // Use jump stub.
+ //
+ TADDR baseAddr = (TADDR)fixupLocation;
+ TADDR loAddr = baseAddr - 0x08000000; // -2^27
+ TADDR hiAddr = baseAddr + 0x07FFFFFF; // +2^27-1
+
+ // Check for the wrap around cases
+ if (loAddr > baseAddr)
+ loAddr = UINT64_MIN; // overflow
+ if (hiAddr < baseAddr)
+ hiAddr = UINT64_MAX; // overflow
+
+ PCODE jumpStubAddr = ExecutionManager::jumpStub(m_pMethodBeingCompiled,
+ (PCODE) target,
+ (BYTE *) loAddr,
+ (BYTE *) hiAddr);
+
+ delta = (INT64)(jumpStubAddr - fixupLocation);
+
+ if (!FitsInRel28(delta))
+ {
+ _ASSERTE(!"jump stub was not in expected range");
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+
+ LOG((LF_JIT, LL_INFO100000, "Using JumpStub at" FMT_ADDR "that jumps to" FMT_ADDR "\n",
+ DBG_ADDR(jumpStubAddr), DBG_ADDR(target)));
+ }
+
+ LOG((LF_JIT, LL_INFO100000, "Encoded a BRANCH26 at" FMT_ADDR "to" FMT_ADDR ", delta is 0x%04x\n",
+ DBG_ADDR(fixupLocation), DBG_ADDR(target), delta));
+
+ _ASSERTE(FitsInRel28(delta));
+
+ PutArm64Rel28((UINT32*) fixupLocation, (INT32)delta);
+ }
+ break;
+#endif // _TARGET_ARM64_
+
+ default:
+ _ASSERTE(!"Unknown reloc type");
+ break;
+ }
+
+ EE_TO_JIT_TRANSITION();
+#else // _WIN64
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ // Nothing to do on 32-bit
+
+ EE_TO_JIT_TRANSITION_LEAF();
+#endif // _WIN64
+}
+
+WORD CEEJitInfo::getRelocTypeHint(void * target)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+#ifdef _TARGET_AMD64_
+ if (m_fAllowRel32)
+ {
+ // The JIT calls this method for data addresses only. It always uses REL32s for direct code targets.
+ if (IsPreferredExecutableRange(target))
+ return IMAGE_REL_BASED_REL32;
+ }
+#endif // _TARGET_AMD64_
+
+ // No hints
+ return (WORD)-1;
+}
+
+void CEEJitInfo::getModuleNativeEntryPointRange(void** pStart, void** pEnd)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION_LEAF();
+
+ *pStart = *pEnd = 0;
+
+ EE_TO_JIT_TRANSITION_LEAF();
+}
+
+DWORD CEEJitInfo::getExpectedTargetArchitecture()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return IMAGE_FILE_MACHINE_NATIVE;
+}
+
+void CEEInfo::JitProcessShutdownWork()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ EEJitManager* jitMgr = ExecutionManager::GetEEJitManager();
+
+ // If we didn't load the JIT, there is no work to do.
+ if (jitMgr->m_jit != NULL)
+ {
+ // Do the shutdown work.
+ jitMgr->m_jit->ProcessShutdownWork(this);
+ }
+
+#ifdef ALLOW_SXS_JIT
+ if (jitMgr->m_alternateJit != NULL)
+ {
+ jitMgr->m_alternateJit->ProcessShutdownWork(this);
+ }
+#endif // ALLOW_SXS_JIT
+}
+
+/*********************************************************************/
+InfoAccessType CEEJitInfo::constructStringLiteral(CORINFO_MODULE_HANDLE scopeHnd,
+ mdToken metaTok,
+ void **ppValue)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ InfoAccessType result = IAT_PVALUE;
+
+ JIT_TO_EE_TRANSITION();
+
+ _ASSERTE(ppValue != NULL);
+
+ if (IsDynamicScope(scopeHnd))
+ {
+ *ppValue = (LPVOID)GetDynamicResolver(scopeHnd)->ConstructStringLiteral(metaTok);
+ }
+ else
+ {
+ *ppValue = (LPVOID)ConstructStringLiteral(scopeHnd, metaTok); // throws
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+InfoAccessType CEEJitInfo::emptyStringLiteral(void ** ppValue)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ InfoAccessType result = IAT_PVALUE;
+
+ if(NingenEnabled())
+ {
+ *ppValue = NULL;
+ return result;
+ }
+
+ JIT_TO_EE_TRANSITION();
+ *ppValue = StringObject::GetEmptyStringRefPtr();
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+void* CEEJitInfo::getFieldAddress(CORINFO_FIELD_HANDLE fieldHnd,
+ void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ void *result = NULL;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ // Do not bother with initialization if we are only verifying the method.
+ if (isVerifyOnly())
+ {
+ return (void *)0x10;
+ }
+
+ JIT_TO_EE_TRANSITION();
+
+ FieldDesc* field = (FieldDesc*) fieldHnd;
+
+#ifdef MDIL
+ if (SystemDomain::GetCurrentDomain()->IsMDILCompilationDomain() && !isVerifyOnly())
+ {
+ result = (void *)0xBAADF00D;
+ goto Done;
+ }
+#endif // MDIL
+
+ MethodTable* pMT = field->GetEnclosingMethodTable();
+
+ _ASSERTE(!pMT->ContainsGenericVariables());
+
+ // We must not call here for statics of collectible types.
+ _ASSERTE(!pMT->Collectible());
+
+ void *base = NULL;
+
+ if (!field->IsRVA())
+ {
+ // <REVISIT_TODO>@todo: assert that the current method being compiled is unshared</REVISIT_TODO>
+
+ // Allocate space for the local class if necessary, but don't trigger
+ // class construction.
+ DomainLocalModule *pLocalModule = pMT->GetDomainLocalModule();
+ pLocalModule->PopulateClass(pMT);
+
+ GCX_COOP();
+
+ base = (void *) field->GetBase();
+ }
+
+ result = field->GetStaticAddressHandle(base);
+
+#ifdef MDIL
+Done: ;
+#endif // MDIL
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+static void *GetClassSync(MethodTable *pMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ OBJECTREF ref = pMT->GetManagedClassObject();
+ return (void*)ref->GetSyncBlock()->GetMonitor();
+}
+
+/*********************************************************************/
+void* CEEJitInfo::getMethodSync(CORINFO_METHOD_HANDLE ftnHnd,
+ void **ppIndirection)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ void * result = NULL;
+
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ result = GetClassSync((GetMethod(ftnHnd))->GetMethodTable());
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
+/*********************************************************************/
+HRESULT CEEJitInfo::allocBBProfileBuffer (
+ ULONG count,
+ ICorJitInfo::ProfileBuffer ** profileBuffer
+ )
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+
+ JIT_TO_EE_TRANSITION();
+
+#ifdef FEATURE_PREJIT
+
+ // We need to know the code size. Typically we can get the code size
+ // from m_ILHeader. For dynamic methods, m_ILHeader will be NULL, so
+ // for that case we need to use DynamicResolver to get the code size.
+
+ unsigned codeSize = 0;
+ if (m_pMethodBeingCompiled->IsDynamicMethod())
+ {
+ unsigned stackSize, ehSize;
+ CorInfoOptions options;
+ DynamicResolver * pResolver = m_pMethodBeingCompiled->AsDynamicMethodDesc()->GetResolver();
+ pResolver->GetCodeInfo(&codeSize, &stackSize, &options, &ehSize);
+ }
+ else
+ {
+ codeSize = m_ILHeader->GetCodeSize();
+ }
+
+ *profileBuffer = m_pMethodBeingCompiled->GetLoaderModule()->AllocateProfileBuffer(m_pMethodBeingCompiled->GetMemberDef(), count, codeSize);
+ hr = (*profileBuffer ? S_OK : E_OUTOFMEMORY);
+#else // FEATURE_PREJIT
+ _ASSERTE(!"allocBBProfileBuffer not implemented on CEEJitInfo!");
+ hr = E_NOTIMPL;
+#endif // !FEATURE_PREJIT
+
+ EE_TO_JIT_TRANSITION();
+
+ return hr;
+}
+
+// Consider implementing getBBProfileData on CEEJitInfo. This will allow us
+// to use profile info in codegen for non-zapped images.
+HRESULT CEEJitInfo::getBBProfileData (
+ CORINFO_METHOD_HANDLE ftnHnd,
+ ULONG * size,
+ ICorJitInfo::ProfileBuffer ** profileBuffer,
+ ULONG * numRuns
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"getBBProfileData not implemented on CEEJitInfo!");
+ return E_NOTIMPL;
+}
+
+void CEEJitInfo::allocMem (
+ ULONG hotCodeSize, /* IN */
+ ULONG coldCodeSize, /* IN */
+ ULONG roDataSize, /* IN */
+ ULONG xcptnsCount, /* IN */
+ CorJitAllocMemFlag flag, /* IN */
+ void ** hotCodeBlock, /* OUT */
+ void ** coldCodeBlock, /* OUT */
+ void ** roDataBlock /* OUT */
+ )
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ _ASSERTE(coldCodeSize == 0);
+ if (coldCodeBlock)
+ {
+ *coldCodeBlock = NULL;
+ }
+
+ ULONG codeSize = hotCodeSize;
+ void **codeBlock = hotCodeBlock;
+
+ S_SIZE_T totalSize = S_SIZE_T(codeSize);
+
+ if (roDataSize > 0)
+ {
+ totalSize.AlignUp(sizeof(void *));
+ totalSize += roDataSize;
+
+#ifndef _WIN64
+ if (roDataSize >= 8)
+ {
+            // Allocate an extra 4 bytes so that we can
+            // double-align the roData section.
+ totalSize += 4;
+ }
+#endif
+ }
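+
+    // Worked example (illustrative sizes, 32-bit target): with codeSize == 0x1A and
+    // roDataSize == 16, totalSize is aligned up to 0x1C, grows to 0x2C with the roData,
+    // and to 0x30 with the 4 slack bytes; the slack guarantees that the ALIGN_UP(current, 8)
+    // below can place the roData on an 8-byte boundary wherever the code ends.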
+
+#ifdef WIN64EXCEPTIONS
+ totalSize.AlignUp(sizeof(DWORD));
+ totalSize += m_totalUnwindSize;
+#endif
+
+ _ASSERTE(m_CodeHeader == 0 &&
+ // The jit-compiler sometimes tries to compile a method a second time
+ // if it failed the first time. In such a situation, m_CodeHeader may
+             // have already been assigned. It's OK to ignore this assert in such a
+ // situation - we will leak some memory, but that is acceptable
+ // since this should happen very rarely.
+ "Note that this may fire if the JITCompiler tries to recompile a method");
+
+ if( totalSize.IsOverflow() )
+ {
+ COMPlusThrowHR(CORJIT_OUTOFMEM);
+ }
+
+ m_CodeHeader = m_jitManager->allocCode(m_pMethodBeingCompiled, totalSize.Value(), flag
+#ifdef WIN64EXCEPTIONS
+ , m_totalUnwindInfos
+ , &m_moduleBase
+#endif
+ );
+
+ BYTE* current = (BYTE *)m_CodeHeader->GetCodeStartAddress();
+
+ *codeBlock = current;
+ current += codeSize;
+
+ if (roDataSize > 0)
+ {
+ current = (BYTE *)ALIGN_UP(current, (roDataSize >= 8) ? 8 : sizeof(void *));
+
+ *roDataBlock = current;
+ current += roDataSize;
+ }
+ else
+ {
+ *roDataBlock = NULL;
+ }
+
+#ifdef WIN64EXCEPTIONS
+ current = (BYTE *)ALIGN_UP(current, sizeof(DWORD));
+
+ m_theUnwindBlock = current;
+ current += m_totalUnwindSize;
+#endif
+
+ _ASSERTE((SIZE_T)(current - (BYTE *)m_CodeHeader->GetCodeStartAddress()) <= totalSize.Value());
+
+#ifdef _DEBUG
+ m_codeSize = codeSize;
+#endif // _DEBUG
+
+ EE_TO_JIT_TRANSITION();
+}
+
+/*********************************************************************/
+void * CEEJitInfo::allocGCInfo (size_t size)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ void * block = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ _ASSERTE(m_CodeHeader != 0);
+ _ASSERTE(m_CodeHeader->GetGCInfo() == 0);
+
+#ifdef _WIN64
+ if (size & 0xFFFFFFFF80000000LL)
+ {
+ COMPlusThrowHR(CORJIT_OUTOFMEM);
+ }
+#endif // _WIN64
+
+ block = m_jitManager->allocGCInfo(m_CodeHeader,(DWORD)size, &m_GCinfo_len);
+ if (!block)
+ {
+ COMPlusThrowHR(CORJIT_OUTOFMEM);
+ }
+
+ _ASSERTE(m_CodeHeader->GetGCInfo() != 0 && block == m_CodeHeader->GetGCInfo());
+
+ EE_TO_JIT_TRANSITION();
+
+ return block;
+}
+
+/*********************************************************************/
+void CEEJitInfo::setEHcount (
+ unsigned cEH)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ _ASSERTE(cEH != 0);
+ _ASSERTE(m_CodeHeader != 0);
+ _ASSERTE(m_CodeHeader->GetEHInfo() == 0);
+
+ EE_ILEXCEPTION* ret;
+ ret = m_jitManager->allocEHInfo(m_CodeHeader,cEH, &m_EHinfo_len);
+ _ASSERTE(ret); // allocEHInfo throws if there's not enough memory
+
+ _ASSERTE(m_CodeHeader->GetEHInfo() != 0 && m_CodeHeader->GetEHInfo()->EHCount() == cEH);
+
+ EE_TO_JIT_TRANSITION();
+}
+
+/*********************************************************************/
+void CEEJitInfo::setEHinfo (
+ unsigned EHnumber,
+ const CORINFO_EH_CLAUSE* clause)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ // <REVISIT_TODO> Fix make the Code Manager EH clauses EH_INFO+</REVISIT_TODO>
+ _ASSERTE(m_CodeHeader->GetEHInfo() != 0 && EHnumber < m_CodeHeader->GetEHInfo()->EHCount());
+
+ EE_ILEXCEPTION_CLAUSE* pEHClause = m_CodeHeader->GetEHInfo()->EHClause(EHnumber);
+
+ pEHClause->TryStartPC = clause->TryOffset;
+ pEHClause->TryEndPC = clause->TryLength;
+ pEHClause->HandlerStartPC = clause->HandlerOffset;
+ pEHClause->HandlerEndPC = clause->HandlerLength;
+ pEHClause->ClassToken = clause->ClassToken;
+ pEHClause->Flags = (CorExceptionFlag)clause->Flags;
+
+ LOG((LF_EH, LL_INFO1000000, "Setting EH clause #%d for %s::%s\n", EHnumber, m_pMethodBeingCompiled->m_pszDebugClassName, m_pMethodBeingCompiled->m_pszDebugMethodName));
+ LOG((LF_EH, LL_INFO1000000, " Flags : 0x%08lx -> 0x%08lx\n", clause->Flags, pEHClause->Flags));
+ LOG((LF_EH, LL_INFO1000000, " TryOffset : 0x%08lx -> 0x%08lx (startpc)\n", clause->TryOffset, pEHClause->TryStartPC));
+ LOG((LF_EH, LL_INFO1000000, " TryLength : 0x%08lx -> 0x%08lx (endpc)\n", clause->TryLength, pEHClause->TryEndPC));
+ LOG((LF_EH, LL_INFO1000000, " HandlerOffset : 0x%08lx -> 0x%08lx\n", clause->HandlerOffset, pEHClause->HandlerStartPC));
+ LOG((LF_EH, LL_INFO1000000, " HandlerLength : 0x%08lx -> 0x%08lx\n", clause->HandlerLength, pEHClause->HandlerEndPC));
+ LOG((LF_EH, LL_INFO1000000, " ClassToken : 0x%08lx -> 0x%08lx\n", clause->ClassToken, pEHClause->ClassToken));
+ LOG((LF_EH, LL_INFO1000000, " FilterOffset : 0x%08lx -> 0x%08lx\n", clause->FilterOffset, pEHClause->FilterOffset));
+
+ if (m_pMethodBeingCompiled->IsDynamicMethod() &&
+ ((pEHClause->Flags & COR_ILEXCEPTION_CLAUSE_FILTER) == 0) &&
+ (clause->ClassToken != NULL))
+ {
+ MethodDesc * pMD; FieldDesc * pFD;
+ m_pMethodBeingCompiled->AsDynamicMethodDesc()->GetResolver()->ResolveToken(clause->ClassToken, (TypeHandle *)&pEHClause->TypeHandle, &pMD, &pFD);
+ SetHasCachedTypeHandle(pEHClause);
+ LOG((LF_EH, LL_INFO1000000, " CachedTypeHandle: 0x%08lx -> 0x%08lx\n", clause->ClassToken, pEHClause->TypeHandle));
+ }
+
+ EE_TO_JIT_TRANSITION();
+}
+
+/*********************************************************************/
+// get individual exception handler
+void CEEJitInfo::getEHinfo(
+ CORINFO_METHOD_HANDLE ftn, /* IN */
+ unsigned EHnumber, /* IN */
+ CORINFO_EH_CLAUSE* clause) /* OUT */
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+ if (IsDynamicMethodHandle(ftn))
+ {
+ GetMethod(ftn)->AsDynamicMethodDesc()->GetResolver()->GetEHInfo(EHnumber, clause);
+ }
+ else
+ {
+ _ASSERTE(ftn == CORINFO_METHOD_HANDLE(m_pMethodBeingCompiled)); // For now only support if the method being jitted
+ getEHinfoHelper(ftn, EHnumber, clause, m_ILHeader);
+ }
+
+ EE_TO_JIT_TRANSITION();
+}
+#endif // CROSSGEN_COMPILE
+
+#ifdef CROSSGEN_COMPILE
+EXTERN_C ICorJitCompiler* __stdcall getJit();
+#endif
+
+#ifdef FEATURE_INTERPRETER
+static CorJitResult CompileMethodWithEtwWrapper(EEJitManager *jitMgr,
+ CEEInfo *comp,
+ struct CORINFO_METHOD_INFO *info,
+ unsigned flags,
+ BYTE **nativeEntry,
+ ULONG *nativeSizeOfCode)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ SString namespaceOrClassName, methodName, methodSignature;
+ // Fire an ETW event to mark the beginning of JIT'ing
+ ETW::MethodLog::MethodJitting(reinterpret_cast<MethodDesc*>(info->ftn), &namespaceOrClassName, &methodName, &methodSignature);
+
+ CorJitResult ret = jitMgr->m_jit->compileMethod(comp, info, flags, nativeEntry, nativeSizeOfCode);
+
+    // Logically, it would seem that the end-of-JITting ETW event should go here, but it must come after the native code has been
+    // set for the given method desc, which happens in a caller.
+
+ return ret;
+}
+#endif // FEATURE_INTERPRETER
+
+//
+// Helper function, because we can't have dtors in BEGIN_SO_TOLERANT_CODE.
+// flags2 is not passed on to the JIT (yet) through the JITInterface;
+// it carries extra flags that can be passed around within the VM.
+//
+CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
+ CEEInfo *comp,
+ struct CORINFO_METHOD_INFO *info,
+ unsigned flags,
+ unsigned flags2,
+ BYTE **nativeEntry,
+ ULONG *nativeSizeOfCode)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ CorJitResult ret = CORJIT_SKIPPED; // Note that CORJIT_SKIPPED is an error exit status code
+
+#ifdef FEATURE_STACK_SAMPLING
+ // SO_INTOLERANT due to init affecting global state.
+ static ConfigDWORD s_stackSamplingEnabled;
+ bool samplingEnabled = (s_stackSamplingEnabled.val(CLRConfig::UNSUPPORTED_StackSamplingEnabled) != 0);
+#endif
+
+ BEGIN_SO_TOLERANT_CODE(GetThread());
+
+#ifdef CROSSGEN_COMPILE
+ ret = getJit()->compileMethod( comp,
+ info,
+ flags,
+ nativeEntry,
+ nativeSizeOfCode);
+
+#else // CROSSGEN_COMPILE
+
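+    // Compilation attempt order on this path: the alternate JIT first (when one
+    // is loaded), then, if FEATURE_INTERPRETER is built in, the interpreter
+    // either before or after the primary JIT depending on the
+    // InterpreterFallback setting; the first component to succeed wins.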
+#ifdef ALLOW_SXS_JIT
+ if (FAILED(ret) && jitMgr->m_alternateJit
+#ifdef FEATURE_STACK_SAMPLING
+ && (!samplingEnabled || (flags2 & CORJIT_FLG2_SAMPLING_JIT_BACKGROUND))
+#endif
+ )
+ {
+ ret = jitMgr->m_alternateJit->compileMethod( comp,
+ info,
+ flags,
+ nativeEntry,
+ nativeSizeOfCode );
+
+#ifdef FEATURE_STACK_SAMPLING
+ if (flags2 & CORJIT_FLG2_SAMPLING_JIT_BACKGROUND)
+ {
+ // Don't bother with failures if we couldn't collect a trace.
+ ret = CORJIT_OK;
+ }
+#endif // FEATURE_STACK_SAMPLING
+
+ // If we failed to jit, then fall back to the primary Jit.
+ if (FAILED(ret))
+ {
+ // Consider adding this call:
+ // ((CEEJitInfo*)comp)->BackoutJitData(jitMgr);
+ ((CEEJitInfo*)comp)->ResetForJitRetry();
+ ret = CORJIT_SKIPPED;
+ }
+ }
+#endif // ALLOW_SXS_JIT
+
+#ifdef FEATURE_INTERPRETER
+ static ConfigDWORD s_InterpreterFallback;
+
+ bool interpreterFallback = (s_InterpreterFallback.val(CLRConfig::INTERNAL_InterpreterFallback) != 0);
+
+ if (interpreterFallback == false)
+ {
+ // If we're doing an "import_only" compilation, it's for verification, so don't interpret.
+ // (We assume that importation is completely architecture-independent, or at least nearly so.)
+ if (FAILED(ret) && (flags & (CORJIT_FLG_IMPORT_ONLY | CORJIT_FLG_MAKEFINALCODE)) == 0)
+ {
+ ret = Interpreter::GenerateInterpreterStub(comp, info, nativeEntry, nativeSizeOfCode);
+ }
+ }
+
+ if (FAILED(ret) && jitMgr->m_jit)
+ {
+ ret = CompileMethodWithEtwWrapper(jitMgr,
+ comp,
+ info,
+ flags,
+ nativeEntry,
+ nativeSizeOfCode);
+ }
+
+ if (interpreterFallback == true)
+ {
+ // If we're doing an "import_only" compilation, it's for verification, so don't interpret.
+ // (We assume that importation is completely architecture-independent, or at least nearly so.)
+ if (FAILED(ret) && (flags & (CORJIT_FLG_IMPORT_ONLY | CORJIT_FLG_MAKEFINALCODE)) == 0)
+ {
+ ret = Interpreter::GenerateInterpreterStub(comp, info, nativeEntry, nativeSizeOfCode);
+ }
+ }
+#else
+ if (FAILED(ret))
+ {
+ ret = jitMgr->m_jit->compileMethod( comp,
+ info,
+ flags,
+ nativeEntry,
+ nativeSizeOfCode);
+ }
+#endif // FEATURE_INTERPRETER
+
+ // Cleanup any internal data structures allocated
+    // such as IL code after a successful JIT compile.
+    // If the JIT fails we keep the IL around and will
+    // try to re-JIT the same IL. VSW 525059
+ //
+ if (SUCCEEDED(ret) && !(flags & CORJIT_FLG_IMPORT_ONLY) && !((CEEJitInfo*)comp)->JitAgain())
+ {
+ ((CEEJitInfo*)comp)->CompressDebugInfo();
+
+#ifdef FEATURE_INTERPRETER
+ // We do this cleanup in the prestub, where we know whether the method
+ // has been interpreted.
+#else
+ comp->MethodCompileComplete(info->ftn);
+#endif // FEATURE_INTERPRETER
+ }
+
+#endif // CROSSGEN_COMPILE
+
+ END_SO_TOLERANT_CODE;
+
+ return ret;
+}
+
+
+/*********************************************************************/
+CorJitResult invokeCompileMethod(EEJitManager *jitMgr,
+ CEEInfo *comp,
+ struct CORINFO_METHOD_INFO *info,
+ unsigned flags,
+ unsigned flags2,
+ BYTE **nativeEntry,
+ ULONG *nativeSizeOfCode)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+ //
+ // The JIT runs in preemptive mode
+ //
+
+ GCX_PREEMP();
+
+ CorJitResult ret = invokeCompileMethodHelper(jitMgr, comp, info, flags, flags2, nativeEntry, nativeSizeOfCode);
+
+ //
+ // Verify that we are still in preemptive mode when we return
+ // from the JIT
+ //
+
+ _ASSERTE(GetThread()->PreemptiveGCDisabled() == FALSE);
+
+ return ret;
+}
+
+CorJitFlag GetCompileFlagsIfGenericInstantiation(
+ CORINFO_METHOD_HANDLE method,
+ CorJitFlag compileFlags,
+ ICorJitInfo * pCorJitInfo,
+ BOOL * raiseVerificationException,
+ BOOL * unverifiableGenericCode);
+
+CorJitResult CallCompileMethodWithSEHWrapper(EEJitManager *jitMgr,
+ CEEInfo *comp,
+ struct CORINFO_METHOD_INFO *info,
+ unsigned flags,
+ unsigned flags2,
+ BYTE **nativeEntry,
+ ULONG *nativeSizeOfCode,
+ MethodDesc *ftn)
+{
+ // no dynamic contract here because SEH is used, with a finally clause
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "CallCompileMethodWithSEHWrapper called...\n"));
+
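+    // PAL_TRY only lets the try body touch state reachable from its parameter,
+    // so everything the body needs is marshaled through this Param block.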
+ struct Param
+ {
+ EEJitManager *jitMgr;
+ CEEInfo *comp;
+ struct CORINFO_METHOD_INFO *info;
+ unsigned flags;
+ unsigned flags2;
+ BYTE **nativeEntry;
+ ULONG *nativeSizeOfCode;
+ MethodDesc *ftn;
+ CorJitResult res;
+ }; Param param;
+ param.jitMgr = jitMgr;
+ param.comp = comp;
+ param.info = info;
+ param.flags = flags;
+ param.flags2 = flags2;
+ param.nativeEntry = nativeEntry;
+ param.nativeSizeOfCode = nativeSizeOfCode;
+ param.ftn = ftn;
+ param.res = CORJIT_INTERNALERROR;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ //
+ // Call out to the JIT-compiler
+ //
+
+ pParam->res = invokeCompileMethod( pParam->jitMgr,
+ pParam->comp,
+ pParam->info,
+ pParam->flags,
+ pParam->flags2,
+ pParam->nativeEntry,
+ pParam->nativeSizeOfCode);
+ }
+ PAL_FINALLY
+ {
+#if defined(DEBUGGING_SUPPORTED) && !defined(CROSSGEN_COMPILE)
+ if (!(flags & (CORJIT_FLG_IMPORT_ONLY | CORJIT_FLG_MCJIT_BACKGROUND))
+#ifdef FEATURE_STACK_SAMPLING
+ && !(flags2 & CORJIT_FLG2_SAMPLING_JIT_BACKGROUND)
+#endif // FEATURE_STACK_SAMPLING
+ )
+ {
+ //
+ // Notify the debugger that we have successfully jitted the function
+ //
+ if (ftn->HasNativeCode())
+ {
+ //
+ // Nothing to do here (don't need to notify the debugger
+ // because the function has already been successfully jitted)
+ //
+ // This is the case where we aborted the jit because of a deadlock cycle
+ // in initClass.
+ //
+ }
+ else
+ {
+ if (g_pDebugInterface)
+ {
+ if (param.res == CORJIT_OK && !((CEEJitInfo*)param.comp)->JitAgain())
+ {
+ g_pDebugInterface->JITComplete(ftn, (TADDR) *nativeEntry);
+ }
+ }
+ }
+ }
+#endif // DEBUGGING_SUPPORTED && !CROSSGEN_COMPILE
+ }
+ PAL_ENDTRY
+
+ return param.res;
+}
+
+/*********************************************************************/
+// Figures out the compile flags that are used by both JIT and NGen
+
+/* static */ DWORD CEEInfo::GetBaseCompileFlags(MethodDesc * ftn)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ //
+ // Figure out the code quality flags
+ //
+
+ DWORD flags = 0;
+ if (g_pConfig->JitFramed())
+ flags |= CORJIT_FLG_FRAMED;
+ if (g_pConfig->JitAlignLoops())
+ flags |= CORJIT_FLG_ALIGN_LOOPS;
+ if (ReJitManager::IsReJITEnabled() || g_pConfig->AddRejitNops())
+ flags |= CORJIT_FLG_PROF_REJIT_NOPS;
+#ifdef _TARGET_X86_
+ if (g_pConfig->PInvokeRestoreEsp(ftn->GetModule()->IsPreV4Assembly()))
+ flags |= CORJIT_FLG_PINVOKE_RESTORE_ESP;
+#endif // _TARGET_X86_
+
+ //See if we should instruct the JIT to emit calls to JIT_PollGC for thread suspension. If we have a
+ //non-default value in the EE Config, then use that. Otherwise select the platform specific default.
+#ifdef FEATURE_ENABLE_GCPOLL
+ EEConfig::GCPollType pollType = g_pConfig->GetGCPollType();
+ if (EEConfig::GCPOLL_TYPE_POLL == pollType)
+ flags |= CORJIT_FLG_GCPOLL_CALLS;
+ else if (EEConfig::GCPOLL_TYPE_INLINE == pollType)
+ flags |= CORJIT_FLG_GCPOLL_INLINE;
+#endif //FEATURE_ENABLE_GCPOLL
+
+ // Set flags based on method's ImplFlags.
+ if (!ftn->IsNoMetadata())
+ {
+ DWORD dwImplFlags = 0;
+ IfFailThrow(ftn->GetMDImport()->GetMethodImplProps(ftn->GetMemberDef(), NULL, &dwImplFlags));
+
+ if (IsMiNoOptimization(dwImplFlags))
+ {
+ flags |= CORJIT_FLG_MIN_OPT;
+ }
+
+ // Always emit frames for methods marked no-inline (see #define ETW_EBP_FRAMED in the JIT)
+ if (IsMiNoInlining(dwImplFlags))
+ {
+ flags |= CORJIT_FLG_FRAMED;
+ }
+ }
+
+#ifdef FEATURE_LEGACYNETCF
+ // for "AppDomainCompatSwitch" == "WindowsPhone_3.7.0.0" or "AppDomainCompatSwitch" == "WindowsPhone_3.8.0.0"
+ // This is when we need to generate code that more closely resembles
+ // what the WinPhone 7.0/7.1/7.5 NetCF JIT used to generate.
+ if (ftn->GetDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ flags |= CORJIT_FLG_NETCF_QUIRKS;
+#endif // FEATURE_LEGACYNETCF
+
+ return flags;
+}
+
+/*********************************************************************/
+// Figures out (some of) the flags to use to compile the method
+// Returns the new set to use
+
+DWORD GetDebuggerCompileFlags(Module* pModule, DWORD flags)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_CORECLR
+ //Right now if we don't have a debug interface on CoreCLR, we can't generate debug info. So, in those
+ //cases don't attempt it.
+ if (!g_pDebugInterface)
+ return flags;
+#endif //FEATURE_CORECLR
+
+#ifdef DEBUGGING_SUPPORTED
+
+#ifdef _DEBUG
+ if (g_pConfig->GenDebuggableCode())
+ flags |= CORJIT_FLG_DEBUG_CODE;
+#endif // _DEBUG
+
+#ifdef EnC_SUPPORTED
+ if (pModule->IsEditAndContinueEnabled())
+ {
+ flags |= CORJIT_FLG_DEBUG_EnC;
+ }
+#endif // EnC_SUPPORTED
+
+ // Debug info is always tracked
+ flags |= CORJIT_FLG_DEBUG_INFO;
+#endif // DEBUGGING_SUPPORTED
+
+ if (CORDisableJITOptimizations(pModule->GetDebuggerInfoBits()))
+ {
+ flags |= CORJIT_FLG_DEBUG_CODE;
+ }
+
+ if (flags & CORJIT_FLG_IMPORT_ONLY)
+ {
+        // If we are only verifying the method, we don't need any debug info, and this
+        // prevents getVars()/getBoundaries() from being called unnecessarily.
+ flags &= ~(CORJIT_FLG_DEBUG_INFO|CORJIT_FLG_DEBUG_CODE);
+ }
+
+ return flags;
+}
+
+CorJitFlag GetCompileFlags(MethodDesc * ftn, DWORD flags, CORINFO_METHOD_INFO * methodInfo)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(methodInfo->regionKind == CORINFO_REGION_JIT);
+
+ //
+ // Get the compile flags that are shared between JIT and NGen
+ //
+ flags |= CEEInfo::GetBaseCompileFlags(ftn);
+
+ //
+ // Get CPU specific flags
+ //
+ if ((flags & CORJIT_FLG_IMPORT_ONLY) == 0)
+ {
+ flags |= ExecutionManager::GetEEJitManager()->GetCPUCompileFlags();
+ }
+
+ //
+ // Find the debugger and profiler related flags
+ //
+
+#ifdef DEBUGGING_SUPPORTED
+ flags |= GetDebuggerCompileFlags(ftn->GetModule(), flags);
+#endif
+
+#ifdef PROFILING_SUPPORTED
+ if (CORProfilerTrackEnterLeave()
+ && !ftn->IsNoMetadata()
+ )
+ flags |= CORJIT_FLG_PROF_ENTERLEAVE;
+
+ if (CORProfilerTrackTransitions())
+ flags |= CORJIT_FLG_PROF_NO_PINVOKE_INLINE;
+#endif // PROFILING_SUPPORTED
+
+ // Set optimization flags
+ if (0 == (flags & CORJIT_FLG_MIN_OPT))
+ {
+ unsigned optType = g_pConfig->GenOptimizeType();
+ _ASSERTE(optType <= OPT_RANDOM);
+
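+        // OPT_RANDOM picks the optimization type per method, derived
+        // deterministically from the IL code size.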
+ if (optType == OPT_RANDOM)
+ optType = methodInfo->ILCodeSize % OPT_RANDOM;
+
+ if (g_pConfig->JitMinOpts())
+ flags |= CORJIT_FLG_MIN_OPT;
+
+ const static unsigned optTypeFlags[] =
+ {
+ 0, // OPT_BLENDED
+ CORJIT_FLG_SIZE_OPT, // OPT_CODE_SIZE
+ CORJIT_FLG_SPEED_OPT // OPT_CODE_SPEED
+ };
+
+ _ASSERTE(optType < OPT_RANDOM);
+ _ASSERTE((sizeof(optTypeFlags)/sizeof(optTypeFlags[0])) == OPT_RANDOM);
+ flags |= optTypeFlags[optType];
+ }
+
+ //
+ // Verification flags
+ //
+
+#ifdef _DEBUG
+ if (g_pConfig->IsJitVerificationDisabled())
+ flags |= CORJIT_FLG_SKIP_VERIFICATION;
+#endif // _DEBUG
+
+ if ((flags & CORJIT_FLG_IMPORT_ONLY) == 0 &&
+ Security::CanSkipVerification(ftn))
+ flags |= CORJIT_FLG_SKIP_VERIFICATION;
+
+ if (ftn->IsILStub())
+ {
+ flags |= CORJIT_FLG_SKIP_VERIFICATION;
+
+ // no debug info available for IL stubs
+ flags &= ~CORJIT_FLG_DEBUG_INFO;
+ }
+
+ return (CorJitFlag)flags;
+}
+
+#if defined(_WIN64)
+//The implementation of Jit64 prevents it from both inlining and verifying at the same time. This causes a
+//perf problem for code that adopts Transparency. This code attempts to enable inlining in spite of that
+//limitation in that scenario.
+//
+//This only works for real methods. If the method isn't IL (IsIL() returns false), then IsVerifiable will AV. That would be a
+//bad thing (TM).
+BOOL IsTransparentMethodSafeToSkipVerification(CorJitFlag flags, MethodDesc * ftn)
+{
+ STANDARD_VM_CONTRACT;
+
+ BOOL ret = FALSE;
+ if (!(flags & CORJIT_FLG_IMPORT_ONLY) && !(flags & CORJIT_FLG_SKIP_VERIFICATION)
+ && Security::IsMethodTransparent(ftn) &&
+ ((ftn->IsIL() && !ftn->IsUnboxingStub()) ||
+ (ftn->IsDynamicMethod() && !ftn->IsILStub())))
+ {
+ EX_TRY
+ {
+ //Verify the method
+ ret = ftn->IsVerifiable();
+ }
+ EX_CATCH
+ {
+ //If the jit throws an exception, do not let it leak out of here. For example, we can sometimes
+ //get an IPE that we could recover from in the Jit (i.e. invalid local in a method with skip
+ //verification).
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+ }
+ return ret;
+}
+#else
+#define IsTransparentMethodSafeToSkipVerification(flags,ftn) (FALSE)
+#endif //_WIN64
+
+/*********************************************************************/
+// We verify generic code once and for all using the typical open type,
+// and then no instantiations need to be verified. If verification
+// failed, then we need to throw an exception whenever we try
+// to compile a real instantiation
+
+CorJitFlag GetCompileFlagsIfGenericInstantiation(
+ CORINFO_METHOD_HANDLE method,
+ CorJitFlag compileFlags,
+ ICorJitInfo * pCorJitInfo,
+ BOOL * raiseVerificationException,
+ BOOL * unverifiableGenericCode)
+{
+ STANDARD_VM_CONTRACT;
+
+ *raiseVerificationException = FALSE;
+ *unverifiableGenericCode = FALSE;
+
+ // If we have already decided to skip verification, keep on going.
+ if (compileFlags & CORJIT_FLG_SKIP_VERIFICATION)
+ return compileFlags;
+
+ CorInfoInstantiationVerification ver = pCorJitInfo->isInstantiationOfVerifiedGeneric(method);
+
+ switch(ver)
+ {
+ case INSTVER_NOT_INSTANTIATION:
+ // Non-generic, or open instantiation of a generic type/method
+ if (IsTransparentMethodSafeToSkipVerification(compileFlags, (MethodDesc*)method))
+ compileFlags = (CorJitFlag)(compileFlags | CORJIT_FLG_SKIP_VERIFICATION);
+ return compileFlags;
+
+ case INSTVER_GENERIC_PASSED_VERIFICATION:
+ // If the typical instantiation is verifiable, there is no need
+ // to verify the concrete instantiations
+ return (CorJitFlag)(compileFlags | CORJIT_FLG_SKIP_VERIFICATION);
+
+ case INSTVER_GENERIC_FAILED_VERIFICATION:
+
+ *unverifiableGenericCode = TRUE;
+
+ // The generic method is not verifiable.
+ // Check if it has SkipVerification permission
+ MethodDesc * pGenMethod = GetMethod(method)->LoadTypicalMethodDefinition();
+
+ CORINFO_METHOD_HANDLE genMethodHandle = CORINFO_METHOD_HANDLE(pGenMethod);
+
+ CorInfoCanSkipVerificationResult canSkipVer;
+ canSkipVer = pCorJitInfo->canSkipMethodVerification(genMethodHandle);
+
+ switch(canSkipVer)
+ {
+
+#ifdef FEATURE_PREJIT
+ case CORINFO_VERIFICATION_DONT_JIT:
+ {
+ // Transparent code could be partial trust, but we don't know at NGEN time.
+ // This is the flag that NGEN passes to the JIT to tell it to give-up if it
+ // hits unverifiable code. Since we've already hit unverifiable code,
+ // there's no point in starting the JIT, just to have it give up, so we
+ // give up here.
+ _ASSERTE(compileFlags & CORJIT_FLG_PREJIT);
+ *raiseVerificationException = TRUE;
+ return (CorJitFlag)-1; // This value will not be used
+ }
+#else // FEATURE_PREJIT
+ // Need to have this case here to keep the MAC build happy
+ case CORINFO_VERIFICATION_DONT_JIT:
+ {
+ _ASSERTE(!"We should never get here");
+ return compileFlags;
+ }
+#endif // FEATURE_PREJIT
+
+ case CORINFO_VERIFICATION_CANNOT_SKIP:
+ {
+ // For unverifiable generic code without SkipVerification permission,
+ // we cannot ask the compiler to emit CORINFO_HELP_VERIFICATION in
+ // unverifiable branches as the compiler cannot determine the unverifiable
+ // branches while compiling the concrete instantiation. Instead,
+ // just throw a VerificationException right away.
+ *raiseVerificationException = TRUE;
+ return (CorJitFlag)-1; // This value will not be used
+ }
+
+ case CORINFO_VERIFICATION_CAN_SKIP:
+ {
+ return (CorJitFlag)(compileFlags | CORJIT_FLG_SKIP_VERIFICATION);
+ }
+
+ case CORINFO_VERIFICATION_RUNTIME_CHECK:
+ {
+ // Compile the method without CORJIT_FLG_SKIP_VERIFICATION.
+ // The compiler will know to add a call to
+ // CORINFO_HELP_VERIFICATION_RUNTIME_CHECK, and then to skip verification.
+ return compileFlags;
+ }
+ }
+ }
+
+ _ASSERTE(!"We should never get here");
+ return compileFlags;
+}
+
+// ********************************************************************
+
+// Throw the right type of exception for the given JIT result
+
+void ThrowExceptionForJit(HRESULT res)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ switch (res)
+ {
+ case CORJIT_OUTOFMEM:
+ COMPlusThrowOM();
+ break;
+
+#ifdef _TARGET_X86_
+    // Currently, only the x86 JIT returns adequate error codes. The x86 JIT is also the
+    // JIT that has more limitations, and given that getting this message for 64-bit
+    // would require some code churn (either changing their EH handlers or fixing the
+    // 3 or 4 code sites they have that return CORJIT_INTERNALERROR independently of
+    // the error), the least risky fix is making this x86-only.
+ case CORJIT_INTERNALERROR:
+ COMPlusThrow(kInvalidProgramException, (UINT) IDS_EE_JIT_COMPILER_ERROR);
+ break;
+#endif
+ // If we are aborting compilation to do an operation which can't be compiled into
+ // MDIL, there is no need to surface a managed visible exception.
+ case CORJIT_SKIPMDIL:
+ ThrowHR(COR_E_UNSUPPORTEDMDIL);
+ break;
+
+ case CORJIT_BADCODE:
+ default:
+ COMPlusThrow(kInvalidProgramException);
+ break;
+    }
+}
+
+// ********************************************************************
+#ifdef _DEBUG
+LONG g_JitCount = 0;
+#endif
+
+//#define PERF_TRACK_METHOD_JITTIMES
+#ifdef _TARGET_AMD64_
+BOOL g_fAllowRel32 = TRUE;
+#endif
+
+
+// ********************************************************************
+// README!!
+// ********************************************************************
+
+// The reason that this is named UnsafeJitFunction is that this helper
+// method is not thread safe! When multiple threads get in here for
+// the same pMD, ALL of them MUST return the SAME value.
+// To ensure that this happens you must call MakeJitWorker.
+// It creates a DeadlockAware list of methods being jitted and prevents us
+// from trying to jit the same method more than once.
+//
+// Calls to this method that occur only to check whether inlining can occur
+// on x86 are OK, since they discard the return value of this method.
+
+PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader,
+ DWORD flags, DWORD flags2)
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE ret = NULL;
+
+ COOPERATIVE_TRANSITION_BEGIN();
+
+#ifdef FEATURE_PREJIT
+
+ if (g_pConfig->RequireZaps() == EEConfig::REQUIRE_ZAPS_ALL &&
+ ftn->GetModule()->GetDomainFile()->IsZapRequired() &&
+ PartialNGenStressPercentage() == 0 &&
+#ifdef FEATURE_STACK_SAMPLING
+ !(flags2 & CORJIT_FLG2_SAMPLING_JIT_BACKGROUND) &&
+#endif
+ !(flags & CORJIT_FLG_IMPORT_ONLY))
+ {
+ StackSString ss(SString::Ascii, "ZapRequire: JIT compiler invoked for ");
+ TypeString::AppendMethodInternal(ss, ftn);
+
+#ifdef _DEBUG
+        // Assert, as some tests may not check their error codes well. So throwing an
+        // exception may not cause a test failure (as it should).
+ StackScratchBuffer scratch;
+ DbgAssertDialog(__FILE__, __LINE__, (char*)ss.GetUTF8(scratch));
+#endif // _DEBUG
+
+ COMPlusThrowNonLocalized(kFileNotFoundException, ss.GetUnicode());
+ }
+
+#endif // FEATURE_PREJIT
+
+#ifndef CROSSGEN_COMPILE
+ EEJitManager *jitMgr = ExecutionManager::GetEEJitManager();
+ if (!jitMgr->LoadJIT())
+ {
+#ifdef ALLOW_SXS_JIT
+ if (!jitMgr->IsMainJitLoaded())
+ {
+ // Don't want to throw InvalidProgram from here.
+ EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Failed to load JIT compiler"));
+ }
+ if (!jitMgr->IsAltJitLoaded())
+ {
+ // Don't want to throw InvalidProgram from here.
+ EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Failed to load alternative JIT compiler"));
+ }
+#else // ALLOW_SXS_JIT
+ // Don't want to throw InvalidProgram from here.
+ EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Failed to load JIT compiler"));
+#endif // ALLOW_SXS_JIT
+ }
+#endif // CROSSGEN_COMPILE
+
+#ifdef _DEBUG
+ // This is here so we can see the name and class easily in the debugger
+
+ LPCUTF8 cls = ftn->GetMethodTable()->GetDebugClassName();
+ LPCUTF8 name = ftn->GetName();
+
+ if (ftn->IsNoMetadata())
+ {
+ if (ftn->IsILStub())
+ {
+ LOG((LF_JIT, LL_INFO10000, "{ Jitting IL Stub }\n"));
+ }
+ else
+ {
+ LOG((LF_JIT, LL_INFO10000, "{ Jitting dynamic method }\n"));
+ }
+ }
+ else
+ {
+ SString methodString;
+ if (LoggingOn(LF_JIT, LL_INFO10000))
+ TypeString::AppendMethodDebug(methodString, ftn);
+
+ LOG((LF_JIT, LL_INFO10000, "{ Jitting method (%p) %S %s\n", ftn, methodString.GetUnicode(), ftn->m_pszDebugMethodSignature));
+ }
+
+#if 0
+ if (!SString::_stricmp(cls,"ENC") &&
+ (!SString::_stricmp(name,"G")))
+ {
+        static int count = 0;
+ count++;
+ if (count > 0)
+ DebugBreak();
+ }
+#endif // 0
+#endif // _DEBUG
+
+ CORINFO_METHOD_HANDLE ftnHnd = (CORINFO_METHOD_HANDLE)ftn;
+ CORINFO_METHOD_INFO methodInfo;
+
+ getMethodInfoHelper(ftn, ftnHnd, ILHeader, &methodInfo);
+
+ // If it's generic then we can only enter through an instantiated md (unless we're just verifying it)
+ _ASSERTE((flags & CORJIT_FLG_IMPORT_ONLY) != 0 || !ftn->IsGenericMethodDefinition());
+
+ // If it's an instance method then it must not be entered from a generic class
+ _ASSERTE((flags & CORJIT_FLG_IMPORT_ONLY) != 0 || ftn->IsStatic() ||
+ ftn->GetNumGenericClassArgs() == 0 || ftn->HasClassInstantiation());
+
+    // Method attributes and signature are consistent
+ _ASSERTE(!!ftn->IsStatic() == ((methodInfo.args.callConv & CORINFO_CALLCONV_HASTHIS) == 0));
+
+ flags = GetCompileFlags(ftn, flags, &methodInfo);
+
+#ifdef _DEBUG
+ if (!(flags & CORJIT_FLG_SKIP_VERIFICATION))
+ {
+ SString methodString;
+ if (LoggingOn(LF_VERIFIER, LL_INFO100))
+ TypeString::AppendMethodDebug(methodString, ftn);
+
+ LOG((LF_VERIFIER, LL_INFO100, "{ Will verify method (%p) %S %s\n", ftn, methodString.GetUnicode(), ftn->m_pszDebugMethodSignature));
+ }
+#endif //_DEBUG
+
+#ifdef _TARGET_AMD64_
+ BOOL fForceRel32Overflow = FALSE;
+
+#ifdef _DEBUG
+ // Always exercise the overflow codepath with force relocs
+ if (PEDecoder::GetForceRelocs())
+ fForceRel32Overflow = TRUE;
+#endif
+
+ BOOL fAllowRel32 = g_fAllowRel32 | fForceRel32Overflow;
+
+    // For determinism, never try to use rel32 in the compilation process
+ if (IsCompilationProcess())
+ {
+ fForceRel32Overflow = FALSE;
+ fAllowRel32 = FALSE;
+ }
+#endif // _TARGET_AMD64_
+
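+    // Compilation retry loop: each iteration starts with a fresh CEEJitInfo.
+    // On AMD64, if the first attempt overflows its rel32 relocations, the
+    // method is backed out and jitted once more with rel32 jumps disallowed
+    // (see the IsRel32Overflow handling at the bottom of the loop).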
+ for (;;)
+ {
+#ifndef CROSSGEN_COMPILE
+ CEEJitInfo jitInfo(ftn, ILHeader, jitMgr, (flags & CORJIT_FLG_IMPORT_ONLY) != 0);
+#else
+        // This path should only ever be used for verification in crossgen, so we should not need an EEJitManager
+ _ASSERTE((flags & CORJIT_FLG_IMPORT_ONLY) != 0);
+ CEEInfo jitInfo(ftn, true);
+ EEJitManager *jitMgr = NULL;
+#endif
+
+#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
+ if (fForceRel32Overflow)
+ jitInfo.SetRel32Overflow(fAllowRel32);
+ jitInfo.SetAllowRel32(fAllowRel32);
+#endif
+
+ MethodDesc * pMethodForSecurity = jitInfo.GetMethodForSecurity(ftnHnd);
+
+ //Since the check could trigger a demand, we have to do this every time.
+ //This is actually an overly complicated way to make sure that a method can access all its arguments
+ //and its return type.
+ AccessCheckOptions::AccessCheckType accessCheckType = AccessCheckOptions::kNormalAccessibilityChecks;
+ TypeHandle ownerTypeForSecurity = TypeHandle(pMethodForSecurity->GetMethodTable());
+ DynamicResolver *pAccessContext = NULL;
+ BOOL doAccessCheck = TRUE;
+ if (pMethodForSecurity->IsDynamicMethod())
+ {
+ doAccessCheck = ModifyCheckForDynamicMethod(pMethodForSecurity->AsDynamicMethodDesc()->GetResolver(),
+ &ownerTypeForSecurity,
+ &accessCheckType, &pAccessContext);
+ }
+ if (doAccessCheck)
+ {
+ AccessCheckOptions accessCheckOptions(accessCheckType,
+ pAccessContext,
+ TRUE /*Throw on error*/,
+ pMethodForSecurity);
+
+ StaticAccessCheckContext accessContext(pMethodForSecurity, ownerTypeForSecurity.GetMethodTable());
+
+ // We now do an access check from pMethodForSecurity to pMethodForSecurity, its sole purpose is to
+ // verify that pMethodForSecurity/ownerTypeForSecurity has access to all its parameters.
+
+ // ownerTypeForSecurity.GetMethodTable() can be null if the pMethodForSecurity is a DynamicMethod
+ // associated with a TypeDesc (Array, Ptr, Ref, or FnPtr). That doesn't make any sense, but we will
+ // just do an access check from a NULL context which means only public types are accessible.
+ if (!ClassLoader::CanAccess(&accessContext,
+ ownerTypeForSecurity.GetMethodTable(),
+ ownerTypeForSecurity.GetAssembly(),
+ pMethodForSecurity->GetAttrs(),
+ pMethodForSecurity,
+ NULL,
+ accessCheckOptions,
+ TRUE /*Check method transparency*/,
+ TRUE /*Check type transparency*/))
+ {
+ EX_THROW(EEMethodException, (pMethodForSecurity));
+ }
+ }
+
+ BOOL raiseVerificationException, unverifiableGenericCode;
+
+ flags = GetCompileFlagsIfGenericInstantiation(
+ ftnHnd,
+ (CorJitFlag)flags,
+ &jitInfo,
+ &raiseVerificationException,
+ &unverifiableGenericCode);
+
+ if (raiseVerificationException)
+ COMPlusThrow(kVerificationException);
+
+ CorJitResult res;
+ PBYTE nativeEntry;
+ ULONG sizeOfCode;
+
+ {
+ GCX_COOP();
+
+ /* There is a double indirection to call compileMethod - can we
+ improve this with the new structure? */
+
+#ifdef PERF_TRACK_METHOD_JITTIMES
+            // Because we're not calling QPC enough already. Don't track times if we're just importing.
+ LARGE_INTEGER methodJitTimeStart = {0};
+ if (!(flags & CORJIT_FLG_IMPORT_ONLY))
+ QueryPerformanceCounter (&methodJitTimeStart);
+
+#endif
+#if defined(ENABLE_PERF_COUNTERS)
+ START_JIT_PERF();
+#endif
+
+#if defined(ENABLE_PERF_COUNTERS)
+ LARGE_INTEGER CycleStart;
+ QueryPerformanceCounter (&CycleStart);
+#endif // defined(ENABLE_PERF_COUNTERS)
+
+            // Note on debuggerTrackInfo arg: if we're only importing (i.e., verifying/
+            // checking to make sure we could JIT, but not actually generating code,
+            // e.g. for inlining), then DON'T TELL THE DEBUGGER about this.
+ res = CallCompileMethodWithSEHWrapper(jitMgr,
+ &jitInfo,
+ &methodInfo,
+ flags,
+ flags2,
+ &nativeEntry,
+ &sizeOfCode,
+ (MethodDesc*)ftn);
+ LOG((LF_CORDB, LL_EVERYTHING, "Got through CallCompile MethodWithSEHWrapper\n"));
+
+#if defined(ENABLE_PERF_COUNTERS)
+ LARGE_INTEGER CycleStop;
+ QueryPerformanceCounter(&CycleStop);
+ GetPerfCounters().m_Jit.timeInJitBase = GetPerfCounters().m_Jit.timeInJit;
+ GetPerfCounters().m_Jit.timeInJit += static_cast<DWORD>(CycleStop.QuadPart - CycleStart.QuadPart);
+ GetPerfCounters().m_Jit.cMethodsJitted++;
+ GetPerfCounters().m_Jit.cbILJitted+=methodInfo.ILCodeSize;
+
+#endif // defined(ENABLE_PERF_COUNTERS)
+
+#if defined(ENABLE_PERF_COUNTERS)
+ STOP_JIT_PERF();
+#endif
+
+#ifdef PERF_TRACK_METHOD_JITTIMES
+ //store the time in the string buffer. Module name and token are unique enough. Also, do not
+ //capture importing time, just actual compilation time.
+ if (!(flags & CORJIT_FLG_IMPORT_ONLY))
+ {
+ LARGE_INTEGER methodJitTimeStop;
+ QueryPerformanceCounter(&methodJitTimeStop);
+ SString codeBase;
+ ftn->GetModule()->GetDomainFile()->GetFile()->GetCodeBaseOrName(codeBase);
+ codeBase.AppendPrintf(W(",0x%x,%d,%d\n"),
+ //(const WCHAR *)codeBase, //module name
+ ftn->GetMemberDef(), //method token
+ (unsigned)(methodJitTimeStop.QuadPart - methodJitTimeStart.QuadPart), //cycle count
+ methodInfo.ILCodeSize //il size
+ );
+ WszOutputDebugString((const WCHAR*)codeBase);
+ }
+#endif // PERF_TRACK_METHOD_JITTIMES
+
+ }
+
+ LOG((LF_JIT, LL_INFO10000, "Done Jitting method %s::%s %s }\n",cls,name, ftn->m_pszDebugMethodSignature));
+
+ if (!SUCCEEDED(res))
+ {
+ COUNTER_ONLY(GetPerfCounters().m_Jit.cJitFailures++);
+
+#ifndef CROSSGEN_COMPILE
+ jitInfo.BackoutJitData(jitMgr);
+#endif
+
+ ThrowExceptionForJit(res);
+ }
+
+ if (flags & CORJIT_FLG_IMPORT_ONLY)
+ {
+            // The method must have been processed by the verifier. Note that it may
+            // have been marked either as verifiable or as unverifiable;
+            // i.e. IsVerified() does not imply IsVerifiable().
+ _ASSERTE(ftn->IsVerified());
+
+ // We are done
+ break;
+ }
+
+ if (!nativeEntry)
+ COMPlusThrow(kInvalidProgramException);
+
+#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
+ if (jitInfo.IsRel32Overflow())
+ {
+ // Backout and try again with fAllowRel32 == FALSE.
+ jitInfo.BackoutJitData(jitMgr);
+
+ // Disallow rel32 relocs in future.
+ g_fAllowRel32 = FALSE;
+
+ _ASSERTE(fAllowRel32 != FALSE);
+ fAllowRel32 = FALSE;
+ continue;
+ }
+#endif // _TARGET_AMD64_ && !CROSSGEN_COMPILE
+
+ LOG((LF_JIT, LL_INFO10000,
+ "Jitted Entry at" FMT_ADDR "method %s::%s %s\n", DBG_ADDR(nativeEntry),
+ ftn->m_pszDebugClassName, ftn->m_pszDebugMethodName, ftn->m_pszDebugMethodSignature));
+
+#if defined(FEATURE_CORESYSTEM)
+
+#ifdef _DEBUG
+ LPCUTF8 pszDebugClassName = ftn->m_pszDebugClassName;
+ LPCUTF8 pszDebugMethodName = ftn->m_pszDebugMethodName;
+ LPCUTF8 pszDebugMethodSignature = ftn->m_pszDebugMethodSignature;
+#else
+ LPCUTF8 pszNamespace;
+ LPCUTF8 pszDebugClassName = ftn->GetMethodTable()->GetFullyQualifiedNameInfo(&pszNamespace);
+ LPCUTF8 pszDebugMethodName = ftn->GetName();
+ LPCUTF8 pszDebugMethodSignature = "";
+#endif
+
+ //DbgPrintf("Jitted Entry at" FMT_ADDR "method %s::%s %s size %08x\n", DBG_ADDR(nativeEntry),
+ // pszDebugClassName, pszDebugMethodName, pszDebugMethodSignature, sizeOfCode);
+#endif
+
+ ClrFlushInstructionCache(nativeEntry, sizeOfCode);
+ ret = (PCODE)nativeEntry;
+
+#ifdef _TARGET_ARM_
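+        // Thumb entrypoints are tagged by setting the low bit of the address.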
+ ret |= THUMB_CODE;
+#endif
+
+ // We are done
+ break;
+ }
+
+#ifdef _DEBUG
+ FastInterlockIncrement(&g_JitCount);
+ static BOOL fHeartbeat = -1;
+
+ if (fHeartbeat == -1)
+ fHeartbeat = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitHeartbeat);
+
+ if (fHeartbeat)
+ printf(".");
+#endif // _DEBUG
+
+ COOPERATIVE_TRANSITION_END();
+ return ret;
+}
+
+extern "C" unsigned __stdcall PartialNGenStressPercentage()
+{
+ LIMITED_METHOD_CONTRACT;
+#ifndef _DEBUG
+ return 0;
+#else // _DEBUG
+ static ConfigDWORD partialNGenStress;
+ DWORD partialNGenStressVal = partialNGenStress.val(CLRConfig::INTERNAL_partialNGenStress);
+ _ASSERTE(partialNGenStressVal <= 100);
+ return partialNGenStressVal;
+#endif // _DEBUG
+}
+
+#ifdef FEATURE_PREJIT
+/*********************************************************************/
+
+//
+// Table loading functions
+//
+void Module::LoadHelperTable()
+{
+ STANDARD_VM_CONTRACT;
+
+#ifndef CROSSGEN_COMPILE
+ COUNT_T tableSize;
+ BYTE * table = (BYTE *) GetNativeImage()->GetNativeHelperTable(&tableSize);
+
+ if (tableSize == 0)
+ return;
+
+ EnsureWritableExecutablePages(table, tableSize);
+
+ BYTE * curEntry = table;
+ BYTE * tableEnd = table + tableSize;
+
+#ifdef LOGGING
+ int iEntryNumber = 0;
+#endif // LOGGING
+
+ //
+ // Fill in helpers
+ //
+
+ while (curEntry < tableEnd)
+ {
+ DWORD dwHelper = *(DWORD *)curEntry;
+
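+        // The low 16 bits of the entry select the helper; the remaining bits
+        // are flags (e.g. CORCOMPILE_HELPER_PTR, which requests an indirection
+        // cell instead of a jump thunk below).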
+ int iHelper = (USHORT)dwHelper;
+ _ASSERTE(iHelper < CORINFO_HELP_COUNT);
+
+ LOG((LF_JIT, LL_INFO1000000, "JIT helper %3d (%-40s: table @ %p, size 0x%x, entry %3d @ %p, pfnHelper %p)\n",
+ iHelper, hlpFuncTable[iHelper].name, table, tableSize, iEntryNumber, curEntry, hlpFuncTable[iHelper].pfnHelper));
+
+#if defined(ENABLE_FAST_GCPOLL_HELPER)
+ // The fast GC poll helper works by calling indirect through a pointer that points to either
+ // JIT_PollGC or JIT_PollGC_Nop, based on whether we need to poll or not. The JIT_PollGC_Nop
+ // version is just a "ret". The pointer is stored in hlpDynamicFuncTable[DYNAMIC_CORINFO_HELP_POLL_GC].
+ // See EnableJitGCPoll() and DisableJitGCPoll().
+ // In NGEN images, we generate a direct call to the helper table. Here, we replace that with
+ // an indirect jump through the pointer in hlpDynamicFuncTable[DYNAMIC_CORINFO_HELP_POLL_GC].
+ if (iHelper == CORINFO_HELP_POLL_GC)
+ {
+ LOG((LF_JIT, LL_INFO1000000, "JIT helper CORINFO_HELP_POLL_GC (%d); emitting indirect jump to 0x%x\n",
+ CORINFO_HELP_POLL_GC, &hlpDynamicFuncTable[DYNAMIC_CORINFO_HELP_POLL_GC].pfnHelper));
+
+ emitJumpInd(curEntry, &hlpDynamicFuncTable[DYNAMIC_CORINFO_HELP_POLL_GC].pfnHelper);
+ curEntry = curEntry + HELPER_TABLE_ENTRY_LEN;
+ }
+ else
+#endif // ENABLE_FAST_GCPOLL_HELPER
+ {
+ PCODE pfnHelper = CEEJitInfo::getHelperFtnStatic((CorInfoHelpFunc)iHelper);
+
+ if (dwHelper & CORCOMPILE_HELPER_PTR)
+ {
+ //
+ // Indirection cell
+ //
+
+ *(TADDR *)curEntry = pfnHelper;
+
+ curEntry = curEntry + sizeof(TADDR);
+ }
+ else
+ {
+ //
+ // Jump thunk
+ //
+
+#if defined(_TARGET_AMD64_)
+ *curEntry = X86_INSTR_JMP_REL32;
+ *(INT32 *)(curEntry + 1) = rel32UsingJumpStub((INT32 *)(curEntry + 1), pfnHelper, NULL, GetLoaderAllocator());
+#elif defined (_TARGET_ARM64_)
+ _ASSERTE(!"ARM64:NYI");
+#else // all other platforms
+ emitJump(curEntry, (LPVOID)pfnHelper);
+ _ASSERTE(HELPER_TABLE_ENTRY_LEN >= JUMP_ALLOCATE_SIZE);
+#endif
+
+ curEntry = curEntry + HELPER_TABLE_ENTRY_LEN;
+ }
+ }
+#ifdef LOGGING
+ // Note that some table entries are sizeof(TADDR) in length, and some are HELPER_TABLE_ENTRY_LEN in length
+ ++iEntryNumber;
+#endif // LOGGING
+ }
+
+ ClrFlushInstructionCache(table, tableSize);
+#endif // CROSSGEN_COMPILE
+}
+
+#ifdef FEATURE_READYTORUN
+CorInfoHelpFunc MapReadyToRunHelper(ReadyToRunHelper helperNum)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch (helperNum)
+ {
+#define HELPER(readyToRunHelper, corInfoHelpFunc, flags) \
+ case readyToRunHelper: return corInfoHelpFunc;
+#include "readytorunhelpers.h"
+
+ case READYTORUN_HELPER_GetString: return CORINFO_HELP_STRCNS;
+
+ default: return CORINFO_HELP_UNDEF;
+ }
+}
+
+void ComputeGCRefMap(MethodTable * pMT, BYTE * pGCRefMap, size_t cbGCRefMap)
+{
+ STANDARD_VM_CONTRACT;
+
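+    // The GC ref map is a bit vector over the unboxed value: one bit per
+    // pointer-sized slot, set when that slot holds a GC reference.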
+ ZeroMemory(pGCRefMap, cbGCRefMap);
+
+ if (!pMT->ContainsPointers())
+ return;
+
+ CGCDesc* map = CGCDesc::GetCGCDescFromMT(pMT);
+ CGCDescSeries* cur = map->GetHighestSeries();
+ CGCDescSeries* last = map->GetLowestSeries();
+ DWORD size = pMT->GetBaseSize();
+ _ASSERTE(cur >= last);
+
+ do
+ {
+ // offset to embedded references in this series must be
+ // adjusted by the VTable pointer, when in the unboxed state.
+ size_t offset = cur->GetSeriesOffset() - sizeof(void*);
+ size_t offsetStop = offset + cur->GetSeriesSize() + size;
+ while (offset < offsetStop)
+ {
+ size_t bit = offset / sizeof(void *);
+
+ size_t index = bit / 8;
+ _ASSERTE(index < cbGCRefMap);
+ pGCRefMap[index] |= (1 << (bit & 7));
+
+ offset += sizeof(void *);
+ }
+ cur--;
+ } while (cur >= last);
+}
+
+//
+// Type layout check verifies that there was no incompatible change in the value type layout.
+// If there was one, we will fall back to JIT instead of using the pre-generated code from the ready to run image.
+// This should be a rare situation: changes in value type layout are not common.
+//
+// The following properties of the value type layout are checked:
+// - Size
+// - HFA-ness (on platform that support HFAs)
+// - Alignment
+// - Position of GC references
+//
+BOOL TypeLayoutCheck(MethodTable * pMT, PCCOR_SIGNATURE pBlob)
+{
+ STANDARD_VM_CONTRACT;
+
+ SigPointer p(pBlob);
+ IfFailThrow(p.SkipExactlyOne());
+
+ DWORD dwFlags;
+ IfFailThrow(p.GetData(&dwFlags));
+
+ // Size is checked unconditionally
+ DWORD dwExpectedSize;
+ IfFailThrow(p.GetData(&dwExpectedSize));
+
+ DWORD dwActualSize = pMT->GetNumInstanceFieldBytes();
+ if (dwExpectedSize != dwActualSize)
+ return FALSE;
+
+#ifdef FEATURE_HFA
+ if (dwFlags & READYTORUN_LAYOUT_HFA)
+ {
+ DWORD dwExpectedHFAType;
+ IfFailThrow(p.GetData(&dwExpectedHFAType));
+
+ DWORD dwActualHFAType = pMT->GetHFAType();
+ if (dwExpectedHFAType != dwActualHFAType)
+ return FALSE;
+ }
+ else
+ {
+ if (pMT->IsHFA())
+ return FALSE;
+ }
+#else
+ _ASSERTE(!(dwFlags & READYTORUN_LAYOUT_HFA));
+#endif
+
+ if (dwFlags & READYTORUN_LAYOUT_Alignment)
+ {
+ DWORD dwExpectedAlignment = sizeof(void *);
+ if (!(dwFlags & READYTORUN_LAYOUT_Alignment_Native))
+ {
+ IfFailThrow(p.GetData(&dwExpectedAlignment));
+ }
+
+ DWORD dwActualAlignment = CEEInfo::getClassAlignmentRequirementStatic(pMT);
+ if (dwExpectedAlignment != dwActualAlignment)
+ return FALSE;
+
+ }
+
+ if (dwFlags & READYTORUN_LAYOUT_GCLayout)
+ {
+ if (dwFlags & READYTORUN_LAYOUT_GCLayout_Empty)
+ {
+ if (pMT->ContainsPointers())
+ return FALSE;
+ }
+ else
+ {
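+            // One bit per pointer-sized slot of the value, rounded up to whole bytes.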
+ size_t cbGCRefMap = (dwActualSize / sizeof(TADDR) + 7) / 8;
+ _ASSERTE(cbGCRefMap > 0);
+
+ BYTE * pGCRefMap = (BYTE *)_alloca(cbGCRefMap);
+
+ ComputeGCRefMap(pMT, pGCRefMap, cbGCRefMap);
+
+ if (memcmp(pGCRefMap, p.GetPtr(), cbGCRefMap) != 0)
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+#endif // FEATURE_READYTORUN
+
+BOOL LoadDynamicInfoEntry(Module *currentModule,
+ RVA fixupRva,
+ SIZE_T *entry)
+{
+ STANDARD_VM_CONTRACT;
+
+ PCCOR_SIGNATURE pBlob = currentModule->GetNativeFixupBlobData(fixupRva);
+
+ BYTE kind = *pBlob++;
+
+ Module * pInfoModule = currentModule;
+
+ if (kind & ENCODE_MODULE_OVERRIDE)
+ {
+ pInfoModule = currentModule->GetModuleFromIndex(CorSigUncompressData(pBlob));
+ kind &= ~ENCODE_MODULE_OVERRIDE;
+ }
+
+ MethodDesc * pMD = NULL;
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+
+ mdSignature token;
+
+ size_t result = 0;
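+    // Each case below resolves the fixup into 'result', which the common exit
+    // publishes into *entry; a few cases write their slots directly and return early.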
+
+ switch (kind)
+ {
+ case ENCODE_MODULE_HANDLE:
+ result = (size_t)pInfoModule;
+ break;
+
+ case ENCODE_TYPE_HANDLE:
+ case ENCODE_TYPE_DICTIONARY:
+ {
+ TypeHandle th = ZapSig::DecodeType(currentModule, pInfoModule, pBlob);
+
+ if (!th.IsTypeDesc())
+ {
+ if (currentModule->IsReadyToRun())
+ {
+ // We do not emit activation fixups for version resilient references. Activate the target explicitly.
+ th.AsMethodTable()->EnsureInstanceActive();
+ }
+ else
+ {
+#ifdef FEATURE_WINMD_RESILIENT
+ // We do not emit activation fixups for version resilient references. Activate the target explicitly.
+ th.AsMethodTable()->EnsureInstanceActive();
+#endif
+ }
+ }
+
+ result = (size_t)th.AsPtr();
+ }
+ break;
+
+ case ENCODE_METHOD_HANDLE:
+ case ENCODE_METHOD_DICTIONARY:
+ {
+ MethodDesc * pMD = ZapSig::DecodeMethod(currentModule, pInfoModule, pBlob);
+
+ if (currentModule->IsReadyToRun())
+ {
+ // We do not emit activation fixups for version resilient references. Activate the target explicitly.
+ pMD->EnsureActive();
+ }
+
+ result = (size_t)pMD;
+ }
+ break;
+
+ case ENCODE_FIELD_HANDLE:
+ result = (size_t) ZapSig::DecodeField(currentModule, pInfoModule, pBlob);
+ break;
+
+#ifndef CROSSGEN_COMPILE
+ case ENCODE_STRING_HANDLE:
+ {
+            // We need to update strings atomically (due to the NoStringInterning attribute). Note
+            // that modules with string interning don't really need this, as the hash tables have
+            // their own locking, but don't add more complexity for what will be the non-common
+            // case.
+
+ // We will have to lock and update the entry. (this is really a double check, where
+ // the first check is done in the caller of this function)
+ DWORD rid = CorSigUncompressData(pBlob);
+ if (rid == 0)
+ {
+ // Empty string
+ result = (size_t)StringObject::GetEmptyStringRefPtr();
+ }
+ else
+ {
+ CrstHolder ch(pInfoModule->GetFixupCrst());
+
+ if (!CORCOMPILE_IS_POINTER_TAGGED(*entry) && (*entry != NULL))
+ {
+ // We lost the race, just return
+ return TRUE;
+ }
+
+ // For generic instantiations compiled into the ngen image of some other
+ // client assembly, we need to ensure that we intern the string
+ // in the defining assembly.
+ bool mayNeedToSyncWithFixups = pInfoModule != currentModule;
+
+ result = (size_t) pInfoModule->ResolveStringRef(TokenFromRid(rid, mdtString), currentModule->GetDomain(), mayNeedToSyncWithFixups);
+ }
+ }
+ break;
+
+ case ENCODE_VARARGS_SIG:
+ {
+ mdSignature token = TokenFromRid(
+ CorSigUncompressData(pBlob),
+ mdtSignature);
+
+ IfFailThrow(pInfoModule->GetMDImport()->GetSigFromToken(token, &cSig, &pSig));
+
+ goto VarArgs;
+ }
+ break;
+
+ case ENCODE_VARARGS_METHODREF:
+ {
+ mdSignature token = TokenFromRid(
+ CorSigUncompressData(pBlob),
+ mdtMemberRef);
+
+ LPCSTR szName_Ignore;
+ IfFailThrow(pInfoModule->GetMDImport()->GetNameAndSigOfMemberRef(token, &pSig, &cSig, &szName_Ignore));
+
+ goto VarArgs;
+ }
+ break;
+
+ case ENCODE_VARARGS_METHODDEF:
+ {
+ token = TokenFromRid(
+ CorSigUncompressData(pBlob),
+ mdtMethodDef);
+
+ IfFailThrow(pInfoModule->GetMDImport()->GetSigOfMethodDef(token, &cSig, &pSig));
+
+ VarArgs:
+ result = (size_t) CORINFO_VARARGS_HANDLE(currentModule->GetVASigCookie(Signature(pSig, cSig)));
+ }
+ break;
+
+ case ENCODE_METHOD_ENTRY_DEF_TOKEN:
+ {
+ mdToken MethodDef = TokenFromRid(CorSigUncompressData(pBlob), mdtMethodDef);
+ pMD = MemberLoader::GetMethodDescFromMethodDef(pInfoModule, MethodDef, FALSE);
+
+ pMD->PrepareForUseAsADependencyOfANativeImage();
+
+ if (currentModule->IsReadyToRun())
+ {
+ // We do not emit activation fixups for version resilient references. Activate the target explicitly.
+ pMD->EnsureActive();
+ }
+
+ goto MethodEntry;
+ }
+
+ case ENCODE_METHOD_ENTRY_REF_TOKEN:
+ {
+ SigTypeContext typeContext;
+ mdToken MemberRef = TokenFromRid(CorSigUncompressData(pBlob), mdtMemberRef);
+ FieldDesc * pFD = NULL;
+ TypeHandle th;
+
+ MemberLoader::GetDescFromMemberRef(pInfoModule, MemberRef, &pMD, &pFD, &typeContext, FALSE /* strict metadata checks */, &th);
+ _ASSERTE(pMD != NULL);
+
+ pMD->PrepareForUseAsADependencyOfANativeImage();
+
+ if (currentModule->IsReadyToRun())
+ {
+ // We do not emit activation fixups for version resilient references. Activate the target explicitly.
+ pMD->EnsureActive();
+ }
+ else
+ {
+#ifdef FEATURE_WINMD_RESILIENT
+ // We do not emit activation fixups for version resilient references. Activate the target explicitly.
+ pMD->EnsureActive();
+#endif
+ }
+
+ goto MethodEntry;
+ }
+
+ case ENCODE_METHOD_ENTRY:
+ {
+ pMD = ZapSig::DecodeMethod(currentModule, pInfoModule, pBlob);
+
+ if (currentModule->IsReadyToRun())
+ {
+ // We do not emit activation fixups for version resilient references. Activate the target explicitly.
+ pMD->EnsureActive();
+ }
+
+ MethodEntry:
+ result = pMD->GetMultiCallableAddrOfCode(CORINFO_ACCESS_ANY);
+
+ #ifndef _TARGET_ARM_
+ if (CORCOMPILE_IS_PCODE_TAGGED(result))
+ {
+ // There is a rare case where the function entrypoint may not be aligned. This could happen only for FCalls,
+ // only on x86 and only if we failed to hardbind the fcall (e.g. ngen image for mscorlib.dll does not exist
+ // and /nodependencies flag for ngen was used). The function entrypoints should be aligned in all other cases.
+ //
+ // We will wrap the unaligned method entrypoint by funcptr stub with aligned entrypoint.
+ _ASSERTE(pMD->IsFCall());
+ result = pMD->GetLoaderAllocator()->GetFuncPtrStubs()->GetFuncPtrStub(pMD);
+ }
+ #endif
+ }
+ break;
+
+ case ENCODE_SYNC_LOCK:
+ {
+ TypeHandle th = ZapSig::DecodeType(currentModule, pInfoModule, pBlob);
+
+ result = (size_t) GetClassSync(th.AsMethodTable());
+ }
+ break;
+
+ case ENCODE_INDIRECT_PINVOKE_TARGET:
+ {
+ MethodDesc *pMethod = ZapSig::DecodeMethod(currentModule, pInfoModule, pBlob);
+
+ _ASSERTE(pMethod->IsNDirect());
+ NDirectMethodDesc *pMD = (NDirectMethodDesc*)pMethod;
+ result = (size_t)(LPVOID)&(pMD->GetWriteableData()->m_pNDirectTarget);
+ }
+ break;
+
+#if defined(PROFILING_SUPPORTED)
+ case ENCODE_PROFILING_HANDLE:
+ {
+ MethodDesc *pMethod = ZapSig::DecodeMethod(currentModule, pInfoModule, pBlob);
+
+            // Methods with no metadata behind them cannot be exposed to tools expecting metadata (profiler, debugger...);
+            // they should never come here, as they are called out in GetCompileFlags.
+ _ASSERTE(!pMethod->IsNoMetadata());
+
+ FunctionID funId = (FunctionID)pMethod;
+
+ BOOL bHookFunction = TRUE;
+ CORINFO_PROFILING_HANDLE profilerHandle = (CORINFO_PROFILING_HANDLE)funId;
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerFunctionIDMapperEnabled());
+ profilerHandle = (CORINFO_PROFILING_HANDLE) g_profControlBlock.pProfInterface->EEFunctionIDMapper(funId, &bHookFunction);
+ END_PIN_PROFILER();
+ }
+
+            // The profiling handle is an opaque token. It does not have to be aligned, thus we cannot store it in the same location as the token.
+ *EnsureWritablePages(entry+kZapProfilingHandleImportValueIndexClientData) = (SIZE_T)profilerHandle;
+
+ if (bHookFunction)
+ {
+ *EnsureWritablePages(entry+kZapProfilingHandleImportValueIndexEnterAddr) = (SIZE_T)(void *)hlpDynamicFuncTable[DYNAMIC_CORINFO_HELP_PROF_FCN_ENTER].pfnHelper;
+ *EnsureWritablePages(entry+kZapProfilingHandleImportValueIndexLeaveAddr) = (SIZE_T)(void *)hlpDynamicFuncTable[DYNAMIC_CORINFO_HELP_PROF_FCN_LEAVE].pfnHelper;
+ *EnsureWritablePages(entry+kZapProfilingHandleImportValueIndexTailcallAddr) = (SIZE_T)(void *)hlpDynamicFuncTable[DYNAMIC_CORINFO_HELP_PROF_FCN_TAILCALL].pfnHelper;
+ }
+ else
+ {
+ *EnsureWritablePages(entry+kZapProfilingHandleImportValueIndexEnterAddr) = (SIZE_T)(void *)JIT_ProfilerEnterLeaveTailcallStub;
+ *EnsureWritablePages(entry+kZapProfilingHandleImportValueIndexLeaveAddr) = (SIZE_T)(void *)JIT_ProfilerEnterLeaveTailcallStub;
+ *EnsureWritablePages(entry+kZapProfilingHandleImportValueIndexTailcallAddr) = (SIZE_T)(void *)JIT_ProfilerEnterLeaveTailcallStub;
+ }
+ }
+ break;
+#endif // PROFILING_SUPPORTED
+
+ case ENCODE_STATIC_FIELD_ADDRESS:
+ {
+ FieldDesc *pField = ZapSig::DecodeField(currentModule, pInfoModule, pBlob);
+
+ pField->GetEnclosingMethodTable()->CheckRestore();
+
+ // We can take address of RVA field only since ngened code is domain neutral
+ _ASSERTE(pField->IsRVA());
+
+            // The field address is not aligned, thus we cannot store it in the same location as the token.
+ *EnsureWritablePages(entry+1) = (size_t)pField->GetStaticAddressHandle(NULL);
+ }
+ break;
+
+ case ENCODE_VIRTUAL_ENTRY_SLOT:
+ {
+ DWORD slot = CorSigUncompressData(pBlob);
+
+ TypeHandle ownerType = ZapSig::DecodeType(currentModule, pInfoModule, pBlob);
+
+ LOG((LF_ZAP, LL_INFO100000, " Fixup stub dispatch\n"));
+
+ VirtualCallStubManager * pMgr = currentModule->GetLoaderAllocator()->GetVirtualCallStubManager();
+
+ // <REVISIT_TODO>
+ // We should be generating a stub indirection here, but the zapper already uses one level
+ // of indirection, i.e. we would have to return IAT_PPVALUE to the JIT, and on the whole the JITs
+ // aren't quite set up to accept that. Furthermore the call sequences would be different - at
+ // the moment an indirection cell uses "call [cell-addr]" on x86, and instead we would want the
+            // equivalent of "call [[call-addr]]". This could perhaps be implemented as "call [eax]" </REVISIT_TODO>
+ result = pMgr->GetCallStub(ownerType, slot);
+ }
+ break;
+
+ case ENCODE_CLASS_ID_FOR_STATICS:
+ {
+ TypeHandle th = ZapSig::DecodeType(currentModule, pInfoModule, pBlob);
+
+ MethodTable * pMT = th.AsMethodTable();
+ if (pMT->IsDynamicStatics())
+ {
+ result = pMT->GetModuleDynamicEntryID();
+ }
+ else
+ {
+ result = pMT->GetClassIndex();
+ }
+ }
+ break;
+
+ case ENCODE_MODULE_ID_FOR_STATICS:
+ {
+ result = pInfoModule->GetModuleID();
+ }
+ break;
+
+ case ENCODE_MODULE_ID_FOR_GENERIC_STATICS:
+ {
+ TypeHandle th = ZapSig::DecodeType(currentModule, pInfoModule, pBlob);
+
+ MethodTable * pMT = th.AsMethodTable();
+
+ result = pMT->GetModuleForStatics()->GetModuleID();
+ }
+ break;
+
+ case ENCODE_ACTIVE_DEPENDENCY:
+ {
+ Module* pModule = currentModule->GetModuleFromIndex(CorSigUncompressData(pBlob));
+
+ STRESS_LOG3(LF_ZAP,LL_INFO10000,"Modules are: %08x,%08x,%08x",currentModule,pInfoModule,pModule);
+ pInfoModule->AddActiveDependency(pModule, FALSE);
+ }
+ break;
+
+#ifdef FEATURE_READYTORUN
+ case ENCODE_READYTORUN_HELPER:
+ {
+ DWORD helperNum = CorSigUncompressData(pBlob);
+
+ CorInfoHelpFunc corInfoHelpFunc = MapReadyToRunHelper((ReadyToRunHelper)helperNum);
+ if (corInfoHelpFunc != CORINFO_HELP_UNDEF)
+ {
+ result = (size_t)CEEJitInfo::getHelperFtnStatic(corInfoHelpFunc);
+ }
+ else
+ {
+ switch (helperNum)
+ {
+ case READYTORUN_HELPER_Module:
+ {
+ Module * pPrevious = InterlockedCompareExchangeT(EnsureWritablePages((Module **)entry), pInfoModule, NULL);
+ if (pPrevious != pInfoModule && pPrevious != NULL)
+ COMPlusThrowHR(COR_E_FILELOAD, IDS_NATIVE_IMAGE_CANNOT_BE_LOADED_MULTIPLE_TIMES, pInfoModule->GetPath());
+ return TRUE;
+ }
+ break;
+
+ case READYTORUN_HELPER_GSCookie:
+ result = (size_t)GetProcessGSCookie();
+ break;
+
+ case READYTORUN_HELPER_DelayLoad_MethodCall:
+ result = (size_t)DelayLoad_MethodCall;
+ break;
+
+ case READYTORUN_HELPER_DelayLoad_Helper:
+ result = (size_t)DelayLoad_Helper;
+ break;
+
+ case READYTORUN_HELPER_DelayLoad_Helper_Obj:
+ result = (size_t)DelayLoad_Helper_Obj;
+ break;
+
+ case READYTORUN_HELPER_DelayLoad_Helper_ObjObj:
+ result = (size_t)DelayLoad_Helper_ObjObj;
+ break;
+
+ default:
+ STRESS_LOG1(LF_ZAP, LL_WARNING, "Unknown READYTORUN_HELPER %d\n", helperNum);
+ _ASSERTE(!"Unknown READYTORUN_HELPER");
+ return FALSE;
+ }
+ }
+ }
+ break;
+
+ case ENCODE_FIELD_OFFSET:
+ {
+ FieldDesc * pFD = ZapSig::DecodeField(currentModule, pInfoModule, pBlob);
+ _ASSERTE(!pFD->IsStatic());
+ _ASSERTE(!pFD->IsFieldOfValueType());
+
+ DWORD dwOffset = (DWORD)sizeof(Object) + pFD->GetOffset();
+
+ if (dwOffset > MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT)
+ return FALSE;
+ result = dwOffset;
+ }
+ break;
+
+ case ENCODE_FIELD_BASE_OFFSET:
+ {
+ TypeHandle th = ZapSig::DecodeType(currentModule, pInfoModule, pBlob);
+
+ MethodTable * pMT = th.AsMethodTable();
+ _ASSERTE(!pMT->IsValueType());
+
+ DWORD dwOffsetBase = ReadyToRunInfo::GetFieldBaseOffset(pMT);
+ if (dwOffsetBase > MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT)
+ return FALSE;
+ result = dwOffsetBase;
+ }
+ break;
+
+ case ENCODE_CHECK_TYPE_LAYOUT:
+ {
+ TypeHandle th = ZapSig::DecodeType(currentModule, pInfoModule, pBlob);
+ MethodTable * pMT = th.AsMethodTable();
+ _ASSERTE(pMT->IsValueType());
+
+ if (!TypeLayoutCheck(pMT, pBlob))
+ return FALSE;
+
+ result = 1;
+ }
+ break;
+
+ case ENCODE_CHECK_FIELD_OFFSET:
+ {
+ DWORD dwExpectedOffset = CorSigUncompressData(pBlob);
+
+ FieldDesc * pFD = ZapSig::DecodeField(currentModule, pInfoModule, pBlob);
+ _ASSERTE(!pFD->IsStatic());
+
+ DWORD dwOffset = pFD->GetOffset();
+ if (!pFD->IsFieldOfValueType())
+ dwOffset += sizeof(Object);
+
+ if (dwExpectedOffset != dwOffset)
+ return FALSE;
+
+ result = 1;
+ }
+ break;
+#endif // FEATURE_READYTORUN
+
+#endif // CROSSGEN_COMPILE
+
+ default:
+ STRESS_LOG1(LF_ZAP, LL_WARNING, "Unknown FIXUP_BLOB_KIND %d\n", kind);
+ _ASSERTE(!"Unknown FIXUP_BLOB_KIND");
+ return FALSE;
+ }
+
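+    // Make all side effects above visible before the fixup cell itself is
+    // published; callers may have already examined *entry without taking a lock.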
+ MemoryBarrier();
+ *EnsureWritablePages(entry) = result;
+
+ return TRUE;
+}
+#endif // FEATURE_PREJIT
+
+void* CEEInfo::getTailCallCopyArgsThunk(CORINFO_SIG_INFO *pSig,
+ CorInfoHelperTailCallSpecialHandling flags)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ void * ftn = NULL;
+
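+    // Only AMD64 and ARM use a copy-args thunk for tail calls; on all other
+    // targets this returns NULL.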
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
+
+ JIT_TO_EE_TRANSITION();
+
+ Stub* pStub = CPUSTUBLINKER::CreateTailCallCopyArgsThunk(pSig, flags);
+
+ ftn = (void*)pStub->GetEntryPoint();
+
+ EE_TO_JIT_TRANSITION();
+
+#endif // _TARGET_AMD64_ || _TARGET_ARM_
+
+ return ftn;
+}
+
+void CEEInfo::allocMem (
+ ULONG hotCodeSize, /* IN */
+ ULONG coldCodeSize, /* IN */
+ ULONG roDataSize, /* IN */
+ ULONG xcptnsCount, /* IN */
+ CorJitAllocMemFlag flag, /* IN */
+ void ** hotCodeBlock, /* OUT */
+ void ** coldCodeBlock, /* OUT */
+ void ** roDataBlock /* OUT */
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+void CEEInfo::reserveUnwindInfo (
+ BOOL isFunclet, /* IN */
+ BOOL isColdCode, /* IN */
+ ULONG unwindSize /* IN */
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+void CEEInfo::allocUnwindInfo (
+ BYTE * pHotCode, /* IN */
+ BYTE * pColdCode, /* IN */
+ ULONG startOffset, /* IN */
+ ULONG endOffset, /* IN */
+ ULONG unwindSize, /* IN */
+ BYTE * pUnwindBlock, /* IN */
+ CorJitFuncKind funcKind /* IN */
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+void * CEEInfo::allocGCInfo (
+ size_t size /* IN */
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE_RET(); // only called on derived class.
+}
+
+void CEEInfo::setEHcount (
+ unsigned cEH /* IN */
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+void CEEInfo::setEHinfo (
+ unsigned EHnumber, /* IN */
+ const CORINFO_EH_CLAUSE *clause /* IN */
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+InfoAccessType CEEInfo::constructStringLiteral(CORINFO_MODULE_HANDLE scopeHnd,
+ mdToken metaTok,
+ void **ppValue)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+InfoAccessType CEEInfo::emptyStringLiteral(void ** ppValue)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(isVerifyOnly());
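+    // Verification-only mode: return a harmless non-null placeholder.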
+ *ppValue = (void *)0x10;
+ return IAT_PVALUE;
+}
+
+void* CEEInfo::getFieldAddress(CORINFO_FIELD_HANDLE fieldHnd,
+ void **ppIndirection)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(isVerifyOnly());
+ if (ppIndirection != NULL)
+ *ppIndirection = NULL;
+ return (void *)0x10;
+}
+
+void* CEEInfo::getMethodSync(CORINFO_METHOD_HANDLE ftnHnd,
+ void **ppIndirection)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+HRESULT CEEInfo::allocBBProfileBuffer (
+ ULONG count, // The number of basic blocks that we have
+ ProfileBuffer ** profileBuffer
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE_RET(); // only called on derived class.
+}
+
+HRESULT CEEInfo::getBBProfileData(
+ CORINFO_METHOD_HANDLE ftnHnd,
+ ULONG * count, // The number of basic blocks that we have
+ ProfileBuffer ** profileBuffer,
+ ULONG * numRuns
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE_RET(); // only called on derived class.
+}
+
+
+void CEEInfo::recordCallSite(
+ ULONG instrOffset, /* IN */
+ CORINFO_SIG_INFO * callSig, /* IN */
+ CORINFO_METHOD_HANDLE methodHandle /* IN */
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+void CEEInfo::recordRelocation(
+ void * location, /* IN */
+ void * target, /* IN */
+ WORD fRelocType, /* IN */
+ WORD slotNum, /* IN */
+ INT32 addlDelta /* IN */
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+WORD CEEInfo::getRelocTypeHint(void * target)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE_RET(); // only called on derived class.
+}
+
+void CEEInfo::getModuleNativeEntryPointRange(
+ void ** pStart, /* OUT */
+ void ** pEnd /* OUT */
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+DWORD CEEInfo::getExpectedTargetArchitecture()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return IMAGE_FILE_MACHINE_NATIVE;
+}
+
+void CEEInfo::setBoundaries(CORINFO_METHOD_HANDLE ftn, ULONG32 cMap,
+ ICorDebugInfo::OffsetMapping *pMap)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+void CEEInfo::setVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo *vars)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+void* CEEInfo::getHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
+ void ** ppIndirection) /* OUT */
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+// Active dependency helpers
+void CEEInfo::addActiveDependency(CORINFO_MODULE_HANDLE moduleFrom,CORINFO_MODULE_HANDLE moduleTo)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+void CEEInfo::GetProfilingHandle(BOOL *pbHookFunction,
+ void **pProfilerHandle,
+ BOOL *pbIndirectedHandles)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+#endif // !DACCESS_COMPILE
+
+EECodeInfo::EECodeInfo()
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_codeAddress = NULL;
+
+ m_pJM = NULL;
+ m_pMD = NULL;
+ m_relOffset = 0;
+
+#ifdef WIN64EXCEPTIONS
+ m_pFunctionEntry = NULL;
+#endif
+}
+
+void EECodeInfo::Init(PCODE codeAddress)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ Init(codeAddress, ExecutionManager::GetScanFlags());
+}
+
+void EECodeInfo::Init(PCODE codeAddress, ExecutionManager::ScanFlag scanFlag)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ m_codeAddress = codeAddress;
+
+ RangeSection * pRS = ExecutionManager::FindCodeRange(codeAddress, scanFlag);
+ if (pRS == NULL)
+ goto Invalid;
+
+ if (!pRS->pjit->JitCodeToMethodInfo(pRS, codeAddress, &m_pMD, this))
+ goto Invalid;
+
+ m_pJM = pRS->pjit;
+ return;
+
+Invalid:
+ m_pJM = NULL;
+ m_pMD = NULL;
+ m_relOffset = 0;
+
+#ifdef WIN64EXCEPTIONS
+ m_pFunctionEntry = NULL;
+#endif
+}
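+
+// Usage sketch (illustrative only; the validity accessor named below is assumed,
+// not part of this change): callers stack-allocate an EECodeInfo, Init it with an
+// instruction pointer, and bail out when no managed code range matched.
+//
+//     EECodeInfo codeInfo;
+//     codeInfo.Init(ip);
+//     if (codeInfo.GetMethodDesc() != NULL)   // assumed accessor for m_pMD
+//     {
+//         TADDR start = codeInfo.GetStartAddress();
+//     }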
+
+TADDR EECodeInfo::GetSavedMethodCode()
+{
+ CONTRACTL {
+ // All EECodeInfo methods must be NOTHROW/GC_NOTRIGGER since they can
+ // be used during GC.
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+#ifndef _WIN64
+#if defined(HAVE_GCCOVER)
+ _ASSERTE (!m_pMD->m_GcCover || GCStress<cfg_instr>::IsEnabled());
+ if (GCStress<cfg_instr>::IsEnabled()
+ && m_pMD->m_GcCover)
+ {
+ _ASSERTE(m_pMD->m_GcCover->savedCode);
+
+ // Make sure we return the TADDR of savedCode here. The byte array is not marshaled automatically.
+ // The caller is responsible for any necessary marshaling.
+ return PTR_TO_MEMBER_TADDR(GCCoverageInfo, m_pMD->m_GcCover, savedCode);
+ }
+#endif //defined(HAVE_GCCOVER)
+#endif
+
+ return GetStartAddress();
+}
+
+TADDR EECodeInfo::GetStartAddress()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ return m_pJM->JitTokenToStartAddress(m_methodToken);
+}
+
+#if defined(WIN64EXCEPTIONS)
+
+// ----------------------------------------------------------------------------
+// EECodeInfo::GetMainFunctionInfo
+//
+// Description:
+// Simple helper to transform a funclet's EECodeInfo into a parent function EECodeInfo.
+//
+// Return Value:
+// An EECodeInfo for the start of the main function body (offset 0).
+//
+
+EECodeInfo EECodeInfo::GetMainFunctionInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ EECodeInfo result = *this;
+ result.m_relOffset = 0;
+ result.m_codeAddress = this->GetStartAddress();
+ result.m_pFunctionEntry = NULL;
+
+ return result;
+}
+
+PTR_RUNTIME_FUNCTION EECodeInfo::GetFunctionEntry()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ if (m_pFunctionEntry == NULL)
+ m_pFunctionEntry = m_pJM->LazyGetFunctionEntry(this);
+ return m_pFunctionEntry;
+}
+
+#if defined(_TARGET_AMD64_)
+
+BOOL EECodeInfo::HasFrameRegister()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PTR_RUNTIME_FUNCTION pFuncEntry = GetFunctionEntry();
+ _ASSERTE(pFuncEntry != NULL);
+
+ BOOL fHasFrameRegister = FALSE;
+ PUNWIND_INFO pUnwindInfo = (PUNWIND_INFO)(GetModuleBase() + pFuncEntry->UnwindData);
+ if (pUnwindInfo->FrameRegister != 0)
+ {
+ fHasFrameRegister = TRUE;
+ _ASSERTE(pUnwindInfo->FrameRegister == kRBP);
+ }
+
+ return fHasFrameRegister;
+}
+#endif // defined(_TARGET_AMD64_)
+
+#endif // defined(WIN64EXCEPTIONS)
+
+
+#if defined(_TARGET_AMD64_)
+// ----------------------------------------------------------------------------
+// EECodeInfo::GetUnwindInfoHelper
+//
+// Description:
+// Simple helper to return a pointer to the UNWIND_INFO given the offset to the unwind info.
+// On DAC builds, this function will read the memory from the target process and create a host copy.
+//
+// Arguments:
+// * unwindInfoOffset - This is the offset to the unwind info, relative to the beginning of the code heap
+// for jitted code or to the module base for NGen'ed code. this->GetModuleBase() will return the correct
+// module base.
+//
+// Return Value:
+// Return a pointer to the UNWIND_INFO. On DAC builds, this function will create a host copy of the
+// UNWIND_INFO and return a host pointer. It will correctly read all of the memory for the variable-sized
+// unwind info.
+//
+
+UNWIND_INFO * EECodeInfo::GetUnwindInfoHelper(ULONG unwindInfoOffset)
+{
+#if defined(DACCESS_COMPILE)
+ return DacGetUnwindInfo(static_cast<TADDR>(this->GetModuleBase() + unwindInfoOffset));
+#else // !DACCESS_COMPILE
+ return reinterpret_cast<UNWIND_INFO *>(this->GetModuleBase() + unwindInfoOffset);
+#endif // !DACCESS_COMPILE
+}
+
+// ----------------------------------------------------------------------------
+// EECodeInfo::GetFixedStackSize
+//
+// Description:
+// Return the fixed stack size of a specified managed method. This function DOES NOT take current control
+// PC into account. So the fixed stack size returned by this function is not valid in the prolog or
+// the epilog.
+//
+// Return Value:
+// Return the fixed stack size.
+//
+// Notes:
+// * For methods with dynamic stack allocations, this function will return the fixed stack size on X64 (the
+// stack size immediately after the prolog), and it will return 0 on IA64. This difference is due to
+// the different unwind info encoding.
+//
+
+ULONG EECodeInfo::GetFixedStackSize()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ ULONG uFixedStackSize = 0;
+
+ ULONG uDummy = 0;
+ GetOffsetsFromUnwindInfo(&uFixedStackSize, &uDummy);
+
+ return uFixedStackSize;
+}
+
+#define kRBP 5
+// The information returned by this method is only valid if we are not in a prolog or an epilog.
+// Since this method is only used for the security stackwalk cache, this assumption holds:
+// we cannot make a call in a prolog or an epilog.
+//
+// The next assumption is that only rbp is used as a frame register in jitted code. There is an
+// assert below to guard this assumption.
+void EECodeInfo::GetOffsetsFromUnwindInfo(ULONG* pRSPOffset, ULONG* pRBPOffset)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE((pRSPOffset != NULL) && (pRBPOffset != NULL));
+
+ // moduleBase is a target address.
+ TADDR moduleBase = GetModuleBase();
+
+ DWORD unwindInfo = RUNTIME_FUNCTION__GetUnwindInfoAddress(GetFunctionEntry());
+
+ if ((unwindInfo & RUNTIME_FUNCTION_INDIRECT) != 0)
+ {
+ unwindInfo = RUNTIME_FUNCTION__GetUnwindInfoAddress(PTR_RUNTIME_FUNCTION(moduleBase + (unwindInfo & ~RUNTIME_FUNCTION_INDIRECT)));
+ }
+
+ UNWIND_INFO * pInfo = GetUnwindInfoHelper(unwindInfo);
+ if (pInfo->Flags & UNW_FLAG_CHAININFO)
+ {
+ _ASSERTE(!"GetRbpOffset() - chained unwind info used, violating assumptions of the security stackwalk cache");
+ DebugBreak();
+ }
+
+ // Either we are not using a frame pointer, or we are using rbp as the frame pointer.
+ if ( (pInfo->FrameRegister != 0) && (pInfo->FrameRegister != kRBP) )
+ {
+ _ASSERTE(!"GetRbpOffset() - non-RBP frame pointer used, violating assumptions of the security stackwalk cache");
+ DebugBreak();
+ }
+
+ // Walk the unwind info.
+ ULONG StackOffset = 0;
+ ULONG StackSize = 0;
+ for (int i = 0; i < pInfo->CountOfUnwindCodes; i++)
+ {
+ ULONG UnwindOp = pInfo->UnwindCode[i].UnwindOp;
+ ULONG OpInfo = pInfo->UnwindCode[i].OpInfo;
+
+ if (UnwindOp == UWOP_SAVE_NONVOL)
+ {
+ if (OpInfo == kRBP)
+ {
+ StackOffset = pInfo->UnwindCode[i+1].FrameOffset * 8;
+ }
+ }
+ else if (UnwindOp == UWOP_SAVE_NONVOL_FAR)
+ {
+ if (OpInfo == kRBP)
+ {
+ StackOffset = pInfo->UnwindCode[i + 1].FrameOffset;
+ StackOffset += (pInfo->UnwindCode[i + 2].FrameOffset << 16);
+ }
+ }
+ else if (UnwindOp == UWOP_ALLOC_SMALL)
+ {
+ StackSize += (OpInfo * 8) + 8;
+ }
+ else if (UnwindOp == UWOP_ALLOC_LARGE)
+ {
+ ULONG IncrementalStackSize = pInfo->UnwindCode[i + 1].FrameOffset;
+ if (OpInfo == 0)
+ {
+ IncrementalStackSize *= 8;
+ }
+ else
+ {
+ IncrementalStackSize += (pInfo->UnwindCode[i + 2].FrameOffset << 16);
+
+ // This is a special opcode. We need to increment the index by 1 in addition to the normal adjustments.
+ i += 1;
+ }
+ StackSize += IncrementalStackSize;
+ }
+ else if (UnwindOp == UWOP_PUSH_NONVOL)
+ {
+ // Because of constraints on epilogs, this unwind opcode is always last in the unwind code array.
+ // This means that StackSize has been initialized already when we first see this unwind opcode.
+ // Note that the initial value of StackSize does not include the stack space used for pushes.
+ // Thus, here we only need to increment StackSize 8 bytes at a time until we see the unwind code for "push rbp".
+ if (OpInfo == kRBP)
+ {
+ StackOffset = StackSize;
+ }
+
+ StackSize += 8;
+ }
+
+ // Adjust the index into the unwind code array.
+ i += UnwindOpExtraSlotTable[UnwindOp];
+ }
+
+ *pRSPOffset = StackSize + 8; // add 8 for the return address
+ *pRBPOffset = StackOffset;
+}
+#undef kRBP
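+
+// Worked example (illustrative only): for a prolog of "push rbp; sub rsp, 0x40",
+// the unwind codes appear in reverse prolog order and the walk above computes:
+//     UWOP_ALLOC_SMALL, OpInfo = 7    -> StackSize += 7 * 8 + 8 = 0x40
+//     UWOP_PUSH_NONVOL, OpInfo = rbp  -> StackOffset = 0x40; StackSize += 8 = 0x48
+// so *pRSPOffset = 0x48 + 8 = 0x50 (including the pushed return address) and
+// *pRBPOffset = 0x40, i.e. rbp's spill slot lives at [rsp + 0x40] after the prolog.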
+
+
+#if defined(_DEBUG) && defined(HAVE_GCCOVER)
+
+LPVOID EECodeInfo::findNextFunclet (LPVOID pvFuncletStart, SIZE_T cbCode, LPVOID *ppvFuncletEnd)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ while (cbCode > 0)
+ {
+ //
+ // This is GCStress debug only - use the slow OS APIs to enumerate funclets
+ //
+
+ ULONGLONG uImageBase;
+ PRUNTIME_FUNCTION pFunctionEntry = (PRUNTIME_FUNCTION) RtlLookupFunctionEntry((ULONGLONG)pvFuncletStart,
+ &uImageBase
+ AMD64_ARG(NULL)
+ );
+
+ if (pFunctionEntry != NULL)
+ {
+#ifdef FEATURE_PREJIT
+ // Workaround: check for the indirect entry that is generated for the cold part of the main method body.
+ if ((TADDR)pvFuncletStart < (TADDR)uImageBase + pFunctionEntry->BeginAddress ||
+ (TADDR)uImageBase + pFunctionEntry->EndAddress <= (TADDR)pvFuncletStart)
+ {
+ Module * pZapModule = ExecutionManager::FindZapModule((TADDR)pvFuncletStart);
+ NGenLayoutInfo * pLayoutInfo = pZapModule->GetNGenLayoutInfo();
+
+ int ColdFunctionIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod((DWORD)((TADDR)pvFuncletStart - uImageBase),
+ pLayoutInfo->m_pRuntimeFunctions[2],
+ 0, pLayoutInfo->m_nRuntimeFunctions[2] - 1);
+
+ pFunctionEntry = pLayoutInfo->m_pRuntimeFunctions[2] + ColdFunctionIndex;
+ }
+#endif
+
+ _ASSERTE((TADDR)pvFuncletStart == (TADDR)uImageBase + pFunctionEntry->BeginAddress);
+ _ASSERTE((TADDR)uImageBase + pFunctionEntry->EndAddress <= (TADDR)pvFuncletStart + cbCode);
+ *ppvFuncletEnd = (LPVOID)(uImageBase + pFunctionEntry->EndAddress);
+ return (LPVOID)(uImageBase + pFunctionEntry->BeginAddress);
+ }
+
+ pvFuncletStart = (LPVOID)((TADDR)pvFuncletStart + 1);
+ cbCode--;
+ }
+
+ return NULL;
+}
+#endif // defined(_DEBUG) && !defined(HAVE_GCCOVER)
+#endif // defined(_TARGET_AMD64_)
+
diff --git a/src/vm/jitinterface.h b/src/vm/jitinterface.h
new file mode 100644
index 0000000000..b576f2ef26
--- /dev/null
+++ b/src/vm/jitinterface.h
@@ -0,0 +1,1654 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: JITinterface.H
+//
+
+// ===========================================================================
+
+
+#ifndef JITINTERFACE_H
+#define JITINTERFACE_H
+
+#include "corjit.h"
+#ifdef FEATURE_PREJIT
+#include "corcompile.h"
+#endif // FEATURE_PREJIT
+
+class Stub;
+class MethodDesc;
+class FieldDesc;
+enum RuntimeExceptionKind;
+class AwareLock;
+class PtrArray;
+
+#include "genericdict.h"
+
+inline FieldDesc* GetField(CORINFO_FIELD_HANDLE fieldHandle)
+{
+ LIMITED_METHOD_CONTRACT;
+ return (FieldDesc*) fieldHandle;
+}
+
+inline
+bool SigInfoFlagsAreValid (CORINFO_SIG_INFO *sig)
+{
+ LIMITED_METHOD_CONTRACT;
+ return !(sig->flags & ~( CORINFO_SIGFLAG_IS_LOCAL_SIG
+ | CORINFO_SIGFLAG_IL_STUB
+ ));
+}
+
+
+void InitJITHelpers1();
+void InitJITHelpers2();
+
+PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* header,
+ DWORD flags, DWORD flags2);
+
+void getMethodInfoHelper(MethodDesc * ftn,
+ CORINFO_METHOD_HANDLE ftnHnd,
+ COR_ILMETHOD_DECODER * header,
+ CORINFO_METHOD_INFO * methInfo);
+
+void getMethodInfoILMethodHeaderHelper(
+ COR_ILMETHOD_DECODER* header,
+ CORINFO_METHOD_INFO* methInfo
+ );
+
+
+#ifdef FEATURE_PREJIT
+BOOL LoadDynamicInfoEntry(Module *currentModule,
+ RVA fixupRva,
+ SIZE_T *entry);
+#endif // FEATURE_PREJIT
+
+//
+// The legacy x86 monitor helpers do not need a state argument
+//
+#if !defined(_TARGET_X86_)
+
+#define FCDECL_MONHELPER(funcname, arg) FCDECL2(void, funcname, arg, BYTE* pbLockTaken)
+#define HCIMPL_MONHELPER(funcname, arg) HCIMPL2(void, funcname, arg, BYTE* pbLockTaken)
+#define MONHELPER_STATE(x) x
+#define MONHELPER_ARG pbLockTaken
+
+#else
+
+#define FCDECL_MONHELPER(funcname, arg) FCDECL1(void, funcname, arg)
+#define HCIMPL_MONHELPER(funcname, arg) HCIMPL1(void, funcname, arg)
+#define MONHELPER_STATE(x)
+#define MONHELPER_ARG NULL
+
+#endif // _TARGET_X86_
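+
+// Expansion example (for illustration): on non-x86 targets,
+//     FCDECL_MONHELPER(JIT_MonEnterWorker, Object* obj)
+// declares FCDECL2(void, JIT_MonEnterWorker, Object* obj, BYTE* pbLockTaken),
+// while on x86 it declares FCDECL1(void, JIT_MonEnterWorker, Object* obj);
+// MONHELPER_ARG then supplies NULL where the lock-taken state does not exist.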
+
+
+//
+// JIT HELPER ALIASING FOR PORTABILITY.
+//
+// The portable helper is used if the platform does not provide an optimized implementation.
+//
+
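+// For example, a platform could supply a hand-optimized helper by defining, in an
+// architecture-specific header included before this point (the name below is
+// hypothetical):
+//
+//     #define JIT_MonEnter JIT_MonEnter_FastAsm
+//
+// With that in place, the #ifndef below is skipped and JIT_MonEnter_Portable goes unused.
+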
+#ifndef JIT_MonEnter
+#define JIT_MonEnter JIT_MonEnter_Portable
+#endif
+EXTERN_C FCDECL1(void, JIT_MonEnter, Object *obj);
+EXTERN_C FCDECL1(void, JIT_MonEnter_Portable, Object *obj);
+
+#ifndef JIT_MonEnterWorker
+#define JIT_MonEnterWorker JIT_MonEnterWorker_Portable
+#endif
+EXTERN_C FCDECL_MONHELPER(JIT_MonEnterWorker, Object *obj);
+EXTERN_C FCDECL_MONHELPER(JIT_MonEnterWorker_Portable, Object *obj);
+
+#ifndef JIT_MonReliableEnter
+#define JIT_MonReliableEnter JIT_MonReliableEnter_Portable
+#endif
+EXTERN_C FCDECL2(void, JIT_MonReliableEnter, Object* obj, BYTE *tookLock);
+EXTERN_C FCDECL2(void, JIT_MonReliableEnter_Portable, Object* obj, BYTE *tookLock);
+
+#ifndef JIT_MonTryEnter
+#define JIT_MonTryEnter JIT_MonTryEnter_Portable
+#endif
+EXTERN_C FCDECL3(void, JIT_MonTryEnter, Object *obj, INT32 timeout, BYTE* pbLockTaken);
+EXTERN_C FCDECL3(void, JIT_MonTryEnter_Portable, Object *obj, INT32 timeout, BYTE* pbLockTaken);
+
+#ifndef JIT_MonExit
+#define JIT_MonExit JIT_MonExit_Portable
+#endif
+EXTERN_C FCDECL1(void, JIT_MonExit, Object *obj);
+EXTERN_C FCDECL1(void, JIT_MonExit_Portable, Object *obj);
+
+#ifndef JIT_MonExitWorker
+#define JIT_MonExitWorker JIT_MonExitWorker_Portable
+#endif
+EXTERN_C FCDECL_MONHELPER(JIT_MonExitWorker, Object *obj);
+EXTERN_C FCDECL_MONHELPER(JIT_MonExitWorker_Portable, Object *obj);
+
+#ifndef JIT_MonEnterStatic
+#define JIT_MonEnterStatic JIT_MonEnterStatic_Portable
+#endif
+EXTERN_C FCDECL_MONHELPER(JIT_MonEnterStatic, AwareLock *lock);
+EXTERN_C FCDECL_MONHELPER(JIT_MonEnterStatic_Portable, AwareLock *lock);
+
+#ifndef JIT_MonExitStatic
+#define JIT_MonExitStatic JIT_MonExitStatic_Portable
+#endif
+EXTERN_C FCDECL_MONHELPER(JIT_MonExitStatic, AwareLock *lock);
+EXTERN_C FCDECL_MONHELPER(JIT_MonExitStatic_Portable, AwareLock *lock);
+
+
+#ifndef JIT_GetSharedGCStaticBase
+#define JIT_GetSharedGCStaticBase JIT_GetSharedGCStaticBase_Portable
+#endif
+EXTERN_C FCDECL2(void*, JIT_GetSharedGCStaticBase, SIZE_T moduleDomainID, DWORD dwModuleClassID);
+EXTERN_C FCDECL2(void*, JIT_GetSharedGCStaticBase_Portable, SIZE_T moduleDomainID, DWORD dwModuleClassID);
+
+#ifndef JIT_GetSharedNonGCStaticBase
+#define JIT_GetSharedNonGCStaticBase JIT_GetSharedNonGCStaticBase_Portable
+#endif
+EXTERN_C FCDECL2(void*, JIT_GetSharedNonGCStaticBase, SIZE_T moduleDomainID, DWORD dwModuleClassID);
+EXTERN_C FCDECL2(void*, JIT_GetSharedNonGCStaticBase_Portable, SIZE_T moduleDomainID, DWORD dwModuleClassID);
+
+#ifndef JIT_GetSharedGCStaticBaseNoCtor
+#define JIT_GetSharedGCStaticBaseNoCtor JIT_GetSharedGCStaticBaseNoCtor_Portable
+#endif
+EXTERN_C FCDECL1(void*, JIT_GetSharedGCStaticBaseNoCtor, SIZE_T moduleDomainID);
+EXTERN_C FCDECL1(void*, JIT_GetSharedGCStaticBaseNoCtor_Portable, SIZE_T moduleDomainID);
+
+#ifndef JIT_GetSharedNonGCStaticBaseNoCtor
+#define JIT_GetSharedNonGCStaticBaseNoCtor JIT_GetSharedNonGCStaticBaseNoCtor_Portable
+#endif
+EXTERN_C FCDECL1(void*, JIT_GetSharedNonGCStaticBaseNoCtor, SIZE_T moduleDomainID);
+EXTERN_C FCDECL1(void*, JIT_GetSharedNonGCStaticBaseNoCtor_Portable, SIZE_T moduleDomainID);
+
+#ifndef JIT_ChkCastClass
+#define JIT_ChkCastClass JIT_ChkCastClass_Portable
+#endif
+EXTERN_C FCDECL2(Object*, JIT_ChkCastClass, MethodTable* pMT, Object* pObject);
+EXTERN_C FCDECL2(Object*, JIT_ChkCastClass_Portable, MethodTable* pMT, Object* pObject);
+
+#ifndef JIT_ChkCastClassSpecial
+#define JIT_ChkCastClassSpecial JIT_ChkCastClassSpecial_Portable
+#endif
+EXTERN_C FCDECL2(Object*, JIT_ChkCastClassSpecial, MethodTable* pMT, Object* pObject);
+EXTERN_C FCDECL2(Object*, JIT_ChkCastClassSpecial_Portable, MethodTable* pMT, Object* pObject);
+
+#ifndef JIT_IsInstanceOfClass
+#define JIT_IsInstanceOfClass JIT_IsInstanceOfClass_Portable
+#endif
+EXTERN_C FCDECL2(Object*, JIT_IsInstanceOfClass, MethodTable* pMT, Object* pObject);
+EXTERN_C FCDECL2(Object*, JIT_IsInstanceOfClass_Portable, MethodTable* pMT, Object* pObject);
+
+#ifndef JIT_ChkCastInterface
+#define JIT_ChkCastInterface JIT_ChkCastInterface_Portable
+#endif
+EXTERN_C FCDECL2(Object*, JIT_ChkCastInterface, MethodTable* pMT, Object* pObject);
+EXTERN_C FCDECL2(Object*, JIT_ChkCastInterface_Portable, MethodTable* pMT, Object* pObject);
+
+#ifndef JIT_IsInstanceOfInterface
+#define JIT_IsInstanceOfInterface JIT_IsInstanceOfInterface_Portable
+#endif
+EXTERN_C FCDECL2(Object*, JIT_IsInstanceOfInterface, MethodTable* pMT, Object* pObject);
+EXTERN_C FCDECL2(Object*, JIT_IsInstanceOfInterface_Portable, MethodTable* pMT, Object* pObject);
+
+#ifndef JIT_NewCrossContext
+#define JIT_NewCrossContext JIT_NewCrossContext_Portable
+#endif
+EXTERN_C FCDECL1(Object*, JIT_NewCrossContext, CORINFO_CLASS_HANDLE typeHnd_);
+EXTERN_C FCDECL1(Object*, JIT_NewCrossContext_Portable, CORINFO_CLASS_HANDLE typeHnd_);
+
+#ifndef JIT_Stelem_Ref
+#define JIT_Stelem_Ref JIT_Stelem_Ref_Portable
+#endif
+EXTERN_C FCDECL3(void, JIT_Stelem_Ref, PtrArray* array, unsigned idx, Object* val);
+EXTERN_C FCDECL3(void, JIT_Stelem_Ref_Portable, PtrArray* array, unsigned idx, Object* val);
+
+EXTERN_C FCDECL_MONHELPER(JITutil_MonEnterWorker, Object* obj);
+EXTERN_C FCDECL2(void, JITutil_MonReliableEnter, Object* obj, BYTE* pbLockTaken);
+EXTERN_C FCDECL3(void, JITutil_MonTryEnter, Object* obj, INT32 timeOut, BYTE* pbLockTaken);
+EXTERN_C FCDECL_MONHELPER(JITutil_MonExitWorker, Object* obj);
+EXTERN_C FCDECL_MONHELPER(JITutil_MonSignal, AwareLock* lock);
+EXTERN_C FCDECL_MONHELPER(JITutil_MonContention, AwareLock* awarelock);
+EXTERN_C FCDECL2(void, JITutil_MonReliableContention, AwareLock* awarelock, BYTE* pbLockTaken);
+
+// Slow versions to tail call if the fast version fails
+EXTERN_C FCDECL2(void*, JIT_GetSharedNonGCStaticBase_Helper, DomainLocalModule *pLocalModule, DWORD dwClassDomainID);
+EXTERN_C FCDECL2(void*, JIT_GetSharedGCStaticBase_Helper, DomainLocalModule *pLocalModule, DWORD dwClassDomainID);
+
+EXTERN_C void DoJITFailFast ();
+EXTERN_C FCDECL0(void, JIT_FailFast);
+extern FCDECL3(void, JIT_ThrowAccessException, RuntimeExceptionKind, CORINFO_METHOD_HANDLE caller, void * callee);
+
+FCDECL1(void*, JIT_SafeReturnableByref, void* byref);
+
+#if !defined(FEATURE_USE_ASM_GC_WRITE_BARRIERS) && defined(FEATURE_COUNT_GC_WRITE_BARRIERS)
+// Extra argument for the classification of the checked barriers.
+extern "C" FCDECL3(VOID, JIT_CheckedWriteBarrier, Object **dst, Object *ref, CheckedWriteBarrierKinds kind);
+#else
+// Regular checked write barrier.
+extern "C" FCDECL2(VOID, JIT_CheckedWriteBarrier, Object **dst, Object *ref);
+#endif
+
+extern "C" FCDECL2(VOID, JIT_WriteBarrier, Object **dst, Object *ref);
+
+extern "C" FCDECL2(VOID, JIT_WriteBarrierEnsureNonHeapTarget, Object **dst, Object *ref);
+
+extern "C" FCDECL2(Object*, JIT_ChkCastAny, CORINFO_CLASS_HANDLE type, Object *pObject); // JITInterfaceX86.cpp, etc.
+extern "C" FCDECL2(Object*, JIT_IsInstanceOfAny, CORINFO_CLASS_HANDLE type, Object *pObject);
+
+extern "C" FCDECL2(Object*, JITutil_ChkCastInterface, MethodTable *pInterfaceMT, Object *obj);
+extern "C" FCDECL2(Object*, JITutil_IsInstanceOfInterface, MethodTable *pInterfaceMT, Object *obj);
+extern "C" FCDECL2(Object*, JITutil_ChkCastAny, CORINFO_CLASS_HANDLE type, Object *obj);
+extern "C" FCDECL2(Object*, JITutil_IsInstanceOfAny, CORINFO_CLASS_HANDLE type, Object *obj);
+
+extern "C" FCDECL1(void, JIT_InternalThrow, unsigned exceptNum);
+extern "C" FCDECL1(void*, JIT_InternalThrowFromHelper, unsigned exceptNum);
+
+#ifdef _TARGET_AMD64_
+
+
+class WriteBarrierManager
+{
+public:
+ enum WriteBarrierType
+ {
+ WRITE_BARRIER_UNINITIALIZED = 0,
+ WRITE_BARRIER_PREGROW32 = 1,
+ WRITE_BARRIER_PREGROW64 = 2,
+ WRITE_BARRIER_POSTGROW32 = 3,
+ WRITE_BARRIER_POSTGROW64 = 4,
+ WRITE_BARRIER_SVR32 = 5,
+ WRITE_BARRIER_SVR64 = 6,
+ WRITE_BARRIER_BUFFER = 7,
+ };
+
+ WriteBarrierManager();
+ void Initialize();
+
+ void UpdateEphemeralBounds();
+ void UpdateCardTableLocation(BOOL bReqUpperBoundsCheck);
+
+protected:
+ size_t GetCurrentWriteBarrierSize();
+ size_t GetSpecificWriteBarrierSize(WriteBarrierType writeBarrier);
+ PBYTE CalculatePatchLocation(LPVOID base, LPVOID label, int offset);
+ PCODE GetCurrentWriteBarrierCode();
+ void ChangeWriteBarrierTo(WriteBarrierType newWriteBarrier);
+ bool NeedDifferentWriteBarrier(BOOL bReqUpperBoundsCheck, WriteBarrierType* pNewWriteBarrierType);
+
+private:
+ void Validate();
+
+ WriteBarrierType m_currentWriteBarrier;
+
+ PBYTE m_pLowerBoundImmediate; // PREGROW32 | PREGROW64 | POSTGROW32 | POSTGROW64 |       |
+ PBYTE m_pCardTableImmediate;  // PREGROW32 | PREGROW64 | POSTGROW32 | POSTGROW64 | SVR32 |
+ PBYTE m_pUpperBoundImmediate; //           |           | POSTGROW32 | POSTGROW64 |       |
+ PBYTE m_pCardTableImmediate2; // PREGROW32 |           | POSTGROW32 |            | SVR32 |
+};
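+
+// Intended flow (a sketch inferred from the interface above): when the ephemeral
+// generation bounds or the card table move, the GC calls UpdateEphemeralBounds()
+// or UpdateCardTableLocation(); these use NeedDifferentWriteBarrier() to pick a
+// WriteBarrierType and, if it changed, ChangeWriteBarrierTo() to install the new
+// barrier body and re-patch the recorded immediates (m_pCardTableImmediate etc.)
+// with the current GC globals.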
+
+#endif // _TARGET_AMD64_
+
+#ifdef _WIN64
+EXTERN_C FCDECL1(Object*, JIT_TrialAllocSFastMP_InlineGetThread, CORINFO_CLASS_HANDLE typeHnd_);
+EXTERN_C FCDECL2(Object*, JIT_BoxFastMP_InlineGetThread, CORINFO_CLASS_HANDLE type, void* data);
+EXTERN_C FCDECL2(Object*, JIT_NewArr1VC_MP_InlineGetThread, CORINFO_CLASS_HANDLE typeHnd_, INT_PTR size);
+EXTERN_C FCDECL2(Object*, JIT_NewArr1OBJ_MP_InlineGetThread, CORINFO_CLASS_HANDLE typeHnd_, INT_PTR size);
+
+#endif // _WIN64
+
+EXTERN_C FCDECL2_VV(INT64, JIT_LMul, INT64 val1, INT64 val2);
+
+EXTERN_C FCDECL1_V(INT64, JIT_Dbl2Lng, double val);
+EXTERN_C FCDECL1_V(INT64, JIT_Dbl2IntSSE2, double val);
+EXTERN_C FCDECL1_V(INT64, JIT_Dbl2LngP4x87, double val);
+EXTERN_C FCDECL1_V(INT64, JIT_Dbl2LngSSE3, double val);
+EXTERN_C FCDECL1_V(INT64, JIT_Dbl2LngOvf, double val);
+
+EXTERN_C FCDECL1_V(INT32, JIT_Dbl2IntOvf, double val);
+
+EXTERN_C FCDECL2_VV(float, JIT_FltRem, float dividend, float divisor);
+EXTERN_C FCDECL2_VV(double, JIT_DblRem, double dividend, double divisor);
+
+#if !defined(_WIN64) && !defined(_TARGET_X86_)
+EXTERN_C FCDECL2_VV(UINT64, JIT_LLsh, UINT64 num, int shift);
+EXTERN_C FCDECL2_VV(INT64, JIT_LRsh, INT64 num, int shift);
+EXTERN_C FCDECL2_VV(UINT64, JIT_LRsz, UINT64 num, int shift);
+#endif
+
+#ifdef _TARGET_X86_
+
+extern "C"
+{
+ void STDCALL JIT_LLsh(); // JIThelp.asm
+ void STDCALL JIT_LRsh(); // JIThelp.asm
+ void STDCALL JIT_LRsz(); // JIThelp.asm
+
+ void STDCALL JIT_CheckedWriteBarrierEAX(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_CheckedWriteBarrierEBX(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_CheckedWriteBarrierECX(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_CheckedWriteBarrierESI(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_CheckedWriteBarrierEDI(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_CheckedWriteBarrierEBP(); // JIThelp.asm/JIThelp.s
+
+ void STDCALL JIT_DebugWriteBarrierEAX(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_DebugWriteBarrierEBX(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_DebugWriteBarrierECX(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_DebugWriteBarrierESI(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_DebugWriteBarrierEDI(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_DebugWriteBarrierEBP(); // JIThelp.asm/JIThelp.s
+
+ void STDCALL JIT_WriteBarrierEAX(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_WriteBarrierEBX(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_WriteBarrierECX(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_WriteBarrierESI(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_WriteBarrierEDI(); // JIThelp.asm/JIThelp.s
+ void STDCALL JIT_WriteBarrierEBP(); // JIThelp.asm/JIThelp.s
+
+ void STDCALL JIT_WriteBarrierStart();
+ void STDCALL JIT_WriteBarrierLast();
+
+ void STDCALL JIT_PatchedWriteBarrierStart();
+ void STDCALL JIT_PatchedWriteBarrierLast();
+}
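+
+// The Start/Last marker pairs above appear to bracket the write barrier bodies in
+// the assembly files so the runtime can locate and patch each range as a single
+// unit; see ValidateWriteBarrierHelpers below.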
+
+void ValidateWriteBarrierHelpers();
+
+#endif //_TARGET_X86_
+
+extern "C"
+{
+ void STDCALL JIT_EndCatch(); // JIThelp.asm/JIThelp.s
+
+ void STDCALL JIT_ByRefWriteBarrier(); // JIThelp.asm/JIThelp.s
+
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
+
+ FCDECL2VA(void, JIT_TailCall, PCODE copyArgs, PCODE target);
+
+#else // _TARGET_AMD64_ || _TARGET_ARM_
+
+ void STDCALL JIT_TailCall(); // JIThelp.asm
+
+#endif // _TARGET_AMD64_ || _TARGET_ARM_
+
+ void STDCALL JIT_MemSet(void *dest, int c, SIZE_T count);
+ void STDCALL JIT_MemCpy(void *dest, const void *src, SIZE_T count);
+
+ void STDCALL JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle);
+};
+
+#ifndef FEATURE_CORECLR
+//
+// Obfuscators that hack into the JIT expect certain methods to exist at certain places in the CEEInfo vtable. Add artificial slots
+// to the vtable to avoid breaking such apps with the .NET 4.5 in-place update.
+//
+
+class ICorMethodInfo_Hack
+{
+public:
+ virtual const char* __stdcall ICorMethodInfo_Hack_getMethodName (CORINFO_METHOD_HANDLE ftnHnd, const char** scopeName) = 0;
+};
+
+class ICorModuleInfo_Hack
+{
+public:
+ virtual void ICorModuleInfo_Hack_dummy() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+};
+
+class ICorClassInfo_Hack
+{
+public:
+ virtual void ICorClassInfo_Hack_dummy1() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+ virtual void ICorClassInfo_Hack_dummy2() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+ virtual void ICorClassInfo_Hack_dummy3() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+ virtual void ICorClassInfo_Hack_dummy4() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+ virtual void ICorClassInfo_Hack_dummy5() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+ virtual void ICorClassInfo_Hack_dummy6() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+ virtual void ICorClassInfo_Hack_dummy7() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+ virtual void ICorClassInfo_Hack_dummy8() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+ virtual void ICorClassInfo_Hack_dummy9() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+ virtual void ICorClassInfo_Hack_dummy10() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+ virtual void ICorClassInfo_Hack_dummy11() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+ virtual void ICorClassInfo_Hack_dummy12() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+ virtual void ICorClassInfo_Hack_dummy13() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+ virtual void ICorClassInfo_Hack_dummy14() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+
+ virtual mdMethodDef __stdcall ICorClassInfo_Hack_getMethodDefFromMethod(CORINFO_METHOD_HANDLE hMethod) = 0;
+};
+
+class ICorStaticInfo_Hack : public virtual ICorMethodInfo_Hack, public virtual ICorModuleInfo_Hack, public virtual ICorClassInfo_Hack
+{
+ virtual void ICorStaticInfo_Hack_dummy() { WRAPPER_NO_CONTRACT; UNREACHABLE(); };
+};
+
+#endif // FEATURE_CORECLR
+
+
+/*********************************************************************/
+/*********************************************************************/
+class CEEInfo : public ICorJitInfo
+#ifndef FEATURE_CORECLR
+ , public virtual ICorStaticInfo_Hack
+#endif
+{
+ friend class CEEDynamicCodeInfo;
+
+ const char * __stdcall ICorMethodInfo_Hack_getMethodName(CORINFO_METHOD_HANDLE ftnHnd, const char** scopeName)
+ {
+ WRAPPER_NO_CONTRACT;
+ return getMethodName(ftnHnd, scopeName);
+ }
+
+ mdMethodDef __stdcall ICorClassInfo_Hack_getMethodDefFromMethod(CORINFO_METHOD_HANDLE hMethod)
+ {
+ WRAPPER_NO_CONTRACT;
+ return getMethodDefFromMethod(hMethod);
+ }
+
+public:
+ // ICorClassInfo stuff
+ CorInfoType asCorInfoType (CORINFO_CLASS_HANDLE cls);
+ // This normalizes EE type information into the form expected by the JIT.
+ //
+ // If typeHnd contains exact type information, then *clsRet will contain
+ // the normalized CORINFO_CLASS_HANDLE information on return.
+ static CorInfoType asCorInfoType (CorElementType cet,
+ TypeHandle typeHnd = TypeHandle() /* optional in */,
+ CORINFO_CLASS_HANDLE *clsRet = NULL /* optional out */ );
+
+ CORINFO_MODULE_HANDLE getClassModule(CORINFO_CLASS_HANDLE clsHnd);
+ CORINFO_ASSEMBLY_HANDLE getModuleAssembly(CORINFO_MODULE_HANDLE mod);
+ const char* getAssemblyName(CORINFO_ASSEMBLY_HANDLE assem);
+ void* LongLifetimeMalloc(size_t sz);
+ void LongLifetimeFree(void* obj);
+ size_t getClassModuleIdForStatics(CORINFO_CLASS_HANDLE clsHnd, CORINFO_MODULE_HANDLE *pModuleHandle, void **ppIndirection);
+ const char* getClassName (CORINFO_CLASS_HANDLE cls);
+ const char* getHelperName(CorInfoHelpFunc ftnNum);
+ int appendClassName(__deref_inout_ecount(*pnBufLen) WCHAR** ppBuf,
+ int* pnBufLen,
+ CORINFO_CLASS_HANDLE cls,
+ BOOL fNamespace,
+ BOOL fFullInst,
+ BOOL fAssembly);
+ BOOL isValueClass (CORINFO_CLASS_HANDLE cls);
+ BOOL canInlineTypeCheckWithObjectVTable (CORINFO_CLASS_HANDLE cls);
+
+ DWORD getClassAttribs (CORINFO_CLASS_HANDLE cls);
+
+ // Internal version without JIT-EE transition
+ DWORD getClassAttribsInternal (CORINFO_CLASS_HANDLE cls);
+
+ BOOL isStructRequiringStackAllocRetBuf(CORINFO_CLASS_HANDLE cls);
+
+ unsigned getClassSize (CORINFO_CLASS_HANDLE cls);
+ unsigned getClassAlignmentRequirement(CORINFO_CLASS_HANDLE cls, BOOL fDoubleAlignHint);
+ static unsigned getClassAlignmentRequirementStatic(TypeHandle clsHnd);
+
+ // Used for HFAs on IA64... and later for type-based disambiguation
+ CORINFO_FIELD_HANDLE getFieldInClass(CORINFO_CLASS_HANDLE clsHnd, INT num);
+
+ mdMethodDef getMethodDefFromMethod(CORINFO_METHOD_HANDLE hMethod);
+ BOOL checkMethodModifier(CORINFO_METHOD_HANDLE hMethod, LPCSTR modifier, BOOL fOptional);
+
+ unsigned getClassGClayout (CORINFO_CLASS_HANDLE cls, BYTE* gcPtrs); /* really GCType* gcPtrs */
+ unsigned getClassNumInstanceFields(CORINFO_CLASS_HANDLE cls);
+
+ // Check Visibility rules.
+ // For Protected (family access) members, type of the instance is also
+ // considered when checking visibility rules.
+
+
+ CorInfoHelpFunc getNewHelper(CORINFO_RESOLVED_TOKEN * pResolvedToken, CORINFO_METHOD_HANDLE callerHandle);
+ static CorInfoHelpFunc getNewHelperStatic(MethodTable * pMT);
+
+ CorInfoHelpFunc getNewArrHelper(CORINFO_CLASS_HANDLE arrayCls);
+ static CorInfoHelpFunc getNewArrHelperStatic(TypeHandle clsHnd);
+
+ CorInfoHelpFunc getCastingHelper(CORINFO_RESOLVED_TOKEN * pResolvedToken, bool fThrowing);
+ static CorInfoHelpFunc getCastingHelperStatic(TypeHandle clsHnd, bool fThrowing, bool * pfClassMustBeRestored);
+
+ CorInfoHelpFunc getSharedCCtorHelper(CORINFO_CLASS_HANDLE clsHnd);
+ CorInfoHelpFunc getSecurityPrologHelper(CORINFO_METHOD_HANDLE ftn);
+ CORINFO_CLASS_HANDLE getTypeForBox(CORINFO_CLASS_HANDLE cls);
+ CorInfoHelpFunc getBoxHelper(CORINFO_CLASS_HANDLE cls);
+ CorInfoHelpFunc getUnBoxHelper(CORINFO_CLASS_HANDLE cls);
+
+ void getReadyToRunHelper(
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CorInfoHelpFunc id,
+ CORINFO_CONST_LOOKUP * pLookup
+ );
+
+ CorInfoInitClassResult initClass(
+ CORINFO_FIELD_HANDLE field,
+ CORINFO_METHOD_HANDLE method,
+ CORINFO_CONTEXT_HANDLE context,
+ BOOL speculative = FALSE);
+
+ void classMustBeLoadedBeforeCodeIsRun (CORINFO_CLASS_HANDLE cls);
+ void methodMustBeLoadedBeforeCodeIsRun (CORINFO_METHOD_HANDLE meth);
+ CORINFO_METHOD_HANDLE mapMethodDeclToMethodImpl(CORINFO_METHOD_HANDLE methHnd);
+ CORINFO_CLASS_HANDLE getBuiltinClass(CorInfoClassId classId);
+ void getGSCookie(GSCookie * pCookieVal, GSCookie ** ppCookieVal);
+
+#ifdef MDIL
+ unsigned getNumTypeParameters(CORINFO_METHOD_HANDLE method);
+
+ CorElementType getTypeOfTypeParameter(CORINFO_METHOD_HANDLE method, unsigned index);
+ CORINFO_CLASS_HANDLE getTypeParameter(CORINFO_METHOD_HANDLE method, bool classTypeParameter, unsigned index);
+ unsigned getStructTypeToken(InlineContext *inlineContext, CORINFO_ARG_LIST_HANDLE argList);
+ unsigned getEnclosingClassToken(InlineContext *inlineContext, CORINFO_METHOD_HANDLE method);
+ InlineContext * computeInlineContext(InlineContext *outerContext, unsigned inlinedMethodToken, unsigned constraintTypeToken, CORINFO_METHOD_HANDLE method);
+ unsigned translateToken(InlineContext *inlineContext, CORINFO_MODULE_HANDLE scopeHnd, unsigned token);
+ CorInfoType getFieldElementType(unsigned fieldToken, CORINFO_MODULE_HANDLE scope, CORINFO_METHOD_HANDLE methHnd);
+ unsigned getCurrentMethodToken(InlineContext *inlineContext, CORINFO_METHOD_HANDLE method);
+ unsigned getStubMethodFlags(CORINFO_METHOD_HANDLE method);
+#endif
+
+ // "System.Int32" ==> CORINFO_TYPE_INT..
+ CorInfoType getTypeForPrimitiveValueClass(
+ CORINFO_CLASS_HANDLE cls
+ );
+
+ // TRUE if child is a subtype of parent
+ // if parent is an interface, then does child implement / extend parent
+ BOOL canCast(
+ CORINFO_CLASS_HANDLE child,
+ CORINFO_CLASS_HANDLE parent
+ );
+
+ // TRUE if cls1 and cls2 are considered equivalent types.
+ BOOL areTypesEquivalent(
+ CORINFO_CLASS_HANDLE cls1,
+ CORINFO_CLASS_HANDLE cls2
+ );
+
+ // Returns the intersection of cls1 and cls2.
+ CORINFO_CLASS_HANDLE mergeClasses(
+ CORINFO_CLASS_HANDLE cls1,
+ CORINFO_CLASS_HANDLE cls2
+ );
+
+ // Given a class handle, returns the Parent type.
+ // For COMObjectType, it returns Class Handle of System.Object.
+ // Returns 0 if System.Object is passed in.
+ CORINFO_CLASS_HANDLE getParentType (
+ CORINFO_CLASS_HANDLE cls
+ );
+
+ // Returns the CorInfoType of the "child type". If the child type is
+ // not a primitive type, *clsRet will be set.
+ // Given an Array of Type Foo, returns Foo.
+ // Given BYREF Foo, returns Foo
+ CorInfoType getChildType (
+ CORINFO_CLASS_HANDLE clsHnd,
+ CORINFO_CLASS_HANDLE *clsRet
+ );
+
+ // Check constraints on type arguments of this class and parent classes
+ BOOL satisfiesClassConstraints(
+ CORINFO_CLASS_HANDLE cls
+ );
+
+ // Check if this is a single dimensional array type
+ BOOL isSDArray(
+ CORINFO_CLASS_HANDLE cls
+ );
+
+ // Get the number of dimensions in an array
+ unsigned getArrayRank(
+ CORINFO_CLASS_HANDLE cls
+ );
+
+ // Get static field data for an array
+ void * getArrayInitializationData(
+ CORINFO_FIELD_HANDLE field,
+ DWORD size
+ );
+
+ // Check Visibility rules.
+ CorInfoIsAccessAllowedResult canAccessClass(
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_METHOD_HANDLE callerHandle,
+ CORINFO_HELPER_DESC *pAccessHelper /* If canAccessClass returns something other
+ than ALLOWED, then this is filled in. */
+ );
+
+ // Returns the compilation flags that are shared between the JIT and NGen.
+ static DWORD GetBaseCompileFlags(MethodDesc * ftn);
+
+ // Resolve metadata token into runtime method handles.
+ void resolveToken(/* IN, OUT */ CORINFO_RESOLVED_TOKEN * pResolvedToken);
+
+ void getFieldInfo (CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_METHOD_HANDLE callerHandle,
+ CORINFO_ACCESS_FLAGS flags,
+ CORINFO_FIELD_INFO *pResult
+ );
+ static CorInfoHelpFunc getSharedStaticsHelper(FieldDesc * pField, MethodTable * pFieldMT);
+
+#ifdef MDIL
+ virtual DWORD getFieldOrdinal(CORINFO_MODULE_HANDLE tokenScope,
+ unsigned fieldToken);
+
+ unsigned getMemberParent(CORINFO_MODULE_HANDLE scopeHnd, unsigned metaTOK);
+
+ // given a token representing an MD array of structs, get the element type token
+ unsigned getArrayElementToken(CORINFO_MODULE_HANDLE scopeHnd, unsigned metaTOK);
+#endif
+
+ bool isFieldStatic(CORINFO_FIELD_HANDLE fldHnd);
+
+ // Given a signature token sigTOK, use class/method instantiation in context to instantiate any type variables in the signature and return a new signature
+ void findSig(CORINFO_MODULE_HANDLE scopeHnd, unsigned sigTOK, CORINFO_CONTEXT_HANDLE context, CORINFO_SIG_INFO* sig);
+ void findCallSiteSig(CORINFO_MODULE_HANDLE scopeHnd, unsigned methTOK, CORINFO_CONTEXT_HANDLE context, CORINFO_SIG_INFO* sig);
+ CORINFO_CLASS_HANDLE getTokenTypeAsHandle(CORINFO_RESOLVED_TOKEN * pResolvedToken);
+
+ size_t findNameOfToken (CORINFO_MODULE_HANDLE module, mdToken metaTOK,
+ __out_ecount (FQNameCapacity) char * szFQName, size_t FQNameCapacity);
+
+ CorInfoCanSkipVerificationResult canSkipVerification(CORINFO_MODULE_HANDLE moduleHnd);
+
+ // Checks if the given metadata token is valid
+ BOOL isValidToken (
+ CORINFO_MODULE_HANDLE module,
+ mdToken metaTOK);
+
+ // Checks if the given metadata token is valid StringRef
+ BOOL isValidStringRef (
+ CORINFO_MODULE_HANDLE module,
+ mdToken metaTOK);
+
+ static size_t findNameOfToken (Module* module, mdToken metaTOK,
+ __out_ecount (FQNameCapacity) char * szFQName, size_t FQNameCapacity);
+
+ // ICorMethodInfo stuff
+ const char* getMethodName (CORINFO_METHOD_HANDLE ftnHnd, const char** scopeName);
+ unsigned getMethodHash (CORINFO_METHOD_HANDLE ftnHnd);
+
+ DWORD getMethodAttribs (CORINFO_METHOD_HANDLE ftnHnd);
+ // Internal version without JIT-EE transition
+ DWORD getMethodAttribsInternal (CORINFO_METHOD_HANDLE ftnHnd);
+
+ void setMethodAttribs (CORINFO_METHOD_HANDLE ftnHnd, CorInfoMethodRuntimeFlags attribs);
+
+ bool getMethodInfo (
+ CORINFO_METHOD_HANDLE ftnHnd,
+ CORINFO_METHOD_INFO* methInfo);
+
+ CorInfoInline canInline (
+ CORINFO_METHOD_HANDLE callerHnd,
+ CORINFO_METHOD_HANDLE calleeHnd,
+ DWORD* pRestrictions);
+
+ void reportInliningDecision (CORINFO_METHOD_HANDLE inlinerHnd,
+ CORINFO_METHOD_HANDLE inlineeHnd,
+ CorInfoInline inlineResult,
+ const char * reason);
+
+ // Used by ngen
+ CORINFO_METHOD_HANDLE instantiateMethodAtObject(CORINFO_METHOD_HANDLE method);
+
+ // Loads the constraints on a typical method definition, detecting cycles;
+ // used by verifiers.
+ void initConstraintsForVerification(
+ CORINFO_METHOD_HANDLE method,
+ BOOL *pfHasCircularClassConstraints,
+ BOOL *pfHasCircularMethodConstraints
+ );
+
+ CorInfoInstantiationVerification isInstantiationOfVerifiedGeneric (
+ CORINFO_METHOD_HANDLE methodHnd);
+
+
+ bool canTailCall (
+ CORINFO_METHOD_HANDLE callerHnd,
+ CORINFO_METHOD_HANDLE declaredCalleeHnd,
+ CORINFO_METHOD_HANDLE exactCalleeHnd,
+ bool fIsTailPrefix);
+
+ void reportTailCallDecision (CORINFO_METHOD_HANDLE callerHnd,
+ CORINFO_METHOD_HANDLE calleeHnd,
+ bool fIsTailPrefix,
+ CorInfoTailCall tailCallResult,
+ const char * reason);
+
+ CorInfoCanSkipVerificationResult canSkipMethodVerification(
+ CORINFO_METHOD_HANDLE ftnHnd);
+
+ // Given a method descriptor ftnHnd, extract signature information into sigInfo
+ // Obtain (representative) instantiation information from ftnHnd's owner class
+ //@GENERICSVER: added explicit owner parameter
+ void getMethodSig (
+ CORINFO_METHOD_HANDLE ftnHnd,
+ CORINFO_SIG_INFO* sigInfo,
+ CORINFO_CLASS_HANDLE owner = NULL
+ );
+ // Internal version without JIT-EE transition
+ void getMethodSigInternal (
+ CORINFO_METHOD_HANDLE ftnHnd,
+ CORINFO_SIG_INFO* sigInfo,
+ CORINFO_CLASS_HANDLE owner = NULL
+ );
+
+ void getEHinfo(
+ CORINFO_METHOD_HANDLE ftn,
+ unsigned EHnumber,
+ CORINFO_EH_CLAUSE* clause);
+
+ CORINFO_CLASS_HANDLE getMethodClass (CORINFO_METHOD_HANDLE methodHnd);
+ CORINFO_MODULE_HANDLE getMethodModule (CORINFO_METHOD_HANDLE methodHnd);
+
+ void getMethodVTableOffset (
+ CORINFO_METHOD_HANDLE methodHnd,
+ unsigned * pOffsetOfIndirection,
+ unsigned * pOffsetAfterIndirection
+ );
+
+ CorInfoIntrinsics getIntrinsicID(CORINFO_METHOD_HANDLE method);
+
+ bool isInSIMDModule(CORINFO_CLASS_HANDLE classHnd);
+
+ CorInfoUnmanagedCallConv getUnmanagedCallConv(CORINFO_METHOD_HANDLE method);
+ BOOL pInvokeMarshalingRequired(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* callSiteSig);
+
+ // Generate a cookie based on the signature that needs to be passed
+ // to the above generic stub
+ LPVOID GetCookieForPInvokeCalliSig(CORINFO_SIG_INFO* szMetaSig, void ** ppIndirection);
+ bool canGetCookieForPInvokeCalliSig(CORINFO_SIG_INFO* szMetaSig);
+
+ // Check Visibility rules.
+
+ // Should we enforce the new (for Whidbey) restrictions on calling virtual methods?
+ BOOL shouldEnforceCallvirtRestriction(
+ CORINFO_MODULE_HANDLE scope);
+
+#ifdef MDIL
+ virtual unsigned getTypeTokenForFieldOrMethod(
+ unsigned fieldOrMethodToken
+ );
+
+ virtual unsigned getTokenForType(CORINFO_CLASS_HANDLE cls);
+#endif
+ // Check constraints on method type arguments (only).
+ // The parent class should be checked separately using satisfiesClassConstraints(parent).
+ BOOL satisfiesMethodConstraints(
+ CORINFO_CLASS_HANDLE parent, // the exact parent of the method
+ CORINFO_METHOD_HANDLE method
+ );
+
+ // Given a Delegate type and a method, check if the method signature
+ // is Compatible with the Invoke method of the delegate.
+ //@GENERICSVER: new (suitable for generics)
+ BOOL isCompatibleDelegate(
+ CORINFO_CLASS_HANDLE objCls,
+ CORINFO_CLASS_HANDLE methodParentCls,
+ CORINFO_METHOD_HANDLE method,
+ CORINFO_CLASS_HANDLE delegateCls,
+ BOOL* pfIsOpenDelegate);
+
+ // Determines whether the delegate creation obeys security transparency rules
+ BOOL isDelegateCreationAllowed (
+ CORINFO_CLASS_HANDLE delegateHnd,
+ CORINFO_METHOD_HANDLE calleeHnd);
+
+ // ICorFieldInfo stuff
+ const char* getFieldName (CORINFO_FIELD_HANDLE field,
+ const char** scopeName);
+
+ CORINFO_CLASS_HANDLE getFieldClass (CORINFO_FIELD_HANDLE field);
+
+ //@GENERICSVER: added owner parameter
+ CorInfoType getFieldType (CORINFO_FIELD_HANDLE field, CORINFO_CLASS_HANDLE* structType,CORINFO_CLASS_HANDLE owner = NULL);
+ // Internal version without JIT-EE transition
+ CorInfoType getFieldTypeInternal (CORINFO_FIELD_HANDLE field, CORINFO_CLASS_HANDLE* structType,CORINFO_CLASS_HANDLE owner = NULL);
+
+ unsigned getFieldOffset (CORINFO_FIELD_HANDLE field);
+
+ bool isWriteBarrierHelperRequired(CORINFO_FIELD_HANDLE field);
+
+ void* getFieldAddress(CORINFO_FIELD_HANDLE field, void **ppIndirection);
+
+ // ICorDebugInfo stuff
+ void * allocateArray(ULONG cBytes);
+ void freeArray(void *array);
+ void getBoundaries(CORINFO_METHOD_HANDLE ftn,
+ unsigned int *cILOffsets, DWORD **pILOffsets,
+ ICorDebugInfo::BoundaryTypes *implicitBoundaries);
+ void setBoundaries(CORINFO_METHOD_HANDLE ftn,
+ ULONG32 cMap, ICorDebugInfo::OffsetMapping *pMap);
+ void getVars(CORINFO_METHOD_HANDLE ftn, ULONG32 *cVars,
+ ICorDebugInfo::ILVarInfo **vars, bool *extendOthers);
+ void setVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars,
+ ICorDebugInfo::NativeVarInfo *vars);
+
+ // ICorArgInfo stuff
+
+ CorInfoTypeWithMod getArgType (
+ CORINFO_SIG_INFO* sig,
+ CORINFO_ARG_LIST_HANDLE args,
+ CORINFO_CLASS_HANDLE *vcTypeRet
+ );
+
+ CORINFO_CLASS_HANDLE getArgClass (
+ CORINFO_SIG_INFO* sig,
+ CORINFO_ARG_LIST_HANDLE args
+ );
+
+ CorInfoType getHFAType (
+ CORINFO_CLASS_HANDLE hClass
+ );
+
+ CORINFO_ARG_LIST_HANDLE getArgNext (
+ CORINFO_ARG_LIST_HANDLE args
+ );
+
+ // ICorErrorInfo stuff
+
+ HRESULT GetErrorHRESULT(struct _EXCEPTION_POINTERS *pExceptionPointers);
+ ULONG GetErrorMessage(__out_ecount(bufferLength) LPWSTR buffer,
+ ULONG bufferLength);
+ int FilterException(struct _EXCEPTION_POINTERS *pExceptionPointers);
+ void HandleException(struct _EXCEPTION_POINTERS *pExceptionPointers);
+ void ThrowExceptionForJitResult(HRESULT result);
+ void ThrowExceptionForHelper(const CORINFO_HELPER_DESC * throwHelper);
+
+ // ICorStaticInfo stuff
+ void getEEInfo(CORINFO_EE_INFO *pEEInfoOut);
+
+ LPCWSTR getJitTimeLogFilename();
+
+ //ICorDynamicInfo stuff
+ DWORD getFieldThreadLocalStoreID (CORINFO_FIELD_HANDLE field, void **ppIndirection);
+
+ // Stub dispatch stuff
+ void getCallInfo(
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken,
+ CORINFO_METHOD_HANDLE callerHandle,
+ CORINFO_CALLINFO_FLAGS flags,
+ CORINFO_CALL_INFO *pResult /*out */);
+ BOOL canAccessFamily(CORINFO_METHOD_HANDLE hCaller,
+ CORINFO_CLASS_HANDLE hInstanceType);
+
+protected:
+
+ static void getEHinfoHelper(
+ CORINFO_METHOD_HANDLE ftnHnd,
+ unsigned EHnumber,
+ CORINFO_EH_CLAUSE* clause,
+ COR_ILMETHOD_DECODER* pILHeader);
+
+ bool isVerifyOnly()
+ {
+ return m_fVerifyOnly;
+ }
+
+public:
+
+ BOOL isRIDClassDomainID(CORINFO_CLASS_HANDLE cls);
+ unsigned getClassDomainID (CORINFO_CLASS_HANDLE cls, void **ppIndirection);
+ CORINFO_VARARGS_HANDLE getVarArgsHandle(CORINFO_SIG_INFO *sig, void **ppIndirection);
+ bool canGetVarArgsHandle(CORINFO_SIG_INFO *sig);
+ void* getPInvokeUnmanagedTarget(CORINFO_METHOD_HANDLE method, void **ppIndirection);
+ void* getAddressOfPInvokeFixup(CORINFO_METHOD_HANDLE method, void **ppIndirection);
+ CORINFO_JUST_MY_CODE_HANDLE getJustMyCodeHandle(CORINFO_METHOD_HANDLE method, CORINFO_JUST_MY_CODE_HANDLE **ppIndirection);
+
+ void GetProfilingHandle(
+ BOOL *pbHookFunction,
+ void **pProfilerHandle,
+ BOOL *pbIndirectedHandles
+ );
+
+ InfoAccessType constructStringLiteral(CORINFO_MODULE_HANDLE scopeHnd, mdToken metaTok, void **ppValue);
+ InfoAccessType emptyStringLiteral(void ** ppValue);
+ void* getMethodSync(CORINFO_METHOD_HANDLE ftnHnd, void **ppIndirection);
+
+ DWORD getThreadTLSIndex(void **ppIndirection);
+ const void * getInlinedCallFrameVptr(void **ppIndirection);
+
+ // Returns the address of the domain-neutral module ID. This only makes sense for domain-neutral (shared)
+ // modules.
+ SIZE_T* getAddrModuleDomainID(CORINFO_MODULE_HANDLE module);
+
+ LONG * getAddrOfCaptureThreadGlobal(void **ppIndirection);
+ void* getHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
+ void ** ppIndirection); /* OUT */
+
+ void* getTailCallCopyArgsThunk(CORINFO_SIG_INFO *pSig,
+ CorInfoHelperTailCallSpecialHandling flags);
+
+ void getFunctionEntryPoint(CORINFO_METHOD_HANDLE ftn, /* IN */
+ CORINFO_CONST_LOOKUP * pResult, /* OUT */
+ CORINFO_ACCESS_FLAGS accessFlags = CORINFO_ACCESS_ANY);
+
+ void getFunctionFixedEntryPoint(CORINFO_METHOD_HANDLE ftn,
+ CORINFO_CONST_LOOKUP * pResult);
+
+ // Get the slow lazy string literal helper to use (CORINFO_HELP_STRCNS*).
+ // Returns CORINFO_HELP_UNDEF if the lazy string literal helper cannot be used.
+ CorInfoHelpFunc getLazyStringLiteralHelper(CORINFO_MODULE_HANDLE handle);
+
+ CORINFO_MODULE_HANDLE embedModuleHandle(CORINFO_MODULE_HANDLE handle,
+ void **ppIndirection);
+ CORINFO_CLASS_HANDLE embedClassHandle(CORINFO_CLASS_HANDLE handle,
+ void **ppIndirection);
+ CORINFO_FIELD_HANDLE embedFieldHandle(CORINFO_FIELD_HANDLE handle,
+ void **ppIndirection);
+ CORINFO_METHOD_HANDLE embedMethodHandle(CORINFO_METHOD_HANDLE handle,
+ void **ppIndirection);
+ void embedGenericHandle(
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ BOOL fEmbedParent,
+ CORINFO_GENERICHANDLE_RESULT *pResult);
+
+ CORINFO_LOOKUP_KIND getLocationOfThisType(CORINFO_METHOD_HANDLE context);
+
+
+ void setOverride(ICorDynamicInfo *pOverride, CORINFO_METHOD_HANDLE currentMethod)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pOverride = pOverride;
+ m_pMethodBeingCompiled = (MethodDesc *)currentMethod; // method being compiled
+
+ m_hMethodForSecurity_Key = NULL;
+ m_pMethodForSecurity_Value = NULL;
+ }
+
+ // Returns whether we are generating code for an NGen image.
+ BOOL IsCompilingForNGen()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // NGen is the only place where we set the override
+ return this != m_pOverride;
+ }
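+
+ // (As set up in this file: CEEJitInfo's constructor assigns m_pOverride = this,
+ // so the test above is FALSE for the runtime JIT; NGen's ZapInfo installs itself
+ // via setOverride, which makes it TRUE. See the comment on m_pOverride below.)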
+
+ void addActiveDependency(CORINFO_MODULE_HANDLE moduleFrom, CORINFO_MODULE_HANDLE moduleTo);
+ CORINFO_METHOD_HANDLE GetDelegateCtor(
+ CORINFO_METHOD_HANDLE methHnd,
+ CORINFO_CLASS_HANDLE clsHnd,
+ CORINFO_METHOD_HANDLE targetMethodHnd,
+ DelegateCtorArgs * pCtorData);
+
+ void MethodCompileComplete(
+ CORINFO_METHOD_HANDLE methHnd);
+
+ //
+ // ICorJitInfo stuff - none of this should be called on this class
+ //
+
+ IEEMemoryManager* getMemoryManager();
+
+ void allocMem (
+ ULONG hotCodeSize, /* IN */
+ ULONG coldCodeSize, /* IN */
+ ULONG roDataSize, /* IN */
+ ULONG xcptnsCount, /* IN */
+ CorJitAllocMemFlag flag, /* IN */
+ void ** hotCodeBlock, /* OUT */
+ void ** coldCodeBlock, /* OUT */
+ void ** roDataBlock /* OUT */
+ );
+
+ void reserveUnwindInfo (
+ BOOL isFunclet, /* IN */
+ BOOL isColdCode, /* IN */
+ ULONG unwindSize /* IN */
+ );
+
+ void allocUnwindInfo (
+ BYTE * pHotCode, /* IN */
+ BYTE * pColdCode, /* IN */
+ ULONG startOffset, /* IN */
+ ULONG endOffset, /* IN */
+ ULONG unwindSize, /* IN */
+ BYTE * pUnwindBlock, /* IN */
+ CorJitFuncKind funcKind /* IN */
+ );
+
+ void * allocGCInfo (
+ size_t size /* IN */
+ );
+
+ void yieldExecution();
+
+ void setEHcount (
+ unsigned cEH /* IN */
+ );
+
+ void setEHinfo (
+ unsigned EHnumber, /* IN */
+ const CORINFO_EH_CLAUSE *clause /* IN */
+ );
+
+ BOOL logMsg(unsigned level, const char* fmt, va_list args);
+
+ int doAssert(const char* szFile, int iLine, const char* szExpr);
+
+ void reportFatalError(CorJitResult result);
+
+ void logSQMLongJitEvent(unsigned mcycles, unsigned msec, unsigned ilSize, unsigned numBasicBlocks, bool minOpts,
+ CORINFO_METHOD_HANDLE methodHnd);
+
+ HRESULT allocBBProfileBuffer (
+ ULONG count, // The number of basic blocks that we have
+ ProfileBuffer ** profileBuffer
+ );
+
+ HRESULT getBBProfileData(
+ CORINFO_METHOD_HANDLE ftnHnd,
+ ULONG * count, // The number of basic blocks that we have
+ ProfileBuffer ** profileBuffer,
+ ULONG * numRuns
+ );
+
+ void recordCallSite(
+ ULONG instrOffset, /* IN */
+ CORINFO_SIG_INFO * callSig, /* IN */
+ CORINFO_METHOD_HANDLE methodHandle /* IN */
+ );
+
+ void recordRelocation(
+ void * location, /* IN */
+ void * target, /* IN */
+ WORD fRelocType, /* IN */
+ WORD slotNum = 0, /* IN */
+ INT32 addlDelta = 0 /* IN */
+ );
+
+ WORD getRelocTypeHint(void * target);
+
+ void getModuleNativeEntryPointRange(
+ void ** pStart, /* OUT */
+ void ** pEnd /* OUT */
+ );
+
+ DWORD getExpectedTargetArchitecture();
+
+ CEEInfo(MethodDesc * fd = NULL, bool fVerifyOnly = false) :
+ m_pOverride(NULL),
+ m_pMethodBeingCompiled(fd),
+ m_fVerifyOnly(fVerifyOnly),
+ m_pThread(GetThread()),
+ m_hMethodForSecurity_Key(NULL),
+ m_pMethodForSecurity_Value(NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ ~CEEInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ // Performs any JIT-related work that should be performed at process shutdown.
+ void JitProcessShutdownWork();
+
+private:
+ // Shrinking these buffers drastically reduces the amount of stack space
+ // required for each instance of the interpreter, and thereby reduces stack overflows (SOs).
+#ifdef FEATURE_INTERPRETER
+#define CLS_STRING_SIZE 8 // force heap allocation
+#define CLS_BUFFER_SIZE SBUFFER_PADDED_SIZE(8)
+#else
+#define CLS_STRING_SIZE MAX_CLASSNAME_LENGTH
+#define CLS_BUFFER_SIZE MAX_CLASSNAME_LENGTH
+#endif
+
+#ifdef _DEBUG
+ InlineSString<MAX_CLASSNAME_LENGTH> ssClsNameBuff;
+ ScratchBuffer<MAX_CLASSNAME_LENGTH> ssClsNameBuffScratch;
+#endif
+
+public:
+
+ //@GENERICS:
+ // The method handle is used to instantiate method and class type parameters
+ // It's also used to determine whether an extra dictionary parameter is required
+ static
+ void
+ ConvToJitSig(
+ PCCOR_SIGNATURE pSig,
+ DWORD cbSig,
+ CORINFO_MODULE_HANDLE scopeHnd,
+ mdToken token,
+ CORINFO_SIG_INFO * sigRet,
+ MethodDesc * context,
+ bool localSig,
+ TypeHandle owner = TypeHandle());
+
+ MethodDesc * GetMethodForSecurity(CORINFO_METHOD_HANDLE callerHandle);
+
+protected:
+ // NGen provides its own modifications to the EE-JIT interface. For technical reasons it cannot simply inherit
+ // from the code:CEEInfo class (because it has dependencies on the VM that NGen does not want).
+ // Therefore the "normal" EE-JIT interface has code:m_pOverride hook that is set either to
+ // * 'this' (code:CEEInfo) at runtime, or to
+ // * code:ZapInfo - the NGen specific implementation of the interface.
+ ICorDynamicInfo * m_pOverride;
+
+ MethodDesc* m_pMethodBeingCompiled; // Top-level method being compiled
+ bool m_fVerifyOnly;
+ Thread * m_pThread; // Cached current thread for faster JIT-EE transitions
+
+ CORINFO_METHOD_HANDLE getMethodBeingCompiled()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (CORINFO_METHOD_HANDLE)m_pMethodBeingCompiled;
+ }
+
+ // Cache of last GetMethodForSecurity() lookup
+ CORINFO_METHOD_HANDLE m_hMethodForSecurity_Key;
+ MethodDesc * m_pMethodForSecurity_Value;
+
+ // Tracking of module activation dependencies. We have two flavors:
+ // - Fast one that gathers generic arguments from EE handles, but does not work inside generic context.
+ // - Slow one that operates on typespec and methodspecs from metadata.
+ void ScanForModuleDependencies(Module* pModule, SigPointer psig);
+ void ScanMethodSpec(Module * pModule, PCCOR_SIGNATURE pMethodSpec, ULONG cbMethodSpec);
+ // Returns true if it is ok to proceed with scan of parent chain
+ BOOL ScanTypeSpec(Module * pModule, PCCOR_SIGNATURE pTypeSpec, ULONG cbTypeSpec);
+ void ScanInstantiation(Module * pModule, Instantiation inst);
+
+ // The main entrypoints for module activation tracking
+ void ScanToken(Module * pModule, CORINFO_RESOLVED_TOKEN * pResolvedToken, TypeHandle th, MethodDesc * pMD = NULL);
+ void ScanTokenForDynamicScope(CORINFO_RESOLVED_TOKEN * pResolvedToken, TypeHandle th, MethodDesc * pMD = NULL);
+
+ // Prepare the information about how to do a runtime lookup of the handle with shared
+ // generic variables.
+ void ComputeRuntimeLookupForSharedGenericToken(DictionaryEntryKind entryKind,
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken /* for ConstrainedMethodEntrySlot */,
+ MethodDesc * pTemplateMD /* for method-based slots */,
+ CORINFO_LOOKUP *pResultLookup);
+};
+
+
+/*********************************************************************/
+
+class EEJitManager;
+struct _hpCodeHdr;
+typedef struct _hpCodeHdr CodeHeader;
+
+#ifndef CROSSGEN_COMPILE
+// CEEJitInfo is the concrete implementation of callbacks that the EE must provide for the JIT to do its
+// work. See code:ICorJitInfo#JitToEEInterface for more on this interface.
+class CEEJitInfo : public CEEInfo
+{
+public:
+ // ICorJitInfo stuff
+
+ void allocMem (
+ ULONG hotCodeSize, /* IN */
+ ULONG coldCodeSize, /* IN */
+ ULONG roDataSize, /* IN */
+ ULONG xcptnsCount, /* IN */
+ CorJitAllocMemFlag flag, /* IN */
+ void ** hotCodeBlock, /* OUT */
+ void ** coldCodeBlock, /* OUT */
+ void ** roDataBlock /* OUT */
+ );
+
+ void reserveUnwindInfo(BOOL isFunclet, BOOL isColdCode, ULONG unwindSize);
+
+ void allocUnwindInfo (
+ BYTE * pHotCode, /* IN */
+ BYTE * pColdCode, /* IN */
+ ULONG startOffset, /* IN */
+ ULONG endOffset, /* IN */
+ ULONG unwindSize, /* IN */
+ BYTE * pUnwindBlock, /* IN */
+ CorJitFuncKind funcKind /* IN */
+ );
+
+ void * allocGCInfo (size_t size);
+
+ void setEHcount (unsigned cEH);
+
+ void setEHinfo (
+ unsigned EHnumber,
+ const CORINFO_EH_CLAUSE* clause);
+
+ void getEHinfo(
+ CORINFO_METHOD_HANDLE ftn, /* IN */
+ unsigned EHnumber, /* IN */
+ CORINFO_EH_CLAUSE* clause /* OUT */
+ );
+
+
+ HRESULT allocBBProfileBuffer (
+ ULONG count, // The number of basic blocks that we have
+ ICorJitInfo::ProfileBuffer ** profileBuffer
+ );
+
+ HRESULT getBBProfileData (
+ CORINFO_METHOD_HANDLE ftnHnd,
+ ULONG * count, // The number of basic blocks that we have
+ ICorJitInfo::ProfileBuffer ** profileBuffer,
+ ULONG * numRuns
+ );
+
+ void recordCallSite(
+ ULONG instrOffset, /* IN */
+ CORINFO_SIG_INFO * callSig, /* IN */
+ CORINFO_METHOD_HANDLE methodHandle /* IN */
+ );
+
+ void recordRelocation(
+ void *location,
+ void *target,
+ WORD fRelocType,
+ WORD slot,
+ INT32 addlDelta);
+
+ WORD getRelocTypeHint(void * target);
+
+ void getModuleNativeEntryPointRange(
+ void** pStart,
+ void** pEnd);
+
+ DWORD getExpectedTargetArchitecture();
+
+ CodeHeader* GetCodeHeader()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_CodeHeader;
+ }
+
+ void SetCodeHeader(CodeHeader* pValue)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_CodeHeader = pValue;
+ }
+
+ void ResetForJitRetry()
+ {
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ m_CodeHeader = NULL;
+
+ if (m_pOffsetMapping != NULL)
+ delete [] ((BYTE*) m_pOffsetMapping);
+
+ if (m_pNativeVarInfo != NULL)
+ delete [] ((BYTE*) m_pNativeVarInfo);
+
+ m_iOffsetMapping = 0;
+ m_pOffsetMapping = NULL;
+ m_iNativeVarInfo = 0;
+ m_pNativeVarInfo = NULL;
+
+#ifdef WIN64EXCEPTIONS
+ m_moduleBase = NULL;
+ m_totalUnwindSize = 0;
+ m_usedUnwindSize = 0;
+ m_theUnwindBlock = NULL;
+ m_totalUnwindInfos = 0;
+ m_usedUnwindInfos = 0;
+#endif // WIN64EXCEPTIONS
+ }
+
+#ifdef _TARGET_AMD64_
+ void SetAllowRel32(BOOL fAllowRel32)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_fAllowRel32 = fAllowRel32;
+ }
+
+ void SetRel32Overflow(BOOL fRel32Overflow)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_fRel32Overflow = fRel32Overflow;
+ }
+
+ BOOL IsRel32Overflow()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fRel32Overflow;
+ }
+
+ BOOL JitAgain()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fRel32Overflow;
+ }
+#else
+ BOOL JitAgain()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+ }
+#endif
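+
+    // A minimal sketch of the retry loop the JIT driver is expected to build
+    // around JitAgain() (illustrative only; the local names are hypothetical):
+    //
+    //   pJitInfo->SetAllowRel32(TRUE);            // optimistic: try rel32 first
+    //   for (;;)
+    //   {
+    //       CorJitResult res = pJitCompiler->compileMethod(pJitInfo, ...);
+    //       if (!pJitInfo->JitAgain())
+    //           break;                            // no rel32 overflow was recorded
+    //       pJitInfo->ResetForJitRetry();         // discard the partial outputs
+    //       pJitInfo->SetAllowRel32(FALSE);       // regenerate without rel32
+    //   }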
+
+ CEEJitInfo(MethodDesc* fd, COR_ILMETHOD_DECODER* header,
+ EEJitManager* jm, bool fVerifyOnly)
+ : CEEInfo(fd, fVerifyOnly),
+ m_jitManager(jm),
+ m_CodeHeader(NULL),
+ m_ILHeader(header),
+#ifdef WIN64EXCEPTIONS
+ m_moduleBase(NULL),
+ m_totalUnwindSize(0),
+ m_usedUnwindSize(0),
+ m_theUnwindBlock(NULL),
+ m_totalUnwindInfos(0),
+ m_usedUnwindInfos(0),
+#endif
+#ifdef _TARGET_AMD64_
+ m_fAllowRel32(FALSE),
+ m_fRel32Overflow(FALSE),
+#endif
+ m_GCinfo_len(0),
+ m_EHinfo_len(0),
+ m_iOffsetMapping(0),
+ m_pOffsetMapping(NULL),
+ m_iNativeVarInfo(0),
+ m_pNativeVarInfo(NULL),
+ m_gphCache()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ m_pOverride = this;
+ }
+
+ ~CEEJitInfo()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (m_pOffsetMapping != NULL)
+ delete [] ((BYTE*) m_pOffsetMapping);
+
+ if (m_pNativeVarInfo != NULL)
+ delete [] ((BYTE*) m_pNativeVarInfo);
+ }
+
+ // ICorDebugInfo stuff.
+ void setBoundaries(CORINFO_METHOD_HANDLE ftn,
+ ULONG32 cMap, ICorDebugInfo::OffsetMapping *pMap);
+ void setVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars,
+ ICorDebugInfo::NativeVarInfo *vars);
+ void CompressDebugInfo();
+
+ void* getHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
+ void ** ppIndirection); /* OUT */
+ static PCODE getHelperFtnStatic(CorInfoHelpFunc ftnNum);
+
+ // Override active dependency to talk to loader
+ void addActiveDependency(CORINFO_MODULE_HANDLE moduleFrom, CORINFO_MODULE_HANDLE moduleTo);
+
+ // Override of CEEInfo::GetProfilingHandle. The first time this is called for a
+ // method desc, it calls through to CEEInfo::GetProfilingHandle and caches the
+ // result in CEEJitInfo::GetProfilingHandleCache. Thereafter, this wrapper regurgitates the cached values
+ // rather than calling into CEEInfo::GetProfilingHandle each time. This avoids
+ // making duplicate calls into the profiler's FunctionIDMapper callback.
+ void GetProfilingHandle(
+ BOOL *pbHookFunction,
+ void **pProfilerHandle,
+ BOOL *pbIndirectedHandles
+ );
+
+ InfoAccessType constructStringLiteral(CORINFO_MODULE_HANDLE scopeHnd, mdToken metaTok, void **ppValue);
+ InfoAccessType emptyStringLiteral(void ** ppValue);
+ void* getFieldAddress(CORINFO_FIELD_HANDLE field, void **ppIndirection);
+ void* getMethodSync(CORINFO_METHOD_HANDLE ftnHnd, void **ppIndirection);
+
+ void BackoutJitData(EEJitManager * jitMgr);
+
+protected :
+ EEJitManager* m_jitManager; // responsible for allocating memory
+ CodeHeader* m_CodeHeader; // descriptor for JITTED code
+    COR_ILMETHOD_DECODER * m_ILHeader;         // the method's IL header as it exists in the file
+#ifdef WIN64EXCEPTIONS
+ TADDR m_moduleBase; // Base for unwind Infos
+ ULONG m_totalUnwindSize; // Total reserved unwind space
+ ULONG m_usedUnwindSize; // used space in m_theUnwindBlock
+ BYTE * m_theUnwindBlock; // start of the unwind memory block
+ ULONG m_totalUnwindInfos; // Number of RUNTIME_FUNCTION needed
+ ULONG m_usedUnwindInfos;
+#endif
+
+#ifdef _TARGET_AMD64_
+ BOOL m_fAllowRel32; // Use 32-bit PC relative address modes
+    BOOL                    m_fRel32Overflow;   // Overflow while trying to encode a 32-bit PC-relative address.
+                                                // The code will need to be regenerated with m_fAllowRel32 == FALSE.
+#endif
+
+#if defined(_DEBUG)
+ ULONG m_codeSize; // Code size requested via allocMem
+#endif
+
+    size_t              m_GCinfo_len;       // Cached copy of GCinfo_len so we can back out in BackoutJitData()
+    size_t              m_EHinfo_len;       // Cached copy of EHinfo_len so we can back out in BackoutJitData()
+
+ ULONG32 m_iOffsetMapping;
+ ICorDebugInfo::OffsetMapping * m_pOffsetMapping;
+
+ ULONG32 m_iNativeVarInfo;
+ ICorDebugInfo::NativeVarInfo * m_pNativeVarInfo;
+
+ // The first time a call is made to CEEJitInfo::GetProfilingHandle() from this thread
+ // for this method, these values are filled in. Thereafter, these values are used
+ // in lieu of calling into the base CEEInfo::GetProfilingHandle() again. This protects the
+ // profiler from duplicate calls to its FunctionIDMapper() callback.
+ struct GetProfilingHandleCache
+ {
+ GetProfilingHandleCache() :
+ m_bGphIsCacheValid(false),
+ m_bGphHookFunction(false),
+ m_pvGphProfilerHandle(NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ bool m_bGphIsCacheValid : 1; // Tells us whether below values are valid
+ bool m_bGphHookFunction : 1;
+ void* m_pvGphProfilerHandle;
+ } m_gphCache;
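+
+    // A minimal sketch of the caching pattern described above (illustrative
+    // only, not the verbatim implementation):
+    //
+    //   void CEEJitInfo::GetProfilingHandle(BOOL *pbHookFunction, void **ppHandle, BOOL *pbIndirected)
+    //   {
+    //       if (!m_gphCache.m_bGphIsCacheValid)
+    //       {
+    //           CEEInfo::GetProfilingHandle(...); // may invoke FunctionIDMapper
+    //           // ... copy the results into m_gphCache ...
+    //           m_gphCache.m_bGphIsCacheValid = true;
+    //       }
+    //       *pbHookFunction = m_gphCache.m_bGphHookFunction;
+    //       *ppHandle       = m_gphCache.m_pvGphProfilerHandle;
+    //   }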
+
+
+};
+#endif // CROSSGEN_COMPILE
+
+/*********************************************************************/
+/*********************************************************************/
+
+typedef struct {
+ void * pfnHelper;
+#ifdef _DEBUG
+ const char* name;
+#endif
+} VMHELPDEF;
+
+#if defined(DACCESS_COMPILE)
+
+GARY_DECL(VMHELPDEF, hlpFuncTable, CORINFO_HELP_COUNT);
+
+#else
+
+extern "C" const VMHELPDEF hlpFuncTable[CORINFO_HELP_COUNT];
+
+#endif
+
+#if defined(_DEBUG) && (defined(_TARGET_AMD64_) || defined(_TARGET_X86_)) && !defined(FEATURE_PAL)
+typedef struct {
+ void* pfnRealHelper;
+ const char* helperName;
+ LONG count;
+ LONG helperSize;
+} VMHELPCOUNTDEF;
+
+extern "C" VMHELPCOUNTDEF hlpFuncCountTable[CORINFO_HELP_COUNT+1];
+
+void InitJitHelperLogging();
+void WriteJitHelperCountToSTRESSLOG();
+#else
+inline void InitJitHelperLogging() { }
+inline void WriteJitHelperCountToSTRESSLOG() { }
+#endif
+
+// enum for dynamically assigned helper calls
+enum DynamicCorInfoHelpFunc {
+#define JITHELPER(code, pfnHelper, sig)
+#define DYNAMICJITHELPER(code, pfnHelper, sig) DYNAMIC_##code,
+#include "jithelpers.h"
+ DYNAMIC_CORINFO_HELP_COUNT
+};
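+
+// The X-macro include above turns each DYNAMICJITHELPER(code, pfnHelper, sig)
+// entry in jithelpers.h into a DYNAMIC_<code> enumerator, while plain JITHELPER
+// entries expand to nothing (they get no dynamically assignable slot). For
+// example, an entry for CORINFO_HELP_NEWSFAST yields DYNAMIC_CORINFO_HELP_NEWSFAST.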
+
+#ifdef _MSC_VER
+// GCC complains about a duplicate "extern", and it is not needed for the GCC build.
+extern "C"
+#endif
+GARY_DECL(VMHELPDEF, hlpDynamicFuncTable, DYNAMIC_CORINFO_HELP_COUNT);
+
+#define SetJitHelperFunction(ftnNum, pFunc) _SetJitHelperFunction(DYNAMIC_##ftnNum, pFunc)
+void _SetJitHelperFunction(DynamicCorInfoHelpFunc ftnNum, void * pFunc);
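+
+// Example (as used in InitJITHelpers1 in jitinterfacegen.cpp later in this change):
+//   SetJitHelperFunction(CORINFO_HELP_NEWSFAST, JIT_TrialAllocSFastMP);
+// expands to
+//   _SetJitHelperFunction(DYNAMIC_CORINFO_HELP_NEWSFAST, JIT_TrialAllocSFastMP);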
+#ifdef ENABLE_FAST_GCPOLL_HELPER
+// These should only be called from ThreadStore::TrapReturningThreads!
+
+// Called when the VM wants to suspend one or more threads.
+void EnableJitGCPoll();
+// Called when there are no threads to suspend.
+void DisableJitGCPoll();
+#endif
+
+// Helper for RtlVirtualUnwind-based tail calls
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
+
+// The Stub-linker generated assembly routine to copy arguments from the va_list
+// into the CONTEXT and the stack.
+//
+typedef size_t (*pfnCopyArgs)(va_list, _CONTEXT *, DWORD_PTR *, size_t);
+
+// Forward declaration from Frames.h
+class TailCallFrame;
+
+// The shared stub return location
+EXTERN_C void JIT_TailCallHelperStub_ReturnAddress();
+
+#endif // _TARGET_AMD64_ || _TARGET_ARM_
+
+
+#ifdef _TARGET_X86_
+
+class JIT_TrialAlloc
+{
+public:
+ enum Flags
+ {
+ NORMAL = 0x0,
+ MP_ALLOCATOR = 0x1,
+ SIZE_IN_EAX = 0x2,
+ OBJ_ARRAY = 0x4,
+        ALIGN8 = 0x8,     // insert a dummy object to ensure 8-byte alignment (until the next GC)
+ ALIGN8OBJ = 0x10,
+ NO_FRAME = 0x20, // call is from unmanaged code - don't try to put up a frame
+ };
+
+ static void *GenAllocSFast(Flags flags);
+ static void *GenBox(Flags flags);
+ static void *GenAllocArray(Flags flags);
+ static void *GenAllocString(Flags flags);
+
+private:
+ static void EmitAlignmentRoundup(CPUSTUBLINKER *psl,X86Reg regTestAlign, X86Reg regToAdj, Flags flags);
+ static void EmitDummyObject(CPUSTUBLINKER *psl, X86Reg regTestAlign, Flags flags);
+ static void EmitCore(CPUSTUBLINKER *psl, CodeLabel *noLock, CodeLabel *noAlloc, Flags flags);
+ static void EmitNoAllocCode(CPUSTUBLINKER *psl, Flags flags);
+
+#if CHECK_APP_DOMAIN_LEAKS
+ static void EmitSetAppDomain(CPUSTUBLINKER *psl);
+ static void EmitCheckRestore(CPUSTUBLINKER *psl);
+#endif
+};
+#endif // _TARGET_X86_
+
+void *GenFastGetSharedStaticBase(bool bCheckCCtor);
+
+#ifdef HAVE_GCCOVER
+void SetupGcCoverage(MethodDesc* pMD, BYTE* nativeCode);
+void SetupGcCoverageForNativeImage(Module* module);
+BOOL OnGcCoverageInterrupt(PT_CONTEXT regs);
+void DoGcStress (PT_CONTEXT regs, MethodDesc *pMD);
+#endif //HAVE_GCCOVER
+
+EXTERN_C FCDECL2(LPVOID, ArrayStoreCheck, Object** pElement, PtrArray** pArray);
+FCDECL1(StringObject*, FramedAllocateString, DWORD stringLength);
+FCDECL1(StringObject*, UnframedAllocateString, DWORD stringLength);
+
+OBJECTHANDLE ConstructStringLiteral(CORINFO_MODULE_HANDLE scopeHnd, mdToken metaTok);
+
+FCDECL1(Object*, JIT_New, CORINFO_CLASS_HANDLE typeHnd_);
+FCDECL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE typeHnd_, INT_PTR size);
+FCDECL2(Object*, JIT_Box, CORINFO_CLASS_HANDLE type, void* data);
+FCDECL0(VOID, JIT_PollGC);
+#ifdef ENABLE_FAST_GCPOLL_HELPER
+EXTERN_C FCDECL0(VOID, JIT_PollGC_Nop);
+#endif
+
+BOOL ObjIsInstanceOf(Object *pObject, TypeHandle toTypeHnd);
+EXTERN_C TypeHandle::CastResult STDCALL ObjIsInstanceOfNoGC(Object *pObject, TypeHandle toTypeHnd);
+
+#ifdef _WIN64
+class InlinedCallFrame;
+Thread * __stdcall JIT_InitPInvokeFrame(InlinedCallFrame *pFrame, PTR_VOID StubSecretArg);
+#endif
+
+#ifdef _DEBUG
+extern LONG g_JitCount;
+#endif
+
+struct VirtualFunctionPointerArgs
+{
+ CORINFO_CLASS_HANDLE classHnd;
+ CORINFO_METHOD_HANDLE methodHnd;
+};
+
+FCDECL2(CORINFO_MethodPtr, JIT_VirtualFunctionPointer_Dynamic, Object * objectUNSAFE, VirtualFunctionPointerArgs * pArgs);
+
+typedef TADDR (F_CALL_CONV * FnStaticBaseHelper)(TADDR arg0, TADDR arg1);
+
+struct StaticFieldAddressArgs
+{
+ FnStaticBaseHelper staticBaseHelper;
+ TADDR arg0;
+ TADDR arg1;
+ SIZE_T offset;
+};
+
+FCDECL1(TADDR, JIT_StaticFieldAddress_Dynamic, StaticFieldAddressArgs * pArgs);
+FCDECL1(TADDR, JIT_StaticFieldAddressUnbox_Dynamic, StaticFieldAddressArgs * pArgs);
+
+CORINFO_GENERIC_HANDLE JIT_GenericHandleWorker(MethodDesc *pMD,
+ MethodTable *pMT,
+ LPVOID signature);
+
+void ClearJitGenericHandleCache(AppDomain *pDomain);
+
+class JitHelpers {
+public:
+ static FCDECL3(void, UnsafeSetArrayElement, PtrArray* pPtrArray, INT32 index, Object* object);
+};
+
+DWORD GetDebuggerCompileFlags(Module* pModule, DWORD flags);
+
+bool TrackAllocationsEnabled();
+
+#endif // JITINTERFACE_H
diff --git a/src/vm/jitinterfacegen.cpp b/src/vm/jitinterfacegen.cpp
new file mode 100644
index 0000000000..7ef145316a
--- /dev/null
+++ b/src/vm/jitinterfacegen.cpp
@@ -0,0 +1,291 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: JITinterfaceGen.CPP
+//
+
+// ===========================================================================
+
+// This contains generic C versions of some of the routines
+// required by JITinterface.cpp. They are modeled after the
+// x86-specific routines found in JIThelp.asm and JITinterfaceX86.cpp.
+// More and more we're making AMD64- and IA64-specific versions of
+// the helpers as well; JitInterfaceGen.cpp sticks around for Rotor...
+
+
+#include "common.h"
+#include "clrtypes.h"
+#include "jitinterface.h"
+#include "eeconfig.h"
+#include "excep.h"
+#include "comdelegate.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h" // create context bound and remote class instances
+#endif
+#include "field.h"
+#include "ecall.h"
+
+#ifdef _WIN64
+
+// These are the fastest(?) versions of the JIT helpers: the code to fetch the current Thread is
+// patched directly into them, so they do not make a call to GetThread.
+EXTERN_C Object* JIT_TrialAllocSFastMP_InlineGetThread(CORINFO_CLASS_HANDLE typeHnd_);
+EXTERN_C Object* JIT_BoxFastMP_InlineGetThread (CORINFO_CLASS_HANDLE type, void* unboxedData);
+EXTERN_C Object* AllocateStringFastMP_InlineGetThread (CLR_I4 cch);
+EXTERN_C Object* JIT_NewArr1OBJ_MP_InlineGetThread (CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+EXTERN_C Object* JIT_NewArr1VC_MP_InlineGetThread (CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+
+// This next set comprises the fast versions that invoke GetThread but are still faster than the
+// VM implementations (i.e. the "slow" versions).
+EXTERN_C Object* JIT_TrialAllocSFastMP(CORINFO_CLASS_HANDLE typeHnd_);
+EXTERN_C Object* JIT_TrialAllocSFastSP(CORINFO_CLASS_HANDLE typeHnd_);
+EXTERN_C Object* JIT_BoxFastMP (CORINFO_CLASS_HANDLE type, void* unboxedData);
+EXTERN_C Object* JIT_BoxFastUP (CORINFO_CLASS_HANDLE type, void* unboxedData);
+EXTERN_C Object* AllocateStringFastMP (CLR_I4 cch);
+EXTERN_C Object* AllocateStringFastUP (CLR_I4 cch);
+
+EXTERN_C Object* JIT_NewArr1OBJ_MP (CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+EXTERN_C Object* JIT_NewArr1OBJ_UP (CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+EXTERN_C Object* JIT_NewArr1VC_MP (CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+EXTERN_C Object* JIT_NewArr1VC_UP (CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+
+// For the optimized JIT_Mon helpers
+#if defined(_TARGET_AMD64_)
+EXTERN_C void JIT_MonEnterWorker_Slow(Object* obj, BYTE* pbLockTaken);
+EXTERN_C void JIT_MonExitWorker_Slow(Object* obj, BYTE* pbLockTaken);
+EXTERN_C void JIT_MonTryEnter_Slow(Object* obj, INT32 timeOut, BYTE* pbLockTaken);
+EXTERN_C void JIT_MonEnterStatic_Slow(AwareLock* lock, BYTE* pbLockTaken);
+EXTERN_C void JIT_MonExitStatic_Slow(AwareLock* lock, BYTE* pbLockTaken);
+#endif // _TARGET_AMD64_
+
+extern "C" void* JIT_GetSharedNonGCStaticBase_Slow(SIZE_T moduleDomainID, DWORD dwModuleClassID);
+extern "C" void* JIT_GetSharedNonGCStaticBaseNoCtor_Slow(SIZE_T moduleDomainID, DWORD dwModuleClassID);
+extern "C" void* JIT_GetSharedGCStaticBase_Slow(SIZE_T moduleDomainID, DWORD dwModuleClassID);
+extern "C" void* JIT_GetSharedGCStaticBaseNoCtor_Slow(SIZE_T moduleDomainID, DWORD dwModuleClassID);
+
+#ifdef _TARGET_AMD64_
+extern WriteBarrierManager g_WriteBarrierManager;
+#endif // _TARGET_AMD64_
+
+#ifndef FEATURE_IMPLICIT_TLS
+EXTERN_C DWORD gThreadTLSIndex;
+EXTERN_C DWORD gAppDomainTLSIndex;
+#endif
+#endif // _WIN64
+
+/*********************************************************************/
+// Initialize the part of the JIT helpers that require very little of
+// EE infrastructure to be in place.
+/*********************************************************************/
+#ifndef _TARGET_X86_
+
+#if defined(_TARGET_AMD64_)
+
+void MakeIntoJumpStub(LPVOID pStubAddress, LPVOID pTarget)
+{
+ BYTE* pbStubAddress = (BYTE*)pStubAddress;
+ BYTE* pbTarget = (BYTE*)pTarget;
+
+ DWORD dwOldProtect;
+ if (!ClrVirtualProtect(pbStubAddress, 5, PAGE_EXECUTE_READWRITE, &dwOldProtect))
+ {
+ ThrowLastError();
+ }
+
+ DWORD diff = (DWORD)(pbTarget - (pbStubAddress + 5));
+
+    // Make sure that the offset fits in 32 bits
+ _ASSERTE( FitsInI4(pbTarget - (pbStubAddress + 5)) );
+
+ // Write a jmp pcrel32 instruction
+ //
+ // 0xe9xxxxxxxx
+ pbStubAddress[0] = 0xE9;
+ *((DWORD*)&pbStubAddress[1]) = diff;
+
+ ClrVirtualProtect(pbStubAddress, 5, dwOldProtect, &dwOldProtect);
+}
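+
+// Worked example: a stub at address 0x1000 retargeted to 0x2000 is rewritten as
+// the 5-byte instruction
+//   E9 FB 0F 00 00    ; jmp rel32, rel32 = 0x2000 - (0x1000 + 5) = 0xFFB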
+
+EXTERN_C void JIT_TrialAllocSFastMP_InlineGetThread__PatchTLSOffset();
+EXTERN_C void JIT_BoxFastMPIGT__PatchTLSLabel();
+EXTERN_C void AllocateStringFastMP_InlineGetThread__PatchTLSOffset();
+EXTERN_C void JIT_NewArr1VC_MP_InlineGetThread__PatchTLSOffset();
+EXTERN_C void JIT_NewArr1OBJ_MP_InlineGetThread__PatchTLSOffset();
+EXTERN_C void JIT_MonEnterWorker_InlineGetThread_GetThread_PatchLabel();
+EXTERN_C void JIT_MonExitWorker_InlineGetThread_GetThread_PatchLabel();
+EXTERN_C void JIT_MonTryEnter_GetThread_PatchLabel();
+EXTERN_C void JIT_MonEnterStaticWorker_InlineGetThread_GetThread_PatchLabel_1();
+EXTERN_C void JIT_MonEnterStaticWorker_InlineGetThread_GetThread_PatchLabel_2();
+EXTERN_C void JIT_MonExitStaticWorker_InlineGetThread_GetThread_PatchLabel();
+
+
+static const LPVOID InlineGetThreadLocations[] = {
+ (PVOID)JIT_TrialAllocSFastMP_InlineGetThread__PatchTLSOffset,
+ (PVOID)JIT_BoxFastMPIGT__PatchTLSLabel,
+ (PVOID)AllocateStringFastMP_InlineGetThread__PatchTLSOffset,
+ (PVOID)JIT_NewArr1VC_MP_InlineGetThread__PatchTLSOffset,
+ (PVOID)JIT_NewArr1OBJ_MP_InlineGetThread__PatchTLSOffset,
+ (PVOID)JIT_MonEnterWorker_InlineGetThread_GetThread_PatchLabel,
+ (PVOID)JIT_MonExitWorker_InlineGetThread_GetThread_PatchLabel,
+ (PVOID)JIT_MonTryEnter_GetThread_PatchLabel,
+ (PVOID)JIT_MonEnterStaticWorker_InlineGetThread_GetThread_PatchLabel_1,
+ (PVOID)JIT_MonEnterStaticWorker_InlineGetThread_GetThread_PatchLabel_2,
+ (PVOID)JIT_MonExitStaticWorker_InlineGetThread_GetThread_PatchLabel,
+};
+
+EXTERN_C void JIT_GetSharedNonGCStaticBase__PatchTLSLabel();
+EXTERN_C void JIT_GetSharedNonGCStaticBaseNoCtor__PatchTLSLabel();
+EXTERN_C void JIT_GetSharedGCStaticBase__PatchTLSLabel();
+EXTERN_C void JIT_GetSharedGCStaticBaseNoCtor__PatchTLSLabel();
+
+static const LPVOID InlineGetAppDomainLocations[] = {
+ (PVOID)JIT_GetSharedNonGCStaticBase__PatchTLSLabel,
+ (PVOID)JIT_GetSharedNonGCStaticBaseNoCtor__PatchTLSLabel,
+ (PVOID)JIT_GetSharedGCStaticBase__PatchTLSLabel,
+ (PVOID)JIT_GetSharedGCStaticBaseNoCtor__PatchTLSLabel
+};
+
+
+#endif // defined(_TARGET_AMD64_)
+
+#if defined(_WIN64) && !defined(FEATURE_IMPLICIT_TLS)
+void FixupInlineGetters(DWORD tlsSlot, const LPVOID * pLocations, int nLocations)
+{
+ BYTE* pInlineGetter;
+ DWORD dwOldProtect;
+ for (int i=0; i<nLocations; i++)
+ {
+ pInlineGetter = (BYTE*)GetEEFuncEntryPoint((BYTE*)pLocations[i]);
+
+ static const DWORD cbPatch = 9;
+ if (!ClrVirtualProtect(pInlineGetter, cbPatch, PAGE_EXECUTE_READWRITE, &dwOldProtect))
+ {
+ ThrowLastError();
+ }
+
+ DWORD offset = (tlsSlot * sizeof(LPVOID) + offsetof(TEB, TlsSlots));
+
+#if defined(_TARGET_AMD64_)
+ // mov r??, gs:[TLS offset]
+ _ASSERTE_ALL_BUILDS("clr/src/VM/JITinterfaceGen.cpp",
+ pInlineGetter[0] == 0x65 &&
+ pInlineGetter[2] == 0x8B &&
+ pInlineGetter[4] == 0x25 &&
+ "Initialization failure while stomping instructions for the TLS slot offset: the instruction at the given offset did not match what we expect");
+
+ *((DWORD*)(pInlineGetter + 5)) = offset;
+#else // _TARGET_AMD64_
+ PORTABILITY_ASSERT("FixupInlineGetters");
+#endif //_TARGET_AMD64_
+
+ FlushInstructionCache(GetCurrentProcess(), pInlineGetter, cbPatch);
+ ClrVirtualProtect(pInlineGetter, cbPatch, dwOldProtect, &dwOldProtect);
+ }
+}
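+
+// For example, assuming the conventional x64 TEB layout where TlsSlots is at
+// offset 0x1480, patching a getter for TLS slot 10 produces
+//   65 48 8B 04 25 D0 14 00 00    ; mov rax, gs:[0x1480 + 10*8]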
+#endif // defined(_WIN64) && !defined(FEATURE_IMPLICIT_TLS)
+
+#if defined(_TARGET_AMD64_)
+EXTERN_C void JIT_MonEnterStaticWorker();
+EXTERN_C void JIT_MonExitStaticWorker();
+#endif
+
+void InitJITHelpers1()
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(g_SystemInfo.dwNumberOfProcessors != 0);
+
+#if defined(_TARGET_AMD64_)
+
+ g_WriteBarrierManager.Initialize();
+
+#ifndef FEATURE_IMPLICIT_TLS
+
+ if (gThreadTLSIndex < TLS_MINIMUM_AVAILABLE)
+ {
+ FixupInlineGetters(gThreadTLSIndex, InlineGetThreadLocations, COUNTOF(InlineGetThreadLocations));
+ }
+
+ if (gAppDomainTLSIndex < TLS_MINIMUM_AVAILABLE)
+ {
+ FixupInlineGetters(gAppDomainTLSIndex, InlineGetAppDomainLocations, COUNTOF(InlineGetAppDomainLocations));
+ }
+
+ // Allocation helpers, faster but non-logging
+ if (!((TrackAllocationsEnabled()) ||
+ (LoggingOn(LF_GCALLOC, LL_INFO10))
+#ifdef _DEBUG
+ || (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP) != 0)
+#endif // _DEBUG
+ ))
+ {
+
+ // if (multi-proc || server GC)
+ if (GCHeap::UseAllocationContexts())
+ {
+            // If the TLS index for Thread is low enough, use the super-fast helpers
+ if (gThreadTLSIndex < TLS_MINIMUM_AVAILABLE)
+ {
+ SetJitHelperFunction(CORINFO_HELP_NEWSFAST, JIT_TrialAllocSFastMP_InlineGetThread);
+ SetJitHelperFunction(CORINFO_HELP_NEWSFAST_ALIGN8, JIT_TrialAllocSFastMP_InlineGetThread);
+ SetJitHelperFunction(CORINFO_HELP_BOX, JIT_BoxFastMP_InlineGetThread);
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_VC, JIT_NewArr1VC_MP_InlineGetThread);
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_OBJ, JIT_NewArr1OBJ_MP_InlineGetThread);
+
+ ECall::DynamicallyAssignFCallImpl(GetEEFuncEntryPoint(AllocateStringFastMP_InlineGetThread), ECall::FastAllocateString);
+ }
+ else
+ {
+ SetJitHelperFunction(CORINFO_HELP_NEWSFAST, JIT_TrialAllocSFastMP);
+ SetJitHelperFunction(CORINFO_HELP_NEWSFAST_ALIGN8, JIT_TrialAllocSFastMP);
+ SetJitHelperFunction(CORINFO_HELP_BOX, JIT_BoxFastMP);
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_VC, JIT_NewArr1VC_MP);
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_OBJ, JIT_NewArr1OBJ_MP);
+
+ ECall::DynamicallyAssignFCallImpl(GetEEFuncEntryPoint(AllocateStringFastMP), ECall::FastAllocateString);
+ }
+ }
+ else
+ {
+            // Replace the single-proc (1p) slow allocation helpers with faster versions.
+            //
+            // When we're running workstation GC on a single-proc box we don't have
+            // InlineGetThread versions because there is no need to call GetThread.
+ SetJitHelperFunction(CORINFO_HELP_NEWSFAST, JIT_TrialAllocSFastSP);
+ SetJitHelperFunction(CORINFO_HELP_NEWSFAST_ALIGN8, JIT_TrialAllocSFastSP);
+ SetJitHelperFunction(CORINFO_HELP_BOX, JIT_BoxFastUP);
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_VC, JIT_NewArr1VC_UP);
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_OBJ, JIT_NewArr1OBJ_UP);
+
+ ECall::DynamicallyAssignFCallImpl(GetEEFuncEntryPoint(AllocateStringFastUP), ECall::FastAllocateString);
+ }
+ }
+
+ if (gThreadTLSIndex >= TLS_MINIMUM_AVAILABLE)
+ {
+ // We need to patch the helpers for FCalls
+ MakeIntoJumpStub(JIT_MonEnterWorker_InlineGetThread, JIT_MonEnterWorker_Slow);
+ MakeIntoJumpStub(JIT_MonExitWorker_InlineGetThread, JIT_MonExitWorker_Slow);
+ MakeIntoJumpStub(JIT_MonTryEnter_InlineGetThread, JIT_MonTryEnter_Slow);
+
+ SetJitHelperFunction(CORINFO_HELP_MON_ENTER, JIT_MonEnterWorker_Slow);
+ SetJitHelperFunction(CORINFO_HELP_MON_EXIT, JIT_MonExitWorker_Slow);
+
+ SetJitHelperFunction(CORINFO_HELP_MON_ENTER_STATIC, JIT_MonEnterStatic_Slow);
+ SetJitHelperFunction(CORINFO_HELP_MON_EXIT_STATIC, JIT_MonExitStatic_Slow);
+ }
+
+ if (gAppDomainTLSIndex >= TLS_MINIMUM_AVAILABLE)
+ {
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE, JIT_GetSharedGCStaticBase_Slow);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE, JIT_GetSharedNonGCStaticBase_Slow);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR, JIT_GetSharedGCStaticBaseNoCtor_Slow);
+ SetJitHelperFunction(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR,JIT_GetSharedNonGCStaticBaseNoCtor_Slow);
+ }
+#endif // !FEATURE_IMPLICIT_TLS
+#endif // _TARGET_AMD64_
+}
+
+#endif // !_TARGET_X86_
diff --git a/src/vm/jupiterobject.h b/src/vm/jupiterobject.h
new file mode 100644
index 0000000000..1417a9b081
--- /dev/null
+++ b/src/vm/jupiterobject.h
@@ -0,0 +1,92 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+/*============================================================
+**
+** Header: IJupiterObject.h
+**
+**
+** Purpose: Defines the interfaces used in the RCW walk
+**
+==============================================================*/
+
+#ifndef _H_JUPITER_OBJECT_
+#define _H_JUPITER_OBJECT_
+
+#ifdef FEATURE_COMINTEROP
+
+#include "internalunknownimpl.h"
+
+class ICCW;
+
+// Windows.UI.Xaml.Hosting.IReferenceTrackerHost
+class DECLSPEC_UUID("29a71c6a-3c42-4416-a39d-e2825a07a773") ICLRServices : public IUnknown
+{
+public :
+ STDMETHOD(GarbageCollect)(DWORD dwFlags) = 0;
+ STDMETHOD(FinalizerThreadWait)() = 0;
+ STDMETHOD(DisconnectRCWsInCurrentApartment)() = 0;
+ STDMETHOD(CreateManagedReference)(IUnknown *pJupiterObject, ICCW **ppNewReference) = 0;
+ STDMETHOD(AddMemoryPressure)(UINT64 bytesAllocated) = 0;
+ STDMETHOD(RemoveMemoryPressure)(UINT64 bytesAllocated) = 0;
+};
+
+// Windows.UI.Xaml.Hosting.IReferenceTrackerManager
+class DECLSPEC_UUID("3cf184b4-7ccb-4dda-8455-7e6ce99a3298") IJupiterGCManager : public IUnknown
+{
+public :
+ STDMETHOD(OnGCStarted)() = 0;
+ STDMETHOD(OnRCWWalkFinished)(BOOL bWalkFailed) = 0;
+ STDMETHOD(OnGCFinished)() = 0;
+ STDMETHOD(SetCLRServices)(/* [in] */ ICLRServices *pCLRServices) = 0;
+};
+
+// Windows.UI.Xaml.Hosting.IReferenceTrackerTarget
+class DECLSPEC_UUID("64bd43f8-bfee-4ec4-b7eb-2935158dae21") ICCW : public IUnknown
+{
+
+public :
+ STDMETHOD_(ULONG, AddReferenceFromJupiter)() = 0;
+ STDMETHOD_(ULONG, ReleaseFromJupiter)() = 0;
+ STDMETHOD(Peg)() = 0;
+ STDMETHOD(Unpeg)() = 0;
+};
+
+// Windows.UI.Xaml.Hosting.IFindReferenceTargetsCallback
+class DECLSPEC_UUID("04b3486c-4687-4229-8d14-505ab584dd88") IFindDependentWrappersCallback : public IUnknown
+{
+
+public :
+ STDMETHOD(OnFoundDependentWrapper)(/* [in] */ ICCW *pCCW) = 0;
+};
+
+// Windows.UI.Xaml.Hosting.IReferenceTracker
+class DECLSPEC_UUID("11d3b13a-180e-4789-a8be-7712882893e6") IJupiterObject : public IUnknown
+{
+public :
+ STDMETHOD(Connect)() = 0; // Notify Jupiter that we've created a new RCW
+ STDMETHOD(Disconnect)() = 0; // Notify Jupiter that we are about to destroy this RCW
+ STDMETHOD(FindDependentWrappers)(/* [in] */ IFindDependentWrappersCallback *pCallback) = 0;
+ // Find list of dependent CCWs for this RCW
+ STDMETHOD(GetJupiterGCManager)(/* [out, retval] */ IJupiterGCManager **ppJupiterGCManager) = 0;
+ // Retrieve IJupiterGCManager interface
+    STDMETHOD(AfterAddRef)() = 0;               // Notify Jupiter that we've done an AddRef()
+ STDMETHOD(BeforeRelease)() = 0; // Notify Jupiter that we are about to do a Release()
+    STDMETHOD(Peg)() = 0;                       // Notify Jupiter that we've returned this IJupiterObject as an
+                                                // [out] parameter and that this IJupiterObject, including
+                                                // all its reachable CCWs, should be pegged
+};
+
+extern const IID IID_ICCW;
+extern const IID IID_IJupiterObject;
+extern const IID IID_IJupiterGCManager;
+extern const IID IID_IFindDependentWrappersCallback;
+
+#endif // FEATURE_COMINTEROP
+
+#endif // _H_JUPITER_OBJECT_
diff --git a/src/vm/listlock.cpp b/src/vm/listlock.cpp
new file mode 100644
index 0000000000..56c870cba2
--- /dev/null
+++ b/src/vm/listlock.cpp
@@ -0,0 +1,97 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: ListLock.cpp
+//
+
+//
+// ===========================================================================
+// This file describes the list lock and the deadlock-aware list lock.
+// ===========================================================================
+
+
+#include "common.h"
+#include "listlock.h"
+#include "listlock.inl"
+
+ListLockEntry::ListLockEntry(ListLock *pList, void *pData, const char *description)
+ : m_deadlock(description),
+ m_pList(pList),
+ m_pData(pData),
+ m_Crst(CrstListLock,
+ (CrstFlags)(CRST_REENTRANCY | (pList->IsHostBreakable()?CRST_HOST_BREAKABLE:0))),
+ m_pszDescription(description),
+ m_pNext(NULL),
+ m_dwRefCount(1),
+ m_hrResultCode(S_FALSE),
+ m_hInitException(NULL),
+ m_pLoaderAllocator(NULL)
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ ,
+ m_CorruptionSeverity(NotCorrupting)
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+ListLockEntry *ListLockEntry::Find(ListLock* pLock, LPVOID pPointer, const char *description)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pLock->HasLock());
+
+ ListLockEntry *pEntry = pLock->Find(pPointer);
+ if (pEntry==NULL)
+ {
+ pEntry = new ListLockEntry(pLock, pPointer, description);
+ pLock->AddElement(pEntry);
+ }
+ else
+ pEntry->AddRef();
+
+ return pEntry;
+};
+
+void ListLockEntry::AddRef()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(this));
+ }
+ CONTRACTL_END;
+
+ FastInterlockIncrement((LONG*)&m_dwRefCount);
+}
+
+void ListLockEntry::Release()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(this));
+ }
+ CONTRACTL_END;
+
+ ListLockHolder lock(m_pList);
+
+ if (FastInterlockDecrement((LONG*)&m_dwRefCount) == 0)
+ {
+ // Remove from list
+ m_pList->Unlink(this);
+ delete this;
+ }
+};
+
diff --git a/src/vm/listlock.h b/src/vm/listlock.h
new file mode 100644
index 0000000000..284d2dad4f
--- /dev/null
+++ b/src/vm/listlock.h
@@ -0,0 +1,358 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: ListLock.h
+//
+
+//
+// ===========================================================================
+// This file describes the list lock and the deadlock-aware list lock.
+// ===========================================================================
+
+#ifndef LISTLOCK_H
+#define LISTLOCK_H
+
+#include "vars.hpp"
+#include "threads.h"
+#include "crst.h"
+
+class ListLock;
+// This structure is used for running class init methods or JITing methods
+// (m_pData points to a MethodDesc). This class cannot have a destructor, since it is used
+// in functions that also have EX_TRY's, and the VC compiler doesn't allow classes with destructors
+// to be allocated in a function that uses SEH.
+// <TODO>@FUTURE Keep a pool of these (e.g. an array), so we don't have to allocate on the fly</TODO>
+// m_hInitException contains a handle to the exception thrown by the class init. This
+// allows us to throw this information to the caller on subsequent class init attempts.
+class ListLockEntry
+{
+ friend class ListLock;
+
+public:
+#ifdef _DEBUG
+ bool Check()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return m_dwRefCount != (DWORD)-1;
+ }
+#endif // _DEBUG
+
+ DeadlockAwareLock m_deadlock;
+ ListLock * m_pList;
+ void * m_pData;
+ Crst m_Crst;
+ const char * m_pszDescription;
+ ListLockEntry * m_pNext;
+ DWORD m_dwRefCount;
+ HRESULT m_hrResultCode;
+ LOADERHANDLE m_hInitException;
+ PTR_LoaderAllocator m_pLoaderAllocator;
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Field to maintain the corruption severity of the exception
+ CorruptionSeverity m_CorruptionSeverity;
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ ListLockEntry(ListLock *pList, void *pData, const char *description = NULL);
+
+ virtual ~ListLockEntry()
+ {
+ }
+
+ DEBUG_NOINLINE void Enter()
+ {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+
+ m_deadlock.BeginEnterLock();
+ DeadlockAwareLock::BlockingLockHolder dlLock;
+ m_Crst.Enter();
+ m_deadlock.EndEnterLock();
+ }
+
+ BOOL CanDeadlockAwareEnter()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return m_deadlock.CanEnterLock();
+ }
+
+ DEBUG_NOINLINE BOOL DeadlockAwareEnter()
+ {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+
+ if (!m_deadlock.TryBeginEnterLock())
+ return FALSE;
+
+ DeadlockAwareLock::BlockingLockHolder dlLock;
+ m_Crst.Enter();
+ m_deadlock.EndEnterLock();
+
+ return TRUE;
+ }
+
+ DEBUG_NOINLINE void Leave()
+ {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+
+ m_deadlock.LeaveLock();
+ m_Crst.Leave();
+ }
+
+ static ListLockEntry *Find(ListLock* pLock, LPVOID pPointer, const char *description = NULL) DAC_EMPTY_RET(NULL);
+
+ void AddRef() DAC_EMPTY_ERR();
+ void Release() DAC_EMPTY_ERR();
+
+#ifdef _DEBUG
+ BOOL HasLock()
+ {
+ WRAPPER_NO_CONTRACT;
+ return(m_Crst.OwnedByCurrentThread());
+ }
+#endif
+
+ // LockHolder holds the lock of the element, not the element itself
+
+ DEBUG_NOINLINE static void LockHolderEnter(ListLockEntry *pThis) PUB
+ {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ pThis->Enter();
+ }
+
+ DEBUG_NOINLINE static void LockHolderLeave(ListLockEntry *pThis) PUB
+ {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ pThis->Leave();
+ }
+
+ DEBUG_NOINLINE void FinishDeadlockAwareEnter()
+ {
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ DeadlockAwareLock::BlockingLockHolder dlLock;
+ m_Crst.Enter();
+ m_deadlock.EndEnterLock();
+ }
+
+ typedef Wrapper<ListLockEntry *, ListLockEntry::LockHolderEnter, ListLockEntry::LockHolderLeave> LockHolderBase;
+
+ class LockHolder : public LockHolderBase
+ {
+ public:
+
+ LockHolder()
+ : LockHolderBase(NULL, FALSE)
+ {
+ }
+
+ LockHolder(ListLockEntry *value, BOOL take = TRUE)
+ : LockHolderBase(value, take)
+ {
+ }
+
+ BOOL DeadlockAwareAcquire()
+ {
+ if (!m_acquired && m_value != NULL)
+ {
+ if (!m_value->m_deadlock.TryBeginEnterLock())
+ return FALSE;
+ m_value->FinishDeadlockAwareEnter();
+ m_acquired = TRUE;
+ }
+ return TRUE;
+ }
+ };
+};
+
+class ListLock
+{
+ protected:
+ CrstStatic m_Crst;
+ BOOL m_fInited;
+ BOOL m_fHostBreakable; // Lock can be broken by a host for deadlock detection
+ ListLockEntry * m_pHead;
+
+ public:
+
+ BOOL IsInitialized()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fInited;
+ }
+ inline void PreInit()
+ {
+ LIMITED_METHOD_CONTRACT;
+ memset(this, 0, sizeof(*this));
+ }
+
+ // DO NOT MAKE A CONSTRUCTOR FOR THIS CLASS - There are global instances
+ void Init(CrstType crstType, CrstFlags flags, BOOL fHostBreakable = FALSE)
+ {
+ WRAPPER_NO_CONTRACT;
+ PreInit();
+ m_Crst.Init(crstType, flags);
+ m_fInited = TRUE;
+ m_fHostBreakable = fHostBreakable;
+ }
+
+ void Destroy()
+ {
+ WRAPPER_NO_CONTRACT;
+ // There should not be any of these around
+ _ASSERTE(m_pHead == NULL || dbg_fDrasticShutdown || g_fInControlC);
+
+ if (m_fInited)
+ {
+ m_fInited = FALSE;
+ m_Crst.Destroy();
+ }
+ }
+
+ BOOL IsHostBreakable () const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fHostBreakable;
+ }
+
+ void AddElement(ListLockEntry* pElement)
+ {
+ WRAPPER_NO_CONTRACT;
+ pElement->m_pNext = m_pHead;
+ m_pHead = pElement;
+ }
+
+
+ DEBUG_NOINLINE void Enter()
+ {
+ CANNOT_HAVE_CONTRACT; // See below
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+#if 0 // The cleanup logic contract will cause any forbid GC state from the Crst to
+ // get deleted. This causes asserts from Leave. We probably should make the contract
+ // implementation tolerant of this pattern, or else ensure that the state the contract
+ // modifies is not used by any other code.
+ CONTRACTL
+ {
+ NOTHROW;
+ UNCHECKED(GC_TRIGGERS); // May trigger or not based on Crst's type
+ MODE_ANY;
+ PRECONDITION(CheckPointer(this));
+ }
+ CONTRACTL_END;
+#endif
+
+ m_Crst.Enter();
+ }
+
+ DEBUG_NOINLINE void Leave()
+ {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ m_Crst.Leave();
+ }
+
+    // Must own the lock before calling this, or it is OK if the debugger has
+    // all threads stopped
+ ListLockEntry *Find(void *pData);
+
+ // Must own the lock before calling this!
+ ListLockEntry* Pop(BOOL unloading = FALSE)
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef _DEBUG
+ if(unloading == FALSE)
+ _ASSERTE(m_Crst.OwnedByCurrentThread());
+#endif
+
+ if(m_pHead == NULL) return NULL;
+ ListLockEntry* pEntry = m_pHead;
+ m_pHead = m_pHead->m_pNext;
+ return pEntry;
+ }
+
+ // Must own the lock before calling this!
+ ListLockEntry* Peek()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_Crst.OwnedByCurrentThread());
+ return m_pHead;
+ }
+
+ // Must own the lock before calling this!
+ BOOL Unlink(ListLockEntry *pItem)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_Crst.OwnedByCurrentThread());
+ ListLockEntry *pSearch;
+ ListLockEntry *pPrev;
+
+ pPrev = NULL;
+
+ for (pSearch = m_pHead; pSearch != NULL; pSearch = pSearch->m_pNext)
+ {
+ if (pSearch == pItem)
+ {
+ if (pPrev == NULL)
+ m_pHead = pSearch->m_pNext;
+ else
+ pPrev->m_pNext = pSearch->m_pNext;
+
+ return TRUE;
+ }
+
+ pPrev = pSearch;
+ }
+
+ // Not found
+ return FALSE;
+ }
+
+#ifdef _DEBUG
+ BOOL HasLock()
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return(m_Crst.OwnedByCurrentThread());
+ }
+#endif
+
+ DEBUG_NOINLINE static void HolderEnter(ListLock *pThis)
+ {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ pThis->Enter();
+ }
+
+ DEBUG_NOINLINE static void HolderLeave(ListLock *pThis)
+ {
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ pThis->Leave();
+ }
+
+ typedef Wrapper<ListLock*, ListLock::HolderEnter, ListLock::HolderLeave> LockHolder;
+};
+
+class WaitingThreadListElement
+{
+public:
+ Thread * m_pThread;
+ WaitingThreadListElement * m_pNext;
+};
+
+// Holds the lock of the ListLock
+typedef ListLock::LockHolder ListLockHolder;
+
+// Holds the ownership of the lock element
+typedef ReleaseHolder<ListLockEntry> ListLockEntryHolder;
+
+// Holds the lock of the lock element
+typedef ListLockEntry::LockHolder ListLockEntryLockHolder;
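+
+// A minimal usage sketch of the holders above (illustrative only; the variable
+// names and the guarded work are hypothetical):
+//
+//   ListLockHolder listLock(pList);               // take the list lock
+//   ListLockEntryHolder entry(ListLockEntry::Find(pList, pData, "description"));
+//   ListLockEntryLockHolder entryLock(entry, FALSE);
+//   listLock.Release();                           // drop the list lock before blocking
+//   if (entryLock.DeadlockAwareAcquire())
+//   {
+//       // ... do the guarded work (e.g. run a class initializer) ...
+//   }
+//   else
+//   {
+//       // taking the entry lock would deadlock; handle without blocking
+//   }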
+
+
+#endif // LISTLOCK_H
diff --git a/src/vm/listlock.inl b/src/vm/listlock.inl
new file mode 100644
index 0000000000..cd40c020d8
--- /dev/null
+++ b/src/vm/listlock.inl
@@ -0,0 +1,52 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+//
+
+//
+// File: ListLock.inl
+//
+// ===========================================================================
+// This file describes the list lock and deadlock-aware list lock functions
+// that are inlined but can't go in the header.
+// ===========================================================================
+#ifndef LISTLOCK_INL
+#define LISTLOCK_INL
+
+#include "listlock.h"
+#include "dbginterface.h"
+// Must own the lock before calling this, or it is OK if the debugger has
+// all threads stopped
+
+inline ListLockEntry *ListLock::Find(void *pData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+#ifdef DEBUGGING_SUPPORTED
+ PRECONDITION(m_Crst.OwnedByCurrentThread() ||
+ CORDebuggerAttached() && g_pDebugInterface->IsStopped());
+#else
+ PRECONDITION(m_Crst.OwnedByCurrentThread());
+#endif // DEBUGGING_SUPPORTED
+
+ }
+ CONTRACTL_END;
+
+ ListLockEntry *pSearch;
+
+ for (pSearch = m_pHead; pSearch != NULL; pSearch = pSearch->m_pNext)
+ {
+ if (pSearch->m_pData == pData)
+ return pSearch;
+ }
+
+ return NULL;
+}
+
+
+#endif // LISTLOCK_INL
diff --git a/src/vm/loaderallocator.cpp b/src/vm/loaderallocator.cpp
new file mode 100644
index 0000000000..fad814292f
--- /dev/null
+++ b/src/vm/loaderallocator.cpp
@@ -0,0 +1,1668 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#include "stringliteralmap.h"
+#include "virtualcallstub.h"
+
+//*****************************************************************************
+// Used by LoaderAllocator::Init for easier readability.
+#ifdef ENABLE_PERF_COUNTERS
+#define LOADERHEAP_PROFILE_COUNTER (&(GetPerfCounters().m_Loading.cbLoaderHeapSize))
+#else
+#define LOADERHEAP_PROFILE_COUNTER (NULL)
+#endif
+
+#ifndef CROSSGEN_COMPILE
+#define STUBMANAGER_RANGELIST(stubManager) (stubManager::g_pManager->GetRangeList())
+#else
+#define STUBMANAGER_RANGELIST(stubManager) (NULL)
+#endif
+
+UINT64 LoaderAllocator::cLoaderAllocatorsCreated = 1;
+
+LoaderAllocator::LoaderAllocator()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // initialize all members up front to NULL so that short-circuit failure won't cause invalid values
+ m_InitialReservedMemForLoaderHeaps = NULL;
+ m_pLowFrequencyHeap = NULL;
+ m_pHighFrequencyHeap = NULL;
+ m_pStubHeap = NULL;
+ m_pPrecodeHeap = NULL;
+ m_pExecutableHeap = NULL;
+#ifdef FEATURE_READYTORUN
+ m_pDynamicHelpersHeap = NULL;
+#endif
+ m_pFuncPtrStubs = NULL;
+ m_hLoaderAllocatorObjectHandle = NULL;
+ m_pStringLiteralMap = NULL;
+
+ m_cReferences = (UINT32)-1;
+
+ m_pDomainAssemblyToDelete = NULL;
+
+#ifdef FAT_DISPATCH_TOKENS
+ // DispatchTokenFat pointer table for token overflow scenarios. Lazily allocated.
+ m_pFatTokenSetLock = NULL;
+ m_pFatTokenSet = NULL;
+#endif
+
+ m_pVirtualCallStubManager = NULL;
+ m_fGCPressure = false;
+ m_fTerminated = false;
+ m_fUnloaded = false;
+ m_fMarked = false;
+ m_pLoaderAllocatorDestroyNext = NULL;
+ m_pDomain = NULL;
+ m_pCodeHeapInitialAlloc = NULL;
+ m_pVSDHeapInitialAlloc = NULL;
+ m_pLastUsedCodeHeap = NULL;
+ m_pLastUsedDynamicCodeHeap = NULL;
+ m_pJumpStubCache = NULL;
+
+ m_nLoaderAllocator = InterlockedIncrement64((LONGLONG *)&LoaderAllocator::cLoaderAllocatorsCreated);
+}
+
+LoaderAllocator::~LoaderAllocator()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ }
+ CONTRACTL_END;
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+ Terminate();
+
+ // Assert that VSD is not still active when the destructor is called.
+ _ASSERTE(m_pVirtualCallStubManager == NULL);
+
+ // Code manager is responsible for cleaning up.
+ _ASSERTE(m_pJumpStubCache == NULL);
+#endif
+}
+
+#ifndef DACCESS_COMPILE
+//---------------------------------------------------------------------------------------
+//
+void LoaderAllocator::AddReference()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE((m_cReferences > (UINT32)0) && (m_cReferences != (UINT32)-1));
+ FastInterlockIncrement((LONG *)&m_cReferences);
+}
+#endif //!DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// Adds reference if the native object is alive - code:LoaderAllocator#AssemblyPhases.
+// Returns TRUE if the reference was added.
+//
+BOOL LoaderAllocator::AddReferenceIfAlive()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ for (;;)
+ {
+        // Local snapshot of the ref-count
+ UINT32 cReferencesLocalSnapshot = m_cReferences;
+ _ASSERTE(cReferencesLocalSnapshot != (UINT32)-1);
+
+ if (cReferencesLocalSnapshot == 0)
+ { // Ref-count was 0, do not AddRef
+ return FALSE;
+ }
+
+ UINT32 cOriginalReferences = FastInterlockCompareExchange(
+ (LONG *)&m_cReferences,
+ cReferencesLocalSnapshot + 1,
+ cReferencesLocalSnapshot);
+
+ if (cOriginalReferences == cReferencesLocalSnapshot)
+ { // The exchange happened
+ return TRUE;
+ }
+ // Let's spin till we are the only thread to modify this value
+ }
+#else //DACCESS_COMPILE
+ // DAC won't AddRef
+ return IsAlive();
+#endif //DACCESS_COMPILE
+} // LoaderAllocator::AddReferenceIfAlive
+
+//---------------------------------------------------------------------------------------
+//
+BOOL LoaderAllocator::Release()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Only actually destroy the domain assembly when all references to it are gone.
+ // This should preserve behavior in the debugger such that an UnloadModule event
+    // will occur before the underlying data structures cease functioning.
+#ifndef DACCESS_COMPILE
+
+ _ASSERTE((m_cReferences > (UINT32)0) && (m_cReferences != (UINT32)-1));
+ LONG cNewReferences = FastInterlockDecrement((LONG *)&m_cReferences);
+ return (cNewReferences == 0);
+#else //DACCESS_COMPILE
+
+ return (m_cReferences == (UINT32)0);
+#endif //DACCESS_COMPILE
+} // LoaderAllocator::Release
+
+#ifndef DACCESS_COMPILE
+#ifndef CROSSGEN_COMPILE
+//---------------------------------------------------------------------------------------
+//
+BOOL LoaderAllocator::CheckAddReference_Unlocked(LoaderAllocator *pOtherLA)
+{
+ CONTRACTL
+ {
+ THROWS;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // This must be checked before calling this function
+ _ASSERTE(pOtherLA != this);
+
+    // This function requires that the loader allocator lock has been taken.
+ _ASSERTE(GetDomain()->GetLoaderAllocatorReferencesLock()->OwnedByCurrentThread());
+
+ if (m_LoaderAllocatorReferences.Lookup(pOtherLA) == NULL)
+ {
+ GCX_COOP();
+ // Build a managed reference to keep the target object live
+ AllocateHandle(pOtherLA->GetExposedObject());
+
+ // Keep track of the references that have already been made
+ m_LoaderAllocatorReferences.Add(pOtherLA);
+
+ // Notify the other LoaderAllocator that a reference exists
+ pOtherLA->AddReference();
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+//---------------------------------------------------------------------------------------
+//
+BOOL LoaderAllocator::EnsureReference(LoaderAllocator *pOtherLA)
+{
+ CONTRACTL
+ {
+ THROWS;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Check if this lock can be taken in all places that the function is called
+ _ASSERTE(GetDomain()->GetLoaderAllocatorReferencesLock()->Debug_CanTake());
+
+ if (!IsCollectible())
+ return FALSE;
+
+ if (this == pOtherLA)
+ return FALSE;
+
+ if (!pOtherLA->IsCollectible())
+ return FALSE;
+
+ CrstHolder ch(GetDomain()->GetLoaderAllocatorReferencesLock());
+ return CheckAddReference_Unlocked(pOtherLA);
+}
+
+BOOL LoaderAllocator::EnsureInstantiation(Module *pDefiningModule, Instantiation inst)
+{
+ CONTRACTL
+ {
+ THROWS;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BOOL fNewReferenceNeeded = FALSE;
+
+ // Check if this lock can be taken in all places that the function is called
+ _ASSERTE(GetDomain()->GetLoaderAllocatorReferencesLock()->Debug_CanTake());
+
+ if (!IsCollectible())
+ return FALSE;
+
+ CrstHolder ch(GetDomain()->GetLoaderAllocatorReferencesLock());
+
+ if (pDefiningModule != NULL)
+ {
+ LoaderAllocator *pDefiningLoaderAllocator = pDefiningModule->GetLoaderAllocator();
+ if (pDefiningLoaderAllocator->IsCollectible())
+ {
+ if (pDefiningLoaderAllocator != this)
+ {
+ fNewReferenceNeeded = CheckAddReference_Unlocked(pDefiningLoaderAllocator) || fNewReferenceNeeded;
+ }
+ }
+ }
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle arg = inst[i];
+ _ASSERTE(!arg.IsEncodedFixup());
+ LoaderAllocator *pOtherLA = arg.GetLoaderModule()->GetLoaderAllocator();
+
+ if (pOtherLA == this)
+ continue;
+
+ if (!pOtherLA->IsCollectible())
+ continue;
+
+ fNewReferenceNeeded = CheckAddReference_Unlocked(pOtherLA) || fNewReferenceNeeded;
+ }
+
+ return fNewReferenceNeeded;
+}
+#else // CROSSGEN_COMPILE
+BOOL LoaderAllocator::EnsureReference(LoaderAllocator *pOtherLA)
+{
+ return FALSE;
+}
+
+BOOL LoaderAllocator::EnsureInstantiation(Module *pDefiningModule, Instantiation inst)
+{
+ return FALSE;
+}
+#endif // CROSSGEN_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+bool LoaderAllocator::Marked()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fMarked;
+}
+
+void LoaderAllocator::ClearMark()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fMarked = false;
+}
+
+void LoaderAllocator::Mark()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!m_fMarked)
+ {
+ m_fMarked = true;
+
+ LoaderAllocatorSet::Iterator iter = m_LoaderAllocatorReferences.Begin();
+ while (iter != m_LoaderAllocatorReferences.End())
+ {
+ LoaderAllocator *pAllocator = *iter;
+ pAllocator->Mark();
+ iter++;
+ }
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Collect unreferenced assemblies, remove them from the assembly list and return their loader allocator
+// list.
+//
+//static
+LoaderAllocator * LoaderAllocator::GCLoaderAllocators_RemoveAssemblies(AppDomain * pAppDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER; // Because we are holding assembly list lock code:BaseDomain#AssemblyListLock
+ MODE_PREEMPTIVE;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pAppDomain->GetLoaderAllocatorReferencesLock()->OwnedByCurrentThread());
+ _ASSERTE(pAppDomain->GetAssemblyListLock()->OwnedByCurrentThread());
+
+ // List of LoaderAllocators being deleted
+ LoaderAllocator * pFirstDestroyedLoaderAllocator = NULL;
+
+#if 0
+ // Debug logic for debugging the loader allocator gc.
+ {
+ /* Iterate through every loader allocator, and print its current state */
+ AppDomain::AssemblyIterator iData;
+ iData = pAppDomain->IterateAssembliesEx((AssemblyIterationFlags)(
+ kIncludeExecution | kIncludeLoaded | kIncludeCollected));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (iData.Next_Unlocked(pDomainAssembly.This()))
+ {
+ // The assembly could be collected (ref-count = 0), do not use holder which calls add-ref
+ Assembly * pAssembly = pDomainAssembly->GetLoadedAssembly();
+
+ if (pAssembly != NULL)
+ {
+ LoaderAllocator * pLoaderAllocator = pAssembly->GetLoaderAllocator();
+ if (pLoaderAllocator->IsCollectible())
+ {
+ printf("LA %p ReferencesTo %d\n", pLoaderAllocator, pLoaderAllocator->m_cReferences);
+ LoaderAllocatorSet::Iterator iter = pLoaderAllocator->m_LoaderAllocatorReferences.Begin();
+ while (iter != pLoaderAllocator->m_LoaderAllocatorReferences.End())
+ {
+ LoaderAllocator * pAllocator = *iter;
+ printf("LARefTo: %p\n", pAllocator);
+ iter++;
+ }
+ }
+ }
+ }
+ }
+#endif //0
+
+ AppDomain::AssemblyIterator i;
+ // Iterate through every loader allocator, marking as we go
+ {
+ i = pAppDomain->IterateAssembliesEx((AssemblyIterationFlags)(
+ kIncludeExecution | kIncludeLoaded | kIncludeCollected));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (i.Next_Unlocked(pDomainAssembly.This()))
+ {
+ // The assembly could be collected (ref-count = 0), do not use holder which calls add-ref
+ Assembly * pAssembly = pDomainAssembly->GetLoadedAssembly();
+
+ if (pAssembly != NULL)
+ {
+ LoaderAllocator * pLoaderAllocator = pAssembly->GetLoaderAllocator();
+ if (pLoaderAllocator->IsCollectible())
+ {
+ if (pLoaderAllocator->IsAlive())
+ pLoaderAllocator->Mark();
+ }
+ }
+ }
+ }
+
+    // Iterate through every loader allocator, unmarking the marked ones, and
+    // build a free list of the unmarked ones
+ {
+ i = pAppDomain->IterateAssembliesEx((AssemblyIterationFlags)(
+ kIncludeExecution | kIncludeLoaded | kIncludeCollected));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (i.Next_Unlocked(pDomainAssembly.This()))
+ {
+ // The assembly could be collected (ref-count = 0), do not use holder which calls add-ref
+ Assembly * pAssembly = pDomainAssembly->GetLoadedAssembly();
+
+ if (pAssembly != NULL)
+ {
+ LoaderAllocator * pLoaderAllocator = pAssembly->GetLoaderAllocator();
+ if (pLoaderAllocator->IsCollectible())
+ {
+ if (pLoaderAllocator->Marked())
+ {
+ pLoaderAllocator->ClearMark();
+ }
+ else if (!pLoaderAllocator->IsAlive())
+ {
+ pLoaderAllocator->m_pLoaderAllocatorDestroyNext = pFirstDestroyedLoaderAllocator;
+ // We will store a reference to this assembly, and use it later in this function
+ pFirstDestroyedLoaderAllocator = pLoaderAllocator;
+ _ASSERTE(pLoaderAllocator->m_pDomainAssemblyToDelete != NULL);
+ }
+ }
+ }
+ }
+ }
+
+ // Iterate through free list, removing from Assembly list
+ LoaderAllocator * pDomainLoaderAllocatorDestroyIterator = pFirstDestroyedLoaderAllocator;
+
+ while (pDomainLoaderAllocatorDestroyIterator != NULL)
+ {
+ _ASSERTE(!pDomainLoaderAllocatorDestroyIterator->IsAlive());
+ _ASSERTE(pDomainLoaderAllocatorDestroyIterator->m_pDomainAssemblyToDelete != NULL);
+
+ pAppDomain->RemoveAssembly_Unlocked(pDomainLoaderAllocatorDestroyIterator->m_pDomainAssemblyToDelete);
+
+ pDomainLoaderAllocatorDestroyIterator = pDomainLoaderAllocatorDestroyIterator->m_pLoaderAllocatorDestroyNext;
+ }
+
+ return pFirstDestroyedLoaderAllocator;
+} // LoaderAllocator::GCLoaderAllocators_RemoveAssemblies
+
+//---------------------------------------------------------------------------------------
+//
+// Collect unreferenced assemblies, delete all their remaining resources.
+//
+//static
+void LoaderAllocator::GCLoaderAllocators(AppDomain * pAppDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ // List of LoaderAllocators being deleted
+ LoaderAllocator * pFirstDestroyedLoaderAllocator = NULL;
+
+ {
+ CrstHolder chLoaderAllocatorReferencesLock(pAppDomain->GetLoaderAllocatorReferencesLock());
+
+ // We will lock the assembly list, so no other thread can delete items from it while we are deleting
+ // them.
+ // Note: Because of the previously taken lock we could just lock during every enumeration, but this
+ // is more robust for the future.
+ // This lock switches thread to GC_NOTRIGGER (see code:BaseDomain#AssemblyListLock).
+ CrstHolder chAssemblyListLock(pAppDomain->GetAssemblyListLock());
+
+ pFirstDestroyedLoaderAllocator = GCLoaderAllocators_RemoveAssemblies(pAppDomain);
+ }
+ // Note: The removed LoaderAllocators are not reachable outside of this function anymore, because we
+ // removed them from the assembly list
+
+ // Iterate through free list, firing ETW events and notifying the debugger
+ LoaderAllocator * pDomainLoaderAllocatorDestroyIterator = pFirstDestroyedLoaderAllocator;
+ while (pDomainLoaderAllocatorDestroyIterator != NULL)
+ {
+ _ASSERTE(!pDomainLoaderAllocatorDestroyIterator->IsAlive());
+ // Fire ETW event
+ ETW::LoaderLog::CollectibleLoaderAllocatorUnload((AssemblyLoaderAllocator *)pDomainLoaderAllocatorDestroyIterator);
+
+ // Set the unloaded flag before notifying the debugger
+ pDomainLoaderAllocatorDestroyIterator->SetIsUnloaded();
+
+ DomainAssembly * pDomainAssembly = pDomainLoaderAllocatorDestroyIterator->m_pDomainAssemblyToDelete;
+ _ASSERTE(pDomainAssembly != NULL);
+ // Notify the debugger
+ pDomainAssembly->NotifyDebuggerUnload();
+
+ pDomainLoaderAllocatorDestroyIterator = pDomainLoaderAllocatorDestroyIterator->m_pLoaderAllocatorDestroyNext;
+ }
+
+ // Iterate through free list, deleting DomainAssemblies
+ pDomainLoaderAllocatorDestroyIterator = pFirstDestroyedLoaderAllocator;
+ while (pDomainLoaderAllocatorDestroyIterator != NULL)
+ {
+ _ASSERTE(!pDomainLoaderAllocatorDestroyIterator->IsAlive());
+ _ASSERTE(pDomainLoaderAllocatorDestroyIterator->m_pDomainAssemblyToDelete != NULL);
+
+ delete pDomainLoaderAllocatorDestroyIterator->m_pDomainAssemblyToDelete;
+ // We really don't have to set it to NULL as the assembly is not reachable anymore, but just in case ...
+        // (Also, debugging NULL AVs if someone uses it accidentally is so much easier)
+ pDomainLoaderAllocatorDestroyIterator->m_pDomainAssemblyToDelete = NULL;
+
+ pDomainLoaderAllocatorDestroyIterator = pDomainLoaderAllocatorDestroyIterator->m_pLoaderAllocatorDestroyNext;
+ }
+
+ // Deleting the DomainAssemblies will have created a list of LoaderAllocator's on the AppDomain
+ // Call this shutdown function to clean those up.
+ pAppDomain->ShutdownFreeLoaderAllocators(TRUE);
+} // LoaderAllocator::GCLoaderAllocators
+
+//---------------------------------------------------------------------------------------
+//
+//static
+BOOL QCALLTYPE LoaderAllocator::Destroy(QCall::LoaderAllocatorHandle pLoaderAllocator)
+{
+ QCALL_CONTRACT;
+
+ BOOL ret = FALSE;
+
+ BEGIN_QCALL;
+
+ if (ObjectHandleIsNull(pLoaderAllocator->GetLoaderAllocatorObjectHandle()))
+ {
+ STRESS_LOG1(LF_CLASSLOADER, LL_INFO100, "Begin LoaderAllocator::Destroy for loader allocator %p\n", reinterpret_cast<void *>(static_cast<PTR_LoaderAllocator>(pLoaderAllocator)));
+ LoaderAllocatorID *pID = pLoaderAllocator->Id();
+
+ // This will probably change for shared code unloading
+ _ASSERTE(pID->GetType() == LAT_Assembly);
+
+ Assembly *pAssembly = pID->GetDomainAssembly()->GetCurrentAssembly();
+
+        // If not fully loaded, it is still domain-specific, so just get one from the DomainAssembly
+ BaseDomain *pDomain = pAssembly ? pAssembly->Parent() : pID->GetDomainAssembly()->GetAppDomain();
+
+ pLoaderAllocator->CleanupStringLiteralMap();
+
+ // This will probably change for shared code unloading
+ _ASSERTE(pDomain->IsAppDomain());
+
+ AppDomain *pAppDomain = pDomain->AsAppDomain();
+
+ pLoaderAllocator->m_pDomainAssemblyToDelete = pAssembly->GetDomainAssembly(pAppDomain);
+
+ // Iterate through all references to other loader allocators and decrement their reference
+ // count
+ LoaderAllocatorSet::Iterator iter = pLoaderAllocator->m_LoaderAllocatorReferences.Begin();
+ while (iter != pLoaderAllocator->m_LoaderAllocatorReferences.End())
+ {
+ LoaderAllocator *pAllocator = *iter;
+ pAllocator->Release();
+ iter++;
+ }
+
+ // Release this loader allocator
+ BOOL fIsLastReferenceReleased = pLoaderAllocator->Release();
+
+ // If the reference count on this assembly got to 0, then a LoaderAllocator may
+        // be able to be collected; thus, perform a garbage collection.
+        // The reference count is set up such that in the case of non-trivial graphs, the reference count
+ // may hit zero early.
+ if (fIsLastReferenceReleased)
+ {
+ LoaderAllocator::GCLoaderAllocators(pAppDomain);
+ }
+ STRESS_LOG1(LF_CLASSLOADER, LL_INFO100, "End LoaderAllocator::Destroy for loader allocator %p\n", reinterpret_cast<void *>(static_cast<PTR_LoaderAllocator>(pLoaderAllocator)));
+
+ ret = TRUE;
+ }
+
+ END_QCALL;
+
+ return ret;
+} // LoaderAllocator::Destroy
+
+// Returns NULL if the managed LoaderAllocator object was already collected.
+LOADERHANDLE LoaderAllocator::AllocateHandle(OBJECTREF value)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ LOADERHANDLE retVal;
+
+ GCPROTECT_BEGIN(value);
+ CrstHolder ch(&m_crstLoaderAllocator);
+
+ retVal = AllocateHandle_Unlocked(value);
+ GCPROTECT_END();
+
+ return retVal;
+}
+
+#define MAX_LOADERALLOCATOR_HANDLE 0x40000000
+
+// Returns NULL if the managed LoaderAllocator object was already collected.
+LOADERHANDLE LoaderAllocator::AllocateHandle_Unlocked(OBJECTREF valueUNSAFE)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_crstLoaderAllocator.OwnedByCurrentThread());
+
+ UINT_PTR retVal;
+
+ struct _gc
+ {
+ OBJECTREF value;
+ LOADERALLOCATORREF loaderAllocator;
+ PTRARRAYREF handleTable;
+ PTRARRAYREF handleTableOld;
+ } gc;
+
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.value = valueUNSAFE;
+
+ {
+ // The handle table is read locklessly, be careful
+ if (IsCollectible())
+ {
+ gc.loaderAllocator = (LOADERALLOCATORREF)ObjectFromHandle(m_hLoaderAllocatorObjectHandle);
+ if (gc.loaderAllocator == NULL)
+ { // The managed LoaderAllocator is already collected, we cannot allocate any exposed managed objects for it
+ retVal = NULL;
+ }
+ else
+ {
+ DWORD slotsUsed = gc.loaderAllocator->GetSlotsUsed();
+
+ if (slotsUsed > MAX_LOADERALLOCATOR_HANDLE)
+ {
+ COMPlusThrowOM();
+ }
+ gc.handleTable = gc.loaderAllocator->GetHandleTable();
+
+ /* If we need to enlarge the table, do it now. */
+ if (slotsUsed >= gc.handleTable->GetNumComponents())
+ {
+ gc.handleTableOld = gc.handleTable;
+
+ DWORD newSize = gc.handleTable->GetNumComponents() * 2;
+ gc.handleTable = (PTRARRAYREF)AllocateObjectArray(newSize, g_pObjectClass);
+
+ /* Copy out of old array */
+ memmoveGCRefs(gc.handleTable->GetDataPtr(), gc.handleTableOld->GetDataPtr(), slotsUsed * sizeof(Object *));
+ gc.loaderAllocator->SetHandleTable(gc.handleTable);
+ }
+
+ gc.handleTable->SetAt(slotsUsed, gc.value);
+ gc.loaderAllocator->SetSlotsUsed(slotsUsed + 1);
+ retVal = (UINT_PTR)((slotsUsed + 1) << 1);
+ }
+ }
+ else
+ {
+ OBJECTREF* pRef = GetDomain()->AllocateObjRefPtrsInLargeTable(1);
+ SetObjectReference(pRef, gc.value, IsDomainNeutral() ? NULL : GetDomain()->AsAppDomain());
+ retVal = (((UINT_PTR)pRef) + 1);
+ }
+ }
+
+ GCPROTECT_END();
+
+ return (LOADERHANDLE)retVal;
+} // LoaderAllocator::AllocateHandle_Unlocked
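+
+// Editorial note on the LOADERHANDLE encoding produced above (this restates the
+// code rather than adding behavior): a handle is a tagged, pointer-sized value.
+//
+//   collectible LA:     handle = (slotsUsed + 1) << 1   // low bit clear: handle-table slot
+//   non-collectible LA: handle = (UINT_PTR)pRef + 1     // low bit set: direct OBJECTREF*
+//
+// Readers test the low bit first, exactly as code:LoaderAllocator::GetHandleValueFast
+// and code:LoaderAllocator::SetHandleValue do:
+//
+//   if (((UINT_PTR)handle) & 1)
+//       value = *(OBJECTREF *)(((UINT_PTR)handle) - 1);
+//   else
+//       index = (((UINT_PTR)handle) >> 1) - 1;   // slot in the managed handle table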
+
+OBJECTREF LoaderAllocator::GetHandleValue(LOADERHANDLE handle)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF objRet = NULL;
+ GET_LOADERHANDLE_VALUE_FAST(this, handle, &objRet);
+ return objRet;
+}
+
+void LoaderAllocator::ClearHandle(LOADERHANDLE handle)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(handle != NULL);
+ }
+ CONTRACTL_END;
+
+ SetHandleValue(handle, NULL);
+}
+
+OBJECTREF LoaderAllocator::CompareExchangeValueInHandle(LOADERHANDLE handle, OBJECTREF valueUNSAFE, OBJECTREF compareUNSAFE)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(handle != NULL);
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal;
+
+ struct _gc
+ {
+ OBJECTREF value;
+ OBJECTREF compare;
+ OBJECTREF previous;
+ } gc;
+
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ gc.value = valueUNSAFE;
+ gc.compare = compareUNSAFE;
+
+ /* The handle table is read locklessly, be careful */
+ {
+ CrstHolder ch(&m_crstLoaderAllocator);
+
+ if ((((UINT_PTR)handle) & 1) != 0)
+ {
+ OBJECTREF *ptr = (OBJECTREF *)(((UINT_PTR)handle) - 1);
+ gc.previous = *ptr;
+ if ((*ptr) == gc.compare)
+ {
+ SetObjectReference(ptr, gc.value, IsDomainNeutral() ? NULL : GetDomain()->AsAppDomain());
+ }
+ }
+ else
+ {
+ _ASSERTE(!ObjectHandleIsNull(m_hLoaderAllocatorObjectHandle));
+
+ UINT_PTR index = (((UINT_PTR)handle) >> 1) - 1;
+ LOADERALLOCATORREF loaderAllocator = (LOADERALLOCATORREF)ObjectFromHandle(m_hLoaderAllocatorObjectHandle);
+ PTRARRAYREF handleTable = loaderAllocator->GetHandleTable();
+
+ gc.previous = handleTable->GetAt(index);
+ if (gc.previous == gc.compare)
+ {
+ handleTable->SetAt(index, gc.value);
+ }
+ }
+ } // End critical section
+
+ retVal = gc.previous;
+ GCPROTECT_END();
+
+ return retVal;
+}
+
+void LoaderAllocator::SetHandleValue(LOADERHANDLE handle, OBJECTREF value)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(handle != NULL);
+ }
+ CONTRACTL_END;
+
+ GCPROTECT_BEGIN(value);
+
+ // The handle table is read locklessly, be careful
+ {
+ CrstHolder ch(&m_crstLoaderAllocator);
+
+ // If the slot value has the low bit set, then it is a simple pointer to the value.
+ // Otherwise, we will need a more complicated operation to set the value.
+ if ((((UINT_PTR)handle) & 1) != 0)
+ {
+ OBJECTREF *ptr = (OBJECTREF *)(((UINT_PTR)handle) - 1);
+ SetObjectReference(ptr, value, IsDomainNeutral() ? NULL : GetDomain()->AsAppDomain());
+ }
+ else
+ {
+ _ASSERTE(!ObjectHandleIsNull(m_hLoaderAllocatorObjectHandle));
+
+ UINT_PTR index = (((UINT_PTR)handle) >> 1) - 1;
+ LOADERALLOCATORREF loaderAllocator = (LOADERALLOCATORREF)ObjectFromHandle(m_hLoaderAllocatorObjectHandle);
+ PTRARRAYREF handleTable = loaderAllocator->GetHandleTable();
+ handleTable->SetAt(index, value);
+ }
+ }
+
+ GCPROTECT_END();
+
+ return;
+}
+
+void LoaderAllocator::SetupManagedTracking(LOADERALLOCATORREF * pKeepLoaderAllocatorAlive)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCInterface::AddMemoryPressure(30000);
+ m_fGCPressure = true;
+
+ GCX_COOP();
+
+ //
+ // Initialize managed loader allocator reference holder
+ //
+
+ MethodTable *pMT = MscorlibBinder::GetClass(CLASS__LOADERALLOCATOR);
+
+ *pKeepLoaderAllocatorAlive = (LOADERALLOCATORREF)AllocateObject(pMT);
+
+ MethodDescCallSite initLoaderAllocator(METHOD__LOADERALLOCATOR__CTOR, (OBJECTREF *)pKeepLoaderAllocatorAlive);
+
+ ARG_SLOT args[] = {
+ ObjToArgSlot(*pKeepLoaderAllocatorAlive)
+ };
+
+ initLoaderAllocator.Call(args);
+
+ m_hLoaderAllocatorObjectHandle = GetDomain()->CreateLongWeakHandle(*pKeepLoaderAllocatorAlive);
+
+ RegisterHandleForCleanup(m_hLoaderAllocatorObjectHandle);
+}
+
+void LoaderAllocator::ActivateManagedTracking()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ GCX_COOP();
+
+ // There is now one external reference to this LoaderAllocator (the managed scout)
+ _ASSERTE(m_cReferences == (UINT32)-1);
+ m_cReferences = (UINT32)1;
+
+ LOADERALLOCATORREF loaderAllocator = (LOADERALLOCATORREF)ObjectFromHandle(m_hLoaderAllocatorObjectHandle);
+ loaderAllocator->SetNativeLoaderAllocator(this);
+}
+#endif // CROSSGEN_COMPILE
+
+
+// We don't actually allocate a low frequency heap for collectible types
+#define COLLECTIBLE_LOW_FREQUENCY_HEAP_SIZE (0 * PAGE_SIZE)
+#define COLLECTIBLE_HIGH_FREQUENCY_HEAP_SIZE (3 * PAGE_SIZE)
+#define COLLECTIBLE_STUB_HEAP_SIZE PAGE_SIZE
+#define COLLECTIBLE_CODEHEAP_SIZE (7 * PAGE_SIZE)
+#define COLLECTIBLE_VIRTUALSTUBDISPATCH_HEAP_SPACE (5 * PAGE_SIZE)
+
+void LoaderAllocator::Init(BaseDomain *pDomain, BYTE *pExecutableHeapMemory)
+{
+ STANDARD_VM_CONTRACT;
+
+ m_pDomain = pDomain;
+
+ m_crstLoaderAllocator.Init(CrstLoaderAllocator);
+
+ //
+ // Initialize the heaps
+ //
+
+ DWORD dwLowFrequencyHeapReserveSize;
+ DWORD dwHighFrequencyHeapReserveSize;
+ DWORD dwStubHeapReserveSize;
+ DWORD dwExecutableHeapReserveSize;
+ DWORD dwCodeHeapReserveSize;
+ DWORD dwVSDHeapReserveSize;
+
+ dwExecutableHeapReserveSize = 0;
+
+ if (IsCollectible())
+ {
+ dwLowFrequencyHeapReserveSize = COLLECTIBLE_LOW_FREQUENCY_HEAP_SIZE;
+ dwHighFrequencyHeapReserveSize = COLLECTIBLE_HIGH_FREQUENCY_HEAP_SIZE;
+ dwStubHeapReserveSize = COLLECTIBLE_STUB_HEAP_SIZE;
+ dwCodeHeapReserveSize = COLLECTIBLE_CODEHEAP_SIZE;
+ dwVSDHeapReserveSize = COLLECTIBLE_VIRTUALSTUBDISPATCH_HEAP_SPACE;
+ }
+ else
+ {
+ dwLowFrequencyHeapReserveSize = LOW_FREQUENCY_HEAP_RESERVE_SIZE;
+ dwHighFrequencyHeapReserveSize = HIGH_FREQUENCY_HEAP_RESERVE_SIZE;
+ dwStubHeapReserveSize = STUB_HEAP_RESERVE_SIZE;
+
+ // Non-collectible assemblies do not reserve space for these heaps.
+ dwCodeHeapReserveSize = 0;
+ dwVSDHeapReserveSize = 0;
+ }
+
+ // The global heap needs a bit of space for executable memory that is not associated with a rangelist.
+ // Take a page from the high-frequency heap for this.
+ if (pExecutableHeapMemory != NULL)
+ {
+#ifdef FEATURE_WINDOWSPHONE
+ // code:UMEntryThunk::CreateUMEntryThunk allocates memory on executable loader heap for phone.
+ // Reserve enough for a typical phone app to fit.
+ dwExecutableHeapReserveSize = 3 * PAGE_SIZE;
+#else
+ dwExecutableHeapReserveSize = PAGE_SIZE;
+#endif
+
+ _ASSERTE(dwExecutableHeapReserveSize < dwHighFrequencyHeapReserveSize);
+ dwHighFrequencyHeapReserveSize -= dwExecutableHeapReserveSize;
+ }
+
+ DWORD dwTotalReserveMemSize = dwLowFrequencyHeapReserveSize
+ + dwHighFrequencyHeapReserveSize
+ + dwStubHeapReserveSize
+ + dwCodeHeapReserveSize
+ + dwVSDHeapReserveSize
+ + dwExecutableHeapReserveSize;
+
+ dwTotalReserveMemSize = (DWORD) ALIGN_UP(dwTotalReserveMemSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY);
+
+#if !defined(_WIN64)
+ // Make sure that we reserve as little as possible on 32-bit to save address space
+ _ASSERTE(dwTotalReserveMemSize <= VIRTUAL_ALLOC_RESERVE_GRANULARITY);
+#endif
+
+ BYTE * initReservedMem = ClrVirtualAllocExecutable(dwTotalReserveMemSize, MEM_RESERVE, PAGE_NOACCESS);
+
+ m_InitialReservedMemForLoaderHeaps = initReservedMem;
+
+ if (initReservedMem == NULL)
+ COMPlusThrowOM();
+
+ if (IsCollectible())
+ {
+ m_pCodeHeapInitialAlloc = initReservedMem;
+ initReservedMem += dwCodeHeapReserveSize;
+ m_pVSDHeapInitialAlloc = initReservedMem;
+ initReservedMem += dwVSDHeapReserveSize;
+ }
+ else
+ {
+ _ASSERTE((dwCodeHeapReserveSize == 0) && (m_pCodeHeapInitialAlloc == NULL));
+ _ASSERTE((dwVSDHeapReserveSize == 0) && (m_pVSDHeapInitialAlloc == NULL));
+ }
+
+ if (dwLowFrequencyHeapReserveSize != 0)
+ {
+ _ASSERTE(!IsCollectible());
+
+ m_pLowFrequencyHeap = new (&m_LowFreqHeapInstance) LoaderHeap(LOW_FREQUENCY_HEAP_RESERVE_SIZE,
+ LOW_FREQUENCY_HEAP_COMMIT_SIZE,
+ initReservedMem,
+ dwLowFrequencyHeapReserveSize,
+ LOADERHEAP_PROFILE_COUNTER);
+ initReservedMem += dwLowFrequencyHeapReserveSize;
+ }
+
+ if (dwExecutableHeapReserveSize != 0)
+ {
+ _ASSERTE(!IsCollectible());
+
+ m_pExecutableHeap = new (pExecutableHeapMemory) LoaderHeap(STUB_HEAP_RESERVE_SIZE,
+ STUB_HEAP_COMMIT_SIZE,
+ initReservedMem,
+ dwExecutableHeapReserveSize,
+ LOADERHEAP_PROFILE_COUNTER,
+ NULL,
+ TRUE /* Make heap executable */);
+ initReservedMem += dwExecutableHeapReserveSize;
+ }
+
+ m_pHighFrequencyHeap = new (&m_HighFreqHeapInstance) LoaderHeap(HIGH_FREQUENCY_HEAP_RESERVE_SIZE,
+ HIGH_FREQUENCY_HEAP_COMMIT_SIZE,
+ initReservedMem,
+ dwHighFrequencyHeapReserveSize,
+ LOADERHEAP_PROFILE_COUNTER);
+ initReservedMem += dwHighFrequencyHeapReserveSize;
+
+ if (IsCollectible())
+ m_pLowFrequencyHeap = m_pHighFrequencyHeap;
+
+#if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO)
+ m_pHighFrequencyHeap->m_fPermitStubsWithUnwindInfo = TRUE;
+#endif
+
+ m_pStubHeap = new (&m_StubHeapInstance) LoaderHeap(STUB_HEAP_RESERVE_SIZE,
+ STUB_HEAP_COMMIT_SIZE,
+ initReservedMem,
+ dwStubHeapReserveSize,
+ LOADERHEAP_PROFILE_COUNTER,
+ STUBMANAGER_RANGELIST(StubLinkStubManager),
+ TRUE /* Make heap executable */);
+
+ initReservedMem += dwStubHeapReserveSize;
+
+#if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO)
+ m_pStubHeap->m_fPermitStubsWithUnwindInfo = TRUE;
+#endif
+
+#ifdef CROSSGEN_COMPILE
+ m_pPrecodeHeap = new (&m_PrecodeHeapInstance) LoaderHeap(PAGE_SIZE, PAGE_SIZE);
+#else
+ m_pPrecodeHeap = new (&m_PrecodeHeapInstance) CodeFragmentHeap(this, STUB_CODE_BLOCK_PRECODE);
+#endif
+}
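+
+// Layout note (illustrative, derived from the code above): Init carves a single
+// contiguous reservation into the individual heaps by advancing initReservedMem
+// past each heap's reserve size, in this order:
+//
+//   [code heap][VSD heap]            -- collectible allocators only
+//   [low frequency heap]             -- non-collectible only (collectible aliases high freq)
+//   [executable heap]                -- only when pExecutableHeapMemory is supplied
+//   [high frequency heap][stub heap]
+//
+// Keeping everything in one reservation lets code:LoaderAllocator::Terminate
+// release it with a single ClrVirtualFree of m_InitialReservedMemForLoaderHeaps.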
+
+
+#ifndef CROSSGEN_COMPILE
+
+#ifdef FEATURE_READYTORUN
+PTR_CodeFragmentHeap LoaderAllocator::GetDynamicHelpersHeap()
+{
+ CONTRACTL {
+ THROWS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (m_pDynamicHelpersHeap == NULL)
+ {
+ CodeFragmentHeap * pDynamicHelpersHeap = new CodeFragmentHeap(this, STUB_CODE_BLOCK_DYNAMICHELPER);
+ if (InterlockedCompareExchangeT(&m_pDynamicHelpersHeap, pDynamicHelpersHeap, NULL) != NULL)
+ delete pDynamicHelpersHeap;
+ }
+ return m_pDynamicHelpersHeap;
+}
+#endif
+
+FuncPtrStubs * LoaderAllocator::GetFuncPtrStubs()
+{
+ CONTRACTL {
+ THROWS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (m_pFuncPtrStubs == NULL)
+ {
+ FuncPtrStubs * pFuncPtrStubs = new FuncPtrStubs();
+ if (InterlockedCompareExchangeT(&m_pFuncPtrStubs, pFuncPtrStubs, NULL) != NULL)
+ delete pFuncPtrStubs;
+ }
+ return m_pFuncPtrStubs;
+}
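+
+// Note: GetDynamicHelpersHeap and GetFuncPtrStubs above share the same lock-free
+// lazy-init pattern: allocate a candidate, publish it with a compare-exchange
+// against NULL, and discard the candidate if another thread won the race.
+// Schematically (m_pField stands in for the respective member):
+//
+//   T * pNew = new T(...);
+//   if (InterlockedCompareExchangeT(&m_pField, pNew, NULL) != NULL)
+//       delete pNew; // lost the race; the winner's instance stays in m_pField
+//   return m_pField;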
+
+BYTE *LoaderAllocator::GetVSDHeapInitialBlock(DWORD *pSize)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ *pSize = 0;
+ BYTE *buffer = InterlockedCompareExchangeT(&m_pVSDHeapInitialAlloc, NULL, m_pVSDHeapInitialAlloc);
+ if (buffer != NULL)
+ {
+ *pSize = COLLECTIBLE_VIRTUALSTUBDISPATCH_HEAP_SPACE;
+ }
+ return buffer;
+}
+
+BYTE *LoaderAllocator::GetCodeHeapInitialBlock(const BYTE * loAddr, const BYTE * hiAddr, DWORD minimumSize, DWORD *pSize)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ *pSize = 0;
+ // Check to see if the size is small enough that this might work
+ if (minimumSize > COLLECTIBLE_CODEHEAP_SIZE)
+ return NULL;
+
+ // Check to see if initial alloc would be in the proper region
+ if (loAddr != NULL || hiAddr != NULL)
+ {
+ if (m_pCodeHeapInitialAlloc < loAddr)
+ return NULL;
+ if ((m_pCodeHeapInitialAlloc + COLLECTIBLE_CODEHEAP_SIZE) > hiAddr)
+ return NULL;
+ }
+
+ BYTE * buffer = InterlockedCompareExchangeT(&m_pCodeHeapInitialAlloc, NULL, m_pCodeHeapInitialAlloc);
+ if (buffer != NULL)
+ {
+ *pSize = COLLECTIBLE_CODEHEAP_SIZE;
+ }
+ return buffer;
+}
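+
+// Note on the claim-once pattern in the two getters above: exchanging the field
+// with NULL via InterlockedCompareExchangeT atomically hands the pre-reserved
+// block to at most one caller and leaves NULL behind, so the initial allocation
+// can never be handed out twice (see the matching comment on
+// m_pVSDHeapInitialAlloc / m_pCodeHeapInitialAlloc in loaderallocator.hpp).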
+
+// In retail builds this should be called from AppDomain::Terminate.
+void LoaderAllocator::Terminate()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ } CONTRACTL_END;
+
+ if (m_fTerminated)
+ return;
+
+ m_fTerminated = true;
+
+ LOG((LF_CLASSLOADER, LL_INFO100, "Begin LoaderAllocator::Terminate for loader allocator %p\n", reinterpret_cast<void *>(static_cast<PTR_LoaderAllocator>(this))));
+
+ if (m_fGCPressure)
+ {
+ GCX_PREEMP();
+ GCInterface::RemoveMemoryPressure(30000);
+ m_fGCPressure = false;
+ }
+
+ m_crstLoaderAllocator.Destroy();
+ m_LoaderAllocatorReferences.RemoveAll();
+
+ // In collectible types we merge the low frequency and high frequency heaps,
+ // so don't destroy them twice.
+ if ((m_pLowFrequencyHeap != NULL) && (m_pLowFrequencyHeap != m_pHighFrequencyHeap))
+ {
+ m_pLowFrequencyHeap->~LoaderHeap();
+ m_pLowFrequencyHeap = NULL;
+ }
+
+ if (m_pHighFrequencyHeap != NULL)
+ {
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ UnregisterUnwindInfoInLoaderHeap(m_pHighFrequencyHeap);
+#endif
+
+ m_pHighFrequencyHeap->~LoaderHeap();
+ m_pHighFrequencyHeap = NULL;
+ }
+
+ if (m_pStubHeap != NULL)
+ {
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ UnregisterUnwindInfoInLoaderHeap(m_pStubHeap);
+#endif
+
+ m_pStubHeap->~LoaderHeap();
+ m_pStubHeap = NULL;
+ }
+
+ if (m_pPrecodeHeap != NULL)
+ {
+ m_pPrecodeHeap->~CodeFragmentHeap();
+ m_pPrecodeHeap = NULL;
+ }
+
+#ifdef FEATURE_READYTORUN
+ if (m_pDynamicHelpersHeap != NULL)
+ {
+ delete m_pDynamicHelpersHeap;
+ m_pDynamicHelpersHeap = NULL;
+ }
+#endif
+
+ if (m_pFuncPtrStubs != NULL)
+ {
+ delete m_pFuncPtrStubs;
+ m_pFuncPtrStubs = NULL;
+ }
+
+ // This was the block reserved by BaseDomain::Init for the loaderheaps.
+ if (m_InitialReservedMemForLoaderHeaps)
+ {
+ ClrVirtualFree(m_InitialReservedMemForLoaderHeaps, 0, MEM_RELEASE);
+ m_InitialReservedMemForLoaderHeaps = NULL;
+ }
+
+#ifdef FAT_DISPATCH_TOKENS
+ if (m_pFatTokenSetLock != NULL)
+ {
+ delete m_pFatTokenSetLock;
+ m_pFatTokenSetLock = NULL;
+ }
+
+ if (m_pFatTokenSet != NULL)
+ {
+ delete m_pFatTokenSet;
+ m_pFatTokenSet = NULL;
+ }
+#endif // FAT_DISPATCH_TOKENS
+
+ LOG((LF_CLASSLOADER, LL_INFO100, "End LoaderAllocator::Terminate for loader allocator %p\n", reinterpret_cast<void *>(static_cast<PTR_LoaderAllocator>(this))));
+}
+
+#endif // CROSSGEN_COMPILE
+
+
+#else //DACCESS_COMPILE
+void LoaderAllocator::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ DAC_ENUM_DTHIS();
+ if (m_pLowFrequencyHeap.IsValid())
+ {
+ m_pLowFrequencyHeap->EnumMemoryRegions(flags);
+ }
+ if (m_pHighFrequencyHeap.IsValid())
+ {
+ m_pHighFrequencyHeap->EnumMemoryRegions(flags);
+ }
+ if (m_pStubHeap.IsValid())
+ {
+ m_pStubHeap->EnumMemoryRegions(flags);
+ }
+ if (m_pPrecodeHeap.IsValid())
+ {
+ m_pPrecodeHeap->EnumMemoryRegions(flags);
+ }
+}
+#endif //DACCESS_COMPILE
+
+SIZE_T LoaderAllocator::EstimateSize()
+{
+ WRAPPER_NO_CONTRACT;
+ SIZE_T retval = 0;
+ if (m_pHighFrequencyHeap)
+ retval += m_pHighFrequencyHeap->GetSize();
+ if (m_pLowFrequencyHeap)
+ retval += m_pLowFrequencyHeap->GetSize();
+ if (m_pStubHeap)
+ retval += m_pStubHeap->GetSize();
+ if (m_pStringLiteralMap)
+ retval += m_pStringLiteralMap->GetSize();
+ if (m_pVirtualCallStubManager)
+ retval += m_pVirtualCallStubManager->GetSize();
+
+ return retval;
+}
+
+#ifndef DACCESS_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+
+DispatchToken LoaderAllocator::GetDispatchToken(
+ UINT32 typeId, UINT32 slotNumber)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+#ifdef FAT_DISPATCH_TOKENS
+
+ if (DispatchToken::RequiresDispatchTokenFat(typeId, slotNumber))
+ {
+ //
+ // Lock and set are lazily created.
+ //
+ if (m_pFatTokenSetLock == NULL)
+ {
+ NewHolder<SimpleRWLock> pFatTokenSetLock = new SimpleRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT);
+ SimpleWriteLockHolder lock(pFatTokenSetLock);
+ NewHolder<FatTokenSet> pFatTokenSet = new FatTokenSet;
+
+ if (FastInterlockCompareExchangePointer(
+ &m_pFatTokenSetLock, pFatTokenSetLock.GetValue(), NULL) != NULL)
+ { // Someone beat us to it
+ lock.Release();
+ // NewHolder will delete lock.
+ }
+ else
+ { // Make sure second allocation succeeds before suppressing holder of first.
+ pFatTokenSetLock.SuppressRelease();
+ m_pFatTokenSet = pFatTokenSet;
+ pFatTokenSet.SuppressRelease();
+ }
+ }
+
+ //
+ // Take read lock, see if the requisite token has already been created and if so use it.
+ // Otherwise, take write lock and create new token and add to the set.
+ //
+
+ // Lookup
+ SimpleReadLockHolder rlock(m_pFatTokenSetLock);
+ DispatchTokenFat key(typeId, slotNumber);
+ DispatchTokenFat *pFat = m_pFatTokenSet->Lookup(&key);
+ if (pFat != NULL)
+ { // <typeId,slotNumber> is already in the set.
+ return DispatchToken(pFat);
+ }
+ else
+ { // Create
+ rlock.Release();
+ SimpleWriteLockHolder wlock(m_pFatTokenSetLock);
+
+ // Check to see if someone beat us to the punch between
+ // releasing the read lock and taking the write lock.
+ pFat = m_pFatTokenSet->Lookup(&key);
+
+ if (pFat == NULL)
+ { // No one beat us; allocate and insert a new DispatchTokenFat instance.
+ pFat = new ((LPVOID)GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(DispatchTokenFat))))
+ DispatchTokenFat(typeId, slotNumber);
+
+ m_pFatTokenSet->Add(pFat);
+ }
+
+ return DispatchToken(pFat);
+ }
+ }
+#endif // FAT_DISPATCH_TOKENS
+
+ return DispatchToken::CreateDispatchToken(typeId, slotNumber);
+}
+
+DispatchToken LoaderAllocator::TryLookupDispatchToken(UINT32 typeId, UINT32 slotNumber)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+#ifdef FAT_DISPATCH_TOKENS
+
+ if (DispatchToken::RequiresDispatchTokenFat(typeId, slotNumber))
+ {
+ if (m_pFatTokenSetLock != NULL)
+ {
+ DispatchTokenFat * pFat = NULL;
+ // Stack probes and locking operations are throwing. Catch all
+ // exceptions and just return an invalid token, since this lookup
+ // is allowed to fail.
+ EX_TRY
+ {
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ SimpleReadLockHolder rlock(m_pFatTokenSetLock);
+ if (m_pFatTokenSet != NULL)
+ {
+ DispatchTokenFat key(typeId, slotNumber);
+ pFat = m_pFatTokenSet->Lookup(&key);
+ }
+ END_SO_INTOLERANT_CODE;
+ }
+ EX_CATCH
+ {
+ pFat = NULL;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (pFat != NULL)
+ {
+ return DispatchToken(pFat);
+ }
+ }
+ // Return invalid token when not found.
+ return DispatchToken();
+ }
+ else
+#endif // FAT_DISPATCH_TOKENS
+ {
+ return DispatchToken::CreateDispatchToken(typeId, slotNumber);
+ }
+}
+
+void LoaderAllocator::InitVirtualCallStubManager(BaseDomain * pDomain, BOOL fCollectible /* = FALSE */)
+{
+ STANDARD_VM_CONTRACT;
+
+ NewHolder<VirtualCallStubManager> pMgr(new VirtualCallStubManager());
+
+ // Init the manager, including all heaps and such.
+ pMgr->Init(pDomain, this);
+
+ m_pVirtualCallStubManager = pMgr;
+
+ // Successfully created the manager.
+ pMgr.SuppressRelease();
+}
+
+void LoaderAllocator::UninitVirtualCallStubManager()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pVirtualCallStubManager != NULL)
+ {
+ m_pVirtualCallStubManager->Uninit();
+ delete m_pVirtualCallStubManager;
+ m_pVirtualCallStubManager = NULL;
+ }
+}
+#endif // CROSSGEN_COMPILE
+
+#endif // DACCESS_COMPILE
+
+BOOL GlobalLoaderAllocator::CanUnload()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return FALSE;
+}
+
+BOOL AppDomainLoaderAllocator::CanUnload()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ return m_Id.GetAppDomain()->CanUnload();
+}
+
+#ifndef CROSSGEN_COMPILE
+BOOL AssemblyLoaderAllocator::CanUnload()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return TRUE;
+}
+#endif // CROSSGEN_COMPILE
+
+BOOL LoaderAllocator::IsDomainNeutral()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ return GetDomain()->IsSharedDomain();
+}
+
+#ifndef DACCESS_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+STRINGREF *LoaderAllocator::GetStringObjRefPtrFromUnicodeString(EEStringData *pStringData)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pStringData));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+ if (m_pStringLiteralMap == NULL)
+ {
+ LazyInitStringLiteralMap();
+ }
+ _ASSERTE(m_pStringLiteralMap);
+ return m_pStringLiteralMap->GetStringLiteral(pStringData, TRUE, !CanUnload());
+}
+
+//*****************************************************************************
+void LoaderAllocator::LazyInitStringLiteralMap()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ NewHolder<StringLiteralMap> pStringLiteralMap(new StringLiteralMap());
+
+ pStringLiteralMap->Init();
+
+ if (InterlockedCompareExchangeT<StringLiteralMap *>(&m_pStringLiteralMap, pStringLiteralMap, NULL) == NULL)
+ {
+ pStringLiteralMap.SuppressRelease();
+ }
+}
+
+void LoaderAllocator::CleanupStringLiteralMap()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pStringLiteralMap)
+ {
+ delete m_pStringLiteralMap;
+ m_pStringLiteralMap = NULL;
+ }
+}
+
+STRINGREF *LoaderAllocator::IsStringInterned(STRINGREF *pString)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pString));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+ if (m_pStringLiteralMap == NULL)
+ {
+ LazyInitStringLiteralMap();
+ }
+ _ASSERTE(m_pStringLiteralMap);
+ return m_pStringLiteralMap->GetInternedString(pString, FALSE, !CanUnload());
+}
+
+STRINGREF *LoaderAllocator::GetOrInternString(STRINGREF *pString)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pString));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+ if (m_pStringLiteralMap == NULL)
+ {
+ LazyInitStringLiteralMap();
+ }
+ _ASSERTE(m_pStringLiteralMap);
+ return m_pStringLiteralMap->GetInternedString(pString, TRUE, !CanUnload());
+}
+
+void AssemblyLoaderAllocator::RegisterHandleForCleanup(OBJECTHANDLE objHandle)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ PRECONDITION(CheckPointer(objHandle));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ void * pItem = GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(HandleCleanupListItem)));
+
+ // InsertTail must be protected by a lock. Just use the loader allocator lock
+ CrstHolder ch(&m_crstLoaderAllocator);
+ m_handleCleanupList.InsertTail(new (pItem) HandleCleanupListItem(objHandle));
+}
+
+void AssemblyLoaderAllocator::CleanupHandles()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ NOTHROW;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(GetDomain()->IsAppDomain());
+ _ASSERTE(!GetDomain()->AsAppDomain()->NoAccessToHandleTable());
+
+ // This method doesn't take a lock around RemoveHead because it's supposed to
+ // be called only from Terminate
+ while (!m_handleCleanupList.IsEmpty())
+ {
+ HandleCleanupListItem * pItem = m_handleCleanupList.RemoveHead();
+ DestroyTypedHandle(pItem->m_handle);
+ }
+}
+
+void LoaderAllocator::RegisterFailedTypeInitForCleanup(ListLockEntry *pListLockEntry)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ PRECONDITION(CheckPointer(pListLockEntry));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (!IsCollectible())
+ {
+ return;
+ }
+
+ void * pItem = GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(FailedTypeInitCleanupListItem)));
+
+ // InsertTail must be protected by a lock. Just use the loader allocator lock
+ CrstHolder ch(&m_crstLoaderAllocator);
+ m_failedTypeInitCleanupList.InsertTail(new (pItem) FailedTypeInitCleanupListItem(pListLockEntry));
+}
+
+void LoaderAllocator::CleanupFailedTypeInit()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ if (!IsCollectible())
+ {
+ return;
+ }
+
+ _ASSERTE(GetDomain()->IsAppDomain());
+
+ // This method doesn't take a lock around loader allocator state access, because
+ // it's supposed to be called only during cleanup. However, the domain-level state
+ // might be accessed by multiple threads.
+ ListLock *pLock = GetDomain()->GetClassInitLock();
+
+ while (!m_failedTypeInitCleanupList.IsEmpty())
+ {
+ FailedTypeInitCleanupListItem * pItem = m_failedTypeInitCleanupList.RemoveHead();
+
+ ListLockHolder pInitLock(pLock);
+ pLock->Unlink(pItem->m_pListLockEntry);
+ }
+}
+#endif // CROSSGEN_COMPILE
+
+#endif //!DACCESS_COMPILE
diff --git a/src/vm/loaderallocator.hpp b/src/vm/loaderallocator.hpp
new file mode 100644
index 0000000000..046ea9fb32
--- /dev/null
+++ b/src/vm/loaderallocator.hpp
@@ -0,0 +1,520 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: LoaderAllocator.hpp
+**
+
+**
+** Purpose: Implements collection of loader heaps
+**
+**
+===========================================================*/
+
+#ifndef __LoaderAllocator_h__
+#define __LoaderAllocator_h__
+
+class FuncPtrStubs;
+#include "qcall.h"
+
+#define VPTRU_LoaderAllocator 0x3200
+
+enum LoaderAllocatorType
+{
+ LAT_Invalid,
+ LAT_Global,
+ LAT_AppDomain,
+ LAT_Assembly
+};
+
+class LoaderAllocatorID
+{
+
+protected:
+ LoaderAllocatorType m_type;
+ union
+ {
+ AppDomain* m_pAppDomain;
+ DomainAssembly* m_pDomainAssembly;
+ void* m_pValue;
+ };
+
+ VOID * GetValue();
+
+public:
+ LoaderAllocatorID(LoaderAllocatorType laType=LAT_Invalid, VOID* value = 0)
+ {
+ m_type = laType;
+ m_pValue = value;
+ };
+ VOID Init();
+ VOID Init(AppDomain* pAppDomain);
+ LoaderAllocatorType GetType();
+ VOID SetDomainAssembly(DomainAssembly* pDomainAssembly);
+ DomainAssembly* GetDomainAssembly();
+ AppDomain* GetAppDomain();
+ BOOL Equals(LoaderAllocatorID* pId);
+ COUNT_T Hash();
+ BOOL IsCollectible();
+};
+
+class StringLiteralMap;
+class VirtualCallStubManager;
+class ListLockEntry;
+
+class LoaderAllocator
+{
+ VPTR_BASE_VTABLE_CLASS(LoaderAllocator)
+ VPTR_UNIQUE(VPTRU_LoaderAllocator)
+protected:
+
+ //****************************************************************************************
+ // #LoaderAllocator Heaps
+ // Heaps for allocating data that persists for the life of the AppDomain
+ // Objects that are allocated frequently should be allocated into the HighFreq heap for
+ // better page management
+ BYTE * m_InitialReservedMemForLoaderHeaps;
+ BYTE m_LowFreqHeapInstance[sizeof(LoaderHeap)];
+ BYTE m_HighFreqHeapInstance[sizeof(LoaderHeap)];
+ BYTE m_StubHeapInstance[sizeof(LoaderHeap)];
+ BYTE m_PrecodeHeapInstance[sizeof(CodeFragmentHeap)];
+ PTR_LoaderHeap m_pLowFrequencyHeap;
+ PTR_LoaderHeap m_pHighFrequencyHeap;
+ PTR_LoaderHeap m_pStubHeap; // stubs for PInvoke, remoting, etc
+ PTR_CodeFragmentHeap m_pPrecodeHeap;
+ PTR_LoaderHeap m_pExecutableHeap;
+#ifdef FEATURE_READYTORUN
+ PTR_CodeFragmentHeap m_pDynamicHelpersHeap;
+#endif
+ //****************************************************************************************
+ OBJECTHANDLE m_hLoaderAllocatorObjectHandle;
+ FuncPtrStubs * m_pFuncPtrStubs; // for GetMultiCallableAddrOfCode()
+ // The LoaderAllocator specific string literal map.
+ StringLiteralMap *m_pStringLiteralMap;
+ CrstExplicitInit m_crstLoaderAllocator;
+ bool m_fGCPressure;
+ bool m_fUnloaded;
+ bool m_fTerminated;
+ bool m_fMarked;
+ int m_nGCCount;
+
+ // Pre-allocated blocks of heap for collectible assemblies. Will be set to NULL as soon as it is
+ // used. See code in GetVSDHeapInitialBlock and GetCodeHeapInitialBlock
+ BYTE * m_pVSDHeapInitialAlloc;
+ BYTE * m_pCodeHeapInitialAlloc;
+
+public:
+ BYTE *GetVSDHeapInitialBlock(DWORD *pSize);
+ BYTE *GetCodeHeapInitialBlock(const BYTE * loAddr, const BYTE * hiAddr, DWORD minimumSize, DWORD *pSize);
+
+ BaseDomain *m_pDomain;
+
+ // ExecutionManager caches
+ void * m_pLastUsedCodeHeap;
+ void * m_pLastUsedDynamicCodeHeap;
+ void * m_pJumpStubCache;
+
+ // LoaderAllocator GC Structures
+ PTR_LoaderAllocator m_pLoaderAllocatorDestroyNext; // Used in LoaderAllocator GC process (during sweeping)
+protected:
+ void ClearMark();
+ void Mark();
+ bool Marked();
+
+#ifdef FAT_DISPATCH_TOKENS
+ struct DispatchTokenFatSHashTraits : public DefaultSHashTraits<DispatchTokenFat*>
+ {
+ typedef DispatchTokenFat* key_t;
+
+ static key_t GetKey(element_t e)
+ { return e; }
+
+ static BOOL Equals(key_t k1, key_t k2)
+ { return *k1 == *k2; }
+
+ static count_t Hash(key_t k)
+ { return (count_t)(size_t)*k; }
+ };
+
+ typedef SHash<DispatchTokenFatSHashTraits> FatTokenSet;
+ SimpleRWLock *m_pFatTokenSetLock;
+ FatTokenSet *m_pFatTokenSet;
+#endif
+
+ VirtualCallStubManager *m_pVirtualCallStubManager;
+
+private:
+ typedef SHash<PtrSetSHashTraits<LoaderAllocator * > > LoaderAllocatorSet;
+
+ LoaderAllocatorSet m_LoaderAllocatorReferences;
+ Volatile<UINT32> m_cReferences;
+ // This will be set by code:LoaderAllocator::Destroy (from the managed scout finalizer) and signals that
+ // the assembly was collected.
+ DomainAssembly * m_pDomainAssemblyToDelete;
+
+ BOOL CheckAddReference_Unlocked(LoaderAllocator *pOtherLA);
+
+ static UINT64 cLoaderAllocatorsCreated;
+ UINT64 m_nLoaderAllocator;
+
+ struct FailedTypeInitCleanupListItem
+ {
+ SLink m_Link;
+ ListLockEntry *m_pListLockEntry;
+ explicit FailedTypeInitCleanupListItem(ListLockEntry *pListLockEntry)
+ :
+ m_pListLockEntry(pListLockEntry)
+ {
+ }
+ };
+
+ SList<FailedTypeInitCleanupListItem> m_failedTypeInitCleanupList;
+
+#ifndef DACCESS_COMPILE
+ LOADERHANDLE AllocateHandle_Unlocked(OBJECTREF value);
+
+public:
+ // CleanupFailedTypeInit is called from AppDomain
+ // This method accesses loader allocator state in a thread unsafe manner.
+ // It expects to be called only from Terminate.
+ void CleanupFailedTypeInit();
+#endif //!DACCESS_COMPILE
+
+ // Collect unreferenced assemblies, remove them from the assembly list and return their loader allocator
+ // list.
+ static LoaderAllocator * GCLoaderAllocators_RemoveAssemblies(AppDomain * pAppDomain);
+
+public:
+
+ //
+ // The scheme for ensuring that LoaderAllocators are destructed correctly is substantially
+ // complicated by the requirement that LoaderAllocators eligible for destruction must be
+ // destroyed as a group, because there may be ordering dependencies in the destruction
+ // of LoaderAllocators.
+ // Thus, while there must be a complete web of references keeping the LoaderAllocator alive in
+ // managed memory, we must also have an analogous web in native memory to manage the specific
+ // ordering requirements.
+ //
+ // Thus we have an extra garbage collector here to manage the native web of LoaderAllocator references
+ // Also, we have a reference count scheme so that LCG methods keep their associated LoaderAllocator
+ // alive. LCG methods cannot be referenced by LoaderAllocators, so they do not need to participate
+ // in the garbage collection scheme except by using AddRef/Release to adjust the root set of this
+ // garbage collector.
+ //
+
+ //#AssemblyPhases
+ // The phases of unloadable assembly are:
+ //
+ // 1. Managed LoaderAllocator is alive.
+ // - Assembly is visible to managed world, the managed scout is alive and was not finalized yet.
+ // Note that the fact that the managed scout is in the finalizer queue is not important as it can
+ // (and in certain cases has to) ressurect itself.
+ // Detection:
+ // code:IsAlive ... TRUE
+ // code:IsManagedScoutAlive ... TRUE
+ // code:DomainAssembly::GetExposedAssemblyObject ... non-NULL (may need to allocate GC object)
+ //
+ // code:AddReferenceIfAlive ... TRUE (+ adds reference)
+ //
+ // 2. Managed scout is alive, managed LoaderAllocator is collected.
+ // - All managed objects related to this assembly (types, their instances, Assembly/AssemblyBuilder)
+ // are dead and/or about to disappear and cannot be recreated anymore. We are just waiting for the
+ // managed scout to run its finalizer.
+ // Detection:
+ // code:IsAlive ... TRUE
+ // code:IsManagedScoutAlive ... TRUE
+ // code:DomainAssembly::GetExposedAssemblyObject ... NULL (change from phase #1)
+ //
+ // code:AddReferenceIfAlive ... TRUE (+ adds reference)
+ //
+ // 3. Native LoaderAllocator is alive, managed scout is collected.
+ // - The native LoaderAllocator can be kept alive via native reference with code:AddRef call, e.g.:
+ // * Reference from LCG method,
+ // * Reference received from the assembly iterator code:AppDomain::AssemblyIterator::Next and/or
+ // held by code:CollectibleAssemblyHolder.
+ // - Other LoaderAllocator can have this LoaderAllocator in its reference list
+ // (code:m_LoaderAllocatorReferences), but without call to code:AddRef.
+ // - LoaderAllocator cannot ever go back to phase #1 or #2, but it can skip this phase if there are
+ // no LCG method references keeping it alive at the time of managed scout finalization.
+ // Detection:
+ // code:IsAlive ... TRUE
+ // code:IsManagedScoutAlive ... FALSE (change from phase #2)
+ // code:DomainAssembly::GetExposedAssemblyObject ... NULL
+ //
+ // code:AddReferenceIfAlive ... TRUE (+ adds reference)
+ //
+ // 4. LoaderAllocator is dead.
+ // - The managed scout was collected. No one holds a native reference with code:AddRef to this
+ // LoaderAllocator.
+ // - Other LoaderAllocator can have this LoaderAllocator in its reference list
+ // (code:m_LoaderAllocatorReferences), but without call to code:AddRef.
+ // - LoaderAllocator cannot ever become alive again (i.e. go back to phase #3, #2 or #1).
+ // Detection:
+ // code:IsAlive ... FALSE (change from phase #3, #2 and #1)
+ //
+ // code:AddReferenceIfAlive ... FALSE (change from phase #3, #2 and #1)
+ //
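+ //
+ // Illustrative native-side usage (the caller shown here is hypothetical),
+ // keeping the allocator alive across a window in which the managed scout
+ // may be finalized:
+ //
+ //     if (pLoaderAllocator->AddReferenceIfAlive())
+ //     {   // phases #1-#3: the LoaderAllocator cannot be destroyed here
+ //         ... use pLoaderAllocator ...
+ //         pLoaderAllocator->Release();
+ //     }
+ //     // FALSE means phase #4: the allocator may already be going away
+ //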
+
+ void AddReference();
+ // Adds reference if the native object is alive - code:#AssemblyPhases.
+ // Returns TRUE if the reference was added.
+ BOOL AddReferenceIfAlive();
+ BOOL Release();
+ // Checks if the native object is alive - see code:#AssemblyPhases.
+ BOOL IsAlive() { LIMITED_METHOD_DAC_CONTRACT; return (m_cReferences != (UINT32)0); }
+ // Checks if managed scout is alive - see code:#AssemblyPhases.
+ BOOL IsManagedScoutAlive()
+ {
+ return (m_pDomainAssemblyToDelete == NULL);
+ }
+
+ // Collect unreferenced assemblies, delete all their remaining resources.
+ static void GCLoaderAllocators(AppDomain *pAppDomain);
+
+ UINT64 GetCreationNumber() { LIMITED_METHOD_DAC_CONTRACT; return m_nLoaderAllocator; }
+
+ // Ensure this LoaderAllocator has a reference to another LoaderAllocator
+ BOOL EnsureReference(LoaderAllocator *pOtherLA);
+
+ // Ensure this LoaderAllocator has a reference to every LoaderAllocator of the types
+ // in an instantiation
+ BOOL EnsureInstantiation(Module *pDefiningModule, Instantiation inst);
+
+ // Given typeId and slotNumber, GetDispatchToken will return a DispatchToken
+ // representing <typeId, slotNumber>. If the typeId is big enough, this
+ // method will automatically allocate a DispatchTokenFat and encapsulate it
+ // in the return value.
+ DispatchToken GetDispatchToken(UINT32 typeId, UINT32 slotNumber);
+
+ // Same as GetDispatchToken, but returns invalid DispatchToken when the
+ // value doesn't exist or a transient exception (OOM, stack overflow) is
+ // encountered. To check if the token is valid, use DispatchToken::IsValid
+ DispatchToken TryLookupDispatchToken(UINT32 typeId, UINT32 slotNumber);
+
+ virtual LoaderAllocatorID* Id() =0;
+ BOOL IsCollectible() { WRAPPER_NO_CONTRACT; return Id()->IsCollectible(); }
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ PTR_LoaderHeap GetLowFrequencyHeap()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pLowFrequencyHeap;
+ }
+
+ PTR_LoaderHeap GetHighFrequencyHeap()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pHighFrequencyHeap;
+ }
+
+ PTR_LoaderHeap GetStubHeap()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pStubHeap;
+ }
+
+ PTR_CodeFragmentHeap GetPrecodeHeap()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pPrecodeHeap;
+ }
+
+ // The executable heap is intended to only be used by the global loader allocator.
+ // It refers to executable memory that is not associated with a rangelist.
+ PTR_LoaderHeap GetExecutableHeap()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pExecutableHeap;
+ }
+
+ PTR_CodeFragmentHeap GetDynamicHelpersHeap();
+
+ FuncPtrStubs * GetFuncPtrStubs();
+
+ FuncPtrStubs * GetFuncPtrStubsNoCreate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pFuncPtrStubs;
+ }
+
+ OBJECTHANDLE GetLoaderAllocatorObjectHandle()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_hLoaderAllocatorObjectHandle;
+ }
+
+ LOADERALLOCATORREF GetExposedObject();
+
+#ifndef DACCESS_COMPILE
+ LOADERHANDLE AllocateHandle(OBJECTREF value);
+
+ void SetHandleValue(LOADERHANDLE handle, OBJECTREF value);
+ OBJECTREF CompareExchangeValueInHandle(LOADERHANDLE handle, OBJECTREF value, OBJECTREF compare);
+ void ClearHandle(LOADERHANDLE handle);
+
+ // The default implementation is a no-op. Only collectible loader allocators implement this method.
+ virtual void RegisterHandleForCleanup(OBJECTHANDLE /* objHandle */) { }
+ virtual void CleanupHandles() { }
+
+ void RegisterFailedTypeInitForCleanup(ListLockEntry *pListLockEntry);
+#endif // !defined(DACCESS_COMPILE)
+
+
+ // This function is only safe to call if the handle is known to be a handle in a collectible
+ // LoaderAllocator, and the handle is allocated, and the LoaderAllocator is also not collected.
+ FORCEINLINE OBJECTREF GetHandleValueFastCannotFailType2(LOADERHANDLE handle);
+
+ // These functions are designed to be used for maximum performance to access handle values.
+ // GetHandleValueFast handles the scenario where a loader allocator pointer does not
+ // need to be acquired to do the handle lookup, and GetHandleValueFastPhase2 handles
+ // the scenario where the LoaderAllocator pointer is required.
+ // Do not use these functions directly - use GET_LOADERHANDLE_VALUE_FAST macro instead.
+ FORCEINLINE static BOOL GetHandleValueFast(LOADERHANDLE handle, OBJECTREF *pValue);
+ FORCEINLINE BOOL GetHandleValueFastPhase2(LOADERHANDLE handle, OBJECTREF *pValue);
+
+#define GET_LOADERHANDLE_VALUE_FAST(pLoaderAllocator, handle, pRetVal) \
+ do { \
+ LOADERHANDLE __handle__ = handle; \
+ if (!LoaderAllocator::GetHandleValueFast(__handle__, pRetVal) && \
+ !pLoaderAllocator->GetHandleValueFastPhase2(__handle__, pRetVal)) \
+ { \
+ *(pRetVal) = NULL; \
+ } \
+ } while (0)
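+
+ // Example use of the macro (this is how code:LoaderAllocator::GetHandleValue
+ // in loaderallocator.cpp consumes it):
+ //
+ //     OBJECTREF objRet = NULL;
+ //     GET_LOADERHANDLE_VALUE_FAST(this, handle, &objRet);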
+
+ OBJECTREF GetHandleValue(LOADERHANDLE handle);
+
+ LoaderAllocator();
+ virtual ~LoaderAllocator();
+ BaseDomain *GetDomain() { LIMITED_METHOD_CONTRACT; return m_pDomain; }
+ virtual BOOL CanUnload() = 0;
+ BOOL IsDomainNeutral();
+ void Init(BaseDomain *pDomain, BYTE *pExecutableHeapMemory = NULL);
+ void Terminate();
+ SIZE_T EstimateSize();
+
+ void SetupManagedTracking(LOADERALLOCATORREF *pLoaderAllocatorKeepAlive);
+ void ActivateManagedTracking();
+
+ // Unloaded in this context means that there is no managed code running against this loader allocator.
+ // This flag is used by the debugger to filter out methods in modules that are being destructed.
+ bool IsUnloaded() { LIMITED_METHOD_CONTRACT; return m_fUnloaded; }
+ void SetIsUnloaded() { LIMITED_METHOD_CONTRACT; m_fUnloaded = true; }
+
+ void SetGCRefPoint(int gccounter)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_nGCCount = gccounter;
+ }
+ int GetGCRefPoint()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_nGCCount;
+ }
+
+ static BOOL QCALLTYPE Destroy(QCall::LoaderAllocatorHandle pLoaderAllocator);
+
+ //****************************************************************************************
+ // Methods to retrieve a pointer to the COM+ string STRINGREF for a string constant.
+ // If the string is not currently in the hash table it will be added and if the
+ // copy string flag is set then the string will be copied before it is inserted.
+ STRINGREF *GetStringObjRefPtrFromUnicodeString(EEStringData *pStringData);
+ void LazyInitStringLiteralMap();
+ STRINGREF *IsStringInterned(STRINGREF *pString);
+ STRINGREF *GetOrInternString(STRINGREF *pString);
+ void CleanupStringLiteralMap();
+
+ void InitVirtualCallStubManager(BaseDomain *pDomain, BOOL fCollectible = FALSE);
+ void UninitVirtualCallStubManager();
+ inline VirtualCallStubManager *GetVirtualCallStubManager()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pVirtualCallStubManager;
+ }
+}; // class LoaderAllocator
+
+typedef VPTR(LoaderAllocator) PTR_LoaderAllocator;
+
+class GlobalLoaderAllocator : public LoaderAllocator
+{
+ VPTR_VTABLE_CLASS(GlobalLoaderAllocator, LoaderAllocator)
+ VPTR_UNIQUE(VPTRU_LoaderAllocator+1);
+
+ BYTE m_ExecutableHeapInstance[sizeof(LoaderHeap)];
+
+protected:
+ LoaderAllocatorID m_Id;
+
+public:
+ void Init(BaseDomain *pDomain);
+ GlobalLoaderAllocator() : m_Id(LAT_Global, (void*)1) { LIMITED_METHOD_CONTRACT;};
+ virtual LoaderAllocatorID* Id();
+ virtual BOOL CanUnload();
+};
+
+typedef VPTR(GlobalLoaderAllocator) PTR_GlobalLoaderAllocator;
+
+
+class AppDomainLoaderAllocator : public LoaderAllocator
+{
+ VPTR_VTABLE_CLASS(AppDomainLoaderAllocator, LoaderAllocator)
+ VPTR_UNIQUE(VPTRU_LoaderAllocator+2);
+
+protected:
+ LoaderAllocatorID m_Id;
+public:
+ AppDomainLoaderAllocator() : m_Id(LAT_AppDomain) { LIMITED_METHOD_CONTRACT;};
+ void Init(AppDomain *pAppDomain);
+ virtual LoaderAllocatorID* Id();
+ virtual BOOL CanUnload();
+};
+
+typedef VPTR(AppDomainLoaderAllocator) PTR_AppDomainLoaderAllocator;
+
+class AssemblyLoaderAllocator : public LoaderAllocator
+{
+ VPTR_VTABLE_CLASS(AssemblyLoaderAllocator, LoaderAllocator)
+ VPTR_UNIQUE(VPTRU_LoaderAllocator+3);
+
+protected:
+ LoaderAllocatorID m_Id;
+public:
+ virtual LoaderAllocatorID* Id();
+ AssemblyLoaderAllocator() : m_Id(LAT_Assembly) { LIMITED_METHOD_CONTRACT; }
+ void Init(AppDomain *pAppDomain);
+ virtual BOOL CanUnload();
+ void SetDomainAssembly(DomainAssembly *pDomainAssembly) { WRAPPER_NO_CONTRACT; m_Id.SetDomainAssembly(pDomainAssembly); }
+
+#ifndef DACCESS_COMPILE
+ virtual void RegisterHandleForCleanup(OBJECTHANDLE objHandle);
+ virtual void CleanupHandles();
+#endif // !defined(DACCESS_COMPILE)
+
+private:
+ struct HandleCleanupListItem
+ {
+ SLink m_Link;
+ OBJECTHANDLE m_handle;
+ explicit HandleCleanupListItem(OBJECTHANDLE handle)
+ :
+ m_handle(handle)
+ {
+ }
+ };
+
+ SList<HandleCleanupListItem> m_handleCleanupList;
+};
+
+typedef VPTR(AssemblyLoaderAllocator) PTR_AssemblyLoaderAllocator;
+
+
+#include "loaderallocator.inl"
+
+#endif // __LoaderAllocator_h__
+
diff --git a/src/vm/loaderallocator.inl b/src/vm/loaderallocator.inl
new file mode 100644
index 0000000000..2bbd691b37
--- /dev/null
+++ b/src/vm/loaderallocator.inl
@@ -0,0 +1,184 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _LOADER_ALLOCATOR_I
+#define _LOADER_ALLOCATOR_I
+
+#include "assembly.hpp"
+
+#ifndef DACCESS_COMPILE
+inline LOADERALLOCATORREF LoaderAllocator::GetExposedObject()
+{
+ LIMITED_METHOD_CONTRACT;
+ OBJECTREF loaderAllocatorObject = (m_hLoaderAllocatorObjectHandle != NULL) ? ObjectFromHandle(m_hLoaderAllocatorObjectHandle) : NULL;
+ return (LOADERALLOCATORREF)loaderAllocatorObject;
+}
+#endif
+
+inline void GlobalLoaderAllocator::Init(BaseDomain *pDomain)
+{
+ LoaderAllocator::Init(pDomain, m_ExecutableHeapInstance);
+}
+
+inline void AppDomainLoaderAllocator::Init(AppDomain *pAppDomain)
+{
+ WRAPPER_NO_CONTRACT;
+ m_Id.Init(pAppDomain);
+ LoaderAllocator::Init((BaseDomain *)pAppDomain);
+}
+
+inline void LoaderAllocatorID::Init(AppDomain *pAppDomain)
+{
+ m_type = LAT_AppDomain;
+ m_pAppDomain = pAppDomain;
+}
+
+inline void AssemblyLoaderAllocator::Init(AppDomain* pAppDomain)
+{
+ m_Id.Init();
+ LoaderAllocator::Init((BaseDomain *)pAppDomain);
+}
+
+inline BOOL LoaderAllocatorID::Equals(LoaderAllocatorID *pId)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (GetType() != pId->GetType())
+ return false;
+
+ return GetValue() == pId->GetValue();
+}
+
+inline void LoaderAllocatorID::Init()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_type = LAT_Assembly;
+};
+
+inline void LoaderAllocatorID::SetDomainAssembly(DomainAssembly* pAssembly)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_type == LAT_Assembly);
+ m_pDomainAssembly = pAssembly;
+}
+
+inline VOID* LoaderAllocatorID::GetValue()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pValue;
+}
+
+inline COUNT_T LoaderAllocatorID::Hash()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (COUNT_T)(SIZE_T)GetValue();
+}
+
+inline LoaderAllocatorType LoaderAllocatorID::GetType()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_type;
+}
+
+inline DomainAssembly* LoaderAllocatorID::GetDomainAssembly()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(m_type == LAT_Assembly);
+ return m_pDomainAssembly;
+}
+
+inline AppDomain *LoaderAllocatorID::GetAppDomain()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(m_type == LAT_AppDomain);
+ return m_pAppDomain;
+}
+
+inline BOOL LoaderAllocatorID::IsCollectible()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_type == LAT_Assembly;
+}
+
+inline LoaderAllocatorID* AssemblyLoaderAllocator::Id()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return &m_Id;
+}
+
+inline LoaderAllocatorID* GlobalLoaderAllocator::Id()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return &m_Id;
+}
+
+inline LoaderAllocatorID* AppDomainLoaderAllocator::Id()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return &m_Id;
+}
+
+/* static */
+FORCEINLINE BOOL LoaderAllocator::GetHandleValueFast(LOADERHANDLE handle, OBJECTREF *pValue)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // If the slot value has the low bit set, then it is a simple pointer to the value.
+ // Otherwise, we will need a more complicated operation to get the value.
+ if ((((UINT_PTR)handle) & 1) != 0)
+ {
+ *pValue = *((OBJECTREF *)(((UINT_PTR)handle) - 1));
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+}
+
+FORCEINLINE BOOL LoaderAllocator::GetHandleValueFastPhase2(LOADERHANDLE handle, OBJECTREF *pValue)
+{
+ SUPPORTS_DAC;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ if (handle == 0)
+ return FALSE;
+
+ /* This is lockless access to the handle table, be careful */
+ OBJECTREF loaderAllocatorAsObjectRef = ObjectFromHandle(m_hLoaderAllocatorObjectHandle);
+
+ // If the managed loader allocator has been collected, then the handles associated with it are dead as well.
+ if (loaderAllocatorAsObjectRef == NULL)
+ return FALSE;
+
+ LOADERALLOCATORREF loaderAllocator = dac_cast<LOADERALLOCATORREF>(loaderAllocatorAsObjectRef);
+ PTRARRAYREF handleTable = loaderAllocator->GetHandleTable();
+ UINT_PTR index = (((UINT_PTR)handle) >> 1) - 1;
+ *pValue = handleTable->GetAt(index);
+
+ return TRUE;
+}
+
+FORCEINLINE OBJECTREF LoaderAllocator::GetHandleValueFastCannotFailType2(LOADERHANDLE handle)
+{
+ SUPPORTS_DAC;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ /* This is lockless access to the handle table, be careful */
+ OBJECTREF loaderAllocatorAsObjectRef = ObjectFromHandle(m_hLoaderAllocatorObjectHandle);
+ LOADERALLOCATORREF loaderAllocator = dac_cast<LOADERALLOCATORREF>(loaderAllocatorAsObjectRef);
+ PTRARRAYREF handleTable = loaderAllocator->GetHandleTable();
+ UINT_PTR index = (((UINT_PTR)handle) >> 1) - 1;
+
+ return handleTable->GetAt(index);
+}
+#endif // _LOADER_ALLOCATOR_I
+
diff --git a/src/vm/managedmdimport.cpp b/src/vm/managedmdimport.cpp
new file mode 100644
index 0000000000..b588db832e
--- /dev/null
+++ b/src/vm/managedmdimport.cpp
@@ -0,0 +1,723 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// TODO: Re-implement with MC++ if we ever compile any mscorlib code with that
+
+
+
+#include "common.h"
+#include "mlinfo.h"
+#include "managedmdimport.hpp"
+#include "wrappers.h"
+
+void ThrowMetaDataImportException(HRESULT hr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (hr == CLDB_E_RECORD_NOTFOUND)
+ return;
+
+ MethodDescCallSite throwError(METHOD__METADATA_IMPORT__THROW_ERROR);
+
+ ARG_SLOT args[] = { hr };
+ throwError.Call(args);
+}
+
+//
+// MetaDataImport
+//
+extern BOOL ParseNativeTypeInfo(NativeTypeParamInfo* pInfo, PCCOR_SIGNATURE pvNativeType, ULONG cbNativeType);
+
+FCIMPL11(void, MetaDataImport::GetMarshalAs,
+ BYTE* pvNativeType,
+ ULONG cbNativeType,
+ INT32* unmanagedType,
+ INT32* safeArraySubType,
+ STRINGREF* safeArrayUserDefinedSubType,
+ INT32* arraySubType,
+ INT32* sizeParamIndex,
+ INT32* sizeConst,
+ STRINGREF* marshalType,
+ STRINGREF* marshalCookie,
+ INT32* iidParamIndex)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+ {
+ NativeTypeParamInfo info;
+
+ ZeroMemory(&info, sizeof(NativeTypeParamInfo));
+
+ if (!ParseNativeTypeInfo(&info, pvNativeType, cbNativeType))
+ {
+ ThrowMetaDataImportException(E_FAIL);
+ }
+
+ *unmanagedType = info.m_NativeType;
+ *sizeParamIndex = info.m_CountParamIdx;
+ *sizeConst = info.m_Additive;
+ *arraySubType = info.m_ArrayElementType;
+
+#ifdef FEATURE_COMINTEROP
+ *iidParamIndex = info.m_IidParamIndex;
+
+ *safeArraySubType = info.m_SafeArrayElementVT;
+
+ *safeArrayUserDefinedSubType = info.m_strSafeArrayUserDefTypeName == NULL ? NULL :
+ StringObject::NewString(info.m_strSafeArrayUserDefTypeName, info.m_cSafeArrayUserDefTypeNameBytes);
+#else
+ *iidParamIndex = -1;
+
+ *safeArraySubType = VT_EMPTY;
+
+ *safeArrayUserDefinedSubType = NULL;
+#endif
+
+ *marshalType = info.m_strCMMarshalerTypeName == NULL ? NULL :
+ StringObject::NewString(info.m_strCMMarshalerTypeName, info.m_cCMMarshalerTypeNameBytes);
+
+ *marshalCookie = info.m_strCMCookie == NULL ? NULL :
+ StringObject::NewString(info.m_strCMCookie, info.m_cCMCookieStrBytes);
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+MDImpl4(Object *, MetaDataImport::GetDefaultValue, mdToken tk, INT64* pDefaultValue, INT32* pLength, INT32* pCorElementType)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr = S_OK;
+ Object *pRetVal = NULL;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
+
+ IMDInternalImport *_pScope = pScope;
+
+ MDDefaultValue value;
+ IfFailGo(_pScope->GetDefaultValue(tk, &value));
+
+ // We treat string values differently. That's because on big-endian architectures we can't return a
+ // pointer to static string data in the metadata, we have to buffer the string in order to byte-swap
+ // all the unicode characters. MDDefaultValue therefore has a destructor on big-endian machines which
+ // reclaims this buffer, implying we can't safely return the embedded pointer to managed code.
+ // The easiest thing for us to do is to construct the managed string object here, in the context of
+ // the still valid MDDefaultValue. We can't return a managed object via the normal out parameter
+ // because it won't be GC protected, so in this special case null the output parameter and return
+ // the string via the protected return result (which is null for all other cases).
+ if (value.m_bType == ELEMENT_TYPE_STRING)
+ {
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+ *pDefaultValue = 0;
+ STRINGREF refRetval = StringObject::NewString(value.m_wzValue, value.m_cbSize / sizeof(WCHAR));
+ pRetVal = STRINGREFToObject(refRetval);
+ HELPER_METHOD_FRAME_END();
+ }
+ else
+ {
+ *pDefaultValue = value.m_ullValue;
+ }
+
+ *pCorElementType = (UINT32)value.m_bType;
+ *pLength = (INT32)value.m_cbSize;
+ErrExit:
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrow(kBadImageFormatException);
+ }
+
+ return pRetVal;
+}
+FCIMPLEND
+
+MDImpl3(void, MetaDataImport::GetCustomAttributeProps, mdCustomAttribute cv, mdToken* ptkType, ConstArray* ppBlob)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr = S_OK;
+ IMDInternalImport *_pScope = pScope;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ IfFailGo(_pScope->GetCustomAttributeProps(cv, ptkType));
+ IfFailGo(_pScope->GetCustomAttributeAsBlob(cv, (const void **)&ppBlob->m_array, (ULONG *)&ppBlob->m_count));
+ErrExit:
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+static int * EnsureResultSize(MetadataEnumResult * pResult, ULONG length)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ int * p;
+
+ if (length >= NumItems(pResult->smallResult) || DbgRandomOnExe(.01))
+ {
+ pResult->largeResult = (I4Array *)OBJECTREFToObject(AllocatePrimitiveArray(ELEMENT_TYPE_I4, length));
+ p = pResult->largeResult->GetDirectPointerToNonObjectElements();
+ }
+ else
+ {
+ ZeroMemory(pResult->smallResult, sizeof(pResult->smallResult));
+ p = pResult->smallResult;
+ }
+
+ pResult->length = length;
+ return p;
+}
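+
+// EnsureResultSize implements a small-buffer optimization: enumerations that fit
+// are written into MetadataEnumResult::smallResult (an inline buffer supplied by
+// the caller), and only larger results allocate a managed int[] (largeResult).
+// The DbgRandomOnExe(.01) term appears to force the rare large-result path about
+// 1% of the time in checked builds so that it stays exercised.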
+
+MDImpl3(void, MetaDataImport::Enum, mdToken type, mdToken tkParent, MetadataEnumResult * pResult)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(pResult != NULL);
+ }
+ CONTRACTL_END;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+ {
+ IMDInternalImport *_pScope = pScope;
+
+ if (type == mdtTypeDef)
+ {
+ ULONG nestedClassesCount;
+ IfFailThrow(_pScope->GetCountNestedClasses(tkParent, &nestedClassesCount));
+
+ mdTypeDef* arToken = (mdTypeDef*)EnsureResultSize(pResult, nestedClassesCount);
+ IfFailThrow(_pScope->GetNestedClasses(tkParent, arToken, nestedClassesCount, &nestedClassesCount));
+ }
+ else if (type == mdtMethodDef && (TypeFromToken(tkParent) == mdtProperty || TypeFromToken(tkParent) == mdtEvent))
+ {
+ HENUMInternalHolder hEnum(pScope);
+ hEnum.EnumAssociateInit(tkParent);
+
+ ULONG associatesCount = hEnum.EnumGetCount();
+
+ static_assert_no_msg(sizeof(ASSOCIATE_RECORD) == 2 * sizeof(int));
+
+ ASSOCIATE_RECORD* arAssocRecord = (ASSOCIATE_RECORD*)EnsureResultSize(pResult, 2 * associatesCount);
+ IfFailThrow(_pScope->GetAllAssociates(&hEnum, arAssocRecord, associatesCount));
+ }
+ else
+ {
+ HENUMInternalHolder hEnum(pScope);
+ hEnum.EnumInit(type, tkParent);
+
+ ULONG count = hEnum.EnumGetCount();
+
+ mdToken* arToken = (mdToken*)EnsureResultSize(pResult, count);
+ // EnumNext fills arToken[i]; the loop body is intentionally empty.
+ for (COUNT_T i = 0; i < count && _pScope->EnumNext(&hEnum, &arToken[i]); i++);
+ }
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+#if defined(_MSC_VER) && defined(_TARGET_X86_)
+#pragma optimize("y", on) // Small critical routines, don't put in EBP frame
+#endif
+
+MDImpl1(FC_BOOL_RET, MetaDataImport::IsValidToken, mdToken tk)
+{
+ FCALL_CONTRACT;
+
+ IMDInternalImport *_pScope = pScope;
+
+ FC_RETURN_BOOL(_pScope->IsValidToken(tk));
+}
+FCIMPLEND
+
+
+MDImpl3(void, MetaDataImport::GetClassLayout, mdTypeDef td, DWORD* pdwPackSize, ULONG* pulClassSize)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ {
+ IMDInternalImport *_pScope = pScope;
+
+ if (pdwPackSize != NULL)
+ {
+ hr = _pScope->GetClassPackSize(td, (ULONG *)pdwPackSize);
+ if (hr == CLDB_E_RECORD_NOTFOUND)
+ {
+ *pdwPackSize = 0;
+ hr = S_OK;
+ }
+ IfFailGo(hr);
+ }
+
+ if (pulClassSize != NULL)
+ {
+ hr = _pScope->GetClassTotalSize(td, pulClassSize);
+ if (hr == CLDB_E_RECORD_NOTFOUND)
+ {
+ *pulClassSize = 0;
+ hr = S_OK;
+ }
+ IfFailGo(hr);
+ }
+ }
+ErrExit:
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+MDImpl3(FC_BOOL_RET, MetaDataImport::GetFieldOffset, mdTypeDef td, mdFieldDef target, DWORD* pdwFieldOffset)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr = S_OK;
+ IMDInternalImport *_pScope = pScope;
+ MD_CLASS_LAYOUT layout;
+ BOOL retVal = FALSE;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
+ IfFailGo(_pScope->GetClassLayoutInit(td, &layout));
+
+ ULONG cFieldOffset;
+ cFieldOffset = layout.m_ridFieldEnd - layout.m_ridFieldCur;
+
+ for (COUNT_T i = 0; i < cFieldOffset; i ++)
+ {
+ mdFieldDef fd;
+ ULONG offset;
+ IfFailGo(_pScope->GetClassLayoutNext(&layout, &fd, &offset));
+
+ if (fd == target)
+ {
+ *pdwFieldOffset = offset;
+ retVal = TRUE;
+ break;
+ }
+ }
+ErrExit:
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrow(kBadImageFormatException);
+ }
+ FC_RETURN_BOOL(retVal);
+}
+FCIMPLEND
+
+MDImpl3(void, MetaDataImport::GetUserString, mdToken tk, LPCSTR* pszName, ULONG* pCount)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ IMDInternalImport *_pScope = pScope;
+ BOOL bHasExtendedChars;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ hr = _pScope->GetUserString(tk, pCount, &bHasExtendedChars, (LPCWSTR *)pszName);
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+MDImpl2(void, MetaDataImport::GetName, mdToken tk, LPCSTR* pszName)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr = S_OK;
+ IMDInternalImport *_pScope = pScope;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ if (TypeFromToken(tk) == mdtMethodDef)
+ {
+ hr = _pScope->GetNameOfMethodDef(tk, pszName);
+ }
+ else if (TypeFromToken(tk) == mdtParamDef)
+ {
+ USHORT seq;
+ DWORD attr;
+ hr = _pScope->GetParamDefProps(tk, &seq, &attr, pszName);
+ }
+ else if (TypeFromToken(tk) == mdtFieldDef)
+ {
+ hr = _pScope->GetNameOfFieldDef(tk, pszName);
+ }
+ else if (TypeFromToken(tk) == mdtProperty)
+ {
+ hr = _pScope->GetPropertyProps(tk, pszName, NULL, NULL, NULL);
+ }
+ else if (TypeFromToken(tk) == mdtEvent)
+ {
+ hr = _pScope->GetEventProps(tk, pszName, NULL, NULL);
+ }
+ else if (TypeFromToken(tk) == mdtModuleRef)
+ {
+ hr = _pScope->GetModuleRefProps(tk, pszName);
+ }
+ else if (TypeFromToken(tk) == mdtTypeDef)
+ {
+ LPCSTR szNamespace = NULL;
+ hr = _pScope->GetNameOfTypeDef(tk, pszName, &szNamespace);
+ }
+ else
+ {
+ hr = E_FAIL;
+ }
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+MDImpl2(void, MetaDataImport::GetNamespace, mdToken tk, LPCSTR* pszName)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ IMDInternalImport *_pScope = pScope;
+ LPCSTR szName = NULL;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ hr = _pScope->GetNameOfTypeDef(tk, &szName, pszName);
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+
+MDImpl2(void, MetaDataImport::GetGenericParamProps, mdToken tk, DWORD* pAttributes)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ IMDInternalImport *_pScope = pScope;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ hr = _pScope->GetGenericParamProps(tk, NULL, pAttributes, NULL, NULL, NULL);
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+MDImpl3(void, MetaDataImport::GetEventProps, mdToken tk, LPCSTR* pszName, INT32 *pdwEventFlags)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ IMDInternalImport *_pScope = pScope;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ hr = _pScope->GetEventProps(tk, pszName, (DWORD*)pdwEventFlags, NULL);
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+MDImpl4(void, MetaDataImport::GetPinvokeMap, mdToken tk, DWORD* pMappingFlags, LPCSTR* pszImportName, LPCSTR* pszImportDll)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ IMDInternalImport *_pScope = pScope;
+ mdModule tkModule;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ hr = _pScope->GetPinvokeMap(tk, pMappingFlags, pszImportName, &tkModule);
+ if (FAILED(hr))
+ {
+ *pMappingFlags = 0;
+ *pszImportName = NULL;
+ *pszImportDll = NULL;
+ hr = S_OK;
+ }
+ else
+ {
+ hr = _pScope->GetModuleRefProps(tkModule, pszImportDll);
+ }
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+MDImpl3(void, MetaDataImport::GetParamDefProps, mdToken tk, INT32* pSequence, INT32* pAttributes)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ IMDInternalImport *_pScope = pScope;
+ USHORT usSequence = 0;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+
+ // Is this a valid token?
+ if (_pScope->IsValidToken((mdParamDef)tk))
+ {
+ LPCSTR szParamName;
+ hr = _pScope->GetParamDefProps(tk, &usSequence, (DWORD *)pAttributes, &szParamName);
+ }
+ else
+ {
+ // Invalid token - throw an exception
+ hr = COR_E_BADIMAGEFORMAT;
+ }
+ *pSequence = (INT32) usSequence;
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+MDImpl2(void, MetaDataImport::GetFieldDefProps, mdToken tk, INT32 *pdwFieldFlags)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ IMDInternalImport *_pScope = pScope;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ hr = _pScope->GetFieldDefProps(tk, (DWORD *)pdwFieldFlags);
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+MDImpl4(void, MetaDataImport::GetPropertyProps, mdToken tk, LPCSTR* pszName, INT32 *pdwPropertyFlags, ConstArray* ppValue)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ IMDInternalImport *_pScope = pScope;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ hr = _pScope->GetPropertyProps(tk, pszName, (DWORD*)pdwPropertyFlags, (PCCOR_SIGNATURE*)&ppValue->m_array, (ULONG*)&ppValue->m_count);
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+MDImpl2(void, MetaDataImport::GetFieldMarshal, mdToken tk, ConstArray* ppValue)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ IMDInternalImport *_pScope = pScope;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ hr = _pScope->GetFieldMarshal(tk, (PCCOR_SIGNATURE *)&ppValue->m_array, (ULONG *)&ppValue->m_count);
+ if (hr == CLDB_E_RECORD_NOTFOUND)
+ {
+ ppValue->m_array = NULL;
+ ppValue->m_count = 0;
+ hr = S_OK;
+ }
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+MDImpl2(void, MetaDataImport::GetSigOfMethodDef, mdToken tk, ConstArray* ppValue)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ IMDInternalImport *_pScope = pScope;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ hr = _pScope->GetSigOfMethodDef(tk, (ULONG*)&ppValue->m_count, (PCCOR_SIGNATURE *)&ppValue->m_array);
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+MDImpl2(void, MetaDataImport::GetSignatureFromToken, mdToken tk, ConstArray* ppValue)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ IMDInternalImport *_pScope = pScope;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ hr = _pScope->GetSigFromToken(tk, (ULONG*)&ppValue->m_count, (PCCOR_SIGNATURE *)&(ppValue->m_array));
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+MDImpl2(void, MetaDataImport::GetSigOfFieldDef, mdToken tk, ConstArray* ppValue)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ IMDInternalImport *_pScope = pScope;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ hr = _pScope->GetSigOfFieldDef(tk, (ULONG*)&ppValue->m_count, (PCCOR_SIGNATURE *)&ppValue->m_array);
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+MDImpl2(void, MetaDataImport::GetParentToken, mdToken tk, mdToken* ptk)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ IMDInternalImport *_pScope = pScope;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+
+ switch (TypeFromToken(tk))
+ {
+ case mdtTypeDef:
+ hr = _pScope->GetNestedClassProps(tk, ptk);
+ if (hr == CLDB_E_RECORD_NOTFOUND)
+ {
+ *ptk = mdTypeDefNil;
+ hr = S_OK;
+ }
+ break;
+
+ case mdtGenericParam:
+ hr = _pScope->GetGenericParamProps(tk, NULL, NULL, ptk, NULL, NULL);
+ break;
+
+ case mdtMethodDef:
+ case mdtMethodSpec:
+ case mdtFieldDef:
+ case mdtParamDef:
+ case mdtMemberRef:
+ case mdtCustomAttribute:
+ case mdtEvent:
+ case mdtProperty:
+ hr = _pScope->GetParentToken(tk, ptk);
+ break;
+
+ default:
+ hr = COR_E_BADIMAGEFORMAT;
+ break;
+ }
+
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+MDImpl1(void, MetaDataImport::GetScopeProps, GUID* pmvid)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ LPCSTR szName;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ IMDInternalImport *_pScope = pScope;
+ hr = _pScope->GetScopeProps(&szName, pmvid);
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+
+MDImpl2(void, MetaDataImport::GetMemberRefProps,
+ mdMemberRef mr,
+ ConstArray* ppvSigBlob)
+{
+ FCALL_CONTRACT;
+
+ HRESULT hr;
+ IMDInternalImport *_pScope = pScope;
+ LPCSTR szName_Ignore;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
+ hr = _pScope->GetNameAndSigOfMemberRef(mr, (PCCOR_SIGNATURE*)&ppvSigBlob->m_array, (ULONG*)&ppvSigBlob->m_count, &szName_Ignore);
+ END_SO_INTOLERANT_CODE;
+
+ if (FAILED(hr))
+ {
+ FCThrowVoid(kBadImageFormatException);
+ }
+}
+FCIMPLEND
+
+#if defined(_MSC_VER) && defined(_TARGET_X86_)
+#pragma optimize("", on) // restore command line optimization defaults
+#endif
+
diff --git a/src/vm/managedmdimport.hpp b/src/vm/managedmdimport.hpp
new file mode 100644
index 0000000000..37db65d8a9
--- /dev/null
+++ b/src/vm/managedmdimport.hpp
@@ -0,0 +1,123 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+////////////////////////////////////////////////////////////////////////////////
+
+#ifndef _MANAGEDMDIMPORT_H_
+#define _MANAGEDMDIMPORT_H_
+
+#include "corhdr.h"
+#include "fcall.h"
+#include "runtimehandles.h"
+
+//
+// Keep the struct definitions in sync with bcl\system\reflection\mdimport.cs
+//
+
+typedef struct
+{
+ INT32 m_count;
+ void* m_array;
+} ConstArray;
+
+typedef struct
+{
+ I4Array * largeResult;
+ int length;
+#ifdef _WIN64
+ int padding;
+#endif
+ int smallResult[16];
+} MetadataEnumResult;
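+
+// A minimal compile-time layout check, sketched here as a comment (it assumes
+// the static_assert_no_msg macro available elsewhere in the VM): the managed
+// counterpart in mdimport.cs relies on smallResult holding exactly 16 ints,
+// with largeResult used once a result no longer fits.
+//
+//   static_assert_no_msg(sizeof(((MetadataEnumResult*)0)->smallResult) == 16 * sizeof(int));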
+
+#define MDDecl0(RET, NAME) static FCDECL1(RET, NAME, IMDInternalImport* pScope)
+#define MDDecl1(RET, NAME, arg0) static FCDECL2(RET, NAME, IMDInternalImport* pScope, arg0)
+#define MDDecl2(RET, NAME, arg0, arg1) static FCDECL3(RET, NAME, IMDInternalImport* pScope, arg0, arg1)
+#define MDDecl3(RET, NAME, arg0, arg1, arg2) static FCDECL4(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2)
+#define MDDecl4(RET, NAME, arg0, arg1, arg2, arg3) static FCDECL5(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2, arg3)
+#define MDDecl5(RET, NAME, arg0, arg1, arg2, arg3, arg4) static FCDECL6(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2, arg3, arg4)
+#define MDDecl6(RET, NAME, arg0, arg1, arg2, arg3, arg4, arg5) static FCDECL7(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2, arg3, arg4, arg5)
+#define MDDecl7(RET, NAME, arg0, arg1, arg2, arg3, arg4, arg5, arg6) static FCDECL8(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+#define MDDecl8(RET, NAME, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) static FCDECL9(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
+#define MDDecl9(RET, NAME, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) static FCDECL10(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
+#define MDDecl10(RET, NAME, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) static FCDECL11(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9)
+
+#define MDImpl0(RET, NAME) FCIMPL1(RET, NAME, IMDInternalImport* pScope)
+#define MDImpl1(RET, NAME, arg0) FCIMPL2(RET, NAME, IMDInternalImport* pScope, arg0)
+#define MDImpl2(RET, NAME, arg0, arg1) FCIMPL3(RET, NAME, IMDInternalImport* pScope, arg0, arg1)
+#define MDImpl3(RET, NAME, arg0, arg1, arg2) FCIMPL4(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2)
+#define MDImpl4(RET, NAME, arg0, arg1, arg2, arg3) FCIMPL5(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2, arg3)
+#define MDImpl5(RET, NAME, arg0, arg1, arg2, arg3, arg4) FCIMPL6(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2, arg3, arg4)
+#define MDImpl6(RET, NAME, arg0, arg1, arg2, arg3, arg4, arg5) FCIMPL7(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2, arg3, arg4, arg5)
+#define MDImpl7(RET, NAME, arg0, arg1, arg2, arg3, arg4, arg5, arg6) FCIMPL8(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+#define MDImpl8(RET, NAME, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) FCIMPL9(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
+#define MDImpl9(RET, NAME, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) FCIMPL10(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
+#define MDImpl10(RET, NAME, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) FCIMPL11(RET, NAME, IMDInternalImport* pScope, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9)
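+
+// For illustration, assuming the usual FCIMPL expansion from fcall.h: a
+// definition such as
+//
+//   MDImpl2(void, MetaDataImport::GetName, mdToken tk, LPCSTR* pszName)
+//
+// becomes
+//
+//   FCIMPL3(void, MetaDataImport::GetName, IMDInternalImport* pScope, mdToken tk, LPCSTR* pszName)
+//
+// so every metadata FCall implicitly receives the unmanaged import scope as
+// its first argument, ahead of the arguments in the managed signature.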
+
+class MetaDataImport
+{
+public:
+ //
+ // GetXXXProps
+ //
+ MDDecl1(void, GetScopeProps, GUID* pmvid);
+ MDDecl4(void, GetTypeDefProps, mdTypeDef td, STRINGREF* pszTypeDef, DWORD* pdwTypeDefFlags, mdToken* ptkExtends);
+ MDDecl2(void, GetMemberRefProps, mdMemberRef mr, ConstArray* ppvSigBlob);
+
+
+ ////
+ //// EnumXXX
+ ////
+ MDDecl3(void, Enum, mdToken type, mdToken tkParent, MetadataEnumResult * pResult);
+ MDDecl3(void, GetCustomAttributeProps, mdCustomAttribute cv, mdToken* ptkType, ConstArray* ppBlob);
+
+ ////
+ //// Misc
+ ////
+
+ MDDecl4(Object *, GetDefaultValue, mdToken tk, INT64* pDefaultValue, INT32* pLength, INT32* pCorElementType);
+ MDDecl2(void, GetName, mdToken tk, LPCSTR* pszName);
+ MDDecl3(void, GetUserString, mdToken tk, LPCSTR* pszName, ULONG* pCount);
+ MDDecl2(void, GetNamespace, mdToken tk, LPCSTR* pszName);
+ MDDecl2(void, GetParentToken, mdToken tk, mdToken* ptk);
+ MDDecl3(void, GetParamDefProps, mdToken tk, INT32* pSequence, INT32* pAttributes);
+ MDDecl4(void, GetPinvokeMap, mdToken tk, DWORD* pMappingFlags, LPCSTR* pszImportName, LPCSTR* pszImportDll);
+
+ MDDecl3(void, GetClassLayout, mdTypeDef td, DWORD* pdwPackSize, ULONG* pulClassSize);
+ MDDecl3(FC_BOOL_RET, GetFieldOffset, mdTypeDef td, mdFieldDef target, DWORD* pdwFieldOffset);
+
+ MDDecl3(void, GetEventProps, mdToken tk, LPCSTR* pszName, INT32 *pdwEventFlags);
+ MDDecl2(void, GetGenericParamProps, mdToken tk, DWORD* pAttributes);
+ MDDecl2(void, GetFieldDefProps, mdToken tk, INT32 *pdwFieldFlags);
+ MDDecl4(void, GetPropertyProps, mdToken tk, LPCSTR* pszName, INT32 *pdwPropertyFlags, ConstArray* ppvSigBlob);
+
+ MDDecl2(void, GetSignatureFromToken, mdToken tk, ConstArray* pSig);
+ MDDecl2(void, GetSigOfFieldDef, mdToken tk, ConstArray* pMarshalInfo);
+ MDDecl2(void, GetSigOfMethodDef, mdToken tk, ConstArray* pMarshalInfo);
+ MDDecl2(void, GetFieldMarshal, mdToken tk, ConstArray* pMarshalInfo);
+ MDDecl2(mdParamDef, GetParamForMethodIndex, mdMethodDef md, ULONG ulParamSeq);
+ MDDecl1(FC_BOOL_RET, IsValidToken, mdToken tk);
+ MDDecl1(mdTypeDef, GetNestedClassProps, mdTypeDef tdNestedClass);
+ MDDecl1(ULONG, GetNativeCallConvFromSig, ConstArray sig);
+
+ static FCDECL11(void, GetMarshalAs,
+ BYTE* pvNativeType,
+ ULONG cbNativeType,
+ INT32* unmanagedType,
+ INT32* safeArraySubType,
+ STRINGREF* safeArrayUserDefinedSubType,
+ INT32* arraySubType,
+ INT32* sizeParamIndex,
+ INT32* sizeConst,
+ STRINGREF* marshalType,
+ STRINGREF* marshalCookie,
+ INT32* iidParamIndex);
+};
+
+#endif // _MANAGEDMDIMPORT_H_
diff --git a/src/vm/marshalnative.cpp b/src/vm/marshalnative.cpp
new file mode 100644
index 0000000000..23793d544d
--- /dev/null
+++ b/src/vm/marshalnative.cpp
@@ -0,0 +1,2708 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: MarshalNative.cpp
+//
+
+//
+// FCalls for the PInvoke classlibs
+//
+
+
+#include "common.h"
+#include "clsload.hpp"
+#include "method.hpp"
+#include "class.h"
+#include "object.h"
+#include "field.h"
+#include "util.hpp"
+#include "excep.h"
+#include "siginfo.hpp"
+#include "threads.h"
+#include "stublink.h"
+#include "dllimport.h"
+#include "jitinterface.h"
+#include "eeconfig.h"
+#include "log.h"
+#include "fieldmarshaler.h"
+#include "cgensys.h"
+#include "gc.h"
+#include "security.h"
+#include "dbginterface.h"
+#include "objecthandle.h"
+#include "marshalnative.h"
+#include "fcall.h"
+#include "dllimportcallback.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "comdelegate.h"
+#include "handletablepriv.h"
+#include "mdaassistants.h"
+#include "typestring.h"
+#include "appdomain.inl"
+
+#ifdef FEATURE_COMINTEROP
+#include "comcallablewrapper.h"
+#include "cominterfacemarshaler.h"
+#include "commtmemberinfomap.h"
+#include "runtimecallablewrapper.h"
+#include "olevariant.h"
+#include "interoputil.h"
+#include "stubhelpers.h"
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+#include "olecontexthelpers.h"
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+//
+// NumParamBytes
+// Counts the number of bytes occupied by the method's parameters
+INT32 QCALLTYPE MarshalNative::NumParamBytes(MethodDesc * pMD)
+{
+ QCALL_CONTRACT;
+
+ // Arguments are checked on the managed side
+ PRECONDITION(pMD != NULL);
+
+ INT32 cbParamBytes = 0;
+
+ BEGIN_QCALL;
+
+ if (!(pMD->IsNDirect()))
+ COMPlusThrow(kArgumentException, IDS_EE_NOTNDIRECT);
+
+ // Read the unmanaged stack size from the stub MethodDesc. For vararg P/Invoke,
+ // this function returns size of the fixed portion of the stack.
+ // Note that the following code does not throw if the DllImport declaration is
+ // incorrect (such as a vararg method not marked as CallingConvention.Cdecl).
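+ // For example (hypothetical managed declaration, illustrative only), given
+ //   [DllImport("x.dll")] static extern int Sum(int a, int b, __arglist);
+ // the returned size covers only the fixed slots for a and b, not whatever
+ // is passed through __arglist.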
+
+ MethodDesc * pStubMD = NULL;
+
+ PCODE pTempStub = NULL;
+ pTempStub = GetStubForInteropMethod(pMD, NDIRECTSTUB_FL_FOR_NUMPARAMBYTES, &pStubMD);
+ _ASSERTE(pTempStub == NULL);
+
+ _ASSERTE(pStubMD != NULL && pStubMD->IsILStub());
+
+ cbParamBytes = pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize();
+
+#ifdef _X86_
+ if (((NDirectMethodDesc *)pMD)->IsThisCall())
+ {
+ // The size of 'this' is not included in native stack arg size.
+ cbParamBytes += sizeof(LPVOID);
+ }
+#endif // _X86_
+
+ END_QCALL;
+
+ return cbParamBytes;
+}
+
+
+// Prelink
+// Performs advance (eager) loading of an N/Direct library.
+VOID QCALLTYPE MarshalNative::Prelink(MethodDesc * pMD)
+{
+ QCALL_CONTRACT;
+
+ // Arguments are checked on the managed side
+ PRECONDITION(pMD != NULL);
+
+ // If the code is already ready, we are done; otherwise, we need to execute the prestub.
+ // This is purely a performance optimization, since it is always safe to execute the prestub twice.
+ if (!pMD->IsPointingToPrestub())
+ return;
+
+ // Silently ignore if not N/Direct and not runtime generated.
+ if (!(pMD->IsNDirect()) && !(pMD->IsRuntimeSupplied()))
+ return;
+
+ BEGIN_QCALL;
+
+ pMD->CheckRestore();
+ pMD->DoPrestub(NULL);
+
+ END_QCALL;
+}
+
+
+FCIMPL3(VOID, MarshalNative::StructureToPtr, Object* pObjUNSAFE, LPVOID ptr, CLR_BOOL fDeleteOld)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(ptr, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pObj = (OBJECTREF) pObjUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_1(pObj);
+
+ if (ptr == NULL)
+ COMPlusThrowArgumentNull(W("ptr"));
+ if (pObj == NULL)
+ COMPlusThrowArgumentNull(W("structure"));
+
+ // Code path will accept both regular layout objects and boxed value classes
+ // with layout.
+
+ MethodTable *pMT = pObj->GetMethodTable();
+
+ if (pMT->HasInstantiation())
+ COMPlusThrowArgumentException(W("structure"), W("Argument_NeedNonGenericObject"));
+
+ if (pMT->IsBlittable())
+ {
+ memcpyNoGCRefs(ptr, pObj->GetData(), pMT->GetNativeSize());
+ }
+ else if (pMT->HasLayout())
+ {
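+ // If requested, release native resources (such as strings allocated by
+ // the field marshalers) held by the old native block before overwriting it.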
+ if (fDeleteOld)
+ LayoutDestroyNative(ptr, pMT);
+
+ FmtClassUpdateNative( &(pObj), (LPBYTE)(ptr), NULL );
+ }
+ else
+ {
+ COMPlusThrowArgumentException(W("structure"), W("Argument_MustHaveLayoutOrBeBlittable"));
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(VOID, MarshalNative::PtrToStructureHelper, LPVOID ptr, Object* pObjIn, CLR_BOOL allowValueClasses)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(ptr, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pObj = ObjectToOBJECTREF(pObjIn);
+
+ if (ptr == NULL)
+ FCThrowArgumentNullVoid(W("ptr"));
+ if (pObj == NULL)
+ FCThrowArgumentNullVoid(W("structure"));
+
+ // Code path will accept regular layout objects.
+ MethodTable *pMT = pObj->GetMethodTable();
+
+ // Validate that the object passed in is not a value class.
+ if (!allowValueClasses && pMT->IsValueType())
+ {
+ FCThrowArgumentVoid(W("structure"), W("Argument_StructMustNotBeValueClass"));
+ }
+ else if (pMT->IsBlittable())
+ {
+ memcpyNoGCRefs(pObj->GetData(), ptr, pMT->GetNativeSize());
+ }
+ else if (pMT->HasLayout())
+ {
+ HELPER_METHOD_FRAME_BEGIN_1(pObj);
+ LayoutUpdateCLR((LPVOID*) &(pObj), Object::GetOffsetOfFirstField(), pMT, (LPBYTE)(ptr));
+ HELPER_METHOD_FRAME_END();
+ }
+ else
+ {
+ FCThrowArgumentVoid(W("structure"), W("Argument_MustHaveLayoutOrBeBlittable"));
+ }
+}
+FCIMPLEND
+
+
+FCIMPL2(VOID, MarshalNative::DestroyStructure, LPVOID ptr, ReflectClassBaseObject* refClassUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(ptr, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refClass = (REFLECTCLASSBASEREF) refClassUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(refClass);
+
+ if (ptr == NULL)
+ COMPlusThrowArgumentNull(W("ptr"));
+ if (refClass == NULL)
+ COMPlusThrowArgumentNull(W("structureType"));
+ if (refClass->GetMethodTable() != g_pRuntimeTypeClass)
+ COMPlusThrowArgumentException(W("structureType"), W("Argument_MustBeRuntimeType"));
+
+ TypeHandle th = refClass->GetType();
+
+ if (th.HasInstantiation())
+ COMPlusThrowArgumentException(W("structureType"), W("Argument_NeedNonGenericType"));
+
+ if (th.IsBlittable())
+ {
+ // ok to call with blittable structure, but no work to do in this case.
+ }
+ else if (th.HasLayout())
+ {
+ LayoutDestroyNative(ptr, th.GetMethodTable());
+ }
+ else
+ {
+ COMPlusThrowArgumentException(W("structureType"), W("Argument_MustHaveLayoutOrBeBlittable"));
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+/************************************************************************
+ * PInvoke.SizeOf(Class)
+ */
+FCIMPL2(UINT32, MarshalNative::SizeOfClass, ReflectClassBaseObject* refClassUNSAFE, CLR_BOOL throwIfNotMarshalable)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refClassUNSAFE));
+ }
+ CONTRACTL_END;
+
+ UINT32 rv = 0;
+ REFLECTCLASSBASEREF refClass = (REFLECTCLASSBASEREF)refClassUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refClass);
+
+ // refClass is validated to be a non-NULL RuntimeType by the callers
+ TypeHandle th = refClass->GetType();
+
+ if (throwIfNotMarshalable)
+ {
+ // Determine if the type is marshalable
+ if (!IsStructMarshalable(th))
+ {
+ // It isn't marshalable so throw an ArgumentException.
+ StackSString strTypeName;
+ TypeString::AppendType(strTypeName, th);
+ COMPlusThrow(kArgumentException, IDS_CANNOT_MARSHAL, strTypeName.GetUnicode(), NULL, NULL);
+ }
+ }
+
+ // The type is marshalable (or we don't care), so return its size.
+ rv = th.GetMethodTable()->GetNativeSize();
+ HELPER_METHOD_FRAME_END();
+ return rv;
+}
+FCIMPLEND
+
+
+/************************************************************************
+ * PInvoke.UnsafeAddrOfPinnedArrayElement(Array arr, int index)
+ */
+
+FCIMPL2(LPVOID, MarshalNative::FCUnsafeAddrOfPinnedArrayElement, ArrayBase *arr, INT32 index)
+{
+ FCALL_CONTRACT;
+
+ if (!arr)
+ FCThrowArgumentNull(W("arr"));
+
+ return (arr->GetDataPtr() + (index*arr->GetComponentSize()));
+}
+FCIMPLEND
+
+
+/************************************************************************
+ * PInvoke.OffsetOfHelper(Class, Field)
+ */
+FCIMPL1(UINT32, MarshalNative::OffsetOfHelper, ReflectFieldObject *pFieldUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pFieldUNSAFE));
+ }
+ CONTRACTL_END;
+
+ REFLECTFIELDREF refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE);
+
+ FieldDesc *pField = refField->GetField();
+ TypeHandle th = TypeHandle(pField->GetApproxEnclosingMethodTable());
+
+ // Determine if the type is marshalable.
+ if (!IsStructMarshalable(th))
+ {
+ // It isn't marshalable so throw an ArgumentException.
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refField);
+
+ StackSString strTypeName;
+ TypeString::AppendType(strTypeName, th);
+ COMPlusThrow(kArgumentException, IDS_CANNOT_MARSHAL, strTypeName.GetUnicode(), NULL, NULL);
+
+ HELPER_METHOD_FRAME_END();
+ }
+
+ FieldMarshaler *pFM = th.GetMethodTable()->GetLayoutInfo()->GetFieldMarshalers();
+ UINT numReferenceFields = th.GetMethodTable()->GetLayoutInfo()->GetNumCTMFields();
+
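+ // Field marshalers are stored back to back with a fixed stride of
+ // MAXFIELDMARSHALERSIZE bytes, which is why the loop advances the pointer
+ // with a raw byte bump instead of pFM++.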
+ while (numReferenceFields--)
+ {
+ if (pFM->GetFieldDesc() == pField)
+ {
+ return pFM->GetExternalOffset();
+ }
+ ((BYTE*&)pFM) += MAXFIELDMARSHALERSIZE;
+ }
+
+ UNREACHABLE_MSG("We should never hit this point since we already verified that the requested field was present from managed code");
+}
+FCIMPLEND
+
+FCIMPL2(Object*, MarshalNative::GetDelegateForFunctionPointerInternal, LPVOID FPtr, ReflectClassBaseObject* refTypeUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(refTypeUNSAFE != NULL);
+ }
+ CONTRACTL_END;
+
+ OBJECTREF refDelegate = NULL;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF) refTypeUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refType, refDelegate);
+
+ // Retrieve the method table from the RuntimeType. We already verified in managed
+ // code that the type was a RuntimeType that represented a delegate. Because type handles
+ // for delegates must have a method table, we are safe in telling prefix to assume it below.
+ MethodTable* pMT = refType->GetType().GetMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+ refDelegate = COMDelegate::ConvertToDelegate(FPtr, pMT);
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(refDelegate);
+}
+FCIMPLEND
+
+FCIMPL1(LPVOID, MarshalNative::GetFunctionPointerForDelegateInternal, Object* refDelegateUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ LPVOID pFPtr = NULL;
+
+ OBJECTREF refDelegate = (OBJECTREF) refDelegateUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refDelegate);
+
+ pFPtr = COMDelegate::ConvertToCallback(refDelegate);
+
+ HELPER_METHOD_FRAME_END();
+
+ return pFPtr;
+}
+FCIMPLEND
+
+//====================================================================
+// map a fiber cookie from the hosting APIs into a managed Thread object
+//====================================================================
+FCIMPL1(THREADBASEREF, MarshalNative::GetThreadFromFiberCookie, int cookie)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(cookie);
+
+ THREADBASEREF ret = 0;
+
+ // Set up a frame
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ // Any host who is sophisticated enough to correctly schedule fibers
+ // had better be sophisticated enough to give us a real fiber cookie.
+ Thread *pThread = *((Thread **) &cookie);
+
+ // Minimal check that it smells like a thread:
+ _ASSERTE(pThread->m_fPreemptiveGCDisabled.Load() == 0 || pThread->m_fPreemptiveGCDisabled.Load() == 1);
+
+ ret = (THREADBASEREF)(pThread->GetExposedObject());
+ HELPER_METHOD_FRAME_END();
+
+ return ret;
+}
+FCIMPLEND
+
+FCIMPL3(LPVOID, MarshalNative::GetUnmanagedThunkForManagedMethodPtr, LPVOID pfnMethodToWrap, PCCOR_SIGNATURE pbSignature, ULONG cbSignature)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ INJECT_FAULT(FCThrow(kOutOfMemoryException););
+ PRECONDITION(CheckPointer(pfnMethodToWrap, NULL_OK));
+ PRECONDITION(CheckPointer(pbSignature, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ LPVOID pThunk = NULL;
+#ifdef FEATURE_MIXEDMODE
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ if (pfnMethodToWrap == NULL)
+ COMPlusThrowArgumentNull(W("pfnMethodToWrap"));
+ if (pbSignature == NULL)
+ COMPlusThrowArgumentNull(W("pbSignature"));
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4996) // Suppress warning on call to deprecated method
+#endif
+ Module *pModule = SystemDomain::GetCallersModule(1);
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+ PREFIX_ASSUME(pModule != NULL);
+ pThunk = pModule->GetUMThunk(pfnMethodToWrap, pbSignature, cbSignature);
+ if (!pThunk)
+ COMPlusThrowOM();
+
+ HELPER_METHOD_FRAME_END();
+#endif // FEATURE_MIXEDMODE
+ return pThunk;
+}
+FCIMPLEND
+
+
+/************************************************************************
+ * PInvoke.GetManagedThunkForUnmanagedMethodPtr()
+ */
+FCIMPL3(LPVOID, MarshalNative::GetManagedThunkForUnmanagedMethodPtr, LPVOID pfnMethodToWrap, PCCOR_SIGNATURE pbSignature, ULONG cbSignature)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pfnMethodToWrap, NULL_OK));
+ PRECONDITION(CheckPointer(pbSignature, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ LPVOID pThunk = NULL;
+#ifdef FEATURE_MIXEDMODE
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ if (pfnMethodToWrap == NULL)
+ COMPlusThrowArgumentNull(W("pfnMethodToWrap"));
+ if (pbSignature == NULL)
+ COMPlusThrowArgumentNull(W("pbSignature"));
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4996) // Suppress warning on call to deprecated method
+#endif
+ Module *pModule = SystemDomain::GetCallersModule(1);
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+ if (!pModule)
+ ThrowOutOfMemory();
+
+ pThunk = pModule->GetMUThunk(pfnMethodToWrap, pbSignature, cbSignature);
+ if (!pThunk)
+ ThrowOutOfMemory();
+
+ HELPER_METHOD_FRAME_END();
+#endif // FEATURE_MIXEDMODE
+ return pThunk;
+}
+FCIMPLEND
+
+
+FCIMPL0(UINT32, MarshalNative::GetSystemMaxDBCSCharSize)
+{
+ FCALL_CONTRACT;
+
+ return GetMaxDBCSCharByteSize();
+}
+FCIMPLEND
+
+
+/************************************************************************
+ * Handles all PInvoke.Copy(array source, ....) methods.
+ */
+FCIMPL4(void, MarshalNative::CopyToNative, Object* psrcUNSAFE, INT32 startindex, LPVOID pdst, INT32 length)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pdst, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // The BCL code guarantees that an Array will be passed in
+ _ASSERTE(!psrcUNSAFE || psrcUNSAFE->GetMethodTable()->IsArray());
+
+ BASEARRAYREF psrc = (BASEARRAYREF)(OBJECTREF)psrcUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_1(psrc);
+
+ if (pdst == NULL)
+ COMPlusThrowArgumentNull(W("destination"));
+ if (psrc == NULL)
+ COMPlusThrowArgumentNull(W("source"));
+
+ SIZE_T numelem = psrc->GetNumComponents();
+
+ if (startindex < 0 || length < 0 || (SIZE_T)startindex + (SIZE_T)length > numelem)
+ {
+ COMPlusThrow(kArgumentOutOfRangeException, IDS_EE_COPY_OUTOFRANGE);
+ }
+
+ SIZE_T componentsize = psrc->GetComponentSize();
+
+ memcpyNoGCRefs(pdst,
+ componentsize*startindex + (BYTE*)(psrc->GetDataPtr()),
+ componentsize*length);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL4(void, MarshalNative::CopyToManaged, LPVOID psrc, Object* pdstUNSAFE, INT32 startindex, INT32 length)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(psrc, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // The BCL code guarantees that an Array will be passed in
+ _ASSERTE(!pdstUNSAFE || pdstUNSAFE->GetMethodTable()->IsArray());
+
+ BASEARRAYREF pdst = (BASEARRAYREF)(OBJECTREF)pdstUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_1(pdst);
+
+ if (pdst == NULL)
+ COMPlusThrowArgumentNull(W("destination"));
+ if (psrc == NULL)
+ COMPlusThrowArgumentNull(W("source"));
+ if (startindex < 0)
+ COMPlusThrowArgumentOutOfRange(W("startIndex"), W("ArgumentOutOfRange_Count"));
+ if (length < 0)
+ COMPlusThrowArgumentOutOfRange(W("length"), W("ArgumentOutOfRange_NeedNonNegNum"));
+
+ SIZE_T numelem = pdst->GetNumComponents();
+
+ if ((SIZE_T)startindex + (SIZE_T)length > numelem)
+ {
+ COMPlusThrow(kArgumentOutOfRangeException, IDS_EE_COPY_OUTOFRANGE);
+ }
+
+ SIZE_T componentsize = pdst->GetComponentSize();
+
+ _ASSERTE(CorTypeInfo::IsPrimitiveType(pdst->GetArrayElementTypeHandle().GetInternalCorElementType()));
+ memcpyNoGCRefs(componentsize*startindex + (BYTE*)(pdst->GetDataPtr()),
+ psrc,
+ componentsize*length);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+/************************************************************************
+ * PInvoke.GetLastWin32Error
+ */
+FCIMPL0(int, MarshalNative::GetLastWin32Error)
+{
+ FCALL_CONTRACT;
+
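+ // Note: this returns the value captured by the marshaling stub after the
+ // most recent P/Invoke declared with SetLastError=true; it is not a live
+ // read of the OS last-error slot.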
+ return (UINT32)(GetThread()->m_dwLastError);
+}
+FCIMPLEND
+
+
+/************************************************************************
+ * PInvoke.SetLastWin32Error
+ */
+FCIMPL1(void, MarshalNative::SetLastWin32Error, int error)
+{
+ FCALL_CONTRACT;
+
+ GetThread()->m_dwLastError = (DWORD)error;
+}
+FCIMPLEND
+
+
+/************************************************************************
+ * Support for the GCHandle class.
+ */
+
+// Check that the supplied object is valid to put in a pinned handle.
+// Throw an exception if not.
+void GCHandleValidatePinnedObject(OBJECTREF obj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // NULL is fine.
+ if (obj == NULL)
+ return;
+
+ if (obj->GetMethodTable() == g_pStringClass)
+ return;
+
+ if (obj->GetMethodTable()->IsArray())
+ {
+ BASEARRAYREF asArray = (BASEARRAYREF) obj;
+ if (CorTypeInfo::IsPrimitiveType(asArray->GetArrayElementType()))
+ return;
+
+ TypeHandle th = asArray->GetArrayElementTypeHandle();
+ if (!th.IsTypeDesc())
+ {
+ MethodTable *pMT = th.AsMethodTable();
+ if (pMT->IsValueType() && pMT->IsBlittable())
+ return;
+ }
+ }
+ else if (obj->GetMethodTable()->IsBlittable())
+ {
+ return;
+ }
+
+ COMPlusThrow(kArgumentException, IDS_EE_NOTISOMORPHIC);
+}
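+
+// To summarize the checks above: the shapes accepted for pinning are null,
+// System.String, arrays of primitive types, arrays of blittable value types,
+// and blittable objects; everything else raises IDS_EE_NOTISOMORPHIC.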
+
+FCIMPL2(LPVOID, MarshalNative::GCHandleInternalAlloc, Object *obj, int type)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF objRef(obj);
+ OBJECTHANDLE hnd = 0;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ // If it is a pinned handle, check the object type.
+ if (type == HNDTYPE_PINNED)
+ GCHandleValidatePinnedObject(objRef);
+
+ // Create the handle.
+ hnd = GetAppDomain()->CreateTypedHandle(objRef, type);
+
+ HELPER_METHOD_FRAME_END_POLL();
+ return (LPVOID) hnd;
+}
+FCIMPLEND
+
+// Free a GC handle.
+FCIMPL1(VOID, MarshalNative::GCHandleInternalFree, OBJECTHANDLE handle)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+#ifdef MDA_SUPPORTED
+ UINT handleType = HandleFetchType(handle);
+#endif
+
+ DestroyTypedHandle(handle);
+
+#ifdef MDA_SUPPORTED
+ if (handleType == HNDTYPE_PINNED)
+ {
+ MDA_TRIGGER_ASSISTANT(GcManagedToUnmanaged, TriggerGC());
+ }
+#endif
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+// Get the object referenced by a GC handle.
+FCIMPL1(LPVOID, MarshalNative::GCHandleInternalGet, OBJECTHANDLE handle)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF objRef;
+
+ objRef = ObjectFromHandle(handle);
+
+ return *((LPVOID*)&objRef);
+}
+FCIMPLEND
+
+// Update the object referenced by a GC handle.
+FCIMPL3(VOID, MarshalNative::GCHandleInternalSet, OBJECTHANDLE handle, Object *obj, CLR_BOOL isPinned)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF objRef(obj);
+ HELPER_METHOD_FRAME_BEGIN_1(objRef);
+
+ //<TODO>@todo: If the handle is pinned check the object type.</TODO>
+ if (isPinned)
+ {
+ GCHandleValidatePinnedObject(objRef);
+ }
+
+ // Update the stored object reference.
+ StoreObjectInHandle(handle, objRef);
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+// Update the object referenced by a GC handle.
+FCIMPL4(Object*, MarshalNative::GCHandleInternalCompareExchange, OBJECTHANDLE handle, Object *obj, Object* oldObj, CLR_BOOL isPinned)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF newObjref(obj);
+ OBJECTREF oldObjref(oldObj);
+ LPVOID ret = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ //<TODO>@todo: If the handle is pinned check the object type.</TODO>
+ if (isPinned)
+ GCHandleValidatePinnedObject(newObjref);
+
+ // Update the stored object reference.
+ ret = InterlockedCompareExchangeObjectInHandle(handle, newObjref, oldObjref);
+ HELPER_METHOD_FRAME_END_POLL();
+ return (Object*)ret;
+}
+FCIMPLEND
+
+// Get the address of a pinned object referenced by the supplied pinned
+// handle. This routine assumes the handle is pinned and does not check.
+FCIMPL1(LPVOID, MarshalNative::GCHandleInternalAddrOfPinnedObject, OBJECTHANDLE handle)
+{
+ FCALL_CONTRACT;
+
+ LPVOID p;
+ OBJECTREF objRef = ObjectFromHandle(handle);
+
+ if (objRef == NULL)
+ {
+ p = NULL;
+ }
+ else
+ {
+ // Get the interior pointer for the supported pinned types.
+ if (objRef->GetMethodTable() == g_pStringClass)
+ p = ((*(StringObject **)&objRef))->GetBuffer();
+ else if (objRef->GetMethodTable()->IsArray())
+ p = (*((ArrayBase**)&objRef))->GetDataPtr();
+ else
+ p = objRef->GetData();
+ }
+
+ return p;
+}
+FCIMPLEND
+
+// Make sure the handle is accessible from the current domain. (Throw if not.)
+FCIMPL1(VOID, MarshalNative::GCHandleInternalCheckDomain, OBJECTHANDLE handle)
+{
+ FCALL_CONTRACT;
+
+ if (handle == NULL)
+ FCThrowArgumentVoid(W("handle"), W("Argument_ArgumentZero"));
+
+ ADIndex index = HndGetHandleTableADIndex(HndGetHandleTable(handle));
+
+ if (index.m_dwIndex != 1 && index != GetAppDomain()->GetIndex())
+ FCThrowArgumentVoid(W("handle"), W("Argument_HandleLeak"));
+}
+FCIMPLEND
+
+// Get the type of the supplied GC handle.
+FCIMPL1(INT32, MarshalNative::GCHandleInternalGetHandleType, OBJECTHANDLE handle)
+{
+ FCALL_CONTRACT;
+
+ return HandleFetchType(handle);
+}
+FCIMPLEND
+
+FCIMPL1(INT32, MarshalNative::CalculateCount, ArrayWithOffsetData* pArrayWithOffset)
+{
+ FCALL_CONTRACT;
+
+ INT32 uRetVal = 0;
+ BASEARRAYREF arrayObj = pArrayWithOffset->m_Array;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(arrayObj);
+
+ SIZE_T cbTotalSize = 0;
+
+ if (arrayObj != NULL)
+ {
+ if (!(arrayObj->GetMethodTable()->IsArray()))
+ COMPlusThrow(kArgumentException, IDS_EE_NOTISOMORPHIC);
+ if (arrayObj->GetMethodTable()->IsMultiDimArray())
+ COMPlusThrow(kArgumentException, IDS_EE_NOTISOMORPHIC);
+
+ GCHandleValidatePinnedObject(arrayObj);
+ }
+
+ if (arrayObj == NULL)
+ {
+ if (pArrayWithOffset->m_cbOffset != 0)
+ COMPlusThrow(kIndexOutOfRangeException, IDS_EE_ARRAYWITHOFFSETOVERFLOW);
+
+ goto lExit;
+ }
+
+ cbTotalSize = arrayObj->GetNumComponents() * arrayObj->GetComponentSize();
+
+ if (cbTotalSize > MAX_SIZE_FOR_INTEROP)
+ COMPlusThrow(kArgumentException, IDS_EE_STRUCTARRAYTOOLARGE);
+
+ if (pArrayWithOffset->m_cbOffset > (INT32)cbTotalSize)
+ COMPlusThrow(kIndexOutOfRangeException, IDS_EE_ARRAYWITHOFFSETOVERFLOW);
+
+ uRetVal = (INT32)cbTotalSize - pArrayWithOffset->m_cbOffset;
+ _ASSERTE(uRetVal >= 0);
+
+lExit: ;
+ HELPER_METHOD_FRAME_END();
+ return uRetVal;
+}
+FCIMPLEND
+
+
+//====================================================================
+// *** Interop Helpers ***
+//====================================================================
+
+FCIMPL2(Object *, MarshalNative::GetExceptionForHR, INT32 errorCode, LPVOID errorInfo)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(FAILED(errorCode));
+ PRECONDITION(CheckPointer(errorInfo, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF RetExceptionObj = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(RetExceptionObj);
+
+ // Retrieve the IErrorInfo to use.
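+ // The managed caller passes (IntPtr)(-1) as a sentinel meaning "do not use
+ // any IErrorInfo", while NULL means "use the IErrorInfo currently associated
+ // with the thread" (hence the SafeGetErrorInfo lookup below).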
+ IErrorInfo *pErrorInfo = (IErrorInfo*)errorInfo;
+ if (pErrorInfo == (IErrorInfo*)(-1))
+ {
+ pErrorInfo = NULL;
+ }
+ else if (!pErrorInfo)
+ {
+ if (SafeGetErrorInfo(&pErrorInfo) != S_OK)
+ pErrorInfo = NULL;
+ }
+
+ ::GetExceptionForHR(errorCode, pErrorInfo, &RetExceptionObj);
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(RetExceptionObj);
+}
+FCIMPLEND
+
+FCIMPL2(void, MarshalNative::ThrowExceptionForHR, INT32 errorCode, LPVOID errorInfo)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(FAILED(errorCode));
+ PRECONDITION(CheckPointer(errorInfo, NULL_OK));
+ }
+ CONTRACTL_END;
+
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ // Retrieve the IErrorInfo to use.
+ IErrorInfo *pErrorInfo = (IErrorInfo*)errorInfo;
+ if (pErrorInfo == (IErrorInfo*)(-1))
+ {
+ pErrorInfo = NULL;
+ }
+ else if (!pErrorInfo)
+ {
+ if (SafeGetErrorInfo(&pErrorInfo) != S_OK)
+ pErrorInfo = NULL;
+ }
+
+ // Throw the exception based on the HR and the IErrorInfo.
+ COMPlusThrowHR(errorCode, pErrorInfo);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+FCIMPL1(int, MarshalNative::GetHRForException, Object* eUNSAFE)
+{
+ CONTRACTL {
+ NOTHROW; // Used by reverse COM IL stubs, so we must not throw exceptions back to COM
+ DISABLED(GC_TRIGGERS); // FCALLS with HELPER frames have issues with GC_TRIGGERS
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ int retVal = 0;
+ OBJECTREF e = (OBJECTREF) eUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_NOTHROW_1({ retVal = COR_E_STACKOVERFLOW; }, e);
+
+ retVal = SetupErrorInfo(e);
+
+ HELPER_METHOD_FRAME_END_NOTHROW();
+ return retVal;
+}
+FCIMPLEND
+
+FCIMPL1(int, MarshalNative::GetHRForException_WinRT, Object* eUNSAFE)
+{
+ CONTRACTL {
+ NOTHROW; // Used by reverse COM IL stubs, so we must not throw exceptions back to COM
+ DISABLED(GC_TRIGGERS); // FCALLS with HELPER frames have issues with GC_TRIGGERS
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ int retVal = 0;
+ OBJECTREF e = (OBJECTREF) eUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_NOTHROW_1({ retVal = COR_E_STACKOVERFLOW; }, e);
+
+ retVal = SetupErrorInfo(e, /* isWinRTScenario = */ TRUE);
+
+ HELPER_METHOD_FRAME_END_NOTHROW();
+ return retVal;
+}
+FCIMPLEND
+
+
+#ifdef FEATURE_COMINTEROP
+
+//====================================================================
+// map GUID to Type
+//====================================================================
+
+/*OBJECTREF */
+FCIMPL1(Object*, MarshalNative::GetLoadedTypeForGUID, GUID* pGuid)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pGuid, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF refRetVal = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refRetVal);
+
+ if (!pGuid)
+ COMPlusThrowArgumentNull(W("pGuid"));
+
+ AppDomain* pDomain = SystemDomain::GetCurrentDomain();
+ _ASSERTE(pDomain);
+
+ MethodTable* pMT = pDomain->LookupClass(*(pGuid));
+ if (pMT)
+ refRetVal = pMT->GetManagedClassObject();
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(refRetVal);
+}
+FCIMPLEND
+
+//====================================================================
+// map Type to ITypeInfo*
+//====================================================================
+FCIMPL1(ITypeInfo*, MarshalNative::GetITypeInfoForType, ReflectClassBaseObject* refClassUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ ITypeInfo* pTI = NULL;
+ REFLECTCLASSBASEREF refClass = (REFLECTCLASSBASEREF) refClassUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refClass);
+
+ // Check for null arguments.
+ if(!refClass)
+ COMPlusThrowArgumentNull(W("t"));
+
+ MethodTable *pRefMT = refClass->GetMethodTable();
+ if (pRefMT != g_pRuntimeTypeClass &&
+ pRefMT != MscorlibBinder::GetClass(CLASS__CLASS_INTROSPECTION_ONLY))
+ COMPlusThrowArgumentException(W("t"), W("Argument_MustBeRuntimeType"));
+
+ TypeHandle th = refClass->GetType();
+
+ if (th.GetMethodTable() != NULL && (th.IsProjectedFromWinRT() || th.IsExportedToWinRT()))
+ COMPlusThrowArgumentException(W("t"), W("Argument_ObjIsWinRTObject"));
+
+ if (th.HasInstantiation())
+ COMPlusThrowArgumentException(W("t"), W("Argument_NeedNonGenericType"));
+
+ // Make sure the type is visible from COM.
+ if (!::IsTypeVisibleFromCom(th))
+ COMPlusThrowArgumentException(W("t"), W("Argument_TypeMustBeVisibleFromCom"));
+
+ // Retrieve the EE class from the reflection type.
+ MethodTable* pMT = th.GetMethodTable();
+ _ASSERTE(pMT);
+
+ // Retrieve the ITypeInfo for the class.
+ IfFailThrow(GetITypeInfoForEEClass(pMT, &pTI, true));
+ _ASSERTE(pTI != NULL);
+
+ HELPER_METHOD_FRAME_END();
+ return pTI;
+}
+FCIMPLEND
+
+//====================================================================
+// return the IUnknown* for an Object.
+//====================================================================
+FCIMPL2(IUnknown*, MarshalNative::GetIUnknownForObjectNative, Object* orefUNSAFE, CLR_BOOL fOnlyInContext)
+{
+ FCALL_CONTRACT;
+
+ IUnknown* retVal = NULL;
+ OBJECTREF oref = (OBJECTREF) orefUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(oref);
+
+ HRESULT hr = S_OK;
+
+ if(!oref)
+ COMPlusThrowArgumentNull(W("o"));
+
+ // Ensure COM is started up.
+ EnsureComStarted();
+
+ if (fOnlyInContext && !IsObjectInContext(&oref))
+ retVal = NULL;
+ else
+ retVal = GetComIPFromObjectRef(&oref, ComIpType_OuterUnknown, NULL);
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
+
+//====================================================================
+// return the raw IUnknown* for a COM Object, independent of the current
+// context.
+// Does not AddRef the returned pointer.
+//====================================================================
+FCIMPL1(IUnknown*, MarshalNative::GetRawIUnknownForComObjectNoAddRef, Object* orefUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ IUnknown* retVal = NULL;
+ OBJECTREF oref = (OBJECTREF) orefUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(oref);
+
+ HRESULT hr = S_OK;
+
+ if(!oref)
+ COMPlusThrowArgumentNull(W("o"));
+
+ MethodTable* pMT = oref->GetTrueMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+ if(!pMT->IsComObjectType())
+ COMPlusThrow(kArgumentException, IDS_EE_SRC_OBJ_NOT_COMOBJECT);
+
+ // Ensure COM is started up.
+ EnsureComStarted();
+
+ RCWHolder pRCW(GetThread());
+ pRCW.Init(oref);
+
+ // Retrieve raw IUnknown * without AddRef for better performance
+ retVal = pRCW->GetRawIUnknown_NoAddRef();
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
+
+//====================================================================
+// return the IDispatch* for an Object.
+//====================================================================
+FCIMPL2(IDispatch*, MarshalNative::GetIDispatchForObjectNative, Object* orefUNSAFE, CLR_BOOL fOnlyInContext)
+{
+ FCALL_CONTRACT;
+
+ IDispatch* retVal = NULL;
+ OBJECTREF oref = (OBJECTREF) orefUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(oref);
+
+ HRESULT hr = S_OK;
+
+ if(!oref)
+ COMPlusThrowArgumentNull(W("o"));
+
+ // Ensure COM is started up.
+ EnsureComStarted();
+
+ if (fOnlyInContext && !IsObjectInContext(&oref))
+ retVal = NULL;
+ else
+ retVal = (IDispatch*)GetComIPFromObjectRef(&oref, ComIpType_Dispatch, NULL);
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
+
+//====================================================================
+// return the IUnknown* representing the interface for the Object
+// Object o should support Type T
+//====================================================================
+FCIMPL4(IUnknown*, MarshalNative::GetComInterfaceForObjectNative, Object* orefUNSAFE, ReflectClassBaseObject* refClassUNSAFE, CLR_BOOL fOnlyInContext, CLR_BOOL bEnableCustomizedQueryInterface)
+{
+ FCALL_CONTRACT;
+
+ IUnknown* retVal = NULL;
+ OBJECTREF oref = (OBJECTREF) orefUNSAFE;
+ REFLECTCLASSBASEREF refClass = (REFLECTCLASSBASEREF) refClassUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_2(oref, refClass);
+
+ HRESULT hr = S_OK;
+
+ if(!oref)
+ COMPlusThrowArgumentNull(W("o"));
+ if(!refClass)
+ COMPlusThrowArgumentNull(W("t"));
+
+ // Ensure COM is started up.
+ EnsureComStarted();
+
+ if (refClass->GetMethodTable() != g_pRuntimeTypeClass)
+ COMPlusThrowArgumentException(W("t"), W("Argument_MustBeRuntimeType"));
+
+ TypeHandle th = refClass->GetType();
+
+ if (!th.SupportsGenericInterop(TypeHandle::Interop_NativeToManaged))
+ {
+ if (th.HasInstantiation())
+ COMPlusThrowArgumentException(W("t"), W("Argument_NeedNonGenericType"));
+
+ if (oref->GetMethodTable()->HasInstantiation())
+ COMPlusThrowArgumentException(W("o"), W("Argument_NeedNonGenericObject"));
+ }
+
+ // If the IID being asked for does not represent an interface then
+ // throw an argument exception.
+ if (!th.IsInterface())
+ COMPlusThrowArgumentException(W("t"), W("Arg_MustBeInterface"));
+
+ // If the interface being asked for is not visible from COM then
+ // throw an argument exception.
+ if (!::IsTypeVisibleFromCom(th))
+ COMPlusThrowArgumentException(W("t"), W("Argument_TypeMustBeVisibleFromCom"));
+
+ if (fOnlyInContext && !IsObjectInContext(&oref))
+ retVal = NULL;
+ else
+ retVal = GetComIPFromObjectRef(&oref, th.GetMethodTable(), TRUE, bEnableCustomizedQueryInterface);
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
+
+//====================================================================
+// return an Object for IUnknown
+//====================================================================
+FCIMPL1(Object*, MarshalNative::GetObjectForIUnknown, IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pUnk, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF oref = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(oref);
+
+ HRESULT hr = S_OK;
+
+ if(!pUnk)
+ COMPlusThrowArgumentNull(W("pUnk"));
+
+ // Ensure COM is started up.
+ EnsureComStarted();
+
+ GetObjectRefFromComIP(&oref, pUnk);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(oref);
+}
+FCIMPLEND
+
+
+FCIMPL1(Object*, MarshalNative::GetUniqueObjectForIUnknown, IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pUnk, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF oref = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(oref);
+
+ HRESULT hr = S_OK;
+
+ if(!pUnk)
+ COMPlusThrowArgumentNull(W("pUnk"));
+
+ // Ensure COM is started up.
+ EnsureComStarted();
+
+ GetObjectRefFromComIP(&oref, pUnk, NULL, NULL, ObjFromComIP::UNIQUE_OBJECT);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(oref);
+}
+FCIMPLEND
+
+//====================================================================
+// return an Object for IUnknown, using the Type T.
+// NOTE:
+// Type T should be either a COM-imported Type or a sub-type of a COM-imported Type
+//====================================================================
+FCIMPL2(Object*, MarshalNative::GetTypedObjectForIUnknown, IUnknown* pUnk, ReflectClassBaseObject* refClassUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pUnk, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF oref = NULL;
+ REFLECTCLASSBASEREF refClass = (REFLECTCLASSBASEREF) refClassUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refClass, oref);
+
+ HRESULT hr = S_OK;
+
+ MethodTable* pMTClass = NULL;
+
+ if(!pUnk)
+ COMPlusThrowArgumentNull(W("pUnk"));
+
+ if(refClass != NULL)
+ {
+ if (refClass->GetMethodTable() != g_pRuntimeTypeClass)
+ COMPlusThrowArgumentException(W("t"), W("Argument_MustBeRuntimeType"));
+
+ TypeHandle th = refClass->GetType();
+
+ if (th.GetMethodTable() != NULL && (th.IsProjectedFromWinRT() || th.IsExportedToWinRT()))
+ COMPlusThrowArgumentException(W("t"), W("Argument_ObjIsWinRTObject"));
+
+ if (th.HasInstantiation())
+ COMPlusThrowArgumentException(W("t"), W("Argument_NeedNonGenericType"));
+
+ pMTClass = th.GetMethodTable();
+ }
+ else
+ COMPlusThrowArgumentNull(W("t"));
+
+
+ // Ensure COM is started up.
+ EnsureComStarted();
+
+ GetObjectRefFromComIP(&oref, pUnk, pMTClass);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(oref);
+}
+FCIMPLEND
+
+FCIMPL2(IUnknown*, MarshalNative::CreateAggregatedObject, IUnknown* pOuter, Object* refObjUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pOuter, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ IUnknown* pInner = NULL;
+
+ OBJECTREF oref = (OBJECTREF)refObjUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(oref);
+
+ HRESULT hr = S_OK;
+
+ if (!pOuter)
+ COMPlusThrowArgumentNull(W("pOuter"));
+
+ if (oref == NULL)
+ COMPlusThrowArgumentNull(W("o"));
+
+ MethodTable *pMT = oref->GetTrueMethodTable();
+ if (pMT->IsWinRTObjectType() || pMT->IsExportedToWinRT())
+ COMPlusThrowArgumentException(W("o"), W("Argument_ObjIsWinRTObject"));
+
+ // Ensure COM is started up.
+ EnsureComStarted();
+
+ if (NULL != ComCallWrapper::GetWrapperForObject(oref))
+ COMPlusThrowArgumentException(W("o"), W("Argument_AlreadyACCW"));
+
+ // Get the CCW for the object; this can trigger a GC.
+ CCWHolder pWrap = ComCallWrapper::InlineGetWrapper(&oref);
+
+ // Aggregation support: attach pOuter as the controlling outer IUnknown and return the inner, non-delegating IUnknown.
+ pWrap->InitializeOuter(pOuter);
+ IfFailThrow(pWrap->GetInnerUnknown((LPVOID*)&pInner));
+
+ HELPER_METHOD_FRAME_END();
+ return pInner;
+}
+FCIMPLEND
+
+//====================================================================
+// Free unused RCWs in the current COM+ context.
+//====================================================================
+FCIMPL0(void, MarshalNative::CleanupUnusedObjectsInCurrentContext)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ if (g_pRCWCleanupList)
+ {
+ g_pRCWCleanupList->CleanupWrappersInCurrentCtxThread(
+ TRUE, // fWait
+ TRUE, // fManualCleanupRequested
+ TRUE // bIgnoreComObjectEagerCleanupSetting
+ );
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+//====================================================================
+// Checks whether there are RCWs from any context available for cleanup.
+//====================================================================
+FCIMPL0(FC_BOOL_RET, MarshalNative::AreComObjectsAvailableForCleanup)
+{
+ FCALL_CONTRACT;
+
+ BOOL retVal = FALSE;
+ if (g_pRCWCleanupList)
+ {
+ retVal = !g_pRCWCleanupList->IsEmpty();
+ }
+
+ FC_RETURN_BOOL(retVal);
+}
+FCIMPLEND
+
+//====================================================================
+// check if the object is a classic COM component
+//====================================================================
+FCIMPL1(FC_BOOL_RET, MarshalNative::IsComObject, Object* objUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ BOOL retVal = FALSE;
+ OBJECTREF obj = (OBJECTREF) objUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(obj);
+
+ if(!obj)
+ COMPlusThrowArgumentNull(W("o"));
+
+ MethodTable* pMT = obj->GetTrueMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+ retVal = pMT->IsComObjectType();
+
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(retVal);
+}
+FCIMPLEND
+
+
+//====================================================================
+// free the COM component and zombie this object if the ref count hits 0.
+// Further use of this object might throw an exception.
+//====================================================================
+FCIMPL1(INT32, MarshalNative::ReleaseComObject, Object* objUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ INT32 retVal = 0;
+ OBJECTREF obj = (OBJECTREF) objUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(obj);
+
+ if(!obj)
+ COMPlusThrowArgumentNull(W("o"));
+
+ MethodTable* pMT = obj->GetTrueMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+ if(!pMT->IsComObjectType())
+ COMPlusThrow(kArgumentException, IDS_EE_SRC_OBJ_NOT_COMOBJECT);
+
+ // remove the wrapper from the object
+ retVal = RCW::ExternalRelease(&obj);
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
+
+//====================================================================
+// free the COM component and zombie this object.
+// Further use of this object might throw an exception.
+//====================================================================
+FCIMPL1(void, MarshalNative::FinalReleaseComObject, Object* objUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF obj = (OBJECTREF) objUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(obj);
+
+ if(!obj)
+ COMPlusThrowArgumentNull(W("o"));
+
+ MethodTable* pMT = obj->GetTrueMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+ if(!pMT->IsComObjectType())
+ COMPlusThrow(kArgumentException, IDS_EE_SRC_OBJ_NOT_COMOBJECT);
+
+ // remove the wrapper from the object
+ RCW::FinalExternalRelease(&obj);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+//====================================================================
+// This method takes the given COM object and wraps it in an object
+// of the specified type. The type must be derived from __ComObject.
+//====================================================================
+FCIMPL2(Object*, MarshalNative::InternalCreateWrapperOfType, Object* objUNSAFE, ReflectClassBaseObject* refClassUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(objUNSAFE != NULL);
+ PRECONDITION(refClassUNSAFE != NULL);
+ }
+ CONTRACTL_END;
+
+ struct _gc
+ {
+ OBJECTREF refRetVal;
+ OBJECTREF obj;
+ REFLECTCLASSBASEREF refClass;
+ } gc;
+
+ gc.refRetVal = NULL;
+ gc.obj = (OBJECTREF) objUNSAFE;
+ gc.refClass = (REFLECTCLASSBASEREF) refClassUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ // Validate the arguments.
+ if (gc.refClass->GetMethodTable() != g_pRuntimeTypeClass)
+ COMPlusThrowArgumentException(W("t"), W("Argument_MustBeRuntimeType"));
+
+ // Retrieve the class of the COM object.
+ MethodTable *pObjMT = gc.obj->GetMethodTable();
+
+ // Retrieve the method table for new wrapper type.
+ MethodTable *pNewWrapMT = gc.refClass->GetType().GetMethodTable();
+
+ // Validate that the destination type is a COM object.
+ _ASSERTE(pNewWrapMT->IsComObjectType());
+
+ BOOL fSet = FALSE;
+
+ // Start by checking if we can cast the obj to the wrapper type.
+#ifdef FEATURE_REMOTING
+ if (pObjMT->IsTransparentProxy())
+ {
+ if (CRemotingServices::CheckCast(gc.obj, pNewWrapMT))
+ {
+ gc.refRetVal = gc.obj;
+ fSet = TRUE;
+ }
+ }
+ else
+#endif
+ if (TypeHandle(pObjMT).CanCastTo(TypeHandle(pNewWrapMT)))
+ {
+ gc.refRetVal = gc.obj;
+ fSet = TRUE;
+ }
+
+ if (!fSet)
+ {
+ // Validate that the source object is a valid COM object.
+ _ASSERTE(pObjMT->IsComObjectType());
+
+ RCWHolder pRCW(GetThread());
+
+ RCWPROTECT_BEGIN(pRCW, gc.obj);
+
+ // Make sure the COM object supports all the COM imported interfaces that the new
+ // wrapper class implements.
+ MethodTable::InterfaceMapIterator it = pNewWrapMT->IterateInterfaceMap();
+ while (it.Next())
+ {
+ MethodTable *pItfMT = it.GetInterface();
+ if (pItfMT->IsComImport())
+ {
+ if (!Object::SupportsInterface(gc.obj, pItfMT))
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOT_COERCE_COMOBJECT);
+ }
+ }
+
+ // Create the duplicate wrapper object.
+ {
+ RCWHolder pNewRCW(GetThread());
+ pRCW->CreateDuplicateWrapper(pNewWrapMT, &pNewRCW);
+
+ gc.refRetVal = pNewRCW->GetExposedObject();
+ }
+
+ RCWPROTECT_END(pRCW);
+ }
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(gc.refRetVal);
+}
+FCIMPLEND
+
+
+//====================================================================
+// check if the type is visible from COM.
+//====================================================================
+FCIMPL1(FC_BOOL_RET, MarshalNative::IsTypeVisibleFromCom, ReflectClassBaseObject* refClassUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ BOOL retVal = FALSE;
+ REFLECTCLASSBASEREF refClass = (REFLECTCLASSBASEREF) refClassUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refClass);
+
+ // Validate the arguments.
+ if (refClass == NULL)
+ COMPlusThrowArgumentNull(W("t"));
+
+ MethodTable *pRefMT = refClass->GetMethodTable();
+ if (pRefMT != g_pRuntimeTypeClass &&
+ pRefMT != MscorlibBinder::GetClass(CLASS__CLASS_INTROSPECTION_ONLY))
+ COMPlusThrowArgumentException(W("t"), W("Argument_MustBeRuntimeType"));
+
+ // Call the internal version of IsTypeVisibleFromCom.
+ retVal = ::IsTypeVisibleFromCom(refClass->GetType());
+
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(retVal);
+}
+FCIMPLEND
+
+
+//====================================================================
+// IUnknown Helpers
+//====================================================================
+// IUnknown::QueryInterface
+FCIMPL3(HRESULT, MarshalNative::QueryInterface, IUnknown* pUnk, REFGUID iid, void** ppv)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pUnk, NULL_OK));
+ PRECONDITION(CheckPointer(ppv));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ if (!pUnk)
+ COMPlusThrowArgumentNull(W("pUnk"));
+
+ hr = SafeQueryInterface(pUnk,iid,(IUnknown**)ppv);
+ LogInteropQI(pUnk, iid, hr, "PInvoke::QI");
+
+ HELPER_METHOD_FRAME_END();
+ return hr;
+}
+FCIMPLEND
+
+// IUnknown::AddRef
+FCIMPL1(ULONG, MarshalNative::AddRef, IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pUnk, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ ULONG cbRef = 0;
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ if (!pUnk)
+ COMPlusThrowArgumentNull(W("pUnk"));
+
+ cbRef = SafeAddRef(pUnk);
+ LogInteropAddRef(pUnk, cbRef, "PInvoke.AddRef");
+
+ HELPER_METHOD_FRAME_END();
+ return cbRef;
+}
+FCIMPLEND
+
+//IUnknown::Release
+FCIMPL1(ULONG, MarshalNative::Release, IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pUnk, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ ULONG cbRef = 0;
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ if (!pUnk)
+ COMPlusThrowArgumentNull(W("pUnk"));
+
+ cbRef = SafeRelease(pUnk);
+ LogInteropRelease(pUnk, cbRef, "PInvoke.Release");
+
+ HELPER_METHOD_FRAME_END();
+ return cbRef;
+}
+FCIMPLEND
+
+FCIMPL2(void, MarshalNative::GetNativeVariantForObject, Object* ObjUNSAFE, LPVOID pDestNativeVariant)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pDestNativeVariant, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF Obj = (OBJECTREF) ObjUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(Obj);
+
+ if (pDestNativeVariant == NULL)
+ COMPlusThrowArgumentNull(W("pDstNativeVariant"));
+
+ if (Obj == NULL)
+ {
+ // Will return empty variant in MarshalOleVariantForObject
+ }
+ else if (Obj->GetMethodTable()->HasInstantiation())
+ {
+ COMPlusThrowArgumentException(W("obj"), W("Argument_NeedNonGenericObject"));
+ }
+
+ // initialize the output variant
+ SafeVariantInit((VARIANT*)pDestNativeVariant);
+ OleVariant::MarshalOleVariantForObject(&Obj, (VARIANT*)pDestNativeVariant);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL1(Object*, MarshalNative::GetObjectForNativeVariant, LPVOID pSrcNativeVariant)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pSrcNativeVariant, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF Obj = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(Obj);
+
+ if (pSrcNativeVariant == NULL)
+ COMPlusThrowArgumentNull(W("pSrcNativeVariant"));
+
+ OleVariant::MarshalObjectForOleVariant((VARIANT*)pSrcNativeVariant, &Obj);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(Obj);
+}
+FCIMPLEND
+
+FCIMPL2(Object*, MarshalNative::GetObjectsForNativeVariants, VARIANT* aSrcNativeVariant, int cVars)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ INJECT_FAULT(FCThrow(kOutOfMemoryException););
+ PRECONDITION(CheckPointer(aSrcNativeVariant, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ PTRARRAYREF Array = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(Array);
+
+ if (aSrcNativeVariant == NULL)
+ COMPlusThrowArgumentNull(W("aSrcNativeVariant"));
+ if (cVars < 0)
+ COMPlusThrowArgumentOutOfRange(W("cVars"), W("ArgumentOutOfRange_NeedNonNegNum"));
+
+ OBJECTREF Obj = NULL;
+ GCPROTECT_BEGIN(Obj)
+ {
+ // Allocate the array of objects.
+ Array = (PTRARRAYREF)AllocateObjectArray(cVars, g_pObjectClass);
+
+ // Convert each VARIANT in the array into an object.
+ for (int i = 0; i < cVars; i++)
+ {
+ OleVariant::MarshalObjectForOleVariant(&aSrcNativeVariant[i], &Obj);
+ Array->SetAt(i, Obj);
+ }
+ }
+ GCPROTECT_END();
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(Array);
+}
+FCIMPLEND
+
+
+FCIMPL2(void, MarshalNative::DoGenerateGuidForType, GUID * result, ReflectClassBaseObject* refTypeUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF) refTypeUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(refType);
+ GCPROTECT_BEGININTERIOR (result);
+
+ // Validate the arguments.
+ if (refType == NULL)
+ COMPlusThrowArgumentNull(W("type"));
+
+ MethodTable *pRefMT = refType->GetMethodTable();
+ if (pRefMT != g_pRuntimeTypeClass &&
+ pRefMT != MscorlibBinder::GetClass(CLASS__CLASS_INTROSPECTION_ONLY))
+ COMPlusThrowArgumentException(W("type"), W("Argument_MustBeRuntimeType"));
+ if (result == NULL)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_GUID"));
+
+ // Check to see if the type is a COM object or not.
+ if (IsComObjectClass(refType->GetType()))
+ {
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ // The type is a COM object, so we get the GUID from the class factory.
+ SyncBlock* pSyncBlock = refType->GetSyncBlock();
+
+ ComClassFactory* pComClsFac = pSyncBlock->GetInteropInfo()->GetComClassFactory();
+ memcpy(result, &pComClsFac->m_rclsid, sizeof(GUID));
+#else // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ memset(result,0,sizeof(GUID));
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ }
+ else
+ {
+ // The type is a normal COM+ class so we need to generate the GUID.
+ TypeHandle classTH = refType->GetType();
+ classTH.GetMethodTable()->GetGuid(result, TRUE);
+ }
+
+ GCPROTECT_END ();
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL2(void, MarshalNative::DoGetTypeLibGuid, GUID * result, Object* refTlbUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refTlb = (OBJECTREF) refTlbUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(refTlb);
+ GCPROTECT_BEGININTERIOR (result);
+
+ if (refTlb == NULL)
+ COMPlusThrowArgumentNull(W("pTLB"), W("ArgumentNull_Generic"));
+
+ // Ensure COM is started up.
+ EnsureComStarted();
+
+ SafeComHolder<ITypeLib> pTLB = (ITypeLib*)GetComIPFromObjectRef(&refTlb, IID_ITypeLib);
+ if (!pTLB)
+ COMPlusThrow(kArgumentException, W("Arg_NoITypeLib"));
+
+ GCX_PREEMP();
+
+ // Retrieve the TLIBATTR.
+ TLIBATTR *pAttr;
+ IfFailThrow(pTLB->GetLibAttr(&pAttr));
+
+ // Extract the guid from the TLIBATTR.
+ *result = pAttr->guid;
+
+ // Release the TLIBATTR now that we have the GUID.
+ pTLB->ReleaseTLibAttr(pAttr);
+
+ GCPROTECT_END ();
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL1(LCID, MarshalNative::GetTypeLibLcid, Object* refTlbUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ LCID retVal = 0;
+ OBJECTREF refTlb = (OBJECTREF) refTlbUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refTlb);
+
+ if (refTlb == NULL)
+ COMPlusThrowArgumentNull(W("pTLB"), W("ArgumentNull_Generic"));
+
+ // Ensure COM is started up.
+ EnsureComStarted();
+
+ SafeComHolder<ITypeLib> pTLB = (ITypeLib*)GetComIPFromObjectRef(&refTlb, IID_ITypeLib);
+ if (!pTLB)
+ COMPlusThrow(kArgumentException, W("Arg_NoITypeLib"));
+
+ GCX_PREEMP();
+
+ // Retrieve the TLIBATTR.
+ TLIBATTR *pAttr;
+ IfFailThrow(pTLB->GetLibAttr(&pAttr));
+
+ // Extract the LCID from the TLIBATTR.
+ retVal = pAttr->lcid;
+
+ // Release the TLIBATTR now that we have the LCID.
+ pTLB->ReleaseTLibAttr(pAttr);
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
+
+FCIMPL3(void, MarshalNative::GetTypeLibVersion, Object* refTlbUNSAFE, int *pMajor, int *pMinor)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refTlb = (OBJECTREF) refTlbUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(refTlb);
+
+ if (refTlb == NULL)
+ COMPlusThrowArgumentNull(W("typeLibrary"), W("ArgumentNull_Generic"));
+
+ // Ensure COM is started up.
+ EnsureComStarted();
+
+ SafeComHolder<ITypeLib> pTLB = (ITypeLib*)GetComIPFromObjectRef(&refTlb, IID_ITypeLib);
+ if (!pTLB)
+ COMPlusThrow(kArgumentException, W("Arg_NoITypeLib"));
+
+ GCX_PREEMP();
+
+ // Retrieve the TLIBATTR.
+ TLIBATTR *pAttr;
+ IfFailThrow(pTLB->GetLibAttr(&pAttr));
+
+ // Extract the version numbers from the TLIBATTR.
+ *pMajor = pAttr->wMajorVerNum;
+ *pMinor = pAttr->wMinorVerNum;
+
+ // Release the TLIBATTR now that we have the version numbers.
+ pTLB->ReleaseTLibAttr(pAttr);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL2(void, MarshalNative::DoGetTypeInfoGuid, GUID * result, Object* refTypeInfoUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refTypeInfo = (OBJECTREF) refTypeInfoUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(refTypeInfo);
+ GCPROTECT_BEGININTERIOR (result);
+
+ if (refTypeInfo == NULL)
+ COMPlusThrowArgumentNull(W("typeInfo"), W("ArgumentNull_Generic"));
+
+ // Ensure COM is started up.
+ EnsureComStarted();
+
+ SafeComHolder<ITypeInfo> pTI = (ITypeInfo*)GetComIPFromObjectRef(&refTypeInfo, IID_ITypeInfo);
+ if (!pTI)
+ COMPlusThrow(kArgumentException, W("Arg_NoITypeInfo"));
+
+ GCX_PREEMP();
+
+ // Retrieve the TYPEATTR.
+ TYPEATTR *pAttr;
+ IfFailThrow(pTI->GetTypeAttr(&pAttr));
+
+ // Extract the guid from the TYPEATTR.
+ *result = pAttr->guid;
+
+ // Release the TYPEATTR now that we have the GUID.
+ pTI->ReleaseTypeAttr(pAttr);
+
+ GCPROTECT_END ();
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL2(void, MarshalNative::DoGetTypeLibGuidForAssembly, GUID * result, AssemblyBaseObject* refAsmUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ // Validate the arguments.
+ _ASSERTE(refAsmUNSAFE != NULL);
+ _ASSERTE(result != NULL);
+
+ ASSEMBLYREF refAsm = (ASSEMBLYREF) refAsmUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(refAsm);
+ GCPROTECT_BEGININTERIOR (result)
+
+ HRESULT hr = S_OK;
+
+ // Retrieve the assembly from the ASSEMBLYREF.
+ Assembly *pAssembly = refAsm->GetAssembly();
+ _ASSERTE(pAssembly);
+
+ // Retrieve the TLBID for the assembly.
+ IfFailThrow(::GetTypeLibGuidForAssembly(pAssembly, result));
+
+ GCPROTECT_END ();
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, MarshalNative::GetTypeLibVersionForAssembly, AssemblyBaseObject* refAsmUNSAFE, INT32 *pMajorVersion, INT32 *pMinorVersion)
+{
+ FCALL_CONTRACT;
+
+ // Validate the arguments.
+ _ASSERTE(refAsmUNSAFE != NULL);
+
+ ASSEMBLYREF refAsm = (ASSEMBLYREF) refAsmUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(refAsm);
+
+ HRESULT hr = S_OK;
+
+ // Retrieve the assembly from the ASSEMBLYREF.
+ Assembly *pAssembly = refAsm->GetAssembly();
+ _ASSERTE(pAssembly);
+
+ // Retrieve the version for the assembly.
+ USHORT usMaj, usMin;
+ IfFailThrow(::GetTypeLibVersionForAssembly(pAssembly, &usMaj, &usMin));
+
+ // Set the out parameters.
+ *pMajorVersion = usMaj;
+ *pMinorVersion = usMin;
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL1(int, MarshalNative::GetStartComSlot, ReflectClassBaseObject* tUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ int retVal = 0;
+ REFLECTCLASSBASEREF t = (REFLECTCLASSBASEREF) tUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(t);
+
+ if (!(t))
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Generic"));
+
+ MethodTable *pTMT = t->GetMethodTable();
+ if (pTMT != g_pRuntimeTypeClass &&
+ pTMT != MscorlibBinder::GetClass(CLASS__CLASS_INTROSPECTION_ONLY))
+ COMPlusThrowArgumentException(W("t"), W("Argument_MustBeRuntimeType"));
+
+ MethodTable *pMT = t->GetType().GetMethodTable();
+ if (NULL == pMT)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Generic"));
+
+ // It does not make sense to call this service for types that are not visible from COM.
+ if (!::IsTypeVisibleFromCom(TypeHandle(pMT)))
+ COMPlusThrowArgumentException(W("t"), W("Argument_TypeMustBeVisibleFromCom"));
+
+ retVal = GetComSlotInfo(pMT, &pMT);
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
+
+
+FCIMPL1(int, MarshalNative::GetEndComSlot, ReflectClassBaseObject* tUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ int retVal = 0;
+ REFLECTCLASSBASEREF t = (REFLECTCLASSBASEREF) tUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(t);
+
+ int StartSlot = -1;
+
+ if (!(t))
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Generic"));
+
+ MethodTable *pTMT = t->GetMethodTable();
+ if (pTMT != g_pRuntimeTypeClass &&
+ pTMT != MscorlibBinder::GetClass(CLASS__CLASS_INTROSPECTION_ONLY))
+ COMPlusThrowArgumentException(W("t"), W("Argument_MustBeRuntimeType"));
+
+ TypeHandle classTH = t->GetType();
+ MethodTable *pMT = classTH.GetMethodTable();
+ if (NULL == pMT)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Generic"));
+
+ // It does not make sense to call this service for types that are not visible from COM.
+ if (!::IsTypeVisibleFromCom(classTH))
+ COMPlusThrowArgumentException(W("t"), W("Argument_TypeMustBeVisibleFromCom"));
+
+ // Retrieve the start slot and the default interface class.
+ StartSlot = GetComSlotInfo(pMT, &pMT);
+ if (StartSlot == -1)
+ {
+ retVal = StartSlot;
+ }
+ else
+ {
+ // Retrieve the map of members.
+ ComMTMemberInfoMap MemberMap(pMT);
+ MemberMap.Init(sizeof(void*));
+
+ // The end slot is the start slot plus the number of user defined methods.
+ retVal = int(StartSlot + MemberMap.GetMethods().Size() - 1);
+ }
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
+
+FCIMPL1(int, MarshalNative::GetComSlotForMethodInfo, ReflectMethodObject* pMethodUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMethodUNSAFE));
+ PRECONDITION(pMethodUNSAFE->GetMethod()->GetMethodTable()->IsInterface());
+ }
+ CONTRACTL_END;
+
+ REFLECTMETHODREF refMethod = (REFLECTMETHODREF)ObjectToOBJECTREF(pMethodUNSAFE);
+ MethodDesc *pMeth = refMethod->GetMethod();
+ _ASSERTE(pMeth);
+ int retVal = 0;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refMethod);
+
+ retVal = pMeth->GetComSlot();
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
+
+FCIMPL3(Object*, MarshalNative::GetMethodInfoForComSlot, ReflectClassBaseObject* tUNSAFE, INT32 slot, ComMemberType* pMemberType)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMemberType, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF refRetVal = NULL;
+ REFLECTCLASSBASEREF t = (REFLECTCLASSBASEREF) tUNSAFE;
+ REFLECTCLASSBASEREF tInterface = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(t, tInterface);
+
+ int StartSlot = -1;
+ OBJECTREF MemberInfoObj = NULL;
+
+ if (!(t))
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Generic"));
+
+ MethodTable *pTMT = t->GetMethodTable();
+ if (pTMT != g_pRuntimeTypeClass &&
+ pTMT != MscorlibBinder::GetClass(CLASS__CLASS_INTROSPECTION_ONLY))
+ COMPlusThrowArgumentException(W("t"), W("Argument_MustBeRuntimeType"));
+
+ TypeHandle type = t->GetType();
+ MethodTable *pMT= type.GetMethodTable();
+ if (NULL == pMT)
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Generic"));
+
+ // It does not make sense to call this service for types that are not visible from COM.
+ if (!::IsTypeVisibleFromCom(type))
+ COMPlusThrowArgumentException(W("t"), W("Argument_TypeMustBeVisibleFromCom"));
+
+ // Retrieve the start slot and the default interface class.
+ StartSlot = GetComSlotInfo(pMT, &pMT);
+ if (StartSlot == -1)
+ COMPlusThrowArgumentOutOfRange(W("slot"), W("ArgumentOutOfRange_Count"));
+
+ // Retrieve the map of members.
+ ComMTMemberInfoMap MemberMap(pMT);
+ MemberMap.Init(sizeof(void*));
+ CQuickArray<ComMTMethodProps> &rProps = MemberMap.GetMethods();
+
+ // Update the typehandle we'll be using based on the interface returned by GetComSlotInfo.
+ tInterface = (REFLECTCLASSBASEREF)pMT->GetManagedClassObject();
+ type = TypeHandle(pMT);
+
+ // Make sure the specified slot is valid.
+ if (slot < StartSlot)
+ COMPlusThrowArgumentOutOfRange(W("slot"), W("ArgumentOutOfRange_Count"));
+ if (slot >= StartSlot + (int)rProps.Size())
+ COMPlusThrowArgumentOutOfRange(W("slot"), W("ArgumentOutOfRange_Count"));
+
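+ // A slot maps to one of three member kinds: a field accessor (semantic is
+ // at or above FieldSemanticOffset), a plain method (no associated property
+ // token), or a property accessor. Each kind is resolved to its managed
+ // MemberInfo through a different managed helper below.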
+ ComMTMethodProps *pProps = &rProps[slot - StartSlot];
+ if (pProps->semantic >= FieldSemanticOffset)
+ {
+ // We are dealing with a field.
+ ComCallMethodDesc *pFieldMeth = reinterpret_cast<ComCallMethodDesc*>(pProps->pMeth);
+ FieldDesc *pField = pFieldMeth->GetFieldDesc();
+
+ // call the managed code to get the FieldInfo
+ MethodDescCallSite getFieldInfo(METHOD__CLASS__GET_FIELD_INFO);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(tInterface),
+ (ARG_SLOT)pField
+ };
+
+ MemberInfoObj = getFieldInfo.Call_RetOBJECTREF(args);
+
+ *(pMemberType) = (pProps->semantic == (FieldSemanticOffset + msGetter)) ? CMT_PropGet : CMT_PropSet;
+ }
+ else if (pProps->property == mdPropertyNil)
+ {
+ // We are dealing with a normal property.
+
+ // call the managed code to get the MethodInfo
+ MethodDescCallSite getMethodBase(METHOD__CLASS__GET_METHOD_BASE);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(tInterface),
+ (ARG_SLOT)pProps->pMeth
+ };
+
+ MemberInfoObj = getMethodBase.Call_RetOBJECTREF(args);
+
+ *(pMemberType) = CMT_Method;
+ }
+ else
+ {
+ // We are dealing with a property.
+ mdProperty tkProp;
+ if (TypeFromToken(pProps->property) == mdtProperty)
+ tkProp = pProps->property;
+ else
+ tkProp = rProps[pProps->property].property;
+
+ // call the managed code to get the PropertyInfo
+ MethodDescCallSite getPropertyInfo(METHOD__CLASS__GET_PROPERTY_INFO);
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(tInterface),
+ tkProp
+ };
+
+ MemberInfoObj = getPropertyInfo.Call_RetOBJECTREF(args);
+
+ *(pMemberType) = (pProps->semantic == msGetter) ? CMT_PropGet : CMT_PropSet;
+ }
+
+ refRetVal = MemberInfoObj;
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(refRetVal);
+}
+FCIMPLEND
+
+//+----------------------------------------------------------------------------
+//
+// Method: MarshalNative::WrapIUnknownWithComObject
+// Synopsis: wrap the given IUnknown in a new __ComObject wrapper
+//
+//+----------------------------------------------------------------------------
+FCIMPL1(Object*, MarshalNative::WrapIUnknownWithComObject, IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pUnk, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF cref = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ if(pUnk == NULL)
+ COMPlusThrowArgumentNull(W("punk"));
+
+ EnsureComStarted();
+
+ COMInterfaceMarshaler marshaler;
+ marshaler.Init(pUnk, g_pBaseCOMObject, GET_THREAD());
+
+ cref = marshaler.WrapWithComObject();
+
+ if (cref == NULL)
+ COMPlusThrowOM();
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(cref);
+}
+FCIMPLEND
+
+//+----------------------------------------------------------------------------
+//
+// Method: MarshalNative::SwitchCCW
+//
+// Synopsis: switch the CCW from the old wrapper (oldtp) to the new
+// wrapper (newtp)
+//
+//+----------------------------------------------------------------------------
+
+FCIMPL2(FC_BOOL_RET, MarshalNative::SwitchCCW, Object* oldtpUNSAFE, Object* newtpUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ BOOL retVal = FALSE;
+ OBJECTREF oldtp = (OBJECTREF) oldtpUNSAFE;
+ OBJECTREF newtp = (OBJECTREF) newtpUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_2(oldtp, newtp);
+
+ if (oldtp == NULL)
+ COMPlusThrowArgumentNull(W("oldtp"));
+ if (newtp == NULL)
+ COMPlusThrowArgumentNull(W("newtp"));
+
+ // defined in interoputil.cpp
+ retVal = ReconnectWrapper(&oldtp, &newtp);
+
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(retVal);
+}
+FCIMPLEND
+
+
+FCIMPL2(void, MarshalNative::ChangeWrapperHandleStrength, Object* orefUNSAFE, CLR_BOOL fIsWeak)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF oref = (OBJECTREF) orefUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(oref);
+
+ if(oref == NULL)
+ COMPlusThrowArgumentNull(W("otp"));
+
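+ // Only objects that can be wrapped by a CCW (managed objects, including
+ // transparent proxies) have a wrapper handle whose strength can change;
+ // plain COM imports are backed by an RCW and are skipped here.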
+ if (
+#ifdef FEATURE_REMOTING
+ CRemotingServices::IsTransparentProxy(OBJECTREFToObject(oref)) ||
+#endif
+ !oref->GetMethodTable()->IsComImport())
+ {
+ CCWHolder pWrap = ComCallWrapper::InlineGetWrapper(&oref);
+
+ if (pWrap == NULL)
+ COMPlusThrowOM();
+ AppDomainFromIDHolder pDomain(pWrap->GetDomainID(), FALSE);
+ pDomain.ThrowIfUnloaded();
+ if (fIsWeak != 0)
+ pWrap->MarkHandleWeak();
+ else
+ pWrap->ResetHandleStrength();
+ pDomain.Release();
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL2(void, MarshalNative::InitializeWrapperForWinRT, Object *unsafe_pThis, IUnknown **ppUnk)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF oref = ObjectToOBJECTREF(unsafe_pThis);
+ HELPER_METHOD_FRAME_BEGIN_1(oref);
+
+ _ASSERTE(ppUnk != NULL);
+ if (*ppUnk == NULL)
+ {
+ // this should never happen, but we don't want to AV if the factory method is broken and returns NULL
+ COMPlusThrow(kNullReferenceException);
+ }
+
+ // the object does not have the right RCW yet
+ COMInterfaceMarshaler marshaler;
+ marshaler.Init(*ppUnk, oref->GetMethodTable(), GET_THREAD(), RCW::CF_SupportsIInspectable | RCW::CF_QueryForIdentity);
+
+ // the following will assign NULL to *ppUnk, which signals to the caller that we
+ // successfully initialized the RCW and that (*ppUnk)->Release() should be suppressed
+ marshaler.InitializeExistingComObject(&oref, ppUnk);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL2(void, MarshalNative::InitializeManagedWinRTFactoryObject, Object *unsafe_pThis, ReflectClassBaseObject *unsafe_pType)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF orefThis = ObjectToOBJECTREF(unsafe_pThis);
+ REFLECTCLASSBASEREF orefType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(unsafe_pType);
+ HELPER_METHOD_FRAME_BEGIN_2(orefThis, orefType);
+
+ MethodTable *pMT = orefType->GetType().GetMethodTable();
+
+ // get the special "factory" template for the type
+ _ASSERTE(pMT->IsExportedToWinRT());
+ WinRTManagedClassFactory *pFactory = GetComClassFactory(pMT)->AsWinRTManagedClassFactory();
+
+ ComCallWrapperTemplate *pTemplate = pFactory->GetOrCreateComCallWrapperTemplate(orefThis->GetMethodTable());
+
+ // create a CCW for the factory object using the template
+ CCWHolder pCCWHold = ComCallWrapper::InlineGetWrapper(&orefThis, pTemplate);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+//
+// Create the activation factory and wrap it with a unique RCW
+//
+// This is necessary because WinRT factories are often implemented as
+// singletons, and looking up an RCW for such a factory would usually return
+// an RCW from another apartment, even if the interface pointer returned from
+// GetActivationFactory is a raw pointer. As a result, users would randomly
+// get back RCWs for activation factories from other apartments, transition
+// to those apartments, and end up causing deadlocks and creating objects in
+// the wrong apartment.
+//
+// The solution is to always create a unique RCW.
+//
+FCIMPL1(Object *, MarshalNative::GetNativeActivationFactory, ReflectClassBaseObject *unsafe_pType)
+{
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF orefType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(unsafe_pType);
+ OBJECTREF orefFactory = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_2(orefFactory, orefType);
+
+ MethodTable *pMT = orefType->GetType().GetMethodTable();
+
+ // Must be a native WinRT type
+ _ASSERTE(pMT->IsProjectedFromWinRT());
+
+ //
+ // Get the activation factory instance for this WinRT type and create a RCW for it
+ //
+ GetNativeWinRTFactoryObject(
+ pMT,
+ GET_THREAD(),
+ NULL, // No factory interface available at this point
+ TRUE, // Create unique RCW - See comments for this function for more details
+ NULL, // No callback necessary
+ &orefFactory // return value
+ );
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(orefFactory);
+}
+FCIMPLEND
+
+void QCALLTYPE MarshalNative::GetInspectableIIDs(
+ QCall::ObjectHandleOnStack hobj,
+ QCall::ObjectHandleOnStack retArrayGuids)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(*hobj.m_ppObject));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ SyncBlock * pSyncBlock = NULL;
+
+ {
+ GCX_COOP();
+
+ // set return to failure value
+ retArrayGuids.Set(NULL);
+
+ OBJECTREF orComObject = NULL;
+ GCPROTECT_BEGIN(orComObject);
+
+ orComObject = ObjectToOBJECTREF(*hobj.m_ppObject);
+
+ // Validation: hobj represents a non-NULL System.__ComObject instance
+ if(orComObject == NULL)
+ COMPlusThrowArgumentNull(W("obj"));
+
+ MethodTable* pMT = orComObject->GetTrueMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+ if(!pMT->IsComObjectType())
+ COMPlusThrow(kArgumentException, IDS_EE_SRC_OBJ_NOT_COMOBJECT);
+
+ // while in cooperative mode, retrieve the object's sync block
+ pSyncBlock = orComObject->PassiveGetSyncBlock();
+
+ GCPROTECT_END();
+ } // close the GCX_COOP scope
+
+
+ InteropSyncBlockInfo * pInteropInfo;
+ RCW * pRCW;
+ SafeComHolderPreemp<IInspectable> pInspectable;
+
+ // Retrieve obj's IInspectable interface pointer
+ if (pSyncBlock != NULL
+ && (pInteropInfo = pSyncBlock->GetInteropInfoNoCreate()) != NULL
+ && (pRCW = pInteropInfo->GetRawRCW()) != NULL
+ && (pInspectable = pRCW->GetIInspectable()) != NULL)
+ {
+ // retrieve IIDs using IInspectable::GetIids()
+ ULONG size = 0;
+ CoTaskMemHolder<IID> rgIIDs(NULL);
+ if (SUCCEEDED(pInspectable->GetIids(&size, &rgIIDs)))
+ {
+ retArrayGuids.SetGuidArray(rgIIDs, size);
+ }
+ }
+
+ END_QCALL;
+}
+
+
+void QCALLTYPE MarshalNative::GetCachedWinRTTypes(
+ QCall::ObjectHandleOnStack hadObj,
+ int * pEpoch,
+ QCall::ObjectHandleOnStack retArrayMT)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(*hadObj.m_ppObject, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ AppDomain * pDomain = GetAppDomain();
+
+ {
+ GCX_COOP();
+
+ // set return to failure value
+ retArrayMT.Set(NULL);
+
+ OBJECTREF orDomain = NULL;
+ GCPROTECT_BEGIN(orDomain);
+
+ orDomain = ObjectToOBJECTREF(*hadObj.m_ppObject);
+
+ // Validation: hadObj represents a non-NULL System.AppDomain instance
+ if(orDomain != NULL)
+ {
+ MethodTable* pMT = orDomain->GetTrueMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+ if (!pMT->CanCastToClass(MscorlibBinder::GetClass(CLASS__APP_DOMAIN)))
+ // TODO: find better resource string
+ COMPlusThrow(kArgumentException, IDS_EE_ADUNLOAD_DEFAULT);
+
+ pDomain = ((AppDomainBaseObject*)(OBJECTREFToObject(orDomain)))->GetDomain();
+ }
+ GCPROTECT_END();
+ }
+
+ if (pDomain != NULL)
+ {
+ SArray<PTR_MethodTable> types;
+ SArray<GUID> guids;
+ UINT e = *(UINT*)pEpoch;
+ pDomain->GetCachedWinRTTypes(&types, &guids, e, (UINT*)pEpoch);
+
+ retArrayMT.SetIntPtrArray((void**)(&types[0]), types.GetCount());
+ }
+
+ END_QCALL;
+}
+
+void QCALLTYPE MarshalNative::GetCachedWinRTTypeByIID(
+ QCall::ObjectHandleOnStack hadObj,
+ GUID guid,
+ void * * ppMT)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(*hadObj.m_ppObject, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ AppDomain * pDomain = GetAppDomain();
+
+ {
+ GCX_COOP();
+
+ // set return to failure value
+ *ppMT = NULL;
+
+ OBJECTREF orDomain = NULL;
+ GCPROTECT_BEGIN(orDomain);
+
+ orDomain = ObjectToOBJECTREF(*hadObj.m_ppObject);
+
+ // Validation: hadObj represents a non-NULL System.AppDomain instance
+ if(orDomain != NULL)
+ {
+ MethodTable* pMT = orDomain->GetTrueMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+ if (!pMT->CanCastToClass(MscorlibBinder::GetClass(CLASS__APP_DOMAIN)))
+ // TODO: find better resource string
+ COMPlusThrow(kArgumentException, IDS_EE_ADUNLOAD_DEFAULT);
+
+ pDomain = ((AppDomainBaseObject*)(OBJECTREFToObject(orDomain)))->GetDomain();
+ }
+ GCPROTECT_END();
+ }
+
+ if (pDomain != NULL)
+ {
+ *ppMT = pDomain->LookupTypeByGuid(guid);
+ }
+
+ END_QCALL;
+}
+
+//====================================================================
+// Helper function used in the COM slot to method info mapping.
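+// Returns the vtable slot index of the first user-defined method on the
+// type's default interface (i.e. the number of prefix slots occupied by
+// the standard COM interface methods), or -1 if the default interface has
+// no user-defined methods. *ppDefItfMT receives the method table of the
+// default interface itself.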
+//====================================================================
+
+int MarshalNative::GetComSlotInfo(MethodTable *pMT, MethodTable **ppDefItfMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(ppDefItfMT));
+ }
+ CONTRACTL_END;
+
+ *ppDefItfMT = NULL;
+
+ // If a class was passed in then retrieve the default interface.
+ if (!pMT->IsInterface())
+ {
+ TypeHandle hndDefItfClass;
+ DefaultInterfaceType DefItfType = GetDefaultInterfaceForClassWrapper(TypeHandle(pMT), &hndDefItfClass);
+
+ if (DefItfType == DefaultInterfaceType_AutoDual || DefItfType == DefaultInterfaceType_Explicit)
+ {
+ pMT = hndDefItfClass.GetMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+ }
+ else
+ {
+ // The default interface does not have any user defined methods.
+ return -1;
+ }
+ }
+
+ // Set the default interface class.
+ *ppDefItfMT = pMT;
+
+ if (pMT->IsInterface())
+ {
+ // Return the right number of slots depending on interface type.
+ return ComMethodTable::GetNumExtraSlots(pMT->GetComInterfaceType());
+ }
+ else
+ {
+ // We are dealing with an IClassX which are always IDispatch based.
+ return ComMethodTable::GetNumExtraSlots(ifDispatch);
+ }
+}
+
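+// Returns TRUE if the object may be used from the current COM context:
+// CCWs are agile and always qualify, while an RCW qualifies only when the
+// current context cookie matches the one the wrapper was first seen in.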
+BOOL MarshalNative::IsObjectInContext(OBJECTREF *pObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pObj != NULL);
+ }
+ CONTRACTL_END;
+
+ SyncBlock* pBlock = (*pObj)->GetSyncBlock();
+
+ InteropSyncBlockInfo* pInteropInfo = pBlock->GetInteropInfo();
+
+ ComCallWrapper* pCCW = pInteropInfo->GetCCW();
+
+ if((pCCW) || (!pInteropInfo->RCWWasUsed()))
+ {
+ // We are dealing with a CCW. Since CCW's are agile, they are always in the
+ // correct context.
+ return TRUE;
+ }
+ else
+ {
+ RCWHolder pRCW(GetThread());
+ pRCW.Init(pBlock);
+
+ // We are dealing with an RCW, we need to check to see if the current
+ // context is the one it was first seen in.
+ LPVOID pCtxCookie = GetCurrentCtxCookie();
+ _ASSERTE(pCtxCookie != NULL);
+
+ return pCtxCookie == pRCW->GetWrapperCtxCookie();
+ }
+}
+
+#endif // FEATURE_COMINTEROP
diff --git a/src/vm/marshalnative.h b/src/vm/marshalnative.h
new file mode 100644
index 0000000000..26f64b79bb
--- /dev/null
+++ b/src/vm/marshalnative.h
@@ -0,0 +1,242 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: MarshalNative.h
+//
+
+//
+// FCall's for the Marshal class
+//
+
+
+#ifndef __MARSHALNATIVE_H__
+#define __MARSHALNATIVE_H__
+
+#include "fcall.h"
+
+//!!! Must be kept in sync with ArrayWithOffset class layout.
+struct ArrayWithOffsetData
+{
+ BASEARRAYREF m_Array;
+ INT32 m_cbOffset;
+ INT32 m_cbCount;
+};
+
+
+#ifdef FEATURE_COMINTEROP
+enum ComMemberType
+{
+ CMT_Method = 0,
+ CMT_PropGet = 1,
+ CMT_PropSet = 2
+};
+#endif // FEATURE_COMINTEROP
+
+class MarshalNative
+{
+public:
+ static INT32 QCALLTYPE NumParamBytes(MethodDesc * pMD);
+ static VOID QCALLTYPE Prelink(MethodDesc * pMD);
+
+ //====================================================================
+ // These methods convert between an HR and a managed exception.
+ //====================================================================
+ static FCDECL2(void, ThrowExceptionForHR, INT32 errorCode, LPVOID errorInfo);
+ static FCDECL2(Object *, GetExceptionForHR, INT32 errorCode, LPVOID errorInfo);
+ static FCDECL1(int, GetHRForException, Object* eUNSAFE);
+ static FCDECL1(int, GetHRForException_WinRT, Object* eUNSAFE);
+
+ static FCDECL4(void, CopyToNative, Object* psrcUNSAFE, INT32 startindex, LPVOID pdst, INT32 length);
+ static FCDECL4(void, CopyToManaged, LPVOID psrc, Object* pdstUNSAFE, INT32 startindex, INT32 length);
+ static FCDECL2(UINT32, SizeOfClass, ReflectClassBaseObject* refClass, CLR_BOOL throwIfNotMarshalable);
+
+ static FCDECL2(LPVOID, FCUnsafeAddrOfPinnedArrayElement, ArrayBase *arr, INT32 index);
+
+ static FCDECL1(UINT32, OffsetOfHelper, ReflectFieldObject* pFieldUNSAFE);
+ static FCDECL0(int, GetLastWin32Error);
+ static FCDECL1(void, SetLastWin32Error, int error);
+ static FCDECL1(INT32, CalculateCount, ArrayWithOffsetData* pRef);
+
+ static FCDECL3(VOID, StructureToPtr, Object* pObjUNSAFE, LPVOID ptr, CLR_BOOL fDeleteOld);
+ static FCDECL3(VOID, PtrToStructureHelper, LPVOID ptr, Object* pObjIn, CLR_BOOL allowValueClasses);
+ static FCDECL2(VOID, DestroyStructure, LPVOID ptr, ReflectClassBaseObject* refClassUNSAFE);
+
+ //====================================================================
+ // map a fiber cookie from the hosting APIs into a managed Thread object
+ //====================================================================
+ static FCDECL1(THREADBASEREF, GetThreadFromFiberCookie, int cookie);
+
+ static FCDECL3(LPVOID, GetUnmanagedThunkForManagedMethodPtr, LPVOID pfnMethodToWrap, PCCOR_SIGNATURE pbSignature, ULONG cbSignature);
+ static FCDECL3(LPVOID, GetManagedThunkForUnmanagedMethodPtr, LPVOID pfnMethodToWrap, PCCOR_SIGNATURE pbSignature, ULONG cbSignature);
+
+ static FCDECL0(UINT32, GetSystemMaxDBCSCharSize);
+
+ static FCDECL2(LPVOID, GCHandleInternalAlloc, Object *obj, int type);
+ static FCDECL1(VOID, GCHandleInternalFree, OBJECTHANDLE handle);
+ static FCDECL1(LPVOID, GCHandleInternalGet, OBJECTHANDLE handle);
+ static FCDECL3(VOID, GCHandleInternalSet, OBJECTHANDLE handle, Object *obj, CLR_BOOL isPinned);
+ static FCDECL4(Object*, GCHandleInternalCompareExchange, OBJECTHANDLE handle, Object *obj, Object* oldObj, CLR_BOOL isPinned);
+ static FCDECL1(LPVOID, GCHandleInternalAddrOfPinnedObject, OBJECTHANDLE handle);
+ static FCDECL1(VOID, GCHandleInternalCheckDomain, OBJECTHANDLE handle);
+ static FCDECL1(INT32, GCHandleInternalGetHandleType, OBJECTHANDLE handle);
+
+ static FCDECL2(Object*, GetDelegateForFunctionPointerInternal, LPVOID FPtr, ReflectClassBaseObject* refTypeUNSAFE);
+ static FCDECL1(LPVOID, GetFunctionPointerForDelegateInternal, Object* refDelegateUNSAFE);
+
+#ifdef FEATURE_COMINTEROP
+ //====================================================================
+ // map GUID to Type
+ //====================================================================
+ static FCDECL1(Object*, GetLoadedTypeForGUID, GUID* pGuid);
+
+ //====================================================================
+ // map Type to ITypeInfo*
+ //====================================================================
+ static FCDECL1(ITypeInfo*, GetITypeInfoForType, ReflectClassBaseObject* refClassUNSAFE);
+
+ //====================================================================
+ // return the IUnknown* for an Object
+ //====================================================================
+ static FCDECL2(IUnknown*, GetIUnknownForObjectNative, Object* orefUNSAFE, CLR_BOOL fOnlyInContext);
+
+ //====================================================================
+ // return the raw IUnknown* for a COM Object, regardless of the
+ // current context
+ // Does not AddRef the returned pointer
+ //====================================================================
+ static FCDECL1(IUnknown*, GetRawIUnknownForComObjectNoAddRef, Object* orefUNSAFE);
+
+ //====================================================================
+ // return the IDispatch* for an Object
+ //====================================================================
+ static FCDECL2(IDispatch*, GetIDispatchForObjectNative, Object* orefUNSAFE, CLR_BOOL fOnlyInContext);
+
+ //====================================================================
+ // return the IUnknown* representing the interface for the Object
+ // Object o should support Type T
+ //====================================================================
+ static FCDECL4(IUnknown*, GetComInterfaceForObjectNative, Object* orefUNSAFE, ReflectClassBaseObject* refClassUNSAFE, CLR_BOOL fOnlyInContext, CLR_BOOL bEnableCustomizedQueryInterface);
+
+ //====================================================================
+ // return an Object for IUnknown
+ //====================================================================
+ static FCDECL1(Object*, GetObjectForIUnknown, IUnknown* pUnk);
+
+ //====================================================================
+ // return a unique cacheless Object for IUnknown
+ //====================================================================
+ static FCDECL1(Object*, GetUniqueObjectForIUnknown, IUnknown* pUnk);
+
+ //====================================================================
+ // return an Object for IUnknown, using the Type T
+ // NOTE:
+ // Type T should be either a COM-imported Type or a sub-type of a
+ // COM-imported Type
+ //====================================================================
+ static FCDECL2(Object*, GetTypedObjectForIUnknown, IUnknown* pUnk, ReflectClassBaseObject* refClassUNSAFE);
+
+ //====================================================================
+ // Free unused RCWs in the current COM+ context.
+ //====================================================================
+ static FCDECL0(void, CleanupUnusedObjectsInCurrentContext);
+
+ //====================================================================
+ // Checks whether there are RCWs from any context available for cleanup.
+ //====================================================================
+ static FCDECL0(FC_BOOL_RET, AreComObjectsAvailableForCleanup);
+
+ //====================================================================
+ // Create an object and aggregate it, then return the inner unknown.
+ //====================================================================
+ static FCDECL2(IUnknown*, CreateAggregatedObject, IUnknown* pOuter, Object* refObjUNSAFE);
+
+ //====================================================================
+ // check if the object is a classic COM component
+ //====================================================================
+ static FCDECL1(FC_BOOL_RET, IsComObject, Object* objUNSAFE);
+
+ //====================================================================
+ // Release the COM component and zombie this object.
+ // Further use of this Object may throw an exception.
+ //====================================================================
+ static FCDECL1(INT32, ReleaseComObject, Object* objUNSAFE);
+ static FCDECL1(void, FinalReleaseComObject, Object* objUNSAFE);
+
+ //====================================================================
+ // This method takes the given COM object and wraps it in an object
+ // of the specified type. The type must be derived from __ComObject.
+ //====================================================================
+ static FCDECL2(Object*, InternalCreateWrapperOfType, Object* objUNSAFE, ReflectClassBaseObject* refClassUNSAFE);
+
+ //====================================================================
+ // check if the type is visible from COM.
+ //====================================================================
+ static FCDECL1(FC_BOOL_RET, IsTypeVisibleFromCom, ReflectClassBaseObject* refClassUNSAFE);
+
+ //====================================================================
+ // IUnknown Helpers
+ //====================================================================
+ static FCDECL3(HRESULT, QueryInterface, IUnknown* pUnk, REFGUID iid, void** ppv);
+ static FCDECL1(ULONG, AddRef, IUnknown* pUnk);
+ static FCDECL1(ULONG, Release, IUnknown* pUnk);
+
+ //====================================================================
+ // These methods convert OLE variants to and from objects.
+ //====================================================================
+ static FCDECL2(void, GetNativeVariantForObject, Object* ObjUNSAFE, LPVOID pDestNativeVariant);
+ static FCDECL1(Object*, GetObjectForNativeVariant, LPVOID pSrcNativeVariant);
+ static FCDECL2(Object*, GetObjectsForNativeVariants, VARIANT* aSrcNativeVariant, int cVars);
+
+ //====================================================================
+ // This method generates a guid for the specified type.
+ //====================================================================
+ static FCDECL2(void, DoGenerateGuidForType, GUID * result, ReflectClassBaseObject* refTypeUNSAFE);
+
+ //====================================================================
+ // Methods to retrieve information from TypeLibs and TypeInfos.
+ //====================================================================
+ static FCDECL2(void, DoGetTypeLibGuid, GUID * result, Object* refTlbUNSAFE);
+ static FCDECL1(LCID, GetTypeLibLcid, Object* refTlbUNSAFE);
+ static FCDECL3(void, GetTypeLibVersion, Object* refTlbUNSAFE, int *pMajor, int *pMinor);
+ static FCDECL2(void, DoGetTypeInfoGuid, GUID * result, Object* refTypeInfoUNSAFE);
+
+ //====================================================================
+ // Given an assembly, return the TLBID that will be generated for the
+ // typelib exported from the assembly.
+ //====================================================================
+ static FCDECL2(void, DoGetTypeLibGuidForAssembly, GUID * result, AssemblyBaseObject* refAsmUNSAFE);
+
+ //====================================================================
+ // Given an assembly, return the version number of the type library
+ // that would be exported from the assembly.
+ //====================================================================
+ static FCDECL3(void, GetTypeLibVersionForAssembly, AssemblyBaseObject* refAsmUNSAFE, INT32 *pMajorVersion, INT32 *pMinorVersion);
+
+ //====================================================================
+ // These methods are used to map COM slots to method info's.
+ //====================================================================
+ static FCDECL1(int, GetStartComSlot, ReflectClassBaseObject* tUNSAFE);
+ static FCDECL1(int, GetEndComSlot, ReflectClassBaseObject* tUNSAFE);
+ static FCDECL3(Object*, GetMethodInfoForComSlot, ReflectClassBaseObject* tUNSAFE, INT32 slot, ComMemberType* pMemberType);
+
+ static FCDECL1(int, GetComSlotForMethodInfo, ReflectMethodObject* pMethodUNSAFE);
+
+ static FCDECL2(FC_BOOL_RET, SwitchCCW, Object* oldtpUNSAFE, Object* newtpUNSAFE);
+ static FCDECL1(Object*, WrapIUnknownWithComObject, IUnknown* pUnk);
+
+ static FCDECL2(void, ChangeWrapperHandleStrength, Object* orefUNSAFE, CLR_BOOL fIsWeak);
+ static FCDECL2(void, InitializeWrapperForWinRT, Object *unsafe_pThis, IUnknown **ppUnk);
+ static FCDECL2(void, InitializeManagedWinRTFactoryObject, Object *unsafe_pThis, ReflectClassBaseObject *unsafe_pType);
+ static FCDECL1(Object *, GetNativeActivationFactory, ReflectClassBaseObject *unsafe_pType);
+ static void QCALLTYPE GetInspectableIIDs(QCall::ObjectHandleOnStack hobj, QCall::ObjectHandleOnStack retArrayGuids);
+ static void QCALLTYPE GetCachedWinRTTypes(QCall::ObjectHandleOnStack hadObj, int * epoch, QCall::ObjectHandleOnStack retArrayMT);
+ static void QCALLTYPE GetCachedWinRTTypeByIID(QCall::ObjectHandleOnStack hadObj, GUID iid, void * * ppMT);
+
+private:
+ static int GetComSlotInfo(MethodTable *pMT, MethodTable **ppDefItfMT);
+ static BOOL IsObjectInContext(OBJECTREF *pObj);
+#endif // FEATURE_COMINTEROP
+};
+
+#endif
diff --git a/src/vm/marvin32.cpp b/src/vm/marvin32.cpp
new file mode 100644
index 0000000000..dd630b9e54
--- /dev/null
+++ b/src/vm/marvin32.cpp
@@ -0,0 +1,267 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+// This module contains the routines to implement the Marvin32 checksum function
+//
+//
+
+#include "common.h"
+#include "marvin32.h"
+
+//
+// See the symcrypt.h file for documentation on what the various functions do.
+//
+
+//
+// Round rotation amounts. This array is optimized away by the compiler
+// as we inline all our rotations.
+//
+static const int rotate[4] = {
+ 20, 9, 27, 19,
+};
+
+
+#define ROL32( x, n ) _rotl( (x), (n) )
+#define ROR32( x, n ) _rotr( (x), (n) )
+
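+//
+// One Marvin32 mixing block: two add/rotate/xor rounds over the 64-bit
+// state held in (a, b). The rotation constants come from the table above
+// and are folded in at compile time.
+//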
+#define BLOCK( a, b ) \
+{\
+ b ^= a; a = ROL32( a, rotate[0] );\
+ a += b; b = ROL32( b, rotate[1] );\
+ b ^= a; a = ROL32( a, rotate[2] );\
+ a += b; b = ROL32( b, rotate[3] );\
+}
+
+
+
+HRESULT
+SymCryptMarvin32ExpandSeed(
+ __out PSYMCRYPT_MARVIN32_EXPANDED_SEED pExpandedSeed,
+ __in_ecount(cbSeed) PCBYTE pbSeed,
+ SIZE_T cbSeed )
+{
+ HRESULT retVal = S_OK;
+
+ if( cbSeed != SYMCRYPT_MARVIN32_SEED_SIZE )
+ {
+ retVal = E_INVALIDARG;
+ goto cleanup;
+ }
+ pExpandedSeed->s[0] = LOAD_LSBFIRST32( pbSeed );
+ pExpandedSeed->s[1] = LOAD_LSBFIRST32( pbSeed + 4 );
+
+cleanup:
+ return retVal;
+}
+
+
+VOID
+SymCryptMarvin32Init( _Out_ PSYMCRYPT_MARVIN32_STATE pState,
+ _In_ PCSYMCRYPT_MARVIN32_EXPANDED_SEED pExpandedSeed)
+{
+ pState->chain = *pExpandedSeed;
+ pState->dataLength = 0;
+ pState->pSeed = pExpandedSeed;
+
+ *(ULONG *) &pState->buffer[4] = 0; // wipe the last 4 bytes of the buffer.
+}
+
+VOID
+SymCryptMarvin32AppendBlocks(
+ _Inout_ PSYMCRYPT_MARVIN32_CHAINING_STATE pChain,
+ _In_reads_( cbData ) PCBYTE pbData,
+ SIZE_T cbData )
+{
+ ULONG s0 = pChain->s[0];
+ ULONG s1 = pChain->s[1];
+
+ SIZE_T bytesInFirstBlock = cbData & 0xc; // 0, 4, 8, or 12
+
+ pbData += bytesInFirstBlock;
+ cbData -= bytesInFirstBlock;
+
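+ //
+ // The switch below jumps into the middle of the while loop (in the style
+ // of Duff's device): the 0-, 4-, 8-, or 12-byte partial first block is
+ // consumed by falling through the case labels, after which the loop
+ // processes the remaining data in full 16-byte chunks.
+ //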
+ switch( bytesInFirstBlock )
+ {
+ case 0: // This handles the cbData == 0 case too
+ while( cbData > 0 )
+ {
+ pbData += 16;
+ cbData -= 16;
+
+ s0 += LOAD_LSBFIRST32( pbData - 16 );
+ BLOCK( s0, s1 );
+ case 12:
+ s0 += LOAD_LSBFIRST32( pbData - 12 );
+ BLOCK( s0, s1 );
+ case 8:
+ s0 += LOAD_LSBFIRST32( pbData - 8 );
+ BLOCK( s0, s1 );
+ case 4:
+ s0 += LOAD_LSBFIRST32( pbData - 4 );
+ BLOCK( s0, s1 );
+ }
+ }
+
+ pChain->s[0] = s0;
+ pChain->s[1] = s1;
+}
+
+VOID
+SymCryptMarvin32Append(_Inout_ SYMCRYPT_MARVIN32_STATE * state,
+_In_reads_bytes_(cbData) PCBYTE pbData,
+SIZE_T cbData)
+{
+ ULONG bytesInBuffer = state->dataLength;
+
+ state->dataLength += (ULONG)cbData; // We only keep track of the last 2 bits...
+
+ //
+ // Truncate bytesInBuffer so that we never have an integer overflow.
+ //
+ bytesInBuffer &= SYMCRYPT_MARVIN32_INPUT_BLOCK_SIZE - 1;
+
+ //
+ // If previous data in buffer, buffer new input and transform if possible.
+ //
+ if (bytesInBuffer > 0)
+ {
+ SIZE_T freeInBuffer = SYMCRYPT_MARVIN32_INPUT_BLOCK_SIZE - bytesInBuffer;
+ if (cbData < freeInBuffer)
+ {
+ //
+ // All the data will fit in the buffer.
+ // We don't do anything here.
+ // As cbData < INPUT_BLOCK_SIZE the bulk data processing is skipped,
+ // and the data will be copied to the buffer at the end
+ // of this code.
+ }
+ else {
+ //
+ // Enough data to fill the whole buffer & process it
+ //
+ memcpy(&state->buffer[bytesInBuffer], pbData, freeInBuffer);
+ pbData += freeInBuffer;
+ cbData -= freeInBuffer;
+ SymCryptMarvin32AppendBlocks(&state->chain, state->buffer, SYMCRYPT_MARVIN32_INPUT_BLOCK_SIZE);
+
+ //
+ // Set bytesInBuffer to zero to ensure that the trailing data in the
+ // buffer will be copied to the right location of the buffer below.
+ //
+ bytesInBuffer = 0;
+ }
+ }
+
+ //
+ // Internal buffer is empty; process all remaining whole blocks in the input
+ //
+ if (cbData >= SYMCRYPT_MARVIN32_INPUT_BLOCK_SIZE)
+ {
+ SIZE_T cbDataRoundedDown = cbData & ~(SIZE_T)(SYMCRYPT_MARVIN32_INPUT_BLOCK_SIZE - 1);
+ SymCryptMarvin32AppendBlocks(&state->chain, pbData, cbDataRoundedDown);
+ pbData += cbDataRoundedDown;
+ cbData -= cbDataRoundedDown;
+ }
+
+ //
+ // buffer remaining input if necessary.
+ //
+ if (cbData > 0)
+ {
+ memcpy(&state->buffer[bytesInBuffer], pbData, cbData);
+ }
+
+}
+
+VOID
+SymCryptMarvin32Result(
+ _Inout_ PSYMCRYPT_MARVIN32_STATE pState,
+ _Out_writes_( SYMCRYPT_MARVIN32_RESULT_SIZE ) PBYTE pbResult )
+{
+ SIZE_T bytesInBuffer = ( pState->dataLength) & 0x3;
+
+ //
+ // Wipe four bytes in the buffer.
+ // Doing this first ensures that this write is aligned when the input was of
+ // length 0 mod 4.
+ // The buffer is 8 bytes long, so we never overwrite anything else.
+ //
+ *(ULONG *) &pState->buffer[bytesInBuffer] = 0;
+
+ //
+ // The buffer is never completely full, so we can always put the first
+ // padding byte in.
+ //
+ pState->buffer[bytesInBuffer++] = 0x80;
+
+ //
+ // Process the final block
+ //
+ SymCryptMarvin32AppendBlocks( &pState->chain, pState->buffer, 8 );
+
+ STORE_LSBFIRST32( pbResult , pState->chain.s[0] );
+ STORE_LSBFIRST32( pbResult + 4, pState->chain.s[1] );
+
+ //
+ // Wipe only those things that we need to wipe.
+ //
+
+ *(ULONG *) &pState->buffer[0] = 0;
+ pState->dataLength = 0;
+
+ pState->chain = *pState->pSeed;
+}
+
+
+VOID
+SymCryptMarvin32(
+ __in PCSYMCRYPT_MARVIN32_EXPANDED_SEED pExpandedSeed,
+ __in_ecount(cbData) PCBYTE pbData,
+ SIZE_T cbData,
+ __out_ecount(SYMCRYPT_MARVIN32_RESULT_SIZE) PBYTE pbResult)
+//
+// To reduce per-computation overhead, we have dedicated code here instead of the full Init/Append/Result sequence.
+//
+{
+ ULONG tmp;
+
+ ULONG s0 = pExpandedSeed->s[0];
+ ULONG s1 = pExpandedSeed->s[1];
+
+ while( cbData > 7 )
+ {
+ s0 += LOAD_LSBFIRST32( pbData );
+ BLOCK( s0, s1 );
+ s0 += LOAD_LSBFIRST32( pbData + 4 );
+ BLOCK( s0, s1 );
+ pbData += 8;
+ cbData -= 8;
+ }
+
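+ //
+ // Handle the 0-7 remaining bytes. Cases 4-7 first mix in one more full
+ // word; tmp then packs the final 0-3 tail bytes together with the 0x80
+ // padding byte that terminates the message.
+ //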
+ switch( cbData )
+ {
+ default:
+ case 4: s0 += LOAD_LSBFIRST32( pbData ); BLOCK( s0, s1 ); pbData += 4;
+ case 0: tmp = 0x80; break;
+
+ case 5: s0 += LOAD_LSBFIRST32( pbData ); BLOCK( s0, s1 ); pbData += 4;
+ case 1: tmp = 0x8000 | pbData[0]; break;
+
+ case 6: s0 += LOAD_LSBFIRST32( pbData ); BLOCK( s0, s1 ); pbData += 4;
+ case 2: tmp = 0x800000 | LOAD_LSBFIRST16( pbData ); break;
+
+ case 7: s0 += LOAD_LSBFIRST32( pbData ); BLOCK( s0, s1 ); pbData += 4;
+ case 3: tmp = LOAD_LSBFIRST16( pbData ) | (pbData[2] << 16) | 0x80000000; break;
+ }
+ s0 += tmp;
+
+
+ BLOCK( s0, s1 );
+ BLOCK( s0, s1 );
+
+ STORE_LSBFIRST32( pbResult , s0 );
+ STORE_LSBFIRST32( pbResult + 4, s1 );
+}
diff --git a/src/vm/mda.cpp b/src/vm/mda.cpp
new file mode 100644
index 0000000000..cd55fe5209
--- /dev/null
+++ b/src/vm/mda.cpp
@@ -0,0 +1,4018 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#include "eeconfig.h"
+#include "eeconfigfactory.h"
+#include "corhlpr.h"
+#include <xmlparser.h>
+#include <mscorcfg.h>
+#include <holder.h>
+#include <dbginterface.h>
+#include "wrappers.h"
+#include "mda.h"
+#include "mdaassistants.h"
+#include "sstring.h"
+#include "util.hpp"
+#include "debugdebugger.h"
+
+#ifdef MDA_SUPPORTED
+
+//
+// MdaHashtable
+//
+
+BOOL MdaLockOwner(LPVOID) { LIMITED_METHOD_CONTRACT; return TRUE; }
+
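+// A method counts as "just my code" when no managed debugger is attached,
+// or when the attached debugger reports the method as JMC.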
+BOOL IsJustMyCode(MethodDesc* pMethodDesc)
+{
+ CONTRACT(BOOL)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ if (!ManagedDebuggingAssistants::IsManagedDebuggerAttached())
+ return TRUE;
+
+ BOOL bIsJMC = FALSE;
+
+ EX_TRY
+ {
+ if (g_pDebugInterface && g_pDebugInterface->IsJMCMethod(pMethodDesc->GetModule(), pMethodDesc->GetMemberDef()))
+ bIsJMC = TRUE;
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ RETURN bIsJMC;
+}
+
+
+//
+// ManagedDebuggingAssistants
+//
+
+const bool g_mdaAssistantIsSwitch[] =
+{
+#define MDA_ASSISTANT_IS_SWITCH
+#include "mdaschema.inl"
+#undef MDA_ASSISTANT_IS_SWITCH
+ false
+};
+
+void ManagedDebuggingAssistants::Initialize()
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ EX_TRY
+ {
+ //
+ // Initialize
+ //
+ m_pSwitchActivationXml = NULL;
+ m_pMdaXmlIndustry = new MdaXmlIndustry();
+
+ MdaSchema::Initialize();
+
+ //
+ // Create AssistantSchema
+ //
+ m_pAssistantSchema = new MdaAssistantSchema();
+
+ //
+ // Create AssistantMsgSchema
+ //
+ m_pAssistantMsgSchema = new MdaAssistantMsgSchema();
+
+ //
+ // Create SchemaSchema
+ //
+ m_pSchemaSchema = new MdaSchemaSchema();
+
+ //
+ // InvalidConfigFile
+ //
+ g_mdaStaticHeap.m_mdaInvalidConfigFile.Enable();
+
+#ifdef _DEBUG
+ StackSString sszValidateFramework(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MDAValidateFramework));
+ if (!sszValidateFramework.IsEmpty() && sszValidateFramework.Equals(W("1")))
+ DebugInitialize();
+#endif
+ }
+ EX_CATCH
+ {
+ // MDA State corrupted, unable to initialize, runtime still OK
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ RETURN;
+}
+
+MdaEnvironment::~MdaEnvironment()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pStringFactory)
+ delete m_pStringFactory;
+
+ if (m_pGroups)
+ delete m_pGroups;
+
+ if (m_szMda)
+ delete m_szMda;
+}
+
+MdaEnvironment::MdaEnvironment()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_bDisable = TRUE;
+ m_szMda = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_MDA);
+ m_pStringFactory = NULL;
+ m_pGroups = NULL;
+
+ if (ManagedDebuggingAssistants::IsManagedDebuggerAttached())
+ {
+ if (m_pStringFactory == NULL)
+ m_pStringFactory = new MdaFactory<StackSString>();
+
+ if (m_pGroups == NULL)
+ m_pGroups = new SArray<SString*>();
+
+ SString* pStr = m_pStringFactory->Create();
+ pStr->Set(W("managedDebugger"));
+ m_pGroups->Append(pStr);
+ m_bDisable = FALSE;
+ }
+
+ if (ManagedDebuggingAssistants::IsUnmanagedDebuggerAttached())
+ {
+ if (m_pStringFactory == NULL)
+ m_pStringFactory = new MdaFactory<StackSString>();
+
+ if (m_pGroups == NULL)
+ m_pGroups = new SArray<SString*>();
+
+ SString* pStr = m_pStringFactory->Create();
+ pStr->Set(W("unmanagedDebugger"));
+ m_pGroups->Append(pStr);
+ m_bDisable = FALSE;
+ }
+
+ if (m_szMda)
+ {
+ if (m_pStringFactory == NULL)
+ m_pStringFactory = new MdaFactory<StackSString>();
+
+ if (m_pGroups == NULL)
+ m_pGroups = new SArray<SString*>();
+
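+ // The MDA environment variable is a semicolon-separated list of group
+ // names; a bare "0" anywhere in the list clears the groups gathered so
+ // far and disables all assistants.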
+ StackSString sszMda(m_szMda);
+ SString::Iterator s = sszMda.Begin();
+ SString::Iterator e = s;
+
+ while (true)
+ {
+ if (!sszMda.Find(e, W(';')))
+ e = sszMda.End();
+ SString* psszGroup = m_pStringFactory->Create();
+ psszGroup->Set(sszMda, s, e);
+
+ if (psszGroup->Equals(W("0")))
+ {
+ m_pGroups->Clear();
+ m_bDisable = TRUE;
+ }
+ else
+ {
+ m_pGroups->Append(psszGroup);
+
+ m_bDisable = FALSE;
+ }
+
+ if (e == sszMda.End())
+ break;
+ s = ++e;
+ }
+ }
+
+ if (m_bDisable == FALSE)
+ {
+ // If we get here, m_pStringFactory should already have been created.
+ _ASSERTE(m_pStringFactory != NULL);
+
+ WCHAR szExe[_MAX_PATH];
+ if (!WszGetModuleFileName(NULL, szExe, _MAX_PATH))
+ return;
+
+ // Construct file name of the config file
+ m_psszConfigFile = m_pStringFactory->Create();
+ m_psszConfigFile->Set(szExe);
+ m_psszConfigFile->Append(W(".config"));
+
+ // Construct file name of mda config file
+ m_psszMdaConfigFile = m_pStringFactory->Create();
+ m_psszMdaConfigFile->Set(szExe);
+ m_psszMdaConfigFile->Append(W(".mda.config"));
+ }
+}
+
+void ManagedDebuggingAssistants::EEStartupActivation()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //
+ // Read environment variable, then registry settings
+ //
+ MdaEnvironment env;
+
+ if (env.IsDisabled())
+ return;
+
+ AllocateManagedDebuggingAssistants();
+
+ //
+ // ConfigFile Activation
+ //
+ g_mdaStaticHeap.m_pMda->EnvironmentActivation(&env);
+}
+
+#ifdef _DEBUG
+void ManagedDebuggingAssistants::DebugInitialize()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //
+ // Validate MDA output on Debug builds
+ //
+ m_bValidateOutput = TRUE;
+
+ //
+ // XmlValidationError
+ //
+ g_mdaStaticHeap.m_mdaXmlValidationError.Enable();
+
+ MdaSchema::ValidationResult validationResult;
+
+ //
+ // Validate SchemaSchema
+ //
+ MdaXmlElement* pXmlSchemaSchema = m_pSchemaSchema->ToXml(m_pMdaXmlIndustry);
+ if (m_pSchemaSchema->Validate(pXmlSchemaSchema, &validationResult)->ValidationFailed())
+ {
+ MDA_TRIGGER_ASSISTANT(XmlValidationError, ReportError(&validationResult));
+ UNREACHABLE();
+ }
+
+ //
+ // Validate AssistantSchema
+ //
+ MdaXmlElement* pXmlAssistantSchema = m_pAssistantSchema->ToXml(m_pMdaXmlIndustry);
+ if (m_pSchemaSchema->Validate(pXmlAssistantSchema, &validationResult)->ValidationFailed())
+ {
+ MDA_TRIGGER_ASSISTANT(XmlValidationError, ReportError(&validationResult));
+ ASSERT(!W("You're modifications to MdaAssistantSchema for assistant input don't conform to XSD"));
+ }
+
+ //
+ // Validate AssistantMsgSchema
+ //
+ MdaXmlElement* pXmlAssistantMsgSchema = m_pAssistantMsgSchema->ToXml(m_pMdaXmlIndustry);
+ if (m_pSchemaSchema->Validate(pXmlAssistantMsgSchema, &validationResult)->ValidationFailed())
+ {
+ MDA_TRIGGER_ASSISTANT(XmlValidationError, ReportError(&validationResult));
+ ASSERT(!W("You're modifications to MdaAssistantSchema for assistant output don't conform to XSD"));
+ }
+}
+#endif
+
+void ManagedDebuggingAssistants::ConfigFileActivation(LPCWSTR szConfigFile, MdaXmlIndustry* pXmlIndustry, MdaHashtable<MdaXmlElement*>* pMdaXmlPairs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Parse
+ MdaSchema::ValidationResult validationResult;
+ MdaXmlElement* pMdaConfig = MdaConfigFactory::ParseXmlStream(pXmlIndustry, szConfigFile);
+ if (!pMdaConfig)
+ return;
+
+ // Validate
+ if (m_pAssistantSchema->Validate(pMdaConfig, &validationResult)->ValidationFailed())
+ {
+ MDA_TRIGGER_ASSISTANT(InvalidConfigFile, ReportError(MdaElemDef(MdaConfig)));
+ g_mdaStaticHeap.DisableAll();
+ return;
+ }
+
+ // Activate
+ InlineSArray<MdaXmlElement*, MdaElemDef(Max)> xmlMdaConfigs;
+ MdaXPath::FindElements(pMdaConfig, W("/mdaConfig/assistants/*"), &xmlMdaConfigs);
+ for(COUNT_T i = 0; i < xmlMdaConfigs.GetCount(); i ++)
+ {
+ MdaXmlElement* pXmlMdaConfig = xmlMdaConfigs[i];
+ if (pXmlMdaConfig->GetAttribute(MdaAttrDecl(Enable))->GetValueAsBool())
+ {
+ pMdaXmlPairs->Set(pXmlMdaConfig->GetName(), xmlMdaConfigs[i]);
+ }
+ else
+ {
+ if (pMdaXmlPairs->HasKey(pXmlMdaConfig->GetName()))
+ pMdaXmlPairs->DeleteValue(pXmlMdaConfig->GetName());
+ }
+ }
+}
+
+MdaXmlElement* ManagedDebuggingAssistants::GetSwitchActivationXml(MdaElemDeclDef mda)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (g_mdaAssistantIsSwitch[mda])
+ {
+ MdaXmlElement* pXml = m_pMdaXmlIndustry->CreateElement()->SetDeclDef(mda);
+ pXml->AddAttributeBool(MdaAttrDecl(Enable), TRUE);
+ return pXml;
+ }
+ else
+ {
+ if (!m_pSwitchActivationXml)
+ {
+ MdaXmlElement* pXmlMdaConfig = m_pMdaXmlIndustry->CreateElement()->SetDeclDef(MdaElemDef(MdaConfig));
+ m_pSwitchActivationXml = pXmlMdaConfig->AddChild(MdaElemDecl(Assistants));
+
+ for (COUNT_T i = 0; i < MdaElemDef(AssistantMax); i ++)
+ m_pSwitchActivationXml->AddChild((MdaElemDeclDef)i);
+
+ MdaSchema::ValidationResult validationResult;
+
+ // Validating the schema has the side-effect of initializing the default XML attributes
+ if (m_pAssistantSchema->Validate(pXmlMdaConfig, &validationResult)->ValidationFailed())
+ ASSERT(!W("MDA Assistant must allow <Assistant /> form."));
+ }
+
+ return m_pSwitchActivationXml->GetChild(mda);
+ }
+}
+
+void ManagedDebuggingAssistants::ActivateGroup(LPCWSTR groupName, SArray<MdaElemDeclDef>* pGroupMdaXmlPairs, MdaHashtable<MdaXmlElement*>* pActivationMdaXmlPairs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString sszGroupName(groupName);
+ BOOL bIsManagedDebuggerSet = sszGroupName.EqualsCaseInsensitive(W("managedDebugger"));
+
+    SArray<MdaElemDeclDef>& groupMdaXmlPairs = *pGroupMdaXmlPairs;
+
+    for (COUNT_T i = 0; i < groupMdaXmlPairs.GetCount(); i++)
+ {
+        MdaElemDeclDef mda = groupMdaXmlPairs[i];
+ MdaXmlElement* pSwitchActivationXml = GetSwitchActivationXml(mda);
+
+ PREFIX_ASSUME(pSwitchActivationXml != NULL);
+
+ pSwitchActivationXml->AddAttributeBool(MdaAttrDecl(SuppressDialog), bIsManagedDebuggerSet);
+
+ pActivationMdaXmlPairs->Set(MdaSchema::g_arElementNames[mda], pSwitchActivationXml);
+ }
+}
+
+LPCWSTR ToLowerFirstChar(LPCWSTR name, MdaFactory<SString>* pSstringFactory)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ASSERT(*name >= 'A' && *name <= 'Z');
+
+ SString* pOutput = pSstringFactory->Create();
+ pOutput->Clear();
+ pOutput->Append(*name - W('A') + W('a'));
+ pOutput->Append(&name[1]);
+ return pOutput->GetUnicode();
+}
+
+void ManagedDebuggingAssistants::EnvironmentActivation(MdaEnvironment* pEnvironment)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pEnvironment->GetActivationMechanisms().GetCount() == 0)
+ return;
+
+ MdaFactory<StackSArray<MdaElemDeclDef> > arrayFactory;
+ MdaFactory<SString> sstringFactory;
+ MdaHashtable<MdaXmlElement*> mdaXmlPairs;
+
+ // Activate
+ SArray<SString*>& aActivationMechanisms = pEnvironment->GetActivationMechanisms();
+ SArray<MdaElemDeclDef>* pGroup = NULL;
+ StackSArray<SArray<MdaElemDeclDef>* > aGroups;
+
+#define MDA_DEFINE_GROUPS
+#include "mdaschema.inl"
+#undef MDA_DEFINE_GROUPS
+
+ // Match COMPlus_MDA env var to group
+ for (COUNT_T i = 0; i < aActivationMechanisms.GetCount(); i++)
+ {
+ SString& sszActivationMechanism = *aActivationMechanisms[i];
+
+ if (sszActivationMechanism.EqualsCaseInsensitive(W("ConfigFile")) || sszActivationMechanism.EqualsCaseInsensitive(W("1")))
+ {
+ ConfigFileActivation(pEnvironment->GetMdaConfigFile(), m_pMdaXmlIndustry, &mdaXmlPairs);
+ }
+ else
+ {
+ COUNT_T cGroup = 0;
+
+#define MDA_ACTIVATE_GROUPS
+#include "mdaschema.inl"
+#undef MDA_ACTIVATE_GROUPS
+
+#define MDA_ACTIVATE_SINGLTON_GROUPS
+#include "mdaschema.inl"
+#undef MDA_ACTIVATE_SINGLTON_GROUPS
+
+ }
+ }
+
+ if (mdaXmlPairs.GetCount() == 0)
+ return;
+
+ // Create
+ MdaXmlElement* pXmlAssistant = NULL;
+
+#define MDA_ASSISTANT_CREATION
+#include "mdaschema.inl"
+#undef MDA_ASSISTANT_CREATION
+}
+
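+// Note: these values appear to mirror the Win32 MessageBox return codes
+// (IDRETRY == 4, IDCANCEL == 2).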
+typedef enum
+{
+ MDA_MSGBOX_NONE = 0,
+ MDA_MSGBOX_RETRY = 4,
+ MDA_MSGBOX_CANCLE = 2,
+} MsgBoxResult;
+
+BOOL ManagedDebuggingAssistants::IsUnmanagedDebuggerAttached()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (IsDebuggerPresent())
+ return TRUE;
+
+ return FALSE;
+}
+
+BOOL ManagedDebuggingAssistants::IsManagedDebuggerAttached()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#if DEBUGGING_SUPPORTED
+ if (CORDebuggerAttached())
+ return TRUE;
+#endif
+
+ return FALSE;
+}
+
+BOOL ManagedDebuggingAssistants::IsDebuggerAttached()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return IsUnmanagedDebuggerAttached() || IsManagedDebuggerAttached();
+}
+
+MdaXmlElement* ManagedDebuggingAssistants::GetRootElement(MdaXmlElement* pMdaXmlRoot)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ pMdaXmlRoot->SetDeclDef(MdaElemDef(Msg));
+ pMdaXmlRoot->AddAttributeSz(MdaAttrDecl(Xmlns), MDA_TARGET_NAMESPACE)->SetNs(W("mda"));
+ return pMdaXmlRoot;
+}
+
+
+
+//
+// MdaXmlMessage
+//
+BOOL IsFormatChar(WCHAR c) { LIMITED_METHOD_CONTRACT; return (c == W('\\') || c == W('!') || c == W('+') || c == W('.') || c == W(':') || c == W('-')); }
+
+// Logic copied from /fx/src/Xml/System/Xml/Core/XmlRawTextWriterGenerator.cxx::WriteAttributeTextBlock
+SString& MdaXmlEscape(SString& sszBuffer, const SString& sszXml, BOOL bEscapeComment = FALSE)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ sszBuffer.Clear();
+
+ SString::CIterator itr = sszXml.Begin();
+ SString::CIterator end = sszXml.End();
+
+ while (itr != end)
+ {
+ WCHAR c = *itr;
+
+ switch(c)
+ {
+ case W('-'):
+ if (*(itr+1) == W('-') && bEscapeComment)
+ sszBuffer.Append(W("- "));
+ else
+ sszBuffer.Append(W("-"));
+ break;
+ case W('&'):
+ sszBuffer.Append(W("&amp;"));
+ break;
+ case W('<'):
+ sszBuffer.Append(W("&lt;"));
+ break;
+ case W('>'):
+ sszBuffer.Append(W("&gt;"));
+ break;
+ case W('"'):
+ sszBuffer.Append(W("&quote;"));
+ break;
+ default:
+ sszBuffer.Append(c);
+ }
+
+ itr++;
+ }
+
+ return sszBuffer;
+}
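+
+// For illustration: escaping the string a<b & "c" with the table above yields
+// a&lt;b &amp; &quot;c&quot;, and with bEscapeComment a "--" run is split into
+// "- -" so it cannot terminate an XML comment early.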
+
+SString* WrapString(SString& buffer, SString& sszString, SCOUNT_T cWidth, SCOUNT_T cIndent = 0, SCOUNT_T cPostIndent = 0)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString sszEscapedString;
+ MdaXmlEscape(sszEscapedString, sszString, TRUE);
+
+ StackSString sszIndent;
+ for (SCOUNT_T i = 0; i < cIndent; i ++)
+ sszIndent.Append(W(" "));
+
+ StackSString sszPostIndent;
+ for (SCOUNT_T i = 0; i < cPostIndent; i ++)
+ sszPostIndent.Append(W(" "));
+
+ buffer.Append(sszIndent);
+
+ SString::CIterator itr = sszEscapedString.Begin();
+ SString::CIterator lineStart = sszEscapedString.Begin();
+ SString::CIterator lineEnd = sszEscapedString.Begin();
+ SString::CIterator lastFormatChar = sszEscapedString.Begin();
+ SString::CIterator end = sszEscapedString.End();
+
+ while (itr != end)
+ {
+ if (*itr == W(' '))
+ lineEnd = itr;
+
+ // Keep track of reasonable breaks in member and file names...
+ if (IsFormatChar(*itr) && itr - lineStart < cWidth)
+ lastFormatChar = itr;
+
+ if (itr - lineStart >= cWidth || *itr == W('\n'))
+ {
+ if (*itr == W('\n'))
+ lineEnd = itr;
+
+            // If we didn't find a space, or breaking at the found space would fill less than 2/5 of the line...
+ else if (lineEnd == end || itr - lineEnd > cWidth * 3 / 5)
+ {
+ // ...then if we found a format char, start the wrap there...
+ if (lastFormatChar != end)
+ lineEnd = lastFormatChar + 1;
+ // ...else just do a simple wrap...
+ else
+ lineEnd = itr;
+ }
+
+ SString sszLine(sszEscapedString, lineStart, lineEnd);
+ buffer.Append(sszLine);
+ buffer.Append(sszPostIndent);
+ buffer.Append(W("\n"));
+ buffer.Append(sszIndent);
+
+ lineStart = lineEnd;
+
+            // If we wrapped on a space or a newline, then skip over that character as we already replaced it with a \n.
+ if (*lineEnd == W(' ') || *lineEnd == W('\n'))
+ lineStart++;
+
+ lineEnd = end;
+ lastFormatChar = end;
+ }
+
+ itr++;
+ }
+
+ SString sszLine(sszEscapedString, lineStart, itr);
+ buffer.Append(sszLine);
+
+ return &buffer;
+}
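+
+// Usage sketch: MdaXmlMessage::SendMessage below wraps the localized MDA
+// comment to 80 columns with a 7-space indent before emitting it as an XML
+// comment:
+//
+//   StackSString sszWrappedComment(W("\n"));
+//   WrapString(sszWrappedComment, sszComment, 80, 7);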
+
+LPCWSTR ToUpperFirstChar(SString& buffer, LPCWSTR name)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ASSERT(*name >= 'a' && *name <= 'z');
+
+ buffer.Clear();
+ buffer.Append(*name - W('a') + W('A'));
+ buffer.Append(&name[1]);
+ return buffer.GetUnicode();
+}
+
+MdaXmlMessage::MdaXmlMessage(MdaAssistant* pAssistant, BOOL bBreak, MdaXmlElement** ppMdaXmlRoot)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(g_mdaStaticHeap.m_pMda));
+ }
+ CONTRACTL_END;
+
+ m_pMdaAssistant = pAssistant;
+ m_bBreak = (pAssistant->GetSuppressDialog()) ? FALSE : bBreak;
+ m_pMdaXmlRoot = g_mdaStaticHeap.m_pMda->GetRootElement(m_mdaXmlIndustry.CreateElement());
+ *ppMdaXmlRoot = m_pAssistantXmlRoot = pAssistant->GetRootElement(m_mdaXmlIndustry.CreateElement(), bBreak);
+}
+
+MdaXmlMessage::MdaXmlMessage(MdaXmlElement** ppMdaXmlRoot) : m_bBreak(FALSE)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(g_mdaStaticHeap.m_pMda));
+ }
+ CONTRACTL_END;
+
+ *ppMdaXmlRoot = m_pMdaXmlRoot = g_mdaStaticHeap.m_pMda->GetRootElement(m_mdaXmlIndustry.CreateElement());
+}
+
+BOOL MdaXmlMessage::ShouldLogToManagedDebugger()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BOOL bUnmanagedDebuggerAttached = FALSE;
+ BOOL bManagedDebuggerAttached = FALSE;
+ BOOL bManagedDebugLoggingEnabled = FALSE;
+
+ bUnmanagedDebuggerAttached = IsUnmanagedDebuggerAttached();
+
+#if DEBUGGING_SUPPORTED
+ bManagedDebuggerAttached = IsManagedDebuggerAttached();
+ bManagedDebugLoggingEnabled = (g_pDebugInterface && g_pDebugInterface->IsLoggingEnabled());
+#endif
+
+ return (!bUnmanagedDebuggerAttached && bManagedDebuggerAttached && bManagedDebugLoggingEnabled);
+}
+
+// Send an event for this MDA.
+void MdaXmlMessage::SendEvent()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (IsHostRegisteredForEvent(Event_MDAFired))
+ {
+ // A host is registered for the MDA fired event so let's start by notifying the
+        // debugger if one is attached.
+ if (IsManagedDebuggerAttached() || IsUnmanagedDebuggerAttached())
+ {
+ SendDebugEvent();
+ }
+
+ // Now that the debugger has been notified and continued, let's notify the host
+        // so it can take any action it deems necessary based on the MDA that fired.
+ SendHostEvent();
+ }
+ else
+ {
+        // We aren't hosted, or no host has registered for the MDA fired event, so let's simply
+        // send the MDA to the debugger. Note that as opposed to the hosted case, we
+ // will force a JIT attach if no debugger is present.
+ SendDebugEvent();
+ }
+}
+
+// Send the MDA fired event to the host.
+void MdaXmlMessage::SendHostEvent()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MDAInfo info;
+ SString strStackTrace;
+
+ EX_TRY
+ {
+ // Retrieve the textual representation of the managed stack trace and add it to
+ // the MDA information we give the host.
+ GetManagedStackTraceString(TRUE, strStackTrace);
+ }
+ EX_CATCH
+ {
+ // We failed to get the stack trace string. This isn't fatal, we will simply not be
+ // able to provide this information as part of the notification.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ // Set up the information and invoke the host to process the MDA fired event.
+ info.lpMDACaption = m_pMdaAssistant->GetName();
+ info.lpStackTrace = strStackTrace;
+ info.lpMDAMessage = m_localizedMessage;
+ ProcessEventForHost(Event_MDAFired, &info);
+
+    // If the host initiated a thread abort, we want to raise it immediately to
+ // prevent any further code inside the VM from running and potentially
+ // crashing the process.
+ Thread *pThread = GetThread();
+ TESTHOOKCALL(AppDomainCanBeUnloaded(pThread->GetDomain()->GetId().m_dwId,FALSE));
+
+ if (pThread && pThread->IsAbortInitiated())
+ pThread->HandleThreadAbort(TRUE);
+}
+
+// Send a managed debug event for this MDA.
+// This will block until the debugger continues us. This means the debugger could do things like run callstacks
+// and change debuggee state.
+void MdaXmlMessage::SendDebugEvent()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(g_mdaStaticHeap.m_pMda));
+ }
+ CONTRACTL_END;
+
+    // Simple check to avoid getting the XML string if we're not going to actually use it.
+ if (!IsManagedDebuggerAttached() && !IsUnmanagedDebuggerAttached() && !m_bBreak)
+ {
+ return;
+ }
+
+ EX_TRY
+ {
+ StackSString sszXml;
+ LPCWSTR ns = NULL;
+
+ MdaSchema * pSchema = g_mdaStaticHeap.m_pMda->m_pAssistantSchema;
+ ns = pSchema->SetRootAttributes(m_pMdaXmlRoot);
+ m_pMdaXmlRoot->ToXml(&sszXml, ns);
+
+ // For managed + interop cases, send a managed debug event.
+ // If m_bBreak is true and no unmanaged debugger is attached trigger a jit-attach.
+ if (IsManagedDebuggerAttached() || (m_bBreak && !IsUnmanagedDebuggerAttached()))
+ {
+ // Get MDA name (this is the type)
+ StackSString sszMdaName;
+ ToUpperFirstChar(sszMdaName, m_pMdaAssistant->GetName());
+ // SendMDANotification needs to be called in preemptive GC mode.
+ GCX_PREEMP();
+
+ // This will do two things:
+ // 1. If a managed debugger is attached, it will send the managed debug event for the MDA.
+            // 2. If m_bBreak is set, we'll try to do a managed jit-attach.
+ // This blocks until continued. Since we're not slipping, we don't need the MDA_FLAG_SLIP flag.
+ g_pDebugInterface->SendMDANotification(
+ GetThread(),
+ &sszMdaName,
+ &m_localizedMessage,
+ &sszXml,
+ ((CorDebugMDAFlags) 0 ),
+ RunningInteractive() ? m_bBreak : FALSE);
+ }
+
+ if (IsUnmanagedDebuggerAttached() && !IsManagedDebuggerAttached())
+ {
+            // For the native case, send a native debug event for logging.
+ WszOutputDebugString(sszXml.GetUnicode());
+
+ if (m_bBreak)
+ RetailBreak();
+ }
+ }
+ EX_CATCH
+ {
+ // No global MDA state modified in TRY
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+void MdaXmlMessage::SendMessagef(int resourceID, ...)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString sszResourcef;
+ sszResourcef.LoadResource(CCompRC::DesktopCLR, resourceID );
+ ASSERT(!sszResourcef.IsEmpty());
+
+ va_list argItr;
+ va_start(argItr, resourceID);
+ m_localizedMessage.PVPrintf(sszResourcef, argItr);
+ va_end(argItr);
+
+ SendMessage();
+}
+
+
+void MdaXmlMessage::SendMessage(int resourceID)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SendMessagef(resourceID);
+}
+
+void MdaXmlMessage::SendMessage(LPCWSTR szMessage)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_localizedMessage.Set(szMessage);
+
+ SendMessage();
+}
+
+void MdaXmlMessage::SendMessage()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(g_mdaStaticHeap.m_pMda));
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ if (g_mdaStaticHeap.m_pMda->m_bValidateOutput)
+ {
+ MdaSchema::ValidationResult validationResult;
+ if (g_mdaStaticHeap.m_pMda->m_pAssistantMsgSchema->Validate(m_pAssistantXmlRoot, &validationResult)->ValidationFailed())
+ {
+ MDA_TRIGGER_ASSISTANT(XmlValidationError, ReportError(&validationResult));
+ ASSERT(W("Your MDA assistant's output did not match its output schema."));
+ }
+ }
+#endif
+
+ if (!m_localizedMessage.IsEmpty())
+ {
+ StackSString sszComment(m_localizedMessage);
+ StackSString sszWrappedComment(W("\n"));
+ WrapString(sszWrappedComment, sszComment, 80, 7);
+ sszWrappedComment.Append(W("\n "));
+ m_pMdaXmlRoot->AddChildComment(sszWrappedComment.GetUnicode());
+ }
+
+ m_pMdaXmlRoot->AddChild(m_pAssistantXmlRoot);
+
+ // Send applicable debug event (managed, native, interop) for this MDA.
+ // If this is a severe probe, it may trigger a jit-attach
+ SendEvent();
+}
+
+
+//
+// MdaXPath::FindXXX
+//
+
+void MdaXPath::Find(SArray<MdaXPathVariable>& args, SString* pWildCard, va_list argItr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for (COUNT_T i = 0; i < GetArgCount(); i ++)
+ {
+ XPathVarType varType = m_argTypes[i];
+
+ if (varType == XPathVarElemDeclDef)
+ args[i].m_u.m_elemDeclDef = va_arg(argItr, MdaElemDeclDef);
+
+ else if (varType == XPathVarAttrDeclDef)
+ args[i].m_u.m_attrDeclDef = va_arg(argItr, MdaAttrDeclDef);
+
+ else if (varType == XPathVarAttrBool)
+ args[i].m_u.m_bool = va_arg(argItr, BOOL);
+
+ else if (varType == XPathVarAttrINT32)
+ args[i].m_u.m_int32 = va_arg(argItr, INT32);
+
+ else if (varType == XPathVarAttrSString)
+ {
+ SString* pSString = va_arg(argItr, SString*);
+ ASSERT(CheckPointer(pSString, NULL_OK));
+ if (!pSString)
+ pSString = pWildCard;
+ args[i].m_u.m_pSstr = pSString;
+ }
+
+ else { UNREACHABLE(); }
+ }
+}
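+
+// Usage sketch (hypothetical query): for a compiled MdaXPath of W("/?"), a call
+// such as pXPath->FindElement(pRoot, MdaElemDef(MdaConfig)) binds the vararg to
+// the '?' element placeholder via Find() above before running the query.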
+
+MdaXmlElement* MdaXPath::FindElement(MdaXmlElement* pRoot, ...)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!pRoot)
+ return NULL;
+
+ va_list argItr;
+ va_start(argItr, pRoot);
+
+ SString wildCard;
+ InlineSArray<MdaXPathVariable, 20> args;
+ Find(args, &wildCard, argItr);
+
+ MdaXPathResult result(&args);
+ m_pCompiledQuery->Run(pRoot, &result);
+
+ va_end(argItr);
+ return result.GetXmlElement();
+}
+
+MdaXmlAttribute* MdaXPath::FindAttribute(MdaXmlElement* pRoot, ...)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!pRoot)
+ return NULL;
+
+ va_list argItr;
+ va_start(argItr, pRoot);
+
+ SString wildCard;
+ InlineSArray<MdaXPathVariable, 20> args;
+ Find(args, &wildCard, argItr);
+
+ MdaXPathResult result(&args);
+ m_pCompiledQuery->Run(pRoot, &result);
+
+ va_end(argItr);
+ return result.GetXmlAttribute();
+}
+
+SArray<MdaXmlElement*>* MdaXPath::FindElements(MdaXmlElement* pRoot, SArray<MdaXmlElement*>* pResult, ...)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!pRoot)
+ return NULL;
+
+ va_list argItr;
+ va_start(argItr, pResult);
+
+ SString wildCard;
+ InlineSArray<MdaXPathVariable, 20> args;
+ Find(args, &wildCard, argItr);
+
+ MdaXPathResult result(pResult, &args);
+ m_pCompiledQuery->Run(pRoot, &result);
+
+ va_end(argItr);
+ return pResult;
+}
+
+SArray<MdaXmlAttribute*>* MdaXPath::FindAttributes(MdaXmlElement* pRoot, SArray<MdaXmlAttribute*>* pResult, ...)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!pRoot)
+ return NULL;
+
+ va_list argItr;
+ va_start(argItr, pResult);
+
+ SString wildCard;
+ InlineSArray<MdaXPathVariable, 20> args;
+ Find(args, &wildCard, argItr);
+
+ MdaXPathResult result(pResult, &args);
+ m_pCompiledQuery->Run(pRoot, &result);
+
+ va_end(argItr);
+ return pResult;
+}
+
+
+//
+// MdaXPath::MdaXPathCompiler -- Lexer
+//
+
+#define ISWHITE(ch) (ch == W(' ') || ch == W('\t') || ch == W('\n'))
+#define ISRESERVED(ch) (wcschr(W("./()[]&|=@*?':"), ch) != NULL)
+#define ISMDAID(ch) (!ISWHITE(ch) && !ISRESERVED(ch))
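+
+// For illustration, the queries this mini-XPath grammar accepts look like the
+// literal W("/mdaConfig/assistants/*") used above; '?' tokens act as
+// placeholders that Find() binds to the varargs of the FindXXX entry points.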
+
+MdaXPath::MdaXPathCompiler::MdaXPathTokens MdaXPath::MdaXPathCompiler::LexAToken()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (*m_itr == W('\0'))
+ return MdaXPathEnd;
+
+ if (ISWHITE(*m_itr))
+ {
+ m_itr++;
+ return LexAToken();
+ }
+
+ if (ISMDAID(*m_itr))
+ {
+ m_identifier.Clear();
+
+ do
+ {
+ m_identifier.Append(*m_itr);
+ m_itr++;
+ }
+ while(ISMDAID(*m_itr));
+
+ m_identifier.Append(W("\0"));
+ return MdaXPathIdentifier;
+ }
+
+ if (*m_itr == W('\''))
+ {
+ m_identifier.Clear();
+
+ m_itr++;
+
+ while(*m_itr != W('\''))
+ {
+ m_identifier.Append(*m_itr);
+ m_itr++;
+ }
+
+ m_identifier.Append(W("\0"));
+
+ m_itr++;
+ return MdaXPathQuotedString;
+ }
+
+ WCHAR c = *m_itr;
+ m_itr++;
+ switch(c)
+ {
+ case W('.'): return MdaXPathDot;
+ case W('/'): return MdaXPathSlash;
+ case W('('): return MdaXPathOpenParen;
+ case W(')'): return MdaXPathCloseParen;
+ case W('['): return MdaXPathOpenSqBracket;
+ case W(']'): return MdaXPathCloseSqBracket;
+ case W('&'): return MdaXPathLogicalAnd;
+ case W('|'): return MdaXPathLogicalOr;
+ case W('='): return MdaXPathEquals;
+ case W('@'): return MdaXPathAtSign;
+ case W('*'): return MdaXPathAstrix;
+ case W('?'): return MdaXPathQMark;
+ }
+
+ UNREACHABLE();
+}
+
+
+//
+// MdaXPath::MdaXPathCompiler -- Parser
+//
+
+// XPATH
+// '/' ELEMENT_EXPR end
+// '/' ELEMENT_EXPR XPATH
+// '/' ATTRIBUTE end
+MdaXPath::MdaXPathBase* MdaXPath::MdaXPathCompiler::XPATH()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ PRECONDITION(TokenIs(MdaXPathXPATH));
+
+ MdaXPathElement* pElementExpr = NULL;
+
+ NextToken();
+ if (TokenIs(MdaXPathELEMENT_EXPR))
+ pElementExpr = ELEMENT_EXPR();
+
+ else if (TokenIs(MdaXPathATTRIBUTE))
+ {
+ MdaXPathAttribute* pAttr = ATTRIBUTE();
+ pAttr->MarkAsTarget();
+ NextToken();
+ ASSERT(TokenIs(MdaXPathEnd));
+ return pAttr;
+ }
+
+ else { UNREACHABLE(); }
+
+
+ if (TokenIs(MdaXPathEnd))
+ return pElementExpr->MarkAsTarget();
+
+ else if (TokenIs(MdaXPathXPATH))
+ return pElementExpr->SetChild(XPATH());
+
+ else { UNREACHABLE(); }
+}
+
+// ATTRIBUTE
+// '@' id
+// '@' '?'
+MdaXPath::MdaXPathAttribute* MdaXPath::MdaXPathCompiler::ATTRIBUTE()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ PRECONDITION(TokenIs(MdaXPathATTRIBUTE));
+
+ MdaXPathAttribute* pAttr = NULL;
+
+ NextToken();
+ if (TokenIs(MdaXPathQMark))
+ {
+ pAttr = m_pXPath->m_attrFactory.Create()->SetName(++m_pXPath->m_cArgs);
+ *m_pXPath->m_argTypes.Append() = XPathVarAttrDeclDef;
+ }
+
+ else if (TokenIs(MdaXPathIdentifier))
+ {
+ pAttr = m_pXPath->m_attrFactory.Create()->SetName(MdaSchema::GetAttributeType(GetIdentifier()));
+ }
+
+ else { UNREACHABLE(); }
+
+ NextToken();
+ return pAttr;
+}
+
+// ELEMENT_EXPR
+// ELEMENT '[' FILTER_EXPR ']'
+// ELEMENT
+MdaXPath::MdaXPathElement* MdaXPath::MdaXPathCompiler::ELEMENT_EXPR()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ PRECONDITION(TokenIs(MdaXPathELEMENT_EXPR));
+
+ MdaXPathElement* pElement = ELEMENT();
+
+ if (TokenIs(MdaXPathOpenSqBracket))
+ {
+ NextToken();
+ pElement->SetQualifier(FILTER_EXPR());
+ ASSERT(TokenIs(MdaXPathCloseSqBracket));
+
+ NextToken();
+ }
+
+ return pElement;
+}
+
+// FILTER_EXPR
+// FILTER
+// '(' FILTER ')'
+// FILTER '&' FILTER
+// FILTER '|' FILTER
+MdaXPath::MdaXPathBase* MdaXPath::MdaXPathCompiler::FILTER_EXPR()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ PRECONDITION(TokenIs(MdaXPathFILTER_EXPR));
+
+ // '(' FILTER ')'
+ if (TokenIs(MdaXPathOpenParen))
+ {
+ MdaXPath::MdaXPathBase* pFilter = FILTER();
+ ASSERT(TokenIs(MdaXPathCloseParen));
+
+ NextToken();
+ return pFilter;
+ }
+
+ if (TokenIs(MdaXPathFILTER))
+ {
+ MdaXPath::MdaXPathBase* pFilter = FILTER();
+
+ // FILTER '&' FILTER
+ if (TokenIs(MdaXPathLogicalAnd))
+ {
+ NextToken();
+ return m_pXPath->m_logicalOpFactory.Create()->Initialize(TRUE, pFilter, FILTER());
+ }
+
+ // FILTER '|' FILTER
+ if (TokenIs(MdaXPathLogicalOr))
+ {
+ NextToken();
+ return m_pXPath->m_logicalOpFactory.Create()->Initialize(FALSE, pFilter, FILTER());
+ }
+
+ // FILTER
+ return pFilter;
+ }
+
+ UNREACHABLE();
+}
+
+// FILTER
+// ELEMENT_EXPR
+// ATTRIBUTE_FILTER
+// ELEMENT_EXPR ATTRIBUTE_FILTER
+MdaXPath::MdaXPathBase* MdaXPath::MdaXPathCompiler::FILTER()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ PRECONDITION(TokenIs(MdaXPathFILTER));
+
+ if (TokenIs(MdaXPathELEMENT_EXPR))
+ {
+ MdaXPathElement* pElementExpr = ELEMENT_EXPR();
+
+ if (TokenIs(MdaXPathATTRIBUTE_FILTER))
+ pElementExpr->SetQualifier(ATTRIBUTE_FILTER());
+
+ return pElementExpr;
+ }
+
+ if (TokenIs(MdaXPathATTRIBUTE_FILTER))
+ return ATTRIBUTE_FILTER();
+
+ UNREACHABLE();
+}
+
+// ELEMENT
+// id
+// '*'
+// '?'
+MdaXPath::MdaXPathElement* MdaXPath::MdaXPathCompiler::ELEMENT()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ PRECONDITION(TokenIs(MdaXPathELEMENT));
+
+ MdaXPathElement* pElement = m_pXPath->m_elementFactory.Create();
+
+ if (TokenIs(MdaXPathAstrix))
+ pElement->Initialize();
+
+ else if (TokenIs(MdaXPathIdentifier))
+ pElement->Initialize(MdaSchema::GetElementType(GetIdentifier()));
+
+ else if (TokenIs(MdaXPathQMark))
+ {
+ pElement->Initialize(++m_pXPath->m_cArgs);
+ *m_pXPath->m_argTypes.Append() = XPathVarElemDeclDef;
+ }
+
+ else { UNREACHABLE(); }
+
+ NextToken();
+ return pElement;
+}
+
+// ATTRIBUTE_FILTER
+// ATTRIBUTE
+// ATTRIBUTE '=' ''' id '''
+// ATTRIBUTE '=' '?'
+MdaXPath::MdaXPathAttribute* MdaXPath::MdaXPathCompiler::ATTRIBUTE_FILTER()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ PRECONDITION(TokenIs(MdaXPathATTRIBUTE_FILTER));
+
+ MdaXPathAttribute* pAttr = ATTRIBUTE();
+
+ if (TokenIs(MdaXPathEquals))
+ {
+ NextToken();
+
+ if (TokenIs(MdaXPathQuotedString))
+ {
+ NextToken();
+ pAttr->SetValue(GetIdentifier());
+
+ NextToken();
+ ASSERT(TokenIs(MdaXPathQuotedString));
+ }
+ else if (TokenIs(MdaXPathQMark))
+ {
+ pAttr->SetValue(++m_pXPath->m_cArgs);
+ *m_pXPath->m_argTypes.Append() = XPathVarAttrSString;
+ }
+ else { UNREACHABLE(); }
+ }
+
+ NextToken();
+ return pAttr;
+}
+
+
+//
+// MdaXPath::Elements::Run() -- The search engine
+//
+
+BOOL MdaXPath::MdaXPathElement::Run(MdaXmlElement* pElement, MdaXPathResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BOOL bAnyPass = FALSE;
+ if (pResult->IsRoot())
+ {
+ bAnyPass |= RunOnChild(pElement, pResult);
+ }
+ else
+ {
+ SArray<MdaXmlElement*>& children = pElement->GetChildren();
+
+ for (UINT32 i = 0; i < children.GetCount(); i ++)
+ {
+ bAnyPass |= RunOnChild(children[i], pResult);
+ }
+ }
+
+ return bAnyPass;
+}
+
+BOOL MdaXPath::MdaXPathElement::RunOnChild(MdaXmlElement* pElement, MdaXPathResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MdaElemDeclDef name = m_nameArg == NOT_VARIABLE ? m_name : pResult->GetArgs()[m_nameArg].m_u.m_elemDeclDef;
+
+ if (name != MdaElemUndefined && name != pElement->GetDeclDef())
+ return FALSE;
+
+ if (m_pQualifier && !m_pQualifier->Run(pElement, pResult))
+ return FALSE;
+
+ if (m_pChild && !m_pChild->Run(pElement, pResult))
+ return FALSE;
+
+ if (m_bIsTarget)
+ {
+ ASSERT(!m_pChild);
+ pResult->AddMatch(pElement);
+ }
+
+ return TRUE;
+}
+
+BOOL MdaXPath::MdaXPathAttribute::Run(MdaXmlElement* pElement, MdaXPathResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MdaAttrDeclDef name = m_nameArg == NOT_VARIABLE ? m_name : pResult->GetArgs()[m_nameArg].m_u.m_attrDeclDef;
+ SString& value = m_valueArg == NOT_VARIABLE ? m_value : *pResult->GetArgs()[m_valueArg].m_u.m_pSstr;
+
+ MdaXmlAttribute* pAttr = pElement->GetAttribute(name);
+ if (!pAttr)
+ return FALSE;
+
+ LPCWSTR szAttrValue = pAttr->GetValue();
+ if (!value.IsEmpty() && *szAttrValue != W('*') && !value.Equals(szAttrValue))
+ return FALSE;
+
+ if (m_bIsTarget)
+ pResult->AddMatch(pElement);
+
+ return TRUE;
+}
+
+BOOL MdaXPath::MdaXPathLogicalOp::Run(MdaXmlElement* pParent, MdaXPathResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_andOp)
+ return m_pLhs->Run(pParent, pResult) && m_pRhs->Run(pParent, pResult);
+
+ return m_pLhs->Run(pParent, pResult) || m_pRhs->Run(pParent, pResult);
+}
+
+
+//
+// MdaSchema
+//
+
+MdaHashtable<MdaElemDeclDef>* MdaSchema::g_pHtElementType;
+MdaHashtable<MdaAttrDeclDef>* MdaSchema::g_pHtAttributeType;
+LPCWSTR MdaSchema::g_arElementNames[MdaElemEnd];
+LPCWSTR MdaSchema::g_arAttributeNames[MdaAttrEnd];
+MdaFactory<SString>* MdaSchema::g_pSstringFactory;
+MdaElemDeclDef MdaSchema::MdaSchemaTypeToElemDef[MdaSchema::MdaSchemaTypeEnd];
+MdaSchema::MdaSchemaMetaType MdaSchema::MdaSchemaTypeToMetaType[MdaSchema::MdaSchemaTypeEnd];
+
+LPCWSTR MdaSchema::ToLowerFirstChar(LPCWSTR name)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return ::ToLowerFirstChar(name, g_pSstringFactory);
+}
+
+void MdaSchema::Initialize()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ g_pHtElementType = new MdaHashtable<MdaElemDeclDef>();
+ g_pHtAttributeType = new MdaHashtable<MdaAttrDeclDef>();
+ g_pSstringFactory = new MdaFactory<SString>();
+
+ COUNT_T i = 0;
+ MdaSchemaTypeToElemDef[i++] = MdaElemDef(Sequence); // MdaSchemaSequenceType
+ MdaSchemaTypeToElemDef[i++] = MdaElemDef(Choice); // MdaSchemaChoiceType
+ MdaSchemaTypeToElemDef[i++] = MdaElemDef(Group); // MdaSchemaGroupType
+ MdaSchemaTypeToElemDef[i++] = MdaElemDef(Group); // MdaSchemaGroupRefType
+ MdaSchemaTypeToElemDef[i++] = MdaElemDef(Schema); // MdaSchemaRootType
+ MdaSchemaTypeToElemDef[i++] = MdaElemDef(Attribute); // MdaSchemaAttributeType
+ MdaSchemaTypeToElemDef[i++] = MdaElemDef(Element); // MdaSchemaElementType
+ MdaSchemaTypeToElemDef[i++] = MdaElemDef(ComplexType); // MdaSchemaComplexTypeType
+ MdaSchemaTypeToElemDef[i++] = MdaElemDef(ComplexType); // MdaSchemaComplexTypeDefType
+ MdaSchemaTypeToElemDef[i++] = MdaElemDef(Element); // MdaSchemaElementRefTyp
+ MdaSchemaTypeToElemDef[i++] = MdaElemDef(Extension); // MdaSchemaExtensionType
+ MdaSchemaTypeToElemDef[i++] = MdaElemDef(Element); // MdaSchemaElementRefTypeType
+ MdaSchemaTypeToElemDef[i++] = MdaElemDef(ComplexContent); // MdaSchemaComplexContentType
+ MdaSchemaTypeToElemDef[i++] = MdaElemDef(Element); // MdaSchemaElementAnyType
+
+ i = 0;
+ MdaSchemaTypeToMetaType[i++] = MdaSchemaMataTypePattern; // MdaSchemaSequenceType
+ MdaSchemaTypeToMetaType[i++] = MdaSchemaMataTypePattern; // MdaSchemaChoiceType
+ MdaSchemaTypeToMetaType[i++] = (MdaSchemaMetaType)(MdaSchemaMataTypePattern | MdaSchemaMataTypeDeclDef); // MdaSchemaGroupType
+ MdaSchemaTypeToMetaType[i++] = (MdaSchemaMetaType)(MdaSchemaMataTypePattern | MdaSchemaMataTypeRef); // MdaSchemaGroupRefType
+ MdaSchemaTypeToMetaType[i++] = MdaSchemaMataNone; // MdaSchemaRootType
+ MdaSchemaTypeToMetaType[i++] = MdaSchemaMataNone; // MdaSchemaAttributeType
+ MdaSchemaTypeToMetaType[i++] = MdaSchemaMataTypeDeclDef; // MdaSchemaElementType
+ MdaSchemaTypeToMetaType[i++] = (MdaSchemaMetaType)(MdaSchemaMataNone | MdaSchemaMataMayHaveAttributes); // MdaSchemaComplexTypeType
+ MdaSchemaTypeToMetaType[i++] = (MdaSchemaMetaType)(MdaSchemaMataTypeDeclDef | MdaSchemaMataMayHaveAttributes); // MdaSchemaComplexTypeDefType
+ MdaSchemaTypeToMetaType[i++] = MdaSchemaMataTypeRef; // MdaSchemaElementRefTyp
+ MdaSchemaTypeToMetaType[i++] = (MdaSchemaMetaType)(MdaSchemaMataTypeRef | MdaSchemaMataMayHaveAttributes); // MdaSchemaExtensionType
+ MdaSchemaTypeToMetaType[i++] = (MdaSchemaMetaType)(MdaSchemaMataTypeDeclDef | MdaSchemaMataTypeRef); // MdaSchemaElementRefTypeType
+ MdaSchemaTypeToMetaType[i++] = MdaSchemaMataNone; // MdaSchemaComplexContentType
+ MdaSchemaTypeToMetaType[i++] = MdaSchemaMataTypeDeclDef; // MdaSchemaElementAnyType
+
+ i = 0;
+#define MDA_MAP_ASSISTANT_DEFINITION_TO_NAME
+#include "mdaschema.inl"
+#undef MDA_MAP_ASSISTANT_DEFINITION_TO_NAME
+ g_arElementNames[i++] = NULL;
+#define MDA_MAP_ELEMENT_DEFINITION_TO_NAME
+#include "mdaschema.inl"
+#undef MDA_MAP_ELEMENT_DEFINITION_TO_NAME
+ g_arElementNames[i++] = NULL;
+#define MDA_MAP_ELEMENT_DECLARATION_TO_NAME
+#include "mdaschema.inl"
+#undef MDA_MAP_ELEMENT_DECLARATION_TO_NAME
+ g_arElementNames[i++] = NULL; // Max
+ g_arElementNames[i++] = W("!--"); // Comment
+ g_arElementNames[i++] = NULL; // Undefined
+
+ i = 0;
+#define MDA_MAP_ATTRIBUTE_DECLARATION_TO_NAME
+#include "mdaschema.inl"
+#undef MDA_MAP_ATTRIBUTE_DECLARATION_TO_NAME
+
+#define MDA_MAP_ASSISTANT_NAME_TO_DEFINITION
+#include "mdaschema.inl"
+#undef MDA_MAP_ASSISTANT_NAME_TO_DEFINITION
+
+#define MDA_MAP_ELEMENT_NAME_TO_DEFINITION
+#include "mdaschema.inl"
+#undef MDA_MAP_ELEMENT_NAME_TO_DEFINITION
+
+#define MDA_MAP_ELEMENT_NAME_TO_DECLARATION
+#include "mdaschema.inl"
+#undef MDA_MAP_ELEMENT_NAME_TO_DECLARATION
+
+#define MDA_MAP_ATTRIBUTE_NAME_TO_DECLARATION
+#include "mdaschema.inl"
+#undef MDA_MAP_ATTRIBUTE_NAME_TO_DECLARATION
+}
+
+MdaElemDeclDef MdaSchema::GetElementType(LPCWSTR name, BOOL bAssertDefined)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MdaElemDeclDef type;
+
+ if (!g_pHtElementType->Get(name, &type))
+ {
+ ASSERT(!bAssertDefined);
+ return MdaElemUndefined;
+ }
+
+ return type;
+}
+
+LPCWSTR MdaSchema::GetElementName(MdaElemDeclDef type)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PRECONDITION(type >= 0 && type < MdaElemUndefined);
+ return g_arElementNames[type];
+}
+
+MdaAttrDeclDef MdaSchema::GetAttributeType(LPCWSTR name, BOOL bAssertDefined)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MdaAttrDeclDef type;
+
+ if (!g_pHtAttributeType->Get(name, &type))
+ {
+ ASSERT(!bAssertDefined);
+ return MdaAttrUndefined;
+ }
+
+ return type;
+}
+
+LPCWSTR MdaSchema::GetAttributeName(MdaAttrDeclDef type)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return g_arAttributeNames[type];
+}
+
+// TODO: Validation error reporting needs work
+MdaSchema::ValidationResult* MdaSchema::Validate(MdaXmlElement* pRoot, ValidationResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ pResult->Initialize(this, pRoot);
+
+ MdaSchemaBase* pXsd = *GetDef(pRoot->GetDeclDef());
+    ASSERT((CheckPointer(pXsd) || (pRoot->GetDeclDef() > MdaElemDecl(Max))) && W("You likely did not include an MDA_DEFINE_OUTPUT section in your schema!"));
+
+ BOOL bValidationSucceeded = pXsd ? pXsd->Validate(pRoot, pResult) : FALSE;
+
+ if (bValidationSucceeded)
+ pResult->ResetResult();
+ else
+ pResult->SetError();
+
+ ASSERT(pResult->ValidationFailed() == !bValidationSucceeded);
+ return pResult;
+}
+
+MdaSchema::MdaSchema()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for(COUNT_T i = 0; i < MdaElemEnd; i ++)
+ m_definitions[i] = NULL;
+}
+
+
+//
+// MdaAssistantSchema
+//
+
+MdaAssistantSchema::MdaAssistantSchema()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#define MDA_DEFINE_ASSISTANT_SCHEMA
+#include "mdaschema.inl"
+#undef MDA_DEFINE_ASSISTANT_SCHEMA
+
+#define MDA_DEFINE_MDA_ASSISTANT_CONFIG_GROUP
+#include "mdaschema.inl"
+#undef MDA_DEFINE_MDA_ASSISTANT_CONFIG_GROUP
+}
+
+LPCWSTR MdaAssistantSchema::SetRootAttributes(MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //pXml->AddAttribute(W("xmlns:") MDA_SCHEMA_PREFIX, MDA_TARGET_NAMESPACE);
+ //pXml->AddAttribute(W("xmlns:xsi"), W("http://www.w3.org/2001/XMLSchema-instance"));
+ return MDA_SCHEMA_PREFIX;
+}
+
+
+//
+// MdaAssistantMsgSchema
+//
+
+MdaAssistantMsgSchema::MdaAssistantMsgSchema()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#define MDA_DEFINE_ASSISTANT_MSG_SCHEMA
+#include "mdaschema.inl"
+#undef MDA_DEFINE_ASSISTANT_MSG_SCHEMA
+
+#define MDA_DEFINE_MDA_ASSISTANT_MSG_GROUP
+#include "mdaschema.inl"
+#undef MDA_DEFINE_MDA_ASSISTANT_MSG_GROUP
+}
+
+LPCWSTR MdaAssistantMsgSchema::SetRootAttributes(MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //pXml->AddAttribute(W("xmlns:") MDA_SCHEMA_PREFIX, MDA_TARGET_NAMESPACE);
+ //pXml->AddAttribute(W("xmlns:xsi"), W("http://www.w3.org/2001/XMLSchema-instance"));
+ return MDA_SCHEMA_PREFIX;
+}
+
+
+//
+// MdaSchemaSchema
+//
+
+MdaSchemaSchema::MdaSchemaSchema()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#define MDA_DEFINE_SCHEMA_SCHEMA
+#include "mdaschema.inl"
+#undef MDA_DEFINE_SCHEMA_SCHEMA
+}
+
+LPCWSTR MdaSchemaSchema::SetRootAttributes(MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ pXml->AddAttributeSz(MdaAttrDecl(TargetNamespace), MDA_TARGET_NAMESPACE);
+ pXml->AddAttributeSz(MdaAttrDecl(Xmlns), W("http://www.w3.org/2001/XMLSchema"))->SetNs(W("xs"));
+ pXml->AddAttributeSz(MdaAttrDecl(Xmlns), MDA_TARGET_NAMESPACE);
+ return W("xs");
+}
+
+
+//
+// MdaSchema::MdaSchemaXXX
+//
+MdaXmlElement* MdaSchema::MdaSchemaBase::ToXml(MdaXmlIndustry* pMdaXmlIndustry, MdaSchemaBase* pViolation)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return ToXml(pMdaXmlIndustry->CreateElement(), pViolation);
+}
+
+MdaXmlElement* MdaSchema::MdaSchemaBase::ToXml(MdaXmlElement* pXmlRoot, MdaSchemaBase* pViolation)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LPCWSTR debugName = GetName();
+
+ MdaXmlElement* pXml = pXmlRoot->AddChild(GetSchemaDeclDef());
+ SetAttributes(pXml);
+
+// if (this == pViolation)
+// pXml->AddAttributeSz(MdaAttrDecl(Violated), W("---- THIS XSD ELEMENT VIOLATED -----"));
+
+ if (m_children.GetCount() == 1 &&
+ m_children[0]->GetSchemaDeclDef() == MdaElemDef(ComplexType) &&
+ m_children[0]->m_children.GetCount() == 0 &&
+ (!MayHaveAttr(m_children[0]) ||
+ m_children[0]->GetAttributes().GetCount() == 0))
+ {
+        // Convert <Element><ComplexType/></Element> to <Element/>
+ return pXml;
+ }
+
+ for(COUNT_T i = 0; i < m_children.GetCount(); i ++)
+ {
+ debugName = m_children[i]->GetName();
+ m_children[i]->ToXml(pXml, pViolation);
+ }
+
+ if (MayHaveAttr(this))
+ {
+ SArray<MdaSchemaAttribute*>& attributes = GetAttributes();
+ for(COUNT_T j = 0; j < attributes.GetCount(); j ++)
+ {
+ debugName = attributes[j]->GetName();
+ attributes[j]->ToXml(pXml, pViolation);
+ }
+ }
+
+ return pXml;
+}
+
+
+void MdaSchema::MdaSchemaBase::AddChild(MdaSchemaBase* pElement)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pElement->GetSchemaDeclDef() == MdaElemDef(Attribute))
+ *GetAttributes().Append() = (MdaSchemaAttribute*)pElement;
+ else
+ *m_children.Append() = pElement;
+}
+
+//
+// Validation
+//
+
+#define CpdXsdIfFailGo(EXPR) do { if (!(EXPR)) { goto Fail; } } while (0)
+#define CpdXsdTest(EXPR) do { if (!(EXPR)) { pResult->SetError(this, pElement); goto Fail; } } while (0)
+#define MDA_XSD_VERIFY_OK return TRUE;
+#define MDA_XSD_VERIFY_FAIL Fail: return FALSE;
+
+BOOL MdaSchema::MdaSchemaElement::Validate(MdaXmlElement* pElement, ValidationResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString buffer;
+ LPCWSTR debug = pElement->DebugToString(&buffer);
+
+ CpdXsdTest(pElement->GetDeclDef() == GetDeclDef());
+
+ for(COUNT_T i = 0; i < m_children.GetCount(); i++)
+ CpdXsdIfFailGo(m_children[i]->Validate(pElement, pResult));
+
+ MDA_XSD_VERIFY_OK;
+ MDA_XSD_VERIFY_FAIL;
+}
+
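+// Illustrative sketch of the period matching below: a sequence whose children
+// are the elements (A, B) has a period of 2; with m_min == 1 and m_max == 2 it
+// matches the child runs "A B" and "A B A B", but not an empty run or "A".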
+BOOL MdaSchema::MdaSchemaSequence::ValidatePattern(MdaXmlElement* pElement, ValidationResult* pResult, COUNT_T* pCount)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString buffer;
+ LPCWSTR debug = pElement->DebugToString(&buffer);
+
+ COUNT_T cPeriod = m_children.GetCount();
+ COUNT_T cChildren = pElement->GetChildren().GetCount();
+ COUNT_T cCurrent = *pCount;
+ COUNT_T cCount = cCurrent;
+ COUNT_T cMatches = 0;
+
+ if (cPeriod == 0)
+ return TRUE;
+
+ while(cCurrent <= cChildren)
+ {
+ MdaSchemaBase* pXsd = m_children[cMatches % cPeriod];
+ if (pXsd->GetSchemaDeclDef() == MdaElemDef(Element))
+ {
+ if (cCurrent == cChildren)
+ break;
+
+ if (!pXsd->Validate(pElement->GetChildren()[cCurrent], pResult))
+ break;
+
+ cCurrent++;
+ }
+ else
+ {
+ ASSERT(IsPattern(pXsd));
+ if (!pXsd->ValidatePattern(pElement, pResult, &cCurrent))
+ break;
+ }
+
+ cMatches++;
+
+ // One period matched
+ if (cMatches % cPeriod == 0)
+ cCount = cCurrent;
+
+        // Maximum number of periods matched
+ if (cMatches / cPeriod == m_max)
+ break;
+ }
+
+    // Test whether the minimum number of periods has been matched
+ if (cMatches / cPeriod < m_min)
+ return FALSE;
+
+ // Update the position past the matched elements
+ *pCount = cCount;
+
+ return TRUE;
+}
+
+BOOL MdaSchema::MdaSchemaChoice::ValidatePattern(MdaXmlElement* pElement, ValidationResult* pResult, COUNT_T* pCount)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString buffer;
+ LPCWSTR debug = pElement->DebugToString(&buffer);
+
+ BOOL bFound = FALSE;
+ COUNT_T cCurrent = *pCount;
+ COUNT_T cChildren = pElement->GetChildren().GetCount();
+
+ for(COUNT_T cXsd = 0; cXsd < m_children.GetCount(); cXsd++)
+ {
+ MdaSchemaBase* pXsd = m_children[cXsd];
+
+ if (IsPattern(pXsd))
+ {
+ COUNT_T cOldCurrent = cCurrent;
+ if (pXsd->ValidatePattern(pElement, pResult, &cCurrent))
+ {
+ // "Empty matches" only allowed in choice pattern if there are no children to match
+ if (cOldCurrent != cCurrent || cChildren == 0)
+ {
+ bFound = TRUE;
+ break;
+ }
+ }
+ }
+ else
+ {
+ if (cCurrent == cChildren)
+ break;
+
+ if (pXsd->Validate(pElement->GetChildren()[cCurrent], pResult))
+ {
+ cCurrent++;
+ bFound = TRUE;
+ break;
+ }
+ }
+ }
+
+ CpdXsdIfFailGo(bFound);
+
+ *pCount = cCurrent;
+
+ MDA_XSD_VERIFY_OK;
+ MDA_XSD_VERIFY_FAIL;
+}
+
+#define this pThis
+BOOL MdaSchema::Validate(MdaSchemaAttribute* pThis, MdaXmlElement* pElement, ValidationResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString sszValue;
+ MdaXmlAttribute* pAttr = (MdaXmlAttribute*)pElement->GetAttribute(pThis->m_declDef);
+
+ if (!pAttr && !pThis->m_szDefault.IsEmpty())
+ {
+ pAttr = pElement->AddDefaultAttribute(pThis->m_declDef, pThis->m_szDefault.GetUnicode());
+ }
+
+ if (!pAttr)
+ {
+ CpdXsdTest(!pThis->m_bRequired);
+ return TRUE;
+ }
+
+#ifdef _DEBUG
+ // Only necessary for validation of assistant output
+ if (pAttr->m_type != MdaSchemaPrimitiveUnknown)
+ {
+ CpdXsdTest(pAttr->m_type == pThis->m_type);
+ return TRUE;
+ }
+#endif
+
+ LPCWSTR szValue = pAttr->GetValue();
+ sszValue.Set(szValue);
+
+ if (pThis->m_type == MdaSchemaPrimitiveSString)
+ {
+ /* accept all strings? */
+ }
+ else if (pThis->m_type == MdaSchemaPrimitiveINT32)
+ {
+ CpdXsdTest(!sszValue.IsEmpty() && sszValue.GetCount() != 0);
+
+ for (COUNT_T i = 0; i < sszValue.GetCount(); i ++)
+ {
+ if (i == 0 && *szValue == W('-') && sszValue.GetCount() > 1)
+ continue;
+
+ CpdXsdTest(IS_DIGIT(szValue[i]));
+ }
+
+ pAttr->SetINT32(_wtoi(szValue));
+ }
+ else if (pThis->m_type == MdaSchemaPrimitiveBOOL)
+ {
+ CpdXsdTest(!sszValue.IsEmpty() && sszValue.GetCount() != 0);
+
+ if (sszValue.Equals(W("true")))
+ pAttr->SetBOOL(true);
+ else if (sszValue.Equals(W("false")))
+ pAttr->SetBOOL(false);
+ else
+ CpdXsdTest(FALSE);
+ }
+
+ MDA_XSD_VERIFY_OK;
+ MDA_XSD_VERIFY_FAIL;
+}
+#undef this
+
+BOOL MdaSchema::MdaSchemaBase::Validate(MdaXmlElement* pElement, ValidationResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ COUNT_T count = 0;
+
+ CpdXsdTest(ValidatePattern(pElement, pResult, &count));
+
+ CpdXsdTest(count == pElement->GetChildren().GetCount());
+
+ MDA_XSD_VERIFY_OK;
+ MDA_XSD_VERIFY_FAIL;
+}
+
+BOOL MdaSchema::MdaSchemaRoot::Validate(MdaXmlElement* pElement, ValidationResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for(COUNT_T i = 0; i < m_children.GetCount(); i++)
+ CpdXsdIfFailGo(m_children[i]->Validate(pElement, pResult));
+
+ MDA_XSD_VERIFY_OK;
+ MDA_XSD_VERIFY_FAIL;
+}
+
+BOOL MdaSchema::MdaSchemaComplexType::Validate(MdaXmlElement* pElement, ValidationResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for(COUNT_T i = 0; i < m_children.GetCount(); i++)
+ CpdXsdIfFailGo(m_children[i]->Validate(pElement, pResult));
+
+ for(COUNT_T i = 0; i < m_attributes.GetCount(); i++)
+ CpdXsdIfFailGo(m_attributes[i]->Validate(pElement, pResult));
+
+ MDA_XSD_VERIFY_OK;
+ MDA_XSD_VERIFY_FAIL;
+}
+
+BOOL MdaSchema::MdaSchemaComplexTypeDef::Validate(MdaXmlElement* pElement, ValidationResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for(COUNT_T i = 0; i < m_children.GetCount(); i++)
+ CpdXsdIfFailGo(m_children[i]->Validate(pElement, pResult));
+
+ for(COUNT_T i = 0; i < m_attributes.GetCount(); i++)
+ CpdXsdIfFailGo(m_attributes[i]->Validate(pElement, pResult));
+
+ MDA_XSD_VERIFY_OK;
+ MDA_XSD_VERIFY_FAIL;
+}
+
+BOOL MdaSchema::MdaSchemaComplexContent::Validate(MdaXmlElement* pElement, ValidationResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for(COUNT_T i = 0; i < m_children.GetCount(); i++)
+ CpdXsdIfFailGo(m_children[i]->Validate(pElement, pResult));
+
+ MDA_XSD_VERIFY_OK;
+ MDA_XSD_VERIFY_FAIL;
+}
+
+BOOL MdaSchema::MdaSchemaGroup::ValidatePattern(MdaXmlElement* pElement, ValidationResult* pResult, COUNT_T* pCount)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for(COUNT_T i = 0; i < m_children.GetCount(); i++)
+ {
+ ASSERT(IsPattern(m_children[i]));
+ CpdXsdIfFailGo(m_children[i]->ValidatePattern(pElement, pResult, pCount));
+ }
+
+ MDA_XSD_VERIFY_OK;
+ MDA_XSD_VERIFY_FAIL;
+}
+
+BOOL MdaSchema::MdaSchemaGroupRef::ValidatePattern(MdaXmlElement* pElement, ValidationResult* pResult, COUNT_T* pCount)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MdaSchemaBase* pReference = GetRef();
+ LPCWSTR debug = GetRefName();
+ ASSERT(IsPattern(this));
+ return pReference->ValidatePattern(pElement, pResult, pCount);
+}
+
+BOOL MdaSchema::MdaSchemaExtension::Validate(MdaXmlElement* pElement, ValidationResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ASSERT(GetRef()->GetSchemaType() == MdaSchemaComplexTypeDefType);
+ MdaSchemaComplexTypeDef* pReference = (MdaSchemaComplexTypeDef*)GetRef();
+
+ MdaSchemaSequence sequence;
+ sequence.Initialize(1, 1);
+
+ MdaSchemaBase* pXsd = pReference;
+ while(true)
+ {
+ if (MayHaveAttr(pXsd))
+ {
+ for(COUNT_T i = 0; i < pXsd->GetAttributes().GetCount(); i++)
+ CpdXsdIfFailGo(pXsd->GetAttributes()[i]->Validate(pElement, pResult));
+ }
+
+ if (pXsd->GetSchemaType() == MdaSchemaExtensionType)
+ {
+ pXsd = ((MdaSchemaComplexTypeDef*)pXsd)->GetRef();
+ continue;
+ }
+
+ if (pXsd->m_children.GetCount() == 0)
+ break;
+
+ pXsd = pXsd->m_children[0];
+
+ if (IsPattern(pXsd))
+ {
+ sequence.AddChild(pXsd);
+ break;
+ }
+ }
+
+ if (m_children.GetCount() == 1)
+ {
+ ASSERT(IsPattern(m_children[0]));
+ sequence.AddChild(m_children[0]);
+ }
+
+ CpdXsdIfFailGo(sequence.Validate(pElement, pResult));
+
+ for(COUNT_T i = 0; i < m_attributes.GetCount(); i++)
+ CpdXsdIfFailGo(m_attributes[i]->Validate(pElement, pResult));
+
+ MDA_XSD_VERIFY_OK;
+ MDA_XSD_VERIFY_FAIL;
+}
+
+BOOL MdaSchema::MdaSchemaElementRefType::Validate(MdaXmlElement* pElement, ValidationResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CpdXsdIfFailGo(GetRef()->Validate(pElement, pResult));
+
+ MDA_XSD_VERIFY_OK;
+ MDA_XSD_VERIFY_FAIL;
+}
+
+BOOL MdaSchema::MdaSchemaElementAny::Validate(MdaXmlElement* pElement, ValidationResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString buffer;
+ LPCWSTR debug = pElement->DebugToString(&buffer);
+
+ CpdXsdTest(pElement->GetDeclDef() == GetDeclDef());
+
+ MDA_XSD_VERIFY_OK;
+ MDA_XSD_VERIFY_FAIL;
+}
+
+BOOL MdaSchema::MdaSchemaElementRef::Validate(MdaXmlElement* pElement, ValidationResult* pResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LPCWSTR debug = GetRefName();
+ CpdXsdIfFailGo(GetRef()->Validate(pElement, pResult));
+
+ MDA_XSD_VERIFY_OK;
+ MDA_XSD_VERIFY_FAIL;
+}
+
+
+//
+// MdaSchema::XXX::SetAttributes()
+//
+
+void MdaSchema::MdaSchemaSequence::SetAttributes(MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SmallStackSString ssBound;
+
+ ssBound.Printf(W("%d"), m_min);
+ pXml->AddAttributeSz(MdaAttrDecl(MinOccurs), ssBound.GetUnicode());
+
+ if (m_max == -1)
+ {
+ pXml->AddAttributeSz(MdaAttrDecl(MaxOccurs), W("unbounded"));
+ }
+ else
+ {
+ ssBound.Printf(W("%d"), m_max);
+ pXml->AddAttributeSz(MdaAttrDecl(MaxOccurs), ssBound.GetUnicode());
+ }
+}
+
+void MdaSchema::MdaSchemaAttribute::SetAttributes(MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ pXml->AddAttributeSz(MdaAttrDecl(Name), GetAttributeName(m_declDef));
+
+ LPCWSTR szType = NULL;
+ if (m_type == MdaSchemaPrimitiveBOOL)
+ szType = W("xs:boolean");
+ else if (m_type == MdaSchemaPrimitiveINT32)
+ szType = W("xs:int");
+ else if (m_type == MdaSchemaPrimitiveSString)
+ szType = W("xs:string");
+ else { UNREACHABLE(); }
+
+ pXml->AddAttributeSz(MdaAttrDecl(Type), szType);
+ pXml->AddAttributeSz(MdaAttrDecl(Use), m_bRequired ? W("required") : W("optional"));
+
+ if (!m_szDefault.IsEmpty())
+ pXml->AddAttributeSz(MdaAttrDecl(Default), m_szDefault);
+}
+
+void MdaSchema::MdaSchemaDeclDefRef::SetAttributes(MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LPCWSTR szDeclDef = NULL;
+ LPCWSTR szRef = NULL;
+
+ if (IsDeclDef(this))
+ szDeclDef = GetDeclDefName();
+
+ if (IsRef(this))
+ szRef = GetRefName();
+
+ switch (GetSchemaType())
+ {
+ case MdaSchemaGroupRefType:
+ case MdaSchemaElementRefTyp:
+ pXml->AddAttributeSz(MdaAttrDecl(Ref), szRef);
+ break;
+
+ case MdaSchemaExtensionType:
+ pXml->AddAttributeSz(MdaAttrDecl(Base), szRef);
+ break;
+
+ case MdaSchemaElementRefTypeType:
+ pXml->AddAttributeSz(MdaAttrDecl(Name), szDeclDef);
+ pXml->AddAttributeSz(MdaAttrDecl(Type), szRef);
+ break;
+
+ case MdaSchemaElementAnyType:
+ pXml->AddAttributeSz(MdaAttrDecl(Name), szDeclDef);
+ pXml->AddAttributeSz(MdaAttrDecl(Type), W("xs:anyType"));
+ break;
+
+ case MdaSchemaGroupType:
+ case MdaSchemaElementType:
+ case MdaSchemaComplexTypeDefType:
+ pXml->AddAttributeSz(MdaAttrDecl(Name), szDeclDef);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+//
+// MdaAssistant
+//
+void MdaAssistant::Initialize(MdaXmlElement* pXmlInput)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pXmlInput->GetAttribute(MdaAttrDecl(SuppressDialog)))
+ m_bSuppressDialog = !!pXmlInput->GetAttributeValueAsBool(MdaAttrDecl(SuppressDialog));
+}
+
+LPCWSTR MdaAssistant::GetName()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return MdaSchema::GetElementName(m_assistantDeclDef);
+}
+
+MdaXmlElement* MdaAssistant::GetRootElement(MdaXmlElement* pRoot, BOOL bBreak)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXmlAssistant = pRoot->AddChild(GetAssistantMsgDeclDef());
+
+ if (bBreak)
+ pXmlAssistant->AddAttributeSz(MdaAttrDecl(Break), W("true"));
+
+ return pXmlAssistant;
+}
+
+BOOL MdaAssistant::IsAssistantActive(MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return TRUE;
+}
+
+MdaXmlElement* MdaAssistant::OutputThread(Thread* pThread, MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pThread);
+ pXml->AddAttributeInt(MdaAttrDecl(OsId), pThread->GetOSThreadId());
+ pXml->AddAttributeInt(MdaAttrDecl(ManagedId), pThread->GetThreadId());
+
+ return pXml;
+}
+
+MdaXmlElement* MdaAssistant::OutputMethodTable(MethodTable* pMT, MdaXmlElement* pXml)
+{
+ CONTRACT (MdaXmlElement*)
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(pXml));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ static WCHAR szTemplateMsg[] = {W("Failed to QI for interface %s because it does not have a COM proxy stub registered.")};
+
+ DefineFullyQualifiedNameForClassWOnStack();
+ pXml->AddAttributeSz(MdaAttrDecl(Name), GetFullyQualifiedNameForClassW(pMT));
+
+ RETURN pXml;
+}
+
+void MdaAssistant::ToString(TypeHandle typeHandle, SString* psszFullname, SString* psszNamespace)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+    StackSString ssz;
+
+ psszFullname->Clear();
+
+ LPCSTR szDeclTypeName, szNamespace;
+ InlineSArray<mdTypeDef, 32> nesting;
+
+ mdTypeDef tkTypeDef = typeHandle.GetCl();
+ Module* pModule = typeHandle.GetModule();
+ IMDInternalImport* pImport = pModule->GetMDImport();
+
+ // Get tkTypeDef tokens for declaring type and its nested classes
+ nesting.Append(tkTypeDef);
+ while (S_OK == pImport->GetNestedClassProps(tkTypeDef, &tkTypeDef))
+ nesting.Append(tkTypeDef);
+
+ // Append the namespace
+ COUNT_T i = nesting.GetCount() - 1;
+ if (FAILED(pImport->GetNameOfTypeDef(nesting[i], &szDeclTypeName, &szNamespace)))
+ {
+ szNamespace = NULL;
+ szDeclTypeName = NULL;
+ }
+ if (szNamespace && *szNamespace != W('\0'))
+ {
+ if (psszNamespace)
+ psszNamespace->SetUTF8(szNamespace);
+
+ psszFullname->SetUTF8(szNamespace);
+ psszFullname->Append(W("."));
+ }
+
+ // Append the nested classes
+ for(; i > 0; i --)
+ {
+ IfFailThrow(pImport->GetNameOfTypeDef(nesting[i], &szDeclTypeName, &szNamespace));
+ ssz.SetUTF8(szDeclTypeName);
+ psszFullname->Append(ssz);
+ psszFullname->Append(W("+"));
+ }
+
+ // Append the declaring type name
+ IfFailThrow(pImport->GetNameOfTypeDef(nesting[i], &szDeclTypeName, &szNamespace));
+ ssz.SetUTF8(szDeclTypeName);
+ psszFullname->Append(ssz);
+}
+
+SString& MdaAssistant::ToString(SString& sszBuffer, Module* pModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ sszBuffer.AppendUTF8(pModule->GetSimpleName());
+ return sszBuffer;
+}
+
+SString& MdaAssistant::ToString(SString& sszBuffer, TypeHandle typeHandle)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString sszScratch;
+ ToString(sszBuffer, typeHandle.GetModule()).GetUnicode();
+ sszBuffer.Append(W("!"));
+ ToString(typeHandle, &sszScratch, NULL);
+ sszBuffer.Append(sszScratch);
+ return sszBuffer;
+}
+
+SString& MdaAssistant::ToString(SString& sszBuffer, MethodDesc* pMethodDesc)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ToString(sszBuffer, pMethodDesc->GetMethodTable()).GetUnicode();
+ sszBuffer.Append(W("::"));
+ StackSString ssz;
+ ssz.SetUTF8(pMethodDesc->GetName());
+ sszBuffer.Append(ssz);
+ return sszBuffer;
+}
+
+SString& MdaAssistant::ToString(SString& sszBuffer, FieldDesc* pFieldDesc)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ToString(sszBuffer, pFieldDesc->GetEnclosingMethodTable()).GetUnicode();
+ sszBuffer.Append(W("::"));
+ StackSString ssz;
+ ssz.SetUTF8(pFieldDesc->GetName());
+ sszBuffer.Append(ssz);
+ return sszBuffer;
+}
+
+MdaXmlElement* MdaAssistant::OutputParameter(SString parameterName, USHORT sequence, MethodDesc* pMethodDesc, MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ TypeHandle declType(pMethodDesc->GetMethodTable());
+ Module* pDeclModule = declType.GetModule();
+
+ pXml->AddAttributeSz(MdaAttrDecl(Name), parameterName);
+ pXml->AddAttributeInt(MdaAttrDecl(Index), sequence);
+
+ OutputMethodDesc(pMethodDesc, pXml->AddChild(MdaElemDecl(DeclaringMethod)));
+
+ return pXml;
+}
+
+MdaXmlElement* MdaAssistant::OutputMethodDesc(MethodDesc* pMethodDesc, MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString sszMethod;
+
+ pXml->AddAttributeSz(MdaAttrDecl(Name), ToString(sszMethod, pMethodDesc).GetUnicode());
+
+ return pXml;
+}
+
+MdaXmlElement* MdaAssistant::OutputFieldDesc(FieldDesc* pFieldDesc, MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString sszField;
+
+ pXml->AddAttributeSz(MdaAttrDecl(Name), ToString(sszField, pFieldDesc).GetUnicode());
+
+ return pXml;
+}
+
+MdaXmlElement* MdaAssistant::OutputTypeHandle(TypeHandle typeHandle, MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString sszTypeName;
+
+ // Set Attribute
+ pXml->AddAttributeSz(MdaAttrDecl(Name), ToString(sszTypeName, typeHandle.GetMethodTable()).GetUnicode());
+
+ return pXml;
+}
+
+MdaXmlElement* MdaAssistant::OutputModule(Module* pModule, MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ pXml->AddAttributeSz(MdaAttrDecl(Name), pModule->GetSimpleName());
+
+ return pXml;
+}
+
+MdaXmlElement* MdaAssistant::OutputCallsite(MethodDesc *pMethodDesc, DWORD dwOffset, MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString sszMethod;
+ pXml->AddAttributeSz(MdaAttrDecl(Name), ToString(sszMethod, pMethodDesc).GetUnicode());
+
+ StackSString sszOffset;
+ sszOffset.Printf(W("0x%04X"), dwOffset);
+ pXml->AddAttributeSz(MdaAttrDecl(Offset), sszOffset.GetUnicode());
+
+ return pXml;
+}
+
+MdaXmlElement* MdaAssistant::OutputException(OBJECTREF *pExceptionObj, MdaXmlElement* pXml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ OutputTypeHandle((*pExceptionObj)->GetTypeHandle(), pXml->AddChild(MdaElemDecl(Type)));
+
+ StackSString message;
+ GetExceptionMessage(*pExceptionObj, message);
+
+ pXml->AddAttributeSz(MdaAttrDecl(Message), message);
+
+ return pXml;
+}
+
+//
+// MdaQuery::CompiledQueries
+//
+BOOL MdaQuery::CompiledQueries::Test(MethodDesc* pMethodDesc)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for (COUNT_T i = 0; i < m_queries.GetCount(); i ++)
+ {
+ if (m_queries[i]->Test(pMethodDesc))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+BOOL MdaQuery::CompiledQueries::Test(FieldDesc* pFieldDesc)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for (COUNT_T i = 0; i < m_queries.GetCount(); i ++)
+ {
+ if (m_queries[i]->Test(pFieldDesc))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+BOOL MdaQuery::CompiledQueries::Test(MethodTable* pMethodTable)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for (COUNT_T i = 0; i < m_queries.GetCount(); i ++)
+ {
+ if (m_queries[i]->Test(pMethodTable))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+MdaQuery::CompiledQuery* MdaQuery::CompiledQueries::AddQuery()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CompiledQuery* pQuery = m_factory.Create();
+ m_queries.Append(pQuery);
+ return pQuery;
+}
+
+
+//
+// MdaQuery::CompiledQuery
+//
+void MdaQuery::Compile(MdaXmlElement* pXmlFilters, CompiledQueries* pCompiledQueries)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SArray<MdaXmlElement*>& children = pXmlFilters->GetChildren();
+ BOOL bJmc = pXmlFilters->GetAttribute(MdaAttrDecl(JustMyCode))->GetValueAsBool();
+
+ for (COUNT_T i = 0; i < children.GetCount(); i ++)
+ {
+ MdaXmlElement* pXmlFilter = children[i];
+ SString* psszName = pXmlFilter->GetAttribute(MdaAttrDecl(Name))->GetValueAsCSString();
+ MdaXmlAttribute* pJmcOptAttr = pXmlFilter->GetAttribute(MdaAttrDecl(JustMyCode));
+ if (pJmcOptAttr)
+ bJmc = pJmcOptAttr->GetValueAsBool();
+ Compiler compiler;
+ CompiledQuery* pQuery = pCompiledQueries->AddQuery();
+ compiler.Compile(psszName, pQuery);
+ if (bJmc)
+ pQuery->SetJustMyCode();
+ }
+}
+
+MdaQuery::CompiledQuery::CompiledQuery()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_bJustMyCode = FALSE;
+ m_bAnyMember = FALSE;
+ m_bAnyType = FALSE;
+ m_sszFullname.Clear();
+ m_sszMember.Clear();
+}
+
+BOOL StartsWith(SString* psszString, SString* psszSubstring)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString sszString(*psszString);
+ if (psszString->GetCount() < psszSubstring->GetCount())
+ return FALSE;
+ sszString.Truncate(sszString.Begin() + psszSubstring->GetCount());
+ return sszString.Equals(*psszSubstring);
+}
+
+BOOL MdaQuery::CompiledQuery::Test(MethodDesc* pMethodDesc)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString sszName(SString::Utf8, pMethodDesc->GetName());
+
+ if (pMethodDesc->IsLCGMethod() || pMethodDesc->IsILStub())
+ return FALSE;
+
+ if (!Test(&sszName, pMethodDesc->GetMethodTable()))
+ return FALSE;
+
+ if (!m_bJustMyCode)
+ return TRUE;
+
+ if (IsJustMyCode(pMethodDesc))
+ return TRUE;
+
+ return FALSE;
+}
+
+BOOL MdaQuery::CompiledQuery::Test(FieldDesc* pFieldDesc)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString sszName(SString::Utf8, pFieldDesc->GetName());
+ if (!Test(&sszName, pFieldDesc->GetApproxEnclosingMethodTable()))
+ return FALSE;
+
+    // Fields are not subject to just-my-code filtering.
+    return TRUE;
+}
+
+BOOL MdaQuery::CompiledQuery::Test(SString* psszName, MethodTable* pMethodTable)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!m_sszMember.IsEmpty())
+ {
+ if (!m_sszMember.Equals(*psszName))
+ return FALSE;
+
+ if (m_sszMember.GetCount() == m_sszFullname.GetCount())
+ return TRUE;
+ }
+ else if (!m_bAnyMember)
+ return FALSE;
+
+ return Test(pMethodTable);
+}
+
+BOOL MdaQuery::CompiledQuery::Test(MethodTable* pMethodTable)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!pMethodTable)
+ return FALSE;
+
+ if (m_sszFullname.IsEmpty())
+ return TRUE;
+
+ StackSString sszNamespace, sszFullName;
+ MdaAssistant::ToString(pMethodTable, &sszFullName, &sszNamespace);
+
+ if (m_bAnyType && StartsWith(&m_sszFullname, &sszNamespace))
+ return TRUE;
+
+ if (m_bAnyMember && StartsWith(&m_sszFullname, &sszFullName))
+ return TRUE;
+
+ return m_sszFullname.Equals(sszFullName);
+}
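+
+// Matching sketch (illustrative): a "*" in the source filter compiles to the
+// m_bAnyType/m_bAnyMember flags rather than a name, so the tests above reduce
+// to the StartsWith prefix checks; a fully qualified filter falls through to
+// the final fullname equality comparison.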
+
+void MdaQuery::CompiledQuery::SetName(LPCWSTR name)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!m_sszFullname.IsEmpty())
+ {
+ m_sszFullname.Append(W("."));
+ m_sszMember.Clear();
+ }
+ else
+ {
+ m_sszMember.Set(name);
+ }
+
+ m_sszFullname.Append(name);
+
+}
+
+void MdaQuery::CompiledQuery::SetNestedTypeName(LPCWSTR name)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_sszMember.Clear();
+
+ if (!m_sszFullname.IsEmpty())
+ m_sszFullname.Append(W("+"));
+
+ m_sszFullname.Append(name);
+}
+
+void MdaQuery::CompiledQuery::SetAnyMember()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_bAnyMember = TRUE;
+ m_sszMember.Clear();
+}
+
+void MdaQuery::CompiledQuery::SetAnyType()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_bAnyType = TRUE;
+ m_sszMember.Clear();
+
+ if (m_sszFullname.IsEmpty())
+ m_bAnyMember = TRUE;
+}
+
+
+//
+// MdaQuery::Compiler -- Lexer
+//
+
+MdaQuery::Compiler::Token MdaQuery::Compiler::LexAToken()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (*m_itr == W('\0'))
+ return MdaFilterEnd;
+
+ if (ISWHITE(*m_itr))
+ {
+ m_itr++;
+ return LexAToken();
+ }
+
+ if (ISMDAID(*m_itr))
+ {
+ m_identifier.Clear();
+
+ do
+ {
+ m_identifier.Append(*m_itr);
+ m_itr++;
+ }
+ while(ISMDAID(*m_itr));
+
+ m_identifier.Append(W("\0"));
+ return MdaFilterIdentifier;
+ }
+
+ WCHAR c = *m_itr;
+ m_itr++;
+ switch(c)
+ {
+ case W('.'): return MdaFilterDot;
+ case W(':'): return MdaFilterColon;
+ case W('*'): return MdaFilterAstrix;
+ case W('+'): return MdaFilterPlus;
+ }
+
+ return MdaFilterEnd;
+}
+
+//
+// MdaQuery::Compiler -- Parser
+//
+BOOL MdaQuery::Compiler::Compile(SString* sszQuery, CompiledQuery* pAst)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_itr = sszQuery->Begin();
+
+ NextToken();
+ BOOL bResult = NAME(pAst);
+
+ return bResult;
+}
+
+// NAME
+// '*'
+// id
+// id '.' NAME
+// id '+' NESTNAME
+//     id ':' ':' MEMBERNAME
+BOOL MdaQuery::Compiler::NAME(CompiledQuery* pAst)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (TokenIs(MdaFilterIdentifier))
+ {
+ pAst->SetName(GetIdentifier());
+
+ NextToken();
+ if (TokenIs(MdaFilterDot))
+ {
+ NextToken();
+ return NAME(pAst);
+ }
+ else if (TokenIs(MdaFilterPlus))
+ {
+ NextToken();
+ return NESTNAME(pAst);
+ }
+ else if (TokenIs(MdaFilterColon))
+ {
+ NextToken();
+ if (!TokenIs(MdaFilterColon))
+ return FALSE;
+
+ NextToken();
+ return MEMBERNAME(pAst);
+ }
+ }
+ else if (TokenIs(MdaFilterAstrix))
+ {
+ pAst->SetAnyType();
+ NextToken();
+ }
+ else return FALSE;
+
+ return TRUE;
+}
+
+// NESTNAME
+// id '+' NESTNAME
+//     id ':' ':' MEMBERNAME
+BOOL MdaQuery::Compiler::NESTNAME(CompiledQuery* pAst)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!TokenIs(MdaFilterIdentifier))
+ return FALSE;
+
+ pAst->SetNestedTypeName(GetIdentifier());
+
+ NextToken();
+
+ if (TokenIs(MdaFilterPlus))
+ {
+ NextToken();
+ return NESTNAME(pAst);
+ }
+ else if (TokenIs(MdaFilterColon))
+ {
+ NextToken();
+ if (!TokenIs(MdaFilterColon))
+ return FALSE;
+
+ NextToken();
+ return MEMBERNAME(pAst);
+ }
+ else return FALSE;
+}
+
+// MEMBERNAME
+// '*'
+// id
+BOOL MdaQuery::Compiler::MEMBERNAME(CompiledQuery* pAst)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (TokenIs(MdaFilterIdentifier))
+ pAst->SetMemberName(GetIdentifier());
+
+ else if (TokenIs(MdaFilterAstrix))
+ pAst->SetAnyMember();
+
+ else return FALSE;
+
+ NextToken();
+ return TRUE;
+}
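+
+// Parse sketch (illustrative, hypothetical names): the recursive-descent
+// productions above accept filters such as
+//
+//     "MyNs.MyType::MyMethod"     id '.' id ':' ':' id
+//     "MyNs.Outer+Inner::*"       id '.' id '+' id ':' ':' '*'
+//     "*"                         any type, via SetAnyType()
+//
+// A malformed filter, such as a single ':' before the member name, makes the
+// matching production return FALSE.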
+
+
+//
+// MdaXmlElement
+//
+MdaXmlElement* MdaXmlElement::GetChild(MdaElemDeclDef declDef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for(COUNT_T i = 0; i < m_children.GetCount(); i ++)
+ {
+ if (m_children[i]->GetDeclDef() == declDef)
+ return m_children[i];
+ }
+
+ return NULL;
+}
+
+SString* MdaXmlElement::ToXml(SString* pXml, LPCWSTR ns, INT32 depth)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(depth < 60); // Trap for recursion
+ }
+ CONTRACTL_END;
+
+ // Indent
+ for (INT32 i = 0; i < depth; i ++)
+ pXml->Append(W(" "));
+
+ pXml->Append(W("<"));
+ if (ns && IsDefinition()) { pXml->Append(ns); pXml->Append(W(":")); }
+ pXml->Append(GetName());
+
+ if (m_attributes.GetCount() != 0)
+ {
+ for (COUNT_T i = 0; i < m_defaultAttrIndex && i < m_attributes.GetCount(); i ++)
+ {
+ pXml->Append(W(" "));
+ m_attributes[i]->ToXml(pXml);
+ }
+ }
+
+ if (m_children.GetCount() == 0)
+ {
+ if (GetDeclDef() == MdaElemComment)
+ {
+ pXml->Append(W(" "));
+ pXml->Append(m_szName.GetUnicode());
+ pXml->Append(W(" -->\n"));
+ }
+ else
+ pXml->Append(W("/>\n"));
+ }
+ else
+ {
+ pXml->Append(W(">"));
+
+ SArray<MdaXmlElement*>::Iterator itr = m_children.Begin();
+ SArray<MdaXmlElement*>::Iterator end = m_children.End();
+
+ pXml->Append(W("\n"));
+ while (itr != end)
+ {
+ (*itr)->ToXml(pXml, ns, depth + 1);
+ itr++;
+ }
+
+ // Indent
+ for (INT32 i = 0; i < depth; i ++)
+ pXml->Append(W(" "));
+
+ pXml->Append(W("</"));
+ if (ns && IsDefinition()) { pXml->Append(ns); pXml->Append(W(":")); }
+ pXml->Append(GetName());
+ pXml->Append(W(">\n"));
+ }
+
+ return pXml;
+}
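+
+// Serialization sketch (illustrative output only): each nesting level adds one
+// leading space, definition elements get the optional namespace prefix, and
+// childless elements self-close, e.g.
+//
+//     <mda:myAssistant enable="true">
+//      <myChild/>
+//     </mda:myAssistant>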
+
+LPCWSTR MdaXmlElement::DebugToString(SString* pBuffer)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ pBuffer->Append(W("<"));
+ pBuffer->Append(GetName());
+
+ for(COUNT_T i = 0; i < GetAttributes().GetCount(); i++)
+ {
+ pBuffer->Append(W(" "));
+ GetAttributes()[i]->ToXml(pBuffer);
+ }
+
+ pBuffer->Append(W("/>"));
+ return pBuffer->GetUnicode();
+}
+
+MdaXmlElement* MdaXmlElement::SetName(LPCWSTR name, BOOL bAssertDefined)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SetDeclDef(MdaSchema::GetElementType(name, bAssertDefined));
+
+ if (GetDeclDef() == MdaElemUndefined)
+ m_szName.Set(name);
+
+ return this;
+}
+
+MdaXmlAttribute* MdaXmlElement::AddAttribute(MdaAttrDeclDef declDef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return AddAttribute(m_pXmlIndustry->CreateAttribute()->SetDeclDef(declDef));
+}
+
+MdaXmlAttribute* MdaXmlElement::AddAttribute(LPCWSTR szName, LPCWSTR szValue)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return AddAttribute(m_pXmlIndustry->CreateAttribute()->Initialize(szName, szValue));
+}
+
+MdaXmlAttribute* MdaXmlElement::AddDefaultAttribute(MdaAttrDeclDef attrDeclDef, LPCWSTR szValue)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_defaultAttrIndex == -1)
+ m_defaultAttrIndex = m_attributes.GetCount();
+ MdaXmlAttribute* pAttr = AddAttribute(attrDeclDef)->SetSString(szValue);
+ pAttr->m_type = MdaSchemaPrimitiveUnknown;
+ return pAttr;
+}
+
+MdaXmlElement* MdaXmlElement::AddChild(MdaXmlElement* pChild)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //PRECONDITION(m_elemDeclDef != MdaElemUndefined);
+ PRECONDITION(CheckPointer(pChild));
+ PRECONDITION(CheckPointer(pChild->m_pXmlIndustry));
+
+ *m_children.Append() = pChild;
+ return pChild;
+}
+
+MdaXmlElement* MdaXmlElement::AddChild(LPCWSTR name, BOOL bAssertDefined)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return AddChild(m_pXmlIndustry->CreateElement())->SetName(name, bAssertDefined);
+}
+
+MdaXmlElement* MdaXmlElement::AddChild(MdaElemDeclDef type)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return AddChild(m_pXmlIndustry->CreateElement()->SetDeclDef(type));
+}
+
+LPCWSTR MdaXmlElement::GetName()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (GetDeclDef() == MdaElemUndefined)
+ return m_szName.GetUnicode();
+
+ return MdaSchema::GetElementName(m_elemDeclDef);
+}
+
+MdaXmlAttribute* MdaXmlElement::GetAttribute(MdaAttrDeclDef attrDeclDef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for(UINT32 i = 0; i < m_attributes.GetCount(); i++)
+ {
+ if (attrDeclDef == m_attributes[i]->GetDeclDef())
+ return m_attributes[i];
+ }
+
+ return NULL;
+}
+
+BOOL MdaXmlElement::GetAttributeValueAsBool(MdaAttrDeclDef attrDeclDef, BOOL bDefault)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MdaXmlAttribute* pAttr = GetAttribute(attrDeclDef);
+
+ if (!pAttr)
+ return bDefault;
+
+ return pAttr->GetValueAsBool();
+}
+
+BOOL MdaXmlElement::GetAttributeValueAsBool(MdaAttrDeclDef attrDeclDef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MdaXmlAttribute* pAttr = GetAttribute(attrDeclDef);
+ PREFIX_ASSUME(pAttr != NULL);
+ ASSERT(pAttr);
+ return pAttr->GetValueAsBool();
+}
+
+//
+// MdaXmlAttribute
+//
+
+SString* MdaXmlAttribute::ToXml(SString* xml)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SString sszBuffer;
+
+ xml->Append(GetName());
+ if (!m_szNs.IsEmpty())
+ {
+ xml->Append(W(":"));
+ xml->Append(m_szNs.GetUnicode());
+ }
+
+ xml->Append(W("=\""));
+ if (m_type == MdaSchemaPrimitiveSString)
+ xml->Append(MdaXmlEscape(sszBuffer, m_value));
+ else if (m_type == MdaSchemaPrimitiveBOOL)
+ xml->Append(m_bool ? W("true") : W("false"));
+ else if (m_type == MdaSchemaPrimitiveINT32)
+ {
+ StackSString sszOutput;
+ sszOutput.Printf(W("%d"), m_int);
+ xml->Append(sszOutput);
+ }
+ xml->Append(W("\""));
+ return xml;
+}
+
+LPCWSTR MdaXmlAttribute::GetName()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_declDef != MdaAttrUndefined)
+ return MdaSchema::GetAttributeName(m_declDef);
+
+ return m_szName.GetUnicode();
+}
+
+MdaXmlAttribute* MdaXmlAttribute::Initialize(LPCWSTR szName, LPCWSTR szValue)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_type = MdaSchemaPrimitiveUnknown;
+ m_value.Set(szValue);
+
+ SetDeclDef(MdaSchema::GetAttributeType(szName, FALSE));
+ if (m_declDef == MdaAttrUndefined)
+ m_szName.Set(szName);
+
+ return this;
+}
+
+
+//
+// MdaConfigFactory
+//
+STDAPI GetXMLObjectEx(IXMLParser **ppv);
+
+MdaXmlElement* MdaConfigFactory::ParseXmlStream(MdaXmlIndustry* pXmlIndustry, LPCWSTR pszFileName)
+{
+ CONTRACT(MdaXmlElement*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ HRESULT hr = S_OK;
+ MdaXmlElement* pRoot = NULL;
+
+ EX_TRY
+ {
+ {
+ if (!pszFileName)
+ goto Exit;
+
+ NonVMComHolder<IXMLParser> pIXMLParser(NULL);
+ NonVMComHolder<IStream> pFile(NULL);
+
+ hr = CreateConfigStream(pszFileName, &pFile);
+ if(FAILED(hr)) goto Exit;
+
+ hr = GetXMLObjectEx(&pIXMLParser);
+ if(FAILED(hr)) goto Exit;
+
+ hr = pIXMLParser->SetInput(pFile); // filestream's RefCount=2
+            if (!SUCCEEDED(hr))
+ goto Exit;
+
+ pRoot = pXmlIndustry->CreateElement()->SetDeclDef(MdaElemDef(Dummy));
+ MdaConfigFactory mdaConfigFactory(pRoot);
+
+ hr = pIXMLParser->SetFactory(&mdaConfigFactory); // factory's RefCount=2
+ if (!SUCCEEDED(hr))
+ goto Exit;
+
+ hr = pIXMLParser->Run(-1);
+
+ if (pRoot->GetChildren().GetCount() == 1)
+ pRoot = pRoot->GetChildren()[0];
+ else
+ pRoot = NULL;
+ }
+ Exit: ;
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (hr == (HRESULT)XML_E_MISSINGROOT)
+ hr = S_OK;
+ else if (Assembly::FileNotFound(hr))
+ hr = S_FALSE;
+
+ RETURN pRoot;
+}
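+
+// Callback sketch (illustrative, based on the handlers below): for a document
+// such as <a><b x="1"/></a> the parser calls CreateNode for "a", BeginChildren
+// pushes it, CreateNode for "b" adds the child element plus its "x" attribute
+// record, EndChildren arrives with fEmptyNode=TRUE for "b" (no matching push,
+// hence the early return below), and a final EndChildren pops "a".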
+
+HRESULT STDMETHODCALLTYPE MdaConfigFactory::CreateNode(
+ IXMLNodeSource* pSource,
+ PVOID pNodeParent,
+ USHORT cNumRecs,
+ XML_NODE_INFO** apNodeInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pMdaXmlElement = NULL;
+
+ for(INT32 i = 0; i < cNumRecs; i++)
+ {
+ DWORD dwType = apNodeInfo[i]->dwType;
+
+ if(dwType == XML_ELEMENT || dwType == XML_ATTRIBUTE)
+ {
+ StackSString sszName((WCHAR*)apNodeInfo[i]->pwcText, apNodeInfo[i]->ulLen);
+
+ if (dwType == XML_ELEMENT)
+ {
+ m_pMdaXmlElement = m_stack.Tos()->AddChild(sszName, FALSE);
+ }
+ else if (dwType == XML_ATTRIBUTE)
+ {
+ i++;
+ InlineSString<MDA_BUFFER_SIZE> szValue((WCHAR*)apNodeInfo[i]->pwcText, apNodeInfo[i]->ulLen);
+
+ if (m_pMdaXmlElement)
+ m_pMdaXmlElement->AddAttribute(sszName.GetUnicode(), szValue);
+ }
+ }
+ }
+
+ return S_OK;
+}
+
+HRESULT STDMETHODCALLTYPE MdaConfigFactory::BeginChildren(
+ IXMLNodeSource* pSource,
+ XML_NODE_INFO* pNodeInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_stack.Push(m_pMdaXmlElement);
+
+ return S_OK;
+}
+
+HRESULT STDMETHODCALLTYPE MdaConfigFactory::EndChildren(
+ IXMLNodeSource* pSource,
+ BOOL fEmptyNode,
+ XML_NODE_INFO* pNodeInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (fEmptyNode)
+ return S_OK;
+
+ m_stack.Pop();
+
+ return S_OK;
+}
+
+HRESULT STDMETHODCALLTYPE MdaConfigFactory::NotifyEvent(
+ IXMLNodeSource* pSource,
+ XML_NODEFACTORY_EVENT iEvt)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return S_OK;
+}
+
+HRESULT STDMETHODCALLTYPE MdaConfigFactory::Error(
+ IXMLNodeSource* pSource,
+ HRESULT hrErrorCode,
+ USHORT cNumRecs,
+ XML_NODE_INFO** apNodeInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return E_FAIL;
+}
+
+#endif
diff --git a/src/vm/mda.h b/src/vm/mda.h
new file mode 100644
index 0000000000..1bf3e22bfb
--- /dev/null
+++ b/src/vm/mda.h
@@ -0,0 +1,1515 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _MDA_
+#define _MDA_
+
+#ifndef _DEBUG
+#ifdef DACCESS_COMPILE
+#undef MDA_SUPPORTED
+#endif
+#endif
+
+#ifdef MDA_SUPPORTED
+
+#include "sarray.h"
+#include "eeconfig.h"
+// Factory includes
+#include <xmlparser.h>
+#include <objbase.h>
+#include "unknwn.h"
+#include "crst.h"
+#include "../xmlparser/_reference.h"
+#include "../dlls/mscorrc/resource.h"
+
+#define MdaTypeOf(TYPE) ((TYPE*)0)
+#define MdaType(TYPE) (TYPE*)
+#define MdaElemDecl(NAME) MdaElemDecl##NAME
+#define MdaElemDef(NAME) MdaElemDef##NAME
+#define MdaAttrDecl(NAME) MdaAttrDecl##NAME
+
+#define MDA_TARGET_NAMESPACE W("http://schemas.microsoft.com/CLR/2004/10/mda")
+#define MDA_SCHEMA_PREFIX W("mda")
+
+
+class ManagedDebuggingAssistants;
+class MdaAssistant;
+class MdaInvalidConfigFile;
+class MdaXmlElement;
+class MdaXmlAttribute;
+class MdaXmlMessage;
+class MdaXmlIndustry;
+class MdaXPath;
+class MdaSchema;
+class MdaSchemaSchema;
+class MdaAssistantSchema;
+class MdaAssistantMsgSchema;
+class MdaXmlValidationError;
+class MdaFramework;
+template<typename> class MdaFactory;
+
+#define MDA_BUFFER_SIZE 256
+#define MDA_XML_NAME_SIZE 16
+#define MDA_XML_VALUE_SIZE 16
+#define MDA_XML_ELEMENT_CHILDREN 16
+#define MDA_XML_ELEMENT_ATTRIBUTES 16
+#define MDA_MAX_FACTORY_PRODUCT 20
+#define MDA_MAX_STACK_ELEMENTS 20
+#define MDA_MAX_STACK_ATTRIBUTES 20
+
+typedef enum
+{
+ MdaSchemaPrimitiveBOOL,
+ MdaSchemaPrimitiveSString,
+ MdaSchemaPrimitiveINT32,
+ MdaSchemaPrimitiveUnknown,
+} MdaSchemaPrimitive;
+
+// HKLM\SYSTEM\CurrentControlSet\Control\Session Manager\Memory Management\PoolTag
+// Hex\Text value
+
+// HKLM\SYSTEM\CurrentControlSet\Control\Session Manager\Memory Management\PoolTagOverruns
+// 0x0 == Verify Start, 0x1 == VerifyEnd
+
+#define GFLAG_REG_KEY_PATH W("SYSTEM\\CurrentControlSet\\Control\\Session Manager")
+#define GFLAG_REG_KEY_NAME W("GlobalFlag")
+#define MDA_REG_KEY_PATH FRAMEWORK_REGISTRY_KEY_W
+#define MDA_REG_KEY_ENABLE W("MdaEnable")
+#define MDA_CONFIG_ENV_VARIABLE W("MDA_CONFIG")
+
+extern const bool g_mdaAssistantIsSwitch[];
+
+typedef enum : BYTE
+{
+#define MDA_DEFINE_ASSISTANT_ENUMERATION
+#include "mdaschema.inl"
+#undef MDA_DEFINE_ASSISTANT_ENUMERATION
+ MdaElemDef(AssistantMax),
+
+#define MDA_ELEMENT_DEFINITION_ENUMERATION
+#include "mdaschema.inl"
+#undef MDA_ELEMENT_DEFINITION_ENUMERATION
+ MdaElemDef(Max),
+
+#define MDA_ELEMENT_DECLARAION_ENUMERATION
+#include "mdaschema.inl"
+#undef MDA_ELEMENT_DECLARAION_ENUMERATION
+ MdaElemDecl(Max),
+ MdaElemComment,
+ MdaElemUndefined,
+ MdaElemEnd,
+} MdaElemDeclDef;
+
+typedef enum
+{
+#define MDA_ATTRIBUTE_DECLARATION_ENUMERATION
+#include "mdaschema.inl"
+#undef MDA_ATTRIBUTE_DECLARATION_ENUMERATION
+ MdaAttrDecl(Max),
+
+ MdaAttrUndefined,
+ MdaAttrEnd,
+} MdaAttrDeclDef;
+
+typedef const SString CSString;
+
+#pragma warning(push)
+#pragma warning(disable:4324)
+
+//
+// MdaStack
+//
+template<typename TYPE>
+class MdaStack
+{
+private: // MdaStack not for general use. //
+ MdaStack() : m_depth(0) { LIMITED_METHOD_CONTRACT; }
+
+public: // MdaStack not for general use. //
+ void Set(MdaStack<TYPE>* pStack) { WRAPPER_NO_CONTRACT; m_stack.Set(pStack->m_stack); m_depth = pStack->m_depth; }
+ TYPE Push(TYPE element) { WRAPPER_NO_CONTRACT; *m_stack.Append() = element; m_depth++; return Tos(); }
+ TYPE Push() { WRAPPER_NO_CONTRACT; *m_stack.Append(); m_depth++; return Tos(); }
+ TYPE Pop() { WRAPPER_NO_CONTRACT; PRECONDITION(GetDepth() > 0); TYPE tos = Tos(); m_stack.Delete(m_stack.End() - 1); m_depth--; return tos; }
+ TYPE Tos() { WRAPPER_NO_CONTRACT; return m_stack.End()[-1]; }
+ void Clear() { WRAPPER_NO_CONTRACT; while(GetDepth()) Pop(); }
+ COUNT_T GetDepth() { WRAPPER_NO_CONTRACT; return m_depth; }
+
+private:
+ friend class MdaConfigFactory;
+ friend class ManagedDebuggingAssistants;
+ friend class MdaSchema;
+ friend class MdaXPath;
+
+private:
+ INT32 m_depth;
+ InlineSArray<TYPE, 16> m_stack;
+};
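+
+// Usage sketch (illustrative; the constructor is private, so only the friend
+// classes listed above can instantiate one):
+//
+//     MdaStack<MdaXmlElement*> stack;
+//     stack.Push(pRoot);                  // depth 1
+//     MdaXmlElement* pTos = stack.Tos();
+//     stack.Pop();                        // back to depth 0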
+
+
+//
+// MdaHashtable
+//
+BOOL MdaLockOwner(LPVOID);
+
+template<typename TYPE>
+class MdaHashtable
+{
+private: // MdaHashtable not for general use. //
+ MdaHashtable() { WRAPPER_NO_CONTRACT; LockOwner lockOwner = {NULL, MdaLockOwner}; m_ht.Init(11, &lockOwner); }
+
+public: // MdaHashtable not for general use. //
+ TYPE Get(LPCWSTR pKey) { WRAPPER_NO_CONTRACT; StackSString sKey(pKey); return Get(&sKey); }
+ BOOL Get(LPCWSTR pKey, TYPE* pValue) { WRAPPER_NO_CONTRACT; StackSString sKey(pKey); return Get(&sKey, pValue); }
+    BOOL HasKey(LPCWSTR pKey) { WRAPPER_NO_CONTRACT; TYPE value; return Get(pKey, &value); }
+ TYPE Get(CSString* pKey)
+ {
+ WRAPPER_NO_CONTRACT;
+ TYPE value;
+        BOOL bFound = Get(pKey, &value);
+        ASSERT(bFound);
+        (void)bFound; // the lookup must stay outside ASSERT, which compiles away in retail builds
+ return value;
+ }
+ BOOL Get(CSString* psszKey, TYPE* pValue)
+ {
+ WRAPPER_NO_CONTRACT;
+ EEStringData key(psszKey->GetCount(), psszKey->GetUnicode());
+ HashDatum value;
+ if (m_ht.GetValue(&key, &value))
+ {
+ *pValue = (TYPE)(UINT_PTR)value;
+ return TRUE;
+ }
+ return FALSE;
+ }
+ void EmptyHashTable() { WRAPPER_NO_CONTRACT; m_ht.EmptyHashTable(); }
+ void DeleteValue(LPCWSTR szKey)
+ {
+ WRAPPER_NO_CONTRACT;
+ StackSString sszKey(szKey);
+ EEStringData key(sszKey.GetCount(), sszKey.GetUnicode());
+ m_ht.DeleteValue(&key);
+ }
+ DWORD GetCount() { WRAPPER_NO_CONTRACT; return m_ht.GetCount(); }
+
+ TYPE Set(LPCWSTR pKey, TYPE value) { WRAPPER_NO_CONTRACT; StackSString sszKey(pKey); return Set(&sszKey, value); }
+ TYPE Set(CSString* psszKey, TYPE value)
+ {
+ WRAPPER_NO_CONTRACT;
+ EEStringData key(psszKey->GetCount(), psszKey->GetUnicode());
+ m_ht.InsertValue(&key, (HashDatum)value);
+ return value;
+ }
+
+private:
+ friend class MdaXmlElement;
+ friend class MdaSchema;
+ friend class ManagedDebuggingAssistants;
+
+private:
+ EEUnicodeStringHashTable m_ht;
+};
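+
+// Usage sketch (illustrative; MdaHashtable is likewise constructible only by
+// its friends):
+//
+//     MdaHashtable<MdaXmlElement*> ht;
+//     ht.Set(W("myKey"), pXml);
+//     MdaXmlElement* pFound;
+//     if (ht.Get(W("myKey"), &pFound)) { /* found */ }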
+
+
+//
+//
+// MdaEnvironment
+class MdaEnvironment
+{
+public:
+ MdaEnvironment();
+ ~MdaEnvironment();
+ BOOL IsDisabled() { return m_bDisable; }
+ LPCWSTR GetConfigFile() { return m_psszConfigFile->GetUnicode(); }
+ LPCWSTR GetMdaConfigFile() { return m_psszMdaConfigFile->GetUnicode(); }
+ SArray<SString*>& GetActivationMechanisms() { return *m_pGroups; }
+
+private:
+ LPWSTR m_szMda;
+ MdaFactory<StackSString>* m_pStringFactory;
+ SString* m_psszMdaConfigFile;
+ SString* m_psszConfigFile;
+ BOOL m_bDisable;
+ SArray<SString*>* m_pGroups;
+};
+
+
+//
+// Mda
+//
+
+// Use these macros if your callsite cannot run on the debugger helper thread. This is the fastest version.
+#define MDA_GET_ASSISTANT(ASSISTANT) (Mda##ASSISTANT*)ManagedDebuggingAssistants::GetAssistant(MdaElemDef(ASSISTANT))
+#define MDA_TRIGGER_ASSISTANT(ASSISTANT, MEMBER) if (Mda##ASSISTANT* pMdaAssistant = MDA_GET_ASSISTANT(ASSISTANT)) pMdaAssistant->MEMBER
+
+// Use these macros if your callsite might run on the debugger helper thread. This should be avoided for
+// very hot checks.
+#define MDA_GET_ASSISTANT_EX(ASSISTANT) (Mda##ASSISTANT*)ManagedDebuggingAssistants::GetAssistantEx(MdaElemDef(ASSISTANT))
+#define MDA_TRIGGER_ASSISTANT_EX(ASSISTANT, MEMBER) if (Mda##ASSISTANT* pMdaAssistant = MDA_GET_ASSISTANT_EX(ASSISTANT)) pMdaAssistant->MEMBER
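+
+// Usage sketch (hypothetical assistant and member names): at a callsite that
+// never runs on the debugger helper thread,
+//
+//     MDA_TRIGGER_ASSISTANT(MyAssistant, ReportViolation(pMD));
+//
+// expands to a GetAssistant(MdaElemDef(MyAssistant)) lookup and invokes
+// ReportViolation only when the assistant is enabled.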
+
+class ManagedDebuggingAssistants
+{
+public:
+ FORCEINLINE static MdaAssistant* GetAssistant(MdaElemDeclDef id);
+ FORCEINLINE static MdaAssistant* GetAssistantEx(MdaElemDeclDef id);
+ FORCEINLINE static void Enable(MdaElemDeclDef assistantDeclDef, MdaAssistant* pMda);
+
+private:
+ static void AllocateManagedDebuggingAssistants();
+ ManagedDebuggingAssistants();
+ void Initialize();
+#ifdef _DEBUG
+ void DebugInitialize();
+#endif
+
+private:
+ void SetFwLink(MdaElemDeclDef assistant, LPCWSTR szFwLink) { LIMITED_METHOD_CONTRACT; m_szFwLinks[assistant] = szFwLink; }
+ LPCWSTR GetFwLink(MdaElemDeclDef assistant) { LIMITED_METHOD_CONTRACT; return m_szFwLinks[assistant]; }
+ void ReadAppConfigurationFile(MdaXmlElement* pXmlRoot, SString* pConfigFile, MdaStack<LPCWSTR>* pConfigMdaRoot);
+ MdaXmlElement* GetRootElement(MdaXmlElement* pMdaXmlRoot);
+ void EnvironmentActivation(MdaEnvironment* pEnvironment);
+ void ConfigFileActivation(LPCWSTR szConfigFile, MdaXmlIndustry* pXmlIndustry, MdaHashtable<MdaXmlElement*>* pXmlConfigs);
+ void ActivateGroup(LPCWSTR groupName, SArray<MdaElemDeclDef>* pGroup, MdaHashtable<MdaXmlElement*>* pXmlConfigs);
+ MdaXmlElement* GetSwitchActivationXml(MdaElemDeclDef mda);
+
+public:
+ static BOOL IsDebuggerAttached();
+ static BOOL IsManagedDebuggerAttached();
+ static BOOL IsUnmanagedDebuggerAttached();
+
+private:
+ static void EEStartupActivation();
+
+private:
+ friend HRESULT EEStartup(DWORD fFlags);
+
+private:
+ friend class MdaAssistant;
+ friend class MdaEnvironment;
+ friend class MdaInvalidConfigFile;
+ friend class MdaSchema;
+ friend class MdaAssistantSchema;
+ friend class MdaAssistantMsgSchema;
+ friend class MdaSchemaSchema;
+ friend class MdaXmlMessage;
+ friend class MdaXmlIndustry;
+ friend class MdaConfigFactory;
+ friend class EEConfigFactory;
+ friend class MdaFramework;
+ friend void EEStartupHelper(COINITIEE fFlags);
+
+private:
+ BOOL GetConfigBool(MdaAttrDeclDef attrDeclDef, MdaElemDeclDef element = MdaElemUndefined, BOOL bDefault = FALSE);
+ BOOL GetConfigBool(MdaAttrDeclDef attrDeclDef, BOOL bDefault) { WRAPPER_NO_CONTRACT; return GetConfigBool(attrDeclDef, MdaElemUndefined, bDefault); }
+
+private:
+ Crst* m_pLock;
+ BOOL m_bValidateOutput, m_bIsInitialized;
+ LPCWSTR m_szFwLinks[MdaElemDef(AssistantMax)];
+ MdaSchema* m_pAssistantSchema;
+ MdaSchema* m_pAssistantMsgSchema;
+ MdaSchema* m_pSchemaSchema;
+ MdaXmlIndustry* m_pMdaXmlIndustry;
+ MdaXmlElement* m_pSwitchActivationXml;
+};
+
+
+
+typedef VPTR(MdaAssistant) PTR_MdaAssistant;
+
+//
+// MdaAssistant
+//
+class MdaAssistant
+{
+ friend class ValidateMdaAssistantLayout;
+public:
+ static MdaXmlElement* OutputThread(Thread* pThread, MdaXmlElement* pXml);
+ static MdaXmlElement* OutputParameter(SString parameterName, USHORT sequence, MethodDesc* pMethodDesc, MdaXmlElement* pXml);
+ static MdaXmlElement* OutputMethodTable(MethodTable* pMT, MdaXmlElement* pXml);
+ static MdaXmlElement* OutputMethodDesc(MethodDesc* pMethodDesc, MdaXmlElement* pXml);
+ static MdaXmlElement* OutputFieldDesc(FieldDesc* pFieldDesc, MdaXmlElement* pXml);
+ static MdaXmlElement* OutputTypeHandle(TypeHandle typeHandle, MdaXmlElement* pXml);
+ static MdaXmlElement* OutputModule(Module* pModule, MdaXmlElement* pXml);
+ static MdaXmlElement* OutputCallsite(MethodDesc *pMethodDesc, DWORD dwOffset, MdaXmlElement* pXml);
+ static MdaXmlElement* OutputException(OBJECTREF *pExceptionObj, MdaXmlElement* pXml);
+
+public:
+ static SString& ToString(SString& sszBuffer, Module* pModule);
+ static SString& ToString(SString& sszBuffer, TypeHandle typeHandle);
+ static SString& ToString(SString& sszBuffer, MethodDesc* pMethodDesc);
+ static SString& ToString(SString& sszBuffer, FieldDesc* pFieldDesc);
+ static void ToString(TypeHandle typeHandle, SString* psszFullname, SString* psszNamespace);
+
+public:
+ LPCWSTR GetName();
+
+private:
+ void Initialize(MdaXmlElement* pXmlInput);
+ static BOOL IsAssistantActive(MdaXmlElement* pXml);
+
+private:
+ bool GetSuppressDialog() { LIMITED_METHOD_CONTRACT; return m_bSuppressDialog; }
+ MdaElemDeclDef GetAssistantDeclDef() { LIMITED_METHOD_CONTRACT; return m_assistantDeclDef; }
+ MdaElemDeclDef GetAssistantMsgDeclDef() { LIMITED_METHOD_CONTRACT; return m_assistantMsgDeclDef; }
+ MdaXmlElement* GetRootElement(MdaXmlElement* pMdaXmlRoot, BOOL bBreak);
+
+private:
+ friend class ManagedDebuggingAssistants;
+ friend class MdaXmlMessage;
+
+private:
+ // WARNING: do not modify the field layout without also
+ // modifying the MDA_ASSISTANT_BASE_MEMBERS macro.
+ MdaElemDeclDef m_assistantDeclDef;
+ MdaElemDeclDef m_assistantMsgDeclDef;
+ bool m_bSuppressDialog;
+};
+
+//
+// MdaXmlAttribute
+//
+class MdaXmlAttribute
+{
+public:
+ LPCWSTR GetName();
+ LPCWSTR GetValue() { LIMITED_METHOD_CONTRACT; PRECONDITION(CheckPointer(this)); return m_value.GetUnicode(); }
+ LPCWSTR GetValueAsUnicode() { LIMITED_METHOD_CONTRACT; PRECONDITION(CheckPointer(this)); return GetValueAsCSString()->GetUnicode(); }
+ SString* GetValueAsCSString() { LIMITED_METHOD_CONTRACT; PRECONDITION(CheckPointer(this)); return &m_value; }
+ BOOL GetValueAsBool() { LIMITED_METHOD_CONTRACT; PRECONDITION(CheckPointer(this)); ASSERT(m_type == MdaSchemaPrimitiveBOOL); return m_bool; }
+ INT32 GetValueAsInt32() { LIMITED_METHOD_CONTRACT; PRECONDITION(CheckPointer(this)); ASSERT(m_type == MdaSchemaPrimitiveINT32); return m_int; }
+ MdaAttrDeclDef GetDeclDef() { LIMITED_METHOD_CONTRACT; PRECONDITION(CheckPointer(this)); return m_declDef; }
+
+private:
+ SString* ToXml(SString* xml);
+
+ MdaXmlAttribute* Initialize(LPCWSTR szName, LPCWSTR szValue);
+
+ MdaXmlAttribute* SetSString(LPCUTF8 szValue) { WRAPPER_NO_CONTRACT; m_type = MdaSchemaPrimitiveSString; m_value.SetUTF8(szValue); return this; }
+ MdaXmlAttribute* SetSString(LPCWSTR szValue) { WRAPPER_NO_CONTRACT; m_type = MdaSchemaPrimitiveSString; m_value.Set(szValue); return this; }
+ MdaXmlAttribute* SetDeclDef(MdaAttrDeclDef declDef) { WRAPPER_NO_CONTRACT; m_declDef = declDef; return this; }
+ MdaXmlAttribute* SetNs(LPCWSTR szNs) { WRAPPER_NO_CONTRACT; m_szNs.Set(szNs); return this; }
+ MdaXmlAttribute* SetINT32(INT32 value) { LIMITED_METHOD_CONTRACT; m_type = MdaSchemaPrimitiveINT32; m_int = value; return this; }
+ MdaXmlAttribute* SetBOOL(BOOL value) { LIMITED_METHOD_CONTRACT; m_type = MdaSchemaPrimitiveBOOL; m_bool = value; return this; }
+
+private:
+ friend class ManagedDebuggingAssistants;
+ friend class MdaConfigFactory;
+ friend class MdaSchemaSchema;
+ friend class MdaXmlElement;
+ friend class MdaSchema;
+ friend class MdaXmlMessage;
+ template<typename PRODUCT> friend class MdaFactory;
+
+private:
+ MdaAttrDeclDef m_declDef;
+ SString m_szName;
+ SString m_szNs;
+ MdaSchemaPrimitive m_type;
+ SString m_value;
+ BOOL m_bool;
+ INT32 m_int;
+};
+
+
+//
+// MdaXmlElement
+//
+class MdaXmlElement
+{
+public: /* inspection */
+ LPCWSTR GetName();
+ MdaElemDeclDef GetDeclDef() { LIMITED_METHOD_CONTRACT; return m_elemDeclDef; }
+ BOOL IsDefinition() { LIMITED_METHOD_CONTRACT; return m_elemDeclDef < MdaElemDef(Max); }
+ BOOL IsDeclaration() { LIMITED_METHOD_CONTRACT; return !IsDefinition(); }
+ SArray<MdaXmlElement*>& GetChildren() { LIMITED_METHOD_CONTRACT; return m_children; }
+ MdaXmlElement* GetChild(MdaElemDeclDef declDef);
+ SArray<MdaXmlAttribute*>& GetAttributes() { LIMITED_METHOD_CONTRACT; return m_attributes; }
+ MdaXmlAttribute* GetAttribute(MdaAttrDeclDef attrDeclDef);
+ BOOL GetAttributeValueAsBool(MdaAttrDeclDef attrDeclDef, BOOL bDefault);
+ BOOL GetAttributeValueAsBool(MdaAttrDeclDef attrDeclDef);
+
+public: /* creation */
+ MdaXmlElement* SetDeclDef(MdaElemDeclDef elemDeclDef) { LIMITED_METHOD_CONTRACT; m_elemDeclDef = elemDeclDef; return this; }
+ MdaXmlElement* SetName(LPCWSTR name, BOOL bAssertDefined = TRUE);
+ MdaXmlElement* AddChild(LPCWSTR name, BOOL bAssertDefined = TRUE);
+ MdaXmlElement* AddChild(MdaElemDeclDef type);
+ void AddChildComment(LPCWSTR szComment) { WRAPPER_NO_CONTRACT; AddChild(MdaElemComment)->m_szName.Set(szComment); }
+ LPCWSTR DebugToString(SString* pBuffer);
+
+ template<typename ATTRIBUTE_TYPE>
+ MdaXmlAttribute* AddAttributeSz(MdaAttrDeclDef declDef, ATTRIBUTE_TYPE szValue) { return AddAttribute(declDef)->SetSString(szValue); }
+ MdaXmlAttribute* AddAttributeInt(MdaAttrDeclDef declDef, INT32 value) { return AddAttribute(declDef)->SetINT32(value); }
+ MdaXmlAttribute* AddAttributeBool(MdaAttrDeclDef declDef, BOOL bValue) { return AddAttribute(declDef)->SetBOOL(bValue); }
+
+private:
+ MdaXmlElement() : m_elemDeclDef(MdaElemUndefined), m_defaultAttrIndex(-1) { WRAPPER_NO_CONTRACT; }
+ MdaXmlElement* AddChild(MdaXmlElement* pChild);
+
+ MdaXmlElement* SetIndustry(MdaXmlIndustry* pXmlIndustry)
+ { LIMITED_METHOD_CONTRACT; PRECONDITION(CheckPointer(pXmlIndustry, NULL_OK)); m_pXmlIndustry = pXmlIndustry; return this; }
+
+ MdaXmlAttribute* AddDefaultAttribute(MdaAttrDeclDef attrDeclDef, LPCWSTR szValue);
+ MdaXmlAttribute* AddAttribute(LPCWSTR szName, LPCWSTR szValue);
+
+ SString* ToXml(SString* xml) { WRAPPER_NO_CONTRACT; return ToXml(xml, NULL, 0); }
+ SString* ToXml(SString* xml, LPCWSTR ns) { WRAPPER_NO_CONTRACT; return ToXml(xml, ns, 0); }
+ SString* ToXml(SString* xml, LPCWSTR ns, INT32 depth);
+
+ MdaXmlAttribute* AddAttribute(MdaAttrDeclDef declDef);
+ MdaXmlAttribute* AddAttribute(MdaXmlAttribute* pAttr) { WRAPPER_NO_CONTRACT; *m_attributes.Append() = pAttr; return pAttr; }
+
+private:
+ friend class MdaSchema;
+ friend class ManagedDebuggingAssistants;
+ friend class MdaXmlMessage;
+ friend class MdaXmlElement;
+ template<typename PRODUCT> friend class MdaFactory;
+ friend class MdaXmlIndustry;
+ friend class MdaConfigFactory;
+ friend class MdaSchemaSchema;
+ friend class MdaXmlValidationError;
+
+private:
+ MdaXmlIndustry* m_pXmlIndustry;
+ MdaElemDeclDef m_elemDeclDef;
+ SString m_szName;
+ InlineSArray<MdaXmlElement*, MDA_XML_ELEMENT_CHILDREN> m_children;
+ COUNT_T m_defaultAttrIndex;
+ InlineSArray<MdaXmlAttribute*, MDA_XML_ELEMENT_ATTRIBUTES> m_attributes;
+};
+
+
+//
+// MdaFactory
+//
+template<typename PRODUCT>
+class MdaFactory
+{
+public:
+ MdaFactory() : m_cProduct(0), m_next(NULL) { LIMITED_METHOD_CONTRACT; }
+ ~MdaFactory() { LIMITED_METHOD_CONTRACT; if (m_next) delete m_next; }
+ MdaFactory* GetNext() { if (!m_next) m_next = new MdaFactory<PRODUCT>(); return m_next; }
+ PRODUCT* Create();
+
+private:
+ MdaFactory* m_next;
+ PRODUCT m_product[MDA_MAX_FACTORY_PRODUCT];
+ INT32 m_cProduct;
+};
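+
+// Note: each factory node embeds MDA_MAX_FACTORY_PRODUCT products inline and
+// presumably chains to GetNext() once full (Create() is defined elsewhere),
+// so products are never relocated and pointers to them remain stable.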
+
+
+//
+// MdaXmlIndustry
+//
+class MdaXmlIndustry
+{
+public:
+ MdaXmlElement* CreateElement() { WRAPPER_NO_CONTRACT; return m_elements.Create()->SetIndustry(this); }
+ MdaXmlAttribute* CreateAttribute() { WRAPPER_NO_CONTRACT; return m_attributes.Create(); }
+
+private:
+ MdaFactory<MdaXmlElement> m_elements;
+ MdaFactory<MdaXmlAttribute> m_attributes;
+
+private:
+ friend class MdaConfigFactory;
+ friend class MdaFramework;
+ friend class MdaXmlMessage;
+ friend class ManagedDebuggingAssistants;
+ friend class MdaXmlElement;
+ friend class MdaXmlAttribute;
+ friend class MdaSchema;
+};
+
+
+//
+// MdaXmlMessage
+//
+class MdaXmlMessage
+{
+public:
+ MdaXmlMessage(MdaXmlElement** ppMdaXmlRoot);
+ MdaXmlMessage(MdaAssistant* pAssistant, BOOL bBreak, MdaXmlElement** ppMdaXmlRoot);
+
+public:
+ void SendMessage();
+ void SendMessage(int resourceID);
+ void SendMessage(LPCWSTR szMessage);
+ void SendMessagef(int resourceID, ...);
+
+private:
+ static BOOL IsDebuggerAttached() { WRAPPER_NO_CONTRACT; return ManagedDebuggingAssistants::IsDebuggerAttached(); }
+ static BOOL IsManagedDebuggerAttached() { WRAPPER_NO_CONTRACT; return ManagedDebuggingAssistants::IsManagedDebuggerAttached(); }
+ static BOOL IsUnmanagedDebuggerAttached() { WRAPPER_NO_CONTRACT; return ManagedDebuggingAssistants::IsUnmanagedDebuggerAttached(); }
+ static BOOL ShouldLogToManagedDebugger();
+
+private:
+ void SendEvent();
+ void SendHostEvent();
+ void SendDebugEvent();
+
+private:
+ friend class ManagedDebuggingAssistants;
+ friend class MdaFramework;
+
+private:
+ BOOL m_bBreak;
+ MdaAssistant* m_pMdaAssistant;
+ SString m_localizedMessage;
+ SString m_englishMessage;
+ MdaXmlElement* m_pMdaXmlRoot;
+ MdaXmlElement* m_pAssistantXmlRoot;
+ MdaXmlIndustry m_mdaXmlIndustry;
+};
+
+
+//
+// MdaXPath
+//
+class MdaXPath
+{
+public:
+ static SArray<MdaXmlElement*>* FindElements(MdaXmlElement* pRoot, LPCWSTR szQuery, SArray<MdaXmlElement*>* pResult)
+ { WRAPPER_NO_CONTRACT; MdaXPath query(szQuery); return query.FindElements(pRoot, pResult); }
+ static MdaXmlElement* FindElement(MdaXmlElement* pRoot, LPCWSTR szQuery)
+ { WRAPPER_NO_CONTRACT; MdaXPath query(szQuery); return query.FindElement(pRoot); }
+ static SArray<MdaXmlAttribute*>* FindAttributes(MdaXmlElement* pRoot, LPCWSTR szQuery, SArray<MdaXmlAttribute*>* pResult)
+ { WRAPPER_NO_CONTRACT; MdaXPath query(szQuery); return query.FindAttributes(pRoot, pResult); }
+ static MdaXmlAttribute* FindAttribute(MdaXmlElement* pRoot, LPCWSTR szQuery)
+ { WRAPPER_NO_CONTRACT; MdaXPath query(szQuery); return query.FindAttribute(pRoot); }
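+
+    // Query sketch (illustrative, hypothetical element names): the helpers
+    // above compile and run a query in one shot, e.g.
+    //
+    //     MdaXmlElement* pMatch =
+    //         MdaXPath::FindElement(pRoot, W("/myAssistant/myChild"));
+    //
+    // A reusable MdaXPath instance additionally supports '?' placeholders
+    // bound through the trailing varargs on FindElement/FindAttribute.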
+
+public:
+ MdaXPath() : m_cArgs(NOT_VARIABLE), m_pCompiledQuery(NULL) { WRAPPER_NO_CONTRACT; }
+ MdaXPath(LPCWSTR xpath) : m_cArgs(NOT_VARIABLE) { WRAPPER_NO_CONTRACT; Initialize(xpath); }
+ MdaXPath* Initialize(LPCWSTR xpath) { WRAPPER_NO_CONTRACT; m_xpath.Set(xpath); MdaXPathCompiler(this, &m_pCompiledQuery); return this; }
+ MdaXmlElement* FindElement(MdaXmlElement* pRoot, ...);
+ MdaXmlAttribute* FindAttribute(MdaXmlElement* pRoot, ...);
+ SArray<MdaXmlElement*>* FindElements(MdaXmlElement* pRoot, SArray<MdaXmlElement*>* pResult, ...);
+ SArray<MdaXmlAttribute*>* FindAttributes(MdaXmlElement* pRoot, SArray<MdaXmlAttribute*>* pResult, ...);
+ COUNT_T GetArgCount() { LIMITED_METHOD_CONTRACT; return m_cArgs + 1; }
+
+private:
+ class MdaXPathBase;
+ class MdaXPathElement;
+ class MdaXPathAttribute;
+ class MdaXPathResult;
+ class MdaXPathLogicalOp;
+
+ typedef enum
+ {
+ XPathVarAttrBool = MdaSchemaPrimitiveBOOL,
+ XPathVarAttrSString = MdaSchemaPrimitiveSString,
+ XPathVarAttrINT32 = MdaSchemaPrimitiveINT32,
+ XPathVarElemDeclDef = XPathVarAttrINT32 + 1,
+ XPathVarAttrDeclDef = XPathVarAttrINT32 + 2,
+ } XPathVarType;
+
+ typedef struct
+ {
+ union
+ {
+ MdaElemDeclDef m_elemDeclDef;
+ MdaAttrDeclDef m_attrDeclDef;
+ BOOL m_bool;
+ SString* m_pSstr;
+ INT32 m_int32;
+ } m_u;
+ } MdaXPathVariable;
+
+private:
+ void Find(SArray<MdaXPathVariable>& args, SString* pWildCard, va_list argItr);
+ static const COUNT_T NOT_VARIABLE = -1;
+
+private:
+ class MdaXPathResult
+ {
+ public:
+ MdaXPathResult(SArray<MdaXPathVariable>* args) { LIMITED_METHOD_CONTRACT; Initialize(args); }
+ MdaXPathResult(SArray<MdaXmlElement*>* pElements, SArray<MdaXPathVariable>* args) { WRAPPER_NO_CONTRACT; Initialize(args); m_pElements = pElements; }
+ MdaXPathResult(SArray<MdaXmlAttribute*>* pAttributes, SArray<MdaXPathVariable>* args) { WRAPPER_NO_CONTRACT; Initialize(args); m_pAttributes = pAttributes; }
+ void Initialize(SArray<MdaXPathVariable>* args) { LIMITED_METHOD_CONTRACT; m_args = args; m_pElements = NULL; m_pAttributes = NULL; m_pElement = NULL; m_pAttribute = NULL; m_bIsRoot = TRUE; }
+ MdaXmlElement* GetXmlElement() { LIMITED_METHOD_CONTRACT; return m_pElement; }
+ MdaXmlAttribute* GetXmlAttribute() { LIMITED_METHOD_CONTRACT; return m_pAttribute; }
+
+ void AddMatch(MdaXmlAttribute* pMatch)
+ { LIMITED_METHOD_CONTRACT; if (m_pAttributes) m_pAttributes->Append((MdaXmlAttribute*)pMatch); else { ASSERT(!m_pAttribute); m_pAttribute = pMatch; } }
+ void AddMatch(MdaXmlElement* pMatch)
+ { LIMITED_METHOD_CONTRACT; if (m_pElements) m_pElements->Append((MdaXmlElement*)pMatch); else { ASSERT(!m_pElement); m_pElement = pMatch; } }
+ BOOL IsRoot() { LIMITED_METHOD_CONTRACT; if (!m_bIsRoot) return FALSE; m_bIsRoot = FALSE; return TRUE; }
+ SArray<MdaXPathVariable>& GetArgs() { LIMITED_METHOD_CONTRACT; return *m_args; }
+
+ private:
+ BOOL m_bIsRoot;
+ SArray<MdaXPathVariable>* m_args;
+ SArray<MdaXmlElement*>* m_pElements;
+ SArray<MdaXmlAttribute*>* m_pAttributes;
+ MdaXmlElement* m_pElement;
+ MdaXmlAttribute* m_pAttribute;
+ };
+
+ class MdaXPathCompiler
+ {
+ public:
+ MdaXPathCompiler(MdaXPath* pXPath, MdaXPathBase** ppCompiledQuery)
+ : m_pXPath(pXPath) { WRAPPER_NO_CONTRACT; m_itr = pXPath->m_xpath.Begin(); NextToken(); *ppCompiledQuery = XPATH(); }
+
+ private:
+ typedef enum {
+ //
+ // TOKENS
+ //
+ MdaXPathIdentifier = 0x0001,
+ MdaXPathDot = 0x0002,
+ MdaXPathSlash = 0x0004,
+ MdaXPathAstrix = 0x0008,
+ MdaXPathQuotedString = 0x0010,
+ MdaXPathOpenParen = 0x0020,
+ MdaXPathCloseParen = 0x0040,
+ MdaXPathOpenSqBracket = 0x0080,
+ MdaXPathCloseSqBracket = 0x0100,
+ MdaXPathLogicalAnd = 0x0200,
+ MdaXPathLogicalOr = 0x0400,
+ MdaXPathEquals = 0x0800,
+ MdaXPathAtSign = 0x1000,
+ MdaXPathQMark = 0x2000,
+ MdaXPathEnd = 0x4000,
+
+ //
+ // 1 TOKEN LOOK AHEAD
+ //
+ MdaXPathSTART = MdaXPathSlash,
+ MdaXPathXPATH = MdaXPathSlash,
+ MdaXPathATTRIBUTE = MdaXPathAtSign,
+ MdaXPathATTRIBUTE_FILTER = MdaXPathAtSign,
+ MdaXPathELEMENT = MdaXPathIdentifier | MdaXPathAstrix | MdaXPathQMark,
+ MdaXPathELEMENT_EXPR = MdaXPathELEMENT,
+ MdaXPathFILTER = MdaXPathELEMENT_EXPR | MdaXPathATTRIBUTE_FILTER,
+ MdaXPathFILTER_EXPR = MdaXPathFILTER | MdaXPathOpenParen,
+ } MdaXPathTokens;
+
+ //
+ // LEXIFIER
+ //
+ private:
+ MdaXPathTokens LexAToken();
+ void NextToken() { WRAPPER_NO_CONTRACT; m_currentToken = LexAToken(); }
+ BOOL TokenIs(MdaXPathTokens token) { LIMITED_METHOD_CONTRACT; return !!(m_currentToken & token); }
+ BOOL TokenIs(int token) { LIMITED_METHOD_CONTRACT; return TokenIs((MdaXPathTokens)token); }
+ LPCWSTR GetIdentifier() { WRAPPER_NO_CONTRACT; return m_identifier.GetUnicode(); }
+
+ //
+ // PRODUCTIONS
+ //
+ private:
+ MdaXPathBase* XPATH();
+ // '/' ATTRIBUTE end
+ // '/' ELEMENT_EXPR XPATH
+ // '/' ELEMENT_EXPR end
+
+ MdaXPathAttribute* ATTRIBUTE();
+ // '@' id
+ // '@' '?'
+
+ MdaXPathElement* ELEMENT();
+ // id
+ // '*'
+ // '?'
+
+ MdaXPathElement* ELEMENT_EXPR();
+ // ELEMENT '[' FILTER_EXPR ']'
+ // ELEMENT
+
+ MdaXPathBase* FILTER_EXPR();
+ // FILTER
+ // '(' FILTER ')'
+ // FILTER '&' FILTER
+ // FILTER '|' FILTER
+
+ MdaXPathBase* FILTER();
+ // ELEMENT_EXPR
+ // ATTRIBUTE_FILTER
+ // ELEMENT_EXPR ATTRIBUTE_FILTER
+
+ MdaXPathAttribute* ATTRIBUTE_FILTER();
+ // ATTRIBUTE
+ // ATTRIBUTE '=' ''' id '''
+ // ATTRIBUTE '=' '?'
+
+ private:
+ MdaXPath* m_pXPath;
+ SString::CIterator m_itr;
+ StackSString m_identifier;
+ MdaXPathTokens m_currentToken;
+ };
+
+ class MdaXPathBase
+ {
+ public:
+ virtual BOOL Run(MdaXmlElement* pElement, MdaXPathResult* pResult) = 0;
+ virtual BOOL IsXPathAttribute() { LIMITED_METHOD_CONTRACT; return FALSE; }
+
+ private:
+ };
+
+ class MdaXPathElement : public MdaXPathBase
+ {
+ public:
+ virtual BOOL Run(MdaXmlElement* pElement, MdaXPathResult* pResult);
+ BOOL RunOnChild(MdaXmlElement* pElement, MdaXPathResult* pResult);
+
+ public:
+ MdaXPathElement() : m_name(MdaElemUndefined), m_nameArg(NOT_VARIABLE), m_bIsTarget(FALSE), m_pChild(NULL), m_pQualifier(NULL) { LIMITED_METHOD_CONTRACT; }
+ MdaXPathBase* MarkAsTarget() { LIMITED_METHOD_CONTRACT; m_bIsTarget = TRUE; return this; };
+ MdaXPathElement* SetChild(MdaXPathBase* pChild) { LIMITED_METHOD_CONTRACT; m_pChild = pChild; return this; }
+ MdaXPathElement* SetQualifier(MdaXPathBase* pQualifier) { LIMITED_METHOD_CONTRACT; m_pQualifier = pQualifier; return this; }
+ MdaXPathElement* Initialize() { LIMITED_METHOD_CONTRACT; return this; }
+ MdaXPathElement* Initialize(MdaElemDeclDef identifier) { LIMITED_METHOD_CONTRACT; m_name = identifier; return this; }
+ MdaXPathElement* Initialize(COUNT_T identifier) { LIMITED_METHOD_CONTRACT; m_nameArg = identifier; return this; }
+
+ private:
+ MdaElemDeclDef m_name;
+ COUNT_T m_nameArg;
+ BOOL m_bIsTarget;
+ MdaXPathBase* m_pChild;
+ MdaXPathBase* m_pQualifier;
+ };
+
+ class MdaXPathAttribute : public MdaXPathBase
+ {
+ public:
+ MdaXPathAttribute() : m_name(MdaAttrUndefined), m_nameArg(NOT_VARIABLE), m_valueArg(NOT_VARIABLE) { WRAPPER_NO_CONTRACT; }
+ virtual BOOL Run(MdaXmlElement* pElement, MdaXPathResult* pResult);
+ virtual BOOL IsXPathAttribute() { LIMITED_METHOD_CONTRACT; return TRUE; }
+
+ public:
+ MdaXPathBase* MarkAsTarget() { LIMITED_METHOD_CONTRACT; m_bIsTarget = TRUE; return this; };
+ MdaXPathAttribute* SetName(MdaAttrDeclDef name) { WRAPPER_NO_CONTRACT; m_name = name; return this; }
+ MdaXPathAttribute* SetValue(LPCWSTR value) { WRAPPER_NO_CONTRACT; m_value.Set(value); return this; }
+ MdaXPathAttribute* SetName(COUNT_T name) { WRAPPER_NO_CONTRACT; m_nameArg = name; return this; }
+ MdaXPathAttribute* SetValue(COUNT_T value) { WRAPPER_NO_CONTRACT; m_valueArg = value; return this; }
+
+ private:
+ BOOL m_bIsTarget;
+ MdaAttrDeclDef m_name;
+ COUNT_T m_nameArg;
+ SString m_value;
+ COUNT_T m_valueArg;
+ };
+
+ class MdaXPathLogicalOp : public MdaXPathBase
+ {
+ public:
+ virtual BOOL Run(MdaXmlElement* pElement, MdaXPathResult* pResult);
+
+ public:
+ MdaXPathLogicalOp* Initialize(BOOL andOp, MdaXPathBase* pLhs, MdaXPathBase* pRhs)
+ { LIMITED_METHOD_CONTRACT; m_andOp = andOp; m_pLhs = pLhs; m_pRhs = pRhs; return this; }
+
+ private:
+ BOOL m_andOp;
+ MdaXPathBase* m_pLhs;
+ MdaXPathBase* m_pRhs;
+ };
+
+private:
+ COUNT_T m_cArgs;
+ InlineSArray<XPathVarType, 20> m_argTypes;
+ StackSString m_xpath;
+ MdaXPathBase* m_pCompiledQuery;
+ MdaFactory<MdaXPathElement> m_elementFactory;
+ MdaFactory<MdaXPathAttribute> m_attrFactory;
+ MdaFactory<MdaXPathLogicalOp> m_logicalOpFactory;
+};
+
+
+//
+// MdaSchema
+//
+class MdaSchema
+{
+private:
+ static void Initialize();
+
+public:
+
+ static MdaElemDeclDef GetElementType(LPCWSTR name, BOOL bAssertDefined = TRUE);
+ static LPCWSTR GetElementName(MdaElemDeclDef type);
+ static MdaAttrDeclDef GetAttributeType(LPCWSTR name, BOOL bAssertDefined = TRUE);
+ static LPCWSTR GetAttributeName(MdaAttrDeclDef type);
+
+public:
+ static LPCWSTR g_arElementNames[MdaElemEnd];
+
+private:
+ static LPCWSTR g_arAttributeNames[MdaAttrEnd];
+ static MdaFactory<SString>* g_pSstringFactory;
+ static MdaHashtable<MdaElemDeclDef>* g_pHtElementType;
+ static MdaHashtable<MdaAttrDeclDef>* g_pHtAttributeType;
+ static LPCWSTR ToLowerFirstChar(LPCWSTR name);
+
+private:
+ class MdaSchemaBase;
+ class MdaSchemaAttribute;
+ class MdaSchemaSequence;
+ class MdaSchemaChoice;
+ class MdaSchemaComplexType;
+ class MdaSchemaElement;
+ class MdaSchemaGroup;
+ class MdaSchemaGroupRef;
+ class MdaSchemaExtension;
+ class MdaSchemaDeclDefRef;
+
+private:
+ class ValidationResult
+ {
+ public:
+ ValidationResult() { LIMITED_METHOD_CONTRACT; ResetResult(); }
+ void ResetResult() { LIMITED_METHOD_CONTRACT; m_bValid = TRUE; m_pViolatedElement = NULL; m_pViolatingElement = NULL; m_pXmlRoot = NULL; m_pSchema = NULL; }
+ BOOL ValidationFailed() { LIMITED_METHOD_CONTRACT; return !m_bValid; }
+ void Initialize(MdaSchema* pSchema, MdaXmlElement* pRoot) { LIMITED_METHOD_CONTRACT; m_pXmlRoot = pRoot; m_pSchema = pSchema; }
+ void SetError() { LIMITED_METHOD_CONTRACT; m_bValid = FALSE; }
+ void SetError(MdaSchemaBase* pViolatedElement, MdaXmlElement* pViolatingElement)
+ { LIMITED_METHOD_CONTRACT; m_bValid = FALSE; m_pViolatedElement = pViolatedElement; m_pViolatingElement = pViolatingElement; }
+
+ private:
+ friend class MdaXmlValidationError;
+
+ private:
+ BOOL m_bValid;
+ MdaXmlElement* m_pXmlRoot;
+ MdaSchema* m_pSchema;
+ MdaSchemaBase* m_pViolatedElement;
+ MdaXmlElement* m_pViolatingElement;
+ };
+
+private:
+ static BOOL MayHaveAttr(MdaSchemaBase* pBase) { LIMITED_METHOD_CONTRACT; return MdaSchemaTypeToMetaType[pBase->GetSchemaType()] & MdaSchemaMataMayHaveAttributes; }
+ static BOOL IsPattern(MdaSchemaBase* pBase) { LIMITED_METHOD_CONTRACT; return MdaSchemaTypeToMetaType[pBase->GetSchemaType()] & MdaSchemaMataTypePattern; }
+ static BOOL IsRef(MdaSchemaBase* pBase) { LIMITED_METHOD_CONTRACT; return MdaSchemaTypeToMetaType[pBase->GetSchemaType()] & MdaSchemaMataTypeRef; }
+ static BOOL IsDeclDef(MdaSchemaBase* pBase) { LIMITED_METHOD_CONTRACT; return MdaSchemaTypeToMetaType[pBase->GetSchemaType()] & MdaSchemaMataTypeDeclDef; }
+ static BOOL IsDeclDefRef(MdaSchemaBase* pBase) { WRAPPER_NO_CONTRACT; return IsDeclDef(pBase) || IsRef(pBase); }
+ static MdaSchemaDeclDefRef* AsDeclDefRef(MdaSchemaBase* pBase) { WRAPPER_NO_CONTRACT; if (!IsDeclDefRef(pBase)) return NULL; return (MdaSchemaDeclDefRef*)pBase; }
+ static MdaSchemaDeclDefRef* ToDeclDefRef(MdaSchemaBase* pBase) { WRAPPER_NO_CONTRACT; ASSERT(IsDeclDefRef(pBase)); return (MdaSchemaDeclDefRef*)pBase; }
+ static MdaSchemaDeclDefRef* ToDeclDef(MdaSchemaBase* pBase) { WRAPPER_NO_CONTRACT; ASSERT(IsDeclDef(pBase)); return (MdaSchemaDeclDefRef*)pBase; }
+ static MdaSchemaDeclDefRef* ToRef(MdaSchemaBase* pBase) { WRAPPER_NO_CONTRACT; ASSERT(IsRef(pBase)); return (MdaSchemaDeclDefRef*)pBase; }
+
+public:
+ typedef enum {
+ MdaSchemaSequenceType,
+ MdaSchemaChoiceType,
+ MdaSchemaGroupType,
+ MdaSchemaGroupRefType,
+ MdaSchemaRootType,
+ MdaSchemaAttributeType,
+ MdaSchemaElementType,
+ MdaSchemaComplexTypeType,
+ MdaSchemaComplexTypeDefType,
+ MdaSchemaElementRefTyp,
+ MdaSchemaExtensionType,
+ MdaSchemaElementRefTypeType,
+ MdaSchemaComplexContentType,
+ MdaSchemaElementAnyType,
+ MdaSchemaTypeEnd,
+ } MdaSchemaType;
+
+ typedef enum {
+ MdaSchemaMataNone = 0x0,
+ MdaSchemaMataTypePattern = 0x1,
+ MdaSchemaMataTypeDeclDef = 0x2,
+ MdaSchemaMataTypeRef = 0x4,
+ MdaSchemaMataMayHaveAttributes = 0x8,
+ } MdaSchemaMetaType;
+
+private:
+ static MdaElemDeclDef MdaSchemaTypeToElemDef[];
+ static MdaSchemaMetaType MdaSchemaTypeToMetaType[];
+
+ class MdaSchemaBase
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() = 0;
+ virtual MdaElemDeclDef GetSchemaDeclDef() { LIMITED_METHOD_CONTRACT; return MdaSchemaTypeToElemDef[GetSchemaType()]; }
+ virtual BOOL Validate(MdaXmlElement* pElement, ValidationResult* pResult);
+ virtual BOOL ValidatePattern(MdaXmlElement* pElement, ValidationResult* pResult, COUNT_T* pCount) { UNREACHABLE(); }
+ virtual void SetAttributes(MdaXmlElement* pXml) { LIMITED_METHOD_CONTRACT; }
+
+ public:
+ void Verify(MdaSchemaType schemaType, MdaElemDeclDef declDef)
+ {
+ LIMITED_METHOD_CONTRACT;
+            // Look for a mismatched element in your schema: ELEMENT(Foo) ... ELEMENT_END(Bar)
+ ASSERT(schemaType == GetSchemaType() &&
+ W("Mismatch element in your schema ELEMENT(foo) ... TYPE_END(foo) -- attach debugger and look for MdaAssistantSchema on stack"));
+ ASSERT(ToDeclDef(this)->GetDeclDef() == declDef &&
+ W("Mismatch declaration in your schema ELEMENT(Foo) ... ELEMENT_END(bar) -- attach debugger and look for MdaAssistantSchema on stack"));
+ }
+ void Verify(MdaSchemaType schemaType, MdaSchemaBase** ppRef)
+ {
+ LIMITED_METHOD_CONTRACT;
+            // Look for a mismatched element in your schema: ELEMENT(Foo) ... ELEMENT_END(Bar)
+ ASSERT(schemaType == GetSchemaType() &&
+ W("Mismatch element in your schema ELEMENT(foo) ... TYPE_END(foo) -- attach debugger and look for MdaAssistantSchema on stack"));
+ ASSERT(ToRef(this)->m_ppRef == ppRef &&
+ W("Mismatch declaration in your schema ELEMENT(foo) ... ELEMENT_END(bar) -- attach debugger and look for MdaAssistantSchema on stack"));
+ }
+ void Verify(MdaSchemaType schemaType)
+ {
+ LIMITED_METHOD_CONTRACT;
+            // Look for a mismatched element in your schema: ELEMENT(Foo) ... ELEMENT_END(Bar)
+ ASSERT(schemaType == GetSchemaType() &&
+ W("Mismatch element in your schema ELEMENT(foo) ... TYPE_END(foo) -- attach debugger and look for MdaAssistantSchema on stack"));
+ }
+
+ public:
+ MdaXmlElement* ToXml(MdaXmlIndustry* pMdaXmlIndustry, MdaSchemaBase* pViolation = NULL);
+ MdaXmlElement* ToXml(MdaXmlElement* pXmlRoot) { WRAPPER_NO_CONTRACT; return ToXml(pXmlRoot, NULL); }
+ MdaXmlElement* ToXml(MdaXmlElement* pXmlRoot, MdaSchemaBase* pViolation);
+ void AddChild(MdaSchemaBase* pElement);
+ LPCWSTR GetName() { WRAPPER_NO_CONTRACT; return GetElementName(GetSchemaDeclDef()); }
+ friend class MdaSchemaExtension;
+
+ protected:
+ InlineSArray<MdaSchemaBase*, MDA_XML_ELEMENT_CHILDREN> m_children;
+ virtual InlineSArray<MdaSchemaAttribute*, MDA_XML_ELEMENT_CHILDREN>& GetAttributes() { LIMITED_METHOD_CONTRACT; UNREACHABLE(); }
+ };
+
+ // <xs:schema>
+ class MdaSchemaRoot : public MdaSchemaBase
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() { LIMITED_METHOD_CONTRACT; return MdaSchemaRootType; }
+ virtual BOOL Validate(MdaXmlElement* pElement, ValidationResult* pResult);
+ };
+
+    // <xs:attribute name="enable" type="xs:boolean" use="required" default="true">
+    static BOOL Validate(MdaSchemaAttribute* pThis, MdaXmlElement* pElement, ValidationResult* pResult);
+ class MdaSchemaAttribute : public MdaSchemaBase
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() { LIMITED_METHOD_CONTRACT; return MdaSchemaAttributeType; }
+ virtual BOOL Validate(MdaXmlElement* pElement, ValidationResult* pResult) { WRAPPER_NO_CONTRACT; return MdaSchema::Validate(this, pElement, pResult); }
+
+ public:
+ void SetAttributes(MdaXmlElement* pXml);
+ MdaSchemaAttribute* Initialize(MdaAttrDeclDef name, MdaSchemaPrimitive type, BOOL bRequired, LPCWSTR szDefault)
+ { WRAPPER_NO_CONTRACT; m_declDef = name; m_type = type; m_bRequired = bRequired; m_szDefault = szDefault; return this; }
+
+ private:
+ friend MdaSchema;
+
+ private:
+ BOOL m_bRequired;
+ SString m_szDefault;
+ MdaAttrDeclDef m_declDef;
+ MdaSchemaPrimitive m_type;
+ };
+
+    // <xs:sequence minOccurs="0" maxOccurs="unbounded">
+ class MdaSchemaSequence : public MdaSchemaBase
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() { LIMITED_METHOD_CONTRACT; return MdaSchemaSequenceType; }
+ virtual BOOL ValidatePattern(MdaXmlElement* pElement, ValidationResult* pResult, COUNT_T* pCount);
+ virtual void SetAttributes(MdaXmlElement* pXml);
+
+ public:
+ MdaSchemaSequence* Initialize(COUNT_T min, COUNT_T max) { WRAPPER_NO_CONTRACT; m_min = min; m_max = max; return this; }
+
+ private:
+ BOOL m_VsHack;
+ COUNT_T m_min;
+ COUNT_T m_max;
+ };
+
+ // <xs:choice>
+ class MdaSchemaChoice : public MdaSchemaBase
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() { LIMITED_METHOD_CONTRACT; return MdaSchemaChoiceType; }
+ virtual BOOL ValidatePattern(MdaXmlElement* pElement, ValidationResult* pResult, COUNT_T* pCount);
+ };
+
+ // <xs:complexContent>
+ class MdaSchemaComplexContent : public MdaSchemaBase
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() { LIMITED_METHOD_CONTRACT; return MdaSchemaComplexContentType; }
+ virtual BOOL Validate(MdaXmlElement* pElement, ValidationResult* pResult);
+ };
+
+ // <xs:complexType>
+ class MdaSchemaComplexType : public MdaSchemaBase
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() { LIMITED_METHOD_CONTRACT; return MdaSchemaComplexTypeType; }
+ virtual BOOL Validate(MdaXmlElement* pElement, ValidationResult* pResult);
+ virtual InlineSArray<MdaSchemaAttribute*, MDA_XML_ELEMENT_CHILDREN>& GetAttributes() { LIMITED_METHOD_CONTRACT; return m_attributes; }
+
+ private:
+ friend class MdaSchemaExtension;
+ InlineSArray<MdaSchemaAttribute*, MDA_XML_ELEMENT_CHILDREN> m_attributes;
+ };
+
+ class MdaSchemaDeclDefRef : public MdaSchemaBase
+ {
+ public:
+ virtual void SetAttributes(MdaXmlElement* pXml);
+
+ public:
+ MdaSchemaDeclDefRef() : m_declDef(MdaElemUndefined), m_ppRef(NULL) { LIMITED_METHOD_CONTRACT; }
+ LPCWSTR GetDeclDefName() { WRAPPER_NO_CONTRACT; ASSERT(IsDeclDef(this)); return GetElementName(m_declDef); }
+ LPCWSTR GetRefName() { LIMITED_METHOD_CONTRACT; return GetRef()->GetDeclDefName(); }
+ MdaElemDeclDef GetDeclDef() { LIMITED_METHOD_CONTRACT; ASSERT(IsDeclDef(this)); return m_declDef; }
+ MdaSchemaDeclDefRef* GetRef() { LIMITED_METHOD_CONTRACT; ASSERT(IsRef(this)); return ToDeclDef(*m_ppRef); }
+ BOOL IsDefinition() { LIMITED_METHOD_CONTRACT; ASSERT(IsDeclDef(this)); return m_declDef < MdaElemDef(Max); }
+
+ public:
+ MdaSchemaDeclDefRef* InitRef(MdaSchemaBase** ppRef) { WRAPPER_NO_CONTRACT; ASSERT(IsRef(this)); m_ppRef = ppRef; return this; }
+ MdaSchemaDeclDefRef* InitDeclDef(MdaElemDeclDef declDef) { WRAPPER_NO_CONTRACT; ASSERT(IsDeclDef(this)); m_declDef = declDef; return this; }
+
+ private:
+ friend class MdaSchemaBase;
+ MdaSchemaBase** m_ppRef;
+ MdaElemDeclDef m_declDef;
+ };
+
+ // <xs:group name="myGroup">
+ class MdaSchemaGroup : public MdaSchemaDeclDefRef
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() { LIMITED_METHOD_CONTRACT; return MdaSchemaGroupType; }
+ virtual BOOL ValidatePattern(MdaXmlElement* pElement, ValidationResult* pResult, COUNT_T* pCount);
+ };
+
+ // <xs:element name="myGroup">
+ class MdaSchemaElement : public MdaSchemaDeclDefRef
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() { LIMITED_METHOD_CONTRACT; return MdaSchemaElementType; }
+ virtual BOOL Validate(MdaXmlElement* pElement, ValidationResult* pResult);
+ };
+
+ // <xs:complexType name="myElementType">
+ class MdaSchemaComplexTypeDef : public MdaSchemaDeclDefRef
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() { LIMITED_METHOD_CONTRACT; return MdaSchemaComplexTypeDefType; }
+ virtual BOOL Validate(MdaXmlElement* pElement, ValidationResult* pResult);
+ virtual InlineSArray<MdaSchemaAttribute*, MDA_XML_ELEMENT_CHILDREN>& GetAttributes() { LIMITED_METHOD_CONTRACT; return m_attributes; }
+
+ private:
+ friend class MdaSchemaExtension;
+ InlineSArray<MdaSchemaAttribute*, MDA_XML_ELEMENT_CHILDREN> m_attributes;
+ };
+
+ // <xs:group ref="myGroup">
+ class MdaSchemaGroupRef : public MdaSchemaDeclDefRef
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() { LIMITED_METHOD_CONTRACT; return MdaSchemaGroupRefType; }
+ virtual BOOL ValidatePattern(MdaXmlElement* pElement, ValidationResult* pResult, COUNT_T* pCount);
+ };
+
+ // <xs:extension base="myElementType">
+ class MdaSchemaExtension : public MdaSchemaDeclDefRef
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() { LIMITED_METHOD_CONTRACT; return MdaSchemaExtensionType; }
+ virtual BOOL Validate(MdaXmlElement* pXml, ValidationResult* pResult);
+ virtual InlineSArray<MdaSchemaAttribute*, MDA_XML_ELEMENT_CHILDREN>& GetAttributes() { LIMITED_METHOD_CONTRACT; return m_attributes; }
+
+ public:
+ MdaSchemaExtension() { LIMITED_METHOD_CONTRACT; }
+
+ private:
+ InlineSArray<MdaSchemaAttribute*, MDA_XML_ELEMENT_CHILDREN> m_attributes;
+ };
+
+ // <xs:element ref="myElement">
+ class MdaSchemaElementRef : public MdaSchemaDeclDefRef
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() { LIMITED_METHOD_CONTRACT; return MdaSchemaElementRefTyp; }
+ virtual BOOL Validate(MdaXmlElement* pElement, ValidationResult* pResult);
+ };
+
+ // <xs:element name="myElementAsMyType" type="myType">
+ class MdaSchemaElementRefType : public MdaSchemaDeclDefRef
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() { LIMITED_METHOD_CONTRACT; return MdaSchemaElementRefTypeType; }
+ virtual BOOL Validate(MdaXmlElement* pElement, ValidationResult* pResult);
+ };
+
+ // <xs:element name="myElementAsMyType" type="xs:anyType">
+ class MdaSchemaElementAny : public MdaSchemaDeclDefRef
+ {
+ public:
+ virtual MdaSchemaType GetSchemaType() { LIMITED_METHOD_CONTRACT; return MdaSchemaElementAnyType; }
+ virtual BOOL Validate(MdaXmlElement* pElement, ValidationResult* pResult);
+ };
+/*
+ // <xs:simpleType name="mySimpleType">
+ class MdaSimpleTypeDef : public MdaSchemaDeclDefRef
+ {
+ }
+
+ // <xs:restriction base="xs:string">
+ class MdaRestriction : public MdaSchemaDeclDefRef
+ {
+ }
+
+ // <xs:enumeration value="blue">
+ class MdaEnumeration : public MdaSchemaBase
+ {
+ }
+*/
+private:
+ MdaSchema();
+ virtual LPCWSTR SetRootAttributes(MdaXmlElement* pXml) = 0;
+ ValidationResult* Validate(MdaXmlElement* pRoot, ValidationResult* pResult);
+ MdaXmlElement* ToXml(MdaXmlElement* pXmlRoot) { WRAPPER_NO_CONTRACT; return m_tos->ToXml(pXmlRoot); }
+ MdaXmlElement* ToXml(MdaXmlIndustry* pMdaXmlIndustry) { WRAPPER_NO_CONTRACT; return m_tos->ToXml(pMdaXmlIndustry); }
+ MdaXmlElement* ToXml(MdaXmlIndustry* pMdaXmlIndustry, MdaSchemaBase* pXsdViolation) { WRAPPER_NO_CONTRACT; return m_tos->ToXml(pMdaXmlIndustry, pXsdViolation); }
+
+private: // Assistant Definitions
+ void DefineAssistant(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; m_currentAssistant = name; }
+ void DefineAssistantEnd(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; m_currentAssistant = MdaElemUndefined; }
+ void DefineAssistantInput(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; ASSERT(m_currentAssistant == name); AddExtendElement(name, MdaElemDef(Assistant)); }
+ void DefineAssistantInputEnd(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; ASSERT(m_currentAssistant == name); AddExtendElementEnd(name, MdaElemDef(Assistant)); }
+ void DefineAssistantOutput(MdaElemDeclDef name, MdaElemDeclDef msgName) { WRAPPER_NO_CONTRACT; ASSERT(m_currentAssistant == name); AddExtendElement(msgName, MdaElemDef(AssistantMsgType)); }
+ void DefineAssistantOutputEnd(MdaElemDeclDef name, MdaElemDeclDef msgName) { WRAPPER_NO_CONTRACT; ASSERT(m_currentAssistant == name); AddExtendElementEnd(msgName, MdaElemDef(AssistantMsgType)); }
+
+private: // <xs:*>
+ void DefineSchema() { WRAPPER_NO_CONTRACT; m_tos = m_schemaRootFactory.Create(); }
+ void DefineSchemaEnd() { CONTRACTL {NOTHROW; GC_NOTRIGGER; SO_TOLERANT; MODE_ANY; PRECONDITION(m_stack.GetDepth() == 0); } CONTRACTL_END; }
+ void AddElement(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; Push(CreateDeclDef(name, &m_elementFactory)); Push(m_complexTypeFactory.Create()); }
+ void AddElementRefType(MdaElemDeclDef name, MdaElemDeclDef type) { WRAPPER_NO_CONTRACT; AddTerminal(CreateDeclDef(name, &m_elementRefTypeFactory)->InitRef(GetDef(type))); }
+ void AddElementAny(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; AddTerminal(CreateDeclDef(name, &m_elementAnyFactory)); }
+ void AddExtendElement(MdaElemDeclDef name, MdaElemDeclDef type) { WRAPPER_NO_CONTRACT; AddElement(name); AddExtension(type); }
+ void AddComplexType(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; Push(CreateDeclDef(name, &m_complexTypeDefFactory)); }
+ void AddExtendType(MdaElemDeclDef name, MdaElemDeclDef type) { WRAPPER_NO_CONTRACT; AddComplexType(name); AddExtension(type); }
+ void AddExtension(MdaElemDeclDef type) { WRAPPER_NO_CONTRACT; Push(m_complexContentFactory.Create()); Push(m_extensionFactory.Create()->InitRef(GetDef(type))); }
+ void RefElement(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; AddTerminal(m_elementRefFactory.Create()->InitRef(GetDef(name))); }
+ void RefGroup(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; AddTerminal(m_groupRefFactory.Create()->InitRef(GetDef(name))); }
+ void AddChoice() { WRAPPER_NO_CONTRACT; Push(m_choiceFactory.Create()); }
+ void AddGroup(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; Push(CreateDeclDef(name, &m_groupFactory)); }
+ void AddSequence(COUNT_T minOccurs, COUNT_T maxOccurs) { WRAPPER_NO_CONTRACT; Push(m_sequenceFactory.Create()->Initialize(minOccurs, maxOccurs)); }
+ void AddAttribute(MdaAttrDeclDef name, MdaSchemaPrimitive type, BOOL bRequired, LPCWSTR szDefault)
+ { WRAPPER_NO_CONTRACT; AddTerminal(m_attrFactory.Create()->Initialize(name, type, bRequired, szDefault)); }
+
+private: // </xs:*>
+ void AddElementEnd(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; Pop()->Verify(MdaSchemaComplexTypeType); Pop()->Verify(MdaSchemaElementType, name); }
+ void AddExtendElementEnd(MdaElemDeclDef name, MdaElemDeclDef type) { WRAPPER_NO_CONTRACT; AddExtensionEnd(type); AddElementEnd(name); }
+ void AddComplexTypeEnd(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; Pop()->Verify(MdaSchemaComplexTypeDefType, name); }
+ void AddExtendTypeEnd(MdaElemDeclDef name, MdaElemDeclDef type) { WRAPPER_NO_CONTRACT; AddExtensionEnd(type); AddComplexTypeEnd(name); }
+ void AddExtensionEnd(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; Pop()->Verify(MdaSchemaExtensionType, GetDef(name)); Pop()->Verify(MdaSchemaComplexContentType); }
+ void AddGroupEnd(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; Pop()->Verify(MdaSchemaGroupType, name); }
+ void AddChoiceEnd() { WRAPPER_NO_CONTRACT; Pop()->Verify(MdaSchemaChoiceType); }
+ void AddSequenceEnd() { WRAPPER_NO_CONTRACT; Pop()->Verify(MdaSchemaSequenceType); }
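+
+ // Illustrative build sequence using the helpers above (the element names here are
+ // hypothetical; the real schemas are declared through ELEMENT(...)/ELEMENT_END(...)
+ // style macros, referenced by the Verify messages above, that expand to calls like these):
+ //
+ // DefineSchema(); // <xs:schema>
+ // AddElement(MdaElemDef(Foo)); // <xs:element name="foo"><xs:complexType>
+ // AddSequence(0, 1); // <xs:sequence minOccurs="0" maxOccurs="1">
+ // RefElement(MdaElemDef(Bar)); // <xs:element ref="bar"/>
+ // AddSequenceEnd(); // </xs:sequence>
+ // AddElementEnd(MdaElemDef(Foo)); // </xs:complexType></xs:element>
+ // DefineSchemaEnd(); // </xs:schema>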
+
+private:
+ MdaSchemaBase* Pop() { WRAPPER_NO_CONTRACT; ASSERT(m_stack.GetDepth() > 0); MdaSchemaBase* popped = m_tos; m_tos = m_stack.Pop(); return popped; }
+ void AddTerminal(MdaSchemaBase* pSchemaBase) { WRAPPER_NO_CONTRACT; m_tos->AddChild(pSchemaBase); }
+
+ template<typename TYPE>
+ TYPE* Push(TYPE* pChild) { WRAPPER_NO_CONTRACT; AddTerminal(pChild); m_stack.Push(m_tos); m_tos = pChild; return pChild; }
+
+ template<typename TYPE>
+ TYPE* CreateDeclDef(MdaElemDeclDef name, MdaFactory<TYPE>* m_pFactory)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ TYPE* pDeclDef = m_pFactory->Create();
+ pDeclDef->InitDeclDef(name);
+
+ if (pDeclDef->IsDefinition())
+ {
+ ASSERT(m_stack.GetDepth() == 0);
+ *GetDef(name) = pDeclDef;
+ }
+
+ return pDeclDef;
+ }
+
+ MdaSchemaBase** GetDef(MdaElemDeclDef type) { WRAPPER_NO_CONTRACT; return &m_definitions[type]; }
+
+private:
+ friend class ManagedDebuggingAssistants;
+ friend class MdaFramework;
+ friend class MdaXmlElement;
+ friend class MdaXmlAttribute;
+ friend class MdaAssistant;
+ friend class MdaAssistantSchema;
+ friend class MdaAssistantMsgSchema;
+ friend class MdaSchemaSchema;
+ friend class MdaXPath;
+ friend class MdaXmlMessage;
+ friend class MdaXmlValidationError;
+
+private:
+ MdaFactory<MdaSchemaRoot> m_schemaRootFactory;
+ MdaFactory<MdaSchemaAttribute> m_attrFactory;
+ MdaFactory<MdaSchemaSequence> m_sequenceFactory;
+ MdaFactory<MdaSchemaChoice> m_choiceFactory;
+ MdaFactory<MdaSchemaGroup> m_groupFactory;
+ MdaFactory<MdaSchemaGroupRef> m_groupRefFactory;
+ MdaFactory<MdaSchemaComplexTypeDef> m_complexTypeDefFactory;
+ MdaFactory<MdaSchemaComplexType> m_complexTypeFactory;
+ MdaFactory<MdaSchemaComplexContent> m_complexContentFactory;
+ MdaFactory<MdaSchemaElement> m_elementFactory;
+ MdaFactory<MdaSchemaElementRef> m_elementRefFactory;
+ MdaFactory<MdaSchemaElementRefType> m_elementRefTypeFactory;
+ MdaFactory<MdaSchemaExtension> m_extensionFactory;
+ MdaFactory<MdaSchemaElementAny> m_elementAnyFactory;
+
+private:
+ MdaSchemaBase* m_definitions[MdaElemEnd];
+ MdaElemDeclDef m_currentAssistant;
+ MdaSchemaBase* m_tos;
+ MdaStack<MdaSchemaBase*> m_stack;
+};
+
+
+//
+// MdaAssistantSchema
+//
+class MdaAssistantSchema : public MdaSchema
+{
+private:
+ MdaAssistantSchema();
+ LPCWSTR SetRootAttributes(MdaXmlElement* pXml);
+
+private:
+ friend class ManagedDebuggingAssistants;
+ friend class MdaXmlElement;
+ friend class MdaAssistant;
+};
+
+
+//
+// MdaAssistantMsgSchema
+//
+class MdaAssistantMsgSchema : public MdaSchema
+{
+private:
+ MdaAssistantMsgSchema();
+ LPCWSTR SetRootAttributes(MdaXmlElement* pXml);
+
+private:
+ friend class ManagedDebuggingAssistants;
+ friend class MdaXmlElement;
+ friend class MdaAssistant;
+};
+
+
+//
+// MdaSchemaSchema
+//
+class MdaSchemaSchema : public MdaSchema
+{
+private:
+ MdaSchemaSchema();
+ LPCWSTR SetRootAttributes(MdaXmlElement* pXml);
+
+private:
+ friend class ManagedDebuggingAssistants;
+ friend class MdaXmlElement;
+ friend class MdaAssistant;
+};
+
+
+//
+// MdaQuery
+//
+
+BOOL IsJustMyCode(MethodDesc* pMethodDesc);
+
+class MdaQuery
+{
+private:
+ class CompiledQuery
+ {
+ public:
+ CompiledQuery();
+
+ public:
+ BOOL Test(MethodDesc* pMethodDesc);
+ BOOL Test(FieldDesc* pFieldDesc);
+ BOOL Test(MethodTable* pMethodTable);
+
+ public:
+ void SetName(LPCWSTR name);
+ void SetNestedTypeName(LPCWSTR name);
+ void SetMemberName(LPCWSTR name) { WRAPPER_NO_CONTRACT; m_sszMember.Set(name); }
+ void SetAnyMember();
+ void SetAnyType();
+ void SetJustMyCode() { LIMITED_METHOD_CONTRACT; m_bJustMyCode = TRUE; }
+
+ private:
+ BOOL Test(SString* psszName, MethodTable* pMethodTable);
+
+ private:
+ friend class MdaQuery;
+
+ private:
+ BOOL m_bAnyMember;
+ BOOL m_bAnyType;
+ BOOL m_bJustMyCode;
+ StackSString m_sszFullname;
+ StackSString m_sszMember;
+ };
+
+public:
+ class CompiledQueries
+ {
+ public:
+ CompiledQueries() { LIMITED_METHOD_CONTRACT; }
+
+ public:
+ BOOL Test(MethodDesc* pMethodDesc);
+ BOOL Test(FieldDesc* pFieldDesc);
+ BOOL Test(MethodTable* pMethodTable);
+
+ private:
+ friend class MdaQuery;
+
+ private:
+ CompiledQuery* AddQuery();
+
+ private:
+ InlineSArray<CompiledQuery*, 10> m_queries;
+ MdaFactory<CompiledQuery> m_factory;
+ };
+
+public:
+ static void Compile(MdaXmlElement* pXmlFilter, CompiledQueries* pCompiledQueries);
+
+private:
+ friend class ManagedDebuggingAssistants;
+
+private:
+ class Compiler
+ {
+ private:
+ friend class CompiledQuery;
+ friend class MdaQuery;
+
+ private:
+ BOOL Compile(SString* sszQuery, CompiledQuery* pCompiledQuery);
+
+ typedef enum
+ {
+ //
+ // TOKENS
+ //
+ MdaFilterIdentifier = 0x0001,
+ MdaFilterDot = 0x0002,
+ MdaFilterPlus = 0x0004,
+ MdaFilterAstrix = 0x0008,
+ MdaFilterColon = 0x0010,
+ MdaFilterEnd = 0x4000,
+ }
+ Token;
+
+ //
+ // LEXER
+ //
+ private:
+ Token LexAToken();
+ void NextToken() { WRAPPER_NO_CONTRACT; m_currentToken = LexAToken(); }
+ BOOL TokenIs(Token token) { LIMITED_METHOD_CONTRACT; return !!(m_currentToken & token); }
+ BOOL TokenIs(int token) { LIMITED_METHOD_CONTRACT; return TokenIs((Token)token); }
+ LPCWSTR GetIdentifier() { WRAPPER_NO_CONTRACT; return m_identifier.GetUnicode(); }
+
+ //
+ // PRODUCTIONS
+ //
+ private:
+
+ BOOL NAME(CompiledQuery* pAst);
+ // '*'
+ // id
+ // id '.' NAME
+ // id '+' NESTNAME
+ // id ':' ':' MEMBERNAME
+
+ BOOL NESTNAME(CompiledQuery* pAst);
+ // id '+' NESTNAME
+ // id ':' ':' MEMBERNAME
+
+ BOOL MEMBERNAME(CompiledQuery* pAst);
+ // '*'
+ // id
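+
+ // Example filter strings this grammar accepts (illustrative):
+ // "*" any type
+ // "MyNs.MyType::MyMethod" a member of a namespace-qualified type
+ // "MyNs.Outer+Inner::*" any member of a nested type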
+
+ private:
+ SString::CIterator m_itr;
+ StackSString m_identifier;
+ Token m_currentToken;
+ };
+};
+
+
+//
+// MdaConfigFactory
+//
+class MdaConfigFactory : public IXMLNodeFactory
+{
+private:
+ friend class ManagedDebuggingAssistants;
+
+private:
+ static MdaXmlElement* ParseXmlStream(MdaXmlIndustry* pXmlIndustry, LPCWSTR szXmlStream);
+
+private:
+ MdaConfigFactory(MdaXmlElement* pXmlRoot, BOOL bDeveloperSettings = FALSE) { WRAPPER_NO_CONTRACT; m_bParse = !bDeveloperSettings; m_pMdaXmlElement = NULL; m_stack.Push(pXmlRoot); }
+
+public:
+ HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void** ppvObject) { WRAPPER_NO_CONTRACT; return S_OK; }
+ ULONG STDMETHODCALLTYPE AddRef() { WRAPPER_NO_CONTRACT; return 0; }
+ ULONG STDMETHODCALLTYPE Release() { WRAPPER_NO_CONTRACT; return 0; }
+
+public:
+ HRESULT STDMETHODCALLTYPE NotifyEvent(
+ IXMLNodeSource* pSource,
+ XML_NODEFACTORY_EVENT iEvt);
+
+ HRESULT STDMETHODCALLTYPE BeginChildren(
+ IXMLNodeSource* pSource,
+ XML_NODE_INFO* pNodeInfo);
+
+ HRESULT STDMETHODCALLTYPE EndChildren(
+ IXMLNodeSource* pSource,
+ BOOL fEmptyNode,
+ XML_NODE_INFO* pNodeInfo);
+
+ HRESULT STDMETHODCALLTYPE Error(
+ IXMLNodeSource* pSource,
+ HRESULT hrErrorCode,
+ USHORT cNumRecs,
+ XML_NODE_INFO** apNodeInfo);
+
+ HRESULT STDMETHODCALLTYPE CreateNode(
+ IXMLNodeSource* pSource,
+ PVOID pNodeParent,
+ USHORT cNumRecs,
+ XML_NODE_INFO** apNodeInfo);
+
+private:
+ BOOL m_bParse;
+ MdaXmlElement* m_pMdaXmlElement;
+ MdaStack<MdaXmlElement*> m_stack;
+};
+
+#pragma warning(pop)
+
+#include "mda.inl"
+
+#endif
+#endif
+
diff --git a/src/vm/mda.inl b/src/vm/mda.inl
new file mode 100644
index 0000000000..d600330b9f
--- /dev/null
+++ b/src/vm/mda.inl
@@ -0,0 +1,15 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
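+// Hands out the next preallocated PRODUCT slot from this factory block; once all
+// MDA_MAX_FACTORY_PRODUCT slots are used, the request is forwarded to the next
+// block in the chain (GetNext() is declared in mda.h and is assumed to allocate
+// that block on demand).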
+template<typename PRODUCT>
+PRODUCT* MdaFactory<PRODUCT>::Create()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_cProduct == MDA_MAX_FACTORY_PRODUCT)
+ return GetNext()->Create();
+
+ return &m_product[m_cProduct++];
+}
diff --git a/src/vm/mdaBoilerplate.exe.mda.config b/src/vm/mdaBoilerplate.exe.mda.config
new file mode 100644
index 0000000000..2906d788dd
--- /dev/null
+++ b/src/vm/mdaBoilerplate.exe.mda.config
@@ -0,0 +1,1134 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+************************************************
+ PLEASE KEEP MDAS IN ALPHABETICAL ORDER!
+************************************************
+-->
+
+<!--
+
+FEEDBACK:
+ Please add yourself to clrmda discussion alias (just paste this link into
+ IE: http://autogroup/JoinGroup.asp?GroupAlias=clrmda). Once you join,
+ please send out a "Hello World" mail letting us know what problem your
+ trying to solve with MDAs. We're interested in feedback on activation,
+ reporting, VS integration, new MDA requests etc. We'll also send out mail
+ for any breaking changes or new MDAs.
+
+OVERVIEW:
+ Managed Debugging Assistants (MDAs) are switches shipped in the retail
+ runtime to help debug managed code. Each MDA and the problem it addresses
+ is documented below.
+
+ACTIVATION\REPORTING QUICK START:
+ Decide which MDAs you'd like enabled by reading their documentation below and then
+ set the environment variable COMPLUS_MDA equal to a semicolon-delimited list of
+ the names of the MDAs you want enabled. The list is not case sensitive. So, for
+ instance, if you want the memberInfoCacheCreation, gcUnmanagedToManaged and
+ gcManagedToUnmanaged MDAs enabled:
+
+ set complus_mda=mEmBeRInFoCacheCreation;gcUnmanagedToManaged;gcManagedToUnmanaged
+
+ Any managed program run from that command shell will now have these MDAs enabled. If
+ an MDA fires and no debugger is attached, a dialog appears, much like the one you would
+ get for an unhandled exception, telling you which MDA fired and giving you a chance to
+ attach a debugger so you can get a stack trace indicating where the MDA fired.
+
+ If you launch under a debugger you'll have to ensure that the debugger won't ignore the MDAs
+ you enabled. You can force VS to report all MDAs by checking the "Managed Debugging Assistants"
+ box in the exceptions dialog. VS reports MDA messages using the same dialog it uses for exceptions.
+ Cordbg and windbg report all MDAs by default by splashing the message to the console.
+
+ACTIVATION\REPORTING SCENARIOS:
+ The MDA activation and reporting features support a variety of scenarios. Those scenarios
+ and the supporting features are described below.
+
+-->
+
+
+<!--
+ Constrained Execution Region (CER) MDA
+ - InvalidCERCall
+ - VirtualCERCall
+ - OpenGenericCERCall
+ - IllegalPrepareConstrainedRegion
+
+ These all fire during the analysis of CER call graphs. This analysis
+ can occur for a number of reasons:
+
+ Early bound; triggered by jitting or ngen'ing a method containing a
+ RuntimeHelpers.PrepareConstrainedRegions() call. Call graph is
+ rooted at the catch/finally/filter/fault blocks belonging to the
+ exception handler the PCR call is associated with.
+
+ At instantiation of "critical" objects. Instances derived from
+ CriticalFinalizerObject prepare a CER rooted at the finalizer method.
+ Instances derived from SafeHandle or CriticalHandle additionally have
+ the ReleaseHandle(), Dispose() and IsInvalid property getter methods
+ prepared as CER roots.
+
+ Explicitly via RuntimeHelpers.PrepareMethod() or PrepareDelegates() calls
+ (the CER root method is passed as an argument, indirectly in the case of
+ PrepareDelegate() and possibly with exact generic instantiation information
+ for PrepareMethod()).
+
+ Note that for the early bound (PCR precipitated) case the jit time semantics
+ imply the probe will fire some time before the method which contains the CER
+ root is called (typically just prior to the first call to that method). Since
+ the entire CER call graph is scanned in one go, probes relating to methods
+ deep within the CER will be reported at that time as well (putting the report
+ even further away from the execution). The probes will not fire each time the
+ method is executed (and if the image is ngen'd then they will not fire at runtime at all).
+
+ Even in the late bound cases a given probe will fire at most once (we remember
+ which CERs we've prepared already, subsequent prepare operations on the same
+ graphs are no-ops). Note that CER graphs that are qualified by generic type
+ parameters (i.e. those CERs whose root method has generic method or class
+ type variables) are considered to define distinct CERs for each possible
+ instantiation of that root method. Therefore multiple calls to PrepareMethod()
+ with such a root method and different instantiations will cause the analysis
+ to be run multiple times and therefore any probes will fire on each invocation
+ (provided the instantiation given hasn't been prepared for that method before).
+ This last case (multiple CER preparations from the same root differing only by
+ generic instantiation) will occur at jit time only for generic instantiations
+ that contain only non-reference types (we jit a separate copy of the code for
+ each of those instantiations). We will refuse to jit-time prepare any method
+ with a generic instantiation containing one or more reference types, since
+ code for those cases is shared (and thus at jit time we cannot predict which
+ exact instantiations will be required at runtime). See the description of
+ the openGenericCERCall probe.
+
+ -->
+
+<mdaConfig>
+ <assistants>
+
+ <!--
+ AsynchronousThreadAbort (weiwenli)
+
+ DESCRIPTION:
+ Causes the runtime to fire an MDA message when someone attempts to abort another thread.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ -->
+ <asynchronousThreadAbort enable="false" />
+
+
+ <!--
+ BindingFailure (t-daveh)
+
+ DESCRIPTION:
+ This probe fires when an Assembly fails to load.
+
+ SCENARIOS:
+ This probe is intended for use in debugging Assembly binding failures. In addition to general information
+ identifying the failure and Assembly, the binding context the Assembly would have been loaded in is included.
+ See http://blogs.msdn.com/suzcook/archive/2003/05/29/57143.aspx for more information.
+
+ BEHAVIORAL IMPACT: None.
+
+ OUTPUT: An MDA message is output for each Assembly that fails to load. It includes the failing HRESULT as well
+ as the display name, code base, binding context index, and AppDomain ID that the Assembly would have had.
+ -->
+ <bindingFailure enable="false" />
+
+
+ <!--
+ CallbackOnCollectedDelegate (chriseck)
+
+ DESCRIPTION: Detects when unmanaged code is trying to call back into the runtime via a delegate which
+ has been garbage collected. It will store up to listSize delegates after the GC has released them.
+ The default listSize is 1000; the minimum is 50 and the maximum is 2000.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ User code AVs when trying to call back into the runtime on a function pointer which was marshaled from
+ a managed delegate. The failure is non-deterministic; sometimes the call on the function
+ pointer succeeds and sometimes it fails.
+
+ CAUSE:
+ The delegate from which the function pointer was created and exposed to unmanaged code was collected,
+ so when the unmanaged component tries to call through the function pointer it AVs. By enabling this assistant the runtime
+ will not collect "all" of the delegate - instead it leaks just enough of the delegate so that if someone
+ tries to call back on it the MDA will fire.
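+
+ A minimal sketch of the failure pattern (the native import and names are
+ hypothetical, not part of this MDA's contract):
+
+ delegate void NativeCallback(int code);
+
+ [DllImport("native.dll")] // hypothetical native library that stores the callback
+ static extern void RegisterCallback(NativeCallback callback);
+
+ void Broken() {
+ // BUG: nothing keeps the delegate alive after Broken returns, so the GC may
+ // collect it while native code still holds the marshaled function pointer.
+ RegisterCallback(new NativeCallback(OnEvent));
+ }
+
+ The fix is to keep the delegate reachable (e.g. in a static field, or via
+ GCHandle.Alloc) for as long as native code can call it.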
+ -->
+ <callbackOnCollectedDelegate listSize="1000" enable="false" />
+
+ <!--
+ ContextSwitchDeadlock (chriseck)
+
+ DESCRIPTION:
+ Causes the runtime to fire an MDA message when a deadlock is detected during an attempted context transition.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ A native context transition appears to be deadlocked.
+
+ CAUSE:
+ Most likely cause is a non-pumping STA thread.
+
+ OUTPUT:
+ An XML message specifying the violation.
+ -->
+ <contextSwitchDeadlock enable="false" />
+
+ <!--
+ DangerousThreadingAPI (weiwenli)
+
+ DESCRIPTION:
+ Causes the runtime to fire an MDA message when someone attempts to call a dangerous threading API.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ -->
+ <dangerousThreadingAPI enable="false" />
+
+ <!--
+ DateTimeInvalidLocalFormat (amoore)
+
+ ACTIVATION:
+ Activated by default under a managed debugger.
+
+ DESCRIPTION:
+ Indicates when a DateTime instance that represents a UTC time is formatted with a format that should
+ only be used with local instances.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ An application is manually serializing a UTC DateTime instance using a local format:
+
+ DateTime myDateTime = DateTime.UtcNow;
+ Serialize(myDateTime.ToString("yyyy-MM-dd'T'HH:mm:ss.fffffffzzz"));
+
+ CAUSE:
+ The 'z' format for DateTime.ToString outputs the local time zone offset, e.g. "+10:00" for Sydney time.
+ As such, it will only output a meaningful result if the value of the DateTime is local. If the value
+ is UTC, DateTime.ToString will still output the local time zone offset.
+
+ CORRECTION:
+ UTC DateTime instances should be formatted in a way that indicates that they are UTC. The recommended
+ format for UTC times uses a 'Z' to denote UTC time:
+
+ DateTime myDateTime = DateTime.UtcNow;
+ Serialize(myDateTime.ToString("yyyy-MM-dd'T'HH:mm:ss.fffffffZ"));
+
+ There is also a short-hand "o" format that will serialize a DateTime making use of the DateTime.Kind
+ property that will serialize correctly regardless of whether the instance is Local, Utc or Unspecified:
+
+ DateTime myDateTime = DateTime.UtcNow;
+ Serialize(myDateTime.ToString("o"));
+
+ SYMPTOM:
+ An application is indirectly serializing a UTC DateTime with a library like XmlConvert or DataSet
+ serialization:
+
+ DateTime myDateTime = DateTime.UtcNow;
+ String serialized = XmlConvert.ToString(myDateTime);
+
+ CAUSE:
+ XmlConvert and DataSet serialization use local formats for serialization by default.
+ Additional options are required to serialize other kinds of DateTime, such as UTC.
+
+ CORRECTION:
+ For XmlConvert, pass in XmlDateTimeSerializationMode.RoundtripKind.
+
+ DateTime myDateTime = DateTime.UtcNow;
+ String serialized = XmlConvert.ToString(myDateTime, XmlDateTimeSerializationMode.RoundtripKind);
+
+ If using DataSet, set the DateTimeMode on the DataColumn object to DataSetDateTime.Utc.
+
+
+ -->
+ <dateTimeInvalidLocalFormat enable="false" />
+
+ <!--
+ DirtyCastAndCallOnInterface (chriseck)
+
+ DESCRIPTION:
+ A native component makes a call on an IUnknown or IDispatch interface without first QI-ing for the correct interface.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ AVs or unexpected memory corruption when making a call from native code into the CLR on a CCW.
+
+ CAUSE:
+ Caller neglected to QI for the correct interface.
+
+ OUTPUT:
+ An XML message specifying the violation.
+ -->
+ <dirtyCastAndCallOnInterface enable="false" />
+
+ <!--
+ DisconnectedContext (chriseck)
+
+ DESCRIPTION:
+ The CLR attempts to transition into a dead context while trying to service a request concerning a
+ COM object living in that dead context. This can happen while cleaning up RCWs or servicing QIs.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ Calls on RCWs living in dead contexts are not serviced, or cleanup of COM interface pointers occurs in
+ a context other than the one in which the interface pointers live.
+
+ CAUSE:
+ The OLE context is disconnected.
+
+ OUTPUT:
+ An XML message specifying the violation.
+ -->
+ <disconnectedContext enable="false" />
+
+
+ <!--
+ DllMainReturnsFalse (slidin)
+
+ ACTIVATION:
+ Activated by default under a managed debugger.
+ -->
+ <dllMainReturnsFalse enable="false" />
+
+ <!--
+ ExceptionSwallowedOnCallFromCom (dmortens)
+
+ DESCRIPTION:
+ Causes the runtime to fire an MDA message when an error occurs while determining how to marshal the parameters
+ of a member to be called from COM.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ A failure HRESULT is returned to COM without the managed method having been called.
+
+ CAUSE:
+ This is most likely due to an incompatible MarshalAs attribute on one of the parameters.
+
+ OUTPUT:
+ An XML message specifying the violation.
+ -->
+ <exceptionSwallowedOnCallFromCom enable="false" />
+
+
+ <!--
+ FailedQI (chriseck)
+
+ DESCRIPTION:
+ Causes the runtime to fire an MDA message when the runtime calls QueryInterface on a COM interface pointer
+ on behalf of a RCW, and the QueryInterface call fails because the call was attempted in the wrong context or
+ because an OLE owned proxy returned a failure HRESULT.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ A cast on an RCW fails, or a call to COM from an RCW fails unexpectedly.
+
+ CAUSE:
+ Calling from the wrong context or the registered proxy is failing the QueryInterface call.
+
+ OUTPUT:
+ An XML message specifying the violation.
+ -->
+ <failedQI enable="false" />
+
+
+ <!--
+ GcManagedToUnmanaged (chrisk)
+
+ DESCRIPTION: Causes a garbage collection whenever a thread transitions from managed to unmanaged
+ code (also see gcUnmanagedToManaged).
+
+ SCENARIOS:
+
+ SYMPTOM: An unmanaged user component AVs when trying to use a managed object which had been exposed to
+ COM. The COM object appears to have been released. The AV is non-deterministic.
+
+ CAUSE: If an unmanaged component is not ref counting a managed COM object correctly,
+ then the runtime could collect a managed object exposed to COM while the unmanaged component still
+ holds a reference to the object. The runtime calls Release during GCs, so if the user component uses the
+ object before the GC then it will not yet have been collected, which is the source of the non-determinism.
+ Enabling this assistant will reduce the time between when the object becomes eligible for collection and
+ when Release is called, helping to track down which unmanaged component first tries to access the collected object.
+
+ OUTPUT: None
+ -->
+ <gcManagedToUnmanaged enable="false" />
+
+ <!--
+ GcUnmanagedToManaged (chrisk)
+
+ DESCRIPTION:
+ Causes a garbage collection whenever a thread transitions from unmanaged to
+ managed code (also see gcManagedToUnmanaged).
+
+ BEHAVIORAL IMPACT:
+ This assistant changes the behavior of the runtime. When enabled, more GCs will occur.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ An application running unmanaged user components (COM\PInvoke) is showing a non-deterministic AV
+ in runtime code.
+
+ CAUSE:
+ If an application is running unmanaged user components then those components may have corrupted
+ the GC heap. This will cause the runtime to AV when the GC tries to walk the object graph.
+ Enabling this assistant will reduce the time between when the unmanaged component corrupts the GC
+ heap and when the AV happens by forcing a GC to occur before every managed transition.
+
+ OUTPUT: None
+ -->
+
+ <gcUnmanagedToManaged enable="false" />
+
+
+ <!--
+ IllegalPrepareConstrainedRegion (rudim)
+
+ This is an error event. The RuntimeHelpers.PrepareConstrainedRegions() (PCR) method
+ calls we use to mark exception handlers as introducing CERs in their catch/finally/fault/filter
+ blocks are only valid when used in that context. They must immediately precede the try
+ statement of the exception handler. (This is at the IL level, so it's obviously permissible
+ to have non-code generating source in between the two, such as comments). In the future
+ these markers will be generated by a compiler (from a new, higher level syntax for marking
+ CERs) and the code author won't have to worry about this. But until then we help the author
+ out by generating this MDA when the PCR call appears anywhere else in the code.
+
+ If this MDA is firing, the symptoms you'd expect are probably as if CERs had
+ stopped working (i.e. runtime errors from jitting, thread aborts or generics lazy type
+ loading occurring inside CER regions). This is because they probably intended to declare
+ a CER region but failed by mispositioning the PCR call.
+ -->
+ <illegalPrepareConstrainedRegion enable="false" />
+
+ <!--
+ InvalidApartmentStateChange (chriseck)
+
+ DESCRIPTION:
+ Causes the runtime to fire an MDA message when someone attempts to change the COM apartment state of a thread which
+ has already been COM initialized to a different apartment state.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ A thread's COM apartment state is not what was requested.
+
+ CAUSE:
+ The thread was previously initialized to a different COM apartment state.
+
+ OUTPUT:
+ An XML message specifying the violation.
+ -->
+ <invalidApartmentStateChange enable="false" />
+
+ <!--
+ InvalidCERCall (rudim)
+
+ This is an error report. It occurs whenever a location within the CER graph
+ calls a method which has no reliability contract or an excessively weak contract.
+ A weak contract is one which declares that the worst case state corruption is of
+ greater scope than the instance passed to the call (i.e. the appdomain or process
+ state may become corrupted) or that its result is not always deterministically
+ computable when called within a CER. Either of these states indicates that the
+ code called may thwart the efforts of the rest of the CER to maintain consistent
+ state (CERs allow an author to treat errors in a very deterministic manner as a
+ way of maintaining whatever internal invariants are important to the particular
+ application and thus allow it to continue running in the face of transient errors
+ such as out of memory).
+
+ In terms of reliability contract syntax a weak contract is one that does not specify
+ a Consistency enumeration or specifies one of Consistency.MayCorruptProcess or
+ Consistency.MayCorruptAppDomain or that does not specify a CER enumeration or specifies CER.None.
+
+ When this probe fires there's a chance that the method being called in the CER
+ can fail in a way that the caller didn't expect or that leaves the appdomain or
+ process state corrupted or non-recoverable. Of course the called code may actually
+ work perfectly and the author merely hasn't gotten round to adding a contract. But
+ the issues involved in hardening code in this way are subtle and most "random"
+ code doesn't fall out this way. The contracts serve as markers that the author
+ has done their homework and hardened their algorithms and also as promises that
+ these guarantees will never backslide in future revisions of the code. (I.e.
+ they're declarations of intent rather than mere indicators of implementation).
+
+ Because any method with a weak or non-existent contract may potentially fail in
+ all sorts of unpredictable manners anyway, the runtime doesn't attempt to remove
+ any of its own unpredictable failures from the method (introduced by lazy jitting
+ or generics dictionary population or thread aborts for instance). That is, when
+ this MDA fires it indicates that the runtime didn't include the called method in
+ the CER being defined; the call graph was pruned at this node (to carry on preparing
+ this sub-tree would just serve to help mask the potential error).
+
+ So the symptoms this MDA may indicate are unfortunately very broad. The user could see
+ an unexpected OutOfMemory or ThreadAbort exception (among others, we don't guarantee
+ the list) at the callsite into the "bad" method because the runtime didn't prepare
+ it ahead of time or protect it from ThreadAbort exceptions at runtime. But worse than
+ that, any exception that comes from this method at runtime could be leaving the
+ appdomain or process in a bad state, which is presumably counter to the wishes of
+ the CER author, since the only reason to declare a CER is to avoid large scale state
+ corruptions such as these in the first place. How corrupt state manifests itself is
+ very application specific (since the definition of consistent state belongs to the
+ application).
+ -->
+ <invalidCERCall enable="false" />
+
+ <!--
+ InvalidFunctionPointerInDelegate (chriseck)
+
+ DESCRIPTION:
+ An invalid function pointer is passed in to construct a delegate over a native function pointer.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ AVs or unexpected memory corruption when using a delegate over a function pointer.
+
+ CAUSE:
+ An invalid function pointer was specified.
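+
+ A minimal sketch of the construction this MDA guards (GetNativeEntryPoint is a
+ hypothetical placeholder for however the pointer was obtained):
+
+ IntPtr fnPtr = GetNativeEntryPoint();
+ // Fires this MDA if fnPtr is not a valid native function entry point.
+ MyDelegate d = (MyDelegate)Marshal.GetDelegateForFunctionPointer(fnPtr, typeof(MyDelegate));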
+
+ OUTPUT:
+ An XML message specifying the violation.
+ -->
+ <invalidFunctionPointerInDelegate enable="false" />
+
+ <!--
+ InvalidGCHandleCookie (chriseck)
+
+ This error event is fired when an invalid IntPtr cookie->GCHandle retrieval is attempted.
+ The cookie is likely invalid because it was not originally created from a GCHandle,
+ represents a GCHandle that has already been freed, is a cookie to a GCHandle in
+ a different appdomain, or was marshaled to native code as a GCHandle but passed back into
+ the CLR as an IntPtr where a cast was attempted.
+
+ The symptoms the user will see are undefined behavior (AVs, memory corruption, etc.) while
+ attempting to use or retrieve a GCHandle from an IntPtr.
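+
+ A minimal sketch of a valid round-trip versus the stale-cookie case (illustrative):
+
+ GCHandle h = GCHandle.Alloc(obj);
+ IntPtr cookie = GCHandle.ToIntPtr(h); // valid cookie while h is live
+ GCHandle back = GCHandle.FromIntPtr(cookie); // OK
+ h.Free();
+ GCHandle stale = GCHandle.FromIntPtr(cookie); // fires this MDA: handle already freed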
+ -->
+ <invalidGCHandleCookie enable="false" />
+
+
+ <!--
+ InvalidIUnknown (chriseck)
+
+ DESCRIPTION:
+ An invalid IUnknown* is passed to managed code from native code. The IUnknown fails to return success
+ when queried for the IUnknown interface.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ Unexpected error when marshaling a COM interface pointer during argument marshaling.
+
+ CAUSE:
+ A misbehaving QueryInterface implementation on the COM interface passed to the runtime.
+
+ OUTPUT:
+ An XML message specifying the violation.
+ -->
+ <invalidIUnknown enable="false" />
+
+ <!--
+ InvalidMemberDeclaration (dmortens)
+
+ DESCRIPTION:
+ Causes the runtime to fire a MDA message when an error occurs while determining how to marshal the parameters
+ of a member member to be called from COM.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ A failure HRESULT is returned to COM without the managed method having been called.
+
+ CAUSE:
+ This is most likely due to an incompatible MarshalAs attribute on one of the parameters.
+
+ OUTPUT:
+ An XML message specifying the violation.
+ -->
+ <invalidMemberDeclaration enable="false" />
+
+ <!--
+ InvalidOverlappedToPinvoke (mstanton)
+
+ DESCRIPTION:
+ This probe fires when an overlapped pointer not created on the gc heap is passed to a popular
+ Win32 function. The potential for heap corruption is high when this is done because the
+ AppDomain where the call is made may unload. In that case, the user code will either free
+ the memory for the overlapped pointer, causing corruption when the operation finishes, or
+ the code will leak the memory, causing difficulties later.
+
+ Here are the functions that this MDA tracks:
+
+ Module Function
+ HttpApi.dll HttpReceiveHttpRequest
+ IpHlpApi.dll NotifyAddrChange
+ IpHlpApi.dll NotifyRouteChange
+ kernel32.dll ReadFile
+ kernel32.dll ReadFileEx
+ kernel32.dll WriteFile
+ kernel32.dll WriteFileEx
+ kernel32.dll ReadDirectoryChangesW
+ kernel32.dll PostQueuedCompletionStatus
+ MSWSock.dll ConnectEx
+ WS2_32.dll WSASend
+ WS2_32.dll WSASendTo
+ WS2_32.dll WSARecv
+ WS2_32.dll WSARecvFrom
+ MQRT.dll MQReceiveMessage
+
+ The way to fix this problem is to use a System.Threading.Overlapped object, calling
+ Overlapped.Pack() to get a NativeOverlapped structure that can be passed to the
+ function. If the AppDomain unloads, the CLR will wait until the async operation completes
+ before freeing the pointer.
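+
+ A minimal sketch of that pattern (assumes an unsafe context and a handle already
+ opened for overlapped I/O; names are illustrative):
+
+ Overlapped overlapped = new Overlapped();
+ NativeOverlapped* pNative = overlapped.Pack(null, null); // allocated and tracked by the CLR
+ // ... pass pNative to ReadFile/WriteFile etc. ...
+ // When the operation has completed: Overlapped.Free(pNative);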
+
+ Note that by default this MDA only fires if the P/Invoke is defined in your
+ code, using your debugger to report the JustMyCode status of each method.
+ A debugger that doesn't understand JustMyCode (such as mdbg with no extensions)
+ will not let this MDA fire. You can activate this MDA using a config file
+ if you explicitly set justMyCode="false" in your .mda.config file.
+
+ OUTPUT:
+ An XML message specifying the overlapped pointer address, the module name, and the win32
+ function that was called.
+ -->
+ <invalidOverlappedToPinvoke enable="false" justMyCode="true"/>
+
+
+ <!--
+ InvalidVariant (chriseck)
+
+ DESCRIPTION:
+ Causes the runtime to fire a MDA message when an invalid VARIANT structure is encountered.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ Unexpected behavior during a transition between native and managed code involving the marshaling
+ of an object to a VARIANT or vice versa.
+
+ CAUSE:
+ The native code is passing a malformed VARIANT structure to the runtime.
+
+ OUTPUT:
+ An XML message specifying the violation.
+ -->
+ <invalidVariant enable="false" />
+
+
+ <!--
+ JitCompilationStart (chrisk)
+
+ DESCRIPTION:
+ Enabling this assistant causes a message to be generated whenever a method which matches the filter is jitted.
+ This assistant was primarily used to test the MDA framework, but it can also be used to track down unexpected jitting.
+
+ BEHAVIORAL IMPACT:
+ None. The performance difference should also be negligible as this assistant is only fired when the method is
+ first jitted.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ clrjit.dll is loaded in a performance scenario in which all assemblies are ngened.
+
+ CAUSE:
+ This would likely be a bug in the runtime. Enabling this assistant will help determine which method is
+ being jitted.
+
+ OUTPUT:
+ Methods which match the filter that are being jitted.
+ -->
+ <jitCompilationStart enable="false">
+ <methods justMyCode="true">
+ <match break="false" name="MyMethod" />
+ </methods>
+ </jitCompilationStart>
+
+ <!--
+ LoaderLock (cbrumme)
+
+ DESCRIPTION:
+ It is unsafe to execute managed code on a thread that holds the operating system's LoaderLock.
+ Violating this rule can lead to deadlocks or calls into DLLs that have not yet been initialized.
+ Such failures are somewhat random and can appear or disappear from run to run of a process.
+ On some platforms, we can detect whether the current thread holds the LoaderLock during a
+ transition from native to managed code.
+
+ BEHAVIORAL IMPACT:
+ The extra checks can cause a slight slowdown on calls from native code to managed code. This
+ slowdown is on the order of 10 instructions.
+
+ -->
+ <loaderLock enable="false" />
+
+
+ <!--
+ LoadFromContext (t-daveh)
+
+ DESCRIPTION:
+ This probe fires when an Assembly loads in the LoadFrom binding context. This happens on some but not all calls
+ to Assembly.LoadFrom and can also occur when loading dependencies for a separate Assembly loading call.
+
+ SCENARIOS:
+ This probe is primarily intended for use in debugging Assembly binding failures, which often occur because a call
+ to Assembly.LoadFrom does not imply that the Assembly will be loaded in the LoadFrom context. Binding contexts
+ affect Assembly behavior, and in almost all cases it is recommended that the LoadFrom context be avoided. See
+ http://blogs.msdn.com/suzcook/archive/2003/05/29/57143.aspx for more information.
+
+ BEHAVIORAL IMPACT: None.
+
+ OUTPUT: An MDA message is output for each Assembly loaded in the LoadFrom context. It includes the display name
+ of the Assembly and its code base.
+ -->
+ <loadFromContext enable="false" />
+
+
+ <!--
+ MarshalCleanupError (chriseck)
+
+ DESCRIPTION:
+ The CLR encounters an error while attempting to clean up temporary structures and memory required for marshaling data types between
+ native / managed code boundaries. It is likely that a memory leak will occur.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ Memory leak occurs when making native / managed code transitions, runtime state such as thread culture is not restored, or errors occur
+ in SafeHandle cleanup.
+
+ CAUSE:
+ An unexpected error occurred while cleaning up temporary structures. Review all SafeHandle destructor / finalizer implementations and
+ custom-marshaler implementations for errors.
+
+ OUTPUT:
+ An XML message specifying the encountered problem.
+ -->
+ <marshalCleanupError enable="false" />
+
+ <!--
+ MemberInfoCacheCreation (chrisk)
+
+ DESCRIPTION:
+ This probe fires when reflection creates a MemberInfo cache. This happens on calls to
+ Type.GetMethod, Type.GetProperty, Type.GetField etc. Creation of this cache is expensive in working set
+ because it pages in metadata which is usually stored in a cold section of the PE file and because
+ reflection eagerly caches MemberInfos. The Reflection team has plans in Beta2 to make the cache lazy.
+
+ SCENARIOS:
+ This probe is primarily intended for use in a regression test which ensures that "heavy" reflection is
+ not used in an optimized scenario.
+
+ BEHAVIORAL IMPACT: None.
+
+ OUTPUT: An MDA message is output each time a MemberInfoCache is created.
+ -->
+ <memberInfoCacheCreation enable="false"/>
+
+ <!--
+ ModuloObjectHashcode (chrisk)
+
+ DESCRIPTION:
+ Enabling this assistant causes Object.GetHashCode to return the hashcode it would otherwise have
+ returned, modulo the configured modulus. This does not affect any other implementation of GetHashCode.
+
+ BEHAVIORAL IMPACT:
+ See Description.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ Varied. Generally, after debugging, it is discovered that an object with the wrong identity
+ is being manipulated.
+
+ CAUSE:
+ The program is using an object's hashcode to identify the object. While it is true that if two
+ object references are the same their hashcodes are the same, the converse is not true. If two
+ object references have the same hashcodes, that does not imply they refer to the same object.
+ Making this assumption will cause incorrect program behavior in the very rare case when it is false.
+ Enabling this assistant will make "hashcode collisions" much more likely and flush out such bugs.
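+
+ For example, this is the kind of code the assistant is designed to break (a sketch):
+
+ // WRONG: equal hashcodes do not imply the same object.
+ if (x.GetHashCode() == y.GetHashCode()) {
+ // ... treat x and y as the same object ...
+ }
+
+ With modulus="1" every Object.GetHashCode call returns the same value, so this
+ branch is taken for unrelated objects and the bug surfaces immediately.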
+
+ OUTPUT: None
+ -->
+ <moduloObjectHashcode modulus="1" enable="false" />
+
+ <!--
+ NonComVisibleBaseClass (dmortens)
+
+ This error event is fired when a QueryInterface call is made on a CCW requesting the class
+ interface or the default IDispatch, not implemented by an explicit interface, of a COM visible
+ managed class that derives from a non COM visible base class.
+
+ The symptom the user will see is the QueryInterface call failing with a
+ COR_E_INVALIDOPERATION HRESULT.
+ -->
+ <nonComVisibleBaseClass enable="false" />
+
+
+ <!--
+ NotMarshalable (chriseck)
+
+ DESCRIPTION:
+ The CLR encounters a COM interface pointer with no valid proxy/stub registered or a misbehaving IMarshal implementation while attempting
+ to marshal the interface across contexts.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ Calls are not serviced, or calls occur in the wrong context for COM interface pointers.
+
+ CAUSE:
+ The CLR encounters a COM interface pointer with no valid proxy/stub registered or a misbehaving IMarshal implementation.
+
+ OUTPUT:
+ An XML message specifying the violation.
+ -->
+ <notMarshalable enable="false" />
+
+ <!--
+ OpenGenericCERCall (rudim)
+
+ This event is a warning. It is generated when a CER graph with generic type
+ variables at the root method is being processed at jit/ngen time and at least
+ one of the generic type variables is an object reference type. Since at jit time
+ an instantiation containing an object reference type is only representative (the
+ resultant code is shared such that each of the object reference type variables may
+ in fact be any object reference type) we cannot guarantee to prepare all runtime
+ resources ahead of time. In particular methods with generic type variables sometimes
+ lazily allocate resources behind the user's back (these are referred to as generic
+ dictionary entries). For instance the statement "List<T> list = new List<T>();" where
+ T is a generic type variable will need to lookup and possibly create the exact
+ instantiation (e.g. List<Object>, List<String> etc.) at runtime and this might
+ fail for a variety of reasons beyond the author's control (out of memory, for instance).
+
+ This probe shouldn't fire for any of the non-jit cases (they always provide an
+ exact instantiation to work with).
+
+ When this probe fires the likely symptoms you might see are that CERs will appear not
+ to work at all for the bad instantiations (in fact we don't even attempt to implement
+ a CER in the circumstances where the event fires). So if the author uses a shared
+ instantiation of the CER they will not avoid runtime injected jit or generics type
+ loading errors or thread aborts within the region of the supposed CER.
+ -->
+ <openGenericCERCall enable="false" />
+
+ <!--
+ OverlappedFreeError (mstanton)
+
+ DESCRIPTION:
+ This probe fires if code calls System.Threading.Overlapped.Free(NativeOverlapped *) before
+ the overlapped operation has completed. The overlapped operation needs to be cancelled
+ before this.
+
+ OUTPUT:
+ An XML message specifying the overlapped pointer address that was freed prematurely.
+ -->
+ <overlappedFreeError enable="false" />
+
+ <!--
+ PInvokeLog (chrisk)
+
+ DESCRIPTION:
+ Logs a message the first time a PInvoke call is made.
+
+ OUTPUT:
+ An XML message specifying the managed PInvoke signature and target which was called for the first time.
+ -->
+ <pInvokeLog enable="false" />
+
+ <!--
+ PInvokeStackImbalance (chrisk)
+
+ DESCRIPTION:
+ Causes the runtime to compare the actual stack depth before and after a PInvoke call against
+ what the call depth should be given the calling conventions specified in the DllImport attribute
+ and the arguments. If the depths do not agree then the MDA will fire. (Future versions of this
+ assistant will also check the stack depth of "reverse-PInvoke" or calling back into managed code via
+ a function pointer representing a managed delegate.)
+
+ BEHAVIORAL IMPACT:
+ This assistant disables PInvoke marshaling optimizations, so PInvoke calls will be slower.
+
+ ACTIVATION:
+ Activated by default under a managed debugger.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ An application AVs when placing or just after placing a PInvoke call.
+
+ CAUSE:
+ Very likely that the managed signature, the DllImportAttribute, does not match the unmanaged
+ signature. Either the number or size of the parameters does not match or the calling convention
+ does not match. Try explicitly specifying the calling convention on both the managed and unmanaged
+ sides. It is also possible, though much less likely, that the unmanaged function unbalanced the
+ stack for some other reason such as a bug in the unmanaged compiler.
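+
+ A typical mismatch, sketched against a hypothetical native export:
+
+ // native: int __cdecl Sum(int a, int b);
+ [DllImport("native.dll")] // default convention (Winapi/StdCall on x86) imbalances the stack
+ static extern int Sum(int a, int b);
+
+ // Fix: state the calling convention explicitly.
+ [DllImport("native.dll", EntryPoint = "Sum", CallingConvention = CallingConvention.Cdecl)]
+ static extern int SumFixed(int a, int b);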
+
+ OUTPUT:
+ An XML message specifying the managed PInvoke signature which detected the unbalanced stack.
+ -->
+ <pInvokeStackImbalance enable="false" />
+
+ <!--
+ RaceOnRCWCleanup (chriseck)
+
+ DESCRIPTION:
+ Causes the runtime to fire an MDA message when it detects that an RCW is in use while the user attempts
+ to free it via Marshal.ReleaseComObject or other such construct.
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+
+ SYMPTOM:
+ AVs or memory corruption during or after freeing an RCW via Marshal.ReleaseComObject or other such construct.
+
+ CAUSE:
+ The RCW is in use on another thread or further up the freeing thread's stack. It is illegal to free
+ an RCW that is in use.
+
+ OUTPUT:
+ An XML message specifying the violation.
+ -->
+ <raceOnRCWCleanup enable="false" />
+
+ <!--
+ Reentrancy (cbrumme)
+
+ DESCRIPTION:
+ Threads that switch between native and managed code in either direction must perform an
+ orderly transition. However, certain low level extensibility points in the operating system
+ (like the Vectored Exception Handler) allow switches from managed to native code without
+ performing an orderly transition. Any native code that executes inside these extensibility points
+ must avoid calling back into managed code. If this rule is violated, the object heap can become
+ corrupted and other serious errors can occur. This assistant can detect attempts to transition
+ from native to managed code in cases where a prior switch from managed to native code was
+ not performed through an orderly transition.
+
+ BEHAVIORAL IMPACT:
+ The extra checks can cause a slight slowdown on calls from native code to managed code. This
+ slowdown is on the order of 5 instructions.
+
+ -->
+ <reentrancy enable="false" />
+
+
+ <!--
+ ReleaseHandleFailed (rudim)
+
+ This is an error event. It is fired when the ReleaseHandle method of a SafeHandle or
+ CriticalHandle subclass returns false. These methods are provided by the author subclassing
+ SafeHandle or CriticalHandle, so the circumstances are handle-specific, but the contract
+ is the following:
+
+ Safe and critical handles represent wrappers around vital process resources that
+ cannot be permitted to leak (otherwise the process will become unusable over time).
+
+ Therefore the ReleaseHandle method must not fail to perform its function
+ (once we've acquired such a resource, ReleaseHandle is the only means we have
+ of releasing it, so failure implies resource leakage).
+
+ Therefore any failure which does occur during ReleaseHandle (and impedes the
+ release of the resource) is a serious bug on the part of the author of the ReleaseHandle
+ method itself (it is their responsibility to make sure the contract is fulfilled, even
+ if they're calling other people's code to achieve the end effect).
+
+ To aid in debugging such leaks we allow ReleaseHandle to return a boolean result and
+ if that result is false we generate this MDA with some state information that might
+ help track down the problem. (We used to throw an IOException in earlier builds of Whidbey).
+
+ The symptoms the user might see in situations where this MDA fires are resource
+ leakage (of whatever resource the safe or critical handle wraps, or at least of
+ handles against that resource, which can be scarce in their own right).
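+
+ A minimal sketch of where the contract lives (a hypothetical Win32 handle wrapper):
+
+ class MyHandle : SafeHandle {
+ public MyHandle() : base(IntPtr.Zero, true) { }
+ public override bool IsInvalid {
+ get { return handle == IntPtr.Zero; }
+ }
+ protected override bool ReleaseHandle() {
+ // Returning false here (e.g. because CloseHandle failed) fires this MDA.
+ return CloseHandle(handle);
+ }
+ [DllImport("kernel32.dll")]
+ static extern bool CloseHandle(IntPtr handle);
+ }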
+ -->
+ <releaseHandleFailed enable="false" />
+
+
+ <!--
+ ReportAvOnComRelease (chriseck)
+
+ DESCRIPTION:
+ Occasionally an exception is thrown due to user refcount errors while performing COM Interop and
+ using Marshal.Release or Marshal.ReleaseComObject mixed with raw COM calls. Today, this exception
+ is simply discarded, since not doing so would cause an AV in the runtime and bring it down. Using
+ this assistant, such exceptions can be detected and reported instead of simply discarded.
+
+ Two modes are available: if allowAv is true, then the assistant simply strips the exception handling
+ from the function. If it is false (the default), then the exception handling occurs, but a warning
+ message is reported to the user to indicate that an exception was handled.
+ -->
+ <reportAvOnComRelease allowAv="false" enable="false" />
+
+ <!--
+ Marshaling (chriseck)
+
+ DESCRIPTION:
+ This assistant fires when the CLR sets up marshaling information for a method parameter or a field of a structure.
+ It prints out the type of the parameter or field both in the managed and unmanaged worlds, as well as indicating
+ the structure or method where the type lives.
+ <marshaling enable="false" />
+ -->
+
+
+ <!--
+ StreamWriterBufferedDataLost (BrianGru)
+
+ DESCRIPTION:
+ Intended to detect when users write data to a StreamWriter but
+ don't flush or close the StreamWriter. That data is then lost,
+ because StreamWriter cannot reliably write data to the underlying
+ Stream from its finalizer. Users should use a using block when
+ possible to ensure they always close the StreamWriter.
+
+ Poorly written code:
+ void Foo() {
+ StreamWriter sw = new StreamWriter("file.txt");
+ sw.WriteLine("Data");
+ // Forgot to close the StreamWriter.
+ }
+
+ This MDA was implemented by adding a finalizer to StreamWriter
+ that looks for data in its buffer. As such, it requires your
+ program to get around to running finalizers before exiting.
+ This should happen in long-running apps automatically over time,
+ but can be forced in short-lived test apps (like the above) by
+ calling GC.Collect then GC.WaitForPendingFinalizers.
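+
+ For example, in a short-lived test app one could append the following
+ after Foo() returns to force the finalizer (and hence this check) to run:
+
+ Foo();
+ GC.Collect();
+ GC.WaitForPendingFinalizers();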
+
+ BEHAVIORAL IMPACT:
+ None.
+
+ SCENARIOS:
+ SYMPTOM:
+ User attempts to write to a file, but the last 1K - 4K of data
+ haven't been written to the file. This MDA detects this
+ data loss during finalization of the StreamWriter.
+
+ CAUSE:
+ User did not properly close their StreamWriter, or arrange for
+ it to be flushed.
+
+ CORRECTION:
+ Use the using statement in C# & VB. In managed C++, use a
+ try/finally to call Dispose.
+
+ void Foo() {
+ using(StreamWriter sw = new StreamWriter("file.txt")) {
+ sw.WriteLine("Data");
+ }
+ }
+
+ Or users can use the long form, expanding out the using clause:
+
+ void Foo() {
+ StreamWriter sw = null;
+ try {
+ sw = new StreamWriter("file.txt");
+ sw.WriteLine("Data");
+ }
+ finally {
+ if (sw != null)
+ sw.Close();
+ }
+ }
+
+ If neither of these solutions can be used (say, if you have a
+ StreamWriter stored in a static variable and thus you cannot
+ easily run code at the end of its lifetime), then calling Flush
+ on the StreamWriter after its last use or setting its AutoFlush
+ property to true before its first use will be sufficient.
+ Here's an example:
+
+ internal static class Foo {
+ private static StreamWriter _log;
+
+ static Foo() { // Static class constructor
+ StreamWriter sw = new StreamWriter("log.txt");
+ sw.AutoFlush = true;
+ // Now publish the StreamWriter for other threads.
+ _log = sw;
+ }
+ }
+
+ OUTPUT:
+ An XML message, indicating this violation occurred. To the
+ effect of "You lost data because you didn't close your
+ StreamWriter." It may include a file name and a stack trace
+ showing where the StreamWriter was allocated, to help track
+ down incorrect code.
+ -->
+ <streamWriterBufferedDataLost enable="true" captureAllocatedCallStack="false"/>
+
+
+ <!--
+ VirtualCERCall (rudim)
+
+ This is just a warning. It indicates that a callsite within a CER call graph
+ refers to a virtual target (i.e. a virtual call to a non-final virtual method
+ or a call via an interface). The runtime cannot predict the destination method
+ of these calls from IL and metadata analysis alone (which is all we have), so
+ we won't descend into that call tree and prepare it as part of the CER graph (or
+ automatically block thread aborts in that subtree either). So this warns of cases
+ where a CER might need to be extended manually via explicit calls to PrepareMethod()
+ (once the additional information required to compute the call target is known at
+ runtime).
+
+ Symptoms of a problem reported via this probe are pretty much the same as above. The
+ callsite can experience a failure from the runtime if the target wasn't explicitly
+ prepared by another means (e.g. PrepareMethod) and it's not defined whether or not
+ the code to be run meets any reliability contracts since the runtime couldn't scan
+ ahead to tell. If the eventual target wasn't hardened for deterministic operation
+ then it could surprise the author of the CER as above.
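+
+ A sketch of that manual preparation (illustrative only: Derived and DoWork
+ are hypothetical names), run once the concrete target type is known:
+
+ MethodInfo target = typeof(Derived).GetMethod("DoWork");
+ RuntimeHelpers.PrepareMethod(target.MethodHandle);
+ // The virtual or interface callsite inside the CER can now dispatch to
+ // Derived.DoWork without risking a JIT-induced failure mid-CER.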
+
+ -->
+ <virtualCERCall enable="false" />
+
+ </assistants>
+</mdaConfig>
+
+
+
diff --git a/src/vm/mdaassistants.cpp b/src/vm/mdaassistants.cpp
new file mode 100644
index 0000000000..38c38d89b0
--- /dev/null
+++ b/src/vm/mdaassistants.cpp
@@ -0,0 +1,2351 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#ifdef MDA_SUPPORTED
+#include "mda.h"
+#include "mdaassistants.h"
+#include "field.h"
+#include "dllimport.h"
+#ifdef FEATURE_COMINTEROP
+#include "runtimecallablewrapper.h"
+#include "comcallablewrapper.h"
+#include "comcache.h"
+#include "comtoclrcall.h"
+#include "mlinfo.h"
+#endif
+#include "sigformat.h"
+#include "fieldmarshaler.h"
+#include "dllimportcallback.h"
+#include "dbginterface.h"
+#include "finalizerthread.h"
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+#include "olecontexthelpers.h"
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+#ifdef MDA_SUPPORTED
+////
+//// Mda Assistants
+////
+
+
+// Why is ANYTHING in here marked SO_TOLERANT? Presumably some of them are called from managed code.
+
+
+//
+// MdaFramework
+//
+void MdaFramework::DumpDiagnostics()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ ManagedDebuggingAssistants* pMda = g_mdaStaticHeap.m_pMda;
+
+#ifdef _DEBUG
+ if (m_dumpSchemaSchema)
+ {
+ MdaXmlElement* pXmlSchemaSchema = pMda->m_pSchemaSchema->ToXml(pMda->m_pMdaXmlIndustry);
+// MdaXmlMessage::SendMessage(pXmlSchemaSchema, TRUE, pMda->m_pSchemaSchema);
+ }
+
+ if (m_dumpAssistantSchema)
+ {
+ MdaXmlElement* pXmlAssistantMsgSchema = pMda->m_pAssistantMsgSchema->ToXml(pMda->m_pMdaXmlIndustry);
+// MdaXmlMessage::SendMessage(pXmlAssistantMsgSchema, TRUE, pMda->m_pSchemaSchema);
+ }
+
+ if (m_dumpAssistantMsgSchema)
+ {
+ MdaXmlElement* pXmlAssistantSchema = pMda->m_pAssistantSchema->ToXml(pMda->m_pMdaXmlIndustry);
+// MdaXmlMessage::SendMessage(pXmlAssistantSchema, TRUE, pMda->m_pSchemaSchema);
+ }
+#endif
+}
+
+#ifdef _DEBUG
+extern BOOL g_bMdaDisableAsserts;
+#endif
+
+void MdaFramework::Initialize(MdaXmlElement* pXmlInput)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ ManagedDebuggingAssistants* pMda = g_mdaStaticHeap.m_pMda;
+ g_bMdaDisableAsserts = pXmlInput->GetAttributeValueAsBool(MdaAttrDecl(DisableAsserts));
+ MdaXmlElement* pXmlDiagnostics = pXmlInput->GetChild(MdaElemDecl(Diagnostics));
+
+ if (pXmlDiagnostics)
+ {
+ m_dumpSchemaSchema = pXmlDiagnostics->GetAttributeValueAsBool(MdaAttrDecl(DumpSchemaSchema), FALSE);
+ m_dumpAssistantSchema = pXmlDiagnostics->GetAttributeValueAsBool(MdaAttrDecl(DumpAssistantSchema), FALSE);
+ m_dumpAssistantMsgSchema = pXmlDiagnostics->GetAttributeValueAsBool(MdaAttrDecl(DumpAssistantMsgSchema), FALSE);
+ }
+#endif
+}
+
+//
+// MdaGcUnmanagedToManaged
+//
+void MdaGcUnmanagedToManaged::TriggerGC()
+{
+ WRAPPER_NO_CONTRACT;
+
+ TriggerGCForMDAInternal();
+}
+
+
+//
+// MdaGcManagedToUnmanaged
+//
+void MdaGcManagedToUnmanaged::TriggerGC()
+{
+ WRAPPER_NO_CONTRACT;
+
+ TriggerGCForMDAInternal();
+}
+
+void TriggerGCForMDAInternal()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return);
+
+ EX_TRY
+ {
+ GCHeap::GetGCHeap()->GarbageCollect();
+
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+ //
+ // It is very dangerous to wait for finalizer thread here if we are inside a wait
+ // operation, as the wait operation might call into interop which calls this MDA
+ // and call into FinalizerThreadWait. In this case, we might run into infinite recursion:
+ // SynchronizationContext.Wait -> P/Invoke -> WaitForPendingFinalizer ->
+ // SynchronizationContext.Wait ....
+ //
+ // So, if we are inside a SyncContext.Wait, don't call out to FinalizerThreadWait
+ //
+ if (!GetThread()->HasThreadStateNC(Thread::TSNC_InsideSyncContextWait))
+#endif // FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+ // It is possible that user code run as part of finalization will wait for this thread.
+ // To avoid deadlocks, we limit the wait time to 10 seconds (an arbitrary number).
+ FinalizerThread::FinalizerThreadWait(10 * 1000);
+ }
+ EX_CATCH
+ {
+ // Caller cannot take exceptions.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_SO_INTOLERANT_CODE;
+}
+
+//
+// MdaCallbackOnCollectedDelegate
+//
+/*
+MdaCallbackOnCollectedDelegate::~MdaCallbackOnCollectedDelegate()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pList && m_size)
+ {
+ for (int i=0; i < m_size; i++)
+ ReplaceEntry(i, NULL);
+
+ delete[] m_pList;
+ }
+}
+*/
+
+void MdaCallbackOnCollectedDelegate::ReportViolation(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+ MdaXmlElement* pDelegate = pXml->AddChild(MdaElemDecl(Delegate));
+ StackSString delegateName;
+
+ if(pMD)
+ {
+ AsMdaAssistant()->OutputMethodDesc(pMD, pDelegate);
+ AsMdaAssistant()->ToString(delegateName, pMD);
+ }
+
+ msg.SendMessagef(MDARC_CALLBACK_ON_COLLECTED_DELEGATE, delegateName.GetUnicode());
+}
+
+void MdaCallbackOnCollectedDelegate::AddToList(UMEntryThunk* pEntryThunk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pEntryThunk));
+ }
+ CONTRACTL_END;
+
+ // Get an index to use.
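+ // m_iIndex is advanced with a compare-exchange loop so that concurrent
+ // callers each claim a distinct slot in the fixed-size circular list;
+ // whatever thunk previously occupied the claimed slot is torn down in
+ // ReplaceEntry below.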
+ ULONG oldIndex = m_iIndex;
+ ULONG newIndex = oldIndex + 1;
+ if (newIndex >= (ULONG)m_size)
+ newIndex = 0;
+
+ while ((ULONG)FastInterlockCompareExchange((LONG*)&m_iIndex, newIndex, oldIndex) != oldIndex)
+ {
+ oldIndex = m_iIndex;
+ newIndex = oldIndex + 1;
+ if (newIndex >= (ULONG)m_size)
+ newIndex = 0;
+ }
+
+ // We successfully incremented the index and can use the oldIndex value as our entry.
+ ReplaceEntry(oldIndex, pEntryThunk);
+}
+
+void MdaCallbackOnCollectedDelegate::ReplaceEntry(int index, UMEntryThunk* pET)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_INTOLERANT;
+ PRECONDITION((index >= 0) && (index < m_size));
+ PRECONDITION(CheckPointer(m_pList));
+ }
+ CONTRACTL_END;
+
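+ // Publish the new thunk with a compare-exchange so a racing update of the
+ // same slot cannot be lost; exactly one caller sees (and terminates) the
+ // displaced thunk.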
+ if ((m_pList) && (m_size > index) && (index >= 0))
+ {
+ UMEntryThunk* pETTemp = m_pList[index];
+ while (FastInterlockCompareExchangePointer((LPVOID*)&m_pList[index], (LPVOID)pET, (LPVOID)pETTemp) != (LPVOID)pETTemp)
+ {
+ pETTemp = m_pList[index];
+ }
+
+ if (NULL != pETTemp)
+ {
+ pETTemp->Terminate();
+ }
+ }
+}
+
+#ifdef FEATURE_COMINTEROP
+
+void MdaInvalidMemberDeclaration::ReportViolation(ComCallMethodDesc *pCMD, OBJECTREF *pExceptionObj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ TypeHandle th;
+ StackSString strMemberName;
+ StackSString strTypeName;
+ StackSString strMessage;
+
+ GetExceptionMessage(*pExceptionObj, strMessage);
+
+ if (pCMD->IsFieldCall())
+ {
+ FieldDesc *pFD = pCMD->GetFieldDesc();
+
+ th = pFD->GetFieldTypeHandleThrowing();
+ strMemberName.SetUTF8(pFD->GetName());
+ AsMdaAssistant()->OutputFieldDesc(pFD, pXml->AddChild(MdaElemDecl(Field)));
+ }
+ else
+ {
+ MethodDesc *pMD = pCMD->GetCallMethodDesc();
+
+ th = TypeHandle(pMD->GetMethodTable());
+ strMemberName.SetUTF8(pMD->GetName());
+ AsMdaAssistant()->OutputMethodDesc(pMD, pXml->AddChild(MdaElemDecl(Method)));
+ }
+
+ th.GetName(strTypeName);
+
+ AsMdaAssistant()->OutputTypeHandle(th, pXml->AddChild(MdaElemDecl(Type)));
+ AsMdaAssistant()->OutputException(pExceptionObj, pXml->AddChild(MdaElemDecl(Exception)));
+
+ msg.SendMessagef(MDARC_INVALID_MEMBER_DECLARATION,
+ strMemberName.GetUnicode(), strTypeName.GetUnicode(), strMessage.GetUnicode());
+ }
+ EX_CATCH
+ {
+ // Caller cannot take exceptions.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+#endif //FEATURE_COMINTEROP
+
+
+//
+// MdaExceptionSwallowedOnCallFromCom
+//
+void MdaExceptionSwallowedOnCallFromCom::ReportViolation(MethodDesc *pMD, OBJECTREF *pExceptionObj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ StackSString strMessage;
+ StackSString strTypeName;
+ StackSString strMemberName;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ TypeHandle th = TypeHandle(pMD->GetMethodTable());
+
+ GetExceptionMessage(*pExceptionObj, strMessage);
+ th.GetName(strTypeName);
+ strMemberName.SetUTF8(pMD->GetName());
+
+ AsMdaAssistant()->OutputMethodDesc(pMD, pXml->AddChild(MdaElemDecl(Method)));
+ AsMdaAssistant()->OutputTypeHandle(th, pXml->AddChild(MdaElemDecl(Type)));
+ AsMdaAssistant()->OutputException(pExceptionObj, pXml->AddChild(MdaElemDecl(Exception)));
+
+ msg.SendMessagef(MDARC_EXCEPTION_SWALLOWED_COM_TO_CLR,
+ strMemberName.GetUnicode(), strTypeName.GetUnicode(), strMessage.GetUnicode());
+ }
+ EX_CATCH
+ {
+ // Caller cannot take exceptions.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+
+//
+// MdaInvalidVariant
+//
+void MdaInvalidVariant::ReportViolation()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessagef(MDARC_INVALID_VARIANT);
+}
+
+
+//
+// MdaInvalidIUnknown
+//
+void MdaInvalidIUnknown::ReportViolation()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessagef(MDARC_INVALID_IUNKNOWN);
+}
+
+
+//
+// MdaContextSwitchDeadlock
+//
+void MdaContextSwitchDeadlock::ReportDeadlock(LPVOID Origin, LPVOID Destination)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (g_fEEShutDown == 0)
+ {
+ EX_TRY
+ {
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessagef(MDARC_CONTEXT_SWITCH_DEADLOCK, Origin, Destination);
+ }
+ EX_CATCH
+ {
+ // Caller cannot take exceptions.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+}
+
+
+//
+// MdaRaceOnRCWCleanup
+//
+void MdaRaceOnRCWCleanup::ReportViolation()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessagef(MDARC_RCW_CLEANUP_RACE);
+}
+
+
+//
+// MdaFailedQI
+//
+void MdaFailedQI::ReportAdditionalInfo(HRESULT hr, RCW* pRCW, GUID iid, MethodTable* pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+ TypeHandle th(pMT);
+
+ SafeComHolder<IUnknown> pInnerUnk = pRCW->GetIUnknown();
+
+ // We are interested in the case where the QI fails because of wrong context.
+ if (!pRCW->IsFreeThreaded() && GetCurrentCtxCookie() != pRCW->GetWrapperCtxCookie())
+ {
+ // Try to change context and perform the QI in the new context again.
+ MdaFailedQIAssistantCallbackData data;
+
+ data.pWrapper = pRCW;
+ data.iid = iid;
+
+ pRCW->EnterContext(MdaFailedQIAssistantCallback, &data);
+ if (data.fSuccess)
+ {
+ // QI succeeds in the other context, i.e. the original QI fails because of wrong context.
+ pXml = AsMdaAssistant()->OutputTypeHandle(th, pXml->AddChild(MdaElemDecl(Type)));
+
+ // Stringize IID
+ WCHAR strNativeItfIID[39];
+ StringFromGUID2(iid, strNativeItfIID, sizeof(strNativeItfIID) / sizeof(WCHAR));
+
+ // Map HRESULT to a message
+ StackSString sszHR2Description;
+ GetHRMsg(hr, sszHR2Description);
+
+ // Format the HRESULT as a string
+ StackSString sszHR2Hex;
+ sszHR2Hex.Printf("%.8x", hr);
+
+ StackSString sszTypeName;
+ th.GetName(sszTypeName);
+
+ msg.SendMessagef(MDARC_FAILED_QI, sszTypeName.GetUnicode(), strNativeItfIID, sszHR2Hex.GetUnicode(), sszHR2Description.GetUnicode());
+ }
+ }
+ else if (hr == E_NOINTERFACE)
+ {
+
+ // BUG: You'd have to check the registry to determine whether the proxy stub is actually missing, as opposed to the
+ // COM object's QI simply returning a failure code.
+
+ /*
+ // Check if pInnerUnk is actually pointing to a proxy, i.e. that it is pointing to an address
+ // within the loaded ole32.dll image. Note that WszGetModuleHandle does not increment the
+ // ref count.
+ HINSTANCE hModOle32 = WszGetModuleHandle(OLE32DLL);
+ if (hModOle32 && IsIPInModule(hModOle32, (BYTE *)(*(BYTE **)(IUnknown*)pInnerUnk)))
+ {
+ pXml = AsMdaAssistant()->OutputTypeHandle(th, pXml->AddChild(MdaElemDecl(Type)));
+
+ WCHAR strGuid[40];
+ GuidToLPWSTR(iid, strGuid, 40);
+ msg.SendMessagef(MDARC_FAILED_QI, strGuid);
+ }
+ */
+ }
+}
+
+HRESULT MdaFailedQIAssistantCallback(LPVOID pData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pData));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ SafeComHolder<IUnknown> pDummyUnk;
+
+ MdaFailedQIAssistantCallbackData *pCallbackData = (MdaFailedQIAssistantCallbackData *)pData;
+
+ // Initialize the fSuccess flag to false until we know for a fact the QI will succeed.
+ pCallbackData->fSuccess = FALSE;
+
+ // QI for the requested interface.
+ hr = pCallbackData->pWrapper->SafeQueryInterfaceRemoteAware(pCallbackData->iid, &pDummyUnk);
+
+ // If the QI call succeeded, then set the fSuccess flag to true.
+ if (SUCCEEDED(hr))
+ pCallbackData->fSuccess = TRUE;
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return S_OK; // Need to return S_OK so that the assert in CtxEntry::EnterContext() won't fire.
+}
+
+//
+// MdaDisconnectedContext
+//
+void MdaDisconnectedContext::ReportViolationDisconnected(LPVOID context, HRESULT hr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (g_fEEShutDown == 0)
+ {
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ StackSString strHRMsg;
+ GetHRMsg(hr, strHRMsg);
+
+ msg.SendMessagef(MDARC_DISCONNECTED_CONTEXT_1, context, strHRMsg.GetUnicode());
+ }
+}
+
+void MdaDisconnectedContext::ReportViolationCleanup(LPVOID context1, LPVOID context2, HRESULT hr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (g_fEEShutDown == 0)
+ {
+ EX_TRY
+ {
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ StackSString strHRMsg;
+ GetHRMsg(hr, strHRMsg);
+
+ msg.SendMessagef(MDARC_DISCONNECTED_CONTEXT_2, context1, strHRMsg.GetUnicode(), context2);
+ }
+ EX_CATCH
+ {
+ // Caller cannot take exceptions.
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+}
+
+
+//
+// MdaInvalidApartmentStateChange
+//
+void MdaInvalidApartmentStateChange::ReportViolation(Thread* pThread, Thread::ApartmentState newstate, BOOL fAlreadySet)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ AsMdaAssistant()->OutputThread(pThread, pXml->AddChild(MdaElemDecl(Thread)));
+
+ if (fAlreadySet)
+ {
+ if (newstate == Thread::AS_InSTA)
+ {
+ msg.SendMessagef(MDARC_INVALID_APT_STATE_CHANGE_SET, W("STA"), W("MTA"));
+ }
+ else
+ {
+ msg.SendMessagef(MDARC_INVALID_APT_STATE_CHANGE_SET, W("MTA"), W("STA"));
+ }
+ }
+ else
+ {
+ if (newstate == Thread::AS_InSTA)
+ {
+ msg.SendMessagef(MDARC_INVALID_APT_STATE_CHANGE_NOTSET, W("STA"), W("MTA"));
+ }
+ else
+ {
+ msg.SendMessagef(MDARC_INVALID_APT_STATE_CHANGE_NOTSET, W("MTA"), W("STA"));
+ }
+ }
+ }
+ EX_CATCH
+ {
+ // Caller cannot take exceptions.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+//
+// MdaDllMainReturnsFalse
+//
+void MdaDllMainReturnsFalse::ReportError()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return);
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessagef(MDARC_DLLMAIN_RETURNS_FALSE);
+
+ END_SO_INTOLERANT_CODE;
+}
+
+//
+// MdaOverlappedFreeError
+//
+void MdaOverlappedFreeError::ReportError(LPVOID pOverlapped)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessagef(MDARC_INVALID_OVERLAPPED_FREE,
+ pOverlapped);
+}
+
+//
+// MdaInvalidOverlappedToPinvoke
+//
+
+// NOTE: the overlapped pointer needs to be named "overlapped".
+// It is embedded in the (Args) and (ArgsUsed) sections.
+
+
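+// invalidoverlappedwrappers.h is expanded three times below (an X-macro
+// pattern): first to build an enum of DllName_Name indices, then to emit one
+// Mda_<Name> wrapper per hooked export, and finally to build the static
+// PInvokeTable mapping each wrapper to its module, entry point and real target.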
+#define CREATE_WRAPPER_FUNCTION(DllName, Return, Flags, Name, Args, ArgsUsed) \
+ DllName##_##Name,
+enum {
+#include "invalidoverlappedwrappers.h"
+};
+#undef CREATE_WRAPPER_FUNCTION
+
+#define CREATE_WRAPPER_FUNCTION(DllName, Return, Flags, Name, Args, ArgsUsed) \
+Return Flags Mda_##Name Args \
+{ \
+ CONTRACTL \
+ { \
+ THROWS; \
+ GC_TRIGGERS; \
+ SO_TOLERANT; \
+ MODE_ANY; \
+ } \
+ CONTRACTL_END; \
+ Return (Flags *old##Name) Args; \
+ MdaInvalidOverlappedToPinvoke *pOverlapCheck = MDA_GET_ASSISTANT(InvalidOverlappedToPinvoke); \
+ _ASSERTE(pOverlapCheck); \
+ *(PVOID *)&(old##Name) = pOverlapCheck->CheckOverlappedPointer(DllName##_##Name, (LPVOID) overlapped); \
+ return old##Name ArgsUsed; \
+}
+#include "invalidoverlappedwrappers.h"
+#undef CREATE_WRAPPER_FUNCTION
+
+#define CREATE_WRAPPER_FUNCTION(DllName, Return, Flags, Name, Args, ArgsUsed) \
+ { L#DllName W(".DLL"), L#Name, Mda_##Name, NULL, NULL },
+static MdaInvalidOverlappedToPinvoke::pinvoke_entry PInvokeTable[] = {
+#include "invalidoverlappedwrappers.h"
+};
+#undef CREATE_WRAPPER_FUNCTION
+
+void MdaInvalidOverlappedToPinvoke::Initialize(MdaXmlElement* pXmlInput)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+// TODO: CONTRACT_VIOLATION(SOToleranceViolation);
+
+ m_entries = PInvokeTable;
+ m_entryCount = sizeof(PInvokeTable) / sizeof(pinvoke_entry);
+ m_bJustMyCode = pXmlInput->GetAttributeValueAsBool(MdaAttrDecl(JustMyCode));
+}
+
+BOOL MdaInvalidOverlappedToPinvoke::InitializeModuleFunctions(HINSTANCE hmod)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // For every entry where m_moduleName matches moduleName, fill in m_hmod with hmod
+ // and fill in the m_realFunction pointer.
+
+ BOOL bFoundSomething = FALSE;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return FALSE);
+
+ SString moduleNameFullPath, moduleName;
+ ClrGetModuleFileNameNoThrow(hmod,moduleNameFullPath);
+ // Strip any path info
+ SString::CIterator iM = moduleNameFullPath.End();
+ if (moduleNameFullPath.FindBack(iM, W('\\')))
+ {
+ iM++;
+ moduleName.Set(moduleNameFullPath, iM, moduleNameFullPath.End());
+ }
+
+ for (UINT i=0; i<m_entryCount; i++)
+ {
+ if (SString::_wcsicmp(m_entries[i].m_moduleName, moduleName.GetUnicode()) == 0)
+ {
+ if (m_entries[i].m_hmod == NULL)
+ {
+ SString moduleNameForLookup(m_entries[i].m_functionName);
+ StackScratchBuffer ansiVersion;
+ m_entries[i].m_realFunction = GetProcAddress(hmod, moduleNameForLookup.GetANSI(ansiVersion));
+ m_entries[i].m_hmod = hmod;
+ }
+ bFoundSomething = TRUE;
+ }
+ }
+
+ END_SO_INTOLERANT_CODE;
+
+ return bFoundSomething;
+}
+
+LPVOID MdaInvalidOverlappedToPinvoke::CheckOverlappedPointer(UINT index, LPVOID pOverlapped)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaInvalidOverlappedToPinvoke::pinvoke_entry *pEntry = m_entries + index;
+
+ // pEntry should always be non-NULL, because we got the address of pvMdaFunction
+ // from the entries table in the first place.
+ _ASSERTE(pEntry);
+ if (pEntry == NULL)
+ {
+ return NULL;
+ }
+
+ // Is the overlapped pointer in the gc heap?
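+ // NativeOverlapped structures produced by System.Threading.Overlapped.Pack
+ // live on the GC heap, so a non-null overlapped pointer outside the GC heap
+ // is the suspicious case this assistant reports.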
+ if (pOverlapped != NULL)
+ {
+ // If a stack overflow occurs, we would just want to continue and
+ // return the function pointer as expected.
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return pEntry->m_realFunction);
+
+ BOOL fHeapPointer;
+
+ {
+ GCX_COOP();
+ GCHeap *pHeap = GCHeap::GetGCHeap();
+ fHeapPointer = pHeap->IsHeapPointer(pOverlapped);
+ }
+
+ if (!fHeapPointer)
+ {
+ // Output a message
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessagef(MDARC_INVALID_OVERLAPPED_TO_PINVOKE,
+ pOverlapped,
+ pEntry->m_functionName,
+ pEntry->m_moduleName);
+ }
+
+ END_SO_INTOLERANT_CODE;
+ }
+
+ return pEntry->m_realFunction;
+}
+
+// We want to hook the functions only where the call is in the user's code, unless
+// the JustMyCode attribute is set to false. In that case, we want all
+// occurrences.
+BOOL MdaInvalidOverlappedToPinvoke::ShouldHook(MethodDesc *pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_bJustMyCode ? IsJustMyCode(pMD) : TRUE);
+}
+
+LPVOID MdaInvalidOverlappedToPinvoke::Register(HINSTANCE hmod,LPVOID pvTarget)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // Quick lookup - do we have a matching target?
+ // walk our entries, looking for a match.
+ BOOL bNullModules = FALSE;
+ BOOL bSeenThisModule = FALSE;
+
+ for (UINT i=0; i<m_entryCount; i++)
+ {
+ MdaInvalidOverlappedToPinvoke::pinvoke_entry *pEntry = m_entries + i;
+ if (pvTarget == pEntry->m_realFunction)
+ {
+ return pEntry->m_mdaFunction;
+ }
+
+ bNullModules |= (pEntry->m_hmod == NULL);
+ bSeenThisModule |= (pEntry->m_hmod == hmod);
+ }
+
+ // If some entries still have NULL modules and we haven't seen this hmod yet,
+ // resolve this module's functions and search once more.
+ if (bNullModules && !bSeenThisModule)
+ {
+ if (InitializeModuleFunctions(hmod))
+ {
+ // Search once more
+ for (UINT i=0; i<m_entryCount; i++)
+ {
+ pinvoke_entry *pEntry = m_entries + i;
+ if (pvTarget == pEntry->m_realFunction)
+ {
+ return pEntry->m_mdaFunction;
+ }
+ }
+ }
+ }
+
+ return NULL;
+}
+
+//
+// MdaPInvokeLog
+//
+BOOL MdaPInvokeLog::Filter(SString& sszDllName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXmlFilter = m_pXmlInput->GetChild(MdaElemDecl(Filter));
+ if (!pXmlFilter)
+ return TRUE;
+
+ BOOL bFound = FALSE;
+ for (COUNT_T i = 0; i < pXmlFilter->GetChildren().GetCount(); i ++)
+ {
+ if (pXmlFilter->GetChildren()[i]->GetAttribute(MdaAttrDecl(DllName))->GetValueAsCSString()->EqualsCaseInsensitive(sszDllName))
+ {
+ bFound = TRUE;
+ break;
+ }
+ }
+
+ return bFound;
+}
+
+void MdaPInvokeLog::LogPInvoke(NDirectMethodDesc* pMD, HINSTANCE hMod)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ StackSString sszEntryPoint;
+ sszEntryPoint.SetUTF8(pMD->GetEntrypointName());
+
+ WCHAR szDllFullName[_MAX_PATH], szDrive[_MAX_PATH], szPath[_MAX_PATH], szFileName[_MAX_PATH], szExt[_MAX_PATH];
+ WszGetModuleFileName(hMod, szDllFullName, _MAX_PATH);
+ SplitPath(szDllFullName, szDrive, _MAX_PATH, szPath, _MAX_PATH, szFileName, _MAX_PATH, szExt, _MAX_PATH);
+
+ StackSString sszDllName;
+ sszDllName.Append(szFileName);
+ if (szExt)
+ sszDllName.Append(szExt);
+
+ if (Filter(sszDllName))
+ {
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), FALSE, &pXml);
+
+ MdaXmlElement* pMethod = pXml->AddChild(MdaElemDecl(Method));
+ AsMdaAssistant()->OutputMethodDesc(pMD, pMethod);
+
+ MdaXmlElement* pDllImport = pXml->AddChild(MdaElemDecl(DllImport));
+ pDllImport->AddAttributeSz(MdaAttrDecl(DllName), szDllFullName);
+ pDllImport->AddAttributeSz(MdaAttrDecl(EntryPoint), sszEntryPoint.GetUnicode());
+
+ msg.SendMessage();
+ }
+ }
+ EX_CATCH
+ {
+ // Caller cannot take exceptions.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+
+#ifdef _TARGET_X86_
+//
+// MdaPInvokeStackImbalance
+//
+void MdaPInvokeStackImbalance::CheckStack(StackImbalanceCookie *pSICookie, DWORD dwPostEsp)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ DWORD dwEspAfterPushedArgs = pSICookie->m_dwSavedEsp;
+ DWORD dwEspBeforePushedArgs = dwEspAfterPushedArgs + pSICookie->m_dwStackArgSize;
+ BOOL bStackImbalance = false;
+
+ // Note: We use relaxed rules here depending on the NetFx40_PInvokeStackResilience compat switch in order to mimic 2.0 behavior.
+ switch (pSICookie->m_callConv & pmCallConvMask)
+ {
+ // Caller cleans stack
+ case pmCallConvCdecl:
+ if (dwPostEsp != dwEspAfterPushedArgs)
+ {
+ if (dwPostEsp != dwEspBeforePushedArgs)
+ {
+ bStackImbalance = true;
+ }
+ else
+ {
+ // If NetFx40_PInvokeStackResilience is on, ignore the case where we see that the callee cleaned the stack.
+ BOOL fPreV4Method = pSICookie->m_pMD->GetModule()->IsPreV4Assembly();
+ if (!g_pConfig->PInvokeRestoreEsp(fPreV4Method))
+ bStackImbalance = true;
+ }
+ }
+ break;
+
+ // Callee cleans stack
+ case pmCallConvThiscall:
+ case pmCallConvWinapi:
+ case pmCallConvStdcall:
+ if (dwPostEsp != dwEspBeforePushedArgs)
+ {
+ if (dwPostEsp != dwEspAfterPushedArgs)
+ {
+ bStackImbalance = true;
+ }
+ else
+ {
+ // If NetFx40_PInvokeStackResilience is on, ignore the case where we see that the callee did not clean the stack
+ BOOL fPreV4Method = pSICookie->m_pMD->GetModule()->IsPreV4Assembly();
+ if (!g_pConfig->PInvokeRestoreEsp(fPreV4Method))
+ bStackImbalance = true;
+ }
+ }
+ break;
+
+ // Unsupported calling convention
+ case pmCallConvFastcall:
+ default:
+ _ASSERTE(!"Unsupported calling convention");
+ break;
+ }
+
+ if (!bStackImbalance)
+ return;
+
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+ MdaXmlElement* pMethod = pXml->AddChild(MdaElemDecl(Method));
+ AsMdaAssistant()->OutputMethodDesc(pSICookie->m_pMD, pMethod);
+
+ StackSString sszMethodName;
+ msg.SendMessagef(MDARC_PINVOKE_SIGNATURE_MISMATCH, AsMdaAssistant()->ToString(sszMethodName, pSICookie->m_pMD).GetUnicode());
+
+ END_SO_INTOLERANT_CODE;
+}
+#endif
+
+
+//
+// MdaJitCompilationStart
+//
+void MdaJitCompilationStart::Initialize(MdaXmlElement* pXmlInput)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ m_bBreak = pXmlInput->GetAttributeValueAsBool(MdaAttrDecl(Break));
+ MdaXmlElement* pXmlMethodFilter = pXmlInput->GetChild(MdaElemDecl(Methods));
+ m_pMethodFilter = NULL;
+
+ if (pXmlMethodFilter)
+ {
+ m_pMethodFilter = new MdaQuery::CompiledQueries();
+ MdaQuery::Compile(pXmlMethodFilter, m_pMethodFilter);
+ }
+}
+
+void MdaJitCompilationStart::NowCompiling(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (m_pMethodFilter && !m_pMethodFilter->Test(pMD))
+ return;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), m_bBreak, &pXml);
+
+ MdaXmlElement* pMethod = pXml->AddChild(MdaElemDecl(Method));
+ AsMdaAssistant()->OutputMethodDesc(pMD, pMethod);
+
+ msg.SendMessage();
+}
+
+//
+// MdaLoadFromContext
+//
+void MdaLoadFromContext::NowLoading(IAssembly** ppIAssembly, StackCrawlMark *pCallerStackMark)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (ppIAssembly && *ppIAssembly) {
+
+ // Send an MDA if this assembly was loaded in the LoadFrom context
+ if ((*ppIAssembly)->GetFusionLoadContext() == LOADCTX_TYPE_LOADFROM) {
+ // Apply MDA filtering
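+ // When a managed debugger is attached, only report loads whose caller is a
+ // Just-My-Code method; loads initiated from framework code are skipped.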
+ if (g_pDebugInterface && pCallerStackMark && ManagedDebuggingAssistants::IsManagedDebuggerAttached()) {
+ MethodDesc *pMethodDesc = NULL;
+ {
+ GCX_COOP();
+ pMethodDesc = SystemDomain::GetCallersMethod(pCallerStackMark, NULL);
+ }
+ if (pMethodDesc && !g_pDebugInterface->IsJMCMethod(pMethodDesc->GetModule(), pMethodDesc->GetMemberDef()))
+ return;
+ }
+
+ MdaXmlElement *pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ MdaXmlElement *pXmlAssembly = pXml->AddChild(MdaElemDecl(AssemblyInfo));
+
+ StackSString sszAssemblyName;
+ StackSString sszCodeBase;
+ SafeComHolder<IAssemblyName> pNameDef;
+
+ if (FAILED((*ppIAssembly)->GetAssemblyNameDef(&pNameDef))) {
+ return;
+ }
+
+ if ((!FusionBind::GetAssemblyNameStringProperty(pNameDef, ASM_NAME_NAME, sszAssemblyName)) ||
+ (!FusionBind::GetAssemblyNameStringProperty(pNameDef, ASM_NAME_CODEBASE_URL, sszCodeBase))) {
+ return;
+ }
+
+ pXmlAssembly->AddAttributeSz(MdaAttrDecl(DisplayName), sszAssemblyName.GetUnicode());
+ pXmlAssembly->AddAttributeSz(MdaAttrDecl(CodeBase), sszCodeBase.GetUnicode());
+
+ msg.SendMessagef(MDARC_LOAD_FROM_CONTEXT, sszAssemblyName.GetUnicode(), sszCodeBase.GetUnicode());
+ }
+ }
+}
+
+const LPCWSTR ContextIdName[] =
+{
+ W("Load"),
+ W("LoadFrom"),
+ W("Anonymous")
+};
+
+//
+// MdaBindingFailure
+//
+void MdaBindingFailure::BindFailed(AssemblySpec *pSpec, OBJECTREF *pExceptionObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement *pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ MdaXmlElement *pXmlAssembly = pXml->AddChild(MdaElemDecl(AssemblyInfo));
+
+ DWORD dwAppDomainId;
+ SString sszAssemblyName;
+ SString sszCodeBase;
+ SString sszMessage;
+ int iBindingContext;
+ HRESULT hr;
+
+ // determine AppDomain ID
+ AppDomain *appDomain = pSpec->GetAppDomain();
+ if (appDomain) {
+ dwAppDomainId = appDomain->GetId().m_dwId;
+ } else {
+ dwAppDomainId = 0;
+ }
+ pXmlAssembly->AddAttributeInt(MdaAttrDecl(AppDomainId), dwAppDomainId);
+
+ // determine Assembly display name
+ LPCSTR assemblyName = pSpec->GetName();
+ if (assemblyName && assemblyName[0]) {
+ sszAssemblyName.SetASCII(assemblyName);
+ }
+ pXmlAssembly->AddAttributeSz(MdaAttrDecl(DisplayName), sszAssemblyName.GetUnicode());
+
+ // determine Assembly code base
+ if (pSpec->GetCodeBase() && pSpec->GetCodeBase()[0]) {
+ sszCodeBase.Set(pSpec->GetCodeBase());
+ }
+ pXmlAssembly->AddAttributeSz(MdaAttrDecl(CodeBase), sszCodeBase.GetUnicode());
+
+ // retrieve the exception message.
+ GetExceptionMessage(*pExceptionObj, sszMessage);
+
+ // determine failing HRESULT
+ hr = GetExceptionHResult(*pExceptionObj);
+ pXmlAssembly->AddAttributeInt(MdaAttrDecl(HResult), hr);
+
+ // determine binding context Assembly would have been loaded in (based on parent)
+ IAssembly* pParentAssembly = pSpec->GetParentIAssembly();
+ if (pParentAssembly) {
+ iBindingContext = pParentAssembly->GetFusionLoadContext();
+ } else {
+
+ // if the parent hasn't been set but the code base has, it's in LoadFrom
+ iBindingContext = LOADCTX_TYPE_LOADFROM;
+ }
+ pXmlAssembly->AddAttributeInt(MdaAttrDecl(BindingContextId), iBindingContext);
+
+ // Make sure the binding context ID isn't larger than our ID-to-name lookup table.
+ _ASSERTE(iBindingContext < COUNTOF(ContextIdName));
+
+ if (sszAssemblyName.IsEmpty())
+ {
+ _ASSERTE(!sszCodeBase.IsEmpty());
+ msg.SendMessagef(MDARC_BINDING_FAILURE_CODEBASE_ONLY, sszCodeBase.GetUnicode(),
+ ContextIdName[iBindingContext], dwAppDomainId, sszMessage.GetUnicode());
+ }
+ else if (sszCodeBase.IsEmpty())
+ {
+ _ASSERTE(!sszAssemblyName.IsEmpty());
+ msg.SendMessagef(MDARC_BINDING_FAILURE_DISPLAYNAME_ONLY, sszAssemblyName.GetUnicode(),
+ ContextIdName[iBindingContext], dwAppDomainId, sszMessage.GetUnicode());
+ }
+ else
+ {
+ msg.SendMessagef(MDARC_BINDING_FAILURE, sszAssemblyName.GetUnicode(), sszCodeBase.GetUnicode(),
+ ContextIdName[iBindingContext], dwAppDomainId, sszMessage.GetUnicode());
+ }
+}
+
+
+//
+// MdaReflection
+//
+FCIMPL0(void, MdaManagedSupport::MemberInfoCacheCreation)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+ {
+ MdaMemberInfoCacheCreation* pMda = MDA_GET_ASSISTANT(MemberInfoCacheCreation);
+ if (pMda)
+ {
+ pMda->MemberInfoCacheCreation();
+ }
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+void MdaMemberInfoCacheCreation::MemberInfoCacheCreation()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessage(MDARC_REFLECTION_PERFORMANCE_MEMBERINFOCACHECREATION);
+}
+
+
+FCIMPL0(FC_BOOL_RET, MdaManagedSupport::IsStreamWriterBufferedDataLostEnabled)
+{
+ FCALL_CONTRACT;
+
+ // To see if it's enabled, allocate one then throw it away.
+ MdaStreamWriterBufferedDataLost* pMda = MDA_GET_ASSISTANT(StreamWriterBufferedDataLost);
+
+ FC_RETURN_BOOL(pMda != NULL);
+}
+FCIMPLEND
+
+FCIMPL0(FC_BOOL_RET, MdaManagedSupport::IsStreamWriterBufferedDataLostCaptureAllocatedCallStack)
+{
+ FCALL_CONTRACT;
+
+ // To see if it's enabled, allocate one then throw it away.
+ MdaStreamWriterBufferedDataLost* pMda = MDA_GET_ASSISTANT(StreamWriterBufferedDataLost);
+
+ FC_RETURN_BOOL((pMda != NULL) && (pMda->CaptureAllocatedCallStack()));
+}
+FCIMPLEND
+
+FCIMPL1(void, MdaManagedSupport::ReportStreamWriterBufferedDataLost, StringObject * stringRef)
+{
+ FCALL_CONTRACT;
+
+ STRINGREF str(stringRef);
+ MdaStreamWriterBufferedDataLost* pMda = MDA_GET_ASSISTANT(StreamWriterBufferedDataLost);
+ if (pMda)
+ {
+ HELPER_METHOD_FRAME_BEGIN_1(str);
+ StackSString message(str->GetBuffer());
+ pMda->ReportError(message);
+ HELPER_METHOD_FRAME_END();
+ }
+}
+FCIMPLEND
+
+FCIMPL0(FC_BOOL_RET, MdaManagedSupport::IsInvalidGCHandleCookieProbeEnabled)
+{
+ FCALL_CONTRACT;
+
+ // To see if it's enabled, allocate one then throw it away.
+ MdaInvalidGCHandleCookie* pMda = MDA_GET_ASSISTANT(InvalidGCHandleCookie);
+
+ FC_RETURN_BOOL(pMda != NULL);
+}
+FCIMPLEND
+
+FCIMPL1(void, MdaManagedSupport::FireInvalidGCHandleCookieProbe, LPVOID cookie)
+{
+ FCALL_CONTRACT;
+
+ MdaInvalidGCHandleCookie* pMda = MDA_GET_ASSISTANT(InvalidGCHandleCookie);
+ if (pMda)
+ {
+ HELPER_METHOD_FRAME_BEGIN_0();
+ pMda->ReportError(cookie);
+ HELPER_METHOD_FRAME_END();
+ }
+}
+FCIMPLEND
+
+FCIMPL1(void, MdaManagedSupport::ReportErrorSafeHandleRelease, ExceptionObject * exceptionRef)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF exception(exceptionRef);
+ MdaMarshalCleanupError* pMda = MDA_GET_ASSISTANT(MarshalCleanupError);
+ if (pMda)
+ {
+ HELPER_METHOD_FRAME_BEGIN_1(exception);
+ pMda->ReportErrorSafeHandleRelease(&exception);
+ HELPER_METHOD_FRAME_END();
+ }
+}
+FCIMPLEND
+
+void MdaInvalidGCHandleCookie::ReportError(LPVOID cookie)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessagef(MDARC_INVALID_GCHANDLE_COOKIE, cookie);
+}
+
+void MdaStreamWriterBufferedDataLost::ReportError(SString text)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessage(text);
+}
+
+
+//
+// MdaNotMarshalable
+//
+void MdaNotMarshalable::ReportViolation()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessagef(MDARC_NOTMARSHALABLE);
+}
+
+
+//
+// MdaMarshalCleanupError
+//
+void MdaMarshalCleanupError::ReportErrorThreadCulture(OBJECTREF *pExceptionObj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ // retrieve the exception message.
+ SString sszMessage;
+ GetExceptionMessage(*pExceptionObj, sszMessage);
+
+ msg.SendMessagef(MDARC_MARSHALCLEANUPERROR_THREADCULTURE, sszMessage.GetUnicode());
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+void MdaMarshalCleanupError::ReportErrorSafeHandleRelease(OBJECTREF *pExceptionObj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ // retrieve the exception message.
+ SString sszMessage;
+ GetExceptionMessage(*pExceptionObj, sszMessage);
+
+ msg.SendMessagef(MDARC_MARSHALCLEANUPERROR_SAFEHANDLERELEASE, sszMessage.GetUnicode());
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+void MdaMarshalCleanupError::ReportErrorSafeHandleProp(OBJECTREF *pExceptionObj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ // retrieve the exception message.
+ SString sszMessage;
+ GetExceptionMessage(*pExceptionObj, sszMessage);
+
+ msg.SendMessagef(MDARC_MARSHALCLEANUPERROR_SAFEHANDLEPROP, sszMessage.GetUnicode());
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+void MdaMarshalCleanupError::ReportErrorCustomMarshalerCleanup(TypeHandle typeCustomMarshaler, OBJECTREF *pExceptionObj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ // retrieve the exception message.
+ SString sszMessage;
+ GetExceptionMessage(*pExceptionObj, sszMessage);
+
+ // Retrieve the type name.
+ StackSString sszType;
+ typeCustomMarshaler.GetName(sszType);
+
+ msg.SendMessagef(MDARC_MARSHALCLEANUPERROR_CUSTOMCLEANUP, sszType.GetUnicode(), sszMessage.GetUnicode());
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+//
+// MdaMarshaling
+//
+void MdaMarshaling::Initialize(MdaXmlElement* pXmlInput)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ m_pMethodFilter = new MdaQuery::CompiledQueries();
+ m_pFieldFilter = new MdaQuery::CompiledQueries();
+
+ MdaXmlElement* pXmlMethodFilter = pXmlInput->GetChild(MdaElemDecl(MethodFilter));
+ if (pXmlMethodFilter)
+ MdaQuery::Compile(pXmlMethodFilter, m_pMethodFilter);
+
+ MdaXmlElement* pXmlFieldFilter = pXmlInput->GetChild(MdaElemDecl(FieldFilter));
+ if (pXmlFieldFilter)
+ MdaQuery::Compile(pXmlFieldFilter, m_pFieldFilter);
+}
+
+void MdaMarshaling::ReportFieldMarshal(FieldMarshaler* pFM)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pFM));
+ }
+ CONTRACTL_END;
+
+ FieldDesc* pFD = pFM->GetFieldDesc();
+
+ if (!pFD || !m_pFieldFilter->Test(pFD))
+ return;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), FALSE, &pXml);
+
+ MdaXmlElement* pField = pXml->AddChild(MdaElemDecl(MarshalingField));
+ AsMdaAssistant()->OutputFieldDesc(pFD, pField);
+
+ StackSString sszField;
+ SString managed;
+ SString unmanaged;
+
+ GetManagedSideForField(managed, pFD);
+ GetUnmanagedSideForField(unmanaged, pFM);
+
+ msg.SendMessagef(MDARC_MARSHALING_FIELD, AsMdaAssistant()->ToString(sszField, pFD).GetUnicode(), managed.GetUnicode(), unmanaged.GetUnicode());
+}
+
+
+void MdaMarshaling::GetManagedSideForField(SString& strManagedMarshalType, FieldDesc* pFD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (!CheckForPrimitiveType(pFD->GetFieldType(), strManagedMarshalType))
+ {
+ // The following workaround is added to avoid a recursion caused by calling GetTypeHandle on
+ // the m_value field of the UIntPtr class.
+ LPCUTF8 szNamespace, szClassName;
+ IfFailThrow(pFD->GetMDImport()->GetNameOfTypeDef(pFD->GetApproxEnclosingMethodTable()->GetCl(), &szClassName, &szNamespace));
+
+ if (strcmp(szNamespace, "System") == 0 && strcmp(szClassName, "UIntPtr") == 0)
+ {
+ static LPWSTR strRetVal = W("Void*");
+ strManagedMarshalType.Set(strRetVal);
+ }
+ else
+ {
+ MetaSig fSig(pFD);
+ fSig.NextArgNormalized();
+ TypeHandle th = fSig.GetLastTypeHandleNT();
+ if (th.IsNull())
+ {
+ static const WCHAR strErrorMsg[] = W("<error>");
+ strManagedMarshalType.Set(strErrorMsg);
+ }
+ else
+ {
+ SigFormat sigFmt;
+ sigFmt.AddType(th);
+ UINT iManagedTypeLen = (UINT)strlen(sigFmt.GetCString()) + 1;
+
+ WCHAR* buffer = strManagedMarshalType.OpenUnicodeBuffer(iManagedTypeLen);
+ MultiByteToWideChar(CP_ACP, MB_PRECOMPOSED, sigFmt.GetCString(), -1, buffer, iManagedTypeLen);
+ strManagedMarshalType.CloseBuffer();
+ }
+ }
+ }
+}
+
+void MdaMarshaling::GetUnmanagedSideForField(SString& strUnmanagedMarshalType, FieldMarshaler* pFM)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ NStructFieldTypeToString(pFM, strUnmanagedMarshalType);
+}
+
+
+void MdaMarshaling::GetManagedSideForMethod(SString& strManagedMarshalType, Module* pModule, SigPointer sig, CorElementType elemType)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (!CheckForPrimitiveType(elemType, strManagedMarshalType))
+ {
+ // an empty type context is sufficient: all methods should be non-generic
+ SigTypeContext emptyTypeContext;
+
+ TypeHandle th = sig.GetTypeHandleNT(pModule, &emptyTypeContext);
+ if (th.IsNull())
+ {
+ strManagedMarshalType.Set(W("<error>"));
+ }
+ else
+ {
+ SigFormat sigfmt;
+ sigfmt.AddType(th);
+ UINT iManagedMarshalTypeLength = MultiByteToWideChar( CP_ACP, MB_PRECOMPOSED, sigfmt.GetCString(), -1, NULL, 0);
+
+ WCHAR* str = strManagedMarshalType.OpenUnicodeBuffer(iManagedMarshalTypeLength);
+ MultiByteToWideChar( CP_ACP, MB_PRECOMPOSED, sigfmt.GetCString(), -1, str, iManagedMarshalTypeLength);
+ strManagedMarshalType.CloseBuffer();
+ }
+ }
+}
+
+
+void MdaMarshaling::GetUnmanagedSideForMethod(SString& strNativeMarshalType, MarshalInfo* mi, BOOL fSizeIsSpecified)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ mi->MarshalTypeToString(strNativeMarshalType, fSizeIsSpecified);
+}
+
+BOOL MdaMarshaling::CheckForPrimitiveType(CorElementType elemType, SString& strPrimitiveType)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ LPWSTR strRetVal;
+
+ switch (elemType)
+ {
+ case ELEMENT_TYPE_VOID:
+ strRetVal = W("Void");
+ break;
+ case ELEMENT_TYPE_BOOLEAN:
+ strRetVal = W("Boolean");
+ break;
+ case ELEMENT_TYPE_I1:
+ strRetVal = W("SByte");
+ break;
+ case ELEMENT_TYPE_U1:
+ strRetVal = W("Byte");
+ break;
+ case ELEMENT_TYPE_I2:
+ strRetVal = W("Int16");
+ break;
+ case ELEMENT_TYPE_U2:
+ strRetVal = W("UInt16");
+ break;
+ case ELEMENT_TYPE_CHAR:
+ strRetVal = W("Char");
+ break;
+ case ELEMENT_TYPE_I:
+ strRetVal = W("IntPtr");
+ break;
+ case ELEMENT_TYPE_U:
+ strRetVal = W("UIntPtr");
+ break;
+ case ELEMENT_TYPE_I4:
+ strRetVal = W("Int32");
+ break;
+ case ELEMENT_TYPE_U4:
+ strRetVal = W("UInt32");
+ break;
+ case ELEMENT_TYPE_I8:
+ strRetVal = W("Int64");
+ break;
+ case ELEMENT_TYPE_U8:
+ strRetVal = W("UInt64");
+ break;
+ case ELEMENT_TYPE_R4:
+ strRetVal = W("Single");
+ break;
+ case ELEMENT_TYPE_R8:
+ strRetVal = W("Double");
+ break;
+ default:
+ return FALSE;
+ }
+
+ strPrimitiveType.Set(strRetVal);
+ return TRUE;
+}
+
+//
+// MdaLoaderLock
+//
+void MdaLoaderLock::ReportViolation(HINSTANCE hInst)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ // Called from SO_TOLERANT CODE
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return);
+
+ EX_TRY
+ {
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ DWORD cName = 0;
+ WCHAR szName[_MAX_PATH * 2];
+ if (hInst)
+ {
+ cName = _MAX_PATH * 2 - 1;
+ cName = WszGetModuleFileName(hInst, szName, cName);
+ }
+
+ if (cName)
+ {
+ szName[cName] = W('\0');
+ msg.SendMessagef(MDARC_LOADER_LOCK_DLL, szName);
+ }
+ else
+ {
+ msg.SendMessagef(MDARC_LOADER_LOCK);
+ }
+ }
+ EX_CATCH
+ {
+ // Caller cannot take exceptions.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_SO_INTOLERANT_CODE;
+}
+
+
+//
+// MdaReentrancy
+//
+void MdaReentrancy::ReportViolation()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return);
+
+ EX_TRY
+ {
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessagef(MDARC_REENTRANCY);
+ }
+ EX_CATCH
+ {
+ // Caller cannot take exceptions.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_SO_INTOLERANT_CODE;
+}
+
+//
+// MdaAsynchronousThreadAbort
+//
+void MdaAsynchronousThreadAbort::ReportViolation(Thread *pCallingThread, Thread *pAbortedThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ AsMdaAssistant()->OutputThread(pCallingThread, pXml->AddChild(MdaElemDecl(CallingThread)));
+ AsMdaAssistant()->OutputThread(pAbortedThread, pXml->AddChild(MdaElemDecl(AbortedThread)));
+
+ msg.SendMessagef(MDARC_ASYNCHRONOUS_THREADABORT, pCallingThread->GetOSThreadId(), pAbortedThread->GetOSThreadId());
+ }
+ EX_CATCH
+ {
+ // Caller cannot take exceptions.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+
+//
+// MdaDangerousThreadingAPI
+//
+void MdaDangerousThreadingAPI::ReportViolation(__in_z WCHAR *apiName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessagef(MDARC_DANGEROUS_THREADINGAPI, apiName);
+}
+
+
+//
+// MdaReportAvOnComRelease
+//
+
+void MdaReportAvOnComRelease::ReportHandledException(RCW* pRCW)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ FAULT_NOT_FATAL();
+
+ // Report the access violation that was swallowed during the COM release;
+ // include the RCW's vtable pointer when one is available to help identify
+ // the offending COM object.
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ if (pRCW)
+ {
+ LPVOID vtablePtr = pRCW->GetVTablePtr();
+ msg.SendMessagef(MDARC_REPORT_AV_ON_COM_RELEASE_WITH_VTABLE, vtablePtr);
+ }
+ else
+ {
+ msg.SendMessagef(MDARC_REPORT_AV_ON_COM_RELEASE);
+ }
+ }
+ EX_CATCH
+ {
+ // Caller cannot take exceptions.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+void MdaInvalidFunctionPointerInDelegate::ReportViolation(LPVOID pFunc)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessagef(MDARC_INVALID_FUNCTION_PTR_IN_DELEGATE, pFunc);
+}
+
+//
+// MdaDirtyCastAndCallOnInterface
+//
+
+void MdaDirtyCastAndCallOnInterface::ReportViolation(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessagef(MDARC_DIRTY_CAST_AND_CALL_ON_INTERFACE);
+}
+
+//
+// MdaFatalExecutionEngineError
+//
+void MdaFatalExecutionEngineError::ReportFEEE(TADDR addrOfError, HRESULT hrError)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ DWORD tid = GetCurrentThreadId();
+
+ msg.SendMessagef(MDARC_FATAL_EXECUTION_ENGINE_ERROR, addrOfError, tid, hrError);
+ }
+ EX_CATCH
+ {
+ // Caller cannot take exceptions.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+
+//
+// MdaInvalidCERCall
+//
+void MdaInvalidCERCall::ReportViolation(MethodDesc* pCallerMD, MethodDesc *pCalleeMD, DWORD dwOffset)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), FALSE, &pXml);
+
+ AsMdaAssistant()->OutputMethodDesc(pCalleeMD, pXml->AddChild(MdaElemDecl(Method)));
+ AsMdaAssistant()->OutputCallsite(pCallerMD, dwOffset, pXml->AddChild(MdaElemDecl(Callsite)));
+
+ StackSString sszCalleeMethodName(SString::Utf8, pCalleeMD->GetName());
+ StackSString sszCallerMethodName(SString::Utf8, pCallerMD->GetName());
+ msg.SendMessagef(MDARC_INVALID_CER_CALL, sszCallerMethodName.GetUnicode(), dwOffset, sszCalleeMethodName.GetUnicode());
+}
+
+
+//
+// MdaVirtualCERCall
+//
+void MdaVirtualCERCall::ReportViolation(MethodDesc* pCallerMD, MethodDesc *pCalleeMD, DWORD dwOffset)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), FALSE, &pXml);
+
+ AsMdaAssistant()->OutputMethodDesc(pCalleeMD, pXml->AddChild(MdaElemDecl(Method)));
+ AsMdaAssistant()->OutputCallsite(pCallerMD, dwOffset, pXml->AddChild(MdaElemDecl(Callsite)));
+
+ StackSString sszCalleeMethodName(SString::Utf8, pCalleeMD->GetName());
+ StackSString sszCallerMethodName(SString::Utf8, pCallerMD->GetName());
+ msg.SendMessagef(MDARC_VIRTUAL_CER_CALL, sszCallerMethodName.GetUnicode(), dwOffset, sszCalleeMethodName.GetUnicode());
+}
+
+
+//
+// MdaOpenGenericCERCall
+//
+void MdaOpenGenericCERCall::ReportViolation(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), FALSE, &pXml);
+
+ AsMdaAssistant()->OutputMethodDesc(pMD, pXml->AddChild(MdaElemDecl(Method)));
+
+ StackSString sszMethodName(SString::Utf8, pMD->GetName());
+ msg.SendMessagef(MDARC_OPENGENERIC_CER_CALL, sszMethodName.GetUnicode());
+}
+
+
+//
+// MdaIllegalPrepareConstrainedRegion
+//
+void MdaIllegalPrepareConstrainedRegion::ReportViolation(MethodDesc* pMD, DWORD dwOffset)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), FALSE, &pXml);
+
+ AsMdaAssistant()->OutputCallsite(pMD, dwOffset, pXml->AddChild(MdaElemDecl(Callsite)));
+
+ StackSString sszMethodName(SString::Utf8, pMD->GetName());
+ msg.SendMessagef(MDARC_ILLEGAL_PCR, sszMethodName.GetUnicode(), dwOffset);
+}
+
+
+//
+// MdaReleaseHandleFailed
+//
+void MdaReleaseHandleFailed::ReportViolation(TypeHandle th, LPVOID lpvHandle)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ AsMdaAssistant()->OutputTypeHandle(th, pXml->AddChild(MdaElemDecl(Type)));
+
+ StackSString sszHandle;
+ sszHandle.Printf(W("0x%p"), lpvHandle);
+ pXml->AddChild(MdaElemDecl(Handle))->AddAttributeSz(MdaAttrDecl(Value), sszHandle.GetUnicode());
+
+ StackSString sszType;
+ th.GetName(sszType);
+ msg.SendMessagef(MDARC_SAFEHANDLE_CRITICAL_FAILURE, sszType.GetUnicode(), lpvHandle);
+}
+
+
+#ifdef FEATURE_COMINTEROP
+//
+// MdaNonComVisibleBaseClass
+//
+void MdaNonComVisibleBaseClass::ReportViolation(MethodTable *pMT, BOOL fForIDispatch)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ TypeHandle thDerived = TypeHandle(pMT);
+ TypeHandle thBase = thDerived.GetParent();
+
+ while (IsTypeVisibleFromCom(thBase))
+ thBase = thBase.GetParent();
+
+ // If we get here, one of the parents must be non-COM-visible.
+ _ASSERTE(!thBase.IsNull());
+
+ AsMdaAssistant()->OutputTypeHandle(thDerived, pXml->AddChild(MdaElemDecl(DerivedType)));
+ AsMdaAssistant()->OutputTypeHandle(thBase, pXml->AddChild(MdaElemDecl(BaseType)));
+
+ SString strDerivedClassName;
+ SString strBaseClassName;
+
+ thDerived.GetName(strDerivedClassName);
+ thBase.GetName(strBaseClassName);
+
+
+ msg.SendMessagef(fForIDispatch ? MDARC_NON_COMVISIBLE_BASE_CLASS_IDISPATCH : MDARC_NON_COMVISIBLE_BASE_CLASS_CLASSITF,
+ strDerivedClassName.GetUnicode(), strBaseClassName.GetUnicode());
+}
+#endif //FEATURE_COMINTEROP
+
+
+#ifdef _DEBUG
+//
+// MdaXmlValidationError
+//
+void MdaXmlValidationError::ReportError(MdaSchema::ValidationResult* pValidationResult)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ DEBUG_ONLY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+ PRECONDITION(CheckPointer(pValidationResult->m_pViolatingElement));
+ PRECONDITION(CheckPointer(pValidationResult->m_pViolatedElement));
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), FALSE, &pXml);
+
+ pXml->AddChild(MdaElemDecl(ViolatingXml))->AddChild(pValidationResult->m_pXmlRoot);
+ pValidationResult->m_pSchema->ToXml(pXml->AddChild(MdaElemDecl(ViolatedXsd)));
+
+ msg.SendMessage(W("The following XML does not match its schema."));
+}
+#endif
+
+
+//
+// InvalidConfigFile
+//
+void MdaInvalidConfigFile::ReportError(MdaElemDeclDef configFile)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ DEBUG_ONLY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage report(this->AsMdaAssistant(), TRUE, &pXml);
+
+ LPCWSTR szConfigFile = MdaSchema::GetElementName(configFile);
+ pXml->AddAttributeSz(MdaAttrDecl(ConfigFile), szConfigFile);
+
+ report.SendMessagef(MDARC_INVALID_CONFIG_FILE, szConfigFile);
+}
+
+//
+// MdaDateTimeInvalidLocalFormat
+//
+void MdaDateTimeInvalidLocalFormat::ReportError()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ DEBUG_ONLY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ MdaXmlElement* pXml;
+ MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+
+ msg.SendMessagef(MDARC_DATETIME_INVALID_LOCAL_FORMAT);
+}
+
+FCIMPL0(void, MdaManagedSupport::DateTimeInvalidLocalFormat)
+{
+ FCALL_CONTRACT;
+
+ MdaDateTimeInvalidLocalFormat* pMda = MDA_GET_ASSISTANT(DateTimeInvalidLocalFormat);
+ if (pMda)
+ {
+ HELPER_METHOD_FRAME_BEGIN_0();
+ pMda->ReportError();
+ HELPER_METHOD_FRAME_END();
+ }
+}
+FCIMPLEND
+
+#endif
+#endif //MDA_SUPPORTED
diff --git a/src/vm/mdaassistants.h b/src/vm/mdaassistants.h
new file mode 100644
index 0000000000..71a0a8d1ab
--- /dev/null
+++ b/src/vm/mdaassistants.h
@@ -0,0 +1,933 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _MDA_ASSISTANTS_
+#define _MDA_ASSISTANTS_
+
+#include "common.h"
+#include "mda.h"
+#include "mlinfo.h"
+#include <dbginterface.h>
+
+/*
+
+//-----------------------------------------------------------------------------
+// How to add a new MDA:
+//-----------------------------------------------------------------------------
+
+1) Add a new class that derives from MdaAssistant to src\vm\mdaAssistants.h.
+- The new class should have some function to report the error (we'll call it ReportXYZ()).
+The function is not virtual, so it can take any needed parameters and is called explicitly wherever you want to fire the MDA.
+
+2) Add the new implementation to src\vm\mdaAssistants.cpp
+See the other report functions for an example (eg, MdaLoaderLock::ReportViolation)
+
+3) The MDA contains a user-description string. This must be localized, and so it comes from a resource file.
+ - add a new resource ID to src\dlls\mscorrc\Resource.h (eg, MDARC_REPORT_AV_ON_COM_RELEASE)
+
+4) Add the actual text to src\dlls\mscorrc\Mscorrc.rc.
+ - add a #define MDARC_XYZ_MSG string. This is a printf format string and may contain parameters.
+ - add an entry into the MDARC stringtable like "MDARC_XYZ_MSG MDARC_XYZ"
+
+5) In order to get an instance of the MDA:
+ Use a construct like:
+ MdaFatalExecutionEngineError * pMDA = MDA_GET_ASSISTANT(FatalExecutionEngineError);
+
+ The macro parameter is the MDA class name minus the "Mda" prefix.
+ This may return null if the MDA is not available.
+
+6) Update mdaAssistantSchemas.inl
+
+7) Add it to any appropriate groups in mdaGroups.inl. Please be sure to follow each group's policy.
+
+8) Write a test for it.
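+
+Putting it together, a minimal sketch following the patterns above (the
+assistant name "Xyz" and the resource id MDARC_XYZ_MSG are placeholders for
+illustration only):
+
+    // mdaAssistants.h
+    class MdaXyz
+    {
+    public:
+        void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+        void ReportXyz();
+
+        MDA_ASSISTANT_BASE_MEMBERS;
+    };
+
+    // mdaAssistants.cpp
+    void MdaXyz::ReportXyz()
+    {
+        MdaXmlElement* pXml;
+        MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
+        msg.SendMessagef(MDARC_XYZ_MSG);
+    }
+
+    // At the point where the MDA should fire:
+    MdaXyz* pMda = MDA_GET_ASSISTANT(Xyz);
+    if (pMda)
+        pMda->ReportXyz();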
+*/
+
+#ifdef MDA_SUPPORTED
+
+// Until MDA offers first-class support for managed code we'll just make targeted ecalls.
+class MdaManagedSupport
+{
+public:
+ static FCDECL0(void, MemberInfoCacheCreation);
+ static FCDECL0(void, DateTimeInvalidLocalFormat);
+ static FCDECL1(void, ReportStreamWriterBufferedDataLost, StringObject * pString);
+ static FCDECL0(FC_BOOL_RET, IsStreamWriterBufferedDataLostEnabled);
+ static FCDECL0(FC_BOOL_RET, IsStreamWriterBufferedDataLostCaptureAllocatedCallStack);
+ static FCDECL0(FC_BOOL_RET, IsInvalidGCHandleCookieProbeEnabled);
+ static FCDECL1(void, FireInvalidGCHandleCookieProbe, LPVOID cookie);
+ static FCDECL1(void, ReportErrorSafeHandleRelease, ExceptionObject * pException);
+};
+
+// MDA classes do not derive from MdaAssistant in the type system, but, rather, use this macro to
+// ensure that their layout is identical to what it would be had they derived from MdaAssistant.
+// This allows them to be "aggregates", which C++ permits to be initialized at compile time.
+// This means that we must explicitly coerce from a derived type to the "base" type as needed.
+//
+// Note that the layout is asserted to be correct at compile time via the MDA_DEFINE_ASSISTANT
+// macro.
+#define MDA_ASSISTANT_BASE_MEMBERS \
+ MdaAssistant* AsMdaAssistant() \
+ { \
+ LIMITED_METHOD_CONTRACT; \
+ return (MdaAssistant*)this; \
+ } \
+ void Enable() \
+ { \
+ LIMITED_METHOD_CONTRACT; \
+ ManagedDebuggingAssistants::Enable( \
+ m_assistantDeclDef, this->AsMdaAssistant()); \
+ } \
+ MdaElemDeclDef m_assistantDeclDef; \
+ MdaElemDeclDef m_assistantMsgDeclDef; \
+ bool m_bSuppressDialog \
+
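+// With this layout, g_mdaStaticHeap (see mdadac.cpp) can be initialized as a
+// single compile-time aggregate via MDA_ASSISTANT_STATIC_INIT in mdaschema.inl,
+// including each assistant's MdaAssistant fields.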
+
+//
+// MdaFramework
+//
+class MdaFramework
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput);
+ void DumpDiagnostics();
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+ BOOL m_disableAsserts;
+ BOOL m_dumpSchemaSchema;
+ BOOL m_dumpAssistantSchema;
+ BOOL m_dumpAssistantMsgSchema;
+ BOOL m_dumpMachineConfig;
+ BOOL m_dumpAppConfig;
+};
+
+
+//
+// MdaJitCompilationStart
+//
+class MdaJitCompilationStart
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput);
+ void NowCompiling(MethodDesc* pMethodDesc);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+ MdaQuery::CompiledQueries* m_pMethodFilter;
+ BOOL m_bBreak;
+};
+
+
+//
+// MdaLoadFromContext
+//
+class MdaLoadFromContext
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void NowLoading(IAssembly** ppIAssembly, StackCrawlMark *pCallerStackMark);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaBindingFailure
+//
+class MdaBindingFailure
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void BindFailed(AssemblySpec *pSpec, OBJECTREF *pExceptionObj);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaMemberInfoCacheCreation
+//
+class MdaMemberInfoCacheCreation
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { WRAPPER_NO_CONTRACT; }
+ void MemberInfoCacheCreation();
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaPInvokeLog
+//
+class MdaPInvokeLog
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; m_pXmlInput = pXmlInput; }
+ BOOL Filter(SString& sszDllName);
+ void LogPInvoke(NDirectMethodDesc* pMd, HINSTANCE hMod);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+ MdaXmlElement* m_pXmlInput;
+};
+
+
+//
+// MdaOverlappedFreeError
+//
+class MdaOverlappedFreeError
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportError(LPVOID pOverlapped);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+//
+// MdaInvalidOverlappedToPinvoke
+//
+class MdaInvalidOverlappedToPinvoke
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput);
+
+ BOOL ShouldHook(MethodDesc *pMD);
+
+ // Called when setting up the pinvoke target
+ LPVOID Register(HINSTANCE hmod,LPVOID pvTarget);
+
+ // Logs the MDA error if the overlapped pointer isn't in the gc heap
+ LPVOID CheckOverlappedPointer(UINT index,LPVOID pOverlapped);
+
+ struct pinvoke_entry
+ {
+ LPCWSTR m_moduleName;
+ LPCWSTR m_functionName;
+ LPVOID m_mdaFunction;
+ LPVOID m_realFunction;
+ HINSTANCE m_hmod;
+
+ void Init(LPCWSTR moduleName, LPCWSTR functionName, LPVOID mdaFunction)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_moduleName = moduleName;
+ m_functionName = functionName;
+ m_mdaFunction = mdaFunction;
+ m_realFunction = NULL;
+ m_hmod = NULL;
+ }
+ };
+ BOOL InitializeModuleFunctions(HINSTANCE hmod);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+ pinvoke_entry *m_entries;
+ UINT m_entryCount;
+ BOOL m_bJustMyCode;
+};
+
+#ifdef _TARGET_X86_
+//
+// PInvokeStackImbalance
+//
+struct StackImbalanceCookie
+{
+ enum
+ {
+ // combined with the unmanaged calling convention (code:pmCallConvMask) in
+ // code:m_callConv if the unmanaged target has a floating point return value
+ HAS_FP_RETURN_VALUE = 0x80000000
+ };
+
+ // Filled in by stub generated by code:NDirectMethodDesc.GenerateStubForMDA or
+ // code:COMDelegate::GenerateStubForMDA:
+ MethodDesc *m_pMD; // dispatching MD (P/Invoke or delegate's Invoke)
+ LPVOID m_pTarget; // target address
+ DWORD m_dwStackArgSize; // number of arg bytes pushed on stack
+ DWORD m_callConv; // unmanaged calling convention, highest bit indicates FP return
+
+ // Pre-call ESP as recorded by PInvokeStackImbalanceHelper:
+ DWORD m_dwSavedEsp;
+};
+
+class MdaPInvokeStackImbalance
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void CheckStack(StackImbalanceCookie *pSICookie, DWORD dwPostESP);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+#endif
+
+
+//
+// DllMainReturnsFalse
+//
+class MdaDllMainReturnsFalse
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportError();
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+
+//
+// MdaModuloObjectHashcode
+//
+class MdaModuloObjectHashcode
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_modulus = pXmlInput->GetAttribute(MdaAttrDecl(Modulus))->GetValueAsInt32();
+ if (m_modulus <= 0)
+ m_modulus = 1;
+ }
+
+ INT32 GetModulo() { LIMITED_METHOD_CONTRACT; _ASSERTE(m_modulus > 0); return m_modulus; }
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+ INT32 m_modulus;
+};
+
+
+//
+// MdaGCUnmanagedToManaged
+//
+class MdaGcUnmanagedToManaged
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void TriggerGC(); // calls to GC.Collect & GC.WaitForPendingFinalizers are also generated into IL stubs
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaGCManagedToUnmanaged
+//
+class MdaGcManagedToUnmanaged
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void TriggerGC(); // calls to GC.Collect & GC.WaitForPendingFinalizers are also generated into IL stubs
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaLoaderLock
+//
+class MdaLoaderLock
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation(HINSTANCE hInst);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaReentrancy
+//
+class MdaReentrancy
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation();
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaAsynchronousThreadAbort
+//
+class MdaAsynchronousThreadAbort
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation(Thread *pCallingThread, Thread *pAbortedThread);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaDangerousThreadingAPI
+//
+class MdaDangerousThreadingAPI
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation(__in_z WCHAR *apiName);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaReportAvOnComRelease
+//
+class MdaReportAvOnComRelease
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ m_allowAv = pXmlInput->GetAttribute(MdaAttrDecl(AllowAv))->GetValueAsBool();
+ }
+
+ void ReportHandledException(RCW* pRCW);
+
+ BOOL AllowAV() { LIMITED_METHOD_CONTRACT; return m_allowAv; }
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+ BOOL m_allowAv;
+};
+
+
+
+//
+// MdaFatalExecutionEngineError
+//
+class MdaFatalExecutionEngineError
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ // Report a FatalExecutionEngine error.
+ // It is assumed to be on the current thread.
+ void ReportFEEE(TADDR addrOfError, HRESULT hrError);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+//
+// MdaCallbackOnCollectedDelegate
+//
+class MdaCallbackOnCollectedDelegate
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
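+ // ListSize values below 50 or above 2000 are snapped back to the default of 1000.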
+ m_size = pXmlInput->GetAttribute(MdaAttrDecl(ListSize))->GetValueAsInt32();
+ if (m_size < 50)
+ m_size = 1000;
+
+ if (m_size > 2000)
+ m_size = 1000;
+
+ m_pList = new UMEntryThunk*[m_size];
+ memset(m_pList, 0, sizeof(UMEntryThunk*) * m_size);
+ }
+
+ void ReportViolation(MethodDesc* pMD);
+ void AddToList(UMEntryThunk* pEntryThunk);
+
+private:
+ void ReplaceEntry(int index, UMEntryThunk* pET);
+
+public:
+ MDA_ASSISTANT_BASE_MEMBERS;
+ UMEntryThunk** m_pList;
+ int m_iIndex;
+ int m_size;
+};
+
+//
+// InvalidMemberDeclaration
+//
+class MdaInvalidMemberDeclaration
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+
+#ifdef FEATURE_COMINTEROP
+ void ReportViolation(ComCallMethodDesc *pCMD, OBJECTREF *pExceptionObj);
+#endif //FEATURE_COMINTEROP
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaExceptionSwallowedOnCallFromCom
+//
+class MdaExceptionSwallowedOnCallFromCom
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation(MethodDesc *pMD, OBJECTREF *pExceptionObj);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaInvalidVariant
+//
+class MdaInvalidVariant
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation();
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaInvalidApartmentStateChange
+//
+class MdaInvalidApartmentStateChange
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation(Thread* pThread, Thread::ApartmentState state, BOOL fAlreadySet);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+
+//
+// MdaFailedQI
+//
+HRESULT MdaFailedQIAssistantCallback(LPVOID pData);
+
+typedef struct
+{
+ RCW* pWrapper;
+ IID iid;
+ BOOL fSuccess;
+} MdaFailedQIAssistantCallbackData;
+
+#define OLE32DLL W("ole32.dll")
+
+class MdaFailedQI
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportAdditionalInfo(HRESULT hr, RCW* pRCW, GUID iid, MethodTable* pMT);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaDisconnectedContext
+//
+class MdaDisconnectedContext
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolationDisconnected(LPVOID context, HRESULT hr);
+ void ReportViolationCleanup(LPVOID context1, LPVOID context2, HRESULT hr);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaNotMarshalable
+//
+class MdaNotMarshalable
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation();
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+
+//
+// MdaMarshalCleanupError
+//
+class MdaMarshalCleanupError
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportErrorThreadCulture(OBJECTREF *pExceptionObj);
+ void ReportErrorSafeHandleRelease(OBJECTREF *pExceptionObj);
+ void ReportErrorSafeHandleProp(OBJECTREF *pExceptionObj);
+ void ReportErrorCustomMarshalerCleanup(TypeHandle typeCustomMarshaler, OBJECTREF *pExceptionObj);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaInvalidIUnknown
+//
+class MdaInvalidIUnknown
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation();
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaContextSwitchDeadlock
+//
+class MdaContextSwitchDeadlock
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportDeadlock(LPVOID Origin, LPVOID Destination);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// MdaRaceOnRCWCleanup
+//
+class MdaRaceOnRCWCleanup
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation();
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+//
+// MdaMarshaling
+//
+class MdaMarshaling
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput);
+ void ReportFieldMarshal(FieldMarshaler* pFM);
+
+private:
+ void GetManagedSideForMethod(SString& strManagedMarshalType, Module* pModule, SigPointer sig, CorElementType elemType);
+ void GetUnmanagedSideForMethod(SString& strNativeMarshalType, MarshalInfo* mi, BOOL fSizeIsSpecified);
+ void GetManagedSideForField(SString& strManagedMarshalType, FieldDesc* pFD);
+ void GetUnmanagedSideForField(SString& strUnmanagedMarshalType, FieldMarshaler* pFM);
+ BOOL CheckForPrimitiveType(CorElementType elemType, SString& strPrimitiveType);
+
+public:
+ MDA_ASSISTANT_BASE_MEMBERS;
+ MdaQuery::CompiledQueries* m_pMethodFilter;
+ MdaQuery::CompiledQueries* m_pFieldFilter;
+};
+
+
+
+//
+// InvalidFunctionPointerInDelegate
+//
+class MdaInvalidFunctionPointerInDelegate
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation(LPVOID pFunc);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// DirtyCastAndCallOnInterface
+//
+class MdaDirtyCastAndCallOnInterface
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation(IUnknown* pUnk);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// InvalidCERCall
+//
+class MdaInvalidCERCall
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation(MethodDesc* pCallerMD, MethodDesc *pCalleeMD, DWORD dwOffset);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+
+//
+// VirtualCERCall
+//
+class MdaVirtualCERCall
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation(MethodDesc* pCallerMD, MethodDesc *pCalleeMD, DWORD dwOffset);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+
+//
+// OpenGenericCERCall
+//
+class MdaOpenGenericCERCall
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation(MethodDesc* pMD);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+
+//
+// IllegalPrepareConstrainedRegion
+//
+class MdaIllegalPrepareConstrainedRegion
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation(MethodDesc* pMD, DWORD dwOffset);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+
+//
+// ReleaseHandleFailed
+//
+class MdaReleaseHandleFailed
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportViolation(TypeHandle th, LPVOID lpvHandle);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// NonComVisibleBaseClass
+//
+class MdaNonComVisibleBaseClass
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+#ifdef FEATURE_COMINTEROP
+ void ReportViolation(MethodTable *pMT, BOOL fForIDispatch);
+#endif //FEATURE_COMINTEROP
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+//
+// InvalidGCHandleCookie
+//
+class MdaInvalidGCHandleCookie
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportError(LPVOID cookie);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+//
+// MdaXmlValidator
+//
+class MdaXmlValidator
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+
+#ifdef _DEBUG
+//
+// MdaXmlValidationError
+//
+class MdaXmlValidationError
+{
+public:
+ void Initialize(MdaXmlElement* pXml) { LIMITED_METHOD_CONTRACT; }
+
+public:
+ void ReportError(MdaSchema::ValidationResult* pValidationResult);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+#endif
+
+
+//
+// MdaInvalidConfigFile
+//
+class MdaInvalidConfigFile
+{
+public:
+ void Initialize(MdaXmlElement* pXml) { LIMITED_METHOD_CONTRACT; }
+
+public:
+ void ReportError(MdaElemDeclDef configFile);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+//
+// MdaDateTimeInvalidLocalFormat
+//
+class MdaDateTimeInvalidLocalFormat
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput) { LIMITED_METHOD_CONTRACT; }
+ void ReportError();
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+};
+
+//
+// MdaStreamWriterBufferedDataLost
+//
+class MdaStreamWriterBufferedDataLost
+{
+public:
+ void Initialize(MdaXmlElement* pXmlInput)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ m_captureAllocatedCallStack = pXmlInput->GetAttribute(MdaAttrDecl(CaptureAllocatedCallStack))->GetValueAsBool();
+ }
+
+ BOOL CaptureAllocatedCallStack() { LIMITED_METHOD_CONTRACT; return m_captureAllocatedCallStack; }
+
+ void ReportError(SString text);
+
+ MDA_ASSISTANT_BASE_MEMBERS;
+ BOOL m_captureAllocatedCallStack;
+};
+
+class ValidateMdaAssistantLayout
+{
+ static_assert_no_msg(sizeof(MdaAssistant) == 3);
+#define MDA_VALIDATE_MEMBER_LAYOUT
+#include "mdaschema.inl"
+#undef MDA_VALIDATE_MEMBER_LAYOUT
+};
+
+//
+// MdaStaticHeap
+//
+
+typedef struct
+{
+ // This array is always live. Checking whether an assistant is enabled is
+ // simply a fetch from this array.
+ MdaAssistant* m_assistants[MdaElemDef(AssistantMax)];
+
+ // This pointer will point to the m_mda memory region, where the actual
+ // ManagedDebuggingAssistants instance lives. It may be null if no MDAs
+ // were enabled.
+ ManagedDebuggingAssistants* m_pMda;
+ BYTE m_mda[sizeof(ManagedDebuggingAssistants)];
+
+#define MDA_ASSISTANT_HEAP_RAW
+#include "mdaschema.inl"
+#undef MDA_ASSISTANT_HEAP_RAW
+
+ void DisableAll()
+ {
+ LIMITED_METHOD_CONTRACT;
+ memset(&m_assistants, 0, sizeof(m_assistants));
+ }
+}
+MdaStaticHeap;
+typedef DPTR(MdaStaticHeap) PTR_MdaStaticHeap;
+extern MdaStaticHeap g_mdaStaticHeap;
+
+
+// static
+FORCEINLINE void ManagedDebuggingAssistants::Enable(MdaElemDeclDef assistantDeclDef, MdaAssistant* pMda)
+{
+ g_mdaStaticHeap.m_assistants[assistantDeclDef] = pMda;
+}
+
+#ifndef DACCESS_COMPILE
+FORCEINLINE MdaAssistant* ManagedDebuggingAssistants::GetAssistant(MdaElemDeclDef id)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // If this assert fires, you should consider using GET_ASSISTANT_EX / TRIGGER_ASSISTANT_EX
+ _ASSERTE((g_pDebugInterface == NULL) || !g_pDebugInterface->ThisIsHelperThread());
+
+ return g_mdaStaticHeap.m_assistants[id];
+}
+
+FORCEINLINE MdaAssistant* ManagedDebuggingAssistants::GetAssistantEx(MdaElemDeclDef id)
+{
+ WRAPPER_NO_CONTRACT;
+
+ MdaAssistant* pMda = g_mdaStaticHeap.m_assistants[id];
+ if ((pMda != NULL) && ((g_pDebugInterface == NULL) || !g_pDebugInterface->ThisIsHelperThread()))
+ return pMda;
+
+ return NULL;
+}
+#endif // DACCESS_COMPILE
+
+void TriggerGCForMDAInternal();
+
+#endif // MDA_SUPPORTED
+#endif // _MDA_ASSISTANTS_
+
+
diff --git a/src/vm/mdaassistantschemas.inl b/src/vm/mdaassistantschemas.inl
new file mode 100644
index 0000000000..c6cbcf99af
--- /dev/null
+++ b/src/vm/mdaassistantschemas.inl
@@ -0,0 +1,640 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+ //
+ // Assistants
+ //
+
+// ************************************************
+// PLEASE KEEP MDAS IN ALPHABETICAL ORDER (starting after AsynchronousThreadAbort)
+// ************************************************
+
+
+ // Framework
+ MDA_DEFINE_ASSISTANT(Framework, NULL)
+ // Input
+ MDA_DEFINE_INPUT(Framework)
+#ifdef _DEBUG
+ MDA_XSD_OPTIONAL()
+ MDA_XSD_ELEMENT(Diagnostics)
+ MDA_XSD_ATTRIBUTE_DEFAULT(DumpAssistantMsgSchema, BOOL, W("false"))
+ MDA_XSD_ATTRIBUTE_DEFAULT(DumpAssistantSchema, BOOL, W("false"))
+ MDA_XSD_ATTRIBUTE_DEFAULT(DumpSchemaSchema, BOOL, W("false"))
+ MDA_XSD_ELEMENT_END(Diagnostics)
+ MDA_XSD_OPTIONAL_END()
+ MDA_XSD_ATTRIBUTE_DEFAULT(DisableAsserts, BOOL, FALSE)
+#endif
+ MDA_DEFINE_INPUT_END(Framework)
+ // Output
+ MDA_DEFINE_OUTPUT(Framework)
+ MDA_DEFINE_OUTPUT_END(Framework)
+ MDA_DEFINE_ASSISTANT_END(Framework)
+
+ // AsynchronousThreadAbort
+ MDA_DEFINE_ASSISTANT(AsynchronousThreadAbort, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(AsynchronousThreadAbort)
+ // Output
+ MDA_DEFINE_OUTPUT(AsynchronousThreadAbort)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT_REFTYPE(CallingThread, ThreadType)
+ MDA_XSD_ELEMENT_REFTYPE(AbortedThread, ThreadType)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(AsynchronousThreadAbort)
+ MDA_DEFINE_ASSISTANT_END(AsynchronousThreadAbort)
+
+ // BindingFailure
+ MDA_DEFINE_ASSISTANT(BindingFailure, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(BindingFailure)
+ // Output
+ MDA_DEFINE_OUTPUT(BindingFailure)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT(AssemblyInfo)
+ MDA_XSD_ATTRIBUTE_REQ(AppDomainId, INT32)
+ MDA_XSD_ATTRIBUTE_REQ(DisplayName, SString)
+ MDA_XSD_ATTRIBUTE_REQ(CodeBase, SString)
+ MDA_XSD_ATTRIBUTE_REQ(HResult, INT32)
+ MDA_XSD_ATTRIBUTE_REQ(BindingContextId, INT32)
+ MDA_XSD_ELEMENT_END(AssemblyInfo)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(BindingFailure)
+ MDA_DEFINE_ASSISTANT_END(BindingFailure)
+
+ // CallbackOnCollectedDelegate
+ MDA_DEFINE_ASSISTANT(CallbackOnCollectedDelegate, NULL)
+ // Input
+ MDA_DEFINE_INPUT(CallbackOnCollectedDelegate)
+ MDA_XSD_ATTRIBUTE_DEFAULT(ListSize, INT32, W("1000"))
+ MDA_DEFINE_INPUT_END(CallbackOnCollectedDelegate)
+ // Output
+ MDA_DEFINE_OUTPUT(CallbackOnCollectedDelegate)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT_REFTYPE(Delegate, MethodType)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(CallbackOnCollectedDelegate)
+ MDA_DEFINE_ASSISTANT_END(CallbackOnCollectedDelegate)
+
+ // ContextSwitchDeadlock
+ MDA_DEFINE_ASSISTANT(ContextSwitchDeadlock, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(ContextSwitchDeadlock)
+ // Output
+ MDA_DEFINE_OUTPUT(ContextSwitchDeadlock)
+ MDA_DEFINE_OUTPUT_END(ContextSwitchDeadlock)
+ MDA_DEFINE_ASSISTANT_END(ContextSwitchDeadlock)
+
+ // DangerousThreadingAPI
+ MDA_DEFINE_ASSISTANT(DangerousThreadingAPI, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(DangerousThreadingAPI)
+ // Output
+ MDA_DEFINE_OUTPUT(DangerousThreadingAPI)
+ MDA_DEFINE_OUTPUT_END(DangerousThreadingAPI)
+ MDA_DEFINE_ASSISTANT_END(DangerousThreadingAPI)
+
+ // DateTimeInvalidLocalFormat
+ MDA_DEFINE_ASSISTANT(DateTimeInvalidLocalFormat, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(DateTimeInvalidLocalFormat)
+ // Output
+ MDA_DEFINE_OUTPUT(DateTimeInvalidLocalFormat)
+ MDA_DEFINE_OUTPUT_END(DateTimeInvalidLocalFormat)
+ MDA_DEFINE_ASSISTANT_END(DateTimeInvalidLocalFormat)
+
+ // DirtyCastAndCallOnInterface
+ MDA_DEFINE_ASSISTANT(DirtyCastAndCallOnInterface, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(DirtyCastAndCallOnInterface)
+ // Output
+ MDA_DEFINE_OUTPUT(DirtyCastAndCallOnInterface)
+ MDA_DEFINE_OUTPUT_END(DirtyCastAndCallOnInterface)
+ MDA_DEFINE_ASSISTANT_END(DirtyCastAndCallOnInterface)
+
+ // DisconnectedContext
+ MDA_DEFINE_ASSISTANT(DisconnectedContext, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(DisconnectedContext)
+ // Output
+ MDA_DEFINE_OUTPUT(DisconnectedContext)
+ MDA_DEFINE_OUTPUT_END(DisconnectedContext)
+ MDA_DEFINE_ASSISTANT_END(DisconnectedContext)
+
+ // DllMainReturnsFalse
+ MDA_DEFINE_ASSISTANT(DllMainReturnsFalse, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(DllMainReturnsFalse)
+ // Output
+ MDA_DEFINE_OUTPUT(DllMainReturnsFalse)
+ MDA_DEFINE_OUTPUT_END(DllMainReturnsFalse)
+ MDA_DEFINE_ASSISTANT_END(DllMainReturnsFalse)
+
+ // ExceptionSwallowedOnCallFromCom
+ MDA_DEFINE_ASSISTANT(ExceptionSwallowedOnCallFromCom, NULL)
+ // Input
+ MDA_DEFINE_INPUT(ExceptionSwallowedOnCallFromCom)
+ MDA_DEFINE_INPUT_END(ExceptionSwallowedOnCallFromCom)
+ // Output
+ MDA_DEFINE_OUTPUT(ExceptionSwallowedOnCallFromCom)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT__REFTYPE(Method, MethodType)
+ MDA_XSD_ELEMENT__REFTYPE(Type, TypeType)
+ MDA_XSD_ELEMENT_REFTYPE(Exception, ExceptionType)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(ExceptionSwallowedOnCallFromCom)
+ MDA_DEFINE_ASSISTANT_END(ExceptionSwallowedOnCallFromCom)
+
+ // FailedQI
+ MDA_DEFINE_ASSISTANT(FailedQI, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(FailedQI)
+ // Output
+ MDA_DEFINE_OUTPUT(FailedQI)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT__REFTYPE(Type, TypeType)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(FailedQI)
+ MDA_DEFINE_ASSISTANT_END(FailedQI)
+
+ // FatalExecutionEngineError
+ MDA_DEFINE_ASSISTANT(FatalExecutionEngineError, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(FatalExecutionEngineError)
+ // Output
+ MDA_DEFINE_OUTPUT(FatalExecutionEngineError)
+ MDA_DEFINE_OUTPUT_END(FatalExecutionEngineError)
+ MDA_DEFINE_ASSISTANT_END(FatalExecutionEngineError)
+
+ // GcManagedToUnmanaged
+ MDA_DEFINE_ASSISTANT(GcManagedToUnmanaged, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(GcManagedToUnmanaged)
+ MDA_DEFINE_ASSISTANT_END(GcManagedToUnmanaged)
+
+ // GcUnmanagedToManaged
+ MDA_DEFINE_ASSISTANT(GcUnmanagedToManaged, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(GcUnmanagedToManaged)
+ MDA_DEFINE_ASSISTANT_END(GcUnmanagedToManaged)
+
+ // IllegalPrepareConstrainedRegion
+ MDA_DEFINE_ASSISTANT(IllegalPrepareConstrainedRegion, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(IllegalPrepareConstrainedRegion)
+ // Output
+ MDA_DEFINE_OUTPUT(IllegalPrepareConstrainedRegion)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT__REFTYPE(Callsite, MethodAndOffsetType)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(IllegalPrepareConstrainedRegion)
+ MDA_DEFINE_ASSISTANT_END(IllegalPrepareConstrainedRegion)
+
+ // InvalidApartmentStateChange
+ MDA_DEFINE_ASSISTANT(InvalidApartmentStateChange, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(InvalidApartmentStateChange)
+ // Output
+ MDA_DEFINE_OUTPUT(InvalidApartmentStateChange)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT_REFTYPE(Thread, ThreadType)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(InvalidApartmentStateChange)
+ MDA_DEFINE_ASSISTANT_END(InvalidApartmentStateChange)
+
+ // InvalidCERCall
+ MDA_DEFINE_ASSISTANT(InvalidCERCall, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(InvalidCERCall)
+ // Output
+ MDA_DEFINE_OUTPUT(InvalidCERCall)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT__REFTYPE(Method, MethodType)
+ MDA_XSD_ELEMENT_REFTYPE(Callsite, MethodAndOffsetType)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(InvalidCERCall)
+ MDA_DEFINE_ASSISTANT_END(InvalidCERCall)
+
+ // InvalidFunctionPointerInDelegate
+ MDA_DEFINE_ASSISTANT(InvalidFunctionPointerInDelegate, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(InvalidFunctionPointerInDelegate)
+ // Output
+ MDA_DEFINE_OUTPUT(InvalidFunctionPointerInDelegate)
+ MDA_DEFINE_OUTPUT_END(InvalidFunctionPointerInDelegate)
+ MDA_DEFINE_ASSISTANT_END(InvalidFunctionPointerInDelegate)
+
+ // InvalidGCHandleCookie
+ MDA_DEFINE_ASSISTANT(InvalidGCHandleCookie, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(InvalidGCHandleCookie)
+ // Output
+ MDA_DEFINE_OUTPUT(InvalidGCHandleCookie)
+ MDA_DEFINE_OUTPUT_END(InvalidGCHandleCookie)
+ MDA_DEFINE_ASSISTANT_END(InvalidGCHandleCookie)
+
+ // InvalidIUnknown
+ MDA_DEFINE_ASSISTANT(InvalidIUnknown, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(InvalidIUnknown)
+ // Output
+ MDA_DEFINE_OUTPUT(InvalidIUnknown)
+ MDA_DEFINE_OUTPUT_END(InvalidIUnknown)
+ MDA_DEFINE_ASSISTANT_END(InvalidIUnknown)
+
+ // InvalidMemberDeclaration
+ MDA_DEFINE_ASSISTANT(InvalidMemberDeclaration, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(InvalidMemberDeclaration)
+ // Output
+ MDA_DEFINE_OUTPUT(InvalidMemberDeclaration)
+ MDA_XSD_ONCE()
+ MDA_XSD_CHOICE()
+ MDA_XSD_ELEMENT__REFTYPE(Method, MethodType)
+ MDA_XSD_ELEMENT_REFTYPE(Field, FieldType)
+ MDA_XSD_CHOICE_END()
+ MDA_XSD_ELEMENT_REFTYPE(Type, TypeType)
+ MDA_XSD_ELEMENT__REFTYPE(Exception, ExceptionType)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(InvalidMemberDeclaration)
+ MDA_DEFINE_ASSISTANT_END(InvalidMemberDeclaration)
+
+ // InvalidOverlappedToPinvoke
+ MDA_DEFINE_ASSISTANT(InvalidOverlappedToPinvoke, NULL)
+ // Input
+ MDA_DEFINE_INPUT(InvalidOverlappedToPinvoke)
+ MDA_XSD_ATTRIBUTE__DEFAULT(JustMyCode, BOOL, W("true"))
+ MDA_DEFINE_INPUT_END(InvalidOverlappedToPinvoke)
+ // Output
+ MDA_DEFINE_OUTPUT(InvalidOverlappedToPinvoke)
+ MDA_DEFINE_OUTPUT_END(InvalidOverlappedToPinvoke)
+ MDA_DEFINE_ASSISTANT_END(InvalidOverlappedToPinvoke)
+
+ // InvalidVariant
+ MDA_DEFINE_ASSISTANT(InvalidVariant, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(InvalidVariant)
+ // Output
+ MDA_DEFINE_OUTPUT(InvalidVariant)
+ MDA_DEFINE_OUTPUT_END(InvalidVariant)
+ MDA_DEFINE_ASSISTANT_END(InvalidVariant)
+
+ // JitCompilationStart
+ MDA_DEFINE_ASSISTANT(JitCompilationStart, NULL)
+ // Input
+ MDA_DEFINE_INPUT(JitCompilationStart)
+ MDA_XSD_OPTIONAL()
+ MDA_XSD_ELEMENT_REFTYPE(Methods, MemberFilterType)
+ MDA_XSD_OPTIONAL_END()
+ MDA_XSD_ATTRIBUTE_DEFAULT(Break, BOOL, W("true"))
+ MDA_DEFINE_INPUT_END(JitCompilationStart)
+ // Output
+ MDA_DEFINE_OUTPUT(JitCompilationStart)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT_REFTYPE(Method, MethodType)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(JitCompilationStart)
+ MDA_DEFINE_ASSISTANT_END(JitCompilationStart)
+
+ // LoaderLock
+ MDA_DEFINE_ASSISTANT(LoaderLock, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(LoaderLock)
+ // Output
+ MDA_DEFINE_OUTPUT(LoaderLock)
+ MDA_DEFINE_OUTPUT_END(LoaderLock)
+ MDA_DEFINE_ASSISTANT_END(LoaderLock)
+
+ // LoadFromContext
+ MDA_DEFINE_ASSISTANT(LoadFromContext, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(LoadFromContext)
+ // Output
+ MDA_DEFINE_OUTPUT(LoadFromContext)
+ MDA_XSD_ONCE()
+ MDA_XSD__ELEMENT(AssemblyInfo)
+ MDA_XSD_ATTRIBUTE__REQ(DisplayName, SString)
+ MDA_XSD_ATTRIBUTE__REQ(CodeBase, SString)
+ MDA_XSD_ELEMENT_END(AssemblyInfo)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(LoadFromContext)
+ MDA_DEFINE_ASSISTANT_END(LoadFromContext)
+
+ // MarshalCleanupError
+ MDA_DEFINE_ASSISTANT(MarshalCleanupError, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(MarshalCleanupError)
+ // Output
+ MDA_DEFINE_OUTPUT(MarshalCleanupError)
+ MDA_DEFINE_OUTPUT_END(MarshalCleanupError)
+ MDA_DEFINE_ASSISTANT_END(MarshalCleanupError)
+
+ // Marshaling
+ MDA_DEFINE_ASSISTANT(Marshaling, NULL)
+ // Input
+ MDA_DEFINE_INPUT(Marshaling)
+ MDA_XSD_ONCE()
+ MDA_XSD_OPTIONAL()
+ MDA_XSD_ELEMENT_REFTYPE(MethodFilter, MemberFilterType)
+ MDA_XSD_OPTIONAL_END()
+ MDA_XSD_OPTIONAL()
+ MDA_XSD_ELEMENT_REFTYPE(FieldFilter, MemberFilterType)
+ MDA_XSD_OPTIONAL_END()
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_INPUT_END(Marshaling)
+ // Output
+ MDA_DEFINE_OUTPUT(Marshaling)
+ MDA_XSD_CHOICE()
+ MDA_XSD_ELEMENT_REFTYPE(MarshalingParameter, ParameterType)
+ MDA_XSD_ELEMENT_REFTYPE(MarshalingField, FieldType)
+ MDA_XSD_CHOICE_END()
+ MDA_DEFINE_OUTPUT_END(Marshaling)
+ MDA_DEFINE_ASSISTANT_END(Marshaling)
+
+ // MemberInfoCacheCreation
+ MDA_DEFINE_ASSISTANT(MemberInfoCacheCreation, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(MemberInfoCacheCreation)
+ // Output
+ MDA_DEFINE_OUTPUT(MemberInfoCacheCreation)
+ MDA_DEFINE_OUTPUT_END(MemberInfoCacheCreation)
+ MDA_DEFINE_ASSISTANT_END(MemberInfoCacheCreation)
+
+ // ModuloObjectHashcode
+ MDA_DEFINE_ASSISTANT(ModuloObjectHashcode, W("moh"))
+ // Input
+ MDA_DEFINE_INPUT(ModuloObjectHashcode)
+ MDA_XSD_ATTRIBUTE_DEFAULT(Modulus, INT32, W("1"))
+ MDA_DEFINE_INPUT_END(ModuloObjectHashcode)
+ MDA_DEFINE_ASSISTANT_END(ModuloObjectHashcode)
+
+ // NonComVisibleBaseClass
+ MDA_DEFINE_ASSISTANT(NonComVisibleBaseClass, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(NonComVisibleBaseClass)
+ // Output
+ MDA_DEFINE_OUTPUT(NonComVisibleBaseClass)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT_REFTYPE(DerivedType, TypeType)
+ MDA_XSD_ELEMENT_REFTYPE(BaseType, TypeType)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(NonComVisibleBaseClass)
+ MDA_DEFINE_ASSISTANT_END(NonComVisibleBaseClass)
+
+ // NotMarshalable
+ MDA_DEFINE_ASSISTANT(NotMarshalable, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(NotMarshalable)
+ // Output
+ MDA_DEFINE_OUTPUT(NotMarshalable)
+ MDA_DEFINE_OUTPUT_END(NotMarshalable)
+ MDA_DEFINE_ASSISTANT_END(NotMarshalable)
+
+ // OpenGenericCERCall
+ MDA_DEFINE_ASSISTANT(OpenGenericCERCall, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(OpenGenericCERCall)
+ // Output
+ MDA_DEFINE_OUTPUT(OpenGenericCERCall)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT__REFTYPE(Method, MethodType)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(OpenGenericCERCall)
+ MDA_DEFINE_ASSISTANT_END(OpenGenericCERCall)
+
+ // OverlappedFreeError
+ MDA_DEFINE_ASSISTANT(OverlappedFreeError, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(OverlappedFreeError)
+ // Output
+ MDA_DEFINE_OUTPUT(OverlappedFreeError)
+ MDA_DEFINE_OUTPUT_END(OverlappedFreeError)
+ MDA_DEFINE_ASSISTANT_END(OverlappedFreeError)
+
+ // PInvokeLog
+ MDA_DEFINE_ASSISTANT(PInvokeLog, NULL)
+ // Input
+ MDA_DEFINE_INPUT(PInvokeLog)
+ MDA_XSD_OPTIONAL()
+ MDA_XSD_ELEMENT(Filter)
+ MDA_XSD_PERIODIC()
+ MDA_XSD__ELEMENT(Match)
+ MDA_XSD_ATTRIBUTE__REQ(DllName, SString)
+ MDA_XSD_ELEMENT_END(Match)
+ MDA_XSD_PERIODIC_END()
+ MDA_XSD_ELEMENT_END(Filter)
+ MDA_XSD_OPTIONAL_END()
+ MDA_DEFINE_INPUT_END(PInvokeLog)
+ // Output
+ MDA_DEFINE_OUTPUT(PInvokeLog)
+ MDA_XSD_GROUP_REF(PInvokeGrpType)
+ MDA_DEFINE_OUTPUT_END(PInvokeLog)
+ MDA_DEFINE_ASSISTANT_END(PInvokeLog)
+
+#ifdef _TARGET_X86_
+ // PInvokeStackImbalance
+ MDA_DEFINE_ASSISTANT(PInvokeStackImbalance, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(PInvokeStackImbalance)
+ // Output
+ MDA_DEFINE_OUTPUT(PInvokeStackImbalance)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT__REFTYPE(Method, MethodType)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(PInvokeStackImbalance)
+ MDA_DEFINE_ASSISTANT_END(PInvokeStackImbalance)
+#endif
+
+ // RaceOnRCWCleanup
+ MDA_DEFINE_ASSISTANT(RaceOnRCWCleanup, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(RaceOnRCWCleanup)
+ // Output
+ MDA_DEFINE_OUTPUT(RaceOnRCWCleanup)
+ MDA_DEFINE_OUTPUT_END(RaceOnRCWCleanup)
+ MDA_DEFINE_ASSISTANT_END(RaceOnRCWCleanup)
+
+ // Reentrancy
+ MDA_DEFINE_ASSISTANT(Reentrancy, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(Reentrancy)
+ // Output
+ MDA_DEFINE_OUTPUT(Reentrancy)
+ MDA_DEFINE_OUTPUT_END(Reentrancy)
+ MDA_DEFINE_ASSISTANT_END(Reentrancy)
+
+ // ReleaseHandleFailed
+ MDA_DEFINE_ASSISTANT(ReleaseHandleFailed, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(ReleaseHandleFailed)
+ // Output
+ MDA_DEFINE_OUTPUT(ReleaseHandleFailed)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT__REFTYPE(Type, TypeType)
+ MDA_XSD_ELEMENT(Handle)
+ MDA_XSD_ATTRIBUTE_REQ(Value, SString)
+ MDA_XSD_ELEMENT_END(Handle)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(ReleaseHandleFailed)
+ MDA_DEFINE_ASSISTANT_END(ReleaseHandleFailed)
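+ // For reference, MdaReleaseHandleFailed::ReportViolation (mdaassistants.cpp)
+ // fills this shape with a Type child and a Handle child whose Value attribute
+ // carries the raw handle; the output looks roughly like (element names are
+ // lower-cased on output):
+ // <releaseHandleFailed>
+ // <type name="..."/>
+ // <handle value="0x..."/>
+ // </releaseHandleFailed>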
+
+ // ReportAvOnComRelease
+ MDA_DEFINE_ASSISTANT(ReportAvOnComRelease, NULL)
+ // Input
+ MDA_DEFINE_INPUT(ReportAvOnComRelease)
+ MDA_XSD_ATTRIBUTE_DEFAULT(AllowAv, BOOL, W("false"))
+ MDA_DEFINE_INPUT_END(ReportAvOnComRelease)
+ // Output
+ MDA_DEFINE_OUTPUT(ReportAvOnComRelease)
+ MDA_DEFINE_OUTPUT_END(ReportAvOnComRelease)
+ MDA_DEFINE_ASSISTANT_END(ReportAvOnComRelease)
+
+ // StreamWriterBufferedDataLost
+ MDA_DEFINE_ASSISTANT(StreamWriterBufferedDataLost, NULL)
+ // Input
+ MDA_DEFINE_INPUT(StreamWriterBufferedDataLost)
+ MDA_XSD_ATTRIBUTE_DEFAULT(CaptureAllocatedCallStack, BOOL, W("false"))
+ MDA_DEFINE_INPUT_END(StreamWriterBufferedDataLost)
+ // Output
+ MDA_DEFINE_OUTPUT(StreamWriterBufferedDataLost)
+ MDA_DEFINE_OUTPUT_END(StreamWriterBufferedDataLost)
+ MDA_DEFINE_ASSISTANT_END(StreamWriterBufferedDataLost)
+
+ // VirtualCERCall
+ MDA_DEFINE_ASSISTANT(VirtualCERCall, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(VirtualCERCall)
+ // Output
+ MDA_DEFINE_OUTPUT(VirtualCERCall)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT__REFTYPE(Method, MethodType)
+ MDA_XSD_ELEMENT__REFTYPE(Callsite, MethodAndOffsetType)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(VirtualCERCall)
+ MDA_DEFINE_ASSISTANT_END(VirtualCERCall)
+
+ //
+ // Framework helper assistants
+ //
+#ifdef _DEBUG
+ // XmlValidationError
+ MDA_DEFINE_ASSISTANT(XmlValidationError, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(XmlValidationError)
+ // Output
+ MDA_DEFINE_OUTPUT(XmlValidationError)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT(ViolatingXml)
+ MDA_XSD_ELEMENT_END(ViolatingXml)
+ MDA_XSD_ELEMENT(ViolatedXsd)
+ MDA_XSD_ELEMENT_END(ViolatedXsd)
+ MDA_XSD_ONCE_END()
+ MDA_DEFINE_OUTPUT_END(XmlValidationError)
+ MDA_DEFINE_ASSISTANT_END(XmlValidationError)
+#endif
+
+ // InvalidConfigFile
+ MDA_DEFINE_ASSISTANT(InvalidConfigFile, NULL)
+ // Input
+ MDA_DEFINE_INPUT_AS_SWITCH(InvalidConfigFile)
+ // Output
+ MDA_DEFINE_OUTPUT(InvalidConfigFile)
+ MDA_XSD_ATTRIBUTE_REQ(ConfigFile, SString)
+ MDA_DEFINE_OUTPUT_END(InvalidConfigFile)
+ MDA_DEFINE_ASSISTANT_END(InvalidConfigFile)
+
+
+ //
+ // Framework Type and Element definitions
+ //
+ MDA_XSD_OUTPUT_ONLY()
+
+ // Module
+ MDA_XSD_DEFINE_TYPE(ModuleType)
+ MDA_XSD_ATTRIBUTE__OPT(Name, SString)
+ MDA_XSD_DEFINE_TYPE_END(ModuleType)
+
+ // Type
+ MDA_XSD_DEFINE_TYPE(TypeType)
+ MDA_XSD_ATTRIBUTE__REQ(Name, SString)
+ MDA_XSD_DEFINE_TYPE_END(TypeType)
+
+ // Parameter
+ MDA_XSD_DEFINE_TYPE(ParameterType)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT_REFTYPE(DeclaringMethod, MethodType)
+ MDA_XSD_ONCE_END()
+
+ MDA_XSD_ATTRIBUTE_OPT(Index, INT32)
+ MDA_XSD_ATTRIBUTE__OPT(Name, SString)
+ MDA_XSD_DEFINE_TYPE_END(ParameterType)
+
+ // Method
+ MDA_XSD_DEFINE_TYPE(MethodType)
+ MDA_XSD_ATTRIBUTE__REQ(Name, SString)
+ MDA_XSD_DEFINE_TYPE_END(MethodType)
+
+ // Field
+ MDA_XSD_DEFINE_TYPE(FieldType)
+ MDA_XSD_ATTRIBUTE_REQ(Name, SString)
+ MDA_XSD_DEFINE_TYPE_END(FieldType)
+
+ // Thread
+ MDA_XSD_DEFINE_TYPE(ThreadType)
+ MDA_XSD_ATTRIBUTE_REQ(OsId, INT32)
+ MDA_XSD_ATTRIBUTE_OPT(ManagedId, INT32)
+ MDA_XSD_DEFINE_TYPE_END(ThreadType)
+
+ // Exception
+ MDA_XSD_DEFINE_TYPE(ExceptionType)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT__REFTYPE(Type, TypeType)
+ MDA_XSD_ONCE_END()
+ MDA_XSD_ATTRIBUTE_REQ(Message, SString)
+ MDA_XSD_DEFINE_TYPE_END(ExceptionType)
+
+ // MethodAndOffset
+ MDA_XSD_DEFINE_TYPE(MethodAndOffsetType)
+ MDA_XSD_ATTRIBUTE__REQ(Name, SString)
+ MDA_XSD_ATTRIBUTE_OPT(Offset, SString)
+ MDA_XSD_DEFINE_TYPE_END(MethodAndOffsetType)
+
+ // PInvoke
+ MDA_XSD_GROUP(PInvokeGrpType)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT__REFTYPE(Method, MethodType)
+ MDA_XSD_ELEMENT(DllImport)
+ MDA_XSD_ATTRIBUTE_REQ(EntryPoint, SString)
+ MDA_XSD_ATTRIBUTE_REQ(DllName, SString)
+ MDA_XSD_ELEMENT_END(DllImport)
+ MDA_XSD_ONCE_END()
+ MDA_XSD_GROUP_END(PInvokeGrpType)
+
+ MDA_XSD_OUTPUT_ONLY_END()
+
+
+
+ MDA_XSD_INPUT_ONLY()
+
+ // MemberFilter
+ MDA_XSD_DEFINE_TYPE(MemberFilterType)
+ MDA_XSD_PERIODIC()
+ MDA_XSD_ELEMENT(Match)
+ MDA_XSD_ATTRIBUTE_DEFAULT(Module, SString, NULL)
+ MDA_XSD_ATTRIBUTE__REQ(Name, SString)
+ MDA_XSD_ATTRIBUTE__OPT(JustMyCode, BOOL)
+ MDA_XSD_ELEMENT_END(Match)
+ MDA_XSD_PERIODIC_END()
+
+ MDA_XSD_ATTRIBUTE_DEFAULT(JustMyCode, BOOL, W("true"))
+ MDA_XSD_DEFINE_TYPE_END(MemberFilterType)
+
+
+ MDA_XSD_INPUT_ONLY_END()
+
+
+
+
+
diff --git a/src/vm/mdadac.cpp b/src/vm/mdadac.cpp
new file mode 100644
index 0000000000..d8b7c23fae
--- /dev/null
+++ b/src/vm/mdadac.cpp
@@ -0,0 +1,49 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#include "mda.h"
+#include "mdaassistants.h"
+#include "sstring.h"
+#include "daccess.h"
+
+#ifdef MDA_SUPPORTED
+MdaStaticHeap g_mdaStaticHeap =
+{
+ { 0 }, // m_assistants[]
+ 0, // m_pMda
+ { 0 }, // m_mda[]
+
+#define MDA_ASSISTANT_STATIC_INIT
+#include "mdaschema.inl"
+#undef MDA_ASSISTANT_STATIC_INIT
+};
+
+
+//
+// MdaManagedDebuggingAssistants
+//
+void ManagedDebuggingAssistants::AllocateManagedDebuggingAssistants()
+{
+ WRAPPER_NO_CONTRACT;
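+ // Placement-new into the reserved m_mda buffer inside g_mdaStaticHeap; no heap allocation occurs.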
+ g_mdaStaticHeap.m_pMda = new (&g_mdaStaticHeap.m_mda) ManagedDebuggingAssistants();
+}
+
+ManagedDebuggingAssistants::ManagedDebuggingAssistants()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifndef DACCESS_COMPILE
+ Initialize();
+#endif
+}
+#endif // MDA_SUPPORTED
+
+
+
+
+
+
diff --git a/src/vm/mdagroups.inl b/src/vm/mdagroups.inl
new file mode 100644
index 0000000000..2904f38c61
--- /dev/null
+++ b/src/vm/mdagroups.inl
@@ -0,0 +1,74 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+//
+// Groups
+//
+
+
+// These are the MDAs that are on by default when a debugger is attached.
+// These ABSOLUTELY MUST NOT CHANGE BEHAVIOR. They must be purely checks
+// with absolutely no side effects.
+// Violating this will cause an app to behave differently under a debugger
+// vs. not under a debugger, and that will really confuse end-users.
+// (eg, "My app only does XYZ when running under a debugger.")
+// If you have any questions about this, please follow up with the
+// managed debugger team for further guidance.
+MDA_GROUP_DEFINITION(managedDebugger)
+ MDA_GROUP_MEMBER(AsynchronousThreadAbort)
+ MDA_GROUP_MEMBER(BindingFailure)
+ MDA_GROUP_MEMBER(CallbackOnCollectedDelegate)
+ MDA_GROUP_MEMBER(ContextSwitchDeadlock)
+ MDA_GROUP_MEMBER(DangerousThreadingAPI)
+ MDA_GROUP_MEMBER(DateTimeInvalidLocalFormat)
+ MDA_GROUP_MEMBER(DisconnectedContext)
+ MDA_GROUP_MEMBER(DllMainReturnsFalse)
+ MDA_GROUP_MEMBER(ExceptionSwallowedOnCallFromCom)
+ MDA_GROUP_MEMBER(FailedQI)
+ MDA_GROUP_MEMBER(FatalExecutionEngineError)
+ MDA_GROUP_MEMBER(InvalidApartmentStateChange)
+ MDA_GROUP_MEMBER(InvalidFunctionPointerInDelegate)
+ MDA_GROUP_MEMBER(InvalidMemberDeclaration)
+ MDA_GROUP_MEMBER(InvalidOverlappedToPinvoke)
+ MDA_GROUP_MEMBER(InvalidVariant)
+ MDA_GROUP_MEMBER(LoaderLock)
+ MDA_GROUP_MEMBER(LoadFromContext)
+ MDA_GROUP_MEMBER(MarshalCleanupError)
+ MDA_GROUP_MEMBER(NonComVisibleBaseClass)
+ MDA_GROUP_MEMBER(NotMarshalable)
+#ifdef _X86_
+ MDA_GROUP_MEMBER(PInvokeStackImbalance)
+#endif
+ MDA_GROUP_MEMBER(RaceOnRCWCleanup)
+ MDA_GROUP_MEMBER(Reentrancy)
+ MDA_GROUP_MEMBER(ReleaseHandleFailed)
+ MDA_GROUP_MEMBER(ReportAvOnComRelease)
+ MDA_GROUP_MEMBER(StreamWriterBufferedDataLost)
+MDA_GROUP_DEFINITION_END(managedDebugger)
+
+MDA_GROUP_DEFINITION(unmanagedDebugger)
+ MDA_GROUP_MEMBER(Reentrancy)
+ MDA_GROUP_MEMBER(LoaderLock)
+MDA_GROUP_DEFINITION_END(unmanagedDebugger)
+
+MDA_GROUP_DEFINITION(halting)
+ MDA_GROUP_MEMBER(CallbackOnCollectedDelegate)
+ MDA_GROUP_MEMBER(ContextSwitchDeadlock)
+ MDA_GROUP_MEMBER(DateTimeInvalidLocalFormat)
+ MDA_GROUP_MEMBER(DisconnectedContext)
+ MDA_GROUP_MEMBER(FatalExecutionEngineError)
+ MDA_GROUP_MEMBER(InvalidFunctionPointerInDelegate)
+ MDA_GROUP_MEMBER(InvalidMemberDeclaration)
+ MDA_GROUP_MEMBER(InvalidVariant)
+ MDA_GROUP_MEMBER(LoaderLock)
+ MDA_GROUP_MEMBER(NonComVisibleBaseClass)
+#ifdef _X86_
+ MDA_GROUP_MEMBER(PInvokeStackImbalance)
+#endif
+ MDA_GROUP_MEMBER(RaceOnRCWCleanup)
+ MDA_GROUP_MEMBER(Reentrancy)
+MDA_GROUP_DEFINITION_END(halting)
+
diff --git a/src/vm/mdamacroscrubber.inl b/src/vm/mdamacroscrubber.inl
new file mode 100644
index 0000000000..36b9ea7864
--- /dev/null
+++ b/src/vm/mdamacroscrubber.inl
@@ -0,0 +1,296 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
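+// This file is included twice around each expansion of mdaschema.inl: the
+// first pass (MDA_MACRO_DEFAULTS not yet defined) supplies an empty default
+// for every MDA_* macro the includer did not define, so each consumer only
+// defines the handful of macros it cares about; the second pass undefines
+// the whole set so the next consumer starts from a clean slate.
+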
+#ifndef MDA_MACRO_DEFAULTS
+#define MDA_MACRO_DEFAULTS
+
+#ifndef MDA_DEFINE_ASSISTANT
+#define MDA_DEFINE_ASSISTANT(ASSISTANT, SOS) // Did you forget to define a class Mda##Assistant?
+#endif
+#ifndef MDA_DEFINE_ASSISTANT_END
+#define MDA_DEFINE_ASSISTANT_END(ASSISTANT)
+#endif
+#ifndef MDA_GFLAG_ACTIVATION
+#define MDA_GFLAG_ACTIVATION(ASSISTANT, GFLAG)
+#endif
+
+#ifndef MDA_DEFINE_INPUT
+#define MDA_DEFINE_INPUT(ASSISTANT)
+#endif
+#ifndef MDA_DEFINE_INPUT_END
+#define MDA_DEFINE_INPUT_END(ASSISTANT)
+#endif
+#ifndef MDA_DEFINE_INPUT_AS_SWITCH
+#define MDA_DEFINE_INPUT_AS_SWITCH(ASSISTANT)
+#endif
+#ifndef MDA_DEFINE_OUTPUT
+#define MDA_DEFINE_OUTPUT(ASSISTANT)
+#endif
+#ifndef MDA_DEFINE_OUTPUT_END
+#define MDA_DEFINE_OUTPUT_END(ASSISTANT)
+#endif
+
+#ifndef MDA_XSD_INPUT_ONLY
+#define MDA_XSD_INPUT_ONLY()
+#endif
+#ifndef MDA_XSD_INPUT_ONLY_END
+#define MDA_XSD_INPUT_ONLY_END()
+#endif
+#ifndef MDA_XSD_OUTPUT_ONLY
+#define MDA_XSD_OUTPUT_ONLY()
+#endif
+#ifndef MDA_XSD_OUTPUT_ONLY_END
+#define MDA_XSD_OUTPUT_ONLY_END()
+#endif
+
+#ifndef MDA_XSD_SCHEMA_SCHEMA
+#define MDA_XSD_SCHEMA_SCHEMA()
+#endif
+#ifndef MDA_XSD_SCHEMA_SCHEMA_END
+#define MDA_XSD_SCHEMA_SCHEMA_END()
+#endif
+
+#ifndef MDA_XSD_ASSISTANT_SCHEMA
+#define MDA_XSD_ASSISTANT_SCHEMA()
+#endif
+#ifndef MDA_XSD_ASSISTANT_SCHEMA_END
+#define MDA_XSD_ASSISTANT_SCHEMA_END()
+#endif
+
+#ifndef MDA_XSD_ELEMENT
+#define MDA_XSD_ELEMENT(NAME)
+#endif
+#ifndef MDA_XSD__ELEMENT
+#define MDA_XSD__ELEMENT(NAME)
+#endif
+#ifndef MDA_XSD_ELEMENT_END
+#define MDA_XSD_ELEMENT_END(NAME)
+#endif
+#ifndef MDA_XSD_ELEMENT_ANY
+#define MDA_XSD_ELEMENT_ANY(NAME)
+#endif
+#ifndef MDA_XSD_ELEMENT__ANY
+#define MDA_XSD_ELEMENT__ANY(NAME)
+#endif
+#ifndef MDA_XSD_ELEMENT_ANY_END
+#define MDA_XSD_ELEMENT_ANY_END(NAME)
+#endif
+#ifndef MDA_XSD_DEFINE_ELEMENT
+#define MDA_XSD_DEFINE_ELEMENT(NAME)
+#endif
+#ifndef MDA_XSD_DEFINE_ELEMENT_END
+#define MDA_XSD_DEFINE_ELEMENT_END(NAME)
+#endif
+
+#ifndef MDA_XSD_DEFINE_TYPE
+#define MDA_XSD_DEFINE_TYPE(NAME)
+#endif
+#ifndef MDA_XSD_DEFINE_TYPE_END
+#define MDA_XSD_DEFINE_TYPE_END(NAME)
+#endif
+#ifndef MDA_XSD_ELEMENT_REF
+#define MDA_XSD_ELEMENT_REF(NAME)
+#endif
+#ifndef MDA_XSD_TYPEDEF_ELEMENT
+#define MDA_XSD_TYPEDEF_ELEMENT(NAME, TYPE)
+#endif
+#ifndef MDA_XSD_ELEMENT_REFTYPE
+#define MDA_XSD_ELEMENT_REFTYPE(NAME, TYPE)
+#endif
+#ifndef MDA_XSD_ELEMENT__REFTYPE
+#define MDA_XSD_ELEMENT__REFTYPE(NAME, TYPE)
+#endif
+#ifndef MDA_XSD_ELEMENT_EXTEND_TYPE
+#define MDA_XSD_ELEMENT_EXTEND_TYPE(NAME, TYPE)
+#endif
+#ifndef MDA_XSD_ELEMENT_EXTEND__TYPE
+#define MDA_XSD_ELEMENT_EXTEND__TYPE(NAME, TYPE)
+#endif
+#ifndef MDA_XSD_ELEMENT_EXTEND_TYPE_END
+#define MDA_XSD_ELEMENT_EXTEND_TYPE_END(NAME, TYPE)
+#endif
+
+#ifndef MDA_XSD_DEFINE_EXTEND_ELEMENT
+#define MDA_XSD_DEFINE_EXTEND_ELEMENT(NAME, TYPE)
+#endif
+#ifndef MDA_XSD_DEFINE_EXTEND_ELEMENT_END
+#define MDA_XSD_DEFINE_EXTEND_ELEMENT_END(NAME, TYPE)
+#endif
+
+#ifndef MDA_XSD_ATTRIBUTE_OPT
+#define MDA_XSD_ATTRIBUTE_OPT(NAME,TYPE)
+#endif
+#ifndef MDA_XSD_ATTRIBUTE__OPT
+#define MDA_XSD_ATTRIBUTE__OPT(NAME,TYPE)
+#endif
+#ifndef MDA_XSD_ATTRIBUTE_REQ
+#define MDA_XSD_ATTRIBUTE_REQ(NAME,TYPE)
+#endif
+#ifndef MDA_XSD_ATTRIBUTE__REQ
+#define MDA_XSD_ATTRIBUTE__REQ(NAME,TYPE)
+#endif
+#ifndef MDA_XSD_ATTRIBUTE_DEFAULT
+#define MDA_XSD_ATTRIBUTE_DEFAULT(NAME,TYPE,DEFAULT)
+#endif
+#ifndef MDA_XSD_ATTRIBUTE__DEFAULT
+#define MDA_XSD_ATTRIBUTE__DEFAULT(NAME,TYPE,DEFAULT)
+#endif
+#ifndef MDA_XSD_DEFINE_ATTRIBUTE
+#define MDA_XSD_DEFINE_ATTRIBUTE(NAME,TYPE)
+#endif
+
+
+#ifndef MDA_XSD_COMPLEX_TYPE
+#define MDA_XSD_COMPLEX_TYPE()
+#endif
+#ifndef MDA_XSD_COMPLEX_TYPE_END
+#define MDA_XSD_COMPLEX_TYPE_END()
+#endif
+
+#ifndef MDA_XSD_ONCE
+#define MDA_XSD_ONCE()
+#endif
+#ifndef MDA_XSD_ONCE_END
+#define MDA_XSD_ONCE_END()
+#endif
+#ifndef MDA_XSD_OPTIONAL
+#define MDA_XSD_OPTIONAL()
+#endif
+#ifndef MDA_XSD_OPTIONAL_END
+#define MDA_XSD_OPTIONAL_END()
+#endif
+#ifndef MDA_XSD_PERIODIC
+#define MDA_XSD_PERIODIC()
+#endif
+#ifndef MDA_XSD_PERIODIC_END
+#define MDA_XSD_PERIODIC_END()
+#endif
+
+#ifndef MDA_XSD_GROUP
+#define MDA_XSD_GROUP(NAME)
+#endif
+#ifndef MDA_XSD_GROUP_END
+#define MDA_XSD_GROUP_END(NAME)
+#endif
+#ifndef MDA_XSD_GROUP_REF
+#define MDA_XSD_GROUP_REF(NAME)
+#endif
+
+#ifndef MDA_XSD_DEFINE_EXTEND_TYPE
+#define MDA_XSD_DEFINE_EXTEND_TYPE(NAME, TYPE)
+#endif
+#ifndef MDA_XSD_DEFINE_EXTEND_TYPE_END
+#define MDA_XSD_DEFINE_EXTEND_TYPE_END(NAME, TYPE)
+#endif
+
+#ifndef MDA_XSD_EXTENSION
+#define MDA_XSD_EXTENSION()
+#endif
+#ifndef MDA_XSD_EXTENSION_END
+#define MDA_XSD_EXTENSION_END()
+#endif
+
+#ifndef MDA_XSD_CHOICE
+#define MDA_XSD_CHOICE()
+#endif
+#ifndef MDA_XSD_CHOICE_END
+#define MDA_XSD_CHOICE_END()
+#endif
+
+#ifndef MDA_GROUP_DEFINITION
+#define MDA_GROUP_DEFINITION(NAME)
+#endif
+#ifndef MDA_GROUP_DEFINITION_END
+#define MDA_GROUP_DEFINITION_END(NAME)
+#endif
+#ifndef MDA_GROUP_MEMBER
+#define MDA_GROUP_MEMBER(NAME)
+#endif
+
+
+
+#else
+#undef MDA_MACRO_DEFAULTS
+
+#undef MDA_DEFINE_SCHEMA
+#undef MDA_XSD_ASSISTANT_SCHEMA
+#undef MDA_XSD_ASSISTANT_SCHEMA_END
+#undef MDA_XSD_SCHEMA_SCHEMA
+#undef MDA_XSD_SCHEMA_SCHEMA_END
+
+#undef MDA_DEFINE_ASSISTANT
+#undef MDA_DEFINE_ASSISTANT_END
+#undef MDA_GFLAG_ACTIVATION
+
+#undef MDA_DEFINE_INPUT
+#undef MDA_DEFINE_INPUT_END
+#undef MDA_DEFINE_INPUT_AS_SWITCH
+#undef MDA_DEFINE_OUTPUT
+#undef MDA_DEFINE_OUTPUT_END
+
+#undef MDA_XSD_INPUT_ONLY
+#undef MDA_XSD_INPUT_ONLY_END
+#undef MDA_XSD_OUTPUT_ONLY
+#undef MDA_XSD_OUTPUT_ONLY_END
+
+#undef MDA_XSD_ELEMENT
+#undef MDA_XSD__ELEMENT
+#undef MDA_XSD_ELEMENT_ANY
+#undef MDA_XSD_ELEMENT__ANY
+#undef MDA_XSD_ELEMENT_ANY_END
+#undef MDA_XSD_ELEMENT_END
+#undef MDA_XSD_DEFINE_ELEMENT
+#undef MDA_XSD_DEFINE_ELEMENT_END
+
+#undef MDA_XSD_DEFINE_TYPE
+#undef MDA_XSD_DEFINE_TYPE_END
+#undef MDA_XSD_ELEMENT_REF
+#undef MDA_XSD_TYPEDEF_ELEMENT
+#undef MDA_XSD_ELEMENT_REFTYPE
+#undef MDA_XSD_ELEMENT__REFTYPE
+#undef MDA_XSD_ELEMENT_EXTEND_TYPE
+#undef MDA_XSD_ELEMENT_EXTEND__TYPE
+#undef MDA_XSD_ELEMENT_EXTEND_TYPE_END
+
+#undef MDA_XSD_DEFINE_EXTEND_ELEMENT
+#undef MDA_XSD_DEFINE_EXTEND_ELEMENT_END
+
+#undef MDA_XSD_ATTRIBUTE_OPT
+#undef MDA_XSD_ATTRIBUTE__OPT
+#undef MDA_XSD_ATTRIBUTE_REQ
+#undef MDA_XSD_ATTRIBUTE__REQ
+#undef MDA_XSD_ATTRIBUTE_DEFAULT
+#undef MDA_XSD_ATTRIBUTE__DEFAULT
+#undef MDA_XSD_DEFINE_ATTRIBUTE
+
+
+#undef MDA_XSD_COMPLEX_TYPE
+#undef MDA_XSD_COMPLEX_TYPE_END
+
+#undef MDA_XSD_ONCE
+#undef MDA_XSD_ONCE_END
+#undef MDA_XSD_OPTIONAL
+#undef MDA_XSD_OPTIONAL_END
+#undef MDA_XSD_PERIODIC
+#undef MDA_XSD_PERIODIC_END
+
+#undef MDA_XSD_GROUP
+#undef MDA_XSD_GROUP_END
+#undef MDA_XSD_GROUP_REF
+
+#undef MDA_XSD_DEFINE_EXTEND_TYPE
+#undef MDA_XSD_DEFINE_EXTEND_TYPE_END
+#undef MDA_XSD_EXTENSION
+#undef MDA_XSD_EXTENSION_END
+
+#undef MDA_XSD_CHOICE
+#undef MDA_XSD_CHOICE_END
+
+#undef MDA_GROUP_DEFINITION
+#undef MDA_GROUP_DEFINITION_END
+#undef MDA_GROUP_MEMBER
+
+
+#endif
diff --git a/src/vm/mdaschema.inl b/src/vm/mdaschema.inl
new file mode 100644
index 0000000000..fd0969b675
--- /dev/null
+++ b/src/vm/mdaschema.inl
@@ -0,0 +1,576 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+//
+// MDA_DEFINE_ASSISTANT_ENUMERATION
+//
+#ifdef MDA_DEFINE_ASSISTANT_ENUMERATION
+#define MDA_DEFINE_ASSISTANT(NAME, ABBR) MdaElemDef(NAME),
+#endif
+
+//
+// MDA_ASSISTANT_NAME
+//
+#ifdef MDA_ASSISTANT_NAME
+#define MDA_DEFINE_ASSISTANT(NAME, ABBR) L#NAME,
+#endif
+
+//
+// MDA_ASSISTANT_ABBR
+//
+#ifdef MDA_ASSISTANT_ABBR
+#define MDA_DEFINE_ASSISTANT(NAME, ABBR) ABBR,
+#endif
+
+//
+// MDA_ASSISTANT_STRING
+//
+#ifdef MDA_ASSISTANT_STRING
+#define MDA_DEFINE_ASSISTANT(NAME, ABBR) L#NAME
+#endif
+
+//
+// MDA_ASSISTANT_HEAP_RAW
+//
+#ifdef MDA_ASSISTANT_HEAP_RAW
+#define MDA_DEFINE_ASSISTANT(NAME, ABBR) Mda##NAME m_mda##NAME;
+#endif
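+//
+// For example, MdaStaticHeap (mdaassistants.h) includes this file under
+// MDA_ASSISTANT_HEAP_RAW, which expands to one embedded member per assistant:
+//
+// MdaFramework m_mdaFramework;
+// MdaJitCompilationStart m_mdaJitCompilationStart;
+// ... (one per MDA_DEFINE_ASSISTANT entry)
+//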
+
+//
+// MDA_ASSISTANT_STATIC_INIT
+//
+#ifdef MDA_ASSISTANT_STATIC_INIT
+#define MDA_DEFINE_ASSISTANT(NAME, ABBR) \
+ { \
+ MdaElemDef(NAME), /* m_assistantDeclDef */ \
+ MdaElemDef(NAME##Msg), /* m_assistantMsgDeclDef */ \
+ 0 /* m_bSuppressDialog */ \
+ },
+#endif
+
+//
+// MDA_VALIDATE_MEMBER_LAYOUT
+//
+#ifdef MDA_VALIDATE_MEMBER_LAYOUT
+// See MDA_ASSISTANT_BASE_MEMBERS for details on why we're asserting that these fields have matching offsets.
+#define MDA_DEFINE_ASSISTANT(NAME, ABBR) \
+ static_assert_no_msg(offsetof(MdaAssistant, m_assistantDeclDef) == offsetof(Mda##NAME, m_assistantDeclDef)); \
+ static_assert_no_msg(offsetof(MdaAssistant, m_assistantMsgDeclDef) == offsetof(Mda##NAME, m_assistantMsgDeclDef)); \
+ static_assert_no_msg(offsetof(MdaAssistant, m_bSuppressDialog) == offsetof(Mda##NAME, m_bSuppressDialog));
+#endif
+
+//
+// MDA_ASSISTANT_IS_SWITCH
+//
+#ifdef MDA_ASSISTANT_IS_SWITCH
+#define MDA_DEFINE_INPUT_AS_SWITCH(ASSISTANT) true,
+#define MDA_DEFINE_INPUT(ASSISTANT) false,
+#endif
+
+//
+// MDA_DEFINE_GROUPS
+//
+#ifdef MDA_DEFINE_GROUPS
+#define MDA_GROUP_DEFINITION(NAME) \
+ pGroup = arrayFactory.Create(); \
+ aGroups.Append(pGroup);
+#define MDA_GROUP_MEMBER(NAME) pGroup->Append(MdaElemDef(NAME));
+#endif
+
+//
+// MDA_ACTIVATE_GROUPS
+//
+#ifdef MDA_ACTIVATE_GROUPS
+#define MDA_GROUP_DEFINITION(NAME) \
+ if (sszActivationMechanism.EqualsCaseInsensitive(L#NAME)) \
+ ActivateGroup(L#NAME, aGroups[cGroup], &mdaXmlPairs); \
+ cGroup++;
+#endif
+
+//
+// MDA_ACTIVATE_SINGLTON_GROUPS
+//
+#ifdef MDA_ACTIVATE_SINGLTON_GROUPS
+#define MDA_DEFINE_ASSISTANT(NAME, ABBR) \
+ if (sszActivationMechanism.EqualsCaseInsensitive(L#NAME)) \
+ mdaXmlPairs.Set(ToLowerFirstChar(L#NAME, &sstringFactory), \
+ GetSwitchActivationXml(MdaElemDef(NAME)));
+#endif
+
+
+//
+// MDA_ELEMENT_DEFINITION_ENUMERATION
+//
+#ifdef MDA_ELEMENT_DEFINITION_ENUMERATION
+#define MDA_XSD_DEFINE_ELEMENT(NAME) MdaElemDef(NAME),
+#define MDA_XSD_TYPEDEF_ELEMENT(NAME, TYPE) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_XSD_DEFINE_EXTEND_ELEMENT(NAME, TYPE) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_XSD_DEFINE_EXTEND_TYPE(NAME, TYPE) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_XSD_GROUP(NAME) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_XSD_DEFINE_TYPE(NAME) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_DEFINE_ASSISTANT(NAME, ABBR) MDA_XSD_DEFINE_ELEMENT(NAME##Msg)
+MDA_XSD_DEFINE_ELEMENT(AssistantConfigGroup)
+MDA_XSD_DEFINE_ELEMENT(AssistantMsgGroup)
+#endif
+
+//
+// MDA_ELEMENT_DECLARAION_ENUMERATION
+//
+#ifdef MDA_ELEMENT_DECLARAION_ENUMERATION
+#define MDA_XSD_ELEMENT(NAME) MdaElemDecl(NAME),
+#define MDA_XSD_ELEMENT_REFTYPE(NAME, TYPE) MDA_XSD_ELEMENT(NAME)
+#define MDA_XSD_ELEMENT_EXTEND_TYPE(NAME, TYPE) MDA_XSD_ELEMENT(NAME)
+#define MDA_XSD_ELEMENT_ANY(NAME) MDA_XSD_ELEMENT(NAME)
+#endif
+
+//
+// MDA_ATTRIBUTE_DECLARATION_ENUMERATION
+//
+#ifdef MDA_ATTRIBUTE_DECLARATION_ENUMERATION
+#define MDA_XSD_ATTRIBUTE_OPT(NAME, TYPE) MdaAttrDecl(NAME),
+#define MDA_XSD_ATTRIBUTE_REQ(NAME, TYPE) MDA_XSD_ATTRIBUTE_OPT(NAME, TYPE)
+#define MDA_XSD_ATTRIBUTE_DEFAULT(NAME, TYPE, DEFAULT) MDA_XSD_ATTRIBUTE_OPT(NAME, TYPE)
+#endif
+
+//
+// MDA_MAP_ASSISTANT_DEFINITION_TO_NAME
+//
+#ifdef MDA_MAP_ASSISTANT_DEFINITION_TO_NAME
+#define MDA_DEFINE_ASSISTANT(NAME, ABBR) g_arElementNames[i++] = ToLowerFirstChar(L#NAME);
+#endif
+
+//
+// MDA_MAP_ELEMENT_DEFINITION_TO_NAME
+//
+#ifdef MDA_MAP_ELEMENT_DEFINITION_TO_NAME
+#define MDA_XSD_DEFINE_ELEMENT(NAME) g_arElementNames[i++] = ToLowerFirstChar(L#NAME);
+#define MDA_XSD_TYPEDEF_ELEMENT(NAME, TYPE) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_XSD_DEFINE_EXTEND_ELEMENT(NAME, TYPE) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_XSD_DEFINE_EXTEND_TYPE(NAME, TYPE) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_XSD_GROUP(NAME) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_XSD_DEFINE_TYPE(NAME) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_DEFINE_ASSISTANT(NAME, ABBR) MDA_XSD_DEFINE_ELEMENT(NAME##Msg)
+MDA_XSD_DEFINE_ELEMENT(AssistantConfigGroup)
+MDA_XSD_DEFINE_ELEMENT(AssistantMsgGroup)
+#endif
+
+//
+// MDA_MAP_ELEMENT_DECLARATION_TO_NAME
+//
+#ifdef MDA_MAP_ELEMENT_DECLARATION_TO_NAME
+#define MDA_XSD_ELEMENT(NAME) g_arElementNames[i++] = ToLowerFirstChar(L#NAME);
+#define MDA_XSD_ELEMENT_REFTYPE(NAME, TYPE) MDA_XSD_ELEMENT(NAME)
+#define MDA_XSD_ELEMENT_EXTEND_TYPE(NAME, TYPE) MDA_XSD_ELEMENT(NAME)
+#define MDA_XSD_ELEMENT_ANY(NAME) MDA_XSD_ELEMENT(NAME)
+#endif
+
+//
+// MDA_MAP_ATTRIBUTE_DECLARATION_TO_NAME
+//
+#ifdef MDA_MAP_ATTRIBUTE_DECLARATION_TO_NAME
+#define MDA_XSD_ATTRIBUTE_OPT(NAME, TYPE) g_arAttributeNames[i++] = ToLowerFirstChar(L#NAME);
+#define MDA_XSD_ATTRIBUTE_REQ(NAME, TYPE) MDA_XSD_ATTRIBUTE_OPT(NAME, TYPE)
+#define MDA_XSD_ATTRIBUTE_DEFAULT(NAME, TYPE, DEFAULT) MDA_XSD_ATTRIBUTE_OPT(NAME, TYPE)
+#endif
+
+//
+// MDA_MAP_ASSISTANT_NAME_TO_DEFINITION
+//
+#ifdef MDA_MAP_ASSISTANT_NAME_TO_DEFINITION
+#define MDA_DEFINE_ASSISTANT(NAME, ABBR) g_pHtElementType->Set(g_arElementNames[MdaElemDef(NAME)], MdaElemDef(NAME));
+#endif
+
+//
+// MDA_MAP_ELEMENT_NAME_TO_DEFINITION
+//
+#ifdef MDA_MAP_ELEMENT_NAME_TO_DEFINITION
+#define MDA_XSD_DEFINE_ELEMENT(NAME) g_pHtElementType->Set(g_arElementNames[MdaElemDef(NAME)], MdaElemDef(NAME));
+#define MDA_XSD_TYPEDEF_ELEMENT(NAME, TYPE) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_XSD_DEFINE_EXTEND_ELEMENT(NAME, TYPE) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_XSD_DEFINE_EXTEND_TYPE(NAME, TYPE) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_XSD_GROUP(NAME) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_XSD_DEFINE_TYPE(NAME) MDA_XSD_DEFINE_ELEMENT(NAME)
+#define MDA_DEFINE_ASSISTANT(NAME, ABBR) MDA_XSD_DEFINE_ELEMENT(NAME##Msg)
+MDA_XSD_DEFINE_ELEMENT(AssistantConfigGroup)
+MDA_XSD_DEFINE_ELEMENT(AssistantMsgGroup)
+#endif
+
+//
+// MDA_MAP_ELEMENT_NAME_TO_DECLARATION
+//
+#ifdef MDA_MAP_ELEMENT_NAME_TO_DECLARATION
+#define MDA_XSD_ELEMENT(NAME) g_pHtElementType->Set(g_arElementNames[MdaElemDecl(NAME)], MdaElemDecl(NAME));
+#define MDA_XSD_ELEMENT_REFTYPE(NAME, TYPE) MDA_XSD_ELEMENT(NAME)
+#define MDA_XSD_ELEMENT_EXTEND_TYPE(NAME, TYPE) MDA_XSD_ELEMENT(NAME)
+#define MDA_XSD_ELEMENT_ANY(NAME) MDA_XSD_ELEMENT(NAME)
+#endif
+
+//
+// MDA_MAP_ATTRIBUTE_NAME_TO_DECLARATION
+//
+#ifdef MDA_MAP_ATTRIBUTE_NAME_TO_DECLARATION
+#define MDA_XSD_ATTRIBUTE_OPT(NAME, TYPE) g_pHtAttributeType->Set(g_arAttributeNames[MdaAttrDecl(NAME)], MdaAttrDecl(NAME));
+#define MDA_XSD_ATTRIBUTE_REQ(NAME, TYPE) MDA_XSD_ATTRIBUTE_OPT(NAME, TYPE)
+#define MDA_XSD_ATTRIBUTE_DEFAULT(NAME, TYPE, DEFAULT) MDA_XSD_ATTRIBUTE_OPT(NAME, TYPE)
+#endif
+
+//
+// MDA_ASSISTANT_CREATION
+//
+#ifdef MDA_ASSISTANT_CREATION
+#define MDA_DEFINE_ASSISTANT(ASSISTANT, ABBR) \
+ if (mdaXmlPairs.Get(MdaSchema::g_arElementNames[MdaElemDef(ASSISTANT)], &pXmlAssistant)) \
+ { \
+ if (MdaAssistant::IsAssistantActive(pXmlAssistant)) \
+ { \
+ Mda##ASSISTANT* pAssistant = &g_mdaStaticHeap.m_mda##ASSISTANT; \
+ pAssistant->AsMdaAssistant()->Initialize(pXmlAssistant); \
+ pAssistant->Initialize(pXmlAssistant); \
+ g_mdaStaticHeap.m_assistants[MdaElemDef(ASSISTANT)] = pAssistant->AsMdaAssistant(); \
+ } \
+ }
+#endif
+
+
+//
+// MDA_DEFINE_SCHEMA_SCHEMA
+//
+#ifdef MDA_DEFINE_SCHEMA_SCHEMA
+#define MDA_DEFINE_SCHEMA
+#define MDA_XSD_ASSISTANT_SCHEMA() if (FALSE) {
+#define MDA_XSD_ASSISTANT_SCHEMA_END() }
+#define MDA_XSD_SCHEMA_SCHEMA() DefineSchema();
+#define MDA_XSD_SCHEMA_SCHEMA_END() DefineSchemaEnd();
+#endif
+
+//
+// MDA_DEFINE_ASSISTANT_SCHEMA
+//
+#ifdef MDA_DEFINE_ASSISTANT_SCHEMA
+#define MDA_DEFINE_SCHEMA
+#define MDA_XSD_SCHEMA_SCHEMA() if (FALSE) {
+#define MDA_XSD_SCHEMA_SCHEMA_END() }
+#define MDA_XSD_OUTPUT_ONLY() if (FALSE) {
+#define MDA_XSD_OUTPUT_ONLY_END() }
+#define MDA_DEFINE_INPUT(ASSISTANT) DefineAssistantInput(MdaElemDef(ASSISTANT));
+#define MDA_DEFINE_INPUT_END(ASSISTANT) DefineAssistantInputEnd(MdaElemDef(ASSISTANT));
+#define MDA_DEFINE_OUTPUT(ASSISTANT) if (FALSE) {
+#define MDA_DEFINE_OUTPUT_END(ASSISTANT) }
+#define MDA_XSD_ASSISTANT_SCHEMA() DefineSchema();
+#endif
+
+//
+// MDA_DEFINE_MDA_ASSISTANT_CONFIG_GROUP
+//
+#ifdef MDA_DEFINE_MDA_ASSISTANT_CONFIG_GROUP
+#define MDA_XSD_ASSISTANT_SCHEMA() AddGroup(MdaElemDef(AssistantConfigGroup)); AddSequence(0, 1);
+#define MDA_XSD_ASSISTANT_SCHEMA_END() AddSequenceEnd(); AddGroupEnd(MdaElemDef(AssistantConfigGroup));
+#define MDA_DEFINE_INPUT(ASSISTANT) AddSequence(0, 1); RefElement(MdaElemDef(ASSISTANT)); AddSequenceEnd();
+#endif
+
+//
+// MDA_DEFINE_ASSISTANT_MSG_SCHEMA
+//
+#ifdef MDA_DEFINE_ASSISTANT_MSG_SCHEMA
+#define MDA_DEFINE_SCHEMA
+#define MDA_XSD_SCHEMA_SCHEMA() if (FALSE) {
+#define MDA_XSD_SCHEMA_SCHEMA_END() }
+#define MDA_XSD_INPUT_ONLY() if (FALSE) {
+#define MDA_XSD_INPUT_ONLY_END() }
+#define MDA_DEFINE_INPUT(ASSISTANT) if (FALSE) {
+#define MDA_DEFINE_INPUT_END(ASSISTANT) }
+#define MDA_DEFINE_OUTPUT(ASSISTANT) DefineAssistantOutput(MdaElemDef(ASSISTANT), MdaElemDef(ASSISTANT##Msg));
+#define MDA_DEFINE_OUTPUT_END(ASSISTANT) DefineAssistantOutputEnd(MdaElemDef(ASSISTANT), MdaElemDef(ASSISTANT##Msg));
+#define MDA_XSD_ASSISTANT_SCHEMA() DefineSchema();
+#endif
+
+//
+// MDA_DEFINE_MDA_ASSISTANT_MSG_GROUP
+//
+#ifdef MDA_DEFINE_MDA_ASSISTANT_MSG_GROUP
+#define MDA_XSD_ASSISTANT_SCHEMA() AddGroup(MdaElemDef(AssistantMsgGroup)); AddSequence(0, 1);
+#define MDA_XSD_ASSISTANT_SCHEMA_END() AddSequenceEnd(); AddGroupEnd(MdaElemDef(AssistantMsgGroup)); DefineSchemaEnd();
+#define MDA_DEFINE_OUTPUT(ASSISTANT) RefElement(MdaElemDef(ASSISTANT##Msg));
+#endif
+
+//
+// MDA_DEFINE_SCHEMA
+//
+#ifdef MDA_DEFINE_SCHEMA
+
+// Assistants
+#define MDA_DEFINE_ASSISTANT(NAME, ABBR) DefineAssistant(MdaElemDef(NAME));
+#define MDA_DEFINE_ASSISTANT_END(NAME) DefineAssistantEnd(MdaElemDef(NAME));
+
+// Attributes
+#define MDA_XSD_ATTRIBUTE_OPT(NAME,TYPE) AddAttribute(MdaAttrDecl(NAME), MdaSchemaPrimitive##TYPE, FALSE, NULL);
+#define MDA_XSD_ATTRIBUTE__OPT(NAME,TYPE) AddAttribute(MdaAttrDecl(NAME), MdaSchemaPrimitive##TYPE, FALSE, NULL);
+#define MDA_XSD_ATTRIBUTE_REQ(NAME,TYPE) AddAttribute(MdaAttrDecl(NAME), MdaSchemaPrimitive##TYPE, TRUE, NULL);
+#define MDA_XSD_ATTRIBUTE__REQ(NAME,TYPE) AddAttribute(MdaAttrDecl(NAME), MdaSchemaPrimitive##TYPE, TRUE, NULL);
+#define MDA_XSD_ATTRIBUTE_DEFAULT(NAME,TYPE,DEFAULT) AddAttribute(MdaAttrDecl(NAME), MdaSchemaPrimitive##TYPE, FALSE, DEFAULT);
+#define MDA_XSD_ATTRIBUTE__DEFAULT(NAME,TYPE,DEFAULT) AddAttribute(MdaAttrDecl(NAME), MdaSchemaPrimitive##TYPE, FALSE, DEFAULT);
+
+// Definitions
+#define MDA_XSD_DEFINE_ELEMENT(NAME) AddElement(MdaElemDef(NAME));
+#define MDA_XSD_DEFINE_ELEMENT_END(NAME) AddElementEnd(MdaElemDef(NAME));
+#define MDA_XSD_DEFINE_TYPE(NAME) AddComplexType(MdaElemDef(NAME));
+#define MDA_XSD_DEFINE_TYPE_END(NAME) AddComplexTypeEnd(MdaElemDef(NAME));
+#define MDA_XSD_DEFINE_EXTEND_TYPE(NAME, TYPE) AddExtendType(MdaElemDef(NAME), MdaElemDef(TYPE));
+#define MDA_XSD_DEFINE_EXTEND_TYPE_END(NAME, TYPE) AddExtendTypeEnd(MdaElemDef(NAME), MdaElemDef(TYPE));
+#define MDA_XSD_DEFINE_EXTEND_ELEMENT(NAME, TYPE) AddExtendElement(MdaElemDef(NAME), MdaElemDef(TYPE));
+#define MDA_XSD_DEFINE_EXTEND_ELEMENT_END(NAME, TYPE) AddExtendElementEnd(MdaElemDef(NAME), MdaElemDef(TYPE));
+#define MDA_XSD_TYPEDEF_ELEMENT(NAME, TYPE) AddElementRefType(MdaElemDef(NAME), MdaElemDef(TYPE));
+
+// Declarations
+#define MDA_XSD_ELEMENT(NAME) AddElement(MdaElemDecl(NAME));
+#define MDA_XSD__ELEMENT(NAME) AddElement(MdaElemDecl(NAME));
+#define MDA_XSD_ELEMENT_END(NAME) AddElementEnd(MdaElemDecl(NAME));
+#define MDA_XSD_ELEMENT_ANY(NAME) AddElementAny(MdaElemDecl(NAME));
+#define MDA_XSD_ELEMENT__ANY(NAME) AddElementAny(MdaElemDecl(NAME));
+#define MDA_XSD_ELEMENT_REF(NAME) RefElement(MdaElemDef(NAME));
+#define MDA_XSD_ELEMENT_REFTYPE(NAME, TYPE) AddElementRefType(MdaElemDecl(NAME), MdaElemDef(TYPE));
+#define MDA_XSD_ELEMENT__REFTYPE(NAME, TYPE) AddElementRefType(MdaElemDecl(NAME), MdaElemDef(TYPE));
+#define MDA_XSD_ELEMENT_EXTEND_TYPE(NAME, TYPE) AddExtendElement(MdaElemDecl(NAME), MdaElemDef(TYPE));
+#define MDA_XSD_ELEMENT_EXTEND__TYPE(NAME, TYPE) AddExtendElement(MdaElemDecl(NAME), MdaElemDef(TYPE));
+#define MDA_XSD_ELEMENT_EXTEND_TYPE_END(NAME, TYPE) AddExtendElementEnd(MdaElemDecl(NAME), MdaElemDef(TYPE));
+
+// Patterns
+#define MDA_XSD_CHOICE() AddChoice();
+#define MDA_XSD_CHOICE_END() AddChoiceEnd();
+#define MDA_XSD_GROUP(NAME) AddGroup(MdaElemDef(NAME));
+#define MDA_XSD_GROUP_END(NAME) AddGroupEnd(MdaElemDef(NAME));
+#define MDA_XSD_GROUP_REF(NAME) RefGroup(MdaElemDef(NAME));
+#define MDA_XSD_ONCE() AddSequence(1, 1);
+#define MDA_XSD_ONCE_END() AddSequenceEnd();
+#define MDA_XSD_OPTIONAL() AddSequence(0, 1);
+#define MDA_XSD_OPTIONAL_END() AddSequenceEnd();
+#define MDA_XSD_PERIODIC() AddSequence(0, -1);
+#define MDA_XSD_PERIODIC_END() AddSequenceEnd();
+#endif
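+
+// Illustrative note (an assumption read off the macro bodies above, not
+// from external documentation): the pattern macros correspond to XSD
+// occurrence constraints, with AddSequence taking (minOccurs, maxOccurs)
+// and -1 standing for "unbounded":
+//
+//   MDA_XSD_ONCE()     -> <xs:sequence minOccurs="1" maxOccurs="1">
+//   MDA_XSD_OPTIONAL() -> <xs:sequence minOccurs="0" maxOccurs="1">
+//   MDA_XSD_PERIODIC() -> <xs:sequence minOccurs="0" maxOccurs="unbounded">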
+
+#ifndef MDA_DEFINE_INPUT_AS_SWITCH
+#ifdef MDA_DEFINE_INPUT
+#define MDA_DEFINE_INPUT_AS_SWITCH(ASSISTANT) MDA_DEFINE_INPUT(ASSISTANT) MDA_DEFINE_INPUT_END(ASSISTANT)
+#endif
+#endif
+
+#include "mdamacroscrubber.inl"
+
+#include "mdagroups.inl"
+
+//
+// Standard Element Definitions
+//
+MDA_XSD_ASSISTANT_SCHEMA()
+
+
+#include "mdaassistantschemas.inl"
+
+ //
+    // MDA Output Framework Definitions
+ //
+ MDA_XSD_OUTPUT_ONLY()
+
+ // MdaAssistantMsgGroup
+ // MDA_XSD_GROUP(AssistantMsgGroup)
+ // MDA_XSD_GROUP_END(AssistantMsgGroup)
+
+ // Output Root
+ MDA_XSD_DEFINE_TYPE(Msg)
+ MDA_XSD_GROUP_REF(AssistantMsgGroup)
+ MDA_XSD_DEFINE_TYPE_END(Msg)
+
+ // Output Root
+ MDA_XSD_DEFINE_TYPE(AssistantMsgType)
+ //MDA_XSD_ATTRIBUTE_REQ(Documentation, SString)
+ MDA_XSD_DEFINE_TYPE_END(AssistantMsgType)
+
+ MDA_XSD_OUTPUT_ONLY_END()
+
+
+
+ //
+    // MDA Input Framework Definitions
+ //
+ MDA_XSD_INPUT_ONLY()
+
+ // MdaAssistantConfigGroup
+ // MDA_XSD_GROUP(AssistantConfigGroup)
+ // MDA_XSD_GROUP_END(AssistantConfigGroup)
+
+ // MdaConfigType
+ MDA_XSD_DEFINE_TYPE(MdaConfigType)
+ MDA_XSD_ONCE()
+ MDA_XSD_OPTIONAL()
+ MDA_XSD_ELEMENT(Assistants)
+ MDA_XSD_GROUP_REF(AssistantConfigGroup)
+ MDA_XSD_ELEMENT_END(Assistants)
+ MDA_XSD_OPTIONAL_END()
+ MDA_XSD_ONCE_END()
+ MDA_XSD_DEFINE_TYPE_END(MdaConfigType)
+
+ // AppConfig
+ MDA_XSD_DEFINE_EXTEND_ELEMENT(MdaAppConfig, MdaConfigType)
+ MDA_XSD_DEFINE_EXTEND_ELEMENT_END(MdaAppConfig, MdaConfigType)
+
+ // MdaConfig
+ MDA_XSD_DEFINE_EXTEND_ELEMENT(MdaConfig, MdaConfigType)
+ MDA_XSD_DEFINE_EXTEND_ELEMENT_END(MdaConfig, MdaConfigType)
+
+ // MdaGroupConfig
+ MDA_XSD_DEFINE_ELEMENT(MdaGroupConfig)
+ MDA_XSD_PERIODIC()
+ MDA_XSD_ELEMENT(Group)
+ MDA_XSD_ONCE()
+ MDA_XSD_PERIODIC()
+ MDA_XSD_ELEMENT(GroupReference)
+ MDA_XSD_ATTRIBUTE__REQ(Name, SString)
+ MDA_XSD_ELEMENT_END(GroupReference)
+ MDA_XSD_PERIODIC_END()
+ MDA_XSD_OPTIONAL()
+ MDA_XSD_GROUP_REF(AssistantConfigGroup)
+ MDA_XSD_OPTIONAL_END()
+ MDA_XSD_ONCE_END()
+ MDA_XSD_ATTRIBUTE__REQ(Name, SString)
+ MDA_XSD_ELEMENT_END(Group)
+ MDA_XSD_PERIODIC_END()
+ MDA_XSD_DEFINE_ELEMENT_END(MdaGroupConfig)
+
+ // Mda Assistant
+ MDA_XSD_DEFINE_TYPE(Assistant)
+ MDA_XSD_ATTRIBUTE_DEFAULT(Enable, BOOL, W("true"))
+ MDA_XSD_DEFINE_TYPE_END(Assistant)
+
+ // Dummy
+ MDA_XSD_DEFINE_ELEMENT(Dummy)
+ MDA_XSD_ATTRIBUTE_OPT(SuppressDialog, BOOL)
+ MDA_XSD_DEFINE_ELEMENT_END(Dummy)
+
+ MDA_XSD_INPUT_ONLY_END()
+
+
+MDA_XSD_ASSISTANT_SCHEMA_END()
+
+
+//
+// Schema Infrastructure
+//
+MDA_XSD_SCHEMA_SCHEMA()
+
+ // Schema Schema Definition
+ MDA_XSD_DEFINE_ELEMENT(Schema)
+ MDA_XSD_PERIODIC()
+ MDA_XSD_CHOICE()
+ MDA_XSD_ELEMENT_REF(ComplexType)
+ MDA_XSD_ELEMENT_REF(Group)
+ MDA_XSD_ELEMENT_REF(Element)
+ MDA_XSD_CHOICE_END()
+ MDA_XSD_PERIODIC_END()
+ MDA_XSD_ATTRIBUTE_OPT(TargetNamespace, SString)
+ MDA_XSD_ATTRIBUTE_OPT(Xmlns, SString)
+ MDA_XSD_DEFINE_ELEMENT_END(Schema)
+
+ // Element
+ MDA_XSD_DEFINE_ELEMENT(Element)
+ MDA_XSD_OPTIONAL()
+ MDA_XSD_ELEMENT_REF(ComplexType)
+ MDA_XSD_OPTIONAL_END()
+
+ MDA_XSD_ATTRIBUTE__OPT(Name, SString)
+ MDA_XSD_ATTRIBUTE__OPT(Ref, SString)
+ MDA_XSD_ATTRIBUTE__OPT(Type, SString)
+ MDA_XSD_DEFINE_ELEMENT_END(Element)
+
+ // ComplexType
+ MDA_XSD_DEFINE_ELEMENT(ComplexType)
+ MDA_XSD_OPTIONAL()
+ MDA_XSD_CHOICE()
+ MDA_XSD_GROUP_REF(ElementContent)
+ MDA_XSD_ELEMENT_REF(ComplexContent)
+ MDA_XSD_CHOICE_END()
+ MDA_XSD_OPTIONAL_END()
+
+ MDA_XSD_ATTRIBUTE__OPT(Name, SString)
+ MDA_XSD_DEFINE_ELEMENT_END(ComplexType)
+
+ // ComplexContent
+ MDA_XSD_DEFINE_ELEMENT(ComplexContent)
+ MDA_XSD_ONCE()
+ MDA_XSD_ELEMENT_REF(Extension)
+ MDA_XSD_ONCE_END()
+ MDA_XSD_DEFINE_ELEMENT_END(ComplexContent)
+
+ // Extension
+ MDA_XSD_DEFINE_ELEMENT(Extension)
+ MDA_XSD_GROUP_REF(ElementContent)
+
+ MDA_XSD_ATTRIBUTE_REQ(Base, SString)
+ MDA_XSD_DEFINE_ELEMENT_END(Extension)
+
+ // ElementContent
+ MDA_XSD_GROUP(ElementContent)
+ MDA_XSD_OPTIONAL()
+ MDA_XSD_GROUP_REF(PatternRoot)
+
+ MDA_XSD_PERIODIC()
+ MDA_XSD_ELEMENT_REF(Attribute)
+ MDA_XSD_PERIODIC_END()
+ MDA_XSD_OPTIONAL_END()
+ MDA_XSD_GROUP_END(ElementContent)
+
+ // PatternRoot
+ MDA_XSD_GROUP(PatternRoot)
+ MDA_XSD_OPTIONAL()
+ MDA_XSD_CHOICE()
+ MDA_XSD_ELEMENT_REF(Choice)
+ MDA_XSD_ELEMENT_REF(Sequence)
+ MDA_XSD_ELEMENT_REF(Group)
+ MDA_XSD_CHOICE_END()
+ MDA_XSD_OPTIONAL_END()
+ MDA_XSD_GROUP_END(PatternRoot)
+
+ // PeriodicPattern
+ MDA_XSD_GROUP(PeriodicPattern)
+ MDA_XSD_PERIODIC()
+ MDA_XSD_CHOICE()
+ MDA_XSD_ELEMENT_REF(Element)
+ MDA_XSD_ELEMENT_REF(Choice)
+ MDA_XSD_ELEMENT_REF(Sequence)
+ MDA_XSD_ELEMENT_REF(Group)
+ MDA_XSD_CHOICE_END()
+ MDA_XSD_PERIODIC_END()
+ MDA_XSD_GROUP_END(PeriodicPattern)
+
+ // Sequence
+ MDA_XSD_DEFINE_ELEMENT(Sequence)
+ MDA_XSD_GROUP_REF(PeriodicPattern)
+
+ MDA_XSD_ATTRIBUTE_OPT(MinOccurs, SString)
+ MDA_XSD_ATTRIBUTE_OPT(MaxOccurs, SString)
+ MDA_XSD_DEFINE_ELEMENT_END(Sequence)
+
+ // Choice
+ MDA_XSD_DEFINE_ELEMENT(Choice)
+ MDA_XSD_GROUP_REF(PeriodicPattern)
+ MDA_XSD_DEFINE_ELEMENT_END(Choice)
+
+ // Group
+ MDA_XSD_DEFINE_ELEMENT(Group)
+ MDA_XSD_GROUP_REF(PatternRoot)
+
+ MDA_XSD_ATTRIBUTE__OPT(Name, SString)
+ MDA_XSD_ATTRIBUTE_OPT(Ref, SString)
+ MDA_XSD_DEFINE_ELEMENT_END(Group)
+
+ // Attribute
+ MDA_XSD_DEFINE_ELEMENT(Attribute)
+ MDA_XSD_ATTRIBUTE__REQ(Name, SString)
+ MDA_XSD_ATTRIBUTE_REQ(Type, SString)
+ MDA_XSD_ATTRIBUTE_OPT(Use, SString)
+ MDA_XSD_ATTRIBUTE_OPT(Default, SString)
+ MDA_XSD_DEFINE_ELEMENT_END(Attribute)
+
+MDA_XSD_SCHEMA_SCHEMA_END()
+
+#include "mdamacroscrubber.inl"
+
diff --git a/src/vm/memberload.cpp b/src/vm/memberload.cpp
new file mode 100644
index 0000000000..016acabd67
--- /dev/null
+++ b/src/vm/memberload.cpp
@@ -0,0 +1,1567 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: memberload.cpp
+//
+
+
+//
+
+//
+// ============================================================================
+
+#include "common.h"
+#include "clsload.hpp"
+#include "method.hpp"
+#include "class.h"
+#include "object.h"
+#include "field.h"
+#include "util.hpp"
+#include "excep.h"
+#include "siginfo.hpp"
+#include "threads.h"
+#include "stublink.h"
+#include "ecall.h"
+#include "dllimport.h"
+#include "verifier.hpp"
+#include "jitinterface.h"
+#include "eeconfig.h"
+#include "log.h"
+#include "fieldmarshaler.h"
+#include "cgensys.h"
+#include "gc.h"
+#include "security.h"
+#include "dbginterface.h"
+#include "comdelegate.h"
+#include "sigformat.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "eeprofinterfaces.h"
+#include "dllimportcallback.h"
+#include "listlock.h"
+#include "methodimpl.h"
+#include "stackprobe.h"
+#include "encee.h"
+#include "comsynchronizable.h"
+#include "customattribute.h"
+#include "virtualcallstub.h"
+#include "eeconfig.h"
+#include "contractimpl.h"
+#ifdef FEATURE_REMOTING
+#include "objectclone.h"
+#endif
+#include "listlock.inl"
+#include "generics.h"
+#include "instmethhash.h"
+#include "typestring.h"
+
+#ifndef DACCESS_COMPILE
+
+void DECLSPEC_NORETURN MemberLoader::ThrowMissingFieldException(MethodTable* pMT, LPCSTR szMember)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT, NULL_OK));
+ PRECONDITION(CheckPointer(szMember,NULL_OK));
+ }
+ CONTRACTL_END;
+
+ LPCUTF8 szClassName;
+
+ DefineFullyQualifiedNameForClass();
+ if (pMT)
+ {
+ szClassName = GetFullyQualifiedNameForClass(pMT);
+ }
+ else
+ {
+ szClassName = "?";
+    }
+
+
+ LPUTF8 szFullName;
+ MAKE_FULLY_QUALIFIED_MEMBER_NAME(szFullName, NULL, szClassName, (szMember?szMember:"?"), "");
+ PREFIX_ASSUME(szFullName!=NULL);
+ MAKE_WIDEPTR_FROMUTF8(szwFullName, szFullName);
+ EX_THROW(EEMessageException, (kMissingFieldException, IDS_EE_MISSING_FIELD, szwFullName));
+}
+
+void DECLSPEC_NORETURN MemberLoader::ThrowMissingMethodException(MethodTable* pMT, LPCSTR szMember, Module *pModule, PCCOR_SIGNATURE pSig,DWORD cSig,const SigTypeContext *pTypeContext)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT,NULL_OK));
+ PRECONDITION(CheckPointer(szMember,NULL_OK));
+ PRECONDITION(CheckPointer(pSig,NULL_OK));
+ PRECONDITION(CheckPointer(pModule,NULL_OK));
+ PRECONDITION(CheckPointer(pTypeContext,NULL_OK));
+ }
+ CONTRACTL_END;
+ LPCUTF8 szClassName;
+
+ DefineFullyQualifiedNameForClass();
+ if (pMT)
+ {
+ szClassName = GetFullyQualifiedNameForClass(pMT);
+ }
+ else
+ {
+ szClassName = "?";
+    }
+
+ if (pSig && cSig && pModule)
+ {
+ MetaSig tmp(pSig, cSig, pModule, pTypeContext);
+ SigFormat sf(tmp, szMember ? szMember : "?", szClassName, NULL);
+ MAKE_WIDEPTR_FROMUTF8(szwFullName, sf.GetCString());
+ EX_THROW(EEMessageException, (kMissingMethodException, IDS_EE_MISSING_METHOD, szwFullName));
+ }
+ else
+ {
+ EX_THROW(EEMessageException, (kMissingMethodException, IDS_EE_MISSING_METHOD, W("?")));
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+void MemberLoader::GetDescFromMemberRef(Module * pModule,
+ mdToken MemberRef,
+ MethodDesc ** ppMD,
+ FieldDesc ** ppFD,
+ const SigTypeContext *pTypeContext,
+ BOOL strictMetadataChecks,
+ TypeHandle *ppTH,
+ BOOL actualTypeRequired,
+ PCCOR_SIGNATURE * ppTypeSig,
+ ULONG * pcbTypeSig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(TypeFromToken(MemberRef) == mdtMemberRef);
+ PRECONDITION(ppMD != NULL && *ppMD == NULL);
+ PRECONDITION(ppFD != NULL && *ppFD == NULL);
+ PRECONDITION(ppTH != NULL && ppTH->IsNull());
+ PRECONDITION(!((ppTypeSig == NULL) ^ (pcbTypeSig == NULL)));
+ }
+ CONTRACTL_END;
+
+ // In lookup table?
+ BOOL fIsMethod;
+ TADDR pDatum = pModule->LookupMemberRef(MemberRef, &fIsMethod);
+
+ if (pDatum != NULL)
+ {
+ if (!fIsMethod)
+ {
+ FieldDesc * pFD = dac_cast<PTR_FieldDesc>(pDatum);
+ *ppFD = pFD;
+
+ // Fields are not inherited so we can always return the exact type right away.
+ *ppTH = pFD->GetEnclosingMethodTable();
+ return;
+ }
+ else
+ {
+ MethodDesc * pMD = dac_cast<PTR_MethodDesc>(pDatum);
+ pMD->CheckRestore();
+ *ppMD = pMD;
+
+ // We are done if the caller is not interested in actual type.
+ if (!actualTypeRequired)
+ {
+ *ppTH = pMD->GetMethodTable();
+ return;
+ }
+ }
+ }
+
+ // No, so do it the long way
+ IMDInternalImport * pInternalImport = pModule->GetMDImport();
+
+ mdTypeRef parent;
+ IfFailThrow(pInternalImport->GetParentOfMemberRef(MemberRef, &parent));
+
+ // If parent is a method def, then this is a varargs method and the
+ // desc lives in the same module.
+ if (TypeFromToken(parent) == mdtMethodDef)
+ {
+ // Return now if actualTypeRequired was set and the desc was cached
+ if (pDatum != NULL)
+ {
+ *ppTH = dac_cast<PTR_MethodDesc>(pDatum)->GetMethodTable();
+ return;
+ }
+
+ MethodDesc *pMethodDef = pModule->LookupMethodDef(parent);
+ if (!pMethodDef)
+ {
+ // There is no value for this def so we haven't yet loaded the class.
+ mdTypeDef typeDef;
+ IfFailThrow(pInternalImport->GetParentToken(parent, &typeDef));
+
+ // Make sure it is a typedef
+ if (TypeFromToken(typeDef) != mdtTypeDef)
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_METHODDEF_WO_TYPEDEF_PARENT);
+ }
+
+ // load the class
+
+ TypeHandle th = ClassLoader::LoadTypeDefThrowing(
+ pModule,
+ typeDef,
+ ClassLoader::ThrowIfNotFound,
+ strictMetadataChecks ?
+ ClassLoader::FailIfUninstDefOrRef : ClassLoader::PermitUninstDefOrRef);
+
+ // the class has been loaded and the method should be in the rid map!
+ pMethodDef = pModule->LookupMethodDef(parent);
+ }
+
+ LPCUTF8 szMember;
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+
+ IfFailThrow(pInternalImport->GetNameAndSigOfMemberRef(MemberRef, &pSig, &cSig, &szMember));
+
+ BOOL fMissingMethod = FALSE;
+ if (!pMethodDef)
+ {
+ fMissingMethod = TRUE;
+ }
+ else
+ if (pMethodDef->HasClassOrMethodInstantiation())
+ {
+ // A memberref to a varargs method must not find a MethodDesc that is generic (as varargs methods may not be implemented on generics)
+ fMissingMethod = TRUE;
+ }
+ else
+ {
+ // Ensure the found method matches up correctly
+ PCCOR_SIGNATURE pMethodSig;
+ DWORD cMethodSig;
+
+ pMethodDef->GetSig(&pMethodSig, &cMethodSig);
+ if (!MetaSig::CompareMethodSigs(pSig, cSig, pModule, NULL, pMethodSig,
+ cMethodSig, pModule, NULL))
+ {
+ // If the signatures do not match, then the correct MethodDesc has not been found.
+ fMissingMethod = TRUE;
+ }
+ }
+
+ if (fMissingMethod)
+ {
+ ThrowMissingMethodException(
+ (pMethodDef != NULL) ? pMethodDef->GetMethodTable() : NULL,
+ szMember,
+ pModule,
+ pSig,
+ cSig,
+ pTypeContext);
+ }
+
+ pMethodDef->CheckRestore();
+
+ *ppMD = pMethodDef;
+ *ppTH = pMethodDef->GetMethodTable();
+
+ pModule->StoreMemberRef(MemberRef, pMethodDef);
+ return;
+ }
+
+ TypeHandle typeHnd;
+ PCCOR_SIGNATURE pTypeSig = NULL;
+ ULONG cTypeSig = 0;
+
+ switch (TypeFromToken(parent))
+ {
+ case mdtModuleRef:
+ {
+ DomainFile *pTargetModule = pModule->LoadModule(GetAppDomain(), parent, FALSE /* loadResources */);
+ if (pTargetModule == NULL)
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ typeHnd = TypeHandle(pTargetModule->GetModule()->GetGlobalMethodTable());
+ if (typeHnd.IsNull())
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ break;
+
+ case mdtTypeDef:
+ case mdtTypeRef:
+ typeHnd = ClassLoader::LoadTypeDefOrRefThrowing(pModule, parent,
+ ClassLoader::ThrowIfNotFound,
+ strictMetadataChecks ?
+ ClassLoader::FailIfUninstDefOrRef : ClassLoader::PermitUninstDefOrRef);
+ break;
+
+ case mdtTypeSpec:
+ {
+ IfFailThrow(pInternalImport->GetTypeSpecFromToken(parent, &pTypeSig, &cTypeSig));
+
+ if (ppTypeSig != NULL)
+ {
+ *ppTypeSig = pTypeSig;
+ *pcbTypeSig = cTypeSig;
+ }
+
+ SigPointer sigptr(pTypeSig, cTypeSig);
+ typeHnd = sigptr.GetTypeHandleThrowing(pModule, pTypeContext);
+ }
+ break;
+
+ default:
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ // Return now if actualTypeRequired was set and the desc was cached
+ if (pDatum != NULL)
+ {
+ *ppTH = typeHnd;
+ return;
+ }
+
+ // Now load the parent of the method ref
+ MethodTable * pMT = typeHnd.GetMethodTable();
+
+ // pMT will be null if typeHnd is a variable type
+ if (pMT == NULL)
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_METHODDEF_PARENT_NO_MEMBERS);
+ }
+
+ PREFIX_ASSUME(pMT != NULL);
+
+ LPCUTF8 szMember;
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+
+ IfFailThrow(pInternalImport->GetNameAndSigOfMemberRef(MemberRef, &pSig, &cSig, &szMember));
+
+ BOOL fIsField = isCallConv(
+ MetaSig::GetCallingConvention(pModule, Signature(pSig, cSig)),
+ IMAGE_CEE_CS_CALLCONV_FIELD);
+
+ if (fIsField)
+ {
+ FieldDesc * pFD = MemberLoader::FindField(pMT, szMember, pSig, cSig, pModule);
+
+ if (pFD == NULL)
+ ThrowMissingFieldException(pMT, szMember);
+
+ if (pFD->IsStatic() && pMT->HasGenericsStaticsInfo())
+ {
+ //
+        // <NICE> this duplicates logic in GetFieldDescByIndex </NICE>
+ //
+ INDEBUG(mdFieldDef token = pFD->GetMemberDef();)
+
+ DWORD pos = static_cast<DWORD>(pFD - (pMT->GetApproxFieldDescListRaw() + pMT->GetNumIntroducedInstanceFields()));
+ _ASSERTE(pos >= 0 && pos < pMT->GetNumStaticFields());
+
+ pFD = pMT->GetGenericsStaticFieldDescs() + pos;
+ _ASSERTE(pFD->GetMemberDef() == token);
+ _ASSERTE(!pFD->IsSharedByGenericInstantiations());
+ _ASSERTE(pFD->GetEnclosingMethodTable() == pMT);
+ }
+
+ *ppFD = pFD;
+ *ppTH = typeHnd;
+
+ //@GENERICS: don't store FieldDescs for instantiated types
+ //or we'll get the wrong one for another instantiation!
+ if (!pMT->HasInstantiation())
+ {
+ pModule->StoreMemberRef(MemberRef, pFD);
+
+ // Verify that the exact type returned here is same as exact type returned by the cached path
+ _ASSERTE(TypeHandle(pFD->GetEnclosingMethodTable()) == *ppTH);
+ }
+ }
+ else
+ {
+ // For array method signatures, the caller's signature contains "actual" types whereas the callee's signature has
+ // formals (ELEMENT_TYPE_VAR 0 wherever the element type of the array occurs). So we need to pass in a substitution
+ // built from the signature of the element type.
+ Substitution sigSubst(pModule, SigPointer(), NULL);
+
+ if (typeHnd.IsArray())
+ {
+ _ASSERTE(pTypeSig != NULL && cTypeSig != 0);
+
+ SigPointer sigptr = SigPointer(pTypeSig, cTypeSig);
+ CorElementType type;
+ IfFailThrow(sigptr.GetElemType(&type));
+
+ THROW_BAD_FORMAT_MAYBE(
+ ((type == ELEMENT_TYPE_SZARRAY) || (type == ELEMENT_TYPE_ARRAY)),
+ BFA_NOT_AN_ARRAY,
+ pModule);
+ sigSubst = Substitution(pModule, sigptr, NULL);
+ }
+
+ // Lookup the method in the class.
+ MethodDesc * pMD = MemberLoader::FindMethod(pMT,
+ szMember,
+ pSig,
+ cSig,
+ pModule,
+ MemberLoader::FM_Default,
+ &sigSubst);
+ if (pMD == NULL)
+ {
+ ThrowMissingMethodException(pMT, szMember, pModule, pSig, cSig, pTypeContext);
+ }
+
+ pMD->CheckRestore();
+
+ *ppMD = pMD;
+ *ppTH = typeHnd;
+
+ // Don't store MethodDescs for instantiated types or we'll get
+ // the wrong one for another instantiation!
+ // The same thing happens for arrays as the same MemberRef can be used for multiple array types
+ // e.g. the member ref in
+ // call void MyList<!0>[,]::Set(int32,int32,MyList<!0>)
+ // could be used for the Set method in MyList<string>[,] and MyList<int32>[,], etc.
+        // <NICE>use the cache when the memberref is closed (contains no free type parameters),
+        // as then it does identify a method-desc uniquely</NICE>
+ if (!pMD->HasClassOrMethodInstantiation() && !typeHnd.IsArray())
+ {
+ pModule->StoreMemberRef(MemberRef, pMD);
+
+ // Return actual type only if caller asked for it
+ if (!actualTypeRequired)
+ *ppTH = pMD->GetMethodTable();
+ }
+ }
+}
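+
+// A hedged usage sketch (not compiled; pModule, tkMemberRef and
+// typeContext are assumed to be in scope): the out parameters follow
+// the preconditions above, and on successful return exactly one of
+// *ppMD / *ppFD has been filled in.
+#if 0
+MethodDesc * pMD = NULL;
+FieldDesc *  pFD = NULL;
+TypeHandle   th;
+MemberLoader::GetDescFromMemberRef(pModule, tkMemberRef, &pMD, &pFD,
+                                   &typeContext, TRUE /* strictMetadataChecks */,
+                                   &th, FALSE /* actualTypeRequired */,
+                                   NULL, NULL /* no type sig requested */);
+if (pMD != NULL)      { /* the token referred to a method */ }
+else if (pFD != NULL) { /* the token referred to a field  */ }
+#endif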
+
+//---------------------------------------------------------------------------------------
+//
+MethodDesc * MemberLoader::GetMethodDescFromMemberRefAndType(Module * pModule,
+ mdToken MemberRef,
+ MethodTable * pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(TypeFromToken(MemberRef) == mdtMemberRef);
+ }
+ CONTRACTL_END;
+
+ //
+ // Fraction of MemberLoader::GetDescFromMemberRef that we actually need here
+ //
+
+ IMDInternalImport * pInternalImport = pModule->GetMDImport();
+
+ LPCUTF8 szMember;
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+
+ IfFailThrow(pInternalImport->GetNameAndSigOfMemberRef(MemberRef, &pSig, &cSig, &szMember));
+
+ _ASSERTE(!isCallConv(MetaSig::GetCallingConvention(pModule, Signature(pSig, cSig)), IMAGE_CEE_CS_CALLCONV_FIELD));
+
+ // For array method signatures, the caller's signature contains "actual" types whereas the callee's signature has
+ // formals (ELEMENT_TYPE_VAR 0 wherever the element type of the array occurs). So we need to pass in a substitution
+ // built from the signature of the element type.
+ Substitution sigSubst(pModule, SigPointer(), NULL);
+
+ if (pMT->IsArray())
+ {
+ mdTypeRef parent;
+ IfFailThrow(pInternalImport->GetParentOfMemberRef(MemberRef, &parent));
+
+ PCCOR_SIGNATURE pTypeSig = NULL;
+ ULONG cTypeSig = 0;
+ IfFailThrow(pInternalImport->GetTypeSpecFromToken(parent, &pTypeSig, &cTypeSig));
+ _ASSERTE(pTypeSig != NULL && cTypeSig != 0);
+
+ SigPointer sigptr = SigPointer(pTypeSig, cTypeSig);
+ CorElementType type;
+ IfFailThrow(sigptr.GetElemType(&type));
+
+ _ASSERTE((type == ELEMENT_TYPE_SZARRAY) || (type == ELEMENT_TYPE_ARRAY));
+
+ sigSubst = Substitution(pModule, sigptr, NULL);
+ }
+
+ // Lookup the method in the class.
+ MethodDesc * pMD = MemberLoader::FindMethod(pMT,
+ szMember,
+ pSig,
+ cSig,
+ pModule,
+ MemberLoader::FM_Default,
+ &sigSubst);
+ if (pMD == NULL)
+ {
+ ThrowMissingMethodException(pMT, szMember, pModule, pSig, cSig, NULL);
+ }
+
+ pMD->CheckRestore();
+
+ return pMD;
+}
+
+//---------------------------------------------------------------------------------------
+//
+FieldDesc * MemberLoader::GetFieldDescFromMemberRefAndType(Module * pModule,
+ mdToken MemberRef,
+ MethodTable * pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(TypeFromToken(MemberRef) == mdtMemberRef);
+ }
+ CONTRACTL_END;
+
+ //
+ // Fraction of MemberLoader::GetDescFromMemberRef that we actually need here
+ //
+
+ IMDInternalImport * pInternalImport = pModule->GetMDImport();
+
+ LPCUTF8 szMember;
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+
+ IfFailThrow(pInternalImport->GetNameAndSigOfMemberRef(MemberRef, &pSig, &cSig, &szMember));
+
+ _ASSERTE(isCallConv(MetaSig::GetCallingConvention(pModule, Signature(pSig, cSig)), IMAGE_CEE_CS_CALLCONV_FIELD));
+
+ FieldDesc * pFD = MemberLoader::FindField(pMT, szMember, pSig, cSig, pModule);
+
+ if (pFD == NULL)
+ ThrowMissingFieldException(pMT, szMember);
+
+ if (pFD->IsStatic() && pMT->HasGenericsStaticsInfo())
+ {
+ //
+        // <NICE> this duplicates logic in GetFieldDescByIndex </NICE>
+ //
+ INDEBUG(mdFieldDef token = pFD->GetMemberDef();)
+
+ DWORD pos = static_cast<DWORD>(pFD - (pMT->GetApproxFieldDescListRaw() + pMT->GetNumIntroducedInstanceFields()));
+ _ASSERTE(pos >= 0 && pos < pMT->GetNumStaticFields());
+
+ pFD = pMT->GetGenericsStaticFieldDescs() + pos;
+ _ASSERTE(pFD->GetMemberDef() == token);
+ _ASSERTE(!pFD->IsSharedByGenericInstantiations());
+ _ASSERTE(pFD->GetEnclosingMethodTable() == pMT);
+ }
+
+ return pFD;
+}
+
+//---------------------------------------------------------------------------------------
+//
+MethodDesc* MemberLoader::GetMethodDescFromMethodDef(Module *pModule,
+ mdToken MethodDef,
+ BOOL strictMetadataChecks)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(TypeFromToken(MethodDef) == mdtMethodDef);
+ }
+ CONTRACTL_END;
+
+ // In lookup table?
+ MethodDesc * pMD = pModule->LookupMethodDef(MethodDef);
+ if (!pMD)
+ {
+ // No, so do it the long way
+ //
+ // Notes on methodDefs to generic things
+ //
+ // For internal purposes we wish to resolve MethodDef from generic classes or for generic methods to
+ // the corresponding fully uninstantiated descriptor. For example, for
+ // class C<T> { void m(); }
+        // the MethodDef for m resolves to a method descriptor for C<T>.m(). This is the
+ // descriptor that gets stored in the RID map.
+ //
+ // Normal IL code that uses generic code cannot use MethodDefs in this way: all calls
+ // to generic code must be emitted as MethodRefs and MethodSpecs. However, at other
+        // points in the codebase we need to resolve MethodDefs to generic uninstantiated
+ // method descriptors, and this is the best place to implement that.
+ //
+ mdTypeDef typeDef;
+ IfFailThrow(pModule->GetMDImport()->GetParentToken(MethodDef, &typeDef));
+
+ TypeHandle th = ClassLoader::LoadTypeDefThrowing(
+ pModule,
+ typeDef,
+ ClassLoader::ThrowIfNotFound,
+ strictMetadataChecks ?
+ ClassLoader::FailIfUninstDefOrRef : ClassLoader::PermitUninstDefOrRef);
+
+ // The RID map should have been filled out if we fully loaded the class
+ pMD = pModule->LookupMethodDef(MethodDef);
+
+ if (pMD == NULL)
+ {
+ LPCUTF8 szMember;
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+
+ IfFailThrow(pModule->GetMDImport()->GetSigOfMethodDef(MethodDef, &cSig, &pSig));
+ IfFailThrow(pModule->GetMDImport()->GetNameOfMethodDef(MethodDef, &szMember));
+
+ ThrowMissingMethodException(
+ th.GetMethodTable(),
+ szMember,
+ pModule,
+ pSig,
+ cSig,
+ NULL);
+ }
+ }
+
+ pMD->CheckRestore();
+
+#if 0
+ // <TODO> Generics: enable this check after the findMethod call in the Zapper which passes
+ // naked generic MethodDefs across the JIT interface is moved over into the EE</TODO>
+ if (strictMetadataChecks && pDatum->GetNumGenericClassArgs() != 0)
+ {
+ THROW_BAD_FORMAT_MAYBE(!"Methods inside generic classes must be referenced using MemberRefs or MethodSpecs, even in the same module as the class", 0, pModule);
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+#endif
+
+ return pMD;
+}
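+
+// Illustrative note: given
+//     class C<T> { void m(); }
+// the MethodDef token for m resolves here to the shared descriptor for
+// C<T>::m(). A caller that needs the descriptor for a particular
+// instantiation such as C<string>::m() must supply explicit class and
+// method instantiations; see the overload of GetMethodDescFromMethodDef
+// further below.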
+
+//---------------------------------------------------------------------------------------
+//
+FieldDesc* MemberLoader::GetFieldDescFromFieldDef(Module *pModule,
+ mdToken FieldDef,
+ BOOL strictMetadataChecks)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(TypeFromToken(FieldDef) == mdtFieldDef);
+ }
+ CONTRACTL_END;
+
+ // In lookup table?
+ FieldDesc * pFD = pModule->LookupFieldDef(FieldDef);
+ if (!pFD)
+ {
+ // No, so do it the long way
+ mdTypeDef typeDef;
+ IfFailThrow(pModule->GetMDImport()->GetParentToken(FieldDef, &typeDef));
+
+ // Load the class - that should set the desc in the rid map
+ // Field defs to generic things resolve to the formal instantiation
+ // without taking the type context into account. They are only valid internally.
+ // <TODO> check that we rule out field defs to generic things in IL streams elsewhere</TODO>
+
+ TypeHandle th = ClassLoader::LoadTypeDefThrowing(
+ pModule,
+ typeDef,
+ ClassLoader::ThrowIfNotFound,
+ strictMetadataChecks ?
+ ClassLoader::FailIfUninstDefOrRef : ClassLoader::PermitUninstDefOrRef);
+
+ pFD = pModule->LookupFieldDef(FieldDef);
+ if (pFD == NULL)
+ {
+ LPCUTF8 szMember;
+ if (FAILED(pModule->GetMDImport()->GetNameOfFieldDef(FieldDef, &szMember)))
+ {
+ szMember = "Invalid FieldDef record";
+ }
+ ThrowMissingFieldException(th.GetMethodTable(), szMember);
+ }
+ }
+
+ pFD->GetApproxEnclosingMethodTable()->CheckRestore();
+
+#ifdef EnC_SUPPORTED
+ if (pModule->IsEditAndContinueEnabled() && pFD->IsEnCNew())
+ {
+ EnCFieldDesc *pEnCFD = (EnCFieldDesc*)pFD;
+        // we may not have the full FieldDesc info at applyEnC time because we don't
+ // have a thread so can't do things like load classes (due to possible exceptions)
+ if (pEnCFD->NeedsFixup())
+ {
+ GCX_COOP();
+ pEnCFD->Fixup(FieldDef);
+ }
+ }
+#endif // EnC_SUPPORTED
+
+ return pFD;
+}
+
+//---------------------------------------------------------------------------------------
+//
+MethodDesc *
+MemberLoader::GetMethodDescFromMemberDefOrRefOrSpec(
+ Module * pModule,
+ mdMemberRef MemberRef,
+ const SigTypeContext * pTypeContext,
+ BOOL strictMetadataChecks,
+ // Normally true - the zapper is one exception. Throw an exception if no generic method args
+ // given for a generic method, otherwise return the 'generic' instantiation
+ BOOL allowInstParam)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+ if(!pInternalImport->IsValidToken(MemberRef))
+ {
+ // The exception type and message preserved for compatibility
+ THROW_BAD_FORMAT(BFA_INVALID_METHOD_TOKEN, pModule);
+ }
+
+ MethodDesc * pMD = NULL;
+ FieldDesc * pFD = NULL;
+ TypeHandle th;
+
+ switch (TypeFromToken(MemberRef))
+ {
+ case mdtMethodDef:
+ pMD = GetMethodDescFromMethodDef(pModule, MemberRef, strictMetadataChecks);
+ th = pMD->GetMethodTable();
+ break;
+
+ case mdtMemberRef:
+ GetDescFromMemberRef(pModule, MemberRef, &pMD, &pFD, pTypeContext, strictMetadataChecks, &th);
+
+ if (pMD == NULL)
+ {
+ // The exception type and message preserved for compatibility
+ EX_THROW(EEMessageException, (kMissingMethodException, IDS_EE_MISSING_METHOD, W("?")));
+ }
+ break;
+
+ case mdtMethodSpec:
+ return GetMethodDescFromMethodSpec(pModule, MemberRef, pTypeContext, strictMetadataChecks, allowInstParam, &th);
+
+ default:
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ // Apply the method instantiation if any. If not applying strictMetadataChecks we
+ // generate the "generic" instantiation - this is used by FuncEval.
+ //
+ // For generic code this call will return an instantiating stub where needed. If the method
+ // is a generic method then instantiate it with the given parameters.
+ // For non-generic code this will just return pDatum
+ return MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pMD,
+ th.GetMethodTable(),
+ FALSE /* don't get unboxing entry point */,
+ strictMetadataChecks ? Instantiation() : pMD->LoadMethodInstantiation(),
+ allowInstParam);
+} // MemberLoader::GetMethodDescFromMemberDefOrRefOrSpec
+
+//---------------------------------------------------------------------------------------
+//
+MethodDesc * MemberLoader::GetMethodDescFromMethodSpec(Module * pModule,
+ mdToken MethodSpec,
+ const SigTypeContext *pTypeContext,
+ BOOL strictMetadataChecks,
+ BOOL allowInstParam,
+ TypeHandle *ppTH,
+ BOOL actualTypeRequired,
+ PCCOR_SIGNATURE * ppTypeSig,
+ ULONG * pcbTypeSig,
+ PCCOR_SIGNATURE * ppMethodSig,
+ ULONG * pcbMethodSig)
+
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(TypeFromToken(MethodSpec) == mdtMethodSpec);
+ PRECONDITION(ppTH != NULL && ppTH->IsNull());
+ PRECONDITION(!((ppTypeSig == NULL) ^ (pcbTypeSig == NULL)));
+ PRECONDITION(!((ppMethodSig == NULL) ^ (pcbMethodSig == NULL)));
+ }
+ CONTRACTL_END;
+
+ CQuickBytes qbGenericMethodArgs;
+
+ mdMemberRef GenericMemberRef;
+ PCCOR_SIGNATURE pSig;
+ ULONG cSig;
+
+ IMDInternalImport * pInternalImport = pModule->GetMDImport();
+
+ // Get the member def/ref and instantiation signature
+ IfFailThrow(pInternalImport->GetMethodSpecProps(MethodSpec, &GenericMemberRef, &pSig, &cSig));
+
+ if (ppMethodSig != NULL)
+ {
+ *ppMethodSig = pSig;
+ *pcbMethodSig = cSig;
+ }
+
+ SigPointer sp(pSig, cSig);
+
+ BYTE etype;
+ IfFailThrow(sp.GetByte(&etype));
+
+ // Load the generic method instantiation
+ THROW_BAD_FORMAT_MAYBE(etype == (BYTE)IMAGE_CEE_CS_CALLCONV_GENERICINST, 0, pModule);
+
+ DWORD nGenericMethodArgs = 0;
+ IfFailThrow(sp.GetData(&nGenericMethodArgs));
+
+ DWORD cbAllocSize = 0;
+ if (!ClrSafeInt<DWORD>::multiply(nGenericMethodArgs, sizeof(TypeHandle), cbAllocSize))
+ {
+ COMPlusThrowHR(COR_E_OVERFLOW);
+ }
+
+ TypeHandle *genericMethodArgs = reinterpret_cast<TypeHandle *>(qbGenericMethodArgs.AllocThrows(cbAllocSize));
+
+ for (DWORD i = 0; i < nGenericMethodArgs; i++)
+ {
+ genericMethodArgs[i] = sp.GetTypeHandleThrowing(pModule, pTypeContext);
+ _ASSERTE (!genericMethodArgs[i].IsNull());
+ IfFailThrow(sp.SkipExactlyOne());
+ }
+
+ MethodDesc * pMD = NULL;
+ FieldDesc * pFD = NULL;
+
+ switch (TypeFromToken(GenericMemberRef))
+ {
+ case mdtMethodDef:
+ pMD = GetMethodDescFromMethodDef(pModule, GenericMemberRef, strictMetadataChecks);
+ *ppTH = pMD->GetMethodTable();
+ break;
+
+ case mdtMemberRef:
+ GetDescFromMemberRef(pModule, GenericMemberRef, &pMD, &pFD, pTypeContext, strictMetadataChecks, ppTH,
+ actualTypeRequired, ppTypeSig, pcbTypeSig);
+
+ if (pMD == NULL)
+ {
+ // The exception type and message preserved for compatibility
+ EX_THROW(EEMessageException, (kMissingMethodException, IDS_EE_MISSING_METHOD, W("?")));
+ }
+ break;
+
+ default:
+ // The exception type and message preserved for compatibility
+ THROW_BAD_FORMAT(
+ BFA_EXPECTED_METHODDEF_OR_MEMBERREF,
+ pModule);
+ }
+
+ return MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pMD,
+ ppTH->GetMethodTable(),
+ FALSE /* don't get unboxing entry point */,
+ Instantiation(genericMethodArgs, nGenericMethodArgs),
+ allowInstParam);
+}
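+
+// Illustrative note: the instantiation blob parsed above follows the
+// ECMA-335 MethodSpec signature layout,
+//
+//     GENERICINST (0x0A)  GenArgCount  Type+
+//
+// so a methodspec for a call such as M<int, string>(...) carries the
+// byte 0x0A, the count 2, and then the encodings of the two type
+// arguments.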
+
+//---------------------------------------------------------------------------------------
+//
+MethodDesc *
+MemberLoader::GetMethodDescFromMethodDef(
+ Module * pModule,
+ mdMethodDef MethodDef, // MethodDef token
+ Instantiation classInst, // Generic arguments for declaring class
+ Instantiation methodInst, // Generic arguments for declaring method
+ BOOL forceRemotable /* = FALSE */)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(TypeFromToken(MethodDef) == mdtMethodDef);
+ }
+ CONTRACTL_END;
+
+ // Get the generic method definition. The functions above are guaranteed to
+ // return the generic definition when given a MethodDef.
+ MethodDesc* pDefMD = GetMethodDescFromMethodDef(pModule, MethodDef, FALSE);
+ if (pDefMD->GetNumGenericMethodArgs() != methodInst.GetNumArgs())
+ {
+ COMPlusThrowHR(COR_E_TARGETPARAMCOUNT);
+ }
+
+ // If the class isn't generic then LoadGenericInstantiation just checks that
+ // we're not supplying type parameters and then returns us the class as a type handle
+ MethodTable *pMT = ClassLoader::LoadGenericInstantiationThrowing(
+ pModule, pDefMD->GetMethodTable()->GetCl(), classInst).AsMethodTable();
+
+ // Apply the instantiations (if any).
+ MethodDesc *pMD = MethodDesc::FindOrCreateAssociatedMethodDesc(pDefMD, pMT,
+ FALSE, /* don't get unboxing entry point */
+ methodInst,
+ FALSE /* no allowInstParam */,
+ forceRemotable);
+
+ return pMD;
+}
+
+FieldDesc* MemberLoader::GetFieldDescFromMemberDefOrRef(
+ Module *pModule,
+ mdMemberRef MemberDefOrRef,
+ const SigTypeContext *pTypeContext,
+ BOOL strictMetadataChecks // Normally true - reflection is the one exception. Throw an exception if no generic method args given for a generic field, otherwise return the 'generic' instantiation
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ FieldDesc * pFD = NULL;
+ MethodDesc * pMD = NULL;
+ TypeHandle th;
+
+ switch (TypeFromToken(MemberDefOrRef))
+ {
+ case mdtFieldDef:
+ pFD = GetFieldDescFromFieldDef(pModule, MemberDefOrRef, strictMetadataChecks);
+ break;
+
+ case mdtMemberRef:
+ GetDescFromMemberRef(
+ pModule, MemberDefOrRef, &pMD, &pFD, pTypeContext, strictMetadataChecks, &th);
+
+ if (!pFD)
+ {
+ // The exception type and message preserved for compatibility
+ COMPlusThrow(kMissingFieldException, W("Arg_MissingFieldException"));
+ }
+ break;
+
+ default:
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ return pFD;
+}
+
+//*******************************************************************************
+BOOL MemberLoader::FM_PossibleToSkipMethod(FM_Flags flags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return ((flags & FM_SpecialVirtualMask) || (flags & FM_SpecialAccessMask));
+}
+
+//*******************************************************************************
+BOOL MemberLoader::FM_ShouldSkipMethod(DWORD dwAttrs, FM_Flags flags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ BOOL retVal = FALSE;
+
+ // If we have any special selection flags, then we need to check a little deeper.
+ if (flags & FM_SpecialVirtualMask)
+ {
+ if (((flags & FM_ExcludeVirtual) && IsMdVirtual(dwAttrs)) ||
+ ((flags & FM_ExcludeNonVirtual) && !IsMdVirtual(dwAttrs)))
+ {
+ retVal = TRUE;
+ }
+ }
+
+ // This makes for quick shifting in determining if an access mask bit matches
+ static_assert_no_msg((FM_ExcludePrivateScope >> 0x4) == 0x1);
+
+ if (flags & FM_SpecialAccessMask)
+ {
+ DWORD dwAccess = dwAttrs & mdMemberAccessMask;
+ if ((1 << dwAccess) & ((DWORD)(flags & FM_SpecialAccessMask) >> 0x4))
+ {
+ retVal = TRUE;
+ }
+ }
+
+ // Ensure that this function is kept in sync with FM_PossibleToSkipMethod
+ CONSISTENCY_CHECK(FM_PossibleToSkipMethod(flags) || !retVal);
+
+ return retVal;
+}
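+
+// Worked example of the shift trick above (assuming the FM_Exclude*
+// access flags are laid out as (1 << member-access-value) << 4, which
+// is what the static assert checks for FM_ExcludePrivateScope): a
+// private method has dwAccess == mdPrivate == 1, so (1 << dwAccess)
+// is 0x2; the method is skipped exactly when bit 0x2 is set in the
+// access flags shifted down by 4, i.e. when the caller asked to
+// exclude private members.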
+
+//*******************************************************************************
+// Given a signature, and a method declared on a class or on a parent of a class,
+// find out if the signature matches the method.
+//
+// In the normal non-generic case, we can simply perform a signature check,
+// but with generics, we need to have a properly set up Substitution, so that
+// we have a correct set of types to compare with. The idea is that either the current
+// EEClass matches up with the methoddesc, or a parent EEClass will match up.
+BOOL CompareMethodSigWithCorrectSubstitution(
+ PCCOR_SIGNATURE pSignature,
+ DWORD cSignature,
+ Module* pModule,
+ MethodDesc *pCurDeclMD,
+ const Substitution *pDefSubst,
+ MethodTable *pCurMT
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ MethodTable *pCurDeclMT = pCurDeclMD->GetMethodTable();
+ BOOL fNeedsSubstitutionUpdateDueToInstantiationDifferences = pCurDeclMT->HasInstantiation() && pCurDeclMT != pCurMT->GetCanonicalMethodTable();
+ if (!fNeedsSubstitutionUpdateDueToInstantiationDifferences)
+ {
+ PCCOR_SIGNATURE pCurMethodSig;
+ DWORD cCurMethodSig;
+
+ pCurDeclMD->GetSig(&pCurMethodSig, &cCurMethodSig);
+ return MetaSig::CompareMethodSigs(pSignature, cSignature, pModule, NULL, pCurMethodSig,
+ cCurMethodSig, pCurDeclMD->GetModule(), pDefSubst);
+ }
+ else
+ {
+ MethodTable *pParentMT = pCurMT->GetParentMethodTable();
+ if (pParentMT != NULL)
+ {
+ Substitution subst2 = pCurMT->GetSubstitutionForParent(pDefSubst);
+
+ return CompareMethodSigWithCorrectSubstitution(pSignature, cSignature, pModule, pCurDeclMD, &subst2, pParentMT);
+ }
+ return FALSE;
+ }
+}
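+
+// Illustrative example with hypothetical types: when matching against a
+// method declared on B<T> but reached through D<U> : B<List<U>>, the
+// declaring type is generic and differs from D's canonical method
+// table, so the code above climbs to the parent, accumulating the
+// substitution T := List<U> via GetSubstitutionForParent before the two
+// signatures are finally compared.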
+
+//*******************************************************************************
+// Finds a method by name and signature, where scope is the scope in which the
+// signature is defined.
+MethodDesc *
+MemberLoader::FindMethod(
+ MethodTable * pMT,
+ LPCUTF8 pszName,
+ PCCOR_SIGNATURE pSignature, DWORD cSignature,
+ Module* pModule,
+ FM_Flags flags, // = FM_Default
+ const Substitution *pDefSubst) // = NULL
+{
+
+ CONTRACT (MethodDesc *) {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(!pMT->IsTransparentProxy());
+ MODE_ANY;
+ } CONTRACT_END;
+
+    // Retrieve the right comparison function to use.
+ UTF8StringCompareFuncPtr StrCompFunc = FM_GetStrCompFunc(flags);
+
+ SString targetName(SString::Utf8Literal, pszName);
+ ULONG targetNameHash = targetName.HashCaseInsensitive();
+
+    // Statistically it's most likely for a method to be found in the non-vtable portion of this class's members, then in the
+ // vtable of this class's declared members, then in the inherited portion of the vtable, so we search backwards.
+
+ // For value classes, if it's a value class method, we want to return the duplicated MethodDesc, not the one in the vtable
+ // section. We'll find the one in the duplicate section before the one in the vtable section, so we're ok.
+
+ // Search non-vtable portion of this class first
+
+ MethodTable::MethodIterator it(pMT);
+
+    // Move the iterator to the appropriate starting point. It is important to search from the end
+ // because hide-by-sig methods found in child types must be matched before the methods they
+ // may be hiding in parent types.
+ it.MoveToEnd();
+
+ // Iterate through the methods of the current type searching for a match.
+ for (; it.IsValid(); it.Prev())
+ {
+ MethodDesc *pCurDeclMD = it.GetDeclMethodDesc();
+#ifdef _DEBUG
+ MethodTable *pCurDeclMT = pCurDeclMD->GetMethodTable();
+ CONSISTENCY_CHECK(!pMT->IsInterface() || pCurDeclMT == pMT->GetCanonicalMethodTable());
+#endif
+
+ if (FM_PossibleToSkipMethod(flags) && FM_ShouldSkipMethod(pCurDeclMD->GetAttrs(), flags))
+ {
+ continue;
+ }
+
+ if ((flags & FM_IgnoreName) != 0
+ ||
+ (pCurDeclMD->MightHaveName(targetNameHash)
+             // This check is done last since it is the most expensive part of the condition.
+ && StrCompFunc(pszName, pCurDeclMD->GetName()) == 0)
+ )
+ {
+ if (CompareMethodSigWithCorrectSubstitution(pSignature, cSignature, pModule, pCurDeclMD, pDefSubst, pMT))
+ {
+ RETURN pCurDeclMD;
+ }
+ }
+ }
+
+
+ // No inheritance on value types or interfaces
+ if (pMT->IsValueType() || pMT->IsInterface())
+ {
+ RETURN NULL;
+ }
+
+ // Recurse up the hierarchy if the method was not found.
+ //<TODO>@todo: This routine might be factored slightly to improve perf.</TODO>
+ CONSISTENCY_CHECK(pMT->CheckLoadLevel(CLASS_LOAD_APPROXPARENTS));
+
+ MethodTable *pParentMT = pMT->GetParentMethodTable();
+ if (pParentMT != NULL)
+ {
+ Substitution subst2 = pMT->GetSubstitutionForParent(pDefSubst);
+
+ MethodDesc *md = MemberLoader::FindMethod(pParentMT,
+ pszName, pSignature, cSignature, pModule, flags, &subst2);
+
+ // Don't inherit constructors from parent classes. It is important to forbid this,
+ // because the JIT needs to get the class handle from the memberRef, and when the
+ // constructor is inherited, the JIT will get the class handle for the parent class
+ // (and not allocate enough space, etc.). See bug #50035 for details.
+ if (md)
+ {
+ if (IsMdInstanceInitializer(md->GetAttrs(), pszName))
+ {
+ md = NULL;
+ }
+ }
+
+ RETURN md;
+ }
+
+ RETURN NULL;
+}
+
+//*******************************************************************************
+// This will return the MethodDesc that implements the interface method <pInterface,slotNum>.
+MethodDesc *
+MemberLoader::FindMethodForInterfaceSlot(MethodTable * pMT, MethodTable *pInterface, WORD slotNum)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pInterface));
+ PRECONDITION(pInterface->IsInterface());
+ PRECONDITION(slotNum < pInterface->GetNumVirtuals());
+ } CONTRACTL_END;
+
+ MethodDesc *pMDRet = NULL;
+
+ DispatchSlot ds(pMT->FindDispatchSlot(pInterface->GetTypeID(), (UINT32)slotNum));
+ if (!ds.IsNull()) {
+ pMDRet = ds.GetMethodDesc();
+ }
+
+ CONSISTENCY_CHECK(CheckPointer(pMDRet));
+ return pMDRet;
+}
+
+//*******************************************************************************
+MethodDesc *
+MemberLoader::FindMethod(MethodTable * pMT, LPCUTF8 pwzName, LPHARDCODEDMETASIG pwzSignature, FM_Flags flags)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(!pMT->IsTransparentProxy());
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ Signature sig = MscorlibBinder::GetSignature(pwzSignature);
+
+ return FindMethod(pMT, pwzName, sig.GetRawSig(), sig.GetRawSigLen(), MscorlibBinder::GetModule(), flags);
+}
+
+//*******************************************************************************
+MethodDesc *
+MemberLoader::FindMethod(MethodTable * pMT, mdMethodDef mb)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(!pMT->IsTransparentProxy());
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ // We have the EEClass (this) and so lets just look this up in the ridmap.
+ MethodDesc *pMD = NULL;
+ Module *pModule = pMT->GetModule();
+ PREFIX_ASSUME(pModule != NULL);
+
+ if (TypeFromToken(mb) == mdtMemberRef)
+ pMD = pModule->LookupMemberRefAsMethod(mb);
+ else
+ pMD = pModule->LookupMethodDef(mb);
+
+ if (pMD != NULL)
+ pMD->CheckRestore();
+
+ return pMD;
+}
+
+//*******************************************************************************
+MethodDesc *
+MemberLoader::FindMethodByName(MethodTable * pMT, LPCUTF8 pszName, FM_Flags flags)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(!pMT->IsTransparentProxy());
+ PRECONDITION(!pMT->IsArray());
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ // Caching of MethodDescs (impl and decl) for MethodTable slots provided significant
+ // performance gain in some reflection emit scenarios.
+ MethodTable::AllowMethodDataCaching();
+
+ // Retrieve the right comparison function to use.
+ UTF8StringCompareFuncPtr StrCompFunc = FM_GetStrCompFunc(flags);
+
+ SString targetName(SString::Utf8, pszName);
+ ULONG targetNameHash = targetName.HashCaseInsensitive();
+
+ // Scan all classes in the hierarchy, starting at the current class and
+ // moving back up towards the base.
+ while (pMT != NULL)
+ {
+ MethodDesc *pRetMD = NULL;
+
+ // Iterate through the methods searching for a match.
+ MethodTable::MethodIterator it(pMT);
+ it.MoveToEnd();
+ for (; it.IsValid(); it.Prev())
+ {
+ MethodDesc *pCurMD = it.GetDeclMethodDesc();
+
+ if (pCurMD != NULL)
+ {
+ // If we're working from the end of the vtable, we'll cover all the non-virtuals
+ // first, and so if we're supposed to ignore virtuals (see setting of the flag
+ // below) then we can just break out of the loop and go to the parent.
+ if ((flags & FM_ExcludeVirtual) && pCurMD->IsVirtual())
+ {
+ break;
+ }
+
+ if (FM_PossibleToSkipMethod(flags) && FM_ShouldSkipMethod(pCurMD->GetAttrs(), flags))
+ {
+ continue;
+ }
+
+ if (pCurMD->MightHaveName(targetNameHash) && StrCompFunc(pszName, pCurMD->GetNameOnNonArrayClass()) == 0)
+ {
+ if (pRetMD != NULL)
+ {
+ _ASSERTE(flags & FM_Unique);
+
+ // Found another method of this name but FM_Unique was given.
+ return NULL;
+ }
+
+ pRetMD = it.GetMethodDesc();
+ pRetMD->CheckRestore();
+
+ // Let's always finish iterating through this MT for FM_Unique to reveal overloads, i.e.
+ // methods with the same name. Returning the first/last method of the given name
+ // may in some cases work but it depends on the vtable order which is something we
+ // do not want. It can be easily broken by a seemingly unrelated change.
+ if (!(flags & FM_Unique))
+ return pRetMD;
+ }
+ }
+ }
+
+ if (pRetMD != NULL)
+ return pRetMD;
+
+ // Check the parent type for a matching method.
+ pMT = pMT->GetParentMethodTable();
+
+ // There is no need to check virtuals for parent types, since by definition they have the same name.
+ //
+        // Warning: This is not entirely true as virtuals can be overridden explicitly regardless of their name.
+ // We should be fine though as long as we do not use this code to find arbitrary user-defined methods.
+ flags = (FM_Flags)(flags | FM_ExcludeVirtual);
+ }
+
+ return NULL;
+}
+
+//*******************************************************************************
+MethodDesc *
+MemberLoader::FindPropertyMethod(MethodTable * pMT, LPCUTF8 pszName, EnumPropertyMethods Method, FM_Flags flags)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+ PRECONDITION(Method < 2);
+ } CONTRACTL_END;
+
+    // The format strings for the getter and setter. These must stay in sync with the
+ // EnumPropertyMethods enum defined in class.h
+ static const LPCUTF8 aFormatStrings[] =
+ {
+ "get_%s",
+ "set_%s"
+ };
+
+ CQuickBytes qbMethName;
+ size_t len = strlen(pszName) + strlen(aFormatStrings[Method]) + 1;
+ LPUTF8 strMethName = (LPUTF8) qbMethName.AllocThrows(len);
+ sprintf_s(strMethName, len, aFormatStrings[Method], pszName);
+
+ return FindMethodByName(pMT, strMethName, flags);
+}
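+
+// Illustrative usage: FindPropertyMethod(pMT, "Length", PropertyGet, FM_Default)
+// searches pMT and its parents for a method named "get_Length".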
+
+//*******************************************************************************
+MethodDesc *
+MemberLoader::FindEventMethod(MethodTable * pMT, LPCUTF8 pszName, EnumEventMethods Method, FM_Flags flags)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+ PRECONDITION(Method < 3);
+ } CONTRACTL_END;
+
+    // The format strings for the add, remove, and raise methods. These must stay in sync with the
+    // EnumEventMethods enum defined in class.h
+ static const LPCUTF8 aFormatStrings[] =
+ {
+ "add_%s",
+ "remove_%s",
+ "raise_%s"
+ };
+
+ CQuickBytes qbMethName;
+ size_t len = strlen(pszName) + strlen(aFormatStrings[Method]) + 1;
+ LPUTF8 strMethName = (LPUTF8) qbMethName.AllocThrows(len);
+ sprintf_s(strMethName, len, aFormatStrings[Method], pszName);
+
+ return FindMethodByName(pMT, strMethName, flags);
+}
+
+//*******************************************************************************
+MethodDesc *
+MemberLoader::FindConstructor(MethodTable * pMT, LPHARDCODEDMETASIG pwzSignature)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ Signature sig = MscorlibBinder::GetSignature(pwzSignature);
+
+ return FindConstructor(pMT, sig.GetRawSig(), sig.GetRawSigLen(), MscorlibBinder::GetModule());
+}
+
+//*******************************************************************************
+MethodDesc *
+MemberLoader::FindConstructor(MethodTable * pMT, PCCOR_SIGNATURE pSignature,DWORD cSignature, Module* pModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ // Array classes don't have metadata
+ if (pMT->IsArray())
+ return NULL;
+
+ MethodTable::MethodIterator it(pMT);
+
+ for (it.MoveTo(it.GetNumVirtuals()); it.IsValid(); it.Next())
+ {
+ _ASSERTE(!it.IsVirtual());
+
+ MethodDesc *pCurMethod = it.GetMethodDesc();
+ if (pCurMethod == NULL)
+ {
+ continue;
+ }
+
+ // Don't want class initializers.
+ if (pCurMethod->IsStatic())
+ {
+ continue;
+ }
+
+ DWORD dwCurMethodAttrs = pCurMethod->GetAttrs();
+ if (!IsMdRTSpecialName(dwCurMethodAttrs))
+ {
+ continue;
+ }
+
+        // Find only the constructor for this object
+ _ASSERTE(pCurMethod->GetMethodTable() == pMT->GetCanonicalMethodTable());
+
+ PCCOR_SIGNATURE pCurMethodSig;
+ DWORD cCurMethodSig;
+ pCurMethod->GetSig(&pCurMethodSig, &cCurMethodSig);
+
+ if (MetaSig::CompareMethodSigs(pSignature, cSignature, pModule, NULL, pCurMethodSig, cCurMethodSig, pCurMethod->GetModule(), NULL))
+ {
+ return pCurMethod;
+ }
+ }
+
+ return NULL;
+}
+
+#endif // DACCESS_COMPILE
+
+FieldDesc *
+MemberLoader::FindField(MethodTable * pMT, LPCUTF8 pszName, PCCOR_SIGNATURE pSignature, DWORD cSignature, Module* pModule, BOOL bCaseSensitive)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ DWORD i;
+ DWORD dwFieldDescsToScan;
+ IMDInternalImport *pInternalImport = pMT->GetMDImport(); // All explicitly declared fields in this class will have the same scope
+
+ CONSISTENCY_CHECK(pMT->CheckLoadLevel(CLASS_LOAD_APPROXPARENTS));
+
+ // Retrieve the right comparison function to use.
+ UTF8StringCompareFuncPtr StrCompFunc = bCaseSensitive ? strcmp : stricmpUTF8;
+
+ // The following assert is very important, but we need to special case it enough
+ // to allow us access to the legitimate fields of a context proxy object.
+ CONSISTENCY_CHECK(!pMT->IsTransparentProxy() ||
+ !strcmp(pszName, "actualObject") ||
+ !strcmp(pszName, "contextID") ||
+ !strcmp(pszName, "_rp") ||
+ !strcmp(pszName, "_stubData") ||
+ !strcmp(pszName, "_pMT") ||
+ !strcmp(pszName, "_pInterfaceMT") ||
+ !strcmp(pszName, "_stub"));
+
+ // Array classes don't have fields, and don't have metadata
+ if (pMT->IsArray())
+ return NULL;
+
+ SString targetName(SString::Utf8Literal, pszName);
+ ULONG targetNameHash = targetName.HashCaseInsensitive();
+
+ EEClass * pClass = pMT->GetClass();
+ MethodTable *pParentMT = pMT->GetParentMethodTable();
+
+ // Scan the FieldDescs of this class
+ if (pParentMT != NULL)
+ dwFieldDescsToScan = pClass->GetNumInstanceFields() - pParentMT->GetNumInstanceFields() + pClass->GetNumStaticFields();
+ else
+ dwFieldDescsToScan = pClass->GetNumInstanceFields() + pClass->GetNumStaticFields();
+
+ PTR_FieldDesc pFieldDescList = pClass->GetFieldDescList();
+
+ for (i = 0; i < dwFieldDescsToScan; i++)
+ {
+ LPCUTF8 szMemberName;
+ FieldDesc * pFD = &pFieldDescList[i];
+ PREFIX_ASSUME(pFD!=NULL);
+ mdFieldDef mdField = pFD->GetMemberDef();
+
+ // Check that this is a valid FieldDesc and not some random memory
+ INDEBUGIMPL(pFD->GetApproxEnclosingMethodTable()->SanityCheck());
+
+ if (!pFD->MightHaveName(targetNameHash))
+ {
+ continue;
+ }
+
+ IfFailThrow(pInternalImport->GetNameOfFieldDef(mdField, &szMemberName));
+
+ if (StrCompFunc(szMemberName, pszName) != 0)
+ {
+ continue;
+ }
+
+ if (pSignature != NULL)
+ {
+ PCCOR_SIGNATURE pMemberSig;
+ DWORD cMemberSig;
+
+ IfFailThrow(pInternalImport->GetSigOfFieldDef(mdField, &cMemberSig, &pMemberSig));
+
+ if (!MetaSig::CompareFieldSigs(
+ pMemberSig,
+ cMemberSig,
+ pMT->GetModule(),
+ pSignature,
+ cSignature,
+ pModule))
+ {
+ continue;
+ }
+ }
+
+ return pFD;
+ }
+
+ return NULL;
+}
diff --git a/src/vm/memberload.h b/src/vm/memberload.h
new file mode 100644
index 0000000000..07fb8bc130
--- /dev/null
+++ b/src/vm/memberload.h
@@ -0,0 +1,266 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: memberload.h
+//
+
+
+//
+
+//
+// ============================================================================
+
+#ifndef _MEMBERLOAD_H
+#define _MEMBERLOAD_H
+
+
+/*
+ * Include Files
+ */
+#include "eecontract.h"
+#include "argslot.h"
+#include "vars.hpp"
+#include "cor.h"
+#include "clrex.h"
+#include "hash.h"
+#include "crst.h"
+#include "declsec.h"
+#include "slist.h"
+#include "typehandle.h"
+#include "methodtable.h"
+#include "typectxt.h"
+
+//
+// This enum represents the property methods that can be passed to FindPropertyMethod().
+//
+
+enum EnumPropertyMethods
+{
+ PropertyGet = 0,
+ PropertySet = 1,
+};
+
+
+//
+// This enum represents the event methods that can be passed to FindEventMethod().
+//
+
+enum EnumEventMethods
+{
+ EventAdd = 0,
+ EventRemove = 1,
+ EventRaise = 2,
+};
+
+// The MemberLoader logic is analogous to the ClassLoader logic, i.e. it turns
+// tokens into internal EE descriptors.
+//
+// The implementations of these functions currently live in memberload.cpp.
+class MemberLoader
+{
+
+
+public:
+ static void DECLSPEC_NORETURN ThrowMissingMethodException(MethodTable* pMT,
+ LPCSTR szMember,
+ Module *pModule,
+ PCCOR_SIGNATURE pSig,
+ DWORD cSig,
+ const SigTypeContext *pTypeContext);
+
+ static void DECLSPEC_NORETURN ThrowMissingFieldException( MethodTable *pMT,
+ LPCSTR szMember);
+
+ static MethodDesc* GetMethodDescFromMemberDefOrRefOrSpec(Module *pModule,
+ mdToken MemberRefOrDefOrSpec,
+ const SigTypeContext *pTypeContext, // Context for type parameters in any parent TypeSpec and in the instantiation in a MethodSpec
+ BOOL strictMetadataChecks, // Normally true - the zapper is one exception. Throw an exception if no generic method args given for a generic method, otherwise return the 'generic' instantiation
+ BOOL allowInstParam);
+
+ static FieldDesc* GetFieldDescFromMemberDefOrRef(Module *pModule,
+ mdMemberRef MemberDefOrRef,
+ const SigTypeContext *pTypeContext,
+ BOOL strictMetadataChecks);
+
+ static MethodDesc *GetMethodDescFromMethodDef(Module *pModule,
+ mdMethodDef MethodDef, // MethodDef token
+ Instantiation classInst, // Generic arguments for declaring class
+ Instantiation methodInst, // Generic arguments for declaring method
+ BOOL forceRemotable = FALSE); // force remotable MethodDesc
+ //
+ // Methods that actually do the work
+ //
+
+ static MethodDesc* GetMethodDescFromMethodDef(Module *pModule,
+ mdToken MethodDef,
+ BOOL strictMetadataChecks);
+
+ static FieldDesc* GetFieldDescFromFieldDef(Module *pModule,
+ mdToken FieldDef,
+ BOOL strictMetadataChecks);
+
+ static void GetDescFromMemberRef(Module * pModule,
+ mdToken MemberRef,
+ MethodDesc ** ppMD,
+ FieldDesc ** ppFD,
+ const SigTypeContext *pTypeContext,
+ BOOL strictMetadataChecks,
+ TypeHandle *ppTH,
+ // Because of inheritance, the actual type stored in metadata may be a sub-class of the
+ // class that defines the member. The semantics (verification, security checks, etc.) are based on
+ // the actual type in metadata. The JIT-EE interface passes in TRUE here to get the actual type.
+ // If actualTypeRequired is false, the returned *ppTH will be the MethodDesc::GetMethodTable/FieldDesc::GetEnclosingMethodTable,
+ // except when generics are involved. The actual type will still be returned for generics since it is required
+ // for instantiation.
+ // If actualTypeRequired is true, the returned *ppTH will always be the actual type defined in metadata.
+ BOOL actualTypeRequired = FALSE,
+ PCCOR_SIGNATURE * ppTypeSig = NULL, // Optionally, return generic signatures fetched from metadata during loading.
+ ULONG * pcbTypeSig = NULL);
+
+ static MethodDesc * GetMethodDescFromMemberRefAndType(Module * pModule,
+ mdToken MemberRef,
+ MethodTable * pMT);
+
+ static FieldDesc * GetFieldDescFromMemberRefAndType(Module * pModule,
+ mdToken MemberRef,
+ MethodTable * pMT);
+
+ static MethodDesc * GetMethodDescFromMethodSpec(Module * pModule,
+ mdToken MethodSpec,
+ const SigTypeContext *pTypeContext,
+ BOOL strictMetadataChecks,
+ BOOL allowInstParam,
+ TypeHandle *ppTH,
+ BOOL actualTypeRequired = FALSE, // See comment for GetDescFromMemberRef
+ PCCOR_SIGNATURE * ppTypeSig = NULL, // Optionally, return generic signatures fetched from metadata during loading.
+ ULONG * pcbTypeSig = NULL,
+ PCCOR_SIGNATURE * ppMethodSig = NULL,
+ ULONG * pcbMethodSig = NULL);
+
+ //-------------------------------------------------------------------
+ // METHOD AND FIELD LOOKUP BY NAME AND SIGNATURE
+ //
+
+ // Used by FindMethod and varieties
+ enum FM_Flags
+ {
+ // Default behaviour is to scan all methods, virtual and non-virtual, of the current type
+ // and all non-virtual methods of all parent types.
+
+ // Default set of flags - this must always be zero.
+ FM_Default = 0x0000,
+
+ // Case sensitivity
+ FM_IgnoreCase = 0x0001, // Name matching is case insensitive
+ FM_IgnoreName = (FM_IgnoreCase << 1), // Ignore the name altogether
+
+ // USE THE FOLLOWING WITH EXTREME CAUTION. We do not want to inadvertently
+ // change binding semantics by using this without a really good reason.
+
+ // Virtuals
+ FM_ExcludeNonVirtual = (FM_IgnoreName << 1), // Skip methods that do not have mdVirtual set
+ FM_ExcludeVirtual = (FM_ExcludeNonVirtual << 1), // Skip methods that have mdVirtual set
+
+ // Accessibility.
+ // NOTE: These appear in the exact same order as mdPrivateScope ... mdPublic in corhdr.h. This enables some
+ // bit masking to quickly determine if a method qualifies in FM_ShouldSkipMethod.
+ FM_ExcludePrivateScope = (FM_ExcludeVirtual << 1), // Member not referenceable.
+ FM_ExcludePrivate = (FM_ExcludePrivateScope << 1), // Accessible only by the parent type.
+ FM_ExcludeFamANDAssem = (FM_ExcludePrivate << 1), // Accessible by sub-types only in this Assembly.
+ FM_ExcludeAssem = (FM_ExcludeFamANDAssem << 1), // Accessible by anyone in the Assembly.
+ FM_ExcludeFamily = (FM_ExcludeAssem << 1), // Accessible only by type and sub-types.
+ FM_ExcludeFamORAssem = (FM_ExcludeFamily << 1), // Accessible by sub-types anywhere, plus anyone in the Assembly.
+ FM_ExcludePublic = (FM_ExcludeFamORAssem << 1), // Accessible by anyone who has visibility to this scope.
+ FM_Unique = (FM_ExcludePublic << 1), // Make sure the method is unique for the class
+
+ // This means that FindMethod will only consider mdPublic mdVirtual methods.
+ // This is the only time when name/sig lookup will look past the first match.
+ FM_ForInterface = (FM_ExcludeNonVirtual |
+ FM_ExcludePrivateScope |
+ FM_ExcludePrivate |
+ FM_ExcludeFamANDAssem |
+ FM_ExcludeAssem |
+ FM_ExcludeFamily |
+ FM_ExcludeFamORAssem),
+ };
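+
+ // Illustrative usage: a case-insensitive lookup that also skips virtual
+ // methods would call
+ // FindMethodByName(pMT, pszName, (FM_Flags)(FM_IgnoreCase | FM_ExcludeVirtual)).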
+
+private:
+ // A mask to indicate that some filtering needs to be done.
+ static const FM_Flags FM_SpecialAccessMask = (FM_Flags) (FM_ExcludePrivateScope |
+ FM_ExcludePrivate |
+ FM_ExcludeFamANDAssem |
+ FM_ExcludeAssem |
+ FM_ExcludeFamily |
+ FM_ExcludeFamORAssem |
+ FM_ExcludePublic);
+
+ static const FM_Flags FM_SpecialVirtualMask = (FM_Flags) (FM_ExcludeNonVirtual |
+ FM_ExcludeVirtual);
+
+ // Typedef for string comparison functions.
+ typedef int (__cdecl *UTF8StringCompareFuncPtr)(const char *, const char *);
+
+ static inline UTF8StringCompareFuncPtr FM_GetStrCompFunc(DWORD dwFlags)
+ { LIMITED_METHOD_CONTRACT; return (dwFlags & FM_IgnoreCase) ? stricmpUTF8 : strcmp; }
+
+ static BOOL FM_PossibleToSkipMethod(FM_Flags flags);
+ static BOOL FM_ShouldSkipMethod(DWORD dwAttrs, FM_Flags flags);
+
+public:
+ static MethodDesc *FindMethod(
+ MethodTable * pMT,
+ LPCUTF8 pwzName,
+ LPHARDCODEDMETASIG pwzSignature,
+ FM_Flags flags = FM_Default);
+
+ // typeHnd is the type handle associated with the class being looked up.
+ // It has additional information in the case of a domain neutral class (Arrays)
+ static MethodDesc *FindMethod(
+ MethodTable * pMT,
+ LPCUTF8 pszName,
+ PCCOR_SIGNATURE pSignature,
+ DWORD cSignature,
+ Module* pModule,
+ FM_Flags flags = FM_Default,
+ const Substitution *pDefSubst = NULL);
+
+ static MethodDesc *FindMethod(MethodTable * pMT, mdMethodDef mb);
+
+ static MethodDesc *FindMethodByName(
+ MethodTable * pMT,
+ LPCUTF8 pszName,
+ FM_Flags flags = FM_Default);
+
+ static MethodDesc *FindPropertyMethod(
+ MethodTable * pMT,
+ LPCUTF8 pszName,
+ EnumPropertyMethods Method,
+ FM_Flags flags = FM_Default);
+
+ static MethodDesc *FindEventMethod(
+ MethodTable * pMT,
+ LPCUTF8 pszName,
+ EnumEventMethods Method,
+ FM_Flags flags = FM_Default);
+
+ static MethodDesc *FindMethodForInterfaceSlot(
+ MethodTable * pMT,
+ MethodTable *pInterface,
+ WORD slotNum);
+
+ // pSignature can be NULL to find any field with the given name
+ static FieldDesc *FindField(
+ MethodTable * pMT,
+ LPCUTF8 pszName,
+ PCCOR_SIGNATURE pSignature,
+ DWORD cSignature,
+ Module* pModule,
+ BOOL bCaseSensitive = TRUE);
+
+ static MethodDesc *FindConstructor(MethodTable * pMT, LPHARDCODEDMETASIG pwzSignature);
+ static MethodDesc *FindConstructor(MethodTable * pMT, PCCOR_SIGNATURE pSignature, DWORD cSignature, Module* pModule);
+};
+
+#endif // _MEMBERLOAD_H
diff --git a/src/vm/message.cpp b/src/vm/message.cpp
new file mode 100644
index 0000000000..d8bdb3d2c8
--- /dev/null
+++ b/src/vm/message.cpp
@@ -0,0 +1,1172 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** File: message.cpp
+**
+** Purpose: Encapsulates a function call frame into a message
+** object with an interface that can enumerate the
+** arguments of the message
+**
+**
+
+===========================================================*/
+#include "common.h"
+
+#ifdef FEATURE_REMOTING
+
+#include "comdelegate.h"
+#include "excep.h"
+#include "message.h"
+#include "remoting.h"
+#include "field.h"
+#include "eeconfig.h"
+#include "invokeutil.h"
+#include "callingconvention.h"
+
+//+----------------------------------------------------------------------------
+//
+// Method: CMessage::GetArgCount public
+//
+// Synopsis: Returns number of arguments in the method call
+//
+//+----------------------------------------------------------------------------
+FCIMPL1(INT32, CMessage::GetArgCount, MessageObject * pMessage)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMessage));
+ }
+ CONTRACTL_END;
+
+ LOG((LF_REMOTING, LL_INFO10, "CMessage::GetArgCount IN pMsg:0x%x\n", pMessage));
+
+ // Get the frame pointer from the object
+ MetaSig *pSig = pMessage->GetResetMetaSig();
+
+ // scan the sig for the argument count
+ INT32 ret = pSig->NumFixedArgs();
+
+ if (pMessage->GetDelegateMD())
+ ret -= 2;
+
+ LOG((LF_REMOTING, LL_INFO10, "CMessage::GetArgCount OUT ret:0x%x\n", ret));
+ return ret;
+}
+FCIMPLEND
+
+//+----------------------------------------------------------------------------
+//
+// Method: CMessage::GetArg public
+//
+// Synopsis: Use to enumerate a call's arguments
+//
+//+----------------------------------------------------------------------------
+FCIMPL2(Object*, CMessage::GetArg, MessageObject* pMessageUNSAFE, INT32 argNum)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pMessageUNSAFE != NULL);
+ }
+ CONTRACTL_END;
+
+ struct _gc {
+ OBJECTREF refRetVal;
+ MESSAGEREF pMessage;
+ } gc;
+
+ gc.refRetVal = NULL;
+ gc.pMessage = (MESSAGEREF) pMessageUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ LOG((LF_REMOTING, LL_INFO10, "CMessage::GetArg IN\n"));
+
+ MetaSig *pSig = gc.pMessage->GetResetMetaSig();
+
+ if ((UINT)argNum >= pSig->NumFixedArgs())
+ COMPlusThrow(kTargetParameterCountException);
+
+ for (INT32 i = 0; i < argNum; i++)
+ pSig->NextArg();
+
+ BOOL fIsByRef = FALSE;
+ CorElementType eType = pSig->NextArg();
+ TypeHandle ty = TypeHandle();
+ if (eType == ELEMENT_TYPE_BYREF)
+ {
+ fIsByRef = TRUE;
+ TypeHandle tycopy;
+ eType = pSig->GetByRefType(&tycopy);
+ if (eType == ELEMENT_TYPE_VALUETYPE)
+ ty = tycopy;
+ }
+ else
+ {
+ if (eType == ELEMENT_TYPE_VALUETYPE)
+ {
+ ty = pSig->GetLastTypeHandleThrowing();
+
+#ifdef ENREGISTERED_PARAMTYPE_MAXSIZE
+ if (ArgIterator::IsArgPassedByRef(ty))
+ fIsByRef = TRUE;
+#endif
+ }
+ }
+
+ if (eType == ELEMENT_TYPE_PTR)
+ COMPlusThrow(kRemotingException, W("Remoting_CantRemotePointerType"));
+
+ GetObjectFromStack(&gc.refRetVal,
+ GetStackPtr(argNum, gc.pMessage->GetFrame(), gc.pMessage->GetResetMetaSig()),
+ eType,
+ ty,
+ fIsByRef,
+ gc.pMessage->GetFrame());
+
+ LOG((LF_REMOTING, LL_INFO10, "CMessage::GetArg OUT\n"));
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(gc.refRetVal);
+}
+FCIMPLEND
+
+FCIMPL1(Object*, CMessage::GetArgs, MessageObject* pMessageUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pMessageUNSAFE != NULL);
+ }
+ CONTRACTL_END;
+
+ struct _gc {
+ PTRARRAYREF refRetVal;
+ MESSAGEREF pMessage;
+ OBJECTREF arg;
+ } gc;
+
+ gc.refRetVal = NULL;
+ gc.pMessage = (MESSAGEREF) pMessageUNSAFE;
+ gc.arg = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ LOG((LF_REMOTING, LL_INFO10, "CMessage::GetArgs IN\n"));
+
+ MetaSig *pSig = gc.pMessage->GetResetMetaSig();
+
+ // scan the sig for the argument count
+ INT32 numArgs = pSig->NumFixedArgs();
+ if (gc.pMessage->GetDelegateMD())
+ numArgs -= 2;
+
+ // Allocate an object array
+ gc.refRetVal = (PTRARRAYREF) AllocateObjectArray(numArgs, g_pObjectClass);
+
+ ArgIterator iter(pSig);
+
+ for (int index = 0; index < numArgs; index++)
+ {
+ BOOL fIsByRef = FALSE;
+ CorElementType eType;
+ PVOID addr;
+ eType = pSig->PeekArg();
+ addr = (LPBYTE) gc.pMessage->GetFrame()->GetTransitionBlock() + GetStackOffset(gc.pMessage->GetFrame(), &iter, pSig);
+
+ TypeHandle ty = TypeHandle();
+ if (eType == ELEMENT_TYPE_BYREF)
+ {
+ fIsByRef = TRUE;
+ TypeHandle tycopy;
+ // If this is a by-ref arg, GetObjectFromStack() will dereference "addr" to
+ // get the real argument address. Dereferencing now will open a gc hole if "addr"
+ // points into the gc heap, and we trigger gc between here and the point where
+ // we return the arguments.
+ //addr = *((PVOID *) addr);
+ eType = pSig->GetByRefType(&tycopy);
+ if (eType == ELEMENT_TYPE_VALUETYPE)
+ ty = tycopy;
+ }
+ else
+ {
+ if (eType == ELEMENT_TYPE_VALUETYPE)
+ {
+ ty = pSig->GetLastTypeHandleThrowing();
+
+#ifdef ENREGISTERED_PARAMTYPE_MAXSIZE
+ if (ArgIterator::IsArgPassedByRef(ty))
+ fIsByRef = TRUE;
+#endif
+ }
+ }
+
+ if (eType == ELEMENT_TYPE_PTR)
+ COMPlusThrow(kRemotingException, W("Remoting_CantRemotePointerType"));
+
+ GetObjectFromStack(&gc.arg,
+ addr,
+ eType,
+ ty,
+ fIsByRef,
+ gc.pMessage->GetFrame());
+
+ gc.refRetVal->SetAt(index, gc.arg);
+ }
+
+ LOG((LF_REMOTING, LL_INFO10, "CMessage::GetArgs OUT\n"));
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(gc.refRetVal);
+}
+FCIMPLEND
+
+//static
+void CMessage::GetObjectFromStack(OBJECTREF* ppDest, PVOID val, const CorElementType eType, TypeHandle ty, BOOL fIsByRef, FramedMethodFrame *pFrame)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(ppDest));
+ PRECONDITION(CheckPointer(val));
+ }
+ CONTRACT_END;
+
+ // Value types like Nullable<T> have special unboxing semantics.
+ //
+ if (eType == ELEMENT_TYPE_VALUETYPE)
+ {
+ //
+ // box the value class
+ //
+
+ _ASSERTE(ty.GetMethodTable()->IsValueType() || ty.GetMethodTable()->IsEnum());
+
+ _ASSERTE(!GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) ppDest) ||
+ !"(pDest) can not point to GC Heap");
+ MethodTable* pMT = ty.GetMethodTable();
+
+ if (pMT->ContainsStackPtr())
+ COMPlusThrow(kRemotingException, W("Remoting_TypeCantBeRemoted"));
+
+ PVOID* pVal;
+ if (fIsByRef)
+ pVal = (PVOID *)val;
+ else {
+ val = StackElemEndianessFixup(val, pMT->GetNumInstanceFieldBytes());
+ pVal = &val;
+ }
+
+ *ppDest = pMT->FastBox(pVal);
+ RETURN;
+ }
+
+ switch (CorTypeInfo::GetGCType(eType))
+ {
+ case TYPE_GC_NONE:
+ {
+ if(ELEMENT_TYPE_PTR == eType)
+ {
+ COMPlusThrow(kNotSupportedException);
+ }
+
+ MethodTable *pMT = MscorlibBinder::GetElementType(eType);
+
+ OBJECTREF pObj = pMT->Allocate();
+ if (fIsByRef)
+ val = *((PVOID *)val);
+ else
+ val = StackElemEndianessFixup(val, CorTypeInfo::Size(eType));
+
+ void *pDest = pObj->UnBox();
+
+#ifdef COM_STUBS_SEPARATE_FP_LOCATIONS
+ if ( !fIsByRef
+ && ( ELEMENT_TYPE_R4 == eType
+ || ELEMENT_TYPE_R8 == eType)
+ && pFrame
+ && !TransitionBlock::IsStackArgumentOffset(static_cast<int>((TADDR) val - pFrame->GetTransitionBlock())))
+ {
+ if (ELEMENT_TYPE_R4 == eType)
+ *(UINT32*)pDest = (UINT32)FPSpillToR4(val);
+ else
+ *(UINT64*)pDest = (UINT64)FPSpillToR8(val);
+ }
+ else
+#endif // COM_STUBS_SEPARATE_FP_LOCATIONS
+ {
+ memcpyNoGCRefs(pDest, val, CorTypeInfo::Size(eType));
+ }
+
+ *ppDest = pObj;
+ }
+ break;
+ case TYPE_GC_REF:
+ if (fIsByRef)
+ val = *((PVOID *)val);
+ *ppDest = ObjectToOBJECTREF(*(Object **)val);
+ break;
+ default:
+ COMPlusThrow(kRemotingException, W("Remoting_TypeCantBeRemoted"));
+ }
+
+ RETURN;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CMessage::PropagateOutParameters private
+//
+// Synopsis: Copy back data for in/out parameters and the return value
+//
+//+----------------------------------------------------------------------------
+FCIMPL3(void, CMessage::PropagateOutParameters, MessageObject* pMessageUNSAFE, ArrayBase* pOutPrmsUNSAFE, Object* RetValUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pMessageUNSAFE != NULL);
+ }
+ CONTRACTL_END;
+
+ struct _gc
+ {
+ MESSAGEREF pMessage;
+ BASEARRAYREF pOutPrms;
+ OBJECTREF RetVal;
+ OBJECTREF param;
+ } gc;
+ gc.pMessage = (MESSAGEREF) pMessageUNSAFE;
+ gc.pOutPrms = (BASEARRAYREF) pOutPrmsUNSAFE;
+ gc.RetVal = (OBJECTREF) RetValUNSAFE;
+ gc.param = NULL;
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ LOG((LF_REMOTING, LL_INFO10, "CMessage::PropagateOutParameters IN\n"));
+
+ // Retrieve the message's flags.
+ INT32 flags = gc.pMessage->GetFlags();
+
+ // Construct an ArgIterator from the message's frame and sig.
+ MetaSig *pSig = gc.pMessage->GetResetMetaSig();
+ FramedMethodFrame *pFrame = gc.pMessage->GetFrame();
+ ArgIterator argit(pSig);
+
+ // Move the return value into an object to return to the client.
+
+ // Propagate the return value only if the message is not a ctor message.
+ // Check whether the return type has a return buffer associated with it.
+ if ((flags & MSGFLG_CTOR) == 0 && pSig->GetReturnType() != ELEMENT_TYPE_VOID)
+ {
+ if (argit.HasRetBuffArg())
+ {
+ // Copy from RetVal into the retBuff.
+ INT64 retVal = CopyOBJECTREFToStack(
+ *(void**)(pFrame->GetTransitionBlock() + argit.GetRetBuffArgOffset()),
+ &gc.RetVal,
+ pSig->GetReturnType(),
+ TypeHandle(),
+ pSig,
+ TRUE); // copy class contents
+
+ // Copy the return value
+ *(ARG_SLOT *)(gc.pMessage->GetFrame()->GetReturnValuePtr()) = retVal;
+ }
+ else
+ {
+#ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
+ if (argit.HasNonStandardByvalReturn())
+ {
+ //
+ // in these cases, we put the pointer to the return buffer into the frame's
+ // return value slot
+ //
+ CopyOBJECTREFToStack(gc.pMessage->GetFrame()->GetReturnValuePtr(),
+ &gc.RetVal,
+ pSig->GetReturnType(),
+ TypeHandle(),
+ pSig,
+ TRUE); // copy class contents
+ }
+ else
+#endif // ENREGISTERED_RETURNTYPE_MAXSIZE
+ {
+ // There is no separate return buffer, the retVal should fit in
+ // an INT64.
+ INT64 retVal = CopyOBJECTREFToStack(
+ NULL, //no return buff
+ &gc.RetVal,
+ pSig->GetReturnType(),
+ TypeHandle(),
+ pSig,
+ FALSE); //don't copy class contents
+
+ // Copy the return value
+ *(ARG_SLOT *)(gc.pMessage->GetFrame()->GetReturnValuePtr()) = retVal;
+ }
+ }
+ }
+
+ // Refetch all the variables as GC could have happened after call to
+ // CopyOBJECTREFToStack
+ UINT32 cOutParams = (gc.pOutPrms != NULL) ? gc.pOutPrms->GetNumComponents() : 0;
+ if (cOutParams > 0)
+ {
+ PVOID *argAddr;
+ UINT32 i = 0;
+ MetaSig syncSig(gc.pMessage->GetMethodDesc());
+ MetaSig *pSyncSig = NULL;
+
+ if (flags & MSGFLG_ENDINVOKE)
+ {
+ pSyncSig = &syncSig;
+ }
+
+ for (i=0; i<cOutParams; i++)
+ {
+ if (pSyncSig)
+ {
+ CorElementType typ = pSyncSig->NextArg();
+ if (typ == ELEMENT_TYPE_END)
+ {
+ break;
+ }
+
+ if (typ != ELEMENT_TYPE_BYREF)
+ {
+ continue;
+ }
+
+ argAddr = (PVOID *)(pFrame->GetTransitionBlock() + argit.GetNextOffset());
+ }
+ else
+ {
+ int ofs = argit.GetNextOffset();
+ if (ofs == TransitionBlock::InvalidOffset)
+ {
+ break;
+ }
+
+ if (argit.GetArgType() != ELEMENT_TYPE_BYREF)
+ {
+ continue;
+ }
+
+ argAddr = (PVOID *)(pFrame->GetTransitionBlock() + ofs);
+ }
+
+ TypeHandle ty = TypeHandle();
+ CorElementType brType = pSig->GetByRefType(&ty);
+
+ gc.param = ((OBJECTREF *) gc.pOutPrms->GetDataPtr())[i];
+
+ CopyOBJECTREFToStack(
+ *argAddr,
+ &gc.param,
+ brType,
+ ty,
+ pSig,
+ ty.IsNull() ? FALSE : ty.IsValueType());
+ }
+
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+INT64 CMessage::CopyOBJECTREFToStack(PVOID pvDest, OBJECTREF *pSrc, CorElementType typ, TypeHandle ty, MetaSig *pSig, BOOL fCopyClassContents)
+{
+ INT64 ret = 0;
+
+ CONTRACT(INT64)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pvDest, NULL_OK));
+ PRECONDITION(CheckPointer(pSrc));
+ PRECONDITION(typ != ELEMENT_TYPE_VOID);
+ PRECONDITION(CheckPointer(pSig));
+ }
+ CONTRACT_END;
+
+ if (fCopyClassContents)
+ {
+ // We have to copy the contents of a value class to pvDest
+
+ // write unboxed version back to memory provided by the client
+ if (pvDest)
+ {
+ if (ty.IsNull())
+ ty = pSig->GetRetTypeHandleThrowing();
+
+ if (*pSrc == NULL && !Nullable::IsNullableType(ty))
+ COMPlusThrow(kRemotingException, W("Remoting_Message_BadRetValOrOutArg"));
+
+ ty.GetMethodTable()->UnBoxIntoUnchecked(pvDest, *pSrc);
+
+ // return the object so it can be stored in the frame and
+ // propagated to the root set
+ // pSrc may not be doubleword aligned!
+ *(OBJECTREF*)&ret = *pSrc;
+ }
+ }
+ else
+ {
+ // We have either a real OBJECTREF or something that does not have
+ // a return buffer associated
+
+ // Check if it is an ObjectRef (from the GC heap)
+ if (CorTypeInfo::IsObjRef(typ))
+ {
+ if ((*pSrc!=NULL) && ((*pSrc)->IsTransparentProxy()))
+ {
+ if (ty.IsNull())
+ ty = pSig->GetRetTypeHandleThrowing();
+
+ // CheckCast ensures that the returned object (proxy) gets
+ // refined to the level expected by the caller of the method
+ if (!CRemotingServices::CheckCast(*pSrc, ty))
+ COMPlusThrow(kInvalidCastException, W("Arg_ObjObj"));
+ }
+ if (pvDest)
+ SetObjectReferenceUnchecked((OBJECTREF *)pvDest, *pSrc);
+
+ // pSrc may not be double-word aligned!
+ *(OBJECTREF*)&ret = *pSrc;
+ }
+ else
+ {
+ // Note: this assert includes VALUETYPE because for Enums
+ // HasRetBuffArg() returns false since the normalized type is I4
+ // so we end up here ... but GetReturnType() returns VALUETYPE
+ // Almost all VALUETYPEs will go through the fCopyClassContents
+ // codepath instead of here.
+ // Also, IsPrimitiveType() does not check for IntPtr, UIntPtr, etc.;
+ // there is a note in siginfo.hpp about that ... hence we have
+ // ELEMENT_TYPE_I, ELEMENT_TYPE_U.
+ _ASSERTE(
+ CorTypeInfo::IsPrimitiveType(typ)
+ || (typ == ELEMENT_TYPE_VALUETYPE)
+ || (typ == ELEMENT_TYPE_I)
+ || (typ == ELEMENT_TYPE_U)
+ || (typ == ELEMENT_TYPE_FNPTR)
+ );
+
+ // REVIEW: For a "ref int" arg, if a nasty sink replaces the boxed
+ // int with a null OBJECTREF, this is where we check. We need to be
+ // uniform in our policy w.r.t. this (throw vs. ignore).
+ // The 'if' block above throws, CallFieldAccessor also has this
+ // problem.
+ if (*pSrc != NULL)
+ {
+ PVOID pvSrcData = (*pSrc)->GetData();
+ int cbsize = gElementTypeInfo[typ].m_cbSize;
+ INT64 retBuff;
+
+ // ElementTypeInfo.m_cbSize can be less than zero for cases that need
+ // special handling (e.g. value types) to be sure of size (see
+ // siginfo.cpp). Luckily, the type handle has the actual byte count,
+ // so we look there for such cases.
+ if (cbsize < 0)
+ {
+ if (ty.IsNull())
+ ty = pSig->GetRetTypeHandleThrowing();
+
+ _ASSERTE(!ty.IsNull());
+ cbsize = ty.GetSize();
+
+ // We are returning this value class in an INT64, so it had better be small enough.
+ _ASSERTE(cbsize <= (int) sizeof(INT64));
+ // Unbox it into a local buffer (this covers the Nullable<T> case),
+ // then do the endianness morph below.
+ ty.GetMethodTable()->UnBoxIntoUnchecked(&retBuff, *pSrc);
+
+ pvSrcData = &retBuff;
+ }
+
+ if (pvDest)
+ {
+ memcpyNoGCRefs(pvDest, pvSrcData, cbsize);
+ }
+
+ // need to sign-extend signed types
+ bool fEndianessFixup = false;
+ switch (typ) {
+ case ELEMENT_TYPE_I1:
+ ret = *(INT8*)pvSrcData;
+ fEndianessFixup = true;
+ break;
+ case ELEMENT_TYPE_I2:
+ ret = *(INT16*)pvSrcData;
+ fEndianessFixup = true;
+ break;
+ case ELEMENT_TYPE_I4:
+ ret = *(INT32*)pvSrcData;
+ fEndianessFixup = true;
+ break;
+ default:
+ memcpyNoGCRefs(StackElemEndianessFixup(&ret, cbsize), pvSrcData, cbsize);
+ break;
+ }
+
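+ // On 32-bit big-endian platforms the sign-extended result sits in the low
+ // half of ret; shift it into the high half, where a caller reading a 32-bit
+ // value from the 64-bit return slot expects it.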
+#if !defined(_WIN64) && BIGENDIAN
+ if (fEndianessFixup)
+ ret <<= 32;
+#endif
+ }
+ }
+ }
+
+ RETURN(ret);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CMessage::GetReturnValue
+//
+// Synopsis: Pull return value off the stack
+//
+//+----------------------------------------------------------------------------
+FCIMPL1(Object*, CMessage::GetReturnValue, MessageObject* pMessageUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pMessageUNSAFE != NULL);
+ }
+ CONTRACTL_END;
+
+ struct _gc {
+ OBJECTREF refRetVal;
+ MESSAGEREF pMessage;
+ } gc;
+
+ gc.refRetVal = NULL;
+ gc.pMessage = (MESSAGEREF) pMessageUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ MetaSig* pSig = gc.pMessage->GetResetMetaSig();
+ FramedMethodFrame* pFrame = gc.pMessage->GetFrame();
+
+ ArgIterator argit(pSig);
+
+ PVOID pvRet;
+ if (argit.HasRetBuffArg())
+ {
+ pvRet = *(PVOID *)(pFrame->GetTransitionBlock() + argit.GetRetBuffArgOffset());
+ }
+ else
+ {
+ pvRet = pFrame->GetReturnValuePtr();
+ }
+
+ CorElementType eType = pSig->GetReturnType();
+ TypeHandle ty;
+ if (eType == ELEMENT_TYPE_VALUETYPE)
+ {
+ ty = pSig->GetRetTypeHandleThrowing();
+ }
+ else
+ {
+ ty = TypeHandle();
+ }
+
+ GetObjectFromStack(&gc.refRetVal,
+ pvRet,
+ eType,
+ ty);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(gc.refRetVal);
+}
+FCIMPLEND
+
+
+
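+//+----------------------------------------------------------------------------
+//
+// Method: CMessage::Dispatch public
+//
+// Synopsis: Invoke the method encapsulated by the message on the given
+// server object; returns FALSE for BeginInvoke/EndInvoke/one-way
+// messages, which are not dispatched here
+//
+//+----------------------------------------------------------------------------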
+FCIMPL2(FC_BOOL_RET, CMessage::Dispatch, MessageObject* pMessageUNSAFE, Object* pServerUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pMessageUNSAFE != NULL);
+ PRECONDITION(pServerUNSAFE != NULL);
+ }
+ CONTRACTL_END;
+
+ BOOL fDispatched = FALSE;
+ MESSAGEREF pMessage = (MESSAGEREF) pMessageUNSAFE;
+ OBJECTREF pServer = (OBJECTREF) pServerUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_2(pMessage, pServer);
+
+ MetaSig *pSig = pMessage->GetResetMetaSig();
+
+ if (pMessage->GetFlags() & (MSGFLG_BEGININVOKE | MSGFLG_ENDINVOKE | MSGFLG_ONEWAY))
+ {
+ fDispatched = FALSE;
+ goto lExit;
+ }
+
+ {
+ ArgIterator argit(pSig);
+
+ UINT nStackBytes;
+ MethodDesc *pMD;
+ PCODE pTarget;
+
+ nStackBytes = argit.SizeOfFrameArgumentArray();
+ pMD = pMessage->GetMethodDesc();
+
+ // Get the address of the code
+ pTarget = pMD->GetCallTarget(&(pServer));
+
+#ifdef PROFILING_SUPPORTED
+ // If we're profiling, notify the profiler that we're about to invoke the remoting target
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackRemoting());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RemotingServerInvocationStarted();
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+#ifdef CALLDESCR_FPARGREGS
+ // @TODO: This code badly needs refactorization. It's largely shared with MethodDesc::CallDescr in
+ // method.cpp and CallDescrWithObjectArray in stackbuildersync.cpp.
+
+ FloatArgumentRegisters *pFloatArgumentRegisters = NULL;
+
+ // Iterate through all the args looking for floating point values that will be passed in (FP) argument
+ // registers.
+ int ofs;
+ while ((ofs = argit.GetNextOffset()) != TransitionBlock::InvalidOffset)
+ {
+ if (TransitionBlock::IsFloatArgumentRegisterOffset(ofs))
+ {
+ // Found a floating point argument register. The first time we find this we point
+ // pFloatArgumentRegisters to the part of the frame where these values were spilled (we don't do
+ // this unconditionally since the call worker can optimize out the copy of the floating point
+ // registers if none are involved at all).
+ pFloatArgumentRegisters = (FloatArgumentRegisters*)(pMessage->GetFrame()->GetTransitionBlock() +
+ TransitionBlock::GetOffsetOfFloatArgumentRegisters());
+
+ // That's all we need to do, CallDescrWorkerWithHandler will automatically pick up all the
+ // floating point argument values now.
+ break;
+ }
+ }
+#endif // CALLDESCR_FPARGREGS
+
+#if defined(CALLDESCR_REGTYPEMAP) || defined(COM_STUBS_SEPARATE_FP_LOCATIONS)
+ DWORD_PTR dwRegTypeMap = 0;
+
+ {
+ int ofs;
+ while ((ofs = argit.GetNextOffset()) != TransitionBlock::InvalidOffset)
+ {
+ int regArgNum = TransitionBlock::GetArgumentIndexFromOffset(ofs);
+
+ if (regArgNum >= NUM_ARGUMENT_REGISTERS)
+ break;
+
+ CorElementType argTyp = argit.GetArgType();
+
+#ifdef CALLDESCR_REGTYPEMAP
+ FillInRegTypeMap(ofs, argTyp, (BYTE*)&dwRegTypeMap);
+#endif
+
+#ifdef COM_STUBS_SEPARATE_FP_LOCATIONS
+ // If this is a floating point argument, it was stored in a
+ // separate area of the frame. Copy it into the argument array.
+ if (ELEMENT_TYPE_R4 == argTyp || ELEMENT_TYPE_R8 == argTyp)
+ {
+ TADDR pTransitionBlock = pMessage->GetFrame()->GetTransitionBlock();
+
+ PVOID pDest = (PVOID)(pTransitionBlock + ofs);
+ PVOID pSrc = (PVOID)(pTransitionBlock + pMessage->GetFrame()->GetFPArgOffset(regArgNum));
+
+ ARG_SLOT val;
+ if (ELEMENT_TYPE_R4 == argTyp)
+ val = FPSpillToR4(pSrc);
+ else
+ val = FPSpillToR8(pSrc);
+
+ *(ARG_SLOT*)pDest = val;
+ }
+#endif
+ }
+ }
+#endif // CALLDESCR_REGTYPEMAP || COM_STUBS_SEPARATE_FP_LOCATIONS
+
+ CallDescrData callDescrData;
+
+ callDescrData.pSrc = (BYTE*)pMessage->GetFrame()->GetTransitionBlock() + sizeof(TransitionBlock);
+ callDescrData.numStackSlots = nStackBytes / STACK_ELEM_SIZE;
+#ifdef CALLDESCR_ARGREGS
+ callDescrData.pArgumentRegisters = (ArgumentRegisters*)(pMessage->GetFrame()->GetTransitionBlock() + TransitionBlock::GetOffsetOfArgumentRegisters());
+#endif
+#ifdef CALLDESCR_FPARGREGS
+ callDescrData.pFloatArgumentRegisters = pFloatArgumentRegisters;
+#endif
+#ifdef CALLDESCR_REGTYPEMAP
+ callDescrData.dwRegTypeMap = dwRegTypeMap;
+#endif
+ callDescrData.fpReturnSize = argit.GetFPReturnSize();
+ callDescrData.pTarget = pTarget;
+
+ CallDescrWorkerWithHandler(&callDescrData);
+
+#ifdef PROFILING_SUPPORTED
+ // If we're profiling, notify the profiler that the invocation of the remoting target has returned
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackRemoting());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RemotingServerInvocationReturned();
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ memcpyNoGCRefs(pMessage->GetFrame()->GetReturnValuePtr(), &callDescrData.returnValue, sizeof(callDescrData.returnValue));
+
+ fDispatched = TRUE;
+ }
+
+lExit: ;
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(fDispatched);
+}
+FCIMPLEND
+
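+//+----------------------------------------------------------------------------
+//
+// Method: CMessage::AppendAssemblyName private
+//
+// Synopsis: Append an assembly name to the buffer, separated from any
+// existing contents by ASSEMBLY_SEPARATOR_CHAR
+//
+//+----------------------------------------------------------------------------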
+void CMessage::AppendAssemblyName(CQuickBytes &out, const CHAR* str)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(str));
+ }
+ CONTRACTL_END;
+
+ SIZE_T len = strlen(str) * sizeof(CHAR);
+ SIZE_T oldSize = out.Size();
+ out.ReSizeThrows(oldSize + len + 2);
+ CHAR * cur = (CHAR *) ((BYTE *) out.Ptr() + oldSize - 1);
+ if (*cur)
+ cur++;
+
+ *cur = ASSEMBLY_SEPARATOR_CHAR;
+ memcpy(cur + 1, str, len);
+ cur += (len + 1);
+ *cur = '\0';
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CMessage::GetAsyncBeginInfo
+//
+// Synopsis: Pull the AsyncBeginInfo object from an async call
+//
+//+----------------------------------------------------------------------------
+FCIMPL3(void, CMessage::GetAsyncBeginInfo, MessageObject* pMessageUNSAFE, OBJECTREF* ppACBD, OBJECTREF* ppState)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pMessageUNSAFE != NULL);
+ }
+ CONTRACTL_END;
+
+ MESSAGEREF pMessage = (MESSAGEREF) pMessageUNSAFE;
+ _ASSERTE(pMessage->GetFlags() & MSGFLG_BEGININVOKE);
+
+ HELPER_METHOD_FRAME_BEGIN_1(pMessage);
+
+ LOG((LF_REMOTING, LL_INFO10, "CMessage::GetAsyncBeginInfo IN\n"));
+
+ if (pMessage == NULL)
+ COMPlusThrow(kNullReferenceException, W("NullReference_This"));
+
+ MetaSig *pSig = pMessage->GetResetMetaSig();
+
+ FramedMethodFrame * pFrame = pMessage->GetFrame();
+ ArgIterator argit(pSig);
+
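+ // For a BeginInvoke call the last two arguments are the AsyncCallback
+ // delegate and the caller-supplied state object; walk all arguments to
+ // locate their offsets.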
+ if ((ppACBD != NULL) || (ppState != NULL))
+ {
+ int ofs;
+ int last = TransitionBlock::InvalidOffset, secondtolast = TransitionBlock::InvalidOffset;
+ while ((ofs = argit.GetNextOffset()) != TransitionBlock::InvalidOffset)
+ {
+ secondtolast = last;
+ last = ofs;
+ }
+ _ASSERTE(secondtolast != TransitionBlock::InvalidOffset);
+ if (secondtolast != TransitionBlock::InvalidOffset && ppACBD != NULL)
+ SetObjectReferenceUnchecked(ppACBD, ObjectToOBJECTREF(*(Object **)(pFrame->GetTransitionBlock() + secondtolast)));
+ PREFIX_ASSUME(last != TransitionBlock::InvalidOffset);
+ if (ppState != NULL)
+ SetObjectReferenceUnchecked(ppState, ObjectToOBJECTREF(*(Object **)(pFrame->GetTransitionBlock() + last)));
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+//+----------------------------------------------------------------------------
+//
+// Method: CMessage::GetAsyncResult
+//
+// Synopsis: Pull the AsyncResult from an async call
+//
+//+----------------------------------------------------------------------------
+FCIMPL1(LPVOID, CMessage::GetAsyncResult, MessageObject* pMessageUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pMessageUNSAFE != NULL);
+ }
+ CONTRACTL_END;
+
+ LPVOID retVal = NULL;
+ MESSAGEREF pMessage = (MESSAGEREF) pMessageUNSAFE;
+ _ASSERTE(pMessage->GetFlags() & MSGFLG_ENDINVOKE);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(pMessage);
+
+ LOG((LF_REMOTING, LL_INFO10, "CMessage::GetAsyncResult IN\n"));
+
+ retVal = GetLastArgument(&pMessage);
+
+ HELPER_METHOD_FRAME_END();
+ return retVal;
+}
+FCIMPLEND
+
+//+----------------------------------------------------------------------------
+//
+// Method: CMessage::GetAsyncObject
+//
+// Synopsis: Pull the AsyncObject from an async call
+//
+//+----------------------------------------------------------------------------
+FCIMPL1(Object*, CMessage::GetAsyncObject, MessageObject* pMessageUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pMessageUNSAFE != NULL);
+ }
+ CONTRACTL_END;
+
+ Object* pobjRetVal = NULL;
+ MESSAGEREF pMessage = (MESSAGEREF) pMessageUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(pMessage);
+
+ LOG((LF_REMOTING, LL_INFO10, "CMessage::GetAsyncObject IN\n"));
+
+ FramedMethodFrame *pFrame = pMessage->GetFrame();
+ MetaSig *pSig = pMessage->GetResetMetaSig();
+ ArgIterator argit(pSig);
+
+ pobjRetVal = *(Object**)(pFrame->GetTransitionBlock() + argit.GetThisOffset());
+
+ HELPER_METHOD_FRAME_END();
+ return pobjRetVal;
+}
+FCIMPLEND
+
+//+----------------------------------------------------------------------------
+//
+// Method: CMessage::GetLastArgument private
+//
+// Synopsis: Pull the last (pointer-sized) argument off the stack
+//
+//+----------------------------------------------------------------------------
+LPVOID CMessage::GetLastArgument(MESSAGEREF *pMsg)
+{
+ CONTRACT(LPVOID)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMsg));
+ POSTCONDITION(*pMsg != NULL); // CheckPointer doesn't seem to work here.
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ FramedMethodFrame *pFrame = (*pMsg)->GetFrame();
+ MetaSig *pSig = (*pMsg)->GetResetMetaSig();
+
+ ArgIterator argit(pSig);
+ int arg;
+ int backadder = TransitionBlock::InvalidOffset;
+ while ((arg = argit.GetNextOffset()) != TransitionBlock::InvalidOffset)
+ backadder = arg;
+
+ _ASSERTE(backadder != TransitionBlock::InvalidOffset);
+
+ RETURN *(LPVOID *)(pFrame->GetTransitionBlock() + backadder);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CMessage::DebugOut public
+//
+// Synopsis: Temporary debug output until the class libraries provide one.
+//
+//+----------------------------------------------------------------------------
+FCIMPL1(void, CMessage::DebugOut, StringObject* pOutUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pOutUNSAFE != NULL);
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ STRINGREF pOut = (STRINGREF) pOutUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_1(pOut);
+
+ static int fMessageDebugOut = 0;
+
+ if (fMessageDebugOut == 0)
+ fMessageDebugOut = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MessageDebugOut) ? 1 : -1;
+
+ if (fMessageDebugOut == 1)
+ WszOutputDebugString(pOut->GetBuffer());
+
+ HELPER_METHOD_FRAME_END();
+#endif
+
+ FCUnique(0x76);
+}
+FCIMPLEND
+
+//+----------------------------------------------------------------------------
+//
+// Method: CMessage::HasVarArgs public
+//
+// Synopsis: Return TRUE if the method is a varargs method
+//
+//+----------------------------------------------------------------------------
+FCIMPL1(FC_BOOL_RET, CMessage::HasVarArgs, MessageObject * pMessage)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMessage));
+ }
+ CONTRACTL_END;
+
+ BOOL result;
+
+ // The entire path needs to be SO_TOLERANT, or a hard SO probe must be placed
+ // here, since there is no failure path.
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ result = pMessage->GetMethodDesc()->IsVarArg();
+
+ FC_RETURN_BOOL(result);
+}
+FCIMPLEND
+
+
+//static
+int CMessage::GetStackOffset (FramedMethodFrame *pFrame, ArgIterator *pArgIter, MetaSig *pSig)
+{
+ CONTRACT(int)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pFrame));
+ PRECONDITION(CheckPointer(pArgIter));
+ PRECONDITION(CheckPointer(pSig));
+ }
+ CONTRACT_END;
+
+ LOG((LF_REMOTING, LL_INFO100,
+ "CMessage::GetStackOffset pFrame:0x%x, pArgIter:0x%x\n",
+ pFrame, pArgIter));
+
+ int ret = pArgIter->GetNextOffset();
+
+#ifdef COM_STUBS_SEPARATE_FP_LOCATIONS
+ int typ = pArgIter->GetArgType();
+ // REVISIT_TODO do we need to handle this?
+ if ((ELEMENT_TYPE_R4 == typ || ELEMENT_TYPE_R8 == typ) &&
+ TransitionBlock::IsArgumentRegisterOffset(ret))
+ {
+ int iFPArg = TransitionBlock::GetArgumentIndexFromOffset(ret);
+
+ ret = static_cast<int>(pFrame->GetFPArgOffset(iFPArg));
+ }
+#endif // COM_STUBS_SEPARATE_FP_LOCATIONS
+
+ RETURN ret;
+}
+
+
+//+----------------------------------------------------------------------------
+//
+// Method: CMessage::GetStackPtr private
+//
+// Synopsis: Figure out where on the stack a parameter is stored
+//
+// Parameters: ndx - the parameter index (zero-based)
+// pFrame - stack frame pointer (FramedMethodFrame)
+// pSig - method signature, used to determine parameter sizes
+//
+//
+//<REVISIT_TODO>
+// CODEWORK: Currently we assume all parameters to be 32-bit intrinsics
+// or 32-bit pointers. Value classes are not handled correctly.
+//</REVISIT_TODO>
+//+----------------------------------------------------------------------------
+PVOID CMessage::GetStackPtr(INT32 ndx, FramedMethodFrame *pFrame, MetaSig *pSig)
+{
+ CONTRACT(PVOID)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pFrame));
+ PRECONDITION(CheckPointer(pSig));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ LOG((LF_REMOTING, LL_INFO100,
+ "CMessage::GetStackPtr IN ndx:0x%x, pFrame:0x%x, pSig:0x%x\n",
+ ndx, pFrame, pSig));
+
+ ArgIterator iter(pSig);
+ PVOID ret = NULL;
+
+ // <REVISIT_TODO>CODEWORK:: detect and optimize for sequential access</REVISIT_TODO>
+ _ASSERTE((UINT)ndx < pSig->NumFixedArgs());
+ for (int i=0; i<=ndx; i++)
+ ret = (BYTE*)pFrame->GetTransitionBlock() + GetStackOffset(pFrame, &iter, pSig);
+
+ RETURN ret;
+}
+
+#endif //FEATURE_REMOTING
diff --git a/src/vm/message.h b/src/vm/message.h
new file mode 100644
index 0000000000..0ac406ea84
--- /dev/null
+++ b/src/vm/message.h
@@ -0,0 +1,201 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** File: message.h
+**
+** Purpose: Encapsulates a function call frame into a message
+** object with an interface that can enumerate the
+** arguments of the message
+**
+**
+
+===========================================================*/
+#ifndef ___MESSAGE_H___
+#define ___MESSAGE_H___
+
+#ifndef FEATURE_REMOTING
+#error FEATURE_REMOTING is not set, please do not include message.h
+#endif
+
+#include "fcall.h"
+
+//+----------------------------------------------------------
+//
+// Struct: MessageObject
+//
+// Synopsis: Physical mapping of the System.Runtime.Remoting.Message
+// object.
+//
+//
+//------------------------------------------------------------
+class MessageObject : public Object
+{
+ friend class MscorlibBinder;
+
+public:
+ MetaSig* GetResetMetaSig()
+ {
+ CONTRACT(MetaSig*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMetaSigHolder));
+ POSTCONDITION(CheckPointer(RETVAL));
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ pMetaSigHolder->Reset();
+ RETURN pMetaSigHolder;
+ }
+
+ FramedMethodFrame *GetFrame()
+ {
+ CONTRACT(FramedMethodFrame *)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN pFrame;
+ }
+
+ MethodDesc *GetMethodDesc()
+ {
+ CONTRACT(MethodDesc *)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ POSTCONDITION(CheckPointer(RETVAL));
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ RETURN pMethodDesc;
+ }
+
+ MethodDesc *GetDelegateMD()
+ {
+ CONTRACT(MethodDesc *)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ RETURN pDelegateMD;
+ }
+
+ INT32 GetFlags()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ return iFlags;
+ }
+
+private:
+ STRINGREF pMethodName; // Method name
+ BASEARRAYREF pMethodSig; // Array of parameter types
+ OBJECTREF pMethodBase; // Reflection method object
+ OBJECTREF pHashTable; // hashtable for properties
+ STRINGREF pURI; // object's URI
+ STRINGREF pTypeName; // not used in VM, placeholder
+ OBJECTREF pFault; // exception
+
+ OBJECTREF pID; // not used in VM, placeholder
+ OBJECTREF pSrvID; // not used in VM, placeholder
+ OBJECTREF pArgMapper; // not used in VM, placeholder
+ OBJECTREF pCallCtx; // not used in VM, placeholder
+
+ FramedMethodFrame *pFrame;
+ MethodDesc *pMethodDesc;
+ MetaSig *pMetaSigHolder;
+ MethodDesc *pDelegateMD;
+ TypeHandle thGoverningType;
+ INT32 iFlags;
+ CLR_BOOL initDone; // called the native Init routine
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<MessageObject> MESSAGEREF;
+#else
+typedef MessageObject* MESSAGEREF;
+#endif
+
+// *******
+// Note: Needs to be in sync with flags in Message.cs
+// *******
+enum
+{
+ MSGFLG_BEGININVOKE = 0x01,
+ MSGFLG_ENDINVOKE = 0x02,
+ MSGFLG_CTOR = 0x04,
+ MSGFLG_ONEWAY = 0x08,
+ MSGFLG_FIXEDARGS = 0x10,
+ MSGFLG_VARARGS = 0x20
+};
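+
+// These flags are tested as a bit mask; e.g. CMessage::Dispatch rejects
+// asynchronous calls via
+// (GetFlags() & (MSGFLG_BEGININVOKE | MSGFLG_ENDINVOKE | MSGFLG_ONEWAY)).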
+
+//+----------------------------------------------------------
+//
+// Class: CMessage
+//
+// Synopsis: EE counterpart to Microsoft.Runtime.Message.
+// Encapsulates code to read a function call
+// frame into an interface that can enumerate
+// the parameters.
+//
+//
+//------------------------------------------------------------
+class CMessage
+{
+public:
+ // public fcalls.
+ static FCDECL1(INT32, GetArgCount, MessageObject *pMsg);
+ static FCDECL2(Object*, GetArg, MessageObject* pMessage, INT32 argNum);
+ static FCDECL1(Object*, GetArgs, MessageObject* pMessageUNSAFE);
+ static FCDECL3(void, PropagateOutParameters, MessageObject* pMessageUNSAFE, ArrayBase* pOutPrmsUNSAFE, Object* RetValUNSAFE);
+ static FCDECL1(Object*, GetReturnValue, MessageObject* pMessageUNSAFE);
+ static FCDECL3(void, GetAsyncBeginInfo, MessageObject* pMessageUNSAFE, OBJECTREF* ppACBD, OBJECTREF* ppState);
+ static FCDECL1(LPVOID, GetAsyncResult, MessageObject* pMessageUNSAFE);
+ static FCDECL1(Object*, GetAsyncObject, MessageObject* pMessageUNSAFE);
+ static FCDECL1(void, DebugOut, StringObject* pOutUNSAFE);
+ static FCDECL2(FC_BOOL_RET, Dispatch, MessageObject* pMessageUNSAFE, Object* pServerUNSAFE);
+ static FCDECL1(FC_BOOL_RET, HasVarArgs, MessageObject * poMessage);
+
+public:
+ // public helper
+ static void GetObjectFromStack(OBJECTREF* ppDest, PVOID val, const CorElementType eType, TypeHandle ty, BOOL fIsByRef = FALSE, FramedMethodFrame *pFrame = NULL);
+
+private:
+ // private helpers
+ static PVOID GetStackPtr(INT32 ndx, FramedMethodFrame *pFrame, MetaSig *pSig);
+ static int GetStackOffset (FramedMethodFrame *pFrame, ArgIterator *pArgIter, MetaSig *pSig);
+ static INT64 __stdcall CallMethod(const void *pTarget,
+ INT32 cArgs,
+ FramedMethodFrame *pFrame,
+ OBJECTREF pObj);
+ static INT64 CopyOBJECTREFToStack(PVOID pvDest, OBJECTREF *pSrc, CorElementType typ, TypeHandle ty, MetaSig *pSig, BOOL fCopyClassContents);
+ static LPVOID GetLastArgument(MESSAGEREF *pMsg);
+ static void AppendAssemblyName(CQuickBytes &out, const CHAR* str);
+};
+
+#endif // ___MESSAGE_H___
diff --git a/src/vm/metasig.h b/src/vm/metasig.h
new file mode 100644
index 0000000000..03f7f501d5
--- /dev/null
+++ b/src/vm/metasig.h
@@ -0,0 +1,722 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// METASIG.H
+//
+
+//
+// All literal MetaData signatures should be defined here.
+//
+
+
+// Generic sig's based on types.
+
+// All sigs are alphabetized by the signature string and given a canonical name. Do not
+// give them "meaningful" names because we want to share them aggressively. Do not add
+// duplicates!
+
+// The canonical form:
+//
+// <what>(<type>*, <name>*)
+//
+// where <what> is:
+//
+// Fld -- field
+// IM -- instance method (HASTHIS == TRUE)
+// SM -- static method
+//
+// and <name> -- <type> is:
+//
+// a -- Arr -- array
+// P -- Ptr -- a pointer
+// r -- Ref -- a byref
+// Ret -- indicates function return type
+//
+// PMS -- PermissionSet
+// Var -- Variant
+//
+// b -- Byte -- (unsigned) byte
+// u -- Char -- character (2 byte unsigned unicode)
+// d -- Dbl -- double
+// f -- Flt -- float
+// i -- Int -- integer
+// K -- UInt -- unsigned integer
+// I -- IntPtr -- agnostic integer
+// U -- UIntPtr -- agnostic unsigned integer
+// l -- Long -- long integer
+// L -- ULong -- unsigned long integer
+// h -- Shrt -- short integer
+// H -- UShrt -- unsigned short integer
+// v -- Void -- Void
+// B -- SByt -- signed byte
+// F -- Bool -- boolean
+// j -- Obj -- System.Object
+// s -- Str -- System.String
+// C -- -- class
+// g -- -- struct
+// T -- TypedReference -- TypedReference
+// G -- -- Generic type variable
+// M -- -- Generic method variable
+//
+
+//#DEFINE_METASIG
+// Use DEFINE_METASIG for signatures that do not reference other types
+// Use DEFINE_METASIG_T for signatures that reference other types (contains C or g)
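+//
+// For example, DEFINE_METASIG(SM(Obj_IntPtr_RetBool, j I, F)) below encodes a
+// static method taking (object, native int) and returning boolean, while
+// DEFINE_METASIG_T(SM(Type_RetIntPtr, C(TYPE), I)) must use DEFINE_METASIG_T
+// because it references another type via C(TYPE).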
+
+
+// This part of the file is included multiple times with different macro definitions
+// to generate the hardcoded metasigs.
+
+
+// The SM, IM and Fld macros often have very similar definitions. METASIG_BODY is
+// a helper to avoid these redundant SM, IM and Fld definitions.
+#ifdef METASIG_BODY
+#ifndef DEFINE_METASIG
+// See code:#DEFINE_METASIG
+#define DEFINE_METASIG(body) body
+#endif
+#define SM(varname, args, retval) METASIG_BODY( SM_ ## varname, args retval )
+#define IM(varname, args, retval) METASIG_BODY( IM_ ## varname, args retval )
+#define GM(varname, conv, n, args, retval) METASIG_BODY( GM_ ## varname, args retval )
+#define Fld(varname, val) METASIG_BODY( Fld_ ## varname, val )
+#endif
+
+
+#ifdef DEFINE_METASIG
+
+// Use default if undefined
+// See code:#DEFINE_METASIG
+#ifndef DEFINE_METASIG_T
+#define DEFINE_METASIG_T(body) DEFINE_METASIG(body)
+#endif
+
+// One letter shortcuts are defined for all types that can occur in the signature.
+// The shortcuts are defined indirectly through METASIG_ATOM. METASIG_ATOM is
+// designed to control whether to generate the initializer for the signature or
+// just compute the size of the signature.
+
+#define b METASIG_ATOM(ELEMENT_TYPE_U1)
+#define u METASIG_ATOM(ELEMENT_TYPE_CHAR)
+#define d METASIG_ATOM(ELEMENT_TYPE_R8)
+#define f METASIG_ATOM(ELEMENT_TYPE_R4)
+#define i METASIG_ATOM(ELEMENT_TYPE_I4)
+#define K METASIG_ATOM(ELEMENT_TYPE_U4)
+#define I METASIG_ATOM(ELEMENT_TYPE_I)
+#define U METASIG_ATOM(ELEMENT_TYPE_U)
+#define l METASIG_ATOM(ELEMENT_TYPE_I8)
+#define L METASIG_ATOM(ELEMENT_TYPE_U8)
+#define h METASIG_ATOM(ELEMENT_TYPE_I2)
+#define H METASIG_ATOM(ELEMENT_TYPE_U2)
+#define v METASIG_ATOM(ELEMENT_TYPE_VOID)
+#define B METASIG_ATOM(ELEMENT_TYPE_I1)
+#define F METASIG_ATOM(ELEMENT_TYPE_BOOLEAN)
+#define j METASIG_ATOM(ELEMENT_TYPE_OBJECT)
+#define s METASIG_ATOM(ELEMENT_TYPE_STRING)
+#define T METASIG_ATOM(ELEMENT_TYPE_TYPEDBYREF)
+
+
+// METASIG_RECURSE controls whether to recurse into the complex types
+// in the macro expansion. METASIG_RECURSE is designed to control
+// whether to compute the byte size of the signature or just compute
+// the number of arguments in the signature.
+
+#if METASIG_RECURSE
+
+#define a(x) METASIG_ATOM(ELEMENT_TYPE_SZARRAY) x
+#define P(x) METASIG_ATOM(ELEMENT_TYPE_PTR) x
+#define r(x) METASIG_ATOM(ELEMENT_TYPE_BYREF) x
+
+#define G(n) METASIG_ATOM(ELEMENT_TYPE_VAR) METASIG_ATOM(n)
+#define M(n) METASIG_ATOM(ELEMENT_TYPE_MVAR) METASIG_ATOM(n)
+
+// The references to other types have special definition in some cases
+#ifndef C
+#define C(x) METASIG_ATOM(ELEMENT_TYPE_CLASS) METASIG_ATOM(CLASS__ ## x % 0x100) METASIG_ATOM(CLASS__ ## x / 0x100)
+#endif
+#ifndef g
+#define g(x) METASIG_ATOM(ELEMENT_TYPE_VALUETYPE) METASIG_ATOM(CLASS__ ## x % 0x100) METASIG_ATOM(CLASS__ ## x / 0x100)
+#endif
+
+#else
+
+#define a(x) METASIG_ATOM(ELEMENT_TYPE_SZARRAY)
+#define P(x) METASIG_ATOM(ELEMENT_TYPE_PTR)
+#define r(x) METASIG_ATOM(ELEMENT_TYPE_BYREF)
+
+#define G(n) METASIG_ATOM(ELEMENT_TYPE_VAR)
+#define M(n) METASIG_ATOM(ELEMENT_TYPE_MVAR)
+
+// The references to other types have special definition in some cases
+#ifndef C
+#define C(x) METASIG_ATOM(ELEMENT_TYPE_CLASS)
+#endif
+#ifndef g
+#define g(x) METASIG_ATOM(ELEMENT_TYPE_VALUETYPE)
+#endif
+
+#endif
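+
+// For example, with METASIG_RECURSE the shortcut a(C(TYPE)) expands to the
+// ELEMENT_TYPE_SZARRAY atom followed by the atoms encoding CLASS__TYPE; without
+// recursion it contributes only the single ELEMENT_TYPE_SZARRAY atom.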
+
+
+
+// to avoid empty arguments for macros
+#define _
+
+
+// static methods:
+DEFINE_METASIG_T(SM(Int_IntPtr_Obj_RetException, i I j, C(EXCEPTION)))
+DEFINE_METASIG_T(SM(Type_ArrType_IntPtr_int_RetType, C(TYPE) a(C(TYPE)) I i, C(TYPE) ))
+DEFINE_METASIG_T(SM(Type_RetIntPtr, C(TYPE), I))
+DEFINE_METASIG(SM(IntPtr_IntPtr_IntPtr_Int_RetObj, I I I i, j))
+DEFINE_METASIG(SM(Obj_IntPtr_RetIntPtr, j I, I))
+DEFINE_METASIG(SM(Obj_IntPtr_RetObj, j I, j))
+DEFINE_METASIG(SM(Obj_RefIntPtr_RetVoid, j r(I), v))
+DEFINE_METASIG(SM(Obj_IntPtr_RetVoid, j I, v))
+DEFINE_METASIG(SM(Obj_IntPtr_RetBool, j I, F))
+DEFINE_METASIG(SM(Obj_IntPtr_IntPtr_Int_RetIntPtr, j I I i, I))
+DEFINE_METASIG(SM(IntPtr_IntPtr_RefIntPtr_RetObj, I I r(I), j))
+#ifdef FEATURE_COMINTEROP
+DEFINE_METASIG(SM(Obj_IntPtr_RefIntPtr_RefBool_RetIntPtr, j I r(I) r(F), I))
+DEFINE_METASIG(SM(Obj_IntPtr_RefIntPtr_RetIntPtr, j I r(I), I))
+DEFINE_METASIG_T(SM(Obj_Str_RetICustomProperty, j s, C(ICUSTOMPROPERTY)))
+DEFINE_METASIG_T(SM(Obj_Str_PtrTypeName_RetICustomProperty, j s P(g(TYPENAMENATIVE)), C(ICUSTOMPROPERTY)))
+DEFINE_METASIG_T(SM(Obj_PtrTypeName_RetVoid, j P(g(TYPENAMENATIVE)), v))
+DEFINE_METASIG_T(SM(Type_PtrTypeName_RetVoid, C(TYPE) P(g(TYPENAMENATIVE)), v))
+DEFINE_METASIG_T(SM(PtrTypeName_RefType_RetVoid, P(g(TYPENAMENATIVE)) r(C(TYPE)), v))
+DEFINE_METASIG_T(SM(ArrType_PtrTypeName_RetVoid, a(C(TYPE)) P(g(TYPENAMENATIVE)), v))
+DEFINE_METASIG_T(SM(PtrTypeName_ArrType_RetVoid, P(g(TYPENAMENATIVE)) a(C(TYPE)), v))
+DEFINE_METASIG_T(SM(PtrTypeName_RetVoid, P(g(TYPENAMENATIVE)), v))
+DEFINE_METASIG_T(SM(PtrTypeName_Int_RetVoid, P(g(TYPENAMENATIVE)) i, v))
+DEFINE_METASIG_T(SM(Exception_IntPtr_RetException, C(EXCEPTION) I, C(EXCEPTION)))
+#endif // FEATURE_COMINTEROP
+DEFINE_METASIG(SM(Int_RetVoid, i, v))
+DEFINE_METASIG(SM(Int_Int_RetVoid, i i, v))
+DEFINE_METASIG(SM(Str_RetIntPtr, s, I))
+DEFINE_METASIG(SM(Str_RetBool, s, F))
+DEFINE_METASIG(SM(IntPtr_IntPtr_RetVoid, I I, v))
+DEFINE_METASIG(SM(IntPtr_IntPtr_Obj_RetIntPtr, I I j, I))
+DEFINE_METASIG(SM(IntPtr_IntPtr_Int_Obj_RetIntPtr, I I i j, I))
+DEFINE_METASIG(SM(IntPtr_IntPtr_IntPtr_RetVoid, I I I, v))
+DEFINE_METASIG(SM(IntPtr_IntPtr_IntPtr_UShrt_RetVoid, I I I H, v))
+DEFINE_METASIG(SM(IntPtr_Int_IntPtr_RetIntPtr, I i I, I))
+DEFINE_METASIG(SM(IntPtr_IntPtr_Int_IntPtr_RetVoid, I I i I, v))
+DEFINE_METASIG(SM(IntPtr_IntPtr_Obj_RetVoid, I I j, v))
+DEFINE_METASIG(SM(Obj_ArrObject_RetVoid, j a(j), v))
+DEFINE_METASIG(SM(Obj_IntPtr_Obj_RetVoid, j I j, v))
+DEFINE_METASIG(SM(RetUIntPtr, _, U))
+DEFINE_METASIG(SM(RetIntPtr, _, I))
+DEFINE_METASIG(SM(RetBool, _, F))
+DEFINE_METASIG(SM(IntPtr_RetStr, I, s))
+DEFINE_METASIG(SM(IntPtr_RetBool, I, F))
+DEFINE_METASIG(SM(IntPtrIntPtrIntPtr_RetVoid, I I I, v))
+DEFINE_METASIG_T(SM(IntPtrIntPtrIntPtr_RefCleanupWorkList_RetVoid, I I I r(C(CLEANUP_WORK_LIST)), v))
+DEFINE_METASIG_T(SM(RuntimeType_RuntimeMethodHandleInternal_RetMethodBase, C(CLASS) g(METHOD_HANDLE_INTERNAL), C(METHOD_BASE) ))
+DEFINE_METASIG_T(SM(RuntimeType_IRuntimeFieldInfo_RetFieldInfo, C(CLASS) C(I_RT_FIELD_INFO), C(FIELD_INFO) ))
+DEFINE_METASIG_T(SM(RuntimeType_Int_RetPropertyInfo, C(CLASS) i, C(PROPERTY_INFO) ))
+DEFINE_METASIG(SM(Char_Bool_Bool_RetByte, u F F, b))
+DEFINE_METASIG(SM(Byte_RetChar, b, u))
+DEFINE_METASIG(SM(Str_Bool_Bool_RefInt_RetIntPtr, s F F r(i), I))
+DEFINE_METASIG(SM(IntPtr_Int_RetStr, I i, s))
+DEFINE_METASIG_T(SM(Obj_PtrByte_RefCleanupWorkList_RetVoid, j P(b) r(C(CLEANUP_WORK_LIST)), v))
+DEFINE_METASIG(SM(Obj_PtrByte_RetVoid, j P(b), v))
+DEFINE_METASIG(SM(PtrByte_IntPtr_RetVoid, P(b) I, v))
+DEFINE_METASIG(SM(Str_Bool_Bool_RefInt_RetArrByte, s F F r(i), a(b) ))
+DEFINE_METASIG(SM(ArrByte_Int_PtrByte_Int_Int_RetVoid, a(b) i P(b) i i, v))
+DEFINE_METASIG(SM(PtrByte_Int_ArrByte_Int_Int_RetVoid, P(b) i a(b) i i, v))
+DEFINE_METASIG(SM(PtrSByt_RetInt, P(B), i))
+DEFINE_METASIG(SM(IntPtr_RetIntPtr, I, I))
+DEFINE_METASIG(SM(UIntPtr_RetIntPtr, U, I))
+DEFINE_METASIG(SM(PtrByte_PtrByte_Int_RetVoid, P(b) P(b) i, v))
+DEFINE_METASIG(SM(RefObj_IntPtr_RetVoid, r(j) I, v))
+DEFINE_METASIG(SM(RefObj_RefIntPtr_RetVoid, r(j) r(I), v))
+DEFINE_METASIG(SM(IntPtr_RefObj_IntPtr_RetVoid, I r(j) I, v))
+DEFINE_METASIG(SM(IntPtr_RefObj_IntPtr_Int_RetVoid, I r(j) I i,v))
+DEFINE_METASIG(SM(IntPtr_Int_IntPtr_Int_Int_Int_RetVoid, I i I i i i, v))
+DEFINE_METASIG(SM(IntPtr_IntPtr_Int_Int_RetVoid, I I i i, v))
+DEFINE_METASIG(SM(IntPtr_RefObj_IntPtr_Obj_RetVoid, I r(j) I j, v))
+DEFINE_METASIG(SM(Obj_Int_RetVoid, j i,v))
+
+DEFINE_METASIG(SM(Flt_RetFlt, f, f))
+DEFINE_METASIG(SM(Dbl_RetDbl, d, d))
+DEFINE_METASIG(SM(RefDbl_Dbl_RetDbl, r(d) d, d))
+DEFINE_METASIG(SM(RefDbl_Dbl_Dbl_RetDbl, r(d) d d, d))
+DEFINE_METASIG(SM(RefLong_Long_RetLong, r(l) l, l))
+DEFINE_METASIG(SM(RefLong_Long_Long_RetLong, r(l) l l, l))
+DEFINE_METASIG(SM(RefFlt_Flt_RetFlt, r(f) f, f))
+DEFINE_METASIG(SM(RefFlt_Flt_Flt_RetFlt, r(f) f f, f))
+DEFINE_METASIG(SM(RefInt_Int_RetInt, r(i) i, i))
+DEFINE_METASIG(SM(RefInt_Int_Int_RetInt, r(i) i i, i))
+DEFINE_METASIG(SM(RefInt_Int_Int_RefBool_RetInt, r(i) i i r(F), i))
+DEFINE_METASIG(SM(RefIntPtr_IntPtr_RetIntPtr, r(I) I, I))
+DEFINE_METASIG(SM(RefIntPtr_IntPtr_IntPtr_RetIntPtr, r(I) I I, I))
+DEFINE_METASIG(SM(RefObj_Obj_RetObj, r(j) j, j))
+DEFINE_METASIG(SM(RefObj_Obj_Obj_RetObj, r(j) j j, j))
+DEFINE_METASIG(SM(ObjIntPtr_RetVoid, j I, v))
+
+DEFINE_METASIG(SM(RefBool_RetBool, r(F), F))
+DEFINE_METASIG(SM(RefBool_Bool, r(F) F, v))
+DEFINE_METASIG(SM(RefSByt_RetSByt, r(B), B))
+DEFINE_METASIG(SM(RefSByt_SByt, r(B) B, v))
+DEFINE_METASIG(SM(RefByte_RetByte, r(b), b))
+DEFINE_METASIG(SM(RefByte_Byte, r(b) b, v))
+DEFINE_METASIG(SM(RefShrt_RetShrt, r(h), h))
+DEFINE_METASIG(SM(RefShrt_Shrt, r(h) h, v))
+DEFINE_METASIG(SM(RefUShrt_RetUShrt, r(H), H))
+DEFINE_METASIG(SM(RefUShrt_UShrt, r(H) H, v))
+DEFINE_METASIG(SM(RefInt_RetInt, r(i), i))
+DEFINE_METASIG(SM(RefInt_Int, r(i) i, v))
+DEFINE_METASIG(SM(RefUInt_RetUInt, r(K), K))
+DEFINE_METASIG(SM(RefUInt_UInt, r(K) K, v))
+DEFINE_METASIG(SM(RefLong_RetLong, r(l), l))
+DEFINE_METASIG(SM(RefLong_Long, r(l) l, v))
+DEFINE_METASIG(SM(RefULong_RetULong, r(L), L))
+DEFINE_METASIG(SM(RefULong_ULong, r(L) L, v))
+DEFINE_METASIG(SM(RefIntPtr_RetIntPtr, r(I), I))
+DEFINE_METASIG(SM(RefIntPtr_IntPtr, r(I) I, v))
+DEFINE_METASIG(SM(RefUIntPtr_RetUIntPtr, r(U), U))
+DEFINE_METASIG(SM(RefUIntPtr_UIntPtr, r(U) U, v))
+DEFINE_METASIG(SM(RefFlt_RetFlt, r(f), f))
+DEFINE_METASIG(SM(RefFlt_Flt, r(f) f, v))
+DEFINE_METASIG(SM(RefDbl_RetDbl, r(d), d))
+DEFINE_METASIG(SM(RefDbl_Dbl, r(d) d, v))
+DEFINE_METASIG(GM(RefT_RetT, IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, r(M(0)) , M(0)))
+DEFINE_METASIG(GM(RefT_T, IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, r(M(0)) M(0), v))
+
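+// Editor's note (inferred from the mnemonic names in this list, not stated
+// here): SM = static method, IM = instance method, and GM = generic method,
+// where GM also carries an explicit calling convention and type-parameter
+// count. The element codes read off the names in the same way; for example,
+// in SM(Obj_IntPtr_RetBool, j I, F) above, j = object, I = IntPtr and
+// F = bool, i.e. a static bool method taking (object, IntPtr). Likewise
+// r(x) = byref, a(x) = array, P(x) = pointer, C(X) = class, g(X) = value
+// type, and M(0) in the GM entries above is the method's first generic
+// type parameter.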
+
+DEFINE_METASIG_T(SM(SafeHandle_RefBool_RetIntPtr, C(SAFE_HANDLE) r(F), I ))
+DEFINE_METASIG_T(SM(SafeHandle_RetVoid, C(SAFE_HANDLE), v ))
+
+#ifdef FEATURE_REMOTING
+DEFINE_METASIG_T(SM(RetContext, _, C(CONTEXT)))
+#endif
+DEFINE_METASIG_T(SM(RetMethodBase, _, C(METHOD_BASE)))
+DEFINE_METASIG(SM(RetVoid, _, v))
+DEFINE_METASIG(SM(Str_IntPtr_Int_RetVoid, s I i, v))
+DEFINE_METASIG(SM(Int_RetIntPtr, i, I))
+
+DEFINE_METASIG_T(SM(DateTime_RetDbl, g(DATE_TIME), d))
+DEFINE_METASIG(SM(Dbl_RetLong, d, l))
+
+DEFINE_METASIG(SM(IntPtr_RetObj, I, j))
+DEFINE_METASIG_T(SM(Int_RetException, i, C(EXCEPTION)))
+DEFINE_METASIG(SM(Int_IntPtr_RetObj, i I, j))
+DEFINE_METASIG(SM(IntPtr_IntPtr_Int_RetVoid, I I i, v))
+DEFINE_METASIG_T(SM(Exception_RetInt, C(EXCEPTION), i))
+
+DEFINE_METASIG_T(SM(ContextBoundObject_RetObj, C(CONTEXT_BOUND_OBJECT), j))
+DEFINE_METASIG_T(SM(PMS_PMS_RetInt, C(PERMISSION_SET) C(PERMISSION_SET), i))
+
+DEFINE_METASIG(SM(IntPtr_RetVoid, I, v))
+DEFINE_METASIG(SM(IntPtr_Bool_RetVoid, I F, v))
+DEFINE_METASIG(SM(IntPtr_UInt_IntPtr_RetVoid, I K I, v))
+DEFINE_METASIG(SM(IntPtr_RetUInt, I, K))
+DEFINE_METASIG(SM(PtrChar_RetInt, P(u), i))
+DEFINE_METASIG(SM(IntPtr_IntPtr_RetIntPtr, I I, I))
+DEFINE_METASIG(SM(IntPtr_IntPtr_Int_RetIntPtr, I I i, I))
+DEFINE_METASIG(SM(PtrVoid_PtrVoid_RetVoid, P(v) P(v), v))
+DEFINE_METASIG(IM(Obj_RetBool, j, F))
+DEFINE_METASIG(SM(Obj_RetVoid, j, v))
+DEFINE_METASIG(SM(Obj_RetInt, j, i))
+DEFINE_METASIG(SM(Obj_RetIntPtr, j, I))
+DEFINE_METASIG(SM(Obj_RetObj, j, j))
+DEFINE_METASIG(SM(Obj_RetArrByte, j, a(b)))
+DEFINE_METASIG(SM(Obj_Bool_RetArrByte, j F, a(b)))
+#ifdef FEATURE_REMOTING
+DEFINE_METASIG_T(SM(Obj_RefMessageData_RetVoid, j r(g(MESSAGE_DATA)), v))
+#endif
+DEFINE_METASIG(SM(Obj_Obj_RefArrByte_RetArrByte, j j r(a(b)), a(b)))
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_METASIG_T(SM(Obj_Int_RefVariant_RetVoid, j i r(g(VARIANT)), v))
+DEFINE_METASIG_T(SM(Obj_RefVariant_RetVoid, j r(g(VARIANT)), v))
+DEFINE_METASIG_T(SM(RefVariant_RetObject, r(g(VARIANT)), j))
+DEFINE_METASIG_T(SM(Str_PtrHStringHeader_RetIntPtr, s P(g(HSTRING_HEADER_MANAGED)), I))
+
+DEFINE_METASIG_T(SM(RefDateTimeOffset_RefDateTimeNative_RetVoid, r(g(DATE_TIME_OFFSET)) r(g(DATETIMENATIVE)), v))
+
+#endif
+
+#ifdef FEATURE_REMOTING
+DEFINE_METASIG_T(SM(RealProxy_Class_RetBool, C(REAL_PROXY) C(CLASS), F))
+#endif
+
+DEFINE_METASIG_T(SM(IPermission_RetPermissionToken, C(IPERMISSION), C(PERMISSION_TOKEN)))
+DEFINE_METASIG_T(SM(FrameSecurityDescriptor_IPermission_PermissionToken_RuntimeMethodHandleInternal_RetBool, \
+ C(FRAME_SECURITY_DESCRIPTOR) C(IPERMISSION) C(PERMISSION_TOKEN) g(METHOD_HANDLE_INTERNAL), F))
+DEFINE_METASIG_T(SM(FrameSecurityDescriptor_PMS_OutPMS_RuntimeMethodHandleInternal_RetBool, \
+ C(FRAME_SECURITY_DESCRIPTOR) C(PERMISSION_SET) r(C(PERMISSION_SET)) g(METHOD_HANDLE_INTERNAL), F))
+DEFINE_METASIG_T(SM(FrameSecurityDescriptor_RetInt, C(FRAME_SECURITY_DESCRIPTOR), i))
+DEFINE_METASIG_T(SM(DynamicResolver_IPermission_PermissionToken_RuntimeMethodHandleInternal_RetBool, \
+ C(DYNAMICRESOLVER) C(IPERMISSION) C(PERMISSION_TOKEN) g(METHOD_HANDLE_INTERNAL), F))
+DEFINE_METASIG_T(SM(DynamicResolver_PMS_OutPMS_RuntimeMethodHandleInternal_RetBool, \
+ C(DYNAMICRESOLVER) C(PERMISSION_SET) r(C(PERMISSION_SET)) g(METHOD_HANDLE_INTERNAL), F))
+DEFINE_METASIG_T(SM(PermissionListSet_PMS_PMS_RetPermissionListSet, \
+ C(PERMISSION_LIST_SET) C(PERMISSION_SET) C(PERMISSION_SET), C(PERMISSION_LIST_SET)))
+DEFINE_METASIG_T(SM(PMS_IntPtr_RuntimeMethodHandleInternal_Assembly_SecurityAction_RetVoid, C(PERMISSION_SET) I g(METHOD_HANDLE_INTERNAL) C(ASSEMBLY) g(SECURITY_ACTION), v))
+#ifdef FEATURE_COMPRESSEDSTACK
+DEFINE_METASIG_T(SM(CS_PMS_PMS_CodeAccessPermission_PermissionToken_RuntimeMethodHandleInternal_Assembly_SecurityAction_RetVoid, \
+ C(COMPRESSED_STACK) C(PERMISSION_SET) C(PERMISSION_SET) C(CODE_ACCESS_PERMISSION) C(PERMISSION_TOKEN) g(METHOD_HANDLE_INTERNAL) C(ASSEMBLY) g(SECURITY_ACTION), v))
+DEFINE_METASIG_T(SM(CS_PMS_PMS_PMS_RuntimeMethodHandleInternal_Assembly_SecurityAction_RetVoid, C(COMPRESSED_STACK) C(PERMISSION_SET) C(PERMISSION_SET) C(PERMISSION_SET) g(METHOD_HANDLE_INTERNAL) C(ASSEMBLY) g(SECURITY_ACTION), v))
+#else // #ifdef FEATURE_COMPRESSEDSTACK
+DEFINE_METASIG_T(SM(CS_PMS_PMS_CodeAccessPermission_PermissionToken_RuntimeMethodHandleInternal_Assembly_SecurityAction_RetVoid, \
+ j C(PERMISSION_SET) C(PERMISSION_SET) C(CODE_ACCESS_PERMISSION) C(PERMISSION_TOKEN) g(METHOD_HANDLE_INTERNAL) C(ASSEMBLY) g(SECURITY_ACTION), v))
+DEFINE_METASIG_T(SM(CS_PMS_PMS_PMS_RuntimeMethodHandleInternal_Assembly_SecurityAction_RetVoid, j C(PERMISSION_SET) C(PERMISSION_SET) C(PERMISSION_SET) g(METHOD_HANDLE_INTERNAL) C(ASSEMBLY) g(SECURITY_ACTION), v))
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+DEFINE_METASIG_T(SM(Evidence_RefInt_Bool_RetPMS, C(EVIDENCE) r(i) F, C(PERMISSION_SET)))
+#ifdef FEATURE_APTCA
+DEFINE_METASIG_T(SM(Assembly_PMS_PMS_RuntimeMethodHandleInternal_SecurityAction_Obj_IPermission_RetVoid, C(ASSEMBLY) C(PERMISSION_SET) C(PERMISSION_SET) g(METHOD_HANDLE_INTERNAL) g(SECURITY_ACTION) j C(IPERMISSION), v))
+#endif // FEATURE_APTCA
+DEFINE_METASIG_T(SM(Evidence_PMS_PMS_PMS_PMS_int_Bool_RetPMS, \
+ C(EVIDENCE) C(PERMISSION_SET) C(PERMISSION_SET) C(PERMISSION_SET) r(C(PERMISSION_SET)) r(i) F, C(PERMISSION_SET)))
+DEFINE_METASIG_T(SM(Int_PMS_RetVoid, i C(PERMISSION_SET), v))
+DEFINE_METASIG_T(SM(Int_PMS_Resolver_RetVoid, i C(PERMISSION_SET) C(RESOLVER), v))
+DEFINE_METASIG_T(SM(PMS_RetVoid, C(PERMISSION_SET), v))
+
+#ifndef FEATURE_CORECLR
+DEFINE_METASIG_T(SM(ExecutionContext_ContextCallback_Object_Bool_RetVoid, \
+ C(EXECUTIONCONTEXT) C(CONTEXTCALLBACK) j F, v))
+#endif
+#if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+DEFINE_METASIG_T(SM(SecurityContext_ContextCallback_Object_RetVoid, \
+ C(SECURITYCONTEXT) C(CONTEXTCALLBACK) j, v))
+#endif // #if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+#ifdef FEATURE_COMPRESSEDSTACK
+DEFINE_METASIG_T(SM(CompressedStack_ContextCallback_Object_RetVoid, \
+ C(COMPRESSED_STACK) C(CONTEXTCALLBACK) j, v))
+DEFINE_METASIG_T(SM(IntPtr_RetDCS, I, C(DOMAIN_COMPRESSED_STACK)))
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+DEFINE_METASIG(SM(Str_RetInt, s, i))
+DEFINE_METASIG_T(SM(Str_RetICustomMarshaler, s, C(ICUSTOM_MARSHALER)))
+DEFINE_METASIG(SM(Int_Str_RetIntPtr, i s, I))
+DEFINE_METASIG(SM(Int_Str_IntPtr_RetIntPtr, i s I, I))
+DEFINE_METASIG(SM(Str_IntPtr_RetIntPtr, s I, I))
+DEFINE_METASIG(SM(Str_Bool_Int_RetV, s F i, v))
+
+DEFINE_METASIG_T(SM(Type_RetInt, C(TYPE), i))
+#ifdef FEATURE_REMOTING
+DEFINE_METASIG_T(SM(Class_ArrObject_Bool_RetMarshalByRefObject, C(CLASS) a(j) F, C(MARSHAL_BY_REF_OBJECT)))
+#endif
+DEFINE_METASIG(SM(ArrByte_RetObj, a(b), j))
+DEFINE_METASIG(SM(ArrByte_Bool_RetObj, a(b) F, j))
+DEFINE_METASIG(SM(ArrByte_ArrByte_RefObj_RetObj, a(b) a(b) r(j), j))
+DEFINE_METASIG_T(SM(PtrSByt_Int_Int_Encoding_RetStr, P(B) i i C(ENCODING), s))
+DEFINE_METASIG_T(SM(ArrObj_Bool_RefArrByte_OutPMS_HostProtectionResource_Bool_RetArrByte, a(j) F r(a(b)) r(C(PERMISSION_SET)) g(HOST_PROTECTION_RESOURCE) F, a(b)))
+DEFINE_METASIG_T(SM(Evidence_RetEvidence, C(EVIDENCE), C(EVIDENCE)))
+#ifdef FEATURE_CAS_POLICY
+DEFINE_METASIG_T(SM(PEFile_Evidence_RetEvidence, C(SAFE_PEFILE_HANDLE) C(EVIDENCE), C(EVIDENCE)))
+#endif // FEATURE_CAS_POLICY
+DEFINE_METASIG_T(SM(Evidence_Asm_RetEvidence, C(EVIDENCE) C(ASSEMBLY), C(EVIDENCE)))
+DEFINE_METASIG_T(IM(Evidence_RetVoid, C(EVIDENCE), v))
+
+DEFINE_METASIG_T(SM(Void_RetRuntimeTypeHandle, _, g(RT_TYPE_HANDLE)))
+DEFINE_METASIG(SM(Void_RetIntPtr, _, I))
+
+#ifdef FEATURE_CAS_POLICY
+#ifdef FEATURE_NONGENERIC_COLLECTIONS
+DEFINE_METASIG_T(SM(CS_PMS_PMS_ArrayList_ArrayList_RetVoid, \
+ C(COMPRESSED_STACK) C(PERMISSION_SET) C(PERMISSION_SET) C(ARRAY_LIST) C(ARRAY_LIST), v))
+#else
+#error Need replacement for GetZoneAndOriginHelper
+#endif // FEATURE_NONGENERIC_COLLECTIONS
+#endif // #ifdef FEATURE_CAS_POLICY
+DEFINE_METASIG_T(SM(UInt_UInt_PtrNativeOverlapped_RetVoid, K K P(g(NATIVEOVERLAPPED)), v))
+#ifdef FEATURE_REMOTING
+DEFINE_METASIG_T(SM(CrossContextDelegate_ArrObj_RetObj, C(CROSS_CONTEXT_DELEGATE) a(j), j))
+#endif
+
+DEFINE_METASIG(IM(Long_RetVoid, l, v))
+DEFINE_METASIG(IM(IntPtr_Int_RetVoid, I i, v))
+DEFINE_METASIG(IM(IntInt_RetArrByte, i i, a(b)))
+DEFINE_METASIG(IM(RetIntPtr, _, I))
+DEFINE_METASIG(IM(RetInt, _, i))
+DEFINE_METASIG_T(IM(RetAssemblyName, _, C(ASSEMBLY_NAME)))
+DEFINE_METASIG_T(IM(RetAssemblyBase, _, C(ASSEMBLYBASE)))
+DEFINE_METASIG_T(IM(RetModule, _, C(MODULE)))
+DEFINE_METASIG_T(IM(Str_ArrB_ArrB_Ver_CI_AHA_AVC_Str_ANF_SNKP_RetV,
+ s a(b) a(b) C(VERSION) C(CULTURE_INFO) g(ASSEMBLY_HASH_ALGORITHM) g(ASSEMBLY_VERSION_COMPATIBILITY) s g(ASSEMBLY_NAME_FLAGS) C(STRONG_NAME_KEY_PAIR), v))
+DEFINE_METASIG_T(IM(PEK_IFM_RetV,
+ g(PORTABLE_EXECUTABLE_KINDS) g(IMAGE_FILE_MACHINE), v))
+DEFINE_METASIG(IM(RetObj, _, j))
+DEFINE_METASIG_T(IM(RetIEnumerator, _, C(IENUMERATOR)))
+DEFINE_METASIG(IM(RetStr, _, s))
+DEFINE_METASIG(IM(RetLong, _, l))
+
+DEFINE_METASIG_T(IM(RetType, _, C(TYPE)))
+DEFINE_METASIG(IM(RetVoid, _, v))
+DEFINE_METASIG(IM(RetBool, _, F))
+DEFINE_METASIG(IM(RetArrByte, _, a(b)))
+DEFINE_METASIG_T(IM(RetArrParameterInfo, _, a(C(PARAMETER))))
+DEFINE_METASIG_T(IM(RetCultureInfo, _, C(CULTURE_INFO)))
+#ifdef FEATURE_CAS_POLICY
+DEFINE_METASIG_T(IM(RetSecurityElement, _, C(SECURITY_ELEMENT)))
+#endif // FEATURE_CAS_POLICY
+
+DEFINE_METASIG_T(SM(RetThread, _, C(THREAD)))
+
+DEFINE_METASIG(IM(Bool_RetIntPtr, F, I))
+DEFINE_METASIG_T(IM(Bool_RetMethodInfo, F, C(METHOD_INFO)))
+DEFINE_METASIG(SM(Bool_RetStr, F, s))
+DEFINE_METASIG(IM(Bool_Bool_RetStr, F F, s))
+
+DEFINE_METASIG(IM(PtrChar_RetVoid, P(u), v))
+DEFINE_METASIG(IM(PtrChar_Int_Int_RetVoid, P(u) i i, v))
+DEFINE_METASIG(IM(PtrSByt_RetVoid, P(B), v))
+DEFINE_METASIG(IM(PtrSByt_Int_Int_RetVoid, P(B) i i, v))
+DEFINE_METASIG_T(IM(PtrSByt_Int_Int_Encoding_RetVoid, P(B) i i C(ENCODING), v))
+DEFINE_METASIG(IM(PtrChar_Int_RetVoid, P(u) i, v))
+DEFINE_METASIG(IM(PtrSByt_Int_RetVoid, P(B) i, v))
+
+DEFINE_METASIG(IM(ArrChar_RetStr, a(u), s))
+DEFINE_METASIG(IM(ArrChar_Int_Int_RetStr, a(u) i i, s))
+DEFINE_METASIG(IM(Char_Int_RetStr, u i, s))
+DEFINE_METASIG(IM(PtrChar_RetStr, P(u), s))
+DEFINE_METASIG(IM(PtrChar_Int_Int_RetStr, P(u) i i, s))
+DEFINE_METASIG(IM(Obj_Int_RetIntPtr, j i, I))
+
+DEFINE_METASIG(IM(Char_Char_RetStr, u u, s))
+DEFINE_METASIG(IM(Char_Int_RetVoid, u i, v))
+DEFINE_METASIG_T(IM(CultureInfo_RetVoid, C(CULTURE_INFO), v))
+DEFINE_METASIG(IM(Dbl_RetVoid, d, v))
+DEFINE_METASIG(IM(Flt_RetVoid, f, v))
+DEFINE_METASIG(IM(Int_RetInt, i, i))
+DEFINE_METASIG(IM(Int_RefIntPtr_RefIntPtr_RefIntPtr_RetVoid, i r(I) r(I) r(I), v))
+DEFINE_METASIG(IM(Int_RetStr, i, s))
+DEFINE_METASIG(IM(Int_RetVoid, i, v))
+DEFINE_METASIG(IM(Int_RetBool, i, F))
+#ifdef FEATURE_REMOTING
+DEFINE_METASIG_T(IM(RefMessageData_Int_RetVoid, r(g(MESSAGE_DATA)) i, v))
+#endif // FEATURE_REMOTING
+DEFINE_METASIG(IM(Int_Int_Int_Int_RetVoid, i i i i, v))
+DEFINE_METASIG_T(IM(Obj_EventArgs_RetVoid, j C(EVENT_ARGS), v))
+DEFINE_METASIG_T(IM(Obj_UnhandledExceptionEventArgs_RetVoid, j C(UNHANDLED_EVENTARGS), v))
+
+DEFINE_METASIG_T(IM(Assembly_RetVoid, C(ASSEMBLY), v))
+DEFINE_METASIG_T(IM(Assembly_RetBool, C(ASSEMBLY), F))
+DEFINE_METASIG_T(IM(AssemblyBase_RetBool, C(ASSEMBLYBASE), F))
+#ifdef FEATURE_COMINTEROP_REGISTRATION
+DEFINE_METASIG_T(IM(AssemblyBase_AssemblyRegistrationFlags_RetBool, C(ASSEMBLYBASE) g(ASSEMBLY_REGISTRATION_FLAGS), F))
+#endif
+DEFINE_METASIG_T(IM(Exception_RetVoid, C(EXCEPTION), v))
+
+DEFINE_METASIG(IM(IntPtr_RetObj, I, j))
+DEFINE_METASIG(IM(IntPtr_RetVoid, I, v))
+DEFINE_METASIG(IM(IntPtr_PtrVoid_RetVoid, I P(v), v))
+DEFINE_METASIG_T(IM(RefGuid_RetIntPtr, r(g(GUID)), I))
+
+DEFINE_METASIG(IM(Obj_RetInt, j, i))
+DEFINE_METASIG(IM(Obj_RetIntPtr, j, I))
+DEFINE_METASIG(IM(Obj_RetVoid, j, v))
+DEFINE_METASIG(IM(Obj_RetObj, j, j))
+DEFINE_METASIG(IM(Obj_IntPtr_RetVoid, j I, v))
+DEFINE_METASIG(IM(Obj_UIntPtr_RetVoid, j U, v))
+DEFINE_METASIG(IM(Obj_IntPtr_IntPtr_RetVoid, j I I, v))
+DEFINE_METASIG(IM(Obj_IntPtr_IntPtr_IntPtr_RetVoid, j I I I, v))
+DEFINE_METASIG(IM(Obj_IntPtr_IntPtr_IntPtr_IntPtr_RetVoid, j I I I I, v))
+DEFINE_METASIG(IM(IntPtr_UInt_IntPtr_IntPtr_RetVoid, I K I I, v))
+DEFINE_METASIG(IM(Obj_Bool_RetVoid, j F, v))
+#ifdef FEATURE_COMINTEROP
+DEFINE_METASIG(SM(Obj_RetStr, j, s))
+#endif // FEATURE_COMINTEROP
+#ifdef FEATURE_REMOTING
+DEFINE_METASIG_T(IM(Str_BindingFlags_Obj_ArrInt_RefMessageData_RetObj, s g(BINDING_FLAGS) j a(i) r(g(MESSAGE_DATA)), j))
+#endif // FEATURE_REMOTING
+DEFINE_METASIG_T(IM(Obj_Obj_BindingFlags_Binder_CultureInfo_RetVoid, j j g(BINDING_FLAGS) C(BINDER) C(CULTURE_INFO), v))
+DEFINE_METASIG_T(IM(Obj_Obj_BindingFlags_Binder_ArrObj_CultureInfo_RetVoid, j j g(BINDING_FLAGS) C(BINDER) a(j) C(CULTURE_INFO), v))
+DEFINE_METASIG_T(IM(Obj_BindingFlags_Binder_ArrObj_CultureInfo_RetObj, j g(BINDING_FLAGS) C(BINDER) a(j) C(CULTURE_INFO), j))
+DEFINE_METASIG_T(IM(Obj_Type_CultureInfo_RetObj, j C(TYPE) C(CULTURE_INFO), j))
+DEFINE_METASIG_T(IM(IPrincipal_RetVoid, C(IPRINCIPAL), v))
+DEFINE_METASIG_T(IM(MemberInfo_RetVoid, C(MEMBER), v))
+DEFINE_METASIG(IM(IntPtr_ArrObj_Obj_RefArrObj_RetObj, I a(j) j r(a(j)), j))
+DEFINE_METASIG_T(IM(CodeAccessPermission_RetBool, C(CODE_ACCESS_PERMISSION), F))
+DEFINE_METASIG_T(IM(IPermission_RetIPermission, C(IPERMISSION), C(IPERMISSION)))
+DEFINE_METASIG_T(IM(IPermission_RetBool, C(IPERMISSION), F))
+DEFINE_METASIG_T(IM(PMS_RetVoid, C(PERMISSION_SET), v))
+DEFINE_METASIG_T(IM(PMS_RetPMS, C(PERMISSION_SET), C(PERMISSION_SET)))
+DEFINE_METASIG_T(IM(PMS_RetBool, C(PERMISSION_SET), F))
+DEFINE_METASIG(IM(RefObject_RetBool, r(j), F))
+DEFINE_METASIG_T(IM(Class_RetObj, C(CLASS), j))
+DEFINE_METASIG(IM(Int_VoidPtr_RetVoid, i P(v), v))
+DEFINE_METASIG(IM(VoidPtr_RetVoid, P(v), v))
+
+DEFINE_METASIG_T(IM(Str_RetModule, s, C(MODULE)))
+DEFINE_METASIG_T(IM(Assembly_Str_RetAssembly, C(ASSEMBLY) s, C(ASSEMBLY)))
+DEFINE_METASIG_T(SM(Str_Bool_RetAssembly, s F, C(ASSEMBLY)))
+DEFINE_METASIG_T(IM(Str_Str_Str_Assembly_Assembly_RetVoid, s s s C(ASSEMBLY) C(ASSEMBLY), v))
+DEFINE_METASIG(IM(Str_Str_Obj_RetVoid, s s j, v))
+DEFINE_METASIG(IM(Str_Str_Str_Obj_RetVoid, s s s j, v))
+DEFINE_METASIG(IM(Str_Str_Str_Obj_Bool_RetVoid, s s s j F, v))
+DEFINE_METASIG(IM(Str_Str_RefObj_RetVoid, s s r(j), v))
+DEFINE_METASIG_T(IM(Str_RetFieldInfo, s, C(FIELD_INFO)))
+DEFINE_METASIG_T(IM(Str_RetPropertyInfo, s, C(PROPERTY_INFO)))
+DEFINE_METASIG(SM(Str_RetStr, s, s))
+DEFINE_METASIG_T(SM(Str_CultureInfo_RetStr, s C(CULTURE_INFO), s))
+DEFINE_METASIG_T(SM(Str_CultureInfo_RefBool_RetStr, s C(CULTURE_INFO) r(F), s))
+DEFINE_METASIG(IM(Str_ArrStr_ArrStr_RetVoid, s a(s) a(s), v))
+DEFINE_METASIG(IM(Str_RetVoid, s, v))
+DEFINE_METASIG(SM(RefBool_RefBool_RetVoid, r(F) r(F), v))
+DEFINE_METASIG_T(IM(Str_Exception_RetVoid, s C(EXCEPTION), v))
+DEFINE_METASIG(IM(Str_Obj_RetVoid, s j, v))
+DEFINE_METASIG_T(IM(Str_BindingFlags_Binder_ArrType_ArrParameterModifier_RetMethodInfo, \
+ s g(BINDING_FLAGS) C(BINDER) a(C(TYPE)) a(g(PARAMETER_MODIFIER)), C(METHOD_INFO)))
+DEFINE_METASIG_T(IM(Str_BindingFlags_Binder_Type_ArrType_ArrParameterModifier_RetPropertyInfo, \
+ s g(BINDING_FLAGS) C(BINDER) C(TYPE) a(C(TYPE)) a(g(PARAMETER_MODIFIER)), C(PROPERTY_INFO)))
+DEFINE_METASIG(IM(Str_Str_RetStr, s s, s))
+DEFINE_METASIG(IM(Str_Str_RetVoid, s s, v))
+DEFINE_METASIG(IM(Str_Str_Str_RetVoid, s s s, v))
+DEFINE_METASIG(IM(Str_Int_RetVoid, s i, v))
+DEFINE_METASIG(IM(Str_Str_Int_RetVoid, s s i, v))
+DEFINE_METASIG(IM(Str_Str_Str_Int_RetVoid, s s s i, v))
+DEFINE_METASIG_T(IM(Str_BindingFlags_RetFieldInfo, s g(BINDING_FLAGS), C(FIELD_INFO)))
+DEFINE_METASIG_T(IM(Str_BindingFlags_RetMemberInfo, s g(BINDING_FLAGS), a(C(MEMBER))))
+DEFINE_METASIG_T(IM(Str_BindingFlags_RetMethodInfo, s g(BINDING_FLAGS), C(METHOD_INFO)))
+DEFINE_METASIG_T(IM(Str_BindingFlags_RetPropertyInfo, s g(BINDING_FLAGS), C(PROPERTY_INFO)))
+DEFINE_METASIG_T(IM(Str_BindingFlags_Binder_Obj_ArrObj_ArrParameterModifier_CultureInfo_ArrStr_RetObj, \
+ s g(BINDING_FLAGS) C(BINDER) j a(j) a(g(PARAMETER_MODIFIER)) C(CULTURE_INFO) a(s), j))
+DEFINE_METASIG_T(IM(Str_Delegate_RetMethodInfo, s C(DELEGATE), C(METHOD_INFO)))
+DEFINE_METASIG_T(IM(Str_Type_Str_RetVoid, s C(TYPE) s, v))
+DEFINE_METASIG_T(SM(Delegate_RetIntPtr, C(DELEGATE), I))
+DEFINE_METASIG_T(SM(Delegate_RefIntPtr_RetIntPtr, C(DELEGATE) r(I), I))
+DEFINE_METASIG_T(SM(RuntimeTypeHandle_RetType, g(RT_TYPE_HANDLE), C(TYPE)))
+DEFINE_METASIG_T(SM(RuntimeTypeHandle_RetIntPtr, g(RT_TYPE_HANDLE), I))
+DEFINE_METASIG_T(SM(RuntimeMethodHandle_RetIntPtr, g(METHOD_HANDLE), I))
+DEFINE_METASIG_T(SM(IntPtr_Type_RetDelegate, I C(TYPE), C(DELEGATE)))
+
+
+DEFINE_METASIG_T(IM(Type_RetArrObj, C(TYPE) F, a(j)))
+DEFINE_METASIG(IM(Bool_RetVoid, F, v))
+DEFINE_METASIG_T(IM(BindingFlags_RetArrFieldInfo, g(BINDING_FLAGS), a(C(FIELD_INFO))))
+DEFINE_METASIG_T(IM(BindingFlags_RetArrMemberInfo, g(BINDING_FLAGS), a(C(MEMBER))))
+DEFINE_METASIG_T(IM(BindingFlags_RetArrMethodInfo, g(BINDING_FLAGS), a(C(METHOD_INFO))))
+DEFINE_METASIG_T(IM(BindingFlags_RetArrPropertyInfo, g(BINDING_FLAGS), a(C(PROPERTY_INFO))))
+DEFINE_METASIG(IM(ArrByte_RetVoid, a(b), v))
+DEFINE_METASIG_T(IM(ArrByte_HostProtectionResource_HostProtectionResource_RetBool, a(b) g(HOST_PROTECTION_RESOURCE) g(HOST_PROTECTION_RESOURCE), F))
+DEFINE_METASIG(IM(ArrChar_RetVoid, a(u), v))
+DEFINE_METASIG(IM(ArrChar_Int_Int_RetVoid, a(u) i i, v))
+DEFINE_METASIG_T(IM(ArrType_ArrException_Str_RetVoid, a(C(TYPE)) a(C(EXCEPTION)) s, v))
+DEFINE_METASIG(IM(RefInt_RefInt_RefInt_RetArrByte, r(i) r(i) r(i), a(b)))
+DEFINE_METASIG_T(IM(RefInt_RetRuntimeType, r(i) , C(CLASS)))
+DEFINE_METASIG_T(IM(RuntimeType_RetVoid, C(CLASS) , v))
+DEFINE_METASIG_T(SM(ArrException_PtrInt_RetVoid, a(C(EXCEPTION)) P(i), v))
+
+DEFINE_METASIG_T(IM(RuntimeArgumentHandle_PtrVoid_RetVoid, g(ARGUMENT_HANDLE) P(v), v))
+DEFINE_METASIG_T(IM(SecurityPermissionFlag_RetVoid, g(SECURITY_PERMISSION_FLAG), v))
+DEFINE_METASIG_T(IM(PermissionState_RetVoid, g(PERMISSION_STATE), v))
+DEFINE_METASIG_T(IM(SecurityAction_RetVoid, g(SECURITY_ACTION), v))
+DEFINE_METASIG_T(IM(ReflectionPermissionFlag_RetVoid, g(REFLECTION_PERMISSION_FLAG), v))
+DEFINE_METASIG_T(IM(LicenseInteropHelper_GetCurrentContextInfo, r(i) r(I) g(RT_TYPE_HANDLE), v))
+DEFINE_METASIG(IM(LicenseInteropHelper_SaveKeyInCurrentContext, I, v))
+DEFINE_METASIG_T(SM(LicenseInteropHelper_AllocateAndValidateLicense, g(RT_TYPE_HANDLE) I i, j))
+DEFINE_METASIG_T(SM(LicenseInteropHelper_RequestLicKey, g(RT_TYPE_HANDLE) r(I), i))
+DEFINE_METASIG_T(IM(LicenseInteropHelper_GetLicInfo, g(RT_TYPE_HANDLE) r(i) r(i), v))
+
+// App Domain related defines
+DEFINE_METASIG(IM(Bool_Str_Str_ArrStr_ArrStr_RetVoid, F s s a(s) a(s), v))
+DEFINE_METASIG_T(IM(LoaderOptimization_RetVoid, g(LOADER_OPTIMIZATION), v))
+DEFINE_METASIG_T(IM(Evidence_Evidence_Bool_IntPtr_Bool_RetVoid, C(EVIDENCE) C(EVIDENCE) F I F, v))
+DEFINE_METASIG_T(SM(Str_Evidence_AppDomainSetup_RetAppDomain, s C(EVIDENCE) C(APPDOMAIN_SETUP), C(APP_DOMAIN)))
+DEFINE_METASIG_T(SM(Str_Evidence_Str_Str_Bool_RetAppDomain, s C(EVIDENCE) s s F, C(APP_DOMAIN)))
+DEFINE_METASIG_T(SM(Str_RetAppDomain, s, C(APP_DOMAIN)))
+DEFINE_METASIG_T(SM(Str_AppDomainSetup_Evidence_Evidence_IntPtr_Str_ArrStr_ArrStr_RetObj, s C(APPDOMAIN_SETUP) C(EVIDENCE) C(EVIDENCE) I s a(s) a(s), j))
+#ifdef FEATURE_APTCA
+DEFINE_METASIG(IM(PtrChar_Int_PtrByte_Int_RetBool, P(u) i P(b) i, F))
+#endif //FEATURE_APTCA
+#ifdef FEATURE_COMINTEROP
+// System.AppDomain.OnReflectionOnlyNamespaceResolveEvent
+DEFINE_METASIG_T(IM(Assembly_Str_RetArrAssembly, C(ASSEMBLY) s, a(C(ASSEMBLY))))
+// System.AppDomain.OnDesignerNamespaceResolveEvent
+DEFINE_METASIG(IM(Str_RetArrStr, s, a(s)))
+#endif //FEATURE_COMINTEROP
+
+// Object Clone
+#ifdef FEATURE_SERIALIZATION
+DEFINE_METASIG_T(IM(SerInfo_RetVoid, C(SERIALIZATION_INFO), v))
+DEFINE_METASIG_T(IM(SerInfo_StrContext_RetVoid, C(SERIALIZATION_INFO) g(STREAMING_CONTEXT), v))
+DEFINE_METASIG_T(SM(Obj_ArrStr_ArrObj_OutStreamingContext_RetSerializationInfo, j a(s) a(j) r(g(STREAMING_CONTEXT)), C(SERIALIZATION_INFO)))
+#endif // FEATURE_SERIALIZATION
+DEFINE_METASIG(SM(Obj_OutStr_OutStr_OutArrStr_OutArrObj_RetObj, j r(s) r(s) r(a(s)) r(a(j)), j))
+
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+// Execution Context
+DEFINE_METASIG_T(SM(SyncCtx_ArrIntPtr_Bool_Int_RetInt, C(SYNCHRONIZATION_CONTEXT) a(I) F i, i))
+#endif // #ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+// HostProtectionException
+DEFINE_METASIG_T(IM(HPR_HPR_RetVoid, g(HOST_PROTECTION_RESOURCE) g(HOST_PROTECTION_RESOURCE), v))
+
+#ifdef FEATURE_COMINTEROP
+// The signature of the method System.Runtime.InteropServices.ICustomQueryInterface.GetInterface
+DEFINE_METASIG_T(IM(RefGuid_OutIntPtr_RetCustomQueryInterfaceResult, r(g(GUID)) r(I), g(CUSTOMQUERYINTERFACERESULT)))
+#endif //FEATURE_COMINTEROP
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+DEFINE_METASIG_T(SM(IntPtr_AssemblyName_RetAssemblyBase, I C(ASSEMBLY_NAME), C(ASSEMBLYBASE)))
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+// ThreadPool
+DEFINE_METASIG(SM(Obj_Bool_RetVoid, j F, v))
+
+// For FailFast
+DEFINE_METASIG(SM(Str_RetVoid, s, v))
+DEFINE_METASIG(SM(Str_Uint_RetVoid, s K, v))
+DEFINE_METASIG_T(SM(Str_Exception_RetVoid, s C(EXCEPTION), v))
+
+// fields - e.g.:
+// DEFINE_METASIG(Fld(PtrVoid, P(v)))
+
+// Runtime Helpers
+DEFINE_METASIG(SM(Obj_Obj_Bool_RetVoid, j j F, v))
+
+DEFINE_METASIG_T(IM(Dec_RetVoid, g(DECIMAL), v))
+DEFINE_METASIG_T(IM(Currency_RetVoid, g(CURRENCY), v))
+DEFINE_METASIG_T(SM(RefDec_RetVoid, r(g(DECIMAL)), v))
+
+DEFINE_METASIG(GM(RefT_T_T_RetT, IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, r(M(0)) M(0) M(0), M(0)))
+DEFINE_METASIG(SM(RefObject_Object_Object_RetObject, r(j) j j, j))
+
+DEFINE_METASIG_T(SM(RefCleanupWorkList_RetVoid, r(C(CLEANUP_WORK_LIST)), v))
+DEFINE_METASIG_T(SM(RefCleanupWorkList_SafeHandle_RetIntPtr, r(C(CLEANUP_WORK_LIST)) C(SAFE_HANDLE), I))
+
+// Undefine macros in case we include the file again in the compilation unit
+
+#undef DEFINE_METASIG
+#undef DEFINE_METASIG_T
+
+#undef METASIG_BODY
+#undef METASIG_ATOM
+#undef METASIG_RECURSE
+
+
+#undef SM
+#undef IM
+#undef GM
+#undef Fld
+
+#undef a
+#undef P
+#undef r
+#undef b
+#undef u
+#undef d
+#undef f
+#undef i
+#undef K
+#undef I
+#undef U
+#undef l
+#undef L
+#undef h
+#undef H
+#undef v
+#undef B
+#undef F
+#undef j
+#undef s
+#undef C
+#undef g
+#undef T
+#undef G
+#undef M
+
+#undef _
+
+
+#endif // DEFINE_METASIG
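+
+// Editor's sketch (not part of the original file; the generated identifier
+// and binder API below are assumptions about how these entries are consumed):
+// each DEFINE_METASIG line is typically expanded elsewhere into a named
+// hard-coded signature that runtime code can look up by its mnemonic name,
+// along the lines of:
+//
+//   Signature sig = MscorlibBinder::GetSignature(&gsig_SM_Str_RetVoid);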
diff --git a/src/vm/method.cpp b/src/vm/method.cpp
new file mode 100644
index 0000000000..2dc699e534
--- /dev/null
+++ b/src/vm/method.cpp
@@ -0,0 +1,5849 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: Method.CPP
+//
+
+//
+// See the Book of the Runtime entry for the overall design:
+// file:../../doc/BookOfTheRuntime/ClassLoader/MethodDescDesign.doc
+//
+
+
+#include "common.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "security.h"
+#include "verifier.hpp"
+#include "excep.h"
+#include "dbginterface.h"
+#include "ecall.h"
+#include "eeconfig.h"
+#include "mlinfo.h"
+#include "dllimport.h"
+#include "generics.h"
+#include "genericdict.h"
+#include "typedesc.h"
+#include "typestring.h"
+#include "virtualcallstub.h"
+#include "jitinterface.h"
+#include "runtimehandles.h"
+#include "eventtrace.h"
+#ifndef FEATURE_CORECLR
+#include "fxretarget.h"
+#endif
+#include "interoputil.h"
+#include "prettyprintsig.h"
+#include "formattype.h"
+#ifdef FEATURE_INTERPRETER
+#include "interpreter.h"
+#endif
+
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif
+
+#ifdef FEATURE_COMINTEROP
+#include "comcallablewrapper.h"
+#include "clrtocomcall.h"
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4244)
+#endif // _MSC_VER
+
+#ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
+GVAL_IMPL(DWORD, g_MiniMetaDataBuffMaxSize);
+GVAL_IMPL(TADDR, g_MiniMetaDataBuffAddress);
+#endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
+
+// forward decl
+bool FixupSignatureContainingInternalTypes(
+ DataImage * image,
+ PCCOR_SIGNATURE pSig,
+ DWORD cSig,
+ bool checkOnly = false);
+
+// Alias ComPlusCallMethodDesc to regular MethodDesc to simplify definition of the size table
+#ifndef FEATURE_COMINTEROP
+#define ComPlusCallMethodDesc MethodDesc
+#endif
+
+// Verify that the structure sizes of our MethodDescs support proper
+// alignment for atomic stub replacement.
+//
+static_assert_no_msg((sizeof(MethodDescChunk) & MethodDesc::ALIGNMENT_MASK) == 0);
+static_assert_no_msg((sizeof(MethodDesc) & MethodDesc::ALIGNMENT_MASK) == 0);
+static_assert_no_msg((sizeof(FCallMethodDesc) & MethodDesc::ALIGNMENT_MASK) == 0);
+static_assert_no_msg((sizeof(NDirectMethodDesc) & MethodDesc::ALIGNMENT_MASK) == 0);
+static_assert_no_msg((sizeof(EEImplMethodDesc) & MethodDesc::ALIGNMENT_MASK) == 0);
+static_assert_no_msg((sizeof(ArrayMethodDesc) & MethodDesc::ALIGNMENT_MASK) == 0);
+static_assert_no_msg((sizeof(ComPlusCallMethodDesc) & MethodDesc::ALIGNMENT_MASK) == 0);
+static_assert_no_msg((sizeof(DynamicMethodDesc) & MethodDesc::ALIGNMENT_MASK) == 0);
+
+#define METHOD_DESC_SIZES(adjustment) \
+ adjustment + sizeof(MethodDesc), /* mcIL */ \
+ adjustment + sizeof(FCallMethodDesc), /* mcFCall */ \
+ adjustment + sizeof(NDirectMethodDesc), /* mcNDirect */ \
+ adjustment + sizeof(EEImplMethodDesc), /* mcEEImpl */ \
+ adjustment + sizeof(ArrayMethodDesc), /* mcArray */ \
+ adjustment + sizeof(InstantiatedMethodDesc), /* mcInstantiated */ \
+ adjustment + sizeof(ComPlusCallMethodDesc), /* mcComInterOp */ \
+ adjustment + sizeof(DynamicMethodDesc) /* mcDynamic */
+
+const SIZE_T MethodDesc::s_ClassificationSizeTable[] = {
+    // This is the raw part of the table: the base size of each classification, with no optional slots.
+ METHOD_DESC_SIZES(0),
+
+ // This extended part of the table is used for faster MethodDesc size lookup.
+    // We index into it using the optional slot flags.
+ METHOD_DESC_SIZES(sizeof(NonVtableSlot)),
+ METHOD_DESC_SIZES(sizeof(MethodImpl)),
+ METHOD_DESC_SIZES(sizeof(NonVtableSlot) + sizeof(MethodImpl))
+};
+
+#ifndef FEATURE_COMINTEROP
+#undef ComPlusCallMethodDesc
+#endif
+
+
+//*******************************************************************************
+SIZE_T MethodDesc::SizeOf()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ SIZE_T size = s_ClassificationSizeTable[m_wFlags & (mdcClassification | mdcHasNonVtableSlot | mdcMethodImpl)];
+
+ if (HasNativeCodeSlot())
+ {
+ size += (*dac_cast<PTR_TADDR>(dac_cast<TADDR>(this) + size) & FIXUP_LIST_MASK) ?
+ (sizeof(NativeCodeSlot) + sizeof(FixupListSlot)) : sizeof(NativeCodeSlot);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (IsGenericComPlusCall())
+ size += sizeof(ComPlusCallInfo);
+#endif // FEATURE_COMINTEROP
+
+ return size;
+}
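+
+// Editor's sketch, inferred from SizeOf above (illustrative, not
+// authoritative): the optional data lives immediately after the fixed-size
+// part, so a MethodDesc is laid out roughly as
+//
+//   [classification-specific MethodDesc base]
+//   [NonVtableSlot]    if mdcHasNonVtableSlot
+//   [MethodImpl]       if mdcMethodImpl
+//   [NativeCodeSlot]   if HasNativeCodeSlot()
+//   [FixupListSlot]    if the native code slot has FIXUP_LIST_MASK set
+//   [ComPlusCallInfo]  if IsGenericComPlusCall() (FEATURE_COMINTEROP only)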
+
+//*******************************************************************************
+BOOL MethodDesc::IsIntrospectionOnly()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetModule()->GetAssembly()->IsIntrospectionOnly();
+}
+
+/*********************************************************************/
+#ifndef FEATURE_CORECLR
+#ifndef DACCESS_COMPILE
+BOOL NDirectMethodDesc::HasDefaultDllImportSearchPathsAttribute()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(IsDefaultDllImportSearchPathsAttributeCached())
+ {
+ return (ndirect.m_wFlags & kDefaultDllImportSearchPathsStatus) != 0;
+ }
+
+ _ASSERTE(!IsZapped());
+
+ BOOL attributeIsFound = GetDefaultDllImportSearchPathsAttributeValue(GetMDImport(),GetMemberDef(),&ndirect.m_DefaultDllImportSearchPathsAttributeValue);
+
+ if(attributeIsFound )
+ {
+ InterlockedSetNDirectFlags(kDefaultDllImportSearchPathsIsCached | kDefaultDllImportSearchPathsStatus);
+ }
+ else
+ {
+ InterlockedSetNDirectFlags(kDefaultDllImportSearchPathsIsCached);
+ }
+
+ return (ndirect.m_wFlags & kDefaultDllImportSearchPathsStatus) != 0;
+}
+#endif //!DACCESS_COMPILE
+#endif // !FEATURE_CORECLR
+
+//*******************************************************************************
+#ifndef DACCESS_COMPILE
+VOID MethodDesc::EnsureActive()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ GetMethodTable()->EnsureInstanceActive();
+ if (HasMethodInstantiation() && !IsGenericMethodDefinition())
+ {
+ Instantiation methodInst = GetMethodInstantiation();
+ for (DWORD i = 0; i < methodInst.GetNumArgs(); ++i)
+ {
+ MethodTable * pMT = methodInst[i].GetMethodTable();
+ if (pMT)
+ pMT->EnsureInstanceActive();
+ }
+ }
+}
+#endif //!DACCESS_COMPILE
+
+//*******************************************************************************
+CHECK MethodDesc::CheckActivated()
+{
+ WRAPPER_NO_CONTRACT;
+ CHECK(GetModule()->CheckActivated());
+ CHECK_OK;
+}
+
+//*******************************************************************************
+BaseDomain *MethodDesc::GetDomain()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ if (HasMethodInstantiation() && !IsGenericMethodDefinition())
+ {
+ return BaseDomain::ComputeBaseDomain(GetMethodTable()->GetDomain(),
+ GetMethodInstantiation());
+ }
+ else
+ {
+ return GetMethodTable()->GetDomain();
+ }
+}
+
+#ifndef DACCESS_COMPILE
+
+//*******************************************************************************
+LoaderAllocator * MethodDesc::GetLoaderAllocatorForCode()
+{
+ if (IsLCGMethod())
+ {
+ return ::GetAppDomain()->GetLoaderAllocator();
+ }
+ else
+ {
+ return GetLoaderAllocator();
+ }
+}
+
+
+//*******************************************************************************
+LoaderAllocator * MethodDesc::GetDomainSpecificLoaderAllocator()
+{
+ if (GetLoaderModule()->IsCollectible())
+ {
+ return GetLoaderAllocator();
+ }
+ else
+ {
+ return ::GetAppDomain()->GetLoaderAllocator();
+ }
+
+}
+
+#endif //!DACCESS_COMPILE
+
+//*******************************************************************************
+LPCUTF8 MethodDesc::GetName(USHORT slot)
+{
+ // MethodDesc::GetDeclMethodDesc can throw.
+ WRAPPER_NO_CONTRACT;
+ MethodDesc *pDeclMD = GetDeclMethodDesc((UINT32)slot);
+ CONSISTENCY_CHECK(IsInterface() || !pDeclMD->IsInterface());
+ return pDeclMD->GetName();
+}
+
+//*******************************************************************************
+LPCUTF8 MethodDesc::GetName()
+{
+ CONTRACTL
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS; // MethodImpl::FindMethodDesc can throw.
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }CONTRACTL_END;
+
+ g_IBCLogger.LogMethodDescAccess(this);
+
+ if (IsArray())
+ {
+ // Array classes don't have metadata tokens
+ return dac_cast<PTR_ArrayMethodDesc>(this)->GetMethodName();
+ }
+ else if (IsNoMetadata())
+ {
+ // LCG methods don't have metadata tokens
+ return dac_cast<PTR_DynamicMethodDesc>(this)->GetMethodName();
+ }
+ else
+ {
+ // Get the metadata string name for this method
+ LPCUTF8 result = NULL;
+
+ // This probes only if we have a thread, in which case it is OK to throw the SO.
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
+
+ if (FAILED(GetMDImport()->GetNameOfMethodDef(GetMemberDef(), &result)))
+ {
+ result = NULL;
+ }
+
+ END_SO_INTOLERANT_CODE;
+
+ return(result);
+ }
+}
+
+#ifndef DACCESS_COMPILE
+/*
+ * Function to get a method's name and its namespace (or class name)
+ */
+VOID MethodDesc::GetMethodInfoNoSig(SString &namespaceOrClassName, SString &methodName)
+{
+ static LPCWSTR pDynamicClassName = W("dynamicClass");
+
+ // namespace
+ if(IsDynamicMethod())
+ namespaceOrClassName.Append(pDynamicClassName);
+ else
+ TypeString::AppendType(namespaceOrClassName, TypeHandle(GetMethodTable()));
+
+ // name
+ methodName.AppendUTF8(GetName());
+}
+
+/*
+ * Function to get a method's name, its namespace and signature (legacy format)
+ */
+VOID MethodDesc::GetMethodInfo(SString &namespaceOrClassName, SString &methodName, SString &methodSignature)
+{
+ GetMethodInfoNoSig(namespaceOrClassName, methodName);
+
+ // signature
+ CQuickBytes qbOut;
+ ULONG cSig = 0;
+ PCCOR_SIGNATURE pSig;
+
+ GetSig(&pSig, &cSig);
+ PrettyPrintSigInternalLegacy(pSig, cSig, " ", &qbOut, GetMDImport());
+ methodSignature.AppendUTF8((char *)qbOut.Ptr());
+}
+
+/*
+ * Function to get a method's name, its namespace and signature (new format)
+ */
+VOID MethodDesc::GetMethodInfoWithNewSig(SString &namespaceOrClassName, SString &methodName, SString &methodSignature)
+{
+ GetMethodInfoNoSig(namespaceOrClassName, methodName);
+
+ // signature
+ CQuickBytes qbOut;
+ ULONG cSig = 0;
+ PCCOR_SIGNATURE pSig;
+
+ GetSig(&pSig, &cSig);
+ PrettyPrintSig(pSig, (DWORD)cSig, "", &qbOut, GetMDImport(), NULL);
+ methodSignature.AppendUTF8((char *)qbOut.Ptr());
+}
+
+/*
+ * Function to get a method's full name, something like
+ * void [mscorlib]System.StubHelpers.BSTRMarshaler::ClearNative(native int)
+ */
+VOID MethodDesc::GetFullMethodInfo(SString& fullMethodSigName)
+{
+ SString namespaceOrClassName, methodName;
+ GetMethodInfoNoSig(namespaceOrClassName, methodName);
+
+ // signature
+ CQuickBytes qbOut;
+ ULONG cSig = 0;
+ PCCOR_SIGNATURE pSig;
+
+ SString methodFullName;
+ StackScratchBuffer namespaceNameBuffer, methodNameBuffer;
+ methodFullName.AppendPrintf(
+ (LPCUTF8)"[%s] %s::%s",
+ GetModule()->GetAssembly()->GetSimpleName(),
+ namespaceOrClassName.GetUTF8(namespaceNameBuffer),
+ methodName.GetUTF8(methodNameBuffer));
+
+ GetSig(&pSig, &cSig);
+
+ StackScratchBuffer buffer;
+ PrettyPrintSig(pSig, (DWORD)cSig, methodFullName.GetUTF8(buffer), &qbOut, GetMDImport(), NULL);
+ fullMethodSigName.AppendUTF8((char *)qbOut.Ptr());
+}
+
+//*******************************************************************************
+void MethodDesc::PrecomputeNameHash()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsCompilationProcess());
+ }
+ CONTRACTL_END;
+
+
+ // We only have space for a name hash when we can use the packed slot layout
+ if (RequiresFullSlotNumber())
+ {
+ return;
+ }
+
+ // Store a case-insensitive hash so that we can use this value for
+ // both case-sensitive and case-insensitive name lookups
+ SString name(SString::Utf8Literal, GetName());
+ ULONG nameHashValue = (WORD) name.HashCaseInsensitive() & enum_packedSlotLayout_NameHashMask;
+
+ // We expect to set the hash once during NGen and not overwrite any existing bits
+ _ASSERTE((m_wSlotNumber & enum_packedSlotLayout_NameHashMask) == 0);
+
+ m_wSlotNumber |= nameHashValue;
+}
+#endif
+
+//*******************************************************************************
+BOOL MethodDesc::MightHaveName(ULONG nameHashValue)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // We only have space for a name hash when we are using the packed slot layout
+ if (RequiresFullSlotNumber())
+ {
+ return TRUE;
+ }
+
+ WORD thisHashValue = m_wSlotNumber & enum_packedSlotLayout_NameHashMask;
+
+ // A zero value might mean no hash has ever been set
+ // (checking this way is better than dedicating a bit to tell us)
+ if (thisHashValue == 0)
+ {
+ return TRUE;
+ }
+
+ WORD testHashValue = (WORD) nameHashValue & enum_packedSlotLayout_NameHashMask;
+
+ return (thisHashValue == testHashValue);
+}
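+
+// Editor's usage sketch (hypothetical caller; szSearchName and pMD are
+// assumed names): the hash passed in must be computed the same way
+// PrecomputeNameHash stores it, i.e. case-insensitively, so a lookup can
+// cheaply reject most non-matching MethodDescs before comparing full names:
+//
+//   SString name(SString::Utf8Literal, szSearchName);
+//   if (pMD->MightHaveName(name.HashCaseInsensitive()) &&
+//       strcmp(pMD->GetName(), szSearchName) == 0)
+//   {
+//       // confirmed case-sensitive match
+//   }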
+
+//*******************************************************************************
+void MethodDesc::GetSig(PCCOR_SIGNATURE *ppSig, DWORD *pcSig)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ if (HasStoredSig())
+ {
+ PTR_StoredSigMethodDesc pSMD = dac_cast<PTR_StoredSigMethodDesc>(this);
+ if (pSMD->HasStoredMethodSig() || GetClassification()==mcDynamic)
+ {
+ *ppSig = pSMD->GetStoredMethodSig(pcSig);
+ PREFIX_ASSUME(*ppSig != NULL);
+
+#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
+ _ASSERTE_MSG((**ppSig & IMAGE_CEE_CS_CALLCONV_NEEDSRESTORE) == 0 || !IsILStub() || (strncmp(m_pszDebugMethodName,"IL_STUB_Array", 13)==0) ,
+ "CheckRestore must be called on IL stub MethodDesc");
+#endif // FEATURE_PREJIT && !DACCESS_COMPILE
+ return;
+ }
+ }
+
+ GetSigFromMetadata(GetMDImport(), ppSig, pcSig);
+ PREFIX_ASSUME(*ppSig != NULL);
+}
+
+//*******************************************************************************
+// get a function signature from its metadata
+// Arguments:
+// input:
+//        importer      the metadata importer to be used
+// output:
+// ppSig the function signature
+// pcSig number of elements in the signature
+
+
+void MethodDesc::GetSigFromMetadata(IMDInternalImport * importer,
+ PCCOR_SIGNATURE * ppSig,
+ DWORD * pcSig)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ if (FAILED(importer->GetSigOfMethodDef(GetMemberDef(), pcSig, ppSig)))
+    { // The class loader already asked for the signature, so this should always succeed (unless
+        // there's a bug or a new code path)
+ _ASSERTE(!"If this ever fires, then this method should return HRESULT");
+ *ppSig = NULL;
+ *pcSig = 0;
+ }
+}
+
+//*******************************************************************************
+PCCOR_SIGNATURE MethodDesc::GetSig()
+{
+ WRAPPER_NO_CONTRACT;
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+
+ GetSig(&pSig, &cSig);
+
+ PREFIX_ASSUME(pSig != NULL);
+
+ return pSig;
+}
+
+Signature MethodDesc::GetSignature()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+
+ GetSig(&pSig, &cSig);
+
+ PREFIX_ASSUME(pSig != NULL);
+
+ return Signature(pSig, cSig);
+}
+
+PCODE MethodDesc::GetMethodEntryPoint()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ // Keep implementations of MethodDesc::GetMethodEntryPoint and MethodDesc::GetAddrOfSlot in sync!
+
+ g_IBCLogger.LogMethodDescAccess(this);
+
+ if (HasNonVtableSlot())
+ {
+ SIZE_T size = GetBaseSize();
+
+ TADDR pSlot = dac_cast<TADDR>(this) + size;
+
+ return IsZapped() ? NonVtableSlot::GetValueAtPtr(pSlot) : *PTR_PCODE(pSlot);
+ }
+
+ _ASSERTE(GetMethodTable()->IsCanonicalMethodTable());
+ return GetMethodTable_NoLogging()->GetSlot(GetSlot());
+}
+
+PTR_PCODE MethodDesc::GetAddrOfSlot()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ // Keep implementations of MethodDesc::GetMethodEntryPoint and MethodDesc::GetAddrOfSlot in sync!
+
+ if (HasNonVtableSlot())
+ {
+ // Slots in NGened images are relative pointers
+ _ASSERTE(!IsZapped());
+
+ SIZE_T size = GetBaseSize();
+
+ return PTR_PCODE(dac_cast<TADDR>(this) + size);
+ }
+
+ _ASSERTE(GetMethodTable()->IsCanonicalMethodTable());
+ return GetMethodTable()->GetSlotPtr(GetSlot());
+}
+
+//*******************************************************************************
+PTR_MethodDesc MethodDesc::GetDeclMethodDesc(UINT32 slotNumber)
+{
+ CONTRACTL {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ INSTANCE_CHECK;
+ } CONTRACTL_END;
+
+ MethodDesc *pMDResult = this;
+
+ // If the MethodDesc is not itself a methodImpl, but it is not in its native
+ // slot, then someone (perhaps itself) must have overridden a methodImpl
+ // in a parent, which causes the method to get put into all of the methodImpl
+ // slots. So, the MethodDesc is implicitly a methodImpl without containing
+ // the data. To find the real methodImpl MethodDesc, climb the inheritance
+ // hierarchy checking the native slot on the way.
+ if ((UINT32)pMDResult->GetSlot() != slotNumber)
+ {
+ while (!pMDResult->IsMethodImpl())
+ {
+ CONSISTENCY_CHECK(CheckPointer(pMDResult->GetMethodTable()->GetParentMethodTable()));
+ CONSISTENCY_CHECK(slotNumber < pMDResult->GetMethodTable()->GetParentMethodTable()->GetNumVirtuals());
+ pMDResult = pMDResult->GetMethodTable()->GetParentMethodTable()->GetMethodDescForSlot(slotNumber);
+ }
+
+ {
+ CONSISTENCY_CHECK(pMDResult->IsMethodImpl());
+ MethodImpl *pImpl = pMDResult->GetMethodImpl();
+ pMDResult = pImpl->FindMethodDesc(slotNumber, PTR_MethodDesc(pMDResult));
+ }
+
+ // It is possible that a methodImpl'd slot got copied into another slot because
+ // of slot unification, for example:
+ // C1::A is methodImpled with C2::B
+ // C1::B is methodImpled with C2::C
+    //      this means that, through slot unification, A is tied to B and B is tied to C,
+ // so A is tied to C even though C does not have a methodImpl entry specifically
+ // relating to that slot. In this case, we recurse to the parent type and ask the
+ // same question again.
+ if (pMDResult->GetSlot() != slotNumber)
+ {
+ MethodTable * pMTOfMD = pMDResult->GetMethodTable();
+ CONSISTENCY_CHECK(slotNumber < pMTOfMD->GetParentMethodTable()->GetNumVirtuals());
+ pMDResult = pMTOfMD->GetParentMethodTable()->GetMethodDescForSlot(slotNumber);
+ pMDResult = pMDResult->GetDeclMethodDesc(slotNumber);
+ }
+ }
+
+ CONSISTENCY_CHECK(CheckPointer(pMDResult));
+ CONSISTENCY_CHECK((UINT32)pMDResult->GetSlot() == slotNumber);
+ return PTR_MethodDesc(pMDResult);
+}
+
+//*******************************************************************************
+// Returns a hash for the method.
+// The hash will be the same for the method across multiple process runs.
+COUNT_T MethodDesc::GetStableHash()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsRestored_NoLogging());
+ DefineFullyQualifiedNameForClass();
+
+ const char * moduleName = GetModule()->GetSimpleName();
+ const char * className;
+ const char * methodName = GetName();
+
+ if (IsLCGMethod())
+ {
+ className = "DynamicClass";
+ }
+ else if (IsILStub())
+ {
+ className = ILStubResolver::GetStubClassName(this);
+ }
+ else
+ {
+#if defined(_DEBUG)
+        // Calling _GetFullyQualifiedNameForClass in a chk build is very expensive
+        // since it constructs the class name every time we call this method. In chk
+        // builds we already have a cheaper way to get the class name -
+        // GetDebugClassName - which doesn't recalculate the class name each time.
+        // This results in a huge saving in NGen time for checked builds.
+ className = m_pszDebugClassName;
+#else // !_DEBUG
+        // Since this is for diagnostic purposes only, give up on the
+        // namespace, as we don't have a buffer to concatenate it. Also note
+        // that this won't show array class names.
+ LPCUTF8 nameSpace;
+ MethodTable * pMT = GetMethodTable();
+
+ className = pMT->GetFullyQualifiedNameInfo(&nameSpace);
+#endif // !_DEBUG
+ }
+
+ COUNT_T hash = HashStringA(moduleName); // Start the hash with the Module name
+ hash = HashCOUNT_T(hash, HashStringA(className)); // Hash in the name of the Class name
+ hash = HashCOUNT_T(hash, HashStringA(methodName)); // Hash in the name of the Method name
+
+ // Handle Generic Types and Generic Methods
+ //
+ if (HasClassInstantiation() && !GetMethodTable()->IsGenericTypeDefinition())
+ {
+ Instantiation classInst = GetClassInstantiation();
+ for (DWORD i = 0; i < classInst.GetNumArgs(); i++)
+ {
+ MethodTable * pMT = classInst[i].GetMethodTable();
+ // pMT can be NULL for TypeVarTypeDesc
+ // @TODO: Implement TypeHandle::GetStableHash instead of
+ // checking pMT==NULL
+ if (pMT)
+ hash = HashCOUNT_T(hash, HashStringA(GetFullyQualifiedNameForClass(pMT)));
+ }
+ }
+
+ if (HasMethodInstantiation() && !IsGenericMethodDefinition())
+ {
+ Instantiation methodInst = GetMethodInstantiation();
+ for (DWORD i = 0; i < methodInst.GetNumArgs(); i++)
+ {
+ MethodTable * pMT = methodInst[i].GetMethodTable();
+ // pMT can be NULL for TypeVarTypeDesc
+ // @TODO: Implement TypeHandle::GetStableHash instead of
+ // checking pMT==NULL
+ if (pMT)
+ hash = HashCOUNT_T(hash, HashStringA(GetFullyQualifiedNameForClass(pMT)));
+ }
+ }
+
+ return hash;
+}
+
+//*******************************************************************************
+// Get the number of type parameters to a generic method
+DWORD MethodDesc::GetNumGenericMethodArgs()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ CANNOT_TAKE_LOCK;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ g_IBCLogger.LogMethodDescAccess(this);
+
+ if (GetClassification() == mcInstantiated)
+ {
+ InstantiatedMethodDesc *pIMD = AsInstantiatedMethodDesc();
+ return pIMD->m_wNumGenericArgs;
+ }
+ else return 0;
+}
+
+//*******************************************************************************
+MethodTable * MethodDesc::GetExactDeclaringType(MethodTable * ownerOrSubType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodTable * pMT = GetMethodTable();
+
+ // Fast path for typical case.
+ if (ownerOrSubType == pMT)
+ return pMT;
+
+    // If we come here for an array method, the typedef tokens inside GetMethodTableMatchingParentClass
+ // will match, but the types are actually from unrelated arrays, so the result would be incorrect.
+ _ASSERTE(!IsArray());
+
+ return ownerOrSubType->GetMethodTableMatchingParentClass(pMT);
+}
+
+//*******************************************************************************
+Instantiation MethodDesc::GetExactClassInstantiation(TypeHandle possibleObjType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+
+ return (possibleObjType.IsNull()
+ ? GetClassInstantiation()
+ : possibleObjType.GetInstantiationOfParentClass(GetMethodTable()));
+}
+
+//*******************************************************************************
+BOOL MethodDesc::HasSameMethodDefAs(MethodDesc * pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (this == pMD)
+ return TRUE;
+
+ return (GetMemberDef() == pMD->GetMemberDef()) && (GetModule() == pMD->GetModule());
+}
+
+//*******************************************************************************
+BOOL MethodDesc::IsTypicalSharedInstantiation()
+{
+ WRAPPER_NO_CONTRACT;
+ PRECONDITION(IsRestored_NoLogging());
+
+ Instantiation classInst = GetMethodTable()->GetInstantiation();
+ if (!ClassLoader::IsTypicalSharedInstantiation(classInst))
+ return FALSE;
+
+ if (IsGenericMethodDefinition())
+ return FALSE;
+
+ Instantiation methodInst = GetMethodInstantiation();
+ if (!ClassLoader::IsTypicalSharedInstantiation(methodInst))
+ return FALSE;
+
+ return TRUE;
+}
+
+//*******************************************************************************
+Instantiation MethodDesc::LoadMethodInstantiation()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ if (IsGenericMethodDefinition() && !IsTypicalMethodDefinition())
+ {
+ return LoadTypicalMethodDefinition()->GetMethodInstantiation();
+ }
+ else
+ return GetMethodInstantiation();
+}
+
+//*******************************************************************************
+Module *MethodDesc::GetDefiningModuleForOpenMethod()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ Module *pModule = GetMethodTable()->GetDefiningModuleForOpenType();
+ if (pModule != NULL)
+ return pModule;
+
+ if (IsGenericMethodDefinition())
+ return GetModule();
+
+ Instantiation inst = GetMethodInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ // Encoded types are never open
+ if (!inst[i].IsEncodedFixup())
+ {
+ pModule = inst[i].GetDefiningModuleForOpenType();
+ if (pModule != NULL)
+ return pModule;
+ }
+ }
+
+ return NULL;
+}
+
+
+//*******************************************************************************
+BOOL MethodDesc::ContainsGenericVariables()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ PRECONDITION(IsRestored_NoLogging());
+ }
+ CONTRACTL_END
+
+    // If this is a method of a generic type, check whether the type has
+    // non-instantiated type arguments.
+
+ if (TypeHandle(GetMethodTable()).ContainsGenericVariables())
+ return TRUE;
+
+ if (IsGenericMethodDefinition())
+ return TRUE;
+
+    // If this is an instantiated generic method, check whether any of its type arguments contain generic variables.
+ if (GetNumGenericMethodArgs() != 0)
+ {
+ Instantiation methodInst = GetMethodInstantiation();
+ for (DWORD i = 0; i < methodInst.GetNumArgs(); i++)
+ {
+ if (methodInst[i].ContainsGenericVariables())
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+//*******************************************************************************
+BOOL MethodDesc::IsTightlyBoundToMethodTable()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ // Anything with the real vtable slot is tightly bound
+ if (!HasNonVtableSlot())
+ return TRUE;
+
+ // All instantiations of generic methods are stored in the InstMethHashTable.
+ if (HasMethodInstantiation())
+ {
+ if (IsGenericMethodDefinition())
+ return TRUE;
+ else
+ return FALSE;
+ }
+
+ // Wrapper stubs are stored in the InstMethHashTable, e.g. for static methods in generic classes
+ if (IsWrapperStub())
+ return FALSE;
+
+ return TRUE;
+}
+
+#ifndef DACCESS_COMPILE
+
+#if defined(FEATURE_REMOTING) && !defined(HAS_REMOTING_PRECODE)
+//*******************************************************************************
+void MethodDesc::Destruct()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ if (!IsRestored())
+ return;
+
+ MethodTable *pMT = GetMethodTable();
+ if(pMT->IsMarshaledByRef() || (pMT == g_pObjectClass))
+ {
+ // Destroy the thunk generated to intercept calls for remoting
+ CRemotingServices::DestroyThunk(this);
+ }
+}
+#endif // FEATURE_REMOTING && !HAS_REMOTING_PRECODE
+
+//*******************************************************************************
+HRESULT MethodDesc::Verify(COR_ILMETHOD_DECODER* ILHeader,
+ BOOL fThrowException,
+ BOOL fForceVerify)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END
+
+#ifdef _VER_EE_VERIFICATION_ENABLED
+ // ForceVerify will force verification if the Verifier is OFF
+ if (fForceVerify)
+ goto DoVerify;
+
+ // Don't even try to verify if verifier is off.
+ if (g_fVerifierOff)
+ return S_OK;
+
+ if (IsVerified())
+ return S_OK;
+
+ // LazyCanSkipVerification does not resolve the policy.
+ // We go ahead with verification if policy is not resolved.
+ // In case the verification fails, we resolve policy and
+ // fail verification if the Assembly of this method does not have
+ // permission to skip verification.
+
+ if (Security::LazyCanSkipVerification(GetModule()->GetDomainAssembly()))
+ return S_OK;
+
+#ifdef _DEBUG
+ _ASSERTE(Security::IsSecurityOn());
+ _ASSERTE(GetModule() != SystemDomain::SystemModule());
+#endif // _DEBUG
+
+
+DoVerify:
+
+ HRESULT hr;
+
+ if (fThrowException)
+ hr = Verifier::VerifyMethod(this, ILHeader, NULL,
+ fForceVerify ? VER_FORCE_VERIFY : VER_STOP_ON_FIRST_ERROR);
+ else
+ hr = Verifier::VerifyMethodNoException(this, ILHeader);
+
+ if (SUCCEEDED(hr))
+ SetIsVerified(TRUE);
+
+ return hr;
+#else // !_VER_EE_VERIFICATION_ENABLED
+ _ASSERTE(!"EE Verification is disabled, should never get here");
+ return E_FAIL;
+#endif // !_VER_EE_VERIFICATION_ENABLED
+}
+
+//*******************************************************************************
+
+BOOL MethodDesc::IsVerifiable()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsVerified())
+ return (m_wFlags & mdcVerifiable);
+
+ if (!IsTypicalMethodDefinition())
+ {
+        // We cannot verify a concrete instantiation (e.g. List<int>.Add()).
+        // We have to verify the typical instantiation (e.g. List<T>.Add()).
+ MethodDesc * pGenMethod = LoadTypicalMethodDefinition();
+ BOOL isVerifiable = pGenMethod->IsVerifiable();
+
+ // Propagate the result from the typical instantiation to the
+ // concrete instantiation
+ SetIsVerified(isVerifiable);
+
+ return isVerifiable;
+ }
+
+ COR_ILMETHOD_DECODER *pHeader = NULL;
+ // Don't use HasILHeader() here because it returns the wrong answer
+ // for methods that have DynamicIL (not to be confused with DynamicMethods)
+ if (IsIL() && !IsUnboxingStub())
+ {
+ COR_ILMETHOD_DECODER::DecoderStatus status;
+ COR_ILMETHOD_DECODER header(GetILHeader(), GetMDImport(), &status);
+ if (status != COR_ILMETHOD_DECODER::SUCCESS)
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
+ }
+ pHeader = &header;
+
+#ifdef _VER_EE_VERIFICATION_ENABLED
+ static ConfigDWORD peVerify;
+ if (peVerify.val(CLRConfig::EXTERNAL_PEVerify))
+ {
+ HRESULT hr = Verify(&header, TRUE, FALSE);
+ }
+#endif // _VER_EE_VERIFICATION_ENABLED
+ }
+
+ UnsafeJitFunction(this, pHeader, CORJIT_FLG_IMPORT_ONLY, 0);
+ _ASSERTE(IsVerified());
+
+ return (IsVerified() && (m_wFlags & mdcVerifiable));
+}
+
+//*******************************************************************************
+// Update flags in a thread safe manner.
+WORD MethodDesc::InterlockedUpdateFlags(WORD wMask, BOOL fSet)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ WORD wOldState = m_wFlags;
+ DWORD dwMask = wMask;
+
+ // We need to make this operation atomic (multiple threads can play with the flags field at the same time). But the flags field
+ // is a word and we only have interlock operations over dwords. So we round down the flags field address to the nearest aligned
+ // dword (along with the intended bitfield mask). Note that we make the assumption that the flags word is aligned itself, so we
+    // only have two possibilities: the field already lies on a dword boundary or it's precisely one word out.
+ DWORD* pdwFlags = (DWORD*)((ULONG_PTR)&m_wFlags - (offsetof(MethodDesc, m_wFlags) & 0x3));
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:6326) // "Suppress PREFast warning about comparing two constants"
+#endif // _PREFAST_
+
+#if BIGENDIAN
+ if ((offsetof(MethodDesc, m_wFlags) & 0x3) == 0) {
+#else // !BIGENDIAN
+ if ((offsetof(MethodDesc, m_wFlags) & 0x3) != 0) {
+#endif // !BIGENDIAN
+ static_assert_no_msg(sizeof(m_wFlags) == 2);
+ dwMask <<= 16;
+ }
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+ g_IBCLogger.LogMethodDescWriteAccess(this);
+ EnsureWritablePages(pdwFlags);
+
+ if (fSet)
+ FastInterlockOr(pdwFlags, dwMask);
+ else
+ FastInterlockAnd(pdwFlags, ~dwMask);
+
+ return wOldState;
+}
+
+WORD MethodDesc::InterlockedUpdateFlags3(WORD wMask, BOOL fSet)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ WORD wOldState = m_wFlags3AndTokenRemainder;
+ DWORD dwMask = wMask;
+
+    // We need to make this operation atomic (multiple threads can play with the flags field at the same time). But the flags field
+    // is a word and we only have interlocked operations over dwords. So we round the flags field address down to the nearest aligned
+    // dword (along with the intended bitfield mask). Note that we assume the flags word itself is word-aligned, so there are
+    // only two possibilities: the field already lies on a dword boundary or it's precisely one word out.
+ DWORD* pdwFlags = (DWORD*)((ULONG_PTR)&m_wFlags3AndTokenRemainder - (offsetof(MethodDesc, m_wFlags3AndTokenRemainder) & 0x3));
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:6326) // "Suppress PREFast warning about comparing two constants"
+#endif // _PREFAST_
+
+#if BIGENDIAN
+ if ((offsetof(MethodDesc, m_wFlags3AndTokenRemainder) & 0x3) == 0) {
+#else // !BIGENDIAN
+ if ((offsetof(MethodDesc, m_wFlags3AndTokenRemainder) & 0x3) != 0) {
+#endif // !BIGENDIAN
+ static_assert_no_msg(sizeof(m_wFlags3AndTokenRemainder) == 2);
+ dwMask <<= 16;
+ }
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+ g_IBCLogger.LogMethodDescWriteAccess(this);
+
+ if (fSet)
+ FastInterlockOr(pdwFlags, dwMask);
+ else
+ FastInterlockAnd(pdwFlags, ~dwMask);
+
+ return wOldState;
+}
+
+#endif // !DACCESS_COMPILE
+
+//*******************************************************************************
+// Returns the address of the native code. The native code can be one of:
+// - jitted code if !IsPreImplemented()
+// - ngened code if IsPreImplemented()
+//
+// Methods which have no native code are either implemented by stubs or not jitted yet.
+// For example, NDirectMethodDescs have no native code. They are treated as
+// implemented by stubs. On WIN64, these stubs are IL stubs, which DO have native code.
+//
+// This function returns null if the method has no native code.
+PCODE MethodDesc::GetNativeCode()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ g_IBCLogger.LogMethodDescAccess(this);
+
+ if (HasNativeCodeSlot())
+ {
+        // When the profiler is enabled, it may ask to rejit code even though
+        // we have ngen code for this MethodDesc. (See MethodDesc::DoPrestub.)
+ // This means that NativeCodeSlot::GetValueMaybeNullAtPtr(GetAddrOfNativeCodeSlot())
+ // is not stable. It can turn from non-zero to zero.
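+        // The low bit of the slot doubles as the FIXUP_LIST_MASK flag (its
+        // name suggests it marks an associated ngen fixup list), so it is
+        // stripped before the value is treated as a code address; on ARM the
+        // Thumb bit is then OR'ed back in below.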
+ PCODE pCode = PCODE(NativeCodeSlot::GetValueMaybeNullAtPtr(GetAddrOfNativeCodeSlot()) & ~FIXUP_LIST_MASK);
+#ifdef _TARGET_ARM_
+ if (pCode != NULL)
+ pCode |= THUMB_CODE;
+#endif
+ return pCode;
+ }
+
+#ifdef FEATURE_INTERPRETER
+#ifndef DACCESS_COMPILE // TODO: Need a solution that will work under DACCESS
+ PCODE pEntryPoint = GetMethodEntryPoint();
+ if (Interpreter::InterpretationStubToMethodInfo(pEntryPoint) == this)
+ {
+ return pEntryPoint;
+ }
+#endif
+#endif
+
+ if (!HasStableEntryPoint() || HasPrecode())
+ return NULL;
+
+ return GetStableEntryPoint();
+}
+
+//*******************************************************************************
+TADDR MethodDesc::GetAddrOfNativeCodeSlot()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(HasNativeCodeSlot());
+
+ SIZE_T size = s_ClassificationSizeTable[m_wFlags & (mdcClassification | mdcHasNonVtableSlot | mdcMethodImpl)];
+
+ return dac_cast<TADDR>(this) + size;
+}
+
+//*******************************************************************************
+PCODE MethodDesc::GetPreImplementedCode()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_PREJIT
+ PCODE pNativeCode = GetNativeCode();
+ if (pNativeCode == NULL)
+ return NULL;
+
+ Module* pZapModule = GetZapModule();
+ if (pZapModule == NULL)
+ return NULL;
+
+ if (!pZapModule->IsZappedCode(pNativeCode))
+ return NULL;
+
+ return pNativeCode;
+#else // !FEATURE_PREJIT
+ return NULL;
+#endif // !FEATURE_PREJIT
+}
+
+//*******************************************************************************
+BOOL MethodDesc::IsVoid()
+{
+ WRAPPER_NO_CONTRACT;
+
+ MetaSig sig(this);
+ return sig.IsReturnTypeVoid();
+}
+
+//*******************************************************************************
+BOOL MethodDesc::HasRetBuffArg()
+{
+ WRAPPER_NO_CONTRACT;
+
+ MetaSig sig(this);
+ ArgIterator argit(&sig);
+ return argit.HasRetBuffArg();
+}
+
+//*******************************************************************************
+// This returns the offset of the IL.
+// The offset is relative to the base of the IL image.
+ULONG MethodDesc::GetRVA()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ if (IsRuntimeSupplied())
+ {
+ return 0;
+ }
+
+ // Methods without metadata don't have an RVA. Examples are IL stubs and LCG methods.
+ if (IsNoMetadata())
+ {
+ return 0;
+ }
+
+ if (GetMemberDef() & 0x00FFFFFF)
+ {
+ Module *pModule = GetModule();
+ PREFIX_ASSUME(pModule != NULL);
+
+ DWORD dwDescrOffset;
+ DWORD dwImplFlags;
+ if (FAILED(pModule->GetMDImport()->GetMethodImplProps(GetMemberDef(), &dwDescrOffset, &dwImplFlags)))
+ { // Class loader already asked for MethodImpls, so this should always succeed (unless there's a
+ // bug or a new code path)
+ _ASSERTE(!"If this ever fires, then this method should return HRESULT");
+ return 0;
+ }
+ BAD_FORMAT_NOTHROW_ASSERT(IsNDirect() || IsMiIL(dwImplFlags) || IsMiOPTIL(dwImplFlags) || dwDescrOffset == 0);
+ return dwDescrOffset;
+ }
+
+ return 0;
+}
+
+//*******************************************************************************
+BOOL MethodDesc::IsVarArg()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ SUPPORTS_DAC;
+
+ Signature signature = GetSignature();
+ _ASSERTE(!signature.IsEmpty());
+ return MetaSig::IsVarArg(GetModule(), signature);
+}
+
+//*******************************************************************************
+COR_ILMETHOD* MethodDesc::GetILHeader(BOOL fAllowOverrides /*=FALSE*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(IsIL());
+ PRECONDITION(!IsUnboxingStub());
+ }
+ CONTRACTL_END
+
+ Module *pModule = GetModule();
+
+    // Always pick up 'permanent' overrides like reflection emit, EnC, etc.,
+    // but only grab temporary overrides (like profiler rewrites) if asked to.
+ TADDR pIL = pModule->GetDynamicIL(GetMemberDef(), fAllowOverrides);
+
+ if (pIL == NULL)
+ {
+ pIL = pModule->GetIL(GetRVA());
+ }
+
+#ifdef _DEBUG_IMPL
+ if (pIL != NULL)
+ {
+ //
+        // This is a convenient place to verify that COR_ILMETHOD_DECODER::GetOnDiskSize is in sync
+        // with our private DACized copy in PEDecoder::ComputeILMethodSize.
+ //
+ COR_ILMETHOD_DECODER header((COR_ILMETHOD *)pIL);
+ SIZE_T size1 = header.GetOnDiskSize((COR_ILMETHOD *)pIL);
+ SIZE_T size2 = PEDecoder::ComputeILMethodSize(pIL);
+ _ASSERTE(size1 == size2);
+ }
+#endif
+
+#ifdef DACCESS_COMPILE
+ return (pIL != NULL) ? DacGetIlMethod(pIL) : NULL;
+#else // !DACCESS_COMPILE
+ return PTR_COR_ILMETHOD(pIL);
+#endif // !DACCESS_COMPILE
+}
+
+//*******************************************************************************
+MetaSig::RETURNTYPE MethodDesc::ReturnsObject(
+#ifdef _DEBUG
+ bool supportStringConstructors
+#endif
+ )
+{
+ CONTRACTL
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ TypeHandle thValueType;
+
+ MetaSig sig(this);
+ CorElementType et = sig.GetReturnTypeNormalized(&thValueType);
+
+ switch (et)
+ {
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_VAR:
+ return(MetaSig::RETOBJ);
+
+#ifdef ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
+ case ELEMENT_TYPE_VALUETYPE:
+ // We return value types in registers if they fit in ENREGISTERED_RETURNTYPE_MAXSIZE
+ // These valuetypes could contain gc refs.
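+        // For example, a struct wrapping a single object reference fits in a
+        // register yet still holds a GC ref, so it must be reported as RETOBJ
+        // (the assert on the field size below relies on exactly this shape).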
+ {
+ ArgIterator argit(&sig);
+ if (!argit.HasRetBuffArg())
+ {
+ // the type must already be loaded
+ _ASSERTE(!thValueType.IsNull());
+ if (!thValueType.IsTypeDesc())
+ {
+ MethodTable * pReturnTypeMT = thValueType.AsMethodTable();
+ if(pReturnTypeMT->ContainsPointers())
+ {
+ _ASSERTE(pReturnTypeMT->GetNumInstanceFieldBytes() == sizeof(void*));
+ return MetaSig::RETOBJ;
+ }
+ }
+ }
+ }
+ break;
+#endif // ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE
+
+#ifdef _DEBUG
+ case ELEMENT_TYPE_VOID:
+        // String constructors return objects. We should not have any ecall string
+        // constructors, except when called from gc coverage code (which only runs
+        // under debug). We therefore optimize the retail version of this
+        // method to not support string constructors.
+ if (IsCtor() && GetMethodTable()->HasComponentSize())
+ {
+ _ASSERTE(supportStringConstructors);
+ return MetaSig::RETOBJ;
+ }
+ break;
+#endif // _DEBUG
+
+ case ELEMENT_TYPE_BYREF:
+ return(MetaSig::RETBYREF);
+
+ default:
+ break;
+ }
+
+ return(MetaSig::RETNONOBJ);
+}
+
+#ifdef FEATURE_COMINTEROP
+
+#ifndef DACCESS_COMPILE
+
+//*******************************************************************************
+LONG MethodDesc::GetComDispid()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ ULONG dispid = -1;
+ HRESULT hr = GetMDImport()->GetDispIdOfMemberDef(
+ GetMemberDef(), // The member for which to get props.
+ &dispid // return dispid.
+ );
+ if (FAILED(hr))
+ return -1;
+
+ return (LONG)dispid;
+}
+
+//*******************************************************************************
+WORD MethodDesc::GetComSlot()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ PRECONDITION(IsRestored_NoLogging());
+ }
+ CONTRACTL_END
+
+ MethodTable * pMT = GetMethodTable();
+
+ _ASSERTE(pMT->IsInterface());
+
+ // COM slots are biased from MethodTable slots depending on interface type
+ WORD numExtraSlots = ComMethodTable::GetNumExtraSlots(pMT->GetComInterfaceType());
+
+    // Normal interfaces are laid out the same way as in the MethodTable, while
+    // sparse interfaces need to go through an extra layer of mapping.
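+    // For example (typical layouts): an IUnknown-derived interface carries
+    // three extra slots (QueryInterface/AddRef/Release) and an
+    // IDispatch-derived one seven, so managed slot 0 maps to COM slot 3 or 7
+    // respectively.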
+ WORD slot;
+
+ if (pMT->IsSparseForCOMInterop())
+ slot = numExtraSlots + pMT->GetClass()->GetSparseCOMInteropVTableMap()->LookupVTSlot(GetSlot());
+ else
+ slot = numExtraSlots + GetSlot();
+
+ return slot;
+}
+
+#endif // !DACCESS_COMPILE
+
+#endif // FEATURE_COMINTEROP
+
+//*******************************************************************************
+DWORD MethodDesc::GetAttrs() const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ if (IsArray())
+ return dac_cast<PTR_ArrayMethodDesc>(this)->GetAttrs();
+ else if (IsNoMetadata())
+        return dac_cast<PTR_DynamicMethodDesc>(this)->GetAttrs();
+
+ DWORD dwAttributes;
+ if (FAILED(GetMDImport()->GetMethodDefProps(GetMemberDef(), &dwAttributes)))
+ { // Class loader already asked for attributes, so this should always succeed (unless there's a
+ // bug or a new code path)
+ _ASSERTE(!"If this ever fires, then this method should return HRESULT");
+ return 0;
+ }
+ return dwAttributes;
+}
+
+//*******************************************************************************
+DWORD MethodDesc::GetImplAttrs()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ DWORD props;
+ if (FAILED(GetMDImport()->GetMethodImplProps(GetMemberDef(), NULL, &props)))
+ { // Class loader already asked for MethodImpls, so this should always succeed (unless there's a
+ // bug or a new code path)
+ _ASSERTE(!"If this ever fires, then this method should return HRESULT");
+ return 0;
+ }
+ return props;
+}
+
+//*******************************************************************************
+Module* MethodDesc::GetZapModule()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+#ifdef FEATURE_PREJIT
+ if (!IsZapped())
+ {
+ return NULL;
+ }
+ else
+ if (!IsTightlyBoundToMethodTable())
+ {
+ return ExecutionManager::FindZapModule(dac_cast<TADDR>(this));
+ }
+ else
+ {
+ return GetMethodTable()->GetLoaderModule();
+ }
+#else
+ return NULL;
+#endif
+}
+
+//*******************************************************************************
+Module* MethodDesc::GetLoaderModule()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (IsZapped())
+ {
+ return GetZapModule();
+ }
+ else
+ if (HasMethodInstantiation() && !IsGenericMethodDefinition())
+ {
+ Module *retVal = ClassLoader::ComputeLoaderModule(GetMethodTable(),
+ GetMemberDef(),
+ GetMethodInstantiation());
+ return retVal;
+ }
+ else
+ {
+ return GetMethodTable()->GetLoaderModule();
+ }
+}
+
+//*******************************************************************************
+Module *MethodDesc::GetModule() const
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ SUPPORTS_DAC;
+
+ g_IBCLogger.LogMethodDescAccess(this);
+ Module *pModule = GetModule_NoLogging();
+
+ return pModule;
+}
+
+//*******************************************************************************
+Module *MethodDesc::GetModule_NoLogging() const
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ SUPPORTS_DAC;
+
+ MethodTable* pMT = GetMethodDescChunk()->GetMethodTable();
+ return pMT->GetModule();
+}
+
+//*******************************************************************************
+// Is this an instantiating stub for generics? This does not include those
+// BoxedEntryPointStubs which call an instantiating stub.
+BOOL MethodDesc::IsInstantiatingStub()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return
+ (GetClassification() == mcInstantiated)
+ && !IsUnboxingStub()
+ && AsInstantiatedMethodDesc()->IMD_IsWrapperStubWithInstantiations();
+}
+
+//*******************************************************************************
+BOOL MethodDesc::IsWrapperStub()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return (IsUnboxingStub() || IsInstantiatingStub());
+}
+
+#ifndef DACCESS_COMPILE
+//*******************************************************************************
+
+MethodDesc *MethodDesc::GetWrappedMethodDesc()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(IsWrapperStub());
+
+ if (IsUnboxingStub())
+ {
+ return this->GetMethodTable()->GetUnboxedEntryPointMD(this);
+ }
+
+ if (IsInstantiatingStub())
+ {
+ MethodDesc *pRet = AsInstantiatedMethodDesc()->IMD_GetWrappedMethodDesc();
+#ifdef _DEBUG
+ MethodDesc *pAltMD =
+ MethodDesc::FindOrCreateAssociatedMethodDesc(this,
+ this->GetMethodTable(),
+ FALSE, /* no unboxing entrypoint */
+ this->GetMethodInstantiation(),
+ TRUE /* get shared code */ );
+ _ASSERTE(pAltMD == pRet);
+#endif // _DEBUG
+ return pRet;
+ }
+ return NULL;
+}
+
+
+MethodDesc *MethodDesc::GetExistingWrappedMethodDesc()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsWrapperStub());
+
+ if (IsUnboxingStub())
+ {
+ return this->GetMethodTable()->GetExistingUnboxedEntryPointMD(this);
+ }
+
+ if (IsInstantiatingStub())
+ {
+ MethodDesc *pRet = AsInstantiatedMethodDesc()->IMD_GetWrappedMethodDesc();
+ return pRet;
+ }
+ return NULL;
+}
+
+
+
+#endif // !DACCESS_COMPILE
+
+//*******************************************************************************
+BOOL MethodDesc::IsSharedByGenericInstantiations()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsWrapperStub())
+ return FALSE;
+ else if (GetMethodTable()->IsSharedByGenericInstantiations())
+ return TRUE;
+ else return IsSharedByGenericMethodInstantiations();
+}
+
+//*******************************************************************************
+BOOL MethodDesc::IsSharedByGenericMethodInstantiations()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (GetClassification() == mcInstantiated)
+ return AsInstantiatedMethodDesc()->IMD_IsSharedByGenericMethodInstantiations();
+ else return FALSE;
+}
+
+//*******************************************************************************
+// Does this method require an extra MethodTable argument for instantiation information?
+// This is the case for
+// * per-inst static methods in shared-code instantiated generic classes (e.g. static void MyClass<string>::m())
+// * shared-code instance methods in instantiated generic structs (e.g. void MyValueType<string>::m())
+BOOL MethodDesc::RequiresInstMethodTableArg()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return
+ IsSharedByGenericInstantiations() &&
+ !HasMethodInstantiation() &&
+ (IsStatic() || GetMethodTable()->IsValueType());
+}
+
+//*******************************************************************************
+// Does this method require an extra InstantiatedMethodDesc argument for instantiation information?
+// This is the case for
+// * shared-code instantiated generic methods
+BOOL MethodDesc::RequiresInstMethodDescArg()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return IsSharedByGenericInstantiations() &&
+ HasMethodInstantiation();
+}
+
+//*******************************************************************************
+// Does this method require any kind of extra argument for instantiation information?
+BOOL MethodDesc::RequiresInstArg()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ BOOL fRet = IsSharedByGenericInstantiations() &&
+ (HasMethodInstantiation() || IsStatic() || GetMethodTable()->IsValueType());
+
+ _ASSERT(fRet == (RequiresInstMethodTableArg() || RequiresInstMethodDescArg()));
+ return fRet;
+}
+
+//*******************************************************************************
+BOOL MethodDesc::IsRuntimeMethodHandle()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // <TODO> Refine this check further for BoxedEntryPointStubs </TODO>
+ return (!HasMethodInstantiation() || !IsSharedByGenericMethodInstantiations());
+}
+
+//*******************************************************************************
+// Strip off method and class instantiation if present e.g.
+// C1<int>.m1<string> -> C1.m1
+// C1<int>.m2 -> C1.m2
+// C2.m2<int> -> C2.m2
+// C2.m2 -> C2.m2
+MethodDesc* MethodDesc::LoadTypicalMethodDefinition()
+{
+ CONTRACT(MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ POSTCONDITION(CheckPointer(RETVAL));
+ POSTCONDITION(RETVAL->IsTypicalMethodDefinition());
+ }
+ CONTRACT_END
+
+#ifndef DACCESS_COMPILE
+ if (HasClassOrMethodInstantiation())
+ {
+ MethodTable *pMT = GetMethodTable();
+ if (!pMT->IsTypicalTypeDefinition())
+ pMT = ClassLoader::LoadTypeDefThrowing(pMT->GetModule(),
+ pMT->GetCl(),
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef).GetMethodTable();
+ CONSISTENCY_CHECK(TypeHandle(pMT).CheckFullyLoaded());
+ MethodDesc *resultMD = pMT->GetParallelMethodDesc(this);
+ PREFIX_ASSUME(resultMD != NULL);
+ resultMD->CheckRestore();
+ RETURN (resultMD);
+ }
+ else
+#endif // !DACCESS_COMPILE
+ RETURN(this);
+}
+
+//*******************************************************************************
+BOOL MethodDesc::IsTypicalMethodDefinition() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (HasMethodInstantiation() && !IsGenericMethodDefinition())
+ return FALSE;
+
+ if (HasClassInstantiation() && !GetMethodTable()->IsGenericTypeDefinition())
+ return FALSE;
+
+ return TRUE;
+}
+
+//*******************************************************************************
+BOOL MethodDesc::AcquiresInstMethodTableFromThis() {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return
+ IsSharedByGenericInstantiations() &&
+ !HasMethodInstantiation() &&
+ !IsStatic() &&
+ !GetMethodTable()->IsValueType();
+}
+
+//*******************************************************************************
+UINT MethodDesc::SizeOfArgStack()
+{
+ WRAPPER_NO_CONTRACT;
+ MetaSig msig(this);
+ ArgIterator argit(&msig);
+ return argit.SizeOfArgStack();
+}
+
+#ifdef _TARGET_X86_
+//*******************************************************************************
+UINT MethodDesc::CbStackPop()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ MetaSig msig(this);
+ ArgIterator argit(&msig);
+ return argit.CbStackPop();
+}
+#endif // _TARGET_X86_
+
+#ifndef DACCESS_COMPILE
+
+//*******************************************************************************
+// Strip off the method instantiation (if present) e.g.
+// C<int>.m<string> -> C<int>.m
+// D.m<string> -> D.m
+// Note that this also canonicalizes the owning method table
+// @todo check uses and clean this up
+MethodDesc* MethodDesc::StripMethodInstantiation()
+{
+ CONTRACT(MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END
+
+ if (!HasClassOrMethodInstantiation())
+ RETURN(this);
+
+ MethodTable *pMT = GetMethodTable()->GetCanonicalMethodTable();
+ MethodDesc *resultMD = pMT->GetParallelMethodDesc(this);
+ _ASSERTE(resultMD->IsGenericMethodDefinition() || !resultMD->HasMethodInstantiation());
+ RETURN(resultMD);
+}
+
+//*******************************************************************************
+MethodDescChunk *MethodDescChunk::CreateChunk(LoaderHeap *pHeap, DWORD methodDescCount,
+ DWORD classification, BOOL fNonVtableSlot, BOOL fNativeCodeSlot, BOOL fComPlusCallInfo, MethodTable *pInitialMT, AllocMemTracker *pamTracker)
+{
+ CONTRACT(MethodDescChunk *)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(ThrowOutOfMemory());
+
+ PRECONDITION(CheckPointer(pHeap));
+ PRECONDITION(CheckPointer(pInitialMT));
+ PRECONDITION(CheckPointer(pamTracker));
+
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ SIZE_T oneSize = MethodDesc::GetBaseSize(classification);
+
+ if (fNonVtableSlot)
+ oneSize += sizeof(MethodDesc::NonVtableSlot);
+
+ if (fNativeCodeSlot)
+ oneSize += sizeof(MethodDesc::NativeCodeSlot);
+
+#ifdef FEATURE_COMINTEROP
+ if (fComPlusCallInfo)
+ oneSize += sizeof(ComPlusCallInfo);
+#else // FEATURE_COMINTEROP
+ _ASSERTE(!fComPlusCallInfo);
+#endif // FEATURE_COMINTEROP
+
+ _ASSERTE((oneSize & MethodDesc::ALIGNMENT_MASK) == 0);
+
+ DWORD maxMethodDescsPerChunk = MethodDescChunk::MaxSizeOfMethodDescs / oneSize;
+
+ if (methodDescCount == 0)
+ methodDescCount = maxMethodDescsPerChunk;
+
+ MethodDescChunk * pFirstChunk = NULL;
+
+ do
+ {
+ DWORD count = min(methodDescCount, maxMethodDescsPerChunk);
+
+ void * pMem = pamTracker->Track(
+ pHeap->AllocMem(S_SIZE_T(sizeof(TADDR) + sizeof(MethodDescChunk) + oneSize * count)));
+
+ // Skip pointer to temporary entrypoints
+ MethodDescChunk * pChunk = (MethodDescChunk *)((BYTE*)pMem + sizeof(TADDR));
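+        // Resulting layout (sketch):
+        //   [TADDR: temporary entrypoints][MethodDescChunk header][MD 0][MD 1]...[MD count-1]
+        // where each MethodDesc occupies oneSize bytes.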
+
+ pChunk->SetSizeAndCount(oneSize * count, count);
+ pChunk->SetMethodTable(pInitialMT);
+
+ MethodDesc * pMD = pChunk->GetFirstMethodDesc();
+ for (DWORD i = 0; i < count; i++)
+ {
+ pMD->SetChunkIndex(pChunk);
+
+ pMD->SetClassification(classification);
+ if (fNonVtableSlot)
+ pMD->SetHasNonVtableSlot();
+ if (fNativeCodeSlot)
+ pMD->SetHasNativeCodeSlot();
+#ifdef FEATURE_COMINTEROP
+ if (fComPlusCallInfo)
+ pMD->SetupGenericComPlusCall();
+#endif // FEATURE_COMINTEROP
+
+ _ASSERTE(pMD->SizeOf() == oneSize);
+
+ pMD = (MethodDesc *)((BYTE *)pMD + oneSize);
+ }
+
+ pChunk->m_next.SetValueMaybeNull(pFirstChunk);
+ pFirstChunk = pChunk;
+
+ methodDescCount -= count;
+ }
+ while (methodDescCount > 0);
+
+ RETURN pFirstChunk;
+}
+
+#ifndef CROSSGEN_COMPILE
+//--------------------------------------------------------------------
+// Virtual Resolution on Objects
+//
+// Given a MethodDesc and an Object, return the target address
+// and/or the target MethodDesc and/or make a call.
+//
+// Some of the implementation of this logic is in
+// MethodTable::GetMethodDescForInterfaceMethodAndServer.
+// Those functions should really be moved here.
+//--------------------------------------------------------------------
+
+//*******************************************************************************
+// The following resolves virtual dispatch for the given method on the given
+// object down to an actual address to call, including any
+// handling of context proxies and other thunking layers.
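+//
+// Illustrative example: invoking the generic virtual C::M<string>() on an
+// object of type D (which overrides M<U>) must first locate D's slot
+// implementation and then find or create its instantiation at <string>,
+// loading an exact owner type if the override lives in shared generic code.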
+MethodDesc* MethodDesc::ResolveGenericVirtualMethod(OBJECTREF *orThis)
+{
+ CONTRACT(MethodDesc *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+
+ PRECONDITION(IsVtableMethod());
+ PRECONDITION(IsRestored_NoLogging());
+ PRECONDITION(HasMethodInstantiation());
+ PRECONDITION(!ContainsGenericVariables());
+ POSTCONDITION(CheckPointer(RETVAL));
+ POSTCONDITION(RETVAL->HasMethodInstantiation());
+ }
+ CONTRACT_END;
+
+ // Method table of target (might be instantiated)
+ // Deliberately use GetMethodTable -- not GetTrueMethodTable
+ MethodTable *pObjMT = (*orThis)->GetMethodTable();
+
+ // This is the static method descriptor describing the call.
+ // It is not the destination of the call, which we must compute.
+ MethodDesc* pStaticMD = this;
+
+ if (pObjMT->IsTransparentProxy())
+ {
+ // For transparent proxies get the client's view of the server type
+ // unless we're calling through an interface (in which case we let the
+ // server handle the resolution).
+ if (pStaticMD->IsInterface())
+ RETURN(pStaticMD);
+ pObjMT = (*orThis)->GetTrueMethodTable();
+ }
+
+ // Strip off the method instantiation if present
+ MethodDesc* pStaticMDWithoutGenericMethodArgs = pStaticMD->StripMethodInstantiation();
+
+ // Compute the target, though we have not yet applied the type arguments.
+ MethodDesc *pTargetMDBeforeGenericMethodArgs =
+ pStaticMD->IsInterface()
+ ? MethodTable::GetMethodDescForInterfaceMethodAndServer(TypeHandle(pStaticMD->GetMethodTable()),
+ pStaticMDWithoutGenericMethodArgs,orThis)
+ : pObjMT->GetMethodDescForSlot(pStaticMDWithoutGenericMethodArgs->GetSlot());
+
+ pTargetMDBeforeGenericMethodArgs->CheckRestore();
+
+    // The actual destination may lie anywhere in the inheritance hierarchy
+    // between the static descriptor and the target object.
+ // So now compute where we are really going! This may be an instantiated
+ // class type if the generic virtual lies in a generic class.
+ MethodTable *pTargetMT = pTargetMDBeforeGenericMethodArgs->GetMethodTable();
+
+    // No need to find/create a new generic instantiation if the target is the
+    // same as the static, i.e. the virtual method has not been overridden.
+ if (!pTargetMT->IsSharedByGenericInstantiations() && !pTargetMT->IsValueType() &&
+ pTargetMDBeforeGenericMethodArgs == pStaticMDWithoutGenericMethodArgs)
+ RETURN(pStaticMD);
+
+ if (pTargetMT->IsSharedByGenericInstantiations())
+ {
+ pTargetMT = ClassLoader::LoadGenericInstantiationThrowing(pTargetMT->GetModule(),
+ pTargetMT->GetCl(),
+ pTargetMDBeforeGenericMethodArgs->GetExactClassInstantiation(TypeHandle(pObjMT))).GetMethodTable();
+ }
+
+ RETURN(MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pTargetMDBeforeGenericMethodArgs,
+ pTargetMT,
+ (pTargetMT->IsValueType()), /* get unboxing entry point if a struct*/
+ pStaticMD->GetMethodInstantiation(),
+ FALSE /* no allowInstParam */ ));
+}
+
+//*******************************************************************************
+PCODE MethodDesc::GetSingleCallableAddrOfVirtualizedCode(OBJECTREF *orThis, TypeHandle staticTH)
+{
+ WRAPPER_NO_CONTRACT;
+ PRECONDITION(IsVtableMethod());
+
+ // Deliberately use GetMethodTable -- not GetTrueMethodTable
+ MethodTable *pObjMT = (*orThis)->GetMethodTable();
+
+ if (HasMethodInstantiation())
+ {
+ CheckRestore();
+ MethodDesc *pResultMD = ResolveGenericVirtualMethod(orThis);
+
+ // If we're remoting this call we can't call directly on the returned
+ // method desc, we need to go through a stub that guarantees we end up
+ // in the remoting handler. The stub we use below is normally just for
+ // non-virtual calls on virtual methods (that have the same problem
+ // where we could end up bypassing the remoting system), but it serves
+ // our purpose here (basically pushes our correctly instantiated,
+ // resolved method desc on the stack and calls the remoting code).
+#ifdef FEATURE_REMOTING
+        if (pObjMT->IsTransparentProxy())
+        {
+            if (IsInterface())
+                return CRemotingServices::GetStubForInterfaceMethod(pResultMD);
+            else
+                return CRemotingServices::GetNonVirtualEntryPointForVirtualMethod(pResultMD);
+        }
+#endif
+
+ return pResultMD->GetSingleCallableAddrOfCode();
+ }
+
+ if (IsInterface())
+ {
+ MethodDesc * pTargetMD = MethodTable::GetMethodDescForInterfaceMethodAndServer(staticTH,this,orThis);
+ return pTargetMD->GetSingleCallableAddrOfCode();
+ }
+
+ return pObjMT->GetRestoredSlot(GetSlot());
+}
+
+//*******************************************************************************
+// The following resolves virtual dispatch for the given method on the given
+// object down to an actual address to call, including any
+// handling of context proxies and other thunking layers.
+PCODE MethodDesc::GetMultiCallableAddrOfVirtualizedCode(OBJECTREF *orThis, TypeHandle staticTH)
+{
+ CONTRACT(PCODE)
+ {
+ THROWS;
+ GC_TRIGGERS;
+
+ PRECONDITION(IsRestored_NoLogging());
+ PRECONDITION(IsVtableMethod());
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+ // Method table of target (might be instantiated)
+ // Deliberately use GetMethodTable -- not GetTrueMethodTable
+ MethodTable *pObjMT = (*orThis)->GetMethodTable();
+
+ // This is the static method descriptor describing the call.
+ // It is not the destination of the call, which we must compute.
+ MethodDesc* pStaticMD = this;
+ MethodDesc *pTargetMD;
+
+ if (pStaticMD->HasMethodInstantiation())
+ {
+ CheckRestore();
+ pTargetMD = ResolveGenericVirtualMethod(orThis);
+
+ // If we're remoting this call we can't call directly on the returned
+ // method desc, we need to go through a stub that guarantees we end up
+ // in the remoting handler. The stub we use below is normally just for
+ // non-virtual calls on virtual methods (that have the same problem
+ // where we could end up bypassing the remoting system), but it serves
+ // our purpose here (basically pushes our correctly instantiated,
+ // resolved method desc on the stack and calls the remoting code).
+#ifdef FEATURE_REMOTING
+        if (pObjMT->IsTransparentProxy())
+        {
+            if (pStaticMD->IsInterface())
+                RETURN(CRemotingServices::GetStubForInterfaceMethod(pTargetMD));
+            else
+                RETURN(CRemotingServices::GetNonVirtualEntryPointForVirtualMethod(pTargetMD));
+        }
+#endif
+
+ RETURN(pTargetMD->GetMultiCallableAddrOfCode());
+ }
+
+ if (pStaticMD->IsInterface())
+ {
+ pTargetMD = MethodTable::GetMethodDescForInterfaceMethodAndServer(staticTH,pStaticMD,orThis);
+ RETURN(pTargetMD->GetMultiCallableAddrOfCode());
+ }
+
+#ifdef FEATURE_REMOTING
+ if (pObjMT->IsTransparentProxy())
+ {
+ RETURN(pObjMT->GetRestoredSlot(pStaticMD->GetSlot()));
+ }
+#endif // FEATURE_REMOTING
+
+ pTargetMD = pObjMT->GetMethodDescForSlot(pStaticMD->GetSlot());
+
+ RETURN (pTargetMD->GetMultiCallableAddrOfCode());
+}
+
+//*******************************************************************************
+PCODE MethodDesc::GetMultiCallableAddrOfCode(CORINFO_ACCESS_FLAGS accessFlags /*=CORINFO_ACCESS_LDFTN*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ PCODE ret = TryGetMultiCallableAddrOfCode(accessFlags);
+
+ if (ret == NULL)
+ {
+ GCX_COOP();
+
+        // We have to allocate a funcptr stub.
+ ret = GetLoaderAllocator()->GetFuncPtrStubs()->GetFuncPtrStub(this);
+ }
+
+ return ret;
+}
+
+//*******************************************************************************
+//
+// Returns a callable entry point for a function.
+// Multiple entry points could be used for a single function,
+// i.e. this function is not idempotent.
+//
+
+// We must ensure that GetMultiCallableAddrOfCode works
+// correctly for all of the following cases:
+// 1. shared generic method instantiations
+// 2. unshared generic method instantiations
+// 3. instance methods in shared generic classes
+// 4. instance methods in unshared generic classes
+// 5. static methods in shared generic classes.
+// 6. static methods in unshared generic classes.
+//
+// For case 1 and 5 the methods are implemented using
+// an instantiating stub (i.e. IsInstantiatingStub()
+// should be true). These stubs pass on to
+// shared-generic-code-which-requires-an-extra-type-context-parameter.
+// So whenever we use LDFTN on these we need to give out
+// the address of an instantiating stub.
+//
+// For cases 2, 3, 4 and 6 we can just use the standard technique for LdFtn:
+// (for 2 we give out the address of the fake "slot" in InstantiatedMethodDescs)
+// (for 3 it doesn't matter if the code is shared between instantiations
+// because the instantiation context is picked up from the "this" parameter.)
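+//
+// Past the special cases, the code below falls back through: stable entry
+// point (wrapper stubs, EnC), direct native code (if already jitted), a
+// precode (if the method would get one anyway), and finally the temporary
+// entrypoint (NULL with compact entrypoints, which forces the caller to go
+// via a slot or a funcptr stub).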
+
+PCODE MethodDesc::TryGetMultiCallableAddrOfCode(CORINFO_ACCESS_FLAGS accessFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ // Record this method desc if required
+ g_IBCLogger.LogMethodDescAccess(this);
+
+ if (IsGenericMethodDefinition())
+ {
+ _ASSERTE(!"Cannot take the address of an uninstantiated generic method.");
+ COMPlusThrow(kInvalidProgramException);
+ }
+
+ if (accessFlags & CORINFO_ACCESS_LDFTN)
+ {
+ // Whenever we use LDFTN on shared-generic-code-which-requires-an-extra-parameter
+ // we need to give out the address of an instantiating stub. This is why we give
+ // out GetStableEntryPoint() for the IsInstantiatingStub() case: this is
+ // safe. But first we assert that we only use GetMultiCallableAddrOfCode on
+ // the instantiating stubs and not on the shared code itself.
+ _ASSERTE(!RequiresInstArg());
+ _ASSERTE(!IsSharedByGenericMethodInstantiations());
+
+ // No other access flags are valid with CORINFO_ACCESS_LDFTN
+ _ASSERTE((accessFlags & ~CORINFO_ACCESS_LDFTN) == 0);
+ }
+
+ // We create stable entrypoints for these upfront
+ if (IsWrapperStub() || IsEnCAddedMethod())
+ return GetStableEntryPoint();
+
+#ifdef FEATURE_REMOTING
+ if (!(accessFlags & CORINFO_ACCESS_THIS) && IsRemotingInterceptedViaVirtualDispatch())
+ return CRemotingServices::GetNonVirtualEntryPointForVirtualMethod(this);
+#endif
+
+ // For EnC always just return the stable entrypoint so we can update the code
+ if (IsEnCMethod())
+ return GetStableEntryPoint();
+
+ // If the method has already been jitted, we can give out the direct address
+ // Note that we may have previously created a FuncPtrStubEntry, but
+ // GetMultiCallableAddrOfCode() does not need to be idempotent.
+
+ if (IsFCall())
+ {
+ // Call FCalls directly when possible
+ if (((accessFlags & CORINFO_ACCESS_THIS) || !IsRemotingInterceptedViaPrestub())
+ && !IsInterface() && !GetMethodTable()->ContainsGenericVariables())
+ {
+ BOOL fSharedOrDynamicFCallImpl;
+ PCODE pFCallImpl = ECall::GetFCallImpl(this, &fSharedOrDynamicFCallImpl);
+
+ if (!fSharedOrDynamicFCallImpl)
+ return pFCallImpl;
+
+            // Fake ctors share one implementation that has to be wrapped by a prestub
+ GetOrCreatePrecode();
+ }
+ }
+ else
+ {
+ if (IsPointingToNativeCode())
+ return GetNativeCode();
+ }
+
+ if (HasStableEntryPoint())
+ return GetStableEntryPoint();
+
+    // Force the creation of the precode if we would eventually get one anyway
+ if (MayHavePrecode())
+ return GetOrCreatePrecode()->GetEntryPoint();
+
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ // Caller has to call via slot or allocate funcptr stub
+ return NULL;
+#else // HAS_COMPACT_ENTRYPOINTS
+ //
+ // Embed call to the temporary entrypoint into the code. It will be patched
+ // to point to the actual code later.
+ //
+ return GetTemporaryEntryPoint();
+#endif // HAS_COMPACT_ENTRYPOINTS
+}
+
+//*******************************************************************************
+PCODE MethodDesc::GetCallTarget(OBJECTREF* pThisObj, TypeHandle ownerType)
+{
+ CONTRACTL
+ {
+ THROWS; // Resolving a generic virtual method can throw
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END
+
+ PCODE pTarget;
+
+ if (IsVtableMethod() && !GetMethodTable()->IsValueType())
+ {
+ CONSISTENCY_CHECK(NULL != pThisObj);
+ if (ownerType.IsNull())
+ ownerType = GetMethodTable();
+ pTarget = GetSingleCallableAddrOfVirtualizedCode(pThisObj, ownerType);
+ }
+ else
+ {
+ pTarget = GetSingleCallableAddrOfCode();
+ }
+
+ return pTarget;
+}
+
+//*******************************************************************************
+// convert an entry point into a method desc
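+// The lookup tries, in order: the code manager's range sections (jitted and
+// ngened code, plus precode stub blocks), the virtual call stub managers,
+// the remoting thunk managers (when remoting is enabled), and finally the
+// reverse FCall table.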
+MethodDesc* Entry2MethodDesc(PCODE entryPoint, MethodTable *pMT)
+{
+ CONTRACT(MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(RETVAL->SanityCheck());
+ }
+ CONTRACT_END
+
+ MethodDesc * pMD;
+
+ RangeSection * pRS = ExecutionManager::FindCodeRange(entryPoint, ExecutionManager::GetScanFlags());
+ if (pRS != NULL)
+ {
+ if (pRS->pjit->JitCodeToMethodInfo(pRS, entryPoint, &pMD, NULL))
+ RETURN(pMD);
+
+ if (pRS->pjit->GetStubCodeBlockKind(pRS, entryPoint) == STUB_CODE_BLOCK_PRECODE)
+ RETURN(MethodDesc::GetMethodDescFromStubAddr(entryPoint));
+
+ // We should never get here
+ _ASSERTE(!"Entry2MethodDesc failed for RangeSection");
+ RETURN (NULL);
+ }
+
+ pMD = VirtualCallStubManagerManager::Entry2MethodDesc(entryPoint, pMT);
+ if (pMD != NULL)
+ RETURN(pMD);
+
+#ifdef FEATURE_REMOTING
+
+#ifndef HAS_REMOTING_PRECODE
+ pMD = CNonVirtualThunkMgr::Entry2MethodDesc(entryPoint, pMT);
+ if (pMD != NULL)
+ RETURN(pMD);
+#endif // HAS_REMOTING_PRECODE
+
+ pMD = CVirtualThunkMgr::Entry2MethodDesc(entryPoint, pMT);
+ if (pMD != NULL)
+ RETURN(pMD);
+
+#endif // FEATURE_REMOTING
+
+ // Is it an FCALL?
+ pMD = ECall::MapTargetBackToMethod(entryPoint);
+ if (pMD != NULL)
+ RETURN(pMD);
+
+ // We should never get here
+ _ASSERTE(!"Entry2MethodDesc failed");
+ RETURN (NULL);
+}
+#endif // CROSSGEN_COMPILE
+
+//*******************************************************************************
+BOOL MethodDesc::IsFCallOrIntrinsic()
+{
+ WRAPPER_NO_CONTRACT;
+ return (IsFCall() || IsArray());
+}
+
+//*******************************************************************************
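+// A method still points to the prestub when it has no stable entry point yet,
+// or when its precode targets the prestub. A stable entry point without a
+// precode means the slot has been backpatched with real code.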
+BOOL MethodDesc::IsPointingToPrestub()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!HasStableEntryPoint())
+ return TRUE;
+
+ if (!HasPrecode())
+ return FALSE;
+
+ if (!IsRestored())
+ return TRUE;
+
+ return GetPrecode()->IsPointingToPrestub();
+}
+
+#ifdef FEATURE_INTERPRETER
+//*******************************************************************************
+BOOL MethodDesc::IsReallyPointingToPrestub()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!HasPrecode())
+ {
+ PCODE pCode = GetMethodEntryPoint();
+ return HasTemporaryEntryPoint() && pCode == GetTemporaryEntryPoint();
+ }
+
+ if (!IsRestored())
+ return TRUE;
+
+ return GetPrecode()->IsPointingToPrestub();
+}
+#endif
+
+//*******************************************************************************
+void MethodDesc::Reset()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // This method is not thread-safe since we are updating
+ // different pieces of data non-atomically.
+ // Use this only if you can guarantee thread-safety somehow.
+
+ _ASSERTE(IsEnCMethod() || // The process is frozen by the debugger
+ IsDynamicMethod() || // These are used in a very restricted way
+ GetLoaderModule()->IsReflection()); // Rental methods
+
+ // Reset any flags relevant to the old code
+ ClearFlagsOnUpdate();
+
+ if (HasPrecode())
+ {
+ GetPrecode()->Reset();
+ }
+ else
+ {
+ // We should go here only for the rental methods
+ _ASSERTE(GetLoaderModule()->IsReflection());
+
+ InterlockedUpdateFlags2(enum_flag2_HasStableEntryPoint | enum_flag2_HasPrecode, FALSE);
+
+ *GetAddrOfSlot() = GetTemporaryEntryPoint();
+ }
+
+ if (HasNativeCodeSlot())
+ NativeCodeSlot::SetValueMaybeNullAtPtr(GetAddrOfNativeCodeSlot(), NULL);
+ _ASSERTE(!HasNativeCode());
+}
+
+//*******************************************************************************
+DWORD MethodDesc::GetSecurityFlagsDuringPreStub()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+
+ DWORD dwMethDeclFlags = 0;
+ DWORD dwMethNullDeclFlags = 0;
+ DWORD dwClassDeclFlags = 0;
+ DWORD dwClassNullDeclFlags = 0;
+
+ if (IsInterceptedForDeclSecurity())
+ {
+ HRESULT hr;
+
+        BOOL fHasSuppressUnmanagedCodeAccessAttr = HasSuppressUnmanagedCodeAccessAttr();
+
+ hr = Security::GetDeclarationFlags(GetMDImport(),
+ GetMemberDef(),
+ &dwMethDeclFlags,
+ &dwMethNullDeclFlags,
+ &fHasSuppressUnmanagedCodeAccessAttr);
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+
+ // We only care about runtime actions, here.
+ // Don't add security interceptors for anything else!
+ dwMethDeclFlags &= DECLSEC_RUNTIME_ACTIONS;
+ dwMethNullDeclFlags &= DECLSEC_RUNTIME_ACTIONS;
+ }
+
+ MethodTable *pMT = GetMethodTable();
+ if (!pMT->IsNoSecurityProperties())
+ {
+ PSecurityProperties pSecurityProperties = pMT->GetClass()->GetSecurityProperties();
+ _ASSERTE(pSecurityProperties);
+
+ dwClassDeclFlags = pSecurityProperties->GetRuntimeActions();
+ dwClassNullDeclFlags= pSecurityProperties->GetNullRuntimeActions();
+ }
+ else
+ {
+ _ASSERTE( pMT->GetClass()->GetSecurityProperties() == NULL ||
+ ( pMT->GetClass()->GetSecurityProperties()->GetRuntimeActions() == 0
+ && pMT->GetClass()->GetSecurityProperties()->GetNullRuntimeActions() == 0 ) );
+ }
+
+
+ // Build up a set of flags to indicate the actions, if any,
+ // for which we will need to set up an interceptor.
+
+ // Add up the total runtime declarative actions so far.
+ DWORD dwSecurityFlags = dwMethDeclFlags | dwClassDeclFlags;
+
+ // Add in a declarative demand for NDirect.
+ // If this demand has been overridden by a declarative check
+ // on a class or method, then the bit won't change. If it's
+ // overridden by an empty check, then it will be reset by the
+ // subtraction logic below.
+ if (IsNDirect())
+ {
+ dwSecurityFlags |= DECLSEC_UNMNGD_ACCESS_DEMAND;
+ }
+
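+    // Worked example (hypothetical masks): if the method declares a runtime
+    // Demand, the class declares an Assert, and the class also carries an
+    // empty Demand, the union below is Demand|Assert. The class's empty
+    // Demand cannot clear the Demand bit because it is masked with
+    // ~dwMethDeclFlags, so the method's own declaration wins.
+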
+ if (dwSecurityFlags)
+ {
+ // If we've found any declarative actions at this point,
+ // try to subtract any actions that are empty.
+
+ // Subtract out any empty declarative actions on the method.
+ dwSecurityFlags &= ~dwMethNullDeclFlags;
+
+ // Finally subtract out any empty declarative actions on the class,
+ // but only those actions that are not also declared by the method.
+ dwSecurityFlags &= ~(dwClassNullDeclFlags & ~dwMethDeclFlags);
+ }
+
+ return dwSecurityFlags;
+}
+
+//*******************************************************************************
+DWORD MethodDesc::GetSecurityFlagsDuringClassLoad(IMDInternalImport *pInternalImport,
+ mdToken tkMethod,
+ mdToken tkClass,
+ DWORD *pdwClassDeclFlags,
+ DWORD *pdwClassNullDeclFlags,
+ DWORD *pdwMethDeclFlags,
+ DWORD *pdwMethNullDeclFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ HRESULT hr;
+
+ hr = Security::GetDeclarationFlags(pInternalImport,
+ tkMethod,
+ pdwMethDeclFlags,
+ pdwMethNullDeclFlags);
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+
+
+ if (!IsNilToken(tkClass) && (*pdwClassDeclFlags == 0xffffffff || *pdwClassNullDeclFlags == 0xffffffff))
+ {
+ hr = Security::GetDeclarationFlags(pInternalImport,
+ tkClass,
+ pdwClassDeclFlags,
+ pdwClassNullDeclFlags);
+ if (FAILED(hr))
+ COMPlusThrowHR(hr);
+
+ }
+
+ // Build up a set of flags to indicate the actions, if any,
+ // for which we will need to set up an interceptor.
+
+ // Add up the total runtime declarative actions so far.
+ DWORD dwSecurityFlags = *pdwMethDeclFlags | *pdwClassDeclFlags;
+
+ // Add in a declarative demand for NDirect.
+ // If this demand has been overridden by a declarative check
+ // on a class or method, then the bit won't change. If it's
+ // overridden by an empty check, then it will be reset by the
+ // subtraction logic below.
+ if (IsNDirect())
+ {
+ dwSecurityFlags |= DECLSEC_UNMNGD_ACCESS_DEMAND;
+ }
+
+ if (dwSecurityFlags)
+ {
+ // If we've found any declarative actions at this point,
+ // try to subtract any actions that are empty.
+
+ // Subtract out any empty declarative actions on the method.
+ dwSecurityFlags &= ~*pdwMethNullDeclFlags;
+
+ // Finally subtract out any empty declarative actions on the class,
+ // but only those actions that are not also declared by the method.
+ dwSecurityFlags &= ~(*pdwClassNullDeclFlags & ~*pdwMethDeclFlags);
+ }
+
+ return dwSecurityFlags;
+}
+
+//*******************************************************************************
+Dictionary* MethodDesc::GetMethodDictionary()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return
+ (GetClassification() == mcInstantiated)
+ ? (Dictionary*) (AsInstantiatedMethodDesc()->IMD_GetMethodDictionary())
+ : NULL;
+}
+
+//*******************************************************************************
+DictionaryLayout* MethodDesc::GetDictionaryLayout()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return
+ ((GetClassification() == mcInstantiated) && !IsUnboxingStub())
+ ? AsInstantiatedMethodDesc()->IMD_GetDictionaryLayout()
+ : NULL;
+}
+
+#endif // !DACCESS_COMPILE
+
+//*******************************************************************************
+MethodImpl *MethodDesc::GetMethodImpl()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ PRECONDITION(HasMethodImplSlot());
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ SIZE_T size = s_ClassificationSizeTable[m_wFlags & (mdcClassification | mdcHasNonVtableSlot)];
+
+ return PTR_MethodImpl(dac_cast<TADDR>(this) + size);
+}
+
+#ifndef DACCESS_COMPILE
+
+//*******************************************************************************
+BOOL MethodDesc::RequiresMethodDescCallingConvention(BOOL fEstimateForChunk /*=FALSE*/)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Interop marshaling is implemented using shared stubs
+ if (IsNDirect() || IsComPlusCall() || IsGenericComPlusCall())
+ return TRUE;
+
+#ifdef FEATURE_REMOTING
+ MethodTable * pMT = GetMethodTable();
+
+ if (fEstimateForChunk)
+ {
+ // Make a best guess based on the method table of the chunk.
+ if (pMT->IsInterface())
+ return TRUE;
+ }
+ else
+ {
+        // CRemotingServices::GetDispatchInterfaceHelper needs the method desc
+ if (pMT->IsInterface() && !IsStatic())
+ return TRUE;
+
+ // Asynchronous delegate methods are forwarded to shared TP stub
+ if (IsEEImpl())
+ {
+ DelegateEEClass *pClass = (DelegateEEClass*)(pMT->GetClass());
+
+ if (this != pClass->m_pInvokeMethod)
+ return TRUE;
+ }
+ }
+#endif // FEATURE_REMOTING
+
+ return FALSE;
+}
+
+//*******************************************************************************
+BOOL MethodDesc::RequiresStableEntryPoint(BOOL fEstimateForChunk /*=FALSE*/)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Create precodes for edit and continue to make methods updateable
+ if (IsEnCMethod() || IsEnCAddedMethod())
+ return TRUE;
+
+ // Precreate precodes for LCG methods so we do not leak memory when the method descs are recycled
+ if (IsLCGMethod())
+ return TRUE;
+
+ if (fEstimateForChunk)
+ {
+ // Make a best guess based on the method table of the chunk.
+ if (IsInterface())
+ return TRUE;
+ }
+ else
+ {
+        // Wrapper stubs are stored in a generic dictionary that is not backpatched
+ if (IsWrapperStub())
+ return TRUE;
+
+ // TODO: Can we avoid early allocation of precodes for interfaces and cominterop?
+ if ((IsInterface() && !IsStatic()) || IsComPlusCall())
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+//*******************************************************************************
+BOOL MethodDesc::IsClassConstructorTriggeredViaPrestub()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // FCalls do not need cctor triggers
+ if (IsFCall())
+ return FALSE;
+
+ // NGened code has explicit cctor triggers
+ if (IsZapped())
+ return FALSE;
+
+ // Domain neutral code has explicit cctor triggers
+ if (IsDomainNeutral())
+ return FALSE;
+
+ MethodTable * pMT = GetMethodTable();
+
+ // Shared generic code has explicit cctor triggers
+ if (pMT->IsSharedByGenericInstantiations())
+ return FALSE;
+
+ bool fRunBeforeFieldInitCctorsLazily = true;
+
+    // Always run beforefieldinit cctors lazily for optimized code. Running cctors lazily should be good for perf.
+    // Variability between optimized and non-optimized code should reduce the chance of people taking dependencies
+    // on exact beforefieldinit cctor timing.
+ if (fRunBeforeFieldInitCctorsLazily && pMT->GetClass()->IsBeforeFieldInit() && !CORDisableJITOptimizations(pMT->GetModule()->GetDebuggerInfoBits()))
+ return FALSE;
+
+    // To preserve consistent behavior between ngened and non-ngened states, always
+    // run class constructors lazily for autongennable code.
+ if (pMT->RunCCTorAsIfNGenImageExists())
+ return FALSE;
+
+ return TRUE;
+}
+
+#endif // !DACCESS_COMPILE
+
+
+#ifdef FEATURE_REMOTING
+
+//*******************************************************************************
+BOOL MethodDesc::MayBeRemotingIntercepted()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (IsStatic())
+ return FALSE;
+
+ MethodTable *pMT = GetMethodTable();
+
+ if (pMT->IsMarshaledByRef())
+ return TRUE;
+
+ if (g_pObjectClass == pMT)
+ {
+ if ((this == g_pObjectCtorMD) || (this == g_pObjectFinalizerMD))
+ return FALSE;
+
+        // Make sure that the above check worked as expected
+ _ASSERTE(this->GetSlot() != g_pObjectCtorMD->GetSlot());
+ _ASSERTE(this->GetSlot() != g_pObjectFinalizerMD->GetSlot());
+
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+//*******************************************************************************
+BOOL MethodDesc::IsRemotingInterceptedViaPrestub()
+{
+ WRAPPER_NO_CONTRACT;
+ // We do not insert a remoting stub around the shared code method descriptor
+ // for instantiated generic methods, i.e. anything which requires a hidden
+ // instantiation argument. Instead we insert it around the instantiating stubs
+ // and ensure that we call the instantiating stubs directly.
+ return MayBeRemotingIntercepted() && !IsVtableMethod() && !RequiresInstArg();
+}
+
+//*******************************************************************************
+BOOL MethodDesc::IsRemotingInterceptedViaVirtualDispatch()
+{
+ WRAPPER_NO_CONTRACT;
+ return MayBeRemotingIntercepted() && IsVtableMethod();
+}
+
+#endif // FEATURE_REMOTING
+
+//*******************************************************************************
+BOOL MethodDesc::MayHaveNativeCode()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(IsRestored_NoLogging());
+ }
+ CONTRACTL_END
+
+ // This code flow of this method should roughly match the code flow of MethodDesc::DoPrestub.
+
+ switch (GetClassification())
+ {
+ case mcIL: // IsIL() case. Handled below.
+ break;
+ case mcFCall: // FCalls do not have real native code.
+ return FALSE;
+    case mcNDirect: // NDirect methods never have native code (note that an NDirect method
+        return FALSE; // does not appear as having native code even for stubs-as-IL)
+ case mcEEImpl: // Runtime provided implementation. No native code.
+ return FALSE;
+ case mcArray: // Runtime provided implementation. No native code.
+ return FALSE;
+ case mcInstantiated: // IsIL() case. Handled below.
+ break;
+#ifdef FEATURE_COMINTEROP
+ case mcComInterop: // Generated stub. No native code.
+ return FALSE;
+#endif // FEATURE_COMINTEROP
+ case mcDynamic: // LCG or stub-as-il.
+ return TRUE;
+ default:
+ _ASSERTE(!"Unknown classification");
+ }
+
+ _ASSERTE(IsIL());
+
+ if ((IsInterface() && !IsStatic()) || IsWrapperStub() || ContainsGenericVariables() || IsAbstract())
+ {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+//*******************************************************************************
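+// Saves the auxiliary data structures referenced by this MethodDesc (debug
+// names, method impls, stored signatures, dictionaries, NDirect and COM
+// interop data) into the NGen data image.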
+void MethodDesc::Save(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Make sure that the transparency is cached in the NGen image
+ Security::IsMethodTransparent(this);
+
+ // Initialize the DoesNotHaveEquivalentValuetypeParameters flag.
+ // If we fail to determine whether there is a type-equivalent struct parameter (eg. because there is a struct parameter
+ // defined in a missing dependency), then just continue. The reason we run this method is to initialize a flag that is
+ // only an optimization in any case, so it doesn't really matter if it fails.
+ EX_TRY
+ {
+ HasTypeEquivalentStructParameters();
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ _ASSERTE(image->GetModule()->GetAssembly() ==
+ GetAppDomain()->ToCompilationDomain()->GetTargetAssembly());
+
+#ifdef _DEBUG
+ SString s;
+ if (LoggingOn(LF_ZAP, LL_INFO10000))
+ {
+ TypeString::AppendMethodDebug(s, this);
+ LOG((LF_ZAP, LL_INFO10000, " MethodDesc::Save %S (%p)\n", s.GetUnicode(), this));
+ }
+
+ if (m_pszDebugMethodName && !image->IsStored((void*) m_pszDebugMethodName))
+ image->StoreStructure((void *) m_pszDebugMethodName,
+ (ULONG)(strlen(m_pszDebugMethodName) + 1),
+ DataImage::ITEM_DEBUG,
+ 1);
+ if (m_pszDebugClassName && !image->IsStored(m_pszDebugClassName))
+ image->StoreStructure((void *) m_pszDebugClassName,
+ (ULONG)(strlen(m_pszDebugClassName) + 1),
+ DataImage::ITEM_DEBUG,
+ 1);
+ if (m_pszDebugMethodSignature && !image->IsStored(m_pszDebugMethodSignature))
+ image->StoreStructure((void *) m_pszDebugMethodSignature,
+ (ULONG)(strlen(m_pszDebugMethodSignature) + 1),
+ DataImage::ITEM_DEBUG,
+ 1);
+#endif // _DEBUG
+
+ if (IsMethodImpl())
+ {
+ MethodImpl *pImpl = GetMethodImpl();
+
+ pImpl->Save(image);
+ }
+
+ if (IsNDirect())
+ {
+ EX_TRY
+ {
+ PInvokeStaticSigInfo sigInfo;
+ NDirect::PopulateNDirectMethodDesc((NDirectMethodDesc*)this, &sigInfo);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+ }
+
+ if (HasStoredSig())
+ {
+ StoredSigMethodDesc *pNewSMD = (StoredSigMethodDesc*) this;
+
+ if (pNewSMD->HasStoredMethodSig())
+ {
+ if (!image->IsStored((void *) pNewSMD->m_pSig))
+ {
+                // Store signatures that don't need restore into a read-only section.
+                DataImage::ItemKind sigItemKind = DataImage::ITEM_STORED_METHOD_SIG_READONLY;
+                // Place the signatures for stubs-as-il into the hot/cold or writeable section
+                // here since Module::Arrange won't place them for us.
+ if (IsILStub())
+ {
+ PTR_DynamicMethodDesc pDynamicMD = AsDynamicMethodDesc();
+ // Forward PInvoke never touches the signature at runtime, only reverse pinvoke does.
+ if (pDynamicMD->IsReverseStub())
+ {
+ sigItemKind = DataImage::ITEM_STORED_METHOD_SIG_READONLY_WARM;
+ }
+
+ if (FixupSignatureContainingInternalTypes(image,
+ (PCCOR_SIGNATURE)pNewSMD->m_pSig,
+ pNewSMD->m_cSig,
+ true /*checkOnly if we will need to restore the signature without doing fixup*/))
+ {
+ sigItemKind = DataImage::ITEM_STORED_METHOD_SIG;
+ }
+ }
+
+ image->StoreInternedStructure((void *) pNewSMD->m_pSig,
+ pNewSMD->m_cSig,
+ sigItemKind,
+ 1);
+ }
+ }
+ }
+
+ if (GetMethodDictionary())
+ {
+ DWORD cBytes = DictionaryLayout::GetFirstDictionaryBucketSize(GetNumGenericMethodArgs(), GetDictionaryLayout());
+ void* pBytes = GetMethodDictionary()->AsPtr();
+
+ LOG((LF_ZAP, LL_INFO10000, " MethodDesc::Save dictionary size %d\n", cBytes));
+ image->StoreStructure(pBytes, cBytes,
+ DataImage::ITEM_DICTIONARY_WRITEABLE);
+ }
+
+ if (HasMethodInstantiation())
+ {
+ InstantiatedMethodDesc* pIMD = AsInstantiatedMethodDesc();
+ if (pIMD->IMD_IsSharedByGenericMethodInstantiations() && pIMD->m_pDictLayout != NULL)
+ {
+ pIMD->m_pDictLayout->Save(image);
+ }
+ }
+ if (IsNDirect())
+ {
+ NDirectMethodDesc *pNMD = (NDirectMethodDesc *)this;
+
+ // Make sure that the marshaling required flag is computed
+ pNMD->MarshalingRequired();
+
+#ifndef FEATURE_CORECLR
+ if (!pNMD->IsQCall())
+ {
+ //Cache DefaultImportDllImportSearchPaths attribute.
+ pNMD->HasDefaultDllImportSearchPathsAttribute();
+ }
+#endif
+
+ image->StoreStructure(pNMD->GetWriteableData(),
+ sizeof(NDirectWriteableData),
+ DataImage::ITEM_METHOD_DESC_COLD_WRITEABLE);
+
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ if (!pNMD->MarshalingRequired())
+ {
+ // import thunk is only needed if the P/Invoke is inlinable
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ image->SavePrecode(pNMD->GetNDirectImportThunkGlue(), pNMD, PRECODE_NDIRECT_IMPORT, DataImage::ITEM_METHOD_PRECODE_COLD);
+#else
+ image->StoreStructure(pNMD->GetNDirectImportThunkGlue(), sizeof(NDirectImportThunkGlue), DataImage::ITEM_METHOD_PRECODE_COLD);
+#endif
+ }
+#endif
+
+ if (pNMD->IsQCall())
+ {
+ // Make sure QCall id is cached
+ ECall::GetQCallImpl(this);
+ _ASSERTE(pNMD->GetECallID() != 0);
+ }
+ else
+ {
+ LPCUTF8 pszLibName = pNMD->GetLibName();
+ if (pszLibName && !image->IsStored(pszLibName))
+ {
+ image->StoreStructure(pszLibName,
+ (ULONG)strlen(pszLibName) + 1,
+ DataImage::ITEM_STORED_METHOD_NAME,
+ 1);
+ }
+
+ LPCUTF8 pszEntrypointName = pNMD->GetEntrypointName();
+ if (pszEntrypointName != NULL && !image->IsStored(pszEntrypointName))
+ {
+ image->StoreStructure(pszEntrypointName,
+ (ULONG)strlen(pszEntrypointName) + 1,
+ DataImage::ITEM_STORED_METHOD_NAME,
+ 1);
+ }
+ }
+ }
+
+ // ContainsGenericVariables() check is required to support generic FCalls
+ // (only instance methods on generic types constrained to "class" are allowed)
+ if(!IsUnboxingStub() && IsFCall() && !GetMethodTable()->ContainsGenericVariables())
+ {
+ // Make sure that ECall::GetFCallImpl is called for all methods. It has the
+ // side effect of adding the methoddesc to the reverse fcall hash table.
+ // MethodDesc::Save would eventually return to Module::Save which is where
+ // we would save the reverse fcall table also. Thus this call is effectively populating
+ // that reverse fcall table.
+
+ ECall::GetFCallImpl(this);
+ }
+
+ if (IsDynamicMethod())
+ {
+ DynamicMethodDesc *pDynMeth = AsDynamicMethodDesc();
+ if (pDynMeth->m_pszMethodName && !image->IsStored(pDynMeth->m_pszMethodName))
+ image->StoreStructure((void *) pDynMeth->m_pszMethodName,
+ (ULONG)(strlen(pDynMeth->m_pszMethodName) + 1),
+ DataImage::ITEM_STORED_METHOD_NAME,
+ 1);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (IsComPlusCall())
+ {
+ ComPlusCallMethodDesc *pCMD = (ComPlusCallMethodDesc *)this;
+ ComPlusCallInfo *pComInfo = pCMD->m_pComPlusCallInfo;
+
+ if (pComInfo != NULL && pComInfo->ShouldSave(image))
+ {
+ image->StoreStructure(pCMD->m_pComPlusCallInfo,
+ sizeof(ComPlusCallInfo),
+ DataImage::ITEM_METHOD_DESC_COLD_WRITEABLE);
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ LOG((LF_ZAP, LL_INFO10000, " MethodDesc::Save %S (%p) complete\n", s.GetUnicode(), this));
+
+}
+
+//*******************************************************************************
+bool MethodDesc::CanSkipDoPrestub (
+ MethodDesc * callerMD,
+ CorInfoIndirectCallReason *pReason,
+ CORINFO_ACCESS_FLAGS accessFlags/*=CORINFO_ACCESS_ANY*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ CorInfoIndirectCallReason dummy;
+ if (pReason == NULL)
+ pReason = &dummy;
+ *pReason = CORINFO_INDIRECT_CALL_UNKNOWN;
+
+ // Only IL can be called directly
+ if (!IsIL())
+ {
+ // Pretend that IL stubs can be called directly. This lets us avoid
+ // creating useless precodes for IL stubs.
+ if (IsILStub())
+ return true;
+
+ if (IsNDirect())
+ {
+ *pReason = CORINFO_INDIRECT_CALL_PINVOKE;
+ return false;
+ }
+
+ *pReason = CORINFO_INDIRECT_CALL_EXOTIC;
+ return false;
+ }
+
+ // @todo generics: Until we fix the RVA map in zapper.cpp to be instantiation-aware, this must remain
+ CheckRestore();
+
+ // The remoting interception is not necessary if we are calling on the same thisptr
+ if (!(accessFlags & CORINFO_ACCESS_THIS) && IsRemotingInterceptedViaPrestub())
+ {
+ *pReason = CORINFO_INDIRECT_CALL_REMOTING;
+ return false;
+ }
+
+ // The wrapper stubs cannot be called directly (like any other stubs)
+ if (IsWrapperStub())
+ {
+ *pReason = CORINFO_INDIRECT_CALL_STUB;
+ return false;
+ }
+
+ // Can't hard bind to a method which contains one or more Constrained Execution Region roots (we need to force the prestub to
+ // execute for such methods).
+ if (ContainsPrePreparableCerRoot(this))
+ {
+ *pReason = CORINFO_INDIRECT_CALL_CER;
+ return false;
+ }
+
+ // Check whether our methoddesc needs restore
+ if (NeedsRestore(GetAppDomain()->ToCompilationDomain()->GetTargetImage(), TRUE))
+ {
+ // The speculative method instantiations are restored by the time we call them via indirection.
+ if (!IsTightlyBoundToMethodTable() &&
+ GetLoaderModule() != Module::GetPreferredZapModuleForMethodDesc(this))
+ {
+ // We should only take this codepath to determine whether the method needs a prestub.
+ // Cross-module calls should be filtered out by CanEmbedMethodHandle earlier.
+ _ASSERTE(GetLoaderModule() == GetAppDomain()->ToCompilationDomain()->GetTargetModule());
+
+ return true;
+ }
+
+ *pReason = CORINFO_INDIRECT_CALL_RESTORE_METHOD;
+ return false;
+ }
+
+ /////////////////////////////////////////////////////////////////////////////////
+ // The method looks OK. Check class restore.
+ MethodTable * calleeMT = GetMethodTable();
+
+ // If no need for restore, we can call direct.
+ if (!calleeMT->NeedsRestore(GetAppDomain()->ToCompilationDomain()->GetTargetImage()))
+ return true;
+
+ // We will override this with more specific reason if we find one
+ *pReason = CORINFO_INDIRECT_CALL_RESTORE;
+
+ /////////////////////////////////////////////////////////////////////////////////
+ // Try to prove that we have done the restore already.
+
+ // If we're calling into the same class, we can assume it is already initialized.
+ if (callerMD != NULL)
+ {
+ MethodTable * callerMT = callerMD->GetMethodTable();
+ if (calleeMT == callerMT)
+ return true;
+ }
+
+ // If we are called on a non-NULL this pointer, we can assume that the class is initialized.
+ if (accessFlags & CORINFO_ACCESS_NONNULL)
+ {
+ // A static method call may be the first call on the class
+ if (IsStatic())
+ {
+ *pReason = CORINFO_INDIRECT_CALL_RESTORE_FIRST_CALL;
+ }
+ else
+ // In some cases, instance value type methods may be called before an instance initializer
+ if (calleeMT->IsValueType())
+ {
+ *pReason = CORINFO_INDIRECT_CALL_RESTORE_VALUE_TYPE;
+ }
+ else
+ {
+ // Otherwise, we conclude that there must have been at least one call on the class already.
+ return true;
+ }
+ }
+
+ // If a child class calls its parent class, we can assume the parent is already restored.
+ if (callerMD != NULL)
+ {
+ MethodTable * parentMT = callerMD->GetMethodTable()->GetParentMethodTable();
+ while (parentMT != NULL)
+ {
+ if (calleeMT == parentMT)
+ return true;
+ parentMT = parentMT->GetParentMethodTable();
+ }
+ }
+
+ // The speculative method table instantiations are restored by the time we call methods on them via indirection.
+ if (IsTightlyBoundToMethodTable() &&
+ calleeMT->GetLoaderModule() != Module::GetPreferredZapModuleForMethodTable(calleeMT))
+ {
+ // We should only take this codepath to determine whether the method needs a prestub.
+ // Cross-module calls should be filtered out by CanEmbedMethodHandle earlier.
+ _ASSERTE(calleeMT->GetLoaderModule() == GetAppDomain()->ToCompilationDomain()->GetTargetModule());
+
+ return true;
+ }
+
+ // Note: Reason for restore has been initialized earlier
+ return false;
+}
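+
+ // An illustrative summary of CanSkipDoPrestub (added commentary, not from the
+ // original sources): a plain IL method whose MethodTable lives in the target
+ // image and needs no restore is directly callable and returns true; a
+ // P/Invoke always returns false with CORINFO_INDIRECT_CALL_PINVOKE; and a
+ // method whose class may still need restore returns false with one of the
+ // CORINFO_INDIRECT_CALL_RESTORE* reasons, unless the caller or this-pointer
+ // argument proves the class must already be live.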
+
+//*******************************************************************************
+BOOL MethodDesc::ComputeNeedsRestore(DataImage *image, TypeHandleList *pVisited, BOOL fAssumeMethodTableRestored/*=FALSE*/)
+{
+ STATIC_STANDARD_VM_CONTRACT;
+
+ _ASSERTE(GetAppDomain()->IsCompilationDomain());
+
+ MethodTable * pMT = GetMethodTable();
+
+ if (!IsTightlyBoundToMethodTable())
+ {
+ if (!image->CanEagerBindToMethodTable(pMT))
+ return TRUE;
+ }
+
+ if (!fAssumeMethodTableRestored)
+ {
+ if (pMT->ComputeNeedsRestore(image, pVisited))
+ return TRUE;
+ }
+
+ if (GetClassification() == mcInstantiated)
+ {
+ InstantiatedMethodDesc* pIMD = AsInstantiatedMethodDesc();
+
+ if (pIMD->IMD_IsWrapperStubWithInstantiations())
+ {
+ if (!image->CanPrerestoreEagerBindToMethodDesc(pIMD->m_pWrappedMethodDesc.GetValue(), pVisited))
+ return TRUE;
+
+ if (!image->CanHardBindToZapModule(pIMD->m_pWrappedMethodDesc.GetValue()->GetLoaderModule()))
+ return TRUE;
+ }
+
+ if (GetMethodDictionary())
+ {
+ if (GetMethodDictionary()->ComputeNeedsRestore(image, pVisited, GetNumGenericMethodArgs()))
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+ // Fixes up ET_INTERNAL TypeHandles in an IL stub signature. If at least one type is fixed up,
+ // marks the signature as "needs restore". Also handles probing through generic instantiations
+ // to find ET_INTERNAL TypeHandles used as the generic type or its parameters.
+//
+// This function will parse one type and expects psig to be pointing to the element type. If
+// the type is a generic instantiation, we will recursively parse it.
+//
+bool
+FixupSignatureContainingInternalTypesParseType(
+ DataImage * image,
+ PCCOR_SIGNATURE pOriginalSig,
+ SigPointer & psig,
+ bool checkOnly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ SigPointer sigOrig = psig;
+
+ CorElementType eType;
+ IfFailThrow(psig.GetElemType(&eType));
+
+ switch (eType)
+ {
+ case ELEMENT_TYPE_INTERNAL:
+ {
+ TypeHandle * pTypeHandle = (TypeHandle *)psig.GetPtr();
+
+ void * ptr;
+ IfFailThrow(psig.GetPointer(&ptr));
+
+ if (!checkOnly)
+ {
+ // Always force creation of a fixup to avoid unaligned relocation entries. Unaligned
+ // relocation entries are a perf hit for ASLR, and they even disable ASLR on ARM.
+ image->FixupTypeHandlePointerInPlace((BYTE *)pOriginalSig, (BYTE *)pTypeHandle - (BYTE *)pOriginalSig, TRUE);
+
+ // mark the signature so we know we'll need to restore it
+ BYTE *pImageSig = (BYTE *)image->GetImagePointer((PVOID)pOriginalSig);
+ *pImageSig |= IMAGE_CEE_CS_CALLCONV_NEEDSRESTORE;
+ }
+ }
+ return true;
+
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ bool needsRestore = FixupSignatureContainingInternalTypesParseType(image, pOriginalSig, psig, checkOnly);
+
+ // Get generic arg count
+ ULONG nArgs;
+ IfFailThrow(psig.GetData(&nArgs));
+
+ for (ULONG i = 0; i < nArgs; i++)
+ {
+ if (FixupSignatureContainingInternalTypesParseType(image, pOriginalSig, psig, checkOnly))
+ {
+ needsRestore = true;
+ }
+ }
+
+ // Return. We don't want to call psig.SkipExactlyOne in this case since we've manually
+ // parsed through the generic inst type.
+ return needsRestore;
+ }
+
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_PINNED:
+ case ELEMENT_TYPE_SZARRAY:
+ // Call recursively
+ return FixupSignatureContainingInternalTypesParseType(image, pOriginalSig, psig, checkOnly);
+
+ default:
+ IfFailThrow(sigOrig.SkipExactlyOne());
+ psig = sigOrig;
+ break;
+ }
+
+ return false;
+}
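+
+ // Illustrative example of the walk above (added commentary; the signature
+ // bytes are hypothetical): for an IL stub parameter typed as a byref of a
+ // generic instantiation over a runtime type, the stream looks like
+ //
+ //   E_T_BYREF  E_T_GENERICINST  E_T_CLASS <token>  <argCount=1>  E_T_INTERNAL <TypeHandle>
+ //
+ // and the parser recurses BYREF -> GENERICINST -> each generic argument,
+ // performing the in-place TypeHandle fixup when it reaches E_T_INTERNAL.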
+
+//---------------------------------------------------------------------------------------
+//
+ // Fixes up ET_INTERNAL TypeHandles in an IL stub signature. If at least one type is fixed up,
+ // marks the signature as "needs restore".
+//
+bool
+FixupSignatureContainingInternalTypes(
+ DataImage * image,
+ PCCOR_SIGNATURE pSig,
+ DWORD cSig,
+ bool checkOnly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ ULONG nArgs;
+ bool needsRestore = false;
+
+ SigPointer psig(pSig, cSig);
+
+ // Skip calling convention
+ BYTE uCallConv;
+ IfFailThrow(psig.GetByte(&uCallConv));
+
+ if ((uCallConv & IMAGE_CEE_CS_CALLCONV_MASK) == IMAGE_CEE_CS_CALLCONV_FIELD)
+ {
+ ThrowHR(META_E_BAD_SIGNATURE);
+ }
+
+ // Skip type parameter count
+ if (uCallConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ {
+ IfFailThrow(psig.GetData(NULL));
+ }
+
+ // Get arg count
+ IfFailThrow(psig.GetData(&nArgs));
+
+ nArgs++; // be sure to handle the return type
+
+ for (ULONG i = 0; i < nArgs; i++)
+ {
+ if (FixupSignatureContainingInternalTypesParseType(image, pSig, psig, checkOnly))
+ {
+ needsRestore = true;
+ }
+ }
+ return needsRestore;
+} // FixupSignatureContainingInternalTypes
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#ifdef FEATURE_PREJIT
+//---------------------------------------------------------------------------------------
+//
+// Restores ET_INTERNAL TypeHandles in an IL stub signature.
+// This function will parse one type and expects psig to be pointing to the element type. If
+// the type is a generic instantiation, we will recursively parse it.
+//
+void
+RestoreSignatureContainingInternalTypesParseType(
+ SigPointer & psig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ SigPointer sigOrig = psig;
+
+ CorElementType eType;
+ IfFailThrow(psig.GetElemType(&eType));
+
+ switch (eType)
+ {
+ case ELEMENT_TYPE_INTERNAL:
+ {
+ TypeHandle * pTypeHandle = (TypeHandle *)psig.GetPtr();
+
+ void * ptr;
+ IfFailThrow(psig.GetPointer(&ptr));
+
+ Module::RestoreTypeHandlePointerRaw(pTypeHandle);
+ }
+ break;
+
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ RestoreSignatureContainingInternalTypesParseType(psig);
+
+ // Get generic arg count
+ ULONG nArgs;
+ IfFailThrow(psig.GetData(&nArgs));
+
+ for (ULONG i = 0; i < nArgs; i++)
+ {
+ RestoreSignatureContainingInternalTypesParseType(psig);
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_PINNED:
+ case ELEMENT_TYPE_SZARRAY:
+ // Call recursively
+ RestoreSignatureContainingInternalTypesParseType(psig);
+ break;
+
+ default:
+ IfFailThrow(sigOrig.SkipExactlyOne());
+ psig = sigOrig;
+ break;
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Restores ET_INTERNAL TypeHandles in an IL stub signature.
+//
+static
+void
+RestoreSignatureContainingInternalTypes(
+ PCCOR_SIGNATURE pSig,
+ DWORD cSig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ Volatile<BYTE> * pVolatileSig = (Volatile<BYTE> *)pSig;
+ if (*pVolatileSig & IMAGE_CEE_CS_CALLCONV_NEEDSRESTORE)
+ {
+ EnsureWritablePages(dac_cast<void*>(pSig), cSig);
+
+ ULONG nArgs;
+ SigPointer psig(pSig, cSig);
+
+ // Skip calling convention
+ BYTE uCallConv;
+ IfFailThrow(psig.GetByte(&uCallConv));
+
+ if ((uCallConv & IMAGE_CEE_CS_CALLCONV_MASK) == IMAGE_CEE_CS_CALLCONV_FIELD)
+ {
+ ThrowHR(META_E_BAD_SIGNATURE);
+ }
+
+ // Skip type parameter count
+ if (uCallConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ {
+ IfFailThrow(psig.GetData(NULL));
+ }
+
+ // Get arg count
+ IfFailThrow(psig.GetData(&nArgs));
+
+ nArgs++; // be sure to handle the return type
+
+ for (ULONG i = 0; i < nArgs; i++)
+ {
+ RestoreSignatureContainingInternalTypesParseType(psig);
+ }
+
+ // clear the needs-restore bit
+ *pVolatileSig &= (BYTE)~IMAGE_CEE_CS_CALLCONV_NEEDSRESTORE;
+ }
+} // RestoreSignatureContainingInternalTypes
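+
+ // Note on the flag protocol above (added commentary, not from the original
+ // sources): IMAGE_CEE_CS_CALLCONV_NEEDSRESTORE doubles as a "not yet
+ // restored" marker. The first signature byte is read through Volatile<BYTE>,
+ // so a racing reader either still sees the bit set and re-runs the restore
+ // (the raw TypeHandle restore is expected to be idempotent), or sees it
+ // cleared only after the whole signature has been walked.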
+
+void DynamicMethodDesc::Restore()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if (IsSignatureNeedsRestore())
+ {
+ _ASSERTE(IsILStub());
+
+ DWORD cSigLen;
+ PCCOR_SIGNATURE pSig = GetStoredMethodSig(&cSigLen);
+
+ RestoreSignatureContainingInternalTypes(pSig, cSigLen);
+ }
+}
+#endif // FEATURE_PREJIT
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+void DynamicMethodDesc::Fixup(DataImage* image)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD cSigLen;
+ PCCOR_SIGNATURE pSig = GetStoredMethodSig(&cSigLen);
+
+ bool needsRestore = FixupSignatureContainingInternalTypes(image, pSig, cSigLen);
+
+ DynamicMethodDesc* pDynamicImageMD = (DynamicMethodDesc*)image->GetImagePointer(this);
+ pDynamicImageMD->SetSignatureNeedsRestore(needsRestore);
+}
+
+//---------------------------------------------------------------------------------------
+//
+void
+MethodDesc::Fixup(
+ DataImage * image)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _DEBUG
+ SString s;
+ if (LoggingOn(LF_ZAP, LL_INFO10000))
+ {
+ TypeString::AppendMethodDebug(s, this);
+ LOG((LF_ZAP, LL_INFO10000, " MethodDesc::Fixup %S (%p)\n", s.GetUnicode(), this));
+ }
+#endif // _DEBUG
+
+#ifdef HAVE_GCCOVER
+ image->ZeroPointerField(this, offsetof(MethodDesc, m_GcCover));
+#endif // HAVE_GCCOVER
+
+#if _DEBUG
+ image->ZeroPointerField(this, offsetof(MethodDesc, m_pszDebugMethodName));
+ image->FixupPointerField(this, offsetof(MethodDesc, m_pszDebugMethodName));
+ image->FixupPointerField(this, offsetof(MethodDesc, m_pszDebugClassName));
+ image->FixupPointerField(this, offsetof(MethodDesc, m_pszDebugMethodSignature));
+ if (IsTightlyBoundToMethodTable())
+ {
+ image->FixupPointerField(this, offsetof(MethodDesc, m_pDebugMethodTable));
+ }
+ else
+ {
+ image->FixupMethodTablePointer(this, &m_pDebugMethodTable);
+ }
+#endif // _DEBUG
+
+ MethodDesc *pNewMD = (MethodDesc*) image->GetImagePointer(this);
+ PREFIX_ASSUME(pNewMD != NULL);
+
+ // Fixup the chunk header as part of the first MethodDesc in the chunk
+ if (pNewMD->m_chunkIndex == 0)
+ {
+ MethodDescChunk * pNewChunk = pNewMD->GetMethodDescChunk();
+
+ // For most MethodDescs we can always directly bind to the method table, because
+ // the MT is guaranteed to be in the same image. In other words the MethodDescs and the
+ // MethodTable are guaranteed to be "tightly-bound", i.e. if one is present in
+ // an NGEN image then the other will be, and if one is used at runtime then
+ // the other will be too. In these cases we always want to hardbind the pointer.
+ //
+ // However for generic method instantiations and other funky MDs managed by the InstMethHashTable
+ // the method table might be saved in another module. Whether these get "used" at runtime
+ // is a decision taken by the MethodDesc loading code in genmeth.cpp (FindOrCreateAssociatedMethodDesc),
+ // and is independent of the decision of whether the method table gets used.
+
+ if (IsTightlyBoundToMethodTable())
+ {
+ image->FixupRelativePointerField(pNewChunk, offsetof(MethodDescChunk, m_methodTable));
+ }
+ else
+ {
+ image->FixupMethodTablePointer(pNewChunk, &pNewChunk->m_methodTable);
+ }
+
+ if (!pNewChunk->m_next.IsNull())
+ {
+ image->FixupRelativePointerField(pNewChunk, offsetof(MethodDescChunk, m_next));
+ }
+ }
+
+ if (pNewMD->HasPrecode())
+ {
+ Precode* pPrecode = GetSavedPrecode(image);
+
+ // Fixup the precode if we have stored it
+ pPrecode->Fixup(image, this);
+ }
+
+ if (IsDynamicMethod())
+ {
+ image->ZeroPointerField(this, offsetof(DynamicMethodDesc, m_pResolver));
+ image->FixupPointerField(this, offsetof(DynamicMethodDesc, m_pszMethodName));
+ }
+
+ if (GetClassification() == mcInstantiated)
+ {
+ InstantiatedMethodDesc* pIMD = AsInstantiatedMethodDesc();
+ BOOL needsRestore = NeedsRestore(image);
+
+ if (pIMD->IMD_IsWrapperStubWithInstantiations())
+ {
+ image->FixupMethodDescPointer(pIMD, &pIMD->m_pWrappedMethodDesc);
+ }
+ else
+ {
+ if (pIMD->IMD_IsSharedByGenericMethodInstantiations())
+ {
+ pIMD->m_pDictLayout->Fixup(image, TRUE);
+ image->FixupPointerField(this, offsetof(InstantiatedMethodDesc, m_pDictLayout));
+ }
+ }
+
+ image->FixupPointerField(this, offsetof(InstantiatedMethodDesc, m_pPerInstInfo));
+
+ // Generic methods are dealt with specially to avoid encoding the formal method type parameters
+ if (IsTypicalMethodDefinition())
+ {
+ Instantiation inst = GetMethodInstantiation();
+ FixupPointer<TypeHandle> * pInst = inst.GetRawArgs();
+ for (DWORD j = 0; j < inst.GetNumArgs(); j++)
+ {
+ image->FixupTypeHandlePointer(pInst, &pInst[j]);
+ }
+ }
+ else if (GetMethodDictionary())
+ {
+ LOG((LF_JIT, LL_INFO10000, "GENERICS: Fixup dictionary for MD %s\n",
+ m_pszDebugMethodName ? m_pszDebugMethodName : "<no-name>"));
+ BOOL canSaveInstantiation = TRUE;
+ if (IsGenericMethodDefinition() && !IsTypicalMethodDefinition())
+ {
+ if (GetMethodDictionary()->ComputeNeedsRestore(image, NULL, GetNumGenericMethodArgs()))
+ {
+ _ASSERTE(needsRestore);
+ canSaveInstantiation = FALSE;
+ }
+ else
+ {
+ Instantiation inst = GetMethodInstantiation();
+ FixupPointer<TypeHandle> * pInst = inst.GetRawArgs();
+ for (DWORD j = 0; j < inst.GetNumArgs(); j++)
+ {
+ TypeHandle th = pInst[j].GetValue();
+ if (!th.IsNull())
+ {
+ if (!(image->CanEagerBindToTypeHandle(th) && image->CanHardBindToZapModule(th.GetLoaderModule())))
+ {
+ canSaveInstantiation = FALSE;
+ needsRestore = TRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+ // We can only save the (non-instantiation) slots of
+ // the dictionary if we are compiling against a known and fixed
+ // dictionary layout. That will only be the case if we can hardbind
+ // to the shared method desc (which owns the dictionary layout).
+ // If we are not a wrapper stub then
+ // there won't be any (non-instantiation) slots in the dictionary.
+ BOOL canSaveSlots =
+ pIMD->IMD_IsWrapperStubWithInstantiations() &&
+ image->CanEagerBindToMethodDesc(pIMD->IMD_GetWrappedMethodDesc());
+
+ GetMethodDictionary()->Fixup(image,
+ canSaveInstantiation,
+ canSaveSlots,
+ GetNumGenericMethodArgs(),
+ GetModule(),
+ GetDictionaryLayout());
+ }
+
+ if (needsRestore)
+ {
+ InstantiatedMethodDesc* pNewIMD = (InstantiatedMethodDesc *) image->GetImagePointer(this);
+ if (pNewIMD == NULL)
+ COMPlusThrowHR(E_POINTER);
+
+ pNewIMD->m_wFlags2 |= InstantiatedMethodDesc::Unrestored;
+ }
+ }
+
+ if (IsNDirect())
+ {
+ //
+ // For now, set method desc back into its pristine uninitialized state.
+ //
+
+ NDirectMethodDesc *pNMD = (NDirectMethodDesc *)this;
+
+ image->FixupPointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pWriteableData));
+
+ NDirectWriteableData *pWriteableData = pNMD->GetWriteableData();
+ NDirectImportThunkGlue *pImportThunkGlue = pNMD->GetNDirectImportThunkGlue();
+
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ if (!pNMD->MarshalingRequired())
+ {
+ image->FixupField(pWriteableData, offsetof(NDirectWriteableData, m_pNDirectTarget),
+ pImportThunkGlue, Precode::GetEntryPointOffset());
+ }
+ else
+ {
+ image->ZeroPointerField(pWriteableData, offsetof(NDirectWriteableData, m_pNDirectTarget));
+ }
+#else // HAS_NDIRECT_IMPORT_PRECODE
+ PORTABILITY_WARNING("NDirectImportThunkGlue");
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+
+ image->ZeroPointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pNativeNDirectTarget));
+
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ if (!pNMD->MarshalingRequired())
+ {
+ // import thunk is only needed if the P/Invoke is inlinable
+ image->FixupPointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pImportThunkGlue));
+ ((Precode*)pImportThunkGlue)->Fixup(image, this);
+ }
+ else
+ {
+ image->ZeroPointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pImportThunkGlue));
+ }
+#else // HAS_NDIRECT_IMPORT_PRECODE
+ PORTABILITY_WARNING("NDirectImportThunkGlue");
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+
+ if (!IsQCall())
+ {
+ image->FixupPointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pszLibName));
+ image->FixupPointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pszEntrypointName));
+ }
+
+ if (image->IsStored(pNMD->ndirect.m_pStubMD.GetValueMaybeNull()))
+ image->FixupRelativePointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pStubMD));
+ else
+ image->ZeroPointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pStubMD));
+ }
+
+ if (HasStoredSig())
+ {
+ image->FixupPointerField(this, offsetof(StoredSigMethodDesc, m_pSig));
+
+ // The DynamicMethodDescs used for IL stubs may have a signature that refers to
+ // runtime types using ELEMENT_TYPE_INTERNAL. We need to fixup these types here.
+ if (IsILStub())
+ {
+ PTR_DynamicMethodDesc pDynamicMD = AsDynamicMethodDesc();
+ pDynamicMD->Fixup(image);
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (IsComPlusCall())
+ {
+ ComPlusCallMethodDesc *pComPlusMD = (ComPlusCallMethodDesc*)this;
+ ComPlusCallInfo *pComInfo = pComPlusMD->m_pComPlusCallInfo;
+
+ if (image->IsStored(pComInfo))
+ {
+ image->FixupPointerField(pComPlusMD, offsetof(ComPlusCallMethodDesc, m_pComPlusCallInfo));
+ pComInfo->Fixup(image);
+ }
+ else
+ {
+ image->ZeroPointerField(pComPlusMD, offsetof(ComPlusCallMethodDesc, m_pComPlusCallInfo));
+ }
+ }
+ else if (IsGenericComPlusCall())
+ {
+ ComPlusCallInfo *pComInfo = AsInstantiatedMethodDesc()->IMD_GetComPlusCallInfo();
+ pComInfo->Fixup(image);
+ }
+#endif // FEATURE_COMINTEROP
+
+ SIZE_T currentSize = GetBaseSize();
+
+ //
+ // Fix up all optional members
+ //
+
+ if (HasNonVtableSlot())
+ {
+ FixupSlot(image, this, currentSize, IMAGE_REL_BASED_RelativePointer);
+
+ currentSize += sizeof(NonVtableSlot);
+ }
+
+ if (IsMethodImpl())
+ {
+ MethodImpl *pImpl = GetMethodImpl();
+
+ pImpl->Fixup(image, this, currentSize);
+
+ currentSize += sizeof(MethodImpl);
+ }
+
+ if (pNewMD->HasNativeCodeSlot())
+ {
+ ZapNode * pCodeNode = image->GetCodeAddress(this);
+ ZapNode * pFixupList = image->GetFixupList(this);
+
+ if (pCodeNode != NULL)
+ image->FixupFieldToNode(this, currentSize, pCodeNode, (pFixupList != NULL) ? 1 : 0, IMAGE_REL_BASED_RelativePointer);
+ currentSize += sizeof(NativeCodeSlot);
+
+ if (pFixupList != NULL)
+ {
+ image->FixupFieldToNode(this, currentSize, pFixupList, 0, IMAGE_REL_BASED_RelativePointer);
+ currentSize += sizeof(FixupListSlot);
+ }
+ }
+} // MethodDesc::Fixup
+
+//*******************************************************************************
+Precode* MethodDesc::GetSavedPrecode(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ Precode * pPrecode = (Precode *)image->LookupSurrogate(this);
+ _ASSERTE(pPrecode != NULL);
+ _ASSERTE(pPrecode->IsCorrectMethodDesc(this));
+
+ return pPrecode;
+}
+
+Precode* MethodDesc::GetSavedPrecodeOrNull(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ Precode * pPrecode = (Precode *)image->LookupSurrogate(this);
+ if (pPrecode == NULL)
+ {
+ return NULL;
+ }
+
+ _ASSERTE(pPrecode->IsCorrectMethodDesc(this));
+
+ return pPrecode;
+}
+
+//*******************************************************************************
+void MethodDesc::FixupSlot(DataImage *image, PVOID p, SSIZE_T offset, ZapRelocationType type)
+{
+ STANDARD_VM_CONTRACT;
+
+
+ Precode* pPrecode = GetSavedPrecodeOrNull(image);
+ if (pPrecode != NULL)
+ {
+ // Use the precode if we have decided to store it
+ image->FixupField(p, offset, pPrecode, Precode::GetEntryPointOffset(), type);
+ }
+ else
+ {
+ _ASSERTE(MayHaveNativeCode());
+ ZapNode *code = image->GetCodeAddress(this);
+ _ASSERTE(code != 0);
+ image->FixupFieldToNode(p, offset, code, Precode::GetEntryPointOffset(), type);
+ }
+}
+
+//*******************************************************************************
+SIZE_T MethodDesc::SaveChunk::GetSavedMethodDescSize(MethodInfo * pMethodInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+ MethodDesc * pMD = pMethodInfo->m_pMD;
+
+ SIZE_T size = pMD->GetBaseSize();
+
+ if (pMD->HasNonVtableSlot())
+ size += sizeof(NonVtableSlot);
+
+ if (pMD->IsMethodImpl())
+ size += sizeof(MethodImpl);
+
+ if (pMethodInfo->m_fHasNativeCodeSlot)
+ {
+ size += sizeof(NativeCodeSlot);
+
+ if (pMethodInfo->m_fHasFixupList)
+ size += sizeof(FixupListSlot);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (pMD->IsGenericComPlusCall())
+ size += sizeof(ComPlusCallInfo);
+#endif // FEATURE_COMINTEROP
+
+ _ASSERTE(size % MethodDesc::ALIGNMENT == 0);
+
+ return size;
+}
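+
+ // Note (added commentary): the optional members accounted for above follow
+ // the base MethodDesc in a fixed order - NonVtableSlot, MethodImpl,
+ // NativeCodeSlot, FixupListSlot, and ComPlusCallInfo for generic COM calls -
+ // matching the offsets that MethodDesc::Fixup walks, so the computed size
+ // lines up with the bytes copied in SaveOneChunk.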
+
+//*******************************************************************************
+void MethodDesc::SaveChunk::SaveOneChunk(COUNT_T start, COUNT_T count, ULONG sizeOfMethodDescs, DWORD priority)
+{
+ STANDARD_VM_CONTRACT;
+ DataImage::ItemKind kind;
+
+ switch (priority)
+ {
+ case HotMethodDesc:
+ kind = DataImage::ITEM_METHOD_DESC_HOT;
+ break;
+ case WriteableMethodDesc:
+ kind = DataImage::ITEM_METHOD_DESC_HOT_WRITEABLE;
+ break;
+ case ColdMethodDesc:
+ kind = DataImage::ITEM_METHOD_DESC_COLD;
+ break;
+ case ColdWriteableMethodDesc:
+ kind = DataImage::ITEM_METHOD_DESC_COLD_WRITEABLE;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ ULONG size = sizeOfMethodDescs + sizeof(MethodDescChunk);
+ ZapStoredStructure * pNode = m_pImage->StoreStructure(NULL, size, kind);
+
+ BYTE * pData = (BYTE *)m_pImage->GetImagePointer(pNode);
+
+ MethodDescChunk * pNewChunk = (MethodDescChunk *)pData;
+
+ // Bind the image space so we can use the regular fixup helpers
+ m_pImage->BindPointer(pNewChunk, pNode, 0);
+
+ pNewChunk->SetMethodTable(m_methodInfos[start].m_pMD->GetMethodTable());
+
+ pNewChunk->SetIsZapped();
+ pNewChunk->SetTokenRange(GetTokenRange(m_methodInfos[start].m_pMD->GetMemberDef()));
+
+ pNewChunk->SetSizeAndCount(sizeOfMethodDescs, count);
+
+ Precode::SaveChunk precodeSaveChunk; // Helper for saving precodes in chunks
+
+ ULONG offset = sizeof(MethodDescChunk);
+ for (COUNT_T i = 0; i < count; i++)
+ {
+ MethodInfo * pMethodInfo = &(m_methodInfos[start + i]);
+ MethodDesc * pMD = pMethodInfo->m_pMD;
+
+ m_pImage->BindPointer(pMD, pNode, offset);
+
+ pMD->Save(m_pImage);
+
+ MethodDesc * pNewMD = (MethodDesc *)(pData + offset);
+
+ CopyMemory(pNewMD, pMD, pMD->GetBaseSize());
+
+ if (pMD->IsMethodImpl())
+ CopyMemory(pNewMD->GetMethodImpl(), pMD->GetMethodImpl(), sizeof(MethodImpl));
+ else
+ pNewMD->m_wFlags &= ~mdcMethodImpl;
+
+ pNewMD->m_chunkIndex = (BYTE) ((offset - sizeof(MethodDescChunk)) / MethodDesc::ALIGNMENT);
+ _ASSERTE(pNewMD->GetMethodDescChunk() == pNewChunk);
+
+ pNewMD->m_bFlags2 |= enum_flag2_HasStableEntryPoint;
+ if (pMethodInfo->m_fHasPrecode)
+ {
+ precodeSaveChunk.Save(m_pImage, pMD);
+ pNewMD->m_bFlags2 |= enum_flag2_HasPrecode;
+ }
+ else
+ {
+ pNewMD->m_bFlags2 &= ~enum_flag2_HasPrecode;
+ }
+
+ if (pMethodInfo->m_fHasNativeCodeSlot)
+ {
+ pNewMD->m_bFlags2 |= enum_flag2_HasNativeCodeSlot;
+ }
+ else
+ {
+ pNewMD->m_bFlags2 &= ~enum_flag2_HasNativeCodeSlot;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (pMD->IsGenericComPlusCall())
+ {
+ ComPlusCallInfo *pComInfo = pMD->AsInstantiatedMethodDesc()->IMD_GetComPlusCallInfo();
+
+ CopyMemory(pNewMD->AsInstantiatedMethodDesc()->IMD_GetComPlusCallInfo(), pComInfo, sizeof(ComPlusCallInfo));
+
+ m_pImage->BindPointer(pComInfo, pNode, offset + ((BYTE *)pComInfo - (BYTE *)pMD));
+ }
+#endif // FEATURE_COMINTEROP
+
+ pNewMD->PrecomputeNameHash();
+
+ offset += GetSavedMethodDescSize(pMethodInfo);
+ }
+ _ASSERTE(offset == sizeOfMethodDescs + sizeof(MethodDescChunk));
+
+ precodeSaveChunk.Flush(m_pImage);
+
+ if (m_methodInfos[start].m_pMD->IsTightlyBoundToMethodTable())
+ {
+ if (m_pLastChunk != NULL)
+ {
+ m_pLastChunk->m_next.SetValue(pNewChunk);
+ }
+ else
+ {
+ _ASSERTE(m_pFirstNode == NULL);
+ m_pFirstNode = pNode;
+ }
+ m_pLastChunk = pNewChunk;
+ }
+}
+
+//*******************************************************************************
+void MethodDesc::SaveChunk::Append(MethodDesc * pMD)
+{
+ STANDARD_VM_CONTRACT;
+#ifdef _DEBUG
+ if (!m_methodInfos.IsEmpty())
+ {
+ // Verify that all MethodDescs in the chunk are alike
+ MethodDesc * pFirstMD = m_methodInfos[0].m_pMD;
+
+ _ASSERTE(pFirstMD->GetMethodTable() == pMD->GetMethodTable());
+ _ASSERTE(pFirstMD->IsTightlyBoundToMethodTable() == pMD->IsTightlyBoundToMethodTable());
+ }
+ _ASSERTE(!m_pImage->IsStored(pMD));
+#endif
+
+ MethodInfo method;
+ method.m_pMD = pMD;
+
+ BYTE priority = HotMethodDesc;
+
+ // We only write into mcInstantiated methoddescs to mark them as restored
+ if (pMD->NeedsRestore(m_pImage, TRUE) && pMD->GetClassification() == mcInstantiated)
+ priority |= WriteableMethodDesc; // writeable
+
+ //
+ // Determines whether the method desc should be considered hot, based
+ // on a bitmap that contains entries for hot method descs. At this
+ // point the only cold method descs are those not in the bitmap.
+ //
+ if ((m_pImage->GetMethodProfilingFlags(pMD) & (1 << ReadMethodDesc)) == 0)
+ priority |= ColdMethodDesc; // cold
+
+ // We can have more priorities here in the future to scale well
+ // for many IBC training scenarios.
+
+ method.m_priority = priority;
+
+ // Save the precode if we have no directly callable code
+ method.m_fHasPrecode = !m_pImage->CanDirectCall(pMD);
+
+ // Determine optional slots that are going to be saved
+ if (method.m_fHasPrecode)
+ {
+ method.m_fHasNativeCodeSlot = pMD->MayHaveNativeCode();
+
+ if (method.m_fHasNativeCodeSlot)
+ {
+ method.m_fHasFixupList = (m_pImage->GetFixupList(pMD) != NULL);
+ }
+ else
+ {
+ _ASSERTE(m_pImage->GetFixupList(pMD) == NULL);
+ method.m_fHasFixupList = FALSE;
+ }
+ }
+ else
+ {
+ method.m_fHasNativeCodeSlot = FALSE;
+
+ _ASSERTE(m_pImage->GetFixupList(pMD) == NULL);
+ method.m_fHasFixupList = FALSE;
+ }
+
+ m_methodInfos.Append(method);
+}
+
+//*******************************************************************************
+int __cdecl MethodDesc::SaveChunk::MethodInfoCmp(const void* a_, const void* b_)
+{
+ LIMITED_METHOD_CONTRACT;
+ // Sort by priority as primary key and token as secondary key
+ MethodInfo * a = (MethodInfo *)a_;
+ MethodInfo * b = (MethodInfo *)b_;
+
+ int priorityDiff = (int)(a->m_priority - b->m_priority);
+ if (priorityDiff != 0)
+ return priorityDiff;
+
+ int tokenDiff = (int)(a->m_pMD->GetMemberDef_NoLogging() - b->m_pMD->GetMemberDef_NoLogging());
+ if (tokenDiff != 0)
+ return tokenDiff;
+
+ // Place unboxing stubs first; code:MethodDesc::FindOrCreateAssociatedMethodDesc depends on this invariant
+ int unboxingDiff = (int)(b->m_pMD->IsUnboxingStub() - a->m_pMD->IsUnboxingStub());
+ return unboxingDiff;
+}
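+
+ // Note (added commentary): because the token is the secondary key and the
+ // unboxing-stub check is the final tiebreak, an unboxing stub and its
+ // wrapped method desc (same token, and normally the same priority) end up
+ // adjacent with the stub first; the bundling logic in Save() below relies
+ // on exactly that pair ordering.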
+
+//*******************************************************************************
+ZapStoredStructure * MethodDesc::SaveChunk::Save()
+{
+ // Sort by priority as primary key and token as secondary key
+ qsort (&m_methodInfos[0], // start of array
+ m_methodInfos.GetCount(), // array size in elements
+ sizeof(MethodInfo), // element size in bytes
+ MethodInfoCmp); // comparer function
+
+ DWORD currentPriority = NoFlags;
+ int currentTokenRange = -1;
+ int nextStart = 0;
+ SIZE_T sizeOfMethodDescs = 0;
+
+ //
+ // Go over all MethodDescs and create the smallest number of chunks possible
+ //
+
+ for (COUNT_T i = 0; i < m_methodInfos.GetCount(); i++)
+ {
+ MethodInfo * pMethodInfo = &(m_methodInfos[i]);
+ MethodDesc * pMD = pMethodInfo->m_pMD;
+
+ DWORD priority = pMethodInfo->m_priority;
+ int tokenRange = GetTokenRange(pMD->GetMemberDef());
+
+ SIZE_T size = GetSavedMethodDescSize(pMethodInfo);
+
+ // Bundle that has to be in the same chunk
+ SIZE_T bundleSize = size;
+
+ if (pMD->IsUnboxingStub() && pMD->IsTightlyBoundToMethodTable())
+ {
+ // The wrapped method desc has to immediately follow its unboxing stub, and both have to be in one chunk
+ _ASSERTE(m_methodInfos[i+1].m_pMD->GetMemberDef() == m_methodInfos[i].m_pMD->GetMemberDef());
+
+ // Make sure that both the wrapped method desc and the unboxing stub will fit into the same chunk
+ bundleSize += GetSavedMethodDescSize(&m_methodInfos[i+1]);
+ }
+
+ if (priority != currentPriority ||
+ tokenRange != currentTokenRange ||
+ sizeOfMethodDescs + bundleSize > MethodDescChunk::MaxSizeOfMethodDescs)
+ {
+ if (sizeOfMethodDescs != 0)
+ {
+ SaveOneChunk(nextStart, i - nextStart, sizeOfMethodDescs, currentPriority);
+ nextStart = i;
+ }
+
+ currentPriority = priority;
+ currentTokenRange = tokenRange;
+ sizeOfMethodDescs = 0;
+ }
+
+ sizeOfMethodDescs += size;
+ }
+
+ if (sizeOfMethodDescs != 0)
+ SaveOneChunk(nextStart, m_methodInfos.GetCount() - nextStart, sizeOfMethodDescs, currentPriority);
+
+ return m_pFirstNode;
+}
+
+#ifdef FEATURE_COMINTEROP
+BOOL ComPlusCallInfo::ShouldSave(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc * pStubMD = m_pStubMD.GetValueMaybeNull();
+
+ // Note that pStubMD can be a regular IL method desc for stubs implemented in IL
+ return (pStubMD != NULL) && image->CanEagerBindToMethodDesc(pStubMD) && image->CanHardBindToZapModule(pStubMD->GetLoaderModule());
+}
+
+void ComPlusCallInfo::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ // It is not worth the complexity to do full pre-initialization for WinRT delegates
+ if (m_pInterfaceMT != NULL && m_pInterfaceMT->IsDelegate())
+ {
+ if (!m_pStubMD.IsNull())
+ {
+ image->FixupRelativePointerField(this, offsetof(ComPlusCallInfo, m_pStubMD));
+ }
+ else
+ {
+ image->ZeroPointerField(this, offsetof(ComPlusCallInfo, m_pStubMD));
+ }
+
+ image->ZeroPointerField(this, offsetof(ComPlusCallInfo, m_pInterfaceMT));
+ image->ZeroPointerField(this, offsetof(ComPlusCallInfo, m_pILStub));
+ return;
+ }
+
+ if (m_pInterfaceMT != NULL)
+ {
+ if (image->CanEagerBindToTypeHandle(m_pInterfaceMT) && image->CanHardBindToZapModule(m_pInterfaceMT->GetLoaderModule()))
+ {
+ image->FixupPointerField(this, offsetof(ComPlusCallInfo, m_pInterfaceMT));
+ }
+ else
+ {
+ image->ZeroPointerField(this, offsetof(ComPlusCallInfo, m_pInterfaceMT));
+ }
+ }
+
+ if (!m_pStubMD.IsNull())
+ {
+ image->FixupRelativePointerField(this, offsetof(ComPlusCallInfo, m_pStubMD));
+
+ MethodDesc * pStubMD = m_pStubMD.GetValue();
+ ZapNode * pCode = pStubMD->IsDynamicMethod() ? image->GetCodeAddress(pStubMD) : NULL;
+ if (pCode != NULL)
+ {
+ image->FixupFieldToNode(this, offsetof(ComPlusCallInfo, m_pILStub), pCode ARM_ARG(THUMB_CODE));
+ }
+ else
+ {
+ image->ZeroPointerField(this, offsetof(ComPlusCallInfo, m_pILStub));
+ }
+ }
+ else
+ {
+ image->ZeroPointerField(this, offsetof(ComPlusCallInfo, m_pStubMD));
+
+ image->ZeroPointerField(this, offsetof(ComPlusCallInfo, m_pILStub));
+ }
+}
+#endif // FEATURE_COMINTEROP
+
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#endif // !DACCESS_COMPILE
+
+#ifdef FEATURE_PREJIT
+//*******************************************************************************
+void MethodDesc::CheckRestore(ClassLoadLevel level)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+
+ if (!IsRestored() || !GetMethodTable()->IsFullyLoaded())
+ {
+ g_IBCLogger.LogMethodDescAccess(this);
+
+ if (GetClassification() == mcInstantiated)
+ {
+#ifndef DACCESS_COMPILE
+ InstantiatedMethodDesc *pIMD = AsInstantiatedMethodDesc();
+ EnsureWritablePages(pIMD);
+
+ // First restore method table pointer in singleton chunk;
+ // it might be out-of-module
+ GetMethodDescChunk()->RestoreMTPointer(level);
+#ifdef _DEBUG
+ Module::RestoreMethodTablePointer(&m_pDebugMethodTable, NULL, level);
+#endif
+
+ // Now restore wrapped method desc if present; we need this for the dictionary layout too
+ if (pIMD->IMD_IsWrapperStubWithInstantiations())
+ Module::RestoreMethodDescPointer(&pIMD->m_pWrappedMethodDesc);
+
+ // Finally restore the dictionary itself (including instantiation)
+ if (GetMethodDictionary())
+ {
+ GetMethodDictionary()->Restore(GetNumGenericMethodArgs(), level);
+ }
+
+ g_IBCLogger.LogMethodDescWriteAccess(this);
+
+ // If this function had already been requested for rejit, then give the rejit
+ // manager a chance to jump-stamp the code we are restoring. This ensures the
+ // first thread entering the function will jump to the prestub and trigger the
+ // rejit. Note that the PublishMethodHolder may take a lock to avoid a rejit race.
+ // See code:ReJitManager::PublishMethodHolder::PublishMethodHolder#PublishCode
+ // for details on the race.
+ //
+ {
+ ReJitPublishMethodHolder publishWorker(this, GetNativeCode());
+ pIMD->m_wFlags2 = pIMD->m_wFlags2 & ~InstantiatedMethodDesc::Unrestored;
+ }
+
+#if defined(FEATURE_EVENT_TRACE)
+ if (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled)
+#endif
+ {
+ ETW::MethodLog::MethodRestored(this);
+ }
+
+#else // DACCESS_COMPILE
+ DacNotImpl();
+#endif // DACCESS_COMPILE
+ }
+ else if (IsILStub()) // the only stored-sig MD type that uses ET_INTERNAL
+ {
+ ClassLoader::EnsureLoaded(TypeHandle(GetMethodTable()), level);
+
+#ifndef DACCESS_COMPILE
+ PTR_DynamicMethodDesc pDynamicMD = AsDynamicMethodDesc();
+ pDynamicMD->Restore();
+
+#if defined(FEATURE_EVENT_TRACE)
+ if (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled)
+#endif
+ {
+ ETW::MethodLog::MethodRestored(this);
+ }
+#else // DACCESS_COMPILE
+ DacNotImpl();
+#endif // DACCESS_COMPILE
+ }
+ else
+ {
+ ClassLoader::EnsureLoaded(TypeHandle(GetMethodTable()), level);
+ }
+ }
+}
+#else // FEATURE_PREJIT
+//*******************************************************************************
+void MethodDesc::CheckRestore(ClassLoadLevel level)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+#endif // !FEATURE_PREJIT
+
+// static
+MethodDesc* MethodDesc::GetMethodDescFromStubAddr(PCODE addr, BOOL fSpeculative /*=FALSE*/)
+{
+ CONTRACT(MethodDesc *)
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ MethodDesc * pMD = NULL;
+
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ if (MethodDescChunk::IsCompactEntryPointAtAddress(addr))
+ {
+ pMD = MethodDescChunk::GetMethodDescFromCompactEntryPoint(addr, fSpeculative);
+ RETURN(pMD);
+ }
+#endif // HAS_COMPACT_ENTRYPOINTS
+
+ // Otherwise this must be some kind of precode
+ //
+ Precode* pPrecode = Precode::GetPrecodeFromEntryPoint(addr, fSpeculative);
+ PREFIX_ASSUME(fSpeculative || (pPrecode != NULL));
+ if (pPrecode != NULL)
+ {
+ pMD = pPrecode->GetMethodDesc(fSpeculative);
+ RETURN(pMD);
+ }
+
+ RETURN(NULL); // Not found
+}
+
+#ifdef FEATURE_PREJIT
+//*******************************************************************************
+TADDR MethodDesc::GetFixupList()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (HasNativeCodeSlot())
+ {
+ TADDR pSlot = GetAddrOfNativeCodeSlot();
+ if (*dac_cast<PTR_TADDR>(pSlot) & FIXUP_LIST_MASK)
+ return FixupListSlot::GetValueAtPtr(pSlot + sizeof(NativeCodeSlot));
+ }
+
+ return NULL;
+}
+
+//*******************************************************************************
+BOOL MethodDesc::IsRestored_NoLogging()
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+ DPTR(RelativeFixupPointer<PTR_MethodTable>) ppMT = GetMethodTablePtr();
+
+ if (ppMT->IsTagged(dac_cast<TADDR>(ppMT)))
+ return FALSE;
+
+ if (!ppMT->GetValue(dac_cast<TADDR>(ppMT))->IsRestored_NoLogging())
+ return FALSE;
+
+ if (GetClassification() == mcInstantiated)
+ {
+ InstantiatedMethodDesc *pIMD = AsInstantiatedMethodDesc();
+ return (pIMD->m_wFlags2 & InstantiatedMethodDesc::Unrestored) == 0;
+ }
+
+ if (IsILStub()) // the only stored-sig MD type that uses ET_INTERNAL
+ {
+ PTR_DynamicMethodDesc pDynamicMD = AsDynamicMethodDesc();
+ return pDynamicMD->IsRestored();
+ }
+
+ return TRUE;
+}
+
+BOOL MethodDesc::IsRestored()
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+#ifdef DACCESS_COMPILE
+
+ return IsRestored_NoLogging();
+
+#else // not DACCESS_COMPILE
+
+ DPTR(RelativeFixupPointer<PTR_MethodTable>) ppMT = GetMethodTablePtr();
+
+ if (ppMT->IsTagged(dac_cast<TADDR>(ppMT)))
+ return FALSE;
+
+ if (!ppMT->GetValue(dac_cast<TADDR>(ppMT))->IsRestored())
+ return FALSE;
+
+ if (GetClassification() == mcInstantiated)
+ {
+ InstantiatedMethodDesc *pIMD = AsInstantiatedMethodDesc();
+ return (pIMD->m_wFlags2 & InstantiatedMethodDesc::Unrestored) == 0;
+ }
+
+ if (IsILStub()) // the only stored-sig MD type that uses ET_INTERNAL
+ {
+ PTR_DynamicMethodDesc pDynamicMD = AsDynamicMethodDesc();
+ return pDynamicMD->IsRestored();
+ }
+
+ return TRUE;
+
+#endif // DACCESS_COMPILE
+
+}
+
+#else // !FEATURE_PREJIT
+//*******************************************************************************
+BOOL MethodDesc::IsRestored_NoLogging()
+{
+ LIMITED_METHOD_CONTRACT;
+ return TRUE;
+}
+//*******************************************************************************
+BOOL MethodDesc::IsRestored()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return TRUE;
+}
+#endif // !FEATURE_PREJIT
+
+#ifdef HAS_COMPACT_ENTRYPOINTS
+
+#if defined(_TARGET_X86_)
+
+#include <pshpack1.h>
+static const struct CentralJumpCode {
+ BYTE m_movzxEAX[3];
+ BYTE m_shlEAX[3];
+ BYTE m_addEAX[1];
+ MethodDesc* m_pBaseMD;
+ BYTE m_jmp[1];
+ INT32 m_rel32;
+
+ inline void Setup(MethodDesc* pMD, PCODE target, LoaderAllocator *pLoaderAllocator) {
+ WRAPPER_NO_CONTRACT;
+ m_pBaseMD = pMD;
+ m_rel32 = rel32UsingJumpStub(&m_rel32, target, pMD, pLoaderAllocator);
+ }
+
+ inline BOOL CheckTarget(TADDR target) {
+ LIMITED_METHOD_CONTRACT;
+ TADDR addr = rel32Decode(PTR_HOST_MEMBER_TADDR(CentralJumpCode, this, m_rel32));
+ return (addr == target);
+ }
+}
+c_CentralJumpCode = {
+ { 0x0F, 0xB6, 0xC0 }, // movzx eax,al
+ { 0xC1, 0xE0, MethodDesc::ALIGNMENT_SHIFT }, // shl eax, MethodDesc::ALIGNMENT_SHIFT
+ { 0x05 }, NULL, // add eax, pBaseMD
+ { 0xE9 }, 0 // jmp PreStub
+};
+#include <poppack.h>
+
+#elif defined(_TARGET_AMD64_)
+
+#include <pshpack1.h>
+static const struct CentralJumpCode {
+ BYTE m_movzxRAX[4];
+ BYTE m_shlEAX[4];
+ BYTE m_movRAX[2];
+ MethodDesc* m_pBaseMD;
+ BYTE m_addR10RAX[3];
+ BYTE m_jmp[1];
+ INT32 m_rel32;
+
+ inline void Setup(MethodDesc* pMD, PCODE target, LoaderAllocator *pLoaderAllocator) {
+ WRAPPER_NO_CONTRACT;
+ m_pBaseMD = pMD;
+ m_rel32 = rel32UsingJumpStub(&m_rel32, target, pMD, pLoaderAllocator);
+ }
+
+ inline BOOL CheckTarget(TADDR target) {
+ WRAPPER_NO_CONTRACT;
+ TADDR addr = rel32Decode(PTR_HOST_MEMBER_TADDR(CentralJumpCode, this, m_rel32));
+ if (*PTR_BYTE(addr) == 0x48 &&
+ *PTR_BYTE(addr+1) == 0xB8 &&
+ *PTR_BYTE(addr+10) == 0xFF &&
+ *PTR_BYTE(addr+11) == 0xE0)
+ {
+ addr = *PTR_TADDR(addr+2);
+ }
+ return (addr == target);
+ }
+}
+c_CentralJumpCode = {
+ { 0x48, 0x0F, 0xB6, 0xC0 }, // movzx rax,al
+ { 0x48, 0xC1, 0xE0, MethodDesc::ALIGNMENT_SHIFT }, // shl rax, MethodDesc::ALIGNMENT_SHIFT
+ { 0x49, 0xBA }, NULL, // mov r10, pBaseMD
+ { 0x4C, 0x03, 0xD0 }, // add r10,rax
+ { 0xE9 }, 0 // jmp PreStub
+};
+#include <poppack.h>
+
+#else
+#error Unsupported platform
+#endif
+
+typedef DPTR(struct CentralJumpCode) PTR_CentralJumpCode;
+#define TEP_CENTRAL_JUMP_SIZE sizeof(c_CentralJumpCode)
+static_assert_no_msg((TEP_CENTRAL_JUMP_SIZE & 1) == 0);
+
+#define TEP_ENTRY_SIZE 4
+#define TEP_MAX_BEFORE_INDEX (1 + (127 / TEP_ENTRY_SIZE))
+#define TEP_MAX_BLOCK_INDEX (TEP_MAX_BEFORE_INDEX + (128 - TEP_CENTRAL_JUMP_SIZE) / TEP_ENTRY_SIZE)
+#define TEP_FULL_BLOCK_SIZE (TEP_MAX_BLOCK_INDEX * TEP_ENTRY_SIZE + TEP_CENTRAL_JUMP_SIZE)
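+
+ // Worked example of the constants above (added commentary; x86 numbers,
+ // where the packed CentralJumpCode is 16 bytes):
+ //   TEP_MAX_BEFORE_INDEX = 1 + 127/4       = 32 entries reachable by a forward rel8 jump
+ //   TEP_MAX_BLOCK_INDEX  = 32 + (128-16)/4 = 60 entries per full block
+ //   TEP_FULL_BLOCK_SIZE  = 60*4 + 16       = 256 bytes per full block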
+
+//*******************************************************************************
+/* static */ MethodDesc* MethodDescChunk::GetMethodDescFromCompactEntryPoint(PCODE addr, BOOL fSpeculative /*=FALSE*/)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef DACCESS_COMPILE
+ // Always use speculative checks with DAC
+ fSpeculative = TRUE;
+#endif
+
+ // Always do consistency check in debug
+ if (fSpeculative INDEBUG(|| TRUE))
+ {
+ if ((addr & 3) != 1 ||
+ *PTR_BYTE(addr) != X86_INSTR_MOV_AL ||
+ *PTR_BYTE(addr+2) != X86_INSTR_JMP_REL8)
+ {
+ if (fSpeculative) return NULL;
+ _ASSERTE(!"Unexpected code in temporary entrypoint");
+ }
+ }
+
+ int index = *PTR_BYTE(addr+1);
+ TADDR centralJump = addr + 4 + *PTR_SBYTE(addr+3);
+
+ CentralJumpCode* pCentralJumpCode = PTR_CentralJumpCode(centralJump);
+
+ // Always do consistency check in debug
+ if (fSpeculative INDEBUG(|| TRUE))
+ {
+ SIZE_T i;
+ for (i = 0; i < TEP_CENTRAL_JUMP_SIZE; i++)
+ {
+ BYTE b = ((BYTE*)&c_CentralJumpCode)[i];
+ if (b != 0 && b != *PTR_BYTE(centralJump+i))
+ {
+ if (fSpeculative) return NULL;
+ _ASSERTE(!"Unexpected code in temporary entrypoint");
+ }
+ }
+
+ _ASSERTE_IMPL(pCentralJumpCode->CheckTarget(GetPreStubEntryPoint()));
+ }
+
+ return PTR_MethodDesc((TADDR)pCentralJumpCode->m_pBaseMD + index * MethodDesc::ALIGNMENT);
+}
+
+//*******************************************************************************
+SIZE_T MethodDescChunk::SizeOfCompactEntryPoints(int count)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ int fullBlocks = count / TEP_MAX_BLOCK_INDEX;
+ int remainder = count % TEP_MAX_BLOCK_INDEX;
+
+ return 1 + (fullBlocks * TEP_FULL_BLOCK_SIZE) +
+ (remainder * TEP_ENTRY_SIZE) + ((remainder != 0) ? TEP_CENTRAL_JUMP_SIZE : 0);
+}
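+
+ // For example (added commentary, using the x86 numbers above): a chunk of 70
+ // methods needs one full block plus a partial one:
+ //   1 + (1 * 256) + (10 * 4) + 16 = 313 bytes,
+ // where the leading 1 is the deliberate unalignment byte.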
+
+#ifndef DACCESS_COMPILE
+TADDR MethodDescChunk::AllocateCompactEntryPoints(LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ int count = GetCount();
+
+ SIZE_T size = SizeOfCompactEntryPoints(count);
+
+ TADDR temporaryEntryPoints = (TADDR)pamTracker->Track(pLoaderAllocator->GetPrecodeHeap()->AllocAlignedMem(size, sizeof(TADDR)));
+
+ // make the temporary entrypoints unaligned, so they are easy to identify
+ BYTE* p = (BYTE*)temporaryEntryPoints + 1;
+
+ int indexInBlock = TEP_MAX_BLOCK_INDEX; // forces relOffset to be recomputed in the first iteration
+ int relOffset = 0; // relative offset for the short jump
+ MethodDesc * pBaseMD = 0; // method desc at the start of the current block
+
+ MethodDesc * pMD = GetFirstMethodDesc();
+ for (int index = 0; index < count; index++)
+ {
+ if (indexInBlock == TEP_MAX_BLOCK_INDEX)
+ {
+ relOffset = (min(count - index, TEP_MAX_BEFORE_INDEX) - 1) * TEP_ENTRY_SIZE;
+ indexInBlock = 0;
+ pBaseMD = pMD;
+ }
+
+ *(p+0) = X86_INSTR_MOV_AL;
+ int methodDescIndex = pMD->GetMethodDescIndex() - pBaseMD->GetMethodDescIndex();
+ _ASSERTE(FitsInU1(methodDescIndex));
+ *(p+1) = (BYTE)methodDescIndex;
+
+ *(p+2) = X86_INSTR_JMP_REL8;
+ _ASSERTE(FitsInI1(relOffset));
+ *(p+3) = (BYTE)relOffset;
+
+ p += TEP_ENTRY_SIZE; static_assert_no_msg(TEP_ENTRY_SIZE == 4);
+
+ if (relOffset == 0)
+ {
+ CentralJumpCode* pCode = (CentralJumpCode*)p;
+
+ memcpy(pCode, &c_CentralJumpCode, TEP_CENTRAL_JUMP_SIZE);
+
+ pCode->Setup(pBaseMD, GetPreStubEntryPoint(), pLoaderAllocator);
+
+ p += TEP_CENTRAL_JUMP_SIZE;
+
+ relOffset -= TEP_CENTRAL_JUMP_SIZE;
+ }
+
+ relOffset -= TEP_ENTRY_SIZE;
+ indexInBlock++;
+
+ pMD = (MethodDesc *)((BYTE *)pMD + pMD->SizeOf());
+ }
+
+ _ASSERTE(p == (BYTE*)temporaryEntryPoints + size);
+
+ ClrFlushInstructionCache((LPVOID)temporaryEntryPoints, size);
+
+ SetHasCompactEntryPoints();
+ return temporaryEntryPoints;
+}
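+
+ // Rough shape of one full block emitted above (added commentary, x86 sizes):
+ //
+ //   +1 | entry 0 ... entry 31 | CentralJumpCode | entry 32 ... entry 59 |
+ //
+ // Each entry is "mov al, index; jmp rel8". Entries before the
+ // CentralJumpCode jump forward to it (relOffset >= 0) and entries after it
+ // jump backward (relOffset < 0).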
+#endif // !DACCESS_COMPILE
+
+#endif // HAS_COMPACT_ENTRYPOINTS
+
+//*******************************************************************************
+PCODE MethodDescChunk::GetTemporaryEntryPoint(int index)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(HasTemporaryEntryPoints());
+
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ if (HasCompactEntryPoints())
+ {
+ int fullBlocks = index / TEP_MAX_BLOCK_INDEX;
+ int remainder = index % TEP_MAX_BLOCK_INDEX;
+
+ return GetTemporaryEntryPoints() + 1 + (fullBlocks * TEP_FULL_BLOCK_SIZE) +
+ (remainder * TEP_ENTRY_SIZE) + ((remainder >= TEP_MAX_BEFORE_INDEX) ? TEP_CENTRAL_JUMP_SIZE : 0);
+ }
+#endif // HAS_COMPACT_ENTRYPOINTS
+
+ return Precode::GetPrecodeForTemporaryEntryPoint(GetTemporaryEntryPoints(), index)->GetEntryPoint();
+}
+
+PCODE MethodDesc::GetTemporaryEntryPoint()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodDescChunk* pChunk = GetMethodDescChunk();
+ _ASSERTE(pChunk->HasTemporaryEntryPoints());
+
+ int lo = 0, hi = pChunk->GetCount() - 1;
+
+ // Find the temporary entrypoint in the chunk by binary search
+ while (lo < hi)
+ {
+ int mid = (lo + hi) / 2;
+
+ TADDR pEntryPoint = pChunk->GetTemporaryEntryPoint(mid);
+
+ MethodDesc * pMD = MethodDesc::GetMethodDescFromStubAddr(pEntryPoint);
+ if (PTR_HOST_TO_TADDR(this) == PTR_HOST_TO_TADDR(pMD))
+ return pEntryPoint;
+
+ if (PTR_HOST_TO_TADDR(this) > PTR_HOST_TO_TADDR(pMD))
+ lo = mid + 1;
+ else
+ hi = mid - 1;
+ }
+
+ _ASSERTE(lo == hi);
+
+ TADDR pEntryPoint = pChunk->GetTemporaryEntryPoint(lo);
+
+#ifdef _DEBUG
+ MethodDesc * pMD = MethodDesc::GetMethodDescFromStubAddr(pEntryPoint);
+ _ASSERTE(PTR_HOST_TO_TADDR(this) == PTR_HOST_TO_TADDR(pMD));
+#endif
+
+ return pEntryPoint;
+}
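+
+ // Note (added commentary): the binary search above relies on temporary entry
+ // point index i resolving back to the i-th method desc in the chunk, and on
+ // method descs within a chunk being laid out in increasing address order, so
+ // comparing the recovered MethodDesc* against 'this' is a valid probe.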
+
+#ifndef DACCESS_COMPILE
+//*******************************************************************************
+void MethodDesc::SetTemporaryEntryPoint(LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker)
+{
+ WRAPPER_NO_CONTRACT;
+
+ GetMethodDescChunk()->EnsureTemporaryEntryPointsCreated(pLoaderAllocator, pamTracker);
+
+ PTR_PCODE pSlot = GetAddrOfSlot();
+ _ASSERTE(*pSlot == NULL);
+ *pSlot = GetTemporaryEntryPoint();
+
+ if (RequiresStableEntryPoint())
+ {
+ // The rest of the system assumes that certain methods always have stable entrypoints.
+ // Create them now.
+ GetOrCreatePrecode();
+ }
+}
+
+//*******************************************************************************
+void MethodDescChunk::CreateTemporaryEntryPoints(LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(GetTemporaryEntryPoints() == NULL);
+
+ TADDR temporaryEntryPoints = Precode::AllocateTemporaryEntryPoints(this, pLoaderAllocator, pamTracker);
+
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ // Precodes are allocated only if they provide a more compact representation or if they are required
+ if (temporaryEntryPoints == NULL)
+ {
+ temporaryEntryPoints = AllocateCompactEntryPoints(pLoaderAllocator, pamTracker);
+ }
+#endif // HAS_COMPACT_ENTRYPOINTS
+
+ *(((TADDR *)this)-1) = temporaryEntryPoints;
+
+ _ASSERTE(GetTemporaryEntryPoints() != NULL);
+}
+
+//*******************************************************************************
+void MethodDesc::InterlockedUpdateFlags2(BYTE bMask, BOOL fSet)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ULONG* pLong = (ULONG*)(&m_bFlags2 - 3);
+ static_assert_no_msg(offsetof(MethodDesc, m_bFlags2) % sizeof(LONG) == 3);
+
+#if BIGENDIAN
+ if (fSet)
+ FastInterlockOr(pLong, (ULONG)bMask);
+ else
+ FastInterlockAnd(pLong, ~(ULONG)bMask);
+#else // !BIGENDIAN
+ if (fSet)
+ FastInterlockOr(pLong, (ULONG)bMask << (3 * 8));
+ else
+ FastInterlockAnd(pLong, ~((ULONG)bMask << (3 * 8)));
+#endif // !BIGENDIAN
+}
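+
+ // Layout note (added commentary): m_bFlags2 sits at byte offset 3 within its
+ // aligned ULONG (see the static assert above). On little-endian targets that
+ // byte holds bits 24..31, hence the mask shifted by 3*8; on big-endian
+ // targets the byte is the low one, so the mask is used unshifted.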
+
+//*******************************************************************************
+Precode* MethodDesc::GetOrCreatePrecode()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (HasPrecode())
+ {
+ return GetPrecode();
+ }
+
+ PTR_PCODE pSlot = GetAddrOfSlot();
+ PCODE tempEntry = GetTemporaryEntryPoint();
+
+ PrecodeType requiredType = GetPrecodeType();
+ PrecodeType availableType = PRECODE_INVALID;
+
+ if (!GetMethodDescChunk()->HasCompactEntryPoints())
+ {
+ availableType = Precode::GetPrecodeFromEntryPoint(tempEntry)->GetType();
+ }
+
+ // Allocate the precode if necessary
+ if (requiredType != availableType)
+ {
+ // code:Precode::AllocateTemporaryEntryPoints should always create a precode of the right type for dynamic methods.
+ // If we took this path for dynamic methods, the precode might leak since we may allocate it in the domain-neutral loader heap.
+ _ASSERTE(!IsLCGMethod());
+
+ AllocMemTracker amt;
+ Precode* pPrecode = Precode::Allocate(requiredType, this, GetLoaderAllocator(), &amt);
+ if (FastInterlockCompareExchangePointer(EnsureWritablePages(pSlot), pPrecode->GetEntryPoint(), tempEntry) == tempEntry)
+ amt.SuppressRelease();
+ }
+
+ // Set the flags atomically
+ InterlockedUpdateFlags2(enum_flag2_HasStableEntryPoint | enum_flag2_HasPrecode, TRUE);
+
+ return Precode::GetPrecodeFromEntryPoint(*pSlot);
+}
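+
+ // Note on the race above (added commentary): if the interlocked exchange
+ // loses to another thread, the freshly allocated precode is released when
+ // 'amt' goes out of scope without SuppressRelease(), and the winner's
+ // precode, already published in *pSlot, is the one returned.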
+
+//*******************************************************************************
+BOOL MethodDesc::SetNativeCodeInterlocked(PCODE addr, PCODE pExpected /*=NULL*/
+#ifdef FEATURE_INTERPRETER
+ , BOOL fStable
+#endif
+ )
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ if (HasNativeCodeSlot())
+ {
+#ifdef _TARGET_ARM_
+ _ASSERTE(IsThumbCode(addr) || (addr==NULL));
+ addr &= ~THUMB_CODE;
+
+ if (pExpected != NULL)
+ {
+ _ASSERTE(IsThumbCode(pExpected));
+ pExpected &= ~THUMB_CODE;
+ }
+#endif
+
+ TADDR pSlot = GetAddrOfNativeCodeSlot();
+ NativeCodeSlot value, expected;
+
+ value.SetValueMaybeNull(pSlot, addr | (*dac_cast<PTR_TADDR>(pSlot) & FIXUP_LIST_MASK));
+ expected.SetValueMaybeNull(pSlot, pExpected | (*dac_cast<PTR_TADDR>(pSlot) & FIXUP_LIST_MASK));
+
+#ifdef FEATURE_INTERPRETER
+ BOOL fRet = FALSE;
+
+ fRet = FastInterlockCompareExchangePointer(
+ EnsureWritablePages(reinterpret_cast<TADDR*>(pSlot)),
+ (TADDR&)value,
+ (TADDR&)expected) == (TADDR&)expected;
+
+ if (!fRet)
+ {
+ // Can always replace NULL.
+ expected.SetValueMaybeNull(pSlot, (*dac_cast<PTR_TADDR>(pSlot) & FIXUP_LIST_MASK));
+ fRet = FastInterlockCompareExchangePointer(
+ EnsureWritablePages(reinterpret_cast<TADDR*>(pSlot)),
+ (TADDR&)value,
+ (TADDR&)expected) == (TADDR&)expected;
+ }
+ return fRet;
+#else // FEATURE_INTERPRETER
+ return FastInterlockCompareExchangePointer(EnsureWritablePages(reinterpret_cast<TADDR*>(pSlot)),
+ (TADDR&)value, (TADDR&)expected) == (TADDR&)expected;
+#endif // FEATURE_INTERPRETER
+ }
+
+#ifdef FEATURE_INTERPRETER
+ PCODE pFound = FastInterlockCompareExchangePointer(GetAddrOfSlot(), addr, pExpected);
+ if (fStable)
+ {
+ InterlockedUpdateFlags2(enum_flag2_HasStableEntryPoint, TRUE);
+ }
+ return (pFound == pExpected);
+#else
+ _ASSERTE(pExpected == NULL);
+ return SetStableEntryPointInterlocked(addr);
+#endif
+}
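+
+ // Note (added commentary): both 'value' and 'expected' above re-apply the
+ // FIXUP_LIST_MASK bit currently stored in the slot, so the compare-exchange
+ // swaps only the code address and leaves the "has fixup list" bit intact.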
+
+//*******************************************************************************
+BOOL MethodDesc::SetStableEntryPointInterlocked(PCODE addr)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ _ASSERTE(!HasPrecode());
+
+ PCODE pExpected = GetTemporaryEntryPoint();
+ PTR_PCODE pSlot = GetAddrOfSlot();
+ EnsureWritablePages(pSlot);
+
+ BOOL fResult = FastInterlockCompareExchangePointer(pSlot, addr, pExpected) == pExpected;
+
+ InterlockedUpdateFlags2(enum_flag2_HasStableEntryPoint, TRUE);
+
+ return fResult;
+}
+
+#ifdef FEATURE_INTERPRETER
+BOOL MethodDesc::SetEntryPointInterlocked(PCODE addr)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ _ASSERTE(!HasPrecode());
+
+ PCODE pExpected = GetTemporaryEntryPoint();
+ PTR_PCODE pSlot = GetAddrOfSlot();
+
+ BOOL fResult = FastInterlockCompareExchangePointer(pSlot, addr, pExpected) == pExpected;
+
+ return fResult;
+}
+
+#endif // FEATURE_INTERPRETER
+
+//*******************************************************************************
+void NDirectMethodDesc::InterlockedSetNDirectFlags(WORD wFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ // Since InterlockedCompareExchange only works on ULONGs,
+ // we'll have to operate on the entire ULONG. Ugh.
+
+ WORD *pFlags = &ndirect.m_wFlags;
+
+ EnsureWritablePages(pFlags);
+
+ // Make sure that m_wFlags is aligned on a 4 byte boundary
+ _ASSERTE( ( ((size_t) pFlags) & (sizeof(ULONG)-1) ) == 0);
+
+ // Ensure we won't be reading or writing outside the bounds of the NDirectMethodDesc.
+ _ASSERTE((BYTE*)pFlags >= (BYTE*)this);
+ _ASSERTE((BYTE*)pFlags+sizeof(ULONG) <= (BYTE*)(this+1));
+
+ DWORD dwMask = 0;
+
+ // Set the flags in the mask
+ ((WORD*)&dwMask)[0] |= wFlags;
+
+ // Now, slam all 32 bits atomically.
+ FastInterlockOr((DWORD*)pFlags, dwMask);
+}
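+
+ // Net effect (added commentary): only the requested bits of the flags WORD
+ // change; the other WORD sharing the ULONG is OR'ed with zero and therefore
+ // untouched, which is what makes the 32-bit interlocked OR safe for a
+ // 16-bit field.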
+
+#ifndef CROSSGEN_COMPILE
+//*******************************************************************************
+LPVOID NDirectMethodDesc::FindEntryPoint(HINSTANCE hMod) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ char const * funcName = NULL;
+
+ FARPROC pFunc = NULL, pFuncW = NULL;
+
+ // Handle ordinals.
+ if (GetEntrypointName()[0] == '#')
+ {
+ long ordinal = atol(GetEntrypointName()+1);
+ return GetProcAddress(hMod, (LPCSTR)(size_t)((UINT16)ordinal));
+ }
+
+ // Just look for the unmangled name. If it is a Unicode function, we are going
+ // to need to check for the 'W' API because it takes precedence over the
+ // unmangled one (on NT some APIs have unmangled ANSI exports).
+ pFunc = GetProcAddress(hMod, funcName = GetEntrypointName());
+ if ((pFunc != NULL && IsNativeAnsi()) || IsNativeNoMangled())
+ {
+ return (LPVOID)pFunc;
+ }
+
+ // Allocate space for a copy of the entry point name.
+ int dstbufsize = (int)(sizeof(char) * (strlen(GetEntrypointName()) + 1 + 20)); // +1 for the null terminator
+ // +20 for various decorations
+
+ // Allocate a single character before the start of this string to enable quickly
+ // prepending a '_' character if we look for a stdcall entrypoint later on.
+ LPSTR szAnsiEntrypointName = ((LPSTR)_alloca(dstbufsize + 1)) + 1;
+
+ // Copy the name so we can mangle it.
+ strcpy_s(szAnsiEntrypointName,dstbufsize,GetEntrypointName());
+ DWORD nbytes = (DWORD)(strlen(GetEntrypointName()) + 1);
+ szAnsiEntrypointName[nbytes] = '\0'; // Add an extra '\0'.
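+    // For illustration (hypothetical import): an EntryPoint of "MessageBox"
+    // may be probed below as "MessageBoxA" or "MessageBoxW", and on x86 a
+    // __stdcall import may additionally be probed as "_MessageBox@16".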
+
+#if !defined(FEATURE_CORECLR) && defined(_WIN64)
+ //
+ // Forward {Get|Set}{Window|Class}Long to their corresponding Ptr version
+ //
+
+ // LONG SetWindowLong( HWND hWnd, int nIndex, LONG dwNewLong);
+ // LONG_PTR SetWindowLongPtr(HWND hWnd, int nIndex, LONG_PTR dwNewLong);
+ //
+ // LONG GetWindowLong( HWND hWnd, int nIndex);
+ // LONG_PTR GetWindowLongPtr(HWND hWnd, int nIndex);
+ //
+ // DWORD GetClassLong( HWND hWnd, int nIndex);
+ // ULONG_PTR GetClassLongPtr( HWND hWnd, int nIndex);
+ //
+ // DWORD SetClassLong( HWND hWnd, int nIndex, LONG dwNewLong);
+ // ULONG_PTR SetClassLongPtr( HWND hWnd, int nIndex, LONG_PTR dwNewLong);
+
+ if (!SString::_stricmp(GetEntrypointName(), "SetWindowLong") ||
+ !SString::_stricmp(GetEntrypointName(), "GetWindowLong") ||
+ !SString::_stricmp(GetEntrypointName(), "SetClassLong") ||
+ !SString::_stricmp(GetEntrypointName(), "GetClassLong"))
+ {
+ szAnsiEntrypointName[nbytes-1] = 'P';
+ szAnsiEntrypointName[nbytes+0] = 't';
+ szAnsiEntrypointName[nbytes+1] = 'r';
+ szAnsiEntrypointName[nbytes+2] = '\0';
+ szAnsiEntrypointName[nbytes+3] = '\0';
+ nbytes += 3;
+ }
+#endif // !FEATURE_CORECLR && _WIN64
+
+    // If the program wants the ANSI API, look for the mangled 'A' name.
+ if (IsNativeAnsi())
+ {
+ szAnsiEntrypointName[nbytes-1] = 'A';
+ pFunc = GetProcAddress(hMod, funcName = szAnsiEntrypointName);
+ }
+ else
+ {
+ szAnsiEntrypointName[nbytes-1] = 'W';
+ pFuncW = GetProcAddress(hMod, szAnsiEntrypointName);
+
+ // This overrides the unmangled API. See the comment above.
+ if (pFuncW != NULL)
+ {
+ pFunc = pFuncW;
+ funcName = szAnsiEntrypointName;
+ }
+ }
+
+ if (!pFunc)
+ {
+#if !defined(FEATURE_CORECLR)
+ if (hMod == CLRGetModuleHandle(W("kernel32.dll")))
+ {
+ szAnsiEntrypointName[nbytes-1] = '\0';
+ if (0==strcmp(szAnsiEntrypointName, "MoveMemory") ||
+ 0==strcmp(szAnsiEntrypointName, "CopyMemory"))
+ {
+ pFunc = GetProcAddress(hMod, funcName = "RtlMoveMemory");
+ }
+ else if (0==strcmp(szAnsiEntrypointName, funcName = "FillMemory"))
+ {
+ pFunc = GetProcAddress(hMod, funcName = "RtlFillMemory");
+ }
+ else if (0==strcmp(szAnsiEntrypointName, funcName = "ZeroMemory"))
+ {
+ pFunc = GetProcAddress(hMod, funcName = "RtlZeroMemory");
+ }
+ }
+#endif // !FEATURE_CORECLR
+
+#if defined(_TARGET_X86_)
+ /* try mangled names only for __stdcalls */
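+        // e.g. (illustrative) a __stdcall function "Sum" taking two ints is
+        // exported on x86 as "_Sum@8": a leading underscore, then '@' and the
+        // number of argument bytes.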
+ if (!pFunc && IsStdCall())
+ {
+ UINT16 numParamBytesMangle = GetStackArgumentSize();
+
+ if (IsStdCallWithRetBuf())
+ {
+ _ASSERTE(numParamBytesMangle >= sizeof(LPVOID));
+ numParamBytesMangle -= (UINT16)sizeof(LPVOID);
+ }
+
+ szAnsiEntrypointName[-1] = '_';
+ sprintf_s(szAnsiEntrypointName + nbytes - 1, dstbufsize - (nbytes - 1), "@%ld", (ULONG)numParamBytesMangle);
+ pFunc = GetProcAddress(hMod, funcName = szAnsiEntrypointName - 1);
+ }
+#endif // _TARGET_X86_
+ }
+
+ return (LPVOID)pFunc;
+}
+#endif // CROSSGEN_COMPILE
+
+#if defined(FEATURE_MIXEDMODE) && !defined(CROSSGEN_COMPILE)
+//*******************************************************************************
+void NDirectMethodDesc::InitEarlyBoundNDirectTarget()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ _ASSERTE(IsEarlyBound());
+
+ if (IsClassConstructorTriggeredAtLinkTime())
+ {
+ GetMethodTable()->CheckRunClassInitThrowing();
+ }
+
+ const void *target = GetModule()->GetInternalPInvokeTarget(GetRVA());
+ _ASSERTE(target != 0);
+
+ if (HeuristicDoesThisLookLikeAGetLastErrorCall((LPBYTE)target))
+ target = (BYTE*) FalseGetLastError;
+
+    // As long as we've set the NDirect target field we don't need to backpatch the import thunk glue.
+    // All NDirect calls go through the NDirect target, so if it's updated, then we won't go into
+    // NDirectImportThunk(). In fact, backpatching the import thunk glue leads to race conditions.
+ SetNDirectTarget((LPVOID)target);
+}
+#endif // FEATURE_MIXEDMODE && !CROSSGEN_COMPILE
+
+//*******************************************************************************
+void MethodDesc::ComputeSuppressUnmanagedCodeAccessAttr(IMDInternalImport *pImport)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_CORECLR
+ // We only care about this bit for NDirect and ComPlusCall
+ if (!IsNDirect() && !IsComPlusCall())
+ return;
+
+ BOOL hasAttr = FALSE;
+ HRESULT hr = pImport->GetCustomAttributeByName(GetMemberDef(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL);
+ IfFailThrow(hr);
+ hasAttr = (hr == S_OK);
+
+
+ if (IsNDirect())
+ ((NDirectMethodDesc*)this)->SetSuppressUnmanagedCodeAccessAttr(hasAttr);
+
+#ifdef FEATURE_COMINTEROP
+ if (IsComPlusCall())
+ ((ComPlusCallMethodDesc*)this)->SetSuppressUnmanagedCodeAccessAttr(hasAttr);
+#endif
+
+#endif // !FEATURE_CORECLR
+}
+
+//*******************************************************************************
+BOOL MethodDesc::HasSuppressUnmanagedCodeAccessAttr()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_CORECLR
+ return TRUE;
+#else // FEATURE_CORECLR
+
+ // In AppX processes, there is only one full trust AppDomain, so there is never any need to do a security
+ // callout on interop stubs
+ if (AppX::IsAppXProcess())
+ {
+ return TRUE;
+ }
+
+ if (IsNDirect())
+ return ((NDirectMethodDesc*)this)->HasSuppressUnmanagedCodeAccessAttr();
+#ifdef FEATURE_COMINTEROP
+ else if (IsComPlusCall())
+ return ((ComPlusCallMethodDesc*)this)->HasSuppressUnmanagedCodeAccessAttr();
+#endif // FEATURE_COMINTEROP
+ else
+ return FALSE;
+
+#endif // FEATURE_CORECLR
+}
+
+#ifdef FEATURE_COMINTEROP
+//*******************************************************************************
+void ComPlusCallMethodDesc::InitComEventCallInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ MethodTable *pItfMT = GetInterfaceMethodTable();
+ MethodDesc *pItfMD = this;
+ MethodTable *pSrcItfClass = NULL;
+ MethodTable *pEvProvClass = NULL;
+
+ // Retrieve the event provider class.
+ WORD cbExtraSlots = ComMethodTable::GetNumExtraSlots(pItfMT->GetComInterfaceType());
+ WORD itfSlotNum = (WORD) m_pComPlusCallInfo->m_cachedComSlot - cbExtraSlots;
+ pItfMT->GetEventInterfaceInfo(&pSrcItfClass, &pEvProvClass);
+ m_pComPlusCallInfo->m_pEventProviderMD = MemberLoader::FindMethodForInterfaceSlot(pEvProvClass, pItfMT, itfSlotNum);
+
+ // If we could not find the method, then the event provider does not support
+ // this event. This is a fatal error.
+ if (!m_pComPlusCallInfo->m_pEventProviderMD)
+ {
+ // Init the interface MD for error reporting.
+ pItfMD = (ComPlusCallMethodDesc*)pItfMT->GetMethodDescForSlot(itfSlotNum);
+
+ // Retrieve the event provider class name.
+ StackSString ssEvProvClassName;
+ pEvProvClass->_GetFullyQualifiedNameForClass(ssEvProvClassName);
+
+ // Retrieve the COM event interface class name.
+ StackSString ssEvItfName;
+ pItfMT->_GetFullyQualifiedNameForClass(ssEvItfName);
+
+ // Convert the method name to unicode.
+ StackSString ssMethodName(SString::Utf8, pItfMD->GetName());
+
+ // Throw the exception.
+ COMPlusThrow(kTypeLoadException, IDS_EE_METHOD_NOT_FOUND_ON_EV_PROV,
+ ssMethodName.GetUnicode(), ssEvItfName.GetUnicode(), ssEvProvClassName.GetUnicode());
+ }
+}
+#endif // FEATURE_COMINTEROP
+
+#endif // !DACCESS_COMPILE
+
+
+#ifdef DACCESS_COMPILE
+
+//*******************************************************************************
+void
+MethodDesc::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ if (DacHasMethodDescBeenEnumerated(this))
+ {
+ return;
+ }
+
+    // Save away the whole MethodDescChunk: in many places
+    // RecoverChunk is called on a method desc, so the
+    // whole chunk must be available. This also
+    // automatically picks up any prestubs and such.
+ GetMethodDescChunk()->EnumMemoryRegions(flags);
+
+ if (HasPrecode())
+ {
+ GetPrecode()->EnumMemoryRegions(flags);
+ }
+
+ // Need to save the Debug-Info for this method so that we can see it in a debugger later.
+ DebugInfoManager::EnumMemoryRegionsForMethodDebugInfo(flags, this);
+
+    if (!IsNoMetadata() || IsILStub())
+ {
+ // The assembling of the string below implicitly dumps the memory we need.
+
+ StackSString str;
+ TypeString::AppendMethodInternal(str, this, TypeString::FormatSignature|TypeString::FormatNamespace|TypeString::FormatFullInst);
+
+#ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
+ if (flags == CLRDATA_ENUM_MEM_MINI || flags == CLRDATA_ENUM_MEM_TRIAGE)
+ {
+            // we want to save just the method name, so truncate at the open parenthesis
+ SString::Iterator it = str.Begin();
+ if (str.Find(it, W('(')))
+ {
+ // ensure the symbol ends in "()" to minimize regressions
+ // in !analyze assuming the existence of the argument list
+ str.Truncate(++it);
+ str.Append(W(')'));
+ }
+
+ DacMdCacheAddEEName(dac_cast<TADDR>(this), str);
+ }
+#endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
+
+ // The module path is used in the output of !clrstack and !pe if the
+ // module is not available when the minidump is inspected. By retrieving
+ // the path here, the required memory is implicitly dumped.
+ Module* pModule = GetModule();
+ if (pModule)
+ {
+ pModule->GetPath();
+ }
+ }
+
+ // Also, call DacValidateMD to dump the memory it needs. !clrstack calls
+ // DacValidateMD before it retrieves the method name. We don't expect
+ // DacValidateMD to fail, but if it does, ignore the failure and try to assemble the
+ // string anyway so that clients that don't validate the MD still work.
+
+ DacValidateMD(this);
+
+ DacSetMethodDescEnumerated(this);
+
+}
+
+//*******************************************************************************
+void
+StoredSigMethodDesc::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ // 'this' already done, see below.
+ DacEnumMemoryRegion(m_pSig, m_cSig);
+}
+
+//*******************************************************************************
+void
+MethodDescChunk::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ DAC_CHECK_ENUM_THIS();
+ EMEM_OUT(("MEM: %p MethodDescChunk\n", dac_cast<TADDR>(this)));
+
+ DacEnumMemoryRegion(dac_cast<TADDR>(this), SizeOf());
+
+ PTR_MethodTable pMT = GetMethodTable();
+
+ if (pMT.IsValid())
+ {
+ pMT->EnumMemoryRegions(flags);
+ }
+
+ if (HasTemporaryEntryPoints())
+ {
+ SIZE_T size;
+
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ if (HasCompactEntryPoints())
+ {
+ size = SizeOfCompactEntryPoints(GetCount());
+ }
+ else
+#endif // HAS_COMPACT_ENTRYPOINTS
+ {
+ size = Precode::SizeOfTemporaryEntryPoints(GetTemporaryEntryPoints(), GetCount());
+ }
+
+ DacEnumMemoryRegion(GetTemporaryEntryPoints(), size);
+ }
+
+ MethodDesc * pMD = GetFirstMethodDesc();
+ MethodDesc * pOldMD = NULL;
+ while (pMD != NULL && pMD != pOldMD)
+ {
+ pOldMD = pMD;
+ EX_TRY
+ {
+ if (pMD->IsMethodImpl())
+ {
+ pMD->GetMethodImpl()->EnumMemoryRegions(flags);
+ }
+ }
+ EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED
+
+ EX_TRY
+ {
+ if (pMD->HasStoredSig())
+ {
+ dac_cast<PTR_StoredSigMethodDesc>(pMD)->EnumMemoryRegions(flags);
+ }
+
+            // Check whether the next MethodDesc is within the bounds of the current chunk
+ TADDR pNext = dac_cast<TADDR>(pMD) + pMD->SizeOf();
+ TADDR pEnd = dac_cast<TADDR>(this) + this->SizeOf();
+
+ pMD = (pNext < pEnd) ? PTR_MethodDesc(pNext) : NULL;
+ }
+ EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED
+ }
+}
+
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+//*******************************************************************************
+MethodDesc *MethodDesc::GetInterfaceMD()
+{
+ CONTRACT (MethodDesc*) {
+ THROWS;
+ GC_TRIGGERS;
+ INSTANCE_CHECK;
+ PRECONDITION(!IsInterface());
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ } CONTRACT_END;
+ MethodTable *pMT = GetMethodTable();
+ RETURN(pMT->ReverseInterfaceMDLookup(GetSlot()));
+}
+#endif // !DACCESS_COMPILE
+
+PTR_LoaderAllocator MethodDesc::GetLoaderAllocator()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetLoaderModule()->GetLoaderAllocator();
+}
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+REFLECTMETHODREF MethodDesc::GetStubMethodInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ REFLECTMETHODREF retVal;
+ REFLECTMETHODREF methodRef = (REFLECTMETHODREF)AllocateObject(MscorlibBinder::GetClass(CLASS__STUBMETHODINFO));
+ GCPROTECT_BEGIN(methodRef);
+
+ methodRef->SetMethod(this);
+ LoaderAllocator *pLoaderAllocatorOfMethod = this->GetLoaderAllocator();
+ if (pLoaderAllocatorOfMethod->IsCollectible())
+ methodRef->SetKeepAlive(pLoaderAllocatorOfMethod->GetExposedObject());
+
+ retVal = methodRef;
+ GCPROTECT_END();
+
+ return retVal;
+}
+#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
+
+#ifndef DACCESS_COMPILE
+typedef void (*WalkValueTypeParameterFnPtr)(Module *pModule, mdToken token, Module *pDefModule, mdToken tkDefToken, SigPointer *ptr, SigTypeContext *pTypeContext, void *pData);
+
+void MethodDesc::WalkValueTypeParameters(MethodTable *pMT, WalkValueTypeParameterFnPtr function, void *pData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ ULONG numArgs = 0;
+ Module *pModule = this->GetModule();
+ SigPointer ptr = this->GetSigPointer();
+
+ // skip over calling convention.
+ ULONG callConv = 0;
+ IfFailThrowBF(ptr.GetCallingConvInfo(&callConv), BFA_BAD_SIGNATURE, pModule);
+
+ // If calling convention is generic, skip GenParamCount
+ if (callConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ {
+ IfFailThrowBF(ptr.GetData(NULL), BFA_BAD_SIGNATURE, pModule);
+ }
+
+ IfFailThrowBF(ptr.GetData(&numArgs), BFA_BAD_SIGNATURE, pModule);
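+    // (Illustrative: per the ECMA-335 MethodDefSig layout, a signature such as
+    // "int Foo<T>(string, MyStruct)" stores the calling-convention byte with
+    // IMAGE_CEE_CS_CALLCONV_GENERIC set, the generic parameter count (1), the
+    // argument count (2), then the return type and each parameter type, which
+    // the loop below walks.)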
+
+ SigTypeContext typeContext(this, TypeHandle(pMT));
+
+ // iterate over the return type and parameters
+ for (DWORD j = 0; j <= numArgs; j++)
+ {
+ CorElementType type = ptr.PeekElemTypeClosed(pModule, &typeContext);
+ if (type != ELEMENT_TYPE_VALUETYPE)
+ goto moveToNextToken;
+
+ mdToken token;
+ Module *pTokenModule;
+ token = ptr.PeekValueTypeTokenClosed(pModule, &typeContext, &pTokenModule);
+
+ if (token == mdTokenNil)
+ goto moveToNextToken;
+
+ DWORD dwAttrType;
+ Module *pDefModule;
+ mdToken defToken;
+
+ dwAttrType = 0;
+ if (ClassLoader::ResolveTokenToTypeDefThrowing(pTokenModule, token, &pDefModule, &defToken))
+ {
+ if (function != NULL)
+ function(pModule, token, pDefModule, defToken, &ptr, &typeContext, pData);
+ }
+
+moveToNextToken:
+ // move to next argument token
+ IfFailThrowBF(ptr.SkipExactlyOne(), BFA_BAD_SIGNATURE, pModule);
+ }
+
+ if (!IsZapped() && !IsCompilationProcess() && !HaveValueTypeParametersBeenWalked())
+ {
+ SetValueTypeParametersWalked();
+ }
+}
+
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+void CheckForEquivalenceAndLoadType(Module *pModule, mdToken token, Module *pDefModule, mdToken defToken, const SigParser *ptr, SigTypeContext *pTypeContext, void *pData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ BOOL *pHasEquivalentParam = (BOOL *)pData;
+
+ if (IsTypeDefEquivalent(defToken, pDefModule))
+ {
+ *pHasEquivalentParam = TRUE;
+ SigPointer sigPtr(*ptr);
+ TypeHandle th = sigPtr.GetTypeHandleThrowing(pModule, pTypeContext);
+ }
+}
+
+BOOL MethodDesc::HasTypeEquivalentStructParameters()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BOOL fHasTypeEquivalentStructParameters = FALSE;
+ if (DoesNotHaveEquivalentValuetypeParameters())
+ return FALSE;
+
+ WalkValueTypeParameters(this->GetMethodTable(), CheckForEquivalenceAndLoadType, &fHasTypeEquivalentStructParameters);
+
+ if (!fHasTypeEquivalentStructParameters && !IsZapped())
+ SetDoesNotHaveEquivalentValuetypeParameters();
+
+ return fHasTypeEquivalentStructParameters;
+}
+
+#endif // FEATURE_TYPEEQUIVALENCE
+
+PrecodeType MethodDesc::GetPrecodeType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PrecodeType precodeType = PRECODE_INVALID;
+
+#ifdef HAS_REMOTING_PRECODE
+ if (IsRemotingInterceptedViaPrestub() || (IsComPlusCall() && !IsStatic()))
+ {
+ precodeType = PRECODE_REMOTING;
+ }
+ else
+#endif // HAS_REMOTING_PRECODE
+#ifdef HAS_FIXUP_PRECODE
+ if (!RequiresMethodDescCallingConvention())
+ {
+ // Use the more efficient fixup precode if possible
+ precodeType = PRECODE_FIXUP;
+ }
+ else
+#endif // HAS_FIXUP_PRECODE
+ {
+ precodeType = PRECODE_STUB;
+ }
+
+ return precodeType;
+}
+
+#endif // !DACCESS_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+#ifndef DACCESS_COMPILE
+void ComPlusCallMethodDesc::InitRetThunk()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef _TARGET_X86_
+ if (m_pComPlusCallInfo->m_pRetThunk != NULL)
+ return;
+
+    // Record the fact that we are writing into the ComPlusCallMethodDesc
+ g_IBCLogger.LogMethodDescAccess(this);
+
+ UINT numStackBytes = CbStackPop();
+
+ LPVOID pRetThunk = ComPlusCall::GetRetThunk(numStackBytes);
+
+ FastInterlockCompareExchangePointer<void *>(&m_pComPlusCallInfo->m_pRetThunk, pRetThunk, NULL);
+#endif // _TARGET_X86_
+}
+#endif //!DACCESS_COMPILE
+#endif // FEATURE_COMINTEROP
+
+#ifndef DACCESS_COMPILE
+void MethodDesc::PrepareForUseAsADependencyOfANativeImageWorker()
+{
+ STANDARD_VM_CONTRACT;
+
+    // This function ensures that a method is ready for use as a dependency of a native image.
+    // The current requirement is only that value types in the signature can be resolved to their
+    // type defs as far as possible. (If the method is actually called, then this will not throw,
+    // but there are cases where we call this method without knowing whether it will actually be
+    // called or merely accessed as a native image dependency.) This explains:
+    // - the contract (STANDARD_VM_CONTRACT): this method should be callable only when general
+    //   purpose VM code can be called;
+    // - the TRY/CATCH: this function should not introduce failures.
+
+ EX_TRY
+ {
+ WalkValueTypeParameters(this->GetMethodTable(), NULL, NULL);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+ _ASSERTE(IsZapped() || HaveValueTypeParametersBeenWalked());
+}
+#endif //!DACCESS_COMPILE
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif // _MSC_VER: warning C4244
diff --git a/src/vm/method.hpp b/src/vm/method.hpp
new file mode 100644
index 0000000000..f6ae19061e
--- /dev/null
+++ b/src/vm/method.hpp
@@ -0,0 +1,3775 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// method.hpp
+//
+
+//
+// See the book of the runtime entry for overall design:
+// file:../../doc/BookOfTheRuntime/ClassLoader/MethodDescDesign.doc
+//
+
+#ifndef _METHOD_H
+#define _METHOD_H
+
+#ifndef BINDER
+
+#include "cor.h"
+#include "util.hpp"
+#include "clsload.hpp"
+#include "codeman.h"
+#include "class.h"
+#include "siginfo.hpp"
+#include "declsec.h"
+#include "methodimpl.h"
+#include "typedesc.h"
+#include <stddef.h>
+#include "eeconfig.h"
+#include "precode.h"
+
+#ifndef FEATURE_PREJIT
+#include "fixuppointer.h"
+#endif
+#else // BINDER
+
+#include "fixuppointer.h"
+
+#define COMPLUSCALL_METHOD_DESC_ALIGNPAD_BYTES 3 // # bytes required to pad ComPlusCallMethodDesc to correct size
+
+#endif // BINDER
+
+class Stub;
+class FCallMethodDesc;
+class FieldDesc;
+class NDirect;
+class MethodDescChunk;
+struct LayoutRawFieldInfo;
+class InstantiatedMethodDesc;
+class DictionaryLayout;
+class Dictionary;
+class GCCoverageInfo;
+class DynamicMethodDesc;
+class ReJitManager;
+
+typedef DPTR(FCallMethodDesc) PTR_FCallMethodDesc;
+typedef DPTR(ArrayMethodDesc) PTR_ArrayMethodDesc;
+typedef DPTR(DynamicMethodDesc) PTR_DynamicMethodDesc;
+typedef DPTR(InstantiatedMethodDesc) PTR_InstantiatedMethodDesc;
+typedef DPTR(GCCoverageInfo) PTR_GCCoverageInfo; // see code:GCCoverageInfo::savedCode
+
+#ifdef FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
+GVAL_DECL(DWORD, g_MiniMetaDataBuffMaxSize);
+GVAL_DECL(TADDR, g_MiniMetaDataBuffAddress);
+#endif // FEATURE_MINIMETADATA_IN_TRIAGEDUMPS
+
+EXTERN_C VOID STDCALL NDirectImportThunk();
+
+#define METHOD_TOKEN_REMAINDER_BIT_COUNT 14
+#define METHOD_TOKEN_REMAINDER_MASK ((1 << METHOD_TOKEN_REMAINDER_BIT_COUNT) - 1)
+#define METHOD_TOKEN_RANGE_BIT_COUNT (24 - METHOD_TOKEN_REMAINDER_BIT_COUNT)
+#define METHOD_TOKEN_RANGE_MASK ((1 << METHOD_TOKEN_RANGE_BIT_COUNT) - 1)
+
+//=============================================================
+// Splits methoddef token into two pieces for
+// storage inside a methoddesc.
+//=============================================================
+FORCEINLINE UINT16 GetTokenRange(mdToken tok)
+{
+ LIMITED_METHOD_CONTRACT;
+ return (UINT16)((tok>>METHOD_TOKEN_REMAINDER_BIT_COUNT) & METHOD_TOKEN_RANGE_MASK);
+}
+
+FORCEINLINE VOID SplitToken(mdToken tok, UINT16 *ptokrange, UINT16 *ptokremainder)
+{
+ LIMITED_METHOD_CONTRACT;
+ *ptokrange = (UINT16)((tok>>METHOD_TOKEN_REMAINDER_BIT_COUNT) & METHOD_TOKEN_RANGE_MASK);
+ *ptokremainder = (UINT16)(tok & METHOD_TOKEN_REMAINDER_MASK);
+}
+
+FORCEINLINE mdToken MergeToken(UINT16 tokrange, UINT16 tokremainder)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (tokrange << METHOD_TOKEN_REMAINDER_BIT_COUNT) | tokremainder | mdtMethodDef;
+}
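+
+// Worked example (illustrative): for tok = 0x0600ABCD the low 24 bits are 0x00ABCD,
+// so SplitToken yields tokremainder = 0xABCD & 0x3FFF = 0x2BCD and
+// tokrange = (0xABCD >> 14) & 0x3FF = 2. MergeToken(2, 0x2BCD) then reconstructs
+// (2 << 14) | 0x2BCD | mdtMethodDef == 0x0600ABCD.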
+
+// The MethodDesc is a union of several types. The following
+// 3-bit field determines which type it is. Note that JIT'ed/non-JIT'ed
+// is not represented here because this isn't known until the
+// method is executed for the first time. Because any thread could
+// change this bit, it has to be done in a place where access is
+// synchronized.
+
+// **** NOTE: if you add any new flags, make sure you add them to ClearFlagsOnUpdate
+// so that when a method is replaced its relevant flags are updated
+
+// Used in MethodDesc
+enum MethodClassification
+{
+ mcIL = 0, // IL
+ mcFCall = 1, // FCall (also includes tlbimped ctor, Delegate ctor)
+ mcNDirect = 2, // N/Direct
+ mcEEImpl = 3, // special method; implementation provided by EE (like Delegate Invoke)
+ mcArray = 4, // Array ECall
+ mcInstantiated = 5, // Instantiated generic methods, including descriptors
+ // for both shared and unshared code (see InstantiatedMethodDesc)
+
+#ifdef FEATURE_COMINTEROP
+ // This needs a little explanation. There are MethodDescs on MethodTables
+ // which are Interfaces. These have the mdcInterface bit set. Then there
+ // are MethodDescs on MethodTables that are Classes, where the method is
+ // exposed through an interface. These do not have the mdcInterface bit set.
+ //
+ // So, today, a dispatch through an 'mdcInterface' MethodDesc is either an
+ // error (someone forgot to look up the method in a class' VTable) or it is
+ // a case of COM Interop.
+
+ mcComInterop = 6,
+#endif // FEATURE_COMINTEROP
+ mcDynamic = 7, // for method desc with no metadata behind
+ mcCount,
+};
+
+
+// All flags in the MethodDesc now reside in a single 16-bit field.
+
+enum MethodDescClassification
+{
+ // Method is IL, FCall etc., see MethodClassification above.
+ mdcClassification = 0x0007,
+ mdcClassificationCount = mdcClassification+1,
+
+ // Note that layout of code:MethodDesc::s_ClassificationSizeTable depends on the exact values
+ // of mdcHasNonVtableSlot and mdcMethodImpl
+
+ // Has local slot (vs. has real slot in MethodTable)
+ mdcHasNonVtableSlot = 0x0008,
+
+ // Method is a body for a method impl (MI_MethodDesc, MI_NDirectMethodDesc, etc)
+ // where the function explicitly implements IInterface.foo() instead of foo().
+ mdcMethodImpl = 0x0010,
+
+ // Method is static
+ mdcStatic = 0x0020,
+
+ // Temporary Security Interception.
+    // Methods can now be intercepted by security. An intercepted method behaves
+    // as if it were an interpreted method. The Prestub at the top of the method desc
+    // is replaced by an interception stub. Therefore, no back patching will occur.
+    // We picked this approach to minimize the number of variations given IL and native
+    // code with edit and continue. E&C will need to find the real intercepted method
+    // and, if it is intercepted, change the real stub. If E&C is enabled then there
+    // is no back patching, and E&C needs to fix the pre-stub.
+ mdcIntercepted = 0x0040,
+
+ // Method requires linktime security checks.
+ mdcRequiresLinktimeCheck = 0x0080,
+
+#if defined(CLR_STANDALONE_BINDER)
+ // Binder optimization - we have already parsed the signature
+ // of this method desc and it contains no user-defined value types (including enums)
+ mdcSignatureHasNoValueTypes = 0x0100,
+
+ // This should contain bits used for binder-internal purposes - reset these
+ // before persisting the method descs
+ mdcBinderBits = mdcSignatureHasNoValueTypes,
+#else
+ // Method requires inheritance security checks.
+ // If this bit is set, then this method demands inheritance permissions
+ // or a method that this method overrides demands inheritance permissions
+ // or both.
+ mdcRequiresInheritanceCheck = 0x0100,
+
+ // The method that this method overrides requires an inheritance security check.
+ // This bit is used as an optimization to avoid looking up overridden methods
+ // during the inheritance check.
+ mdcParentRequiresInheritanceCheck = 0x0200,
+#endif
+
+    // Duplicate method. Set when a method needs to be placed in multiple slots in the
+    // method table because it could not be packed into one slot; e.g. a method
+    // providing the implementation for two interfaces, a MethodImpl, etc.
+ mdcDuplicate = 0x0400,
+
+ // Has this method been verified?
+ mdcVerifiedState = 0x0800,
+
+ // Is the method verifiable? It needs to be verified first to determine this
+ mdcVerifiable = 0x1000,
+
+ // Is this method ineligible for inlining?
+ mdcNotInline = 0x2000,
+
+ // Is the method synchronized
+ mdcSynchronized = 0x4000,
+
+ // Does the method's slot number require all 16 bits
+ mdcRequiresFullSlotNumber = 0x8000
+};
+
+#define METHOD_MAX_RVA 0x7FFFFFFF
+
+
+// The size of this structure needs to be a multiple of MethodDesc::ALIGNMENT
+//
+// @GENERICS:
+// Method descriptors for methods belonging to instantiated types may be shared between compatible instantiations.
+// Hence for reflection, and elsewhere where exact types are important, it's necessary to pair a method desc
+// with the exact owning type handle.
+//
+// See genmeth.cpp for details of instantiated generic method descriptors.
+//
+// A MethodDesc is the representation of a method of a type. These live in code:MethodDescChunk which in
+// turn lives in code:EEClass. They are conceptually cold (we do not expect to access them in normal
+// program execution, but we often fall short of that goal).
+//
+// A MethodDesc knows how to get at its metadata token code:GetMemberDef, its chunk
+// code:MethodDescChunk, which in turn knows how to get at its type code:MethodTable.
+// It also knows how to get at its IL code (code:IMAGE_COR_ILMETHOD).
+class MethodDesc
+{
+ friend class EEClass;
+ friend class MethodTableBuilder;
+ friend class ArrayClass;
+ friend class NDirect;
+ friend class MethodDescChunk;
+ friend class InstantiatedMethodDesc;
+ friend class MethodImpl;
+ friend class CheckAsmOffsets;
+ friend class ClrDataAccess;
+
+ friend class MethodDescCallSite;
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+public:
+
+ enum
+ {
+#ifdef _WIN64
+ ALIGNMENT_SHIFT = 3,
+#else
+ ALIGNMENT_SHIFT = 2,
+#endif
+ ALIGNMENT = (1<<ALIGNMENT_SHIFT),
+ ALIGNMENT_MASK = (ALIGNMENT-1)
+ };
+
+#ifdef _DEBUG
+
+ // These are set only for MethodDescs but every time we want to use the debugger
+ // to examine these fields, the code has the thing stored in a MethodDesc*.
+ // So...
+ LPCUTF8 m_pszDebugMethodName;
+ LPCUTF8 m_pszDebugClassName;
+ LPCUTF8 m_pszDebugMethodSignature;
+ FixupPointer<PTR_MethodTable> m_pDebugMethodTable;
+
+ PTR_GCCoverageInfo m_GcCover;
+
+#endif // _DEBUG
+
+ inline BOOL HasStableEntryPoint()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (m_bFlags2 & enum_flag2_HasStableEntryPoint) != 0;
+ }
+
+ inline PCODE GetStableEntryPoint()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(HasStableEntryPoint());
+ return GetMethodEntryPoint();
+ }
+
+ BOOL SetStableEntryPointInterlocked(PCODE addr);
+
+#ifdef FEATURE_INTERPRETER
+ BOOL SetEntryPointInterlocked(PCODE addr);
+#endif // FEATURE_INTERPRETER
+
+ BOOL HasTemporaryEntryPoint();
+ PCODE GetTemporaryEntryPoint();
+
+ void SetTemporaryEntryPoint(LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker);
+
+ inline BOOL HasPrecode()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (m_bFlags2 & enum_flag2_HasPrecode) != 0;
+ }
+
+#ifndef BINDER
+ inline Precode* GetPrecode()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ PRECONDITION(HasPrecode());
+ Precode* pPrecode = Precode::GetPrecodeFromEntryPoint(GetStableEntryPoint());
+ PREFIX_ASSUME(pPrecode != NULL);
+ return pPrecode;
+ }
+#endif // !BINDER
+
+ inline BOOL MayHavePrecode()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ return !MayHaveNativeCode() || IsRemotingInterceptedViaPrestub();
+ }
+
+#ifdef BINDER
+ inline void SetHasPrecode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_bFlags2 |= (enum_flag2_HasPrecode | enum_flag2_HasStableEntryPoint);
+ }
+
+ inline void ResetHasPrecode()
+ {
+
+ LIMITED_METHOD_CONTRACT;
+ m_bFlags2 &= ~enum_flag2_HasPrecode;
+ m_bFlags2 |= enum_flag2_HasStableEntryPoint;
+ }
+#endif // BINDER
+
+ void InterlockedUpdateFlags2(BYTE bMask, BOOL fSet);
+
+ Precode* GetOrCreatePrecode();
+
+#ifdef FEATURE_PREJIT
+ Precode * GetSavedPrecode(DataImage *image);
+ Precode * GetSavedPrecodeOrNull(DataImage *image);
+#endif // FEATURE_PREJIT
+
+    // Given a code address, return the MethodDesc whenever possible
+ //
+ static MethodDesc * GetMethodDescFromStubAddr(PCODE addr, BOOL fSpeculative = FALSE);
+
+
+ DWORD GetAttrs() const;
+
+ DWORD GetImplAttrs();
+
+ // This function can lie if a method impl was used to implement
+ // more than one method on this class. Use GetName(int) to indicate
+ // which slot you are interested in.
+ // See the TypeString class for better control over name formatting.
+ LPCUTF8 GetName();
+
+ LPCUTF8 GetName(USHORT slot);
+
+ void PrecomputeNameHash();
+ BOOL MightHaveName(ULONG nameHashValue);
+
+#ifndef BINDER
+ FORCEINLINE LPCUTF8 GetNameOnNonArrayClass()
+ {
+ WRAPPER_NO_CONTRACT;
+ LPCSTR szName;
+ if (FAILED(GetMDImport()->GetNameOfMethodDef(GetMemberDef(), &szName)))
+ {
+ szName = NULL;
+ }
+ return szName;
+ }
+#endif // !BINDER
+
+ COUNT_T GetStableHash();
+
+ // Non-zero for InstantiatedMethodDescs
+ DWORD GetNumGenericMethodArgs();
+
+ // Return the number of class type parameters that are in scope for this method
+ DWORD GetNumGenericClassArgs()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return GetMethodTable()->GetNumGenericArgs();
+ }
+
+ // True if this is a method descriptor for an instantiated generic method
+ // whose method type arguments are the formal type parameters of the generic method
+ // NOTE: the declaring class may not be the generic type definition e.g. consider C<int>.m<T>
+ BOOL IsGenericMethodDefinition() const;
+
+    // True if the declaring type or the instantiation of the method (if any) contains formal generic type parameters
+ BOOL ContainsGenericVariables();
+
+ Module* GetDefiningModuleForOpenMethod();
+
+    // True if this is a class and method instantiation on <__Canon,...,__Canon>
+ BOOL IsTypicalSharedInstantiation();
+
+
+    // True if and only if this is a method descriptor for:
+ // 1. a non-generic method or a generic method at its typical method instantiation
+ // 2. in a non-generic class or a typical instantiation of a generic class
+ // This method can be called on a non-restored method desc
+ BOOL IsTypicalMethodDefinition() const;
+
+ // Force a load of the (typical) constraints on the type parameters of a typical method definition,
+ // detecting cyclic bounds on class and method type parameters.
+ void LoadConstraintsForTypicalMethodDefinition(BOOL *pfHasCircularClassConstraints,
+ BOOL *pfHasCircularMethodConstraints,
+ ClassLoadLevel level = CLASS_LOADED);
+
+ DWORD IsClassConstructor()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsMdClassConstructor(GetAttrs(), GetName());
+ }
+
+ DWORD IsClassConstructorOrCtor()
+ {
+ WRAPPER_NO_CONTRACT;
+ DWORD dwAttrs = GetAttrs();
+ if (IsMdRTSpecialName(dwAttrs))
+ {
+ LPCUTF8 name = GetName();
+ return IsMdInstanceInitializer(dwAttrs, name) || IsMdClassConstructor(dwAttrs, name);
+ }
+ return FALSE;
+ }
+
+ inline void SetHasMethodImplSlot()
+ {
+ m_wFlags |= mdcMethodImpl;
+ }
+
+ inline BOOL HasMethodImplSlot()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (mdcMethodImpl & m_wFlags);
+ }
+
+ FORCEINLINE BOOL IsMethodImpl()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifndef BINDER
+ // Once we stop allocating dummy MethodImplSlot in MethodTableBuilder::WriteMethodImplData,
+ // the check for NULL will become unnecessary.
+ return HasMethodImplSlot() && (GetMethodImpl()->GetSlots() != NULL);
+#else // BINDER
+ return FALSE;
+#endif // BINDER
+ }
+
+ inline DWORD IsStatic()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // This bit caches the IsMdStatic(GetAttrs()) check. We used to assert it here, but not doing it anymore. GetAttrs()
+ // accesses metadata that is not compatible with contracts of this method. The metadata access can fail, the metadata
+ // are not available during shutdown, the metadata access can take locks. It is not worth it to code around all these
+ // just for the assert.
+ // _ASSERTE((((m_wFlags & mdcStatic) != 0) == (IsMdStatic(flags) != 0)));
+
+ return (m_wFlags & mdcStatic) != 0;
+ }
+
+ inline void SetStatic()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wFlags |= mdcStatic;
+ }
+
+ inline void ClearStatic()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wFlags &= ~mdcStatic;
+ }
+
+ inline BOOL IsIL()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return mcIL == GetClassification() || mcInstantiated == GetClassification();
+ }
+
+ //================================================================
+ // Generics-related predicates etc.
+
+ // True if the method descriptor is an instantiation of a generic method.
+ inline BOOL HasMethodInstantiation() const;
+
+ // True if the method descriptor is either an instantiation of
+ // a generic method or is an instance method in an instantiated class (or both).
+ BOOL HasClassOrMethodInstantiation()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (HasClassInstantiation() || HasMethodInstantiation());
+ }
+
+ BOOL HasClassOrMethodInstantiation_NoLogging() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (HasClassInstantiation_NoLogging() || HasMethodInstantiation());
+ }
+
+ inline BOOL HasClassInstantiation() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetMethodTable()->HasInstantiation();
+ }
+
+ inline BOOL HasClassInstantiation_NoLogging() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetMethodTable_NoLogging()->HasInstantiation();
+ }
+
+ // Return the instantiation for an instantiated generic method
+ // Return NULL if not an instantiated method
+ // To get the (representative) instantiation of the declaring class use GetMethodTable()->GetInstantiation()
+ // NOTE: This will assert if you try to get the instantiation of a generic method def in a non-typical class
+ // e.g. C<int>.m<U> will fail but C<T>.m<U> will succeed
+ Instantiation GetMethodInstantiation() const;
+
+ // As above, but will succeed on C<int>.m<U>
+ // To do this it might force a load of the typical parent
+ Instantiation LoadMethodInstantiation();
+
+ // Return a pointer to the method dictionary for an instantiated generic method
+ // The initial slots in a method dictionary are the type arguments themselves
+ // Return NULL if not an instantiated method
+ Dictionary* GetMethodDictionary();
+ DictionaryLayout* GetDictionaryLayout();
+
+ InstantiatedMethodDesc* AsInstantiatedMethodDesc() const;
+
+ BaseDomain *GetDomain();
+
+ ReJitManager * GetReJitManager();
+
+ PTR_LoaderAllocator GetLoaderAllocator();
+
+ // GetLoaderAllocatorForCode returns the allocator with the responsibility for allocation.
+ // This is called from GetMulticallableAddrOfCode when allocating a small trampoline stub for the method.
+ // Normally a method in a shared domain will allocate memory for stubs in the shared domain.
+ // That has to be different for DynamicMethod as they need to be allocated always in the AppDomain
+ // that created the method.
+ LoaderAllocator * GetLoaderAllocatorForCode();
+
+ // GetDomainSpecificLoaderAllocator returns the collectable loader allocator for collectable types
+ // and the loader allocator in the current domain for non-collectable types
+ LoaderAllocator * GetDomainSpecificLoaderAllocator();
+
+ inline BOOL IsDomainNeutral();
+
+#ifdef BINDER
+ MdilModule* GetLoaderModule();
+
+ MdilModule* GetZapModule();
+#else // !BINDER
+ Module* GetLoaderModule();
+
+ Module* GetZapModule();
+#endif
+
+ // Does this immediate item live in an NGEN module?
+ BOOL IsZapped();
+
+ // Strip off method and class instantiation if present and replace by the typical instantiation
+ // e.g. C<int>.m<string> -> C<T>.m<U>. Does not modify the MethodDesc, but returns
+ // the appropriate stripped MethodDesc.
+ // This is the identity function on non-instantiated method descs in non-instantiated classes
+ MethodDesc* LoadTypicalMethodDefinition();
+
+ // Strip off the method instantiation (if present) and replace by the typical instantiation
+ // e.g. // C<int>.m<string> -> C<int>.m<U>. Does not modify the MethodDesc, but returns
+ // the appropriate stripped MethodDesc.
+ // This is the identity function on non-instantiated method descs
+ MethodDesc* StripMethodInstantiation();
+
+ // Return the instantiation of a method's enclosing class
+ // Return NULL if the enclosing class is not instantiated
+ // If the method code is shared then this might be a *representative* instantiation
+ //
+ // See GetExactClassInstantiation if you need to get the exact
+ // instantiation of a shared method desc.
+ Instantiation GetClassInstantiation() const;
+
+ // Is the code shared between multiple instantiations of class or method?
+ // If so, then when compiling the code we might need to look up tokens
+    // in the class or method dictionary. Also, when debugging, the exact generic arguments
+ // need to be ripped off the stack, either from the this pointer or from one of the
+ // extra args below.
+ BOOL IsSharedByGenericInstantiations(); // shared code of any kind
+
+ BOOL IsSharedByGenericMethodInstantiations(); // shared due to method instantiation
+
+ // How does a method shared between generic instantiations get at
+ // the extra instantiation information at runtime? Only one of the following three
+ // will ever hold:
+ //
+ // AcquiresInstMethodTableFromThis()
+ // The method is in a generic class but is not itself a
+ // generic method (the normal case). Furthermore a "this" pointer
+ // is available and we can get the exact instantiation from it.
+ //
+ // RequiresInstMethodTableArg()
+ // The method is shared between generic classes but is not
+ // itself generic. Furthermore no "this" pointer is given
+ // (e.g. a value type method), so we pass in the exact-instantiation
+ // method table as an extra argument.
+ // i.e. per-inst static methods in shared-code instantiated generic
+ // classes (e.g. static void MyClass<string>::m())
+ // i.e. shared-code instance methods in instantiated generic
+ // structs (e.g. void MyValueType<string>::m())
+ //
+ // RequiresInstMethodDescArg()
+    //      The method is itself generic and its code is shared between generic
+    //      instantiations. Furthermore the "this" pointer (if any) cannot supply
+    //      the method instantiation, so we pass in the
+    //      exact-instantiation method desc as an extra argument.
+ // i.e. shared-code instantiated generic methods
+ //
+ // These are used for direct calls to instantiated generic methods
+ // e.g. call void C::m<string>() implemented by calculating dict(m<string>) at compile-time and passing it as an extra parameter
+ // call void C::m<!0>() implemented by calculating dict(m<!0>) at run-time (if the caller lives in shared-class code)
+
+ BOOL AcquiresInstMethodTableFromThis();
+ BOOL RequiresInstMethodTableArg();
+ BOOL RequiresInstMethodDescArg();
+ BOOL RequiresInstArg();
+
+ // Can this method handle be given out to reflection for use in a MethodInfo
+ // object?
+ BOOL IsRuntimeMethodHandle();
+
+ // Given a method table of an object and a method that comes from some
+ // superclass of the class of that object, find that superclass.
+ MethodTable * GetExactDeclaringType(MethodTable * ownerOrSubType);
+
+ // Given a type handle of an object and a method that comes from some
+ // superclass of the class of that object, find the instantiation of
+ // that superclass, i.e. the class instantiation which will be relevant
+ // to interpreting the signature of the method. The type handle of
+ // the object does not need to be given in all circumstances, in
+ // particular it is only needed for MethodDescs pMD that
+ // return true for pMD->RequiresInstMethodTableArg() or
+ // pMD->RequiresInstMethodDescArg(). In other cases it is
+ // allowed to be null.
+ //
+ // Will return NULL if the method is not in a generic class.
+ Instantiation GetExactClassInstantiation(TypeHandle possibleObjType);
+
+
+ BOOL SatisfiesMethodConstraints(TypeHandle thParent, BOOL fThrowIfNotSatisfied = FALSE);
+
+
+ BOOL HasSameMethodDefAs(MethodDesc * pMD);
+
+ //================================================================
+ // Classifications of kinds of MethodDescs.
+
+ inline BOOL IsRuntimeSupplied()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return mcFCall == GetClassification()
+ || mcArray == GetClassification();
+ }
+
+
+ inline DWORD IsArray() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return mcArray == GetClassification();
+ }
+
+ inline DWORD IsEEImpl() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return mcEEImpl == GetClassification();
+ }
+
+ inline DWORD IsNoMetadata() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (mcDynamic == GetClassification());
+ }
+
+ inline PTR_DynamicMethodDesc AsDynamicMethodDesc();
+ inline bool IsDynamicMethod();
+ inline bool IsILStub();
+ inline bool IsLCGMethod();
+
+ inline DWORD IsNDirect()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return mcNDirect == GetClassification();
+ }
+
+ inline DWORD IsInterface()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetMethodTable()->IsInterface();
+ }
+
+ void ComputeSuppressUnmanagedCodeAccessAttr(IMDInternalImport *pImport);
+ BOOL HasSuppressUnmanagedCodeAccessAttr();
+
+#ifdef FEATURE_COMINTEROP
+ inline DWORD IsComPlusCall()
+ {
+ WRAPPER_NO_CONTRACT;
+ return mcComInterop == GetClassification();
+ }
+ inline DWORD IsGenericComPlusCall();
+ inline void SetupGenericComPlusCall();
+#else // !FEATURE_COMINTEROP
+    // hardcoded to return FALSE to improve code readability
+ inline DWORD IsComPlusCall()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+ }
+ inline DWORD IsGenericComPlusCall()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+ }
+#endif // !FEATURE_COMINTEROP
+
+ // Update flags in a thread safe manner.
+ WORD InterlockedUpdateFlags(WORD wMask, BOOL fSet);
+
+ inline DWORD IsInterceptedForDeclSecurity()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return m_wFlags & mdcIntercepted;
+ }
+
+ inline void SetInterceptedForDeclSecurity()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wFlags |= mdcIntercepted;
+ }
+
+ inline DWORD IsInterceptedForDeclSecurityCASDemandsOnly()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return m_bFlags2 & enum_flag2_CASDemandsOnly;
+ }
+
+ inline void SetInterceptedForDeclSecurityCASDemandsOnly()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_bFlags2 |= enum_flag2_CASDemandsOnly;
+ }
+
+#ifndef BINDER
+ // If the method is in an Edit and Contine (EnC) module, then
+ // we DON'T want to backpatch this, ever. We MUST always call
+ // through the precode so that we can update the method.
+ inline DWORD IsEnCMethod()
+ {
+ WRAPPER_NO_CONTRACT;
+ Module *pModule = GetModule();
+ PREFIX_ASSUME(pModule != NULL);
+ return pModule->IsEditAndContinueEnabled();
+ }
+#endif // !BINDER
+
+ inline BOOL IsNotInline()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_wFlags & mdcNotInline);
+ }
+
+ inline void SetNotInline(BOOL set)
+ {
+ WRAPPER_NO_CONTRACT;
+ InterlockedUpdateFlags(mdcNotInline, set);
+ }
+
+
+ BOOL IsIntrospectionOnly();
+#ifndef DACCESS_COMPILE
+ VOID EnsureActive();
+#endif
+ CHECK CheckActivated();
+
+
+ //================================================================
+ // REMOTING
+ //
+    // IsRemoting...: These predicates indicate how remoting
+ // intercepts are implemented.
+ //
+ // Remoting intercepts are required for all invocations of methods on
+ // MarshalByRef classes (including virtual calls on methods
+ // which end up invoking a method on the MarshalByRef class).
+ //
+ // Remoting intercepts are implemented by one of the following techniques:
+ // (1) Non-virtual methods: inserting a stub in DoPrestub (for non-virtual calls)
+ // See: IsRemotingInterceptedViaPrestub
+ //
+ // (2) Virtual methods: by transparent proxy vtables, where all the entries in the vtable
+ // are a special hook which traps into the remoting logic
+ // See: IsRemotingInterceptedViaVirtualDispatch (context indicates
+ // if it is a virtual call)
+ //
+ // (3) Non-virtual-calls on virtual methods:
+ // by forcing calls to be indirect and wrapping the
+ // call with a stub returned by GetNonVirtualEntryPointForVirtualMethod.
+ // (this is used when invoking virtual methods non-virtually using 'call')
+ // See: IsRemotingInterceptedViaVirtualDispatch (context indicates
+ // if it is a virtual call)
+ //
+    // Ultimately, essentially all calls go through CTPMethodTable::OnCall in
+ // remoting.cpp.
+ //
+ // Check if this methoddesc needs to be intercepted
+ // by the context code, using a stub.
+ // Also see IsRemotingInterceptedViaVirtualDispatch()
+ BOOL IsRemotingInterceptedViaPrestub();
+
+ // Check if is intercepted by the context code, using the virtual table
+ // of TransparentProxy.
+ // If such a function is called non-virtually, it needs to be handled specially
+ BOOL IsRemotingInterceptedViaVirtualDispatch();
+
+ BOOL MayBeRemotingIntercepted();
+
+#ifndef BINDER
+ //================================================================
+ // Does it represent a one way method call with no out/return parameters?
+#ifdef FEATURE_REMOTING
+ inline BOOL IsOneWay()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ return (S_OK == GetMDImport()->GetCustomAttributeByName(GetMemberDef(),
+ "System.Runtime.Remoting.Messaging.OneWayAttribute",
+ NULL,
+ NULL));
+
+ }
+#endif // FEATURE_REMOTING
+#endif
+
+ //================================================================
+ // FCalls.
+ BOOL IsFCall()
+ {
+ WRAPPER_NO_CONTRACT;
+ return mcFCall == GetClassification();
+ }
+
+ BOOL IsFCallOrIntrinsic();
+
+ BOOL IsQCall();
+
+ //================================================================
+ // Has the method been verified?
+ // This does not mean that the IL is verifiable, just that we have
+    // determined if the IL is verifiable or unverifiable.
+    // (Is this dead code since the JIT now does verification?)
+
+ inline BOOL IsVerified()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_wFlags & mdcVerifiedState;
+ }
+
+ inline void SetIsVerified(BOOL isVerifiable)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ WORD flags = isVerifiable ? (WORD(mdcVerifiedState) | WORD(mdcVerifiable))
+ : (WORD(mdcVerifiedState));
+ InterlockedUpdateFlags(flags, TRUE);
+ }
+
+ inline void ResetIsVerified()
+ {
+ WRAPPER_NO_CONTRACT;
+ InterlockedUpdateFlags(mdcVerifiedState | mdcVerifiable, FALSE);
+ }
+
+ BOOL IsVerifiable();
+
+    // fThrowException is used to prevent the Verifier from
+    // throwing an exception on error.
+    // fForceVerify is to be used by tools that need to
+    // force the verifier to verify code even if the code is fully trusted.
+ HRESULT Verify(COR_ILMETHOD_DECODER* ILHeader,
+ BOOL fThrowException,
+ BOOL fForceVerify);
+
+
+ //================================================================
+ //
+
+ inline void ClearFlagsOnUpdate()
+ {
+ WRAPPER_NO_CONTRACT;
+ ResetIsVerified();
+ SetNotInline(FALSE);
+ }
+
+    // Restore the MethodDesc to its initial, pristine state, so that
+    // it can be reused for new code (e.g. for EnC, method rental, etc.)
+ //
+ // Things to think about before calling this:
+ //
+ // Does the caller need to free up the jitted code for the old IL
+    // (including any other IJitManager data structures)?
+ // Does the caller guarantee thread-safety ?
+ //
+ void Reset();
+
+ //================================================================
+ // About the signature.
+
+ BOOL IsVarArg();
+ BOOL IsVoid();
+ BOOL HasRetBuffArg();
+
+ // Returns the # of bytes of stack used by arguments. Does not include
+ // arguments passed in registers.
+ UINT SizeOfArgStack();
+
+    // Returns the # of bytes to pop after a call. Not necessarily the
+    // same as SizeOfArgStack()!
+ UINT CbStackPop();
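+    // (Illustrative: on x86, callee-pop conventions make CbStackPop() match the
+    // stack argument size, while a vararg call is caller-pop, so its
+    // CbStackPop() can be 0 even when SizeOfArgStack() is not.)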
+
+ //================================================================
+ // Unboxing stubs.
+ //
+    // Return TRUE if this is a special stub used to implement delegates to an
+    // instance method in a value class and/or virtual methods on a value class.
+    //
+    // For every BoxedEntryPointStub there is an associated unboxed-this-MethodDesc
+ // which accepts an unboxed "this" pointer.
+ //
+ // The action of a typical BoxedEntryPointStub is to
+ // bump up the this pointer by one word so that it points to the interior of the object
+ // and then call the underlying unboxed-this-MethodDesc.
+ //
+ // Additionally, if the non-BoxedEntryPointStub is RequiresInstMethodTableArg()
+ // then pass on the MethodTable as an extra argument to the
+ // underlying unboxed-this-MethodDesc.
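+    //
+    // A minimal sketch of the effect (illustrative pseudocode only):
+    //
+    //     RetType BoxedEntryPointStub(Object* pBoxedThis, Args... args)
+    //     {
+    //         // step past the method table pointer to the unboxed payload
+    //         return UnboxedMethod((BYTE*)pBoxedThis + sizeof(void*), args...);
+    //     }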
+ BOOL IsUnboxingStub()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (m_bFlags2 & enum_flag2_IsUnboxingStub) != 0;
+ }
+
+ void SetIsUnboxingStub()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_bFlags2 |= enum_flag2_IsUnboxingStub;
+ }
+
+
+ //================================================================
+ // Instantiating Stubs
+ //
+    // Return TRUE if this is a special stub used to implement an
+ // instantiated generic method or per-instantiation static method.
+ // The action of an instantiating stub is
+ // * pass on a MethodTable or InstantiatedMethodDesc extra argument to shared code
+ BOOL IsInstantiatingStub();
+
+
+ // A wrapper stub is either an unboxing stub or an instantiating stub
+ BOOL IsWrapperStub();
+ MethodDesc *GetWrappedMethodDesc();
+ MethodDesc *GetExistingWrappedMethodDesc();
+
+#ifndef BINDER
+
+ //==================================================================
+ // Access the underlying metadata
+
+ BOOL HasILHeader()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ return IsIL() && !IsUnboxingStub() && GetRVA();
+ }
+
+ COR_ILMETHOD* GetILHeader(BOOL fAllowOverrides = FALSE);
+#endif // !BINDER
+
+ BOOL HasStoredSig()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return IsEEImpl() || IsArray() || IsNoMetadata();
+ }
+
+ PCCOR_SIGNATURE GetSig();
+
+ void GetSig(PCCOR_SIGNATURE *ppSig, DWORD *pcSig);
+ SigParser GetSigParser();
+#ifndef BINDER
+
+
+ // Convenience methods for common signature wrapper types.
+ SigPointer GetSigPointer();
+ Signature GetSignature();
+
+
+ void GetSigFromMetadata(IMDInternalImport * importer,
+ PCCOR_SIGNATURE * ppSig,
+ DWORD * pcSig);
+
+
+ IMDInternalImport* GetMDImport() const
+ {
+ WRAPPER_NO_CONTRACT;
+ Module *pModule = GetModule();
+ PREFIX_ASSUME(pModule != NULL);
+ return pModule->GetMDImport();
+ }
+
+#ifndef DACCESS_COMPILE
+ IMetaDataEmit* GetEmitter()
+ {
+ WRAPPER_NO_CONTRACT;
+ Module *pModule = GetModule();
+ PREFIX_ASSUME(pModule != NULL);
+ return pModule->GetEmitter();
+ }
+
+ IMetaDataImport* GetRWImporter()
+ {
+ WRAPPER_NO_CONTRACT;
+ Module *pModule = GetModule();
+ PREFIX_ASSUME(pModule != NULL);
+ return pModule->GetRWImporter();
+ }
+#endif // !DACCESS_COMPILE
+#endif // !BINDER
+
+#ifdef FEATURE_COMINTEROP
+ WORD GetComSlot();
+ LONG GetComDispid();
+#endif // FEATURE_COMINTEROP
+
+ inline DWORD IsCtor()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsMdInstanceInitializer(GetAttrs(), GetName());
+ }
+
+ inline DWORD IsFinal()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsMdFinal(GetAttrs());
+ }
+
+ inline DWORD IsPrivate()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsMdPrivate(GetAttrs());
+ }
+
+ inline DWORD IsPublic() const
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsMdPublic(GetAttrs());
+ }
+
+ inline DWORD IsProtected() const
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsMdFamily(GetAttrs());
+ }
+
+ inline DWORD IsVirtual()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsMdVirtual(GetAttrs());
+ }
+
+ inline DWORD IsAbstract()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsMdAbstract(GetAttrs());
+ }
+
+ //==================================================================
+ // Flags..
+
+ inline void SetSynchronized()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wFlags |= mdcSynchronized;
+ }
+
+ inline DWORD IsSynchronized()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (m_wFlags & mdcSynchronized) != 0;
+ }
+
+    // Be careful about races with the profiler when using this method. The profiler can
+    // replace the preimplemented code of the method with jitted code.
+ // Avoid code patterns like if(IsPreImplemented()) { PCODE pCode = GetPreImplementedCode(); ... }.
+ // Use PCODE pCode = GetPreImplementedCode(); if (pCode != NULL) { ... } instead.
+ BOOL IsPreImplemented()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return GetPreImplementedCode() != NULL;
+ }
+
+ //==================================================================
+ // The MethodDesc in relation to the VTable it is associated with.
+ // WARNING: Not all MethodDescs have slots, nor do they all have
+ // entries in MethodTables. Beware.
+
+    // Does the method have a virtual slot? Note that methods implementing interfaces
+    // on value types do not have virtual slots, but they are marked as virtual in metadata.
+ inline BOOL IsVtableMethod()
+ {
+ LIMITED_METHOD_CONTRACT;
+ MethodTable *pMT = GetMethodTable();
+ g_IBCLogger.LogMethodTableAccess(pMT);
+ return
+ !IsEnCAddedMethod()
+ // The slot numbers are currently meaningless for
+ // some unboxed-this-generic-method-instantiations
+ && !(pMT->IsValueType() && !IsStatic() && !IsUnboxingStub())
+ && GetSlot() < pMT->GetNumVirtuals();
+ }
+
+ inline BOOL HasNonVtableSlot();
+
+ void SetHasNonVtableSlot()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wFlags |= mdcHasNonVtableSlot;
+ }
+
+ // duplicate methods
+ inline BOOL IsDuplicate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_wFlags & mdcDuplicate) == mdcDuplicate;
+ }
+
+ void SetDuplicate()
+ {
+ LIMITED_METHOD_CONTRACT;
+        // method table is not set up yet
+ //_ASSERTE(!GetClass()->IsInterface());
+ m_wFlags |= mdcDuplicate;
+ }
+
+ //==================================================================
+ // EnC
+
+ inline BOOL IsEnCAddedMethod();
+
+ //==================================================================
+ //
+
+ inline EEClass* GetClass()
+ {
+ WRAPPER_NO_CONTRACT;
+ MethodTable *pMT = GetMethodTable_NoLogging();
+ g_IBCLogger.LogEEClassAndMethodTableAccess(pMT);
+ EEClass *pClass = pMT->GetClass_NoLogging();
+ PREFIX_ASSUME(pClass != NULL);
+ return pClass;
+ }
+
+ inline PTR_MethodTable GetMethodTable() const;
+ inline PTR_MethodTable GetMethodTable_NoLogging() const;
+
+ inline DPTR(RelativeFixupPointer<PTR_MethodTable>) GetMethodTablePtr() const;
+
+ public:
+ inline MethodDescChunk* GetMethodDescChunk() const;
+ inline int GetMethodDescIndex() const;
+    // If this is a method desc (whether non-generic shared-instantiated or exact-instantiated)
+    // inside a shared class, then get the method table for the representative
+    // class.
+ inline MethodTable* GetCanonicalMethodTable();
+
+#ifdef BINDER
+ MdilModule *GetModule();
+#else
+ Module *GetModule() const;
+ Module *GetModule_NoLogging() const;
+
+ Assembly *GetAssembly() const
+ {
+ WRAPPER_NO_CONTRACT;
+ Module *pModule = GetModule();
+ PREFIX_ASSUME(pModule != NULL);
+ return pModule->GetAssembly();
+ }
+#endif // !BINDER
+
+ //==================================================================
+ // The slot number of this method in the corresponding method table.
+ //
+ // Use with extreme caution. The slot number will not be
+ // valid for EnC code or for MethodDescs representing instantiation
+ // of generic methods. It may also not mean what you think it will mean
+ // for strange method descs such as BoxedEntryPointStubs.
+ //
+ // In any case we should be moving to use slot numbers a lot less
+ // since they make the EE code inflexible.
+
+ inline WORD GetSlot()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifndef DACCESS_COMPILE
+ // The DAC build uses this method to test for "sanity" of a MethodDesc, and
+ // doesn't need the assert.
+ _ASSERTE(! IsEnCAddedMethod() || !"Cannot get slot for method added via EnC");
+#endif // !DACCESS_COMPILE
+
+ // Check if this MD is using the packed slot layout
+ if (!RequiresFullSlotNumber())
+ {
+ return (m_wSlotNumber & enum_packedSlotLayout_SlotMask);
+ }
+
+ return m_wSlotNumber;
+ }
+
+ inline VOID SetSlot(WORD wSlotNum)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Check if we have to avoid using the packed slot layout
+ if (wSlotNum > enum_packedSlotLayout_SlotMask)
+ {
+ SetRequiresFullSlotNumber();
+ }
+#ifdef CLR_STANDALONE_BINDER
+ else if (RequiresFullSlotNumber())
+ {
+ ClearRequiresFullSlotNumber();
+ m_wSlotNumber = 0;
+ }
+#endif
+
+ // Set only the portion of m_wSlotNumber we are using
+ if (!RequiresFullSlotNumber())
+ {
+ m_wSlotNumber &= ~enum_packedSlotLayout_SlotMask;
+ m_wSlotNumber |= wSlotNum;
+ }
+ else
+ {
+ m_wSlotNumber = wSlotNum;
+ }
+ }
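+
+ // Illustrative sketch (not from the original sources; pMD is a hypothetical
+ // MethodDesc*): the lower bits of m_wSlotNumber hold the slot and the upper
+ // bits may hold a name hash (see enum_packedSlotLayout_* below), e.g.:
+ //
+ // pMD->SetSlot(5); // fits in enum_packedSlotLayout_SlotMask; packed layout kept
+ // WORD w = pMD->GetSlot(); // returns 5; the name-hash bits are masked off
+ // pMD->SetSlot(0x0400); // exceeds the mask, so mdcRequiresFullSlotNumber is
+ // // set and the whole WORD holds the slot number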
+
+ PTR_PCODE GetAddrOfSlot();
+
+ PTR_MethodDesc GetDeclMethodDesc(UINT32 slotNumber);
+
+protected:
+ inline void SetRequiresFullSlotNumber()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wFlags |= mdcRequiresFullSlotNumber;
+ }
+
+#ifdef CLR_STANDALONE_BINDER
+ inline void ClearRequiresFullSlotNumber()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wFlags &= ~mdcRequiresFullSlotNumber;
+ }
+#endif
+
+ inline DWORD RequiresFullSlotNumber()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (m_wFlags & mdcRequiresFullSlotNumber) != 0;
+ }
+
+public:
+ //==================================================================
+ // Security...
+
+ DWORD GetSecurityFlagsDuringPreStub();
+ DWORD GetSecurityFlagsDuringClassLoad(IMDInternalImport *pInternalImport,
+ mdToken tkMethod, mdToken tkClass,
+ DWORD *dwClassDeclFlags, DWORD *dwClassNullDeclFlags,
+ DWORD *dwMethDeclFlags, DWORD *dwMethNullDeclFlags);
+
+ inline DWORD RequiresLinktimeCheck()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_wFlags & mdcRequiresLinktimeCheck;
+ }
+
+#if defined(CLR_STANDALONE_BINDER)
+ inline BOOL SignatureHasNoValueTypes()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_wFlags & mdcSignatureHasNoValueTypes;
+ }
+
+ inline void SetSignatureHasNoValueTypes()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wFlags |= mdcSignatureHasNoValueTypes;
+ }
+
+ // Clear bits used for binder-internal purposes
+ inline void ClearBinderBits()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wFlags &= ~mdcBinderBits;
+ }
+#else
+ inline DWORD RequiresInheritanceCheck()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_wFlags & mdcRequiresInheritanceCheck;
+ }
+
+ inline DWORD ParentRequiresInheritanceCheck()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_wFlags & mdcParentRequiresInheritanceCheck;
+ }
+#endif
+
+ void SetRequiresLinktimeCheck()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wFlags |= mdcRequiresLinktimeCheck;
+ }
+
+#ifndef BINDER
+ void SetRequiresInheritanceCheck()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wFlags |= mdcRequiresInheritanceCheck;
+ }
+
+ void SetParentRequiresInheritanceCheck()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wFlags |= mdcParentRequiresInheritanceCheck;
+ }
+#endif
+
+ mdMethodDef GetMemberDef() const;
+ mdMethodDef GetMemberDef_NoLogging() const;
+
+#ifdef _DEBUG
+ BOOL SanityCheck();
+#endif // _DEBUG
+
+public:
+
+ void SetMemberDef(mdMethodDef mb);
+
+ //================================================================
+ // Set the offset of this method desc in a chunk table (which allows us
+ // to work back to the method table/module pointer stored at the head of
+ // the table).
+ void SetChunkIndex(MethodDescChunk *pChunk);
+
+ BOOL IsPointingToPrestub();
+#ifdef FEATURE_INTERPRETER
+ BOOL IsReallyPointingToPrestub();
+#endif // FEATURE_INTERPRETER
+
+public:
+
+ // Note: We are skipping the prestub based on additional information from the JIT.
+ // (e.g. that the call is on the same this ptr or that the this ptr is not null).
+ // Thus we can end up with a running NGENed method for which IsPointingToNativeCode is false!
+ BOOL IsPointingToNativeCode()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (!HasStableEntryPoint())
+ return FALSE;
+
+ if (!HasPrecode())
+ return TRUE;
+
+#ifdef BINDER
+ return TRUE;
+#else // !BINDER
+ return GetPrecode()->IsPointingToNativeCode(GetNativeCode());
+#endif // BINDER
+ }
+
+ // Be careful about races with the profiler when using this method. The profiler can
+ // replace the preimplemented code of the method with jitted code.
+ // Avoid code patterns like if(HasNativeCode()) { PCODE pCode = GetNativeCode(); ... }.
+ // Use PCODE pCode = GetNativeCode(); if (pCode != NULL) { ... } instead.
+ BOOL HasNativeCode()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return GetNativeCode() != NULL;
+ }
+
+#ifdef FEATURE_INTERPRETER
+ BOOL SetNativeCodeInterlocked(PCODE addr, PCODE pExpected, BOOL fStable);
+#else // FEATURE_INTERPRETER
+ BOOL SetNativeCodeInterlocked(PCODE addr, PCODE pExpected = NULL);
+#endif // FEATURE_INTERPRETER
+
+ TADDR GetAddrOfNativeCodeSlot();
+
+ BOOL MayHaveNativeCode();
+
+ ULONG GetRVA();
+
+ BOOL IsClassConstructorTriggeredViaPrestub();
+
+public:
+
+ // Returns the preimplemented code of the method if the method has one.
+ // Returns NULL if the method has no preimplemented code.
+ // Be careful about races with the profiler when using this method. The profiler can
+ // replace the preimplemented code of the method with jitted code.
+ PCODE GetPreImplementedCode();
+
+ // Returns address of code to call. The address is good for one immediate invocation only.
+ // Use GetMultiCallableAddrOfCode() to get address that can be invoked multiple times.
+ //
+ // Only call GetSingleCallableAddrOfCode() if you can guarantee that no virtualization is
+ // necessary, or if you can guarantee that it has already happened. For instance, the frame of a
+ // stackwalk has obviously been virtualized as much as it will be.
+ //
+ PCODE GetSingleCallableAddrOfCode()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(!IsGenericMethodDefinition());
+ return GetMethodEntryPoint();
+ }
+
+ // This one is used to implement "ldftn".
+ PCODE GetMultiCallableAddrOfCode(CORINFO_ACCESS_FLAGS accessFlags = CORINFO_ACCESS_LDFTN);
+
+ // Internal version of GetMultiCallableAddrOfCode. Returns NULL if the attempt to acquire a directly
+ // callable entrypoint would result in an unnecessary allocation of an indirection stub. The caller
+ // should use an indirect call via the slot in this case.
+ PCODE TryGetMultiCallableAddrOfCode(CORINFO_ACCESS_FLAGS accessFlags);
+
+ // These return an address after resolving "virtual methods" correctly, including any
+ // handling of context proxies, other thunking layers and also including
+ // instantiation of generic virtual methods if required.
+ // The first one returns an address which cannot be invoked
+ // multiple times. Use GetMultiCallableAddrOfVirtualizedCode() for that.
+ //
+ // The code that implements these was taken verbatim from elsewhere in the
+ // codebase, and there may be subtle differences between the two, e.g. with
+ // regard to thunking.
+ PCODE GetSingleCallableAddrOfVirtualizedCode(OBJECTREF *orThis, TypeHandle staticTH);
+ PCODE GetMultiCallableAddrOfVirtualizedCode(OBJECTREF *orThis, TypeHandle staticTH);
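+
+ // Illustrative sketch (not from the original sources; "obj" and "pMD" are
+ // hypothetical, assuming a GC-protected OBJECTREF and the declared method):
+ //
+ // PCODE target = pMD->GetSingleCallableAddrOfVirtualizedCode(&obj, obj->GetTypeHandle());
+ // // "target" is good for one immediate invocation only; use
+ // // GetMultiCallableAddrOfVirtualizedCode for a reusable address.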
+
+ // The current method entrypoint. It is simply the value of the current method slot.
+ // GetMethodEntryPoint() should be used to get an opaque method entrypoint, for instance
+ // when copying or searching vtables. It should not be used to get an address to call.
+ //
+ // GetSingleCallableAddrOfCode() and GetStableEntryPoint() are aliases with stricter preconditions.
+ // Use these aliases where appropriate.
+ //
+ PCODE GetMethodEntryPoint();
+
+ //*******************************************************************************
+ // Returns the address of the native code. The native code can be one of:
+ // - jitted code if !IsPreImplemented()
+ // - ngened code if IsPreImplemented()
+ PCODE GetNativeCode();
+
+ //================================================================
+ // FindOrCreateAssociatedMethodDesc
+ //
+ // You might think that every MethodDef in the metadata had
+ // one and only one MethodDesc in the source... Well, how wrong
+ // you are :-)
+ //
+ // Some MethodDefs can be associated with more than one MethodDesc.
+ // This can happen because:
+ // (1) The method is an instance method in a struct, which
+ // can be called with either an unboxed "this" pointer or
+ // a "boxed" this pointer.. There is a different MethodDesc for
+ // these two cases.
+ // (2) The method is a generic method. There is one primary
+ // MethodDesc for each generic method, called the GenericMethodDefinition.
+ // This is the one stored in the vtable. New MethodDescs will
+ // be created for instantiations according to the scheme described
+ // elsewhere in this file.
+ // There are also various other stubs associated with MethodDesc, but these stubs
+ // do not result in new MethodDescs.
+ //
+ // All of the above MethodDescs are called "associates" of the primary MethodDesc.
+ // Note that the primary MethodDesc for an instance method on a struct is
+ // the one that accepts an unboxed "this" pointer.
+ //
+ // FindOrCreateAssociatedMethodDesc is the _primary_ routine
+ // in the codebase for getting an associated MethodDesc from a primary MethodDesc.
+ // You should treat this routine as a black box, i.e. just specify the right
+ // parameters and it will do all the hard work of finding the right
+ // MethodDesc for you.
+ //
+ // This routine can be used for "normal" MethodDescs that have nothing
+ // to do with generics. For example, if you need an BoxedEntryPointStub then
+ // you may call this routine to get it. It may also return
+ // the Primary MethodDesc itself if that MethodDesc is suitable given the
+ // parameters.
+ //
+ // NOTE: The behaviour of this method is not thoroughly defined
+ // if pPrimaryMD is not really a "primary" MD. Primary MDs are:
+ // 1. never a generic method instantiation, but instead the
+ // "uninstantiated" generic MD.
+ // 2. never instantiating stubs.
+ // 3. never BoxedEntryPointStubs.
+ //
+ // We assert if cases (1) or (2) occur. However, some places in the
+ // code pass in a BoxedEntryPointStub when pPrimaryMD is a virtual/interface method on
+ // a struct. These cases are confusing and should be rooted
+ // out: it is probably preferable in terms
+ // of correctness to pass in the corresponding non-unboxing MD.
+ //
+ // allowCreate may be set to FALSE to enforce that the method searched
+ // should already be in existence - thus preventing creation and GCs during
+ // inappropriate times.
+ //
+ static MethodDesc* FindOrCreateAssociatedMethodDesc(MethodDesc* pPrimaryMD,
+ MethodTable *pExactMT,
+ BOOL forceBoxedEntryPoint,
+ Instantiation methodInst,
+ BOOL allowInstParam,
+ BOOL forceRemotableMethod = FALSE,
+ BOOL allowCreate = TRUE,
+ ClassLoadLevel level = CLASS_LOADED);
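+
+ // Illustrative sketch (not from the original sources; pPrimaryMD/pExactMT are
+ // hypothetical): asking for the boxed-entry-point variant of a struct's
+ // virtual method:
+ //
+ // MethodDesc * pBoxedMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ // pPrimaryMD, pExactMT,
+ // TRUE, // forceBoxedEntryPoint
+ // Instantiation(), // no method instantiation
+ // FALSE); // allowInstParam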
+
+ // Normalize methoddesc for reflection
+ static MethodDesc* FindOrCreateAssociatedMethodDescForReflection(MethodDesc *pMethod,
+ TypeHandle instType,
+ Instantiation methodInst);
+
+ // True if a MD is a funny BoxedEntryPointStub (not from the method table) or
+ // an MD for a generic instantiation... In other words, the MethodDescs and the
+ // MethodTable are guaranteed to be "tightly-knit", i.e. if one is present in
+ // an NGEN image then the other will be, and if one is "used" at runtime then
+ // the other will be too.
+ BOOL IsTightlyBoundToMethodTable();
+
+ // For method descriptors which are non-generic this is the identity function
+ // (except it returns the primary descriptor, not a BoxedEntryPointStub).
+ //
+ // For a generic method definition C<T>.m<U> this will return
+ // C<__Canon>.m<__Canon>
+ //
+ // allowCreate may be set to FALSE to enforce that the method searched
+ // should already be in existence - thus preventing creation and GCs during
+ // inappropriate times.
+ //
+ MethodDesc * FindOrCreateTypicalSharedInstantiation(BOOL allowCreate = TRUE);
+
+ // Given an object and a method descriptor for an instantiation of
+ // a virtualized generic method, get the
+ // corresponding instantiation of the target of a call.
+ MethodDesc *ResolveGenericVirtualMethod(OBJECTREF *orThis);
+
+
+public:
+
+ // does this function return an object reference?
+ MetaSig::RETURNTYPE ReturnsObject(
+#ifdef _DEBUG
+ bool supportStringConstructors = false
+#endif
+ );
+
+
+ void Destruct();
+
+public:
+ // In general you don't want to call GetCallTarget - you want to
+ // use either "call" directly or call MethodDesc::GetSingleCallableAddrOfVirtualizedCode and
+ // then "CallTarget". Note that GetCallTarget is approximately GetSingleCallableAddrOfCode
+ // but with the additional weirdness that class-based virtual calls (but not interface calls nor calls
+ // on proxies) are resolved to their target. Because of this, many clients of "Call" (see above)
+ // end up doing some resolution for interface calls and/or proxies themselves.
+ PCODE GetCallTarget(OBJECTREF* pThisObj, TypeHandle ownerType = TypeHandle());
+
+ MethodImpl *GetMethodImpl();
+
+
+#if defined(FEATURE_PREJIT ) && !defined(DACCESS_COMPILE)
+ //================================================================
+ // Precompilation (NGEN)
+
+ void Save(DataImage *image);
+ void Fixup(DataImage *image);
+ void FixupSlot(DataImage *image, PVOID p, SSIZE_T offset, ZapRelocationType type = IMAGE_REL_BASED_PTR);
+
+ //
+ // Helper class used to regroup MethodDesc chunks before saving them into an NGen image.
+ // The regrouping takes into account IBC data and optional NGen-specific MethodDesc members.
+ //
+ class SaveChunk
+ {
+ DataImage * m_pImage;
+
+ ZapStoredStructure * m_pFirstNode;
+ MethodDescChunk * m_pLastChunk;
+
+ typedef enum _MethodPriorityEnum
+ {
+ NoFlags = -1,
+ HotMethodDesc = 0x0,
+ WriteableMethodDesc = 0x1,
+ ColdMethodDesc = 0x2,
+ ColdWriteableMethodDesc= ColdMethodDesc | WriteableMethodDesc
+
+ } MethodPriorityEnum;
+
+ struct MethodInfo
+ {
+ MethodDesc * m_pMD;
+ //MethodPriorityEnum
+ BYTE m_priority;
+
+ BOOL m_fHasPrecode:1;
+ BOOL m_fHasNativeCodeSlot:1;
+ BOOL m_fHasFixupList:1;
+ };
+
+ InlineSArray<MethodInfo, 20> m_methodInfos;
+
+ static int __cdecl MethodInfoCmp(const void* a_, const void* b_);
+
+ SIZE_T GetSavedMethodDescSize(MethodInfo * pMethodInfo);
+
+ void SaveOneChunk(COUNT_T start, COUNT_T count, ULONG size, DWORD priority);
+
+ public:
+ SaveChunk(DataImage * image)
+ : m_pImage(image), m_pFirstNode(NULL), m_pLastChunk(NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ void Append(MethodDesc * pMD);
+
+ ZapStoredStructure * Save();
+ };
+
+ bool CanSkipDoPrestub(MethodDesc * callerMD,
+ CorInfoIndirectCallReason *pReason,
+ CORINFO_ACCESS_FLAGS accessFlags = CORINFO_ACCESS_ANY);
+
+ // This is different from !IsRestored() in that it checks if restoring
+ // will ever be needed for this ngened data-structure.
+ // This is to be used at ngen time of a dependent module to determine
+ // if it can be accessed directly, or if the restoring mechanism needs
+ // to be hooked in.
+ BOOL NeedsRestore(DataImage *image, BOOL fAssumeMethodTableRestored = FALSE)
+ {
+ WRAPPER_NO_CONTRACT;
+ return ComputeNeedsRestore(image, NULL, fAssumeMethodTableRestored);
+ }
+
+ BOOL ComputeNeedsRestore(DataImage *image, TypeHandleList *pVisited, BOOL fAssumeMethodTableRestored = FALSE);
+
+ //
+ // After the zapper compiles all code in a module it may attempt
+ // to populate entries in all dictionaries
+ // associated with instantiations of generic methods. This is an optional step - nothing will
+ // go wrong at runtime except we may get more one-off calls to JIT_GenericHandle.
+ // Although these are one-off, we prefer to avoid them since they touch metadata
+ // pages.
+ //
+ // Fully populating a dictionary may in theory load more types, methods etc. However
+ // for the moment only those entries that refer to things that
+ // are already loaded will be filled in.
+ void PrepopulateDictionary(DataImage * image, BOOL nonExpansive);
+
+#endif // FEATURE_PREJIT && !DACCESS_COMPILE
+
+ TADDR GetFixupList();
+
+ BOOL IsRestored_NoLogging();
+ BOOL IsRestored();
+ void CheckRestore(ClassLoadLevel level = CLASS_LOADED);
+
+ //================================================================
+ // Running the Prestub preparation step.
+
+ // The stub produced by the prestub requires the method desc to be passed
+ // in a dedicated register. Used to implement stubs shared between
+ // MethodDescs (e.g. PInvoke stubs).
+ BOOL RequiresMethodDescCallingConvention(BOOL fEstimateForChunk = FALSE);
+
+ // Returns true if the method must always have a stable entrypoint.
+ BOOL RequiresStableEntryPoint(BOOL fEstimateForChunk = FALSE);
+
+ //
+ // Backpatch method slots
+ //
+ // Arguments:
+ // pMT - cached value of code:MethodDesc::GetMethodTable()
+ // pDispatchingMT - method table of the object that the method is being dispatched on, can be NULL.
+ // fFullBackPatch - indicates whether to patch all possible slots, including the ones
+ // expensive to patch
+ //
+ // Return value:
+ // stable entry point (code:MethodDesc::GetStableEntryPoint())
+ //
+ PCODE DoBackpatch(MethodTable * pMT, MethodTable * pDispatchingMT, BOOL fFullBackPatch);
+
+ PCODE DoPrestub(MethodTable *pDispatchingMT);
+
+ PCODE MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, DWORD flags, DWORD flags2);
+
+ VOID GetMethodInfo(SString &namespaceOrClassName, SString &methodName, SString &methodSignature);
+ VOID GetMethodInfoWithNewSig(SString &namespaceOrClassName, SString &methodName, SString &methodSignature);
+ VOID GetMethodInfoNoSig(SString &namespaceOrClassName, SString &methodName);
+ VOID GetFullMethodInfo(SString& fullMethodSigName);
+
+ BOOL IsCritical()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasCriticalTransparentInfo());
+ return (m_bFlags2 & enum_flag2_Transparency_Mask) != enum_flag2_Transparency_Transparent;
+ }
+
+ BOOL IsTreatAsSafe()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasCriticalTransparentInfo());
+ return (m_bFlags2 & enum_flag2_Transparency_Mask) == enum_flag2_Transparency_TreatAsSafe;
+ }
+
+ BOOL IsTransparent()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(HasCriticalTransparentInfo());
+ return !IsCritical();
+ }
+
+ BOOL HasCriticalTransparentInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_bFlags2 & enum_flag2_Transparency_Mask) != enum_flag2_Transparency_Unknown;
+ }
+
+ void SetCriticalTransparentInfo(BOOL fIsCritical, BOOL fIsTreatAsSafe)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // TreatAsSafe has to imply critical
+ _ASSERTE(fIsCritical || !fIsTreatAsSafe);
+
+ EnsureWritablePages(this);
+ InterlockedUpdateFlags2(
+ static_cast<BYTE>(fIsTreatAsSafe ? enum_flag2_Transparency_TreatAsSafe :
+ fIsCritical ? enum_flag2_Transparency_Critical :
+ enum_flag2_Transparency_Transparent),
+ TRUE);
+
+ _ASSERTE(HasCriticalTransparentInfo());
+ }
+
+ BOOL RequiresLinkTimeCheckHostProtectionOnly()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_bFlags2 & enum_flag2_HostProtectionLinkCheckOnly) != 0;
+ }
+
+ void SetRequiresLinkTimeCheckHostProtectionOnly()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_bFlags2 |= enum_flag2_HostProtectionLinkCheckOnly;
+ }
+
+ BOOL HasTypeEquivalentStructParameters()
+#ifndef FEATURE_TYPEEQUIVALENCE
+ {
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+ }
+#else
+ ;
+#endif
+#ifdef BINDER
+ typedef void (*WalkValueTypeParameterFnPtr)(MdilModule *pModule, mdToken token, const SigParser *ptr, SigTypeContext *pTypeContext, void *pData);
+#else
+ typedef void (*WalkValueTypeParameterFnPtr)(Module *pModule, mdToken token, Module *pDefModule, mdToken tkDefToken, const SigParser *ptr, SigTypeContext *pTypeContext, void *pData);
+#endif
+
+ void WalkValueTypeParameters(MethodTable *pMT, WalkValueTypeParameterFnPtr function, void *pData);
+
+ void PrepareForUseAsADependencyOfANativeImage()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (!IsZapped() && !HaveValueTypeParametersBeenWalked())
+ PrepareForUseAsADependencyOfANativeImageWorker();
+ }
+
+ void PrepareForUseAsADependencyOfANativeImageWorker();
+
+ //================================================================
+ // The actual data stored in a MethodDesc follows.
+
+protected:
+ enum {
+ // There are flag bits available for use here (currently 5 flag bits are available); however, new bits are hard to come by, so any new flag bits should
+ // have a fairly strong justification for existence.
+ enum_flag3_TokenRemainderMask = 0x3FFF, // This must equal METHOD_TOKEN_REMAINDER_MASK calculated higher in this file
+ // These are separate to allow the flags space available and used to be obvious here
+ // and for the logic that splits the token to be algorithmically generated based on the
+ // #define
+ enum_flag3_HasForwardedValuetypeParameter = 0x4000, // Indicates that a type-forwarded type is used as a valuetype parameter (this flag is only valid for ngenned items)
+ enum_flag3_ValueTypeParametersWalked = 0x4000, // Indicates that all typeref's in the signature of the method have been resolved to typedefs (or that process failed) (this flag is only valid for non-ngenned methods)
+ enum_flag3_DoesNotHaveEquivalentValuetypeParameters = 0x8000, // Indicates that we have verified that there are no equivalent valuetype parameters for this method
+ };
+ UINT16 m_wFlags3AndTokenRemainder;
+
+ BYTE m_chunkIndex;
+
+ enum {
+ // enum_flag2_HasPrecode implies that enum_flag2_HasStableEntryPoint is set.
+ enum_flag2_HasStableEntryPoint = 0x01, // The method entrypoint is stable (either precode or actual code)
+ enum_flag2_HasPrecode = 0x02, // Precode has been allocated for this method
+
+ enum_flag2_IsUnboxingStub = 0x04,
+ enum_flag2_HasNativeCodeSlot = 0x08, // Has slot for native code
+
+ enum_flag2_Transparency_Mask = 0x30,
+ enum_flag2_Transparency_Unknown = 0x00, // The transparency has not been computed yet
+ enum_flag2_Transparency_Transparent = 0x10, // Method is transparent
+ enum_flag2_Transparency_Critical = 0x20, // Method is critical
+ enum_flag2_Transparency_TreatAsSafe = 0x30, // Method is treat as safe. Also implied critical.
+
+ // CAS Demands: Demands for Permissions that are CAS Permissions. CAS Perms are those
+ // that derive from CodeAccessPermission and need a stackwalk to evaluate demands
+ // Non-CAS perms are those that don't need a stackwalk and don't derive from CodeAccessPermission. The implementor
+ // specifies the behavior on a demand. Examples: CAS: FileIOPermission. Non-CAS: PrincipalPermission.
+ // This bit gets set if the demands are BCL CAS demands only. Even if there are non-BCL CAS demands, we don't set this
+ // bit.
+ enum_flag2_CASDemandsOnly = 0x40,
+
+ enum_flag2_HostProtectionLinkCheckOnly = 0x80, // Method has LinkTime check due to HP only.
+ };
+ BYTE m_bFlags2;
+
+ // The slot number of this MethodDesc in the vtable array.
+ // Note that we may store other information in the high bits if available --
+ // see enum_packedSlotLayout and mdcRequiresFullSlotNumber for details.
+ WORD m_wSlotNumber;
+
+ enum {
+ enum_packedSlotLayout_SlotMask = 0x03FF,
+ enum_packedSlotLayout_NameHashMask = 0xFC00
+ };
+
+ WORD m_wFlags;
+
+
+
+public:
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+public:
+ inline DWORD GetClassification() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (m_wFlags & mdcClassification);
+ }
+
+ inline void SetClassification(DWORD classification)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE((m_wFlags & mdcClassification) == 0);
+ m_wFlags |= classification;
+ }
+
+ inline BOOL HasNativeCodeSlot()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (m_bFlags2 & enum_flag2_HasNativeCodeSlot) != 0;
+ }
+
+ inline void SetHasNativeCodeSlot()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_bFlags2 |= enum_flag2_HasNativeCodeSlot;
+ }
+
+ static const SIZE_T s_ClassificationSizeTable[];
+
+ static SIZE_T GetBaseSize(DWORD classification)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(classification < mdcClassificationCount);
+ return s_ClassificationSizeTable[classification];
+ }
+
+ SIZE_T GetBaseSize()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetBaseSize(GetClassification());
+ }
+
+ SIZE_T SizeOf();
+
+ WORD InterlockedUpdateFlags3(WORD wMask, BOOL fSet);
+
+#ifdef FEATURE_COMINTEROP
+ inline BOOL DoesNotHaveEquivalentValuetypeParameters()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (m_wFlags3AndTokenRemainder & enum_flag3_DoesNotHaveEquivalentValuetypeParameters) != 0;
+ }
+
+ inline void SetDoesNotHaveEquivalentValuetypeParameters()
+ {
+ LIMITED_METHOD_CONTRACT;
+ InterlockedUpdateFlags3(enum_flag3_DoesNotHaveEquivalentValuetypeParameters, TRUE);
+ }
+#endif //FEATURE_COMINTEROP
+
+ inline BOOL HasForwardedValuetypeParameter()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ // This should only be asked of Zapped MethodDescs
+ _ASSERTE(IsZapped());
+ return (m_wFlags3AndTokenRemainder & enum_flag3_HasForwardedValuetypeParameter) != 0;
+ }
+
+ inline void SetHasForwardedValuetypeParameter()
+ {
+ LIMITED_METHOD_CONTRACT;
+ InterlockedUpdateFlags3(enum_flag3_HasForwardedValuetypeParameter, TRUE);
+ }
+
+ inline BOOL HaveValueTypeParametersBeenWalked()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifndef CLR_STANDALONE_BINDER
+ // This should only be asked of non-Zapped MethodDescs, and only during execution (not compilation)
+ _ASSERTE(!IsZapped() && !IsCompilationProcess());
+#endif
+ return (m_wFlags3AndTokenRemainder & enum_flag3_ValueTypeParametersWalked) != 0;
+ }
+
+ inline void SetValueTypeParametersWalked()
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifndef CLR_STANDALONE_BINDER
+ _ASSERTE(!IsZapped() && !IsCompilationProcess());
+#endif
+ InterlockedUpdateFlags3(enum_flag3_ValueTypeParametersWalked, TRUE);
+ }
+
+ //
+ // Optional MethodDesc slots appear after the end of base MethodDesc in this order:
+ //
+
+ // class MethodImpl; // Present if HasMethodImplSlot() is true
+
+ typedef RelativePointer<PCODE> NonVtableSlot; // Present if HasNonVtableSlot() is true
+ // RelativePointer for NGen, PCODE for JIT
+
+#define FIXUP_LIST_MASK 1
+ typedef RelativePointer<TADDR> NativeCodeSlot; // Present if HasNativeCodeSlot() is true
+ // lower order bit (FIXUP_LIST_MASK) used to determine if FixupListSlot is present
+ typedef RelativePointer<TADDR> FixupListSlot;
+
+// Stub Dispatch code
+public:
+ MethodDesc *GetInterfaceMD();
+
+// StubMethodInfo for use in creating RuntimeMethodHandles
+ REFLECTMETHODREF GetStubMethodInfo();
+
+ PrecodeType GetPrecodeType();
+};
+
+/******************************************************************/
+
+// A code:MethodDescChunk is a container that holds one or more code:MethodDesc. Logically it is just
+// compression. Basically, fields that are common among the method descs in the chunk are stored in the chunk,
+// and the MethodDescs themselves just store an index that allows them to find their chunk. Semantically
+// a code:MethodDescChunk is just a set of code:MethodDesc.
+class MethodDescChunk
+{
+ friend class MethodDesc;
+ friend class CheckAsmOffsets;
+#ifdef BINDER
+ friend class MdilModule;
+#endif
+#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
+ friend class MethodDesc::SaveChunk;
+#endif
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif // DACCESS_COMPILE
+
+ enum {
+ enum_flag_TokenRangeMask = 0x03FF, // This must equal METHOD_TOKEN_RANGE_MASK calculated higher in this file
+ // These are separate to allow the flags space available and used to be obvious here
+ // and for the logic that splits the token to be algorithmically generated based on the
+ // #define
+ enum_flag_HasCompactEntrypoints = 0x4000, // Compact temporary entry points
+ enum_flag_IsZapped = 0x8000, // This chunk lives in NGen module
+ };
+
+public:
+ //
+ // Allocates methodDescCount identical MethodDescs in the smallest possible number of chunks.
+ // If methodDescCount is zero, one chunk with the maximum number of MethodDescs is allocated.
+ //
+ static MethodDescChunk *CreateChunk(LoaderHeap *pHeap, DWORD methodDescCount,
+ DWORD classification,
+ BOOL fNonVtableSlot,
+ BOOL fNativeCodeSlot,
+ BOOL fComPlusCallInfo,
+ MethodTable *initialMT,
+ class AllocMemTracker *pamTracker);
+
+ BOOL HasTemporaryEntryPoints()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return !IsZapped();
+ }
+
+ TADDR GetTemporaryEntryPoints()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasTemporaryEntryPoints());
+ return *(dac_cast<DPTR(TADDR)>(this) - 1);
+ }
+
+ PCODE GetTemporaryEntryPoint(int index);
+
+ void EnsureTemporaryEntryPointsCreated(LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (GetTemporaryEntryPoints() == NULL)
+ CreateTemporaryEntryPoints(pLoaderAllocator, pamTracker);
+ }
+
+ void CreateTemporaryEntryPoints(LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker);
+
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ //
+ // There are two implementation options for temporary entrypoints:
+ //
+ // (1) Compact entrypoints. They provide entrypoints that are as dense as possible, but they can't
+ // be patched to point to the final code. The call to an unjitted method is an indirect call via a slot.
+ //
+ // (2) Precodes. The precode will be patched to point to the final code eventually, thus
+ // the temporary entrypoint can be embedded in the code. The call to an unjitted method is
+ // a direct call to a direct jump.
+ //
+ // We use (1) for x86 and (2) for 64-bit to get the best performance on each platform.
+ //
+
+ TADDR AllocateCompactEntryPoints(LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker);
+
+ static MethodDesc* GetMethodDescFromCompactEntryPoint(PCODE addr, BOOL fSpeculative = FALSE);
+ static SIZE_T SizeOfCompactEntryPoints(int count);
+
+ static BOOL IsCompactEntryPointAtAddress(PCODE addr)
+ {
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ // Compact entrypoints start at odd addresses
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (addr & 1) != 0;
+#else
+ #error Unsupported platform
+#endif
+ }
+#endif // HAS_COMPACT_ENTRYPOINTS
+
+ FORCEINLINE PTR_MethodTable GetMethodTable()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_methodTable.GetValue(PTR_HOST_MEMBER_TADDR(MethodDescChunk, this, m_methodTable));
+ }
+
+ inline DPTR(RelativeFixupPointer<PTR_MethodTable>) GetMethodTablePtr() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<DPTR(RelativeFixupPointer<PTR_MethodTable>)>(PTR_HOST_MEMBER_TADDR(MethodDescChunk, this, m_methodTable));
+ }
+
+#ifndef DACCESS_COMPILE
+ inline void SetMethodTable(MethodTable * pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_methodTable.IsNull());
+ _ASSERTE(pMT != NULL);
+ m_methodTable.SetValue(PTR_HOST_MEMBER_TADDR(MethodDescChunk, this, m_methodTable), pMT);
+ }
+
+ inline void SetSizeAndCount(ULONG sizeOfMethodDescs, COUNT_T methodDescCount)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(FitsIn<BYTE>((sizeOfMethodDescs / MethodDesc::ALIGNMENT) - 1));
+ m_size = static_cast<BYTE>((sizeOfMethodDescs / MethodDesc::ALIGNMENT) - 1);
+ _ASSERTE(SizeOf() == sizeof(MethodDescChunk) + sizeOfMethodDescs);
+
+ _ASSERTE(FitsIn<BYTE>(methodDescCount - 1));
+ m_count = static_cast<BYTE>(methodDescCount - 1);
+ _ASSERTE(GetCount() == methodDescCount);
+ }
+#endif // !DACCESS_COMPILE
+
+#ifndef BINDER
+#ifdef FEATURE_PREJIT
+#ifndef DACCESS_COMPILE
+ inline void RestoreMTPointer(ClassLoadLevel level = CLASS_LOADED)
+ {
+ LIMITED_METHOD_CONTRACT;
+ Module::RestoreMethodTablePointer(&m_methodTable, NULL, level);
+ }
+#endif // !DACCESS_COMPILE
+#endif // FEATURE_PREJIT
+#endif // !BINDER
+
+#ifndef DACCESS_COMPILE
+ void SetNextChunk(MethodDescChunk *chunk)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_next.SetValueMaybeNull(chunk);
+ }
+#endif // !DACCESS_COMPILE
+
+ PTR_MethodDescChunk GetNextChunk()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_next.GetValueMaybeNull(PTR_HOST_MEMBER_TADDR(MethodDescChunk, this, m_next));
+ }
+
+ UINT32 GetCount()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_count + 1;
+ }
+
+ BOOL IsZapped()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifdef FEATURE_PREJIT
+ return (m_flagsAndTokenRange & enum_flag_IsZapped) != 0;
+#else
+ return FALSE;
+#endif
+ }
+
+ inline BOOL HasCompactEntryPoints()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ return (m_flagsAndTokenRange & enum_flag_HasCompactEntrypoints) != 0;
+#else
+ return FALSE;
+#endif
+ }
+
+ inline UINT16 GetTokRange()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_flagsAndTokenRange & enum_flag_TokenRangeMask;
+ }
+
+ inline SIZE_T SizeOf()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return sizeof(MethodDescChunk) + (m_size + 1) * MethodDesc::ALIGNMENT;
+ }
+
+ inline MethodDesc *GetFirstMethodDesc()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return PTR_MethodDesc(dac_cast<TADDR>(this) + sizeof(MethodDescChunk));
+ }
+
+ // Maximum size of one chunk (corresponds to the maximum of m_size = 0xFF)
+ static const SIZE_T MaxSizeOfMethodDescs = 0x100 * MethodDesc::ALIGNMENT;
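+
+ // Worked example (not from the original sources): m_size stores the payload
+ // size in ALIGNMENT units minus 1, so a BYTE covers up to 0x100 units;
+ // assuming ALIGNMENT == 8 on 64-bit, one chunk holds at most 2048 bytes
+ // of MethodDesc data.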
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+private:
+ void SetIsZapped()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_flagsAndTokenRange |= enum_flag_IsZapped;
+ }
+
+ void SetHasCompactEntryPoints()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_flagsAndTokenRange |= enum_flag_HasCompactEntrypoints;
+ }
+
+ void SetTokenRange(UINT16 tokenRange)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE((tokenRange & ~enum_flag_TokenRangeMask) == 0);
+ static_assert_no_msg(enum_flag_TokenRangeMask == METHOD_TOKEN_RANGE_MASK);
+ m_flagsAndTokenRange = (m_flagsAndTokenRange & ~enum_flag_TokenRangeMask) | tokenRange;
+ }
+
+ RelativeFixupPointer<PTR_MethodTable> m_methodTable;
+
+ RelativePointer<PTR_MethodDescChunk> m_next;
+
+ BYTE m_size; // The size of this chunk minus 1 (in multiples of MethodDesc::ALIGNMENT)
+ BYTE m_count; // The number of MethodDescs in this chunk minus 1
+ UINT16 m_flagsAndTokenRange;
+
+ // Followed by array of method descs...
+};
+
+inline int MethodDesc::GetMethodDescIndex() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_chunkIndex;
+}
+
+inline MethodDescChunk *MethodDesc::GetMethodDescChunk() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return
+ PTR_MethodDescChunk(dac_cast<TADDR>(this) -
+ (sizeof(MethodDescChunk) + (GetMethodDescIndex() * MethodDesc::ALIGNMENT)));
+}
+
+// convert an entry point into a MethodDesc
+MethodDesc* Entry2MethodDesc(PCODE entryPoint, MethodTable *pMT);
+
+
+typedef DPTR(class StoredSigMethodDesc) PTR_StoredSigMethodDesc;
+class StoredSigMethodDesc : public MethodDesc
+{
+ public:
+ // Put the sig RVA in here - this allows us to avoid
+ // touching the method desc table when mscorlib is prejitted.
+
+ TADDR m_pSig;
+ DWORD m_cSig;
+#ifdef _WIN64
+ // m_dwExtendedFlags is not used by StoredSigMethodDesc itself.
+ // It is used by child classes. We allocate the space here to get
+ // optimal layout.
+ DWORD m_dwExtendedFlags;
+#endif
+
+ bool HasStoredMethodSig(void)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pSig != 0;
+ }
+ PCCOR_SIGNATURE GetStoredMethodSig(DWORD* sigLen = NULL)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (sigLen)
+ {
+ *sigLen = m_cSig;
+ }
+#ifdef DACCESS_COMPILE
+ return (PCCOR_SIGNATURE)
+ DacInstantiateTypeByAddress(m_pSig, m_cSig, true);
+#else // !DACCESS_COMPILE
+#ifndef BINDER
+ g_IBCLogger.LogNDirectCodeAccess(this);
+#endif
+ return (PCCOR_SIGNATURE)m_pSig;
+#endif // !DACCESS_COMPILE
+ }
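+
+ // Illustrative usage (not from the original sources; pStoredSigMD is a
+ // hypothetical StoredSigMethodDesc*):
+ //
+ // if (pStoredSigMD->HasStoredMethodSig())
+ // {
+ // DWORD cbSig;
+ // PCCOR_SIGNATURE pSig = pStoredSigMD->GetStoredMethodSig(&cbSig);
+ // // pSig/cbSig now describe the stored signature blob
+ // }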
+ void SetStoredMethodSig(PCCOR_SIGNATURE sig, DWORD sigBytes)
+ {
+#ifndef DACCESS_COMPILE
+ m_pSig = (TADDR)sig;
+ m_cSig = sigBytes;
+#endif // !DACCESS_COMPILE
+ }
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+};
+
+//-----------------------------------------------------------------------
+// Operations specific to FCall methods. We use a derived class to get
+// the compiler involved in enforcing proper method type usage.
+// DO NOT ADD FIELDS TO THIS CLASS.
+//-----------------------------------------------------------------------
+
+class FCallMethodDesc : public MethodDesc
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+ DWORD m_dwECallID;
+#ifdef _WIN64
+ DWORD m_padding;
+#endif
+
+public:
+ void SetECallID(DWORD dwID)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_dwECallID = dwID;
+ }
+
+ DWORD GetECallID()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwECallID;
+ }
+};
+
+class HostCodeHeap;
+class LCGMethodResolver;
+typedef DPTR(LCGMethodResolver) PTR_LCGMethodResolver;
+class ILStubResolver;
+typedef DPTR(ILStubResolver) PTR_ILStubResolver;
+class DynamicResolver;
+typedef DPTR(DynamicResolver) PTR_DynamicResolver;
+
+class DynamicMethodDesc : public StoredSigMethodDesc
+{
+ friend class ILStubCache;
+ friend class ILStubState;
+ friend class DynamicMethodTable;
+ friend class MethodDesc;
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+#ifdef MDIL
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+
+protected:
+ PTR_CUTF8 m_pszMethodName;
+ PTR_DynamicResolver m_pResolver;
+
+#ifndef _WIN64
+ // We use m_dwExtendedFlags from StoredSigMethodDesc on WIN64
+ DWORD m_dwExtendedFlags; // see DynamicMethodDesc::ExtendedFlags enum
+#endif
+
+ typedef enum ExtendedFlags
+ {
+ nomdAttrs = 0x0000FFFF, // method attributes (LCG)
+ nomdILStubAttrs = mdMemberAccessMask | mdStatic, // method attributes (IL stubs)
+
+ // attributes (except mdStatic and mdMemberAccessMask) have a different meaning for IL stubs
+ // mdMemberAccessMask = 0x0007,
+ nomdReverseStub = 0x0008,
+ // mdStatic = 0x0010,
+ nomdCALLIStub = 0x0020,
+ nomdDelegateStub = 0x0040,
+ nomdCopyCtorArgs = 0x0080,
+ nomdUnbreakable = 0x0100,
+ nomdDelegateCOMStub = 0x0200, // CLR->COM or COM->CLR call via a delegate (WinRT specific)
+ nomdSignatureNeedsRestore = 0x0400,
+ nomdStubNeedsCOMStarted = 0x0800, // EnsureComStarted must be called before executing the method
+ nomdMulticastStub = 0x1000,
+ nomdUnboxingILStub = 0x2000,
+
+ nomdILStub = 0x00010000,
+ nomdLCGMethod = 0x00020000,
+ nomdStackArgSize = 0xFFFC0000, // native stack arg size for IL stubs
+ } ExtendedFlags;
+
+public:
+ bool IsILStub() { LIMITED_METHOD_DAC_CONTRACT; return !!(m_dwExtendedFlags & nomdILStub); }
+ bool IsLCGMethod() { LIMITED_METHOD_DAC_CONTRACT; return !!(m_dwExtendedFlags & nomdLCGMethod); }
+
+ inline PTR_DynamicResolver GetResolver();
+ inline PTR_LCGMethodResolver GetLCGMethodResolver();
+ inline PTR_ILStubResolver GetILStubResolver();
+
+ PTR_CUTF8 GetMethodName() { LIMITED_METHOD_DAC_CONTRACT; return m_pszMethodName; }
+
+ WORD GetAttrs()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (IsILStub() ? (m_dwExtendedFlags & nomdILStubAttrs) : (m_dwExtendedFlags & nomdAttrs));
+ }
+
+ DWORD GetExtendedFlags()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwExtendedFlags;
+ }
+
+ WORD GetNativeStackArgSize()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(IsILStub());
+ return (WORD)((m_dwExtendedFlags & nomdStackArgSize) >> 16);
+ }
+
+ void SetNativeStackArgSize(WORD cbArgSize)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(IsILStub() && (cbArgSize % sizeof(SLOT)) == 0);
+ m_dwExtendedFlags = (m_dwExtendedFlags & ~nomdStackArgSize) | ((DWORD)cbArgSize << 16);
+ }
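+
+ // Worked example (not from the original sources; pStubMD is a hypothetical
+ // DynamicMethodDesc* for an IL stub): the stack arg size is packed into the
+ // top 16 bits of m_dwExtendedFlags:
+ //
+ // pStubMD->SetNativeStackArgSize(8); // flags |= 0x00080000
+ // WORD cb = pStubMD->GetNativeStackArgSize(); // (flags & nomdStackArgSize) >> 16 == 8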
+
+ void SetHasCopyCtorArgs(bool value)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (value)
+ {
+ m_dwExtendedFlags |= nomdCopyCtorArgs;
+ }
+ }
+
+ void SetUnbreakable(bool value)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (value)
+ {
+ m_dwExtendedFlags |= nomdUnbreakable;
+ }
+ }
+
+ void SetSignatureNeedsRestore(bool value)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (value)
+ {
+ m_dwExtendedFlags |= nomdSignatureNeedsRestore;
+ }
+ }
+
+ void SetStubNeedsCOMStarted(bool value)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (value)
+ {
+ m_dwExtendedFlags |= nomdStubNeedsCOMStarted;
+ }
+ }
+
+ bool IsRestored()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (IsSignatureNeedsRestore())
+ {
+ // Since we don't update the signatureNeedsRestore bit when we actually
+ // restore the signature, the bit will have a stale value. The signature
+ // bit in the metadata will always contain the correct, up-to-date
+ // information.
+ Volatile<BYTE> *pVolatileSig = (Volatile<BYTE> *)GetStoredMethodSig();
+ if ((*pVolatileSig & IMAGE_CEE_CS_CALLCONV_NEEDSRESTORE) != 0)
+ return false;
+ }
+
+ return true;
+ }
+
+ bool IsReverseStub() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(IsILStub()); return (0 != (m_dwExtendedFlags & nomdReverseStub)); }
+ bool IsCALLIStub() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(IsILStub()); return (0 != (m_dwExtendedFlags & nomdCALLIStub)); }
+ bool IsDelegateStub() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(IsILStub()); return (0 != (m_dwExtendedFlags & nomdDelegateStub)); }
+ bool IsCLRToCOMStub() { LIMITED_METHOD_CONTRACT; _ASSERTE(IsILStub()); return ((0 == (m_dwExtendedFlags & mdStatic)) && !IsReverseStub() && !IsDelegateStub()); }
+ bool IsCOMToCLRStub() { LIMITED_METHOD_CONTRACT; _ASSERTE(IsILStub()); return ((0 == (m_dwExtendedFlags & mdStatic)) && IsReverseStub()); }
+ bool IsPInvokeStub() { LIMITED_METHOD_CONTRACT; _ASSERTE(IsILStub()); return ((0 != (m_dwExtendedFlags & mdStatic)) && !IsReverseStub() && !IsCALLIStub()); }
+ bool HasCopyCtorArgs() { LIMITED_METHOD_CONTRACT; _ASSERTE(IsILStub()); return (0 != (m_dwExtendedFlags & nomdCopyCtorArgs)); }
+ bool IsUnbreakable() { LIMITED_METHOD_CONTRACT; _ASSERTE(IsILStub()); return (0 != (m_dwExtendedFlags & nomdUnbreakable)); }
+ bool IsDelegateCOMStub() { LIMITED_METHOD_CONTRACT; _ASSERTE(IsILStub()); return (0 != (m_dwExtendedFlags & nomdDelegateCOMStub)); }
+ bool IsSignatureNeedsRestore() { LIMITED_METHOD_CONTRACT; _ASSERTE(IsILStub()); return (0 != (m_dwExtendedFlags & nomdSignatureNeedsRestore)); }
+ bool IsStubNeedsCOMStarted() { LIMITED_METHOD_CONTRACT; _ASSERTE(IsILStub()); return (0 != (m_dwExtendedFlags & nomdStubNeedsCOMStarted)); }
+#ifdef FEATURE_STUBS_AS_IL
+ bool IsMulticastStub() {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(IsILStub());
+ return !!(m_dwExtendedFlags & nomdMulticastStub);
+ }
+ bool IsUnboxingILStub() {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(IsILStub());
+ return !!(m_dwExtendedFlags & nomdUnboxingILStub);
+ }
+#endif
+
+ // Whether the stub takes a context argument that is an interop MethodDesc.
+ bool HasMDContextArg()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((IsCLRToCOMStub() && !IsDelegateCOMStub()) || IsPInvokeStub());
+ }
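+
+ // Summary sketch (not from the original sources) of how the flag bits above
+ // classify an IL stub:
+ //
+ // mdStatic set, not reverse, not CALLI -> IsPInvokeStub
+ // mdStatic clear, not reverse, not delegate -> IsCLRToCOMStub
+ // mdStatic clear, reverse -> IsCOMToCLRStub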
+
+ void Restore();
+ void Fixup(DataImage* image);
+ //
+ // following implementations defined in DynamicMethod.cpp
+ //
+ void Destroy(BOOL fDomainUnload = FALSE);
+};
+
+
+class ArrayMethodDesc : public StoredSigMethodDesc
+{
+public:
+ // The VTABLE for an array looks like:
+
+ // System.Object Vtable
+ // System.Array Vtable
+ // type[] Vtable
+ // Get(<rank specific)
+ // Set(<rank specific)
+ // Address(<rank specific)
+ // .ctor(int) // Possibly more
+
+ enum {
+ ARRAY_FUNC_GET = 0,
+ ARRAY_FUNC_SET = 1,
+ ARRAY_FUNC_ADDRESS = 2,
+ ARRAY_FUNC_CTOR = 3, // Anything >= ARRAY_FUNC_CTOR is .ctor
+ };
+
+ // Get the index of runtime provided array method
+ DWORD GetArrayFuncIndex()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // The runtime-provided array methods come after the virtual methods,
+ // so the index is the slot number minus the number of virtuals.
+ DWORD dwSlot = GetSlot();
+ DWORD dwVirtuals = GetMethodTable()->GetNumVirtuals();
+ _ASSERTE(dwSlot >= dwVirtuals);
+ return dwSlot - dwVirtuals;
+ }
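+
+ // Worked example (not from the original sources): for an array type whose
+ // method table has N virtuals, Get sits in slot N, so GetArrayFuncIndex()
+ // returns 0 (ARRAY_FUNC_GET); Set returns 1, Address returns 2, and any
+ // .ctor returns 3 or more.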
+
+ LPCUTF8 GetMethodName();
+ DWORD GetAttrs();
+ CorInfoIntrinsics GetIntrinsicID();
+};
+
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+typedef NDirectImportPrecode NDirectImportThunkGlue;
+#else // HAS_NDIRECT_IMPORT_PRECODE
+
+class NDirectImportThunkGlue
+{
+ PVOID m_dummy; // Dummy field to make the alignment right
+
+public:
+ LPVOID GetEntrypoint()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return NULL;
+ }
+ void Init(MethodDesc *pMethod)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+};
+#ifdef FEATURE_PREJIT
+PORTABILITY_WARNING("NDirectImportThunkGlue");
+#endif // FEATURE_PREJIT
+
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+
+typedef DPTR(NDirectImportThunkGlue) PTR_NDirectImportThunkGlue;
+
+
+//
+// This struct consolidates the writeable parts of the NDirectMethodDesc
+ // so that we can eventually lay out a read-only NDirectMethodDesc with a pointer
+// to the writeable parts in an ngen image
+//
+class NDirectWriteableData
+{
+public:
+ // The JIT generates an indirect call through this location in some cases.
+ // Initialized to NDirectImportThunkGlue. Patched to the true target or
+ // host interceptor stub or alignment thunk after linking.
+ LPVOID m_pNDirectTarget;
+};
+
+typedef DPTR(NDirectWriteableData) PTR_NDirectWriteableData;
+
+//-----------------------------------------------------------------------
+// Operations specific to NDirect methods. We use a derived class to get
+// the compiler involved in enforcing proper method type usage.
+// DO NOT ADD FIELDS TO THIS CLASS.
+//-----------------------------------------------------------------------
+class NDirectMethodDesc : public MethodDesc
+{
+public:
+ struct temp1
+ {
+ // If we are hosted, stack imbalance MDA is active, or alignment thunks are needed,
+ // we will intercept m_pNDirectTarget. The true target is saved here.
+ LPVOID m_pNativeNDirectTarget;
+
+ // Information about the entrypoint
+ LPCUTF8 m_pszEntrypointName;
+
+ union
+ {
+ LPCUTF8 m_pszLibName;
+ DWORD m_dwECallID; // ECallID for QCalls
+ };
+
+ // The writeable part of the methoddesc.
+ PTR_NDirectWriteableData m_pWriteableData;
+
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ PTR_NDirectImportThunkGlue m_pImportThunkGlue;
+#else // HAS_NDIRECT_IMPORT_PRECODE
+ NDirectImportThunkGlue m_ImportThunkGlue;
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+
+#ifndef FEATURE_CORECLR
+ ULONG m_DefaultDllImportSearchPathsAttributeValue; // DefaultDllImportSearchPathsAttribute is saved.
+#endif
+
+ // Various attributes needed at runtime.
+ WORD m_wFlags;
+
+#if defined(_TARGET_X86_)
+ // Size of outgoing arguments (on stack). Note that in order to get the @n stdcall name decoration,
+ // it may be necessary to subtract 4 as the hidden large structure pointer parameter does not count.
+ // See code:kStdCallWithRetBuf
+ WORD m_cbStackArgumentSize;
+#endif // defined(_TARGET_X86_)
+
+ // This field gets set only when this MethodDesc is marked as PreImplemented
+ RelativePointer<PTR_MethodDesc> m_pStubMD;
+
+ } ndirect;
+
+ enum Flags
+ {
+ // There are two groups of flag bits here each which gets initialized
+ // at different times.
+
+ //
+ // Group 1: The init group.
+ //
+ // This group is set during MethodDesc construction. No race issues
+ // here since they are initialized before the MD is ever published
+ // and never change after that.
+
+ kEarlyBound = 0x0001, // IJW managed->unmanaged thunk. Standard [sysimport] stuff otherwise.
+
+ kHasSuppressUnmanagedCodeAccess = 0x0002,
+
+#ifndef FEATURE_CORECLR
+ kDefaultDllImportSearchPathsIsCached = 0x0004, // set if we cache attribute value.
+#endif
+
+ // kUnusedMask = 0x0008
+
+ //
+ // Group 2: The runtime group.
+ //
+ // This group is set during runtime potentially by multiple threads
+ // at the same time. All flags in this category has to be set via interlocked operation.
+ //
+ kIsMarshalingRequiredCached = 0x0010, // Set if we have cached the results of marshaling required computation
+ kCachedMarshalingRequired = 0x0020, // The result of the marshaling required computation
+
+ kNativeAnsi = 0x0040,
+
+ kLastError = 0x0080, // setLastError keyword specified
+ kNativeNoMangle = 0x0100, // nomangle keyword specified
+
+ kVarArgs = 0x0200,
+ kStdCall = 0x0400,
+ kThisCall = 0x0800,
+
+ kIsQCall = 0x1000,
+
+#if !defined(FEATURE_CORECLR)
+ kDefaultDllImportSearchPathsStatus = 0x2000, // either method has custom attribute or not.
+#endif
+
+ kHasCopyCtorArgs = 0x4000,
+
+ kStdCallWithRetBuf = 0x8000, // Call returns large structure, only valid if kStdCall is also set
+
+ };
+
+ // Retrieves the cached result of marshaling required computation, or performs the computation
+ // if the result is not cached yet.
+ BOOL MarshalingRequired()
+ {
+ STANDARD_VM_CONTRACT;
+
+ if ((ndirect.m_wFlags & kIsMarshalingRequiredCached) == 0)
+ {
+ // Compute the flag and cache the result
+ InterlockedSetNDirectFlags(kIsMarshalingRequiredCached |
+ (ComputeMarshalingRequired() ? kCachedMarshalingRequired : 0));
+ }
+ _ASSERTE((ndirect.m_wFlags & kIsMarshalingRequiredCached) != 0);
+ return (ndirect.m_wFlags & kCachedMarshalingRequired) != 0;
+ }
+
+ BOOL ComputeMarshalingRequired();
+
+ // Atomically set specified flags. Only setting of the bits is supported.
+ void InterlockedSetNDirectFlags(WORD wFlags);
+
+#ifdef FEATURE_MIXEDMODE // IJW
+ void SetIsEarlyBound()
+ {
+ LIMITED_METHOD_CONTRACT;
+ ndirect.m_wFlags |= kEarlyBound;
+ }
+
+ BOOL IsEarlyBound()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (ndirect.m_wFlags & kEarlyBound) != 0;
+ }
+#endif // FEATURE_MIXEDMODE
+
+ BOOL IsNativeAnsi() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (ndirect.m_wFlags & kNativeAnsi) != 0;
+ }
+
+ BOOL IsNativeNoMangled() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (ndirect.m_wFlags & kNativeNoMangle) != 0;
+ }
+
+#ifndef FEATURE_CORECLR
+ BOOL HasSuppressUnmanagedCodeAccessAttr() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (ndirect.m_wFlags & kHasSuppressUnmanagedCodeAccess) != 0;
+ }
+
+ void SetSuppressUnmanagedCodeAccessAttr(BOOL value)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (value)
+ ndirect.m_wFlags |= kHasSuppressUnmanagedCodeAccess;
+ }
+#endif
+
+ DWORD GetECallID() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsQCall());
+ return ndirect.m_dwECallID;
+ }
+
+ void SetECallID(DWORD dwID)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsQCall());
+ ndirect.m_dwECallID = dwID;
+ }
+
+ LPCUTF8 GetLibName() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return IsQCall() ? "QCall" : ndirect.m_pszLibName;
+ }
+
+ LPCUTF8 GetEntrypointName() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return ndirect.m_pszEntrypointName;
+ }
+
+ BOOL IsVarArgs() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (ndirect.m_wFlags & kVarArgs) != 0;
+ }
+
+ BOOL IsStdCall() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (ndirect.m_wFlags & kStdCall) != 0;
+ }
+
+ BOOL IsThisCall() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (ndirect.m_wFlags & kThisCall) != 0;
+ }
+
+ // Returns TRUE if this MethodDesc is an internal call from mscorlib to mscorwks
+ BOOL IsQCall() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (ndirect.m_wFlags & kIsQCall) != 0;
+ }
+
+#ifndef FEATURE_CORECLR
+ BOOL HasDefaultDllImportSearchPathsAttribute();
+
+ BOOL IsDefaultDllImportSearchPathsAttributeCached()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (ndirect.m_wFlags & kDefaultDllImportSearchPathsIsCached) != 0;
+ }
+
+ ULONG DefaultDllImportSearchPathsAttributeCachedValue()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ndirect.m_DefaultDllImportSearchPathsAttributeValue & 0xFFFFFFFD;
+ }
+
+ BOOL DllImportSearchAssemblyDirectory()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (ndirect.m_DefaultDllImportSearchPathsAttributeValue & 0x2) != 0;
+ }
+#endif // !FEATURE_CORECLR
+
+ BOOL HasCopyCtorArgs() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (ndirect.m_wFlags & kHasCopyCtorArgs) != 0;
+ }
+
+ void SetHasCopyCtorArgs(BOOL value)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (value)
+ {
+ InterlockedSetNDirectFlags(kHasCopyCtorArgs);
+ }
+ }
+
+ BOOL IsStdCallWithRetBuf() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (ndirect.m_wFlags & kStdCallWithRetBuf) != 0;
+ }
+
+ NDirectWriteableData* GetWriteableData() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return ndirect.m_pWriteableData;
+ }
+
+ NDirectImportThunkGlue* GetNDirectImportThunkGlue()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ return ndirect.m_pImportThunkGlue;
+#else
+ return &ndirect.m_ImportThunkGlue;
+#endif
+ }
+
+ LPVOID GetNDirectTarget()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsNDirect());
+ return GetWriteableData()->m_pNDirectTarget;
+ }
+
+ LPVOID GetNativeNDirectTarget()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsNDirect());
+ _ASSERTE_IMPL(!NDirectTargetIsImportThunk());
+
+ LPVOID pNativeNDirectTarget = ndirect.m_pNativeNDirectTarget;
+ if (pNativeNDirectTarget != NULL)
+ return pNativeNDirectTarget;
+
+ return GetNDirectTarget();
+ }
+
+ VOID SetNDirectTarget(LPVOID pTarget);
+
+#ifndef DACCESS_COMPILE
+ BOOL NDirectTargetIsImportThunk()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(IsNDirect());
+
+ return (GetNDirectTarget() == GetNDirectImportThunkGlue()->GetEntrypoint());
+ }
+#endif // !DACCESS_COMPILE
+
+ // Find the entry point name and function address
+ // based on the module and data from NDirectMethodDesc
+ //
+ LPVOID FindEntryPoint(HINSTANCE hMod) const;
+
+private:
+ Stub* GenerateStubForHost(LPVOID pNativeTarget, Stub *pInnerStub);
+#ifdef MDA_SUPPORTED
+ Stub* GenerateStubForMDA(LPVOID pNativeTarget, Stub *pInnerStub, BOOL fCalledByStub);
+#endif // MDA_SUPPORTED
+
+public:
+
+ void SetStackArgumentSize(WORD cbDstBuffer, CorPinvokeMap unmgdCallConv)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#if defined(_TARGET_X86_)
+ // thiscall passes the this pointer in ECX
+ if (unmgdCallConv == pmCallConvThiscall)
+ {
+ _ASSERTE(cbDstBuffer >= sizeof(SLOT));
+ cbDstBuffer -= sizeof(SLOT);
+ }
+
+ // Don't write to the field if it's already initialized to avoid creating private pages (NGEN)
+ if (ndirect.m_cbStackArgumentSize == 0xFFFF)
+ {
+ ndirect.m_cbStackArgumentSize = cbDstBuffer;
+ }
+ else
+ {
+ _ASSERTE(ndirect.m_cbStackArgumentSize == cbDstBuffer);
+ }
+#endif // defined(_TARGET_X86_)
+ }
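+
+ // Worked example (not from the original sources): for a thiscall method whose
+ // arguments occupy 12 bytes including "this", the this pointer travels in ECX,
+ // so SetStackArgumentSize(12, pmCallConvThiscall) records 8 bytes of actual
+ // stack arguments on x86.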
+
+#if defined(_TARGET_X86_)
+ WORD GetStackArgumentSize() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(ndirect.m_cbStackArgumentSize != 0xFFFF);
+
+ // If we have a methoddesc, stackArgSize is the number of bytes of
+ // the outgoing marshalling buffer.
+ return ndirect.m_cbStackArgumentSize;
+ }
+#endif // defined(_TARGET_X86_)
+
+#ifdef FEATURE_MIXEDMODE // IJW
+ VOID InitEarlyBoundNDirectTarget();
+#endif
+
+ // In AppDomains, we can trigger declarer's cctor when we link the P/Invoke,
+ // which takes care of inlined calls as well. See code:NDirect.NDirectLink.
+ // Although the cctor is guaranteed to run in the shared domain before the
+ // target is invoked (code:IsClassConstructorTriggeredByILStub), we will
+ // trigger it at link time as well because linking may depend on it - the
+ // cctor may change the target DLL, change the DLL search path, etc.
+ BOOL IsClassConstructorTriggeredAtLinkTime()
+#ifndef CLR_STANDALONE_BINDER
+ {
+ LIMITED_METHOD_CONTRACT;
+ MethodTable * pMT = GetMethodTable();
+ // Try to avoid touching the EEClass if possible
+ if (pMT->IsClassPreInited())
+ return FALSE;
+ return !pMT->GetClass()->IsBeforeFieldInit();
+ }
+#else
+ ;
+#endif
+
+#ifndef DACCESS_COMPILE
+ // In the shared domain and in NGENed code, we will trigger declarer's cctor
+ // in the marshaling stub by calling code:StubHelpers.InitDeclaringType. If
+ // this returns TRUE, the call must not be inlined.
+ BOOL IsClassConstructorTriggeredByILStub()
+#ifndef CLR_STANDALONE_BINDER
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return (IsClassConstructorTriggeredAtLinkTime() &&
+ (IsZapped() || GetDomain()->IsSharedDomain() || SystemDomain::GetCurrentDomain()->IsCompilationDomain()));
+ }
+#else
+ ;
+#endif
+#endif //!DACCESS_COMPILE
+}; //class NDirectMethodDesc
+
+
+//-----------------------------------------------------------------------
+// Operations specific to EEImplCall methods. We use a derived class to get
+// the compiler involved in enforcing proper method type usage.
+//
+// For now, the only EE impl is the delegate Invoke method. If we
+// add other EE impl types in the future, may need a discriminator
+// field here.
+//-----------------------------------------------------------------------
+class EEImplMethodDesc : public StoredSigMethodDesc
+{ };
+
+#ifdef FEATURE_COMINTEROP
+
+// This is the extra information needed to be associated with a method in order to use it for
+// CLR->COM calls. It is currently used by code:ComPlusCallMethodDesc (ordinary CLR->COM calls),
+// code:InstantiatedMethodDesc (optional field, CLR->COM calls on shared generic interfaces),
+// and code:DelegateEEClass (delegate->COM calls for WinRT).
+typedef DPTR(struct ComPlusCallInfo) PTR_ComPlusCallInfo;
+struct ComPlusCallInfo
+{
+ // Returns ComPlusCallInfo associated with a method. pMD must be a ComPlusCallMethodDesc or
+ // EEImplMethodDesc that has already been initialized for COM interop.
+ inline static ComPlusCallInfo *FromMethodDesc(MethodDesc *pMD);
+
+ enum Flags
+ {
+ kHasSuppressUnmanagedCodeAccess = 0x1,
+ kRequiresArgumentWrapping = 0x2,
+ kHasCopyCtorArgs = 0x4,
+ };
+
+#if defined(FEATURE_REMOTING) && !defined(HAS_REMOTING_PRECODE)
+ // These two fields cannot overlap in this case because the AMD64 GenericComPlusCallStub uses m_pILStub on the COM event provider path
+ struct
+#else
+ union
+#endif
+ {
+ // IL stub for CLR to COM call
+ PCODE m_pILStub;
+
+ // MethodDesc of the COM event provider to forward the call to (COM event interfaces)
+ MethodDesc *m_pEventProviderMD;
+ };
+
+ // method table of the interface which this represents
+ PTR_MethodTable m_pInterfaceMT;
+
+ // We need only 3 bits here; see enum Flags above.
+ BYTE m_flags;
+
+ // The result of ComSlot(). (It is cached when we first invoke the method and
+ // generate the stubs for it. There's probably a better place to do this
+ // caching, but I'm not sure I know all the places these things are
+ // created.)
+ WORD m_cachedComSlot;
+
+ PCODE * GetAddrOfILStubField()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_pILStub;
+ }
+
+#ifdef _TARGET_X86_
+ // Size of outgoing arguments (on stack). This is currently used only
+ // on x86 when we have an InlinedCallFrame representing a CLR->COM call.
+ WORD m_cbStackArgumentSize;
+
+ void SetHasCopyCtorArgs(BOOL value)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (value)
+ FastInterlockOr(reinterpret_cast<DWORD *>(&m_flags), kHasCopyCtorArgs);
+ }
+
+ BOOL HasCopyCtorArgs()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((m_flags & kHasCopyCtorArgs) != 0);
+ }
+
+ void InitStackArgumentSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_cbStackArgumentSize = 0xFFFF;
+ }
+
+ void SetStackArgumentSize(WORD cbDstBuffer)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Don't write to the field if it's already initialized to avoid creating private pages (NGEN)
+ if (m_cbStackArgumentSize == 0xFFFF)
+ {
+ m_cbStackArgumentSize = cbDstBuffer;
+ }
+ _ASSERTE(m_cbStackArgumentSize == cbDstBuffer);
+ }
+
+ WORD GetStackArgumentSize()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(m_cbStackArgumentSize != 0xFFFF);
+ return m_cbStackArgumentSize;
+ }
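+
+ // Usage sketch (illustrative, not part of the original sources): the three
+ // methods above implement a write-once protocol with 0xFFFF as the
+ // "uninitialized" sentinel; 'info' is a hypothetical ComPlusCallInfo:
+ //
+ // info.InitStackArgumentSize(); // m_cbStackArgumentSize == 0xFFFF
+ // info.SetStackArgumentSize(8); // first write wins: field becomes 8
+ // info.SetStackArgumentSize(8); // no-op, but asserts the value matches
+ // WORD cb = info.GetStackArgumentSize(); // cb == 8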
+
+ union
+ {
+ LPVOID m_pRetThunk; // used for late-bound calls
+ LPVOID m_pInterceptStub; // used for early-bound IL stub calls
+ };
+
+ Stub *GenerateStubForHost(LoaderHeap *pHeap, Stub *pInnerStub);
+#else // _TARGET_X86_
+ void InitStackArgumentSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ void SetStackArgumentSize(WORD cbDstBuffer)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+#endif // _TARGET_X86_
+
+ // This field gets set only when this MethodDesc is marked as PreImplemented
+ RelativePointer<PTR_MethodDesc> m_pStubMD;
+
+#ifdef FEATURE_PREJIT
+ BOOL ShouldSave(DataImage *image);
+ void Fixup(DataImage *image);
+#endif
+};
+
+
+//-----------------------------------------------------------------------
+// Operations specific to ComPlusCall methods. We use a derived class to get
+// the compiler involved in enforcing proper method type usage.
+// DO NOT ADD FIELDS TO THIS CLASS.
+//-----------------------------------------------------------------------
+class ComPlusCallMethodDesc : public MethodDesc
+{
+public:
+ ComPlusCallInfo *m_pComPlusCallInfo; // initialized in code:ComPlusCall.PopulateComPlusCallMethodDesc
+
+ void InitRetThunk();
+ void InitComEventCallInfo();
+
+ PCODE * GetAddrOfILStubField()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pComPlusCallInfo->GetAddrOfILStubField();
+ }
+
+ MethodTable* GetInterfaceMethodTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pComPlusCallInfo->m_pInterfaceMT != NULL);
+ return m_pComPlusCallInfo->m_pInterfaceMT;
+ }
+
+ MethodDesc* GetEventProviderMD()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pComPlusCallInfo->m_pEventProviderMD;
+ }
+
+#ifndef FEATURE_CORECLR
+
+#ifndef BINDER
+ BOOL HasSuppressUnmanagedCodeAccessAttr()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pComPlusCallInfo != NULL)
+ {
+ return (m_pComPlusCallInfo->m_flags & ComPlusCallInfo::kHasSuppressUnmanagedCodeAccess) != 0;
+ }
+
+ // it is possible that somebody will call this before we have initialized m_pComPlusCallInfo
+ return (GetMDImport()->GetCustomAttributeByName(GetMemberDef(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_OK);
+ }
+#endif // !BINDER
+
+ void SetSuppressUnmanagedCodeAccessAttr(BOOL value)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (value)
+ FastInterlockOr(reinterpret_cast<DWORD *>(&m_pComPlusCallInfo->m_flags), ComPlusCallInfo::kHasSuppressUnmanagedCodeAccess);
+ }
+#endif // !FEATURE_CORECLR
+
+ BOOL RequiresArgumentWrapping()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_pComPlusCallInfo->m_flags & ComPlusCallInfo::kRequiresArgumentWrapping) != 0;
+ }
+
+ void SetLateBoundFlags(BYTE newFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ FastInterlockOr(reinterpret_cast<DWORD *>(&m_pComPlusCallInfo->m_flags), newFlags);
+ }
+
+#ifdef _TARGET_X86_
+ BOOL HasCopyCtorArgs()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pComPlusCallInfo->HasCopyCtorArgs();
+ }
+
+ void SetHasCopyCtorArgs(BOOL value)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pComPlusCallInfo->SetHasCopyCtorArgs(value);
+ }
+
+ WORD GetStackArgumentSize()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pComPlusCallInfo->GetStackArgumentSize();
+ }
+
+ void SetStackArgumentSize(WORD cbDstBuffer)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pComPlusCallInfo->SetStackArgumentSize(cbDstBuffer);
+ }
+#else // _TARGET_X86_
+ void SetStackArgumentSize(WORD cbDstBuffer)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+#endif // _TARGET_X86_
+};
+#endif // FEATURE_COMINTEROP
+
+//-----------------------------------------------------------------------
+// InstantiatedMethodDesc's are used for generics and
+// come in four flavours, discriminated by the
+// low order bits of the first field:
+//
+// 00 --> GenericMethodDefinition
+// 01 --> UnsharedMethodInstantiation
+// 10 --> SharedMethodInstantiation
+// 11 --> WrapperStubWithInstantiations - and unboxing or instantiating stub
+//
+// A SharedMethodInstantiation descriptor extends MethodDesc
+// with a pointer to dictionary layout and a representative instantiation.
+//
+// A GenericMethodDefinition is the instantiation of a
+// generic method at its formals, used for verifying the method and
+// also for reflection.
+//
+// A WrapperStubWithInstantiations extends MethodDesc with:
+// (1) a method instantiation
+// (2) an "underlying" method descriptor.
+// A WrapperStubWithInstantiations may be placed in a MethodChunk for
+// a method table which specifies an exact instantiation for the class/struct.
+// A WrapperStubWithInstantiations may be either
+// a BoxedEntryPointStub or an exact-instantiation stub.
+//
+// Exact-instantiation stubs supply the extra type-context parameter. When
+// used as an entry point, an instantiating stub passes an instantiation
+// dictionary on to the underlying method. These entries are required to
+// implement ldftn instructions on instantiations of shared generic
+// methods, because the InstantiatingStub's pointer does not expect a
+// dictionary argument; instead, the stub passes itself on to the shared
+// code as the dictionary.
+//
+// An UnsharedMethodInstantiation contains just an instantiation.
+// These are fully-specialized wrt method and class type parameters.
+// These satisfy (!IMD_IsGenericMethodDefinition() &&
+// !IMD_IsSharedByGenericMethodInstantiations() &&
+// !IMD_IsWrapperStubWithInstantiations())
+//
+// Note that plain MethodDescs may represent shared code w.r.t. class type
+// parameters (see MethodDesc::IsSharedByGenericInstantiations()).
+//-----------------------------------------------------------------------
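+
+// Discrimination sketch (illustrative, not part of the original sources): given
+// a MethodDesc known to be mcInstantiated, the flavour can be recovered through
+// the IMD_* predicates below, which test the KindMask bits of m_wFlags2:
+//
+// InstantiatedMethodDesc *pIMD = pMD->AsInstantiatedMethodDesc();
+// if (pIMD->IMD_IsGenericMethodDefinition()) { /* kind bits 00 */ }
+// else if (pIMD->IMD_IsSharedByGenericMethodInstantiations()) { /* 10 */ }
+// else if (pIMD->IMD_IsWrapperStubWithInstantiations()) { /* 11 */ }
+// else { /* 01: UnsharedMethodInstantiation */ }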
+
+class InstantiatedMethodDesc : public MethodDesc
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+#ifdef BINDER
+ friend class CompactTypeBuilder;
+ friend class MdilModule;
+#endif
+public:
+
+ // All varieties of InstantiatedMethodDesc's support this method.
+ BOOL IMD_HasMethodInstantiation()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IMD_IsGenericMethodDefinition())
+ return TRUE;
+ else
+ return m_pPerInstInfo != NULL;
+ }
+
+ // All varieties of InstantiatedMethodDesc's support this method.
+ Instantiation IMD_GetMethodInstantiation()
+#ifndef BINDER
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return Instantiation(m_pPerInstInfo->GetInstantiation(), m_wNumGenericArgs);
+ }
+#else
+ ; // The binder requires a special implementation of this method as its methoddesc data structure holds the instantiation in a different way.
+#endif
+
+ PTR_Dictionary IMD_GetMethodDictionary()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pPerInstInfo;
+ }
+
+ BOOL IMD_IsGenericMethodDefinition()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return((m_wFlags2 & KindMask) == GenericMethodDefinition);
+ }
+
+ BOOL IMD_IsSharedByGenericMethodInstantiations()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return((m_wFlags2 & KindMask) == SharedMethodInstantiation);
+ }
+ BOOL IMD_IsWrapperStubWithInstantiations()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return((m_wFlags2 & KindMask) == WrapperStubWithInstantiations);
+ }
+
+ BOOL IMD_IsEnCAddedMethod()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef EnC_SUPPORTED
+ return((m_wFlags2 & KindMask) == EnCAddedMethod);
+#else
+ return FALSE;
+#endif
+ }
+
+#ifdef FEATURE_COMINTEROP
+ BOOL IMD_HasComPlusCallInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((m_wFlags2 & HasComPlusCallInfo) != 0);
+ }
+
+ void IMD_SetupGenericComPlusCall()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_wFlags2 |= InstantiatedMethodDesc::HasComPlusCallInfo;
+
+ IMD_GetComPlusCallInfo()->InitStackArgumentSize();
+ }
+
+ PTR_ComPlusCallInfo IMD_GetComPlusCallInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IMD_HasComPlusCallInfo());
+ SIZE_T size = s_ClassificationSizeTable[m_wFlags & (mdcClassification | mdcHasNonVtableSlot | mdcMethodImpl)];
+
+ if (HasNativeCodeSlot())
+ {
+ size += (*dac_cast<PTR_TADDR>(dac_cast<TADDR>(this) + size) & FIXUP_LIST_MASK) ?
+ (sizeof(NativeCodeSlot) + sizeof(FixupListSlot)) : sizeof(NativeCodeSlot);
+ }
+
+ return dac_cast<PTR_ComPlusCallInfo>(dac_cast<TADDR>(this) + size);
+ }
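+
+ // Layout sketch (illustrative, not part of the original sources): for a
+ // generic CLR->COM call the optional ComPlusCallInfo is appended after the
+ // InstantiatedMethodDesc body and its optional slots, which is what the
+ // size computation above walks past:
+ //
+ // | MethodDesc body | [NativeCodeSlot] | [FixupListSlot] | ComPlusCallInfo |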
+#endif // FEATURE_COMINTEROP
+
+ // Get the dictionary layout, if there is one
+ DictionaryLayout* IMD_GetDictionaryLayout()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (IMD_IsWrapperStubWithInstantiations() && IMD_HasMethodInstantiation())
+ return IMD_GetWrappedMethodDesc()->AsInstantiatedMethodDesc()->m_pDictLayout;
+ else
+ if (IMD_IsSharedByGenericMethodInstantiations())
+ return m_pDictLayout;
+ else
+ return NULL;
+ }
+
+#ifdef BINDER
+ void IMD_SetDictionaryLayout(DictionaryLayout *dictionaryLayout)
+ {
+
+ LIMITED_METHOD_CONTRACT;
+
+ m_pDictLayout = dictionaryLayout;
+ }
+#endif
+
+ MethodDesc* IMD_GetWrappedMethodDesc()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IMD_IsWrapperStubWithInstantiations());
+ return m_pWrappedMethodDesc.GetValue();
+ }
+
+
+
+ // Setup the IMD as shared code
+ void SetupSharedMethodInstantiation(DWORD numGenericArgs, TypeHandle *pPerInstInfo, DictionaryLayout *pDL);
+
+ // Setup the IMD as unshared code
+ void SetupUnsharedMethodInstantiation(DWORD numGenericArgs, TypeHandle *pInst);
+
+ // Setup the IMD as the special MethodDesc for a "generic" method
+ void SetupGenericMethodDefinition(IMDInternalImport *pIMDII, LoaderAllocator* pAllocator, AllocMemTracker *pamTracker,
+ Module *pModule, mdMethodDef tok);
+
+ // Setup the IMD as a wrapper around another method desc
+ void SetupWrapperStubWithInstantiations(MethodDesc* wrappedMD,DWORD numGenericArgs, TypeHandle *pGenericMethodInst);
+
+
+#ifdef EnC_SUPPORTED
+ void SetupEnCAddedMethod()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wFlags2 = EnCAddedMethod;
+ }
+#endif
+
+private:
+ enum
+ {
+ KindMask = 0x07,
+ GenericMethodDefinition = 0x00,
+ UnsharedMethodInstantiation = 0x01,
+ SharedMethodInstantiation = 0x02,
+ WrapperStubWithInstantiations = 0x03,
+
+#ifdef EnC_SUPPORTED
+ // Non-virtual method added through EditAndContinue.
+ EnCAddedMethod = 0x07,
+#endif // EnC_SUPPORTED
+
+ Unrestored = 0x08,
+
+#ifdef FEATURE_COMINTEROP
+ HasComPlusCallInfo = 0x10, // this IMD contains an optional ComPlusCallInfo
+#endif // FEATURE_COMINTEROP
+ };
+
+ friend class MethodDesc; // these fields are currently accessed by MethodDesc::Save/Restore etc.
+ union {
+ DictionaryLayout * m_pDictLayout; //SharedMethodInstantiation
+
+ FixupPointer<PTR_MethodDesc> m_pWrappedMethodDesc; // For WrapperStubWithInstantiations
+ };
+
+public: // <TODO>make private: JITinterface.cpp accesses through this </TODO>
+ // Note we can't steal bits off m_pPerInstInfo as the JIT generates code to access through it!!
+
+ // Type parameters to method (exact)
+ // For non-unboxing instantiating stubs this is actually
+ // a dictionary and further slots may hang off the end of the
+ // instantiation.
+ //
+ // For generic method definitions that are not the typical method definition (e.g. C<int>.m<U>),
+ // this field is null; to obtain the instantiation, use LoadMethodInstantiation.
+ PTR_Dictionary m_pPerInstInfo; //SHARED
+
+private:
+ WORD m_wFlags2;
+ WORD m_wNumGenericArgs;
+
+public:
+ static InstantiatedMethodDesc *FindOrCreateExactClassMethod(MethodTable *pExactMT,
+ MethodDesc *pCanonicalMD);
+
+ static InstantiatedMethodDesc* FindLoadedInstantiatedMethodDesc(MethodTable *pMT,
+ mdMethodDef methodDef,
+ Instantiation methodInst,
+ BOOL getSharedNotStub);
+
+private:
+
+ static InstantiatedMethodDesc *NewInstantiatedMethodDesc(MethodTable *pMT,
+ MethodDesc* pGenericMDescInRepMT,
+ MethodDesc* pSharedMDescForStub,
+ Instantiation methodInst,
+ BOOL getSharedNotStub);
+
+};
+
+inline PTR_MethodTable MethodDesc::GetMethodTable_NoLogging() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ MethodDescChunk *pChunk = GetMethodDescChunk();
+ PREFIX_ASSUME(pChunk != NULL);
+ return pChunk->GetMethodTable();
+}
+
+inline PTR_MethodTable MethodDesc::GetMethodTable() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ g_IBCLogger.LogMethodDescAccess(this);
+ return GetMethodTable_NoLogging();
+}
+
+#ifndef BINDER
+inline DPTR(RelativeFixupPointer<PTR_MethodTable>) MethodDesc::GetMethodTablePtr() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ MethodDescChunk *pChunk = GetMethodDescChunk();
+ PREFIX_ASSUME(pChunk != NULL);
+ return pChunk->GetMethodTablePtr();
+}
+
+inline MethodTable* MethodDesc::GetCanonicalMethodTable()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return GetMethodTable()->GetCanonicalMethodTable();
+}
+#endif // !BINDER
+
+inline mdMethodDef MethodDesc::GetMemberDef_NoLogging() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ MethodDescChunk *pChunk = GetMethodDescChunk();
+ PREFIX_ASSUME(pChunk != NULL);
+ UINT16 tokrange = pChunk->GetTokRange();
+
+ UINT16 tokremainder = m_wFlags3AndTokenRemainder & enum_flag3_TokenRemainderMask;
+ static_assert_no_msg(enum_flag3_TokenRemainderMask == METHOD_TOKEN_REMAINDER_MASK);
+
+ return MergeToken(tokrange, tokremainder);
+}
+
+inline mdMethodDef MethodDesc::GetMemberDef() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ g_IBCLogger.LogMethodDescAccess(this);
+ return GetMemberDef_NoLogging();
+}
+
+// Set the offset of this method desc in a chunk table (which allows us
+// to work back to the method table/module pointer stored at the head of
+// the table).
+inline void MethodDesc::SetChunkIndex(MethodDescChunk * pChunk)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Calculate the offset (mod 8) from the chunk table header.
+ SIZE_T offset = (BYTE*)this - (BYTE*)pChunk->GetFirstMethodDesc();
+ _ASSERTE((offset & ALIGNMENT_MASK) == 0);
+ offset >>= ALIGNMENT_SHIFT;
+
+ // Make sure that we did not overflow the BYTE
+ _ASSERTE(offset == (BYTE)offset);
+ m_chunkIndex = (BYTE)offset;
+
+ // Make sure that the MethodDescChunk is setup correctly
+ _ASSERTE(GetMethodDescChunk() == pChunk);
+}
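+
+// Worked example (illustrative, assuming 8-byte MethodDesc alignment): a
+// MethodDesc that sits 0x40 bytes past the first MethodDesc of its chunk stores
+// m_chunkIndex = 0x40 >> ALIGNMENT_SHIFT = 8; GetMethodDescChunk() reverses this
+// computation to locate the chunk header from 'this'.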
+
+inline void MethodDesc::SetMemberDef(mdMethodDef mb)
+{
+ WRAPPER_NO_CONTRACT;
+
+ UINT16 tokrange;
+ UINT16 tokremainder;
+ SplitToken(mb, &tokrange, &tokremainder);
+
+ _ASSERTE((tokremainder & ~enum_flag3_TokenRemainderMask) == 0);
+ m_wFlags3AndTokenRemainder = (m_wFlags3AndTokenRemainder & ~enum_flag3_TokenRemainderMask) | tokremainder;
+
+ if (GetMethodDescIndex() == 0)
+ {
+ GetMethodDescChunk()->SetTokenRange(tokrange);
+ }
+
+#ifdef _DEBUG
+ if (mb != 0)
+ {
+ _ASSERTE(GetMemberDef_NoLogging() == mb);
+ }
+#endif
+}
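+
+// Round-trip sketch (illustrative, not part of the original sources): SplitToken
+// divides the methodDef RID into a high "token range", stored once per chunk,
+// and a low "token remainder", stored per MethodDesc:
+//
+// UINT16 tokrange, tokremainder;
+// SplitToken(mb, &tokrange, &tokremainder);
+// _ASSERTE(MergeToken(tokrange, tokremainder) == mb); // lossless round trip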
+
+#ifdef _DEBUG
+
+#ifndef BINDER
+inline BOOL MethodDesc::SanityCheck()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+
+ // Do a simple sanity test
+ if (IsRestored())
+ {
+ // If it looks good, do a more intensive sanity test. We don't care about the result,
+ // we just want it to not AV.
+ return GetMethodTable() == m_pDebugMethodTable.GetValue() && this->GetModule() != NULL;
+ }
+
+ return TRUE;
+}
+
+#endif // !BINDER
+#endif // _DEBUG
+
+inline BOOL MethodDesc::IsEnCAddedMethod()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef BINDER
+ return FALSE;
+#else // !BINDER
+ return (GetClassification() == mcInstantiated) && AsInstantiatedMethodDesc()->IMD_IsEnCAddedMethod();
+#endif // !BINDER
+}
+
+inline BOOL MethodDesc::HasNonVtableSlot()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (m_wFlags & mdcHasNonVtableSlot) != 0;
+}
+
+inline Instantiation MethodDesc::GetMethodInstantiation() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return
+ (GetClassification() == mcInstantiated)
+ ? AsInstantiatedMethodDesc()->IMD_GetMethodInstantiation()
+ : Instantiation();
+}
+
+inline Instantiation MethodDesc::GetClassInstantiation() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return GetMethodTable()->GetInstantiation();
+}
+
+inline BOOL MethodDesc::IsGenericMethodDefinition() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ g_IBCLogger.LogMethodDescAccess(this);
+ return GetClassification() == mcInstantiated && AsInstantiatedMethodDesc()->IMD_IsGenericMethodDefinition();
+}
+
+// True if the method descriptor is an instantiation of a generic method.
+inline BOOL MethodDesc::HasMethodInstantiation() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return mcInstantiated == GetClassification() && AsInstantiatedMethodDesc()->IMD_HasMethodInstantiation();
+}
+
+#ifdef BINDER
+inline BOOL MethodDesc::IsTypicalMethodDefinition() const
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (HasMethodInstantiation() && !IsGenericMethodDefinition())
+ return FALSE;
+
+ if (HasClassInstantiation() && !GetMethodTable()->IsGenericTypeDefinition())
+ return FALSE;
+
+ return TRUE;
+}
+#endif // BINDER
+
+#include "method.inl"
+
+
+#endif // !_METHOD_H
diff --git a/src/vm/method.inl b/src/vm/method.inl
new file mode 100644
index 0000000000..0bc0c9c94a
--- /dev/null
+++ b/src/vm/method.inl
@@ -0,0 +1,222 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef _METHOD_INL_
+#define _METHOD_INL_
+
+inline BOOL MethodDesc::HasTemporaryEntryPoint()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetMethodDescChunk()->HasTemporaryEntryPoints();
+}
+
+inline InstantiatedMethodDesc* MethodDesc::AsInstantiatedMethodDesc() const
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(GetClassification() == mcInstantiated);
+ return dac_cast<PTR_InstantiatedMethodDesc>(this);
+}
+
+#ifndef BINDER
+inline BOOL MethodDesc::IsDomainNeutral()
+{
+ WRAPPER_NO_CONTRACT;
+ return !IsLCGMethod() && GetDomain()->IsSharedDomain();
+}
+#endif // !BINDER
+
+inline BOOL MethodDesc::IsZapped()
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_PREJIT
+ return GetMethodDescChunk()->IsZapped();
+#else
+ return FALSE;
+#endif
+}
+
+inline PTR_DynamicResolver DynamicMethodDesc::GetResolver()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pResolver;
+}
+
+inline SigParser MethodDesc::GetSigParser()
+{
+ WRAPPER_NO_CONTRACT;
+
+ PCCOR_SIGNATURE pSig;
+ ULONG cSig;
+ GetSig(&pSig, &cSig);
+
+ return SigParser(pSig, cSig);
+}
+
+#ifndef BINDER
+inline SigPointer MethodDesc::GetSigPointer()
+{
+ WRAPPER_NO_CONTRACT;
+
+ PCCOR_SIGNATURE pSig;
+ ULONG cSig;
+ GetSig(&pSig, &cSig);
+
+ return SigPointer(pSig, cSig);
+}
+#endif // !BINDER
+
+inline PTR_LCGMethodResolver DynamicMethodDesc::GetLCGMethodResolver()
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ PRECONDITION(IsLCGMethod());
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ return PTR_LCGMethodResolver(m_pResolver);
+}
+
+inline PTR_ILStubResolver DynamicMethodDesc::GetILStubResolver()
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ PRECONDITION(IsILStub());
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ return PTR_ILStubResolver(m_pResolver);
+}
+
+inline PTR_DynamicMethodDesc MethodDesc::AsDynamicMethodDesc()
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ PRECONDITION(IsDynamicMethod());
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ return dac_cast<PTR_DynamicMethodDesc>(this);
+}
+
+inline bool MethodDesc::IsDynamicMethod()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (mcDynamic == GetClassification());
+}
+
+inline bool MethodDesc::IsLCGMethod()
+{
+ WRAPPER_NO_CONTRACT;
+ return ((mcDynamic == GetClassification()) && dac_cast<PTR_DynamicMethodDesc>(this)->IsLCGMethod());
+}
+
+inline bool MethodDesc::IsILStub()
+{
+ WRAPPER_NO_CONTRACT;
+
+ g_IBCLogger.LogMethodDescAccess(this);
+ return ((mcDynamic == GetClassification()) && dac_cast<PTR_DynamicMethodDesc>(this)->IsILStub());
+}
+
+inline BOOL MethodDesc::IsQCall()
+{
+ WRAPPER_NO_CONTRACT;
+ return (IsNDirect() && dac_cast<PTR_NDirectMethodDesc>(this)->IsQCall());
+}
+
+#ifdef FEATURE_COMINTEROP
+FORCEINLINE DWORD MethodDesc::IsGenericComPlusCall()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (mcInstantiated == GetClassification() && AsInstantiatedMethodDesc()->IMD_HasComPlusCallInfo());
+}
+inline void MethodDesc::SetupGenericComPlusCall()
+{
+ LIMITED_METHOD_CONTRACT;
+ AsInstantiatedMethodDesc()->IMD_SetupGenericComPlusCall();
+}
+#endif // FEATURE_COMINTEROP
+
+#ifndef FEATURE_REMOTING
+
+inline BOOL MethodDesc::MayBeRemotingIntercepted()
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+
+inline BOOL MethodDesc::IsRemotingInterceptedViaPrestub()
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+
+inline BOOL MethodDesc::IsRemotingInterceptedViaVirtualDispatch()
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+
+#endif // !FEATURE_REMOTING
+
+#ifdef FEATURE_COMINTEROP
+
+// static
+inline ComPlusCallInfo *ComPlusCallInfo::FromMethodDesc(MethodDesc *pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (pMD->IsComPlusCall())
+ {
+ return ((ComPlusCallMethodDesc *)pMD)->m_pComPlusCallInfo;
+ }
+ else if (pMD->IsEEImpl())
+ {
+ return ((DelegateEEClass *)pMD->GetClass())->m_pComPlusCallInfo;
+ }
+ else
+ {
+ _ASSERTE(pMD->IsGenericComPlusCall());
+ return pMD->AsInstantiatedMethodDesc()->IMD_GetComPlusCallInfo();
+ }
+}
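+
+// Usage sketch (illustrative, not part of the original sources): FromMethodDesc
+// lets callers handle all three COM-callable MethodDesc shapes uniformly:
+//
+// ComPlusCallInfo *pInfo = ComPlusCallInfo::FromMethodDesc(pMD);
+// MethodTable *pItfMT = pInfo->m_pInterfaceMT; // interface the call goes through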
+
+#endif //FEATURE_COMINTEROP
+
+#ifndef FEATURE_TYPEEQUIVALENCE
+inline BOOL HasTypeEquivalentStructParameters()
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+#endif // FEATURE_TYPEEQUIVALENCE
+
+#ifndef BINDER
+inline ReJitManager * MethodDesc::GetReJitManager()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetModule()->GetReJitManager();
+}
+#endif // !BINDER
+
+#endif // _METHOD_INL_
+
diff --git a/src/vm/methodimpl.cpp b/src/vm/methodimpl.cpp
new file mode 100644
index 0000000000..5f3b41c83d
--- /dev/null
+++ b/src/vm/methodimpl.cpp
@@ -0,0 +1,286 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: methodimpl.cpp
+//
+
+
+//
+
+//
+// ============================================================================
+
+#include "common.h"
+#include "methodimpl.h"
+
+DWORD MethodImpl::FindSlotIndex(DWORD slot)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(GetSlots()));
+ } CONTRACTL_END;
+
+ DWORD dwSize = GetSize();
+ if(dwSize == 0) {
+ return INVALID_INDEX;
+ }
+
+ // Simple binary search
+ PTR_DWORD rgSlots = GetSlots();
+ INT32 l = 0;
+ INT32 r = dwSize - 1;
+ INT32 pivot;
+
+ while(1) {
+ pivot = (l + r) / 2;
+
+ if(rgSlots[pivot] == slot) {
+ break; // found it
+ }
+ else if(rgSlots[pivot] < slot) {
+ l = pivot + 1;
+ }
+ else {
+ r = pivot - 1;
+ }
+
+ if(l > r) {
+ return INVALID_INDEX; // Not here
+ }
+ }
+
+ CONSISTENCY_CHECK(pivot >= 0);
+ return (DWORD)pivot;
+}
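+
+// Worked example (illustrative): with sorted slots {2, 5, 9}, FindSlotIndex(5)
+// probes pivot 1 and returns 1; FindSlotIndex(7) narrows l/r until l > r and
+// returns INVALID_INDEX.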
+
+PTR_MethodDesc MethodImpl::FindMethodDesc(DWORD slot, PTR_MethodDesc defaultReturn)
+{
+ CONTRACTL
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ DWORD slotIndex = FindSlotIndex(slot);
+ if (slotIndex == INVALID_INDEX) {
+ return defaultReturn;
+ }
+
+ PTR_MethodDesc result = pImplementedMD[slotIndex]; // The method descs are not offset by one
+
+ // Prejitted images may leave NULL in this table if
+ // the methoddesc is declared in another module.
+ // In this case we need to manually compute & restore it
+ // from the slot number.
+
+ if (result == NULL)
+#ifndef DACCESS_COMPILE
+ result = RestoreSlot(slotIndex, defaultReturn->GetMethodTable());
+#else // DACCESS_COMPILE
+ DacNotImpl();
+#endif // DACCESS_COMPILE
+
+ return result;
+}
+
+#ifndef DACCESS_COMPILE
+
+MethodDesc *MethodImpl::RestoreSlot(DWORD index, MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(pdwSlots));
+ }
+ CONTRACTL_END
+
+ MethodDesc *result;
+
+ PREFIX_ASSUME(pdwSlots != NULL);
+ DWORD slot = GetSlots()[index];
+
+ // Since the overridden method is in a different module, we
+ // are guaranteed that it is from a different class. It is
+ // either an override of a parent virtual method or parent-implemented
+ // interface, or of an interface that this class has introduced.
+
+ // In the former 2 cases, the slot number will be in the parent's
+ // vtable section, and we can retrieve the implemented MethodDesc from
+ // there. In the latter case, we can search through our interface
+ // map to determine which interface it is from.
+
+ MethodTable *pParentMT = pMT->GetParentMethodTable();
+ CONSISTENCY_CHECK(pParentMT != NULL && slot < pParentMT->GetNumVirtuals());
+ {
+ result = pParentMT->GetMethodDescForSlot(slot);
+ }
+
+ _ASSERTE(result != NULL);
+
+ // Don't worry about races since we would all be setting the same result
+ if (EnsureWritableExecutablePagesNoThrow(&pImplementedMD[index], sizeof(pImplementedMD[index])))
+ pImplementedMD[index] = result;
+
+ return result;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////
+void MethodImpl::SetSize(LoaderHeap *pHeap, AllocMemTracker *pamTracker, DWORD size)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(pdwSlots==NULL && pImplementedMD==NULL);
+ INJECT_FAULT(ThrowOutOfMemory());
+ } CONTRACTL_END;
+
+ if(size > 0) {
+ // An array of DWORDs, the first entry representing count, and the rest representing slot numbers
+ S_SIZE_T cbCountAndSlots = S_SIZE_T(sizeof(DWORD)) + // DWORD for the total count of slots
+ S_SIZE_T(size) * S_SIZE_T(sizeof(DWORD)); // DWORD each for the slot numbers
+
+ // MethodDesc* for each of the implemented methods
+ S_SIZE_T cbMethodDescs = S_SIZE_T(size) * S_SIZE_T(sizeof(MethodDesc *));
+
+ // Need to align-up the slot entries so that the MethodDesc* array starts on a pointer boundary.
+ cbCountAndSlots.AlignUp(sizeof(MethodDesc*));
+ S_SIZE_T cbTotal = cbCountAndSlots + cbMethodDescs;
+ if(cbTotal.IsOverflow())
+ ThrowOutOfMemory();
+
+ // Allocate the memory.
+ LPBYTE pAllocData = (BYTE*)pamTracker->Track(pHeap->AllocMem(cbTotal));
+
+ // Set the count and slot array
+ pdwSlots = (DWORD*)pAllocData;
+
+ // Set the MethodDesc* array. Make sure to adjust for alignment.
+ pImplementedMD = (MethodDesc**)ALIGN_UP(pAllocData + cbCountAndSlots.Value(), sizeof(MethodDesc*));
+
+ // Store the count in the first entry
+ *pdwSlots = size;
+ }
+}
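+
+// Resulting layout sketch (illustrative), for size == 2 on a 64-bit target:
+//
+// pdwSlots -------> | count=2 | slot0 | slot1 | pad | (DWORDs, aligned up to 16 bytes)
+// pImplementedMD -> | MethodDesc*0 | MethodDesc*1 | (starts on a pointer boundary)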
+
+///////////////////////////////////////////////////////////////////////////////////////
+void MethodImpl::SetData(DWORD* slots, MethodDesc** md)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(pdwSlots));
+ } CONTRACTL_END;
+
+ DWORD dwSize = *pdwSlots;
+ memcpy(&(pdwSlots[1]), slots, dwSize*sizeof(DWORD));
+ memcpy(pImplementedMD, md, dwSize*sizeof(MethodDesc*));
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+void MethodImpl::Save(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD size = GetSize();
+ _ASSERTE(size > 0);
+
+ image->StoreStructure(pdwSlots, (size+1)*sizeof(DWORD),
+ DataImage::ITEM_METHOD_DESC_COLD,
+ sizeof(DWORD));
+ image->StoreStructure(pImplementedMD, size*sizeof(MethodDesc*),
+ DataImage::ITEM_METHOD_DESC_COLD,
+ sizeof(MethodDesc*));
+}
+
+void MethodImpl::Fixup(DataImage *image, PVOID p, SSIZE_T offset)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD size = GetSize();
+ _ASSERTE(size > 0);
+
+ for (DWORD iMD = 0; iMD < size; iMD++)
+ {
+ // <TODO> Why not use FixupMethodDescPointer? </TODO>
+ // <TODO> Does it matter if the MethodDesc needs a restore? </TODO>
+
+ MethodDesc * pMD = pImplementedMD[iMD];
+
+ if (image->CanEagerBindToMethodDesc(pMD) &&
+ image->CanHardBindToZapModule(pMD->GetLoaderModule()))
+ {
+ image->FixupPointerField(pImplementedMD, iMD * sizeof(MethodDesc *));
+ }
+ else
+ {
+ image->ZeroPointerField(pImplementedMD, iMD * sizeof(MethodDesc *));
+ }
+ }
+
+ image->FixupPointerField(p, offset + offsetof(MethodImpl, pdwSlots));
+ image->FixupPointerField(p, offset + offsetof(MethodImpl, pImplementedMD));
+}
+
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#endif //!DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void
+MethodImpl::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+#ifndef STUB_DISPATCH_ALL
+ CONSISTENCY_CHECK_MSG(FALSE, "Stub Dispatch forbidden code");
+#else // STUB_DISPATCH_ALL
+ // 'this' memory should already be enumerated as
+ // part of the base MethodDesc.
+
+ if (pdwSlots.IsValid() && GetSize())
+ {
+ ULONG32 numSlots = GetSize();
+ DacEnumMemoryRegion(dac_cast<TADDR>(pdwSlots),
+ (numSlots + 1) * sizeof(DWORD));
+
+ if (pImplementedMD.IsValid())
+ {
+ DacEnumMemoryRegion(dac_cast<TADDR>(pImplementedMD),
+ numSlots * sizeof(PTR_MethodDesc));
+ for (DWORD i = 0; i < numSlots; i++)
+ {
+ PTR_MethodDesc methodDesc = pImplementedMD[i];
+ if (methodDesc.IsValid())
+ {
+ methodDesc->EnumMemoryRegions(flags);
+ }
+ }
+ }
+ }
+#endif // STUB_DISPATCH_ALL
+}
+
+#endif //DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+MethodImpl::Iterator::Iterator(MethodDesc *pMD) : m_pMD(pMD), m_pImpl(NULL), m_iCur(0)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (pMD->IsMethodImpl())
+ {
+ m_pImpl = pMD->GetMethodImpl();
+ }
+}
+#endif //!DACCESS_COMPILE
+
diff --git a/src/vm/methodimpl.h b/src/vm/methodimpl.h
new file mode 100644
index 0000000000..3cac85f08b
--- /dev/null
+++ b/src/vm/methodimpl.h
@@ -0,0 +1,132 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: methodimpl.h
+//
+
+
+//
+
+//
+// ============================================================================
+
+#ifndef _METHODIMPL_H
+#define _METHODIMPL_H
+
+class MethodDesc;
+
+// <TODO>@TODO: This is very bloated. We need to trim this down a lot. However,
+// we need to keep it on an 8 byte boundary.</TODO>
+class MethodImpl
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+#ifdef BINDER
+ friend class MdilModule; // this allows it to get the offset of pdwSlots and pImplementedMD
+#endif
+
+ PTR_DWORD pdwSlots; // Maintains the slots in sorted order, the first entry is the size
+ DPTR(PTR_MethodDesc) pImplementedMD;
+
+public:
+
+#ifndef DACCESS_COMPILE
+ ///////////////////////////////////////////////////////////////////////////////////////
+ class Iterator
+ {
+ private:
+ MethodDesc *m_pMD;
+ MethodImpl *m_pImpl;
+ DWORD m_iCur;
+
+ public:
+ Iterator(MethodDesc *pMD);
+ inline BOOL IsValid()
+ { WRAPPER_NO_CONTRACT; return ((m_pImpl != NULL) && (m_iCur < m_pImpl->GetSize())); }
+ inline void Next()
+ { WRAPPER_NO_CONTRACT; if (IsValid()) m_iCur++; }
+ inline WORD GetSlot()
+ { WRAPPER_NO_CONTRACT; CONSISTENCY_CHECK(IsValid()); _ASSERTE(FitsIn<WORD>(m_pImpl->GetSlots()[m_iCur])); return static_cast<WORD>(m_pImpl->GetSlots()[m_iCur]); }
+ inline MethodDesc *GetMethodDesc()
+ { WRAPPER_NO_CONTRACT; return m_pImpl->FindMethodDesc(GetSlot(), (PTR_MethodDesc) m_pMD); }
+ };
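+
+ // Usage sketch (illustrative, not part of the original sources); the loop
+ // body never runs unless pMD->IsMethodImpl():
+ //
+ // MethodImpl::Iterator it(pMD);
+ // for (; it.IsValid(); it.Next())
+ // {
+ // WORD slot = it.GetSlot();
+ // MethodDesc *pImplemented = it.GetMethodDesc();
+ // // ... use slot/pImplemented ...
+ // }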
+
+ ///////////////////////////////////////////////////////////////////////////////////////
+ inline MethodDesc** GetImplementedMDs()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+ } CONTRACTL_END;
+ return pImplementedMD;
+ }
+#endif // !DACCESS_COMPILE
+
+ ///////////////////////////////////////////////////////////////////////////////////////
+ inline DWORD GetSize()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+ } CONTRACTL_END;
+
+ if(pdwSlots == NULL)
+ return 0;
+ else
+ return *pdwSlots;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////////////
+ inline PTR_DWORD GetSlots()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ if(pdwSlots == NULL)
+ return NULL;
+ else
+ return pdwSlots + 1;
+ }
+
+#ifndef DACCESS_COMPILE
+
+ ///////////////////////////////////////////////////////////////////////////////////////
+ void SetSize(LoaderHeap *pHeap, AllocMemTracker *pamTracker, DWORD size);
+
+ ///////////////////////////////////////////////////////////////////////////////////////
+ void SetData(DWORD* slots, MethodDesc** md);
+
+#endif // !DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+#ifdef FEATURE_PREJIT
+ void Save(DataImage *image);
+ void Fixup(DataImage *image, PVOID p, SSIZE_T offset);
+#endif // FEATURE_PREJIT
+
+
+ // Returns the method desc for the replaced slot.
+ PTR_MethodDesc FindMethodDesc(DWORD slot, PTR_MethodDesc defaultReturn);
+
+private:
+ static const DWORD INVALID_INDEX = (DWORD)(-1);
+ DWORD FindSlotIndex(DWORD slot);
+#ifndef DACCESS_COMPILE
+ MethodDesc* RestoreSlot(DWORD slotIndex, MethodTable *pMT);
+#endif
+
+};
+
+#endif // !_METHODIMPL_H
diff --git a/src/vm/methoditer.cpp b/src/vm/methoditer.cpp
new file mode 100644
index 0000000000..44bd8b8ce7
--- /dev/null
+++ b/src/vm/methoditer.cpp
@@ -0,0 +1,371 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: MethodIter.cpp
+
+// Iterate through jitted instances of a method.
+//*****************************************************************************
+
+
+#include "common.h"
+#include "methoditer.h"
+
+
+//---------------------------------------------------------------------------------------
+//
+// Iterates to the next MethodDesc. Updates the holder only if the assembly differs from the previous one.
+// The caller should not release (i.e. change) the holder explicitly between calls; otherwise a collectible
+// assembly might be left without a reference and get deallocated (even the native part).
+//
+BOOL LoadedMethodDescIterator::Next(
+ CollectibleAssemblyHolder<DomainAssembly *> * pDomainAssemblyHolder)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END
+
+ if (!m_fFirstTime)
+ {
+ // This is the 2nd (or later) time we have called Next().
+
+ // If the method + type is not generic, then nothing more to iterate.
+ if (!m_mainMD->HasClassOrMethodInstantiation())
+ {
+ *pDomainAssemblyHolder = NULL;
+ return FALSE;
+ }
+ goto ADVANCE_METHOD;
+ }
+
+ m_fFirstTime = FALSE;
+
+ // This is the 1st time we've called Next(), so we must initialize the iterator.
+ if (m_mainMD == NULL)
+ {
+ m_mainMD = m_module->LookupMethodDef(m_md);
+ }
+
+ // Note: m_mainMD should be sufficiently restored to allow us to get
+ // at the method table, flags, token, etc.
+ if (m_mainMD == NULL)
+ {
+ *pDomainAssemblyHolder = NULL;
+ return FALSE;
+ }
+
+ // Needs to work w/ non-generic methods too.
+ // NOTE: this behavior seems odd. We appear to return the non-generic method even if
+ // that method doesn't reside in the set of assemblies defined by m_assemblyIterationMode.
+ // Presumably all the callers expect or at least cope with this so I'm just commenting without
+ // changing anything right now.
+ if (!m_mainMD->HasClassOrMethodInstantiation())
+ {
+ *pDomainAssemblyHolder = NULL;
+ return TRUE;
+ }
+
+ if (m_assemblyIterationMode == kModeSharedDomainAssemblies)
+ {
+ // Nothing to do... m_sharedAssemblyIterator is initialized on construction
+ }
+ else
+ {
+ m_assemIterator = m_pAppDomain->IterateAssembliesEx(m_assemIterationFlags);
+ }
+
+ADVANCE_ASSEMBLY:
+ if (m_assemblyIterationMode == kModeSharedDomainAssemblies)
+ {
+ if (!m_sharedAssemblyIterator.Next())
+ return FALSE;
+
+ m_sharedModuleIterator = m_sharedAssemblyIterator.GetAssembly()->IterateModules();
+ }
+ else
+ {
+ if (!m_assemIterator.Next(pDomainAssemblyHolder))
+ {
+ _ASSERTE(*pDomainAssemblyHolder == NULL);
+ return FALSE;
+ }
+
+ if (m_assemblyIterationMode == kModeUnsharedADAssemblies)
+ {
+ // We're supposed to ignore shared assemblies, so check for them now
+ if ((*pDomainAssemblyHolder)->GetAssembly()->IsDomainNeutral())
+ {
+ goto ADVANCE_ASSEMBLY;
+ }
+ }
+
+#ifdef _DEBUG
+ dbg_m_pDomainAssembly = *pDomainAssemblyHolder;
+#endif //_DEBUG
+
+ m_moduleIterator = (*pDomainAssemblyHolder)->IterateModules(m_moduleIterationFlags);
+ }
+
+
+ADVANCE_MODULE:
+ if (m_assemblyIterationMode == kModeSharedDomainAssemblies)
+ {
+ if (!NextSharedModule())
+ goto ADVANCE_ASSEMBLY;
+ }
+ else
+ {
+ if (!m_moduleIterator.Next())
+ goto ADVANCE_ASSEMBLY;
+ }
+
+ if (GetCurrentModule()->IsResource())
+ goto ADVANCE_MODULE;
+
+ if (m_mainMD->HasClassInstantiation())
+ {
+ m_typeIterator.Reset();
+ }
+ else
+ {
+ m_startedNonGenericType = FALSE;
+ }
+
+ADVANCE_TYPE:
+ if (m_mainMD->HasClassInstantiation())
+ {
+ if (!GetCurrentModule()->GetAvailableParamTypes()->FindNext(&m_typeIterator, &m_typeIteratorEntry))
+ goto ADVANCE_MODULE;
+ if (CORCOMPILE_IS_POINTER_TAGGED(m_typeIteratorEntry->GetTypeHandle().AsTAddr()))
+ goto ADVANCE_TYPE;
+
+ //if (m_typeIteratorEntry->data != TypeHandle(m_mainMD->GetMethodTable()))
+ // goto ADVANCE_TYPE;
+
+ // When looking up the AvailableParamTypes table we have to be really careful since
+ // the entries may be unrestored, and may have all sorts of encoded tokens in them.
+ // Similar logic occurs in the Lookup function for that table. We will clean this
+ // up in Whidbey Beta2.
+ TypeHandle th = m_typeIteratorEntry->GetTypeHandle();
+
+ if (th.IsEncodedFixup())
+ goto ADVANCE_TYPE;
+
+ if (th.IsTypeDesc())
+ goto ADVANCE_TYPE;
+
+ MethodTable *pMT = th.AsMethodTable();
+
+ if (!pMT->IsRestored())
+ goto ADVANCE_TYPE;
+
+ // Check the class token
+ if (pMT->GetTypeDefRid() != m_mainMD->GetMethodTable()->GetTypeDefRid())
+ goto ADVANCE_TYPE;
+
+ // Check the module is correct
+ if (pMT->GetModule() != m_module)
+ goto ADVANCE_TYPE;
+ }
+ else if (m_startedNonGenericType)
+ {
+ goto ADVANCE_MODULE;
+ }
+ else
+ {
+ m_startedNonGenericType = TRUE;
+ }
+
+ if (m_mainMD->HasMethodInstantiation())
+ {
+ m_methodIterator.Reset();
+ }
+ else
+ {
+ m_startedNonGenericMethod = FALSE;
+ }
+
+ADVANCE_METHOD:
+ if (m_mainMD->HasMethodInstantiation())
+ {
+ if (!GetCurrentModule()->GetInstMethodHashTable()->FindNext(&m_methodIterator, &m_methodIteratorEntry))
+ goto ADVANCE_TYPE;
+ if (CORCOMPILE_IS_POINTER_TAGGED(dac_cast<TADDR>(m_methodIteratorEntry->GetMethod())))
+ goto ADVANCE_METHOD;
+ if (!m_methodIteratorEntry->GetMethod()->IsRestored())
+ goto ADVANCE_METHOD;
+ if (m_methodIteratorEntry->GetMethod()->GetModule() != m_module)
+ goto ADVANCE_METHOD;
+ if (m_methodIteratorEntry->GetMethod()->GetMemberDef() != m_md)
+ goto ADVANCE_METHOD;
+ }
+ else if (m_startedNonGenericMethod)
+ {
+ goto ADVANCE_TYPE;
+ }
+ else
+ {
+ m_startedNonGenericMethod = TRUE;
+ }
+
+ // Note: We don't need to keep the assembly alive in DAC - see code:CollectibleAssemblyHolder#CAH_DAC
+#ifndef DACCESS_COMPILE
+ _ASSERTE_MSG(
+ ((m_assemblyIterationMode == kModeSharedDomainAssemblies) ||
+ (*pDomainAssemblyHolder == dbg_m_pDomainAssembly)),
+ "Caller probably modified the assembly holder, which he shouldn't - see method comment.");
+#endif //DACCESS_COMPILE
+
+ return TRUE;
+} // LoadedMethodDescIterator::Next
+
+
+Module * LoadedMethodDescIterator::GetCurrentModule()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ if (m_assemblyIterationMode == kModeSharedDomainAssemblies)
+ {
+ return m_sharedModuleIterator.GetModule();
+ }
+ return m_moduleIterator.GetLoadedModule();
+}
+
+
+BOOL LoadedMethodDescIterator::NextSharedModule()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(m_assemblyIterationMode == kModeSharedDomainAssemblies);
+
+ while (m_sharedModuleIterator.Next())
+ {
+ // NOTE: If this code is to be shared with the dbgapi, the dbgapi
+ // will probably want to substitute its own test for "loadedness"
+ // here.
+#ifdef PROFILING_SUPPORTED
+ Module * pModule = m_sharedModuleIterator.GetModule();
+ if (!pModule->IsProfilerNotified())
+ continue;
+#endif // PROFILING_SUPPORTED
+
+ // If we made it this far, pModule is suitable for iterating over
+ return TRUE;
+ }
+ return FALSE;
+}
+
+MethodDesc *LoadedMethodDescIterator::Current()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(m_mainMD));
+ }
+ CONTRACTL_END
+
+
+ if (m_mainMD->HasMethodInstantiation())
+ {
+ _ASSERTE(m_methodIteratorEntry);
+ return m_methodIteratorEntry->GetMethod();
+ }
+
+ if (!m_mainMD->HasClassInstantiation())
+ {
+ // No method or class instantiation, so it's not generic.
+ return m_mainMD;
+ }
+
+ MethodTable *pMT = m_typeIteratorEntry->GetTypeHandle().GetMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+ _ASSERTE(pMT);
+
+ return pMT->GetMethodDescForSlot(m_mainMD->GetSlot());
+}
+
+// Initialize the iterator. It will cover generics + prejitted code,
+// but it is not EnC-aware.
+void
+LoadedMethodDescIterator::Start(
+ AppDomain * pAppDomain,
+ Module *pModule,
+ mdMethodDef md,
+ AssemblyIterationMode assemblyIterationMode,
+ AssemblyIterationFlags assemblyIterationFlags,
+ ModuleIterationOption moduleIterationFlags)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(pAppDomain, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Specifying different assembly/module iteration flags has only been tested for UnsharedADAssemblies mode so far.
+ // It probably doesn't work as you would expect in other modes. In particular the shared assembly iterator
+ // doesn't use flags, and the logic in this iterator does a hard-coded filter that roughly matches the unshared
+ // mode if you had specified these flags:
+ // Assembly: Loading | Loaded | Execution
+ // Module: kModIterIncludeAvailableToProfilers
+ _ASSERTE((assemblyIterationMode == kModeUnsharedADAssemblies) ||
+ (assemblyIterationFlags == (AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution)));
+ _ASSERTE((assemblyIterationMode == kModeUnsharedADAssemblies) ||
+ (moduleIterationFlags == kModIterIncludeLoaded));
+
+ m_assemblyIterationMode = assemblyIterationMode;
+ m_assemIterationFlags = assemblyIterationFlags;
+ m_moduleIterationFlags = moduleIterationFlags;
+ m_mainMD = NULL;
+ m_module = pModule;
+ m_md = md;
+ m_pAppDomain = pAppDomain;
+ m_fFirstTime = TRUE;
+
+ // If we're not iterating through the SharedDomain, caller must specify the
+ // pAppDomain to search.
+ _ASSERTE((assemblyIterationMode == kModeSharedDomainAssemblies) || (pAppDomain != NULL));
+ _ASSERTE(TypeFromToken(m_md) == mdtMethodDef);
+}
+
+// This is a special init for DAC only.
+// @TODO: change it to DAC compile only.
+void
+LoadedMethodDescIterator::Start(
+ AppDomain *pAppDomain,
+ Module *pModule,
+ mdMethodDef md,
+ MethodDesc *pMethodDesc)
+{
+ Start(pAppDomain, pModule, md, kModeAllADAssemblies);
+ m_mainMD = pMethodDesc;
+}
+
+LoadedMethodDescIterator::LoadedMethodDescIterator(void)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_mainMD = NULL;
+ m_module = NULL;
+ m_md = mdTokenNil;
+ m_pAppDomain = NULL;
+}
diff --git a/src/vm/methoditer.h b/src/vm/methoditer.h
new file mode 100644
index 0000000000..6383c4ac8e
--- /dev/null
+++ b/src/vm/methoditer.h
@@ -0,0 +1,128 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef _METHODDESCITER_H_
+#define _METHODDESCITER_H_
+
+#include "instmethhash.h"
+#include "method.hpp"
+#include "appdomain.hpp"
+#include "domainfile.h"
+#include "typehash.h"
+
+
+// Iterate all the currently loaded instantiations of a mdMethodDef
+// in a given AppDomain. Can be used for both generic + nongeneric methods.
+// This may give back duplicate entries, and it may give back some extra
+// MethodDescs for which HasNativeCode() returns false.
+// Regarding EnC: MethodDescs only match the latest version in an EnC case.
+// Thus this iterator does not go through all previous EnC versions.
+
+// This iterator is almost a nop for the non-generic case.
+// It is currently not an efficient implementation for the generic case,
+// as we search every entry of the ParamTypes and/or InstMeth tables in every module.
+// It is possible we may have
+// to make this more efficient, but it should not be used very often (only
+// when debugging prejitted generic code, and then only when updating
+// methodInfos after the load of a new module, and also when fetching
+// the native code ranges for generic code).
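+//
+// Usage sketch (illustrative, not part of the original sources):
+//
+// LoadedMethodDescIterator it;
+// it.Start(pAppDomain, pModule, md, LoadedMethodDescIterator::kModeAllADAssemblies);
+// CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+// while (it.Next(&pDomainAssembly))
+// {
+// MethodDesc *pMD = it.Current(); // may yield duplicates, see above
+// // ... inspect pMD ...
+// }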
+class LoadedMethodDescIterator
+{
+ Module * m_module;
+ mdMethodDef m_md;
+ MethodDesc * m_mainMD;
+ AppDomain * m_pAppDomain;
+
+ // The following hold the state of the iteration....
+ // Yes we iterate everything for the moment - we need
+ // to get every single module. Ideally when finding debugging information
+ // we should only iterate freshly added modules. We would also like to only
+ // iterate the relevant entries of the hash tables but that means changing the
+ // hash functions.
+
+ // These are used when iterating over an AppDomain
+ AppDomain::AssemblyIterator m_assemIterator;
+ DomainModuleIterator m_moduleIterator;
+ AssemblyIterationFlags m_assemIterationFlags;
+ ModuleIterationOption m_moduleIterationFlags;
+
+ // These are used when iterating over the SharedDomain
+ SharedDomain::SharedAssemblyIterator m_sharedAssemblyIterator;
+ Assembly::ModuleIterator m_sharedModuleIterator;
+
+ EETypeHashTable::Iterator m_typeIterator;
+ EETypeHashEntry * m_typeIteratorEntry;
+ BOOL m_startedNonGenericType;
+ InstMethodHashTable::Iterator m_methodIterator;
+ InstMethodHashEntry * m_methodIteratorEntry;
+ BOOL m_startedNonGenericMethod;
+ BOOL m_fFirstTime;
+
+#ifdef _DEBUG
+ DomainAssembly * dbg_m_pDomainAssembly;
+#endif //_DEBUG
+
+public:
+ // Defines the set of assemblies that LoadedMethodDescIterator should consider.
+ // Typical usages:
+ // * Debugger controller (for setting breakpoint) just uses kModeAllADAssemblies.
+ // * RejitManager uses the iterator once with kModeSharedDomainAssemblies, and
+ // then a bunch of times (once per AD) with kModeUnsharedADAssemblies to
+ // ensure all assemblies in all ADs are considered, and to avoid unnecessary
+ // dupes for domain-neutral assemblies.
+ enum AssemblyIterationMode
+ {
+ // Default, used by debugger's breakpoint controller. Iterates through all
+ // Assemblies associated with the specified AppDomain
+ kModeAllADAssemblies,
+
+ // Iterate through only the *unshared* assemblies associated with the specified
+ // AppDomain.
+ kModeUnsharedADAssemblies,
+
+ // Rather than iterating through Assemblies associated with an AppDomain, just
+ // iterate over all Assemblies associated with the SharedDomain
+ kModeSharedDomainAssemblies,
+ };
+
+ // Iterates to the next MethodDesc. Updates the holder only if the assembly differs from the previous one.
+ // The caller should not release (i.e. change) the holder explicitly between calls; otherwise a collectible
+ // assembly might be left without a reference and get deallocated (even the native part).
+ BOOL Next(CollectibleAssemblyHolder<DomainAssembly *> * pDomainAssemblyHolder);
+ MethodDesc *Current();
+ void Start(AppDomain * pAppDomain,
+ Module *pModule,
+ mdMethodDef md,
+ AssemblyIterationMode assemblyIterationMode,
+ AssemblyIterationFlags assemIterationFlags = (AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution),
+ ModuleIterationOption moduleIterationFlags = kModIterIncludeLoaded);
+ void Start(AppDomain * pAppDomain, Module *pModule, mdMethodDef md, MethodDesc *pDesc);
+
+ LoadedMethodDescIterator(
+ AppDomain * pAppDomain,
+ Module *pModule,
+ mdMethodDef md,
+ AssemblyIterationMode assemblyIterationMode = kModeAllADAssemblies,
+ AssemblyIterationFlags assemblyIterationFlags = (AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution),
+ ModuleIterationOption moduleIterationFlags = kModIterIncludeLoaded)
+ {
+ LIMITED_METHOD_CONTRACT;
+ Start(pAppDomain, pModule, md, assemblyIterationMode, assemblyIterationFlags, moduleIterationFlags);
+ }
+ LoadedMethodDescIterator(void);
+
+protected:
+ AssemblyIterationMode m_assemblyIterationMode;
+ BOOL m_fSharedDomain;
+
+ Module * GetCurrentModule();
+ BOOL NextSharedModule();
+
+}; // class LoadedMethodDescIterator
+
+
+#endif // _METHODDESCITER_H_
diff --git a/src/vm/methodtable.cpp b/src/vm/methodtable.cpp
new file mode 100644
index 0000000000..35db4ac651
--- /dev/null
+++ b/src/vm/methodtable.cpp
@@ -0,0 +1,9103 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: methodtable.cpp
+//
+
+
+//
+
+//
+// ============================================================================
+
+#include "common.h"
+
+#include "clsload.hpp"
+#include "method.hpp"
+#include "class.h"
+#include "classcompat.h"
+#include "object.h"
+#include "field.h"
+#include "util.hpp"
+#include "excep.h"
+#include "siginfo.hpp"
+#include "threads.h"
+#include "stublink.h"
+#include "ecall.h"
+#include "dllimport.h"
+#include "gcdesc.h"
+#include "verifier.hpp"
+#include "jitinterface.h"
+#include "eeconfig.h"
+#include "log.h"
+#include "fieldmarshaler.h"
+#include "cgensys.h"
+#include "gc.h"
+#include "security.h"
+#include "dbginterface.h"
+#include "comdelegate.h"
+#include "eventtrace.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "eeprofinterfaces.h"
+#include "dllimportcallback.h"
+#include "listlock.h"
+#include "methodimpl.h"
+#include "guidfromname.h"
+#include "stackprobe.h"
+#include "encee.h"
+#include "encee.h"
+#include "comsynchronizable.h"
+#include "customattribute.h"
+#include "virtualcallstub.h"
+#include "contractimpl.h"
+#ifdef FEATURE_PREJIT
+#include "zapsig.h"
+#endif //FEATURE_PREJIT
+
+#include "hostexecutioncontext.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "comcallablewrapper.h"
+#include "clrtocomcall.h"
+#include "runtimecallablewrapper.h"
+#include "winrttypenameconverter.h"
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+#include "typeequivalencehash.hpp"
+#endif
+
+#include "listlock.inl"
+#include "generics.h"
+#include "genericdict.h"
+#include "typestring.h"
+#include "typedesc.h"
+#ifdef FEATURE_REMOTING
+#include "crossdomaincalls.h"
+#endif
+#include "array.h"
+
+#ifdef FEATURE_INTERPRETER
+#include "interpreter.h"
+#endif // FEATURE_INTERPRETER
+
+#ifndef DACCESS_COMPILE
+
+// Typedef for string comparison functions.
+typedef int (__cdecl *UTF8StringCompareFuncPtr)(const char *, const char *);
+
+MethodDataCache *MethodTable::s_pMethodDataCache = NULL;
+BOOL MethodTable::s_fUseMethodDataCache = FALSE;
+BOOL MethodTable::s_fUseParentMethodData = FALSE;
+
+#ifdef _DEBUG
+extern unsigned g_dupMethods;
+#endif
+
+#endif // !DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+//==========================================================================================
+class MethodDataCache
+{
+ typedef MethodTable::MethodData MethodData;
+
+ public:
+ // Ctor. Allocates cEntries entries. Throws.
+ static UINT32 GetObjectSize(UINT32 cEntries);
+ MethodDataCache(UINT32 cEntries);
+
+ MethodData *Find(MethodTable *pMT);
+ MethodData *Find(MethodTable *pMTDecl, MethodTable *pMTImpl);
+ void Insert(MethodData *pMData);
+ void Clear();
+
+ protected:
+ // This describes each entry in the cache.
+ struct Entry
+ {
+ MethodData *m_pMData;
+ UINT32 m_iTimestamp;
+ };
+
+ MethodData *FindHelper(MethodTable *pMTDecl, MethodTable *pMTImpl, UINT32 idx);
+
+ inline UINT32 GetNextTimestamp()
+ { return ++m_iCurTimestamp; }
+
+ inline UINT32 NumEntries()
+ { LIMITED_METHOD_CONTRACT; return m_cEntries; }
+
+ inline void TouchEntry(UINT32 i)
+ { WRAPPER_NO_CONTRACT; m_iLastTouched = i; GetEntry(i)->m_iTimestamp = GetNextTimestamp(); }
+
+ inline UINT32 GetLastTouchedEntryIndex()
+ { WRAPPER_NO_CONTRACT; return m_iLastTouched; }
+
+ // The end of this object contains an array of Entry
+ inline Entry *GetEntryData()
+ { LIMITED_METHOD_CONTRACT; return (Entry *)(this + 1); }
+
+ inline Entry *GetEntry(UINT32 i)
+ { WRAPPER_NO_CONTRACT; return GetEntryData() + i; }
+
+ private:
+ // This serializes access to the cache
+ SimpleRWLock m_lock;
+
+ // This allows ageing of entries to decide which to punt when
+ // inserting a new entry.
+ UINT32 m_iCurTimestamp;
+
+ // The number of entries in the cache
+ UINT32 m_cEntries;
+ UINT32 m_iLastTouched;
+
+#ifdef _WIN64
+ UINT32 pad; // ensures that the size is a multiple of 8 bytes
+#endif
+}; // class MethodDataCache
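+
+// Usage sketch (illustrative, not part of the original sources): the cache is a
+// small timestamp-based LRU keyed on (decl, impl) MethodTable pairs; Insert
+// evicts the entry with the oldest timestamp. 'pCache' is hypothetical:
+//
+// MethodTable::MethodData *pData = pCache->Find(pMTDecl, pMTImpl); // AddRef'd
+// if (pData == NULL)
+// {
+// pData = /* build MethodData for the pair */;
+// pCache->Insert(pData); // cache takes its own reference
+// }
+// // ... use pData, then ...
+// pData->Release();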
+
+//==========================================================================================
+UINT32 MethodDataCache::GetObjectSize(UINT32 cEntries)
+{
+ LIMITED_METHOD_CONTRACT;
+ return sizeof(MethodDataCache) + (sizeof(Entry) * cEntries);
+}
+
+//==========================================================================================
+MethodDataCache::MethodDataCache(UINT32 cEntries)
+ : m_lock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT),
+ m_iCurTimestamp(0),
+ m_cEntries(cEntries),
+ m_iLastTouched(0)
+{
+ WRAPPER_NO_CONTRACT;
+ ZeroMemory(GetEntryData(), cEntries * sizeof(Entry));
+}
+
+//==========================================================================================
+MethodTable::MethodData *MethodDataCache::FindHelper(
+ MethodTable *pMTDecl, MethodTable *pMTImpl, UINT32 idx)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INSTANCE_CHECK;
+ } CONTRACTL_END;
+
+ MethodData *pEntry = GetEntry(idx)->m_pMData;
+ if (pEntry != NULL) {
+ MethodTable *pMTDeclEntry = pEntry->GetDeclMethodTable();
+ MethodTable *pMTImplEntry = pEntry->GetImplMethodTable();
+ if (pMTDeclEntry == pMTDecl && pMTImplEntry == pMTImpl) {
+ return pEntry;
+ }
+ else if (pMTDecl == pMTImpl) {
+ if (pMTDeclEntry == pMTDecl) {
+ return pEntry->GetDeclMethodData();
+ }
+ if (pMTImplEntry == pMTDecl) {
+ return pEntry->GetImplMethodData();
+ }
+ }
+ }
+
+ return NULL;
+}
+
+//==========================================================================================
+MethodTable::MethodData *MethodDataCache::Find(MethodTable *pMTDecl, MethodTable *pMTImpl)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INSTANCE_CHECK;
+ } CONTRACTL_END;
+
+#ifdef LOGGING
+ g_sdStats.m_cCacheLookups++;
+#endif
+
+ SimpleReadLockHolder lh(&m_lock);
+
+ // Check the last touched entry.
+ MethodData *pEntry = FindHelper(pMTDecl, pMTImpl, GetLastTouchedEntryIndex());
+
+ // Now search the entire cache.
+ if (pEntry == NULL) {
+ for (UINT32 i = 0; i < NumEntries(); i++) {
+ pEntry = FindHelper(pMTDecl, pMTImpl, i);
+ if (pEntry != NULL) {
+ TouchEntry(i);
+ break;
+ }
+ }
+ }
+
+ if (pEntry != NULL) {
+ pEntry->AddRef();
+ }
+
+#ifdef LOGGING
+ else {
+ // Failure to find the entry in the cache.
+ g_sdStats.m_cCacheMisses++;
+ }
+#endif // LOGGING
+
+ return pEntry;
+}
+
+//==========================================================================================
+MethodTable::MethodData *MethodDataCache::Find(MethodTable *pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ return Find(pMT, pMT);
+}
+
+//==========================================================================================
+void MethodDataCache::Insert(MethodData *pMData)
+{
+ CONTRACTL {
+ NOTHROW; // for now, because it does not yet resize.
+ GC_NOTRIGGER;
+ INSTANCE_CHECK;
+ } CONTRACTL_END;
+
+ SimpleWriteLockHolder hLock(&m_lock);
+
+ UINT32 iMin = UINT32_MAX;
+ UINT32 idxMin = UINT32_MAX;
+ for (UINT32 i = 0; i < NumEntries(); i++) {
+ if (GetEntry(i)->m_iTimestamp < iMin) {
+ idxMin = i;
+ iMin = GetEntry(i)->m_iTimestamp;
+ }
+ }
+ Entry *pEntry = GetEntry(idxMin);
+ if (pEntry->m_pMData != NULL) {
+ pEntry->m_pMData->Release();
+ }
+ pMData->AddRef();
+ pEntry->m_pMData = pMData;
+ pEntry->m_iTimestamp = GetNextTimestamp();
+}
+
+//==========================================================================================
+void MethodDataCache::Clear()
+{
+ CONTRACTL {
+ NOTHROW; // for now, because it does not yet resize.
+ GC_NOTRIGGER;
+ INSTANCE_CHECK;
+ } CONTRACTL_END;
+
+ // Taking the lock here is just a precaution. Really, the runtime
+ // should be suspended because this is called while unloading an
+ // AppDomain at the SysSuspendEE stage. But, if someone calls it
+ // outside of that context, we should be extra cautious.
+ SimpleWriteLockHolder lh(&m_lock);
+
+ for (UINT32 i = 0; i < NumEntries(); i++) {
+ Entry *pEntry = GetEntry(i);
+ if (pEntry->m_pMData != NULL) {
+ pEntry->m_pMData->Release();
+ }
+ }
+ ZeroMemory(GetEntryData(), NumEntries() * sizeof(Entry));
+ m_iCurTimestamp = 0;
+} // MethodDataCache::Clear
+
+#endif // !DACCESS_COMPILE
+
+
+//==========================================================================================
+//
+// Initialize the offsets of multipurpose slots at compile time using template metaprogramming
+//
+
+template<int N>
+struct CountBitsAtCompileTime
+{
+ enum { value = (N & 1) + CountBitsAtCompileTime<(N >> 1)>::value };
+};
+
+template<>
+struct CountBitsAtCompileTime<0>
+{
+ enum { value = 0 };
+};
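+
+// Illustration: CountBitsAtCompileTime<0x0B>::value == 3, since 0x0B is 1011b
+// and the recursion adds (N & 1) at each right shift until N reaches zero.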
+
+// "mask" is mask of used slots.
+template<int mask>
+struct MethodTable::MultipurposeSlotOffset
+{
+ // This is the raw index of the slot, assigned on a first-come-first-served basis
+ enum { raw = CountBitsAtCompileTime<mask>::value };
+
+ // This is the actual index of the slot. It equals the raw index except in the
+ // case where the first fixed slot is unused but the second one is used. In that
+ // case the first fixed slot has to be assigned instead of the second one. This
+ // assumes that there are exactly two fixed slots.
+ enum { index = (((mask & 3) == 2) && (raw == 1)) ? 0 : raw };
+
+ // Offset of slot
+ enum { slotOffset = (index == 0) ? offsetof(MethodTable, m_pMultipurposeSlot1) :
+ (index == 1) ? offsetof(MethodTable, m_pMultipurposeSlot2) :
+ (sizeof(MethodTable) + index * sizeof(TADDR) - 2 * sizeof(TADDR)) };
+
+ // Size of the MethodTable including overflow slots. It is used to compute the start offset of optional members.
+ enum { totalSize = (slotOffset >= sizeof(MethodTable)) ? slotOffset : sizeof(MethodTable) };
+};
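+
+// Worked example: with mask == 0x2 the second fixed slot is already in use but
+// the first is free, so raw == 1 yet index == 0 -- the slot being assigned is
+// m_pMultipurposeSlot1 rather than the already-occupied m_pMultipurposeSlot2.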
+
+//
+// These macros recursively expand to create 2^N values for the offset arrays
+//
+#define MULTIPURPOSE_SLOT_OFFSET_1(mask) MULTIPURPOSE_SLOT_OFFSET (mask) MULTIPURPOSE_SLOT_OFFSET (mask | 0x01)
+#define MULTIPURPOSE_SLOT_OFFSET_2(mask) MULTIPURPOSE_SLOT_OFFSET_1(mask) MULTIPURPOSE_SLOT_OFFSET_1(mask | 0x02)
+#define MULTIPURPOSE_SLOT_OFFSET_3(mask) MULTIPURPOSE_SLOT_OFFSET_2(mask) MULTIPURPOSE_SLOT_OFFSET_2(mask | 0x04)
+#define MULTIPURPOSE_SLOT_OFFSET_4(mask) MULTIPURPOSE_SLOT_OFFSET_3(mask) MULTIPURPOSE_SLOT_OFFSET_3(mask | 0x08)
+#define MULTIPURPOSE_SLOT_OFFSET_5(mask) MULTIPURPOSE_SLOT_OFFSET_4(mask) MULTIPURPOSE_SLOT_OFFSET_4(mask | 0x10)
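+
+// For example, MULTIPURPOSE_SLOT_OFFSET_2(0) expands to
+// MULTIPURPOSE_SLOT_OFFSET(0) MULTIPURPOSE_SLOT_OFFSET(1)
+// MULTIPURPOSE_SLOT_OFFSET(2) MULTIPURPOSE_SLOT_OFFSET(3),
+// one entry for each of the 2^2 possible masks of the two low bits.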
+
+#define MULTIPURPOSE_SLOT_OFFSET(mask) MultipurposeSlotOffset<mask>::slotOffset,
+const BYTE MethodTable::c_DispatchMapSlotOffsets[] = {
+ MULTIPURPOSE_SLOT_OFFSET_2(0)
+};
+const BYTE MethodTable::c_NonVirtualSlotsOffsets[] = {
+ MULTIPURPOSE_SLOT_OFFSET_3(0)
+};
+const BYTE MethodTable::c_ModuleOverrideOffsets[] = {
+ MULTIPURPOSE_SLOT_OFFSET_4(0)
+};
+#undef MULTIPURPOSE_SLOT_OFFSET
+
+#define MULTIPURPOSE_SLOT_OFFSET(mask) MultipurposeSlotOffset<mask>::totalSize,
+const BYTE MethodTable::c_OptionalMembersStartOffsets[] = {
+ MULTIPURPOSE_SLOT_OFFSET_5(0)
+};
+#undef MULTIPURPOSE_SLOT_OFFSET
+
+
+//==========================================================================================
+// Optimization intended for MethodTable::GetModule, MethodTable::GetDispatchMap and MethodTable::GetNonVirtualSlotsPtr
+
+#include <optsmallperfcritical.h>
+
+PTR_Module MethodTable::GetModule()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ g_IBCLogger.LogMethodTableAccess(this);
+
+ // Fast path for non-generic non-array case
+ if ((m_dwFlags & (enum_flag_HasComponentSize | enum_flag_GenericsMask)) == 0)
+ return GetLoaderModule();
+
+ MethodTable * pMTForModule = IsArray() ? this : GetCanonicalMethodTable();
+ if (!pMTForModule->HasModuleOverride())
+ return pMTForModule->GetLoaderModule();
+
+ TADDR pSlot = pMTForModule->GetMultipurposeSlotPtr(enum_flag_HasModuleOverride, c_ModuleOverrideOffsets);
+ return RelativeFixupPointer<PTR_Module>::GetValueAtPtr(pSlot);
+}
+
+//==========================================================================================
+PTR_Module MethodTable::GetModule_NoLogging()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Fast path for non-generic non-array case
+ if ((m_dwFlags & (enum_flag_HasComponentSize | enum_flag_GenericsMask)) == 0)
+ return GetLoaderModule();
+
+ MethodTable * pMTForModule = IsArray() ? this : GetCanonicalMethodTable();
+ if (!pMTForModule->HasModuleOverride())
+ return pMTForModule->GetLoaderModule();
+
+ TADDR pSlot = pMTForModule->GetMultipurposeSlotPtr(enum_flag_HasModuleOverride, c_ModuleOverrideOffsets);
+ return RelativeFixupPointer<PTR_Module>::GetValueAtPtr(pSlot);
+}
+
+//==========================================================================================
+PTR_DispatchMap MethodTable::GetDispatchMap()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ MethodTable * pMT = this;
+
+ if (!pMT->HasDispatchMapSlot())
+ {
+ pMT = pMT->GetCanonicalMethodTable();
+ if (!pMT->HasDispatchMapSlot())
+ return NULL;
+ }
+
+ g_IBCLogger.LogDispatchMapAccess(pMT);
+
+ TADDR pSlot = pMT->GetMultipurposeSlotPtr(enum_flag_HasDispatchMapSlot, c_DispatchMapSlotOffsets);
+ return RelativePointer<PTR_DispatchMap>::GetValueAtPtr(pSlot);
+}
+
+//==========================================================================================
+TADDR MethodTable::GetNonVirtualSlotsPtr()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(GetFlag(enum_flag_HasNonVirtualSlots));
+ return GetMultipurposeSlotPtr(enum_flag_HasNonVirtualSlots, c_NonVirtualSlotsOffsets);
+}
+
+#include <optdefault.h>
+
+
+//==========================================================================================
+PTR_Module MethodTable::GetModuleIfLoaded()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ g_IBCLogger.LogMethodTableAccess(this);
+
+ MethodTable * pMTForModule = IsArray() ? this : GetCanonicalMethodTable();
+ if (!pMTForModule->HasModuleOverride())
+ return pMTForModule->GetLoaderModule();
+
+ return Module::RestoreModulePointerIfLoaded(pMTForModule->GetModuleOverridePtr(), pMTForModule->GetLoaderModule());
+}
+
+#ifndef DACCESS_COMPILE
+//==========================================================================================
+void MethodTable::SetModule(Module * pModule)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (HasModuleOverride())
+ {
+ GetModuleOverridePtr()->SetValue(pModule);
+ }
+
+ _ASSERTE(GetModule() == pModule);
+}
+#endif // DACCESS_COMPILE
+
+//==========================================================================================
+BOOL MethodTable::ValidateWithPossibleAV()
+{
+ CANNOT_HAVE_CONTRACT;
+ SUPPORTS_DAC;
+
+ // MethodTables have the canonicalization property described below:
+ // canonicalize, then canonicalize again, and check that the results are
+ // the same. This is a property that holds for every single valid object in
+ // the system, but which should hold for very few other addresses.
+
+ // For non-generic classes, we can rely on comparing
+ // object->methodtable->class->methodtable
+ // to
+ // object->methodtable
+ //
+ // However, for generic instantiation this does not work. There we must
+ // compare
+ //
+ // object->methodtable->class->methodtable->class
+ // to
+ // object->methodtable->class
+ //
+ // Of course, that's not necessarily enough to verify that the method
+ // table and class are absolutely valid - we rely on type soundness
+ // for that. We need to do more sanity checking to
+ // make sure that our pointer here is in fact a valid object.
+ PTR_EEClass pEEClass = this->GetClassWithPossibleAV();
+ return ((this == pEEClass->GetMethodTableWithPossibleAV()) ||
+ ((HasInstantiation() || IsArray()) &&
+ (pEEClass->GetMethodTableWithPossibleAV()->GetClassWithPossibleAV() == pEEClass)));
+}
+
+#ifndef DACCESS_COMPILE
+
+//==========================================================================================
+BOOL MethodTable::IsClassInited(AppDomain* pAppDomain /* = NULL */)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (IsClassPreInited())
+ return TRUE;
+
+ if (IsSharedByGenericInstantiations())
+ return FALSE;
+
+ DomainLocalModule *pLocalModule;
+ if (pAppDomain == NULL)
+ {
+ pLocalModule = GetDomainLocalModule();
+ }
+ else
+ {
+ pLocalModule = GetDomainLocalModule(pAppDomain);
+ }
+
+ _ASSERTE(pLocalModule != NULL);
+
+ return pLocalModule->IsClassInitialized(this);
+}
+
+//==========================================================================================
+BOOL MethodTable::IsInitError()
+{
+ WRAPPER_NO_CONTRACT;
+
+ DomainLocalModule *pLocalModule = GetDomainLocalModule();
+ _ASSERTE(pLocalModule != NULL);
+
+ return pLocalModule->IsClassInitError(this);
+}
+
+//==========================================================================================
+// mark the class as having its .cctor run
+void MethodTable::SetClassInited()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(!IsClassPreInited() || MscorlibBinder::IsClass(this, CLASS__SHARED_STATICS));
+ GetDomainLocalModule()->SetClassInitialized(this);
+}
+
+//==========================================================================================
+void MethodTable::SetClassInitError()
+{
+ WRAPPER_NO_CONTRACT;
+ GetDomainLocalModule()->SetClassInitError(this);
+}
+
+//==========================================================================================
+// mark the class as having been restored.
+void MethodTable::SetIsRestored()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ PRECONDITION(!IsFullyLoaded());
+
+ // If functions on this type have already been requested for rejit, then give the rejit
+ // manager a chance to jump-stamp the code we are implicitly restoring. This ensures the
+ // first thread entering the function will jump to the prestub and trigger the
+ // rejit. Note that the PublishMethodTableHolder may take a lock to avoid a rejit race.
+ // See code:ReJitManager::PublishMethodHolder::PublishMethodHolder#PublishCode
+ // for details on the race.
+ //
+ {
+ ReJitPublishMethodTableHolder(this);
+ FastInterlockAnd(EnsureWritablePages(&(GetWriteableDataForWrite()->m_dwFlags)), ~MethodTableWriteableData::enum_flag_Unrestored);
+ }
+#ifndef DACCESS_COMPILE
+#if defined(FEATURE_EVENT_TRACE)
+ if (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled)
+#endif
+ {
+ ETW::MethodLog::MethodTableRestored(this);
+ }
+#endif
+}
+
+#ifdef FEATURE_COMINTEROP
+
+//==========================================================================================
+// mark as COM object type (System.__ComObject and types deriving from it)
+void MethodTable::SetComObjectType()
+{
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_ComObject);
+}
+
+#endif // FEATURE_COMINTEROP
+
+#if defined(FEATURE_TYPEEQUIVALENCE) || defined(FEATURE_REMOTING)
+void MethodTable::SetHasTypeEquivalence()
+{
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_HasTypeEquivalence);
+}
+#endif
+
+#endif // !DACCESS_COMPILE
+
+//==========================================================================================
+WORD MethodTable::GetNumMethods()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetClass()->GetNumMethods();
+}
+
+//==========================================================================================
+PTR_BaseDomain MethodTable::GetDomain()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ g_IBCLogger.LogMethodTableAccess(this);
+ return GetLoaderModule()->GetDomain();
+}
+
+//==========================================================================================
+BOOL MethodTable::IsDomainNeutral()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+ BOOL ret = GetLoaderModule()->GetAssembly()->IsDomainNeutral();
+#ifndef DACCESS_COMPILE
+ _ASSERTE(!ret == !GetLoaderAllocator()->IsDomainNeutral());
+#endif
+
+ return ret;
+}
+
+//==========================================================================================
+BOOL MethodTable::HasSameTypeDefAs(MethodTable *pMT)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (this == pMT)
+ return TRUE;
+
+ // optimize for the negative case where we expect RID mismatch
+ if (GetTypeDefRid() != pMT->GetTypeDefRid())
+ return FALSE;
+
+ if (GetCanonicalMethodTable() == pMT->GetCanonicalMethodTable())
+ return TRUE;
+
+ return (GetModule() == pMT->GetModule());
+}
+
+//==========================================================================================
+BOOL MethodTable::HasSameTypeDefAs_NoLogging(MethodTable *pMT)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (this == pMT)
+ return TRUE;
+
+ // optimize for the negative case where we expect RID mismatch
+ if (GetTypeDefRid_NoLogging() != pMT->GetTypeDefRid_NoLogging())
+ return FALSE;
+
+ if (GetCanonicalMethodTable() == pMT->GetCanonicalMethodTable())
+ return TRUE;
+
+ return (GetModule_NoLogging() == pMT->GetModule_NoLogging());
+}
+
+#ifndef DACCESS_COMPILE
+
+//==========================================================================================
+PTR_MethodTable InterfaceInfo_t::GetApproxMethodTable(Module * pContainingModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+#ifdef FEATURE_PREJIT
+ if (m_pMethodTable.IsTagged())
+ {
+ // Ideally, we would use Module::RestoreMethodTablePointer here. Unfortunately, it is not
+ // possible because of the current type loader architecture that restores types incrementally
+ // even in the NGen case.
+ MethodTable * pItfMT = *(m_pMethodTable.GetValuePtr());
+
+ // Restore the method table, but do not write it back if it has an instantiation.
+ // We do not want to write back the approximate instantiations.
+ Module::RestoreMethodTablePointerRaw(&pItfMT, pContainingModule, CLASS_LOAD_APPROXPARENTS);
+
+ if (!pItfMT->HasInstantiation())
+ {
+ // m_pMethodTable.SetValue() is not used here since we want to update the indirection cell
+ *EnsureWritablePages(m_pMethodTable.GetValuePtr()) = pItfMT;
+ }
+
+ return pItfMT;
+ }
+#endif
+ MethodTable * pItfMT = m_pMethodTable.GetValue();
+ ClassLoader::EnsureLoaded(TypeHandle(pItfMT), CLASS_LOAD_APPROXPARENTS);
+ return pItfMT;
+}
+
+#ifndef CROSSGEN_COMPILE
+//==========================================================================================
+// get the method desc given the interface method desc
+/* static */ MethodDesc *MethodTable::GetMethodDescForInterfaceMethodAndServer(
+ TypeHandle ownerType, MethodDesc *pItfMD, OBJECTREF *pServer)
+{
+ CONTRACT(MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pItfMD));
+ PRECONDITION(pItfMD->IsInterface());
+ PRECONDITION(!ownerType.IsNull());
+ PRECONDITION(ownerType.GetMethodTable()->HasSameTypeDefAs(pItfMD->GetMethodTable()));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+ VALIDATEOBJECTREF(*pServer);
+
+#ifdef _DEBUG
+ MethodTable * pItfMT = ownerType.GetMethodTable();
+ PREFIX_ASSUME(pItfMT != NULL);
+#endif // _DEBUG
+
+ MethodTable *pServerMT = (*pServer)->GetMethodTable();
+ PREFIX_ASSUME(pServerMT != NULL);
+
+ if (pServerMT->IsTransparentProxy())
+ {
+ // If pServer is a TP, then the interface method desc is the one to
+ // use to dispatch the call.
+ RETURN(pItfMD);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (pServerMT->IsComObjectType() && !pItfMD->HasMethodInstantiation())
+ {
+ // interop needs an exact MethodDesc
+ pItfMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pItfMD,
+ ownerType.GetMethodTable(),
+ FALSE, // forceBoxedEntryPoint
+ Instantiation(), // methodInst
+ FALSE, // allowInstParam
+ TRUE); // forceRemotableMethod
+
+ RETURN(pServerMT->GetMethodDescForComInterfaceMethod(pItfMD, false));
+ }
+#endif // FEATURE_COMINTEROP
+
+ // Handle pure COM+ types.
+ RETURN (pServerMT->GetMethodDescForInterfaceMethod(ownerType, pItfMD));
+}
+
+#ifdef FEATURE_COMINTEROP
+//==========================================================================================
+// get the method desc given the interface method desc on a COM implemented server
+// (if fNullOk is set then NULL is an allowable return value)
+MethodDesc *MethodTable::GetMethodDescForComInterfaceMethod(MethodDesc *pItfMD, bool fNullOk)
+{
+ CONTRACT(MethodDesc*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pItfMD));
+ PRECONDITION(pItfMD->IsInterface());
+ PRECONDITION(IsComObjectType());
+ POSTCONDITION(fNullOk || CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ MethodTable * pItfMT = pItfMD->GetMethodTable();
+ PREFIX_ASSUME(pItfMT != NULL);
+
+ // First handle the __ComObject class, which doesn't have a dynamic interface map
+ if (!HasDynamicInterfaceMap())
+ {
+ RETURN(pItfMD);
+ }
+ else
+ {
+ // Now we handle the more complex extensible RCWs. The first thing to do is check
+ // whether the static definition of the extensible RCW specifies that the class
+ // implements the interface.
+ DWORD slot = (DWORD) -1;
+
+ // Calling GetTarget here instead of FindDispatchImpl gives us caching functionality to increase speed.
+ PCODE tgt = VirtualCallStubManager::GetTarget(
+ pItfMT->GetLoaderAllocator()->GetDispatchToken(pItfMT->GetTypeID(), pItfMD->GetSlot()), this);
+
+ if (tgt != NULL)
+ {
+ RETURN(MethodTable::GetMethodDescForSlotAddress(tgt));
+ }
+
+ // The interface is not in the static class definition so we need to look at the
+ // dynamic interfaces.
+ else if (FindDynamicallyAddedInterface(pItfMT))
+ {
+ // This interface was added to the class dynamically, so it is implemented
+ // by the COM object. We treat these dynamically added interfaces the same
+ // way we treat COM objects, that is, by using the interface vtable.
+ RETURN(pItfMD);
+ }
+ else
+ {
+ RETURN(NULL);
+ }
+ }
+}
+#endif // FEATURE_COMINTEROP
+
+#endif // CROSSGEN_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+MethodTable* CreateMinimalMethodTable(Module* pContainingModule,
+ LoaderHeap* pCreationHeap,
+ AllocMemTracker* pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ EEClass* pClass = EEClass::CreateMinimalClass(pCreationHeap, pamTracker);
+
+ LOG((LF_BCL, LL_INFO100, "Level2 - Creating MethodTable {0x%p}...\n", pClass));
+
+ MethodTable* pMT = (MethodTable *)(void *)pamTracker->Track(pCreationHeap->AllocMem(S_SIZE_T(sizeof(MethodTable))));
+
+ // Note: Memory allocated on loader heap is zero filled
+ // memset(pMT, 0, sizeof(MethodTable));
+
+ // Allocate the private data block ("private" during runtime in the ngen'ed case).
+ BYTE* pMTWriteableData = (BYTE *)
+ pamTracker->Track(pCreationHeap->AllocMem(S_SIZE_T(sizeof(MethodTableWriteableData))));
+ pMT->SetWriteableData((PTR_MethodTableWriteableData)pMTWriteableData);
+
+ //
+ // Set up the EEClass
+ //
+ pClass->SetMethodTable(pMT); // in the EEClass set the pointer to this MethodTable
+ pClass->SetAttrClass(tdPublic | tdSealed);
+
+ //
+ // Set up the MethodTable
+ //
+ // Does not need parent. Note that MethodTable for COR_GLOBAL_PARENT_TOKEN does not have parent either,
+ // so the system has to be wired for dealing with no parent anyway.
+ pMT->SetParentMethodTable(NULL);
+ pMT->SetClass(pClass);
+ pMT->SetLoaderModule(pContainingModule);
+ pMT->SetLoaderAllocator(pContainingModule->GetLoaderAllocator());
+ pMT->SetInternalCorElementType(ELEMENT_TYPE_CLASS);
+ pMT->SetBaseSize(ObjSizeOf(Object));
+
+#ifdef _DEBUG
+ pClass->SetDebugClassName("dynamicClass");
+ pMT->SetDebugClassName("dynamicClass");
+#endif
+
+ LOG((LF_BCL, LL_INFO10, "Level1 - MethodTable created {0x%p}\n", pClass));
+
+ return pMT;
+}
+
+#ifdef FEATURE_REMOTING
+//==========================================================================================
+void MethodTable::SetupRemotableMethodInfo(AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ // Make RMI for a method table.
+ CrossDomainOptimizationInfo *pRMIBegin = NULL;
+ if (GetNumMethods() > 0)
+ {
+ SIZE_T requiredSize = CrossDomainOptimizationInfo::SizeOf(GetNumVtableSlots());
+ pRMIBegin = (CrossDomainOptimizationInfo*) pamTracker->Track(GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(requiredSize)));
+ _ASSERTE(IS_ALIGNED(pRMIBegin, sizeof(void*)));
+ }
+ *(GetRemotableMethodInfoPtr()) = pRMIBegin;
+}
+
+//==========================================================================================
+PTR_RemotingVtsInfo MethodTable::AllocateRemotingVtsInfo(AllocMemTracker *pamTracker, DWORD dwNumFields)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Size the data structure to contain enough bit flags for all the
+ // instance fields.
+ DWORD cbInfo = RemotingVtsInfo::GetSize(dwNumFields);
+ RemotingVtsInfo *pInfo = (RemotingVtsInfo*)pamTracker->Track(GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(cbInfo)));
+
+ // Note: Memory allocated on loader heap is zero filled
+ // ZeroMemory(pInfo, cbInfo);
+
+#ifdef _DEBUG
+ pInfo->m_dwNumFields = dwNumFields;
+#endif
+
+ *(GetRemotingVtsInfoPtr()) = pInfo;
+
+ return pInfo;
+}
+#endif // FEATURE_REMOTING
+
+#ifdef FEATURE_COMINTEROP
+#ifndef CROSSGEN_COMPILE
+//==========================================================================================
+OBJECTREF MethodTable::GetObjCreateDelegate()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+ _ASSERTE(!IsInterface());
+ if (GetOHDelegate())
+ return ObjectFromHandle(GetOHDelegate());
+ else
+ return NULL;
+}
+
+//==========================================================================================
+void MethodTable::SetObjCreateDelegate(OBJECTREF orDelegate)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ THROWS; // From CreateHandle
+ }
+ CONTRACTL_END;
+
+ if (GetOHDelegate())
+ StoreObjectInHandle(GetOHDelegate(), orDelegate);
+ else
+ SetOHDelegate (GetAppDomain()->CreateHandle(orDelegate));
+}
+#endif //CROSSGEN_COMPILE
+#endif // FEATURE_COMINTEROP
+
+
+//==========================================================================================
+void MethodTable::SetInterfaceMap(WORD wNumInterfaces, InterfaceInfo_t* iMap)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (wNumInterfaces == 0)
+ {
+ _ASSERTE(!HasInterfaceMap());
+ return;
+ }
+
+ m_wNumInterfaces = wNumInterfaces;
+
+ CONSISTENCY_CHECK(IS_ALIGNED(iMap, sizeof(void*)));
+ m_pInterfaceMap = iMap;
+}
+
+//==========================================================================================
+// Called after GetExtraInterfaceInfoSize above to set up a new MethodTable with the additional memory to track
+// extra interface info. If there is a non-zero number of interfaces implemented on this class but
+// GetExtraInterfaceInfoSize() returned zero, this call must still be made (with a NULL argument).
+void MethodTable::InitializeExtraInterfaceInfo(PVOID pInfo)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Check that memory was allocated or not allocated in the right scenarios.
+ _ASSERTE(((pInfo == NULL) && (GetExtraInterfaceInfoSize(GetNumInterfaces()) == 0)) ||
+ ((pInfo != NULL) && (GetExtraInterfaceInfoSize(GetNumInterfaces()) != 0)));
+
+ // This call is a no-op if we don't require extra interface info (in which case a buffer should never have
+ // been allocated).
+ if (!HasExtraInterfaceInfo())
+ {
+ _ASSERTE(pInfo == NULL);
+ return;
+ }
+
+ // Get pointer to optional slot that holds either a small inlined bitmap of flags or the pointer to a
+ // larger bitmap.
+ PTR_TADDR pInfoSlot = GetExtraInterfaceInfoPtr();
+
+ // In either case, data inlined or held in an external buffer, the correct thing to do is to write pInfo
+ // to the slot. In the inlined case we wish to set all flags to their default value (zero, false) and
+ // writing NULL does that. Otherwise we simply want to dump the buffer pointer directly into the slot (no
+ // need for a discriminator bit, we can always infer which format we're using based on the interface
+ // count).
+ *pInfoSlot = (TADDR)pInfo;
+
+ // There shouldn't be any need for further initialization in the buffered case since loader heap
+ // allocation zeroes data.
+#ifdef _DEBUG
+ if (pInfo != NULL)
+ for (DWORD i = 0; i < GetExtraInterfaceInfoSize(GetNumInterfaces()); i++)
+ _ASSERTE(*((BYTE*)pInfo + i) == 0);
+#endif // _DEBUG
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+// Ngen support.
+void MethodTable::SaveExtraInterfaceInfo(DataImage *pImage)
+{
+ STANDARD_VM_CONTRACT;
+
+ // No extra data to save if the number of interfaces is below the threshold -- there is either no data or
+ // it all fits into the optional members inline.
+ if (GetNumInterfaces() <= kInlinedInterfaceInfoThreshhold)
+ return;
+
+ pImage->StoreStructure((LPVOID)*GetExtraInterfaceInfoPtr(),
+ GetExtraInterfaceInfoSize(GetNumInterfaces()),
+ DataImage::ITEM_INTERFACE_MAP);
+}
+
+void MethodTable::FixupExtraInterfaceInfo(DataImage *pImage)
+{
+ STANDARD_VM_CONTRACT;
+
+ // No pointer to extra data to fix up if the number of interfaces is below the threshold -- there is
+ // either no data or it all fits into the optional members inline.
+ if (GetNumInterfaces() <= kInlinedInterfaceInfoThreshhold)
+ return;
+
+ pImage->FixupPointerField(this, (BYTE*)GetExtraInterfaceInfoPtr() - (BYTE*)this);
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+// Define a macro that generates a mask for a given bit in a TADDR correctly on either 32 or 64 bit platforms.
+#ifdef _WIN64
+#define SELECT_TADDR_BIT(_index) (1ULL << (_index))
+#else
+#define SELECT_TADDR_BIT(_index) (1U << (_index))
+#endif
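+// For example, SELECT_TADDR_BIT(3) == 0x8; the 64-bit variant uses a 64-bit
+// literal so that bit indices of 32 and above do not overflow the shift.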
+
+//==========================================================================================
+// For the given interface in the map (specified via map index) mark the interface as declared explicitly on
+// this class. This is not legal for dynamically added interfaces (as used by RCWs).
+void MethodTable::SetInterfaceDeclaredOnClass(DWORD index)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(HasExtraInterfaceInfo());
+ _ASSERTE(index < GetNumInterfaces());
+
+ // Get address of optional slot for extra info.
+ PTR_TADDR pInfoSlot = GetExtraInterfaceInfoPtr();
+
+ if (GetNumInterfaces() <= kInlinedInterfaceInfoThreshhold)
+ {
+ // Bitmap of flags is stored inline in the optional slot.
+ *pInfoSlot |= SELECT_TADDR_BIT(index);
+ }
+ else
+ {
+ // Slot points to a buffer containing a larger bitmap.
+ TADDR *pBitmap = (PTR_TADDR)*pInfoSlot;
+
+ DWORD idxTaddr = index / (sizeof(TADDR) * 8); // Select TADDR in array that covers the target bit
+ DWORD idxInTaddr = index % (sizeof(TADDR) * 8);
+ TADDR bitmask = SELECT_TADDR_BIT(idxInTaddr);
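+ // (e.g. on a 64-bit platform, index == 70 selects pBitmap[1], bit 6)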
+
+ pBitmap[idxTaddr] |= bitmask;
+ _ASSERTE((pBitmap[idxTaddr] & bitmask) == bitmask);
+ }
+}
+
+//==========================================================================================
+// For the given interface return true if the interface was declared explicitly on this class.
+bool MethodTable::IsInterfaceDeclaredOnClass(DWORD index)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(HasExtraInterfaceInfo());
+
+ // Dynamic interfaces are always marked as not DeclaredOnClass (I don't know why but this is how the code
+ // was originally authored).
+ if (index >= GetNumInterfaces())
+ {
+#ifdef FEATURE_COMINTEROP
+ _ASSERTE(HasDynamicInterfaceMap());
+#endif // FEATURE_COMINTEROP
+ return false;
+ }
+
+ // Get data from the optional extra info slot.
+ TADDR taddrInfo = *GetExtraInterfaceInfoPtr();
+
+ if (GetNumInterfaces() <= kInlinedInterfaceInfoThreshhold)
+ {
+ // Bitmap of flags is stored directly in the value.
+ return (taddrInfo & SELECT_TADDR_BIT(index)) != 0;
+ }
+ else
+ {
+ // Slot points to a buffer containing a larger bitmap.
+ TADDR *pBitmap = (PTR_TADDR)taddrInfo;
+
+ DWORD idxTaddr = index / (sizeof(TADDR) * 8); // Select TADDR in array that covers the target bit
+ DWORD idxInTaddr = index % (sizeof(TADDR) * 8);
+ TADDR bitmask = SELECT_TADDR_BIT(idxInTaddr);
+
+ return (pBitmap[idxTaddr] & bitmask) != 0;
+ }
+}
+
+#ifdef FEATURE_COMINTEROP
+
+//==========================================================================================
+PTR_InterfaceInfo MethodTable::GetDynamicallyAddedInterfaceMap()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ PRECONDITION(HasDynamicInterfaceMap());
+
+ return GetInterfaceMap() + GetNumInterfaces();
+}
+
+//==========================================================================================
+unsigned MethodTable::GetNumDynamicallyAddedInterfaces()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ PRECONDITION(HasDynamicInterfaceMap());
+
+ PTR_InterfaceInfo pInterfaces = GetInterfaceMap();
+ PREFIX_ASSUME(pInterfaces != NULL);
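+ // The count of dynamically added interfaces is stored in the SIZE_T
+ // immediately preceding the interface map (see AddDynamicInterface).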
+ return (unsigned)*(dac_cast<PTR_SIZE_T>(pInterfaces) - 1);
+}
+
+//==========================================================================================
+BOOL MethodTable::FindDynamicallyAddedInterface(MethodTable *pInterface)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsRestored_NoLogging());
+ _ASSERTE(HasDynamicInterfaceMap()); // This should never be called on for a type that is not an extensible RCW.
+
+ unsigned cDynInterfaces = GetNumDynamicallyAddedInterfaces();
+ InterfaceInfo_t *pDynItfMap = GetDynamicallyAddedInterfaceMap();
+
+ for (unsigned i = 0; i < cDynInterfaces; i++)
+ {
+ if (pDynItfMap[i].GetMethodTable() == pInterface)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+//==========================================================================================
+void MethodTable::AddDynamicInterface(MethodTable *pItfMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsRestored_NoLogging());
+ PRECONDITION(HasDynamicInterfaceMap()); // This should never be called on for a type that is not an extensible RCW.
+ }
+ CONTRACTL_END;
+
+ unsigned NumDynAddedInterfaces = GetNumDynamicallyAddedInterfaces();
+ unsigned TotalNumInterfaces = GetNumInterfaces() + NumDynAddedInterfaces;
+
+ InterfaceInfo_t *pNewItfMap = NULL;
+ S_SIZE_T AllocSize = (S_SIZE_T(S_UINT32(TotalNumInterfaces) + S_UINT32(1)) * S_SIZE_T(sizeof(InterfaceInfo_t))) + S_SIZE_T(sizeof(DWORD_PTR));
+ if (AllocSize.IsOverflow())
+ ThrowHR(COR_E_OVERFLOW);
+
+ // Allocate the new interface table, adding one entry for the new interface and one
+ // more for the dummy slot before the start of the table.
+ pNewItfMap = (InterfaceInfo_t*)(void*)GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(AllocSize);
+
+ pNewItfMap = (InterfaceInfo_t*)(((BYTE *)pNewItfMap) + sizeof(DWORD_PTR));
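+
+ // The layout is now: [dynamic interface count (DWORD_PTR)] followed by the
+ // static interface entries and then the dynamically added ones, with
+ // pNewItfMap pointing at the first interface entry.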
+
+ // Copy the old map into the new one.
+ if (TotalNumInterfaces > 0) {
+ InterfaceInfo_t *pInterfaceMap = GetInterfaceMap();
+ PREFIX_ASSUME(pInterfaceMap != NULL);
+ memcpy(pNewItfMap, pInterfaceMap, TotalNumInterfaces * sizeof(InterfaceInfo_t));
+ }
+
+ // Add the new interface at the end of the map.
+ pNewItfMap[TotalNumInterfaces].SetMethodTable(pItfMT);
+
+ // Update the count of dynamically added interfaces.
+ *(((DWORD_PTR *)pNewItfMap) - 1) = NumDynAddedInterfaces + 1;
+
+ // Switch the old interface map with the new one.
+ VolatileStore(EnsureWritablePages(&m_pInterfaceMap), pNewItfMap);
+
+ // Log the fact that we leaked the interface vtable map.
+#ifdef _DEBUG
+ LOG((LF_INTEROP, LL_EVERYTHING,
+ "Extensible RCW %s being cast to interface %s caused an interface vtable map leak",
+ GetClass()->GetDebugClassName(), pItfMT->GetClass()->m_szDebugClassName));
+#else // !_DEBUG
+ LOG((LF_INTEROP, LL_EVERYTHING,
+ "Extensible RCW being cast to an interface caused an interface vtable map leak"));
+#endif // !_DEBUG
+} // MethodTable::AddDynamicInterface
+
+#endif // FEATURE_COMINTEROP
+
+void MethodTable::SetupGenericsStaticsInfo(FieldDesc* pStaticFieldDescs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // No need to generate IDs for open types. Indeed, since we don't save them
+ // in the NGEN image, it would be actively incorrect to do so. However,
+ // we still leave the optional member in the MethodTable holding the value -1 for the ID.
+
+ GenericsStaticsInfo *pInfo = GetGenericsStaticsInfo();
+ if (!ContainsGenericVariables() && !IsSharedByGenericInstantiations())
+ {
+ Module * pModuleForStatics = GetLoaderModule();
+
+ pInfo->m_DynamicTypeID = pModuleForStatics->AllocateDynamicEntry(this);
+ }
+ else
+ {
+ pInfo->m_DynamicTypeID = (SIZE_T)-1;
+ }
+
+ pInfo->m_pFieldDescs = pStaticFieldDescs;
+}
+
+#endif // !DACCESS_COMPILE
+
+//==========================================================================================
+// Calculate how many bytes of storage will be required to track additional information for interfaces. This
+// will be zero if there are no interfaces, but can also be zero for small numbers of interfaces, so
+// callers should be ready to handle this.
+/* static */ SIZE_T MethodTable::GetExtraInterfaceInfoSize(DWORD cInterfaces)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // For small numbers of interfaces we can record the info in the TADDR of the optional member itself (use
+ // the TADDR as a bitmap).
+ if (cInterfaces <= kInlinedInterfaceInfoThreshhold)
+ return 0;
+
+ // Otherwise we'll cause an array of TADDRs to be allocated (use TADDRs since the heap space allocated
+ // will almost certainly need to be TADDR aligned anyway).
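+ // For example, on a 64-bit platform 100 interfaces round up to 128 bits,
+ // i.e. ALIGN_UP(100, sizeof(TADDR) * 8) / 8 == 16 bytes (two TADDRs).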
+ return ALIGN_UP(cInterfaces, sizeof(TADDR) * 8) / 8;
+}
+
+#ifdef DACCESS_COMPILE
+//==========================================================================================
+void MethodTable::EnumMemoryRegionsForExtraInterfaceInfo()
+{
+ SUPPORTS_DAC;
+
+ // No extra data to enum if the number of interfaces is below the threshold -- there is either no data or
+ // it all fits into the optional members inline.
+ if (GetNumInterfaces() <= kInlinedInterfaceInfoThreshhold)
+ return;
+
+ DacEnumMemoryRegion(*GetExtraInterfaceInfoPtr(), GetExtraInterfaceInfoSize(GetNumInterfaces()));
+}
+#endif // DACCESS_COMPILE
+
+//==========================================================================================
+Module* MethodTable::GetModuleForStatics()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ g_IBCLogger.LogMethodTableAccess(this);
+
+ if (HasGenericsStaticsInfo())
+ {
+ DWORD dwDynamicClassDomainID;
+ return GetGenericsStaticsModuleAndID(&dwDynamicClassDomainID);
+ }
+ else
+ {
+ return GetLoaderModule();
+ }
+}
+
+//==========================================================================================
+DWORD MethodTable::GetModuleDynamicEntryID()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(IsDynamicStatics() && "Only memory reflection emit types and generics can have a dynamic ID");
+
+ if (HasGenericsStaticsInfo())
+ {
+ DWORD dwDynamicClassDomainID;
+ GetGenericsStaticsModuleAndID(&dwDynamicClassDomainID);
+ return dwDynamicClassDomainID;
+ }
+ else
+ {
+ return GetClass()->GetModuleDynamicID();
+ }
+}
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+//==========================================================================================
+// Equivalence based on Guid and TypeIdentifier attributes to support the "no-PIA" feature.
+BOOL MethodTable::IsEquivalentTo_Worker(MethodTable *pOtherMT COMMA_INDEBUG(TypeHandlePairList *pVisited))
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT; // we are called from MethodTable::CanCastToClass
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(HasTypeEquivalence() && pOtherMT->HasTypeEquivalence());
+
+
+#ifdef _DEBUG
+ if (TypeHandlePairList::Exists(pVisited, TypeHandle(this), TypeHandle(pOtherMT)))
+ {
+ _ASSERTE(!"We are in the process of comparing these types already. That should never happen!");
+ return TRUE;
+ }
+ TypeHandlePairList newVisited(TypeHandle(this), TypeHandle(pOtherMT), pVisited);
+#endif
+
+
+ if (HasInstantiation() != pOtherMT->HasInstantiation())
+ return FALSE;
+
+ if (IsArray())
+ {
+ if (!pOtherMT->IsArray() || GetRank() != pOtherMT->GetRank())
+ return FALSE;
+
+ // arrays of structures have their own unshared MTs and will take this path
+ return (GetApproxArrayElementTypeHandle().IsEquivalentTo(pOtherMT->GetApproxArrayElementTypeHandle() COMMA_INDEBUG(&newVisited)));
+ }
+
+ BOOL bResult = FALSE;
+
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ bResult = IsEquivalentTo_WorkerInner(pOtherMT COMMA_INDEBUG(&newVisited));
+ END_SO_INTOLERANT_CODE;
+
+ return bResult;
+}
+
+//==========================================================================================
+// Type equivalence - SO intolerant part.
+BOOL MethodTable::IsEquivalentTo_WorkerInner(MethodTable *pOtherMT COMMA_INDEBUG(TypeHandlePairList *pVisited))
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ LOADS_TYPE(CLASS_DEPENDENCIES_LOADED);
+ }
+ CONTRACTL_END;
+
+ AppDomain *pDomain = GetAppDomain();
+ if (pDomain != NULL)
+ {
+ TypeEquivalenceHashTable::EquivalenceMatch match = pDomain->GetTypeEquivalenceCache()->CheckEquivalence(TypeHandle(this), TypeHandle(pOtherMT));
+ switch (match)
+ {
+ case TypeEquivalenceHashTable::Match:
+ return TRUE;
+ case TypeEquivalenceHashTable::NoMatch:
+ return FALSE;
+ case TypeEquivalenceHashTable::MatchUnknown:
+ break;
+ default:
+ _ASSERTE(FALSE);
+ break;
+ }
+ }
+
+ BOOL fEquivalent = FALSE;
+
+ if (HasInstantiation())
+ {
+ // we limit variance on generics only to interfaces
+ if (!IsInterface() || !pOtherMT->IsInterface())
+ {
+ fEquivalent = FALSE;
+ goto EquivalenceCalculated;
+ }
+
+ // check whether the instantiations are equivalent
+ Instantiation inst1 = GetInstantiation();
+ Instantiation inst2 = pOtherMT->GetInstantiation();
+
+ if (inst1.GetNumArgs() != inst2.GetNumArgs())
+ {
+ fEquivalent = FALSE;
+ goto EquivalenceCalculated;
+ }
+
+ for (DWORD i = 0; i < inst1.GetNumArgs(); i++)
+ {
+ if (!inst1[i].IsEquivalentTo(inst2[i] COMMA_INDEBUG(pVisited)))
+ {
+ fEquivalent = FALSE;
+ goto EquivalenceCalculated;
+ }
+ }
+
+ if (GetTypeDefRid() == pOtherMT->GetTypeDefRid() && GetModule() == pOtherMT->GetModule())
+ {
+ // it's OK to declare the MTs equivalent at this point; the cases we care
+ // about are IList<IFoo> and IList<IBar> where IFoo and IBar are equivalent
+ fEquivalent = TRUE;
+ }
+ else
+ {
+ fEquivalent = FALSE;
+ }
+ goto EquivalenceCalculated;
+ }
+
+ if (IsArray())
+ {
+ if (!pOtherMT->IsArray() || GetRank() != pOtherMT->GetRank())
+ {
+ fEquivalent = FALSE;
+ goto EquivalenceCalculated;
+ }
+
+ // arrays of structures have their own unshared MTs and will take this path
+ fEquivalent = (GetApproxArrayElementTypeHandle().IsEquivalentTo(pOtherMT->GetApproxArrayElementTypeHandle() COMMA_INDEBUG(pVisited)));
+ goto EquivalenceCalculated;
+ }
+
+ fEquivalent = CompareTypeDefsForEquivalence(GetCl(), pOtherMT->GetCl(), GetModule(), pOtherMT->GetModule(), NULL);
+
+EquivalenceCalculated:
+ // Only record equivalence matches if we are in an AppDomain
+ if (pDomain != NULL)
+ {
+ // Collectible type results will not get cached.
+ if ((!this->Collectible() && !pOtherMT->Collectible()))
+ {
+ TypeEquivalenceHashTable::EquivalenceMatch match;
+ match = fEquivalent ? TypeEquivalenceHashTable::Match : TypeEquivalenceHashTable::NoMatch;
+ pDomain->GetTypeEquivalenceCache()->RecordEquivalence(TypeHandle(this), TypeHandle(pOtherMT), match);
+ }
+ }
+
+ return fEquivalent;
+}
+#endif // FEATURE_TYPEEQUIVALENCE
+
+//==========================================================================================
+BOOL MethodTable::CanCastToInterface(MethodTable *pTargetMT, TypeHandlePairList *pVisited)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pTargetMT));
+ PRECONDITION(pTargetMT->IsInterface());
+ PRECONDITION(!IsTransparentProxy());
+ PRECONDITION(IsRestored_NoLogging());
+ }
+ CONTRACTL_END
+
+ if (!pTargetMT->HasVariance())
+ {
+ if (HasTypeEquivalence() || pTargetMT->HasTypeEquivalence())
+ {
+ if (IsInterface() && IsEquivalentTo(pTargetMT))
+ return TRUE;
+
+ return ImplementsEquivalentInterface(pTargetMT);
+ }
+
+ return CanCastToNonVariantInterface(pTargetMT);
+ }
+ else
+ {
+ if (CanCastByVarianceToInterfaceOrDelegate(pTargetMT, pVisited))
+ return TRUE;
+
+ InterfaceMapIterator it = IterateInterfaceMap();
+ while (it.Next())
+ {
+ if (it.GetInterface()->CanCastByVarianceToInterfaceOrDelegate(pTargetMT, pVisited))
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+//==========================================================================================
+BOOL MethodTable::CanCastByVarianceToInterfaceOrDelegate(MethodTable *pTargetMT, TypeHandlePairList *pVisited)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pTargetMT));
+ PRECONDITION(pTargetMT->HasVariance());
+ PRECONDITION(pTargetMT->IsInterface() || pTargetMT->IsDelegate());
+ PRECONDITION(IsRestored_NoLogging());
+ }
+ CONTRACTL_END
+
+ BOOL returnValue = FALSE;
+
+ EEClass *pClass = NULL;
+
+ TypeHandlePairList pairList(this, pTargetMT, pVisited);
+
+ if (TypeHandlePairList::Exists(pVisited, this, pTargetMT))
+ goto Exit;
+
+ if (GetTypeDefRid() != pTargetMT->GetTypeDefRid() || GetModule() != pTargetMT->GetModule())
+ {
+ goto Exit;
+ }
+
+ {
+ pClass = pTargetMT->GetClass();
+ Instantiation inst = GetInstantiation();
+ Instantiation targetInst = pTargetMT->GetInstantiation();
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle thArg = inst[i];
+ TypeHandle thTargetArg = targetInst[i];
+
+ // If argument types are not equivalent, test them for compatibility
+ // in accordance with the variance annotation
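+ // (e.g. covariance permits IEnumerable<string> -> IEnumerable<object>,
+ // while contravariance permits IComparer<object> -> IComparer<string>)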
+ if (!thArg.IsEquivalentTo(thTargetArg))
+ {
+ switch (pClass->GetVarianceOfTypeParameter(i))
+ {
+ case gpCovariant :
+ if (!thArg.IsBoxedAndCanCastTo(thTargetArg, &pairList))
+ goto Exit;
+ break;
+
+ case gpContravariant :
+ if (!thTargetArg.IsBoxedAndCanCastTo(thArg, &pairList))
+ goto Exit;
+ break;
+
+ case gpNonVariant :
+ goto Exit;
+
+ default :
+ _ASSERTE(!"Illegal variance annotation");
+ goto Exit;
+ }
+ }
+ }
+ }
+
+ returnValue = TRUE;
+
+Exit:
+
+ return returnValue;
+}
+
+//==========================================================================================
+BOOL MethodTable::CanCastToClass(MethodTable *pTargetMT, TypeHandlePairList *pVisited)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pTargetMT));
+ PRECONDITION(!pTargetMT->IsArray());
+ PRECONDITION(!pTargetMT->IsInterface());
+ }
+ CONTRACTL_END
+
+ MethodTable *pMT = this;
+
+ // If the target type has variant type parameters, we take a slower path
+ if (pTargetMT->HasVariance())
+ {
+ // At present, we support variance only on delegates and interfaces
+ CONSISTENCY_CHECK(pTargetMT->IsDelegate());
+
+ // First chase the inheritance hierarchy until we hit a class that only differs in its instantiation
+ do {
+ // Cheap check for equivalence
+ if (pMT->IsEquivalentTo(pTargetMT))
+ return TRUE;
+
+ g_IBCLogger.LogMethodTableAccess(pMT);
+
+ if (pMT->CanCastByVarianceToInterfaceOrDelegate(pTargetMT, pVisited))
+ return TRUE;
+
+ pMT = pMT->GetParentMethodTable();
+ } while (pMT);
+ }
+
+ // If there are no variant type parameters, just chase the hierarchy
+ else
+ {
+ do {
+ if (pMT->IsEquivalentTo(pTargetMT))
+ return TRUE;
+
+ g_IBCLogger.LogMethodTableAccess(pMT);
+
+ pMT = pMT->GetParentMethodTable();
+ } while (pMT);
+ }
+
+ return FALSE;
+}
+
+#include <optsmallperfcritical.h>
+//==========================================================================================
+BOOL MethodTable::CanCastToNonVariantInterface(MethodTable *pTargetMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pTargetMT));
+ PRECONDITION(pTargetMT->IsInterface());
+ PRECONDITION(!pTargetMT->HasVariance());
+ PRECONDITION(!IsTransparentProxy());
+ PRECONDITION(IsRestored_NoLogging());
+ }
+ CONTRACTL_END
+
+ // Check to see if the current class is for the interface passed in.
+ if (this == pTargetMT)
+ return TRUE;
+
+ // Check to see if the static class definition indicates we implement the interface.
+ return ImplementsInterfaceInline(pTargetMT);
+}
+
+//==========================================================================================
+TypeHandle::CastResult MethodTable::CanCastToInterfaceNoGC(MethodTable *pTargetMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pTargetMT));
+ PRECONDITION(pTargetMT->IsInterface());
+ PRECONDITION(!IsTransparentProxy());
+ PRECONDITION(IsRestored_NoLogging());
+ }
+ CONTRACTL_END
+
+ if (!pTargetMT->HasVariance() && !IsArray() && !HasTypeEquivalence() && !pTargetMT->HasTypeEquivalence())
+ {
+ return CanCastToNonVariantInterface(pTargetMT) ? TypeHandle::CanCast : TypeHandle::CannotCast;
+ }
+ else
+ {
+ // We're conservative on variant interfaces and types with equivalence
+ return TypeHandle::MaybeCast;
+ }
+}
+
+//==========================================================================================
+TypeHandle::CastResult MethodTable::CanCastToClassNoGC(MethodTable *pTargetMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pTargetMT));
+ PRECONDITION(!pTargetMT->IsArray());
+ PRECONDITION(!pTargetMT->IsInterface());
+ }
+ CONTRACTL_END
+
+ // We're conservative on variant classes
+ if (pTargetMT->HasVariance() || g_IBCLogger.InstrEnabled())
+ {
+ return TypeHandle::MaybeCast;
+ }
+
+ // Type equivalence needs the slow path
+ if (HasTypeEquivalence() || pTargetMT->HasTypeEquivalence())
+ {
+ return TypeHandle::MaybeCast;
+ }
+
+ // If there are no variant type parameters, just chase the hierarchy
+ else
+ {
+ PTR_VOID pMT = this;
+
+ do {
+ if (pMT == pTargetMT)
+ return TypeHandle::CanCast;
+
+ pMT = MethodTable::GetParentMethodTableOrIndirection(pMT);
+ } while (pMT);
+ }
+
+ return TypeHandle::CannotCast;
+}
+#include <optdefault.h>
+
+BOOL
+MethodTable::IsExternallyVisible()
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ BOOL bIsVisible = IsTypeDefExternallyVisible(GetCl(), GetModule(), GetClass()->GetAttrClass());
+
+ if (bIsVisible && HasInstantiation() && !IsGenericTypeDefinition())
+ {
+ for (COUNT_T i = 0; i < GetNumGenericArgs(); i++)
+ {
+ if (!GetInstantiation()[i].IsExternallyVisible())
+ return FALSE;
+ }
+ }
+
+ return bIsVisible;
+} // MethodTable::IsExternallyVisible
+
+#ifdef FEATURE_PREJIT
+
+BOOL MethodTable::CanShareVtableChunksFrom(MethodTable *pTargetMT, Module *pCurrentLoaderModule, Module *pCurrentPreferredZapModule)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // These constraints come from two places:
+ // 1. A non-zapped MT cannot share with a zapped MT since it may result in SetSlot() on a read-only slot
+ // 2. Zapping this MT in MethodTable::Save cannot "unshare" something we decide to share now
+ //
+ // We could fix both of these and allow non-zapped MTs to share chunks fully by doing the following
+ // 1. Fix the few dangerous callers of SetSlot to first check whether the chunk itself is zapped
+ // (see MethodTableBuilder::CopyExactParentSlots, or we could use ExecutionManager::FindZapModule)
+ // 2. Have this function return FALSE if IsCompilationProcess and rely on MethodTable::Save to do all sharing for the NGen case
+
+ return !pTargetMT->IsZapped() &&
+ pTargetMT->GetLoaderModule() == pCurrentLoaderModule &&
+ pCurrentLoaderModule == pCurrentPreferredZapModule &&
+ pCurrentPreferredZapModule == Module::GetPreferredZapModuleForMethodTable(pTargetMT);
+}
+
+#else
+
+BOOL MethodTable::CanShareVtableChunksFrom(MethodTable *pTargetMT, Module *pCurrentLoaderModule)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return pTargetMT->GetLoaderModule() == pCurrentLoaderModule;
+}
+
+#endif
+
+#ifdef _DEBUG
+
+void
+MethodTable::DebugDumpVtable(LPCUTF8 szClassName, BOOL fDebug)
+{
+ //diag functions shouldn't affect normal behavior
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ CQuickBytes qb;
+ const size_t cchBuff = MAX_CLASSNAME_LENGTH + 30;
+ LPWSTR buff = fDebug ? (LPWSTR) qb.AllocNoThrow(cchBuff * sizeof(WCHAR)) : NULL;
+
+ if ((buff == NULL) && fDebug)
+ {
+ WszOutputDebugString(W("OOM when dumping VTable - falling back to logging"));
+ fDebug = FALSE;
+ }
+
+ if (fDebug)
+ {
+ swprintf_s(buff, cchBuff, W("Vtable (with interface dupes) for '%S':\n"), szClassName);
+#ifdef _DEBUG
+ swprintf_s(&buff[wcslen(buff)], cchBuff - wcslen(buff) , W(" Total duplicate slots = %d\n"), g_dupMethods);
+#endif
+ WszOutputDebugString(buff);
+ }
+ else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "Vtable (with interface dupes) for '%s':\n", szClassName));
+ LOG((LF_ALWAYS, LL_ALWAYS, " Total duplicate slots = %d\n", g_dupMethods));
+ }
+
+ HRESULT hr;
+ EX_TRY
+ {
+ MethodIterator it(this);
+ for (; it.IsValid(); it.Next())
+ {
+ MethodDesc *pMD = it.GetMethodDesc();
+ LPCUTF8 pszName = pMD->GetName((USHORT) it.GetSlotNumber());
+ DWORD dwAttrs = pMD->GetAttrs();
+
+ if (fDebug)
+ {
+ DefineFullyQualifiedNameForClass();
+ LPCUTF8 name = GetFullyQualifiedNameForClass(pMD->GetMethodTable());
+ swprintf_s(buff, cchBuff,
+ W(" slot %2d: %S::%S%S 0x%p (slot = %2d)\n"),
+ it.GetSlotNumber(),
+ name,
+ pszName,
+ IsMdFinal(dwAttrs) ? " (final)" : "",
+ pMD->GetMethodEntryPoint(),
+ pMD->GetSlot()
+ );
+ WszOutputDebugString(buff);
+ }
+ else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS,
+ " slot %2d: %s::%s%s 0x%p (slot = %2d)\n",
+ it.GetSlotNumber(),
+ pMD->GetClass()->GetDebugClassName(),
+ pszName,
+ IsMdFinal(dwAttrs) ? " (final)" : "",
+ pMD->GetMethodEntryPoint(),
+ pMD->GetSlot()
+ ));
+ }
+ if (it.GetSlotNumber() == (DWORD)(GetNumMethods()-1))
+ {
+ if (fDebug)
+ {
+ WszOutputDebugString(W(" <-- vtable ends here\n"));
+ }
+ else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, " <-- vtable ends here\n"));
+ }
+ }
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (fDebug)
+ {
+ WszOutputDebugString(W("\n"));
+ }
+ else
+ {
+ //LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "\n"));
+ }
+} // MethodTable::DebugDumpVtable
+
+void
+MethodTable::Debug_DumpInterfaceMap(
+ LPCSTR szInterfaceMapPrefix)
+{
+ // Diagnostic functions shouldn't affect normal behavior
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if (GetNumInterfaces() == 0)
+ { // There are no interfaces, no point in printing interface map info
+ return;
+ }
+
+ //LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS,
+ "%s Interface Map for '%s':\n",
+ szInterfaceMapPrefix,
+ GetDebugClassName()));
+ LOG((LF_ALWAYS, LL_ALWAYS,
+ " Number of interfaces = %d\n",
+ GetNumInterfaces()));
+
+ HRESULT hr;
+ EX_TRY
+ {
+ InterfaceMapIterator it(this, false);
+ while (it.Next())
+ {
+ MethodTable *pInterfaceMT = it.GetInterface();
+
+ //LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS,
+ " index %2d: %s 0x%p\n",
+ it.GetIndex(),
+ pInterfaceMT->GetDebugClassName(),
+ pInterfaceMT));
+ }
+ //LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, " <-- interface map ends here\n"));
+ }
+ EX_CATCH_HRESULT(hr);
+
+ //LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "\n"));
+} // MethodTable::Debug_DumpInterfaceMap
+
+void
+MethodTable::Debug_DumpDispatchMap()
+{
+ WRAPPER_NO_CONTRACT; // It's a dev helper, we don't care about contracts
+
+ if (!HasDispatchMap())
+ { // There is no dispatch map for this type, so there is no point in printing the info
+ return;
+ }
+
+ //LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, "Dispatch Map for '%s':\n", GetDebugClassName()));
+
+ InterfaceInfo_t * pInterfaceMap = GetInterfaceMap();
+ DispatchMap::EncodedMapIterator it(this);
+
+ while (it.IsValid())
+ {
+ DispatchMapEntry *pEntry = it.Entry();
+
+ UINT32 nInterfaceIndex = pEntry->GetTypeID().GetInterfaceNum();
+ _ASSERTE(nInterfaceIndex < GetNumInterfaces());
+
+ MethodTable * pInterface = pInterfaceMap[nInterfaceIndex].GetMethodTable();
+ UINT32 nInterfaceSlotNumber = pEntry->GetSlotNumber();
+ UINT32 nImplementationSlotNumber = pEntry->GetTargetSlotNumber();
+ //LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS,
+ " Interface %d (%s) slot %d (%s) implemented in slot %d (%s)\n",
+ nInterfaceIndex,
+ pInterface->GetDebugClassName(),
+ nInterfaceSlotNumber,
+ pInterface->GetMethodDescForSlot(nInterfaceSlotNumber)->GetName(),
+ nImplementationSlotNumber,
+ GetMethodDescForSlot(nImplementationSlotNumber)->GetName()));
+
+ it.Next();
+ }
+ //LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
+ LOG((LF_ALWAYS, LL_ALWAYS, " <-- Dispatch map ends here\n"));
+} // MethodTable::Debug_DumpDispatchMap
+
+#endif //_DEBUG
+
+//==========================================================================================
+NOINLINE BOOL MethodTable::ImplementsInterface(MethodTable *pInterface)
+{
+ WRAPPER_NO_CONTRACT;
+ return ImplementsInterfaceInline(pInterface);
+}
+
+//==========================================================================================
+BOOL MethodTable::ImplementsEquivalentInterface(MethodTable *pInterface)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ PRECONDITION(pInterface->IsInterface()); // class we are looking up should be an interface
+ }
+ CONTRACTL_END;
+
+ // look for exact match first (optimize for success)
+ if (ImplementsInterfaceInline(pInterface))
+ return TRUE;
+
+ if (!pInterface->HasTypeEquivalence())
+ return FALSE;
+
+ DWORD numInterfaces = GetNumInterfaces();
+ if (numInterfaces == 0)
+ return FALSE;
+
+ InterfaceInfo_t *pInfo = GetInterfaceMap();
+
+ do
+ {
+ if (pInfo->GetMethodTable()->IsEquivalentTo(pInterface))
+ return TRUE;
+
+ pInfo++;
+ }
+ while (--numInterfaces);
+
+ return FALSE;
+}
+
+//==========================================================================================
+MethodDesc *MethodTable::GetMethodDescForInterfaceMethod(MethodDesc *pInterfaceMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(!pInterfaceMD->HasClassOrMethodInstantiation());
+ }
+ CONTRACTL_END;
+ WRAPPER_NO_CONTRACT;
+
+ return GetMethodDescForInterfaceMethod(TypeHandle(pInterfaceMD->GetMethodTable()), pInterfaceMD);
+}
+
+//==========================================================================================
+MethodDesc *MethodTable::GetMethodDescForInterfaceMethod(TypeHandle ownerType, MethodDesc *pInterfaceMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(!ownerType.IsNull());
+ PRECONDITION(ownerType.GetMethodTable()->IsInterface());
+ PRECONDITION(ownerType.GetMethodTable()->HasSameTypeDefAs(pInterfaceMD->GetMethodTable()));
+ PRECONDITION(IsArray() || ImplementsEquivalentInterface(ownerType.GetMethodTable()) || ownerType.GetMethodTable()->HasVariance());
+ }
+ CONTRACTL_END;
+
+ MethodDesc *pMD = NULL;
+
+ MethodTable *pInterfaceMT = ownerType.AsMethodTable();
+
+#ifdef CROSSGEN_COMPILE
+ DispatchSlot implSlot(FindDispatchSlot(pInterfaceMT->GetTypeID(), pInterfaceMD->GetSlot()));
+ PCODE pTgt = implSlot.GetTarget();
+#else
+ PCODE pTgt = VirtualCallStubManager::GetTarget(
+ pInterfaceMT->GetLoaderAllocator()->GetDispatchToken(pInterfaceMT->GetTypeID(), pInterfaceMD->GetSlot()),
+ this);
+#endif
+ pMD = MethodTable::GetMethodDescForSlotAddress(pTgt);
+
+#ifdef _DEBUG
+ MethodDesc *pDispSlotMD = FindDispatchSlotForInterfaceMD(ownerType, pInterfaceMD).GetMethodDesc();
+ _ASSERTE(pDispSlotMD == pMD);
+#endif // _DEBUG
+
+ pMD->CheckRestore();
+
+ return pMD;
+}
+#endif // DACCESS_COMPILE
+
+//==========================================================================================
+PTR_FieldDesc MethodTable::GetFieldDescByIndex(DWORD fieldIndex)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (HasGenericsStaticsInfo() &&
+ fieldIndex >= GetNumIntroducedInstanceFields())
+ {
+ return GetGenericsStaticFieldDescs() + (fieldIndex - GetNumIntroducedInstanceFields());
+ }
+ else
+ {
+ return GetClass()->GetFieldDescList() + fieldIndex;
+ }
+}
+
+//==========================================================================================
+DWORD MethodTable::GetIndexForFieldDesc(FieldDesc *pField)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (pField->IsStatic() && HasGenericsStaticsInfo())
+ {
+ FieldDesc *pStaticFields = GetGenericsStaticFieldDescs();
+
+ return GetNumIntroducedInstanceFields() + DWORD(pField - pStaticFields);
+
+ }
+ else
+ {
+ FieldDesc *pFields = GetClass()->GetFieldDescList();
+
+ return DWORD(pField - pFields);
+ }
+}
+
+//==========================================================================================
+#ifdef _MSC_VER
+#pragma optimize("t", on)
+#endif // _MSC_VER
+// Compute whether the type can be considered to have had its
+// static initialization run without doing anything at all, i.e. whether we know
+// immediately that the type requires nothing to be done to initialize it.
+//
+// If a type used as a representative during JITting is PreInit then
+// any types that it may represent within a code-sharing
+// group are also PreInit. For example, if List<object> is PreInit then List<string>
+// and List<MyType> are also PreInit. This is because the dynamicStatics, staticRefHandles
+// and hasCCtor are all identical given a head type, and weakening the domainNeutrality
+// to DomainSpecific only makes more types PreInit.
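+//
+// For example (illustrative types, not from this codebase): a class with no .cctor,
+// no boxed value-type statics and no dynamic statics is PreInit and needs no init
+// check, whereas "class C { static C() { ... } }" is not PreInit because its .cctor
+// must be triggered.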
+BOOL MethodTable::IsClassPreInited()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (ContainsGenericVariables())
+ return TRUE;
+
+ if (HasClassConstructor())
+ return FALSE;
+
+ if (HasBoxedRegularStatics())
+ return FALSE;
+
+ if (IsDynamicStatics())
+ return FALSE;
+
+ return TRUE;
+}
+#ifdef _MSC_VER
+#pragma optimize("", on)
+#endif // _MSC_VER
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+//==========================================================================================
+void MethodTable::AllocateRegularStaticBoxes()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(!ContainsGenericVariables());
+ PRECONDITION(HasBoxedRegularStatics());
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Instantiating static handles for %s\n", GetDebugClassName()));
+
+ GCX_COOP();
+
+ PTR_BYTE pStaticBase = GetGCStaticsBasePointer();
+
+ GCPROTECT_BEGININTERIOR(pStaticBase);
+
+ // In the ngened case, we have a cached array of the boxed statics' MethodTables. In the JITed case, we have just the FieldDescs.
+ ClassCtorInfoEntry *pClassCtorInfoEntry = GetClassCtorInfoIfExists();
+ if (pClassCtorInfoEntry != NULL)
+ {
+ OBJECTREF* pStaticSlots = (OBJECTREF*)(pStaticBase + pClassCtorInfoEntry->firstBoxedStaticOffset);
+ GCPROTECT_BEGININTERIOR(pStaticSlots);
+
+ ArrayDPTR(FixupPointer<PTR_MethodTable>) ppMTs = GetLoaderModule()->GetZapModuleCtorInfo()->
+ GetGCStaticMTs(pClassCtorInfoEntry->firstBoxedStaticMTIndex);
+
+ DWORD numBoxedStatics = pClassCtorInfoEntry->numBoxedStatics;
+ for (DWORD i = 0; i < numBoxedStatics; i++)
+ {
+#ifdef FEATURE_PREJIT
+ Module::RestoreMethodTablePointer(&(ppMTs[i]), GetLoaderModule());
+#endif
+ MethodTable *pFieldMT = ppMTs[i].GetValue();
+
+ _ASSERTE(pFieldMT);
+
+ LOG((LF_CLASSLOADER, LL_INFO10000, "\tInstantiating static of type %s\n", pFieldMT->GetDebugClassName()));
+ OBJECTREF obj = AllocateStaticBox(pFieldMT, pClassCtorInfoEntry->hasFixedAddressVTStatics);
+
+ SetObjectReference( &(pStaticSlots[i]), obj, GetAppDomain() );
+ }
+ GCPROTECT_END();
+ }
+ else
+ {
+ // We should never take this codepath in zapped images.
+ _ASSERTE(!IsZapped());
+
+ FieldDesc *pField = HasGenericsStaticsInfo() ?
+ GetGenericsStaticFieldDescs() : (GetApproxFieldDescListRaw() + GetNumIntroducedInstanceFields());
+ FieldDesc *pFieldEnd = pField + GetNumStaticFields();
+
+ while (pField < pFieldEnd)
+ {
+ _ASSERTE(pField->IsStatic());
+
+ if (!pField->IsSpecialStatic() && pField->IsByValue())
+ {
+ TypeHandle th = pField->GetFieldTypeHandleThrowing();
+ MethodTable* pFieldMT = th.GetMethodTable();
+
+ LOG((LF_CLASSLOADER, LL_INFO10000, "\tInstantiating static of type %s\n", pFieldMT->GetDebugClassName()));
+ OBJECTREF obj = AllocateStaticBox(pFieldMT, HasFixedAddressVTStatics());
+
+ SetObjectReference( (OBJECTREF*)(pStaticBase + pField->GetOffset()), obj, GetAppDomain() );
+ }
+
+ pField++;
+ }
+ }
+ GCPROTECT_END();
+}
+
+//==========================================================================================
+OBJECTREF MethodTable::AllocateStaticBox(MethodTable* pFieldMT, BOOL fPinned, OBJECTHANDLE* pHandle)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pFieldMT->IsValueType());
+
+ // Activate any dependent modules if necessary
+ pFieldMT->EnsureInstanceActive();
+
+ OBJECTREF obj = AllocateObject(pFieldMT);
+
+ // Pin the object if necessary
+ if (fPinned)
+ {
+ LOG((LF_CLASSLOADER, LL_INFO10000, "\tSTATICS:Pinning static (VT fixed address attribute) of type %s\n", pFieldMT->GetDebugClassName()));
+ OBJECTHANDLE oh = GetAppDomain()->CreatePinningHandle(obj);
+ if (pHandle)
+ {
+ *pHandle = oh;
+ }
+ }
+ else
+ {
+ if (pHandle)
+ {
+ *pHandle = NULL;
+ }
+ }
+
+ return obj;
+}
+
+//==========================================================================================
+BOOL MethodTable::RunClassInitEx(OBJECTREF *pThrowable)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsFullyLoaded());
+ PRECONDITION(IsProtectedByGCFrame(pThrowable));
+ }
+ CONTRACTL_END;
+
+ // A somewhat unusual function: it can both return a throwable and throw.
+ // The difference is, we throw on restartable operations and just return the throwable
+ // on exceptions that are fatal for the .cctor.
+ // (Of course in the latter case the caller is supposed to throw pThrowable.)
+ // Doing the opposite (i.e. throwing on fatal and returning on nonfatal)
+ // would be more intuitive, but it's more convenient the way it is.
+
+ BOOL fRet = FALSE;
+
+ // During the <clinit>, this thread must not be asynchronously
+ // stopped or interrupted. That would leave the class unavailable
+ // and is therefore a security hole. We don't have to worry about
+ // multithreading, since we only manipulate the current thread's count.
+ ThreadPreventAsyncHolder preventAsync;
+
+ // If the static initializer throws an exception that it doesn't catch, it has failed.
+ EX_TRY
+ {
+ // Activate our module if necessary
+ EnsureInstanceActive();
+
+ STRESS_LOG1(LF_CLASSLOADER, LL_INFO1000, "RunClassInit: Calling class constructor for type %pT\n", this);
+
+ MethodTable * pCanonMT = GetCanonicalMethodTable();
+
+ // Call the code method without touching MethodDesc if possible
+ PCODE pCctorCode = pCanonMT->GetSlot(pCanonMT->GetClassConstructorSlot());
+
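+ // A .cctor on a canonical method table shared between generic instantiations
+ // takes the exact MethodTable as a hidden argument (passed below as ARGNUM_0);
+ // an unshared .cctor takes no arguments at all.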
+ if (pCanonMT->IsSharedByGenericInstantiations())
+ {
+ PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(pCctorCode);
+ DECLARE_ARGHOLDER_ARRAY(args, 1);
+ args[ARGNUM_0] = PTR_TO_ARGHOLDER(this);
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD_NORET(args);
+ }
+ else
+ {
+ PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(pCctorCode);
+ DECLARE_ARGHOLDER_ARRAY(args, 0);
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD_NORET(args);
+ }
+
+ STRESS_LOG1(LF_CLASSLOADER, LL_INFO100000, "RunClassInit: Returned Successfully from class constructor for type %pT\n", this);
+
+ fRet = TRUE;
+ }
+ EX_CATCH
+ {
+ // Exception set by parent
+ // <TODO>@TODO: We should make this an ExceptionInInitializerError if the exception thrown is not
+ // a subclass of Error</TODO>
+ *pThrowable = GET_THROWABLE();
+ _ASSERTE(fRet == FALSE);
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // If active thread state does not have a CorruptionSeverity set for the exception,
+ // then set one up based upon the current exception code and/or the throwable.
+ //
+ // When can we be here with the current exception tracker's corruption severity not set?
+ // In case of an SO in managed code, the SO is never seen by the CLR's exception handler for managed code,
+ // and if this happens in a .cctor we can end up here without the corruption severity set.
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread != NULL);
+ ThreadExceptionState *pCurTES = pThread->GetExceptionState();
+ _ASSERTE(pCurTES != NULL);
+ if (pCurTES->GetLastActiveExceptionCorruptionSeverity() == NotSet)
+ {
+ if (CEHelper::IsProcessCorruptedStateException(GetCurrentExceptionCode()) ||
+ CEHelper::IsProcessCorruptedStateException(*pThrowable))
+ {
+ // Process Corrupting
+ pCurTES->SetLastActiveExceptionCorruptionSeverity(ProcessCorrupting);
+ LOG((LF_EH, LL_INFO100, "MethodTable::RunClassInitEx - Exception treated as ProcessCorrupting.\n"));
+ }
+ else
+ {
+ // Not Corrupting
+ pCurTES->SetLastActiveExceptionCorruptionSeverity(NotCorrupting);
+ LOG((LF_EH, LL_INFO100, "MethodTable::RunClassInitEx - Exception treated as non-corrupting.\n"));
+ }
+ }
+ else
+ {
+ LOG((LF_EH, LL_INFO100, "MethodTable::RunClassInitEx - Exception already has corruption severity set.\n"));
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return fRet;
+}
+
+//==========================================================================================
+void MethodTable::DoRunClassInitThrowing()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ // This is a fairly aggressive policy: merely asking that the class be initialized is grounds for kicking you out.
+ // Alternatively, we could simply NOP out the class initialization. Since the aggressive policy is also the more
+ // secure policy, keep this unless it proves intractable to remove all premature classinits in the system.
+ EnsureActive();
+
+ Thread *pThread;
+ pThread = GetThread();
+ _ASSERTE(pThread);
+ INTERIOR_STACK_PROBE_FOR(pThread, 8);
+
+ AppDomain *pDomain = GetAppDomain();
+
+ HRESULT hrResult = E_FAIL;
+ const char *description;
+ STRESS_LOG2(LF_CLASSLOADER, LL_INFO100000, "DoRunClassInit: Request to init %pT in appdomain %p\n", this, pDomain);
+
+ //
+ // Take the global lock
+ //
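+ // Locking protocol, in outline: the per-domain class-init ListLock guards the
+ // list of per-type entries; once this type's ListLockEntry is found (or created)
+ // we drop the global lock and acquire the entry's own lock deadlock-aware, so a
+ // cycle of mutually-initializing .cctors is detected rather than deadlocking.
+ //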
+
+ ListLock *_pLock = pDomain->GetClassInitLock();
+
+ ListLockHolder pInitLock(_pLock);
+
+ // Check again
+ if (IsClassInited())
+ goto Exit;
+
+ //
+ // Handle cases where the .cctor has already tried to run but failed.
+ //
+
+
+ if (IsInitError())
+ {
+ // Some error occurred trying to init this class
+ ListLockEntry* pEntry= (ListLockEntry *) _pLock->Find(this);
+ _ASSERTE(pEntry!=NULL);
+ _ASSERTE(pEntry->m_pLoaderAllocator == (GetDomain()->IsSharedDomain() ? pDomain->GetLoaderAllocator() : GetLoaderAllocator()));
+
+ // If this isn't a TypeInitializationException, then its creation failed
+ // somehow previously, so we should make one last attempt to create it. If
+ // that fails, just throw the exception that was originally thrown.
+ // Primarily, this deals with the problem that the exception is a
+ // ThreadAbortException, because this must be executing on a different
+ // thread. If in fact this thread is also aborting, then rethrowing the
+ // other thread's exception will not do any worse.
+
+ // If we need to create the type init exception object, we'll need to
+ // GC protect these, so might as well create the structure now.
+ struct _gc {
+ OBJECTREF pInitException;
+ OBJECTREF pNewInitException;
+ OBJECTREF pThrowable;
+ } gc;
+
+ gc.pInitException = pEntry->m_pLoaderAllocator->GetHandleValue(pEntry->m_hInitException);
+ gc.pNewInitException = NULL;
+ gc.pThrowable = NULL;
+
+ GCPROTECT_BEGIN(gc);
+
+ // We need to release this lock because CreateTypeInitializationExceptionObject and fetching the TypeLoad exception can cause
+ // managed code to re-enter into this codepath, causing a locking order violation.
+ pInitLock.Release();
+
+ if (MscorlibBinder::GetException(kTypeInitializationException) != gc.pInitException->GetMethodTable())
+ {
+ DefineFullyQualifiedNameForClassWOnStack();
+ LPCWSTR wszName = GetFullyQualifiedNameForClassW(this);
+
+ CreateTypeInitializationExceptionObject(wszName, &gc.pInitException, &gc.pNewInitException, &gc.pThrowable);
+
+ LOADERHANDLE hOrigInitException = pEntry->m_hInitException;
+ if (!CLRException::IsPreallocatedExceptionObject(pEntry->m_pLoaderAllocator->GetHandleValue(hOrigInitException)))
+ {
+ // Now put the new init exception in the handle. If another thread beat us (because we released the
+ // lock above), then we'll just let the extra init exception object get collected later.
+ pEntry->m_pLoaderAllocator->CompareExchangeValueInHandle(pEntry->m_hInitException, gc.pNewInitException, gc.pInitException);
+ } else {
+ // if the stored exception is a preallocated one we cannot store the new Exception object in it.
+ // we'll attempt to create a new handle for the new TypeInitializationException object
+ LOADERHANDLE hNewInitException = NULL;
+ // CreateHandle can throw due to OOM. We need to catch this so that we make sure to set the
+ // init error. Whatever exception was thrown will be rethrown below, so no worries.
+ EX_TRY {
+ hNewInitException = pEntry->m_pLoaderAllocator->AllocateHandle(gc.pNewInitException);
+ } EX_CATCH {
+ // If we failed to create the handle we'll just leave the originally alloc'd one in place.
+ } EX_END_CATCH(SwallowAllExceptions);
+
+ // if two threads are racing to set m_hInitException, clear the handle created by the loser
+ if (hNewInitException != NULL &&
+ InterlockedCompareExchangeT((&pEntry->m_hInitException), hNewInitException, hOrigInitException) != hOrigInitException)
+ {
+ pEntry->m_pLoaderAllocator->ClearHandle(hNewInitException);
+ }
+ }
+ }
+ else {
+ gc.pThrowable = gc.pInitException;
+ }
+
+ GCPROTECT_END();
+
+ // Throw the saved exception. Since we may be rethrowing a previously cached exception, must clear the stack trace first.
+ // Rethrowing a previously cached exception is distasteful but is required for appcompat with Everett.
+ //
+ // (The IsException() is probably more appropriate as an assert but as this isn't a heavily tested code path,
+ // I prefer to be defensive here.)
+ if (IsException(gc.pThrowable->GetMethodTable()))
+ {
+ ((EXCEPTIONREF)(gc.pThrowable))->ClearStackTraceForThrow();
+ }
+
+ // <FEATURE_CORRUPTING_EXCEPTIONS>
+ // Specify the corruption severity to be used to raise this exception in COMPlusThrow below.
+ // This will ensure that when the exception is seen by the managed code personality routine,
+ // it will setup the correct corruption severity in the exception tracker.
+ // </FEATURE_CORRUPTING_EXCEPTIONS>
+
+ COMPlusThrow(gc.pThrowable
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , pEntry->m_CorruptionSeverity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+ }
+
+ description = ".cctor lock";
+#ifdef _DEBUG
+ description = GetDebugClassName();
+#endif
+
+ // Take the lock
+ {
+ // Nontrivial holder; it might take a lock in its destructor.
+ ListLockEntryHolder pEntry(ListLockEntry::Find(pInitLock, this, description));
+
+ ListLockEntryLockHolder pLock(pEntry, FALSE);
+
+ // We have a list entry, we can release the global lock now
+ pInitLock.Release();
+
+ if (pLock.DeadlockAwareAcquire())
+ {
+ if (pEntry->m_hrResultCode == S_FALSE)
+ {
+ if (!NingenEnabled())
+ {
+ if (HasBoxedRegularStatics())
+ {
+ // First, instantiate any objects needed for value type statics
+ AllocateRegularStaticBoxes();
+ }
+
+ // Nobody has run the .cctor yet
+ if (HasClassConstructor())
+ {
+ struct _gc {
+ OBJECTREF pInnerException;
+ OBJECTREF pInitException;
+ OBJECTREF pThrowable;
+ } gc;
+ gc.pInnerException = NULL;
+ gc.pInitException = NULL;
+ gc.pThrowable = NULL;
+ GCPROTECT_BEGIN(gc);
+
+ if (!RunClassInitEx(&gc.pInnerException))
+ {
+ // The .cctor failed and we want to store the exception that resulted
+ // in the entry. Increment the ref count to keep the entry alive for
+ // subsequent attempts to run the .cctor.
+ pEntry->AddRef();
+ // For collectible types, register the entry for cleanup.
+ if (GetLoaderAllocator()->IsCollectible())
+ {
+ GetLoaderAllocator()->RegisterFailedTypeInitForCleanup(pEntry);
+ }
+
+ _ASSERTE(g_pThreadAbortExceptionClass == MscorlibBinder::GetException(kThreadAbortException));
+
+ if(gc.pInnerException->GetMethodTable() == g_pThreadAbortExceptionClass)
+ {
+ gc.pThrowable = gc.pInnerException;
+ gc.pInitException = gc.pInnerException;
+ gc.pInnerException = NULL;
+ }
+ else
+ {
+ DefineFullyQualifiedNameForClassWOnStack();
+ LPCWSTR wszName = GetFullyQualifiedNameForClassW(this);
+
+ // Note that this may not succeed due to problems creating the exception
+ // object; on failure it falls back to a preallocated exception object.
+ CreateTypeInitializationExceptionObject(
+ wszName, &gc.pInnerException, &gc.pInitException, &gc.pThrowable);
+ }
+
+ pEntry->m_pLoaderAllocator = GetDomain()->IsSharedDomain() ? pDomain->GetLoaderAllocator() : GetLoaderAllocator();
+
+ // CreateHandle can throw due to OOM. We need to catch this so that we make sure to set the
+ // init error. Whatever exception was thrown will be rethrown below, so no worries.
+ EX_TRY {
+ // Save the exception object, and return to caller as well.
+ pEntry->m_hInitException = pEntry->m_pLoaderAllocator->AllocateHandle(gc.pInitException);
+ } EX_CATCH {
+ // If we failed to create the handle (due to OOM), we'll just store the preallocated OOM
+ // handle here instead.
+ pEntry->m_hInitException = pEntry->m_pLoaderAllocator->AllocateHandle(CLRException::GetPreallocatedOutOfMemoryException());
+ } EX_END_CATCH(SwallowAllExceptions);
+
+ pEntry->m_hrResultCode = E_FAIL;
+ SetClassInitError();
+
+ #ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Save the corruption severity of the exception so that if the type system
+ // attempts to pick it up from its cache list and throw again, it should
+ // treat the exception as corrupting, if applicable.
+ pEntry->m_CorruptionSeverity = pThread->GetExceptionState()->GetLastActiveExceptionCorruptionSeverity();
+
+ // We should be having a valid corruption severity at this point
+ _ASSERTE(pEntry->m_CorruptionSeverity != NotSet);
+ #endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ COMPlusThrow(gc.pThrowable
+ #ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , pEntry->m_CorruptionSeverity
+ #endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+ }
+
+ GCPROTECT_END();
+ }
+ }
+
+ pEntry->m_hrResultCode = S_OK;
+
+ // Set the initialization flags in the DLS and on domain-specific types.
+ // Note we also set the flag for dynamic statics, which use the DynamicStatics part
+ // of the DLS irrespective of whether the type is domain neutral or not.
+ SetClassInited();
+
+ }
+ else
+ {
+ // Use previous result
+
+ hrResult = pEntry->m_hrResultCode;
+ if(FAILED(hrResult))
+ {
+ // An exception may have occurred in the cctor. DoRunClassInit() should return FALSE in that
+ // case.
+ _ASSERTE(pEntry->m_hInitException);
+ _ASSERTE(pEntry->m_pLoaderAllocator == (GetDomain()->IsSharedDomain() ? pDomain->GetLoaderAllocator() : GetLoaderAllocator()));
+ _ASSERTE(IsInitError());
+
+ // Throw the saved exception. Since we are rethrowing a previously cached exception, must clear the stack trace first.
+ // Rethrowing a previously cached exception is distasteful but is required for appcompat with Everett.
+ //
+ // (The IsException() is probably more appropriate as an assert but as this isn't a heavily tested code path,
+ // I prefer to be defensive here.)
+ if (IsException(pEntry->m_pLoaderAllocator->GetHandleValue(pEntry->m_hInitException)->GetMethodTable()))
+ {
+ ((EXCEPTIONREF)(pEntry->m_pLoaderAllocator->GetHandleValue(pEntry->m_hInitException)))->ClearStackTraceForThrow();
+ }
+ COMPlusThrow(pEntry->m_pLoaderAllocator->GetHandleValue(pEntry->m_hInitException));
+ }
+ }
+ }
+ }
+
+ //
+ // Notify any entries waiting on the current entry and wait for the required entries.
+ //
+
+ // We need to take the global lock before we play with the list of entries.
+
+ STRESS_LOG2(LF_CLASSLOADER, LL_INFO100000, "DoRunClassInit: returning SUCCESS for init %pT in appdomain %p\n", this, pDomain);
+ // No need to set pThrowable; in case of error it will already have been set.
+
+ g_IBCLogger.LogMethodTableAccess(this);
+Exit:
+ ;
+ END_INTERIOR_STACK_PROBE;
+}
+
+//==========================================================================================
+void MethodTable::CheckRunClassInitThrowing()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(IsFullyLoaded());
+ }
+ CONTRACTL_END;
+
+ { // Debug-only code causes an SO violation, so add an exemption.
+ CONTRACT_VIOLATION(SOToleranceViolation);
+ CONSISTENCY_CHECK(CheckActivated());
+ }
+
+ // To find GC hole easier...
+ TRIGGERSGC();
+
+ if (IsClassPreInited())
+ return;
+
+ // Don't initialize shared generic instantiations (e.g. MyClass<__Canon>)
+ if (IsSharedByGenericInstantiations())
+ return;
+
+ DomainLocalModule *pLocalModule = GetDomainLocalModule();
+ _ASSERTE(pLocalModule);
+
+ DWORD iClassIndex = GetClassIndex();
+
+ // Check to see if we have already run the .cctor for this class.
+ if (!pLocalModule->IsClassAllocated(this, iClassIndex))
+ pLocalModule->PopulateClass(this);
+
+ if (!pLocalModule->IsClassInitialized(this, iClassIndex))
+ DoRunClassInitThrowing();
+}
+
+//==========================================================================================
+void MethodTable::CheckRunClassInitAsIfConstructingThrowing()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
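+
+ // A type whose .cctor is not beforefieldinit ("precise init") must be initialized
+ // before the first instance of the type is constructed. E.g. (illustrative C#): given
+ // "class B { static B() {...} } class D : B {}", constructing a D must first run
+ // B's .cctor -- hence the walk up the parent chain below.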
+ if (HasPreciseInitCctors())
+ {
+ MethodTable *pMTCur = this;
+ while (pMTCur != NULL)
+ {
+ if (!pMTCur->GetClass()->IsBeforeFieldInit())
+ pMTCur->CheckRunClassInitThrowing();
+
+ pMTCur = pMTCur->GetParentMethodTable();
+ }
+ }
+}
+
+//==========================================================================================
+OBJECTREF MethodTable::Allocate()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ CONSISTENCY_CHECK(IsFullyLoaded());
+
+ EnsureInstanceActive();
+
+ if (HasPreciseInitCctors())
+ {
+ CheckRunClassInitAsIfConstructingThrowing();
+ }
+
+ return AllocateObject(this);
+}
+
+//==========================================================================================
+// box 'data' creating a new object and return it. This routine understands the special
+// handling needed for Nullable values.
+// see code:Nullable#NullableVerification
+
+OBJECTREF MethodTable::Box(void* data)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsValueType());
+ }
+ CONTRACTL_END;
+
+ OBJECTREF ref;
+
+ GCPROTECT_BEGININTERIOR (data);
+
+ if (ContainsStackPtr())
+ {
+ // We should never box a type that contains stack pointers.
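+ // (Byref-like types such as TypedReference or ArgIterator fall into this
+ // category; they must remain on the stack.)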
+ COMPlusThrow(kInvalidOperationException, W("InvalidOperation_TypeCannotBeBoxed"));
+ }
+
+ ref = FastBox(&data);
+ GCPROTECT_END ();
+ return ref;
+}
+
+OBJECTREF MethodTable::FastBox(void** data)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsValueType());
+ }
+ CONTRACTL_END;
+
+ // See code:Nullable#NullableArchitecture for more
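+ // Boxing a Nullable<T> yields either a null reference (when HasValue is false)
+ // or a boxed T -- never a boxed Nullable<T>.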
+ if (IsNullable())
+ return Nullable::Box(*data, this);
+
+ OBJECTREF ref = Allocate();
+ CopyValueClass(ref->UnBox(), *data, this, ref->GetAppDomain());
+ return ref;
+}
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+//==========================================================================================
+static void FastCallFinalize(Object *obj, PCODE funcPtr, BOOL fCriticalCall)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ BEGIN_CALL_TO_MANAGEDEX(fCriticalCall ? EEToManagedCriticalCall : EEToManagedDefault);
+
+#if defined(_TARGET_X86_)
+
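+ // On x86 we invoke the finalizer directly: 'this' is passed in ecx per the
+ // managed calling convention, and Finalize() takes no other arguments.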
+ __asm
+ {
+ mov ecx, [obj]
+ call [funcPtr]
+ INDEBUG(nop) // Mark the fact that we can call managed code
+ }
+
+#else // _TARGET_X86_
+
+ FastCallFinalizeWorker(obj, funcPtr);
+
+#endif // _TARGET_X86_
+
+ END_CALL_TO_MANAGED();
+}
+
+#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
+void CallFinalizerOnThreadObject(Object *obj)
+{
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ THREADBASEREF refThis = (THREADBASEREF)ObjectToOBJECTREF(obj);
+ Thread* thread = refThis->GetInternal();
+
+ // Prevent multiple calls to Finalize
+ // Objects can be resurrected after being finalized. However, there is no
+ // race condition here. We always check whether an exposed thread object is
+ // still attached to the internal Thread object, before proceeding.
+ if (thread)
+ {
+ refThis->SetDelegate(NULL);
+
+ // During process shutdown, we finalize even reachable objects. But if we break
+ // the link between the System.Thread and the internal Thread object, the runtime
+ // may not work correctly. In particular, we won't be able to transition between
+ // contexts and domains to finalize other objects. Since the runtime doesn't
+ // require that Threads finalize during shutdown, we need to disable this. If
+ // we wait until phase 2 of shutdown finalization (when the EE is suspended and
+ // will never resume) then we can simply skip the side effects of Thread
+ // finalization.
+ if ((g_fEEShutDown & ShutDown_Finalize2) == 0)
+ {
+ if (GetThread() != thread)
+ {
+ refThis->ClearInternal();
+ }
+
+ FastInterlockOr ((ULONG *)&thread->m_State, Thread::TS_Finalized);
+ Thread::SetCleanupNeededForFinalizedThread();
+ }
+ }
+}
+
+//==========================================================================================
+// From the GC finalizer thread, invoke the Finalize() method on an object.
+void MethodTable::CallFinalizer(Object *obj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(obj->GetMethodTable()->HasFinalizer() ||
+ obj->GetMethodTable()->IsTransparentProxy());
+ }
+ CONTRACTL_END;
+
+ // Never call any finalizers under ngen for determinism
+ if (IsCompilationProcess())
+ {
+ return;
+ }
+
+ MethodTable *pMT = obj->GetMethodTable();
+
+
+ // Check for precise init class constructors that have failed, if any have failed, then we didn't run the
+ // constructor for the object, and running the finalizer for the object would violate the CLI spec by running
+ // instance code without having successfully run the precise-init class constructor.
+ if (pMT->HasPreciseInitCctors())
+ {
+ MethodTable *pMTCur = pMT;
+ do
+ {
+ if ((!pMTCur->GetClass()->IsBeforeFieldInit()) && pMTCur->IsInitError())
+ {
+ // Precise init Type Initializer for type failed... do not run finalizer
+ return;
+ }
+
+ pMTCur = pMTCur->GetParentMethodTable();
+ }
+ while (pMTCur != NULL);
+ }
+
+ if (pMT == g_pThreadClass)
+ {
+ // Finalizing a Thread object requires the ThreadStoreLock. Taking the
+ // ThreadStoreLock repeatedly is expensive, which is very bad if Thread
+ // objects are being retired at a high rate.
+ // To avoid taking the ThreadStoreLock multiple times, we mark the Thread with
+ // TS_Finalized and clean up a batch of them the next time we take the ThreadStoreLock.
+
+ // To avoid a possible ordering requirement between critical finalizers, we call the
+ // cleanup code directly.
+ CallFinalizerOnThreadObject(obj);
+ return;
+ }
+
+#ifdef FEATURE_CAS_POLICY
+ // Notify the host to setup the restricted context before finalizing each object
+ HostExecutionContextManager::SetHostRestrictedContext();
+#endif // FEATURE_CAS_POLICY
+
+ // Determine if the object has a critical or normal finalizer.
+ BOOL fCriticalFinalizer = pMT->HasCriticalFinalizer();
+
+ // There's no reason to actually set up a frame here. If we crawl out of the
+ // Finalize() method on this thread, we will see FRAME_TOP which indicates
+ // that the crawl should terminate. This is analogous to how KickOffThread()
+ // starts new threads in the runtime.
+ PCODE funcPtr = pMT->GetRestoredSlot(g_pObjectFinalizerMD->GetSlot());
+
+#ifdef STRESS_LOG
+ if (fCriticalFinalizer)
+ {
+ STRESS_LOG2(LF_GCALLOC, LL_INFO100, "Finalizing CriticalFinalizer %pM in domain %d\n",
+ pMT, GetAppDomain()->GetId().m_dwId);
+ }
+#endif
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
+#ifdef DEBUGGING_SUPPORTED
+ if (CORDebuggerTraceCall())
+ g_pDebugInterface->TraceCall((const BYTE *) funcPtr);
+#endif // DEBUGGING_SUPPORTED
+
+ FastCallFinalize(obj, funcPtr, fCriticalFinalizer);
+
+#else // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
+ PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(funcPtr);
+
+ DECLARE_ARGHOLDER_ARRAY(args, 1);
+
+ args[ARGNUM_0] = PTR_TO_ARGHOLDER(obj);
+
+ if (fCriticalFinalizer)
+ {
+ CRITICAL_CALLSITE;
+ }
+
+ CALL_MANAGED_METHOD_NORET(args);
+
+#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
+#ifdef STRESS_LOG
+ if (fCriticalFinalizer)
+ {
+ STRESS_LOG2(LF_GCALLOC, LL_INFO100, "Finalized CriticalFinalizer %pM in domain %d without exception\n",
+ pMT, GetAppDomain()->GetId().m_dwId);
+ }
+#endif
+}
+
+//==========================================================================
+// If the MethodTable doesn't yet know the Exposed class that represents it via
+// Reflection, acquire that class now. Regardless, return it to the caller.
+//==========================================================================
+OBJECTREF MethodTable::GetManagedClassObject()
+{
+ CONTRACT(OBJECTREF) {
+
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(!IsTransparentProxy() && !IsArray()); // Arrays and remoted objects can't go through this path.
+ POSTCONDITION(GetWriteableData()->m_hExposedClassObject != 0);
+ //REENTRANT
+ }
+ CONTRACT_END;
+
+#ifdef _DEBUG
+ // Force a GC here because GetManagedClassObject could trigger GC nondeterministically
+ GCStress<cfg_any, PulseGcTriggerPolicy>::MaybeTrigger();
+#endif // _DEBUG
+
+ if (GetWriteableData()->m_hExposedClassObject == NULL)
+ {
+ // Make sure that we have been restored
+ CheckRestore();
+
+ if (IsTransparentProxy()) // Extra protection in a retail build against doing this on a transparent proxy.
+ return NULL;
+
+ REFLECTCLASSBASEREF refClass = NULL;
+ GCPROTECT_BEGIN(refClass);
+ if (GetAssembly()->IsIntrospectionOnly())
+ refClass = (REFLECTCLASSBASEREF) AllocateObject(MscorlibBinder::GetClass(CLASS__CLASS_INTROSPECTION_ONLY));
+ else
+ refClass = (REFLECTCLASSBASEREF) AllocateObject(g_pRuntimeTypeClass);
+
+ LoaderAllocator *pLoaderAllocator = GetLoaderAllocator();
+
+ ((ReflectClassBaseObject*)OBJECTREFToObject(refClass))->SetType(TypeHandle(this));
+ ((ReflectClassBaseObject*)OBJECTREFToObject(refClass))->SetKeepAlive(pLoaderAllocator->GetExposedObject());
+
+ // Let all threads fight over who wins using InterlockedCompareExchange.
+ // Only the winner can set m_ExposedClassObject from NULL.
+ LOADERHANDLE exposedClassObjectHandle = pLoaderAllocator->AllocateHandle(refClass);
+
+ if (FastInterlockCompareExchangePointer(&(EnsureWritablePages(GetWriteableDataForWrite())->m_hExposedClassObject), exposedClassObjectHandle, static_cast<LOADERHANDLE>(NULL)))
+ {
+ pLoaderAllocator->ClearHandle(exposedClassObjectHandle);
+ }
+
+ GCPROTECT_END();
+ }
+ RETURN(GetManagedClassObjectIfExists());
+}
+
+#endif //!DACCESS_COMPILE && !CROSSGEN_COMPILE
+
+//==========================================================================================
+// This needs to stay consistent with AllocateNewMT() and MethodTable::Save()
+//
+// <TODO> protect this via some asserts as we've had one hard-to-track-down
+// bug already </TODO>
+//
+void MethodTable::GetSavedExtent(TADDR *pStart, TADDR *pEnd)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
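+ // The GC descriptor (CGCDesc) is laid out immediately before the MethodTable,
+ // so for types with pointer series the saved extent begins that many bytes
+ // earlier than 'this'.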
+ TADDR start;
+
+ if (ContainsPointersOrCollectible())
+ start = dac_cast<TADDR>(this) - CGCDesc::GetCGCDescFromMT(this)->GetSize();
+ else
+ start = dac_cast<TADDR>(this);
+
+ TADDR end = dac_cast<TADDR>(this) + GetEndOffsetOfOptionalMembers();
+
+ _ASSERTE(start && end && (start < end));
+ *pStart = start;
+ *pEnd = end;
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+
+#ifndef DACCESS_COMPILE
+
+BOOL MethodTable::CanInternVtableChunk(DataImage *image, VtableIndirectionSlotIterator it)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(IsCompilationProcess());
+
+ BOOL canBeSharedWith = TRUE;
+
+ // We allow full sharing except that which would break MethodTable::Fixup -- when the slots are Fixup'd
+ // we need to ensure that regardless of who is doing the Fixup the same target is decided on.
+ // Note that if this requirement is not met, an assert will fire in ZapStoredStructure::Save
+
+ if (GetFlag(enum_flag_NotInPZM))
+ {
+ canBeSharedWith = FALSE;
+ }
+
+ if (canBeSharedWith)
+ {
+ for (DWORD slotNumber = it.GetStartSlot(); slotNumber < it.GetEndSlot(); slotNumber++)
+ {
+ MethodDesc *pMD = GetMethodDescForSlot(slotNumber);
+ _ASSERTE(pMD != NULL);
+ pMD->CheckRestore();
+
+ if (!image->CanEagerBindToMethodDesc(pMD))
+ {
+ canBeSharedWith = FALSE;
+ break;
+ }
+ }
+ }
+
+ return canBeSharedWith;
+}
+
+//==========================================================================================
+void MethodTable::PrepopulateDictionary(DataImage * image, BOOL nonExpansive)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (GetDictionary())
+ {
+ // We can only save elements of the dictionary if we are sure of its
+ // layout, which means we must be either tightly-knit to the EEClass
+ // (i.e. be the owner of the EEClass) or else we can hard-bind to the EEClass.
+ // There's no point in prepopulating the dictionary if we can't save the entries.
+ //
+ // This corresponds to the canSaveSlots which we pass to the Dictionary::Fixup
+
+ if (!IsCanonicalMethodTable() && image->CanEagerBindToMethodTable(GetCanonicalMethodTable()))
+ {
+ LOG((LF_JIT, LL_INFO10000, "GENERICS: Prepopulating dictionary for MT %s\n", GetDebugClassName()));
+ GetDictionary()->PrepopulateDictionary(NULL, this, nonExpansive);
+ }
+ }
+}
+
+//==========================================================================================
+void ModuleCtorInfo::AddElement(MethodTable *pMethodTable)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Get the values for the new entry before we update the
+ // cache in the Module
+
+ // Expand the table if needed. No lock is needed because this is at NGEN time
+ if (numElements >= numLastAllocated)
+ {
+ _ASSERTE(numElements == numLastAllocated);
+
+ MethodTable ** ppOldMTEntries = ppMT;
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:22011) // Suppress PREFast warning about integer overflows or underflows
+#endif // _PREFAST_
+ DWORD numNewAllocated = max(2 * numLastAllocated, MODULE_CTOR_ELEMENTS);
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif // _PREFAST_
+
+ ppMT = new MethodTable* [numNewAllocated];
+
+ _ASSERTE(ppMT);
+
+ memcpy(ppMT, ppOldMTEntries, sizeof(MethodTable *) * numLastAllocated);
+ memset(ppMT + numLastAllocated, 0, sizeof(MethodTable *) * (numNewAllocated - numLastAllocated));
+
+ delete[] ppOldMTEntries;
+
+ numLastAllocated = numNewAllocated;
+ }
+
+ // Assign the new entry
+ //
+ // Note the use of two "parallel" arrays. We do this to keep the working set smaller, since we
+ // often search (in GetClassCtorInfoIfExists) for a methodtable pointer but never actually find it.
+
+ ppMT[numElements] = pMethodTable;
+ numElements++;
+}
+
+//==========================================================================================
+void MethodTable::Save(DataImage *image, DWORD profilingFlags)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsRestored_NoLogging());
+ PRECONDITION(IsFullyLoaded());
+ PRECONDITION(image->GetModule()->GetAssembly() ==
+ GetAppDomain()->ToCompilationDomain()->GetTargetAssembly());
+ } CONTRACTL_END;
+
+ LOG((LF_ZAP, LL_INFO10000, "MethodTable::Save %s (%p)\n", GetDebugClassName(), this));
+
+ // Be careful about calling DictionaryLayout::Trim - strict conditions apply.
+ // See note on that method.
+ if (GetDictionary() &&
+ GetClass()->GetDictionaryLayout() &&
+ image->CanEagerBindToMethodTable(GetCanonicalMethodTable()))
+ {
+ GetClass()->GetDictionaryLayout()->Trim();
+ }
+
+ // Set the "restore" flags. They may not have been set yet.
+ // We don't need the return value of this call.
+ NeedsRestore(image);
+
+ // Check if this is actually in the PZM (preferred zap module).
+ if (Module::GetPreferredZapModuleForMethodTable(this) != GetLoaderModule())
+ {
+ _ASSERTE(!IsStringOrArray());
+ SetFlag(enum_flag_NotInPZM);
+ }
+
+ // Set the IsStructMarshallable Bit
+ if (::IsStructMarshalable(this))
+ {
+ SetStructMarshalable();
+ }
+
+ TADDR start, end;
+
+ GetSavedExtent(&start, &end);
+
+#ifdef FEATURE_COMINTEROP
+ if (HasGuidInfo())
+ {
+ // Make sure our GUID is computed
+
+ // Generic WinRT types can have their GUID computed only if the instantiation is WinRT-legal
+ if (IsLegalNonArrayWinRTType())
+ {
+ GUID dummy;
+ if (SUCCEEDED(GetGuidNoThrow(&dummy, TRUE, FALSE)))
+ {
+ GuidInfo* pGuidInfo = GetGuidInfo();
+ _ASSERTE(pGuidInfo != NULL);
+
+ image->StoreStructure(pGuidInfo,
+ sizeof(GuidInfo),
+ DataImage::ITEM_GUID_INFO);
+
+ Module *pModule = GetModule();
+ if (pModule->CanCacheWinRTTypeByGuid(this))
+ {
+ pModule->CacheWinRTTypeByGuid(this, pGuidInfo);
+ }
+ }
+ else
+ {
+ GuidInfo** ppGuidInfo = GetGuidInfoPtr();
+ *ppGuidInfo = NULL;
+ }
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_REMOTING
+ if (HasRemotableMethodInfo())
+ {
+ if (GetNumMethods() > 0)
+ {
+ // The CrossDomainOptimizationInfo was populated earlier in Module::PrepareTypesForSave
+ CrossDomainOptimizationInfo* pRMI = GetRemotableMethodInfo();
+ SIZE_T sizeToBeSaved = CrossDomainOptimizationInfo::SizeOf(this);
+ image->StoreStructure(pRMI, sizeToBeSaved,
+ DataImage::ITEM_CROSS_DOMAIN_INFO);
+ }
+ }
+
+ // Store any optional VTS (Version Tolerant Serialization) info.
+ if (HasRemotingVtsInfo())
+ image->StoreStructure(GetRemotingVtsInfo(),
+ RemotingVtsInfo::GetSize(GetNumIntroducedInstanceFields()),
+ DataImage::ITEM_VTS_INFO);
+#endif //FEATURE_REMOTING
+
+#ifdef _DEBUG
+ if (GetDebugClassName() != NULL && !image->IsStored(GetDebugClassName()))
+ image->StoreStructure(debug_m_szClassName, (ULONG)(strlen(GetDebugClassName())+1),
+ DataImage::ITEM_DEBUG,
+ 1);
+#endif // _DEBUG
+
+ DataImage::ItemKind kindBasic = DataImage::ITEM_METHOD_TABLE;
+ if (IsWriteable())
+ kindBasic = DataImage::ITEM_METHOD_TABLE_SPECIAL_WRITEABLE;
+
+ ZapStoredStructure * pMTNode = image->StoreStructure((void*) start, (ULONG)(end - start), kindBasic);
+
+ if ((void *)this != (void *)start)
+ image->BindPointer(this, pMTNode, (BYTE *)this - (BYTE *)start);
+
+ // Store the vtable chunks
+ VtableIndirectionSlotIterator it = IterateVtableIndirectionSlots();
+ while (it.Next())
+ {
+ if (!image->IsStored(it.GetIndirectionSlot()))
+ {
+ if (CanInternVtableChunk(image, it))
+ image->StoreInternedStructure(it.GetIndirectionSlot(), it.GetSize(), DataImage::ITEM_VTABLE_CHUNK);
+ else
+ image->StoreStructure(it.GetIndirectionSlot(), it.GetSize(), DataImage::ITEM_VTABLE_CHUNK);
+ }
+ else
+ {
+ // Tell the interning system that we have already shared this structure without its help
+ image->NoteReusedStructure(it.GetIndirectionSlot());
+ }
+ }
+
+ if (HasNonVirtualSlotsArray())
+ {
+ image->StoreStructure(GetNonVirtualSlotsArray(), GetNonVirtualSlotsArraySize(), DataImage::ITEM_VTABLE_CHUNK);
+ }
+
+ if (HasInterfaceMap())
+ {
+#ifdef FEATURE_COMINTEROP
+ // Dynamic interface maps have an additional DWORD_PTR preceding the InterfaceInfo_t array
+ if (HasDynamicInterfaceMap())
+ {
+ ZapStoredStructure * pInterfaceMapNode = image->StoreInternedStructure(((DWORD_PTR *)GetInterfaceMap()) - 1,
+ GetInterfaceMapSize(),
+ DataImage::ITEM_INTERFACE_MAP);
+
+ image->BindPointer(GetInterfaceMap(), pInterfaceMapNode, sizeof(DWORD_PTR));
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ image->StoreInternedStructure(GetInterfaceMap(), GetInterfaceMapSize(), DataImage::ITEM_INTERFACE_MAP);
+ }
+
+ SaveExtraInterfaceInfo(image);
+ }
+
+ // If we have a dispatch map, save it.
+ if (HasDispatchMapSlot())
+ {
+ GetDispatchMap()->Save(image);
+ }
+
+ if (HasPerInstInfo())
+ {
+ ZapStoredStructure * pPerInstInfoNode;
+ if (CanEagerBindToParentDictionaries(image, NULL))
+ {
+ pPerInstInfoNode = image->StoreInternedStructure((BYTE *)GetPerInstInfo() - sizeof(GenericsDictInfo), GetPerInstInfoSize() + sizeof(GenericsDictInfo), DataImage::ITEM_DICTIONARY);
+ }
+ else
+ {
+ pPerInstInfoNode = image->StoreStructure((BYTE *)GetPerInstInfo() - sizeof(GenericsDictInfo), GetPerInstInfoSize() + sizeof(GenericsDictInfo), DataImage::ITEM_DICTIONARY_WRITEABLE);
+ }
+ image->BindPointer(GetPerInstInfo(), pPerInstInfoNode, sizeof(GenericsDictInfo));
+ }
+
+ Dictionary * pDictionary = GetDictionary();
+ if (pDictionary != NULL)
+ {
+ BOOL fIsWriteable;
+
+ if (!IsCanonicalMethodTable())
+ {
+ // CanEagerBindToMethodTable would not work for targeted patching here. The dictionary
+ // layout is sensitive to compilation order, which can be changed by TP-compatible changes.
+ BOOL canSaveSlots = (image->GetModule() == GetCanonicalMethodTable()->GetLoaderModule());
+
+ fIsWriteable = pDictionary->IsWriteable(image, canSaveSlots,
+ GetNumGenericArgs(),
+ GetModule(),
+ GetClass()->GetDictionaryLayout());
+ }
+ else
+ {
+ fIsWriteable = FALSE;
+ }
+
+
+ if (!fIsWriteable)
+ {
+ image->StoreInternedStructure(pDictionary, GetInstAndDictSize(), DataImage::ITEM_DICTIONARY);
+ }
+ else
+ {
+ image->StoreStructure(pDictionary, GetInstAndDictSize(), DataImage::ITEM_DICTIONARY_WRITEABLE);
+ }
+ }
+
+ WORD numStaticFields = GetClass()->GetNumStaticFields();
+
+ if (!IsCanonicalMethodTable() && HasGenericsStaticsInfo() && numStaticFields != 0)
+ {
+ FieldDesc * pGenericsFieldDescs = GetGenericsStaticFieldDescs();
+
+ for (DWORD i = 0; i < numStaticFields; i++)
+ {
+ FieldDesc *pFld = pGenericsFieldDescs + i;
+ pFld->PrecomputeNameHash();
+ }
+
+ ZapStoredStructure * pFDNode = image->StoreStructure(pGenericsFieldDescs, sizeof(FieldDesc) * numStaticFields,
+ DataImage::ITEM_GENERICS_STATIC_FIELDDESCS);
+
+ for (DWORD i = 0; i < numStaticFields; i++)
+ {
+ FieldDesc *pFld = pGenericsFieldDescs + i;
+ pFld->SaveContents(image);
+ if (pFld != pGenericsFieldDescs)
+ image->BindPointer(pFld, pFDNode, (BYTE *)pFld - (BYTE *)pGenericsFieldDescs);
+ }
+ }
+
+ // Allocate a ModuleCtorInfo entry in the NGEN image if necessary
+ if (HasBoxedRegularStatics())
+ {
+ image->GetModule()->GetZapModuleCtorInfo()->AddElement(this);
+ }
+
+ // MethodTable WriteableData
+
+#ifdef FEATURE_REMOTING
+ // Store any context static info.
+ if (HasContextStatics())
+ {
+ DataImage::ItemKind kindWriteable = DataImage::ITEM_METHOD_TABLE_DATA_COLD_WRITEABLE;
+ if ((profilingFlags & (1 << WriteMethodTableWriteableData)) != 0)
+ kindWriteable = DataImage::ITEM_METHOD_TABLE_DATA_HOT_WRITEABLE;
+
+ image->StoreStructure(GetContextStaticsBucket(),
+ sizeof(ContextStaticsBucket),
+ kindWriteable);
+ }
+#endif // FEATURE_REMOTING
+
+ PTR_Const_MethodTableWriteableData pWriteableData = GetWriteableData_NoLogging();
+ _ASSERTE(pWriteableData != NULL);
+ if (pWriteableData != NULL)
+ {
+ pWriteableData->Save(image, this, profilingFlags);
+ }
+
+ LOG((LF_ZAP, LL_INFO10000, "MethodTable::Save %s (%p) complete.\n", GetDebugClassName(), this));
+
+ // Save the EEClass at the same time as the method table if this is the canonical method table
+ if (IsCanonicalMethodTable())
+ GetClass()->Save(image, this);
+} // MethodTable::Save
+
+//==========================================================================
+// The NeedsRestore Computation.
+//
+// WARNING: The NeedsRestore predicate on MethodTable and EEClass
+// MUST be computable immediately after we have loaded a type.
+// It must NOT depend on any additions or changes made to the
+// MethodTable as a result of compiling code, or
+// later steps such as prepopulating dictionaries.
+//==========================================================================
+BOOL MethodTable::ComputeNeedsRestore(DataImage *image, TypeHandleList *pVisited)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ // See comment in ComputeNeedsRestoreWorker
+ PRECONDITION(GetLoaderModule()->HasNativeImage() || GetLoaderModule() == GetAppDomain()->ToCompilationDomain()->GetTargetModule());
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(GetAppDomain()->IsCompilationDomain()); // only used at ngen time!
+
+ if (GetWriteableData()->IsNeedsRestoreCached())
+ {
+ return GetWriteableData()->GetCachedNeedsRestore();
+ }
+
+ // We may speculatively assume that any types we've visited on this run of
+ // the ComputeNeedsRestore algorithm don't need a restore. If they
+ // do need a restore then we will check that when we first visit that method
+ // table.
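+ // (Illustrative cycle: A<T> whose parent is B<A<T>> -- computing A visits B, which
+ // refers back to A; the back-edge is speculatively answered FALSE, and the
+ // broken-cycle mark below prevents that speculative answer from being cached.)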
+ if (TypeHandleList::Exists(pVisited, TypeHandle(this)))
+ {
+ pVisited->MarkBrokenCycle(this);
+ return FALSE;
+ }
+ TypeHandleList newVisited(this, pVisited);
+
+ BOOL needsRestore = ComputeNeedsRestoreWorker(image, &newVisited);
+
+ // Cache the results of running the algorithm.
+ // We can only cache the result if we have not speculatively assumed
+ // that any types are not NeedsRestore
+ if (!newVisited.HasBrokenCycleMark())
+ {
+ GetWriteableDataForWrite()->SetCachedNeedsRestore(needsRestore);
+ }
+ else
+ {
+ _ASSERTE(pVisited != NULL);
+ }
+ return needsRestore;
+}
+
+//==========================================================================================
+BOOL MethodTable::ComputeNeedsRestoreWorker(DataImage *image, TypeHandleList *pVisited)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef _DEBUG
+ // You should only call ComputeNeedsRestoreWorker on things being saved into
+ // the current LoaderModule - the NeedsRestore flag should have been computed
+ // for all items from NGEN images, and we should never compute NeedsRestore
+ // on anything that is not related to an NGEN image. If this fails then
+ // there is probably a CanEagerBindTo check missing as we trace through a
+ // pointer from one data structure to another.
+ // Trace back on the call stack and work out where this condition first fails.
+
+ Module* myModule = GetLoaderModule();
+ AppDomain* myAppDomain = GetAppDomain();
+ CompilationDomain* myCompilationDomain = myAppDomain->ToCompilationDomain();
+ Module* myCompilationModule = myCompilationDomain->GetTargetModule();
+
+ if (myModule != myCompilationModule)
+ {
+ _ASSERTE(!"You should only call ComputeNeedsRestoreWorker on things being saved into the current LoaderModule");
+ }
+#endif
+
+ if (g_CorCompileVerboseLevel == CORCOMPILE_VERBOSE)
+ {
+ DefineFullyQualifiedNameForClass();
+ LPCUTF8 name = GetFullyQualifiedNameForClass(this);
+ printf ("MethodTable %s needs restore? ", name);
+ }
+ if (g_CorCompileVerboseLevel >= CORCOMPILE_STATS && GetModule()->GetNgenStats())
+ GetModule()->GetNgenStats()->MethodTableRestoreNumReasons[TotalMethodTables]++;
+
+ #define UPDATE_RESTORE_REASON(c) \
+ if (g_CorCompileVerboseLevel == CORCOMPILE_VERBOSE) \
+ printf ("Yes, " #c " \n"); \
+ if (g_CorCompileVerboseLevel >= CORCOMPILE_STATS && GetModule()->GetNgenStats()) \
+ GetModule()->GetNgenStats()->MethodTableRestoreNumReasons[c]++;
+
+ // The special method table for IL stubs has to be prerestored. Restore is not able to handle it
+ // because it does not have a token. In particular, this is a problem for /profiling native images.
+ if (this == image->GetModule()->GetILStubCache()->GetStubMethodTable())
+ {
+ return FALSE;
+ }
+
+ // When profiling, we always want to perform the restore.
+ if (GetAppDomain()->ToCompilationDomain()->m_fForceProfiling)
+ {
+ UPDATE_RESTORE_REASON(ProfilingEnabled);
+ return TRUE;
+ }
+
+ if (DependsOnEquivalentOrForwardedStructs())
+ {
+ UPDATE_RESTORE_REASON(ComImportStructDependenciesNeedRestore);
+ return TRUE;
+ }
+
+ if (!IsCanonicalMethodTable() && !image->CanPrerestoreEagerBindToMethodTable(GetCanonicalMethodTable(), pVisited))
+ {
+ UPDATE_RESTORE_REASON(CanNotPreRestoreHardBindToCanonicalMethodTable);
+ return TRUE;
+ }
+
+ if (!image->CanEagerBindToModule(GetModule()))
+ {
+ UPDATE_RESTORE_REASON(CrossAssembly);
+ return TRUE;
+ }
+
+ if (GetParentMethodTable())
+ {
+ if (!image->CanPrerestoreEagerBindToMethodTable(GetParentMethodTable(), pVisited))
+ {
+ UPDATE_RESTORE_REASON(CanNotPreRestoreHardBindToParentMethodTable);
+ return TRUE;
+ }
+ }
+
+ // Check per-inst pointers-to-dictionaries.
+ if (!CanEagerBindToParentDictionaries(image, pVisited))
+ {
+ UPDATE_RESTORE_REASON(CanNotHardBindToInstanceMethodTableChain);
+ return TRUE;
+ }
+
+ // Now check if the dictionary (if any) owned by this methodtable needs a restore.
+ if (GetDictionary())
+ {
+ if (GetDictionary()->ComputeNeedsRestore(image, pVisited, GetNumGenericArgs()))
+ {
+ UPDATE_RESTORE_REASON(GenericsDictionaryNeedsRestore);
+ return TRUE;
+ }
+ }
+
+ // The interface chain is traversed without doing CheckRestore's. Thus
+ // if any of the types in the inherited interfaces hierarchy need a restore
+ // or are cross-module pointers then this methodtable will also need a restore.
+ InterfaceMapIterator it = IterateInterfaceMap();
+ while (it.Next())
+ {
+ if (!image->CanPrerestoreEagerBindToMethodTable(it.GetInterface(), pVisited))
+ {
+ UPDATE_RESTORE_REASON(InterfaceIsGeneric);
+ return TRUE;
+ }
+ }
+
+ if (NeedsCrossModuleGenericsStaticsInfo())
+ {
+ UPDATE_RESTORE_REASON(CrossModuleGenericsStatics);
+ return TRUE;
+ }
+
+ if (IsArray())
+ {
+ if(!image->CanPrerestoreEagerBindToTypeHandle(GetApproxArrayElementTypeHandle(), pVisited))
+ {
+ UPDATE_RESTORE_REASON(ArrayElement);
+ return TRUE;
+ }
+ }
+
+ if (g_CorCompileVerboseLevel == CORCOMPILE_VERBOSE)
+ printf ("No \n");
+ return FALSE;
+}
+
+//==========================================================================================
+BOOL MethodTable::CanEagerBindToParentDictionaries(DataImage *image, TypeHandleList *pVisited)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodTable *pChain = GetParentMethodTable();
+ while (pChain != NULL)
+ {
+ // This is for the case where the method table contains a pointer to
+ // an inherited dictionary, e.g. given the case D : C, C : B<int>
+ // where B<int> is in another module then D contains a pointer to the
+ // dictionary for B<int>. Note that in this case we might still be
+ // able to hardbind to C.
+ if (pChain->HasInstantiation())
+ {
+ if (!image->CanEagerBindToMethodTable(pChain, FALSE, pVisited) ||
+ !image->CanHardBindToZapModule(pChain->GetLoaderModule()))
+ {
+ return FALSE;
+ }
+ }
+ pChain = pChain->GetParentMethodTable();
+ }
+ return TRUE;
+}
+
+//==========================================================================================
+BOOL MethodTable::NeedsCrossModuleGenericsStaticsInfo()
+{
+ STANDARD_VM_CONTRACT;
+
+ return HasGenericsStaticsInfo() && !ContainsGenericVariables() && !IsSharedByGenericInstantiations() &&
+ (Module::GetPreferredZapModuleForMethodTable(this) != GetLoaderModule());
+}
+
+//==========================================================================================
+BOOL MethodTable::IsWriteable()
+{
+ STANDARD_VM_CONTRACT;
+
+ // Overlapped method table is written into in hosted scenarios
+ // (see code:CorHost2::GetHostOverlappedExtensionSize)
+ if (MscorlibBinder::IsClass(this, CLASS__OVERLAPPEDDATA))
+ return TRUE;
+
+#ifdef FEATURE_COMINTEROP
+ // Dynamic expansion of interface map writes into method table
+ // (see code:MethodTable::AddDynamicInterface)
+ if (HasDynamicInterfaceMap())
+ return TRUE;
+
+ // CCW template is created lazily and when that happens, the
+ // pointer is written directly into the method table.
+ if (HasCCWTemplate())
+ return TRUE;
+
+ // RCW per-type data is created lazily at run-time.
+ if (HasRCWPerTypeData())
+ return TRUE;
+#endif
+
+ return FALSE;
+}
+
+//==========================================================================================
+// This is used when non-canonical (i.e. duplicated) method tables
+// attempt to bind to items logically belonging to an EEClass or MethodTable.
+// i.e. the contract map in the EEClass and the generic dictionary stored in the canonical
+// method table.
+//
+// We want to check if we can hard bind to the containing structure before
+// deciding to hardbind to the inside of it. This is because we may not be able
+// to hardbind to all EEClasses and/or MethodTables even if they live in a hardbindable
+// target module. Thus we want to call CanEagerBindToMethodTable
+// to check that we can hardbind to the containing structure.
+static
+void HardBindOrClearDictionaryPointer(DataImage *image, MethodTable *pMT, void * p, SSIZE_T offset)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (image->CanEagerBindToMethodTable(pMT) &&
+ image->CanHardBindToZapModule(pMT->GetLoaderModule()))
+ {
+ image->FixupPointerField(p, offset);
+ }
+ else
+ {
+ image->ZeroPointerField(p, offset);
+ }
+}
+
+//==========================================================================================
+void MethodTable::Fixup(DataImage *image)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsFullyLoaded());
+ }
+ CONTRACTL_END;
+
+ LOG((LF_ZAP, LL_INFO10000, "MethodTable::Fixup %s\n", GetDebugClassName()));
+
+ if (GetWriteableData()->IsFixedUp())
+ return;
+
+ BOOL needsRestore = NeedsRestore(image);
+ LOG((LF_ZAP, LL_INFO10000, "MethodTable::Fixup %s (%p), needsRestore=%d\n", GetDebugClassName(), this, needsRestore));
+
+ BOOL isCanonical = IsCanonicalMethodTable();
+
+ Module *pZapModule = image->GetModule();
+
+ MethodTable *pNewMT = (MethodTable *) image->GetImagePointer(this);
+
+ // For canonical method tables, the pointer to the EEClass is never encoded as a fixup
+ // even if this method table is not in its preferred zap module, i.e. the two are
+ // "tightly-bound".
+ if (IsCanonicalMethodTable())
+ {
+ // Pointer to EEClass
+ image->FixupPointerField(this, offsetof(MethodTable, m_pEEClass));
+ }
+ else
+ {
+ //
+ // Encode m_pEEClassOrCanonMT
+ //
+ MethodTable * pCanonMT = GetCanonicalMethodTable();
+
+ ZapNode * pImport = NULL;
+ if (image->CanEagerBindToMethodTable(pCanonMT))
+ {
+ if (image->CanHardBindToZapModule(pCanonMT->GetLoaderModule()))
+ {
+ // Pointer to canonical methodtable
+ image->FixupField(this, offsetof(MethodTable, m_pCanonMT), pCanonMT, UNION_METHODTABLE);
+ }
+ else
+ {
+ // Pointer to lazy bound indirection cell to canonical methodtable
+ pImport = image->GetTypeHandleImport(pCanonMT);
+ }
+ }
+ else
+ {
+ // Pointer to eager bound indirection cell to canonical methodtable
+ _ASSERTE(pCanonMT->IsTypicalTypeDefinition() ||
+ !pCanonMT->ContainsGenericVariables());
+ pImport = image->GetTypeHandleImport(pCanonMT);
+ }
+
+ if (pImport != NULL)
+ {
+ image->FixupFieldToNode(this, offsetof(MethodTable, m_pCanonMT), pImport, UNION_INDIRECTION);
+ }
+ }
+
+ image->FixupField(this, offsetof(MethodTable, m_pLoaderModule), pZapModule);
+
+#ifdef _DEBUG
+ image->FixupPointerField(this, offsetof(MethodTable, debug_m_szClassName));
+#endif // _DEBUG
+
+ MethodTable * pParentMT = GetParentMethodTable();
+ _ASSERTE(!pNewMT->GetFlag(enum_flag_HasIndirectParent));
+
+ if (pParentMT != NULL)
+ {
+ //
+ // Encode m_pParentMethodTable
+ //
+ ZapNode * pImport = NULL;
+ if (image->CanEagerBindToMethodTable(pParentMT))
+ {
+ if (image->CanHardBindToZapModule(pParentMT->GetLoaderModule()))
+ {
+ image->FixupPointerField(this, offsetof(MethodTable, m_pParentMethodTable));
+ }
+ else
+ {
+ pImport = image->GetTypeHandleImport(pParentMT);
+ }
+ }
+ else
+ {
+ if (!pParentMT->IsCanonicalMethodTable())
+ {
+#ifdef _DEBUG
+ IMDInternalImport *pInternalImport = GetModule()->GetMDImport();
+
+ mdToken crExtends;
+ pInternalImport->GetTypeDefProps(GetCl(),
+ NULL,
+ &crExtends);
+
+ _ASSERTE(TypeFromToken(crExtends) == mdtTypeSpec);
+#endif
+
+ // Use unique cell for now since we are first going to set the parent method table to
+ // approx one first, and then to the exact one later. This would mess up the shared cell.
+ // It would be nice to clean it up to use the shared cell - we should set the parent method table
+ // just once at the end.
+ pImport = image->GetTypeHandleImport(pParentMT, this /* pUniqueId */);
+ }
+ else
+ {
+ pImport = image->GetTypeHandleImport(pParentMT);
+ }
+ }
+
+ if (pImport != NULL)
+ {
+ image->FixupFieldToNode(this, offsetof(MethodTable, m_pParentMethodTable), pImport, -(SSIZE_T)offsetof(MethodTable, m_pParentMethodTable));
+ pNewMT->SetFlag(enum_flag_HasIndirectParent);
+ }
+ }
+
+ if (HasNonVirtualSlotsArray())
+ {
+ TADDR ppNonVirtualSlots = GetNonVirtualSlotsPtr();
+ PREFIX_ASSUME(ppNonVirtualSlots != NULL);
+ image->FixupRelativePointerField(this, (BYTE *)ppNonVirtualSlots - (BYTE *)this);
+ }
+
+ if (HasInterfaceMap())
+ {
+ image->FixupPointerField(this, offsetof(MethodTable, m_pMultipurposeSlot2));
+
+ FixupExtraInterfaceInfo(image);
+ }
+
+ _ASSERTE(GetWriteableData());
+ image->FixupPointerField(this, offsetof(MethodTable, m_pWriteableData));
+ m_pWriteableData->Fixup(image, this, needsRestore);
+
+#ifdef FEATURE_COMINTEROP
+ if (HasGuidInfo())
+ {
+ GuidInfo **ppGuidInfo = GetGuidInfoPtr();
+ if (*ppGuidInfo != NULL)
+ {
+ image->FixupPointerField(this, (BYTE *)ppGuidInfo - (BYTE *)this);
+ }
+ else
+ {
+ image->ZeroPointerField(this, (BYTE *)ppGuidInfo - (BYTE *)this);
+ }
+ }
+
+ if (HasCCWTemplate())
+ {
+ ComCallWrapperTemplate **ppTemplate = GetCCWTemplatePtr();
+ image->ZeroPointerField(this, (BYTE *)ppTemplate - (BYTE *)this);
+ }
+
+ if (HasRCWPerTypeData())
+ {
+ // It would be nice to save these, but the impact on mscorlib.ni size is prohibitive.
+ RCWPerTypeData **ppData = GetRCWPerTypeDataPtr();
+ image->ZeroPointerField(this, (BYTE *)ppData - (BYTE *)this);
+ }
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_REMOTING
+ if (HasRemotableMethodInfo())
+ {
+ CrossDomainOptimizationInfo **pRMI = GetRemotableMethodInfoPtr();
+ if (*pRMI != NULL)
+ {
+ image->FixupPointerField(this, (BYTE *)pRMI - (BYTE *)this);
+ }
+ }
+
+ // Optional VTS (Version Tolerant Serialization) fixups.
+ if (HasRemotingVtsInfo())
+ {
+ RemotingVtsInfo **ppVtsInfo = GetRemotingVtsInfoPtr();
+ image->FixupPointerField(this, (BYTE *)ppVtsInfo - (BYTE *)this);
+
+ RemotingVtsInfo *pVtsInfo = *ppVtsInfo;
+ for (DWORD i = 0; i < RemotingVtsInfo::VTS_NUM_CALLBACK_TYPES; i++)
+ image->FixupMethodDescPointer(pVtsInfo, &pVtsInfo->m_pCallbacks[i]);
+ }
+#endif //FEATURE_REMOTING
+
+ //
+ // Fix flags
+ //
+
+ _ASSERTE((pNewMT->GetFlag(enum_flag_IsZapped) == 0));
+ pNewMT->SetFlag(enum_flag_IsZapped);
+
+ _ASSERTE((pNewMT->GetFlag(enum_flag_IsPreRestored) == 0));
+ if (!needsRestore)
+ pNewMT->SetFlag(enum_flag_IsPreRestored);
+
+ //
+ // Fixup vtable
+ // If the canonical method table lives in a different loader module
+ // then just zero out the entries and copy them across from the canonical
+ // vtable on restore.
+ //
+ // Note the canonical method table will be the same as the current method table
+ // if the method table is not a generic instantiation.
+
+ if (HasDispatchMapSlot())
+ {
+ TADDR pSlot = GetMultipurposeSlotPtr(enum_flag_HasDispatchMapSlot, c_DispatchMapSlotOffsets);
+ DispatchMap * pDispatchMap = RelativePointer<PTR_DispatchMap>::GetValueAtPtr(pSlot);
+ image->FixupField(this, pSlot - (TADDR)this, pDispatchMap, 0, IMAGE_REL_BASED_RelativePointer);
+ pDispatchMap->Fixup(image);
+ }
+
+ if (HasModuleOverride())
+ {
+ image->FixupModulePointer(this, GetModuleOverridePtr());
+ }
+
+ {
+ VtableIndirectionSlotIterator it = IterateVtableIndirectionSlots();
+ while (it.Next())
+ {
+ image->FixupPointerField(this, it.GetOffsetFromMethodTable());
+ }
+ }
+
+ unsigned numVTableSlots = GetNumVtableSlots();
+ for (unsigned slotNumber = 0; slotNumber < numVTableSlots; slotNumber++)
+ {
+ //
+ // Find the method desc from the slot.
+ //
+ MethodDesc *pMD = GetMethodDescForSlot(slotNumber);
+ _ASSERTE(pMD != NULL);
+ pMD->CheckRestore();
+
+ PVOID slotBase;
+ SSIZE_T slotOffset;
+
+ if (slotNumber < GetNumVirtuals())
+ {
+ // Virtual slots live in chunks pointed to by vtable indirections
+
+ slotBase = (PVOID) GetVtableIndirections()[GetIndexOfVtableIndirection(slotNumber)];
+ slotOffset = GetIndexAfterVtableIndirection(slotNumber) * sizeof(PCODE);
+ }
+ else if (HasSingleNonVirtualSlot())
+ {
+ // Non-virtual slots < GetNumVtableSlots live in a single chunk pointed to by an optional member,
+ // except when there is only one, in which case it lives in the optional member itself
+
+ _ASSERTE(slotNumber == GetNumVirtuals());
+ slotBase = (PVOID) this;
+ slotOffset = (BYTE *)GetSlotPtr(slotNumber) - (BYTE *)this;
+ }
+ else
+ {
+ // Non-virtual slots < GetNumVtableSlots live in a single chunk pointed to by an optional member
+
+ _ASSERTE(HasNonVirtualSlotsArray());
+ slotBase = (PVOID) GetNonVirtualSlotsArray();
+ slotOffset = (slotNumber - GetNumVirtuals()) * sizeof(PCODE);
+ }
+
+ // Attempt to make the slot point directly at the prejitted code.
+ // Note that changes to this logic may require or enable an update to CanInternVtableChunk.
+ // If a necessary update is not made, an assert will fire in ZapStoredStructure::Save.
+
+ if (pMD->GetMethodTable() == this)
+ {
+ ZapRelocationType relocType;
+ if (slotNumber >= GetNumVirtuals())
+ relocType = IMAGE_REL_BASED_RelativePointer;
+ else
+ relocType = IMAGE_REL_BASED_PTR;
+
+ pMD->FixupSlot(image, slotBase, slotOffset, relocType);
+ }
+ else
+ {
+
+#ifdef _DEBUG
+
+ // Static method should be in the owning methodtable only.
+ _ASSERTE(!pMD->IsStatic());
+
+ MethodTable *pSourceMT = isCanonical
+ ? GetParentMethodTable()
+ : GetCanonicalMethodTable();
+
+ // It must be inherited from the parent or copied from the canonical
+ _ASSERTE(pSourceMT->GetMethodDescForSlot(slotNumber) == pMD);
+#endif
+
+ if (image->CanEagerBindToMethodDesc(pMD) && pMD->GetLoaderModule() == pZapModule)
+ {
+ pMD->FixupSlot(image, slotBase, slotOffset);
+ }
+ else
+ {
+ if (!pMD->IsGenericMethodDefinition())
+ {
+ ZapNode * importThunk = image->GetVirtualImportThunk(pMD->GetMethodTable(), pMD, slotNumber);
+ // On ARM, make sure that the address to the virtual thunk that we write into the
+ // vtable "chunk" has the Thumb bit set.
+ image->FixupFieldToNode(slotBase, slotOffset, importThunk ARM_ARG(THUMB_CODE));
+ }
+ else
+ {
+ // Virtual generic methods don't/can't use their vtable slot
+ image->ZeroPointerField(slotBase, slotOffset);
+ }
+ }
+ }
+ }
+
+ //
+ // Fixup Interface map
+ //
+
+ InterfaceMapIterator it = IterateInterfaceMap();
+ while (it.Next())
+ {
+ image->FixupMethodTablePointer(GetInterfaceMap(), &it.GetInterfaceInfo()->m_pMethodTable);
+ }
+
+ if (IsArray())
+ {
+ image->HardBindTypeHandlePointer(this, offsetof(MethodTable, m_ElementTypeHnd));
+ }
+
+ //
+ // Fixup per-inst pointers for this method table
+ //
+
+ if (HasPerInstInfo())
+ {
+ // Fixup the pointer to the per-inst table
+ image->FixupPointerField(this, offsetof(MethodTable, m_pPerInstInfo));
+
+ for (MethodTable *pChain = this; pChain != NULL; pChain = pChain->GetParentMethodTable())
+ {
+ if (pChain->HasInstantiation())
+ {
+ DWORD dictNum = pChain->GetNumDicts()-1;
+
+ // If we can't hardbind then the value will be copied down from
+ // the parent upon restore.
+
+ // We special-case the dictionary for this method table because we must always
+ // hard bind to it even if it's not in its preferred zap module
+ if (pChain == this)
+ image->FixupPointerField(GetPerInstInfo(), dictNum * sizeof(Dictionary *));
+ else
+ HardBindOrClearDictionaryPointer(image, pChain, GetPerInstInfo(), dictNum * sizeof(Dictionary *));
+ }
+ }
+ }
+ //
+ // Fixup instantiation+dictionary for this method table (if any)
+ //
+ if (GetDictionary())
+ {
+ LOG((LF_JIT, LL_INFO10000, "GENERICS: Fixup dictionary for MT %s\n", GetDebugClassName()));
+
+ // CanEagerBindToMethodTable would not work for targeted patching here. The dictionary
+ // layout is sensitive to compilation order that can be changed by TP compatible changes.
+ BOOL canSaveSlots = !IsCanonicalMethodTable() && (image->GetModule() == GetCanonicalMethodTable()->GetLoaderModule());
+
+ // See comment on Dictionary::Fixup
+ GetDictionary()->Fixup(image,
+ TRUE,
+ canSaveSlots,
+ GetNumGenericArgs(),
+ GetModule(),
+ GetClass()->GetDictionaryLayout());
+ }
+
+ // Fixup per-inst statics info
+ if (HasGenericsStaticsInfo())
+ {
+ GenericsStaticsInfo *pInfo = GetGenericsStaticsInfo();
+
+ image->FixupPointerField(this, (BYTE *)&pInfo->m_pFieldDescs - (BYTE *)this);
+ if (!isCanonical)
+ {
+ for (DWORD i = 0; i < GetClass()->GetNumStaticFields(); i++)
+ {
+ FieldDesc *pFld = GetGenericsStaticFieldDescs() + i;
+ pFld->Fixup(image);
+ }
+ }
+
+ if (NeedsCrossModuleGenericsStaticsInfo())
+ {
+ MethodTableWriteableData * pNewWriteableData = (MethodTableWriteableData *)image->GetImagePointer(m_pWriteableData);
+ CrossModuleGenericsStaticsInfo * pNewCrossModuleGenericsStaticsInfo = pNewWriteableData->GetCrossModuleGenericsStaticsInfo();
+
+ pNewCrossModuleGenericsStaticsInfo->m_DynamicTypeID = pInfo->m_DynamicTypeID;
+
+ image->ZeroPointerField(m_pWriteableData, sizeof(MethodTableWriteableData) + offsetof(CrossModuleGenericsStaticsInfo, m_pModuleForStatics));
+
+ pNewMT->SetFlag(enum_flag_StaticsMask_IfGenericsThenCrossModule);
+ }
+ }
+ else
+ {
+ _ASSERTE(!NeedsCrossModuleGenericsStaticsInfo());
+ }
+
+#ifdef FEATURE_REMOTING
+ if (HasContextStatics())
+ {
+ ContextStaticsBucket **ppInfo = GetContextStaticsBucketPtr();
+ image->FixupPointerField(this, (BYTE *)ppInfo - (BYTE *)this);
+
+ ContextStaticsBucket *pNewInfo = (ContextStaticsBucket*)image->GetImagePointer(*ppInfo);
+ pNewInfo->m_dwContextStaticsOffset = (DWORD)-1;
+ }
+#endif // FEATURE_REMOTING
+
+ LOG((LF_ZAP, LL_INFO10000, "MethodTable::Fixup %s (%p) complete\n", GetDebugClassName(), this));
+
+ // If this method table is canonical (one-to-one with EEClass) then fix up the EEClass also
+ if (isCanonical)
+ GetClass()->Fixup(image, this);
+
+ // Mark method table as fixed-up
+ GetWriteableDataForWrite()->SetFixedUp();
+
+} // MethodTable::Fixup
+
+//==========================================================================================
+void MethodTableWriteableData::Save(DataImage *image, MethodTable *pMT, DWORD profilingFlags) const
+{
+ STANDARD_VM_CONTRACT;
+
+ SIZE_T size = sizeof(MethodTableWriteableData);
+
+ // MethodTableWriteableData is followed by optional CrossModuleGenericsStaticsInfo in NGen images
+ if (pMT->NeedsCrossModuleGenericsStaticsInfo())
+ size += sizeof(CrossModuleGenericsStaticsInfo);
+
+ DataImage::ItemKind kindWriteable = DataImage::ITEM_METHOD_TABLE_DATA_COLD_WRITEABLE;
+ if ((profilingFlags & (1 << WriteMethodTableWriteableData)) != 0)
+ kindWriteable = DataImage::ITEM_METHOD_TABLE_DATA_HOT_WRITEABLE;
+
+ ZapStoredStructure * pNode = image->StoreStructure(NULL, size, kindWriteable);
+ image->BindPointer(this, pNode, 0);
+ image->CopyData(pNode, this, sizeof(MethodTableWriteableData));
+}
+
+//==========================================================================================
+void MethodTableWriteableData::Fixup(DataImage *image, MethodTable *pMT, BOOL needsRestore)
+{
+ STANDARD_VM_CONTRACT;
+
+ image->ZeroField(this, offsetof(MethodTableWriteableData, m_hExposedClassObject), sizeof(m_hExposedClassObject));
+
+ MethodTableWriteableData *pNewNgenPrivateMT = (MethodTableWriteableData*) image->GetImagePointer(this);
+ _ASSERTE(pNewNgenPrivateMT != NULL);
+
+ pNewNgenPrivateMT->m_dwFlags &= ~(enum_flag_RemotingConfigChecked |
+ enum_flag_CriticalTypePrepared);
+
+ if (needsRestore)
+ pNewNgenPrivateMT->m_dwFlags |= (enum_flag_UnrestoredTypeKey |
+ enum_flag_Unrestored |
+ enum_flag_HasApproxParent |
+ enum_flag_IsNotFullyLoaded);
+
+#ifdef _DEBUG
+ pNewNgenPrivateMT->m_dwLastVerifedGCCnt = (DWORD)-1;
+#endif
+}
+
+#endif // !DACCESS_COMPILE
+
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#ifdef FEATURE_PREJIT
+
+//==========================================================================================
+void MethodTable::CheckRestore()
+{
+ CONTRACTL
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ if (!IsFullyLoaded())
+ {
+ ClassLoader::EnsureLoaded(this);
+ _ASSERTE(IsFullyLoaded());
+ }
+
+ g_IBCLogger.LogMethodTableAccess(this);
+}
+
+#else // !FEATURE_PREJIT
+//==========================================================================================
+void MethodTable::CheckRestore()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+#endif // !FEATURE_PREJIT
+
+
+#ifndef DACCESS_COMPILE
+
+BOOL SatisfiesClassConstraints(TypeHandle instanceTypeHnd, TypeHandle typicalTypeHnd,
+ const InstantiationContext *pInstContext);
+
+static VOID DoAccessibilityCheck(MethodTable *pAskingMT, MethodTable *pTargetMT, UINT resIDWhy, BOOL checkTargetTypeTransparency)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ StaticAccessCheckContext accessContext(NULL, pAskingMT);
+
+ if (!ClassLoader::CanAccessClass(&accessContext,
+ pTargetMT, //the desired class
+ pTargetMT->GetAssembly(), //the desired class's assembly
+ *AccessCheckOptions::s_pNormalAccessChecks,
+ checkTargetTypeTransparency
+ ))
+ {
+ SString displayName;
+ pAskingMT->GetAssembly()->GetDisplayName(displayName);
+ SString targetName;
+
+ // The error string is either E_ACCESSDENIED, which requires the type name of the target,
+ // or a more normal TypeLoadException, which displays the requesting type.
+ _ASSERTE((resIDWhy == (UINT)E_ACCESSDENIED) || (resIDWhy == (UINT)IDS_CLASSLOAD_INTERFACE_NO_ACCESS));
+ TypeString::AppendType(targetName, TypeHandle((resIDWhy == (UINT)E_ACCESSDENIED) ? pTargetMT : pAskingMT));
+
+ COMPlusThrow(kTypeLoadException, resIDWhy, targetName.GetUnicode(), displayName.GetUnicode());
+ }
+
+}
+
+VOID DoAccessibilityCheckForConstraint(MethodTable *pAskingMT, TypeHandle thConstraint, UINT resIDWhy)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if (thConstraint.IsTypeDesc())
+ {
+ TypeDesc *pTypeDesc = thConstraint.AsTypeDesc();
+
+ if (pTypeDesc->IsGenericVariable())
+ {
+ // since the metadata represents a generic type param constraint as an index into
+ // the declaring type's list of generic params, it is structurally impossible
+ // to express a violation this way. So there's no check to be done here.
+ }
+ else
+ if (pTypeDesc->HasTypeParam())
+ {
+ DoAccessibilityCheckForConstraint(pAskingMT, pTypeDesc->GetTypeParam(), resIDWhy);
+ }
+ else
+ {
+ COMPlusThrow(kTypeLoadException, E_ACCESSDENIED);
+ }
+
+ }
+ else
+ {
+ DoAccessibilityCheck(pAskingMT, thConstraint.GetMethodTable(), resIDWhy, FALSE);
+ }
+
+}
+
+VOID DoAccessibilityCheckForConstraints(MethodTable *pAskingMT, TypeVarTypeDesc *pTyVar, UINT resIDWhy)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ DWORD numConstraints;
+ TypeHandle *pthConstraints = pTyVar->GetCachedConstraints(&numConstraints);
+ for (DWORD cidx = 0; cidx < numConstraints; cidx++)
+ {
+ TypeHandle thConstraint = pthConstraints[cidx];
+
+ DoAccessibilityCheckForConstraint(pAskingMT, thConstraint, resIDWhy);
+ }
+}
+
+
+// Recursive worker that pumps the transitive closure of a type's dependencies to the specified target level.
+// Dependencies include:
+//
+// - parent
+// - interfaces
+// - canonical type, for non-canonical instantiations
+// - typical type, for non-typical instantiations
+//
+// Parameters:
+//
+// pVisited - used to prevent endless recursion in the case of cyclic dependencies
+//
+// level - target level to pump to - must be CLASS_DEPENDENCIES_LOADED or CLASS_LOADED
+//
+// if CLASS_DEPENDENCIES_LOADED, all transitive dependencies are resolved to their
+// exact types.
+//
+// if CLASS_LOADED, all type-safety checks are done on the type and all its transitive
+// dependencies. Note that for the CLASS_LOADED case, some types may be left
+ // on the pending list rather than pushed to CLASS_LOADED in the case of cyclic
+// dependencies - the root caller must handle this.
+//
+ // pfBailed - if we or one of our dependencies bails early due to cyclic dependencies, we
+// must set *pfBailed to TRUE. Otherwise, we must *leave it unchanged* (thus, the
+// boolean acts as a cumulative OR.)
+//
+// pPending - if one of our dependencies bailed, the type cannot yet be promoted to CLASS_LOADED
+// as the dependencies will be checked later and may fail a security check then.
+// Instead, DoFullyLoad() will add the type to the pending list - the root caller
+// is responsible for promoting the type after the full transitive closure has been
+// walked. Note that it would be just as correct to always defer to the pending list -
+// however, that is a little less performant.
+//
+// pInstContext - instantiation context created in code:SigPointer.GetTypeHandleThrowing and
+// ultimately passed down to code:TypeVarTypeDesc.SatisfiesConstraints.
+//
+
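+ // A minimal sketch (illustrative only, not part of the runtime) of the root-caller
+ // pattern described above. The driver function is hypothetical; DoFullyLoad,
+ // DFLPendingList and SetIsFullyLoaded are the real names used in this file. Assuming
+ // the walk completes without throwing, the root caller promotes every type that
+ // bailed due to cycles once the full transitive closure has been walked:
+#if 0
+ void PumpTypeToClassLoaded(MethodTable *pMT) // hypothetical root caller
+ {
+ DFLPendingList pending; // types deferred because of cyclic dependencies
+ BOOL fBailed = FALSE; // cumulative OR across the closure walk
+
+ pMT->DoFullyLoad(NULL, CLASS_LOADED, &pending, &fBailed, NULL);
+
+ // By now all checks on the deferred types have been performed,
+ // so they can be promoted to CLASS_LOADED.
+ for (UINT i = 0; i < pending.Count(); i++)
+ pending.Table()[i].GetMethodTable()->SetIsFullyLoaded();
+ }
+#endif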
+
+// Closure of locals necessary for implementing CheckForEquivalenceAndFullyLoadType.
+ // Used so that one valuetype-walking algorithm can handle type equivalence walking of the method's parameters.
+struct DoFullyLoadLocals
+{
+ DoFullyLoadLocals(DFLPendingList *pPendingParam, ClassLoadLevel levelParam, MethodTable *pMT,
+ Generics::RecursionGraph *pVisited, const InstantiationContext *pInstContextParam) :
+ newVisited(pVisited, TypeHandle(pMT)),
+ pPending(pPendingParam),
+ pInstContext(pInstContextParam),
+ level(levelParam),
+ fBailed(FALSE)
+#ifdef FEATURE_COMINTEROP
+ , fHasEquivalentStructParameter(FALSE)
+#endif
+ , fHasTypeForwarderDependentStructParameter(FALSE)
+ , fDependsOnEquivalentOrForwardedStructs(FALSE)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ Generics::RecursionGraph newVisited;
+ DFLPendingList * const pPending;
+ const InstantiationContext * const pInstContext;
+ const ClassLoadLevel level;
+ BOOL fBailed;
+#ifdef FEATURE_COMINTEROP
+ BOOL fHasEquivalentStructParameter;
+#endif
+ BOOL fHasTypeForwarderDependentStructParameter;
+ BOOL fDependsOnEquivalentOrForwardedStructs;
+};
+
+#if defined(FEATURE_TYPEEQUIVALENCE) && !defined(DACCESS_COMPILE)
+static void CheckForEquivalenceAndFullyLoadType(Module *pModule, mdToken token, Module *pDefModule, mdToken defToken, const SigParser *ptr, SigTypeContext *pTypeContext, void *pData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ SigPointer sigPtr(*ptr);
+
+ DoFullyLoadLocals *pLocals = (DoFullyLoadLocals *)pData;
+
+ if (IsTypeDefEquivalent(defToken, pDefModule))
+ {
+ TypeHandle th = sigPtr.GetTypeHandleThrowing(pModule, pTypeContext, ClassLoader::LoadTypes, (ClassLoadLevel)(pLocals->level - 1));
+ CONSISTENCY_CHECK(!th.IsNull());
+
+ th.DoFullyLoad(&pLocals->newVisited, pLocals->level, pLocals->pPending, &pLocals->fBailed, pLocals->pInstContext);
+ pLocals->fDependsOnEquivalentOrForwardedStructs = TRUE;
+ pLocals->fHasEquivalentStructParameter = TRUE;
+ }
+}
+
+#endif // defined(FEATURE_TYPEEQUIVALENCE) && !defined(DACCESS_COMPILE)
+
+struct CheckForTypeForwardedTypeRefParameterLocals
+{
+ Module * pModule;
+ BOOL * pfTypeForwarderFound;
+};
+
+// Callback for code:WalkValueTypeTypeDefOrRefs of type code:PFN_WalkValueTypeTypeDefOrRefs
+static void CheckForTypeForwardedTypeRef(
+ mdToken tkTypeDefOrRef,
+ void * pData)
+{
+ STANDARD_VM_CONTRACT;
+
+ CheckForTypeForwardedTypeRefParameterLocals * pLocals = (CheckForTypeForwardedTypeRefParameterLocals *)pData;
+
+ // If a type forwarder was found, return - we're done
+ if ((pLocals->pfTypeForwarderFound != NULL) && (*(pLocals->pfTypeForwarderFound)))
+ return;
+
+ // Only type refs are interesting
+ if (TypeFromToken(tkTypeDefOrRef) == mdtTypeRef)
+ {
+ Module * pDummyModule;
+ mdToken tkDummy;
+ ClassLoader::ResolveTokenToTypeDefThrowing(
+ pLocals->pModule,
+ tkTypeDefOrRef,
+ &pDummyModule,
+ &tkDummy,
+ Loader::Load,
+ pLocals->pfTypeForwarderFound);
+ }
+}
+
+typedef void (* PFN_WalkValueTypeTypeDefOrRefs)(mdToken tkTypeDefOrRef, void * pData);
+
+ // Call 'function' for each ValueType in the signature.
+void WalkValueTypeTypeDefOrRefs(
+ const SigParser * pSig,
+ PFN_WalkValueTypeTypeDefOrRefs function,
+ void * pData)
+{
+ STANDARD_VM_CONTRACT;
+
+ SigParser sig(*pSig);
+
+ CorElementType typ;
+ IfFailThrow(sig.GetElemType(&typ));
+
+ switch (typ)
+ {
+ case ELEMENT_TYPE_VALUETYPE:
+ mdToken token;
+ IfFailThrow(sig.GetToken(&token));
+ function(token, pData);
+ break;
+
+ case ELEMENT_TYPE_GENERICINST:
+ // Process and skip generic type
+ WalkValueTypeTypeDefOrRefs(&sig, function, pData);
+ IfFailThrow(sig.SkipExactlyOne());
+
+ // Get number of parameters
+ ULONG argCnt;
+ IfFailThrow(sig.GetData(&argCnt));
+ while (argCnt-- != 0)
+ { // Process and skip generic parameter
+ WalkValueTypeTypeDefOrRefs(&sig, function, pData);
+ IfFailThrow(sig.SkipExactlyOne());
+ }
+ break;
+ default:
+ break;
+ }
+}
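+
+ // For illustration (type names hypothetical, encoding per ECMA-335 II.23.2): a field of
+ // type MyStruct<OtherStruct> is encoded roughly as
+ // GENERICINST VALUETYPE <token(MyStruct)> 1 VALUETYPE <token(OtherStruct)>
+ // so the walker above first reports token(MyStruct) for the GENERICINST's generic type,
+ // then recurses into each of its arguments and reports token(OtherStruct).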
+
+// Callback for code:MethodDesc::WalkValueTypeParameters (of type code:WalkValueTypeParameterFnPtr)
+static void CheckForTypeForwardedTypeRefParameter(
+ Module * pModule,
+ mdToken token,
+ Module * pDefModule,
+ mdToken defToken,
+ const SigParser *ptr,
+ SigTypeContext * pTypeContext,
+ void * pData)
+{
+ STANDARD_VM_CONTRACT;
+
+ DoFullyLoadLocals * pLocals = (DoFullyLoadLocals *)pData;
+
+ // If a type forwarder was found, return - we're done
+ if (pLocals->fHasTypeForwarderDependentStructParameter)
+ return;
+
+ CheckForTypeForwardedTypeRefParameterLocals locals;
+ locals.pModule = pModule;
+ locals.pfTypeForwarderFound = &pLocals->fHasTypeForwarderDependentStructParameter; // By not passing NULL here, we determine if there is a type forwarder involved.
+
+ WalkValueTypeTypeDefOrRefs(ptr, CheckForTypeForwardedTypeRef, &locals);
+
+ if (pLocals->fHasTypeForwarderDependentStructParameter)
+ pLocals->fDependsOnEquivalentOrForwardedStructs = TRUE;
+}
+
+// Callback for code:MethodDesc::WalkValueTypeParameters (of type code:WalkValueTypeParameterFnPtr)
+static void LoadTypeDefOrRefAssembly(
+ Module * pModule,
+ mdToken token,
+ Module * pDefModule,
+ mdToken defToken,
+ const SigParser *ptr,
+ SigTypeContext * pTypeContext,
+ void * pData)
+{
+ STANDARD_VM_CONTRACT;
+
+ DoFullyLoadLocals * pLocals = (DoFullyLoadLocals *)pData;
+
+ CheckForTypeForwardedTypeRefParameterLocals locals;
+ locals.pModule = pModule;
+ locals.pfTypeForwarderFound = NULL; // By passing NULL here, we simply resolve the token to TypeDef.
+
+ WalkValueTypeTypeDefOrRefs(ptr, CheckForTypeForwardedTypeRef, &locals);
+}
+
+#endif //!DACCESS_COMPILE
+
+void MethodTable::DoFullyLoad(Generics::RecursionGraph * const pVisited, const ClassLoadLevel level, DFLPendingList * const pPending,
+ BOOL * const pfBailed, const InstantiationContext * const pInstContext)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(level == CLASS_LOADED || level == CLASS_DEPENDENCIES_LOADED);
+ _ASSERTE(pfBailed != NULL);
+ _ASSERTE(!(level == CLASS_LOADED && pPending == NULL));
+
+
+#ifndef DACCESS_COMPILE
+
+ if (Generics::RecursionGraph::HasSeenType(pVisited, TypeHandle(this)))
+ {
+ *pfBailed = TRUE;
+ return;
+ }
+
+ if (GetLoadLevel() >= level)
+ {
+ return;
+ }
+
+ if (level == CLASS_LOADED)
+ {
+ UINT numTH = pPending->Count();
+ TypeHandle *pTypeHndPending = pPending->Table();
+ for (UINT idxPending = 0; idxPending < numTH; idxPending++)
+ {
+ if (pTypeHndPending[idxPending] == this)
+ {
+ *pfBailed = TRUE;
+ return;
+ }
+ }
+
+ }
+
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ // First ensure that we're loaded to just below CLASS_DEPENDENCIES_LOADED
+ ClassLoader::EnsureLoaded(this, (ClassLoadLevel) (level-1));
+
+ CONSISTENCY_CHECK(IsRestored_NoLogging());
+ CONSISTENCY_CHECK(!HasApproxParent());
+
+
+ DoFullyLoadLocals locals(pPending, level, this, pVisited, pInstContext);
+
+ bool fNeedsSanityChecks = !IsZapped(); // Validation has been performed for NGened classes already
+
+#ifdef FEATURE_READYTORUN
+ if (fNeedsSanityChecks)
+ {
+ Module * pModule = GetModule();
+
+ // Skip sanity checks for ready-to-run compiled images that opt out of type validation
+ if (pModule->IsReadyToRun() && pModule->GetReadyToRunInfo()->SkipTypeValidation())
+ fNeedsSanityChecks = false;
+ }
+#endif
+
+ bool fNeedAccessChecks = (level == CLASS_LOADED) &&
+ fNeedsSanityChecks &&
+ IsTypicalTypeDefinition();
+
+ TypeHandle typicalTypeHnd;
+
+ if (!IsZapped()) // Validation has been performed for NGened classes already
+ {
+ // Fully load the typical instantiation. Make sure that this is done before loading other dependencies
+ // as the recursive generics detection algorithm needs to examine typical instantiations of the types
+ // in the closure.
+ if (!IsTypicalTypeDefinition())
+ {
+ typicalTypeHnd = ClassLoader::LoadTypeDefThrowing(GetModule(), GetCl(),
+ ClassLoader::ThrowIfNotFound, ClassLoader::PermitUninstDefOrRef, tdNoTypes,
+ (ClassLoadLevel) (level - 1));
+ CONSISTENCY_CHECK(!typicalTypeHnd.IsNull());
+ typicalTypeHnd.DoFullyLoad(&locals.newVisited, level, pPending, &locals.fBailed, pInstContext);
+ }
+ else if (level == CLASS_DEPENDENCIES_LOADED && HasInstantiation())
+ {
+ // This is a typical instantiation of a generic type. When attaining CLASS_DEPENDENCIES_LOADED, the
+ // recursive inheritance graph (ECMA part.II §9.2) will be constructed and checked for "expanding
+ // cycles" to detect infinite recursion, e.g. A<T> : B<A<A<T>>>.
+ //
+ // The dependencies loaded by this method (parent type, implemented interfaces, generic arguments)
+ // ensure that we will generate the finite instantiation closure as defined in ECMA. This load level
+ // is not being attained under lock so it's not possible to use TypeVarTypeDesc to represent graph
+ // nodes because multiple threads trying to fully load types from the closure at the same time would
+ // interfere with each other. In addition, the graph is only used for loading and can be discarded
+ // when the closure is fully loaded (TypeVarTypeDescs need to stay).
+ //
+ // The graph is represented by Generics::RecursionGraph instances organized in a linked list with
+ // each of them holding part of the graph. They live on the stack and are cleaned up automatically
+ // before returning from DoFullyLoad.
+
+ if (locals.newVisited.CheckForIllegalRecursion())
+ {
+ // An expanding cycle was detected, this type is part of a closure that is defined recursively.
+ IMDInternalImport* pInternalImport = GetModule()->GetMDImport();
+ GetModule()->GetAssembly()->ThrowTypeLoadException(pInternalImport, GetCl(), IDS_CLASSLOAD_GENERICTYPE_RECURSIVE);
+ }
+ }
+ }
+
+ // Fully load the parent
+ MethodTable *pParentMT = GetParentMethodTable();
+
+ if (pParentMT)
+ {
+ pParentMT->DoFullyLoad(&locals.newVisited, level, pPending, &locals.fBailed, pInstContext);
+
+ if (fNeedAccessChecks)
+ {
+ if (!IsComObjectType()) // RCWs are special - they are manufactured by the runtime and derive from the non-public type System.__ComObject
+ {
+ // A transparent type should not be allowed to derive from a critical type.
+ // However, since this has never been enforced before, we have many classes that
+ // violate this rule. Enforcing it now would be a breaking change.
+ DoAccessibilityCheck(this, pParentMT, E_ACCESSDENIED, /* checkTargetTypeTransparency*/ FALSE);
+ }
+ }
+ }
+
+ // Fully load the interfaces
+ MethodTable::InterfaceMapIterator it = IterateInterfaceMap();
+ while (it.Next())
+ {
+ it.GetInterface()->DoFullyLoad(&locals.newVisited, level, pPending, &locals.fBailed, pInstContext);
+
+ if (fNeedAccessChecks)
+ {
+ if (IsInterfaceDeclaredOnClass(it.GetIndex())) // only test directly implemented interfaces (it's
+ // legal for an inherited interface to be private.)
+ {
+ // A transparent type should not be allowed to implement a critical interface.
+ // However, since this has never been enforced before, we have many classes that
+ // violate this rule. Enforcing it now would be a breaking change.
+ DoAccessibilityCheck(this, it.GetInterface(), IDS_CLASSLOAD_INTERFACE_NO_ACCESS, /* checkTargetTypeTransparency*/ FALSE);
+ }
+ }
+ }
+
+ // Fully load the generic arguments
+ Instantiation inst = GetInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ inst[i].DoFullyLoad(&locals.newVisited, level, pPending, &locals.fBailed, pInstContext);
+ }
+
+ // Fully load the canonical methodtable
+ if (!IsCanonicalMethodTable())
+ {
+ GetCanonicalMethodTable()->DoFullyLoad(&locals.newVisited, level, pPending, &locals.fBailed, NULL);
+ }
+
+ if (fNeedsSanityChecks)
+ {
+ // Fully load the exact field types for value type fields
+ // Note that MethodTableBuilder::InitializeFieldDescs() loads the type of the
+ // field only up to level CLASS_LOAD_APPROXPARENTS.
+ FieldDesc *pField = GetApproxFieldDescListRaw();
+ FieldDesc *pFieldEnd = pField + GetNumStaticFields() + GetNumIntroducedInstanceFields();
+
+ while (pField < pFieldEnd)
+ {
+ g_IBCLogger.LogFieldDescsAccess(pField);
+
+ if (pField->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ {
+ TypeHandle th = pField->GetFieldTypeHandleThrowing((ClassLoadLevel) (level - 1));
+ CONSISTENCY_CHECK(!th.IsNull());
+
+ th.DoFullyLoad(&locals.newVisited, level, pPending, &locals.fBailed, pInstContext);
+
+ if (fNeedAccessChecks)
+ {
+ DoAccessibilityCheck(this, th.GetMethodTable(), E_ACCESSDENIED, FALSE);
+ }
+
+ }
+ pField++;
+ }
+
+ // Fully load the exact field types for generic value type fields
+ if (HasGenericsStaticsInfo())
+ {
+ FieldDesc *pGenStaticField = GetGenericsStaticFieldDescs();
+ FieldDesc *pGenStaticFieldEnd = pGenStaticField + GetNumStaticFields();
+ while (pGenStaticField < pGenStaticFieldEnd)
+ {
+ if (pGenStaticField->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ {
+ TypeHandle th = pGenStaticField->GetFieldTypeHandleThrowing((ClassLoadLevel) (level - 1));
+ CONSISTENCY_CHECK(!th.IsNull());
+
+ th.DoFullyLoad(&locals.newVisited, level, pPending, &locals.fBailed, pInstContext);
+
+ // The accessibility check is not necessary for generic fields. The generic fields are copies
+ // of the regular fields; the only difference is that they have the exact type.
+ }
+ pGenStaticField++;
+ }
+ }
+ }
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ // Fully load the types of fields associated with a field marshaler when ngenning
+ if (HasLayout() && GetAppDomain()->IsCompilationDomain() && !IsZapped())
+ {
+ FieldMarshaler* pFM = this->GetLayoutInfo()->GetFieldMarshalers();
+ UINT numReferenceFields = this->GetLayoutInfo()->GetNumCTMFields();
+
+ while (numReferenceFields--)
+ {
+
+ FieldDesc *pMarshalerField = pFM->GetFieldDesc();
+
+ // If the fielddesc pointer here is a token tagged pointer, then the field marshaler that we are
+ // working with will not need to be saved into this ngen image. And as that was the reason that we
+ // needed to load this type, thus we will not need to fully load the type associated with this field desc.
+ //
+ if (!CORCOMPILE_IS_POINTER_TAGGED(pMarshalerField))
+ {
+ TypeHandle th = pMarshalerField->GetFieldTypeHandleThrowing((ClassLoadLevel) (level-1));
+ CONSISTENCY_CHECK(!th.IsNull());
+
+ th.DoFullyLoad(&locals.newVisited, level, pPending, &locals.fBailed, pInstContext);
+ }
+ // The accessibility check is not used here to prevent functional differences between ngen and non-ngen scenarios.
+ ((BYTE*&)pFM) += MAXFIELDMARSHALERSIZE;
+ }
+ }
+#endif //FEATURE_NATIVE_IMAGE_GENERATION
+
+ // Fully load exact parameter types for value type parameters opted into equivalence. This is required in case GC is
+ // triggered during prestub. GC needs to know where references are on the stack and if the parameter (as read from
+ // the method signature) is a structure, it relies on the loaded type for the layout information. For ordinary
+ // structures we are guaranteed to have loaded the type before entering prestub - the caller must have loaded it.
+ // However, due to type equivalence, the caller may work with a different type than what's in the method signature.
+ //
+ // We deal with this situation by eagerly loading types that may cause these problems, i.e. value types in signatures of
+ // methods introduced by this type. To avoid the perf hit for scenarios without type equivalence, we only preload
+ // structures that are marked as type equivalent. In the no-PIA world
+ // these structures are called "local types" and are usually generated automatically by the compiler. Note that there
+ // is related logic in code:CompareTypeDefsForEquivalence that declares two tokens corresponding to structures as
+ // equivalent based on an extensive set of equivalence checks.
+ //
+ // To address this situation for NGENed types and methods, we prevent pre-restoring them - see code:ComputeNeedsRestoreWorker
+ // for details. That forces them to go through the final stages of loading at run-time and hit the same code below.
+
+ if ((level == CLASS_LOADED)
+ && (GetCl() != mdTypeDefNil)
+ && !ContainsGenericVariables()
+ && (!IsZapped()
+ || DependsOnEquivalentOrForwardedStructs()
+#ifdef _DEBUG
+ || TRUE // Always load types in debug builds so that we calculate fDependsOnEquivalentOrForwardedStructs all of the time
+#endif
+ )
+ )
+ {
+ MethodTable::IntroducedMethodIterator itMethods(this, FALSE);
+ for (; itMethods.IsValid(); itMethods.Next())
+ {
+ MethodDesc * pMD = itMethods.GetMethodDesc();
+
+ if (IsCompilationProcess())
+ {
+ locals.fHasTypeForwarderDependentStructParameter = FALSE;
+ EX_TRY
+ {
+ pMD->WalkValueTypeParameters(this, CheckForTypeForwardedTypeRefParameter, &locals);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ // This marks the class as needing restore.
+ if (locals.fHasTypeForwarderDependentStructParameter && !pMD->IsZapped())
+ pMD->SetHasForwardedValuetypeParameter();
+ }
+ else if (pMD->IsZapped() && pMD->HasForwardedValuetypeParameter())
+ {
+ pMD->WalkValueTypeParameters(this, LoadTypeDefOrRefAssembly, NULL);
+ locals.fDependsOnEquivalentOrForwardedStructs = TRUE;
+ }
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+ if (!pMD->DoesNotHaveEquivalentValuetypeParameters() && pMD->IsVirtual())
+ {
+ locals.fHasEquivalentStructParameter = FALSE;
+ pMD->WalkValueTypeParameters(this, CheckForEquivalenceAndFullyLoadType, &locals);
+ if (!locals.fHasEquivalentStructParameter && !IsZapped())
+ pMD->SetDoesNotHaveEquivalentValuetypeParameters();
+ }
+#else
+#ifdef FEATURE_PREJIT
+ if (!IsZapped() && pMD->IsVirtual() && !IsCompilationProcess() )
+ {
+ pMD->PrepareForUseAsADependencyOfANativeImage();
+ }
+#endif
+#endif //FEATURE_TYPEEQUIVALENCE
+ }
+ }
+
+ _ASSERTE(!IsZapped() || !IsCanonicalMethodTable() || (level != CLASS_LOADED) || ((!!locals.fDependsOnEquivalentOrForwardedStructs) == (!!DependsOnEquivalentOrForwardedStructs())));
+ if (locals.fDependsOnEquivalentOrForwardedStructs)
+ {
+ if (!IsZapped())
+ {
+ // if this type declares a method that has an equivalent or type forwarded structure as a parameter type,
+ // make sure we come here and pre-load these structure types in NGENed cases as well
+ SetDependsOnEquivalentOrForwardedStructs();
+ }
+ }
+
+ // The rules for constraint cycles are the same as the rules for access checks
+ if (fNeedAccessChecks)
+ {
+ // Check for cyclical class constraints
+ {
+ Instantiation formalParams = GetInstantiation();
+
+ for (DWORD i = 0; i < formalParams.GetNumArgs(); i++)
+ {
+ BOOL Bounded(TypeVarTypeDesc *tyvar, DWORD depth);
+
+ TypeVarTypeDesc *pTyVar = formalParams[i].AsGenericVariable();
+ pTyVar->LoadConstraints(CLASS_DEPENDENCIES_LOADED);
+ if (!Bounded(pTyVar, formalParams.GetNumArgs()))
+ {
+ COMPlusThrow(kTypeLoadException, VER_E_CIRCULAR_VAR_CONSTRAINTS);
+ }
+
+ DoAccessibilityCheckForConstraints(this, pTyVar, E_ACCESSDENIED);
+ }
+ }
+
+ // Check for cyclical method constraints
+ {
+ if (GetCl() != mdTypeDefNil) // Make sure this is actually a metadata type!
+ {
+ MethodTable::IntroducedMethodIterator itMethods(this, FALSE);
+ for (; itMethods.IsValid(); itMethods.Next())
+ {
+ MethodDesc * pMD = itMethods.GetMethodDesc();
+
+ if (pMD->IsGenericMethodDefinition() && pMD->IsTypicalMethodDefinition())
+ {
+ BOOL fHasCircularClassConstraints = TRUE;
+ BOOL fHasCircularMethodConstraints = TRUE;
+
+ pMD->LoadConstraintsForTypicalMethodDefinition(&fHasCircularClassConstraints, &fHasCircularMethodConstraints, CLASS_DEPENDENCIES_LOADED);
+
+ if (fHasCircularClassConstraints)
+ {
+ COMPlusThrow(kTypeLoadException, VER_E_CIRCULAR_VAR_CONSTRAINTS);
+ }
+ if (fHasCircularMethodConstraints)
+ {
+ COMPlusThrow(kTypeLoadException, VER_E_CIRCULAR_MVAR_CONSTRAINTS);
+ }
+ }
+ }
+ }
+ }
+
+ }
+
+
+#ifdef _DEBUG
+ if (LoggingOn(LF_CLASSLOADER, LL_INFO10000))
+ {
+ SString name;
+ TypeString::AppendTypeDebug(name, this);
+ LOG((LF_CLASSLOADER, LL_INFO10000, "PHASEDLOAD: Completed full dependency load of type %S\n", name.GetUnicode()));
+ }
+#endif
+
+ switch (level)
+ {
+ case CLASS_DEPENDENCIES_LOADED:
+ SetIsDependenciesLoaded();
+
+#if defined(FEATURE_COMINTEROP) && !defined(DACCESS_COMPILE)
+ if (WinRTSupported() && g_fEEStarted && !ContainsIntrospectionOnlyTypes())
+ {
+ _ASSERTE(GetAppDomain() != NULL);
+
+ AppDomain* pAppDomain = GetAppDomain();
+ if (pAppDomain->CanCacheWinRTTypeByGuid(this))
+ {
+ pAppDomain->CacheWinRTTypeByGuid(this);
+ }
+ }
+#endif // FEATURE_COMINTEROP && !DACCESS_COMPILE
+
+ break;
+
+ case CLASS_LOADED:
+ if (!IsZapped() && // Constraint checks have been performed for NGened classes already
+ !IsTypicalTypeDefinition() &&
+ !IsSharedByGenericInstantiations())
+ {
+ TypeHandle thThis = TypeHandle(this);
+
+ // If we got here, we are about to mark a generic instantiation as fully loaded. Before we do so,
+ // check whether it has constraints that aren't satisfied.
+ SatisfiesClassConstraints(thThis, typicalTypeHnd, pInstContext);
+
+ }
+
+ if (locals.fBailed)
+ {
+ // We couldn't complete security checks on some dependency because it is already being processed by one of our callers.
+ // Do not mark this class fully loaded yet. Put it on the pending list and it will be marked fully loaded when
+ // everything unwinds.
+
+ *pfBailed = TRUE;
+
+ TypeHandle *pTHPending = pPending->AppendThrowing();
+ *pTHPending = TypeHandle(this);
+ }
+ else
+ {
+ // Finally, mark this method table as fully loaded
+ SetIsFullyLoaded();
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Can't get here.");
+ break;
+
+ }
+
+ END_SO_INTOLERANT_CODE;
+
+#endif //!DACCESS_COMPILE
+} //MethodTable::DoFullyLoad
+
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_PREJIT
+
+// For a MethodTable in a native image, decode sufficient encoded pointers
+// that the TypeKey for this type is recoverable.
+//
+// For instantiated generic types, we need the generic type arguments,
+// the EEClass pointer, and its Module pointer.
+// (For non-generic types, the EEClass and Module are always hard bound).
+//
+// The process is applied recursively e.g. consider C<D<string>[]>.
+// It is guaranteed to terminate because types cannot contain cycles in their structure.
+//
+// Also note that no lock is required; the process of restoring this information is idempotent.
+// (Note the atomic action at the end though)
+//
+void MethodTable::DoRestoreTypeKey()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ // If we have an indirection cell then restore the m_pCanonMT and its module pointer
+ //
+ if (union_getLowBits(m_pCanonMT) == UNION_INDIRECTION)
+ {
+ Module::RestoreMethodTablePointerRaw((MethodTable **)(union_getPointer(m_pCanonMT)),
+ GetLoaderModule(), CLASS_LOAD_UNRESTORED);
+ }
+
+ MethodTable * pMTForModule = IsArray() ? this : GetCanonicalMethodTable();
+ if (pMTForModule->HasModuleOverride())
+ {
+ Module::RestoreModulePointer(pMTForModule->GetModuleOverridePtr(), pMTForModule->GetLoaderModule());
+ }
+
+ if (IsArray())
+ {
+ //
+ // Restore array element type handle
+ //
+ Module::RestoreTypeHandlePointerRaw(GetApproxArrayElementTypeHandlePtr(),
+ GetLoaderModule(), CLASS_LOAD_UNRESTORED);
+ }
+
+ // Next restore the instantiation and recurse
+ Instantiation inst = GetInstantiation();
+ for (DWORD j = 0; j < inst.GetNumArgs(); j++)
+ {
+ Module::RestoreTypeHandlePointer(&inst.GetRawArgs()[j], GetLoaderModule(), CLASS_LOAD_UNRESTORED);
+ }
+
+ FastInterlockAnd(&(EnsureWritablePages(GetWriteableDataForWrite())->m_dwFlags), ~MethodTableWriteableData::enum_flag_UnrestoredTypeKey);
+}
+
+//==========================================================================================
+// For a MethodTable in a native image, apply Restore actions
+// * Decode any encoded pointers
+// * Instantiate static handles
+// * Propagate Restore to EEClass
+// For array method tables, Restore MUST BE IDEMPOTENT as it can be entered from multiple threads
+// For other classes, restore cannot be entered twice because the loader maintains locks
+//
+// When you actually restore the MethodTable for a generic type, the generic
+// dictionary is restored. That means:
+// * Parent slots in the PerInstInfo are restored by this method eagerly. They are copied down from the
+// parent in code:ClassLoader.LoadExactParentAndInterfacesTransitively
+// * Instantiation parameters in the dictionary are restored eagerly when the type is restored. These are
+// either hard bound pointers, or tagged tokens (fixups).
+// * All other dictionary entries are either hard bound pointers or they are NULL (they are cleared when we
+// freeze the Ngen image). They are *never* tagged tokens.
+void MethodTable::Restore()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(IsZapped());
+ PRECONDITION(!IsRestored_NoLogging());
+ PRECONDITION(!HasUnrestoredTypeKey());
+ }
+ CONTRACTL_END;
+
+ g_IBCLogger.LogMethodTableAccess(this);
+
+ STRESS_LOG1(LF_ZAP, LL_INFO10000, "MethodTable::Restore: Restoring type %pT\n", this);
+ LOG((LF_ZAP, LL_INFO10000,
+ "Restoring methodtable %s at " FMT_ADDR ".\n", GetDebugClassName(), DBG_ADDR(this)));
+
+ // Class pointer should be restored already (in DoRestoreTypeKey)
+ CONSISTENCY_CHECK(IsClassPointerValid());
+
+ // If this isn't the canonical method table itself, then restore the canonical method table
+ // We will load the canonical method table to level EXACTPARENTS in LoadExactParents
+ if (!IsCanonicalMethodTable())
+ {
+ ClassLoader::EnsureLoaded(GetCanonicalMethodTable(), CLASS_LOAD_APPROXPARENTS);
+ }
+
+ //
+ // Restore parent method table
+ //
+ Module::RestoreMethodTablePointerRaw(GetParentMethodTablePtr(), GetLoaderModule(), CLASS_LOAD_APPROXPARENTS);
+
+ //
+ // Restore interface classes
+ //
+ InterfaceMapIterator it = IterateInterfaceMap();
+ while (it.Next())
+ {
+ // Just make sure that the approximate interface is loaded. LoadExactParents fills in the exact interface later.
+ MethodTable * pIftMT;
+ pIftMT = it.GetInterfaceInfo()->GetApproxMethodTable(GetLoaderModule());
+ _ASSERTE(pIftMT != NULL);
+ }
+
+ if (HasCrossModuleGenericStaticsInfo())
+ {
+ MethodTableWriteableData * pWriteableData = GetWriteableDataForWrite();
+ CrossModuleGenericsStaticsInfo * pInfo = pWriteableData->GetCrossModuleGenericsStaticsInfo();
+
+ EnsureWritablePages(pWriteableData, sizeof(MethodTableWriteableData) + sizeof(CrossModuleGenericsStaticsInfo));
+
+ if (IsDomainNeutral())
+ {
+ // If we are domain neutral, we have to use a constituent of the instantiation to store
+ // statics. We need to ensure that we can create a DomainModule in all domains
+ // that this instantiation may get activated in. The preferred zap module (PZM) is a good approximation of such a constituent.
+ Module * pModuleForStatics = Module::GetPreferredZapModuleForMethodTable(this);
+
+ pInfo->m_pModuleForStatics = pModuleForStatics;
+ pInfo->m_DynamicTypeID = pModuleForStatics->AllocateDynamicEntry(this);
+ }
+ else
+ {
+ pInfo->m_pModuleForStatics = GetLoaderModule();
+ }
+ }
+
+ LOG((LF_ZAP, LL_INFO10000,
+ "Restored methodtable %s at " FMT_ADDR ".\n", GetDebugClassName(), DBG_ADDR(this)));
+
+ // This has to be last!
+ SetIsRestored();
+}
+#endif // FEATURE_PREJIT
+
+#ifdef FEATURE_COMINTEROP
+
+//==========================================================================================
+BOOL MethodTable::IsExtensibleRCW()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(GetClass());
+ return IsComObjectType() && !GetClass()->IsComImport();
+}
+
+//==========================================================================================
+OBJECTHANDLE MethodTable::GetOHDelegate()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(GetClass());
+ return GetClass()->GetOHDelegate();
+}
+
+//==========================================================================================
+void MethodTable::SetOHDelegate (OBJECTHANDLE _ohDelegate)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(GetClass());
+ g_IBCLogger.LogEEClassCOWTableAccess(this);
+ GetClass_NoLogging()->SetOHDelegate(_ohDelegate);
+}
+
+//==========================================================================================
+// Helper to skip over COM class in the hierarchy
+MethodTable* MethodTable::GetComPlusParentMethodTable()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ MethodTable* pParent = GetParentMethodTable();
+
+ if (pParent && pParent->IsComImport())
+ {
+ if (pParent->IsProjectedFromWinRT())
+ {
+ // skip all Com Import classes
+ do
+ {
+ pParent = pParent->GetParentMethodTable();
+ _ASSERTE(pParent != NULL);
+ } while (pParent->IsComImport());
+
+ // Now we have either System.__ComObject or WindowsRuntime.RuntimeClass
+ if (pParent != g_pBaseCOMObject)
+ {
+ return pParent;
+ }
+ }
+ else
+ {
+ // Skip the single ComImport class we expect
+ _ASSERTE(pParent->GetParentMethodTable() != NULL);
+ pParent = pParent->GetParentMethodTable();
+ }
+ _ASSERTE(!pParent->IsComImport());
+
+ // Skip over System.__ComObject, expect System.MarshalByRefObject
+ pParent = pParent->GetParentMethodTable();
+ _ASSERTE(pParent != NULL);
+#ifdef FEATURE_REMOTING
+ _ASSERTE(pParent->IsMarshaledByRef());
+#endif
+ _ASSERTE(pParent->GetParentMethodTable() != NULL);
+ _ASSERTE(pParent->GetParentMethodTable() == g_pObjectClass);
+ }
+
+ return pParent;
+}
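+
+ // Illustration (hypothetical hierarchy): for MyRCW : SomeComImportClass, where
+ // SomeComImportClass : System.__ComObject : System.MarshalByRefObject : System.Object,
+ // this helper returns System.MarshalByRefObject, skipping the COM layers.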
+
+BOOL MethodTable::IsWinRTObjectType()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Try to determine if this object represents a WindowsRuntime object - i.e. is either
+ // ProjectedFromWinRT or derived from a class that is
+
+ if (!IsComObjectType())
+ return FALSE;
+
+ // Ideally we'd compute this once in BuildMethodTable and track it with another
+ // flag, but we're now out of bits on m_dwFlags, and this is used very rarely
+ // so for now we'll just recompute it when necessary.
+ MethodTable* pMT = this;
+ do
+ {
+ if (pMT->IsProjectedFromWinRT())
+ {
+ // Found a WinRT COM object
+ return TRUE;
+ }
+ if (pMT->IsComImport())
+ {
+ // Found a class that is actually imported from COM but not WinRT
+ // this is definitely a non-WinRT COM object
+ return FALSE;
+ }
+ pMT = pMT->GetParentMethodTable();
+ } while (pMT != NULL);
+
+ return FALSE;
+}
+
+#endif // FEATURE_COMINTEROP
+
+#endif // !DACCESS_COMPILE
+
+//==========================================================================================
+// Return a pointer to the dictionary for an instantiated type
+// Return NULL if not instantiated
+Dictionary* MethodTable::GetDictionary()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (HasInstantiation())
+ {
+ // The instantiation for this class is stored in the type slots table
+ // *after* any inherited slots
+ return GetPerInstInfo()[GetNumDicts()-1];
+ }
+ else
+ {
+ return NULL;
+ }
+}
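+
+ // Layout illustration (assumed from the accessors above; type names hypothetical): for
+ // class Derived<T> : Base<T>, GetPerInstInfo() points to an array of GetNumDicts()
+ // dictionary pointers, inherited dictionaries first:
+ // GetPerInstInfo()[0] -> Base<T>'s dictionary (copied down)
+ // GetPerInstInfo()[GetNumDicts()-1] -> Derived<T>'s own dictionary (returned here)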
+
+//==========================================================================================
+// As above, but assert if an instantiated type is not restored
+Instantiation MethodTable::GetInstantiation()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ if (HasInstantiation())
+ {
+ PTR_GenericsDictInfo pDictInfo = GetGenericsDictInfo();
+ return Instantiation(GetPerInstInfo()[pDictInfo->m_wNumDicts-1]->GetInstantiation(), pDictInfo->m_wNumTyPars);
+ }
+ else
+ {
+ return Instantiation();
+ }
+}
+
+//==========================================================================================
+// Obtain instantiation from an instantiated type or a pointer to the
+// element type of an array
+Instantiation MethodTable::GetClassOrArrayInstantiation()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ if (IsArray()) {
+ return GetArrayInstantiation();
+ }
+ else {
+ return GetInstantiation();
+ }
+}
+
+//==========================================================================================
+Instantiation MethodTable::GetArrayInstantiation()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ _ASSERTE(IsArray());
+ return Instantiation((TypeHandle *)&m_ElementTypeHnd, 1);
+}
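+
+ // Illustration: for int[], GetArrayInstantiation() yields the single-element
+ // instantiation <int>, so array types can flow through the same code paths as
+ // one-argument generic instantiations.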
+
+//==========================================================================================
+CorElementType MethodTable::GetInternalCorElementType()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ // This should not touch the EEClass, at least not in the
+ // common cases of ELEMENT_TYPE_CLASS and ELEMENT_TYPE_VALUETYPE.
+
+ g_IBCLogger.LogMethodTableAccess(this);
+
+ CorElementType ret;
+
+ switch (GetFlag(enum_flag_Category_ElementTypeMask))
+ {
+ case enum_flag_Category_Array:
+ ret = ELEMENT_TYPE_ARRAY;
+ break;
+
+ case enum_flag_Category_Array | enum_flag_Category_IfArrayThenSzArray:
+ ret = ELEMENT_TYPE_SZARRAY;
+ break;
+
+ case enum_flag_Category_ValueType:
+ ret = ELEMENT_TYPE_VALUETYPE;
+ break;
+
+ case enum_flag_Category_PrimitiveValueType:
+ // This path should only be taken for the builtin mscorlib types
+ // and primitive valuetypes
+ ret = GetClass()->GetInternalCorElementType();
+ _ASSERTE((ret != ELEMENT_TYPE_CLASS) &&
+ (ret != ELEMENT_TYPE_VALUETYPE));
+ break;
+
+ default:
+ ret = ELEMENT_TYPE_CLASS;
+ break;
+ }
+
+ // DAC may be targeting a dump; dumps do not guarantee you can retrieve the EEClass from
+ // the MethodTable so this is not expected to work in a DAC build.
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ if (IsRestored_NoLogging())
+ {
+ PTR_EEClass pClass = GetClass_NoLogging();
+ if (ret != pClass->GetInternalCorElementType())
+ {
+ _ASSERTE(!"Mismatched results in MethodTable::GetInternalCorElementType");
+ }
+ }
+#endif // defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ return ret;
+}
+
+//==========================================================================================
+CorElementType MethodTable::GetVerifierCorElementType()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ // This should not touch the EEClass, at least not in the
+ // common cases of ELEMENT_TYPE_CLASS and ELEMENT_TYPE_VALUETYPE.
+
+ g_IBCLogger.LogMethodTableAccess(this);
+
+ CorElementType ret;
+
+ switch (GetFlag(enum_flag_Category_ElementTypeMask))
+ {
+ case enum_flag_Category_Array:
+ ret = ELEMENT_TYPE_ARRAY;
+ break;
+
+ case enum_flag_Category_Array | enum_flag_Category_IfArrayThenSzArray:
+ ret = ELEMENT_TYPE_SZARRAY;
+ break;
+
+ case enum_flag_Category_ValueType:
+ ret = ELEMENT_TYPE_VALUETYPE;
+ break;
+
+ case enum_flag_Category_PrimitiveValueType:
+ //
+ // This is the only difference from MethodTable::GetInternalCorElementType()
+ //
+ if (IsTruePrimitive() || IsEnum())
+ ret = GetClass()->GetInternalCorElementType();
+ else
+ ret = ELEMENT_TYPE_VALUETYPE;
+ break;
+
+ default:
+ ret = ELEMENT_TYPE_CLASS;
+ break;
+ }
+
+ return ret;
+}
+
+//==========================================================================================
+CorElementType MethodTable::GetSignatureCorElementType()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ // This should not touch the EEClass, at least not in the
+ // common cases of ELEMENT_TYPE_CLASS and ELEMENT_TYPE_VALUETYPE.
+
+ g_IBCLogger.LogMethodTableAccess(this);
+
+ CorElementType ret;
+
+ switch (GetFlag(enum_flag_Category_ElementTypeMask))
+ {
+ case enum_flag_Category_Array:
+ ret = ELEMENT_TYPE_ARRAY;
+ break;
+
+ case enum_flag_Category_Array | enum_flag_Category_IfArrayThenSzArray:
+ ret = ELEMENT_TYPE_SZARRAY;
+ break;
+
+ case enum_flag_Category_ValueType:
+ ret = ELEMENT_TYPE_VALUETYPE;
+ break;
+
+ case enum_flag_Category_PrimitiveValueType:
+ //
+ // This is the only difference from MethodTable::GetInternalCorElementType()
+ //
+ if (IsTruePrimitive())
+ ret = GetClass()->GetInternalCorElementType();
+ else
+ ret = ELEMENT_TYPE_VALUETYPE;
+ break;
+
+ default:
+ ret = ELEMENT_TYPE_CLASS;
+ break;
+ }
+
+ return ret;
+}
+
+#ifndef DACCESS_COMPILE
+
+//==========================================================================================
+void MethodTable::SetInternalCorElementType (CorElementType _NormType)
+{
+ WRAPPER_NO_CONTRACT;
+
+ switch (_NormType)
+ {
+ case ELEMENT_TYPE_CLASS:
+ _ASSERTE(!IsArray());
+ // Nothing to do
+ break;
+ case ELEMENT_TYPE_VALUETYPE:
+ SetFlag(enum_flag_Category_ValueType);
+ _ASSERTE(GetFlag(enum_flag_Category_Mask) == enum_flag_Category_ValueType);
+ break;
+ default:
+ SetFlag(enum_flag_Category_PrimitiveValueType);
+ _ASSERTE(GetFlag(enum_flag_Category_Mask) == enum_flag_Category_PrimitiveValueType);
+ break;
+ }
+
+ GetClass_NoLogging()->SetInternalCorElementType(_NormType);
+ _ASSERTE(GetInternalCorElementType() == _NormType);
+}
+
+#endif // !DACCESS_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+#ifndef DACCESS_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+BOOL MethodTable::IsLegalWinRTType(OBJECTREF *poref)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsProtectedByGCFrame(poref));
+ PRECONDITION(CheckPointer(poref));
+ PRECONDITION((*poref) != NULL);
+ }
+ CONTRACTL_END
+
+ if (IsArray())
+ {
+ BASEARRAYREF arrayRef = (BASEARRAYREF)(*poref);
+
+ // A WinRT array must be a one-dimensional array with a lower bound of 0
+ if (arrayRef->GetRank() == 1 && arrayRef->GetLowerBoundsPtr()[0] == 0)
+ {
+ MethodTable *pElementMT = ((BASEARRAYREF)(*poref))->GetArrayElementTypeHandle().GetMethodTable();
+
+ // Element must be a legal WinRT type and not an array
+ if (!pElementMT->IsArray() && pElementMT->IsLegalNonArrayWinRTType())
+ return TRUE;
+ }
+
+ return FALSE;
+ }
+ else
+ {
+ // Non-Array version of IsLegalNonArrayWinRTType
+ return IsLegalNonArrayWinRTType();
+ }
+}
+#endif //#ifndef CROSSGEN_COMPILE
+
+BOOL MethodTable::IsLegalNonArrayWinRTType()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(!IsArray()); // arrays are not fully described by MethodTable
+ }
+ CONTRACTL_END
+
+ if (WinRTTypeNameConverter::IsWinRTPrimitiveType(this))
+ return TRUE;
+
+ // Attributes are not legal
+ MethodTable *pParentMT = GetParentMethodTable();
+ if (pParentMT == MscorlibBinder::GetExistingClass(CLASS__ATTRIBUTE))
+ {
+ return FALSE;
+ }
+
+ bool fIsRedirected = false;
+ if (!IsProjectedFromWinRT() && !IsExportedToWinRT())
+ {
+ // If the type is not primitive and not coming from .winmd, it can still be legal if
+ // it's one of the redirected types (e.g. IEnumerable<T>).
+ if (!WinRTTypeNameConverter::IsRedirectedType(this))
+ return FALSE;
+
+ fIsRedirected = true;
+ }
+
+ if (IsValueType())
+ {
+ if (!fIsRedirected)
+ {
+ // check fields
+ ApproxFieldDescIterator fieldIterator(this, ApproxFieldDescIterator::INSTANCE_FIELDS);
+ for (FieldDesc *pFD = fieldIterator.Next(); pFD != NULL; pFD = fieldIterator.Next())
+ {
+ TypeHandle thField = pFD->GetFieldTypeHandleThrowing(CLASS_LOAD_EXACTPARENTS);
+
+ if (thField.IsTypeDesc())
+ return FALSE;
+
+ MethodTable *pFieldMT = thField.GetMethodTable();
+
+ // the only allowed reference types are System.String and types projected from WinRT value types
+ if (!pFieldMT->IsValueType() && !pFieldMT->IsString())
+ {
+ WinMDAdapter::RedirectedTypeIndex index;
+ if (!WinRTTypeNameConverter::ResolveRedirectedType(pFieldMT, &index))
+ return FALSE;
+
+ WinMDAdapter::WinMDTypeKind typeKind;
+ WinMDAdapter::GetRedirectedTypeInfo(index, NULL, NULL, NULL, NULL, NULL, &typeKind);
+ if (typeKind != WinMDAdapter::WinMDTypeKind_Struct && typeKind != WinMDAdapter::WinMDTypeKind_Enum)
+ return FALSE;
+ }
+
+ if (!pFieldMT->IsLegalNonArrayWinRTType())
+ return FALSE;
+ }
+ }
+ }
+
+ if (IsInterface() || IsDelegate() || (IsValueType() && fIsRedirected))
+ {
+ // interfaces, delegates, and redirected structures can be generic - check the instantiation
+ if (HasInstantiation())
+ {
+ Instantiation inst = GetInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ // arrays are not allowed as generic arguments
+ if (inst[i].IsArrayType())
+ return FALSE;
+
+ if (inst[i].IsTypeDesc())
+ return FALSE;
+
+ if (!inst[i].AsMethodTable()->IsLegalNonArrayWinRTType())
+ return FALSE;
+ }
+ }
+ }
+ else
+ {
+ // generic structures and runtime classes are not supported
+ if (HasInstantiation())
+ return FALSE;
+ }
+
+ return TRUE;
+}
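+
+ // Examples implied by the checks above: IEnumerable<int> is legal (a redirected
+ // interface with a primitive generic argument), IEnumerable<int[]> is not (arrays
+ // may not appear as generic arguments), and a generic non-interface, non-delegate,
+ // non-redirected type is never legal.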
+
+//==========================================================================================
+// Returns the default WinRT interface if this is a WinRT class, NULL otherwise.
+MethodTable *MethodTable::GetDefaultWinRTInterface()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ if (!IsProjectedFromWinRT() && !IsExportedToWinRT())
+ return NULL;
+
+ if (IsInterface())
+ return NULL;
+
+ // System.Runtime.InteropServices.WindowsRuntime.RuntimeClass is weird
+ // It is ProjectedFromWinRT but isn't really a WinRT class
+ if (this == g_pBaseRuntimeClass)
+ return NULL;
+
+ WinRTClassFactory *pFactory = ::GetComClassFactory(this)->AsWinRTClassFactory();
+ return pFactory->GetDefaultInterface();
+}
+
+#endif // !DACCESS_COMPILE
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP
+#ifndef DACCESS_COMPILE
+
+WORD GetEquivalentMethodSlot(MethodTable * pOldMT, MethodTable * pNewMT, WORD wMTslot, BOOL *pfFound)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ MethodDesc * pMDRet = NULL;
+ *pfFound = FALSE;
+
+ // Get the COM vtable slot corresponding to the given MT slot
+ WORD wVTslot;
+ if (pOldMT->IsSparseForCOMInterop())
+ {
+ wVTslot = pOldMT->GetClass()->GetSparseCOMInteropVTableMap()->LookupVTSlot(wMTslot);
+ }
+ else
+ {
+ wVTslot = wMTslot;
+ }
+
+ // If the other MT is not sparse, we can return the COM slot directly
+ if (!pNewMT->IsSparseForCOMInterop())
+ {
+ if (wVTslot < pNewMT->GetNumVirtuals())
+ *pfFound = TRUE;
+
+ return wVTslot;
+ }
+
+ // Otherwise we iterate over all virtuals in the other MT trying to find a match
+ for (WORD wSlot = 0; wSlot < pNewMT->GetNumVirtuals(); wSlot++)
+ {
+ if (wVTslot == pNewMT->GetClass()->GetSparseCOMInteropVTableMap()->LookupVTSlot(wSlot))
+ {
+ *pfFound = TRUE;
+ return wSlot;
+ }
+ }
+
+ _ASSERTE(!*pfFound);
+ return 0;
+}
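+
+ // A minimal usage sketch (illustrative only; pOldItfMT, pNewItfMT and wOldSlot are
+ // hypothetical locals): translate a slot between two equivalent interfaces, where
+ // either side may use a sparse COM interop vtable.
+#if 0
+ BOOL fFound;
+ WORD wNewSlot = GetEquivalentMethodSlot(pOldItfMT, pNewItfMT, wOldSlot, &fFound);
+ if (fFound)
+ {
+ // The equivalent method on the new interface
+ MethodDesc *pNewMD = pNewItfMT->GetMethodDescForSlot(wNewSlot);
+ }
+#endif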
+#endif // !DACCESS_COMPILE
+#endif // #ifdef FEATURE_COMINTEROP
+
+//==========================================================================================
+BOOL
+MethodTable::FindEncodedMapDispatchEntry(
+ UINT32 typeID,
+ UINT32 slotNumber,
+ DispatchMapEntry * pEntry)
+{
+ CONTRACTL {
+ // NOTE: LookupDispatchMapType may or may not throw. Currently, it
+ // should never throw because lazy interface restore is disabled.
+ THROWS;
+ GC_TRIGGERS;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pEntry));
+ PRECONDITION(typeID != TYPE_ID_THIS_CLASS);
+ } CONTRACTL_END;
+
+ CONSISTENCY_CHECK(HasDispatchMap());
+
+ MethodTable * dispatchTokenType = GetThread()->GetDomain()->LookupType(typeID);
+
+ // Search for an exact type match.
+ {
+ DispatchMap::EncodedMapIterator it(this);
+ for (; it.IsValid(); it.Next())
+ {
+ DispatchMapEntry * pCurEntry = it.Entry();
+ if (pCurEntry->GetSlotNumber() == slotNumber)
+ {
+ MethodTable * pCurEntryType = LookupDispatchMapType(pCurEntry->GetTypeID());
+ if (pCurEntryType == dispatchTokenType)
+ {
+ *pEntry = *pCurEntry;
+ return TRUE;
+ }
+ }
+ }
+ }
+
+    // Repeat the search if any variance is involved, allowing a CanCastTo match. (We do
+    // this in a separate pass because we want to avoid touching the type
+    // to check whether it has variance unless we have to.)
+ //
+ // NOTE: CERs are not guaranteed for interfaces with co- and contra-variance involved.
+ if (dispatchTokenType->HasVariance() || dispatchTokenType->HasTypeEquivalence())
+ {
+ DispatchMap::EncodedMapIterator it(this);
+ for (; it.IsValid(); it.Next())
+ {
+ DispatchMapEntry * pCurEntry = it.Entry();
+ if (pCurEntry->GetSlotNumber() == slotNumber)
+ {
+#ifndef DACCESS_COMPILE
+ MethodTable * pCurEntryType = LookupDispatchMapType(pCurEntry->GetTypeID());
+ //@TODO: This is currently not guaranteed to work without throwing,
+ //@TODO: even with lazy interface restore disabled.
+ if (dispatchTokenType->HasVariance() &&
+ pCurEntryType->CanCastByVarianceToInterfaceOrDelegate(dispatchTokenType, NULL))
+ {
+ *pEntry = *pCurEntry;
+ return TRUE;
+ }
+
+ if (dispatchTokenType->HasInstantiation() && dispatchTokenType->HasTypeEquivalence())
+ {
+ if (dispatchTokenType->IsEquivalentTo(pCurEntryType))
+ {
+ *pEntry = *pCurEntry;
+ return TRUE;
+ }
+ }
+#endif // !DACCESS_COMPILE
+ }
+#if !defined(DACCESS_COMPILE) && defined(FEATURE_TYPEEQUIVALENCE)
+ if (this->HasTypeEquivalence() &&
+ !dispatchTokenType->HasInstantiation() &&
+ dispatchTokenType->HasTypeEquivalence() &&
+ dispatchTokenType->GetClass()->IsEquivalentType())
+ {
+ _ASSERTE(dispatchTokenType->IsInterface());
+ MethodTable * pCurEntryType = LookupDispatchMapType(pCurEntry->GetTypeID());
+
+ if (pCurEntryType->IsEquivalentTo(dispatchTokenType))
+ {
+ MethodDesc * pMD = dispatchTokenType->GetMethodDescForSlot(slotNumber);
+ _ASSERTE(FitsIn<WORD>(slotNumber));
+ BOOL fNewSlotFound = FALSE;
+ DWORD newSlot = GetEquivalentMethodSlot(
+ dispatchTokenType,
+ pCurEntryType,
+ static_cast<WORD>(slotNumber),
+ &fNewSlotFound);
+ if (fNewSlotFound && (newSlot == pCurEntry->GetSlotNumber()))
+ {
+ MethodDesc * pNewMD = pCurEntryType->GetMethodDescForSlot(newSlot);
+
+ MetaSig msig(pMD);
+ MetaSig msignew(pNewMD);
+
+ if (MetaSig::CompareMethodSigs(msig, msignew, FALSE))
+ {
+ *pEntry = *pCurEntry;
+ return TRUE;
+ }
+ }
+ }
+ }
+#endif
+ }
+ }
+ return FALSE;
+} // MethodTable::FindEncodedMapDispatchEntry
+
+//==========================================================================================
+BOOL MethodTable::FindDispatchEntryForCurrentType(UINT32 typeID,
+ UINT32 slotNumber,
+ DispatchMapEntry *pEntry)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pEntry));
+ PRECONDITION(typeID != TYPE_ID_THIS_CLASS);
+ } CONTRACTL_END;
+
+ BOOL fRes = FALSE;
+
+ if (HasDispatchMap())
+ {
+ fRes = FindEncodedMapDispatchEntry(
+ typeID, slotNumber, pEntry);
+ }
+
+ return fRes;
+}
+
+//==========================================================================================
+BOOL MethodTable::FindDispatchEntry(UINT32 typeID,
+ UINT32 slotNumber,
+ DispatchMapEntry *pEntry)
+{
+ CONTRACT (BOOL) {
+ INSTANCE_CHECK;
+ MODE_ANY;
+ THROWS;
+ GC_TRIGGERS;
+ POSTCONDITION(!RETVAL || pEntry->IsValid());
+ PRECONDITION(typeID != TYPE_ID_THIS_CLASS);
+ } CONTRACT_END;
+
+ // Start at the current type and work up the inheritance chain
+ MethodTable *pCurMT = this;
+ UINT32 iCurInheritanceChainDelta = 0;
+ while (pCurMT != NULL)
+ {
+ g_IBCLogger.LogMethodTableAccess(pCurMT);
+ if (pCurMT->FindDispatchEntryForCurrentType(
+ typeID, slotNumber, pEntry))
+ {
+ RETURN (TRUE);
+ }
+ pCurMT = pCurMT->GetParentMethodTable();
+ iCurInheritanceChainDelta++;
+ }
+ RETURN (FALSE);
+}
+
+//==========================================================================================
+// Possible cases:
+// 1. Typed (interface) contract
+// a. To non-virtual implementation (NYI). Just
+// return the DispatchSlot as the implementation
+// b. Mapped virtually to virtual slot on 'this'. Need to
+// further resolve the new 'this' virtual slot.
+// 2. 'this' contract
+// a. To non-virtual implementation. Return the DispatchSlot
+// as the implementation.
+// b. Mapped virtually to another virtual slot. Need to further
+// resolve the new slot on 'this'.
+BOOL
+MethodTable::FindDispatchImpl(
+ UINT32 typeID,
+ UINT32 slotNumber,
+ DispatchSlot * pImplSlot)
+{
+ CONTRACT (BOOL) {
+ INSTANCE_CHECK;
+ MODE_ANY;
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pImplSlot));
+ POSTCONDITION(!RETVAL || !pImplSlot->IsNull() || IsComObjectType());
+ } CONTRACT_END;
+
+ LOG((LF_LOADER, LL_INFO10000, "SD: MT::FindDispatchImpl: searching %s.\n", GetClass()->GetDebugClassName()));
+
+ ///////////////////////////////////
+ // 1. Typed (interface) contract
+
+ INDEBUG(MethodTable *dbg_pMTTok = NULL; dbg_pMTTok = this;)
+ DispatchMapEntry declEntry;
+ DispatchMapEntry implEntry;
+
+#ifndef DACCESS_COMPILE
+ if (typeID != TYPE_ID_THIS_CLASS)
+ {
+ INDEBUG(dbg_pMTTok = GetThread()->GetDomain()->LookupType(typeID));
+ DispatchMapEntry e;
+ if (!FindDispatchEntry(typeID, slotNumber, &e))
+ {
+ // A call to an array thru IList<T> (or IEnumerable<T> or ICollection<T>) has to be handled specially.
+            // These interfaces are "magic" (mostly due to working set concerns - they are created on demand internally
+ // even though semantically, these are static interfaces.)
+ //
+ // NOTE: CERs are not currently supported with generic array interfaces.
+ if (IsArray())
+ {
+                // At this point, we know that we're trying to cast an array to an interface and that the normal static lookup failed.
+
+ // FindDispatchImpl assumes that the cast is legal so we should be able to assume now that it is a valid
+ // IList<T> call thru an array.
+
+ // Get the MT of IList<T> or IReadOnlyList<T>
+ MethodTable *pIfcMT = GetThread()->GetDomain()->LookupType(typeID);
+
+ // Quick sanity check
+ if (!(pIfcMT->HasInstantiation()))
+ {
+ _ASSERTE(!"Should not have gotten here. If you did, it's probably because multiple interface instantiation hasn't been checked in yet. This code only works on top of that.");
+ RETURN(FALSE);
+ }
+
+ // Get the type of T (as in IList<T>)
+ TypeHandle theT = pIfcMT->GetInstantiation()[0];
+
+ // Figure out which method of IList<T> the caller requested.
+ MethodDesc * pIfcMD = pIfcMT->GetMethodDescForSlot(slotNumber);
+
+ // Retrieve the corresponding method of SZArrayHelper. This is the guy that will actually execute.
+ // This method will be an instantiation of a generic method. I.e. if the caller requested
+ // IList<T>.Meth(), he will actually be diverted to SZArrayHelper.Meth<T>().
+ MethodDesc * pActualImplementor = GetActualImplementationForArrayGenericIListOrIReadOnlyListMethod(pIfcMD, theT);
+
+ // Now, construct a DispatchSlot to return in *pImplSlot
+ DispatchSlot ds(pActualImplementor->GetMethodEntryPoint());
+
+ if (pImplSlot != NULL)
+ {
+ *pImplSlot = ds;
+ }
+
+ RETURN(TRUE);
+
+ }
+
+ // This contract is not implemented by this class or any parent class.
+ RETURN(FALSE);
+ }
+
+ /////////////////////////////////
+        // 1.1. Update the typeID and slotNumber so that the full search can commence below
+ typeID = TYPE_ID_THIS_CLASS;
+ slotNumber = e.GetTargetSlotNumber();
+ }
+#endif // !DACCESS_COMPILE
+
+ //////////////////////////////////
+ // 2. 'this' contract
+
+ // Just grab the target out of the vtable
+ *pImplSlot = GetRestoredSlot(slotNumber);
+
+    // Successfully determined the target for the given contract
+ RETURN (TRUE);
+}
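+
+// Illustrative walk-through with hypothetical names: to resolve interface method
+// IFoo::Bar (slot 2) on class C, a caller might write
+//
+//     DispatchSlot impl(NULL);
+//     if (pCMT->FindDispatchImpl(ifooTypeID, 2, &impl) && !impl.IsNull())
+//     {
+//         PCODE target = impl.GetTarget(); // C's implementation of IFoo::Bar
+//     }
+//
+// Case 1 maps the interface slot to a vtable slot via the dispatch maps of C and
+// its parents; case 2 then reads that vtable slot directly.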
+
+//==========================================================================================
+DispatchSlot MethodTable::FindDispatchSlot(UINT32 typeID, UINT32 slotNumber)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ DispatchSlot implSlot(NULL);
+ FindDispatchImpl(typeID, slotNumber, &implSlot);
+ return implSlot;
+}
+
+//==========================================================================================
+DispatchSlot MethodTable::FindDispatchSlot(DispatchToken tok)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ return FindDispatchSlot(tok.GetTypeID(), tok.GetSlotNumber());
+}
+
+#ifndef DACCESS_COMPILE
+
+//==========================================================================================
+DispatchSlot MethodTable::FindDispatchSlotForInterfaceMD(MethodDesc *pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(CheckPointer(pMD));
+ CONSISTENCY_CHECK(pMD->IsInterface());
+ return FindDispatchSlotForInterfaceMD(TypeHandle(pMD->GetMethodTable()), pMD);
+}
+
+//==========================================================================================
+DispatchSlot MethodTable::FindDispatchSlotForInterfaceMD(TypeHandle ownerType, MethodDesc *pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(!ownerType.IsNull());
+ CONSISTENCY_CHECK(CheckPointer(pMD));
+ CONSISTENCY_CHECK(pMD->IsInterface());
+ return FindDispatchSlot(ownerType.GetMethodTable()->GetTypeID(), pMD->GetSlot());
+}
+
+//==========================================================================================
+// This is used for reverse methodimpl lookups by ComPlusMethodCall MDs.
+// This assumes the following:
+// The methodimpl is for an interfaceToken->slotNumber
+// There is ONLY ONE such mapping for this slot number
+// The mapping exists in this type, not a parent type.
+MethodDesc * MethodTable::ReverseInterfaceMDLookup(UINT32 slotNumber)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+ DispatchMap::Iterator it(this);
+ for (; it.IsValid(); it.Next())
+ {
+ if (it.Entry()->GetTargetSlotNumber() == slotNumber)
+ {
+ DispatchMapTypeID typeID = it.Entry()->GetTypeID();
+ _ASSERTE(!typeID.IsThisClass());
+ UINT32 slotNum = it.Entry()->GetSlotNumber();
+ MethodTable * pMTItf = LookupDispatchMapType(typeID);
+ CONSISTENCY_CHECK(CheckPointer(pMTItf));
+
+ MethodDesc *pCanonMD = pMTItf->GetMethodDescForSlot((DWORD)slotNum);
+ return MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pCanonMD,
+ pMTItf,
+ FALSE, // forceBoxedEntryPoint
+ Instantiation(), // methodInst
+ FALSE, // allowInstParam
+ TRUE); // forceRemotableMethod
+ }
+ }
+ return NULL;
+}
+
+//==========================================================================================
+UINT32 MethodTable::GetTypeID()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ PTR_MethodTable pMT = PTR_MethodTable(this);
+
+ return GetDomain()->GetTypeID(pMT);
+}
+
+//==========================================================================================
+UINT32 MethodTable::LookupTypeID()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ PTR_MethodTable pMT = PTR_MethodTable(this);
+
+ return GetDomain()->LookupTypeID(pMT);
+}
+
+//==========================================================================================
+BOOL MethodTable::ImplementsInterfaceWithSameSlotsAsParent(MethodTable *pItfMT, MethodTable *pParentMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(!IsInterface() && !pParentMT->IsInterface());
+ PRECONDITION(pItfMT->IsInterface());
+ } CONTRACTL_END;
+
+ MethodTable *pMT = this;
+ do
+ {
+ DispatchMap::EncodedMapIterator it(pMT);
+ for (; it.IsValid(); it.Next())
+ {
+ DispatchMapEntry *pCurEntry = it.Entry();
+ if (LookupDispatchMapType(pCurEntry->GetTypeID()) == pItfMT)
+ {
+ // this class and its parents up to pParentMT must have no mappings for the interface
+ return FALSE;
+ }
+ }
+
+ pMT = pMT->GetParentMethodTable();
+ _ASSERTE(pMT != NULL);
+ }
+ while (pMT != pParentMT);
+
+ return TRUE;
+}
+
+//==========================================================================================
+BOOL MethodTable::HasSameInterfaceImplementationAsParent(MethodTable *pItfMT, MethodTable *pParentMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(!IsInterface() && !pParentMT->IsInterface());
+ PRECONDITION(pItfMT->IsInterface());
+ } CONTRACTL_END;
+
+ if (!ImplementsInterfaceWithSameSlotsAsParent(pItfMT, pParentMT))
+ {
+ // if the slots are not same, this class reimplements the interface
+ return FALSE;
+ }
+
+    // The target slots are the same, but they can still be overridden. We'll iterate
+ // the dispatch map beginning with pParentMT up the hierarchy and for each pItfMT
+ // entry check the target slot contents (pParentMT vs. this class). A mismatch
+ // means that there is an override. We'll keep track of source (interface) slots
+ // we have seen so that we can ignore entries higher in the hierarchy that are no
+ // longer in effect at pParentMT level.
+ BitMask bitMask;
+
+ WORD wSeenSlots = 0;
+ WORD wTotalSlots = pItfMT->GetNumVtableSlots();
+
+ MethodTable *pMT = pParentMT;
+ do
+ {
+ DispatchMap::EncodedMapIterator it(pMT);
+ for (; it.IsValid(); it.Next())
+ {
+ DispatchMapEntry *pCurEntry = it.Entry();
+ if (LookupDispatchMapType(pCurEntry->GetTypeID()) == pItfMT)
+ {
+ UINT32 ifaceSlot = pCurEntry->GetSlotNumber();
+ if (!bitMask.TestBit(ifaceSlot))
+ {
+ bitMask.SetBit(ifaceSlot);
+
+ UINT32 targetSlot = pCurEntry->GetTargetSlotNumber();
+ if (GetRestoredSlot(targetSlot) != pParentMT->GetRestoredSlot(targetSlot))
+ {
+                        // the target slot is overridden
+ return FALSE;
+ }
+
+ if (++wSeenSlots == wTotalSlots)
+ {
+ // we've resolved all slots, no reason to continue
+ break;
+ }
+ }
+ }
+ }
+ pMT = pMT->GetParentMethodTable();
+ }
+ while (pMT != NULL);
+
+ return TRUE;
+}
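+
+// Illustrative example with assumed types: given Base : IFoo and Derived : Base,
+// Derived has the same IFoo implementation as Base iff it neither re-declares IFoo
+// in its own dispatch map (the ImplementsInterfaceWithSameSlotsAsParent check) nor
+// overrides any virtual targeted by an IFoo entry (the GetRestoredSlot comparison).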
+
+#endif // !DACCESS_COMPILE
+
+//==========================================================================================
+MethodTable * MethodTable::LookupDispatchMapType(DispatchMapTypeID typeID)
+{
+ CONTRACTL {
+ WRAPPER(THROWS);
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ _ASSERTE(!typeID.IsThisClass());
+
+ InterfaceMapIterator intIt = IterateInterfaceMapFrom(typeID.GetInterfaceNum());
+ return intIt.GetInterface();
+}
+
+//==========================================================================================
+MethodDesc * MethodTable::GetIntroducingMethodDesc(DWORD slotNumber)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodDesc * pCurrentMD = GetMethodDescForSlot(slotNumber);
+ DWORD dwSlot = pCurrentMD->GetSlot();
+ MethodDesc * pIntroducingMD = NULL;
+
+ MethodTable * pParentType = GetParentMethodTable();
+ MethodTable * pPrevParentType = NULL;
+
+ // Find this method in the parent.
+ // If it does exist in the parent, it would be at the same vtable slot.
+ while ((pParentType != NULL) &&
+ (dwSlot < pParentType->GetNumVirtuals()))
+ {
+ pPrevParentType = pParentType;
+ pParentType = pParentType->GetParentMethodTable();
+ }
+
+ if (pPrevParentType != NULL)
+ {
+ pIntroducingMD = pPrevParentType->GetMethodDescForSlot(dwSlot);
+ }
+
+ return pIntroducingMD;
+}
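+
+// Illustrative example with assumed types: for class A { virtual void F(); } and
+// class B : A (which overrides F), calling GetIntroducingMethodDesc on B with F's
+// slot walks up until the slot falls out of the parent's virtual range, and returns
+// A::F, the MethodDesc that introduced the slot.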
+
+//==========================================================================================
+// There is a case where a method declared in a type can be explicitly
+// overridden by a methodImpl on another method within the same type. In
+// this case, we need to call the methodImpl target, and this will map
+// things appropriately for us.
+MethodDesc * MethodTable::MapMethodDeclToMethodImpl(MethodDesc * pMDDecl)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ MethodTable * pMT = pMDDecl->GetMethodTable();
+
+ //
+ // Fast negative case check
+ //
+
+ // If it's not virtual, then it could not have been methodImpl'd.
+ if (!pMDDecl->IsVirtual() ||
+ // Is it a non-virtual call to the instantiating stub
+ (pMT->IsValueType() && !pMDDecl->IsUnboxingStub()))
+ {
+ return pMDDecl;
+ }
+
+ MethodDesc * pMDImpl = pMT->GetParallelMethodDesc(pMDDecl);
+
+ // If the method is instantiated, then we need to resolve to the corresponding
+ // instantiated MD for the new slot number.
+ if (pMDDecl->HasMethodInstantiation())
+ {
+ if (pMDDecl->GetSlot() != pMDImpl->GetSlot())
+ {
+ if (!pMDDecl->IsGenericMethodDefinition())
+ {
+#ifndef DACCESS_COMPILE
+ pMDImpl = pMDDecl->FindOrCreateAssociatedMethodDesc(
+ pMDImpl,
+ pMT,
+ pMDDecl->IsUnboxingStub(),
+ pMDDecl->GetMethodInstantiation(),
+ pMDDecl->IsInstantiatingStub());
+#else
+ DacNotImpl();
+#endif
+ }
+ }
+ else
+ {
+ // Since the generic method definition is always in the actual
+ // slot for the method table, and since the slot numbers for
+ // the Decl and Impl MDs are the same, then the call to
+ // FindOrCreateAssociatedMethodDesc would just result in the
+ // same pMDDecl being returned. In this case, we can skip all
+ // the work.
+ pMDImpl = pMDDecl;
+ }
+ }
+
+ CONSISTENCY_CHECK(CheckPointer(pMDImpl));
+ CONSISTENCY_CHECK(!pMDImpl->IsGenericMethodDefinition());
+ return pMDImpl;
+} // MethodTable::MapMethodDeclToMethodImpl
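+
+// Illustrative example: IL's .override directive can map one method onto another
+// within the same type, e.g. a methodImpl pair C::Decl -> C::Impl. Given the
+// MethodDesc for C::Decl, this returns the MethodDesc for C::Impl, instantiating
+// it first when C::Decl carries a method instantiation and the slots differ.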
+
+
+//==========================================================================================
+HRESULT MethodTable::GetGuidNoThrow(GUID *pGuid, BOOL bGenerateIfNotFound, BOOL bClassic /*= TRUE*/)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ GetGuid(pGuid, bGenerateIfNotFound, bClassic);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ // ensure we return a failure hr when pGuid is not filled in
+ if (SUCCEEDED(hr) && (*pGuid == GUID_NULL))
+ hr = E_FAIL;
+
+ return hr;
+}
+
+//==========================================================================================
+// Returns the GUID of this MethodTable.
+// If metadata does not specify a GUID for the type, GUID_NULL is returned (if bGenerateIfNotFound
+// is FALSE) or a GUID is auto-generated on the fly from the name and members of the type
+// (if bGenerateIfNotFound is TRUE).
+//
+// Redirected WinRT types may have two GUIDs, the "classic" one which matches the return value
+// of Type.Guid, and the new one which is the GUID of the WinRT type to which it is redirected.
+// The bClassic parameter controls which one is returned from this method. Note that the parameter
+// is ignored for genuine WinRT types, i.e. types loaded from .winmd files, those always return
+// the new GUID.
+//
+void MethodTable::GetGuid(GUID *pGuid, BOOL bGenerateIfNotFound, BOOL bClassic /*=TRUE*/)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+
+#ifdef DACCESS_COMPILE
+
+ _ASSERTE(pGuid != NULL);
+ PTR_GuidInfo pGuidInfo = (bClassic ? GetClass()->GetGuidInfo() : GetGuidInfo());
+ if (pGuidInfo != NULL)
+ *pGuid = pGuidInfo->m_Guid;
+ else
+ *pGuid = GUID_NULL;
+
+#else // DACCESS_COMPILE
+
+ SIZE_T cchName = 0; // Length of the name (possibly after decoration).
+ SIZE_T cbCur; // Current offset.
+ LPCWSTR szName = NULL; // Name to turn to a guid.
+ CQuickArray<BYTE> rName; // Buffer to accumulate signatures.
+ BOOL bGenerated = FALSE; // A flag indicating if we generated the GUID from name.
+
+ _ASSERTE(pGuid != NULL);
+
+ // Use the per-EEClass GuidInfo if we are asked for the "classic" non-WinRT GUID of non-WinRT type
+ GuidInfo *pInfo = ((bClassic && !IsProjectedFromWinRT()) ? GetClass()->GetGuidInfo() : GetGuidInfo());
+
+ // First check to see if we have already cached the guid for this type.
+ // We currently only cache guids on interfaces and WinRT delegates.
+ // In classic mode, though, ensure we don't retrieve the GuidInfo for redirected interfaces
+ if ((IsInterface() || IsWinRTDelegate()) && pInfo != NULL
+ && (!bClassic || !SupportsGenericInterop(TypeHandle::Interop_NativeToManaged, modeRedirected)))
+ {
+ if (pInfo->m_bGeneratedFromName)
+ {
+ // If the GUID was generated from the name then only return it
+ // if bGenerateIfNotFound is set.
+ if (bGenerateIfNotFound)
+ *pGuid = pInfo->m_Guid;
+ else
+ *pGuid = GUID_NULL;
+ }
+ else
+ {
+ *pGuid = pInfo->m_Guid;
+ }
+ return;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if ((SupportsGenericInterop(TypeHandle::Interop_NativeToManaged, modeProjected))
+ || (!bClassic
+ && SupportsGenericInterop(TypeHandle::Interop_NativeToManaged, modeRedirected)
+ && IsLegalNonArrayWinRTType()))
+ {
+ // Closed generic WinRT interfaces/delegates have their GUID computed
+ // based on the "PIID" in metadata and the instantiation.
+ // Note that we explicitly do this computation for redirected mscorlib
+        // interfaces only if !bClassic, so typeof(IEnumerable<T>).GUID
+ // for example still returns the same result as pre-v4.5 runtimes.
+ // ComputeGuidForGenericType() may throw for generics nested beyond 64 levels.
+ WinRTGuidGenerator::ComputeGuidForGenericType(this, pGuid);
+
+ // This GUID is per-instantiation so make sure that the cache
+ // where we are going to keep it is per-instantiation as well.
+ _ASSERTE(IsCanonicalMethodTable() || HasGuidInfo());
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ if (GetClass()->HasNoGuid())
+ {
+ *pGuid = GUID_NULL;
+ }
+ else
+ {
+ // If there is a GUID in the metadata then return that.
+ IfFailThrow(GetMDImport()->GetItemGuid(GetCl(), pGuid));
+
+ if (*pGuid == GUID_NULL)
+ {
+ // Remember that we didn't find the GUID, so we can skip looking during
+ // future checks. (Note that this is a very important optimization in the
+ // prejit case.)
+ g_IBCLogger.LogEEClassCOWTableAccess(this);
+ GetClass_NoLogging()->SetHasNoGuid();
+ }
+ }
+
+ if (*pGuid == GUID_NULL && bGenerateIfNotFound)
+ {
+ // For interfaces, concatenate the signatures of the methods and fields.
+ if (!IsNilToken(GetCl()) && IsInterface())
+ {
+ // Retrieve the stringized interface definition.
+ cbCur = GetStringizedItfDef(TypeHandle(this), rName);
+
+ // Pad up to a whole WCHAR.
+ if (cbCur % sizeof(WCHAR))
+ {
+ SIZE_T cbDelta = sizeof(WCHAR) - (cbCur % sizeof(WCHAR));
+ rName.ReSizeThrows(cbCur + cbDelta);
+ memset(rName.Ptr() + cbCur, 0, cbDelta);
+ cbCur += cbDelta;
+ }
+
+ // Point to the new buffer.
+ cchName = cbCur / sizeof(WCHAR);
+ szName = reinterpret_cast<LPWSTR>(rName.Ptr());
+ }
+ else
+ {
+ // Get the name of the class.
+ DefineFullyQualifiedNameForClassW();
+ szName = GetFullyQualifiedNameForClassNestedAwareW(this);
+ if (szName == NULL)
+ return;
+ cchName = wcslen(szName);
+
+ // Enlarge buffer for class name.
+ cbCur = cchName * sizeof(WCHAR);
+ rName.ReSizeThrows(cbCur + sizeof(WCHAR));
+ wcscpy_s(reinterpret_cast<LPWSTR>(rName.Ptr()), cchName + 1, szName);
+
+ // Add the assembly guid string to the class name.
+ ULONG cbCurOUT = (ULONG)cbCur;
+ IfFailThrow(GetStringizedTypeLibGuidForAssembly(GetAssembly(), rName, (ULONG)cbCur, &cbCurOUT));
+ cbCur = (SIZE_T) cbCurOUT;
+
+ // Pad to a whole WCHAR.
+ if (cbCur % sizeof(WCHAR))
+ {
+ rName.ReSizeThrows(cbCur + sizeof(WCHAR)-(cbCur%sizeof(WCHAR)));
+ while (cbCur % sizeof(WCHAR))
+ rName[cbCur++] = 0;
+ }
+
+ // Point to the new buffer.
+ szName = reinterpret_cast<LPWSTR>(rName.Ptr());
+ cchName = cbCur / sizeof(WCHAR);
+            // Don't want to have to pad.
+ _ASSERTE((sizeof(GUID) % sizeof(WCHAR)) == 0);
+ }
+
+ // Generate guid from name.
+ CorGuidFromNameW(pGuid, szName, cchName);
+
+        // Remember we generated the guid from the type name.
+ bGenerated = TRUE;
+ }
+
+ // Cache the guid in the type, if not already cached.
+ // We currently only do this for interfaces.
+ // Also, in classic mode do NOT cache GUID for redirected interfaces.
+ if ((IsInterface() || IsWinRTDelegate()) && (pInfo == NULL) && (*pGuid != GUID_NULL)
+#ifdef FEATURE_COMINTEROP
+ && !(bClassic
+ && SupportsGenericInterop(TypeHandle::Interop_NativeToManaged, modeRedirected)
+ && IsLegalNonArrayWinRTType())
+#endif // FEATURE_COMINTEROP
+ )
+ {
+ AllocMemTracker amTracker;
+ BOOL bStoreGuidInfoOnEEClass = false;
+ PTR_LoaderAllocator pLoaderAllocator;
+
+#ifdef FEATURE_COMINTEROP
+ if ((bClassic && !IsProjectedFromWinRT()) || !HasGuidInfo())
+ {
+ bStoreGuidInfoOnEEClass = true;
+ }
+#else
+ // We will always store the GuidInfo on the methodTable.
+ bStoreGuidInfoOnEEClass = true;
+#endif
+ if(bStoreGuidInfoOnEEClass)
+ {
+ // Since the GUIDInfo will be stored on the EEClass,
+ // the memory should be allocated on the loaderAllocator of the class.
+            // The defining module and the loaded module could be different in some scenarios.
+            // For example, in the case of shared generic instantiations, a shared generic
+            // such as System.__Canon would be loaded in the shared domain, but
+            // this->GetLoaderAllocator would be the loader allocator for the defining
+            // module, which can get unloaded at any time.
+ _ASSERTE(GetClass());
+ _ASSERTE(GetClass()->GetMethodTable());
+ pLoaderAllocator = GetClass()->GetMethodTable()->GetLoaderAllocator();
+ }
+ else
+ {
+ pLoaderAllocator = GetLoaderAllocator();
+ }
+
+ _ASSERTE(pLoaderAllocator);
+
+ // Allocate the guid information.
+ pInfo = (GuidInfo *)amTracker.Track(
+ pLoaderAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(GuidInfo))));
+ pInfo->m_Guid = *pGuid;
+ pInfo->m_bGeneratedFromName = bGenerated;
+
+        // Store the GuidInfo in the appropriate cache (EEClass or MethodTable).
+ if (bClassic && !IsProjectedFromWinRT())
+ {
+ // Set the per-EEClass GuidInfo if we are asked for the "classic" non-WinRT GUID.
+ // The MethodTable may be NGENed and read-only - and there's no point in saving
+ // classic GUIDs in non-WinRT MethodTables anyway.
+ _ASSERTE(bStoreGuidInfoOnEEClass);
+ GetClass()->SetGuidInfo(pInfo);
+ }
+ else
+ {
+#ifdef FEATURE_COMINTEROP
+ _ASSERTE(bStoreGuidInfoOnEEClass || HasGuidInfo());
+#else
+ _ASSERTE(bStoreGuidInfoOnEEClass);
+#endif
+ SetGuidInfo(pInfo);
+ }
+
+ amTracker.SuppressRelease();
+ }
+#endif // !DACCESS_COMPILE
+}
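+
+// Illustrative usage with a hypothetical caller: prefer the no-throw wrapper when
+// a failure HRESULT is acceptable.
+//
+//     GUID guid;
+//     HRESULT hr = pMT->GetGuidNoThrow(&guid, TRUE /* bGenerateIfNotFound */);
+//     if (FAILED(hr)) { /* no GUID was found or generated */ }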
+
+
+//==========================================================================================
+MethodDesc* MethodTable::GetMethodDescForSlotAddress(PCODE addr, BOOL fSpeculative /*=FALSE*/)
+{
+ CONTRACT(MethodDesc *)
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ SO_TOLERANT;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_NOT_OK));
+        POSTCONDITION(RETVAL->m_pDebugMethodTable.IsNull() ||      // We must be in BuildMethodTableThrowing()
+ RETVAL->SanityCheck());
+ }
+ CONTRACT_END;
+
+ // If we see shared fcall implementation as an argument to this
+ // function, it means that a vtable slot for the shared fcall
+ // got backpatched when it shouldn't have. The reason we can't
+ // backpatch this method is that it is an FCall that has many
+ // MethodDescs for one implementation. If we backpatch delegate
+ // constructors, this function will not be able to recover the
+ // MethodDesc for the method.
+ //
+ _ASSERTE_IMPL(!ECall::IsSharedFCallImpl(addr) &&
+ "someone backpatched shared fcall implementation -- "
+ "see comment in code");
+
+ MethodDesc* pMethodDesc = ExecutionManager::GetCodeMethodDesc(addr);
+ if (NULL != pMethodDesc)
+ {
+ goto lExit;
+ }
+
+#ifdef FEATURE_INTERPRETER
+    // TODO: Figure out why this interpreter-stub lookup is needed here.
+#ifndef DACCESS_COMPILE
+ // If we didn't find it above, try as an Interpretation stub...
+ pMethodDesc = Interpreter::InterpretationStubToMethodInfo(addr);
+
+ if (NULL != pMethodDesc)
+ {
+ goto lExit;
+ }
+#endif
+#endif // FEATURE_INTERPRETER
+
+ // Is it an FCALL?
+ pMethodDesc = ECall::MapTargetBackToMethod(addr);
+ if (pMethodDesc != 0)
+ {
+ goto lExit;
+ }
+
+ pMethodDesc = MethodDesc::GetMethodDescFromStubAddr(addr, fSpeculative);
+
+lExit:
+
+ RETURN(pMethodDesc);
+}
+
+//==========================================================================================
+/* static*/
+BOOL MethodTable::ComputeContainsGenericVariables(Instantiation inst)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for (DWORD j = 0; j < inst.GetNumArgs(); j++)
+ {
+ if (inst[j].ContainsGenericVariables())
+ {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
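+
+// Illustrative example: the instantiation of List<T> contains the type variable T,
+// so this returns TRUE; for List<int> every argument is closed and it returns
+// FALSE. The check recurses via ContainsGenericVariables, so List<IEnumerable<T>>
+// also returns TRUE.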
+
+//==========================================================================================
+BOOL MethodTable::SanityCheck()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+    // strings have component size 2, all other non-arrays should have 0
+ _ASSERTE((GetComponentSize() <= 2) || IsArray());
+
+ if (m_pEEClass == NULL)
+ {
+ if (IsAsyncPinType())
+ {
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+ }
+
+ EEClass * pClass = GetClass();
+ MethodTable * pCanonMT = pClass->GetMethodTable();
+
+ // Let's try to make sure we have a valid EEClass pointer.
+ if (pCanonMT == NULL)
+ return FALSE;
+
+ if (GetNumGenericArgs() != 0)
+ return (pCanonMT->GetClass() == pClass);
+ else
+ return (pCanonMT == this) || IsArray() || IsTransparentProxy();
+}
+
+//==========================================================================================
+
+// Structs containing GC pointers whose size is at most this get stack-allocated return buffers.
+const unsigned MaxStructBytesForLocalVarRetBuffBytes = 2 * sizeof(void*); // 2 pointer-widths.
+
+BOOL MethodTable::IsStructRequiringStackAllocRetBuf()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Disable this optimization. It has limited value (only kicks in on x86, and only for less common structs),
+ // causes bugs and introduces odd ABI differences not compatible with ReadyToRun.
+ return FALSE;
+
+#if 0
+
+#if defined(_WIN64)
+ // We have not yet updated the 64-bit JIT compiler to follow this directive, so there's
+ // no reason to stack-allocate the return buffers.
+ return FALSE;
+#elif defined(MDIL) || defined(_TARGET_ARM_)
+ // WPB 481466 RetBuf GC hole (When jitting on ARM32 CoreCLR.dll MDIL is not defined)
+ //
+ // This optimization causes versioning problems for MDIL which we haven't addressed yet
+ return FALSE;
+#else
+ return IsValueType()
+ && ContainsPointers()
+ && GetNumInstanceFieldBytes() <= MaxStructBytesForLocalVarRetBuffBytes;
+#endif
+
+#endif
+}
+
+//==========================================================================================
+unsigned MethodTable::GetTypeDefRid()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ g_IBCLogger.LogMethodTableAccess(this);
+ return GetTypeDefRid_NoLogging();
+}
+
+//==========================================================================================
+unsigned MethodTable::GetTypeDefRid_NoLogging()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ WORD token = m_wToken;
+
+ if (token == METHODTABLE_TOKEN_OVERFLOW)
+ return (unsigned)*GetTokenOverflowPtr();
+
+ return token;
+}
+
+//==========================================================================================
+void MethodTable::SetCl(mdTypeDef token)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ unsigned rid = RidFromToken(token);
+ if (rid >= METHODTABLE_TOKEN_OVERFLOW)
+ {
+ m_wToken = METHODTABLE_TOKEN_OVERFLOW;
+ *GetTokenOverflowPtr() = rid;
+ }
+ else
+ {
+ _ASSERTE(FitsIn<U2>(rid));
+ m_wToken = (WORD)rid;
+ }
+
+ _ASSERTE(GetCl() == token);
+}
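+
+// Illustrative example: a typeDef token 0x02000010 has rid 0x10, which is stored
+// in m_wToken directly; a rid >= METHODTABLE_TOKEN_OVERFLOW is written to the
+// out-of-line overflow slot and m_wToken holds the sentinel value, which
+// GetTypeDefRid_NoLogging checks for when reading the rid back.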
+
+//==========================================================================================
+MethodDesc * MethodTable::GetClassConstructor()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ return GetMethodDescForSlot(GetClassConstructorSlot());
+}
+
+//==========================================================================================
+DWORD MethodTable::HasFixedAddressVTStatics()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return GetClass()->HasFixedAddressVTStatics();
+}
+
+//==========================================================================================
+WORD MethodTable::GetNumHandleRegularStatics()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return GetClass()->GetNumHandleRegularStatics();
+}
+
+//==========================================================================================
+WORD MethodTable::GetNumBoxedRegularStatics()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return GetClass()->GetNumBoxedRegularStatics();
+}
+
+//==========================================================================================
+WORD MethodTable::GetNumBoxedThreadStatics ()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return GetClass()->GetNumBoxedThreadStatics();
+}
+
+//==========================================================================================
+ClassCtorInfoEntry* MethodTable::GetClassCtorInfoIfExists()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!IsZapped())
+ return NULL;
+
+ g_IBCLogger.LogCCtorInfoReadAccess(this);
+
+ if (HasBoxedRegularStatics())
+ {
+ ModuleCtorInfo *pModuleCtorInfo = GetZapModule()->GetZapModuleCtorInfo();
+ DPTR(PTR_MethodTable) ppMT = pModuleCtorInfo->ppMT;
+ PTR_DWORD hotHashOffsets = pModuleCtorInfo->hotHashOffsets;
+ PTR_DWORD coldHashOffsets = pModuleCtorInfo->coldHashOffsets;
+
+ if (pModuleCtorInfo->numHotHashes)
+ {
+ DWORD hash = pModuleCtorInfo->GenerateHash(PTR_MethodTable(this), ModuleCtorInfo::HOT);
+ _ASSERTE(hash < pModuleCtorInfo->numHotHashes);
+
+ for (DWORD i = hotHashOffsets[hash]; i != hotHashOffsets[hash + 1]; i++)
+ {
+ _ASSERTE(ppMT[i]);
+ if (dac_cast<TADDR>(ppMT[i]) == dac_cast<TADDR>(this))
+ {
+ return pModuleCtorInfo->cctorInfoHot + i;
+ }
+ }
+ }
+
+ if (pModuleCtorInfo->numColdHashes)
+ {
+ DWORD hash = pModuleCtorInfo->GenerateHash(PTR_MethodTable(this), ModuleCtorInfo::COLD);
+ _ASSERTE(hash < pModuleCtorInfo->numColdHashes);
+
+ for (DWORD i = coldHashOffsets[hash]; i != coldHashOffsets[hash + 1]; i++)
+ {
+ _ASSERTE(ppMT[i]);
+ if (dac_cast<TADDR>(ppMT[i]) == dac_cast<TADDR>(this))
+ {
+ return pModuleCtorInfo->cctorInfoCold + (i - pModuleCtorInfo->numElementsHot);
+ }
+ }
+ }
+ }
+
+ return NULL;
+}
+
+#ifdef _DEBUG
+//==========================================================================================
+// Returns true if pointer to the parent method table has been initialized/restored already.
+BOOL MethodTable::IsParentMethodTablePointerValid()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+    // workaround: The type loader accesses partially initialized data structures, which interferes with IBC logging.
+    // Once the type loader is fixed to no longer access partially initialized data structures, this can go away.
+ if (!GetWriteableData_NoLogging()->IsParentMethodTablePointerValid())
+ return FALSE;
+
+ if (!GetFlag(enum_flag_HasIndirectParent))
+ {
+ return TRUE;
+ }
+ TADDR pMT;
+ pMT = *PTR_TADDR(m_pParentMethodTable + offsetof(MethodTable, m_pParentMethodTable));
+ return !CORCOMPILE_IS_POINTER_TAGGED(pMT);
+}
+#endif
+
+
+//---------------------------------------------------------------------------------------
+//
+// Ascends the parent class chain of "this", until a MethodTable is found whose typeDef
+// matches that of the specified pWhichParent. Why is this useful? See
+// code:MethodTable::GetInstantiationOfParentClass below and
+// code:Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation for use
+// cases.
+//
+// Arguments:
+// pWhichParent - MethodTable whose typeDef we're trying to match as we go up
+// "this"'s parent chain.
+//
+// Return Value:
+// If a matching parent MethodTable is found, it is returned. Else, NULL is
+// returned.
+//
+
+MethodTable * MethodTable::GetMethodTableMatchingParentClass(MethodTable * pWhichParent)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pWhichParent));
+ PRECONDITION(IsRestored_NoLogging());
+ PRECONDITION(pWhichParent->IsRestored_NoLogging());
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ MethodTable *pMethodTableSearch = this;
+
+#ifdef DACCESS_COMPILE
+ unsigned parentCount = 0;
+ MethodTable *pOldMethodTable = NULL;
+#endif // DACCESS_COMPILE
+
+ while (pMethodTableSearch != NULL)
+ {
+#ifdef DACCESS_COMPILE
+ if (pMethodTableSearch == pOldMethodTable ||
+ parentCount > 1000)
+ {
+ break;
+ }
+ pOldMethodTable = pMethodTableSearch;
+ parentCount++;
+#endif // DACCESS_COMPILE
+
+ if (pMethodTableSearch->HasSameTypeDefAs(pWhichParent))
+ {
+ return pMethodTableSearch;
+ }
+
+ pMethodTableSearch = pMethodTableSearch->GetParentMethodTable();
+ }
+
+ return NULL;
+}
+
+
+//==========================================================================================
+// Given D<T> : C<List<T>> and a type handle D<string> we sometimes
+// need to find the corresponding type handle
+// C<List<string>> (C may also be some type
+// further up the inheritance hierarchy). GetInstantiationOfParentClass
+// helps us do this by getting the corresponding instantiation of C, i.e.
+// <List<string>>.
+//
+// pWhichParent: this is used to identify which parent type we're interested in.
+// It must be a canonical EEClass, e.g. for C<ref>. This is used as a token for
+// C<List<T>>. This method can also be called with the minimal methodtable used
+// for dynamic methods. In that case, we need to return an empty instantiation.
+//
+// Note this only works for parent classes, not parent interfaces.
+Instantiation MethodTable::GetInstantiationOfParentClass(MethodTable *pWhichParent)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pWhichParent));
+ PRECONDITION(IsRestored_NoLogging());
+ PRECONDITION(pWhichParent->IsRestored_NoLogging());
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+
+ MethodTable * pMatchingParent = GetMethodTableMatchingParentClass(pWhichParent);
+ if (pMatchingParent != NULL)
+ {
+ return pMatchingParent->GetInstantiation();
+ }
+
+ // The parameter should always be a parent class or the dynamic method
+ // class. Since there is no bit on the dynamicclass methodtable to indicate
+    // that it is the dynamic method methodtable, we simply check the debug name.
+ // This is good enough for an assert.
+ _ASSERTE(strcmp(pWhichParent->GetDebugClassName(), "dynamicClass") == 0);
+ return Instantiation();
+}
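+
+// Illustrative usage with hypothetical handles: with D<T> : C<List<T>> as above,
+// if pD is the MethodTable of D<string> and pC is the canonical MethodTable of C,
+// then pD->GetInstantiationOfParentClass(pC) yields the one-element instantiation
+// <List<string>>.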
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+
+//
+// This is for COM Interop backwards compatibility
+//
+
+//==========================================================================================
+// Returns the data pointer if present, NULL otherwise
+InteropMethodTableData *MethodTable::LookupComInteropData()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetDomain()->LookupComInteropData(this);
+}
+
+//==========================================================================================
+// Returns TRUE if successfully inserted, FALSE if this would be a duplicate entry
+BOOL MethodTable::InsertComInteropData(InteropMethodTableData *pData)
+{
+ WRAPPER_NO_CONTRACT;
+ return GetDomain()->InsertComInteropData(this, pData);
+}
+
+//==========================================================================================
+InteropMethodTableData *MethodTable::CreateComInteropData(AllocMemTracker *pamTracker)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(GetParentMethodTable() == NULL || GetParentMethodTable()->LookupComInteropData() != NULL);
+ } CONTRACTL_END;
+
+ ClassCompat::MethodTableBuilder builder(this);
+
+ InteropMethodTableData *pData = builder.BuildInteropVTable(pamTracker);
+ _ASSERTE(pData);
+ return (pData);
+}
+
+//==========================================================================================
+InteropMethodTableData *MethodTable::GetComInteropData()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ InteropMethodTableData *pData = LookupComInteropData();
+
+ if (!pData)
+ {
+ GCX_PREEMP();
+
+ // Make sure that the parent's interop data has been created
+ MethodTable *pParentMT = GetParentMethodTable();
+ if (pParentMT)
+ pParentMT->GetComInteropData();
+
+ AllocMemTracker amTracker;
+
+ pData = CreateComInteropData(&amTracker);
+ if (InsertComInteropData(pData))
+ {
+ amTracker.SuppressRelease();
+ }
+ else
+ {
+ pData = LookupComInteropData();
+ }
+ }
+
+ _ASSERTE(pData);
+ return (pData);
+}
+
+#endif // FEATURE_COMINTEROP
+
+//==========================================================================================
+ULONG MethodTable::MethodData::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ //@TODO: Must adjust this to use an alternate allocator so that we don't
+ //@TODO: potentially cause deadlocks on the debug thread.
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ ULONG cRef = (ULONG) InterlockedDecrement((LONG*)&m_cRef);
+ if (cRef == 0) {
+ delete this;
+ }
+ return (cRef);
+}
+
+//==========================================================================================
+void
+MethodTable::MethodData::ProcessMap(
+ const DispatchMapTypeID * rgTypeIDs,
+ UINT32 cTypeIDs,
+ MethodTable * pMT,
+ UINT32 iCurrentChainDepth,
+ MethodDataEntry * rgWorkingData)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ for (DispatchMap::EncodedMapIterator it(pMT); it.IsValid(); it.Next())
+ {
+ for (UINT32 nTypeIDIndex = 0; nTypeIDIndex < cTypeIDs; nTypeIDIndex++)
+ {
+ if (it.Entry()->GetTypeID() == rgTypeIDs[nTypeIDIndex])
+ {
+ UINT32 curSlot = it.Entry()->GetSlotNumber();
+ // If we're processing an interface, or it's for a virtual, or it's for a non-virtual
+ // for the most derived type, we want to process the entry. In other words, we
+ // want to ignore non-virtuals for parent classes.
+ if ((curSlot < pMT->GetNumVirtuals()) || (iCurrentChainDepth == 0))
+ {
+ MethodDataEntry * pCurEntry = &rgWorkingData[curSlot];
+ if (!pCurEntry->IsDeclInit() && !pCurEntry->IsImplInit())
+ {
+ pCurEntry->SetImplData(it.Entry()->GetTargetSlotNumber());
+ }
+ }
+ }
+ }
+ }
+} // MethodTable::MethodData::ProcessMap
+
+//==========================================================================================
+UINT32 MethodTable::MethodDataObject::GetObjectSize(MethodTable *pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ UINT32 cb = sizeof(MethodTable::MethodDataObject);
+ cb += pMT->GetCanonicalMethodTable()->GetNumMethods() * sizeof(MethodDataObjectEntry);
+ return cb;
+}
+
+//==========================================================================================
+// This will fill in all the MethodEntry slots present in the current MethodTable
+void MethodTable::MethodDataObject::Init(MethodTable *pMT, MethodData *pParentData)
+{
+ CONTRACTL {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(pParentData, NULL_OK));
+ PRECONDITION(!pMT->IsInterface());
+ PRECONDITION(pParentData == NULL ||
+ (pMT->ParentEquals(pParentData->GetDeclMethodTable()) &&
+ pMT->ParentEquals(pParentData->GetImplMethodTable())));
+ } CONTRACTL_END;
+
+ m_pMT = pMT;
+ m_iNextChainDepth = 0;
+ m_containsMethodImpl = FALSE;
+
+ ZeroMemory(GetEntryData(), sizeof(MethodDataObjectEntry) * GetNumMethods());
+} // MethodTable::MethodDataObject::Init
+
+//==========================================================================================
+BOOL MethodTable::MethodDataObject::PopulateNextLevel()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Get the chain depth to next decode.
+ UINT32 iChainDepth = GetNextChainDepth();
+
+ // If the chain depth is MAX_CHAIN_DEPTH, then we've already parsed every parent.
+ if (iChainDepth == MAX_CHAIN_DEPTH) {
+ return FALSE;
+ }
+ // Now move up the chain to the target.
+ MethodTable *pMTCur = m_pMT;
+ for (UINT32 i = 0; pMTCur != NULL && i < iChainDepth; i++) {
+ pMTCur = pMTCur->GetParentMethodTable();
+ }
+
+ // If we reached the end, then we're done.
+ if (pMTCur == NULL) {
+ SetNextChainDepth(MAX_CHAIN_DEPTH);
+ return FALSE;
+ }
+
+ FillEntryDataForAncestor(pMTCur);
+
+ SetNextChainDepth(iChainDepth + 1);
+
+ return TRUE;
+} // MethodTable::MethodDataObject::PopulateNextLevel
+
+//==========================================================================================
+void MethodTable::MethodDataObject::FillEntryDataForAncestor(MethodTable * pMT)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Since we traverse ancestors from lowest in the inheritance hierarchy
+ // to highest, the first method we come across for a slot is normally
+ // both the declaring and implementing method desc.
+ //
+ // However if this slot is the target of a methodImpl, pMD is not
+ // necessarily either. Rather than track this on a per-slot basis,
+ // we conservatively avoid filling out virtual methods once we
+ // have found that this inheritance chain contains a methodImpl.
+ //
+ // Note that there may be a methodImpl higher in the inheritance chain
+ // that we have not seen yet, and so we will fill out virtual methods
+ // until we reach that level. We are safe doing that because the slots
+ // we fill have been introduced/overridden by a subclass and so take
+ // precedence over any inherited methodImpl.
+
+ // Before we fill the entry data, find if the current ancestor has any methodImpls
+
+ if (pMT->GetClass()->ContainsMethodImpls())
+ m_containsMethodImpl = TRUE;
+
+ if (m_containsMethodImpl && pMT != m_pMT)
+ return;
+
+ unsigned nVirtuals = pMT->GetNumVirtuals();
+
+ MethodTable::IntroducedMethodIterator it(pMT, FALSE);
+ for (; it.IsValid(); it.Next())
+ {
+ MethodDesc * pMD = it.GetMethodDesc();
+ g_IBCLogger.LogMethodDescAccess(pMD);
+
+ unsigned slot = pMD->GetSlot();
+ if (slot == MethodTable::NO_SLOT)
+ continue;
+
+ // We want to fill all methods introduced by the actual type we're gathering
+ // data for, and the virtual methods of the parent and above
+ if (pMT == m_pMT)
+ {
+ if (m_containsMethodImpl && slot < nVirtuals)
+ continue;
+ }
+ else
+ {
+ if (slot >= nVirtuals)
+ continue;
+ }
+
+ MethodDataObjectEntry * pEntry = GetEntry(slot);
+
+ if (pEntry->GetDeclMethodDesc() == NULL)
+ {
+ pEntry->SetDeclMethodDesc(pMD);
+ }
+
+ if (pEntry->GetImplMethodDesc() == NULL)
+ {
+ pEntry->SetImplMethodDesc(pMD);
+ }
+ }
+} // MethodTable::MethodDataObject::FillEntryDataForAncestor
+
+//==========================================================================================
+MethodDesc * MethodTable::MethodDataObject::GetDeclMethodDesc(UINT32 slotNumber)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(slotNumber < GetNumMethods());
+
+ MethodDataObjectEntry * pEntry = GetEntry(slotNumber);
+
+ // Fill the entries one level of inheritance at a time,
+ // stopping when we have filled the MD we are looking for.
+ while (!pEntry->GetDeclMethodDesc() && PopulateNextLevel());
+
+ MethodDesc * pMDRet = pEntry->GetDeclMethodDesc();
+ if (pMDRet == NULL)
+ {
+ pMDRet = GetImplMethodDesc(slotNumber)->GetDeclMethodDesc(slotNumber);
+ _ASSERTE(CheckPointer(pMDRet));
+ pEntry->SetDeclMethodDesc(pMDRet);
+ }
+ else
+ {
+ _ASSERTE(pMDRet == GetImplMethodDesc(slotNumber)->GetDeclMethodDesc(slotNumber));
+ }
+ return pMDRet;
+}
+
+//==========================================================================================
+DispatchSlot MethodTable::MethodDataObject::GetImplSlot(UINT32 slotNumber)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(slotNumber < GetNumMethods());
+ return DispatchSlot(m_pMT->GetRestoredSlot(slotNumber));
+}
+
+//==========================================================================================
+UINT32 MethodTable::MethodDataObject::GetImplSlotNumber(UINT32 slotNumber)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(slotNumber < GetNumMethods());
+ return slotNumber;
+}
+
+//==========================================================================================
+MethodDesc *MethodTable::MethodDataObject::GetImplMethodDesc(UINT32 slotNumber)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(slotNumber < GetNumMethods());
+ MethodDataObjectEntry *pEntry = GetEntry(slotNumber);
+
+ // Fill the entries one level of inheritance at a time,
+ // stopping when we have filled the MD we are looking for.
+ while (!pEntry->GetImplMethodDesc() && PopulateNextLevel());
+
+ MethodDesc *pMDRet = pEntry->GetImplMethodDesc();
+
+ if (pMDRet == NULL)
+ {
+ _ASSERTE(slotNumber < GetNumVirtuals());
+ pMDRet = m_pMT->GetMethodDescForSlot(slotNumber);
+ _ASSERTE(CheckPointer(pMDRet));
+ pEntry->SetImplMethodDesc(pMDRet);
+ }
+ else
+ {
+ _ASSERTE(slotNumber >= GetNumVirtuals() || pMDRet == m_pMT->GetMethodDescForSlot(slotNumber));
+ }
+
+ return pMDRet;
+}
+
+//==========================================================================================
+void MethodTable::MethodDataObject::InvalidateCachedVirtualSlot(UINT32 slotNumber)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(slotNumber < GetNumVirtuals());
+
+ MethodDataObjectEntry *pEntry = GetEntry(slotNumber);
+ pEntry->SetImplMethodDesc(NULL);
+}
+
+//==========================================================================================
+MethodDesc *MethodTable::MethodDataInterface::GetDeclMethodDesc(UINT32 slotNumber)
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pMT->GetMethodDescForSlot(slotNumber);
+}
+
+//==========================================================================================
+MethodDesc *MethodTable::MethodDataInterface::GetImplMethodDesc(UINT32 slotNumber)
+{
+ WRAPPER_NO_CONTRACT;
+ return MethodTable::MethodDataInterface::GetDeclMethodDesc(slotNumber);
+}
+
+//==========================================================================================
+void MethodTable::MethodDataInterface::InvalidateCachedVirtualSlot(UINT32 slotNumber)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // MethodDataInterface does not store any cached MethodDesc values
+ return;
+}
+
+//==========================================================================================
+UINT32 MethodTable::MethodDataInterfaceImpl::GetObjectSize(MethodTable *pMTDecl)
+{
+ WRAPPER_NO_CONTRACT;
+ UINT32 cb = sizeof(MethodDataInterfaceImpl);
+ cb += pMTDecl->GetNumMethods() * sizeof(MethodDataEntry);
+ return cb;
+}
+
+//==========================================================================================
+// This will fill in all the MethodEntry slots present in the current MethodTable
+void
+MethodTable::MethodDataInterfaceImpl::Init(
+ const DispatchMapTypeID * rgDeclTypeIDs,
+ UINT32 cDeclTypeIDs,
+ MethodData * pDecl,
+ MethodData * pImpl)
+{
+ CONTRACTL {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ PRECONDITION(CheckPointer(pDecl));
+ PRECONDITION(CheckPointer(pImpl));
+ PRECONDITION(pDecl->GetDeclMethodTable()->IsInterface());
+ PRECONDITION(!pImpl->GetDeclMethodTable()->IsInterface());
+ PRECONDITION(pDecl->GetDeclMethodTable() == pDecl->GetImplMethodTable());
+ PRECONDITION(pImpl->GetDeclMethodTable() == pImpl->GetImplMethodTable());
+ PRECONDITION(pDecl != pImpl);
+ } CONTRACTL_END;
+
+ // Store and AddRef the decl and impl data.
+ m_pDecl = pDecl;
+ m_pDecl->AddRef();
+ m_pImpl = pImpl;
+ m_pImpl->AddRef();
+
+ m_iNextChainDepth = 0;
+ // Need side effects of the calls, but not the result.
+ /* MethodTable *pDeclMT = */ pDecl->GetDeclMethodTable();
+ /* MethodTable *pImplMT = */ pImpl->GetImplMethodTable();
+ m_rgDeclTypeIDs = rgDeclTypeIDs;
+ m_cDeclTypeIDs = cDeclTypeIDs;
+
+ // Initialize each entry.
+ for (UINT32 i = 0; i < GetNumMethods(); i++) {
+ // Initialize the entry
+ GetEntry(i)->Init();
+ }
+} // MethodTable::MethodDataInterfaceImpl::Init
+
+//==========================================================================================
+MethodTable::MethodDataInterfaceImpl::MethodDataInterfaceImpl(
+ const DispatchMapTypeID * rgDeclTypeIDs,
+ UINT32 cDeclTypeIDs,
+ MethodData * pDecl,
+ MethodData * pImpl)
+{
+ WRAPPER_NO_CONTRACT;
+ Init(rgDeclTypeIDs, cDeclTypeIDs, pDecl, pImpl);
+}
+
+//==========================================================================================
+MethodTable::MethodDataInterfaceImpl::~MethodDataInterfaceImpl()
+{
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(CheckPointer(m_pDecl));
+ CONSISTENCY_CHECK(CheckPointer(m_pImpl));
+ m_pDecl->Release();
+ m_pImpl->Release();
+}
+
+//==========================================================================================
+BOOL
+MethodTable::MethodDataInterfaceImpl::PopulateNextLevel()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Get the chain depth to next decode.
+ UINT32 iChainDepth = GetNextChainDepth();
+
+ // If the chain depth is MAX_CHAIN_DEPTH, then we've already parsed every parent.
+ if (iChainDepth == MAX_CHAIN_DEPTH) {
+ return FALSE;
+ }
+
+ // Now move up the chain to the target.
+ MethodTable *pMTCur = m_pImpl->GetImplMethodTable();
+ for (UINT32 i = 0; pMTCur != NULL && i < iChainDepth; i++) {
+ pMTCur = pMTCur->GetParentMethodTable();
+ }
+
+ // If we reached the end, then we're done.
+ if (pMTCur == NULL) {
+ SetNextChainDepth(MAX_CHAIN_DEPTH);
+ return FALSE;
+ }
+
+ if (m_cDeclTypeIDs != 0)
+ { // We got the TypeIDs from TypeLoader, use them
+ ProcessMap(m_rgDeclTypeIDs, m_cDeclTypeIDs, pMTCur, iChainDepth, GetEntryData());
+ }
+ else
+ { // We should decode all interface duplicates of code:m_pDecl
+ MethodTable * pDeclMT = m_pDecl->GetImplMethodTable();
+ INDEBUG(BOOL dbg_fInterfaceFound = FALSE);
+
+        // Call code:ProcessMap for every (duplicate) occurrence of interface code:pDeclMT in the interface
+ // map of code:m_pImpl
+ MethodTable::InterfaceMapIterator it = m_pImpl->GetImplMethodTable()->IterateInterfaceMap();
+ while (it.Next())
+ {
+ if (pDeclMT == it.GetInterface())
+ { // We found the interface
+ INDEBUG(dbg_fInterfaceFound = TRUE);
+ DispatchMapTypeID declTypeID = DispatchMapTypeID::InterfaceClassID(it.GetIndex());
+
+ ProcessMap(&declTypeID, 1, pMTCur, iChainDepth, GetEntryData());
+ }
+ }
+ // The interface code:m_Decl should be found at least once in the interface map of code:m_pImpl,
+ // otherwise someone passed wrong information
+ _ASSERTE(dbg_fInterfaceFound);
+ }
+
+ SetNextChainDepth(iChainDepth + 1);
+
+ return TRUE;
+} // MethodTable::MethodDataInterfaceImpl::PopulateNextLevel
+
+//==========================================================================================
+UINT32 MethodTable::MethodDataInterfaceImpl::MapToImplSlotNumber(UINT32 slotNumber)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(slotNumber < GetNumMethods());
+
+ MethodDataEntry *pEntry = GetEntry(slotNumber);
+ while (!pEntry->IsImplInit() && PopulateNextLevel()) {}
+ if (pEntry->IsImplInit()) {
+ return pEntry->GetImplSlotNum();
+ }
+ else {
+ return INVALID_SLOT_NUMBER;
+ }
+}
+
+//==========================================================================================
+DispatchSlot MethodTable::MethodDataInterfaceImpl::GetImplSlot(UINT32 slotNumber)
+{
+ WRAPPER_NO_CONTRACT;
+ UINT32 implSlotNumber = MapToImplSlotNumber(slotNumber);
+ if (implSlotNumber == INVALID_SLOT_NUMBER) {
+ return DispatchSlot(NULL);
+ }
+ return m_pImpl->GetImplSlot(implSlotNumber);
+}
+
+//==========================================================================================
+UINT32 MethodTable::MethodDataInterfaceImpl::GetImplSlotNumber(UINT32 slotNumber)
+{
+ WRAPPER_NO_CONTRACT;
+ return MapToImplSlotNumber(slotNumber);
+}
+
+//==========================================================================================
+MethodDesc *MethodTable::MethodDataInterfaceImpl::GetImplMethodDesc(UINT32 slotNumber)
+{
+ WRAPPER_NO_CONTRACT;
+ UINT32 implSlotNumber = MapToImplSlotNumber(slotNumber);
+ if (implSlotNumber == INVALID_SLOT_NUMBER) {
+ return NULL;
+ }
+    return m_pImpl->GetImplMethodDesc(implSlotNumber);
+}
+
+//==========================================================================================
+void MethodTable::MethodDataInterfaceImpl::InvalidateCachedVirtualSlot(UINT32 slotNumber)
+{
+ WRAPPER_NO_CONTRACT;
+ UINT32 implSlotNumber = MapToImplSlotNumber(slotNumber);
+ if (implSlotNumber == INVALID_SLOT_NUMBER) {
+ return;
+ }
+    return m_pImpl->InvalidateCachedVirtualSlot(implSlotNumber);
+}
+
+//==========================================================================================
+void MethodTable::CheckInitMethodDataCache()
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+ if (s_pMethodDataCache == NULL)
+ {
+ UINT32 cb = MethodDataCache::GetObjectSize(8);
+ NewHolder<BYTE> hb(new BYTE[cb]);
+ MethodDataCache *pCache = new (hb.GetValue()) MethodDataCache(8);
+ if (InterlockedCompareExchangeT(
+ &s_pMethodDataCache, pCache, NULL) == NULL)
+ {
+ hb.SuppressRelease();
+ }
+ // If somebody beat us, return and allow the holders to take care of cleanup.
+ else
+ {
+ return;
+ }
+ }
+}
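+
+// The publication above is the usual lock-free initialization idiom: each racing
+// thread builds a candidate cache, exactly one InterlockedCompareExchangeT from
+// NULL succeeds and suppresses its holder's cleanup, and the losers let their
+// holders free the unused candidates.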
+
+//==========================================================================================
+void MethodTable::ClearMethodDataCache()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (s_pMethodDataCache != NULL) {
+ s_pMethodDataCache->Clear();
+ }
+}
+
+//==========================================================================================
+MethodTable::MethodData *MethodTable::FindMethodDataHelper(MethodTable *pMTDecl, MethodTable *pMTImpl)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CONSISTENCY_CHECK(s_fUseMethodDataCache);
+ } CONTRACTL_END;
+
+ return s_pMethodDataCache->Find(pMTDecl, pMTImpl);
+}
+
+//==========================================================================================
+MethodTable::MethodData *MethodTable::FindParentMethodDataHelper(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ MethodData *pData = NULL;
+ if (s_fUseMethodDataCache && s_fUseParentMethodData) {
+ if (!pMT->IsInterface()) {
+ //@todo : this won't be correct for non-shared code
+ MethodTable *pMTParent = pMT->GetParentMethodTable();
+ if (pMTParent != NULL) {
+ pData = FindMethodDataHelper(pMTParent, pMTParent);
+ }
+ }
+ }
+ return pData;
+}
+
+//==========================================================================================
+// This method does not cache the resulting MethodData object in the global MethodDataCache.
+// The TypeIDs (rgDeclTypeIDs with cDeclTypeIDs items) have to be sorted.
+MethodTable::MethodData *
+MethodTable::GetMethodDataHelper(
+ const DispatchMapTypeID * rgDeclTypeIDs,
+ UINT32 cDeclTypeIDs,
+ MethodTable * pMTDecl,
+ MethodTable * pMTImpl)
+{
+ CONTRACTL {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ PRECONDITION(CheckPointer(pMTDecl));
+ PRECONDITION(CheckPointer(pMTImpl));
+ } CONTRACTL_END;
+
+ //@TODO: Must adjust this to use an alternate allocator so that we don't
+ //@TODO: potentially cause deadlocks on the debug thread.
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+
+ CONSISTENCY_CHECK(pMTDecl->IsInterface() && !pMTImpl->IsInterface());
+
+#ifdef _DEBUG
+ // Check that rgDeclTypeIDs are sorted, are valid interface indexes, and reference only the pMTDecl interface
+ {
+ InterfaceInfo_t * rgImplInterfaceMap = pMTImpl->GetInterfaceMap();
+ UINT32 cImplInterfaceMap = pMTImpl->GetNumInterfaces();
+ // Verify that all types referenced by code:rgDeclTypeIDs are code:pMTDecl (declared interface)
+ for (UINT32 nDeclTypeIDIndex = 0; nDeclTypeIDIndex < cDeclTypeIDs; nDeclTypeIDIndex++)
+ {
+ if (nDeclTypeIDIndex > 0)
+ { // Verify that interface indexes are sorted
+ _ASSERTE(rgDeclTypeIDs[nDeclTypeIDIndex - 1].GetInterfaceNum() < rgDeclTypeIDs[nDeclTypeIDIndex].GetInterfaceNum());
+ }
+ UINT32 nInterfaceIndex = rgDeclTypeIDs[nDeclTypeIDIndex].GetInterfaceNum();
+ _ASSERTE(nInterfaceIndex < cImplInterfaceMap);
+ {
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOAD_APPROXPARENTS);
+ _ASSERTE(rgImplInterfaceMap[nInterfaceIndex].GetApproxMethodTable(pMTImpl->GetLoaderModule())->HasSameTypeDefAs(pMTDecl));
+ }
+ }
+ }
+#endif //_DEBUG
+
+ // Can't cache, since this is a custom method used in BuildMethodTable
+ MethodDataWrapper hDecl(GetMethodData(pMTDecl, FALSE));
+ MethodDataWrapper hImpl(GetMethodData(pMTImpl, FALSE));
+
+ UINT32 cb = MethodDataInterfaceImpl::GetObjectSize(pMTDecl);
+ NewHolder<BYTE> pb(new BYTE[cb]);
+ MethodDataInterfaceImpl * pData = new (pb.GetValue()) MethodDataInterfaceImpl(rgDeclTypeIDs, cDeclTypeIDs, hDecl, hImpl);
+ pb.SuppressRelease();
+
+ return pData;
+} // MethodTable::GetMethodDataHelper
+
+//==========================================================================================
+// The fCanCache argument determines if the resulting MethodData object can
+// be added to the global MethodDataCache. This is used when requesting a
+// MethodData object for a type currently being built.
+MethodTable::MethodData *MethodTable::GetMethodDataHelper(MethodTable *pMTDecl,
+ MethodTable *pMTImpl,
+ BOOL fCanCache)
+{
+ CONTRACTL {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ PRECONDITION(CheckPointer(pMTDecl));
+ PRECONDITION(CheckPointer(pMTImpl));
+ PRECONDITION(pMTDecl == pMTImpl ||
+ (pMTDecl->IsInterface() && !pMTImpl->IsInterface()));
+ } CONTRACTL_END;
+
+ //@TODO: Must adjust this to use an alternate allocator so that we don't
+ //@TODO: potentially cause deadlocks on the debug thread.
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+
+ if (s_fUseMethodDataCache) {
+ MethodData *pData = FindMethodDataHelper(pMTDecl, pMTImpl);
+ if (pData != NULL) {
+ return pData;
+ }
+ }
+
+ // If we get here, there are no entries in the cache.
+ MethodData *pData = NULL;
+ if (pMTDecl == pMTImpl) {
+ if (pMTDecl->IsInterface()) {
+ pData = new MethodDataInterface(pMTDecl);
+ }
+ else {
+ UINT32 cb = MethodDataObject::GetObjectSize(pMTDecl);
+ NewHolder<BYTE> pb(new BYTE[cb]);
+ MethodDataHolder h(FindParentMethodDataHelper(pMTDecl));
+ pData = new (pb.GetValue()) MethodDataObject(pMTDecl, h.GetValue());
+ pb.SuppressRelease();
+ }
+ }
+ else {
+ pData = GetMethodDataHelper(
+ NULL,
+ 0,
+ pMTDecl,
+ pMTImpl);
+ }
+
+ // Insert in the cache if it is active.
+ if (fCanCache && s_fUseMethodDataCache) {
+ s_pMethodDataCache->Insert(pData);
+ }
+
+ // Do not AddRef, already initialized to 1.
+ return pData;
+}
+
+//==========================================================================================
+// The fCanCache argument determines if the resulting MethodData object can
+// be added to the global MethodDataCache. This is used when requesting a
+// MethodData object for a type currently being built.
+MethodTable::MethodData *MethodTable::GetMethodData(MethodTable *pMTDecl,
+ MethodTable *pMTImpl,
+ BOOL fCanCache)
+{
+ CONTRACTL {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ } CONTRACTL_END;
+
+ MethodDataWrapper hData(GetMethodDataHelper(pMTDecl, pMTImpl, fCanCache));
+ hData.SuppressRelease();
+ return hData;
+}
+
+//==========================================================================================
+// This method does not cache the resulting MethodData object in the global MethodDataCache.
+MethodTable::MethodData *
+MethodTable::GetMethodData(
+ const DispatchMapTypeID * rgDeclTypeIDs,
+ UINT32 cDeclTypeIDs,
+ MethodTable * pMTDecl,
+ MethodTable * pMTImpl)
+{
+ CONTRACTL {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ PRECONDITION(pMTDecl != pMTImpl);
+ PRECONDITION(pMTDecl->IsInterface());
+ PRECONDITION(!pMTImpl->IsInterface());
+ } CONTRACTL_END;
+
+ MethodDataWrapper hData(GetMethodDataHelper(rgDeclTypeIDs, cDeclTypeIDs, pMTDecl, pMTImpl));
+ hData.SuppressRelease();
+ return hData;
+}
+
+//==========================================================================================
+// The fCanCache argument determines if the resulting MethodData object can
+// be added to the global MethodDataCache. This is used when requesting a
+// MethodData object for a type currently being built.
+MethodTable::MethodData *MethodTable::GetMethodData(MethodTable *pMT,
+ BOOL fCanCache)
+{
+ WRAPPER_NO_CONTRACT;
+ return GetMethodData(pMT, pMT, fCanCache);
+}
+
+//==========================================================================================
+MethodTable::MethodIterator::MethodIterator(MethodTable *pMTDecl, MethodTable *pMTImpl)
+{
+ WRAPPER_NO_CONTRACT;
+ Init(pMTDecl, pMTImpl);
+}
+
+//==========================================================================================
+MethodTable::MethodIterator::MethodIterator(MethodTable *pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ Init(pMT, pMT);
+}
+
+//==========================================================================================
+MethodTable::MethodIterator::MethodIterator(MethodData *pMethodData)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pMethodData));
+ } CONTRACTL_END;
+
+ m_pMethodData = pMethodData;
+ m_pMethodData->AddRef();
+ m_iCur = 0;
+ m_iMethods = (INT32)m_pMethodData->GetNumMethods();
+}
+
+//==========================================================================================
+MethodTable::MethodIterator::MethodIterator(const MethodIterator &it)
+{
+ WRAPPER_NO_CONTRACT;
+ m_pMethodData = it.m_pMethodData;
+ m_pMethodData->AddRef();
+ m_iCur = it.m_iCur;
+ m_iMethods = it.m_iMethods;
+}
+
+//==========================================================================================
+void MethodTable::MethodIterator::Init(MethodTable *pMTDecl, MethodTable *pMTImpl)
+{
+ CONTRACTL {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pMTDecl));
+ PRECONDITION(CheckPointer(pMTImpl));
+ } CONTRACTL_END;
+
+ LOG((LF_LOADER, LL_INFO10000, "SD: MT::MethodIterator created for %s.\n", pMTDecl->GetDebugClassName()));
+
+ m_pMethodData = MethodTable::GetMethodData(pMTDecl, pMTImpl);
+ CONSISTENCY_CHECK(CheckPointer(m_pMethodData));
+ m_iCur = 0;
+ m_iMethods = (INT32)m_pMethodData->GetNumMethods();
+}
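+
+// Usage sketch (illustrative; assumes the iterator's IsValid()/Next()/
+// GetMethodDesc() accessors declared elsewhere in this class):
+//
+//     MethodTable::MethodIterator it(pMT);
+//     for (; it.IsValid(); it.Next())
+//     {
+//         MethodDesc *pMD = it.GetMethodDesc();
+//         // ... inspect pMD ...
+//     }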
+#endif // !DACCESS_COMPILE
+
+//==========================================================================================
+
+void MethodTable::IntroducedMethodIterator::SetChunk(MethodDescChunk * pChunk)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (pChunk)
+ {
+ m_pMethodDesc = pChunk->GetFirstMethodDesc();
+
+ m_pChunk = pChunk;
+ m_pChunkEnd = dac_cast<TADDR>(pChunk) + pChunk->SizeOf();
+ }
+ else
+ {
+ m_pMethodDesc = NULL;
+ }
+}
+
+//==========================================================================================
+
+MethodDesc * MethodTable::IntroducedMethodIterator::GetFirst(MethodTable *pMT)
+{
+ LIMITED_METHOD_CONTRACT;
+ MethodDescChunk * pChunk = pMT->GetClass()->GetChunks();
+ return (pChunk != NULL) ? pChunk->GetFirstMethodDesc() : NULL;
+}
+
+//==========================================================================================
+MethodDesc * MethodTable::IntroducedMethodIterator::GetNext(MethodDesc * pMD)
+{
+ WRAPPER_NO_CONTRACT;
+
+ MethodDescChunk * pChunk = pMD->GetMethodDescChunk();
+
+ // Check whether the next MethodDesc is still within the bounds of the current chunk
+ TADDR pNext = dac_cast<TADDR>(pMD) + pMD->SizeOf();
+ TADDR pEnd = dac_cast<TADDR>(pChunk) + pChunk->SizeOf();
+
+ if (pNext < pEnd)
+ {
+ // Just skip to the next method in the same chunk
+ pMD = PTR_MethodDesc(pNext);
+ }
+ else
+ {
+ _ASSERTE(pNext == pEnd);
+
+ // We have walked all the methods in the current chunk. Move on
+ // to the next chunk.
+ pChunk = pChunk->GetNextChunk();
+
+ pMD = (pChunk != NULL) ? pChunk->GetFirstMethodDesc() : NULL;
+ }
+
+ return pMD;
+}
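+
+// Usage sketch (illustrative): GetFirst/GetNext form a simple cursor over all
+// methods a type introduces, walking each MethodDescChunk in turn:
+//
+//     for (MethodDesc *pMD = IntroducedMethodIterator::GetFirst(pMT);
+//          pMD != NULL;
+//          pMD = IntroducedMethodIterator::GetNext(pMD))
+//     {
+//         // ... visit pMD ...
+//     }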
+
+//==========================================================================================
+PTR_GuidInfo MethodTable::GetGuidInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_COMINTEROP
+ if (HasGuidInfo())
+ {
+ return *GetGuidInfoPtr();
+ }
+#endif // FEATURE_COMINTEROP
+ _ASSERTE(GetClass());
+ return GetClass()->GetGuidInfo();
+}
+
+//==========================================================================================
+void MethodTable::SetGuidInfo(GuidInfo* pGuidInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+ if (HasGuidInfo())
+ {
+ *EnsureWritablePages(GetGuidInfoPtr()) = pGuidInfo;
+ return;
+ }
+#endif // FEATURE_COMINTEROP
+ _ASSERTE(GetClass());
+ GetClass()->SetGuidInfo (pGuidInfo);
+
+#endif // DACCESS_COMPILE
+}
+
+#if defined(FEATURE_COMINTEROP) && !defined(DACCESS_COMPILE)
+
+//==========================================================================================
+RCWPerTypeData *MethodTable::CreateRCWPerTypeData(bool bThrowOnOOM)
+{
+ CONTRACTL
+ {
+ if (bThrowOnOOM) THROWS; else NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(HasRCWPerTypeData());
+ }
+ CONTRACTL_END;
+
+ AllocMemTracker amTracker;
+
+ RCWPerTypeData *pData;
+ if (bThrowOnOOM)
+ {
+ TaggedMemAllocPtr ptr = GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(RCWPerTypeData)));
+ pData = (RCWPerTypeData *)amTracker.Track(ptr);
+ }
+ else
+ {
+ TaggedMemAllocPtr ptr = GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem_NoThrow(S_SIZE_T(sizeof(RCWPerTypeData)));
+ pData = (RCWPerTypeData *)amTracker.Track_NoThrow(ptr);
+ if (pData == NULL)
+ {
+ return NULL;
+ }
+ }
+
+ // memory is zero-inited which means that nothing has been computed yet
+ _ASSERTE(pData->m_dwFlags == 0);
+
+ RCWPerTypeData **pDataPtr = GetRCWPerTypeDataPtr();
+
+ if (bThrowOnOOM)
+ {
+ EnsureWritablePages(pDataPtr);
+ }
+ else
+ {
+ if (!EnsureWritablePagesNoThrow(pDataPtr, sizeof(*pDataPtr)))
+ {
+ return NULL;
+ }
+ }
+
+ if (InterlockedCompareExchangeT(pDataPtr, pData, NULL) == NULL)
+ {
+ amTracker.SuppressRelease();
+ }
+ else
+ {
+ // another thread already published the pointer
+ pData = *pDataPtr;
+ }
+
+ return pData;
+}
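+
+// Note (illustrative): unlike CheckInitMethodDataCache, a thread that loses the
+// InterlockedCompareExchangeT race here does not bail out empty-handed; it
+// adopts the pointer published by the winner, so every caller observes the
+// same shared RCWPerTypeData instance.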
+
+//==========================================================================================
+RCWPerTypeData *MethodTable::GetRCWPerTypeData(bool bThrowOnOOM /*= true*/)
+{
+ CONTRACTL
+ {
+ if (bThrowOnOOM) THROWS; else NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!HasRCWPerTypeData())
+ return NULL;
+
+ RCWPerTypeData *pData = *GetRCWPerTypeDataPtr();
+ if (pData == NULL)
+ {
+ // creation is factored out into a separate routine to avoid paying the EH cost here
+ pData = CreateRCWPerTypeData(bThrowOnOOM);
+ }
+
+ return pData;
+}
+
+#endif // FEATURE_COMINTEROP && !DACCESS_COMPILE
+
+//==========================================================================================
+CHECK MethodTable::CheckActivated()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!IsArray())
+ {
+ CHECK(GetModule()->CheckActivated());
+ }
+
+ // <TODO> Check all generic type parameters as well </TODO>
+
+ CHECK_OK;
+}
+
+#ifdef _MSC_VER
+// Optimization intended for EnsureInstanceActive, IsIntrospectionOnly, EnsureActive only
+#pragma optimize("t", on)
+#endif // _MSC_VER
+//==========================================================================================
+
+#ifndef DACCESS_COMPILE
+VOID MethodTable::EnsureInstanceActive()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Module * pModule = GetModule();
+ pModule->EnsureActive();
+
+ MethodTable * pMT = this;
+ while (pMT->HasModuleDependencies())
+ {
+ pMT = pMT->GetParentMethodTable();
+ _ASSERTE(pMT != NULL);
+
+ Module * pParentModule = pMT->GetModule();
+ if (pParentModule != pModule)
+ {
+ pModule = pParentModule;
+ pModule->EnsureActive();
+ }
+ }
+
+ if (HasInstantiation())
+ {
+ // This is going to go recursive, so we need to use an interior stack probe
+
+ INTERIOR_STACK_PROBE(GetThread());
+ {
+ Instantiation inst = GetInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle thArg = inst[i];
+ if (!thArg.IsTypeDesc())
+ {
+ thArg.AsMethodTable()->EnsureInstanceActive();
+ }
+ }
+ }
+ END_INTERIOR_STACK_PROBE;
+ }
+
+}
+#endif //!DACCESS_COMPILE
+
+//==========================================================================================
+BOOL MethodTable::IsIntrospectionOnly()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetAssembly()->IsIntrospectionOnly();
+}
+
+//==========================================================================================
+BOOL MethodTable::ContainsIntrospectionOnlyTypes()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ // check this type
+ if (IsIntrospectionOnly())
+ return TRUE;
+
+ // check the instantiation
+ Instantiation inst = GetInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ CONSISTENCY_CHECK(!inst[i].IsEncodedFixup());
+ if (inst[i].ContainsIntrospectionOnlyTypes())
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+//==========================================================================================
+#ifndef DACCESS_COMPILE
+VOID MethodTable::EnsureActive()
+{
+ WRAPPER_NO_CONTRACT;
+
+ GetModule()->EnsureActive();
+}
+#endif
+
+#ifdef _MSC_VER
+#pragma optimize("", on)
+#endif // _MSC_VER
+
+//==========================================================================================
+CHECK MethodTable::CheckInstanceActivated()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (IsArray())
+ CHECK_OK;
+
+ Module * pModule = GetModule();
+ CHECK(pModule->CheckActivated());
+
+ MethodTable * pMT = this;
+ while (pMT->HasModuleDependencies())
+ {
+ pMT = pMT->GetParentMethodTable();
+ _ASSERTE(pMT != NULL);
+
+ Module * pParentModule = pMT->GetModule();
+ if (pParentModule != pModule)
+ {
+ pModule = pParentModule;
+ CHECK(pModule->CheckActivated());
+ }
+ }
+
+ CHECK_OK;
+}
+
+#ifdef DACCESS_COMPILE
+
+//==========================================================================================
+void
+MethodTable::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ WRAPPER_NO_CONTRACT;
+
+ DAC_CHECK_ENUM_THIS();
+ EMEM_OUT(("MEM: %p MethodTable\n", dac_cast<TADDR>(this)));
+
+ DWORD size = GetEndOffsetOfOptionalMembers();
+ DacEnumMemoryRegion(dac_cast<TADDR>(this), size);
+
+ if (!IsCanonicalMethodTable())
+ {
+ PTR_MethodTable pMTCanonical = GetCanonicalMethodTable();
+
+ if (pMTCanonical.IsValid())
+ {
+ pMTCanonical->EnumMemoryRegions(flags);
+ }
+ }
+ else
+ {
+ PTR_EEClass pClass = GetClass();
+
+ if (pClass.IsValid())
+ {
+ if (IsArray())
+ {
+ // This is kind of a workaround: ArrayClass is derived from EEClass, but
+ // the relationship is not virtual, so we only perform this cast when the
+ // IsArray() predicate above holds. For minidumps, DAC will choke if we
+ // don't have the full size given by ArrayClass available. If ArrayClass
+ // becomes more complex, it should get its own EnumMemoryRegions().
+ DacEnumMemoryRegion(dac_cast<TADDR>(pClass), sizeof(ArrayClass));
+ }
+ pClass->EnumMemoryRegions(flags, this);
+ }
+ }
+
+ PTR_MethodTable pMTParent = GetParentMethodTable();
+
+ if (pMTParent.IsValid())
+ {
+ pMTParent->EnumMemoryRegions(flags);
+ }
+
+ if (HasNonVirtualSlotsArray())
+ {
+ DacEnumMemoryRegion(dac_cast<TADDR>(GetNonVirtualSlotsArray()), GetNonVirtualSlotsArraySize());
+ }
+
+ if (HasInterfaceMap())
+ {
+#ifdef FEATURE_COMINTEROP
+ if (HasDynamicInterfaceMap())
+ DacEnumMemoryRegion(dac_cast<TADDR>(GetInterfaceMap()) - sizeof(DWORD_PTR), GetInterfaceMapSize());
+ else
+#endif // FEATURE_COMINTEROP
+ DacEnumMemoryRegion(dac_cast<TADDR>(GetInterfaceMap()), GetInterfaceMapSize());
+
+ EnumMemoryRegionsForExtraInterfaceInfo();
+ }
+
+ if (HasPerInstInfo())
+ {
+ DacEnumMemoryRegion(dac_cast<TADDR>(GetPerInstInfo()) - sizeof(GenericsDictInfo), GetPerInstInfoSize() + sizeof(GenericsDictInfo));
+ }
+
+ if (GetDictionary() != NULL)
+ {
+ DacEnumMemoryRegion(dac_cast<TADDR>(GetDictionary()), GetInstAndDictSize());
+ }
+
+ VtableIndirectionSlotIterator it = IterateVtableIndirectionSlots();
+ while (it.Next())
+ {
+ DacEnumMemoryRegion(dac_cast<TADDR>(it.GetIndirectionSlot()), it.GetSize());
+ }
+
+ if (m_pWriteableData.IsValid())
+ {
+ m_pWriteableData.EnumMem();
+ }
+
+ if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
+ {
+ DispatchMap * pMap = GetDispatchMap();
+ if (pMap != NULL)
+ {
+ pMap->EnumMemoryRegions(flags);
+ }
+ }
+} // MethodTable::EnumMemoryRegions
+
+#endif // DACCESS_COMPILE
+
+//==========================================================================================
+BOOL MethodTable::ContainsGenericMethodVariables()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ Instantiation inst = GetInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ CONSISTENCY_CHECK(!inst[i].IsEncodedFixup());
+ if (inst[i].ContainsGenericVariables(TRUE))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+//==========================================================================================
+Module *MethodTable::GetDefiningModuleForOpenType()
+{
+ CONTRACT(Module*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ POSTCONDITION((ContainsGenericVariables() != 0) == (RETVAL != NULL));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END
+
+ if (ContainsGenericVariables())
+ {
+ Instantiation inst = GetInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ // Encoded fixups are never open types
+ if (!inst[i].IsEncodedFixup())
+ {
+ Module *pModule = inst[i].GetDefiningModuleForOpenType();
+ if (pModule != NULL)
+ RETURN pModule;
+ }
+ }
+ }
+
+ RETURN NULL;
+}
+
+//==========================================================================================
+PCODE MethodTable::GetRestoredSlot(DWORD slotNumber)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ //
+ // Keep in sync with code:MethodTable::GetRestoredSlotMT
+ //
+
+ MethodTable * pMT = this;
+ while (true)
+ {
+ g_IBCLogger.LogMethodTableAccess(pMT);
+
+ pMT = pMT->GetCanonicalMethodTable();
+
+ _ASSERTE(pMT != NULL);
+
+ PCODE slot = pMT->GetSlot(slotNumber);
+
+ if ((slot != NULL)
+#ifdef FEATURE_PREJIT
+ && !pMT->GetLoaderModule()->IsVirtualImportThunk(slot)
+#endif
+ )
+ {
+ return slot;
+ }
+
+ // This is an inherited slot that has not been fixed up yet. Find
+ // the value by walking up the inheritance chain
+ pMT = pMT->GetParentMethodTable();
+ }
+}
+
+//==========================================================================================
+MethodTable * MethodTable::GetRestoredSlotMT(DWORD slotNumber)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ //
+ // Keep in sync with code:MethodTable::GetRestoredSlot
+ //
+
+ MethodTable * pMT = this;
+ while (true)
+ {
+ g_IBCLogger.LogMethodTableAccess(pMT);
+
+ pMT = pMT->GetCanonicalMethodTable();
+
+ _ASSERTE(pMT != NULL);
+
+ PCODE slot = pMT->GetSlot(slotNumber);
+
+ if ((slot != NULL)
+#ifdef FEATURE_PREJIT
+ && !pMT->GetLoaderModule()->IsVirtualImportThunk(slot)
+#endif
+ )
+ {
+ return pMT;
+ }
+
+ // This is an inherited slot that has not been fixed up yet. Find
+ // the value by walking up the inheritance chain
+ pMT = pMT->GetParentMethodTable();
+ }
+}
+
+//==========================================================================================
+MethodDesc * MethodTable::GetParallelMethodDesc(MethodDesc * pDefMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ return GetMethodDescForSlot(pDefMD->GetSlot());
+}
+
+#ifndef DACCESS_COMPILE
+
+//==========================================================================================
+void MethodTable::SetSlot(UINT32 slotNumber, PCODE slotCode)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+#ifdef _DEBUG
+ if (slotNumber < GetNumVirtuals())
+ {
+ //
+ // Verify that slots in shared vtable chunks not owned by this methodtable are only ever patched to a stable entrypoint.
+ // This invariant is required to prevent races with code:MethodDesc::SetStableEntryPointInterlocked.
+ //
+ BOOL fSharedVtableChunk = FALSE;
+ DWORD indirectionIndex = MethodTable::GetIndexOfVtableIndirection(slotNumber);
+
+ if (!IsCanonicalMethodTable())
+ {
+ if (GetVtableIndirections()[indirectionIndex] == GetCanonicalMethodTable()->GetVtableIndirections()[indirectionIndex])
+ fSharedVtableChunk = TRUE;
+ }
+
+ if (slotNumber < GetNumParentVirtuals())
+ {
+ if (GetVtableIndirections()[indirectionIndex] == GetParentMethodTable()->GetVtableIndirections()[indirectionIndex])
+ fSharedVtableChunk = TRUE;
+ }
+
+ if (fSharedVtableChunk)
+ {
+ MethodDesc* pMD = GetMethodDescForSlotAddress(slotCode);
+#ifndef FEATURE_INTERPRETER
+ // TBD: Make this take a "stable" debug arg, determining whether to make these assertions.
+ _ASSERTE(pMD->HasStableEntryPoint());
+ _ASSERTE(pMD->GetStableEntryPoint() == slotCode);
+#endif // FEATURE_INTERPRETER
+ }
+ }
+#endif
+
+ // IBC logging is not needed here - slots in ngen images are immutable.
+
+#ifdef _TARGET_ARM_
+ // Ensure on ARM that all target addresses are marked as thumb code.
+ _ASSERTE(IsThumbCode(slotCode));
+#endif
+
+ *GetSlotPtrRaw(slotNumber) = slotCode;
+}
+
+//==========================================================================================
+BOOL MethodTable::HasExplicitOrImplicitPublicDefaultConstructor()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ if (IsValueType())
+ {
+ // valuetypes have public default ctors implicitly
+ return TRUE;
+ }
+
+ if (!HasDefaultConstructor())
+ {
+ return FALSE;
+ }
+
+ MethodDesc * pCanonMD = GetMethodDescForSlot(GetDefaultConstructorSlot());
+ return pCanonMD != NULL && pCanonMD->IsPublic();
+}
+
+//==========================================================================================
+MethodDesc *MethodTable::GetDefaultConstructor()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(HasDefaultConstructor());
+ MethodDesc *pCanonMD = GetMethodDescForSlot(GetDefaultConstructorSlot());
+ // The default constructor for a value type is an instantiating stub.
+ // The easiest way to find the right stub is to use the following function,
+ // which in the simple case of the default constructor for a class simply
+ // returns pCanonMD immediately.
+ return MethodDesc::FindOrCreateAssociatedMethodDesc(pCanonMD,
+ this,
+ FALSE /* no BoxedEntryPointStub */,
+ Instantiation(), /* no method instantiation */
+ FALSE /* no allowInstParam */);
+}
+
+//==========================================================================================
+// Finds the (non-unboxing) MethodDesc that implements the interface method pInterfaceMD.
+//
+// Note our ability to resolve constraint methods is affected by the degree of code sharing we are
+// performing for generic code.
+//
+// Return Value:
+// MethodDesc which can be used as unvirtualized call. Returns NULL if VSD has to be used.
+MethodDesc *
+MethodTable::TryResolveConstraintMethodApprox(
+ TypeHandle thInterfaceType,
+ MethodDesc * pInterfaceMD,
+ BOOL * pfForceUseRuntimeLookup) // = NULL
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ // We can't resolve constraint calls effectively for reference types, and there's
+ // not a lot of perf. benefit in doing it anyway.
+ //
+ if (!IsValueType())
+ {
+ LOG((LF_JIT, LL_INFO10000, "TryResolveConstraintmethodApprox: not a value type %s\n", GetDebugClassName()));
+ return NULL;
+ }
+
+ // 1. Find the (possibly generic) method that would implement the
+ // constraint if we were making a call on a boxed value type.
+
+ MethodTable * pCanonMT = GetCanonicalMethodTable();
+
+ MethodDesc * pGenInterfaceMD = pInterfaceMD->StripMethodInstantiation();
+ MethodDesc * pMD = NULL;
+ if (pGenInterfaceMD->IsInterface())
+ {
+ // Sometimes (when compiling shared generic code)
+ // we don't have enough exact type information at JIT time
+ // even to decide whether we will be able to resolve to an unboxed entry point.
+ // To cope with this case we always go via the helper function if there's any
+ // chance of this happening, by checking for all interfaces which might possibly
+ // be compatible with the call (verification will have ensured that
+ // at least one of them will be).
+
+ // Enumerate all potential interface instantiations
+ MethodTable::InterfaceMapIterator it = pCanonMT->IterateInterfaceMap();
+ DWORD cPotentialMatchingInterfaces = 0;
+ while (it.Next())
+ {
+ TypeHandle thPotentialInterfaceType(it.GetInterface());
+ if (thPotentialInterfaceType.AsMethodTable()->GetCanonicalMethodTable() ==
+ thInterfaceType.AsMethodTable()->GetCanonicalMethodTable())
+ {
+ cPotentialMatchingInterfaces++;
+ pMD = pCanonMT->GetMethodDescForInterfaceMethod(thPotentialInterfaceType, pGenInterfaceMD);
+
+ // See code:#TryResolveConstraintMethodApprox_DoNotReturnParentMethod
+ if ((pMD != NULL) && !pMD->GetMethodTable()->IsValueType())
+ {
+ LOG((LF_JIT, LL_INFO10000, "TryResolveConstraintMethodApprox: %s::%s not a value type method\n",
+ pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+ return NULL;
+ }
+ }
+ }
+
+ _ASSERTE_MSG((cPotentialMatchingInterfaces != 0),
+ "At least one interface has to implement the method, otherwise there's a bug in JIT/verification.");
+
+ if (cPotentialMatchingInterfaces > 1)
+ { // We have more than one potentially matching interface
+ MethodTable * pInterfaceMT = thInterfaceType.GetMethodTable();
+ _ASSERTE(pInterfaceMT->HasInstantiation());
+
+ BOOL fIsExactMethodResolved = FALSE;
+
+ if (!pInterfaceMT->IsSharedByGenericInstantiations() &&
+ !pInterfaceMT->IsGenericTypeDefinition() &&
+ !this->IsSharedByGenericInstantiations() &&
+ !this->IsGenericTypeDefinition())
+ { // We have exact interface and type instantiations (no generic variables and no __Canon used
+ // anywhere)
+ if (this->CanCastToInterface(pInterfaceMT))
+ {
+ // We can resolve to exact method
+ pMD = this->GetMethodDescForInterfaceMethod(pInterfaceMT, pInterfaceMD);
+ _ASSERTE(pMD != NULL);
+ fIsExactMethodResolved = TRUE;
+ }
+ }
+
+ if (!fIsExactMethodResolved)
+ { // We couldn't resolve the interface statically
+ _ASSERTE(pfForceUseRuntimeLookup != NULL);
+ // Notify the caller that it should use runtime lookup
+ // Note that we can leave pMD incorrect, because we will use runtime lookup
+ *pfForceUseRuntimeLookup = TRUE;
+ }
+ }
+ else
+ {
+ // If we can resolve the interface exactly then do so (e.g. when doing the exact
+ // lookup at runtime, or when not sharing generic code).
+ if (pCanonMT->CanCastToInterface(thInterfaceType.GetMethodTable()))
+ {
+ pMD = pCanonMT->GetMethodDescForInterfaceMethod(thInterfaceType, pGenInterfaceMD);
+ if (pMD == NULL)
+ {
+ LOG((LF_JIT, LL_INFO10000, "TryResolveConstraintMethodApprox: failed to find method desc for interface method\n"));
+ }
+ }
+ }
+ }
+ else if (pGenInterfaceMD->IsVirtual())
+ {
+ if (pGenInterfaceMD->HasNonVtableSlot() && pGenInterfaceMD->GetMethodTable()->IsValueType())
+ { // GetMethodDescForSlot would AV for this slot
+ // We can get here for (invalid and unverifiable) IL:
+ // constrained. int32
+ // callvirt System.Int32::GetHashCode()
+ pMD = pGenInterfaceMD;
+ }
+ else
+ {
+ pMD = GetMethodDescForSlot(pGenInterfaceMD->GetSlot());
+ }
+ }
+ else
+ {
+ // The pMD will be NULL if calling a non-virtual instance
+ // method on System.Object, i.e. when these are used as a constraint.
+ pMD = NULL;
+ }
+
+ if (pMD == NULL)
+ { // Fall back to VSD
+ return NULL;
+ }
+
+ //#TryResolveConstraintMethodApprox_DoNotReturnParentMethod
+ // Only return a method if the value type itself declares the method,
+ // otherwise we might get a method from Object or System.ValueType
+ if (!pMD->GetMethodTable()->IsValueType())
+ { // Fall back to VSD
+ return NULL;
+ }
+
+ // We've resolved the method, ignoring its generic method arguments
+ // If the method is a generic method then go and get the instantiated descriptor
+ pMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pMD,
+ this,
+ FALSE /* no BoxedEntryPointStub */ ,
+ pInterfaceMD->GetMethodInstantiation(),
+ FALSE /* no allowInstParam */ );
+
+ // FindOrCreateAssociatedMethodDesc won't return a BoxedEntryPointStub.
+ _ASSERTE(pMD != NULL);
+ _ASSERTE(!pMD->IsUnboxingStub());
+
+ return pMD;
+} // MethodTable::TryResolveConstraintMethodApprox
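+
+// For intuition, an illustrative (hypothetical) C# shape of the constrained
+// call this routine resolves:
+//
+//     struct S : IFoo { public void M() { /* ... */ } }
+//     static void Call<T>(ref T t) where T : IFoo
+//     {
+//         t.M(); // emitted as: constrained. !!T  callvirt IFoo::M()
+//     }
+//
+// When T == S, resolving the constraint lets the JIT call S.M directly on the
+// byref without boxing; when this routine returns NULL, the call instead goes
+// through virtual stub dispatch on a boxed copy.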
+
+//==========================================================================================
+// Make a best-effort attempt to obtain an image name for use in an error message.
+//
+// This routine must expect to be called before this object is fully loaded.
+// It can return an empty string if the name isn't available or the object isn't initialized
+// enough to get a name, but it mustn't crash.
+//==========================================================================================
+LPCWSTR MethodTable::GetPathForErrorMessages()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ Module *pModule = GetModule();
+
+ if (pModule)
+ {
+ return pModule->GetPathForErrorMessages();
+ }
+ else
+ {
+ return W("");
+ }
+}
+
+#ifdef FEATURE_REMOTING
+//==========================================================================================
+// context static functions
+void MethodTable::SetupContextStatics(AllocMemTracker *pamTracker, WORD wContextStaticsSize)
+{
+ STANDARD_VM_CONTRACT;
+
+ ContextStaticsBucket* pCSInfo = (ContextStaticsBucket*) pamTracker->Track(GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(ContextStaticsBucket))));
+ *(GetContextStaticsBucketPtr()) = pCSInfo;
+
+ pCSInfo->m_dwContextStaticsOffset = (DWORD)-1; // Initialized lazily
+ pCSInfo->m_wContextStaticsSize = wContextStaticsSize;
+}
+
+#ifndef CROSSGEN_COMPILE
+//==========================================================================================
+DWORD MethodTable::AllocateContextStaticsOffset()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ g_IBCLogger.LogMethodTableWriteableDataWriteAccess(this);
+
+ BaseDomain* pDomain = IsDomainNeutral() ? SystemDomain::System() : GetDomain();
+
+ ContextStaticsBucket* pCSInfo = GetContextStaticsBucket();
+ DWORD* pOffsetSlot = &pCSInfo->m_dwContextStaticsOffset;
+
+ return pDomain->AllocateContextStaticsOffset(pOffsetSlot);
+}
+#endif // CROSSGEN_COMPILE
+
+#endif // FEATURE_REMOTING
+
+bool MethodTable::ClassRequiresUnmanagedCodeCheck()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_CORECLR
+ return false;
+#else
+ // all WinRT types have an imaginary [SuppressUnmanagedCodeSecurity] attribute on them
+ if (IsProjectedFromWinRT())
+ return false;
+
+ // In AppX processes, there is only one full trust AppDomain, so there is never any need to do a security
+ // callout on interop stubs
+ if (AppX::IsAppXProcess())
+ return false;
+
+ return GetMDImport()->GetCustomAttributeByName(GetCl(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_FALSE;
+#endif // FEATURE_CORECLR
+}
+
+#endif // !DACCESS_COMPILE
+
+
+
+BOOL MethodTable::Validate()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ASSERT_AND_CHECK(SanityCheck());
+
+#ifdef _DEBUG
+ if (m_pWriteableData == NULL)
+ {
+ _ASSERTE(IsAsyncPinType());
+ return TRUE;
+ }
+
+ DWORD dwLastVerifiedGCCnt = m_pWriteableData->m_dwLastVerifedGCCnt;
+ // Here we used to assert that (dwLastVerifiedGCCnt <= GCHeap::GetGCHeap()->GetGcCount()), but
+ // this is no longer true because of background GC. Since the purpose of
+ // m_dwLastVerifedGCCnt is just to verify the same method table only once per GC,
+ // the assert has been removed.
+ if (g_pConfig->FastGCStressLevel () > 1 && dwLastVerifiedGCCnt == GCHeap::GetGCHeap()->GetGcCount())
+ return TRUE;
+#endif //_DEBUG
+
+ if (IsArray())
+ {
+ if (!IsAsyncPinType())
+ {
+ if (!SanityCheck())
+ {
+ ASSERT_AND_CHECK(!"Detected use of a corrupted OBJECTREF. Possible GC hole.");
+ }
+ }
+ }
+ else if (!IsCanonicalMethodTable())
+ {
+ // Non-canonical method tables have to have a non-empty instantiation
+ if (GetInstantiation().IsEmpty())
+ {
+ ASSERT_AND_CHECK(!"Detected use of a corrupted OBJECTREF. Possible GC hole.");
+ }
+ }
+
+#ifdef _DEBUG
+ // It is not a fatal error to fail to update the counter. We will run slower and retry next time,
+ // but the system will function properly.
+ if (EnsureWritablePagesNoThrow(m_pWriteableData, sizeof(MethodTableWriteableData)))
+ m_pWriteableData->m_dwLastVerifedGCCnt = GCHeap::GetGCHeap()->GetGcCount();
+#endif //_DEBUG
+
+ return TRUE;
+}
+
+NOINLINE BYTE *MethodTable::GetLoaderAllocatorObjectForGC()
+{
+ WRAPPER_NO_CONTRACT;
+ if (!Collectible() || ((PTR_AppDomain)GetLoaderModule()->GetDomain())->NoAccessToHandleTable())
+ {
+ return NULL;
+ }
+ BYTE * retVal = *(BYTE**)GetLoaderAllocatorObjectHandle();
+ return retVal;
+}
+
+#ifdef FEATURE_COMINTEROP
+//==========================================================================================
+BOOL MethodTable::IsWinRTRedirectedDelegate()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (!IsDelegate())
+ {
+ return FALSE;
+ }
+
+ return !!WinRTDelegateRedirector::ResolveRedirectedDelegate(this, nullptr);
+}
+
+//==========================================================================================
+BOOL MethodTable::IsWinRTRedirectedInterface(TypeHandle::InteropKind interopKind)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!IsInterface())
+ return FALSE;
+
+ if (!HasRCWPerTypeData())
+ {
+ // All redirected interfaces have per-type RCW data
+ return FALSE;
+ }
+
+#ifdef DACCESS_COMPILE
+ RCWPerTypeData *pData = NULL;
+#else // DACCESS_COMPILE
+ // We want to keep this function LIMITED_METHOD_CONTRACT so we call GetRCWPerTypeData with
+ // the non-throwing flag. pData can be NULL if it could not be allocated.
+ RCWPerTypeData *pData = GetRCWPerTypeData(false);
+#endif // DACCESS_COMPILE
+
+ DWORD dwFlags = (pData != NULL ? pData->m_dwFlags : 0);
+ if ((dwFlags & RCWPerTypeData::InterfaceFlagsInited) == 0)
+ {
+ dwFlags = RCWPerTypeData::InterfaceFlagsInited;
+
+ if (WinRTInterfaceRedirector::ResolveRedirectedInterface(this, NULL))
+ {
+ dwFlags |= RCWPerTypeData::IsRedirectedInterface;
+ }
+ else if (HasSameTypeDefAs(MscorlibBinder::GetExistingClass(CLASS__ICOLLECTIONGENERIC)) ||
+ HasSameTypeDefAs(MscorlibBinder::GetExistingClass(CLASS__IREADONLYCOLLECTIONGENERIC)) ||
+ this == MscorlibBinder::GetExistingClass(CLASS__ICOLLECTION))
+ {
+ dwFlags |= RCWPerTypeData::IsICollectionGeneric;
+ }
+
+ if (pData != NULL)
+ {
+ FastInterlockOr(&pData->m_dwFlags, dwFlags);
+ }
+ }
+
+ if ((dwFlags & RCWPerTypeData::IsRedirectedInterface) != 0)
+ return TRUE;
+
+ if (interopKind == TypeHandle::Interop_ManagedToNative)
+ {
+ // ICollection<T> is redirected in the managed->WinRT direction (i.e. we have stubs
+ // that implement ICollection<T> methods in terms of IVector/IMap), but it is not
+ // treated specially in the WinRT->managed direction (we don't build a WinRT vtable
+ // for a class that only implements ICollection<T>). IReadOnlyCollection<T> is
+ // treated similarly.
+ if ((dwFlags & RCWPerTypeData::IsICollectionGeneric) != 0)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_READYTORUN_COMPILER
+
+static BOOL ComputeIsLayoutFixedInCurrentVersionBubble(MethodTable * pMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Primitive types and enums have fixed layout
+ if (pMT->IsTruePrimitive() || pMT->IsEnum())
+ return TRUE;
+
+ if (!pMT->GetModule()->IsInCurrentVersionBubble())
+ {
+ if (!pMT->IsValueType())
+ {
+ // Eventually, we may respect the non-versionable attribute for reference types too. For now, we are going
+ // to play it safe and ignore it.
+ return FALSE;
+ }
+
+ // Valuetypes with non-versionable attribute are candidates for fixed layout. Reject the rest.
+ if (pMT->GetModule()->GetMDImport()->GetCustomAttributeByName(pMT->GetCl(),
+ NONVERSIONABLE_TYPE, NULL, NULL) != S_OK)
+ {
+ return FALSE;
+ }
+ }
+
+ // If the above condition passed, check that all instance fields have fixed layout as well. In particular,
+ // it is important for generic types with non-versionable layout (e.g. Nullable<T>)
+ ApproxFieldDescIterator fieldIterator(pMT, ApproxFieldDescIterator::INSTANCE_FIELDS);
+ for (FieldDesc *pFD = fieldIterator.Next(); pFD != NULL; pFD = fieldIterator.Next())
+ {
+ if (pFD->GetFieldType() != ELEMENT_TYPE_VALUETYPE)
+ continue;
+
+ MethodTable * pFieldMT = pFD->GetApproxFieldTypeHandleThrowing().AsMethodTable();
+ if (!pFieldMT->IsLayoutFixedInCurrentVersionBubble())
+ return FALSE;
+ }
+
+ return TRUE;
+}
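+
+// Illustrative example: a struct outside the version bubble marked with the
+// non-versionable attribute, containing a field of another such struct, passes
+// only if the field's type also reports fixed layout; the recursion above is
+// what enforces this for nested cases such as Nullable<T>.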
+
+//
+// Is field layout in this type fixed within the current version bubble?
+// This check does not take the inheritance chain into account.
+//
+BOOL MethodTable::IsLayoutFixedInCurrentVersionBubble()
+{
+ STANDARD_VM_CONTRACT;
+
+ const MethodTableWriteableData * pWriteableData = GetWriteableData();
+ if (!(pWriteableData->m_dwFlags & MethodTableWriteableData::enum_flag_NGEN_IsLayoutFixedComputed))
+ {
+ MethodTableWriteableData * pWriteableDataForWrite = GetWriteableDataForWrite();
+ if (ComputeIsLayoutFixedInCurrentVersionBubble(this))
+ *EnsureWritablePages(&pWriteableDataForWrite->m_dwFlags) |= MethodTableWriteableData::enum_flag_NGEN_IsLayoutFixed;
+ *EnsureWritablePages(&pWriteableDataForWrite->m_dwFlags) |= MethodTableWriteableData::enum_flag_NGEN_IsLayoutFixedComputed;
+ }
+
+ return (pWriteableData->m_dwFlags & MethodTableWriteableData::enum_flag_NGEN_IsLayoutFixed) != 0;
+}
+
+//
+// Is field layout of the inheritance chain fixed within the current version bubble?
+//
+BOOL MethodTable::IsInheritanceChainLayoutFixedInCurrentVersionBubble()
+{
+ STANDARD_VM_CONTRACT;
+
+ // This method is not expected to be called for value types
+ _ASSERTE(!IsValueType());
+
+ MethodTable * pMT = this;
+
+ while ((pMT != g_pObjectClass) && (pMT != NULL))
+ {
+ if (!pMT->IsLayoutFixedInCurrentVersionBubble())
+ return FALSE;
+
+ pMT = pMT->GetParentMethodTable();
+ }
+
+ return TRUE;
+}
+#endif // FEATURE_READYTORUN_COMPILER
diff --git a/src/vm/methodtable.h b/src/vm/methodtable.h
new file mode 100644
index 0000000000..8a312132ee
--- /dev/null
+++ b/src/vm/methodtable.h
@@ -0,0 +1,4298 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: methodtable.h
+//
+// ============================================================================
+
+#ifndef _METHODTABLE_H_
+#define _METHODTABLE_H_
+
+/*
+ * Include Files
+ */
+#ifndef BINDER
+#include "vars.hpp"
+#include "cor.h"
+#include "hash.h"
+#include "crst.h"
+#include "objecthandle.h"
+#include "cgensys.h"
+#include "declsec.h"
+#ifdef FEATURE_COMINTEROP
+#include "stdinterfaces.h"
+#endif
+#include "slist.h"
+#include "spinlock.h"
+#include "typehandle.h"
+#include "eehash.h"
+#include "contractimpl.h"
+#include "generics.h"
+#include "fixuppointer.h"
+#else
+#include "classloadlevel.h"
+#endif // !BINDER
+
+/*
+ * Forward Declarations
+ */
+class AppDomain;
+class ArrayClass;
+class ArrayMethodDesc;
+struct ClassCtorInfoEntry;
+class ClassLoader;
+class DomainLocalBlock;
+class FCallMethodDesc;
+class EEClass;
+class EnCFieldDesc;
+class FieldDesc;
+class FieldMarshaler;
+class JIT_TrialAlloc;
+struct LayoutRawFieldInfo;
+class MetaSig;
+class MethodDesc;
+class MethodDescChunk;
+class MethodTable;
+class Module;
+class Object;
+class Stub;
+class Substitution;
+class TypeHandle;
+#ifdef FEATURE_REMOTING
+class CrossDomainOptimizationInfo;
+typedef DPTR(CrossDomainOptimizationInfo) PTR_CrossDomainOptimizationInfo;
+#endif
+class Dictionary;
+class AllocMemTracker;
+class SimpleRWLock;
+class MethodDataCache;
+class EEClassLayoutInfo;
+#ifdef FEATURE_COMINTEROP
+class ComCallWrapperTemplate;
+#endif
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+class ClassFactoryBase;
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+//============================================================================
+// This is the in-memory structure of a class and it will evolve.
+//============================================================================
+
+// <TODO>
+// Add a sync block
+// Also this class currently has everything public - this may change
+// Might also need to hold onto the meta data loader for this class</TODO>
+
+//
+// A MethodTable contains an array of these structures, each of which describes an interface
+// implemented by this class (directly or indirectly declared).
+//
+// Generic type instantiations (in C# syntax: C<ty_1,...,ty_n>) are represented by
+// MethodTables, i.e. a new MethodTable gets allocated for each such instantiation.
+// The entries in these tables (i.e. the code) are, however, often shared.
+//
+// In particular, a MethodTable's vtable contents (and hence method descriptors) may be
+// shared between compatible instantiations (e.g. List<string> and List<object> have
+// the same vtable *contents*). Likewise the EEClass will be shared between
+// compatible instantiations whenever the vtable contents are.
+//
+// !!! Thus it is _not_ generally the case that GetClass.GetMethodTable() == t. !!!
+//
+// Instantiated interfaces have their own method tables unique to the instantiation e.g. I<string> is
+// distinct from I<int> and I<object>
+//
+// For generic types the interface map lists generic interfaces
+// For instantiated types the interface map lists instantiated interfaces
+// e.g. for C<T> : I<T>, J<string>
+// the interface map for C would list I and J
+// the interface map for C<int> would list I<int> and J<string>
+//
+struct InterfaceInfo_t
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+ FixupPointer<PTR_MethodTable> m_pMethodTable; // Method table of the interface
+
+public:
+ FORCEINLINE PTR_MethodTable GetMethodTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pMethodTable.GetValue();
+ }
+
+#ifndef DACCESS_COMPILE
+ void SetMethodTable(MethodTable * pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pMethodTable.SetValue(pMT);
+ }
+
+ // Get approximate method table. This is used by the type loader before the type is fully loaded.
+ PTR_MethodTable GetApproxMethodTable(Module * pContainingModule);
+#endif
+}; // struct InterfaceInfo_t
+
+typedef DPTR(InterfaceInfo_t) PTR_InterfaceInfo;
+
+namespace ClassCompat
+{
+ struct InterfaceInfo_t;
+};
+
+// Data needed when simulating old VTable layout for COM Interop
+// This is necessary as the data is saved in MethodDescs and we need
+// to simulate different values without copying or changing the existing
+// MethodDescs
+//
+// This will be created in a parallel array to ppMethodDescList and
+// ppUnboxMethodDescList in the bmtMethAndFieldDescs structure below
+struct InteropMethodTableSlotData
+{
+ enum
+ {
+ e_DUPLICATE = 0x0001 // The entry is a duplicate
+ };
+
+ MethodDesc *pMD; // The MethodDesc for this slot
+ WORD wSlot; // The simulated slot value for the MethodDesc
+ WORD wFlags; // The simulated duplicate value
+ MethodDesc *pDeclMD; // To keep track of MethodImpl's
+
+ void SetDuplicate()
+ {
+ wFlags |= e_DUPLICATE;
+ }
+
+ BOOL IsDuplicate() {
+ return ((BOOL)(wFlags & e_DUPLICATE));
+ }
+
+ WORD GetSlot() {
+ return wSlot;
+ }
+
+ void SetSlot(WORD wSlot) {
+ this->wSlot = wSlot;
+ }
+}; // struct InteropMethodTableSlotData
+
+#ifdef FEATURE_COMINTEROP
+struct InteropMethodTableData
+{
+ WORD cVTable; // Count of vtable slots
+ InteropMethodTableSlotData *pVTable; // Data for each slot
+
+ WORD cNonVTable; // Count of non-vtable slots
+ InteropMethodTableSlotData *pNonVTable; // Data for each slot
+
+ WORD cInterfaceMap; // Count of interfaces
+ ClassCompat::InterfaceInfo_t *
+ pInterfaceMap; // The interface map
+
+ // Utility methods
+ static WORD GetRealMethodDesc(MethodTable *pMT, MethodDesc *pMD);
+ static WORD GetSlotForMethodDesc(MethodTable *pMT, MethodDesc *pMD);
+ ClassCompat::InterfaceInfo_t* FindInterface(MethodTable *pInterface);
+ WORD GetStartSlotForInterface(MethodTable* pInterface);
+};
+
+class InteropMethodTableSlotDataMap
+{
+protected:
+ InteropMethodTableSlotData *m_pSlotData;
+ DWORD m_cSlotData;
+ DWORD m_iCurSlot;
+
+public:
+ InteropMethodTableSlotDataMap(InteropMethodTableSlotData *pSlotData, DWORD cSlotData);
+ InteropMethodTableSlotData *GetData(MethodDesc *pMD);
+ BOOL Exists(MethodDesc *pMD);
+
+protected:
+ InteropMethodTableSlotData *Exists_Helper(MethodDesc *pMD);
+ InteropMethodTableSlotData *GetNewEntry();
+}; // class InteropMethodTableSlotDataMap
+#endif // FEATURE_COMINTEROP
+
+//
+// This struct contains cached information on the GUID associated with a type.
+//
+
+struct GuidInfo
+{
+ GUID m_Guid; // The actual guid of the type.
+ BOOL m_bGeneratedFromName; // A boolean indicating if it was generated from the
+ // name of the type.
+};
+
+typedef DPTR(GuidInfo) PTR_GuidInfo;
+
+
+// GenericsDictInfo is stored at negative offset of the dictionary
+struct GenericsDictInfo
+{
+#ifdef _WIN64
+ DWORD m_dwPadding; // Just to keep the size a multiple of 8
+#endif
+
+ // Total number of instantiation dictionaries including inherited ones
+ // i.e. how many instantiated classes (including this one) are there in the hierarchy?
+ // See comments about PerInstInfo
+ WORD m_wNumDicts;
+
+ // Number of type parameters (NOT including those of superclasses).
+ WORD m_wNumTyPars;
+}; // struct GenericsDictInfo
+typedef DPTR(GenericsDictInfo) PTR_GenericsDictInfo;
+
+struct GenericsStaticsInfo
+{
+ // Pointer to field descs for statics
+ PTR_FieldDesc m_pFieldDescs;
+
+ // Method table ID for statics
+ SIZE_T m_DynamicTypeID;
+
+}; // struct GenericsStaticsInfo
+typedef DPTR(GenericsStaticsInfo) PTR_GenericsStaticsInfo;
+
+
+// CrossModuleGenericsStaticsInfo is used in NGen images for statics of cross-module
+// generic instantiations. CrossModuleGenericsStaticsInfo is an optional member of
+// MethodTableWriteableData.
+struct CrossModuleGenericsStaticsInfo
+{
+ // Module this method table's statics are attached to.
+ //
+ // The statics have to be attached to the module referenced from the generic instantiation
+ // in domain-neutral code. We need to guarantee that the module for the statics
+ // has a valid local representation in an appdomain.
+ //
+ PTR_Module m_pModuleForStatics;
+
+ // Method table ID for statics
+ SIZE_T m_DynamicTypeID;
+}; // struct CrossModuleGenericsStaticsInfo
+typedef DPTR(CrossModuleGenericsStaticsInfo) PTR_CrossModuleGenericsStaticsInfo;
+
+// This structure records methods and fields which are interesting for VTS
+// (Version Tolerant Serialization). A pointer to it is optionally appended to
+// MethodTables with VTS event methods or NotSerialized or OptionallySerialized
+// fields. The structure is variable length to incorporate a packed array of
+// data describing the disposition of fields in the type.
+struct RemotingVtsInfo
+{
+ enum VtsCallbackType
+ {
+ VTS_CALLBACK_ON_SERIALIZING = 0,
+ VTS_CALLBACK_ON_SERIALIZED,
+ VTS_CALLBACK_ON_DESERIALIZING,
+ VTS_CALLBACK_ON_DESERIALIZED,
+ VTS_NUM_CALLBACK_TYPES
+ };
+
+ FixupPointer<PTR_MethodDesc> m_pCallbacks[VTS_NUM_CALLBACK_TYPES];
+#ifdef _DEBUG
+ DWORD m_dwNumFields;
+#endif
+ DWORD m_rFieldTypes[1];
+
+ static DWORD GetSize(DWORD dwNumFields)
+ {
+ LIMITED_METHOD_CONTRACT;
+ // Encode each field in two bits. Round up allocation to the nearest DWORD.
+ DWORD dwBitsRequired = dwNumFields * 2;
+ DWORD dwBytesRequired = (dwBitsRequired + 7) / 8;
+ return (DWORD)(offsetof(RemotingVtsInfo, m_rFieldTypes[0]) + ALIGN_UP(dwBytesRequired, sizeof(DWORD)));
+ }
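+
+ // Worked example (illustrative): for dwNumFields == 20, the packed array
+ // needs 20 * 2 = 40 bits == 5 bytes, which ALIGN_UP rounds to 8 bytes
+ // (two DWORDs), so GetSize returns offsetof(m_rFieldTypes) + 8.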
+
+ void SetIsNotSerialized(DWORD dwFieldIndex)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(dwFieldIndex < m_dwNumFields);
+ DWORD dwRecordIndex = dwFieldIndex * 2;
+ DWORD dwOffset = dwRecordIndex / (sizeof(DWORD) * 8);
+ DWORD dwMask = 1 << (dwRecordIndex % (sizeof(DWORD) * 8));
+ m_rFieldTypes[dwOffset] |= dwMask;
+ }
+
+ BOOL IsNotSerialized(DWORD dwFieldIndex)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(dwFieldIndex < m_dwNumFields);
+ DWORD dwRecordIndex = dwFieldIndex * 2;
+ DWORD dwOffset = dwRecordIndex / (sizeof(DWORD) * 8);
+ DWORD dwMask = 1 << (dwRecordIndex % (sizeof(DWORD) * 8));
+ return m_rFieldTypes[dwOffset] & dwMask;
+ }
+
+ void SetIsOptionallySerialized(DWORD dwFieldIndex)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(dwFieldIndex < m_dwNumFields);
+ DWORD dwRecordIndex = dwFieldIndex * 2;
+ DWORD dwOffset = dwRecordIndex / (sizeof(DWORD) * 8);
+ DWORD dwMask = 2 << (dwRecordIndex % (sizeof(DWORD) * 8));
+ m_rFieldTypes[dwOffset] |= dwMask;
+ }
+
+ BOOL IsOptionallySerialized(DWORD dwFieldIndex)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(dwFieldIndex < m_dwNumFields);
+ DWORD dwRecordIndex = dwFieldIndex * 2;
+ DWORD dwOffset = dwRecordIndex / (sizeof(DWORD) * 8);
+ DWORD dwMask = 2 << (dwRecordIndex % (sizeof(DWORD) * 8));
+ return m_rFieldTypes[dwOffset] & dwMask;
+ }
+}; // struct RemotingVtsInfo
+typedef DPTR(RemotingVtsInfo) PTR_RemotingVtsInfo;
+
+
+struct ContextStaticsBucket
+{
+ // Offset which points to the CLS storage. Allocated lazily - -1 means no offset allocated yet.
+ DWORD m_dwContextStaticsOffset;
+ // Size of CLS fields
+ WORD m_wContextStaticsSize;
+};
+typedef DPTR(ContextStaticsBucket) PTR_ContextStaticsBucket;
+
+#ifdef FEATURE_COMINTEROP
+struct RCWPerTypeData;
+#endif // FEATURE_COMINTEROP
+
+//
+// This struct consolidates the writeable parts of the MethodTable
+// so that we can layout a read-only MethodTable with a pointer
+// to the writeable parts of the MethodTable in an ngen image
+//
+struct MethodTableWriteableData
+{
+ friend class MethodTable;
+#if defined(DACCESS_COMPILE)
+ friend class NativeImageDumper;
+#endif
+
+ enum
+ {
+ // AS YOU ADD NEW FLAGS PLEASE CONSIDER WHETHER Generics::NewInstantiation NEEDS
+ // TO BE UPDATED IN ORDER TO ENSURE THAT METHODTABLES DUPLICATED FOR GENERIC INSTANTIATIONS
+ // CARRY THE CORRECT INITIAL FLAGS.
+
+ enum_flag_RemotingConfigChecked = 0x00000001,
+ enum_flag_RequiresManagedActivation = 0x00000002,
+ enum_flag_Unrestored = 0x00000004,
+ enum_flag_CriticalTypePrepared = 0x00000008, // CriticalFinalizerObject derived type has had backout routines prepared
+ enum_flag_HasApproxParent = 0x00000010,
+ enum_flag_UnrestoredTypeKey = 0x00000020,
+ enum_flag_IsNotFullyLoaded = 0x00000040,
+ enum_flag_DependenciesLoaded = 0x00000080, // class and all dependencies loaded up to CLASS_LOADED_BUT_NOT_VERIFIED
+
+ enum_flag_SkipWinRTOverride = 0x00000100, // No WinRT override is needed
+
+#ifdef FEATURE_PREJIT
+ // These flags are used only at ngen time. We store them here since
+ // we are running out of available flags in MethodTable. They may eventually
+ // go into ngen-specific state.
+ enum_flag_NGEN_IsFixedUp = 0x00010000, // This MT has been fixed up during NGEN
+ enum_flag_NGEN_IsNeedsRestoreCached = 0x00020000, // Set if we have cached the results of needs restore computation
+ enum_flag_NGEN_CachedNeedsRestore = 0x00040000, // The result of the needs restore computation
+ enum_flag_NGEN_OverridingInterface = 0x00080000, // Overriding interface that we should generate WinRT CCW stubs for.
+
+#ifdef FEATURE_READYTORUN_COMPILER
+ enum_flag_NGEN_IsLayoutFixedComputed = 0x00100000, // Set if we have cached the result of the IsLayoutFixed computation
+ enum_flag_NGEN_IsLayoutFixed = 0x00200000, // The result of the IsLayoutFixed computation
+#endif
+
+#endif // FEATURE_PREJIT
+
+#ifdef _DEBUG
+ enum_flag_ParentMethodTablePointerValid = 0x40000000,
+ enum_flag_HasInjectedInterfaceDuplicates = 0x80000000,
+#endif
+ };
+ DWORD m_dwFlags; // Lots of empty bits here.
+
+private:
+ /*
+ * m_hExposedClassObject is a LoaderAllocator slot index referring to
+ * a RuntimeType instance for this class. But
+ * do NOT use it for Arrays or remoted objects! All arrays of objects
+ * share the same MethodTable/EEClass.
+ * @GENERICS: this used to live in EEClass but now lives here because it is per-instantiation data
+ * only set in code:MethodTable.GetManagedClassObject
+ */
+ LOADERHANDLE m_hExposedClassObject;
+
+#ifdef _DEBUG
+public:
+ // To avoid verifying the same method table too many times when it's not changing, we cache the GC count
+ // at which the method table was last verified. When fast GC STRESS is turned on, we only verify the MT if the
+ // current GC count is bigger than this number. Note that most things which will invalidate a MT require a
+ // GC (like AD unload)
+ Volatile<DWORD> m_dwLastVerifedGCCnt;
+
+#ifdef _WIN64
+ DWORD m_dwPadding; // Just to keep the size a multiple of 8
+#endif
+
+#endif
+
+ // Optional CrossModuleGenericsStaticsInfo may be here.
+
+public:
+#ifdef _DEBUG
+ inline BOOL IsParentMethodTablePointerValid() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (m_dwFlags & enum_flag_ParentMethodTablePointerValid);
+ }
+ inline void SetParentMethodTablePointerValid()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwFlags |= enum_flag_ParentMethodTablePointerValid;
+ }
+#endif
+
+#ifdef FEATURE_PREJIT
+
+ void Save(DataImage *image, MethodTable *pMT, DWORD profilingFlags) const;
+ void Fixup(DataImage *image, MethodTable *pMT, BOOL needsRestore);
+
+ inline BOOL IsFixedUp() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_dwFlags & enum_flag_NGEN_IsFixedUp);
+ }
+ inline void SetFixedUp()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwFlags |= enum_flag_NGEN_IsFixedUp;
+ }
+
+ inline BOOL IsNeedsRestoreCached() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_dwFlags & enum_flag_NGEN_IsNeedsRestoreCached);
+ }
+
+ inline BOOL GetCachedNeedsRestore() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsNeedsRestoreCached());
+ return (m_dwFlags & enum_flag_NGEN_CachedNeedsRestore);
+ }
+
+ inline void SetCachedNeedsRestore(BOOL fNeedsRestore)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!IsNeedsRestoreCached());
+ m_dwFlags |= enum_flag_NGEN_IsNeedsRestoreCached;
+ if (fNeedsRestore) m_dwFlags |= enum_flag_NGEN_CachedNeedsRestore;
+ }
+
+ inline void SetIsOverridingInterface()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if ((m_dwFlags & enum_flag_NGEN_OverridingInterface) != 0) return;
+ FastInterlockOr(EnsureWritablePages((ULONG *) &m_dwFlags), enum_flag_NGEN_OverridingInterface);
+ }
+
+ inline BOOL IsOverridingInterface() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_dwFlags & enum_flag_NGEN_OverridingInterface);
+ }
+#endif // FEATURE_PREJIT
+
+ inline BOOL IsRemotingConfigChecked() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwFlags & enum_flag_RemotingConfigChecked;
+ }
+ inline void SetRemotingConfigChecked()
+ {
+ WRAPPER_NO_CONTRACT;
+ // remembers that we went through the rigorous
+ // checks to decide whether this class should be
+ // activated locally or remotely
+ FastInterlockOr(EnsureWritablePages((ULONG *)&m_dwFlags), enum_flag_RemotingConfigChecked);
+ }
+ inline void TrySetRemotingConfigChecked()
+ {
+ WRAPPER_NO_CONTRACT;
+ // remembers that we went through the rigorous
+ // checks to decide whether this class should be
+ // activated locally or remotely
+ if (EnsureWritablePagesNoThrow(&m_dwFlags, sizeof(m_dwFlags)))
+ FastInterlockOr((ULONG *)&m_dwFlags, enum_flag_RemotingConfigChecked);
+ }
+ inline BOOL RequiresManagedActivation() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwFlags & enum_flag_RequiresManagedActivation;
+ }
+ inline void SetRequiresManagedActivation()
+ {
+ WRAPPER_NO_CONTRACT;
+ FastInterlockOr(EnsureWritablePages((ULONG *) &m_dwFlags), enum_flag_RequiresManagedActivation|enum_flag_RemotingConfigChecked);
+ }
+
+ inline LOADERHANDLE GetExposedClassObjectHandle() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_hExposedClassObject;
+ }
+
+ void SetIsNotFullyLoadedForBuildMethodTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Used only during method table initialization - no need for logging or Interlocked Exchange.
+ m_dwFlags |= (MethodTableWriteableData::enum_flag_UnrestoredTypeKey |
+ MethodTableWriteableData::enum_flag_Unrestored |
+ MethodTableWriteableData::enum_flag_IsNotFullyLoaded |
+ MethodTableWriteableData::enum_flag_HasApproxParent);
+ }
+
+ void SetIsRestoredForBuildMethodTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Used only during method table initialization - no need for logging or Interlocked Exchange.
+ m_dwFlags &= ~(MethodTableWriteableData::enum_flag_UnrestoredTypeKey |
+ MethodTableWriteableData::enum_flag_Unrestored);
+ }
+
+ void SetIsFullyLoadedForBuildMethodTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Used only during method table initialization - no need for logging or Interlocked Exchange.
+ m_dwFlags &= ~(MethodTableWriteableData::enum_flag_UnrestoredTypeKey |
+ MethodTableWriteableData::enum_flag_Unrestored |
+ MethodTableWriteableData::enum_flag_IsNotFullyLoaded |
+ MethodTableWriteableData::enum_flag_HasApproxParent);
+ }
+
+ // Have the backout methods (Finalizer, Dispose, ReleaseHandle etc.) been prepared for this type? This currently only happens
+ // for types derived from CriticalFinalizerObject.
+ inline BOOL CriticalTypeHasBeenPrepared() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwFlags & enum_flag_CriticalTypePrepared;
+ }
+ inline void SetCriticalTypeHasBeenPrepared()
+ {
+ WRAPPER_NO_CONTRACT;
+ FastInterlockOr(EnsureWritablePages((ULONG*)&m_dwFlags), enum_flag_CriticalTypePrepared);
+ }
+
+ inline CrossModuleGenericsStaticsInfo * GetCrossModuleGenericsStaticsInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ SIZE_T size = sizeof(MethodTableWriteableData);
+ return PTR_CrossModuleGenericsStaticsInfo(dac_cast<TADDR>(this) + size);
+ }
+
+}; // struct MethodTableWriteableData
+
+typedef DPTR(MethodTableWriteableData) PTR_MethodTableWriteableData;
+typedef DPTR(MethodTableWriteableData const) PTR_Const_MethodTableWriteableData;
+
+//===============================================================================================
+//
+// GC data appears before the beginning of the MethodTable
+//
+//@GENERICS:
+// Each generic type has a corresponding "generic" method table that serves the following
+// purposes:
+// * The method table pointer is used as a representative for the generic type e.g. in reflection
+// * MethodDescs for methods in the vtable are used for reflection; they should never be invoked.
+// Some other information (e.g. BaseSize) makes no sense "generically" but unfortunately gets put in anyway.
+//
+// Each distinct instantiation of a generic type has its own MethodTable structure.
+// However, the EEClass structure can be shared between compatible instantiations e.g. List<string> and List<object>.
+// In that case, MethodDescs are also shared between compatible instantiations (but see below about generic methods).
+// Hence the vtable entries for MethodTables belonging to such an EEClass are the same.
+//
+// The non-vtable section of such MethodTables is only present for one of the instantiations (the first one
+// requested) as non-vtable entries are never accessed through the vtable pointer of an object so it's always possible
+// to ensure that they are accessed through the representative MethodTable that contains them.
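+//
+// For example (illustrative only): List<string> and List<object> are compatible
+// reference-type instantiations, so each gets its own MethodTable but they share a
+// single EEClass (and hence a single set of MethodDescs), whereas List<int> is not
+// compatible with them and must get its own EEClass.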
+
+// A MethodTable is the fundamental representation of a type in the runtime. It is this structure that
+// objects point at (see code:Object). It holds the size and GC layout of the type, as well as the dispatch table
+// for virtual dispatch (but not interface dispatch). There is a distinct method table for every instantiation of
+// a generic type. From here you can get to
+//
+// * code:EEClass
+//
+// Important fields
+// * code:MethodTable.m_pEEClass - pointer to the cold part of the type.
+// * code:MethodTable.m_pParentMethodTable - the method table of the parent type.
+//
+class MethodTableBuilder;
+class MethodTable
+{
+ /************************************
+ * FRIEND FUNCTIONS
+ ************************************/
+ // DO NOT ADD FRIENDS UNLESS ABSOLUTELY NECESSARY
+ // USE ACCESSORS TO READ/WRITE private field members
+
+ // Special access for setting up String object method table correctly
+ friend class ClassLoader;
+ friend class JIT_TrialAlloc;
+#ifndef BINDER
+ friend class Module;
+#else
+ friend class MdilModule;
+ friend class CompactTypeBuilder;
+#endif
+ friend class EEClass;
+ friend class MethodTableBuilder;
+ friend class CheckAsmOffsets;
+#if defined(DACCESS_COMPILE)
+ friend class NativeImageDumper;
+#endif
+
+public:
+ // Do some sanity checking to make sure it's a method table
+ // and not pointing to some random memory. In particular
+ // check that (apart from the special case of instantiated generic types) we have
+ // GetCanonicalMethodTable() == this;
+ BOOL SanityCheck();
+
+ static void CallFinalizer(Object *obj);
+
+public:
+ PTR_Module GetModule();
+ PTR_Module GetModule_NoLogging();
+ Assembly *GetAssembly();
+
+ PTR_Module GetModuleIfLoaded();
+
+ // GetDomain on an instantiated type, e.g. C<ty1,ty2> returns the SharedDomain if all the
+ // constituent parts of the type are SharedDomain (i.e. domain-neutral),
+ // and returns an AppDomain if any of the parts are from an AppDomain,
+ // i.e. are domain-bound. Note that if any of the parts are domain-bound
+ // then they will all belong to the same domain.
+ PTR_BaseDomain GetDomain();
+
+ // Does this immediate item live in an NGEN module?
+ BOOL IsZapped();
+
+ // For types that are part of an ngen-ed assembly this gets the
+ // Module* that contains this methodtable.
+ PTR_Module GetZapModule();
+
+ // For regular, non-constructed types, GetLoaderModule() == GetModule()
+ // For constructed types (e.g. int[], Dict<int[], C>) the hash table through which a type
+ // is accessed lives in a "loader module". The rule for determining the loader module must ensure
+ // that a type never outlives its loader module with respect to app-domain unloading
+ //
+    // GetModuleForStatics() is the third kind of module: the module that
+    // statics are attached to.
+ PTR_Module GetLoaderModule();
+ PTR_LoaderAllocator GetLoaderAllocator();
+
+#ifndef BINDER
+ void SetLoaderModule(Module* pModule);
+#else
+ void SetLoaderModule(MdilModule* pModule);
+#endif
+ void SetLoaderAllocator(LoaderAllocator* pAllocator);
+
+ // Get the domain local module - useful for static init checks
+ PTR_DomainLocalModule GetDomainLocalModule(AppDomain * pAppDomain);
+
+#ifndef DACCESS_COMPILE
+ // Version of GetDomainLocalModule which relies on the current AppDomain
+ PTR_DomainLocalModule GetDomainLocalModule();
+#endif
+
+ // Return whether the type lives in the shared domain.
+ BOOL IsDomainNeutral();
+
+ MethodTable *LoadEnclosingMethodTable(ClassLoadLevel targetLevel = CLASS_DEPENDENCIES_LOADED);
+
+ LPCWSTR GetPathForErrorMessages();
+
+ //-------------------------------------------------------------------
+ // COM INTEROP
+ //
+ BOOL IsProjectedFromWinRT();
+ BOOL IsExportedToWinRT();
+ BOOL IsWinRTDelegate();
+ BOOL IsWinRTRedirectedInterface(TypeHandle::InteropKind interopKind);
+ BOOL IsWinRTRedirectedDelegate();
+
+#ifdef FEATURE_COMINTEROP
+ TypeHandle GetCoClassForInterface();
+
+private:
+ TypeHandle SetupCoClassForInterface();
+
+public:
+ DWORD IsComClassInterface();
+
+ // Retrieves the COM interface type.
+ CorIfaceAttr GetComInterfaceType();
+ void SetComInterfaceType(CorIfaceAttr ItfType);
+
+ // Determines whether this is a WinRT-legal type
+ BOOL IsLegalWinRTType(OBJECTREF *poref);
+
+ // Determines whether this is a WinRT-legal type - don't use it with array
+ BOOL IsLegalNonArrayWinRTType();
+
+ MethodTable *GetDefaultWinRTInterface();
+
+ OBJECTHANDLE GetOHDelegate();
+ void SetOHDelegate (OBJECTHANDLE _ohDelegate);
+
+ CorClassIfaceAttr GetComClassInterfaceType();
+ TypeHandle GetDefItfForComClassItf();
+
+ void GetEventInterfaceInfo(MethodTable **ppSrcItfType, MethodTable **ppEvProvType);
+
+ BOOL IsExtensibleRCW();
+
+ // mark the class type as COM object class
+ void SetComObjectType();
+
+#if defined(FEATURE_TYPEEQUIVALENCE) || defined(FEATURE_REMOTING)
+ // mark the type as opted into type equivalence
+ void SetHasTypeEquivalence();
+#endif
+
+ // Helper to get parent class skipping over COM class in
+ // the hierarchy
+ MethodTable* GetComPlusParentMethodTable();
+
+ // class is a com object class
+ BOOL IsComObjectType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_ComObject);
+ }
+ // class is a WinRT object class (is itself or derives from a ProjectedFromWinRT class)
+ BOOL IsWinRTObjectType();
+
+ DWORD IsComImport();
+
+ // class is a special COM event interface
+ int IsComEventItfType();
+
+ //-------------------------------------------------------------------
+ // Sparse VTables. These require a SparseVTableMap in the EEClass in
+ // order to record how the CLR's vtable slots map across to COM
+ // Interop slots.
+ //
+ int IsSparseForCOMInterop();
+
+ // COM interop helpers
+ // accessors for m_pComData
+ ComCallWrapperTemplate *GetComCallWrapperTemplate();
+ BOOL SetComCallWrapperTemplate(ComCallWrapperTemplate *pTemplate);
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ ClassFactoryBase *GetComClassFactory();
+ BOOL SetComClassFactory(ClassFactoryBase *pFactory);
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+ OBJECTREF GetObjCreateDelegate();
+ void SetObjCreateDelegate(OBJECTREF orDelegate);
+
+private:
+ // This is for COM Interop backwards compatibility
+ BOOL InsertComInteropData(InteropMethodTableData *pData);
+ InteropMethodTableData *CreateComInteropData(AllocMemTracker *pamTracker);
+
+public:
+ InteropMethodTableData *LookupComInteropData();
+    // This is the preferred entry point, as it will make sure that all
+    // parent MTs have their interop data created, and will create and
+    // add this MT's data if not available. The caller should make sure that
+    // an appropriate lock is taken to prevent duplicates.
+    // NOTE: The current caller of this is ComInterop, and it makes calls
+    // under its own lock to ensure no duplicates.
+ InteropMethodTableData *GetComInteropData();
+
+#else // !FEATURE_COMINTEROP
+ BOOL IsComObjectType()
+ {
+ SUPPORTS_DAC;
+ return FALSE;
+ }
+ BOOL IsWinRTObjectType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+ }
+#endif // !FEATURE_COMINTEROP
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+ // type has opted into type equivalence or is instantiated by/derived from a type that is
+ BOOL HasTypeEquivalence()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetFlag(enum_flag_HasTypeEquivalence);
+ }
+#else
+ BOOL HasTypeEquivalence()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+ }
+#endif
+
+ //-------------------------------------------------------------------
+ // DYNAMIC ADDITION OF INTERFACES FOR COM INTEROP
+ //
+ // Support for dynamically added interfaces on extensible RCW's.
+
+#ifdef FEATURE_COMINTEROP
+ PTR_InterfaceInfo GetDynamicallyAddedInterfaceMap();
+ unsigned GetNumDynamicallyAddedInterfaces();
+ BOOL FindDynamicallyAddedInterface(MethodTable *pInterface);
+ void AddDynamicInterface(MethodTable *pItfMT);
+
+ BOOL HasDynamicInterfaceMap()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+        // Currently all COM object types except
+        // for __ComObject have dynamic interface maps.
+ return GetNumInterfaces() > 0 && IsComObjectType() && !ParentEquals(g_pObjectClass);
+ }
+#endif // FEATURE_COMINTEROP
+
+ BOOL IsIntrospectionOnly();
+
+ // Checks this type and its instantiation for "IsIntrospectionOnly"
+ BOOL ContainsIntrospectionOnlyTypes();
+
+#ifndef DACCESS_COMPILE
+ VOID EnsureActive();
+ VOID EnsureInstanceActive();
+#endif
+
+ CHECK CheckActivated();
+ CHECK CheckInstanceActivated();
+
+ //-------------------------------------------------------------------
+ // THE DEFAULT CONSTRUCTOR
+ //
+
+public:
+ BOOL HasDefaultConstructor();
+ void SetHasDefaultConstructor();
+ WORD GetDefaultConstructorSlot();
+ MethodDesc *GetDefaultConstructor();
+
+ BOOL HasExplicitOrImplicitPublicDefaultConstructor();
+
+ //-------------------------------------------------------------------
+ // THE CLASS INITIALIZATION CONDITION
+ // (and related DomainLocalBlock/DomainLocalModule storage)
+ //
+ // - populate the DomainLocalModule if needed
+ // - run the cctor
+ //
+
+public:
+
+    // checks whether the class initializer should be run on this class, and runs it if necessary
+ void CheckRunClassInitThrowing();
+
+ // checks whether or not the non-beforefieldinit class initializers have been run for all types in this type's
+ // inheritance hierarchy, and runs them if necessary. This simulates the behavior of running class constructors
+ // during object construction.
+ void CheckRunClassInitAsIfConstructingThrowing();
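+    //
+    // Illustrative call-site pattern (a sketch, not a prescribed sequence): before touching
+    // the statics of a type whose .cctor is not beforefieldinit, runtime code typically does:
+    //
+    //   pMT->CheckRunClassInitThrowing();   // populates the DomainLocalModule and runs the .cctor once
+    //   ... access the type's static fields ...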
+
+ // Copy m_dwFlags from another method table
+ void CopyFlags(MethodTable * pOldMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_dwFlags = pOldMT->m_dwFlags;
+ m_wFlags2 = pOldMT->m_wFlags2;
+ }
+
+ // Init the m_dwFlags field for an array
+ void SetIsArray(CorElementType arrayType, CorElementType elementType);
+
+ BOOL IsClassPreInited();
+
+ // mark the class as having its cctor run.
+#ifndef DACCESS_COMPILE
+ void SetClassInited();
+ BOOL IsClassInited(AppDomain* pAppDomain = NULL);
+
+ BOOL IsInitError();
+ void SetClassInitError();
+#endif
+
+ inline BOOL IsGlobalClass()
+ {
+ WRAPPER_NO_CONTRACT;
+ return (GetTypeDefRid() == RidFromToken(COR_GLOBAL_PARENT_TOKEN));
+ }
+
+    // uniquely identifies this type in the Domain table
+ DWORD GetClassIndex();
+
+ bool ClassRequiresUnmanagedCodeCheck();
+
+private:
+
+ DWORD GetClassIndexFromToken(mdTypeDef typeToken)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return RidFromToken(typeToken) - 1;
+ }
+
+ // called from CheckRunClassInitThrowing(). The type wasn't marked as
+ // inited while we were there, so let's attempt to do the work.
+ void DoRunClassInitThrowing();
+
+ BOOL RunClassInitEx(OBJECTREF *pThrowable);
+
+public:
+ //-------------------------------------------------------------------
+ // THE CLASS CONSTRUCTOR
+ //
+
+ MethodDesc * GetClassConstructor();
+
+ BOOL HasClassConstructor();
+ void SetHasClassConstructor();
+ WORD GetClassConstructorSlot();
+ void SetClassConstructorSlot (WORD wCCtorSlot);
+
+ ClassCtorInfoEntry* GetClassCtorInfoIfExists();
+
+
+ void GetSavedExtent(TADDR *ppStart, TADDR *ppEnd);
+
+ //-------------------------------------------------------------------
+ // Save/Fixup/Restore/NeedsRestore
+ //
+ // Restore this method table if it's not already restored
+ // This is done by forcing a class load which in turn calls the Restore method
+ // The pending list is required for restoring types that reference themselves through
+ // instantiations of the superclass or interfaces e.g. System.Int32 : IComparable<System.Int32>
+
+
+#ifdef FEATURE_PREJIT
+
+ void Save(DataImage *image, DWORD profilingFlags);
+ void Fixup(DataImage *image);
+
+ // This is different from !IsRestored() in that it checks if restoring
+    // will ever be needed for this ngen-ed data structure.
+ // This is to be used at ngen time of a dependent module to determine
+ // if it can be accessed directly, or if the restoring mechanism needs
+ // to be hooked in.
+ BOOL ComputeNeedsRestore(DataImage *image, TypeHandleList *pVisited);
+
+ BOOL NeedsRestore(DataImage *image)
+ {
+ WRAPPER_NO_CONTRACT;
+ return ComputeNeedsRestore(image, NULL);
+ }
+
+private:
+ BOOL ComputeNeedsRestoreWorker(DataImage *image, TypeHandleList *pVisited);
+
+public:
+ // This returns true at NGen time if we can eager bind to all dictionaries along the inheritance chain
+ BOOL CanEagerBindToParentDictionaries(DataImage *image, TypeHandleList *pVisited);
+
+    // This returns true at NGen time if we may need to attach statics to
+    // a module other than the current loader module at runtime
+ BOOL NeedsCrossModuleGenericsStaticsInfo();
+
+ // Returns true at NGen time if we may need to write into the MethodTable at runtime
+ BOOL IsWriteable();
+
+#endif // FEATURE_PREJIT
+
+ void AllocateRegularStaticBoxes();
+ static OBJECTREF AllocateStaticBox(MethodTable* pFieldMT, BOOL fPinned, OBJECTHANDLE* pHandle = 0);
+
+ void CheckRestore();
+
+ // Perform restore actions on type key components of method table (EEClass pointer + Module, generic args)
+ void DoRestoreTypeKey();
+
+ inline BOOL HasUnrestoredTypeKey() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return !IsPreRestored() &&
+ (GetWriteableData()->m_dwFlags & MethodTableWriteableData::enum_flag_UnrestoredTypeKey) != 0;
+ }
+
+ // Actually do the restore actions on the method table
+ void Restore();
+
+ void SetIsRestored();
+
+ inline BOOL IsRestored_NoLogging()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // If we are prerestored then we are considered a restored methodtable.
+ // Note that IsPreRestored is always false for jitted code.
+ if (IsPreRestored())
+ return TRUE;
+
+ return !(GetWriteableData_NoLogging()->m_dwFlags & MethodTableWriteableData::enum_flag_Unrestored);
+ }
+ inline BOOL IsRestored()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ g_IBCLogger.LogMethodTableAccess(this);
+
+ // If we are prerestored then we are considered a restored methodtable.
+ // Note that IsPreRestored is always false for jitted code.
+ if (IsPreRestored())
+ return TRUE;
+
+ return !(GetWriteableData()->m_dwFlags & MethodTableWriteableData::enum_flag_Unrestored);
+ }
+
+ //-------------------------------------------------------------------
+ // LOAD LEVEL
+ //
+ // The load level of a method table is derived from various flag bits
+ // See classloadlevel.h for details of each level
+ //
+ // Level CLASS_LOADED (fully loaded) is special: a type only
+ // reaches this level once all of its dependent types are also at
+ // this level (generic arguments, parent, interfaces, etc).
+ // Fully loading a type to this level is done outside locks, hence the need for
+ // a single atomic action that sets the level.
+ //
+ inline void SetIsFullyLoaded()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PRECONDITION(!HasApproxParent());
+ PRECONDITION(IsRestored_NoLogging());
+
+ FastInterlockAnd(EnsureWritablePages(&GetWriteableDataForWrite()->m_dwFlags), ~MethodTableWriteableData::enum_flag_IsNotFullyLoaded);
+ }
+
+ // Equivalent to GetLoadLevel() == CLASS_LOADED
+ inline BOOL IsFullyLoaded()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return (IsPreRestored())
+ || (GetWriteableData()->m_dwFlags & MethodTableWriteableData::enum_flag_IsNotFullyLoaded) == 0;
+ }
+
+ inline BOOL IsSkipWinRTOverride()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (GetWriteableData_NoLogging()->m_dwFlags & MethodTableWriteableData::enum_flag_SkipWinRTOverride);
+ }
+
+ inline void SetSkipWinRTOverride()
+ {
+ WRAPPER_NO_CONTRACT;
+ FastInterlockOr(EnsureWritablePages(&GetWriteableDataForWrite_NoLogging()->m_dwFlags), MethodTableWriteableData::enum_flag_SkipWinRTOverride);
+ }
+
+ inline void SetIsDependenciesLoaded()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PRECONDITION(!HasApproxParent());
+ PRECONDITION(IsRestored_NoLogging());
+
+ FastInterlockOr(EnsureWritablePages(&GetWriteableDataForWrite()->m_dwFlags), MethodTableWriteableData::enum_flag_DependenciesLoaded);
+ }
+
+ inline ClassLoadLevel GetLoadLevel()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ g_IBCLogger.LogMethodTableAccess(this);
+
+ // Fast path for zapped images
+ if (IsPreRestored())
+ return CLASS_LOADED;
+
+ DWORD dwFlags = GetWriteableData()->m_dwFlags;
+
+ if (dwFlags & MethodTableWriteableData::enum_flag_IsNotFullyLoaded)
+ {
+ if (dwFlags & MethodTableWriteableData::enum_flag_UnrestoredTypeKey)
+ return CLASS_LOAD_UNRESTOREDTYPEKEY;
+
+ if (dwFlags & MethodTableWriteableData::enum_flag_Unrestored)
+ return CLASS_LOAD_UNRESTORED;
+
+ if (dwFlags & MethodTableWriteableData::enum_flag_HasApproxParent)
+ return CLASS_LOAD_APPROXPARENTS;
+
+ if (!(dwFlags & MethodTableWriteableData::enum_flag_DependenciesLoaded))
+ return CLASS_LOAD_EXACTPARENTS;
+
+ return CLASS_DEPENDENCIES_LOADED;
+ }
+
+ return CLASS_LOADED;
+ }
+
+#ifdef _DEBUG
+ CHECK CheckLoadLevel(ClassLoadLevel level)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return TypeHandle(this).CheckLoadLevel(level);
+ }
+#endif
+
+
+ void DoFullyLoad(Generics::RecursionGraph * const pVisited, const ClassLoadLevel level, DFLPendingList * const pPending, BOOL * const pfBailed,
+ const InstantiationContext * const pInstContext);
+
+ //-------------------------------------------------------------------
+ // METHOD TABLES AS TYPE DESCRIPTORS
+ //
+    // A MethodTable can represent a type such as "String" or an
+    // instantiated type such as "List<String>".
+ //
+
+ inline BOOL IsInterface()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_Category_Mask) == enum_flag_Category_Interface;
+ }
+
+ void SetIsInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(GetFlag(enum_flag_Category_Mask) == 0);
+ SetFlag(enum_flag_Category_Interface);
+ }
+
+ inline BOOL IsSealed();
+
+ inline BOOL IsAbstract();
+
+ BOOL IsExternallyVisible();
+
+ // Get the instantiation for this instantiated type e.g. for Dict<string,int>
+ // this would be an array {string,int}
+ // If not instantiated, return NULL
+ Instantiation GetInstantiation();
+
+ // Get the instantiation for an instantiated type or a pointer to the
+ // element type for an array
+ Instantiation GetClassOrArrayInstantiation();
+ Instantiation GetArrayInstantiation();
+
+ // Does this method table require that additional modules be loaded?
+ inline BOOL HasModuleDependencies()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetFlag(enum_flag_HasModuleDependencies);
+ }
+
+ inline void SetHasModuleDependencies()
+ {
+ SetFlag(enum_flag_HasModuleDependencies);
+ }
+
+ // See the comment in code:MethodTable.DoFullyLoad for detailed description.
+ inline BOOL DependsOnEquivalentOrForwardedStructs()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetFlag(enum_flag_DependsOnEquivalentOrForwardedStructs);
+ }
+
+ inline void SetDependsOnEquivalentOrForwardedStructs()
+ {
+ SetFlag(enum_flag_DependsOnEquivalentOrForwardedStructs);
+ }
+
+ // Is this a method table for a generic type instantiation, e.g. List<string>?
+ inline BOOL HasInstantiation();
+
+ // Returns true for any class which is either itself a generic
+ // instantiation or is derived from a generic
+    // instantiation anywhere in its class hierarchy,
+ //
+ // e.g. class D : C<int>
+ // or class E : D, class D : C<int>
+ //
+ // Does not return true just because the class supports
+ // an instantiated interface type.
+ BOOL HasGenericClassInstantiationInHierarchy()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetNumDicts() != 0;
+ }
+
+    // Is this an instantiation of a generic class at its formal
+    // type parameters, i.e. List<T>?
+ inline BOOL IsGenericTypeDefinition();
+
+ BOOL ContainsGenericMethodVariables();
+
+ static BOOL ComputeContainsGenericVariables(Instantiation inst);
+
+ inline void SetContainsGenericVariables()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_ContainsGenericVariables);
+ }
+
+ inline void SetHasVariance()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_HasVariance);
+ }
+
+ inline BOOL HasVariance()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetFlag(enum_flag_HasVariance);
+ }
+
+ // Is this something like List<T> or List<Stack<T>>?
+ // List<Blah<T>> only exists for reflection and verification.
+ inline DWORD ContainsGenericVariables(BOOL methodVarsOnly = FALSE)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ if (methodVarsOnly)
+ return ContainsGenericMethodVariables();
+ else
+ return GetFlag(enum_flag_ContainsGenericVariables);
+ }
+
+
+ inline BOOL ContainsStackPtr();
+
+ // class is a com object class
+ Module* GetDefiningModuleForOpenType();
+
+ inline BOOL IsTypicalTypeDefinition()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return !HasInstantiation() || IsGenericTypeDefinition();
+ }
+
+ typedef enum
+ {
+ modeProjected = 0x1,
+ modeRedirected = 0x2,
+ modeAll = modeProjected|modeRedirected
+ } Mode;
+
+ // Is this a generic interface/delegate that can be used for COM interop?
+ inline BOOL SupportsGenericInterop(TypeHandle::InteropKind interopKind, Mode = modeAll);
+
+ BOOL HasSameTypeDefAs(MethodTable *pMT);
+ BOOL HasSameTypeDefAs_NoLogging(MethodTable *pMT);
+
+ //-------------------------------------------------------------------
+ // GENERICS & CODE SHARING
+ //
+
+ BOOL IsSharedByGenericInstantiations();
+
+ // If this is a "representative" generic MT or a non-generic (regular) MT return true
+ inline BOOL IsCanonicalMethodTable();
+
+ // Return the canonical representative MT amongst the set of MT's that share
+ // code with the given MT because of generics.
+ PTR_MethodTable GetCanonicalMethodTable();
+
+ // Returns fixup if canonical method table needs fixing up, NULL otherwise
+ TADDR GetCanonicalMethodTableFixup();
+
+ //-------------------------------------------------------------------
+ // Accessing methods by slot number
+ //
+ // Some of these functions are also currently used to get non-virtual
+ // methods, relying on the assumption that they are contiguous. This
+ // is not true for non-virtual methods in generic instantiations, which
+ // only live on the canonical method table.
+
+ enum
+ {
+ NO_SLOT = 0xffff // a unique slot number used to indicate "empty" for fields that record slot numbers
+ };
+
+#ifndef BINDER // the binder works with a slightly different representation, so remove these
+ PCODE GetSlot(UINT32 slotNumber)
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ CONSISTENCY_CHECK(slotNumber < GetNumVtableSlots());
+ PTR_PCODE pSlot = GetSlotPtrRaw(slotNumber);
+ if (IsZapped() && slotNumber >= GetNumVirtuals())
+ {
+ // Non-virtual slots in NGened images are relative pointers
+ return RelativePointer<PCODE>::GetValueAtPtr(dac_cast<TADDR>(pSlot));
+ }
+ return *pSlot;
+ }
+
+ // Special-case for when we know that the slot number corresponds
+ // to a virtual method.
+ inline PCODE GetSlotForVirtual(UINT32 slotNum)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ CONSISTENCY_CHECK(slotNum < GetNumVirtuals());
+ // Virtual slots live in chunks pointed to by vtable indirections
+ return *(GetVtableIndirections()[GetIndexOfVtableIndirection(slotNum)] + GetIndexAfterVtableIndirection(slotNum));
+ }
+
+ PTR_PCODE GetSlotPtrRaw(UINT32 slotNum)
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ CONSISTENCY_CHECK(slotNum < GetNumVtableSlots());
+
+ if (slotNum < GetNumVirtuals())
+ {
+ // Virtual slots live in chunks pointed to by vtable indirections
+ return GetVtableIndirections()[GetIndexOfVtableIndirection(slotNum)] + GetIndexAfterVtableIndirection(slotNum);
+ }
+ else if (HasSingleNonVirtualSlot())
+ {
+ // Non-virtual slots < GetNumVtableSlots live in a single chunk pointed to by an optional member,
+            // except when there is only one, in which case it lives in the optional member itself
+ _ASSERTE(slotNum == GetNumVirtuals());
+ return dac_cast<PTR_PCODE>(GetNonVirtualSlotsPtr());
+ }
+ else
+ {
+ // Non-virtual slots < GetNumVtableSlots live in a single chunk pointed to by an optional member
+ _ASSERTE(HasNonVirtualSlotsArray());
+ g_IBCLogger.LogMethodTableNonVirtualSlotsAccess(this);
+ return GetNonVirtualSlotsArray() + (slotNum - GetNumVirtuals());
+ }
+ }
+
+ PTR_PCODE GetSlotPtr(UINT32 slotNum)
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ // Slots in NGened images are relative pointers
+ CONSISTENCY_CHECK(!IsZapped());
+
+ return GetSlotPtrRaw(slotNum);
+ }
+
+ void SetSlot(UINT32 slotNum, PCODE slotVal);
+#endif
+
+ //-------------------------------------------------------------------
+ // The VTABLE
+ //
+ // Rather than the traditional array of code pointers (or "slots") we use a two-level vtable in
+ // which slots for virtual methods live in chunks. Doing so allows the chunks to be shared among
+ // method tables (the most common example being between parent and child classes where the child
+ // does not override any method in the chunk). This yields substantial space savings at the fixed
+ // cost of one additional indirection for a virtual call.
+ //
+ // Note that none of this should be visible outside the implementation of MethodTable; all other
+ // code continues to refer to a virtual method via the traditional slot number. This is similar to
+ // how we refer to non-virtual methods as having a slot number despite having long ago moved their
+ // code pointers out of the vtable.
+ //
+ // Consider a class where GetNumVirtuals is 5 and (for the sake of the example) assume we break
+ // the vtable into chunks of size 3. The layout would be as follows:
+ //
+ // pMT chunk 1 chunk 2
+ // ------------------ ------------------ ------------------
+ // | | | M1() | | M4() |
+ // | fixed-size | ------------------ ------------------
+ // | portion of | | M2() | | M5() |
+ // | MethodTable | ------------------ ------------------
+ // | | | M3() |
+ // ------------------ ------------------
+ // | ptr to chunk 1 |
+ // ------------------
+ // | ptr to chunk 2 |
+ // ------------------
+ //
+ // We refer to "ptr to chunk 1" and "ptr to chunk 2" as "indirection slots."
+ //
+    // The current chunking strategy is independent of class properties; all chunks are of size 8. Several
+    // other strategies were tried, and the only one that performed better empirically was to begin
+    // with a single chunk of size 4 (matching the number of virtuals in System.Object) and then
+    // continue with chunks of size 8. However, the improvement was small and it made the run-time
+    // helpers listed below measurably slower.
+ //
+ // If you want to change this, you should only need to modify the first four functions below
+ // along with any assembly helper that has taken a dependency on the layout. Currently,
+ // those consist of:
+ // JIT_IsInstanceOfInterface
+ // JIT_ChkCastInterface
+ // Transparent proxy stub
+ //
+ // This layout only applies to the virtual methods in a class (those with slot number below GetNumVirtuals).
+ // Non-virtual methods that are in the vtable (those with slot numbers between GetNumVirtuals and
+ // GetNumVtableSlots) are laid out in a single chunk pointed to by an optional member.
+ // See GetSlotPtrRaw for more details.
+
+ #define VTABLE_SLOTS_PER_CHUNK 8
+ #define VTABLE_SLOTS_PER_CHUNK_LOG2 3
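+
+    // For illustration (a sketch; the accessors declared below are the authoritative mapping):
+    // with chunks of VTABLE_SLOTS_PER_CHUNK slots, locating the code pointer for a virtual
+    // slot is shift/mask arithmetic plus one extra indirection:
+    //
+    //   DWORD  chunkIndex   = slotNum >> VTABLE_SLOTS_PER_CHUNK_LOG2;    // slotNum / 8
+    //   UINT32 indexInChunk = slotNum & (VTABLE_SLOTS_PER_CHUNK - 1);    // slotNum % 8
+    //   PCODE  pCode        = GetVtableIndirections()[chunkIndex][indexInChunk];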
+
+ static DWORD GetIndexOfVtableIndirection(DWORD slotNum);
+ static DWORD GetStartSlotForVtableIndirection(UINT32 indirectionIndex, DWORD wNumVirtuals);
+ static DWORD GetEndSlotForVtableIndirection(UINT32 indirectionIndex, DWORD wNumVirtuals);
+ static UINT32 GetIndexAfterVtableIndirection(UINT32 slotNum);
+ static DWORD GetNumVtableIndirections(DWORD wNumVirtuals);
+ PTR_PTR_PCODE GetVtableIndirections();
+ DWORD GetNumVtableIndirections();
+
+ class VtableIndirectionSlotIterator
+ {
+ friend class MethodTable;
+
+ private:
+ PTR_PTR_PCODE m_pSlot;
+ DWORD m_i;
+ DWORD m_count;
+ PTR_MethodTable m_pMT;
+
+ VtableIndirectionSlotIterator(MethodTable *pMT);
+ VtableIndirectionSlotIterator(MethodTable *pMT, DWORD index);
+
+ public:
+ BOOL Next();
+ BOOL Finished();
+ DWORD GetIndex();
+ DWORD GetOffsetFromMethodTable();
+ PTR_PCODE GetIndirectionSlot();
+
+#ifndef DACCESS_COMPILE
+ void SetIndirectionSlot(PTR_PCODE pChunk);
+#endif
+
+ DWORD GetStartSlot();
+ DWORD GetEndSlot();
+ DWORD GetNumSlots();
+ DWORD GetSize();
+ }; // class VtableIndirectionSlotIterator
+
+ VtableIndirectionSlotIterator IterateVtableIndirectionSlots();
+ VtableIndirectionSlotIterator IterateVtableIndirectionSlotsFrom(DWORD index);
+
+#ifdef FEATURE_PREJIT
+ static BOOL CanShareVtableChunksFrom(MethodTable *pTargetMT, Module *pCurrentLoaderModule, Module *pCurrentPreferredZapModule);
+ BOOL CanInternVtableChunk(DataImage *image, VtableIndirectionSlotIterator it);
+#else
+ static BOOL CanShareVtableChunksFrom(MethodTable *pTargetMT, Module *pCurrentLoaderModule);
+#endif
+
+ inline BOOL HasNonVirtualSlots()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_HasNonVirtualSlots);
+ }
+
+ inline BOOL HasSingleNonVirtualSlot()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_HasSingleNonVirtualSlot);
+ }
+
+ inline BOOL HasNonVirtualSlotsArray()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return HasNonVirtualSlots() && !HasSingleNonVirtualSlot();
+ }
+
+ TADDR GetNonVirtualSlotsPtr();
+
+ inline PTR_PCODE GetNonVirtualSlotsArray()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(HasNonVirtualSlotsArray());
+ return RelativePointer<PTR_PCODE>::GetValueAtPtr(GetNonVirtualSlotsPtr());
+ }
+
+#ifndef DACCESS_COMPILE
+ inline void SetNonVirtualSlotsArray(PTR_PCODE slots)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasNonVirtualSlotsArray());
+
+ RelativePointer<PTR_PCODE>::SetValueAtPtr(GetNonVirtualSlotsPtr(), slots);
+ }
+
+ inline void SetHasSingleNonVirtualSlot()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_HasSingleNonVirtualSlot);
+ }
+#endif
+
+ inline unsigned GetNonVirtualSlotsArraySize()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetNumNonVirtualSlots() * sizeof(PCODE);
+ }
+
+ inline WORD GetNumNonVirtualSlots();
+
+ inline WORD GetNumVirtuals()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ g_IBCLogger.LogMethodTableAccess(this);
+ return GetNumVirtuals_NoLogging();
+ }
+
+ inline WORD GetNumVirtuals_NoLogging()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_wNumVirtuals;
+ }
+
+ inline void SetNumVirtuals (WORD wNumVtableSlots)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_wNumVirtuals = wNumVtableSlots;
+ }
+
+ unsigned GetNumParentVirtuals()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (IsInterface() || IsTransparentProxy()) {
+ return 0;
+ }
+ MethodTable *pMTParent = GetParentMethodTable();
+ g_IBCLogger.LogMethodTableAccess(this);
+ return pMTParent == NULL ? 0 : pMTParent->GetNumVirtuals();
+ }
+
+ static inline DWORD GetVtableOffset()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (sizeof(MethodTable));
+ }
+
+ // Return total methods: virtual, static, and instance method slots.
+ WORD GetNumMethods();
+
+    // Return the number of slots in this MethodTable. This is just information about the layout of the
+    // MethodTable; it should not be used for functionality checks. Do not confuse with GetNumVirtuals()!
+ WORD GetNumVtableSlots()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetNumVirtuals() + GetNumNonVirtualSlots();
+ }
+
+ //-------------------------------------------------------------------
+ // Slots <-> the MethodDesc associated with the slot.
+ //
+
+ MethodDesc* GetMethodDescForSlot(DWORD slot);
+
+ static MethodDesc* GetMethodDescForSlotAddress(PCODE addr, BOOL fSpeculative = FALSE);
+
+ PCODE GetRestoredSlot(DWORD slot);
+
+ // Returns MethodTable that GetRestoredSlot get its values from
+ MethodTable * GetRestoredSlotMT(DWORD slot);
+
+ // Used to map methods on the same slot between instantiations.
+ MethodDesc * GetParallelMethodDesc(MethodDesc * pDefMD);
+
+ //-------------------------------------------------------------------
+ // BoxedEntryPoint MethodDescs.
+ //
+ // Virtual methods on structs have BoxedEntryPoint method descs in their vtable.
+ // See also notes for MethodDesc::FindOrCreateAssociatedMethodDesc. You should
+ // probably be using that function if you need to map between unboxing
+ // stubs and non-unboxing stubs.
+
+ MethodDesc* GetBoxedEntryPointMD(MethodDesc *pMD);
+
+ MethodDesc* GetUnboxedEntryPointMD(MethodDesc *pMD);
+ MethodDesc* GetExistingUnboxedEntryPointMD(MethodDesc *pMD);
+
+ //-------------------------------------------------------------------
+ // FIELD LAYOUT, OBJECT SIZE ETC.
+ //
+
+ inline BOOL HasLayout();
+
+ inline EEClassLayoutInfo *GetLayoutInfo();
+
+ inline BOOL IsBlittable();
+
+ inline BOOL IsManagedSequential();
+
+ inline BOOL HasExplicitSize();
+
+ UINT32 GetNativeSize();
+
+ DWORD GetBaseSize()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return(m_BaseSize);
+ }
+
+ void SetBaseSize(DWORD baseSize)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_BaseSize = baseSize;
+ }
+
+ BOOL IsStringOrArray() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return HasComponentSize();
+ }
+
+ BOOL IsString()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return HasComponentSize() && !IsArray();
+ }
+
+ BOOL HasComponentSize() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_HasComponentSize);
+ }
+
+ // returns random combination of flags if this doesn't have a component size
+ WORD RawGetComponentSize()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+#if BIGENDIAN
+ return *((WORD*)&m_dwFlags + 1);
+#else // !BIGENDIAN
+ return *(WORD*)&m_dwFlags;
+#endif // !BIGENDIAN
+ }
+
+ // returns 0 if this doesn't have a component size
+
+    // The component size is actually a 16-bit WORD, but this method returns SIZE_T to ensure
+    // that SIZE_T is used everywhere for object size computation. This is necessary to support
+    // objects bigger than 2GB.
+ SIZE_T GetComponentSize()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return HasComponentSize() ? RawGetComponentSize() : 0;
+ }
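+
+    // Illustrative instance-size computation for a string/array (a sketch):
+    //
+    //   SIZE_T cbObject = pMT->GetBaseSize()
+    //                   + (SIZE_T)numComponents * pMT->GetComponentSize();
+    //
+    // Keeping the arithmetic in SIZE_T is what makes objects bigger than 2GB computable
+    // correctly on 64-bit platforms.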
+
+ void SetComponentSize(WORD wComponentSize)
+ {
+ LIMITED_METHOD_CONTRACT;
+        // It would be nice to assert here that this is either a string
+        // or an array, but we have no way to know that at this point.
+        //
+        // It would also be nice to assert that the component size is > 0,
+        // but it turns out that for arrays of System.Void we cannot do
+        // that because the component size is 0.
+ SetFlag(enum_flag_HasComponentSize);
+ m_dwFlags = (m_dwFlags & ~0xFFFF) | wComponentSize;
+ }
+
+ inline WORD GetNumInstanceFields();
+
+ inline WORD GetNumStaticFields();
+
+ inline WORD GetNumThreadStaticFields();
+
+    // Note that for value types GetBaseSize returns the size of instance fields for
+    // a boxed value, and GetNumInstanceFieldBytes for an unboxed value.
+ // We place methods like these on MethodTable primarily so we can choose to cache
+ // the information within MethodTable, and so less code manipulates EEClass
+ // objects directly, because doing so can lead to bugs related to generics.
+ //
+ // <TODO> Use m_wBaseSize whenever this is identical to GetNumInstanceFieldBytes.
+ // We would need to reserve a flag for this. </TODO>
+ //
+ inline DWORD GetNumInstanceFieldBytes();
+
+ inline WORD GetNumIntroducedInstanceFields();
+
+ // <TODO> Does this always return the same (or related) size as GetBaseSize()? </TODO>
+ inline DWORD GetAlignedNumInstanceFieldBytes();
+
+
+ // Note: This flag MUST be available even from an unrestored MethodTable - see GcScanRoots in siginfo.cpp.
+ DWORD ContainsPointers()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetFlag(enum_flag_ContainsPointers);
+ }
+ BOOL Collectible()
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_COLLECTIBLE_TYPES
+ return GetFlag(enum_flag_Collectible);
+#else
+ return FALSE;
+#endif
+ }
+ BOOL ContainsPointersOrCollectible()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetFlag(enum_flag_ContainsPointers) || GetFlag(enum_flag_Collectible);
+ }
+
+ OBJECTHANDLE GetLoaderAllocatorObjectHandle();
+ NOINLINE BYTE *GetLoaderAllocatorObjectForGC();
+
+ BOOL IsNotTightlyPacked();
+
+ void SetContainsPointers()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_ContainsPointers);
+ }
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+ inline bool RequiresAlign8()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return !!GetFlag(enum_flag_RequiresAlign8);
+ }
+
+ inline void SetRequiresAlign8()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_RequiresAlign8);
+ }
+#endif // FEATURE_64BIT_ALIGNMENT
+
+ //-------------------------------------------------------------------
+ // FIELD DESCRIPTORS
+ //
+ // Most of this API still lives on EEClass.
+ //
+ // ************************************ WARNING *************
+ // ** !!!!INSTANCE FIELDDESCS ARE REPRESENTATIVES!!!!! **
+ // ** THEY ARE SHARED BY COMPATIBLE GENERIC INSTANTIATIONS **
+ // ************************************ WARNING *************
+
+ // This goes straight to the EEClass
+ // Careful about using this method. If it's possible that fields may have been added via EnC, then
+ // must use the FieldDescIterator as any fields added via EnC won't be in the raw list
+ PTR_FieldDesc GetApproxFieldDescListRaw();
+
+ // This returns a type-exact FieldDesc for a static field, but may still return a representative
+ // for a non-static field.
+ PTR_FieldDesc GetFieldDescByIndex(DWORD fieldIndex);
+
+ DWORD GetIndexForFieldDesc(FieldDesc *pField);
+
+ //-------------------------------------------------------------------
+ // REMOTING and THUNKING.
+ //
+ // We find a lot of information from the VTable. But sometimes the VTable is a
+ // thunking layer rather than the true type's VTable. For instance, context
+ // proxies use a single VTable for proxies to all the types we've loaded.
+ // The following service adjusts a MethodTable based on the supplied instance. As
+ // we add new thunking layers, we just need to teach this service how to navigate
+ // through them.
+#ifdef FEATURE_REMOTING
+ inline BOOL IsTransparentProxy()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_Category_Mask) == enum_flag_Category_TransparentProxy;
+ }
+ inline void SetTransparentProxy()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(GetFlag(enum_flag_Category_Mask) == 0);
+ SetFlag(enum_flag_Category_TransparentProxy);
+ }
+
+ inline BOOL IsMarshaledByRef()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_Category_MarshalByRef_Mask) == enum_flag_Category_MarshalByRef;
+ }
+ inline void SetMarshaledByRef()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(GetFlag(enum_flag_Category_Mask) == 0);
+ SetFlag(enum_flag_Category_MarshalByRef);
+ }
+
+ inline BOOL IsContextful()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_Category_Mask) == enum_flag_Category_Contextful;
+ }
+ inline void SetIsContextful()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(GetFlag(enum_flag_Category_Mask) == 0);
+ SetFlag(enum_flag_Category_Contextful);
+ }
+#else // FEATURE_REMOTING
+ inline BOOL IsTransparentProxy()
+ {
+ return FALSE;
+ }
+
+ BOOL IsMarshaledByRef()
+ {
+ return FALSE;
+ }
+
+ BOOL IsContextful()
+ {
+ return FALSE;
+ }
+#endif // FEATURE_REMOTING
+
+ inline bool RequiresFatDispatchTokens()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return !!GetFlag(enum_flag_RequiresDispatchTokenFat);
+ }
+
+ inline void SetRequiresFatDispatchTokens()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_RequiresDispatchTokenFat);
+ }
+
+ inline bool HasPreciseInitCctors()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return !!GetFlag(enum_flag_HasPreciseInitCctors);
+ }
+
+ inline void SetHasPreciseInitCctors()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_HasPreciseInitCctors);
+ }
+
+#ifdef FEATURE_HFA
+ inline bool IsHFA()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return !!GetFlag(enum_flag_IsHFA);
+ }
+
+ inline void SetIsHFA()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_IsHFA);
+ }
+
+ CorElementType GetHFAType();
+
+ // The managed and unmanaged HFA type can differ for types with layout. The following two methods return the unmanaged HFA type.
+ bool IsNativeHFA();
+ CorElementType GetNativeHFAType();
+#endif // FEATURE_HFA
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+    // Returns true iff the native view of this type requires 64-bit alignment.
+ bool NativeRequiresAlign8();
+#endif // FEATURE_64BIT_ALIGNMENT
+
+ // True if interface casts for an object having this type require more
+ // than a simple scan of the interface map
+ // See JIT_IsInstanceOfInterface
+ inline BOOL InstanceRequiresNonTrivialInterfaceCast()
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ LIMITED_METHOD_CONTRACT;
+
+ return GetFlag(enum_flag_NonTrivialInterfaceCast);
+ }
+
+
+ //-------------------------------------------------------------------
+ // PARENT INTERFACES
+ //
+ unsigned GetNumInterfaces()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_wNumInterfaces;
+ }
+
+ //-------------------------------------------------------------------
+ // CASTING
+ //
+ // There are two variants of each of these methods:
+ //
+ // CanCastToX
+ // - restore encoded pointers on demand
+ // - might throw, might trigger GC
+ // - return type is boolean (FALSE = cannot cast, TRUE = can cast)
+ //
+ // CanCastToXNoGC
+ // - do not restore encoded pointers on demand
+ // - does not throw, does not trigger GC
+ // - return type is three-valued (CanCast, CannotCast, MaybeCast)
+ // - MaybeCast indicates that the test tripped on an encoded pointer
+ // so the caller should now call CanCastToXRestoring if it cares
+ //
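+    // Illustrative caller pattern for the NoGC variants (a sketch):
+    //
+    //   TypeHandle::CastResult res = pMT->CanCastToClassOrInterfaceNoGC(pTargetMT);
+    //   if (res == TypeHandle::MaybeCast)
+    //   {
+    //       // tripped on an encoded pointer - fall back to the variant that can
+    //       // restore pointers (may throw / trigger GC)
+    //       fCanCast = pMT->CanCastToClassOrInterface(pTargetMT, NULL);
+    //   }
+    //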
+ BOOL CanCastToInterface(MethodTable *pTargetMT, TypeHandlePairList *pVisited = NULL);
+ BOOL CanCastToClass(MethodTable *pTargetMT, TypeHandlePairList *pVisited = NULL);
+ BOOL CanCastToClassOrInterface(MethodTable *pTargetMT, TypeHandlePairList *pVisited);
+ BOOL CanCastByVarianceToInterfaceOrDelegate(MethodTable *pTargetMT, TypeHandlePairList *pVisited);
+
+ BOOL CanCastToNonVariantInterface(MethodTable *pTargetMT);
+
+ TypeHandle::CastResult CanCastToInterfaceNoGC(MethodTable *pTargetMT);
+ TypeHandle::CastResult CanCastToClassNoGC(MethodTable *pTargetMT);
+ TypeHandle::CastResult CanCastToClassOrInterfaceNoGC(MethodTable *pTargetMT);
+
+ // The inline part of equivalence check.
+#ifndef DACCESS_COMPILE
+ FORCEINLINE BOOL IsEquivalentTo(MethodTable *pOtherMT COMMA_INDEBUG(TypeHandlePairList *pVisited = NULL));
+
+#ifdef FEATURE_COMINTEROP
+ // This method is public so that TypeHandle has direct access to it
+ BOOL IsEquivalentTo_Worker(MethodTable *pOtherMT COMMA_INDEBUG(TypeHandlePairList *pVisited)); // out-of-line part, SO tolerant
+private:
+ BOOL IsEquivalentTo_WorkerInner(MethodTable *pOtherMT COMMA_INDEBUG(TypeHandlePairList *pVisited)); // out-of-line part, SO intolerant
+#endif // FEATURE_COMINTEROP
+#endif
+
+public:
+ //-------------------------------------------------------------------
+ // THE METHOD TABLE PARENT (SUPERCLASS/BASE CLASS)
+ //
+
+ BOOL HasApproxParent()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (GetWriteableData()->m_dwFlags & MethodTableWriteableData::enum_flag_HasApproxParent) != 0;
+ }
+ inline void SetHasExactParent()
+ {
+ WRAPPER_NO_CONTRACT;
+ FastInterlockAnd(&(GetWriteableDataForWrite()->m_dwFlags), ~MethodTableWriteableData::enum_flag_HasApproxParent);
+ }
+
+
+ // Caller must know that the parent method table is not an encoded fixup
+ inline PTR_MethodTable GetParentMethodTable()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ PRECONDITION(IsParentMethodTablePointerValid());
+
+ TADDR pMT = m_pParentMethodTable;
+#ifdef FEATURE_PREJIT
+ if (GetFlag(enum_flag_HasIndirectParent))
+ pMT = *PTR_TADDR(m_pParentMethodTable + offsetof(MethodTable, m_pParentMethodTable));
+#endif
+ return PTR_MethodTable(pMT);
+ }
+
+ inline static PTR_VOID GetParentMethodTableOrIndirection(PTR_VOID pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ return PTR_VOID(*PTR_TADDR(dac_cast<TADDR>(pMT) + offsetof(MethodTable, m_pParentMethodTable)));
+ }
+
+ inline MethodTable ** GetParentMethodTablePtr()
+ {
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_PREJIT
+ return GetFlag(enum_flag_HasIndirectParent) ?
+ (MethodTable **)(m_pParentMethodTable + offsetof(MethodTable, m_pParentMethodTable)) :(MethodTable **)&m_pParentMethodTable;
+#else
+ return (MethodTable **)&m_pParentMethodTable;
+#endif
+ }
+
+ // Is the parent method table pointer equal to the given argument?
+ BOOL ParentEquals(PTR_MethodTable pMT)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ PRECONDITION(IsParentMethodTablePointerValid());
+ g_IBCLogger.LogMethodTableAccess(this);
+ return GetParentMethodTable() == pMT;
+ }
+
+#ifdef _DEBUG
+ BOOL IsParentMethodTablePointerValid();
+#endif
+
+#ifndef DACCESS_COMPILE
+ void SetParentMethodTable (MethodTable *pParentMethodTable)
+ {
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(!GetFlag(enum_flag_HasIndirectParent));
+ m_pParentMethodTable = (TADDR)pParentMethodTable;
+#ifdef _DEBUG
+ GetWriteableDataForWrite_NoLogging()->SetParentMethodTablePointerValid();
+#endif
+ }
+#endif // !DACCESS_COMPILE
+ MethodTable * GetMethodTableMatchingParentClass(MethodTable * pWhichParent);
+ Instantiation GetInstantiationOfParentClass(MethodTable *pWhichParent);
+
+ //-------------------------------------------------------------------
+ // THE EEClass (Possibly shared between instantiations!).
+ //
+    // Note that it is not generally the case that GetClass()->GetMethodTable() == this.
+
+ PTR_EEClass GetClass();
+
+ inline PTR_EEClass GetClass_NoLogging();
+
+ PTR_EEClass GetClassWithPossibleAV();
+
+ BOOL ValidateWithPossibleAV();
+
+ BOOL IsClassPointerValid();
+
+ static UINT32 GetOffsetOfFlags()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(MethodTable, m_dwFlags);
+ }
+
+ static UINT32 GetIfArrayThenSzArrayFlag()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return enum_flag_Category_IfArrayThenSzArray;
+ }
+
+ //-------------------------------------------------------------------
+ // CONSTRUCTION
+ //
+ // Do not call the following at any time except when creating a method table.
+ // One day we will have proper constructors for method tables and all these
+ // will disappear.
+#ifndef DACCESS_COMPILE
+ inline void SetClass(EEClass *pClass)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pEEClass = pClass;
+ }
+
+ inline void SetCanonicalMethodTable(MethodTable * pMT)
+ {
+ m_pCanonMT = (TADDR)pMT | MethodTable::UNION_METHODTABLE;
+ }
+#endif
+
+ inline void SetHasInstantiation(BOOL fTypicalInstantiation, BOOL fSharedByGenericInstantiations);
+
+ //-------------------------------------------------------------------
+ // INTERFACE IMPLEMENTATION
+ //
+ public:
+ // Faster force-inlined version of ImplementsInterface
+ BOOL ImplementsInterfaceInline(MethodTable *pInterface);
+
+ BOOL ImplementsInterface(MethodTable *pInterface);
+ BOOL ImplementsEquivalentInterface(MethodTable *pInterface);
+
+ MethodDesc *GetMethodDescForInterfaceMethod(TypeHandle ownerType, MethodDesc *pInterfaceMD);
+ MethodDesc *GetMethodDescForInterfaceMethod(MethodDesc *pInterfaceMD); // You can only use this one for non-generic interfaces
+
+ //-------------------------------------------------------------------
+ // INTERFACE MAP.
+ //
+
+ inline PTR_InterfaceInfo GetInterfaceMap();
+
+#ifdef BINDER
+ void SetNumInterfaces(DWORD dwNumInterfaces)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ m_wNumInterfaces = (WORD)dwNumInterfaces;
+ _ASSERTE(m_wNumInterfaces == dwNumInterfaces);
+ }
+#endif
+
+#ifndef DACCESS_COMPILE
+ void SetInterfaceMap(WORD wNumInterfaces, InterfaceInfo_t* iMap);
+#endif
+
+ inline int HasInterfaceMap()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (m_wNumInterfaces != 0);
+ }
+
+ // Where possible, use this iterator over the interface map instead of accessing the map directly
+ // That way we can easily change the implementation of the map
+ class InterfaceMapIterator
+ {
+ friend class MethodTable;
+
+ private:
+ PTR_InterfaceInfo m_pMap;
+ DWORD m_i;
+ DWORD m_count;
+
+ InterfaceMapIterator(MethodTable *pMT)
+ : m_pMap(pMT->GetInterfaceMap()),
+ m_i((DWORD) -1),
+ m_count(pMT->GetNumInterfaces())
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ InterfaceMapIterator(MethodTable *pMT, DWORD index)
+ : m_pMap(pMT->GetInterfaceMap() + index),
+ m_i(index),
+ m_count(pMT->GetNumInterfaces())
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(index >= 0 && index < m_count);
+ }
+
+ public:
+ InterfaceInfo_t* GetInterfaceInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pMap;
+ }
+
+ // Move to the next item in the map, returning TRUE if an item
+ // exists or FALSE if we've run off the end
+ inline BOOL Next()
+ {
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(!Finished());
+ if (m_i != (DWORD) -1)
+ m_pMap++;
+ return (++m_i < m_count);
+ }
+
+ // Have we iterated over all of the items?
+ BOOL Finished()
+ {
+ return (m_i == m_count);
+ }
+
+ // Get the interface at the current position
+ inline PTR_MethodTable GetInterface()
+ {
+ CONTRACT(PTR_MethodTable)
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ SUPPORTS_DAC;
+ PRECONDITION(m_i != (DWORD) -1 && m_i < m_count);
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN (m_pMap->GetMethodTable());
+ }
+
+#ifndef DACCESS_COMPILE
+ void SetInterface(MethodTable *pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_pMap->SetMethodTable(pMT);
+ }
+#endif
+
+ DWORD GetIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_i;
+ }
+ }; // class InterfaceMapIterator
+
+ // Create a new iterator over the interface map
+ // The iterator starts just before the first item in the map
+ InterfaceMapIterator IterateInterfaceMap()
+ {
+ WRAPPER_NO_CONTRACT;
+ return InterfaceMapIterator(this);
+ }
+
+ // Create a new iterator over the interface map, starting at the index specified
+ InterfaceMapIterator IterateInterfaceMapFrom(DWORD index)
+ {
+ WRAPPER_NO_CONTRACT;
+ return InterfaceMapIterator(this, index);
+ }
+
+ //-------------------------------------------------------------------
+ // ADDITIONAL INTERFACE MAP DATA
+ //
+
+ // We store extra info (flag bits) for interfaces implemented on this MethodTable in a separate optional
+ // location for better data density (if we put them in the interface map directly data alignment could
+ // have us using 32 or even 64 bits to represent a single boolean value). Currently the only flag we
+ // persist is IsDeclaredOnClass (was the interface explicitly declared by this class).
+
+ // Currently we always store extra info whenever we have an interface map (in the future you could imagine
+ // this being limited to those scenarios in which at least one of the interfaces has a non-default value
+ // for a flag).
+ inline BOOL HasExtraInterfaceInfo()
+ {
+ SUPPORTS_DAC;
+ return HasInterfaceMap();
+ }
+
+ // Count of interfaces that can have their extra info stored inline in the optional data structure itself
+ // (once the interface count exceeds this limit the optional data slot will instead point to a buffer with
+ // the information).
+ enum { kInlinedInterfaceInfoThreshhold = sizeof(TADDR) * 8 };
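+
+    // For illustration (a sketch): on a 64-bit platform the threshold is 64, i.e. up to 64
+    // interfaces can have their IsDeclaredOnClass bit packed, one bit per interface map
+    // entry, directly into the TADDR-sized optional slot; past that the slot instead points
+    // to an out-of-line buffer of GetExtraInterfaceInfoSize(cInterfaces) bytes.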
+
+ // Calculate how many bytes of storage will be required to track additional information for interfaces.
+ // This will be zero if there are no interfaces, but can also be zero for small numbers of interfaces as
+ // well, and callers should be ready to handle this.
+ static SIZE_T GetExtraInterfaceInfoSize(DWORD cInterfaces);
+
+ // Called after GetExtraInterfaceInfoSize above to setup a new MethodTable with the additional memory to
+ // track extra interface info. If there are a non-zero number of interfaces implemented on this class but
+ // GetExtraInterfaceInfoSize() returned zero, this call must still be made (with a NULL argument).
+ void InitializeExtraInterfaceInfo(PVOID pInfo);
+
+#ifdef FEATURE_PREJIT
+ // Ngen support.
+ void SaveExtraInterfaceInfo(DataImage *pImage);
+ void FixupExtraInterfaceInfo(DataImage *pImage);
+#endif // FEATURE_PREJIT
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegionsForExtraInterfaceInfo();
+#endif // DACCESS_COMPILE
+
+ // For the given interface in the map (specified via map index) mark the interface as declared explicitly
+ // on this class. This is not legal for dynamically added interfaces (as used by RCWs).
+ void SetInterfaceDeclaredOnClass(DWORD index);
+
+ // For the given interface in the map (specified via map index) return true if the interface was declared
+ // explicitly on this class.
+ bool IsInterfaceDeclaredOnClass(DWORD index);
+
+ //-------------------------------------------------------------------
+ // VIRTUAL/INTERFACE CALL RESOLUTION
+ //
+ // These should probably go in method.hpp since they don't have
+ // much to do with method tables per se.
+ //
+
+ // get the method desc given the interface method desc
+ static MethodDesc *GetMethodDescForInterfaceMethodAndServer(TypeHandle ownerType, MethodDesc *pItfMD, OBJECTREF *pServer);
+
+#ifdef FEATURE_COMINTEROP
+ // get the method desc given the interface method desc on a COM implemented server (if fNullOk is set then NULL is an allowable return value)
+ MethodDesc *GetMethodDescForComInterfaceMethod(MethodDesc *pItfMD, bool fNullOk);
+#endif // FEATURE_COMINTEROP
+
+
+ // Try a partial resolve of the constraint call, up to generic code sharing.
+ //
+ // Note that this will not necessarily resolve the call exactly, since we might be compiling
+ // shared generic code - it may just resolve it to a candidate suitable for
+ // JIT compilation, and require a runtime lookup for the actual code pointer
+ // to call.
+ //
+ // Return NULL if the call could not be resolved, e.g. because it is invoked
+ // on a type that inherits the implementation of the method from System.Object
+ // or System.ValueType.
+ //
+ // Always returns an unboxed entry point with a uniform calling convention.
+ MethodDesc * TryResolveConstraintMethodApprox(
+ TypeHandle ownerType,
+ MethodDesc * pMD,
+ BOOL * pfForceUseRuntimeLookup = NULL);
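+
+    // Illustrative caller pattern (a sketch, not the actual JIT-interface code):
+    //
+    //   BOOL fForceRuntimeLookup = FALSE;
+    //   MethodDesc * pTarget = pConstrainedMT->TryResolveConstraintMethodApprox(
+    //       ownerType, pInterfaceMD, &fForceRuntimeLookup);
+    //   if (pTarget == NULL || fForceRuntimeLookup)
+    //   {
+    //       // could not resolve to a direct call - emit a virtual call / runtime lookup
+    //   }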
+
+ //-------------------------------------------------------------------
+ // CONTRACT IMPLEMENTATIONS
+ //
+
+ inline BOOL HasDispatchMap()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetDispatchMap() != NULL;
+ }
+
+ PTR_DispatchMap GetDispatchMap();
+
+ inline BOOL HasDispatchMapSlot()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_HasDispatchMapSlot);
+ }
+
+#ifndef DACCESS_COMPILE
+ void SetDispatchMap(DispatchMap *pDispatchMap)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasDispatchMapSlot());
+
+ TADDR pSlot = GetMultipurposeSlotPtr(enum_flag_HasDispatchMapSlot, c_DispatchMapSlotOffsets);
+ RelativePointer<PTR_DispatchMap>::SetValueAtPtr(pSlot, pDispatchMap);
+ }
+#endif // !DACCESS_COMPILE
+
+protected:
+ BOOL FindEncodedMapDispatchEntry(UINT32 typeID,
+ UINT32 slotNumber,
+ DispatchMapEntry *pEntry);
+
+ BOOL FindIntroducedImplementationTableDispatchEntry(UINT32 slotNumber,
+ DispatchMapEntry *pEntry,
+ BOOL fVirtualMethodsOnly);
+
+ BOOL FindDispatchEntryForCurrentType(UINT32 typeID,
+ UINT32 slotNumber,
+ DispatchMapEntry *pEntry);
+
+ BOOL FindDispatchEntry(UINT32 typeID,
+ UINT32 slotNumber,
+ DispatchMapEntry *pEntry);
+
+public:
+ BOOL FindDispatchImpl(
+ UINT32 typeID,
+ UINT32 slotNumber,
+ DispatchSlot * pImplSlot);
+
+ DispatchSlot FindDispatchSlot(UINT32 typeID, UINT32 slotNumber);
+
+ DispatchSlot FindDispatchSlot(DispatchToken tok);
+
+    // You must use the second of these two overloads if there is any chance the pMD is a method
+    // on a generic interface such as IComparable<T> (which it normally can be). The
+    // ownerType is used to provide an exact qualification in the case where the pMD is
+    // a shared method descriptor.
+ DispatchSlot FindDispatchSlotForInterfaceMD(MethodDesc *pMD);
+ DispatchSlot FindDispatchSlotForInterfaceMD(TypeHandle ownerType, MethodDesc *pMD);
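+
+    // Illustrative call (hypothetical variables): for a pMD declared on a generic interface,
+    // pass the exact instantiated owner, e.g.:
+    //   DispatchSlot impl = pMT->FindDispatchSlotForInterfaceMD(TypeHandle(pIComparableOfStringMT), pMD);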
+
+ MethodDesc *ReverseInterfaceMDLookup(UINT32 slotNumber);
+
+ // Lookup, does not assign if not already done.
+ UINT32 LookupTypeID();
+ // Lookup, will assign ID if not already done.
+ UINT32 GetTypeID();
+
+
+ MethodTable *LookupDispatchMapType(DispatchMapTypeID typeID);
+
+ MethodDesc *GetIntroducingMethodDesc(DWORD slotNumber);
+
+ // Determines whether all methods in the given interface have their final implementing
+ // slot in a parent class. I.e. if this returns TRUE, it is trivial (no VSD lookup) to
+ // dispatch pItfMT methods on this class if one knows how to dispatch them on pParentMT.
+ BOOL ImplementsInterfaceWithSameSlotsAsParent(MethodTable *pItfMT, MethodTable *pParentMT);
+
+ // Determines whether all methods in the given interface have their final implementation
+ // in a parent class. I.e. if this returns TRUE, this class behaves the same as pParentMT
+ // when it comes to dispatching pItfMT methods.
+ BOOL HasSameInterfaceImplementationAsParent(MethodTable *pItfMT, MethodTable *pParentMT);
+
+public:
+ static MethodDesc *MapMethodDeclToMethodImpl(MethodDesc *pMDDecl);
+
+ //-------------------------------------------------------------------
+ // FINALIZATION SEMANTICS
+ //
+
+ DWORD CannotUseSuperFastHelper()
+ {
+ WRAPPER_NO_CONTRACT;
+ return HasFinalizer();
+ }
+
+ void SetHasFinalizer()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_HasFinalizer);
+ }
+
+ void SetHasCriticalFinalizer()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_HasCriticalFinalizer);
+ }
+ // Does this class have non-trivial finalization requirements?
+ DWORD HasFinalizer()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_HasFinalizer);
+ }
+ // Must this class be finalized during a rude appdomain unload, and
+    // must its finalizer run in a different order from normal finalizers?
+ DWORD HasCriticalFinalizer() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetFlag(enum_flag_HasCriticalFinalizer);
+ }
+
+ // Have the backout methods (Finalizer, Dispose, ReleaseHandle etc.) been prepared for this type? This currently only happens
+ // for types derived from CriticalFinalizerObject.
+ BOOL CriticalTypeHasBeenPrepared()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasCriticalFinalizer());
+ return GetWriteableData()->CriticalTypeHasBeenPrepared();
+ }
+
+ void SetCriticalTypeHasBeenPrepared()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(HasCriticalFinalizer());
+ GetWriteableDataForWrite()->SetCriticalTypeHasBeenPrepared();
+ }
+
+ //-------------------------------------------------------------------
+ // STATIC FIELDS
+ //
+
+ DWORD GetOffsetOfFirstStaticHandle();
+ DWORD GetOffsetOfFirstStaticMT();
+
+#ifndef DACCESS_COMPILE
+ inline PTR_BYTE GetNonGCStaticsBasePointer();
+ inline PTR_BYTE GetGCStaticsBasePointer();
+ inline PTR_BYTE GetNonGCThreadStaticsBasePointer();
+ inline PTR_BYTE GetGCThreadStaticsBasePointer();
+#endif //!DACCESS_COMPILE
+
+ inline PTR_BYTE GetNonGCThreadStaticsBasePointer(PTR_Thread pThread, PTR_AppDomain pDomain);
+ inline PTR_BYTE GetGCThreadStaticsBasePointer(PTR_Thread pThread, PTR_AppDomain pDomain);
+
+ inline DWORD IsDynamicStatics()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return !TestFlagWithMask(enum_flag_StaticsMask, enum_flag_StaticsMask_NonDynamic);
+ }
+
+ inline void SetDynamicStatics(BOOL fGeneric)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(fGeneric ? enum_flag_StaticsMask_Generics : enum_flag_StaticsMask_Dynamic);
+ }
+
+ inline void SetHasBoxedRegularStatics()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_HasBoxedRegularStatics);
+ }
+
+ inline DWORD HasBoxedRegularStatics()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetFlag(enum_flag_HasBoxedRegularStatics);
+ }
+
+ DWORD HasFixedAddressVTStatics();
+
+ //-------------------------------------------------------------------
+ // PER-INSTANTIATION STATICS INFO
+ //
+
+
+ void SetupGenericsStaticsInfo(FieldDesc* pStaticFieldDescs);
+
+ BOOL HasGenericsStaticsInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_StaticsMask_Generics);
+ }
+
+ PTR_FieldDesc GetGenericsStaticFieldDescs()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(HasGenericsStaticsInfo());
+ return GetGenericsStaticsInfo()->m_pFieldDescs;
+ }
+
+ BOOL HasCrossModuleGenericStaticsInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return TestFlagWithMask(enum_flag_StaticsMask, enum_flag_StaticsMask_CrossModuleGenerics);
+ }
+
+ PTR_Module GetGenericsStaticsModuleAndID(DWORD * pID);
+
+ WORD GetNumHandleRegularStatics();
+
+ WORD GetNumBoxedRegularStatics ();
+ WORD GetNumBoxedThreadStatics ();
+
+ //-------------------------------------------------------------------
+ // DYNAMIC ID
+ //
+
+ // Used for generics and reflection emit in memory
+ DWORD GetModuleDynamicEntryID();
+#ifndef BINDER
+ Module* GetModuleForStatics();
+#else // BINDER
+ MdilModule* GetModuleForStatics();
+#endif
+ //-------------------------------------------------------------------
+ // GENERICS DICT INFO
+ //
+
+    // Number of generic arguments, whether this is a method table for
+    // a generic type instantiation, e.g. List<string>, or the "generic" MethodTable,
+    // e.g. for List.
+#ifdef BINDER
+ DWORD GetNumGenericArgs();
+#else
+ inline DWORD GetNumGenericArgs()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (HasInstantiation())
+ return (DWORD) (GetGenericsDictInfo()->m_wNumTyPars);
+ else
+ return 0;
+ }
+#endif
+
+ inline DWORD GetNumDicts()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (HasPerInstInfo())
+ {
+ PTR_GenericsDictInfo pDictInfo = GetGenericsDictInfo();
+ return (DWORD) (pDictInfo->m_wNumDicts);
+ }
+ else
+ return 0;
+ }
+
+ //-------------------------------------------------------------------
+ // OBJECTS
+ //
+
+ OBJECTREF Allocate();
+
+ // This flavor of Allocate is more efficient, but can only be used
+ // if IsRestored(), CheckInstanceActivated(), IsClassInited() are known to be true.
+ // A sufficient condition is that another instance of the exact same type already
+ // exists in the same appdomain. It's currently called only from Delegate.Combine
+ // via COMDelegate::InternalAllocLike.
+ OBJECTREF AllocateNoChecks();
+
+ OBJECTREF Box(void* data);
+ OBJECTREF FastBox(void** data);
+#ifndef DACCESS_COMPILE
+ BOOL UnBoxInto(void *dest, OBJECTREF src);
+ void UnBoxIntoUnchecked(void *dest, OBJECTREF src);
+#endif
+
+#ifdef _DEBUG
+ // Used for debugging class layout. Dumps to the debug console
+ // when debug is true.
+ void DebugDumpVtable(LPCUTF8 szClassName, BOOL fDebug);
+ void Debug_DumpInterfaceMap(LPCSTR szInterfaceMapPrefix);
+ void Debug_DumpDispatchMap();
+ void DebugDumpFieldLayout(LPCUTF8 pszClassName, BOOL debug);
+ void DebugRecursivelyDumpInstanceFields(LPCUTF8 pszClassName, BOOL debug);
+ void DebugDumpGCDesc(LPCUTF8 pszClassName, BOOL debug);
+#endif //_DEBUG
+
+ inline BOOL IsAgileAndFinalizable()
+ {
+ LIMITED_METHOD_CONTRACT;
+        // Right now, System.Thread is the only case of this.
+        // Things should stay this way - please don't change it without talking to the EE team.
+ return this == g_pThreadClass;
+ }
+
+
+ //-------------------------------------------------------------------
+ // ENUMS, DELEGATES, VALUE TYPES, ARRAYS
+ //
+ // #KindsOfElementTypes
+    // GetInternalCorElementType() retrieves the internal representation of the type. It's not always
+    // appropriate to use this. For example, we treat enums as their underlying type, and some structs are
+    // optimized to be ints. To get the signature type or the verifier type (same as signature except that
+    // enums are normalized to the primitive type that underlies them), use the APIs in Typehandle.h
+ //
+ // * code:TypeHandle.GetSignatureCorElementType()
+ // * code:TypeHandle.GetVerifierCorElementType()
+ // * code:TypeHandle.GetInternalCorElementType()
+ CorElementType GetInternalCorElementType();
+ void SetInternalCorElementType(CorElementType _NormType);
+
+ // See code:TypeHandle::GetVerifierCorElementType for description
+ CorElementType GetVerifierCorElementType();
+
+ // See code:TypeHandle::GetSignatureCorElementType for description
+ CorElementType GetSignatureCorElementType();
+
+    // A true primitive is one whose GetVerifierCorElementType() ==
+    //  ELEMENT_TYPE_I,
+    //  ELEMENT_TYPE_I4,
+    //  ELEMENT_TYPE_TYPEDREF etc.
+    // Note that GetInternalCorElementType might return these same values for some additional
+    // types such as Enums and some structs.
+ BOOL IsTruePrimitive();
+ void SetIsTruePrimitive();
+
+    // Is this a delegate? Returns false for System.Delegate and System.MulticastDelegate.
+ inline BOOL IsDelegate()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ // We do not allow single cast delegates anymore, just check for multicast delegate
+ _ASSERTE(g_pMulticastDelegateClass);
+ return ParentEquals(g_pMulticastDelegateClass);
+ }
+
+ // Is this System.Object?
+ inline BOOL IsObjectClass()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(g_pObjectClass);
+ return (this == g_pObjectClass);
+ }
+#ifndef BINDER
+ // Is this System.ValueType?
+ inline DWORD IsValueTypeClass()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(g_pValueTypeClass);
+ return (this == g_pValueTypeClass);
+ }
+#else // BINDER
+ // Is this System.ValueType?
+ bool IsValueTypeClass();
+
+ // Is this System.Enum?
+ bool IsEnumClass();
+#endif // BINDER
+
+    // Is this a value type? Returns false for System.ValueType and System.Enum.
+ inline BOOL IsValueType();
+
+ // Returns "TRUE" iff "this" is a struct type such that return buffers used for returning a value
+ // of this type must be stack-allocated. This will generally be true only if the struct
+ // contains GC pointers, and does not exceed some size limit. Maintaining this as an invariant allows
+ // an optimization: the JIT may assume that return buffer pointers for return types for which this predicate
+ // returns TRUE are always stack allocated, and thus, that stores to the GC-pointer fields of such return
+ // buffers do not require GC write barriers.
+ BOOL IsStructRequiringStackAllocRetBuf();
+
+    // Is this an enum? Returns false for System.Enum.
+ inline BOOL IsEnum();
+
+    // Is this an array? Returns false for System.Array.
+ inline BOOL IsArray()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_Category_Array_Mask) == enum_flag_Category_Array;
+ }
+ inline BOOL IsMultiDimArray()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ PRECONDITION(IsArray());
+ return !GetFlag(enum_flag_Category_IfArrayThenSzArray);
+ }
+
+ // Returns true if this type is Nullable<T> for some T.
+ inline BOOL IsNullable()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_Category_Mask) == enum_flag_Category_Nullable;
+ }
+
+ inline void SetIsNullable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(GetFlag(enum_flag_Category_Mask) == enum_flag_Category_ValueType);
+ SetFlag(enum_flag_Category_Nullable);
+ }
+
+ inline BOOL IsStructMarshalable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(!IsInterface());
+ return GetFlag(enum_flag_IfNotInterfaceThenMarshalable);
+ }
+
+ inline void SetStructMarshalable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(!IsInterface());
+ SetFlag(enum_flag_IfNotInterfaceThenMarshalable);
+ }
+
+ // The following methods are only valid for the
+ // method tables for array types. These MTs may
+ // be shared between array types and thus GetArrayElementTypeHandle
+ // may only be approximate. If you need the exact element type handle then
+ // you should probably be calling GetArrayElementTypeHandle on a TypeHandle,
+ // or an ArrayTypeDesc, or on an object reference that is known to be an array,
+ // e.g. a BASEARRAYREF.
+ //
+ // At the moment only the object[] MethodTable is shared between array types.
+ // In the future the amount of sharing of method tables is likely to be increased.
+ CorElementType GetArrayElementType();
+ DWORD GetRank();
+
+ TypeHandle GetApproxArrayElementTypeHandle()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE (IsArray());
+ return TypeHandle::FromTAddr(m_ElementTypeHnd);
+ }
+
+ void SetApproxArrayElementTypeHandle(TypeHandle th)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ m_ElementTypeHnd = th.AsTAddr();
+ }
+
+ TypeHandle * GetApproxArrayElementTypeHandlePtr()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (TypeHandle *)&m_ElementTypeHnd;
+ }
+
+ static inline DWORD GetOffsetOfArrayElementTypeHandle()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(MethodTable, m_ElementTypeHnd);
+ }
+
+ //-------------------------------------------------------------------
+ // UNDERLYING METADATA
+ //
+
+
+ // Get the RID/token for the metadata for the corresponding type declaration
+ unsigned GetTypeDefRid();
+ unsigned GetTypeDefRid_NoLogging();
+
+ inline mdTypeDef GetCl()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return TokenFromRid(GetTypeDefRid(), mdtTypeDef);
+ }
+
+ inline mdTypeDef GetCl_NoLogging()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return TokenFromRid(GetTypeDefRid_NoLogging(), mdtTypeDef);
+ }
+
+ void SetCl(mdTypeDef token);
+
+#ifdef _DEBUG
+// Make this smaller in debug builds to exercise the overflow codepath
+#define METHODTABLE_TOKEN_OVERFLOW 0xFFF
+#else
+#define METHODTABLE_TOKEN_OVERFLOW 0xFFFF
+#endif
+
+ BOOL HasTokenOverflow()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_wToken == METHODTABLE_TOKEN_OVERFLOW;
+ }
+
+ // Get the MD Import for the metadata for the corresponding type declaration
+ IMDInternalImport* GetMDImport();
+
+ mdTypeDef GetEnclosingCl();
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ //-------------------------------------------------------------------
+ // REMOTEABLE METHOD INFO
+ //
+#ifdef FEATURE_REMOTING
+ BOOL HasRemotableMethodInfo();
+
+ PTR_CrossDomainOptimizationInfo GetRemotableMethodInfo()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ _ASSERTE(HasRemotableMethodInfo());
+ return *GetRemotableMethodInfoPtr();
+ }
+ void SetupRemotableMethodInfo(AllocMemTracker *pamTracker);
+
+ //-------------------------------------------------------------------
+ // REMOTING VTS INFO
+ //
+ // This optional addition to MethodTables allows us to locate VTS (Version
+ // Tolerant Serialization) event callback methods and optionally
+ // serializable fields quickly. We also store the NotSerialized field
+ // information in here so remoting can avoid one more touch of the metadata
+ // during cross appdomain cloning.
+ //
+
+ void SetHasRemotingVtsInfo();
+ BOOL HasRemotingVtsInfo();
+ PTR_RemotingVtsInfo GetRemotingVtsInfo();
+ PTR_RemotingVtsInfo AllocateRemotingVtsInfo( AllocMemTracker *pamTracker, DWORD dwNumFields);
+#endif // FEATURE_REMOTING
+
+#ifdef FEATURE_COMINTEROP
+ void SetHasGuidInfo();
+ BOOL HasGuidInfo();
+ void SetHasCCWTemplate();
+ BOOL HasCCWTemplate();
+ void SetHasRCWPerTypeData();
+ BOOL HasRCWPerTypeData();
+#endif // FEATURE_COMINTEROP
+
+ // The following two methods produce correct results only if this type is
+ // marked Serializable (verified by assert in checked builds) and the field
+ // in question was introduced in this type (the index is the FieldDesc
+ // index).
+ BOOL IsFieldNotSerialized(DWORD dwFieldIndex);
+ BOOL IsFieldOptionallySerialized(DWORD dwFieldIndex);
+
+ //-------------------------------------------------------------------
+ // DICTIONARIES FOR GENERIC INSTANTIATIONS
+ //
+ // The PerInstInfo pointer is a pointer to per-instantiation pointer table,
+ // each entry of which points to an instantiation "dictionary"
+ // for an instantiated type; the last pointer points to a
+ // dictionary which is specific to this method table, previous
+ // entries point to dictionaries in superclasses. Instantiated interfaces and structs
+ // have just single dictionary (no inheritance).
+ //
+ // GetNumDicts() gives the number of dictionaries.
+ //
+ //@nice GENERICS: instead of a separate table of pointers, put the pointers
+ // in the vtable itself. Advantages:
+ // * Time: we save an indirection as we don't need to go through PerInstInfo first.
+ // * Space: no need for PerInstInfo (1 word)
+ // Problem is that lots of code assumes that the vtable is filled
+ // uniformly with pointers to MethodDesc stubs.
+ //
+ // The dictionary for the method table is just an array of handles for
+ // type parameters in the following cases:
+ // * instantiated interfaces (no code)
+ // * instantiated types whose code is not shared
+ // Otherwise, it starts with the type parameters and then has a fixed
+ // number of slots for handles (types & methods)
+ // that are filled in lazily at run-time. Finally there is a "spill-bucket"
+ // pointer used when the dictionary gets filled.
+ // In summary:
+ // typar_1 type handle for first type parameter
+ // ...
+ // typar_n type handle for last type parameter
+ // slot_1 slot for first run-time handle (initially null)
+ // ...
+ // slot_m slot for last run-time handle (initially null)
+ // next_bucket pointer to spill bucket (possibly null)
+ // The spill bucket contains just run-time handle slots.
+ // (Alternative: continue chaining buckets.
+ // Advantage: no need to deallocate when growing dictionaries.
+ // Disadvantage: more indirections required at run-time.)
+ //
+ // The layout of dictionaries is determined by GetClass()->GetDictionaryLayout()
+ // Thus the layout can vary between incompatible instantiations. This is sometimes useful because individual type
+ // parameters may or may not be shared. For example, consider a two parameter class Dict<K,D>. In instantiations shared with
+ // Dict<double,string> any reference to K is known at JIT-compile-time (it's double) but any token containing D
+ // must have a dictionary entry. On the other hand, for instantiations shared with Dict<string,double> the opposite holds.
+ //
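+    // Concrete example (illustrative only): following the layout above, the dictionary for an
+    // instantiation shared with Dict<double,string> (i.e. Dict<double,__Canon>) that needs two
+    // lazily-filled slots would look like:
+    //   [0] type handle for double    (typar_1)
+    //   [1] type handle for __Canon   (typar_2)
+    //   [2] NULL                      (slot_1, filled lazily at run-time)
+    //   [3] NULL                      (slot_2, filled lazily at run-time)
+    //   [4] NULL                      (next_bucket, spill-bucket pointer)
+    //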
+
+ // Return a pointer to the per-instantiation information. See field itself for comments.
+ DPTR(PTR_Dictionary) GetPerInstInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(HasPerInstInfo());
+ return dac_cast<DPTR(PTR_Dictionary)>(m_pMultipurposeSlot1);
+ }
+ BOOL HasPerInstInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_HasPerInstInfo) && !IsArray();
+ }
+#ifndef DACCESS_COMPILE
+ static inline DWORD GetOffsetOfPerInstInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(MethodTable, m_pPerInstInfo);
+ }
+ void SetPerInstInfo(Dictionary** pPerInstInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pPerInstInfo = pPerInstInfo;
+ }
+ void SetDictInfo(WORD numDicts, WORD numTyPars)
+ {
+ WRAPPER_NO_CONTRACT;
+ GenericsDictInfo* pInfo = GetGenericsDictInfo();
+ pInfo->m_wNumDicts = numDicts;
+ pInfo->m_wNumTyPars = numTyPars;
+ }
+#endif // !DACCESS_COMPILE
+ PTR_GenericsDictInfo GetGenericsDictInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ // GenericsDictInfo is stored at negative offset of the dictionary
+ return dac_cast<PTR_GenericsDictInfo>(GetPerInstInfo()) - 1;
+ }
+
+ // Get a pointer to the dictionary for this instantiated type
+ // (The instantiation is stored in the initial slots of the dictionary)
+ // If not instantiated, return NULL
+ Dictionary* GetDictionary();
+
+#ifdef FEATURE_PREJIT
+ //
+ // After the zapper compiles all code in a module it may attempt
+ // to populate entries in all dictionaries
+ // associated with generic types. This is an optional step - nothing will
+ // go wrong at runtime except we may get more one-off calls to JIT_GenericHandle.
+ // Although these are one-off we prefer to avoid them since they touch metadata
+ // pages.
+ //
+ // Fully populating a dictionary may in theory load more types. However
+ // for the moment only those entries that refer to types that
+ // are already loaded will be filled in.
+ void PrepopulateDictionary(DataImage * image, BOOL nonExpansive);
+#endif // FEATURE_PREJIT
+
+    // Return a substitution suitable for interpreting
+    // the metadata in the parent class, assuming we already have a subst.
+    // suitable for interpreting the current class.
+ //
+ // If, for example, the definition for the current class is
+ // D<T> : C<List<T>, T[] >
+ // then this (for C<!0,!1>) will be
+ // 0 --> List<T>
+ // 1 --> T[]
+ // added to the chain of substitutions.
+ //
+ // Subsequently, if the definition for C is
+ // C<T, U> : B< Dictionary<T, U> >
+ // then the next subst (for B<!0>) will be
+ // 0 --> Dictionary< List<T>, T[] >
+
+ Substitution GetSubstitutionForParent(const Substitution *pSubst);
+
+ inline DWORD GetAttrClass();
+
+ inline BOOL IsSerializable();
+#ifdef FEATURE_REMOTING
+ inline BOOL CannotBeBlittedByObjectCloner();
+#endif
+ inline BOOL HasFieldsWhichMustBeInited();
+ inline BOOL SupportsAutoNGen();
+ inline BOOL RunCCTorAsIfNGenImageExists();
+
+ //-------------------------------------------------------------------
+ // SECURITY SEMANTICS
+ //
+
+
+ BOOL IsNoSecurityProperties()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetFlag(enum_flag_NoSecurityProperties);
+ }
+
+ void SetNoSecurityProperties()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_NoSecurityProperties);
+ }
+
+ void SetIsAsyncPinType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(GetFlag(enum_flag_Category_Mask) == 0);
+ SetFlag(enum_flag_Category_AsyncPin);
+ }
+
+ BOOL IsAsyncPinType()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_Category_Mask) == enum_flag_Category_AsyncPin;
+ }
+
+ inline BOOL IsPreRestored() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return GetFlag(enum_flag_IsPreRestored);
+ }
+
+ //-------------------------------------------------------------------
+ // THE EXPOSED CLASS OBJECT
+ //
+ /*
+ * m_ExposedClassObject is a RuntimeType instance for this class. But
+ * do NOT use it for Arrays or remoted objects! All arrays of objects
+ * share the same MethodTable/EEClass.
+ * @GENERICS: this is per-instantiation data
+ */
+    // There are two versions of GetManagedClassObject. The GetManagedClassObject()
+ // method will get the class object. If it doesn't exist it will be created.
+ // GetManagedClassObjectIfExists() will return null if the Type object doesn't exist.
+ OBJECTREF GetManagedClassObject();
+ OBJECTREF GetManagedClassObjectIfExists();
+
+
+ // ------------------------------------------------------------------
+ // Private part of MethodTable
+ // ------------------------------------------------------------------
+
+ inline void SetWriteableData(PTR_MethodTableWriteableData pMTWriteableData)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pMTWriteableData);
+ m_pWriteableData = pMTWriteableData;
+ }
+
+ inline PTR_Const_MethodTableWriteableData GetWriteableData() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ g_IBCLogger.LogMethodTableWriteableDataAccess(this);
+ return m_pWriteableData;
+ }
+
+ inline PTR_Const_MethodTableWriteableData GetWriteableData_NoLogging() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pWriteableData;
+ }
+
+ inline PTR_MethodTableWriteableData GetWriteableDataForWrite()
+ {
+ LIMITED_METHOD_CONTRACT;
+ g_IBCLogger.LogMethodTableWriteableDataWriteAccess(this);
+ return m_pWriteableData;
+ }
+
+ inline PTR_MethodTableWriteableData GetWriteableDataForWrite_NoLogging()
+ {
+ return m_pWriteableData;
+ }
+
+ //-------------------------------------------------------------------
+ // Remoting related
+ //
+ inline BOOL IsRemotingConfigChecked()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetWriteableData()->IsRemotingConfigChecked();
+ }
+ inline void SetRemotingConfigChecked()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GetWriteableDataForWrite()->SetRemotingConfigChecked();
+ }
+ inline void TrySetRemotingConfigChecked()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ GetWriteableDataForWrite()->TrySetRemotingConfigChecked();
+ }
+ inline BOOL RequiresManagedActivation()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetWriteableData()->RequiresManagedActivation();
+ }
+ inline void SetRequiresManagedActivation()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GetWriteableDataForWrite()->SetRequiresManagedActivation();
+ }
+
+ // Determines whether the type may require managed activation. The actual answer is known later
+ // once the remoting config is checked.
+ inline BOOL MayRequireManagedActivation()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return IsMarshaledByRef();
+ }
+
+ //-------------------------------------------------------------------
+ // The GUID Info
+ // Used by COM interop to get GUIDs (IIDs and CLSIDs)
+
+ // Get/store cached GUID information
+ PTR_GuidInfo GetGuidInfo();
+ void SetGuidInfo(GuidInfo* pGuidInfo);
+
+ // Get and cache the GUID for this interface/class
+ HRESULT GetGuidNoThrow(GUID *pGuid, BOOL bGenerateIfNotFound, BOOL bClassic = TRUE);
+
+ // Get and cache the GUID for this interface/class
+ void GetGuid(GUID *pGuid, BOOL bGenerateIfNotFound, BOOL bClassic = TRUE);
+
+#ifdef FEATURE_COMINTEROP
+    // Get the GUID used for WinRT interop
+    //   * for projected generic interfaces returns the equivalent WinRT type's GUID
+    //   * for everything else returns the same GUID as GetGuid(pGuid, TRUE)
+ BOOL GetGuidForWinRT(GUID *pGuid);
+
+private:
+ // Create RCW data associated with this type.
+ RCWPerTypeData *CreateRCWPerTypeData(bool bThrowOnOOM);
+
+public:
+ // Get the RCW data associated with this type or NULL if the type does not need such data or allocation
+ // failed (only if bThrowOnOOM is false).
+ RCWPerTypeData *GetRCWPerTypeData(bool bThrowOnOOM = true);
+#endif // FEATURE_COMINTEROP
+
+ // Convenience method - determine if the interface/class has a guid specified (even if not yet cached)
+ BOOL HasExplicitGuid();
+
+public :
+ // Helper routines for the GetFullyQualifiedNameForClass macros defined at the top of class.h.
+ // You probably should not use these functions directly.
+ SString &_GetFullyQualifiedNameForClassNestedAware(SString &ssBuf);
+ SString &_GetFullyQualifiedNameForClass(SString &ssBuf);
+ LPCUTF8 GetFullyQualifiedNameInfo(LPCUTF8 *ppszNamespace);
+
+private:
+ template<typename RedirectFunctor> SString &_GetFullyQualifiedNameForClassNestedAwareInternal(SString &ssBuf);
+
+public :
+ //-------------------------------------------------------------------
+ // Debug Info
+ //
+
+
+#ifdef _DEBUG
+ inline LPCUTF8 GetDebugClassName()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return debug_m_szClassName;
+ }
+ inline void SetDebugClassName(LPCUTF8 name)
+ {
+ LIMITED_METHOD_CONTRACT;
+ debug_m_szClassName = name;
+ }
+
+ // Was the type created with injected duplicates?
+ // TRUE means that we tried to inject duplicates (not that we found one to inject).
+ inline BOOL Debug_HasInjectedInterfaceDuplicates() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (GetWriteableData()->m_dwFlags & MethodTableWriteableData::enum_flag_HasInjectedInterfaceDuplicates) != 0;
+ }
+ inline void Debug_SetHasInjectedInterfaceDuplicates()
+ {
+ LIMITED_METHOD_CONTRACT;
+ GetWriteableDataForWrite()->m_dwFlags |= MethodTableWriteableData::enum_flag_HasInjectedInterfaceDuplicates;
+ }
+#endif // _DEBUG
+
+
+#ifndef DACCESS_COMPILE
+public:
+ //--------------------------------------------------------------------------------------
+ class MethodData
+ {
+ public:
+ inline ULONG AddRef()
+ { LIMITED_METHOD_CONTRACT; return (ULONG) InterlockedIncrement((LONG*)&m_cRef); }
+
+ ULONG Release();
+
+        // Since all methods that return a MethodData have already AddRef'd it, we do NOT
+        // want to AddRef when putting a holder around it. We only want to Release it.
+ static void HolderAcquire(MethodData *pEntry)
+ { LIMITED_METHOD_CONTRACT; return; }
+ static void HolderRelease(MethodData *pEntry)
+ { WRAPPER_NO_CONTRACT; if (pEntry != NULL) pEntry->Release(); }
+
+ protected:
+ ULONG m_cRef;
+
+ public:
+ MethodData() : m_cRef(1) { LIMITED_METHOD_CONTRACT; }
+ virtual ~MethodData() { LIMITED_METHOD_CONTRACT; }
+
+ virtual MethodData *GetDeclMethodData() = 0;
+ virtual MethodTable *GetDeclMethodTable() = 0;
+ virtual MethodDesc *GetDeclMethodDesc(UINT32 slotNumber) = 0;
+
+ virtual MethodData *GetImplMethodData() = 0;
+ virtual MethodTable *GetImplMethodTable() = 0;
+ virtual DispatchSlot GetImplSlot(UINT32 slotNumber) = 0;
+ // Returns INVALID_SLOT_NUMBER if no implementation exists.
+ virtual UINT32 GetImplSlotNumber(UINT32 slotNumber) = 0;
+ virtual MethodDesc *GetImplMethodDesc(UINT32 slotNumber) = 0;
+ virtual void InvalidateCachedVirtualSlot(UINT32 slotNumber) = 0;
+
+ virtual UINT32 GetNumVirtuals() = 0;
+ virtual UINT32 GetNumMethods() = 0;
+
+ protected:
+ static const UINT32 INVALID_SLOT_NUMBER = UINT32_MAX;
+
+ // This is used when building the data
+ struct MethodDataEntry
+ {
+ private:
+ static const UINT32 INVALID_CHAIN_AND_INDEX = (UINT32)(-1);
+ static const UINT16 INVALID_IMPL_SLOT_NUM = (UINT16)(-1);
+
+            // This contains both the chain delta and the table index. The
+            // reason that they are combined is that we need an atomic update
+            // of both, and it is convenient that each fits in a UINT16.
+ UINT32 m_chainDeltaAndTableIndex;
+ UINT16 m_implSlotNum; // For virtually remapped slots
+ DispatchSlot m_slot; // The entry in the DispatchImplTable
+ MethodDesc *m_pMD; // The MethodDesc for this slot
+
+ public:
+ inline MethodDataEntry() : m_slot(NULL)
+ { WRAPPER_NO_CONTRACT; Init(); }
+
+ inline void Init()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_chainDeltaAndTableIndex = INVALID_CHAIN_AND_INDEX;
+ m_implSlotNum = INVALID_IMPL_SLOT_NUM;
+ m_slot = NULL;
+ m_pMD = NULL;
+ }
+
+ inline BOOL IsDeclInit()
+ { LIMITED_METHOD_CONTRACT; return m_chainDeltaAndTableIndex != INVALID_CHAIN_AND_INDEX; }
+ inline BOOL IsImplInit()
+ { LIMITED_METHOD_CONTRACT; return m_implSlotNum != INVALID_IMPL_SLOT_NUM; }
+
+ inline void SetDeclData(UINT32 chainDelta, UINT32 tableIndex)
+ { LIMITED_METHOD_CONTRACT; m_chainDeltaAndTableIndex = ((((UINT16) chainDelta) << 16) | ((UINT16) tableIndex)); }
+ inline UINT32 GetChainDelta()
+ { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(IsDeclInit()); return m_chainDeltaAndTableIndex >> 16; }
+ inline UINT32 GetTableIndex()
+ { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(IsDeclInit()); return (m_chainDeltaAndTableIndex & (UINT32)UINT16_MAX); }
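+
+            // Worked example: SetDeclData(2, 5) stores 0x00020005, after which
+            // GetChainDelta() returns 2 and GetTableIndex() returns 5.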
+
+ inline void SetImplData(UINT32 implSlotNum)
+ { LIMITED_METHOD_CONTRACT; m_implSlotNum = (UINT16) implSlotNum; }
+ inline UINT32 GetImplSlotNum()
+ { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(IsImplInit()); return m_implSlotNum; }
+
+ inline void SetSlot(DispatchSlot slot)
+ { LIMITED_METHOD_CONTRACT; m_slot = slot; }
+ inline DispatchSlot GetSlot()
+ { LIMITED_METHOD_CONTRACT; return m_slot; }
+
+ inline void SetMethodDesc(MethodDesc *pMD)
+ { LIMITED_METHOD_CONTRACT; m_pMD = pMD; }
+ inline MethodDesc *GetMethodDesc()
+ { LIMITED_METHOD_CONTRACT; return m_pMD; }
+
+ };
+
+ static void ProcessMap(
+ const DispatchMapTypeID * rgTypeIDs,
+ UINT32 cTypeIDs,
+ MethodTable * pMT,
+ UINT32 cCurrentChainDepth,
+ MethodDataEntry * rgWorkingData);
+ }; // class MethodData
+
+ typedef ::Holder < MethodData *, MethodData::HolderAcquire, MethodData::HolderRelease > MethodDataHolder;
+ typedef ::Wrapper < MethodData *, MethodData::HolderAcquire, MethodData::HolderRelease > MethodDataWrapper;
+
+protected:
+ //--------------------------------------------------------------------------------------
+ class MethodDataObject : public MethodData
+ {
+ public:
+ // Static method that returns the amount of memory to allocate for a particular type.
+ static UINT32 GetObjectSize(MethodTable *pMT);
+
+ // Constructor. Make sure you have allocated enough memory using GetObjectSize.
+ inline MethodDataObject(MethodTable *pMT)
+ { WRAPPER_NO_CONTRACT; Init(pMT, NULL); }
+
+ inline MethodDataObject(MethodTable *pMT, MethodData *pParentData)
+ { WRAPPER_NO_CONTRACT; Init(pMT, pParentData); }
+
+ virtual ~MethodDataObject() { LIMITED_METHOD_CONTRACT; }
+
+ virtual MethodData *GetDeclMethodData()
+ { LIMITED_METHOD_CONTRACT; return this; }
+ virtual MethodTable *GetDeclMethodTable()
+ { LIMITED_METHOD_CONTRACT; return m_pMT; }
+ virtual MethodDesc *GetDeclMethodDesc(UINT32 slotNumber);
+
+ virtual MethodData *GetImplMethodData()
+ { LIMITED_METHOD_CONTRACT; return this; }
+ virtual MethodTable *GetImplMethodTable()
+ { LIMITED_METHOD_CONTRACT; return m_pMT; }
+ virtual DispatchSlot GetImplSlot(UINT32 slotNumber);
+ virtual UINT32 GetImplSlotNumber(UINT32 slotNumber);
+ virtual MethodDesc *GetImplMethodDesc(UINT32 slotNumber);
+ virtual void InvalidateCachedVirtualSlot(UINT32 slotNumber);
+
+ virtual UINT32 GetNumVirtuals()
+ { LIMITED_METHOD_CONTRACT; return m_pMT->GetNumVirtuals(); }
+ virtual UINT32 GetNumMethods()
+ { LIMITED_METHOD_CONTRACT; return m_pMT->GetCanonicalMethodTable()->GetNumMethods(); }
+
+ protected:
+ void Init(MethodTable *pMT, MethodData *pParentData);
+
+ BOOL PopulateNextLevel();
+
+ // This is the method table for the actual type we're gathering the data for
+ MethodTable *m_pMT;
+
+ // This is used in staged map decoding - it indicates which type we will next decode.
+ UINT32 m_iNextChainDepth;
+ static const UINT32 MAX_CHAIN_DEPTH = UINT32_MAX;
+
+ BOOL m_containsMethodImpl;
+
+        // NOTE: Use of these APIs is unlocked and may appear to be erroneous. However, since calls
+        // to ProcessMap will result in identical values being placed in the MethodDataObjectEntry
+        // array, it is not a problem if there is a race, since one thread may just end up
+        // doing some duplicate work.
+
+ inline UINT32 GetNextChainDepth()
+ { LIMITED_METHOD_CONTRACT; return VolatileLoad(&m_iNextChainDepth); }
+
+ inline void SetNextChainDepth(UINT32 iDepth)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (GetNextChainDepth() < iDepth) {
+ VolatileStore(&m_iNextChainDepth, iDepth);
+ }
+ }
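+
+        // Note: the chain depth only ever grows. The unlocked check-then-store above can at
+        // worst lose a race to another writer, leaving a smaller depth and causing some
+        // duplicate (but harmless) decoding work - see the NOTE above.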
+
+ // This is used when building the data
+ struct MethodDataObjectEntry
+ {
+ private:
+ MethodDesc *m_pMDDecl;
+ MethodDesc *m_pMDImpl;
+
+ public:
+ inline MethodDataObjectEntry() : m_pMDDecl(NULL), m_pMDImpl(NULL) {}
+
+ inline void SetDeclMethodDesc(MethodDesc *pMD)
+ { LIMITED_METHOD_CONTRACT; m_pMDDecl = pMD; }
+ inline MethodDesc *GetDeclMethodDesc()
+ { LIMITED_METHOD_CONTRACT; return m_pMDDecl; }
+ inline void SetImplMethodDesc(MethodDesc *pMD)
+ { LIMITED_METHOD_CONTRACT; m_pMDImpl = pMD; }
+ inline MethodDesc *GetImplMethodDesc()
+ { LIMITED_METHOD_CONTRACT; return m_pMDImpl; }
+ };
+
+ //
+ // At the end of this object is an array, so you cannot derive from this class.
+ //
+
+ inline MethodDataObjectEntry *GetEntryData()
+ { LIMITED_METHOD_CONTRACT; return (MethodDataObjectEntry *)(this + 1); }
+
+ inline MethodDataObjectEntry *GetEntry(UINT32 i)
+ { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(i < GetNumMethods()); return GetEntryData() + i; }
+
+ void FillEntryDataForAncestor(MethodTable *pMT);
+
+ // MethodDataObjectEntry m_rgEntries[...];
+ }; // class MethodDataObject
+
+ //--------------------------------------------------------------------------------------
+ class MethodDataInterface : public MethodData
+ {
+ public:
+ // Static method that returns the amount of memory to allocate for a particular type.
+ static UINT32 GetObjectSize(MethodTable *pMT)
+ { LIMITED_METHOD_CONTRACT; return sizeof(MethodDataInterface); }
+
+ // Constructor. Make sure you have allocated enough memory using GetObjectSize.
+ MethodDataInterface(MethodTable *pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(CheckPointer(pMT));
+ CONSISTENCY_CHECK(pMT->IsInterface());
+ m_pMT = pMT;
+ }
+ virtual ~MethodDataInterface()
+ { LIMITED_METHOD_CONTRACT; }
+
+ //
+ // Decl data
+ //
+ virtual MethodData *GetDeclMethodData()
+ { LIMITED_METHOD_CONTRACT; return this; }
+ virtual MethodTable *GetDeclMethodTable()
+ { LIMITED_METHOD_CONTRACT; return m_pMT; }
+ virtual MethodDesc *GetDeclMethodDesc(UINT32 slotNumber);
+
+ //
+ // Impl data
+ //
+ virtual MethodData *GetImplMethodData()
+ { LIMITED_METHOD_CONTRACT; return this; }
+ virtual MethodTable *GetImplMethodTable()
+ { LIMITED_METHOD_CONTRACT; return m_pMT; }
+ virtual DispatchSlot GetImplSlot(UINT32 slotNumber)
+ { WRAPPER_NO_CONTRACT; return DispatchSlot(m_pMT->GetRestoredSlot(slotNumber)); }
+ virtual UINT32 GetImplSlotNumber(UINT32 slotNumber)
+ { LIMITED_METHOD_CONTRACT; return slotNumber; }
+ virtual MethodDesc *GetImplMethodDesc(UINT32 slotNumber);
+ virtual void InvalidateCachedVirtualSlot(UINT32 slotNumber);
+
+ //
+ // Slot count data
+ //
+ virtual UINT32 GetNumVirtuals()
+ { LIMITED_METHOD_CONTRACT; return m_pMT->GetNumVirtuals(); }
+ virtual UINT32 GetNumMethods()
+ { LIMITED_METHOD_CONTRACT; return m_pMT->GetNumMethods(); }
+
+ protected:
+ // This is the method table for the actual type we're gathering the data for
+ MethodTable *m_pMT;
+ }; // class MethodDataInterface
+
+ //--------------------------------------------------------------------------------------
+ class MethodDataInterfaceImpl : public MethodData
+ {
+ public:
+ // Object construction-related methods
+ static UINT32 GetObjectSize(MethodTable *pMTDecl);
+
+ MethodDataInterfaceImpl(
+ const DispatchMapTypeID * rgDeclTypeIDs,
+ UINT32 cDeclTypeIDs,
+ MethodData * pDecl,
+ MethodData * pImpl);
+ virtual ~MethodDataInterfaceImpl();
+
+ // Decl-related methods
+ virtual MethodData *GetDeclMethodData()
+ { LIMITED_METHOD_CONTRACT; return m_pDecl; }
+ virtual MethodTable *GetDeclMethodTable()
+ { WRAPPER_NO_CONTRACT; return m_pDecl->GetDeclMethodTable(); }
+ virtual MethodDesc *GetDeclMethodDesc(UINT32 slotNumber)
+ { WRAPPER_NO_CONTRACT; return m_pDecl->GetDeclMethodDesc(slotNumber); }
+
+ // Impl-related methods
+ virtual MethodData *GetImplMethodData()
+ { LIMITED_METHOD_CONTRACT; return m_pImpl; }
+ virtual MethodTable *GetImplMethodTable()
+ { WRAPPER_NO_CONTRACT; return m_pImpl->GetImplMethodTable(); }
+ virtual DispatchSlot GetImplSlot(UINT32 slotNumber);
+ virtual UINT32 GetImplSlotNumber(UINT32 slotNumber);
+ virtual MethodDesc *GetImplMethodDesc(UINT32 slotNumber);
+ virtual void InvalidateCachedVirtualSlot(UINT32 slotNumber);
+
+ virtual UINT32 GetNumVirtuals()
+ { WRAPPER_NO_CONTRACT; return m_pDecl->GetNumVirtuals(); }
+ virtual UINT32 GetNumMethods()
+ { WRAPPER_NO_CONTRACT; return m_pDecl->GetNumVirtuals(); }
+
+ protected:
+ UINT32 MapToImplSlotNumber(UINT32 slotNumber);
+
+ BOOL PopulateNextLevel();
+ void Init(
+ const DispatchMapTypeID * rgDeclTypeIDs,
+ UINT32 cDeclTypeIDs,
+ MethodData * pDecl,
+ MethodData * pImpl);
+
+ MethodData *m_pDecl;
+ MethodData *m_pImpl;
+
+ // This is used in staged map decoding - it indicates which type(s) we will find.
+ const DispatchMapTypeID * m_rgDeclTypeIDs;
+ UINT32 m_cDeclTypeIDs;
+ UINT32 m_iNextChainDepth;
+ static const UINT32 MAX_CHAIN_DEPTH = UINT32_MAX;
+
+ inline UINT32 GetNextChainDepth()
+ { LIMITED_METHOD_CONTRACT; return VolatileLoad(&m_iNextChainDepth); }
+
+ inline void SetNextChainDepth(UINT32 iDepth)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (GetNextChainDepth() < iDepth) {
+ VolatileStore(&m_iNextChainDepth, iDepth);
+ }
+ }
+
+ //
+ // At the end of this object is an array, so you cannot derive from this class.
+ //
+
+ inline MethodDataEntry *GetEntryData()
+ { LIMITED_METHOD_CONTRACT; return (MethodDataEntry *)(this + 1); }
+
+ inline MethodDataEntry *GetEntry(UINT32 i)
+ { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(i < GetNumMethods()); return GetEntryData() + i; }
+
+ // MethodDataEntry m_rgEntries[...];
+ }; // class MethodDataInterfaceImpl
+
+ //--------------------------------------------------------------------------------------
+ static MethodDataCache *s_pMethodDataCache;
+ static BOOL s_fUseParentMethodData;
+ static BOOL s_fUseMethodDataCache;
+
+public:
+ static void AllowMethodDataCaching()
+ { WRAPPER_NO_CONTRACT; CheckInitMethodDataCache(); s_fUseMethodDataCache = TRUE; }
+ static void ClearMethodDataCache();
+ static void AllowParentMethodDataCopy()
+ { LIMITED_METHOD_CONTRACT; s_fUseParentMethodData = TRUE; }
+ // NOTE: The fCanCache argument determines if the resulting MethodData object can
+ // be added to the global MethodDataCache. This is used when requesting a
+ // MethodData object for a type currently being built.
+ static MethodData *GetMethodData(MethodTable *pMT, BOOL fCanCache = TRUE);
+ static MethodData *GetMethodData(MethodTable *pMTDecl, MethodTable *pMTImpl, BOOL fCanCache = TRUE);
+ // This method is used by BuildMethodTable because the exact interface has not yet been loaded.
+ // NOTE: This method does not cache the resulting MethodData object in the global MethodDataCache.
+ static MethodData * GetMethodData(
+ const DispatchMapTypeID * rgDeclTypeIDs,
+ UINT32 cDeclTypeIDs,
+ MethodTable * pMTDecl,
+ MethodTable * pMTImpl);
+
+protected:
+ static void CheckInitMethodDataCache();
+ static MethodData *FindParentMethodDataHelper(MethodTable *pMT);
+ static MethodData *FindMethodDataHelper(MethodTable *pMTDecl, MethodTable *pMTImpl);
+ static MethodData *GetMethodDataHelper(MethodTable *pMTDecl, MethodTable *pMTImpl, BOOL fCanCache);
+ // NOTE: This method does not cache the resulting MethodData object in the global MethodDataCache.
+ static MethodData * GetMethodDataHelper(
+ const DispatchMapTypeID * rgDeclTypeIDs,
+ UINT32 cDeclTypeIDs,
+ MethodTable * pMTDecl,
+ MethodTable * pMTImpl);
+
+public:
+ //--------------------------------------------------------------------------------------
+ class MethodIterator
+ {
+ public:
+ MethodIterator(MethodTable *pMT);
+ MethodIterator(MethodTable *pMTDecl, MethodTable *pMTImpl);
+ MethodIterator(MethodData *pMethodData);
+ MethodIterator(const MethodIterator &it);
+ inline ~MethodIterator() { WRAPPER_NO_CONTRACT; m_pMethodData->Release(); }
+ INT32 GetNumMethods() const;
+ inline BOOL IsValid() const;
+ inline BOOL MoveTo(UINT32 idx);
+ inline BOOL Prev();
+ inline BOOL Next();
+ inline void MoveToBegin();
+ inline void MoveToEnd();
+ inline UINT32 GetSlotNumber() const;
+ inline UINT32 GetImplSlotNumber() const;
+ inline BOOL IsVirtual() const;
+ inline UINT32 GetNumVirtuals() const;
+ inline DispatchSlot GetTarget() const;
+
+ // Can be called only if IsValid()=TRUE
+ inline MethodDesc *GetMethodDesc() const;
+ inline MethodDesc *GetDeclMethodDesc() const;
+
+ protected:
+ void Init(MethodTable *pMTDecl, MethodTable *pMTImpl);
+
+ MethodData *m_pMethodData;
+ INT32 m_iCur; // Current logical slot index
+ INT32 m_iMethods;
+ }; // class MethodIterator
+#endif // !DACCESS_COMPILE
+
+ //--------------------------------------------------------------------------------------
+ // This iterator lets you walk over all the method bodies introduced by this type.
+ // This includes new static methods, new non-virtual methods, and any overrides
+ // of the parent's virtual methods. It does not include virtual method implementations
+    // provided by the parent.
+
+ class IntroducedMethodIterator
+ {
+ public:
+ IntroducedMethodIterator(MethodTable *pMT, BOOL restrictToCanonicalTypes = TRUE);
+ inline BOOL IsValid() const;
+ BOOL Next();
+
+ // Can be called only if IsValid()=TRUE
+ inline MethodDesc *GetMethodDesc() const;
+
+ // Static worker methods of the iterator. These are meant to be used
+ // by RuntimeTypeHandle::GetFirstIntroducedMethod and RuntimeTypeHandle::GetNextIntroducedMethod
+ // only to expose this iterator to managed code.
+ static MethodDesc * GetFirst(MethodTable * pMT);
+ static MethodDesc * GetNext(MethodDesc * pMD);
+
+ protected:
+ MethodDesc *m_pMethodDesc; // Current method desc
+
+ // Cached info about current method desc
+ MethodDescChunk *m_pChunk;
+ TADDR m_pChunkEnd;
+
+ void SetChunk(MethodDescChunk * pChunk);
+ }; // class IntroducedMethodIterator
+
+ //-------------------------------------------------------------------
+ // INSTANCE MEMBER VARIABLES
+ //
+
+#ifdef DACCESS_COMPILE
+public:
+#else
+private:
+#endif
+ enum WFLAGS_LOW_ENUM
+ {
+ // AS YOU ADD NEW FLAGS PLEASE CONSIDER WHETHER Generics::NewInstantiation NEEDS
+ // TO BE UPDATED IN ORDER TO ENSURE THAT METHODTABLES DUPLICATED FOR GENERIC INSTANTIATIONS
+        // CARRY THE CORRECT FLAGS.
+ //
+
+        // We are overloading the low 2 bytes of m_dwFlags to be a component size for Strings
+        // and Arrays, plus a set of flags which we can be assured are in a known state
+        // for Strings / Arrays. Currently these are a bunch of generics flags which don't
+        // apply to Strings / Arrays.
+
+ enum_flag_UNUSED_ComponentSize_1 = 0x00000001,
+
+ enum_flag_StaticsMask = 0x00000006,
+ enum_flag_StaticsMask_NonDynamic = 0x00000000,
+ enum_flag_StaticsMask_Dynamic = 0x00000002, // dynamic statics (EnC, reflection.emit)
+ enum_flag_StaticsMask_Generics = 0x00000004, // generics statics
+ enum_flag_StaticsMask_CrossModuleGenerics = 0x00000006, // cross module generics statics (NGen)
+ enum_flag_StaticsMask_IfGenericsThenCrossModule = 0x00000002, // helper constant to get rid of unnecessary check
+
+ enum_flag_NotInPZM = 0x00000008, // True if this type is not in its PreferredZapModule
+
+ enum_flag_GenericsMask = 0x00000030,
+ enum_flag_GenericsMask_NonGeneric = 0x00000000, // no instantiation
+ enum_flag_GenericsMask_GenericInst = 0x00000010, // regular instantiation, e.g. List<String>
+ enum_flag_GenericsMask_SharedInst = 0x00000020, // shared instantiation, e.g. List<__Canon> or List<MyValueType<__Canon>>
+ enum_flag_GenericsMask_TypicalInst = 0x00000030, // the type instantiated at its formal parameters, e.g. List<T>
+
+#ifdef FEATURE_REMOTING
+ enum_flag_ContextStatic = 0x00000040,
+#endif
+ enum_flag_UNUSED_ComponentSize_2 = 0x00000080,
+
+ enum_flag_HasVariance = 0x00000100, // This is an instantiated type some of whose type parameters are co or contra-variant
+
+ enum_flag_HasDefaultCtor = 0x00000200,
+ enum_flag_HasPreciseInitCctors = 0x00000400, // Do we need to run class constructors at allocation time? (Not perf important, could be moved to EEClass
+
+ enum_flag_IsHFA = 0x00000800, // This type is an HFA (Homogenous Floating-point Aggregate)
+
+        // In a perfect world we would fill these positions using other flags that we already
+        // have and that hold a constant value for anything with a component size.
+ enum_flag_UNUSED_ComponentSize_4 = 0x00001000,
+ enum_flag_UNUSED_ComponentSize_5 = 0x00002000,
+ enum_flag_UNUSED_ComponentSize_6 = 0x00004000,
+ enum_flag_UNUSED_ComponentSize_7 = 0x00008000,
+
+#define SET_FALSE(flag) (flag & 0)
+#define SET_TRUE(flag) (flag & 0xffff)
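+
+        // Note that SET_FALSE(f) always evaluates to 0 and SET_TRUE(f) evaluates to f itself
+        // (every WFLAGS_LOW_ENUM flag fits in the low WORD); the macros exist purely to
+        // document the intended String/Array default of each flag in the value below.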
+
+ // IMPORTANT! IMPORTANT! IMPORTANT!
+ //
+ // As you change the flags in WFLAGS_LOW_ENUM you also need to change this
+ // to be up to date to reflect the default values of those flags for the
+ // case where this MethodTable is for a String or Array
+ enum_flag_StringArrayValues = SET_TRUE(enum_flag_StaticsMask_NonDynamic) |
+ SET_FALSE(enum_flag_NotInPZM) |
+ SET_TRUE(enum_flag_GenericsMask_NonGeneric) |
+#ifdef FEATURE_REMOTING
+ SET_FALSE(enum_flag_ContextStatic) |
+#endif
+ SET_FALSE(enum_flag_HasVariance) |
+ SET_FALSE(enum_flag_HasDefaultCtor) |
+ SET_FALSE(enum_flag_HasPreciseInitCctors),
+
+ }; // enum WFLAGS_LOW_ENUM
+
+ enum WFLAGS_HIGH_ENUM
+ {
+ // DO NOT use flags that have bits set in the low 2 bytes.
+ // These flags are DWORD sized so that our atomic masking
+ // operations can operate on the entire 4-byte aligned DWORD
+ // instead of the logical non-aligned WORD of flags. The
+ // low WORD of flags is reserved for the component size.
+
+ // The following bits describe mutually exclusive locations of the type
+        // in the type hierarchy.
+ enum_flag_Category_Mask = 0x000F0000,
+
+ enum_flag_Category_Class = 0x00000000,
+ enum_flag_Category_Unused_1 = 0x00010000,
+
+ enum_flag_Category_MarshalByRef_Mask= 0x000E0000,
+ enum_flag_Category_MarshalByRef = 0x00020000,
+ enum_flag_Category_Contextful = 0x00030000, // sub-category of MarshalByRef
+
+ enum_flag_Category_ValueType = 0x00040000,
+ enum_flag_Category_ValueType_Mask = 0x000C0000,
+ enum_flag_Category_Nullable = 0x00050000, // sub-category of ValueType
+ enum_flag_Category_PrimitiveValueType=0x00060000, // sub-category of ValueType, Enum or primitive value type
+ enum_flag_Category_TruePrimitive = 0x00070000, // sub-category of ValueType, Primitive (ELEMENT_TYPE_I, etc.)
+
+ enum_flag_Category_Array = 0x00080000,
+ enum_flag_Category_Array_Mask = 0x000C0000,
+ // enum_flag_Category_IfArrayThenUnused = 0x00010000, // sub-category of Array
+ enum_flag_Category_IfArrayThenSzArray = 0x00020000, // sub-category of Array
+
+ enum_flag_Category_Interface = 0x000C0000,
+ enum_flag_Category_Unused_2 = 0x000D0000,
+ enum_flag_Category_TransparentProxy = 0x000E0000,
+ enum_flag_Category_AsyncPin = 0x000F0000,
+
+ enum_flag_Category_ElementTypeMask = 0x000E0000, // bits that matter for element type mask
+
+
+ enum_flag_HasFinalizer = 0x00100000, // instances require finalization
+
+ enum_flag_IfNotInterfaceThenMarshalable = 0x00200000, // Is this type marshalable by the pinvoke marshalling layer
+#ifdef FEATURE_COMINTEROP
+        enum_flag_IfInterfaceThenHasGuidInfo    = 0x00200000, // Does the type have optional GuidInfo
+#endif // FEATURE_COMINTEROP
+
+ enum_flag_HasRemotingVtsInfo = 0x00400000, // Optional data present indicating VTS methods and optional fields
+
+ enum_flag_HasIndirectParent = 0x00800000, // m_pParentMethodTable has double indirection
+
+ enum_flag_ContainsPointers = 0x01000000,
+
+ enum_flag_HasTypeEquivalence = 0x02000000, // can be equivalent to another type
+
+#ifdef FEATURE_COMINTEROP
+ enum_flag_HasRCWPerTypeData = 0x04000000, // has optional pointer to RCWPerTypeData
+#endif // FEATURE_COMINTEROP
+
+ enum_flag_HasCriticalFinalizer = 0x08000000, // finalizer must be run on Appdomain Unload
+ enum_flag_Collectible = 0x10000000,
+ enum_flag_ContainsGenericVariables = 0x20000000, // we cache this flag to help detect these efficiently and
+ // to detect this condition when restoring
+#ifdef FEATURE_COMINTEROP
+ enum_flag_ComObject = 0x40000000, // class is a com object
+#endif
+
+ enum_flag_HasComponentSize = 0x80000000, // This is set if component size is used for flags.
+
+#ifdef FEATURE_COMINTEROP
+        // Types that require a non-trivial interface cast have this bit set in the category
+ enum_flag_NonTrivialInterfaceCast = 0x00080000 | enum_flag_ComObject,
+#else
+ enum_flag_NonTrivialInterfaceCast = 0x00080000,
+#endif
+
+ }; // enum WFLAGS_HIGH_ENUM
+
+// NIDump needs to be able to see these flags
+// TODO: figure out how to make these private
+#if defined(DACCESS_COMPILE)
+public:
+#else
+private:
+#endif
+ enum WFLAGS2_ENUM
+ {
+ // AS YOU ADD NEW FLAGS PLEASE CONSIDER WHETHER Generics::NewInstantiation NEEDS
+ // TO BE UPDATED IN ORDER TO ENSURE THAT METHODTABLES DUPLICATED FOR GENERIC INSTANTIATIONS
+        // CARRY THE CORRECT FLAGS.
+
+        // The following bits describe usage of optional slots. They have to stay
+        // together because we index into offset arrays using them.
+ enum_flag_MultipurposeSlotsMask = 0x001F,
+ enum_flag_HasPerInstInfo = 0x0001,
+ enum_flag_HasInterfaceMap = 0x0002,
+ enum_flag_HasDispatchMapSlot = 0x0004,
+ enum_flag_HasNonVirtualSlots = 0x0008,
+ enum_flag_HasModuleOverride = 0x0010,
+
+ enum_flag_IsZapped = 0x0020, // This could be fetched from m_pLoaderModule if we run out of flags
+
+ enum_flag_IsPreRestored = 0x0040, // Class does not need restore
+ // This flag is set only for NGENed classes (IsZapped is true)
+
+ enum_flag_HasModuleDependencies = 0x0080,
+
+ enum_flag_NoSecurityProperties = 0x0100, // Class does not have security properties (that is,
+ // GetClass()->GetSecurityProperties will return 0).
+
+ enum_flag_RequiresDispatchTokenFat = 0x0200,
+
+ enum_flag_HasCctor = 0x0400,
+ enum_flag_HasCCWTemplate = 0x0800, // Has an extra field pointing to a CCW template
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+ enum_flag_RequiresAlign8 = 0x1000, // Type requires 8-byte alignment (only set on platforms that require this and don't get it implicitly)
+#endif
+
+ enum_flag_HasBoxedRegularStatics = 0x2000, // GetNumBoxedRegularStatics() != 0
+
+ enum_flag_HasSingleNonVirtualSlot = 0x4000,
+
+ enum_flag_DependsOnEquivalentOrForwardedStructs= 0x8000, // Declares methods that have type equivalent or type forwarded structures in their signature
+
+ }; // enum WFLAGS2_ENUM
+
+ __forceinline void ClearFlag(WFLAGS_LOW_ENUM flag)
+ {
+ _ASSERTE(!IsStringOrArray());
+ m_dwFlags &= ~flag;
+ }
+ __forceinline void SetFlag(WFLAGS_LOW_ENUM flag)
+ {
+ _ASSERTE(!IsStringOrArray());
+ m_dwFlags |= flag;
+ }
+ __forceinline DWORD GetFlag(WFLAGS_LOW_ENUM flag) const
+ {
+ SUPPORTS_DAC;
+ return (IsStringOrArray() ? (enum_flag_StringArrayValues & flag) : (m_dwFlags & flag));
+ }
+ __forceinline BOOL TestFlagWithMask(WFLAGS_LOW_ENUM mask, WFLAGS_LOW_ENUM flag) const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (IsStringOrArray() ? (((DWORD)enum_flag_StringArrayValues & (DWORD)mask) == (DWORD)flag) :
+ ((m_dwFlags & (DWORD)mask) == (DWORD)flag));
+ }
+
+ __forceinline void ClearFlag(WFLAGS_HIGH_ENUM flag)
+ {
+ m_dwFlags &= ~flag;
+ }
+ __forceinline void SetFlag(WFLAGS_HIGH_ENUM flag)
+ {
+ m_dwFlags |= flag;
+ }
+ __forceinline DWORD GetFlag(WFLAGS_HIGH_ENUM flag) const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_dwFlags & flag;
+ }
+ __forceinline BOOL TestFlagWithMask(WFLAGS_HIGH_ENUM mask, WFLAGS_HIGH_ENUM flag) const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return ((m_dwFlags & (DWORD)mask) == (DWORD)flag);
+ }
+
+ __forceinline void ClearFlag(WFLAGS2_ENUM flag)
+ {
+ m_wFlags2 &= ~flag;
+ }
+ __forceinline void SetFlag(WFLAGS2_ENUM flag)
+ {
+ m_wFlags2 |= flag;
+ }
+ __forceinline DWORD GetFlag(WFLAGS2_ENUM flag) const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_wFlags2 & flag;
+ }
+ __forceinline BOOL TestFlagWithMask(WFLAGS2_ENUM mask, WFLAGS2_ENUM flag) const
+ {
+ return (m_wFlags2 & (DWORD)mask) == (DWORD)flag;
+ }
+
+ // Just exposing a couple of these for x86 asm versions of JIT_IsInstanceOfClass and JIT_IsInstanceOfInterface
+public:
+ enum
+ {
+ public_enum_flag_HasTypeEquivalence = enum_flag_HasTypeEquivalence,
+ public_enum_flag_NonTrivialInterfaceCast = enum_flag_NonTrivialInterfaceCast,
+ };
+
+private:
+ /*
+ * This stuff must be first in the struct and should fit on a cache line - don't move it. Used by the GC.
+ */
+ // struct
+ // {
+
+ // Low WORD is component size for array and string types (HasComponentSize() returns true).
+ // Used for flags otherwise.
+ DWORD m_dwFlags;
+
+ // Base size of instance of this class when allocated on the heap
+ DWORD m_BaseSize;
+ // }
+
+ WORD m_wFlags2;
+
+ // Class token if it fits into 16-bits. If this is (WORD)-1, the class token is stored in the TokenOverflow optional member.
+ WORD m_wToken;
+
+ // <NICE> In the normal cases we shouldn't need a full word for each of these </NICE>
+ WORD m_wNumVirtuals;
+ WORD m_wNumInterfaces;
+
+#ifdef _DEBUG
+ LPCUTF8 debug_m_szClassName;
+#endif //_DEBUG
+
+    // Parent PTR_MethodTable if enum_flag_HasIndirectParent is not set. Pointer to indirection cell
+    // if enum_flag_HasIndirectParent is set. The indirection is offset by offsetof(MethodTable, m_pParentMethodTable).
+    // It allows casting helpers to walk the parent chain naturally. Casting helpers do not need the
+    // explicit check for enum_flag_HasIndirectParent.
+ TADDR m_pParentMethodTable;
+
+ PTR_Module m_pLoaderModule; // LoaderModule. It is equal to the ZapModule in ngened images
+
+ PTR_MethodTableWriteableData m_pWriteableData;
+
+    // The value of the lowest two bits describes what the union contains
+ enum LowBits {
+ UNION_EECLASS = 0, // 0 - pointer to EEClass. This MethodTable is the canonical method table.
+ UNION_INVALID = 1, // 1 - not used
+ UNION_METHODTABLE = 2, // 2 - pointer to canonical MethodTable.
+ UNION_INDIRECTION = 3 // 3 - pointer to indirection cell that points to canonical MethodTable.
+ }; // (used only if FEATURE_PREJIT is defined)
+ static const TADDR UNION_MASK = 3;
+
+ union {
+ EEClass * m_pEEClass;
+ TADDR m_pCanonMT;
+ };
+
+ __forceinline static LowBits union_getLowBits(TADDR pCanonMT)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return LowBits(pCanonMT & UNION_MASK);
+ }
+ __forceinline static TADDR union_getPointer(TADDR pCanonMT)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (pCanonMT & ~UNION_MASK);
+ }
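+
+ // A minimal sketch of consuming the tagged value (it mirrors the debug path
+ // of GetClass_NoLogging in methodtable.inl):
+ //
+ // TADDR tagged = m_pCanonMT;
+ // switch (union_getLowBits(tagged))
+ // {
+ //     case UNION_EECLASS:     /* tagged is the EEClass pointer itself */ break;
+ //     case UNION_METHODTABLE: /* union_getPointer(tagged) is the canonical MT */ break;
+ //     case UNION_INDIRECTION: /* *PTR_TADDR(union_getPointer(tagged)) is the canonical MT */ break;
+ //     default: break; // UNION_INVALID is not used
+ // }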
+
+ // m_pPerInstInfo and m_pInterfaceMap have to be at fixed offsets because of performance-sensitive
+ // JITed code and JIT helpers. However, they are frequently not present. The space is used by other
+ // multipurpose slots on a first-come, first-served basis if the fixed ones are not present. The other
+ // multipurpose slots are DispatchMapSlot, NonVirtualSlots, ModuleOverride (see enum_flag_MultipurposeSlotsMask).
+ // The multipurpose slots that do not fit are stored after the vtable slots.
+
+ union
+ {
+ PTR_Dictionary * m_pPerInstInfo;
+ TADDR m_ElementTypeHnd;
+ TADDR m_pMultipurposeSlot1;
+ };
+ public:
+ union
+ {
+ InterfaceInfo_t * m_pInterfaceMap;
+ TADDR m_pMultipurposeSlot2;
+ };
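+
+ // Illustrative example of the first-come-first-served scheme described above:
+ // a non-generic type does not need m_pPerInstInfo, so its slot can be reused
+ // for, say, a DispatchMapSlot; a generic type implementing interfaces needs
+ // both fixed slots, so any remaining multipurpose slots overflow to after the
+ // vtable slots.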
+
+ // VTable and Non-Virtual slots go here
+
+ // Overflow multipurpose slots go here
+
+ // Optional Members go here
+ // See above for the list of optional members
+
+ // Generic dictionary pointers go here
+
+ // Interface map goes here
+
+ // Generic instantiation+dictionary goes here
+
+private:
+
+ // disallow direct creation
+ void *operator new(size_t dummy);
+ void operator delete(void *pData);
+ MethodTable();
+
+ // Optional members. These are used for fields in the data structure where
+ // the fields are (a) known when MT is created and (b) there is a default
+ // value for the field in the common case. That is, they are normally used
+ // for data that is only relevant to a small number of method tables.
+
+ // Optional members and multipurpose slots serve a similar purpose, but they differ in the details:
+ // - Multipurpose slots can only accommodate pointer-sized structures right now. It is non-trivial
+ // to add new ones, but access is faster.
+ // - Optional members can accommodate structures of any size. It is trivial to add new ones,
+ // but access is slower.
+
+ // The following macro will automatically create GetXXX accessors for the optional members.
+#define METHODTABLE_OPTIONAL_MEMBERS() \
+ /* NAME TYPE GETTER */ \
+ /* Accessing this member efficiently is currently performance critical for static field accesses */ \
+ /* in generic classes, so place it early in the list. */ \
+ METHODTABLE_OPTIONAL_MEMBER(GenericsStaticsInfo, GenericsStaticsInfo, GetGenericsStaticsInfo ) \
+ /* Accessed by interop, fairly frequently. */ \
+ METHODTABLE_COMINTEROP_OPTIONAL_MEMBERS() \
+ /* Accessed during x-domain transition only, so place it late in the list. */ \
+ METHODTABLE_REMOTING_OPTIONAL_MEMBERS() \
+ /* Accessed during certain generic type load operations only, so low priority */ \
+ METHODTABLE_OPTIONAL_MEMBER(ExtraInterfaceInfo, TADDR, GetExtraInterfaceInfoPtr ) \
+ /* TypeDef token for assemblies with more than 64k types. Almost never happens in the real world. */ \
+ METHODTABLE_OPTIONAL_MEMBER(TokenOverflow, TADDR, GetTokenOverflowPtr ) \
+
+#ifdef FEATURE_COMINTEROP
+#define METHODTABLE_COMINTEROP_OPTIONAL_MEMBERS() \
+ METHODTABLE_OPTIONAL_MEMBER(GuidInfo, PTR_GuidInfo, GetGuidInfoPtr ) \
+ METHODTABLE_OPTIONAL_MEMBER(RCWPerTypeData, RCWPerTypeData *, GetRCWPerTypeDataPtr ) \
+ METHODTABLE_OPTIONAL_MEMBER(CCWTemplate, ComCallWrapperTemplate *, GetCCWTemplatePtr )
+#else
+#define METHODTABLE_COMINTEROP_OPTIONAL_MEMBERS()
+#endif
+
+#ifdef FEATURE_REMOTING
+#define METHODTABLE_REMOTING_OPTIONAL_MEMBERS() \
+ METHODTABLE_OPTIONAL_MEMBER(RemotingVtsInfo, PTR_RemotingVtsInfo, GetRemotingVtsInfoPtr ) \
+ METHODTABLE_OPTIONAL_MEMBER(RemotableMethodInfo, PTR_CrossDomainOptimizationInfo,GetRemotableMethodInfoPtr ) \
+ METHODTABLE_OPTIONAL_MEMBER(ContextStatics, PTR_ContextStaticsBucket, GetContextStaticsBucketPtr )
+#else
+#define METHODTABLE_REMOTING_OPTIONAL_MEMBERS()
+#endif
+
+ enum OptionalMemberId
+ {
+#undef METHODTABLE_OPTIONAL_MEMBER
+#define METHODTABLE_OPTIONAL_MEMBER(NAME, TYPE, GETTER) OptionalMember_##NAME,
+ METHODTABLE_OPTIONAL_MEMBERS()
+ OptionalMember_Count,
+
+ OptionalMember_First = OptionalMember_GenericsStaticsInfo,
+ };
+
+ FORCEINLINE DWORD GetOffsetOfOptionalMember(OptionalMemberId id);
+
+public:
+
+ //
+ // Public accessor helpers for the optional members of MethodTable
+ //
+
+#undef METHODTABLE_OPTIONAL_MEMBER
+#define METHODTABLE_OPTIONAL_MEMBER(NAME, TYPE, GETTER) \
+ inline DPTR(TYPE) GETTER() \
+ { \
+ LIMITED_METHOD_CONTRACT; \
+ STATIC_CONTRACT_SO_TOLERANT; \
+ _ASSERTE(Has##NAME()); \
+ return dac_cast<DPTR(TYPE)>(dac_cast<TADDR>(this) + GetOffsetOfOptionalMember(OptionalMember_##NAME)); \
+ }
+
+ METHODTABLE_OPTIONAL_MEMBERS()
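+
+ // As a sketch, for the GenericsStaticsInfo entry the macro above expands to
+ // roughly the following (contracts omitted):
+ //
+ // inline DPTR(GenericsStaticsInfo) GetGenericsStaticsInfo()
+ // {
+ //     _ASSERTE(HasGenericsStaticsInfo());
+ //     return dac_cast<DPTR(GenericsStaticsInfo)>(dac_cast<TADDR>(this) +
+ //         GetOffsetOfOptionalMember(OptionalMember_GenericsStaticsInfo));
+ // }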
+
+private:
+ inline DWORD GetStartOffsetOfOptionalMembers()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetOffsetOfOptionalMember(OptionalMember_First);
+ }
+
+ inline DWORD GetEndOffsetOfOptionalMembers()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetOffsetOfOptionalMember(OptionalMember_Count);
+ }
+
+ inline static DWORD GetOptionalMembersAllocationSize(
+ DWORD dwMultipurposeSlotsMask,
+ BOOL needsRemotableMethodInfo,
+ BOOL needsGenericsStaticsInfo,
+ BOOL needsGuidInfo,
+ BOOL needsCCWTemplate,
+ BOOL needsRCWPerTypeData,
+ BOOL needsRemotingVtsInfo,
+ BOOL needsContextStatic,
+ BOOL needsTokenOverflow);
+ inline DWORD GetOptionalMembersSize();
+
+ // The PerInstInfo is a (possibly empty) array of pointers to
+ // Instantiations/Dictionaries. This array comes after the optional members.
+ inline DWORD GetPerInstInfoSize();
+
+ // This is the size of the interface map chunk in the method table.
+ // If the MethodTable has a dynamic interface map then the size includes the pointer
+ // that stores the extra info for that map.
+ // The interface map itself comes after the PerInstInfo (if any)
+ inline DWORD GetInterfaceMapSize();
+
+ // The instantiation/dictionary comes at the end of the MethodTable after
+ // the interface map.
+ inline DWORD GetInstAndDictSize();
+
+private:
+ // Helper template to compute the offsets at compile time
+ template<int mask>
+ struct MultipurposeSlotOffset;
+
+ static const BYTE c_DispatchMapSlotOffsets[];
+ static const BYTE c_NonVirtualSlotsOffsets[];
+ static const BYTE c_ModuleOverrideOffsets[];
+
+ static const BYTE c_OptionalMembersStartOffsets[]; // start offsets of the optional members for each multipurpose-slots mask
+
+ TADDR GetMultipurposeSlotPtr(WFLAGS2_ENUM flag, const BYTE * offsets);
+
+ void SetMultipurposeSlotsMask(DWORD dwMask)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE((m_wFlags2 & enum_flag_MultipurposeSlotsMask) == 0);
+ m_wFlags2 |= (WORD)dwMask;
+ }
+
+ BOOL HasModuleOverride()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_HasModuleOverride);
+ }
+
+ DPTR(RelativeFixupPointer<PTR_Module>) GetModuleOverridePtr()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<DPTR(RelativeFixupPointer<PTR_Module>)>(GetMultipurposeSlotPtr(enum_flag_HasModuleOverride, c_ModuleOverrideOffsets));
+ }
+
+#ifdef BINDER
+ void SetModule(PTR_Module pModule);
+#else
+ void SetModule(Module * pModule);
+#endif
+
+ /************************************
+ //
+ // CONTEXT STATIC
+ //
+ ************************************/
+
+public:
+#ifdef FEATURE_REMOTING
+ inline BOOL HasContextStatics();
+ inline void SetHasContextStatics();
+
+ inline PTR_ContextStaticsBucket GetContextStaticsBucket()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(HasContextStatics());
+ PTR_ContextStaticsBucket pBucket = *GetContextStaticsBucketPtr();
+ _ASSERTE(pBucket != NULL);
+ return pBucket;
+ }
+
+ inline DWORD GetContextStaticsOffset();
+ inline WORD GetContextStaticsSize();
+
+ void SetupContextStatics(AllocMemTracker *pamTracker, WORD dwContextStaticsSize);
+ DWORD AllocateContextStaticsOffset();
+#endif
+
+ BOOL Validate ();
+
+#ifdef FEATURE_READYTORUN_COMPILER
+ //
+ // Is field layout in this type fixed within the current version bubble?
+ // This check does not take the inheritance chain into account.
+ //
+ BOOL IsLayoutFixedInCurrentVersionBubble();
+
+ //
+ // Is field layout of the inheritance chain fixed within the current version bubble?
+ //
+ BOOL IsInheritanceChainLayoutFixedInCurrentVersionBubble();
+#endif
+
+}; // class MethodTable
+
+#if defined(FEATURE_COMINTEROP) && !defined(DACCESS_COMPILE)
+WORD GetEquivalentMethodSlot(MethodTable * pOldMT, MethodTable * pNewMT, WORD wMTslot, BOOL *pfFound);
+#endif // defined(FEATURE_COMINTEROP) && !defined(DACCESS_COMPILE)
+
+MethodTable* CreateMinimalMethodTable(Module* pContainingModule,
+ LoaderHeap* pCreationHeap,
+ AllocMemTracker* pamTracker);
+
+#endif // !_METHODTABLE_H_
diff --git a/src/vm/methodtable.inl b/src/vm/methodtable.inl
new file mode 100644
index 0000000000..a993556db6
--- /dev/null
+++ b/src/vm/methodtable.inl
@@ -0,0 +1,1911 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: methodtable.inl
+//
+
+
+//
+
+//
+// ============================================================================
+
+#ifndef _METHODTABLE_INL_
+#define _METHODTABLE_INL_
+
+#include "methodtable.h"
+#include "genericdict.h"
+#include "threadstatics.h"
+
+//==========================================================================================
+inline PTR_EEClass MethodTable::GetClass_NoLogging()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef _DEBUG
+ LowBits lowBits = union_getLowBits(m_pCanonMT);
+ if (lowBits == UNION_EECLASS)
+ {
+ return PTR_EEClass(m_pCanonMT);
+ }
+ else if (lowBits == UNION_METHODTABLE)
+ {
+ // pointer to canonical MethodTable.
+ TADDR canonicalMethodTable = union_getPointer(m_pCanonMT);
+ return PTR_EEClass(PTR_MethodTable(canonicalMethodTable)->m_pCanonMT);
+ }
+#ifdef FEATURE_PREJIT
+ else if (lowBits == UNION_INDIRECTION)
+ {
+ // pointer to indirection cell that points to canonical MethodTable
+ TADDR canonicalMethodTable = *PTR_TADDR(union_getPointer(m_pCanonMT));
+ return PTR_EEClass(PTR_MethodTable(canonicalMethodTable)->m_pCanonMT);
+ }
+#endif
+#ifdef DACCESS_COMPILE
+ // Minidumps don't guarantee that every member of every class will be readable here, so don't assert.
+#else
+ _ASSERTE(!"Malformed m_pEEClass in MethodTable");
+#endif
+ return NULL;
+
+#else
+
+ TADDR addr = m_pCanonMT;
+
+ if ((addr & 2) == 0)
+ {
+ // pointer to EEClass
+ return PTR_EEClass(addr);
+ }
+
+#ifdef FEATURE_PREJIT
+ if ((addr & 1) != 0)
+ {
+ // pointer to indirection cell that points to canonical MethodTable
+ TADDR canonicalMethodTable = *PTR_TADDR(addr - 3);
+ return PTR_EEClass(PTR_MethodTable(canonicalMethodTable)->m_pCanonMT);
+ }
+#endif
+
+ // pointer to canonical MethodTable.
+ return PTR_EEClass(PTR_MethodTable(addr - 2)->m_pCanonMT);
+#endif
+}
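+
+// Note on the release-build arithmetic above: the low two bits of m_pCanonMT
+// carry the LowBits tag, so (addr & 2) == 0 covers UNION_EECLASS; addr - 2
+// strips the UNION_METHODTABLE tag and addr - 3 strips the UNION_INDIRECTION
+// tag, i.e. the open-coded equivalent of union_getPointer().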
+
+//==========================================================================================
+inline PTR_EEClass MethodTable::GetClass()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE_IMPL(!IsAsyncPinType());
+ _ASSERTE_IMPL(GetClass_NoLogging() != NULL);
+
+ g_IBCLogger.LogEEClassAndMethodTableAccess(this);
+ return GetClass_NoLogging();
+}
+
+//==========================================================================================
+inline Assembly * MethodTable::GetAssembly()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetModule()->GetAssembly();
+}
+
+//==========================================================================================
+// DO NOT ADD ANY ASSERTS OR ANY OTHER CODE TO THIS METHOD.
+// DO NOT USE THIS METHOD.
+// Yes folks, for better or worse the debugger pokes supposed object addresses
+// to try to see if objects are valid, possibly firing an AccessViolation or
+// worse. Thus it is "correct" behaviour for this to AV, and incorrect
+// behaviour for it to assert if called on an invalid pointer.
+inline PTR_EEClass MethodTable::GetClassWithPossibleAV()
+{
+ CANNOT_HAVE_CONTRACT;
+ SUPPORTS_DAC;
+ return GetClass_NoLogging();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IsClassPointerValid()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ LowBits lowBits = union_getLowBits(m_pCanonMT);
+ if (lowBits == UNION_EECLASS)
+ {
+ return (m_pEEClass != NULL);
+ }
+ else if (lowBits == UNION_METHODTABLE)
+ {
+ // pointer to canonical MethodTable.
+ TADDR canonicalMethodTable = union_getPointer(m_pCanonMT);
+ return (PTR_MethodTable(canonicalMethodTable)->m_pEEClass != NULL);
+ }
+#ifdef FEATURE_PREJIT
+ else if (lowBits == UNION_INDIRECTION)
+ {
+ // pointer to indirection cell that points to canonical MethodTable
+ TADDR canonicalMethodTable = *PTR_TADDR(union_getPointer(m_pCanonMT));
+ if (CORCOMPILE_IS_POINTER_TAGGED(canonicalMethodTable))
+ return FALSE;
+ return (PTR_MethodTable(canonicalMethodTable)->m_pEEClass != NULL);
+ }
+#endif
+ _ASSERTE(!"Malformed m_pEEClass in MethodTable");
+ return FALSE;
+}
+
+//==========================================================================================
+// Does this immediate item live in an NGEN module?
+inline BOOL MethodTable::IsZapped()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef FEATURE_PREJIT
+ return GetFlag(enum_flag_IsZapped);
+#else
+ return FALSE;
+#endif
+}
+
+//==========================================================================================
+// For types that are part of an ngen-ed assembly this gets the
+// Module* that contains this methodtable.
+inline PTR_Module MethodTable::GetZapModule()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ PTR_Module zapModule = NULL;
+ if (IsZapped())
+ {
+ zapModule = m_pLoaderModule;
+ }
+
+ return zapModule;
+}
+
+//==========================================================================================
+inline PTR_Module MethodTable::GetLoaderModule()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pLoaderModule;
+}
+
+inline PTR_LoaderAllocator MethodTable::GetLoaderAllocator()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetLoaderModule()->GetLoaderAllocator();
+}
+
+
+
+#ifndef DACCESS_COMPILE
+//==========================================================================================
+inline void MethodTable::SetLoaderModule(Module* pModule)
+{
+ WRAPPER_NO_CONTRACT;
+ m_pLoaderModule = pModule;
+}
+
+inline void MethodTable::SetLoaderAllocator(LoaderAllocator* pAllocator)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pAllocator == GetLoaderAllocator());
+
+ if (pAllocator->Id()->IsCollectible())
+ {
+ SetFlag(enum_flag_Collectible);
+ }
+}
+
+#endif
+
+//==========================================================================================
+inline WORD MethodTable::GetNumNonVirtualSlots()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return HasNonVirtualSlots() ? GetClass()->GetNumNonVirtualSlots() : 0;
+}
+
+//==========================================================================================
+inline WORD MethodTable::GetNumInstanceFields()
+{
+ WRAPPER_NO_CONTRACT;
+ return (GetClass()->GetNumInstanceFields());
+}
+
+//==========================================================================================
+inline WORD MethodTable::GetNumStaticFields()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (GetClass()->GetNumStaticFields());
+}
+
+//==========================================================================================
+inline WORD MethodTable::GetNumThreadStaticFields()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (GetClass()->GetNumThreadStaticFields());
+}
+
+//==========================================================================================
+inline DWORD MethodTable::GetNumInstanceFieldBytes()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return(GetBaseSize() - GetClass()->GetBaseSizePadding());
+}
+
+//==========================================================================================
+inline WORD MethodTable::GetNumIntroducedInstanceFields()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ WORD wNumFields = GetNumInstanceFields();
+
+ MethodTable * pParentMT = GetParentMethodTable();
+ if (pParentMT != NULL)
+ {
+ WORD wNumParentFields = pParentMT->GetNumInstanceFields();
+
+ // If this assert fires, then our bookkeeping is bad. Perhaps we incremented the count
+ // of fields on the base class w/o incrementing the count in the derived class. (EnC scenarios).
+ _ASSERTE(wNumFields >= wNumParentFields);
+
+ wNumFields -= wNumParentFields;
+ }
+
+ return(wNumFields);
+}
+
+//==========================================================================================
+inline DWORD MethodTable::GetAlignedNumInstanceFieldBytes()
+{
+ WRAPPER_NO_CONTRACT;
+ return((GetNumInstanceFieldBytes() + 3) & (~3));
+}
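+
+// (x + 3) & ~3 rounds x up to the next multiple of 4: e.g. 5 -> 8, 8 -> 8.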
+
+//==========================================================================================
+inline PTR_FieldDesc MethodTable::GetApproxFieldDescListRaw()
+{
+ WRAPPER_NO_CONTRACT;
+ // Careful about using this method. If it's possible that fields may have been added via EnC, then
+ // you must use the FieldDescIterator, as any fields added via EnC won't be in the raw list.
+
+ return GetClass()->GetFieldDescList();
+}
+
+#ifdef FEATURE_COMINTEROP
+//==========================================================================================
+inline DWORD MethodTable::IsComClassInterface()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetClass()->IsComClassInterface();
+}
+
+//==========================================================================================
+inline DWORD MethodTable::IsComImport()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetClass()->IsComImport();
+}
+
+//==========================================================================================
+// Sparse VTables. These require a SparseVTableMap in the EEClass in
+// order to record how the CLR's vtable slots map across to COM
+// Interop slots.
+//
+inline int MethodTable::IsSparseForCOMInterop()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetClass()->IsSparseForCOMInterop();
+}
+
+//==========================================================================================
+inline int MethodTable::IsComEventItfType()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(GetClass());
+ return GetClass()->IsComEventItfType();
+}
+
+#endif // FEATURE_COMINTEROP
+
+//==========================================================================================
+inline DWORD MethodTable::GetAttrClass()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetClass()->GetAttrClass();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IsSerializable()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetClass()->IsSerializable();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::ContainsStackPtr()
+{
+ WRAPPER_NO_CONTRACT;
+ return (this == g_ArgumentHandleMT ||
+ this == g_ArgIteratorMT ||
+ this == g_TypedReferenceMT);
+}
+
+//==========================================================================================
+inline BOOL MethodTable::SupportsGenericInterop(TypeHandle::InteropKind interopKind,
+ MethodTable::Mode mode /*= modeAll*/)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_COMINTEROP
+ return ((IsInterface() || IsDelegate()) && // interface or delegate
+ HasInstantiation() && // generic
+ !IsSharedByGenericInstantiations() && // unshared
+ !ContainsGenericVariables() && // closed over concrete types
+ // defined in .winmd or one of the redirected mscorlib interfaces
+ ((((mode & modeProjected) != 0) && IsProjectedFromWinRT()) ||
+ (((mode & modeRedirected) != 0) && (IsWinRTRedirectedInterface(interopKind) || IsWinRTRedirectedDelegate()))));
+#else // FEATURE_COMINTEROP
+ return FALSE;
+#endif // FEATURE_COMINTEROP
+}
+
+#ifdef FEATURE_REMOTING
+//==========================================================================================
+inline BOOL MethodTable::CannotBeBlittedByObjectCloner()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetClass()->CannotBeBlittedByObjectCloner();
+}
+#endif
+
+//==========================================================================================
+inline BOOL MethodTable::IsNotTightlyPacked()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetClass()->IsNotTightlyPacked();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::HasFieldsWhichMustBeInited()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetClass()->HasFieldsWhichMustBeInited();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::SupportsAutoNGen()
+{
+ LIMITED_METHOD_CONTRACT;
+#ifndef FEATURE_CORECLR
+ return GetAssembly()->SupportsAutoNGen();
+#else
+ return FALSE;
+#endif
+}
+
+//==========================================================================================
+inline BOOL MethodTable::RunCCTorAsIfNGenImageExists()
+{
+ LIMITED_METHOD_CONTRACT;
+#ifndef FEATURE_CORECLR
+ return this->SupportsAutoNGen();
+#else
+#ifdef FEATURE_CORESYSTEM
+ return TRUE; // On our coresystem builds we will always be using triton in the customer scenario.
+#else
+ return FALSE;
+#endif
+#endif
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IsAbstract()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetClass()->IsAbstract();
+}
+
+//==========================================================================================
+#ifdef FEATURE_REMOTING
+inline BOOL MethodTable::HasRemotableMethodInfo()
+{
+ WRAPPER_NO_CONTRACT;
+ return (IsMarshaledByRef() || IsInterface() || this == g_pObjectClass || g_pObjectClass == NULL) && IsCanonicalMethodTable();
+}
+
+//==========================================================================================
+inline void MethodTable::SetHasRemotingVtsInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_HasRemotingVtsInfo);
+}
+
+//==========================================================================================
+inline BOOL MethodTable::HasRemotingVtsInfo()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_HasRemotingVtsInfo);
+}
+
+//==========================================================================================
+inline PTR_RemotingVtsInfo MethodTable::GetRemotingVtsInfo()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(HasRemotingVtsInfo());
+ return *GetRemotingVtsInfoPtr();
+}
+#endif // FEATURE_REMOTING
+
+#ifdef FEATURE_COMINTEROP
+//==========================================================================================
+inline void MethodTable::SetHasGuidInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(IsInterface() || (HasCCWTemplate() && IsDelegate()));
+
+ // for delegates, having CCW template implies having GUID info
+ if (IsInterface())
+ SetFlag(enum_flag_IfInterfaceThenHasGuidInfo);
+}
+
+//==========================================================================================
+inline BOOL MethodTable::HasGuidInfo()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsInterface())
+ return GetFlag(enum_flag_IfInterfaceThenHasGuidInfo);
+
+ // HasCCWTemplate() is intentionally checked first here to avoid hitting
+ // g_pMulticastDelegateClass == NULL inside IsDelegate() during startup
+ return HasCCWTemplate() && IsDelegate();
+}
+
+//==========================================================================================
+// True IFF the type has a GUID explicitly assigned to it (including WinRT generic interfaces
+// where the GUID is computed).
+inline BOOL MethodTable::HasExplicitGuid()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ GUID guid;
+ GetGuid(&guid, FALSE);
+ return (guid != GUID_NULL);
+}
+
+//==========================================================================================
+inline void MethodTable::SetHasCCWTemplate()
+{
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_HasCCWTemplate);
+}
+
+//==========================================================================================
+inline BOOL MethodTable::HasCCWTemplate()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_HasCCWTemplate);
+}
+
+//==========================================================================================
+inline void MethodTable::SetHasRCWPerTypeData()
+{
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_HasRCWPerTypeData);
+}
+
+//==========================================================================================
+inline BOOL MethodTable::HasRCWPerTypeData()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_HasRCWPerTypeData);
+}
+
+//==========================================================================================
+// Get the GUID used for WinRT interop
+ // * if the type is not a WinRT type or a redirected interface, return FALSE
+inline BOOL MethodTable::GetGuidForWinRT(GUID *pGuid)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ BOOL bRes = FALSE;
+ if ((IsProjectedFromWinRT() && !HasInstantiation()) ||
+ (SupportsGenericInterop(TypeHandle::Interop_NativeToManaged) && IsLegalNonArrayWinRTType()))
+ {
+ bRes = SUCCEEDED(GetGuidNoThrow(pGuid, TRUE, FALSE));
+ }
+
+ return bRes;
+}
+
+
+#endif // FEATURE_COMINTEROP
+
+//==========================================================================================
+// The following two methods produce correct results only if this type is
+// marked Serializable (verified by assert in checked builds) and the field
+// in question was introduced in this type (the index is the FieldDesc
+// index).
+inline BOOL MethodTable::IsFieldNotSerialized(DWORD dwFieldIndex)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(IsSerializable());
+#ifdef FEATURE_REMOTING
+ if (!HasRemotingVtsInfo())
+ return FALSE;
+
+ return GetRemotingVtsInfo()->IsNotSerialized(dwFieldIndex);
+#else
+ return FALSE;
+#endif
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IsFieldOptionallySerialized(DWORD dwFieldIndex)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(IsSerializable());
+#ifdef FEATURE_REMOTING
+ if (!HasRemotingVtsInfo())
+ return FALSE;
+
+ return GetRemotingVtsInfo()->IsOptionallySerialized(dwFieldIndex);
+#else
+ return FALSE;
+#endif
+}
+
+//==========================================================================================
+// Is pParentMT System.Enum? (Cannot be called before System.Enum is loaded.)
+inline BOOL MethodTable::IsEnum()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // We should not be calling this before our parent method table pointer
+ // is valid.
+ _ASSERTE_IMPL(IsParentMethodTablePointerValid());
+
+ PTR_MethodTable pParentMT = GetParentMethodTable();
+
+ // Make sure that we are not using this method during startup
+ _ASSERTE(g_pEnumClass != NULL);
+
+ return (pParentMT == g_pEnumClass);
+}
+
+//==========================================================================================
+// Is pParentMT either System.ValueType or System.Enum?
+inline BOOL MethodTable::IsValueType()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ g_IBCLogger.LogMethodTableAccess(this);
+
+ return GetFlag(enum_flag_Category_ValueType_Mask) == enum_flag_Category_ValueType;
+}
+
+//==========================================================================================
+inline CorElementType MethodTable::GetArrayElementType()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE (IsArray());
+ return dac_cast<PTR_ArrayClass>(GetClass())->GetArrayElementType();
+}
+
+//==========================================================================================
+inline DWORD MethodTable::GetRank()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE (IsArray());
+ if (GetFlag(enum_flag_Category_IfArrayThenSzArray))
+ return 1; // ELEMENT_TYPE_SZARRAY
+ else
+ return dac_cast<PTR_ArrayClass>(GetClass())->GetRank();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IsTruePrimitive()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_Category_Mask) == enum_flag_Category_TruePrimitive;
+}
+
+//==========================================================================================
+inline void MethodTable::SetIsTruePrimitive()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ SetFlag(enum_flag_Category_TruePrimitive);
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IsBlittable()
+{
+ WRAPPER_NO_CONTRACT;
+#ifndef DACCESS_COMPILE
+ _ASSERTE(GetClass());
+ return GetClass()->IsBlittable();
+#else // DACCESS_COMPILE
+ DacNotImpl();
+ return false;
+#endif // DACCESS_COMPILE
+}
+
+//==========================================================================================
+inline BOOL MethodTable::HasClassConstructor()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetFlag(enum_flag_HasCctor);
+}
+
+//==========================================================================================
+inline void MethodTable::SetHasClassConstructor()
+{
+ WRAPPER_NO_CONTRACT;
+ return SetFlag(enum_flag_HasCctor);
+}
+
+//==========================================================================================
+inline WORD MethodTable::GetClassConstructorSlot()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(HasClassConstructor());
+
+ // The class constructor slot is the first non-vtable slot
+ return GetNumVirtuals();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::HasDefaultConstructor()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetFlag(enum_flag_HasDefaultCtor);
+}
+
+//==========================================================================================
+inline void MethodTable::SetHasDefaultConstructor()
+{
+ WRAPPER_NO_CONTRACT;
+ return SetFlag(enum_flag_HasDefaultCtor);
+}
+
+//==========================================================================================
+inline WORD MethodTable::GetDefaultConstructorSlot()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(HasDefaultConstructor());
+
+ // The default ctor slot is right after the cctor slot, if there is one
+ return GetNumVirtuals() + (HasClassConstructor() ? 1 : 0);
+}
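+
+// Example of the non-vtable slot layout implied by the two accessors above:
+// a class with 5 virtuals, a cctor and a default ctor places the cctor in
+// slot 5 and the default ctor in slot 6; without a cctor the default ctor
+// takes slot 5 itself.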
+
+//==========================================================================================
+inline BOOL MethodTable::HasLayout()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(GetClass());
+ return GetClass()->HasLayout();
+}
+
+//==========================================================================================
+inline MethodDesc* MethodTable::GetMethodDescForSlot(DWORD slot)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PCODE pCode = GetRestoredSlot(slot);
+
+ // This is an optimization that we can take advantage of if we're trying
+ // to get the MethodDesc for an interface virtual, since their slots
+ // always point to the stub.
+ if (IsInterface() && slot < GetNumVirtuals())
+ {
+ return MethodDesc::GetMethodDescFromStubAddr(pCode);
+ }
+
+ return MethodTable::GetMethodDescForSlotAddress(pCode);
+}
+
+#ifndef DACCESS_COMPILE
+
+//==========================================================================================
+inline INT32 MethodTable::MethodIterator::GetNumMethods() const
+{
+ LIMITED_METHOD_CONTRACT;
+ // assert that number of methods hasn't changed during the iteration
+ CONSISTENCY_CHECK( m_pMethodData->GetNumMethods() == static_cast< UINT32 >( m_iMethods ) );
+ return m_iMethods;
+}
+
+//==========================================================================================
+// Returns TRUE if it's valid to request data from the current position
+inline BOOL MethodTable::MethodIterator::IsValid() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_iCur >= 0 && m_iCur < GetNumMethods();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::MethodIterator::MoveTo(UINT32 idx)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_iCur = (INT32)idx;
+ return IsValid();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::MethodIterator::Prev()
+{
+ WRAPPER_NO_CONTRACT;
+ if (IsValid())
+ --m_iCur;
+ return (IsValid());
+}
+
+//==========================================================================================
+inline BOOL MethodTable::MethodIterator::Next()
+{
+ WRAPPER_NO_CONTRACT;
+ if (IsValid())
+ ++m_iCur;
+ return (IsValid());
+}
+
+//==========================================================================================
+inline void MethodTable::MethodIterator::MoveToBegin()
+{
+ WRAPPER_NO_CONTRACT;
+ m_iCur = 0;
+}
+
+//==========================================================================================
+inline void MethodTable::MethodIterator::MoveToEnd()
+{
+ WRAPPER_NO_CONTRACT;
+ m_iCur = GetNumMethods() - 1;
+}
+
+//==========================================================================================
+inline UINT32 MethodTable::MethodIterator::GetSlotNumber() const {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ return (UINT32)m_iCur;
+}
+
+//==========================================================================================
+inline UINT32 MethodTable::MethodIterator::GetImplSlotNumber() const {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ return (UINT32)m_pMethodData->GetImplSlotNumber(m_iCur);
+}
+
+//==========================================================================================
+inline BOOL MethodTable::MethodIterator::IsVirtual() const {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ return m_iCur < (INT32)(GetNumVirtuals());
+}
+
+//==========================================================================================
+inline UINT32 MethodTable::MethodIterator::GetNumVirtuals() const {
+ LIMITED_METHOD_CONTRACT;
+ return m_pMethodData->GetNumVirtuals();
+}
+
+//==========================================================================================
+inline DispatchSlot MethodTable::MethodIterator::GetTarget() const {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ return m_pMethodData->GetImplSlot(m_iCur);
+}
+
+//==========================================================================================
+inline MethodDesc *MethodTable::MethodIterator::GetMethodDesc() const {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ MethodDesc *pMD = m_pMethodData->GetImplMethodDesc(m_iCur);
+ CONSISTENCY_CHECK(CheckPointer(pMD));
+ return pMD;
+}
+
+//==========================================================================================
+inline MethodDesc *MethodTable::MethodIterator::GetDeclMethodDesc() const {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ MethodDesc *pMD = m_pMethodData->GetDeclMethodDesc(m_iCur);
+ CONSISTENCY_CHECK(CheckPointer(pMD));
+ CONSISTENCY_CHECK(pMD->GetSlot() == GetSlotNumber());
+ return pMD;
+}
+
+#endif // DACCESS_COMPILE
+
+//==========================================================================================
+// Non-canonical types share the method bodies with the canonical type. So the canonical
+// type can be said to own the method bodies. Hence, by default, IntroducedMethodIterator
+// only lets you iterate methods of the canonical type. You have to pass in
+// restrictToCanonicalTypes=FALSE to iterate methods through a non-canonical type.
+
+inline MethodTable::IntroducedMethodIterator::IntroducedMethodIterator(
+ MethodTable *pMT,
+ BOOL restrictToCanonicalTypes /* = TRUE */ )
+{
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(pMT->IsCanonicalMethodTable() || !restrictToCanonicalTypes);
+
+ SetChunk(pMT->GetClass()->GetChunks());
+}
+
+//==========================================================================================
+FORCEINLINE BOOL MethodTable::IntroducedMethodIterator::Next()
+{
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+
+ // Check whether the next MethodDesc is still within the bounds of the current chunk
+ TADDR pNext = dac_cast<TADDR>(m_pMethodDesc) + m_pMethodDesc->SizeOf();
+
+ if (pNext < m_pChunkEnd)
+ {
+ // Just skip to the next method in the same chunk
+ m_pMethodDesc = PTR_MethodDesc(pNext);
+ }
+ else
+ {
+ _ASSERTE(pNext == m_pChunkEnd);
+
+ // We have walked all the methods in the current chunk. Move on
+ // to the next chunk.
+ SetChunk(m_pChunk->GetNextChunk());
+ }
+
+ return IsValid();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IntroducedMethodIterator::IsValid() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pMethodDesc != NULL;
+}
+
+//==========================================================================================
+inline MethodDesc * MethodTable::IntroducedMethodIterator::GetMethodDesc() const
+{
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsValid());
+ return m_pMethodDesc;
+}
+
+//==========================================================================================
+inline DWORD MethodTable::GetIndexOfVtableIndirection(DWORD slotNum)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE((1 << VTABLE_SLOTS_PER_CHUNK_LOG2) == VTABLE_SLOTS_PER_CHUNK);
+
+ return slotNum >> VTABLE_SLOTS_PER_CHUNK_LOG2;
+}
+
+//==========================================================================================
+inline DWORD MethodTable::GetStartSlotForVtableIndirection(UINT32 indirectionIndex, DWORD wNumVirtuals)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(indirectionIndex < GetNumVtableIndirections(wNumVirtuals));
+
+ return indirectionIndex * VTABLE_SLOTS_PER_CHUNK;
+}
+
+//==========================================================================================
+inline DWORD MethodTable::GetEndSlotForVtableIndirection(UINT32 indirectionIndex, DWORD wNumVirtuals)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(indirectionIndex < GetNumVtableIndirections(wNumVirtuals));
+
+ DWORD end = (indirectionIndex + 1) * VTABLE_SLOTS_PER_CHUNK;
+
+ if (end > wNumVirtuals)
+ {
+ end = wNumVirtuals;
+ }
+
+ return end;
+}
+
+//==========================================================================================
+inline UINT32 MethodTable::GetIndexAfterVtableIndirection(UINT32 slotNum)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE((1 << VTABLE_SLOTS_PER_CHUNK_LOG2) == VTABLE_SLOTS_PER_CHUNK);
+
+ return (slotNum & (VTABLE_SLOTS_PER_CHUNK - 1));
+}
+
+//==========================================================================================
+inline DWORD MethodTable::GetNumVtableIndirections(DWORD wNumVirtuals)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE((1 << VTABLE_SLOTS_PER_CHUNK_LOG2) == VTABLE_SLOTS_PER_CHUNK);
+
+ return (wNumVirtuals + (VTABLE_SLOTS_PER_CHUNK - 1)) >> VTABLE_SLOTS_PER_CHUNK_LOG2;
+}
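+
+// Worked example, assuming VTABLE_SLOTS_PER_CHUNK == 8 (LOG2 == 3): a type
+// with 19 virtuals has (19 + 7) >> 3 == 3 indirection chunks; slot 12 lives
+// in chunk 12 >> 3 == 1 at index 12 & 7 == 4, and the last chunk is trimmed
+// to slots [16, 19) by GetEndSlotForVtableIndirection.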
+
+//==========================================================================================
+inline PTR_PTR_PCODE MethodTable::GetVtableIndirections()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<PTR_PTR_PCODE>(dac_cast<TADDR>(this) + sizeof(MethodTable));
+}
+
+//==========================================================================================
+inline DWORD MethodTable::GetNumVtableIndirections()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetNumVtableIndirections(GetNumVirtuals_NoLogging());
+}
+
+//==========================================================================================
+inline MethodTable::VtableIndirectionSlotIterator::VtableIndirectionSlotIterator(MethodTable *pMT)
+ : m_pSlot(pMT->GetVtableIndirections()),
+ m_i((DWORD) -1),
+ m_count(pMT->GetNumVtableIndirections()),
+ m_pMT(pMT)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+//==========================================================================================
+inline MethodTable::VtableIndirectionSlotIterator::VtableIndirectionSlotIterator(MethodTable *pMT, DWORD index)
+ : m_pSlot(pMT->GetVtableIndirections() + index),
+ m_i(index),
+ m_count(pMT->GetNumVtableIndirections()),
+ m_pMT(pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ PRECONDITION(index != (DWORD) -1 && index < m_count);
+}
+
+//==========================================================================================
+inline BOOL MethodTable::VtableIndirectionSlotIterator::Next()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ PRECONDITION(!Finished());
+ if (m_i != (DWORD) -1)
+ m_pSlot++;
+ return (++m_i < m_count);
+}
+
+//==========================================================================================
+inline BOOL MethodTable::VtableIndirectionSlotIterator::Finished()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (m_i == m_count);
+}
+
+//==========================================================================================
+inline DWORD MethodTable::VtableIndirectionSlotIterator::GetIndex()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_i;
+}
+
+//==========================================================================================
+inline DWORD MethodTable::VtableIndirectionSlotIterator::GetOffsetFromMethodTable()
+{
+ WRAPPER_NO_CONTRACT;
+ PRECONDITION(m_i != (DWORD) -1 && m_i < m_count);
+
+ return GetVtableOffset() + sizeof(PTR_PCODE) * m_i;
+}
+
+//==========================================================================================
+inline PTR_PCODE MethodTable::VtableIndirectionSlotIterator::GetIndirectionSlot()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ PRECONDITION(m_i != (DWORD) -1 && m_i < m_count);
+
+ return *m_pSlot;
+}
+
+//==========================================================================================
+#ifndef DACCESS_COMPILE
+inline void MethodTable::VtableIndirectionSlotIterator::SetIndirectionSlot(PTR_PCODE pChunk)
+{
+ LIMITED_METHOD_CONTRACT;
+ *m_pSlot = pChunk;
+}
+#endif
+
+//==========================================================================================
+inline DWORD MethodTable::VtableIndirectionSlotIterator::GetStartSlot()
+{
+ WRAPPER_NO_CONTRACT;
+ PRECONDITION(m_i != (DWORD) -1 && m_i < m_count);
+
+ return GetStartSlotForVtableIndirection(m_i, m_pMT->GetNumVirtuals());
+}
+
+//==========================================================================================
+inline DWORD MethodTable::VtableIndirectionSlotIterator::GetEndSlot()
+{
+ WRAPPER_NO_CONTRACT;
+ PRECONDITION(m_i != (DWORD) -1 && m_i < m_count);
+
+ return GetEndSlotForVtableIndirection(m_i, m_pMT->GetNumVirtuals());
+}
+
+//==========================================================================================
+inline DWORD MethodTable::VtableIndirectionSlotIterator::GetNumSlots()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetEndSlot() - GetStartSlot();
+}
+
+//==========================================================================================
+inline DWORD MethodTable::VtableIndirectionSlotIterator::GetSize()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetNumSlots() * sizeof(PCODE);
+}
+
+//==========================================================================================
+// Create a new iterator over the vtable indirection slots
+// The iterator starts just before the first item
+inline MethodTable::VtableIndirectionSlotIterator MethodTable::IterateVtableIndirectionSlots()
+{
+ WRAPPER_NO_CONTRACT;
+ return VtableIndirectionSlotIterator(this);
+}
+
+//==========================================================================================
+// Create a new iterator over the vtable indirection slots, starting at the index specified
+inline MethodTable::VtableIndirectionSlotIterator MethodTable::IterateVtableIndirectionSlotsFrom(DWORD index)
+{
+ WRAPPER_NO_CONTRACT;
+ return VtableIndirectionSlotIterator(this, index);
+}
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_COMINTEROP
+
+//==========================================================================================
+inline ComCallWrapperTemplate *MethodTable::GetComCallWrapperTemplate()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (HasCCWTemplate())
+ {
+ return *GetCCWTemplatePtr();
+ }
+ return GetClass()->GetComCallWrapperTemplate();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::SetComCallWrapperTemplate(ComCallWrapperTemplate *pTemplate)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (HasCCWTemplate())
+ {
+ TypeHandle th(this);
+ g_IBCLogger.LogTypeMethodTableWriteableAccess(&th);
+ return (InterlockedCompareExchangeT(EnsureWritablePages(GetCCWTemplatePtr()), pTemplate, NULL) == NULL);
+ }
+ g_IBCLogger.LogEEClassCOWTableAccess(this);
+ return GetClass_NoLogging()->SetComCallWrapperTemplate(pTemplate);
+}
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+//==========================================================================================
+inline ClassFactoryBase *MethodTable::GetComClassFactory()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetClass()->GetComClassFactory();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::SetComClassFactory(ClassFactoryBase *pFactory)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ g_IBCLogger.LogEEClassCOWTableAccess(this);
+ return GetClass_NoLogging()->SetComClassFactory(pFactory);
+}
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+#endif // FEATURE_COMINTEROP
+#endif // DACCESS_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+//==========================================================================================
+inline BOOL MethodTable::IsProjectedFromWinRT()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(GetClass());
+ return GetClass()->IsProjectedFromWinRT();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IsExportedToWinRT()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(GetClass());
+ return GetClass()->IsExportedToWinRT();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IsWinRTDelegate()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (IsProjectedFromWinRT() && IsDelegate()) || IsWinRTRedirectedDelegate();
+}
+
+#else // FEATURE_COMINTEROP
+
+//==========================================================================================
+inline BOOL MethodTable::IsProjectedFromWinRT()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return FALSE;
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IsExportedToWinRT()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return FALSE;
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IsWinRTDelegate()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return FALSE;
+}
+
+#endif // FEATURE_COMINTEROP
+
+//==========================================================================================
+inline UINT32 MethodTable::GetNativeSize()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(GetClass());
+ return GetClass()->GetNativeSize();
+}
+
+//==========================================================================================
+inline PTR_MethodTable MethodTable::GetCanonicalMethodTable()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef _DEBUG
+ LowBits lowBits = union_getLowBits(m_pCanonMT);
+ if (lowBits == UNION_EECLASS)
+ {
+ return dac_cast<PTR_MethodTable>(this);
+ }
+ else if (lowBits == UNION_METHODTABLE)
+ {
+ // pointer to canonical MethodTable.
+ return PTR_MethodTable(union_getPointer(m_pCanonMT));
+ }
+#ifdef FEATURE_PREJIT
+ else if (lowBits == UNION_INDIRECTION)
+ {
+ return PTR_MethodTable(*PTR_TADDR(union_getPointer(m_pCanonMT)));
+ }
+#endif
+ _ASSERTE(!"Malformed m_pCanonMT in MethodTable");
+ return NULL;
+#else
+ TADDR addr = m_pCanonMT;
+
+ if ((addr & 2) == 0)
+ return dac_cast<PTR_MethodTable>(this);
+
+#ifdef FEATURE_PREJIT
+ if ((addr & 1) != 0)
+ return PTR_MethodTable(*PTR_TADDR(addr - 3));
+#endif
+
+ return PTR_MethodTable(addr - 2);
+#endif
+}
+
+//==========================================================================================
+inline TADDR MethodTable::GetCanonicalMethodTableFixup()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef FEATURE_PREJIT
+ LowBits lowBits = union_getLowBits(m_pCanonMT);
+ if (lowBits == UNION_INDIRECTION)
+ {
+ // pointer to canonical MethodTable.
+ return *PTR_TADDR(union_getPointer(m_pCanonMT));
+ }
+ else
+#endif
+ {
+ return NULL;
+ }
+}
+
+//==========================================================================================
+#ifndef DACCESS_COMPILE
+FORCEINLINE BOOL MethodTable::IsEquivalentTo(MethodTable *pOtherMT COMMA_INDEBUG(TypeHandlePairList *pVisited /*= NULL*/))
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (this == pOtherMT)
+ return TRUE;
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+ // bail early for normal types
+ if (!HasTypeEquivalence() || !pOtherMT->HasTypeEquivalence())
+ return FALSE;
+
+ if (IsEquivalentTo_Worker(pOtherMT COMMA_INDEBUG(pVisited)))
+ return TRUE;
+#endif // FEATURE_TYPEEQUIVALENCE
+
+ return FALSE;
+}
+#endif
+
+//==========================================================================================
+inline IMDInternalImport* MethodTable::GetMDImport()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetModule()->GetMDImport();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IsSealed()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetClass()->IsSealed();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IsManagedSequential()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetClass()->IsManagedSequential();
+}
+
+//==========================================================================================
+inline BOOL MethodTable::HasExplicitSize()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetClass()->HasExplicitSize();
+}
+
+//==========================================================================================
+inline DWORD MethodTable::GetPerInstInfoSize()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetNumDicts() * sizeof(TypeHandle*);
+}
+
+//==========================================================================================
+inline EEClassLayoutInfo *MethodTable::GetLayoutInfo()
+{
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(HasLayout());
+ return GetClass()->GetLayoutInfo();
+}
+
+//==========================================================================================
+// These come after the pointers to the generic dictionaries (if any)
+inline DWORD MethodTable::GetInterfaceMapSize()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ DWORD cbIMap = GetNumInterfaces() * sizeof(InterfaceInfo_t);
+#ifdef FEATURE_COMINTEROP
+ cbIMap += (HasDynamicInterfaceMap() ? sizeof(DWORD_PTR) : 0);
+#endif
+ return cbIMap;
+}
+
+//==========================================================================================
+ // These are the generic dictionaries themselves and come after
+// the interface map. In principle they need not be inline in the method table.
+inline DWORD MethodTable::GetInstAndDictSize()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (!HasInstantiation())
+ return 0;
+ else
+ return DictionaryLayout::GetFirstDictionaryBucketSize(GetNumGenericArgs(), GetClass()->GetDictionaryLayout());
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IsSharedByGenericInstantiations()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ g_IBCLogger.LogMethodTableAccess(this);
+
+ return TestFlagWithMask(enum_flag_GenericsMask, enum_flag_GenericsMask_SharedInst);
+}
+
+//==========================================================================================
+inline BOOL MethodTable::IsCanonicalMethodTable()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (union_getLowBits(m_pCanonMT) == UNION_EECLASS);
+}
+
+//==========================================================================================
+FORCEINLINE BOOL MethodTable::HasInstantiation()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Generics flags cannot be expressed in terms of GetFlag()
+ return !TestFlagWithMask(enum_flag_GenericsMask, enum_flag_GenericsMask_NonGeneric);
+}
+
+//==========================================================================================
+inline void MethodTable::SetHasInstantiation(BOOL fTypicalInstantiation, BOOL fSharedByGenericInstantiations)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!IsStringOrArray());
+ SetFlag(fTypicalInstantiation ? enum_flag_GenericsMask_TypicalInst :
+ (fSharedByGenericInstantiations ? enum_flag_GenericsMask_SharedInst : enum_flag_GenericsMask_GenericInst));
+}
+//==========================================================================================
+inline BOOL MethodTable::IsGenericTypeDefinition()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Generics flags cannot be expressed in terms of GetFlag()
+ return TestFlagWithMask(enum_flag_GenericsMask, enum_flag_GenericsMask_TypicalInst);
+}
+
+//==========================================================================================
+inline PTR_InterfaceInfo MethodTable::GetInterfaceMap()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return dac_cast<PTR_InterfaceInfo>(m_pMultipurposeSlot2); // m_pInterfaceMap
+}
+
+//==========================================================================================
+FORCEINLINE TADDR MethodTable::GetMultipurposeSlotPtr(WFLAGS2_ENUM flag, const BYTE * offsets)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(GetFlag(flag));
+
+ DWORD offset = offsets[GetFlag((WFLAGS2_ENUM)(flag - 1))];
+
+ if (offset >= sizeof(MethodTable)) {
+ offset += GetNumVtableIndirections() * sizeof(PTR_PCODE);
+ }
+
+ return dac_cast<TADDR>(this) + offset;
+}
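+
+// Sketch of the lookup above: (flag - 1) masks exactly the multipurpose-slot
+// bits below 'flag', so GetFlag((WFLAGS2_ENUM)(flag - 1)) yields which of the
+// earlier slots are occupied; the offsets table maps that pattern to where
+// this slot landed. Offsets >= sizeof(MethodTable) denote overflow slots
+// stored after the vtable chunks, hence the size adjustment.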
+
+//==========================================================================================
+// This method is dependent on the declared order of optional members
+// If you add or remove an optional member or reorder them please change this method
+FORCEINLINE DWORD MethodTable::GetOffsetOfOptionalMember(OptionalMemberId id)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD offset = c_OptionalMembersStartOffsets[GetFlag(enum_flag_MultipurposeSlotsMask)];
+
+ offset += GetNumVtableIndirections() * sizeof(PTR_PCODE);
+
+#undef METHODTABLE_OPTIONAL_MEMBER
+#define METHODTABLE_OPTIONAL_MEMBER(NAME, TYPE, GETTER) \
+ if (id == OptionalMember_##NAME) { \
+ return offset; \
+ } \
+ C_ASSERT(sizeof(TYPE) % sizeof(UINT_PTR) == 0); /* To ensure proper alignment */ \
+ if (Has##NAME()) { \
+ offset += sizeof(TYPE); \
+ }
+
+ METHODTABLE_OPTIONAL_MEMBERS()
+
+ _ASSERTE(!"Wrong optional member" || id == OptionalMember_Count);
+ return offset;
+}
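+
+// Illustrative walk of the accumulation above: if a MethodTable has only
+// GenericsStaticsInfo and GuidInfo, then
+// GetOffsetOfOptionalMember(OptionalMember_GuidInfo) returns the start offset
+// (table lookup plus the vtable-indirection area) + sizeof(GenericsStaticsInfo);
+// optional members the type lacks contribute nothing to the offset.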
+
+//==========================================================================================
+ // This is not the prettiest function, but the allocation size of a MethodTable
+ // used to be computed "by hand" in a few places, which proved quite error-prone.
+ // Essentially the idea is to centralize the computation of size for optional
+ // members, so that whoever next needs to add an optional member will look at this function
+ // and hopefully be less exposed to code doing size computation behind their back.
+inline DWORD MethodTable::GetOptionalMembersAllocationSize(DWORD dwMultipurposeSlotsMask,
+ BOOL needsRemotableMethodInfo,
+ BOOL needsGenericsStaticsInfo,
+ BOOL needsGuidInfo,
+ BOOL needsCCWTemplate,
+ BOOL needsRCWPerTypeData,
+ BOOL needsRemotingVtsInfo,
+ BOOL needsContextStatic,
+ BOOL needsTokenOverflow)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD size = c_OptionalMembersStartOffsets[dwMultipurposeSlotsMask] - sizeof(MethodTable);
+
+ if (needsRemotableMethodInfo)
+ size += sizeof(UINT_PTR);
+ if (needsGenericsStaticsInfo)
+ size += sizeof(GenericsStaticsInfo);
+ if (needsGuidInfo)
+ size += sizeof(UINT_PTR);
+ if (needsCCWTemplate)
+ size += sizeof(UINT_PTR);
+ if (needsRCWPerTypeData)
+ size += sizeof(UINT_PTR);
+ if (needsRemotingVtsInfo)
+ size += sizeof(UINT_PTR);
+ if (needsContextStatic)
+ size += sizeof(UINT_PTR);
+ if (dwMultipurposeSlotsMask & enum_flag_HasInterfaceMap)
+ size += sizeof(UINT_PTR);
+ if (needsTokenOverflow)
+ size += sizeof(UINT_PTR);
+
+ return size;
+}
+
+inline DWORD MethodTable::GetOptionalMembersSize()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetEndOffsetOfOptionalMembers() - GetStartOffsetOfOptionalMembers();
+}
+
+#ifndef DACCESS_COMPILE
+
+//==========================================================================================
+inline PTR_BYTE MethodTable::GetNonGCStaticsBasePointer()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetDomainLocalModule()->GetNonGCStaticsBasePointer(this);
+}
+
+//==========================================================================================
+inline PTR_BYTE MethodTable::GetGCStaticsBasePointer()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetDomainLocalModule()->GetGCStaticsBasePointer(this);
+}
+
+//==========================================================================================
+inline PTR_BYTE MethodTable::GetNonGCThreadStaticsBasePointer()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Get the current thread
+ PTR_Thread pThread = dac_cast<PTR_Thread>(GetThread());
+
+ // Get the current module's ModuleIndex
+ ModuleIndex index = GetModuleForStatics()->GetModuleIndex();
+
+ PTR_ThreadLocalBlock pTLB = ThreadStatics::GetCurrentTLBIfExists(pThread, NULL);
+ if (pTLB == NULL)
+ return NULL;
+
+ PTR_ThreadLocalModule pTLM = pTLB->GetTLMIfExists(index);
+ if (pTLM == NULL)
+ return NULL;
+
+ return pTLM->GetNonGCStaticsBasePointer(this);
+}
+
+//==========================================================================================
+inline PTR_BYTE MethodTable::GetGCThreadStaticsBasePointer()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Get the current thread
+ PTR_Thread pThread = dac_cast<PTR_Thread>(GetThread());
+
+ // Get the current module's ModuleIndex
+ ModuleIndex index = GetModuleForStatics()->GetModuleIndex();
+
+ PTR_ThreadLocalBlock pTLB = ThreadStatics::GetCurrentTLBIfExists(pThread, NULL);
+ if (pTLB == NULL)
+ return NULL;
+
+ PTR_ThreadLocalModule pTLM = pTLB->GetTLMIfExists(index);
+ if (pTLM == NULL)
+ return NULL;
+
+ return pTLM->GetGCStaticsBasePointer(this);
+}
+
+#endif //!DACCESS_COMPILE
+
+//==========================================================================================
+inline PTR_BYTE MethodTable::GetNonGCThreadStaticsBasePointer(PTR_Thread pThread, PTR_AppDomain pDomain)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Get the current module's ModuleIndex
+ ModuleIndex index = GetModuleForStatics()->GetModuleIndex();
+
+ PTR_ThreadLocalBlock pTLB = ThreadStatics::GetCurrentTLBIfExists(pThread, pDomain);
+ if (pTLB == NULL)
+ return NULL;
+
+ PTR_ThreadLocalModule pTLM = pTLB->GetTLMIfExists(index);
+ if (pTLM == NULL)
+ return NULL;
+
+ return pTLM->GetNonGCStaticsBasePointer(this);
+}
+
+//==========================================================================================
+inline PTR_BYTE MethodTable::GetGCThreadStaticsBasePointer(PTR_Thread pThread, PTR_AppDomain pDomain)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Get the current module's ModuleIndex
+ ModuleIndex index = GetModuleForStatics()->GetModuleIndex();
+
+ PTR_ThreadLocalBlock pTLB = ThreadStatics::GetCurrentTLBIfExists(pThread, pDomain);
+ if (pTLB == NULL)
+ return NULL;
+
+ PTR_ThreadLocalModule pTLM = pTLB->GetTLMIfExists(index);
+ if (pTLM == NULL)
+ return NULL;
+
+ return pTLM->GetGCStaticsBasePointer(this);
+}
+
+//==========================================================================================
+inline PTR_DomainLocalModule MethodTable::GetDomainLocalModule(AppDomain * pAppDomain)
+{
+ WRAPPER_NO_CONTRACT;
+ return GetModuleForStatics()->GetDomainLocalModule(pAppDomain);
+}
+
+#ifndef DACCESS_COMPILE
+//==========================================================================================
+inline PTR_DomainLocalModule MethodTable::GetDomainLocalModule()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetModuleForStatics()->GetDomainLocalModule();
+}
+#endif //!DACCESS_COMPILE
+
+//==========================================================================================
+inline OBJECTREF MethodTable::AllocateNoChecks()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ // We know an instance of this class already exists in the same appdomain,
+ // so some checks become redundant. This currently only happens for
+ // Delegate.Combine.
+ CONSISTENCY_CHECK(IsRestored_NoLogging());
+
+ CONSISTENCY_CHECK(CheckInstanceActivated());
+
+ return AllocateObject(this);
+}
+
+#ifdef FEATURE_REMOTING
+//==========================================================================================
+inline BOOL MethodTable::HasContextStatics()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetFlag(enum_flag_ContextStatic);
+}
+
+//==========================================================================================
+inline void MethodTable::SetHasContextStatics()
+{
+ LIMITED_METHOD_CONTRACT;
+ SetFlag(enum_flag_ContextStatic);
+}
+
+//==========================================================================================
+inline DWORD MethodTable::GetContextStaticsOffset()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetContextStaticsBucket()->m_dwContextStaticsOffset;
+}
+
+//==========================================================================================
+inline WORD MethodTable::GetContextStaticsSize()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetContextStaticsBucket()->m_wContextStaticsSize;
+}
+#endif // FEATURE_REMOTING
+
+//==========================================================================================
+inline DWORD MethodTable::GetClassIndex()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetClassIndexFromToken(GetCl());
+}
+
+#ifndef DACCESS_COMPILE
+//==========================================================================================
+// Unbox src into dest, making sure src is of the correct type.
+
+inline BOOL MethodTable::UnBoxInto(void *dest, OBJECTREF src)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (Nullable::IsNullableType(TypeHandle(this)))
+ return Nullable::UnBoxNoGC(dest, src, this);
+ else
+ {
+ if (src == NULL || src->GetMethodTable() != this)
+ return FALSE;
+
+ CopyValueClass(dest, src->UnBox(), this, src->GetAppDomain());
+ }
+ return TRUE;
+}
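+
+// Illustrative (hypothetical) call site, in cooperative GC mode with src protected:
+//
+//   MethodTable * pMT = ...;   // the value type's MethodTable
+//   BYTE * pDest = ...;        // buffer of at least pMT->GetNumInstanceFieldBytes()
+//   if (!pMT->UnBoxInto(pDest, srcRef))
+//       ...;                   // srcRef was null or of a mismatched type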
+
+//==========================================================================================
+// Unbox src into dest. No checks are done.
+
+inline void MethodTable::UnBoxIntoUnchecked(void *dest, OBJECTREF src)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (Nullable::IsNullableType(TypeHandle(this))) {
+ BOOL ret;
+ ret = Nullable::UnBoxNoGC(dest, src, this);
+ _ASSERTE(ret);
+ }
+ else
+ {
+ _ASSERTE(src->GetMethodTable()->GetNumInstanceFieldBytes() == GetNumInstanceFieldBytes());
+
+ CopyValueClass(dest, src->UnBox(), this, src->GetAppDomain());
+ }
+}
+#endif
+//==========================================================================================
+__forceinline TypeHandle::CastResult MethodTable::CanCastToClassOrInterfaceNoGC(MethodTable *pTargetMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pTargetMT));
+ PRECONDITION(!pTargetMT->IsArray());
+ }
+ CONTRACTL_END
+
+ if (pTargetMT->IsInterface())
+ return CanCastToInterfaceNoGC(pTargetMT);
+ else
+ return CanCastToClassNoGC(pTargetMT);
+}
+
+//==========================================================================================
+inline BOOL MethodTable::CanCastToClassOrInterface(MethodTable *pTargetMT, TypeHandlePairList *pVisited)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pTargetMT));
+ PRECONDITION(!pTargetMT->IsArray());
+ PRECONDITION(IsRestored_NoLogging());
+ }
+ CONTRACTL_END
+
+ if (pTargetMT->IsInterface())
+ return CanCastToInterface(pTargetMT, pVisited);
+ else
+ return CanCastToClass(pTargetMT, pVisited);
+}
+
+//==========================================================================================
+FORCEINLINE PTR_Module MethodTable::GetGenericsStaticsModuleAndID(DWORD * pID)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(HasGenericsStaticsInfo());
+
+#ifdef FEATURE_PREJIT
+ // This is a performance-sensitive codepath inlined into JIT helpers. Test the flag directly without
+ // checking IsStringOrArray() first; IsStringOrArray() will always be false here.
+ _ASSERTE(!IsStringOrArray());
+ if (m_dwFlags & enum_flag_StaticsMask_IfGenericsThenCrossModule)
+ {
+ CrossModuleGenericsStaticsInfo *pInfo = m_pWriteableData->GetCrossModuleGenericsStaticsInfo();
+ _ASSERTE(FitsIn<DWORD>(pInfo->m_DynamicTypeID) || pInfo->m_DynamicTypeID == (SIZE_T)-1);
+ *pID = static_cast<DWORD>(pInfo->m_DynamicTypeID);
+ return pInfo->m_pModuleForStatics;
+ }
+#endif // FEATURE_PREJIT
+
+ _ASSERTE(FitsIn<DWORD>(GetGenericsStaticsInfo()->m_DynamicTypeID) || GetGenericsStaticsInfo()->m_DynamicTypeID == (SIZE_T)-1);
+ *pID = static_cast<DWORD>(GetGenericsStaticsInfo()->m_DynamicTypeID);
+ return GetLoaderModule();
+}
+
+//==========================================================================================
+inline OBJECTHANDLE MethodTable::GetLoaderAllocatorObjectHandle()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetLoaderAllocator()->GetLoaderAllocatorObjectHandle();
+}
+
+//==========================================================================================
+FORCEINLINE OBJECTREF MethodTable::GetManagedClassObjectIfExists()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Logging will be done by the slow path
+ LOADERHANDLE handle = GetWriteableData_NoLogging()->GetExposedClassObjectHandle();
+
+ OBJECTREF retVal;
+
+ // GET_LOADERHANDLE_VALUE_FAST macro is inlined here to let us give hint to the compiler
+ // when the return value is not null.
+ if (!LoaderAllocator::GetHandleValueFast(handle, &retVal) &&
+ !GetLoaderAllocator()->GetHandleValueFastPhase2(handle, &retVal))
+ {
+ return NULL;
+ }
+
+ // Only code:MethodTable::GetManagedClassObject sets m_pExposedClassObject, and it ensures that
+ // remoted objects and arrays don't get in.
+ _ASSERTE(!IsArray() && !IsTransparentProxy());
+
+ COMPILER_ASSUME(retVal != NULL);
+ return retVal;
+}
+
+//==========================================================================================
+inline void MethodTable::SetIsArray(CorElementType arrayType, CorElementType elementType)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD category = enum_flag_Category_Array;
+ if (arrayType == ELEMENT_TYPE_SZARRAY)
+ category |= enum_flag_Category_IfArrayThenSzArray;
+
+ _ASSERTE((m_dwFlags & enum_flag_Category_Mask) == 0);
+ m_dwFlags |= category;
+
+ _ASSERTE(GetInternalCorElementType() == arrayType);
+}
+
+//==========================================================================================
+FORCEINLINE BOOL MethodTable::ImplementsInterfaceInline(MethodTable *pInterface)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION(pInterface->IsInterface()); // class we are looking up should be an interface
+ }
+ CONTRACTL_END;
+
+ //
+ // Inline InterfaceMapIterator here for performance reasons
+ //
+
+ DWORD numInterfaces = GetNumInterfaces();
+ if (numInterfaces == 0)
+ return FALSE;
+
+ InterfaceInfo_t *pInfo = GetInterfaceMap();
+
+ do
+ {
+ if (pInfo->GetMethodTable() == pInterface)
+ {
+ // Extensible RCWs need to be handled specially because they can have interfaces
+ // in their map that are added at runtime. These interfaces will have a start offset
+ // of -1 to indicate this. We cannot take for granted that every instance of this
+ // COM object has this interface so FindInterface on these interfaces is made to fail.
+ //
+ // However, we are only considering the statically available slots here
+ // (m_wNumInterface doesn't contain the dynamic slots), so we can safely
+ // ignore this detail.
+ return TRUE;
+ }
+ pInfo++;
+ }
+ while (--numInterfaces);
+
+ return FALSE;
+}
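+
+// The hand-inlined loop above is morally equivalent to this iterator-based form
+// (sketch only, for illustration; the inlined version avoids iterator overhead):
+//
+//   MethodTable::InterfaceMapIterator it = IterateInterfaceMap();
+//   while (it.Next())
+//   {
+//       if (it.GetInterfaceInfo()->GetMethodTable() == pInterface)
+//           return TRUE;
+//   }
+//   return FALSE;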
+
+#endif // !_METHODTABLE_INL_
diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
new file mode 100644
index 0000000000..949bba6e6f
--- /dev/null
+++ b/src/vm/methodtablebuilder.cpp
@@ -0,0 +1,13345 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: METHODTABLEBUILDER.CPP
+//
+
+
+//
+
+//
+// ============================================================================
+
+#include "common.h"
+
+#include "methodtablebuilder.h"
+
+#include "constrainedexecutionregion.h"
+#include "sigbuilder.h"
+#include "dllimport.h"
+#include "fieldmarshaler.h"
+#include "encee.h"
+#include "mdaassistants.h"
+#include "ecmakey.h"
+#include "security.h"
+#include "customattribute.h"
+
+#ifdef FEATURE_REMOTING
+#include "objectclone.h"
+#endif
+
+#ifdef FEATURE_COMINTEROP
+#ifdef FEATURE_FUSION
+#include "policy.h"
+#endif
+#endif
+
+//*******************************************************************************
+// Helper functions to sort GCDescs by offset (descending order)
+int __cdecl compareCGCDescSeries(const void *arg1, const void *arg2)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ CGCDescSeries* gcInfo1 = (CGCDescSeries*) arg1;
+ CGCDescSeries* gcInfo2 = (CGCDescSeries*) arg2;
+
+ return (int)(gcInfo2->GetSeriesOffset() - gcInfo1->GetSeriesOffset());
+}
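+
+// Illustrative (hypothetical) call site: sorting a CGCDescSeries array so that
+// higher offsets come first, as the comparator above implies.
+//
+//   qsort(pSeries, cSeries, sizeof(CGCDescSeries), compareCGCDescSeries);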
+
+//*******************************************************************************
+
+const char* FormatSig(MethodDesc* pMD, LoaderHeap *pHeap, AllocMemTracker *pamTracker);
+
+#ifdef _DEBUG
+unsigned g_dupMethods = 0;
+#endif // _DEBUG
+
+#ifdef FEATURE_COMINTEROP
+WinMDAdapter::RedirectedTypeIndex CalculateWinRTRedirectedTypeIndex(IMDInternalImport * pInternalImport, Module * pModule, mdTypeDef cl)
+{
+ STANDARD_VM_CONTRACT;
+
+ Assembly * pAssembly = pModule->GetAssembly();
+ WinMDAdapter::FrameworkAssemblyIndex assemblyIndex;
+ if (!GetAppDomain()->FindRedirectedAssembly(pAssembly, &assemblyIndex))
+ return WinMDAdapter::RedirectedTypeIndex_Invalid;
+
+ return WinRTTypeNameConverter::GetRedirectedTypeIndexByName(pInternalImport, cl, assemblyIndex);
+}
+#endif // FEATURE_COMINTEROP
+
+//==========================================================================
+// This function is very specific about how it constructs an EEClass. It first
+// determines the necessary size of the vtable and the number of statics that
+// this class requires. The necessary memory is then allocated for the EEClass
+// and its vtable and statics. The class members are then initialized and
+// the memory is returned to the caller.
+//
+// LPEEClass CreateClass()
+//
+// Parameters :
+// [in] scope - scope of the current class not the one requested to be opened
+// [in] cl - class token of the class to be created.
+// [out] ppEEClass - pointer to pointer to hold the address of the EEClass
+// allocated in this function.
+// Return : returns an HRESULT indicating the success of this function.
+//
+// This parameter has been removed but might need to be reinstated if the
+// global for the metadata loader is removed.
+// [in] pIMLoad - MetaDataLoader class/object for the current scope.
+
+
+//==========================================================================
+/*static*/ EEClass *
+MethodTableBuilder::CreateClass( Module *pModule,
+ mdTypeDef cl,
+ BOOL fHasLayout,
+ BOOL fDelegate,
+ BOOL fIsEnum,
+ const MethodTableBuilder::bmtGenericsInfo *bmtGenericsInfo,
+ LoaderAllocator * pAllocator,
+ AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!(fHasLayout && fDelegate));
+ PRECONDITION(!(fHasLayout && fIsEnum));
+ PRECONDITION(CheckPointer(bmtGenericsInfo));
+ }
+ CONTRACTL_END;
+
+ EEClass *pEEClass = NULL;
+ IMDInternalImport *pInternalImport;
+ HRESULT hrToThrow;
+
+ //<TODO>============================================================================
+ // vtabsize and static size need to be converted from pointer sizes to numbers
+ // of bytes; this will be very important for 64-bit NT!
+ // We will need to call on IMetaDataLoad to get these sizes and fill out the
+ // tables
+
+ // From the classref call on metadata to resolve the classref and check scope
+ // to make sure that this class is in the same scope otherwise we need to open
+ // a new scope and possibly file.
+
+ // if the scopes are different call the code to load a new file and get the new scope
+
+ // scopes are the same so we can use the existing scope to get the class info
+
+ // This method needs to be fleshed out more; it currently just returns enough
+ // space for the defined EEClass, and the vtable and statics are not set.
+ //=============================================================================</TODO>
+
+ if (fHasLayout)
+ {
+ pEEClass = new (pAllocator->GetLowFrequencyHeap(), pamTracker) LayoutEEClass();
+ }
+ else if (fDelegate)
+ {
+ pEEClass = new (pAllocator->GetLowFrequencyHeap(), pamTracker) DelegateEEClass();
+ }
+ else
+ {
+ pEEClass = new (pAllocator->GetLowFrequencyHeap(), pamTracker) EEClass(sizeof(EEClass));
+ }
+
+ DWORD dwAttrClass = 0;
+ mdToken tkExtends = mdTokenNil;
+
+ // Set up variance info
+ if (bmtGenericsInfo->pVarianceInfo)
+ {
+ // Variance info is an optional field on EEClass, so ensure the optional field descriptor has been
+ // allocated.
+ EnsureOptionalFieldsAreAllocated(pEEClass, pamTracker, pAllocator->GetLowFrequencyHeap());
+ pEEClass->SetVarianceInfo((BYTE*) pamTracker->Track(
+ pAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(bmtGenericsInfo->GetNumGenericArgs()))));
+
+ memcpy(pEEClass->GetVarianceInfo(), bmtGenericsInfo->pVarianceInfo, bmtGenericsInfo->GetNumGenericArgs());
+ }
+
+ pInternalImport = pModule->GetMDImport();
+
+ if (pInternalImport == NULL)
+ COMPlusThrowHR(COR_E_TYPELOAD);
+
+ IfFailThrow(pInternalImport->GetTypeDefProps(
+ cl,
+ &dwAttrClass,
+ &tkExtends));
+
+ pEEClass->m_dwAttrClass = dwAttrClass;
+
+ // MDVal check: can't be both tdSequentialLayout and tdExplicitLayout
+ if((dwAttrClass & tdLayoutMask) == tdLayoutMask)
+ COMPlusThrowHR(COR_E_TYPELOAD);
+
+ if (IsTdInterface(dwAttrClass))
+ {
+ // MDVal check: must have nil tkExtends and must be tdAbstract
+ if((tkExtends & 0x00FFFFFF)||(!IsTdAbstract(dwAttrClass)))
+ COMPlusThrowHR(COR_E_TYPELOAD);
+ }
+
+ //
+ // Initialize SecurityProperties structure
+ //
+
+ if (IsTdHasSecurity(dwAttrClass))
+ {
+ DWORD dwSecFlags;
+ DWORD dwNullDeclFlags;
+
+ hrToThrow = Security::GetDeclarationFlags(pInternalImport, cl, &dwSecFlags, &dwNullDeclFlags);
+ if (FAILED(hrToThrow))
+ COMPlusThrowHR(hrToThrow);
+
+ // Security properties is an optional field. If we have a non-default value we need to ensure the
+ // optional field descriptor has been allocated.
+ EnsureOptionalFieldsAreAllocated(pEEClass, pamTracker, pAllocator->GetLowFrequencyHeap());
+
+ pEEClass->GetSecurityProperties()->SetFlags(dwSecFlags, dwNullDeclFlags);
+ }
+
+ // Cache class level reliability contract info.
+ DWORD dwReliabilityContract = ::GetReliabilityContract(pInternalImport, cl);
+ if (dwReliabilityContract != RC_NULL)
+ {
+ // Reliability contract is an optional field. If we have a non-default value we need to ensure the
+ // optional field descriptor has been allocated.
+ EnsureOptionalFieldsAreAllocated(pEEClass, pamTracker, pAllocator->GetLowFrequencyHeap());
+
+ pEEClass->SetReliabilityContract(dwReliabilityContract);
+ }
+
+ if (fHasLayout)
+ pEEClass->SetHasLayout();
+
+#ifdef FEATURE_COMINTEROP
+ if (IsTdWindowsRuntime(dwAttrClass))
+ {
+ Assembly *pAssembly = pModule->GetAssembly();
+
+ // On the desktop CLR, we do not allow non-FX assemblies to use/define WindowsRuntimeImport attribute.
+ //
+ // On CoreCLR, however, we do allow non-FX assemblies to have this attribute. This enables scenarios where we can
+ // activate 3rd-party WinRT components outside AppContainer - 1st party WinRT components are already allowed
+ // to be activated outside AppContainer (on both Desktop and CoreCLR).
+#ifdef FEATURE_FUSION
+ if (!pAssembly->IsWinMD() &&
+ Fusion::Util::IsAnyFrameworkAssembly(pAssembly->GetFusionAssemblyName()) != S_OK)
+ {
+ pAssembly->ThrowTypeLoadException(pModule->GetMDImport(), cl, IDS_EE_WINRT_TYPE_IN_ORDINARY_ASSEMBLY);
+ }
+#endif
+
+ pEEClass->SetProjectedFromWinRT();
+ }
+
+ if (pEEClass->IsProjectedFromWinRT())
+ {
+ if (IsTdInterface(dwAttrClass))
+ {
+ //
+ // Check for GuidAttribute
+ //
+ BOOL bHasGuid = FALSE;
+
+ GUID guid;
+ HRESULT hr = pModule->GetMDImport()->GetItemGuid(cl, &guid);
+ IfFailThrow(hr);
+
+ if (IsEqualGUID(guid, GUID_NULL))
+ {
+ // A WinRT interface should have a GUID
+ pModule->GetAssembly()->ThrowTypeLoadException(pModule->GetMDImport(), cl, IDS_EE_WINRT_INTERFACE_WITHOUT_GUID);
+ }
+ }
+ }
+
+ WinMDAdapter::RedirectedTypeIndex redirectedTypeIndex;
+ redirectedTypeIndex = CalculateWinRTRedirectedTypeIndex(pInternalImport, pModule, cl);
+ if (redirectedTypeIndex != WinMDAdapter::RedirectedTypeIndex_Invalid)
+ {
+ EnsureOptionalFieldsAreAllocated(pEEClass, pamTracker, pAllocator->GetLowFrequencyHeap());
+ pEEClass->SetWinRTRedirectedTypeIndex(redirectedTypeIndex);
+ }
+#endif // FEATURE_COMINTEROP
+
+#ifdef _DEBUG
+ pModule->GetClassLoader()->m_dwDebugClasses++;
+#endif
+
+ return pEEClass;
+}
+
+//*******************************************************************************
+//
+// Create a hash of all methods in this class. The hash is from method name to MethodDesc.
+//
+MethodTableBuilder::MethodNameHash *
+MethodTableBuilder::CreateMethodChainHash(
+ MethodTable *pMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodNameHash *pHash = new (GetStackingAllocator()) MethodNameHash();
+ pHash->Init(pMT->GetNumVirtuals(), GetStackingAllocator());
+
+ unsigned numVirtuals = GetParentMethodTable()->GetNumVirtuals();
+ for (unsigned i = 0; i < numVirtuals; ++i)
+ {
+ bmtMethodSlot &slot = (*bmtParent->pSlotTable)[i];
+ bmtRTMethod * pMethod = slot.Decl().AsRTMethod();
+ const MethodSignature &sig = pMethod->GetMethodSignature();
+ pHash->Insert(sig.GetName(), pMethod);
+ }
+
+ // Success
+ return pHash;
+}
+
+//*******************************************************************************
+//
+// Find a method in this class hierarchy - used ONLY by the loader during layout. Do not use at runtime.
+//
+// Returns the matching method from the parent hierarchy, or NULL if no matching
+// method is found.
+//
+// pMethodConstraintsMatch (if non-NULL) receives the result of comparing the method constraints.
+MethodTableBuilder::bmtRTMethod *
+MethodTableBuilder::LoaderFindMethodInParentClass(
+ const MethodSignature & methodSig,
+ BOOL * pMethodConstraintsMatch)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(bmtParent));
+ PRECONDITION(CheckPointer(methodSig.GetModule()));
+ PRECONDITION(CheckPointer(methodSig.GetSignature()));
+ PRECONDITION(HasParent());
+ PRECONDITION(methodSig.GetSignatureLength() != 0);
+ }
+ CONTRACTL_END;
+
+//#if 0
+ MethodNameHash::HashEntry * pEntry;
+
+ // Have we created a hash of all the methods in the class chain?
+ if (bmtParent->pParentMethodHash == NULL)
+ {
+ // There may be such a method, so we will now create a hash table to reduce the pain for
+ // further lookups
+
+ // <TODO> Are we really sure that this is worth doing? </TODO>
+ bmtParent->pParentMethodHash = CreateMethodChainHash(GetParentMethodTable());
+ }
+
+ // We have a hash table, so use it
+ pEntry = bmtParent->pParentMethodHash->Lookup(methodSig.GetName());
+
+ // Traverse the chain of all methods with this name
+ while (pEntry != NULL)
+ {
+ bmtRTMethod * pEntryMethod = pEntry->m_data;
+ const MethodSignature & entrySig = pEntryMethod->GetMethodSignature();
+
+ // Note instantiation info
+ {
+ if (methodSig.Equivalent(entrySig))
+ {
+ if (pMethodConstraintsMatch != NULL)
+ {
+ // Check the constraints are consistent,
+ // and return the result to the caller.
+ // We do this here to avoid recalculating pSubst.
+ *pMethodConstraintsMatch = MetaSig::CompareMethodConstraints(
+ &methodSig.GetSubstitution(), methodSig.GetModule(), methodSig.GetToken(),
+ &entrySig.GetSubstitution(), entrySig.GetModule(), entrySig.GetToken());
+ }
+
+ return pEntryMethod;
+ }
+ }
+
+ // Advance to next item in the hash chain which has the same name
+ pEntry = bmtParent->pParentMethodHash->FindNext(pEntry);
+ }
+//#endif
+
+//@TODO: Move to this code, as the use of a HashTable is broken; overriding semantics
+//@TODO: require matching against the most-derived slot of a given name and signature,
+//@TODO: (which deals specifically with newslot methods with identical name and sig), but
+//@TODO: HashTables are by definition unordered and so we've only been getting by with the
+//@TODO: implementation being compatible with the order in which methods were added to
+//@TODO: the HashTable in CreateMethodChainHash.
+#if 0
+ bmtParentInfo::Iterator it(bmtParent->IterateSlots());
+ it.MoveTo(static_cast<size_t>(GetParentMethodTable()->GetNumVirtuals()));
+ while (it.Prev())
+ {
+ bmtMethodHandle decl(it->Decl());
+ const MethodSignature &declSig(decl.GetMethodSignature());
+ if (declSig == methodSig)
+ {
+ if (pMethodConstraintsMatch != NULL)
+ {
+ // Check the constraints are consistent,
+ // and return the result to the caller.
+ // We do this here to avoid recalculating pSubst.
+ *pMethodConstraintsMatch = MetaSig::CompareMethodConstraints(
+ &methodSig.GetSubstitution(), methodSig.GetModule(), methodSig.GetToken(),
+ &declSig.GetSubstitution(), declSig.GetModule(), declSig.GetToken());
+ }
+
+ return decl.AsRTMethod();
+ }
+ }
+#endif // 0
+
+ return NULL;
+}
+
+//*******************************************************************************
+//
+// Given an interface map to fill out, expand pNewInterface (and the interfaces it inherits) into it,
+// increasing pdwInterfaceListSize as appropriate, and avoiding duplicates.
+//
+void
+MethodTableBuilder::ExpandApproxInterface(
+ bmtInterfaceInfo * bmtInterface, // out parameter, various parts cumulatively written to.
+ const Substitution * pNewInterfaceSubstChain,
+ MethodTable * pNewInterface,
+ InterfaceDeclarationScope declScope
+ COMMA_INDEBUG(MethodTable * dbg_pClassMT))
+{
+ STANDARD_VM_CONTRACT;
+
+ //#ExpandingInterfaces
+ // We expand the tree of inherited interfaces into a set by adding the
+ // current node BEFORE expanding the parents of the current node.
+ // ****** This must be consistent with code:ExpandExactInterface *******
+ // ****** This must be consistent with code:ClassCompat::MethodTableBuilder::BuildInteropVTable_ExpandInterface *******
+
+ // The interface list contains the fully expanded set of interfaces from the parent; then
+ // we start adding all the interfaces we declare. We need to know which interfaces
+ // we declare, but do not need duplicates of the ones we declare. This means entries
+ // may duplicate ones inherited from the parent.
+
+ // Is it already present in the list?
+ for (DWORD i = 0; i < bmtInterface->dwInterfaceMapSize; i++)
+ {
+ bmtInterfaceEntry * pItfEntry = &bmtInterface->pInterfaceMap[i];
+ bmtRTType * pItfType = pItfEntry->GetInterfaceType();
+
+ // Type Equivalence is not respected for this comparison, as you can have multiple type-equivalent interfaces on a class
+ TokenPairList newVisited = TokenPairList::AdjustForTypeEquivalenceForbiddenScope(NULL);
+ if (MetaSig::CompareTypeDefsUnderSubstitutions(pItfType->GetMethodTable(),
+ pNewInterface,
+ &pItfType->GetSubstitution(),
+ pNewInterfaceSubstChain,
+ &newVisited))
+ {
+ if (declScope.fIsInterfaceDeclaredOnType)
+ {
+ pItfEntry->IsDeclaredOnType() = true;
+ }
+#ifdef _DEBUG
+ //#InjectInterfaceDuplicates_ApproxInterfaces
+ // We can inject duplicate interfaces in check builds.
+ // Has to be in sync with code:#InjectInterfaceDuplicates_Main
+ if (((dbg_pClassMT == NULL) && bmtInterface->dbg_fShouldInjectInterfaceDuplicates) ||
+ ((dbg_pClassMT != NULL) && dbg_pClassMT->Debug_HasInjectedInterfaceDuplicates()))
+ {
+ // The injected duplicate interface should have the same status 'ImplementedByParent' as
+ // the original interface (can be false if the interface is implemented indirectly twice)
+ declScope.fIsInterfaceDeclaredOnParent = pItfEntry->IsImplementedByParent();
+ // Just pretend we didn't find this match, but mark all duplicates as 'DeclaredOnType' if
+ // needed
+ continue;
+ }
+#endif //_DEBUG
+ return; // found it, don't add it again
+ }
+ }
+
+ bmtRTType * pNewItfType =
+ new (GetStackingAllocator()) bmtRTType(*pNewInterfaceSubstChain, pNewInterface);
+
+ if (bmtInterface->dwInterfaceMapSize >= bmtInterface->dwInterfaceMapAllocated)
+ {
+ //
+ // Grow the array of interfaces
+ //
+ S_UINT32 dwNewAllocated = S_UINT32(2) * S_UINT32(bmtInterface->dwInterfaceMapAllocated) + S_UINT32(5);
+
+ if (dwNewAllocated.IsOverflow())
+ {
+ BuildMethodTableThrowException(COR_E_OVERFLOW);
+ }
+
+ S_SIZE_T safeSize = S_SIZE_T(sizeof(bmtInterfaceEntry)) *
+ S_SIZE_T(dwNewAllocated.Value());
+
+ if (safeSize.IsOverflow())
+ {
+ BuildMethodTableThrowException(COR_E_OVERFLOW);
+ }
+
+ bmtInterfaceEntry * pNewMap = (bmtInterfaceEntry *)new (GetStackingAllocator()) BYTE[safeSize.Value()];
+ memcpy(pNewMap, bmtInterface->pInterfaceMap, sizeof(bmtInterfaceEntry) * bmtInterface->dwInterfaceMapAllocated);
+
+ bmtInterface->pInterfaceMap = pNewMap;
+ bmtInterface->dwInterfaceMapAllocated = dwNewAllocated.Value();
+ }
+
+ // The interface map memory was just allocated as an array of bytes, so we use
+ // placement new to initialize the new map entry. No need to do anything with the
+ // result, so just chuck it.
+ CONSISTENCY_CHECK(bmtInterface->dwInterfaceMapSize < bmtInterface->dwInterfaceMapAllocated);
+ new ((void *)&bmtInterface->pInterfaceMap[bmtInterface->dwInterfaceMapSize])
+ bmtInterfaceEntry(pNewItfType, declScope);
+
+ bmtInterface->dwInterfaceMapSize++;
+
+ // Make sure to pass in the substitution from the new itf type created above as
+ // these methods assume that substitutions are allocated in the stacking heap,
+ // not the stack.
+ InterfaceDeclarationScope declaredItfScope(declScope.fIsInterfaceDeclaredOnParent, false);
+ ExpandApproxDeclaredInterfaces(
+ bmtInterface,
+ bmtTypeHandle(pNewItfType),
+ declaredItfScope
+ COMMA_INDEBUG(dbg_pClassMT));
+} // MethodTableBuilder::ExpandApproxInterface
+
+//*******************************************************************************
+// Arguments:
+// dbg_pClassMT - Class on which the interfaces are declared (either explicitly or implicitly).
+// It will never be an interface. It may be NULL (if it is the type being built).
+void
+MethodTableBuilder::ExpandApproxDeclaredInterfaces(
+ bmtInterfaceInfo * bmtInterface, // out parameter, various parts cumulatively written to.
+ bmtTypeHandle thType,
+ InterfaceDeclarationScope declScope
+ COMMA_INDEBUG(MethodTable * dbg_pClassMT))
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE((dbg_pClassMT == NULL) || !dbg_pClassMT->IsInterface());
+
+ HRESULT hr;
+ // Iterate the list of interfaces declared by thType and add them to the map.
+ InterfaceImplEnum ie(thType.GetModule(), thType.GetTypeDefToken(), &thType.GetSubstitution());
+ while ((hr = ie.Next()) == S_OK)
+ {
+ MethodTable *pGenericIntf = ClassLoader::LoadApproxTypeThrowing(
+ thType.GetModule(), ie.CurrentToken(), NULL, NULL).GetMethodTable();
+ CONSISTENCY_CHECK(pGenericIntf->IsInterface());
+
+ ExpandApproxInterface(bmtInterface,
+ ie.CurrentSubst(),
+ pGenericIntf,
+ declScope
+ COMMA_INDEBUG(dbg_pClassMT));
+ }
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+} // MethodTableBuilder::ExpandApproxDeclaredInterfaces
+
+//*******************************************************************************
+void
+MethodTableBuilder::ExpandApproxInheritedInterfaces(
+ bmtInterfaceInfo * bmtInterface,
+ bmtRTType * pParentType)
+{
+ STANDARD_VM_CONTRACT;
+
+ INTERIOR_STACK_PROBE(GetThread());
+
+ // Expand interfaces in superclasses first. Interfaces inherited from parents
+ // must have the same indexes as in the parent.
+ bmtRTType * pParentOfParent = pParentType->GetParentType();
+
+ //#InterfaceMap_SupersetOfParent
+ // We have to load parent's interface map the same way the parent did it (as open type).
+ // Further code depends on this:
+ // code:#InterfaceMap_UseParentInterfaceImplementations
+ // We verify that this holds:
+ // code:#ApproxInterfaceMap_SupersetOfParent
+ // code:#ExactInterfaceMap_SupersetOfParent
+ //
+ //#InterfaceMap_CanonicalSupersetOfParent
+ // Note that canonical instantiation of parent can have different interface instantiations in the
+ // interface map than derived type:
+ // class MyClass<T> : MyBase<string, T>, I<T>
+ // class MyBase<U, V> : I<U>
+ // Type MyClass<_Canon> has MyBase<_Canon,_Canon> as parent. The interface maps are:
+ // MyBase<_Canon,_Canon> ... I<_Canon>
+ // MyClass<_Canon> ... I<string> (#1)
+ // I<_Canon> (#2)
+ // The I's instantiation I<string> (#1) in MyClass and I<_Canon> from MyBase are not the same
+ // instantiations.
+
+ // Backup parent substitution
+ Substitution parentSubstitution = pParentType->GetSubstitution();
+ // Make parent an open type
+ pParentType->SetSubstitution(Substitution());
+
+ if (pParentOfParent != NULL)
+ {
+ ExpandApproxInheritedInterfaces(bmtInterface, pParentOfParent);
+ }
+
+ InterfaceDeclarationScope declScope(true, false);
+ ExpandApproxDeclaredInterfaces(
+ bmtInterface,
+ bmtTypeHandle(pParentType),
+ declScope
+ COMMA_INDEBUG(pParentType->GetMethodTable()));
+
+ // Make sure we loaded the same number of interfaces as the parent type itself
+ CONSISTENCY_CHECK(pParentType->GetMethodTable()->GetNumInterfaces() == bmtInterface->dwInterfaceMapSize);
+
+ // Restore parent's substitution
+ pParentType->SetSubstitution(parentSubstitution);
+
+ END_INTERIOR_STACK_PROBE;
+} // MethodTableBuilder::ExpandApproxInheritedInterfaces
+
+//*******************************************************************************
+// Fill out a fully expanded interface map, such that if we are declared to
+// implement I3, and I3 extends I1,I2, then I1,I2 are added to our list if
+// they are not already present.
+void
+MethodTableBuilder::LoadApproxInterfaceMap()
+{
+ STANDARD_VM_CONTRACT;
+
+ bmtInterface->dwInterfaceMapSize = 0;
+
+#ifdef _DEBUG
+ //#InjectInterfaceDuplicates_Main
+ // We will inject duplicate interfaces in check builds if env. var.
+ // COMPLUS_INTERNAL_TypeLoader_InjectInterfaceDuplicates is set to TRUE for all types (incl. non-generic
+ // types).
+ // This should allow us better test coverage of duplicates in interface map.
+ //
+ // The duplicates are legal for some types:
+ // A<T> : I<T>
+ // B<U,V> : A<U>, I<V>
+ // C : B<int,int>
+ // where the interface maps are:
+ // A<T> ... 1 item: I<T>
+ // A<int> ... 1 item: I<int>
+ // B<U,V> ... 2 items: I<U>, I<V>
+ // B<int,int> ... 2 items: I<int>, I<int>
+ // B<_Canon,_Canon> ... 2 items: I<_Canon>, I<_Canon>
+ // B<string,string> ... 2 items: I<string>, I<string>
+ // C ... 2 items: I<int>, I<int>
+ // Note: C had only 1 item (I<int>) in CLR 2.0 RTM/SP1/SP2 and early in CLR 4.0.
+ //
+ // We will create duplicate from every re-implemented interface (incl. non-generic):
+ // code:#InjectInterfaceDuplicates_ApproxInterfaces
+ // code:#InjectInterfaceDuplicates_LoadExactInterfaceMap
+ // code:#InjectInterfaceDuplicates_ExactInterfaces
+ //
+ // Note that we don't have to do anything for COM, because COM has its own interface map
+ // (code:InteropMethodTableData), which is independent of the type's interface map and is created only
+ // from non-generic interfaces (see code:ClassCompat::MethodTableBuilder::BuildInteropVTable_InterfaceList)
+
+ // We need to keep track of which interface duplicates were injected. Right now it's either all interfaces
+ // (declared on the type being built, not inherited) or none. In the future we could inject duplicates
+ // just for some of them.
+ bmtInterface->dbg_fShouldInjectInterfaceDuplicates =
+ (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TypeLoader_InjectInterfaceDuplicates) != 0);
+ if (bmtGenerics->Debug_GetTypicalMethodTable() != NULL)
+ { // It's safer to require that all instantiations have the same injected interface duplicates.
+ // In future we could inject different duplicates for various non-shared instantiations.
+
+ // Use the same injection status as typical instantiation
+ bmtInterface->dbg_fShouldInjectInterfaceDuplicates =
+ bmtGenerics->Debug_GetTypicalMethodTable()->Debug_HasInjectedInterfaceDuplicates();
+
+ if (GetModule() == g_pObjectClass->GetModule())
+ { // mscorlib has some weird hardcoded information about interfaces (e.g.
+ // code:CEEPreloader::ApplyTypeDependencyForSZArrayHelper), so we don't inject duplicates into
+ // mscorlib types
+ bmtInterface->dbg_fShouldInjectInterfaceDuplicates = FALSE;
+ }
+ }
+#endif //_DEBUG
+
+ // First inherit all the parent's interfaces. This is important, because our interface map must
+ // list the interfaces in identical order to our parent.
+ //
+ // <NICE> we should document the reasons why. One reason is that DispatchMapTypeIDs can be indexes
+ // into the list </NICE>
+ if (HasParent())
+ {
+ ExpandApproxInheritedInterfaces(bmtInterface, GetParentType());
+#ifdef _DEBUG
+ //#ApproxInterfaceMap_SupersetOfParent
+ // Check that parent's interface map is the same as what we just computed
+ // See code:#InterfaceMap_SupersetOfParent
+ {
+ MethodTable * pParentMT = GetParentMethodTable();
+ _ASSERTE(pParentMT->GetNumInterfaces() == bmtInterface->dwInterfaceMapSize);
+
+ MethodTable::InterfaceMapIterator parentInterfacesIterator = pParentMT->IterateInterfaceMap();
+ UINT32 nInterfaceIndex = 0;
+ while (parentInterfacesIterator.Next())
+ {
+ // Compare TypeDefs of the parent's interface and this interface (full MT comparison is in
+ // code:#ExactInterfaceMap_SupersetOfParent)
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOAD_APPROXPARENTS);
+ _ASSERTE(parentInterfacesIterator.GetInterfaceInfo()->GetApproxMethodTable(pParentMT->GetLoaderModule())->HasSameTypeDefAs(
+ bmtInterface->pInterfaceMap[nInterfaceIndex].GetInterfaceType()->GetMethodTable()));
+ nInterfaceIndex++;
+ }
+ _ASSERTE(nInterfaceIndex == bmtInterface->dwInterfaceMapSize);
+ }
+#endif //_DEBUG
+ }
+
+ // Now add in any freshly declared interfaces, possibly augmenting the flags
+ InterfaceDeclarationScope declScope(false, true);
+ ExpandApproxDeclaredInterfaces(
+ bmtInterface,
+ bmtInternal->pType,
+ declScope
+ COMMA_INDEBUG(NULL));
+} // MethodTableBuilder::LoadApproxInterfaceMap
+
+//*******************************************************************************
+// Fills an array of TypeIDs with all duplicate occurrences of pDeclIntfMT in the interface map.
+//
+// Arguments:
+// rgDispatchMapTypeIDs / cDispatchMapTypeIDs - Array of TypeIDs and its element count.
+// pcIfaceDuplicates - Number of duplicate occurrences of the interface in the interface map (ideally <=
+// cDispatchMapTypeIDs).
+//
+// Note: If the passed rgDispatchMapTypeIDs array is smaller than the number of duplicates, fills it
+// with the duplicates that fit and returns the number of all existing duplicates (not just those filled
+// into the array) in pcIfaceDuplicates.
+//
+void
+MethodTableBuilder::ComputeDispatchMapTypeIDs(
+ MethodTable * pDeclIntfMT,
+ const Substitution * pDeclIntfSubst,
+ DispatchMapTypeID * rgDispatchMapTypeIDs,
+ UINT32 cDispatchMapTypeIDs,
+ UINT32 * pcIfaceDuplicates)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(pDeclIntfMT->IsInterface());
+
+ // Count of interface duplicates (also used as index into TypeIDs array)
+ *pcIfaceDuplicates = 0;
+ for (DWORD idx = 0; idx < bmtInterface->dwInterfaceMapSize; idx++)
+ {
+ bmtInterfaceEntry * pItfEntry = &bmtInterface->pInterfaceMap[idx];
+ bmtRTType * pItfType = pItfEntry->GetInterfaceType();
+ // Type Equivalence is forbidden in interface type ids.
+ TokenPairList newVisited = TokenPairList::AdjustForTypeEquivalenceForbiddenScope(NULL);
+ if (MetaSig::CompareTypeDefsUnderSubstitutions(pItfType->GetMethodTable(),
+ pDeclIntfMT,
+ &pItfType->GetSubstitution(),
+ pDeclIntfSubst,
+ &newVisited))
+ { // We found another occurrence of this interface
+ // Can we fit it into the TypeID array?
+ if (*pcIfaceDuplicates < cDispatchMapTypeIDs)
+ {
+ rgDispatchMapTypeIDs[*pcIfaceDuplicates] = DispatchMapTypeID::InterfaceClassID(idx);
+ }
+ // Increase number of duplicate interfaces
+ (*pcIfaceDuplicates)++;
+ }
+ }
+} // MethodTableBuilder::ComputeDispatchMapTypeIDs
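+
+// Illustrative (hypothetical) caller pattern implied by the note above: retry with
+// a larger array when more duplicates exist than fit.
+//
+//   UINT32 cDuplicates;
+//   ComputeDispatchMapTypeIDs(pIntfMT, pSubst, rgIDs, cIDs, &cDuplicates);
+//   if (cDuplicates > cIDs)
+//   {
+//       // allocate rgIDs with cDuplicates entries and call again
+//   }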
+
+//*******************************************************************************
+/*static*/
+VOID DECLSPEC_NORETURN
+MethodTableBuilder::BuildMethodTableThrowException(
+ HRESULT hr,
+ const bmtErrorInfo & bmtError)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ LPCUTF8 pszClassName, pszNameSpace;
+ if (FAILED(bmtError.pModule->GetMDImport()->GetNameOfTypeDef(bmtError.cl, &pszClassName, &pszNameSpace)))
+ {
+ pszClassName = pszNameSpace = "Invalid TypeDef record";
+ }
+
+ if (IsNilToken(bmtError.dMethodDefInError) && (bmtError.szMethodNameForError == NULL))
+ {
+ if (hr == E_OUTOFMEMORY)
+ {
+ COMPlusThrowOM();
+ }
+ else
+ bmtError.pModule->GetAssembly()->ThrowTypeLoadException(
+ pszNameSpace, pszClassName, bmtError.resIDWhy);
+ }
+ else
+ {
+ LPCUTF8 szMethodName;
+ if (bmtError.szMethodNameForError == NULL)
+ {
+ if (FAILED((bmtError.pModule->GetMDImport())->GetNameOfMethodDef(bmtError.dMethodDefInError, &szMethodName)))
+ {
+ szMethodName = "Invalid MethodDef record";
+ }
+ }
+ else
+ {
+ szMethodName = bmtError.szMethodNameForError;
+ }
+
+ bmtError.pModule->GetAssembly()->ThrowTypeLoadException(
+ pszNameSpace, pszClassName, szMethodName, bmtError.resIDWhy);
+ }
+} // MethodTableBuilder::BuildMethodTableThrowException
+
+//*******************************************************************************
+void MethodTableBuilder::SetBMTData(
+ LoaderAllocator *bmtAllocator,
+ bmtErrorInfo *bmtError,
+ bmtProperties *bmtProp,
+ bmtVtable *bmtVT,
+ bmtParentInfo *bmtParent,
+ bmtInterfaceInfo *bmtInterface,
+ bmtMetaDataInfo *bmtMetaData,
+ bmtMethodInfo *bmtMethod,
+ bmtMethAndFieldDescs *bmtMFDescs,
+ bmtFieldPlacement *bmtFP,
+ bmtInternalInfo *bmtInternal,
+ bmtGCSeriesInfo *bmtGCSeries,
+ bmtMethodImplInfo *bmtMethodImpl,
+ const bmtGenericsInfo *bmtGenerics,
+ bmtEnumFieldInfo *bmtEnumFields,
+ bmtContextStaticInfo *bmtCSInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+ this->bmtAllocator = bmtAllocator;
+ this->bmtError = bmtError;
+ this->bmtProp = bmtProp;
+ this->bmtVT = bmtVT;
+ this->bmtParent = bmtParent;
+ this->bmtInterface = bmtInterface;
+ this->bmtMetaData = bmtMetaData;
+ this->bmtMethod = bmtMethod;
+ this->bmtMFDescs = bmtMFDescs;
+ this->bmtFP = bmtFP;
+ this->bmtInternal = bmtInternal;
+ this->bmtGCSeries = bmtGCSeries;
+ this->bmtMethodImpl = bmtMethodImpl;
+ this->bmtGenerics = bmtGenerics;
+ this->bmtEnumFields = bmtEnumFields;
+ this->bmtCSInfo = bmtCSInfo;
+}
+
+//*******************************************************************************
+// Used by MethodTableBuilder
+
+MethodTableBuilder::bmtRTType *
+MethodTableBuilder::CreateTypeChain(
+ MethodTable * pMT,
+ const Substitution & subst)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(GetStackingAllocator()));
+ PRECONDITION(CheckPointer(pMT));
+ } CONTRACTL_END;
+
+ pMT = pMT->GetCanonicalMethodTable();
+
+ bmtRTType * pType = new (GetStackingAllocator())
+ bmtRTType(subst, pMT);
+
+ MethodTable * pMTParent = pMT->GetParentMethodTable();
+ if (pMTParent != NULL)
+ {
+ pType->SetParentType(
+ CreateTypeChain(
+ pMTParent,
+ pMT->GetSubstitutionForParent(&pType->GetSubstitution())));
+ }
+
+ return pType;
+}
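+
+// Illustration (hypothetical hierarchy): for D : B<string> : Object, calling
+// CreateTypeChain(pD, subst) produces the chain D -> B<string> -> Object, where
+// each bmtRTType node carries the substitution needed to interpret signatures
+// declared on that type from D's point of view.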
+
+//*******************************************************************************
+/* static */
+MethodTableBuilder::bmtRTType *
+MethodTableBuilder::bmtRTType::FindType(
+ bmtRTType * pType,
+ MethodTable * pTargetMT)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pType));
+ PRECONDITION(CheckPointer(pTargetMT));
+ } CONTRACTL_END;
+
+ pTargetMT = pTargetMT->GetCanonicalMethodTable();
+ while (pType != NULL &&
+ pType->GetMethodTable()->GetCanonicalMethodTable() != pTargetMT)
+ {
+ pType = pType->GetParentType();
+ }
+
+ return pType;
+}
+
+//*******************************************************************************
+mdTypeDef
+MethodTableBuilder::bmtRTType::GetEnclosingTypeToken() const
+{
+ STANDARD_VM_CONTRACT;
+
+ mdTypeDef tok = mdTypeDefNil;
+
+ if (IsNested())
+ { // This is guaranteed to succeed because the EEClass would not have been
+ // set as nested unless a valid token was stored in metadata.
+ if (FAILED(GetModule()->GetMDImport()->GetNestedClassProps(
+ GetTypeDefToken(), &tok)))
+ {
+ return mdTypeDefNil;
+ }
+ }
+
+ return tok;
+}
+
+//*******************************************************************************
+/*static*/ bool
+MethodTableBuilder::MethodSignature::NamesEqual(
+ const MethodSignature & sig1,
+ const MethodSignature & sig2)
+{
+ STANDARD_VM_CONTRACT;
+
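+ // Cheap rejection: compare the cached name hashes before doing a full strcmp.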
+ if (sig1.GetNameHash() != sig2.GetNameHash())
+ {
+ return false;
+ }
+
+ if (strcmp(sig1.GetName(), sig2.GetName()) != 0)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+//*******************************************************************************
+/*static*/ bool
+MethodTableBuilder::MethodSignature::SignaturesEquivalent(
+ const MethodSignature & sig1,
+ const MethodSignature & sig2)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_LEGACYNETCF
+ BaseDomain::AppDomainCompatMode compatMode1 = sig1.GetModule()->GetDomain()->GetAppDomainCompatMode();
+ BaseDomain::AppDomainCompatMode compatMode2 = sig2.GetModule()->GetDomain()->GetAppDomainCompatMode();
+
+ if ((compatMode1 == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8) || (compatMode2 == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8))
+ {
+ return S_OK == MetaSig::CompareMethodSigsNT(
+ sig1.GetSignature(), static_cast<DWORD>(sig1.GetSignatureLength()), sig1.GetModule(), &sig1.GetSubstitution(),
+ sig2.GetSignature(), static_cast<DWORD>(sig2.GetSignatureLength()), sig2.GetModule(), &sig2.GetSubstitution());
+ }
+ else
+#endif
+ {
+ return !!MetaSig::CompareMethodSigs(
+ sig1.GetSignature(), static_cast<DWORD>(sig1.GetSignatureLength()), sig1.GetModule(), &sig1.GetSubstitution(),
+ sig2.GetSignature(), static_cast<DWORD>(sig2.GetSignatureLength()), sig2.GetModule(), &sig2.GetSubstitution());
+ }
+}
+
+//*******************************************************************************
+/*static*/ bool
+MethodTableBuilder::MethodSignature::SignaturesExactlyEqual(
+ const MethodSignature & sig1,
+ const MethodSignature & sig2)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_LEGACYNETCF
+ BaseDomain::AppDomainCompatMode compatMode1 = sig1.GetModule()->GetDomain()->GetAppDomainCompatMode();
+ BaseDomain::AppDomainCompatMode compatMode2 = sig2.GetModule()->GetDomain()->GetAppDomainCompatMode();
+
+ if ((compatMode1 == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8) || (compatMode2 == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8))
+ {
+ TokenPairList newVisited = TokenPairList::AdjustForTypeEquivalenceForbiddenScope(NULL);
+ return S_OK == MetaSig::CompareMethodSigsNT(
+ sig1.GetSignature(), static_cast<DWORD>(sig1.GetSignatureLength()), sig1.GetModule(), &sig1.GetSubstitution(),
+ sig2.GetSignature(), static_cast<DWORD>(sig2.GetSignatureLength()), sig2.GetModule(), &sig2.GetSubstitution(),
+ &newVisited);
+ }
+ else
+#endif
+ {
+ TokenPairList newVisited = TokenPairList::AdjustForTypeEquivalenceForbiddenScope(NULL);
+ return !!MetaSig::CompareMethodSigs(
+ sig1.GetSignature(), static_cast<DWORD>(sig1.GetSignatureLength()), sig1.GetModule(), &sig1.GetSubstitution(),
+ sig2.GetSignature(), static_cast<DWORD>(sig2.GetSignatureLength()), sig2.GetModule(), &sig2.GetSubstitution(),
+ &newVisited);
+ }
+}
+
+//*******************************************************************************
+bool
+MethodTableBuilder::MethodSignature::Equivalent(
+ const MethodSignature &rhs) const
+{
+ STANDARD_VM_CONTRACT;
+
+ return NamesEqual(*this, rhs) && SignaturesEquivalent(*this, rhs);
+}
+
+//*******************************************************************************
+bool
+MethodTableBuilder::MethodSignature::ExactlyEqual(
+ const MethodSignature &rhs) const
+{
+ STANDARD_VM_CONTRACT;
+
+ return NamesEqual(*this, rhs) && SignaturesExactlyEqual(*this, rhs);
+}
+
+//*******************************************************************************
+void
+MethodTableBuilder::MethodSignature::GetMethodAttributes() const
+{
+ STANDARD_VM_CONTRACT;
+
+ IMDInternalImport * pIMD = GetModule()->GetMDImport();
+ if (TypeFromToken(GetToken()) == mdtMethodDef)
+ {
+ DWORD cSig;
+ if (FAILED(pIMD->GetNameAndSigOfMethodDef(GetToken(), &m_pSig, &cSig, &m_szName)))
+ { // We have an empty name or signature on error; do nothing
+ }
+ m_cSig = static_cast<size_t>(cSig);
+ }
+ else
+ {
+ CONSISTENCY_CHECK(TypeFromToken(m_tok) == mdtMemberRef);
+ DWORD cSig;
+ if (FAILED(pIMD->GetNameAndSigOfMemberRef(GetToken(), &m_pSig, &cSig, &m_szName)))
+ { // We have an empty name or signature on error; do nothing
+ }
+ m_cSig = static_cast<size_t>(cSig);
+ }
+}
+
+//*******************************************************************************
+UINT32
+MethodTableBuilder::MethodSignature::GetNameHash() const
+{
+ STANDARD_VM_CONTRACT;
+
+ CheckGetMethodAttributes();
+
+ if (m_nameHash == INVALID_NAME_HASH)
+ {
+ ULONG nameHash = HashStringA(GetName());
+ if (nameHash == INVALID_NAME_HASH)
+ {
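+ // The computed hash collides with the INVALID_NAME_HASH sentinel; perturb it
+ // so the cached value is never mistaken for "not yet computed".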
+ nameHash /= 2;
+ }
+ m_nameHash = nameHash;
+ }
+
+ return m_nameHash;
+}
+
+//*******************************************************************************
+MethodTableBuilder::bmtMDType::bmtMDType(
+ bmtRTType * pParentType,
+ Module * pModule,
+ mdTypeDef tok,
+ const SigTypeContext & sigContext)
+ : m_pParentType(pParentType),
+ m_pModule(pModule),
+ m_tok(tok),
+ m_enclTok(mdTypeDefNil),
+ m_sigContext(sigContext),
+ m_subst(),
+ m_dwAttrs(0),
+ m_pMT(NULL)
+{
+ STANDARD_VM_CONTRACT;
+
+ IfFailThrow(m_pModule->GetMDImport()->GetTypeDefProps(m_tok, &m_dwAttrs, NULL));
+
+ HRESULT hr = m_pModule->GetMDImport()->GetNestedClassProps(m_tok, &m_enclTok);
+ if (FAILED(hr))
+ {
+ if (hr != CLDB_E_RECORD_NOTFOUND)
+ {
+ ThrowHR(hr);
+ }
+ // Just in case GetNestedClassProps sets the out param to some other value
+ m_enclTok = mdTypeDefNil;
+ }
+}
+
+//*******************************************************************************
+MethodTableBuilder::bmtRTMethod::bmtRTMethod(
+ bmtRTType * pOwningType,
+ MethodDesc * pMD)
+ : m_pOwningType(pOwningType),
+ m_pMD(pMD),
+ m_methodSig(pMD->GetModule(),
+ pMD->GetMemberDef(),
+ &pOwningType->GetSubstitution())
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+}
+
+//*******************************************************************************
+MethodTableBuilder::bmtMDMethod::bmtMDMethod(
+ bmtMDType * pOwningType,
+ mdMethodDef tok,
+ DWORD dwDeclAttrs,
+ DWORD dwImplAttrs,
+ DWORD dwRVA,
+ METHOD_TYPE type,
+ METHOD_IMPL_TYPE implType)
+ : m_pOwningType(pOwningType),
+ m_dwDeclAttrs(dwDeclAttrs),
+ m_dwImplAttrs(dwImplAttrs),
+ m_dwRVA(dwRVA),
+ m_type(type),
+ m_implType(implType),
+ m_methodSig(pOwningType->GetModule(),
+ tok,
+ &pOwningType->GetSubstitution()),
+ m_pMD(NULL),
+ m_pUnboxedMD(NULL),
+ m_slotIndex(INVALID_SLOT_INDEX),
+ m_unboxedSlotIndex(INVALID_SLOT_INDEX)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ }
+//*******************************************************************************
+void
+MethodTableBuilder::ImportParentMethods()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!HasParent())
+ { // If there's no parent, there's no methods to import
+ return;
+ }
+
+ SLOT_INDEX numMethods = static_cast<SLOT_INDEX>
+ (GetParentMethodTable()->GetNumMethods());
+
+ bmtParent->pSlotTable = new (GetStackingAllocator())
+ bmtMethodSlotTable(numMethods, GetStackingAllocator());
+
+ MethodTable::MethodIterator it(GetParentMethodTable());
+ for (;it.IsValid(); it.Next())
+ {
+ MethodDesc * pDeclDesc = NULL;
+ MethodTable * pDeclMT = NULL;
+ MethodDesc * pImplDesc = NULL;
+ MethodTable * pImplMT = NULL;
+
+ if (it.IsVirtual())
+ {
+ pDeclDesc = it.GetDeclMethodDesc();
+ pDeclMT = pDeclDesc->GetMethodTable();
+ pImplDesc = it.GetMethodDesc();
+ pImplMT = pImplDesc->GetMethodTable();
+ }
+ else
+ {
+ pDeclDesc = pImplDesc = it.GetMethodDesc();
+ pDeclMT = pImplMT = it.GetMethodDesc()->GetMethodTable();
+ }
+
+ CONSISTENCY_CHECK(CheckPointer(pDeclDesc));
+ CONSISTENCY_CHECK(CheckPointer(pImplDesc));
+
+ // Create and assign to each slot
+ bmtMethodSlot newSlot;
+ newSlot.Decl() = new (GetStackingAllocator())
+ bmtRTMethod(bmtRTType::FindType(GetParentType(), pDeclMT), pDeclDesc);
+ if (pDeclDesc == pImplDesc)
+ {
+ newSlot.Impl() = newSlot.Decl();
+ }
+ else
+ {
+ newSlot.Impl() = new (GetStackingAllocator())
+ bmtRTMethod(bmtRTType::FindType(GetParentType(), pImplMT), pImplDesc);
+ }
+
+ if (!bmtParent->pSlotTable->AddMethodSlot(newSlot))
+ BuildMethodTableThrowException(IDS_CLASSLOAD_TOO_MANY_METHODS);
+ }
+}
+
+//*******************************************************************************
+void
+MethodTableBuilder::CopyParentVtable()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!HasParent())
+ {
+ return;
+ }
+
+ for (bmtParentInfo::Iterator it = bmtParent->IterateSlots();
+ !it.AtEnd() && it.CurrentIndex() < GetParentMethodTable()->GetNumVirtuals();
+ ++it)
+ {
+ if (!bmtVT->pSlotTable->AddMethodSlot(*it))
+ BuildMethodTableThrowException(IDS_CLASSLOAD_TOO_MANY_METHODS);
+ ++bmtVT->cVirtualSlots;
+ ++bmtVT->cTotalSlots;
+ }
+}
+
+//*******************************************************************************
+// Determine if this is the special SIMD type System.Numerics.Vector<T>, whose
+// size is determined dynamically based on the hardware and the presence of JIT
+// support.
+// If so:
+// - Update the NumInstanceFieldBytes on the bmtFieldPlacement.
+// - Update the m_cbNativeSize and m_cbManagedSize if HasLayout() is true.
+// Return a BOOL result to indicate whether the size has been updated.
+//
+// Will throw IDS_EE_SIMD_NGEN_DISALLOWED if the type is System.Numerics.Vector`1
+// and this is an ngen compilation process.
+//
+BOOL MethodTableBuilder::CheckIfSIMDAndUpdateSize()
+{
+ STANDARD_VM_CONTRACT;
+
+#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
+ if (!GetAssembly()->IsSIMDVectorAssembly())
+ return false;
+
+ if (bmtFP->NumInstanceFieldBytes != 16)
+ return false;
+
+ LPCUTF8 className;
+ LPCUTF8 nameSpace;
+ if (FAILED(GetMDImport()->GetNameOfTypeDef(bmtInternal->pType->GetTypeDefToken(), &className, &nameSpace)))
+ return false;
+
+ if (strcmp(className, "Vector`1") != 0 || strcmp(nameSpace, "System.Numerics") != 0)
+ return false;
+
+ if (IsCompilationProcess())
+ {
+ COMPlusThrow(kTypeLoadException, IDS_EE_SIMD_NGEN_DISALLOWED);
+ }
+
+ if (!TargetHasAVXSupport())
+ return false;
+
+ EEJitManager *jitMgr = ExecutionManager::GetEEJitManager();
+ if (jitMgr->LoadJIT())
+ {
+ DWORD cpuCompileFlags = jitMgr->GetCPUCompileFlags();
+ if ((cpuCompileFlags & CORJIT_FLG_FEATURE_SIMD) != 0)
+ {
+ unsigned intrinsicSIMDVectorLength = jitMgr->m_jit->getMaxIntrinsicSIMDVectorLength(cpuCompileFlags);
+ if (intrinsicSIMDVectorLength != 0)
+ {
+ bmtFP->NumInstanceFieldBytes = intrinsicSIMDVectorLength;
+ if (HasLayout())
+ {
+ GetLayoutInfo()->m_cbNativeSize = intrinsicSIMDVectorLength;
+ GetLayoutInfo()->m_cbManagedSize = intrinsicSIMDVectorLength;
+ }
+ return true;
+ }
+ }
+ }
+#endif
+ return false;
+}
+
+//*******************************************************************************
+void
+MethodTableBuilder::bmtInterfaceEntry::CreateSlotTable(
+ StackingAllocator * pStackingAllocator)
+{
+ STANDARD_VM_CONTRACT;
+
+ CONSISTENCY_CHECK(m_pImplTable == NULL);
+
+ SLOT_INDEX cSlots = (SLOT_INDEX)GetInterfaceType()->GetMethodTable()->GetNumVirtuals();
+ bmtInterfaceSlotImpl * pST = new (pStackingAllocator) bmtInterfaceSlotImpl[cSlots];
+
+ MethodTable::MethodIterator it(GetInterfaceType()->GetMethodTable());
+ for (; it.IsValid(); it.Next())
+ {
+ if (!it.IsVirtual())
+ {
+ break;
+ }
+
+ bmtRTMethod * pCurMethod = new (pStackingAllocator)
+ bmtRTMethod(GetInterfaceType(), it.GetDeclMethodDesc());
+
+ CONSISTENCY_CHECK(m_cImplTable == it.GetSlotNumber());
+ pST[m_cImplTable++] = bmtInterfaceSlotImpl(pCurMethod, INVALID_SLOT_INDEX);
+ }
+
+ m_pImplTable = pST;
+}
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif // _PREFAST_
+//---------------------------------------------------------------------------------------
+//
+// Builds the method table, allocates MethodDesc, handles overloaded members, attempts to compress
+// interface storage. All dependent classes must already be resolved!
+//
+MethodTable *
+MethodTableBuilder::BuildMethodTableThrowing(
+ LoaderAllocator * pAllocator,
+ Module * pLoaderModule,
+ Module * pModule,
+ mdToken cl,
+ BuildingInterfaceInfo_t * pBuildingInterfaceList,
+ const LayoutRawFieldInfo * pLayoutRawFieldInfos,
+ MethodTable * pParentMethodTable,
+ const bmtGenericsInfo * bmtGenericsInfo,
+ SigPointer parentInst,
+ WORD cBuildingInterfaceList)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(GetHalfBakedClass()));
+ PRECONDITION(CheckPointer(bmtGenericsInfo));
+ }
+ CONTRACTL_END;
+
+ pModule->EnsureLibraryLoaded();
+
+ // The following structs, defined as private members of MethodTableBuilder, contain the necessary local
+ // parameters needed for BuildMethodTable. Look at the struct definitions for a detailed list of all
+ // parameters available to BuildMethodTableThrowing.
+
+ SetBMTData(
+ pAllocator,
+ new (GetStackingAllocator()) bmtErrorInfo(),
+ new (GetStackingAllocator()) bmtProperties(),
+ new (GetStackingAllocator()) bmtVtable(),
+ new (GetStackingAllocator()) bmtParentInfo(),
+ new (GetStackingAllocator()) bmtInterfaceInfo(),
+ new (GetStackingAllocator()) bmtMetaDataInfo(),
+ new (GetStackingAllocator()) bmtMethodInfo(),
+ new (GetStackingAllocator()) bmtMethAndFieldDescs(),
+ new (GetStackingAllocator()) bmtFieldPlacement(),
+ new (GetStackingAllocator()) bmtInternalInfo(),
+ new (GetStackingAllocator()) bmtGCSeriesInfo(),
+ new (GetStackingAllocator()) bmtMethodImplInfo(),
+ bmtGenericsInfo,
+ new (GetStackingAllocator()) bmtEnumFieldInfo(pModule->GetMDImport()),
+ new (GetStackingAllocator()) bmtContextStaticInfo());
+
+ //Initialize structs
+
+ bmtError->resIDWhy = IDS_CLASSLOAD_GENERAL; // Set the default failure reason; the offending method def is filled in later if that information becomes available.
+ bmtError->pThrowable = NULL;
+ bmtError->pModule = pModule;
+ bmtError->cl = cl;
+
+ bmtInternal->pInternalImport = pModule->GetMDImport();
+ bmtInternal->pModule = pModule;
+
+ bmtInternal->pParentMT = pParentMethodTable;
+
+ // Create the chain of bmtRTType for the parent types. This allows all imported
+ // parent methods to be associated with their declaring types, and as such it is
+ // easy to access the appropriate Substitution when comparing signatures.
+ bmtRTType * pParent = NULL;
+ if (pParentMethodTable != NULL)
+ {
+ Substitution * pParentSubst =
+ new (GetStackingAllocator()) Substitution(pModule, parentInst, NULL);
+ pParent = CreateTypeChain(pParentMethodTable, *pParentSubst);
+ }
+
+ // Now create the bmtMDType for the type being built.
+ bmtInternal->pType = new (GetStackingAllocator())
+ bmtMDType(pParent, pModule, cl, bmtGenericsInfo->typeContext);
+
+ // Put the interior stack probe after all the stack-allocated structures above. In the dtor we compare
+ // our this pointer to the SP to determine whether we are being called on an EH path or not.
+ INTERIOR_STACK_PROBE_FOR(GetThread(), 8);
+
+
+#ifdef _DEBUG
+ // Set debug class name string for easier debugging.
+ LPCUTF8 className;
+ LPCUTF8 nameSpace;
+ if (FAILED(GetMDImport()->GetNameOfTypeDef(bmtInternal->pType->GetTypeDefToken(), &className, &nameSpace)))
+ {
+ className = nameSpace = "Invalid TypeDef record";
+ }
+
+ {
+ S_SIZE_T safeLen = S_SIZE_T(sizeof(char))*(S_SIZE_T(strlen(className)) + S_SIZE_T(strlen(nameSpace)) + S_SIZE_T(2));
+ if(safeLen.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW);
+
+ size_t len = safeLen.Value();
+ char *name = (char*) AllocateFromHighFrequencyHeap(safeLen);
+ strcpy_s(name, len, nameSpace);
+ if (strlen(nameSpace) > 0) {
+ name[strlen(nameSpace)] = '.';
+ name[strlen(nameSpace) + 1] = '\0';
+ }
+ strcat_s(name, len, className);
+
+ GetHalfBakedClass()->SetDebugClassName(name);
+ }
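+
+ // Illustration: for nameSpace "System.Numerics" and className "Vector`1" the buffer
+ // built above holds "System.Numerics.Vector`1"; for a type in the empty namespace it
+ // holds just the class name.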
+
+ if (g_pConfig->ShouldBreakOnClassBuild(className))
+ {
+ CONSISTENCY_CHECK_MSGF(false, ("BreakOnClassBuild: typename '%s' ", className));
+ GetHalfBakedClass()->m_fDebuggingClass = TRUE;
+ }
+
+ LPCUTF8 pszDebugName,pszDebugNamespace;
+ if (FAILED(pModule->GetMDImport()->GetNameOfTypeDef(bmtInternal->pType->GetTypeDefToken(), &pszDebugName, &pszDebugNamespace)))
+ {
+ pszDebugName = pszDebugNamespace = "Invalid TypeDef record";
+ }
+
+ StackSString debugName(SString::Utf8, pszDebugName);
+
+ // If there is an instantiation, update the debug name to include instantiation type names.
+ if (bmtGenerics->HasInstantiation())
+ {
+ StackSString instDebugName(SString::Utf8, GetDebugClassName());
+ TypeString::AppendInst(instDebugName, bmtGenerics->GetInstantiation(), TypeString::FormatBasic);
+ StackScratchBuffer buff;
+ const char* pDebugNameUTF8 = instDebugName.GetUTF8(buff);
+ S_SIZE_T safeLen = S_SIZE_T(strlen(pDebugNameUTF8)) + S_SIZE_T(1);
+ if(safeLen.IsOverflow())
+ COMPlusThrowHR(COR_E_OVERFLOW);
+
+ size_t len = safeLen.Value();
+ char *name = (char*) AllocateFromLowFrequencyHeap(safeLen);
+ strcpy_s(name, len, pDebugNameUTF8);
+ GetHalfBakedClass()->SetDebugClassName(name);
+ pszDebugName = (LPCUTF8)name;
+ }
+
+ LOG((LF_CLASSLOADER, LL_INFO1000, "Loading class \"%s%s%S\" from module \"%ws\" in domain 0x%p %s\n",
+ *pszDebugNamespace ? pszDebugNamespace : "",
+ *pszDebugNamespace ? NAMESPACE_SEPARATOR_STR : "",
+ debugName.GetUnicode(),
+ pModule->GetDebugName(),
+ pModule->GetDomain(),
+ (pModule->IsSystem()) ? "System Domain" : ""
+ ));
+#endif // _DEBUG
+
+ // If this is mscorlib, skip some of the sanity checks on the layout
+ bmtProp->fNoSanityChecks = ((g_pObjectClass == NULL) || pModule == g_pObjectClass->GetModule()) ||
+#ifdef FEATURE_READYTORUN
+ // No sanity checks for ready-to-run compiled images if possible
+ (pModule->IsReadyToRun() && pModule->GetReadyToRunInfo()->SkipTypeValidation()) ||
+#endif
+ // No sanity checks for real generic instantiations
+ !bmtGenerics->IsTypicalTypeDefinition();
+
+ // Interfaces have a parent class of Object, but we don't really want to inherit all of
+ // Object's virtual methods, so pretend we don't have a parent class - at the bottom of this
+ // function we reset the parent class
+ if (IsInterface())
+ {
+ bmtInternal->pType->SetParentType(NULL);
+ bmtInternal->pParentMT = NULL;
+ }
+
+ unsigned totalDeclaredFieldSize=0;
+
+ // Check to see if the class is a valuetype; but we don't want to mark System.Enum
+ // as a ValueType. To accomplish this, the check takes advantage of the fact
+ // that System.ValueType and System.Enum are loaded one immediately after the
+ // other in that order, and so if the parent MethodTable is System.ValueType and
+ // the System.Enum MethodTable is unset, then we must be building System.Enum and
+ // so we don't mark it as a ValueType.
+ if(HasParent() &&
+ ((g_pEnumClass != NULL && GetParentMethodTable() == g_pValueTypeClass) ||
+ GetParentMethodTable() == g_pEnumClass))
+ {
+ bmtProp->fIsValueClass = true;
+
+ HRESULT hr = GetMDImport()->GetCustomAttributeByName(bmtInternal->pType->GetTypeDefToken(),
+ g_CompilerServicesUnsafeValueTypeAttribute,
+ NULL, NULL);
+ IfFailThrow(hr);
+ if (hr == S_OK)
+ {
+ SetUnsafeValueClass();
+ }
+ }
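+
+ // Illustration of the loading-order trick above:
+ //   parent == System.ValueType, g_pEnumClass != NULL => an ordinary value type
+ //   parent == System.ValueType, g_pEnumClass == NULL => we are building System.Enum itself (not marked)
+ //   parent == System.Enum                            => an enum, which is also a value class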
+
+ // Check to see if the class is an enumeration. No fancy checks like the one immediately
+ // above for value types are necessary here.
+ if(HasParent() && GetParentMethodTable() == g_pEnumClass)
+ {
+ bmtProp->fIsEnum = true;
+
+ // Ensure we don't have generic enums, or at least enums that have a
+ // different number of type parameters from their enclosing class.
+ // The goal is to ensure that the enum's values can't depend on the
+ // type parameters in any way. And we don't see any need for an
+ // enum to have additional type parameters.
+ if (bmtGenerics->GetNumGenericArgs() != 0)
+ {
+ // Nested enums can have generic type parameters from their enclosing class.
+ // CLS rules require type parameters to be propagated to nested types.
+ // Note that class G<T> { enum E { } } will produce "G`1+E<T>".
+ // We want to disallow class G<T> { enum E<T, U> { } }
+ // Perhaps the IL equivalent of class G<T> { enum E { } } should be legal.
+ if (!IsNested())
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_ENUM_EXTRA_GENERIC_TYPE_PARAM);
+ }
+
+ mdTypeDef tdEnclosing = mdTypeDefNil;
+ HRESULT hr = GetMDImport()->GetNestedClassProps(GetCl(), &tdEnclosing);
+ if (FAILED(hr))
+ ThrowHR(hr, BFA_UNABLE_TO_GET_NESTED_PROPS);
+
+ HENUMInternalHolder hEnumGenericPars(GetMDImport());
+ if (FAILED(hEnumGenericPars.EnumInitNoThrow(mdtGenericParam, tdEnclosing)))
+ {
+ GetAssembly()->ThrowTypeLoadException(GetMDImport(), tdEnclosing, IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ if (hEnumGenericPars.EnumGetCount() != bmtGenerics->GetNumGenericArgs())
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_ENUM_EXTRA_GENERIC_TYPE_PARAM);
+ }
+ }
+ }
+
+
+#ifdef FEATURE_COMINTEROP
+
+ // Com Import classes are special. These types must derive from System.Object,
+ // and we then substitute the parent with System.__ComObject.
+ if (IsComImport() && !IsEnum() && !IsInterface() && !IsValueClass() && !IsDelegate())
+ {
+ // ComImport classes must either extend from Object or be a WinRT class
+ // that extends from another WinRT class (and so form a chain of WinRT classes
+ // that ultimately extend from object).
+ MethodTable* pMTParent = GetParentMethodTable();
+ if ((pMTParent == NULL) || !(
+ // is the parent valid?
+ (pMTParent == g_pObjectClass) ||
+ (GetHalfBakedClass()->IsProjectedFromWinRT() && pMTParent->IsProjectedFromWinRT())
+ ))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_CANTEXTEND);
+ }
+
+ if (HasLayout())
+ {
+ // ComImport classes cannot have layout information.
+ BuildMethodTableThrowException(IDS_CLASSLOAD_COMIMPCANNOTHAVELAYOUT);
+ }
+
+ if (pMTParent == g_pObjectClass)
+ {
+ // ComImport classes ultimately extend from our __ComObject or RuntimeClass class
+ MethodTable *pCOMMT = NULL;
+ if (GetHalfBakedClass()->IsProjectedFromWinRT())
+ pCOMMT = g_pBaseRuntimeClass;
+ else
+ pCOMMT = g_pBaseCOMObject;
+
+ _ASSERTE(pCOMMT);
+
+ // We could have had COM interop classes derive from System.__ComObject,
+ // but instead we have them derive from System.Object, have them set the
+ // ComImport bit in the type attributes, and then we swap out the parent
+ // type under the covers.
+ bmtInternal->pType->SetParentType(CreateTypeChain(pCOMMT, Substitution()));
+ bmtInternal->pParentMT = pCOMMT;
+ }
+
+ // if the current class is imported
+ bmtProp->fIsComObjectType = true;
+ }
+
+ if (GetHalfBakedClass()->IsProjectedFromWinRT() && IsValueClass() && !IsEnum())
+ {
+ // WinRT structures must have sequential layout
+ if (!GetHalfBakedClass()->HasSequentialLayout())
+ {
+ BuildMethodTableThrowException(IDS_EE_STRUCTLAYOUT_WINRT);
+ }
+ }
+
+ // Check for special COM interop types.
+ CheckForSpecialTypes();
+
+ CheckForTypeEquivalence(cBuildingInterfaceList, pBuildingInterfaceList);
+
+ if (HasParent())
+ { // Types that inherit from com object types are themselves com object types.
+ if (GetParentMethodTable()->IsComObjectType())
+ {
+ // if the parent class is of ComObjectType
+ // so is the child
+ bmtProp->fIsComObjectType = true;
+ }
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+ // If your parent is type equivalent then so are you
+ if (GetParentMethodTable()->HasTypeEquivalence())
+ {
+ bmtProp->fHasTypeEquivalence = true;
+ }
+#endif
+ }
+
+#endif // FEATURE_COMINTEROP
+
+ if (!HasParent() && !IsInterface())
+ {
+ if(g_pObjectClass != NULL)
+ {
+ if(!IsGlobalClass())
+ {
+ // Non object derived types that are not the global class are prohibited by spec
+ BuildMethodTableThrowException(IDS_CLASSLOAD_PARENTNULL);
+ }
+ }
+ }
+
+
+
+ // Set the contextful or marshalbyref flag if necessary
+ SetContextfulOrByRef();
+
+ // NOTE: This appears to be the earliest point during class loading that other classes MUST be loaded.
+ // Resolve unresolved interfaces, determine an upper bound on the size of the interface map,
+ // and determine the size of the largest interface (in # slots).
+ ResolveInterfaces(cBuildingInterfaceList, pBuildingInterfaceList);
+
+ // Enumerate this class's methodImpls
+ EnumerateMethodImpls();
+
+ // Enumerate this class's methods and fields
+ EnumerateClassMethods();
+ ValidateMethods();
+
+ EnumerateClassFields();
+
+ // Import the slots of the parent for use in placing this type's methods.
+ ImportParentMethods();
+
+ // This will allocate the working versions of the VTable and NonVTable in bmtVT
+ AllocateWorkingSlotTables();
+
+ // Allocate a MethodDesc* for each method (needed later when doing interfaces), and a FieldDesc* for each field
+ AllocateFieldDescs();
+
+ // Copy the parent's vtable into the current type's vtable
+ CopyParentVtable();
+
+ bmtVT->pDispatchMapBuilder = new (GetStackingAllocator()) DispatchMapBuilder(GetStackingAllocator());
+
+ // Determine vtable placement for each member in this class
+ PlaceVirtualMethods();
+ PlaceNonVirtualMethods();
+
+ // Allocate MethodDescs (expects that methods have already been placed)
+ AllocAndInitMethodDescs();
+
+ //
+ // If we are a class, then there may be some unplaced vtable methods (which are by definition
+ // interface methods, otherwise they'd already have been placed). Place as many unplaced methods
+ // as possible, in the order preferred by interfaces. However, do not allow any duplicates - once
+ // a method has been placed, it cannot be placed again - if we are unable to neatly place an interface,
+ // create duplicate slots for it starting at dwCurrentDuplicateVtableSlot. Fill out the interface
+ // map for all interfaces as they are placed.
+ //
+ // If we are an interface, then all methods are already placed. Fill out the interface map for
+ // interfaces as they are placed.
+ //
+ if (!IsInterface())
+ {
+ ComputeInterfaceMapEquivalenceSet();
+
+ PlaceInterfaceMethods();
+
+ ProcessMethodImpls();
+ ProcessInexactMethodImpls();
+ PlaceMethodImpls();
+
+ if (!bmtProp->fNoSanityChecks)
+ {
+ // Now that interface method implementations have been fully resolved,
+ // we need to make sure that type constraints are also met.
+ ValidateInterfaceMethodConstraints();
+ }
+ }
+
+ // Verify that we have not overflowed the number of slots.
+ if (!FitsInU2((UINT64)bmtVT->pSlotTable->GetSlotCount()))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_TOO_MANY_METHODS);
+ }
+
+ // ensure we didn't overflow the temporary vtable
+ _ASSERTE(bmtVT->pSlotTable->GetSlotCount() <= bmtVT->dwMaxVtableSize);
+
+ // Allocate and initialize the dictionary for the type. This will be filled out later
+ // with the final values.
+ AllocAndInitDictionary();
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////
+ // Fields
+ //
+
+ // We decide here if we need a dynamic entry for our statics. We need to know now because
+ // the offsets of our fields will depend on this. For the dynamic case (which requires
+ // an extra indirection through the MethodTable) we'll allocate the slot
+ // in SetupMethodTable.
+ if (((pModule->IsReflection() || bmtGenerics->HasInstantiation() || !pModule->IsStaticStoragePrepared(cl)) &&
+ (bmtVT->GetClassCtorSlotIndex() != INVALID_SLOT_INDEX || bmtEnumFields->dwNumStaticFields !=0))
+#ifdef EnC_SUPPORTED
+ // Classes in modules that have been edited (we would do this at the class level if there
+ // were a way to tell whether the class had been edited) also have dynamic statics, as the
+ // number of statics might have changed, so we can't use the static module-wide storage
+ || (pModule->IsEditAndContinueEnabled() &&
+ ((EditAndContinueModule*)pModule)->GetApplyChangesCount() > CorDB_DEFAULT_ENC_FUNCTION_VERSION)
+#endif // EnC_SUPPORTED
+ )
+ {
+ // We will need a dynamic id
+ bmtProp->fDynamicStatics = true;
+
+ if (bmtGenerics->HasInstantiation())
+ {
+ bmtProp->fGenericsStatics = true;
+ }
+ }
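+
+ // For example: a type with static fields in a Reflection.Emit module, any generic
+ // instantiation (a hypothetical List`1<int>, say), and every type in an EnC-edited
+ // module all take the dynamic-statics path; a plain non-generic type whose module
+ // already has its static storage prepared does not.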
+
+ // If not NULL, it means there are some by-value fields, and this contains an entry for each instance or static field,
+ // which is NULL if not a by value field, and points to the EEClass of the field if a by value field. Instance fields
+ // come first, statics come second.
+ MethodTable ** pByValueClassCache = NULL;
+
+ // Go thru all fields and initialize their FieldDescs.
+ InitializeFieldDescs(GetApproxFieldDescListRaw(), pLayoutRawFieldInfos, bmtInternal, bmtGenerics,
+ bmtMetaData, bmtEnumFields, bmtError,
+ &pByValueClassCache, bmtMFDescs, bmtFP, bmtCSInfo,
+ &totalDeclaredFieldSize);
+
+ // Place regular static fields
+ PlaceRegularStaticFields();
+
+ // Place thread static fields
+ PlaceThreadStaticFields();
+
+ LOG((LF_CODESHARING,
+ LL_INFO10000,
+ "Placing %d statics (%d handles) for class %s.\n",
+ GetNumStaticFields(), GetNumHandleRegularStatics() + GetNumHandleThreadStatics(),
+ pszDebugName));
+
+ if (IsBlittable() || IsManagedSequential())
+ {
+ bmtFP->NumGCPointerSeries = 0;
+ bmtFP->NumInstanceGCPointerFields = 0;
+
+ _ASSERTE(HasLayout());
+
+ bmtFP->NumInstanceFieldBytes = IsBlittable() ? GetLayoutInfo()->m_cbNativeSize
+ : GetLayoutInfo()->m_cbManagedSize;
+
+ // For simple Blittable types we still need to check if they have any overlapping
+ // fields and call the method SetHasOverLayedFields() when they are detected.
+ //
+ if (HasExplicitFieldOffsetLayout())
+ {
+ _ASSERTE(!bmtGenerics->fContainsGenericVariables); // A simple Blittable type can't ever be an open generic type.
+ HandleExplicitLayout(pByValueClassCache);
+ }
+ }
+ else
+ {
+ _ASSERTE(!IsBlittable());
+ // HandleExplicitLayout fails for the GenericTypeDefinition even when
+ // it would succeed for some particular instantiations.
+ // Thus we only do explicit layout for real instantiations, e.g. C<int>, not
+ // the open types such as the GenericTypeDefinition C<!0> or any
+ // of the "fake" types involving generic type variables which are
+ // used for reflection and verification, e.g. C<List<!0>>.
+ //
+ if (!bmtGenerics->fContainsGenericVariables && HasExplicitFieldOffsetLayout())
+ {
+ HandleExplicitLayout(pByValueClassCache);
+ }
+ else
+ {
+ // Place instance fields
+ PlaceInstanceFields(pByValueClassCache);
+ }
+ }
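+
+ // Decision summary for the instance-field layout above (an illustrative simplification):
+ //   blittable or managed-sequential        => size comes from the native/managed layout info
+ //                                             (explicit-offset blittables still get the overlap check)
+ //   explicit layout on a closed type       => HandleExplicitLayout
+ //   everything else (incl. open generics)  => PlaceInstanceFields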
+
+ if (CheckIfSIMDAndUpdateSize())
+ {
+ totalDeclaredFieldSize = bmtFP->NumInstanceFieldBytes;
+ }
+
+ // We enforce that all value classes have non-zero size
+ if (IsValueClass() && bmtFP->NumInstanceFieldBytes == 0)
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_ZEROSIZE);
+ }
+
+ if (bmtFP->fHasSelfReferencingStaticValueTypeField_WithRVA)
+ { // Verify self-referencing statics with RVA (now when the ValueType size is known)
+ VerifySelfReferencingStaticValueTypeFields_WithRVA(pByValueClassCache);
+ }
+
+#ifdef FEATURE_REMOTING
+ // If the class is serializable we scan it for VTS (Version Tolerant
+ // Serialization) event methods or NotSerialized or OptionalField
+ // fields. Any such info found will be attached to the method as
+ // optional data later.
+ if (IsTdSerializable(GetAttrClass()))
+ {
+ ScanTypeForVtsInfo();
+ }
+#endif // FEATURE_REMOTING
+
+ // Now setup the method table
+
+#ifdef FEATURE_PREJIT
+ Module *pComputedPZM = pLoaderModule;
+
+ if (bmtGenerics->GetNumGenericArgs() > 0)
+ {
+ pComputedPZM = Module::ComputePreferredZapModule(pModule, bmtGenerics->GetInstantiation());
+ }
+
+ SetupMethodTable2(pLoaderModule, pComputedPZM);
+#else // FEATURE_PREJIT
+ SetupMethodTable2(pLoaderModule);
+#endif // FEATURE_PREJIT
+
+ MethodTable * pMT = GetHalfBakedMethodTable();
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+ if (GetHalfBakedClass()->IsAlign8Candidate())
+ pMT->SetRequiresAlign8();
+#endif
+
+ if (bmtGenerics->pVarianceInfo != NULL)
+ {
+ pMT->SetHasVariance();
+ }
+
+ if (bmtFP->NumRegularStaticGCBoxedFields != 0)
+ {
+ pMT->SetHasBoxedRegularStatics();
+ }
+
+ if (IsValueClass())
+ {
+ if (bmtFP->NumInstanceFieldBytes != totalDeclaredFieldSize || HasOverLayedField())
+ GetHalfBakedClass()->SetIsNotTightlyPacked();
+
+#ifdef FEATURE_HFA
+ CheckForHFA(pByValueClassCache);
+#endif
+ }
+
+#ifdef FEATURE_HFA
+ if (HasLayout())
+ {
+ CheckForNativeHFA();
+ }
+#endif
+
+#ifdef _DEBUG
+ pMT->SetDebugClassName(GetDebugClassName());
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ if (IsInterface())
+ {
+ GetCoClassAttribInfo();
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (HasExplicitFieldOffsetLayout())
+ // Perform relevant GC calculations for explicit-layout types
+ HandleGCForExplicitLayout();
+ else
+ // Perform relevant GC calculations for value classes
+ HandleGCForValueClasses(pByValueClassCache);
+
+ // GC requires the series to be sorted.
+ // TODO: fix it so that we emit them in the correct order in the first place.
+ if (pMT->ContainsPointers())
+ {
+ CGCDesc* gcDesc = CGCDesc::GetCGCDescFromMT(pMT);
+ qsort(gcDesc->GetLowestSeries(), (int)gcDesc->GetNumSeries(), sizeof(CGCDescSeries), compareCGCDescSeries);
+ }
+
+ SetFinalizationSemantics();
+
+#if defined(CHECK_APP_DOMAIN_LEAKS) || defined(_DEBUG)
+ // Figure out if we're domain agile.
+ // Note that this checks a bunch of field directly on the class & method table,
+ // so it needs to come late in the game.
+ EEClass::SetAppDomainAgileAttribute(pMT);
+#endif
+
+ // Allocate dynamic slot if necessary
+ if (bmtProp->fDynamicStatics)
+ {
+ if (bmtProp->fGenericsStatics)
+ {
+ FieldDesc* pStaticFieldDescs = NULL;
+
+ if (bmtEnumFields->dwNumStaticFields != 0)
+ {
+ pStaticFieldDescs = pMT->GetApproxFieldDescListRaw() + bmtEnumFields->dwNumInstanceFields;
+ }
+
+ pMT->SetupGenericsStaticsInfo(pStaticFieldDescs);
+ }
+ else
+ {
+ // Get an id for the dynamic class. We store it in the class because
+ // no class that is persisted in an ngen image should have one (i.e., if the class is
+ // ngen'ed it does not need a dynamic id).
+ // The id is stored in an optional field so we need to ensure an optional field descriptor has
+ // been allocated for this EEClass instance.
+ EnsureOptionalFieldsAreAllocated(GetHalfBakedClass(), m_pAllocMemTracker, pAllocator->GetLowFrequencyHeap());
+ SetModuleDynamicID(GetModule()->AllocateDynamicEntry(pMT));
+ }
+ }
+
+ //
+ // if there are context or thread static set the info in the method table optional members
+ //
+#ifdef FEATURE_REMOTING
+ DWORD contextStaticsSize = bmtCSInfo->dwContextStaticsSize;
+ if (contextStaticsSize != 0)
+ {
+ if (!FitsIn<WORD>(contextStaticsSize))
+ {
+ BuildMethodTableThrowException(IDS_EE_TOOMANYFIELDS);
+ }
+
+ // this is responsible for setting the flag and allocation in the loader heap
+ pMT->SetupContextStatics(GetMemTracker(), (WORD)contextStaticsSize);
+ }
+#endif // FEATURE_REMOTING
+
+ if (!bmtProp->fNoSanityChecks)
+ {
+ // If we have a non-interface class, then do inheritance security
+ // checks on it. The check starts by checking for inheritance
+ // permission demands on the current class. If these first checks
+ // succeeded, then the cached declared method list is scanned for
+ // methods that have inheritance permission demands.
+ VerifyInheritanceSecurity();
+
+ // If this is a type equivalent class, then check to see that security
+ // rules have been properly followed
+ VerifyEquivalenceSecurity();
+ }
+
+ // Check for the RemotingProxy Attribute
+#ifdef FEATURE_REMOTING
+ if (IsContextful())
+ {
+ PREFIX_ASSUME(g_pObjectClass != NULL);
+ // Skip mscorlib marshal-by-ref classes since they all
+ // are assumed to have the default proxy attribute
+ if (pModule != g_pObjectClass->GetModule())
+ {
+ CONTRACT_VIOLATION(LoadsTypeViolation); // This api can cause the ProxyAttribute class to be loaded.
+ CheckForRemotingProxyAttrib();
+ }
+ }
+
+ if (IsContextful() || HasRemotingProxyAttribute())
+ {
+ // Contextful and classes that have a remoting proxy attribute
+ // (whether they are MarshalByRef or ContextFul) always take the slow
+ // path of managed activation
+ pMT->SetRequiresManagedActivation();
+ }
+#endif // FEATURE_REMOTING
+ // structs with GC pointers MUST be pointer-size aligned because the GC assumes it
+ if (IsValueClass() && pMT->ContainsPointers() && (bmtFP->NumInstanceFieldBytes % sizeof(void*) != 0))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ if (IsInterface())
+ {
+ // Reset parent class
+ pMT->SetParentMethodTable (g_pObjectClass);
+ }
+
+#ifdef _DEBUG
+ // Reset the debug method names for BoxedEntryPointStubs
+ // so they reflect the very best debug information for the methods
+ {
+ DeclaredMethodIterator methIt(*this);
+ while (methIt.Next())
+ {
+ if (methIt->GetUnboxedMethodDesc() != NULL)
+ {
+ {
+ MethodDesc *pMD = methIt->GetUnboxedMethodDesc();
+ StackSString name(SString::Utf8);
+ TypeString::AppendMethodDebug(name, pMD);
+ StackScratchBuffer buff;
+ const char* pDebugNameUTF8 = name.GetUTF8(buff);
+ S_SIZE_T safeLen = S_SIZE_T(strlen(pDebugNameUTF8)) + S_SIZE_T(1);
+ if(safeLen.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW);
+ size_t len = safeLen.Value();
+ pMD->m_pszDebugMethodName = (char*) AllocateFromLowFrequencyHeap(safeLen);
+ _ASSERTE(pMD->m_pszDebugMethodName);
+ strcpy_s((char *) pMD->m_pszDebugMethodName, len, pDebugNameUTF8);
+ }
+
+ {
+ MethodDesc *pMD = methIt->GetMethodDesc();
+
+ StackSString name(SString::Utf8);
+ TypeString::AppendMethodDebug(name, pMD);
+ StackScratchBuffer buff;
+ const char* pDebugNameUTF8 = name.GetUTF8(buff);
+ S_SIZE_T safeLen = S_SIZE_T(strlen(pDebugNameUTF8))+S_SIZE_T(1);
+ if(safeLen.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW);
+ size_t len = safeLen.Value();
+ pMD->m_pszDebugMethodName = (char*) AllocateFromLowFrequencyHeap(safeLen);
+ _ASSERTE(pMD->m_pszDebugMethodName);
+ strcpy_s((char *) pMD->m_pszDebugMethodName, len, pDebugNameUTF8);
+ }
+ }
+ }
+ }
+#endif // _DEBUG
+
+#ifdef FEATURE_REMOTING
+ // Make sure the object cloner won't attempt to blit types that aren't serializable.
+ if (!IsTdSerializable(GetAttrClass()) && !IsEnum())
+ SetCannotBeBlittedByObjectCloner();
+#endif
+
+ //If this is a value type, then propagate the UnsafeValueTypeAttribute from
+ //its instance members to this type.
+ if (IsValueClass() && !IsUnsafeValueClass())
+ {
+ ApproxFieldDescIterator fields(GetHalfBakedMethodTable(),
+ ApproxFieldDescIterator::INSTANCE_FIELDS );
+ FieldDesc * current;
+ while (NULL != (current = fields.Next()))
+ {
+ CONSISTENCY_CHECK(!current->IsStatic());
+ if (current->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ {
+ TypeHandle th = current->LookupApproxFieldTypeHandle();
+ CONSISTENCY_CHECK(!th.IsNull());
+ if (th.AsMethodTable()->GetClass()->IsUnsafeValueClass())
+ {
+ SetUnsafeValueClass();
+ break;
+ }
+ }
+ }
+ }
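+
+ // Illustration (hypothetical C#): given
+ //     struct Inner { ... }    // carries UnsafeValueTypeAttribute (e.g. it declares a fixed buffer)
+ //     struct Outer { Inner i; }
+ // the loop above marks Outer as an unsafe value class because its instance field
+ // type Inner is one.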
+
+ // Grow the typedef ridmap in advance as we can't afford to
+ // fail once we set the resolve bit
+ pModule->EnsureTypeDefCanBeStored(bmtInternal->pType->GetTypeDefToken());
+
+ // Grow the tables in advance so that RID map filling cannot fail
+ // once we're past the commit point.
+ EnsureRIDMapsCanBeFilled();
+
+ {
+ // NOTE! The EEClass can now be accessed by other threads.
+ // Do NOT place any initialization after this point.
+ // You may NOT fail the call after this point.
+ FAULT_FORBID();
+ CANNOTTHROWCOMPLUSEXCEPTION();
+
+ /*
+ GetMemTracker()->SuppressRelease();
+ */
+ }
+
+#ifdef _DEBUG
+ if (g_pConfig->ShouldDumpOnClassLoad(pszDebugName))
+ {
+ LOG((LF_ALWAYS, LL_ALWAYS, "Method table summary for '%s':\n", pszDebugName));
+ LOG((LF_ALWAYS, LL_ALWAYS, "Number of static fields: %d\n", bmtEnumFields->dwNumStaticFields));
+ LOG((LF_ALWAYS, LL_ALWAYS, "Number of instance fields: %d\n", bmtEnumFields->dwNumInstanceFields));
+ LOG((LF_ALWAYS, LL_ALWAYS, "Number of static obj ref fields: %d\n", bmtEnumFields->dwNumStaticObjRefFields));
+ LOG((LF_ALWAYS, LL_ALWAYS, "Number of static boxed fields: %d\n", bmtEnumFields->dwNumStaticBoxedFields));
+ LOG((LF_ALWAYS, LL_ALWAYS, "Number of declared fields: %d\n", NumDeclaredFields()));
+ LOG((LF_ALWAYS, LL_ALWAYS, "Number of declared methods: %d\n", NumDeclaredMethods()));
+ LOG((LF_ALWAYS, LL_ALWAYS, "Number of declared non-abstract methods: %d\n", bmtMethod->dwNumDeclaredNonAbstractMethods));
+ pMT->Debug_DumpInterfaceMap("Approximate");
+ pMT->DebugDumpVtable(pszDebugName, FALSE);
+ pMT->DebugDumpFieldLayout(pszDebugName, FALSE);
+ pMT->DebugDumpGCDesc(pszDebugName, FALSE);
+ pMT->Debug_DumpDispatchMap();
+ }
+#endif //_DEBUG
+
+ STRESS_LOG3(LF_CLASSLOADER, LL_INFO1000, "MethodTableBuilder: finished method table for module %p token %x = %pT \n",
+ pModule,
+ GetCl(),
+ GetHalfBakedMethodTable());
+
+#ifdef MDA_SUPPORTED
+ MdaMarshaling* mda = MDA_GET_ASSISTANT(Marshaling);
+ if (mda && HasLayout())
+ {
+ FieldMarshaler *pFieldMarshaler = (FieldMarshaler*)GetLayoutInfo()->GetFieldMarshalers();
+ UINT numReferenceFields = GetLayoutInfo()->GetNumCTMFields();
+
+ while (numReferenceFields--)
+ {
+ mda->ReportFieldMarshal(pFieldMarshaler);
+
+ ((BYTE*&)pFieldMarshaler) += MAXFIELDMARSHALERSIZE;
+ }
+ }
+#endif // MDA_SUPPORTED
+
+#ifdef FEATURE_PREJIT
+ _ASSERTE(pComputedPZM == Module::GetPreferredZapModuleForMethodTable(pMT));
+#endif // FEATURE_PREJIT
+
+ END_INTERIOR_STACK_PROBE;
+
+ return GetHalfBakedMethodTable();
+} // MethodTableBuilder::BuildMethodTableThrowing
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+#ifdef FEATURE_REMOTING
+BOOL
+IsSerializerRelatedInterface(MethodTable *pItfMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (MscorlibBinder::IsClass(pItfMT, CLASS__ISERIALIZABLE))
+ return TRUE;
+ if (MscorlibBinder::IsClass(pItfMT, CLASS__IOBJECTREFERENCE))
+ return TRUE;
+ if (MscorlibBinder::IsClass(pItfMT, CLASS__IDESERIALIZATIONCB))
+ return TRUE;
+
+ return FALSE;
+}
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+// Resolve unresolved interfaces, determine an upper bound on the size of the interface map.
+//
+VOID
+MethodTableBuilder::ResolveInterfaces(
+ WORD cBuildingInterfaceList,
+ BuildingInterfaceInfo_t * pBuildingInterfaceList)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(bmtAllocator));
+ PRECONDITION(CheckPointer(bmtInterface));
+ PRECONDITION(CheckPointer(bmtVT));
+ PRECONDITION(CheckPointer(bmtParent));
+ }
+ CONTRACTL_END;
+
+ // resolve unresolved interfaces and determine the size of the largest interface (in # slots)
+
+#ifdef FEATURE_REMOTING // code for objectcloner
+ // First look through the interfaces explicitly declared by this class
+ for (DWORD i = 0; i < cBuildingInterfaceList; i++)
+ {
+ MethodTable *pInterface = pBuildingInterfaceList[i].m_pMethodTable;
+ if (IsSerializerRelatedInterface(pInterface))
+ SetCannotBeBlittedByObjectCloner();
+ }
+#endif // FEATURE_REMOTING // code for objectcloner
+
+ LoadApproxInterfaceMap();
+
+ // Inherit parental slot counts
+ //@TODO: This doesn't belong here.
+ if (HasParent())
+ {
+ MethodTable * pParentClass = GetParentMethodTable();
+ PREFIX_ASSUME(pParentClass != NULL);
+
+ bmtParent->NumParentPointerSeries = pParentClass->ContainsPointers() ?
+ (DWORD)CGCDesc::GetCGCDescFromMT(pParentClass)->GetNumSeries() : 0;
+
+ if (pParentClass->HasFieldsWhichMustBeInited())
+ {
+ SetHasFieldsWhichMustBeInited();
+ }
+#ifdef FEATURE_REMOTING
+ if (pParentClass->CannotBeBlittedByObjectCloner())
+ {
+ SetCannotBeBlittedByObjectCloner();
+ }
+#endif
+#ifdef FEATURE_READYTORUN
+ if (!(IsValueClass() || (pParentClass == g_pObjectClass)))
+ {
+ CheckLayoutDependsOnOtherModules(pParentClass);
+ }
+#endif
+ }
+ else
+ {
+ bmtParent->NumParentPointerSeries = 0;
+ }
+} // MethodTableBuilder::ResolveInterfaces
+
+//*******************************************************************************
+/* static */
+int __cdecl MethodTableBuilder::bmtMetaDataInfo::MethodImplTokenPair::Compare(
+ const void *elem1,
+ const void *elem2)
+{
+ STATIC_CONTRACT_LEAF;
+ MethodImplTokenPair *e1 = (MethodImplTokenPair *)elem1;
+ MethodImplTokenPair *e2 = (MethodImplTokenPair *)elem2;
+ if (e1->methodBody < e2->methodBody) return -1;
+ else if (e1->methodBody > e2->methodBody) return 1;
+ else if (e1->methodDecl < e2->methodDecl) return -1;
+ else if (e1->methodDecl > e2->methodDecl) return 1;
+ else return 0;
+}
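+
+// Illustration (hypothetical tokens): sorting the pairs
+//     (0x06000005, 0x0A000001), (0x06000002, 0x06000003), (0x06000002, 0x06000003)
+// with the comparer above yields
+//     (0x06000002, 0x06000003), (0x06000002, 0x06000003), (0x06000005, 0x0A000001)
+// so equal (body, decl) pairs become adjacent, which the duplicate-elimination pass
+// in EnumerateMethodImpls relies on.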
+
+//*******************************************************************************
+/* static */
+BOOL MethodTableBuilder::bmtMetaDataInfo::MethodImplTokenPair::Equal(
+ const MethodImplTokenPair *elem1,
+ const MethodImplTokenPair *elem2)
+{
+ STATIC_CONTRACT_LEAF;
+ return ((elem1->methodBody == elem2->methodBody) &&
+ (elem1->methodDecl == elem2->methodDecl));
+}
+
+//*******************************************************************************
+VOID
+MethodTableBuilder::EnumerateMethodImpls()
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+ IMDInternalImport * pMDInternalImport = GetMDImport();
+ DWORD rid, maxRidMD, maxRidMR;
+ HENUMInternalMethodImplHolder hEnumMethodImpl(pMDInternalImport);
+ hr = hEnumMethodImpl.EnumMethodImplInitNoThrow(GetCl());
+
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+
+ // This gets the count out of the metadata interface.
+ bmtMethod->dwNumberMethodImpls = hEnumMethodImpl.EnumMethodImplGetCount();
+ bmtMethod->dwNumberInexactMethodImplCandidates = 0;
+
+ // This is the first pass. In it we simply enumerate the token pairs and fill in
+ // the data structures. In addition, we'll sort the list and eliminate duplicates.
+ if (bmtMethod->dwNumberMethodImpls > 0)
+ {
+ //
+ // Allocate the structures to keep track of the token pairs
+ //
+ bmtMetaData->rgMethodImplTokens = new (GetStackingAllocator())
+ bmtMetaDataInfo::MethodImplTokenPair[bmtMethod->dwNumberMethodImpls];
+
+ // Iterate through each MethodImpl declared on this class
+ for (DWORD i = 0; i < bmtMethod->dwNumberMethodImpls; i++)
+ {
+ hr = hEnumMethodImpl.EnumMethodImplNext(
+ &bmtMetaData->rgMethodImplTokens[i].methodBody,
+ &bmtMetaData->rgMethodImplTokens[i].methodDecl);
+ bmtMetaData->rgMethodImplTokens[i].fConsiderDuringInexactMethodImplProcessing = false;
+ bmtMetaData->rgMethodImplTokens[i].fThrowIfUnmatchedDuringInexactMethodImplProcessing = false;
+ bmtMetaData->rgMethodImplTokens[i].interfaceEquivalenceSet = 0;
+
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+ // Grab the next set of body/decl tokens
+ if (hr == S_FALSE)
+ {
+ // In the odd case that the enumerator fails before we've reached the total reported
+ // entries, let's reset the count and just break out. (Should we throw?)
+ bmtMethod->dwNumberMethodImpls = i;
+ break;
+ }
+ }
+
+ // No need to do any sorting or duplicate elimination if there are fewer than two methodImpls
+ if (bmtMethod->dwNumberMethodImpls > 1)
+ {
+ // Now sort
+ qsort(bmtMetaData->rgMethodImplTokens,
+ bmtMethod->dwNumberMethodImpls,
+ sizeof(bmtMetaDataInfo::MethodImplTokenPair),
+ &bmtMetaDataInfo::MethodImplTokenPair::Compare);
+
+ // Now eliminate duplicates
+ for (DWORD i = 0; i < bmtMethod->dwNumberMethodImpls - 1; i++)
+ {
+ CONSISTENCY_CHECK((i + 1) < bmtMethod->dwNumberMethodImpls);
+
+ bmtMetaDataInfo::MethodImplTokenPair *e1 = &bmtMetaData->rgMethodImplTokens[i];
+ bmtMetaDataInfo::MethodImplTokenPair *e2 = &bmtMetaData->rgMethodImplTokens[i + 1];
+
+ // If the pair is equal, eliminate the first one, and reduce the total count by one.
+ if (bmtMetaDataInfo::MethodImplTokenPair::Equal(e1, e2))
+ {
+ DWORD dwCopyNum = bmtMethod->dwNumberMethodImpls - (i + 1);
+ // The source and destination ranges overlap, so use memmove rather than memcpy.
+ memmove(e1, e2, dwCopyNum * sizeof(bmtMetaDataInfo::MethodImplTokenPair));
+ bmtMethod->dwNumberMethodImpls--;
+ CONSISTENCY_CHECK(bmtMethod->dwNumberMethodImpls > 0);
+ }
+ }
+ }
+ }
+
+ if (bmtMethod->dwNumberMethodImpls != 0)
+ {
+ //
+ // Allocate the structures to keep track of the impl matches
+ //
+ bmtMetaData->pMethodDeclSubsts = new (GetStackingAllocator())
+ Substitution[bmtMethod->dwNumberMethodImpls];
+
+ // These are used for verification
+ maxRidMD = pMDInternalImport->GetCountWithTokenKind(mdtMethodDef);
+ maxRidMR = pMDInternalImport->GetCountWithTokenKind(mdtMemberRef);
+
+ // Iterate through each MethodImpl declared on this class
+ for (DWORD i = 0; i < bmtMethod->dwNumberMethodImpls; i++)
+ {
+ PCCOR_SIGNATURE pSigDecl = NULL;
+ PCCOR_SIGNATURE pSigBody = NULL;
+ ULONG cbSigDecl;
+ ULONG cbSigBody;
+ mdToken tkParent;
+
+ mdToken theBody, theDecl;
+ Substitution theDeclSubst(GetModule(), SigPointer(), NULL); // this can get updated below.
+
+ theBody = bmtMetaData->rgMethodImplTokens[i].methodBody;
+ theDecl = bmtMetaData->rgMethodImplTokens[i].methodDecl;
+
+ // IMPLEMENTATION LIMITATION: currently, we require that the body of a methodImpl
+ // belong to the current type. This is because we need to allocate a different
+ // type of MethodDesc for bodies that are part of methodImpls.
+ if (TypeFromToken(theBody) != mdtMethodDef)
+ {
+ hr = FindMethodDeclarationForMethodImpl(
+ theBody,
+ &theBody,
+ TRUE);
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, IDS_CLASSLOAD_MI_ILLEGAL_BODY, mdMethodDefNil);
+ }
+
+ // Make sure to update the stored token with the resolved token.
+ bmtMetaData->rgMethodImplTokens[i].methodBody = theBody;
+ }
+
+ if (TypeFromToken(theBody) != mdtMethodDef)
+ {
+ BuildMethodTableThrowException(BFA_METHODDECL_NOT_A_METHODDEF);
+ }
+ CONSISTENCY_CHECK(theBody == bmtMetaData->rgMethodImplTokens[i].methodBody);
+
+ //
+ // Now that the tokens of Decl and Body are obtained, do the MD validation
+ //
+
+ rid = RidFromToken(theDecl);
+
+ // Perform initial rudimentary validation of the token. Full token verification
+ // will be done in TestMethodImpl when placing the methodImpls.
+ if (TypeFromToken(theDecl) == mdtMethodDef)
+ {
+ // Decl must be valid token
+ if ((rid == 0) || (rid > maxRidMD))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_ILLEGAL_TOKEN_DECL);
+ }
+ // Get signature and length
+ if (FAILED(pMDInternalImport->GetSigOfMethodDef(theDecl, &cbSigDecl, &pSigDecl)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ }
+
+ // The token is not a MethodDef (likely a MemberRef)
+ else
+ {
+ // Decl must be valid token
+ if ((TypeFromToken(theDecl) != mdtMemberRef) || (rid == 0) || (rid > maxRidMR))
+ {
+ bmtError->resIDWhy = IDS_CLASSLOAD_MI_ILLEGAL_TOKEN_DECL;
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_ILLEGAL_TOKEN_DECL);
+ }
+
+ // Get signature and length
+ LPCSTR szDeclName;
+ if (FAILED(pMDInternalImport->GetNameAndSigOfMemberRef(theDecl, &pSigDecl, &cbSigDecl, &szDeclName)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ // Get parent
+ hr = pMDInternalImport->GetParentToken(theDecl,&tkParent);
+ if (FAILED(hr))
+ BuildMethodTableThrowException(hr, *bmtError);
+
+ theDeclSubst = Substitution(tkParent, GetModule(), NULL);
+ }
+
+ // Perform initial rudimentary validation of the token. Full token verification
+ // will be done in TestMethodImpl when placing the methodImpls.
+ {
+ // Body must be valid token
+ rid = RidFromToken(theBody);
+ if ((rid == 0)||(rid > maxRidMD))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_ILLEGAL_TOKEN_BODY);
+ }
+ // Body's parent must be this class
+ hr = pMDInternalImport->GetParentToken(theBody,&tkParent);
+ if (FAILED(hr))
+ BuildMethodTableThrowException(hr, *bmtError);
+ if(tkParent != GetCl())
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_ILLEGAL_BODY);
+ }
+ }
+ // Decl's and Body's signatures must match
+ if(pSigDecl && cbSigDecl)
+ {
+ if (FAILED(pMDInternalImport->GetSigOfMethodDef(theBody, &cbSigBody, &pSigBody)) ||
+ (pSigBody == NULL) ||
+ (cbSigBody == 0))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_MISSING_SIG_BODY);
+ }
+ // Can't use memcmp because there may be two AssemblyRefs
+ // in this scope, pointing to the same assembly, etc.
+ if (!MetaSig::CompareMethodSigs(
+ pSigDecl,
+ cbSigDecl,
+ GetModule(),
+ &theDeclSubst,
+ pSigBody,
+ cbSigBody,
+ GetModule(),
+ NULL))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_BODY_DECL_MISMATCH);
+ }
+ }
+ else
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_MISSING_SIG_DECL);
+ }
+
+ bmtMetaData->pMethodDeclSubsts[i] = theDeclSubst;
+ }
+ }
+} // MethodTableBuilder::EnumerateMethodImpls
+
+//*******************************************************************************
+//
+// Find a method declaration that must reside in the scope passed in. This method cannot be called if
+// the reference travels to another scope.
+//
+// It protects against finding a declaration that lives within
+// us (the type being created).
+//
+HRESULT MethodTableBuilder::FindMethodDeclarationForMethodImpl(
+ mdToken pToken, // Token that is being located (MemberRef or MemberDef)
+ mdToken* pDeclaration, // [OUT] Method definition for Member
+ BOOL fSameClass) // Does the declaration need to be in this class
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ IMDInternalImport *pMDInternalImport = GetMDImport();
+
+ PCCOR_SIGNATURE pSig; // Signature of Member
+ DWORD cSig;
+ LPCUTF8 szMember = NULL;
+
+ // The token should be a member ref or def. If it is a ref then it should
+ // resolve back to us.
+ if(TypeFromToken(pToken) == mdtMemberRef)
+ {
+ // Get the parent
+ mdToken typeref;
+ if (FAILED(pMDInternalImport->GetParentOfMemberRef(pToken, &typeref)))
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"Invalid MemberRef record");
+ IfFailRet(COR_E_TYPELOAD);
+ }
+ GOTPARENT:
+ if (TypeFromToken(typeref) == mdtMethodDef)
+ { // If parent is a method def then this is a vararg method
+ mdTypeDef typeDef;
+ hr = pMDInternalImport->GetParentToken(typeref, &typeDef);
+
+ if (TypeFromToken(typeDef) != mdtTypeDef)
+ { // A mdtMethodDef must be parented by a mdtTypeDef
+ BAD_FORMAT_NOTHROW_ASSERT(!"MethodDef without TypeDef as Parent");
+ IfFailRet(COR_E_TYPELOAD);
+ }
+
+ BAD_FORMAT_NOTHROW_ASSERT(typeDef == GetCl());
+
+ // This is the real method we are overriding
+ *pDeclaration = typeref;
+ }
+ else if (TypeFromToken(typeref) == mdtTypeSpec)
+ { // Added so that method impls can refer to instantiated interfaces or classes
+ if (FAILED(pMDInternalImport->GetSigFromToken(typeref, &cSig, &pSig)))
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"Invalid TypeSpec record");
+ IfFailRet(COR_E_TYPELOAD);
+ }
+ CorElementType elemType = (CorElementType) *pSig++;
+
+ if (elemType == ELEMENT_TYPE_GENERICINST)
+ { // If this is a generic inst, we expect that the next elem is ELEMENT_TYPE_CLASS,
+ // which is handled in the case below.
+ elemType = (CorElementType) *pSig++;
+ BAD_FORMAT_NOTHROW_ASSERT(elemType == ELEMENT_TYPE_CLASS);
+ }
+
+ if (elemType == ELEMENT_TYPE_CLASS)
+ { // This covers E_T_GENERICINST and E_T_CLASS typespec formats. We don't expect
+ // any other kinds to come through here.
+ CorSigUncompressToken(pSig, &typeref);
+ }
+ else
+ { // This is an unrecognized signature format.
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT,
+ IDS_CLASSLOAD_MI_BAD_SIG,
+ mdMethodDefNil);
+ }
+ goto GOTPARENT;
+ }
+ else
+ { // Verify that the ref points back to us
+ mdToken tkDef = mdTokenNil;
+
+ if (TypeFromToken(typeref) == mdtTypeRef)
+ { // We only get here when we know the token does not reference a type in a different scope.
+ LPCUTF8 pszNameSpace;
+ LPCUTF8 pszClassName;
+
+ if (FAILED(pMDInternalImport->GetNameOfTypeRef(typeref, &pszNameSpace, &pszClassName)))
+ {
+ IfFailRet(COR_E_TYPELOAD);
+ }
+ mdToken tkRes;
+ if (FAILED(pMDInternalImport->GetResolutionScopeOfTypeRef(typeref, &tkRes)))
+ {
+ IfFailRet(COR_E_TYPELOAD);
+ }
+ hr = pMDInternalImport->FindTypeDef(pszNameSpace,
+ pszClassName,
+ (TypeFromToken(tkRes) == mdtTypeRef) ? tkRes : mdTokenNil,
+ &tkDef);
+ if (FAILED(hr))
+ {
+ IfFailRet(COR_E_TYPELOAD);
+ }
+ }
+ else if (TypeFromToken(typeref) == mdtTypeDef)
+ { // We get a typedef when the parent of the token is a typespec to the type.
+ tkDef = typeref;
+ }
+ else
+ {
+ CONSISTENCY_CHECK_MSGF(FALSE, ("Invalid methodimpl signature in class %s.", GetDebugClassName()));
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT,
+ IDS_CLASSLOAD_MI_BAD_SIG,
+ mdMethodDefNil);
+ }
+
+ if (fSameClass && tkDef != GetCl())
+ { // If we required that the typedef be the same type as the current class,
+ // and it doesn't match, we need to return a failure result.
+ IfFailRet(COR_E_TYPELOAD);
+ }
+
+ IfFailRet(pMDInternalImport->GetNameAndSigOfMemberRef(pToken, &pSig, &cSig, &szMember));
+
+ if (isCallConv(
+ MetaSig::GetCallingConvention(GetModule(), Signature(pSig, cSig)),
+ IMAGE_CEE_CS_CALLCONV_FIELD))
+ {
+ return VLDTR_E_MR_BADCALLINGCONV;
+ }
+
+ hr = pMDInternalImport->FindMethodDef(
+ tkDef, szMember, pSig, cSig, pDeclaration);
+
+ IfFailRet(hr);
+ }
+ }
+ else if (TypeFromToken(pToken) == mdtMethodDef)
+ {
+ mdTypeDef typeDef;
+
+ // Verify that we are the parent
+ hr = pMDInternalImport->GetParentToken(pToken, &typeDef);
+ IfFailRet(hr);
+
+ if(typeDef != GetCl())
+ {
+ IfFailRet(COR_E_TYPELOAD);
+ }
+
+ *pDeclaration = pToken;
+ }
+ else
+ {
+ IfFailRet(COR_E_TYPELOAD);
+ }
+ return hr;
+}
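+
+// Sketch of the resolution performed above for a methodImpl decl that is a MemberRef
+// parented by a TypeSpec (hypothetical names):
+//     MemberRef "M" -> parent TypeSpec for IFoo`1<int32>
+//       -> signature reads ELEMENT_TYPE_GENERICINST, then ELEMENT_TYPE_CLASS, then the IFoo`1 token
+//       -> a TypeRef resolves via FindTypeDef, and FindMethodDef then locates the MethodDef for "M"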
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif // _PREFAST_
+//---------------------------------------------------------------------------------------
+//
+// Used by BuildMethodTable
+//
+// Enumerate this class's members
+//
+VOID
+MethodTableBuilder::EnumerateClassMethods()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(bmtInternal));
+ PRECONDITION(CheckPointer(bmtEnumFields));
+ PRECONDITION(CheckPointer(bmtMFDescs));
+ PRECONDITION(CheckPointer(bmtProp));
+ PRECONDITION(CheckPointer(bmtMetaData));
+ PRECONDITION(CheckPointer(bmtVT));
+ PRECONDITION(CheckPointer(bmtError));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DWORD i;
+ IMDInternalImport *pMDInternalImport = GetMDImport();
+ mdToken tok;
+ DWORD dwMemberAttrs;
+ BOOL fIsClassEnum = IsEnum();
+ BOOL fIsClassInterface = IsInterface();
+ BOOL fIsClassValueType = IsValueClass();
+ BOOL fIsClassComImport = IsComImport();
+ BOOL fIsClassNotAbstract = (IsTdAbstract(GetAttrClass()) == 0);
+ PCCOR_SIGNATURE pMemberSignature;
+ ULONG cMemberSignature;
+
+ //
+ // Run through the method list and calculate the following:
+ // # methods.
+ // # "other" methods (i.e. static or private)
+ // # non-other methods
+ //
+
+ HENUMInternalHolder hEnumMethod(pMDInternalImport);
+ hr = hEnumMethod.EnumInitNoThrow(mdtMethodDef, GetCl());
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+
+ // Allocate an array to contain the method tokens as well as information about the methods.
+ DWORD cMethAndGaps = hEnumMethod.EnumGetCount();
+
+ if ((DWORD)MAX_SLOT_INDEX <= cMethAndGaps)
+ BuildMethodTableThrowException(IDS_CLASSLOAD_TOO_MANY_METHODS);
+
+ bmtMethod->m_cMaxDeclaredMethods = (SLOT_INDEX)cMethAndGaps;
+ bmtMethod->m_cDeclaredMethods = 0;
+ bmtMethod->m_rgDeclaredMethods = new (GetStackingAllocator())
+ bmtMDMethod *[bmtMethod->m_cMaxDeclaredMethods];
+
+ enum { SeenCtor = 1, SeenInvoke = 2, SeenBeginInvoke = 4, SeenEndInvoke = 8};
+ unsigned delegateMethodsSeen = 0;
+
+ for (i = 0; i < cMethAndGaps; i++)
+ {
+ ULONG dwMethodRVA;
+ DWORD dwImplFlags;
+ METHOD_TYPE type;
+ METHOD_IMPL_TYPE implType;
+ LPSTR strMethodName;
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+ // TypeEquivalent structs must not have methods
+ if (bmtProp->fIsTypeEquivalent && fIsClassValueType)
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_EQUIVALENTSTRUCTMETHODS);
+ }
+#endif
+
+ //
+ // Go to the next method and retrieve its attributes.
+ //
+
+ hEnumMethod.EnumNext(&tok);
+ DWORD rid = RidFromToken(tok);
+ if ((rid == 0)||(rid > pMDInternalImport->GetCountWithTokenKind(mdtMethodDef)))
+ {
+ BuildMethodTableThrowException(BFA_METHOD_TOKEN_OUT_OF_RANGE);
+ }
+
+ if (FAILED(pMDInternalImport->GetMethodDefProps(tok, &dwMemberAttrs)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ if (IsMdRTSpecialName(dwMemberAttrs) || IsMdVirtual(dwMemberAttrs) || IsDelegate())
+ {
+ if (FAILED(pMDInternalImport->GetNameOfMethodDef(tok, (LPCSTR *)&strMethodName)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ if(IsStrLongerThan(strMethodName,MAX_CLASS_NAME))
+ {
+ BuildMethodTableThrowException(BFA_METHOD_NAME_TOO_LONG);
+ }
+ }
+ else
+ {
+ strMethodName = NULL;
+ }
+
+ DWORD numGenericMethodArgs = 0;
+
+ {
+ HENUMInternalHolder hEnumTyPars(pMDInternalImport);
+ hr = hEnumTyPars.EnumInitNoThrow(mdtGenericParam, tok);
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+
+ numGenericMethodArgs = hEnumTyPars.EnumGetCount();
+
+ // We do not want to support context-bound objects with generic methods.
+#ifdef FEATURE_REMOTING
+ if (IsContextful() && numGenericMethodArgs > 0)
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_CONTEXT_BOUND_GENERIC_METHOD);
+ }
+#endif // FEATURE_REMOTING
+
+ if (numGenericMethodArgs != 0)
+ {
+ HENUMInternalHolder hEnumGenericPars(pMDInternalImport);
+
+ hEnumGenericPars.EnumInit(mdtGenericParam, tok);
+
+ for (unsigned methIdx = 0; methIdx < numGenericMethodArgs; methIdx++)
+ {
+ mdGenericParam tkTyPar;
+ pMDInternalImport->EnumNext(&hEnumGenericPars, &tkTyPar);
+ DWORD flags;
+ if (FAILED(pMDInternalImport->GetGenericParamProps(tkTyPar, NULL, &flags, NULL, NULL, NULL)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ if (0 != (flags & ~(gpVarianceMask | gpSpecialConstraintMask)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ switch (flags & gpVarianceMask)
+ {
+ case gpNonVariant:
+ break;
+
+ case gpCovariant: // intentional fallthru
+ case gpContravariant:
+ BuildMethodTableThrowException(VLDTR_E_GP_ILLEGAL_VARIANT_MVAR);
+ break;
+
+ default:
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ }
+ }
+ }
+
+ //
+ // We need to check if there are any gaps in the vtable. These are
+ // represented by methods with the mdRTSpecialName flag and a name of the form
+ // _VtblGap_nnn (to represent nnn empty slots) or _VtblGap (to represent a
+ // single empty slot).
+ //
+
+ if (IsMdRTSpecialName(dwMemberAttrs))
+ {
+ PREFIX_ASSUME(strMethodName != NULL); // if we've gotten here we've called GetNameOfMethodDef
+
+ // The slot is special, but it might not be a vtable spacer. To
+ // determine that we must look at the name.
+ if (strncmp(strMethodName, "_VtblGap", 8) == 0)
+ {
+ //
+ // This slot doesn't really exist, don't add it to the method
+ // table. Instead it represents one or more empty slots, encoded
+ // in the method name. Locate the beginning of the count in the
+ // name. There are these points to consider:
+ // There may be no count present at all (in which case the
+ // count is taken as one).
+ // There may be an additional count just after Gap but before
+ // the '_'. We ignore this.
+ //
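+ // For illustration, all of the following are hypothetical gap-method names accepted
+ // by the parsing below:
+ //     "_VtblGap"     => 1 empty slot (no count present)
+ //     "_VtblGap2"    => 1 empty slot (the number right after "Gap" is ignored)
+ //     "_VtblGap1_12" => 12 empty slots (the count follows the '_')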
+
+ LPCSTR pos = strMethodName + 8;
+
+ // Skip optional number.
+ while (IS_DIGIT(*pos))
+ pos++;
+
+ WORD n = 0;
+
+ // Check for presence of count.
+ if (*pos == '\0')
+ n = 1;
+ else
+ {
+ if (*pos != '_')
+ {
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT,
+ IDS_CLASSLOAD_BADSPECIALMETHOD,
+ tok);
+ }
+
+ // Skip '_'.
+ pos++;
+
+ // Read count.
+ bool fReadAtLeastOneDigit = false;
+ while (IS_DIGIT(*pos))
+ {
+ _ASSERTE(n < 6552);
+ n *= 10;
+ n += DIGIT_TO_INT(*pos);
+ pos++;
+ fReadAtLeastOneDigit = true;
+ }
+
+ // Check for end of name.
+ if (*pos != '\0' || !fReadAtLeastOneDigit)
+ {
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT,
+ IDS_CLASSLOAD_BADSPECIALMETHOD,
+ tok);
+ }
+ }
+
+#if defined(MDIL)
+ // Interfaces with sparse vtables are not currently supported in the triton toolchain.
+ if (GetAppDomain()->IsMDILCompilationDomain())
+ {
+ GetSvcLogger()->Log(W("Warning: Sparse v-table detected.\n"));
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT,
+ IDS_CLASSLOAD_BADSPECIALMETHOD,
+ tok);
+ }
+#endif // defined(MDIL)
+#ifdef FEATURE_COMINTEROP
+ // Record vtable gap in mapping list. The map is an optional field, so ensure we've allocated
+ // these fields first.
+ EnsureOptionalFieldsAreAllocated(GetHalfBakedClass(), m_pAllocMemTracker, GetLoaderAllocator()->GetLowFrequencyHeap());
+ if (GetHalfBakedClass()->GetSparseCOMInteropVTableMap() == NULL)
+ GetHalfBakedClass()->SetSparseCOMInteropVTableMap(new SparseVTableMap());
+
+ GetHalfBakedClass()->GetSparseCOMInteropVTableMap()->RecordGap((WORD)NumDeclaredMethods(), n);
+
+ bmtProp->fSparse = true;
+#endif // FEATURE_COMINTEROP
+ continue;
+ }
+
+ }
+
+
+ //
+ // This is a real method so add it to the enumeration of methods. We now need to retrieve
+ // information on the method and store it for later use.
+ //
+ if (FAILED(pMDInternalImport->GetMethodImplProps(tok, &dwMethodRVA, &dwImplFlags)))
+ {
+ BuildMethodTableThrowException(
+ COR_E_BADIMAGEFORMAT,
+ IDS_CLASSLOAD_BADSPECIALMETHOD,
+ tok);
+ }
+ //
+ // But first - minimal flags validity checks
+ //
+ // No methods in Enums!
+ if (fIsClassEnum)
+ {
+ BuildMethodTableThrowException(BFA_METHOD_IN_A_ENUM);
+ }
+ // RVA : 0
+ if (dwMethodRVA != 0)
+ {
+#ifdef FEATURE_COMINTEROP
+ if(fIsClassComImport)
+ {
+ BuildMethodTableThrowException(BFA_METHOD_WITH_NONZERO_RVA);
+ }
+#endif // FEATURE_COMINTEROP
+ if(IsMdAbstract(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_ABSTRACT_METHOD_WITH_RVA);
+ }
+ if(IsMiRuntime(dwImplFlags))
+ {
+ BuildMethodTableThrowException(BFA_RUNTIME_METHOD_WITH_RVA);
+ }
+ if(IsMiInternalCall(dwImplFlags))
+ {
+ BuildMethodTableThrowException(BFA_INTERNAL_METHOD_WITH_RVA);
+ }
+ }
+
+ // Abstract / not abstract
+ if(IsMdAbstract(dwMemberAttrs))
+ {
+ if(fIsClassNotAbstract)
+ {
+ BuildMethodTableThrowException(BFA_AB_METHOD_IN_AB_CLASS);
+ }
+ if(!IsMdVirtual(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_NONVIRT_AB_METHOD);
+ }
+ }
+ else if(fIsClassInterface)
+ {
+ if (IsMdRTSpecialName(dwMemberAttrs) || IsMdVirtual(dwMemberAttrs))
+ {
+ CONSISTENCY_CHECK(CheckPointer(strMethodName));
+ if (strcmp(strMethodName, COR_CCTOR_METHOD_NAME))
+ {
+ BuildMethodTableThrowException(BFA_NONAB_NONCCTOR_METHOD_ON_INT);
+ }
+ }
+ }
+
+ // Virtual / not virtual
+ if(IsMdVirtual(dwMemberAttrs))
+ {
+ if(IsMdPinvokeImpl(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_VIRTUAL_PINVOKE_METHOD);
+ }
+ if(IsMdStatic(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_VIRTUAL_STATIC_METHOD);
+ }
+ if(strMethodName && (0==strcmp(strMethodName, COR_CTOR_METHOD_NAME)))
+ {
+ BuildMethodTableThrowException(BFA_VIRTUAL_INSTANCE_CTOR);
+ }
+ }
+
+ // Some interface checks.
+ if (IsInterface())
+ {
+ if (IsMdVirtual(dwMemberAttrs))
+ {
+ if (!IsMdAbstract(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_VIRTUAL_NONAB_INT_METHOD);
+ }
+ }
+ else
+ {
+ // Instance field/method
+ if (!IsMdStatic(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_NONVIRT_INST_INT_METHOD);
+ }
+ }
+ }
+
+ // No synchronized methods in ValueTypes
+ if(fIsClassValueType && IsMiSynchronized(dwImplFlags))
+ {
+ BuildMethodTableThrowException(BFA_SYNC_METHOD_IN_VT);
+ }
+
+ // Global methods:
+ if(IsGlobalClass())
+ {
+ if(!IsMdStatic(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_NONSTATIC_GLOBAL_METHOD);
+ }
+ if (strMethodName) //<TODO>@todo: investigate mc++ generating null name</TODO>
+ {
+ if(0==strcmp(strMethodName, COR_CTOR_METHOD_NAME))
+ {
+ BuildMethodTableThrowException(BFA_GLOBAL_INST_CTOR);
+ }
+ }
+ }
+ //@GENERICS:
+ // Generic methods, and methods in generic classes, may not be part of a COM Import
+ // class (except for WinRT), may not be a PInvoke, and may not be an internal call outside mscorlib.
+ if ((bmtGenerics->GetNumGenericArgs() != 0 || numGenericMethodArgs != 0) &&
+ (
+#ifdef FEATURE_COMINTEROP
+ fIsClassComImport ||
+ bmtProp->fComEventItfType ||
+#endif // FEATURE_COMINTEROP
+ IsMdPinvokeImpl(dwMemberAttrs) ||
+ (IsMiInternalCall(dwImplFlags) && !GetModule()->IsSystem())))
+ {
+#ifdef FEATURE_COMINTEROP
+ if (!GetHalfBakedClass()->IsProjectedFromWinRT())
+#endif // FEATURE_COMINTEROP
+ {
+ BuildMethodTableThrowException(BFA_BAD_PLACE_FOR_GENERIC_METHOD);
+ }
+ }
+
+ // Generic methods may not be marked "runtime". However note that
+ // methods in generic delegate classes are, hence we don't apply this to
+ // methods in generic classes in general.
+ if (numGenericMethodArgs != 0 && IsMiRuntime(dwImplFlags))
+ {
+ BuildMethodTableThrowException(BFA_GENERIC_METHOD_RUNTIME_IMPL);
+ }
+
+
+ // Signature validation
+ if (FAILED(pMDInternalImport->GetSigOfMethodDef(tok, &cMemberSignature, &pMemberSignature)))
+ {
+ BuildMethodTableThrowException(hr, BFA_BAD_SIGNATURE, mdMethodDefNil);
+ }
+ hr = validateTokenSig(tok,pMemberSignature,cMemberSignature,dwMemberAttrs,pMDInternalImport);
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, BFA_BAD_SIGNATURE, mdMethodDefNil);
+ }
+
+ // Check the appearance of covariant and contravariant in the method signature
+ // Note that variance is only supported for interfaces
+ if (bmtGenerics->pVarianceInfo != NULL)
+ {
+ SigPointer sp(pMemberSignature, cMemberSignature);
+ ULONG callConv;
+ IfFailThrow(sp.GetCallingConvInfo(&callConv));
+
+ if (callConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ IfFailThrow(sp.GetData(NULL));
+
+ DWORD numArgs;
+ IfFailThrow(sp.GetData(&numArgs));
+
+ // Return type behaves covariantly
+ if (!EEClass::CheckVarianceInSig(
+ bmtGenerics->GetNumGenericArgs(),
+ bmtGenerics->pVarianceInfo,
+ GetModule(),
+ sp,
+ gpCovariant))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_VARIANCE_IN_METHOD_RESULT, tok);
+ }
+ IfFailThrow(sp.SkipExactlyOne());
+ for (DWORD j = 0; j < numArgs; j++)
+ {
+ // Argument types behave contravariantly
+ if (!EEClass::CheckVarianceInSig(bmtGenerics->GetNumGenericArgs(),
+ bmtGenerics->pVarianceInfo,
+ GetModule(),
+ sp,
+ gpContravariant))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_VARIANCE_IN_METHOD_ARG, tok);
+ }
+ IfFailThrow(sp.SkipExactlyOne());
+ }
+ }
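+
+ // Illustration (hypothetical C#): for a variant interface I<out T>, a method
+ //     T Get();          // accepted: T is covariant in the return position
+ //     void Set(T t);    // rejected: T appears in an argument, a contravariant position
+ // which is exactly what the return-type and argument checks above enforce.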
+
+ //
+ // Determine the method's type
+ //
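+ // For methods marked pinvokeimpl or internalcall, the first branch below chooses
+ // among the method types as follows (an illustrative summary):
+ //   NAT_L attribute present                      => METHOD_TYPE_NDIRECT
+ //   attribute absent, ComImport/WinRT/event itf  => METHOD_TYPE_COMINTEROP
+ //                                                   (a non-WinRT .ctor becomes METHOD_TYPE_FCALL)
+ //   attribute absent, RVA == 0                   => METHOD_TYPE_FCALL
+ //   attribute absent, RVA != 0                   => METHOD_TYPE_NDIRECT
+ //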
+
+ if (IsReallyMdPinvokeImpl(dwMemberAttrs) || IsMiInternalCall(dwImplFlags))
+ {
+ hr = NDirect::HasNAT_LAttribute(pMDInternalImport, tok, dwMemberAttrs);
+
+ // There was a problem querying for the attribute
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, IDS_CLASSLOAD_BADPINVOKE, tok);
+ }
+
+ // The attribute is not present
+ if (hr == S_FALSE)
+ {
+ if (fIsClassComImport
+#ifdef FEATURE_COMINTEROP
+ || GetHalfBakedClass()->IsProjectedFromWinRT()
+ || bmtProp->fComEventItfType
+#endif //FEATURE_COMINTEROP
+ )
+ {
+#ifdef FEATURE_COMINTEROP
+ // ComImport classes have methods which are just used
+ // for implementing all interfaces the class supports
+ type = METHOD_TYPE_COMINTEROP;
+
+ // constructor is special
+ if (IsMdRTSpecialName(dwMemberAttrs))
+ {
+ // Note: Method name (.ctor) will be checked in code:ValidateMethods
+
+ // WinRT ctors are interop calls via stubs
+ if (!GetHalfBakedClass()->IsProjectedFromWinRT())
+ {
+ // Ctor on a non-WinRT class
+ type = METHOD_TYPE_FCALL;
+ }
+ }
+#else
+ // If we don't support COM interop, refuse to load interop methods. Otherwise we fail to
+ // jit calls to them since the constructor has no intrinsic ID.
+ BuildMethodTableThrowException(hr, IDS_CLASSLOAD_GENERAL, tok);
+#endif // FEATURE_COMINTEROP
+ }
+ else if (dwMethodRVA == 0)
+ {
+ type = METHOD_TYPE_FCALL;
+ }
+ else
+ {
+ type = METHOD_TYPE_NDIRECT;
+ }
+ }
+ // The NAT_L attribute is present, marking this method as NDirect
+ else
+ {
+ CONSISTENCY_CHECK(hr == S_OK);
+ type = METHOD_TYPE_NDIRECT;
+ }
+ }
+ else if (IsMiRuntime(dwImplFlags))
+ {
+ // currently the only runtime implemented functions are delegate instance methods
+ if (!IsDelegate() || IsMdStatic(dwMemberAttrs) || IsMdAbstract(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_BAD_RUNTIME_IMPL);
+ }
+
+ unsigned newDelegateMethodSeen = 0;
+
+ if (IsMdRTSpecialName(dwMemberAttrs)) // .ctor
+ {
+ if (strcmp(strMethodName, COR_CTOR_METHOD_NAME) != 0 || IsMdVirtual(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_BAD_FLAGS_ON_DELEGATE);
+ }
+ newDelegateMethodSeen = SeenCtor;
+ type = METHOD_TYPE_FCALL;
+ }
+ else
+ {
+ if (strcmp(strMethodName, "Invoke") == 0)
+ newDelegateMethodSeen = SeenInvoke;
+ else if (strcmp(strMethodName, "BeginInvoke") == 0)
+ newDelegateMethodSeen = SeenBeginInvoke;
+ else if (strcmp(strMethodName, "EndInvoke") == 0)
+ newDelegateMethodSeen = SeenEndInvoke;
+ else
+ {
+ BuildMethodTableThrowException(BFA_UNKNOWN_DELEGATE_METHOD);
+ }
+ type = METHOD_TYPE_EEIMPL;
+ }
+
+ // If we get here we have either set newDelegateMethodSeen or we have thrown a BMT exception
+ _ASSERTE(newDelegateMethodSeen != 0);
+
+ if ((delegateMethodsSeen & newDelegateMethodSeen) != 0)
+ {
+ BuildMethodTableThrowException(BFA_DUPLICATE_DELEGATE_METHOD);
+ }
+
+ delegateMethodsSeen |= newDelegateMethodSeen;
+ }
+ else if (numGenericMethodArgs != 0)
+ {
+ //We use an instantiated method desc to represent a generic method
+ type = METHOD_TYPE_INSTANTIATED;
+ }
+ else if (fIsClassInterface)
+ {
+#ifdef FEATURE_COMINTEROP
+ if (IsMdStatic(dwMemberAttrs))
+ {
+ // Static methods in interfaces need nothing special.
+ type = METHOD_TYPE_NORMAL;
+ }
+ else if (bmtGenerics->GetNumGenericArgs() != 0 &&
+ (bmtGenerics->fSharedByGenericInstantiations || (!bmtProp->fIsRedirectedInterface && !GetHalfBakedClass()->IsProjectedFromWinRT())))
+ {
+ // Methods in instantiated interfaces need nothing special - they are not visible from COM etc.
+ // mcComInterop is only useful for unshared instantiated WinRT interfaces. If the interface is
+ // shared by multiple instantiations, the MD would be useless for interop anyway.
+ type = METHOD_TYPE_NORMAL;
+ }
+ else if (bmtProp->fIsMngStandardItf)
+ {
+ // If the interface is a standard managed interface then allocate space for an FCall method desc.
+ type = METHOD_TYPE_FCALL;
+ }
+ else
+ {
+ // If COM interop is supported then all other interface MDs may be
+ // accessed via COM interop. mcComInterop MDs have an additional
+ // pointer-sized field pointing to COM interop data which are
+ // allocated lazily when/if the MD actually gets used for interop.
+ type = METHOD_TYPE_COMINTEROP;
+ }
+#else // !FEATURE_COMINTEROP
+ // This codepath is used by remoting
+ type = METHOD_TYPE_NORMAL;
+#endif // !FEATURE_COMINTEROP
+ }
+ else
+ {
+ type = METHOD_TYPE_NORMAL;
+ }
+
+
+#ifdef _DEBUG
+ // We don't allow stack based declarative security on ecalls, fcalls and
+ // other special purpose methods implemented by the EE (the interceptor
+ // we use doesn't play well with non-jitted stubs).
+ if ((type == METHOD_TYPE_FCALL || type == METHOD_TYPE_EEIMPL) &&
+ (IsMdHasSecurity(dwMemberAttrs) || IsTdHasSecurity(GetAttrClass())))
+ {
+ DWORD dwSecFlags;
+ DWORD dwNullDeclFlags;
+
+ if (IsTdHasSecurity(GetAttrClass()) &&
+ SUCCEEDED(Security::GetDeclarationFlags(pMDInternalImport, GetCl(), &dwSecFlags, &dwNullDeclFlags)))
+ {
+ CONSISTENCY_CHECK_MSG(!(dwSecFlags & ~dwNullDeclFlags & DECLSEC_RUNTIME_ACTIONS),
+ "Cannot add stack based declarative security to a class containing an ecall/fcall/special method.");
+ }
+ if (IsMdHasSecurity(dwMemberAttrs) &&
+ SUCCEEDED(Security::GetDeclarationFlags(pMDInternalImport, tok, &dwSecFlags, &dwNullDeclFlags)))
+ {
+ CONSISTENCY_CHECK_MSG(!(dwSecFlags & ~dwNullDeclFlags & DECLSEC_RUNTIME_ACTIONS),
+ "Cannot add stack based declarative security to an ecall/fcall/special method.");
+ }
+ }
+#endif // _DEBUG
+
+ // PInvoke methods are not permitted on collectible types
+ if ((type == METHOD_TYPE_NDIRECT) && GetAssembly()->IsCollectible())
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_COLLECTIBLEPINVOKE);
+ }
+
+ // Generic methods should always be METHOD_TYPE_INSTANTIATED
+ if ((numGenericMethodArgs != 0) && (type != METHOD_TYPE_INSTANTIATED))
+ {
+ BuildMethodTableThrowException(BFA_GENERIC_METHODS_INST);
+ }
+
+ // Count how many overrides this method does. All method bodies are defined
+ // on this type, so we can just compare the tok with the body token found
+ // in the overrides.
+ implType = METHOD_IMPL_NOT;
+ for (DWORD impls = 0; impls < bmtMethod->dwNumberMethodImpls; impls++)
+ {
+ if (bmtMetaData->rgMethodImplTokens[impls].methodBody == tok)
+ {
+ implType = METHOD_IMPL;
+ break;
+ }
+ }
+
+ // For delegates we don't allow any non-runtime implemented bodies
+ // for any of the four special methods
+ if (IsDelegate() && !IsMiRuntime(dwImplFlags))
+ {
+ if ((strcmp(strMethodName, COR_CTOR_METHOD_NAME) == 0) ||
+ (strcmp(strMethodName, "Invoke") == 0) ||
+ (strcmp(strMethodName, "BeginInvoke") == 0) ||
+ (strcmp(strMethodName, "EndInvoke") == 0) )
+ {
+ BuildMethodTableThrowException(BFA_ILLEGAL_DELEGATE_METHOD);
+ }
+ }
+
+ //
+ // Create a new bmtMDMethod representing this method and add it to the
+ // declared method list.
+ //
+
+ bmtMDMethod * pNewMethod = new (GetStackingAllocator()) bmtMDMethod(
+ bmtInternal->pType,
+ tok,
+ dwMemberAttrs,
+ dwImplFlags,
+ dwMethodRVA,
+ type,
+ implType);
+
+ bmtMethod->AddDeclaredMethod(pNewMethod);
+
+ //
+ // Update the count of the various types of methods.
+ //
+
+ bmtVT->dwMaxVtableSize++;
+
+ // Increment the number of non-abstract declared methods
+ if (!IsMdAbstract(dwMemberAttrs))
+ {
+ bmtMethod->dwNumDeclaredNonAbstractMethods++;
+ }
+ }
+
+ // Check to see that we have all of the required delegate methods (ECMA 13.6 Delegates)
+ if (IsDelegate())
+ {
+ // Do we have all four special delegate methods
+ // or just the two special delegate methods
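+ // (e.g. for a C# "delegate int D(string s);" the compiler emits a runtime-
+ // implemented .ctor and Invoke, and typically BeginInvoke/EndInvoke as well)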
+ if ((delegateMethodsSeen != (SeenCtor | SeenInvoke | SeenBeginInvoke | SeenEndInvoke)) &&
+ (delegateMethodsSeen != (SeenCtor | SeenInvoke)) )
+ {
+ BuildMethodTableThrowException(BFA_MISSING_DELEGATE_METHOD);
+ }
+ }
+
+ if (i != cMethAndGaps)
+ {
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_METHOD_COUNT, mdTokenNil);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ //
+ // If the interface is sparse, we need to finalize the mapping list by
+ // telling it how many real methods we found.
+ //
+
+ if (bmtProp->fSparse)
+ {
+ GetHalfBakedClass()->GetSparseCOMInteropVTableMap()->FinalizeMapping(NumDeclaredMethods());
+ }
+#endif // FEATURE_COMINTEROP
+} // MethodTableBuilder::EnumerateClassMethods
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+//*******************************************************************************
+//
+// Run through the field list and calculate the following:
+// # static fields
+// # static fields that contain object refs.
+// # instance fields
+//
+VOID
+MethodTableBuilder::EnumerateClassFields()
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+ DWORD i;
+ IMDInternalImport *pMDInternalImport = GetMDImport();
+ mdToken tok;
+ DWORD dwMemberAttrs;
+
+ bmtEnumFields->dwNumStaticFields = 0;
+ bmtEnumFields->dwNumStaticObjRefFields = 0;
+ bmtEnumFields->dwNumStaticBoxedFields = 0;
+
+ bmtEnumFields->dwNumThreadStaticFields = 0;
+ bmtEnumFields->dwNumThreadStaticObjRefFields = 0;
+ bmtEnumFields->dwNumThreadStaticBoxedFields = 0;
+
+ bmtEnumFields->dwNumInstanceFields = 0;
+
+ HENUMInternalHolder hEnumField(pMDInternalImport);
+ hr = hEnumField.EnumInitNoThrow(mdtFieldDef, GetCl());
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+
+ bmtMetaData->cFields = hEnumField.EnumGetCount();
+
+ // Retrieve the fields and store them in a temp array.
+ bmtMetaData->pFields = new (GetStackingAllocator()) mdToken[bmtMetaData->cFields];
+ bmtMetaData->pFieldAttrs = new (GetStackingAllocator()) DWORD[bmtMetaData->cFields];
+
+ DWORD dwFieldLiteralInitOnly = fdLiteral | fdInitOnly;
+ DWORD dwMaxFieldDefRid = pMDInternalImport->GetCountWithTokenKind(mdtFieldDef);
+
+ for (i = 0; hEnumField.EnumNext(&tok); i++)
+ {
+ //
+ // Retrieve the attributes of the field.
+ //
+ DWORD rid = RidFromToken(tok);
+ if ((rid == 0)||(rid > dwMaxFieldDefRid))
+ {
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, BFA_BAD_FIELD_TOKEN, mdTokenNil);
+ }
+
+ if (FAILED(pMDInternalImport->GetFieldDefProps(tok, &dwMemberAttrs)))
+ {
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, BFA_BAD_FIELD_TOKEN, tok);
+ }
+
+ //
+ // Store the field and its attributes in the bmtMetaData structure for later use.
+ //
+
+ bmtMetaData->pFields[i] = tok;
+ bmtMetaData->pFieldAttrs[i] = dwMemberAttrs;
+
+ if((dwMemberAttrs & fdFieldAccessMask)==fdFieldAccessMask)
+ {
+ BuildMethodTableThrowException(BFA_INVALID_FIELD_ACC_FLAGS);
+ }
+ if((dwMemberAttrs & dwFieldLiteralInitOnly)==dwFieldLiteralInitOnly)
+ {
+ BuildMethodTableThrowException(BFA_FIELD_LITERAL_AND_INIT);
+ }
+
+ // Global classes may only have static fields
+ if(IsGlobalClass())
+ {
+ if(!IsFdStatic(dwMemberAttrs))
+ {
+ BuildMethodTableThrowException(BFA_NONSTATIC_GLOBAL_FIELD);
+ }
+ }
+
+ //
+ // Update the count of the various types of fields.
+ //
+
+ if (IsFdStatic(dwMemberAttrs))
+ {
+ if (!IsFdLiteral(dwMemberAttrs))
+ {
+#ifdef FEATURE_TYPEEQUIVALENCE
+ if (bmtProp->fIsTypeEquivalent)
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_EQUIVALENTSTRUCTFIELDS);
+ }
+#endif
+
+ bmtEnumFields->dwNumStaticFields++;
+
+ // If this static field is thread static, then we need
+ // to increment bmtEnumFields->dwNumThreadStaticFields
+ hr = pMDInternalImport->GetCustomAttributeByName(tok,
+ g_ThreadStaticAttributeClassName,
+ NULL, NULL);
+ IfFailThrow(hr);
+ if (hr == S_OK)
+ {
+ // It's a thread static, so increment the count
+ bmtEnumFields->dwNumThreadStaticFields++;
+ }
+ }
+ }
+ else
+ {
+#ifdef FEATURE_TYPEEQUIVALENCE
+ if (!IsFdPublic(dwMemberAttrs) && bmtProp->fIsTypeEquivalent)
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_EQUIVALENTSTRUCTFIELDS);
+ }
+#endif
+
+ if (!IsFdLiteral(dwMemberAttrs))
+ {
+ bmtEnumFields->dwNumInstanceFields++;
+ }
+ if(IsInterface())
+ {
+ BuildMethodTableThrowException(BFA_INSTANCE_FIELD_IN_INT);
+ }
+ }
+ }
+
+ if (i != bmtMetaData->cFields)
+ {
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD_COUNT, mdTokenNil);
+ }
+
+ if(IsEnum() && (bmtEnumFields->dwNumInstanceFields==0))
+ {
+ BuildMethodTableThrowException(BFA_INSTANCE_FIELD_IN_ENUM);
+ }
+
+ bmtEnumFields->dwNumDeclaredFields = bmtEnumFields->dwNumStaticFields + bmtEnumFields->dwNumInstanceFields;
+}
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+ // Determines the maximum size of the vtable and allocates the temporary storage arrays.
+// Also copies the parent's vtable into the working vtable.
+//
+VOID MethodTableBuilder::AllocateWorkingSlotTables()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(bmtAllocator));
+ PRECONDITION(CheckPointer(bmtMFDescs));
+ PRECONDITION(CheckPointer(bmtMetaData));
+ PRECONDITION(CheckPointer(bmtVT));
+ PRECONDITION(CheckPointer(bmtEnumFields));
+ PRECONDITION(CheckPointer(bmtInterface));
+ PRECONDITION(CheckPointer(bmtFP));
+ PRECONDITION(CheckPointer(bmtParent));
+
+ }
+ CONTRACTL_END;
+
+ // Allocate a FieldDesc* for each field
+ bmtMFDescs->ppFieldDescList = new (GetStackingAllocator()) FieldDesc*[bmtMetaData->cFields];
+ ZeroMemory(bmtMFDescs->ppFieldDescList, bmtMetaData->cFields * sizeof(FieldDesc *));
+
+ // Create a temporary function table (we don't know how large the vtable will be until the very end,
+ // since we don't yet know how many declared methods are overrides vs. newslots).
+
+ if (IsValueClass())
+ { // ValueClass virtuals are converted into non-virtual methods and the virtual slots
+ // become unboxing stubs that forward to these new non-virtual methods. This has the
+ // side effect of doubling the number of slots introduced by newslot virtuals.
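+ //   (For illustration: for "struct S { public override string ToString() {...} }"
+ //   the virtual slot ends up holding an unboxing stub, while a second,
+ //   non-virtual copy of ToString holds the actual body.)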
+ bmtVT->dwMaxVtableSize += NumDeclaredMethods();
+ }
+
+ _ASSERTE(!HasParent() || (bmtInterface->dwInterfaceMapSize - GetParentMethodTable()->GetNumInterfaces()) >= 0);
+
+ if (HasParent())
+ { // Add parent vtable size. <TODO> This should actually be the parent's virtual method count. </TODO>
+ bmtVT->dwMaxVtableSize += bmtParent->pSlotTable->GetSlotCount();
+ }
+
+ S_SLOT_INDEX cMaxSlots = AsClrSafeInt(bmtVT->dwMaxVtableSize) + AsClrSafeInt(NumDeclaredMethods());
+
+ if (cMaxSlots.IsOverflow() || MAX_SLOT_INDEX < cMaxSlots.Value())
+ cMaxSlots = S_SLOT_INDEX(MAX_SLOT_INDEX);
+
+ // Allocate the temporary vtable
+ bmtVT->pSlotTable = new (GetStackingAllocator())
+ bmtMethodSlotTable(cMaxSlots.Value(), GetStackingAllocator());
+
+ if (HasParent())
+ {
+#if 0
+ // @<TODO>todo: Figure out the right way to override Equals for value
+ // types only.
+ //
+ // This is broken because
+ // (a) g_pObjectClass->FindMethod("Equals", &gsig_IM_Obj_RetBool); will return
+ // the EqualsValue method
+ // (b) When mscorlib has been preloaded (and thus the munge already done
+ // ahead of time), we cannot easily find both methods
+ // to compute EqualsAddr & EqualsSlot
+ //
+ // For now, the Equals method has a runtime check to see if it's
+ // comparing value types.
+ //</TODO>
+
+ // If it is a value type, override a few of the base class methods.
+ if (IsValueClass())
+ {
+ static WORD EqualsSlot;
+
+ // If we haven't been through here yet, get some stuff from the Object class definition.
+ if (EqualsSlot == NULL)
+ {
+ // Get the slot of the Equals method.
+ MethodDesc *pEqualsMD = g_pObjectClass->FindMethod("Equals", &gsig_IM_Obj_RetBool);
+ THROW_BAD_FORMAT_MAYBE(pEqualsMD != NULL, 0, this);
+ EqualsSlot = pEqualsMD->GetSlot();
+
+ // Get the address of the EqualsValue method.
+ MethodDesc *pEqualsValueMD = g_pObjectClass->FindMethod("EqualsValue", &gsig_IM_Obj_RetBool);
+ THROW_BAD_FORMAT_MAYBE(pEqualsValueMD != NULL, 0, this);
+
+ // Patch the EqualsValue method desc in a dangerous way to
+ // look like the Equals method desc.
+ pEqualsValueMD->SetSlot(EqualsSlot);
+ pEqualsValueMD->SetMemberDef(pEqualsMD->GetMemberDef());
+ }
+
+ // Override the valuetype "Equals" with "EqualsValue".
+ bmtVT->SetMethodDescForSlot(EqualsSlot, EqualsSlot);
+ }
+#endif // 0
+ }
+
+ S_UINT32 cEntries = S_UINT32(2) * S_UINT32(NumDeclaredMethods());
+ if (cEntries.IsOverflow())
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+}
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+ // Allocate a FieldDesc for each field and initialize the counts of fields by size
+//
+VOID MethodTableBuilder::AllocateFieldDescs()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(bmtAllocator));
+ PRECONDITION(CheckPointer(bmtMFDescs));
+ PRECONDITION(CheckPointer(bmtMetaData));
+ PRECONDITION(CheckPointer(bmtVT));
+ PRECONDITION(CheckPointer(bmtEnumFields));
+ PRECONDITION(CheckPointer(bmtFP));
+ PRECONDITION(CheckPointer(bmtParent));
+
+ }
+ CONTRACTL_END;
+
+ // We'll be counting the # fields of each size as we go along
+ for (DWORD i = 0; i <= MAX_LOG2_PRIMITIVE_FIELD_SIZE; i++)
+ {
+ bmtFP->NumRegularStaticFieldsOfSize[i] = 0;
+ bmtFP->NumThreadStaticFieldsOfSize[i] = 0;
+ bmtFP->NumInstanceFieldsOfSize[i] = 0;
+ }
+
+ //
+ // Allocate blocks of MethodDescs and FieldDescs for all declared methods and fields
+ //
+ // In order to avoid allocating a field pointing back to the method
+ // table in every single method desc, we allocate memory in the
+ // following manner:
+ // o Field descs get a single contiguous block.
+ // o Method descs of different sizes (normal vs NDirect) are
+ // allocated in different MethodDescChunks.
+ // o Each method desc chunk starts with a header, and has
+ // at most MAX_ method descs (if there are more
+ // method descs of a given size, multiple chunks are allocated).
+ // This way method descs can use an 8-bit offset field to locate the
+ // pointer to their method table.
+ //
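+ // (For illustration, under these assumptions: a method desc stores only its
+ // 8-bit offset within its chunk; stepping back by that offset reaches the
+ // chunk header, which holds the single pointer to the method table.)
+ //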
+
+ /////////////////////////////////////////////////////////////////
+ // Allocate fields
+ if (NumDeclaredFields() > 0)
+ {
+ GetHalfBakedClass()->SetFieldDescList((FieldDesc *)
+ AllocateFromHighFrequencyHeap(S_SIZE_T(NumDeclaredFields()) * S_SIZE_T(sizeof(FieldDesc))));
+ INDEBUG(GetClassLoader()->m_dwDebugFieldDescs += NumDeclaredFields();)
+ INDEBUG(GetClassLoader()->m_dwFieldDescData += (NumDeclaredFields() * sizeof(FieldDesc));)
+ }
+}
+
+#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
+//*******************************************************************************
+//
+ // Heuristic to determine if we should have instances of this class 8-byte aligned
+//
+BOOL MethodTableBuilder::ShouldAlign8(DWORD dwR8Fields, DWORD dwTotalFields)
+{
+ LIMITED_METHOD_CONTRACT;
+
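+ // Example: 2 R8 fields out of 3 total qualify (2*2 > 3 && 2 >= 2),
+ // while 2 R8 fields out of 4 total do not (2*2 > 4 is false).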
+ return (dwR8Fields * 2 > dwTotalFields) && (dwR8Fields >= 2);
+}
+#endif
+
+//*******************************************************************************
+BOOL MethodTableBuilder::IsSelfReferencingStaticValueTypeField(mdToken dwByValueClassToken,
+ bmtInternalInfo* bmtInternal,
+ const bmtGenericsInfo *bmtGenerics,
+ PCCOR_SIGNATURE pMemberSignature,
+ DWORD cMemberSignature)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (dwByValueClassToken != this->GetCl())
+ {
+ return FALSE;
+ }
+
+ if (!bmtGenerics->HasInstantiation())
+ {
+ return TRUE;
+ }
+
+ // The value class is generic. Check that the signature of the field
+ // is _exactly_ equivalent to VC<!0, !1, !2, ...>. Do this by consing up a fake
+ // signature.
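+ // For illustration (hypothetical C# source): for
+ //     struct S<T, U> { static S<T, U> Instance; }
+ // the fake signature built below encodes GENERICINST VALUETYPE <S's token> 2 VAR 0 VAR 1,
+ // i.e. exactly S<!0, !1>, which must match the field's signature for the
+ // field to count as self-referencing.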
+ DWORD nGenericArgs = bmtGenerics->GetNumGenericArgs();
+ CONSISTENCY_CHECK(nGenericArgs != 0);
+
+ SigBuilder sigBuilder;
+
+ sigBuilder.AppendElementType(ELEMENT_TYPE_GENERICINST);
+ sigBuilder.AppendElementType(ELEMENT_TYPE_VALUETYPE);
+ sigBuilder.AppendToken(dwByValueClassToken);
+ sigBuilder.AppendData(nGenericArgs);
+ for (unsigned int typearg = 0; typearg < nGenericArgs; typearg++)
+ {
+ sigBuilder.AppendElementType(ELEMENT_TYPE_VAR);
+ sigBuilder.AppendData(typearg);
+ }
+
+ DWORD cFakeSig;
+ PCCOR_SIGNATURE pFakeSig = (PCCOR_SIGNATURE)sigBuilder.GetSignature(&cFakeSig);
+
+ PCCOR_SIGNATURE pFieldSig = pMemberSignature + 1; // skip the CALLCONV_FIELD
+
+ return MetaSig::CompareElementType(pFakeSig, pFieldSig,
+ pFakeSig + cFakeSig, pMemberSignature + cMemberSignature,
+ GetModule(), GetModule(),
+ NULL, NULL);
+
+}
+
+//*******************************************************************************
+//
+ // Value used in the pByValueClass cache to mark self-references
+//
+static BOOL IsSelfRef(MethodTable * pMT)
+{
+ return pMT == (MethodTable *)-1;
+}
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+// Go thru all fields and initialize their FieldDescs.
+//
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif // _PREFAST_
+
+VOID MethodTableBuilder::InitializeFieldDescs(FieldDesc *pFieldDescList,
+ const LayoutRawFieldInfo* pLayoutRawFieldInfos,
+ bmtInternalInfo* bmtInternal,
+ const bmtGenericsInfo* bmtGenerics,
+ bmtMetaDataInfo* bmtMetaData,
+ bmtEnumFieldInfo* bmtEnumFields,
+ bmtErrorInfo* bmtError,
+ MethodTable *** pByValueClassCache,
+ bmtMethAndFieldDescs* bmtMFDescs,
+ bmtFieldPlacement* bmtFP,
+ bmtContextStaticInfo* pbmtCSInfo,
+ unsigned* totalDeclaredSize)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(bmtInternal));
+ PRECONDITION(CheckPointer(bmtGenerics));
+ PRECONDITION(CheckPointer(bmtMetaData));
+ PRECONDITION(CheckPointer(bmtEnumFields));
+ PRECONDITION(CheckPointer(bmtError));
+ PRECONDITION(CheckPointer(pByValueClassCache));
+ PRECONDITION(CheckPointer(bmtMFDescs));
+ PRECONDITION(CheckPointer(bmtFP));
+ PRECONDITION(CheckPointer(totalDeclaredSize));
+ }
+ CONTRACTL_END;
+
+ DWORD i;
+ IMDInternalImport * pInternalImport = GetMDImport(); // to avoid multiple dereferencings
+
+ FieldMarshaler * pNextFieldMarshaler = NULL;
+ if (HasLayout())
+ {
+ pNextFieldMarshaler = (FieldMarshaler*)(GetLayoutInfo()->GetFieldMarshalers());
+ }
+
+
+//========================================================================
+// BEGIN:
+// Go thru all fields and initialize their FieldDescs.
+//========================================================================
+
+ DWORD dwCurrentDeclaredField = 0;
+ DWORD dwCurrentStaticField = 0;
+ DWORD dwCurrentThreadStaticField = 0;
+
+#ifdef FEATURE_REMOTING
+ DWORD dwContextStaticsOffset = 0;
+#endif
+
+ DWORD dwR8Fields = 0; // Number of R8's the class has
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+ // Track whether any field in this type requires 8-byte alignment
+ BOOL fFieldRequiresAlign8 = HasParent() ? GetParentMethodTable()->RequiresAlign8() : FALSE;
+#endif
+
+#ifdef FEATURE_LEGACYNETCF
+ BOOL fNetCFCompat = GetModule()->GetDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8;
+ DWORD dwStaticsSizeOnNetCF = 0;
+#else
+ const BOOL fNetCFCompat = FALSE;
+#endif
+
+ for (i = 0; i < bmtMetaData->cFields; i++)
+ {
+ PCCOR_SIGNATURE pMemberSignature;
+ DWORD cMemberSignature;
+ DWORD dwMemberAttrs;
+
+ dwMemberAttrs = bmtMetaData->pFieldAttrs[i];
+
+ BOOL fIsStatic = IsFdStatic(dwMemberAttrs);
+
+ // We don't store static final primitive fields in the class layout
+ if (IsFdLiteral(dwMemberAttrs))
+ continue;
+
+ if (!IsFdPublic(dwMemberAttrs))
+ SetHasNonPublicFields();
+
+ if (IsFdNotSerialized(dwMemberAttrs))
+ SetCannotBeBlittedByObjectCloner();
+
+ IfFailThrow(pInternalImport->GetSigOfFieldDef(bmtMetaData->pFields[i], &cMemberSignature, &pMemberSignature));
+ // Signature validation
+ IfFailThrow(validateTokenSig(bmtMetaData->pFields[i],pMemberSignature,cMemberSignature,dwMemberAttrs,pInternalImport));
+
+ FieldDesc * pFD;
+ DWORD dwLog2FieldSize = 0;
+ BOOL bCurrentFieldIsGCPointer = FALSE;
+ mdToken dwByValueClassToken = 0;
+ MethodTable * pByValueClass = NULL;
+ BOOL fIsByValue = FALSE;
+ BOOL fIsThreadStatic = FALSE;
+#ifdef FEATURE_REMOTING
+ BOOL fIsContextStatic = FALSE;
+#else
+ static const BOOL fIsContextStatic = FALSE;
+#endif
+ BOOL fHasRVA = FALSE;
+
+ MetaSig fsig(pMemberSignature,
+ cMemberSignature,
+ GetModule(),
+ &bmtGenerics->typeContext,
+ MetaSig::sigField);
+ CorElementType ElementType = fsig.NextArg();
+
+
+ // Get type
+ if (!isCallConv(fsig.GetCallingConvention(), IMAGE_CEE_CS_CALLCONV_FIELD))
+ {
+ IfFailThrow(COR_E_TYPELOAD);
+ }
+
+ // Determine if a static field is special, i.e. RVA-based, local to
+ // a thread, or local to a context
+ if (fIsStatic)
+ {
+ if (IsFdHasFieldRVA(dwMemberAttrs))
+ {
+ fHasRVA = TRUE;
+ }
+
+ HRESULT hr;
+
+ hr = pInternalImport->GetCustomAttributeByName(bmtMetaData->pFields[i],
+ g_ThreadStaticAttributeClassName,
+ NULL, NULL);
+ IfFailThrow(hr);
+ if (hr == S_OK)
+ {
+ fIsThreadStatic = TRUE;
+ }
+
+#ifdef FEATURE_REMOTING
+ hr = pInternalImport->GetCustomAttributeByName(bmtMetaData->pFields[i],
+ g_ContextStaticAttributeClassName,
+ NULL, NULL);
+ IfFailThrow(hr);
+ if (hr == S_OK)
+ {
+ fIsContextStatic = TRUE;
+ }
+#endif // FEATURE_REMOTING
+
+ if (ElementType == ELEMENT_TYPE_VALUETYPE)
+ {
+ hr = pInternalImport->GetCustomAttributeByName(bmtMetaData->pFields[i],
+ g_CompilerServicesFixedAddressValueTypeAttribute,
+ NULL, NULL);
+ IfFailThrow(hr);
+ if (hr == S_OK)
+ {
+ bmtFP->fHasFixedAddressValueTypes = true;
+ }
+ }
+
+
+ // Do some sanity checks that we are not mixing context and thread
+ // relative statics.
+#ifdef FEATURE_REMOTING
+ if (fIsThreadStatic && fIsContextStatic)
+ {
+ IfFailThrow(COR_E_TYPELOAD);
+ }
+#endif
+ if (fHasRVA && (fIsThreadStatic || fIsContextStatic))
+ {
+ IfFailThrow(COR_E_TYPELOAD);
+ }
+
+ if ((fIsThreadStatic || fIsContextStatic || bmtFP->fHasFixedAddressValueTypes) && GetAssembly()->IsCollectible())
+ {
+ if (bmtFP->fHasFixedAddressValueTypes)
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_COLLECTIBLEFIXEDVTATTR);
+ }
+ BuildMethodTableThrowException(IDS_CLASSLOAD_COLLECTIBLESPECIALSTATICS);
+ }
+ }
+
+
+ GOT_ELEMENT_TYPE:
+ // Type to store in FieldDesc - we don't want to have extra case statements for
+ // ELEMENT_TYPE_STRING, SDARRAY etc., so we convert all object types to CLASS.
+ // Also, BOOLEAN, CHAR are converted to U1, U2.
+ CorElementType FieldDescElementType = ElementType;
+
+ switch (ElementType)
+ {
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ {
+ dwLog2FieldSize = 0;
+ break;
+ }
+
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ {
+ dwLog2FieldSize = 1;
+ break;
+ }
+
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ IN_WIN32(case ELEMENT_TYPE_I:)
+ IN_WIN32(case ELEMENT_TYPE_U:)
+ case ELEMENT_TYPE_R4:
+ {
+ dwLog2FieldSize = 2;
+ break;
+ }
+
+ case ELEMENT_TYPE_BOOLEAN:
+ {
+ // FieldDescElementType = ELEMENT_TYPE_U1;
+ dwLog2FieldSize = 0;
+ break;
+ }
+
+ case ELEMENT_TYPE_CHAR:
+ {
+ // FieldDescElementType = ELEMENT_TYPE_U2;
+ dwLog2FieldSize = 1;
+ break;
+ }
+
+ case ELEMENT_TYPE_R8:
+ {
+ dwR8Fields++;
+
+ // Deliberate fall through...
+ }
+
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ IN_WIN64(case ELEMENT_TYPE_I:)
+ IN_WIN64(case ELEMENT_TYPE_U:)
+ {
+#ifdef FEATURE_64BIT_ALIGNMENT
+ // Record that this field requires alignment for Int64/UInt64.
+ if(!fIsStatic)
+ fFieldRequiresAlign8 = true;
+#endif
+ dwLog2FieldSize = 3;
+ break;
+ }
+
+ case ELEMENT_TYPE_FNPTR:
+ case ELEMENT_TYPE_PTR: // ptrs are unmanaged scalars, for layout
+ {
+ dwLog2FieldSize = LOG2_PTRSIZE;
+ break;
+ }
+
+ // Class type variable (method type variables aren't allowed in fields)
+ // These only occur in open types used for verification/reflection.
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ // deliberate drop through - do fake field layout
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_SZARRAY: // single dim, zero
+ case ELEMENT_TYPE_ARRAY: // all other arrays
+ case ELEMENT_TYPE_CLASS: // objectrefs
+ case ELEMENT_TYPE_OBJECT:
+ {
+ dwLog2FieldSize = LOG2_PTRSIZE;
+ bCurrentFieldIsGCPointer = TRUE;
+ FieldDescElementType = ELEMENT_TYPE_CLASS;
+
+ if (!fIsStatic)
+ {
+ SetHasFieldsWhichMustBeInited();
+ if (ElementType != ELEMENT_TYPE_STRING)
+ SetCannotBeBlittedByObjectCloner();
+ }
+ else
+ { // EnumerateFieldDescs already counted the total number of static vs. instance
+ // fields, now we're further subdividing the static field count by GC and non-GC.
+ bmtEnumFields->dwNumStaticObjRefFields++;
+ if (fIsThreadStatic)
+ bmtEnumFields->dwNumThreadStaticObjRefFields++;
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_VALUETYPE: // a byvalue class field
+ {
+ Module * pTokenModule;
+ dwByValueClassToken = fsig.GetArgProps().PeekValueTypeTokenClosed(GetModule(), &bmtGenerics->typeContext, &pTokenModule);
+ fIsByValue = TRUE;
+
+ // By-value class
+ BAD_FORMAT_NOTHROW_ASSERT(dwByValueClassToken != 0);
+
+ if (this->IsValueClass() && (pTokenModule == GetModule()))
+ {
+ if (TypeFromToken(dwByValueClassToken) == mdtTypeRef)
+ {
+ // It's a typeref - check if it's a class that has a static field of itself
+ LPCUTF8 pszNameSpace;
+ LPCUTF8 pszClassName;
+ if (FAILED(pInternalImport->GetNameOfTypeRef(dwByValueClassToken, &pszNameSpace, &pszClassName)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ if (IsStrLongerThan((char *)pszClassName, MAX_CLASS_NAME)
+ || IsStrLongerThan((char *)pszNameSpace, MAX_CLASS_NAME)
+ || (strlen(pszClassName) + strlen(pszNameSpace) + 1 >= MAX_CLASS_NAME))
+ {
+ BuildMethodTableThrowException(BFA_TYPEREG_NAME_TOO_LONG, mdMethodDefNil);
+ }
+
+ mdToken tkRes;
+ if (FAILED(pInternalImport->GetResolutionScopeOfTypeRef(dwByValueClassToken, &tkRes)))
+ {
+ BuildMethodTableThrowException(BFA_BAD_TYPEREF_TOKEN, dwByValueClassToken);
+ }
+
+ if (TypeFromToken(tkRes) == mdtTypeRef)
+ {
+ if (!pInternalImport->IsValidToken(tkRes))
+ {
+ BuildMethodTableThrowException(BFA_BAD_TYPEREF_TOKEN, mdMethodDefNil);
+ }
+ }
+ else
+ {
+ tkRes = mdTokenNil;
+ }
+
+ if (FAILED(pInternalImport->FindTypeDef(pszNameSpace,
+ pszClassName,
+ tkRes,
+ &dwByValueClassToken)))
+ {
+ dwByValueClassToken = mdTokenNil;
+ }
+ } // If field is static typeref
+
+ BOOL selfref = IsSelfReferencingStaticValueTypeField(dwByValueClassToken,
+ bmtInternal,
+ bmtGenerics,
+ pMemberSignature,
+ cMemberSignature);
+
+ if (selfref)
+ { // immediately self-referential fields must be static.
+ if (!fIsStatic)
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_VALUEINSTANCEFIELD, mdMethodDefNil);
+ }
+
+ if (!IsValueClass())
+ {
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_MUST_BE_BYVAL, mdTokenNil);
+ }
+
+ pByValueClass = (MethodTable *)-1;
+ }
+ } // If 'this' is a value class
+
+ // It's not self-referential so try to load it
+ if (pByValueClass == NULL)
+ {
+ // Loading a non-self-ref valuetype field.
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOAD_APPROXPARENTS);
+ // We load the approximate type of the field to avoid recursion problems.
+ // MethodTable::DoFullyLoad() will later load it fully
+ pByValueClass = fsig.GetArgProps().GetTypeHandleThrowing(GetModule(),
+ &bmtGenerics->typeContext,
+ ClassLoader::LoadTypes,
+ CLASS_LOAD_APPROXPARENTS,
+ TRUE
+ ).GetMethodTable();
+ }
+
+ // #FieldDescTypeMorph IF it is an enum, strip it down to its underlying type
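+ // (e.g. a field whose type is a hypothetical "enum Color : byte { ... }"
+ // is treated below as its underlying ELEMENT_TYPE_U1)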
+ if (IsSelfRef(pByValueClass) ? IsEnum() : pByValueClass->IsEnum())
+ {
+ if (IsSelfRef(pByValueClass))
+ { // It is a self-referencing enum (ValueType) static field - forbidden by the ECMA spec, but supported by the CLR since v1
+ // Note: literal static fields are skipped early in this loop
+ if (bmtMFDescs->ppFieldDescList[0] == NULL)
+ { // The field is defined before (the only) instance field
+ // AppCompat with 3.5 SP1 and 4.0 RTM behavior
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
+ }
+ // We will treat the field type as if it were its underlying type (we know its size and will correctly check
+ // the RVA against that size later in this method)
+ // Therefore we do not have to run code:VerifySelfReferencingStaticValueTypeFields_WithRVA or code:#SelfReferencingStaticValueTypeField_Checks
+ }
+ BAD_FORMAT_NOTHROW_ASSERT((IsSelfRef(pByValueClass) ?
+ bmtEnumFields->dwNumInstanceFields : pByValueClass->GetNumInstanceFields())
+ == 1); // enums must have exactly one field
+ FieldDesc * enumField = IsSelfRef(pByValueClass) ?
+ bmtMFDescs->ppFieldDescList[0] : pByValueClass->GetApproxFieldDescListRaw();
+ BAD_FORMAT_NOTHROW_ASSERT(!enumField->IsStatic()); // no real static fields on enums
+ ElementType = enumField->GetFieldType();
+ BAD_FORMAT_NOTHROW_ASSERT(ElementType != ELEMENT_TYPE_VALUETYPE);
+ fIsByValue = FALSE; // we're going to treat it as the underlying type now
+ goto GOT_ELEMENT_TYPE;
+ }
+
+ // There are just a few types with code:ContainsStackPtr set - arrays and a few ValueTypes in mscorlib.dll (see code:CheckForSystemTypes).
+ // Note: None of them will ever have a self-referencing static ValueType field (we cannot assert it now because the ContainsStackPtr
+ // status for this type has not been initialized yet).
+ if (!IsSelfRef(pByValueClass) && pByValueClass->GetClass()->ContainsStackPtr())
+ { // Cannot have embedded valuetypes that contain a field that requires stack allocation.
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
+ }
+
+ if (!IsSelfRef(pByValueClass) && pByValueClass->GetClass()->HasNonPublicFields())
+ { // If a class has a field of type ValueType with non-public fields in it,
+ // the class must "inherit" this characteristic
+ SetHasNonPublicFields();
+ }
+
+ if (!fHasRVA)
+ {
+ if (!fIsStatic)
+ {
+ // Inherit instance attributes
+ EEClass * pFieldClass = pByValueClass->GetClass();
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+ // If a value type requires 8-byte alignment this requirement must be inherited by any
+ // class/struct that embeds it as a field.
+ if (pFieldClass->IsAlign8Candidate())
+ fFieldRequiresAlign8 = true;
+#endif
+ if (pFieldClass->HasNonPublicFields())
+ SetHasNonPublicFields();
+ if (pFieldClass->HasFieldsWhichMustBeInited())
+ SetHasFieldsWhichMustBeInited();
+#ifdef FEATURE_REMOTING
+ if (pFieldClass->CannotBeBlittedByObjectCloner())
+ SetCannotBeBlittedByObjectCloner();
+#endif
+
+#ifdef FEATURE_READYTORUN
+ if (!(pByValueClass->IsTruePrimitive() || pByValueClass->IsEnum()))
+ {
+ CheckLayoutDependsOnOtherModules(pByValueClass);
+ }
+#endif
+ }
+ else
+ { // Increment the number of static fields that contain object references.
+ bmtEnumFields->dwNumStaticBoxedFields++;
+ if (fIsThreadStatic)
+ bmtEnumFields->dwNumThreadStaticBoxedFields++;
+ }
+ }
+
+ if (*pByValueClassCache == NULL)
+ {
+ DWORD dwNumFields = bmtEnumFields->dwNumInstanceFields + bmtEnumFields->dwNumStaticFields;
+
+ *pByValueClassCache = new (GetStackingAllocator()) MethodTable * [dwNumFields];
+ memset (*pByValueClassCache, 0, dwNumFields * sizeof(MethodTable *));
+ }
+
+ // Thread static fields come after instance fields and regular static fields in this list
+ if (fIsThreadStatic)
+ {
+ (*pByValueClassCache)[bmtEnumFields->dwNumInstanceFields + bmtEnumFields->dwNumStaticFields - bmtEnumFields->dwNumThreadStaticFields + dwCurrentThreadStaticField] = pByValueClass;
+ // make sure to record the correct size for static field
+ // layout
+ dwLog2FieldSize = LOG2_PTRSIZE; // handle
+ }
+ // Regular static fields come after instance fields in this list
+ else if (fIsStatic)
+ {
+ (*pByValueClassCache)[bmtEnumFields->dwNumInstanceFields + dwCurrentStaticField] = pByValueClass;
+ // make sure to record the correct size for static field
+ // layout
+ dwLog2FieldSize = LOG2_PTRSIZE; // handle
+ }
+ else
+ {
+ (*pByValueClassCache)[dwCurrentDeclaredField] = pByValueClass;
+ dwLog2FieldSize = 0; // unused
+ }
+
+ break;
+ }
+ default:
+ {
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
+ }
+ }
+
+ if (!fIsStatic)
+ {
+ pFD = &pFieldDescList[dwCurrentDeclaredField];
+ *totalDeclaredSize += (1 << dwLog2FieldSize);
+ }
+ else /* (dwMemberAttrs & mdStatic) */
+ {
+ if (fIsThreadStatic)
+ {
+ pFD = &pFieldDescList[bmtEnumFields->dwNumInstanceFields + bmtEnumFields->dwNumStaticFields - bmtEnumFields->dwNumThreadStaticFields + dwCurrentThreadStaticField];
+ }
+ else
+ {
+ pFD = &pFieldDescList[bmtEnumFields->dwNumInstanceFields + dwCurrentStaticField];
+ }
+ }
+
+ bmtMFDescs->ppFieldDescList[i] = pFD;
+
+ const LayoutRawFieldInfo *pLayoutFieldInfo = NULL;
+
+ if (HasLayout())
+ {
+ const LayoutRawFieldInfo *pwalk = pLayoutRawFieldInfos;
+ while (pwalk->m_MD != mdFieldDefNil)
+ {
+ if (pwalk->m_MD == bmtMetaData->pFields[i])
+ {
+
+ pLayoutFieldInfo = pwalk;
+ CopyMemory(pNextFieldMarshaler,
+ &(pwalk->m_FieldMarshaler),
+ MAXFIELDMARSHALERSIZE);
+
+ pNextFieldMarshaler->SetFieldDesc(pFD);
+ pNextFieldMarshaler->SetExternalOffset(pwalk->m_offset);
+
+ ((BYTE*&)pNextFieldMarshaler) += MAXFIELDMARSHALERSIZE;
+ break;
+ }
+ pwalk++;
+ }
+ }
+
+ LPCSTR pszFieldName = NULL;
+#ifdef _DEBUG
+ if (FAILED(pInternalImport->GetNameOfFieldDef(bmtMetaData->pFields[i], &pszFieldName)))
+ {
+ pszFieldName = "Invalid FieldDef record";
+ }
+#endif
+ // #InitCall Initialize the contents of the field descriptor
+ pFD->Init(
+ bmtMetaData->pFields[i],
+ FieldDescElementType,
+ dwMemberAttrs,
+ fIsStatic,
+ fHasRVA,
+ fIsThreadStatic,
+ fIsContextStatic,
+ pszFieldName
+ );
+
+ // Check if the ValueType field containing non-publics is overlapped
+ if (!fNetCFCompat
+ && HasExplicitFieldOffsetLayout()
+ && pLayoutFieldInfo != NULL
+ && pLayoutFieldInfo->m_fIsOverlapped
+ && pByValueClass != NULL
+ && pByValueClass->GetClass()->HasNonPublicFields())
+ {
+ if (!Security::CanSkipVerification(GetAssembly()->GetDomainAssembly()))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADOVERLAP);
+ }
+ }
+
+ // We're using FieldDesc::m_pMTOfEnclosingClass to temporarily store the field's size.
+ //
+ if (fIsByValue)
+ {
+ if (!fIsStatic &&
+ (IsBlittable() || HasExplicitFieldOffsetLayout()))
+ {
+ (DWORD_PTR &)pFD->m_pMTOfEnclosingClass =
+ (*pByValueClassCache)[dwCurrentDeclaredField]->GetNumInstanceFieldBytes();
+
+ if (pLayoutFieldInfo)
+ IfFailThrow(pFD->SetOffset(pLayoutFieldInfo->m_offset));
+ else
+ pFD->SetOffset(FIELD_OFFSET_VALUE_CLASS);
+ }
+ else if (!fIsStatic && IsManagedSequential())
+ {
+ (DWORD_PTR &)pFD->m_pMTOfEnclosingClass =
+ (*pByValueClassCache)[dwCurrentDeclaredField]->GetNumInstanceFieldBytes();
+
+ IfFailThrow(pFD->SetOffset(pLayoutFieldInfo->m_managedOffset));
+ }
+ else
+ {
+ // static value class fields hold a handle, which is ptr sized
+ // (instance field layout ignores this value)
+ (DWORD_PTR&)(pFD->m_pMTOfEnclosingClass) = LOG2_PTRSIZE;
+ pFD->SetOffset(FIELD_OFFSET_VALUE_CLASS);
+ }
+ }
+ else
+ {
+ (DWORD_PTR &)(pFD->m_pMTOfEnclosingClass) = (size_t)dwLog2FieldSize;
+
+ // -1 (FIELD_OFFSET_UNPLACED) means that this is a non-GC field that has not yet been placed
+ // -2 (FIELD_OFFSET_UNPLACED_GC_PTR) means that this is a GC pointer field that has not yet been placed
+
+ // If there is any kind of explicit layout information for this field, use it. If not, then
+ // mark it as either GC or non-GC and as unplaced; it will get placed later on in an optimized way.
+
+ if ((IsBlittable() || HasExplicitFieldOffsetLayout()) && !fIsStatic)
+ IfFailThrow(pFD->SetOffset(pLayoutFieldInfo->m_offset));
+ else if (IsManagedSequential() && !fIsStatic)
+ IfFailThrow(pFD->SetOffset(pLayoutFieldInfo->m_managedOffset));
+ else if (bCurrentFieldIsGCPointer)
+ pFD->SetOffset(FIELD_OFFSET_UNPLACED_GC_PTR);
+ else
+ pFD->SetOffset(FIELD_OFFSET_UNPLACED);
+ }
+
+ if (!fIsStatic)
+ {
+ if (!fIsByValue)
+ {
+ if (++bmtFP->NumInstanceFieldsOfSize[dwLog2FieldSize] == 1)
+ bmtFP->FirstInstanceFieldOfSize[dwLog2FieldSize] = dwCurrentDeclaredField;
+ }
+
+ dwCurrentDeclaredField++;
+
+ if (bCurrentFieldIsGCPointer)
+ {
+ bmtFP->NumInstanceGCPointerFields++;
+ }
+ }
+ else /* static fields */
+ {
+ // Static fields are stored after the vtable and interface slots. We don't
+ // know how large the vtable will be, so we will have to fix up the slot number by
+ // <vtable + interface size> later.
+
+ if (fIsThreadStatic)
+ {
+ dwCurrentThreadStaticField++;
+ }
+ else
+ {
+ dwCurrentStaticField++;
+ }
+
+ if (fHasRVA)
+ {
+ if (FieldDescElementType == ELEMENT_TYPE_CLASS)
+ { // RVA fields are not allowed to have GC pointers.
+ BAD_FORMAT_NOTHROW_ASSERT(!"ObjectRef in an RVA field");
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
+ }
+ if (FieldDescElementType == ELEMENT_TYPE_VALUETYPE)
+ {
+ if (IsSelfRef(pByValueClass))
+ { // We will verify self-referencing statics after the loop through all fields - see code:#SelfReferencingStaticValueTypeField_Checks
+ bmtFP->fHasSelfReferencingStaticValueTypeField_WithRVA = TRUE;
+ }
+ else
+ {
+ if (pByValueClass->GetClass()->HasFieldsWhichMustBeInited())
+ { // RVA fields are not allowed to have GC pointers.
+ BAD_FORMAT_NOTHROW_ASSERT(!"ObjectRef in an RVA field");
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
+ }
+ if (pByValueClass->GetClass()->HasNonPublicFields())
+ {
+ if (!Security::CanHaveRVA(GetAssembly()))
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"ValueType with non-public fields as a type of an RVA field");
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
+ }
+ }
+ }
+ }
+
+ // Set the field offset
+ DWORD rva;
+ IfFailThrow(pInternalImport->GetFieldRVA(pFD->GetMemberDef(), &rva));
+
+ // Ensure that the IL image is loaded. Note that this assembly may
+ // have an ngen image, but this type may have failed to load during ngen.
+ GetModule()->GetFile()->LoadLibrary(FALSE);
+
+ DWORD fldSize;
+ if (FieldDescElementType == ELEMENT_TYPE_VALUETYPE)
+ {
+ if (IsSelfRef(pByValueClass))
+ {
+ _ASSERTE(bmtFP->fHasSelfReferencingStaticValueTypeField_WithRVA);
+
+ // We do not know the size yet
+ _ASSERTE(bmtFP->NumInstanceFieldBytes == 0);
+ // We will check just the RVA with size 0 now; the full size verification will happen in code:VerifySelfReferencingStaticValueTypeFields_WithRVA
+ fldSize = 0;
+ }
+ else
+ {
+ fldSize = pByValueClass->GetNumInstanceFieldBytes();
+ }
+ }
+ else
+ {
+ fldSize = GetSizeForCorElementType(FieldDescElementType);
+ }
+ if (!GetModule()->CheckRvaField(rva, fldSize))
+ {
+ if (!Security::CanHaveRVA(GetAssembly()))
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"Illegal RVA of a mapped field");
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
+ }
+ }
+
+ pFD->SetOffsetRVA(rva);
+ }
+ else if (fIsThreadStatic)
+ {
+ bmtFP->NumThreadStaticFieldsOfSize[dwLog2FieldSize]++;
+
+ if (bCurrentFieldIsGCPointer)
+ bmtFP->NumThreadStaticGCPointerFields++;
+
+ if (fIsByValue)
+ bmtFP->NumThreadStaticGCBoxedFields++;
+ }
+#ifdef FEATURE_REMOTING
+ else if (fIsContextStatic)
+ {
+ DWORD size = 1 << dwLog2FieldSize;
+
+ dwContextStaticsOffset = (DWORD)ALIGN_UP(dwContextStaticsOffset, size);
+
+ IfFailThrow(pFD->SetOffset(dwContextStaticsOffset)); // offset is the bucket index
+
+ dwContextStaticsOffset += size;
+ }
+#endif // FEATURE_REMOTING
+ else
+ {
+ bmtFP->NumRegularStaticFieldsOfSize[dwLog2FieldSize]++;
+
+ if (bCurrentFieldIsGCPointer)
+ bmtFP->NumRegularStaticGCPointerFields++;
+
+ if (fIsByValue)
+ bmtFP->NumRegularStaticGCBoxedFields++;
+ }
+ }
+ }
+ // We processed all fields
+
+ //#SelfReferencingStaticValueTypeField_Checks
+ if (bmtFP->fHasSelfReferencingStaticValueTypeField_WithRVA)
+ { // The type has self-referencing static ValueType field with RVA, do more checks now that depend on all fields being processed
+
+ // For enums we already checked their underlying type; we should not get here
+ _ASSERTE(!IsEnum());
+
+ if (HasFieldsWhichMustBeInited())
+ { // RVA fields are not allowed to have GC pointers.
+ BAD_FORMAT_NOTHROW_ASSERT(!"ObjectRef in an RVA self-referencing static field");
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
+ }
+ if (HasNonPublicFields())
+ { // RVA ValueTypes with non-public fields must be checked against security
+ if (!Security::CanHaveRVA(GetAssembly()))
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"ValueType with non-public fields as a type of an RVA self-referencing static field");
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
+ }
+ }
+ }
+
+ DWORD dwNumInstanceFields = dwCurrentDeclaredField + (HasParent() ? GetParentMethodTable()->GetNumInstanceFields() : 0);
+ DWORD dwNumStaticFields = bmtEnumFields->dwNumStaticFields;
+ DWORD dwNumThreadStaticFields = bmtEnumFields->dwNumThreadStaticFields;
+
+ if (!FitsIn<WORD>(dwNumInstanceFields) ||
+ !FitsIn<WORD>(dwNumStaticFields))
+ { // An implementation limitation means that it's an error if there are more than MAX_WORD fields.
+ BuildMethodTableThrowException(IDS_EE_TOOMANYFIELDS);
+ }
+
+ GetHalfBakedClass()->SetNumInstanceFields((WORD)dwNumInstanceFields);
+ GetHalfBakedClass()->SetNumStaticFields((WORD)dwNumStaticFields);
+ GetHalfBakedClass()->SetNumThreadStaticFields((WORD)dwNumThreadStaticFields);
+
+ if (bmtFP->fHasFixedAddressValueTypes)
+ {
+ // To make things simpler, if the class has any field with this requirement, we'll set
+ // all the statics to have this property. This means we only need to persist one bit
+ // for the ngen case.
+ GetHalfBakedClass()->SetHasFixedAddressVTStatics();
+ }
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+ // For types with layout we drop any 64-bit alignment requirement if the packing size was less than 8
+ // bytes (this mimics what the native compiler does and ensures we match up calling conventions during
+ // interop).
+ if (HasLayout() && GetLayoutInfo()->GetPackingSize() < 8)
+ {
+ fFieldRequiresAlign8 = false;
+ }
+
+ if (fFieldRequiresAlign8)
+ {
+ SetAlign8Candidate();
+ }
+#endif // FEATURE_64BIT_ALIGNMENT
+
+#ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
+ if (ShouldAlign8(dwR8Fields, dwNumInstanceFields))
+ {
+ SetAlign8Candidate();
+ }
+#endif // FEATURE_DOUBLE_ALIGNMENT_HINT
+
+#ifdef FEATURE_REMOTING
+ if (pbmtCSInfo)
+ {
+ pbmtCSInfo->dwContextStaticsSize = dwContextStaticsOffset;
+ }
+#endif
+
+ //========================================================================
+ // END:
+ // Go thru all fields and initialize their FieldDescs.
+ //========================================================================
+
+ return;
+} // MethodTableBuilder::InitializeFieldDescs
+
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+//*******************************************************************************
+// Verify self-referencing static ValueType fields with RVA (when the size of the ValueType is known).
+void
+MethodTableBuilder::VerifySelfReferencingStaticValueTypeFields_WithRVA(
+ MethodTable ** pByValueClassCache)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(bmtFP->fHasSelfReferencingStaticValueTypeField_WithRVA);
+ // An enum's static self-referencing fields have been verified as the underlying type of the enum; we should not get here for them
+ _ASSERTE(!IsEnum());
+ // The size of the ValueType should be known at this point (the caller throws if it is 0)
+ _ASSERTE(bmtFP->NumInstanceFieldBytes != 0);
+
+ FieldDesc * pFieldDescList = GetApproxFieldDescListRaw();
+ DWORD nFirstThreadStaticFieldIndex = bmtEnumFields->dwNumInstanceFields + bmtEnumFields->dwNumStaticFields - bmtEnumFields->dwNumThreadStaticFields;
+ for (DWORD i = bmtEnumFields->dwNumInstanceFields; i < nFirstThreadStaticFieldIndex; i++)
+ {
+ FieldDesc * pFD = &pFieldDescList[i];
+ _ASSERTE(pFD->IsStatic());
+
+ if (pFD->IsRVA() && pFD->IsByValue())
+ {
+ _ASSERTE(pByValueClassCache[i] != NULL);
+
+ if (IsSelfRef(pByValueClassCache[i]))
+ {
+ DWORD rva;
+ IfFailThrow(GetMDImport()->GetFieldRVA(pFD->GetMemberDef(), &rva));
+
+ if (!GetModule()->CheckRvaField(rva, bmtFP->NumInstanceFieldBytes))
+ {
+ if (!Security::CanHaveRVA(GetAssembly()))
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"Illegal RVA of a mapped self-referencing static field");
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
+ }
+ }
+ }
+ }
+ }
+} // MethodTableBuilder::VerifySelfReferencingStaticValueTypeFields_WithRVA
+
+//*******************************************************************************
+// Returns true if hEnclosingTypeCandidate encloses, at any arbitrary depth,
+// hNestedTypeCandidate; returns false otherwise.
+
+bool MethodTableBuilder::IsEnclosingNestedTypePair(
+ bmtTypeHandle hEnclosingTypeCandidate,
+ bmtTypeHandle hNestedTypeCandidate)
+{
+ STANDARD_VM_CONTRACT;
+
+ CONSISTENCY_CHECK(!hEnclosingTypeCandidate.IsNull());
+ CONSISTENCY_CHECK(!hNestedTypeCandidate.IsNull());
+ CONSISTENCY_CHECK(!bmtTypeHandle::Equal(hEnclosingTypeCandidate, hNestedTypeCandidate));
+
+ Module * pModule = hEnclosingTypeCandidate.GetModule();
+
+ if (pModule != hNestedTypeCandidate.GetModule())
+ { // If the modules aren't the same, then there's no way
+ // hEnclosingTypeCandidate could enclose hNestedTypeCandidate. We make
+ // this check early so that the code can deal with only
+ // one Module and IMDInternalImport instance and can avoid
+ // extra checks.
+ return false;
+ }
+
+ IMDInternalImport * pMDImport = pModule->GetMDImport();
+
+ mdTypeDef tkEncl = hEnclosingTypeCandidate.GetTypeDefToken();
+ mdTypeDef tkNest = hNestedTypeCandidate.GetTypeDefToken();
+
+ while (tkEncl != tkNest)
+ { // Do this using the metadata APIs because MethodTableBuilder does
+ // not construct type representations for enclosing type chains.
+ if (FAILED(pMDImport->GetNestedClassProps(tkNest, &tkNest)))
+ { // tkNest is not a nested type.
+ return false;
+ }
+ }
+
+ // tkNest's enclosing type is tkEncl, so we've shown that
+ // hEnclosingTypeCandidate encloses hNestedTypeCandidate
+ return true;
+}
+
+//*******************************************************************************
+// Given an arbitrary nesting+subclassing pattern like this:
+//
+// class C1 {
+// private virtual void Foo() { ... }
+// class C2 : C1 {
+// ...
+// class CN : CN-1 {
+// private override void Foo() { ... }
+// }
+// ...
+// }
+// }
+//
+ // this method will return true, where hChild == CN and hBase == C1
+//
+ // Note that there is no requirement that a type derive from its immediately
+ // enclosing type; it can skip a level, as in this example:
+//
+// class A
+// {
+// private virtual void Foo() { }
+// public class B
+// {
+// public class C : A
+// {
+// private override void Foo() { }
+// }
+// }
+// }
+//
+// NOTE: IMPORTANT: This code assumes that hBase is indeed a base type of hChild,
+// and behaviour is undefined if this is not the case.
+
+bool MethodTableBuilder::IsBaseTypeAlsoEnclosingType(
+ bmtTypeHandle hBase,
+ bmtTypeHandle hChild)
+{
+ STANDARD_VM_CONTRACT;
+
+ CONSISTENCY_CHECK(!hBase.IsNull());
+ CONSISTENCY_CHECK(!hChild.IsNull());
+ CONSISTENCY_CHECK(!bmtTypeHandle::Equal(hBase, hChild));
+
+ // The idea of this algorithm is that if we climb the inheritance chain
+ // starting at hChild then we'll eventually hit hBase. If we check that
+ // for every (hParent, hChild) pair in the chain that hParent encloses
+ // hChild, then we've shown that hBase encloses hChild.
+
+ while (!bmtTypeHandle::Equal(hBase, hChild))
+ {
+ CONSISTENCY_CHECK(!hChild.GetParentType().IsNull());
+ bmtTypeHandle hParent(hChild.GetParentType());
+
+ if (!IsEnclosingNestedTypePair(hParent, hChild))
+ { // First, the parent type must enclose the child type.
+ // If this is not the case we fail immediately.
+ return false;
+ }
+
+ // Move up one in the inheritance chain, and try again.
+ hChild = hParent;
+ }
+
+ // If the loop worked itself from the original hChild all the way
+ // up to hBase, then we know that for every (hParent, hChild)
+ // pair in the chain that hParent enclosed hChild, and so we know
+ // that hBase encloses the original hChild
+ return true;
+}
+
+//*******************************************************************************
+BOOL MethodTableBuilder::TestOverrideForAccessibility(
+ bmtMethodHandle hParentMethod,
+ bmtTypeHandle hChildType)
+{
+ STANDARD_VM_CONTRACT;
+
+ bmtTypeHandle hParentType(hParentMethod.GetOwningType());
+
+ Module * pParentModule = hParentType.GetModule();
+ Module * pChildModule = hChildType.GetModule();
+
+ Assembly * pParentAssembly = pParentModule->GetAssembly();
+ Assembly * pChildAssembly = pChildModule->GetAssembly();
+
+ BOOL isSameAssembly = (pChildAssembly == pParentAssembly);
+
+ DWORD dwParentAttrs = hParentMethod.GetDeclAttrs();
+
+ // AKA "strict bit". This means that overridability is tightly bound to accessibility.
+ if (IsMdCheckAccessOnOverride(dwParentAttrs))
+ {
+ // Same Assembly
+ if (isSameAssembly || pParentAssembly->GrantsFriendAccessTo(pChildAssembly, hParentMethod.GetMethodDesc())
+ || pChildAssembly->IgnoresAccessChecksTo(pParentAssembly))
+ {
+ // Can always override any method that has accessibility greater than mdPrivate
+ if ((dwParentAttrs & mdMemberAccessMask) > mdPrivate)
+ { // Fall through
+ }
+ // Generally, types cannot override inherited mdPrivate methods, except:
+ // Types can access an enclosing type's private members, so a nested type
+ // can override them if it extends its enclosing type.
+ else if ((dwParentAttrs & mdMemberAccessMask) == mdPrivate &&
+ IsBaseTypeAlsoEnclosingType(hParentType, hChildType))
+ { // Fall through
+ }
+ else
+ {
+ return FALSE;
+ }
+ }
+ // Cross-Assembly
+ else
+ {
+ // If the method marks itself as requiring an access check on override, then
+ // the method must be public, FamORAssem, or Family
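+ // (e.g. a C# "internal" (mdAssem) strict virtual cannot be overridden from
+ // another assembly unless that assembly was granted friend access via
+ // InternalsVisibleTo, which is handled by the same-assembly branch above)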
+ if((dwParentAttrs & mdMemberAccessMask) <= mdAssem)
+ {
+ return FALSE;
+ }
+ }
+ }
+ return TRUE;
+}
+
+//*******************************************************************************
+VOID MethodTableBuilder::TestOverRide(bmtMethodHandle hParentMethod,
+ bmtMethodHandle hChildMethod)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsMdVirtual(hParentMethod.GetDeclAttrs()));
+ PRECONDITION(IsMdVirtual(hChildMethod.GetDeclAttrs()));
+ } CONTRACTL_END;
+
+ DWORD dwAttrs = hChildMethod.GetDeclAttrs();
+ DWORD dwParentAttrs = hParentMethod.GetDeclAttrs();
+
+ Module *pModule = hChildMethod.GetOwningType().GetModule();
+ Module *pParentModule = hParentMethod.GetOwningType().GetModule();
+
+ Assembly *pAssembly = pModule->GetAssembly();
+ Assembly *pParentAssembly = pParentModule->GetAssembly();
+
+ BOOL isSameModule = (pModule == pParentModule);
+ BOOL isSameAssembly = (pAssembly == pParentAssembly);
+
+ if (!TestOverrideForAccessibility(hParentMethod, hChildMethod.GetOwningType()))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_ACCESS_FAILURE, hChildMethod.GetMethodSignature().GetToken());
+ }
+
+ //
+ // Refer to Partition II, 9.3.3 for more information on what is permitted.
+ //
+
+ enum WIDENING_STATUS
+ {
+ e_NO, // NO
+ e_YES, // YES
+ e_SA, // YES, but only when same assembly
+ e_NSA, // YES, but only when NOT same assembly
+ e_SM, // YES, but only when same module
+ };
+
+ static_assert_no_msg(mdPrivateScope == 0x00);
+ static_assert_no_msg(mdPrivate == 0x01);
+ static_assert_no_msg(mdFamANDAssem == 0x02);
+ static_assert_no_msg(mdAssem == 0x03);
+ static_assert_no_msg(mdFamily == 0x04);
+ static_assert_no_msg(mdFamORAssem == 0x05);
+ static_assert_no_msg(mdPublic == 0x06);
+
+ static const DWORD dwCount = mdPublic - mdPrivateScope + 1;
+ static const WIDENING_STATUS rgWideningTable[dwCount][dwCount] =
+
+ // | Base type
+ // Subtype | mdPrivateScope mdPrivate mdFamANDAssem mdAssem mdFamily mdFamORAssem mdPublic
+ // --------------+-------------------------------------------------------------------------------------------------------
+ /*mdPrivateScope | */ { { e_SM, e_NO, e_NO, e_NO, e_NO, e_NO, e_NO },
+ /*mdPrivate | */ { e_SM, e_YES, e_NO, e_NO, e_NO, e_NO, e_NO },
+ /*mdFamANDAssem | */ { e_SM, e_YES, e_SA, e_NO, e_NO, e_NO, e_NO },
+ /*mdAssem | */ { e_SM, e_YES, e_SA, e_SA, e_NO, e_NO, e_NO },
+ /*mdFamily | */ { e_SM, e_YES, e_YES, e_NO, e_YES, e_NSA, e_NO },
+ /*mdFamORAssem | */ { e_SM, e_YES, e_YES, e_SA, e_YES, e_YES, e_NO },
+ /*mdPublic | */ { e_SM, e_YES, e_YES, e_YES, e_YES, e_YES, e_YES } };
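+
+ // Example: overriding a base mdFamORAssem method with an mdFamily override is
+ // e_NSA - permitted only across assemblies, where FamORAssem already degrades
+ // to Family from the overriding assembly's point of view.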
+
+ DWORD idxParent = (dwParentAttrs & mdMemberAccessMask) - mdPrivateScope;
+ DWORD idxMember = (dwAttrs & mdMemberAccessMask) - mdPrivateScope;
+ CONSISTENCY_CHECK(idxParent < dwCount);
+ CONSISTENCY_CHECK(idxMember < dwCount);
+
+ WIDENING_STATUS entry = rgWideningTable[idxMember][idxParent];
+
+ if (entry == e_NO ||
+ (entry == e_SA && !isSameAssembly && !pParentAssembly->GrantsFriendAccessTo(pAssembly, hParentMethod.GetMethodDesc())
+ && !pAssembly->IgnoresAccessChecksTo(pParentAssembly)) ||
+ (entry == e_NSA && isSameAssembly) ||
+ (entry == e_SM && !isSameModule)
+ )
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_REDUCEACCESS, hChildMethod.GetMethodSignature().GetToken());
+ }
+
+ return;
+}
+
+//*******************************************************************************
+VOID MethodTableBuilder::TestMethodImpl(
+ bmtMethodHandle hDeclMethod,
+ bmtMethodHandle hImplMethod)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!hDeclMethod.IsNull());
+ PRECONDITION(!hImplMethod.IsNull());
+ }
+ CONTRACTL_END
+
+ Module * pDeclModule = hDeclMethod.GetOwningType().GetModule();
+ Module * pImplModule = hImplMethod.GetOwningType().GetModule();
+
+ mdMethodDef tokDecl = hDeclMethod.GetMethodSignature().GetToken();
+ mdMethodDef tokImpl = hImplMethod.GetMethodSignature().GetToken();
+
+ BOOL isSameModule = pDeclModule->Equals(pImplModule);
+
+ IMDInternalImport *pIMDDecl = pDeclModule->GetMDImport();
+ IMDInternalImport *pIMDImpl = pImplModule->GetMDImport();
+
+ DWORD dwDeclAttrs;
+ if (FAILED(pIMDDecl->GetMethodDefProps(tokDecl, &dwDeclAttrs)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ DWORD dwImplAttrs;
+ if (FAILED(pIMDImpl->GetMethodDefProps(tokImpl, &dwImplAttrs)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ HRESULT hr = COR_E_TYPELOAD;
+
+ if (!IsMdVirtual(dwDeclAttrs))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_NONVIRTUAL_DECL);
+ }
+ if (!IsMdVirtual(dwImplAttrs))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_MUSTBEVIRTUAL);
+ }
+ // Virtual methods cannot be static
+ if (IsMdStatic(dwDeclAttrs))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_STATICVIRTUAL);
+ }
+ if (IsMdStatic(dwImplAttrs))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_STATICVIRTUAL);
+ }
+ if (IsMdFinal(dwDeclAttrs))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_FINAL_DECL);
+ }
+
+ // Since MethodImpls do not affect the visibility of the Decl method, there's
+ // no need to check.
+
+ // If Decl's parent is other than this class, Decl must not be private
+ mdTypeDef tkImplParent = mdTypeDefNil;
+ mdTypeDef tkDeclParent = mdTypeDefNil;
+
+ if (FAILED(hr = pIMDDecl->GetParentToken(tokDecl, &tkDeclParent)))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+ if (FAILED(hr = pIMDImpl->GetParentToken(tokImpl, &tkImplParent)))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+
+ // Make sure that we test for accessibility restrictions only if the decl is
+ // not within our own type, as we are allowed to methodImpl a private method
+ // with the strict bit set if it is in our own type.
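+ // A hypothetical C# sketch of the two cases (the C# compiler emits the first;
+ // hand-written IL can hit the same paths):
+ //
+ //     interface I { void M(); }
+ //     class C : I
+ //     {
+ //         void I.M() { }  // decl (I.M) lives on another type, so the
+ //                         // accessibility of I.M is tested below
+ //         // a methodImpl whose decl is a private method of C itself (strict
+ //         // bit set in IL) skips the accessibility test
+ //     }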
+ if (!isSameModule || tkDeclParent != tkImplParent)
+ {
+ if (!TestOverrideForAccessibility(hDeclMethod, hImplMethod.GetOwningType()))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_ACCESS_FAILURE, tokImpl);
+ }
+
+ // Decl's parent must not be tdSealed
+ mdToken tkGrandParentDummyVar;
+ DWORD dwDeclTypeAttrs;
+ if (FAILED(hr = pIMDDecl->GetTypeDefProps(tkDeclParent, &dwDeclTypeAttrs, &tkGrandParentDummyVar)))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+ if (IsTdSealed(dwDeclTypeAttrs))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_SEALED_DECL);
+ }
+ }
+
+ return;
+}
+
+//*******************************************************************************
+void MethodTableBuilder::SetSecurityFlagsOnMethod(bmtRTMethod* pParentMethod,
+ MethodDesc* pNewMD,
+ mdToken tokMethod,
+ DWORD dwMemberAttrs,
+ bmtInternalInfo* bmtInternal,
+ bmtMetaDataInfo* bmtMetaData)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD dwMethDeclFlags = 0;
+ DWORD dwMethNullDeclFlags = 0;
+ DWORD dwClassDeclFlags = 0xffffffff;
+ DWORD dwClassNullDeclFlags = 0xffffffff;
+
+ if ( IsMdHasSecurity(dwMemberAttrs) || IsTdHasSecurity(GetAttrClass()) || pNewMD->IsNDirect() )
+ {
+ // Disable inlining for any function which does runtime declarative
+ // security actions.
+ DWORD dwRuntimeSecurityFlags = (pNewMD->GetSecurityFlagsDuringClassLoad(GetMDImport(),
+ tokMethod,
+ GetCl(),
+ &dwClassDeclFlags,
+ &dwClassNullDeclFlags,
+ &dwMethDeclFlags,
+ &dwMethNullDeclFlags) & DECLSEC_RUNTIME_ACTIONS);
+ if (dwRuntimeSecurityFlags)
+ {
+ // If we get here it means
+ // - We have some "runtime" actions on this method. We dont care about "linktime" demands
+ // - If this is a pinvoke method, then the unmanaged code access demand has not been suppressed
+ pNewMD->SetNotInline(true);
+
+ pNewMD->SetInterceptedForDeclSecurity();
+
+ if (MethodSecurityDescriptor::IsDeclSecurityCASDemandsOnly(dwRuntimeSecurityFlags, tokMethod, GetMDImport()))
+ {
+ pNewMD->SetInterceptedForDeclSecurityCASDemandsOnly();
+ }
+ }
+ }
+
+ if ( IsMdHasSecurity(dwMemberAttrs) )
+ {
+ // We only care about checks that are not empty...
+ dwMethDeclFlags &= ~dwMethNullDeclFlags;
+
+ if ( dwMethDeclFlags & (DECLSEC_LINK_CHECKS|DECLSEC_NONCAS_LINK_DEMANDS) )
+ {
+ pNewMD->SetRequiresLinktimeCheck();
+ // if the link check is due to HP and nothing else, capture that in the flags too
+ if (dwMethDeclFlags & DECLSEC_LINK_CHECKS_HPONLY)
+ {
+ pNewMD->SetRequiresLinkTimeCheckHostProtectionOnly();
+ }
+ }
+
+ if ( dwMethDeclFlags & (DECLSEC_INHERIT_CHECKS|DECLSEC_NONCAS_INHERITANCE) )
+ {
+ pNewMD->SetRequiresInheritanceCheck();
+ if (IsInterface())
+ {
+ GetHalfBakedClass()->SetSomeMethodsRequireInheritanceCheck();
+ }
+ }
+ }
+
+ // Linktime checks on a method override those on a class.
+ // If the method has an empty set of linktime checks,
+ // then don't require linktime checking for this method.
+ if (!pNewMD->RequiresLinktimeCheck() && RequiresLinktimeCheck() && !(dwMethNullDeclFlags & DECLSEC_LINK_CHECKS) )
+ {
+
+ pNewMD->SetRequiresLinktimeCheck();
+ if (RequiresLinktimeCheckHostProtectionOnly())
+ {
+ pNewMD->SetRequiresLinkTimeCheckHostProtectionOnly();
+ }
+ }
+
+ if ( pParentMethod != NULL &&
+ (pParentMethod->GetMethodDesc()->RequiresInheritanceCheck() ||
+ pParentMethod->GetMethodDesc()->ParentRequiresInheritanceCheck()) )
+ {
+ pNewMD->SetParentRequiresInheritanceCheck();
+ }
+
+ // Methods on an interface that includes an UnmanagedCode check
+ // suppression attribute are assumed to be interop methods. We ask
+ // for linktime checks on these.
+ // Also place linktime checks on all P/Invoke calls.
+ if (
+#ifndef FEATURE_CORECLR
+ (IsInterface() &&
+ (GetMDImport()->GetCustomAttributeByName(GetCl(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_OK ||
+ GetMDImport()->GetCustomAttributeByName(pNewMD->GetMemberDef(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_OK) ) ||
+
+#endif // !FEATURE_CORECLR
+ pNewMD->IsNDirect() ||
+ (pNewMD->IsComPlusCall() && !IsInterface()))
+ {
+ pNewMD->SetRequiresLinktimeCheck();
+ }
+
+#if defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+ // All public methods on public types will do a link demand of
+ // full trust, unless AllowUntrustedCaller attribute is set
+ if (
+#ifdef _DEBUG
+ g_pConfig->Do_AllowUntrustedCaller_Checks() &&
+#endif
+ !pNewMD->RequiresLinktimeCheck())
+ {
+ // If the method is public (visible outside its assembly),
+ // the type is public, and the assembly is not marked with the
+ // AllowUntrustedCaller attribute, do a link demand for full
+ // trust on all callers. Note that this won't be effective on
+ // virtual overrides: the caller can always do a virtual call
+ // on the base type / interface.
+
+ if (Security::MethodIsVisibleOutsideItsAssembly(dwMemberAttrs, GetAttrClass(), IsGlobalClass()))
+ {
+ _ASSERTE(GetClassLoader());
+ _ASSERTE(GetAssembly());
+
+ // See if the Assembly has AllowUntrustedCallerChecks CA
+ // Pull this page in last
+
+ if (!GetAssembly()->AllowUntrustedCaller())
+ pNewMD->SetRequiresLinktimeCheck();
+ }
+ }
+#endif // defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+
+ // If it's a delegate BeginInvoke, we need to do a HostProtection check for synchronization
+ if(!pNewMD->RequiresLinktimeCheck() && IsDelegate())
+ {
+ DelegateEEClass* pDelegateClass = (DelegateEEClass*)GetHalfBakedClass();
+ if(pNewMD == pDelegateClass->m_pBeginInvokeMethod)
+ {
+ pNewMD->SetRequiresLinktimeCheck();
+ pNewMD->SetRequiresLinkTimeCheckHostProtectionOnly(); // this link check is due to HP only
+ }
+
+ }
+}
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+VOID
+MethodTableBuilder::ValidateMethods()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(bmtInternal));
+ PRECONDITION(CheckPointer(bmtMetaData));
+ PRECONDITION(CheckPointer(bmtError));
+ PRECONDITION(CheckPointer(bmtProp));
+ PRECONDITION(CheckPointer(bmtInterface));
+ PRECONDITION(CheckPointer(bmtParent));
+ PRECONDITION(CheckPointer(bmtMFDescs));
+ PRECONDITION(CheckPointer(bmtEnumFields));
+ PRECONDITION(CheckPointer(bmtMethodImpl));
+ PRECONDITION(CheckPointer(bmtVT));
+ }
+ CONTRACTL_END;
+
+ // Used to keep track of located default and type constructors.
+ CONSISTENCY_CHECK(bmtVT->pCCtor == NULL);
+ CONSISTENCY_CHECK(bmtVT->pDefaultCtor == NULL);
+
+ // Fetch the hard-coded signatures for the type constructor and the
+ // default constructor and create MethodSignature objects for both at
+ // the method level so this does not happen for every specialname
+ // method.
+
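+ // In IL terms, the two hard-coded signatures fetched here are, as a sketch
+ // (attributes and calling-convention details elided):
+ //
+ //     .method static   void .cctor()   // gsig_SM_RetVoid: type constructor
+ //     .method instance void .ctor()    // gsig_IM_RetVoid: default constructor
+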
+ Signature sig;
+
+ sig = MscorlibBinder::GetSignature(&gsig_SM_RetVoid);
+
+ MethodSignature cctorSig(MscorlibBinder::GetModule(),
+ COR_CCTOR_METHOD_NAME,
+ sig.GetRawSig(), sig.GetRawSigLen());
+
+ sig = MscorlibBinder::GetSignature(&gsig_IM_RetVoid);
+
+ MethodSignature defaultCtorSig(MscorlibBinder::GetModule(),
+ COR_CTOR_METHOD_NAME,
+ sig.GetRawSig(), sig.GetRawSigLen());
+
+ Module * pModule = GetModule();
+ DeclaredMethodIterator it(*this);
+ while (it.Next())
+ {
+ // The RVA is only valid/testable if it has not been overwritten
+ // for something like edit-and-continue
+ // Complete validation of non-zero RVAs is done later inside MethodDesc::GetILHeader.
+ if ((it.RVA() == 0) && (pModule->GetDynamicIL(it.Token(), FALSE) == NULL))
+ {
+ // IL methods implemented here must have a valid code RVA.
+ // This came up due to a linker bug where the ImplFlags/DescrOffset were
+ // being set to null and we weren't coping with it.
+ if((IsMiIL(it.ImplFlags()) || IsMiOPTIL(it.ImplFlags())) &&
+ !IsMdAbstract(it.Attrs()) &&
+ !IsReallyMdPinvokeImpl(it.Attrs()) &&
+ !IsMiInternalCall(it.ImplFlags()))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MISSINGMETHODRVA, it.Token());
+ }
+ }
+
+ if (IsMdRTSpecialName(it.Attrs()))
+ {
+ if (IsMdVirtual(it.Attrs()))
+ { // Virtual specialname methods are illegal
+ BuildMethodTableThrowException(IDS_CLASSLOAD_GENERAL);
+ }
+
+ // Constructors (.ctor) and class initializers (.cctor) are special
+ const MethodSignature &curSig(it->GetMethodSignature());
+
+ if (IsMdStatic(it.Attrs()))
+ { // The only rtSpecialName static method allowed is the .cctor
+ if (!curSig.ExactlyEqual(cctorSig))
+ { // Bad method
+ BuildMethodTableThrowException(IDS_CLASSLOAD_GENERAL);
+ }
+
+ // Remember it for later
+ bmtVT->pCCtor = *it;
+ }
+ else
+ {
+ if(!MethodSignature::NamesEqual(curSig, defaultCtorSig))
+ { // The only rtSpecialName instance methods allowed are .ctors
+ BuildMethodTableThrowException(IDS_CLASSLOAD_GENERAL);
+ }
+
+ // .ctor must return void
+ MetaSig methodMetaSig(curSig.GetSignature(),
+ static_cast<DWORD>(curSig.GetSignatureLength()),
+ curSig.GetModule(),
+ NULL);
+
+ if (methodMetaSig.GetReturnType() != ELEMENT_TYPE_VOID)
+ { // All constructors must have a void return type
+ BuildMethodTableThrowException(IDS_CLASSLOAD_GENERAL);
+ }
+
+ // See if this is a default constructor. If so, remember it for later.
+ if (curSig.ExactlyEqual(defaultCtorSig))
+ {
+ bmtVT->pDefaultCtor = *it;
+ }
+ }
+ }
+
+ // Make sure that fcalls have a 0 rva. This is assumed by the prejit fixup logic
+ if (it.MethodType() == METHOD_TYPE_FCALL && it.RVA() != 0)
+ {
+ BuildMethodTableThrowException(BFA_ECALLS_MUST_HAVE_ZERO_RVA, it.Token());
+ }
+
+ // check for proper use of the Managed and native flags
+ if (IsMiManaged(it.ImplFlags()))
+ {
+ if (IsMiIL(it.ImplFlags()) || IsMiRuntime(it.ImplFlags())) // IsMiOPTIL(it.ImplFlags()) no longer supported
+ {
+ // No need to set code address, pre stub used automatically.
+ }
+ else
+ {
+ if (IsMiNative(it.ImplFlags()))
+ {
+ // For now, simply disallow managed native code. If you turn this on, you have
+ // to at least ensure that we have SkipVerificationPermission or equivalent.
+ BuildMethodTableThrowException(BFA_MANAGED_NATIVE_NYI, it.Token());
+ }
+ else
+ {
+ BuildMethodTableThrowException(BFA_BAD_IMPL_FLAGS, it.Token());
+ }
+ }
+ }
+ else
+ {
+ if (IsMiNative(it.ImplFlags()) && IsGlobalClass())
+ {
+ // global function unmanaged entrypoint via IJW thunk was handled
+ // above.
+ }
+ else
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BAD_UNMANAGED_RVA, it.Token());
+ }
+ if (it.MethodType() != METHOD_TYPE_NDIRECT)
+ {
+ BuildMethodTableThrowException(BFA_BAD_UNMANAGED_ENTRY_POINT);
+ }
+ }
+
+ // Vararg methods are not allowed inside generic classes,
+ // nor can they be generic methods.
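+ // E.g., a hypothetical IL sketch of the two rejected shapes (both fail with
+ // BFA_GENCODE_NOT_BE_VARARG):
+ //
+ //     .class G`1<T> { .method public vararg void M() { ... } }  // vararg in a generic class
+ //     .method public vararg void N<T>() { ... }                 // generic vararg method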
+ if (bmtGenerics->GetNumGenericArgs() > 0 || (it.MethodType() == METHOD_TYPE_INSTANTIATED) )
+ {
+ // We've been trying to avoid asking for the signature - now we need it
+ DWORD cMemberSignature;
+ PCCOR_SIGNATURE pMemberSignature = it.GetSig(&cMemberSignature);
+
+ if (MetaSig::IsVarArg(pModule, Signature(pMemberSignature, cMemberSignature)))
+ {
+ BuildMethodTableThrowException(BFA_GENCODE_NOT_BE_VARARG);
+ }
+ }
+
+ if (IsMdVirtual(it.Attrs()) && IsMdPublic(it.Attrs()) && it.Name() == NULL)
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_NOMETHOD_NAME);
+ }
+
+ if (it.IsMethodImpl())
+ {
+ if (!IsMdVirtual(it.Attrs()))
+ { // Non-virtual methods cannot participate in a methodImpl pair.
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_MUSTBEVIRTUAL, it.Token());
+ }
+ }
+
+ // Virtual static methods are not allowed.
+ if (IsMdStatic(it.Attrs()) && IsMdVirtual(it.Attrs()))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_STATICVIRTUAL, it.Token());
+ }
+ }
+}
+
+//*******************************************************************************
+// Essentially, this is a helper method that combines calls to InitMethodDesc and
+// SetSecurityFlagsOnMethod. It then assigns the newly initialized MethodDesc to
+// the bmtMDMethod.
+VOID
+MethodTableBuilder::InitNewMethodDesc(
+ bmtMDMethod * pMethod,
+ MethodDesc * pNewMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+ // First, set all flags that control layout of optional slots
+ //
+ pNewMD->SetClassification(GetMethodClassification(pMethod->GetMethodType()));
+
+ if (pMethod->GetMethodImplType() == METHOD_IMPL)
+ pNewMD->SetHasMethodImplSlot();
+
+ if (pMethod->GetSlotIndex() >= bmtVT->cVtableSlots)
+ pNewMD->SetHasNonVtableSlot();
+
+ if (NeedsNativeCodeSlot(pMethod))
+ pNewMD->SetHasNativeCodeSlot();
+
+ // Now we know the classification we can allocate the correct type of
+ // method desc and perform any classification specific initialization.
+
+ LPCSTR pName = pMethod->GetMethodSignature().GetName();
+ if (pName == NULL)
+ {
+ if (FAILED(GetMDImport()->GetNameOfMethodDef(pMethod->GetMethodSignature().GetToken(), &pName)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ }
+
+#ifdef _DEBUG
+ LPCUTF8 pszDebugMethodName;
+ if (FAILED(GetMDImport()->GetNameOfMethodDef(pMethod->GetMethodSignature().GetToken(), &pszDebugMethodName)))
+ {
+ pszDebugMethodName = "Invalid MethodDef record";
+ }
+ S_SIZE_T safeLen = S_SIZE_T(strlen(pszDebugMethodName)) + S_SIZE_T(1);
+ if(safeLen.IsOverflow())
+ COMPlusThrowHR(COR_E_OVERFLOW);
+
+ size_t len = safeLen.Value();
+ LPCUTF8 pszDebugMethodNameCopy = (char*) AllocateFromLowFrequencyHeap(safeLen);
+ strcpy_s((char *) pszDebugMethodNameCopy, len, pszDebugMethodName);
+#endif // _DEBUG
+
+ // Do the init specific to each classification of MethodDesc & assign some common fields
+ InitMethodDesc(pNewMD,
+ GetMethodClassification(pMethod->GetMethodType()),
+ pMethod->GetMethodSignature().GetToken(),
+ pMethod->GetImplAttrs(),
+ pMethod->GetDeclAttrs(),
+ FALSE,
+ pMethod->GetRVA(),
+ GetMDImport(),
+ pName
+ COMMA_INDEBUG(pszDebugMethodNameCopy)
+ COMMA_INDEBUG(GetDebugClassName())
+ COMMA_INDEBUG("") // FIX this happens on global methods, give better info
+ );
+
+ pMethod->SetMethodDesc(pNewMD);
+
+ bmtRTMethod * pParentMethod = NULL;
+
+ if (HasParent())
+ {
+ SLOT_INDEX idx = pMethod->GetSlotIndex();
+ CONSISTENCY_CHECK(idx != INVALID_SLOT_INDEX);
+
+ if (idx < GetParentMethodTable()->GetNumVirtuals())
+ {
+ pParentMethod = (*bmtParent->pSlotTable)[idx].Decl().AsRTMethod();
+ }
+ }
+
+
+ // Declarative Security
+ SetSecurityFlagsOnMethod(pParentMethod, pNewMD, pMethod->GetMethodSignature().GetToken(), pMethod->GetDeclAttrs(), bmtInternal, bmtMetaData);
+
+ // Turn off inlining for any calls
+ // that are marked in the metadata as not being inlineable.
+ if(IsMiNoInlining(pMethod->GetImplAttrs()))
+ {
+ pNewMD->SetNotInline(true);
+ }
+
+ pNewMD->SetSlot(pMethod->GetSlotIndex());
+}
+
+//*******************************************************************************
+// Determine vtable placement for each non-virtual in the class, while also
+// looking for default and type constructors.
+VOID
+MethodTableBuilder::PlaceNonVirtualMethods()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(bmtInternal));
+ PRECONDITION(CheckPointer(bmtMetaData));
+ PRECONDITION(CheckPointer(bmtError));
+ PRECONDITION(CheckPointer(bmtProp));
+ PRECONDITION(CheckPointer(bmtInterface));
+ PRECONDITION(CheckPointer(bmtParent));
+ PRECONDITION(CheckPointer(bmtMFDescs));
+ PRECONDITION(CheckPointer(bmtEnumFields));
+ PRECONDITION(CheckPointer(bmtMethodImpl));
+ PRECONDITION(CheckPointer(bmtVT));
+ }
+ CONTRACTL_END;
+
+ INDEBUG(bmtVT->SealVirtualSlotSection();)
+
+ //
+ // For each non-virtual method, place the method in the next available non-virtual method slot.
+ //
+
+ // Place the cctor and default ctor first. code:MethodTable::GetCCtorSlot and code:MethodTable::GetDefaultCtorSlot
+ // depend on this.
+ if (bmtVT->pCCtor != NULL)
+ {
+ if (!bmtVT->AddNonVirtualMethod(bmtVT->pCCtor))
+ BuildMethodTableThrowException(IDS_CLASSLOAD_TOO_MANY_METHODS);
+ }
+
+ if (bmtVT->pDefaultCtor != NULL)
+ {
+ if (!bmtVT->AddNonVirtualMethod(bmtVT->pDefaultCtor))
+ BuildMethodTableThrowException(IDS_CLASSLOAD_TOO_MANY_METHODS);
+ }
+
+ // We use the slot number during remoting and to map methods between generic
+ // instantiations (see MethodTable::GetParallelMethodDesc). The current
+ // implementation of this mechanism requires real slots.
+ BOOL fCanHaveNonVtableSlots = (bmtGenerics->GetNumGenericArgs() == 0) && !IsInterface();
+
+ // Flag to avoid second pass when possible
+ BOOL fHasNonVtableSlots = FALSE;
+
+ //
+ // Place all methods that require a real vtable slot first. This is necessary so
+ // that they get consecutive slot numbers right after the virtual slots.
+ //
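+ // Illustrative layout (hypothetical): with V virtual slots already placed,
+ // methods needing real slots fill V, V+1, ... first; the remaining methods get
+ // non-vtable slots afterwards and never appear in the slot array:
+ //
+ //     [0 .. V-1]   virtual slots
+ //     [V .. T-1]   non-virtual methods that still require real slots
+ //     (the rest)   non-vtable slots
+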
+
+ DeclaredMethodIterator it(*this);
+ while (it.Next())
+ {
+ // Skip methods that are placed already
+ if (it->GetSlotIndex() != INVALID_SLOT_INDEX)
+ continue;
+
+#ifdef _DEBUG
+ if(GetHalfBakedClass()->m_fDebuggingClass && g_pConfig->ShouldBreakOnMethod(it.Name()))
+ CONSISTENCY_CHECK_MSGF(false, ("BreakOnMethodName: '%s' ", it.Name()));
+#endif // _DEBUG
+
+ if (!fCanHaveNonVtableSlots ||
+ it->GetMethodType() == METHOD_TYPE_INSTANTIATED ||
+ MayBeRemotingIntercepted(*it))
+ {
+ // We use the slot number during remoting and to map methods between generic
+ // instantiations (see MethodTable::GetParallelMethodDesc). The current
+ // implementation of this mechanism requires real slots.
+ }
+ else
+ {
+ // This method does not need a real vtable slot
+ fHasNonVtableSlots = TRUE;
+ continue;
+ }
+
+ // This will update slot index in bmtMDMethod
+ if (!bmtVT->AddNonVirtualMethod(*it))
+ BuildMethodTableThrowException(IDS_CLASSLOAD_TOO_MANY_METHODS);
+ }
+
+ // Remember the last real vtable slot
+ bmtVT->cVtableSlots = bmtVT->cTotalSlots;
+
+ // Are there any Non-vtable slots to place?
+ if (!fHasNonVtableSlots)
+ return;
+
+ //
+ // Now, place the remaining methods. They will get non-vtable slots.
+ //
+
+ DeclaredMethodIterator it2(*this);
+ while (it2.Next())
+ {
+ // Skip methods that are placed already
+ if (it2->GetSlotIndex() != INVALID_SLOT_INDEX)
+ continue;
+
+ if (!bmtVT->AddNonVirtualMethod(*it2))
+ BuildMethodTableThrowException(IDS_CLASSLOAD_TOO_MANY_METHODS);
+ }
+
+}
+
+//*******************************************************************************
+// Determine vtable placement for each virtual member in this class.
+VOID
+MethodTableBuilder::PlaceVirtualMethods()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(bmtInternal));
+ PRECONDITION(CheckPointer(bmtMetaData));
+ PRECONDITION(CheckPointer(bmtError));
+ PRECONDITION(CheckPointer(bmtProp));
+ PRECONDITION(CheckPointer(bmtInterface));
+ PRECONDITION(CheckPointer(bmtParent));
+ PRECONDITION(CheckPointer(bmtMFDescs));
+ PRECONDITION(CheckPointer(bmtEnumFields));
+ PRECONDITION(CheckPointer(bmtMethodImpl));
+ PRECONDITION(CheckPointer(bmtVT));
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ LPCUTF8 pszDebugName, pszDebugNamespace;
+ if (FAILED(GetMDImport()->GetNameOfTypeDef(GetCl(), &pszDebugName, &pszDebugNamespace)))
+ {
+ pszDebugName = pszDebugNamespace = "Invalid TypeDef record";
+ }
+#endif // _DEBUG
+
+ //
+ // For each virtual method
+ // - If the method is not declared as newslot, search all virtual methods in the parent
+ // type for an override candidate.
+ // - If such a candidate is found, test to see if the override is valid. If
+ // the override is not valid, throw TypeLoadException
+ // - If a candidate is found above, place the method in the inherited slot as both
+ // the Decl and the Impl.
+ // - Else, place the method in the next available empty vtable slot.
+ //
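+ // A hypothetical C# sketch of the two placement outcomes:
+ //
+ //     class B { public virtual void M() { } public virtual void N() { } }
+ //     class D : B
+ //     {
+ //         public override void M() { }     // valid override: reuses B's slot for M
+ //         public new virtual void N() { }  // mdNewSlot: gets a fresh vtable slot
+ //     }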
+
+ DeclaredMethodIterator it(*this);
+ while (it.Next())
+ {
+ if (!IsMdVirtual(it.Attrs()))
+ { // Only processing declared virtual methods
+ continue;
+ }
+
+#ifdef _DEBUG
+ if(GetHalfBakedClass()->m_fDebuggingClass && g_pConfig->ShouldBreakOnMethod(it.Name()))
+ CONSISTENCY_CHECK_MSGF(false, ("BreakOnMethodName: '%s' ", it.Name()));
+#endif // _DEBUG
+
+ // If this member is a method which overrides a parent method, it will be set to non-NULL
+ bmtRTMethod * pParentMethod = NULL;
+
+ // Hash that a method with this name exists in this class
+ // Note that ctors and static ctors are not added to the table
+ BOOL fMethodConstraintsMatch = FALSE;
+
+ // If the member is marked with a new slot we do not need to find it in the parent
+ if (HasParent() && !IsMdNewSlot(it.Attrs()))
+ {
+ // Attempt to find the method with this name and signature in the parent class.
+ // This method may or may not create pParentMethodHash (if it does not already exist).
+ // It also may or may not fill in pMemberSignature/cMemberSignature.
+ // An error is only returned when we can not create the hash.
+ // NOTE: This operation touches metadata
+ pParentMethod = LoaderFindMethodInParentClass(
+ it->GetMethodSignature(), bmtProp->fNoSanityChecks ? NULL : &fMethodConstraintsMatch);
+
+ if (pParentMethod != NULL)
+ { // Found an override candidate
+ DWORD dwParentAttrs = pParentMethod->GetDeclAttrs();
+
+ if (!IsMdVirtual(dwParentAttrs))
+ { // Can't override a non-virtual method
+ BuildMethodTableThrowException(BFA_NONVIRT_NO_SEARCH, it.Token());
+ }
+
+ if(IsMdFinal(dwParentAttrs))
+ { // Can't override a final method
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_FINAL_DECL, it.Token());
+ }
+
+ if(!bmtProp->fNoSanityChecks)
+ {
+ TestOverRide(bmtMethodHandle(pParentMethod),
+ bmtMethodHandle(*it));
+
+ if (!fMethodConstraintsMatch)
+ {
+ BuildMethodTableThrowException(
+ IDS_CLASSLOAD_CONSTRAINT_MISMATCH_ON_IMPLICIT_OVERRIDE,
+ it.Token());
+ }
+ }
+ }
+ }
+
+ // vtable method
+ if (IsInterface())
+ {
+ CONSISTENCY_CHECK(pParentMethod == NULL);
+ // Also sets new slot number on bmtRTMethod and MethodDesc
+ if (!bmtVT->AddVirtualMethod(*it))
+ BuildMethodTableThrowException(IDS_CLASSLOAD_TOO_MANY_METHODS);
+ }
+ else if (pParentMethod != NULL)
+ {
+ bmtVT->SetVirtualMethodOverride(pParentMethod->GetSlotIndex(), *it);
+ }
+ else
+ {
+ if (!bmtVT->AddVirtualMethod(*it))
+ BuildMethodTableThrowException(IDS_CLASSLOAD_TOO_MANY_METHODS);
+ }
+ }
+}
+
+// Given an interface map entry, and a name+signature, compute the method on the interface
+// that the name+signature corresponds to. Used by ProcessMethodImpls and ProcessInexactMethodImpls.
+// Always returns the first match that it finds; this gives rise to the ambiguities described in
+// code:#ProcessInexactMethodImpls_Ambiguities.
+MethodTableBuilder::bmtMethodHandle
+MethodTableBuilder::FindDeclMethodOnInterfaceEntry(bmtInterfaceEntry *pItfEntry, MethodSignature &declSig)
+{
+ STANDARD_VM_CONTRACT;
+
+ bmtMethodHandle declMethod;
+
+ bmtInterfaceEntry::InterfaceSlotIterator slotIt =
+ pItfEntry->IterateInterfaceSlots(GetStackingAllocator());
+ // Check for exact match
+ for (; !slotIt.AtEnd(); slotIt.Next())
+ {
+ bmtRTMethod * pCurDeclMethod = slotIt->Decl().AsRTMethod();
+
+ if (declSig.ExactlyEqual(pCurDeclMethod->GetMethodSignature()))
+ {
+ declMethod = slotIt->Decl();
+ break;
+ }
+ }
+ slotIt.ResetToStart();
+
+ // Check for equivalent match if exact match wasn't found
+ if (declMethod.IsNull())
+ {
+ for (; !slotIt.AtEnd(); slotIt.Next())
+ {
+ bmtRTMethod * pCurDeclMethod = slotIt->Decl().AsRTMethod();
+
+ // Type Equivalence is forbidden in MethodImpl MemberRefs
+ if (declSig.Equivalent(pCurDeclMethod->GetMethodSignature()))
+ {
+ declMethod = slotIt->Decl();
+ break;
+ }
+ }
+ }
+
+ return declMethod;
+}
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+// Process the list of inexact method impls generated during ProcessMethodImpls.
+// This list is used to allow a methodImpl targeting an interface method to override
+// methods on several equivalent interfaces in the interface map. This logic is necessary
+// so that in the presence of an embedded interface the behavior appears to mimic
+// the behavior as if the interface were not embedded.
+//
+// In particular, the logic here is to handle cases such as
+//
+// Assembly A
+// [TypeIdentifier("x","y")]
+// interface I'
+// { void Method(); }
+// interface IOther : I' {}
+//
+// Assembly B
+// [TypeIdentifier("x","y")]
+// interface I
+// { void Method(); }
+// class Test : I, IOther
+// {
+// void I.Method()
+// {}
+// }
+//
+// In this case, there is one method, and one methodimpl, but there are 2 interfaces on the class that both
+// require an implementation of their method. The correct semantic for type equivalence, is that any
+// methodimpl directly targeting a method on an interface must be respected, and if it also applies to a type
+// equivalent interface method, then if that method was not methodimpl'd directly, then the methodimpl should apply
+// there as well. The ProcessInexactMethodImpls function does this secondary MethodImpl mapping.
+//
+//#ProcessInexactMethodImpls_Ambiguities
+// In the presence of ambiguities, such as there are 3 equivalent interfaces implemented on a class and 2 methodimpls,
+// we will apply the 2 method impls exactly to appropriate interface methods, and arbitrarily pick one to apply to the
+// other interface. This is clearly ambiguous, but tricky to detect in the type loader efficiently, and should hopefully
+// not cause too many problems.
+//
+VOID
+MethodTableBuilder::ProcessInexactMethodImpls()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (bmtMethod->dwNumberInexactMethodImplCandidates == 0)
+ return;
+
+ DeclaredMethodIterator it(*this);
+ while (it.Next())
+ {
+ // Non-virtual methods cannot be classified as methodImpl - we should have thrown an
+ // error before reaching this point.
+ CONSISTENCY_CHECK(!(!IsMdVirtual(it.Attrs()) && it.IsMethodImpl()));
+
+ if (!IsMdVirtual(it.Attrs()))
+ { // Only virtual methods can participate in methodImpls
+ continue;
+ }
+
+ if(!it.IsMethodImpl())
+ {
+ // Skip methods which are not the bodies of MethodImpl specifications
+ continue;
+ }
+
+ // If this method serves as the BODY of a MethodImpl specification, then
+ // we should iterate all the MethodImpl's for this class and see just how many
+ // of them this method participates in as the BODY.
+ for(DWORD m = 0; m < bmtMethod->dwNumberMethodImpls; m++)
+ {
+ // Inexact matching logic only works on MethodImpls that have been opted into inexactness by ProcessMethodImpls.
+ if (!bmtMetaData->rgMethodImplTokens[m].fConsiderDuringInexactMethodImplProcessing)
+ {
+ continue;
+ }
+
+ // If the methodimpl we are working with does not match this method, continue to next methodimpl
+ if(it.Token() != bmtMetaData->rgMethodImplTokens[m].methodBody)
+ {
+ continue;
+ }
+
+ bool fMatchFound = false;
+
+ LPCUTF8 szName = NULL;
+ PCCOR_SIGNATURE pSig = NULL;
+ ULONG cbSig;
+
+ mdToken mdDecl = bmtMetaData->rgMethodImplTokens[m].methodDecl;
+
+ if (TypeFromToken(mdDecl) == mdtMethodDef)
+ { // Different methods are used to access MethodDef and MemberRef
+ // names and signatures.
+ if (FAILED(GetMDImport()->GetNameOfMethodDef(mdDecl, &szName)) ||
+ FAILED(GetMDImport()->GetSigOfMethodDef(mdDecl, &cbSig, &pSig)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ }
+ else
+ {
+ if (FAILED(GetMDImport()->GetNameAndSigOfMemberRef(mdDecl, &pSig, &cbSig, &szName)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ }
+
+ Substitution *pDeclSubst = &bmtMetaData->pMethodDeclSubsts[m];
+ MethodSignature declSig(GetModule(), szName, pSig, cbSig, pDeclSubst);
+ bmtInterfaceEntry * pItfEntry = NULL;
+
+ for (DWORD i = 0; i < bmtInterface->dwInterfaceMapSize; i++)
+ {
+ if (bmtInterface->pInterfaceMap[i].GetInterfaceEquivalenceSet() != bmtMetaData->rgMethodImplTokens[m].interfaceEquivalenceSet)
+ continue;
+
+ bmtMethodHandle declMethod;
+ pItfEntry = &bmtInterface->pInterfaceMap[i];
+
+ // Search for declmethod on this interface
+ declMethod = FindDeclMethodOnInterfaceEntry(pItfEntry, declSig);
+
+ // If we didn't find a match, continue on to next interface in the equivalence set
+ if (declMethod.IsNull())
+ continue;
+
+ if (!IsMdVirtual(declMethod.GetDeclAttrs()))
+ { // Make sure the decl is virtual
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_MUSTBEVIRTUAL, it.Token());
+ }
+
+ fMatchFound = true;
+
+ bool fPreexistingImplFound = false;
+
+ // Check to ensure there isn't already a matching declMethod in the method impl list
+ for (DWORD iMethodImpl = 0; iMethodImpl < bmtMethodImpl->pIndex; iMethodImpl++)
+ {
+ if (bmtMethodImpl->GetDeclarationMethod(iMethodImpl) == declMethod)
+ {
+ fPreexistingImplFound = true;
+ break;
+ }
+ }
+
+ // Search for other matches
+ if (fPreexistingImplFound)
+ continue;
+
+ // Otherwise, record the newly discovered method impl match
+ bmtMethodImpl->AddMethodImpl(*it, declMethod, GetStackingAllocator());
+ }
+
+ if (!fMatchFound && bmtMetaData->rgMethodImplTokens[m].fThrowIfUnmatchedDuringInexactMethodImplProcessing)
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_DECLARATIONNOTFOUND, it.Token());
+ }
+ }
+ }
+}
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+VOID
+MethodTableBuilder::ProcessMethodImpls()
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ DeclaredMethodIterator it(*this);
+ while (it.Next())
+ {
+ // Non-virtual methods cannot be classified as methodImpl - we should have thrown an
+ // error before reaching this point.
+ CONSISTENCY_CHECK(!(!IsMdVirtual(it.Attrs()) && it.IsMethodImpl()));
+
+ if (!IsMdVirtual(it.Attrs()))
+ { // Only virtual methods can participate in methodImpls
+ continue;
+ }
+
+ // If this method serves as the BODY of a MethodImpl specification, then
+ // we should iterate all the MethodImpl's for this class and see just how many
+ // of them this method participates in as the BODY.
+ if(it.IsMethodImpl())
+ {
+ for(DWORD m = 0; m < bmtMethod->dwNumberMethodImpls; m++)
+ {
+ if(it.Token() == bmtMetaData->rgMethodImplTokens[m].methodBody)
+ {
+ mdToken mdDecl = bmtMetaData->rgMethodImplTokens[m].methodDecl;
+ bmtMethodHandle declMethod;
+
+ // Get the parent token for the decl method token
+ mdToken tkParent = mdTypeDefNil;
+ if (TypeFromToken(mdDecl) == mdtMethodDef || TypeFromToken(mdDecl) == mdtMemberRef)
+ {
+ if (FAILED(hr = GetMDImport()->GetParentToken(mdDecl,&tkParent)))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+ }
+
+ if (GetCl() == tkParent)
+ { // The DECL has been declared within the class that we're currently building.
+ hr = S_OK;
+
+ if(bmtError->pThrowable != NULL)
+ {
+ *(bmtError->pThrowable) = NULL;
+ }
+
+ if(TypeFromToken(mdDecl) != mdtMethodDef)
+ {
+ if (FAILED(hr = FindMethodDeclarationForMethodImpl(
+ mdDecl, &mdDecl, TRUE)))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+ }
+
+ CONSISTENCY_CHECK(TypeFromToken(mdDecl) == mdtMethodDef);
+ declMethod = bmtMethod->FindDeclaredMethodByToken(mdDecl);
+ }
+ else
+ { // We can't call GetDescFromMemberDefOrRef here because this
+ // method depends on a fully-loaded type, including parent types,
+ // which is not always guaranteed. In particular, it requires that
+ // the instantiation dictionary be filled. The solution is the following:
+ // 1. Load the approximate type that the method belongs to.
+ // 2. Get or create the correct substitution for the type involved
+ // 3. Iterate the introduced methods on that type looking for a matching
+ // method.
+
+ LPCUTF8 szName = NULL;
+ PCCOR_SIGNATURE pSig = NULL;
+ ULONG cbSig;
+ if (TypeFromToken(mdDecl) == mdtMethodDef)
+ { // Different methods are used to access MethodDef and MemberRef
+ // names and signatures.
+ if (FAILED(GetMDImport()->GetNameOfMethodDef(mdDecl, &szName)) ||
+ FAILED(GetMDImport()->GetSigOfMethodDef(mdDecl, &cbSig, &pSig)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ }
+ else
+ {
+ if (FAILED(GetMDImport()->GetNameAndSigOfMemberRef(mdDecl, &pSig, &cbSig, &szName)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ }
+
+ Substitution *pDeclSubst = &bmtMetaData->pMethodDeclSubsts[m];
+ MethodTable * pDeclMT = NULL;
+ MethodSignature declSig(GetModule(), szName, pSig, cbSig, pDeclSubst);
+
+ { // 1. Load the approximate type.
+ // Block for the LoadsTypeViolation.
+ CONTRACT_VIOLATION(LoadsTypeViolation);
+ pDeclMT = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(
+ GetModule(),
+ tkParent,
+ &bmtGenerics->typeContext,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef,
+ ClassLoader::LoadTypes,
+ CLASS_LOAD_APPROXPARENTS,
+ TRUE).GetMethodTable()->GetCanonicalMethodTable();
+ }
+
+ { // 2. Get or create the correct substitution
+ bmtRTType * pDeclType = NULL;
+
+ if (pDeclMT->IsInterface())
+ { // If the declaration method is a part of an interface, search through
+ // the interface map to find the matching interface so we can provide
+ // the correct substitution chain.
+ pDeclType = NULL;
+
+ bmtInterfaceEntry * pItfEntry = NULL;
+ for (DWORD i = 0; i < bmtInterface->dwInterfaceMapSize; i++)
+ {
+ bmtRTType * pCurItf = bmtInterface->pInterfaceMap[i].GetInterfaceType();
+ // Type Equivalence is not respected for this comparison as you can have multiple type-equivalent interfaces on a class
+ TokenPairList newVisited = TokenPairList::AdjustForTypeEquivalenceForbiddenScope(NULL);
+ if (MetaSig::CompareTypeDefsUnderSubstitutions(
+ pCurItf->GetMethodTable(), pDeclMT,
+ &pCurItf->GetSubstitution(), pDeclSubst,
+ &newVisited))
+ {
+ pItfEntry = &bmtInterface->pInterfaceMap[i];
+ pDeclType = pCurItf;
+ break;
+ }
+ }
+
+ if (pDeclType == NULL)
+ {
+ DWORD equivalenceSet = 0;
+
+ for (DWORD i = 0; i < bmtInterface->dwInterfaceMapSize; i++)
+ {
+ bmtRTType * pCurItf = bmtInterface->pInterfaceMap[i].GetInterfaceType();
+ // Type Equivalence is respected for this comparison as we just need to find an
+ // equivalent interface; the particular interface is unimportant
+ if (MetaSig::CompareTypeDefsUnderSubstitutions(
+ pCurItf->GetMethodTable(), pDeclMT,
+ &pCurItf->GetSubstitution(), pDeclSubst,
+ NULL))
+ {
+ equivalenceSet = bmtInterface->pInterfaceMap[i].GetInterfaceEquivalenceSet();
+ pItfEntry = &bmtInterface->pInterfaceMap[i];
+ break;
+ }
+ }
+
+ if (equivalenceSet == 0)
+ {
+ // Interface is not implemented by this type.
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_NOTIMPLEMENTED, it.Token());
+ }
+
+ // Interface is not implemented by this type exactly. We need to consider this MethodImpl for non-exact interface matches,
+ // as the only match may be one of the non-exact matches.
+ bmtMetaData->rgMethodImplTokens[m].fConsiderDuringInexactMethodImplProcessing = true;
+ bmtMetaData->rgMethodImplTokens[m].fThrowIfUnmatchedDuringInexactMethodImplProcessing = true;
+ bmtMetaData->rgMethodImplTokens[m].interfaceEquivalenceSet = equivalenceSet;
+ bmtMethod->dwNumberInexactMethodImplCandidates++;
+ continue; // Move on to other MethodImpls
+ }
+ else
+ {
+ // This method impl may need to match other methods during inexact processing
+ if (pItfEntry->InEquivalenceSetWithMultipleEntries())
+ {
+ bmtMetaData->rgMethodImplTokens[m].fConsiderDuringInexactMethodImplProcessing = true;
+ bmtMetaData->rgMethodImplTokens[m].fThrowIfUnmatchedDuringInexactMethodImplProcessing = false;
+ bmtMetaData->rgMethodImplTokens[m].interfaceEquivalenceSet = pItfEntry->GetInterfaceEquivalenceSet();
+ bmtMethod->dwNumberInexactMethodImplCandidates++;
+ }
+ }
+
+ // 3. Find the matching method.
+ declMethod = FindDeclMethodOnInterfaceEntry(pItfEntry, declSig);
+ }
+ else
+ { // Assume the MethodTable is a parent of the current type,
+ // and create the substitution chain to match it.
+
+ pDeclType = NULL;
+
+ for (bmtRTType *pCur = GetParentType();
+ pCur != NULL;
+ pCur = pCur->GetParentType())
+ {
+ if (pCur->GetMethodTable() == pDeclMT)
+ {
+ pDeclType = pCur;
+ break;
+ }
+ }
+
+ if (pDeclType == NULL)
+ { // Method's type is not a parent.
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_DECLARATIONNOTFOUND, it.Token());
+ }
+
+ // 3. Find the matching method.
+ bmtRTType *pCurDeclType = pDeclType;
+ do
+ {
+ // two pass algorithm. search for exact matches followed
+ // by equivalent matches.
+ for (int iPass = 0; (iPass < 2) && (declMethod.IsNull()); iPass++)
+ {
+ MethodTable *pCurDeclMT = pCurDeclType->GetMethodTable();
+
+ MethodTable::IntroducedMethodIterator methIt(pCurDeclMT);
+ for(; methIt.IsValid(); methIt.Next())
+ {
+ MethodDesc * pCurMD = methIt.GetMethodDesc();
+
+ if (pCurDeclMT != pDeclMT)
+ {
+ // If the method isn't on the declaring type, then it must be virtual.
+ if (!pCurMD->IsVirtual())
+ continue;
+ }
+ if (strcmp(szName, pCurMD->GetName()) == 0)
+ {
+ PCCOR_SIGNATURE pCurMDSig;
+ DWORD cbCurMDSig;
+ pCurMD->GetSig(&pCurMDSig, &cbCurMDSig);
+
+ // The first-pass search for declaration methods should not use type equivalence
+ TokenPairList newVisited = TokenPairList::AdjustForTypeEquivalenceForbiddenScope(NULL);
+
+ if (MetaSig::CompareMethodSigs(
+ declSig.GetSignature(),
+ static_cast<DWORD>(declSig.GetSignatureLength()),
+ declSig.GetModule(),
+ &declSig.GetSubstitution(),
+ pCurMDSig,
+ cbCurMDSig,
+ pCurMD->GetModule(),
+ &pCurDeclType->GetSubstitution(),
+ iPass == 0 ? &newVisited : NULL))
+ {
+ declMethod = (*bmtParent->pSlotTable)[pCurMD->GetSlot()].Decl();
+ break;
+ }
+ }
+ }
+ }
+
+ pCurDeclType = pCurDeclType->GetParentType();
+ } while ((pCurDeclType != NULL) && (declMethod.IsNull()));
+ }
+
+ if (declMethod.IsNull())
+ { // We would prefer to let this fall through to the BuildMethodTableThrowException
+ // below, but v2.0 and earlier behaviour threw a MissingMethodException here,
+ // primarily because this code used to be a simple call to
+ // MemberLoader::GetDescFromMemberDefOrRef (see above for why that changed),
+ // so we must continue to do the same.
+ MemberLoader::ThrowMissingMethodException(
+ pDeclMT,
+ declSig.GetName(),
+ declSig.GetModule(),
+ declSig.GetSignature(),
+ static_cast<DWORD>(declSig.GetSignatureLength()),
+ &bmtGenerics->typeContext);
+ }
+ }
+ }
+
+ if (declMethod.IsNull())
+ { // Method not found, throw.
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_DECLARATIONNOTFOUND, it.Token());
+ }
+
+ if (!IsMdVirtual(declMethod.GetDeclAttrs()))
+ { // Make sure the decl is virtual
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_MUSTBEVIRTUAL, it.Token());
+ }
+
+ bmtMethodImpl->AddMethodImpl(*it, declMethod, GetStackingAllocator());
+ }
+ }
+ }
+ } /* end ... for each member */
+}
+
+//*******************************************************************************
+// InitMethodDesc takes a pointer to space that's already allocated for the
+// particular type of MethodDesc, and initializes based on the other info.
+// This factors logic between PlaceMembers (the regular code path) & AddMethod
+// (Edit & Continue (EnC) code path) so we don't have to maintain separate copies.
+VOID
+MethodTableBuilder::InitMethodDesc(
+ MethodDesc * pNewMD, // This should actually be of the correct sub-type, based on Classification
+ DWORD Classification,
+ mdToken tok,
+ DWORD dwImplFlags,
+ DWORD dwMemberAttrs,
+ BOOL fEnC,
+ DWORD RVA, // Only needed for NDirect case
+ IMDInternalImport * pIMDII, // Needed for NDirect, EEImpl(Delegate) cases
+ LPCSTR pMethodName // Only needed for mcEEImpl (Delegate) case
+ COMMA_INDEBUG(LPCUTF8 pszDebugMethodName)
+ COMMA_INDEBUG(LPCUTF8 pszDebugClassName)
+ COMMA_INDEBUG(LPCUTF8 pszDebugMethodSignature)
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ if (fEnC) { GC_NOTRIGGER; } else { GC_TRIGGERS; }
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "EEC::IMD: pNewMD:0x%x for tok:0x%x (%s::%s)\n",
+ pNewMD, tok, pszDebugClassName, pszDebugMethodName));
+
+ // Now we know the classification we can perform any classification specific initialization.
+
+ // The method desc is zero inited by the caller.
+
+ switch (Classification)
+ {
+ case mcNDirect:
+ {
+ // NDirect specific initialization.
+ NDirectMethodDesc *pNewNMD = (NDirectMethodDesc*)pNewMD;
+
+ // Allocate writeable data
+ pNewNMD->ndirect.m_pWriteableData = (NDirectWriteableData*)
+ AllocateFromHighFrequencyHeap(S_SIZE_T(sizeof(NDirectWriteableData)));
+
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ pNewNMD->ndirect.m_pImportThunkGlue = Precode::Allocate(PRECODE_NDIRECT_IMPORT, pNewMD,
+ GetLoaderAllocator(), GetMemTracker())->AsNDirectImportPrecode();
+#else // !HAS_NDIRECT_IMPORT_PRECODE
+ pNewNMD->GetNDirectImportThunkGlue()->Init(pNewNMD);
+#endif // !HAS_NDIRECT_IMPORT_PRECODE
+
+#if defined(_TARGET_X86_)
+ pNewNMD->ndirect.m_cbStackArgumentSize = 0xFFFF;
+#endif // defined(_TARGET_X86_)
+
+#ifdef FEATURE_MIXEDMODE // IJW
+ if (RVA != 0 && IsMiUnmanaged(dwImplFlags) && IsMiNative(dwImplFlags))
+ {
+ // Note that we cannot initialize the stub directly now in the general case,
+ // as LoadLibrary may not have been performed yet.
+ pNewNMD->SetIsEarlyBound();
+ }
+#endif // FEATURE_MIXEDMODE
+
+ pNewNMD->GetWriteableData()->m_pNDirectTarget = pNewNMD->GetNDirectImportThunkGlue()->GetEntrypoint();
+ }
+ break;
+
+ case mcFCall:
+ break;
+
+ case mcEEImpl:
+ // For the Invoke method we will set a standard invoke method.
+ BAD_FORMAT_NOTHROW_ASSERT(IsDelegate());
+
+ // For the asserts, either the pointer is NULL (since the class hasn't
+ // been constructed yet), or we're in EnC mode, meaning that the class
+ // does exist, but we may be re-assigning the field to point to an
+ // updated MethodDesc
+
+ // It is not allowed for EnC to replace one of the runtime builtin methods
+
+ if (strcmp(pMethodName, "Invoke") == 0)
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(NULL == ((DelegateEEClass*)GetHalfBakedClass())->m_pInvokeMethod);
+ ((DelegateEEClass*)GetHalfBakedClass())->m_pInvokeMethod = pNewMD;
+ }
+ else if (strcmp(pMethodName, "BeginInvoke") == 0)
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(NULL == ((DelegateEEClass*)GetHalfBakedClass())->m_pBeginInvokeMethod);
+ ((DelegateEEClass*)GetHalfBakedClass())->m_pBeginInvokeMethod = pNewMD;
+ }
+ else if (strcmp(pMethodName, "EndInvoke") == 0)
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(NULL == ((DelegateEEClass*)GetHalfBakedClass())->m_pEndInvokeMethod);
+ ((DelegateEEClass*)GetHalfBakedClass())->m_pEndInvokeMethod = pNewMD;
+ }
+ else
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_GENERAL);
+ }
+
+ // StoredSig specific initialization
+ {
+ StoredSigMethodDesc *pNewSMD = (StoredSigMethodDesc*) pNewMD;
+ DWORD cSig;
+ PCCOR_SIGNATURE pSig;
+ if (FAILED(pIMDII->GetSigOfMethodDef(tok, &cSig, &pSig)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ pNewSMD->SetStoredMethodSig(pSig, cSig);
+ }
+ break;
+
+#ifdef FEATURE_COMINTEROP
+ case mcComInterop:
+#endif // FEATURE_COMINTEROP
+ case mcIL:
+ break;
+
+ case mcInstantiated:
+#ifdef EnC_SUPPORTED
+ if (fEnC)
+ {
+ // We reuse the instantiated methoddescs to get the slot
+ InstantiatedMethodDesc* pNewIMD = (InstantiatedMethodDesc*) pNewMD;
+ pNewIMD->SetupEnCAddedMethod();
+ }
+ else
+#endif // EnC_SUPPORTED
+ {
+ // Initialize the typical instantiation.
+ InstantiatedMethodDesc* pNewIMD = (InstantiatedMethodDesc*) pNewMD;
+ //data has the same lifetime as method table, use our allocator
+ pNewIMD->SetupGenericMethodDefinition(pIMDII, GetLoaderAllocator(), GetMemTracker(), GetModule(),
+ tok);
+ }
+ break;
+
+ default:
+ BAD_FORMAT_NOTHROW_ASSERT(!"Failed to set a method desc classification");
+ }
+
+ // Check the method desc's classification.
+ _ASSERTE(pNewMD->GetClassification() == Classification);
+
+ pNewMD->SetMemberDef(tok);
+
+ if (IsMdStatic(dwMemberAttrs))
+ pNewMD->SetStatic();
+
+ // Set suppress unmanaged code access permission attribute
+
+ if (pNewMD->IsNDirect())
+ pNewMD->ComputeSuppressUnmanagedCodeAccessAttr(pIMDII);
+
+#ifdef _DEBUG
+ // Mark as many methods as synchronized as possible.
+ //
+ // Note that this can easily cause programs to deadlock, and that
+ // should not be treated as a bug in the program.
+
+ static ConfigDWORD stressSynchronized;
+ DWORD stressSynchronizedVal = stressSynchronized.val(CLRConfig::INTERNAL_stressSynchronized);
+
+ bool isStressSynchronized = stressSynchronizedVal &&
+ pNewMD->IsIL() && // Synchronized is not supported on Ecalls, NDirect methods, etc.
+ // IsValueClass() and IsEnum() do not work for System.ValueType and System.Enum themselves
+ ((g_pValueTypeClass != NULL && g_pEnumClass != NULL &&
+ !IsValueClass()) || // Can not synchronize on byref "this"
+ IsMdStatic(dwMemberAttrs)) && // IsStatic() blows up in _DEBUG as pNewMD is not fully inited
+ g_pObjectClass != NULL; // Ignore Object:* since "this" could be a boxed object
+
+ // stressSynchronized=1 turns off the stress in the system domain to reduce
+ // the chances of spurious deadlocks. Deadlocks in user code can still occur.
+ // stressSynchronized=2 will probably cause more deadlocks, and is not recommended
+ if (stressSynchronizedVal == 1 && GetAssembly()->IsSystem())
+ isStressSynchronized = false;
+
+ if (IsMiSynchronized(dwImplFlags) || isStressSynchronized)
+#else // !_DEBUG
+ if (IsMiSynchronized(dwImplFlags))
+#endif // !_DEBUG
+ pNewMD->SetSynchronized();
+
+#ifdef _DEBUG
+ pNewMD->m_pszDebugMethodName = (LPUTF8)pszDebugMethodName;
+ pNewMD->m_pszDebugClassName = (LPUTF8)pszDebugClassName;
+ pNewMD->m_pDebugMethodTable.SetValue(GetHalfBakedMethodTable());
+
+ if (pszDebugMethodSignature == NULL)
+ pNewMD->m_pszDebugMethodSignature = FormatSig(pNewMD,pNewMD->GetLoaderAllocator()->GetLowFrequencyHeap(),GetMemTracker());
+ else
+ pNewMD->m_pszDebugMethodSignature = pszDebugMethodSignature;
+#endif // _DEBUG
+} // MethodTableBuilder::InitMethodDesc
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+VOID
+MethodTableBuilder::AddMethodImplDispatchMapping(
+ DispatchMapTypeID typeID,
+ SLOT_INDEX slotNumber,
+ bmtMDMethod * pImplMethod)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc * pMDImpl = pImplMethod->GetMethodDesc();
+
+ // Look for an existing entry in the map.
+ DispatchMapBuilder::Iterator it(bmtVT->pDispatchMapBuilder);
+ if (bmtVT->pDispatchMapBuilder->Find(typeID, slotNumber, it))
+ {
+ // Throw if this entry has already previously been MethodImpl'd.
+ if (it.IsMethodImpl())
+ {
+ // NOTE: This is where we check for duplicate overrides. This is the easiest place to check
+ // because duplicate overrides could in fact have separate MemberRefs to the same
+ // member and so just comparing tokens at the very start would not be enough.
+ if (it.GetTargetMD() != pMDImpl)
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_MULTIPLEOVERRIDES, pMDImpl->GetMemberDef());
+ }
+ }
+ // This is the first MethodImpl. That's ok.
+ else
+ {
+ it.SetTarget(pMDImpl);
+ it.SetIsMethodImpl();
+ }
+ }
+ // A mapping for this interface method does not exist, so insert it.
+ else
+ {
+ bmtVT->pDispatchMapBuilder->InsertMDMapping(
+ typeID,
+ slotNumber,
+ pMDImpl,
+ TRUE);
+ }
+
+ // Save the entry into the vtable as well, if it isn't an interface methodImpl
+ if (typeID == DispatchMapTypeID::ThisClassID())
+ {
+ bmtVT->SetVirtualMethodImpl(slotNumber, pImplMethod);
+ }
+} // MethodTableBuilder::AddMethodImplDispatchMapping
+
+//*******************************************************************************
+VOID
+MethodTableBuilder::MethodImplCompareSignatures(
+ bmtMethodHandle hDecl,
+ bmtMethodHandle hImpl,
+ DWORD dwConstraintErrorCode)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!hDecl.IsNull());
+ PRECONDITION(!hImpl.IsNull());
+ PRECONDITION(TypeFromToken(hDecl.GetMethodSignature().GetToken()) == mdtMethodDef);
+ PRECONDITION(TypeFromToken(hImpl.GetMethodSignature().GetToken()) == mdtMethodDef);
+ } CONTRACTL_END;
+
+ const MethodSignature &declSig(hDecl.GetMethodSignature());
+ const MethodSignature &implSig(hImpl.GetMethodSignature());
+
+ if (!MethodSignature::SignaturesEquivalent(declSig, implSig))
+ {
+ LOG((LF_CLASSLOADER, LL_INFO1000, "BADSIG placing MethodImpl: %x\n", declSig.GetToken()));
+ BuildMethodTableThrowException(COR_E_TYPELOAD, IDS_CLASSLOAD_MI_BADSIGNATURE, declSig.GetToken());
+ }
+
+ // Now compare the method constraints.
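+ // E.g., a hypothetical sketch of a mismatch rejected here (the C# compiler
+ // would not emit this - explicit implementations inherit constraints - but
+ // hand-written IL can produce it):
+ //
+ //     interface I { void M<T>() where T : class; }
+ //     class C : I
+ //     {
+ //         void I.M<T>() { }  // IL declares 'where T : struct' -> dwConstraintErrorCode
+ //     }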
+ if (!MetaSig::CompareMethodConstraints(&implSig.GetSubstitution(), implSig.GetModule(), implSig.GetToken(),
+ &declSig.GetSubstitution(), declSig.GetModule(), declSig.GetToken()))
+ {
+ BuildMethodTableThrowException(dwConstraintErrorCode, implSig.GetToken());
+ }
+}
+
+//*******************************************************************************
+// We should have collected all the method impls. Cycle through them creating the method impl
+// structure that holds the information about which slots are overridden.
+VOID
+MethodTableBuilder::PlaceMethodImpls()
+{
+ STANDARD_VM_CONTRACT;
+
+ if(bmtMethodImpl->pIndex == 0)
+ {
+ return;
+ }
+
+ // Allocate some temporary storage. The number of overrides for a single method impl
+ // cannot be greater than the number of vtable slots.
+ DWORD * slots = new (&GetThread()->m_MarshalAlloc) DWORD[bmtVT->cVirtualSlots];
+ MethodDesc ** replaced = new (&GetThread()->m_MarshalAlloc) MethodDesc*[bmtVT->cVirtualSlots];
+
+ DWORD iEntry = 0;
+ bmtMDMethod * pCurImplMethod = bmtMethodImpl->GetImplementationMethod(iEntry);
+
+ DWORD slotIndex = 0;
+
+ // The impls are sorted according to the method descs for the body of the method impl.
+ // Loop through the impls until the next body is found. When a single body
+ // has been done move the slots implemented and method descs replaced into the storage
+ // found on the body method desc.
+ while (true)
+ { // collect information until we reach the next body
+
+ // Get the declaration part of the method impl. It will either be a token
+ // (declaration is on this type) or a method desc.
+ bmtMethodHandle hDeclMethod = bmtMethodImpl->GetDeclarationMethod(iEntry);
+ if(hDeclMethod.IsMDMethod())
+ { // The declaration is on the type being built
+ bmtMDMethod * pCurDeclMethod = hDeclMethod.AsMDMethod();
+
+ mdToken mdef = pCurDeclMethod->GetMethodSignature().GetToken();
+ if (bmtMethodImpl->IsBody(mdef))
+ { // A method declared on this class cannot be both a decl and an impl
+ BuildMethodTableThrowException(IDS_CLASSLOAD_MI_MULTIPLEOVERRIDES, mdef);
+ }
+
+ // Throws
+ PlaceLocalDeclaration(pCurDeclMethod,
+ pCurImplMethod,
+ slots, // Adds override to the slot and replaced arrays.
+ replaced,
+ &slotIndex); // Increments count
+ }
+ else
+ {
+ bmtRTMethod * pCurDeclMethod = hDeclMethod.AsRTMethod();
+
+ // Do not use pDecl->IsInterface here as that asks the method table and the MT may not yet be set up.
+ if(pCurDeclMethod->GetOwningType()->IsInterface())
+ {
+ // Throws
+ PlaceInterfaceDeclaration(pCurDeclMethod,
+ pCurImplMethod,
+ slots,
+ replaced,
+ &slotIndex); // Increments count
+ }
+ else
+ {
+ // Throws
+ PlaceParentDeclaration(pCurDeclMethod,
+ pCurImplMethod,
+ slots,
+ replaced,
+ &slotIndex); // Increments count
+ }
+ }
+
+ iEntry++;
+
+ if(iEntry == bmtMethodImpl->pIndex)
+ { // We hit the end of the list so dump the current data and leave
+ WriteMethodImplData(pCurImplMethod, slotIndex, slots, replaced);
+ break;
+ }
+ else
+ {
+ bmtMDMethod * pNextImplMethod = bmtMethodImpl->GetImplementationMethod(iEntry);
+
+ if (pNextImplMethod != pCurImplMethod)
+ { // If we're moving on to a new body, dump the current data and reset the counter
+ WriteMethodImplData(pCurImplMethod, slotIndex, slots, replaced);
+ slotIndex = 0;
+ }
+
+ pCurImplMethod = pNextImplMethod;
+ }
+ } // while (true)
+} // MethodTableBuilder::PlaceMethodImpls
+
+//*******************************************************************************
+VOID
+MethodTableBuilder::WriteMethodImplData(
+ bmtMDMethod * pImplMethod,
+ DWORD cSlots,
+ DWORD * rgSlots,
+ MethodDesc ** rgDeclMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Use the number of overrides to push information onto the method desc.
+ // We store the slots that are overridden and the method descs that are
+ // replaced. That way, when a derived class needs to determine whether a
+ // method is to be overridden, it can check the name against the replaced
+ // method desc rather than the body's name.
+ if (cSlots == 0)
+ {
+ //@TODO:NEWVTWORK: Determine methodImpl status so that we don't need this workaround.
+ //@TODO:NEWVTWORK: This occurs when only interface decls are involved, since
+ //@TODO:NEWVTWORK: these are stored in the dispatch map and not on the methoddesc.
+ }
+ else
+ {
+ MethodImpl * pImpl = pImplMethod->GetMethodDesc()->GetMethodImpl();
+
+ // Set the size of the info the MethodImpl needs to keep track of.
+ pImpl->SetSize(GetLoaderAllocator()->GetHighFrequencyHeap(), GetMemTracker(), cSlots);
+
+ // Gasp we do a bubble sort. Should change this to a qsort..
+ for (DWORD i = 0; i < cSlots; i++)
+ {
+ for (DWORD j = i+1; j < cSlots; j++)
+ {
+ if (rgSlots[j] < rgSlots[i])
+ {
+ MethodDesc * mTmp = rgDeclMD[i];
+ rgDeclMD[i] = rgDeclMD[j];
+ rgDeclMD[j] = mTmp;
+
+ DWORD sTmp = rgSlots[i];
+ rgSlots[i] = rgSlots[j];
+ rgSlots[j] = sTmp;
+ }
+ }
+ }
+
+ // Go and set the method impl
+ pImpl->SetData(rgSlots, rgDeclMD);
+
+ GetHalfBakedClass()->SetContainsMethodImpls();
+ }
+} // MethodTableBuilder::WriteMethodImplData
+
+//*******************************************************************************
+VOID
+MethodTableBuilder::PlaceLocalDeclaration(
+ bmtMDMethod * pDecl,
+ bmtMDMethod * pImpl,
+ DWORD * slots,
+ MethodDesc ** replaced,
+ DWORD * pSlotIndex)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(bmtVT->pDispatchMapBuilder));
+ PRECONDITION(CheckPointer(pDecl));
+ PRECONDITION(CheckPointer(pImpl));
+ }
+ CONTRACTL_END
+
+ if (!bmtProp->fNoSanityChecks)
+ {
+ ///////////////////////////////
+ // Verify the signatures match
+
+ MethodImplCompareSignatures(
+ pDecl,
+ pImpl,
+ IDS_CLASSLOAD_CONSTRAINT_MISMATCH_ON_LOCAL_METHOD_IMPL);
+
+ ///////////////////////////////
+ // Validate the method impl.
+
+ TestMethodImpl(
+ bmtMethodHandle(pDecl),
+ bmtMethodHandle(pImpl));
+ }
+
+ // Don't allow overrides for any of the four special runtime implemented delegate methods
+ if (IsDelegate())
+ {
+ LPCUTF8 strMethodName = pDecl->GetMethodSignature().GetName();
+ if ((strcmp(strMethodName, COR_CTOR_METHOD_NAME) == 0) ||
+ (strcmp(strMethodName, "Invoke") == 0) ||
+ (strcmp(strMethodName, "BeginInvoke") == 0) ||
+ (strcmp(strMethodName, "EndInvoke") == 0))
+ {
+ BuildMethodTableThrowException(
+ IDS_CLASSLOAD_MI_CANNOT_OVERRIDE,
+ pDecl->GetMethodSignature().GetToken());
+ }
+ }
+
+ ///////////////////
+ // Add the mapping
+
+ // Call helper to add it. Will throw if decl is already MethodImpl'd
+ CONSISTENCY_CHECK(pDecl->GetSlotIndex() == static_cast<SLOT_INDEX>(pDecl->GetMethodDesc()->GetSlot()));
+ AddMethodImplDispatchMapping(
+ DispatchMapTypeID::ThisClassID(),
+ pDecl->GetSlotIndex(),
+ pImpl);
+
+ // We implement this slot, record it
+ slots[*pSlotIndex] = pDecl->GetSlotIndex();
+ replaced[*pSlotIndex] = pDecl->GetMethodDesc();
+
+ // increment the counter
+ (*pSlotIndex)++;
+} // MethodTableBuilder::PlaceLocalDeclaration
+
+//*******************************************************************************
+VOID MethodTableBuilder::PlaceInterfaceDeclaration(
+ bmtRTMethod * pDecl,
+ bmtMDMethod * pImpl,
+ DWORD* slots,
+ MethodDesc** replaced,
+ DWORD* pSlotIndex)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pDecl));
+ PRECONDITION(CheckPointer(pImpl));
+ PRECONDITION(pDecl->GetMethodDesc()->IsInterface());
+ PRECONDITION(CheckPointer(bmtVT->pDispatchMapBuilder));
+ } CONTRACTL_END;
+
+ MethodDesc * pDeclMD = pDecl->GetMethodDesc();
+ MethodTable * pDeclMT = pDeclMD->GetMethodTable();
+
+ // Note that the fact that pDecl is non-NULL means that we found the
+ // declaration token to be owned by a declared interface for this type.
+
+ if (!bmtProp->fNoSanityChecks)
+ {
+ ///////////////////////////////
+ // Verify the signatures match
+
+ MethodImplCompareSignatures(
+ pDecl,
+ pImpl,
+ IDS_CLASSLOAD_CONSTRAINT_MISMATCH_ON_INTERFACE_METHOD_IMPL);
+
+ ///////////////////////////////
+ // Validate the method impl.
+
+ TestMethodImpl(
+ bmtMethodHandle(pDecl),
+ bmtMethodHandle(pImpl));
+ }
+
+ ///////////////////
+ // Add the mapping
+
+ // Note that we need only one DispatchMapTypeID for this interface (though there might be more if there
+ // are duplicates). The first one is easy to get, but we could (in theory) use the last one or a random
+ // one.
+ // Q: Why don't we have to place this method for all duplicate interfaces? Because VSD knows about
+ // duplicates and finds the right (latest) implementation for us - see
+ // code:MethodTable::MethodDataInterfaceImpl::PopulateNextLevel#ProcessAllDuplicates.
+ UINT32 cInterfaceDuplicates;
+ DispatchMapTypeID firstDispatchMapTypeID;
+ ComputeDispatchMapTypeIDs(
+ pDeclMT,
+ &pDecl->GetMethodSignature().GetSubstitution(),
+ &firstDispatchMapTypeID,
+ 1,
+ &cInterfaceDuplicates);
+ CONSISTENCY_CHECK(cInterfaceDuplicates >= 1);
+ CONSISTENCY_CHECK(firstDispatchMapTypeID.IsImplementedInterface());
+
+ // Call helper to add it. Will throw if decl is already MethodImpl'd
+ CONSISTENCY_CHECK(pDecl->GetSlotIndex() == static_cast<SLOT_INDEX>(pDecl->GetMethodDesc()->GetSlot()));
+ AddMethodImplDispatchMapping(
+ firstDispatchMapTypeID,
+ pDecl->GetSlotIndex(),
+ pImpl);
+
+ if (IsCompilationProcess())
+ {
+ //
+ // Mark this interface as overridable. It is used to skip generation of
+ // CCWs stubs during NGen (see code:MethodNeedsReverseComStub)
+ //
+ if (!IsMdFinal(pImpl->GetDeclAttrs()))
+ {
+ pDeclMT->GetWriteableDataForWrite()->SetIsOverridingInterface();
+ }
+ }
+
+#ifdef _DEBUG
+ if (bmtInterface->dbg_fShouldInjectInterfaceDuplicates)
+ { // We injected interface duplicates
+
+ // We have to MethodImpl all interface duplicates as all duplicates are 'declared on type' (see
+ // code:#InjectInterfaceDuplicates_ApproxInterfaces)
+ DispatchMapTypeID * rgDispatchMapTypeIDs = (DispatchMapTypeID *)_alloca(sizeof(DispatchMapTypeID) * cInterfaceDuplicates);
+ ComputeDispatchMapTypeIDs(
+ pDeclMT,
+ &pDecl->GetMethodSignature().GetSubstitution(),
+ rgDispatchMapTypeIDs,
+ cInterfaceDuplicates,
+ &cInterfaceDuplicates);
+ for (UINT32 nInterfaceDuplicate = 1; nInterfaceDuplicate < cInterfaceDuplicates; nInterfaceDuplicate++)
+ {
+ // Add MethodImpl record for each injected interface duplicate
+ AddMethodImplDispatchMapping(
+ rgDispatchMapTypeIDs[nInterfaceDuplicate],
+ pDecl->GetSlotIndex(),
+ pImpl);
+ }
+ }
+#endif //_DEBUG
+} // MethodTableBuilder::PlaceInterfaceDeclaration
+
+//*******************************************************************************
+VOID
+MethodTableBuilder::PlaceParentDeclaration(
+ bmtRTMethod * pDecl,
+ bmtMDMethod * pImpl,
+ DWORD * slots,
+ MethodDesc ** replaced,
+ DWORD * pSlotIndex)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pDecl));
+ PRECONDITION(CheckPointer(pImpl));
+ PRECONDITION(CheckPointer(bmtVT->pDispatchMapBuilder));
+ PRECONDITION(CheckPointer(GetParentMethodTable()));
+ } CONTRACTL_END;
+
+ MethodDesc * pDeclMD = pDecl->GetMethodDesc();
+
+ // Note that the fact that pDecl is non-NULL means that we found the
+ // declaration token to be owned by a parent type.
+
+ if (!bmtProp->fNoSanityChecks)
+ {
+ /////////////////////////////////////////
+ // Verify that the signatures match
+
+ MethodImplCompareSignatures(
+ pDecl,
+ pImpl,
+ IDS_CLASSLOAD_CONSTRAINT_MISMATCH_ON_PARENT_METHOD_IMPL);
+
+ ////////////////////////////////
+ // Verify rules of method impls
+
+ TestMethodImpl(
+ bmtMethodHandle(pDecl),
+ bmtMethodHandle(pImpl));
+ }
+
+ ///////////////////
+ // Add the mapping
+
+ // Call helper to add it. Will throw if DECL is already MethodImpl'd
+ AddMethodImplDispatchMapping(
+ DispatchMapTypeID::ThisClassID(),
+ pDeclMD->GetSlot(),
+ pImpl);
+
+ // We implement this slot, record it
+ slots[*pSlotIndex] = pDeclMD->GetSlot();
+ replaced[*pSlotIndex] = pDeclMD;
+
+ // increment the counter
+ (*pSlotIndex)++;
+} // MethodTableBuilder::PlaceParentDeclaration
+
+//*******************************************************************************
+// This will validate that all interface methods that were matched during
+// layout also validate against type constraints.
+
+VOID MethodTableBuilder::ValidateInterfaceMethodConstraints()
+{
+ STANDARD_VM_CONTRACT;
+
+ DispatchMapBuilder::Iterator it(bmtVT->pDispatchMapBuilder);
+ for (; it.IsValid(); it.Next())
+ {
+ if (it.GetTypeID() != DispatchMapTypeID::ThisClassID())
+ {
+ bmtRTType * pItf = bmtInterface->pInterfaceMap[it.GetTypeID().GetInterfaceNum()].GetInterfaceType();
+
+ // Grab the method token
+ MethodTable * pMTItf = pItf->GetMethodTable();
+ CONSISTENCY_CHECK(CheckPointer(pMTItf->GetMethodDescForSlot(it.GetSlotNumber())));
+ mdMethodDef mdTok = pItf->GetMethodTable()->GetMethodDescForSlot(it.GetSlotNumber())->GetMemberDef();
+
+ // Default to the current module. The code immediately below determines if this
+ // assumption is incorrect.
+ Module * pTargetModule = GetModule();
+
+ // Get the module of the target method. Get it through the chunk to
+ // avoid triggering the assert that MethodTable is non-NULL. It may
+ // be null since it may belong to the type we're building right now.
+ MethodDesc * pTargetMD = it.GetTargetMD();
+
+ // If the target MethodDesc's MethodTable pointer is null, the target MethodDesc belongs
+ // to the current type (whose MethodTable is still being built). Otherwise, the MethodDesc
+ // MUST be owned by a parent of the type we're building.
+ BOOL fTargetIsOwnedByParent = !pTargetMD->GetMethodTablePtr()->IsNull();
+
+ // If the method is owned by a parent, we need to use the parent's module,
+ // and we must construct the substitution chain all the way up to the parent.
+ const Substitution *pSubstTgt = NULL;
+ if (fTargetIsOwnedByParent)
+ {
+ CONSISTENCY_CHECK(CheckPointer(GetParentType()));
+ bmtRTType *pTargetType = bmtRTType::FindType(GetParentType(), pTargetMD->GetMethodTable());
+ pSubstTgt = &pTargetType->GetSubstitution();
+ pTargetModule = pTargetType->GetModule();
+ }
+
+ // Now compare the method constraints.
+ if (!MetaSig::CompareMethodConstraints(pSubstTgt,
+ pTargetModule,
+ pTargetMD->GetMemberDef(),
+ &pItf->GetSubstitution(),
+ pMTItf->GetModule(),
+ mdTok))
+ {
+ LOG((LF_CLASSLOADER, LL_INFO1000,
+ "BADCONSTRAINTS on interface method implementation: %x\n", pTargetMD));
+ // This exception will be due to an implicit implementation, since explicit errors
+ // will be detected in MethodImplCompareSignatures (for now, anyway).
+ CONSISTENCY_CHECK(!it.IsMethodImpl());
+ DWORD idsError = it.IsMethodImpl() ?
+ IDS_CLASSLOAD_CONSTRAINT_MISMATCH_ON_INTERFACE_METHOD_IMPL :
+ IDS_CLASSLOAD_CONSTRAINT_MISMATCH_ON_IMPLICIT_IMPLEMENTATION;
+ if (fTargetIsOwnedByParent)
+ {
+ DefineFullyQualifiedNameForClass();
+ LPCUTF8 szClassName = GetFullyQualifiedNameForClassNestedAware(pTargetMD->GetMethodTable());
+ LPCUTF8 szMethodName = pTargetMD->GetName();
+
+ CQuickBytes qb;
+ // allocate enough room for "<class>.<method>\0"
+ size_t cchFullName = strlen(szClassName) + 1 + strlen(szMethodName) + 1;
+ LPUTF8 szFullName = (LPUTF8) qb.AllocThrows(cchFullName);
+ strcpy_s(szFullName, cchFullName, szClassName);
+ strcat_s(szFullName, cchFullName, ".");
+ strcat_s(szFullName, cchFullName, szMethodName);
+
+ BuildMethodTableThrowException(idsError, szFullName);
+ }
+ else
+ {
+ BuildMethodTableThrowException(idsError, pTargetMD->GetMemberDef());
+ }
+ }
+ }
+ }
+} // MethodTableBuilder::ValidateInterfaceMethodConstraints
+
+//*******************************************************************************
+// Used to allocate and initialize MethodDescs (both the boxed and unboxed entrypoints)
+VOID MethodTableBuilder::AllocAndInitMethodDescs()
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+ // Go over all MethodDescs and create smallest number of MethodDescChunks possible.
+ //
+ // Iterate over all methods and start a new chunk only if:
+ // - Token range (upper 24 bits of the method token) has changed.
+ // - The maximum size of the chunk has been reached.
+ //
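+ // Illustrative example (not in the original source): methods with tokens 0x06000001 and
+ // 0x06000002 share a token range and can live in one chunk, while a method with token
+ // 0x06000101 differs in the upper 24 bits and therefore starts a new chunk. A new chunk
+ // is likewise started when adding another MethodDesc would exceed
+ // MethodDescChunk::MaxSizeOfMethodDescs.
+ //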
+
+ int currentTokenRange = -1; // current token range
+ SIZE_T sizeOfMethodDescs = 0; // current running size of methodDesc chunk
+ int startIndex = 0; // start of the current chunk (index into bmtMethod array)
+
+ DeclaredMethodIterator it(*this);
+ while (it.Next())
+ {
+ int tokenRange = GetTokenRange(it.Token());
+
+ // This code assumes that the iterator returns tokens in ascending order. If this assumption does not hold,
+ // the code will still work, with a small performance penalty (the method desc chunk layout will be less efficient).
+ _ASSERTE(tokenRange >= currentTokenRange);
+
+ SIZE_T size = MethodDesc::GetBaseSize(GetMethodClassification(it->GetMethodType()));
+
+ // Add size of optional slots
+
+ if (it->GetMethodImplType() == METHOD_IMPL)
+ size += sizeof(MethodImpl);
+
+ if (it->GetSlotIndex() >= bmtVT->cVtableSlots)
+ size += sizeof(MethodDesc::NonVtableSlot); // slot
+
+ if (NeedsNativeCodeSlot(*it))
+ size += sizeof(MethodDesc::NativeCodeSlot);
+
+ // See comment in AllocAndInitMethodDescChunk
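+ // (Descriptive note: the doubling below reserves space for a second MethodDesc - the
+ // unboxed entrypoint - which AllocAndInitMethodDescChunk allocates immediately next to
+ // the unboxing stub.)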
+ if (NeedsTightlyBoundUnboxingStub(*it))
+ {
+ size *= 2;
+
+ if (bmtGenerics->GetNumGenericArgs() == 0) {
+ size += sizeof(MethodDesc::NonVtableSlot);
+ }
+ else {
+ bmtVT->cVtableSlots++;
+ }
+ }
+
+ if (tokenRange != currentTokenRange ||
+ sizeOfMethodDescs + size > MethodDescChunk::MaxSizeOfMethodDescs)
+ {
+ if (sizeOfMethodDescs != 0)
+ {
+ AllocAndInitMethodDescChunk(startIndex, it.CurrentIndex() - startIndex, sizeOfMethodDescs);
+ startIndex = it.CurrentIndex();
+ }
+
+ currentTokenRange = tokenRange;
+ sizeOfMethodDescs = 0;
+ }
+
+ sizeOfMethodDescs += size;
+ }
+
+ if (sizeOfMethodDescs != 0)
+ {
+ AllocAndInitMethodDescChunk(startIndex, NumDeclaredMethods() - startIndex, sizeOfMethodDescs);
+ }
+}
+
+//*******************************************************************************
+// Allocates and initializes one method desc chunk.
+//
+// Arguments:
+// startIndex - index of first method in bmtMethod array.
+// count - number of methods in this chunk (contiguous region from startIndex)
+// sizeOfMethodDescs - total expected size of MethodDescs in this chunk
+//
+// Used by AllocAndInitMethodDescs.
+//
+VOID MethodTableBuilder::AllocAndInitMethodDescChunk(COUNT_T startIndex, COUNT_T count, SIZE_T sizeOfMethodDescs)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(sizeOfMethodDescs <= MethodDescChunk::MaxSizeOfMethodDescs);
+ } CONTRACTL_END;
+
+ void * pMem = GetMemTracker()->Track(
+ GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(TADDR) + sizeof(MethodDescChunk) + sizeOfMethodDescs)));
+
+ // Skip pointer to temporary entrypoints
+ MethodDescChunk * pChunk = (MethodDescChunk *)((BYTE*)pMem + sizeof(TADDR));
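+
+ // Resulting layout of the allocation above (descriptive note):
+ // [TADDR slot for temporary entrypoints][MethodDescChunk header][MethodDesc 0][MethodDesc 1]...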
+
+ COUNT_T methodDescCount = 0;
+
+ SIZE_T offset = sizeof(MethodDescChunk);
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:22019) // Suppress PREFast warning about integer underflow
+#endif // _PREFAST_
+ for (COUNT_T i = 0; i < count; i++)
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif // _PREFAST_
+
+ {
+ bmtMDMethod * pMDMethod = (*bmtMethod)[static_cast<SLOT_INDEX>(startIndex + i)];
+
+ MethodDesc * pMD = (MethodDesc *)((BYTE *)pChunk + offset);
+
+ pMD->SetChunkIndex(pChunk);
+
+ InitNewMethodDesc(pMDMethod, pMD);
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:22018) // Suppress PREFast warning about integer underflow
+#endif // _PREFAST_
+ offset += pMD->SizeOf();
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif // _PREFAST_
+
+ methodDescCount++;
+
+ // If we're a value class, we want to create duplicate slots
+ // and MethodDescs for all methods in the vtable
+ // section (i.e. not non-virtual instance methods or statics).
+ // In the name of uniformity it would be much nicer
+ // if we created _all_ value class BoxedEntryPointStubs at this point.
+ // However, non-virtual instance methods only require unboxing
+ // stubs in the rare case that we create a delegate to such a
+ // method, and thus it would be inefficient to create them on
+ // loading: after all, typical structs will have many non-virtual
+ // instance methods.
+ //
+ // Unboxing stubs for non-virtual instance methods are created
+ // in code:MethodDesc::FindOrCreateAssociatedMethodDesc.
+
+ if (NeedsTightlyBoundUnboxingStub(pMDMethod))
+ {
+ MethodDesc * pUnboxedMD = (MethodDesc *)((BYTE *)pChunk + offset);
+
+ //////////////////////////////////
+ // Initialize the new MethodDesc
+
+ // <NICE> memcpy operations on data structures like MethodDescs are extremely fragile
+ // and should not be used. We should go to the effort of having proper constructors
+ // in the MethodDesc class. </NICE>
+
+ memcpy(pUnboxedMD, pMD, pMD->SizeOf());
+
+ // Reset the chunk index
+ pUnboxedMD->SetChunkIndex(pChunk);
+
+ if (bmtGenerics->GetNumGenericArgs() == 0) {
+ pUnboxedMD->SetHasNonVtableSlot();
+ }
+
+ //////////////////////////////////////////////////////////
+ // Modify the original MethodDesc to be an unboxing stub
+
+ pMD->SetIsUnboxingStub();
+
+ ////////////////////////////////////////////////////////////////////
+ // Add the new MethodDesc to the non-virtual portion of the vtable
+
+ if (!bmtVT->AddUnboxedMethod(pMDMethod))
+ BuildMethodTableThrowException(IDS_CLASSLOAD_TOO_MANY_METHODS);
+
+ pUnboxedMD->SetSlot(pMDMethod->GetUnboxedSlotIndex());
+ pMDMethod->SetUnboxedMethodDesc(pUnboxedMD);
+
+ offset += pUnboxedMD->SizeOf();
+ methodDescCount++;
+ }
+ }
+ _ASSERTE(offset == sizeof(MethodDescChunk) + sizeOfMethodDescs);
+
+ pChunk->SetSizeAndCount((ULONG)sizeOfMethodDescs, methodDescCount);
+
+ GetHalfBakedClass()->AddChunk(pChunk);
+}
+
+//*******************************************************************************
+BOOL
+MethodTableBuilder::NeedsTightlyBoundUnboxingStub(bmtMDMethod * pMDMethod)
+{
+ STANDARD_VM_CONTRACT;
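+
+ // Descriptive note, derived from the predicate below: a value class's virtual instance
+ // methods (excluding instantiated methods and RTSpecialName methods such as .ctors)
+ // get a tightly-bound unboxing stub allocated next to the unboxed entrypoint.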
+
+ return IsValueClass() &&
+ !IsMdStatic(pMDMethod->GetDeclAttrs()) &&
+ IsMdVirtual(pMDMethod->GetDeclAttrs()) &&
+ (pMDMethod->GetMethodType() != METHOD_TYPE_INSTANTIATED) &&
+ !IsMdRTSpecialName(pMDMethod->GetDeclAttrs());
+}
+
+//*******************************************************************************
+BOOL
+MethodTableBuilder::NeedsNativeCodeSlot(bmtMDMethod * pMDMethod)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_REMOTING
+ // Approximation of code:MethodDesc::IsRemotingInterceptedViaPrestub
+ if (MayBeRemotingIntercepted(pMDMethod) && !IsMdVirtual(pMDMethod->GetDeclAttrs()))
+ {
+ return TRUE;
+ }
+#endif
+
+ return GetModule()->IsEditAndContinueEnabled();
+}
+
+//*******************************************************************************
+BOOL
+MethodTableBuilder::MayBeRemotingIntercepted(bmtMDMethod * pMDMethod)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_REMOTING
+ return (IsMarshaledByRef() || g_pObjectClass == NULL) && !IsMdStatic(pMDMethod->GetDeclAttrs());
+#else
+ return FALSE;
+#endif
+}
+
+//*******************************************************************************
+VOID
+MethodTableBuilder::AllocAndInitDictionary()
+{
+ STANDARD_VM_CONTRACT;
+
+ // Allocate dictionary layout used by all compatible instantiations
+
+ if (bmtGenerics->fSharedByGenericInstantiations && !bmtGenerics->fContainsGenericVariables)
+ {
+ // We use the number of methods as a heuristic for the number of slots in the dictionary
+ // attached to shared class method tables.
+ // If there are no declared methods then we have no slots, and we will never do any token lookups
+ //
+ // Heuristics
+ // - Classes with a small number of methods (2-3) tend to be more likely to use new slots,
+ // whereas in classes with more methods, further methods tend to reuse slots from previous methods.
+ // = treat all classes with only 2-3 methods as if they have an extra method.
+ // - Classes with more generic parameters tend to use more slots.
+ // = multiply by 1.5 for 2 params or more
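+ //
+ // Illustrative example (not in the original source): a class with 5 declared
+ // non-abstract methods and 2 generic parameters gets numMethodsAdjusted = 5 and
+ // nTypeFactorBy2 = 3, so estNumTypeSlots = (5 * 3 + 2) / 3 = 5 dictionary slots.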
+
+ DWORD numMethodsAdjusted =
+ (bmtMethod->dwNumDeclaredNonAbstractMethods == 0)
+ ? 0
+ : (bmtMethod->dwNumDeclaredNonAbstractMethods < 3)
+ ? 3
+ : bmtMethod->dwNumDeclaredNonAbstractMethods;
+
+ _ASSERTE(bmtGenerics->GetNumGenericArgs() != 0);
+ DWORD nTypeFactorBy2 = (bmtGenerics->GetNumGenericArgs() == 1)
+ ? 2
+ : 3;
+
+ DWORD estNumTypeSlots = (numMethodsAdjusted * nTypeFactorBy2 + 2) / 3;
+ // estNumTypeSlots should fit in a WORD as long as we maintain the current
+ // limit on the number of methods in a type (approx 2^16).
+ _ASSERTE(FitsIn<WORD>(estNumTypeSlots));
+ WORD numTypeSlots = static_cast<WORD>(estNumTypeSlots);
+
+ if (numTypeSlots > 0)
+ {
+ // Dictionary layout is an optional field on EEClass, so ensure the optional field descriptor has
+ // been allocated.
+ EnsureOptionalFieldsAreAllocated(GetHalfBakedClass(), m_pAllocMemTracker, GetLoaderAllocator()->GetLowFrequencyHeap());
+ GetHalfBakedClass()->SetDictionaryLayout(DictionaryLayout::Allocate(numTypeSlots, bmtAllocator, m_pAllocMemTracker));
+ }
+ }
+
+}
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+// Compute the set of interfaces which are equivalent. Duplicates in the interface map
+// will be placed into different equivalence sets unless they participate in type equivalence.
+// This is a bit odd, but it turns out we only need to know about equivalence classes if
+// there is type equivalence involved in the interface, and not detecting, or detecting equivalence
+// in other cases does not result in differing behavior.
+//
+ // By restricting the reasons for having equivalence matches, we reduce the algorithm from one which
+ // is O(n*n) in the best case to one which will typically execute in something more like O(m*n) time,
+ // where m is the number of generic interfaces (although it is still O(n*n) in the worst case). The
+ // assumption is that equivalent and generic interfaces are relatively rare.
+VOID
+MethodTableBuilder::ComputeInterfaceMapEquivalenceSet()
+{
+ STANDARD_VM_CONTRACT;
+
+ UINT32 nextEquivalenceSet = 1;
+
+ for (DWORD dwCurInterface = 0;
+ dwCurInterface < bmtInterface->dwInterfaceMapSize;
+ dwCurInterface++)
+ {
+ // Keep track of the current interface we are trying to calculate the equivalence set of
+ bmtInterfaceEntry * pCurItfEntry = &bmtInterface->pInterfaceMap[dwCurInterface];
+ bmtRTType * pCurItf = pCurItfEntry->GetInterfaceType();
+ MethodTable * pCurItfMT = pCurItf->GetMethodTable();
+ const Substitution * pCurItfSubst = &pCurItf->GetSubstitution();
+
+ UINT32 currentEquivalenceSet = 0;
+
+ // Only interfaces with type equivalence, or that are generic, need to be compared for equivalence
+ if (pCurItfMT->HasTypeEquivalence() || pCurItfMT->HasInstantiation())
+ {
+ for (DWORD dwCurInterfaceCompare = 0;
+ dwCurInterfaceCompare < dwCurInterface;
+ dwCurInterfaceCompare++)
+ {
+ // Keep track of the current interface we are trying to calculate the equivalence set of
+ bmtInterfaceEntry * pCompareItfEntry = &bmtInterface->pInterfaceMap[dwCurInterfaceCompare];
+ bmtRTType * pCompareItf = pCompareItfEntry->GetInterfaceType();
+ MethodTable * pCompareItfMT = pCompareItf->GetMethodTable();
+ const Substitution * pCompareItfSubst = &pCompareItf->GetSubstitution();
+
+ // Only interfaces with type equivalence, or that are generic, need to be compared for equivalence
+ if (pCompareItfMT->HasTypeEquivalence() || pCompareItfMT->HasInstantiation())
+ {
+ if (MetaSig::CompareTypeDefsUnderSubstitutions(pCurItfMT,
+ pCompareItfMT,
+ pCurItfSubst,
+ pCompareItfSubst,
+ NULL))
+ {
+ currentEquivalenceSet = pCompareItfEntry->GetInterfaceEquivalenceSet();
+ // Use the equivalence set of the interface map entry we just found
+ pCurItfEntry->SetInterfaceEquivalenceSet(currentEquivalenceSet, true);
+ // Update the interface map entry we just found to indicate that it is part of an equivalence
+ // set with multiple entries.
+ pCompareItfEntry->SetInterfaceEquivalenceSet(currentEquivalenceSet, true);
+ break;
+ }
+ }
+ }
+ }
+
+ // If we did not find an equivalent interface above, use the next available equivalence set indicator
+ if (currentEquivalenceSet == 0)
+ {
+ pCurItfEntry->SetInterfaceEquivalenceSet(nextEquivalenceSet, false);
+ nextEquivalenceSet++;
+ }
+ }
+}
+
+//*******************************************************************************
+//
+// Used by PlaceInterfaceMethods
+//
+// Given an interface in our interface map, and a particular method on that interface, place
+// a method from the parent types implementation of an equivalent interface into that method
+// slot. Used by PlaceInterfaceMethods to make equivalent interface implementations have the
+// same behavior as if the parent interface was implemented on this type instead of an equivalent interface.
+//
+// This logic is used in situations such as below. I and I' are equivalent interfaces
+//
+//#
+// class Base : I
+// {void I.Method() { } }
+// interface IOther : I' {}
+// class Derived : IOther
+// { virtual void Method() {}}
+//
+ // We should map I'.Method to Base.Method, not Derived.Method
+//
+// Another example
+// class Base : I
+// { virtual void Method() }
+// interface IOther : I' {}
+// class Derived : IOther
+// { virtual void Method() {}}
+//
+// We should map I'.Method to Base.Method, not Derived.Method
+//
+// class Base : I
+// {void I.Method() { } }
+// class Derived : I'
+// {}
+//
+ // We should map I'.Method to Base.Method, and not throw TypeLoadException
+//
+#ifdef FEATURE_COMINTEROP
+VOID
+MethodTableBuilder::PlaceMethodFromParentEquivalentInterfaceIntoInterfaceSlot(
+ bmtInterfaceEntry::InterfaceSlotIterator & itfSlotIt,
+ bmtInterfaceEntry * pCurItfEntry,
+ DispatchMapTypeID ** prgInterfaceDispatchMapTypeIDs,
+ DWORD dwCurInterface)
+{
+ STANDARD_VM_CONTRACT;
+
+ bmtRTMethod * pCurItfMethod = itfSlotIt->Decl().AsRTMethod();
+
+ if (itfSlotIt->Impl() != INVALID_SLOT_INDEX)
+ {
+ return;
+ }
+
+ // For every equivalent interface entry that was actually implemented by the parent, look at the
+ // equivalent method slot on that entry; if it matches and has a slot implementation, record it and continue.
+ for (DWORD dwEquivalentInterface = 0;
+ (dwEquivalentInterface < bmtInterface->dwInterfaceMapSize) && (itfSlotIt->Impl() == INVALID_SLOT_INDEX);
+ dwEquivalentInterface++)
+ {
+ bmtInterfaceEntry * pEquivItfEntry = &bmtInterface->pInterfaceMap[dwEquivalentInterface];
+ bmtRTType * pEquivItf = pEquivItfEntry->GetInterfaceType();
+ MethodTable * pEquivItfMT = pEquivItf->GetMethodTable();
+ const Substitution * pEquivItfSubst = &pEquivItf->GetSubstitution();
+ if (pEquivItfEntry->GetInterfaceEquivalenceSet() != pCurItfEntry->GetInterfaceEquivalenceSet())
+ {
+ // Not equivalent
+ continue;
+ }
+ if (!pEquivItfEntry->IsImplementedByParent())
+ {
+ // Not implemented by parent
+ continue;
+ }
+
+ WORD slot = static_cast<WORD>(itfSlotIt.CurrentIndex());
+ BOOL fFound = FALSE;
+
+ // Determine which slot on the equivalent interface would map to the slot we are attempting to fill
+ // in with an implementation.
+ WORD otherMTSlot = GetEquivalentMethodSlot(pCurItfEntry->GetInterfaceType()->GetMethodTable(),
+ pEquivItfEntry->GetInterfaceType()->GetMethodTable(),
+ slot,
+ &fFound);
+
+ if (fFound)
+ {
+ UINT32 cInterfaceDuplicates;
+ if (*prgInterfaceDispatchMapTypeIDs == NULL)
+ {
+ *prgInterfaceDispatchMapTypeIDs =
+ new (GetStackingAllocator()) DispatchMapTypeID[bmtInterface->dwInterfaceMapSize];
+ }
+
+ // Compute all TypeIDs for this interface (all duplicates in the interface map)
+ ComputeDispatchMapTypeIDs(
+ pEquivItfMT,
+ pEquivItfSubst,
+ *prgInterfaceDispatchMapTypeIDs,
+ bmtInterface->dwInterfaceMapSize,
+ &cInterfaceDuplicates);
+ // There cannot be more duplicates than number of interfaces
+ _ASSERTE(cInterfaceDuplicates <= bmtInterface->dwInterfaceMapSize);
+ _ASSERTE(cInterfaceDuplicates > 0);
+
+ // NOTE: This override does not cache the resulting MethodData object
+ MethodTable::MethodDataWrapper hParentData;
+ hParentData = MethodTable::GetMethodData(
+ *prgInterfaceDispatchMapTypeIDs,
+ cInterfaceDuplicates,
+ pEquivItfMT,
+ GetParentMethodTable());
+
+ SLOT_INDEX slotIndex = static_cast<SLOT_INDEX>
+ (hParentData->GetImplSlotNumber(static_cast<UINT32>(otherMTSlot)));
+
+ // Interface is implemented on parent abstract type and this particular slot was not implemented
+ if (slotIndex == INVALID_SLOT_INDEX)
+ {
+ continue;
+ }
+
+ bmtMethodSlot & parentSlotImplementation = (*bmtParent->pSlotTable)[slotIndex];
+ bmtMethodHandle & parentImplementation = parentSlotImplementation.Impl();
+
+ // Check to verify that the equivalent slot on the equivalent interface actually matches the method
+ // on the current interface. If not, then the slot is not a match, and we should search other interfaces
+ // for an implementation of the method.
+ if (!MethodSignature::SignaturesEquivalent(pCurItfMethod->GetMethodSignature(), parentImplementation.GetMethodSignature()))
+ {
+ continue;
+ }
+
+ itfSlotIt->Impl() = slotIndex;
+
+ MethodDesc * pMD = hParentData->GetImplMethodDesc(static_cast<UINT32>(otherMTSlot));
+
+ DispatchMapTypeID dispatchMapTypeID =
+ DispatchMapTypeID::InterfaceClassID(dwCurInterface);
+ bmtVT->pDispatchMapBuilder->InsertMDMapping(
+ dispatchMapTypeID,
+ static_cast<UINT32>(itfSlotIt.CurrentIndex()),
+ pMD,
+ FALSE);
+ }
+ }
+} // MethodTableBuilder::PlaceMethodFromParentEquivalentInterfaceIntoInterfaceSlot
+#endif // FEATURE_COMINTEROP
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+//
+// If we are a class, then there may be some unplaced vtable methods (which are by definition
+// interface methods, otherwise they'd already have been placed). Place as many unplaced methods
+// as possible, in the order preferred by interfaces. However, do not allow any duplicates - once
+// a method has been placed, it cannot be placed again - if we are unable to neatly place an interface,
+// create duplicate slots for it starting at dwCurrentDuplicateVtableSlot. Fill out the interface
+// map for all interfaces as they are placed.
+//
+// If we are an interface, then all methods are already placed. Fill out the interface map for
+// interfaces as they are placed.
+//
+// BEHAVIOUR (based on Partition II: 11.2, not including MethodImpls)
+// C is current class, P is a parent class, I is the interface being implemented
+//
+// FOREACH interface I implemented by this class C
+// FOREACH method I::M
+// IF I is EXPLICITLY implemented by C
+// IF some method C::M matches I::M
+// USE C::M as implementation for I::M
+// ELIF we inherit a method P::M that matches I::M
+// USE P::M as implementation for I::M
+// ENDIF
+// ELSE
+// IF I::M lacks implementation
+// IF some method C::M matches I::M
+// USE C::M as implementation for I::M
+// ELIF we inherit a method P::M that matches I::M
+// USE P::M as implementation for I::M
+// ELIF I::M was implemented by the parent type with method Parent::M
+// USE Parent::M for the implementation of I::M // VSD does this by default if we really
+// // implemented I on the parent type, but
+// // equivalent interfaces need to make this
+// // explicit
+// ENDIF
+// ENDIF
+// ENDIF
+// ENDFOR
+// ENDFOR
+//
+
+VOID
+MethodTableBuilder::PlaceInterfaceMethods()
+{
+ STANDARD_VM_CONTRACT;
+
+ BOOL fParentInterface;
+ DispatchMapTypeID * rgInterfaceDispatchMapTypeIDs = NULL;
+
+ for (DWORD dwCurInterface = 0;
+ dwCurInterface < bmtInterface->dwInterfaceMapSize;
+ dwCurInterface++)
+ {
+ // Default to being implemented by the current class
+ fParentInterface = FALSE;
+
+ // Keep track of the current interface we are trying to place
+ bmtInterfaceEntry * pCurItfEntry = &bmtInterface->pInterfaceMap[dwCurInterface];
+ bmtRTType * pCurItf = pCurItfEntry->GetInterfaceType();
+ MethodTable * pCurItfMT = pCurItf->GetMethodTable();
+ const Substitution * pCurItfSubst = &pCurItf->GetSubstitution();
+
+ //
+ // There are three reasons why an interface could be in the implementation list
+ // 1. Inherited from parent
+ // 2. Explicitly declared in the implements list
+ // 3. Implicitly declared through the implements list of an explicitly declared interface
+ //
+ // The reason these cases need to be distinguished is that an inherited interface that is
+ // also explicitly redeclared in the implements list must be fully reimplemented using the
+ // virtual methods of this type (thereby using matching methods in this type that may have
+ // a different slot than an inherited method which they hide by name & sig); however,
+ // implicitly redeclared interfaces should not be fully reimplemented if they were also
+ // inherited from the parent.
+ //
+ // Example:
+ // interface I1 : I2
+ // class A : I1
+ // class B : A, I1
+ //
+ // In this example I1 must be fully reimplemented on B, but B can inherit the implementation
+ // of I2.
+ //
+
+ if (pCurItfEntry->IsImplementedByParent())
+ {
+ if (!pCurItfEntry->IsDeclaredOnType())
+ {
+ fParentInterface = TRUE;
+ }
+ }
+
+ bool fEquivalentInterfaceImplementedByParent = pCurItfEntry->IsImplementedByParent();
+ bool fEquivalentInterfaceDeclaredOnType = pCurItfEntry->IsDeclaredOnType();
+
+ if (pCurItfEntry->InEquivalenceSetWithMultipleEntries())
+ {
+ for (DWORD dwEquivalentInterface = 0;
+ dwEquivalentInterface < bmtInterface->dwInterfaceMapSize;
+ dwEquivalentInterface++)
+ {
+ bmtInterfaceEntry * pEquivItfEntry = &bmtInterface->pInterfaceMap[dwEquivalentInterface];
+ if (pEquivItfEntry->GetInterfaceEquivalenceSet() != pCurItfEntry->GetInterfaceEquivalenceSet())
+ {
+ // Not equivalent
+ continue;
+ }
+ if (pEquivItfEntry->IsImplementedByParent())
+ {
+ fEquivalentInterfaceImplementedByParent = true;
+ }
+ if (pEquivItfEntry->IsDeclaredOnType())
+ {
+ fEquivalentInterfaceDeclaredOnType = true;
+ }
+
+ if (fEquivalentInterfaceDeclaredOnType && fEquivalentInterfaceImplementedByParent)
+ break;
+ }
+ }
+
+ bool fParentInterfaceEquivalent = fEquivalentInterfaceImplementedByParent && !fEquivalentInterfaceDeclaredOnType;
+
+ CONSISTENCY_CHECK(!fParentInterfaceEquivalent || HasParent());
+
+ if (fParentInterfaceEquivalent)
+ {
+ // In the case where fParentInterface is TRUE, virtual overrides are enough and the interface
+ // does not have to be explicitly (re)implemented. The only exception is if the parent is
+ // abstract, in which case an inherited interface may not be fully implemented yet.
+ // This is an optimization that allows us to skip the more expensive slot filling in below.
+ // Note that the check here is for fParentInterface and not for fParentInterfaceEquivalent.
+ // This is necessary as if the interface is not actually implemented on the parent type we will
+ // need to fill in the slot table below.
+ if (fParentInterface && !GetParentMethodTable()->IsAbstract())
+ {
+ continue;
+ }
+
+ {
+ // We will reach here in two cases.
+ // 1. The parent is abstract and the interface has been declared on the parent,
+ // and possibly partially implemented, so we need to populate the
+ // bmtInterfaceSlotImpl table for this interface with the implementation slot
+ // information.
+ // 2. The interface has not been declared on the parent,
+ // but an equivalent interface has been. So we need to populate the
+ // bmtInterfaceSlotImpl table for this interface with the implementation slot
+ // information from one of the parent equivalent interfaces. We may or may not
+ // find implementations for all of the methods on the interface on the parent type.
+ // The parent type may or may not be abstract.
+
+ MethodTable::MethodDataWrapper hParentData;
+ CONSISTENCY_CHECK(CheckPointer(GetParentMethodTable()));
+
+ if (rgInterfaceDispatchMapTypeIDs == NULL)
+ {
+ rgInterfaceDispatchMapTypeIDs =
+ new (GetStackingAllocator()) DispatchMapTypeID[bmtInterface->dwInterfaceMapSize];
+ }
+
+ if (pCurItfEntry->IsImplementedByParent())
+ {
+ UINT32 cInterfaceDuplicates;
+ // Compute all TypeIDs for this interface (all duplicates in the interface map)
+ ComputeDispatchMapTypeIDs(
+ pCurItfMT,
+ pCurItfSubst,
+ rgInterfaceDispatchMapTypeIDs,
+ bmtInterface->dwInterfaceMapSize,
+ &cInterfaceDuplicates);
+ // There cannot be more duplicates than number of interfaces
+ _ASSERTE(cInterfaceDuplicates <= bmtInterface->dwInterfaceMapSize);
+ _ASSERTE(cInterfaceDuplicates > 0);
+
+ //#InterfaceMap_UseParentInterfaceImplementations
+ // We rely on the fact that interface map of parent type is subset of this type (incl.
+ // duplicates), see code:#InterfaceMap_SupersetOfParent
+ // NOTE: This override does not cache the resulting MethodData object
+ hParentData = MethodTable::GetMethodData(
+ rgInterfaceDispatchMapTypeIDs,
+ cInterfaceDuplicates,
+ pCurItfMT,
+ GetParentMethodTable());
+
+ bmtInterfaceEntry::InterfaceSlotIterator itfSlotIt =
+ pCurItfEntry->IterateInterfaceSlots(GetStackingAllocator());
+ for (; !itfSlotIt.AtEnd(); itfSlotIt.Next())
+ {
+ itfSlotIt->Impl() = static_cast<SLOT_INDEX>
+ (hParentData->GetImplSlotNumber(static_cast<UINT32>(itfSlotIt.CurrentIndex())));
+ }
+ }
+#ifdef FEATURE_COMINTEROP
+ else
+ {
+ // Iterate through the methods on the interface, and if they have a slot which was filled in
+ // on an equivalent interface inherited from the parent, fill in the appropriate slot.
+ // This code path is only used when there is an implicit implementation of an interface
+ // that was not implemented on a parent type, but there was an equivalent interface implemented
+ // on a parent type.
+ bmtInterfaceEntry::InterfaceSlotIterator itfSlotIt =
+ pCurItfEntry->IterateInterfaceSlots(GetStackingAllocator());
+ for (; !itfSlotIt.AtEnd(); itfSlotIt.Next())
+ {
+ PlaceMethodFromParentEquivalentInterfaceIntoInterfaceSlot(itfSlotIt, pCurItfEntry, &rgInterfaceDispatchMapTypeIDs, dwCurInterface);
+ }
+ }
+#endif // FEATURE_COMINTEROP
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // WinRT types always use methodimpls to line up methods with interface implementations, so we do not want to allow implicit
+ // interface implementations to kick in. This can especially cause problems with redirected interfaces, where the underlying
+ // runtimeclass doesn't actually implement the interfaces we claim it does. For example, a WinRT class which implements both
+ // IVector<int> and ICalculator will be projected as implementing IList<int> and ICalculator. In this case, we do not want the
+ // ICalculator Add(int) method to get lined up with the ICollection<int> Add method, since that will cause us to dispatch to the
+ // wrong underlying COM interface.
+ //
+ // There are special WinRT types in mscorlib (notably DisposableRuntimeClass) which do implement interfaces in the normal way,
+ // so we skip this check for them. (Note that we can't use a methodimpl directly in mscorlib, since ComImport classes are
+ // forbidden from having implementation code by the C# compiler).
+ if (GetHalfBakedClass()->IsProjectedFromWinRT() && !GetModule()->IsSystem())
+ {
+ continue;
+ }
+#endif // FEATURE_COMINTEROP
+
+ // For each method declared in this interface
+ bmtInterfaceEntry::InterfaceSlotIterator itfSlotIt =
+ pCurItfEntry->IterateInterfaceSlots(GetStackingAllocator());
+ for (; !itfSlotIt.AtEnd(); ++itfSlotIt)
+ {
+ if (fParentInterfaceEquivalent)
+ {
+ if (itfSlotIt->Impl() != INVALID_SLOT_INDEX)
+ { // If this interface is not explicitly declared on this class, and the interface slot has already been
+ // given an implementation, then the only way to provide a new implementation is through an override
+ // or through a MethodImpl. This is necessary in addition to the continue statement before this for
+ // loop because an abstract interface can still have a partial implementation and it is necessary to
+ // skip those interface slots that have already been satisfied.
+ continue;
+ }
+ }
+
+ BOOL fFoundMatchInBuildingClass = FALSE;
+ bmtInterfaceSlotImpl & curItfSlot = *itfSlotIt;
+ bmtRTMethod * pCurItfMethod = curItfSlot.Decl().AsRTMethod();
+ const MethodSignature & curItfMethodSig = pCurItfMethod->GetMethodSignature();
+
+ //
+ // First, try to find the method explicitly declared in our class
+ //
+
+ DeclaredMethodIterator methIt(*this);
+ while (methIt.Next())
+ {
+ // Note that non-publics can legally be exposed via an interface, but only
+ // through methodImpls.
+ if (IsMdVirtual(methIt.Attrs()) && IsMdPublic(methIt.Attrs()))
+ {
+#ifdef _DEBUG
+ if(GetHalfBakedClass()->m_fDebuggingClass && g_pConfig->ShouldBreakOnMethod(methIt.Name()))
+ CONSISTENCY_CHECK_MSGF(false, ("BreakOnMethodName: '%s' ", methIt.Name()));
+#endif // _DEBUG
+
+ if (pCurItfMethod->GetMethodSignature().Equivalent(methIt->GetMethodSignature()))
+ {
+ fFoundMatchInBuildingClass = TRUE;
+ curItfSlot.Impl() = methIt->GetSlotIndex();
+
+ DispatchMapTypeID dispatchMapTypeID =
+ DispatchMapTypeID::InterfaceClassID(dwCurInterface);
+ bmtVT->pDispatchMapBuilder->InsertMDMapping(
+ dispatchMapTypeID,
+ static_cast<UINT32>(itfSlotIt.CurrentIndex()),
+ methIt->GetMethodDesc(),
+ FALSE);
+
+ break;
+ }
+ }
+ } // end ... try to find method
+
+ //
+ // The ECMA CLR spec states that a type will inherit interface implementations
+ // and that explicit re-declaration of an inherited interface will try to match
+ // only newslot methods with methods in the re-declared interface (note that
+ // this also takes care of matching against unsatisfied interface methods in
+ // the abstract parent type scenario).
+ //
+ // So, if the interface was not declared on a parent and we haven't found a
+ // newslot method declared on this type as a match, search all remaining
+ // public virtual methods (including overrides declared on this type) for a
+ // match.
+ //
+ // Please see bug VSW577403 and VSW593884 for details of this breaking change.
+ //
+ if (!fFoundMatchInBuildingClass &&
+ !fEquivalentInterfaceImplementedByParent)
+ {
+ if (HasParent())
+ {
+ // Iterate backward through the parent's method table. This is important to
+ // find the most derived method.
+ bmtParentInfo::Iterator parentMethodIt = bmtParent->IterateSlots();
+ parentMethodIt.ResetToEnd();
+ while (parentMethodIt.Prev())
+ {
+ bmtRTMethod * pCurParentMethod = parentMethodIt->Decl().AsRTMethod();
+ DWORD dwAttrs = pCurParentMethod->GetDeclAttrs();
+ if (!IsMdVirtual(dwAttrs) || !IsMdPublic(dwAttrs))
+ { // Only match mdPublic mdVirtual methods for interface implementation
+ continue;
+ }
+
+ if (curItfMethodSig.Equivalent(pCurParentMethod->GetMethodSignature()))
+ {
+ fFoundMatchInBuildingClass = TRUE;
+ curItfSlot.Impl() = pCurParentMethod->GetSlotIndex();
+
+ DispatchMapTypeID dispatchMapTypeID =
+ DispatchMapTypeID::InterfaceClassID(dwCurInterface);
+ bmtVT->pDispatchMapBuilder->InsertMDMapping(
+ dispatchMapTypeID,
+ static_cast<UINT32>(itfSlotIt.CurrentIndex()),
+ pCurParentMethod->GetMethodDesc(),
+ FALSE);
+
+ break;
+ }
+ } // end ... try to find parent method
+ }
+ }
+
+ // For type equivalent interfaces that had an equivalent interface implemented by their parent
+ // and where the previous logic to fill in the method based on the virtual mappings on the type have
+ // failed, we should attempt to get the mappings from the equivalent interfaces declared on parent types
+ // of the type we are currently building.
+#ifdef FEATURE_COMINTEROP
+ if (!fFoundMatchInBuildingClass && fEquivalentInterfaceImplementedByParent && !pCurItfEntry->IsImplementedByParent())
+ {
+ PlaceMethodFromParentEquivalentInterfaceIntoInterfaceSlot(itfSlotIt, pCurItfEntry, &rgInterfaceDispatchMapTypeIDs, dwCurInterface);
+ }
+#endif
+ }
+ }
+} // MethodTableBuilder::PlaceInterfaceMethods
+
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+// Place static fields
+//
+VOID MethodTableBuilder::PlaceRegularStaticFields()
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD i;
+
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Placing statics for %s\n", this->GetDebugClassName()));
+
+ //
+ // Place gc refs and value types first, as they need to have handles created for them.
+ // (Placing them together allows us to easily create the handles when Restoring the class,
+ // and when initializing new DLS for the class.)
+ //
+
+ DWORD dwCumulativeStaticFieldPos = 0 ;
+ DWORD dwCumulativeStaticGCFieldPos = 0;
+ DWORD dwCumulativeStaticBoxFieldPos = 0;
+
+ // We don't need to do any calculations for the gc refs or valuetypes, as they're
+ // guaranteed to be aligned in ModuleStaticsInfo
+ bmtFP->NumRegularStaticFieldsOfSize[LOG2_PTRSIZE] -=
+ bmtFP->NumRegularStaticGCBoxedFields + bmtFP->NumRegularStaticGCPointerFields;
+
+ // Place fields, largest first, padding so that each group is aligned to its natural size
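+ // Illustrative example (not in the original source): with one 8-byte, two 4-byte and
+ // one 1-byte static field, the groups start at offsets 0, 8 and 16 respectively
+ // (17 bytes total); placing the largest size first keeps every group naturally
+ // aligned without explicit padding.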
+ for (i = MAX_LOG2_PRIMITIVE_FIELD_SIZE; (signed int) i >= 0; i--)
+ {
+ // Fields of this size start at the next available location
+ bmtFP->RegularStaticFieldStart[i] = dwCumulativeStaticFieldPos;
+ dwCumulativeStaticFieldPos += (bmtFP->NumRegularStaticFieldsOfSize[i] << i);
+
+ // Reset counters for the loop after this one
+ bmtFP->NumRegularStaticFieldsOfSize[i] = 0;
+ }
+
+
+ if (dwCumulativeStaticFieldPos > FIELD_OFFSET_LAST_REAL_OFFSET)
+ BuildMethodTableThrowException(IDS_CLASSLOAD_GENERAL);
+
+ DWORD dwNumHandleStatics = bmtFP->NumRegularStaticGCBoxedFields + bmtFP->NumRegularStaticGCPointerFields;
+ if (!FitsIn<WORD>(dwNumHandleStatics))
+ { // Overflow.
+ BuildMethodTableThrowException(IDS_EE_TOOMANYFIELDS);
+ }
+ SetNumHandleRegularStatics(static_cast<WORD>(dwNumHandleStatics));
+
+ if (!FitsIn<WORD>(bmtFP->NumRegularStaticGCBoxedFields))
+ { // Overflow.
+ BuildMethodTableThrowException(IDS_EE_TOOMANYFIELDS);
+ }
+ SetNumBoxedRegularStatics(static_cast<WORD>(bmtFP->NumRegularStaticGCBoxedFields));
+
+ // Tell the module to give us the offsets we'll be using and commit space for us
+ // if necessary
+ DWORD dwNonGCOffset, dwGCOffset;
+ GetModule()->GetOffsetsForRegularStaticData(bmtInternal->pType->GetTypeDefToken(),
+ bmtProp->fDynamicStatics,
+ GetNumHandleRegularStatics(), dwCumulativeStaticFieldPos,
+ &dwGCOffset, &dwNonGCOffset);
+
+ // Allocate boxed statics first ("x << LOG2_PTRSIZE" is equivalent to "x * sizeof(void *)")
+ dwCumulativeStaticGCFieldPos = bmtFP->NumRegularStaticGCBoxedFields<<LOG2_PTRSIZE;
+
+ FieldDesc *pFieldDescList = GetApproxFieldDescListRaw();
+ // Place static fields
+ for (i = 0; i < bmtEnumFields->dwNumStaticFields - bmtEnumFields->dwNumThreadStaticFields; i++)
+ {
+ FieldDesc * pCurField = &pFieldDescList[bmtEnumFields->dwNumInstanceFields+i];
+ DWORD dwLog2FieldSize = (DWORD)(DWORD_PTR&)pCurField->m_pMTOfEnclosingClass; // log2(field size)
+ DWORD dwOffset = (DWORD) pCurField->m_dwOffset; // offset or type of field
+
+ switch (dwOffset)
+ {
+ case FIELD_OFFSET_UNPLACED_GC_PTR:
+ // Place GC reference static field
+ pCurField->SetOffset(dwCumulativeStaticGCFieldPos + dwGCOffset);
+ dwCumulativeStaticGCFieldPos += 1<<LOG2_PTRSIZE;
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Field placed at GC offset 0x%x\n", pCurField->GetOffset_NoLogging()));
+
+ break;
+
+ case FIELD_OFFSET_VALUE_CLASS:
+ // Place boxed GC reference static field
+ pCurField->SetOffset(dwCumulativeStaticBoxFieldPos + dwGCOffset);
+ dwCumulativeStaticBoxFieldPos += 1<<LOG2_PTRSIZE;
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Field placed at GC offset 0x%x\n", pCurField->GetOffset_NoLogging()));
+
+ break;
+
+ case FIELD_OFFSET_UNPLACED:
+ // Place non-GC static field
+ pCurField->SetOffset(bmtFP->RegularStaticFieldStart[dwLog2FieldSize] +
+ (bmtFP->NumRegularStaticFieldsOfSize[dwLog2FieldSize] << dwLog2FieldSize) +
+ dwNonGCOffset);
+ bmtFP->NumRegularStaticFieldsOfSize[dwLog2FieldSize]++;
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Field placed at non GC offset 0x%x\n", pCurField->GetOffset_NoLogging()));
+ break;
+
+ default:
+ // RVA field
+ break;
+ }
+
+ LOG((LF_CLASSLOADER, LL_INFO1000000, "Offset of %s: %i\n", pCurField->m_debugName, pCurField->GetOffset_NoLogging()));
+ }
+
+ if (bmtProp->fDynamicStatics)
+ {
+ _ASSERTE(dwNonGCOffset == 0 || // no statics at all
+ dwNonGCOffset == DomainLocalModule::DynamicEntry::GetOffsetOfDataBlob()); // We need space to point to the GC statics
+ bmtProp->dwNonGCRegularStaticFieldBytes = dwCumulativeStaticFieldPos;
+ }
+ else
+ {
+ bmtProp->dwNonGCRegularStaticFieldBytes = 0; // Non-dynamic classes shouldn't be using this
+ }
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Static field bytes needed (0 is normal for the non-dynamic case): %i\n", bmtProp->dwNonGCRegularStaticFieldBytes));
+}
+
+
+VOID MethodTableBuilder::PlaceThreadStaticFields()
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD i;
+
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Placing ThreadStatics for %s\n", this->GetDebugClassName()));
+
+ //
+ // Place gc refs and value types first, as they need to have handles created for them.
+ // (Placing them together allows us to easily create the handles when Restoring the class,
+ // and when initializing new DLS for the class.)
+ //
+
+ DWORD dwCumulativeStaticFieldPos = 0 ;
+ DWORD dwCumulativeStaticGCFieldPos = 0;
+ DWORD dwCumulativeStaticBoxFieldPos = 0;
+
+ // We don't need to do any calculations for the gc refs or valuetypes, as they're
+ // guaranteed to be aligned in ModuleStaticsInfo
+ bmtFP->NumThreadStaticFieldsOfSize[LOG2_PTRSIZE] -=
+ bmtFP->NumThreadStaticGCBoxedFields + bmtFP->NumThreadStaticGCPointerFields;
+
+ // Place fields, largest first, padding so that each group is aligned to its natural size
+ for (i = MAX_LOG2_PRIMITIVE_FIELD_SIZE; (signed int) i >= 0; i--)
+ {
+ // Fields of this size start at the next available location
+ bmtFP->ThreadStaticFieldStart[i] = dwCumulativeStaticFieldPos;
+ dwCumulativeStaticFieldPos += (bmtFP->NumThreadStaticFieldsOfSize[i] << i);
+
+ // Reset counters for the loop after this one
+ bmtFP->NumThreadStaticFieldsOfSize[i] = 0;
+ }
+
+
+ if (dwCumulativeStaticFieldPos > FIELD_OFFSET_LAST_REAL_OFFSET)
+ BuildMethodTableThrowException(IDS_CLASSLOAD_GENERAL);
+
+ DWORD dwNumHandleStatics = bmtFP->NumThreadStaticGCBoxedFields + bmtFP->NumThreadStaticGCPointerFields;
+ if (!FitsIn<WORD>(dwNumHandleStatics))
+ { // Overflow.
+ BuildMethodTableThrowException(IDS_EE_TOOMANYFIELDS);
+ }
+
+ SetNumHandleThreadStatics(static_cast<WORD>(dwNumHandleStatics));
+
+ if (!FitsIn<WORD>(bmtFP->NumThreadStaticGCBoxedFields))
+ { // Overflow.
+ BuildMethodTableThrowException(IDS_EE_TOOMANYFIELDS);
+ }
+
+ SetNumBoxedThreadStatics(static_cast<WORD>(bmtFP->NumThreadStaticGCBoxedFields));
+
+ // Tell the module to give us the offsets we'll be using and commit space for us
+ // if necessary
+ DWORD dwNonGCOffset, dwGCOffset;
+
+ GetModule()->GetOffsetsForThreadStaticData(bmtInternal->pType->GetTypeDefToken(),
+ bmtProp->fDynamicStatics,
+ GetNumHandleThreadStatics(), dwCumulativeStaticFieldPos,
+ &dwGCOffset, &dwNonGCOffset);
+
+ // Allocate boxed statics first ("x << LOG2_PTRSIZE" is equivalent to "x * sizeof(void *)")
+ dwCumulativeStaticGCFieldPos = bmtFP->NumThreadStaticGCBoxedFields<<LOG2_PTRSIZE;
+
+ FieldDesc *pFieldDescList = GetHalfBakedClass()->GetFieldDescList();
+ // Place static fields
+ for (i = 0; i < bmtEnumFields->dwNumThreadStaticFields; i++)
+ {
+ FieldDesc * pCurField = &pFieldDescList[bmtEnumFields->dwNumInstanceFields + bmtEnumFields->dwNumStaticFields - bmtEnumFields->dwNumThreadStaticFields + i];
+ DWORD dwLog2FieldSize = (DWORD)(DWORD_PTR&)pCurField->m_pMTOfEnclosingClass; // log2(field size)
+ DWORD dwOffset = (DWORD) pCurField->m_dwOffset; // offset or type of field
+
+ switch (dwOffset)
+ {
+ case FIELD_OFFSET_UNPLACED_GC_PTR:
+ // Place GC reference static field
+ pCurField->SetOffset(dwCumulativeStaticGCFieldPos + dwGCOffset);
+ dwCumulativeStaticGCFieldPos += 1<<LOG2_PTRSIZE;
+ LOG((LF_CLASSLOADER, LL_INFO10000, "THREAD STATICS: Field placed at GC offset 0x%x\n", pCurField->GetOffset_NoLogging()));
+
+ break;
+
+ case FIELD_OFFSET_VALUE_CLASS:
+ // Place boxed GC reference static field
+ pCurField->SetOffset(dwCumulativeStaticBoxFieldPos + dwGCOffset);
+ dwCumulativeStaticBoxFieldPos += 1<<LOG2_PTRSIZE;
+ LOG((LF_CLASSLOADER, LL_INFO10000, "THREAD STATICS: Field placed at GC offset 0x%x\n", pCurField->GetOffset_NoLogging()));
+
+ break;
+
+ case FIELD_OFFSET_UNPLACED:
+ // Place non-GC static field
+ pCurField->SetOffset(bmtFP->ThreadStaticFieldStart[dwLog2FieldSize] +
+ (bmtFP->NumThreadStaticFieldsOfSize[dwLog2FieldSize] << dwLog2FieldSize) +
+ dwNonGCOffset);
+ bmtFP->NumThreadStaticFieldsOfSize[dwLog2FieldSize]++;
+ LOG((LF_CLASSLOADER, LL_INFO10000, "THREAD STATICS: Field placed at non GC offset 0x%x\n", pCurField->GetOffset_NoLogging()));
+ break;
+
+ default:
+ // RVA field
+ break;
+ }
+
+ LOG((LF_CLASSLOADER, LL_INFO1000000, "Offset of %s: %i\n", pCurField->m_debugName, pCurField->GetOffset_NoLogging()));
+ }
+
+ if (bmtProp->fDynamicStatics)
+ {
+ _ASSERTE(dwNonGCOffset == 0 || // no thread statics at all
+ dwNonGCOffset == ThreadLocalModule::DynamicEntry::GetOffsetOfDataBlob()); // We need space to point to the GC statics
+ bmtProp->dwNonGCThreadStaticFieldBytes = dwCumulativeStaticFieldPos;
+ }
+ else
+ {
+ bmtProp->dwNonGCThreadStaticFieldBytes = 0; // Non-dynamic classes shouldn't be using this
+ }
+ LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: ThreadStatic field bytes needed (0 is normal for the non-dynamic case): %i\n", bmtProp->dwNonGCThreadStaticFieldBytes));
+}
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+// Place instance fields
+//
+VOID MethodTableBuilder::PlaceInstanceFields(MethodTable ** pByValueClassCache)
+{
+ STANDARD_VM_CONTRACT;
+
+
+ DWORD i;
+
+ //===============================================================
+ // BEGIN: Place instance fields
+ //===============================================================
+
+ FieldDesc *pFieldDescList = GetHalfBakedClass()->GetFieldDescList();
+ DWORD dwCumulativeInstanceFieldPos;
+
+ // Instance fields start right after the parent
+ dwCumulativeInstanceFieldPos = HasParent() ? GetParentMethodTable()->GetNumInstanceFieldBytes() : 0;
+
+ DWORD dwOffsetBias = 0;
+#ifdef FEATURE_64BIT_ALIGNMENT
+ // On platforms where the alignment of 64-bit primitives is a requirement (but we're not guaranteed
+ // this implicitly by the GC) field offset 0 is actually not 8-byte aligned in reference classes.
+ // That's because all such platforms are currently 32-bit and the 4-byte MethodTable pointer pushes us
+ // out of alignment. Ideally we'd solve this by arranging to have the object header allocated at a
+ // 4-byte offset from an 8-byte boundary, but this is difficult to achieve for objects allocated on
+ // the large object heap (which actually requires headers to be 8-byte aligned).
+ //
+ // So we adjust dwCumulativeInstanceFieldPos to account for the MethodTable* and our alignment
+ // calculations will automatically adjust and add padding as necessary. We need to remove this
+ // adjustment when setting the field offset in the field desc, however, since the rest of the system
+ // expects that value to not include the MethodTable*.
+ //
+ // This happens only for reference classes: value type field 0 really does lie at offset 0 for unboxed
+ // value types. We deal with boxed value types by allocating their headers mis-aligned (luckily for us
+ // value types can never get large enough to allocate on the LOH).
+ if (!IsValueClass())
+ {
+ dwOffsetBias = sizeof(MethodTable*);
+ dwCumulativeInstanceFieldPos += dwOffsetBias;
+ }
+#endif // FEATURE_64BIT_ALIGNMENT
+
+#ifdef FEATURE_READYTORUN
+ if (NeedsAlignedBaseOffset())
+ {
+ // READYTORUN: FUTURE: Use the minimum possible alignment, reduce padding when inheriting within same bubble
+ DWORD dwAlignment = DATA_ALIGNMENT;
+#ifdef FEATURE_64BIT_ALIGNMENT
+ if (GetHalfBakedClass()->IsAlign8Candidate())
+ dwAlignment = 8;
+#endif
+ dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, dwAlignment);
+ }
+#endif // FEATURE_READYTORUN
+
+ // Place small fields first if the parent's number of field bytes is not aligned
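+ // Illustrative example (not in the original source): if the parent's fields end at
+ // offset 6, a pending 2-byte field can be back-filled at offset 6 so that the 4-byte
+ // and larger fields that follow can start at offset 8 instead of leaving the gap as
+ // padding.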
+ if (!IS_ALIGNED(dwCumulativeInstanceFieldPos, DATA_ALIGNMENT))
+ {
+ for (i = 0; i < MAX_LOG2_PRIMITIVE_FIELD_SIZE; i++) {
+ DWORD j;
+
+ if (IS_ALIGNED(dwCumulativeInstanceFieldPos, 1<<(i+1)))
+ continue;
+
+ // check whether there are any bigger fields
+ for (j = i + 1; j <= MAX_LOG2_PRIMITIVE_FIELD_SIZE; j++) {
+ if (bmtFP->NumInstanceFieldsOfSize[j] != 0)
+ break;
+ }
+ // nothing to gain if there are no bigger fields
+ // (the subsequent loop will place fields from large to small fields)
+ if (j > MAX_LOG2_PRIMITIVE_FIELD_SIZE)
+ break;
+
+ // check whether there are any small enough fields
+ for (j = i; (signed int) j >= 0; j--) {
+ if (bmtFP->NumInstanceFieldsOfSize[j] != 0)
+ break;
+ // TODO: since we will refuse to place GC references, we should filter them out here;
+ // otherwise the "back-filling" process stops completely. If you change it here,
+ // please change it in the corresponding place in src\tools\mdilbind\compactLayoutReader.cpp
+ // (PlaceInstanceFields)
+ // the following code would fix the issue (a replacement for the code above this comment):
+ // if (bmtFP->NumInstanceFieldsOfSize[j] != 0 &&
+ // (j != LOG2SLOT || bmtFP->NumInstanceFieldsOfSize[j] > bmtFP->NumInstanceGCPointerFields))
+ // {
+ // break;
+ // }
+
+ }
+ // nothing to play with if there are no smaller fields
+ if ((signed int) j < 0)
+ break;
+ // eventually go back and use the smaller field as filling
+ i = j;
+
+ CONSISTENCY_CHECK(bmtFP->NumInstanceFieldsOfSize[i] != 0);
+
+ j = bmtFP->FirstInstanceFieldOfSize[i];
+
+ // Avoid reordering of gcfields
+ if (i == LOG2SLOT) {
+ for ( ; j < bmtEnumFields->dwNumInstanceFields; j++) {
+ if ((pFieldDescList[j].GetOffset_NoLogging() == FIELD_OFFSET_UNPLACED) &&
+ ((DWORD_PTR&)pFieldDescList[j].m_pMTOfEnclosingClass == (size_t)i))
+ break;
+ }
+
+ // out of luck - can't reorder gc fields
+ if (j >= bmtEnumFields->dwNumInstanceFields)
+ break;
+ }
+
+ // Place the field
+ dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, 1 << i);
+
+ pFieldDescList[j].SetOffset(dwCumulativeInstanceFieldPos - dwOffsetBias);
+ dwCumulativeInstanceFieldPos += (1 << i);
+
+ // We've placed this field now, so there is now one less of this size field to place
+ if (--bmtFP->NumInstanceFieldsOfSize[i] == 0)
+ continue;
+
+ // We are done in this round if we haven't picked the first field
+ if (bmtFP->FirstInstanceFieldOfSize[i] != j)
+ continue;
+
+ // Update FirstInstanceFieldOfSize[i] to point to the next such field
+ for (j = j+1; j < bmtEnumFields->dwNumInstanceFields; j++)
+ {
+ // The log of the field size is stored in the method table
+ if ((DWORD_PTR&)pFieldDescList[j].m_pMTOfEnclosingClass == (size_t)i)
+ {
+ bmtFP->FirstInstanceFieldOfSize[i] = j;
+ break;
+ }
+ }
+ _ASSERTE(j < bmtEnumFields->dwNumInstanceFields);
+ }
+ }
+
+ // Place fields, largest first
+ for (i = MAX_LOG2_PRIMITIVE_FIELD_SIZE; (signed int) i >= 0; i--)
+ {
+ if (bmtFP->NumInstanceFieldsOfSize[i] == 0)
+ continue;
+
+ // Align instance fields if we aren't already
+#ifdef FEATURE_64BIT_ALIGNMENT
+ DWORD dwDataAlignment = 1 << i;
+#else
+ DWORD dwDataAlignment = min(1 << i, DATA_ALIGNMENT);
+#endif
+ dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, dwDataAlignment);
+
+ // Fields of this size start at the next available location
+ bmtFP->InstanceFieldStart[i] = dwCumulativeInstanceFieldPos;
+ dwCumulativeInstanceFieldPos += (bmtFP->NumInstanceFieldsOfSize[i] << i);
+
+ // Reset counters for the loop after this one
+ bmtFP->NumInstanceFieldsOfSize[i] = 0;
+ }
+
+
+ // Make corrections to reserve space for GC Pointer Fields
+ //
+ // The GC Pointers simply take up the top part of the region associated
+ // with fields of that size (GC pointers can be 64 bit on certain systems)
+ if (bmtFP->NumInstanceGCPointerFields)
+ {
+ bmtFP->GCPointerFieldStart = bmtFP->InstanceFieldStart[LOG2SLOT] - dwOffsetBias;
+ bmtFP->InstanceFieldStart[LOG2SLOT] = bmtFP->InstanceFieldStart[LOG2SLOT] + (bmtFP->NumInstanceGCPointerFields << LOG2SLOT);
+ bmtFP->NumInstanceGCPointerFields = 0; // reset to zero here, counts up as pointer slots are assigned below
+ }
+
+ // Place instance fields - be careful not to place any already-placed fields
+ for (i = 0; i < bmtEnumFields->dwNumInstanceFields; i++)
+ {
+ DWORD dwFieldSize = (DWORD)(DWORD_PTR&)pFieldDescList[i].m_pMTOfEnclosingClass;
+ DWORD dwOffset;
+
+ dwOffset = pFieldDescList[i].GetOffset_NoLogging();
+
+ // Don't place already-placed fields
+ if ((dwOffset == FIELD_OFFSET_UNPLACED || dwOffset == FIELD_OFFSET_UNPLACED_GC_PTR || dwOffset == FIELD_OFFSET_VALUE_CLASS))
+ {
+ if (dwOffset == FIELD_OFFSET_UNPLACED_GC_PTR)
+ {
+ pFieldDescList[i].SetOffset(bmtFP->GCPointerFieldStart + (bmtFP->NumInstanceGCPointerFields << LOG2SLOT));
+ bmtFP->NumInstanceGCPointerFields++;
+ }
+ else if (pFieldDescList[i].IsByValue() == FALSE) // it's a regular field
+ {
+ pFieldDescList[i].SetOffset(bmtFP->InstanceFieldStart[dwFieldSize] + (bmtFP->NumInstanceFieldsOfSize[dwFieldSize] << dwFieldSize) - dwOffsetBias);
+ bmtFP->NumInstanceFieldsOfSize[dwFieldSize]++;
+ }
+ }
+ }
+
+ DWORD dwNumGCPointerSeries;
+ // Save Number of pointer series
+ if (bmtFP->NumInstanceGCPointerFields)
+ dwNumGCPointerSeries = bmtParent->NumParentPointerSeries + 1;
+ else
+ dwNumGCPointerSeries = bmtParent->NumParentPointerSeries;
+
+ // Place by value class fields last
+ // Update the number of GC pointer series
+ for (i = 0; i < bmtEnumFields->dwNumInstanceFields; i++)
+ {
+ if (pFieldDescList[i].IsByValue())
+ {
+ MethodTable * pByValueMT = pByValueClassCache[i];
+
+ // value classes could have GC pointers in them, which need to be pointer-size aligned,
+ // so do this if it has not been done already
+
+#if !defined(_WIN64) && (DATA_ALIGNMENT > 4)
+ dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos,
+ (pByValueMT->GetNumInstanceFieldBytes() >= DATA_ALIGNMENT) ? DATA_ALIGNMENT : sizeof(void*));
+#else // !(!defined(_WIN64) && (DATA_ALIGNMENT > 4))
+#ifdef FEATURE_64BIT_ALIGNMENT
+ if (pByValueMT->RequiresAlign8())
+ dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, 8);
+ else
+#endif // FEATURE_64BIT_ALIGNMENT
+ dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, sizeof(void*));
+#endif // !(!defined(_WIN64) && (DATA_ALIGNMENT > 4))
+
+ pFieldDescList[i].SetOffset(dwCumulativeInstanceFieldPos - dwOffsetBias);
+ dwCumulativeInstanceFieldPos += pByValueMT->GetAlignedNumInstanceFieldBytes();
+
+ // Add pointer series for by-value classes
+ dwNumGCPointerSeries += pByValueMT->ContainsPointers() ?
+ (DWORD)CGCDesc::GetCGCDescFromMT(pByValueMT)->GetNumSeries() : 0;
+ }
+ }
+
+ // Can be unaligned
+ DWORD dwNumInstanceFieldBytes = dwCumulativeInstanceFieldPos - dwOffsetBias;
+
+ if (IsValueClass())
+ {
+ // Like C++, we enforce that there can be no zero-length structures.
+ // Thus for a value class with no fields, we 'pad' the length to be 1
+ if (dwNumInstanceFieldBytes == 0)
+ dwNumInstanceFieldBytes = 1;
+
+ // The JITs like to copy full machine words,
+ // so if the size is bigger than a void* we round it up to a multiple of sizeof(void*),
+ // and if the size is smaller than a void* we round it up to the next power of two
+ unsigned minAlign;
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+ if (GetHalfBakedClass()->IsAlign8Candidate()) {
+ minAlign = 8;
+ }
+ else
+#endif // FEATURE_64BIT_ALIGNMENT
+ if (dwNumInstanceFieldBytes > sizeof(void*)) {
+ minAlign = sizeof(void*);
+ }
+ else {
+ minAlign = 1;
+ while (minAlign < dwNumInstanceFieldBytes)
+ minAlign *= 2;
+ }
+
+ dwNumInstanceFieldBytes = (dwNumInstanceFieldBytes + minAlign-1) & ~(minAlign-1);
+ }
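+
+ // Example (illustrative, 32-bit): a 3-byte value class rounds up to 4 bytes (the
+ // next power of two), a 10-byte one rounds up to 12 (a multiple of sizeof(void*)),
+ // and an empty one was already padded to 1 byte above.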
+
+ if (dwNumInstanceFieldBytes > FIELD_OFFSET_LAST_REAL_OFFSET) {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_FIELDTOOLARGE);
+ }
+
+ bmtFP->NumInstanceFieldBytes = dwNumInstanceFieldBytes;
+
+ bmtFP->NumGCPointerSeries = dwNumGCPointerSeries;
+
+ //===============================================================
+ // END: Place instance fields
+ //===============================================================
+}
+
+//*******************************************************************************
+// this accesses the field size which is temporarily stored in m_pMTOfEnclosingClass
+// during class loading. Do not use it at any other time
+DWORD MethodTableBuilder::GetFieldSize(FieldDesc *pFD)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ // We should only be calling this while this class is being built.
+ _ASSERTE(GetHalfBakedMethodTable() == 0);
+ BAD_FORMAT_NOTHROW_ASSERT(! pFD->IsByValue() || HasExplicitFieldOffsetLayout());
+
+ if (pFD->IsByValue())
+ return (DWORD)(DWORD_PTR&)(pFD->m_pMTOfEnclosingClass);
+ return (1 << (DWORD)(DWORD_PTR&)(pFD->m_pMTOfEnclosingClass));
+}
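+
+// Example of the temporary encoding (illustrative): for an INT32 field the stored
+// value is 2 and this helper returns 1 << 2 == 4; for a by-value field the raw byte
+// size is stored and returned unchanged.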
+
+#ifdef FEATURE_HFA
+//---------------------------------------------------------------------------------------
+//
+VOID
+MethodTableBuilder::CheckForHFA(MethodTable ** pByValueClassCache)
+{
+ STANDARD_VM_CONTRACT;
+
+ // This method should be called for valuetypes only
+ _ASSERTE(IsValueClass());
+
+ // No HFAs with explicit layout. There may be cases where explicit layout may be still
+ // eligible for HFA, but it is hard to tell the real intent. Make it simple and just
+ // unconditionally disable HFAs for explicit layout.
+ if (HasExplicitFieldOffsetLayout())
+ return;
+
+ CorElementType hfaType = ELEMENT_TYPE_END;
+
+ FieldDesc *pFieldDescList = GetHalfBakedClass()->GetFieldDescList();
+ for (UINT i = 0; i < bmtEnumFields->dwNumInstanceFields; i++)
+ {
+ FieldDesc *pFD = &pFieldDescList[i];
+ CorElementType fieldType = pFD->GetFieldType();
+
+ switch (fieldType)
+ {
+ case ELEMENT_TYPE_VALUETYPE:
+ fieldType = pByValueClassCache[i]->GetHFAType();
+ break;
+
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ break;
+
+ default:
+ // Not HFA
+ return;
+ }
+
+ // Field type should be a valid HFA type.
+ if (fieldType == ELEMENT_TYPE_END)
+ {
+ return;
+ }
+
+ // Initialize with a valid HFA type.
+ if (hfaType == ELEMENT_TYPE_END)
+ {
+ hfaType = fieldType;
+ }
+ // All field types should be equal.
+ else if (fieldType != hfaType)
+ {
+ return;
+ }
+ }
+
+ if (hfaType == ELEMENT_TYPE_END)
+ return;
+
+ int elemSize = (hfaType == ELEMENT_TYPE_R8) ? sizeof(double) : sizeof(float);
+
+ // Note that we check the total size, but do not perform any checks on the number of fields:
+ // - The type of a field can itself be an HFA valuetype
+ // - Managed C++ HFA valuetypes have just one <alignment member> of type float to signal that
+ //   the valuetype is an HFA, plus an explicitly specified size
+
+ DWORD totalSize = bmtFP->NumInstanceFieldBytes;
+
+ if (totalSize % elemSize != 0)
+ return;
+
+ // On ARM, HFAs can have a maximum of four elements, regardless of whether those are float or double.
+ if (totalSize / elemSize > 4)
+ return;
+
+ // All the above tests passed. It's HFA!
+ GetHalfBakedMethodTable()->SetIsHFA();
+}
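+
+// Illustrative examples (hypothetical types, mirroring the checks above):
+//   struct Hfa2   { float x; float y; };   // HFA: two ELEMENT_TYPE_R4 elements
+//   struct NotHfa { float x; double y; };  // not an HFA: mixed element types
+//   struct TooBig { double d[5]; };        // not an HFA: 5 elements > 4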
+
+//
+// The managed and unmanaged views of a type can differ for non-blittable types. This method
+// mirrors the HFA type computation for the unmanaged view.
+//
+void MethodTableBuilder::CheckForNativeHFA()
+{
+ STANDARD_VM_CONTRACT;
+
+ // No HFAs with inheritance
+ if (!(IsValueClass() || (GetParentMethodTable() == g_pObjectClass)))
+ return;
+
+ // No HFAs with explicit layout. There may be cases where explicit layout may be still
+ // eligible for HFA, but it is hard to tell the real intent. Make it simple and just
+ // unconditionally disable HFAs for explicit layout.
+ if (HasExplicitFieldOffsetLayout())
+ return;
+
+ const FieldMarshaler *pFieldMarshaler = GetLayoutInfo()->GetFieldMarshalers();
+ UINT numReferenceFields = GetLayoutInfo()->GetNumCTMFields();
+
+ CorElementType hfaType = ELEMENT_TYPE_END;
+
+ while (numReferenceFields--)
+ {
+ CorElementType fieldType = ELEMENT_TYPE_END;
+
+ switch (pFieldMarshaler->GetNStructFieldType())
+ {
+ case NFT_COPY4:
+ case NFT_COPY8:
+ fieldType = pFieldMarshaler->GetFieldDesc()->GetFieldType();
+ if (fieldType != ELEMENT_TYPE_R4 && fieldType != ELEMENT_TYPE_R8)
+ return;
+ break;
+
+ case NFT_NESTEDLAYOUTCLASS:
+ fieldType = ((FieldMarshaler_NestedLayoutClass *)pFieldMarshaler)->GetMethodTable()->GetNativeHFAType();
+ break;
+
+ case NFT_NESTEDVALUECLASS:
+ fieldType = ((FieldMarshaler_NestedValueClass *)pFieldMarshaler)->GetMethodTable()->GetNativeHFAType();
+ break;
+
+ case NFT_FIXEDARRAY:
+ fieldType = ((FieldMarshaler_FixedArray *)pFieldMarshaler)->GetElementTypeHandle().GetMethodTable()->GetNativeHFAType();
+ break;
+
+ case NFT_DATE:
+ fieldType = ELEMENT_TYPE_R8;
+ break;
+
+ default:
+ // Not HFA
+ return;
+ }
+
+ // Field type should be a valid HFA type.
+ if (fieldType == ELEMENT_TYPE_END)
+ {
+ return;
+ }
+
+ // Initialize with a valid HFA type.
+ if (hfaType == ELEMENT_TYPE_END)
+ {
+ hfaType = fieldType;
+ }
+ // All field types should be equal.
+ else if (fieldType != hfaType)
+ {
+ return;
+ }
+
+ ((BYTE*&)pFieldMarshaler) += MAXFIELDMARSHALERSIZE;
+ }
+
+ if (hfaType == ELEMENT_TYPE_END)
+ return;
+
+ int elemSize = (hfaType == ELEMENT_TYPE_R8) ? sizeof(double) : sizeof(float);
+
+ // Note that we check the total size, but do not perform any checks on the number of fields:
+ // - The type of a field can itself be an HFA valuetype
+ // - Managed C++ HFA valuetypes have just one <alignment member> of type float to signal that
+ //   the valuetype is an HFA, plus an explicitly specified size
+
+ DWORD totalSize = GetHalfBakedClass()->GetNativeSize();
+
+ if (totalSize % elemSize != 0)
+ return;
+
+ // On ARM, HFAs can have a maximum of four elements, regardless of whether those are float or double.
+ if (totalSize / elemSize > 4)
+ return;
+
+ // All the above tests passed. It's HFA!
+ GetLayoutInfo()->SetNativeHFAType(hfaType);
+}
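+
+// Illustration (assumed marshaling, for exposition): a struct whose only field is a
+// System.DateTime marshaled as NFT_DATE contributes ELEMENT_TYPE_R8 to the native
+// view, so the native layout can be an HFA even though the managed view (an integer
+// tick count) is not.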
+#endif // FEATURE_HFA
+
+//---------------------------------------------------------------------------------------
+//
+// make sure that no object fields are overlapped incorrectly and define the
+// GC pointer series for the class. We assume that the compiler will always lay this class
+// out within its enclosing class in such a way that offset 0 has the correct alignment
+// for object ref fields, so we don't need to try to align it
+//
+VOID
+MethodTableBuilder::HandleExplicitLayout(
+ MethodTable ** pByValueClassCache)
+{
+ STANDARD_VM_CONTRACT;
+
+
+ // Instance slice size is the total size of an instance, and is calculated as
+ // the field whose offset and size add to the greatest number.
+ UINT instanceSliceSize = 0;
+ DWORD firstObjectOverlapOffset = ((DWORD)(-1));
+
+
+ UINT i;
+ for (i = 0; i < bmtMetaData->cFields; i++)
+ {
+ FieldDesc *pFD = bmtMFDescs->ppFieldDescList[i];
+ if (pFD == NULL || pFD->IsStatic())
+ {
+ continue;
+ }
+
+ UINT fieldExtent = 0;
+ if (!ClrSafeInt<UINT>::addition(pFD->GetOffset_NoLogging(), GetFieldSize(pFD), fieldExtent))
+ {
+ BuildMethodTableThrowException(COR_E_OVERFLOW);
+ }
+
+ if (fieldExtent > instanceSliceSize)
+ {
+ instanceSliceSize = fieldExtent;
+ }
+ }
+
+ CQuickBytes qb;
+ PREFIX_ASSUME(sizeof(BYTE) == 1);
+ BYTE *pFieldLayout = (BYTE*) qb.AllocThrows(instanceSliceSize * sizeof(BYTE));
+ for (i=0; i < instanceSliceSize; i++)
+ {
+ pFieldLayout[i] = empty;
+ }
+
+ // go through each field and look for invalid layout
+ // (note that we are more permissive than what Ecma allows. We only disallow the minimum set necessary to
+ // close security holes.)
+ //
+ // This is what we implement:
+ //
+ // 1. Verify that every OREF is on a valid alignment
+ // 2. Verify that OREFs only overlap with other OREFs.
+ // 3. If an OREF does overlap with another OREF, the class is marked unverifiable.
+ // 4. If an overlap of any kind occurs, the class will be marked NotTightlyPacked (affects ValueType.Equals()).
+ //
+ char emptyObject[sizeof(void*)];
+ char isObject[sizeof(void*)];
+ for (i = 0; i < sizeof(void*); i++)
+ {
+ emptyObject[i] = empty;
+ isObject[i] = oref;
+ }
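+
+ // Worked example (hypothetical offsets, 32-bit): an OREF placed at offset 0 tags
+ // bytes 0-3 of pFieldLayout as 'oref'. A second OREF at offset 0 then matches
+ // isObject and is recorded as legal-but-unverifiable overlap; an int at offset 2
+ // would instead hit an 'oref' byte in the memchr scan below and fail the load.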
+
+
+ ExplicitClassTrust explicitClassTrust;
+
+ UINT valueClassCacheIndex = ((UINT)(-1));
+ UINT badOffset = 0;
+ FieldDesc * pFD = NULL;
+ for (i = 0; i < bmtMetaData->cFields; i++)
+ {
+ // Note about this loop body:
+ //
+ // This loop is coded to make it as hard as possible to allow a field to be trusted when it shouldn't.
+ //
+ // Every path in this loop body must lead to an explicit decision as to whether the field nonoverlaps,
+ // overlaps in a verifiable fashion, overlaps in a nonverifiable fashion or overlaps in a completely illegal fashion.
+ //
+ // It must call fieldTrust.SetTrust() with the appropriate result. If you don't call it, fieldTrust's destructor
+ // will intentionally default to kNone and mark the entire class illegal.
+ //
+ // If your result is anything but kNone (class is illegal), you must also explicitly "continue" the loop.
+ // There is a "break" at end of this loop body that will abort the loop if you don't do this. And
+ // if you don't finish iterating through all the fields, this function will automatically mark the entire
+ // class illegal. This rule is a vestige of an earlier version of this function.
+
+ // This object's dtor will aggregate the trust decision for this field into the trust level for the class as a whole.
+ ExplicitFieldTrustHolder fieldTrust(&explicitClassTrust);
+
+ pFD = bmtMFDescs->ppFieldDescList[i];
+ if (pFD == NULL || pFD->IsStatic())
+ {
+ fieldTrust.SetTrust(ExplicitFieldTrust::kNonOverLayed);
+ continue;
+ }
+
+ // "i" indexes all fields, valueClassCacheIndex indexes non-static fields only. Don't get them confused!
+ valueClassCacheIndex++;
+
+ if (CorTypeInfo::IsObjRef(pFD->GetFieldType()))
+ {
+ // Check that the ref offset is pointer aligned
+ if ((pFD->GetOffset_NoLogging() & ((ULONG)sizeof(OBJECTREF) - 1)) != 0)
+ {
+ badOffset = pFD->GetOffset_NoLogging();
+ fieldTrust.SetTrust(ExplicitFieldTrust::kNone);
+
+ // If we got here, OREF field was not pointer aligned. THROW.
+ break;
+ }
+ // check if overlaps another object
+ if (memcmp((void *)&pFieldLayout[pFD->GetOffset_NoLogging()], (void *)isObject, sizeof(isObject)) == 0)
+ {
+ // If we got here, an OREF overlapped another OREF. We permit this but mark the class unverifiable.
+ fieldTrust.SetTrust(ExplicitFieldTrust::kLegal);
+
+ if (firstObjectOverlapOffset == ((DWORD)(-1)))
+ {
+ firstObjectOverlapOffset = pFD->GetOffset_NoLogging();
+ }
+
+ continue;
+ }
+ // check if is empty at this point
+ if (memcmp((void *)&pFieldLayout[pFD->GetOffset_NoLogging()], (void *)emptyObject, sizeof(emptyObject)) == 0)
+ {
+ // If we got here, this OREF is overlapping no other fields (yet). Record that these bytes now contain an OREF.
+ memset((void *)&pFieldLayout[pFD->GetOffset_NoLogging()], oref, sizeof(isObject));
+ fieldTrust.SetTrust(ExplicitFieldTrust::kNonOverLayed);
+ continue;
+ }
+
+ // If we got here, the OREF overlaps a non-OREF. THROW.
+ badOffset = pFD->GetOffset_NoLogging();
+ fieldTrust.SetTrust(ExplicitFieldTrust::kNone);
+ break;
+ }
+ else
+ {
+ UINT fieldSize;
+ if (pFD->IsByValue())
+ {
+ MethodTable *pByValueMT = pByValueClassCache[valueClassCacheIndex];
+ if (pByValueMT->ContainsPointers())
+ {
+ if ((pFD->GetOffset_NoLogging() & ((ULONG)sizeof(void*) - 1)) == 0)
+ {
+ ExplicitFieldTrust::TrustLevel trust;
+ DWORD firstObjectOverlapOffsetInsideValueClass = ((DWORD)(-1));
+ trust = CheckValueClassLayout(pByValueMT, &pFieldLayout[pFD->GetOffset_NoLogging()], &firstObjectOverlapOffsetInsideValueClass);
+ fieldTrust.SetTrust(trust);
+ if (firstObjectOverlapOffsetInsideValueClass != ((DWORD)(-1)))
+ {
+ if (firstObjectOverlapOffset == ((DWORD)(-1)))
+ {
+ firstObjectOverlapOffset = pFD->GetOffset_NoLogging() + firstObjectOverlapOffsetInsideValueClass;
+ }
+ }
+
+ if (trust != ExplicitFieldTrust::kNone)
+ {
+ continue;
+ }
+ else
+ {
+ // If we got here, then an OREF inside the valuetype illegally overlapped a non-OREF field. THROW.
+ badOffset = pFD->GetOffset_NoLogging();
+ break;
+ }
+ }
+ // If we got here, then a valuetype containing an OREF was misaligned.
+ badOffset = pFD->GetOffset_NoLogging();
+ fieldTrust.SetTrust(ExplicitFieldTrust::kNone);
+ break;
+ }
+ // no pointers so fall through to do standard checking
+ fieldSize = pByValueMT->GetNumInstanceFieldBytes();
+ }
+ else
+ {
+ // field size temporarily stored in pInterface field
+ fieldSize = GetFieldSize(pFD);
+ }
+
+ // If we got here, we are trying to place a non-OREF (or a valuetype composed of non-OREFs.)
+ // Look for any orefs under this field
+ BYTE *loc;
+ if ((loc = (BYTE*)memchr((void*)&pFieldLayout[pFD->GetOffset_NoLogging()], oref, fieldSize)) == NULL)
+ {
+ // If we have a nonoref in the range then we are doing an overlay
+ if(memchr((void*)&pFieldLayout[pFD->GetOffset_NoLogging()], nonoref, fieldSize))
+ {
+ fieldTrust.SetTrust(ExplicitFieldTrust::kVerifiable);
+ }
+ else
+ {
+ fieldTrust.SetTrust(ExplicitFieldTrust::kNonOverLayed);
+ }
+ memset((void*)&pFieldLayout[pFD->GetOffset_NoLogging()], nonoref, fieldSize);
+ continue;
+ }
+
+ // If we got here, we tried to place a non-OREF (or a valuetype composed of non-OREFs)
+ // on top of an OREF. THROW.
+ badOffset = (UINT)(loc - pFieldLayout);
+ fieldTrust.SetTrust(ExplicitFieldTrust::kNone);
+ break;
+ // anything else is an error
+ }
+
+ // We have to comment out this assert because otherwise, the compiler refuses to build because the _ASSERT is unreachable
+ // (Thanks for nothing, compiler, that's what the assert is trying to enforce!) But the intent of the assert is correct.
+ //_ASSERTE(!"You aren't supposed to be here. Some path inside the loop body did not execute an explicit break or continue.");
+
+
+ // If we got here, some code above failed to execute an explicit "break" or "continue." This is a bug! To be safe,
+ // we will put a catchall "break" here which will cause the typeload to abort (albeit with a probably misleading
+ // error message.)
+ break;
+ } // for(;;)
+
+ // We only break out of the loop above if we detected an error.
+ if (i < bmtMetaData->cFields || !explicitClassTrust.IsLegal())
+ {
+ ThrowFieldLayoutError(GetCl(),
+ GetModule(),
+ badOffset,
+ IDS_CLASSLOAD_EXPLICIT_LAYOUT);
+ }
+
+ if (!explicitClassTrust.IsVerifiable())
+ {
+ if (!Security::CanSkipVerification(GetAssembly()->GetDomainAssembly()))
+ {
+ ThrowFieldLayoutError(GetCl(),
+ GetModule(),
+ firstObjectOverlapOffset,
+ IDS_CLASSLOAD_UNVERIFIABLE_FIELD_LAYOUT);
+ }
+ }
+
+ if (!explicitClassTrust.IsNonOverLayed())
+ {
+ SetHasOverLayedFields();
+ }
+
+ if (IsBlittable() || IsManagedSequential())
+ {
+ // Bug 849333: We shouldn't update "bmtFP->NumInstanceFieldBytes"
+ // for Blittable/ManagedSequential types, as this would break backward compatibility
+ // for the size of types that return true for HasExplicitFieldOffsetLayout()
+ //
+ return;
+ }
+
+ FindPointerSeriesExplicit(instanceSliceSize, pFieldLayout);
+
+ // Fixup the offset to include parent as current offsets are relative to instance slice
+ // Could do this earlier, but it's just easier to assume instance relative for most
+ // of the earlier calculations
+
+ // Instance fields start right after the parent
+ S_UINT32 dwInstanceSliceOffset = S_UINT32(HasParent() ? GetParentMethodTable()->GetNumInstanceFieldBytes() : 0);
+ if (bmtGCSeries->numSeries != 0)
+ {
+ dwInstanceSliceOffset.AlignUp(sizeof(void*));
+ }
+ if (dwInstanceSliceOffset.IsOverflow())
+ {
+ // addition overflow or cast truncation
+ BuildMethodTableThrowException(IDS_CLASSLOAD_GENERAL);
+ }
+
+ S_UINT32 numInstanceFieldBytes = dwInstanceSliceOffset + S_UINT32(instanceSliceSize);
+
+ if (IsValueClass())
+ {
+ ULONG clstotalsize;
+ if (FAILED(GetMDImport()->GetClassTotalSize(GetCl(), &clstotalsize)))
+ {
+ clstotalsize = 0;
+ }
+
+ if (clstotalsize != 0)
+ {
+ // size must be large enough to accommodate the layout. If not, we use the layout size instead.
+ if (!numInstanceFieldBytes.IsOverflow() && clstotalsize >= numInstanceFieldBytes.Value())
+ {
+ numInstanceFieldBytes = S_UINT32(clstotalsize);
+ }
+ }
+ }
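+
+ // Example (hypothetical metadata): a value type declaring an explicit total size of
+ // 16 whose fields extend only to byte 12 gets 16 here; a declared size of 8 with
+ // the same fields is too small, so the computed layout size wins.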
+
+ // The GC requires that all valuetypes containing orefs be sized to a multiple of sizeof(void*).
+ if (bmtGCSeries->numSeries != 0)
+ {
+ numInstanceFieldBytes.AlignUp(sizeof(void*));
+ }
+ if (numInstanceFieldBytes.IsOverflow())
+ {
+ // addition overflow or cast truncation
+ BuildMethodTableThrowException(IDS_CLASSLOAD_GENERAL);
+ }
+
+ // Set the total size
+ bmtFP->NumInstanceFieldBytes = numInstanceFieldBytes.Value();
+
+ for (i = 0; i < bmtMetaData->cFields; i++)
+ {
+ FieldDesc * pTempFD = bmtMFDescs->ppFieldDescList[i];
+ if ((pTempFD == NULL) || pTempFD->IsStatic())
+ {
+ continue;
+ }
+ HRESULT hr = pTempFD->SetOffset(pTempFD->GetOffset_NoLogging() + dwInstanceSliceOffset.Value());
+ if (FAILED(hr))
+ {
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+ }
+} // MethodTableBuilder::HandleExplicitLayout
+
+//*******************************************************************************
+// make sure that no object fields are overlapped incorrectly; returns the aggregated
+// ExplicitFieldTrust::TrustLevel for the value class layout (kNone if an illegal overlap was found)
+/*static*/ ExplicitFieldTrust::TrustLevel MethodTableBuilder::CheckValueClassLayout(MethodTable * pMT, BYTE *pFieldLayout, DWORD *pFirstObjectOverlapOffset)
+{
+ STANDARD_VM_CONTRACT;
+
+
+ *pFirstObjectOverlapOffset = (DWORD)(-1);
+
+ // Build a layout of the value class. We don't easily know the sizes of all the fields, but
+ // we do know a) the vc is already consistent so we don't need to check its overlaps, and
+ // b) the size and location of all objectrefs. So build it by setting everything non-oref
+ // and then filling in the orefs
+ UINT fieldSize = pMT->GetNumInstanceFieldBytes();
+ CQuickBytes qb;
+ BYTE *vcLayout = (BYTE*) qb.AllocThrows(fieldSize * sizeof(BYTE));
+
+ memset((void*)vcLayout, nonoref, fieldSize);
+
+ // use pointer series to locate the orefs
+
+ CGCDesc* map = CGCDesc::GetCGCDescFromMT(pMT);
+ CGCDescSeries *pSeries = map->GetLowestSeries();
+
+ for (SIZE_T j = 0; j < map->GetNumSeries(); j++)
+ {
+ CONSISTENCY_CHECK(pSeries <= map->GetHighestSeries());
+
+ memset((void*)&vcLayout[pSeries->GetSeriesOffset()-sizeof(Object)], oref, pSeries->GetSeriesSize() + pMT->GetBaseSize());
+ pSeries++;
+ }
+
+
+ ExplicitClassTrust explicitClassTrust;
+
+ for (UINT i=0; i < fieldSize; i++) {
+
+ ExplicitFieldTrustHolder fieldTrust(&explicitClassTrust);
+
+ if (vcLayout[i] == oref) {
+ switch (pFieldLayout[i]) {
+ // oref <--> empty
+ case empty:
+ pFieldLayout[i] = oref;
+ fieldTrust.SetTrust(ExplicitFieldTrust::kNonOverLayed);
+ break;
+
+ // oref <--> nonoref
+ case nonoref:
+ fieldTrust.SetTrust(ExplicitFieldTrust::kNone);
+ break;
+
+ // oref <--> oref
+ case oref:
+ fieldTrust.SetTrust(ExplicitFieldTrust::kLegal);
+ if ((*pFirstObjectOverlapOffset) == ((DWORD)(-1)))
+ {
+ *pFirstObjectOverlapOffset = (DWORD)i;
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Can't get here.");
+ }
+ } else if (vcLayout[i] == nonoref) {
+ switch (pFieldLayout[i]) {
+ // nonoref <--> empty
+ case empty:
+ pFieldLayout[i] = nonoref;
+ fieldTrust.SetTrust(ExplicitFieldTrust::kNonOverLayed);
+ break;
+
+ // nonoref <--> nonoref
+ case nonoref:
+ fieldTrust.SetTrust(ExplicitFieldTrust::kVerifiable);
+ break;
+
+ // nonoref <--> oref
+ case oref:
+ fieldTrust.SetTrust(ExplicitFieldTrust::kNone);
+ break;
+
+ default:
+ _ASSERTE(!"Can't get here.");
+ }
+ } else {
+ _ASSERTE(!"Can't get here.");
+ }
+ }
+
+ return explicitClassTrust.GetTrustLevel();
+}
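+
+// Summary of the merge rules applied above (vc byte x enclosing byte):
+//   oref    x empty   -> kNonOverLayed      oref    x oref    -> kLegal (overlap)
+//   oref    x nonoref -> kNone              nonoref x oref    -> kNone
+//   nonoref x empty   -> kNonOverLayed      nonoref x nonoref -> kVerifiable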
+
+
+
+
+
+
+
+//*******************************************************************************
+void MethodTableBuilder::FindPointerSeriesExplicit(UINT instanceSliceSize,
+ BYTE *pFieldLayout)
+{
+ STANDARD_VM_CONTRACT;
+
+
+ // Allocate a structure to track the series. We know that the worst case is a
+ // ref-non-ref-non, and since only ref series are recorded and non-ref series
+ // are skipped, the max number of series is total instance size / 2 / sizeof(ref).
+ // But watch out for the case where we have e.g. an instanceSliceSize of 4.
+ DWORD sz = (instanceSliceSize + (2 * sizeof(OBJECTREF)) - 1);
+ bmtGCSeries->pSeries = new bmtGCSeriesInfo::Series[sz/2/sizeof(OBJECTREF)];
+
+ BYTE *loc = pFieldLayout;
+ BYTE *layoutEnd = pFieldLayout + instanceSliceSize;
+ while (loc < layoutEnd)
+ {
+ // Find the next OREF entry.
+ loc = (BYTE*)memchr((void*)loc, oref, layoutEnd-loc);
+ if (loc == NULL)
+ {
+ break;
+ }
+
+ // Find the next non-OREF entry
+ BYTE *cur = loc;
+ while(cur < layoutEnd && *cur == oref)
+ {
+ cur++;
+ }
+
+ // so we have a GC series at loc for cur-loc bytes
+ bmtGCSeries->pSeries[bmtGCSeries->numSeries].offset = (DWORD)(loc - pFieldLayout);
+ bmtGCSeries->pSeries[bmtGCSeries->numSeries].len = (DWORD)(cur - loc);
+
+ CONSISTENCY_CHECK(IS_ALIGNED(cur - loc, sizeof(size_t)));
+
+ bmtGCSeries->numSeries++;
+ loc = cur;
+ }
+
+ // Calculate the total series count including the parent, if a parent exists.
+
+ bmtFP->NumGCPointerSeries = bmtParent->NumParentPointerSeries + bmtGCSeries->numSeries;
+
+}
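+
+// Example (illustrative, 32-bit): a 16-byte slice whose map reads
+// [oref][oref][nonoref][oref] in pointer-sized units yields two series:
+// {offset 0, len 8} and {offset 12, len 4}; the parent's series count is then
+// added on top.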
+
+//*******************************************************************************
+VOID
+MethodTableBuilder::HandleGCForExplicitLayout()
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodTable *pMT = GetHalfBakedMethodTable();
+
+#ifdef FEATURE_COLLECTIBLE_TYPES
+ if (bmtFP->NumGCPointerSeries == 0 && pMT->Collectible())
+ {
+ // For collectible types, insert empty gc series
+ CGCDescSeries *pSeries;
+
+ CGCDesc::Init( (PVOID) pMT, 1);
+ pSeries = ((CGCDesc*)pMT)->GetLowestSeries();
+ pSeries->SetSeriesSize( (size_t) (0) - (size_t) pMT->GetBaseSize());
+ pSeries->SetSeriesOffset(sizeof(Object));
+ }
+ else
+#endif // FEATURE_COLLECTIBLE_TYPES
+ if (bmtFP->NumGCPointerSeries != 0)
+ {
+ pMT->SetContainsPointers();
+
+ // Copy the pointer series map from the parent
+ CGCDesc::Init( (PVOID) pMT, bmtFP->NumGCPointerSeries );
+ if (bmtParent->NumParentPointerSeries != 0)
+ {
+ size_t ParentGCSize = CGCDesc::ComputeSize(bmtParent->NumParentPointerSeries);
+ memcpy( (PVOID) (((BYTE*) pMT) - ParentGCSize), (PVOID) (((BYTE*) GetParentMethodTable()) - ParentGCSize), ParentGCSize - sizeof(UINT) );
+
+ }
+
+ UINT32 dwInstanceSliceOffset = AlignUp(HasParent() ? GetParentMethodTable()->GetNumInstanceFieldBytes() : 0, sizeof(void*));
+
+ // Build the pointer series map for the pointers in this instance
+ CGCDescSeries *pSeries = ((CGCDesc*)pMT)->GetLowestSeries();
+ for (UINT i=0; i < bmtGCSeries->numSeries; i++) {
+ // See gcdesc.h for an explanation of why we adjust by subtracting BaseSize
+ BAD_FORMAT_NOTHROW_ASSERT(pSeries <= CGCDesc::GetCGCDescFromMT(pMT)->GetHighestSeries());
+
+ pSeries->SetSeriesSize( (size_t) bmtGCSeries->pSeries[i].len - (size_t) pMT->GetBaseSize() );
+ pSeries->SetSeriesOffset(bmtGCSeries->pSeries[i].offset + sizeof(Object) + dwInstanceSliceOffset);
+ pSeries++;
+ }
+ }
+
+ delete [] bmtGCSeries->pSeries;
+ bmtGCSeries->pSeries = NULL;
+} // MethodTableBuilder::HandleGCForExplicitLayout
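+
+// Note (per gcdesc.h): SetSeriesSize stores (series length - BaseSize); the GC adds
+// the object's size back when it walks the series, so no information is lost by the
+// subtraction above.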
+
+static
+BOOL
+InsertMethodTable(
+ MethodTable *pNew,
+ MethodTable **pArray,
+ DWORD nArraySizeMax,
+ DWORD *pNumAssigned)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ for (DWORD j = 0; j < (*pNumAssigned); j++)
+ {
+ if (pNew == pArray[j])
+ {
+#ifdef _DEBUG
+ LOG((LF_CLASSLOADER, LL_INFO1000, "GENERICS: Found duplicate interface %s (%p) at position %d out of %d\n", pNew->GetDebugClassName(), pNew, j, *pNumAssigned));
+#endif
+ return pNew->HasInstantiation(); // bail out - we found a duplicate instantiated interface
+ }
+ else
+ {
+#ifdef _DEBUG
+ LOG((LF_CLASSLOADER, LL_INFO1000, " GENERICS: InsertMethodTable ignored interface %s (%p) at position %d out of %d\n", pArray[j]->GetDebugClassName(), pArray[j], j, *pNumAssigned));
+#endif
+ }
+ }
+ if (*pNumAssigned >= nArraySizeMax)
+ {
+ LOG((LF_CLASSLOADER, LL_INFO1000, "GENERICS: Found interface %s (%p) exceeding size %d of interface array\n", pNew->GetDebugClassName(), pNew, nArraySizeMax));
+ return TRUE;
+ }
+ LOG((LF_CLASSLOADER, LL_INFO1000, "GENERICS: Inserting interface %s (%p) at position %d\n", pNew->GetDebugClassName(), pNew, *pNumAssigned));
+ pArray[(*pNumAssigned)++] = pNew;
+ return FALSE;
+} // InsertMethodTable
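+
+// Note: the return value is a duplicate indicator, not a failure code. TRUE means an
+// instantiated duplicate was found (or the array is full), which steers the caller
+// onto the metadata-based algorithm; FALSE means the interface was appended or a
+// non-instantiated duplicate was ignored.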
+
+
+//*******************************************************************************
+// --------------------------------------------------------------------------------------------
+// Copy virtual slots inherited from parent:
+//
+// In types created at runtime, inherited virtual slots are initialized using the approximate
+// parent during method table building. This method will update them based on the exact parent.
+// In types loaded from an NGen image, inherited virtual slots from cross-module parents are not
+// initialized. This method will initialize them based on the actually loaded exact parent
+// if necessary.
+/* static */
+void MethodTableBuilder::CopyExactParentSlots(MethodTable *pMT, MethodTable *pApproxParentMT)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ if (pMT->IsZapped())
+ return;
+
+ DWORD nParentVirtuals = pMT->GetNumParentVirtuals();
+ if (nParentVirtuals == 0)
+ return;
+
+ _ASSERTE(nParentVirtuals == pApproxParentMT->GetNumVirtuals());
+
+ //
+ // Update all inherited virtual slots to match exact parent
+ //
+
+ if (!pMT->IsCanonicalMethodTable())
+ {
+ //
+ // Copy all slots for non-canonical methodtables to avoid touching methoddescs.
+ //
+ MethodTable * pCanonMT = pMT->GetCanonicalMethodTable();
+
+ // Do not write into vtable chunks shared with the parent. It would introduce a race
+ // with code:MethodDesc::SetStableEntryPointInterlocked.
+ //
+ // Non-canonical method tables either share everything or nothing so it is sufficient to check
+ // just the first indirection to detect sharing.
+ if (pMT->GetVtableIndirections()[0] != pCanonMT->GetVtableIndirections()[0])
+ {
+ for (DWORD i = 0; i < nParentVirtuals; i++)
+ {
+ PCODE target = pCanonMT->GetRestoredSlot(i);
+ pMT->SetSlot(i, target);
+ }
+ }
+ }
+ else
+ {
+ MethodTable::MethodDataWrapper hMTData(MethodTable::GetMethodData(pMT, FALSE));
+
+ MethodTable * pParentMT = pMT->GetParentMethodTable();
+
+ for (DWORD i = 0; i < nParentVirtuals; i++)
+ {
+ // fix up wrongly-inherited method descriptors
+ MethodDesc* pMD = hMTData->GetImplMethodDesc(i);
+ CONSISTENCY_CHECK(pMD == pMT->GetMethodDescForSlot(i));
+
+ if (pMD->GetMethodTable() == pMT)
+ continue;
+
+ // We need to re-inherit this slot from the exact parent.
+
+ DWORD indirectionIndex = MethodTable::GetIndexOfVtableIndirection(i);
+ if (pMT->GetVtableIndirections()[indirectionIndex] == pApproxParentMT->GetVtableIndirections()[indirectionIndex])
+ {
+ // The slot lives in a chunk shared from the approximate parent MT
+ // If so, we need to change to share the chunk from the exact parent MT
+
+#ifdef FEATURE_PREJIT
+ _ASSERTE(MethodTable::CanShareVtableChunksFrom(pParentMT, pMT->GetLoaderModule(), Module::GetPreferredZapModuleForMethodTable(pMT)));
+#else
+ _ASSERTE(MethodTable::CanShareVtableChunksFrom(pParentMT, pMT->GetLoaderModule()));
+#endif
+
+ pMT->GetVtableIndirections()[indirectionIndex] = pParentMT->GetVtableIndirections()[indirectionIndex];
+
+ i = MethodTable::GetEndSlotForVtableIndirection(indirectionIndex, nParentVirtuals) - 1;
+ continue;
+ }
+
+ // The slot lives in an unshared chunk. We need to update the slot contents
+ PCODE target = pParentMT->GetRestoredSlot(i);
+ pMT->SetSlot(i, target);
+ }
+ }
+} // MethodTableBuilder::CopyExactParentSlots
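+
+// Illustration (assumed chunking): if slots 0-7 share a single vtable indirection
+// chunk with the approximate parent, repointing that one indirection at the exact
+// parent's chunk fixes all of those slots at once, and the loop above then skips
+// ahead via GetEndSlotForVtableIndirection.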
+
+//*******************************************************************************
+/* static */
+void
+MethodTableBuilder::LoadExactInterfaceMap(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ BOOL hasInstantiatedInterfaces = FALSE;
+ MethodTable::InterfaceMapIterator it = pMT->IterateInterfaceMap();
+ while (it.Next())
+ {
+ if (it.GetInterface()->HasInstantiation())
+ {
+ hasInstantiatedInterfaces = TRUE;
+ break;
+ }
+ }
+
+ // If we have some instantiated interfaces, then we have lots more work to do...
+
+ // In the worst case we have to use the metadata to
+ // (a) load the exact interfaces and determine the order in which they
+ // go. We do this by re-running the interface layout algorithm
+ // and using metadata comparisons to place interfaces in the list.
+ // (b) do a check to see if any ambiguity in the interface dispatch map is introduced
+ // by the instantiation
+ // See code:#LoadExactInterfaceMap_Algorithm2
+ //
+ // However, we can do something simpler: we just use
+ // the loaded interface method tables to determine ordering. This can be done
+ // if there are no duplicate instantiated interfaces in the interface
+ // set.
+ // See code:#LoadExactInterfaceMap_Algorithm1.
+
+ if (!hasInstantiatedInterfaces)
+ {
+ return;
+ }
+
+ HRESULT hr;
+ TypeHandle thisTH(pMT);
+ SigTypeContext typeContext(thisTH);
+ MethodTable *pParentMT = pMT->GetParentMethodTable();
+
+ //#LoadExactInterfaceMap_Algorithm1
+ // Exact interface instantiation loading TECHNIQUE 1.
+ // (a) For interfaces inherited from an instantiated parent class, just copy down from exact parent
+ // (b) Grab newly declared interfaces by loading and then copying down all their inherited parents
+ // (c) But check for any exact duplicates along the way
+ // (d) If no duplicates then we can use the computed interface map we've created
+ // (e) If duplicates found then use the slow metadata-based technique code:#LoadExactInterfaceMap_Algorithm2
+ DWORD nInterfacesCount = pMT->GetNumInterfaces();
+ MethodTable **pExactMTs = (MethodTable**) _alloca(sizeof(MethodTable *) * nInterfacesCount);
+ DWORD nAssigned = 0;
+ BOOL duplicates = false;
+ if (pParentMT != NULL)
+ {
+ MethodTable::InterfaceMapIterator parentIt = pParentMT->IterateInterfaceMap();
+ while (parentIt.Next())
+ {
+ duplicates |= InsertMethodTable(parentIt.GetInterface(), pExactMTs, nInterfacesCount, &nAssigned);
+ }
+ }
+ InterfaceImplEnum ie(pMT->GetModule(), pMT->GetCl(), NULL);
+ while ((hr = ie.Next()) == S_OK)
+ {
+ MethodTable *pNewIntfMT = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pMT->GetModule(),
+ ie.CurrentToken(),
+ &typeContext,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef,
+ ClassLoader::LoadTypes,
+ CLASS_LOAD_EXACTPARENTS,
+ TRUE).GetMethodTable();
+
+ duplicates |= InsertMethodTable(pNewIntfMT, pExactMTs, nInterfacesCount, &nAssigned);
+ MethodTable::InterfaceMapIterator intIt = pNewIntfMT->IterateInterfaceMap();
+ while (intIt.Next())
+ {
+ duplicates |= InsertMethodTable(intIt.GetInterface(), pExactMTs, nInterfacesCount, &nAssigned);
+ }
+ }
+ if (FAILED(hr))
+ {
+ pMT->GetAssembly()->ThrowTypeLoadException(pMT->GetMDImport(), pMT->GetCl(), IDS_CLASSLOAD_BADFORMAT);
+ }
+#ifdef _DEBUG
+ duplicates |= EEConfig::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_AlwaysUseMetadataInterfaceMapLayout, FALSE);
+
+ //#InjectInterfaceDuplicates_LoadExactInterfaceMap
+ // If we are injecting duplicates also for non-generic interfaces in check builds, we have to use
+ // algorithm code:#LoadExactInterfaceMap_Algorithm2.
+ // Has to be in sync with code:#InjectInterfaceDuplicates_Main.
+ duplicates |= pMT->Debug_HasInjectedInterfaceDuplicates();
+#endif
+ CONSISTENCY_CHECK(duplicates || (nAssigned == pMT->GetNumInterfaces()));
+ if (duplicates)
+ {
+ //#LoadExactInterfaceMap_Algorithm2
+ // Exact interface instantiation loading TECHNIQUE 2 - The exact instantiation has caused some duplicates to
+ // appear in the interface map! This may not be an error: if the duplicates
+ // were ones that arose because of inheritance from
+ // a parent type then we accept that. For example
+ // class C<T> : I<T>
+ // class D<T> : C<T>, I<string>
+ // is acceptable even when loading D<string>. Note that in such a case
+ // there will be two entries for I<string> in the final interface map for D<string>.
+ // For dispatch the mappings in D take precedence.
+ //
+ // However we consider it an error if there is real ambiguity within
+ // the interface definitions within the one class, e.g.
+ // class E<T> : I<T>, I<string>
+ // In this situation it is not defined how to dispatch calls to I<string>: would
+ // we use the bindings for I<T> or I<string>?
+ //
+ // Because we may have had duplicates, the interface map we created above may not
+ // be the correct one: for example for D<string> above we would have computed
+ // a map with only one entry. This is incorrect: an exact instantiation's interface
+ // map must have entries that match the ordering of the interface map in the generic case
+ // (this is because code:#InterfaceMap_SupersetOfParent).
+ //
+ // So, in order to determine how to place the interfaces we need to go back to
+ // the metadata. We also do this to check if the presence of duplicates
+ // has caused any potential ambiguity, i.e. the E<string> case above.
+
+ // First we do a GetCheckpoint for the thread-based allocator. ExpandExactInheritedInterfaces allocates substitution chains
+ // on the thread allocator rather than on the stack.
+ Thread * pThread = GetThread();
+ CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+
+ // ***********************************************************
+ // ****** This must be consistent with code:ExpandApproxInterface etc. *******
+ //
+ // The correlation to ExpandApproxInterfaces etc. simply drops out by how we
+ // traverse interfaces.
+ // ***********************************************************
+
+ bmtExactInterfaceInfo bmtExactInterface;
+ bmtExactInterface.pInterfaceSubstitution = new (&pThread->m_MarshalAlloc) Substitution[pMT->GetNumInterfaces()];
+ bmtExactInterface.pExactMTs = pExactMTs;
+ bmtExactInterface.nAssigned = 0;
+ bmtExactInterface.typeContext = typeContext;
+
+ // Do the interfaces inherited from a parent class
+ if ((pParentMT != NULL) && (pParentMT->GetNumInterfaces() > 0))
+ {
+ Substitution * pParentSubstForTypeLoad = new (&pThread->m_MarshalAlloc) Substitution(
+ pMT->GetSubstitutionForParent(NULL));
+ Substitution * pParentSubstForComparing = new (&pThread->m_MarshalAlloc) Substitution(
+ pMT->GetSubstitutionForParent(NULL));
+ ExpandExactInheritedInterfaces(
+ &bmtExactInterface,
+ pParentMT,
+ pParentSubstForTypeLoad,
+ pParentSubstForComparing);
+ }
+#ifdef _DEBUG
+ //#ExactInterfaceMap_SupersetOfParent
+ // Check that parent's interface map is subset of this interface map
+ // See code:#InterfaceMap_SupersetOfParent
+ {
+ _ASSERTE(pParentMT->GetNumInterfaces() == bmtExactInterface.nAssigned);
+
+ MethodTable::InterfaceMapIterator parentInterfacesIterator = pParentMT->IterateInterfaceMap();
+ UINT32 nInterfaceIndex = 0;
+ while (parentInterfacesIterator.Next())
+ {
+ if (pMT->IsSharedByGenericInstantiations())
+ { // The type is a canonical instantiation (contains _Canon)
+ // The interface instantiations of parent can be different (see
+ // code:#InterfaceMap_CanonicalSupersetOfParent), therefore we cannot compare
+ // MethodTables
+ _ASSERTE(parentInterfacesIterator.GetInterfaceInfo()->GetApproxMethodTable(pParentMT->GetLoaderModule())->HasSameTypeDefAs(
+ bmtExactInterface.pExactMTs[nInterfaceIndex]));
+ }
+ else
+ { // It is not canonical instantiation, we can compare MethodTables
+ _ASSERTE(parentInterfacesIterator.GetInterface() == bmtExactInterface.pExactMTs[nInterfaceIndex]);
+ }
+ nInterfaceIndex++;
+ }
+ _ASSERTE(nInterfaceIndex == bmtExactInterface.nAssigned);
+ }
+#endif //_DEBUG
+
+ // If there are any __Canon instances in the type argument list, then we defer the
+ // ambiguity checking until an exact instantiation.
+ if (!pMT->IsSharedByGenericInstantiations())
+ {
+ // There are no __Canon types in the instantiation, so do ambiguity check.
+ bmtInterfaceAmbiguityCheckInfo bmtCheckInfo;
+ bmtCheckInfo.pMT = pMT;
+ bmtCheckInfo.ppInterfaceSubstitutionChains = new (&pThread->m_MarshalAlloc) Substitution *[pMT->GetNumInterfaces()];
+ bmtCheckInfo.ppExactDeclaredInterfaces = new (&pThread->m_MarshalAlloc) MethodTable *[pMT->GetNumInterfaces()];
+ bmtCheckInfo.nAssigned = 0;
+ bmtCheckInfo.typeContext = typeContext;
+ MethodTableBuilder::InterfacesAmbiguityCheck(&bmtCheckInfo, pMT->GetModule(), pMT->GetCl(), NULL);
+ }
+
+ // OK, there is no ambiguity amongst the instantiated interfaces declared on this class.
+ MethodTableBuilder::ExpandExactDeclaredInterfaces(
+ &bmtExactInterface,
+ pMT->GetModule(),
+ pMT->GetCl(),
+ NULL,
+ NULL
+ COMMA_INDEBUG(pMT));
+ CONSISTENCY_CHECK(bmtExactInterface.nAssigned == pMT->GetNumInterfaces());
+
+ // We cannot process interface duplicates on types with __Canon. The duplicates are processed on
+ // exact types only
+ if (!pMT->IsSharedByGenericInstantiations())
+ {
+ // Process all pairs of duplicates in the interface map:
+ // i.e. If there are 3 duplicates of the same interface at indexes: i1, i2 and i3, then
+ // process pairs of indexes [i1,i2], [i1,i3] and [i2,i3].
+ // - Update 'declared on type' flag for those interfaces which duplicate is 'declared on type'
+ // - Check interface method implementation ambiguity code:#DuplicateInterface_MethodAmbiguity
+ for (DWORD nOriginalIndex = 0; nOriginalIndex < nInterfacesCount; nOriginalIndex++)
+ {
+ // Search for duplicates further in the interface map
+ for (DWORD nDuplicateIndex = nOriginalIndex + 1; nDuplicateIndex < nInterfacesCount; nDuplicateIndex++)
+ {
+ if (pExactMTs[nOriginalIndex] != pExactMTs[nDuplicateIndex])
+ { // It's not a duplicate of original interface, skip it
+ continue;
+ }
+ // We found a duplicate
+
+ // Set 'declared on type' flag if either original or duplicate interface is
+ // 'declared on type'
+ if (pMT->IsInterfaceDeclaredOnClass(nOriginalIndex) ||
+ pMT->IsInterfaceDeclaredOnClass(nDuplicateIndex))
+ {
+ //
+ // Note that both checks are needed:
+ // A<T> : I<T>
+ // B<T,U> : A<T>, I<U>
+ // C<T,U> : B<T,U>, I<T> // Reimplements interface from A<T>
+ // After code:BuildMethodTableThrowing algorithm, this will happen:
+ // B<int,int> will have interface map similar to B<T,U>:
+ // I<int> ... not 'declared on type'
+ // I<int> ... 'declared on type'
+ // C<int,int> will have interface map similar to C<T,U>:
+ // I<int> ... 'declared on type'
+ // I<int> ... not 'declared on type'
+ //
+
+ pMT->SetInterfaceDeclaredOnClass(nOriginalIndex);
+ pMT->SetInterfaceDeclaredOnClass(nDuplicateIndex);
+ }
+
+ //#DuplicateInterface_MethodAmbiguity
+ //
+ // In the ideal world we would now check for interface method implementation
+ // ambiguity in the instantiation, but that would be a technical breaking change
+ // (against 2.0 RTM/SP1).
+ // Therefore we ALLOW when interface method is implemented twice through this
+ // original and duplicate interface.
+ //
+ // This ambiguity pattern is therefore ALLOWED (can be expressed only in IL, not in C#):
+ // I<T>
+ // void Print(T t);
+ // A<T> : I<T> // abstract class
+ // B<T,U> : A<T>, I<U>
+ // void Print(T t) { ... }
+ // void Print(U u) { ... }
+ // Now B<int,int> has 2 implementations of I<int>.Print(int), while B<int,char> is
+ // fine. Therefore an instantiation can introduce ambiguity.
+
+#if 0 // Removing this code for now as it is a technical breaking change (against CLR 2.0 RTM/SP1).
+ // We might decide later that we want to take this breaking change.
+ //
+ // Note that dispatch map entries are sorted by interface index and then interface
+ // method slot index.
+ //
+ DispatchMapTypeID originalTypeID = DispatchMapTypeID::InterfaceClassID(nOriginalIndex);
+ DispatchMap::EncodedMapIterator originalIt(pMT);
+ // Find first entry for original interface
+ while (originalIt.IsValid())
+ {
+ DispatchMapEntry *pEntry = originalIt.Entry();
+ if (pEntry->GetTypeID().ToUINT32() >= originalTypeID.ToUINT32())
+ { // Found the place where original interface entries should be (dispatch map is
+ // sorted)
+ break;
+ }
+ originalIt.Next();
+ }
+
+ DispatchMapTypeID duplicateTypeID = DispatchMapTypeID::InterfaceClassID(nDuplicateIndex);
+ DispatchMap::EncodedMapIterator duplicateIt(pMT);
+ // Find first entry for duplicate interface
+ while (duplicateIt.IsValid())
+ {
+ DispatchMapEntry *pEntry = duplicateIt.Entry();
+ if (pEntry->GetTypeID().ToUINT32() >= duplicateTypeID.ToUINT32())
+ { // Found the place where duplicate interface entries should be (dispatch map is
+ // sorted)
+ break;
+ }
+ duplicateIt.Next();
+ }
+
+ // Compare original and duplicate interface entries in the dispatch map if they contain
+ // different implementation for the same interface method
+ for (;;)
+ {
+ if (!originalIt.IsValid() || !duplicateIt.IsValid())
+ { // We reached end of one dispatch map iterator
+ break;
+ }
+ DispatchMapEntry *pOriginalEntry = originalIt.Entry();
+ if (pOriginalEntry->GetTypeID().ToUINT32() != originalTypeID.ToUINT32())
+ { // We reached behind original interface entries
+ break;
+ }
+ DispatchMapEntry *pDuplicateEntry = duplicateIt.Entry();
+ if (pDuplicateEntry->GetTypeID().ToUINT32() != duplicateTypeID.ToUINT32())
+ { // We reached behind duplicate interface entries
+ break;
+ }
+
+ if (pOriginalEntry->GetSlotNumber() == pDuplicateEntry->GetSlotNumber())
+ { // Found duplicate implementation of interface method
+ if (pOriginalEntry->GetTargetSlotNumber() != pDuplicateEntry->GetTargetSlotNumber())
+ { // Implementation of the slots is different
+ bmtErrorInfo bmtError;
+
+ bmtError.pModule = pMT->GetModule();
+ bmtError.cl = pMT->GetCl();
+ bmtError.resIDWhy = IDS_CLASSLOAD_MI_MULTIPLEOVERRIDES;
+ bmtError.szMethodNameForError = NULL;
+ bmtError.pThrowable = NULL;
+
+ MethodDesc *pMD = pMT->GetMethodDescForSlot(pDuplicateEntry->GetTargetSlotNumber());
+ bmtError.dMethodDefInError = pMD->GetMemberDef();
+
+ BuildMethodTableThrowException(COR_E_TYPELOAD, bmtError);
+ }
+ // The method is implemented by the same slot on both interfaces (original and
+ // duplicate)
+
+ // Process next dispatch map entry
+ originalIt.Next();
+ duplicateIt.Next();
+ continue;
+ }
+ // Move iterator representing smaller interface method slot index (the dispatch map
+ // is sorted by slot indexes)
+ if (pOriginalEntry->GetSlotNumber() < pDuplicateEntry->GetSlotNumber())
+ {
+ originalIt.Next();
+ continue;
+ }
+ _ASSERTE(pOriginalEntry->GetSlotNumber() > pDuplicateEntry->GetSlotNumber());
+ duplicateIt.Next();
+ }
+#endif //0
+ }
+ // All duplicates of this original interface were processed
+ }
+ // All pairs of duplicates in the interface map are processed
+ }
+ }
+ // Duplicates in the interface map are resolved
+
+ // OK, if we've got this far then pExactMTs should now hold the array of exact instantiated interfaces.
+ MethodTable::InterfaceMapIterator thisIt = pMT->IterateInterfaceMap();
+ DWORD i = 0;
+ while (thisIt.Next())
+ {
+#ifdef _DEBUG
+ MethodTable*pOldMT = thisIt.GetInterface();
+ MethodTable *pNewMT = pExactMTs[i];
+ CONSISTENCY_CHECK(pOldMT->HasSameTypeDefAs(pNewMT));
+#endif // _DEBUG
+ thisIt.SetInterface(pExactMTs[i]);
+ i++;
+ }
+
+} // MethodTableBuilder::LoadExactInterfaceMap
+
+//*******************************************************************************
+void
+MethodTableBuilder::ExpandExactInheritedInterfaces(
+ bmtExactInterfaceInfo * bmtInfo,
+ MethodTable * pMT,
+ const Substitution * pSubstForTypeLoad,
+ Substitution * pSubstForComparing)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodTable *pParentMT = pMT->GetParentMethodTable();
+
+ // Backup type's substitution chain for comparing interfaces
+ Substitution substForComparingBackup = *pSubstForComparing;
+ // Make type an open type for comparing interfaces
+ *pSubstForComparing = Substitution();
+
+ if (pParentMT)
+ {
+ // Chain parent's substitution for exact type load
+ Substitution * pParentSubstForTypeLoad = new (&GetThread()->m_MarshalAlloc) Substitution(
+ pMT->GetSubstitutionForParent(pSubstForTypeLoad));
+
+ // Chain parent's substitution for comparing interfaces (note that this type is temporarily
+ // considered as open type)
+ Substitution * pParentSubstForComparing = new (&GetThread()->m_MarshalAlloc) Substitution(
+ pMT->GetSubstitutionForParent(pSubstForComparing));
+
+ ExpandExactInheritedInterfaces(
+ bmtInfo,
+ pParentMT,
+ pParentSubstForTypeLoad,
+ pParentSubstForComparing);
+ }
+ ExpandExactDeclaredInterfaces(
+ bmtInfo,
+ pMT->GetModule(),
+ pMT->GetCl(),
+ pSubstForTypeLoad,
+ pSubstForComparing
+ COMMA_INDEBUG(pMT));
+
+ // Restore type's substitution chain for comparing interfaces
+ *pSubstForComparing = substForComparingBackup;
+} // MethodTableBuilder::ExpandExactInheritedInterfaces
+
+//*******************************************************************************
+/* static */
+void
+MethodTableBuilder::ExpandExactDeclaredInterfaces(
+ bmtExactInterfaceInfo * bmtInfo,
+ Module * pModule,
+ mdToken typeDef,
+ const Substitution * pSubstForTypeLoad,
+ Substitution * pSubstForComparing
+ COMMA_INDEBUG(MethodTable * dbg_pClassMT))
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr;
+ InterfaceImplEnum ie(pModule, typeDef, NULL);
+ while ((hr = ie.Next()) == S_OK)
+ {
+ MethodTable * pInterface = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(
+ pModule,
+ ie.CurrentToken(),
+ &bmtInfo->typeContext,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef,
+ ClassLoader::LoadTypes,
+ CLASS_LOAD_EXACTPARENTS,
+ TRUE,
+ pSubstForTypeLoad).GetMethodTable();
+
+ Substitution ifaceSubstForTypeLoad(ie.CurrentToken(), pModule, pSubstForTypeLoad);
+ Substitution ifaceSubstForComparing(ie.CurrentToken(), pModule, pSubstForComparing);
+ ExpandExactInterface(
+ bmtInfo,
+ pInterface,
+ &ifaceSubstForTypeLoad,
+ &ifaceSubstForComparing
+ COMMA_INDEBUG(dbg_pClassMT));
+ }
+ if (FAILED(hr))
+ {
+ pModule->GetAssembly()->ThrowTypeLoadException(pModule->GetMDImport(), typeDef, IDS_CLASSLOAD_BADFORMAT);
+ }
+} // MethodTableBuilder::ExpandExactDeclaredInterfaces
+
+//*******************************************************************************
+void
+MethodTableBuilder::ExpandExactInterface(
+ bmtExactInterfaceInfo * bmtInfo,
+ MethodTable * pIntf,
+ const Substitution * pSubstForTypeLoad_OnStack, // Allocated on stack!
+ const Substitution * pSubstForComparing_OnStack // Allocated on stack!
+ COMMA_INDEBUG(MethodTable * dbg_pClassMT))
+{
+ STANDARD_VM_CONTRACT;
+
+ // ****** This must be consistent with code:MethodTableBuilder::ExpandApproxInterface ******
+
+ // Is it already present according to the "generic" layout of the interfaces?
+ // Note we use exactly the same algorithm as when we
+ // determined the layout of the interface map for the "generic" version of the class.
+ for (DWORD i = 0; i < bmtInfo->nAssigned; i++)
+ {
+ // Type Equivalence is not respected for this comparison as you can have multiple type equivalent interfaces on a class
+ TokenPairList newVisited = TokenPairList::AdjustForTypeEquivalenceForbiddenScope(NULL);
+ if (MetaSig::CompareTypeDefsUnderSubstitutions(bmtInfo->pExactMTs[i],
+ pIntf,
+ &bmtInfo->pInterfaceSubstitution[i],
+ pSubstForComparing_OnStack,
+ &newVisited))
+ {
+#ifdef _DEBUG
+ //#InjectInterfaceDuplicates_ExactInterfaces
+ // We will inject duplicate interfaces in check builds.
+ // Has to be in sync with code:#InjectInterfaceDuplicates_Main.
+ if (dbg_pClassMT->Debug_HasInjectedInterfaceDuplicates())
+ { // Just pretend we didn't find this match
+ break;
+ }
+#endif //_DEBUG
+ return; // found it, don't add it again
+ }
+ }
+
+ // Add the interface and its sub-interfaces
+ DWORD n = bmtInfo->nAssigned;
+ bmtInfo->pExactMTs[n] = pIntf;
+ bmtInfo->pInterfaceSubstitution[n] = *pSubstForComparing_OnStack;
+ bmtInfo->nAssigned++;
+
+ Substitution * pSubstForTypeLoad = new (&GetThread()->m_MarshalAlloc) Substitution(*pSubstForTypeLoad_OnStack);
+
+ ExpandExactDeclaredInterfaces(
+ bmtInfo,
+ pIntf->GetModule(),
+ pIntf->GetCl(),
+ pSubstForTypeLoad,
+ &bmtInfo->pInterfaceSubstitution[n]
+ COMMA_INDEBUG(dbg_pClassMT));
+} // MethodTableBuilder::ExpandExactInterface
+
+//*******************************************************************************
+/* static */
+void MethodTableBuilder::InterfacesAmbiguityCheck(bmtInterfaceAmbiguityCheckInfo *bmtCheckInfo,
+ Module *pModule,
+ mdToken typeDef,
+ const Substitution *pSubstChain)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr;
+ InterfaceImplEnum ie(pModule, typeDef, pSubstChain);
+ while ((hr = ie.Next()) == S_OK)
+ {
+ MethodTable *pInterface =
+ ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, ie.CurrentToken(),
+ &bmtCheckInfo->typeContext,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef,
+ ClassLoader::LoadTypes,
+ CLASS_LOAD_EXACTPARENTS,
+ TRUE,
+ pSubstChain).GetMethodTable();
+ InterfaceAmbiguityCheck(bmtCheckInfo, ie.CurrentSubst(), pInterface);
+ }
+ if (FAILED(hr))
+ {
+ pModule->GetAssembly()->ThrowTypeLoadException(pModule->GetMDImport(), typeDef, IDS_CLASSLOAD_BADFORMAT);
+ }
+}
+
+//*******************************************************************************
+void MethodTableBuilder::InterfaceAmbiguityCheck(bmtInterfaceAmbiguityCheckInfo *bmtCheckInfo,
+ const Substitution *pItfSubstChain,
+ MethodTable *pIntf)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Is it already in the generic version of the freshly declared interfaces? We
+ // do this based on metadata, i.e. via the substitution chains.
+ // Note we use exactly the same algorithm as when we
+ // determined the layout of the interface map for the "generic" version of the class.
+ for (DWORD i = 0; i < bmtCheckInfo->nAssigned; i++)
+ {
+ // Type Equivalence is not respected for this comparison as you can have multiple type equivalent interfaces on a class
+ TokenPairList newVisited = TokenPairList::AdjustForTypeEquivalenceForbiddenScope(NULL);
+ if (MetaSig::CompareTypeDefsUnderSubstitutions(bmtCheckInfo->ppExactDeclaredInterfaces[i],
+ pIntf,
+ bmtCheckInfo->ppInterfaceSubstitutionChains[i],
+ pItfSubstChain,
+ &newVisited))
+ return; // found it, don't add it again
+ }
+
+ // OK, so it isn't a duplicate based on the generic IL, now check if the instantiation
+ // makes it a duplicate.
+ for (DWORD i = 0; i < bmtCheckInfo->nAssigned; i++)
+ {
+ if (bmtCheckInfo->ppExactDeclaredInterfaces[i] == pIntf)
+ {
+ bmtCheckInfo->pMT->GetModule()->GetAssembly()->ThrowTypeLoadException(bmtCheckInfo->pMT->GetMDImport(),
+ bmtCheckInfo->pMT->GetCl(),
+ IDS_CLASSLOAD_OVERLAPPING_INTERFACES);
+ }
+ }
+
+ DWORD n = bmtCheckInfo->nAssigned;
+ bmtCheckInfo->ppExactDeclaredInterfaces[n] = pIntf;
+ bmtCheckInfo->ppInterfaceSubstitutionChains[n] = new (&GetThread()->m_MarshalAlloc) Substitution[pItfSubstChain->GetLength()];
+ pItfSubstChain->CopyToArray(bmtCheckInfo->ppInterfaceSubstitutionChains[n]);
+
+ bmtCheckInfo->nAssigned++;
+ InterfacesAmbiguityCheck(bmtCheckInfo,pIntf->GetModule(),pIntf->GetCl(),pItfSubstChain);
+}
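+
+// Example (from the algorithm notes in LoadExactInterfaceMap): loading E<string>
+// where class E<T> : I<T>, I<string> makes both declared interfaces instantiate to
+// I<string>; the exact-match scan above detects this and throws
+// IDS_CLASSLOAD_OVERLAPPING_INTERFACES.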
+
+#ifdef FEATURE_REMOTING // affects only remoting-related info
+//*******************************************************************************
+// Private helper method used by the code below to check whether the given
+// method is annotated to be a VTS event callback.
+BOOL MethodTableBuilder::CheckForVtsEventMethod(IMDInternalImport *pImport,
+ MethodDesc *pMD,
+ DWORD dwAttrs,
+ LPCUTF8 szAttrName,
+ MethodDesc **ppMethodDesc)
+{
+ STANDARD_VM_CONTRACT;
+
+ // For each method with an attribute we need to check that:
+ // o The method is not static, virtual, abstract or generic.
+ // o The signature is correct.
+ // o No other method on the same type is marked with the same
+ // attribute.
+
+ if (pImport->GetCustomAttributeByName(pMD->GetMemberDef(),
+ szAttrName,
+ NULL,
+ NULL) == S_OK)
+ {
+ if (IsMdStatic(dwAttrs) ||
+ IsMdVirtual(dwAttrs) ||
+ IsMdAbstract(dwAttrs) ||
+ pMD->IsGenericMethodDefinition())
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_INVALID_VTS_METHOD, pMD->GetMemberDef());
+ }
+
+ // Check whether we've seen one of these methods before.
+ if (*ppMethodDesc != NULL)
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_TOO_MANY_VTS_METHODS, szAttrName);
+ }
+
+ // Check the signature, it should be "void M(StreamingContext)".
+ DWORD cbSig;
+ PCCOR_SIGNATURE pSig;
+ if (FAILED(pImport->GetSigOfMethodDef(pMD->GetMemberDef(), &cbSig, &pSig)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ // Should be an instance method with no generic type parameters.
+ if (CorSigUncompressCallingConv(pSig) != IMAGE_CEE_CS_CALLCONV_HASTHIS)
+ goto BadSignature;
+
+ // Should have one argument.
+ if (CorSigUncompressData(pSig) != 1)
+ goto BadSignature;
+
+ // And a return type of void.
+ if (*pSig++ != (BYTE)ELEMENT_TYPE_VOID)
+ goto BadSignature;
+
+ // The argument should be a value type.
+ if (*pSig++ != (BYTE)ELEMENT_TYPE_VALUETYPE)
+ goto BadSignature;
+
+ // Now the tricky bit: we want to verify the value type is
+ // StreamingContext, but we don't want to simply load the type since it
+ // might be any other arbitrary type and cause recursive loading
+ // problems. So we manually check the type via the metadata APIs
+ // instead.
+ mdToken tkType = CorSigUncompressToken(pSig);
+ LPCUTF8 szType;
+ LPCUTF8 szNamespace;
+
+ // Compute type name and namespace.
+ if (TypeFromToken(tkType) == mdtTypeDef)
+ {
+ if (FAILED(pImport->GetNameOfTypeDef(tkType, &szType, &szNamespace)))
+ {
+ goto BadSignature;
+ }
+ }
+ else
+ {
+ _ASSERTE(TypeFromToken(tkType) == mdtTypeRef);
+ if (FAILED(pImport->GetNameOfTypeRef(tkType, &szNamespace, &szType)))
+ {
+ goto BadSignature;
+ }
+ }
+
+ // Do the names match?
+ if (strcmp(szType, g_StreamingContextName) != 0 ||
+ strcmp(szNamespace, g_SerializationNS) != 0)
+ goto BadSignature;
+
+ // For typedefs we can directly check whether the current module is
+ // part of mscorlib. For refs we have to dig deeper (into the token
+ // resolution scope).
+ if (TypeFromToken(tkType) == mdtTypeDef)
+ {
+ if (bmtError->pModule->GetAssembly()->GetManifestModule() != SystemDomain::SystemAssembly()->GetManifestModule())
+ goto BadSignature;
+ }
+ else
+ {
+ // The scope needs to be an assembly ref.
+ mdToken tkScope;
+ if (FAILED(pImport->GetResolutionScopeOfTypeRef(tkType, &tkScope)))
+ {
+ goto BadSignature;
+ }
+ if (TypeFromToken(tkScope) != mdtAssemblyRef)
+ goto BadSignature;
+
+ // Fetch the name and public key or public key token.
+ BYTE *pbPublicKeyOrToken;
+ DWORD cbPublicKeyOrToken;
+ LPCSTR szAssembly;
+ DWORD dwAssemblyFlags;
+ if (FAILED(pImport->GetAssemblyRefProps(
+ tkScope,
+ (const void**)&pbPublicKeyOrToken,
+ &cbPublicKeyOrToken,
+ &szAssembly,
+ NULL, // AssemblyMetaDataInternal: we don't care about version, culture etc.
+ NULL, // Hash value pointer, obsolete information
+ NULL, // Byte count for above
+ &dwAssemblyFlags)))
+ {
+ goto BadSignature;
+ }
+
+ // Validate the name.
+ if (stricmpUTF8(szAssembly, g_psBaseLibraryName) != 0)
+ goto BadSignature;
+
+ // And the public key or token, whichever was burned into the reference by the compiler. For mscorlib this is the ECMA key or
+ // token.
+ if (IsAfPublicKeyToken(dwAssemblyFlags))
+ {
+ if (cbPublicKeyOrToken != sizeof(g_rbNeutralPublicKeyToken) ||
+ memcmp(pbPublicKeyOrToken, g_rbNeutralPublicKeyToken, cbPublicKeyOrToken) != 0)
+ goto BadSignature;
+ }
+ else
+ {
+ if (cbPublicKeyOrToken != sizeof(g_rbNeutralPublicKey) ||
+ memcmp(pbPublicKeyOrToken, g_rbNeutralPublicKey, cbPublicKeyOrToken) != 0)
+ goto BadSignature;
+ }
+ }
+
+ // We managed to pass all tests; record this method.
+ *ppMethodDesc = pMD;
+
+ return TRUE;
+ }
+
+ return FALSE;
+
+ BadSignature:
+ BuildMethodTableThrowException(IDS_CLASSLOAD_INVALID_VTS_SIG, pMD->GetMemberDef());
+}
+
+//*******************************************************************************
+// Names of the various VTS custom attributes
+#define VTS_ON_SERIALIZING_ATTRIBUTE "System.Runtime.Serialization.OnSerializingAttribute"
+#define VTS_ON_SERIALIZED_ATTRIBUTE "System.Runtime.Serialization.OnSerializedAttribute"
+#define VTS_ON_DESERIALIZING_ATTRIBUTE "System.Runtime.Serialization.OnDeserializingAttribute"
+#define VTS_ON_DESERIALIZED_ATTRIBUTE "System.Runtime.Serialization.OnDeserializedAttribute"
+#define VTS_OPTIONAL_FIELD_ATTRIBUTE "System.Runtime.Serialization.OptionalFieldAttribute"
+
+//*******************************************************************************
+// Look for VTS event methods or fields with interesting serialization
+// attributes on this type (only called for serializable types).
+
+VOID MethodTableBuilder::ScanTypeForVtsInfo()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(IsTdSerializable(GetAttrClass()));
+ }
+ CONTRACTL_END;
+
+ DWORD i;
+ // Scan all the non-virtual, non-abstract, non-generic instance methods for
+ // one of the special custom attributes indicating a VTS event method.
+ DeclaredMethodIterator it(*this);
+ while (it.Next())
+ {
+ if (CheckForVtsEventMethod(GetMDImport(),
+ it->GetMethodDesc(),
+ it.Attrs(),
+ VTS_ON_SERIALIZING_ATTRIBUTE,
+ &bmtMFDescs->pOnSerializingMethod))
+ bmtMFDescs->fNeedsRemotingVtsInfo = true;
+
+ if (CheckForVtsEventMethod(GetMDImport(),
+ it->GetMethodDesc(),
+ it.Attrs(),
+ VTS_ON_SERIALIZED_ATTRIBUTE,
+ &bmtMFDescs->pOnSerializedMethod))
+ bmtMFDescs->fNeedsRemotingVtsInfo = true;
+
+ if (CheckForVtsEventMethod(GetMDImport(),
+ it->GetMethodDesc(),
+ it.Attrs(),
+ VTS_ON_DESERIALIZING_ATTRIBUTE,
+ &bmtMFDescs->pOnDeserializingMethod))
+ bmtMFDescs->fNeedsRemotingVtsInfo = true;
+
+ if (CheckForVtsEventMethod(GetMDImport(),
+ it->GetMethodDesc(),
+ it.Attrs(),
+ VTS_ON_DESERIALIZED_ATTRIBUTE,
+ &bmtMFDescs->pOnDeserializedMethod))
+ bmtMFDescs->fNeedsRemotingVtsInfo = true;
+ }
+
+ // Scan all the instance fields introduced on this type for NotSerialized or
+ // OptionalField attributes.
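+ // For illustration, in C# (sketch): "[NonSerialized] int m_cache;" sets the
+ // fdNotSerialized metadata flag, while "[OptionalField] int m_added;" is carried
+ // as a custom attribute; the field names here are hypothetical.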
+ DWORD dwNumIntroducedInstanceFields = bmtEnumFields->dwNumInstanceFields;
+ FieldDesc *pFieldDescList = GetApproxFieldDescListRaw();
+ for (i = 0; i < dwNumIntroducedInstanceFields; i++)
+ {
+ FieldDesc *pFD = &pFieldDescList[i];
+ DWORD dwFlags;
+
+ if (FAILED(GetMDImport()->GetFieldDefProps(pFD->GetMemberDef(), &dwFlags)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+ if (IsFdNotSerialized(dwFlags))
+ bmtMFDescs->SetFieldNotSerialized(i, dwNumIntroducedInstanceFields);
+
+ if (GetMDImport()->GetCustomAttributeByName(pFD->GetMemberDef(),
+ VTS_OPTIONAL_FIELD_ATTRIBUTE,
+ NULL,
+ NULL) == S_OK)
+ bmtMFDescs->SetFieldOptionallySerialized(i, dwNumIntroducedInstanceFields);
+ }
+}
+#endif // FEATURE_REMOTING
+
+//*******************************************************************************
+void MethodTableBuilder::CheckForSystemTypes()
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodTable * pMT = GetHalfBakedMethodTable();
+ EEClass * pClass = GetHalfBakedClass();
+
+ // We can exit early for generic types - there is just one case to check for.
+ if (g_pNullableClass != NULL && bmtGenerics->HasInstantiation())
+ {
+ _ASSERTE(g_pNullableClass->IsNullable());
+
+ // Pre-compute whether the class is a Nullable<T> so that code:Nullable::IsNullableType is efficient.
+ // This is useful for the performance of boxing/unboxing a Nullable.
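+ // (E.g., boxing a Nullable<int> whose HasValue is false yields a null reference,
+ // so this check lies on the boxing/unboxing fast paths.)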
+ if (GetCl() == g_pNullableClass->GetCl())
+ pMT->SetIsNullable();
+ return;
+ }
+
+ if (IsNested() || IsEnum())
+ return;
+
+ LPCUTF8 name, nameSpace;
+
+ if (FAILED(GetMDImport()->GetNameOfTypeDef(GetCl(), &name, &nameSpace)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ if (IsValueClass())
+ {
+ //
+ // Value types
+ //
+
+ // All special value types are in the system namespace
+ if (strcmp(nameSpace, g_SystemNS) != 0)
+ return;
+
+ // Check if it is a primitive type
+ CorElementType type = CorTypeInfo::FindPrimitiveType(name);
+ if (type != ELEMENT_TYPE_END)
+ {
+ pMT->SetInternalCorElementType(type);
+ pMT->SetIsTruePrimitive();
+
+#ifdef _DEBUG
+ if (FAILED(GetMDImport()->GetNameOfTypeDef(GetCl(), &name, &nameSpace)))
+ {
+ name = nameSpace = "Invalid TypeDef record";
+ }
+ LOG((LF_CLASSLOADER, LL_INFO10000, "%s::%s marked as primitive type %i\n", nameSpace, name, type));
+#endif // _DEBUG
+
+ if (type == ELEMENT_TYPE_TYPEDBYREF)
+ {
+ pClass->SetContainsStackPtr();
+ }
+ }
+ else if (strcmp(name, g_NullableName) == 0)
+ {
+ pMT->SetIsNullable();
+ }
+ else if (strcmp(name, g_ArgIteratorName) == 0)
+ {
+ // Mark the special types that have embedded stack pointers in them
+ pClass->SetContainsStackPtr();
+ }
+ else if (strcmp(name, g_RuntimeArgumentHandleName) == 0)
+ {
+ pClass->SetContainsStackPtr();
+#ifndef _TARGET_X86_
+ pMT->SetInternalCorElementType (ELEMENT_TYPE_I);
+#endif
+ }
+#ifndef _TARGET_X86_
+ else if (strcmp(name, g_RuntimeMethodHandleInternalName) == 0)
+ {
+ pMT->SetInternalCorElementType (ELEMENT_TYPE_I);
+ }
+#endif
+#if defined(ALIGN_ACCESS) || defined(FEATURE_64BIT_ALIGNMENT)
+ else if (strcmp(name, g_DecimalName) == 0)
+ {
+ // This is required because the native layout of System.Decimal causes it to be
+ // aligned differently from the layout of the native DECIMAL structure, which can
+ // cause data misalignment exceptions if Decimal is embedded in another type.
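+ // (Illustrative: a struct embedding a Decimal field must then place it on an
+ // 8-byte boundary, per the sizeof(ULONGLONG) requirement recorded below.)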
+
+ EEClassLayoutInfo* pLayout = pClass->GetLayoutInfo();
+ pLayout->m_LargestAlignmentRequirementOfAllMembers = sizeof(ULONGLONG);
+ pLayout->m_ManagedLargestAlignmentRequirementOfAllMembers = sizeof(ULONGLONG);
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+ // Also need to mark the type so it will be allocated on a 64-bit boundary for
+ // platforms that won't do this naturally.
+ SetAlign8Candidate();
+#endif
+ }
+#endif // ALIGN_ACCESS || FEATURE_64BIT_ALIGNMENT
+ }
+ else
+ {
+ //
+ // Reference types
+ //
+ if (strcmp(name, g_StringName) == 0 && strcmp(nameSpace, g_SystemNS) == 0)
+ {
+ // Strings are not "normal" objects, so we need to mess with their method table a bit
+ // so that the GC can figure out how big each string is...
+ DWORD baseSize = ObjSizeOf(StringObject) + sizeof(WCHAR);
+ pMT->SetBaseSize(baseSize); // NULL character included
+
+ GetHalfBakedClass()->SetBaseSizePadding(baseSize - bmtFP->NumInstanceFieldBytes);
+
+ pMT->SetComponentSize(2);
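+
+ // The GC can then size any instance as baseSize + (string length * component size),
+ // so no per-instance size needs to be stored beyond the length field itself.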
+ }
+ else if (strcmp(name, g_CriticalFinalizerObjectName) == 0 && strcmp(nameSpace, g_ConstrainedExecutionNS) == 0)
+ {
+ // To introduce a class with a critical finalizer,
+ // we'll set the bit here.
+ pMT->SetHasCriticalFinalizer();
+ }
+#ifdef FEATURE_REMOTING
+ else if (strcmp(name, g_TransparentProxyName) == 0 && strcmp(nameSpace, g_ProxiesNS) == 0)
+ {
+ pMT->SetTransparentProxy();
+
+ // This ensures that we take the slow path in JIT_IsInstanceOfClass
+ pMT->SetHasTypeEquivalence();
+ }
+#endif // FEATURE_REMOTING
+#ifdef FEATURE_COMINTEROP
+ else
+ {
+ bool bIsComObject = false;
+ bool bIsRuntimeClass = false;
+
+ if (strcmp(name, g_ComObjectName) == 0 && strcmp(nameSpace, g_SystemNS) == 0)
+ bIsComObject = true;
+
+ if (strcmp(name, g_RuntimeClassName) == 0 && strcmp(nameSpace, g_WinRTNS) == 0)
+ bIsRuntimeClass = true;
+
+ if (bIsComObject || bIsRuntimeClass)
+ {
+ // Make System.__ComObject/System.Runtime.InteropServices.WindowsRuntime.RuntimeClass a ComImport type
+ // We can't do it using an attribute, as C# won't allow putting code in ComImport types
+ pMT->SetComObjectType();
+
+ // COM objects need an optional field on the EEClass, so ensure this class instance has allocated
+ // the optional field descriptor.
+ EnsureOptionalFieldsAreAllocated(pClass, m_pAllocMemTracker, GetLoaderAllocator()->GetLowFrequencyHeap());
+ }
+
+ if (bIsRuntimeClass)
+ {
+ // Note that we set it here to avoid the type loader considering RuntimeClass as a normal WindowsImportType,
+ // as functions in RuntimeClass don't go through COM interop
+ GetHalfBakedClass()->SetProjectedFromWinRT();
+ }
+ }
+#endif // FEATURE_COMINTEROP
+ }
+}
+
+//==========================================================================================
+// Helper to create a new method table. This is the only
+// way to allocate a new MT. Don't try calling new / ctor.
+// Called from SetupMethodTable
+// This needs to be kept consistent with MethodTable::GetSavedExtent()
+MethodTable * MethodTableBuilder::AllocateNewMT(Module *pLoaderModule,
+ DWORD dwVtableSlots,
+ DWORD dwVirtuals,
+ DWORD dwGCSize,
+ DWORD dwNumInterfaces,
+ DWORD dwNumDicts,
+ DWORD cbInstAndDict,
+ MethodTable *pMTParent,
+ ClassLoader *pClassLoader,
+ LoaderAllocator *pAllocator,
+ BOOL isInterface,
+ BOOL fDynamicStatics,
+ BOOL fHasGenericsStaticsInfo,
+ BOOL fNeedsRCWPerTypeData,
+ BOOL fNeedsRemotableMethodInfo,
+ BOOL fNeedsRemotingVtsInfo,
+ BOOL fHasContextStatics
+#ifdef FEATURE_COMINTEROP
+ , BOOL fHasDynamicInterfaceMap
+#endif
+#ifdef FEATURE_PREJIT
+ , Module *pComputedPZM
+#endif // FEATURE_PREJIT
+ , AllocMemTracker *pamTracker
+ )
+{
+ CONTRACT (MethodTable*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ DWORD dwNonVirtualSlots = dwVtableSlots - dwVirtuals;
+
+ // GCSize must be aligned
+ _ASSERTE(IS_ALIGNED(dwGCSize, sizeof(void*)));
+
+ // size without the interface map
+ S_SIZE_T cbTotalSize = S_SIZE_T(dwGCSize) + S_SIZE_T(sizeof(MethodTable));
+
+ // vtable
+ cbTotalSize += MethodTable::GetNumVtableIndirections(dwVirtuals) * sizeof(PTR_PCODE);
+
+
+ DWORD dwMultipurposeSlotsMask = 0;
+ if (dwNumInterfaces != 0)
+ dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasInterfaceMap;
+ if (dwNumDicts != 0)
+ dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasPerInstInfo;
+ if (bmtVT->pDispatchMapBuilder->Count() > 0)
+ dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasDispatchMapSlot;
+ if (dwNonVirtualSlots != 0)
+ dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasNonVirtualSlots;
+ if (pLoaderModule != GetModule())
+ dwMultipurposeSlotsMask |= MethodTable::enum_flag_HasModuleOverride;
+
+ // Add space for optional members here. Same as GetOptionalMembersSize()
+ cbTotalSize += MethodTable::GetOptionalMembersAllocationSize(dwMultipurposeSlotsMask,
+ fNeedsRemotableMethodInfo,
+ fHasGenericsStaticsInfo,
+ FALSE, // no GuidInfo needed for canonical instantiations
+ FALSE, // no CCW template needed for canonical instantiations
+ fNeedsRCWPerTypeData,
+ fNeedsRemotingVtsInfo,
+ fHasContextStatics,
+ RidFromToken(GetCl()) >= METHODTABLE_TOKEN_OVERFLOW);
+
+ // Interface map starts here
+ S_SIZE_T offsetOfInterfaceMap = cbTotalSize;
+
+ cbTotalSize += S_SIZE_T(dwNumInterfaces) * S_SIZE_T(sizeof(InterfaceInfo_t));
+
+#ifdef FEATURE_COMINTEROP
+ // Dynamic interface maps have an extra DWORD_PTR added to the end of the normal
+ // interface map. This will be used to store the count of dynamically added interfaces
+ // (the ones that are not in the metadata but are QI'ed for at runtime).
+ cbTotalSize += S_SIZE_T(fHasDynamicInterfaceMap ? sizeof(DWORD_PTR) : 0);
+#endif
+
+ // Dictionary pointers start here
+ S_SIZE_T offsetOfInstAndDict = cbTotalSize;
+
+ if (dwNumDicts != 0)
+ {
+ cbTotalSize += sizeof(GenericsDictInfo);
+ cbTotalSize += S_SIZE_T(dwNumDicts) * S_SIZE_T(sizeof(TypeHandle*));
+ cbTotalSize += cbInstAndDict;
+ }
+
+ S_SIZE_T offsetOfUnsharedVtableChunks = cbTotalSize;
+
+ BOOL canShareVtableChunks = pMTParent && MethodTable::CanShareVtableChunksFrom(pMTParent, pLoaderModule
+#ifdef FEATURE_PREJIT
+ , pComputedPZM
+#endif //FEATURE_PREJIT
+ );
+
+ // If pMTParent has a generic instantiation, we cannot share its vtable chunks
+ // This is because pMTParent is only approximate at this point, and MethodTableBuilder::CopyExactParentSlots
+ // may swap in an exact parent that does not satisfy CanShareVtableChunksFrom
+ if (pMTParent && pMTParent->HasInstantiation())
+ {
+ canShareVtableChunks = FALSE;
+ }
+
+ // We will share any parent vtable chunk that does not contain a method we overrode (or introduced)
+ // For the rest, we need to allocate space
+ for (DWORD i = 0; i < dwVirtuals; i++)
+ {
+ if (!canShareVtableChunks || ChangesImplementationOfVirtualSlot(static_cast<SLOT_INDEX>(i)))
+ {
+ DWORD chunkStart = MethodTable::GetStartSlotForVtableIndirection(MethodTable::GetIndexOfVtableIndirection(i), dwVirtuals);
+ DWORD chunkEnd = MethodTable::GetEndSlotForVtableIndirection(MethodTable::GetIndexOfVtableIndirection(i), dwVirtuals);
+
+ cbTotalSize += S_SIZE_T(chunkEnd - chunkStart) * S_SIZE_T(sizeof(PCODE));
+
+ i = chunkEnd - 1;
+ }
+ }
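+
+ // Worked example (illustrative, assuming the usual 8-slot indirection chunk size):
+ // a type with dwVirtuals == 10 that only overrides slot 9 shares the parent's first
+ // chunk (slots 0-7) and allocates space here only for the second chunk (slots 8-9).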
+
+ // Add space for the non-virtual slots array (pointed to by an optional member) if required
+ // If there is only one non-virtual slot, we store it directly in the optional member and need no array
+ S_SIZE_T offsetOfNonVirtualSlots = cbTotalSize;
+ if (dwNonVirtualSlots > 1)
+ {
+ cbTotalSize += S_SIZE_T(dwNonVirtualSlots) * S_SIZE_T(sizeof(PCODE));
+ }
+
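+ // At this point cbTotalSize describes a single contiguous allocation, roughly:
+ //   [GC desc (dwGCSize)] [MethodTable] [vtable indirection slots] [optional members]
+ //   [interface map (plus a dynamic-interface count slot, if any)] [GenericsDictInfo +
+ //   dictionary pointers + instantiation/dictionary data] [unshared vtable chunks]
+ //   [non-virtual slots array]
+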
+ BYTE *pData = (BYTE *)pamTracker->Track(pAllocator->GetHighFrequencyHeap()->AllocMem(cbTotalSize));
+
+ _ASSERTE(IS_ALIGNED(pData, sizeof(size_t)));
+
+ // There should be no overflows if we have allocated the memory successfully
+ _ASSERTE(!cbTotalSize.IsOverflow());
+
+ MethodTable* pMT = (MethodTable*)(pData + dwGCSize);
+
+ pMT->SetMultipurposeSlotsMask(dwMultipurposeSlotsMask);
+
+ MethodTableWriteableData * pMTWriteableData = (MethodTableWriteableData *) (BYTE *)
+ pamTracker->Track(pAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(MethodTableWriteableData))));
+ // Note: Memory allocated on loader heap is zero filled
+ pMT->SetWriteableData(pMTWriteableData);
+
+ // This also disables IBC logging until the type is sufficiently initialized,
+ // so it needs to be done early
+ pMTWriteableData->SetIsNotFullyLoadedForBuildMethodTable();
+
+#ifdef _DEBUG
+ pClassLoader->m_dwGCSize += dwGCSize;
+ pClassLoader->m_dwInterfaceMapSize += (dwNumInterfaces * sizeof(InterfaceInfo_t));
+ pClassLoader->m_dwMethodTableSize += (DWORD)cbTotalSize.Value();
+ pClassLoader->m_dwVtableData += (dwVtableSlots * sizeof(PCODE));
+#endif // _DEBUG
+
+ // There should be no overflows if we have allocated the memory successfully
+ _ASSERTE(!offsetOfUnsharedVtableChunks.IsOverflow());
+ _ASSERTE(!offsetOfNonVirtualSlots.IsOverflow());
+ _ASSERTE(!offsetOfInterfaceMap.IsOverflow());
+ _ASSERTE(!offsetOfInstAndDict.IsOverflow());
+
+ // initialize the total number of slots
+ pMT->SetNumVirtuals(static_cast<WORD>(dwVirtuals));
+
+ pMT->SetParentMethodTable(pMTParent);
+
+ // Fill out the vtable indirection slots
+ SIZE_T dwCurrentUnsharedSlotOffset = offsetOfUnsharedVtableChunks.Value();
+ MethodTable::VtableIndirectionSlotIterator it = pMT->IterateVtableIndirectionSlots();
+ while (it.Next())
+ {
+ BOOL shared = canShareVtableChunks;
+
+ // Recalculate whether we will share this chunk
+ if (canShareVtableChunks)
+ {
+ for (DWORD i = it.GetStartSlot(); i < it.GetEndSlot(); i++)
+ {
+ if (ChangesImplementationOfVirtualSlot(static_cast<SLOT_INDEX>(i)))
+ {
+ shared = FALSE;
+ break;
+ }
+ }
+ }
+
+ if (shared)
+ {
+ // Share the parent chunk
+ _ASSERTE(it.GetEndSlot() <= pMTParent->GetNumVirtuals());
+ it.SetIndirectionSlot(pMTParent->GetVtableIndirections()[it.GetIndex()]);
+ }
+ else
+ {
+ // Use the locally allocated chunk
+ it.SetIndirectionSlot((PTR_PCODE)(pData+dwCurrentUnsharedSlotOffset));
+ dwCurrentUnsharedSlotOffset += it.GetSize();
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // Extensible RCWs are prefixed with the count of dynamic interfaces.
+ if (fHasDynamicInterfaceMap)
+ {
+ _ASSERTE (dwNumInterfaces > 0);
+ pMT->SetInterfaceMap ((WORD) (dwNumInterfaces), (InterfaceInfo_t*)(pData+offsetOfInterfaceMap.Value()+sizeof(DWORD_PTR)));
+
+ *(((DWORD_PTR *)pMT->GetInterfaceMap()) - 1) = 0;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ // interface map is at the end of the vtable
+ pMT->SetInterfaceMap ((WORD) dwNumInterfaces, (InterfaceInfo_t *)(pData+offsetOfInterfaceMap.Value()));
+ }
+
+ _ASSERTE(((WORD) dwNumInterfaces) == dwNumInterfaces);
+
+ if (fDynamicStatics)
+ {
+ pMT->SetDynamicStatics(fHasGenericsStaticsInfo);
+ }
+
+ if (dwNonVirtualSlots > 0)
+ {
+ if (dwNonVirtualSlots > 1)
+ {
+ pMT->SetNonVirtualSlotsArray((PTR_PCODE)(pData+offsetOfNonVirtualSlots.Value()));
+ }
+ else
+ {
+ pMT->SetHasSingleNonVirtualSlot();
+ }
+ }
+
+ // the dictionary pointers follow the interface map
+ if (dwNumDicts)
+ {
+ Dictionary** pPerInstInfo = (Dictionary**)(pData + offsetOfInstAndDict.Value() + sizeof(GenericsDictInfo));
+
+ pMT->SetPerInstInfo ( pPerInstInfo);
+
+ // Fill in the dictionary for this type, if it's instantiated
+ if (cbInstAndDict)
+ {
+ *(pPerInstInfo + (dwNumDicts-1)) = (Dictionary*) (pPerInstInfo + dwNumDicts);
+ }
+ }
+
+#ifdef _DEBUG
+ pMT->m_pWriteableData->m_dwLastVerifedGCCnt = (DWORD)-1;
+#endif // _DEBUG
+
+ RETURN(pMT);
+}
+
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+// Setup the method table
+//
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif // _PREFAST_
+
+VOID
+MethodTableBuilder::SetupMethodTable2(
+ Module * pLoaderModule
+#ifdef FEATURE_PREJIT
+ , Module * pComputedPZM
+#endif // FEATURE_PREJIT
+ )
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(bmtVT));
+ PRECONDITION(CheckPointer(bmtInterface));
+ PRECONDITION(CheckPointer(bmtInternal));
+ PRECONDITION(CheckPointer(bmtProp));
+ PRECONDITION(CheckPointer(bmtMFDescs));
+ PRECONDITION(CheckPointer(bmtEnumFields));
+ PRECONDITION(CheckPointer(bmtError));
+ PRECONDITION(CheckPointer(bmtMetaData));
+ PRECONDITION(CheckPointer(bmtParent));
+ PRECONDITION(CheckPointer(bmtGenerics));
+ }
+ CONTRACTL_END;
+
+ DWORD i;
+
+#ifdef FEATURE_COMINTEROP
+ BOOL fHasDynamicInterfaceMap = bmtInterface->dwInterfaceMapSize > 0 &&
+ bmtProp->fIsComObjectType &&
+ (GetParentMethodTable() != g_pObjectClass);
+ BOOL fNeedsRCWPerTypeData = bmtProp->fNeedsRCWPerTypeData;
+#else // FEATURE_COMINTEROP
+ BOOL fNeedsRCWPerTypeData = FALSE;
+#endif // FEATURE_COMINTEROP
+
+ EEClass *pClass = GetHalfBakedClass();
+
+ DWORD cbDict = bmtGenerics->HasInstantiation()
+ ? DictionaryLayout::GetFirstDictionaryBucketSize(
+ bmtGenerics->GetNumGenericArgs(), pClass->GetDictionaryLayout())
+ : 0;
+
+
+#ifdef FEATURE_REMOTING
+ BOOL fHasContextStatics = (bmtCSInfo) ? (bmtCSInfo->dwContextStaticsSize) : FALSE;
+ BOOL fNeedsRemotableMethodInfo = (IsMarshaledByRef() || IsInterface() || g_pObjectClass == NULL);
+ BOOL fNeedsRemotingVtsInfo = bmtMFDescs->fNeedsRemotingVtsInfo;
+#else // !FEATURE_REMOTING
+ BOOL fHasContextStatics = FALSE;
+ BOOL fNeedsRemotableMethodInfo=FALSE;
+ BOOL fNeedsRemotingVtsInfo = FALSE;
+#endif // !FEATURE_REMOTING
+
+#ifdef FEATURE_COLLECTIBLE_TYPES
+ BOOL fCollectible = pLoaderModule->IsCollectible();
+#endif // FEATURE_COLLECTIBLE_TYPES
+
+ DWORD dwGCSize;
+
+ if (bmtFP->NumGCPointerSeries > 0)
+ {
+ dwGCSize = (DWORD)CGCDesc::ComputeSize(bmtFP->NumGCPointerSeries);
+ }
+ else
+ {
+#ifdef FEATURE_COLLECTIBLE_TYPES
+ if (fCollectible)
+ dwGCSize = (DWORD)CGCDesc::ComputeSize(1);
+ else
+#endif // FEATURE_COLLECTIBLE_TYPES
+ dwGCSize = 0;
+ }
+
+ pClass->SetNumMethods(bmtVT->cTotalSlots);
+ pClass->SetNumNonVirtualSlots(bmtVT->cVtableSlots - bmtVT->cVirtualSlots);
+
+ // Now setup the method table
+ // interface map is allocated along with the method table
+ MethodTable *pMT = AllocateNewMT(pLoaderModule,
+ bmtVT->cVtableSlots,
+ bmtVT->cVirtualSlots,
+ dwGCSize,
+ bmtInterface->dwInterfaceMapSize,
+ bmtGenerics->numDicts,
+ cbDict,
+ GetParentMethodTable(),
+ GetClassLoader(),
+ bmtAllocator,
+ IsInterface(),
+ bmtProp->fDynamicStatics,
+ bmtProp->fGenericsStatics,
+ fNeedsRCWPerTypeData,
+ fNeedsRemotableMethodInfo,
+ fNeedsRemotingVtsInfo,
+ fHasContextStatics,
+#ifdef FEATURE_COMINTEROP
+ fHasDynamicInterfaceMap,
+#endif
+#ifdef FEATURE_PREJIT
+ pComputedPZM,
+#endif //FEATURE_PREJIT
+ GetMemTracker());
+
+ pMT->SetClass(pClass);
+ pClass->m_pMethodTable = pMT;
+ m_pHalfBakedMT = pMT;
+
+#ifdef _DEBUG
+ pMT->SetDebugClassName(GetDebugClassName());
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ if (fNeedsRCWPerTypeData)
+ pMT->SetHasRCWPerTypeData();
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_REMOTING
+ if (bmtMFDescs->fNeedsRemotingVtsInfo)
+ pMT->SetHasRemotingVtsInfo();
+
+ if (fHasContextStatics)
+ pMT->SetHasContextStatics();
+
+ if (IsMarshaledByRef())
+ {
+ if (IsContextful())
+ {
+ COUNTER_ONLY(GetPerfCounters().m_Context.cClasses++);
+ pMT->SetIsContextful();
+ }
+ else
+ {
+ pMT->SetMarshaledByRef();
+ }
+ }
+#endif // FEATURE_REMOTING
+
+ if (IsInterface())
+ pMT->SetIsInterface();
+
+ if (GetParentMethodTable() != NULL)
+ {
+ if (GetParentMethodTable()->HasModuleDependencies())
+ {
+ pMT->SetHasModuleDependencies();
+ }
+ else
+ {
+ Module * pModule = GetModule();
+ Module * pParentModule = GetParentMethodTable()->GetModule();
+ if (pModule != pParentModule && !pModule->HasUnconditionalActiveDependency(pParentModule))
+ {
+ pMT->SetHasModuleDependencies();
+ }
+ }
+
+ if (GetParentMethodTable()->HasPreciseInitCctors() || !pClass->IsBeforeFieldInit())
+ {
+ pMT->SetHasPreciseInitCctors();
+ }
+ }
+
+ // Must be done early because various methods test HasInstantiation() and ContainsGenericVariables()
+ if (bmtGenerics->GetNumGenericArgs() != 0)
+ {
+ pMT->SetHasInstantiation(bmtGenerics->fTypicalInstantiation, bmtGenerics->fSharedByGenericInstantiations);
+
+ if (bmtGenerics->fContainsGenericVariables)
+ pMT->SetContainsGenericVariables();
+ }
+
+ if (bmtGenerics->numDicts != 0)
+ {
+ if (!FitsIn<WORD>(bmtGenerics->GetNumGenericArgs()))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_TOOMANYGENERICARGS);
+ }
+
+ pMT->SetDictInfo(bmtGenerics->numDicts,
+ static_cast<WORD>(bmtGenerics->GetNumGenericArgs()));
+ }
+
+ CONSISTENCY_CHECK(pMT->GetNumGenericArgs() == bmtGenerics->GetNumGenericArgs());
+ CONSISTENCY_CHECK(pMT->GetNumDicts() == bmtGenerics->numDicts);
+ CONSISTENCY_CHECK(pMT->HasInstantiation() == bmtGenerics->HasInstantiation());
+ CONSISTENCY_CHECK(pMT->HasInstantiation() == !pMT->GetInstantiation().IsEmpty());
+
+ pMT->SetLoaderModule(pLoaderModule);
+ pMT->SetLoaderAllocator(bmtAllocator);
+
+ pMT->SetModule(GetModule());
+
+ pMT->SetInternalCorElementType (ELEMENT_TYPE_CLASS);
+
+ SetNonGCRegularStaticFieldBytes (bmtProp->dwNonGCRegularStaticFieldBytes);
+ SetNonGCThreadStaticFieldBytes (bmtProp->dwNonGCThreadStaticFieldBytes);
+
+ PSecurityProperties psp = GetSecurityProperties();
+ // Check whether we have any runtime actions such as Demand, Assert etc
+ // that can result in methods needing the security stub. We don't care about LinkDemands etc.
+ if ( !psp || (!psp->GetRuntimeActions() && !psp->GetNullRuntimeActions()))
+ pMT->SetNoSecurityProperties();
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+ if (bmtProp->fHasTypeEquivalence)
+ {
+ pMT->SetHasTypeEquivalence();
+ }
+#endif //FEATURE_TYPEEQUIVALENCE
+
+#ifdef FEATURE_COMINTEROP
+ if (bmtProp->fSparse)
+ pClass->SetSparseForCOMInterop();
+
+ if (IsInterface() && IsComImport())
+ {
+ // Determine if we are creating an interface methodtable that may be used to dispatch through VSD
+ // on an object that has the methodtable of __ComObject.
+
+ // This is done to allow COM tearoff interfaces, but as a side-effect of this feature,
+ // we end up using a domain-shared type (__ComObject) with a domain-specific dispatch token.
+ // This is a problem because the same domain-specific dispatch token value can appear in
+ // multiple unshared domains (VSD takes advantage of the fact that in general a shared type
+ // cannot implement an unshared interface). This means that the same <token, __ComObject> pair
+ // value can mean different things in different domains (since the token could represent
+ // IFoo in one domain and IBar in another). This is a problem because the
+ // VSD polymorphic lookup mechanism relies on a process-wide cache table, and as a result
+ // these duplicate values would collide if we didn't use fat dispatch tokens to ensure
+ // uniqueness whenever the interface methodtable is not in the shared domain.
+
+ pMT->SetRequiresFatDispatchTokens();
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (bmtVT->pCCtor != NULL)
+ {
+ pMT->SetHasClassConstructor();
+ CONSISTENCY_CHECK(pMT->GetClassConstructorSlot() == bmtVT->pCCtor->GetSlotIndex());
+ }
+ if (bmtVT->pDefaultCtor != NULL)
+ {
+ pMT->SetHasDefaultConstructor();
+ CONSISTENCY_CHECK(pMT->GetDefaultConstructorSlot() == bmtVT->pDefaultCtor->GetSlotIndex());
+ }
+
+ for (MethodDescChunk *pChunk = GetHalfBakedClass()->GetChunks(); pChunk != NULL; pChunk = pChunk->GetNextChunk())
+ {
+ pChunk->SetMethodTable(pMT);
+ }
+
+#ifdef _DEBUG
+ {
+ // disable IBC logging because we can assert in ComputePreferredZapModule for partially constructed
+ // generic types
+ IBCLoggingDisabler disableLogging;
+
+ DeclaredMethodIterator it(*this);
+ while (it.Next())
+ {
+ MethodDesc *pMD = it->GetMethodDesc();
+ if (pMD != NULL)
+ {
+ pMD->m_pDebugMethodTable.SetValue(pMT);
+ pMD->m_pszDebugMethodSignature = FormatSig(pMD, GetLoaderAllocator()->GetLowFrequencyHeap(), GetMemTracker());
+ }
+ MethodDesc *pUnboxedMD = it->GetUnboxedMethodDesc();
+ if (pUnboxedMD != NULL)
+ {
+ pUnboxedMD->m_pDebugMethodTable.SetValue(pMT);
+ pUnboxedMD->m_pszDebugMethodSignature = FormatSig(pUnboxedMD, GetLoaderAllocator()->GetLowFrequencyHeap(), GetMemTracker());
+ }
+ }
+ }
+#endif // _DEBUG
+
+ // Note that for value classes, the following calculation is only appropriate
+ // when the instance is in its "boxed" state.
+ if (!IsInterface())
+ {
+ DWORD baseSize = Max<DWORD>(bmtFP->NumInstanceFieldBytes + ObjSizeOf(Object), MIN_OBJECT_SIZE);
+ baseSize = (baseSize + ALLOC_ALIGN_CONSTANT) & ~ALLOC_ALIGN_CONSTANT; // m_BaseSize must be aligned
+ pMT->SetBaseSize(baseSize);
+
+ GetHalfBakedClass()->SetBaseSizePadding(baseSize - bmtFP->NumInstanceFieldBytes);
+
+#ifdef FEATURE_COMINTEROP
+ if (bmtProp->fIsComObjectType)
+ { // Propagate the com specific info
+ pMT->SetComObjectType();
+
+ // COM objects need an optional field on the EEClass, so ensure this class instance has allocated
+ // the optional field descriptor.
+ EnsureOptionalFieldsAreAllocated(pClass, m_pAllocMemTracker, GetLoaderAllocator()->GetLowFrequencyHeap());
+ }
+
+ if (pMT->GetAssembly()->IsManagedWinMD())
+ {
+ // We need to mark classes that are implementations of managed WinRT runtime classes with
+ // the "exported to WinRT" flag. It's not quite possible to tell which ones these are by
+ // reading metadata so we ask the adapter.
+
+ IWinMDImport *pWinMDImport = pMT->GetAssembly()->GetManifestWinMDImport();
+ _ASSERTE(pWinMDImport != NULL);
+
+ BOOL bResult;
+ IfFailThrow(pWinMDImport->IsRuntimeClassImplementation(GetCl(), &bResult));
+
+ if (bResult)
+ {
+ pClass->SetExportedToWinRT();
+
+ // We need optional fields for activation from WinRT.
+ EnsureOptionalFieldsAreAllocated(pClass, m_pAllocMemTracker, GetLoaderAllocator()->GetLowFrequencyHeap());
+ }
+ }
+
+ if (pClass->IsProjectedFromWinRT() || pClass->IsExportedToWinRT())
+ {
+ const BYTE * pVal;
+ ULONG cbVal;
+ HRESULT hr = GetMDImport()->GetCustomAttributeByName(GetCl(), g_WindowsFoundationMarshalingBehaviorAttributeClassName, (const void **) &pVal, &cbVal);
+ if (hr == S_OK)
+ {
+ CustomAttributeParser cap(pVal, cbVal);
+ IfFailThrow(cap.SkipProlog());
+ UINT32 u = 0;
+ IfFailThrow(cap.GetU4(&u));
+ if(u > 0)
+ pClass->SetMarshalingType(u);
+ }
+ }
+#endif // FEATURE_COMINTEROP
+ }
+ else
+ {
+#ifdef FEATURE_COMINTEROP
+ // If this is an interface then we need to set the ComInterfaceType to
+ // -1 to indicate we have not yet determined the interface type.
+ pClass->SetComInterfaceType((CorIfaceAttr)-1);
+
+ // If this is a special COM event interface, then mark the MT as such.
+ if (bmtProp->fComEventItfType)
+ {
+ pClass->SetComEventItfType();
+ }
+#endif // FEATURE_COMINTEROP
+ }
+ _ASSERTE((pMT->IsInterface() == 0) == (IsInterface() == 0));
+
+ if (HasLayout())
+ {
+ pClass->SetNativeSize(GetLayoutInfo()->GetNativeSize());
+ }
+
+ FieldDesc *pFieldDescList = pClass->GetFieldDescList();
+ // Set all field slots to point to the newly created MethodTable
+ for (i = 0; i < (bmtEnumFields->dwNumStaticFields + bmtEnumFields->dwNumInstanceFields); i++)
+ {
+ pFieldDescList[i].m_pMTOfEnclosingClass.SetValue(pMT);
+ }
+
+ // Fill in type parameters before looking up exact parent or fetching the types of any field descriptors!
+ // This must come before the use of GetFieldType in the value class representation optimization below.
+ if (bmtGenerics->GetNumGenericArgs() != 0)
+ {
+ // Space has already been allocated for the instantiation but the parameters haven't been filled in
+ Instantiation destInst = pMT->GetInstantiation();
+ Instantiation inst = bmtGenerics->GetInstantiation();
+
+ // So fill them in...
+ TypeHandle * pInstDest = (TypeHandle *)destInst.GetRawArgs();
+ for (DWORD j = 0; j < bmtGenerics->GetNumGenericArgs(); j++)
+ {
+ pInstDest[j] = inst[j];
+ }
+ }
+
+ CorElementType normalizedType = ELEMENT_TYPE_CLASS;
+ if (IsValueClass())
+ {
+ if (IsEnum())
+ {
+ if (GetNumInstanceFields() != 1 ||
+ !CorTypeInfo::IsPrimitiveType(pFieldDescList[0].GetFieldType()))
+ {
+ BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
+ }
+ CONSISTENCY_CHECK(!pFieldDescList[0].IsStatic());
+ normalizedType = pFieldDescList->GetFieldType();
+ }
+ else
+ {
+#ifdef _TARGET_X86_
+ // JIT64 is not aware of normalized value types and this
+ // optimization (return small value types by value in registers)
+ // is already done in JIT64.
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+ normalizedType = EEClass::ComputeInternalCorElementTypeForValueType(pMT);
+#else
+ normalizedType = ELEMENT_TYPE_VALUETYPE;
+#endif
+ }
+ }
+ pMT->SetInternalCorElementType(normalizedType);
+
+ if (GetModule()->IsSystem())
+ {
+ // we are in mscorlib
+ CheckForSystemTypes();
+ }
+
+ // Now fill in the real interface map with the approximate interfaces
+ if (bmtInterface->dwInterfaceMapSize > 0)
+ {
+ // First ensure we have enough space to record extra flag information for each interface (we don't
+ // record this directly into each interface map entry since these flags don't pack well due to
+ // alignment).
+ PVOID pExtraInterfaceInfo = NULL;
+ SIZE_T cbExtraInterfaceInfo = MethodTable::GetExtraInterfaceInfoSize(bmtInterface->dwInterfaceMapSize);
+ if (cbExtraInterfaceInfo)
+ pExtraInterfaceInfo = GetMemTracker()->Track(GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(cbExtraInterfaceInfo)));
+
+ // Call this even in the case where pExtraInterfaceInfo == NULL (certain cases are optimized and don't
+ // require extra buffer space).
+ pMT->InitializeExtraInterfaceInfo(pExtraInterfaceInfo);
+
+ InterfaceInfo_t *pInterfaces = pMT->GetInterfaceMap();
+
+ CONSISTENCY_CHECK(CheckPointer(pInterfaces));
+
+ // Copy the interface map member by member so there is no junk in the padding.
+ for (i = 0; i < bmtInterface->dwInterfaceMapSize; i++)
+ {
+ bmtInterfaceEntry * pEntry = &bmtInterface->pInterfaceMap[i];
+
+ if (pEntry->IsDeclaredOnType())
+ pMT->SetInterfaceDeclaredOnClass(i);
+ _ASSERTE(!!pEntry->IsDeclaredOnType() == !!pMT->IsInterfaceDeclaredOnClass(i));
+
+ pInterfaces[i].SetMethodTable(pEntry->GetInterfaceType()->GetMethodTable());
+ }
+ }
+
+ pMT->SetCl(GetCl());
+
+ // The type is sufficiently initialized for most general purpose accessor methods to work.
+ // Mark the type as restored to avoid asserts. Note that this also enables IBC logging.
+ pMT->GetWriteableDataForWrite_NoLogging()->SetIsRestoredForBuildMethodTable();
+
+#ifdef _DEBUG
+ // Store status if we tried to inject duplicate interfaces
+ if (bmtInterface->dbg_fShouldInjectInterfaceDuplicates)
+ pMT->Debug_SetHasInjectedInterfaceDuplicates();
+#endif //_DEBUG
+
+ // Keep bmtInterface data around since we no longer write the flags (IsDeclaredOnType and
+ // IsImplementedByParent) into the interface map (these flags are only required during type loading).
+
+ {
+ for (MethodDescChunk *pChunk = GetHalfBakedClass()->GetChunks(); pChunk != NULL; pChunk = pChunk->GetNextChunk())
+ {
+ // Make sure that temporary entrypoints are created for methods. NGEN uses temporary
+ // entrypoints as surrogate keys for precodes.
+ pChunk->EnsureTemporaryEntryPointsCreated(GetLoaderAllocator(), GetMemTracker());
+ }
+ }
+
+ { // copy onto the real vtable (methods only)
+ //@GENERICS: Because we sometimes load an inexact parent (see ClassLoader::GetParent) the inherited slots might
+ // come from the wrong place and need fixing up once we know the exact parent
+
+ for (bmtVtable::Iterator slotIt = bmtVT->IterateSlots(); !slotIt.AtEnd(); ++slotIt)
+ {
+ SLOT_INDEX iCurSlot = static_cast<SLOT_INDEX>(slotIt.CurrentIndex());
+
+ // We want the unboxed MethodDesc if we're out of the virtual method range
+ // and the method we're dealing with has an unboxing method. If so, then
+ // the unboxing method was placed in the virtual section of the vtable and
+ // we now need to place the unboxed version.
+ MethodDesc * pMD = NULL;
+ if (iCurSlot < bmtVT->cVirtualSlots || !slotIt->Impl().AsMDMethod()->IsUnboxing())
+ {
+ pMD = slotIt->Impl().GetMethodDesc();
+ CONSISTENCY_CHECK(slotIt->Decl().GetSlotIndex() == iCurSlot);
+ }
+ else
+ {
+ pMD = slotIt->Impl().AsMDMethod()->GetUnboxedMethodDesc();
+ CONSISTENCY_CHECK(pMD->GetSlot() == iCurSlot);
+ }
+
+ CONSISTENCY_CHECK(CheckPointer(pMD));
+
+ if (pMD->GetMethodTable() != pMT)
+ {
+ //
+ // Inherited slots
+ //
+ // Do not write into vtable chunks shared with parent. It would introduce a race
+ // with code:MethodDesc::SetStableEntryPointInterlocked.
+ //
+ DWORD indirectionIndex = MethodTable::GetIndexOfVtableIndirection(iCurSlot);
+ if (GetParentMethodTable()->GetVtableIndirections()[indirectionIndex] != pMT->GetVtableIndirections()[indirectionIndex])
+ pMT->SetSlot(iCurSlot, pMD->GetMethodEntryPoint());
+ }
+ else
+ {
+ //
+ // Owned slots
+ //
+ _ASSERTE(iCurSlot >= bmtVT->cVirtualSlots || ChangesImplementationOfVirtualSlot(iCurSlot));
+
+ PCODE addr = pMD->GetTemporaryEntryPoint();
+ _ASSERTE(addr != NULL);
+
+ if (pMD->HasNonVtableSlot())
+ {
+ *pMD->GetAddrOfSlot() = addr;
+ }
+ else
+ {
+ pMT->SetSlot(iCurSlot, addr);
+ }
+
+ if (pMD->GetSlot() == iCurSlot && pMD->RequiresStableEntryPoint())
+ {
+ // The rest of the system assumes that certain methods always have stable entrypoints.
+ // Create them now.
+ pMD->GetOrCreatePrecode();
+ }
+ }
+ }
+ }
+
+ // If we have any entries, then finalize them and allocate the object in class loader heap
+ DispatchMap *pDispatchMap = NULL;
+ DispatchMapBuilder *pDispatchMapBuilder = bmtVT->pDispatchMapBuilder;
+ CONSISTENCY_CHECK(CheckPointer(pDispatchMapBuilder));
+
+ if (pDispatchMapBuilder->Count() > 0)
+ {
+ // Create a map in stacking memory.
+ BYTE * pbMap;
+ UINT32 cbMap;
+ DispatchMap::CreateEncodedMapping(
+ pMT,
+ pDispatchMapBuilder,
+ pDispatchMapBuilder->GetAllocator(),
+ &pbMap,
+ &cbMap);
+
+ // Now finalize the impltable and allocate the block in the low frequency loader heap
+ size_t objSize = (size_t) DispatchMap::GetObjectSize(cbMap);
+ void * pv = AllocateFromLowFrequencyHeap(S_SIZE_T(objSize));
+ _ASSERTE(pv != NULL);
+
+ // Use placement new
+ pDispatchMap = new (pv) DispatchMap(pbMap, cbMap);
+ pMT->SetDispatchMap(pDispatchMap);
+
+#ifdef LOGGING
+ g_sdStats.m_cDispatchMap++;
+ g_sdStats.m_cbDispatchMap += (UINT32) objSize;
+ LOG((LF_LOADER, LL_INFO1000, "SD: Dispatch map for %s: %d bytes for map, %d bytes total for object.\n",
+ pMT->GetDebugClassName(), cbMap, objSize));
+#endif // LOGGING
+
+ }
+
+ // GetMethodData by default will cache its result. However, in the case that we're
+ // building a MethodTable, we aren't guaranteed that this type is going to load
+ // successfully, and caching the result would cause errors down the road, since the
+ // memory could later be occupied by a different type at the same location. The second
+ // argument specifies that GetMethodData should not cache the returned object.
+ MethodTable::MethodDataWrapper hMTData(MethodTable::GetMethodData(pMT, FALSE));
+
+ if (!IsInterface())
+ {
+ // Propagate inheritance.
+
+ // NOTE: In the world of unfolded interfaces this was used to propagate overrides into
+ // the unfolded interface vtables to make sure that overrides of virtual methods
+ // also overrode the interface methods that they contributed to. This had the
+ // unfortunate side-effect of also overwriting regular vtable slots that had been
+ // methodimpl'd and as a result changed the meaning of methodimpl from "substitute
+ // the body of method A with the body of method B" to "unify the slots of methods
+ // A and B". But now compilers have come to rely on this side-effect and it
+ // cannot be brought back to its originally intended behaviour.
+
+ // For every slot whose body comes from another slot (determined by getting the MethodDesc
+ // for a slot and seeing if MethodDesc::GetSlot returns a different value than the slot
+ // from which the MethodDesc was recovered), copy the value of the slot stated by the
+ // MethodDesc over top of the current slot.
+
+ // Because of the way slot unification works, we need to iterate the entire vtable until
+ // no slots need updating. To understand this, imagine the following:
+ // C1::M1 is overridden by C2::M2
+ // C1::M2 is methodImpled by C1::M3
+ // C1::M3 is overridden by C2::M3
+ // This should mean that C1::M1 is implemented by C2::M3, but if we didn't run the below
+ // for loop a second time, this would not be propagated properly - it would only be placed
+ // into the slot for C1::M2 and never make its way up to C1::M1.
+
+ BOOL fChangeMade;
+ do
+ {
+ fChangeMade = FALSE;
+ for (i = 0; i < pMT->GetNumVirtuals(); i++)
+ {
+ MethodDesc* pMD = hMTData->GetImplMethodDesc(i);
+
+ CONSISTENCY_CHECK(CheckPointer(pMD));
+ CONSISTENCY_CHECK(pMD == pMT->GetMethodDescForSlot(i));
+
+ // This indicates that the method body in this slot was copied here through a methodImpl.
+ // Thus, copy the value of the slot from which the body originally came, in case it was
+ // overridden, to make sure the two slots stay in sync.
+ INDEBUG(MethodDesc * pMDOld; pMDOld = pMD;)
+ if(pMD->GetSlot() != i &&
+ pMT->GetSlot(i) != pMT->GetSlot(pMD->GetSlot()))
+ {
+ // Copy the slot value in the method's original slot.
+ pMT->SetSlot(i,pMT->GetSlot(pMD->GetSlot()));
+ hMTData->InvalidateCachedVirtualSlot(i);
+
+ // Update the pMD to the new method desc we just copied over ourselves with. This will
+ // be used in the check for missing method block below.
+ pMD = pMT->GetMethodDescForSlot(pMD->GetSlot());
+
+ // This method is now a duplicate
+ pMD->SetDuplicate();
+ INDEBUG(g_dupMethods++;)
+ fChangeMade = TRUE;
+ }
+ }
+ }
+ while (fChangeMade);
+ }
+
+ if (!bmtProp->fNoSanityChecks)
+ VerifyVirtualMethodsImplemented(hMTData);
+
+#ifdef _DEBUG
+ {
+ for (bmtVtable::Iterator i = bmtVT->IterateSlots();
+ !i.AtEnd(); ++i)
+ {
+ _ASSERTE(i->Impl().GetMethodDesc() != NULL);
+ }
+ }
+#endif // _DEBUG
+
+
+#ifdef FEATURE_COMINTEROP
+ // For ComObject types, i.e. if the class extends from a COM Imported class,
+ // make sure any interface implemented by the COM Imported class
+ // is either overridden fully or not overridden at all.
+ // We relax this for WinRT where we want to be able to override individual methods.
+ if (bmtProp->fIsComObjectType && !pMT->IsWinRTObjectType())
+ {
+ MethodTable::InterfaceMapIterator intIt = pMT->IterateInterfaceMap();
+ while (intIt.Next())
+ {
+ MethodTable* pIntfMT = intIt.GetInterface();
+ if (pIntfMT->GetNumVirtuals() != 0)
+ {
+ BOOL hasComImportMethod = FALSE;
+ BOOL hasManagedMethod = FALSE;
+
+ // NOTE: Avoid caching the MethodData object for the type being built.
+ MethodTable::MethodDataWrapper hItfImplData(MethodTable::GetMethodData(pIntfMT, pMT, FALSE));
+ MethodTable::MethodIterator it(hItfImplData);
+ for (;it.IsValid(); it.Next())
+ {
+ MethodDesc *pClsMD = NULL;
+ // If we fail to find an _IMPLEMENTATION_ for the interface MD, then
+ // we are a ComImportMethod; otherwise we may still be a ComImportMethod or
+ // we may be a ManagedMethod.
+ DispatchSlot impl(it.GetTarget());
+ if (!impl.IsNull())
+ {
+ pClsMD = it.GetMethodDesc();
+
+ CONSISTENCY_CHECK(!pClsMD->IsInterface());
+ if (pClsMD->GetClass()->IsComImport())
+ {
+ hasComImportMethod = TRUE;
+ }
+ else
+ {
+ hasManagedMethod = TRUE;
+ }
+ }
+ else
+ {
+ // Need to set the pClsMD for the error reporting below.
+ pClsMD = it.GetDeclMethodDesc();
+ CONSISTENCY_CHECK(CheckPointer(pClsMD));
+ hasComImportMethod = TRUE;
+ }
+
+ // One and only one of the two must be set.
+ if ((hasComImportMethod && hasManagedMethod) ||
+ (!hasComImportMethod && !hasManagedMethod))
+ {
+ BuildMethodTableThrowException(IDS_EE_BAD_COMEXTENDS_CLASS, pClsMD->GetNameOnNonArrayClass());
+ }
+ }
+ }
+ }
+ }
+
+ // For COM event interfaces, we need to make sure that all the methods are
+ // methods to add or remove events. This means that they all need to take
+ // a delegate derived class and have a void return type.
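+ // An illustrative C# shape of a conforming method (names hypothetical):
+ //     void add_SomethingChanged(SomethingChangedEventHandler handler);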
+ if (bmtProp->fComEventItfType)
+ {
+ // COM event interfaces had better be interfaces.
+ CONSISTENCY_CHECK(IsInterface());
+
+ // Go through all the methods and check the validity of the signature.
+ // NOTE: Uses hMTData to avoid caching a MethodData object for the type being built.
+ MethodTable::MethodIterator it(hMTData);
+ for (;it.IsValid(); it.Next())
+ {
+ MethodDesc* pMD = it.GetMethodDesc();
+ _ASSERTE(pMD);
+
+ MetaSig Sig(pMD);
+
+ {
+ CONTRACT_VIOLATION(LoadsTypeViolation);
+ if (Sig.GetReturnType() != ELEMENT_TYPE_VOID ||
+ Sig.NumFixedArgs() != 1 ||
+ Sig.NextArg() != ELEMENT_TYPE_CLASS ||
+ !Sig.GetLastTypeHandleThrowing().CanCastTo(TypeHandle(g_pDelegateClass)))
+ {
+ BuildMethodTableThrowException(IDS_EE_BAD_COMEVENTITF_CLASS, pMD->GetNameOnNonArrayClass());
+ }
+ }
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ // If this class uses any VTS (Version Tolerant Serialization) features
+ // (event callbacks or OptionalField attributes) we've previously cached the
+ // additional information in the bmtMFDescs structure. Now it's time to add
+ // this information as an optional extension to the MethodTable.
+#ifdef FEATURE_REMOTING
+ if (bmtMFDescs->fNeedsRemotingVtsInfo)
+ {
+ DWORD dwNumIntroducedInstanceFields = bmtEnumFields->dwNumInstanceFields;
+ _ASSERTE(bmtAllocator == pMT->GetLoaderAllocator());
+ PTR_RemotingVtsInfo pInfo = pMT->AllocateRemotingVtsInfo(GetMemTracker(), dwNumIntroducedInstanceFields);
+
+ pInfo->m_pCallbacks[RemotingVtsInfo::VTS_CALLBACK_ON_SERIALIZING].SetValue(bmtMFDescs->pOnSerializingMethod);
+ pInfo->m_pCallbacks[RemotingVtsInfo::VTS_CALLBACK_ON_SERIALIZED].SetValue(bmtMFDescs->pOnSerializedMethod);
+ pInfo->m_pCallbacks[RemotingVtsInfo::VTS_CALLBACK_ON_DESERIALIZING].SetValue(bmtMFDescs->pOnDeserializingMethod);
+ pInfo->m_pCallbacks[RemotingVtsInfo::VTS_CALLBACK_ON_DESERIALIZED].SetValue(bmtMFDescs->pOnDeserializedMethod);
+
+ for (i = 0; i < dwNumIntroducedInstanceFields; i++)
+ {
+ if (bmtMFDescs->prfNotSerializedFields && bmtMFDescs->prfNotSerializedFields[i])
+ pInfo->SetIsNotSerialized(i);
+ if (bmtMFDescs->prfOptionallySerializedFields && bmtMFDescs->prfOptionallySerializedFields[i])
+ pInfo->SetIsOptionallySerialized(i);
+ }
+
+#if 0
+ printf("%s has VTS info:\n", pMT->GetDebugClassName());
+ if (bmtMFDescs->pOnSerializingMethod)
+ printf(" OnSerializing: %s\n", bmtMFDescs->pOnSerializingMethod->m_pszDebugMethodName);
+ if (bmtMFDescs->pOnSerializedMethod)
+ printf(" OnSerialized: %s\n", bmtMFDescs->pOnSerializedMethod->m_pszDebugMethodName);
+ if (bmtMFDescs->pOnDeserializingMethod)
+ printf(" OnDeserializing: %s\n", bmtMFDescs->pOnDeserializingMethod->m_pszDebugMethodName);
+ if (bmtMFDescs->pOnDeserializedMethod)
+ printf(" OnDeserialized: %s\n", bmtMFDescs->pOnDeserializedMethod->m_pszDebugMethodName);
+ for (i = 0; i < dwNumIntroducedInstanceFields; i++)
+ {
+ if (bmtMFDescs->prfNotSerializedFields && bmtMFDescs->prfNotSerializedFields[i])
+ {
+ printf(" [NotSerialized] %s\n", GetApproxFieldDescListRaw()[i].m_debugName);
+ _ASSERTE(pInfo->IsNotSerialized(i));
+ }
+ else
+ _ASSERTE(!pInfo->IsNotSerialized(i));
+ if (bmtMFDescs->prfOptionallySerializedFields && bmtMFDescs->prfOptionallySerializedFields[i])
+ {
+ printf(" [OptionalField] %s\n", GetApproxFieldDescListRaw()[i].m_debugName);
+ _ASSERTE(pInfo->IsOptionallySerialized(i));
+ }
+ else
+ _ASSERTE(!pInfo->IsOptionallySerialized(i));
+ }
+ printf("------------\n\n");
+#endif // 0
+ }
+ if (fNeedsRemotableMethodInfo)
+ pMT->SetupRemotableMethodInfo(GetMemTracker());
+#endif // FEATURE_REMOTING
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+void MethodTableBuilder::VerifyVirtualMethodsImplemented(MethodTable::MethodData * hMTData)
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+ // This verification is not applicable or required in many cases
+ //
+
+ if (IsAbstract() || IsInterface())
+ return;
+
+#ifdef FEATURE_COMINTEROP
+ // Note that this is important for WinRT where redirected .NET interfaces appear on the interface
+ // impl list but their methods are not implemented (the adapter only hides the WinRT methods, it
+ // does not make up the .NET ones).
+ if (bmtProp->fIsComObjectType)
+ return;
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_LEGACYNETCF
+ if (GetModule()->GetDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ return;
+#endif
+
+ // Since interfaces aren't laid out in the vtable for stub dispatch, what we need to do
+ // is try to find an implementation for every interface contract by iterating through
+ // the interfaces not declared on a parent.
+ BOOL fParentIsAbstract = FALSE;
+ if (HasParent())
+ {
+ fParentIsAbstract = GetParentMethodTable()->IsAbstract();
+ }
+
+ // If the parent is abstract, we need to check that each virtual method is implemented
+ if (fParentIsAbstract)
+ {
+ // NOTE: Uses hMTData to avoid caching a MethodData object for the type being built.
+ MethodTable::MethodIterator it(hMTData);
+ for (; it.IsValid() && it.IsVirtual(); it.Next())
+ {
+ MethodDesc *pMD = it.GetMethodDesc();
+ if (pMD->IsAbstract())
+ {
+ MethodDesc *pDeclMD = it.GetDeclMethodDesc();
+ BuildMethodTableThrowException(IDS_CLASSLOAD_NOTIMPLEMENTED, pDeclMD->GetNameOnNonArrayClass());
+ }
+ }
+ }
+
+ DispatchMapTypeID * rgInterfaceDispatchMapTypeIDs =
+ new (GetStackingAllocator()) DispatchMapTypeID[bmtInterface->dwInterfaceMapSize];
+
+ bmtInterfaceInfo::MapIterator intIt = bmtInterface->IterateInterfaceMap();
+ for (; !intIt.AtEnd(); intIt.Next())
+ {
+ if (fParentIsAbstract || !intIt->IsImplementedByParent())
+ {
+ // Compute all TypeIDs for this interface (all duplicates in the interface map)
+ UINT32 cInterfaceDuplicates;
+ ComputeDispatchMapTypeIDs(
+ intIt->GetInterfaceType()->GetMethodTable(),
+ &intIt->GetInterfaceType()->GetSubstitution(),
+ rgInterfaceDispatchMapTypeIDs,
+ bmtInterface->dwInterfaceMapSize,
+ &cInterfaceDuplicates);
+ _ASSERTE(cInterfaceDuplicates <= bmtInterface->dwInterfaceMapSize);
+ _ASSERTE(cInterfaceDuplicates > 0);
+
+ // NOTE: This override does not cache the resulting MethodData object.
+ MethodTable::MethodDataWrapper hData(MethodTable::GetMethodData(
+ rgInterfaceDispatchMapTypeIDs,
+ cInterfaceDuplicates,
+ intIt->GetInterfaceType()->GetMethodTable(),
+ GetHalfBakedMethodTable()));
+ MethodTable::MethodIterator it(hData);
+ for (; it.IsValid() && it.IsVirtual(); it.Next())
+ {
+ if (it.GetTarget().IsNull())
+ {
+ MethodDesc *pMD = it.GetDeclMethodDesc();
+ BuildMethodTableThrowException(IDS_CLASSLOAD_NOTIMPLEMENTED, pMD->GetNameOnNonArrayClass());
+ }
+ }
+ }
+ }
+}
+
+INT32 __stdcall IsDefined(Module *pModule, mdToken token, TypeHandle attributeClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ BOOL isDefined = FALSE;
+
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+ BOOL isSealed = FALSE;
+
+ HENUMInternalHolder hEnum(pInternalImport);
+ TypeHandle caTH;
+
+ // Get the enum first but don't get any values
+ hEnum.EnumInit(mdtCustomAttribute, token);
+
+ ULONG cMax = pInternalImport->EnumGetCount(&hEnum);
+ if (cMax)
+ {
+ // we have something to look at
+
+
+ if (!attributeClass.IsNull())
+ isSealed = attributeClass.GetMethodTable()->IsSealed();
+
+ // Loop through the Attributes and look for the requested one
+ mdCustomAttribute cv;
+ while (pInternalImport->EnumNext(&hEnum, &cv))
+ {
+ //
+ // fetch the ctor
+ mdToken tkCtor;
+ IfFailThrow(pInternalImport->GetCustomAttributeProps(cv, &tkCtor));
+
+ mdToken tkType = TypeFromToken(tkCtor);
+ if(tkType != mdtMemberRef && tkType != mdtMethodDef)
+ continue; // we only deal with the ctor case
+
+ //
+ // get the info to load the type, so we can check whether the current
+ // attribute is a subtype of the requested attribute
+ IfFailThrow(pInternalImport->GetParentToken(tkCtor, &tkType));
+
+ _ASSERTE(TypeFromToken(tkType) == mdtTypeRef || TypeFromToken(tkType) == mdtTypeDef);
+ // load the type
+ if (isSealed)
+ {
+ caTH=ClassLoader::LoadTypeDefOrRefThrowing(pModule, tkType,
+ ClassLoader::ReturnNullIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef,
+ TypeFromToken(tkType) == mdtTypeDef ? tdAllTypes : tdNoTypes);
+ }
+ else
+ {
+ caTH = ClassLoader::LoadTypeDefOrRefThrowing(pModule, tkType,
+ ClassLoader::ReturnNullIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef);
+ }
+ if (caTH.IsNull())
+ continue;
+
+ // a null attribute class matches all custom attributes
+ if (!attributeClass.IsNull())
+ {
+ if (isSealed)
+ {
+ if (attributeClass != caTH)
+ continue;
+ }
+ else
+ {
+ if (!caTH.CanCastTo(attributeClass))
+ continue;
+ }
+ }
+
+ //
+ // if we are here we got one
+ isDefined = TRUE;
+ break;
+ }
+ }
+
+ return isDefined;
+}
+
+//*******************************************************************************
+VOID MethodTableBuilder::CheckForRemotingProxyAttrib()
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_REMOTING
+ // See if our parent class has a proxy attribute
+ _ASSERTE(g_pObjectClass != NULL);
+
+ if (!GetParentMethodTable()->GetClass()->HasRemotingProxyAttribute())
+ {
+ // Call the metadata api to look for a proxy attribute on this type
+ // Note: the api does not check for inherited attributes
+
+ // Set the flag if the type has a non-default proxy attribute
+ if(IsDefined(
+ GetModule(),
+ bmtInternal->pType->GetTypeDefToken(),
+ TypeHandle(MscorlibBinder::GetClass(CLASS__PROXY_ATTRIBUTE))))
+ {
+ SetHasRemotingProxyAttribute();
+ }
+ }
+ else
+ {
+ // parent has proxyAttribute ... mark this class as having one too!
+ SetHasRemotingProxyAttribute();
+ }
+#endif // FEATURE_REMOTING
+}
+
+
+//*******************************************************************************
+// Checks for a bunch of special interface names and if one matches then it sets
+// bmtProp->fIsMngStandardItf to TRUE. Additionally, it checks to see if the
+// type is an interface and if it has the ComEventInterfaceAttribute custom attribute
+// set, then it sets bmtProp->fComEventItfType to true.
+//
+// NOTE: This only does anything when COM interop is enabled.
+
+VOID MethodTableBuilder::CheckForSpecialTypes()
+{
+#ifdef FEATURE_COMINTEROP
+ STANDARD_VM_CONTRACT;
+
+
+ Module *pModule = GetModule();
+ IMDInternalImport *pMDImport = pModule->GetMDImport();
+
+ // Check to see if this type is a managed standard interface. All the managed
+ // standard interfaces live in mscorlib.dll so checking for that first
+ // makes the strcmp that comes afterwards acceptable.
+ if (pModule->IsSystem())
+ {
+ if (IsInterface())
+ {
+ LPCUTF8 pszClassName;
+ LPCUTF8 pszClassNamespace;
+ if (FAILED(pMDImport->GetNameOfTypeDef(GetCl(), &pszClassName, &pszClassNamespace)))
+ {
+ pszClassName = pszClassNamespace = NULL;
+ }
+ if ((pszClassName != NULL) && (pszClassNamespace != NULL))
+ {
+ LPUTF8 pszFullyQualifiedName = NULL;
+ MAKE_FULLY_QUALIFIED_NAME(pszFullyQualifiedName, pszClassNamespace, pszClassName);
+
+ // This is just to give us a scope to break out of.
+ do
+ {
+
+#define MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID, bCanCastOnNativeItfQI) \
+ if (strcmp(strMngItfName, pszFullyQualifiedName) == 0) \
+ { \
+ bmtProp->fIsMngStandardItf = true; \
+ break; \
+ }
+
+#define MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, ECallMethName, MethName, MethSig, FcallDecl)
+
+#define MNGSTDITF_END_INTERFACE(FriendlyName)
+
+#include "mngstditflist.h"
+
+#undef MNGSTDITF_BEGIN_INTERFACE
+#undef MNGSTDITF_DEFINE_METH_IMPL
+#undef MNGSTDITF_END_INTERFACE
+
+ } while (FALSE);
+
+ if (strcmp(pszFullyQualifiedName, g_CollectionsGenericCollectionItfName) == 0 ||
+ strcmp(pszFullyQualifiedName, g_CollectionsGenericReadOnlyCollectionItfName) == 0 ||
+ strcmp(pszFullyQualifiedName, g_CollectionsCollectionItfName) == 0)
+ {
+ // ICollection`1, ICollection and IReadOnlyCollection`1 are special cases the adapter is unaware of
+ bmtProp->fIsRedirectedInterface = true;
+ }
+ else
+ {
+ if (strcmp(pszFullyQualifiedName, WinMDAdapter::GetRedirectedTypeFullCLRName(WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IEnumerable)) == 0 ||
+ strcmp(pszFullyQualifiedName, WinMDAdapter::GetRedirectedTypeFullCLRName(WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IList)) == 0 ||
+ strcmp(pszFullyQualifiedName, WinMDAdapter::GetRedirectedTypeFullCLRName(WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IDictionary)) == 0 ||
+ strcmp(pszFullyQualifiedName, WinMDAdapter::GetRedirectedTypeFullCLRName(WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IReadOnlyList)) == 0 ||
+ strcmp(pszFullyQualifiedName, WinMDAdapter::GetRedirectedTypeFullCLRName(WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IReadOnlyDictionary)) == 0 ||
+ strcmp(pszFullyQualifiedName, WinMDAdapter::GetRedirectedTypeFullCLRName(WinMDAdapter::RedirectedTypeIndex_System_Collections_IEnumerable)) == 0 ||
+ strcmp(pszFullyQualifiedName, WinMDAdapter::GetRedirectedTypeFullCLRName(WinMDAdapter::RedirectedTypeIndex_System_Collections_IList)) == 0 ||
+ strcmp(pszFullyQualifiedName, WinMDAdapter::GetRedirectedTypeFullCLRName(WinMDAdapter::RedirectedTypeIndex_System_IDisposable)) == 0)
+ {
+ bmtProp->fIsRedirectedInterface = true;
+ }
+ }
+
+ // We want to allocate the per-type RCW data optional MethodTable field for
+ // 1. Redirected interfaces
+ // 2. Mscorlib-declared [WindowsRuntimeImport] interfaces
+ bmtProp->fNeedsRCWPerTypeData = (bmtProp->fIsRedirectedInterface || GetHalfBakedClass()->IsProjectedFromWinRT());
+
+ if (!bmtProp->fNeedsRCWPerTypeData)
+ {
+ // 3. Non-generic IEnumerable
+ if (strcmp(pszFullyQualifiedName, g_CollectionsEnumerableItfName) == 0)
+ {
+ bmtProp->fNeedsRCWPerTypeData = true;
+ }
+ }
+ }
+ }
+ else if (IsDelegate() && bmtGenerics->HasInstantiation())
+ {
+ // 4. Redirected delegates
+ if (GetHalfBakedClass()->GetWinRTRedirectedTypeIndex()
+ != WinMDAdapter::RedirectedTypeIndex_Invalid)
+ {
+ bmtProp->fNeedsRCWPerTypeData = true;
+ }
+ }
+ }
+ else if (bmtGenerics->HasInstantiation() && pModule->GetAssembly()->IsWinMD())
+ {
+ // 5. WinRT types with variance
+ if (bmtGenerics->pVarianceInfo != NULL)
+ {
+ bmtProp->fNeedsRCWPerTypeData = true;
+ }
+ else if (IsInterface())
+ {
+ // 6. Windows.Foundation.Collections.IIterator`1
+ LPCUTF8 pszClassName;
+ LPCUTF8 pszClassNamespace;
+ if (SUCCEEDED(pMDImport->GetNameOfTypeDef(GetCl(), &pszClassName, &pszClassNamespace)))
+ {
+ LPUTF8 pszFullyQualifiedName = NULL;
+ MAKE_FULLY_QUALIFIED_NAME(pszFullyQualifiedName, pszClassNamespace, pszClassName);
+
+ if (strcmp(pszFullyQualifiedName, g_WinRTIIteratorClassName) == 0)
+ {
+ bmtProp->fNeedsRCWPerTypeData = true;
+ }
+ }
+ }
+ }
+ else if (GetAppDomain()->IsSystemDll(pModule->GetAssembly()))
+ {
+ // 7. System.Collections.Specialized.INotifyCollectionChanged
+ // 8. System.Collections.Specialized.NotifyCollectionChangedEventHandler
+ // 9. System.ComponentModel.INotifyPropertyChanged
+ // 10. System.ComponentModel.PropertyChangedEventHandler
+ // 11. System.Windows.Input.ICommand
+ if ((IsInterface() || IsDelegate()) && IsTdPublic(GetHalfBakedClass()->GetAttrClass()))
+ {
+ LPCUTF8 pszClassName;
+ LPCUTF8 pszClassNamespace;
+ if (SUCCEEDED(pMDImport->GetNameOfTypeDef(GetCl(), &pszClassName, &pszClassNamespace)))
+ {
+ LPUTF8 pszFullyQualifiedName = NULL;
+ MAKE_FULLY_QUALIFIED_NAME(pszFullyQualifiedName, pszClassNamespace, pszClassName);
+
+ if (strcmp(pszFullyQualifiedName, g_INotifyCollectionChangedName) == 0 ||
+ strcmp(pszFullyQualifiedName, g_NotifyCollectionChangedEventHandlerName) == 0 ||
+ strcmp(pszFullyQualifiedName, g_INotifyPropertyChangedName) == 0 ||
+ strcmp(pszFullyQualifiedName, g_PropertyChangedEventHandlerName) == 0 ||
+ strcmp(pszFullyQualifiedName, g_ICommandName) == 0)
+ {
+ bmtProp->fNeedsRCWPerTypeData = true;
+ }
+ }
+ }
+ }
+
+ // Check to see if the type is a COM event interface (classic COM interop only).
+ if (IsInterface() && !GetHalfBakedClass()->IsProjectedFromWinRT())
+ {
+ HRESULT hr = pMDImport->GetCustomAttributeByName(GetCl(), INTEROP_COMEVENTINTERFACE_TYPE, NULL, NULL);
+ if (hr == S_OK)
+ {
+ bmtProp->fComEventItfType = true;
+ }
+ }
+#endif // FEATURE_COMINTEROP
+}
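+
+// A minimal standalone sketch of the interface-path decision above (editor
+// illustration; the function and parameter names are hypothetical, not runtime
+// APIs): the RCW-per-type-data choice for interfaces reduces to a small
+// predicate over the fully qualified name plus two flags computed earlier.
+static bool SketchNeedsRCWPerTypeData(const char * pszFullyQualifiedName,
+                                      bool fIsRedirectedInterface,
+                                      bool fIsProjectedFromWinRT)
+{
+    // Cases 1 and 2: redirected interfaces and mscorlib-declared
+    // [WindowsRuntimeImport] interfaces always get the optional field.
+    if (fIsRedirectedInterface || fIsProjectedFromWinRT)
+        return true;
+
+    // Case 3: non-generic IEnumerable is special-cased by name.
+    return strcmp(pszFullyQualifiedName, "System.Collections.IEnumerable") == 0;
+}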
+
+#ifdef FEATURE_READYTORUN
+//*******************************************************************************
+VOID MethodTableBuilder::CheckLayoutDependsOnOtherModules(MethodTable * pDependencyMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ // These cases are expected to be handled by the caller
+ _ASSERTE(!(pDependencyMT == g_pObjectClass || pDependencyMT->IsTruePrimitive() || ((g_pEnumClass != NULL) && pDependencyMT->IsEnum())));
+
+ //
+ // WARNING: Changes in this algorithm are potential ReadyToRun breaking changes !!!
+ //
+    // Track whether the field layout of this type depends on information outside its containing module
+    //
+    // It is a stronger condition than MethodTable::IsInheritanceChainLayoutFixedInCurrentVersionBubble().
+    // It has to remain fixed across versioning changes in the module dependencies. In particular, it does
+    // not take the NonVersionable attribute into account. Otherwise, adding the NonVersionable attribute
+    // to an existing type would be a ReadyToRun-incompatible change.
+ //
+ if (pDependencyMT->GetModule() == GetModule())
+ {
+ if (!pDependencyMT->GetClass()->HasLayoutDependsOnOtherModules())
+ return;
+ }
+
+ GetHalfBakedClass()->SetHasLayoutDependsOnOtherModules();
+}
+
+BOOL MethodTableBuilder::NeedsAlignedBaseOffset()
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+ // WARNING: Changes in this algorithm are potential ReadyToRun breaking changes !!!
+ //
+ // This method returns whether the type needs aligned base offset in order to have layout resilient to
+ // base class layout changes.
+ //
+ if (IsValueClass())
+ return FALSE;
+
+ // READYTORUN: TODO: This logic is not correct when NGen image depends on ReadyToRun image. In this case,
+ // GetModule()->IsReadyToRun() flag is going to be false at NGen time, but it is going to be true at runtime.
+ // Thus, the offsets between the two cases are going to be different.
+ if (!(IsReadyToRunCompilation() || GetModule()->IsReadyToRun()))
+ return FALSE;
+
+ MethodTable * pParentMT = GetParentMethodTable();
+
+ // Trivial parents
+ if (pParentMT == NULL || pParentMT == g_pObjectClass)
+ return FALSE;
+
+ if (pParentMT->GetModule() == GetModule())
+ {
+ if (!pParentMT->GetClass()->HasLayoutDependsOnOtherModules())
+ return FALSE;
+ }
+
+ return TRUE;
+}
+#endif // FEATURE_READYTORUN
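+
+// A minimal standalone sketch of why an aligned base offset helps (editor
+// illustration; the names and the fixed 8-byte alignment are hypothetical):
+// when the first derived field is placed at an alignment boundary rather than
+// immediately after the base class, small growth of the base does not move it.
+static unsigned SketchFirstDerivedFieldOffset(unsigned cbBaseClassSize, bool fAligned)
+{
+    const unsigned cbAlign = 8;
+    if (!fAligned)
+        return cbBaseClassSize;   // moves every time the base class grows
+
+    // Round up to the next 8-byte boundary: stable as long as the base class
+    // size stays within the same 8-byte bucket across version changes.
+    return (cbBaseClassSize + cbAlign - 1) & ~(cbAlign - 1);
+}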
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+// Set the contextful or marshaledbyref flag on the attributes of the class
+//
+VOID MethodTableBuilder::SetContextfulOrByRef()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(bmtInternal));
+
+ }
+ CONTRACTL_END;
+#ifdef FEATURE_REMOTING
+
+ if (GetModule()->IsSystem())
+ {
+ // Check whether these classes are the root classes of contextful
+        // and marshalbyref classes, i.e. System.ContextBoundObject and
+        // System.MarshalByRefObject, respectively.
+
+ // Extract the class name
+ LPCUTF8 pszClassName = NULL;
+ LPCUTF8 pszNameSpace = NULL;
+ if (FAILED(GetMDImport()->GetNameOfTypeDef(GetCl(), &pszClassName, &pszNameSpace)))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ StackSString ssFullyQualifiedName;
+ ns::MakePath(ssFullyQualifiedName,
+ StackSString(SString::Utf8, pszNameSpace),
+ StackSString(SString::Utf8, pszClassName));
+
+ if(ssFullyQualifiedName.Equals(SL(g_ContextBoundObjectClassName)))
+ { // Set the contextful and marshalbyref flag
+ bmtProp->fIsContextful = true;
+ bmtProp->fMarshaledByRef = true;
+ return;
+ }
+
+ if(ssFullyQualifiedName.Equals(SL(g_MarshalByRefObjectClassName)))
+ { // Set the marshalbyref flag
+ bmtProp->fMarshaledByRef = true;
+ return;
+ }
+ }
+
+ // First check whether the parent class is contextful or
+ // marshalbyref
+ if(HasParent())
+ {
+ MethodTable * pParent = GetParentMethodTable();
+ if(pParent->IsContextful())
+ { // Set the contextful and marshalbyref flag
+ bmtProp->fIsContextful = true;
+ bmtProp->fMarshaledByRef = true;
+
+ if (bmtGenerics->GetNumGenericArgs() > 0)
+ { // While these could work with a bit of work in the JIT,
+ // we will not support generic context-bound objects in V2.0.
+ BuildMethodTableThrowException(IDS_CLASSLOAD_GENERIC_CONTEXT_BOUND_OBJECT);
+ }
+
+ if (GetAssembly()->IsCollectible())
+ {
+ // Collectible assemblies do not support ContextBoundObject
+ BuildMethodTableThrowException(IDS_CLASSLOAD_COLLECTIBLE_CONTEXT_BOUND_OBJECT);
+ }
+ }
+
+ else if (pParent->IsMarshaledByRef())
+ { // Set the marshalbyref flag
+ bmtProp->fMarshaledByRef = true;
+ }
+ }
+#endif // FEATURE_REMOTING
+
+}
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+// Set the HasFinalizer and HasCriticalFinalizer flags
+//
+VOID MethodTableBuilder::SetFinalizationSemantics()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (g_pObjectFinalizerMD && !IsInterface() && !IsValueClass())
+ {
+ WORD slot = g_pObjectFinalizerMD->GetSlot();
+
+        // Types not derived from Object will get marked as having a finalizer if they have
+        // enough virtual slots to cover the finalizer slot. This is only an issue if they can
+        // be allocated in the GC heap (which would cause all sorts of other problems anyway).
+ if (slot < bmtVT->cVirtualSlots && (*bmtVT)[slot].Impl().GetMethodDesc() != g_pObjectFinalizerMD)
+ {
+ GetHalfBakedMethodTable()->SetHasFinalizer();
+
+ // The need for a critical finalizer can be inherited from a parent.
+ // Since we set this automatically for CriticalFinalizerObject
+ // elsewhere, the code below is the means by which any derived class
+ // picks up the attribute.
+ if (HasParent() && GetParentMethodTable()->HasCriticalFinalizer())
+ {
+ GetHalfBakedMethodTable()->SetHasCriticalFinalizer();
+ }
+ }
+ }
+}
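+
+// A minimal standalone sketch of the test above (editor illustration with
+// hypothetical types): a type "has a finalizer" exactly when the vtable slot
+// that Object's Finalize occupies no longer points at Object's implementation.
+static bool SketchHasFinalizer(void ** rgVTableSlots, unsigned cSlots,
+                               unsigned finalizeSlot, void * pObjectFinalizeImpl)
+{
+    // The slot must exist (interfaces and value classes are filtered out
+    // earlier) and must have been overridden somewhere in the inheritance chain.
+    return finalizeSlot < cSlots &&
+           rgVTableSlots[finalizeSlot] != pObjectFinalizeImpl;
+}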
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+// Perform relevant GC calculations for value classes
+//
+VOID MethodTableBuilder::HandleGCForValueClasses(MethodTable ** pByValueClassCache)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD i;
+
+ EEClass *pClass = GetHalfBakedClass();
+ MethodTable *pMT = GetHalfBakedMethodTable();
+
+ FieldDesc *pFieldDescList = pClass->GetFieldDescList();
+
+ // Note that for value classes, the following calculation is only appropriate
+ // when the instance is in its "boxed" state.
+#ifdef FEATURE_COLLECTIBLE_TYPES
+ if (bmtFP->NumGCPointerSeries == 0 && pMT->Collectible())
+ {
+ // For collectible types, insert empty gc series
+ CGCDescSeries *pSeries;
+
+ CGCDesc::Init( (PVOID) pMT, 1);
+ pSeries = ((CGCDesc*)pMT)->GetLowestSeries();
+ pSeries->SetSeriesSize( (size_t) (0) - (size_t) pMT->GetBaseSize());
+ pSeries->SetSeriesOffset(sizeof(Object));
+ }
+ else
+#endif // FEATURE_COLLECTIBLE_TYPES
+ if (bmtFP->NumGCPointerSeries != 0)
+ {
+ CGCDescSeries *pSeries;
+ CGCDescSeries *pHighest;
+
+ pMT->SetContainsPointers();
+
+ // Copy the pointer series map from the parent
+ CGCDesc::Init( (PVOID) pMT, bmtFP->NumGCPointerSeries );
+ if (bmtParent->NumParentPointerSeries != 0)
+ {
+ size_t ParentGCSize = CGCDesc::ComputeSize(bmtParent->NumParentPointerSeries);
+ memcpy( (PVOID) (((BYTE*) pMT) - ParentGCSize),
+ (PVOID) (((BYTE*) GetParentMethodTable()) - ParentGCSize),
+ ParentGCSize - sizeof(size_t) // sizeof(size_t) is the NumSeries count
+ );
+
+ }
+
+        // Build the pointer series map for the pointers in this instance
+ pSeries = ((CGCDesc*)pMT)->GetLowestSeries();
+ if (bmtFP->NumInstanceGCPointerFields)
+ {
+ // See gcdesc.h for an explanation of why we adjust by subtracting BaseSize
+ pSeries->SetSeriesSize( (size_t) (bmtFP->NumInstanceGCPointerFields * sizeof(OBJECTREF)) - (size_t) pMT->GetBaseSize());
+ pSeries->SetSeriesOffset(bmtFP->GCPointerFieldStart+sizeof(Object));
+ pSeries++;
+ }
+
+ // Insert GC info for fields which are by-value classes
+ for (i = 0; i < bmtEnumFields->dwNumInstanceFields; i++)
+ {
+ if (pFieldDescList[i].IsByValue())
+ {
+ MethodTable *pByValueMT = pByValueClassCache[i];
+
+ if (pByValueMT->ContainsPointers())
+ {
+ // Offset of the by value class in the class we are building, does NOT include Object
+ DWORD dwCurrentOffset = pFieldDescList[i].GetOffset_NoLogging();
+
+ // The by value class may have more than one pointer series
+ CGCDescSeries * pByValueSeries = CGCDesc::GetCGCDescFromMT(pByValueMT)->GetLowestSeries();
+ SIZE_T dwNumByValueSeries = CGCDesc::GetCGCDescFromMT(pByValueMT)->GetNumSeries();
+
+ for (SIZE_T j = 0; j < dwNumByValueSeries; j++)
+ {
+ size_t cbSeriesSize;
+ size_t cbSeriesOffset;
+
+ _ASSERTE(pSeries <= CGCDesc::GetCGCDescFromMT(pMT)->GetHighestSeries());
+
+ cbSeriesSize = pByValueSeries->GetSeriesSize();
+
+ // Add back the base size of the by value class, since it's being transplanted to this class
+ cbSeriesSize += pByValueMT->GetBaseSize();
+
+ // Subtract the base size of the class we're building
+ cbSeriesSize -= pMT->GetBaseSize();
+
+ // Set current series we're building
+ pSeries->SetSeriesSize(cbSeriesSize);
+
+ // Get offset into the value class of the first pointer field (includes a +Object)
+ cbSeriesOffset = pByValueSeries->GetSeriesOffset();
+
+ // Add it to the offset of the by value class in our class
+ cbSeriesOffset += dwCurrentOffset;
+
+ pSeries->SetSeriesOffset(cbSeriesOffset); // Offset of field
+ pSeries++;
+ pByValueSeries++;
+ }
+ }
+ }
+ }
+
+        // Adjust the inherited series - since the base size has increased by "# new field instance bytes", we need to
+        // subtract that from all the series (since the series always has BaseSize subtracted from it - see gcdesc.h)
+ pHighest = CGCDesc::GetCGCDescFromMT(pMT)->GetHighestSeries();
+ while (pSeries <= pHighest)
+ {
+ CONSISTENCY_CHECK(CheckPointer(GetParentMethodTable()));
+ pSeries->SetSeriesSize( pSeries->GetSeriesSize() - ((size_t) pMT->GetBaseSize() - (size_t) GetParentMethodTable()->GetBaseSize()) );
+ pSeries++;
+ }
+
+ _ASSERTE(pSeries-1 <= CGCDesc::GetCGCDescFromMT(pMT)->GetHighestSeries());
+ }
+
+}
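+
+// A minimal standalone sketch of the series-size arithmetic above (editor
+// illustration; hypothetical names). Per gcdesc.h, a stored series size is the
+// raw byte length of the pointer run minus the BaseSize of the owning
+// MethodTable, so transplanting a series from a by-value field re-biases it
+// from the field type's BaseSize to the enclosing type's BaseSize.
+static size_t SketchTransplantSeriesSize(size_t storedByValSeriesSize,
+                                         size_t cbByValBaseSize,
+                                         size_t cbEnclosingBaseSize)
+{
+    // Recover the raw pointer-run length, then re-bias for the new owner.
+    size_t cbRawRun = storedByValSeriesSize + cbByValBaseSize;
+    return cbRawRun - cbEnclosingBaseSize;
+}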
+
+//*******************************************************************************
+//
+// Helper method for VerifyInheritanceSecurity
+//
+VOID MethodTableBuilder::VerifyClassInheritanceSecurityHelper(
+ MethodTable *pParentMT,
+ MethodTable *pChildMT)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pParentMT));
+ PRECONDITION(CheckPointer(pChildMT));
+ }
+ CONTRACTL_END;
+
+ //@ASSUMPTION: The current class has been resolved to the point that
+ // we can construct a reflection object on the class or its methods.
+ // This is required for the security checks.
+
+ // This method throws on failure.
+ Security::ClassInheritanceCheck(pChildMT, pParentMT);
+
+#ifndef FEATURE_CORECLR
+ // Check the entire parent chain for inheritance permission demands.
+ while (pParentMT != NULL)
+ {
+ if (pParentMT->GetClass()->RequiresInheritanceCheck())
+ {
+ // This method throws on failure.
+ Security::ClassInheritanceCheck(pChildMT, pParentMT);
+ }
+
+ pParentMT = pParentMT->GetParentMethodTable();
+ }
+#endif // !FEATURE_CORECLR
+}
+
+//*******************************************************************************
+//
+// Helper method for VerifyInheritanceSecurity
+//
+VOID MethodTableBuilder::VerifyMethodInheritanceSecurityHelper(
+ MethodDesc *pParentMD,
+ MethodDesc *pChildMD)
+{
+ CONTRACTL {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pParentMD));
+ PRECONDITION(CheckPointer(pChildMD));
+ } CONTRACTL_END;
+
+ Security::MethodInheritanceCheck(pChildMD, pParentMD);
+
+#ifndef FEATURE_CORECLR
+
+ // If no inheritance checks are required, just return.
+ if (!pParentMD->RequiresInheritanceCheck() &&
+ !pParentMD->ParentRequiresInheritanceCheck())
+ {
+ return;
+ }
+
+ DWORD dwSlot = pParentMD->GetSlot();
+
+#ifdef _DEBUG
+ // Get the name and signature for the method so we can find the new parent method desc.
+ // We use the parent MethodDesc for this because the child could actually have a very
+ // different name in the case that the child is MethodImpling the parent.
+
+ // Get the name.
+ LPCUTF8 szName;
+ szName = pParentMD->GetName();
+
+ // Get the signature.
+ PCCOR_SIGNATURE pSignature;
+ DWORD cSignature;
+ pParentMD->GetSig(&pSignature, &cSignature);
+ Module *pModule = pParentMD->GetModule();
+#endif // _DEBUG
+
+ do
+ {
+ if (pParentMD->RequiresInheritanceCheck())
+ {
+ Security::MethodInheritanceCheck(pChildMD, pParentMD);
+ }
+
+ if (pParentMD->ParentRequiresInheritanceCheck())
+ {
+ MethodTable *pGrandParentMT = pParentMD->GetMethodTable()->GetParentMethodTable();
+ CONSISTENCY_CHECK(CheckPointer(pGrandParentMT));
+
+ // Find this method in the parent.
+ // If it does exist in the parent, it would be at the same vtable slot.
+ if (dwSlot >= pGrandParentMT->GetNumVirtuals())
+ {
+ // Parent does not have this many vtable slots, so it doesn't exist there
+ pParentMD = NULL;
+ }
+ else
+ {
+ // It is in the vtable of the parent
+ pParentMD = pGrandParentMT->GetMethodDescForSlot(dwSlot);
+ _ASSERTE(pParentMD != NULL);
+
+#ifdef _DEBUG
+ _ASSERTE(pParentMD == MemberLoader::FindMethod(pGrandParentMT,
+ szName,
+ pSignature,
+ cSignature,
+ pModule));
+#endif // _DEBUG
+ }
+ }
+ else
+ {
+ pParentMD = NULL;
+ }
+ } while (pParentMD != NULL);
+
+#endif // !FEATURE_CORECLR
+}
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+// Check for the presence of type equivalence. If present, make sure
+// it is permitted to be on this type.
+//
+
+void MethodTableBuilder::CheckForTypeEquivalence(
+ WORD cBuildingInterfaceList,
+ BuildingInterfaceInfo_t *pBuildingInterfaceList)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+ bmtProp->fIsTypeEquivalent = !!IsTypeDefEquivalent(GetCl(), GetModule());
+
+ if (bmtProp->fIsTypeEquivalent)
+ {
+ BOOL fTypeEquivalentNotPermittedDueToType = !(((IsComImport() || bmtProp->fComEventItfType) && IsInterface()) || IsValueClass() || IsDelegate());
+ BOOL fTypeEquivalentNotPermittedDueToGenerics = bmtGenerics->HasInstantiation();
+ BOOL fTypeEquivalentNotPermittedDueToSecurity = !GetModule()->GetSecurityDescriptor()->IsFullyTrusted();
+
+ if (fTypeEquivalentNotPermittedDueToType || fTypeEquivalentNotPermittedDueToGenerics)
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_EQUIVALENTBADTYPE);
+ }
+ else
+ if (fTypeEquivalentNotPermittedDueToSecurity)
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_EQUIVALENTNOTTRUSTED);
+ }
+
+ GetHalfBakedClass()->SetIsEquivalentType();
+ }
+
+ bmtProp->fHasTypeEquivalence = bmtProp->fIsTypeEquivalent;
+
+ if (!bmtProp->fHasTypeEquivalence)
+ {
+ // fHasTypeEquivalence flag is inherited from interfaces so we can quickly detect
+ // types that implement type equivalent interfaces
+ for (WORD i = 0; i < cBuildingInterfaceList; i++)
+ {
+ MethodTable *pItfMT = pBuildingInterfaceList[i].m_pMethodTable;
+ if (pItfMT->HasTypeEquivalence())
+ {
+ bmtProp->fHasTypeEquivalence = true;
+ break;
+ }
+ }
+ }
+
+ if (!bmtProp->fHasTypeEquivalence)
+ {
+ // fHasTypeEquivalence flag is "inherited" from generic arguments so we can quickly detect
+ // types like List<Str> where Str is a structure with the TypeIdentifierAttribute.
+ if (bmtGenerics->HasInstantiation() && !bmtGenerics->IsTypicalTypeDefinition())
+ {
+ Instantiation inst = bmtGenerics->GetInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ if (inst[i].HasTypeEquivalence())
+ {
+ bmtProp->fHasTypeEquivalence = true;
+ break;
+ }
+ }
+ }
+ }
+#endif //FEATURE_TYPEEQUIVALENCE
+}
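+
+// A minimal standalone sketch of the propagation above (editor illustration;
+// hypothetical names): the "has type equivalence" bit is an OR over three
+// sources - the type itself, its implemented interfaces, and its generic
+// arguments.
+static bool SketchHasTypeEquivalence(bool fIsTypeEquivalent,
+                                     const bool * rgItfHasEquiv, unsigned cItfs,
+                                     const bool * rgArgHasEquiv, unsigned cArgs)
+{
+    if (fIsTypeEquivalent)
+        return true;
+
+    for (unsigned i = 0; i < cItfs; i++)    // inherited from interfaces
+        if (rgItfHasEquiv[i])
+            return true;
+
+    for (unsigned i = 0; i < cArgs; i++)    // "inherited" from generic arguments
+        if (rgArgHasEquiv[i])
+            return true;
+
+    return false;
+}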
+
+// Convert linktime security (including link demands and security critical checks) into inheritance security
+// in order to prevent partial trust code from bypassing linktime checks via clever inheritance hierarchies.
+//
+// Arguments:
+// pMDLinkDemand - The method containing the linktime security check that needs to be converted into an
+// inheritance check
+//
+// Notes:
+// #PartialTrustInterfaceMappingCheck
+//
+// Partial trust code can bypass the enforcement of link time security on any public virtual method of a
+// base type by mapping an unprotected interface back to the base method. For instance:
+//
+// Full trust APTCA assembly A:
+// class AptcaClass
+// {
+// [SecurityCritical]
+// public virtual void CriticalMethod() { }
+//
+// [PermissionSet(SecurityAction.LinkDemand, Unrestricted = true)]
+// public virtual void LinkDemandMethod() { }
+// }
+//
+// Partial trust assembly B:
+// interface IBypass
+// {
+// void CriticalMethod();
+// void LinkDemandMethod();
+// }
+//
+// class Bypass : AptcaClass, IBypass { }
+//
+// IBypass o = new Bypass();
+// o.CriticalMethod();
+// o.LinkDemandMethod();
+//
+// Since the static type seen by the JIT is IBypass, and there is no link time security on IBypass, the
+// partial trust code has stepped around the link time security checks.
+//
+// In order to prevent this, types which:
+// 1. Are partially trusted AND
+// 2. Cause an interface to be added to the type WHICH
+// 3. Has a method implemented by a base type in a different assembly AND
+// 4. The base type method has a link time check on it
+//
+// Convert the link time checks into inheritance checks. This effectively says that in order for partially
+// trusted code to turn off link time security, it needs to have the right to directly satisfy that
+// security itself. Since the partial trust code can call the protected method directly, it can also
+// easily wrap the method in an unprotected new method and call through that, so there is no
+// escalation of privilege.
+//
+// This method is only responsible for doing the actual inheritance demand conversion.
+// VerifyInheritanceSecurity checks for the above set of conditions to know when such a conversion is
+// necessary.
+//
+void MethodTableBuilder::ConvertLinkDemandToInheritanceDemand(MethodDesc *pMDLinkDemand)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMDLinkDemand));
+ }
+ CONTRACTL_END;
+
+ const bool fNeedTransparencyCheck = Security::IsMethodCritical(pMDLinkDemand) &&
+ !Security::IsMethodSafeCritical(pMDLinkDemand);
+ const bool fNeedLinkDemandCheck = pMDLinkDemand->RequiresLinktimeCheck() &&
+ !pMDLinkDemand->RequiresLinkTimeCheckHostProtectionOnly();
+
+ if (fNeedTransparencyCheck)
+ {
+ // The method being mapped to is security critical, so it effectively has a link time check for full
+ // trust on it. Therefore we need to convert to a full trust inheritance check
+ Security::FullTrustInheritanceDemand(GetAssembly());
+ }
+ else if (fNeedLinkDemandCheck)
+ {
+ // The method being mapped to is protected with a legacy link demand. We need to retrieve the
+ // permission set that is being used to protect the code and then use it to issue an inheritance
+ // demand.
+ Security::InheritanceLinkDemandCheck(GetAssembly(), pMDLinkDemand);
+ }
+}
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+// If we have a type-equivalent class, then do the type equivalence security
+// checks on it. The check starts by verifying that the class is
+// transparent or treat-as-safe, and then does the same for any fields.
+//
+
+void MethodTableBuilder::VerifyEquivalenceSecurity()
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+ if (!bmtProp->fIsTypeEquivalent)
+ return;
+
+ if (!GetHalfBakedMethodTable()->IsExternallyVisible())
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_EQUIVALENTNOTPUBLIC);
+ }
+
+ if (Security::IsTypeCritical(GetHalfBakedMethodTable()) &&
+ !Security::IsTypeSafeCritical(GetHalfBakedMethodTable()))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_EQUIVALENTTRANSPARENCY);
+ }
+
+ // Iterate through every field
+ FieldDesc *pFieldDescList = GetApproxFieldDescListRaw();
+ for (UINT i = 0; i < bmtEnumFields->dwNumInstanceFields; i++)
+ {
+ FieldDesc *pFD = &pFieldDescList[i];
+
+ FieldSecurityDescriptor fieldSecDesc(pFD);
+ if (fieldSecDesc.IsCritical() && !fieldSecDesc.IsTreatAsSafe())
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_EQUIVALENTTRANSPARENCY);
+ }
+ }
+
+ // Iterate through every method
+ DeclaredMethodIterator methIt(*this);
+ while (methIt.Next())
+ {
+ MethodDesc *pMD = methIt->GetMethodDesc();
+ _ASSERTE(pMD != NULL);
+ if (pMD == NULL)
+ continue;
+
+ MethodSecurityDescriptor methodSecDesc(pMD, FALSE);
+ if (Security::IsMethodCritical(pMD) && !Security::IsMethodSafeCritical(pMD))
+ {
+ BuildMethodTableThrowException(IDS_CLASSLOAD_EQUIVALENTTRANSPARENCY);
+ }
+ }
+#endif //FEATURE_TYPEEQUIVALENCE
+}
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+// If we have a non-interface class, then do inheritance security
+// checks on it. The check starts by checking for inheritance
+// permission demands on the current class. If these first checks
+// succeeded, then the cached declared method list is scanned for
+// methods that have inheritance permission demands.
+//
+
+void MethodTableBuilder::VerifyInheritanceSecurity()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsInterface())
+ return;
+
+ // If we have a non-interface class, then do inheritance security
+ // checks on it. The check starts by checking for inheritance
+ // permission demands on the current class. If these first checks
+ // succeeded, then the cached declared method list is scanned for
+ // methods that have inheritance permission demands.
+#ifdef FEATURE_CORECLR
+ //
+    // If we are transparent, and every class up the inheritance chain is also entirely transparent,
+    // that means that no inheritance rules could be broken.  If that's the case, we don't need to check
+    // each individual method.  We special case System.Object since it is not entirely transparent, but
+    // every member which can be overridden is.
+ //
+ // This optimization does not currently apply for nested classes, since we may need to evaluate the
+ // outer class in the TypeSecurityDescriptor, and that could end up with a type loading recursion.
+ //
+
+    const BOOL fCurrentTypeAllTransparent = GetHalfBakedClass()->IsNested() ? FALSE : Security::IsTypeAllTransparent(GetHalfBakedMethodTable());
+    BOOL fInheritanceChainTransparent = FALSE;
+
+    if (fCurrentTypeAllTransparent)
+    {
+        fInheritanceChainTransparent = TRUE;
+        MethodTable *pParentMT = GetParentMethodTable();
+        while (fInheritanceChainTransparent &&
+               pParentMT != NULL &&
+               pParentMT != g_pObjectClass)
+        {
+            fInheritanceChainTransparent &= Security::IsTypeAllTransparent(pParentMT);
+            pParentMT = pParentMT->GetParentMethodTable();
+            if (pParentMT != NULL && pParentMT->GetClass()->IsNested())
+            {
+                fInheritanceChainTransparent = FALSE;
+            }
+        }
+    }
+#endif // FEATURE_CORECLR
+
+    if (GetParentMethodTable() != NULL
+#ifdef FEATURE_CORECLR
+        && !fInheritanceChainTransparent
+#endif // FEATURE_CORECLR
+        )
+ {
+ // Check the parent for inheritance permission demands.
+ VerifyClassInheritanceSecurityHelper(GetParentMethodTable(), GetHalfBakedMethodTable());
+
+ // Iterate all the declared methods and check each of them for inheritance demands
+ DeclaredMethodIterator mIt(*this);
+ while (mIt.Next())
+ {
+ MethodDesc * pMD = mIt.GetMDMethod()->GetMethodDesc();
+ CONSISTENCY_CHECK(CheckPointer(pMD));
+
+ MethodDesc * pIntroducingMD = mIt.GetIntroducingMethodDesc();
+ if (pIntroducingMD != NULL)
+ {
+ VerifyMethodInheritanceSecurityHelper(pIntroducingMD, pMD);
+ }
+
+ // Make sure that we don't have a transparent method in a critical class; that will lead
+ // to situations where the method doesn't have access to the this pointer, so we want to
+ // fail now, rather than with a strange method access exception at invoke time
+ if (Security::IsTypeCritical(GetHalfBakedMethodTable()) &&
+ !Security::IsTypeSafeCritical(GetHalfBakedMethodTable()))
+ {
+ if (!Security::IsMethodCritical(pMD) && !pMD->IsStatic())
+ {
+ SecurityTransparent::ThrowTypeLoadException(pMD, IDS_E_TRANSPARENT_METHOD_CRITICAL_TYPE);
+ }
+ }
+
+ // If this method is a MethodImpl, we need to verify that all
+ // decls are allowed to be overridden.
+ if (pMD->IsMethodImpl())
+ {
+ // Iterate through each decl that this method is an impl for and
+ // test that inheritance demands are met.
+ MethodImpl *pMethodImpl = pMD->GetMethodImpl();
+ for (DWORD iCurImpl = 0; iCurImpl < pMethodImpl->GetSize(); iCurImpl++)
+ {
+ MethodDesc *pDeclMD = pMethodImpl->GetImplementedMDs()[iCurImpl];
+ _ASSERTE(pDeclMD != NULL);
+ // We deal with interfaces below, so don't duplicate work
+ if (!pDeclMD->IsInterface())
+ {
+ VerifyMethodInheritanceSecurityHelper(pDeclMD, pMD);
+ }
+ }
+ }
+ }
+ }
+
+ // Now we need to verify that we are meeting all inheritance demands
+ // that were placed on interfaces and their methods. The logic is as
+ // follows: for each method contributing an implementation to this type,
+ // if a method it could contribute to any interface described in the
+ // interface map, check that both method-level and type-level inheritance
+ // demands are met (only need to check type-level once per interface).
+ {
+ // We need to do a transparency check if the current type enforces the transparency inheritance
+        // rules.  As an optimization, we don't bother to do the check if the module is opportunistically
+        // critical, because the transparency setup for opportunistically critical assemblies by definition
+        // satisfies the inheritance rules.
+ const SecurityTransparencyBehavior *pTransparencyBehavior =
+ GetAssembly()->GetSecurityTransparencyBehavior();
+ ModuleSecurityDescriptor *pMSD =
+ ModuleSecurityDescriptor::GetModuleSecurityDescriptor(GetAssembly());
+
+ const bool fNeedTransparencyInheritanceCheck = pTransparencyBehavior->AreInheritanceRulesEnforced() &&
+ !pMSD->IsOpportunisticallyCritical();
+
+
+ // See code:PartialTrustInterfaceMappingCheck
+ IAssemblySecurityDescriptor *pASD = GetAssembly()->GetSecurityDescriptor();
+ const BOOL fNeedPartialTrustInterfaceMappingCheck = !pASD->IsFullyTrusted();
+
+ // Iterate through each interface
+ MethodTable *pMT = GetHalfBakedMethodTable();
+ MethodTable::InterfaceMapIterator itfIt = pMT->IterateInterfaceMap();
+ while (itfIt.Next())
+ {
+ // Get current interface details
+ MethodTable *pCurItfMT = itfIt.GetInterface();
+ CONSISTENCY_CHECK(CheckPointer(pCurItfMT));
+
+#ifdef FEATURE_CORECLR
+ if (fNeedTransparencyInheritanceCheck &&
+ !(Security::IsTypeAllTransparent(itfIt.GetInterface()) &&
+ fCurrentTypeAllTransparent)
+ )
+#else // FEATURE_CORECLR
+ EEClass * pCurItfCls = pCurItfMT->GetClass();
+ if (fNeedTransparencyInheritanceCheck ||
+ fNeedPartialTrustInterfaceMappingCheck ||
+ pCurItfCls->RequiresInheritanceCheck() ||
+ pCurItfCls->SomeMethodsRequireInheritanceCheck())
+#endif // !FEATURE_CORECLR
+ {
+ // An interface is introduced by this type either if it is explicitly declared on the
+ // type's interface list or if one of the type's explicit interfaces requires the
+ // interface. This is detected by seeing an interface which is not declared on this
+ // type, but also wasn't implemented by our parent.
+ //
+ // For instance:
+ //
+ // interface I1 { void M(); }
+ // interface I2 : I1 { }
+ // class B { public void M(); }
+ // class D : B, I2 { }
+ //
+                // In this case, D pulls in I2 explicitly (IsDeclaredOnType), but I1 comes in only
+                // because I2 requires I1 (!IsDeclaredOnType and !IsImplementedByParent).
+ bmtInterfaceEntry interfaceEntry = bmtInterface->pInterfaceMap[itfIt.GetIndex()];
+ BOOL fDeclaredOnType = interfaceEntry.IsDeclaredOnType() ||
+ !interfaceEntry.IsImplementedByParent();
+
+ // Now iterate through every method contributing any implementation
+ // and if it lies within the interface vtable, then we must evaluate demands
+ // NOTE: Avoid caching the MethodData object for the type being built.
+ BOOL fImplementedOnCurrentType = FALSE;
+ MethodTable::MethodDataWrapper
+ hItfImplData(MethodTable::GetMethodData(itfIt.GetInterface(), pMT, FALSE));
+ MethodTable::MethodIterator methIt(hItfImplData);
+ for (;methIt.IsValid(); methIt.Next())
+ {
+ // Check the security only if valid method implementation exists!
+ if (methIt.GetTarget().IsNull() == FALSE)
+ {
+ MethodDesc *pMDImpl = methIt.GetMethodDesc();
+ MethodDesc *pMDInterface = methIt.GetDeclMethodDesc();
+
+ //
+ // Check the security method helper if either:
+ // 1. The interface was explicitly declared by the current type (even if the
+ // interface implementation is found on a parent type) OR
+ // 2. The interface implementation method is on the current type
+ //
+ // For instance, we want to catch patterns such as:
+ //
+ // interface I { void M(); }
+ // class B { public void M(); }
+ // class D : B, I { }
+ //
+ // In which D causes I::M to map to B::M because D brought in the interface
+ // declaration.
+ //
+
+ if (fDeclaredOnType || pMDImpl->GetMethodTable() == pMT)
+ {
+ // Check security on the interface for this method in its default slot placement
+ VerifyMethodInheritanceSecurityHelper(pMDInterface, pMDImpl);
+
+ fImplementedOnCurrentType = TRUE;
+ }
+
+ // See code:PartialTrustInterfaceMappingCheck - we need to see if we're mapping
+ // an interface to another type cross-assembly that might have requested link
+ // time protection.
+ if (fDeclaredOnType && fNeedPartialTrustInterfaceMappingCheck)
+ {
+ if (pMDImpl->GetAssembly() != GetAssembly())
+ {
+ ConvertLinkDemandToInheritanceDemand(pMDImpl);
+ }
+ }
+ }
+ }
+
+ // If any previous methods contributed to this interface's implementation, that means we
+ // need to check the type-level inheritance for the interface.
+ if (fDeclaredOnType || fImplementedOnCurrentType)
+ {
+ VerifyClassInheritanceSecurityHelper(pCurItfMT, pMT);
+ }
+ }
+ }
+ }
+}
+
+//*******************************************************************************
+//
+// Used by BuildMethodTable
+//
+// Before we make the final leap, make sure we've allocated all memory needed to
+// fill out the RID maps.
+//
+VOID MethodTableBuilder::EnsureRIDMapsCanBeFilled()
+{
+ STANDARD_VM_CONTRACT;
+
+
+ DWORD i;
+
+
+ // Rather than call Ensure***CanBeStored() hundreds of times, we
+ // will call it once on the largest token we find. This relies
+ // on an invariant that RidMaps don't use some kind of sparse
+ // allocation.
+
+ {
+ mdMethodDef largest = mdMethodDefNil;
+
+ DeclaredMethodIterator it(*this);
+ while (it.Next())
+ {
+ if (it.Token() > largest)
+ {
+ largest = it.Token();
+ }
+ }
+ if ( largest != mdMethodDefNil )
+ {
+ GetModule()->EnsureMethodDefCanBeStored(largest);
+ }
+ }
+
+ {
+ mdFieldDef largest = mdFieldDefNil;
+
+ for (i = 0; i < bmtMetaData->cFields; i++)
+ {
+ if (bmtMetaData->pFields[i] > largest)
+ {
+ largest = bmtMetaData->pFields[i];
+ }
+ }
+ if ( largest != mdFieldDefNil )
+ {
+ GetModule()->EnsureFieldDefCanBeStored(largest);
+ }
+ }
+}
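+
+// A minimal standalone sketch of the trick above (editor illustration;
+// hypothetical names): because the RID maps are densely allocated, ensuring
+// storage once for the largest token guarantees storage for all smaller ones.
+static unsigned SketchLargestToken(const unsigned * rgTokens, unsigned cTokens)
+{
+    unsigned largest = 0;   // stands in for mdMethodDefNil/mdFieldDefNil
+    for (unsigned i = 0; i < cTokens; i++)
+    {
+        if (rgTokens[i] > largest)
+            largest = rgTokens[i];
+    }
+    return largest;         // caller then calls Ensure*CanBeStored(largest) once
+}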
+
+#ifdef FEATURE_COMINTEROP
+//*******************************************************************************
+void MethodTableBuilder::GetCoClassAttribInfo()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!GetHalfBakedClass()->IsProjectedFromWinRT()) // ignore classic COM interop CA on WinRT interfaces
+ {
+ // Retrieve the CoClassAttribute CA.
+ HRESULT hr = GetMDImport()->GetCustomAttributeByName(GetCl(), INTEROP_COCLASS_TYPE, NULL, NULL);
+ if (hr == S_OK)
+ {
+ // COM class interfaces may lazily populate the m_pCoClassForIntf field of EEClass. This field is
+ // optional so we must ensure the optional field descriptor has been allocated.
+ EnsureOptionalFieldsAreAllocated(GetHalfBakedClass(), m_pAllocMemTracker, GetLoaderAllocator()->GetLowFrequencyHeap());
+ SetIsComClassInterface();
+ }
+ }
+}
+#endif // FEATURE_COMINTEROP
+
+//*******************************************************************************
+void MethodTableBuilder::bmtMethodImplInfo::AddMethodImpl(
+ bmtMDMethod * pImplMethod, bmtMethodHandle declMethod,
+ StackingAllocator * pStackingAllocator)
+{
+ STANDARD_VM_CONTRACT;
+
+ CONSISTENCY_CHECK(CheckPointer(pImplMethod));
+ CONSISTENCY_CHECK(!declMethod.IsNull());
+ if (pIndex >= cMaxIndex)
+ {
+ DWORD newEntriesCount = 0;
+
+ if (!ClrSafeInt<DWORD>::multiply(cMaxIndex, 2, newEntriesCount))
+ ThrowHR(COR_E_OVERFLOW);
+
+ if (newEntriesCount == 0)
+ newEntriesCount = 10;
+
+ // If we have to grow this array, we will not free the old array before we clean up the BuildMethodTable operation
+        // because this is a stacking allocator. However, the old array will get freed when the whole stacking allocator is freed.
+ Entry *rgEntriesNew = new (pStackingAllocator) Entry[newEntriesCount];
+ memcpy(rgEntriesNew, rgEntries, sizeof(Entry) * cMaxIndex);
+
+ // Start using newly allocated array.
+ rgEntries = rgEntriesNew;
+ cMaxIndex = newEntriesCount;
+ }
+ rgEntries[pIndex++] = Entry(pImplMethod, declMethod);
+}
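+
+// A minimal standalone sketch of the growth policy above (editor illustration;
+// the real code uses ClrSafeInt<DWORD>::multiply): double the capacity with an
+// explicit overflow check and seed a small initial capacity on first growth.
+static bool SketchGrowCapacity(unsigned cCurrent, unsigned * pcNew)
+{
+    // Reject cCurrent * 2 if it would wrap around.
+    if (cCurrent > ~0u / 2)
+        return false;       // caller maps this to COR_E_OVERFLOW
+
+    unsigned cNew = cCurrent * 2;
+    if (cNew == 0)
+        cNew = 10;          // first allocation
+
+    *pcNew = cNew;
+    return true;
+}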
+
+//*******************************************************************************
+// Returns TRUE if tok acts as a body for any methodImpl entry. FALSE, otherwise.
+BOOL MethodTableBuilder::bmtMethodImplInfo::IsBody(mdToken tok)
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(TypeFromToken(tok) == mdtMethodDef);
+ for (DWORD i = 0; i < pIndex; i++)
+ {
+ if (GetBodyMethodDesc(i)->GetMemberDef() == tok)
+ {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+//*******************************************************************************
+BYTE *
+MethodTableBuilder::AllocateFromHighFrequencyHeap(S_SIZE_T cbMem)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ return (BYTE *)GetMemTracker()->Track(
+ GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(cbMem));
+}
+
+//*******************************************************************************
+BYTE *
+MethodTableBuilder::AllocateFromLowFrequencyHeap(S_SIZE_T cbMem)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ return (BYTE *)GetMemTracker()->Track(
+ GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(cbMem));
+}
+
+//-------------------------------------------------------------------------------
+// Make a best-effort attempt to obtain an image name for use in an error message.
+//
+// This routine must expect to be called before this object is fully loaded.
+// It can return an empty string if the name isn't available or the object isn't
+// initialized enough to get a name, but it mustn't crash.
+//-------------------------------------------------------------------------------
+LPCWSTR MethodTableBuilder::GetPathForErrorMessages()
+{
+ STANDARD_VM_CONTRACT;
+
+ return GetModule()->GetPathForErrorMessages();
+}
+
+BOOL MethodTableBuilder::ChangesImplementationOfVirtualSlot(SLOT_INDEX idx)
+{
+ STANDARD_VM_CONTRACT;
+
+ BOOL fChangesImplementation = TRUE;
+
+ _ASSERTE(idx < bmtVT->cVirtualSlots);
+
+ if (HasParent() && idx < GetParentMethodTable()->GetNumVirtuals())
+ {
+ _ASSERTE(idx < bmtParent->pSlotTable->GetSlotCount());
+ bmtMethodHandle VTImpl = (*bmtVT)[idx].Impl();
+ bmtMethodHandle ParentImpl = (*bmtParent)[idx].Impl();
+
+ fChangesImplementation = VTImpl != ParentImpl;
+
+ // See code:MethodTableBuilder::SetupMethodTable2 and its logic
+ // for handling MethodImpl's on parent classes which affect non interface
+ // methods.
+ if (!fChangesImplementation && (ParentImpl.GetSlotIndex() != idx))
+ fChangesImplementation = TRUE;
+ }
+
+ return fChangesImplementation;
+}
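+
+// A minimal standalone sketch of the slot test above (editor illustration;
+// hypothetical parameters): a virtual slot changes implementation if the
+// implementation differs from the parent's, or if the parent's implementation
+// was itself MethodImpl'd into the slot from a different home slot.
+static bool SketchChangesVirtualSlot(void * pVTImpl, void * pParentImpl,
+                                     unsigned idx, unsigned parentImplHomeSlot)
+{
+    if (pVTImpl != pParentImpl)
+        return true;                      // ordinary override
+
+    // Same implementation, but placed via a MethodImpl whose home slot differs.
+    return parentImplHomeSlot != idx;
+}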
+
+// Must be called prior to setting the value of any optional field on EEClass (on a debug build an assert will
+// fire if this invariant is violated).
+void MethodTableBuilder::EnsureOptionalFieldsAreAllocated(EEClass *pClass, AllocMemTracker *pamTracker, LoaderHeap *pHeap)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pClass->HasOptionalFields())
+ return;
+
+ EEClassOptionalFields *pOptFields = (EEClassOptionalFields*)
+ pamTracker->Track(pHeap->AllocMem(S_SIZE_T(sizeof(EEClassOptionalFields))));
+
+ // Initialize default values for all optional fields.
+ pOptFields->Init();
+
+ // Attach optional fields to the class.
+ pClass->AttachOptionalFields(pOptFields);
+}
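+
+// A minimal standalone sketch of the lazy optional-fields pattern above
+// (editor illustration with hypothetical types; the real code allocates from a
+// LoaderHeap and registers the memory with an AllocMemTracker).
+struct SketchOptionalFields
+{
+    int someRarelyUsedValue;
+    void Init() { someRarelyUsedValue = 0; }
+};
+
+struct SketchClass
+{
+    SketchOptionalFields * m_pOptional;
+
+    SketchClass() : m_pOptional(NULL) {}
+
+    void EnsureOptionalFields()
+    {
+        if (m_pOptional != NULL)
+            return;                            // already attached - idempotent
+
+        m_pOptional = new SketchOptionalFields();
+        m_pOptional->Init();                   // default-initialize all fields
+    }
+};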
+
+//---------------------------------------------------------------------------------------
+//
+// Gather information about a generic type
+// - number of parameters
+// - variance annotations
+// - dictionaries
+// - sharability
+//
+//static
+void
+MethodTableBuilder::GatherGenericsInfo(
+ Module * pModule,
+ mdTypeDef cl,
+ Instantiation inst,
+ bmtGenericsInfo * bmtGenericsInfo)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(GetThread() != NULL);
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(bmtGenericsInfo));
+ }
+ CONTRACTL_END;
+
+ IMDInternalImport * pInternalImport = pModule->GetMDImport();
+
+ // Enumerate the formal type parameters
+ HENUMInternal hEnumGenericPars;
+ HRESULT hr = pInternalImport->EnumInit(mdtGenericParam, cl, &hEnumGenericPars);
+ if (FAILED(hr))
+ pModule->GetAssembly()->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
+
+ DWORD numGenericArgs = pInternalImport->EnumGetCount(&hEnumGenericPars);
+
+ // Work out what kind of EEClass we're creating w.r.t. generics. If there
+    // are no generics involved, this will be a VMFLAG_NONGENERIC.
+ BOOL fHasVariance = FALSE;
+ if (numGenericArgs > 0)
+ {
+ // Generic type verification
+ {
+ DWORD dwAttr;
+ mdToken tkParent;
+ if (FAILED(pInternalImport->GetTypeDefProps(cl, &dwAttr, &tkParent)))
+ {
+ pModule->GetAssembly()->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
+ }
+ // A generic with explicit layout is not allowed.
+ if (IsTdExplicitLayout(dwAttr))
+ {
+ pModule->GetAssembly()->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_EXPLICIT_GENERIC);
+ }
+ }
+
+ bmtGenericsInfo->numDicts = 1;
+
+ mdGenericParam tkTyPar;
+ bmtGenericsInfo->pVarianceInfo = new (&GetThread()->m_MarshalAlloc) BYTE[numGenericArgs];
+
+        // If it has generic arguments but none have been specified, then build the instantiation from the formal type parameters
+ if (inst.IsEmpty())
+ {
+ bmtGenericsInfo->fTypicalInstantiation = TRUE;
+ S_UINT32 scbAllocSize = S_UINT32(numGenericArgs) * S_UINT32(sizeof(TypeHandle));
+ TypeHandle * genericArgs = (TypeHandle *) GetThread()->m_MarshalAlloc.Alloc(scbAllocSize);
+
+ inst = Instantiation(genericArgs, numGenericArgs);
+
+ bmtGenericsInfo->fSharedByGenericInstantiations = FALSE;
+ }
+ else
+ {
+ bmtGenericsInfo->fTypicalInstantiation = FALSE;
+
+ bmtGenericsInfo->fSharedByGenericInstantiations = TypeHandle::IsCanonicalSubtypeInstantiation(inst);
+ _ASSERTE(bmtGenericsInfo->fSharedByGenericInstantiations == ClassLoader::IsSharableInstantiation(inst));
+
+#ifdef _DEBUG
+ // Set typical instantiation MethodTable
+ {
+ MethodTable * pTypicalInstantiationMT = pModule->LookupTypeDef(cl).AsMethodTable();
+ // Typical instantiation was already loaded by code:ClassLoader::LoadApproxTypeThrowing
+ _ASSERTE(pTypicalInstantiationMT != NULL);
+ bmtGenericsInfo->dbg_pTypicalInstantiationMT = pTypicalInstantiationMT;
+ }
+#endif //_DEBUG
+ }
+
+ TypeHandle * pDestInst = (TypeHandle *)inst.GetRawArgs();
+ for (unsigned int i = 0; i < numGenericArgs; i++)
+ {
+ pInternalImport->EnumNext(&hEnumGenericPars, &tkTyPar);
+ DWORD flags;
+ if (FAILED(pInternalImport->GetGenericParamProps(tkTyPar, NULL, &flags, NULL, NULL, NULL)))
+ {
+ pModule->GetAssembly()->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ if (bmtGenericsInfo->fTypicalInstantiation)
+ {
+ // code:Module.m_GenericParamToDescMap maps generic parameter RIDs to TypeVarTypeDesc
+ // instances so that we do not leak by allocating them all over again, if the type
+ // repeatedly fails to load.
+ TypeVarTypeDesc *pTypeVarTypeDesc = pModule->LookupGenericParam(tkTyPar);
+ if (pTypeVarTypeDesc == NULL)
+ {
+                    // Do NOT use the alloc tracker for this memory as we need it to stay allocated even if the load fails.
+ void *mem = (void *)pModule->GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(TypeVarTypeDesc)));
+ pTypeVarTypeDesc = new (mem) TypeVarTypeDesc(pModule, cl, i, tkTyPar);
+
+ // No race here - the row in GenericParam table is owned exclusively by this type and we
+ // are holding a lock preventing other threads from concurrently loading it.
+ pModule->StoreGenericParamThrowing(tkTyPar, pTypeVarTypeDesc);
+ }
+ pDestInst[i] = TypeHandle(pTypeVarTypeDesc);
+ }
+
+ DWORD varianceAnnotation = flags & gpVarianceMask;
+ bmtGenericsInfo->pVarianceInfo[i] = static_cast<BYTE>(varianceAnnotation);
+ if (varianceAnnotation != gpNonVariant)
+ {
+ if (varianceAnnotation != gpContravariant && varianceAnnotation != gpCovariant)
+ {
+ pModule->GetAssembly()->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADVARIANCE);
+ }
+ else
+ {
+ fHasVariance = TRUE;
+ }
+ }
+ }
+
+ if (!fHasVariance)
+ bmtGenericsInfo->pVarianceInfo = NULL;
+ }
+ else
+ {
+ bmtGenericsInfo->fTypicalInstantiation = FALSE;
+ bmtGenericsInfo->fSharedByGenericInstantiations = FALSE;
+ bmtGenericsInfo->numDicts = 0;
+ }
+
+ bmtGenericsInfo->fContainsGenericVariables = MethodTable::ComputeContainsGenericVariables(inst);
+
+ SigTypeContext typeContext(inst, Instantiation());
+ bmtGenericsInfo->typeContext = typeContext;
+} // MethodTableBuilder::GatherGenericsInfo
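+
+// A minimal standalone sketch of the per-parameter variance handling above
+// (editor illustration; the 0/1/2/3 constants stand in for gpNonVariant,
+// gpCovariant, gpContravariant, and gpVarianceMask): mask the metadata flags
+// down to the variance bits and reject anything else as malformed.
+static bool SketchMaskVariance(unsigned dwParamFlags, unsigned char * pVarianceOut)
+{
+    const unsigned gpMaskSketch = 0x3;
+    const unsigned gpNoneSketch = 0x0, gpCoSketch = 0x1, gpContraSketch = 0x2;
+
+    unsigned v = dwParamFlags & gpMaskSketch;
+    if (v != gpNoneSketch && v != gpCoSketch && v != gpContraSketch)
+        return false;   // malformed metadata -> IDS_CLASSLOAD_BADVARIANCE above
+
+    *pVarianceOut = (unsigned char)v;
+    return true;
+}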
+
+//---------------------------------------------------------------------------------------
+//
+// This service is called for normal classes -- and for the pseudo class we invent to
+// hold the module's public members.
+//
+//static
+TypeHandle
+ClassLoader::CreateTypeHandleForTypeDefThrowing(
+ Module * pModule,
+ mdTypeDef cl,
+ Instantiation inst,
+ AllocMemTracker * pamTracker)
+{
+ CONTRACT(TypeHandle)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(GetThread() != NULL);
+ PRECONDITION(CheckPointer(pModule));
+ POSTCONDITION(!RETVAL.IsNull());
+ POSTCONDITION(CheckPointer(RETVAL.GetMethodTable()));
+ }
+ CONTRACT_END;
+
+ MethodTable * pMT = NULL;
+
+ Thread * pThread = GetThread();
+ BEGIN_SO_INTOLERANT_CODE_FOR(pThread, DefaultEntryProbeAmount() * 2)
+
+ MethodTable * pParentMethodTable = NULL;
+ SigPointer parentInst;
+ mdTypeDef tdEnclosing = mdTypeDefNil;
+ DWORD cInterfaces;
+ BuildingInterfaceInfo_t * pInterfaceBuildInfo = NULL;
+ IMDInternalImport * pInternalImport = NULL;
+ LayoutRawFieldInfo * pLayoutRawFieldInfos = NULL;
+ MethodTableBuilder::bmtGenericsInfo genericsInfo;
+
+ Assembly * pAssembly = pModule->GetAssembly();
+ pInternalImport = pModule->GetMDImport();
+
+ if (TypeFromToken(cl) != mdtTypeDef || !pInternalImport->IsValidToken(cl))
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ // GetCheckpoint for the thread-based allocator
+ // This checkpoint provides a scope for all transient allocations of data structures
+ // used during class loading.
+ // <NICE> Ideally a debug/checked build should pass around tokens indicating the Checkpoint
+ // being used and check these dynamically </NICE>
+ CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint()); //hold checkpoint for autorelease
+
+ // Gather up generics info
+ MethodTableBuilder::GatherGenericsInfo(pModule, cl, inst, &genericsInfo);
+
+ Module * pLoaderModule = pModule;
+ if (!inst.IsEmpty())
+ {
+ pLoaderModule = ClassLoader::ComputeLoaderModuleWorker(
+ pModule,
+ cl,
+ inst,
+ Instantiation());
+ pLoaderModule->GetLoaderAllocator()->EnsureInstantiation(pModule, inst);
+ }
+
+ LoaderAllocator * pAllocator = pLoaderModule->GetLoaderAllocator();
+
+ {
+ // As this is loading a parent type, we are allowed to override the load type limit.
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOAD_APPROXPARENTS);
+ pParentMethodTable = LoadApproxParentThrowing(pModule, cl, &parentInst, &genericsInfo.typeContext);
+ }
+
+ if (pParentMethodTable != NULL)
+ {
+        // Since methods on System.Array assume the layout of arrays, we cannot allow
+        // subclassing of arrays; they are sealed from the user's point of view.
+        // Value types and enums should be sealed - disable inheriting from them (we cannot
+        // require the sealed flag because of AppCompat).
+ if (pParentMethodTable->IsSealed() ||
+ (pParentMethodTable == g_pArrayClass) ||
+ pParentMethodTable->IsValueType())
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_SEALEDPARENT);
+ }
+
+ DWORD dwTotalDicts = genericsInfo.numDicts + pParentMethodTable->GetNumDicts();
+ if (!FitsIn<WORD>(dwTotalDicts))
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_TOOMANYGENERICARGS);
+ }
+ genericsInfo.numDicts = static_cast<WORD>(dwTotalDicts);
+ }
+
+ GetEnclosingClassThrowing(pInternalImport, pModule, cl, &tdEnclosing);
+
+ BYTE nstructPackingSize = 0, nstructNLT = 0;
+ BOOL fExplicitOffsets = FALSE;
+ // NOTE: HasLayoutMetadata does not load classes
+ BOOL fHasLayout =
+ !genericsInfo.fContainsGenericVariables &&
+ HasLayoutMetadata(
+ pModule->GetAssembly(),
+ pInternalImport,
+ cl,
+ pParentMethodTable,
+ &nstructPackingSize,
+ &nstructNLT,
+ &fExplicitOffsets);
+
+ BOOL fIsEnum = ((g_pEnumClass != NULL) && (pParentMethodTable == g_pEnumClass));
+
+    // Enums may not have layout because they derive from g_pEnumClass, which has no layout;
+    // this is enforced by HasLayoutMetadata above.
+ _ASSERTE(!(fIsEnum && fHasLayout));
+
+ // This is a delegate class if it derives from MulticastDelegate (we do not allow single cast delegates)
+ BOOL fIsDelegate = pParentMethodTable && pParentMethodTable == g_pMulticastDelegateClass;
+
+ // Create a EEClass entry for it, filling out a few fields, such as the parent class token
+ // (and the generic type should we be creating an instantiation)
+ EEClass * pClass = MethodTableBuilder::CreateClass(
+ pModule,
+ cl,
+ fHasLayout,
+ fIsDelegate,
+ fIsEnum,
+ &genericsInfo,
+ pAllocator,
+ pamTracker);
+
+ if ((pParentMethodTable != NULL) && (pParentMethodTable == g_pDelegateClass))
+ {
+ // Note we do not allow single cast delegates
+ if (pModule->GetAssembly() != SystemDomain::SystemAssembly())
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, BFA_CANNOT_INHERIT_FROM_DELEGATE);
+ }
+
+#ifdef _DEBUG
+ // Only MultiCastDelegate should inherit from Delegate
+ LPCUTF8 className;
+ LPCUTF8 nameSpace;
+ if (FAILED(pInternalImport->GetNameOfTypeDef(cl, &className, &nameSpace)))
+ {
+ className = nameSpace = "Invalid TypeDef record";
+ }
+ BAD_FORMAT_NOTHROW_ASSERT(strcmp(className, "MulticastDelegate") == 0);
+#endif
+ }
+
+ if (fIsDelegate)
+ {
+ if (!pClass->IsSealed())
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, BFA_DELEGATE_CLASS_NOTSEALED);
+ }
+
+ pClass->SetIsDelegate();
+ }
+
+ if (tdEnclosing != mdTypeDefNil)
+ {
+ pClass->SetIsNested();
+ THROW_BAD_FORMAT_MAYBE(IsTdNested(pClass->GetProtection()), VLDTR_E_TD_ENCLNOTNESTED, pModule);
+ }
+ else if (IsTdNested(pClass->GetProtection()))
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ // We only permit generic interfaces and delegates to have variant type parameters
+ if (genericsInfo.pVarianceInfo != NULL && !pClass->IsInterface() && !fIsDelegate)
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_VARIANCE_CLASS);
+ }
+
+ // Now load all the interfaces
+ HENUMInternalHolder hEnumInterfaceImpl(pInternalImport);
+ hEnumInterfaceImpl.EnumInit(mdtInterfaceImpl, cl);
+
+ cInterfaces = pInternalImport->EnumGetCount(&hEnumInterfaceImpl);
+
+ if (cInterfaces != 0)
+ {
+ DWORD i;
+
+ // Allocate the BuildingInterfaceList table
+ pInterfaceBuildInfo = new (&GetThread()->m_MarshalAlloc) BuildingInterfaceInfo_t[cInterfaces];
+
+ mdInterfaceImpl ii;
+ for (i = 0; pInternalImport->EnumNext(&hEnumInterfaceImpl, &ii); i++)
+ {
+ // Get properties on this interface
+ mdTypeRef crInterface;
+ if (FAILED(pInternalImport->GetTypeOfInterfaceImpl(ii, &crInterface)))
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
+ }
+ // validate the token
+ mdToken crIntType =
+ (RidFromToken(crInterface) && pInternalImport->IsValidToken(crInterface)) ?
+ TypeFromToken(crInterface) :
+ 0;
+ switch (crIntType)
+ {
+ case mdtTypeDef:
+ case mdtTypeRef:
+ case mdtTypeSpec:
+ break;
+ default:
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_INTERFACENULL);
+ }
+
+ TypeHandle intType;
+
+ {
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOAD_APPROXPARENTS);
+ intType = LoadApproxTypeThrowing(pModule, crInterface, NULL, &genericsInfo.typeContext);
+ }
+
+ pInterfaceBuildInfo[i].m_pMethodTable = intType.AsMethodTable();
+ if (pInterfaceBuildInfo[i].m_pMethodTable == NULL)
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_INTERFACENULL);
+ }
+
+ // Ensure this is an interface
+ if (!pInterfaceBuildInfo[i].m_pMethodTable->IsInterface())
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_NOTINTERFACE);
+ }
+
+ // Check interface for use of variant type parameters
+ if ((genericsInfo.pVarianceInfo != NULL) && (TypeFromToken(crInterface) == mdtTypeSpec))
+ {
+ ULONG cSig;
+ PCCOR_SIGNATURE pSig;
+ if (FAILED(pInternalImport->GetTypeSpecFromToken(crInterface, &pSig, &cSig)))
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
+ }
+ // Interfaces behave covariantly
+ if (!EEClass::CheckVarianceInSig(
+ genericsInfo.GetNumGenericArgs(),
+ genericsInfo.pVarianceInfo,
+ pModule,
+ SigPointer(pSig, cSig),
+ gpCovariant))
+ {
+ pAssembly->ThrowTypeLoadException(
+ pInternalImport,
+ cl,
+ IDS_CLASSLOAD_VARIANCE_IN_INTERFACE);
+ }
+ }
+ }
+ _ASSERTE(i == cInterfaces);
+ }
+
+ if (fHasLayout ||
+        /* Variant delegates should not have any instance fields of the variant
+           type parameter. For now, we just completely disallow all fields even
+           if they are non-variant or static, as it is not a useful scenario.
+           @TODO: A more logical place for this check would be in
+           MethodTableBuilder::EnumerateClassMembers() */
+ (fIsDelegate && genericsInfo.pVarianceInfo))
+ {
+ // check for fields and variance
+ ULONG cFields;
+ HENUMInternalHolder hEnumField(pInternalImport);
+ hEnumField.EnumInit(mdtFieldDef, cl);
+
+ cFields = pInternalImport->EnumGetCount(&hEnumField);
+
+ if ((cFields != 0) && fIsDelegate && (genericsInfo.pVarianceInfo != NULL))
+ {
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_VARIANCE_IN_DELEGATE);
+ }
+
+ if (fHasLayout)
+ {
+ // Though we fail on this condition, we should never run into it.
+ CONSISTENCY_CHECK(nstructPackingSize != 0);
+ // MD Val check: PackingSize
+ if((nstructPackingSize == 0) ||
+ (nstructPackingSize > 128) ||
+ (nstructPackingSize & (nstructPackingSize-1)))
+ {
+ THROW_BAD_FORMAT_MAYBE(!"ClassLayout:Invalid PackingSize", BFA_BAD_PACKING_SIZE, pModule);
+ pAssembly->ThrowTypeLoadException(pInternalImport, cl, IDS_CLASSLOAD_BADFORMAT);
+ }
+
+ pLayoutRawFieldInfos = (LayoutRawFieldInfo *)GetThread()->m_MarshalAlloc.Alloc(
+ (S_UINT32(1) + S_UINT32(cFields)) * S_UINT32(sizeof(LayoutRawFieldInfo)));
+
+ {
+ // Warning: this can load classes
+ CONTRACT_VIOLATION(LoadsTypeViolation);
+
+ // Set a flag that allows us to break dead-locks that are result of the LoadsTypeViolation
+ ThreadStateNCStackHolder tsNC(TRUE, Thread::TSNC_LoadsTypeViolation);
+
+ EEClassLayoutInfo::CollectLayoutFieldMetadataThrowing(
+ cl,
+ nstructPackingSize,
+ nstructNLT,
+#ifdef FEATURE_COMINTEROP
+ pClass->IsProjectedFromWinRT(),
+#endif // FEATURE_COMINTEROP
+ fExplicitOffsets,
+ pParentMethodTable,
+ cFields,
+ &hEnumField,
+ pModule,
+ &genericsInfo.typeContext,
+ &(((LayoutEEClass *)pClass)->m_LayoutInfo),
+ pLayoutRawFieldInfos,
+ pAllocator,
+ pamTracker);
+ }
+ }
+ }
+
+ // Resolve this class, given that we know now that all of its dependencies are loaded and resolved.
+ // !!! This must be the last thing in this TRY block: if MethodTableBuilder succeeds, it has published the class
+ // and there is no going back.
+ MethodTableBuilder builder(
+ NULL,
+ pClass,
+ &GetThread()->m_MarshalAlloc,
+ pamTracker);
+
+ pMT = builder.BuildMethodTableThrowing(
+ pAllocator,
+ pLoaderModule,
+ pModule,
+ cl,
+ pInterfaceBuildInfo,
+ pLayoutRawFieldInfos,
+ pParentMethodTable,
+ &genericsInfo,
+ parentInst,
+ (WORD)cInterfaces);
+
+ END_SO_INTOLERANT_CODE;
+ RETURN(TypeHandle(pMT));
+} // ClassLoader::CreateTypeHandleForTypeDefThrowing
diff --git a/src/vm/methodtablebuilder.h b/src/vm/methodtablebuilder.h
new file mode 100644
index 0000000000..bc543c1bf8
--- /dev/null
+++ b/src/vm/methodtablebuilder.h
@@ -0,0 +1,3052 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// ==++==
+//
+//
+
+//
+// ==--==
+//
+// File: METHODTABLEBUILDER.H
+//
+
+
+//
+
+//
+// ============================================================================
+
+#ifndef METHODTABLEBUILDER_H
+#define METHODTABLEBUILDER_H
+
+//---------------------------------------------------------------------------------------
+//
+// MethodTableBuilder simply acts as a holder for the
+// large algorithm that "compiles" a type into
+// a MethodTable/EEClass/DispatchMap/VTable etc. etc.
+//
+// The user of this class (the ClassLoader) currently builds the EEClass
+// first, and does a couple of other things too, though all
+// that work should probably be folded into BuildMethodTableThrowing.
+//
+class MethodTableBuilder
+{
+
+public:
+
+ friend class EEClass;
+
+ typedef UINT16 SLOT_INDEX;
+ typedef ClrSafeInt<SLOT_INDEX> S_SLOT_INDEX;
+ static const UINT16 INVALID_SLOT_INDEX = static_cast<UINT16>(-1);
+ static const UINT16 MAX_SLOT_INDEX = static_cast<UINT16>(-1) - 10;
+
+#ifndef BINDER
+ // Information gathered by the class loader relating to generics
+ // Fields in this structure are initialized very early in class loading
+ // See code:ClassLoader.CreateTypeHandleForTypeDefThrowing
+ struct bmtGenericsInfo
+ {
+ SigTypeContext typeContext; // Type context used for metadata parsing
+ WORD numDicts; // Number of dictionaries including this class
+ BYTE *pVarianceInfo; // Variance annotations on type parameters, NULL if none specified
+        BOOL fTypicalInstantiation;         // TRUE if this is the generic type definition
+        BOOL fSharedByGenericInstantiations;    // TRUE if this is a canonical type shared by instantiations
+ BOOL fContainsGenericVariables; // TRUE if this is an open type
+
+ inline bmtGenericsInfo() { LIMITED_METHOD_CONTRACT; memset((void *)this, NULL, sizeof(*this)); }
+ inline DWORD GetNumGenericArgs() const { LIMITED_METHOD_CONTRACT; return typeContext.m_classInst.GetNumArgs(); }
+ inline BOOL HasInstantiation() const { LIMITED_METHOD_CONTRACT; return typeContext.m_classInst.GetNumArgs() != 0; }
+ inline BOOL IsTypicalTypeDefinition() const { LIMITED_METHOD_CONTRACT; return !HasInstantiation() || fTypicalInstantiation; }
+
+ inline Instantiation GetInstantiation() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return typeContext.m_classInst;
+ }
+
+#ifdef _DEBUG
+ // Typical instantiation (= open type). Non-NULL only when loading any non-typical instantiation.
+ // NULL if 'this' is a typical instantiation or a non-generic type.
+ MethodTable * dbg_pTypicalInstantiationMT;
+
+ inline MethodTable * Debug_GetTypicalMethodTable() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return dbg_pTypicalInstantiationMT;
+ }
+#endif //_DEBUG
+ }; // struct bmtGenericsInfo
+
+
+    // Information for thread and context statics. Filled in by InitializeFieldDescs and used when
+    // setting up the MethodTable.
+ struct bmtContextStaticInfo
+ {
+#ifdef FEATURE_REMOTING
+ // size of context statics
+ DWORD dwContextStaticsSize;
+#endif
+
+ inline bmtContextStaticInfo() { LIMITED_METHOD_CONTRACT; memset((void *)this, NULL, sizeof(*this)); }
+ };
+
+ MethodTableBuilder(
+ MethodTable * pHalfBakedMT,
+ EEClass * pHalfBakedClass,
+ StackingAllocator * pStackingAllocator,
+ AllocMemTracker * pAllocMemTracker)
+ : m_pHalfBakedClass(pHalfBakedClass),
+ m_pHalfBakedMT(pHalfBakedMT),
+ m_pStackingAllocator(pStackingAllocator),
+ m_pAllocMemTracker(pAllocMemTracker)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetBMTData(
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL);
+ }
+public:
+ //==========================================================================
+ // This function is very specific about how it constructs an EEClass.
+ //==========================================================================
+ static EEClass * CreateClass(Module *pModule,
+ mdTypeDef cl,
+ BOOL fHasLayout,
+ BOOL fDelegate,
+ BOOL fIsEnum,
+ const bmtGenericsInfo *bmtGenericsInfo,
+ LoaderAllocator *pAllocator,
+ AllocMemTracker *pamTracker);
+
+ static void GatherGenericsInfo(Module *pModule,
+ mdTypeDef cl,
+ Instantiation inst,
+ bmtGenericsInfo *bmtGenericsInfo);
+
+ MethodTable *
+ BuildMethodTableThrowing(
+ LoaderAllocator * pAllocator,
+ Module * pLoaderModule,
+ Module * pModule,
+ mdToken cl,
+ BuildingInterfaceInfo_t * pBuildingInterfaceList,
+ const LayoutRawFieldInfo * pLayoutRawFieldInfos,
+ MethodTable * pParentMethodTable,
+ const bmtGenericsInfo * bmtGenericsInfo,
+ SigPointer parentInst,
+ WORD wNumInterfaces);
+
+ LPCWSTR GetPathForErrorMessages();
+
+ BOOL ChangesImplementationOfVirtualSlot(SLOT_INDEX idx);
+
+private:
+ enum METHOD_IMPL_TYPE
+ {
+ METHOD_IMPL_NOT,
+ METHOD_IMPL
+ };
+
+ enum METHOD_TYPE
+ {
+ // The values of the enum are in sync with MethodClassification.
+ // GetMethodClassification depends on this.
+ METHOD_TYPE_NORMAL = 0,
+ METHOD_TYPE_FCALL = 1,
+ METHOD_TYPE_NDIRECT = 2,
+ METHOD_TYPE_EEIMPL = 3,
+ METHOD_TYPE_INSTANTIATED = 5,
+#ifdef FEATURE_COMINTEROP
+ METHOD_TYPE_COMINTEROP = 6,
+#endif
+ };
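+
+ // Editorial note (not part of the original source): since GetMethodClassification
+ // relies on these values matching the MethodClassification enum (where the skipped
+ // value 4 presumably corresponds to a classification the builder never produces
+ // directly), drift between the two enums would be a silent bug. A hedged sketch of
+ // compile-time checks one could add, assuming the mc* names from the runtime:
+ //
+ // static_assert_no_msg((DWORD)METHOD_TYPE_NORMAL == (DWORD)mcIL);
+ // static_assert_no_msg((DWORD)METHOD_TYPE_FCALL == (DWORD)mcFCall);
+ // static_assert_no_msg((DWORD)METHOD_TYPE_NDIRECT == (DWORD)mcNDirect);
+ // static_assert_no_msg((DWORD)METHOD_TYPE_EEIMPL == (DWORD)mcEEImpl);
+ // static_assert_no_msg((DWORD)METHOD_TYPE_INSTANTIATED == (DWORD)mcInstantiated);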
+
+private:
+ // Determine if this is the special SIMD type System.Numerics.Vector<T>, and set its size.
+ BOOL CheckIfSIMDAndUpdateSize();
+
+ // <NICE> Get rid of this.</NICE>
+ PTR_EEClass m_pHalfBakedClass;
+ PTR_MethodTable m_pHalfBakedMT;
+
+ // GetHalfBakedClass: The EEClass you get back from this function may not have all its fields filled in yet.
+ // Thus you have to make sure that the relevant item which you are accessing has
+ // been correctly initialized in the EEClass/MethodTable construction sequence
+ // at the point at which you access it.
+ //
+ // Gradually we will move the code to a model where the process of constructing an EEClass/MethodTable
+ // is more obviously correct, e.g. by relying much less on reading information using GetHalfBakedClass
+ // and GetHalfBakedMethodTable.
+ //
+ // <NICE> Get rid of this.</NICE>
+ PTR_EEClass GetHalfBakedClass() { LIMITED_METHOD_CONTRACT; return m_pHalfBakedClass; }
+ PTR_MethodTable GetHalfBakedMethodTable() { LIMITED_METHOD_CONTRACT; return m_pHalfBakedMT; }
+
+ // <NOTE> The following functions are used during MethodTable construction to access/set information about the type being constructed.
+ // Beware that some of the fields of the underlying EEClass/MethodTable being constructed may not
+ // be initialized. Because of this, ideally the code will gradually be cleaned up so that
+ // none of these functions are used and instead we use the data in the bmt structures below
+ // or we explicitly pass around the data as arguments. </NOTE>
+ //
+ // <NICE> Get rid of all of these.</NICE>
+ mdTypeDef GetCl() { WRAPPER_NO_CONTRACT; return bmtInternal->pType->GetTypeDefToken(); }
+ BOOL IsGlobalClass() { WRAPPER_NO_CONTRACT; return GetCl() == COR_GLOBAL_PARENT_TOKEN; }
+ DWORD GetAttrClass() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->GetAttrClass(); }
+ WORD GetNumHandleRegularStatics() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->GetNumHandleRegularStatics(); }
+ WORD GetNumHandleThreadStatics() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->GetNumHandleThreadStatics(); }
+ WORD GetNumStaticFields() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->GetNumStaticFields(); }
+ WORD GetNumInstanceFields() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->GetNumInstanceFields(); }
+ BOOL IsInterface() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->IsInterface(); }
+ BOOL HasOverLayedField() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->HasOverLayedField(); }
+ BOOL IsComImport() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->IsComImport(); }
+#ifdef FEATURE_COMINTEROP
+ void SetIsComClassInterface() { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetIsComClassInterface(); }
+#endif // FEATURE_COMINTEROP
+ BOOL IsEnum() { WRAPPER_NO_CONTRACT; return bmtProp->fIsEnum; }
+ BOOL ContainsStackPtr() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->ContainsStackPtr(); }
+ BOOL HasNonPublicFields() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->HasNonPublicFields(); }
+ BOOL IsValueClass() { WRAPPER_NO_CONTRACT; return bmtProp->fIsValueClass; }
+ BOOL IsUnsafeValueClass() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->IsUnsafeValueClass(); }
+ BOOL IsAbstract() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->IsAbstract(); }
+ BOOL HasLayout() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->HasLayout(); }
+ BOOL IsDelegate() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->IsDelegate(); }
+#ifdef FEATURE_REMOTING
+ BOOL IsMarshaledByRef() { WRAPPER_NO_CONTRACT; return bmtProp->fMarshaledByRef; }
+ BOOL IsContextful() { WRAPPER_NO_CONTRACT; return bmtProp->fIsContextful; }
+#endif
+ BOOL IsNested() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->IsNested(); }
+ BOOL HasFieldsWhichMustBeInited() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->HasFieldsWhichMustBeInited(); }
+ BOOL HasRemotingProxyAttribute() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->HasRemotingProxyAttribute(); }
+ BOOL IsBlittable() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->IsBlittable(); }
+ PTR_MethodDescChunk GetChunks() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->GetChunks(); }
+ BOOL HasExplicitFieldOffsetLayout() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->HasExplicitFieldOffsetLayout(); }
+ BOOL IsManagedSequential() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->IsManagedSequential(); }
+ BOOL HasExplicitSize() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->HasExplicitSize(); }
+ BOOL RequiresLinktimeCheck() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->RequiresLinktimeCheck(); }
+ BOOL RequiresLinktimeCheckHostProtectionOnly() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->RequiresLinkTimeCheckHostProtectionOnly(); }
+
+ SecurityProperties* GetSecurityProperties() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->GetSecurityProperties(); }
+#ifdef _DEBUG
+ BOOL IsAppDomainAgilityDone() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->IsAppDomainAgilityDone(); }
+ LPCUTF8 GetDebugClassName() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->GetDebugClassName(); }
+#endif // _DEBUG
+ Assembly *GetAssembly() { WRAPPER_NO_CONTRACT; return GetModule()->GetAssembly(); }
+ Module *GetModule() { WRAPPER_NO_CONTRACT; return bmtInternal->pModule; }
+ ClassLoader *GetClassLoader() { WRAPPER_NO_CONTRACT; return GetModule()->GetClassLoader(); }
+ IMDInternalImport* GetMDImport() { WRAPPER_NO_CONTRACT; return bmtInternal->pInternalImport; }
+ FieldDesc *GetApproxFieldDescListRaw() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->GetFieldDescList(); }
+ EEClassLayoutInfo *GetLayoutInfo() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->GetLayoutInfo(); }
+
+ // <NOTE> The following functions are used during MethodTable construction to setup information
+ // about the type being constructed in particular information stored in the EEClass.
+ // USE WITH CAUTION!! TRY NOT TO ADD MORE OF THESE!! </NOTE>
+ //
+ // <NICE> Get rid of all of these - we should be able to evaluate these conditions BEFORE
+ // we create the EEClass object, and thus set the flags immediately at the point
+ // we create that object.</NICE>
+ void SetUnsafeValueClass() { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetUnsafeValueClass(); }
+ void SetCannotBeBlittedByObjectCloner() { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetCannotBeBlittedByObjectCloner(); }
+ void SetHasFieldsWhichMustBeInited() { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetHasFieldsWhichMustBeInited(); }
+ void SetHasNonPublicFields() { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetHasNonPublicFields(); }
+ void SetModuleDynamicID(DWORD x) { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetModuleDynamicID(x); }
+ void SetNumHandleRegularStatics(WORD x) { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetNumHandleRegularStatics(x); }
+ void SetNumHandleThreadStatics(WORD x) { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetNumHandleThreadStatics(x); }
+ void SetNumBoxedRegularStatics(WORD x) { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetNumBoxedRegularStatics(x); }
+ void SetNumBoxedThreadStatics(WORD x) { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetNumBoxedThreadStatics(x); }
+ void SetAlign8Candidate() { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetAlign8Candidate(); }
+ void SetHasRemotingProxyAttribute() { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetHasRemotingProxyAttribute(); }
+ void SetHasOverLayedFields() { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetHasOverLayedFields(); }
+ void SetNonGCRegularStaticFieldBytes(DWORD x) { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetNonGCRegularStaticFieldBytes(x); }
+ void SetNonGCThreadStaticFieldBytes(DWORD x) { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetNonGCThreadStaticFieldBytes(x); }
+#ifdef _DEBUG
+ void SetDebugClassName(LPUTF8 x) { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetDebugClassName(x); }
+#endif
+
+ // Must be called prior to setting the value of any optional field on EEClass (on a debug build an assert
+ // will fire if this invariant is violated).
+ static void EnsureOptionalFieldsAreAllocated(EEClass *pClass, AllocMemTracker *pamTracker, LoaderHeap *pHeap);
+
+ /************************************
+ * PRIVATE INTERNAL STRUCTS
+ ************************************/
+private:
+ //The following structs are used in buildmethodtable
+ // The 'bmt' in front of each struct reminds us these are for MethodTableBuilder
+
+ // --------------------------------------------------------------------------------------------
+ struct bmtErrorInfo
+ {
+ UINT resIDWhy;
+ LPCUTF8 szMethodNameForError;
+ mdToken dMethodDefInError;
+ Module* pModule;
+ mdTypeDef cl;
+ OBJECTREF *pThrowable;
+
+ // Set the reason and the offending method def. If the method information
+ // is not from this class, set the method name; it will override the method def.
+ inline bmtErrorInfo()
+ : resIDWhy(0),
+ szMethodNameForError(NULL),
+ dMethodDefInError(mdMethodDefNil),
+ pThrowable(NULL)
+ { LIMITED_METHOD_CONTRACT; }
+ };
+
+ // --------------------------------------------------------------------------------------------
+ class bmtRTType
+ {
+ public:
+ //-----------------------------------------------------------------------------------------
+ // Note that the immediate substitution is copied, but this assumes that
+ // the remaining substitutions in the chain are in a stable memory location
+ // for the lifetime of this object.
+ bmtRTType(
+ const Substitution & subst,
+ MethodTable * pMT)
+ : m_subst(subst),
+ m_pMT(pMT),
+ m_pParent(NULL)
+ { LIMITED_METHOD_CONTRACT; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the parent type. Takes advantage of the fact that an RT type will
+ // have only RT types as parents. I don't anticipate this changing.
+ bmtRTType *
+ GetParentType() const
+ { LIMITED_METHOD_CONTRACT; return m_pParent; }
+
+ //-----------------------------------------------------------------------------------------
+ // Sets the parent type. Used during construction of the type chain, due
+ // to the fact that types point up the chain but substitutions point down.
+ void
+ SetParentType(
+ bmtRTType * pParentType)
+ { LIMITED_METHOD_CONTRACT; m_pParent = pParentType; }
+
+ //-----------------------------------------------------------------------------------------
+ bool
+ IsNested() const
+ { LIMITED_METHOD_CONTRACT; return GetMethodTable()->GetClass()->IsNested() != FALSE; }
+
+ //-----------------------------------------------------------------------------------------
+ mdTypeDef
+ GetEnclosingTypeToken() const;
+
+ //-----------------------------------------------------------------------------------------
+ // Reference to the substitution for this type. Substitutions are linked
+ // inline with the type chain; this is more efficient than creating an
+ // entire type chain for each parent type and also keeps the type and
+ // substitution tightly coupled for easier use.
+ const Substitution &
+ GetSubstitution() const
+ { LIMITED_METHOD_CONTRACT; return m_subst; }
+
+ //-----------------------------------------------------------------------------------------
+ // Changes type's substitution - used for interface map building.
+ void
+ SetSubstitution(const Substitution & subst)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_subst = subst;
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the runtime Module that owns this type.
+ Module *
+ GetModule() const
+ { WRAPPER_NO_CONTRACT; return GetMethodTable()->GetModule(); }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the runtime MethodTable for the type.
+ MethodTable *
+ GetMethodTable() const
+ { LIMITED_METHOD_CONTRACT; return m_pMT; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the metadata token for this type.
+ mdTypeDef
+ GetTypeDefToken() const
+ { WRAPPER_NO_CONTRACT; return GetMethodTable()->GetCl();}
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the metadata attributes for this type.
+ DWORD
+ GetAttrs() const
+ { WRAPPER_NO_CONTRACT; return GetMethodTable()->GetClass()->GetAttrClass(); }
+
+ //-----------------------------------------------------------------------------------------
+ // true if the type is an interface; false otherwise.
+ bool
+ IsInterface() const
+ { WRAPPER_NO_CONTRACT; return GetMethodTable()->IsInterface() != FALSE; }
+
+ //-----------------------------------------------------------------------------------------
+ // Helper function to find a type associated with pTargetMT in the
+ // chain pointed to by pType.
+ static
+ bmtRTType *
+ FindType(
+ bmtRTType * pType,
+ MethodTable * pTargetMT);
+
+ private:
+ //-----------------------------------------------------------------------------------------
+ Substitution m_subst;
+ MethodTable * m_pMT;
+ bmtRTType * m_pParent;
+ }; // class bmtRTType
+
+ // --------------------------------------------------------------------------------------------
+ // This creates a chain of bmtRTType objects representing pMT and all of pMT's parent types.
+ bmtRTType *
+ CreateTypeChain(
+ MethodTable * pMT,
+ const Substitution & subst);
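+
+ // Editorial note (not part of the original source): FindType above is naturally a
+ // walk up the parent chain built by CreateTypeChain. A plausible sketch, not the
+ // verified CoreCLR body (which may well compare canonical MethodTables rather than
+ // raw pointers):
+ //
+ // bmtRTType * FindType(bmtRTType * pType, MethodTable * pTargetMT)
+ // {
+ //     while (pType != NULL && pType->GetMethodTable() != pTargetMT)
+ //     {
+ //         pType = pType->GetParentType();
+ //     }
+ //     return pType; // NULL if pTargetMT is not in the chain
+ // }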
+
+ // --------------------------------------------------------------------------------------------
+ class bmtMDType
+ {
+ public:
+ //-----------------------------------------------------------------------------------------
+ bmtMDType(
+ bmtRTType * pParentType,
+ Module * pModule,
+ mdTypeDef tok,
+ const SigTypeContext & sigContext);
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the parent type. This takes advantage of the fact that an MD type
+ // will always have an RT type as a parent. This could change, at which point
+ // it would have to return a bmtTypeHandle.
+ bmtRTType *
+ GetParentType() const
+ { LIMITED_METHOD_CONTRACT; return m_pParentType; }
+
+ //-----------------------------------------------------------------------------------------
+ // Used during construction of the type chain, due to the fact that types point
+ // up the chain but substitutions point down.
+ void
+ SetParentType(
+ bmtRTType * pParentType)
+ { LIMITED_METHOD_CONTRACT; m_pParentType = pParentType; }
+
+ //-----------------------------------------------------------------------------------------
+ bool
+ IsNested() const
+ { LIMITED_METHOD_CONTRACT; return m_enclTok != mdTypeDefNil; }
+
+ //-----------------------------------------------------------------------------------------
+ mdTypeDef
+ GetEnclosingTypeToken() const
+ { LIMITED_METHOD_CONTRACT; return m_enclTok; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns a reference to the substitution. Currently, no substitution exists
+ // for the type being built, but it adds uniformity to the types and so a NULL
+ // substitution is created.
+ const Substitution &
+ GetSubstitution() const
+ { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(m_subst.GetModule() == NULL); return m_subst; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the runtime Module that owns this type.
+ Module *
+ GetModule() const
+ { LIMITED_METHOD_CONTRACT; return m_pModule; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the MethodTable for the type. This is null until the very end
+ // of BuildMethodTableThrowing when the MethodTable for this type is finally
+ // created in SetupMethodTable2.
+ MethodTable *
+ GetMethodTable() const
+ { LIMITED_METHOD_CONTRACT; return m_pMT; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the token for the type.
+ mdTypeDef
+ GetTypeDefToken() const
+ { LIMITED_METHOD_CONTRACT; return m_tok;}
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the metadata attributes for the type.
+ DWORD
+ GetAttrs() const
+ { WRAPPER_NO_CONTRACT; return m_dwAttrs; }
+
+ //-----------------------------------------------------------------------------------------
+ // true if the type is an interface; false otherwise.
+ bool
+ IsInterface() const
+ { WRAPPER_NO_CONTRACT; return IsTdInterface(GetAttrs()); }
+
+ private:
+ //-----------------------------------------------------------------------------------------
+ bmtRTType * m_pParentType;
+ Module * m_pModule;
+ mdTypeDef m_tok;
+ mdTypeDef m_enclTok;
+ SigTypeContext m_sigContext;
+ Substitution m_subst;
+ DWORD m_dwAttrs;
+
+ MethodTable * m_pMT;
+ }; // class bmtMDType
+
+ // --------------------------------------------------------------------------------------------
+ // This is similar to the known and loved TypeHandle class, but tailored for use during
+ // type building. It allows for homogeneous collections of heterogeneous implementations.
+ // Currently, it knows the difference between a bmtRTType and a bmtMDType and will
+ // forward method calls such as GetModule, GetParentType and more to the appropriate
+ // target.
+ class bmtTypeHandle
+ {
+ public:
+ //-----------------------------------------------------------------------------------------
+ // Creates a type handle for a bmtRTType pointer. For ease of use, this conversion
+ // constructor is not declared as explicit.
+ bmtTypeHandle(
+ bmtRTType * pRTType)
+ : m_handle(HandleFromRTType(pRTType))
+ { NOT_DEBUG(static_assert_no_msg(sizeof(bmtTypeHandle) == sizeof(UINT_PTR));) INDEBUG(m_pAsRTType = pRTType;) }
+
+ //-----------------------------------------------------------------------------------------
+ // Creates a type handle for a bmtMDType pointer. For ease of use, this conversion
+ // constructor is not declared as explicit.
+ bmtTypeHandle(
+ bmtMDType * pMDType)
+ : m_handle(HandleFromMDType(pMDType))
+ { NOT_DEBUG(static_assert_no_msg(sizeof(bmtTypeHandle) == sizeof(UINT_PTR));) INDEBUG(m_pAsMDType = pMDType;) }
+
+ //-----------------------------------------------------------------------------------------
+ // Copy constructor.
+ bmtTypeHandle(
+ const bmtTypeHandle &other)
+ { LIMITED_METHOD_CONTRACT; m_handle = other.m_handle; INDEBUG(m_pAsRTType = other.m_pAsRTType;) }
+
+ //-----------------------------------------------------------------------------------------
+ // Default, null constructor.
+ bmtTypeHandle()
+ { LIMITED_METHOD_CONTRACT; m_handle = 0; INDEBUG(m_pAsRTType = NULL;) }
+
+ //-----------------------------------------------------------------------------------------
+ // Assignment operator
+ bmtTypeHandle &
+ operator=(
+ const bmtTypeHandle &rhs)
+ { LIMITED_METHOD_CONTRACT; m_handle = rhs.m_handle; INDEBUG(m_pAsRTType = rhs.m_pAsRTType;) return *this; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns true if null (constructed using default ctor, or assigned from one); otherwise false.
+ bool
+ IsNull() const
+ { LIMITED_METHOD_CONTRACT; return m_handle == 0; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns true if this handle contains a bmtRTType pointer; otherwise returns false.
+ bool
+ IsRTType() const
+ { LIMITED_METHOD_CONTRACT; return (m_handle & RTTYPE_FLAG) != 0; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns true if this handle contains a bmtMDType pointer; otherwise returns false.
+ bool
+ IsMDType() const
+ { LIMITED_METHOD_CONTRACT; return (m_handle & MDTYPE_FLAG) != 0; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns pointer to bmtRTType. IsRTType is required
+ // to return true before calling this method.
+ bmtRTType *
+ AsRTType() const
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsRTType());
+ return (bmtRTType *) Decode(m_handle);
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns pointer to bmtMDType. IsMDType is required
+ // to return true before calling this method.
+ bmtMDType *
+ AsMDType() const
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsMDType());
+ return (bmtMDType *) Decode(m_handle);
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the parent type handle, or the null type handle if no parent exists.
+ bmtTypeHandle
+ GetParentType() const;
+
+ //-----------------------------------------------------------------------------------------
+ bool
+ IsNested() const;
+
+ //-----------------------------------------------------------------------------------------
+ mdTypeDef
+ GetEnclosingTypeToken() const;
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the runtime Module* for this type.
+ Module *
+ GetModule() const;
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the token for the type.
+ mdTypeDef
+ GetTypeDefToken() const;
+
+ //-----------------------------------------------------------------------------------------
+ // Returns reference to the substitution for this type.
+ const Substitution &
+ GetSubstitution() const;
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the MethodTable* for the type.
+ MethodTable *
+ GetMethodTable() const;
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the metadata attributes for the type.
+ DWORD
+ GetAttrs() const;
+
+ //-----------------------------------------------------------------------------------------
+ // Returns true if this type is an interface; returns false otherwise.
+ bool
+ IsInterface() const;
+
+ //-----------------------------------------------------------------------------------------
+ static bool
+ Equal(
+ const bmtTypeHandle &lhs,
+ const bmtTypeHandle &rhs)
+ {
+ return lhs.m_handle == rhs.m_handle;
+ }
+
+ protected:
+ //-----------------------------------------------------------------------------------------
+ static const UINT_PTR RTTYPE_FLAG = 0x1;
+ static const UINT_PTR MDTYPE_FLAG = 0x2;
+ static const UINT_PTR MASK_FLAG = 0x3;
+
+ //-----------------------------------------------------------------------------------------
+ // Takes a pointer and encodes it with the flag.
+ static UINT_PTR
+ Encode(
+ LPVOID pv,
+ UINT_PTR flag)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK((reinterpret_cast<UINT_PTR>(pv) & MASK_FLAG) == 0);
+ return (reinterpret_cast<UINT_PTR>(pv) | flag);
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Takes an encoded handle and removes encoding bits.
+ static LPVOID
+ Decode(
+ UINT_PTR handle)
+ { LIMITED_METHOD_CONTRACT; return reinterpret_cast<LPVOID>(handle & ~MASK_FLAG); }
+
+ //-----------------------------------------------------------------------------------------
+ // Uses encode to produce a handle for a bmtRTType*
+ static UINT_PTR
+ HandleFromRTType(
+ bmtRTType * pRTType)
+ { WRAPPER_NO_CONTRACT; return Encode(pRTType, RTTYPE_FLAG); }
+
+ //-----------------------------------------------------------------------------------------
+ // Uses encode to produce a handle for a bmtMDType*
+ static UINT_PTR
+ HandleFromMDType(
+ bmtMDType * pMDType)
+ { WRAPPER_NO_CONTRACT; return Encode(pMDType, MDTYPE_FLAG); }
+
+ //-----------------------------------------------------------------------------------------
+ UINT_PTR m_handle;
+
+#ifdef _DEBUG
+ //-----------------------------------------------------------------------------------------
+ // Used in debug builds to quickly access the type in a debugger.
+ union
+ {
+ bmtRTType * m_pAsRTType;
+ bmtMDType * m_pAsMDType;
+ };
+#endif
+ }; // class bmtTypeHandle
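+
+ // Editorial note (not part of the original source): bmtTypeHandle is a classic
+ // low-bit tagged pointer. Because these objects are allocated with at least
+ // 4-byte alignment, the two low bits of the pointer are guaranteed zero and can
+ // carry the dynamic type instead. A self-contained sketch of the same technique:
+ //
+ // #include <cassert>
+ // #include <cstdint>
+ //
+ // struct RT { int a; };
+ // struct MD { int b; };
+ //
+ // class TaggedHandle
+ // {
+ //     static const uintptr_t RT_FLAG = 0x1, MD_FLAG = 0x2, MASK = 0x3;
+ //     uintptr_t m_bits;
+ // public:
+ //     TaggedHandle(RT * p) : m_bits(reinterpret_cast<uintptr_t>(p) | RT_FLAG)
+ //         { assert((reinterpret_cast<uintptr_t>(p) & MASK) == 0); }
+ //     TaggedHandle(MD * p) : m_bits(reinterpret_cast<uintptr_t>(p) | MD_FLAG)
+ //         { assert((reinterpret_cast<uintptr_t>(p) & MASK) == 0); }
+ //     bool IsRT() const { return (m_bits & RT_FLAG) != 0; }
+ //     bool IsMD() const { return (m_bits & MD_FLAG) != 0; }
+ //     RT * AsRT() const { assert(IsRT()); return reinterpret_cast<RT *>(m_bits & ~MASK); }
+ //     MD * AsMD() const { assert(IsMD()); return reinterpret_cast<MD *>(m_bits & ~MASK); }
+ // };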
+
+ // --------------------------------------------------------------------------------------------
+ // MethodSignature encapsulates the name and metadata signature of a method, as well as
+ // the scope (Module*) and substitution for the signature. It is intended to facilitate
+ // passing around this tuple of information as well as providing efficient comparison
+ // operations when looking for types.
+ //
+ // Meant to be passed around by reference or by value. Please make sure this is declared
+ // on the stack or properly deleted after use.
+
+ class MethodSignature
+ {
+ public:
+ //-----------------------------------------------------------------------------------------
+ // This is the constructor usually used, and is typically contained inside a
+ // bmtMDMethod or bmtRTMethod.
+ MethodSignature(
+ Module * pModule,
+ mdToken tok,
+ const Substitution * pSubst)
+ : m_pModule(pModule),
+ m_tok(tok),
+ m_szName(NULL),
+ m_pSig(NULL),
+ m_cSig(0),
+ m_pSubst(pSubst),
+ m_nameHash(INVALID_NAME_HASH)
+ {
+ CONTRACTL {
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(TypeFromToken(tok) == mdtMethodDef ||
+ TypeFromToken(tok) == mdtMemberRef);
+ } CONTRACTL_END;
+ INDEBUG(CheckGetMethodAttributes();)
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // This constructor can be used with hard-coded signatures that are used for
+ // locating .ctor and .cctor methods.
+ MethodSignature(
+ Module * pModule,
+ LPCUTF8 szName,
+ PCCOR_SIGNATURE pSig,
+ size_t cSig,
+ const Substitution * pSubst = NULL)
+ : m_pModule(pModule),
+ m_tok(mdTokenNil),
+ m_szName(szName),
+ m_pSig(pSig),
+ m_cSig(cSig),
+ m_pSubst(pSubst),
+ m_nameHash(INVALID_NAME_HASH)
+ {
+ CONTRACTL {
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(szName));
+ PRECONDITION(CheckPointer(pSig));
+ PRECONDITION(cSig != 0);
+ } CONTRACTL_END;
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Copy constructor.
+ MethodSignature(
+ const MethodSignature & s)
+ : m_pModule(s.m_pModule),
+ m_tok(s.m_tok),
+ m_szName(s.m_szName),
+ m_pSig(s.m_pSig),
+ m_cSig(s.m_cSig),
+ m_pSubst(s.m_pSubst),
+ m_nameHash(s.m_nameHash)
+ { }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the module that is the scope within which the signature itself lives.
+ Module *
+ GetModule() const
+ { LIMITED_METHOD_CONTRACT; return m_pModule; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the signature token. Note that this can be mdTokenNil if the second
+ // constructor above is used.
+ mdToken
+ GetToken() const
+ { LIMITED_METHOD_CONTRACT; return m_tok; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the name of the method.
+ inline LPCUTF8
+ GetName() const
+ { WRAPPER_NO_CONTRACT; CheckGetMethodAttributes(); return m_szName; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the metadata signature for the method.
+ inline PCCOR_SIGNATURE
+ GetSignature() const
+ { WRAPPER_NO_CONTRACT; CheckGetMethodAttributes(); return m_pSig; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the signature length.
+ inline size_t
+ GetSignatureLength() const
+ { WRAPPER_NO_CONTRACT; CheckGetMethodAttributes(); return m_cSig; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the substitution to be used in interpreting the signature.
+ const Substitution &
+ GetSubstitution() const
+ { return *m_pSubst; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns true if the names are equal; otherwise returns false. This is a
+ // case-sensitive comparison.
+ static bool
+ NamesEqual(
+ const MethodSignature & sig1,
+ const MethodSignature & sig2);
+
+ //-----------------------------------------------------------------------------------------
+ // Returns true if the metadata signatures (PCCOR_SIGNATURE) are equivalent. (Type equivalence permitted)
+ static bool
+ SignaturesEquivalent(
+ const MethodSignature & sig1,
+ const MethodSignature & sig2);
+
+ //-----------------------------------------------------------------------------------------
+ // Returns true if the metadata signatures (PCCOR_SIGNATURE) are exactly equal. (No type equivalence permitted)
+ static bool
+ SignaturesExactlyEqual(
+ const MethodSignature & sig1,
+ const MethodSignature & sig2);
+ //-----------------------------------------------------------------------------------------
+ // This is a combined name and sig comparison. Semantically equivalent to
+ // "NamesEqual(*this, rhs) && SignaturesEquivalent(*this, rhs)".
+ bool
+ Equivalent(
+ const MethodSignature &rhs) const;
+
+ //-----------------------------------------------------------------------------------------
+ // This is a combined name and sig comparison. Semantically equivalent to
+ // "NamesEqual(*this, rhs) && SignaturesExactlyEqual(*this, rhs)".
+ bool
+ ExactlyEqual(
+ const MethodSignature &rhs) const;
+
+ //-----------------------------------------------------------------------------------------
+ // Conversion operator to Module*. This should possibly be removed.
+ operator Module *() const
+ { return GetModule(); }
+
+ //-----------------------------------------------------------------------------------------
+ // Conversion operator to LPCUTF8, returning name. This should possibly be removed.
+ operator LPCUTF8() const
+ { return GetName(); }
+
+ //-----------------------------------------------------------------------------------------
+ // Conversion operator to PCCOR_SIGNATURE. This should possibly be removed.
+ operator PCCOR_SIGNATURE() const
+ { return GetSignature(); }
+
+ protected:
+ //-----------------------------------------------------------------------------------------
+ Module * m_pModule;
+ mdToken m_tok;
+ mutable LPCUTF8 m_szName; // mutable because it is lazily evaluated.
+ mutable PCCOR_SIGNATURE m_pSig; // mutable because it is lazily evaluated.
+ mutable size_t m_cSig; // mutable because it is lazily evaluated.
+ const Substitution * m_pSubst;
+
+ static const ULONG INVALID_NAME_HASH = static_cast<ULONG>(-1);
+ mutable ULONG m_nameHash; // mutable because it is lazily evaluated.
+
+ //-----------------------------------------------------------------------------------------
+ inline void
+ CheckGetMethodAttributes() const
+ {
+ WRAPPER_NO_CONTRACT;
+ if (m_tok != mdTokenNil && m_szName == NULL)
+ {
+ GetMethodAttributes();
+ }
+ }
+
+ //-----------------------------------------------------------------------------------------
+ void
+ GetMethodAttributes() const;
+
+ //-----------------------------------------------------------------------------------------
+ UINT32
+ GetNameHash() const;
+
+ private:
+ //-----------------------------------------------------------------------------------
+ // Private to prevent use.
+ MethodSignature *
+ operator&()
+ { return this; }
+ }; // class MethodSignature
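+
+ // Editorial note (not part of the original source): the mutable m_szName/m_pSig/
+ // m_cSig members implement the usual lazy-cache idiom: const getters call
+ // CheckGetMethodAttributes, which fetches from metadata only on first use. A
+ // minimal standalone sketch of the idiom (FetchFromMetadata is hypothetical):
+ //
+ // class LazyName
+ // {
+ //     int                  m_tok;    // key used to fetch on demand
+ //     mutable const char * m_szName; // cache; mutable so const getters may fill it
+ //     static const char * FetchFromMetadata(int tok); // hypothetical fetcher
+ // public:
+ //     explicit LazyName(int tok) : m_tok(tok), m_szName(nullptr) { }
+ //     const char * GetName() const
+ //     {
+ //         if (m_szName == nullptr)                  // first call pays the cost;
+ //             m_szName = FetchFromMetadata(m_tok);  // later calls hit the cache
+ //         return m_szName;
+ //     }
+ // };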
+
+ // --------------------------------------------------------------------------------------------
+ class bmtRTMethod
+ {
+ public:
+ //-----------------------------------------------------------------------------------------
+ // Constructor.
+ bmtRTMethod(
+ bmtRTType * pOwningType,
+ MethodDesc * pMD);
+
+ //-----------------------------------------------------------------------------------------
+ // Returns owning type for this method.
+ bmtRTType *
+ GetOwningType() const
+ { LIMITED_METHOD_CONTRACT; return m_pOwningType; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns MethodDesc* for this method.
+ MethodDesc *
+ GetMethodDesc() const
+ { LIMITED_METHOD_CONTRACT; return m_pMD; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns reference to MethodSignature object for this type.
+ const MethodSignature &
+ GetMethodSignature() const
+ { LIMITED_METHOD_CONTRACT; return m_methodSig; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns metadata declaration attributes for this method.
+ DWORD
+ GetDeclAttrs() const;
+
+ //-----------------------------------------------------------------------------------------
+ // Returns metadata implementation attributes for this method.
+ DWORD
+ GetImplAttrs() const;
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the slot in which this method is placed.
+ SLOT_INDEX
+ GetSlotIndex() const;
+
+ private:
+ //-----------------------------------------------------------------------------------------
+ bmtRTType * m_pOwningType;
+ MethodDesc * m_pMD;
+ MethodSignature m_methodSig;
+ }; // class bmtRTMethod
+
+ // --------------------------------------------------------------------------------------------
+ // Encapsulates method data for a method described by metadata.
+ class bmtMDMethod
+ {
+ public:
+ //-----------------------------------------------------------------------------------------
+ // Constructor. This takes all the information already extracted from the metadata interface
+ // because the place that creates these types already has this data. Alternatively,
+ // a constructor could be written to take a token and metadata scope instead. Also,
+ // it might be interesting to move METHOD_TYPE and METHOD_IMPL_TYPE to setter functions.
+ bmtMDMethod(
+ bmtMDType * pOwningType,
+ mdMethodDef tok,
+ DWORD dwDeclAttrs,
+ DWORD dwImplAttrs,
+ DWORD dwRVA,
+ METHOD_TYPE type,
+ METHOD_IMPL_TYPE implType);
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the type that owns the *declaration* of this method. This makes sure that a
+ // method can be properly interpreted in the context of substitutions at any time.
+ bmtMDType *
+ GetOwningType() const
+ { LIMITED_METHOD_CONTRACT; return m_pOwningType; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns a reference to the MethodSignature for this method.
+ const MethodSignature &
+ GetMethodSignature() const
+ { LIMITED_METHOD_CONTRACT; return m_methodSig; }
+
+ //-----------------------------------------------------------------------------------------
+ // Sets the slot that this method is assigned to.
+ void
+ SetSlotIndex(SLOT_INDEX idx);
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the slot that this method is assigned to.
+ SLOT_INDEX
+ GetSlotIndex() const
+ { LIMITED_METHOD_CONTRACT; return m_slotIndex; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the method type (normal, fcall, etc.) that this type was constructed with.
+ METHOD_TYPE
+ GetMethodType() const
+ { LIMITED_METHOD_CONTRACT; return m_type; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the method impl type (is or isn't) that this type was constructed with.
+ METHOD_IMPL_TYPE
+ GetMethodImplType() const
+ { LIMITED_METHOD_CONTRACT; return m_implType; }
+
+ //-----------------------------------------------------------------------------------------
+ // Gets the MethodDesc* for this method. Defaults to NULL until SetMethodDesc is called
+ // with a non-NULL MethodDesc* value.
+ MethodDesc *
+ GetMethodDesc() const
+ { LIMITED_METHOD_CONTRACT; _ASSERTE(m_pMD != NULL); return m_pMD; }
+
+ //-----------------------------------------------------------------------------------------
+ // Once a MethodDesc* is created for this method, this method will store the association.
+ void
+ SetMethodDesc(MethodDesc * pMD)
+ { LIMITED_METHOD_CONTRACT; _ASSERTE(m_pMD == NULL); m_pMD = pMD; }
+
+ //-----------------------------------------------------------------------------------------
+ // Virtual slots for ValueTypes are converted to stubs which unbox the incoming boxed
+ // "this" argument, and forward the call to the unboxed entrypoint.
+ bool
+ IsUnboxing()
+ { WRAPPER_NO_CONTRACT; return GetUnboxedSlotIndex() != INVALID_SLOT_INDEX; }
+
+ //-----------------------------------------------------------------------------------------
+ // This and SetUnboxedMethodDesc are used to indicate that this method exists as a dual
+ // entrypoint method for a ValueType.
+ void
+ SetUnboxedSlotIndex(SLOT_INDEX idx);
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the slot for the unboxed entrypoint. If no such slot exists, returns
+ // INVALID_SLOT_INDEX.
+ SLOT_INDEX
+ GetUnboxedSlotIndex() const
+ { LIMITED_METHOD_CONTRACT; return m_unboxedSlotIndex; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the MethodDesc* for the unboxed entrypoint. If no such pointer exists, returns
+ // NULL.
+ MethodDesc *
+ GetUnboxedMethodDesc() const
+ { LIMITED_METHOD_CONTRACT; _ASSERTE(m_pMD != NULL); return m_pUnboxedMD; }
+
+ //-----------------------------------------------------------------------------------------
+ // Sets the MethodDesc* for the unboxed entrypoint.
+ void
+ SetUnboxedMethodDesc(MethodDesc * pUnboxingMD)
+ { LIMITED_METHOD_CONTRACT; _ASSERTE(m_pUnboxedMD == NULL); m_pUnboxedMD = pUnboxingMD; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the metadata declaration attributes for this method.
+ DWORD
+ GetDeclAttrs() const
+ { LIMITED_METHOD_CONTRACT; return m_dwDeclAttrs; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the metadata implementation attributes for this method.
+ DWORD
+ GetImplAttrs() const
+ { LIMITED_METHOD_CONTRACT; return m_dwImplAttrs; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the RVA for the metadata of this method.
+ DWORD
+ GetRVA() const
+ { LIMITED_METHOD_CONTRACT; return m_dwRVA; }
+
+ private:
+ //-----------------------------------------------------------------------------------------
+ bmtMDType * m_pOwningType;
+
+ DWORD m_dwDeclAttrs;
+ DWORD m_dwImplAttrs;
+ DWORD m_dwRVA;
+ METHOD_TYPE m_type; // Specific MethodDesc flavour
+ METHOD_IMPL_TYPE m_implType; // Whether or not the method is a methodImpl body
+ MethodSignature m_methodSig;
+
+ MethodDesc * m_pMD; // MethodDesc created and assigned to this method
+ MethodDesc * m_pUnboxedMD; // Unboxing MethodDesc if this is a virtual method on a valuetype
+ SLOT_INDEX m_slotIndex; // Vtable slot number this method is assigned to
+ SLOT_INDEX m_unboxedSlotIndex;
+ }; // class bmtMDMethod
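+
+ // Editorial note (not part of the original source): the unboxing slot pair above
+ // is what lets a valuetype override (e.g. a struct S overriding ToString) be
+ // invoked both through a boxed reference (the virtual slot holds the unboxing
+ // stub, which unwraps the boxed "this") and directly on an unboxed S (the extra
+ // non-virtual slot holds the unboxed entrypoint).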
+
+ // --------------------------------------------------------------------------------------------
+ // Provides a homogeneous view over potentially different method representations, similar to
+ // bmtTypeHandle and TypeHandle. Currently it can hold either a bmtRTMethod* or a bmtMDMethod*.
+ class bmtMethodHandle
+ {
+ public:
+ //-----------------------------------------------------------------------------------------
+ // Constructor taking a bmtRTMethod*.
+ bmtMethodHandle(
+ bmtRTMethod * pRTMethod)
+ : m_handle(HandleFromRTMethod(pRTMethod))
+ { NOT_DEBUG(static_assert_no_msg(sizeof(bmtMethodHandle) == sizeof(UINT_PTR));) INDEBUG(m_pAsRTMethod = pRTMethod;) }
+
+ //-----------------------------------------------------------------------------------------
+ // Constructor taking a bmtMDMethod*.
+ bmtMethodHandle(
+ bmtMDMethod * pMDMethod)
+ : m_handle(HandleFromMDMethod(pMDMethod))
+ { NOT_DEBUG(static_assert_no_msg(sizeof(bmtMethodHandle) == sizeof(UINT_PTR));) INDEBUG(m_pAsMDMethod = pMDMethod;) }
+
+ //-----------------------------------------------------------------------------------------
+ // Copy constructor.
+ bmtMethodHandle(
+ const bmtMethodHandle &other)
+ { LIMITED_METHOD_CONTRACT; m_handle = other.m_handle; INDEBUG(m_pAsRTMethod = other.m_pAsRTMethod;) }
+
+ //-----------------------------------------------------------------------------------------
+ // Default constructor. Handle defaults to NULL.
+ bmtMethodHandle()
+ { LIMITED_METHOD_CONTRACT; m_handle = 0; INDEBUG(m_pAsRTMethod = NULL;) }
+
+ //-----------------------------------------------------------------------------------------
+ // Assignment.
+ bmtMethodHandle &
+ operator=(
+ const bmtMethodHandle &rhs)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_handle = rhs.m_handle;
+ INDEBUG(m_pAsRTMethod = rhs.m_pAsRTMethod;)
+ return *this;
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns true if default constructed or assigned to from a NULL handle.
+ bool
+ IsNull() const
+ { LIMITED_METHOD_CONTRACT; return m_handle == 0; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns true if the handle points to a bmtRTMethod; returns false otherwise.
+ bool
+ IsRTMethod() const
+ { LIMITED_METHOD_CONTRACT; return (m_handle & RTMETHOD_FLAG) != 0; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns true if the handle points to a bmtMDMethod; returns false otherwise.
+ bool
+ IsMDMethod() const
+ { LIMITED_METHOD_CONTRACT; return (m_handle & MDMETHOD_FLAG) != 0; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns pointer to bmtRTMethod. IsRTMethod is required to return true before
+ // calling this method.
+ bmtRTMethod *
+ AsRTMethod() const
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsRTMethod());
+ return (bmtRTMethod *) Decode(m_handle);
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns pointer to bmtMDMethod. IsMDMethod is required to return true before
+ // calling this method.
+ bmtMDMethod *
+ AsMDMethod() const
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(IsMDMethod());
+ return (bmtMDMethod *) Decode(m_handle);
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Comparison operator. Returns true if handles point to the same object; returns
+ // false otherwise.
+ bool
+ operator==(
+ const bmtMethodHandle &rhs) const;
+
+ bool operator!=(const bmtMethodHandle &rhs) const { return !((*this) == rhs); }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the owning type.
+ bmtTypeHandle
+ GetOwningType() const;
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the metadata declaration attributes for this method.
+ DWORD
+ GetDeclAttrs() const;
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the metadata implementation attributes for this method.
+ DWORD
+ GetImplAttrs() const;
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the slot that this method is assigned to.
+ SLOT_INDEX
+ GetSlotIndex() const;
+
+ //-----------------------------------------------------------------------------------------
+ // Returns a reference to the MethodSignature for this method.
+ const MethodSignature &
+ GetMethodSignature() const;
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the MethodDesc* associated with this method.
+ MethodDesc *
+ GetMethodDesc() const;
+
+ protected:
+ //-----------------------------------------------------------------------------------------
+ static const UINT_PTR RTMETHOD_FLAG = 0x1;
+ static const UINT_PTR MDMETHOD_FLAG = 0x2;
+ static const UINT_PTR MASK_FLAG = 0x3;
+
+ //-----------------------------------------------------------------------------------------
+ // Takes a pointer and encodes it with the flag.
+ static UINT_PTR
+ Encode(
+ LPVOID pv,
+ UINT_PTR flag)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK((reinterpret_cast<UINT_PTR>(pv) & MASK_FLAG) == 0);
+ return (reinterpret_cast<UINT_PTR>(pv) | flag);
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Takes an encoded handle and removes encoding bits.
+ static LPVOID
+ Decode(
+ UINT_PTR handle)
+ { LIMITED_METHOD_CONTRACT; return reinterpret_cast<LPVOID>(handle & ~MASK_FLAG); }
+
+ //-----------------------------------------------------------------------------------------
+ // Uses encode to produce a handle for a bmtRTMethod*
+ static UINT_PTR
+ HandleFromRTMethod(
+ bmtRTMethod * pRTMethod)
+ { WRAPPER_NO_CONTRACT; return Encode(pRTMethod, RTMETHOD_FLAG); }
+
+ //-----------------------------------------------------------------------------------------
+ // Uses encode to produce a handle for a bmtMDMethod*
+ static UINT_PTR
+ HandleFromMDMethod(
+ bmtMDMethod * pMDMethod)
+ { WRAPPER_NO_CONTRACT; return Encode(pMDMethod, MDMETHOD_FLAG); }
+
+ //-----------------------------------------------------------------------------------------
+ // This is the value of the encoded pointer.
+ UINT_PTR m_handle;
+
+#ifdef _DEBUG
+ //-----------------------------------------------------------------------------------------
+ // Used in debug builds to quickly access the type in a debugger.
+ union
+ {
+ bmtRTMethod * m_pAsRTMethod;
+ bmtMDMethod * m_pAsMDMethod;
+ };
+#endif
+ }; // class bmtMethodHandle
+
+ // --------------------------------------------------------------------------------------------
+ // Represents a method slot. It has a declaration and implementation value because these can
+ // differ if the slot has been modified with a methodImpl. Otherwise, these two values are
+ // typically identical.
+ class bmtMethodSlot
+ {
+ public:
+ //-----------------------------------------------------------------------------------------
+ // Constructor for an empty slot. Both handles default to null.
+ bmtMethodSlot()
+ : m_decl(),
+ m_impl()
+ { LIMITED_METHOD_CONTRACT; }
+
+ //-----------------------------------------------------------------------------------------
+ // Constructor with both values explicitly provided. Either use this constructor or assign
+ // to each value individually using non-const Decl and Impl methods.
+ bmtMethodSlot(
+ const bmtMethodHandle & decl,
+ const bmtMethodHandle & impl)
+ : m_decl(decl),
+ m_impl(impl)
+ { LIMITED_METHOD_CONTRACT; }
+
+ //-----------------------------------------------------------------------------------------
+ // Copy constructor.
+ bmtMethodSlot(
+ const bmtMethodSlot & other)
+ : m_decl(other.m_decl),
+ m_impl(other.m_impl)
+ { LIMITED_METHOD_CONTRACT; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns a reference to the declaration method for this slot. This can be used as a
+ // getter or a setter.
+ bmtMethodHandle &
+ Decl()
+ { LIMITED_METHOD_CONTRACT; return m_decl; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns a reference to the implementation method for this slot. This can be used as a
+ // getter or a setter.
+ bmtMethodHandle &
+ Impl()
+ { LIMITED_METHOD_CONTRACT; return m_impl; }
+
+ //-----------------------------------------------------------------------------------------
+ // Const version of Decl.
+ const bmtMethodHandle &
+ Decl() const
+ { LIMITED_METHOD_CONTRACT; return m_decl; }
+
+ //-----------------------------------------------------------------------------------------
+ // Const version of Impl.
+ const bmtMethodHandle &
+ Impl() const
+ { LIMITED_METHOD_CONTRACT; return m_impl; }
+
+ private:
+ bmtMethodHandle m_decl;
+ bmtMethodHandle m_impl;
+ }; // class bmtMethodSlot
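+
+ // Editorial note (not part of the original source): for example, with a
+ // methodImpl that maps virtual declaration I::Foo onto body C::FooImpl, the
+ // slot records Decl() == I::Foo (what callers bind against) and Impl() ==
+ // C::FooImpl (what actually runs); without a methodImpl the two handles are
+ // identical.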
+
+ // --------------------------------------------------------------------------------------------
+ struct bmtProperties
+ {
+ bool fIsValueClass;
+ bool fIsEnum;
+ bool fNoSanityChecks;
+ bool fSparse; // Set to true if a sparse interface is being used.
+#ifdef FEATURE_REMOTING
+ bool fMarshaledByRef;
+ bool fIsContextful;
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ // Com Interop, ComWrapper classes extend from ComObject
+ bool fIsComObjectType; // whether this class is an instance of ComObject class
+
+ bool fIsMngStandardItf; // Set to true if the interface is a managed standard interface.
+ bool fComEventItfType; // Set to true if the class is a special COM event interface.
+ bool fIsRedirectedInterface; // Set to true if the class is an interface redirected for WinRT
+ bool fNeedsRCWPerTypeData; // Set to true if the class needs optional RCW data attached to the MethodTable
+#endif // FEATURE_COMINTEROP
+#ifdef FEATURE_TYPEEQUIVALENCE
+ bool fHasTypeEquivalence; // Set to true if the class is decorated by TypeIdentifierAttribute, or through some other technique is influenced by type equivalence
+ bool fIsTypeEquivalent; // Set to true if the class is decorated by TypeIdentifierAttribute
+#endif
+
+ bool fDynamicStatics; // Set to true if the statics will be allocated in the dynamic statics area
+ bool fGenericsStatics; // Set to true if there are per-instantiation statics
+
+ DWORD dwNonGCRegularStaticFieldBytes;
+ DWORD dwNonGCThreadStaticFieldBytes;
+
+ inline bmtProperties() { LIMITED_METHOD_CONTRACT; memset((void *)this, NULL, sizeof(*this)); }
+ }; // struct bmtProperties
+
+ // --------------------------------------------------------------------------------------------
+ // Holds an array of bmtMethodSlot values.
+ class bmtMethodSlotTable
+ {
+ public:
+ //-----------------------------------------------------------------------------------------
+ // Create a table that can hold maxSlotIdx slots. All slots up to maxSlotIdx are initialized
+ // with the default constructor.
+ bmtMethodSlotTable(
+ SLOT_INDEX maxSlotIdx,
+ StackingAllocator * pStackingAllocator)
+ : m_curSlotIdx(0),
+ m_maxSlotIdx(maxSlotIdx),
+ m_rgSlots(new (pStackingAllocator) bmtMethodSlot[maxSlotIdx])
+ { CONTRACTL { THROWS; } CONTRACTL_END; }
+
+ //-----------------------------------------------------------------------------------------
+ // Subscript operator
+ template <typename INDEX_TYPE>
+ bmtMethodSlot & operator[](INDEX_TYPE idx) const
+ { WRAPPER_NO_CONTRACT; ValidateIdx(idx); return m_rgSlots[idx]; }
+
+ //-----------------------------------------------------------------------------------------
+ // Pushes the value of slot to the end of the array.
+ bool
+ AddMethodSlot(const bmtMethodSlot & slot)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(m_curSlotIdx <= m_maxSlotIdx);
+ if (m_curSlotIdx == m_maxSlotIdx)
+ return false;
+ (*this)[m_curSlotIdx++] = slot;
+ return true;
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // The current size of the used entries in the array.
+ SLOT_INDEX
+ GetSlotCount()
+ { LIMITED_METHOD_CONTRACT; return m_curSlotIdx; }
+
+ //-----------------------------------------------------------------------------------------
+ // Used to iterate the contents of the array.
+ typedef IteratorUtil::ArrayIterator<bmtMethodSlot> Iterator;
+
+ Iterator
+ IterateSlots()
+ { return Iterator(m_rgSlots, GetSlotCount()); }
+
+ private:
+ //-----------------------------------------------------------------------------------------
+ SLOT_INDEX m_curSlotIdx;
+ SLOT_INDEX m_maxSlotIdx;
+ bmtMethodSlot * m_rgSlots;
+
+ template <typename INDEX_TYPE>
+ void
+ ValidateIdx(
+ INDEX_TYPE idx) const
+ { CONSISTENCY_CHECK(idx < m_curSlotIdx); }
+ }; // class bmtMethodSlotTable
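+
+ // Editorial note (not part of the original source): the table is deliberately
+ // fixed-capacity; AddMethodSlot fails rather than grows, so callers must size it
+ // up front (see dwMaxVtableSize below). Illustrative use:
+ //
+ // bmtMethodSlotTable table(3, pStackingAllocator); // room for exactly 3 slots
+ // while (table.AddMethodSlot(bmtMethodSlot()))     // succeeds 3 times,
+ // { /* empty */ }                                  // then returns false
+ // _ASSERTE(table.GetSlotCount() == 3);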
+
+ // --------------------------------------------------------------------------------------------
+ struct bmtParentInfo;
+
+ // --------------------------------------------------------------------------------------------
+ // This type is used in creating the slot layout that will be used in the MethodTable.
+ struct bmtVtable
+ {
+#ifdef _DEBUG
+ //-----------------------------------------------------------------------------------------
+ // Used to make sure no virtual methods are added to the vtable after non-virtuals have
+ // begun to be added.
+ bool m_fIsVirtualSlotSectionSealed;
+
+ bool
+ IsVirtualSlotSectionSealed() const
+ { LIMITED_METHOD_CONTRACT; return m_fIsVirtualSlotSectionSealed; }
+
+ void
+ SealVirtualSlotSection()
+ { LIMITED_METHOD_CONTRACT; m_fIsVirtualSlotSectionSealed = true; }
+#endif
+
+ //-----------------------------------------------------------------------------------------
+ // Implemented using a bmtMethodSlotTable
+ bmtMethodSlotTable * pSlotTable;
+
+ // Used to keep track of the default and static type constructors.
+ bmtMDMethod * pDefaultCtor;
+ bmtMDMethod * pCCtor;
+
+ // Upper bound on size of vtable. Used in initializing pSlotTable
+ DWORD dwMaxVtableSize;
+
+ // Used to keep track of how many virtual and total slots are in the vtable
+ SLOT_INDEX cVirtualSlots;
+ SLOT_INDEX cTotalSlots;
+
+ // Number of slots allocated in Vtable
+ SLOT_INDEX cVtableSlots;
+
+ // The dispatch map builder for this type.
+ //@TODO: This should be moved.
+ DispatchMapBuilder *pDispatchMapBuilder;
+
+ //-----------------------------------------------------------------------------------------
+ // Appends this method to the vtable as a newslot virtual. Decl and Impl are both set to be
+ // the value of pMethod.
+ bool
+ AddVirtualMethod(bmtMDMethod * pMethod)
+ {
+ CONSISTENCY_CHECK(!IsVirtualSlotSectionSealed());
+ pMethod->SetSlotIndex(pSlotTable->GetSlotCount());
+ if (!pSlotTable->AddMethodSlot(bmtMethodSlot(pMethod, pMethod)))
+ return false;
+ ++cVirtualSlots;
+ ++cTotalSlots;
+ return true;
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Overwrites an existing slot's Decl and Impl values with those of pMethod.
+ void
+ SetVirtualMethodOverride(SLOT_INDEX idx, bmtMDMethod * pMethod)
+ {
+ CONSISTENCY_CHECK(!IsVirtualSlotSectionSealed());
+ pMethod->SetSlotIndex(idx);
+ (*pSlotTable)[idx] = bmtMethodSlot(pMethod, pMethod);
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Overwrites an existing slot's Impl value (but *NOT* Decl) with that of pImplMethod.
+ void
+ SetVirtualMethodImpl(SLOT_INDEX idx, bmtMDMethod * pImplMethod)
+ {
+ LIMITED_METHOD_CONTRACT;
+ (*pSlotTable)[idx] = bmtMethodSlot((*pSlotTable)[idx].Decl(), pImplMethod);
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Appends this method to the vtable as a newslot non-virtual. Decl and Impl are both set to be
+ // the value of pMethod.
+ bool
+ AddNonVirtualMethod(bmtMDMethod * pMethod)
+ {
+ INDEBUG(SealVirtualSlotSection());
+ CONSISTENCY_CHECK(!IsMdVirtual(pMethod->GetDeclAttrs()));
+ pMethod->SetSlotIndex(pSlotTable->GetSlotCount());
+ if (!pSlotTable->AddMethodSlot(bmtMethodSlot(pMethod, pMethod)))
+ return false;
+ ++cTotalSlots;
+ return true;
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Adds this method as an unboxed entrypoint to the vtable as a newslot non-virtual.
+ bool
+ AddUnboxedMethod(bmtMDMethod * pMethod)
+ {
+ INDEBUG(SealVirtualSlotSection());
+ CONSISTENCY_CHECK(IsMdVirtual(pMethod->GetDeclAttrs()));
+ pMethod->SetUnboxedSlotIndex(pSlotTable->GetSlotCount());
+ if (!pSlotTable->AddMethodSlot(bmtMethodSlot(pMethod, pMethod)))
+ return false;
+ ++cTotalSlots;
+ return true;
+ }
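+
+        //-----------------------------------------------------------------------------------------
+        // Illustrative sketch (commentary only, not compiled): the intended ordering of the
+        // Add* methods above. All virtual slots must be appended before the first non-virtual
+        // slot, since AddNonVirtualMethod and AddUnboxedMethod seal the virtual section in
+        // debug builds. The locals below are hypothetical.
+        //
+        //   bmtVT->AddVirtualMethod(pVirtMethod);      // appends at pSlotTable->GetSlotCount()
+        //   bmtVT->AddNonVirtualMethod(pHelperMethod); // seals the virtual section (debug)
+        //   bmtVT->AddVirtualMethod(pOtherMethod);     // would now assert in debug builds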
+
+ //-----------------------------------------------------------------------------------------
+ // If a default constructor has been set, this returns the slot assigned to the method;
+ // otherwise returns INVALID_SLOT_INDEX.
+ SLOT_INDEX
+ GetDefaultCtorSlotIndex() const
+ {
+ if (pDefaultCtor != NULL)
+ {
+ return pDefaultCtor->GetSlotIndex();
+ }
+ else
+ {
+ return INVALID_SLOT_INDEX;
+ }
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // If a static type constructor has been set, this returns the slot assigned to the method;
+ // otherwise returns INVALID_SLOT_INDEX.
+ SLOT_INDEX
+ GetClassCtorSlotIndex() const
+ {
+ if (pCCtor != NULL)
+ {
+ return pCCtor->GetSlotIndex();
+ }
+ else
+ {
+ return INVALID_SLOT_INDEX;
+ }
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Subscript operator
+ bmtMethodSlot & operator[](SLOT_INDEX idx) const
+ { WRAPPER_NO_CONTRACT; return (*pSlotTable)[idx]; }
+
+ //-----------------------------------------------------------------------------------------
+ inline bmtVtable()
+ : INDEBUG_COMMA(m_fIsVirtualSlotSectionSealed(false))
+ pSlotTable(NULL),
+ pDefaultCtor(NULL),
+ pCCtor(NULL),
+ dwMaxVtableSize(0),
+ cVirtualSlots(0),
+            cTotalSlots(0),
+            cVtableSlots(0),
+ pDispatchMapBuilder(NULL)
+ { LIMITED_METHOD_CONTRACT; }
+
+ typedef bmtMethodSlotTable::Iterator Iterator;
+
+ Iterator
+ IterateSlots()
+ { return pSlotTable->IterateSlots(); }
+ }; // struct bmtVtable
+
+ // --------------------------------------------------------------------------------------------
+ typedef FixedCapacityStackingAllocatedUTF8StringHash<bmtRTMethod *> MethodNameHash;
+
+ // --------------------------------------------------------------------------------------------
+ struct bmtParentInfo
+ {
+ bmtMethodSlotTable *pSlotTable;
+
+ typedef bmtMethodSlotTable::Iterator Iterator;
+
+ //-----------------------------------------------------------------------------------------
+ // Iterate the slots of the parent type.
+ Iterator
+ IterateSlots()
+ { return pSlotTable->IterateSlots(); }
+
+ //-----------------------------------------------------------------------------------------
+ // Subscript operator
+ bmtMethodSlot & operator[](SLOT_INDEX idx) const
+ { WRAPPER_NO_CONTRACT; return (*pSlotTable)[idx]; }
+
+ DWORD NumParentPointerSeries;
+ MethodNameHash *pParentMethodHash;
+
+        inline bmtParentInfo() { LIMITED_METHOD_CONTRACT; memset((void *)this, 0, sizeof(*this)); }
+ }; // struct bmtParentInfo
+
+ // --------------------------------------------------------------------------------------------
+ // This will create bmtMethodSlotTable fully describing the vtable of the parent type. This
+ // currently includes both virtual and non-virtual, though for the purposes of building the
+ // current type virtual methods are the only ones that are necessary and so we could remove
+ // the non-virtual method importing if it proves to be a performance bottleneck.
+ void
+ ImportParentMethods();
+
+ // --------------------------------------------------------------------------------------------
+ // Copies the virtual slots from the parent into the current type's vtable, effectively
+ // performing the virtual method inheritance step of type layout.
+ void
+ CopyParentVtable();
+
+ // --------------------------------------------------------------------------------------------
+ // The ECMA spec declares that interfaces get placed differently depending on how they
+ // are declared (see comment before the implementation of PlaceInterfaceMethods for details).
+ // This is used to keep track of the declaration conditions as interfaces are expanded.
+ struct InterfaceDeclarationScope
+ {
+ //-----------------------------------------------------------------------------------------
+ // States that the interface has been declared by a parent.
+ bool fIsInterfaceDeclaredOnParent;
+
+ //-----------------------------------------------------------------------------------------
+ // States that the interface has been explicitly declared in the interface implementation
+ // list of this type.
+ bool fIsInterfaceDeclaredOnType;
+
+ // If both of the above members are FALSE, then the interface was not declared by a
+ // parent and was not explicitly declared in the interface implementation list, but it
+ // was declared transitively through one of the interfaces appearing in the implementation
+ // list.
+
+ //-----------------------------------------------------------------------------------------
+ InterfaceDeclarationScope(
+ bool fIsInterfaceDeclaredOnParent,
+ bool fIsInterfaceDeclaredOnType)
+ {
+ this->fIsInterfaceDeclaredOnParent = fIsInterfaceDeclaredOnParent;
+ this->fIsInterfaceDeclaredOnType = fIsInterfaceDeclaredOnType;
+ }
+ }; // struct InterfaceDeclarationScope
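+
+    // --------------------------------------------------------------------------------------------
+    // Illustrative mapping (commentary only) of the two flags above to the three ways an
+    // interface can enter the interface map. The C#-style shapes are hypothetical:
+    //
+    //   interface I2 : I3 { }
+    //   class A : I1 { }
+    //   class B : A, I2 { }
+    //
+    // While building B:
+    //   I1 => fIsInterfaceDeclaredOnParent = true,  fIsInterfaceDeclaredOnType = false
+    //   I2 => fIsInterfaceDeclaredOnParent = false, fIsInterfaceDeclaredOnType = true
+    //   I3 => both false (declared only transitively through I2)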
+
+ // --------------------------------------------------------------------------------------------
+ // This type contains information about the implementation of a particular interface slot.
+ class bmtInterfaceSlotImpl
+ {
+ public:
+ //-----------------------------------------------------------------------------------------
+ // Default constructor.
+ bmtInterfaceSlotImpl()
+ : m_decl(),
+ m_implSlotIndex(INVALID_SLOT_INDEX)
+ { LIMITED_METHOD_CONTRACT; }
+
+ //-----------------------------------------------------------------------------------------
+ // Constructor.
+ bmtInterfaceSlotImpl(
+ const bmtMethodHandle & decl,
+ SLOT_INDEX implSlotIndex)
+ : m_decl(decl),
+ m_implSlotIndex(implSlotIndex)
+ { LIMITED_METHOD_CONTRACT; }
+
+ //-----------------------------------------------------------------------------------------
+ // Copy constructor
+ bmtInterfaceSlotImpl(
+ const bmtInterfaceSlotImpl & other)
+ : m_decl(other.m_decl),
+ m_implSlotIndex(other.m_implSlotIndex)
+ { LIMITED_METHOD_CONTRACT; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns a mutable reference to the decl of the slot.
+ bmtMethodHandle &
+ Decl()
+ { LIMITED_METHOD_CONTRACT; return m_decl; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns a mutable reference to the slot index for the impl of the slot.
+ SLOT_INDEX &
+ Impl()
+ { LIMITED_METHOD_CONTRACT; return m_implSlotIndex; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns a constant reference to the decl of the slot.
+ const bmtMethodHandle &
+ Decl() const
+ { LIMITED_METHOD_CONTRACT; return m_decl; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns a constant reference to the slot index for the impl of the slot.
+ const SLOT_INDEX &
+ Impl() const
+ { LIMITED_METHOD_CONTRACT; return m_implSlotIndex; }
+
+ private:
+ bmtMethodHandle m_decl;
+ SLOT_INDEX m_implSlotIndex;
+ }; // class bmtInterfaceSlotImpl
+
+ // --------------------------------------------------------------------------------------------
+ // This type contains information about the implementation of an interface by the type that
+ // is being built. It includes the declaration context in the form of an
+ // InterfaceDeclarationScope (see comments on type for explanation) as well as an array of
+ // bmtInterfaceSlotImpl values, with the number of entries corresponding to the number of
+ // virtual methods declared on the interface. The slots are constructed with default values
+ // which are interpreted as meaning that the slot has no implementation. Only when an
+ // implementation is found for a slot is the slot updated. Note that this does not include
+ // overrides for methods in slots that already contributed to this interface's implementation,
+ // which can happen when an interface implementation is inherited.
+ class bmtInterfaceEntry
+ {
+ public:
+ //-----------------------------------------------------------------------------------------
+ // Constructor. A default constructor would not be appropriate.
+ bmtInterfaceEntry(
+ bmtRTType * pItfType,
+ const InterfaceDeclarationScope & declScope)
+ : m_pType(pItfType),
+ m_pImplTable(NULL), // Lazily created
+ m_cImplTable(0),
+ m_declScope(declScope),
+ m_equivalenceSet(0),
+ m_fEquivalenceSetWithMultipleEntries(false)
+ { LIMITED_METHOD_CONTRACT; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the bmtRTType for the interface type.
+ bmtRTType *
+ GetInterfaceType() const
+ { LIMITED_METHOD_CONTRACT; return m_pType; }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns a reference to a bool. The value is true if the interface is explicitly
+ // declared within the type's interface list; false otherwise.
+ bool &
+ IsDeclaredOnType()
+ { LIMITED_METHOD_CONTRACT; return m_declScope.fIsInterfaceDeclaredOnType; }
+
+ //-----------------------------------------------------------------------------------------
+ // const version
+ const bool &
+ IsDeclaredOnType() const
+ { LIMITED_METHOD_CONTRACT; return m_declScope.fIsInterfaceDeclaredOnType; }
+
+ //-----------------------------------------------------------------------------------------
+        // Returns a const reference to a bool. The value is true if the interface is
+        // implemented by a parent type; false otherwise. Only a const version is provided
+        // because this value never needs to change dynamically.
+ const bool &
+ IsImplementedByParent()
+ { LIMITED_METHOD_CONTRACT; return m_declScope.fIsInterfaceDeclaredOnParent; }
+
+ //-----------------------------------------------------------------------------------------
+ // Used to iterate the interface implementation slots.
+ typedef IteratorUtil::ArrayIterator<bmtInterfaceSlotImpl>
+ InterfaceSlotIterator;
+
+ InterfaceSlotIterator
+ IterateInterfaceSlots(
+ StackingAllocator * pStackingAllocator)
+ {
+ WRAPPER_NO_CONTRACT;
+ CheckCreateSlotTable(pStackingAllocator);
+ return InterfaceSlotIterator(m_pImplTable, m_cImplTable);
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the number of interface implementation slots.
+ SLOT_INDEX
+ GetInterfaceSlotImplCount()
+ { LIMITED_METHOD_CONTRACT; return m_cImplTable; }
+
+ //-----------------------------------------------------------------------------------------
+ // Subscript operator.
+ bmtInterfaceSlotImpl &
+ operator[](
+ SLOT_INDEX idx)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(CheckPointer(m_pImplTable));
+ CONSISTENCY_CHECK(idx < m_cImplTable);
+ return m_pImplTable[idx];
+ }
+
+ //-----------------------------------------------------------------------------------------
+ const bmtInterfaceSlotImpl &
+ operator[](
+ SLOT_INDEX idx) const
+ {
+ return (*const_cast<bmtInterfaceEntry *>(this))[idx];
+ }
+
+ //-----------------------------------------------------------------------------------------
+ void SetInterfaceEquivalenceSet(UINT32 iEquivalenceSet, bool fEquivalenceSetWithMultipleEntries)
+ {
+ LIMITED_METHOD_CONTRACT;
+            // An equivalence set of 0 indicates the value has not yet been calculated.
+            // Once calculated, the equivalence set may only ever be set to that one value.
+ _ASSERTE((m_equivalenceSet == 0) || (m_equivalenceSet == iEquivalenceSet));
+ m_equivalenceSet = iEquivalenceSet;
+ m_fEquivalenceSetWithMultipleEntries = fEquivalenceSetWithMultipleEntries;
+ }
+
+ UINT32 GetInterfaceEquivalenceSet()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // The equivalence set of 0 indicates the value has not yet been calculated.
+ // We should not be calling this method before calculating equivalence sets
+ _ASSERTE(m_equivalenceSet != 0);
+ return m_equivalenceSet;
+ }
+
+ bool InEquivalenceSetWithMultipleEntries()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // The equivalence set of 0 indicates the value has not yet been calculated.
+ // We should not be calling this method before calculating equivalence sets
+ _ASSERTE(m_equivalenceSet != 0);
+ return m_fEquivalenceSetWithMultipleEntries;
+ }
+
+ private:
+ //-----------------------------------------------------------------------------------------
+ void
+ CheckCreateSlotTable(
+ StackingAllocator * pStackingAllocator)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (m_pImplTable == NULL)
+ {
+ CreateSlotTable(pStackingAllocator);
+ }
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // This creates the interface slot implementation table and correctly creates interface
+ // methods and sets them in the Decl property for each slot.
+ void
+ CreateSlotTable(
+ StackingAllocator * pStackingAllocator);
+
+ //-----------------------------------------------------------------------------------------
+ bmtRTType * m_pType;
+ bmtInterfaceSlotImpl * m_pImplTable;
+ SLOT_INDEX m_cImplTable;
+ InterfaceDeclarationScope m_declScope;
+ UINT32 m_equivalenceSet;
+ bool m_fEquivalenceSetWithMultipleEntries;
+ }; // class bmtInterfaceEntry
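+
+    // --------------------------------------------------------------------------------------------
+    // Illustrative sketch (commentary only, not compiled): recording which vtable slot
+    // implements each decl on an interface entry, using only the members declared above.
+    // MatchesDecl and the locals are hypothetical; IterateInterfaceSlots must run first,
+    // since it lazily creates the slot table that operator[] asserts against.
+    //
+    //   pEntry->IterateInterfaceSlots(pStackingAllocator); // ensures the table exists
+    //   for (SLOT_INDEX i = 0; i < pEntry->GetInterfaceSlotImplCount(); ++i)
+    //   {
+    //       bmtInterfaceSlotImpl & slot = (*pEntry)[i];
+    //       if (MatchesDecl(slot.Decl(), implSig))  // hypothetical name+sig predicate
+    //           slot.Impl() = implSlotIndex;        // record the implementing slot
+    //   }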
+
+ // --------------------------------------------------------------------------------------------
+ // Contains the list of implemented interfaces as an array of bmtInterfaceEntry values.
+ struct bmtInterfaceInfo
+ {
+ bmtInterfaceEntry * pInterfaceMap;
+ DWORD dwInterfaceMapSize; // count of entries in interface map
+ DWORD dwInterfaceMapAllocated; // upper bound on size of interface map
+#ifdef _DEBUG
+ // Should we inject interface duplicates for this type? (Parent has its own value stored in
+ // code:MethodTable::dbg_m_fHasInjectedInterfaceDuplicates)
+ BOOL dbg_fShouldInjectInterfaceDuplicates;
+#endif //_DEBUG
+
+ //-----------------------------------------------------------------------------------------
+ // Used to iterate the interface entries in the map.
+ typedef IteratorUtil::ArrayIterator<bmtInterfaceEntry> MapIterator;
+
+ MapIterator
+ IterateInterfaceMap()
+ { return MapIterator(pInterfaceMap, dwInterfaceMapSize); }
+
+ //-----------------------------------------------------------------------------------------
+ // Constructor
+        inline bmtInterfaceInfo() { LIMITED_METHOD_CONTRACT; memset((void *)this, 0, sizeof(*this)); }
+ }; // struct bmtInterfaceInfo
+
+ // --------------------------------------------------------------------------------------------
+ // Contains information on fields derived from the metadata of the type.
+ struct bmtEnumFieldInfo
+ {
+ // Counts instance fields
+ DWORD dwNumInstanceFields;
+
+ // Counts both regular statics and thread statics. Currently RVA and
+ // context statics get lumped in with "regular statics".
+ DWORD dwNumStaticFields;
+ DWORD dwNumStaticObjRefFields;
+ DWORD dwNumStaticBoxedFields;
+
+ // We keep a separate count for just thread statics
+ DWORD dwNumThreadStaticFields;
+ DWORD dwNumThreadStaticObjRefFields;
+ DWORD dwNumThreadStaticBoxedFields;
+
+        DWORD dwNumDeclaredFields; // For calculating the number of FieldDescs to allocate
+
+ IMDInternalImport *m_pInternalImport;
+
+ //-----------------------------------------------------------------------------------------
+ inline bmtEnumFieldInfo(IMDInternalImport *pInternalImport)
+ {
+ LIMITED_METHOD_CONTRACT;
+            memset((void *)this, 0, sizeof(*this));
+ m_pInternalImport = pInternalImport;
+ }
+ }; // struct bmtEnumFieldInfo
+
+ // --------------------------------------------------------------------------------------------
+ // This contains information specifically about the methods declared by the type being built.
+ struct bmtMethodInfo
+ {
+ //-----------------------------------------------------------------------------------------
+ // The array and bounds of the bmtMDMethod array
+ SLOT_INDEX m_cDeclaredMethods;
+ SLOT_INDEX m_cMaxDeclaredMethods;
+ bmtMDMethod ** m_rgDeclaredMethods;
+
+ //-----------------------------------------------------------------------------------------
+ DWORD dwNumDeclaredNonAbstractMethods; // For calculating approx generic dictionary size
+ DWORD dwNumberMethodImpls; // Number of method impls defined for this type
+ DWORD dwNumberInexactMethodImplCandidates; // Number of inexact method impl candidates (used for type equivalent interfaces)
+
+ //-----------------------------------------------------------------------------------------
+ // Constructor
+ inline bmtMethodInfo()
+        { LIMITED_METHOD_CONTRACT; memset((void *)this, 0, sizeof(*this)); }
+
+ //-----------------------------------------------------------------------------------------
+ // Add a declared method to the array
+ void
+ AddDeclaredMethod(
+ bmtMDMethod * pMethod)
+ {
+ CONSISTENCY_CHECK(m_cDeclaredMethods < m_cMaxDeclaredMethods);
+ m_rgDeclaredMethods[m_cDeclaredMethods++] = pMethod;
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Subscript operator
+ bmtMDMethod *
+ operator[](
+ SLOT_INDEX idx) const
+ {
+ CONSISTENCY_CHECK(idx < m_cDeclaredMethods);
+ return m_rgDeclaredMethods[idx];
+ }
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the number of declared methods.
+ SLOT_INDEX
+ GetDeclaredMethodCount()
+ { LIMITED_METHOD_CONTRACT; return m_cDeclaredMethods; }
+
+ //-----------------------------------------------------------------------------------------
+ // Searches the declared methods for a method with a token value equal to tok.
+ bmtMDMethod *
+ FindDeclaredMethodByToken(
+ mdMethodDef tok)
+ {
+ LIMITED_METHOD_CONTRACT;
+ for (SLOT_INDEX i = 0; i < m_cDeclaredMethods; ++i)
+ {
+ if ((*this)[i]->GetMethodSignature().GetToken() == tok)
+ {
+ return (*this)[i];
+ }
+ }
+ return NULL;
+ }
+ }; // struct bmtMethodInfo
+
+ // --------------------------------------------------------------------------------------------
+    // Stores metadata info for the type being built.
+ struct bmtMetaDataInfo
+ {
+ //-----------------------------------------------------------------------------------------
+ DWORD cFields; // # meta-data fields of this class
+ mdToken *pFields; // Enumeration of metadata fields
+ DWORD *pFieldAttrs; // Enumeration of the attributes of the fields
+
+ //-----------------------------------------------------------------------------------------
+ // Stores the method impl tokens as a pair structure to enable qsort to be
+ // performed on the array.
+ struct MethodImplTokenPair
+ {
+ mdToken methodBody; // MethodDef's for the bodies of MethodImpls. Must be defined in this type.
+ mdToken methodDecl; // Method token that body implements. Is a MethodDef or MemberRef
+            // Does this methodimpl need to be considered during inexact methodimpl processing?
+            bool fConsiderDuringInexactMethodImplProcessing;
+            // If, when considered during inexact methodimpl processing, it does not match any declaration method, throw.
+            // This detects situations where a methodimpl does not match any method on any equivalent interface.
+            bool fThrowIfUnmatchedDuringInexactMethodImplProcessing;
+ UINT32 interfaceEquivalenceSet;// Equivalence set in the interface map to examine
+ static int __cdecl Compare(const void *elem1, const void *elem2);
+ static BOOL Equal(const MethodImplTokenPair *elem1, const MethodImplTokenPair *elem2);
+ };
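+
+        //-----------------------------------------------------------------------------------------
+        // Illustrative sketch (commentary only, not compiled): Compare has the CRT comparator
+        // shape, so the token pairs can be ordered with qsort before processing. The count
+        // argument shown here (bmtMethod->dwNumberMethodImpls) is an assumption about the
+        // caller, not something this struct stores.
+        //
+        //   qsort(rgMethodImplTokens,
+        //         bmtMethod->dwNumberMethodImpls,
+        //         sizeof(MethodImplTokenPair),
+        //         &MethodImplTokenPair::Compare);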
+
+ //-----------------------------------------------------------------------------------------
+ MethodImplTokenPair *rgMethodImplTokens;
+ Substitution *pMethodDeclSubsts; // Used to interpret generic variables in the interface of the declaring type
+
+ //-----------------------------------------------------------------------------------------
+        inline bmtMetaDataInfo() { LIMITED_METHOD_CONTRACT; memset((void *)this, 0, sizeof(*this)); }
+ }; // struct bmtMetaDataInfo
+
+ // --------------------------------------------------------------------------------------------
+    // Stores assorted information related to method and field descs. This should eventually be
+    // separated into appropriate data structures.
+ struct bmtMethAndFieldDescs
+ {
+ //-----------------------------------------------------------------------------------------
+ FieldDesc **ppFieldDescList; // FieldDesc pointer (or NULL if field not preserved) for each field
+
+#ifdef FEATURE_REMOTING
+ //-----------------------------------------------------------------------------------------
+ // Tracking info for VTS (Version Tolerant Serialization)
+ MethodDesc *pOnSerializingMethod;
+ MethodDesc *pOnSerializedMethod;
+ MethodDesc *pOnDeserializingMethod;
+ MethodDesc *pOnDeserializedMethod;
+ bool *prfNotSerializedFields;
+ bool *prfOptionallySerializedFields;
+ bool fNeedsRemotingVtsInfo;
+
+ //-----------------------------------------------------------------------------------------
+ inline void SetFieldNotSerialized(DWORD dwIndex, DWORD dwNumInstanceFields)
+ {
+ WRAPPER_NO_CONTRACT;
+ if (prfNotSerializedFields == NULL)
+ {
+ DWORD cbSize = sizeof(bool) * dwNumInstanceFields;
+ prfNotSerializedFields = new (&GetThread()->m_MarshalAlloc) bool[dwNumInstanceFields];
+ ZeroMemory(prfNotSerializedFields, cbSize);
+ }
+ prfNotSerializedFields[dwIndex] = true;
+ fNeedsRemotingVtsInfo = true;
+ }
+
+ //-----------------------------------------------------------------------------------------
+ inline void SetFieldOptionallySerialized(DWORD dwIndex, DWORD dwNumInstanceFields)
+ {
+ WRAPPER_NO_CONTRACT;
+ if (prfOptionallySerializedFields == NULL)
+ {
+ prfOptionallySerializedFields = new (&GetThread()->m_MarshalAlloc) bool[dwNumInstanceFields];
+ ZeroMemory(prfOptionallySerializedFields, sizeof(bool) * dwNumInstanceFields);
+ }
+ prfOptionallySerializedFields[dwIndex] = true;
+ fNeedsRemotingVtsInfo = true;
+ }
+#endif // FEATURE_REMOTING
+
+ //-----------------------------------------------------------------------------------------
+        inline bmtMethAndFieldDescs() { LIMITED_METHOD_CONTRACT; memset((void *)this, 0, sizeof(*this)); }
+ }; // struct bmtMethAndFieldDescs
+
+ // --------------------------------------------------------------------------------------------
+ // Information about the placement of fields during field layout.
+ struct bmtFieldPlacement
+ {
+ // For compacting field placement
+ DWORD InstanceFieldStart[MAX_LOG2_PRIMITIVE_FIELD_SIZE+1];
+
+ DWORD NumInstanceFieldsOfSize[MAX_LOG2_PRIMITIVE_FIELD_SIZE+1];
+ DWORD FirstInstanceFieldOfSize[MAX_LOG2_PRIMITIVE_FIELD_SIZE+1];
+ DWORD GCPointerFieldStart;
+ DWORD NumInstanceGCPointerFields; // does not include inherited pointer fields
+ DWORD NumGCPointerSeries;
+ DWORD NumInstanceFieldBytes;
+
+ bool fHasFixedAddressValueTypes;
+ bool fHasSelfReferencingStaticValueTypeField_WithRVA;
+
+ // These data members are specific to regular statics
+ DWORD RegularStaticFieldStart[MAX_LOG2_PRIMITIVE_FIELD_SIZE+1]; // Byte offset where to start placing fields of this size
+ DWORD NumRegularStaticFieldsOfSize[MAX_LOG2_PRIMITIVE_FIELD_SIZE+1]; // # Fields of this size
+ DWORD NumRegularStaticGCPointerFields; // does not include inherited pointer fields
+ DWORD NumRegularStaticGCBoxedFields; // does not include inherited pointer fields
+
+ // These data members are specific to thread statics
+ DWORD ThreadStaticFieldStart[MAX_LOG2_PRIMITIVE_FIELD_SIZE+1]; // Byte offset where to start placing fields of this size
+ DWORD NumThreadStaticFieldsOfSize[MAX_LOG2_PRIMITIVE_FIELD_SIZE+1]; // # Fields of this size
+ DWORD NumThreadStaticGCPointerFields; // does not include inherited pointer fields
+ DWORD NumThreadStaticGCBoxedFields; // does not include inherited pointer fields
+
+ inline bmtFieldPlacement() { LIMITED_METHOD_CONTRACT; memset((void *)this, 0, sizeof(*this)); }
+ }; // struct bmtFieldPlacement
+
+ // --------------------------------------------------------------------------------------------
+    // Miscellaneous information about the type being built.
+ struct bmtInternalInfo
+ {
+ //-----------------------------------------------------------------------------------------
+ // Metadata for accessing information on the type
+ IMDInternalImport *pInternalImport;
+ Module *pModule;
+
+ //-----------------------------------------------------------------------------------------
+ // Parent method table. It is identical to pType->GetParentType()->GetMethodTable(),
+ // except for EnC. pParentMT is initialized but pType is not when InitializeFieldDesc
+ // is directly called by EnC.
+ MethodTable * pParentMT;
+
+ //-----------------------------------------------------------------------------------------
+ // The representation of the type being built
+ bmtMDType * pType;
+
+ //-----------------------------------------------------------------------------------------
+ // Constructor
+        inline bmtInternalInfo() { LIMITED_METHOD_CONTRACT; memset((void *)this, 0, sizeof(*this)); }
+ }; // struct bmtInternalInfo
+
+
+ // --------------------------------------------------------------------------------------------
+ // Used for analyzing overlapped fields defined by explicit layout types.
+ enum bmtFieldLayoutTag {empty, nonoref, oref};
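+
+    // --------------------------------------------------------------------------------------------
+    // Illustrative only: the explicit layout analysis tags each byte of the instance as
+    // empty, nonoref, or oref, and an oref byte overlapping a nonoref byte is exactly the
+    // condition it exists to reject. Hypothetical C#-style shape, shown as commentary:
+    //
+    //   [StructLayout(LayoutKind.Explicit)]
+    //   class Bad
+    //   {
+    //       [FieldOffset(0)] object o;  // bytes 0..(pointer size - 1): oref
+    //       [FieldOffset(0)] int    i;  // bytes 0..3: nonoref -> overlaps the oref bytes
+    //   }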
+
+ // --------------------------------------------------------------------------------------------
+    // Used for calculating GC pointer series for explicit-layout (tdExplicitLayout) types.
+ struct bmtGCSeriesInfo
+ {
+ UINT numSeries;
+ struct Series {
+ UINT offset;
+ UINT len;
+ } *pSeries;
+ bmtGCSeriesInfo() : numSeries(0), pSeries(NULL) {LIMITED_METHOD_CONTRACT;}
+ }; // struct bmtGCSeriesInfo
+
+ // --------------------------------------------------------------------------------------------
+ struct bmtMethodImplInfo
+ {
+ //-----------------------------------------------------------------------------------------
+ // This struct represents the resolved methodimpl pair.
+ struct Entry
+ {
+ bmtMethodHandle declMethod;
+ bmtMDMethod * pImplMethod;
+
+ Entry(bmtMDMethod * pImplMethodIn,
+ bmtMethodHandle declMethodIn)
+ : declMethod(declMethodIn),
+ pImplMethod(pImplMethodIn)
+ {}
+
+ Entry()
+ : declMethod(),
+ pImplMethod(NULL)
+ {}
+ };
+
+ //-----------------------------------------------------------------------------------------
+ // The allocated array of entries and the count indicating how many entries are in use.
+ private:
+ Entry *rgEntries;
+ DWORD cMaxIndex;
+
+ //-----------------------------------------------------------------------------------------
+ // Returns the MethodDesc* for the implementation of the methodimpl pair.
+ MethodDesc*
+ GetBodyMethodDesc(
+ DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(i < pIndex);
+ return GetImplementationMethod(i)->GetMethodDesc();
+ }
+
+ public:
+
+        DWORD pIndex; // Next open spot in the array. Bodies are loaded in order of
+                       // appearance in the type's list of methods (a body can appear
+                       // more than once in the list of MethodImpls).
+
+
+ //-----------------------------------------------------------------------------------------
+ // Add a methodimpl to the list.
+ void
+ AddMethodImpl(
+ bmtMDMethod * pImplMethod,
+ bmtMethodHandle declMethod,
+ StackingAllocator * pStackingAllocator);
+
+ //-----------------------------------------------------------------------------------------
+ // Get the decl method for a particular methodimpl entry.
+ bmtMethodHandle
+ GetDeclarationMethod(
+ DWORD i)
+ { LIMITED_METHOD_CONTRACT; _ASSERTE(i < pIndex); return rgEntries[i].declMethod; }
+
+ //-----------------------------------------------------------------------------------------
+ // Get the impl method for a particular methodimpl entry.
+ bmtMDMethod *
+ GetImplementationMethod(
+ DWORD i)
+ { LIMITED_METHOD_CONTRACT; _ASSERTE(i < pIndex); return rgEntries[i].pImplMethod; }
+
+ //-----------------------------------------------------------------------------------------
+ // Constructor
+ inline bmtMethodImplInfo()
+        { LIMITED_METHOD_CONTRACT; memset((void*) this, 0, sizeof(*this)); }
+
+ //-----------------------------------------------------------------------------------------
+        // Returns TRUE if tok acts as a body for any methodImpl entry; FALSE otherwise.
+ BOOL IsBody(
+ mdToken tok);
+ }; // struct bmtMethodImplInfo
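+
+    // --------------------------------------------------------------------------------------------
+    // Illustrative sketch (commentary only, not compiled): recording a resolved methodimpl
+    // pair and reading it back. pIndex is the in-use count advanced by AddMethodImpl; the
+    // locals are hypothetical.
+    //
+    //   bmtMethodImpl->AddMethodImpl(pImplMethod, hDeclMethod, GetStackingAllocator());
+    //   for (DWORD i = 0; i < bmtMethodImpl->pIndex; ++i)
+    //   {
+    //       bmtMethodHandle hDecl = bmtMethodImpl->GetDeclarationMethod(i);
+    //       bmtMDMethod *   pImpl = bmtMethodImpl->GetImplementationMethod(i);
+    //       // ... place pImpl in the slot(s) declared by hDecl ...
+    //   }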
+
+ // --------------------------------------------------------------------------------------------
+ // These are all the memory allocators available to MethodTableBuilder
+
+ StackingAllocator * m_pStackingAllocator;
+ AllocMemTracker * m_pAllocMemTracker;
+
+ StackingAllocator *
+ GetStackingAllocator()
+ { LIMITED_METHOD_CONTRACT; return m_pStackingAllocator; }
+
+ LoaderAllocator *
+ GetLoaderAllocator()
+ { LIMITED_METHOD_CONTRACT; return bmtAllocator; }
+
+ AllocMemTracker *
+ GetMemTracker()
+ { LIMITED_METHOD_CONTRACT; return m_pAllocMemTracker; }
+
+ BYTE *
+ AllocateFromHighFrequencyHeap(S_SIZE_T cbMem);
+
+ BYTE *
+ AllocateFromLowFrequencyHeap(S_SIZE_T cbMem);
+
+ // --------------------------------------------------------------------------------------------
+    // The following structs, defined as private members of MethodTableBuilder, contain the
+    // local state needed by BuildMethodTable.
+
+ // Look at the struct definitions for a detailed list of all parameters available
+ // to BuildMethodTable.
+
+ LoaderAllocator *bmtAllocator;
+ bmtErrorInfo *bmtError;
+ bmtProperties *bmtProp;
+ bmtVtable *bmtVT;
+ bmtParentInfo *bmtParent;
+ bmtInterfaceInfo *bmtInterface;
+ bmtMetaDataInfo *bmtMetaData;
+ bmtMethodInfo *bmtMethod;
+ bmtMethAndFieldDescs *bmtMFDescs;
+ bmtFieldPlacement *bmtFP;
+ bmtInternalInfo *bmtInternal;
+ bmtGCSeriesInfo *bmtGCSeries;
+ bmtMethodImplInfo *bmtMethodImpl;
+ const bmtGenericsInfo *bmtGenerics;
+ bmtEnumFieldInfo *bmtEnumFields;
+ bmtContextStaticInfo *bmtCSInfo;
+
+ void SetBMTData(
+ LoaderAllocator *bmtAllocator,
+ bmtErrorInfo *bmtError,
+ bmtProperties *bmtProp,
+ bmtVtable *bmtVT,
+ bmtParentInfo *bmtParent,
+ bmtInterfaceInfo *bmtInterface,
+ bmtMetaDataInfo *bmtMetaData,
+ bmtMethodInfo *bmtMethod,
+ bmtMethAndFieldDescs *bmtMFDescs,
+ bmtFieldPlacement *bmtFP,
+ bmtInternalInfo *bmtInternal,
+ bmtGCSeriesInfo *bmtGCSeries,
+ bmtMethodImplInfo *bmtMethodImpl,
+ const bmtGenericsInfo *bmtGenerics,
+ bmtEnumFieldInfo *bmtEnumFields,
+ bmtContextStaticInfo *bmtCSInfo);
+
+ // --------------------------------------------------------------------------------------------
+ // Returns the parent bmtRTType pointer. Can be null if no parent exists.
+ inline bmtRTType *
+ GetParentType()
+ { WRAPPER_NO_CONTRACT; return bmtInternal->pType->GetParentType(); }
+
+ // --------------------------------------------------------------------------------------------
+    // Takes care of checking the parent MethodTable pointer against NULL. Returns true
+    // if the type being built has a parent; returns false otherwise.
+ // NOTE: false will typically only be returned for System.Object and interfaces.
+ inline bool
+ HasParent()
+ {
+ LIMITED_METHOD_CONTRACT; return bmtInternal->pParentMT != NULL;
+ }
+
+ // --------------------------------------------------------------------------------------------
+ inline MethodTable *
+ GetParentMethodTable()
+ {
+ LIMITED_METHOD_CONTRACT; return bmtInternal->pParentMT;
+ }
+
+ // --------------------------------------------------------------------------------------------
+ // Created to help centralize knowledge of where all the information about each method is
+ // stored. Eventually, this can hopefully be removed and it should be sufficient to iterate
+ // over the array of bmtMDMethod* that hold all the declared methods.
+ class DeclaredMethodIterator
+ {
+ private:
+ MethodTableBuilder &m_mtb;
+ int m_idx; // not SLOT_INDEX?
+#ifdef _DEBUG
+ bmtMDMethod * m_debug_pMethod;
+#endif
+
+ public:
+ inline DeclaredMethodIterator(MethodTableBuilder &mtb);
+ inline int CurrentIndex();
+ inline BOOL Next();
+ inline BOOL Prev();
+ inline void ResetToEnd();
+ inline mdToken Token();
+ inline DWORD Attrs();
+ inline DWORD RVA();
+ inline DWORD ImplFlags();
+ inline LPCSTR Name();
+ inline PCCOR_SIGNATURE GetSig(DWORD *pcbSig);
+ inline METHOD_IMPL_TYPE MethodImpl();
+ inline BOOL IsMethodImpl();
+ inline METHOD_TYPE MethodType();
+ inline bmtMDMethod *GetMDMethod();
+ inline MethodDesc *GetIntroducingMethodDesc();
+ inline bmtMDMethod * operator->();
+ inline bmtMDMethod * operator*() { WRAPPER_NO_CONTRACT; return GetMDMethod(); }
+ }; // class DeclaredMethodIterator
+ friend class DeclaredMethodIterator;
+
+ inline SLOT_INDEX NumDeclaredMethods() { LIMITED_METHOD_CONTRACT; return bmtMethod->GetDeclaredMethodCount(); }
+ inline DWORD NumDeclaredFields() { LIMITED_METHOD_CONTRACT; return bmtEnumFields->dwNumDeclaredFields; }
+
+ // --------------------------------------------------------------------------------------------
+ // Used to report an error building this type.
+ static VOID DECLSPEC_NORETURN
+ BuildMethodTableThrowException(
+ HRESULT hr,
+ const bmtErrorInfo & bmtError);
+
+ // --------------------------------------------------------------------------------------------
+ // Used to report an error building this type.
+ inline VOID DECLSPEC_NORETURN
+ BuildMethodTableThrowException(
+ HRESULT hr,
+ UINT idResWhy,
+ mdMethodDef tokMethodDef)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ bmtError->resIDWhy = idResWhy;
+ bmtError->dMethodDefInError = tokMethodDef;
+ bmtError->szMethodNameForError = NULL;
+ bmtError->cl = GetCl();
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+
+ // --------------------------------------------------------------------------------------------
+ // Used to report an error building this type.
+ inline VOID DECLSPEC_NORETURN
+ BuildMethodTableThrowException(
+ HRESULT hr,
+ UINT idResWhy,
+ LPCUTF8 szMethodName)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ bmtError->resIDWhy = idResWhy;
+ bmtError->dMethodDefInError = mdMethodDefNil;
+ bmtError->szMethodNameForError = szMethodName;
+ bmtError->cl = GetCl();
+ BuildMethodTableThrowException(hr, *bmtError);
+ }
+
+ // --------------------------------------------------------------------------------------------
+ // Used to report an error building this type.
+ inline VOID DECLSPEC_NORETURN
+ BuildMethodTableThrowException(
+ UINT idResWhy,
+ mdMethodDef tokMethodDef = mdMethodDefNil)
+ {
+ WRAPPER_NO_CONTRACT;
+ BuildMethodTableThrowException(COR_E_TYPELOAD, idResWhy, tokMethodDef);
+ }
+
+ // --------------------------------------------------------------------------------------------
+ // Used to report an error building this type.
+ inline VOID DECLSPEC_NORETURN
+ BuildMethodTableThrowException(
+ UINT idResWhy,
+ LPCUTF8 szMethodName)
+ {
+ WRAPPER_NO_CONTRACT;
+ BuildMethodTableThrowException(COR_E_TYPELOAD, idResWhy, szMethodName);
+ }
+
+private:
+ // --------------------------------------------------------------------------------------------
+ // To be removed. Creates a hash table of all the names of the virtual methods in pMT,
+ // and associates them with their corresponding bmtRTMethod* values.
+ MethodNameHash *CreateMethodChainHash(
+ MethodTable *pMT);
+
+ // --------------------------------------------------------------------------------------------
+ // Only used in the resolve phase of the classloader. These are used to calculate
+ // the interface implementation map. The reason it is done in this way is that the
+ // interfaces must be resolved in light of generic types and substitutions, and the fact
+ // that substitutions can make interfaces resolve to be identical when given a child's
+ // instantiation.
+ //
+ // NOTE: See DevDiv bug 795 for details.
+
+ void ExpandApproxInterface(
+ bmtInterfaceInfo * bmtInterface, // out parameter, various parts cumulatively written to.
+ const Substitution * pNewInterfaceSubstChain,
+ MethodTable * pNewInterface,
+ InterfaceDeclarationScope declScope
+ COMMA_INDEBUG(MethodTable * dbg_pClassMT));
+
+ void ExpandApproxDeclaredInterfaces(
+ bmtInterfaceInfo * bmtInterface, // out parameter, various parts cumulatively written to.
+ bmtTypeHandle thType,
+ InterfaceDeclarationScope declScope
+ COMMA_INDEBUG(MethodTable * dbg_pClassMT));
+
+ void ExpandApproxInheritedInterfaces(
+ bmtInterfaceInfo * bmtInterface, // out parameter, various parts cumulatively written to.
+ bmtRTType * pParentType);
+
+ void LoadApproxInterfaceMap();
+
+public:
+ //------------------------------------------------------------------------
+    // Loading exact interface instantiations (slow technique).
+ //
+ // These place the exact interface instantiations into the interface map at the
+ // appropriate locations.
+
+ struct bmtExactInterfaceInfo
+ {
+ DWORD nAssigned;
+ MethodTable **pExactMTs;
+
+ // Array of substitutions for each interface in the interface map
+ Substitution * pInterfaceSubstitution;
+ SigTypeContext typeContext; // Exact type context used to supply final instantiation to substitution chains
+
+        inline bmtExactInterfaceInfo() { LIMITED_METHOD_CONTRACT; memset((void *)this, 0, sizeof(*this)); }
+ }; // struct bmtExactInterfaceInfo
+
+private:
+ static void
+ ExpandExactInterface(
+ bmtExactInterfaceInfo * bmtInfo,
+ MethodTable * pIntf,
+ const Substitution * pSubstForTypeLoad_OnStack, // Allocated on stack!
+ const Substitution * pSubstForComparing_OnStack // Allocated on stack!
+ COMMA_INDEBUG(MethodTable * dbg_pClassMT));
+
+public:
+ static void
+ ExpandExactDeclaredInterfaces(
+ bmtExactInterfaceInfo * bmtInfo,
+ Module * pModule,
+ mdToken typeDef,
+ const Substitution * pSubstForTypeLoad,
+ Substitution * pSubstForComparing
+ COMMA_INDEBUG(MethodTable * dbg_pClassMT));
+
+ static void
+ ExpandExactInheritedInterfaces(
+ bmtExactInterfaceInfo * bmtInfo,
+ MethodTable * pParentMT,
+ const Substitution * pSubstForTypeLoad,
+ Substitution * pSubstForComparing);
+
+public:
+ // --------------------------------------------------------------------------------------------
+ // Interface ambiguity checks when loading exact interface instantiations
+ //
+ // These implement the check that the exact instantiation does not introduce any
+ // ambiguity in the interface dispatch logic, i.e. amongst the freshly declared interfaces.
+
+ struct bmtInterfaceAmbiguityCheckInfo
+ {
+ MethodTable *pMT;
+ DWORD nAssigned;
+ MethodTable **ppExactDeclaredInterfaces;
+ Substitution **ppInterfaceSubstitutionChains;
+ SigTypeContext typeContext;
+
+        inline bmtInterfaceAmbiguityCheckInfo() { LIMITED_METHOD_CONTRACT; memset((void *)this, 0, sizeof(*this)); }
+ }; // struct bmtInterfaceAmbiguityCheckInfo
+
+ static void
+ InterfacesAmbiguityCheck(
+ bmtInterfaceAmbiguityCheckInfo *,
+ Module *pModule,
+ mdToken typeDef,
+ const Substitution *pSubstChain);
+
+private:
+ static void
+ InterfaceAmbiguityCheck(
+ bmtInterfaceAmbiguityCheckInfo *,
+ const Substitution *pSubstChain,
+ MethodTable *pIntfMT);
+
+public:
+ static void
+ LoadExactInterfaceMap(
+ MethodTable *pMT);
+
+ // --------------------------------------------------------------------------------------------
+ // Copy virtual slots inherited from parent:
+ //
+ // In types created at runtime, inherited virtual slots are initialized using approximate parent
+ // during method table building. This method will update them based on the exact parent.
+ // In types loaded from NGen image, inherited virtual slots from cross-module parents are not
+ // initialized. This method will initialize them based on the actually loaded exact parent
+ // if necessary.
+ //
+ static void
+ CopyExactParentSlots(
+ MethodTable *pMT,
+ MethodTable *pApproxParentMT);
+
+ // --------------------------------------------------------------------------------------------
+ // This is used at load time, using metadata-based comparisons. It returns the array of dispatch
+ // map TypeIDs to be used for pDeclIntfMT.
+ //
+ // Arguments:
+ // rg/c DispatchMapTypeIDs - Array of TypeIDs and its count of elements.
+    //    pcIfaceDuplicates - Number of duplicate occurrences of the interface in the interface map (ideally <=
+    //      the count of elements in rgDispatchMapTypeIDs).
+ //
+ void
+ ComputeDispatchMapTypeIDs(
+        MethodTable * pDeclIntfMT,
+ const Substitution * pDeclIntfSubst,
+ DispatchMapTypeID * rgDispatchMapTypeIDs,
+ UINT32 cDispatchMapTypeIDs,
+ UINT32 * pcIfaceDuplicates);
+
+private:
+ // --------------------------------------------------------------------------------------------
+ // Looks for a virtual method in the parent matching methodSig. pMethodConstraintsMatch is
+ // set if a match is found indicating whether or not the method constraint check passes.
+ bmtRTMethod *
+ LoaderFindMethodInParentClass(
+ const MethodSignature & methodSig,
+ BOOL * pMethodConstraintsMatch);
+
+ // --------------------------------------------------------------------------------------------
+ //
+ VOID
+ ResolveInterfaces(
+ WORD cEntries,
+ BuildingInterfaceInfo_t* pEntries);
+
+ // --------------------------------------------------------------------------------------------
+ VOID
+ ComputeModuleDependencies();
+
+ // --------------------------------------------------------------------------------------------
+ // Finds a method declaration from a MemberRef or Def. It handles the case where
+ // the Ref or Def point back to this class even though it has not been fully
+ // laid out.
+ HRESULT
+ FindMethodDeclarationForMethodImpl(
+ mdToken pToken, // Token that is being located (MemberRef or MemberDef)
+ mdToken* pDeclaration, // Method definition for Member
+ BOOL fSameClass); // Does the declaration need to be in this class
+
+ // --------------------------------------------------------------------------------------------
+ // Enumerates the method impl token pairs and resolves the impl tokens to mdtMethodDef
+ // tokens, since we currently have the limitation that all impls are in the current class.
+ VOID
+ EnumerateMethodImpls();
+
+ // --------------------------------------------------------------------------------------------
+ // Enumerates the methods declared by the class and populates the bmtMethod member with
+ // bmtMDMethods* for each declared method.
+ VOID
+ EnumerateClassMethods();
+
+ // --------------------------------------------------------------------------------------------
+ // Enumerates the fields declared by the type and populates bmtEnumFields.
+ VOID
+ EnumerateClassFields();
+
+ // --------------------------------------------------------------------------------------------
+ // Allocate temporary memory for tracking all information used in building the MethodTable
+ VOID
+ AllocateWorkingSlotTables();
+
+ // --------------------------------------------------------------------------------------------
+    // Allocates all of the FieldDescs required after enumerating the fields declared by the type.
+ VOID
+ AllocateFieldDescs();
+
+ // --------------------------------------------------------------------------------------------
+ // Initializes all allocated FieldDescs
+ VOID
+ InitializeFieldDescs(
+ FieldDesc *,
+ const LayoutRawFieldInfo*,
+ bmtInternalInfo*,
+ const bmtGenericsInfo*,
+ bmtMetaDataInfo*,
+ bmtEnumFieldInfo*,
+ bmtErrorInfo*,
+ MethodTable***,
+ bmtMethAndFieldDescs*,
+ bmtFieldPlacement*,
+ bmtContextStaticInfo*,
+ unsigned * totalDeclaredSize);
+
+ // --------------------------------------------------------------------------------------------
+ // Verify self-referencing static ValueType fields with RVA (when the size of the ValueType is known).
+ void
+ VerifySelfReferencingStaticValueTypeFields_WithRVA(
+ MethodTable ** pByValueClassCache);
+
+ // --------------------------------------------------------------------------------------------
+ // Returns TRUE if dwByValueClassToken refers to the type being built; otherwise returns FALSE.
+ BOOL
+ IsSelfReferencingStaticValueTypeField(
+ mdToken dwByValueClassToken,
+ bmtInternalInfo* bmtInternal,
+ const bmtGenericsInfo * bmtGenericsInfo,
+ PCCOR_SIGNATURE pMemberSignature,
+ DWORD cMemberSignature);
+
+ // --------------------------------------------------------------------------------------------
+ // Performs rudimentary stand-alone validation of methods declared by the type.
+ VOID
+ ValidateMethods();
+
+ // --------------------------------------------------------------------------------------------
+ // Initialize an allocated MethodDesc.
+ VOID
+ InitMethodDesc(
+ MethodDesc * pNewMD,
+ DWORD Classification,
+ mdToken tok,
+ DWORD dwImplFlags,
+ DWORD dwMemberAttrs,
+ BOOL fEnC,
+ DWORD RVA, // Only needed for NDirect case
+ IMDInternalImport * pIMDII, // Needed for NDirect, EEImpl(Delegate) cases
+ LPCSTR pMethodName // Only needed for mcEEImpl (Delegate) case
+ COMMA_INDEBUG(LPCUTF8 pszDebugMethodName)
+ COMMA_INDEBUG(LPCUTF8 pszDebugClassName)
+ COMMA_INDEBUG(LPCUTF8 pszDebugMethodSignature));
+
+ // --------------------------------------------------------------------------------------------
+ // Convert code:MethodTableBuilder::METHOD_TYPE to code:MethodClassification
+ static DWORD
+ GetMethodClassification(METHOD_TYPE type);
+
+ // --------------------------------------------------------------------------------------------
+ // Will determine if a method requires or inherits any security settings and will set the
+ // appropriate flags on the MethodDesc.
+ VOID
+ SetSecurityFlagsOnMethod(
+ bmtRTMethod * pParentMethod,
+ MethodDesc* pNewMD,
+ mdToken tokMethod,
+ DWORD dwMemberAttrs,
+ bmtInternalInfo* bmtInternal,
+ bmtMetaDataInfo* bmtMetaData);
+
+ // --------------------------------------------------------------------------------------------
+ // Essentially, this is a helper method that combines calls to InitMethodDesc and
+ // SetSecurityFlagsOnMethod. It then assigns the newly initialized MethodDesc to
+ // the bmtMDMethod.
+ VOID
+ InitNewMethodDesc(
+ bmtMDMethod * pMethod,
+ MethodDesc * pNewMD);
+
+ // --------------------------------------------------------------------------------------------
+    // For every declared virtual method, determines if the method is an override or requires a
+    // new slot, performs the proper checks to ensure that the override is valid, and then
+    // places the method in the appropriate slot in bmtVT and sets the SLOT_INDEX value in the
+    // bmtMDMethod and its MethodDesc.
+ VOID
+ PlaceVirtualMethods();
+
+ // --------------------------------------------------------------------------------------------
+ // For every declared non-virtual method, places the method in the next available slot in
+    // the non-virtual section of bmtVT and sets the SLOT_INDEX value in the bmtMDMethod and its
+    // MethodDesc.
+ VOID
+ PlaceNonVirtualMethods();
+
+ // --------------------------------------------------------------------------------------------
+ // Determine the equivalence sets within the interface map
+ // See comment in implementation for more details.
+ VOID ComputeInterfaceMapEquivalenceSet();
+
+ // --------------------------------------------------------------------------------------------
+    // Given an interface in our interface map, and a particular method on that interface, place
+    // a method from the parent type's implementation of an equivalent interface into that method
+    // slot. Used by PlaceInterfaceMethods to make equivalent interface implementations have the
+    // same behavior as if the parent interface were implemented on this type instead of an equivalent interface.
+    // See comment in implementation for an example of where this is necessary.
+ VOID PlaceMethodFromParentEquivalentInterfaceIntoInterfaceSlot(
+ bmtInterfaceEntry::InterfaceSlotIterator &itfSlotIt,
+ bmtInterfaceEntry * pCurItfEntry,
+ DispatchMapTypeID ** prgInterfaceDispatchMapTypeIDs,
+ DWORD dwCurInterface);
+
+ // --------------------------------------------------------------------------------------------
+ // Matches interface methods with implementation methods in this type or a parent type.
+ // See comment in implementation for more details.
+ VOID
+ PlaceInterfaceMethods();
+
+ // --------------------------------------------------------------------------------------------
+ // For every MethodImpl pair (represented by Entry) in bmtMethodImpl, place the body in the
+ // appropriate interface or virtual slot.
+ VOID
+ PlaceMethodImpls();
+
+ // --------------------------------------------------------------------------------------------
+ // This will take the array of bmtMetaData->rgMethodImplTokens and further resolve the tokens
+ // to their corresponding bmtMDMethod or bmtRTMethod pointers and then populate the array
+ // in bmtMethodImpl, which will be used by PlaceMethodImpls
+ VOID
+ ProcessMethodImpls();
+
+ // --------------------------------------------------------------------------------------------
+ // This will take the array of bmtMetaData->rgMethodImplTokens and further resolve the tokens
+ // to their corresponding bmtMDMethod or bmtRTMethod pointers and then populate the array
+ // in bmtMethodImpl for the methodimpls which can resolve to more than one declaration method,
+ // which will be used by PlaceMethodImpls
+ VOID
+ ProcessInexactMethodImpls();
+
+ // --------------------------------------------------------------------------------------------
+ // Find the decl method on a given interface entry that matches the method name+signature specified
+ // If none is found, return a null method handle
+ bmtMethodHandle
+ FindDeclMethodOnInterfaceEntry(bmtInterfaceEntry *pItfEntry, MethodSignature &declSig);
+
+ // --------------------------------------------------------------------------------------------
+ // Throws if an entry already exists that has been MethodImpl'd. Adds the interface slot and
+ // implementation method to the mapping used by virtual stub dispatch.
+ VOID
+ AddMethodImplDispatchMapping(
+ DispatchMapTypeID typeID,
+ SLOT_INDEX slotNumber,
+ bmtMDMethod * pImplMethod);
+
+ // --------------------------------------------------------------------------------------------
+ // Throws if the signatures (excluding names) are not equal or the constraints don't match.
+ // dwConstraintErrorCode is an input argument that states what error to throw in such a case
+ // as the constraints don't match.
+ VOID
+ MethodImplCompareSignatures(
+ bmtMethodHandle hDecl,
+ bmtMethodHandle hImpl,
+ DWORD dwConstraintErrorCode);
+
+ // --------------------------------------------------------------------------------------------
+ // This will provide the array of decls for the slots implemented by a methodImpl MethodDesc.
+ // These are then used to map a slot in a MethodTable to the declaration method to be used in
+ // name+sig matching through method calls and child types.
+ VOID
+ WriteMethodImplData(
+ bmtMDMethod * pImplMethod,
+ DWORD cSlots,
+ DWORD * rgSlots,
+ MethodDesc ** rgDeclMD);
+
+ // --------------------------------------------------------------------------------------------
+ // Places a methodImpl pair where the decl is declared by the type being built.
+ VOID
+ PlaceLocalDeclaration(
+ bmtMDMethod * pDecl,
+ bmtMDMethod * pImpl,
+ DWORD* slots,
+ MethodDesc** replaced,
+ DWORD* pSlotIndex);
+
+ // --------------------------------------------------------------------------------------------
+ // Places a methodImpl pair where the decl is declared by a parent type.
+ VOID
+ PlaceParentDeclaration(
+ bmtRTMethod * pDecl,
+ bmtMDMethod * pImpl,
+ DWORD* slots,
+ MethodDesc** replaced,
+ DWORD* pSlotIndex);
+
+ // --------------------------------------------------------------------------------------------
+ // Places a methodImpl pair where the decl is declared by an interface.
+ VOID
+ PlaceInterfaceDeclaration(
+ bmtRTMethod * pDecl,
+ bmtMDMethod * pImpl,
+ DWORD* slots,
+ MethodDesc** replaced,
+ DWORD* pSlotIndex);
+
+ // --------------------------------------------------------------------------------------------
+ // This will validate that all interface methods that were matched during
+ // layout also validate against type constraints.
+ VOID
+ ValidateInterfaceMethodConstraints();
+
+ // --------------------------------------------------------------------------------------------
+ // Used to allocate and initialize MethodDescs (both the boxed and unboxed entrypoints)
+ VOID
+ AllocAndInitMethodDescs();
+
+ // --------------------------------------------------------------------------------------------
+ // Allocates and initializes one method desc chunk.
+ //
+ // Arguments:
+ // startIndex - index of first method in bmtMethod array.
+ // count - number of methods in this chunk (contiguous region from startIndex)
+ // sizeOfMethodDescs - total expected size of MethodDescs in this chunk
+ //
+ // Used by AllocAndInitMethodDescs.
+ //
+ VOID
+ AllocAndInitMethodDescChunk(COUNT_T startIndex, COUNT_T count, SIZE_T sizeOfMethodDescs);
+
+ // --------------------------------------------------------------------------------------------
+    // MethodTableBuilder equivalent of
+    // code:MethodDesc::IsUnboxingStub && code:MethodDesc::IsTightlyBoundToMethodTable.
+    // Returns true if the MethodTable has to have a true slot for the unboxing stub of this method.
+ // Used for MethodDesc layout.
+ BOOL
+ NeedsTightlyBoundUnboxingStub(bmtMDMethod * pMDMethod);
+
+ // --------------------------------------------------------------------------------------------
+ // MethodTableBuilder equivalent of code:MethodDesc::HasNativeCodeSlot.
+ // Used for MethodDesc layout.
+ BOOL
+ NeedsNativeCodeSlot(bmtMDMethod * pMDMethod);
+
+ // --------------------------------------------------------------------------------------------
+ // MethodTableBuilder version of code:MethodDesc::MayBeRemotingIntercepted. Used for MethodDesc layout.
+ BOOL
+ MayBeRemotingIntercepted(bmtMDMethod * pMDMethod);
+
+ // --------------------------------------------------------------------------------------------
+ // Used to allocate and initialize the dictionary used with generic types.
+ VOID
+ AllocAndInitDictionary();
+
+ VOID
+ PlaceRegularStaticFields();
+
+ VOID
+ PlaceThreadStaticFields();
+
+ VOID
+ PlaceInstanceFields(
+ MethodTable **);
+
+ BOOL
+ CheckForVtsEventMethod(
+ IMDInternalImport *pImport,
+ MethodDesc *pMD,
+ DWORD dwAttrs,
+ LPCUTF8 szAttrName,
+ MethodDesc **ppMethodDesc);
+
+#ifdef FEATURE_REMOTING // affects only remoting-related info
+ VOID ScanTypeForVtsInfo();
+#endif // FEATURE_REMOTING
+
+ VOID
+ CheckForSystemTypes();
+
+ VOID SetupMethodTable2(
+ Module* pLoaderModule
+#ifdef FEATURE_PREJIT
+ , Module* pComputedPZM
+#endif // FEATURE_PREJIT
+ );
+
+ VOID HandleGCForValueClasses(
+ MethodTable **);
+
+ // These methods deal with inheritance security. They're executed
+ // after the type has been constructed, but before it is published.
+ VOID VerifyMethodInheritanceSecurityHelper(
+ MethodDesc *pParentMD,
+ MethodDesc *pChildMD);
+
+ VOID VerifyClassInheritanceSecurityHelper(
+ MethodTable *pParentMT,
+ MethodTable *pChildMT);
+
+ VOID ConvertLinkDemandToInheritanceDemand(MethodDesc *pMDLinkDemand);
+
+ VOID VerifyInheritanceSecurity();
+
+ VOID VerifyEquivalenceSecurity();
+
+ VOID VerifyVirtualMethodsImplemented(MethodTable::MethodData * hMTData);
+
+ VOID CheckForTypeEquivalence(
+ WORD cBuildingInterfaceList,
+ BuildingInterfaceInfo_t *pBuildingInterfaceList);
+
+ VOID EnsureRIDMapsCanBeFilled();
+
+ VOID CheckForRemotingProxyAttrib();
+
+#ifdef FEATURE_COMINTEROP
+
+ VOID GetCoClassAttribInfo();
+
+#endif // FEATURE_COMINTEROP
+
+ VOID CheckForSpecialTypes();
+
+ VOID SetContextfulOrByRef();
+
+#ifdef FEATURE_READYTORUN
+
+ VOID CheckLayoutDependsOnOtherModules(MethodTable * pDependencyMT);
+
+ BOOL NeedsAlignedBaseOffset();
+
+#endif // FEATURE_READYTORUN
+
+ VOID SetFinalizationSemantics();
+
+ VOID HandleExplicitLayout(
+ MethodTable **pByValueClassCache);
+
+ static ExplicitFieldTrust::TrustLevel CheckValueClassLayout(
+ MethodTable * pMT,
+ BYTE * pFieldLayout,
+ DWORD * pFirstObjectOverlapOffset);
+
+ void FindPointerSeriesExplicit(
+ UINT instanceSliceSize,
+ BYTE * pFieldLayout);
+
+ VOID HandleGCForExplicitLayout();
+
+ VOID CheckForHFA(MethodTable ** pByValueClassCache);
+
+ VOID CheckForNativeHFA();
+
+    // This accesses the field size, which is temporarily stored in m_pMTOfEnclosingClass
+    // during class loading. Do not use it at any other time.
+ DWORD GetFieldSize(FieldDesc *pFD);
+
+ bool IsEnclosingNestedTypePair(
+ bmtTypeHandle hBase,
+ bmtTypeHandle hChild);
+
+ bool IsBaseTypeAlsoEnclosingType(
+ bmtTypeHandle hBase,
+ bmtTypeHandle hChild);
+
+ BOOL TestOverrideForAccessibility(
+ bmtMethodHandle hParentMethod,
+ bmtTypeHandle hChildType);
+
+ VOID TestOverRide(
+ bmtMethodHandle hParentMethod,
+ bmtMethodHandle hChildMethod);
+
+ VOID TestMethodImpl(
+ bmtMethodHandle hDeclMethod,
+ bmtMethodHandle hImplMethod);
+
+    // Heuristic to determine if we would like instances of this class to be 8-byte aligned
+ BOOL ShouldAlign8(
+ DWORD dwR8Fields,
+ DWORD dwTotalFields);
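+    // (Illustrative sketch only; the real heuristic lives in the implementation and
+    // may differ.) A policy of this shape would request 8-byte alignment when
+    // double (R8) fields dominate the instance layout, e.g.:
+    //
+    //     return (dwR8Fields * 2 > dwTotalFields);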
+
+#endif // !BINDER
+
+ MethodTable * AllocateNewMT(Module *pLoaderModule,
+ DWORD dwVtableSlots,
+ DWORD dwVirtuals,
+ DWORD dwGCSize,
+ DWORD dwNumInterfaces,
+ DWORD dwNumDicts,
+ DWORD dwNumTypeSlots,
+ MethodTable *pMTParent,
+#ifndef BINDER
+ ClassLoader *pClassLoader,
+ LoaderAllocator *pAllocator,
+#else // BINDER
+ MdilModule *declaringModule,
+ MdilModule *containingModule,
+ BOOL fHasDispatchMap,
+#endif // BINDER
+ BOOL isIFace,
+ BOOL fDynamicStatics,
+ BOOL fHasGenericsStaticsInfo,
+ BOOL fNeedsRCWPerTypeData,
+ BOOL fNeedsRemotableMethodInfo,
+ BOOL fNeedsRemotingVtsInfo,
+ BOOL fHasContextStatics
+#ifdef FEATURE_COMINTEROP
+ , BOOL bHasDynamicInterfaceMap
+#endif
+#ifdef FEATURE_PREJIT
+ , Module *pComputedPZM
+#endif // FEATURE_PREJIT
+ , AllocMemTracker *pamTracker
+ );
+
+}; // class MethodTableBuilder
+
+#ifndef BINDER
+#include "methodtablebuilder.inl"
+#endif // !BINDER
+
+#endif // !METHODTABLEBUILDER_H
diff --git a/src/vm/methodtablebuilder.inl b/src/vm/methodtablebuilder.inl
new file mode 100644
index 0000000000..111a062336
--- /dev/null
+++ b/src/vm/methodtablebuilder.inl
@@ -0,0 +1,524 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: METHODTABLEBUILDER.INL
+//
+
+
+//
+
+//
+// ============================================================================
+
+#ifndef _METHODTABLEBUILDER_INL_
+#define _METHODTABLEBUILDER_INL_
+
+//***************************************************************************************
+inline MethodTableBuilder::DeclaredMethodIterator::DeclaredMethodIterator(
+ MethodTableBuilder &mtb) : m_mtb(mtb), m_idx(-1)
+{
+ LIMITED_METHOD_CONTRACT;
+}
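+
+// Typical use (an illustrative sketch, not code from this file): the iterator is
+// created one position before the first declared method, so Next() must be called
+// before the first element is accessed.
+//
+//     DeclaredMethodIterator methIt(*this);
+//     while (methIt.Next())
+//     {
+//         DWORD dwAttrs = methIt.Attrs();
+//         // ... process the declared method ...
+//     }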
+
+//***************************************************************************************
+inline int MethodTableBuilder::DeclaredMethodIterator::CurrentIndex()
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK_MSG(0 <= m_idx && m_idx < (int)m_mtb.NumDeclaredMethods(),
+ "Invalid iterator state.");
+ return m_idx;
+}
+
+//***************************************************************************************
+inline BOOL MethodTableBuilder::DeclaredMethodIterator::Next()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_idx + 1 >= (int)m_mtb.NumDeclaredMethods())
+ return FALSE;
+ m_idx++;
+ INDEBUG(m_debug_pMethod = GetMDMethod();)
+ return TRUE;
+}
+
+//***************************************************************************************
+inline BOOL MethodTableBuilder::DeclaredMethodIterator::Prev()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_idx - 1 <= -1)
+ return FALSE;
+ m_idx--;
+ INDEBUG(m_debug_pMethod = GetMDMethod();)
+ return TRUE;
+}
+
+//***************************************************************************************
+inline void MethodTableBuilder::DeclaredMethodIterator::ResetToEnd()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_idx = (int)m_mtb.NumDeclaredMethods();
+}
+
+//***************************************************************************************
+inline mdMethodDef MethodTableBuilder::DeclaredMethodIterator::Token()
+{
+ STANDARD_VM_CONTRACT;
+ CONSISTENCY_CHECK(TypeFromToken(GetMDMethod()->GetMethodSignature().GetToken()) == mdtMethodDef);
+ return GetMDMethod()->GetMethodSignature().GetToken();
+}
+
+//***************************************************************************************
+inline DWORD MethodTableBuilder::DeclaredMethodIterator::Attrs()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetMDMethod()->GetDeclAttrs();
+}
+
+//***************************************************************************************
+inline DWORD MethodTableBuilder::DeclaredMethodIterator::RVA()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetMDMethod()->GetRVA();
+}
+
+//***************************************************************************************
+inline DWORD MethodTableBuilder::DeclaredMethodIterator::ImplFlags()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetMDMethod()->GetImplAttrs();
+}
+
+//***************************************************************************************
+inline LPCSTR MethodTableBuilder::DeclaredMethodIterator::Name()
+{
+ STANDARD_VM_CONTRACT;
+ return GetMDMethod()->GetMethodSignature().GetName();
+}
+
+//***************************************************************************************
+inline PCCOR_SIGNATURE MethodTableBuilder::DeclaredMethodIterator::GetSig(DWORD *pcbSig)
+{
+ STANDARD_VM_CONTRACT;
+ *pcbSig = static_cast<DWORD>
+ (GetMDMethod()->GetMethodSignature().GetSignatureLength());
+ return GetMDMethod()->GetMethodSignature().GetSignature();
+}
+
+//***************************************************************************************
+inline MethodTableBuilder::METHOD_IMPL_TYPE MethodTableBuilder::DeclaredMethodIterator::MethodImpl()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetMDMethod()->GetMethodImplType();
+}
+
+//***************************************************************************************
+inline BOOL MethodTableBuilder::DeclaredMethodIterator::IsMethodImpl()
+{
+ LIMITED_METHOD_CONTRACT;
+ return MethodImpl() == METHOD_IMPL;
+}
+
+//***************************************************************************************
+inline MethodTableBuilder::METHOD_TYPE MethodTableBuilder::DeclaredMethodIterator::MethodType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetMDMethod()->GetMethodType();
+}
+
+//***************************************************************************************
+inline MethodTableBuilder::bmtMDMethod *
+MethodTableBuilder::DeclaredMethodIterator::GetMDMethod()
+{
+ LIMITED_METHOD_CONTRACT;
+    _ASSERTE(FitsIn<SLOT_INDEX>(m_idx)); // Review: m_idx should probably _be_ a SLOT_INDEX, but changing the type currently trips asserts.
+ return (*m_mtb.bmtMethod)[static_cast<SLOT_INDEX>(m_idx)];
+}
+
+//*******************************************************************************
+inline class MethodDesc *
+MethodTableBuilder::DeclaredMethodIterator::GetIntroducingMethodDesc()
+{
+ STANDARD_VM_CONTRACT;
+
+ bmtMDMethod *pCurrentMD = GetMDMethod();
+ DWORD dwSlot = pCurrentMD->GetSlotIndex();
+ MethodDesc *pIntroducingMD = NULL;
+
+ bmtRTType *pParentType = pCurrentMD->GetOwningType()->GetParentType();
+ bmtRTType *pPrevParentType = NULL;
+
+ // Find this method in the parent.
+ // If it does exist in the parent, it would be at the same vtable slot.
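+    // The loop walks toward the root of the hierarchy for as long as the slot
+    // still exists in the parent's vtable; the last type visited before the slot
+    // disappears (pPrevParentType) is the type that introduced the method.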
+ while (pParentType != NULL &&
+ dwSlot < pParentType->GetMethodTable()->GetNumVirtuals())
+ {
+ pPrevParentType = pParentType;
+ pParentType = pParentType->GetParentType();
+ }
+
+ if (pPrevParentType != NULL)
+ {
+ pIntroducingMD =
+ pPrevParentType->GetMethodTable()->GetMethodDescForSlot(dwSlot);
+ }
+
+ return pIntroducingMD;
+}
+
+
+//***************************************************************************************
+inline MethodTableBuilder::bmtMDMethod *
+MethodTableBuilder::DeclaredMethodIterator::operator->()
+{
+ return GetMDMethod();
+}
+
+//***************************************************************************************
+inline bool
+MethodTableBuilder::bmtMethodHandle::operator==(
+ const bmtMethodHandle &rhs) const
+{
+ return m_handle == rhs.m_handle;
+}
+
+//***************************************************************************************
+//
+// The MethodNameHash is a temporary loader structure which may be allocated if there are a large number of
+// methods in a class, to quickly get from a method name to a MethodDesc (potentially a chain of MethodDescs).
+//
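+// Typical use (an illustrative sketch; the payload type and variable names are
+// hypothetical):
+//
+//     FixedCapacityStackingAllocatedUTF8StringHash<MethodDesc *> hash;
+//     hash.Init(dwNumMethods, pStackingAllocator);   // fixed capacity, no growth
+//     hash.Insert(pszMethodName, pMD);               // at most dwNumMethods times
+//
+//     // Enumerate every entry sharing a name; FindNext filters out hash collisions.
+//     FixedCapacityStackingAllocatedUTF8StringHash<MethodDesc *>::HashEntry * pEntry;
+//     for (pEntry = hash.Lookup(pszName); pEntry != NULL; pEntry = hash.FindNext(pEntry))
+//     {
+//         // pEntry->m_data is the MethodDesc recorded under this name
+//     }
+//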
+
+//***************************************************************************************
+// Throws on failure.
+template <typename Data>
+void
+FixedCapacityStackingAllocatedUTF8StringHash<Data>::Init(
+ DWORD dwMaxEntries,
+ StackingAllocator * pAllocator)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+ }
+ CONTRACTL_END;
+
+ // Given dwMaxEntries, determine a good value for the number of hash buckets
+ m_dwNumBuckets = (dwMaxEntries / 10);
+
+ if (m_dwNumBuckets < 5)
+ m_dwNumBuckets = 5;
+
+ S_UINT32 scbMemory = (S_UINT32(m_dwNumBuckets) * S_UINT32(sizeof(HashEntry*))) +
+ (S_UINT32(dwMaxEntries) * S_UINT32(sizeof(HashEntry)));
+
+ if(scbMemory.IsOverflow())
+ ThrowHR(E_INVALIDARG);
+
+ if (pAllocator)
+ {
+ m_pMemoryStart = (BYTE*)pAllocator->Alloc(scbMemory);
+ }
+ else
+ { // We're given the number of hash table entries we're going to insert,
+ // so we can allocate the appropriate size
+ m_pMemoryStart = new BYTE[scbMemory.Value()];
+ }
+
+ INDEBUG(m_pDebugEndMemory = m_pMemoryStart + scbMemory.Value();)
+
+ // Current alloc ptr
+ m_pMemory = m_pMemoryStart;
+
+ // Allocate the buckets out of the alloc ptr
+ m_pBuckets = (HashEntry**) m_pMemory;
+ m_pMemory += sizeof(HashEntry*)*m_dwNumBuckets;
+
+    // Zero the whole block; in particular, buckets all point to empty lists to begin with
+ memset(m_pBuckets, 0, scbMemory.Value());
+}
+
+//***************************************************************************************
+// Insert new entry at head of list
+template <typename Data>
+void
+FixedCapacityStackingAllocatedUTF8StringHash<Data>::Insert(
+ LPCUTF8 pszName,
+ const Data & data)
+{
+ LIMITED_METHOD_CONTRACT;
+ DWORD dwHash = GetHashCode(pszName);
+ DWORD dwBucket = dwHash % m_dwNumBuckets;
+ HashEntry * pNewEntry;
+
+ pNewEntry = (HashEntry *) m_pMemory;
+ m_pMemory += sizeof(HashEntry);
+
+ _ASSERTE(m_pMemory <= m_pDebugEndMemory);
+
+ // Insert at head of bucket chain
+ pNewEntry->m_pNext = m_pBuckets[dwBucket];
+ pNewEntry->m_data = data;
+ pNewEntry->m_dwHashValue = dwHash;
+ pNewEntry->m_pKey = pszName;
+
+ m_pBuckets[dwBucket] = pNewEntry;
+}
+
+//***************************************************************************************
+// Return the first HashEntry with this name, or NULL if there is no such entry
+template <typename Data>
+typename FixedCapacityStackingAllocatedUTF8StringHash<Data>::HashEntry *
+FixedCapacityStackingAllocatedUTF8StringHash<Data>::Lookup(
+ LPCUTF8 pszName)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ DWORD dwHash = GetHashCode(pszName);
+ DWORD dwBucket = dwHash % m_dwNumBuckets;
+ HashEntry * pSearch;
+
+ for (pSearch = m_pBuckets[dwBucket]; pSearch; pSearch = pSearch->m_pNext)
+ {
+ if (pSearch->m_dwHashValue == dwHash && !strcmp(pSearch->m_pKey, pszName))
+ {
+ return pSearch;
+ }
+ }
+
+ return NULL;
+}
+
+//***************************************************************************************
+// Return the next HashEntry with the same name, or NULL if there is no such entry
+template <typename Data>
+typename FixedCapacityStackingAllocatedUTF8StringHash<Data>::HashEntry *
+FixedCapacityStackingAllocatedUTF8StringHash<Data>::FindNext(
+ HashEntry * pEntry)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ CONSISTENCY_CHECK(CheckPointer(pEntry));
+
+ LPCUTF8 key = pEntry->m_pKey;
+ DWORD hash = pEntry->m_dwHashValue;
+
+ pEntry = pEntry->m_pNext;
+ while (pEntry != NULL)
+ {
+ if (pEntry->m_dwHashValue == hash &&
+ strcmp(pEntry->m_pKey, key) == 0)
+ {
+ break;
+ }
+ pEntry = pEntry->m_pNext;
+ }
+
+ return pEntry;
+}
+
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+
+//***************************************************************************************
+#define CALL_TYPE_HANDLE_METHOD(m) \
+ ((IsRTType()) ? (AsRTType()->m()) : (AsMDType()->m()))
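+
+// For example, CALL_TYPE_HANDLE_METHOD(GetModule) expands to
+//     ((IsRTType()) ? (AsRTType()->GetModule()) : (AsMDType()->GetModule()))
+// so each forwarder below dispatches to the runtime-type or metadata-type
+// implementation without a virtual call.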
+
+//***************************************************************************************
+inline MethodTableBuilder::bmtTypeHandle
+MethodTableBuilder::bmtTypeHandle::GetParentType() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return CALL_TYPE_HANDLE_METHOD(GetParentType);
+}
+
+//***************************************************************************************
+inline bool
+MethodTableBuilder::bmtTypeHandle::IsNested() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return CALL_TYPE_HANDLE_METHOD(IsNested);
+}
+
+//***************************************************************************************
+inline mdTypeDef
+MethodTableBuilder::bmtTypeHandle::GetEnclosingTypeToken() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return CALL_TYPE_HANDLE_METHOD(GetEnclosingTypeToken);
+}
+
+//***************************************************************************************
+inline Module *
+MethodTableBuilder::bmtTypeHandle::GetModule() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return CALL_TYPE_HANDLE_METHOD(GetModule);
+}
+
+//***************************************************************************************
+inline mdTypeDef
+MethodTableBuilder::bmtTypeHandle::GetTypeDefToken() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return CALL_TYPE_HANDLE_METHOD(GetTypeDefToken);
+}
+
+//***************************************************************************************
+inline const Substitution &
+MethodTableBuilder::bmtTypeHandle::GetSubstitution() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return CALL_TYPE_HANDLE_METHOD(GetSubstitution);
+}
+
+//***************************************************************************************
+inline MethodTable *
+MethodTableBuilder::bmtTypeHandle::GetMethodTable() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return CALL_TYPE_HANDLE_METHOD(GetMethodTable);
+}
+
+//***************************************************************************************
+inline DWORD
+MethodTableBuilder::bmtTypeHandle::GetAttrs() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return CALL_TYPE_HANDLE_METHOD(GetAttrs);
+}
+
+//***************************************************************************************
+inline bool
+MethodTableBuilder::bmtTypeHandle::IsInterface() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return CALL_TYPE_HANDLE_METHOD(IsInterface);
+}
+
+#undef CALL_TYPE_HANDLE_METHOD
+
+//***************************************************************************************
+#define CALL_METHOD_HANDLE_METHOD(m) \
+ ((IsRTMethod()) ? (AsRTMethod()->m()) : (AsMDMethod()->m()))
+
+//***************************************************************************************
+inline MethodTableBuilder::bmtTypeHandle
+MethodTableBuilder::bmtMethodHandle::GetOwningType() const
+{
+ LIMITED_METHOD_CONTRACT;
+ if (IsRTMethod())
+ return bmtTypeHandle(AsRTMethod()->GetOwningType());
+ else
+ return bmtTypeHandle(AsMDMethod()->GetOwningType());
+}
+
+//***************************************************************************************
+inline DWORD
+MethodTableBuilder::bmtMethodHandle::GetDeclAttrs() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return CALL_METHOD_HANDLE_METHOD(GetDeclAttrs);
+}
+
+//***************************************************************************************
+inline DWORD
+MethodTableBuilder::bmtMethodHandle::GetImplAttrs() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return CALL_METHOD_HANDLE_METHOD(GetImplAttrs);
+}
+
+//***************************************************************************************
+inline MethodTableBuilder::SLOT_INDEX
+MethodTableBuilder::bmtMethodHandle::GetSlotIndex() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return CALL_METHOD_HANDLE_METHOD(GetSlotIndex);
+}
+
+//***************************************************************************************
+inline const MethodTableBuilder::MethodSignature &
+MethodTableBuilder::bmtMethodHandle::GetMethodSignature() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return CALL_METHOD_HANDLE_METHOD(GetMethodSignature);
+}
+
+//***************************************************************************************
+inline MethodDesc *
+MethodTableBuilder::bmtMethodHandle::GetMethodDesc() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return CALL_METHOD_HANDLE_METHOD(GetMethodDesc);
+}
+
+#undef CALL_METHOD_HANDLE_METHOD
+
+//***************************************************************************************
+inline DWORD
+MethodTableBuilder::bmtRTMethod::GetDeclAttrs() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetMethodDesc()->GetAttrs();
+}
+
+//***************************************************************************************
+inline DWORD
+MethodTableBuilder::bmtRTMethod::GetImplAttrs() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetMethodDesc()->GetImplAttrs();
+}
+
+//***************************************************************************************
+inline MethodTableBuilder::SLOT_INDEX
+MethodTableBuilder::bmtRTMethod::GetSlotIndex() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetMethodDesc()->GetSlot();
+}
+
+//***************************************************************************************
+inline void
+MethodTableBuilder::bmtMDMethod::SetSlotIndex(SLOT_INDEX idx)
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(m_pMD == NULL);
+ m_slotIndex = idx;
+}
+
+//***************************************************************************************
+inline void
+MethodTableBuilder::bmtMDMethod::SetUnboxedSlotIndex(SLOT_INDEX idx)
+{
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(m_pUnboxedMD == NULL);
+ m_unboxedSlotIndex = idx;
+}
+
+//***************************************************************************************
+inline DWORD
+MethodTableBuilder::GetMethodClassification(MethodTableBuilder::METHOD_TYPE type)
+{
+ LIMITED_METHOD_CONTRACT;
+ // Verify that the enums are in sync, so we can do the conversion by simple cast.
+ C_ASSERT((DWORD)METHOD_TYPE_NORMAL == (DWORD)mcIL);
+ C_ASSERT((DWORD)METHOD_TYPE_FCALL == (DWORD)mcFCall);
+ C_ASSERT((DWORD)METHOD_TYPE_NDIRECT == (DWORD)mcNDirect);
+ C_ASSERT((DWORD)METHOD_TYPE_EEIMPL == (DWORD)mcEEImpl);
+ C_ASSERT((DWORD)METHOD_TYPE_INSTANTIATED == (DWORD)mcInstantiated);
+#ifdef FEATURE_COMINTEROP
+ C_ASSERT((DWORD)METHOD_TYPE_COMINTEROP == (DWORD)mcComInterop);
+#endif
+
+ return (DWORD)type;
+}
+
+#endif // _METHODTABLEBUILDER_INL_
+
diff --git a/src/vm/microsoft.comservices.h b/src/vm/microsoft.comservices.h
new file mode 100644
index 0000000000..8c406f1b2e
--- /dev/null
+++ b/src/vm/microsoft.comservices.h
@@ -0,0 +1,278 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#pragma warning( disable: 4049 ) /* more than 64k source lines */
+
+/* this ALWAYS GENERATED file contains the definitions for the interfaces */
+
+
+ /* File created by MIDL compiler version 5.03.0280 */
+/* at Mon Jul 17 19:19:10 2000
+ */
+/* Compiler settings for Z:\urt\inst\v1.x86chk\Microsoft.ComServices.idl:
+ Os (OptLev=s), W1, Zp8, env=Win32 (32b run), ms_ext, c_ext
+ error checks: allocation ref bounds_check enum stub_data
+ VC __declspec() decoration level:
+ __declspec(uuid()), __declspec(selectany), __declspec(novtable)
+ DECLSPEC_UUID(), MIDL_INTERFACE()
+*/
+//@@MIDL_FILE_HEADING( )
+
+
+/* verify that the <rpcndr.h> version is high enough to compile this file*/
+#ifndef __REQUIRED_RPCNDR_H_VERSION__
+#define __REQUIRED_RPCNDR_H_VERSION__ 440
+#endif
+
+#include "rpc.h"
+#include "rpcndr.h"
+
+#ifndef __Microsoft2EComServices_h__
+#define __Microsoft2EComServices_h__
+
+/* Forward Declarations */
+
+#ifndef __IRegistrationHelper_FWD_DEFINED__
+#define __IRegistrationHelper_FWD_DEFINED__
+typedef interface IRegistrationHelper IRegistrationHelper;
+#endif /* __IRegistrationHelper_FWD_DEFINED__ */
+
+
+#ifndef __RegistrationHelperTx_FWD_DEFINED__
+#define __RegistrationHelperTx_FWD_DEFINED__
+
+#ifdef __cplusplus
+typedef class RegistrationHelperTx RegistrationHelperTx;
+#else
+typedef struct RegistrationHelperTx RegistrationHelperTx;
+#endif /* __cplusplus */
+
+#endif /* __RegistrationHelperTx_FWD_DEFINED__ */
+
+
+#ifdef __cplusplus
+extern "C"{
+#endif
+
+void __RPC_FAR * __RPC_USER MIDL_user_allocate(size_t);
+void __RPC_USER MIDL_user_free( void __RPC_FAR * );
+
+
+#ifndef __Microsoft_ComServices_LIBRARY_DEFINED__
+#define __Microsoft_ComServices_LIBRARY_DEFINED__
+
+/* library Microsoft_ComServices */
+/* [version][uuid] */
+
+
+typedef /* [public][public][uuid] */ DECLSPEC_UUID("9D667CBC-FE79-3B45-AEBB-6303106B137A")
+enum __MIDL___MIDL_itf_Microsoft2EComServices_0000_0001
+ { InstallationFlags_Default = 0,
+ InstallationFlags_ExpectExistingTypeLib = 1,
+ InstallationFlags_CreateTargetApplication = 2,
+ InstallationFlags_FindOrCreateTargetApplication = 4,
+ InstallationFlags_ReconfigureExistingApplication = 8,
+ InstallationFlags_Register = 256,
+ InstallationFlags_Install = 512,
+ InstallationFlags_Configure = 1024
+ } InstallationFlags;
+
+
+EXTERN_C const IID LIBID_Microsoft_ComServices;
+
+#ifndef __IRegistrationHelper_INTERFACE_DEFINED__
+#define __IRegistrationHelper_INTERFACE_DEFINED__
+
+/* interface IRegistrationHelper */
+/* [object][custom][oleautomation][uuid] */
+
+
+EXTERN_C const IID IID_IRegistrationHelper;
+
+#if defined(__cplusplus) && !defined(CINTERFACE)
+
+ MIDL_INTERFACE("55E3EA25-55CB-4650-8887-18E8D30BB4BC")
+ IRegistrationHelper : public IUnknown
+ {
+ public:
+ virtual HRESULT __stdcall InstallAssembly(
+ /* [in] */ BSTR assembly,
+ /* [out][in] */ BSTR __RPC_FAR *application,
+ /* [out][in] */ BSTR __RPC_FAR *tlb,
+ /* [in] */ InstallationFlags installFlags) = 0;
+
+ virtual HRESULT __stdcall RegisterAssembly(
+ /* [in] */ BSTR assembly,
+ /* [out][in] */ BSTR __RPC_FAR *tlb) = 0;
+
+ virtual HRESULT __stdcall ConfigureAssembly(
+ /* [in] */ BSTR assembly,
+ /* [in] */ BSTR application) = 0;
+
+ virtual HRESULT __stdcall UninstallAssembly(
+ /* [in] */ BSTR assembly,
+ /* [in] */ BSTR application) = 0;
+
+ };
+
+#else /* C style interface */
+
+ typedef struct IRegistrationHelperVtbl
+ {
+ BEGIN_INTERFACE
+
+ HRESULT ( STDMETHODCALLTYPE __RPC_FAR *QueryInterface )(
+ IRegistrationHelper __RPC_FAR * This,
+ /* [in] */ REFIID riid,
+ /* [iid_is][out] */ void __RPC_FAR *__RPC_FAR *ppvObject);
+
+ ULONG ( STDMETHODCALLTYPE __RPC_FAR *AddRef )(
+ IRegistrationHelper __RPC_FAR * This);
+
+ ULONG ( STDMETHODCALLTYPE __RPC_FAR *Release )(
+ IRegistrationHelper __RPC_FAR * This);
+
+ HRESULT ( __stdcall __RPC_FAR *InstallAssembly )(
+ IRegistrationHelper __RPC_FAR * This,
+ /* [in] */ BSTR assembly,
+ /* [out][in] */ BSTR __RPC_FAR *application,
+ /* [out][in] */ BSTR __RPC_FAR *tlb,
+ /* [in] */ InstallationFlags installFlags);
+
+ HRESULT ( __stdcall __RPC_FAR *RegisterAssembly )(
+ IRegistrationHelper __RPC_FAR * This,
+ /* [in] */ BSTR assembly,
+ /* [out][in] */ BSTR __RPC_FAR *tlb);
+
+ HRESULT ( __stdcall __RPC_FAR *ConfigureAssembly )(
+ IRegistrationHelper __RPC_FAR * This,
+ /* [in] */ BSTR assembly,
+ /* [in] */ BSTR application);
+
+ HRESULT ( __stdcall __RPC_FAR *UninstallAssembly )(
+ IRegistrationHelper __RPC_FAR * This,
+ /* [in] */ BSTR assembly,
+ /* [in] */ BSTR application);
+
+ END_INTERFACE
+ } IRegistrationHelperVtbl;
+
+ interface IRegistrationHelper
+ {
+ CONST_VTBL struct IRegistrationHelperVtbl __RPC_FAR *lpVtbl;
+ };
+
+
+
+#ifdef COBJMACROS
+
+
+#define IRegistrationHelper_QueryInterface(This,riid,ppvObject) \
+ (This)->lpVtbl -> QueryInterface(This,riid,ppvObject)
+
+#define IRegistrationHelper_AddRef(This) \
+ (This)->lpVtbl -> AddRef(This)
+
+#define IRegistrationHelper_Release(This) \
+ (This)->lpVtbl -> Release(This)
+
+
+#define IRegistrationHelper_InstallAssembly(This,assembly,application,tlb,installFlags) \
+ (This)->lpVtbl -> InstallAssembly(This,assembly,application,tlb,installFlags)
+
+#define IRegistrationHelper_RegisterAssembly(This,assembly,tlb) \
+ (This)->lpVtbl -> RegisterAssembly(This,assembly,tlb)
+
+#define IRegistrationHelper_ConfigureAssembly(This,assembly,application) \
+ (This)->lpVtbl -> ConfigureAssembly(This,assembly,application)
+
+#define IRegistrationHelper_UninstallAssembly(This,assembly,application) \
+ (This)->lpVtbl -> UninstallAssembly(This,assembly,application)
+
+#endif /* COBJMACROS */
+
+
+#endif /* C style interface */
+
+
+
+HRESULT __stdcall IRegistrationHelper_InstallAssembly_Proxy(
+ IRegistrationHelper __RPC_FAR * This,
+ /* [in] */ BSTR assembly,
+ /* [out][in] */ BSTR __RPC_FAR *application,
+ /* [out][in] */ BSTR __RPC_FAR *tlb,
+ /* [in] */ InstallationFlags installFlags);
+
+
+void __RPC_STUB IRegistrationHelper_InstallAssembly_Stub(
+ IRpcStubBuffer *This,
+ IRpcChannelBuffer *_pRpcChannelBuffer,
+ PRPC_MESSAGE _pRpcMessage,
+ DWORD *_pdwStubPhase);
+
+
+HRESULT __stdcall IRegistrationHelper_RegisterAssembly_Proxy(
+ IRegistrationHelper __RPC_FAR * This,
+ /* [in] */ BSTR assembly,
+ /* [out][in] */ BSTR __RPC_FAR *tlb);
+
+
+void __RPC_STUB IRegistrationHelper_RegisterAssembly_Stub(
+ IRpcStubBuffer *This,
+ IRpcChannelBuffer *_pRpcChannelBuffer,
+ PRPC_MESSAGE _pRpcMessage,
+ DWORD *_pdwStubPhase);
+
+
+HRESULT __stdcall IRegistrationHelper_ConfigureAssembly_Proxy(
+ IRegistrationHelper __RPC_FAR * This,
+ /* [in] */ BSTR assembly,
+ /* [in] */ BSTR application);
+
+
+void __RPC_STUB IRegistrationHelper_ConfigureAssembly_Stub(
+ IRpcStubBuffer *This,
+ IRpcChannelBuffer *_pRpcChannelBuffer,
+ PRPC_MESSAGE _pRpcMessage,
+ DWORD *_pdwStubPhase);
+
+
+HRESULT __stdcall IRegistrationHelper_UninstallAssembly_Proxy(
+ IRegistrationHelper __RPC_FAR * This,
+ /* [in] */ BSTR assembly,
+ /* [in] */ BSTR application);
+
+
+void __RPC_STUB IRegistrationHelper_UninstallAssembly_Stub(
+ IRpcStubBuffer *This,
+ IRpcChannelBuffer *_pRpcChannelBuffer,
+ PRPC_MESSAGE _pRpcMessage,
+ DWORD *_pdwStubPhase);
+
+
+
+#endif /* __IRegistrationHelper_INTERFACE_DEFINED__ */
+
+
+EXTERN_C const CLSID CLSID_RegistrationHelperTx;
+
+#ifdef __cplusplus
+
+class DECLSPEC_UUID("89A86E7B-C229-4008-9BAA-2F5C8411D7E0")
+RegistrationHelperTx;
+#endif
+#endif /* __Microsoft_ComServices_LIBRARY_DEFINED__ */
+
+/* Additional Prototypes for ALL interfaces */
+
+/* end of Additional Prototypes */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+
diff --git a/src/vm/microsoft.comservices_i.c b/src/vm/microsoft.comservices_i.c
new file mode 100644
index 0000000000..d7b0dcc0da
--- /dev/null
+++ b/src/vm/microsoft.comservices_i.c
@@ -0,0 +1,176 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#pragma warning( disable: 4049 ) /* more than 64k source lines */
+
+/* this ALWAYS GENERATED file contains the IIDs and CLSIDs */
+
+/* link this file in with the server and any clients */
+
+
+ /* File created by MIDL compiler version 5.03.0280 */
+/* at Mon Jul 17 19:19:10 2000
+ */
+/* Compiler settings for Z:\urt\inst\v1.x86chk\Microsoft.ComServices.idl:
+ Os (OptLev=s), W1, Zp8, env=Win32 (32b run), ms_ext, c_ext
+ error checks: allocation ref bounds_check enum stub_data
+ VC __declspec() decoration level:
+ __declspec(uuid()), __declspec(selectany), __declspec(novtable)
+ DECLSPEC_UUID(), MIDL_INTERFACE()
+*/
+//@@MIDL_FILE_HEADING( )
+
+#if !defined(_M_IA64) && !defined(_M_AXP64)
+
+#ifdef __cplusplus
+extern "C"{
+#endif
+
+
+#include <rpc.h>
+#include <rpcndr.h>
+
+#ifdef _MIDL_USE_GUIDDEF_
+
+#ifndef INITGUID
+#define INITGUID
+#include <guiddef.h>
+#undef INITGUID
+#else
+#include <guiddef.h>
+#endif
+
+#define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \
+ DEFINE_GUID(name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8)
+
+#else // !_MIDL_USE_GUIDDEF_
+
+#ifndef __IID_DEFINED__
+#define __IID_DEFINED__
+
+typedef struct _IID
+{
+ unsigned long x;
+ unsigned short s1;
+ unsigned short s2;
+ unsigned char c[8];
+} IID;
+
+#endif // __IID_DEFINED__
+
+#ifndef CLSID_DEFINED
+#define CLSID_DEFINED
+typedef IID CLSID;
+#endif // CLSID_DEFINED
+
+#define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \
+ const type name = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}}
+
+#endif // !_MIDL_USE_GUIDDEF_
+
+MIDL_DEFINE_GUID(IID, LIBID_Microsoft_ComServices,0xD7F68C66,0x3833,0x3832,0xB6,0xD0,0xB7,0x96,0xBB,0x7D,0x2D,0xFF);
+
+
+MIDL_DEFINE_GUID(IID, IID_IRegistrationHelper,0x55E3EA25,0x55CB,0x4650,0x88,0x87,0x18,0xE8,0xD3,0x0B,0xB4,0xBC);
+
+
+MIDL_DEFINE_GUID(CLSID, CLSID_RegistrationHelperTx,0x89A86E7B,0xC229,0x4008,0x9B,0xAA,0x2F,0x5C,0x84,0x11,0xD7,0xE0);
+
+#undef MIDL_DEFINE_GUID
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+#endif /* !defined(_M_IA64) && !defined(_M_AXP64)*/
+
+
+#pragma warning( disable: 4049 ) /* more than 64k source lines */
+
+/* this ALWAYS GENERATED file contains the IIDs and CLSIDs */
+
+/* link this file in with the server and any clients */
+
+
+ /* File created by MIDL compiler version 5.03.0280 */
+/* at Mon Jul 17 19:19:10 2000
+ */
+/* Compiler settings for Z:\urt\inst\v1.x86chk\Microsoft.ComServices.idl:
+ Oicf (OptLev=i2), W1, Zp8, env=Win64 (32b run,appending), ms_ext, c_ext, robust
+ error checks: allocation ref bounds_check enum stub_data
+ VC __declspec() decoration level:
+ __declspec(uuid()), __declspec(selectany), __declspec(novtable)
+ DECLSPEC_UUID(), MIDL_INTERFACE()
+*/
+//@@MIDL_FILE_HEADING( )
+
+#if defined(_M_IA64) || defined(_M_AXP64)
+
+#ifdef __cplusplus
+extern "C"{
+#endif
+
+
+#include <rpc.h>
+#include <rpcndr.h>
+
+#ifdef _MIDL_USE_GUIDDEF_
+
+#ifndef INITGUID
+#define INITGUID
+#include <guiddef.h>
+#undef INITGUID
+#else
+#include <guiddef.h>
+#endif
+
+#define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \
+ DEFINE_GUID(name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8)
+
+#else // !_MIDL_USE_GUIDDEF_
+
+#ifndef __IID_DEFINED__
+#define __IID_DEFINED__
+
+typedef struct _IID
+{
+ unsigned long x;
+ unsigned short s1;
+ unsigned short s2;
+ unsigned char c[8];
+} IID;
+
+#endif // __IID_DEFINED__
+
+#ifndef CLSID_DEFINED
+#define CLSID_DEFINED
+typedef IID CLSID;
+#endif // CLSID_DEFINED
+
+#define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \
+ const type name = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}}
+
+#endif // !_MIDL_USE_GUIDDEF_
+
+MIDL_DEFINE_GUID(IID, LIBID_Microsoft_ComServices,0xD7F68C66,0x3833,0x3832,0xB6,0xD0,0xB7,0x96,0xBB,0x7D,0x2D,0xFF);
+
+
+MIDL_DEFINE_GUID(IID, IID_IRegistrationHelper,0x55E3EA25,0x55CB,0x4650,0x88,0x87,0x18,0xE8,0xD3,0x0B,0xB4,0xBC);
+
+
+MIDL_DEFINE_GUID(CLSID, CLSID_RegistrationHelperTx,0x89A86E7B,0xC229,0x4008,0x9B,0xAA,0x2F,0x5C,0x84,0x11,0xD7,0xE0);
+
+#undef MIDL_DEFINE_GUID
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+#endif /* defined(_M_IA64) || defined(_M_AXP64)*/
+
diff --git a/src/vm/mixedmode.cpp b/src/vm/mixedmode.cpp
new file mode 100644
index 0000000000..e4bdfc2b8b
--- /dev/null
+++ b/src/vm/mixedmode.cpp
@@ -0,0 +1,237 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: MIXEDMODE.CPP
+//
+
+//
+
+// MIXEDMODE.CPP implements support for mixed-mode binaries
+// ===========================================================================
+
+
+
+#include "common.h"
+
+#include "mixedmode.hpp"
+
+#include "dllimportcallback.h"
+
+#ifdef FEATURE_MIXEDMODE
+
+
+IJWNOADThunk::IJWNOADThunk(HMODULE pModulebase, DWORD dwIndex, mdToken Token)
+{
+ WRAPPER_NO_CONTRACT;
+ m_pModulebase=pModulebase;
+ m_dwIndex=dwIndex;
+ m_Token=Token;
+ m_fAccessingCache = 0;
+
+ for (int i=0; i < IJWNOADThunkStubCacheSize; i++)
+ {
+ m_cache[i].m_AppDomainID = (ADID)-1;
+ m_cache[i].m_CodeAddr = 0;
+ }
+
+#ifdef _TARGET_X86_
+ m_code.Encode((BYTE*)GetEEFuncEntryPoint(IJWNOADThunkJumpTarget), this);
+#else // !_TARGET_X86_
+ m_code.Encode((BYTE*)GetEEFuncEntryPoint(MakeCall), this);
+#endif // !_TARGET_X86_
+}
+
+#define E_PROCESS_SHUTDOWN_REENTRY HRESULT_FROM_WIN32(ERROR_PROCESS_ABORTED)
+
+
+#ifdef _TARGET_X86_
+// Slow path lookup...called from stub
+extern "C" LPCVOID __stdcall IJWNOADThunkJumpTargetHelper(IJWNOADThunk* pThunk)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return pThunk->FindThunkTarget();
+}
+#endif // _TARGET_X86_
+
+LPCVOID IJWNOADThunk::FindThunkTarget()
+{
+ CONTRACT(LPCVOID)
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // We don't plan on fixing this in Whidbey...the IJW scenario has always assumed throwing is "ok" here.
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ LPCVOID pvTargetCode = NULL;
+
+ AppDomain* pDomain;
+
+ Thread* pThread = SetupThread();
+
+    // Ensure that we're in preemptive mode.
+    // We only need this check for a newly created
+    // CLR thread, which starts out in cooperative mode.
+ GCX_PREEMP_NO_DTOR();
+
+ pDomain = GetAppDomain();
+
+ if (NULL == pDomain)
+ {
+ _ASSERTE(!"Appdomain should've been set up by SetupThread");
+ pDomain = SystemDomain::System()->DefaultDomain();
+ }
+
+
+ if (NULL != pDomain)
+ {
+        // Get a local copy so we don't have to deal with a race condition.
+ GetCachedInfo(pDomain->GetId(), &pvTargetCode);
+
+ // Cache miss.
+ if (pvTargetCode==NULL)
+ {
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ BEGIN_SO_INTOLERANT_CODE(pThread);
+ {
+ Module* pModule;
+
+ pModule = pDomain->GetIJWModule(m_pModulebase);
+ if (NULL == pModule)
+ {
+ // New for Whidbey: In V1.1, we just gave up and raised an exception if the target assembly wasn't already loaded
+ // into the current appdomain. We now force-inject the assembly.
+
+ PEAssemblyHolder pFile(pDomain->BindExplicitAssembly(m_pModulebase, FALSE));
+ pDomain->LoadAssembly(NULL, pFile, FILE_ACTIVE);
+
+ // Now, try the lookup again. The LoadAssembly() either worked or it didn't. If it didn't, it is probably
+ // due to lack of memory and all we can do is raise an exception and hope the IJW caller does something reasonable.
+ // Otherwise, we should now succeed in finding the current domain's instantiation of the target module.
+ pModule = pDomain->GetIJWModule(m_pModulebase);
+ }
+
+ if (NULL != pModule)
+ {
+ pModule->EnsureActive();
+
+ UMEntryThunk* pThunkTable;
+
+ pThunkTable = pModule->GetADThunkTable();
+ pvTargetCode = (LPVOID)GetEEFuncEntryPoint((LPVOID)pThunkTable[m_dwIndex].GetCode());
+
+ // Populate the cache with our latest info.
+ SetCachedInfo(pDomain->GetId(), pvTargetCode);
+ }
+ }
+ END_SO_INTOLERANT_CODE;
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ }
+ }
+
+ if(pvTargetCode==NULL)
+ pvTargetCode=(LPVOID)GetEEFuncEntryPoint(SafeNoModule);
+
+ RETURN (LPCVOID)pvTargetCode;
+}
+
+#ifdef _TARGET_X86_
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning (disable : 4740) // There is inline asm code in this function, which disables
+ // global optimizations.
+#endif // _MSC_VER
+
+__declspec(naked) void _cdecl IJWNOADThunk::MakeCall()
+{
+ WRAPPER_NO_CONTRACT;
+ struct
+ {
+ LPVOID This;
+ LPCVOID RetAddr;
+ } Vars;
+ #define LocalsSize 8
+
+ _asm enter LocalsSize+4,0;
+ _asm push ebx;
+ _asm push ecx;
+ _asm push edx;
+ _asm push esi;
+ _asm push edi;
+
+ _asm mov Vars.This, eax;
+
+    // Careful above this point.
+ _ASSERTE(sizeof(Vars)<=LocalsSize);
+
+ Vars.RetAddr = ((IJWNOADThunk*)Vars.This)->FindThunkTarget();
+
+ _ASSERTE(NULL != Vars.RetAddr);
+
+ _asm pop edi;
+ _asm pop esi;
+ _asm pop edx;
+ _asm pop ecx;
+ _asm pop ebx;
+ _asm mov eax,Vars.RetAddr;
+ _asm leave;
+ _asm jmp eax;
+}
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
+#elif defined(_TARGET_AMD64_)
+// Implemented in AMD64\UMThunkStub.asm
+#elif defined(_TARGET_ARM_)
+// Implemented in Arm\asmhelpers.asm
+#else
+void __cdecl IJWNOADThunk::MakeCall()
+{
+ LIMITED_METHOD_CONTRACT;
+ PORTABILITY_ASSERT("IJWNOADThunk::MakeCall");
+}
+#endif
+
+void IJWNOADThunk::SafeNoModule()
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ if (!CanRunManagedCode())
+ {
+ Thread* pThread=GetThread();
+
+ // DO NOT IMPROVE THIS EXCEPTION! It cannot be a managed exception. It
+ // cannot be a real exception object because we cannot execute any managed
+ // code here.
+ if(pThread)
+ pThread->m_fPreemptiveGCDisabled = 0;
+ COMPlusThrowBoot(E_PROCESS_SHUTDOWN_REENTRY);
+ }
+ NoModule();
+}
+
+void IJWNOADThunk::NoModule()
+{
+ WRAPPER_NO_CONTRACT;
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ //<TODO>This should give the file name as part of the exception message!</TODO>
+ COMPlusThrowHR(COR_E_DLLNOTFOUND);
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+}
+
+#endif // FEATURE_MIXEDMODE
+
diff --git a/src/vm/mixedmode.hpp b/src/vm/mixedmode.hpp
new file mode 100644
index 0000000000..839447ecf1
--- /dev/null
+++ b/src/vm/mixedmode.hpp
@@ -0,0 +1,140 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: mixedmode.hpp
+//
+
+//
+// MIXEDMODE.HPP defines classes to support mixed-mode DLLs
+// ===========================================================================
+
+
+#ifndef _MIXEDMODE_H_
+#define _MIXEDMODE_H_
+
+#ifdef FEATURE_MIXEDMODE
+
+#ifdef _TARGET_X86_
+extern "C" VOID __stdcall IJWNOADThunkJumpTarget(void);
+#endif
+
+
+#define IJWNOADThunkStubCacheSize 4
+
+struct IJWNOADThunkStubCache
+{
+ ADID m_AppDomainID; // Must be the first member of the struct.
+ LPCVOID m_CodeAddr;
+};
+
+
+
+// Be sure to keep this structure and the assembly view in sync
+class IJWNOADThunk
+{
+ UMEntryThunkCode m_code;
+
+protected:
+ static void __cdecl MakeCall();
+ static void SafeNoModule();
+ static void NoModule();
+
+ HMODULE m_pModulebase;
+ DWORD m_dwIndex;
+ mdToken m_Token;
+
+ DWORD m_fAccessingCache;
+
+public:
+ IJWNOADThunkStubCache m_cache[IJWNOADThunkStubCacheSize];
+
+ BOOL IsCachedAppDomainID(ADID pID)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ for (int i=0; i < IJWNOADThunkStubCacheSize; i++)
+ {
+ if (m_cache[i].m_AppDomainID == pID)
+ return TRUE;
+ }
+
+ return FALSE;
+    }
+
+ void GetCachedInfo(ADID pID, LPCVOID* pCode)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pCode));
+ }
+ CONTRACTL_END;
+
+ *pCode = NULL;
+
+ for (int i=0; i < IJWNOADThunkStubCacheSize; i++)
+ {
+ if (m_cache[i].m_AppDomainID == pID)
+ *pCode = m_cache[i].m_CodeAddr;
+ }
+ }
+
+ void SetCachedInfo(ADID pID, LPCVOID pCode)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INSTANCE_CHECK;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ YIELD_WHILE (FastInterlockCompareExchange((LONG*)&m_fAccessingCache, 1, 0) != 0);
+
+ // Don't cache if the cache is already full.
+ for (int i=0; i < IJWNOADThunkStubCacheSize; i++)
+ {
+            if (m_cache[i].m_AppDomainID == (ADID)-1)
+            {
+                m_cache[i].m_CodeAddr = pCode;
+                MemoryBarrier();
+                m_cache[i].m_AppDomainID = pID;
+                break;  // fill only the first empty slot
+            }
+ }
+
+ m_fAccessingCache = 0;
+ }
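+
+    // Publication protocol used above (a note on the implementation): writers
+    // serialize on m_fAccessingCache, store m_CodeAddr first, and only publish
+    // m_AppDomainID after the MemoryBarrier(). GetCachedInfo matches on
+    // m_AppDomainID before reading m_CodeAddr, so, assuming the reads occur in
+    // program order, a cache hit never observes a stale code address.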
+
+ static IJWNOADThunk* FromCode(PCODE pCodeAddr)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (IJWNOADThunk*)(PCODEToPINSTR(pCodeAddr) - offsetof(IJWNOADThunk, m_code) - UMEntryThunkCode::GetEntryPointOffset());
+    }
+ mdToken GetToken()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Token;
+ }
+
+ IJWNOADThunk(HMODULE pModulebase, DWORD dwIndex, mdToken Token);
+
+ LPCBYTE GetCode()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_code.GetEntryPoint();
+ }
+
+    LPCVOID FindThunkTarget();
+};
+
+#endif //FEATURE_MIXEDMODE
+
+#endif // _MIXEDMODE_H_
diff --git a/src/vm/mlinfo.cpp b/src/vm/mlinfo.cpp
new file mode 100644
index 0000000000..107841218e
--- /dev/null
+++ b/src/vm/mlinfo.cpp
@@ -0,0 +1,5384 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: mlinfo.cpp
+//
+
+//
+
+
+#include "common.h"
+#include "mlinfo.h"
+#include "dllimport.h"
+#include "sigformat.h"
+#include "eeconfig.h"
+#include "eehash.h"
+#include "../dlls/mscorrc/resource.h"
+#include "mdaassistants.h"
+#include "typeparse.h"
+#include "comdelegate.h"
+#include "olevariant.h"
+#include "ilmarshalers.h"
+#include "interoputil.h"
+
+#ifdef FEATURE_PREJIT
+ #include "dataimage.h"
+#endif
+
+#ifdef FEATURE_COMINTEROP
+#include "comcallablewrapper.h"
+#include "runtimecallablewrapper.h"
+#include "dispparammarshaler.h"
+#include "winrttypenameconverter.h"
+#endif // FEATURE_COMINTEROP
+
+
+#ifndef lengthof
+ #define lengthof(rg) (sizeof(rg)/sizeof(rg[0]))
+#endif
+
+
+#ifdef FEATURE_COMINTEROP
+
+#ifdef FEATURE_CORECLR
+#define FX_PLATFORM_KEY g_CoreClrKeyToken
+#else
+#define FX_PLATFORM_KEY g_ECMAKeyToken
+#endif
+
+ DEFINE_ASM_QUAL_TYPE_NAME(ENUMERATOR_TO_ENUM_VARIANT_CM_NAME, g_EnumeratorToEnumClassName, "CustomMarshalers", VER_ASSEMBLYVERSION_STR, g_FXKeyToken);
+
+ static const int ENUMERATOR_TO_ENUM_VARIANT_CM_NAME_LEN = lengthof(ENUMERATOR_TO_ENUM_VARIANT_CM_NAME);
+ static const char ENUMERATOR_TO_ENUM_VARIANT_CM_COOKIE[] = {""};
+ static const int ENUMERATOR_TO_ENUM_VARIANT_CM_COOKIE_LEN = lengthof(ENUMERATOR_TO_ENUM_VARIANT_CM_COOKIE);
+
+ DEFINE_ASM_QUAL_TYPE_NAME(COLOR_TRANSLATOR_ASM_QUAL_TYPE_NAME, g_ColorTranslatorClassName, g_DrawingAsmName, VER_ASSEMBLYVERSION_STR, g_FXKeyToken);
+ DEFINE_ASM_QUAL_TYPE_NAME(COLOR_ASM_QUAL_TYPE_NAME, g_ColorClassName, g_DrawingAsmName, VER_ASSEMBLYVERSION_STR, g_FXKeyToken);
+
+ DEFINE_ASM_QUAL_TYPE_NAME(URI_ASM_QUAL_TYPE_NAME, g_SystemUriClassName, g_SystemAsmName, VER_ASSEMBLYVERSION_STR, FX_PLATFORM_KEY);
+
+ DEFINE_ASM_QUAL_TYPE_NAME(NCCEVENTARGS_ASM_QUAL_TYPE_NAME, g_NotifyCollectionChangedEventArgsName, g_SystemAsmName, VER_ASSEMBLYVERSION_STR, FX_PLATFORM_KEY);
+ DEFINE_ASM_QUAL_TYPE_NAME(NCCEVENTARGS_MARSHALER_ASM_QUAL_TYPE_NAME, g_NotifyCollectionChangedEventArgsMarshalerName, g_SystemAsmName, VER_ASSEMBLYVERSION_STR, FX_PLATFORM_KEY);
+
+ DEFINE_ASM_QUAL_TYPE_NAME(PCEVENTARGS_ASM_QUAL_TYPE_NAME, g_PropertyChangedEventArgsName, g_SystemAsmName, VER_ASSEMBLYVERSION_STR, FX_PLATFORM_KEY);
+ DEFINE_ASM_QUAL_TYPE_NAME(PCEVENTARGS_MARSHALER_ASM_QUAL_TYPE_NAME, g_PropertyChangedEventArgsMarshalerName, g_SystemAsmName, VER_ASSEMBLYVERSION_STR, FX_PLATFORM_KEY);
+
+ #define OLECOLOR_TO_SYSTEMCOLOR_METH_NAME "FromOle"
+ #define SYSTEMCOLOR_TO_OLECOLOR_METH_NAME "ToOle"
+
+ #define EVENTARGS_TO_WINRT_EVENTARGS_METH_NAME "ConvertToNative"
+ #define WINRT_EVENTARGS_TO_EVENTARGS_METH_NAME "ConvertToManaged"
+
+ #define ORIGINALSTRING_PROPERTY_NAME "OriginalString"
+#endif // FEATURE_COMINTEROP
+
+
+
+#define INITIAL_NUM_CMHELPER_HASHTABLE_BUCKETS 32
+#define INITIAL_NUM_CMINFO_HASHTABLE_BUCKETS 32
+#define DEBUG_CONTEXT_STR_LEN 2000
+
+
+//-------------------------------------------------------------------------------------
+// Return the copy ctor for a VC class (if any exists)
+//-------------------------------------------------------------------------------------
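+// Two shapes are recognized, in this order:
+//     void <MarshalCopy>(Ptr VC, Ptr VC)    // new syntax: special-name method on the type
+//     Ptr VC .__ctor(Ptr VC, ByRef VC)      // old syntax: global function
+// *pMDOut is left NULL if neither is found.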
+void FindCopyCtor(Module *pModule, MethodTable *pMT, MethodDesc **pMDOut)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS; // CompareTypeTokens may trigger GC
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ *pMDOut = NULL;
+
+ HRESULT hr;
+ mdMethodDef tk;
+ mdTypeDef cl = pMT->GetCl();
+ TypeHandle th = TypeHandle(pMT);
+ SigTypeContext typeContext(th);
+
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+ MDEnumHolder hEnumMethod(pInternalImport);
+
+ //
+ // First try for the new syntax: <MarshalCopy>
+ //
+ IfFailThrow(pInternalImport->EnumInit(mdtMethodDef, cl, &hEnumMethod));
+
+ while (pInternalImport->EnumNext(&hEnumMethod, &tk))
+ {
+ _ASSERTE(TypeFromToken(tk) == mdtMethodDef);
+ DWORD dwMemberAttrs;
+ IfFailThrow(pInternalImport->GetMethodDefProps(tk, &dwMemberAttrs));
+
+ if (IsMdSpecialName(dwMemberAttrs))
+ {
+ ULONG cSig;
+ PCCOR_SIGNATURE pSig;
+ LPCSTR pName;
+ IfFailThrow(pInternalImport->GetNameAndSigOfMethodDef(tk, &pSig, &cSig, &pName));
+
+ const char *pBaseName = "<MarshalCopy>";
+ int ncBaseName = (int)strlen(pBaseName);
+ int nc = (int)strlen(pName);
+ if (nc >= ncBaseName && 0 == strcmp(pName + nc - ncBaseName, pBaseName))
+ {
+ MetaSig msig(pSig, cSig, pModule, &typeContext);
+
+ // Looking for the prototype void <MarshalCopy>(Ptr VC, Ptr VC);
+ if (msig.NumFixedArgs() == 2)
+ {
+ if (msig.GetReturnType() == ELEMENT_TYPE_VOID)
+ {
+ if (msig.NextArg() == ELEMENT_TYPE_PTR)
+ {
+ SigPointer sp1 = msig.GetArgProps();
+ IfFailThrow(sp1.GetElemType(NULL));
+ CorElementType eType;
+ IfFailThrow(sp1.GetElemType(&eType));
+ if (eType == ELEMENT_TYPE_VALUETYPE)
+ {
+ mdToken tk1;
+ IfFailThrow(sp1.GetToken(&tk1));
+ hr = CompareTypeTokensNT(tk1, cl, pModule, pModule);
+ if (FAILED(hr))
+ {
+ pInternalImport->EnumClose(&hEnumMethod);
+ ThrowHR(hr);
+ }
+
+ if (hr == S_OK)
+ {
+ if (msig.NextArg() == ELEMENT_TYPE_PTR)
+ {
+ SigPointer sp2 = msig.GetArgProps();
+ IfFailThrow(sp2.GetElemType(NULL));
+ IfFailThrow(sp2.GetElemType(&eType));
+ if (eType == ELEMENT_TYPE_VALUETYPE)
+ {
+ mdToken tk2;
+ IfFailThrow(sp2.GetToken(&tk2));
+
+ hr = (tk2 == tk1) ? S_OK : CompareTypeTokensNT(tk2, cl, pModule, pModule);
+ if (hr == S_OK)
+ {
+ *pMDOut = pModule->LookupMethodDef(tk);
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ //
+ // Next try the old syntax: global .__ctor
+ //
+ IfFailThrow(pInternalImport->EnumGlobalFunctionsInit(&hEnumMethod));
+
+ while (pInternalImport->EnumNext(&hEnumMethod, &tk))
+ {
+ _ASSERTE(TypeFromToken(tk) == mdtMethodDef);
+ DWORD dwMemberAttrs;
+ IfFailThrow(pInternalImport->GetMethodDefProps(tk, &dwMemberAttrs));
+
+ if (IsMdSpecialName(dwMemberAttrs))
+ {
+ ULONG cSig;
+ PCCOR_SIGNATURE pSig;
+ LPCSTR pName;
+ IfFailThrow(pInternalImport->GetNameAndSigOfMethodDef(tk, &pSig, &cSig, &pName));
+
+ const char *pBaseName = ".__ctor";
+ int ncBaseName = (int)strlen(pBaseName);
+ int nc = (int)strlen(pName);
+ if (nc >= ncBaseName && 0 == strcmp(pName + nc - ncBaseName, pBaseName))
+ {
+
+ MetaSig msig(pSig, cSig, pModule, &typeContext);
+
+ // Looking for the prototype Ptr VC __ctor(Ptr VC, ByRef VC);
+ if (msig.NumFixedArgs() == 2)
+ {
+ if (msig.GetReturnType() == ELEMENT_TYPE_PTR)
+ {
+ SigPointer spret = msig.GetReturnProps();
+ IfFailThrow(spret.GetElemType(NULL));
+ CorElementType eType;
+ IfFailThrow(spret.GetElemType(&eType));
+ if (eType == ELEMENT_TYPE_VALUETYPE)
+ {
+ mdToken tk0;
+ IfFailThrow(spret.GetToken(&tk0));
+ hr = CompareTypeTokensNT(tk0, cl, pModule, pModule);
+ if (FAILED(hr))
+ {
+ pInternalImport->EnumClose(&hEnumMethod);
+ ThrowHR(hr);
+ }
+
+ if (hr == S_OK)
+ {
+ if (msig.NextArg() == ELEMENT_TYPE_PTR)
+ {
+ SigPointer sp1 = msig.GetArgProps();
+ IfFailThrow(sp1.GetElemType(NULL));
+ IfFailThrow(sp1.GetElemType(&eType));
+ if (eType == ELEMENT_TYPE_VALUETYPE)
+ {
+ mdToken tk1;
+ IfFailThrow(sp1.GetToken(&tk1));
+ hr = (tk1 == tk0) ? S_OK : CompareTypeTokensNT(tk1, cl, pModule, pModule);
+ if (FAILED(hr))
+ {
+ pInternalImport->EnumClose(&hEnumMethod);
+ ThrowHR(hr);
+ }
+
+ if (hr == S_OK)
+ {
+ if (msig.NextArg() == ELEMENT_TYPE_PTR &&
+ msig.GetArgProps().HasCustomModifier(pModule, "Microsoft.VisualC.IsCXXReferenceModifier", ELEMENT_TYPE_CMOD_OPT))
+ {
+ SigPointer sp2 = msig.GetArgProps();
+ IfFailThrow(sp2.GetElemType(NULL));
+ IfFailThrow(sp2.GetElemType(&eType));
+ if (eType == ELEMENT_TYPE_VALUETYPE)
+ {
+ mdToken tk2;
+ IfFailThrow(sp2.GetToken(&tk2));
+
+ hr = (tk2 == tk0) ? S_OK : CompareTypeTokensNT(tk2, cl, pModule, pModule);
+ if (hr == S_OK)
+ {
+ *pMDOut = pModule->LookupMethodDef(tk);
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+
+//-------------------------------------------------------------------------------------
+// Return the destructor for a VC class (if any exists)
+//-------------------------------------------------------------------------------------
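+// Two shapes are recognized, in this order:
+//     void <MarshalDestroy>(Ptr VC)         // new syntax: special-name method on the type
+//     void .__dtor(Ptr VC)                  // old syntax: global function
+// *pMDOut is left NULL if neither is found.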
+void FindDtor(Module *pModule, MethodTable *pMT, MethodDesc **pMDOut)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS; // CompareTypeTokens may trigger GC
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ *pMDOut = NULL;
+
+ HRESULT hr;
+ mdMethodDef tk;
+ mdTypeDef cl = pMT->GetCl();
+ TypeHandle th = TypeHandle(pMT);
+ SigTypeContext typeContext(th);
+
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+ MDEnumHolder hEnumMethod(pInternalImport);
+
+ //
+ // First try for the new syntax: <MarshalDestroy>
+ //
+ IfFailThrow(pInternalImport->EnumInit(mdtMethodDef, cl, &hEnumMethod));
+
+ while (pInternalImport->EnumNext(&hEnumMethod, &tk))
+ {
+ _ASSERTE(TypeFromToken(tk) == mdtMethodDef);
+ DWORD dwMemberAttrs;
+ IfFailThrow(pInternalImport->GetMethodDefProps(tk, &dwMemberAttrs));
+
+ if (IsMdSpecialName(dwMemberAttrs))
+ {
+ ULONG cSig;
+ PCCOR_SIGNATURE pSig;
+ LPCSTR pName;
+ IfFailThrow(pInternalImport->GetNameAndSigOfMethodDef(tk, &pSig, &cSig, &pName));
+
+ const char *pBaseName = "<MarshalDestroy>";
+ int ncBaseName = (int)strlen(pBaseName);
+ int nc = (int)strlen(pName);
+ if (nc >= ncBaseName && 0 == strcmp(pName + nc - ncBaseName, pBaseName))
+ {
+ MetaSig msig(pSig, cSig, pModule, &typeContext);
+
+ // Looking for the prototype void <MarshalDestroy>(Ptr VC);
+ if (msig.NumFixedArgs() == 1)
+ {
+ if (msig.GetReturnType() == ELEMENT_TYPE_VOID)
+ {
+ if (msig.NextArg() == ELEMENT_TYPE_PTR)
+ {
+ SigPointer sp1 = msig.GetArgProps();
+ IfFailThrow(sp1.GetElemType(NULL));
+ CorElementType eType;
+ IfFailThrow(sp1.GetElemType(&eType));
+ if (eType == ELEMENT_TYPE_VALUETYPE)
+ {
+ mdToken tk1;
+ IfFailThrow(sp1.GetToken(&tk1));
+
+ hr = CompareTypeTokensNT(tk1, cl, pModule, pModule);
+ IfFailThrow(hr);
+
+ if (hr == S_OK)
+ {
+ *pMDOut = pModule->LookupMethodDef(tk);
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+
+ //
+ // Next try the old syntax: global .__dtor
+ //
+ IfFailThrow(pInternalImport->EnumGlobalFunctionsInit(&hEnumMethod));
+
+ while (pInternalImport->EnumNext(&hEnumMethod, &tk))
+ {
+ _ASSERTE(TypeFromToken(tk) == mdtMethodDef);
+ ULONG cSig;
+ PCCOR_SIGNATURE pSig;
+ LPCSTR pName;
+ IfFailThrow(pInternalImport->GetNameAndSigOfMethodDef(tk, &pSig, &cSig, &pName));
+
+ const char *pBaseName = ".__dtor";
+ int ncBaseName = (int)strlen(pBaseName);
+ int nc = (int)strlen(pName);
+ if (nc >= ncBaseName && 0 == strcmp(pName + nc - ncBaseName, pBaseName))
+ {
+ MetaSig msig(pSig, cSig, pModule, &typeContext);
+
+ // Looking for the prototype void __dtor(Ptr VC);
+ if (msig.NumFixedArgs() == 1)
+ {
+ if (msig.GetReturnType() == ELEMENT_TYPE_VOID)
+ {
+ if (msig.NextArg() == ELEMENT_TYPE_PTR)
+ {
+ SigPointer sp1 = msig.GetArgProps();
+ IfFailThrow(sp1.GetElemType(NULL));
+ CorElementType eType;
+ IfFailThrow(sp1.GetElemType(&eType));
+ if (eType == ELEMENT_TYPE_VALUETYPE)
+ {
+ mdToken tk1;
+ IfFailThrow(sp1.GetToken(&tk1));
+ hr = CompareTypeTokensNT(tk1, cl, pModule, pModule);
+ if (FAILED(hr))
+ {
+ pInternalImport->EnumClose(&hEnumMethod);
+ ThrowHR(hr);
+ }
+
+ if (hr == S_OK)
+ {
+ *pMDOut = pModule->LookupMethodDef(tk);
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+//==========================================================================
+// Sets up the custom marshaler information.
+//==========================================================================
+CustomMarshalerHelper *SetupCustomMarshalerHelper(LPCUTF8 strMarshalerTypeName, DWORD cMarshalerTypeNameBytes, LPCUTF8 strCookie, DWORD cCookieStrBytes, Assembly *pAssembly, TypeHandle hndManagedType)
+{
+#ifndef CROSSGEN_COMPILE
+ CONTRACT (CustomMarshalerHelper*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pAssembly));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ EEMarshalingData *pMarshalingData = NULL;
+
+ // Retrieve the marshalling data for the current app domain.
+ if (pAssembly->IsDomainNeutral())
+ {
+ // If the assembly is shared, then it should only reference other domain neutral assemblies.
+ // This assumption MUST be true for the current custom marshaling scheme to work.
+ // This implies that the type of the managed parameter must be a shared type.
+ _ASSERTE(hndManagedType.GetAssembly()->IsDomainNeutral());
+
+ // The assembly is shared so we need to use the system domain's marshaling data.
+ pMarshalingData = SystemDomain::System()->GetMarshalingData();
+ }
+ else
+ {
+ // The assembly is not shared so we use the current app domain's marshaling data.
+ pMarshalingData = GetThread()->GetDomain()->GetMarshalingData();
+ }
+
+ // Retrieve the custom marshaler helper from the EE marshaling data.
+ RETURN pMarshalingData->GetCustomMarshalerHelper(pAssembly, hndManagedType, strMarshalerTypeName, cMarshalerTypeNameBytes, strCookie, cCookieStrBytes);
+#else
+ _ASSERTE(false);
+ RETURN NULL;
+#endif
+}
+
+//==========================================================================
+// Return: S_OK if there is valid compressed data to read
+// S_FALSE if at end of data block
+// E_FAIL if corrupt data found
+//==========================================================================
+HRESULT CheckForCompressedData(PCCOR_SIGNATURE pvNativeTypeStart, PCCOR_SIGNATURE pvNativeType, ULONG cbNativeType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pvNativeTypeStart + cbNativeType == pvNativeType)
+ { // end of data block
+ return S_FALSE;
+ }
+
+ ULONG ulDummy;
+ BYTE const *pbDummy;
+ return CPackedLen::SafeGetLength((BYTE const *)pvNativeType,
+ (BYTE const *)pvNativeTypeStart + cbNativeType,
+ &ulDummy,
+ &pbDummy);
+}
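+
+// Callers below follow a common pattern (an illustrative sketch):
+//
+//     hr = CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType);
+//     if (FAILED(hr))
+//         return FALSE;                               // corrupt data
+//     if (hr == S_OK)
+//         value = CorSigUncompressData(pvNativeType); // S_FALSE: end of blob, keep defaults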
+
+//==========================================================================
+// Parse and validate the NATIVE_TYPE_ metadata.
+// Note! NATIVE_TYPE_ metadata is optional. If it's not present, this
+// routine sets NativeTypeParamInfo->m_NativeType to NATIVE_TYPE_DEFAULT.
+//==========================================================================
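+// For reference, the NATIVE_TYPE_CUSTOMMARSHALER blob parsed below consists of
+// the leading type byte followed by four length-prefixed strings:
+//
+//     NATIVE_TYPE_CUSTOMMARSHALER
+//     <typelib guid>            (skipped)
+//     <native type name>        (skipped)
+//     <custom marshaler name>   -> m_strCMMarshalerTypeName
+//     <cookie>                  -> m_strCMCookie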
+BOOL ParseNativeTypeInfo(NativeTypeParamInfo* pParamInfo, PCCOR_SIGNATURE pvNativeType, ULONG cbNativeType);
+
+BOOL ParseNativeTypeInfo(mdToken token,
+ IMDInternalImport* pScope,
+ NativeTypeParamInfo* pParamInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PCCOR_SIGNATURE pvNativeType;
+ ULONG cbNativeType;
+
+ if (token == mdParamDefNil || pScope->GetFieldMarshal(token, &pvNativeType, &cbNativeType) != S_OK)
+ return TRUE;
+
+ return ParseNativeTypeInfo(pParamInfo, pvNativeType, cbNativeType);
+}
+
+BOOL ParseNativeTypeInfo(NativeTypeParamInfo* pParamInfo,
+ PCCOR_SIGNATURE pvNativeType,
+ ULONG cbNativeType)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr;
+
+ PCCOR_SIGNATURE pvNativeTypeStart = pvNativeType;
+ PCCOR_SIGNATURE pvNativeTypeEnd = pvNativeType + cbNativeType;
+
+ if (cbNativeType == 0)
+ return FALSE; // Zero-length NATIVE_TYPE block
+
+ pParamInfo->m_NativeType = (CorNativeType)*(pvNativeType++);
+ ULONG strLen = 0;
+
+ // Retrieve any extra information associated with the native type.
+ switch (pParamInfo->m_NativeType)
+ {
+#ifdef FEATURE_COMINTEROP
+ case NATIVE_TYPE_INTF:
+ case NATIVE_TYPE_IUNKNOWN:
+ case NATIVE_TYPE_IDISPATCH:
+ if (S_OK != CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType))
+ return TRUE;
+
+ pParamInfo->m_IidParamIndex = (int)CorSigUncompressData(pvNativeType);
+ break;
+#endif
+
+ case NATIVE_TYPE_FIXEDARRAY:
+
+ if (S_OK != CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType))
+ return FALSE;
+
+ pParamInfo->m_Additive = CorSigUncompressData(pvNativeType);
+
+ if (S_OK != CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType))
+ return TRUE;
+
+ pParamInfo->m_ArrayElementType = (CorNativeType)CorSigUncompressData(pvNativeType);
+ break;
+
+ case NATIVE_TYPE_FIXEDSYSSTRING:
+ if (S_OK != CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType))
+ return FALSE;
+
+ pParamInfo->m_Additive = CorSigUncompressData(pvNativeType);
+ break;
+
+#ifdef FEATURE_COMINTEROP
+ case NATIVE_TYPE_SAFEARRAY:
+ // Check for the safe array element type.
+ hr = CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType);
+ if (FAILED(hr))
+ return FALSE;
+
+ if (hr == S_OK)
+ pParamInfo->m_SafeArrayElementVT = (VARTYPE) (CorSigUncompressData(/*modifies*/pvNativeType));
+
+        // Extract the name of the record type.
+ if (S_OK == CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType))
+ {
+ hr = CPackedLen::SafeGetData((BYTE const *)pvNativeType,
+ (BYTE const *)pvNativeTypeEnd,
+ &strLen,
+ (BYTE const **)&pvNativeType);
+ if (FAILED(hr))
+ {
+ return FALSE;
+ }
+
+ pParamInfo->m_strSafeArrayUserDefTypeName = (LPUTF8)pvNativeType;
+ pParamInfo->m_cSafeArrayUserDefTypeNameBytes = strLen;
+ _ASSERTE((ULONG)(pvNativeType + strLen - pvNativeTypeStart) == cbNativeType);
+ }
+ break;
+
+#endif // FEATURE_COMINTEROP
+
+ case NATIVE_TYPE_ARRAY:
+ hr = CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType);
+ if (FAILED(hr))
+ return FALSE;
+
+ if (hr == S_OK)
+ pParamInfo->m_ArrayElementType = (CorNativeType) (CorSigUncompressData(/*modifies*/pvNativeType));
+
+ // Check for "sizeis" param index
+ hr = CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType);
+ if (FAILED(hr))
+ return FALSE;
+
+ if (hr == S_OK)
+ {
+ pParamInfo->m_SizeIsSpecified = TRUE;
+ pParamInfo->m_CountParamIdx = (UINT16)(CorSigUncompressData(/*modifies*/pvNativeType));
+
+                // If a "sizeis" param index is present, the defaults for the multiplier and additive change
+ pParamInfo->m_Multiplier = 1;
+ pParamInfo->m_Additive = 0;
+
+ // Check for "sizeis" additive
+ hr = CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType);
+ if (FAILED(hr))
+ return FALSE;
+
+ if (hr == S_OK)
+ {
+ // Extract the additive.
+ pParamInfo->m_Additive = (DWORD)CorSigUncompressData(/*modifies*/pvNativeType);
+
+ // Check to see if the flags field is present.
+ hr = CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType);
+ if (FAILED(hr))
+ return FALSE;
+
+ if (hr == S_OK)
+ {
+ // If the param index specified flag isn't set then we need to reset the
+ // multiplier to 0 to indicate no size param index was specified.
+                        NativeTypeArrayFlags flags = (NativeTypeArrayFlags)CorSigUncompressData(/*modifies*/pvNativeType);
+ if (!(flags & ntaSizeParamIndexSpecified))
+ pParamInfo->m_Multiplier = 0;
+ }
+ }
+ }
+
+ break;
+
+ case NATIVE_TYPE_CUSTOMMARSHALER:
+ // Skip the typelib guid.
+ if (S_OK != CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType))
+ return FALSE;
+
+ if (FAILED(CPackedLen::SafeGetData(pvNativeType, pvNativeTypeEnd, &strLen, (void const **)&pvNativeType)))
+ return FALSE;
+
+ pvNativeType += strLen;
+ _ASSERTE((ULONG)(pvNativeType - pvNativeTypeStart) < cbNativeType);
+
+ // Skip the name of the native type.
+ if (S_OK != CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType))
+ return FALSE;
+
+ if (FAILED(CPackedLen::SafeGetData(pvNativeType, pvNativeTypeEnd, &strLen, (void const **)&pvNativeType)))
+ return FALSE;
+
+ pvNativeType += strLen;
+ _ASSERTE((ULONG)(pvNativeType - pvNativeTypeStart) < cbNativeType);
+
+ // Extract the name of the custom marshaler.
+ if (S_OK != CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType))
+ return FALSE;
+
+ if (FAILED(CPackedLen::SafeGetData(pvNativeType, pvNativeTypeEnd, &strLen, (void const **)&pvNativeType)))
+ return FALSE;
+
+ pParamInfo->m_strCMMarshalerTypeName = (LPUTF8)pvNativeType;
+ pParamInfo->m_cCMMarshalerTypeNameBytes = strLen;
+ pvNativeType += strLen;
+ _ASSERTE((ULONG)(pvNativeType - pvNativeTypeStart) < cbNativeType);
+
+ // Extract the cookie string.
+ if (S_OK != CheckForCompressedData(pvNativeTypeStart, pvNativeType, cbNativeType))
+ return FALSE;
+
+ if (FAILED(CPackedLen::SafeGetData(pvNativeType, pvNativeTypeEnd, &strLen, (void const **)&pvNativeType)))
+ return FALSE;
+
+ pParamInfo->m_strCMCookie = (LPUTF8)pvNativeType;
+ pParamInfo->m_cCMCookieStrBytes = strLen;
+ _ASSERTE((ULONG)(pvNativeType + strLen - pvNativeTypeStart) == cbNativeType);
+ break;
+
+ default:
+ break;
+ }
+
+ return TRUE;
+}
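+
+// Illustrative sketch (not part of the runtime): what this parser sees for a
+// parameter decorated with, say,
+//     [MarshalAs(UnmanagedType.LPArray, ArraySubType=UnmanagedType.I4, SizeParamIndex=1)]
+// The byte values below follow the CorNativeType encoding in ECMA-335 II.23.4
+// and are shown for illustration only.
+//
+//     static const BYTE blob[] = { NATIVE_TYPE_ARRAY,   // outer native type
+//                                  NATIVE_TYPE_I4,      // array element type
+//                                  0x01,                // "sizeis" param index (compressed)
+//                                  0x00,                // additive (compressed)
+//                                  0x01 };              // ntaSizeParamIndexSpecified
+//     NativeTypeParamInfo info;
+//     if (ParseNativeTypeInfo(&info, blob, sizeof(blob)))
+//     {
+//         // info.m_NativeType == NATIVE_TYPE_ARRAY
+//         // info.m_SizeIsSpecified == TRUE, info.m_CountParamIdx == 1
+//         // info.m_Multiplier == 1, info.m_Additive == 0
+//     }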
+
+//==========================================================================
+// Determines whether *pManagedElemType is really normalized (i.e. differs
+// from what sigPtr points to modulo generic instantiation). If it is the
+// case, all types that have been normalized away are checked for valid
+// managed/unmanaged type combination, and *pNativeType is updated to contain
+// the native type of the primitive type field inside. On error (a generic
+// type is encountered or managed/unmanaged type mismatch) or non-default
+// native type of the primitive type inside, *pManagedElemType is un-normalized
+// so that the calling code can deal with the situation in its own way.
+//==========================================================================
+void VerifyAndAdjustNormalizedType(
+ Module * pModule,
+ SigPointer sigPtr,
+ const SigTypeContext * pTypeContext,
+ CorElementType * pManagedElemType,
+ CorNativeType * pNativeType)
+{
+ CorElementType sigElemType = sigPtr.PeekElemTypeClosed(pModule, pTypeContext);
+
+ if (*pManagedElemType != sigElemType)
+ {
+ // Normalized element type differs from closed element type, which means that
+ // normalization has occurred.
+ _ASSERTE(sigElemType == ELEMENT_TYPE_VALUETYPE);
+
+ // Now we know that this is a normalized value type - we have to verify the removed
+ // value type(s) and get to the true primitive type inside.
+ TypeHandle th = sigPtr.GetTypeHandleThrowing(pModule,
+ pTypeContext,
+ ClassLoader::LoadTypes,
+ CLASS_LOAD_UNRESTORED,
+ TRUE);
+ _ASSERTE(!th.IsNull() && !th.IsTypeDesc());
+
+ CorNativeType ntype = *pNativeType;
+
+ if (!th.AsMethodTable()->IsTruePrimitive() &&
+ !th.IsEnum())
+ {
+ // This is a trivial (yet non-primitive) value type that has been normalized.
+ // Loop until we eventually hit the primitive type or enum inside.
+ do
+ {
+ if (th.HasInstantiation())
+ {
+ // generic structures are either not marshalable or special-cased - the caller needs to know either way
+ *pManagedElemType = sigElemType;
+ return;
+ }
+
+ // verify the native type of the value type (must be default or Struct)
+ if (!(ntype == NATIVE_TYPE_DEFAULT || ntype == NATIVE_TYPE_STRUCT))
+ {
+ *pManagedElemType = sigElemType;
+ return;
+ }
+
+ MethodTable *pMT = th.GetMethodTable();
+ _ASSERTE(pMT != NULL && pMT->IsValueType() && pMT->GetNumInstanceFields() == 1);
+
+ // get the only instance field
+ PTR_FieldDesc fieldDesc = pMT->GetApproxFieldDescListRaw();
+
+ // retrieve the MarshalAs of the field
+ NativeTypeParamInfo paramInfo;
+ if (!ParseNativeTypeInfo(fieldDesc->GetMemberDef(), th.GetModule()->GetMDImport(), &paramInfo))
+ {
+ *pManagedElemType = sigElemType;
+ return;
+ }
+
+ ntype = paramInfo.m_NativeType;
+
+ th = fieldDesc->GetApproxFieldTypeHandleThrowing();
+ }
+ while (!th.IsTypeDesc() &&
+ !th.AsMethodTable()->IsTruePrimitive() &&
+ !th.IsEnum());
+
+ // now ntype contains the native type of *pManagedElemType
+ if (ntype == NATIVE_TYPE_DEFAULT)
+ {
+ // Let's update the caller's native type with default type only.
+ // Updating with a non-default native type that is not allowed
+ // for the given managed type would result in confusing exception
+ // messages.
+ *pNativeType = ntype;
+ }
+ else
+ {
+ *pManagedElemType = sigElemType;
+ }
+ }
+ }
+}
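+
+// Illustrative sketch (hypothetical types, not in this file): given
+//
+//     struct Inner { int m_value; };     // normalized to ELEMENT_TYPE_I4
+//     struct Outer { Inner m_inner; };   // also normalized to ELEMENT_TYPE_I4
+//
+// a signature slot of type Outer reaches this function with *pManagedElemType ==
+// ELEMENT_TYPE_I4 while sigPtr still reports ELEMENT_TYPE_VALUETYPE. The loop
+// above walks Outer -> Inner -> int, checking that every field on the way is
+// marshaled as default or NATIVE_TYPE_STRUCT. If the innermost field has default
+// marshaling, *pNativeType stays NATIVE_TYPE_DEFAULT and the caller marshals the
+// slot as a plain I4; otherwise *pManagedElemType is reset to ELEMENT_TYPE_VALUETYPE
+// so the caller handles the slot as a value type.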
+
+VOID ThrowInteropParamException(UINT resID, UINT paramIdx)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SString paramString;
+ if (paramIdx == 0)
+ paramString.Set(W("return value"));
+ else
+ paramString.Printf(W("parameter #%u"), paramIdx);
+
+ SString errorString(W("Unknown error."));
+ errorString.LoadResource(CCompRC::Error, resID);
+
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_BADMARSHAL_ERROR_MSG, paramString.GetUnicode(), errorString.GetUnicode());
+}
+
+//===============================================================
+// Collects paraminfo's in an indexed array so that:
+//
+// aParams[0] == param token for return value
+// aParams[1] == param token for argument #1...
+// aParams[numargs] == param token for argument #n...
+//
+// If no param token exists, the corresponding array element
+// is set to mdParamDefNil.
+//
+// Inputs:
+// pInternalImport -- ifc for metadata api
+// md -- token of method. If token is mdMethodDefNil,
+// all aParams elements will be set to mdParamDefNil.
+// numargs -- # of arguments in mdMethod
+// aParams -- uninitialized array with numargs+1 elements.
+// on exit, will be filled with param tokens.
+//===============================================================
+VOID CollateParamTokens(IMDInternalImport *pInternalImport, mdMethodDef md, ULONG numargs, mdParamDef *aParams)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for (ULONG i = 0; i < numargs + 1; i++)
+ aParams[i] = mdParamDefNil;
+
+ if (md != mdMethodDefNil)
+ {
+ MDEnumHolder hEnumParams(pInternalImport);
+ HRESULT hr = pInternalImport->EnumInit(mdtParamDef, md, &hEnumParams);
+ if (FAILED(hr))
+ {
+ // no param info: nothing left to do here
+ }
+ else
+ {
+ mdParamDef CurrParam = mdParamDefNil;
+ while (pInternalImport->EnumNext(&hEnumParams, &CurrParam))
+ {
+ USHORT usSequence;
+ DWORD dwAttr;
+ LPCSTR szParamName_Ignore;
+ if (SUCCEEDED(pInternalImport->GetParamDefProps(CurrParam, &usSequence, &dwAttr, &szParamName_Ignore)))
+ {
+ if (usSequence > numargs)
+ { // Invalid argument index
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ if (aParams[usSequence] != mdParamDefNil)
+                    { // Duplicate argument index
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ aParams[usSequence] = CurrParam;
+ }
+ }
+ }
+ }
+}
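+
+// Illustrative sketch (hypothetical caller, not in this file): callers allocate
+// numargs + 1 slots so that slot 0 can receive the return-value token.
+//
+//     ULONG numArgs = msig.NumFixedArgs();        // msig is an assumed MetaSig
+//     CQuickArray<mdParamDef> params;
+//     params.ReSizeThrows(numArgs + 1);
+//     CollateParamTokens(pInternalImport, md, numArgs, params.Ptr());
+//     mdParamDef retTok  = params[0];             // mdParamDefNil if absent
+//     mdParamDef arg1Tok = (numArgs > 0) ? params[1] : mdParamDefNil;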
+
+
+#ifdef FEATURE_COMINTEROP
+
+void *EventArgsMarshalingInfo::operator new(size_t size, LoaderHeap *pHeap)
+{
+ CONTRACT (void*)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pHeap));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ void* mem = pHeap->AllocMem(S_SIZE_T(size));
+
+ RETURN mem;
+}
+
+
+void EventArgsMarshalingInfo::operator delete(void *pMem)
+{
+ LIMITED_METHOD_CONTRACT;
+ // Instances of this class are always allocated on the loader heap so
+ // the delete operator has nothing to do.
+}
+
+EventArgsMarshalingInfo::EventArgsMarshalingInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+    // Create these on demand as we don't want to create the factories at NGEN time
+ m_pNCCEventArgsFactory = NULL;
+ m_pPCEventArgsFactory = NULL;
+
+ // Load the System.Collections.Specialized.NotifyCollectionChangedEventArgs class.
+ SString qualifiedNCCEventArgsTypeName(SString::Utf8, NCCEVENTARGS_ASM_QUAL_TYPE_NAME);
+ m_hndSystemNCCEventArgsType = TypeName::GetTypeFromAsmQualifiedName(qualifiedNCCEventArgsTypeName.GetUnicode(), FALSE);
+ _ASSERTE(!m_hndSystemNCCEventArgsType.IsNull() && "Cannot load System.Collections.Specialized.NotifyCollectionChangedEventArgs!");
+
+ // Load the System.ComponentModel.PropertyChangedEventArgs class.
+ SString qualifiedPCEventArgsTypeName(SString::Utf8, PCEVENTARGS_ASM_QUAL_TYPE_NAME);
+ m_hndSystemPCEventArgsType = TypeName::GetTypeFromAsmQualifiedName(qualifiedPCEventArgsTypeName.GetUnicode(), FALSE);
+ _ASSERTE(!m_hndSystemPCEventArgsType.IsNull() && "Cannot load System.ComponentModel.PropertyChangedEventArgs!");
+
+ // Load the NCCEventArgs marshaler class.
+ SString qualifiedNCCEventArgsMarshalerTypeName(SString::Utf8, NCCEVENTARGS_MARSHALER_ASM_QUAL_TYPE_NAME);
+ TypeHandle hndNCCEventArgsMarshalerType = TypeName::GetTypeFromAsmQualifiedName(qualifiedNCCEventArgsMarshalerTypeName.GetUnicode(), FALSE);
+
+ // Retrieve the method to convert a .NET NCCEventArgs to a WinRT NCCEventArgs.
+ m_pSystemNCCEventArgsToWinRTNCCEventArgsMD = MemberLoader::FindMethodByName(hndNCCEventArgsMarshalerType.GetMethodTable(), EVENTARGS_TO_WINRT_EVENTARGS_METH_NAME);
+ _ASSERTE(m_pSystemNCCEventArgsToWinRTNCCEventArgsMD && "Unable to find the marshaler method to convert a .NET NCCEventArgs to a WinRT NCCEventArgs!");
+
+ // Retrieve the method to convert a WinRT NCCEventArgs to a .NET NCCEventArgs.
+ m_pWinRTNCCEventArgsToSystemNCCEventArgsMD = MemberLoader::FindMethodByName(hndNCCEventArgsMarshalerType.GetMethodTable(), WINRT_EVENTARGS_TO_EVENTARGS_METH_NAME);
+ _ASSERTE(m_pWinRTNCCEventArgsToSystemNCCEventArgsMD && "Unable to find the marshaler method to convert a WinRT NCCEventArgs to a .NET NCCEventArgs!");
+
+ // Load the PCEventArgs marshaler class.
+ SString qualifiedPCEventArgsMarshalerTypeName(SString::Utf8, PCEVENTARGS_MARSHALER_ASM_QUAL_TYPE_NAME);
+ TypeHandle hndPCEventArgsMarshalerType = TypeName::GetTypeFromAsmQualifiedName(qualifiedPCEventArgsMarshalerTypeName.GetUnicode(), FALSE);
+
+ // Retrieve the method to convert a .NET PCEventArgs to a WinRT PCEventArgs.
+ m_pSystemPCEventArgsToWinRTPCEventArgsMD = MemberLoader::FindMethodByName(hndPCEventArgsMarshalerType.GetMethodTable(), EVENTARGS_TO_WINRT_EVENTARGS_METH_NAME);
+ _ASSERTE(m_pSystemPCEventArgsToWinRTPCEventArgsMD && "Unable to find the marshaler method to convert a .NET PCEventArgs to a WinRT PCEventArgs!");
+
+ // Retrieve the method to convert a WinRT PCEventArgs to a .NET PCEventArgs.
+ m_pWinRTPCEventArgsToSystemPCEventArgsMD = MemberLoader::FindMethodByName(hndPCEventArgsMarshalerType.GetMethodTable(), WINRT_EVENTARGS_TO_EVENTARGS_METH_NAME);
+ _ASSERTE(m_pWinRTPCEventArgsToSystemPCEventArgsMD && "Unable to find the marshaler method to convert a WinRT PCEventArgs to a .NET PCEventArgs!");
+}
+
+EventArgsMarshalingInfo::~EventArgsMarshalingInfo()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pNCCEventArgsFactory)
+ {
+ SafeRelease(m_pNCCEventArgsFactory);
+ m_pNCCEventArgsFactory = NULL;
+ }
+
+ if (m_pPCEventArgsFactory)
+ {
+ SafeRelease(m_pPCEventArgsFactory);
+ m_pPCEventArgsFactory = NULL;
+ }
+}
+
+void *UriMarshalingInfo::operator new(size_t size, LoaderHeap *pHeap)
+{
+ CONTRACT (void*)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pHeap));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ void* mem = pHeap->AllocMem(S_SIZE_T(size));
+
+ RETURN mem;
+}
+
+
+void UriMarshalingInfo::operator delete(void *pMem)
+{
+ LIMITED_METHOD_CONTRACT;
+ // Instances of this class are always allocated on the loader heap so
+ // the delete operator has nothing to do.
+}
+
+UriMarshalingInfo::UriMarshalingInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+    // Create this on demand as we don't want to create the factory at NGEN time
+ m_pUriFactory = NULL;
+
+ // Load the System.Uri class.
+ SString qualifiedUriTypeName(SString::Utf8, URI_ASM_QUAL_TYPE_NAME);
+ m_hndSystemUriType = TypeName::GetTypeFromAsmQualifiedName(qualifiedUriTypeName.GetUnicode(), FALSE);
+ _ASSERTE(!m_hndSystemUriType.IsNull() && "Cannot load System.Uri!");
+
+ m_SystemUriOriginalStringGetterMD = MemberLoader::FindPropertyMethod(m_hndSystemUriType.GetMethodTable(), ORIGINALSTRING_PROPERTY_NAME, PropertyGet);
+ _ASSERTE(m_SystemUriOriginalStringGetterMD && "Unable to find the System.Uri.get_OriginalString()!");
+ _ASSERTE(!m_SystemUriOriginalStringGetterMD->IsStatic() && "System.Uri.get_OriginalString() is static!");
+
+ // Windows.Foundation.Uri..ctor(string) and System.Uri..ctor(string)
+ MethodTable* pSystemUriMT = m_hndSystemUriType.AsMethodTable();
+ m_SystemUriCtorMD = MemberLoader::FindConstructor(pSystemUriMT, &gsig_IM_Str_RetVoid);
+ _ASSERTE(m_SystemUriCtorMD && "Unable to find the constructor on System.Uri that takes a string!");
+ _ASSERTE(m_SystemUriCtorMD->IsClassConstructorOrCtor() && !m_SystemUriCtorMD->IsStatic() && "The method retrieved from System.Uri is not a constructor!");
+}
+
+UriMarshalingInfo::~UriMarshalingInfo()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+#ifndef CROSSGEN_COMPILE
+ if (m_pUriFactory)
+ {
+ SafeRelease(m_pUriFactory);
+ m_pUriFactory = NULL;
+ }
+#endif
+}
+
+OleColorMarshalingInfo::OleColorMarshalingInfo() :
+ m_OleColorToSystemColorMD(NULL),
+ m_SystemColorToOleColorMD(NULL)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SString qualifiedColorTranslatorTypeName(SString::Utf8, COLOR_TRANSLATOR_ASM_QUAL_TYPE_NAME);
+
+ // Load the color translator class.
+ TypeHandle hndColorTranslatorType = TypeName::GetTypeFromAsmQualifiedName(qualifiedColorTranslatorTypeName.GetUnicode(), FALSE);
+
+
+ SString qualifiedColorTypeName(SString::Utf8, COLOR_ASM_QUAL_TYPE_NAME);
+ // Load the color class.
+ m_hndColorType = TypeName::GetTypeFromAsmQualifiedName(qualifiedColorTypeName.GetUnicode(), FALSE);
+
+ // Retrieve the method to convert an OLE_COLOR to a System.Drawing.Color.
+ m_OleColorToSystemColorMD = MemberLoader::FindMethodByName(hndColorTranslatorType.GetMethodTable(), OLECOLOR_TO_SYSTEMCOLOR_METH_NAME);
+ _ASSERTE(m_OleColorToSystemColorMD && "Unable to find the translator method to convert an OLE_COLOR to a System.Drawing.Color!");
+ _ASSERTE(m_OleColorToSystemColorMD->IsStatic() && "The translator method to convert an OLE_COLOR to a System.Drawing.Color must be static!");
+
+ // Retrieve the method to convert a System.Drawing.Color to an OLE_COLOR.
+ m_SystemColorToOleColorMD = MemberLoader::FindMethodByName(hndColorTranslatorType.GetMethodTable(), SYSTEMCOLOR_TO_OLECOLOR_METH_NAME);
+ _ASSERTE(m_SystemColorToOleColorMD && "Unable to find the translator method to convert a System.Drawing.Color to an OLE_COLOR!");
+ _ASSERTE(m_SystemColorToOleColorMD->IsStatic() && "The translator method to convert a System.Drawing.Color to an OLE_COLOR must be static!");
+}
+
+
+void *OleColorMarshalingInfo::operator new(size_t size, LoaderHeap *pHeap)
+{
+ CONTRACT (void*)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pHeap));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ void* mem = pHeap->AllocMem(S_SIZE_T(size));
+
+ RETURN mem;
+}
+
+
+void OleColorMarshalingInfo::operator delete(void *pMem)
+{
+ LIMITED_METHOD_CONTRACT;
+ // Instances of this class are always allocated on the loader heap so
+ // the delete operator has nothing to do.
+}
+
+#endif // FEATURE_COMINTEROP
+
+EEMarshalingData::EEMarshalingData(BaseDomain *pDomain, LoaderHeap *pHeap, CrstBase *pCrst) :
+ m_pHeap(pHeap),
+ m_pDomain(pDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LockOwner lock = {pCrst, IsOwnerOfCrst};
+#ifndef CROSSGEN_COMPILE
+ m_CMHelperHashtable.Init(INITIAL_NUM_CMHELPER_HASHTABLE_BUCKETS, &lock);
+ m_SharedCMHelperToCMInfoMap.Init(INITIAL_NUM_CMINFO_HASHTABLE_BUCKETS, &lock);
+#endif // CROSSGEN_COMPILE
+}
+
+
+EEMarshalingData::~EEMarshalingData()
+{
+ WRAPPER_NO_CONTRACT;
+
+ CustomMarshalerInfo *pCMInfo;
+
+ // <TODO>@TODO(DM): Remove the linked list of CMInfo's and instead hang the OBJECTHANDLE
+ // contained inside the CMInfo off the AppDomain directly. The AppDomain can have
+    // a list of tasks to do when it gets torn down and we could leverage that
+ // to release the object handles.</TODO>
+
+ // Walk through the linked list and delete all the custom marshaler info's.
+ while ((pCMInfo = m_pCMInfoList.RemoveHead()) != NULL)
+ delete pCMInfo;
+
+#ifdef FEATURE_COMINTEROP
+ if (m_pOleColorInfo)
+ {
+ delete m_pOleColorInfo;
+ m_pOleColorInfo = NULL;
+ }
+
+ if (m_pUriInfo)
+ {
+ delete m_pUriInfo;
+ m_pUriInfo = NULL;
+ }
+
+ if (m_pEventArgsInfo)
+ {
+ delete m_pEventArgsInfo;
+ m_pEventArgsInfo = NULL;
+ }
+#endif
+}
+
+
+void *EEMarshalingData::operator new(size_t size, LoaderHeap *pHeap)
+{
+ CONTRACT (void*)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pHeap));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ void* mem = pHeap->AllocMem(S_SIZE_T(sizeof(EEMarshalingData)));
+
+ RETURN mem;
+}
+
+
+void EEMarshalingData::operator delete(void *pMem)
+{
+ LIMITED_METHOD_CONTRACT;
+ // Instances of this class are always allocated on the loader heap so
+ // the delete operator has nothing to do.
+}
+
+#ifndef CROSSGEN_COMPILE
+
+CustomMarshalerHelper *EEMarshalingData::GetCustomMarshalerHelper(Assembly *pAssembly, TypeHandle hndManagedType, LPCUTF8 strMarshalerTypeName, DWORD cMarshalerTypeNameBytes, LPCUTF8 strCookie, DWORD cCookieStrBytes)
+{
+ CONTRACT (CustomMarshalerHelper*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pAssembly));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ CustomMarshalerHelper *pCMHelper = NULL;
+ CustomMarshalerHelper* pNewCMHelper = NULL;
+ NewHolder<CustomMarshalerInfo> pNewCMInfo(NULL);
+
+ BOOL bSharedHelper = pAssembly->IsDomainNeutral();
+ TypeHandle hndCustomMarshalerType;
+
+ // Create the key that will be used to lookup in the hashtable.
+ EECMHelperHashtableKey Key(cMarshalerTypeNameBytes, strMarshalerTypeName, cCookieStrBytes, strCookie, hndManagedType.GetInstantiation(), bSharedHelper);
+
+ // Lookup the custom marshaler helper in the hashtable.
+ if (m_CMHelperHashtable.GetValue(&Key, (HashDatum*)&pCMHelper))
+ RETURN pCMHelper;
+
+ {
+ GCX_COOP();
+
+ // Validate the arguments.
+ _ASSERTE(strMarshalerTypeName && strCookie && !hndManagedType.IsNull());
+
+ // Append a NULL terminator to the marshaler type name.
+ SString strCMMarshalerTypeName(SString::Utf8, strMarshalerTypeName, cMarshalerTypeNameBytes);
+
+ // Load the custom marshaler class.
+ BOOL fNameIsAsmQualified = FALSE;
+ hndCustomMarshalerType = TypeName::GetTypeUsingCASearchRules(strCMMarshalerTypeName.GetUTF8NoConvert(), pAssembly, &fNameIsAsmQualified);
+
+ if (hndCustomMarshalerType.IsGenericTypeDefinition())
+ {
+ // Instantiate generic custom marshalers using the instantiation of the type being marshaled.
+ hndCustomMarshalerType = hndCustomMarshalerType.Instantiate(hndManagedType.GetInstantiation());
+ }
+
+ // Set the assembly to null to indicate that the custom marshaler name is assembly
+ // qualified.
+ if (fNameIsAsmQualified)
+ pAssembly = NULL;
+
+
+ if (bSharedHelper)
+ {
+ // Create the custom marshaler helper in the specified heap.
+ pNewCMHelper = new (m_pHeap) SharedCustomMarshalerHelper(pAssembly, hndManagedType, strMarshalerTypeName, cMarshalerTypeNameBytes, strCookie, cCookieStrBytes);
+ }
+ else
+ {
+ // Create the custom marshaler info in the specified heap.
+ pNewCMInfo = new (m_pHeap) CustomMarshalerInfo(m_pDomain, hndCustomMarshalerType, hndManagedType, strCookie, cCookieStrBytes);
+
+ // Create the custom marshaler helper in the specified heap.
+ pNewCMHelper = new (m_pHeap) NonSharedCustomMarshalerHelper(pNewCMInfo);
+ }
+ }
+
+ // Take the app domain lock before we insert the custom marshaler info into the hashtable.
+ {
+ BaseDomain::LockHolder lh(m_pDomain);
+
+ // Verify that the custom marshaler helper has not already been added by another thread.
+ if (m_CMHelperHashtable.GetValue(&Key, (HashDatum*)&pCMHelper))
+ {
+ RETURN pCMHelper;
+ }
+
+ // Add the custom marshaler helper to the hash table.
+ m_CMHelperHashtable.InsertValue(&Key, pNewCMHelper, FALSE);
+
+ // If we create the CM info, then add it to the linked list.
+ if (pNewCMInfo)
+ {
+ m_pCMInfoList.InsertHead(pNewCMInfo);
+ pNewCMInfo.SuppressRelease();
+ }
+
+ // Release the lock and return the custom marshaler info.
+ }
+
+ RETURN pNewCMHelper;
+}
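+
+// The function above follows a build-outside-lock, re-check-under-lock shape.
+// Reduced to a sketch (hypothetical names):
+//
+//     if (map.GetValue(&key, &existing))          // fast path, no lock taken
+//         return existing;
+//     candidate = CreateHelper();                 // may allocate and throw
+//     {
+//         BaseDomain::LockHolder lh(m_pDomain);
+//         if (map.GetValue(&key, &existing))      // another thread won the race
+//             return existing;
+//         map.InsertValue(&key, candidate);
+//     }
+//     return candidate;
+//
+// A losing candidate allocated on the loader heap is abandoned rather than
+// freed, which is acceptable: the loss is rare, small, and bounded.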
+
+CustomMarshalerInfo *EEMarshalingData::GetCustomMarshalerInfo(SharedCustomMarshalerHelper *pSharedCMHelper)
+{
+ CONTRACT (CustomMarshalerInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ CustomMarshalerInfo *pCMInfo = NULL;
+ NewHolder<CustomMarshalerInfo> pNewCMInfo(NULL);
+ TypeHandle hndCustomMarshalerType;
+
+ // Lookup the custom marshaler helper in the hashtable.
+ if (m_SharedCMHelperToCMInfoMap.GetValue(pSharedCMHelper, (HashDatum*)&pCMInfo))
+ RETURN pCMInfo;
+
+ // Append a NULL terminator to the marshaler type name.
+ CQuickArray<char> strCMMarshalerTypeName;
+ DWORD strLen = pSharedCMHelper->GetMarshalerTypeNameByteCount();
+    strCMMarshalerTypeName.ReSizeThrows(strLen + 1);
+ memcpy(strCMMarshalerTypeName.Ptr(), pSharedCMHelper->GetMarshalerTypeName(), strLen);
+ strCMMarshalerTypeName[strLen] = 0;
+
+ // Load the custom marshaler class.
+ hndCustomMarshalerType = TypeName::GetTypeUsingCASearchRules(strCMMarshalerTypeName.Ptr(), pSharedCMHelper->GetAssembly());
+ if (hndCustomMarshalerType.IsGenericTypeDefinition())
+ {
+ // Instantiate generic custom marshalers using the instantiation of the type being marshaled.
+ hndCustomMarshalerType = hndCustomMarshalerType.Instantiate(pSharedCMHelper->GetManagedType().GetInstantiation());
+ }
+
+ // Create the custom marshaler info in the specified heap.
+ pNewCMInfo = new (m_pHeap) CustomMarshalerInfo(m_pDomain,
+ hndCustomMarshalerType,
+ pSharedCMHelper->GetManagedType(),
+ pSharedCMHelper->GetCookieString(),
+ pSharedCMHelper->GetCookieStringByteCount());
+
+ {
+ // Take the app domain lock before we insert the custom marshaler info into the hashtable.
+ BaseDomain::LockHolder lh(m_pDomain);
+
+ // Verify that the custom marshaler info has not already been added by another thread.
+ if (m_SharedCMHelperToCMInfoMap.GetValue(pSharedCMHelper, (HashDatum*)&pCMInfo))
+ {
+ RETURN pCMInfo;
+ }
+
+ // Add the custom marshaler helper to the hash table.
+ m_SharedCMHelperToCMInfoMap.InsertValue(pSharedCMHelper, pNewCMInfo, FALSE);
+
+ // Add the custom marshaler into the linked list.
+ m_pCMInfoList.InsertHead(pNewCMInfo);
+
+ // Release the lock and return the custom marshaler info.
+ }
+
+ pNewCMInfo.SuppressRelease();
+ RETURN pNewCMInfo;
+}
+#endif // CROSSGEN_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+UriMarshalingInfo *EEMarshalingData::GetUriMarshalingInfo()
+{
+ CONTRACT (UriMarshalingInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ if (m_pUriInfo == NULL)
+ {
+ UriMarshalingInfo *pUriInfo = new (m_pHeap) UriMarshalingInfo();
+
+ if (InterlockedCompareExchangeT(&m_pUriInfo, pUriInfo, NULL) != NULL)
+ {
+ // Another thread beat us to it. Delete on UriMarshalingInfo is an empty operation
+ // which is OK, since the possible leak is rare, small, and constant. This is the same
+ // pattern as in code:GetCustomMarshalerInfo.
+ delete pUriInfo;
+ }
+ }
+
+#ifdef _DEBUG
+ BaseDomain *pUriDomain = m_pUriInfo->GetSystemUriType().GetDomain();
+ if (pUriDomain != m_pDomain)
+ {
+ // Make sure that Uri marshaling data is initialized in its own (shared) domain as well.
+ // This allows us to perform quick checks in code:EEMarshalingData.IsUriHelperMethod.
+ (void) pUriDomain->GetMarshalingData()->GetUriMarshalingInfo();
+ }
+#endif // _DEBUG
+
+ RETURN m_pUriInfo;
+}
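+
+// The lazy-publication idiom above, reduced to its core (sketch, assuming a
+// pointer-sized member m_pInfo and a no-op operator delete):
+//
+//     T *pNew = new (m_pHeap) T();
+//     if (InterlockedCompareExchangeT(&m_pInfo, pNew, NULL) != NULL)
+//         delete pNew;       // lost the race; the loader-heap block is abandoned
+//     return m_pInfo;        // always the instance that won the exchange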
+
+EventArgsMarshalingInfo *EEMarshalingData::GetEventArgsMarshalingInfo()
+{
+ CONTRACT (EventArgsMarshalingInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ if (m_pEventArgsInfo == NULL)
+ {
+ EventArgsMarshalingInfo *pEventArgsInfo = new (m_pHeap) EventArgsMarshalingInfo();
+
+ if (InterlockedCompareExchangeT(&m_pEventArgsInfo, pEventArgsInfo, NULL) != NULL)
+ {
+ // Another thread beat us to it. Delete on EventArgsMarshalingInfo is an empty operation
+ // which is OK, since the possible leak is rare, small, and constant. This is the same
+ // pattern as in code:GetCustomMarshalerInfo.
+ delete pEventArgsInfo;
+ }
+ }
+
+#ifdef _DEBUG
+ BaseDomain *pEventArgsDomain = m_pEventArgsInfo->GetSystemNCCEventArgsType().GetDomain();
+ if (pEventArgsDomain != m_pDomain)
+ {
+ // Make sure that EventArgs marshaling data is initialized in its own (shared) domain as well.
+ // This allows us to perform quick checks in code:EEMarshalingData.IsEventArgsHelperMethod.
+ (void) pEventArgsDomain->GetMarshalingData()->GetEventArgsMarshalingInfo();
+ }
+#endif // _DEBUG
+
+ RETURN m_pEventArgsInfo;
+}
+
+OleColorMarshalingInfo *EEMarshalingData::GetOleColorMarshalingInfo()
+{
+ CONTRACT (OleColorMarshalingInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ if (m_pOleColorInfo == NULL)
+ {
+ OleColorMarshalingInfo *pOleColorInfo = new (m_pHeap) OleColorMarshalingInfo();
+
+ if (InterlockedCompareExchangeT(&m_pOleColorInfo, pOleColorInfo, NULL) != NULL)
+ {
+ // Another thread beat us to it. Delete on OleColorMarshalingInfo is an empty operation
+ // which is OK, since the possible leak is rare, small, and constant. This is the same
+ // pattern as in code:GetCustomMarshalerInfo.
+ delete pOleColorInfo;
+ }
+ }
+
+#ifdef _DEBUG
+ BaseDomain *pColorDomain = m_pOleColorInfo->GetColorType().GetDomain();
+ if (pColorDomain != m_pDomain)
+ {
+ // Make sure that Color marshaling data is initialized in its own (shared) domain as well.
+ // This allows us to perform quick checks in code:EEMarshalingData.IsOleColorHelperMethod.
+ (void) pColorDomain->GetMarshalingData()->GetOleColorMarshalingInfo();
+ }
+#endif // _DEBUG
+
+ RETURN m_pOleColorInfo;
+}
+#endif // FEATURE_COMINTEROP
+
+//==========================================================================
+// Constructs MarshalInfo.
+//==========================================================================
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+MarshalInfo::MarshalInfo(Module* pModule,
+ SigPointer sig,
+ const SigTypeContext *pTypeContext,
+ mdToken token,
+ MarshalScenario ms,
+ CorNativeLinkType nlType,
+ CorNativeLinkFlags nlFlags,
+ BOOL isParam,
+ UINT paramidx, // parameter # for use in error messages (ignored if not parameter)
+ UINT numArgs, // number of arguments
+ BOOL BestFit,
+ BOOL ThrowOnUnmappableChar,
+ BOOL fEmitsIL,
+ MethodDesc* pMD,
+ BOOL fLoadCustomMarshal
+#ifdef _DEBUG
+ ,
+ LPCUTF8 pDebugName,
+ LPCUTF8 pDebugClassName,
+ UINT argidx // 0 for return value, -1 for field
+#endif
+)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr;
+ NativeTypeParamInfo ParamInfo;
+
+    // We are passed a 1-based paramidx, but we prefer to work with it as 0-based
+ m_paramidx = paramidx - 1;
+
+ // if no one overwrites this with a better message, we'll still at least say something
+ m_resID = IDS_EE_BADMARSHAL_GENERIC;
+
+ // flag for uninitialized type
+ m_type = MARSHAL_TYPE_UNKNOWN;
+
+ CorNativeType nativeType = NATIVE_TYPE_DEFAULT;
+ Assembly *pAssembly = pModule->GetAssembly();
+#ifndef FEATURE_CORECLR
+ BOOL fNeedsCopyCtor = FALSE;
+#endif // !FEATURE_CORECLR
+ m_BestFit = BestFit;
+ m_ThrowOnUnmappableChar = ThrowOnUnmappableChar;
+ m_ms = ms;
+ m_fAnsi = (ms == MARSHAL_SCENARIO_NDIRECT) && (nlType == nltAnsi);
+ m_managedArgSize = 0;
+ m_nativeArgSize = 0;
+ m_pCMHelper = NULL;
+ m_CMVt = VT_EMPTY;
+ m_args.m_pMarshalInfo = this;
+ m_args.m_pMT = NULL;
+ m_pModule = pModule;
+ CorElementType mtype = ELEMENT_TYPE_END;
+ CorElementType corElemType = ELEMENT_TYPE_END;
+ m_pMT = NULL;
+ m_pMD = pMD;
+
+#ifdef FEATURE_COMINTEROP
+ m_fDispItf = FALSE;
+ m_fInspItf = FALSE;
+ m_fErrorNativeType = FALSE;
+ m_hiddenLengthParamIndex = (UINT16)-1;
+ m_dwHiddenLengthManagedHomeLocal= 0xFFFFFFFF;
+ m_dwHiddenLengthNativeHomeLocal = 0xFFFFFFFF;
+
+ m_pDefaultItfMT = NULL;
+#endif // FEATURE_COMINTEROP
+
+
+#ifdef _DEBUG
+
+ CHAR achDbgContext[DEBUG_CONTEXT_STR_LEN] = "";
+ if (!pDebugName)
+ {
+ strncpy_s(achDbgContext, COUNTOF(achDbgContext), "<Unknown>", _TRUNCATE);
+ }
+ else
+ {
+ strncat_s(achDbgContext, COUNTOF(achDbgContext), pDebugClassName, _TRUNCATE);
+ strncat_s(achDbgContext, COUNTOF(achDbgContext), NAMESPACE_SEPARATOR_STR, _TRUNCATE);
+ strncat_s(achDbgContext, COUNTOF(achDbgContext), pDebugName, _TRUNCATE);
+ strncat_s(achDbgContext, COUNTOF(achDbgContext), " ", _TRUNCATE);
+ switch (argidx)
+ {
+ case (UINT)-1:
+ strncat_s(achDbgContext, COUNTOF(achDbgContext), "field", _TRUNCATE);
+ break;
+ case 0:
+ strncat_s(achDbgContext, COUNTOF(achDbgContext), "return value", _TRUNCATE);
+ break;
+ default:
+ {
+ char buf[30];
+ sprintf_s(buf, COUNTOF(buf), "param #%lu", (ULONG)argidx);
+ strncat_s(achDbgContext, COUNTOF(achDbgContext), buf, _TRUNCATE);
+ }
+ }
+ }
+
+ m_strDebugMethName = pDebugName;
+ m_strDebugClassName = pDebugClassName;
+ m_iArg = argidx;
+
+ m_in = m_out = FALSE;
+ m_byref = TRUE;
+#endif
+
+
+
+ // Retrieve the native type for the current parameter.
+ if (!ParseNativeTypeInfo(token, pModule->GetMDImport(), &ParamInfo))
+ {
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ nativeType = ParamInfo.m_NativeType;
+ corElemType = sig.PeekElemTypeNormalized(pModule, pTypeContext);
+ mtype = corElemType;
+
+#ifdef FEATURE_COMINTEROP
+ if (IsWinRTScenario() && nativeType != NATIVE_TYPE_DEFAULT)
+ {
+ // Do not allow any MarshalAs in WinRT scenarios - marshaling is fully described by the parameter type.
+ m_type = MARSHAL_TYPE_UNKNOWN;
+ m_resID = IDS_EE_BADMARSHAL_WINRT_MARSHAL_AS;
+ IfFailGoto(E_FAIL, lFail);
+ }
+#endif // FEATURE_COMINTEROP
+
+ // Make sure SizeParamIndex < numArgs when marshalling native arrays
+ if (nativeType == NATIVE_TYPE_ARRAY && ParamInfo.m_SizeIsSpecified)
+ {
+ if (ParamInfo.m_Multiplier > 0 && ParamInfo.m_CountParamIdx >= numArgs)
+ {
+            // Do not throw an exception here. We'll use EmitOrThrowInteropException
+            // to throw in the non-COM interop case and to emit exception-throwing
+            // code directly into the stub in the COM interop case.
+ m_type = MARSHAL_TYPE_UNKNOWN;
+ m_resID = IDS_EE_SIZECONTROLOUTOFRANGE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ }
+
+ // Parse ET_BYREF signature
+ if (mtype == ELEMENT_TYPE_BYREF)
+ {
+ m_byref = TRUE;
+ SigPointer sigtmp = sig;
+ IfFailGoto(sig.GetElemType(NULL), lFail);
+ mtype = sig.PeekElemTypeNormalized(pModule, pTypeContext);
+
+#ifndef FEATURE_CORECLR // no copy ctor support in CoreCLR
+ // Check for Copy Constructor Modifier - peek closed elem type here to prevent ELEMENT_TYPE_VALUETYPE
+ // turning into a primitive.
+ if (sig.PeekElemTypeClosed(pModule, pTypeContext) == ELEMENT_TYPE_VALUETYPE)
+ {
+ // Skip ET_BYREF
+ IfFailGoto(sigtmp.GetByte(NULL), lFail);
+
+ if (sigtmp.HasCustomModifier(pModule, "Microsoft.VisualC.NeedsCopyConstructorModifier", ELEMENT_TYPE_CMOD_REQD) ||
+ sigtmp.HasCustomModifier(pModule, "System.Runtime.CompilerServices.IsCopyConstructed", ELEMENT_TYPE_CMOD_REQD) )
+ {
+ mtype = ELEMENT_TYPE_VALUETYPE;
+ fNeedsCopyCtor = TRUE;
+ m_byref = FALSE;
+ }
+ }
+#endif // !FEATURE_CORECLR
+ }
+ else
+ {
+ m_byref = FALSE;
+ }
+
+ // Check for valid ET_PTR signature
+ if (mtype == ELEMENT_TYPE_PTR)
+ {
+#ifdef FEATURE_COMINTEROP
+ // WinRT does not support ET_PTR
+ if (IsWinRTScenario())
+ {
+ m_type = MARSHAL_TYPE_UNKNOWN;
+ m_resID = IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+#endif // FEATURE_COMINTEROP
+
+ SigPointer sigtmp = sig;
+ IfFailGoto(sigtmp.GetElemType(NULL), lFail);
+
+ // Peek closed elem type here to prevent ELEMENT_TYPE_VALUETYPE turning into a primitive.
+ CorElementType mtype2 = sigtmp.PeekElemTypeClosed(pModule, pTypeContext);
+
+ if (mtype2 == ELEMENT_TYPE_VALUETYPE)
+ {
+
+ TypeHandle th = sigtmp.GetTypeHandleThrowing(pModule, pTypeContext);
+ _ASSERTE(!th.IsNull());
+
+ // We want to leave out enums as they surely don't have copy constructors
+ // plus they are not marked as blittable.
+ if (!th.IsEnum())
+ {
+ // It should be blittable
+ if (!th.IsBlittable())
+ {
+ m_resID = IDS_EE_BADMARSHAL_PTRNONBLITTABLE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+#ifndef FEATURE_CORECLR
+ // Check for Copy Constructor Modifier
+ if (sigtmp.HasCustomModifier(pModule, "Microsoft.VisualC.NeedsCopyConstructorModifier", ELEMENT_TYPE_CMOD_REQD) ||
+ sigtmp.HasCustomModifier(pModule, "System.Runtime.CompilerServices.IsCopyConstructed", ELEMENT_TYPE_CMOD_REQD) )
+ {
+ mtype = mtype2;
+
+ // Keep the sig pointer in sync with mtype (skip ELEMENT_TYPE_PTR) because for the rest
+ // of this method we are pretending that the parameter is a value type passed by-value.
+ IfFailGoto(sig.GetElemType(NULL), lFail);
+
+ fNeedsCopyCtor = TRUE;
+ m_byref = FALSE;
+ }
+#endif // !FEATURE_CORECLR
+ }
+ }
+ else
+ {
+            if (!(mtype2 != ELEMENT_TYPE_CLASS &&
+                  mtype2 != ELEMENT_TYPE_STRING &&
+                  mtype2 != ELEMENT_TYPE_OBJECT &&
+                  mtype2 != ELEMENT_TYPE_SZARRAY))
+ {
+ m_resID = IDS_EE_BADMARSHAL_PTRSUBTYPE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ }
+ }
+
+
+    // System primitive types (System.Int32, et al.) will be marshaled as expected
+ // because the mtype CorElementType is normalized (e.g. ELEMENT_TYPE_I4).
+#ifdef _TARGET_X86_
+    // However, we need to detect whether such a normalization occurred for non-system
+    // trivial value types, because we hold the CorNativeType belonging to the original
+ // "un-normalized" signature type. It has to be verified that all the value types
+ // that have been normalized away have default marshaling or MarshalAs(Struct).
+ // In addition, the nativeType must be updated with the type of the real primitive inside.
+ //
+ VerifyAndAdjustNormalizedType(pModule, sig, pTypeContext, &mtype, &nativeType);
+#endif // _TARGET_X86_
+
+
+ if (nativeType == NATIVE_TYPE_CUSTOMMARSHALER)
+ {
+ switch (mtype)
+ {
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_OBJECT:
+ m_CMVt = VT_UNKNOWN;
+ break;
+
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_ARRAY:
+ m_CMVt = VT_I4;
+ break;
+
+ default:
+ m_resID = IDS_EE_BADMARSHAL_CUSTOMMARSHALER;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ // Set m_type to MARSHAL_TYPE_UNKNOWN in case SetupCustomMarshalerHelper throws.
+ m_type = MARSHAL_TYPE_UNKNOWN;
+
+ if (fLoadCustomMarshal)
+ {
+ // Set up the custom marshaler info.
+ TypeHandle hndManagedType = sig.GetTypeHandleThrowing(pModule, pTypeContext);
+
+ if (!fEmitsIL)
+ {
+ m_pCMHelper = SetupCustomMarshalerHelper(ParamInfo.m_strCMMarshalerTypeName,
+ ParamInfo.m_cCMMarshalerTypeNameBytes,
+ ParamInfo.m_strCMCookie,
+ ParamInfo.m_cCMCookieStrBytes,
+ pAssembly,
+ hndManagedType);
+ }
+ else
+ {
+ m_pCMHelper = NULL;
+ MethodDesc* pMDforModule = pMD;
+ if (pMD->IsILStub())
+ {
+ pMDforModule = pMD->AsDynamicMethodDesc()->GetILStubResolver()->GetStubTargetMethodDesc();
+ }
+ m_args.rcm.m_pMD = pMDforModule;
+ m_args.rcm.m_paramToken = token;
+ m_args.rcm.m_hndManagedType = hndManagedType.AsPtr();
+ CONSISTENCY_CHECK(pModule == pMDforModule->GetModule());
+ }
+ }
+
+ // Specify which custom marshaler to use.
+ m_type = MARSHAL_TYPE_REFERENCECUSTOMMARSHALER;
+
+ goto lExit;
+ }
+
+ switch (mtype)
+ {
+ case ELEMENT_TYPE_BOOLEAN:
+ switch (nativeType)
+ {
+ case NATIVE_TYPE_BOOLEAN:
+ m_type = MARSHAL_TYPE_WINBOOL;
+ break;
+
+#ifdef FEATURE_COMINTEROP
+ case NATIVE_TYPE_VARIANTBOOL:
+ m_type = MARSHAL_TYPE_VTBOOL;
+ break;
+#endif // FEATURE_COMINTEROP
+
+ case NATIVE_TYPE_U1:
+ case NATIVE_TYPE_I1:
+ m_type = MARSHAL_TYPE_CBOOL;
+ break;
+
+ case NATIVE_TYPE_DEFAULT:
+#ifdef FEATURE_COMINTEROP
+ if (m_ms == MARSHAL_SCENARIO_COMINTEROP)
+ {
+ // 2-byte COM VARIANT_BOOL
+ m_type = MARSHAL_TYPE_VTBOOL;
+ }
+ else if (m_ms == MARSHAL_SCENARIO_WINRT)
+ {
+ // 1-byte WinRT bool
+ m_type = MARSHAL_TYPE_CBOOL;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ // 4-byte Windows BOOL
+ _ASSERTE(m_ms == MARSHAL_SCENARIO_NDIRECT);
+ m_type = MARSHAL_TYPE_WINBOOL;
+ }
+ break;
+
+ default:
+ m_resID = IDS_EE_BADMARSHAL_BOOLEAN;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ break;
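+
+        // Summary of the defaults chosen above for a managed bool with no
+        // MarshalAs (scenario names as used in this file): P/Invoke -> 4-byte
+        // Windows BOOL, classic COM -> 2-byte VARIANT_BOOL, WinRT -> 1-byte
+        // bool. For example, a hypothetical
+        //     [DllImport("x")] static extern void F(bool b);
+        // marshals b as a 4-byte BOOL.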
+
+ case ELEMENT_TYPE_CHAR:
+ switch (nativeType)
+ {
+ case NATIVE_TYPE_I1: //fallthru
+ case NATIVE_TYPE_U1:
+ m_type = MARSHAL_TYPE_ANSICHAR;
+ break;
+
+ case NATIVE_TYPE_I2: //fallthru
+ case NATIVE_TYPE_U2:
+ m_type = MARSHAL_TYPE_GENERIC_U2;
+ break;
+
+ case NATIVE_TYPE_DEFAULT:
+ m_type = ( (m_ms == MARSHAL_SCENARIO_NDIRECT && m_fAnsi) ? MARSHAL_TYPE_ANSICHAR : MARSHAL_TYPE_GENERIC_U2 );
+ break;
+
+ default:
+ m_resID = IDS_EE_BADMARSHAL_CHAR;
+ IfFailGoto(E_FAIL, lFail);
+
+ }
+ break;
+
+ case ELEMENT_TYPE_I1:
+ if (!(nativeType == NATIVE_TYPE_I1 || nativeType == NATIVE_TYPE_U1 || nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_I1;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_GENERIC_1;
+ break;
+
+ case ELEMENT_TYPE_U1:
+ if (!(nativeType == NATIVE_TYPE_U1 || nativeType == NATIVE_TYPE_I1 || nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_I1;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_GENERIC_U1;
+ break;
+
+ case ELEMENT_TYPE_I2:
+ if (!(nativeType == NATIVE_TYPE_I2 || nativeType == NATIVE_TYPE_U2 || nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_I2;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_GENERIC_2;
+ break;
+
+ case ELEMENT_TYPE_U2:
+ if (!(nativeType == NATIVE_TYPE_U2 || nativeType == NATIVE_TYPE_I2 || nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_I2;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_GENERIC_U2;
+ break;
+
+ case ELEMENT_TYPE_I4:
+ switch (nativeType)
+ {
+ case NATIVE_TYPE_I4:
+ case NATIVE_TYPE_U4:
+ case NATIVE_TYPE_DEFAULT:
+ break;
+
+#ifdef FEATURE_COMINTEROP
+ case NATIVE_TYPE_ERROR:
+ m_fErrorNativeType = TRUE;
+ break;
+#endif // FEATURE_COMINTEROP
+
+ default:
+ m_resID = IDS_EE_BADMARSHAL_I4;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_GENERIC_4;
+ break;
+
+ case ELEMENT_TYPE_U4:
+ switch (nativeType)
+ {
+ case NATIVE_TYPE_I4:
+ case NATIVE_TYPE_U4:
+ case NATIVE_TYPE_DEFAULT:
+ break;
+
+#ifdef FEATURE_COMINTEROP
+ case NATIVE_TYPE_ERROR:
+ m_fErrorNativeType = TRUE;
+ break;
+#endif // FEATURE_COMINTEROP
+
+ default:
+ m_resID = IDS_EE_BADMARSHAL_I4;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_GENERIC_4;
+ break;
+
+ case ELEMENT_TYPE_I8:
+ if (!(nativeType == NATIVE_TYPE_I8 || nativeType == NATIVE_TYPE_U8 || nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_I8;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_GENERIC_8;
+ break;
+
+ case ELEMENT_TYPE_U8:
+ if (!(nativeType == NATIVE_TYPE_U8 || nativeType == NATIVE_TYPE_I8 || nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_I8;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_GENERIC_8;
+ break;
+
+ case ELEMENT_TYPE_I:
+#ifdef FEATURE_COMINTEROP
+ if (IsWinRTScenario())
+ {
+ m_resID = IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (!(nativeType == NATIVE_TYPE_INT || nativeType == NATIVE_TYPE_UINT || nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_I;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = (sizeof(LPVOID) == 4 ? MARSHAL_TYPE_GENERIC_4 : MARSHAL_TYPE_GENERIC_8);
+ break;
+
+ case ELEMENT_TYPE_U:
+#ifdef FEATURE_COMINTEROP
+ if (IsWinRTScenario())
+ {
+ m_resID = IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (!(nativeType == NATIVE_TYPE_UINT || nativeType == NATIVE_TYPE_INT || nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_I;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = (sizeof(LPVOID) == 4 ? MARSHAL_TYPE_GENERIC_4 : MARSHAL_TYPE_GENERIC_8);
+ break;
+
+
+ case ELEMENT_TYPE_R4:
+ if (!(nativeType == NATIVE_TYPE_R4 || nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_R4;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_FLOAT;
+ break;
+
+ case ELEMENT_TYPE_R8:
+ if (!(nativeType == NATIVE_TYPE_R8 || nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_R8;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_DOUBLE;
+ break;
+
+ case ELEMENT_TYPE_PTR:
+#ifdef FEATURE_COMINTEROP
+ _ASSERTE(!IsWinRTScenario()); // we checked for this earlier
+#endif // FEATURE_COMINTEROP
+
+ if (nativeType != NATIVE_TYPE_DEFAULT)
+ {
+ m_resID = IDS_EE_BADMARSHAL_PTR;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = ( (sizeof(void*)==4) ? MARSHAL_TYPE_GENERIC_4 : MARSHAL_TYPE_GENERIC_8 );
+ break;
+
+ case ELEMENT_TYPE_FNPTR:
+#ifdef FEATURE_COMINTEROP
+ if (IsWinRTScenario())
+ {
+ m_resID = IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (!(nativeType == NATIVE_TYPE_FUNC || nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_FNPTR;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = ( (sizeof(void*)==4) ? MARSHAL_TYPE_GENERIC_4 : MARSHAL_TYPE_GENERIC_8 );
+ break;
+
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_VAR:
+ {
+ TypeHandle sigTH = sig.GetTypeHandleThrowing(pModule, pTypeContext);
+
+ // Disallow marshaling generic types except for WinRT interfaces.
+ if (sigTH.HasInstantiation())
+ {
+#ifdef FEATURE_COMINTEROP
+ if (!sigTH.SupportsGenericInterop(TypeHandle::Interop_NativeToManaged))
+#endif // FEATURE_COMINTEROP
+ {
+ m_resID = IDS_EE_BADMARSHAL_GENERICS_RESTRICTION;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ }
+
+ m_pMT = sigTH.GetMethodTable();
+ if (m_pMT == NULL)
+ IfFailGoto(COR_E_TYPELOAD, lFail);
+
+#ifdef FEATURE_COMINTEROP
+ MethodTable* pDefaultMT = NULL;
+
+ // Look for marshaling of WinRT runtime classes
+ if ((m_pMT->IsProjectedFromWinRT() || m_pMT->IsExportedToWinRT()) && !m_pMT->HasExplicitGuid())
+ {
+ // The type loader guarantees that there are no WinRT interfaces without explicit GUID
+ _ASSERTE(!m_pMT->IsInterface());
+
+ // Make sure that this is really a legal runtime class and not a custom attribute or delegate
+ if (!m_pMT->IsLegalNonArrayWinRTType() || m_pMT->IsDelegate())
+ {
+ m_resID = IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ // This class must have a default interface that describes how it is marshaled
+ pDefaultMT = m_pMT->GetDefaultWinRTInterface();
+ if (pDefaultMT == NULL)
+ {
+ m_resID = IDS_EE_BADMARSHAL_WINRT_MISSING_GUID;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ }
+
+ if (nativeType == NATIVE_TYPE_INTF)
+ {
+                // Strings may not be marshaled as COM interfaces.
+ if (sig.IsStringType(pModule, pTypeContext))
+ {
+ m_resID = IDS_EE_BADMARSHALPARAM_STRING;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ if (m_ms == MARSHAL_SCENARIO_WINRT && COMDelegate::IsDelegate(m_pMT))
+ {
+ // In WinRT scenarios delegates must be WinRT delegates
+ if (!m_pMT->IsProjectedFromWinRT() && !WinRTTypeNameConverter::IsRedirectedType(m_pMT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_WINRT_DELEGATE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ }
+
+ m_type = MARSHAL_TYPE_INTERFACE;
+ }
+ else if (pDefaultMT != NULL && nativeType == NATIVE_TYPE_DEFAULT)
+ {
+ // Pretend this is really marshaling as the default interface type
+
+ // Validate it's a WinRT interface with GUID
+ if (!pDefaultMT->IsInterface() ||
+ (!pDefaultMT->IsProjectedFromWinRT() && !pDefaultMT->IsExportedToWinRT()) ||
+ !pDefaultMT->HasExplicitGuid())
+ {
+ // This might also be a redirected interface - which is also allowed
+ if (!pDefaultMT->IsWinRTRedirectedInterface(TypeHandle::Interop_NativeToManaged))
+ {
+ m_resID = IDS_EE_BADMARSHAL_DEFAULTIFACE_NOT_WINRT_IFACE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ }
+
+ // Validate that it's one of the component interfaces of the class in the signature
+ if (!m_pMT->ImplementsEquivalentInterface(pDefaultMT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_DEFAULTIFACE_NOT_SUBTYPE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ // Make sure it's not an unexpected generic case (not clear we can actually get here in practice due
+ // to the above Implements check)
+ if (pDefaultMT->HasInstantiation() && !pDefaultMT->SupportsGenericInterop(TypeHandle::Interop_NativeToManaged))
+ {
+ m_resID = IDS_EE_BADMARSHAL_GENERICS_RESTRICTION;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ // Store the marshal data just as if we were marshaling as this default interface type
+ m_type = MARSHAL_TYPE_INTERFACE;
+ m_pDefaultItfMT = pDefaultMT;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+                // The ((builder = true), 0) clause below always evaluates to false;
+                // its only effect is to record that any match made after it comes
+                // from the StringBuilder test rather than the string test above it.
+                bool builder = false;
+ if (sig.IsStringTypeThrowing(pModule, pTypeContext)
+ || ((builder = true), 0)
+ || sig.IsClassThrowing(pModule, g_StringBufferClassName, pTypeContext)
+ )
+ {
+ switch ( nativeType )
+ {
+ case NATIVE_TYPE_LPWSTR:
+ m_type = builder ? MARSHAL_TYPE_LPWSTR_BUFFER : MARSHAL_TYPE_LPWSTR;
+ break;
+
+ case NATIVE_TYPE_LPSTR:
+ m_type = builder ? MARSHAL_TYPE_LPSTR_BUFFER : MARSHAL_TYPE_LPSTR;
+ break;
+
+ case NATIVE_TYPE_LPTSTR:
+ {
+#ifdef FEATURE_COMINTEROP
+ if (m_ms != MARSHAL_SCENARIO_NDIRECT)
+ {
+ _ASSERTE(m_ms == MARSHAL_SCENARIO_COMINTEROP);
+ // We disallow NATIVE_TYPE_LPTSTR for COM.
+ IfFailGoto(E_FAIL, lFail);
+ }
+#endif // FEATURE_COMINTEROP
+ // We no longer support Win9x so LPTSTR always maps to a Unicode string.
+ m_type = builder ? MARSHAL_TYPE_LPWSTR_BUFFER : MARSHAL_TYPE_LPWSTR;
+ break;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ case NATIVE_TYPE_BSTR:
+ if (builder)
+ {
+ m_resID = IDS_EE_BADMARSHALPARAM_STRINGBUILDER;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_BSTR;
+ break;
+
+ case NATIVE_TYPE_ANSIBSTR:
+ if (builder)
+ {
+ m_resID = IDS_EE_BADMARSHALPARAM_STRINGBUILDER;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_ANSIBSTR;
+ break;
+
+ case NATIVE_TYPE_TBSTR:
+ {
+ if (builder)
+ {
+ m_resID = IDS_EE_BADMARSHALPARAM_STRINGBUILDER;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ // We no longer support Win9x so TBSTR always maps to a normal (unicode) BSTR.
+ m_type = MARSHAL_TYPE_BSTR;
+ break;
+ }
+
+ case NATIVE_TYPE_BYVALSTR:
+ {
+ if (builder)
+ {
+ m_resID = IDS_EE_BADMARSHALPARAM_STRINGBUILDER;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = m_fAnsi ? MARSHAL_TYPE_VBBYVALSTR : MARSHAL_TYPE_VBBYVALSTRW;
+ break;
+ }
+
+ case NATIVE_TYPE_HSTRING:
+ {
+ if (builder)
+ {
+ m_resID = IDS_EE_BADMARSHALPARAM_STRINGBUILDER;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ m_type = MARSHAL_TYPE_HSTRING;
+ break;
+ }
+#endif // FEATURE_COMINTEROP
+
+ case NATIVE_TYPE_DEFAULT:
+ {
+#ifdef FEATURE_COMINTEROP
+ if (m_ms == MARSHAL_SCENARIO_WINRT)
+ {
+ if (builder)
+ {
+ m_resID = IDS_EE_BADMARSHALPARAM_STRINGBUILDER;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ m_type = MARSHAL_TYPE_HSTRING;
+ }
+ else if (m_ms != MARSHAL_SCENARIO_NDIRECT)
+ {
+ _ASSERTE(m_ms == MARSHAL_SCENARIO_COMINTEROP);
+ m_type = builder ? MARSHAL_TYPE_LPWSTR_BUFFER : MARSHAL_TYPE_BSTR;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ if (m_fAnsi)
+ {
+ m_type = builder ? MARSHAL_TYPE_LPSTR_BUFFER : MARSHAL_TYPE_LPSTR;
+ }
+ else
+ {
+ m_type = builder ? MARSHAL_TYPE_LPWSTR_BUFFER : MARSHAL_TYPE_LPWSTR;
+ }
+ break;
+ }
+
+ default:
+ m_resID = builder ? IDS_EE_BADMARSHALPARAM_STRINGBUILDER : IDS_EE_BADMARSHALPARAM_STRING;
+ IfFailGoto(E_FAIL, lFail);
+ break;
+ }
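+
+                // Summary of the string defaults chosen above when no MarshalAs is
+                // present (sketch): WinRT -> HSTRING (StringBuilder rejected),
+                // classic COM -> BSTR for String and an LPWSTR buffer for
+                // StringBuilder, P/Invoke -> LPSTR when the method is ANSI and
+                // LPWSTR otherwise, with StringBuilder using the corresponding
+                // *_BUFFER marshaler.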
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (sig.IsClassThrowing(pModule, g_CollectionsEnumeratorClassName, pTypeContext) &&
+ nativeType == NATIVE_TYPE_DEFAULT)
+ {
+ m_CMVt = VT_UNKNOWN;
+ m_type = MARSHAL_TYPE_REFERENCECUSTOMMARSHALER;
+
+ if (fLoadCustomMarshal)
+ {
+ if (!fEmitsIL)
+ {
+ m_pCMHelper = SetupCustomMarshalerHelper(ENUMERATOR_TO_ENUM_VARIANT_CM_NAME,
+ ENUMERATOR_TO_ENUM_VARIANT_CM_NAME_LEN,
+ ENUMERATOR_TO_ENUM_VARIANT_CM_COOKIE,
+ ENUMERATOR_TO_ENUM_VARIANT_CM_COOKIE_LEN,
+ pAssembly, sigTH);
+ }
+ else
+ {
+ m_pCMHelper = NULL;
+ MethodDesc* pMDforModule = pMD;
+ if (pMD->IsILStub())
+ {
+ pMDforModule = pMD->AsDynamicMethodDesc()->GetILStubResolver()->GetStubTargetMethodDesc();
+ }
+ m_args.rcm.m_pMD = pMDforModule;
+ m_args.rcm.m_paramToken = token;
+ m_args.rcm.m_hndManagedType = sigTH.AsPtr();
+ CONSISTENCY_CHECK(pModule == pMDforModule->GetModule());
+ }
+ }
+ }
+#endif // FEATURE_COMINTEROP
+ else if (sigTH.CanCastTo(TypeHandle(MscorlibBinder::GetClass(CLASS__SAFE_HANDLE))))
+ {
+ if (nativeType != NATIVE_TYPE_DEFAULT)
+ {
+ m_resID = IDS_EE_BADMARSHAL_SAFEHANDLE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_args.m_pMT = m_pMT;
+ m_type = MARSHAL_TYPE_SAFEHANDLE;
+ }
+ else if (sigTH.CanCastTo(TypeHandle(MscorlibBinder::GetClass(CLASS__CRITICAL_HANDLE))))
+ {
+ if (nativeType != NATIVE_TYPE_DEFAULT)
+ {
+ m_resID = IDS_EE_BADMARSHAL_CRITICALHANDLE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_args.m_pMT = m_pMT;
+ m_type = MARSHAL_TYPE_CRITICALHANDLE;
+ }
+ else if (sig.IsClassThrowing(pModule, g_ReflectionMethodInterfaceName, pTypeContext))
+ {
+ if (nativeType != NATIVE_TYPE_DEFAULT)
+ {
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ m_type = MARSHAL_TYPE_RUNTIMEMETHODINFO;
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (m_pMT->IsInterface())
+ {
+ if (!(nativeType == NATIVE_TYPE_DEFAULT ||
+ nativeType == NATIVE_TYPE_INTF))
+ {
+ m_resID = IDS_EE_BADMARSHAL_INTERFACE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_INTERFACE;
+
+ if (m_ms == MARSHAL_SCENARIO_WINRT)
+ {
+ // all interfaces marshaled in WinRT scenarios are IInspectable-based
+ m_fInspItf = TRUE;
+ }
+ }
+ // Check for Windows.Foundation.HResult <-> Exception
+ else if (m_ms == MARSHAL_SCENARIO_WINRT && MscorlibBinder::IsClass(m_pMT, CLASS__EXCEPTION))
+ {
+ m_args.m_pMT = m_pMT;
+ m_type = MARSHAL_TYPE_EXCEPTION;
+ }
+#endif // FEATURE_COMINTEROP
+ else if (COMDelegate::IsDelegate(m_pMT))
+ {
+ m_args.m_pMT = m_pMT;
+#ifdef FEATURE_COMINTEROP
+ if (m_ms == MARSHAL_SCENARIO_WINRT)
+ {
+ // Delegates must be imported from WinRT and marshaled as Interface
+ if (!m_pMT->IsProjectedFromWinRT() && !WinRTTypeNameConverter::IsRedirectedType(m_pMT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_WINRT_DELEGATE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ switch (nativeType)
+ {
+ case NATIVE_TYPE_FUNC:
+ m_type = MARSHAL_TYPE_DELEGATE;
+ break;
+
+ case NATIVE_TYPE_DEFAULT:
+#ifdef FEATURE_COMINTEROP
+ if (m_ms != MARSHAL_SCENARIO_NDIRECT)
+ {
+ _ASSERTE(m_ms == MARSHAL_SCENARIO_COMINTEROP || m_ms == MARSHAL_SCENARIO_WINRT);
+ m_type = MARSHAL_TYPE_INTERFACE;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ m_type = MARSHAL_TYPE_DELEGATE;
+
+ break;
+
+ default:
+ m_resID = IDS_EE_BADMARSHAL_DELEGATE;
+ IfFailGoto(E_FAIL, lFail);
+ break;
+ }
+ }
+ else if (m_pMT->IsBlittable())
+ {
+ if (!(nativeType == NATIVE_TYPE_DEFAULT || nativeType == NATIVE_TYPE_LPSTRUCT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_CLASS;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_BLITTABLEPTR;
+ m_args.m_pMT = m_pMT;
+ }
+ else if (m_pMT->HasLayout())
+ {
+ if (!(nativeType == NATIVE_TYPE_DEFAULT || nativeType == NATIVE_TYPE_LPSTRUCT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_CLASS;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_LAYOUTCLASSPTR;
+ m_args.m_pMT = m_pMT;
+ }
+ else if (sig.IsClassThrowing(pModule, g_ReflectionModuleName, pTypeContext))
+ {
+ if (nativeType != NATIVE_TYPE_DEFAULT)
+ {
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ m_type = MARSHAL_TYPE_RUNTIMEMODULE;
+ }
+ else if (sig.IsClassThrowing(pModule, g_ReflectionAssemblyName, pTypeContext))
+ {
+ if (nativeType != NATIVE_TYPE_DEFAULT)
+ {
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ m_type = MARSHAL_TYPE_RUNTIMEASSEMBLY;
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (m_ms == MARSHAL_SCENARIO_WINRT && sig.IsClassThrowing(pModule, g_SystemUriClassName, pTypeContext))
+ {
+ m_type = MARSHAL_TYPE_URI;
+ }
+ else if (m_ms == MARSHAL_SCENARIO_WINRT && sig.IsClassThrowing(pModule, g_NotifyCollectionChangedEventArgsName, pTypeContext))
+ {
+ m_type = MARSHAL_TYPE_NCCEVENTARGS;
+ }
+ else if (m_ms == MARSHAL_SCENARIO_WINRT && sig.IsClassThrowing(pModule, g_PropertyChangedEventArgsName, pTypeContext))
+ {
+ m_type = MARSHAL_TYPE_PCEVENTARGS;
+ }
+#endif // FEATURE_COMINTEROP
+ else if (m_pMT->IsObjectClass())
+ {
+ switch(nativeType)
+ {
+#ifdef FEATURE_COMINTEROP
+ case NATIVE_TYPE_DEFAULT:
+ if (ms == MARSHAL_SCENARIO_WINRT)
+ {
+ m_fInspItf = TRUE;
+ m_type = MARSHAL_TYPE_INTERFACE;
+ break;
+ }
+ // fall through
+ case NATIVE_TYPE_STRUCT:
+ m_type = MARSHAL_TYPE_OBJECT;
+ break;
+
+ case NATIVE_TYPE_INTF:
+ case NATIVE_TYPE_IUNKNOWN:
+ m_type = MARSHAL_TYPE_INTERFACE;
+ break;
+
+ case NATIVE_TYPE_IDISPATCH:
+ m_fDispItf = TRUE;
+ m_type = MARSHAL_TYPE_INTERFACE;
+ break;
+
+ case NATIVE_TYPE_IINSPECTABLE:
+ m_fInspItf = TRUE;
+ m_type = MARSHAL_TYPE_INTERFACE;
+ break;
+#else
+ case NATIVE_TYPE_DEFAULT:
+ case NATIVE_TYPE_STRUCT:
+ m_resID = IDS_EE_OBJECT_TO_VARIANT_NOT_SUPPORTED;
+ IfFailGoto(E_FAIL, lFail);
+
+ case NATIVE_TYPE_INTF:
+ case NATIVE_TYPE_IUNKNOWN:
+ case NATIVE_TYPE_IDISPATCH:
+ m_resID = IDS_EE_OBJECT_TO_ITF_NOT_SUPPORTED;
+ IfFailGoto(E_FAIL, lFail);
+#endif // FEATURE_COMINTEROP
+
+ case NATIVE_TYPE_ASANY:
+ m_type = m_fAnsi ? MARSHAL_TYPE_ASANYA : MARSHAL_TYPE_ASANYW;
+ break;
+
+ default:
+ m_resID = IDS_EE_BADMARSHAL_OBJECT;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ else if (sig.IsClassThrowing(pModule, g_ArrayClassName, pTypeContext))
+ {
+ if (IsWinRTScenario())
+ {
+ m_resID = IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ switch(nativeType)
+ {
+ case NATIVE_TYPE_DEFAULT:
+ case NATIVE_TYPE_INTF:
+ m_type = MARSHAL_TYPE_INTERFACE;
+ break;
+
+ case NATIVE_TYPE_SAFEARRAY:
+ {
+ TypeHandle thElement = TypeHandle(g_pObjectClass);
+
+ if (ParamInfo.m_SafeArrayElementVT != VT_EMPTY)
+ {
+ if (ParamInfo.m_cSafeArrayUserDefTypeNameBytes > 0)
+ {
+ // Load the type. Use an SString for the string since we need to NULL terminate the string
+ // that comes from the metadata.
+ StackScratchBuffer utf8Name;
+ SString safeArrayUserDefTypeName(SString::Utf8, ParamInfo.m_strSafeArrayUserDefTypeName, ParamInfo.m_cSafeArrayUserDefTypeNameBytes);
+ thElement = TypeName::GetTypeUsingCASearchRules(safeArrayUserDefTypeName.GetUTF8(utf8Name), pAssembly);
+ }
+ }
+ else
+ {
+ // Compat: If no safe array VT was specified, default to VT_VARIANT.
+ ParamInfo.m_SafeArrayElementVT = VT_VARIANT;
+ }
+
+ IfFailGoto(HandleArrayElemType(&ParamInfo, 0, thElement, -1, FALSE, isParam, pAssembly), lFail);
+ break;
+ }
+
+ default:
+ m_resID = IDS_EE_BADMARSHAL_SYSARRAY;
+ IfFailGoto(E_FAIL, lFail);
+
+ }
+ }
+
+ else if (m_pMT->IsArray())
+ {
+ _ASSERTE(!"This invalid signature should never be hit!");
+ IfFailGoto(E_FAIL, lFail);
+ }
+ else if ((m_ms == MARSHAL_SCENARIO_WINRT) && sig.IsClassThrowing(pModule, g_TypeClassName, pTypeContext))
+ {
+ m_type = MARSHAL_TYPE_SYSTEMTYPE;
+ }
+#endif // FEATURE_COMINTEROP
+ else if (!m_pMT->IsValueType())
+ {
+#ifdef FEATURE_COMINTEROP
+ if (IsWinRTScenario() && !m_pMT->IsLegalNonArrayWinRTType())
+ {
+ m_resID = IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (!(nativeType == NATIVE_TYPE_INTF || nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_NOLAYOUT;
+ IfFailGoto(E_FAIL, lFail);
+ }
+#ifdef FEATURE_COMINTEROP
+ // default marshalling is interface
+ m_type = MARSHAL_TYPE_INTERFACE;
+#else // FEATURE_COMINTEROP
+ m_resID = IDS_EE_OBJECT_TO_ITF_NOT_SUPPORTED;
+ IfFailGoto(E_FAIL, lFail);
+#endif // FEATURE_COMINTEROP
+ }
+
+ else
+ {
+ _ASSERTE(m_pMT->IsValueType());
+ goto lValueClass;
+ }
+ }
+ break;
+ }
+
+
+ case ELEMENT_TYPE_VALUETYPE:
+ lValueClass:
+ {
+ if (sig.IsClassThrowing(pModule, g_DecimalClassName, pTypeContext))
+ {
+ switch (nativeType)
+ {
+ case NATIVE_TYPE_DEFAULT:
+ case NATIVE_TYPE_STRUCT:
+ m_type = MARSHAL_TYPE_DECIMAL;
+ break;
+
+ case NATIVE_TYPE_LPSTRUCT:
+ m_type = MARSHAL_TYPE_DECIMAL_PTR;
+ break;
+
+ case NATIVE_TYPE_CURRENCY:
+ m_type = MARSHAL_TYPE_CURRENCY;
+ break;
+
+ default:
+ m_resID = IDS_EE_BADMARSHALPARAM_DECIMAL;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ }
+ else if (sig.IsClassThrowing(pModule, g_GuidClassName, pTypeContext))
+ {
+ switch (nativeType)
+ {
+ case NATIVE_TYPE_DEFAULT:
+ case NATIVE_TYPE_STRUCT:
+ m_type = MARSHAL_TYPE_GUID;
+ break;
+
+ case NATIVE_TYPE_LPSTRUCT:
+ m_type = MARSHAL_TYPE_GUID_PTR;
+ break;
+
+ default:
+ m_resID = IDS_EE_BADMARSHAL_GUID;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (sig.IsClassThrowing(pModule, g_DateTimeOffsetClassName, pTypeContext))
+ {
+ if (!(nativeType == NATIVE_TYPE_DEFAULT || nativeType == NATIVE_TYPE_STRUCT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_DATETIMEOFFSET;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_DATETIME;
+ m_pMT = MscorlibBinder::GetClass(CLASS__DATE_TIME_OFFSET);
+ }
+#endif // FEATURE_COMINTEROP
+ else if (sig.IsClassThrowing(pModule, g_DateClassName, pTypeContext))
+ {
+ if (!(nativeType == NATIVE_TYPE_DEFAULT || nativeType == NATIVE_TYPE_STRUCT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_DATETIME;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_DATE;
+ }
+ else if (sig.IsClassThrowing(pModule, "System.Runtime.InteropServices.ArrayWithOffset", pTypeContext))
+ {
+ if (!(nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_ARRAYWITHOFFSET;
+ }
+ else if (sig.IsClassThrowing(pModule, "System.Runtime.InteropServices.HandleRef", pTypeContext))
+ {
+ if (!(nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_HANDLEREF;
+ }
+ else if (sig.IsClassThrowing(pModule, "System.ArgIterator", pTypeContext))
+ {
+ if (!(nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ IfFailGoto(E_FAIL, lFail);
+ }
+ m_type = MARSHAL_TYPE_ARGITERATOR;
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (sig.IsClassThrowing(pModule, g_ColorClassName, pTypeContext))
+ {
+ if (!(nativeType == NATIVE_TYPE_DEFAULT))
+ {
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ // This is only supported for COM interop.
+ if (m_ms != MARSHAL_SCENARIO_COMINTEROP)
+ {
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ m_type = MARSHAL_TYPE_OLECOLOR;
+ }
+#endif // FEATURE_COMINTEROP
+ else if (sig.IsClassThrowing(pModule, g_RuntimeTypeHandleClassName, pTypeContext))
+ {
+ if (nativeType != NATIVE_TYPE_DEFAULT)
+ {
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ m_type = MARSHAL_TYPE_RUNTIMETYPEHANDLE;
+ }
+ else if (sig.IsClassThrowing(pModule, g_RuntimeFieldHandleClassName, pTypeContext))
+ {
+ if (nativeType != NATIVE_TYPE_DEFAULT)
+ {
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ m_type = MARSHAL_TYPE_RUNTIMEFIELDHANDLE;
+ }
+ else if (sig.IsClassThrowing(pModule, g_RuntimeMethodHandleClassName, pTypeContext))
+ {
+ if (nativeType != NATIVE_TYPE_DEFAULT)
+ {
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ m_type = MARSHAL_TYPE_RUNTIMEMETHODHANDLE;
+ }
+ else
+ {
+ m_pMT = sig.GetTypeHandleThrowing(pModule, pTypeContext).GetMethodTable();
+ if (m_pMT == NULL)
+ break;
+
+#ifdef FEATURE_COMINTEROP
+ // Handle Nullable<T> and KeyValuePair<K, V> for WinRT
+ if (m_ms == MARSHAL_SCENARIO_WINRT)
+ {
+ if (m_pMT->HasSameTypeDefAs(g_pNullableClass))
+ {
+ m_type = MARSHAL_TYPE_NULLABLE;
+ m_args.m_pMT = m_pMT;
+ break;
+ }
+
+ if (m_pMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__KEYVALUEPAIRGENERIC)))
+ {
+ m_type = MARSHAL_TYPE_KEYVALUEPAIR;
+ m_args.m_pMT = m_pMT;
+ break;
+ }
+
+ if (!m_pMT->IsLegalNonArrayWinRTType())
+ {
+ m_resID = IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (m_pMT->HasInstantiation())
+ {
+ m_resID = IDS_EE_BADMARSHAL_GENERICS_RESTRICTION;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ UINT managedSize = m_pMT->GetAlignedNumInstanceFieldBytes();
+ UINT nativeSize = m_pMT->GetNativeSize();
+
+ if ( nativeSize > 0xfff0 ||
+ managedSize > 0xfff0)
+ {
+ m_resID = IDS_EE_STRUCTTOOCOMPLEX;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ if (m_pMT->IsBlittable())
+ {
+ if (!(nativeType == NATIVE_TYPE_DEFAULT || nativeType == NATIVE_TYPE_STRUCT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_VALUETYPE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ if (m_byref && !isParam)
+ {
+ // Override the prohibition on byref returns so that IJW works
+ m_byref = FALSE;
+ m_type = ((sizeof(void*) == 4) ? MARSHAL_TYPE_GENERIC_4 : MARSHAL_TYPE_GENERIC_8);
+ }
+ else
+ {
+#ifndef FEATURE_CORECLR
+ if (fNeedsCopyCtor)
+ {
+ if (m_ms == MARSHAL_SCENARIO_WINRT)
+ {
+ // our WinRT-optimized GetCOMIPFromRCW helpers don't support copy
+ // constructor stubs so make sure that this marshaler will not be used
+ m_resID = IDS_EE_BADMARSHAL_WINRT_COPYCTOR;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ MethodDesc *pCopyCtor;
+ MethodDesc *pDtor;
+ FindCopyCtor(pModule, m_pMT, &pCopyCtor);
+ FindDtor(pModule, m_pMT, &pDtor);
+
+ m_args.mm.m_pMT = m_pMT;
+ m_args.mm.m_pCopyCtor = pCopyCtor;
+ m_args.mm.m_pDtor = pDtor;
+ m_type = MARSHAL_TYPE_BLITTABLEVALUECLASSWITHCOPYCTOR;
+ }
+ else
+#endif // !FEATURE_CORECLR
+#ifdef _TARGET_X86_
+                    // This optimization (returning small value types by value in registers)
+                    // is x86-only; JIT64 is not aware of normalized value types and already
+                    // performs it on 64-bit targets.
+ if ( !m_byref // Permit register-sized structs as return values
+ && !isParam
+ && CorIsPrimitiveType(m_pMT->GetInternalCorElementType())
+ && !IsUnmanagedValueTypeReturnedByRef(nativeSize)
+ && managedSize <= sizeof(void*)
+ && nativeSize <= sizeof(void*))
+ {
+ m_type = MARSHAL_TYPE_GENERIC_4;
+ m_args.m_pMT = m_pMT;
+ }
+ else
+#endif // _TARGET_X86_
+ {
+ m_args.m_pMT = m_pMT;
+ m_type = MARSHAL_TYPE_BLITTABLEVALUECLASS;
+ }
+ }
+ }
+ else if (m_pMT->HasLayout())
+ {
+ if (!(nativeType == NATIVE_TYPE_DEFAULT || nativeType == NATIVE_TYPE_STRUCT))
+ {
+ m_resID = IDS_EE_BADMARSHAL_VALUETYPE;
+ IfFailGoto(E_FAIL, lFail);
+ }
+
+ m_args.m_pMT = m_pMT;
+ m_type = MARSHAL_TYPE_VALUECLASS;
+ }
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_ARRAY:
+ {
+ // Get class info from array.
+ TypeHandle arrayTypeHnd = sig.GetTypeHandleThrowing(pModule, pTypeContext);
+ _ASSERTE(!arrayTypeHnd.IsNull());
+
+ ArrayTypeDesc* asArray = arrayTypeHnd.AsArray();
+ if (asArray == NULL)
+ IfFailGoto(E_FAIL, lFail);
+
+ TypeHandle thElement = asArray->GetTypeParam();
+
+#ifdef FEATURE_COMINTEROP
+ if (m_ms != MARSHAL_SCENARIO_WINRT)
+#endif // FEATURE_COMINTEROP
+ {
+ if (thElement.HasInstantiation())
+ {
+ m_resID = IDS_EE_BADMARSHAL_GENERICS_RESTRICTION;
+ IfFailGoto(E_FAIL, lFail);
+ }
+ }
+
+ unsigned ofs = 0;
+ if (arrayTypeHnd.GetMethodTable())
+ {
+ ofs = ArrayBase::GetDataPtrOffset(arrayTypeHnd.GetMethodTable());
+ if (ofs > 0xffff)
+ {
+ ofs = 0; // can't represent it, so pass on magic value (which causes fallback to regular ML code)
+ }
+ }
+
+ // Handle retrieving the information for the array type.
+ IfFailGoto(HandleArrayElemType(&ParamInfo, (UINT16)ofs, thElement, asArray->GetRank(), mtype == ELEMENT_TYPE_SZARRAY, isParam, pAssembly), lFail);
+ break;
+ }
+
+ default:
+ m_resID = IDS_EE_BADMARSHAL_BADMANAGED;
+ }
+
+lExit:
+#ifdef FEATURE_COMINTEROP
+#ifdef FEATURE_CORECLR
+    // The field scenario is not blocked here because we don't want to block loading structs
+    // that contain the blocked types but are never passed to interop.
+
+ if (AppX::IsAppXProcess() && ms != MarshalInfo::MARSHAL_SCENARIO_FIELD)
+ {
+ bool set_error = false;
+ switch (m_type)
+ {
+ case MARSHAL_TYPE_ANSIBSTR:
+ m_resID = IDS_EE_BADMARSHAL_TYPE_ANSIBSTR;
+ set_error = true;
+ break;
+ case MARSHAL_TYPE_VBBYVALSTR:
+ case MARSHAL_TYPE_VBBYVALSTRW:
+ m_resID = IDS_EE_BADMARSHAL_TYPE_VBBYVALSTR;
+ set_error = true;
+ break;
+ case MARSHAL_TYPE_REFERENCECUSTOMMARSHALER:
+ m_resID = IDS_EE_BADMARSHAL_TYPE_REFERENCECUSTOMMARSHALER;
+ set_error = true;
+ break;
+ case MARSHAL_TYPE_ASANYA:
+ case MARSHAL_TYPE_ASANYW:
+ m_resID = IDS_EE_BADMARSHAL_TYPE_ASANYA;
+ set_error = true;
+ break;
+ case MARSHAL_TYPE_INTERFACE:
+ if (m_fDispItf)
+ {
+ m_resID = IDS_EE_BADMARSHAL_TYPE_IDISPATCH;
+ set_error = true;
+ }
+ break;
+ }
+
+ if (set_error)
+ COMPlusThrow(kPlatformNotSupportedException, m_resID);
+
+ }
+#endif // FEATURE_CORECLR
+
+ if (IsWinRTScenario() && !IsSupportedForWinRT(m_type))
+ {
+ // the marshaler we came up with is not supported in WinRT scenarios
+ m_type = MARSHAL_TYPE_UNKNOWN;
+ m_resID = IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE;
+ goto lReallyExit;
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (m_byref && !isParam)
+ {
+ // byref returns don't work: the thing pointed to lives on
+ // a stack that disappears!
+ m_type = MARSHAL_TYPE_UNKNOWN;
+ goto lReallyExit;
+ }
+
+ //---------------------------------------------------------------------
+ // Now, figure out the IN/OUT status.
+ // Also set the m_fOleVarArgCandidate here to save perf of invoking Metadata API
+ //---------------------------------------------------------------------
+ m_fOleVarArgCandidate = FALSE;
+ if (m_type != MARSHAL_TYPE_UNKNOWN && IsInOnly(m_type) && !m_byref)
+ {
+ // If we got here, the parameter is something like an "int" where
+ // [in] is the only semantically valid choice. Since there is no
+ // possible way to interpret an [out] for such a type, we will ignore
+ // the metadata and force the bits to "in". We could have defined
+ // it as an error instead but this is less likely to cause problems
+ // with metadata autogenerated from typelibs and poorly
+ // defined C headers.
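+        // For example, a plain "int" parameter marked [Out] in metadata is simply treated
+        // here as [In].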
+ //
+ m_in = TRUE;
+ m_out = FALSE;
+ }
+ else
+ {
+
+        // Capture and save away the "In/Out" bits. If neither is present, set both to FALSE (they will be properly defaulted downstream).
+ if (token == mdParamDefNil)
+ {
+ m_in = FALSE;
+ m_out = FALSE;
+ }
+ else if (TypeFromToken(token) != mdtParamDef)
+ {
+ _ASSERTE(TypeFromToken(token) == mdtFieldDef);
+
+            // Field setters are always [In]; the flags are ignored for the return values of getters.
+ m_in = TRUE;
+ m_out = FALSE;
+ }
+ else
+ {
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+ USHORT usSequence;
+ DWORD dwAttr;
+ LPCSTR szParamName_Ignore;
+
+ if (FAILED(pInternalImport->GetParamDefProps(token, &usSequence, &dwAttr, &szParamName_Ignore)))
+ {
+ m_in = FALSE;
+ m_out = FALSE;
+ }
+ else
+ {
+ m_in = IsPdIn(dwAttr) != 0;
+ m_out = IsPdOut(dwAttr) != 0;
+#ifdef FEATURE_COMINTEROP
+                // Set m_fOleVarArgCandidate. The rule is the same as the one defined in vm\tlbexp.cpp.
+                if (paramidx == numArgs &&            // arg is the last arg of the method
+                    !(dwAttr & PARAMFLAG_FOPT) &&     // arg is not an optional arg
+ !IsNilToken(token) && // token is not a nil token
+ (m_type == MARSHAL_TYPE_SAFEARRAY) && // arg is marshaled as SafeArray
+ (m_arrayElementType == VT_VARIANT)) // the element of the safearray is VARIANT
+ {
+ // check if it has default value
+ MDDefaultValue defaultValue;
+ if (SUCCEEDED(pInternalImport->GetDefaultValue(token, &defaultValue)) && defaultValue.m_bType == ELEMENT_TYPE_VOID)
+ {
+ // check if it has params attribute
+ if (pInternalImport->GetCustomAttributeByName(token, INTEROP_PARAMARRAY_TYPE, 0,0) == S_OK)
+ m_fOleVarArgCandidate = TRUE;
+ }
+ }
+#endif
+ }
+ }
+
+        // If neither IN nor OUT is set, this signals the runtime to use the default rules:
+        // byref parameters and StringBuilders default to In/Out, everything else to In only.
+ if (!m_in && !m_out)
+ {
+ if (m_byref ||
+ (mtype == ELEMENT_TYPE_CLASS
+ && !(sig.IsStringType(pModule, pTypeContext))
+ && sig.IsClass(pModule, g_StringBufferClassName, pTypeContext)))
+ {
+ m_in = TRUE;
+ m_out = TRUE;
+ }
+ else
+ {
+ m_in = TRUE;
+ m_out = FALSE;
+ }
+
+ }
+ }
+
+lReallyExit:
+
+#ifdef _DEBUG
+ DumpMarshalInfo(pModule, sig, pTypeContext, token, ms, nlType, nlFlags);
+#endif
+ return;
+
+
+lFail:
+ // We got here because of an illegal ELEMENT_TYPE/NATIVE_TYPE combo.
+ m_type = MARSHAL_TYPE_UNKNOWN;
+ //_ASSERTE(!"Invalid ELEMENT_TYPE/NATIVE_TYPE combination");
+ goto lExit;
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+VOID MarshalInfo::EmitOrThrowInteropParamException(NDirectStubLinker* psl, BOOL fMngToNative, UINT resID, UINT paramIdx)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_COMINTEROP
+ // If this is not forward COM interop, throw the exception right away. We rely on this
+ // for example in code:ComPreStubWorker when we fire the InvalidMemberDeclaration MDA.
+ if ((m_ms == MARSHAL_SCENARIO_COMINTEROP || m_ms == MARSHAL_SCENARIO_WINRT) && fMngToNative)
+ {
+ psl->SetInteropParamExceptionInfo(resID, paramIdx);
+ return;
+ }
+#endif // FEATURE_COMINTEROP
+
+ ThrowInteropParamException(resID, paramIdx);
+}
+
+
+HRESULT MarshalInfo::HandleArrayElemType(NativeTypeParamInfo *pParamInfo, UINT16 optbaseoffset, TypeHandle thElement, int iRank, BOOL fNoLowerBounds, BOOL isParam, Assembly *pAssembly)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pParamInfo));
+ }
+ CONTRACTL_END;
+
+ ArrayMarshalInfo arrayMarshalInfo(amiRuntime);
+
+
+ //
+ // Store rank and bound information.
+ //
+
+ m_iArrayRank = iRank;
+ m_nolowerbounds = fNoLowerBounds;
+
+
+ //
+ // Determine which type of marshaler to use.
+ //
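+    // The choice, in order of precedence:
+    //   WinRT scenario              -> MARSHAL_TYPE_HIDDENLENGTHARRAY
+    //   NATIVE_TYPE_SAFEARRAY       -> MARSHAL_TYPE_SAFEARRAY
+    //   NATIVE_TYPE_ARRAY           -> MARSHAL_TYPE_NATIVEARRAY
+    //   NATIVE_TYPE_DEFAULT         -> MARSHAL_TYPE_SAFEARRAY for COM scenarios,
+    //                                  MARSHAL_TYPE_NATIVEARRAY for P/Invoke
+    //   anything else               -> error (IDS_EE_BADMARSHAL_ARRAY)
+    //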
+
+#ifdef FEATURE_COMINTEROP
+ if (m_ms == MARSHAL_SCENARIO_WINRT)
+ {
+ m_type = MARSHAL_TYPE_HIDDENLENGTHARRAY;
+ }
+ else if (pParamInfo->m_NativeType == NATIVE_TYPE_SAFEARRAY)
+ {
+ m_type = MARSHAL_TYPE_SAFEARRAY;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ if (pParamInfo->m_NativeType == NATIVE_TYPE_ARRAY)
+ {
+ m_type = MARSHAL_TYPE_NATIVEARRAY;
+ }
+ else if (pParamInfo->m_NativeType == NATIVE_TYPE_DEFAULT)
+ {
+#ifdef FEATURE_COMINTEROP
+ if (m_ms != MARSHAL_SCENARIO_NDIRECT)
+ {
+ m_type = MARSHAL_TYPE_SAFEARRAY;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ m_type = MARSHAL_TYPE_NATIVEARRAY;
+ }
+ }
+ else
+ {
+ m_resID = IDS_EE_BADMARSHAL_ARRAY;
+ return E_FAIL;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (m_type == MARSHAL_TYPE_SAFEARRAY)
+ {
+ arrayMarshalInfo.InitForSafeArray(m_ms, thElement, pParamInfo->m_SafeArrayElementVT, m_fAnsi);
+ }
+ else if (m_type == MARSHAL_TYPE_HIDDENLENGTHARRAY)
+ {
+ arrayMarshalInfo.InitForHiddenLengthArray(thElement);
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ _ASSERTE(m_type == MARSHAL_TYPE_NATIVEARRAY);
+ arrayMarshalInfo.InitForNativeArray(m_ms, thElement, pParamInfo->m_ArrayElementType, m_fAnsi);
+ }
+
+ // Make sure the marshalling information is valid.
+ if (!arrayMarshalInfo.IsValid())
+ {
+ m_resID = arrayMarshalInfo.GetErrorResourceId();
+ return E_FAIL;
+ }
+
+ // Set the array type handle and VARTYPE to use for marshalling.
+ m_hndArrayElemType = arrayMarshalInfo.GetElementTypeHandle();
+ m_arrayElementType = arrayMarshalInfo.GetElementVT();
+
+ if (m_type == MARSHAL_TYPE_NATIVEARRAY)
+ {
+ // Retrieve the extra information associated with the native array marshaling.
+ m_args.na.m_vt = m_arrayElementType;
+ m_args.na.m_pMT = !m_hndArrayElemType.IsTypeDesc() ? m_hndArrayElemType.AsMethodTable() : NULL;
+ m_args.na.m_optionalbaseoffset = optbaseoffset;
+ m_countParamIdx = pParamInfo->m_CountParamIdx;
+ m_multiplier = pParamInfo->m_Multiplier;
+ m_additive = pParamInfo->m_Additive;
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (m_type == MARSHAL_TYPE_HIDDENLENGTHARRAY)
+ {
+ m_args.na.m_optionalbaseoffset = optbaseoffset;
+
+ m_args.na.m_vt = m_arrayElementType;
+ m_args.na.m_pMT = m_hndArrayElemType.AsMethodTable();
+ m_args.na.m_cbElementSize = arrayMarshalInfo.GetElementSize();
+ m_args.na.m_redirectedTypeIndex = arrayMarshalInfo.GetRedirectedTypeIndex();
+ }
+#endif // FEATURE_COMINTEROP
+
+ return S_OK;
+}
+
+ILMarshaler* CreateILMarshaler(MarshalInfo::MarshalType mtype, NDirectStubLinker* psl)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ ILMarshaler* pMarshaler = NULL;
+ switch (mtype)
+ {
+
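+// mtypes.h is an X-macro list: each DEFINE_MARSHALER_TYPE(mt, mclass, fWinRTSupported) entry
+// expands here into a case that instantiates the corresponding IL marshaler class. The same
+// include is reused below to build the size, in-only, WinRT-support, and override-proc tables,
+// keeping all per-marshaler data in a single list.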
+#define DEFINE_MARSHALER_TYPE(mt, mclass, fWinRTSupported) \
+ case MarshalInfo::mt: \
+ pMarshaler = new IL##mclass(); \
+ break;
+#include "mtypes.h"
+#undef DEFINE_MARSHALER_TYPE
+
+ default:
+ UNREACHABLE_MSG("unexpected MarshalType passed to CreateILMarshaler");
+ }
+
+ pMarshaler->SetNDirectStubLinker(psl);
+ return pMarshaler;
+}
+
+
+
+DWORD CalculateArgumentMarshalFlags(BOOL byref, BOOL in, BOOL out, BOOL fMngToNative)
+{
+ LIMITED_METHOD_CONTRACT;
+ DWORD dwMarshalFlags = 0;
+
+ if (byref)
+ {
+ dwMarshalFlags |= MARSHAL_FLAG_BYREF;
+ }
+
+ if (in)
+ {
+ dwMarshalFlags |= MARSHAL_FLAG_IN;
+ }
+
+ if (out)
+ {
+ dwMarshalFlags |= MARSHAL_FLAG_OUT;
+ }
+
+ if (fMngToNative)
+ {
+ dwMarshalFlags |= MARSHAL_FLAG_CLR_TO_NATIVE;
+ }
+
+ return dwMarshalFlags;
+}
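+
+// For example, a byref [In, Out] parameter in a CLR-to-native stub yields
+// MARSHAL_FLAG_BYREF | MARSHAL_FLAG_IN | MARSHAL_FLAG_OUT | MARSHAL_FLAG_CLR_TO_NATIVE.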
+
+DWORD CalculateReturnMarshalFlags(BOOL hrSwap, BOOL fMngToNative)
+{
+ LIMITED_METHOD_CONTRACT;
+ DWORD dwMarshalFlags = MARSHAL_FLAG_RETVAL;
+
+ if (hrSwap)
+ {
+ dwMarshalFlags |= MARSHAL_FLAG_HRESULT_SWAP;
+ }
+
+ if (fMngToNative)
+ {
+ dwMarshalFlags |= MARSHAL_FLAG_CLR_TO_NATIVE;
+ }
+
+ return dwMarshalFlags;
+}
+
+void MarshalInfo::GenerateArgumentIL(NDirectStubLinker* psl,
+ int argOffset, // the argument's index is m_paramidx + argOffset
+ UINT nativeStackOffset, // offset of the argument on the native stack
+ BOOL fMngToNative)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(psl));
+ }
+ CONTRACTL_END;
+
+ if (m_type == MARSHAL_TYPE_UNKNOWN)
+ {
+ EmitOrThrowInteropParamException(psl, fMngToNative, m_resID, m_paramidx + 1); // m_paramidx is 0-based, but the user wants to see a 1-based index
+ return;
+ }
+
+ // set up m_corArgSize and m_nativeArgSize
+ SetupArgumentSizes();
+
+ MarshalerOverrideStatus amostat;
+ UINT resID = IDS_EE_BADMARSHAL_RESTRICTION;
+ amostat = (GetArgumentOverrideProc(m_type)) (psl,
+ m_byref,
+ m_in,
+ m_out,
+ fMngToNative,
+ &m_args,
+ &resID,
+ m_paramidx + argOffset,
+ nativeStackOffset);
+
+
+ if (amostat == OVERRIDDEN)
+ {
+ return;
+ }
+
+ if (amostat == DISALLOWED)
+ {
+ EmitOrThrowInteropParamException(psl, fMngToNative, resID, m_paramidx + 1); // m_paramidx is 0-based, but the user wants to see a 1-based index
+ return;
+ }
+
+ CONSISTENCY_CHECK(amostat == HANDLEASNORMAL);
+
+ NewHolder<ILMarshaler> pMarshaler = CreateILMarshaler(m_type, psl);
+ DWORD dwMarshalFlags = CalculateArgumentMarshalFlags(m_byref, m_in, m_out, fMngToNative);
+
+ if (!pMarshaler->SupportsArgumentMarshal(dwMarshalFlags, &resID))
+ {
+ EmitOrThrowInteropParamException(psl, fMngToNative, resID, m_paramidx + 1); // m_paramidx is 0-based, but the user wants to see a 1-based index
+ return;
+ }
+
+ ILCodeStream* pcsMarshal = psl->GetMarshalCodeStream();
+ ILCodeStream* pcsUnmarshal = psl->GetUnmarshalCodeStream();
+ ILCodeStream* pcsDispatch = psl->GetDispatchCodeStream();
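+
+    // The stub is assembled from separate IL streams: pcsMarshal runs before the native call
+    // (managed-to-native conversion), pcsDispatch loads the converted arguments and performs
+    // the call, and pcsUnmarshal runs after it (native-to-managed conversion and cleanup).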
+
+ pcsMarshal->EmitNOP("// argument { ");
+ pcsUnmarshal->EmitNOP("// argument { ");
+
+ pMarshaler->EmitMarshalArgument(pcsMarshal, pcsUnmarshal, m_paramidx + argOffset, dwMarshalFlags, &m_args);
+
+ //
+ // Increment a counter so that when the finally clause
+ // is run, we only run the cleanup that is needed.
+ //
+ if (pMarshaler->NeedsMarshalCleanupIndex())
+ {
+ // we don't bother writing to the counter if marshaling does not need cleanup
+ psl->EmitSetArgMarshalIndex(pcsMarshal, NDirectStubLinker::CLEANUP_INDEX_ARG0_MARSHAL + m_paramidx + argOffset);
+ }
+ if (pMarshaler->NeedsUnmarshalCleanupIndex())
+ {
+ // we don't bother writing to the counter if unmarshaling does not need exception cleanup
+ psl->EmitSetArgMarshalIndex(pcsUnmarshal, NDirectStubLinker::CLEANUP_INDEX_ARG0_UNMARSHAL + m_paramidx + argOffset);
+ }
+
+ pcsMarshal->EmitNOP("// } argument");
+ pcsUnmarshal->EmitNOP("// } argument");
+
+ pMarshaler->EmitSetupArgument(pcsDispatch);
+ if (m_paramidx == 0)
+ {
+ CorCallingConvention callConv = psl->GetStubTargetCallingConv();
+ if ((callConv & IMAGE_CEE_CS_CALLCONV_MASK) == IMAGE_CEE_UNMANAGED_CALLCONV_THISCALL)
+ {
+ // Make sure the 'this' argument to thiscall is of native int type; JIT asserts this.
+ pcsDispatch->EmitCONV_I();
+ }
+ }
+}
+
+void MarshalInfo::GenerateReturnIL(NDirectStubLinker* psl,
+ int argOffset,
+ BOOL fMngToNative,
+ BOOL fieldGetter,
+ BOOL retval)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(psl));
+ }
+ CONTRACTL_END;
+
+ MarshalerOverrideStatus amostat;
+ UINT resID = IDS_EE_BADMARSHAL_RESTRICTION;
+
+ if (m_type == MARSHAL_TYPE_UNKNOWN)
+ {
+ amostat = HANDLEASNORMAL;
+ }
+ else
+ {
+ amostat = (GetReturnOverrideProc(m_type)) (psl,
+ fMngToNative,
+ retval,
+ &m_args,
+ &resID);
+ }
+
+ if (amostat == DISALLOWED)
+ {
+ EmitOrThrowInteropParamException(psl, fMngToNative, resID, 0);
+ return;
+ }
+
+ if (amostat == HANDLEASNORMAL)
+ {
+ // Historically we have always allowed reading fields that are marshaled as C arrays.
+ if (m_type == MARSHAL_TYPE_UNKNOWN || (!fieldGetter && m_type == MARSHAL_TYPE_NATIVEARRAY))
+ {
+ EmitOrThrowInteropParamException(psl, fMngToNative, m_resID, 0);
+ return;
+ }
+
+ NewHolder<ILMarshaler> pMarshaler = CreateILMarshaler(m_type, psl);
+ DWORD dwMarshalFlags = CalculateReturnMarshalFlags(retval, fMngToNative);
+
+ if (!pMarshaler->SupportsReturnMarshal(dwMarshalFlags, &resID))
+ {
+ EmitOrThrowInteropParamException(psl, fMngToNative, resID, 0);
+ return;
+ }
+
+ ILCodeStream* pcsMarshal = psl->GetMarshalCodeStream();
+ ILCodeStream* pcsUnmarshal = psl->GetReturnUnmarshalCodeStream();
+ ILCodeStream* pcsDispatch = psl->GetDispatchCodeStream();
+
+ pcsMarshal->EmitNOP("// return { ");
+ pcsUnmarshal->EmitNOP("// return { ");
+
+ UINT16 wNativeSize = GetNativeSize(m_type, m_ms);
+
+        // The following behavior has existed for a long time. By aligning the size of the
+        // return value up to stack slot size, we prevent EmitMarshalReturnValue from
+        // distinguishing between, say, a 3-byte structure and a 4-byte structure. The former
+        // is supposed to be returned by-ref using a secret argument (at least in MSVC-compiled
+        // code) while the latter is returned in EAX. We are keeping the behavior for now for
+        // backward compatibility.
+ X86_ONLY(wNativeSize = StackElemSize(wNativeSize));
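+
+        // For example, on x86 a 3-byte structure is rounded up to 4 bytes here and is
+        // therefore marshaled like a 4-byte structure, i.e. returned in EAX.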
+
+ pMarshaler->EmitMarshalReturnValue(pcsMarshal, pcsUnmarshal, pcsDispatch, m_paramidx + argOffset, wNativeSize, dwMarshalFlags, &m_args);
+
+ pcsMarshal->EmitNOP("// } return");
+ pcsUnmarshal->EmitNOP("// } return");
+
+ return;
+ }
+}
+
+void MarshalInfo::SetupArgumentSizes()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_byref)
+ {
+ m_managedArgSize = StackElemSize(sizeof(void*));
+ m_nativeArgSize = StackElemSize(sizeof(void*));
+ }
+ else
+ {
+ m_managedArgSize = StackElemSize(GetManagedSize(m_type, m_ms));
+ m_nativeArgSize = StackElemSize(GetNativeSize(m_type, m_ms));
+ }
+
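+    // On ABIs that pass large value types by reference (those defining
+    // ENREGISTERED_PARAMTYPE_MAXSIZE, e.g. Win64), anything over the limit occupies
+    // only a pointer-sized stack slot.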
+#ifdef ENREGISTERED_PARAMTYPE_MAXSIZE
+ if (m_managedArgSize > ENREGISTERED_PARAMTYPE_MAXSIZE)
+ m_managedArgSize = StackElemSize(sizeof(void*));
+
+ if (m_nativeArgSize > ENREGISTERED_PARAMTYPE_MAXSIZE)
+ m_nativeArgSize = StackElemSize(sizeof(void*));
+#endif // ENREGISTERED_PARAMTYPE_MAXSIZE
+}
+
+UINT16 MarshalInfo::GetManagedSize(MarshalType mtype, MarshalScenario ms)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
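+    // Per-marshaler CLR-side sizes, generated from the mtypes.h X-macro list; entries whose
+    // size depends on the actual type report VARIABLESIZE and are resolved in the switch below.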
+ static const BYTE managedSizes[]=
+ {
+ #define DEFINE_MARSHALER_TYPE(mt, mclass, fWinRTSupported) IL##mclass::c_CLRSize,
+ #include "mtypes.h"
+ };
+
+ _ASSERTE((SIZE_T)mtype < COUNTOF(managedSizes));
+ BYTE managedSize = managedSizes[mtype];
+
+ if (managedSize == VARIABLESIZE)
+ {
+ switch (mtype)
+ {
+
+ case MARSHAL_TYPE_BLITTABLEVALUECLASS:
+ case MARSHAL_TYPE_VALUECLASS:
+#ifdef FEATURE_COMINTEROP
+ case MARSHAL_TYPE_DATETIME:
+ case MARSHAL_TYPE_NULLABLE:
+ case MARSHAL_TYPE_KEYVALUEPAIR:
+#endif // FEATURE_COMINTEROP
+            return (UINT16) m_pMT->GetAlignedNumInstanceFieldBytes();
+
+ default:
+ _ASSERTE(0);
+ }
+ }
+
+ return managedSize;
+}
+
+UINT16 MarshalInfo::GetNativeSize(MarshalType mtype, MarshalScenario ms)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ static const BYTE nativeSizes[]=
+ {
+ #define DEFINE_MARSHALER_TYPE(mt, mclass, fWinRTSupported) IL##mclass::c_nativeSize,
+ #include "mtypes.h"
+ };
+
+ _ASSERTE((SIZE_T)mtype < COUNTOF(nativeSizes));
+ BYTE nativeSize = nativeSizes[mtype];
+
+ if (nativeSize == VARIABLESIZE)
+ {
+ switch (mtype)
+ {
+ case MARSHAL_TYPE_BLITTABLEVALUECLASS:
+ case MARSHAL_TYPE_VALUECLASS:
+#ifndef FEATURE_CORECLR
+ case MARSHAL_TYPE_BLITTABLEVALUECLASSWITHCOPYCTOR:
+#endif // !FEATURE_CORECLR
+ return (UINT16) m_pMT->GetNativeSize();
+
+ default:
+ _ASSERTE(0);
+ }
+ }
+
+ return nativeSize;
+}
+
+bool MarshalInfo::IsInOnly(MarshalType mtype)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ static const bool ILMarshalerIsInOnly[] =
+ {
+ #define DEFINE_MARSHALER_TYPE(mt, mclass, fWinRTSupported) \
+ (IL##mclass::c_fInOnly ? true : false),
+
+ #include "mtypes.h"
+ };
+
+ return ILMarshalerIsInOnly[mtype];
+}
+
+bool MarshalInfo::IsSupportedForWinRT(MarshalType mtype)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ static const bool MarshalerSupportsWinRT[] =
+ {
+ #define DEFINE_MARSHALER_TYPE(mt, mclass, fWinRTSupported) \
+ fWinRTSupported,
+
+ #include "mtypes.h"
+ };
+
+ return MarshalerSupportsWinRT[mtype];
+}
+
+OVERRIDEPROC MarshalInfo::GetArgumentOverrideProc(MarshalType mtype)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ static const OVERRIDEPROC ILArgumentOverrideProcs[] =
+ {
+ #define DEFINE_MARSHALER_TYPE(mt, mclass, fWinRTSupported) IL##mclass::ArgumentOverride,
+ #include "mtypes.h"
+ };
+
+ _ASSERTE((SIZE_T)mtype < COUNTOF(ILArgumentOverrideProcs));
+ return ILArgumentOverrideProcs[mtype];
+}
+
+RETURNOVERRIDEPROC MarshalInfo::GetReturnOverrideProc(MarshalType mtype)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ static const RETURNOVERRIDEPROC ILReturnOverrideProcs[] =
+ {
+ #define DEFINE_MARSHALER_TYPE(mt, mclass, fWinRTSupported) IL##mclass::ReturnOverride,
+ #include "mtypes.h"
+ };
+
+ _ASSERTE((SIZE_T)mtype < COUNTOF(ILReturnOverrideProcs));
+ return ILReturnOverrideProcs[mtype];
+}
+
+void MarshalInfo::GetItfMarshalInfo(ItfMarshalInfo* pInfo)
+{
+ STANDARD_VM_CONTRACT;
+
+ GetItfMarshalInfo(TypeHandle(m_pMT),
+#ifdef FEATURE_COMINTEROP
+ TypeHandle(m_pDefaultItfMT),
+#else // FEATURE_COMINTEROP
+ TypeHandle(),
+#endif // FEATURE_COMINTEROP
+ m_fDispItf,
+ m_fInspItf,
+ m_ms,
+ pInfo);
+}
+
+void MarshalInfo::GetItfMarshalInfo(TypeHandle th, TypeHandle thItf, BOOL fDispItf, BOOL fInspItf, MarshalScenario ms, ItfMarshalInfo *pInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pInfo));
+ PRECONDITION(!th.IsNull());
+ PRECONDITION(!th.IsTypeDesc());
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_COMINTEROP
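+    // In summary: for a class we compute its default interface (explicitly specified, the
+    // WinRT default interface, or the auto-generated class interface); for an interface we
+    // use the interface itself (or the default interface if it is a COM class interface) and
+    // record any coclass as a hint. The ITF_MARSHAL_DISP_ITF / ITF_MARSHAL_INSP_ITF flags
+    // track whether the resulting pointer is IDispatch-, IInspectable-, or IUnknown-based.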
+
+ // Initialize the output parameter.
+ pInfo->dwFlags = 0;
+ pInfo->thItf = TypeHandle();
+ pInfo->thClass = TypeHandle();
+
+ if (!th.IsInterface())
+ {
+ // If the parameter is not System.Object.
+ if (!th.IsObjectType())
+ {
+ // Set the class method table.
+ pInfo->thClass = th;
+
+ if (th.IsTypeDesc() || !th.AsMethodTable()->IsWinRTDelegate())
+ {
+ // If this is not a WinRT delegate, retrieve the default interface method table.
+ TypeHandle hndDefItfClass;
+ DefaultInterfaceType DefItfType;
+
+ if (!thItf.IsNull())
+ {
+ hndDefItfClass = thItf;
+ DefItfType = DefaultInterfaceType_Explicit;
+ }
+ else if (th.IsProjectedFromWinRT() || th.IsExportedToWinRT())
+ {
+ // WinRT classes use their WinRT default interface
+ hndDefItfClass = th.GetMethodTable()->GetDefaultWinRTInterface();
+ DefItfType = DefaultInterfaceType_Explicit;
+ }
+ else
+ {
+ DefItfType = GetDefaultInterfaceForClassWrapper(th, &hndDefItfClass);
+ }
+ switch (DefItfType)
+ {
+ case DefaultInterfaceType_Explicit:
+ {
+ pInfo->thItf = hndDefItfClass;
+ switch (hndDefItfClass.GetComInterfaceType())
+ {
+ case ifDispatch:
+ case ifDual:
+ pInfo->dwFlags |= ItfMarshalInfo::ITF_MARSHAL_DISP_ITF;
+ break;
+
+ case ifInspectable:
+ pInfo->dwFlags |= ItfMarshalInfo::ITF_MARSHAL_INSP_ITF;
+ break;
+ }
+ break;
+ }
+
+ case DefaultInterfaceType_AutoDual:
+ {
+ pInfo->thItf = hndDefItfClass;
+ pInfo->dwFlags |= ItfMarshalInfo::ITF_MARSHAL_DISP_ITF;
+ break;
+ }
+
+ case DefaultInterfaceType_IUnknown:
+ case DefaultInterfaceType_BaseComClass:
+ {
+ break;
+ }
+
+ case DefaultInterfaceType_AutoDispatch:
+ {
+ pInfo->thItf = hndDefItfClass;
+ pInfo->dwFlags |= ItfMarshalInfo::ITF_MARSHAL_DISP_ITF;
+ break;
+ }
+
+ default:
+ {
+ _ASSERTE(!"Invalid default interface type!");
+ break;
+ }
+ }
+ }
+ }
+ else
+ {
+ // The type will be marshalled as an IUnknown, IInspectable, or IDispatch pointer depending
+            // on the values of fDispItf and fInspItf.
+ if (fDispItf)
+ {
+ pInfo->dwFlags |= ItfMarshalInfo::ITF_MARSHAL_DISP_ITF;
+ }
+ else if (fInspItf)
+ {
+ pInfo->dwFlags |= ItfMarshalInfo::ITF_MARSHAL_INSP_ITF;
+ }
+
+ pInfo->dwFlags |= ItfMarshalInfo::ITF_MARSHAL_USE_BASIC_ITF;
+ }
+ }
+ else if (fInspItf)
+ {
+ // IInspectable-based interfaces are simple
+ pInfo->thItf = th;
+ pInfo->dwFlags |= ItfMarshalInfo::ITF_MARSHAL_INSP_ITF;
+ }
+ else
+ {
+ // Determine the interface this type will be marshalled as.
+ if (th.IsComClassInterface())
+ pInfo->thItf = th.GetDefItfForComClassItf();
+ else
+ pInfo->thItf = th;
+
+ // Determine if we are dealing with an IDispatch, IInspectable, or IUnknown based interface.
+ switch (pInfo->thItf.GetComInterfaceType())
+ {
+ case ifDispatch:
+ case ifDual:
+ pInfo->dwFlags |= ItfMarshalInfo::ITF_MARSHAL_DISP_ITF;
+ break;
+
+ case ifInspectable:
+ pInfo->dwFlags |= ItfMarshalInfo::ITF_MARSHAL_INSP_ITF;
+ break;
+ }
+
+ // Look to see if the interface has a coclass defined
+ pInfo->thClass = th.GetCoClassForInterface();
+ if (!pInfo->thClass.IsNull())
+ {
+ pInfo->dwFlags |= ItfMarshalInfo::ITF_MARSHAL_CLASS_IS_HINT;
+ }
+ }
+
+ // store the pre-redirection interface type as thNativeItf
+ pInfo->thNativeItf = pInfo->thItf;
+
+ if (ms == MARSHAL_SCENARIO_WINRT)
+ {
+ // Use the "class is hint" flag so GetObjectRefFromComIP doesn't verify that the
+ // WinRT object really supports IInspectable - note that we'll do the verification
+ // in UnmarshalObjectFromInterface for this exact pInfo->thItf.
+ pInfo->dwFlags |= ItfMarshalInfo::ITF_MARSHAL_CLASS_IS_HINT;
+
+ pInfo->dwFlags |= ItfMarshalInfo::ITF_MARSHAL_WINRT_SCENARIO;
+
+ // Perform interface redirection statically here. When the resulting ItfMarshalInfo
+ // is used for CLR->WinRT marshaling, this is necessary so we know which COM vtable
+ // to pass out (for instance IList could be marshaled out as IList or IBindableVector
+ // depending on the marshal scenario). In the WinRT->CLR direction, it's just an
+ // optimization which saves us from performing redirection at run-time.
+
+ if (!pInfo->thItf.IsNull())
+ {
+ MethodTable *pNewItfMT1;
+ MethodTable *pNewItfMT2;
+ switch (RCW::GetInterfacesForQI(pInfo->thItf.GetMethodTable(), &pNewItfMT1, &pNewItfMT2))
+ {
+ case RCW::InterfaceRedirection_None:
+ case RCW::InterfaceRedirection_UnresolvedIEnumerable:
+ break;
+
+ case RCW::InterfaceRedirection_IEnumerable_RetryOnFailure:
+ case RCW::InterfaceRedirection_IEnumerable:
+ case RCW::InterfaceRedirection_Other:
+ pInfo->thNativeItf = pNewItfMT1;
+ break;
+
+ case RCW::InterfaceRedirection_Other_RetryOnFailure:
+ pInfo->thNativeItf = pNewItfMT2;
+ break;
+ }
+ }
+
+ if (!pInfo->thNativeItf.IsNull())
+ {
+            // The native interface is a redirected WinRT interface - we need to update the flags.
+ _ASSERTE(pInfo->thNativeItf.AsMethodTable()->IsProjectedFromWinRT());
+
+ pInfo->dwFlags &= ~ItfMarshalInfo::ITF_MARSHAL_DISP_ITF;
+ pInfo->dwFlags |= ItfMarshalInfo::ITF_MARSHAL_INSP_ITF;
+ }
+ }
+
+#else // FEATURE_COMINTEROP
+ if (!th.IsInterface())
+ pInfo->thClass = th;
+ else
+ pInfo->thItf = th;
+#endif // FEATURE_COMINTEROP
+}
+
+HRESULT MarshalInfo::TryGetItfMarshalInfo(TypeHandle th, BOOL fDispItf, BOOL fInspItf, ItfMarshalInfo *pInfo)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!th.IsNull());
+ PRECONDITION(CheckPointer(pInfo));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ GetItfMarshalInfo(th, TypeHandle(), fDispItf, fInspItf,
+#ifdef FEATURE_COMINTEROP
+ MARSHAL_SCENARIO_COMINTEROP,
+#else // FEATURE_COMINTEROP
+ MARSHAL_SCENARIO_NDIRECT,
+#endif // FEATURE_COMINTEROP
+ pInfo);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ return hr;
+}
+
+#ifdef _DEBUG
+VOID MarshalInfo::DumpMarshalInfo(Module* pModule, SigPointer sig, const SigTypeContext *pTypeContext, mdToken token,
+ MarshalScenario ms, CorNativeLinkType nlType, CorNativeLinkFlags nlFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (LoggingOn(LF_MARSHALER, LL_INFO10))
+ {
+ SString logbuf;
+ StackScratchBuffer scratch;
+
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+
+ logbuf.AppendASCII("------------------------------------------------------------\n");
+ LOG((LF_MARSHALER, LL_INFO10, logbuf.GetANSI(scratch)));
+ logbuf.Clear();
+
+ logbuf.AppendASCII("Managed type: ");
+ if (m_byref)
+ logbuf.AppendASCII("Byref ");
+
+ TypeHandle th = sig.GetTypeHandleNT(pModule, pTypeContext);
+ if (th.IsNull())
+ logbuf.AppendASCII("<error>");
+ else
+ {
+ SigFormat sigfmt;
+ sigfmt.AddType(th);
+ logbuf.AppendUTF8(sigfmt.GetCString());
+ }
+
+ logbuf.AppendASCII("\n");
+ LOG((LF_MARSHALER, LL_INFO10, logbuf.GetANSI(scratch)));
+ logbuf.Clear();
+
+ logbuf.AppendASCII("NativeType : ");
+ PCCOR_SIGNATURE pvNativeType;
+ ULONG cbNativeType;
+ if (token == mdParamDefNil
+ || pInternalImport->GetFieldMarshal(token,
+ &pvNativeType,
+ &cbNativeType) != S_OK)
+ {
+ logbuf.AppendASCII("<absent>");
+ }
+ else
+ {
+
+ while (cbNativeType--)
+ {
+ char num[100];
+ sprintf_s(num, COUNTOF(num), "0x%lx ", (ULONG)*pvNativeType);
+ logbuf.AppendASCII(num);
+ switch (*(pvNativeType++))
+ {
+#define XXXXX(nt) case nt: logbuf.AppendASCII("(" #nt ")"); break;
+
+ XXXXX(NATIVE_TYPE_BOOLEAN)
+ XXXXX(NATIVE_TYPE_I1)
+
+ XXXXX(NATIVE_TYPE_U1)
+ XXXXX(NATIVE_TYPE_I2)
+ XXXXX(NATIVE_TYPE_U2)
+ XXXXX(NATIVE_TYPE_I4)
+
+ XXXXX(NATIVE_TYPE_U4)
+ XXXXX(NATIVE_TYPE_I8)
+ XXXXX(NATIVE_TYPE_U8)
+ XXXXX(NATIVE_TYPE_R4)
+
+ XXXXX(NATIVE_TYPE_R8)
+
+ XXXXX(NATIVE_TYPE_LPSTR)
+ XXXXX(NATIVE_TYPE_LPWSTR)
+ XXXXX(NATIVE_TYPE_LPTSTR)
+ XXXXX(NATIVE_TYPE_FIXEDSYSSTRING)
+
+ XXXXX(NATIVE_TYPE_STRUCT)
+
+ XXXXX(NATIVE_TYPE_INT)
+ XXXXX(NATIVE_TYPE_FIXEDARRAY)
+
+ XXXXX(NATIVE_TYPE_UINT)
+
+ XXXXX(NATIVE_TYPE_FUNC)
+
+ XXXXX(NATIVE_TYPE_ASANY)
+
+ XXXXX(NATIVE_TYPE_ARRAY)
+ XXXXX(NATIVE_TYPE_LPSTRUCT)
+
+ XXXXX(NATIVE_TYPE_IUNKNOWN)
+
+#ifdef FEATURE_COMINTEROP
+ XXXXX(NATIVE_TYPE_BSTR)
+ XXXXX(NATIVE_TYPE_TBSTR)
+ XXXXX(NATIVE_TYPE_ANSIBSTR)
+ XXXXX(NATIVE_TYPE_HSTRING)
+ XXXXX(NATIVE_TYPE_BYVALSTR)
+
+ XXXXX(NATIVE_TYPE_VARIANTBOOL)
+ XXXXX(NATIVE_TYPE_SAFEARRAY)
+
+ XXXXX(NATIVE_TYPE_IDISPATCH)
+ XXXXX(NATIVE_TYPE_INTF)
+#endif // FEATURE_COMINTEROP
+
+#undef XXXXX
+
+
+ case NATIVE_TYPE_CUSTOMMARSHALER:
+ {
+ int strLen = 0;
+ logbuf.AppendASCII("(NATIVE_TYPE_CUSTOMMARSHALER)");
+
+ // Skip the typelib guid.
+ logbuf.AppendASCII(" ");
+
+ strLen = CPackedLen::GetLength(pvNativeType, (void const **)&pvNativeType);
+ if (strLen)
+ {
+ BYTE* p = (BYTE*)logbuf.OpenANSIBuffer(strLen);
+ memcpyNoGCRefs(p, pvNativeType, strLen);
+ logbuf.CloseBuffer();
+ logbuf.AppendASCII("\0");
+
+ pvNativeType += strLen;
+ cbNativeType -= strLen + 1;
+
+ // Skip the name of the native type.
+ logbuf.AppendASCII(" ");
+ }
+
+
+ strLen = CPackedLen::GetLength(pvNativeType, (void const **)&pvNativeType);
+ if (strLen)
+ {
+ BYTE* p = (BYTE*)logbuf.OpenANSIBuffer(strLen);
+ memcpyNoGCRefs(p, pvNativeType, strLen);
+ logbuf.CloseBuffer();
+ logbuf.AppendASCII("\0");
+
+ pvNativeType += strLen;
+ cbNativeType -= strLen + 1;
+
+ // Extract the name of the custom marshaler.
+ logbuf.AppendASCII(" ");
+ }
+
+
+ strLen = CPackedLen::GetLength(pvNativeType, (void const **)&pvNativeType);
+ if (strLen)
+ {
+ BYTE* p = (BYTE*)logbuf.OpenANSIBuffer(strLen);
+ memcpyNoGCRefs(p, pvNativeType, strLen);
+ logbuf.CloseBuffer();
+ logbuf.AppendASCII("\0");
+
+ pvNativeType += strLen;
+ cbNativeType -= strLen + 1;
+
+ // Extract the cookie string.
+ logbuf.AppendASCII(" ");
+ }
+
+ strLen = CPackedLen::GetLength(pvNativeType, (void const **)&pvNativeType);
+ if (strLen)
+ {
+ BYTE* p = (BYTE*)logbuf.OpenANSIBuffer(strLen);
+ memcpyNoGCRefs(p, pvNativeType, strLen);
+ logbuf.CloseBuffer();
+ logbuf.AppendASCII("\0");
+
+ pvNativeType += strLen;
+ cbNativeType -= strLen + 1;
+ }
+
+ break;
+ }
+
+ default:
+ logbuf.AppendASCII("(?)");
+ }
+
+ logbuf.AppendASCII(" ");
+ }
+ }
+ logbuf.AppendASCII("\n");
+ LOG((LF_MARSHALER, LL_INFO10, logbuf.GetANSI(scratch)));
+ logbuf.Clear();
+
+ logbuf.AppendASCII("MarshalType : ");
+ {
+ char num[100];
+ sprintf_s(num, COUNTOF(num), "0x%lx ", (ULONG)m_type);
+ logbuf.AppendASCII(num);
+ }
+ switch (m_type)
+ {
+ #define DEFINE_MARSHALER_TYPE(mt, mc, fWinRTSupported) case mt: logbuf.AppendASCII( #mt " (IL" #mc ")"); break;
+ #include "mtypes.h"
+
+ case MARSHAL_TYPE_UNKNOWN:
+ logbuf.AppendASCII("MARSHAL_TYPE_UNKNOWN (illegal combination)");
+ break;
+
+ default:
+ logbuf.AppendASCII("MARSHAL_TYPE_???");
+ break;
+ }
+
+ logbuf.AppendASCII("\n");
+
+
+ logbuf.AppendASCII("Metadata In/Out : ");
+ if (TypeFromToken(token) != mdtParamDef || token == mdParamDefNil)
+ logbuf.AppendASCII("<absent>");
+
+ else
+ {
+ DWORD dwAttr = 0;
+ USHORT usSequence;
+ LPCSTR szParamName_Ignore;
+ if (FAILED(pInternalImport->GetParamDefProps(token, &usSequence, &dwAttr, &szParamName_Ignore)))
+ {
+ logbuf.AppendASCII("Invalid ParamDef record ");
+ }
+ else
+ {
+ if (IsPdIn(dwAttr))
+ logbuf.AppendASCII("In ");
+
+ if (IsPdOut(dwAttr))
+ logbuf.AppendASCII("Out ");
+ }
+ }
+
+ logbuf.AppendASCII("\n");
+
+ logbuf.AppendASCII("Effective In/Out : ");
+ if (m_in)
+ logbuf.AppendASCII("In ");
+
+ if (m_out)
+ logbuf.AppendASCII("Out ");
+
+ logbuf.AppendASCII("\n");
+
+ LOG((LF_MARSHALER, LL_INFO10, logbuf.GetANSI(scratch)));
+ logbuf.Clear();
+ }
+} // MarshalInfo::DumpMarshalInfo
+#endif //_DEBUG
+
+#ifndef CROSSGEN_COMPILE
+#ifdef FEATURE_COMINTEROP
+DispParamMarshaler *MarshalInfo::GenerateDispParamMarshaler()
+{
+ CONTRACT (DispParamMarshaler*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ NewHolder<DispParamMarshaler> pDispParamMarshaler = NULL;
+
+ switch (m_type)
+ {
+ case MARSHAL_TYPE_OLECOLOR:
+ pDispParamMarshaler = new DispParamOleColorMarshaler();
+ break;
+
+ case MARSHAL_TYPE_CURRENCY:
+ pDispParamMarshaler = new DispParamCurrencyMarshaler();
+ break;
+
+ case MARSHAL_TYPE_GENERIC_4:
+ if (m_fErrorNativeType)
+ pDispParamMarshaler = new DispParamErrorMarshaler();
+ break;
+
+ case MARSHAL_TYPE_INTERFACE:
+ {
+ ItfMarshalInfo itfInfo;
+ GetItfMarshalInfo(TypeHandle(m_pMT), TypeHandle(m_pDefaultItfMT), m_fDispItf, m_fInspItf, m_ms, &itfInfo);
+ pDispParamMarshaler = new DispParamInterfaceMarshaler(
+ itfInfo.dwFlags & ItfMarshalInfo::ITF_MARSHAL_DISP_ITF,
+ itfInfo.thItf.GetMethodTable(),
+ itfInfo.thClass.GetMethodTable(),
+ itfInfo.dwFlags & ItfMarshalInfo::ITF_MARSHAL_CLASS_IS_HINT);
+ break;
+ }
+
+ case MARSHAL_TYPE_VALUECLASS:
+ case MARSHAL_TYPE_BLITTABLEVALUECLASS:
+ case MARSHAL_TYPE_BLITTABLEPTR:
+ case MARSHAL_TYPE_LAYOUTCLASSPTR:
+#ifndef FEATURE_CORECLR
+ case MARSHAL_TYPE_BLITTABLEVALUECLASSWITHCOPYCTOR:
+#endif
+ pDispParamMarshaler = new DispParamRecordMarshaler(m_pMT);
+ break;
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+ case MARSHAL_TYPE_SAFEARRAY:
+ pDispParamMarshaler = new DispParamArrayMarshaler(m_arrayElementType, m_hndArrayElemType.GetMethodTable());
+ break;
+#endif
+
+ case MARSHAL_TYPE_DELEGATE:
+ pDispParamMarshaler = new DispParamDelegateMarshaler(m_pMT);
+ break;
+
+ case MARSHAL_TYPE_REFERENCECUSTOMMARSHALER:
+ pDispParamMarshaler = new DispParamCustomMarshaler(m_pCMHelper, m_CMVt);
+ break;
+ }
+
+ pDispParamMarshaler.SuppressRelease();
+ RETURN pDispParamMarshaler;
+}
+
+
+DispatchWrapperType MarshalInfo::GetDispWrapperType()
+{
+ STANDARD_VM_CONTRACT;
+
+ DispatchWrapperType WrapperType = (DispatchWrapperType)0;
+
+ switch (m_type)
+ {
+ case MARSHAL_TYPE_CURRENCY:
+ WrapperType = DispatchWrapperType_Currency;
+ break;
+
+ case MARSHAL_TYPE_BSTR:
+ WrapperType = DispatchWrapperType_BStr;
+ break;
+
+ case MARSHAL_TYPE_GENERIC_4:
+ if (m_fErrorNativeType)
+ WrapperType = DispatchWrapperType_Error;
+ break;
+
+ case MARSHAL_TYPE_INTERFACE:
+ {
+ ItfMarshalInfo itfInfo;
+ GetItfMarshalInfo(TypeHandle(m_pMT), TypeHandle(m_pDefaultItfMT), m_fDispItf, m_fInspItf, m_ms, &itfInfo);
+ WrapperType = !!(itfInfo.dwFlags & ItfMarshalInfo::ITF_MARSHAL_DISP_ITF) ? DispatchWrapperType_Dispatch : DispatchWrapperType_Unknown;
+ break;
+ }
+
+ case MARSHAL_TYPE_SAFEARRAY:
+ switch (m_arrayElementType)
+ {
+ case VT_CY:
+ WrapperType = (DispatchWrapperType)(DispatchWrapperType_SafeArray | DispatchWrapperType_Currency);
+ break;
+ case VT_UNKNOWN:
+ WrapperType = (DispatchWrapperType)(DispatchWrapperType_SafeArray | DispatchWrapperType_Unknown);
+ break;
+ case VT_DISPATCH:
+ WrapperType = (DispatchWrapperType)(DispatchWrapperType_SafeArray | DispatchWrapperType_Dispatch);
+ break;
+ case VT_ERROR:
+ WrapperType = (DispatchWrapperType)(DispatchWrapperType_SafeArray | DispatchWrapperType_Error);
+ break;
+ case VT_BSTR:
+ WrapperType = (DispatchWrapperType)(DispatchWrapperType_SafeArray | DispatchWrapperType_BStr);
+ break;
+ }
+ break;
+ }
+
+ return WrapperType;
+}
+
+#endif // FEATURE_COMINTEROP
+
+
+VOID MarshalInfo::MarshalTypeToString(SString& strMarshalType, BOOL fSizeIsSpecified)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LPCWSTR strRetVal;
+
+ if (m_type == MARSHAL_TYPE_NATIVEARRAY)
+ {
+ SString strVarType;
+ VarTypeToString(m_arrayElementType, strVarType);
+
+ if (!fSizeIsSpecified)
+ {
+ strMarshalType.Printf(W("native array of %s (size not specified by a parameter)"),
+ strVarType.GetUnicode());
+ }
+ else
+ {
+ strMarshalType.Printf(W("native array of %s (size specified by parameter %i)"),
+ strVarType.GetUnicode(), m_countParamIdx);
+ }
+
+ return;
+ }
+#ifdef FEATURE_COMINTEROP
+ // Some MarshalTypes have extra information and require special handling
+ else if (m_type == MARSHAL_TYPE_INTERFACE)
+ {
+ ItfMarshalInfo itfInfo;
+ GetItfMarshalInfo(TypeHandle(m_pMT), TypeHandle(m_pDefaultItfMT), m_fDispItf, m_fInspItf, m_ms, &itfInfo);
+
+ if (!itfInfo.thItf.IsNull())
+ {
+ StackSString ssClassName;
+ itfInfo.thItf.GetMethodTable()->_GetFullyQualifiedNameForClass(ssClassName);
+
+ if (!!(itfInfo.dwFlags & ItfMarshalInfo::ITF_MARSHAL_DISP_ITF))
+ {
+ strMarshalType.SetLiteral(W("IDispatch "));
+ }
+ else if (!!(itfInfo.dwFlags & ItfMarshalInfo::ITF_MARSHAL_INSP_ITF))
+ {
+ strMarshalType.SetLiteral(W("IInspectable"));
+ }
+ else
+ {
+ strMarshalType.SetLiteral(W("IUnknown "));
+ }
+
+ if (itfInfo.dwFlags & ItfMarshalInfo::ITF_MARSHAL_USE_BASIC_ITF)
+ {
+ strMarshalType.Append(W("(basic) "));
+ }
+
+ strMarshalType.Append(ssClassName);
+ return;
+ }
+ else
+ {
+ if (!!(itfInfo.dwFlags & ItfMarshalInfo::ITF_MARSHAL_DISP_ITF))
+ strRetVal = W("IDispatch");
+ else if (!!(itfInfo.dwFlags & ItfMarshalInfo::ITF_MARSHAL_INSP_ITF))
+ strRetVal = W("IInspectable");
+ else
+ strRetVal = W("IUnknown");
+ }
+ }
+ else if (m_type == MARSHAL_TYPE_SAFEARRAY)
+ {
+ StackSString strVarType;
+ VarTypeToString(m_arrayElementType, strVarType);
+
+ strMarshalType = SL(W("SafeArray of "));
+ strMarshalType.Append(strVarType);
+
+ return;
+ }
+#endif // FEATURE_COMINTEROP
+ else if (m_type == MARSHAL_TYPE_REFERENCECUSTOMMARSHALER)
+ {
+ GCX_COOP();
+
+ OBJECTHANDLE objHandle = m_pCMHelper->GetCustomMarshalerInfo()->GetCustomMarshaler();
+ {
+ OBJECTREF pObjRef = ObjectFromHandle(objHandle);
+ DefineFullyQualifiedNameForClassW();
+
+ strMarshalType.Printf(W("custom marshaler (%s)"),
+ GetFullyQualifiedNameForClassW(pObjRef->GetMethodTable()));
+ }
+
+ return;
+ }
+ else
+ {
+ // All other MarshalTypes with no special handling
+ switch (m_type)
+ {
+ case MARSHAL_TYPE_GENERIC_1:
+ strRetVal = W("BYTE");
+ break;
+ case MARSHAL_TYPE_GENERIC_U1:
+ strRetVal = W("unsigned BYTE");
+ break;
+ case MARSHAL_TYPE_GENERIC_2:
+ strRetVal = W("WORD");
+ break;
+ case MARSHAL_TYPE_GENERIC_U2:
+ strRetVal = W("unsigned WORD");
+ break;
+ case MARSHAL_TYPE_GENERIC_4:
+ strRetVal = W("DWORD");
+ break;
+ case MARSHAL_TYPE_GENERIC_8:
+ strRetVal = W("QUADWORD");
+ break;
+ case MARSHAL_TYPE_WINBOOL:
+ strRetVal = W("Windows Bool");
+ break;
+#ifdef FEATURE_COMINTEROP
+ case MARSHAL_TYPE_VTBOOL:
+ strRetVal = W("VARIANT Bool");
+ break;
+#endif // FEATURE_COMINTEROP
+ case MARSHAL_TYPE_ANSICHAR:
+ strRetVal = W("Ansi character");
+ break;
+ case MARSHAL_TYPE_CBOOL:
+ strRetVal = W("CBool");
+ break;
+ case MARSHAL_TYPE_FLOAT:
+ strRetVal = W("float");
+ break;
+ case MARSHAL_TYPE_DOUBLE:
+ strRetVal = W("double");
+ break;
+ case MARSHAL_TYPE_CURRENCY:
+ strRetVal = W("CURRENCY");
+ break;
+ case MARSHAL_TYPE_DECIMAL:
+ strRetVal = W("DECIMAL");
+ break;
+ case MARSHAL_TYPE_DECIMAL_PTR:
+ strRetVal = W("DECIMAL pointer");
+ break;
+ case MARSHAL_TYPE_GUID:
+ strRetVal = W("GUID");
+ break;
+ case MARSHAL_TYPE_GUID_PTR:
+ strRetVal = W("GUID pointer");
+ break;
+ case MARSHAL_TYPE_DATE:
+ strRetVal = W("DATE");
+ break;
+#ifdef FEATURE_COMINTEROP
+ case MARSHAL_TYPE_BSTR:
+ strRetVal = W("BSTR");
+ break;
+#endif // FEATURE_COMINTEROP
+ case MARSHAL_TYPE_LPWSTR:
+ strRetVal = W("LPWSTR");
+ break;
+ case MARSHAL_TYPE_LPSTR:
+ strRetVal = W("LPSTR");
+ break;
+#ifdef FEATURE_COMINTEROP
+ case MARSHAL_TYPE_ANSIBSTR:
+ strRetVal = W("AnsiBStr");
+ break;
+#endif // FEATURE_COMINTEROP
+ case MARSHAL_TYPE_LPWSTR_BUFFER:
+ strRetVal = W("LPWSTR buffer");
+ break;
+ case MARSHAL_TYPE_LPSTR_BUFFER:
+ strRetVal = W("LPSTR buffer");
+ break;
+ case MARSHAL_TYPE_ASANYA:
+ strRetVal = W("AsAnyA");
+ break;
+ case MARSHAL_TYPE_ASANYW:
+ strRetVal = W("AsAnyW");
+ break;
+ case MARSHAL_TYPE_DELEGATE:
+ strRetVal = W("Delegate");
+ break;
+ case MARSHAL_TYPE_BLITTABLEPTR:
+ strRetVal = W("blittable pointer");
+ break;
+#ifdef FEATURE_COMINTEROP
+ case MARSHAL_TYPE_VBBYVALSTR:
+ strRetVal = W("VBByValStr");
+ break;
+ case MARSHAL_TYPE_VBBYVALSTRW:
+ strRetVal = W("VBByRefStr");
+ break;
+#endif // FEATURE_COMINTEROP
+ case MARSHAL_TYPE_LAYOUTCLASSPTR:
+ strRetVal = W("Layout class pointer");
+ break;
+ case MARSHAL_TYPE_ARRAYWITHOFFSET:
+ strRetVal = W("ArrayWithOffset");
+ break;
+ case MARSHAL_TYPE_BLITTABLEVALUECLASS:
+ strRetVal = W("blittable value class");
+ break;
+ case MARSHAL_TYPE_VALUECLASS:
+ strRetVal = W("value class");
+ break;
+ case MARSHAL_TYPE_ARGITERATOR:
+ strRetVal = W("ArgIterator");
+ break;
+#ifndef FEATURE_CORECLR
+ case MARSHAL_TYPE_BLITTABLEVALUECLASSWITHCOPYCTOR:
+ strRetVal = W("blittable value class with copy constructor");
+ break;
+#endif // FEATURE_CORECLR
+#ifdef FEATURE_COMINTEROP
+ case MARSHAL_TYPE_OBJECT:
+ strRetVal = W("VARIANT");
+ break;
+#endif // FEATURE_COMINTEROP
+ case MARSHAL_TYPE_HANDLEREF:
+ strRetVal = W("HandleRef");
+ break;
+#ifdef FEATURE_COMINTEROP
+ case MARSHAL_TYPE_OLECOLOR:
+ strRetVal = W("OLE_COLOR");
+ break;
+#endif // FEATURE_COMINTEROP
+ case MARSHAL_TYPE_RUNTIMETYPEHANDLE:
+ strRetVal = W("RuntimeTypeHandle");
+ break;
+ case MARSHAL_TYPE_RUNTIMEFIELDHANDLE:
+ strRetVal = W("RuntimeFieldHandle");
+ break;
+ case MARSHAL_TYPE_RUNTIMEMETHODHANDLE:
+ strRetVal = W("RuntimeMethodHandle");
+ break;
+ case MARSHAL_TYPE_RUNTIMEMETHODINFO:
+ strRetVal = W("RuntimeMethodInfo");
+ break;
+ case MARSHAL_TYPE_RUNTIMEMODULE:
+ strRetVal = W("RuntimeModule");
+ break;
+ case MARSHAL_TYPE_RUNTIMEASSEMBLY:
+ strRetVal = W("RuntimeAssembly");
+ break;
+ default:
+ strRetVal = W("<UNKNOWN>");
+ break;
+ }
+ }
+
+ strMarshalType.Set(strRetVal);
+ return;
+}
+
+VOID MarshalInfo::VarTypeToString(VARTYPE vt, SString& strVarType)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ LPCWSTR strRetVal;
+
+ switch(vt)
+ {
+ case VT_I2:
+ strRetVal = W("2-byte signed int");
+ break;
+ case VT_I4:
+ strRetVal = W("4-byte signed int");
+ break;
+ case VT_R4:
+ strRetVal = W("4-byte real");
+ break;
+ case VT_R8:
+ strRetVal = W("8-byte real");
+ break;
+ case VT_CY:
+ strRetVal = W("currency");
+ break;
+ case VT_DATE:
+ strRetVal = W("date");
+ break;
+ case VT_BSTR:
+ strRetVal = W("binary string");
+ break;
+ case VT_DISPATCH:
+ strRetVal = W("IDispatch *");
+ break;
+ case VT_ERROR:
+ strRetVal = W("Scode");
+ break;
+ case VT_BOOL:
+ strRetVal = W("boolean");
+ break;
+ case VT_VARIANT:
+ strRetVal = W("VARIANT *");
+ break;
+ case VT_UNKNOWN:
+ strRetVal = W("IUnknown *");
+ break;
+ case VT_DECIMAL:
+ strRetVal = W("16-byte fixed point");
+ break;
+ case VT_RECORD:
+ strRetVal = W("user defined structure");
+ break;
+ case VT_I1:
+ strRetVal = W("signed char");
+ break;
+ case VT_UI1:
+ strRetVal = W("unsigned char");
+ break;
+ case VT_UI2:
+ strRetVal = W("unsigned short");
+ break;
+ case VT_UI4:
+ strRetVal = W("unsigned short");
+ break;
+ case VT_INT:
+ strRetVal = W("signed int");
+ break;
+ case VT_UINT:
+ strRetVal = W("unsigned int");
+ break;
+ case VT_LPSTR:
+ strRetVal = W("LPSTR");
+ break;
+ case VT_LPWSTR:
+ strRetVal = W("LPWSTR");
+ break;
+ case VT_HRESULT:
+ strRetVal = W("HResult");
+ break;
+ case VT_I8:
+ strRetVal = W("8-byte signed int");
+ break;
+ case VT_NULL:
+ strRetVal = W("null");
+ break;
+ case VT_UI8:
+ strRetVal = W("8-byte unsigned int");
+ break;
+ case VT_VOID:
+ strRetVal = W("void");
+ break;
+ case VTHACK_WINBOOL:
+ strRetVal = W("boolean");
+ break;
+ case VTHACK_ANSICHAR:
+ strRetVal = W("char");
+ break;
+ case VTHACK_CBOOL:
+ strRetVal = W("1-byte C bool");
+ break;
+ default:
+ strRetVal = W("unknown");
+ break;
+ }
+
+ strVarType.Set(strRetVal);
+ return;
+}
+
+#endif // CROSSGEN_COMPILE
+
+// Returns true if the marshaler represented by this instance requires COM to have been started.
+bool MarshalInfo::MarshalerRequiresCOM()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_COMINTEROP
+ switch (m_type)
+ {
+ case MARSHAL_TYPE_REFERENCECUSTOMMARSHALER:
+
+ case MARSHAL_TYPE_BSTR:
+ case MARSHAL_TYPE_ANSIBSTR:
+ case MARSHAL_TYPE_OBJECT:
+ case MARSHAL_TYPE_OLECOLOR:
+ case MARSHAL_TYPE_SAFEARRAY:
+ case MARSHAL_TYPE_INTERFACE:
+
+ case MARSHAL_TYPE_URI:
+ case MARSHAL_TYPE_KEYVALUEPAIR:
+ case MARSHAL_TYPE_NULLABLE:
+ case MARSHAL_TYPE_SYSTEMTYPE:
+ case MARSHAL_TYPE_EXCEPTION:
+ case MARSHAL_TYPE_HIDDENLENGTHARRAY:
+ case MARSHAL_TYPE_HSTRING:
+ case MARSHAL_TYPE_NCCEVENTARGS:
+ case MARSHAL_TYPE_PCEVENTARGS:
+ {
+            // Some of these types do not strictly require COM for the actual marshaling,
+            // but they tend to be used in a COM context, so we keep the logic we had in
+            // previous versions and return true here.
+ return true;
+ }
+
+ case MARSHAL_TYPE_LAYOUTCLASSPTR:
+ case MARSHAL_TYPE_VALUECLASS:
+ {
+ // pessimistic guess, but in line with previous versions
+ return true;
+ }
+
+ case MARSHAL_TYPE_NATIVEARRAY:
+ {
+ return (m_arrayElementType == VT_UNKNOWN ||
+ m_arrayElementType == VT_DISPATCH ||
+ m_arrayElementType == VT_VARIANT);
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ return false;
+}
+
+#ifdef FEATURE_COMINTEROP
+MarshalInfo::MarshalType MarshalInfo::GetHiddenLengthParamMarshalType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return MARSHAL_TYPE_GENERIC_U4;
+}
+
+CorElementType MarshalInfo::GetHiddenLengthParamElementType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return ELEMENT_TYPE_U4;
+}
+
+UINT16 MarshalInfo::GetHiddenLengthParamStackSize()
+{
+ LIMITED_METHOD_CONTRACT;
+ return StackElemSize(GetNativeSize(GetHiddenLengthParamMarshalType(), m_ms));
+}
+
+void MarshalInfo::MarshalHiddenLengthArgument(NDirectStubLinker *psl, BOOL managedToNative, BOOL isForReturnArray)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(psl));
+ PRECONDITION(m_type == MARSHAL_TYPE_HIDDENLENGTHARRAY);
+ PRECONDITION(m_dwHiddenLengthManagedHomeLocal == 0xFFFFFFFF);
+ PRECONDITION(m_dwHiddenLengthNativeHomeLocal == 0xFFFFFFFF);
+ }
+ CONTRACTL_END;
+
+ NewHolder<ILMarshaler> pHiddenLengthMarshaler = CreateILMarshaler(GetHiddenLengthParamMarshalType(), psl);
+
+
+ ILCodeStream *pcsMarshal = psl->GetMarshalCodeStream();
+ ILCodeStream *pcsUnmarshal = psl->GetUnmarshalCodeStream();
+
+ pcsMarshal->EmitNOP("// hidden length argument { ");
+ pcsUnmarshal->EmitNOP("// hidden length argument { ");
+
+ DWORD dwMarshalFlags = MARSHAL_FLAG_HIDDENLENPARAM;
+ if (isForReturnArray)
+ {
+ // This is a hidden length argument for an [out, retval] argument, so setup flags to match that
+ dwMarshalFlags |= CalculateArgumentMarshalFlags(TRUE, FALSE, TRUE, managedToNative);
+ }
+ else
+ {
+ // The length parameter needs to be an [in] parameter if the array itself is an [in] parameter.
+ // Additionally, in order to support the FillArray pattern:
+ // FillArray([in] UInt32 length, [out, size_is(length)] ElementType* value)
+ //
+ // We need to make sure that the length parameter is [in] if the array pointer is not byref, since
+ // this means that the caller is allocating the array. This includes array buffers which are [out]
+ // but not byref, since the [out] marshaling applies to the array contents but not the array pointer
+ // value itself.
+ BOOL marshalHiddenLengthIn = m_in || !m_byref;
+ dwMarshalFlags |= CalculateArgumentMarshalFlags(m_byref, marshalHiddenLengthIn, m_out, managedToNative);
+ }
+ pHiddenLengthMarshaler->EmitMarshalHiddenLengthArgument(pcsMarshal,
+ pcsUnmarshal,
+ this,
+ m_paramidx,
+ dwMarshalFlags,
+ HiddenLengthParamIndex(),
+ &m_args,
+ &m_dwHiddenLengthManagedHomeLocal,
+ &m_dwHiddenLengthNativeHomeLocal);
+
+ pcsMarshal->EmitNOP("// } hidden length argument");
+ pcsUnmarshal->EmitNOP("// } hidden length argument");
+
+ // Only emit into the dispatch stream for CLR -> Native cases - in the reverse, there is no argument
+ // to pass to the managed method. Instead, the length is encoded in the marshaled array.
+ if (managedToNative)
+ {
+ ILCodeStream* pcsDispatch = psl->GetDispatchCodeStream();
+ pHiddenLengthMarshaler->EmitSetupArgument(pcsDispatch);
+ }
+}
+
+#endif // FEATURE_COMINTEROP
+
+#define ReportInvalidArrayMarshalInfo(resId) \
+ do \
+ { \
+ m_vtElement = VT_EMPTY; \
+ m_errorResourceId = resId; \
+ m_thElement = TypeHandle(); \
+ goto LExit; \
+ } \
+ while (0)
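+// The do { ... } while (0) wrapper makes the macro expand to a single statement, so it can be
+// used safely in unbraced if/else bodies.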
+
+void ArrayMarshalInfo::InitForNativeArray(MarshalInfo::MarshalScenario ms, TypeHandle thElement, CorNativeType ntElement, BOOL isAnsi)
+{
+ WRAPPER_NO_CONTRACT;
+ InitElementInfo(NATIVE_TYPE_ARRAY, ms, thElement, ntElement, isAnsi);
+}
+
+void ArrayMarshalInfo::InitForFixedArray(TypeHandle thElement, CorNativeType ntElement, BOOL isAnsi)
+{
+ WRAPPER_NO_CONTRACT;
+ InitElementInfo(NATIVE_TYPE_FIXEDARRAY, MarshalInfo::MARSHAL_SCENARIO_FIELD, thElement, ntElement, isAnsi);
+}
+
+#ifdef FEATURE_COMINTEROP
+void ArrayMarshalInfo::InitForSafeArray(MarshalInfo::MarshalScenario ms, TypeHandle thElement, VARTYPE vtElement, BOOL isAnsi)
+{
+ STANDARD_VM_CONTRACT;
+
+ InitElementInfo(NATIVE_TYPE_SAFEARRAY, ms, thElement, NATIVE_TYPE_DEFAULT, isAnsi);
+
+ if (IsValid() && vtElement != VT_EMPTY)
+ {
+ if (vtElement == VT_USERDEFINED)
+ {
+            // If the user explicitly set the VARTYPE to VT_USERDEFINED, we simply ignore it:
+            // the exporter takes care of transforming the vt to VT_USERDEFINED, and the
+            // marshalers need the actual type.
+ }
+ else
+ {
+ m_flags = (ArrayMarshalInfoFlags)(m_flags | amiSafeArraySubTypeExplicitlySpecified);
+ m_vtElement = vtElement;
+ }
+ }
+}
+
+void ArrayMarshalInfo::InitForHiddenLengthArray(TypeHandle thElement)
+{
+ STANDARD_VM_CONTRACT;
+
+ // WinRT supports arrays of any WinRT-legal types
+ if (thElement.IsArray())
+ {
+ ReportInvalidArrayMarshalInfo(IDS_EE_BADMARSHAL_NESTEDARRAY);
+ }
+ else if (thElement.IsTypeDesc() || !thElement.GetMethodTable()->IsLegalNonArrayWinRTType())
+ {
+ ReportInvalidArrayMarshalInfo(IDS_EE_BADMARSHAL_WINRT_ILLEGAL_TYPE);
+ }
+
+ m_thElement = thElement;
+
+ MethodTable *pMT = thElement.GetMethodTable();
+ if (pMT->IsString())
+ {
+ m_vtElement = VTHACK_HSTRING;
+ m_cbElementSize = sizeof(HSTRING);
+ }
+ else if (WinRTTypeNameConverter::ResolveRedirectedType(pMT, &m_redirectedTypeIndex))
+ {
+ m_vtElement = VTHACK_REDIRECTEDTYPE;
+
+ switch (m_redirectedTypeIndex)
+ {
+ case WinMDAdapter::RedirectedTypeIndex_System_DateTimeOffset:
+ m_cbElementSize = ILDateTimeMarshaler::c_nativeSize;
+ break;
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Type:
+ m_cbElementSize = ILSystemTypeMarshaler::c_nativeSize;
+ break;
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Exception:
+ m_cbElementSize = ILHResultExceptionMarshaler::c_nativeSize;
+ break;
+
+ // WinRT delegates are IUnknown pointers
+ case WinMDAdapter::RedirectedTypeIndex_System_EventHandlerGeneric:
+ m_vtElement = VTHACK_INSPECTABLE;
+ m_cbElementSize = sizeof(IUnknown*);
+ break;
+
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_KeyValuePair:
+ case WinMDAdapter::RedirectedTypeIndex_System_Nullable:
+ case WinMDAdapter::RedirectedTypeIndex_System_Uri:
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Specialized_NotifyCollectionChangedEventArgs:
+ case WinMDAdapter::RedirectedTypeIndex_System_ComponentModel_PropertyChangedEventArgs:
+ {
+ m_cbElementSize = sizeof(IInspectable *);
+ break;
+ }
+
+ default:
+ {
+ if (pMT->IsValueType())
+ {
+ // other redirected structs are blittable and don't need special marshaling
+ m_vtElement = VTHACK_BLITTABLERECORD;
+ m_cbElementSize = pMT->GetNativeSize();
+ }
+ else
+ {
+ // redirected interfaces should be treated as interface pointers
+ _ASSERTE(pMT->IsInterface());
+ m_vtElement = VTHACK_INSPECTABLE;
+ m_cbElementSize = sizeof(IInspectable *);
+ }
+ break;
+ }
+ }
+ }
+ else if (pMT->IsBlittable() || pMT->IsTruePrimitive() || pMT->IsEnum())
+ {
+ m_vtElement = VTHACK_BLITTABLERECORD;
+
+ CorElementType elemType = pMT->GetInternalCorElementType();
+ if (CorTypeInfo::IsPrimitiveType(elemType))
+ {
+ // .NET and WinRT primitives have the same size
+ m_cbElementSize = CorTypeInfo::Size(elemType);
+ }
+ else
+ {
+ m_cbElementSize = pMT->GetNativeSize();
+ }
+ }
+ else if (pMT->IsValueType())
+ {
+ m_vtElement = VTHACK_NONBLITTABLERECORD;
+ m_cbElementSize = pMT->GetNativeSize();
+ }
+ else
+ {
+ m_vtElement = VTHACK_INSPECTABLE;
+ m_cbElementSize = sizeof(IInspectable *);
+ }
+
+LExit:;
+}
+#endif // FEATURE_COMINTEROP
+
+void ArrayMarshalInfo::InitElementInfo(CorNativeType arrayNativeType, MarshalInfo::MarshalScenario ms, TypeHandle thElement, CorNativeType ntElement, BOOL isAnsi)
+{
+ CONTRACT_VOID
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!thElement.IsNull());
+ POSTCONDITION(!IsValid() || !m_thElement.IsNull());
+ }
+ CONTRACT_END;
+
+ CorElementType etElement = ELEMENT_TYPE_END;
+
+ //
+ // IMPORTANT: The error resource IDs used in this function must not contain any placeholders!
+ //
+ // Also please maintain the standard of using IDS_EE_BADMARSHAL_XXX when defining new error
+ // message resource IDs.
+ //
+
+ if (thElement.IsArray())
+ ReportInvalidArrayMarshalInfo(IDS_EE_BADMARSHAL_NESTEDARRAY);
+
+ m_thElement = thElement;
+
+ if (m_thElement.IsPointer())
+ {
+ m_flags = (ArrayMarshalInfoFlags)(m_flags | amiIsPtr);
+ m_thElement = ((ParamTypeDesc*)m_thElement.AsTypeDesc())->GetModifiedType();
+ }
+
+ etElement = m_thElement.GetSignatureCorElementType();
+
+ if (IsAMIPtr(m_flags) && (etElement > ELEMENT_TYPE_R8))
+ {
+ ReportInvalidArrayMarshalInfo(IDS_EE_BADMARSHAL_UNSUPPORTED_SIG);
+ }
+
+ if (etElement == ELEMENT_TYPE_CHAR)
+ {
+ switch (ntElement)
+ {
+ case NATIVE_TYPE_I1: //fallthru
+ case NATIVE_TYPE_U1:
+ m_vtElement = VTHACK_ANSICHAR;
+ break;
+
+ case NATIVE_TYPE_I2: //fallthru
+ case NATIVE_TYPE_U2:
+ m_vtElement = VT_UI2;
+ break;
+
+ // Compat: If the native type doesn't make sense, we need to ignore it and not report an error.
+ case NATIVE_TYPE_DEFAULT: //fallthru
+ default:
+#ifdef FEATURE_COMINTEROP
+ if (ms == MarshalInfo::MARSHAL_SCENARIO_COMINTEROP)
+ m_vtElement = VT_UI2;
+ else
+#endif // FEATURE_COMINTEROP
+ m_vtElement = isAnsi ? VTHACK_ANSICHAR : VT_UI2;
+ }
+ }
+ else if (etElement == ELEMENT_TYPE_BOOLEAN)
+ {
+ switch (ntElement)
+ {
+ case NATIVE_TYPE_BOOLEAN:
+ m_vtElement = VTHACK_WINBOOL;
+ break;
+
+#ifdef FEATURE_COMINTEROP
+ case NATIVE_TYPE_VARIANTBOOL:
+ m_vtElement = VT_BOOL;
+ break;
+#endif // FEATURE_COMINTEROP
+
+ case NATIVE_TYPE_I1 :
+ case NATIVE_TYPE_U1 :
+ m_vtElement = VTHACK_CBOOL;
+ break;
+
+ // Compat: if the native type doesn't make sense, we need to ignore it and not report an error.
+ case NATIVE_TYPE_DEFAULT: //fallthru
+ default:
+#ifdef FEATURE_COMINTEROP
+ if (ms == MarshalInfo::MARSHAL_SCENARIO_COMINTEROP ||
+ arrayNativeType == NATIVE_TYPE_SAFEARRAY)
+ {
+ m_vtElement = VT_BOOL;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ m_vtElement = VTHACK_WINBOOL;
+ }
+ break;
+ }
+ }
+ else if (etElement == ELEMENT_TYPE_I)
+ {
+ m_vtElement = static_cast<VARTYPE>((GetPointerSize() == 4) ? VT_I4 : VT_I8);
+ }
+ else if (etElement == ELEMENT_TYPE_U)
+ {
+ m_vtElement = static_cast<VARTYPE>((GetPointerSize() == 4) ? VT_UI4 : VT_UI8);
+ }
+ else if (etElement <= ELEMENT_TYPE_R8)
+ {
+ static const BYTE map [] =
+ {
+ VT_NULL, // ELEMENT_TYPE_END
+ VT_VOID, // ELEMENT_TYPE_VOID
+ VT_NULL, // ELEMENT_TYPE_BOOLEAN
+ VT_NULL, // ELEMENT_TYPE_CHAR
+ VT_I1, // ELEMENT_TYPE_I1
+ VT_UI1, // ELEMENT_TYPE_U1
+ VT_I2, // ELEMENT_TYPE_I2
+ VT_UI2, // ELEMENT_TYPE_U2
+ VT_I4, // ELEMENT_TYPE_I4
+ VT_UI4, // ELEMENT_TYPE_U4
+ VT_I8, // ELEMENT_TYPE_I8
+ VT_UI8, // ELEMENT_TYPE_U8
+ VT_R4, // ELEMENT_TYPE_R4
+ VT_R8 // ELEMENT_TYPE_R8
+ };
+
+ _ASSERTE(map[etElement] != VT_NULL);
+ m_vtElement = map[etElement];
+ }
+ else
+ {
+ if (m_thElement == TypeHandle(g_pStringClass))
+ {
+ switch (ntElement)
+ {
+ case NATIVE_TYPE_DEFAULT:
+#ifdef FEATURE_COMINTEROP
+ if (arrayNativeType == NATIVE_TYPE_SAFEARRAY || ms == MarshalInfo::MARSHAL_SCENARIO_COMINTEROP)
+ {
+ m_vtElement = VT_BSTR;
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ m_vtElement = static_cast<VARTYPE>(isAnsi ? VT_LPSTR : VT_LPWSTR);
+ }
+ break;
+#ifdef FEATURE_COMINTEROP
+ case NATIVE_TYPE_BSTR:
+ m_vtElement = VT_BSTR;
+ break;
+#endif // FEATURE_COMINTEROP
+ case NATIVE_TYPE_LPSTR:
+ m_vtElement = VT_LPSTR;
+ break;
+ case NATIVE_TYPE_LPWSTR:
+ m_vtElement = VT_LPWSTR;
+ break;
+ case NATIVE_TYPE_LPTSTR:
+ {
+#ifdef FEATURE_COMINTEROP
+ if (ms == MarshalInfo::MARSHAL_SCENARIO_COMINTEROP || IsAMIExport(m_flags))
+ {
+ // We disallow NATIVE_TYPE_LPTSTR for COM or if we are exporting.
+ ReportInvalidArrayMarshalInfo(IDS_EE_BADMARSHALPARAM_NO_LPTSTR);
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ // We no longer support Win9x so LPTSTR always maps to a Unicode string.
+ m_vtElement = VT_LPWSTR;
+ }
+ break;
+ }
+
+ default:
+ ReportInvalidArrayMarshalInfo(IDS_EE_BADMARSHAL_STRINGARRAY);
+ }
+ }
+ else if (m_thElement == TypeHandle(g_pObjectClass))
+ {
+#ifdef FEATURE_COMINTEROP
+ switch(ntElement)
+ {
+ case NATIVE_TYPE_DEFAULT:
+ if (ms == MarshalInfo::MARSHAL_SCENARIO_FIELD)
+ m_vtElement = VT_UNKNOWN;
+ else
+ m_vtElement = VT_VARIANT;
+ break;
+
+ case NATIVE_TYPE_STRUCT:
+ m_vtElement = VT_VARIANT;
+ break;
+
+ case NATIVE_TYPE_INTF:
+ case NATIVE_TYPE_IUNKNOWN:
+ m_vtElement = VT_UNKNOWN;
+ break;
+
+ case NATIVE_TYPE_IDISPATCH:
+ m_vtElement = VT_DISPATCH;
+ break;
+
+ default:
+ ReportInvalidArrayMarshalInfo(IDS_EE_BADMARSHAL_OBJECTARRAY);
+ }
+
+#else // FEATURE_COMINTEROP
+ switch (ntElement)
+ {
+ case NATIVE_TYPE_IUNKNOWN:
+ m_vtElement = VT_UNKNOWN;
+ break;
+
+ default:
+ ReportInvalidArrayMarshalInfo(IDS_EE_BADMARSHAL_UNSUPPORTED_SIG);
+ }
+#endif // FEATURE_COMINTEROP
+ }
+ else if (m_thElement.CanCastTo(TypeHandle(MscorlibBinder::GetClass(CLASS__SAFE_HANDLE))))
+ {
+ // Arrays of SafeHandles are not supported.
+ ReportInvalidArrayMarshalInfo(IDS_EE_BADMARSHAL_SAFEHANDLEARRAY);
+ }
+ else if (m_thElement.CanCastTo(TypeHandle(MscorlibBinder::GetClass(CLASS__CRITICAL_HANDLE))))
+ {
+ // Arrays of CriticalHandles are not supported.
+ ReportInvalidArrayMarshalInfo(IDS_EE_BADMARSHAL_CRITICALHANDLEARRAY);
+ }
+ else if (etElement == ELEMENT_TYPE_VALUETYPE)
+ {
+ if (m_thElement == TypeHandle(MscorlibBinder::GetClass(CLASS__DATE_TIME)))
+ {
+ if (ntElement == NATIVE_TYPE_STRUCT || ntElement == NATIVE_TYPE_DEFAULT)
+ m_vtElement = VT_DATE;
+ else
+ ReportInvalidArrayMarshalInfo(IDS_EE_BADMARSHAL_DATETIMEARRAY);
+ }
+ else if (m_thElement == TypeHandle(MscorlibBinder::GetClass(CLASS__DECIMAL)))
+ {
+ if (ntElement == NATIVE_TYPE_STRUCT || ntElement == NATIVE_TYPE_DEFAULT)
+ m_vtElement = VT_DECIMAL;
+#ifdef FEATURE_COMINTEROP
+ else if (ntElement == NATIVE_TYPE_CURRENCY)
+ m_vtElement = VT_CY;
+#endif // FEATURE_COMINTEROP
+ else
+ ReportInvalidArrayMarshalInfo(IDS_EE_BADMARSHAL_DECIMALARRAY);
+ }
+ else
+ {
+ // When exporting, we need to handle enums specially.
+ if (IsAMIExport(m_flags) && m_thElement.IsEnum())
+ {
+ // Get the element type of the underlying type.
+ CorElementType et = m_thElement.GetInternalCorElementType();
+
+ // 32-bit enums are exported as VT_RECORD; other sizes are converted as the underlying type.
+ if ((et == ELEMENT_TYPE_I4) || (et == ELEMENT_TYPE_U4))
+ m_vtElement = VT_RECORD;
+ else
+ m_vtElement = OleVariant::GetVarTypeForTypeHandle(m_thElement);
+ }
+ else
+ {
+ m_vtElement = OleVariant::GetVarTypeForTypeHandle(m_thElement);
+ }
+ }
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (m_thElement == TypeHandle(MscorlibBinder::GetClass(CLASS__ERROR_WRAPPER)))
+ {
+ m_vtElement = VT_ERROR;
+ }
+#endif
+ else
+ {
+#ifdef FEATURE_COMINTEROP
+
+ // Compat: Even if the classes have layout, we still convert them to interface pointers.
+
+ ItfMarshalInfo itfInfo;
+ MarshalInfo::GetItfMarshalInfo(m_thElement, TypeHandle(), FALSE, FALSE, ms, &itfInfo);
+
+ // Compat: We must always do VT_UNKNOWN marshaling for parameters, even if the interface is marked late-bound.
+ if (ms == MarshalInfo::MARSHAL_SCENARIO_FIELD)
+ m_vtElement = static_cast<VARTYPE>(!!(itfInfo.dwFlags & ItfMarshalInfo::ITF_MARSHAL_DISP_ITF) ? VT_DISPATCH : VT_UNKNOWN);
+ else
+ m_vtElement = VT_UNKNOWN;
+
+ m_thElement = itfInfo.thItf.IsNull() ? TypeHandle(g_pObjectClass) : itfInfo.thItf;
+ m_thInterfaceArrayElementClass = itfInfo.thClass;
+
+#else // FEATURE_COMINTEROP
+ ReportInvalidArrayMarshalInfo(IDS_EE_BADMARSHAL_UNSUPPORTED_SIG);
+#endif // FEATURE_COMINTEROP
+ }
+ }
+
+#ifdef FEATURE_CORECLR
+ // Avoid throwing exceptions for managed structs with layout whose field types default to
+ // banned VARTYPEs. We don't know whether they will actually be passed to native code, and
+ // the right place to make the check is in the marshalers themselves.
+ if (AppX::IsAppXProcess() && ms != MarshalInfo::MARSHAL_SCENARIO_FIELD)
+ {
+ bool set_error = false;
+ UINT m_resID = 0;
+ switch (m_vtElement)
+ {
+ case VT_DISPATCH:
+ m_resID = IDS_EE_BADMARSHAL_TYPE_IDISPATCH ;
+ set_error = true;
+ break;
+ }
+ if (set_error)
+ COMPlusThrow(kPlatformNotSupportedException, m_resID);
+ }
+#endif // FEATURE_CORECLR
+
+ // If we are exporting, we need to substitute the VTHACK_* VARTYPE with the actual
+ // types as expressed in the type library.
+ if (IsAMIExport(m_flags))
+ {
+ if (m_vtElement == VTHACK_ANSICHAR)
+ m_vtElement = VT_UI1;
+ else if (m_vtElement == VTHACK_WINBOOL)
+ m_vtElement = VT_I4;
+ else if (m_vtElement == VTHACK_CBOOL)
+ m_vtElement = VT_UI1;
+ }
+
+LExit:;
+
+ RETURN;
+}
+
+bool IsUnsupportedValueTypeReturn(MetaSig& msig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ CorElementType type = msig.GetReturnTypeNormalized();
+
+ if (type == ELEMENT_TYPE_VALUETYPE || type == ELEMENT_TYPE_TYPEDBYREF)
+ {
+#ifdef _TARGET_X86_
+ // On x86, the internal CorElementType for value types is normalized by the type loader
+ // (see calls to ComputeInternalCorElementTypeForValueType in MethodTableBuilder).
+ // We don't need to redo the normalization here.
+ return true;
+#else
+ TypeHandle th = msig.GetRetTypeHandleThrowing();
+
+ return EEClass::ComputeInternalCorElementTypeForValueType(th.GetMethodTable()) == ELEMENT_TYPE_VALUETYPE;
+#endif // _TARGET_X86_
+ }
+
+ return false;
+}
+
+#ifndef CROSSGEN_COMPILE
+
+#include "stubhelpers.h"
+FCIMPL3(void*, StubHelpers::CreateCustomMarshalerHelper,
+ MethodDesc* pMD,
+ mdToken paramToken,
+ TypeHandle hndManagedType)
+{
+ FCALL_CONTRACT;
+
+ CustomMarshalerHelper* pCMHelper = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ Module* pModule = pMD->GetModule();
+ Assembly* pAssembly = pModule->GetAssembly();
+
+
+#ifdef FEATURE_COMINTEROP
+ if (!hndManagedType.IsTypeDesc() &&
+ IsTypeRefOrDef(g_CollectionsEnumeratorClassName, hndManagedType.GetModule(), hndManagedType.GetCl()))
+ {
+ pCMHelper = SetupCustomMarshalerHelper(ENUMERATOR_TO_ENUM_VARIANT_CM_NAME,
+ ENUMERATOR_TO_ENUM_VARIANT_CM_NAME_LEN,
+ ENUMERATOR_TO_ENUM_VARIANT_CM_COOKIE,
+ ENUMERATOR_TO_ENUM_VARIANT_CM_COOKIE_LEN,
+ pAssembly, hndManagedType);
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ //
+ // Retrieve the native type for the current parameter.
+ //
+
+ BOOL result;
+ NativeTypeParamInfo ParamInfo;
+ result = ParseNativeTypeInfo(paramToken, pModule->GetMDImport(), &ParamInfo);
+
+ //
+ // this should all have been done at stub creation time
+ //
+ CONSISTENCY_CHECK(result != 0);
+ CONSISTENCY_CHECK(ParamInfo.m_NativeType == NATIVE_TYPE_CUSTOMMARSHALER);
+
+ // Set up the custom marshaler info.
+ pCMHelper = SetupCustomMarshalerHelper(ParamInfo.m_strCMMarshalerTypeName,
+ ParamInfo.m_cCMMarshalerTypeNameBytes,
+ ParamInfo.m_strCMCookie,
+ ParamInfo.m_cCMCookieStrBytes,
+ pAssembly,
+ hndManagedType);
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ return (void*)pCMHelper;
+}
+FCIMPLEND
+
+#endif // CROSSGEN_COMPILE
diff --git a/src/vm/mlinfo.h b/src/vm/mlinfo.h
new file mode 100644
index 0000000000..b4cbd9a929
--- /dev/null
+++ b/src/vm/mlinfo.h
@@ -0,0 +1,997 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: mlinfo.h
+//
+
+//
+
+
+#include "stubgen.h"
+#include "custommarshalerinfo.h"
+
+#ifdef FEATURE_COMINTEROP
+#include <windows.ui.xaml.h>
+#endif
+
+#ifndef _MLINFO_H_
+#define _MLINFO_H_
+
+#define NATIVE_TYPE_DEFAULT NATIVE_TYPE_MAX
+#define VARIABLESIZE ((BYTE)(-1))
+
+
+#ifdef FEATURE_COMINTEROP
+class DispParamMarshaler;
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP
+enum DispatchWrapperType
+{
+ DispatchWrapperType_Unknown = 0x00000001,
+ DispatchWrapperType_Dispatch = 0x00000002,
+ DispatchWrapperType_Error = 0x00000008,
+ DispatchWrapperType_Currency = 0x00000010,
+ DispatchWrapperType_BStr = 0x00000020,
+ DispatchWrapperType_SafeArray = 0x00010000
+};
+#endif // FEATURE_COMINTEROP
+
+typedef enum
+{
+ HANDLEASNORMAL = 0,
+ OVERRIDDEN = 1,
+ DISALLOWED = 2,
+} MarshalerOverrideStatus;
+
+
+enum MarshalFlags
+{
+ MARSHAL_FLAG_CLR_TO_NATIVE = 0x01,
+ MARSHAL_FLAG_IN = 0x02,
+ MARSHAL_FLAG_OUT = 0x04,
+ MARSHAL_FLAG_BYREF = 0x08,
+ MARSHAL_FLAG_HRESULT_SWAP = 0x10,
+ MARSHAL_FLAG_RETVAL = 0x20,
+ MARSHAL_FLAG_HIDDENLENPARAM = 0x40,
+};
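+
+// Informal sketch (not the actual helper, which may differ): a routine such as
+// CalculateArgumentMarshalFlags, used in mlinfo.cpp when marshaling the hidden length
+// argument, presumably folds direction information into these bits roughly as follows:
+//
+//     inline DWORD SketchCalculateArgumentMarshalFlags(BOOL byref, BOOL fin, BOOL fout,
+//                                                      BOOL fMngToNative)
+//     {
+//         DWORD dwFlags = 0;
+//         if (byref)        dwFlags |= MARSHAL_FLAG_BYREF;
+//         if (fin)          dwFlags |= MARSHAL_FLAG_IN;
+//         if (fout)         dwFlags |= MARSHAL_FLAG_OUT;
+//         if (fMngToNative) dwFlags |= MARSHAL_FLAG_CLR_TO_NATIVE;
+//         return dwFlags;
+//     }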
+
+#include <pshpack1.h>
+// Captures arguments for C array marshaling.
+struct CREATE_MARSHALER_CARRAY_OPERANDS
+{
+ MethodTable* methodTable;
+ UINT32 multiplier;
+ UINT32 additive;
+ VARTYPE elementType;
+ UINT16 countParamIdx;
+ BYTE bestfitmapping;
+ BYTE throwonunmappablechar;
+};
+#include <poppack.h>
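+
+// Informal sketch (hypothetical helper, for illustration): for size_is-style arrays, the
+// effective native element count is derived from these operands roughly as
+//
+//     SIZE_T SketchCountElements(SIZE_T countParamValue,
+//                                const CREATE_MARSHALER_CARRAY_OPERANDS& mops)
+//     {
+//         // count = countParam * multiplier + additive
+//         return countParamValue * mops.multiplier + mops.additive;
+//     }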
+
+struct OverrideProcArgs
+{
+ class MarshalInfo* m_pMarshalInfo;
+
+ union
+ {
+ MethodTable* m_pMT;
+
+ struct
+ {
+ VARTYPE m_vt;
+ UINT16 m_optionalbaseoffset; // for fast marshaling: offset of the data pointer if known and less than 64k (0 otherwise)
+ MethodTable* m_pMT;
+#ifdef FEATURE_COMINTEROP
+ SIZE_T m_cbElementSize;
+ WinMDAdapter::RedirectedTypeIndex m_redirectedTypeIndex;
+#endif // FEATURE_COMINTEROP
+ } na;
+
+ struct
+ {
+ MethodTable* m_pMT;
+ MethodDesc* m_pCopyCtor;
+ MethodDesc* m_pDtor;
+ } mm;
+
+ struct
+ {
+ MethodDesc* m_pMD;
+ mdToken m_paramToken;
+ void* m_hndManagedType; // TypeHandle cannot be a union member
+ } rcm; // MARSHAL_TYPE_REFERENCECUSTOMMARSHALER
+
+ };
+};
+
+typedef MarshalerOverrideStatus (*OVERRIDEPROC)(NDirectStubLinker* psl,
+ BOOL byref,
+ BOOL fin,
+ BOOL fout,
+ BOOL fManagedToNative,
+ OverrideProcArgs* pargs,
+ UINT* pResID,
+ UINT argidx,
+ UINT nativeStackOffset);
+
+typedef MarshalerOverrideStatus (*RETURNOVERRIDEPROC)(NDirectStubLinker* psl,
+ BOOL fManagedToNative,
+ BOOL fHresultSwap,
+ OverrideProcArgs* pargs,
+ UINT* pResID);
+
+//==========================================================================
+// This structure contains the native type information for a given
+// parameter.
+//==========================================================================
+struct NativeTypeParamInfo
+{
+ NativeTypeParamInfo()
+ : m_NativeType(NATIVE_TYPE_DEFAULT)
+ , m_ArrayElementType(NATIVE_TYPE_DEFAULT)
+ , m_SizeIsSpecified(FALSE)
+ , m_CountParamIdx(0)
+ , m_Multiplier(0)
+ , m_Additive(1)
+ , m_strCMMarshalerTypeName(NULL)
+ , m_cCMMarshalerTypeNameBytes(0)
+ , m_strCMCookie(NULL)
+ , m_cCMCookieStrBytes(0)
+#ifdef FEATURE_COMINTEROP
+ , m_SafeArrayElementVT(VT_EMPTY)
+ , m_strSafeArrayUserDefTypeName(NULL)
+ , m_cSafeArrayUserDefTypeNameBytes(0)
+ , m_IidParamIndex(-1)
+ , m_strInterfaceTypeName(NULL)
+ , m_cInterfaceTypeNameBytes(0)
+#endif // FEATURE_COMINTEROP
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ // The native type of the parameter.
+ CorNativeType m_NativeType;
+
+ // for NT_ARRAY only
+ CorNativeType m_ArrayElementType; // The array element type.
+
+ BOOL m_SizeIsSpecified; // used to do some validation
+ UINT16 m_CountParamIdx; // index of the "size_is" parameter
+ UINT32 m_Multiplier; // multiplier for "size_is"
+ UINT32 m_Additive; // additive for "size_is"
+
+ // For NT_CUSTOMMARSHALER only.
+ LPUTF8 m_strCMMarshalerTypeName;
+ DWORD m_cCMMarshalerTypeNameBytes;
+ LPUTF8 m_strCMCookie;
+ DWORD m_cCMCookieStrBytes;
+
+#ifdef FEATURE_COMINTEROP
+ // For NT_SAFEARRAY only.
+ VARTYPE m_SafeArrayElementVT;
+ LPUTF8 m_strSafeArrayUserDefTypeName;
+ DWORD m_cSafeArrayUserDefTypeNameBytes;
+
+ DWORD m_IidParamIndex; // Capture iid_is syntax from IDL.
+
+ // for NATIVE_TYPE_SPECIFIED_INTERFACE
+ LPUTF8 m_strInterfaceTypeName;
+ DWORD m_cInterfaceTypeNameBytes;
+#endif // FEATURE_COMINTEROP
+};
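+
+// Typical usage (see StubHelpers::CreateCustomMarshalerHelper in mlinfo.cpp): the caller
+// default-constructs the struct, and ParseNativeTypeInfo then fills it in from the
+// parameter's MarshalAs metadata; on failure the struct presumably keeps its
+// NATIVE_TYPE_DEFAULT defaults.
+//
+//     NativeTypeParamInfo paramInfo;
+//     BOOL result = ParseNativeTypeInfo(paramToken, pModule->GetMDImport(), &paramInfo);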
+
+HRESULT CheckForCompressedData(PCCOR_SIGNATURE pvNativeTypeStart, PCCOR_SIGNATURE pvNativeType, ULONG cbNativeType);
+
+BOOL ParseNativeTypeInfo(mdToken token,
+ IMDInternalImport* pScope,
+ NativeTypeParamInfo* pParamInfo);
+
+void VerifyAndAdjustNormalizedType(
+ Module * pModule,
+ SigPointer sigPtr,
+ const SigTypeContext * pTypeContext,
+ CorElementType * pManagedElemType,
+ CorNativeType * pNativeType);
+
+#ifdef FEATURE_COMINTEROP
+
+class EventArgsMarshalingInfo
+{
+public:
+ // Constructor.
+ EventArgsMarshalingInfo();
+
+ // Destructor.
+ ~EventArgsMarshalingInfo();
+
+ // EventArgsMarshalingInfo objects are always allocated on the loader heap, so we need to
+ // redefine the new and delete operators to ensure this.
+ void *operator new(size_t size, LoaderHeap *pHeap);
+ void operator delete(void *pMem);
+
+ // Accessors.
+ TypeHandle GetSystemNCCEventArgsType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_hndSystemNCCEventArgsType;
+ }
+
+ TypeHandle GetSystemPCEventArgsType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_hndSystemPCEventArgsType;
+ }
+
+ ABI::Windows::UI::Xaml::Interop::INotifyCollectionChangedEventArgsFactory *GetNCCEventArgsFactory()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS; // For potential COOP->PREEMP->COOP switch
+ MODE_ANY;
+ PRECONDITION(!GetAppDomain()->IsCompilationDomain());
+ }
+ CONTRACTL_END;
+
+ if (m_pNCCEventArgsFactory.Load() == NULL)
+ {
+ GCX_PREEMP();
+ SafeComHolderPreemp<ABI::Windows::UI::Xaml::Interop::INotifyCollectionChangedEventArgsFactory> pNCCEventArgsFactory;
+
+ IfFailThrow(clr::winrt::GetActivationFactory(g_WinRTNotifyCollectionChangedEventArgsNameW, (ABI::Windows::UI::Xaml::Interop::INotifyCollectionChangedEventArgsFactory **)&pNCCEventArgsFactory));
+ _ASSERTE_MSG(pNCCEventArgsFactory, "Got NULL NCCEventArgs factory!");
+
+ if (InterlockedCompareExchangeT(&m_pNCCEventArgsFactory, (ABI::Windows::UI::Xaml::Interop::INotifyCollectionChangedEventArgsFactory *)pNCCEventArgsFactory, NULL) == NULL)
+ pNCCEventArgsFactory.SuppressRelease();
+ }
+
+ return m_pNCCEventArgsFactory;
+ }
+
+ ABI::Windows::UI::Xaml::Data::IPropertyChangedEventArgsFactory *GetPCEventArgsFactory()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS; // For potential COOP->PREEMP->COOP switch
+ MODE_ANY;
+ PRECONDITION(!GetAppDomain()->IsCompilationDomain());
+ }
+ CONTRACTL_END;
+
+ if (m_pPCEventArgsFactory.Load() == NULL)
+ {
+ GCX_PREEMP();
+ SafeComHolderPreemp<ABI::Windows::UI::Xaml::Data::IPropertyChangedEventArgsFactory> pPCEventArgsFactory;
+
+ IfFailThrow(clr::winrt::GetActivationFactory(g_WinRTPropertyChangedEventArgsNameW, (ABI::Windows::UI::Xaml::Data::IPropertyChangedEventArgsFactory **)&pPCEventArgsFactory));
+ _ASSERTE_MSG(pPCEventArgsFactory, "Got NULL PCEventArgs factory!");
+
+ if (InterlockedCompareExchangeT(&m_pPCEventArgsFactory, (ABI::Windows::UI::Xaml::Data::IPropertyChangedEventArgsFactory *)pPCEventArgsFactory, NULL) == NULL)
+ pPCEventArgsFactory.SuppressRelease();
+ }
+
+ return m_pPCEventArgsFactory;
+ }
+
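+ // The two factory accessors above use a lock-free publication pattern: racing threads may
+ // each create a factory, but only the first InterlockedCompareExchangeT wins and keeps its
+ // reference; losers release theirs via the holder's destructor. Distilled sketch
+ // (hypothetical IFactory type, for illustration only):
+ //
+ //     if (m_pFactory.Load() == NULL)
+ //     {
+ //         SafeComHolderPreemp<IFactory> pLocal;
+ //         CreateFactory(&pLocal);                                  // may race
+ //         if (InterlockedCompareExchangeT(&m_pFactory, (IFactory*)pLocal, NULL) == NULL)
+ //             pLocal.SuppressRelease();                            // we published our copy
+ //     }
+ //     return m_pFactory;
+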
+ MethodDesc *GetSystemNCCEventArgsToWinRTNCCEventArgsMD()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pSystemNCCEventArgsToWinRTNCCEventArgsMD;
+ }
+
+ MethodDesc *GetWinRTNCCEventArgsToSystemNCCEventArgsMD()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pWinRTNCCEventArgsToSystemNCCEventArgsMD;
+ }
+
+ MethodDesc *GetSystemPCEventArgsToWinRTPCEventArgsMD()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pSystemPCEventArgsToWinRTPCEventArgsMD;
+ }
+
+ MethodDesc *GetWinRTPCEventArgsToSystemPCEventArgsMD()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pWinRTPCEventArgsToSystemPCEventArgsMD;
+ }
+
+#if defined(_DEBUG) && !defined(FEATURE_CORECLR)
+ BOOL IsEventArgsHelperMethod(MethodDesc *pMD)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (pMD == m_pSystemNCCEventArgsToWinRTNCCEventArgsMD || pMD == m_pWinRTNCCEventArgsToSystemNCCEventArgsMD ||
+ pMD == m_pSystemPCEventArgsToWinRTPCEventArgsMD || pMD == m_pWinRTPCEventArgsToSystemPCEventArgsMD);
+ }
+#endif // #if defined(_DEBUG) && !defined(FEATURE_CORECLR)
+
+private:
+ TypeHandle m_hndSystemNCCEventArgsType;
+ TypeHandle m_hndSystemPCEventArgsType;
+
+ MethodDesc *m_pSystemNCCEventArgsToWinRTNCCEventArgsMD;
+ MethodDesc *m_pWinRTNCCEventArgsToSystemNCCEventArgsMD;
+ MethodDesc *m_pSystemPCEventArgsToWinRTPCEventArgsMD;
+ MethodDesc *m_pWinRTPCEventArgsToSystemPCEventArgsMD;
+
+ VolatilePtr<ABI::Windows::UI::Xaml::Interop::INotifyCollectionChangedEventArgsFactory> m_pNCCEventArgsFactory;
+ VolatilePtr<ABI::Windows::UI::Xaml::Data::IPropertyChangedEventArgsFactory> m_pPCEventArgsFactory;
+};
+
+class UriMarshalingInfo
+{
+public:
+ // Constructor.
+ UriMarshalingInfo();
+
+ // Destructor
+ ~UriMarshalingInfo();
+
+ // UriMarshalingInfo objects are always allocated on the loader heap, so we need to
+ // redefine the new and delete operators to ensure this.
+ void *operator new(size_t size, LoaderHeap *pHeap);
+ void operator delete(void *pMem);
+
+ // Accessors.
+ TypeHandle GetSystemUriType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_hndSystemUriType;
+ }
+
+ ABI::Windows::Foundation::IUriRuntimeClassFactory* GetUriFactory()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS; // For potential COOP->PREEMP->COOP switch
+ MODE_ANY;
+ PRECONDITION(!GetAppDomain()->IsCompilationDomain());
+ }
+ CONTRACTL_END;
+
+ if (m_pUriFactory.Load() == NULL)
+ {
+ GCX_PREEMP();
+
+ SafeComHolderPreemp<ABI::Windows::Foundation::IUriRuntimeClassFactory> pUriFactory;
+
+ // IUriRuntimeClassFactory: 44A9796F-723E-4FDF-A218-033E75B0C084
+ IfFailThrow(clr::winrt::GetActivationFactory(g_WinRTUriClassNameW, (ABI::Windows::Foundation::IUriRuntimeClassFactory **)&pUriFactory));
+ _ASSERTE_MSG(pUriFactory, "Got Null Uri factory!");
+
+ if (InterlockedCompareExchangeT(&m_pUriFactory, (ABI::Windows::Foundation::IUriRuntimeClassFactory *) pUriFactory, NULL) == NULL)
+ pUriFactory.SuppressRelease();
+ }
+
+ return m_pUriFactory;
+ }
+
+ MethodDesc *GetSystemUriCtorMD()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_SystemUriCtorMD;
+ }
+
+ MethodDesc *GetSystemUriOriginalStringMD()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_SystemUriOriginalStringGetterMD;
+ }
+
+#if defined(_DEBUG) && !defined(FEATURE_CORECLR)
+ BOOL IsUriHelperMethod(MethodDesc *pMD)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return pMD == m_SystemUriCtorMD || pMD == m_SystemUriOriginalStringGetterMD;
+ }
+#endif // #if defined(_DEBUG) && !defined(FEATURE_CORECLR)
+
+private:
+ TypeHandle m_hndSystemUriType;
+
+ MethodDesc* m_SystemUriCtorMD;
+ MethodDesc* m_SystemUriOriginalStringGetterMD;
+
+ VolatilePtr<ABI::Windows::Foundation::IUriRuntimeClassFactory> m_pUriFactory;
+};
+
+class OleColorMarshalingInfo
+{
+public:
+ // Constructor.
+ OleColorMarshalingInfo();
+
+ // OleColorMarshalingInfo objects are always allocated on the loader heap, so we need to
+ // redefine the new and delete operators to ensure this.
+ void *operator new(size_t size, LoaderHeap *pHeap);
+ void operator delete(void *pMem);
+
+ // Accessors.
+ TypeHandle GetColorType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_hndColorType;
+ }
+ MethodDesc *GetOleColorToSystemColorMD()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_OleColorToSystemColorMD;
+ }
+ MethodDesc *GetSystemColorToOleColorMD()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_SystemColorToOleColorMD;
+ }
+
+#if defined(_DEBUG) && !defined(FEATURE_CORECLR)
+ BOOL IsOleColorHelperMethod(MethodDesc *pMD)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return pMD == m_OleColorToSystemColorMD || pMD == m_SystemColorToOleColorMD;
+ }
+#endif // #if defined(_DEBUG) && !defined(FEATURE_CORECLR)
+
+private:
+ TypeHandle m_hndColorType;
+ MethodDesc* m_OleColorToSystemColorMD;
+ MethodDesc* m_SystemColorToOleColorMD;
+};
+
+#endif // FEATURE_COMINTEROP
+
+
+class EEMarshalingData
+{
+public:
+ EEMarshalingData(BaseDomain *pDomain, LoaderHeap *pHeap, CrstBase *pCrst);
+ ~EEMarshalingData();
+
+ // EEMarshalingData objects are always allocated on the loader heap, so we need to
+ // redefine the new and delete operators to ensure this.
+ void *operator new(size_t size, LoaderHeap *pHeap);
+ void operator delete(void *pMem);
+
+ // This method returns the custom marshaling helper associated with the name/cookie pair. If the
+ // CM info has not been created yet for this pair then it will be created and returned.
+ CustomMarshalerHelper *GetCustomMarshalerHelper(Assembly *pAssembly, TypeHandle hndManagedType, LPCUTF8 strMarshalerTypeName, DWORD cMarshalerTypeNameBytes, LPCUTF8 strCookie, DWORD cCookieStrBytes);
+
+ // This method returns the custom marshaling info associated with the given shared CM helper.
+ CustomMarshalerInfo *GetCustomMarshalerInfo(SharedCustomMarshalerHelper *pSharedCMHelper);
+
+#ifdef FEATURE_COMINTEROP
+ // This method retrieves OLE_COLOR marshaling info.
+ OleColorMarshalingInfo *GetOleColorMarshalingInfo();
+ UriMarshalingInfo *GetUriMarshalingInfo();
+ EventArgsMarshalingInfo *GetEventArgsMarshalingInfo();
+
+#if defined(_DEBUG) && !defined(FEATURE_CORECLR)
+ BOOL IsOleColorHelperMethod(MethodDesc *pMD)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pOleColorInfo != NULL && m_pOleColorInfo->IsOleColorHelperMethod(pMD);
+ }
+
+ BOOL IsUriHelperMethod(MethodDesc *pMD)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pUriInfo != NULL && m_pUriInfo->IsUriHelperMethod(pMD);
+ }
+
+ BOOL IsEventArgsHelperMethod(MethodDesc *pMD)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pEventArgsInfo != NULL && m_pEventArgsInfo->IsEventArgsHelperMethod(pMD);
+ }
+#endif // #if defined(_DEBUG) && !defined(FEATURE_CORECLR)
+
+#endif // FEATURE_COMINTEROP
+
+private:
+#ifndef CROSSGEN_COMPILE
+ EECMHelperHashTable m_CMHelperHashtable;
+ EEPtrHashTable m_SharedCMHelperToCMInfoMap;
+#endif // CROSSGEN_COMPILE
+ LoaderHeap* m_pHeap;
+ BaseDomain* m_pDomain;
+ CMINFOLIST m_pCMInfoList;
+#ifdef FEATURE_COMINTEROP
+ OleColorMarshalingInfo* m_pOleColorInfo;
+ UriMarshalingInfo* m_pUriInfo;
+ EventArgsMarshalingInfo* m_pEventArgsInfo;
+#endif // FEATURE_COMINTEROP
+};
+
+struct ItfMarshalInfo;
+
+class MarshalInfo
+{
+public:
+ enum MarshalType
+ {
+#define DEFINE_MARSHALER_TYPE(mtype, mclass, fWinRTSupported) mtype,
+#include "mtypes.h"
+ MARSHAL_TYPE_UNKNOWN
+ };
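+
+ // The X-macro include above turns each DEFINE_MARSHALER_TYPE(mtype, mclass, fWinRTSupported)
+ // row in mtypes.h into one enumerator in this enum (e.g. MARSHAL_TYPE_DOUBLE, used by
+ // IsFpuReturn below). See mtypes.h for the actual rows.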
+
+ enum MarshalScenario
+ {
+ MARSHAL_SCENARIO_NDIRECT,
+#ifdef FEATURE_COMINTEROP
+ MARSHAL_SCENARIO_COMINTEROP,
+ MARSHAL_SCENARIO_WINRT,
+#endif // FEATURE_COMINTEROP
+ MARSHAL_SCENARIO_FIELD
+ };
+
+private:
+
+public:
+ void *operator new(size_t size, void *pInPlace)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return pInPlace;
+ }
+
+ MarshalInfo(Module* pModule,
+ SigPointer sig,
+ const SigTypeContext *pTypeContext,
+ mdToken token,
+ MarshalScenario ms,
+ CorNativeLinkType nlType,
+ CorNativeLinkFlags nlFlags,
+ BOOL isParam,
+ UINT paramidx, // parameter # for use in error messages (ignored if not parameter)
+ UINT numArgs, // number of arguments. used to check SizeParamIndex is within valid range
+ BOOL BestFit,
+ BOOL ThrowOnUnmappableChar,
+ BOOL fEmitsIL,
+ MethodDesc* pMD = NULL,
+ BOOL fUseCustomMarshal = TRUE
+#ifdef _DEBUG
+ ,
+ LPCUTF8 pDebugName = NULL,
+ LPCUTF8 pDebugClassName = NULL,
+ UINT argidx = 0 // 0 for return value, -1 for field
+#endif
+
+ );
+
+ VOID EmitOrThrowInteropParamException(NDirectStubLinker* psl, BOOL fMngToNative, UINT resID, UINT paramIdx);
+
+ // These methods retrieve the information for different element types.
+ HRESULT HandleArrayElemType(NativeTypeParamInfo *pParamInfo,
+ UINT16 optbaseoffset,
+ TypeHandle elemTypeHnd,
+ int iRank,
+ BOOL fNoLowerBounds,
+ BOOL isParam,
+ Assembly *pAssembly);
+
+ void GenerateArgumentIL(NDirectStubLinker* psl,
+ int argOffset, // the argument's index is m_paramidx + argOffset
+ UINT nativeStackOffset, // offset of the argument on the native stack
+ BOOL fMngToNative);
+
+ void GenerateReturnIL(NDirectStubLinker* psl,
+ int argOffset, // the argument's index is m_paramidx + argOffset
+ BOOL fMngToNative,
+ BOOL fieldGetter,
+ BOOL retval);
+
+ void SetupArgumentSizes();
+
+ UINT16 GetNativeArgSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_nativeArgSize;
+ }
+
+ MarshalType GetMarshalType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_type;
+ }
+
+ BYTE GetBestFitMapping()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((m_BestFit == 0) ? 0 : 1);
+ }
+
+ BYTE GetThrowOnUnmappableChar()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((m_ThrowOnUnmappableChar == 0) ? 0 : 1);
+ }
+
+ BOOL IsFpuReturn()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_type == MARSHAL_TYPE_FLOAT || m_type == MARSHAL_TYPE_DOUBLE;
+ }
+
+ BOOL IsIn()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_in;
+ }
+
+ BOOL IsOut()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_out;
+ }
+
+ BOOL IsByRef()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_byref;
+ }
+
+ Module* GetModule()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pModule;
+ }
+
+ int GetArrayRank()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_iArrayRank;
+ }
+
+ BOOL GetNoLowerBounds()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_nolowerbounds;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ void SetHiddenLengthParamIndex(UINT16 index)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_hiddenLengthParamIndex == (UINT16)-1);
+ m_hiddenLengthParamIndex = index;
+ }
+
+ UINT16 HiddenLengthParamIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_hiddenLengthParamIndex != (UINT16)-1);
+ return m_hiddenLengthParamIndex;
+ }
+
+ DWORD GetHiddenLengthManagedHome()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_dwHiddenLengthManagedHomeLocal != 0xFFFFFFFF);
+ return m_dwHiddenLengthManagedHomeLocal;
+ }
+
+ DWORD GetHiddenLengthNativeHome()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_dwHiddenLengthNativeHomeLocal != 0xFFFFFFFF);
+ return m_dwHiddenLengthNativeHomeLocal;
+ }
+
+ MarshalType GetHiddenLengthParamMarshalType();
+ CorElementType GetHiddenLengthParamElementType();
+ UINT16 GetHiddenLengthParamStackSize();
+
+ void MarshalHiddenLengthArgument(NDirectStubLinker *psl, BOOL managedToNative, BOOL isForReturnArray);
+#endif // FEATURE_COMINTEROP
+
+ // Uses the same logic as tlbexp to check whether the method's argument is an OLE vararg candidate.
+ BOOL IsOleVarArgCandidate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fOleVarArgCandidate; // set in the constructor
+ }
+
+ void GetMops(CREATE_MARSHALER_CARRAY_OPERANDS* pMopsOut)
+ {
+ WRAPPER_NO_CONTRACT;
+ pMopsOut->methodTable = m_hndArrayElemType.AsMethodTable();
+ pMopsOut->elementType = m_arrayElementType;
+ pMopsOut->countParamIdx = m_countParamIdx;
+ pMopsOut->multiplier = m_multiplier;
+ pMopsOut->additive = m_additive;
+ pMopsOut->bestfitmapping = GetBestFitMapping();
+ pMopsOut->throwonunmappablechar = GetThrowOnUnmappableChar();
+ }
+
+ TypeHandle GetArrayElementTypeHandle()
+ {
+ return m_hndArrayElemType;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ DispParamMarshaler *GenerateDispParamMarshaler();
+ DispatchWrapperType GetDispWrapperType();
+#endif // FEATURE_COMINTEROP
+
+ void GetItfMarshalInfo(ItfMarshalInfo* pInfo);
+
+ // Helper functions used to map the specified type to its interface marshalling info.
+ static void GetItfMarshalInfo(TypeHandle th, TypeHandle thItf, BOOL fDispItf, BOOL fInspItf, MarshalScenario ms, ItfMarshalInfo *pInfo);
+ static HRESULT TryGetItfMarshalInfo(TypeHandle th, BOOL fDispItf, BOOL fInspItf, ItfMarshalInfo *pInfo);
+
+ VOID MarshalTypeToString(SString& strMarshalType, BOOL fSizeIsSpecified);
+ static VOID VarTypeToString(VARTYPE vt, SString& strVarType);
+
+ // Returns true if the specified marshaler requires COM to have been started.
+ bool MarshalerRequiresCOM();
+
+ MethodDesc *GetMethodDesc()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pMD;
+ }
+
+ UINT GetParamIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_paramidx;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ BOOL IsWinRTScenario()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_ms == MarshalInfo::MARSHAL_SCENARIO_WINRT;
+ }
+#endif // FEATURE_COMINTEROP
+
+private:
+
+ UINT16 GetManagedSize(MarshalType mtype, MarshalScenario ms);
+ UINT16 GetNativeSize(MarshalType mtype, MarshalScenario ms);
+ static bool IsInOnly(MarshalType mtype);
+ static bool IsSupportedForWinRT(MarshalType mtype);
+
+ static OVERRIDEPROC GetArgumentOverrideProc(MarshalType mtype);
+ static RETURNOVERRIDEPROC GetReturnOverrideProc(MarshalType mtype);
+
+#ifdef _DEBUG
+ VOID DumpMarshalInfo(Module* pModule, SigPointer sig, const SigTypeContext *pTypeContext, mdToken token,
+ MarshalScenario ms, CorNativeLinkType nlType, CorNativeLinkFlags nlFlags);
+#endif
+
+private:
+ MarshalType m_type;
+ BOOL m_byref;
+ BOOL m_in;
+ BOOL m_out;
+ MethodTable* m_pMT; // Used if this is a true value type
+ MethodDesc* m_pMD; // Save MethodDesc for later inspection so that we can pass SizeParamIndex by ref
+ TypeHandle m_hndArrayElemType;
+ VARTYPE m_arrayElementType;
+ int m_iArrayRank;
+ BOOL m_nolowerbounds; // if managed type is SZARRAY, don't allow lower bounds
+
+ // for NT_ARRAY only
+ UINT32 m_multiplier; // multiplier for "size_is"
+ UINT32 m_additive; // additive for "size_is"
+ UINT16 m_countParamIdx; // index of the "size_is" parameter
+
+#ifdef FEATURE_COMINTEROP
+ // For NATIVE_TYPE_HIDDENLENGTHARRAY
+ UINT16 m_hiddenLengthParamIndex; // index of the injected hidden length parameter
+ DWORD m_dwHiddenLengthManagedHomeLocal; // home local for the managed hidden length parameter
+ DWORD m_dwHiddenLengthNativeHomeLocal; // home local for the native hidden length parameter
+
+ MethodTable* m_pDefaultItfMT; // WinRT default interface (if m_pMT is a class)
+#endif // FEATURE_COMINTEROP
+
+ UINT16 m_nativeArgSize;
+ UINT16 m_managedArgSize;
+
+ MarshalScenario m_ms;
+ BOOL m_fAnsi;
+ BOOL m_fDispItf;
+ BOOL m_fInspItf;
+ BOOL m_fErrorNativeType;
+
+ // Information used by NT_CUSTOMMARSHALER.
+ CustomMarshalerHelper* m_pCMHelper;
+ VARTYPE m_CMVt;
+
+ OverrideProcArgs m_args;
+
+ UINT m_paramidx;
+ UINT m_resID; // resource ID for error message (if any)
+ BOOL m_BestFit;
+ BOOL m_ThrowOnUnmappableChar;
+
+ BOOL m_fOleVarArgCandidate; // indicates whether the arg is a candidate for OLE vararg treatment
+
+#if defined(_DEBUG)
+ LPCUTF8 m_strDebugMethName;
+ LPCUTF8 m_strDebugClassName;
+ UINT m_iArg; // 0 for return value, -1 for field
+#endif
+
+ Module* m_pModule;
+};
+
+
+
+//
+// Flags used to control the behavior of the ArrayMarshalInfo class.
+//
+
+enum ArrayMarshalInfoFlags
+{
+ amiRuntime = 0x0001,
+ amiExport32Bit = 0x0002,
+ amiExport64Bit = 0x0004,
+ amiIsPtr = 0x0008,
+ amiSafeArraySubTypeExplicitlySpecified = 0x0010
+};
+
+#define IsAMIRuntime(flags) (flags & amiRuntime)
+#define IsAMIExport(flags) (flags & (amiExport32Bit | amiExport64Bit))
+#define IsAMIExport32Bit(flags) (flags & amiExport32Bit)
+#define IsAMIExport64Bit(flags) (flags & amiExport64Bit)
+#define IsAMIPtr(flags) (flags & amiIsPtr)
+#define IsAMISafeArraySubTypeExplicitlySpecified(flags) (flags & amiSafeArraySubTypeExplicitlySpecified)
+//
+// Helper class to determine the marshaling information for arrays.
+//
+
+class ArrayMarshalInfo
+{
+public:
+ ArrayMarshalInfo(ArrayMarshalInfoFlags flags)
+ : m_vtElement(VT_EMPTY)
+ , m_errorResourceId(0)
+ , m_flags(flags)
+#ifdef FEATURE_COMINTEROP
+ , m_redirectedTypeIndex((WinMDAdapter::RedirectedTypeIndex)0)
+ , m_cbElementSize(0)
+#endif // FEATURE_COMINTEROP
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ void InitForNativeArray(MarshalInfo::MarshalScenario ms, TypeHandle elemTypeHnd, CorNativeType elementNativeType, BOOL isAnsi);
+ void InitForFixedArray(TypeHandle elemTypeHnd, CorNativeType elementNativeType, BOOL isAnsi);
+
+#ifdef FEATURE_COMINTEROP
+ void InitForSafeArray(MarshalInfo::MarshalScenario ms, TypeHandle elemTypeHnd, VARTYPE elementVT, BOOL isAnsi);
+ void InitForHiddenLengthArray(TypeHandle elemTypeHnd);
+#endif // FEATURE_COMINTEROP
+
+ TypeHandle GetElementTypeHandle()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_thElement;
+ }
+
+ BOOL IsPtr()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return IsAMIPtr(m_flags);
+ }
+
+ VARTYPE GetElementVT()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (IsAMIRuntime(m_flags) && IsAMIPtr(m_flags))
+ {
+ // for the purpose of marshaling, we don't care about the inner
+ // type - we just marshal pointer-sized values
+ return (sizeof(LPVOID) == 4 ? VT_I4 : VT_I8);
+ }
+ else
+ {
+ return m_vtElement;
+ }
+ }
+
+ BOOL IsValid()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return m_vtElement != VT_EMPTY;
+ }
+
+ BOOL IsSafeArraySubTypeExplicitlySpecified()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return IsAMISafeArraySubTypeExplicitlySpecified(m_flags);
+ }
+
+ DWORD GetErrorResourceId()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(!IsValid());
+ }
+ CONTRACTL_END;
+
+ return m_errorResourceId;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ WinMDAdapter::RedirectedTypeIndex GetRedirectedTypeIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_redirectedTypeIndex;
+ }
+
+ SIZE_T GetElementSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_cbElementSize;
+ }
+#endif // FEATURE_COMINTEROP
+
+protected:
+ // Helper function that does the actual work to figure out the element type handle and var type.
+ void InitElementInfo(CorNativeType arrayNativeType, MarshalInfo::MarshalScenario ms, TypeHandle elemTypeHnd, CorNativeType elementNativeType, BOOL isAnsi);
+
+ VARTYPE GetPointerSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // If we are exporting, use the pointer size specified via the flags, otherwise use
+ // the current size of a pointer.
+ if (IsAMIExport32Bit(m_flags))
+ return 4;
+ else if (IsAMIExport64Bit(m_flags))
+ return 8;
+ else
+ return sizeof(LPVOID);
+ }
+
+protected:
+ TypeHandle m_thElement;
+ TypeHandle m_thInterfaceArrayElementClass;
+ VARTYPE m_vtElement;
+ DWORD m_errorResourceId;
+ ArrayMarshalInfoFlags m_flags;
+
+#ifdef FEATURE_COMINTEROP
+ WinMDAdapter::RedirectedTypeIndex m_redirectedTypeIndex;
+ SIZE_T m_cbElementSize;
+#endif // FEATURE_COMINTEROP
+};
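+
+// Minimal usage sketch (hypothetical caller, for illustration): initialize the info for a
+// native array element type and read back the element VARTYPE, surfacing the error resource
+// id when the element type is not marshalable.
+//
+//     ArrayMarshalInfo ami(amiRuntime);
+//     ami.InitForNativeArray(MarshalInfo::MARSHAL_SCENARIO_NDIRECT, thElement,
+//                            NATIVE_TYPE_DEFAULT, /* isAnsi */ TRUE);
+//     if (!ami.IsValid())
+//         ThrowInteropParamException(ami.GetErrorResourceId(), paramIdx);
+//     VARTYPE vt = ami.GetElementVT();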
+
+
+//===================================================================================
+// Throws an exception indicating a param has invalid element type / native type
+// information.
+//===================================================================================
+VOID ThrowInteropParamException(UINT resID, UINT paramIdx);
+
+VOID CollateParamTokens(IMDInternalImport *pInternalImport, mdMethodDef md, ULONG numargs, mdParamDef *aParams);
+bool IsUnsupportedValueTypeReturn(MetaSig& msig);
+
+void FindCopyCtor(Module *pModule, MethodTable *pMT, MethodDesc **pMDOut);
+void FindDtor(Module *pModule, MethodTable *pMT, MethodDesc **pMDOut);
+
+// We'll cap the total native size at a (somewhat) arbitrary limit to ensure
+// that we don't expose some overflow bug later on.
+#define MAX_SIZE_FOR_INTEROP 0x7ffffff0
+
+#endif // _MLINFO_H_
diff --git a/src/vm/mngstdinterfaces.cpp b/src/vm/mngstdinterfaces.cpp
new file mode 100644
index 0000000000..eff05c744e
--- /dev/null
+++ b/src/vm/mngstdinterfaces.cpp
@@ -0,0 +1,1031 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: MngStdInterfaces.cpp
+**
+**
+** Purpose: Contains the implementation of the MngStdInterfaces
+** class. This class is used to determine the native IID associated
+** with each of the managed standard interfaces.
+**
+**
+
+===========================================================*/
+
+#include "common.h"
+
+#include "mngstdinterfaces.h"
+#include "dispex.h"
+#include "class.h"
+#include "method.hpp"
+#include "runtimecallablewrapper.h"
+#include "excep.h"
+#include "security.h"
+#include "typeparse.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+
+//
+// Declare the static field in the MngStdInterfaceMap class.
+//
+
+MngStdInterfaceMap *MngStdInterfaceMap::m_pMngStdItfMap=NULL;
+
+
+//
+// Defines used by the MngStdInterfaceMap class implementation.
+//
+
+// Use this macro to define an entry in the managed standard interface map.
+#define STD_INTERFACE_MAP_ENTRY(TypeName, NativeIID) \
+ m_TypeNameToNativeIIDMap.InsertValue((TypeName), (void*)&(NativeIID), TRUE)
+
+//
+// Defines used by the StdMngItfBase class implementation.
+//
+
+// The GetInstance method name and signature.
+#define GET_INSTANCE_METH_NAME "GetInstance"
+#define GET_INSTANCE_METH_SIG &gsig_SM_Str_RetICustomMarshaler
+
+// The initial number of buckets in the managed standard interface map.
+#define INITIAL_NUM_BUCKETS 64
+
+
+//
+// This method is used to build the managed standard interface map.
+//
+
+MngStdInterfaceMap::MngStdInterfaceMap()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ //
+ // Initialize the hashtable.
+ //
+
+ m_TypeNameToNativeIIDMap.Init(INITIAL_NUM_BUCKETS,NULL,NULL);
+
+ //
+ // Define the mapping for the managed standard interfaces.
+ //
+
+#define MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID, bCanCastOnNativeItfQI) \
+ STD_INTERFACE_MAP_ENTRY(strMngItfName, bCanCastOnNativeItfQI ? NativeItfIID : GUID_NULL);
+
+#define MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, ECallMethName, MethName, MethSig, FcallDecl)
+
+#define MNGSTDITF_END_INTERFACE(FriendlyName)
+
+#include "mngstditflist.h"
+
+#undef MNGSTDITF_BEGIN_INTERFACE
+#undef MNGSTDITF_DEFINE_METH_IMPL
+#undef MNGSTDITF_END_INTERFACE
+}
+
+
+//
+// Helper method to load the types used inside the classes that implement the ECall's for
+// the managed standard interfaces.
+//
+
+void MngStdItfBase::InitHelper(
+ LPCUTF8 strMngItfTypeName,
+ LPCUTF8 strUComItfTypeName,
+ LPCUTF8 strCMTypeName,
+ LPCUTF8 strCookie,
+ LPCUTF8 strManagedViewName,
+ TypeHandle *pMngItfType,
+ TypeHandle *pUComItfType,
+ TypeHandle *pCustomMarshalerType,
+ TypeHandle *pManagedViewType,
+ OBJECTHANDLE *phndMarshaler)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ // Load the managed interface type.
+ *pMngItfType = ClassLoader::LoadTypeByNameThrowing(SystemDomain::SystemAssembly(), NULL, strMngItfTypeName);
+
+ // Run the <clinit> for the managed interface type.
+ pMngItfType->GetMethodTable()->CheckRestore();
+ pMngItfType->GetMethodTable()->CheckRunClassInitThrowing();
+
+ // Load the UCom type.
+ *pUComItfType = ClassLoader::LoadTypeByNameThrowing(SystemDomain::SystemAssembly(), NULL, strUComItfTypeName);
+
+ // Run the <clinit> for the UCom type.
+ pUComItfType->GetMethodTable()->CheckRestore();
+ pUComItfType->GetMethodTable()->CheckRunClassInitThrowing();
+
+ // Retrieve the custom marshaler type handle.
+ SString sstrCMTypeName(SString::Utf8, strCMTypeName);
+ *pCustomMarshalerType = TypeName::GetTypeFromAsmQualifiedName(sstrCMTypeName.GetUnicode(), FALSE);
+
+ // Run the <clinit> for the marshaler.
+ pCustomMarshalerType->GetMethodTable()->EnsureInstanceActive();
+ pCustomMarshalerType->GetMethodTable()->CheckRunClassInitThrowing();
+
+ // Load the managed view.
+ SString sstrManagedViewName(SString::Utf8, strManagedViewName);
+ *pManagedViewType = TypeName::GetTypeFromAsmQualifiedName(sstrManagedViewName.GetUnicode(), FALSE);
+
+ // Run the <clinit> for the managed view.
+ pManagedViewType->GetMethodTable()->EnsureInstanceActive();
+ pManagedViewType->GetMethodTable()->CheckRunClassInitThrowing();
+
+ // Retrieve the GetInstance method.
+ MethodDesc *pGetInstanceMD = MemberLoader::FindMethod(pCustomMarshalerType->GetMethodTable(), GET_INSTANCE_METH_NAME, GET_INSTANCE_METH_SIG);
+ _ASSERTE(pGetInstanceMD && "Unable to find specified custom marshaler method");
+
+ // Allocate the string object that will be passed to the GetInstance method.
+ STRINGREF strObj = StringObject::NewString(strCookie);
+ GCPROTECT_BEGIN(strObj);
+ {
+ MethodDescCallSite getInstance(pGetInstanceMD, (OBJECTREF*)&strObj);
+
+ // Prepare the arguments that will be passed to GetInstance.
+ ARG_SLOT GetInstanceArgs[] = {
+ ObjToArgSlot(strObj)
+ };
+
+ // Call the static GetInstance method to retrieve the custom marshaler to use.
+ OBJECTREF Marshaler = getInstance.Call_RetOBJECTREF(GetInstanceArgs);
+
+ // Cache the handle to the marshaler for faster access.
+ (*phndMarshaler) = SystemDomain::GetCurrentDomain()->CreateHandle(Marshaler);
+ }
+ GCPROTECT_END();
+}
+
+
+//
+// Helper method that forwards the call either to the managed view, or directly to the
+// native component when it implements the managed interface natively.
+//
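+// In outline (mirroring the code below):
+//   1. If the target is a transparent proxy, re-dispatch through the remoting stub for
+//      the interface method.
+//   2. Otherwise QI the RCW for the managed interface; if the COM object implements it
+//      natively, dispatch to it directly.
+//   3. Failing that, QI for the native interface, call the custom marshaler's
+//      MarshalNativeToManaged to obtain the managed view, and dispatch the call to the
+//      managed view instead.
+//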
+
+LPVOID MngStdItfBase::ForwardCallToManagedView(
+ OBJECTHANDLE hndMarshaler,
+ MethodDesc *pMngItfMD,
+ MethodDesc *pUComItfMD,
+ MethodDesc *pMarshalNativeToManagedMD,
+ MethodDesc *pMngViewMD,
+ IID *pMngItfIID,
+ IID *pNativeItfIID,
+ ARG_SLOT* pArgs)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+
+ Object* Result = 0;
+ ULONG cbRef;
+ HRESULT hr;
+ IUnknown *pMngItf;
+ IUnknown *pNativeItf;
+ OBJECTREF ManagedView;
+ BOOL RetValIsProtected = FALSE;
+ struct LocalGcRefs {
+ OBJECTREF Obj;
+ OBJECTREF Result;
+ } Lr;
+
+ // Retrieve the object that the IExpando call was made on.
+ Lr.Obj = ArgSlotToObj(pArgs[0]);
+ Lr.Result = NULL;
+ GCPROTECT_BEGIN(Lr);
+ {
+ SafeComHolder<IUnknown> pUnk = NULL;
+
+ _ASSERTE(Lr.Obj != NULL);
+
+ MethodTable *pTargetMT = Lr.Obj->GetMethodTable();
+#ifdef FEATURE_REMOTING
+ if (pTargetMT->IsTransparentProxy())
+ {
+ // If we get here with a transparent proxy instead of a COM object we need to re-dispatch the call to the TP stub.
+ // This can be tricky since the stack is no longer in the right state to make the call directly. Instead we build a
+ // small thunk that directly transitions into the remoting system. That way we can use the existing
+ // MethodDesc::CallTarget routine to dispatch the call and get the extra argument on the stack (the method desc
+ // pointer for the interface method needed by the TP stub) without re-routing ourselves through the fcall stub that
+ // would just get us back to here.
+
+ MethodDescCallSite mngItf(pMngItfMD, CRemotingServices::GetStubForInterfaceMethod(pMngItfMD));
+
+ // Call the stub with the args we were passed originally.
+ Result = (Object*)mngItf.CallWithValueTypes_RetArgSlot(pArgs);
+ if (mngItf.GetMetaSig()->IsObjectRefReturnType())
+ {
+ Lr.Result = ObjectToOBJECTREF(Result);
+ RetValIsProtected = TRUE;
+ }
+ }
+ else
+#endif // FEATURE_REMOTING
+ {
+ // The target isn't a TP so it better be a COM object.
+ _ASSERTE(Lr.Obj->GetMethodTable()->IsComObjectType());
+
+ // We are about to call out to ummanaged code so we need to make a security check.
+ Security::SpecialDemand(SSWT_DEMAND_FROM_NATIVE, SECURITY_UNMANAGED_CODE);
+
+ {
+ RCWHolder pRCW(GetThread());
+ RCWPROTECT_BEGIN(pRCW, Lr.Obj);
+
+ // Get the IUnknown on the current thread.
+ pUnk = pRCW->GetIUnknown();
+ _ASSERTE(pUnk);
+
+ RCW_VTABLEPTR(pRCW);
+
+ // Check to see if the component implements the interface natively.
+ hr = SafeQueryInterface(pUnk, *pMngItfIID, &pMngItf);
+ LogInteropQI(pUnk, *pMngItfIID, hr, "Custom marshaler fwd call QI for managed interface");
+ if (SUCCEEDED(hr))
+ {
+ // Release our ref-count on the managed interface.
+ cbRef = SafeRelease(pMngItf);
+ LogInteropRelease(pMngItf, cbRef, "Custom marshaler call releasing managed interface");
+
+ MethodDescCallSite UComItf(pUComItfMD, &Lr.Obj);
+
+ // The component implements the interface natively so we need to dispatch to it directly.
+ Result = UComItf.Call_RetObjPtr(pArgs);
+ if (UComItf.GetMetaSig()->IsObjectRefReturnType())
+ {
+ Lr.Result = ObjectToOBJECTREF(Result);
+ RetValIsProtected = TRUE;
+ }
+ }
+ else
+ {
+ // QI for the native interface that will be passed to MarshalNativeToManaged.
+ hr = SafeQueryInterface(pUnk, *pNativeItfIID, (IUnknown**)&pNativeItf);
+ LogInteropQI(pUnk, *pNativeItfIID, hr, "Custom marshaler call QI for native interface");
+ _ASSERTE(SUCCEEDED(hr));
+
+ MethodDescCallSite marshalNativeToManaged(pMarshalNativeToManagedMD, hndMarshaler);
+
+ // Prepare the arguments that will be passed to MarshalNativeToManaged.
+ ARG_SLOT MarshalNativeToManagedArgs[] = {
+ ObjToArgSlot(ObjectFromHandle(hndMarshaler)),
+ (ARG_SLOT)pNativeItf
+ };
+
+ // Retrieve the managed view for the current native interface pointer.
+ ManagedView = marshalNativeToManaged.Call_RetOBJECTREF(MarshalNativeToManagedArgs);
+ GCPROTECT_BEGIN(ManagedView);
+ {
+ // Release our ref-count on pNativeItf.
+ cbRef = SafeRelease(pNativeItf);
+ LogInteropRelease(pNativeItf, cbRef, "Custom marshaler fwd call releasing native interface");
+
+ MethodDescCallSite mngView(pMngViewMD, &ManagedView);
+
+ // Replace the 'this' in pArgs with the 'this' of the managed view.
+ (*(Object**)pArgs) = OBJECTREFToObject(ManagedView);
+
+ // Do the actual call to the method in the managed view passing in the args.
+ Result = mngView.Call_RetObjPtr(pArgs);
+ if (mngView.GetMetaSig()->IsObjectRefReturnType())
+ {
+ Lr.Result = ObjectToOBJECTREF(Result);
+ RetValIsProtected = TRUE;
+ }
+ }
+ GCPROTECT_END();
+ }
+ RCWPROTECT_END(pRCW);
+ }
+ }
+ }
+ GCPROTECT_END();
+
+ if (RetValIsProtected)
+ Result = OBJECTREFToObject(Lr.Result);
+
+ return (void*)Result;
+}
+
+
+#define MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID, bCanCastOnNativeItfQI) \
+
+#define MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, ECallMethName, MethName, MethSig, FcallDecl) \
+\
+ LPVOID __stdcall FriendlyName::ECallMethName##Worker(ARG_SLOT* pArgs) \
+ { \
+ WRAPPER_NO_CONTRACT; \
+ FriendlyName *pMngStdItfInfo = SystemDomain::GetCurrentDomain()->GetMngStdInterfacesInfo()->Get##FriendlyName(); \
+ return ForwardCallToManagedView( \
+ pMngStdItfInfo->m_hndCustomMarshaler, \
+ pMngStdItfInfo->GetMngItfMD(FriendlyName##Methods_##ECallMethName, #MethName, MethSig), \
+ pMngStdItfInfo->GetUComItfMD(FriendlyName##Methods_##ECallMethName, #MethName, MethSig), \
+ pMngStdItfInfo->GetCustomMarshalerMD(CustomMarshalerMethods_MarshalNativeToManaged), \
+ pMngStdItfInfo->GetManagedViewMD(FriendlyName##Methods_##ECallMethName, #MethName, MethSig), \
+ &pMngStdItfInfo->m_MngItfIID, \
+ &pMngStdItfInfo->m_NativeItfIID, \
+ pArgs); \
+ }
+
+#define MNGSTDITF_END_INTERFACE(FriendlyName)
+
+
+#include "mngstditflist.h"
+
+
+#undef MNGSTDITF_BEGIN_INTERFACE
+#undef MNGSTDITF_DEFINE_METH_IMPL
+#undef MNGSTDITF_END_INTERFACE
+
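+// For example, with FriendlyName = StdMngIEnumerator and ECallMethName = MoveNext, the
+// MNGSTDITF_DEFINE_METH_IMPL expansion above generates StdMngIEnumerator::MoveNextWorker,
+// which the MoveNext FCall below forwards its GC-protected argument array to.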
+
+FCIMPL1(FC_BOOL_RET, StdMngIEnumerator::MoveNext, Object* refThisUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ LPVOID retVal = NULL;
+ ARG_SLOT args[1] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE))
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 1);
+
+ retVal = MoveNextWorker(args);
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END_POLL();
+
+ // Actual return type is a managed 'bool', so only look at a CLR_BOOL-sized
+ // result. The high bits are undefined on AMD64. (Note that a narrowing
+ // cast to CLR_BOOL will not work since it is the same as checking the
+ // size_t result != 0.)
+ FC_RETURN_BOOL(*(CLR_BOOL*)StackElemEndianessFixup(&retVal, sizeof(CLR_BOOL)));
+}
+FCIMPLEND
+
+FCIMPL1(Object*, StdMngIEnumerator::get_Current, Object* refThisUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[1] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE))
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 1);
+
+ retVal = ObjectToOBJECTREF((Object*)get_CurrentWorker(args));
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+FCIMPL1(void, StdMngIEnumerator::Reset, Object* refThisUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ ARG_SLOT args[1] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE))
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 1);
+
+ ResetWorker(args);
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END_POLL();
+}
+FCIMPLEND
+
+
+FCIMPL2(Object*, StdMngIExpando::AddField, Object* refThisUNSAFE, StringObject* refNameUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[2] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ ObjToArgSlot(ObjectToOBJECTREF(refNameUNSAFE))
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 2);
+
+ retVal = ObjectToOBJECTREF((Object*)AddFieldWorker(args));
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+FCIMPL2(Object*, StdMngIExpando::AddProperty, Object* refThisUNSAFE, StringObject* refNameUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[2] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ ObjToArgSlot(ObjectToOBJECTREF(refNameUNSAFE))
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 2);
+
+ retVal = ObjectToOBJECTREF((Object*)AddPropertyWorker(args));
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+FCIMPL3(Object*, StdMngIExpando::AddMethod, Object* refThisUNSAFE, StringObject* refNameUNSAFE, Object* refDelegateUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[3] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ ObjToArgSlot(ObjectToOBJECTREF(refNameUNSAFE)),
+ ObjToArgSlot(ObjectToOBJECTREF(refDelegateUNSAFE))
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 3);
+
+ retVal = ObjectToOBJECTREF((Object*)AddMethodWorker(args));
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+
+FCIMPL2(void, StdMngIExpando::RemoveMember, Object* refThisUNSAFE, Object* refMemberInfoUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ ARG_SLOT args[2] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ ObjToArgSlot(ObjectToOBJECTREF(refMemberInfoUNSAFE))
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 2);
+
+ RemoveMemberWorker(args);
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END_POLL();
+}
+FCIMPLEND
+
+
+FCIMPL6(Object*, StdMngIReflect::GetMethod, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr, Object* refBinderUNSAFE, Object* refTypesArrayUNSAFE, Object* refModifiersArrayUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[6] =
+ {
+ /* 0 */ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ /* 1 */ ObjToArgSlot(ObjectToOBJECTREF(refNameUNSAFE)),
+ /* 2 */ enumBindingAttr,
+ /* 3 */ ObjToArgSlot(ObjectToOBJECTREF(refBinderUNSAFE)),
+ /* 4 */ ObjToArgSlot(ObjectToOBJECTREF(refTypesArrayUNSAFE)),
+ /* 5 */ ObjToArgSlot(ObjectToOBJECTREF(refModifiersArrayUNSAFE))
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 2);
+ GCPROTECT_ARRAY_BEGIN(args[3], 3);
+
+ retVal = ObjectToOBJECTREF((Object*)GetMethodWorker(args));
+
+ GCPROTECT_END();
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+FCIMPL3(Object*, StdMngIReflect::GetMethod_2, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[3] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ ObjToArgSlot(ObjectToOBJECTREF(refNameUNSAFE)),
+ enumBindingAttr
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 2);
+
+ retVal = ObjectToOBJECTREF((Object*)GetMethod_2Worker(args));
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+FCIMPL2(Object*, StdMngIReflect::GetMethods, Object* refThisUNSAFE, INT32 enumBindingAttr)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[2] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ enumBindingAttr
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 1);
+
+ retVal = ObjectToOBJECTREF((Object*)GetMethodsWorker(args));
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+FCIMPL3(Object*, StdMngIReflect::GetField, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[3] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ ObjToArgSlot(ObjectToOBJECTREF(refNameUNSAFE)),
+ enumBindingAttr
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 2);
+
+ retVal = ObjectToOBJECTREF((Object*)GetFieldWorker(args));
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+FCIMPL2(Object*, StdMngIReflect::GetFields, Object* refThisUNSAFE, INT32 enumBindingAttr)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[2] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ enumBindingAttr
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 1);
+
+ retVal = ObjectToOBJECTREF((Object*)GetFieldsWorker(args));
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+FCIMPL7(Object*, StdMngIReflect::GetProperty, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr, Object* refBinderUNSAFE, Object* refTypeUNSAFE, Object* refTypesArrayUNSAFE, Object* refModifiersArrayUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[7] =
+ {
+ /* 0 */ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ /* 1 */ ObjToArgSlot(ObjectToOBJECTREF(refNameUNSAFE)),
+ /* 2 */ enumBindingAttr,
+ /* 3 */ ObjToArgSlot(ObjectToOBJECTREF(refBinderUNSAFE)),
+ /* 4 */ ObjToArgSlot(ObjectToOBJECTREF(refTypeUNSAFE)),
+ /* 5 */ ObjToArgSlot(ObjectToOBJECTREF(refTypesArrayUNSAFE)),
+ /* 6 */ ObjToArgSlot(ObjectToOBJECTREF(refModifiersArrayUNSAFE))
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 2);
+ GCPROTECT_ARRAY_BEGIN(args[3], 4);
+
+ retVal = ObjectToOBJECTREF((Object*)GetPropertyWorker(args));
+
+ GCPROTECT_END();
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+FCIMPL3(Object*, StdMngIReflect::GetProperty_2, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[3] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ ObjToArgSlot(ObjectToOBJECTREF(refNameUNSAFE)),
+ enumBindingAttr
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 2);
+
+ retVal = ObjectToOBJECTREF((Object*)GetProperty_2Worker(args));
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+FCIMPL2(Object*, StdMngIReflect::GetProperties, Object* refThisUNSAFE, INT32 enumBindingAttr)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[2] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ enumBindingAttr
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 1);
+
+ retVal = ObjectToOBJECTREF((Object*)GetPropertiesWorker(args));
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+FCIMPL3(Object*, StdMngIReflect::GetMember, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[3] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ ObjToArgSlot(ObjectToOBJECTREF(refNameUNSAFE)),
+ enumBindingAttr
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 2);
+
+ retVal = ObjectToOBJECTREF((Object*)GetMemberWorker(args));
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+FCIMPL2(Object*, StdMngIReflect::GetMembers, Object* refThisUNSAFE, INT32 enumBindingAttr)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[2] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ enumBindingAttr
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 1);
+
+ retVal = ObjectToOBJECTREF((Object*)GetMembersWorker(args));
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+FCIMPL9(Object*, StdMngIReflect::InvokeMember, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr, Object* refBinderUNSAFE, Object* refTargetUNSAFE, Object* refArgsArrayUNSAFE, Object* refModifiersArrayUNSAFE, Object* refCultureUNSAFE, Object* refNamedParamsArrayUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[9] =
+ {
+ /* 0 */ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE)),
+ /* 1 */ ObjToArgSlot(ObjectToOBJECTREF(refNameUNSAFE)),
+ /* 2 */ enumBindingAttr,
+ /* 3 */ ObjToArgSlot(ObjectToOBJECTREF(refBinderUNSAFE)),
+ /* 4 */ ObjToArgSlot(ObjectToOBJECTREF(refTargetUNSAFE)),
+ /* 5 */ ObjToArgSlot(ObjectToOBJECTREF(refArgsArrayUNSAFE)),
+ /* 6 */ ObjToArgSlot(ObjectToOBJECTREF(refModifiersArrayUNSAFE)),
+ /* 7 */ ObjToArgSlot(ObjectToOBJECTREF(refCultureUNSAFE)),
+ /* 8 */ ObjToArgSlot(ObjectToOBJECTREF(refNamedParamsArrayUNSAFE))
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 2);
+ GCPROTECT_ARRAY_BEGIN(args[3], 6);
+
+ retVal = ObjectToOBJECTREF((Object*)InvokeMemberWorker(args));
+
+ GCPROTECT_END();
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+FCIMPL1(Object*, StdMngIReflect::get_UnderlyingSystemType, Object* refThisUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[1] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE))
+ };
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 1);
+
+ retVal = ObjectToOBJECTREF((Object*)get_UnderlyingSystemTypeWorker(args));
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
+
+
+FCIMPL1(Object*, StdMngIEnumerable::GetEnumerator, Object* refThisUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF retVal = NULL;
+ ARG_SLOT args[1] =
+ {
+ ObjToArgSlot(ObjectToOBJECTREF(refThisUNSAFE))
+ };
+ OBJECTREF *porefThis = (OBJECTREF *)&args[0];
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ GCPROTECT_ARRAY_BEGIN(args[0], 1);
+
+ // There are three ways to handle calls via IEnumerable::GetEnumerator on an RCW:
+ // 1. Use BindableIterableToEnumerableAdapter (Jupiter data-binding scenario)
+ // 2. Use IterableToEnumerableAdapter if the object is known to implement IIterable<T> (WinRT)
+ // 3. Use EnumerableToDispatchMarshaler in CustomMarshalers.dll (legacy COM interop)
+ SyncBlock *pSB = (*porefThis)->GetSyncBlock();
+ if (pSB->GetInteropInfoNoCreate() != NULL && pSB->GetInteropInfoNoCreate()->GetRawRCW() != NULL)
+ {
+ RCWHolder pRCW(GetThread());
+ RCWPROTECT_BEGIN(pRCW, pSB);
+
+ if (pRCW->SupportsIInspectable())
+ {
+ //
+ // Test which IEnumerable implementation we want.
+ // Prefer the WinRT case, since WinRT is the newer scenario.
+ //
+ if (pRCW->SupportsWinRTInteropInterface(MscorlibBinder::GetExistingClass(CLASS__IENUMERABLE)))
+ {
+ //
+ // Supports WinRT IEnumerable
+ // Could be either IIterable<T>, IBindableIterable, or IDispatch+DISPID_NEWENUM
+ //
+ MethodDesc *pGetEnumeratorMethod = pRCW->GetGetEnumeratorMethod();
+ if (pGetEnumeratorMethod)
+ {
+ //
+ // IIterable<T>/IDispatch+DISPID_NEWENUM - The right enumerator method is saved in pGetEnumeratorMethod
+ //
+ MethodDescCallSite callSite(pGetEnumeratorMethod, porefThis);
+
+ retVal = callSite.Call_RetOBJECTREF(args);
+ }
+ else
+ {
+ //
+ // IBindableIterable
+ //
+ MethodDescCallSite callSite(MscorlibBinder::GetMethod(METHOD__BINDABLEITERABLE_TO_ENUMERABLE_ADAPTER__GET_ENUMERATOR_STUB));
+ retVal = callSite.Call_RetOBJECTREF(args);
+ }
+ }
+ else if (!pRCW->SupportsLegacyEnumerableInterface())
+ {
+ // The object supports neither IBindableIterable nor IDispatch+DISPID_NEWENUM enumeration.
+ // Try to use IIterable<T>, and throw an exception if that fails.
+ MethodDesc *pGetEnumeratorMethod = pRCW->GetGetEnumeratorMethod();
+ if (pGetEnumeratorMethod != NULL)
+ {
+ // make a virtual call through the generic IEnumerable<T>
+ MethodDescCallSite callSite(pGetEnumeratorMethod, porefThis);
+
+ retVal = callSite.Call_RetOBJECTREF(args);
+ }
+ else
+ {
+ // If we haven't seen a cast to a generic IEnumerable<T> and haven't been able to infer
+ // the interface statically, we have to throw an exception, suggesting a workaround.
+ // (This is an inherent limitation, we can't know what to do without type information.)
+ COMPlusThrow(kInvalidCastException, IDS_EE_WINRT_IENUMERABLE_BAD_CALL);
+ }
+ }
+ }
+
+ RCWPROTECT_END(pRCW);
+ }
+
+ if (retVal == NULL)
+ {
+ // classic COM interop scenario
+ retVal = ObjectToOBJECTREF((Object*)GetEnumeratorWorker(args));
+ }
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+
+ FC_GC_POLL_AND_RETURN_OBJREF(retVal);
+}
+FCIMPLEND
diff --git a/src/vm/mngstdinterfaces.h b/src/vm/mngstdinterfaces.h
new file mode 100644
index 0000000000..a041ef04b2
--- /dev/null
+++ b/src/vm/mngstdinterfaces.h
@@ -0,0 +1,399 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: MngStdInterfaceMap.h
+**
+**
+** Purpose: Contains types and method signatures for the COM wrapper class
+**
+**
+
+===========================================================*/
+
+#ifndef _MNGSTDINTERFACEMAP_H
+#define _MNGSTDINTERFACEMAP_H
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+#include "vars.hpp"
+#include "eehash.h"
+#include "class.h"
+#include "mlinfo.h"
+
+#ifndef DACCESS_COMPILE
+//
+// This class is used to establish a mapping between a managed standard interface and its
+// unmanaged counterpart.
+//
+
+class MngStdInterfaceMap
+{
+public:
+ // This method retrieves the native IID of the interface that the specified
+ // managed type is a standard interface for. If the specified type is not
+ // a standard interface, NULL is returned.
+ inline static IID* GetNativeIIDForType(TypeHandle th)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ // Only simple class types can have native IIDs
+ if (th.IsTypeDesc())
+ return NULL;
+
+ HashDatum Data;
+
+ // Retrieve the name of the type.
+ LPCUTF8 ns, name;
+ LPUTF8 strTypeName;
+ name = th.GetMethodTable()->GetFullyQualifiedNameInfo(&ns);
+ MAKE_FULL_PATH_ON_STACK_UTF8(strTypeName, ns, name);
+
+ if (m_pMngStdItfMap == NULL) {
+ MngStdInterfaceMap *tmp = new MngStdInterfaceMap;
+ if (FastInterlockCompareExchangePointer(&m_pMngStdItfMap, tmp, NULL) != NULL) {
+ tmp->m_TypeNameToNativeIIDMap.ClearHashTable();
+ delete tmp;
+ }
+ }
+ if (m_pMngStdItfMap->m_TypeNameToNativeIIDMap.GetValue(strTypeName, &Data) && (*((GUID*)Data) != GUID_NULL))
+ {
+ // The type is a standard interface.
+ return (IID*)Data;
+ }
+ else
+ {
+ // The type is not a standard interface.
+ return NULL;
+ }
+ }
+
+private:
+ // Disallow creation of this class by anybody outside of it.
+ MngStdInterfaceMap();
+
+ // The map of type names to native IIDs.
+ EEUtf8StringHashTable m_TypeNameToNativeIIDMap;
+
+ // The one and only instance of the managed std interface map.
+ static MngStdInterfaceMap *m_pMngStdItfMap;
+};
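+
+// A minimal usage sketch (hypothetical caller; 'th' and 'pUnk' stand for a
+// TypeHandle and an IUnknown* already in hand):
+//
+//   IID *pNativeIID = MngStdInterfaceMap::GetNativeIIDForType(th);
+//   if (pNativeIID != NULL)
+//   {
+//       // th is a managed standard interface; QI for its native counterpart.
+//       IUnknown *pNativeItf = NULL;
+//       HRESULT hr = pUnk->QueryInterface(*pNativeIID, (void **)&pNativeItf);
+//   }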
+
+#endif // DACCESS_COMPILE
+
+//
+// Base class for all the classes that contain the ECall's for the managed standard interfaces.
+//
+
+class MngStdItfBase
+{
+protected:
+ static void InitHelper(
+ LPCUTF8 strMngItfTypeName,
+ LPCUTF8 strUComItfTypeName,
+ LPCUTF8 strCMTypeName,
+ LPCUTF8 strCookie,
+ LPCUTF8 strManagedViewName,
+ TypeHandle *pMngItfType,
+ TypeHandle *pUComItfType,
+ TypeHandle *pCustomMarshalerType,
+ TypeHandle *pManagedViewType,
+ OBJECTHANDLE *phndMarshaler);
+
+ static LPVOID ForwardCallToManagedView(
+ OBJECTHANDLE hndMarshaler,
+ MethodDesc *pMngItfMD,
+ MethodDesc *pUComItfMD,
+ MethodDesc *pMarshalNativeToManagedMD,
+ MethodDesc *pMngViewMD,
+ IID *pMngItfIID,
+ IID *pNativeItfIID,
+ ARG_SLOT* pArgs);
+};
+
+
+//
+// Define the enum of methods on the managed standard interface.
+//
+
+#define MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID, bCanCastOnNativeItfQI) \
+\
+enum FriendlyName##Methods \
+{ \
+ FriendlyName##Methods_Dummy = -1,
+
+
+#define MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, ECallMethName, MethName, MethSig, FcallDecl) \
+ FriendlyName##Methods_##ECallMethName,
+
+
+#define MNGSTDITF_END_INTERFACE(FriendlyName) \
+ FriendlyName##Methods_LastMember \
+}; \
+
+
+#include "mngstditflist.h"
+
+
+#undef MNGSTDITF_BEGIN_INTERFACE
+#undef MNGSTDITF_DEFINE_METH_IMPL
+#undef MNGSTDITF_END_INTERFACE
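+
+// For illustration, the include above expands the IEnumerator entry of
+// mngstditflist.h into roughly the following enum (the method names come from
+// the MNGSTDITF_DEFINE_METH lines for StdMngIEnumerator):
+//
+//   enum StdMngIEnumeratorMethods
+//   {
+//       StdMngIEnumeratorMethods_Dummy = -1,
+//       StdMngIEnumeratorMethods_MoveNext,
+//       StdMngIEnumeratorMethods_get_Current,
+//       StdMngIEnumeratorMethods_Reset,
+//       StdMngIEnumeratorMethods_LastMember
+//   };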
+
+
+//
+// Define the class that implements the ECall's for the managed standard interface.
+//
+
+#define MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID, bCanCastOnNativeItfQI) \
+\
+class FriendlyName : public MngStdItfBase \
+{ \
+public: \
+ FriendlyName() \
+ { \
+ CONTRACTL \
+ { \
+ THROWS; \
+ GC_TRIGGERS; \
+ INJECT_FAULT(COMPlusThrowOM()); \
+ } \
+ CONTRACTL_END \
+ InitHelper(strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, &m_MngItfType, &m_UComItfType, &m_CustomMarshalerType, &m_ManagedViewType, &m_hndCustomMarshaler); \
+ m_NativeItfIID = NativeItfIID; \
+ m_UComItfType.GetMethodTable()->GetGuid(&m_MngItfIID, TRUE); \
+ memset(m_apCustomMarshalerMD, 0, CustomMarshalerMethods_LastMember * sizeof(MethodDesc *)); \
+ memset(m_apManagedViewMD, 0, FriendlyName##Methods_LastMember * sizeof(MethodDesc *)); \
+ memset(m_apUComItfMD, 0, FriendlyName##Methods_LastMember * sizeof(MethodDesc *)); \
+ memset(m_apMngItfMD, 0, FriendlyName##Methods_LastMember * sizeof(MethodDesc *)); \
+ } \
+\
+ OBJECTREF GetCustomMarshaler() \
+ { \
+ WRAPPER_NO_CONTRACT; \
+ return ObjectFromHandle(m_hndCustomMarshaler); \
+ } \
+\
+ MethodDesc* GetCustomMarshalerMD(EnumCustomMarshalerMethods Method) \
+ { \
+ CONTRACTL \
+ { \
+ THROWS; \
+ GC_TRIGGERS; \
+ INJECT_FAULT(COMPlusThrowOM()); \
+ } \
+ CONTRACTL_END \
+ MethodDesc *pMD = NULL; \
+ \
+ if (m_apCustomMarshalerMD[Method]) \
+ return m_apCustomMarshalerMD[Method]; \
+ \
+ pMD = CustomMarshalerInfo::GetCustomMarshalerMD(Method, m_CustomMarshalerType); \
+ _ASSERTE(pMD && "Unable to find specified method on the custom marshaler"); \
+ MetaSig::EnsureSigValueTypesLoaded(pMD); \
+ \
+ m_apCustomMarshalerMD[Method] = pMD; \
+ return pMD; \
+ } \
+\
+ MethodDesc* GetManagedViewMD(FriendlyName##Methods Method, LPCUTF8 strMethName, LPHARDCODEDMETASIG pSig) \
+ { \
+ CONTRACTL \
+ { \
+ THROWS; \
+ GC_TRIGGERS; \
+ INJECT_FAULT(COMPlusThrowOM()); \
+ } \
+ CONTRACTL_END \
+ MethodDesc *pMD = NULL; \
+ \
+ if (m_apManagedViewMD[Method]) \
+ return m_apManagedViewMD[Method]; \
+ \
+ pMD = MemberLoader::FindMethod(m_ManagedViewType.GetMethodTable(), strMethName, pSig); \
+ _ASSERTE(pMD && "Unable to find specified method on the managed view"); \
+ MetaSig::EnsureSigValueTypesLoaded(pMD); \
+ \
+ m_apManagedViewMD[Method] = pMD; \
+ return pMD; \
+ } \
+\
+ MethodDesc* GetUComItfMD(FriendlyName##Methods Method, LPCUTF8 strMethName, LPHARDCODEDMETASIG pSig) \
+ { \
+ CONTRACTL \
+ { \
+ THROWS; \
+ GC_TRIGGERS; \
+ INJECT_FAULT(COMPlusThrowOM()); \
+ } \
+ CONTRACTL_END \
+ MethodDesc *pMD = NULL; \
+ \
+ if (m_apUComItfMD[Method]) \
+ return m_apUComItfMD[Method]; \
+ \
+ pMD = MemberLoader::FindMethod(m_UComItfType.GetMethodTable(), strMethName, pSig); \
+ _ASSERTE(pMD && "Unable to find specified method in UCom interface"); \
+ MetaSig::EnsureSigValueTypesLoaded(pMD); \
+ \
+ m_apUComItfMD[Method] = pMD; \
+ return pMD; \
+ } \
+\
+ MethodDesc* GetMngItfMD(FriendlyName##Methods Method, LPCUTF8 strMethName, LPHARDCODEDMETASIG pSig) \
+ { \
+ CONTRACTL \
+ { \
+ THROWS; \
+ GC_TRIGGERS; \
+ INJECT_FAULT(COMPlusThrowOM()); \
+ } \
+ CONTRACTL_END \
+ MethodDesc *pMD = NULL; \
+ \
+ if (m_apMngItfMD[Method]) \
+ return m_apMngItfMD[Method]; \
+ \
+ pMD = MemberLoader::FindMethod(m_MngItfType.GetMethodTable(), strMethName, pSig); \
+ _ASSERTE(pMD && "Unable to find specified method on the managed interface"); \
+ MetaSig::EnsureSigValueTypesLoaded(pMD); \
+ \
+ m_apMngItfMD[Method] = pMD; \
+ return pMD; \
+ } \
+\
+private: \
+ MethodDesc* m_apCustomMarshalerMD[CustomMarshalerMethods_LastMember]; \
+ MethodDesc* m_apManagedViewMD[FriendlyName##Methods_LastMember]; \
+ MethodDesc* m_apUComItfMD[FriendlyName##Methods_LastMember]; \
+ MethodDesc* m_apMngItfMD[FriendlyName##Methods_LastMember]; \
+ TypeHandle m_CustomMarshalerType; \
+ TypeHandle m_ManagedViewType; \
+ TypeHandle m_UComItfType; \
+ TypeHandle m_MngItfType; \
+ OBJECTHANDLE m_hndCustomMarshaler; \
+ GUID m_MngItfIID; \
+ GUID m_NativeItfIID; \
+\
+
+#define MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, ECallMethName, MethName, MethSig, FcallDecl) \
+\
+public: static LPVOID __stdcall ECallMethName##Worker(ARG_SLOT* pArgs); \
+public: static FcallDecl; \
+\
+
+#define MNGSTDITF_END_INTERFACE(FriendlyName) \
+}; \
+\
+
+
+#include "mngstditflist.h"
+
+
+#undef MNGSTDITF_BEGIN_INTERFACE
+#undef MNGSTDITF_DEFINE_METH_IMPL
+#undef MNGSTDITF_END_INTERFACE
+
+
+//
+// App domain level information on the managed standard interfaces.
+//
+
+class MngStdInterfacesInfo
+{
+public:
+ // Constructor and destructor.
+ MngStdInterfacesInfo()
+ {
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_FAULT;
+
+#define MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID, bCanCastOnNativeItfQI) \
+\
+ m_p##FriendlyName = 0; \
+\
+
+#define MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, ECallMethName, MethName, MethSig, FcallDecl)
+#define MNGSTDITF_END_INTERFACE(FriendlyName)
+
+
+#include "mngstditflist.h"
+
+
+#undef MNGSTDITF_BEGIN_INTERFACE
+#undef MNGSTDITF_DEFINE_METH_IMPL
+#undef MNGSTDITF_END_INTERFACE
+ }
+
+ ~MngStdInterfacesInfo()
+ {
+ WRAPPER_NO_CONTRACT;
+
+#define MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID, bCanCastOnNativeItfQI) \
+\
+ if (m_p##FriendlyName) \
+ delete m_p##FriendlyName; \
+\
+
+#define MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, ECallMethName, MethName, MethSig, FcallDecl)
+#define MNGSTDITF_END_INTERFACE(FriendlyName)
+
+
+#include "mngstditflist.h"
+
+
+#undef MNGSTDITF_BEGIN_INTERFACE
+#undef MNGSTDITF_DEFINE_METH_IMPL
+#undef MNGSTDITF_END_INTERFACE
+ }
+
+
+ // Accessors for each of the managed standard interfaces.
+#define MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID, bCanCastOnNativeItfQI) \
+\
+public: \
+ FriendlyName *Get##FriendlyName() \
+ { \
+ CONTRACTL \
+ { \
+ THROWS; \
+ GC_TRIGGERS; \
+ INJECT_FAULT(COMPlusThrowOM()); \
+ } \
+ CONTRACTL_END \
+ if (!m_p##FriendlyName) \
+ { \
+ NewHolder<FriendlyName> pFriendlyName = new FriendlyName(); \
+ if (InterlockedCompareExchangeT(&m_p##FriendlyName, pFriendlyName.GetValue(), NULL) == NULL) \
+ pFriendlyName.SuppressRelease(); \
+ } \
+ return m_p##FriendlyName; \
+ } \
+\
+private: \
+ FriendlyName *m_p##FriendlyName; \
+\
+
+#define MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, ECallMethName, MethName, MethSig, FcallDecl)
+#define MNGSTDITF_END_INTERFACE(FriendlyName)
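+
+// Note on the Get##FriendlyName accessor defined above: the singleton is
+// published with InterlockedCompareExchangeT, and the thread that loses the
+// race lets its NewHolder delete the duplicate instance, so exactly one
+// FriendlyName object survives per MngStdInterfacesInfo.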
+
+
+#include "mngstditflist.h"
+
+
+#undef MNGSTDITF_BEGIN_INTERFACE
+#undef MNGSTDITF_DEFINE_METH_IMPL
+#undef MNGSTDITF_END_INTERFACE
+};
+
+#endif // _MNGSTDINTERFACEMAP_H
diff --git a/src/vm/mngstditflist.h b/src/vm/mngstditflist.h
new file mode 100644
index 0000000000..5a40074a0f
--- /dev/null
+++ b/src/vm/mngstditflist.h
@@ -0,0 +1,141 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: MngStdItfList.h
+**
+**
+** Purpose: This file contains the list of managed standard
+** interfaces. Each standard interface also has the
+** list of methods that it contains.
+**
+===========================================================*/
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+//
+// Helper macros
+//
+
+#define MNGSTDITF_DEFINE_METH(FriendlyName, MethName, MethSig, FcallDecl) \
+ MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, MethName, MethName, MethSig, FcallDecl)
+
+#define MNGSTDITF_DEFINE_METH2(FriendlyName, MethName, MethSig, FcallDecl) \
+ MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, MethName##_2, MethName, MethSig, FcallDecl)
+
+#define MNGSTDITF_DEFINE_METH3(FriendlyName, MethName, MethSig, FcallDecl) \
+ MNGSTDITF_DEFINE_METH_IMPL(FriendlyName, MethName##_3, MethName, MethSig, FcallDecl)
+
+#define CUSTOM_MARSHALER_ASM ", CustomMarshalers, Version=" VER_ASSEMBLYVERSION_STR ", Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a"
+
+
+
+
+//
+// MNGSTDITF_BEGIN_INTERFACE(FriendlyName, strMngItfName, strUCOMMngItfName, strCustomMarshalerName, strCustomMarshalerCookie, strManagedViewName, NativeItfIID, bCanCastOnNativeItfQI)
+//
+// This macro defines a new managed standard interface.
+//
+// FriendlyName Friendly name for the class that implements the ECall's.
+// strMngItfName Name of the managed interface.
+// strUCOMMngItfName Name of the UCom version of the managed interface.
+// strCustomMarshalerName Name of the custom marshaler.
+// strCustomMarshalerCookie String containing the cookie to be passed to the custom marshaler.
+// strManagedViewName String containing the name of the managed view of the native interface.
+// NativeItfIID IID of the native interface.
+// bCanCastOnNativeItfQI If this is true, casting to a COM object that supports the native interface
+// will cause the cast to succeed.
+//
+
+//
+// MNGSTDITF_DEFINE_METH(FriendlyName, MethName, MethSig, FcallDecl)
+//
+// This macro defines a method of the standard managed interface.
+// MNGSTDITF_DEFINE_METH2 and MNGSTDITF_DEFINE_METH3 are used to
+// define overloaded versions of the method.
+//
+// FriendlyName Friendly name for the class that implements the ECall's.
+// MethName This is the method name.
+// MethSig This is the method signature.
+// FcallDecl This is the FCDECL declaration of the method's ECall entrypoint.
+//
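+
+// For example, the IEnumerator section below declares:
+//
+//   MNGSTDITF_DEFINE_METH(StdMngIEnumerator, MoveNext, &gsig_IM_RetBool,
+//                         MNGSTDITF_IENUMERATOR_DECL__MOVE_NEXT)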
+
+
+//
+// IReflect
+//
+
+
+#define MNGSTDITF_IREFLECT_DECL__GETMETHOD FCDECL6(Object*, GetMethod, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr, Object* refBinderUNSAFE, Object* refTypesArrayUNSAFE, Object* refModifiersArrayUNSAFE)
+#define MNGSTDITF_IREFLECT_DECL__GETMETHOD_2 FCDECL3(Object*, GetMethod_2, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
+#define MNGSTDITF_IREFLECT_DECL__GETMETHODS FCDECL2(Object*, GetMethods, Object* refThisUNSAFE, INT32 enumBindingAttr)
+#define MNGSTDITF_IREFLECT_DECL__GETFIELD FCDECL3(Object*, GetField, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
+#define MNGSTDITF_IREFLECT_DECL__GETFIELDS FCDECL2(Object*, GetFields, Object* refThisUNSAFE, INT32 enumBindingAttr)
+#define MNGSTDITF_IREFLECT_DECL__GETPROPERTY FCDECL7(Object*, GetProperty, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr, Object* refBinderUNSAFE, Object* refReturnTypeUNSAFE, Object* refTypesArrayUNSAFE, Object* refModifiersArrayUNSAFE)
+#define MNGSTDITF_IREFLECT_DECL__GETPROPERTY_2 FCDECL3(Object*, GetProperty_2, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
+#define MNGSTDITF_IREFLECT_DECL__GETPROPERTIES FCDECL2(Object*, GetProperties, Object* refThisUNSAFE, INT32 enumBindingAttr)
+#define MNGSTDITF_IREFLECT_DECL__GETMEMBER FCDECL3(Object*, GetMember, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr)
+#define MNGSTDITF_IREFLECT_DECL__GETMEMBERS FCDECL2(Object*, GetMembers, Object* refThisUNSAFE, INT32 enumBindingAttr)
+#define MNGSTDITF_IREFLECT_DECL__INVOKEMEMBER FCDECL9(Object*, InvokeMember, Object* refThisUNSAFE, Object* refNameUNSAFE, INT32 enumBindingAttr, Object* refBinderUNSAFE, Object* refTargetUNSAFE, Object* refArgsArrayUNSAFE, Object* refModifiersArrayUNSAFE, Object* refCultureUNSAFE, Object* refNamedParamsArrayUNSAFE)
+#define MNGSTDITF_IREFLECT_DECL__GET_UNDERLYING_SYSTEM_TYPE FCDECL1(Object*, get_UnderlyingSystemType, Object* refThisUNSAFE)
+
+MNGSTDITF_BEGIN_INTERFACE(StdMngIReflect, g_ReflectionReflectItfName, "System.Runtime.InteropServices.ComTypes.IReflect", g_CMExpandoToDispatchExMarshaler CUSTOM_MARSHALER_ASM, "IReflect", g_CMExpandoViewOfDispatchEx CUSTOM_MARSHALER_ASM, IID_IDispatchEx, TRUE)
+ MNGSTDITF_DEFINE_METH(StdMngIReflect, GetMethod, &gsig_IM_Str_BindingFlags_Binder_ArrType_ArrParameterModifier_RetMethodInfo, MNGSTDITF_IREFLECT_DECL__GETMETHOD)
+ MNGSTDITF_DEFINE_METH2(StdMngIReflect,GetMethod, &gsig_IM_Str_BindingFlags_RetMethodInfo, MNGSTDITF_IREFLECT_DECL__GETMETHOD_2)
+ MNGSTDITF_DEFINE_METH(StdMngIReflect, GetMethods, &gsig_IM_BindingFlags_RetArrMethodInfo, MNGSTDITF_IREFLECT_DECL__GETMETHODS)
+ MNGSTDITF_DEFINE_METH(StdMngIReflect, GetField, &gsig_IM_Str_BindingFlags_RetFieldInfo, MNGSTDITF_IREFLECT_DECL__GETFIELD)
+ MNGSTDITF_DEFINE_METH(StdMngIReflect, GetFields, &gsig_IM_BindingFlags_RetArrFieldInfo, MNGSTDITF_IREFLECT_DECL__GETFIELDS)
+ MNGSTDITF_DEFINE_METH(StdMngIReflect, GetProperty, &gsig_IM_Str_BindingFlags_Binder_Type_ArrType_ArrParameterModifier_RetPropertyInfo, MNGSTDITF_IREFLECT_DECL__GETPROPERTY)
+ MNGSTDITF_DEFINE_METH2(StdMngIReflect,GetProperty, &gsig_IM_Str_BindingFlags_RetPropertyInfo, MNGSTDITF_IREFLECT_DECL__GETPROPERTY_2)
+ MNGSTDITF_DEFINE_METH(StdMngIReflect, GetProperties,&gsig_IM_BindingFlags_RetArrPropertyInfo, MNGSTDITF_IREFLECT_DECL__GETPROPERTIES)
+ MNGSTDITF_DEFINE_METH(StdMngIReflect, GetMember, &gsig_IM_Str_BindingFlags_RetMemberInfo, MNGSTDITF_IREFLECT_DECL__GETMEMBER)
+ MNGSTDITF_DEFINE_METH(StdMngIReflect, GetMembers, &gsig_IM_BindingFlags_RetArrMemberInfo, MNGSTDITF_IREFLECT_DECL__GETMEMBERS)
+ MNGSTDITF_DEFINE_METH(StdMngIReflect, InvokeMember, &gsig_IM_Str_BindingFlags_Binder_Obj_ArrObj_ArrParameterModifier_CultureInfo_ArrStr_RetObj, MNGSTDITF_IREFLECT_DECL__INVOKEMEMBER)
+ MNGSTDITF_DEFINE_METH(StdMngIReflect, get_UnderlyingSystemType, &gsig_IM_RetType, MNGSTDITF_IREFLECT_DECL__GET_UNDERLYING_SYSTEM_TYPE)
+MNGSTDITF_END_INTERFACE(StdMngIReflect)
+
+
+//
+// IExpando
+//
+
+#define MNGSTDITF_IEXPANDO_DECL__ADD_FIELD FCDECL2(Object*, AddField, Object* refThisUNSAFE, StringObject* refNameUNSAFE)
+#define MNGSTDITF_IEXPANDO_DECL__ADD_PROPERTY FCDECL2(Object*, AddProperty, Object* refThisUNSAFE, StringObject* refNameUNSAFE)
+#define MNGSTDITF_IEXPANDO_DECL__ADD_METHOD FCDECL3(Object*, AddMethod, Object* refThisUNSAFE, StringObject* refNameUNSAFE, Object* refDelegateUNSAFE)
+#define MNGSTDITF_IEXPANDO_DECL__REMOVE_MEMBER FCDECL2(void, RemoveMember, Object* refThisUNSAFE, Object* refMemberInfoUNSAFE)
+
+MNGSTDITF_BEGIN_INTERFACE(StdMngIExpando, g_ReflectionExpandoItfName, "System.Runtime.InteropServices.ComTypes.IExpando", g_CMExpandoToDispatchExMarshaler CUSTOM_MARSHALER_ASM, "IExpando", g_CMExpandoViewOfDispatchEx CUSTOM_MARSHALER_ASM, IID_IDispatchEx, TRUE)
+ MNGSTDITF_DEFINE_METH(StdMngIExpando, AddField, &gsig_IM_Str_RetFieldInfo, MNGSTDITF_IEXPANDO_DECL__ADD_FIELD)
+ MNGSTDITF_DEFINE_METH(StdMngIExpando, AddProperty, &gsig_IM_Str_RetPropertyInfo, MNGSTDITF_IEXPANDO_DECL__ADD_PROPERTY)
+ MNGSTDITF_DEFINE_METH(StdMngIExpando, AddMethod, &gsig_IM_Str_Delegate_RetMethodInfo,MNGSTDITF_IEXPANDO_DECL__ADD_METHOD)
+ MNGSTDITF_DEFINE_METH(StdMngIExpando, RemoveMember, &gsig_IM_MemberInfo_RetVoid, MNGSTDITF_IEXPANDO_DECL__REMOVE_MEMBER)
+MNGSTDITF_END_INTERFACE(StdMngIExpando)
+
+//
+// IEnumerator
+//
+
+#define MNGSTDITF_IENUMERATOR_DECL__MOVE_NEXT FCDECL1(FC_BOOL_RET, MoveNext, Object* refThisUNSAFE)
+#define MNGSTDITF_IENUMERATOR_DECL__GET_CURRENT FCDECL1(Object*, get_Current, Object* refThisUNSAFE)
+#define MNGSTDITF_IENUMERATOR_DECL__RESET FCDECL1(void, Reset, Object* refThisUNSAFE)
+
+MNGSTDITF_BEGIN_INTERFACE(StdMngIEnumerator, g_CollectionsEnumeratorClassName, "System.Runtime.InteropServices.ComTypes.IEnumerator", g_EnumeratorToEnumClassName CUSTOM_MARSHALER_ASM, "", "System.Runtime.InteropServices.CustomMarshalers.EnumeratorViewOfEnumVariant" CUSTOM_MARSHALER_ASM, IID_IEnumVARIANT, TRUE)
+ MNGSTDITF_DEFINE_METH(StdMngIEnumerator, MoveNext, &gsig_IM_RetBool, MNGSTDITF_IENUMERATOR_DECL__MOVE_NEXT)
+ MNGSTDITF_DEFINE_METH(StdMngIEnumerator, get_Current, &gsig_IM_RetObj, MNGSTDITF_IENUMERATOR_DECL__GET_CURRENT)
+ MNGSTDITF_DEFINE_METH(StdMngIEnumerator, Reset, &gsig_IM_RetVoid, MNGSTDITF_IENUMERATOR_DECL__RESET)
+MNGSTDITF_END_INTERFACE(StdMngIEnumerator)
+
+//
+// IEnumerable
+//
+
+#define MNGSTDITF_IENUMERABLE_DECL__GETENUMERATOR FCDECL1(Object*, GetEnumerator, Object* refThisUNSAFE)
+
+MNGSTDITF_BEGIN_INTERFACE(StdMngIEnumerable, g_CollectionsEnumerableItfName, "System.Runtime.InteropServices.ComTypes.IEnumerable", "System.Runtime.InteropServices.CustomMarshalers.EnumerableToDispatchMarshaler" CUSTOM_MARSHALER_ASM, "", "System.Runtime.InteropServices.CustomMarshalers.EnumerableViewOfDispatch" CUSTOM_MARSHALER_ASM, IID_IDispatch, FALSE)
+ MNGSTDITF_DEFINE_METH(StdMngIEnumerable, GetEnumerator, &gsig_IM_RetIEnumerator, MNGSTDITF_IENUMERABLE_DECL__GETENUMERATOR)
+MNGSTDITF_END_INTERFACE(StdMngIEnumerable)
diff --git a/src/vm/mscorlib.cpp b/src/vm/mscorlib.cpp
new file mode 100644
index 0000000000..4853991aea
--- /dev/null
+++ b/src/vm/mscorlib.cpp
@@ -0,0 +1,487 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+// This file defines tables for references between VM and mscorlib.
+//
+// When compiling crossgen, this file is compiled with the FEATURE_XXX define settings matching the target.
+// It allows us to strip features (e.g. reflection-only load) from crossgen without stripping them from the target.
+//
+
+#ifdef CROSSGEN_MSCORLIB
+// Use minimal set of headers for crossgen
+#include "windows.h"
+#include "corinfo.h"
+#else
+#include "common.h"
+#include "ecall.h"
+#endif // CROSSGEN_MSCORLIB
+
+#ifndef CROSSGEN_MSCORLIB
+//
+// Headers for all ECall entrypoints
+//
+#include "arraynative.h"
+#include "stringnative.h"
+#include "stringbuffer.h"
+#include "securityimperative.h"
+#include "securitystackwalk.h"
+#include "objectnative.h"
+#include "comdelegate.h"
+#include "customattribute.h"
+#include "comdynamic.h"
+#include "commethodrental.h"
+#ifndef FEATURE_LEGACYSURFACEAREA
+#include "nlsinfo.h"
+#endif
+#include "calendardata.h"
+#include "commodule.h"
+#include "marshalnative.h"
+#include "system.h"
+#include "comutilnative.h"
+#include "comsynchronizable.h"
+#include "floatclass.h"
+#include "decimal.h"
+#include "currency.h"
+#include "comdatetime.h"
+#include "comisolatedstorage.h"
+#include "securityconfig.h"
+#include "number.h"
+#include "compatibilityswitch.h"
+#ifdef FEATURE_REMOTING
+#include "remotingnative.h"
+#include "message.h"
+#include "stackbuildersink.h"
+#endif
+#include "debugdebugger.h"
+#include "assemblyname.hpp"
+#include "assemblynative.hpp"
+#include "rwlock.h"
+#include "comthreadpool.h"
+#include "comwaithandle.h"
+#include "nativeoverlapped.h"
+
+#include "proftoeeinterfaceimpl.h"
+
+#include "appdomainnative.hpp"
+#include "arrayhelpers.h"
+#include "runtimehandles.h"
+#include "reflectioninvocation.h"
+#include "managedmdimport.hpp"
+#include "synchronizationcontextnative.h"
+#include "newcompressedstack.h"
+#include "commemoryfailpoint.h"
+#include "typestring.h"
+#include "comdependenthandle.h"
+#include "weakreferencenative.h"
+#include "varargsnative.h"
+
+#ifndef FEATURE_CORECLR
+#include "confighelper.h"
+#include "console.h"
+#endif
+
+#ifdef MDA_SUPPORTED
+#include "mdaassistants.h"
+#endif
+
+#if defined(FEATURE_CRYPTO) || defined(FEATURE_LEGACYNETCFCRYPTO)
+#include "cryptography.h"
+#endif // FEATURE_CRYPTO || FEATURE_LEGACYNETCFCRYPTO
+
+#ifndef FEATURE_CORECLR
+#include "securityprincipal.h"
+#endif // !FEATURE_CORECLR
+
+#ifdef FEATURE_X509
+#include "x509certificate.h"
+#endif // FEATURE_X509
+
+#include "coverage.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "variant.h"
+#ifdef FEATURE_COMINTEROP_TLB_SUPPORT
+#include "comtypelibconverter.h"
+#endif
+#include "oavariant.h"
+#include "registration.h"
+#include "mngstdinterfaces.h"
+#include "extensibleclassfactory.h"
+#endif // FEATURE_COMINTEROP
+
+#include "stubhelpers.h"
+#include "ilmarshalers.h"
+
+#include "hostexecutioncontext.h"
+
+#ifdef FEATURE_MULTICOREJIT
+#include "multicorejit.h"
+#endif
+
+#ifdef FEATURE_COMINTEROP
+#include "clrprivtypecachereflectiononlywinrt.h"
+#endif
+
+#ifdef FEATURE_COMINTEROP
+#include "windowsruntimebufferhelper.h"
+#endif
+
+#endif // CROSSGEN_MSCORLIB
+
+
+
+
+#ifdef CROSSGEN_MSCORLIB
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// Duplicate definitions of constants and datastructures required to define the tables
+//
+
+#define NumItems(s) (sizeof(s) / sizeof(s[0]))
+
+#define GetEEFuncEntryPoint(pfn) 0x1001
+
+enum {
+ FCFuncFlag_EndOfArray = 0x01,
+ FCFuncFlag_HasSignature = 0x02,
+ FCFuncFlag_Unreferenced = 0x04, // Suppress unused fcall check
+ FCFuncFlag_QCall = 0x08, // QCall - mscorlib.dll to mscorwks.dll transition implemented as PInvoke
+};
+
+struct ECClass
+{
+ LPCSTR m_szClassName;
+ LPCSTR m_szNameSpace;
+ const LPVOID * m_pECFunc;
+};
+
+struct HardCodedMetaSig
+{
+ const BYTE* m_pMetaSig; // metasig prefixed with INT8 length:
+ // length > 0 - resolved, length < 0 - has unresolved type references
+};
+
+enum BinderClassID
+{
+#define TYPEINFO(e,ns,c,s,g,ia,ip,if,im,gv) CLASS__ ## e,
+#include "cortypeinfo.h"
+#undef TYPEINFO
+
+#define DEFINE_CLASS(i,n,s) CLASS__ ## i,
+#include "mscorlib.h"
+
+ CLASS__MSCORLIB_COUNT,
+
+ CLASS__VOID = CLASS__ELEMENT_TYPE_VOID,
+ CLASS__BOOLEAN = CLASS__ELEMENT_TYPE_BOOLEAN,
+ CLASS__CHAR = CLASS__ELEMENT_TYPE_CHAR,
+ CLASS__BYTE = CLASS__ELEMENT_TYPE_U1,
+ CLASS__SBYTE = CLASS__ELEMENT_TYPE_I1,
+ CLASS__INT16 = CLASS__ELEMENT_TYPE_I2,
+ CLASS__UINT16 = CLASS__ELEMENT_TYPE_U2,
+ CLASS__INT32 = CLASS__ELEMENT_TYPE_I4,
+ CLASS__UINT32 = CLASS__ELEMENT_TYPE_U4,
+ CLASS__INT64 = CLASS__ELEMENT_TYPE_I8,
+ CLASS__UINT64 = CLASS__ELEMENT_TYPE_U8,
+ CLASS__SINGLE = CLASS__ELEMENT_TYPE_R4,
+ CLASS__DOUBLE = CLASS__ELEMENT_TYPE_R8,
+ CLASS__STRING = CLASS__ELEMENT_TYPE_STRING,
+ CLASS__TYPED_REFERENCE = CLASS__ELEMENT_TYPE_TYPEDBYREF,
+ CLASS__INTPTR = CLASS__ELEMENT_TYPE_I,
+ CLASS__UINTPTR = CLASS__ELEMENT_TYPE_U,
+ CLASS__OBJECT = CLASS__ELEMENT_TYPE_OBJECT
+};
+
+struct MscorlibClassDescription
+{
+ LPCSTR nameSpace;
+ LPCSTR name;
+};
+
+struct MscorlibMethodDescription
+{
+ BinderClassID classID;
+ LPCSTR name;
+ const HardCodedMetaSig * sig;
+};
+
+struct MscorlibFieldDescription
+{
+ BinderClassID classID;
+ LPCSTR name;
+};
+
+#endif // CROSSGEN_MSCORLIB
+
+
+#ifdef CROSSGEN_MSCORLIB
+// When compiling crossgen this namespace creates the second version of the tables that matches the target
+namespace CrossGenMscorlib {
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// Hardcoded Meta-Sig
+//
+
+//
+// Helper enum with metasig lengths
+//
+// iterate over the metasig recursing into the complex types
+#define DEFINE_METASIG(body) body,
+#define METASIG_ATOM(x) + 1
+#define METASIG_RECURSE 1
+#define SM(varname, args, retval) gsigl_SM_ ## varname = 1 + 1 retval args
+#define IM(varname, args, retval) gsigl_IM_ ## varname = 1 + 1 retval args
+#define GM(varname, conv, n, args, retval) gsigl_GM_ ## varname = 1 + 1 + 1 retval args
+#define Fld(varname, val) gsigl_Fld_ ## varname = 1 val
+enum _gsigl {
+#include "metasig.h"
+};
+
+//
+// Helper enum with metasig argcount
+//
+// iterate over the metasig without recursing into the complex types
+#define DEFINE_METASIG(body) body,
+#define METASIG_ATOM(x) + 1
+#define METASIG_RECURSE 0
+#define SM(varname, args, retval) gsigc_SM_ ## varname = 0 args
+#define IM(varname, args, retval) gsigc_IM_ ## varname = 0 args
+#define GM(varname, conv, n, args, retval) gsigc_GM_ ## varname = 0 args
+#define Fld(varname, val) gsigc_Fld_ ## varname = 0
+enum _gsigc {
+#include "metasig.h"
+};
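+
+// Worked example (assuming metasig.h defines the instance signature
+// IM(RetObj) with no arguments and an object return, as referenced by
+// gsig_IM_RetObj in mngstditflist.h): the recursive pass above gives
+// gsigl_IM_RetObj = 1 + 1 + 1 = 3 (callconv byte + argcount byte + one
+// return-type atom), and the non-recursive pass gives gsigc_IM_RetObj = 0
+// (no arguments).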
+
+
+//
+// The actual array with the hardcoded metasig:
+//
+// There are 3 variations of the macros for Fields, Static Methods and Instance Methods.
+//
+// Each of them has 2 flavors: one for the fully baked signatures, and the other
+// for the signatures with unresolved type references
+//
+// The signatures with unresolved type references are marked with negative size,
+// and the pointer to them is non-const because it will be overwritten with
+// the pointer to the resolved signature at runtime.
+//
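+
+// Under the same IM(RetObj) assumption, the fully baked form produced by the
+// IM flavor below is roughly:
+//
+//   const BYTE gsige_IM_RetObj[] = { 3, // sig length: callconv + argcount + ret atom
+//       IMAGE_CEE_CS_CALLCONV_HASTHIS, 0 /* argcount */,
+//       ELEMENT_TYPE_OBJECT };
+//   const HardCodedMetaSig gsig_IM_RetObj = { gsige_IM_RetObj };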
+
+#define DEFINE_METASIG(body) body
+#define DEFINE_METASIG_T(body) _##body
+#define METASIG_ATOM(x) x,
+#define METASIG_RECURSE 1
+
+// define gsig_ ## varname before gsige_ ## varname to give a hint to the compiler about the desired layout
+
+#define SM(varname, args, retval) extern const BYTE gsige_SM_ ## varname[]; \
+ const HardCodedMetaSig gsig_SM_ ## varname = { gsige_SM_ ## varname }; \
+ const BYTE gsige_SM_ ## varname[] = { gsigl_SM_ ## varname, \
+ IMAGE_CEE_CS_CALLCONV_DEFAULT, gsigc_SM_ ## varname, retval args };
+
+#define IM(varname, args, retval) extern const BYTE gsige_IM_ ## varname[]; \
+ const HardCodedMetaSig gsig_IM_ ## varname = { gsige_IM_ ## varname }; \
+ const BYTE gsige_IM_ ## varname[] = { gsigl_IM_ ## varname, \
+ IMAGE_CEE_CS_CALLCONV_HASTHIS, gsigc_IM_ ## varname, retval args };
+
+#define GM(varname, conv, n, args, retval) extern const BYTE gsige_GM_ ## varname[]; \
+ const HardCodedMetaSig gsig_GM_ ## varname = { gsige_GM_ ## varname }; \
+ const BYTE gsige_GM_ ## varname[] = { gsigl_GM_ ## varname, \
+ conv | IMAGE_CEE_CS_CALLCONV_GENERIC, n, gsigc_GM_ ## varname, retval args };
+
+#define Fld(varname, val) extern const BYTE gsige_Fld_ ## varname[]; \
+ const HardCodedMetaSig gsig_Fld_ ## varname = { gsige_Fld_ ## varname }; \
+ const BYTE gsige_Fld_ ## varname[] = { gsigl_Fld_ ## varname, \
+ IMAGE_CEE_CS_CALLCONV_FIELD, val };
+
+#define _SM(varname, args, retval) extern const BYTE gsige_SM_ ## varname[]; \
+ HardCodedMetaSig gsig_SM_ ## varname = { gsige_SM_ ## varname }; \
+ const BYTE gsige_SM_ ## varname[] = { (BYTE) -gsigl_SM_ ## varname, \
+ IMAGE_CEE_CS_CALLCONV_DEFAULT, gsigc_SM_ ## varname, retval args };
+
+#define _IM(varname, args, retval) extern const BYTE gsige_IM_ ## varname[]; \
+ HardCodedMetaSig gsig_IM_ ## varname = { gsige_IM_ ## varname }; \
+ const BYTE gsige_IM_ ## varname[] = { (BYTE) -gsigl_IM_ ## varname, \
+ IMAGE_CEE_CS_CALLCONV_HASTHIS, gsigc_IM_ ## varname, retval args };
+
+#define _Fld(varname, val) extern const BYTE gsige_Fld_ ## varname[]; \
+ HardCodedMetaSig gsig_Fld_ ## varname = { gsige_Fld_ ## varname }; \
+ const BYTE gsige_Fld_ ## varname[] = { (BYTE) -gsigl_Fld_ ## varname, \
+ IMAGE_CEE_CS_CALLCONV_FIELD, val };
+
+#include "metasig.h"
+
+#undef _SM
+#undef _IM
+#undef _Fld
+
+
+
+#ifdef _DEBUG
+
+//
+// Make sure DEFINE_METASIG is used for signatures that do not reference other types
+//
+// counts number of type references in the signature and C_ASSERTs that
+// it is zero. An assertion failure results in error C2118: negative subscript.
+#define DEFINE_METASIG(body) body
+#define DEFINE_METASIG_T(body)
+#define METASIG_BODY(varname, types) C_ASSERT(types 0 == 0);
+#define METASIG_ATOM(x) 0+
+#define METASIG_RECURSE 1
+#define C(x) 1+
+#define g(x) 1+
+#include "metasig.h"
+
+//
+// Make sure DEFINE_METASIG_T is used only for signatures that reference
+// other types.
+//
+// counts number of type references in the signature and C_ASSERTs that
+// it is non zero. An assertion failure results in error C2118: negative subscript.
+#define DEFINE_METASIG(body)
+#define DEFINE_METASIG_T(body) body
+#define METASIG_BODY(varname, types) C_ASSERT(types 0 != 0);
+#define METASIG_ATOM(x) 0+
+#define METASIG_RECURSE 1
+#define C(x) 1+
+#define g(x) 1+
+#include "metasig.h"
+
+#endif
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// Mscorlib binder
+//
+
+// Extern definitions so that binder.cpp can see these tables
+extern const MscorlibClassDescription c_rgMscorlibClassDescriptions[];
+extern const USHORT c_nMscorlibClassDescriptions;
+
+extern const MscorlibMethodDescription c_rgMscorlibMethodDescriptions[];
+extern const USHORT c_nMscorlibMethodDescriptions;
+
+extern const MscorlibFieldDescription c_rgMscorlibFieldDescriptions[];
+extern const USHORT c_nMscorlibFieldDescriptions;
+
+const MscorlibClassDescription c_rgMscorlibClassDescriptions[] =
+{
+ #define TYPEINFO(e,ns,c,s,g,ia,ip,if,im,gv) { ns, c },
+ #include "cortypeinfo.h"
+ #undef TYPEINFO
+
+ #define DEFINE_CLASS(i,n,s) { g_ ## n ## NS, # s },
+ #include "namespace.h"
+ #include "mscorlib.h"
+
+ // Include all exception types here that are defined in mscorlib. Omit exceptions defined elsewhere.
+ #define DEFINE_EXCEPTION(ns, reKind, bHRformessage, ...) { ns , # reKind },
+ #define DEFINE_EXCEPTION_HR_WINRT_ONLY(ns, reKind, ...)
+ #define DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY(ns, reKind, assemblySimpleName, publicKeyToken, bHRformessage, ...)
+ #include "rexcep.h"
+};
+const USHORT c_nMscorlibClassDescriptions = NumItems(c_rgMscorlibClassDescriptions);
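+
+// For illustration, a DEFINE_CLASS entry from mscorlib.h such as
+// DEFINE_CLASS(ACTIVATOR, System, Activator) expands via the macro above to
+// the table entry:
+//
+//   { g_SystemNS, "Activator" },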
+
+#define gsig_NoSig (*(HardCodedMetaSig *)NULL)
+
+const MscorlibMethodDescription c_rgMscorlibMethodDescriptions[] =
+{
+ #define DEFINE_METHOD(c,i,s,g) { CLASS__ ## c , # s, & gsig_ ## g },
+ #include "mscorlib.h"
+};
+const USHORT c_nMscorlibMethodDescriptions = NumItems(c_rgMscorlibMethodDescriptions) + 1;
+
+const MscorlibFieldDescription c_rgMscorlibFieldDescriptions[] =
+{
+ #define DEFINE_FIELD(c,i,s) { CLASS__ ## c , # s },
+ #include "mscorlib.h"
+};
+const USHORT c_nMscorlibFieldDescriptions = NumItems(c_rgMscorlibFieldDescriptions) + 1;
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// ECalls
+//
+
+// When compiling crossgen, we only need the target version of the ecall tables
+#if !defined(CROSSGEN_COMPILE) || defined(CROSSGEN_MSCORLIB)
+
+#ifdef CROSSGEN_COMPILE
+
+#define QCFuncElement(name,impl) \
+ FCFuncFlag_QCall + FCFuncFlags(CORINFO_INTRINSIC_Illegal, ECall::InvalidDynamicFCallId), NULL, (LPVOID)name,
+
+#define FCFuncFlags(intrinsicID, dynamicID) \
+ (BYTE*)( (((BYTE)intrinsicID) << 16) )
+
+#else
+
+#define QCFuncElement(name,impl) \
+ FCFuncFlag_QCall + FCFuncFlags(CORINFO_INTRINSIC_Illegal, ECall::InvalidDynamicFCallId), (LPVOID)(impl), (LPVOID)name,
+
+#define FCFuncFlags(intrinsicID, dynamicID) \
+ (BYTE*)( (((BYTE)intrinsicID) << 16) + (((BYTE)dynamicID) << 24) )
+
+#endif
+
+#define FCFuncElement(name, impl) FCFuncFlags(CORINFO_INTRINSIC_Illegal, ECall::InvalidDynamicFCallId), \
+ (LPVOID)GetEEFuncEntryPoint(impl), (LPVOID)name,
+
+#define FCFuncElementSig(name,sig,impl) \
+ FCFuncFlag_HasSignature + FCFuncElement(name, impl) (LPVOID)sig,
+
+#define FCIntrinsic(name,impl,intrinsicID) FCFuncFlags(intrinsicID, ECall::InvalidDynamicFCallId), \
+ (LPVOID)GetEEFuncEntryPoint(impl), (LPVOID)name,
+
+#define FCIntrinsicSig(name,sig,impl,intrinsicID) \
+ FCFuncFlag_HasSignature + FCIntrinsic(name,impl,intrinsicID) (LPVOID)sig,
+
+#define FCDynamic(name,intrinsicID,dynamicID) FCFuncFlags(intrinsicID, dynamicID), \
+ NULL, (LPVOID)name,
+
+#define FCDynamicSig(name,sig,intrinsicID,dynamicID) \
+ FCFuncFlag_HasSignature + FCDynamic(name,intrinsicID,dynamicID) (LPVOID)sig,
+
+#define FCUnreferenced FCFuncFlag_Unreferenced +
+
+#define FCFuncStart(name) static const LPVOID name[] = {
+#define FCFuncEnd() FCFuncFlag_EndOfArray + FCFuncFlags(CORINFO_INTRINSIC_Illegal, ECall::InvalidDynamicFCallId) };
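+
+// ecalllist.h supplies the tables themselves; an entry has the following
+// shape (the names here are illustrative, not taken from ecalllist.h):
+//
+//   FCFuncStart(gSampleFuncs)
+//       FCFuncElement("SomeMethod", SomeClass::SomeMethodFCall)
+//       QCFuncElement("SomeQCall", SomeClass::SomeQCall)
+//   FCFuncEnd()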
+
+#include "ecalllist.h"
+
+
+// Extern definitions so that ecall.cpp can see these tables
+extern const ECClass c_rgECClasses[];
+extern const int c_nECClasses;
+
+const ECClass c_rgECClasses[] =
+{
+#define FCClassElement(name,namespace,funcs) {name, namespace, funcs},
+#include "ecalllist.h"
+}; // c_rgECClasses[]
+
+const int c_nECClasses = NumItems(c_rgECClasses);
+
+#endif // !CROSSGEN_COMPILE || CROSSGEN_MSCORLIB
+
+
+#ifdef CROSSGEN_MSCORLIB
+}; // namespace CrossGenMscorlib
+#endif
diff --git a/src/vm/mscorlib.h b/src/vm/mscorlib.h
new file mode 100644
index 0000000000..7bbc9c2942
--- /dev/null
+++ b/src/vm/mscorlib.h
@@ -0,0 +1,2201 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// This file contains the classes, methods, and fields used by the EE from mscorlib
+
+//
+// To use this, define one of the following macros & include the file like so:
+//
+// #define DEFINE_CLASS(id, nameSpace, stringName) CLASS__ ## id,
+// #define DEFINE_METHOD(classId, id, stringName, gSign)
+// #define DEFINE_FIELD(classId, id, stringName)
+// #include "mscorlib.h"
+//
+// Note: To determine if the namespace you want to use in DEFINE_CLASS is supported or not,
+// examine vm\namespace.h. If it is not present, define it there and then proceed to use it below.
+//
+
+
+//
+// Note: The SM_* and IM_* are signatures defined in file:metasig.h using IM() and SM() macros.
+//
+
+#ifndef DEFINE_CLASS
+#define DEFINE_CLASS(id, nameSpace, stringName)
+#endif
+
+#ifndef DEFINE_METHOD
+#define DEFINE_METHOD(classId, id, stringName, gSign)
+#endif
+
+#ifndef DEFINE_FIELD
+#define DEFINE_FIELD(classId, id, stringName)
+#endif
+
+#ifndef DEFINE_PROPERTY
+#define DEFINE_PROPERTY(classId, id, stringName, gSign) DEFINE_METHOD(classId, GET_ ## id, get_ ## stringName, IM_Ret ## gSign)
+#endif
+
+#ifndef DEFINE_STATIC_PROPERTY
+#define DEFINE_STATIC_PROPERTY(classId, id, stringName, gSign) DEFINE_METHOD(classId, GET_ ## id, get_ ## stringName, SM_Ret ## gSign)
+#endif
+
+#ifndef DEFINE_SET_PROPERTY
+#define DEFINE_SET_PROPERTY(classId, id, stringName, gSign) \
+ DEFINE_PROPERTY(classId, id, stringName, gSign) \
+ DEFINE_METHOD(classId, SET_ ## id, set_ ## stringName, IM_## gSign ## _RetVoid)
+#endif
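+
+// For illustration, DEFINE_PROPERTY(ARRAY, LENGTH, Length, Int) (used further
+// down in this file) expands via the macro above to:
+//
+//   DEFINE_METHOD(ARRAY, GET_LENGTH, get_Length, IM_RetInt)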
+
+//
+// DEFINE_CLASS_U and DEFINE_FIELD_U are debug-only checks to verify that the managed and unmanaged layouts are in sync
+//
+#ifndef DEFINE_CLASS_U
+#define DEFINE_CLASS_U(nameSpace, stringName, unmanagedType)
+#endif
+
+#ifndef DEFINE_FIELD_U
+#define DEFINE_FIELD_U(stringName, unmanagedContainingType, unmanagedOffset)
+#endif
+
+// NOTE: Make this window really wide if you want to read the table...
+
+DEFINE_CLASS(ACTIVATOR, System, Activator)
+
+DEFINE_CLASS(ACCESS_VIOLATION_EXCEPTION, System, AccessViolationException)
+DEFINE_FIELD(ACCESS_VIOLATION_EXCEPTION, IP, _ip)
+DEFINE_FIELD(ACCESS_VIOLATION_EXCEPTION, TARGET, _target)
+DEFINE_FIELD(ACCESS_VIOLATION_EXCEPTION, ACCESSTYPE, _accessType)
+
+DEFINE_CLASS_U(System, AppDomain, AppDomainBaseObject)
+DEFINE_FIELD_U(_domainManager, AppDomainBaseObject, m_pDomainManager)
+DEFINE_FIELD_U(_LocalStore, AppDomainBaseObject, m_LocalStore)
+DEFINE_FIELD_U(_FusionStore, AppDomainBaseObject, m_FusionTable)
+DEFINE_FIELD_U(_SecurityIdentity, AppDomainBaseObject, m_pSecurityIdentity)
+DEFINE_FIELD_U(_Policies, AppDomainBaseObject, m_pPolicies)
+DEFINE_FIELD_U(AssemblyLoad, AppDomainBaseObject, m_pAssemblyEventHandler)
+DEFINE_FIELD_U(_TypeResolve, AppDomainBaseObject, m_pTypeEventHandler)
+DEFINE_FIELD_U(_ResourceResolve, AppDomainBaseObject, m_pResourceEventHandler)
+DEFINE_FIELD_U(_AssemblyResolve, AppDomainBaseObject, m_pAsmResolveEventHandler)
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+DEFINE_FIELD_U(ReflectionOnlyAssemblyResolve, AppDomainBaseObject, m_pReflectionAsmResolveEventHandler)
+#endif
+#ifdef FEATURE_REMOTING
+DEFINE_FIELD_U(_DefaultContext, AppDomainBaseObject, m_pDefaultContext)
+#endif
+#if defined(FEATURE_CLICKONCE)
+DEFINE_FIELD_U(_activationContext, AppDomainBaseObject, m_pActivationContext)
+DEFINE_FIELD_U(_applicationIdentity, AppDomainBaseObject, m_pApplicationIdentity)
+#endif
+DEFINE_FIELD_U(_applicationTrust, AppDomainBaseObject, m_pApplicationTrust)
+#ifdef FEATURE_IMPERSONATION
+DEFINE_FIELD_U(_DefaultPrincipal, AppDomainBaseObject, m_pDefaultPrincipal)
+#endif // FEATURE_IMPERSONATION
+#ifdef FEATURE_REMOTING
+DEFINE_FIELD_U(_RemotingData, AppDomainBaseObject, m_pURITable)
+#endif
+DEFINE_FIELD_U(_processExit, AppDomainBaseObject, m_pProcessExitEventHandler)
+DEFINE_FIELD_U(_domainUnload, AppDomainBaseObject, m_pDomainUnloadEventHandler)
+DEFINE_FIELD_U(_unhandledException, AppDomainBaseObject, m_pUnhandledExceptionEventHandler)
+#ifdef FEATURE_APTCA
+DEFINE_FIELD_U(_aptcaVisibleAssemblies, AppDomainBaseObject, m_aptcaVisibleAssemblies)
+#endif
+DEFINE_FIELD_U(_compatFlags, AppDomainBaseObject, m_compatFlags)
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+DEFINE_FIELD_U(_firstChanceException, AppDomainBaseObject, m_pFirstChanceExceptionHandler)
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+DEFINE_FIELD_U(_pDomain, AppDomainBaseObject, m_pDomain)
+#ifdef FEATURE_CAS_POLICY
+DEFINE_FIELD_U(_PrincipalPolicy, AppDomainBaseObject, m_iPrincipalPolicy)
+#endif
+DEFINE_FIELD_U(_HasSetPolicy, AppDomainBaseObject, m_bHasSetPolicy)
+DEFINE_FIELD_U(_IsFastFullTrustDomain, AppDomainBaseObject, m_bIsFastFullTrustDomain)
+DEFINE_FIELD_U(_compatFlagsInitialized, AppDomainBaseObject, m_compatFlagsInitialized)
+
+DEFINE_CLASS(APP_DOMAIN, System, AppDomain)
+DEFINE_METHOD(APP_DOMAIN, PREPARE_DATA_FOR_SETUP,PrepareDataForSetup,SM_Str_AppDomainSetup_Evidence_Evidence_IntPtr_Str_ArrStr_ArrStr_RetObj)
+DEFINE_METHOD(APP_DOMAIN, SETUP,Setup,SM_Obj_RetObj)
+DEFINE_METHOD(APP_DOMAIN, ON_ASSEMBLY_LOAD, OnAssemblyLoadEvent, IM_Assembly_RetVoid)
+DEFINE_METHOD(APP_DOMAIN, ON_RESOURCE_RESOLVE, OnResourceResolveEvent, IM_Assembly_Str_RetAssembly)
+DEFINE_METHOD(APP_DOMAIN, ON_TYPE_RESOLVE, OnTypeResolveEvent, IM_Assembly_Str_RetAssembly)
+DEFINE_METHOD(APP_DOMAIN, ON_ASSEMBLY_RESOLVE, OnAssemblyResolveEvent, IM_Assembly_Str_RetAssembly)
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+DEFINE_METHOD(APP_DOMAIN, ON_REFLECTION_ONLY_ASSEMBLY_RESOLVE, OnReflectionOnlyAssemblyResolveEvent, IM_Assembly_Str_RetAssembly)
+#ifdef FEATURE_COMINTEROP
+DEFINE_METHOD(APP_DOMAIN, ON_REFLECTION_ONLY_NAMESPACE_RESOLVE, OnReflectionOnlyNamespaceResolveEvent, IM_Assembly_Str_RetArrAssembly)
+#endif //FEATURE_COMINTEROP
+DEFINE_METHOD(APP_DOMAIN, ENABLE_RESOLVE_ASSEMBLIES_FOR_INTROSPECTION, EnableResolveAssembliesForIntrospection, IM_Str_RetVoid)
+#endif //FEATURE_REFLECTION_ONLY_LOAD
+#ifdef FEATURE_COMINTEROP
+DEFINE_METHOD(APP_DOMAIN, ON_DESIGNER_NAMESPACE_RESOLVE, OnDesignerNamespaceResolveEvent, IM_Str_RetArrStr)
+#endif //FEATURE_COMINTEROP
+DEFINE_METHOD(APP_DOMAIN, SETUP_DOMAIN, SetupDomain, IM_Bool_Str_Str_ArrStr_ArrStr_RetVoid)
+#ifdef FEATURE_FUSION
+DEFINE_METHOD(APP_DOMAIN, SETUP_LOADER_OPTIMIZATION,SetupLoaderOptimization, IM_LoaderOptimization_RetVoid)
+DEFINE_METHOD(APP_DOMAIN, SET_DOMAIN_CONTEXT, InternalSetDomainContext, IM_Str_RetVoid)
+#endif // FEATURE_FUSION
+#ifdef FEATURE_REMOTING
+DEFINE_METHOD(APP_DOMAIN, CREATE_DOMAIN, CreateDomain, SM_Str_Evidence_AppDomainSetup_RetAppDomain)
+#ifdef FEATURE_CAS_POLICY
+DEFINE_METHOD(APP_DOMAIN, CREATE_DOMAINEX, CreateDomain, SM_Str_Evidence_Str_Str_Bool_RetAppDomain)
+#endif // FEATURE_CAS_POLICY
+DEFINE_METHOD(APP_DOMAIN, VAL_CREATE_DOMAIN, InternalCreateDomain, SM_Str_RetAppDomain)
+#endif
+#ifdef FEATURE_REMOTING
+DEFINE_METHOD(APP_DOMAIN, MARSHAL_OBJECT, MarshalObject, SM_Obj_RetArrByte)
+DEFINE_METHOD(APP_DOMAIN, MARSHAL_OBJECTS, MarshalObjects, SM_Obj_Obj_RefArrByte_RetArrByte)
+DEFINE_METHOD(APP_DOMAIN, UNMARSHAL_OBJECT, UnmarshalObject, SM_ArrByte_RetObj)
+DEFINE_METHOD(APP_DOMAIN, UNMARSHAL_OBJECTS, UnmarshalObjects, SM_ArrByte_ArrByte_RefObj_RetObj)
+#endif
+#ifdef FEATURE_FUSION
+DEFINE_METHOD(APP_DOMAIN, TURN_ON_BINDING_REDIRECTS, TurnOnBindingRedirects, IM_RetVoid)
+#endif // FEATURE_FUSION
+DEFINE_METHOD(APP_DOMAIN, CREATE_APP_DOMAIN_MANAGER, CreateAppDomainManager, IM_RetVoid)
+DEFINE_METHOD(APP_DOMAIN, INITIALIZE_COMPATIBILITY_FLAGS, InitializeCompatibilityFlags, IM_RetVoid)
+DEFINE_METHOD(APP_DOMAIN, INITIALIZE_DOMAIN_SECURITY, InitializeDomainSecurity, IM_Evidence_Evidence_Bool_IntPtr_Bool_RetVoid)
+#ifdef FEATURE_CLICKONCE
+DEFINE_METHOD(APP_DOMAIN, SETUP_DEFAULT_CLICKONCE_DOMAIN, SetupDefaultClickOnceDomain, IM_Str_ArrStr_ArrStr_RetVoid)
+DEFINE_METHOD(APP_DOMAIN, ACTIVATE_APPLICATION, ActivateApplication, IM_RetInt)
+#endif // FEATURE_CLICKONCE
+#ifdef FEATURE_APTCA
+DEFINE_METHOD(APP_DOMAIN, IS_ASSEMBLY_ON_APTCA_VISIBLE_LIST, IsAssemblyOnAptcaVisibleList, IM_Assembly_RetBool)
+DEFINE_METHOD(APP_DOMAIN, IS_ASSEMBLY_ON_APTCA_VISIBLE_LIST_RAW, IsAssemblyOnAptcaVisibleListRaw, IM_PtrChar_Int_PtrByte_Int_RetBool)
+#endif // FEATURE_APTCA
+#ifndef FEATURE_CORECLR
+DEFINE_METHOD(APP_DOMAIN, PAUSE, Pause, SM_RetVoid)
+DEFINE_METHOD(APP_DOMAIN, RESUME, Resume, SM_RetVoid)
+DEFINE_CLASS(APPDOMAIN_MANAGER, System, AppDomainManager)
+DEFINE_PROPERTY(APPDOMAIN_MANAGER, ENTRY_ASSEMBLY, EntryAssembly, AssemblyBase)
+#endif // FEATURE_CORECLR
+
+DEFINE_CLASS(CLEANUP_WORK_LIST, StubHelpers, CleanupWorkList)
+
+#ifdef FEATURE_COMINTEROP
+// Define earlier in mscorlib.h to avoid BinderClassID to const BYTE truncation warning
+DEFINE_CLASS(DATETIMENATIVE, StubHelpers, DateTimeNative)
+DEFINE_CLASS(TYPENAMENATIVE, StubHelpers, TypeNameNative)
+
+DEFINE_CLASS_U(StubHelpers, TypeNameNative, TypeNameNative)
+DEFINE_FIELD_U(typeName, TypeNameNative, typeName)
+DEFINE_FIELD_U(typeKind, TypeNameNative, typeKind)
+
+#endif
+
+DEFINE_CLASS_U(Policy, ApplicationTrust, ApplicationTrustObject)
+
+#ifdef FEATURE_CLICKONCE
+DEFINE_FIELD_U(m_appId, ApplicationTrustObject, _appId)
+DEFINE_FIELD_U(m_extraInfo, ApplicationTrustObject, _extraInfo)
+DEFINE_FIELD_U(m_elExtraInfo, ApplicationTrustObject, _elExtraInfo)
+#endif // FEATURE_CLICKONCE
+
+DEFINE_FIELD_U(m_psDefaultGrant, ApplicationTrustObject, _psDefaultGrant)
+DEFINE_FIELD_U(m_fullTrustAssemblies, ApplicationTrustObject, _fullTrustAssemblies)
+DEFINE_FIELD_U(m_grantSetSpecialFlags, ApplicationTrustObject, _grantSetSpecialFlags)
+
+#ifdef FEATURE_CLICKONCE
+DEFINE_FIELD_U(m_appTrustedToRun, ApplicationTrustObject, _appTrustedToRun)
+DEFINE_FIELD_U(m_persist, ApplicationTrustObject, _persist)
+#endif // FEATURE_CLICKONCE
+
+DEFINE_CLASS_U(Policy, PolicyStatement, PolicyStatementObject)
+DEFINE_FIELD_U(m_permSet, PolicyStatementObject, _permSet)
+DEFINE_FIELD_U(m_attributes, PolicyStatementObject, _attributes)
+
+DEFINE_CLASS(APPDOMAIN_SETUP, System, AppDomainSetup)
+DEFINE_CLASS_U(System, AppDomainSetup, AppDomainSetupObject)
+DEFINE_FIELD_U(_Entries, AppDomainSetupObject, m_Entries)
+DEFINE_FIELD_U(_AppBase, AppDomainSetupObject, m_AppBase)
+DEFINE_FIELD_U(_AppDomainInitializer, AppDomainSetupObject, m_AppDomainInitializer)
+DEFINE_FIELD_U(_AppDomainInitializerArguments, AppDomainSetupObject, m_AppDomainInitializerArguments)
+#ifdef FEATURE_CLICKONCE
+DEFINE_FIELD_U(_ActivationArguments, AppDomainSetupObject, m_ActivationArguments)
+#endif // FEATURE_CLICKONCE
+DEFINE_FIELD_U(_ApplicationTrust, AppDomainSetupObject, m_ApplicationTrust)
+DEFINE_FIELD_U(_ConfigurationBytes, AppDomainSetupObject, m_ConfigurationBytes)
+DEFINE_FIELD_U(_AppDomainManagerAssembly, AppDomainSetupObject, m_AppDomainManagerAssembly)
+DEFINE_FIELD_U(_AppDomainManagerType, AppDomainSetupObject, m_AppDomainManagerType)
+#ifdef FEATURE_APTCA
+DEFINE_FIELD_U(_AptcaVisibleAssemblies, AppDomainSetupObject, m_AptcaVisibleAssemblies)
+#endif
+DEFINE_FIELD_U(_CompatFlags, AppDomainSetupObject, m_CompatFlags)
+DEFINE_FIELD_U(_TargetFrameworkName, AppDomainSetupObject, m_TargetFrameworkName)
+DEFINE_FIELD_U(_LoaderOptimization, AppDomainSetupObject, m_LoaderOptimization)
+#ifndef FEATURE_CORECLR
+DEFINE_FIELD_U(_AppDomainSortingSetupInfo, AppDomainSetupObject, m_AppDomainSortingSetupInfo)
+#endif // FEATURE_CORECLR
+#ifdef FEATURE_COMINTEROP
+DEFINE_FIELD_U(_DisableInterfaceCache, AppDomainSetupObject, m_DisableInterfaceCache)
+#endif // FEATURE_COMINTEROP
+DEFINE_FIELD_U(_CheckedForTargetFrameworkName, AppDomainSetupObject, m_CheckedForTargetFrameworkName)
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+DEFINE_FIELD_U(_UseRandomizedStringHashing, AppDomainSetupObject, m_UseRandomizedStringHashing)
+#endif
+
+DEFINE_CLASS(ARG_ITERATOR, System, ArgIterator)
+DEFINE_CLASS_U(System, ArgIterator, VARARGS) // Includes a SigPointer.
+DEFINE_METHOD(ARG_ITERATOR, CTOR2, .ctor, IM_RuntimeArgumentHandle_PtrVoid_RetVoid)
+
+DEFINE_CLASS(ARGUMENT_HANDLE, System, RuntimeArgumentHandle)
+
+DEFINE_CLASS(ARRAY, System, Array)
+DEFINE_PROPERTY(ARRAY, LENGTH, Length, Int)
+DEFINE_METHOD(ARRAY, GET_DATA_PTR_OFFSET_INTERNAL, GetDataPtrOffsetInternal, IM_RetInt)
+
+#ifdef FEATURE_NONGENERIC_COLLECTIONS
+DEFINE_CLASS(ARRAY_LIST, Collections, ArrayList)
+DEFINE_METHOD(ARRAY_LIST, CTOR, .ctor, IM_RetVoid)
+DEFINE_METHOD(ARRAY_LIST, ADD, Add, IM_Obj_RetInt)
+#endif // FEATURE_NONGENERIC_COLLECTIONS
+
+DEFINE_CLASS(ARRAY_WITH_OFFSET, Interop, ArrayWithOffset)
+DEFINE_FIELD(ARRAY_WITH_OFFSET, M_ARRAY, m_array)
+DEFINE_FIELD(ARRAY_WITH_OFFSET, M_OFFSET, m_offset)
+DEFINE_FIELD(ARRAY_WITH_OFFSET, M_COUNT, m_count)
+
+
+DEFINE_CLASS(ASSEMBLY_BUILDER, ReflectionEmit, AssemblyBuilder)
+DEFINE_CLASS(INTERNAL_ASSEMBLY_BUILDER, ReflectionEmit, InternalAssemblyBuilder)
+
+DEFINE_CLASS(ASSEMBLY_HASH_ALGORITHM, Assemblies, AssemblyHashAlgorithm)
+DEFINE_CLASS(PORTABLE_EXECUTABLE_KINDS, Reflection, PortableExecutableKinds)
+DEFINE_CLASS(IMAGE_FILE_MACHINE, Reflection, ImageFileMachine)
+
+DEFINE_CLASS_U(Reflection, AssemblyName, AssemblyNameBaseObject)
+DEFINE_FIELD_U(_Name, AssemblyNameBaseObject, m_pSimpleName)
+DEFINE_FIELD_U(_PublicKey, AssemblyNameBaseObject, m_pPublicKey)
+DEFINE_FIELD_U(_PublicKeyToken, AssemblyNameBaseObject, m_pPublicKeyToken)
+DEFINE_FIELD_U(_CultureInfo, AssemblyNameBaseObject, m_pCultureInfo)
+DEFINE_FIELD_U(_CodeBase, AssemblyNameBaseObject, m_pCodeBase)
+DEFINE_FIELD_U(_Version, AssemblyNameBaseObject, m_pVersion)
+#ifdef FEATURE_SERIALIZATION
+DEFINE_FIELD_U(m_siInfo, AssemblyNameBaseObject, m_siInfo)
+#endif
+DEFINE_FIELD_U(_HashForControl, AssemblyNameBaseObject, m_HashForControl)
+DEFINE_FIELD_U(_HashAlgorithm, AssemblyNameBaseObject, m_HashAlgorithm)
+DEFINE_FIELD_U(_HashAlgorithmForControl, AssemblyNameBaseObject, m_HashAlgorithmForControl)
+DEFINE_FIELD_U(_VersionCompatibility, AssemblyNameBaseObject, m_VersionCompatibility)
+DEFINE_FIELD_U(_Flags, AssemblyNameBaseObject, m_Flags)
+DEFINE_CLASS(ASSEMBLY_NAME, Reflection, AssemblyName)
+DEFINE_METHOD(ASSEMBLY_NAME, INIT, Init, IM_Str_ArrB_ArrB_Ver_CI_AHA_AVC_Str_ANF_SNKP_RetV)
+DEFINE_METHOD(ASSEMBLY_NAME, SET_PROC_ARCH_INDEX, SetProcArchIndex, IM_PEK_IFM_RetV)
+#ifdef FEATURE_APTCA
+DEFINE_METHOD(ASSEMBLY_NAME, GET_NAME_WITH_PUBLIC_KEY, GetNameWithPublicKey, IM_RetStr)
+#endif // FEATURE_APTCA
+
+DEFINE_CLASS_U(System, Version, VersionBaseObject)
+DEFINE_FIELD_U(_Major, VersionBaseObject, m_Major)
+DEFINE_FIELD_U(_Minor, VersionBaseObject, m_Minor)
+DEFINE_FIELD_U(_Build, VersionBaseObject, m_Build)
+DEFINE_FIELD_U(_Revision, VersionBaseObject, m_Revision)
+DEFINE_CLASS(VERSION, System, Version)
+DEFINE_METHOD(VERSION, CTOR, .ctor, IM_Int_Int_Int_Int_RetVoid)
+
+DEFINE_CLASS(ASSEMBLY_VERSION_COMPATIBILITY, Assemblies, AssemblyVersionCompatibility)
+
+DEFINE_CLASS(ASSEMBLY_NAME_FLAGS, Reflection, AssemblyNameFlags)
+
+// ASSEMBLYBASE is System.Reflection.Assembly while ASSEMBLY is System.Reflection.RuntimeAssembly
+// Maybe we should reverse these two names
+DEFINE_CLASS(ASSEMBLYBASE, Reflection, Assembly)
+
+DEFINE_CLASS_U(Reflection, RuntimeAssembly, AssemblyBaseObject)
+DEFINE_FIELD_U(_ModuleResolve, AssemblyBaseObject, m_pModuleEventHandler)
+DEFINE_FIELD_U(m_fullname, AssemblyBaseObject, m_fullname)
+DEFINE_FIELD_U(m_syncRoot, AssemblyBaseObject, m_pSyncRoot)
+DEFINE_FIELD_U(m_assembly, AssemblyBaseObject, m_pAssembly)
+#ifndef FEATURE_CORECLR
+DEFINE_FIELD_U(m_flags, AssemblyBaseObject, m_flags)
+#endif
+DEFINE_CLASS(ASSEMBLY, Reflection, RuntimeAssembly)
+DEFINE_FIELD(ASSEMBLY, HANDLE, m_assembly)
+DEFINE_METHOD(ASSEMBLY, GET_NAME, GetName, IM_RetAssemblyName)
+#ifdef FEATURE_APTCA
+DEFINE_METHOD(ASSEMBLY, GET_NAME_FOR_CONDITIONAL_APTCA, GetNameForConditionalAptca, IM_RetStr)
+#endif // FEATURE_APTCA
+#ifdef FEATURE_FUSION
+DEFINE_METHOD(ASSEMBLY, LOAD_WITH_PARTIAL_NAME_HACK, LoadWithPartialNameHack, SM_Str_Bool_RetAssembly)
+#endif // FEATURE_FUSION
+DEFINE_METHOD(ASSEMBLY, ON_MODULE_RESOLVE, OnModuleResolveEvent, IM_Str_RetModule)
+#ifdef FEATURE_FUSION
+DEFINE_METHOD(ASSEMBLY, DEMAND_PERMISSION, DemandPermission, SM_Str_Bool_Int_RetV)
+#endif
+
+#ifdef FEATURE_CAS_POLICY
+DEFINE_CLASS(ASSEMBLY_EVIDENCE_FACTORY, Policy, AssemblyEvidenceFactory)
+DEFINE_METHOD(ASSEMBLY_EVIDENCE_FACTORY, UPGRADE_SECURITY_IDENTITY, UpgradeSecurityIdentity, SM_Evidence_Asm_RetEvidence)
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_COMINTEROP_REGISTRATION
+DEFINE_CLASS(ASSEMBLY_REGISTRATION_FLAGS, Interop, AssemblyRegistrationFlags)
+#endif // FEATURE_COMINTEROP_REGISTRATION
+
+#ifdef FEATURE_REMOTING
+DEFINE_CLASS(ACTIVATION_SERVICES, Activation, ActivationServices)
+DEFINE_METHOD(ACTIVATION_SERVICES, IS_CURRENT_CONTEXT_OK, IsCurrentContextOK, SM_Class_ArrObject_Bool_RetMarshalByRefObject)
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+DEFINE_METHOD(ACTIVATION_SERVICES, CREATE_OBJECT_FOR_COM, CreateObjectForCom, SM_Class_ArrObject_Bool_RetMarshalByRefObject)
+
+#endif // FEATURE_CLASSIC_COMINTEROP
+#endif // FEATURE_REMOTING
+
+DEFINE_CLASS(ASYNCCALLBACK, System, AsyncCallback)
+DEFINE_CLASS(ATTRIBUTE, System, Attribute)
+
+
+DEFINE_CLASS(BINDER, Reflection, Binder)
+DEFINE_METHOD(BINDER, CHANGE_TYPE, ChangeType, IM_Obj_Type_CultureInfo_RetObj)
+
+DEFINE_CLASS(BINDING_FLAGS, Reflection, BindingFlags)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(BSTR_WRAPPER, Interop, BStrWrapper)
+#endif // FEATURE_COMINTEROP
+
+DEFINE_CLASS_U(System, RuntimeType, ReflectClassBaseObject)
+DEFINE_FIELD_U(m_cache, ReflectClassBaseObject, m_cache)
+DEFINE_FIELD_U(m_handle, ReflectClassBaseObject, m_typeHandle)
+DEFINE_FIELD_U(m_keepalive, ReflectClassBaseObject, m_keepalive)
+#ifdef FEATURE_APPX
+DEFINE_FIELD_U(m_invocationFlags, ReflectClassBaseObject, m_invocationFlags)
+#endif
+DEFINE_CLASS(CLASS, System, RuntimeType)
+DEFINE_FIELD(CLASS, TYPEHANDLE, m_handle)
+DEFINE_METHOD(CLASS, GET_PROPERTIES, GetProperties, IM_BindingFlags_RetArrPropertyInfo)
+DEFINE_METHOD(CLASS, GET_FIELDS, GetFields, IM_BindingFlags_RetArrFieldInfo)
+DEFINE_METHOD(CLASS, GET_METHODS, GetMethods, IM_BindingFlags_RetArrMethodInfo)
+DEFINE_METHOD(CLASS, INVOKE_MEMBER, InvokeMember, IM_Str_BindingFlags_Binder_Obj_ArrObj_ArrParameterModifier_CultureInfo_ArrStr_RetObj)
+#if defined(FEATURE_CLASSIC_COMINTEROP) && defined(FEATURE_REMOTING)
+DEFINE_METHOD(CLASS, FORWARD_CALL_TO_INVOKE, ForwardCallToInvokeMember, IM_Str_BindingFlags_Obj_ArrInt_RefMessageData_RetObj)
+#endif
+DEFINE_METHOD(CLASS, GET_METHOD_BASE, GetMethodBase, SM_RuntimeType_RuntimeMethodHandleInternal_RetMethodBase)
+DEFINE_METHOD(CLASS, GET_FIELD_INFO, GetFieldInfo, SM_RuntimeType_IRuntimeFieldInfo_RetFieldInfo)
+DEFINE_METHOD(CLASS, GET_PROPERTY_INFO, GetPropertyInfo, SM_RuntimeType_Int_RetPropertyInfo)
+
+DEFINE_CLASS(CLASS_INTROSPECTION_ONLY, System, ReflectionOnlyType)
+
+DEFINE_CLASS(CODE_ACCESS_PERMISSION, Security, CodeAccessPermission)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS_U(System, __ComObject, ComObject)
+DEFINE_FIELD_U(m_ObjectToDataMap, ComObject, m_ObjectToDataMap)
+DEFINE_CLASS(COM_OBJECT, System, __ComObject)
+DEFINE_METHOD(COM_OBJECT, RELEASE_ALL_DATA, ReleaseAllData, IM_RetVoid)
+DEFINE_METHOD(COM_OBJECT, GET_EVENT_PROVIDER, GetEventProvider, IM_Class_RetObj)
+
+DEFINE_CLASS(RUNTIME_CLASS, WinRT, RuntimeClass)
+
+#ifdef FEATURE_COMINTEROP_TLB_SUPPORT
+DEFINE_CLASS(ITYPE_LIB_IMPORTER_NOTIFY_SINK, Interop, ITypeLibImporterNotifySink)
+DEFINE_CLASS(ITYPE_LIB_EXPORTER_NOTIFY_SINK, Interop, ITypeLibExporterNotifySink)
+#endif //FEATURE_COMINTEROP_TLB_SUPPORT
+
+#endif // FEATURE_COMINTEROP
+
+DEFINE_CLASS_U(Interop, CriticalHandle, CriticalHandle)
+#ifdef _DEBUG
+DEFINE_FIELD_U(_stackTrace, CriticalHandle, m_debugStackTrace)
+#endif
+DEFINE_FIELD_U(handle, CriticalHandle, m_handle)
+DEFINE_FIELD_U(_isClosed, CriticalHandle, m_isClosed)
+DEFINE_CLASS(CRITICAL_HANDLE, Interop, CriticalHandle)
+DEFINE_FIELD(CRITICAL_HANDLE, HANDLE, handle)
+DEFINE_METHOD(CRITICAL_HANDLE, RELEASE_HANDLE, ReleaseHandle, IM_RetBool)
+DEFINE_METHOD(CRITICAL_HANDLE, GET_IS_INVALID, get_IsInvalid, IM_RetBool)
+DEFINE_METHOD(CRITICAL_HANDLE, DISPOSE, Dispose, IM_RetVoid)
+DEFINE_METHOD(CRITICAL_HANDLE, DISPOSE_BOOL, Dispose, IM_Bool_RetVoid)
+
+DEFINE_CLASS(CRITICAL_FINALIZER_OBJECT, ConstrainedExecution, CriticalFinalizerObject)
+DEFINE_METHOD(CRITICAL_FINALIZER_OBJECT, FINALIZE, Finalize, IM_RetVoid)
+
+DEFINE_CLASS_U(Reflection, RuntimeConstructorInfo, NoClass)
+DEFINE_FIELD_U(m_handle, ReflectMethodObject, m_pMD)
+DEFINE_CLASS(CONSTRUCTOR, Reflection, RuntimeConstructorInfo)
+
+DEFINE_CLASS_U(System, RuntimeMethodInfoStub, ReflectMethodObject)
+DEFINE_FIELD_U(m_value, ReflectMethodObject, m_pMD)
+DEFINE_CLASS(STUBMETHODINFO, System, RuntimeMethodInfoStub)
+DEFINE_FIELD(STUBMETHODINFO, HANDLE, m_value)
+
+DEFINE_CLASS(CONSTRUCTOR_INFO, Reflection, ConstructorInfo)
+
+DEFINE_CLASS_U(Reflection, CustomAttributeEncodedArgument, CustomAttributeValue)
+DEFINE_FIELD_U(m_primitiveValue, CustomAttributeValue, m_rawValue)
+DEFINE_FIELD_U(m_arrayValue, CustomAttributeValue, m_value)
+DEFINE_FIELD_U(m_stringValue, CustomAttributeValue, m_enumOrTypeName)
+DEFINE_FIELD_U(m_type, CustomAttributeValue, m_type)
+DEFINE_CLASS(CUSTOM_ATTRIBUTE_ENCODED_ARGUMENT, Reflection, CustomAttributeEncodedArgument)
+
+DEFINE_CLASS_U(Reflection, CustomAttributeNamedParameter, CustomAttributeNamedArgument)
+DEFINE_FIELD_U(m_argumentName, CustomAttributeNamedArgument, m_argumentName)
+DEFINE_FIELD_U(m_fieldOrProperty, CustomAttributeNamedArgument, m_propertyOrField)
+DEFINE_FIELD_U(m_padding, CustomAttributeNamedArgument, m_padding)
+DEFINE_FIELD_U(m_type, CustomAttributeNamedArgument, m_type)
+DEFINE_FIELD_U(m_encodedArgument, CustomAttributeNamedArgument, m_value)
+
+DEFINE_CLASS_U(Reflection, CustomAttributeCtorParameter, CustomAttributeArgument)
+DEFINE_FIELD_U(m_type, CustomAttributeArgument, m_type)
+DEFINE_FIELD_U(m_encodedArgument, CustomAttributeArgument, m_value)
+
+DEFINE_CLASS_U(Reflection, CustomAttributeType, CustomAttributeType)
+DEFINE_FIELD_U(m_enumName, CustomAttributeType, m_enumName)
+DEFINE_FIELD_U(m_encodedType, CustomAttributeType, m_tag)
+DEFINE_FIELD_U(m_encodedEnumType, CustomAttributeType, m_enumType)
+DEFINE_FIELD_U(m_encodedArrayType, CustomAttributeType, m_arrayType)
+DEFINE_FIELD_U(m_padding, CustomAttributeType, m_padding)
+
+#ifdef FEATURE_REMOTING
+DEFINE_CLASS_U(Contexts, Context, ContextBaseObject)
+DEFINE_FIELD_U(_ctxProps, ContextBaseObject, m_ctxProps)
+DEFINE_FIELD_U(_dphCtx, ContextBaseObject, m_dphCtx)
+DEFINE_FIELD_U(_localDataStore, ContextBaseObject, m_localDataStore)
+DEFINE_FIELD_U(_serverContextChain, ContextBaseObject, m_serverContextChain)
+DEFINE_FIELD_U(_clientContextChain, ContextBaseObject, m_clientContextChain)
+DEFINE_FIELD_U(_appDomain, ContextBaseObject, m_exposedAppDomain)
+DEFINE_FIELD_U(_ctxStatics, ContextBaseObject, m_ctxStatics)
+DEFINE_FIELD_U(_internalContext, ContextBaseObject, m_internalContext)
+DEFINE_FIELD_U(_ctxID, ContextBaseObject, _ctxID)
+DEFINE_FIELD_U(_ctxFlags, ContextBaseObject, _ctxFlags)
+DEFINE_FIELD_U(_numCtxProps, ContextBaseObject, _numCtxProps)
+DEFINE_FIELD_U(_ctxStaticsCurrentBucket, ContextBaseObject, _ctxStaticsCurrentBucket)
+DEFINE_FIELD_U(_ctxStaticsFreeIndex, ContextBaseObject, _ctxStaticsFreeIndex)
+DEFINE_CLASS(CONTEXT, Contexts, Context)
+DEFINE_METHOD(CONTEXT, CALLBACK, DoCallBackFromEE, SM_IntPtr_IntPtr_Int_RetVoid)
+DEFINE_METHOD(CONTEXT, RESERVE_SLOT, ReserveSlot, IM_RetInt)
+#endif
+
+DEFINE_CLASS(CONTEXT_BOUND_OBJECT, System, ContextBoundObject)
+
+
+#if defined(FEATURE_CRYPTO) || defined(FEATURE_LEGACYNETCFCRYPTO)
+DEFINE_CLASS(CSP_PARAMETERS, Cryptography, CspParameters)
+
+DEFINE_FIELD(CSP_PARAMETERS, PROVIDER_TYPE, ProviderType)
+DEFINE_FIELD(CSP_PARAMETERS, PROVIDER_NAME, ProviderName)
+DEFINE_FIELD(CSP_PARAMETERS, KEY_CONTAINER_NAME, KeyContainerName)
+DEFINE_FIELD(CSP_PARAMETERS, FLAGS, m_flags)
+#endif //FEATURE_CRYPTO || FEATURE_LEGACYNETCFCRYPTO
+
+#if defined(FEATURE_X509) || defined(FEATURE_CRYPTO) || defined(FEATURE_LEGACYNETCFCRYPTO)
+DEFINE_CLASS(CRYPTO_EXCEPTION, Cryptography, CryptographicException)
+DEFINE_METHOD(CRYPTO_EXCEPTION, THROW, ThrowCryptographicException, SM_Int_RetVoid)
+#endif // FEATURE_X509 || FEATURE_CRYPTO || FEATURE_LEGACYNETCFCRYPTO
+
+#ifndef FEATURE_CORECLR
+DEFINE_CLASS_U(Globalization, AppDomainSortingSetupInfo, AppDomainSortingSetupInfoObject)
+DEFINE_FIELD_U(_pfnIsNLSDefinedString, AppDomainSortingSetupInfoObject, m_pfnIsNLSDefinedString)
+DEFINE_FIELD_U(_pfnCompareStringEx, AppDomainSortingSetupInfoObject, m_pfnCompareStringEx)
+DEFINE_FIELD_U(_pfnLCMapStringEx, AppDomainSortingSetupInfoObject, m_pfnLCMapStringEx)
+DEFINE_FIELD_U(_pfnFindNLSStringEx, AppDomainSortingSetupInfoObject, m_pfnFindNLSStringEx)
+DEFINE_FIELD_U(_pfnCompareStringOrdinal, AppDomainSortingSetupInfoObject, m_pfnCompareStringOrdinal)
+DEFINE_FIELD_U(_pfnGetNLSVersionEx, AppDomainSortingSetupInfoObject, m_pfnGetNLSVersionEx)
+DEFINE_FIELD_U(_pfnFindStringOrdinal, AppDomainSortingSetupInfoObject, m_pfnFindStringOrdinal)
+DEFINE_FIELD_U(_useV2LegacySorting, AppDomainSortingSetupInfoObject, m_useV2LegacySorting)
+DEFINE_FIELD_U(_useV4LegacySorting, AppDomainSortingSetupInfoObject, m_useV4LegacySorting)
+#endif // FEATURE_CORECLR
+
+#ifndef FEATURE_COREFX_GLOBALIZATION
+DEFINE_CLASS_U(Globalization, CultureData, CultureDataBaseObject)
+DEFINE_FIELD_U(sRealName, CultureDataBaseObject, sRealName)
+DEFINE_FIELD_U(sWindowsName, CultureDataBaseObject, sWindowsName)
+DEFINE_FIELD_U(sName, CultureDataBaseObject, sName)
+DEFINE_FIELD_U(sParent, CultureDataBaseObject, sParent)
+DEFINE_FIELD_U(sLocalizedDisplayName, CultureDataBaseObject, sLocalizedDisplayName)
+DEFINE_FIELD_U(sEnglishDisplayName, CultureDataBaseObject, sEnglishDisplayName)
+DEFINE_FIELD_U(sNativeDisplayName, CultureDataBaseObject, sNativeDisplayName)
+DEFINE_FIELD_U(sSpecificCulture, CultureDataBaseObject, sSpecificCulture)
+DEFINE_FIELD_U(sISO639Language, CultureDataBaseObject, sISO639Language)
+DEFINE_FIELD_U(sLocalizedLanguage, CultureDataBaseObject, sLocalizedLanguage)
+DEFINE_FIELD_U(sEnglishLanguage, CultureDataBaseObject, sEnglishLanguage)
+DEFINE_FIELD_U(sNativeLanguage, CultureDataBaseObject, sNativeLanguage)
+DEFINE_FIELD_U(sRegionName, CultureDataBaseObject, sRegionName)
+//DEFINE_FIELD_U(iCountry, CultureDataBaseObject, iCountry)
+DEFINE_FIELD_U(iGeoId, CultureDataBaseObject, iGeoId)
+DEFINE_FIELD_U(sLocalizedCountry, CultureDataBaseObject, sLocalizedCountry)
+DEFINE_FIELD_U(sEnglishCountry, CultureDataBaseObject, sEnglishCountry)
+DEFINE_FIELD_U(sNativeCountry, CultureDataBaseObject, sNativeCountry)
+DEFINE_FIELD_U(sISO3166CountryName, CultureDataBaseObject, sISO3166CountryName)
+DEFINE_FIELD_U(sPositiveSign, CultureDataBaseObject, sPositiveSign)
+DEFINE_FIELD_U(sNegativeSign, CultureDataBaseObject, sNegativeSign)
+DEFINE_FIELD_U(saNativeDigits, CultureDataBaseObject, saNativeDigits)
+DEFINE_FIELD_U(iDigitSubstitution, CultureDataBaseObject, iDigitSubstitution)
+DEFINE_FIELD_U(iLeadingZeros, CultureDataBaseObject, iLeadingZeros)
+DEFINE_FIELD_U(iDigits, CultureDataBaseObject, iDigits)
+DEFINE_FIELD_U(iNegativeNumber, CultureDataBaseObject, iNegativeNumber)
+DEFINE_FIELD_U(waGrouping, CultureDataBaseObject, waGrouping)
+DEFINE_FIELD_U(sDecimalSeparator, CultureDataBaseObject, sDecimalSeparator)
+DEFINE_FIELD_U(sThousandSeparator, CultureDataBaseObject, sThousandSeparator)
+DEFINE_FIELD_U(sNaN, CultureDataBaseObject, sNaN)
+DEFINE_FIELD_U(sPositiveInfinity, CultureDataBaseObject, sPositiveInfinity)
+DEFINE_FIELD_U(sNegativeInfinity, CultureDataBaseObject, sNegativeInfinity)
+DEFINE_FIELD_U(iNegativePercent, CultureDataBaseObject, iNegativePercent)
+DEFINE_FIELD_U(iPositivePercent, CultureDataBaseObject, iPositivePercent)
+DEFINE_FIELD_U(sPercent, CultureDataBaseObject, sPercent)
+DEFINE_FIELD_U(sPerMille, CultureDataBaseObject, sPerMille)
+DEFINE_FIELD_U(sCurrency, CultureDataBaseObject, sCurrency)
+DEFINE_FIELD_U(sIntlMonetarySymbol, CultureDataBaseObject, sIntlMonetarySymbol)
+DEFINE_FIELD_U(sEnglishCurrency, CultureDataBaseObject, sEnglishCurrency)
+DEFINE_FIELD_U(sNativeCurrency, CultureDataBaseObject, sNativeCurrency)
+DEFINE_FIELD_U(iCurrencyDigits, CultureDataBaseObject, iCurrencyDigits)
+DEFINE_FIELD_U(iCurrency, CultureDataBaseObject, iCurrency)
+DEFINE_FIELD_U(iNegativeCurrency, CultureDataBaseObject, iNegativeCurrency)
+DEFINE_FIELD_U(waMonetaryGrouping, CultureDataBaseObject, waMonetaryGrouping)
+DEFINE_FIELD_U(sMonetaryDecimal, CultureDataBaseObject, sMonetaryDecimal)
+DEFINE_FIELD_U(sMonetaryThousand, CultureDataBaseObject, sMonetaryThousand)
+DEFINE_FIELD_U(iMeasure, CultureDataBaseObject, iMeasure)
+DEFINE_FIELD_U(sListSeparator, CultureDataBaseObject, sListSeparator)
+//DEFINE_FIELD_U(iPaperSize, CultureDataBaseObject, iPaperSize)
+//DEFINE_FIELD_U(waFontSignature, CultureDataBaseObject, waFontSignature)
+DEFINE_FIELD_U(sAM1159, CultureDataBaseObject, sAM1159)
+DEFINE_FIELD_U(sPM2359, CultureDataBaseObject, sPM2359)
+DEFINE_FIELD_U(sTimeSeparator, CultureDataBaseObject, sTimeSeparator)
+DEFINE_FIELD_U(saLongTimes, CultureDataBaseObject, saLongTimes)
+DEFINE_FIELD_U(saShortTimes, CultureDataBaseObject, saShortTimes)
+DEFINE_FIELD_U(saDurationFormats, CultureDataBaseObject, saDurationFormats)
+DEFINE_FIELD_U(iFirstDayOfWeek, CultureDataBaseObject, iFirstDayOfWeek)
+DEFINE_FIELD_U(iFirstWeekOfYear, CultureDataBaseObject, iFirstWeekOfYear)
+DEFINE_FIELD_U(waCalendars, CultureDataBaseObject, waCalendars)
+DEFINE_FIELD_U(calendars, CultureDataBaseObject, calendars)
+#ifndef FEATURE_CORECLR
+DEFINE_FIELD_U(iReadingLayout, CultureDataBaseObject, iReadingLayout)
+#endif
+DEFINE_FIELD_U(sTextInfo, CultureDataBaseObject, sTextInfo)
+DEFINE_FIELD_U(sCompareInfo, CultureDataBaseObject, sCompareInfo)
+DEFINE_FIELD_U(sScripts, CultureDataBaseObject, sScripts)
+DEFINE_FIELD_U(bUseOverrides, CultureDataBaseObject, bUseOverrides)
+DEFINE_FIELD_U(bNeutral, CultureDataBaseObject, bNeutral)
+#ifndef FEATURE_CORECLR
+DEFINE_FIELD_U(bWin32Installed, CultureDataBaseObject, bWin32Installed)
+DEFINE_FIELD_U(bFramework, CultureDataBaseObject, bFramework)
+#endif
+DEFINE_CLASS_U(Globalization, CalendarData, CalendarDataBaseObject)
+DEFINE_FIELD_U(sNativeName, CalendarDataBaseObject, sNativeName)
+DEFINE_FIELD_U(saShortDates, CalendarDataBaseObject, saShortDates)
+DEFINE_FIELD_U(saYearMonths, CalendarDataBaseObject, saYearMonths)
+DEFINE_FIELD_U(saLongDates, CalendarDataBaseObject, saLongDates)
+DEFINE_FIELD_U(sMonthDay, CalendarDataBaseObject, sMonthDay)
+DEFINE_FIELD_U(saEraNames, CalendarDataBaseObject, saEraNames)
+DEFINE_FIELD_U(saAbbrevEraNames, CalendarDataBaseObject, saAbbrevEraNames)
+DEFINE_FIELD_U(saAbbrevEnglishEraNames,CalendarDataBaseObject, saAbbrevEnglishEraNames)
+DEFINE_FIELD_U(saDayNames, CalendarDataBaseObject, saDayNames)
+DEFINE_FIELD_U(saAbbrevDayNames, CalendarDataBaseObject, saAbbrevDayNames)
+DEFINE_FIELD_U(saSuperShortDayNames, CalendarDataBaseObject, saSuperShortDayNames)
+DEFINE_FIELD_U(saMonthNames, CalendarDataBaseObject, saMonthNames)
+DEFINE_FIELD_U(saAbbrevMonthNames, CalendarDataBaseObject, saAbbrevMonthNames)
+DEFINE_FIELD_U(saMonthGenitiveNames, CalendarDataBaseObject, saMonthGenitiveNames)
+DEFINE_FIELD_U(saAbbrevMonthGenitiveNames, CalendarDataBaseObject, saAbbrevMonthGenitiveNames)
+DEFINE_FIELD_U(saLeapYearMonthNames, CalendarDataBaseObject, saLeapYearMonthNames)
+DEFINE_FIELD_U(iTwoDigitYearMax, CalendarDataBaseObject, iTwoDigitYearMax)
+DEFINE_FIELD_U(iCurrentEra, CalendarDataBaseObject, iCurrentEra)
+DEFINE_FIELD_U(bUseUserOverrides, CalendarDataBaseObject, bUseUserOverrides)
+#endif // !FEATURE_COREFX_GLOBALIZATION
+
+DEFINE_CLASS_U(Globalization, CultureInfo, CultureInfoBaseObject)
+DEFINE_FIELD_U(compareInfo, CultureInfoBaseObject, compareInfo)
+DEFINE_FIELD_U(textInfo, CultureInfoBaseObject, textInfo)
+DEFINE_FIELD_U(numInfo, CultureInfoBaseObject, numInfo)
+DEFINE_FIELD_U(dateTimeInfo, CultureInfoBaseObject, dateTimeInfo)
+DEFINE_FIELD_U(calendar, CultureInfoBaseObject, calendar)
+#ifndef FEATURE_CORECLR
+DEFINE_FIELD_U(m_consoleFallbackCulture, CultureInfoBaseObject, m_consoleFallbackCulture)
+#endif // FEATURE_CORECLR
+DEFINE_FIELD_U(m_name, CultureInfoBaseObject, m_name)
+DEFINE_FIELD_U(m_nonSortName, CultureInfoBaseObject, m_nonSortName)
+DEFINE_FIELD_U(m_sortName, CultureInfoBaseObject, m_sortName)
+DEFINE_FIELD_U(m_parent, CultureInfoBaseObject, m_parent)
+#ifdef FEATURE_LEAK_CULTURE_INFO
+DEFINE_FIELD_U(m_createdDomainID, CultureInfoBaseObject, m_createdDomainID)
+#endif // FEATURE_LEAK_CULTURE_INFO
+DEFINE_FIELD_U(m_isReadOnly, CultureInfoBaseObject, m_isReadOnly)
+DEFINE_FIELD_U(m_isInherited, CultureInfoBaseObject, m_isInherited)
+#ifdef FEATURE_LEAK_CULTURE_INFO
+DEFINE_FIELD_U(m_isSafeCrossDomain, CultureInfoBaseObject, m_isSafeCrossDomain)
+#endif // FEATURE_LEAK_CULTURE_INFO
+#ifndef FEATURE_COREFX_GLOBALIZATION
+DEFINE_FIELD_U(m_useUserOverride, CultureInfoBaseObject, m_useUserOverride)
+#endif
+DEFINE_CLASS(CULTURE_INFO, Globalization, CultureInfo)
+DEFINE_METHOD(CULTURE_INFO, STR_CTOR, .ctor, IM_Str_RetVoid)
+DEFINE_FIELD(CULTURE_INFO, CURRENT_CULTURE, s_userDefaultCulture)
+DEFINE_PROPERTY(CULTURE_INFO, NAME, Name, Str)
+#ifdef FEATURE_USE_LCID
+DEFINE_METHOD(CULTURE_INFO, INT_CTOR, .ctor, IM_Int_RetVoid)
+DEFINE_PROPERTY(CULTURE_INFO, ID, LCID, Int)
+#endif
+DEFINE_PROPERTY(CULTURE_INFO, PARENT, Parent, CultureInfo)
+
+DEFINE_CLASS(CURRENCY, System, Currency)
+DEFINE_METHOD(CURRENCY, DECIMAL_CTOR, .ctor, IM_Dec_RetVoid)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(CURRENCY_WRAPPER, Interop, CurrencyWrapper)
+#endif
+
+DEFINE_CLASS(DATE_TIME, System, DateTime)
+DEFINE_METHOD(DATE_TIME, LONG_CTOR, .ctor, IM_Long_RetVoid)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(DATE_TIME_OFFSET, System, DateTimeOffset)
+#endif // FEATURE_COMINTEROP
+
+DEFINE_CLASS(DECIMAL, System, Decimal)
+DEFINE_METHOD(DECIMAL, CURRENCY_CTOR, .ctor, IM_Currency_RetVoid)
+
+DEFINE_CLASS_U(System, Delegate, NoClass)
+DEFINE_FIELD_U(_target, DelegateObject, _target)
+DEFINE_FIELD_U(_methodBase, DelegateObject, _methodBase)
+DEFINE_FIELD_U(_methodPtr, DelegateObject, _methodPtr)
+DEFINE_FIELD_U(_methodPtrAux, DelegateObject, _methodPtrAux)
+DEFINE_CLASS(DELEGATE, System, Delegate)
+DEFINE_FIELD(DELEGATE, TARGET, _target)
+DEFINE_FIELD(DELEGATE, METHOD_PTR, _methodPtr)
+DEFINE_FIELD(DELEGATE, METHOD_PTR_AUX, _methodPtrAux)
+DEFINE_METHOD(DELEGATE, CONSTRUCT_DELEGATE, DelegateConstruct, IM_Obj_IntPtr_RetVoid)
+DEFINE_METHOD(DELEGATE, GET_INVOKE_METHOD, GetInvokeMethod, IM_RetIntPtr)
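+// Illustrative only (a hedged sketch, not stated by this header): entries like the
+// above are presumably consumed from VM code through the binder, along the lines of
+//   FieldDesc*  pTargetFD = MscorlibBinder::GetField(FIELD__DELEGATE__TARGET);
+//   MethodDesc* pCtorMD   = MscorlibBinder::GetMethod(METHOD__DELEGATE__CONSTRUCT_DELEGATE);
+// assuming the usual FIELD__<CLASS>__<NAME> / METHOD__<CLASS>__<NAME> id expansion.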
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(DISPATCH_WRAPPER, Interop, DispatchWrapper)
+#endif // FEATURE_COMINTEROP
+
+DEFINE_CLASS(DYNAMICMETHOD, ReflectionEmit, DynamicMethod)
+
+DEFINE_CLASS(DYNAMICRESOLVER, ReflectionEmit, DynamicResolver)
+DEFINE_FIELD(DYNAMICRESOLVER, DYNAMIC_METHOD, m_method)
+
+DEFINE_CLASS(EMPTY, System, Empty)
+
+DEFINE_CLASS(ENC_HELPER, Diagnostics, EditAndContinueHelper)
+DEFINE_FIELD(ENC_HELPER, OBJECT_REFERENCE, _objectReference)
+
+DEFINE_CLASS(ENCODING, Text, Encoding)
+
+DEFINE_CLASS(ENUM, System, Enum)
+
+DEFINE_CLASS(ENVIRONMENT, System, Environment)
+DEFINE_METHOD(ENVIRONMENT, GET_RESOURCE_STRING_LOCAL, GetResourceStringLocal, SM_Str_RetStr)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(ERROR_WRAPPER, Interop, ErrorWrapper)
+#endif
+
+DEFINE_CLASS(EVENT, Reflection, RuntimeEventInfo)
+
+DEFINE_CLASS(EVENT_ARGS, System, EventArgs)
+
+DEFINE_CLASS(EVENT_HANDLERGENERIC, System, EventHandler`1)
+
+DEFINE_CLASS(EVENT_INFO, Reflection, EventInfo)
+
+DEFINE_CLASS(EVIDENCE, Policy, Evidence)
+#ifdef FEATURE_CAS_POLICY
+// .ctor support for ICorRuntimeHost::CreateEvidence
+DEFINE_METHOD(EVIDENCE, CTOR, .ctor, IM_RetVoid)
+DEFINE_METHOD(EVIDENCE, WAS_STRONGNAME_EVIDENCE_USED, WasStrongNameEvidenceUsed, IM_RetBool)
+#endif // FEATURE_CAS_POLICY
+
+DEFINE_CLASS_U(System, Exception, ExceptionObject)
+DEFINE_FIELD_U(_className, ExceptionObject, _className)
+DEFINE_FIELD_U(_exceptionMethod, ExceptionObject, _exceptionMethod)
+DEFINE_FIELD_U(_exceptionMethodString,ExceptionObject, _exceptionMethodString)
+DEFINE_FIELD_U(_message, ExceptionObject, _message)
+DEFINE_FIELD_U(_data, ExceptionObject, _data)
+DEFINE_FIELD_U(_innerException, ExceptionObject, _innerException)
+DEFINE_FIELD_U(_helpURL, ExceptionObject, _helpURL)
+DEFINE_FIELD_U(_source, ExceptionObject, _source)
+DEFINE_FIELD_U(_stackTrace, ExceptionObject, _stackTrace)
+DEFINE_FIELD_U(_watsonBuckets, ExceptionObject, _watsonBuckets)
+DEFINE_FIELD_U(_stackTraceString, ExceptionObject, _stackTraceString)
+DEFINE_FIELD_U(_remoteStackTraceString, ExceptionObject, _remoteStackTraceString)
+DEFINE_FIELD_U(_dynamicMethods, ExceptionObject, _dynamicMethods)
+DEFINE_FIELD_U(_xptrs, ExceptionObject, _xptrs)
+#ifdef FEATURE_SERIALIZATION
+DEFINE_FIELD_U(_safeSerializationManager, ExceptionObject, _safeSerializationManager)
+#endif // FEATURE_SERIALIZATION
+DEFINE_FIELD_U(_HResult, ExceptionObject, _HResult)
+DEFINE_FIELD_U(_xcode, ExceptionObject, _xcode)
+DEFINE_FIELD_U(_remoteStackIndex, ExceptionObject, _remoteStackIndex)
+DEFINE_FIELD_U(_ipForWatsonBuckets,ExceptionObject, _ipForWatsonBuckets)
+DEFINE_CLASS(EXCEPTION, System, Exception)
+DEFINE_METHOD(EXCEPTION, GET_CLASS_NAME, GetClassName, IM_RetStr)
+DEFINE_PROPERTY(EXCEPTION, MESSAGE, Message, Str)
+DEFINE_PROPERTY(EXCEPTION, SOURCE, Source, Str)
+DEFINE_PROPERTY(EXCEPTION, HELP_LINK, HelpLink, Str)
+DEFINE_METHOD(EXCEPTION, INTERNAL_TO_STRING, InternalToString, IM_RetStr)
+DEFINE_METHOD(EXCEPTION, TO_STRING, ToString, IM_Bool_Bool_RetStr)
+DEFINE_METHOD(EXCEPTION, INTERNAL_PRESERVE_STACK_TRACE, InternalPreserveStackTrace, IM_RetVoid)
+#ifdef FEATURE_COMINTEROP
+DEFINE_METHOD(EXCEPTION, ADD_EXCEPTION_DATA_FOR_RESTRICTED_ERROR_INFO, AddExceptionDataForRestrictedErrorInfo, IM_Str_Str_Str_Obj_Bool_RetVoid)
+DEFINE_METHOD(EXCEPTION, TRY_GET_RESTRICTED_LANGUAGE_ERROR_OBJECT, TryGetRestrictedLanguageErrorObject, IM_RefObject_RetBool)
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_CORECLR
+
+DEFINE_CLASS(CROSSAPPDOMAINMARSHALEDEXCEPTION, System, CrossAppDomainMarshaledException)
+DEFINE_METHOD(CROSSAPPDOMAINMARSHALEDEXCEPTION, STR_INT_CTOR, .ctor, IM_Str_Int_RetVoid)
+
+#endif //FEATURE_CORECLR
+
+
+DEFINE_CLASS(SYSTEM_EXCEPTION, System, SystemException)
+DEFINE_METHOD(SYSTEM_EXCEPTION, STR_EX_CTOR, .ctor, IM_Str_Exception_RetVoid)
+
+
+DEFINE_CLASS(TYPE_INIT_EXCEPTION, System, TypeInitializationException)
+DEFINE_METHOD(TYPE_INIT_EXCEPTION, STR_EX_CTOR, .ctor, IM_Str_Exception_RetVoid)
+
+DEFINE_CLASS(THREAD_START_EXCEPTION,Threading, ThreadStartException)
+DEFINE_METHOD(THREAD_START_EXCEPTION,EX_CTOR, .ctor, IM_Exception_RetVoid)
+
+DEFINE_CLASS(TYPE_HANDLE, System, RuntimeTypeHandle)
+DEFINE_CLASS(RT_TYPE_HANDLE, System, RuntimeTypeHandle)
+DEFINE_METHOD(RT_TYPE_HANDLE, GET_TYPE_HELPER, GetTypeHelper, SM_Type_ArrType_IntPtr_int_RetType)
+DEFINE_METHOD(RT_TYPE_HANDLE, PVOID_CTOR, .ctor, IM_RuntimeType_RetVoid)
+DEFINE_METHOD(RT_TYPE_HANDLE, GETVALUEINTERNAL, GetValueInternal, SM_RuntimeTypeHandle_RetIntPtr)
+DEFINE_FIELD(RT_TYPE_HANDLE, M_TYPE, m_type)
+
+DEFINE_CLASS_U(Reflection, RtFieldInfo, NoClass)
+DEFINE_FIELD_U(m_fieldHandle, ReflectFieldObject, m_pFD)
+DEFINE_CLASS(RT_FIELD_INFO, Reflection, RtFieldInfo)
+DEFINE_FIELD(RT_FIELD_INFO, HANDLE, m_fieldHandle)
+
+DEFINE_CLASS_U(System, RuntimeFieldInfoStub, ReflectFieldObject)
+DEFINE_FIELD_U(m_fieldHandle, ReflectFieldObject, m_pFD)
+DEFINE_CLASS(STUBFIELDINFO, System, RuntimeFieldInfoStub)
+
+DEFINE_CLASS(FIELD, Reflection, RuntimeFieldInfo)
+DEFINE_METHOD(FIELD, SET_VALUE, SetValue, IM_Obj_Obj_BindingFlags_Binder_CultureInfo_RetVoid)
+DEFINE_METHOD(FIELD, GET_VALUE, GetValue, IM_Obj_RetObj)
+
+DEFINE_CLASS(FIELD_HANDLE, System, RuntimeFieldHandle)
+DEFINE_FIELD(FIELD_HANDLE, M_FIELD, m_ptr)
+
+DEFINE_CLASS(I_RT_FIELD_INFO, System, IRuntimeFieldInfo)
+
+DEFINE_CLASS(FIELD_INFO, Reflection, FieldInfo)
+
+DEFINE_CLASS_U(IO, FileStreamAsyncResult, AsyncResultBase)
+DEFINE_FIELD_U(_userCallback, AsyncResultBase, _userCallback)
+DEFINE_FIELD_U(_userStateObject, AsyncResultBase, _userStateObject)
+DEFINE_FIELD_U(_waitHandle, AsyncResultBase, _waitHandle)
+DEFINE_FIELD_U(_handle, AsyncResultBase, _fileHandle)
+DEFINE_FIELD_U(_overlapped, AsyncResultBase, _overlapped)
+DEFINE_FIELD_U(_EndXxxCalled, AsyncResultBase, _EndXxxCalled)
+DEFINE_FIELD_U(_numBytes, AsyncResultBase, _numBytes)
+DEFINE_FIELD_U(_errorCode, AsyncResultBase, _errorCode)
+DEFINE_FIELD_U(_numBufferedBytes, AsyncResultBase, _numBufferedBytes)
+DEFINE_FIELD_U(_isWrite, AsyncResultBase, _isWrite)
+DEFINE_FIELD_U(_isComplete, AsyncResultBase, _isComplete)
+DEFINE_FIELD_U(_completedSynchronously, AsyncResultBase, _completedSynchronously)
+DEFINE_CLASS(FILESTREAM_ASYNCRESULT, IO, FileStreamAsyncResult)
+
+DEFINE_CLASS_U(Security, FrameSecurityDescriptor, FrameSecurityDescriptorBaseObject)
+DEFINE_FIELD_U(m_assertions, FrameSecurityDescriptorBaseObject, m_assertions)
+DEFINE_FIELD_U(m_denials, FrameSecurityDescriptorBaseObject, m_denials)
+DEFINE_FIELD_U(m_restriction, FrameSecurityDescriptorBaseObject, m_restriction)
+DEFINE_FIELD_U(m_AssertFT, FrameSecurityDescriptorBaseObject, m_assertFT)
+DEFINE_FIELD_U(m_assertAllPossible,FrameSecurityDescriptorBaseObject, m_assertAllPossible)
+DEFINE_FIELD_U(m_DeclarativeAssertions, FrameSecurityDescriptorBaseObject, m_DeclarativeAssertions)
+DEFINE_FIELD_U(m_DeclarativeDenials, FrameSecurityDescriptorBaseObject, m_DeclarativeDenials)
+DEFINE_FIELD_U(m_DeclarativeRestrictions, FrameSecurityDescriptorBaseObject, m_DeclarativeRestrictions)
+#ifndef FEATURE_PAL
+DEFINE_FIELD_U(m_callerToken, FrameSecurityDescriptorBaseObject, m_callerToken)
+DEFINE_FIELD_U(m_impToken, FrameSecurityDescriptorBaseObject, m_impToken)
+#endif
+DEFINE_CLASS(FRAME_SECURITY_DESCRIPTOR, Security, FrameSecurityDescriptor)
+
+DEFINE_CLASS(GUID, System, Guid)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(HSTRING_HEADER_MANAGED, WinRT, HSTRING_HEADER)
+
+DEFINE_CLASS(ICUSTOMPROPERTY, WinRT, ICustomProperty)
+DEFINE_CLASS(ICUSTOMPROPERTYPROVIDERIMPL, WinRT, ICustomPropertyProviderImpl)
+DEFINE_METHOD(ICUSTOMPROPERTYPROVIDERIMPL, CREATE_PROPERTY, CreateProperty, SM_Obj_Str_RetICustomProperty)
+DEFINE_METHOD(ICUSTOMPROPERTYPROVIDERIMPL, CREATE_INDEXED_PROPERTY, CreateIndexedProperty, SM_Obj_Str_PtrTypeName_RetICustomProperty)
+DEFINE_METHOD(ICUSTOMPROPERTYPROVIDERIMPL, GET_TYPE, GetType, SM_Obj_PtrTypeName_RetVoid)
+DEFINE_CLASS(ICUSTOMPROPERTYPROVIDERPROXY, WinRT, ICustomPropertyProviderProxy`2)
+DEFINE_METHOD(ICUSTOMPROPERTYPROVIDERPROXY, CREATE_INSTANCE, CreateInstance, SM_Obj_RetObj)
+
+DEFINE_CLASS(FACTORYFORIREFERENCE, WinRT, IReferenceFactory)
+DEFINE_METHOD(FACTORYFORIREFERENCE, CREATE_IREFERENCE, CreateIReference, SM_Obj_RetObj)
+DEFINE_CLASS(CLRIREFERENCEIMPL, WinRT, CLRIReferenceImpl`1)
+DEFINE_METHOD(CLRIREFERENCEIMPL, UNBOXHELPER, UnboxHelper, SM_Obj_RetObj)
+DEFINE_CLASS(CLRIREFERENCEARRAYIMPL, WinRT, CLRIReferenceArrayImpl`1)
+DEFINE_METHOD(CLRIREFERENCEARRAYIMPL,UNBOXHELPER, UnboxHelper, SM_Obj_RetObj)
+DEFINE_CLASS(IREFERENCE, WinRT, IReference`1)
+DEFINE_CLASS(CLRIKEYVALUEPAIRIMPL, WinRT, CLRIKeyValuePairImpl`2)
+DEFINE_METHOD(CLRIKEYVALUEPAIRIMPL, BOXHELPER, BoxHelper, SM_Obj_RetObj)
+DEFINE_METHOD(CLRIKEYVALUEPAIRIMPL, UNBOXHELPER, UnboxHelper, SM_Obj_RetObj)
+
+DEFINE_CLASS(WINDOWS_FOUNDATION_EVENTHANDLER, WinRT, WindowsFoundationEventHandler`1)
+
+DEFINE_CLASS(VARIANT, System, Variant)
+DEFINE_METHOD(VARIANT, CONVERT_OBJECT_TO_VARIANT,MarshalHelperConvertObjectToVariant,SM_Obj_RefVariant_RetVoid)
+DEFINE_METHOD(VARIANT, CAST_VARIANT, MarshalHelperCastVariant, SM_Obj_Int_RefVariant_RetVoid)
+DEFINE_METHOD(VARIANT, CONVERT_VARIANT_TO_OBJECT,MarshalHelperConvertVariantToObject,SM_RefVariant_RetObject)
+#endif // FEATURE_COMINTEROP
+
+DEFINE_CLASS(IASYNCRESULT, System, IAsyncResult)
+
+DEFINE_CLASS(ICUSTOM_ATTR_PROVIDER, Reflection, ICustomAttributeProvider)
+DEFINE_METHOD(ICUSTOM_ATTR_PROVIDER,GET_CUSTOM_ATTRIBUTES, GetCustomAttributes, IM_Type_RetArrObj)
+
+DEFINE_CLASS(ICUSTOM_MARSHALER, Interop, ICustomMarshaler)
+DEFINE_METHOD(ICUSTOM_MARSHALER, MARSHAL_NATIVE_TO_MANAGED,MarshalNativeToManaged, IM_IntPtr_RetObj)
+DEFINE_METHOD(ICUSTOM_MARSHALER, MARSHAL_MANAGED_TO_NATIVE,MarshalManagedToNative, IM_Obj_RetIntPtr)
+DEFINE_METHOD(ICUSTOM_MARSHALER, CLEANUP_NATIVE_DATA, CleanUpNativeData, IM_IntPtr_RetVoid)
+DEFINE_METHOD(ICUSTOM_MARSHALER, CLEANUP_MANAGED_DATA, CleanUpManagedData, IM_Obj_RetVoid)
+DEFINE_METHOD(ICUSTOM_MARSHALER, GET_NATIVE_DATA_SIZE, GetNativeDataSize, IM_RetInt)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(ICUSTOM_QUERYINTERFACE, Interop, ICustomQueryInterface)
+DEFINE_METHOD(ICUSTOM_QUERYINTERFACE, GET_INTERFACE, GetInterface, IM_RefGuid_OutIntPtr_RetCustomQueryInterfaceResult)
+DEFINE_CLASS(CUSTOMQUERYINTERFACERESULT, Interop, CustomQueryInterfaceResult)
+#endif //FEATURE_COMINTEROP
+
+#ifdef FEATURE_REMOTING
+DEFINE_CLASS(IDENTITY, Remoting, Identity)
+DEFINE_FIELD(IDENTITY, TP_OR_OBJECT, _tpOrObject)
+DEFINE_FIELD(IDENTITY, LEASE, _lease)
+DEFINE_FIELD(IDENTITY, OBJURI, _ObjURI)
+#endif
+
+#ifdef FEATURE_SERIALIZATION
+DEFINE_CLASS(ISERIALIZABLE, Serialization, ISerializable)
+DEFINE_CLASS(IOBJECTREFERENCE, Serialization, IObjectReference)
+DEFINE_CLASS(IDESERIALIZATIONCB, Serialization, IDeserializationCallback)
+DEFINE_CLASS(STREAMING_CONTEXT, Serialization, StreamingContext)
+DEFINE_CLASS(SERIALIZATION_INFO, Serialization, SerializationInfo)
+#endif
+
+#ifdef FEATURE_REMOTING
+DEFINE_CLASS(OBJECTCLONEHELPER, Serialization, ObjectCloneHelper)
+DEFINE_METHOD(OBJECTCLONEHELPER, GET_OBJECT_DATA, GetObjectData, SM_Obj_OutStr_OutStr_OutArrStr_OutArrObj_RetObj)
+DEFINE_METHOD(OBJECTCLONEHELPER, PREPARE_DATA, PrepareConstructorArgs, SM_Obj_ArrStr_ArrObj_OutStreamingContext_RetSerializationInfo)
+#endif
+
+
+DEFINE_CLASS(IENUMERATOR, Collections, IEnumerator)
+
+DEFINE_CLASS(IENUMERABLE, Collections, IEnumerable)
+DEFINE_CLASS(ICOLLECTION, Collections, ICollection)
+DEFINE_CLASS(ILIST, Collections, IList)
+DEFINE_CLASS(IDISPOSABLE, System, IDisposable)
+
+DEFINE_CLASS(IEXPANDO, Expando, IExpando)
+DEFINE_METHOD(IEXPANDO, ADD_FIELD, AddField, IM_Str_RetFieldInfo)
+DEFINE_METHOD(IEXPANDO, REMOVE_MEMBER, RemoveMember, IM_MemberInfo_RetVoid)
+
+DEFINE_CLASS(IPERMISSION, Security, IPermission)
+
+DEFINE_CLASS(IPRINCIPAL, Principal, IPrincipal)
+
+DEFINE_CLASS(IREFLECT, Reflection, IReflect)
+DEFINE_METHOD(IREFLECT, GET_PROPERTIES, GetProperties, IM_BindingFlags_RetArrPropertyInfo)
+DEFINE_METHOD(IREFLECT, GET_FIELDS, GetFields, IM_BindingFlags_RetArrFieldInfo)
+DEFINE_METHOD(IREFLECT, GET_METHODS, GetMethods, IM_BindingFlags_RetArrMethodInfo)
+DEFINE_METHOD(IREFLECT, INVOKE_MEMBER, InvokeMember, IM_Str_BindingFlags_Binder_Obj_ArrObj_ArrParameterModifier_CultureInfo_ArrStr_RetObj)
+
+#ifdef FEATURE_ISOSTORE
+#ifndef FEATURE_ISOSTORE_LIGHT
+DEFINE_CLASS(ISS_STORE, IsolatedStorage, IsolatedStorage)
+#endif // !FEATURE_ISOSTORE_LIGHT
+DEFINE_CLASS(ISS_STORE_FILE, IsolatedStorage, IsolatedStorageFile)
+DEFINE_CLASS(ISS_STORE_FILE_STREAM, IsolatedStorage, IsolatedStorageFileStream)
+#endif
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(LCID_CONVERSION_TYPE, Interop, LCIDConversionAttribute)
+#endif // FEATURE_COMINTEROP
+
+DEFINE_CLASS(LOADER_OPTIMIZATION, System, LoaderOptimization)
+
+#ifdef FEATURE_REMOTING
+DEFINE_CLASS_U(Messaging, LogicalCallContext, LogicalCallContextObject)
+DEFINE_FIELD_U(m_Datastore, LogicalCallContextObject, m_Datastore)
+DEFINE_FIELD_U(m_RemotingData, LogicalCallContextObject, m_RemotingData)
+DEFINE_FIELD_U(m_SecurityData, LogicalCallContextObject, m_SecurityData)
+DEFINE_FIELD_U(m_HostContext, LogicalCallContextObject, m_HostContext)
+DEFINE_FIELD_U(m_IsCorrelationMgr, LogicalCallContextObject, m_IsCorrelationMgr)
+DEFINE_FIELD_U(_sendHeaders, LogicalCallContextObject, _sendHeaders)
+DEFINE_FIELD_U(_recvHeaders, LogicalCallContextObject, _recvHeaders)
+#endif
+
+DEFINE_CLASS(MARSHAL, Interop, Marshal)
+#ifdef FEATURE_COMINTEROP
+DEFINE_METHOD(MARSHAL, LOAD_LICENSE_MANAGER, LoadLicenseManager, SM_Void_RetIntPtr)
+DEFINE_METHOD(MARSHAL, INITIALIZE_WRAPPER_FOR_WINRT, InitializeWrapperForWinRT, SM_Obj_RefIntPtr_RetVoid)
+DEFINE_METHOD(MARSHAL, GET_HR_FOR_EXCEPTION, GetHRForException, SM_Exception_RetInt)
+DEFINE_METHOD(MARSHAL, GET_HR_FOR_EXCEPTION_WINRT, GetHRForException_WinRT, SM_Exception_RetInt)
+#endif // FEATURE_COMINTEROP
+DEFINE_METHOD(MARSHAL, GET_FUNCTION_POINTER_FOR_DELEGATE, GetFunctionPointerForDelegate, SM_Delegate_RetIntPtr)
+DEFINE_METHOD(MARSHAL, GET_DELEGATE_FOR_FUNCTION_POINTER, GetDelegateForFunctionPointer, SM_IntPtr_Type_RetDelegate)
+DEFINE_METHOD(MARSHAL, ALLOC_CO_TASK_MEM, AllocCoTaskMem, SM_Int_RetIntPtr)
+DEFINE_FIELD(MARSHAL, SYSTEM_MAX_DBCS_CHAR_SIZE, SystemMaxDBCSCharSize)
+
+#ifdef FEATURE_REMOTING
+DEFINE_CLASS_U(System, MarshalByRefObject, MarshalByRefObjectBaseObject)
+DEFINE_FIELD_U(__identity, MarshalByRefObjectBaseObject, m_ServerIdentity)
+DEFINE_CLASS(MARSHAL_BY_REF_OBJECT, System, MarshalByRefObject)
+#endif
+
+DEFINE_CLASS(MEMBER, Reflection, MemberInfo)
+
+#ifdef FEATURE_REMOTING
+DEFINE_CLASS_U(Messaging, Message, MessageObject)
+DEFINE_FIELD_U(_MethodName, MessageObject, pMethodName)
+DEFINE_FIELD_U(_MethodSignature, MessageObject, pMethodSig)
+DEFINE_FIELD_U(_MethodBase, MessageObject, pMethodBase)
+DEFINE_FIELD_U(_properties, MessageObject, pHashTable)
+DEFINE_FIELD_U(_URI, MessageObject, pURI)
+DEFINE_FIELD_U(_typeName, MessageObject, pTypeName)
+DEFINE_FIELD_U(_Fault, MessageObject, pFault)
+DEFINE_FIELD_U(_ID, MessageObject, pID)
+DEFINE_FIELD_U(_srvID, MessageObject, pSrvID)
+DEFINE_FIELD_U(_argMapper, MessageObject, pArgMapper)
+DEFINE_FIELD_U(_callContext, MessageObject, pCallCtx)
+DEFINE_FIELD_U(_frame, MessageObject, pFrame)
+DEFINE_FIELD_U(_methodDesc, MessageObject, pMethodDesc)
+DEFINE_FIELD_U(_metaSigHolder, MessageObject, pMetaSigHolder)
+DEFINE_FIELD_U(_delegateMD, MessageObject, pDelegateMD)
+DEFINE_FIELD_U(_governingType, MessageObject, thGoverningType)
+DEFINE_FIELD_U(_flags, MessageObject, iFlags)
+DEFINE_FIELD_U(_initDone, MessageObject, initDone)
+
+DEFINE_CLASS(MESSAGE_DATA, Proxies, MessageData)
+#endif // FEATURE_REMOTING
+
+DEFINE_CLASS_U(Reflection, RuntimeMethodInfo, NoClass)
+DEFINE_FIELD_U(m_handle, ReflectMethodObject, m_pMD)
+DEFINE_CLASS(METHOD, Reflection, RuntimeMethodInfo)
+DEFINE_METHOD(METHOD, INVOKE, Invoke, IM_Obj_BindingFlags_Binder_ArrObj_CultureInfo_RetObj)
+DEFINE_METHOD(METHOD, GET_PARAMETERS, GetParameters, IM_RetArrParameterInfo)
+
+DEFINE_CLASS(METHOD_BASE, Reflection, MethodBase)
+DEFINE_METHOD(METHOD_BASE, GET_METHODDESC, GetMethodDesc, IM_RetIntPtr)
+
+DEFINE_CLASS_U(Reflection, ExceptionHandlingClause, ExceptionHandlingClause)
+DEFINE_FIELD_U(m_methodBody, ExceptionHandlingClause, m_methodBody)
+DEFINE_FIELD_U(m_flags, ExceptionHandlingClause, m_flags)
+DEFINE_FIELD_U(m_tryOffset, ExceptionHandlingClause, m_tryOffset)
+DEFINE_FIELD_U(m_tryLength, ExceptionHandlingClause, m_tryLength)
+DEFINE_FIELD_U(m_handlerOffset, ExceptionHandlingClause, m_handlerOffset)
+DEFINE_FIELD_U(m_handlerLength, ExceptionHandlingClause, m_handlerLength)
+DEFINE_FIELD_U(m_catchMetadataToken, ExceptionHandlingClause, m_catchToken)
+DEFINE_FIELD_U(m_filterOffset, ExceptionHandlingClause, m_filterOffset)
+DEFINE_CLASS(EH_CLAUSE, Reflection, ExceptionHandlingClause)
+
+DEFINE_CLASS_U(Reflection, LocalVariableInfo, LocalVariableInfo)
+DEFINE_FIELD_U(m_type, LocalVariableInfo, m_type)
+DEFINE_FIELD_U(m_isPinned, LocalVariableInfo, m_bIsPinned)
+DEFINE_FIELD_U(m_localIndex, LocalVariableInfo, m_localIndex)
+DEFINE_CLASS(LOCAL_VARIABLE_INFO, Reflection, LocalVariableInfo)
+
+DEFINE_CLASS_U(Reflection, MethodBody, MethodBody)
+DEFINE_FIELD_U(m_IL, MethodBody, m_IL)
+DEFINE_FIELD_U(m_exceptionHandlingClauses, MethodBody, m_exceptionClauses)
+DEFINE_FIELD_U(m_localVariables, MethodBody, m_localVariables)
+DEFINE_FIELD_U(m_methodBase, MethodBody, m_methodBase)
+DEFINE_FIELD_U(m_localSignatureMetadataToken, MethodBody, m_localVarSigToken)
+DEFINE_FIELD_U(m_maxStackSize, MethodBody, m_maxStackSize)
+DEFINE_FIELD_U(m_initLocals, MethodBody, m_initLocals)
+DEFINE_CLASS(METHOD_BODY, Reflection, MethodBody)
+
+DEFINE_CLASS(METHOD_INFO, Reflection, MethodInfo)
+
+DEFINE_CLASS(METHOD_HANDLE_INTERNAL,System, RuntimeMethodHandleInternal)
+
+DEFINE_CLASS(METHOD_HANDLE, System, RuntimeMethodHandle)
+DEFINE_FIELD(METHOD_HANDLE, METHOD, m_value)
+DEFINE_METHOD(METHOD_HANDLE, GETVALUEINTERNAL, GetValueInternal, SM_RuntimeMethodHandle_RetIntPtr)
+
+#ifdef FEATURE_METHOD_RENTAL
+DEFINE_CLASS(METHOD_RENTAL, ReflectionEmit, MethodRental)
+#endif // FEATURE_METHOD_RENTAL
+
+DEFINE_CLASS(MISSING, Reflection, Missing)
+DEFINE_FIELD(MISSING, VALUE, Value)
+
+DEFINE_CLASS_U(Reflection, RuntimeModule, ReflectModuleBaseObject)
+DEFINE_FIELD_U(m_runtimeType, ReflectModuleBaseObject, m_runtimeType)
+DEFINE_FIELD_U(m_pRefClass, ReflectModuleBaseObject, m_ReflectClass)
+DEFINE_FIELD_U(m_pData, ReflectModuleBaseObject, m_pData)
+DEFINE_FIELD_U(m_pGlobals, ReflectModuleBaseObject, m_pGlobals)
+DEFINE_FIELD_U(m_pFields, ReflectModuleBaseObject, m_pGlobalsFlds)
+DEFINE_CLASS(MODULE, Reflection, RuntimeModule)
+DEFINE_FIELD(MODULE, DATA, m_pData)
+
+DEFINE_CLASS(MODULE_BUILDER, ReflectionEmit, InternalModuleBuilder)
+DEFINE_CLASS(TYPE_BUILDER, ReflectionEmit, TypeBuilder)
+DEFINE_CLASS(ENUM_BUILDER, ReflectionEmit, EnumBuilder)
+
+DEFINE_CLASS_U(System, MulticastDelegate, DelegateObject)
+DEFINE_FIELD_U(_invocationList, DelegateObject, _invocationList)
+DEFINE_FIELD_U(_invocationCount, DelegateObject, _invocationCount)
+DEFINE_CLASS(MULTICAST_DELEGATE, System, MulticastDelegate)
+DEFINE_FIELD(MULTICAST_DELEGATE, INVOCATION_LIST, _invocationList)
+DEFINE_FIELD(MULTICAST_DELEGATE, INVOCATION_COUNT, _invocationCount)
+DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_CLOSED, CtorClosed, IM_Obj_IntPtr_RetVoid)
+DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_CLOSED_STATIC, CtorClosedStatic, IM_Obj_IntPtr_RetVoid)
+DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_RT_CLOSED, CtorRTClosed, IM_Obj_IntPtr_RetVoid)
+DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_OPENED, CtorOpened, IM_Obj_IntPtr_IntPtr_RetVoid)
+DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_SECURE_CLOSED, CtorSecureClosed, IM_Obj_IntPtr_IntPtr_IntPtr_RetVoid)
+DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_SECURE_CLOSED_STATIC,CtorSecureClosedStatic, IM_Obj_IntPtr_IntPtr_IntPtr_RetVoid)
+DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_SECURE_RT_CLOSED, CtorSecureRTClosed, IM_Obj_IntPtr_IntPtr_IntPtr_RetVoid)
+DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_SECURE_OPENED, CtorSecureOpened, IM_Obj_IntPtr_IntPtr_IntPtr_IntPtr_RetVoid)
+DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_VIRTUAL_DISPATCH, CtorVirtualDispatch, IM_Obj_IntPtr_IntPtr_RetVoid)
+DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_SECURE_VIRTUAL_DISPATCH, CtorSecureVirtualDispatch, IM_Obj_IntPtr_IntPtr_IntPtr_IntPtr_RetVoid)
+DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_COLLECTIBLE_CLOSED_STATIC, CtorCollectibleClosedStatic, IM_Obj_IntPtr_IntPtr_RetVoid)
+DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_COLLECTIBLE_OPENED, CtorCollectibleOpened, IM_Obj_IntPtr_IntPtr_IntPtr_RetVoid)
+DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_COLLECTIBLE_VIRTUAL_DISPATCH, CtorCollectibleVirtualDispatch, IM_Obj_IntPtr_IntPtr_IntPtr_RetVoid)
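+// Judging by the names alone, these ctor variants presumably map to the delegate
+// binding shapes the VM distinguishes: closed over an instance, closed over a static
+// first argument, open, and virtual dispatch, plus secure and collectible flavors.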
+
+DEFINE_CLASS(NULL, System, DBNull)
+DEFINE_FIELD(NULL, VALUE, Value)
+
+DEFINE_CLASS(NULLABLE, System, Nullable`1)
+
+// Keep this in sync with System.Globalization.NumberFormatInfo
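+// (Each DEFINE_FIELD_U below pairs a managed field with its slot in the unmanaged
+// NumberFormatInfo mirror, presumably so the runtime can assert the two layouts agree.)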
+DEFINE_CLASS_U(Globalization, NumberFormatInfo, NumberFormatInfo)
+DEFINE_FIELD_U(numberGroupSizes, NumberFormatInfo, cNumberGroup)
+DEFINE_FIELD_U(currencyGroupSizes, NumberFormatInfo, cCurrencyGroup)
+DEFINE_FIELD_U(percentGroupSizes, NumberFormatInfo, cPercentGroup)
+DEFINE_FIELD_U(positiveSign, NumberFormatInfo, sPositive)
+DEFINE_FIELD_U(negativeSign, NumberFormatInfo, sNegative)
+DEFINE_FIELD_U(numberDecimalSeparator, NumberFormatInfo, sNumberDecimal)
+DEFINE_FIELD_U(numberGroupSeparator, NumberFormatInfo, sNumberGroup)
+DEFINE_FIELD_U(currencyGroupSeparator, NumberFormatInfo, sCurrencyGroup)
+DEFINE_FIELD_U(currencyDecimalSeparator,NumberFormatInfo, sCurrencyDecimal)
+DEFINE_FIELD_U(currencySymbol, NumberFormatInfo, sCurrency)
+#ifndef FEATURE_COREFX_GLOBALIZATION
+DEFINE_FIELD_U(ansiCurrencySymbol, NumberFormatInfo, sAnsiCurrency)
+#endif
+DEFINE_FIELD_U(nanSymbol, NumberFormatInfo, sNaN)
+DEFINE_FIELD_U(positiveInfinitySymbol, NumberFormatInfo, sPositiveInfinity)
+DEFINE_FIELD_U(negativeInfinitySymbol, NumberFormatInfo, sNegativeInfinity)
+DEFINE_FIELD_U(percentDecimalSeparator,NumberFormatInfo, sPercentDecimal)
+DEFINE_FIELD_U(percentGroupSeparator, NumberFormatInfo, sPercentGroup)
+DEFINE_FIELD_U(percentSymbol, NumberFormatInfo, sPercent)
+DEFINE_FIELD_U(perMilleSymbol, NumberFormatInfo, sPerMille)
+DEFINE_FIELD_U(nativeDigits, NumberFormatInfo, sNativeDigits)
+#ifndef FEATURE_COREFX_GLOBALIZATION
+DEFINE_FIELD_U(m_dataItem, NumberFormatInfo, iDataItem)
+#endif
+DEFINE_FIELD_U(numberDecimalDigits, NumberFormatInfo, cNumberDecimals)
+DEFINE_FIELD_U(currencyDecimalDigits, NumberFormatInfo, cCurrencyDecimals)
+DEFINE_FIELD_U(currencyPositivePattern,NumberFormatInfo, cPosCurrencyFormat)
+DEFINE_FIELD_U(currencyNegativePattern,NumberFormatInfo, cNegCurrencyFormat)
+DEFINE_FIELD_U(numberNegativePattern, NumberFormatInfo, cNegativeNumberFormat)
+DEFINE_FIELD_U(percentPositivePattern, NumberFormatInfo, cPositivePercentFormat)
+DEFINE_FIELD_U(percentNegativePattern, NumberFormatInfo, cNegativePercentFormat)
+DEFINE_FIELD_U(percentDecimalDigits, NumberFormatInfo, cPercentDecimals)
+#ifndef FEATURE_CORECLR
+DEFINE_FIELD_U(digitSubstitution, NumberFormatInfo, iDigitSubstitution)
+#endif // !FEATURE_CORECLR
+DEFINE_FIELD_U(isReadOnly, NumberFormatInfo, bIsReadOnly)
+#ifndef FEATURE_COREFX_GLOBALIZATION
+DEFINE_FIELD_U(m_useUserOverride, NumberFormatInfo, bUseUserOverride)
+#endif
+DEFINE_FIELD_U(m_isInvariant, NumberFormatInfo, bIsInvariant)
+#ifndef FEATURE_CORECLR
+DEFINE_FIELD_U(validForParseAsNumber, NumberFormatInfo, bvalidForParseAsNumber)
+DEFINE_FIELD_U(validForParseAsCurrency,NumberFormatInfo, bvalidForParseAsCurrency)
+#endif // !FEATURE_CORECLR
+
+// Defined as element type alias
+// DEFINE_CLASS(OBJECT, System, Object)
+DEFINE_METHOD(OBJECT, CTOR, .ctor, IM_RetVoid)
+DEFINE_METHOD(OBJECT, FINALIZE, Finalize, IM_RetVoid)
+DEFINE_METHOD(OBJECT, TO_STRING, ToString, IM_RetStr)
+DEFINE_METHOD(OBJECT, GET_TYPE, GetType, IM_RetType)
+DEFINE_METHOD(OBJECT, GET_HASH_CODE, GetHashCode, IM_RetInt)
+DEFINE_METHOD(OBJECT, EQUALS, Equals, IM_Obj_RetBool)
+DEFINE_METHOD(OBJECT, FIELD_SETTER, FieldSetter, IM_Str_Str_Obj_RetVoid)
+DEFINE_METHOD(OBJECT, FIELD_GETTER, FieldGetter, IM_Str_Str_RefObj_RetVoid)
+
+DEFINE_CLASS(__CANON, System, __Canon)
+
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(OLE_AUT_BINDER, System, OleAutBinder)
+#endif // FEATURE_COMINTEROP
+
+DEFINE_CLASS(MONITOR, Threading, Monitor)
+DEFINE_METHOD(MONITOR, ENTER, Enter, SM_Obj_RetVoid)
+
+// Note: The size of the OverlappedData can be inflated by the CLR host
+DEFINE_CLASS_U(Threading, OverlappedData, NoClass)
+DEFINE_FIELD_U(m_asyncResult, OverlappedDataObject, m_asyncResult)
+DEFINE_FIELD_U(m_iocb, OverlappedDataObject, m_iocb)
+DEFINE_FIELD_U(m_iocbHelper, OverlappedDataObject, m_iocbHelper)
+DEFINE_FIELD_U(m_overlapped, OverlappedDataObject, m_overlapped)
+DEFINE_FIELD_U(m_userObject, OverlappedDataObject, m_userObject)
+DEFINE_FIELD_U(m_pinSelf, OverlappedDataObject, m_pinSelf)
+DEFINE_FIELD_U(m_AppDomainId, OverlappedDataObject, m_AppDomainId)
+DEFINE_FIELD_U(m_isArray, OverlappedDataObject, m_isArray)
+DEFINE_CLASS(OVERLAPPEDDATA, Threading, OverlappedData)
+
+DEFINE_CLASS(NATIVEOVERLAPPED, Threading, NativeOverlapped)
+
+
+DEFINE_CLASS(VOLATILE, Threading, Volatile)
+
+#define DEFINE_VOLATILE_METHODS(methodType, paramType) \
+ DEFINE_METHOD(VOLATILE, READ_##paramType, Read, methodType##_Ref##paramType##_Ret##paramType) \
+ DEFINE_METHOD(VOLATILE, WRITE_##paramType, Write, methodType##_Ref##paramType##_##paramType)
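+// For example, DEFINE_VOLATILE_METHODS(SM,Int) expands to
+//   DEFINE_METHOD(VOLATILE, READ_Int,  Read,  SM_RefInt_RetInt)
+//   DEFINE_METHOD(VOLATILE, WRITE_Int, Write, SM_RefInt_Int)
+// i.e. one static Volatile.Read and one Volatile.Write entry per supported type.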
+
+DEFINE_VOLATILE_METHODS(SM,Bool)
+DEFINE_VOLATILE_METHODS(SM,SByt)
+DEFINE_VOLATILE_METHODS(SM,Byte)
+DEFINE_VOLATILE_METHODS(SM,Shrt)
+DEFINE_VOLATILE_METHODS(SM,UShrt)
+DEFINE_VOLATILE_METHODS(SM,Int)
+DEFINE_VOLATILE_METHODS(SM,UInt)
+DEFINE_VOLATILE_METHODS(SM,Long)
+DEFINE_VOLATILE_METHODS(SM,ULong)
+DEFINE_VOLATILE_METHODS(SM,IntPtr)
+DEFINE_VOLATILE_METHODS(SM,UIntPtr)
+DEFINE_VOLATILE_METHODS(SM,Flt)
+DEFINE_VOLATILE_METHODS(SM,Dbl)
+DEFINE_VOLATILE_METHODS(GM,T)
+
+#undef DEFINE_VOLATILE_METHODS
+
+DEFINE_CLASS(PARAMETER, Reflection, ParameterInfo)
+
+DEFINE_CLASS(PARAMETER_MODIFIER, Reflection, ParameterModifier)
+
+// Keep this in sync with System.Security.PermissionSet
+DEFINE_CLASS_U(Security, PermissionSet, PermissionSetObject)
+DEFINE_FIELD_U(m_permSet, PermissionSetObject, _permSet)
+DEFINE_FIELD_U(m_Unrestricted, PermissionSetObject, _Unrestricted)
+DEFINE_FIELD_U(m_allPermissionsDecoded, PermissionSetObject, _allPermissionsDecoded)
+#ifdef FEATURE_CAS_POLICY
+DEFINE_FIELD_U(m_canUnrestrictedOverride,PermissionSetObject, _canUnrestrictedOverride)
+#endif // FEATURE_CAS_POLICY
+DEFINE_FIELD_U(m_ignoreTypeLoadFailures, PermissionSetObject, _ignoreTypeLoadFailures)
+DEFINE_FIELD_U(m_CheckedForNonCas, PermissionSetObject, _CheckedForNonCas)
+DEFINE_FIELD_U(m_ContainsCas, PermissionSetObject, _ContainsCas)
+DEFINE_FIELD_U(m_ContainsNonCas, PermissionSetObject, _ContainsNonCas)
+
+DEFINE_CLASS(PERMISSION_SET, Security, PermissionSet)
+DEFINE_METHOD(PERMISSION_SET, CTOR, .ctor, IM_Bool_RetVoid)
+DEFINE_METHOD(PERMISSION_SET, CREATE_SERIALIZED, CreateSerialized, SM_ArrObj_Bool_RefArrByte_OutPMS_HostProtectionResource_Bool_RetArrByte)
+#ifdef FEATURE_CAS_POLICY
+DEFINE_METHOD(PERMISSION_SET, SETUP_SECURITY, SetupSecurity, SM_RetVoid)
+DEFINE_METHOD(PERMISSION_SET, DECODE_XML, DecodeXml, IM_ArrByte_HostProtectionResource_HostProtectionResource_RetBool)
+DEFINE_METHOD(PERMISSION_SET, ENCODE_XML, EncodeXml, IM_RetArrByte)
+#endif // FEATURE_CAS_POLICY
+DEFINE_METHOD(PERMISSION_SET, CONTAINS, Contains, IM_IPermission_RetBool)
+DEFINE_METHOD(PERMISSION_SET, DEMAND, Demand, IM_RetVoid)
+DEFINE_METHOD(PERMISSION_SET, DEMAND_NON_CAS, DemandNonCAS, IM_RetVoid)
+DEFINE_METHOD(PERMISSION_SET, IS_UNRESTRICTED, IsUnrestricted, IM_RetBool)
+#ifdef FEATURE_CAS_POLICY
+DEFINE_METHOD(PERMISSION_SET, IS_SUBSET_OF, IsSubsetOf, IM_PMS_RetBool)
+DEFINE_METHOD(PERMISSION_SET, INTERSECT, Intersect, IM_PMS_RetPMS)
+#endif // FEATURE_CAS_POLICY
+DEFINE_METHOD(PERMISSION_SET, INPLACE_UNION, InplaceUnion, IM_PMS_RetVoid)
+DEFINE_METHOD(PERMISSION_SET, UNION, Union, IM_PMS_RetPMS)
+DEFINE_METHOD(PERMISSION_SET, IS_EMPTY, IsEmpty, IM_RetBool)
+DEFINE_METHOD(PERMISSION_SET, ADD_PERMISSION, AddPermission, IM_IPermission_RetIPermission)
+
+DEFINE_CLASS(NAMEDPERMISSION_SET, Security, NamedPermissionSet)
+
+#ifdef FEATURE_CAS_POLICY
+DEFINE_CLASS(PEFILE_EVIDENCE_FACTORY, Policy, PEFileEvidenceFactory)
+DEFINE_METHOD(PEFILE_EVIDENCE_FACTORY, CREATE_SECURITY_IDENTITY, CreateSecurityIdentity, SM_PEFile_Evidence_RetEvidence)
+#endif // FEATURE_CAS_POLICY
+
+DEFINE_CLASS_U(Security, PermissionListSet, PermissionListSetObject)
+DEFINE_FIELD_U(m_firstPermSetTriple, PermissionListSetObject, _firstPermSetTriple)
+DEFINE_FIELD_U(m_permSetTriples, PermissionListSetObject, _permSetTriples)
+#ifdef FEATURE_COMPRESSEDSTACK
+DEFINE_FIELD_U(m_zoneList, PermissionListSetObject, _zoneList)
+DEFINE_FIELD_U(m_originList, PermissionListSetObject, _originList)
+#endif // FEATURE_COMPRESSEDSTACK
+DEFINE_CLASS(PERMISSION_LIST_SET, Security, PermissionListSet)
+DEFINE_METHOD(PERMISSION_LIST_SET, CTOR, .ctor, IM_RetVoid)
+DEFINE_METHOD(PERMISSION_LIST_SET, CHECK_DEMAND_NO_THROW, CheckDemandNoThrow, IM_CodeAccessPermission_RetBool)
+DEFINE_METHOD(PERMISSION_LIST_SET, CHECK_SET_DEMAND_NO_THROW, CheckSetDemandNoThrow, IM_PMS_RetBool)
+DEFINE_METHOD(PERMISSION_LIST_SET, UPDATE, Update, IM_PMS_RetVoid)
+
+DEFINE_CLASS(PERMISSION_STATE, Permissions, PermissionState)
+
+DEFINE_CLASS(PERMISSION_TOKEN, Security, PermissionToken)
+
+DEFINE_CLASS(POINTER, Reflection, Pointer)
+
+DEFINE_CLASS_U(Reflection, Pointer, ReflectionPointer)
+DEFINE_FIELD_U(_ptr, ReflectionPointer, _ptr)
+DEFINE_FIELD_U(_ptrType, ReflectionPointer, _ptrType)
+
+DEFINE_CLASS(PROPERTY, Reflection, RuntimePropertyInfo)
+DEFINE_METHOD(PROPERTY, SET_VALUE, SetValue, IM_Obj_Obj_BindingFlags_Binder_ArrObj_CultureInfo_RetVoid)
+DEFINE_METHOD(PROPERTY, GET_VALUE, GetValue, IM_Obj_BindingFlags_Binder_ArrObj_CultureInfo_RetObj)
+DEFINE_METHOD(PROPERTY, GET_INDEX_PARAMETERS, GetIndexParameters, IM_RetArrParameterInfo)
+DEFINE_METHOD(PROPERTY, GET_TOKEN, get_MetadataToken, IM_RetInt)
+DEFINE_METHOD(PROPERTY, GET_MODULE, GetRuntimeModule, IM_RetModule)
+DEFINE_METHOD(PROPERTY, GET_SETTER, GetSetMethod, IM_Bool_RetMethodInfo)
+DEFINE_METHOD(PROPERTY, GET_GETTER, GetGetMethod, IM_Bool_RetMethodInfo)
+
+DEFINE_CLASS(PROPERTY_INFO, Reflection, PropertyInfo)
+
+#ifdef FEATURE_REMOTING
+DEFINE_CLASS(PROXY_ATTRIBUTE, Proxies, ProxyAttribute)
+
+DEFINE_CLASS_U(Proxies, RealProxy, RealProxyObject)
+DEFINE_FIELD_U(_tp, RealProxyObject, _tp)
+DEFINE_FIELD_U(_identity, RealProxyObject, _identity)
+DEFINE_FIELD_U(_serverObject, RealProxyObject, _serverObject)
+DEFINE_FIELD_U(_flags, RealProxyObject, _flags)
+DEFINE_FIELD_U(_optFlags, RealProxyObject, _optFlags)
+DEFINE_FIELD_U(_domainID, RealProxyObject, _domainID)
+DEFINE_FIELD_U(_srvIdentity, RealProxyObject, _srvIdentity)
+DEFINE_CLASS(REAL_PROXY, Proxies, RealProxy)
+DEFINE_METHOD(REAL_PROXY, PRIVATE_INVOKE, PrivateInvoke, IM_RefMessageData_Int_RetVoid)
+#ifdef FEATURE_COMINTEROP
+DEFINE_METHOD(REAL_PROXY, GETDCOMPROXY, GetCOMIUnknown, IM_Bool_RetIntPtr)
+DEFINE_METHOD(REAL_PROXY, SETDCOMPROXY, SetCOMIUnknown, IM_IntPtr_RetVoid)
+DEFINE_METHOD(REAL_PROXY, SUPPORTSINTERFACE, SupportsInterface, IM_RefGuid_RetIntPtr)
+
+#endif // FEATURE_COMINTEROP
+#endif // FEATURE_REMOTING
+
+DEFINE_CLASS(REFLECTION_PERMISSION, Permissions, ReflectionPermission)
+DEFINE_METHOD(REFLECTION_PERMISSION, CTOR, .ctor, IM_ReflectionPermissionFlag_RetVoid)
+
+DEFINE_CLASS(REFLECTION_PERMISSION_FLAG, Permissions, ReflectionPermissionFlag)
+
+#ifdef FEATURE_COMINTEROP_REGISTRATION
+DEFINE_CLASS(REGISTRATION_SERVICES, Interop, RegistrationServices)
+DEFINE_METHOD(REGISTRATION_SERVICES,REGISTER_ASSEMBLY, RegisterAssembly, IM_AssemblyBase_AssemblyRegistrationFlags_RetBool)
+DEFINE_METHOD(REGISTRATION_SERVICES,UNREGISTER_ASSEMBLY, UnregisterAssembly, IM_AssemblyBase_RetBool)
+#endif // FEATURE_COMINTEROP_REGISTRATION
+
+#ifdef FEATURE_RWLOCK
+DEFINE_CLASS_U(Threading, ReaderWriterLock, CRWLock)
+DEFINE_FIELD_U(_hWriterEvent, CRWLock, _hWriterEvent)
+DEFINE_FIELD_U(_hReaderEvent, CRWLock, _hReaderEvent)
+DEFINE_FIELD_U(_hObjectHandle, CRWLock, _hObjectHandle)
+DEFINE_FIELD_U(_dwState, CRWLock, _dwState)
+DEFINE_FIELD_U(_dwULockID, CRWLock, _dwULockID)
+DEFINE_FIELD_U(_dwLLockID, CRWLock, _dwLLockID)
+DEFINE_FIELD_U(_dwWriterID, CRWLock, _dwWriterID)
+DEFINE_FIELD_U(_dwWriterSeqNum, CRWLock, _dwWriterSeqNum)
+DEFINE_FIELD_U(_wWriterLevel, CRWLock, _wWriterLevel)
+#endif // FEATURE_RWLOCK
+
+#ifdef FEATURE_REMOTING
+DEFINE_CLASS(LEASE, Lifetime, Lease)
+DEFINE_METHOD(LEASE, RENEW_ON_CALL, RenewOnCall, IM_RetVoid)
+
+DEFINE_CLASS(REMOTING_PROXY, Proxies, RemotingProxy)
+DEFINE_METHOD(REMOTING_PROXY, INVOKE, Invoke, SM_Obj_RefMessageData_RetVoid)
+
+DEFINE_CLASS(REMOTING_SERVICES, Remoting, RemotingServices)
+DEFINE_METHOD(REMOTING_SERVICES, CHECK_CAST, CheckCast, SM_RealProxy_Class_RetBool)
+DEFINE_METHOD(REMOTING_SERVICES, GET_TYPE, GetType, SM_Obj_RetObj)
+DEFINE_METHOD(REMOTING_SERVICES, WRAP, Wrap, SM_ContextBoundObject_RetObj)
+DEFINE_METHOD(REMOTING_SERVICES, CREATE_PROXY_FOR_DOMAIN,CreateProxyForDomain, SM_Int_IntPtr_RetObj)
+DEFINE_METHOD(REMOTING_SERVICES, GET_SERVER_CONTEXT_FOR_PROXY,GetServerContextForProxy, SM_Obj_RetIntPtr)
+DEFINE_METHOD(REMOTING_SERVICES, GET_SERVER_DOMAIN_ID_FOR_PROXY,GetServerDomainIdForProxy, SM_Obj_RetInt)
+DEFINE_METHOD(REMOTING_SERVICES, MARSHAL_TO_BUFFER, MarshalToBuffer, SM_Obj_Bool_RetArrByte)
+DEFINE_METHOD(REMOTING_SERVICES, UNMARSHAL_FROM_BUFFER, UnmarshalFromBuffer, SM_ArrByte_Bool_RetObj)
+DEFINE_METHOD(REMOTING_SERVICES, DOMAIN_UNLOADED, DomainUnloaded, SM_Int_RetVoid)
+#endif // FEATURE_REMOTING
+
+
+DEFINE_CLASS(METADATA_IMPORT, Reflection, MetadataImport)
+DEFINE_METHOD(METADATA_IMPORT, THROW_ERROR, ThrowError, SM_Int_RetVoid)
+
+DEFINE_CLASS(RESOLVER, System, Resolver)
+DEFINE_METHOD(RESOLVER, GET_JIT_CONTEXT, GetJitContext, IM_RefInt_RetRuntimeType)
+DEFINE_METHOD(RESOLVER, GET_CODE_INFO, GetCodeInfo, IM_RefInt_RefInt_RefInt_RetArrByte)
+DEFINE_METHOD(RESOLVER, GET_LOCALS_SIGNATURE, GetLocalsSignature, IM_RetArrByte)
+DEFINE_METHOD(RESOLVER, GET_EH_INFO, GetEHInfo, IM_Int_VoidPtr_RetVoid)
+DEFINE_METHOD(RESOLVER, GET_RAW_EH_INFO, GetRawEHInfo, IM_RetArrByte)
+DEFINE_METHOD(RESOLVER, GET_STRING_LITERAL, GetStringLiteral, IM_Int_RetStr)
+DEFINE_METHOD(RESOLVER, RESOLVE_TOKEN, ResolveToken, IM_Int_RefIntPtr_RefIntPtr_RefIntPtr_RetVoid)
+DEFINE_METHOD(RESOLVER, RESOLVE_SIGNATURE, ResolveSignature, IM_IntInt_RetArrByte)
+
+DEFINE_CLASS(RESOURCE_MANAGER, Resources, ResourceManager)
+
+DEFINE_CLASS(RTFIELD, Reflection, RtFieldInfo)
+DEFINE_METHOD(RTFIELD, GET_FIELDHANDLE, GetFieldHandle, IM_RetIntPtr)
+
+DEFINE_CLASS(RUNTIME_HELPERS, CompilerServices, RuntimeHelpers)
+DEFINE_METHOD(RUNTIME_HELPERS, PREPARE_CONSTRAINED_REGIONS, PrepareConstrainedRegions, SM_RetVoid)
+DEFINE_METHOD(RUNTIME_HELPERS, PREPARE_CONSTRAINED_REGIONS_NOOP, PrepareConstrainedRegionsNoOP, SM_RetVoid)
+DEFINE_METHOD(RUNTIME_HELPERS, EXECUTE_BACKOUT_CODE_HELPER, ExecuteBackoutCodeHelper, SM_Obj_Obj_Bool_RetVoid)
+
+DEFINE_CLASS(JIT_HELPERS, CompilerServices, JitHelpers)
+#ifdef _DEBUG
+DEFINE_METHOD(JIT_HELPERS, UNSAFE_CAST, UnsafeCastInternal, NoSig)
+DEFINE_METHOD(JIT_HELPERS, UNSAFE_ENUM_CAST, UnsafeEnumCastInternal, NoSig)
+DEFINE_METHOD(JIT_HELPERS, UNSAFE_ENUM_CAST_LONG, UnsafeEnumCastLongInternal, NoSig)
+DEFINE_METHOD(JIT_HELPERS, UNSAFE_CAST_TO_STACKPTR,UnsafeCastToStackPointerInternal, NoSig)
+#else // _DEBUG
+DEFINE_METHOD(JIT_HELPERS, UNSAFE_CAST, UnsafeCast, NoSig)
+DEFINE_METHOD(JIT_HELPERS, UNSAFE_ENUM_CAST, UnsafeEnumCast, NoSig)
+DEFINE_METHOD(JIT_HELPERS, UNSAFE_ENUM_CAST_LONG, UnsafeEnumCastLong, NoSig)
+DEFINE_METHOD(JIT_HELPERS, UNSAFE_CAST_TO_STACKPTR,UnsafeCastToStackPointer, NoSig)
+#endif // _DEBUG
+
+DEFINE_CLASS(INTERLOCKED, Threading, Interlocked)
+DEFINE_METHOD(INTERLOCKED, COMPARE_EXCHANGE_T, CompareExchange, GM_RefT_T_T_RetT)
+DEFINE_METHOD(INTERLOCKED, COMPARE_EXCHANGE_OBJECT,CompareExchange, SM_RefObject_Object_Object_RetObject)
+
+DEFINE_CLASS(PINNING_HELPER, CompilerServices, PinningHelper)
+DEFINE_FIELD(PINNING_HELPER, M_DATA, m_data)
+
+DEFINE_CLASS(RUNTIME_WRAPPED_EXCEPTION, CompilerServices, RuntimeWrappedException)
+DEFINE_METHOD(RUNTIME_WRAPPED_EXCEPTION, OBJ_CTOR, .ctor, IM_Obj_RetVoid)
+DEFINE_FIELD(RUNTIME_WRAPPED_EXCEPTION, WRAPPED_EXCEPTION, m_wrappedException)
+
+DEFINE_CLASS_U(Interop, SafeHandle, SafeHandle)
+DEFINE_FIELD_U(handle, SafeHandle, m_handle)
+DEFINE_FIELD_U(_state, SafeHandle, m_state)
+DEFINE_FIELD_U(_ownsHandle, SafeHandle, m_ownsHandle)
+DEFINE_FIELD_U(_fullyInitialized, SafeHandle, m_fullyInitialized)
+DEFINE_CLASS(SAFE_HANDLE, Interop, SafeHandle)
+DEFINE_FIELD(SAFE_HANDLE, HANDLE, handle)
+DEFINE_METHOD(SAFE_HANDLE, GET_IS_INVALID, get_IsInvalid, IM_RetBool)
+DEFINE_METHOD(SAFE_HANDLE, RELEASE_HANDLE, ReleaseHandle, IM_RetBool)
+DEFINE_METHOD(SAFE_HANDLE, DISPOSE, Dispose, IM_RetVoid)
+DEFINE_METHOD(SAFE_HANDLE, DISPOSE_BOOL, Dispose, IM_Bool_RetVoid)
+
+#ifdef FEATURE_CAS_POLICY
+DEFINE_CLASS(SAFE_PEFILE_HANDLE, SafeHandles, SafePEFileHandle)
+#endif // FEATURE_CAS_POLICY
+
+#ifndef FEATURE_CORECLR
+DEFINE_CLASS(SAFE_TOKENHANDLE, SafeHandles, SafeAccessTokenHandle)
+
+DEFINE_CLASS(SAFE_TYPENAMEPARSER_HANDLE, System, SafeTypeNameParserHandle)
+#endif // !FEATURE_CORECLR
+
+#ifdef FEATURE_COMPRESSEDSTACK
+DEFINE_CLASS(SAFE_CSHANDLE, Threading, SafeCompressedStackHandle)
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+
+
+DEFINE_CLASS(SECURITY_ACTION, Permissions, SecurityAction)
+DEFINE_CLASS(HOST_PROTECTION_RESOURCE, Permissions, HostProtectionResource)
+
+DEFINE_CLASS(SECURITY_ATTRIBUTE, Permissions, SecurityAttribute)
+DEFINE_METHOD(SECURITY_ATTRIBUTE, FIND_SECURITY_ATTRIBUTE_TYPE_HANDLE, FindSecurityAttributeTypeHandle, SM_Str_RetIntPtr)
+
+#ifdef FEATURE_CAS_POLICY
+DEFINE_CLASS(SECURITY_ELEMENT, Security, SecurityElement)
+DEFINE_METHOD(SECURITY_ELEMENT, TO_STRING, ToString, IM_RetStr)
+#endif // FEATURE_CAS_POLICY
+
+DEFINE_CLASS(SECURITY_ENGINE, Security, CodeAccessSecurityEngine)
+DEFINE_METHOD(SECURITY_ENGINE, CHECK_HELPER, CheckHelper, SM_CS_PMS_PMS_CodeAccessPermission_PermissionToken_RuntimeMethodHandleInternal_Assembly_SecurityAction_RetVoid)
+DEFINE_METHOD(SECURITY_ENGINE, CHECK_SET_HELPER, CheckSetHelper, SM_CS_PMS_PMS_PMS_RuntimeMethodHandleInternal_Assembly_SecurityAction_RetVoid)
+#ifdef FEATURE_APTCA
+DEFINE_METHOD(SECURITY_ENGINE, THROW_SECURITY_EXCEPTION, ThrowSecurityException, SM_Assembly_PMS_PMS_RuntimeMethodHandleInternal_SecurityAction_Obj_IPermission_RetVoid)
+#endif // FEATURE_APTCA
+#ifdef FEATURE_CAS_POLICY
+DEFINE_METHOD(SECURITY_ENGINE, RESOLVE_GRANT_SET, ResolveGrantSet, SM_Evidence_RefInt_Bool_RetPMS)
+DEFINE_METHOD(SECURITY_ENGINE, PRE_RESOLVE, PreResolve, SM_RefBool_RefBool_RetVoid)
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_PLS
+DEFINE_METHOD(SECURITY_ENGINE, UPDATE_APPDOMAIN_PLS, UpdateAppDomainPLS, SM_PermissionListSet_PMS_PMS_RetPermissionListSet)
+#endif // FEATURE_PLS
+
+#ifdef FEATURE_CAS_POLICY
+#ifdef FEATURE_NONGENERIC_COLLECTIONS
+DEFINE_METHOD(SECURITY_ENGINE, GET_ZONE_AND_ORIGIN_HELPER, GetZoneAndOriginHelper, SM_CS_PMS_PMS_ArrayList_ArrayList_RetVoid)
+#else
+#error Need replacement for GetZoneAndOriginHelper
+#endif // FEATURE_NONGENERIC_COLLECTIONS
+DEFINE_METHOD(SECURITY_ENGINE, REFLECTION_TARGET_DEMAND_HELPER, ReflectionTargetDemandHelper, SM_Int_PMS_RetVoid)
+DEFINE_METHOD(SECURITY_ENGINE, REFLECTION_TARGET_DEMAND_HELPER_WITH_CONTEXT, ReflectionTargetDemandHelper, SM_Int_PMS_Resolver_RetVoid)
+DEFINE_METHOD(SECURITY_ENGINE, CHECK_GRANT_SET_HELPER, CheckGrantSetHelper, SM_PMS_RetVoid)
+#endif // FEATURE_CAS_POLICY
+
+DEFINE_CLASS(SECURITY_EXCEPTION, Security, SecurityException)
+#ifdef FEATURE_CAS_POLICY
+DEFINE_METHOD(SECURITY_EXCEPTION, CTOR, .ctor, IM_Str_Type_Str_RetVoid)
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_CAS_POLICY
+DEFINE_CLASS(HOST_PROTECTION_EXCEPTION, Security, HostProtectionException)
+DEFINE_METHOD(HOST_PROTECTION_EXCEPTION, CTOR, .ctor, IM_HPR_HPR_RetVoid)
+#endif // FEATURE_CAS_POLICY
+
+DEFINE_CLASS(SECURITY_MANAGER, Security, SecurityManager)
+#ifdef FEATURE_CAS_POLICY
+DEFINE_METHOD(SECURITY_MANAGER, RESOLVE_CAS_POLICY, ResolveCasPolicy, SM_Evidence_PMS_PMS_PMS_PMS_int_Bool_RetPMS)
+#endif
+
+DEFINE_CLASS(SECURITY_PERMISSION, Permissions, SecurityPermission)
+DEFINE_METHOD(SECURITY_PERMISSION, CTOR, .ctor, IM_SecurityPermissionFlag_RetVoid)
+#ifdef FEATURE_CAS_POLICY
+DEFINE_METHOD(SECURITY_PERMISSION, TOXML, ToXml, IM_RetSecurityElement)
+#endif // FEATURE_CAS_POLICY
+
+DEFINE_CLASS(SECURITY_PERMISSION_FLAG,Permissions, SecurityPermissionFlag)
+
+DEFINE_CLASS(SECURITY_RUNTIME, Security, SecurityRuntime)
+DEFINE_METHOD(SECURITY_RUNTIME, FRAME_DESC_HELPER, FrameDescHelper, SM_FrameSecurityDescriptor_IPermission_PermissionToken_RuntimeMethodHandleInternal_RetBool)
+DEFINE_METHOD(SECURITY_RUNTIME, FRAME_DESC_SET_HELPER, FrameDescSetHelper, SM_FrameSecurityDescriptor_PMS_OutPMS_RuntimeMethodHandleInternal_RetBool)
+#ifdef FEATURE_COMPRESSEDSTACK
+DEFINE_METHOD(SECURITY_RUNTIME, CHECK_DYNAMIC_METHOD_HELPER, CheckDynamicMethodHelper, SM_DynamicResolver_IPermission_PermissionToken_RuntimeMethodHandleInternal_RetBool)
+DEFINE_METHOD(SECURITY_RUNTIME, CHECK_DYNAMIC_METHOD_SET_HELPER, CheckDynamicMethodSetHelper, SM_DynamicResolver_PMS_OutPMS_RuntimeMethodHandleInternal_RetBool)
+#endif // FEATURE_COMPRESSEDSTACK
+
+#ifdef FEATURE_REMOTING
+DEFINE_CLASS(SERVER_IDENTITY, Remoting, ServerIdentity)
+DEFINE_FIELD(SERVER_IDENTITY, SERVER_CONTEXT, _srvCtx)
+#endif // FEATURE_REMOTING
+#ifdef FEATURE_COMPRESSEDSTACK
+DEFINE_CLASS(DOMAIN_COMPRESSED_STACK, Threading, DomainCompressedStack)
+DEFINE_METHOD(DOMAIN_COMPRESSED_STACK, CREATE_MANAGED_OBJECT, CreateManagedObject, SM_IntPtr_RetDCS)
+DEFINE_CLASS(COMPRESSED_STACK, Threading, CompressedStack)
+DEFINE_METHOD(COMPRESSED_STACK, RUN, Run, SM_CompressedStack_ContextCallback_Object_RetVoid)
+#endif // FEATURE_COMPRESSEDSTACK
+
+DEFINE_CLASS(SHARED_STATICS, System, SharedStatics)
+DEFINE_FIELD(SHARED_STATICS, SHARED_STATICS, _sharedStatics)
+
+#ifdef FEATURE_REMOTING
+DEFINE_CLASS(STACK_BUILDER_SINK, Messaging, StackBuilderSink)
+DEFINE_METHOD(STACK_BUILDER_SINK, PRIVATE_PROCESS_MESSAGE,_PrivateProcessMessage, IM_IntPtr_ArrObj_Obj_RefArrObj_RetObj)
+#endif
+
+DEFINE_CLASS_U(Diagnostics, StackFrameHelper, StackFrameHelper)
+DEFINE_FIELD_U(targetThread, StackFrameHelper, TargetThread)
+DEFINE_FIELD_U(rgiOffset, StackFrameHelper, rgiOffset)
+DEFINE_FIELD_U(rgiILOffset, StackFrameHelper, rgiILOffset)
+DEFINE_FIELD_U(rgMethodBase, StackFrameHelper, rgMethodBase)
+DEFINE_FIELD_U(dynamicMethods, StackFrameHelper, dynamicMethods)
+DEFINE_FIELD_U(rgMethodHandle, StackFrameHelper, rgMethodHandle)
+DEFINE_FIELD_U(rgFilename, StackFrameHelper, rgFilename)
+DEFINE_FIELD_U(rgiLineNumber, StackFrameHelper, rgiLineNumber)
+DEFINE_FIELD_U(rgiColumnNumber, StackFrameHelper, rgiColumnNumber)
+#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
+DEFINE_FIELD_U(rgiLastFrameFromForeignExceptionStackTrace, StackFrameHelper, rgiLastFrameFromForeignExceptionStackTrace)
+#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
+DEFINE_FIELD_U(iFrameCount, StackFrameHelper, iFrameCount)
+DEFINE_FIELD_U(fNeedFileInfo, StackFrameHelper, fNeedFileInfo)
+
+DEFINE_CLASS(STACK_TRACE, Diagnostics, StackTrace)
+DEFINE_METHOD(STACK_TRACE, GET_MANAGED_STACK_TRACE_HELPER, GetManagedStackTraceStringHelper, SM_Bool_RetStr)
+
+DEFINE_CLASS(STREAM, IO, Stream)
+
+// Defined as element type alias
+// DEFINE_CLASS(INTPTR, System, IntPtr)
+DEFINE_FIELD(INTPTR, ZERO, Zero)
+
+// Defined as element type alias
+// DEFINE_CLASS(UINTPTR, System, UIntPtr)
+DEFINE_FIELD(UINTPTR, ZERO, Zero)
+
+// Defined as element type alias
+// DEFINE_CLASS(STRING, System, String)
+DEFINE_FIELD(STRING, M_FIRST_CHAR, m_firstChar)
+DEFINE_FIELD(STRING, EMPTY, Empty)
+DEFINE_METHOD(STRING, CREATE_STRING, CreateString, SM_PtrSByt_Int_Int_Encoding_RetStr)
+DEFINE_METHOD(STRING, CTOR_CHARPTR, .ctor, IM_PtrChar_RetVoid)
+DEFINE_METHOD(STRING, CTORF_CHARARRAY, CtorCharArray, IM_ArrChar_RetStr)
+DEFINE_METHOD(STRING, CTORF_CHARARRAY_START_LEN,CtorCharArrayStartLength, IM_ArrChar_Int_Int_RetStr)
+DEFINE_METHOD(STRING, CTORF_CHAR_COUNT, CtorCharCount, IM_Char_Int_RetStr)
+DEFINE_METHOD(STRING, CTORF_CHARPTR, CtorCharPtr, IM_PtrChar_RetStr)
+DEFINE_METHOD(STRING, CTORF_CHARPTR_START_LEN,CtorCharPtrStartLength, IM_PtrChar_Int_Int_RetStr)
+DEFINE_METHOD(STRING, INTERNAL_COPY, InternalCopy, SM_Str_IntPtr_Int_RetVoid)
+DEFINE_METHOD(STRING, WCSLEN, wcslen, SM_PtrChar_RetInt)
+DEFINE_PROPERTY(STRING, LENGTH, Length, Int)
+
+DEFINE_CLASS_U(Text, StringBuilder, StringBufferObject)
+DEFINE_FIELD_U(m_ChunkPrevious, StringBufferObject, m_ChunkPrevious)
+DEFINE_FIELD_U(m_MaxCapacity, StringBufferObject, m_MaxCapacity)
+DEFINE_FIELD_U(m_ChunkLength, StringBufferObject, m_ChunkLength)
+DEFINE_FIELD_U(m_ChunkOffset, StringBufferObject, m_ChunkOffset)
+DEFINE_CLASS(STRING_BUILDER, Text, StringBuilder)
+DEFINE_PROPERTY(STRING_BUILDER, LENGTH, Length, Int)
+DEFINE_PROPERTY(STRING_BUILDER, CAPACITY, Capacity, Int)
+DEFINE_METHOD(STRING_BUILDER, CTOR_INT, .ctor, IM_Int_RetVoid)
+DEFINE_METHOD(STRING_BUILDER, TO_STRING, ToString, IM_RetStr)
+DEFINE_METHOD(STRING_BUILDER, INTERNAL_COPY, InternalCopy, IM_IntPtr_Int_RetVoid)
+DEFINE_METHOD(STRING_BUILDER, REPLACE_BUFFER_INTERNAL,ReplaceBufferInternal, IM_PtrChar_Int_RetVoid)
+DEFINE_METHOD(STRING_BUILDER, REPLACE_BUFFER_ANSI_INTERNAL,ReplaceBufferAnsiInternal, IM_PtrSByt_Int_RetVoid)
+
+DEFINE_CLASS(STRONG_NAME_KEY_PAIR, Reflection, StrongNameKeyPair)
+#ifndef FEATURE_CORECLR
+DEFINE_METHOD(STRONG_NAME_KEY_PAIR, GET_KEY_PAIR, GetKeyPair, IM_RefObject_RetBool)
+#endif
+
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+DEFINE_CLASS_U(Threading, SynchronizationContext, SynchronizationContextObject)
+DEFINE_FIELD_U(_props, SynchronizationContextObject, _props)
+DEFINE_CLASS(SYNCHRONIZATION_CONTEXT, Threading, SynchronizationContext)
+DEFINE_METHOD(SYNCHRONIZATION_CONTEXT, INVOKE_WAIT_METHOD_HELPER, InvokeWaitMethodHelper, SM_SyncCtx_ArrIntPtr_Bool_Int_RetInt)
+#endif // FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+
+#ifdef FEATURE_COMINTEROP_TLB_SUPPORT
+DEFINE_CLASS(TCE_EVENT_ITF_INFO, InteropTCE, EventItfInfo)
+DEFINE_METHOD(TCE_EVENT_ITF_INFO, CTOR, .ctor, IM_Str_Str_Str_Assembly_Assembly_RetVoid)
+#endif // FEATURE_COMINTEROP_TLB_SUPPORT
+
+DEFINE_CLASS(CONTEXTCALLBACK, Threading, ContextCallback)
+
+#if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+DEFINE_CLASS_U(Security, SecurityContext, SecurityContextObject)
+DEFINE_FIELD_U(_executionContext, SecurityContextObject, _executionContext)
+#if defined(FEATURE_IMPERSONATION)
+DEFINE_FIELD_U(_windowsIdentity, SecurityContextObject, _windowsIdentity)
+#endif
+DEFINE_FIELD_U(_compressedStack, SecurityContextObject, _compressedStack)
+DEFINE_FIELD_U(_disableFlow, SecurityContextObject, _disableFlow)
+DEFINE_FIELD_U(isNewCapture, SecurityContextObject, _isNewCapture)
+DEFINE_CLASS(SECURITYCONTEXT, Security, SecurityContext)
+DEFINE_METHOD(SECURITYCONTEXT, RUN, Run, SM_SecurityContext_ContextCallback_Object_RetVoid)
+#endif // #if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+
+#ifndef FEATURE_CORECLR
+DEFINE_CLASS_U(Threading, ExecutionContext, ExecutionContextObject)
+#ifdef FEATURE_CAS_POLICY
+DEFINE_FIELD_U(_hostExecutionContext, ExecutionContextObject, _hostExecutionContext)
+#endif // FEATURE_CAS_POLICY
+DEFINE_FIELD_U(_syncContext, ExecutionContextObject, _syncContext)
+#if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+DEFINE_FIELD_U(_securityContext, ExecutionContextObject, _securityContext)
+#endif // #if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+#ifdef FEATURE_REMOTING
+DEFINE_FIELD_U(_logicalCallContext, ExecutionContextObject, _logicalCallContext)
+DEFINE_FIELD_U(_illogicalCallContext, ExecutionContextObject, _illogicalCallContext)
+#endif // #ifdef FEATURE_REMOTING
+DEFINE_CLASS(EXECUTIONCONTEXT, Threading, ExecutionContext)
+DEFINE_METHOD(EXECUTIONCONTEXT, RUN, Run, SM_ExecutionContext_ContextCallback_Object_Bool_RetVoid)
+#endif //FEATURE_CORECLR
+
+#ifdef _DEBUG
+DEFINE_CLASS(STACKCRAWMARK, Threading, StackCrawlMark)
+#endif
+
+DEFINE_CLASS(CROSS_CONTEXT_DELEGATE, Threading, InternalCrossContextDelegate)
+
+DEFINE_CLASS_U(Threading, Thread, ThreadBaseObject)
+#ifdef FEATURE_REMOTING
+DEFINE_FIELD_U(m_Context, ThreadBaseObject, m_ExposedContext)
+#endif
+#ifndef FEATURE_CORECLR
+DEFINE_FIELD_U(m_ExecutionContext, ThreadBaseObject, m_ExecutionContext)
+#endif
+DEFINE_FIELD_U(m_Name, ThreadBaseObject, m_Name)
+DEFINE_FIELD_U(m_Delegate, ThreadBaseObject, m_Delegate)
+#ifdef FEATURE_LEAK_CULTURE_INFO
+DEFINE_FIELD_U(m_CurrentCulture, ThreadBaseObject, m_CurrentUserCulture)
+DEFINE_FIELD_U(m_CurrentUICulture, ThreadBaseObject, m_CurrentUICulture)
+#endif
+DEFINE_FIELD_U(m_ThreadStartArg, ThreadBaseObject, m_ThreadStartArg)
+DEFINE_FIELD_U(DONT_USE_InternalThread, ThreadBaseObject, m_InternalThread)
+DEFINE_FIELD_U(m_Priority, ThreadBaseObject, m_Priority)
+DEFINE_CLASS(THREAD, Threading, Thread)
+#ifndef FEATURE_LEAK_CULTURE_INFO
+DEFINE_FIELD(THREAD, CULTURE, m_CurrentCulture)
+DEFINE_FIELD(THREAD, UI_CULTURE, m_CurrentUICulture)
+#endif
+#ifdef FEATURE_IMPERSONATION
+DEFINE_METHOD(THREAD, SET_PRINCIPAL_INTERNAL, SetPrincipalInternal, IM_IPrincipal_RetVoid)
+#endif
+#ifdef FEATURE_REMOTING
+DEFINE_STATIC_PROPERTY(THREAD, CURRENT_CONTEXT, CurrentContext, Context)
+#endif
+DEFINE_SET_PROPERTY(THREAD, CULTURE, CurrentCulture, CultureInfo)
+DEFINE_SET_PROPERTY(THREAD, UI_CULTURE, CurrentUICulture, CultureInfo)
+DEFINE_STATIC_PROPERTY(THREAD, CURRENT_THREAD, CurrentThread, Thread)
+#ifdef FEATURE_REMOTING
+DEFINE_METHOD(THREAD, COMPLETE_CROSSCONTEXTCALLBACK, CompleteCrossContextCallback, SM_CrossContextDelegate_ArrObj_RetObj)
+#endif
+DEFINE_METHOD(THREAD, INTERNAL_GET_CURRENT_THREAD, InternalGetCurrentThread, SM_RetIntPtr)
+
+DEFINE_CLASS(PARAMETERIZEDTHREADSTART, Threading, ParameterizedThreadStart)
+
+DEFINE_CLASS(IOCB_HELPER, Threading, _IOCompletionCallback)
+DEFINE_METHOD(IOCB_HELPER, PERFORM_IOCOMPLETION_CALLBACK, PerformIOCompletionCallback, SM_UInt_UInt_PtrNativeOverlapped_RetVoid)
+
+DEFINE_CLASS(TPWAITORTIMER_HELPER, Threading, _ThreadPoolWaitOrTimerCallback)
+DEFINE_METHOD(TPWAITORTIMER_HELPER, PERFORM_WAITORTIMER_CALLBACK, PerformWaitOrTimerCallback, SM_Obj_Bool_RetVoid)
+
+DEFINE_CLASS(TP_WAIT_CALLBACK, Threading, _ThreadPoolWaitCallback)
+DEFINE_METHOD(TP_WAIT_CALLBACK, PERFORM_WAIT_CALLBACK, PerformWaitCallback, SM_RetBool)
+
+DEFINE_CLASS(TIMER_QUEUE, Threading, TimerQueue)
+DEFINE_METHOD(TIMER_QUEUE, APPDOMAIN_TIMER_CALLBACK, AppDomainTimerCallback, SM_RetVoid)
+
+DEFINE_CLASS(TIMESPAN, System, TimeSpan)
+
+#ifdef FEATURE_REMOTING
+DEFINE_CLASS_U(Proxies, __TransparentProxy, TransparentProxyObject)
+DEFINE_FIELD_U(_rp, TransparentProxyObject, _rp)
+DEFINE_FIELD_U(_pMT, TransparentProxyObject, _pMT)
+DEFINE_FIELD_U(_pInterfaceMT, TransparentProxyObject, _pInterfaceMT)
+DEFINE_FIELD_U(_stub, TransparentProxyObject, _stub)
+DEFINE_FIELD_U(_stubData, TransparentProxyObject, _stubData)
+DEFINE_CLASS(TRANSPARENT_PROXY, Proxies, __TransparentProxy)
+#endif
+
+DEFINE_CLASS(TYPE, System, Type)
+DEFINE_METHOD(TYPE, GET_TYPE_FROM_HANDLE, GetTypeFromHandle, SM_RuntimeTypeHandle_RetType)
+DEFINE_PROPERTY(TYPE, IS_IMPORT, IsImport, Bool)
+
+DEFINE_CLASS(TYPE_DELEGATOR, Reflection, TypeDelegator)
+
+DEFINE_CLASS(UI_PERMISSION, Permissions, UIPermission)
+DEFINE_METHOD(UI_PERMISSION, CTOR, .ctor, IM_PermissionState_RetVoid)
+
+DEFINE_CLASS(UNHANDLED_EVENTARGS, System, UnhandledExceptionEventArgs)
+DEFINE_METHOD(UNHANDLED_EVENTARGS, CTOR, .ctor, IM_Obj_Bool_RetVoid)
+
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+DEFINE_CLASS(FIRSTCHANCE_EVENTARGS, ExceptionServices, FirstChanceExceptionEventArgs)
+DEFINE_METHOD(FIRSTCHANCE_EVENTARGS, CTOR, .ctor, IM_Exception_RetVoid)
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+DEFINE_CLASS(ASSEMBLYLOADCONTEXT, Loader, AssemblyLoadContext)
+DEFINE_METHOD(ASSEMBLYLOADCONTEXT, RESOLVE, Resolve, SM_IntPtr_AssemblyName_RetAssemblyBase)
+DEFINE_METHOD(ASSEMBLYLOADCONTEXT, RESOLVEUNMANAGEDDLL, ResolveUnmanagedDll, SM_Str_IntPtr_RetIntPtr)
+
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+
+DEFINE_CLASS(LAZY, System, Lazy`1)
+
+DEFINE_CLASS(LAZY_INITIALIZER, Threading, LazyInitializer)
+DEFINE_CLASS(LAZY_HELPERS, Threading, LazyHelpers`1)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(UNKNOWN_WRAPPER, Interop, UnknownWrapper)
+#endif
+
+DEFINE_CLASS(VALUE_TYPE, System, ValueType)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(VARIANT_WRAPPER, Interop, VariantWrapper)
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_IMPERSONATION
+DEFINE_CLASS(WINDOWS_IDENTITY, Principal, WindowsIdentity)
+DEFINE_METHOD(WINDOWS_IDENTITY, SERIALIZATION_CTOR, .ctor, IM_SerInfo_RetVoid)
+#endif
+#ifdef FEATURE_X509
+DEFINE_CLASS(X509_CERTIFICATE, X509, X509Certificate)
+DEFINE_METHOD(X509_CERTIFICATE, CTOR, .ctor, IM_ArrByte_RetVoid)
+#endif // FEATURE_X509
+
+DEFINE_CLASS(GC, System, GC)
+DEFINE_METHOD(GC, KEEP_ALIVE, KeepAlive, SM_Obj_RetVoid)
+DEFINE_METHOD(GC, COLLECT, Collect, SM_RetVoid)
+DEFINE_METHOD(GC, WAIT_FOR_PENDING_FINALIZERS, WaitForPendingFinalizers, SM_RetVoid)
+
+DEFINE_CLASS_U(System, WeakReference, WeakReferenceObject)
+DEFINE_FIELD_U(m_handle, WeakReferenceObject, m_Handle)
+DEFINE_CLASS(WEAKREFERENCE, System, WeakReference)
+
+DEFINE_CLASS_U(Threading, WaitHandle, WaitHandleBase)
+DEFINE_FIELD_U(safeWaitHandle, WaitHandleBase, m_safeHandle)
+DEFINE_FIELD_U(waitHandle, WaitHandleBase, m_handle)
+DEFINE_FIELD_U(hasThreadAffinity, WaitHandleBase, m_hasThreadAffinity)
+
+DEFINE_CLASS(DEBUGGER, Diagnostics, Debugger)
+DEFINE_METHOD(DEBUGGER, BREAK_CAN_THROW, BreakCanThrow, SM_RetVoid)
+
+DEFINE_CLASS(BUFFER, System, Buffer)
+DEFINE_METHOD(BUFFER, MEMCPY_PTRBYTE_ARRBYTE, Memcpy, SM_PtrByte_Int_ArrByte_Int_Int_RetVoid)
+DEFINE_METHOD(BUFFER, MEMCPY, Memcpy, SM_PtrByte_PtrByte_Int_RetVoid)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(WINDOWSRUNTIMEMARSHAL, WinRT, WindowsRuntimeMarshal)
+#ifdef FEATURE_COMINTEROP_WINRT_MANAGED_ACTIVATION
+DEFINE_METHOD(WINDOWSRUNTIMEMARSHAL, GET_ACTIVATION_FACTORY_FOR_TYPE, GetActivationFactoryForType, SM_Type_RetIntPtr)
+#ifdef FEATURE_COMINTEROP_WINRT_DESKTOP_HOST
+DEFINE_METHOD(WINDOWSRUNTIMEMARSHAL, GET_CLASS_ACTIVATOR_FOR_APPLICATION, GetClassActivatorForApplication, SM_Str_RetIntPtr)
+#endif // FEATURE_COMINTEROP_WINRT_DESKTOP_HOST
+#endif // FEATURE_COMINTEROP_WINRT_MANAGED_ACTIVATION
+
+DEFINE_CLASS(IACTIVATIONFACTORY, WinRT, IActivationFactory)
+DEFINE_METHOD(IACTIVATIONFACTORY, ACTIVATE_INSTANCE, ActivateInstance, IM_RetObj)
+DEFINE_CLASS(ISTRINGABLEHELPER, WinRT, IStringableHelper)
+DEFINE_METHOD(ISTRINGABLEHELPER, TO_STRING, ToString, SM_Obj_RetStr)
+#endif // FEATURE_COMINTEROP
+
+DEFINE_CLASS(STUBHELPERS, StubHelpers, StubHelpers)
+DEFINE_METHOD(STUBHELPERS, IS_QCALL, IsQCall, SM_IntPtr_RetBool)
+DEFINE_METHOD(STUBHELPERS, INIT_DECLARING_TYPE, InitDeclaringType, SM_IntPtr_RetVoid)
+DEFINE_METHOD(STUBHELPERS, GET_NDIRECT_TARGET, GetNDirectTarget, SM_IntPtr_RetIntPtr)
+DEFINE_METHOD(STUBHELPERS, GET_DELEGATE_TARGET, GetDelegateTarget, SM_Delegate_RefIntPtr_RetIntPtr)
+#ifndef FEATURE_CORECLR // CAS
+DEFINE_METHOD(STUBHELPERS, DEMAND_PERMISSION, DemandPermission, SM_IntPtr_RetVoid)
+#ifdef _TARGET_X86_
+DEFINE_METHOD(STUBHELPERS, SET_COPY_CTOR_COOKIE_CHAIN, SetCopyCtorCookieChain, SM_IntPtr_IntPtr_Int_IntPtr_RetVoid)
+DEFINE_FIELD(STUBHELPERS, COPY_CTOR_STUB_DESC, s_copyCtorStubDesc)
+#endif // _TARGET_X86_
+#endif // !FEATURE_CORECLR
+#ifdef FEATURE_COMINTEROP
+DEFINE_METHOD(STUBHELPERS, GET_COM_HR_EXCEPTION_OBJECT, GetCOMHRExceptionObject, SM_Int_IntPtr_Obj_RetException)
+DEFINE_METHOD(STUBHELPERS, GET_COM_HR_EXCEPTION_OBJECT_WINRT, GetCOMHRExceptionObject_WinRT, SM_Int_IntPtr_Obj_RetException)
+DEFINE_METHOD(STUBHELPERS, GET_COM_IP_FROM_RCW, GetCOMIPFromRCW, SM_Obj_IntPtr_RefIntPtr_RefBool_RetIntPtr)
+DEFINE_METHOD(STUBHELPERS, GET_COM_IP_FROM_RCW_WINRT, GetCOMIPFromRCW_WinRT, SM_Obj_IntPtr_RefIntPtr_RetIntPtr)
+DEFINE_METHOD(STUBHELPERS, GET_COM_IP_FROM_RCW_WINRT_SHARED_GENERIC, GetCOMIPFromRCW_WinRTSharedGeneric, SM_Obj_IntPtr_RefIntPtr_RetIntPtr)
+DEFINE_METHOD(STUBHELPERS, GET_COM_IP_FROM_RCW_WINRT_DELEGATE, GetCOMIPFromRCW_WinRTDelegate, SM_Obj_IntPtr_RefIntPtr_RetIntPtr)
+DEFINE_METHOD(STUBHELPERS, SHOULD_CALL_WINRT_INTERFACE, ShouldCallWinRTInterface, SM_Obj_IntPtr_RetBool)
+DEFINE_METHOD(STUBHELPERS, STUB_REGISTER_RCW, StubRegisterRCW, SM_Obj_RetVoid)
+DEFINE_METHOD(STUBHELPERS, STUB_UNREGISTER_RCW, StubUnregisterRCW, SM_Obj_RetVoid)
+DEFINE_METHOD(STUBHELPERS, GET_WINRT_FACTORY_OBJECT, GetWinRTFactoryObject, SM_IntPtr_RetObj)
+DEFINE_METHOD(STUBHELPERS, GET_DELEGATE_INVOKE_METHOD, GetDelegateInvokeMethod, SM_Delegate_RetIntPtr)
+DEFINE_METHOD(STUBHELPERS, GET_WINRT_FACTORY_RETURN_VALUE, GetWinRTFactoryReturnValue, SM_Obj_IntPtr_RetIntPtr)
+DEFINE_METHOD(STUBHELPERS, GET_OUTER_INSPECTABLE, GetOuterInspectable, SM_Obj_IntPtr_RetIntPtr)
+#ifdef MDA_SUPPORTED
+DEFINE_METHOD(STUBHELPERS, TRIGGER_EXCEPTION_SWALLOWED_MDA, TriggerExceptionSwallowedMDA, SM_Exception_IntPtr_RetException)
+#endif // MDA_SUPPORTED
+#endif // FEATURE_COMINTEROP
+#if defined(MDA_SUPPORTED) || (defined(CROSSGEN_COMPILE) && !defined(FEATURE_CORECLR))
+DEFINE_METHOD(STUBHELPERS, CHECK_COLLECTED_DELEGATE_MDA, CheckCollectedDelegateMDA, SM_IntPtr_RetVoid)
+#endif // MDA_SUPPORTED || (CROSSGEN_COMPILE && !FEATURE_CORECLR)
+DEFINE_METHOD(STUBHELPERS, SET_LAST_ERROR, SetLastError, SM_RetVoid)
+
+DEFINE_METHOD(STUBHELPERS, THROW_INTEROP_PARAM_EXCEPTION, ThrowInteropParamException, SM_Int_Int_RetVoid)
+DEFINE_METHOD(STUBHELPERS, ADD_TO_CLEANUP_LIST, AddToCleanupList, SM_RefCleanupWorkList_SafeHandle_RetIntPtr)
+DEFINE_METHOD(STUBHELPERS, DESTROY_CLEANUP_LIST, DestroyCleanupList, SM_RefCleanupWorkList_RetVoid)
+DEFINE_METHOD(STUBHELPERS, GET_HR_EXCEPTION_OBJECT, GetHRExceptionObject, SM_Int_RetException)
+DEFINE_METHOD(STUBHELPERS, CREATE_CUSTOM_MARSHALER_HELPER, CreateCustomMarshalerHelper, SM_IntPtr_Int_IntPtr_RetIntPtr)
+
+DEFINE_METHOD(STUBHELPERS, CHECK_STRING_LENGTH, CheckStringLength, SM_Int_RetVoid)
+DEFINE_METHOD(STUBHELPERS, DECIMAL_CANONICALIZE_INTERNAL, DecimalCanonicalizeInternal, SM_RefDec_RetVoid)
+
+DEFINE_METHOD(STUBHELPERS, FMT_CLASS_UPDATE_NATIVE_INTERNAL, FmtClassUpdateNativeInternal, SM_Obj_PtrByte_RefCleanupWorkList_RetVoid)
+DEFINE_METHOD(STUBHELPERS, FMT_CLASS_UPDATE_CLR_INTERNAL, FmtClassUpdateCLRInternal, SM_Obj_PtrByte_RetVoid)
+DEFINE_METHOD(STUBHELPERS, LAYOUT_DESTROY_NATIVE_INTERNAL, LayoutDestroyNativeInternal, SM_PtrByte_IntPtr_RetVoid)
+DEFINE_METHOD(STUBHELPERS, ALLOCATE_INTERNAL, AllocateInternal, SM_IntPtr_RetObj)
+DEFINE_METHOD(STUBHELPERS, STRLEN, strlen, SM_PtrSByt_RetInt)
+DEFINE_METHOD(STUBHELPERS, MARSHAL_TO_MANAGED_VA_LIST_INTERNAL,MarshalToManagedVaListInternal, SM_IntPtr_IntPtr_RetVoid)
+DEFINE_METHOD(STUBHELPERS, MARSHAL_TO_UNMANAGED_VA_LIST_INTERNAL,MarshalToUnmanagedVaListInternal,SM_IntPtr_UInt_IntPtr_RetVoid)
+DEFINE_METHOD(STUBHELPERS, CALC_VA_LIST_SIZE, CalcVaListSize, SM_IntPtr_RetUInt)
+DEFINE_METHOD(STUBHELPERS, VALIDATE_OBJECT, ValidateObject, SM_Obj_IntPtr_Obj_RetVoid)
+DEFINE_METHOD(STUBHELPERS, VALIDATE_BYREF, ValidateByref, SM_IntPtr_IntPtr_Obj_RetVoid)
+DEFINE_METHOD(STUBHELPERS, GET_STUB_CONTEXT, GetStubContext, SM_RetIntPtr)
+DEFINE_METHOD(STUBHELPERS, LOG_PINNED_ARGUMENT, LogPinnedArgument, SM_IntPtr_IntPtr_RetVoid)
+#ifdef _WIN64
+DEFINE_METHOD(STUBHELPERS, GET_STUB_CONTEXT_ADDR, GetStubContextAddr, SM_RetIntPtr)
+#endif // _WIN64
+#ifdef MDA_SUPPORTED
+DEFINE_METHOD(STUBHELPERS, TRIGGER_GC_FOR_MDA, TriggerGCForMDA, SM_RetVoid)
+#endif
+DEFINE_METHOD(STUBHELPERS, SAFE_HANDLE_ADD_REF, SafeHandleAddRef, SM_SafeHandle_RefBool_RetIntPtr)
+DEFINE_METHOD(STUBHELPERS, SAFE_HANDLE_RELEASE, SafeHandleRelease, SM_SafeHandle_RetVoid)
+
+#ifdef PROFILING_SUPPORTED
+DEFINE_METHOD(STUBHELPERS, PROFILER_BEGIN_TRANSITION_CALLBACK, ProfilerBeginTransitionCallback, SM_IntPtr_IntPtr_Obj_RetIntPtr)
+DEFINE_METHOD(STUBHELPERS, PROFILER_END_TRANSITION_CALLBACK, ProfilerEndTransitionCallback, SM_IntPtr_IntPtr_RetVoid)
+#endif
+
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+DEFINE_METHOD(STUBHELPERS, ARRAY_TYPE_CHECK, ArrayTypeCheck, SM_Obj_ArrObject_RetVoid)
+#endif
+
+#ifdef FEATURE_STUBS_AS_IL
+DEFINE_METHOD(STUBHELPERS, MULTICAST_DEBUGGER_TRACE_HELPER, MulticastDebuggerTraceHelper, SM_Obj_Int_RetVoid)
+#endif
+
+#if defined(_TARGET_X86_) && !defined(FEATURE_CORECLR)
+DEFINE_CLASS(COPYCTORSTUBCOOKIE, StubHelpers, CopyCtorStubCookie)
+DEFINE_METHOD(COPYCTORSTUBCOOKIE, SET_DATA, SetData, IM_IntPtr_UInt_IntPtr_IntPtr_RetVoid)
+DEFINE_METHOD(COPYCTORSTUBCOOKIE, SET_NEXT, SetNext, IM_IntPtr_RetVoid)
+#endif // _TARGET_X86_ && !FEATURE_CORECLR
+
+DEFINE_CLASS(ANSICHARMARSHALER, StubHelpers, AnsiCharMarshaler)
+DEFINE_METHOD(ANSICHARMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, SM_Char_Bool_Bool_RetByte)
+DEFINE_METHOD(ANSICHARMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, SM_Byte_RetChar)
+DEFINE_METHOD(ANSICHARMARSHALER, DO_ANSI_CONVERSION, DoAnsiConversion, SM_Str_Bool_Bool_RefInt_RetArrByte)
+
+DEFINE_CLASS(CSTRMARSHALER, StubHelpers, CSTRMarshaler)
+DEFINE_METHOD(CSTRMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, SM_Int_Str_IntPtr_RetIntPtr)
+DEFINE_METHOD(CSTRMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, SM_IntPtr_RetStr)
+DEFINE_METHOD(CSTRMARSHALER, CLEAR_NATIVE, ClearNative, SM_IntPtr_RetVoid)
+
+DEFINE_CLASS(WSTRBUFFERMARSHALER, StubHelpers, WSTRBufferMarshaler)
+DEFINE_METHOD(WSTRBUFFERMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, SM_Str_RetIntPtr)
+DEFINE_METHOD(WSTRBUFFERMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, SM_IntPtr_RetStr)
+DEFINE_METHOD(WSTRBUFFERMARSHALER, CLEAR_NATIVE, ClearNative, SM_IntPtr_RetVoid)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(BSTRMARSHALER, StubHelpers, BSTRMarshaler)
+DEFINE_METHOD(BSTRMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, SM_Str_IntPtr_RetIntPtr)
+DEFINE_METHOD(BSTRMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, SM_IntPtr_RetStr)
+DEFINE_METHOD(BSTRMARSHALER, CLEAR_NATIVE, ClearNative, SM_IntPtr_RetVoid)
+
+DEFINE_CLASS(ANSIBSTRMARSHALER, StubHelpers, AnsiBSTRMarshaler)
+DEFINE_METHOD(ANSIBSTRMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, SM_Int_Str_RetIntPtr)
+DEFINE_METHOD(ANSIBSTRMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, SM_IntPtr_RetStr)
+DEFINE_METHOD(ANSIBSTRMARSHALER, CLEAR_NATIVE, ClearNative, SM_IntPtr_RetVoid)
+
+DEFINE_CLASS(OBJECTMARSHALER, StubHelpers, ObjectMarshaler)
+DEFINE_METHOD(OBJECTMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, SM_ObjIntPtr_RetVoid)
+DEFINE_METHOD(OBJECTMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, SM_IntPtr_RetObj)
+DEFINE_METHOD(OBJECTMARSHALER, CLEAR_NATIVE, ClearNative, SM_IntPtr_RetVoid)
+
+DEFINE_CLASS(HSTRINGMARSHALER, StubHelpers, HStringMarshaler)
+DEFINE_METHOD(HSTRINGMARSHALER, CONVERT_TO_NATIVE_REFERENCE, ConvertToNativeReference, SM_Str_PtrHStringHeader_RetIntPtr)
+DEFINE_METHOD(HSTRINGMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, SM_Str_RetIntPtr)
+DEFINE_METHOD(HSTRINGMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, SM_IntPtr_RetStr)
+DEFINE_METHOD(HSTRINGMARSHALER, CLEAR_NATIVE, ClearNative, SM_IntPtr_RetVoid)
+
+DEFINE_CLASS(URIMARSHALER, StubHelpers, UriMarshaler)
+DEFINE_METHOD(URIMARSHALER, GET_RAWURI_FROM_NATIVE, GetRawUriFromNative, SM_IntPtr_RetStr)
+DEFINE_METHOD(URIMARSHALER, CREATE_NATIVE_URI_INSTANCE, CreateNativeUriInstance, SM_Str_RetIntPtr)
+
+DEFINE_CLASS(INTERFACEMARSHALER, StubHelpers, InterfaceMarshaler)
+DEFINE_METHOD(INTERFACEMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, SM_Obj_IntPtr_IntPtr_Int_RetIntPtr)
+DEFINE_METHOD(INTERFACEMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, SM_IntPtr_IntPtr_IntPtr_Int_RetObj)
+DEFINE_METHOD(INTERFACEMARSHALER, CLEAR_NATIVE, ClearNative, SM_IntPtr_RetVoid)
+
+
+DEFINE_CLASS(MNGD_SAFE_ARRAY_MARSHALER, StubHelpers, MngdSafeArrayMarshaler)
+DEFINE_METHOD(MNGD_SAFE_ARRAY_MARSHALER, CREATE_MARSHALER, CreateMarshaler, SM_IntPtr_IntPtr_Int_Int_RetVoid)
+DEFINE_METHOD(MNGD_SAFE_ARRAY_MARSHALER, CONVERT_SPACE_TO_NATIVE, ConvertSpaceToNative, SM_IntPtr_RefObj_IntPtr_RetVoid)
+DEFINE_METHOD(MNGD_SAFE_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_NATIVE, ConvertContentsToNative, SM_IntPtr_RefObj_IntPtr_Obj_RetVoid)
+DEFINE_METHOD(MNGD_SAFE_ARRAY_MARSHALER, CONVERT_SPACE_TO_MANAGED, ConvertSpaceToManaged, SM_IntPtr_RefObj_IntPtr_RetVoid)
+DEFINE_METHOD(MNGD_SAFE_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_MANAGED, ConvertContentsToManaged, SM_IntPtr_RefObj_IntPtr_RetVoid)
+DEFINE_METHOD(MNGD_SAFE_ARRAY_MARSHALER, CLEAR_NATIVE, ClearNative, SM_IntPtr_RefObj_IntPtr_RetVoid)
+
+DEFINE_CLASS(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, StubHelpers, MngdHiddenLengthArrayMarshaler)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CREATE_MARSHALER, CreateMarshaler, SM_IntPtr_IntPtr_IntPtr_UShrt_RetVoid)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CONVERT_SPACE_TO_MANAGED, ConvertSpaceToManaged, SM_IntPtr_RefObj_IntPtr_Int_RetVoid)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_MANAGED, ConvertContentsToManaged, SM_IntPtr_RefObj_IntPtr_RetVoid)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CONVERT_SPACE_TO_NATIVE, ConvertSpaceToNative, SM_IntPtr_RefObj_IntPtr_RetVoid)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_NATIVE, ConvertContentsToNative, SM_IntPtr_RefObj_IntPtr_RetVoid)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CLEAR_NATIVE_CONTENTS, ClearNativeContents, SM_IntPtr_IntPtr_Int_RetVoid)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CLEAR_NATIVE_CONTENTS_TYPE, ClearNativeContents_Type, NoSig)
+
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_MANAGED_DATETIME, ConvertContentsToManaged_DateTime, NoSig)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_MANAGED_TYPE, ConvertContentsToManaged_Type, NoSig)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_MANAGED_EXCEPTION, ConvertContentsToManaged_Exception, NoSig)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_MANAGED_NULLABLE, ConvertContentsToManaged_Nullable, NoSig)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_MANAGED_KEYVALUEPAIR, ConvertContentsToManaged_KeyValuePair, NoSig)
+
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_NATIVE_DATETIME, ConvertContentsToNative_DateTime, NoSig)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_NATIVE_TYPE, ConvertContentsToNative_Type, NoSig)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_NATIVE_EXCEPTION, ConvertContentsToNative_Exception, NoSig)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_NATIVE_NULLABLE, ConvertContentsToNative_Nullable, NoSig)
+DEFINE_METHOD(MNGD_HIDDEN_LENGTH_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_NATIVE_KEYVALUEPAIR, ConvertContentsToNative_KeyValuePair, NoSig)
+
+DEFINE_CLASS(DATETIMEOFFSETMARSHALER, StubHelpers, DateTimeOffsetMarshaler)
+DEFINE_METHOD(DATETIMEOFFSETMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, SM_RefDateTimeOffset_RefDateTimeNative_RetVoid)
+DEFINE_METHOD(DATETIMEOFFSETMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, SM_RefDateTimeOffset_RefDateTimeNative_RetVoid)
+
+DEFINE_CLASS(NULLABLEMARSHALER, StubHelpers, NullableMarshaler)
+DEFINE_METHOD(NULLABLEMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, NoSig)
+DEFINE_METHOD(NULLABLEMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, NoSig)
+DEFINE_METHOD(NULLABLEMARSHALER, CONVERT_TO_MANAGED_RET_VOID, ConvertToManagedRetVoid, NoSig)
+
+DEFINE_CLASS(SYSTEMTYPEMARSHALER, StubHelpers, SystemTypeMarshaler)
+
+DEFINE_METHOD(SYSTEMTYPEMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, SM_Type_PtrTypeName_RetVoid)
+DEFINE_METHOD(SYSTEMTYPEMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, SM_PtrTypeName_RefType_RetVoid)
+DEFINE_METHOD(SYSTEMTYPEMARSHALER, CLEAR_NATIVE, ClearNative, SM_PtrTypeName_RetVoid)
+
+DEFINE_CLASS(KEYVALUEPAIRMARSHALER, StubHelpers, KeyValuePairMarshaler)
+DEFINE_METHOD(KEYVALUEPAIRMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, NoSig)
+DEFINE_METHOD(KEYVALUEPAIRMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, NoSig)
+DEFINE_METHOD(KEYVALUEPAIRMARSHALER, CONVERT_TO_MANAGED_BOX, ConvertToManagedBox, NoSig)
+
+DEFINE_CLASS(HRESULTEXCEPTIONMARSHALER, StubHelpers, HResultExceptionMarshaler)
+DEFINE_METHOD(HRESULTEXCEPTIONMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, SM_Exception_RetInt)
+DEFINE_METHOD(HRESULTEXCEPTIONMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, SM_Int_RetException)
+
+#endif // FEATURE_COMINTEROP
+
+DEFINE_CLASS(VALUECLASSMARSHALER, StubHelpers, ValueClassMarshaler)
+DEFINE_METHOD(VALUECLASSMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, SM_IntPtrIntPtrIntPtr_RefCleanupWorkList_RetVoid)
+DEFINE_METHOD(VALUECLASSMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, SM_IntPtrIntPtrIntPtr_RetVoid)
+DEFINE_METHOD(VALUECLASSMARSHALER, CLEAR_NATIVE, ClearNative, SM_IntPtr_IntPtr_RetVoid)
+
+DEFINE_CLASS(DATEMARSHALER, StubHelpers, DateMarshaler)
+DEFINE_METHOD(DATEMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, SM_DateTime_RetDbl)
+DEFINE_METHOD(DATEMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, SM_Dbl_RetLong)
+
+DEFINE_CLASS(VBBYVALSTRMARSHALER, StubHelpers, VBByValStrMarshaler)
+DEFINE_METHOD(VBBYVALSTRMARSHALER, CONVERT_TO_NATIVE, ConvertToNative, SM_Str_Bool_Bool_RefInt_RetIntPtr)
+DEFINE_METHOD(VBBYVALSTRMARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, SM_IntPtr_Int_RetStr)
+DEFINE_METHOD(VBBYVALSTRMARSHALER, CLEAR_NATIVE, ClearNative, SM_IntPtr_RetVoid)
+
+DEFINE_CLASS(MNGD_NATIVE_ARRAY_MARSHALER, StubHelpers, MngdNativeArrayMarshaler)
+DEFINE_METHOD(MNGD_NATIVE_ARRAY_MARSHALER, CREATE_MARSHALER, CreateMarshaler, SM_IntPtr_IntPtr_Int_RetVoid)
+DEFINE_METHOD(MNGD_NATIVE_ARRAY_MARSHALER, CONVERT_SPACE_TO_NATIVE, ConvertSpaceToNative, SM_IntPtr_RefObj_IntPtr_RetVoid)
+DEFINE_METHOD(MNGD_NATIVE_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_NATIVE, ConvertContentsToNative, SM_IntPtr_RefObj_IntPtr_RetVoid)
+DEFINE_METHOD(MNGD_NATIVE_ARRAY_MARSHALER, CONVERT_SPACE_TO_MANAGED, ConvertSpaceToManaged, SM_IntPtr_RefObj_IntPtr_Int_RetVoid)
+DEFINE_METHOD(MNGD_NATIVE_ARRAY_MARSHALER, CONVERT_CONTENTS_TO_MANAGED, ConvertContentsToManaged, SM_IntPtr_RefObj_IntPtr_RetVoid)
+DEFINE_METHOD(MNGD_NATIVE_ARRAY_MARSHALER, CLEAR_NATIVE, ClearNative, SM_IntPtr_IntPtr_Int_RetVoid)
+DEFINE_METHOD(MNGD_NATIVE_ARRAY_MARSHALER, CLEAR_NATIVE_CONTENTS, ClearNativeContents, SM_IntPtr_IntPtr_Int_RetVoid)
+
+DEFINE_CLASS(MNGD_REF_CUSTOM_MARSHALER, StubHelpers, MngdRefCustomMarshaler)
+DEFINE_METHOD(MNGD_REF_CUSTOM_MARSHALER, CREATE_MARSHALER, CreateMarshaler, SM_IntPtr_IntPtr_RetVoid)
+DEFINE_METHOD(MNGD_REF_CUSTOM_MARSHALER, CONVERT_CONTENTS_TO_NATIVE, ConvertContentsToNative, SM_IntPtr_RefObj_IntPtr_RetVoid)
+DEFINE_METHOD(MNGD_REF_CUSTOM_MARSHALER, CONVERT_CONTENTS_TO_MANAGED, ConvertContentsToManaged, SM_IntPtr_RefObj_IntPtr_RetVoid)
+DEFINE_METHOD(MNGD_REF_CUSTOM_MARSHALER, CLEAR_NATIVE, ClearNative, SM_IntPtr_RefObj_IntPtr_RetVoid)
+DEFINE_METHOD(MNGD_REF_CUSTOM_MARSHALER, CLEAR_MANAGED, ClearManaged, SM_IntPtr_RefObj_IntPtr_RetVoid)
+
+DEFINE_CLASS(ASANY_MARSHALER, StubHelpers, AsAnyMarshaler)
+DEFINE_METHOD(ASANY_MARSHALER, CTOR, .ctor, IM_IntPtr_RetVoid)
+DEFINE_METHOD(ASANY_MARSHALER, CONVERT_TO_NATIVE, ConvertToNative, IM_Obj_Int_RetIntPtr)
+DEFINE_METHOD(ASANY_MARSHALER, CONVERT_TO_MANAGED, ConvertToManaged, IM_Obj_IntPtr_RetVoid)
+DEFINE_METHOD(ASANY_MARSHALER, CLEAR_NATIVE, ClearNative, IM_IntPtr_RetVoid)
+
+DEFINE_CLASS(NATIVEVARIANT, StubHelpers, NativeVariant)
+
+DEFINE_CLASS(WIN32NATIVE, Win32, Win32Native)
+DEFINE_METHOD(WIN32NATIVE, COTASKMEMALLOC, CoTaskMemAlloc, SM_UIntPtr_RetIntPtr)
+DEFINE_METHOD(WIN32NATIVE, COTASKMEMFREE, CoTaskMemFree, SM_IntPtr_RetVoid)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(IITERABLE, WinRT, IIterable`1)
+DEFINE_CLASS(IVECTOR, WinRT, IVector`1)
+DEFINE_CLASS(IMAP, WinRT, IMap`2)
+DEFINE_CLASS(IKEYVALUEPAIR, WinRT, IKeyValuePair`2)
+DEFINE_CLASS(IVECTORVIEW, WinRT, IVectorView`1)
+DEFINE_CLASS(IMAPVIEW, WinRT, IMapView`2)
+DEFINE_CLASS(IITERATOR, WinRT, IIterator`1)
+DEFINE_CLASS(IPROPERTYVALUE, WinRT, IPropertyValue)
+DEFINE_CLASS(IBINDABLEITERABLE, WinRT, IBindableIterable)
+DEFINE_CLASS(IBINDABLEITERATOR, WinRT, IBindableIterator)
+DEFINE_CLASS(IBINDABLEVECTOR, WinRT, IBindableVector)
+DEFINE_CLASS(ICLOSABLE, WinRT, IClosable)
+
+DEFINE_CLASS(GET_ENUMERATOR_DELEGATE, WinRT, GetEnumerator_Delegate`1)
+DEFINE_CLASS(ITERABLE_TO_ENUMERABLE_ADAPTER, WinRT, IterableToEnumerableAdapter)
+DEFINE_METHOD(ITERABLE_TO_ENUMERABLE_ADAPTER, GET_ENUMERATOR_STUB, GetEnumerator_Stub, NoSig)
+DEFINE_METHOD(ITERABLE_TO_ENUMERABLE_ADAPTER, GET_ENUMERATOR_VARIANCE_STUB, GetEnumerator_Variance_Stub, NoSig)
+
+DEFINE_CLASS(VECTOR_TO_LIST_ADAPTER, WinRT, VectorToListAdapter)
+DEFINE_METHOD(VECTOR_TO_LIST_ADAPTER, INDEXER_GET, Indexer_Get, NoSig)
+DEFINE_METHOD(VECTOR_TO_LIST_ADAPTER, INDEXER_SET, Indexer_Set, NoSig)
+DEFINE_METHOD(VECTOR_TO_LIST_ADAPTER, INDEX_OF, IndexOf, NoSig)
+DEFINE_METHOD(VECTOR_TO_LIST_ADAPTER, INSERT, Insert, NoSig)
+DEFINE_METHOD(VECTOR_TO_LIST_ADAPTER, REMOVE_AT, RemoveAt, NoSig)
+
+DEFINE_CLASS(MAP_TO_DICTIONARY_ADAPTER, WinRT, MapToDictionaryAdapter)
+DEFINE_METHOD(MAP_TO_DICTIONARY_ADAPTER, INDEXER_GET, Indexer_Get, NoSig)
+DEFINE_METHOD(MAP_TO_DICTIONARY_ADAPTER, INDEXER_SET, Indexer_Set, NoSig)
+DEFINE_METHOD(MAP_TO_DICTIONARY_ADAPTER, KEYS, Keys, NoSig)
+DEFINE_METHOD(MAP_TO_DICTIONARY_ADAPTER, VALUES, Values, NoSig)
+DEFINE_METHOD(MAP_TO_DICTIONARY_ADAPTER, CONTAINS_KEY, ContainsKey, NoSig)
+DEFINE_METHOD(MAP_TO_DICTIONARY_ADAPTER, ADD, Add, NoSig)
+DEFINE_METHOD(MAP_TO_DICTIONARY_ADAPTER, REMOVE, Remove, NoSig)
+DEFINE_METHOD(MAP_TO_DICTIONARY_ADAPTER, TRY_GET_VALUE, TryGetValue, NoSig)
+
+DEFINE_CLASS(VECTOR_TO_COLLECTION_ADAPTER, WinRT, VectorToCollectionAdapter)
+DEFINE_METHOD(VECTOR_TO_COLLECTION_ADAPTER, COUNT, Count, NoSig)
+DEFINE_METHOD(VECTOR_TO_COLLECTION_ADAPTER, IS_READ_ONLY, IsReadOnly, NoSig)
+DEFINE_METHOD(VECTOR_TO_COLLECTION_ADAPTER, ADD, Add, NoSig)
+DEFINE_METHOD(VECTOR_TO_COLLECTION_ADAPTER, CLEAR, Clear, NoSig)
+DEFINE_METHOD(VECTOR_TO_COLLECTION_ADAPTER, CONTAINS, Contains, NoSig)
+DEFINE_METHOD(VECTOR_TO_COLLECTION_ADAPTER, COPY_TO, CopyTo, NoSig)
+DEFINE_METHOD(VECTOR_TO_COLLECTION_ADAPTER, REMOVE, Remove, NoSig)
+
+DEFINE_CLASS(MAP_TO_COLLECTION_ADAPTER, WinRT, MapToCollectionAdapter)
+DEFINE_METHOD(MAP_TO_COLLECTION_ADAPTER, COUNT, Count, NoSig)
+DEFINE_METHOD(MAP_TO_COLLECTION_ADAPTER, IS_READ_ONLY, IsReadOnly, NoSig)
+DEFINE_METHOD(MAP_TO_COLLECTION_ADAPTER, ADD, Add, NoSig)
+DEFINE_METHOD(MAP_TO_COLLECTION_ADAPTER, CLEAR, Clear, NoSig)
+DEFINE_METHOD(MAP_TO_COLLECTION_ADAPTER, CONTAINS, Contains, NoSig)
+DEFINE_METHOD(MAP_TO_COLLECTION_ADAPTER, COPY_TO, CopyTo, NoSig)
+DEFINE_METHOD(MAP_TO_COLLECTION_ADAPTER, REMOVE, Remove, NoSig)
+
+DEFINE_CLASS(BINDABLEITERABLE_TO_ENUMERABLE_ADAPTER, WinRT, BindableIterableToEnumerableAdapter)
+DEFINE_METHOD(BINDABLEITERABLE_TO_ENUMERABLE_ADAPTER, GET_ENUMERATOR_STUB, GetEnumerator_Stub, NoSig)
+
+DEFINE_CLASS(BINDABLEVECTOR_TO_LIST_ADAPTER, WinRT, BindableVectorToListAdapter)
+DEFINE_METHOD(BINDABLEVECTOR_TO_LIST_ADAPTER, INDEXER_GET, Indexer_Get, NoSig)
+DEFINE_METHOD(BINDABLEVECTOR_TO_LIST_ADAPTER, INDEXER_SET, Indexer_Set, NoSig)
+DEFINE_METHOD(BINDABLEVECTOR_TO_LIST_ADAPTER, ADD, Add, NoSig)
+DEFINE_METHOD(BINDABLEVECTOR_TO_LIST_ADAPTER, CONTAINS, Contains, NoSig)
+DEFINE_METHOD(BINDABLEVECTOR_TO_LIST_ADAPTER, CLEAR, Clear, NoSig)
+DEFINE_METHOD(BINDABLEVECTOR_TO_LIST_ADAPTER, IS_READ_ONLY, IsReadOnly, NoSig)
+DEFINE_METHOD(BINDABLEVECTOR_TO_LIST_ADAPTER, IS_FIXED_SIZE, IsFixedSize, NoSig)
+DEFINE_METHOD(BINDABLEVECTOR_TO_LIST_ADAPTER, INDEX_OF, IndexOf, NoSig)
+DEFINE_METHOD(BINDABLEVECTOR_TO_LIST_ADAPTER, INSERT, Insert, NoSig)
+DEFINE_METHOD(BINDABLEVECTOR_TO_LIST_ADAPTER, REMOVE, Remove, NoSig)
+DEFINE_METHOD(BINDABLEVECTOR_TO_LIST_ADAPTER, REMOVE_AT, RemoveAt, NoSig)
+
+DEFINE_CLASS(BINDABLEVECTOR_TO_COLLECTION_ADAPTER, WinRT, BindableVectorToCollectionAdapter)
+DEFINE_METHOD(BINDABLEVECTOR_TO_COLLECTION_ADAPTER, COPY_TO, CopyTo, NoSig)
+DEFINE_METHOD(BINDABLEVECTOR_TO_COLLECTION_ADAPTER, COUNT, Count, NoSig)
+DEFINE_METHOD(BINDABLEVECTOR_TO_COLLECTION_ADAPTER, SYNC_ROOT, SyncRoot, NoSig)
+DEFINE_METHOD(BINDABLEVECTOR_TO_COLLECTION_ADAPTER, IS_SYNCHRONIZED, IsSynchronized, NoSig)
+
+DEFINE_CLASS(ENUMERABLE_TO_ITERABLE_ADAPTER, WinRT, EnumerableToIterableAdapter)
+DEFINE_METHOD(ENUMERABLE_TO_ITERABLE_ADAPTER, FIRST_STUB, First_Stub, NoSig)
+
+DEFINE_CLASS(LIST_TO_VECTOR_ADAPTER, WinRT, ListToVectorAdapter)
+DEFINE_METHOD(LIST_TO_VECTOR_ADAPTER, GET_AT, GetAt, NoSig)
+DEFINE_METHOD(LIST_TO_VECTOR_ADAPTER, SIZE, Size, NoSig)
+DEFINE_METHOD(LIST_TO_VECTOR_ADAPTER, GET_VIEW, GetView, NoSig)
+DEFINE_METHOD(LIST_TO_VECTOR_ADAPTER, INDEX_OF, IndexOf, NoSig)
+DEFINE_METHOD(LIST_TO_VECTOR_ADAPTER, SET_AT, SetAt, NoSig)
+DEFINE_METHOD(LIST_TO_VECTOR_ADAPTER, INSERT_AT, InsertAt, NoSig)
+DEFINE_METHOD(LIST_TO_VECTOR_ADAPTER, REMOVE_AT, RemoveAt, NoSig)
+DEFINE_METHOD(LIST_TO_VECTOR_ADAPTER, APPEND, Append, NoSig)
+DEFINE_METHOD(LIST_TO_VECTOR_ADAPTER, REMOVE_AT_END, RemoveAtEnd, NoSig)
+DEFINE_METHOD(LIST_TO_VECTOR_ADAPTER, CLEAR, Clear, NoSig)
+DEFINE_METHOD(LIST_TO_VECTOR_ADAPTER, GET_MANY, GetMany, NoSig)
+DEFINE_METHOD(LIST_TO_VECTOR_ADAPTER, REPLACE_ALL, ReplaceAll, NoSig)
+
+DEFINE_CLASS(DICTIONARY_TO_MAP_ADAPTER, WinRT, DictionaryToMapAdapter)
+DEFINE_METHOD(DICTIONARY_TO_MAP_ADAPTER, LOOKUP, Lookup, NoSig)
+DEFINE_METHOD(DICTIONARY_TO_MAP_ADAPTER, SIZE, Size, NoSig)
+DEFINE_METHOD(DICTIONARY_TO_MAP_ADAPTER, HAS_KEY, HasKey, NoSig)
+DEFINE_METHOD(DICTIONARY_TO_MAP_ADAPTER, GET_VIEW, GetView, NoSig)
+DEFINE_METHOD(DICTIONARY_TO_MAP_ADAPTER, INSERT, Insert, NoSig)
+DEFINE_METHOD(DICTIONARY_TO_MAP_ADAPTER, REMOVE, Remove, NoSig)
+DEFINE_METHOD(DICTIONARY_TO_MAP_ADAPTER, CLEAR, Clear, NoSig)
+
+DEFINE_CLASS(IVECTORVIEW_TO_IREADONLYCOLLECTION_ADAPTER, WinRT, VectorViewToReadOnlyCollectionAdapter)
+DEFINE_METHOD(IVECTORVIEW_TO_IREADONLYCOLLECTION_ADAPTER, COUNT, Count, NoSig)
+
+DEFINE_CLASS(IMAPVIEW_TO_IREADONLYCOLLECTION_ADAPTER, WinRT, MapViewToReadOnlyCollectionAdapter)
+DEFINE_METHOD(IMAPVIEW_TO_IREADONLYCOLLECTION_ADAPTER, COUNT, Count, NoSig)
+
+DEFINE_CLASS(IREADONLYLIST_TO_IVECTORVIEW_ADAPTER, WinRT, IReadOnlyListToIVectorViewAdapter)
+DEFINE_METHOD(IREADONLYLIST_TO_IVECTORVIEW_ADAPTER, GETAT, GetAt, NoSig)
+DEFINE_METHOD(IREADONLYLIST_TO_IVECTORVIEW_ADAPTER, GETMANY, GetMany, NoSig)
+DEFINE_METHOD(IREADONLYLIST_TO_IVECTORVIEW_ADAPTER, INDEXOF, IndexOf, NoSig)
+DEFINE_METHOD(IREADONLYLIST_TO_IVECTORVIEW_ADAPTER, SIZE, Size, NoSig)
+
+DEFINE_CLASS(INDEXER_GET_DELEGATE, WinRT, Indexer_Get_Delegate`1)
+DEFINE_CLASS(IVECTORVIEW_TO_IREADONLYLIST_ADAPTER, WinRT, IVectorViewToIReadOnlyListAdapter)
+DEFINE_METHOD(IVECTORVIEW_TO_IREADONLYLIST_ADAPTER, INDEXER_GET, Indexer_Get, NoSig)
+DEFINE_METHOD(IVECTORVIEW_TO_IREADONLYLIST_ADAPTER, INDEXER_GET_VARIANCE, Indexer_Get_Variance, NoSig)
+
+DEFINE_CLASS(IREADONLYDICTIONARY_TO_IMAPVIEW_ADAPTER, WinRT, IReadOnlyDictionaryToIMapViewAdapter)
+DEFINE_METHOD(IREADONLYDICTIONARY_TO_IMAPVIEW_ADAPTER, HASKEY, HasKey, NoSig)
+DEFINE_METHOD(IREADONLYDICTIONARY_TO_IMAPVIEW_ADAPTER, LOOKUP, Lookup, NoSig)
+DEFINE_METHOD(IREADONLYDICTIONARY_TO_IMAPVIEW_ADAPTER, SIZE, Size, NoSig)
+DEFINE_METHOD(IREADONLYDICTIONARY_TO_IMAPVIEW_ADAPTER, SPLIT, Split, NoSig)
+
+DEFINE_CLASS(IMAPVIEW_TO_IREADONLYDICTIONARY_ADAPTER, WinRT, IMapViewToIReadOnlyDictionaryAdapter)
+DEFINE_METHOD(IMAPVIEW_TO_IREADONLYDICTIONARY_ADAPTER, CONTAINSKEY, ContainsKey, NoSig)
+DEFINE_METHOD(IMAPVIEW_TO_IREADONLYDICTIONARY_ADAPTER, INDEXER_GET, Indexer_Get, NoSig)
+DEFINE_METHOD(IMAPVIEW_TO_IREADONLYDICTIONARY_ADAPTER, TRYGETVALUE, TryGetValue, NoSig)
+DEFINE_METHOD(IMAPVIEW_TO_IREADONLYDICTIONARY_ADAPTER, KEYS, Keys, NoSig)
+DEFINE_METHOD(IMAPVIEW_TO_IREADONLYDICTIONARY_ADAPTER, VALUES, Values, NoSig)
+
+DEFINE_CLASS(ENUMERABLE_TO_BINDABLEITERABLE_ADAPTER, WinRT, EnumerableToBindableIterableAdapter)
+DEFINE_METHOD(ENUMERABLE_TO_BINDABLEITERABLE_ADAPTER, FIRST_STUB, First_Stub, NoSig)
+
+DEFINE_CLASS(LIST_TO_BINDABLEVECTOR_ADAPTER, WinRT, ListToBindableVectorAdapter)
+DEFINE_METHOD(LIST_TO_BINDABLEVECTOR_ADAPTER, GET_AT, GetAt, NoSig)
+DEFINE_METHOD(LIST_TO_BINDABLEVECTOR_ADAPTER, SIZE, Size, NoSig)
+DEFINE_METHOD(LIST_TO_BINDABLEVECTOR_ADAPTER, GET_VIEW, GetView, NoSig)
+DEFINE_METHOD(LIST_TO_BINDABLEVECTOR_ADAPTER, INDEX_OF, IndexOf, NoSig)
+DEFINE_METHOD(LIST_TO_BINDABLEVECTOR_ADAPTER, SET_AT, SetAt, NoSig)
+DEFINE_METHOD(LIST_TO_BINDABLEVECTOR_ADAPTER, INSERT_AT, InsertAt, NoSig)
+DEFINE_METHOD(LIST_TO_BINDABLEVECTOR_ADAPTER, REMOVE_AT, RemoveAt, NoSig)
+DEFINE_METHOD(LIST_TO_BINDABLEVECTOR_ADAPTER, APPEND, Append, NoSig)
+DEFINE_METHOD(LIST_TO_BINDABLEVECTOR_ADAPTER, REMOVE_AT_END, RemoveAtEnd, NoSig)
+DEFINE_METHOD(LIST_TO_BINDABLEVECTOR_ADAPTER, CLEAR, Clear, NoSig)
+
+DEFINE_CLASS(IDISPOSABLE_TO_ICLOSABLE_ADAPTER, WinRT, IDisposableToIClosableAdapter)
+DEFINE_METHOD(IDISPOSABLE_TO_ICLOSABLE_ADAPTER, CLOSE, Close, NoSig)
+
+DEFINE_CLASS(ICLOSABLE_TO_IDISPOSABLE_ADAPTER, WinRT, IClosableToIDisposableAdapter)
+DEFINE_METHOD(ICLOSABLE_TO_IDISPOSABLE_ADAPTER, DISPOSE, Dispose, NoSig)
+
+#endif // FEATURE_COMINTEROP
+
+DEFINE_CLASS(SZARRAYHELPER, System, SZArrayHelper)
+// Note: The order of the methods below has to match the order in which they
+// are implemented on the interfaces:
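+// (Illustration, derived from the table itself: ICollection`1 declares
+// get_Count before get_IsReadOnly, so GET_COUNT precedes ISREADONLY in the
+// ICollection`1 group.)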
+// IEnumerable`1
+DEFINE_METHOD(SZARRAYHELPER, GETENUMERATOR, GetEnumerator, NoSig)
+// ICollection`1/IReadOnlyCollection`1
+DEFINE_METHOD(SZARRAYHELPER, GET_COUNT, get_Count, NoSig)
+DEFINE_METHOD(SZARRAYHELPER, ISREADONLY, get_IsReadOnly, NoSig)
+DEFINE_METHOD(SZARRAYHELPER, ADD, Add, NoSig)
+DEFINE_METHOD(SZARRAYHELPER, CLEAR, Clear, NoSig)
+DEFINE_METHOD(SZARRAYHELPER, CONTAINS, Contains, NoSig)
+DEFINE_METHOD(SZARRAYHELPER, COPYTO, CopyTo, NoSig)
+DEFINE_METHOD(SZARRAYHELPER, REMOVE, Remove, NoSig)
+// IList`1/IReadOnlyList`1
+DEFINE_METHOD(SZARRAYHELPER, GET_ITEM, get_Item, NoSig)
+DEFINE_METHOD(SZARRAYHELPER, SET_ITEM, set_Item, NoSig)
+DEFINE_METHOD(SZARRAYHELPER, INDEXOF, IndexOf, NoSig)
+DEFINE_METHOD(SZARRAYHELPER, INSERT, Insert, NoSig)
+DEFINE_METHOD(SZARRAYHELPER, REMOVEAT, RemoveAt, NoSig)
+
+DEFINE_CLASS(IENUMERABLEGENERIC, CollectionsGeneric, IEnumerable`1)
+DEFINE_CLASS(IENUMERATORGENERIC, CollectionsGeneric, IEnumerator`1)
+DEFINE_CLASS(ICOLLECTIONGENERIC, CollectionsGeneric, ICollection`1)
+DEFINE_CLASS(ILISTGENERIC, CollectionsGeneric, IList`1)
+#if !defined(FEATURE_CORECLR) || defined(FEATURE_COMINTEROP) // Silverlight 5 does not contain IReadOnlyList<T>, but we should add it to Silverlight 6.
+DEFINE_CLASS(IREADONLYCOLLECTIONGENERIC,CollectionsGeneric, IReadOnlyCollection`1)
+DEFINE_CLASS(IREADONLYLISTGENERIC, CollectionsGeneric, IReadOnlyList`1)
+DEFINE_CLASS(IREADONLYDICTIONARYGENERIC,CollectionsGeneric, IReadOnlyDictionary`2)
+#endif
+DEFINE_CLASS(IDICTIONARYGENERIC, CollectionsGeneric, IDictionary`2)
+DEFINE_CLASS(KEYVALUEPAIRGENERIC, CollectionsGeneric, KeyValuePair`2)
+
+DEFINE_CLASS(ICOMPARABLEGENERIC, System, IComparable`1)
+DEFINE_CLASS(IEQUATABLEGENERIC, System, IEquatable`1)
+
+DEFINE_CLASS_U(Reflection, LoaderAllocator, LoaderAllocatorObject)
+DEFINE_FIELD_U(m_slots, LoaderAllocatorObject, m_pSlots)
+DEFINE_FIELD_U(m_slotsUsed, LoaderAllocatorObject, m_slotsUsed)
+DEFINE_CLASS(LOADERALLOCATOR, Reflection, LoaderAllocator)
+DEFINE_METHOD(LOADERALLOCATOR, CTOR, .ctor, IM_RetVoid)
+
+DEFINE_CLASS_U(Reflection, LoaderAllocatorScout, LoaderAllocatorScoutObject)
+DEFINE_FIELD_U(m_nativeLoaderAllocator, LoaderAllocatorScoutObject, m_nativeLoaderAllocator)
+DEFINE_CLASS(LOADERALLOCATORSCOUT, Reflection, LoaderAllocatorScout)
+
+DEFINE_CLASS(CONTRACTEXCEPTION, CodeContracts, ContractException)
+
+DEFINE_CLASS_U(CodeContracts, ContractException, ContractExceptionObject)
+DEFINE_FIELD_U(_Kind, ContractExceptionObject, _Kind)
+DEFINE_FIELD_U(_UserMessage, ContractExceptionObject, _UserMessage)
+DEFINE_FIELD_U(_Condition, ContractExceptionObject, _Condition)
+
+// The COM interfaces for the reflection types.
+#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORECLR)
+DEFINE_CLASS(IAPPDOMAIN, System, _AppDomain)
+DEFINE_CLASS(ITYPE, InteropServices, _Type)
+DEFINE_CLASS(IASSEMBLY, InteropServices, _Assembly)
+DEFINE_CLASS(IMEMBERINFO, InteropServices, _MemberInfo)
+DEFINE_CLASS(IMETHODBASE, InteropServices, _MethodBase)
+DEFINE_CLASS(IMETHODINFO, InteropServices, _MethodInfo)
+DEFINE_CLASS(ICONSTRUCTORINFO, InteropServices, _ConstructorInfo)
+DEFINE_CLASS(IFIELDINFO, InteropServices, _FieldInfo)
+DEFINE_CLASS(IPROPERTYINFO, InteropServices, _PropertyInfo)
+DEFINE_CLASS(IEVENTINFO, InteropServices, _EventInfo)
+DEFINE_CLASS(IPARAMETERINFO, InteropServices, _ParameterInfo)
+DEFINE_CLASS(IMODULE, InteropServices, _Module)
+#endif // FEATURE_COMINTEROP && !FEATURE_CORECLR
+
+#ifdef FEATURE_COMPRESSEDSTACK
+DEFINE_CLASS_U(Security, FrameSecurityDescriptorWithResolver, FrameSecurityDescriptorWithResolverBaseObject)
+DEFINE_FIELD_U(m_resolver, FrameSecurityDescriptorWithResolverBaseObject, m_resolver)
+DEFINE_CLASS(FRAME_SECURITY_DESCRIPTOR_WITH_RESOLVER, Security, FrameSecurityDescriptorWithResolver)
+#endif // FEATURE_COMPRESSEDSTACK
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_CLASS(ASYNC_TRACING_EVENT_ARGS, WindowsFoundationDiag, TracingStatusChangedEventArgs)
+DEFINE_CLASS(IASYNC_TRACING_EVENT_ARGS, WindowsFoundationDiag, ITracingStatusChangedEventArgs)
+#endif // FEATURE_COMINTEROP
+
+DEFINE_CLASS(MODULEBASE, Reflection, Module)
+
+#undef DEFINE_CLASS
+#undef DEFINE_METHOD
+#undef DEFINE_FIELD
+#undef DEFINE_CLASS_U
+#undef DEFINE_FIELD_U
diff --git a/src/vm/mscoruefwrapper.h b/src/vm/mscoruefwrapper.h
new file mode 100644
index 0000000000..83bc7c9210
--- /dev/null
+++ b/src/vm/mscoruefwrapper.h
@@ -0,0 +1,20 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+//*****************************************************************************
+// MSCorUEFWrapper.h - Wrapper for including the UEF chain manager definition
+// and the global that references it for VM usage.
+//*****************************************************************************
+
+#ifdef FEATURE_UEF_CHAINMANAGER
+
+// This is required to register our UEF callback with the UEF chain manager
+#include <mscoruef.h>
+// Global reference to IUEFManager that will be used in the VM
+extern IUEFManager * g_pUEFManager;
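+
+// Minimal usage sketch (illustrative only; the concrete IUEFManager methods
+// are declared in mscoruef.h and are not reproduced here):
+//
+//   if (g_pUEFManager != NULL)
+//   {
+//       // ... register the VM's unhandled-exception-filter callback ...
+//   }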
+
+#endif // FEATURE_UEF_CHAINMANAGER
diff --git a/src/vm/mtypes.h b/src/vm/mtypes.h
new file mode 100644
index 0000000000..3618bd1f36
--- /dev/null
+++ b/src/vm/mtypes.h
@@ -0,0 +1,121 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: mtypes.h
+//
+
+//
+// Defines the mapping between MARSHAL_TYPE constants and their Marshaler
+// classes. Used to generate all the enums and tables.
+//
+
+
+// ------------------------------------------------------------------------------------------------------------------
+// Marshaler ID Marshaler class name Supported in WinRT scenarios
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_GENERIC_1, CopyMarshaler1, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_GENERIC_U1, CopyMarshalerU1, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_GENERIC_2, CopyMarshaler2, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_GENERIC_U2, CopyMarshalerU2, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_GENERIC_4, CopyMarshaler4, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_GENERIC_U4, CopyMarshalerU4, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_GENERIC_8, CopyMarshaler8, true)
+
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_WINBOOL, WinBoolMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_CBOOL, CBoolMarshaler, true)
+#ifdef FEATURE_COMINTEROP
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_VTBOOL, VtBoolMarshaler, false)
+#endif // FEATURE_COMINTEROP
+
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_ANSICHAR, AnsiCharMarshaler, false)
+
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_FLOAT, FloatMarshaler, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_DOUBLE, DoubleMarshaler, true)
+
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_CURRENCY, CurrencyMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_DECIMAL, DecimalMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_DECIMAL_PTR, DecimalPtrMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_GUID, GuidMarshaler, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_GUID_PTR, GuidPtrMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_DATE, DateMarshaler, false)
+
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_LPWSTR, WSTRMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_LPSTR, CSTRMarshaler, false)
+#ifdef FEATURE_COMINTEROP
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_BSTR, BSTRMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_ANSIBSTR, AnsiBSTRMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_HSTRING, HSTRINGMarshaler, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_DATETIME, DateTimeMarshaler, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_URI, UriMarshaler, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_NCCEVENTARGS, NCCEventArgsMarshaler, true) // NotifyCollectionChangedEventArgs
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_PCEVENTARGS, PCEventArgsMarshaler, true) // PropertyChangedEventArgs
+#endif // FEATURE_COMINTEROP
+
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_LPWSTR_BUFFER, WSTRBufferMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_LPSTR_BUFFER, CSTRBufferMarshaler, false)
+
+#if defined(FEATURE_COMINTEROP) || !defined(FEATURE_CORECLR)
+// Without COM interop, CoreCLR has no support for marshaling interface
+// pointers, not even fake CCWs.
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_INTERFACE, InterfaceMarshaler, true)
+#endif // defined(FEATURE_COMINTEROP) || !defined(FEATURE_CORECLR)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_SAFEARRAY, SafeArrayMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_HIDDENLENGTHARRAY, HiddenLengthArrayMarshaler, true)
+#endif // FEATURE_COMINTEROP
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_NATIVEARRAY, NativeArrayMarshaler, false)
+
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_ASANYA, AsAnyAMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_ASANYW, AsAnyWMarshaler, false)
+
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_DELEGATE, DelegateMarshaler, false)
+
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_BLITTABLEPTR, BlittablePtrMarshaler, false)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_VBBYVALSTR, VBByValStrMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_VBBYVALSTRW, VBByValStrWMarshaler, false)
+#endif // FEATURE_COMINTEROP
+
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_LAYOUTCLASSPTR, LayoutClassPtrMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_ARRAYWITHOFFSET, ArrayWithOffsetMarshaler, false)
+
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_BLITTABLEVALUECLASS, BlittableValueClassMarshaler, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_VALUECLASS, ValueClassMarshaler, true)
+
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_REFERENCECUSTOMMARSHALER, ReferenceCustomMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_ARGITERATOR, ArgIteratorMarshaler, false)
+
+#ifndef FEATURE_CORECLR
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_BLITTABLEVALUECLASSWITHCOPYCTOR, BlittableValueClassWithCopyCtorMarshaler, false)
+#endif // !FEATURE_CORECLR
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_OBJECT, ObjectMarshaler, false)
+#endif // FEATURE_COMINTEROP
+
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_HANDLEREF, HandleRefMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_SAFEHANDLE, SafeHandleMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_CRITICALHANDLE, CriticalHandleMarshaler, false)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_OLECOLOR, OleColorMarshaler, false)
+#endif // FEATURE_COMINTEROP
+
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_RUNTIMETYPEHANDLE, RuntimeTypeHandleMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_RUNTIMEMETHODHANDLE, RuntimeMethodHandleMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_RUNTIMEFIELDHANDLE, RuntimeFieldHandleMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_RUNTIMEMETHODINFO, IRuntimeMethodInfoMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_RUNTIMEMODULE, RuntimeModuleMarshaler, false)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_RUNTIMEASSEMBLY, RuntimeAssemblyMarshaler, false)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_NULLABLE, NullableMarshaler, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_SYSTEMTYPE, SystemTypeMarshaler, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_KEYVALUEPAIR, KeyValuePairMarshaler, true)
+DEFINE_MARSHALER_TYPE(MARSHAL_TYPE_EXCEPTION, HResultExceptionMarshaler, true) // For WinRT, marshal exceptions as Windows.Foundation.HResult
+#endif // FEATURE_COMINTEROP
+
+#undef DEFINE_MARSHALER_TYPE
diff --git a/src/vm/multicorejit.cpp b/src/vm/multicorejit.cpp
new file mode 100644
index 0000000000..8517f1d2d2
--- /dev/null
+++ b/src/vm/multicorejit.cpp
@@ -0,0 +1,1669 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: MultiCoreJIT.cpp
+//
+
+// ===========================================================================
+// This file contains the implementation for MultiCore JIT (the player lives in a separate file, MultiCoreJITPlayer.cpp)
+// ===========================================================================
+//
+
+#include "common.h"
+#include "vars.hpp"
+#include "security.h"
+#include "eeconfig.h"
+#include "dllimport.h"
+#include "comdelegate.h"
+#include "dbginterface.h"
+#include "listlock.inl"
+#include "stubgen.h"
+#include "eventtrace.h"
+#include "array.h"
+#include "fstream.h"
+#include "hash.h"
+
+#include "appdomain.hpp"
+#include "qcall.h"
+
+#include "multicorejit.h"
+#include "multicorejitimpl.h"
+
+const wchar_t * AppxProfile = W("Application.Profile");
+
+#if defined(FEATURE_APPX_BINDER)
+
+// static
+bool MulticoreJitManager::IsLoadOkay(Module * pModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pModule->GetAssembly()->GetManifestFile()->IsWindowsRuntime())
+ {
+ PEFile * pFile = pModule->GetFile();
+
+ ICLRPrivAssembly * pHostAsm = pFile->GetHostAssembly();
+
+ // Allow first party WinMD to load in multicore JIT background thread
+ if (pHostAsm != NULL)
+ {
+ BOOL shared = FALSE;
+
+ if (SUCCEEDED(pHostAsm->IsShareable(& shared)))
+ {
+ if (shared)
+ {
+ LPCUTF8 simpleName = pModule->GetSimpleName();
+
+ if (IsWindowsNamespace(simpleName))
+ {
+ return true;
+ }
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+#endif
+
+
+// #define MCGEN_ENABLE_CHECK(Context, Descriptor) (Context.IsEnabled && McGenEventTracingEnabled(&Context, &Descriptor))
+
+// #define FireEtwMulticoreJit(ClrInstanceID, String1, String2, Int1, Int2, Int3)\
+// MCGEN_ENABLE_CHECK(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, MulticoreJit) ?\
+// CoMofTemplate_hzzddd(Microsoft_Windows_DotNETRuntimePrivateHandle, &MulticoreJit, &CLRMulticoreJitId, ClrInstanceID, String1, String2, Int1, Int2, Int3)\
+// : ERROR_SUCCESS\
+
+void MulticoreJitFireEtw(const wchar_t * pAction, const wchar_t * pTarget, int p1, int p2, int p3)
+{
+    LIMITED_METHOD_CONTRACT;
+
+ FireEtwMulticoreJit(GetClrInstanceId(), pAction, pTarget, p1, p2, p3);
+}
+
+
+void MulticoreJitFireEtwA(const wchar_t * pAction, const char * pTarget, int p1, int p2, int p3)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+#ifdef FEATURE_EVENT_TRACE
+ EX_TRY
+ {
+ if (McGenEventTracingEnabled(& MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, & MulticoreJit))
+ {
+ SString wTarget;
+
+ wTarget.SetUTF8(pTarget);
+
+ FireEtwMulticoreJit(GetClrInstanceId(), pAction, wTarget.GetUnicode(), p1, p2, p3);
+ }
+ }
+ EX_CATCH
+ { }
+ EX_END_CATCH(SwallowAllExceptions);
+#endif // FEATURE_EVENT_TRACE
+}
+
+void MulticoreJitFireEtwMethodCodeReturned(MethodDesc * pMethod)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ if(pMethod)
+ {
+ // Get the module id.
+ Module * pModule = pMethod->GetModule_NoLogging();
+ ULONGLONG ullModuleID = (ULONGLONG)(TADDR) pModule;
+
+ // Get the method id.
+ ULONGLONG ullMethodID = (ULONGLONG)pMethod;
+
+ // Fire the event.
+ FireEtwMulticoreJitMethodCodeReturned(GetClrInstanceId(), ullModuleID, ullMethodID);
+ }
+ }
+ EX_CATCH
+ { }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+#ifdef MULTICOREJIT_LOGGING
+
+// %s ANSI
+// %S UNICODE
+void _MulticoreJitTrace(const char * format, ...)
+{
+ static unsigned s_startTick = 0;
+
+ WRAPPER_NO_CONTRACT;
+
+ if (s_startTick == 0)
+ {
+ s_startTick = GetTickCount();
+ }
+
+ va_list args;
+ va_start(args, format);
+
+#ifdef LOGGING
+ LogSpew2 (LF2_MULTICOREJIT, LL_INFO100, "Mcj ");
+ LogSpew2Valist(LF2_MULTICOREJIT, LL_INFO100, format, args);
+ LogSpew2 (LF2_MULTICOREJIT, LL_INFO100, ", (time=%d ms)\n", GetTickCount() - s_startTick);
+#else
+
+ // Following LogSpewValist(DWORD facility, DWORD level, const char *fmt, va_list args)
+ char buffer[512];
+
+ int len;
+
+ len = sprintf_s(buffer, _countof(buffer), "Mcj TID %04x: ", GetCurrentThreadId());
+ len += _vsnprintf(buffer + len, _countof(buffer) - len, format, args);
+ len += sprintf_s(buffer + len, _countof(buffer) - len, ", (time=%d ms)\r\n", GetTickCount() - s_startTick);
+
+ OutputDebugStringA(buffer);
+#endif
+
+ va_end(args);
+
+}
+
+#endif
+
+
+HRESULT MulticoreJitRecorder::WriteOutput()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY; // Called from AppDomain::Stop which is MODE_ANY
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+
+ // Go into preemptive mode for file operations
+ GCX_PREEMP();
+
+ {
+ CFileStream fileStream;
+
+ if (SUCCEEDED(hr = fileStream.OpenForWrite(m_fullFileName)))
+ {
+ hr = WriteOutput(& fileStream);
+ }
+ }
+
+ return hr;
+}
+
+
+HRESULT WriteData(IStream * pStream, const void * pData, unsigned len)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END
+
+ ULONG cbWritten;
+
+ HRESULT hr = pStream->Write(pData, len, & cbWritten);
+
+ if (SUCCEEDED(hr) && (cbWritten != len))
+ {
+ hr = E_FAIL;
+ }
+
+ return hr;
+}
+
+// Write string, rounding the length up to DWORD alignment
+HRESULT WriteString(const void * pString, unsigned len, IStream * pStream)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ ULONG cbWritten = 0;
+
+ HRESULT hr;
+
+ hr = pStream->Write(pString, len, & cbWritten);
+
+ if (SUCCEEDED(hr))
+ {
+ len = RoundUp(len) - len;
+
+ if (len != 0)
+ {
+ cbWritten = 0;
+
+ hr = pStream->Write(& cbWritten, len, & cbWritten);
+ }
+ }
+
+ return hr;
+}
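+// For example, a 5-byte string is written as 5 bytes plus RoundUp(5) - 5 = 3
+// zero bytes of padding, so the next record in the stream stays DWORD-aligned.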
+
+
+// static
+FileLoadLevel MulticoreJitManager::GetModuleFileLoadLevel(Module * pModule)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ FileLoadLevel level = FILE_LOAD_CREATE; // min level
+
+ if (pModule != NULL)
+ {
+ DomainFile * pDomainFile = pModule->FindDomainFile(GetAppDomain());
+
+ if (pDomainFile != NULL)
+ {
+ level = pDomainFile->GetLoadLevel();
+ }
+ }
+
+ return level;
+}
+
+
+bool ModuleVersion::GetModuleVersion(Module * pModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = E_FAIL;
+
+ // GetMVID can throw exception
+ EX_TRY
+ {
+ PEFile * pFile = pModule->GetFile();
+
+ if (pFile != NULL)
+ {
+ PEAssembly * pAsm = pFile->GetAssembly();
+
+ if (pAsm != NULL)
+ {
+ // CorAssemblyFlags, only 16-bit used
+ versionFlags = pAsm->GetFlags();
+
+ _ASSERTE((versionFlags & 0x80000000) == 0);
+
+ if (pFile->HasNativeImage())
+ {
+ hasNativeImage = 1;
+ }
+
+ pAsm->GetVersion(& major, & minor, & build, & revision);
+
+ pAsm->GetMVID(& mvid);
+
+ hr = S_OK;
+ }
+ }
+
+ // If the load context is LOADFROM, store it in the flags.
+#ifdef FEATURE_FUSION
+ Assembly * pAssembly = pModule->GetAssembly();
+ LOADCTX_TYPE loadCtx = pAssembly->GetManifestFile()->GetLoadContext();
+ if(LOADCTX_TYPE_LOADFROM == loadCtx)
+ {
+ versionFlags |= VERSIONFLAG_LOADCTX_LOADFROM;
+ }
+#endif
+ }
+ EX_CATCH
+ {
+ hr = E_FAIL;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return SUCCEEDED(hr);
+}
+
+ModuleRecord::ModuleRecord(unsigned lenName, unsigned lenAsmName)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ memset(this, 0, sizeof(ModuleRecord));
+
+ recordID = Pack8_24(MULTICOREJIT_MODULE_RECORD_ID, sizeof(ModuleRecord));
+
+ wLoadLevel = 0;
+ // Extra data
+ lenModuleName = (unsigned short) lenName;
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ lenAssemblyName = (unsigned short) lenAsmName;
+ recordID += RoundUp(lenModuleName) + RoundUp(lenAssemblyName);
+#else
+ recordID += RoundUp(lenModuleName);
+#endif
+}
+
+
+bool RecorderModuleInfo::SetModule(Module * pMod)
+{
+ STANDARD_VM_CONTRACT;
+
+ pModule = pMod;
+
+ LPCUTF8 pModuleName = pMod->GetSimpleName();
+ unsigned lenModuleName = (unsigned) strlen(pModuleName);
+ simpleName.Set((const BYTE *) pModuleName, lenModuleName); // SBuffer::Set copies over name
+
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ SString sAssemblyName;
+ StackScratchBuffer scratch;
+ pMod->GetAssembly()->GetManifestFile()->GetDisplayName(sAssemblyName);
+
+ LPCUTF8 pAssemblyName = sAssemblyName.GetUTF8(scratch);
+ unsigned lenAssemblyName = sAssemblyName.GetCount();
+ assemblyName.Set((const BYTE *) pAssemblyName, lenAssemblyName);
+#endif
+
+#if defined(FEATURE_APPX_BINDER)
+
+ // Allow certain modules to load on background thread
+ if (AppX::IsAppXProcess() && MulticoreJitManager::IsLoadOkay(pMod))
+ {
+ flags |= FLAG_LOADOKAY;
+ }
+
+#endif
+
+ return moduleVersion.GetModuleVersion(pMod);
+}
+
+
+
+/////////////////////////////////////////////////////
+//
+// class MulticoreJitRecorder
+//
+/////////////////////////////////////////////////////
+
+HRESULT MulticoreJitRecorder::WriteModuleRecord(IStream * pStream, const RecorderModuleInfo & module)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+
+ const void * pModuleName = module.simpleName;
+ unsigned lenModuleName = module.simpleName.GetSize();
+
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ const void * pAssemblyName = module.assemblyName;
+ unsigned lenAssemblyName = module.assemblyName.GetSize();
+#else
+ unsigned lenAssemblyName = 0;
+#endif
+
+ ModuleRecord mod(lenModuleName, lenAssemblyName);
+
+ mod.version = module.moduleVersion;
+ mod.jitMethodCount = module.methodCount;
+ mod.wLoadLevel = (unsigned short) module.loadLevel;
+ mod.flags = module.flags;
+
+ hr = WriteData(pStream, & mod, sizeof(mod));
+
+ if (SUCCEEDED(hr))
+ {
+ hr = WriteString(pModuleName, lenModuleName, pStream);
+
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ if (SUCCEEDED(hr))
+ {
+ hr = WriteString(pAssemblyName, lenAssemblyName, pStream);
+ }
+#endif
+ }
+
+ return hr;
+}
+
+
+HRESULT MulticoreJitRecorder::WriteOutput(IStream * pStream)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ {
+ HeaderRecord header;
+
+ memset(&header, 0, sizeof(header));
+
+ header.recordID = Pack8_24(MULTICOREJIT_HEADER_RECORD_ID, sizeof(HeaderRecord));
+ header.version = MULTICOREJIT_PROFILE_VERSION;
+ header.moduleCount = m_ModuleCount;
+ header.methodCount = m_JitInfoCount - m_ModuleDepCount;
+ header.moduleDepCount = m_ModuleDepCount;
+
+ MulticoreJitCodeStorage & curStorage = m_pDomain->GetMulticoreJitManager().GetMulticoreJitCodeStorage();
+
+ // Stats about played profile, 14 short, 3 long = 40 bytes
+ header.shortCounters[ 0] = m_stats.m_nTotalMethod;
+ header.shortCounters[ 1] = m_stats.m_nHasNativeCode;
+ header.shortCounters[ 2] = m_stats.m_nTryCompiling;
+ header.shortCounters[ 3] = (unsigned short) curStorage.GetStored();
+ header.shortCounters[ 4] = (unsigned short) curStorage.GetReturned();
+ header.shortCounters[ 5] = m_stats.m_nFilteredMethods;
+ header.shortCounters[ 6] = m_stats.m_nMissingModuleSkip;
+ header.shortCounters[ 7] = m_stats.m_nTotalDelay;
+ header.shortCounters[ 8] = m_stats.m_nDelayCount;
+ header.shortCounters[ 9] = m_stats.m_nWalkBack;
+ header.shortCounters[10] = m_fAppxMode;
+
+ _ASSERTE(HEADER_W_COUNTER >= 14);
+
+ header.longCounters[0] = m_stats.m_hr;
+
+ _ASSERTE(HEADER_D_COUNTER >= 3);
+
+ _ASSERTE((sizeof(header) % sizeof(unsigned)) == 0);
+
+ hr = WriteData(pStream, & header, sizeof(header));
+ }
+
+ DWORD dwData = 0;
+
+ for (unsigned i = 0; SUCCEEDED(hr) && (i < m_ModuleCount); i ++)
+ {
+ hr = WriteModuleRecord(pStream, m_ModuleList[i]);
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ unsigned remain = m_JitInfoCount;
+
+ const unsigned * pInfo = m_JitInfoArray;
+
+ while (SUCCEEDED(hr) && (remain > 0))
+ {
+ unsigned count = remain;
+
+ if (count > MAX_JIT_COUNT)
+ {
+ count = MAX_JIT_COUNT;
+ }
+
+ dwData = Pack8_24(MULTICOREJIT_JITINF_RECORD_ID, count * sizeof(DWORD) + sizeof(DWORD));
+
+ hr = WriteData(pStream, & dwData, sizeof(dwData));
+
+ if (SUCCEEDED(hr))
+ {
+ hr = WriteData(pStream, pInfo, sizeof(unsigned) * count);
+ }
+
+ pInfo += count;
+ remain -= count;
+ }
+ }
+
+ MulticoreJitTrace(("New profile: %d modules, %d methods", m_ModuleCount, m_JitInfoCount));
+
+ _FireEtwMulticoreJit(W("WRITEPROFILE"), m_fullFileName.GetUnicode(), m_ModuleCount, m_JitInfoCount, 0);
+
+ return hr;
+}
+
+
+unsigned MulticoreJitRecorder::FindModule(Module * pModule)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ for (unsigned i = 0 ; i < m_ModuleCount; i ++)
+ {
+ if (m_ModuleList[i].pModule == pModule)
+ {
+ return i;
+ }
+ }
+
+ return UINT_MAX;
+}
+
+
+// Find known module index, or add to module table
+// Return UINT_MAX when table is full, or SetModule fails
+unsigned MulticoreJitRecorder::GetModuleIndex(Module * pModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ unsigned slot = FindModule(pModule);
+
+ if ((slot == UINT_MAX) && (m_ModuleCount < MAX_MODULES))
+ {
+ slot = m_ModuleCount ++;
+
+ if (! m_ModuleList[slot].SetModule(pModule))
+ {
+ return UINT_MAX;
+ }
+ }
+
+ return slot;
+}
+
+
+void MulticoreJitRecorder::RecordJitInfo(unsigned module, unsigned method)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_JitInfoCount < (LONG) MAX_METHOD_ARRAY)
+ {
+ unsigned info1 = Pack8_24(module, method & 0xFFFFFF);
+
+        // Due to incremental loading, quite a few RecordModuleLoad calls arrive with increasing load levels; merge them
+
+ // Previous record and current record are both MODULE_DEPENDENCY
+ if ((m_JitInfoCount > 0) && (info1 & MODULE_DEPENDENCY))
+ {
+ unsigned info0 = m_JitInfoArray[m_JitInfoCount - 1];
+
+ if ((info0 & 0xFFFF00FF) == (info1 & 0xFFFF00FF)) // to/from modules are the same
+ {
+ if (info1 > info0) // higher level
+ {
+ m_JitInfoArray[m_JitInfoCount - 1] = info1; // replace
+ }
+
+ return; // no new record
+ }
+ }
+
+ if (method & MODULE_DEPENDENCY)
+ {
+ m_ModuleDepCount ++;
+ }
+ else
+ {
+ m_ModuleList[module].methodCount ++;
+ }
+
+ m_JitInfoArray[m_JitInfoCount] = info1;
+ m_JitInfoCount ++;
+ }
+}
+
+class MulticoreJitRecorderModuleEnumerator : public MulticoreJitModuleEnumerator
+{
+ MulticoreJitRecorder * m_pRecorder;
+ bool m_fAppxMode;
+
+ HRESULT OnModule(Module * pModule)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ if (MulticoreJitManager::IsSupportedModule(pModule, false, m_fAppxMode))
+ {
+ m_pRecorder->AddModuleDependency(pModule, MulticoreJitManager::GetModuleFileLoadLevel(pModule));
+ }
+
+ return S_OK;
+ }
+
+public:
+ MulticoreJitRecorderModuleEnumerator(MulticoreJitRecorder * pRecorder, bool fAppxMode)
+ {
+ m_pRecorder = pRecorder;
+ m_fAppxMode = fAppxMode;
+ }
+};
+
+
+// The whole AppDomain is depending on pModule
+void MulticoreJitRecorder::AddModuleDependency(Module * pModule, FileLoadLevel loadLevel)
+{
+ STANDARD_VM_CONTRACT;
+
+ MulticoreJitTrace(("AddModuleDependency(%s, %d)", pModule->GetSimpleName(), loadLevel));
+
+ _FireEtwMulticoreJitA(W("ADDMODULEDEPENDENCY"), pModule->GetSimpleName(), loadLevel, 0, 0);
+
+ unsigned moduleTo = GetModuleIndex(pModule);
+
+ if (moduleTo != UINT_MAX)
+ {
+ if (m_ModuleList[moduleTo].loadLevel < loadLevel)
+ {
+ m_ModuleList[moduleTo].loadLevel = loadLevel;
+
+ // Update load level
+ RecordJitInfo(0, ((unsigned) loadLevel << 8) | moduleTo | MODULE_DEPENDENCY);
+ }
+ }
+}
+
+
+// Enumerate all modules within an assembly, call OnModule virtual method
+HRESULT MulticoreJitModuleEnumerator::HandleAssembly(DomainAssembly * pAssembly)
+{
+ STANDARD_VM_CONTRACT;
+
+ DomainAssembly::ModuleIterator modIt = pAssembly->IterateModules(kModIterIncludeLoaded);
+
+ HRESULT hr = S_OK;
+
+ while (modIt.Next() && SUCCEEDED(hr))
+ {
+ Module * pModule = modIt.GetModule();
+
+ if (pModule != NULL)
+ {
+ hr = OnModule(pModule);
+ }
+ }
+
+ return hr;
+}
+
+
+// Enum all loaded modules within pDomain, call OnModule virtual method
+HRESULT MulticoreJitModuleEnumerator::EnumerateLoadedModules(AppDomain * pDomain)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ AppDomain::AssemblyIterator appIt = pDomain->IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution));
+
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (appIt.Next(pDomainAssembly.This()) && SUCCEEDED(hr))
+ {
+#if !defined(FEATURE_CORECLR)
+ if (! pDomainAssembly->IsSystem())
+#endif
+ {
+ hr = HandleAssembly(pDomainAssembly);
+ }
+ }
+
+ return hr;
+}
+
+
+#if defined(FEATURE_APPX_BINDER)
+// Profile name: ProcessName_CoreAppId.Profile; for a server process, always use it for output.
+// Falls back to ProcessName_Application.Profile when no application id is available.
+
+void AppendAppxProfileName(SString & name)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ {
+ WCHAR wszProcessName[_MAX_PATH];
+
+ if (WszGetModuleFileName(NULL, wszProcessName, _MAX_PATH) != 0)
+ {
+ WCHAR * pNameOnly = wcsrchr(wszProcessName, W('\\'));
+
+ if (pNameOnly == NULL)
+ {
+ pNameOnly = wszProcessName;
+ }
+ else
+ {
+ pNameOnly ++;
+ }
+
+ WCHAR * pExt = wcsrchr(pNameOnly, W('.')); // last .
+
+ if (pExt != NULL)
+ {
+ * pExt = 0;
+ }
+
+ // Use process name only
+ name.Append(pNameOnly);
+ name.Append(W("_"));
+ }
+ }
+
+ LPCWSTR pAppId = NULL;
+ if (SUCCEEDED(AppX::GetApplicationId(pAppId)))
+ {
+ name.Append(pAppId);
+ name.Append(W(".Profile"));
+
+ return;
+ }
+
+ // default name
+ name.Append(AppxProfile);
+}
+#endif
+
+// static: single instance within a process
+
+#ifndef FEATURE_PAL
+TP_TIMER * MulticoreJitRecorder::s_delayedWriteTimer; // = NULL;
+
+// static
+void CALLBACK
+MulticoreJitRecorder::WriteMulticoreJitProfiler(PTP_CALLBACK_INSTANCE pInstance, PVOID pvContext, PTP_TIMER pTimer)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ // Avoid saving after MulticoreJitRecorder is deleted, and saving twice
+ if (! CloseTimer())
+ {
+ return;
+ }
+
+ MulticoreJitRecorder * pRecorder = (MulticoreJitRecorder *) pvContext;
+
+ if (pRecorder != NULL)
+ {
+#if defined(FEATURE_APPX_BINDER)
+ if (pRecorder->m_fAppxMode)
+ {
+ const wchar_t * pOutputDir = NULL;
+
+ HRESULT hr = Clr::Util::GetLocalAppDataDirectory(&pOutputDir);
+
+ if (SUCCEEDED(hr))
+ {
+ pRecorder->m_fullFileName = pOutputDir;
+ pRecorder->m_fullFileName.Append(W("\\"));
+
+ AppendAppxProfileName(pRecorder->m_fullFileName);
+
+ pRecorder->StopProfile(false);
+ }
+ }
+ else
+#endif
+ {
+ pRecorder->StopProfile(false);
+ }
+ }
+}
+
+#endif // !FEATURE_PAL
+
+void MulticoreJitRecorder::PreRecordFirstMethod()
+{
+ STANDARD_VM_CONTRACT;
+
+ // When first method is added to an AppDomain, add all currently loaded modules as dependent modules
+
+ m_fFirstMethod = false;
+
+ {
+ MulticoreJitRecorderModuleEnumerator enumerator(this, m_fAppxMode);
+
+ enumerator.EnumerateLoadedModules(m_pDomain);
+ }
+
+    // When running under Appx or CoreCLR for K, the AppDomain is normally not shut down properly (CLR in the hybrid case, or Alt-F4 shutdown),
+    // so we only allow writing out the profile after profileWriteTimeout seconds
+#if !defined(FEATURE_CORECLR)
+ if (m_fAppxMode)
+#endif
+ {
+ // Get the timeout in seconds.
+ int profileWriteTimeout = (int)CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MultiCoreJitProfileWriteDelay);
+
+#ifndef FEATURE_PAL
+        // Use the same threadpool timer as UsageLog to write out the profile when running under Appx or CoreCLR.
+ s_delayedWriteTimer = CreateThreadpoolTimer(WriteMulticoreJitProfiler, this, NULL);
+
+ if (s_delayedWriteTimer != NULL)
+ {
+ ULARGE_INTEGER msDelay;
+
+            // SetThreadpoolTimer needs the due time in 100 ns units; a negative value means relative time
+ msDelay.QuadPart = (ULONGLONG) -(profileWriteTimeout * 10 * 1000 * 1000);
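+            // e.g. profileWriteTimeout = 5 yields a QuadPart of -50,000,000
+            // (5 s * 10,000,000 100-ns ticks per second), i.e. fire in 5 seconds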
+ FILETIME ftDueTime;
+ ftDueTime.dwLowDateTime = msDelay.u.LowPart;
+ ftDueTime.dwHighDateTime = msDelay.u.HighPart;
+
+            // This will either set the timer to fire in profileWriteTimeout seconds, or reset an already-armed timer so the same will happen.
+            // This function is safe to call repeatedly.
+ SetThreadpoolTimer(s_delayedWriteTimer, &ftDueTime, 0, 2000 /* large 2000 ms window for executing this timer is acceptable as the timing here is very much not critical */);
+ }
+#endif // !FEATURE_PAL
+ }
+}
+
+
+void MulticoreJitRecorder::RecordMethodJit(MethodDesc * pMethod, bool application)
+{
+ STANDARD_VM_CONTRACT;
+
+ Module * pModule = pMethod->GetModule_NoLogging();
+
+ // Skip methods from non-supported modules
+ if (! MulticoreJitManager::IsSupportedModule(pModule, true, m_fAppxMode))
+ {
+ return;
+ }
+
+ // pModule could be unknown at this point (modules not enumerated, no event received yet)
+ unsigned moduleIndex = GetModuleIndex(pModule);
+
+ if (moduleIndex < UINT_MAX)
+ {
+ if (m_fFirstMethod)
+ {
+ PreRecordFirstMethod();
+ }
+
+ // Make sure level for current module is recorded properly
+ if (m_ModuleList[moduleIndex].loadLevel != FILE_ACTIVE)
+ {
+ FileLoadLevel needLevel = MulticoreJitManager::GetModuleFileLoadLevel(pModule);
+
+ if (m_ModuleList[moduleIndex].loadLevel < needLevel)
+ {
+ m_ModuleList[moduleIndex].loadLevel = needLevel;
+
+ // Update load level
+ RecordJitInfo(0, ((unsigned) needLevel << 8) | moduleIndex | MODULE_DEPENDENCY);
+ }
+ }
+
+ unsigned methodIndex = pMethod->GetMemberDef_NoLogging() & 0xFFFFFF;
+
+ if (methodIndex <= METHODINDEX_MASK)
+ {
+ if (application) // Jitted by application threads, not background thread
+ {
+ methodIndex |= JIT_BY_APP_THREAD;
+ }
+
+ RecordJitInfo(moduleIndex, methodIndex);
+ }
+ }
+}
+
+
+// Called from AppDomain::RaiseAssemblyResolveEvent; keep it simple
+
+void MulticoreJitRecorder::AbortProfile()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Increment session ID tells background thread to stop
+ m_pDomain->GetMulticoreJitManager().GetProfileSession().Increment();
+
+ m_fAborted = true; // Do not save output when StopProfile is called
+}
+
+
+HRESULT MulticoreJitRecorder::StopProfile(bool appDomainShutdown)
+{
+ CONTRACTL
+ {
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Increment session ID tells background thread to stop
+ MulticoreJitManager & manager = m_pDomain->GetMulticoreJitManager();
+
+ manager.GetProfileSession().Increment();
+
+ if (! m_fAborted && ! m_fullFileName.IsEmpty())
+ {
+ hr = WriteOutput();
+ }
+
+ MulticoreJitTrace(("StopProfile: Save new profile to %S, hr=0x%x", m_fullFileName.GetUnicode(), hr));
+
+ return hr;
+}
+
+
+// suffix (>= 0) is used for AutoStartProfile, to support multiple AppDomains. It's set to -1 for the normal API call path
+HRESULT MulticoreJitRecorder::StartProfile(const wchar_t * pRoot, const wchar_t * pFile, int suffix, LONG nSession)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_FALSE;
+
+ if ((pRoot == NULL) || (pFile == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ MulticoreJitTrace(("StartProfile('%S', '%S', %d)", pRoot, pFile, suffix));
+
+ size_t lenFile = wcslen(pFile);
+
+    // Options (AutoStartProfile via environment variable only, for testing):
+    //     ([d|D]<main-thread-delay-in-ms>)
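+    //
+    // For example (testing only, per the parsing below): a profile name of
+    // "(d)app.prof" disables multicore JIT, while "(500)app.prof" makes the
+    // main thread sleep 500 ms once playback has started.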
+    if ((suffix >= 0) && (lenFile >= 3) && (pFile[0] == '(')) // AutoStartProfile, using environment variable
+ {
+ pFile ++;
+ lenFile --;
+
+ while ((lenFile > 0) && isalpha(pFile[0]))
+ {
+ switch (pFile[0])
+ {
+ case 'd':
+ case 'D':
+ g_MulticoreJitEnabled = false;
+
+ default:
+ break;
+ }
+
+ pFile ++;
+ lenFile --;
+ }
+
+ if ((lenFile > 0) && isdigit(* pFile))
+ {
+ g_MulticoreJitDelay = 0;
+
+ while ((lenFile > 0) && isdigit(* pFile))
+ {
+ g_MulticoreJitDelay = g_MulticoreJitDelay * 10 + (int) (* pFile - '0');
+
+ pFile ++;
+ lenFile --;
+ }
+ }
+
+ // End of options
+ if ((lenFile > 0) && (* pFile == ')'))
+ {
+ pFile ++;
+ lenFile --;
+ }
+ }
+
+ MulticoreJitTrace(("g_MulticoreJitEnabled = %d, disable/enable Mcj feature", g_MulticoreJitEnabled));
+
+ if (g_MulticoreJitEnabled && (lenFile > 0))
+ {
+ m_fullFileName = pRoot;
+
+        // Append separator if root does not end with one
+ unsigned len = m_fullFileName.GetCount();
+
+ if ((len != 0) && (m_fullFileName[len - 1] != '\\'))
+ {
+ m_fullFileName.Append('\\');
+ }
+
+ m_fullFileName.Append(pFile);
+
+        // Suffix for AutoStartProfile, used for multiple AppDomains
+ if (suffix >= 0)
+ {
+ m_fullFileName.AppendPrintf(W("_%s_%s_%d.prof"),
+ SystemDomain::System()->DefaultDomain()->GetFriendlyName(),
+ m_pDomain->GetFriendlyName(),
+ suffix);
+ }
+
+ NewHolder<MulticoreJitProfilePlayer> player(new (nothrow) MulticoreJitProfilePlayer(
+ m_pDomain,
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ m_pBinderContext,
+#else
+ NULL,
+#endif
+ nSession,
+ m_fAppxMode));
+
+ if (player == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ else
+ {
+ HRESULT hr1 = S_OK;
+
+ EX_TRY
+ {
+ hr1 = player->ProcessProfile(m_fullFileName);
+ }
+ EX_CATCH_HRESULT(hr1);
+
+ // If ProcessProfile succeeds, the background thread is responsible for deleting it when it finishes; otherwise, delete now
+ if (SUCCEEDED(hr1))
+ {
+ if (g_MulticoreJitDelay > 0)
+ {
+ MulticoreJitTrace(("Delay main thread %d ms", g_MulticoreJitDelay));
+
+ ClrSleepEx(g_MulticoreJitDelay, FALSE);
+ }
+
+ player.SuppressRelease();
+ }
+
+ MulticoreJitTrace(("ProcessProfile('%S') returns %x", m_fullFileName.GetUnicode(), hr1));
+
+ // Ignore error, even when we can't play back the file, we can still record new one
+
+ // If file exists, but profile header can't be read, pass error to caller (ignored by caller for non Appx)
+ if (hr1 == COR_E_BADIMAGEFORMAT)
+ {
+ hr = hr1;
+ }
+ }
+ }
+
+ MulticoreJitTrace(("StartProfile('%S', '%S', %d) returns %x", pRoot, pFile, suffix, hr));
+
+ _FireEtwMulticoreJit(W("STARTPROFILE"), m_fullFileName.GetUnicode(), hr, 0, 0);
+
+ return hr;
+}
+
+
+// Module load call back, record new module information, update play-back module list
+void MulticoreJitRecorder::RecordModuleLoad(Module * pModule, FileLoadLevel loadLevel)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (pModule != NULL)
+ {
+ if (! m_fFirstMethod) // If m_fFirstMethod flag is still on, defer calling AddModuleDependency until first method JIT
+ {
+ AddModuleDependency(pModule, loadLevel);
+ }
+ }
+}
+
+
+// Callback from MethodDesc::MakeJitWorker
+PCODE MulticoreJitRecorder::RequestMethodCode(MethodDesc * pMethod, MulticoreJitManager * pManager)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Disable it when profiler is running
+
+#ifdef PROFILING_SUPPORTED
+
+ _ASSERTE(! CORProfilerTrackJITInfo());
+
+#endif
+
+ _ASSERTE(! pMethod->IsDynamicMethod());
+
+ PCODE pCode = NULL;
+
+ pCode = pManager->GetMulticoreJitCodeStorage().QueryMethodCode(pMethod);
+
+ if ((pCode != NULL) && pManager->IsRecorderActive()) // recorder may be off when player is on (e.g. for Appx)
+ {
+ RecordMethodJit(pMethod, false); // JITTed by background thread, returned to application
+ }
+
+ return pCode;
+}
+
+
+//////////////////////////////////////////////////////////
+//
+// class MulticoreJitManager: attachment to AppDomain
+//
+//
+//////////////////////////////////////////////////////////
+
+
+// API Function: SetProfileRoot, stores information on the MulticoreJitManager class
+// Threading: protected by InterlockedCompareExchange(m_fSetProfileRootCalled)
+
+void MulticoreJitManager::SetProfileRoot(AppDomain * pDomain, const wchar_t * pProfilePath)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef PROFILING_SUPPORTED
+
+ if (CORProfilerTrackJITInfo())
+ {
+ return;
+ }
+
+#endif
+
+ if (g_SystemInfo.dwNumberOfProcessors >= 2)
+ {
+ if (InterlockedCompareExchange(& m_fSetProfileRootCalled, SETPROFILEROOTCALLED, 0) == 0) // Only allow the first call per appdomain
+ {
+ m_profileRoot = pProfilePath;
+ }
+ }
+}
+
+
+// API Function: StartProfile
+// Threading: protected by m_playerLock
+void MulticoreJitManager::StartProfile(AppDomain * pDomain, ICLRPrivBinder *pBinderContext, const wchar_t * pProfile, int suffix)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ if (m_fSetProfileRootCalled != SETPROFILEROOTCALLED)
+ {
+ MulticoreJitTrace(("StartProfile fail: SetProfileRoot not called/failed"));
+ _FireEtwMulticoreJit(W("STARTPROFILE"), W("No SetProfileRoot"), 0, 0, 0);
+ return;
+ }
+
+ // Need extra processor for multicore JIT feature
+ _ASSERTE(g_SystemInfo.dwNumberOfProcessors >= 2);
+
+#ifdef PROFILING_SUPPORTED
+
+ if (CORProfilerTrackJITInfo())
+ {
+ MulticoreJitTrace(("StartProfile fail: CORProfilerTrackJITInfo on"));
+ _FireEtwMulticoreJit(W("STARTPROFILE"), W("Profiling On"), 0, 0, 0);
+ return;
+ }
+
+#endif
+ CrstHolder hold(& m_playerLock);
+
+ // Stop current profiling first, delete current m_pMulticoreJitRecorder if any
+ StopProfile(false);
+
+ if ((pProfile != NULL) && (pProfile[0] != 0)) // Ignore empty file name, just same as StopProfile
+ {
+ MulticoreJitRecorder * pRecorder = new (nothrow) MulticoreJitRecorder(
+ pDomain,
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ pBinderContext,
+#else
+ NULL,
+#endif
+ m_fAppxMode);
+
+ if (pRecorder != NULL)
+ {
+ m_pMulticoreJitRecorder = pRecorder;
+
+ LONG sessionID = m_ProfileSession.Increment();
+
+ HRESULT hr = m_pMulticoreJitRecorder->StartProfile(m_profileRoot, pProfile, suffix, sessionID);
+
+ MulticoreJitTrace(("MulticoreJitRecorder session %d created: %x", sessionID, hr));
+
+            if (m_fAppxMode) // In Appx mode, the recorder is only enabled when the file exists but its header is bad (e.g. zero-length)
+ {
+ if (hr == COR_E_BADIMAGEFORMAT)
+ {
+ m_fRecorderActive = true;
+ }
+ }
+            else if ((hr == COR_E_BADIMAGEFORMAT) || SUCCEEDED(hr)) // Otherwise, ignore COR_E_BADIMAGEFORMAT, always record a new profile
+ {
+ m_fRecorderActive = true;
+ }
+
+ _FireEtwMulticoreJit(W("STARTPROFILE"), W("Recorder"), m_fRecorderActive, hr, 0);
+ }
+ }
+}
+
+
+// Threading: protected by m_playerLock
+void MulticoreJitManager::AbortProfile()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ if (m_fSetProfileRootCalled != SETPROFILEROOTCALLED)
+ {
+ return;
+ }
+
+ CrstHolder hold(& m_playerLock);
+
+ if (m_pMulticoreJitRecorder != NULL)
+ {
+ MulticoreJitTrace(("AbortProfile"));
+
+ _FireEtwMulticoreJit(W("ABORTPROFILE"), W(""), 0, 0, 0);
+
+ m_fRecorderActive = false;
+
+ m_pMulticoreJitRecorder->AbortProfile();
+ }
+
+ // Disable the feature within the AppDomain
+ m_fSetProfileRootCalled = -1;
+}
+
+
+// Stop current profiling; can be called automatically at AppDomain shutdown
+// Threading: protected by m_playerLock
+void MulticoreJitManager::StopProfile(bool appDomainShutdown)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ if (m_fSetProfileRootCalled != SETPROFILEROOTCALLED)
+ {
+ return;
+ }
+
+ MulticoreJitRecorder * pRecorder;
+
+ if (appDomainShutdown)
+ {
+        // In the AppDomain shutdown code path, need to hold the m_playerLock critical section to wait for other threads to finish using the recorder
+ CrstHolder hold(& m_playerLock);
+
+ pRecorder = InterlockedExchangeT(& m_pMulticoreJitRecorder, NULL);
+ }
+ else
+ {
+ // When called from StartProfile, should not take critical section because it's already entered
+
+ pRecorder = InterlockedExchangeT(& m_pMulticoreJitRecorder, NULL);
+ }
+
+ if (pRecorder != NULL)
+ {
+ m_fRecorderActive = false;
+
+ EX_TRY
+ {
+ pRecorder->StopProfile(appDomainShutdown);
+ }
+ EX_CATCH
+ {
+ MulticoreJitTrace(("StopProfile(%d) throws exception", appDomainShutdown));
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ delete pRecorder;
+ }
+
+ MulticoreJitTrace(("StopProfile(%d) returns", appDomainShutdown));
+}
+
+
+LONG g_nMulticoreAutoStart = 0;
+
+// Threading: calls into StartProfile
+void MulticoreJitManager::AutoStartProfile(AppDomain * pDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ CLRConfigStringHolder wszProfile(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MultiCoreJitProfile));
+
+ if ((wszProfile != NULL) && (wszProfile[0] != 0))
+ {
+ int suffix = (int) InterlockedIncrement(& g_nMulticoreAutoStart);
+
+ SetProfileRoot(pDomain, W("")); // Fake a SetProfileRoot call
+
+ StartProfile(
+ pDomain,
+ NULL,
+ wszProfile,
+ suffix);
+ }
+}
+
+#if defined(FEATURE_APPX_BINDER)
+
+// Called from CorHost2::ExecuteMain
+void MulticoreJitManager::AutoStartProfileAppx(AppDomain * pDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (InterlockedCompareExchange(& m_fAutoStartCalled, SETPROFILEROOTCALLED, 0) == 0) // Only allow the first call
+ {
+ WCHAR wzFilePath[_MAX_PATH];
+
+ UINT32 cchFilePath = NumItems(wzFilePath);
+
+ SString profileName;
+
+ // Try to find ProcessName_AppId.Profile
+ AppendAppxProfileName(profileName);
+
+ // Search for Application.Profile within the package
+ HRESULT hr = AppX::FindFileInCurrentPackage(profileName, &cchFilePath, wzFilePath);
+
+ if (SUCCEEDED(hr))
+ {
+ m_fAppxMode = true;
+ SetProfileRoot(pDomain, W("")); // Fake a SetProfileRoot call
+ StartProfile(pDomain, NULL, wzFilePath);
+ }
+ else
+ {
+ _FireEtwMulticoreJit(W("AUTOSTARTPROFILEAPPX"), profileName, hr, 0, 0);
+ }
+ }
+}
+
+#endif
+
+// Constructor
+
+MulticoreJitManager::MulticoreJitManager()
+{
+ CONTRACTL
+ {
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ m_pMulticoreJitRecorder = NULL;
+ m_fSetProfileRootCalled = 0;
+ m_fAutoStartCalled = 0;
+ m_fRecorderActive = false;
+ m_fAppxMode = false;
+
+ m_playerLock.Init(CrstMulticoreJitManager, (CrstFlags)(CRST_TAKEN_DURING_SHUTDOWN));
+ m_MulticoreJitCodeStorage.Init();
+}
+
+
+// Threading: uses Release to free object
+MulticoreJitManager::~MulticoreJitManager()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pMulticoreJitRecorder != NULL)
+ {
+ delete m_pMulticoreJitRecorder;
+
+ m_pMulticoreJitRecorder = NULL;
+ }
+
+ m_playerLock.Destroy();
+}
+
+
+// Threading: protected by m_playerLock
+
+void MulticoreJitManager::RecordModuleLoad(Module * pModule, FileLoadLevel loadLevel)
+{
+ STANDARD_VM_CONTRACT;
+
+
+#if defined(FEATURE_APPX_BINDER) && !defined(FEATURE_CORECLR)
+    // When running under Appx, allow framework assemblies / first-party WinMDs to load.
+    // Load-level changes are not allowed in the background thread, except for resource DLLs (loaded for exception throwing), but this could still happen.
+ _ASSERTE(! GetThread()->HasThreadStateNC(Thread::TSNC_CallingManagedCodeDisabled) || ModuleHasNoCode(pModule)
+ || m_fAppxMode && IsLoadOkay(pModule));
+
+#elif !defined(FEATURE_CORECLR)
+
+ _ASSERTE(! GetThread()->HasThreadStateNC(Thread::TSNC_CallingManagedCodeDisabled) || ModuleHasNoCode(pModule));
+
+#endif
+
+ if (m_fRecorderActive)
+ {
+ if(IsSupportedModule(pModule, false, m_fAppxMode)) // Filter out unsupported module
+ {
+ CrstHolder hold(& m_playerLock);
+
+ if (m_pMulticoreJitRecorder != NULL)
+ {
+ m_pMulticoreJitRecorder->RecordModuleLoad(pModule, loadLevel);
+ }
+ }
+ else
+ {
+ _FireEtwMulticoreJitA(W("UNSUPPORTEDMODULE"), pModule->GetSimpleName(), 0, 0, 0);
+ }
+ }
+}
+
+
+// Callback from MethodDesc::MakeJitWorker
+// Threading: protected by m_playerLock
+
+PCODE MulticoreJitManager::RequestMethodCode(MethodDesc * pMethod)
+{
+ STANDARD_VM_CONTRACT;
+
+ CrstHolder hold(& m_playerLock);
+
+ if (m_pMulticoreJitRecorder != NULL)
+ {
+ PCODE requestedCode = m_pMulticoreJitRecorder->RequestMethodCode(pMethod, this);
+ if(requestedCode)
+ {
+ _FireEtwMulticoreJitMethodCodeReturned(pMethod);
+ }
+
+ return requestedCode;
+ }
+
+ return NULL;
+}
+
+
+// Callback from MethodDesc::MakeJitWorker
+// Threading: protected by m_playerLock
+
+void MulticoreJitManager::RecordMethodJit(MethodDesc * pMethod)
+{
+ STANDARD_VM_CONTRACT;
+
+ CrstHolder hold(& m_playerLock);
+
+ if (m_pMulticoreJitRecorder != NULL)
+ {
+ m_pMulticoreJitRecorder->RecordMethodJit(pMethod, true);
+
+ if (m_pMulticoreJitRecorder->IsAtFullCapacity())
+ {
+ m_fRecorderActive = false;
+ }
+ }
+}
+
+
+// static
+bool MulticoreJitManager::IsMethodSupported(MethodDesc * pMethod)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return pMethod->HasILHeader() &&
+ pMethod->IsTypicalSharedInstantiation() &&
+ ! pMethod->IsDynamicMethod();
+}
+
+
+// static
+// Stop all multicore Jitting profile, called from EEShutDown
+void MulticoreJitManager::StopProfileAll()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!AppX::IsAppXProcess());
+
+ AppDomainIterator domain(TRUE);
+
+ while (domain.Next())
+ {
+ AppDomain * pDomain = domain.GetDomain();
+
+ if (pDomain != NULL)
+ {
+ pDomain->GetMulticoreJitManager().StopProfile(true);
+ }
+ }
+}
+
+// static
+// Stop all multicore Jitting in the current process, called from ProfilingAPIUtility::LoadProfiler
+void MulticoreJitManager::DisableMulticoreJit()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+#ifdef PROFILING_SUPPORTED
+
+ AppDomainIterator domain(TRUE);
+
+ while (domain.Next())
+ {
+ AppDomain * pDomain = domain.GetDomain();
+
+ if (pDomain != NULL)
+ {
+ pDomain->GetMulticoreJitManager().AbortProfile();
+ }
+ }
+
+#endif
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// MultiCore JIT
+//
+// Arguments:
+// wszProfile - profile name
+// ptrNativeAssemblyLoadContext - the binding context
+//
+void QCALLTYPE MultiCoreJITNative::InternalStartProfile(__in_z LPCWSTR wszProfile, INT_PTR ptrNativeAssemblyLoadContext)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ AppDomain * pDomain = GetAppDomain();
+
+ ICLRPrivBinder *pBinderContext = reinterpret_cast<ICLRPrivBinder *>(ptrNativeAssemblyLoadContext);
+
+ pDomain->GetMulticoreJitManager().StartProfile(
+ pDomain,
+ pBinderContext,
+ wszProfile);
+
+ END_QCALL;
+}
+
+
+void QCALLTYPE MultiCoreJITNative::InternalSetProfileRoot(__in_z LPCWSTR wszProfilePath)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ AppDomain * pDomain = GetAppDomain();
+
+ pDomain->GetMulticoreJitManager().SetProfileRoot(pDomain, wszProfilePath);
+
+ END_QCALL;
+}
diff --git a/src/vm/multicorejit.h b/src/vm/multicorejit.h
new file mode 100644
index 0000000000..3e7e2b114e
--- /dev/null
+++ b/src/vm/multicorejit.h
@@ -0,0 +1,278 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: MultiCoreJIT.h
+//
+
+//
+// Multicore JIT interface to other part of the VM (Thread, AppDomain, JIT)
+//
+// ======================================================================================
+
+#ifndef __MULTICORE_JIT_H__
+#define __MULTICORE_JIT_H__
+
+class MulticoreJitRecorder;
+
+
+class MulticoreJitCounter
+{
+ volatile LONG m_nValue;
+
+public:
+ MulticoreJitCounter()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_nValue = 0;
+ }
+
+ inline LONG GetValue() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_nValue;
+ }
+
+ LONG Increment()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return InterlockedIncrement(& m_nValue);
+ }
+
+ LONG Decrement()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return InterlockedDecrement(& m_nValue);
+ }
+};
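+
+// Usage sketch, inferred from MulticoreJitRecorder::AbortProfile/StopProfile:
+// the recorder Increment()s the per-domain session counter to tell an older
+// background player to quit; a player remembers the value it started with and
+// aborts once the counter moves on.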
+
+
+// Statistics, information shared by recorder and player
+struct MulticoreJitPlayerStat
+{
+ unsigned short m_nTotalMethod;
+ unsigned short m_nHasNativeCode;
+ unsigned short m_nTryCompiling;
+ unsigned short m_nFilteredMethods;
+ unsigned short m_nMissingModuleSkip;
+ unsigned short m_nTotalDelay;
+ unsigned short m_nDelayCount;
+ unsigned short m_nWalkBack;
+
+ HRESULT m_hr;
+
+ void Clear()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ memset(this, 0, sizeof(MulticoreJitPlayerStat));
+ }
+};
+
+
+// Code Storage
+
+class MulticoreJitCodeStorage
+{
+private:
+ MapSHashWithRemove<PVOID,PCODE> m_nativeCodeMap;
+ CrstExplicitInit m_crstCodeMap; // protecting m_nativeCodeMap
+ unsigned m_nStored;
+ unsigned m_nReturned;
+
+public:
+
+ void Init();
+
+#ifdef DACCESS_COMPILE
+
+ ~MulticoreJitCodeStorage()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+#else
+
+ ~MulticoreJitCodeStorage();
+
+#endif
+
+ void StoreMethodCode(MethodDesc * pMethod, PCODE pCode);
+
+ PCODE QueryMethodCode(MethodDesc * pMethod);
+
+ inline unsigned GetRemainingMethodCount() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_nativeCodeMap.GetCount();
+ }
+
+ inline unsigned GetStored() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_nStored;
+ }
+
+ inline unsigned GetReturned() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_nReturned;
+ }
+
+};
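+
+// Usage sketch (see MulticoreJitRecorder::RequestMethodCode): the background
+// player JITs ahead and deposits code via StoreMethodCode(); application
+// threads drain entries through QueryMethodCode(), and GetStored()/GetReturned()
+// report how much of that pre-JITted code was actually picked up.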
+
+
+const LONG SETPROFILEROOTCALLED = 1;
+
+
+// Multicore JIT attachment to AppDomain class
+class MulticoreJitManager
+{
+private:
+ MulticoreJitCounter m_ProfileSession; // Sequential profile session within the domain,
+ // incremented for every StartProfile/StopProfile/AbortProfile call to signal older players to quit
+                                                    // we deliberately avoid keeping a pointer to the player
+
+ MulticoreJitRecorder * m_pMulticoreJitRecorder; // pointer to current recorder
+ SString m_profileRoot; // profile root string
+ LONG m_fSetProfileRootCalled; // SetProfileRoot has been called
+ LONG m_fAutoStartCalled;
+ bool m_fRecorderActive; // Manager open for recording/event, turned on when initialized properly, turned off when at full capacity
+ bool m_fAppxMode;
+ CrstExplicitInit m_playerLock; // Thread protection (accessing m_pMulticoreJitRecorder)
+ MulticoreJitPlayerStat m_stats; // Statistics: normally gathered by player, written to profile
+
+ MulticoreJitCodeStorage m_MulticoreJitCodeStorage;
+
+public:
+
+#ifndef DACCESS_COMPILE
+ MulticoreJitManager();
+
+ ~MulticoreJitManager();
+#else
+
+ MulticoreJitManager()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pMulticoreJitRecorder = NULL;
+ m_fSetProfileRootCalled = 0;
+ m_fAutoStartCalled = 0;
+ m_fRecorderActive = false;
+ m_fAppxMode = false;
+ }
+
+ ~MulticoreJitManager()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+#endif
+
+ inline bool IsRecorderActive() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_fRecorderActive;
+ }
+
+ inline MulticoreJitCounter & GetProfileSession()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_ProfileSession;
+ }
+
+ // Once multicore JIT is enabled in an AppDomain, do not allow Cctors to run during JITting for consistency
+ // Called from CEEInfo::initClass
+ inline bool AllowCCtorsToRunDuringJITing() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_fSetProfileRootCalled == 0;
+ }
+
+#if defined(FEATURE_APPX_BINDER)
+
+ // Check for file appx.prof to automatically start multicore JIT
+ void AutoStartProfileAppx(AppDomain * pDomain);
+
+#endif
+
+ // Check for environment variable to automatically start multicore JIT
+ void AutoStartProfile(AppDomain * pDomain);
+
+ // Multicore JIT API function: SetProfileRoot
+ void SetProfileRoot(AppDomain * pDomain, const wchar_t * pProfilePath);
+
+ // Multicore JIT API function: StartProfile
+ void StartProfile(AppDomain * pDomain, ICLRPrivBinder * pBinderContext, const wchar_t * pProfile, int suffix = -1);
+
+ // Multicore JIT API function (internal): AbortProfile
+ void AbortProfile();
+
+ // Called at AppDomain shut down to automatically shut down remaining profiling
+ void StopProfile(bool appDomainShutdown);
+
+ static void StopProfileAll();
+
+ // Track module loading event for recording
+ void RecordModuleLoad(Module * pModule, FileLoadLevel loadLevel);
+
+ static bool IsMethodSupported(MethodDesc * pMethod);
+
+ PCODE RequestMethodCode(MethodDesc * pMethod);
+
+ void RecordMethodJit(MethodDesc * pMethod);
+
+ MulticoreJitPlayerStat & GetStats()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_stats;
+ }
+
+ MulticoreJitCodeStorage & GetMulticoreJitCodeStorage()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_MulticoreJitCodeStorage;
+ }
+
+ static void DisableMulticoreJit();
+
+ static bool IsSupportedModule(Module * pModule, bool fMethodJit, bool fAppx);
+
+ static FileLoadLevel GetModuleFileLoadLevel(Module * pModule);
+
+ static bool ModuleHasNoCode(Module * pModule);
+
+#if defined(FEATURE_APPX_BINDER)
+
+ static bool IsLoadOkay(Module * pModule);
+
+#endif
+
+};
+
+
+// For ecall.cpp
+
+class MultiCoreJITNative
+{
+public:
+ static void QCALLTYPE InternalSetProfileRoot(__in_z LPCWSTR directoryPath);
+
+ static void QCALLTYPE InternalStartProfile(__in_z LPCWSTR wszProfile, INT_PTR ptrNativeAssemblyLoadContext);
+};
+
+#endif // __MULTICORE_JIT_H__
diff --git a/src/vm/multicorejitimpl.h b/src/vm/multicorejitimpl.h
new file mode 100644
index 0000000000..c9ba0fdf97
--- /dev/null
+++ b/src/vm/multicorejitimpl.h
@@ -0,0 +1,498 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: MultiCoreJITImpl.h
+//
+
+//
+// Multicore JIT internal implementation header file
+//
+// ======================================================================================
+
+#ifdef _DEBUG
+
+#define MULTICOREJIT_LOGGING
+
+#else
+// Enable direct logging through OutputDebugString in retail builds for perf investigation; to be disabled before check-in
+
+// #define MULTICOREJIT_LOGGING
+
+#endif
+
+// Make sure a record fits within 2048 bytes (511 methods currently)
+
+const int MAX_RECORD_SIZE = 2048;
+const unsigned MAX_JIT_COUNT = (MAX_RECORD_SIZE - sizeof(unsigned)) / sizeof(unsigned);
+
+const int HEADER_W_COUNTER = 14; // Extra 16-bit counters in header for statistics (28 bytes)
+const int HEADER_D_COUNTER = 3;  // Extra 32-bit counters in header for statistics (12 bytes)
+const unsigned MAX_MODULES = 512; // Maximum number of modules
+
+const unsigned MAX_METHOD_ARRAY = 16384; // Maximum number of methods
+
+const int MULTICOREJITLIFE = 60 * 1000; // 60 seconds
+
+const int MULTICOREJITBLOCKLIMIT = 10 * 1000; // 10 seconds
+
+ // 8-bit module index
+
+ // Method JIT information: 8-bit module 4-bit flag 20-bit method index
+const unsigned MODULE_DEPENDENCY = 0x800000; // 1-bit module dependency mask
+const unsigned JIT_BY_APP_THREAD = 0x400000; // 1-bit application thread
+
+const unsigned METHODINDEX_MASK = 0x0FFFFF; // 20-bit method index
+
+    // Dependency information: 8-bit module 4-bit flag 4-bit unused 8-bit level 8-bit module
+const unsigned LEVEL_SHIFT = 8;
+const unsigned LEVEL_MASK = 0xFF; // 8-bit file load level
+const unsigned MODULE_MASK = 0xFF; // 8-bit dependent module index
+
+const int MAX_WALKBACK = 128;
+
+enum
+{
+ MULTICOREJIT_PROFILE_VERSION = 101,
+
+ MULTICOREJIT_HEADER_RECORD_ID = 1,
+ MULTICOREJIT_MODULE_RECORD_ID = 2,
+ MULTICOREJIT_JITINF_RECORD_ID = 3
+};
+
+
+inline unsigned Pack8_24(unsigned up, unsigned low)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (up << 24) + low;
+}
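+// For example, Pack8_24(MULTICOREJIT_MODULE_RECORD_ID, 40) == 0x02000028:
+// the record type occupies the top 8 bits, the record size the low 24 bits.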
+
+// Multicore JIT profile format
+
+// <profile>::= <HeaderRecord> { <ModuleRecord> | <JitInfRecord> }
+//
+// 1. Each record is DWORD aligned
+// 2. Each record starts with a DWORD <recordID> with Pack8_24(record type, record size)
+// 3. Counters are just statistical information gathered (mainly during playback), good for quick diagnosis, not used to guide playback
+// 4. Maximum number of modules supported is 256
+// 5. Only the simple module name is stored
+// 6. Maximum method index: 20-bit, could extend to 22 bits
+// 7. JIT_BY_APP_THREAD is for diagnosis only
+
+// <HeaderRecord>::= <recordID> <version> <timeStamp> <moduleCount> <methodCount> <DependencyCount> <unsigned short counter>*14 <unsigned counter>*3
+// <ModuleRecord>::= <recordID> <ModuleVersion> <JitMethodCount> <loadLevel> <lenModuleName> char*lenModuleName <padding>
+// <JitInfRecord>::= <recordID> { <moduleDependency> | <methodJitInfo> }
+
+// <moduleDependency>::
+//    8-bit source module index, currently always 0 until we track per-module dependencies
+//    8-bit flag, MODULE_DEPENDENCY is 1
+//    8-bit load level
+//    8-bit target module index
+
+// <methodJitInfo>::
+//    8-bit module index of the jitted method
+//    4-bit flag, MODULE_DEPENDENCY is 0, JIT_BY_APP_THREAD may be 1
+//    20-bit method index
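+//
+// A minimal decoding sketch (illustrative only; the real reader lives in the
+// player, MultiCoreJITPlayer.cpp). Given a DWORD dw at a record boundary:
+//
+//     unsigned recordType = dw >> 24;       // MULTICOREJIT_*_RECORD_ID
+//     unsigned recordSize = dw & 0xFFFFFF;  // total bytes, DWORD aligned
+//
+// and given a <JitInfRecord> entry 'info':
+//
+//     if (info & MODULE_DEPENDENCY)
+//     {
+//         unsigned level  = (info >> LEVEL_SHIFT) & LEVEL_MASK;
+//         unsigned target = info & MODULE_MASK;            // dependent module index
+//     }
+//     else
+//     {
+//         unsigned module      = info >> 24;               // module of the method
+//         unsigned methodIndex = info & METHODINDEX_MASK;
+//     }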
+
+
+struct HeaderRecord
+{
+ unsigned recordID;
+ unsigned version;
+ unsigned timeStamp;
+ unsigned moduleCount;
+ unsigned methodCount;
+ unsigned moduleDepCount;
+ unsigned short shortCounters[HEADER_W_COUNTER];
+ unsigned longCounters [HEADER_D_COUNTER];
+};
+
+
+class ModuleVersion
+{
+public:
+ unsigned short major;
+ unsigned short minor;
+ unsigned short build;
+ unsigned short revision;
+
+ unsigned versionFlags :31;
+ unsigned hasNativeImage:1;
+
+ GUID mvid;
+
+ bool GetModuleVersion(Module * pModule);
+
+ ModuleVersion()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ memset(this, 0, sizeof(ModuleVersion));
+ }
+
+ bool MatchWith(const ModuleVersion & other) const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if ((major == other.major) &&
+ (minor == other.minor) &&
+ (build == other.build) &&
+ (revision == other.revision) &&
+ (versionFlags == other.versionFlags))
+ {
+ return memcmp(& mvid, & other.mvid, sizeof(mvid)) == 0;
+ }
+
+ return false;
+ }
+
+ bool NativeImageFlagDiff(const ModuleVersion & other) const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return hasNativeImage != other.hasNativeImage;
+ }
+};
+
+inline unsigned RoundUp(unsigned val)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (val + 3) / 4 * 4;
+}
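+// e.g. RoundUp(5) == 8, RoundUp(8) == 8: rounds up to a multiple of sizeof(DWORD)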
+
+const unsigned short FLAG_LOADOKAY = 1; // Okay to load the module in background thread (e.g. for Appx first party WinMD)
+
+// Used to mark a module that was loaded in the LOADFROM context.
+// First 16 bits are reserved for CorAssemblyFlags. Use the last bit of the 31-bit versionFlags field (bit 30, 0x40000000) to allow for expansion of CorAssemblyFlags.
+const unsigned int VERSIONFLAG_LOADCTX_LOADFROM = 0x40000000;
+
+// Module record stored in the profile without the name
+
+class ModuleRecord
+{
+public:
+ unsigned recordID;
+ ModuleVersion version;
+ unsigned short jitMethodCount;
+ unsigned short flags;
+ unsigned short wLoadLevel;
+ unsigned short lenModuleName;
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ unsigned short lenAssemblyName;
+#endif
+
+ ModuleRecord(unsigned lenName = 0, unsigned lenAssemblyName = 0);
+
+ bool MatchWithModule(ModuleVersion & version, bool & gotVersion, Module * pModule, bool & shouldAbort, bool fAppx) const;
+
+ unsigned ModuleNameLen() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return lenModuleName;
+ }
+
+ const char * GetModuleName() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (const char *) (this + 1); // after this record
+ }
+
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ unsigned AssemblyNameLen() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return lenAssemblyName;
+ }
+
+ const char * GetAssemblyName() const
+ {
+ return GetModuleName() + RoundUp(lenModuleName); // after the module name
+ }
+#endif
+
+ void SetLoadLevel(FileLoadLevel loadLevel)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ wLoadLevel = (unsigned short) loadLevel;
+ }
+};
+
+
+class Module;
+class AppDomain;
+
+class PlayerModuleInfo;
+
+// Module enumerator
+class MulticoreJitModuleEnumerator
+{
+ virtual HRESULT OnModule(Module * pModule) = 0;
+
+public:
+ HRESULT EnumerateLoadedModules(AppDomain * pDomain);
+ HRESULT HandleAssembly(DomainAssembly * pAssembly);
+};
+
+
+class PlayerModuleInfo;
+
+// MulticoreJitProfilePlayer manages the background thread: playing back the profile, storing results into code storage, and gathering statistics
+
+class MulticoreJitProfilePlayer
+{
+friend class MulticoreJitRecorder;
+
+private:
+ ADID m_DomainID;
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ ICLRPrivBinder * m_pBinderContext;
+#endif
+ LONG m_nMySession;
+ unsigned m_nStartTime;
+ BYTE * m_pFileBuffer;
+ unsigned m_nFileSize;
+ MulticoreJitPlayerStat & m_stats;
+ MulticoreJitCounter & m_appdomainSession;
+ bool m_shouldAbort;
+ bool m_fAppxMode;
+
+ Thread * m_pThread;
+
+ unsigned m_nBlockingCount;
+ unsigned m_nMissingModule;
+
+ int m_nLoadedModuleCount;
+
+ unsigned m_busyWith;
+
+ unsigned m_headerModuleCount;
+ unsigned m_moduleCount;
+ PlayerModuleInfo * m_pModules;
+
+ void JITMethod(Module * pModule, unsigned methodIndex);
+
+ HRESULT HandleModuleRecord(const ModuleRecord * pModule);
+ HRESULT HandleMethodRecord(unsigned * buffer, int count);
+
+ bool CompileMethodDesc(Module * pModule, MethodDesc * pMD);
+
+ HRESULT PlayProfile();
+
+ bool GroupWaitForModuleLoad(int pos);
+
+ bool ShouldAbort(bool fast) const;
+
+ HRESULT JITThreadProc(Thread * pThread);
+
+ static DWORD WINAPI StaticJITThreadProc(void *args);
+
+ void TraceSummary();
+
+ HRESULT UpdateModuleInfo();
+
+ bool HandleModuleDependency(unsigned jitInfo);
+
+ HRESULT ReadCheckFile(const wchar_t * pFileName);
+
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ DomainAssembly * LoadAssembly(SString & assemblyName);
+#endif
+
+public:
+
+ MulticoreJitProfilePlayer(AppDomain * pDomain, ICLRPrivBinder * pBinderContext, LONG nSession, bool fAppxMode);
+
+ ~MulticoreJitProfilePlayer();
+
+ HRESULT ProcessProfile(const wchar_t * pFileName);
+
+ HRESULT OnModule(Module * pModule);
+};
+
+
+struct RecorderModuleInfo
+{
+ Module * pModule;
+ unsigned short methodCount;
+ unsigned short flags;
+ ModuleVersion moduleVersion;
+ SBuffer simpleName;
+ SBuffer assemblyName;
+ FileLoadLevel loadLevel;
+
+ RecorderModuleInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ pModule = NULL;
+ methodCount = 0;
+ flags = 0;
+ loadLevel = FILE_LOAD_CREATE;
+ }
+
+ bool SetModule(Module * pModule);
+};
+
+
+class MulticoreJitRecorder
+{
+private:
+ AppDomain * m_pDomain; // AutoStartProfile could be called from SystemDomain
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ ICLRPrivBinder * m_pBinderContext;
+#endif
+ SString m_fullFileName;
+ MulticoreJitPlayerStat & m_stats;
+
+ RecorderModuleInfo m_ModuleList[MAX_MODULES];
+ unsigned m_ModuleCount;
+ unsigned m_ModuleDepCount;
+
+ unsigned m_JitInfoArray[MAX_METHOD_ARRAY];
+ LONG m_JitInfoCount;
+
+ bool m_fFirstMethod;
+ bool m_fAborted;
+ bool m_fAppxMode;
+
+#ifndef FEATURE_PAL
+ static TP_TIMER * s_delayedWriteTimer;
+#endif // !FEATURE_PAL
+
+
+ unsigned FindModule(Module * pModule);
+ unsigned GetModuleIndex(Module * pModule);
+
+ HRESULT WriteModuleRecord(IStream * pStream, const RecorderModuleInfo & module);
+
+ void RecordJitInfo(unsigned module, unsigned method);
+
+ void AddAllModulesInAsm(DomainAssembly * pAssembly);
+
+ HRESULT WriteOutput(IStream * pStream);
+
+ HRESULT WriteOutput();
+
+ void PreRecordFirstMethod();
+
+#ifndef FEATURE_PAL
+ static void CALLBACK WriteMulticoreJitProfiler(PTP_CALLBACK_INSTANCE pInstance, PVOID pvContext, PTP_TIMER pTimer);
+#endif // !FEATURE_PAL
+
+public:
+
+ MulticoreJitRecorder(AppDomain * pDomain, ICLRPrivBinder * pBinderContext, bool fAppxMode)
+ : m_stats(pDomain->GetMulticoreJitManager().GetStats())
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pDomain = pDomain;
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ m_pBinderContext = pBinderContext;
+#endif
+ m_JitInfoCount = 0;
+ m_ModuleCount = 0;
+ m_ModuleDepCount = 0;
+
+ m_fFirstMethod = true;
+ m_fAborted = false;
+ m_fAppxMode = fAppxMode;
+
+#if defined(FEATURE_APPX_BINDER)
+
+ s_delayedWriteTimer = NULL;
+#endif
+
+ m_stats.Clear();
+ }
+
+#ifndef FEATURE_PAL
+ static bool CloseTimer()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ TP_TIMER * pTimer = InterlockedExchangeT(& s_delayedWriteTimer, NULL);
+
+ if (pTimer == NULL)
+ {
+ return false;
+ }
+
+ CloseThreadpoolTimer(pTimer);
+
+ return true;
+ }
+
+ ~MulticoreJitRecorder()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ CloseTimer();
+ }
+#endif // !FEATURE_PAL
+
+ bool IsAtFullCapacity() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_JitInfoCount >= (LONG) MAX_METHOD_ARRAY) ||
+ (m_ModuleCount >= MAX_MODULES);
+ }
+
+ void RecordMethodJit(MethodDesc * pMethod, bool application);
+
+ PCODE RequestMethodCode(MethodDesc * pMethod, MulticoreJitManager * pManager);
+
+ HRESULT StartProfile(const wchar_t * pRoot, const wchar_t * pFileName, int suffix, LONG nSession);
+
+ HRESULT StopProfile(bool appDomainShutdown);
+
+ void AbortProfile();
+
+ void RecordModuleLoad(Module * pModule, FileLoadLevel loadLevel);
+
+ void AddModuleDependency(Module * pModule, FileLoadLevel loadLevel);
+};
+
+#ifdef MULTICOREJIT_LOGGING
+
+void _MulticoreJitTrace(const char * format, ...);
+
+#define MulticoreJitTrace(x) do { _MulticoreJitTrace x; } while (0)
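+
+// Call sites use double parentheses, e.g. MulticoreJitTrace(("%d", n)): the
+// inner parentheses make the whole argument list a single macro argument,
+// which is then applied to the varargs function _MulticoreJitTrace.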
+
+#else
+
+#define MulticoreJitTrace(x)
+
+#endif
+
+extern unsigned g_MulticoreJitDelay; // Delay in StartProfile
+extern bool g_MulticoreJitEnabled; // Enable/Disable feature
+
+
+inline bool PrivateEtwEnabled()
+{
+#ifdef FEATURE_EVENT_TRACE
+ return MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled != 0;
+#else // FEATURE_EVENT_TRACE
+ return FALSE;
+#endif // FEATURE_EVENT_TRACE
+}
+
+void MulticoreJitFireEtw(const wchar_t * pAction, const wchar_t * pTarget, int p1, int p2, int p3);
+
+void MulticoreJitFireEtwA(const wchar_t * pAction, const char * pTarget, int p1, int p2, int p3);
+
+void MulticoreJitFireEtwMethodCodeReturned(MethodDesc * pMethod);
+
+#define _FireEtwMulticoreJit(String1, String2, Int1, Int2, Int3) if (PrivateEtwEnabled()) MulticoreJitFireEtw (String1, String2, Int1, Int2, Int3)
+#define _FireEtwMulticoreJitA(String1, String2, Int1, Int2, Int3) if (PrivateEtwEnabled()) MulticoreJitFireEtwA(String1, String2, Int1, Int2, Int3)
+#define _FireEtwMulticoreJitMethodCodeReturned(pMethod) if(PrivateEtwEnabled()) MulticoreJitFireEtwMethodCodeReturned(pMethod)
+
diff --git a/src/vm/multicorejitplayer.cpp b/src/vm/multicorejitplayer.cpp
new file mode 100644
index 0000000000..5d34fed9a0
--- /dev/null
+++ b/src/vm/multicorejitplayer.cpp
@@ -0,0 +1,1493 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: MultiCoreJITPlayer.cpp
+//
+
+// ===========================================================================
+// This file contains the implementation for MultiCore JIT profile playing back
+// ===========================================================================
+//
+
+#include "common.h"
+#include "vars.hpp"
+#include "security.h"
+#include "eeconfig.h"
+#include "dllimport.h"
+#include "comdelegate.h"
+#include "dbginterface.h"
+#include "listlock.inl"
+#include "stubgen.h"
+#include "eventtrace.h"
+#include "array.h"
+#include "fstream.h"
+#include "hash.h"
+#include "clrex.h"
+
+#include "appdomain.hpp"
+
+#include "multicorejit.h"
+#include "multicorejitimpl.h"
+
+// Options for controlling multicore JIT
+
+unsigned g_MulticoreJitDelay = 0; // Delay in StartProfile
+
+bool g_MulticoreJitEnabled = true; // Enable/Disable feature
+
+///////////////////////////////////////////////////////////////////////////////////
+//
+// class MulticoreJitCodeStorage
+//
+///////////////////////////////////////////////////////////////////////////////////
+
+
+void MulticoreJitCodeStorage::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY; // called from BaseDomain::Init which is MODE_ANY
+ }
+ CONTRACTL_END;
+
+ m_nStored = 0;
+ m_nReturned = 0;
+ m_crstCodeMap.Init(CrstMulticoreJitHash);
+}
+
+
+// Destructor
+MulticoreJitCodeStorage::~MulticoreJitCodeStorage()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_crstCodeMap.Destroy();
+}
+
+
+// Callback from MakeJitWorker to store compiled code, under MethodDesc lock
+void MulticoreJitCodeStorage::StoreMethodCode(MethodDesc * pMD, PCODE pCode)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef PROFILING_SUPPORTED
+ if (CORProfilerTrackJITInfo())
+ {
+ return;
+ }
+#endif
+
+ if (pCode != NULL)
+ {
+ CrstHolder holder(& m_crstCodeMap);
+
+#ifdef MULTICOREJIT_LOGGING
+ if (Logging2On(LF2_MULTICOREJIT, LL_INFO1000))
+ {
+ MulticoreJitTrace(("%p %p StoredMethodCode", pMD, pCode));
+ }
+#endif
+
+ PCODE code = NULL;
+
+ if (! m_nativeCodeMap.Lookup(pMD, & code))
+ {
+ m_nativeCodeMap.Add(pMD, pCode);
+
+ m_nStored ++;
+ }
+ }
+}
+
+
+// Query from MakeJitWorker: Lookup stored JITted methods
+PCODE MulticoreJitCodeStorage::QueryMethodCode(MethodDesc * pMethod)
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE code = NULL;
+
+ if (m_nStored > m_nReturned) // Quick check before taking lock
+ {
+ CrstHolder holder(& m_crstCodeMap);
+
+ if (m_nativeCodeMap.Lookup(pMethod, & code))
+ {
+ m_nReturned ++;
+
+ // Remove it to keep storage small (hopefully flat)
+ m_nativeCodeMap.Remove(pMethod);
+ }
+ }
+
+#ifdef MULTICOREJIT_LOGGING
+ if (Logging2On(LF2_MULTICOREJIT, LL_INFO1000))
+ {
+ MulticoreJitTrace(("%p %p QueryMethodCode", pMethod, code));
+ }
+#endif
+
+ return code;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////
+//
+// class PlayerModuleInfo
+//
+///////////////////////////////////////////////////////////////////////////////////
+
+// Per module information kept for mapping to Module object
+
+class PlayerModuleInfo
+{
+public:
+
+ const ModuleRecord * m_pRecord;
+ Module * m_pModule;
+ int m_needLevel;
+ int m_curLevel;
+ bool m_enableJit;
+
+ PlayerModuleInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pRecord = NULL;
+ m_pModule = NULL;
+ m_needLevel = -1;
+ m_curLevel = -1;
+ m_enableJit = true;
+ }
+
+ bool MeetLevel(FileLoadLevel level) const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_pModule != NULL) && (m_curLevel >= (int) level);
+ }
+
+ bool IsModuleLoaded() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pModule != NULL;
+ }
+
+ bool LoadOkay() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_pRecord->flags & FLAG_LOADOKAY) != 0;
+ }
+
+ // True once UpdateNeedLevel has been called for this module
+ bool IsDependency() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_needLevel > -1;
+ }
+
+ bool IsLowerLevel() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_curLevel < m_needLevel;
+ }
+
+ // If the module is loaded but at a lower level than needed, update its recorded level
+ void UpdateCurrentLevel()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (m_pModule != NULL)
+ {
+ if (m_curLevel < m_needLevel)
+ {
+ m_curLevel = (int) MulticoreJitManager::GetModuleFileLoadLevel(m_pModule);
+ }
+ }
+ }
+
+ bool UpdateNeedLevel(FileLoadLevel level)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_needLevel < (int) level)
+ {
+ m_needLevel = (int) level;
+
+ return true;
+ }
+
+ return false;
+ }
+
+ bool MatchWith(ModuleVersion & version, bool & gotVersion, Module * pModule, bool & shortAbort, bool fAppx);
+
+#ifdef MULTICOREJIT_LOGGING
+ void Dump(const wchar_t * prefix, int index);
+#endif
+
+};
+
+
+bool PlayerModuleInfo::MatchWith(ModuleVersion & version, bool & gotVersion, Module * pModule, bool & shortAbort, bool fAppx)
+{
+ STANDARD_VM_CONTRACT;
+
+ if ((m_pModule == NULL) && m_pRecord->MatchWithModule(version, gotVersion, pModule, shortAbort, fAppx))
+ {
+ m_pModule = pModule;
+ m_curLevel = (int) MulticoreJitManager::GetModuleFileLoadLevel(pModule);
+
+ if (m_pRecord->jitMethodCount == 0)
+ {
+ m_enableJit = false; // No methods to JIT for this module; not strictly needed, just for correctness
+ }
+ else if (CORDebuggerEnCMode(pModule->GetDebuggerInfoBits()))
+ {
+ m_enableJit = false;
+ MulticoreJitTrace(("Jit disable for module due to EnC"));
+ _FireEtwMulticoreJit(W("FILTERMETHOD-EnC"), W(""), 0, 0, 0);
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+
+#ifdef MULTICOREJIT_LOGGING
+
+void PlayerModuleInfo::Dump(const wchar_t * prefix, int index)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef LOGGING
+ if (!Logging2On(LF2_MULTICOREJIT, LL_INFO100))
+ return;
+
+ DEBUG_ONLY_FUNCTION;
+#endif
+
+ StackSString ssBuff;
+
+ ssBuff.Append(prefix);
+ ssBuff.AppendPrintf(W("[%2d]: "), index);
+
+ const ModuleVersion & ver = m_pRecord->version;
+
+ ssBuff.AppendPrintf(W(" %d.%d.%05d.%04d.%d level %2d, need %2d"), ver.major, ver.minor, ver.build, ver.revision, ver.versionFlags, m_curLevel, m_needLevel);
+
+ ssBuff.AppendPrintf(W(" pModule: %p "), m_pModule);
+
+ unsigned i;
+
+ for (i = 0; i < m_pRecord->ModuleNameLen(); i ++)
+ {
+ ssBuff.Append((wchar_t) m_pRecord->GetModuleName()[i]);
+ }
+
+ while (i < 32)
+ {
+ ssBuff.Append(' ');
+ i ++;
+ }
+
+ MulticoreJitTrace(("%S", ssBuff.GetUnicode()));
+}
+
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////////
+//
+// MulticoreJitProfilePlayer
+//
+///////////////////////////////////////////////////////////////////////////////////
+
+const unsigned EmptyToken = 0xFFFFFFFF;
+
+bool ModuleRecord::MatchWithModule(ModuleVersion & modVersion, bool & gotVersion, Module * pModule, bool & shouldAbort, bool fAppx) const
+{
+ STANDARD_VM_CONTRACT;
+
+ LPCUTF8 pModuleName = pModule->GetSimpleName();
+ const char * pName = GetModuleName();
+
+ size_t len = strlen(pModuleName);
+
+ if ((len == lenModuleName) && (memcmp(pModuleName, pName, lenModuleName) == 0))
+ {
+ // Ignore the version check on playback when running under Appx (also, GetModuleVersion is expensive)
+
+ // For Appx, the multicore JIT profile is pre-generated by the application vendor and installed with the package,
+ // so it may not exactly match the app's own assemblies or the assemblies installed on the system.
+ if (fAppx)
+ {
+ return true;
+ }
+
+ if (! gotVersion) // Calling expensive GetModuleVersion only when simple name matches
+ {
+ gotVersion = true;
+
+ if (! modVersion.GetModuleVersion(pModule))
+ {
+ return false;
+ }
+ }
+
+ if (version.MatchWith(modVersion))
+ {
+ // If matching image with different native image flag is detected, mark and abort playing profile back
+ if (version.NativeImageFlagDiff(modVersion))
+ {
+ MulticoreJitTrace((" Module with different native image flag: %s", pName));
+
+ shouldAbort = true;
+ }
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+MulticoreJitProfilePlayer::MulticoreJitProfilePlayer(AppDomain * pDomain, ICLRPrivBinder * pBinderContext, LONG nSession, bool fAppxMode)
+ : m_stats(pDomain->GetMulticoreJitManager().GetStats()), m_appdomainSession(pDomain->GetMulticoreJitManager().GetProfileSession())
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_DomainID = pDomain->GetId();
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ m_pBinderContext = pBinderContext;
+#endif
+ m_nMySession = nSession;
+ m_moduleCount = 0;
+ m_headerModuleCount = 0;
+ m_pModules = NULL;
+ m_nBlockingCount = 0;
+ m_nMissingModule = 0;
+ m_nLoadedModuleCount = 0;
+ m_shouldAbort = false;
+ m_fAppxMode = fAppxMode;
+
+ m_pThread = NULL;
+ m_pFileBuffer = NULL;
+ m_nFileSize = 0;
+
+ m_busyWith = EmptyToken;
+
+ m_nStartTime = GetTickCount();
+}
+
+
+MulticoreJitProfilePlayer::~MulticoreJitProfilePlayer()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pModules != NULL)
+ {
+ delete [] m_pModules;
+ m_pModules = NULL;
+ }
+
+ if (m_pFileBuffer != NULL)
+ {
+ delete [] m_pFileBuffer;
+ }
+}
+
+
+// static
+bool MulticoreJitManager::ModuleHasNoCode(Module * pModule)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (pModule->IsResource())
+ {
+ return true;
+ }
+
+ IMDInternalImport * pImport = pModule->GetMDImport();
+
+ if (pImport != NULL)
+ {
+ if ((pImport->GetCountWithTokenKind(mdtTypeDef) == 0) &&
+ (pImport->GetCountWithTokenKind(mdtMethodDef) == 0) &&
+ (pImport->GetCountWithTokenKind(mdtFieldDef) == 0)
+ )
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+// We only support modules in the default load context that are non-dynamic and not domain-neutral (needed for dependency tracking)
+bool MulticoreJitManager::IsSupportedModule(Module * pModule, bool fMethodJit, bool fAppx)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pModule == NULL)
+ {
+ return false;
+ }
+
+ PEFile * pFile = pModule->GetFile();
+
+ if (pFile->IsDynamic()) // Ignore dynamic modules
+ {
+ return false;
+ }
+
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ if (pFile->GetPath().IsEmpty()) // Ignore in-memory modules
+ {
+ return false;
+ }
+#endif
+
+
+ if (! fMethodJit)
+ {
+ if (ModuleHasNoCode(pModule))
+ {
+ return false;
+ }
+ }
+
+ Assembly * pAssembly = pModule->GetAssembly();
+
+#ifdef FEATURE_FUSION
+
+ LOADCTX_TYPE context = pAssembly->GetManifestFile()->GetLoadContext();
+
+#if defined(FEATURE_APPX_BINDER)
+
+ if (fAppx)
+ {
+ if (context == LOADCTX_TYPE_HOSTED)
+ {
+ return true;
+ }
+ }
+
+#endif
+
+ return ((context == LOADCTX_TYPE_DEFAULT) || (context == LOADCTX_TYPE_LOADFROM));
+
+#else
+
+ return true;
+
+#endif
+}
+
+
+// ModuleRecord handling: add to m_ModuleList
+
+HRESULT MulticoreJitProfilePlayer::HandleModuleRecord(const ModuleRecord * pMod)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ PlayerModuleInfo & info = m_pModules[m_moduleCount];
+
+ info.m_pModule = NULL;
+ info.m_pRecord = pMod;
+
+#ifdef MULTICOREJIT_LOGGING
+ info.Dump(W("ModuleRecord"), m_moduleCount);
+#endif
+
+ m_moduleCount ++;
+
+ return hr;
+}
+
+
+// Call JIT to compile a method
+
+bool MulticoreJitProfilePlayer::CompileMethodDesc(Module * pModule, MethodDesc * pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ COR_ILMETHOD_DECODER::DecoderStatus status;
+
+ COR_ILMETHOD_DECODER header(pMD->GetILHeader(), pModule->GetMDImport(), & status);
+
+ if (status == COR_ILMETHOD_DECODER::SUCCESS)
+ {
+ if (m_stats.m_nTryCompiling == 0)
+ {
+ MulticoreJitTrace(("First call to MakeJitWorker"));
+ }
+
+ m_stats.m_nTryCompiling ++;
+
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ // Reset the flag to allow managed code to be called in multicore JIT background thread from this routine
+ ThreadStateNCStackHolder holder(-1, Thread::TSNC_CallingManagedCodeDisabled);
+#endif
+
+ // MakeJitWorker calls back to MulticoreJitCodeStorage::StoreMethodCode under MethodDesc lock
+ pMD->MakeJitWorker(& header, CORJIT_FLG_MCJIT_BACKGROUND, 0);
+
+ return true;
+ }
+
+ return false;
+}
+
+
+// Conditional JIT of a method
+void MulticoreJitProfilePlayer::JITMethod(Module * pModule, unsigned methodIndex)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Ensure non-null module
+ if (pModule == NULL)
+ {
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_VERBOSE, CLR_PRIVATEMULTICOREJIT_KEYWORD))
+ {
+ _FireEtwMulticoreJitA(W("NULLMODULEPOINTER"), NULL, methodIndex, 0, 0);
+ }
+ return;
+ }
+
+ methodIndex &= METHODINDEX_MASK; // 20-bit
+
+ unsigned token = TokenFromRid(methodIndex, mdtMethodDef);
+
+ // Similar to Module::FindMethod + Module::FindMethodThrowing,
+ // except it calls GetMethodDescFromMemberDefOrRefOrSpec with strictMetadataChecks=FALSE to allow generic instantiation
+ MethodDesc * pMethod = MemberLoader::GetMethodDescFromMemberDefOrRefOrSpec(pModule, token, NULL, FALSE, FALSE);
+
+ if ((pMethod != NULL) && ! pMethod->IsDynamicMethod() && pMethod->HasILHeader())
+ {
+ // MethodDesc::FindOrCreateTypicalSharedInstantiation is expensive, avoid calling it unless the method or class has generic arguments
+ if (pMethod->HasClassOrMethodInstantiation())
+ {
+ pMethod = pMethod->FindOrCreateTypicalSharedInstantiation();
+
+ if (pMethod == NULL)
+ {
+ goto BadMethod;
+ }
+
+ pModule = pMethod->GetModule_NoLogging();
+ }
+
+ if (pMethod->GetNativeCode() != NULL) // Last check before compiling: skip methods that already have native code
+ {
+ m_stats.m_nHasNativeCode ++;
+
+ return;
+ }
+ else
+ {
+ m_busyWith = methodIndex;
+
+ bool rslt = CompileMethodDesc(pModule, pMethod);
+
+ m_busyWith = EmptyToken;
+
+ if (rslt)
+ {
+ return;
+ }
+ }
+ }
+
+BadMethod:
+
+ m_stats.m_nFilteredMethods ++;
+
+ MulticoreJitTrace(("Filtered out methods: pModule:[%s] token:[%x]", pModule->GetSimpleName(), token));
+
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_VERBOSE, CLR_PRIVATEMULTICOREJIT_KEYWORD))
+ {
+ _FireEtwMulticoreJitA(W("FILTERMETHOD-GENERIC"), pModule->GetSimpleName(), token, 0, 0);
+ }
+}
+
+
+class MulticoreJitPlayerModuleEnumerator : public MulticoreJitModuleEnumerator
+{
+ MulticoreJitProfilePlayer * m_pPlayer;
+
+ // Implementation of MulticoreJitModuleEnumerator::OnModule
+ HRESULT OnModule(Module * pModule)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return m_pPlayer->OnModule(pModule);
+ }
+
+public:
+
+ MulticoreJitPlayerModuleEnumerator(MulticoreJitProfilePlayer * pPlayer)
+ {
+ m_pPlayer = pPlayer;
+ }
+};
+
+
+HRESULT MulticoreJitProfilePlayer::OnModule(Module * pModule)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ // Check if already matched
+ for (unsigned i = 0; i < m_moduleCount; i ++)
+ {
+ if (m_pModules[i].m_pModule == pModule)
+ {
+ return hr;
+ }
+ }
+
+ ModuleVersion version; // GetModuleVersion is called on demand, when the simple name matches
+
+ bool gotVersion = false;
+
+ // Match with simple name, and then version/flag/guid
+ for (unsigned i = 0; i < m_moduleCount; i ++)
+ {
+ if (m_pModules[i].MatchWith(version, gotVersion, pModule, m_shouldAbort, m_fAppxMode))
+ {
+ m_nLoadedModuleCount ++;
+ return hr;
+ }
+ }
+
+ return hr;
+}
+
+
+HRESULT MulticoreJitProfilePlayer::UpdateModuleInfo()
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ MulticoreJitTrace(("UpdateModuleInfo"));
+
+ // Enumerate module if there is a module needed, but not loaded yet
+ for (unsigned i = 0; i < m_moduleCount; i ++)
+ {
+ PlayerModuleInfo & info = m_pModules[i];
+
+ if (! info.LoadOkay() && info.IsDependency() && ! info.IsModuleLoaded())
+ {
+ MulticoreJitTrace((" Enumerate modules for player"));
+
+ MulticoreJitPlayerModuleEnumerator enumerator(this);
+
+ enumerator.EnumerateLoadedModules(GetAppDomain()); // Enumerate modules, hoping to find new matches
+
+ break;
+ }
+ }
+
+ // Update load level, re-calculate blocking count
+ m_nBlockingCount = 0;
+ m_nMissingModule = 0;
+
+ if (m_shouldAbort)
+ {
+ hr = E_ABORT;
+ }
+ else
+ {
+ // Check for blocking level
+ for (unsigned i = 0; i < m_moduleCount; i ++)
+ {
+ PlayerModuleInfo & info = m_pModules[i];
+
+ if (! info.LoadOkay() && info.IsLowerLevel())
+ {
+ if (info.IsModuleLoaded())
+ {
+ info.UpdateCurrentLevel();
+ }
+ else
+ {
+ m_nMissingModule ++;
+ }
+
+ if (info.IsLowerLevel())
+ {
+ #ifdef MULTICOREJIT_LOGGING
+ info.Dump(W(" BlockingModule"), i);
+ #endif
+
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_VERBOSE, CLR_PRIVATEMULTICOREJIT_KEYWORD))
+ {
+ _FireEtwMulticoreJitA(W("BLOCKINGMODULE"), info.m_pRecord->GetModuleName(), i, info.m_curLevel, info.m_needLevel);
+ }
+
+ m_nBlockingCount ++;
+ }
+ }
+ }
+ }
+
+ MulticoreJitTrace(("Blocking count: %d, missing module: %d, hr=%x", m_nBlockingCount, m_nMissingModule, hr));
+
+ return hr;
+}
+
+
+bool MulticoreJitProfilePlayer::ShouldAbort(bool fast) const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_nMySession != m_appdomainSession.GetValue())
+ {
+ MulticoreJitTrace(("MulticoreJitProfilePlayer::ShouldAbort session over"));
+ _FireEtwMulticoreJit(W("ABORTPLAYER"), W("Session over"), 0, 0, 0);
+ return true;
+ }
+
+ if (fast)
+ {
+ return false;
+ }
+
+ if (GetTickCount() - m_nStartTime > MULTICOREJITLIFE)
+ {
+ MulticoreJitTrace(("MulticoreJitProfilePlayer::ShouldAbort time over"));
+
+ _FireEtwMulticoreJit(W("ABORTPLAYER"), W("Time out"), 0, 0, 0);
+
+ return true;
+ }
+
+ return false;
+}
+
+
+// Basic delay unit
+const int DelayUnit = 1; // 1 ms delay
+const int MissingModuleDelay = 10; // 10 ms for each missing module
+
+
+// Wait for all the module loading and level requests to be fulfilled.
+// This allows a longer delay based on the number of mismatches, to reduce CPU usage.
+
+// Returns true if the blocking count reaches 0, false if aborted
+bool MulticoreJitProfilePlayer::GroupWaitForModuleLoad(int pos)
+{
+ STANDARD_VM_CONTRACT;
+
+ MulticoreJitTrace(("Enter GroupWaitForModuleLoad(pos=%4d): %d modules loaded, blocking count=%d", pos, m_nLoadedModuleCount, m_nBlockingCount));
+
+ _FireEtwMulticoreJit(W("GROUPWAIT"), W("Enter"), m_nLoadedModuleCount, m_nBlockingCount, pos);
+
+ bool rslt = false;
+
+ // Ensure that we don't block in this particular case for longer than the block limit.
+ // This limit is smaller than the overall MULTICOREJITLIFE and ensures that we don't sit for the
+ // full player lifetime waiting for a module when the app behavior has changed.
+ DWORD currentModuleBlockStart = GetTickCount();
+
+ // Only allow module blocking to occur a certain number of times.
+
+ while (! ShouldAbort(false))
+ {
+ if (FAILED(UpdateModuleInfo()))
+ {
+ break;
+ }
+
+ if (m_nBlockingCount == 0)
+ {
+ rslt = true;
+ break;
+ }
+
+ if(GetTickCount() - currentModuleBlockStart > MULTICOREJITBLOCKLIMIT)
+ {
+ MulticoreJitTrace(("MulticoreJitProfilePlayer::GroupWaitForModuleLoad timeout exceeded."));
+ _FireEtwMulticoreJit(W("ABORTPLAYER"), W("GroupWaitForModuleLoad timeout exceeded."), 0, 0, 0);
+
+ break;
+ }
+
+ // Heuristic for reducing CPU usage: delay longer when there are more blocking modules
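+ // For example, 2 missing modules and 3 blocking modules give
+ // (2 * 10 + 3) * 1 = 23 ms; each wait is capped at 50 ms.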
+ unsigned delay = min((m_nMissingModule * MissingModuleDelay + m_nBlockingCount) * DelayUnit, 50);
+
+ MulticoreJitTrace(("Delay: %d ms", delay));
+
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_VERBOSE, CLR_PRIVATEMULTICOREJIT_KEYWORD))
+ {
+ _FireEtwMulticoreJit(W("GROUPWAIT"), W("Delay"), delay, 0, 0);
+ }
+
+ ClrSleepEx(delay, FALSE);
+
+ m_stats.m_nTotalDelay += (unsigned short) delay;
+ m_stats.m_nDelayCount ++;
+ }
+
+ MulticoreJitTrace(("Leave GroupWaitForModuleLoad(pos=%4d): blocking count=%d (rslt=%d)", pos, m_nBlockingCount, rslt));
+
+ _FireEtwMulticoreJit(W("GROUPWAIT"), W("Leave"), m_nLoadedModuleCount, m_nBlockingCount, rslt);
+
+ return rslt;
+}
+
+
+bool MulticoreJitProfilePlayer::HandleModuleDependency(unsigned jitInfo)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Depends on moduleTo, which may not be loaded yet
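+ // A jitInfo entry packs fields into 32 bits (masks and shifts are defined
+ // in multicorejitimpl.h): the top byte is the source module index, one
+ // flag bit marks dependency records, and the low bits hold either a
+ // methodDef RID or the target module index plus its required load level.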
+
+ unsigned moduleTo = jitInfo & MODULE_MASK;
+
+ if (moduleTo < m_moduleCount)
+ {
+ unsigned level = (jitInfo >> LEVEL_SHIFT) & LEVEL_MASK;
+
+ PlayerModuleInfo & mod = m_pModules[moduleTo];
+
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ // Load the module if necessary.
+ if (!mod.m_pModule)
+ {
+ // Update loaded module status.
+ AppDomain * pAppDomain = GetAppDomain();
+ _ASSERTE(pAppDomain != NULL);
+
+ MulticoreJitPlayerModuleEnumerator moduleEnumerator(this);
+ moduleEnumerator.EnumerateLoadedModules(pAppDomain);
+
+ if (!mod.m_pModule)
+ {
+ HRESULT hr;
+
+ // Get the assembly name.
+ SString assemblyName;
+ assemblyName.SetASCII(mod.m_pRecord->GetAssemblyName(), mod.m_pRecord->AssemblyNameLen());
+
+ // Load the assembly.
+ DomainAssembly * pDomainAssembly = LoadAssembly(assemblyName);
+
+ if (pDomainAssembly)
+ {
+ // If we successfully loaded the assembly, enumerate the modules in the assembly
+ // and update all modules status.
+ moduleEnumerator.HandleAssembly(pDomainAssembly);
+
+ if (mod.m_pModule == NULL)
+ {
+ // Unable to load the assembly, so abort.
+ return false;
+ }
+ }
+ else
+ {
+ // Unable to load the assembly, so abort.
+ return false;
+ }
+ }
+ }
+#endif
+
+ if (mod.UpdateNeedLevel((FileLoadLevel) level))
+ {
+ if (! mod.LoadOkay()) // Allow first party WinMD to load in the background thread
+ {
+ m_nBlockingCount ++;
+ }
+ }
+ }
+
+ return true;
+}
+
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+DomainAssembly * MulticoreJitProfilePlayer::LoadAssembly(SString & assemblyName)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Get the assembly name.
+ StackScratchBuffer scratch;
+ const ANSI* pAnsiAssemblyName = assemblyName.GetANSI(scratch);
+
+ AssemblySpec spec;
+
+ // Initialize the assembly spec.
+ HRESULT hr = spec.Init(pAnsiAssemblyName);
+ if (FAILED(hr))
+ {
+ return NULL;
+ }
+
+ // Set the binding context to the assembly load context.
+ if (m_pBinderContext != NULL)
+ {
+ spec.SetBindingContext(m_pBinderContext);
+ }
+
+ DomainAssembly *pDomainAssembly = NULL;
+
+ // Setup the AssemblyLoadSecurity to perform the assembly load
+ GCX_COOP();
+
+ PTR_AppDomain pCurDomain = GetAppDomain();
+ IApplicationSecurityDescriptor *pDomainSecDesc = pCurDomain->GetSecurityDescriptor();
+
+ OBJECTREF refGrantedPermissionSet = NULL;
+ AssemblyLoadSecurity loadSecurity;
+
+ GCPROTECT_BEGIN(refGrantedPermissionSet);
+
+ loadSecurity.m_dwSpecialFlags = pDomainSecDesc->GetSpecialFlags();
+ refGrantedPermissionSet = pDomainSecDesc->GetGrantedPermissionSet();
+ loadSecurity.m_pGrantSet = &refGrantedPermissionSet;
+
+ // Bind and load the assembly.
+ pDomainAssembly = spec.LoadDomainAssembly(
+ FILE_LOADED,
+ &loadSecurity,
+ FALSE); // Don't throw on FileNotFound.
+
+ GCPROTECT_END();
+
+ return pDomainAssembly;
+}
+#endif
+
+
+inline bool MethodJifInfo(unsigned inst)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return ((inst & MODULE_DEPENDENCY) == 0);
+}
+
+
+// Process a block of methodDef, call JIT if not blocked
+HRESULT MulticoreJitProfilePlayer::HandleMethodRecord(unsigned * buffer, int count)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = E_ABORT;
+
+ MulticoreJitTrace(("MethodRecord(%d) start %d methods, %d mod loaded", m_stats.m_nTotalMethod, count, m_nLoadedModuleCount));
+
+ MulticoreJitManager & manager = GetAppDomain()->GetMulticoreJitManager();
+
+#ifdef MULTICOREJIT_LOGGING
+
+ MulticoreJitCodeStorage & curStorage = manager.GetMulticoreJitCodeStorage();
+
+ int lastCompiled = curStorage.GetStored();
+
+#endif
+
+ int pos = 0;
+
+ while (! ShouldAbort(true) && (pos < count))
+ {
+ unsigned jitInfo = buffer[pos]; // moduleIndex + methodIndex
+
+ unsigned moduleIndex = jitInfo >> 24;
+
+ if (moduleIndex < m_moduleCount)
+ {
+ if (jitInfo & MODULE_DEPENDENCY) // Module dependency information
+ {
+ if (! HandleModuleDependency(jitInfo))
+ {
+ goto Abort;
+ }
+ }
+ else
+ {
+ PlayerModuleInfo & info = m_pModules[moduleIndex];
+
+ m_stats.m_nTotalMethod ++;
+
+ // If the module is disabled for JITting, just skip the method without waiting
+ if (! info.m_enableJit)
+ {
+ m_stats.m_nFilteredMethods ++;
+ }
+ else
+ {
+#if !defined(FEATURE_CORECLR)
+ if (m_nBlockingCount != 0)
+ {
+ if (! GroupWaitForModuleLoad(m_stats.m_nTotalMethod + pos)) // wait for blocking modules
+ {
+ goto Abort;
+ }
+
+ _ASSERTE(m_nBlockingCount == 0);
+ }
+#endif
+
+ // To reduce contention with the foreground thread, walk backward within a group of JITtable methods not broken apart by dependency records
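+ // The foreground thread consumes the profile from the front, so compiling
+ // the same run back-to-front makes the two threads less likely to attempt
+ // the same method at the same time.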
+ {
+ int run = 1; // size of the group
+
+ while (((pos + run) < count) && MethodJifInfo(buffer[pos + run]))
+ {
+ run ++;
+
+ // If the walk-back run is too long, many methods at the front will be missed by the background thread
+ if (run > MAX_WALKBACK)
+ {
+ break;
+ }
+ }
+
+ if (run > 1)
+ {
+ MulticoreJitTrace(("Jit backwards %d methods", run));
+ }
+
+ // Walk backwards within the same group, may be from different modules
+ for (int p = pos + run - 1; p >= pos; p --)
+ {
+ unsigned inst = buffer[p];
+
+ _ASSERTE(MethodJifInfo(inst));
+
+ PlayerModuleInfo & mod = m_pModules[inst >> 24];
+
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ _ASSERTE(mod.IsModuleLoaded());
+#else
+ _ASSERTE(mod.IsModuleLoaded() && ! mod.IsLowerLevel());
+#endif
+
+ if (mod.m_enableJit)
+ {
+ JITMethod(mod.m_pModule, inst);
+ }
+ else
+ {
+ m_stats.m_nFilteredMethods ++;
+ }
+ }
+
+ m_stats.m_nWalkBack += (short) (run - 1);
+ m_stats.m_nTotalMethod += (short) (run - 1);
+
+ pos += run - 1; // Skip the group
+ }
+ }
+ }
+ }
+ else
+ {
+ hr = COR_E_BADIMAGEFORMAT;
+ goto Abort;
+ }
+
+ pos ++;
+ }
+
+ // Mark success
+ hr = S_OK;
+
+Abort:
+
+ m_stats.m_nMissingModuleSkip += (short) (count - pos);
+
+ MulticoreJitTrace(("MethodRecord(%d) end %d compiled, %d aborted / %d methods, hr=%x",
+ m_stats.m_nTotalMethod,
+ curStorage.GetStored() - lastCompiled,
+ count - pos, count, hr));
+
+ TraceSummary();
+
+ return hr;
+}
+
+
+void MulticoreJitProfilePlayer::TraceSummary()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ MulticoreJitCodeStorage & curStorage = GetAppDomain()->GetMulticoreJitManager().GetMulticoreJitCodeStorage();
+
+ unsigned returned = curStorage.GetReturned();
+
+#ifdef MULTICOREJIT_LOGGING
+
+ unsigned compiled = curStorage.GetStored();
+
+ MulticoreJitTrace(("PlayerSummary: %d total: %d no mod, %d filtered out, %d had code, %d other, %d tried, %d compiled, %d returned, %d%% efficiency, %d mod loaded, %d ms delay(%d)",
+ m_stats.m_nTotalMethod,
+ m_stats.m_nMissingModuleSkip,
+ m_stats.m_nFilteredMethods,
+ m_stats.m_nHasNativeCode,
+ m_stats.m_nTotalMethod - m_stats.m_nMissingModuleSkip - m_stats.m_nFilteredMethods - m_stats.m_nHasNativeCode - m_stats.m_nTryCompiling,
+ m_stats.m_nTryCompiling,
+ compiled,
+ returned,
+ (m_stats.m_nTotalMethod == 0) ? 100 : returned * 100 / m_stats.m_nTotalMethod,
+ m_nLoadedModuleCount,
+ m_stats.m_nTotalDelay,
+ m_stats.m_nDelayCount
+ ));
+
+#endif
+
+ _FireEtwMulticoreJit(W("PLAYERSUMMARY"), W(""), m_stats.m_nTryCompiling, m_stats.m_nHasNativeCode, returned);
+}
+
+
+HRESULT MulticoreJitProfilePlayer::ReadCheckFile(const wchar_t * pFileName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ {
+ HANDLE hFile = WszCreateFile(pFileName, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+
+ if (hFile == INVALID_HANDLE_VALUE)
+ {
+ return COR_E_FILENOTFOUND;
+ }
+
+ HeaderRecord header;
+
+ DWORD cbRead = 0;
+
+ if (! ::ReadFile(hFile, & header, sizeof(header), &cbRead, NULL))
+ {
+ hr = COR_E_BADIMAGEFORMAT;
+ }
+ else if (cbRead != sizeof(header))
+ {
+ hr = COR_E_BADIMAGEFORMAT;
+ }
+ else
+ {
+ m_headerModuleCount = header.moduleCount;
+
+ MulticoreJitTrace(("HeaderRecord(version=%d, module=%d, method=%d)", header.version, m_headerModuleCount, header.methodCount));
+
+ if ((header.version != MULTICOREJIT_PROFILE_VERSION) || (header.moduleCount > MAX_MODULES) || (header.methodCount > MAX_METHOD_ARRAY) ||
+ (header.recordID != Pack8_24(MULTICOREJIT_HEADER_RECORD_ID, sizeof(HeaderRecord))))
+ {
+ hr = COR_E_BADIMAGEFORMAT;
+ }
+ else
+ {
+ m_pModules = new (nothrow) PlayerModuleInfo[m_headerModuleCount];
+
+ if (m_pModules == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ }
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ m_nFileSize = SafeGetFileSize(hFile, 0);
+
+ if (m_nFileSize > sizeof(header))
+ {
+ m_nFileSize -= sizeof(header);
+
+ m_pFileBuffer = new (nothrow) BYTE[m_nFileSize];
+
+ if (m_pFileBuffer == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ else if (::ReadFile(hFile, m_pFileBuffer, m_nFileSize, & cbRead, NULL))
+ {
+ if (cbRead != m_nFileSize)
+ {
+ hr = COR_E_BADIMAGEFORMAT;
+ }
+ }
+ else
+ {
+ hr = CLDB_E_FILE_BADREAD;
+ }
+ }
+ else
+ {
+ hr = COR_E_BADIMAGEFORMAT;
+ }
+ }
+
+ CloseHandle(hFile);
+
+ _FireEtwMulticoreJit(W("PLAYER"), W("Header"), hr, m_headerModuleCount, header.methodCount);
+ }
+
+
+ return hr;
+}
+
+
+HRESULT MulticoreJitProfilePlayer::PlayProfile()
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ DWORD start = GetTickCount();
+
+ Thread * pThread = GetThread();
+
+ {
+ // 1 marks background thread
+ FireEtwThreadCreated((ULONGLONG) pThread, (ULONGLONG) GetAppDomain(), 1, pThread->GetThreadId(), pThread->GetOSThreadId(), GetClrInstanceId());
+ }
+
+ const BYTE * pBuffer = m_pFileBuffer;
+
+ unsigned nSize = m_nFileSize;
+
+ MulticoreJitTrace(("PlayProfile %d bytes in (%d, %s)",
+ nSize,
+ GetAppDomain()->GetId().m_dwId,
+ GetAppDomain()->GetFriendlyNameForLogging()));
+
+ while ((SUCCEEDED(hr)) && (nSize > sizeof(unsigned)))
+ {
+ unsigned data = * (const unsigned *) pBuffer;
+ unsigned rcdLen = data & 0xFFFFFF;
+ unsigned rcdTyp = data >> 24;
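+ // Each record begins with a DWORD header packed as Pack8_24(type, length):
+ // the top 8 bits are the record type and the low 24 bits are the total
+ // record length in bytes, including this header (see ReadCheckFile above).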
+
+ if ((rcdLen > nSize) || (rcdLen & 3)) // Records must be DWORD-aligned
+ {
+ hr = COR_E_BADIMAGEFORMAT;
+ }
+ else
+ {
+ if (rcdTyp == MULTICOREJIT_MODULE_RECORD_ID)
+ {
+ const ModuleRecord * pRec = (const ModuleRecord * ) pBuffer;
+
+ if (((unsigned)(pRec->lenModuleName
+#if defined(FEATURE_CORECLR) && defined(FEATURE_HOSTED_BINDER)
+ + pRec->lenAssemblyName
+#endif
+ ) > (rcdLen - sizeof(ModuleRecord))) ||
+ (m_moduleCount >= m_headerModuleCount))
+ {
+ hr = COR_E_BADIMAGEFORMAT;
+ }
+ else
+ {
+ hr = HandleModuleRecord(pRec);
+ }
+ }
+ else if (rcdTyp == MULTICOREJIT_JITINF_RECORD_ID)
+ {
+ int mCount = (rcdLen - sizeof(unsigned)) / sizeof(unsigned);
+
+ hr = HandleMethodRecord((unsigned *) (pBuffer + sizeof(unsigned)), mCount);
+ }
+ else
+ {
+ hr = COR_E_BADIMAGEFORMAT;
+ }
+
+ pBuffer += rcdLen;
+ nSize -= rcdLen;
+ }
+
+ if (SUCCEEDED(hr) && ShouldAbort(false))
+ {
+ hr = E_ABORT;
+ }
+ }
+
+ start = GetTickCount() - start;
+
+ {
+ FireEtwThreadTerminated((ULONGLONG) pThread, (ULONGLONG) GetAppDomain(), GetClrInstanceId());
+ }
+
+ MulticoreJitTrace(("Background thread running for %d ms, %d methods, hr=%x", start, m_stats.m_nTotalMethod, hr));
+
+ TraceSummary();
+
+ return hr;
+}
+
+
+HRESULT MulticoreJitProfilePlayer::JITThreadProc(Thread * pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ m_stats.m_hr = S_OK;
+
+ EX_TRY
+ {
+ ENTER_DOMAIN_ID(m_DomainID);
+ {
+ // Go into preemptive mode
+ GCX_PREEMP();
+
+ m_stats.m_hr = PlayProfile();
+ }
+ END_DOMAIN_TRANSITION;
+ }
+ EX_CATCH
+ {
+ if (SUCCEEDED(m_stats.m_hr))
+ {
+ m_stats.m_hr = COR_E_EXCEPTION;
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return (DWORD) m_stats.m_hr;
+}
+
+
+DWORD WINAPI MulticoreJitProfilePlayer::StaticJITThreadProc(void *args)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ ENTRY_POINT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ MulticoreJitTrace(("StaticJITThreadProc starting"));
+
+ // Mark the background thread via an ETW event for diagnostics.
+ _FireEtwMulticoreJit(W("JITTHREAD"), W(""), 0, 0, 0);
+
+ MulticoreJitProfilePlayer * pPlayer = (MulticoreJitProfilePlayer *) args;
+
+ if (pPlayer != NULL)
+ {
+ Thread * pThread = pPlayer->m_pThread;
+
+ if ((pThread != NULL) && pThread->HasStarted())
+ {
+ // Disable calling managed code in background thread
+ ThreadStateNCStackHolder holder(TRUE, Thread::TSNC_CallingManagedCodeDisabled);
+
+ // Run as background thread, so ThreadStore::WaitForOtherThreads will not wait for it
+ pThread->SetBackground(TRUE);
+
+ hr = pPlayer->JITThreadProc(pThread);
+ }
+
+ // It needs to be deleted after GCX_PREEMP ends
+ if (pThread != NULL)
+ {
+ DestroyThread(pThread);
+ }
+
+ // The background thread is responsible for deleting the MulticoreJitProfilePlayer object once it has
+ // started, i.e. after Thread::StartThread succeeds
+ delete pPlayer;
+ }
+
+ MulticoreJitTrace(("StaticJITThreadProc endding(%x)", hr));
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return (DWORD) hr;
+}
+
+
+HRESULT MulticoreJitProfilePlayer::ProcessProfile(const wchar_t * pFileName)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = ReadCheckFile(pFileName);
+
+ if (SUCCEEDED(hr))
+ {
+ _ASSERTE(m_pThread == NULL);
+
+ m_pThread = SetupUnstartedThread();
+
+ _ASSERTE(m_pThread != NULL);
+
+ unsigned stackSize = 64 * sizeof(SIZE_T) * 1024; // 256 Kb for 32-bit, 512 Kb for 64-bit
+
+#ifdef _DEBUG
+ stackSize *= 2; // Double it for CHK build
+#endif
+
+ if (m_pThread->CreateNewThread(stackSize, StaticJITThreadProc, this))
+ {
+ int t = (int) m_pThread->StartThread();
+
+ if (t > 0)
+ {
+ hr = S_OK;
+ }
+ }
+ }
+
+ return hr;
+}
+
+
diff --git a/src/vm/namespace.h b/src/vm/namespace.h
new file mode 100644
index 0000000000..5eb950f250
--- /dev/null
+++ b/src/vm/namespace.h
@@ -0,0 +1,84 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// This file defines namespaces used by the runtime.
+//
+
+
+
+#define g_SystemNS "System"
+
+#define g_RuntimeNS g_SystemNS ".Runtime"
+#define g_IONS g_SystemNS ".IO"
+#define g_ThreadingNS g_SystemNS ".Threading"
+#define g_CollectionsNS g_SystemNS ".Collections"
+#define g_ResourcesNS g_SystemNS ".Resources"
+#define g_DiagnosticsNS g_SystemNS ".Diagnostics"
+#define g_CodeContractsNS g_DiagnosticsNS ".Contracts"
+#define g_AssembliesNS g_SystemNS ".Configuration.Assemblies"
+#define g_GlobalizationNS g_SystemNS ".Globalization"
+#define g_IsolatedStorageNS g_SystemNS ".IO.IsolatedStorage"
+#define g_TextNS g_SystemNS ".Text"
+#define g_CollectionsGenericNS g_SystemNS ".Collections.Generic"
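+
+// Adjacent string literals concatenate at compile time, so g_CollectionsGenericNS,
+// for example, expands to "System" ".Collections.Generic", i.e. the single
+// literal "System.Collections.Generic".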
+
+#define g_InteropServicesNS g_SystemNS ".Runtime.InteropServices"
+#define g_ReflectionNS g_SystemNS ".Reflection"
+#define g_ReflectionEmitNS g_ReflectionNS ".Emit"
+
+#define g_InteropNS g_RuntimeNS ".InteropServices"
+#define g_InteropTCENS g_InteropNS ".TCEAdapterGen"
+#define g_ExpandoNS g_InteropNS ".Expando"
+#ifdef FEATURE_COMINTEROP
+#define g_WinRTNS g_InteropNS ".WindowsRuntime"
+#endif // FEATURE_COMINTEROP
+
+#define g_CompilerServicesNS g_RuntimeNS ".CompilerServices"
+
+#define g_ConstrainedExecutionNS g_RuntimeNS ".ConstrainedExecution"
+
+#define g_SecurityNS g_SystemNS ".Security"
+#define g_UtilNS g_SecurityNS ".Util"
+#define g_PublicKeyNS g_SecurityNS ".PublicKey"
+#define g_PermissionsNS g_SecurityNS ".Permissions"
+#define g_PrincipalNS g_SecurityNS ".Principal"
+#define g_PolicyNS g_SecurityNS ".Policy"
+#ifdef FEATURE_X509
+#define g_CryptographyNS g_SecurityNS ".Cryptography"
+#define g_X509NS g_CryptographyNS ".X509Certificates"
+#endif // FEATURE_X509
+
+#define g_SerializationNS g_RuntimeNS ".Serialization"
+#define g_RemotingNS g_RuntimeNS ".Remoting"
+#define g_ActivationNS g_RemotingNS ".Activation"
+#define g_ProxiesNS g_RemotingNS ".Proxies"
+#define g_ContextsNS g_RemotingNS ".Contexts"
+#define g_MessagingNS g_RemotingNS ".Messaging"
+#define g_RemotingServicesNS g_RemotingNS ".Services"
+#define g_LifetimeNS g_RemotingNS ".Lifetime"
+
+#define g_MicrosoftNS "Microsoft"
+
+#define g_Win32NS g_MicrosoftNS ".Win32"
+#define g_SafeHandlesNS g_Win32NS ".SafeHandles"
+
+#define g_StubHelpersNS g_SystemNS ".StubHelpers"
+
+// Jupiter support requires accessing types in the System.Windows namespace & children,
+// but these types may show up in the Windows.UI.Xaml namespace.
+#define g_SysWindowsNS g_SystemNS ".Windows"
+
+#define g_DirectUINS "Windows.UI.Xaml"
+#define g_AutomationNS g_DirectUINS ".Automation"
+#define g_MarkupNS g_DirectUINS ".Markup"
+
+#define g_WindowsFoundationDiagNS "Windows.Foundation.Diagnostics"
+
+#if defined(FEATURE_CORRUPTING_EXCEPTIONS) || defined(FEATURE_EXCEPTION_NOTIFICATIONS)
+#define g_ExceptionServicesNS g_RuntimeNS ".ExceptionServices"
+#endif // defined(FEATURE_CORRUPTING_EXCEPTION) || defined(FEATURE_EXCEPTION_NOTIFICATIONS)
+
+#if defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
+#define g_LoaderNS g_RuntimeNS ".Loader"
+#endif // defined(FEATURE_HOST_ASSEMBLY_RESOLVER)
diff --git a/src/vm/nativeformatreader.h b/src/vm/nativeformatreader.h
new file mode 100644
index 0000000000..e170868014
--- /dev/null
+++ b/src/vm/nativeformatreader.h
@@ -0,0 +1,213 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// ---------------------------------------------------------------------------
+// NativeFormatReader
+//
+// Utilities to read native data from images
+// ---------------------------------------------------------------------------
+
+#pragma once
+
+// To reduce differences between C# and C++ versions
+#define byte uint8_t
+#define uint uint32_t
+
+#define UInt16 uint16_t
+#define UInt32 uint32_t
+#define UInt64 uint64_t
+
+namespace NativeFormat
+{
+ class NativeReader
+ {
+ PTR_BYTE _base;
+ uint _size;
+
+ public:
+ NativeReader()
+ {
+ _base = NULL;
+ _size = 0;
+ }
+
+ NativeReader(PTR_BYTE base_, uint size)
+ {
+ _base = base_;
+ _size = size;
+ }
+
+ void ThrowBadImageFormatException()
+ {
+ _ASSERTE(false);
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ byte ReadUInt8(uint offset)
+ {
+ if (offset >= _size)
+ ThrowBadImageFormatException();
+ return *(_base + offset); // Assumes little endian and unaligned access
+ }
+
+ UInt16 ReadUInt16(uint offset)
+ {
+ if ((int)offset < 0 || offset + 1 >= _size)
+ ThrowBadImageFormatException();
+ return *dac_cast<PTR_USHORT>(_base + offset); // Assumes little endian and unaligned access
+ }
+
+ UInt32 ReadUInt32(uint offset)
+ {
+ if ((int)offset < 0 || offset + 3 >= _size)
+ ThrowBadImageFormatException();
+ return *dac_cast<PTR_UINT32>(_base + offset); // Assumes little endian and unaligned access
+ }
+
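+ // Decodes a variable-length unsigned integer. The count of low set bits
+ // before the first clear bit in the first byte selects the width:
+ // xxxxxxx0 : 7-bit value, 1 byte
+ // xxxxxx01 : 14-bit value, 2 bytes
+ // xxxxx011 : 21-bit value, 3 bytes
+ // xxxx0111 : 28-bit value, 4 bytes
+ // xxx01111 : full 32-bit value in the next 4 bytes
+ // For example, the single byte 0x2A (00101010) decodes to 0x15 (21).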
+ uint DecodeUnsigned(uint offset, uint * pValue)
+ {
+ if (offset >= _size)
+ ThrowBadImageFormatException();
+
+ uint val = *(_base + offset);
+ if ((val & 1) == 0)
+ {
+ *pValue = (val >> 1);
+ offset += 1;
+ }
+ else
+ if ((val & 2) == 0)
+ {
+ if (offset + 1 >= _size)
+ ThrowBadImageFormatException();
+ *pValue = (val >> 2) |
+ (((uint)*(_base + offset + 1)) << 6);
+ offset += 2;
+ }
+ else
+ if ((val & 4) == 0)
+ {
+ if (offset + 2 >= _size)
+ ThrowBadImageFormatException();
+ *pValue = (val >> 3) |
+ (((uint)*(_base + offset + 1)) << 5) |
+ (((uint)*(_base + offset + 2)) << 13);
+ offset += 3;
+ }
+ else
+ if ((val & 8) == 0)
+ {
+ if (offset + 3 >= _size)
+ ThrowBadImageFormatException();
+ *pValue = (val >> 4) |
+ (((uint)*(_base + offset + 1)) << 4) |
+ (((uint)*(_base + offset + 2)) << 12) |
+ (((uint)*(_base + offset + 3)) << 20);
+ offset += 4;
+ }
+ else
+ if ((val & 16) == 0)
+ {
+ *pValue = ReadUInt32(offset + 1);
+ offset += 5;
+ }
+ else
+ {
+ ThrowBadImageFormatException();
+ }
+
+ return offset;
+ }
+ };
+
+ class NativeArray
+ {
+ NativeReader * _pReader;
+ uint _baseOffset;
+ uint _nElements;
+ byte _entryIndexSize;
+
+ static const uint _blockSize = 16;
+
+ public:
+ NativeArray()
+ : _pReader(NULL)
+ {
+ }
+
+ NativeArray(NativeReader * pReader, uint offset)
+ : _pReader(pReader)
+ {
+ uint val;
+ _baseOffset = pReader->DecodeUnsigned(offset, &val);
+
+ _nElements = (val >> 2);
+ _entryIndexSize = (val & 3);
+ }
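+
+ // The header word just decoded packs the element count into the upper
+ // bits and the per-block entry index size into the low 2 bits
+ // (0 = byte, 1 = UInt16, 2 = UInt32); entries are then grouped into
+ // blocks of _blockSize (16) elements.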
+
+ uint GetCount()
+ {
+ return _nElements;
+ }
+
+ bool TryGetAt(uint index, uint * pOffset)
+ {
+ if (index >= _nElements)
+ return false;
+
+ uint offset;
+ if (_entryIndexSize == 0)
+ {
+ offset = _pReader->ReadUInt8(_baseOffset + (index / _blockSize));
+ }
+ else if (_entryIndexSize == 1)
+ {
+ offset = _pReader->ReadUInt16(_baseOffset + 2 * (index / _blockSize));
+ }
+ else
+ {
+ offset = _pReader->ReadUInt32(_baseOffset + 4 * (index / _blockSize));
+ }
+ offset += _baseOffset;
+
+ for (uint bit = _blockSize >> 1; bit > 0; bit >>= 1)
+ {
+ uint val;
+ uint offset2 = _pReader->DecodeUnsigned(offset, &val);
+ if (index & bit)
+ {
+ if ((val & 2) != 0)
+ {
+ offset = offset + (val >> 2);
+ continue;
+ }
+ }
+ else
+ {
+ if ((val & 1) != 0)
+ {
+ offset = offset2;
+ continue;
+ }
+ }
+
+ // Not found
+ if ((val & 3) == 0)
+ {
+ // Matching special leaf node?
+ if ((val >> 2) == (index & (_blockSize - 1)))
+ {
+ offset = offset2;
+ break;
+ }
+ }
+ return false;
+ }
+
+ *pOffset = offset;
+ return true;
+ }
+ };
+}
diff --git a/src/vm/nativeoverlapped.cpp b/src/vm/nativeoverlapped.cpp
new file mode 100644
index 0000000000..c72399c3ab
--- /dev/null
+++ b/src/vm/nativeoverlapped.cpp
@@ -0,0 +1,535 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*============================================================
+**
+** Header: COMNativeOverlapped.h
+**
+** Purpose: Native methods for allocating and freeing NativeOverlapped
+**
+
+**
+===========================================================*/
+#include "common.h"
+#include "fcall.h"
+#include "nativeoverlapped.h"
+#include "corhost.h"
+#include "win32threadpool.h"
+#include "mdaassistants.h"
+#include "comsynchronizable.h"
+#include "comthreadpool.h"
+
+LONG OverlappedDataObject::s_CleanupRequestCount = 0;
+BOOL OverlappedDataObject::s_CleanupInProgress = FALSE;
+BOOL OverlappedDataObject::s_GCDetectsCleanup = FALSE;
+BOOL OverlappedDataObject::s_CleanupFreeHandle = FALSE;
+
+//
+//The function is called from managed code to quickly check if a packet is available.
+//This is a perf-critical function. Even helper method frames are not created. We fall
+//back to the VM to do heavyweight operations like creating a new CP thread.
+//
+FCIMPL3(void, CheckVMForIOPacket, LPOVERLAPPED* lpOverlapped, DWORD* errorCode, DWORD* numBytes)
+{
+ FCALL_CONTRACT;
+
+#ifndef FEATURE_PAL
+ Thread *pThread = GetThread();
+ DWORD adid = pThread->GetDomain()->GetId().m_dwId;
+ size_t key=0;
+
+ _ASSERTE(pThread);
+
+ //Poll and wait if GC is in progress, to avoid blocking GC for too long.
+ FC_GC_POLL();
+
+ *lpOverlapped = ThreadpoolMgr::CompletionPortDispatchWorkWithinAppDomain(pThread, errorCode, numBytes, &key, adid);
+ if(*lpOverlapped == NULL)
+ {
+ return;
+ }
+
+ OVERLAPPEDDATAREF overlapped = ObjectToOVERLAPPEDDATAREF(OverlappedDataObject::GetOverlapped(*lpOverlapped));
+
+ _ASSERTE(overlapped->GetAppDomainId() == adid);
+ _ASSERTE(CLRIoCompletionHosted() == FALSE);
+
+ if(overlapped->m_iocb == NULL)
+ {
+ // no user delegate to callback
+ _ASSERTE((overlapped->m_iocbHelper == NULL) || !"This is benign, but should be optimized");
+
+ if (g_pAsyncFileStream_AsyncResultClass)
+ {
+ SetAsyncResultProperties(overlapped, *errorCode, *numBytes);
+ }
+ else
+ {
+ //We're not initialized yet, go back to the VM, and process the packet there.
+ ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped);
+ }
+
+ *lpOverlapped = NULL;
+ return;
+ }
+ else
+ {
+ if(!pThread->IsRealThreadPoolResetNeeded())
+ {
+ pThread->ResetManagedThreadObjectInCoopMode(ThreadNative::PRIORITY_NORMAL);
+ pThread->InternalReset(FALSE, TRUE, FALSE, FALSE);
+ if(ThreadpoolMgr::ShouldGrowCompletionPortThreadpool(ThreadpoolMgr::CPThreadCounter.DangerousGetDirtyCounts()))
+ {
+ //We may have to create a CP thread, go back to the VM, and process the packet there.
+ ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped);
+ *lpOverlapped = NULL;
+ }
+ }
+ else
+ {
+ //A more complete reset is needed (due to change in priority etc), go back to the VM,
+ //and process the packet there.
+
+ ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped);
+ *lpOverlapped = NULL;
+ }
+ }
+
+ // If this will be "dispatched" to the managed callback, fire the IODequeue event:
+ if (*lpOverlapped != NULL && ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadPoolIODequeue))
+ FireEtwThreadPoolIODequeue(*lpOverlapped, (BYTE*)(*lpOverlapped) - offsetof(OverlappedDataObject, Internal), GetClrInstanceId());
+
+#else // !FEATURE_PAL
+ *lpOverlapped = NULL;
+#endif // !FEATURE_PAL
+
+ return;
+}
+FCIMPLEND
+
+FCIMPL1(void*, AllocateNativeOverlapped, OverlappedDataObject* overlappedUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ OVERLAPPEDDATAREF overlapped = ObjectToOVERLAPPEDDATAREF(overlappedUNSAFE);
+ OBJECTREF userObject = overlapped->m_userObject;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_NONE, overlapped, userObject);
+
+ AsyncPinningHandleHolder handle;
+
+ if (g_pOverlappedDataClass == NULL)
+ {
+ g_pOverlappedDataClass = MscorlibBinder::GetClass(CLASS__OVERLAPPEDDATA);
+ // We have an optimization that avoids creating an event if the IO is in the default domain.
+ // This depends on the default domain never being unloaded.
+ _ASSERTE(IsSingleAppDomain() || !SystemDomain::System()->DefaultDomain()->CanUnload());
+ _ASSERTE(SystemDomain::System()->DefaultDomain()->GetId().m_dwId == DefaultADID);
+ }
+
+ CONSISTENCY_CHECK(overlapped->GetMethodTable() == g_pOverlappedDataClass);
+
+ overlapped->m_AppDomainId = GetAppDomain()->GetId().m_dwId;
+
+ if (userObject != NULL)
+ {
+ if (overlapped->m_isArray == 1)
+ {
+ BASEARRAYREF asArray = (BASEARRAYREF) userObject;
+ OBJECTREF *pObj = (OBJECTREF*)(asArray->GetDataPtr());
+ SIZE_T num = asArray->GetNumComponents();
+ SIZE_T i;
+ for (i = 0; i < num; i ++)
+ {
+ GCHandleValidatePinnedObject(pObj[i]);
+ }
+ for (i = 0; i < num; i ++)
+ {
+ asArray = (BASEARRAYREF) userObject;
+ AddMTForPinHandle(pObj[i]);
+ }
+ }
+ else
+ {
+ GCHandleValidatePinnedObject(userObject);
+ AddMTForPinHandle(userObject);
+ }
+
+ }
+
+ handle = GetAppDomain()->CreateTypedHandle(overlapped, HNDTYPE_ASYNCPINNED);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ // CoreCLR does not have IO completion hosted
+ if (CLRIoCompletionHosted())
+ {
+ _ASSERTE(CorHost2::GetHostIoCompletionManager());
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = CorHost2::GetHostIoCompletionManager()->InitializeHostOverlapped(&overlapped->Internal);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ {
+ COMPlusThrowHR(hr);
+ }
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ handle.SuppressRelease();
+ overlapped->m_pinSelf = handle;
+
+ HELPER_METHOD_FRAME_END();
+ LOG((LF_INTEROP, LL_INFO10000, "In AllocateNativeOverlapped thread 0x%x\n", GetThread()));
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadPoolIODequeue))
+ FireEtwThreadPoolIOPack(&overlapped->Internal, overlappedUNSAFE, GetClrInstanceId());
+
+ return &overlapped->Internal;
+}
+FCIMPLEND
+
+FCIMPL1(void, FreeNativeOverlapped, LPOVERLAPPED lpOverlapped)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ OVERLAPPEDDATAREF overlapped = ObjectToOVERLAPPEDDATAREF(OverlappedDataObject::GetOverlapped(lpOverlapped));
+ CONSISTENCY_CHECK(g_pOverlappedDataClass && (overlapped->GetMethodTable() == g_pOverlappedDataClass));
+
+ // We don't want to call HasCompleted in the default domain, because we don't have
+ // overlapped handle support.
+ if ((!overlapped->HasCompleted ()))
+ {
+#ifdef MDA_SUPPORTED
+ MdaOverlappedFreeError *pFreeError = MDA_GET_ASSISTANT(OverlappedFreeError);
+ if (pFreeError)
+ {
+ pFreeError->ReportError((LPVOID) OVERLAPPEDDATAREFToObject(overlapped));
+
+ // If we entered ReportError then our overlapped OBJECTREF became technically invalid,
+ // since a gc can be triggered. That causes an assert from FreeAsyncPinHandles() below.
+ // (I say technically because the object is pinned and won't really move)
+ overlapped = ObjectToOVERLAPPEDDATAREF(OverlappedDataObject::GetOverlapped(lpOverlapped));
+ }
+#endif // MDA_SUPPORTED
+ }
+
+ overlapped->FreeAsyncPinHandles();
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL1(OverlappedDataObject*, GetOverlappedFromNative, LPOVERLAPPED lpOverlapped)
+{
+ FCALL_CONTRACT;
+
+ CONSISTENCY_CHECK(g_pOverlappedDataClass && (OverlappedDataObject::GetOverlapped(lpOverlapped)->GetMethodTable() == g_pOverlappedDataClass));
+
+ return OverlappedDataObject::GetOverlapped(lpOverlapped);
+}
+FCIMPLEND
+
+void OverlappedDataObject::FreeAsyncPinHandles()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // This cannot throw or return error, and cannot force SO because it is called
+ // from CCLRIoCompletionManager::OnComplete which probes.
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ CONSISTENCY_CHECK(g_pOverlappedDataClass && (this->GetMethodTable() == g_pOverlappedDataClass));
+
+ _ASSERTE(GetThread() != NULL);
+
+ if (m_pinSelf)
+ {
+ OBJECTHANDLE h = m_pinSelf;
+ if (FastInterlockCompareExchangePointer(&m_pinSelf, static_cast<OBJECTHANDLE>(NULL), h) == h)
+ {
+ DestroyAsyncPinningHandle(h);
+ }
+ }
+
+ EventHandle = 0;
+}
+
+
+void OverlappedDataObject::StartCleanup()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) {MODE_COOPERATIVE;} else {DISABLED(MODE_COOPERATIVE);}
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if (s_CleanupRequestCount == 0)
+ {
+ return;
+ }
+
+ LONG curCount = s_CleanupRequestCount;
+ if (FastInterlockExchange((LONG*)&s_CleanupInProgress, TRUE) == FALSE)
+ {
+ {
+ BOOL HasJob = Ref_HandleAsyncPinHandles();
+ if (!HasJob)
+ {
+ s_CleanupInProgress = FALSE;
+ FastInterlockExchangeAdd (&s_CleanupRequestCount, -curCount);
+ return;
+ }
+ }
+
+ if (!ThreadpoolMgr::DrainCompletionPortQueue())
+ {
+ s_CleanupInProgress = FALSE;
+ }
+ else
+ {
+ FastInterlockExchangeAdd (&s_CleanupRequestCount, -curCount);
+ }
+ }
+}
+
+
+void OverlappedDataObject::FinishCleanup(bool wasDrained)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (wasDrained)
+ {
+ GCX_COOP();
+
+ s_CleanupFreeHandle = TRUE;
+ Ref_HandleAsyncPinHandles();
+ s_CleanupFreeHandle = FALSE;
+
+ s_CleanupInProgress = FALSE;
+ if (s_CleanupRequestCount > 0)
+ {
+ StartCleanup();
+ }
+ }
+ else
+ {
+ s_CleanupInProgress = FALSE;
+ }
+}
+
+
+void OverlappedDataObject::HandleAsyncPinHandle()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE (s_CleanupInProgress);
+ if (m_toBeCleaned || !ThreadpoolMgr::IsCompletionPortInitialized())
+ {
+ OBJECTHANDLE h = m_pinSelf;
+ if (h)
+ {
+ if (FastInterlockCompareExchangePointer(&m_pinSelf, (OBJECTHANDLE)NULL, h) == h)
+ {
+ DestroyAsyncPinningHandle(h);
+ }
+ }
+ }
+ else if (!s_CleanupFreeHandle)
+ {
+ m_toBeCleaned = 1;
+ }
+}
+
+
+// A hash table to track the sizes of objects that may be moved to the default domain
+typedef EEHashTable<size_t, EEPtrHashTableHelper<size_t>, FALSE> EEHashTableOfMT;
+EEHashTableOfMT *s_pPinHandleTable;
+
+CrstStatic s_PinHandleTableCrst;
+
+void InitializePinHandleTable()
+{
+ WRAPPER_NO_CONTRACT;
+
+ s_PinHandleTableCrst.Init(CrstPinHandle);
+ LockOwner lock = {&s_PinHandleTableCrst, IsOwnerOfCrst};
+ s_pPinHandleTable = new EEHashTableOfMT();
+ s_pPinHandleTable->Init(10, &lock);
+}
+
+// We cannot afford to fail due to OOM when moving an object to the default domain during AD unload,
+// so if a dummy MethodTable may be needed later, we allocate it here.
+void AddMTForPinHandle(OBJECTREF obj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ }
+ CONTRACTL_END;
+
+ if (obj == NULL)
+ {
+ return;
+ }
+
+ _ASSERTE (g_pOverlappedDataClass != NULL);
+
+ SSIZE_T size = 0;
+ MethodTable *pMT = obj->GetMethodTable();
+
+ if (pMT->GetLoaderModule()->IsSystem())
+ {
+ return;
+ }
+
+ if (pMT->IsArray())
+ {
+#ifdef _DEBUG
+ BASEARRAYREF asArray = (BASEARRAYREF) obj;
+ TypeHandle th = asArray->GetArrayElementTypeHandle();
+ _ASSERTE (!th.IsTypeDesc());
+ MethodTable *pElemMT = th.AsMethodTable();
+ _ASSERTE (pElemMT->IsValueType() && pElemMT->IsBlittable());
+ _ASSERTE (!pElemMT->GetLoaderModule()->IsSystem());
+#endif
+
+ // Create an ArrayMethodTable that has the same element size.
+ // Use a negative number for arrays of structs - this assumes that
+ // the maximum type base size is less than 2GB.
+ size = - (SSIZE_T)pMT->GetComponentSize();
+ _ASSERTE(size < 0);
+ }
+ else
+ {
+ size = pMT->GetBaseSize();
+ _ASSERTE(size >= 0);
+ }
+
+ HashDatum data;
+ if (s_pPinHandleTable->GetValue(size, &data) == FALSE)
+ {
+ CrstHolder csh(&s_PinHandleTableCrst);
+ if (s_pPinHandleTable->GetValue(size, &data) == FALSE)
+ {
+ // We do not need to include a GCDesc here, since this
+ // MethodTable does not contain pointers.
+ BYTE *buffer = new BYTE[sizeof(MethodTable)];
+ memset (buffer, 0, sizeof(MethodTable));
+ MethodTable *pNewMT = (MethodTable *)buffer;
+ NewArrayHolder<BYTE> pMTHolder(buffer);
+ pNewMT->SetIsAsyncPinType();
+ if (size >= 0)
+ {
+ pNewMT->SetBaseSize(static_cast<DWORD>(size));
+ }
+ else
+ {
+ pNewMT->SetBaseSize(ObjSizeOf (ArrayBase));
+ pNewMT->SetComponentSize(static_cast<WORD>(-size));
+ }
+ s_pPinHandleTable->InsertValue(size, (HashDatum)pNewMT);
+ pMTHolder.SuppressRelease();
+ }
+ }
+}
+
+// We need to ensure that the MethodTable of an object is valid in the default domain when the object
+// is moved to the default domain during AD unload.
+void BashMTForPinnedObject(OBJECTREF obj)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ if (obj == NULL)
+ {
+ return;
+ }
+
+ ADIndex adIndx = obj->GetAppDomainIndex();
+ ADIndex defaultAdIndx = SystemDomain::System()->DefaultDomain()->GetIndex();
+ if (adIndx.m_dwIndex != 0 && adIndx != defaultAdIndx)
+ {
+ obj->GetHeader()->ResetAppDomainIndexNoFailure(defaultAdIndx);
+ }
+ SSIZE_T size = 0;
+ MethodTable *pMT = obj->GetMethodTable();
+
+ if (pMT == g_pOverlappedDataClass)
+ {
+ // Managed Overlapped
+ OVERLAPPEDDATAREF overlapped = (OVERLAPPEDDATAREF)(obj);
+ overlapped->m_asyncResult = NULL;
+ overlapped->m_iocb = NULL;
+ overlapped->m_iocbHelper = NULL;
+ overlapped->m_overlapped = NULL;
+
+ if (overlapped->m_userObject != NULL)
+ {
+ if (overlapped->m_isArray == 1)
+ {
+ BASEARRAYREF asArray = (BASEARRAYREF) (overlapped->m_userObject);
+ OBJECTREF *pObj = (OBJECTREF*)asArray->GetDataPtr (TRUE);
+ SIZE_T num = asArray->GetNumComponents();
+ for (SIZE_T i = 0; i < num; i ++)
+ {
+ BashMTForPinnedObject(pObj[i]);
+ }
+ }
+ else
+ {
+ BashMTForPinnedObject(overlapped->m_userObject);
+ }
+ }
+ STRESS_LOG1 (LF_APPDOMAIN | LF_GC, LL_INFO100, "OverlappedData %p:MT is bashed\n", OBJECTREFToObject (overlapped));
+ return;
+ }
+
+ if (pMT->GetLoaderModule()->IsSystem())
+ {
+ return;
+ }
+
+ if (pMT->IsArray())
+ {
+#ifdef _DEBUG
+ BASEARRAYREF asArray = (BASEARRAYREF) obj;
+ TypeHandle th = asArray->GetArrayElementTypeHandle();
+ _ASSERTE (!th.IsTypeDesc());
+ MethodTable *pElemMT = th.AsMethodTable();
+ _ASSERTE (pElemMT->IsValueType() && pElemMT->IsBlittable());
+ _ASSERTE (!pElemMT->GetLoaderModule()->IsSystem());
+#endif
+
+ // Create an ArrayMethodTable that has the same element size
+ size = - (SSIZE_T)pMT->GetComponentSize();
+ }
+ else
+ {
+ _ASSERTE (pMT->IsBlittable());
+ size = pMT->GetBaseSize();
+ }
+
+ HashDatum data = NULL;
+ BOOL fRet;
+ fRet = s_pPinHandleTable->GetValue(size, &data);
+ _ASSERTE(fRet);
+ PREFIX_ASSUME(data != NULL);
+ obj->SetMethodTable((MethodTable*)data);
+}
diff --git a/src/vm/nativeoverlapped.h b/src/vm/nativeoverlapped.h
new file mode 100644
index 0000000000..c3fa891b43
--- /dev/null
+++ b/src/vm/nativeoverlapped.h
@@ -0,0 +1,157 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*============================================================
+**
+** Header: COMNativeOverlapped.h
+**
+** Purpose: Native methods for allocating and freeing NativeOverlapped
+**
+
+**
+===========================================================*/
+
+#ifndef _OVERLAPPED_H
+#define _OVERLAPPED_H
+
+// This should match the managed Overlapped object.
+// If you make any change here, you must also update the managed Overlapped class.
+class OverlappedDataObject : public Object
+{
+public:
+ ASYNCRESULTREF m_asyncResult;
+ OBJECTREF m_iocb;
+ OBJECTREF m_iocbHelper;
+ OBJECTREF m_overlapped;
+ OBJECTREF m_userObject;
+
+ //
+ // NOTE! WCF directly accesses m_pinSelf from managed code, using a hard-coded negative
+ // offset from the Internal member, below. They need this so they can modify the
+ // contents of m_userObject; after such modification, they need to update this handle
+ // to be in the correct GC generation.
+ //
+ // If you need to add or remove fields between this one and Internal, be sure that
+ // you also fix the hard-coded offsets in ndp\cdf\src\WCF\ServiceModel\System\ServiceModel\Channels\OverlappedContext.cs.
+ //
+ OBJECTHANDLE m_pinSelf;
+
+ // OverlappedDataObject is very special. An async pin handle keeps it alive.
+ // During GC, we also make sure that:
+ // 1. m_userObject itself does not move if m_userObject is not an array
+ // 2. Every object pointed to by m_userObject does not move if m_userObject is an array
+ // We do not want to pin m_userObject if it is an array, but m_userObject may be updated
+ // during the relocation phase before OverlappedDataObject has done its own relocation.
+ // m_userObjectInternal is used to track the location of m_userObject before it is updated.
+ void *m_userObjectInternal;
+ DWORD m_AppDomainId;
+ unsigned char m_isArray;
+ unsigned char m_toBeCleaned;
+
+ ULONG_PTR Internal;
+ ULONG_PTR InternalHigh;
+ int OffsetLow;
+ int OffsetHigh;
+ ULONG_PTR EventHandle;
+
+ static OverlappedDataObject* GetOverlapped (LPOVERLAPPED nativeOverlapped)
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ _ASSERTE (nativeOverlapped != NULL);
+ _ASSERTE (GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) nativeOverlapped));
+
+ return (OverlappedDataObject*)((BYTE*)nativeOverlapped - offsetof(OverlappedDataObject, Internal));
+ }
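+
+ // An illustrative (non-normative) round trip: native code is handed the
+ // address of the Internal field as its LPOVERLAPPED, so GetOverlapped can
+ // recover the managed object by subtracting that field's offset:
+ //
+ //     OverlappedDataObject *pData = ...;                // hypothetical object
+ //     LPOVERLAPPED lp = (LPOVERLAPPED)&pData->Internal;
+ //     _ASSERTE(OverlappedDataObject::GetOverlapped(lp) == pData);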
+
+ DWORD GetAppDomainId()
+ {
+ return m_AppDomainId;
+ }
+
+ void HandleAsyncPinHandle();
+
+ void FreeAsyncPinHandles();
+
+ BOOL HasCompleted()
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifndef FEATURE_PAL
+ return HasOverlappedIoCompleted((LPOVERLAPPED) &Internal);
+#else // !FEATURE_PAL
+ return FALSE;
+#endif // !FEATURE_PAL
+ }
+
+private:
+ static LONG s_CleanupRequestCount;
+ static BOOL s_CleanupInProgress;
+ static BOOL s_GCDetectsCleanup;
+ static BOOL s_CleanupFreeHandle;
+
+public:
+ static void RequestCleanup()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ FastInterlockIncrement(&s_CleanupRequestCount);
+ if (!s_CleanupInProgress)
+ {
+ StartCleanup();
+ }
+ }
+ static void StartCleanup();
+
+ static void FinishCleanup(bool wasDrained);
+
+ static void MarkCleanupNeededFromGC()
+ {
+ LIMITED_METHOD_CONTRACT;
+ s_GCDetectsCleanup = TRUE;
+ }
+
+ static BOOL CleanupNeededFromGC()
+ {
+ return s_GCDetectsCleanup;
+ }
+
+ static void RequestCleanupFromGC()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (s_GCDetectsCleanup)
+ {
+ s_GCDetectsCleanup = FALSE;
+ RequestCleanup();
+ }
+ }
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+
+typedef REF<OverlappedDataObject> OVERLAPPEDDATAREF;
+#define ObjectToOVERLAPPEDDATAREF(obj) (OVERLAPPEDDATAREF(obj))
+#define OVERLAPPEDDATAREFToObject(objref) (OBJECTREFToObject (objref))
+
+#else
+
+typedef OverlappedDataObject* OVERLAPPEDDATAREF;
+#define ObjectToOVERLAPPEDDATAREF(obj) ((OverlappedDataObject*) (obj))
+#define OVERLAPPEDDATAREFToObject(objref) ((OverlappedDataObject*) (objref))
+
+#endif
+
+FCDECL3(void, CheckVMForIOPacket, LPOVERLAPPED* lpOverlapped, DWORD* errorCode, DWORD* numBytes);
+FCDECL1(void*, AllocateNativeOverlapped, OverlappedDataObject* overlapped);
+FCDECL1(void, FreeNativeOverlapped, LPOVERLAPPED lpOverlapped);
+FCDECL1(OverlappedDataObject*, GetOverlappedFromNative, LPOVERLAPPED lpOverlapped);
+
+void InitializePinHandleTable();
+void AddMTForPinHandle(OBJECTREF obj);
+void BashMTForPinnedObject(OBJECTREF obj);
+
+#endif
diff --git a/src/vm/newcompressedstack.cpp b/src/vm/newcompressedstack.cpp
new file mode 100644
index 0000000000..245a006f1e
--- /dev/null
+++ b/src/vm/newcompressedstack.cpp
@@ -0,0 +1,1075 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+
+#include "common.h"
+#ifdef FEATURE_COMPRESSEDSTACK
+
+#include "newcompressedstack.h"
+#include "security.h"
+#ifdef FEATURE_REMOTING
+#include "appdomainhelper.h"
+#endif
+#include "securitystackwalk.h"
+#include "appdomainstack.inl"
+#include "appdomain.inl"
+
+
+DomainCompressedStack::DomainCompressedStack(ADID domainID)
+: m_DomainID(domainID),
+ m_ignoreAD(FALSE),
+ m_dwOverridesCount(0),
+ m_dwAssertCount(0),
+ m_Homogeneous(FALSE)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+BOOL DomainCompressedStack::IsAssemblyPresent(ISharedSecurityDescriptor* ssd)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }CONTRACTL_END;
+
+
+ // Only checks the first level and does not recurse into compressed stacks
+ void* pEntry = NULL;
+
+ if (m_EntryList.GetCount() == 0)
+ return FALSE;
+
+ // Quick check the last entry we added - common case
+ pEntry = m_EntryList.Get(m_EntryList.GetCount() - 1);
+ if (pEntry == (void *)SET_LOW_BIT(ssd))
+ return TRUE;
+
+ // Go thru the whole list now - is this optimal?
+ ArrayList::Iterator iter = m_EntryList.Iterate();
+
+ while (iter.Next())
+ {
+ pEntry = iter.GetElement();
+ if (pEntry == (void *)SET_LOW_BIT(ssd))
+ return TRUE;
+ }
+ return FALSE;
+}
+
+void DomainCompressedStack::AddEntry(void * ptr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ IfFailThrow(m_EntryList.Append(ptr));
+
+}
+VOID FrameSecurityDescriptorCopyFrom(FRAMESECDESCREF newFsdRef, FRAMESECDESCREF fsd)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ newFsdRef->SetImperativeAssertions(fsd->GetImperativeAssertions());
+ newFsdRef->SetImperativeDenials(fsd->GetImperativeDenials());
+ newFsdRef->SetImperativeRestrictions(fsd->GetImperativeRestrictions());
+ newFsdRef->SetDeclarativeAssertions(fsd->GetDeclarativeAssertions());
+ newFsdRef->SetDeclarativeDenials(fsd->GetDeclarativeDenials());
+ newFsdRef->SetDeclarativeRestrictions(fsd->GetDeclarativeRestrictions());
+ newFsdRef->SetAssertAllPossible(fsd->HasAssertAllPossible());
+ newFsdRef->SetAssertFT(fsd->HasAssertFT());
+}
+
+void DomainCompressedStack::AddFrameEntry(AppDomain *pAppDomain, FRAMESECDESCREF fsdRef, BOOL bIsAHDMFrame, OBJECTREF dynamicResolverRef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ ENTER_DOMAIN_PTR(pAppDomain,ADV_RUNNINGIN) //have it on the stack
+ {
+ struct gc
+ {
+ OBJECTREF fsdRef;
+ OBJECTREF newFsdRef;
+ OBJECTREF dynamicResolverRef;
+ } gc;
+ ZeroMemory( &gc, sizeof( gc ) );
+ gc.fsdRef = (OBJECTREF)fsdRef;
+ gc.dynamicResolverRef = dynamicResolverRef;
+
+ GCPROTECT_BEGIN(gc);
+
+ static MethodTable* pMethFrameSecDesc = NULL;
+ if (pMethFrameSecDesc == NULL)
+ pMethFrameSecDesc = MscorlibBinder::GetClass(CLASS__FRAME_SECURITY_DESCRIPTOR);
+
+ static MethodTable* pMethFrameSecDescWCS = NULL;
+ if (pMethFrameSecDescWCS == NULL)
+ pMethFrameSecDescWCS = MscorlibBinder::GetClass(CLASS__FRAME_SECURITY_DESCRIPTOR_WITH_RESOLVER);
+
+ if(!bIsAHDMFrame)
+ {
+ gc.newFsdRef = AllocateObject(pMethFrameSecDesc);
+ }
+ else
+ {
+ gc.newFsdRef = AllocateObject(pMethFrameSecDescWCS);
+ }
+
+ // We will not call the ctor and instead patch up the object based on the fsdRef passed in
+ FRAMESECDESCREF newFsdRef = (FRAMESECDESCREF)gc.newFsdRef;
+ FRAMESECDESCREF fsdRef1 = (FRAMESECDESCREF)gc.fsdRef;
+ if(fsdRef1 != NULL)
+ {
+ FrameSecurityDescriptorCopyFrom(newFsdRef, fsdRef1);
+ }
+ if(bIsAHDMFrame)
+ {
+ _ASSERTE(gc.dynamicResolverRef != NULL);
+ ((FRAMESECDESWITHRESOLVERCREF)newFsdRef)->SetDynamicMethodResolver(gc.dynamicResolverRef);
+ }
+ OBJECTHANDLEHolder tmpHnd(pAppDomain->CreateHandle(gc.newFsdRef));
+
+ AddEntry((void*)tmpHnd);
+ tmpHnd.SuppressRelease();
+ GCPROTECT_END();
+
+ }
+ END_DOMAIN_TRANSITION;
+
+}
+
+
+void DomainCompressedStack::Destroy(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Clear Domain info (handles etc.) if the AD has not been unloaded.
+ ClearDomainInfo();
+ return;
+}
+
+FCIMPL1(DWORD, DomainCompressedStack::GetDescCount, DomainCompressedStack* dcs)
+{
+ FCALL_CONTRACT;
+
+ FCUnique(0x42);
+
+ return dcs->m_EntryList.GetCount();
+}
+FCIMPLEND
+
+FCIMPL3(void, DomainCompressedStack::GetDomainPermissionSets, DomainCompressedStack* dcs, OBJECTREF* ppGranted, OBJECTREF* ppDenied)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ *ppGranted = NULL;
+ *ppDenied = NULL;
+
+ AppDomain* appDomain = SystemDomain::GetAppDomainFromId(dcs->GetMyDomain(),ADV_RUNNINGIN);
+ if (appDomain == NULL)
+ {
+ // this might be the unloading AD
+ AppDomain *pUnloadingDomain = SystemDomain::System()->AppDomainBeingUnloaded();
+ if (pUnloadingDomain && pUnloadingDomain->GetId() == dcs->m_DomainID)
+ {
+#ifdef _DEBUG
+ CheckADValidity(pUnloadingDomain, ADV_RUNNINGIN);
+#endif
+ appDomain = pUnloadingDomain;
+ }
+ }
+ _ASSERTE(appDomain != NULL);
+ if (appDomain != NULL)
+ {
+ IApplicationSecurityDescriptor * pAppSecDesc = appDomain->GetSecurityDescriptor();
+ _ASSERTE(pAppSecDesc != NULL);
+ if (pAppSecDesc != NULL)
+ {
+ *ppGranted = pAppSecDesc->GetGrantedPermissionSet(ppDenied);
+ }
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL6(FC_BOOL_RET, DomainCompressedStack::GetDescriptorInfo, DomainCompressedStack* dcs, DWORD index, OBJECTREF* ppGranted, OBJECTREF* ppDenied, OBJECTREF* ppAssembly, OBJECTREF* ppFSD)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(dcs != NULL);
+ AppDomain* pCurrentDomain = GetAppDomain();
+ BOOL bRetVal = FALSE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0()
+ *ppGranted = NULL;
+ *ppDenied = NULL;
+ *ppAssembly = NULL;
+ *ppFSD = NULL;
+ void* pEntry = dcs->m_EntryList.Get(index);
+ _ASSERTE(pEntry != NULL);
+ if (IS_LOW_BIT_SET(pEntry))
+ {
+ // Assembly found
+ SharedSecurityDescriptor* pSharedSecDesc = (SharedSecurityDescriptor* )UNSET_LOW_BIT(pEntry);
+ Assembly* pAssembly = pSharedSecDesc->GetAssembly();
+ IAssemblySecurityDescriptor* pAsmSecDesc = pAssembly->GetSecurityDescriptor( pCurrentDomain );
+ *ppGranted = pAsmSecDesc->GetGrantedPermissionSet(ppDenied);
+ *ppAssembly = pAssembly->GetExposedObject();
+ }
+ else
+ {
+ //FSD
+ OBJECTHANDLE objHnd = (OBJECTHANDLE)pEntry;
+ if (objHnd == NULL)
+ {
+ // throw an ADUnloaded exception which we will catch and then look at the serializedBlob
+ COMPlusThrow(kAppDomainUnloadedException);
+ }
+ *ppFSD = ObjectFromHandle(objHnd);
+ bRetVal = TRUE;
+ }
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(bRetVal);
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, DomainCompressedStack::IgnoreDomain, DomainCompressedStack* dcs)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(dcs != NULL);
+
+ FC_RETURN_BOOL(dcs->IgnoreDomainInternal());
+}
+FCIMPLEND
+
+BOOL DomainCompressedStack::IgnoreDomainInternal()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (m_ignoreAD)
+ return TRUE;
+
+ AppDomainFromIDHolder appDomain(GetMyDomain(), TRUE);
+ if (!appDomain.IsUnloaded())
+ {
+ IApplicationSecurityDescriptor *pAppSecDesc = appDomain->GetSecurityDescriptor();
+ _ASSERTE(pAppSecDesc != NULL);
+ if (pAppSecDesc != NULL)
+ {
+ return pAppSecDesc->IsDefaultAppDomain() || pAppSecDesc->IsInitializationInProgress();
+ }
+ }
+
+ return FALSE;
+}
+
+
+/*
+ Note that this function is called only once: when the managed PLS is being created.
+ It's possible that 2 threads could race at that point; the only downside is that they will both do the work. There are no harmful races.
+ Also, we'll never be operating on a DCS whose domain is not on the current callstack. This eliminates all kinds of ADU/demand-eval races.
+*/
+OBJECTREF DomainCompressedStack::GetDomainCompressedStackInternal(AppDomain *pDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // If we are going to skip this AppDomain, and there is nothing to compress, then we can skip building the DCS.
+ if (m_EntryList.GetCount() == 0 && IgnoreDomainInternal())
+ return NULL;
+
+ AppDomain* pCurrentDomain = GetAppDomain();
+
+ NewArrayHolder<BYTE> pbtmpSerializedObject(NULL);
+#ifndef FEATURE_CORECLR
+ DWORD cbtmpSerializedObject = 0;
+#endif
+
+ struct gc
+ {
+ OBJECTREF refRetVal;
+ } gc;
+ ZeroMemory( &gc, sizeof( gc ) );
+
+ GCPROTECT_BEGIN( gc );
+
+ // Create object
+ ENTER_DOMAIN_ID (GetMyDomain()) //on the stack
+ {
+
+ // Go ahead and create the object
+#ifdef FEATURE_CORECLR // ignore other appdomains
+ if (GetAppDomain() == pCurrentDomain)
+#endif
+ {
+ MethodDescCallSite createManagedObject(METHOD__DOMAIN_COMPRESSED_STACK__CREATE_MANAGED_OBJECT);
+ ARG_SLOT args[] = {PtrToArgSlot(this)};
+ gc.refRetVal = createManagedObject.Call_RetOBJECTREF(args);
+ }
+
+#ifndef FEATURE_CORECLR
+ // Do we want to marshal this object also?
+ if (GetAppDomain() != pCurrentDomain)
+ {
+ // Serialize to a blob;
+ AppDomainHelper::MarshalObject(GetAppDomain(), &gc.refRetVal, &pbtmpSerializedObject, &cbtmpSerializedObject);
+ if (pbtmpSerializedObject == NULL)
+ {
+ // this is an error: possibly an OOM prevented the blob from getting created.
+ // We could return null and let the managed code use a fully restricted object or throw here.
+ // Let's throw here...
+ COMPlusThrow(kSecurityException);
+ }
+ }
+#endif
+
+ }
+ END_DOMAIN_TRANSITION
+
+#ifndef FEATURE_CORECLR // should never happen for core clr
+ if (GetMyDomain() != pCurrentDomain->GetId())
+ {
+ AppDomainHelper::UnmarshalObject(pCurrentDomain,pbtmpSerializedObject, cbtmpSerializedObject, &gc.refRetVal);
+ }
+#endif
+
+ GCPROTECT_END();
+
+ return gc.refRetVal;
+}
+
+
+void DomainCompressedStack::ClearDomainInfo(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ // So, assume mutual exclusion holds and no races occur here
+
+
+ // Now it is time to NULL out any ObjectHandle we're using in the list of entries
+ ArrayList::Iterator iter = m_EntryList.Iterate();
+
+ while (iter.Next())
+ {
+ void* pEntry = iter.GetElement();
+ if (!IS_LOW_BIT_SET(pEntry))
+ {
+ DestroyHandle((OBJECTHANDLE)pEntry);
+ }
+ pEntry = NULL;
+ }
+
+
+ // Always clear the index into the domain object list and the domainID.
+ m_DomainID = ADID(INVALID_APPDOMAIN_ID);
+ return;
+}
+
+NewCompressedStack::NewCompressedStack()
+: m_DCSListCount(0),
+ m_currentDCS(NULL),
+ m_pCtxTxFrame(NULL),
+ m_CSAD(ADID(INVALID_APPDOMAIN_ID)),
+ m_ADStack(GetThread()->GetAppDomainStack()),
+ m_dwOverridesCount(0),
+ m_dwAssertCount(0)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_ADStack.GetNumDomains() > 0);
+ m_ADStack.InitDomainIteration(&adStackIndex);
+ m_DCSList = new DomainCompressedStack*[m_ADStack.GetNumDomains()];
+ memset(m_DCSList, 0, (m_ADStack.GetNumDomains()*sizeof(DomainCompressedStack*)));
+
+}
+
+
+void NewCompressedStack::Destroy( CLR_BOOL bEntriesOnly )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_DCSList != NULL)
+ {
+ m_currentDCS = NULL;
+ m_pCtxTxFrame = NULL;
+ for (DWORD i=0; i< m_ADStack.GetNumDomains(); i++)
+ {
+ DomainCompressedStack* dcs = m_DCSList[i];
+ if (dcs != NULL)
+ {
+ dcs->Destroy();
+ delete dcs;
+ }
+ }
+ delete[] m_DCSList;
+ m_DCSList = NULL;
+ }
+ if (!bEntriesOnly)
+ delete this;
+
+}
+
+
+void NewCompressedStack::ProcessAppDomainTransition(void)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ // Get the current adstack entry. Note that the first time we enter this function, adStackIndex will
+ // equal the size of the adstack array (similar to what happens in IEnumerator).
+ // So the initial pEntry will be NULL
+ AppDomainStackEntry *pEntry =
+ (adStackIndex == m_ADStack.GetNumDomains() ? NULL : m_ADStack.GetCurrentDomainEntryOnStack(adStackIndex));
+
+ // Update the value on the ADStack for the current domain
+ if (pEntry != NULL)
+ {
+ DWORD domainOverrides_measured = (m_currentDCS == NULL?0:m_currentDCS->GetOverridesCount());
+ DWORD domainAsserts_measured = (m_currentDCS == NULL?0:m_currentDCS->GetAssertCount());
+ if (pEntry->m_dwOverridesCount != domainOverrides_measured || pEntry->m_dwAsserts != domainAsserts_measured)
+ {
+ m_ADStack.UpdateDomainOnStack(adStackIndex, domainAsserts_measured, domainOverrides_measured);
+ GetThread()->UpdateDomainOnStack(adStackIndex, domainAsserts_measured, domainOverrides_measured);
+ }
+ }
+
+ // Move the domain index forward if this is not the last entry
+ if (adStackIndex > 0)
+ m_ADStack.GetNextDomainEntryOnStack(&adStackIndex);
+ m_currentDCS = NULL;
+
+ return;
+
+}
+DWORD NewCompressedStack::ProcessFrame(AppDomain* pAppDomain, Assembly* pAssembly, MethodDesc* pFunc, ISharedSecurityDescriptor* pSsd, FRAMESECDESCREF* pFsdRef)
+{
+ // This function will be called each time we hit a new stack frame in a stack walk.
+
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pAppDomain));
+ PRECONDITION(CheckPointer(pSsd));
+ } CONTRACTL_END;
+
+ // Get the current adstack entry. Note that the first time we enter this function, adStackIndex will
+ // equal the size of the adstack array (similar to what happens in IEnumerator).
+ // So the initial pEntry will be NULL
+ AppDomainStackEntry *pEntry =
+ (adStackIndex == m_ADStack.GetNumDomains() ? NULL : m_ADStack.GetCurrentDomainEntryOnStack(adStackIndex));
+
+
+ _ASSERTE(pEntry != NULL);
+ PREFIX_ASSUME(pEntry != NULL);
+ FRAMESECDESCREF FsdRef = (pFsdRef!=NULL?*pFsdRef:NULL);
+ DWORD dwFlags = 0;
+ if (FsdRef != NULL)
+ {
+
+ if (FsdRef->HasAssertFT())
+ dwFlags |= CORSEC_FT_ASSERT;
+ }
+
+ BOOL bIsAHDMFrame = FALSE;
+
+ if((pFunc != NULL) && !CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_Security_DisableAnonymouslyHostedDynamicMethodCreatorSecurityCheck))
+ {
+ ENTER_DOMAIN_PTR(pAppDomain, ADV_RUNNINGIN)
+ {
+ bIsAHDMFrame = SecurityStackWalk::MethodIsAnonymouslyHostedDynamicMethodWithCSToEvaluate(pFunc);
+ }
+ END_DOMAIN_TRANSITION;
+ }
+
+ if (!bIsAHDMFrame && ((pEntry->IsFullyTrustedWithNoStackModifiers()) ||
+ (m_currentDCS != NULL && m_currentDCS->m_Homogeneous)))
+ {
+ // Nothing to do in this entire AD.
+ return dwFlags;
+ }
+
+ ADID dNewDomainID = pAppDomain->GetId();
+ BOOL bAddSSD = (!pSsd->IsSystem() && !IsAssemblyPresent(dNewDomainID, pSsd));
+ BOOL bHasStackModifiers = FALSE;
+ DWORD overridesCount = 0;
+ DWORD assertCount = 0;
+
+
+ if (FsdRef != NULL)
+ {
+ overridesCount += FsdRef->GetOverridesCount();
+ assertCount += FsdRef->GetAssertCount();
+ }
+
+ // If this is an AHDM frame with a CS to evaluate, it may have overrides or asserts,
+ // so treat it as if it does
+ if(bIsAHDMFrame)
+ {
+ overridesCount++;
+ assertCount++;
+ }
+
+ bHasStackModifiers = ( (assertCount + overridesCount) > 0);
+
+ //
+ // We need to add a new DCS if we don't already have one for this AppDomain. If we've reached this
+ // point, either:
+ // * the AppDomain is partially trusted
+ // * the AppDomain is fully trusted, but may have stack modifiers in play
+ // * we're running in legacy mode where FullTrust doesn't mean FullTrust
+ //
+ // If the domain is partially trusted, we'll always need to capture it. If we got this far due to a
+ // fully trusted domain that might have stack modifiers, we only have to capture if there really were
+ // stack walk modifiers. In the legacy mode case, we need to capture the domain if we had stack
+ // modifiers or we needed to add the shared security descriptor.
+ //
+
+ BOOL bCreateDCS = (m_currentDCS == NULL || m_currentDCS->m_DomainID != dNewDomainID);
+ if (pAppDomain->GetSecurityDescriptor()->IsFullyTrusted())
+ {
+ bCreateDCS &= (bAddSSD || bHasStackModifiers);
+ }
+
+ if (bCreateDCS)
+ {
+ CreateDCS(dNewDomainID);
+ }
+
+ // Add the ISharedSecurityDescriptor (Assembly) to the list if it is not already present in the list
+ if (bAddSSD)
+ {
+ m_currentDCS->AddEntry((void*)SET_LOW_BIT(pSsd));
+ if (pEntry->IsHomogeneousWithNoStackModifiers())
+ m_currentDCS->m_Homogeneous = TRUE;
+ }
+ if (bHasStackModifiers)
+ {
+ OBJECTREF dynamicResolverRef = NULL;
+ if(bIsAHDMFrame)
+ {
+ _ASSERTE(pFunc->IsLCGMethod());
+ dynamicResolverRef = pFunc->AsDynamicMethodDesc()->GetLCGMethodResolver()->GetManagedResolver();
+ }
+
+ // We need to add the FSD entry here
+ m_currentDCS->AddFrameEntry(pAppDomain, FsdRef, bIsAHDMFrame, dynamicResolverRef);
+ m_currentDCS->m_dwOverridesCount += overridesCount;
+ m_currentDCS->m_dwAssertCount += assertCount;
+ m_dwOverridesCount += overridesCount;
+ m_dwAssertCount += assertCount;
+ }
+ return dwFlags;
+}
+
+// Build a CompressedStack given that all domains on the stack are homogeneous with no stack modifiers
+FCIMPL1(void, NewCompressedStack::FCallGetHomogeneousPLS, Object* hgPLSUnsafe)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refHomogeneousPLS = (OBJECTREF)hgPLSUnsafe;
+
+ HELPER_METHOD_FRAME_BEGIN_1(refHomogeneousPLS);
+
+
+ // Walk the adstack and update the grantSetUnion
+ AppDomainStack* pADStack = GetThread()->GetAppDomainStackPointer();
+ DWORD dwAppDomainIndex;
+ pADStack->InitDomainIteration(&dwAppDomainIndex);
+
+#ifdef FEATURE_REMOTING // without remoting we need only current appdomain
+ while (dwAppDomainIndex != 0)
+#endif
+ {
+ AppDomainStackEntry* pEntry = pADStack->GetNextDomainEntryOnStack(&dwAppDomainIndex);
+ _ASSERTE(pEntry != NULL);
+
+ pEntry->UpdateHomogeneousPLS(&refHomogeneousPLS);
+ }
+
+
+ HELPER_METHOD_FRAME_END();
+ return ;
+}
+FCIMPLEND;
+
+// Special case of ProcessFrame called with the CS at the base of the thread
+void NewCompressedStack::ProcessCS(AppDomain* pAppDomain, COMPRESSEDSTACKREF csRef, Frame *pFrame)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pAppDomain));
+ } CONTRACTL_END;
+
+ _ASSERTE(csRef != NULL && "Shouldn't call this function if CS is NULL");
+ ADID dNewDomainID = pAppDomain->GetId();
+ NewCompressedStack* pCS = (NewCompressedStack* )csRef->GetUnmanagedCompressedStack();
+ if (csRef->IsEmptyPLS() && (pCS == NULL || pCS->GetDCSListCount() == 0))
+ {
+ // Do nothing - empty inner CS
+ return;
+ }
+
+ // Let's special case the 1-domain CS that has no inner CSs here
+ // Check for:
+ // 1. == 1 DCS
+ // 2. DCS is in the correct AD (it's possible that pCS is AD-X and it has only one DCS in AD-Y, because AD-X has only mscorlib frames)
+ // 3. No inner CS
+ if ( pCS != NULL &&
+ pCS->GetDCSListCount() == 1 &&
+ pCS->m_DCSList != NULL &&
+ pCS->m_currentDCS != NULL &&
+ pCS->m_currentDCS->m_DomainID == dNewDomainID &&
+ pCS->m_CSAD == ADID(INVALID_APPDOMAIN_ID))
+ {
+ ProcessSingleDomainNCS(pCS, pAppDomain);
+ }
+ else
+ {
+
+ // set flag to ignore Domain grant set if the current DCS is the same as the one with the CS
+ if (m_currentDCS != NULL && m_currentDCS->m_DomainID == dNewDomainID)
+ {
+ m_currentDCS->m_ignoreAD = TRUE;
+ }
+
+ // Update overrides/asserts
+ if (pCS != NULL)
+ {
+ m_dwOverridesCount += pCS->GetOverridesCount();
+ m_dwAssertCount += pCS->GetAssertCount();
+ }
+
+
+ // Do we need to store the CtxTransitionFrame or did we get the CS from the thread?
+ if (pFrame != NULL)
+ {
+ _ASSERTE(csRef == SecurityStackWalk::GetCSFromContextTransitionFrame(pFrame));
+ // Use data from the CtxTxFrame
+ m_pCtxTxFrame = pFrame;
+ }
+ m_CSAD = dNewDomainID;
+ }
+
+}
+void NewCompressedStack::ProcessSingleDomainNCS(NewCompressedStack *pCS, AppDomain* pAppDomain)
+{
+
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pAppDomain));
+ } CONTRACTL_END;
+
+ _ASSERTE(pCS->GetDCSListCount() <= 1 && pCS->m_CSAD == ADID(INVALID_APPDOMAIN_ID));
+ ADID newDomainID = pAppDomain->GetId();
+ DomainCompressedStack* otherDCS = pCS->m_currentDCS;
+
+ if (otherDCS == NULL)
+ return;
+ if (m_currentDCS == NULL)
+ CreateDCS(newDomainID);
+
+
+ // Iterate thru the entryList in the current DCS
+ ArrayList::Iterator iter = otherDCS->m_EntryList.Iterate();
+ while (iter.Next())
+ {
+ void* pEntry = iter.GetElement();
+ if (IS_LOW_BIT_SET(pEntry))
+ {
+ if (!IsAssemblyPresent(newDomainID, (ISharedSecurityDescriptor*)UNSET_LOW_BIT(pEntry)))
+ {
+ //Add the assembly
+ m_currentDCS->AddEntry(pEntry);
+ }
+ }
+ else
+ {
+ // FrameSecurityDescriptor
+ OBJECTHANDLE objHnd = (OBJECTHANDLE)pEntry;
+ OBJECTREF fsdRef = ObjectFromHandle(objHnd);
+ OBJECTHANDLEHolder tmpHnd(pAppDomain->CreateHandle(fsdRef));
+
+ m_currentDCS->AddEntry((void*)tmpHnd);
+ tmpHnd.SuppressRelease();
+
+ }
+
+ }
+
+ m_currentDCS->m_dwOverridesCount += pCS->m_dwOverridesCount;
+ m_currentDCS->m_dwAssertCount += pCS->m_dwAssertCount;
+ m_dwOverridesCount += pCS->m_dwOverridesCount;
+ m_dwAssertCount += pCS->m_dwAssertCount;
+}
+void NewCompressedStack::CreateDCS(ADID domainID)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ _ASSERTE(adStackIndex < m_ADStack.GetNumDomains());
+ _ASSERTE (m_DCSList != NULL);
+ m_DCSList[adStackIndex] = new DomainCompressedStack(domainID);
+ m_currentDCS = m_DCSList[adStackIndex];
+ m_DCSListCount++;
+
+ return;
+}
+
+
+BOOL NewCompressedStack::IsAssemblyPresent(ADID domainID, ISharedSecurityDescriptor* pSsd)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(domainID != ADID(INVALID_APPDOMAIN_ID) && "Don't pass invalid domain");
+
+ BOOL bEntryPresent = FALSE;
+
+ for(DWORD i=0; i < m_ADStack.GetNumDomains(); i++)
+ {
+
+ DomainCompressedStack* pTmpDCS = m_DCSList[i];
+
+ if (pTmpDCS != NULL && pTmpDCS->m_DomainID == domainID && pTmpDCS->IsAssemblyPresent(pSsd))
+ {
+ bEntryPresent = TRUE;
+ break;
+ }
+ }
+ return bEntryPresent;
+}
+
+
+BOOL NewCompressedStack::IsDCSContained(DomainCompressedStack *pDCS)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // return FALSE if no DCS or DCS is for a domain that has been unloaded
+ if (pDCS == NULL || pDCS->m_DomainID == ADID(INVALID_APPDOMAIN_ID))
+ return FALSE;
+
+
+
+ // Iterate thru the entryList in the current DCS
+ ArrayList::Iterator iter = pDCS->m_EntryList.Iterate();
+ while (iter.Next())
+ {
+ void* pEntry = iter.GetElement();
+ if (IS_LOW_BIT_SET(pEntry))
+ {
+ // We only check Assemblies.
+ if (!IsAssemblyPresent(pDCS->m_DomainID, (ISharedSecurityDescriptor*)UNSET_LOW_BIT(pEntry)))
+ return FALSE;
+ }
+ }
+ return TRUE;
+}
+
+BOOL NewCompressedStack::IsNCSContained(NewCompressedStack *pCS)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Check if the first level of pCS is contained in this.
+ if (pCS == NULL)
+ return TRUE;
+
+ // Return FALSE if there are any overrides or asserts
+ if (pCS->GetOverridesCount() > 0)
+ return FALSE;
+ // Return FALSE if there is an inner CS
+ if (pCS->m_CSAD != ADID(INVALID_APPDOMAIN_ID))
+ return FALSE;
+
+ for(DWORD i=0; i < m_ADStack.GetNumDomains(); i++)
+ {
+ DomainCompressedStack *pDCS = (DomainCompressedStack *) m_DCSList[i];
+ if (!IsDCSContained(pDCS))
+ return FALSE;
+ }
+ return TRUE;
+
+}
+
+
+// If there is a compressed stack present in the captured CompressedStack, return that CS in the current domain
+OBJECTREF NewCompressedStack::GetCompressedStackInner()
+{
+ _ASSERTE(m_CSAD != ADID(INVALID_APPDOMAIN_ID));
+
+ AppDomain* pCurrentDomain = GetAppDomain();
+ NewArrayHolder<BYTE> pbtmpSerializedObject(NULL);
+
+
+ OBJECTREF refRetVal = NULL;
+
+ if (pCurrentDomain->GetId()== m_CSAD)
+ {
+ // we're in the right domain already
+ if (m_pCtxTxFrame == NULL)
+ {
+ // Get CS from the thread
+ refRetVal = GetThread()->GetCompressedStack();
+ }
+ else
+ {
+ // Get CS from a Ctx transition frame
+ refRetVal = (OBJECTREF)SecurityStackWalk::GetCSFromContextTransitionFrame(m_pCtxTxFrame);
+ _ASSERTE(refRetVal != NULL); //otherwise we would not have saved the frame in the CB data
+ }
+ }
+ else
+#ifndef FEATURE_CORECLR // should never happen for core clr
+ {
+ DWORD cbtmpSerializedObject = 0;
+ GCPROTECT_BEGIN (refRetVal);
+ // need to marshal the CS over into the current AD
+ ENTER_DOMAIN_ID(m_CSAD);
+ {
+ if (m_pCtxTxFrame == NULL)
+ {
+
+ // Get CS from the thread
+ refRetVal = GetThread()->GetCompressedStack();
+ }
+ else
+ {
+ // Get CS from a Ctx transition frame
+ refRetVal = (OBJECTREF)SecurityStackWalk::GetCSFromContextTransitionFrame(m_pCtxTxFrame);
+ _ASSERTE(refRetVal != NULL); //otherwise we would not have saved the frame in the CB data
+ }
+ AppDomainHelper::MarshalObject(GetAppDomain(), &refRetVal, &pbtmpSerializedObject, &cbtmpSerializedObject);
+ }
+ END_DOMAIN_TRANSITION
+ refRetVal = NULL;
+ AppDomainHelper::UnmarshalObject(pCurrentDomain,pbtmpSerializedObject, cbtmpSerializedObject, &refRetVal);
+ GCPROTECT_END ();
+ _ASSERTE(refRetVal != NULL); //otherwise we would not have saved the frame in the CB data
+ }
+#else
+ {
+ UNREACHABLE();
+ }
+#endif // !FEATURE_CORECLR
+
+ return refRetVal;
+
+}
+
+// == Now begin the functions used in building Demand evaluation of a compressed stack
+FCIMPL1(DWORD, NewCompressedStack::FCallGetDCSCount, SafeHandle* hcsUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ DWORD dwRet = 0;
+ if (hcsUNSAFE != NULL)
+ {
+ SAFEHANDLE hcsSAFE = (SAFEHANDLE) hcsUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(hcsSAFE);
+
+ NewCompressedStack* ncs = (NewCompressedStack *)hcsSAFE->GetHandle();
+
+ dwRet = ncs->m_ADStack.GetNumDomains();
+ HELPER_METHOD_FRAME_END();
+ }
+
+ return dwRet;
+
+}
+FCIMPLEND
+
+
+FCIMPL2(FC_BOOL_RET, NewCompressedStack::FCallIsImmediateCompletionCandidate, SafeHandle* hcsUNSAFE, OBJECTREF *innerCS)
+{
+ FCALL_CONTRACT;
+
+ BOOL bRet = FALSE;
+ if (hcsUNSAFE != NULL)
+ {
+ SAFEHANDLE hcsSAFE = (SAFEHANDLE) hcsUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(hcsSAFE);
+
+ *innerCS = NULL;
+
+ NewCompressedStack* ncs = (NewCompressedStack *)hcsSAFE->GetHandle();
+
+ if (ncs != NULL)
+ {
+ // Non-FT case
+
+ // Is there an inner CS?
+ BOOL bHasCS = (ncs->m_CSAD != ADID(INVALID_APPDOMAIN_ID));
+
+ // Is there is a DCS not in the current AD
+ BOOL bHasOtherAppDomain = FALSE;
+ if (ncs->m_DCSList != NULL)
+ {
+ for(DWORD i=0; i < ncs->m_ADStack.GetNumDomains(); i++)
+ {
+ DomainCompressedStack* dcs = ncs->m_DCSList[i];
+ if (dcs != NULL && dcs->GetMyDomain() != GetAppDomain()->GetId())
+ {
+ bHasOtherAppDomain = TRUE;
+ break;
+ }
+ }
+ }
+ if (bHasCS)
+ {
+
+
+ *innerCS = ncs->GetCompressedStackInner();
+ ncs->m_pCtxTxFrame = NULL; // Clear the CtxTxFrame ASAP
+
+ }
+ bRet = bHasOtherAppDomain||bHasCS;
+ }
+
+ HELPER_METHOD_FRAME_END();
+ }
+
+ FC_RETURN_BOOL(bRet);
+}
+FCIMPLEND
+
+
+FCIMPL2(Object*, NewCompressedStack::GetDomainCompressedStack, SafeHandle* hcsUNSAFE, DWORD index)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refRetVal = NULL;
+ if (hcsUNSAFE != NULL)
+ {
+ SAFEHANDLE hcsSAFE = (SAFEHANDLE) hcsUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refRetVal, hcsSAFE);
+
+
+
+ NewCompressedStack* ncs = (NewCompressedStack *)hcsSAFE->GetHandle();
+
+ // First we check to see if the DCS at the given index has a blob. If so, deserialize it into the current AD and return it. Else try to create it.
+ DomainCompressedStack* dcs = ncs->m_DCSList[index];
+ if (dcs != NULL)
+ {
+ refRetVal = dcs->GetDomainCompressedStackInternal(NULL);
+ }
+
+ HELPER_METHOD_FRAME_END();
+ }
+
+ return OBJECTREFToObject(refRetVal);
+
+}
+FCIMPLEND
+
+FCIMPL1(void, NewCompressedStack::DestroyDCSList, SafeHandle* hcsUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ SAFEHANDLE hcsSAFE = (SAFEHANDLE) hcsUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_1(hcsSAFE);
+
+ NewCompressedStack* ncs = (NewCompressedStack *)hcsSAFE->GetHandle();
+
+ ncs->Destroy(TRUE);
+
+ HELPER_METHOD_FRAME_END();
+
+}
+FCIMPLEND
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
diff --git a/src/vm/newcompressedstack.h b/src/vm/newcompressedstack.h
new file mode 100644
index 0000000000..7f8a6ddc19
--- /dev/null
+++ b/src/vm/newcompressedstack.h
@@ -0,0 +1,198 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+
+#ifndef __newcompressedstack_h__
+#define __newcompressedstack_h__
+#ifdef FEATURE_COMPRESSEDSTACK
+
+#include "objectlist.h"
+// Returns true if the low bit in the ptr argument is set to 1
+#define IS_LOW_BIT_SET(ptr) (((UINT_PTR)ptr) & 1)
+// Sets the low bit in the ptr passed in
+#define SET_LOW_BIT(ptr) (((UINT_PTR)ptr)|1)
+// Reset the low bit to 0
+#define UNSET_LOW_BIT(ptr) (((UINT_PTR)ptr)& ~((size_t)1))
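+
+// Illustrative usage (the locals here are sketches): DomainCompressedStack uses
+// these macros to tag its void* entries, so a single slot can hold either an
+// ISharedSecurityDescriptor* (low bit set) or an OBJECTHANDLE to a
+// FrameSecurityDescriptor (low bit clear):
+//
+//     void *pEntry = (void *)SET_LOW_BIT(pSsd);   // tag an assembly descriptor
+//     if (IS_LOW_BIT_SET(pEntry))
+//         pSsd = (ISharedSecurityDescriptor *)UNSET_LOW_BIT(pEntry);
+//     else
+//         objHnd = (OBJECTHANDLE)pEntry;          // untagged: an object handle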
+
+class DomainCompressedStack;
+class NewCompressedStack;
+
+
+
+// This is the class that will contain an array of entries.
+// All the entries will be for a single AppDomain
+class DomainCompressedStack
+{
+ friend class NewCompressedStack;
+public:
+ ADID GetMyDomain()
+ {
+ // It is OK if m_DomainID gets set to -1 by ADU code and we return a valid AD.
+ // (what that means is that tmp_adid is not invalid, but m_DomainID is set to invalid by ADU code
+ // after we cached it)
+ // Two cases:
+ // 1. AD has set NoEnter
+ // 1.a) current thread is finalizer: it will be allowed to enter the AD, but Destroy() takes a lock and checks again. So we're good
+ // 1.b) current thread is not finalizer: it will not be allowed to enter AD and the value returned here will be NULL
+ // 2. AD has not set NoEnter, but is in the process of CS processing at ADU
+ // A valid AD pointer is returned, which is all that this function is required to do. Since ADU unload is done handling this DCS, we'll not
+ // enter that AD, but use the blob in the DCS.
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ return m_DomainID;
+ }
+ // Construction and maintenance
+ DomainCompressedStack(ADID domainID); //ctor
+ BOOL IsAssemblyPresent(ISharedSecurityDescriptor* ssd);
+ void AddEntry(void *ptr);
+#ifndef DACCESS_COMPILE
+ void AddFrameEntry(AppDomain * pAppDomain, FRAMESECDESCREF fsdRef, BOOL bIsAHDMFrame, OBJECTREF dynamicResolverRef);
+#endif
+ void Destroy(void);
+
+ // Demand evaluation
+ static FCDECL1(DWORD, GetDescCount, DomainCompressedStack* dcs);
+ static FCDECL3(void, GetDomainPermissionSets, DomainCompressedStack* dcs, OBJECTREF* ppGranted, OBJECTREF* ppDenied);
+ static FCDECL6(FC_BOOL_RET, GetDescriptorInfo, DomainCompressedStack* dcs, DWORD index, OBJECTREF* ppGranted, OBJECTREF* ppDenied, OBJECTREF* ppAssembly, OBJECTREF* ppFSD);
+ static FCDECL1(FC_BOOL_RET, IgnoreDomain, DomainCompressedStack* dcs);
+ OBJECTREF GetDomainCompressedStackInternal(AppDomain *pDomain);
+
+ // AppDomain unload
+ void ClearDomainInfo(void);
+ static void AllHandleAppDomainUnload(AppDomain* pDomain, ADID domainId, ObjectList* list );
+ static void ReleaseDomainCompressedStack( DomainCompressedStack* dcs ) {
+ WRAPPER_NO_CONTRACT;
+ dcs->Destroy();
+ };
+ static DomainCompressedStack* GetNextEntryFromADList(AppDomain* pDomain, ObjectList::Iterator iter);
+ void AppDomainUnloadDone(AppDomain* pDomain);
+
+
+
+private:
+ ArrayList m_EntryList;
+ ADID m_DomainID; // either a valid domain ID or INVALID_APPDOMAIN_ID (set by unloading AppDomain to that value)
+ BOOL m_ignoreAD; // Do not look at domain grant set since we have a CS at the threadbaseobject.
+ DWORD m_dwOverridesCount;
+ DWORD m_dwAssertCount;
+ BOOL m_Homogeneous;
+
+ DWORD GetOverridesCount( void )
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwOverridesCount;
+ }
+
+ DWORD GetAssertCount( void )
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwAssertCount;
+ }
+
+ BOOL IgnoreDomainInternal();
+};
+typedef Holder<DomainCompressedStack*, DoNothing< DomainCompressedStack* >, DomainCompressedStack::ReleaseDomainCompressedStack > DomainCompressedStackHolder;
+
+class NewCompressedStack
+{
+
+private:
+ DomainCompressedStack** m_DCSList;
+ DWORD m_DCSListCount;
+ DomainCompressedStack *m_currentDCS;
+ Frame *m_pCtxTxFrame; // Be super careful where you use this. Remember that this is a stack location and is not always valid.
+ ADID m_CSAD;
+ AppDomainStack m_ADStack;
+ DWORD adStackIndex;
+ DWORD m_dwOverridesCount;
+ DWORD m_dwAssertCount;
+
+
+ void CreateDCS(ADID domainID);
+ BOOL IsAssemblyPresent(ADID domainID, ISharedSecurityDescriptor* pSsd);
+ BOOL IsDCSContained(DomainCompressedStack *pDCS);
+ BOOL IsNCSContained(NewCompressedStack *pCS);
+ void ProcessSingleDomainNCS(NewCompressedStack *pCS, AppDomain* pAppDomain);
+public:
+ DWORD GetDCSListCount(void)
+ {
+ // Returns # of non-NULL DCSList entries;
+ LIMITED_METHOD_CONTRACT;
+ return m_DCSListCount;
+ }
+ void Destroy( CLR_BOOL bEntriesOnly = FALSE);
+
+ static void DestroyCompressedStack( NewCompressedStack* stack ) {
+ WRAPPER_NO_CONTRACT;
+ stack->Destroy();
+ };
+
+ AppDomainStack& GetAppDomainStack( void )
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ADStack;
+ }
+
+ DWORD GetOverridesCount( void )
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwOverridesCount;
+ }
+
+ DWORD GetAssertCount( void )
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwAssertCount;
+ }
+ DWORD GetInnerAppDomainOverridesCount(void)
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_ADStack.GetInnerAppDomainOverridesCount();
+ }
+ DWORD GetInnerAppDomainAssertCount(void)
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_ADStack.GetInnerAppDomainAssertCount();
+ }
+
+ // This is called every time we hit a stack frame on a stack walk. It will be called with ASDs, SSDs, and FSDs, and this function will determine what
+ // (if any) action needs to be performed.
+ // For example:
+ // on seeing a new SSD, we'll add an entry to the current DCS
+ // on seeing an SSD we've already seen, we'll do nothing
+ // on seeing a new ASD, a new DCS will be created on this CS
+#ifndef DACCESS_COMPILE
+ void ProcessAppDomainTransition(void);
+ DWORD ProcessFrame(AppDomain* pAppDomain, Assembly* pAssembly, MethodDesc* pFunc, ISharedSecurityDescriptor* pSsd, FRAMESECDESCREF* pFsdRef);
+ void ProcessCS(AppDomain* pAppDomain, COMPRESSEDSTACKREF csRef, Frame *pFrame);
+
+#endif
+ // ctor
+ NewCompressedStack();
+ OBJECTREF GetCompressedStackInner();
+
+ // FCALLS
+ static FCDECL1(DWORD, FCallGetDCSCount, SafeHandle* hcsUNSAFE);
+ static FCDECL2(FC_BOOL_RET, FCallIsImmediateCompletionCandidate, SafeHandle* hcsUNSAFE, OBJECTREF *innerCS);
+ static FCDECL2(Object*, GetDomainCompressedStack, SafeHandle* hcsUNSAFE, DWORD index);
+ static FCDECL1(void, DestroyDCSList, SafeHandle* hcsUNSAFE);
+ static FCDECL1(void, FCallGetHomogeneousPLS, Object* hgPLSUnsafe);
+
+};
+typedef Holder<NewCompressedStack*, DoNothing< NewCompressedStack* >, NewCompressedStack::DestroyCompressedStack > NewCompressedStackHolder;
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+#endif /* __newcompressedstack_h__ */
+
+
diff --git a/src/vm/ngenhash.h b/src/vm/ngenhash.h
new file mode 100644
index 0000000000..d4c9f7bc9c
--- /dev/null
+++ b/src/vm/ngenhash.h
@@ -0,0 +1,493 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// NgenHash is an abstract base class (actually a templated base class) designed to factor out the
+// functionality common to hashes persisted into ngen images.
+//
+// SEMANTICS
+//
+// * Arbitrary entry payload (via sub-classing).
+// * 32-bit hash code for entries.
+// * Separate entry allocation and insertion (allowing reliable insertion if required).
+// * Enumerate all entries or entries matching a particular hash.
+// * No entry deletion.
+// * Base logic to efficiently serialize hash contents at ngen time. Hot/cold splitting of entries is
+// supported (along with the ability to tweak the Save and Fixup stages of each entry if needed).
+// * Base logic to support DAC memory enumeration of the hash (including per-entry tweaks as needed).
+// * Lock free lookup (the caller must follow the protocol laid out below under USER REQUIREMENTS).
+// * Automatic hash expansion (with dialable scale factor).
+// * Hash insertion is supported at runtime even when an ngen image is loaded with previously serialized hash
+// entries.
+// * Base logic to support formatting hashes in the nidump tool (only need to supply code for the unique
+// aspects of your hash).
+//
+// BENEFITS
+//
+// * Removes next pointer from all persisted hash entries:
+// o Reduces data footprint of each entry.
+// o Increases density of entries.
+// o Removes a base relocation entry.
+// o Removes a runtime write to each entry (from the relocation above).
+// * Serializes all hot/cold hash entries contiguously:
+// o Helps keep hash entries in the same bucket in the same cache line.
+// * Compresses persisted bucket list and removes the use of pointers:
+// o Reduces working set hit of reading hash table (especially on 64-bit systems).
+// o Allows bucket list to be saved in read-only memory and thus use shared rather than private pages.
+// * Factors out common code:
+// o Less chance of bugs, one place to make fixes.
+// o Less code overall.
+//
+// SUB-CLASSING REQUIREMENTS
+//
+// To author a new NgenHash-based hashtable, the following steps are required (a minimal sketch follows this list):
+// 1) In most cases (where each hash entry will have multiple fields) a structure defining the hash entry
+// should be declared (see EEClassHashEntry in ClassHash.h for an example). This structure need not
+// include a field for the hash code or pointer to the next entry in the hash bucket; these are taken care
+// of automatically by the base class. If the entry must reference another entry in the hash (this should
+// be rare) the NgenHashEntryRef<> template class should be used to abstract the reference (this class
+// hides some of the transformation work that must take place when entries are re-ordered during ngen
+// serialization).
+// 2) Declare your new hash class deriving from NgenHash and providing the following template parameters:
+// FINAL_CLASS : The class you're declaring (this is used by the base class to locate certain helper
+// methods in your class used to tweak hash behavior).
+// VALUE : The type of your hash entries (the class defined in the previous step).
+// SCALE_FACTOR : A multiplier on bucket count every time the hash table is grown (currently once the
+// number of hash entries exceeds twice the number of buckets). A value of 2 would double
+// the number of buckets on each grow operation for example.
+// 3) Define a constructor that invokes the base class constructor with various setup parameters (see
+// NgenHash constructor in this header). If your hash table is created via a static method rather than
+// direct construction (common) then call your constructor using an in-place new inside the static method
+// (see EEClassHashTable::Create in ClassHash.cpp for an example).
+// 4) Define your basic hash functionality (creation, insertion, lookup, enumeration, ngen Save/Fixup and DAC
+// memory enumeration) using the Base* methods provided by NgenHash.
+// 5) Tweak the operation of BaseSave, BaseFixup and BaseEnumMemoryRegions by providing definitions of the
+// following methods (note that all methods must be defined though they may be no-ops):
+//
+// bool ShouldSave(DataImage *pImage, VALUE *pEntry);
+// Return true if the given entry should be persisted into the ngen image (otherwise it won't be
+// saved with the rest).
+//
+// bool IsHotEntry(VALUE *pEntry, CorProfileData *pProfileData);
+// Return true if the entry is considered hot given the profiling data.
+//
+// bool SaveEntry(DataImage *pImage, CorProfileData *pProfileData, VALUE *pOldEntry, VALUE *pNewEntry, EntryMappingTable *pMap);
+// Gives your hash class a chance to save any additional data needed into the ngen image during
+// the Save phase or otherwise make entry updates prior to saving. The saving process creates a
+// new copy of each hash entry and this method is passed pointers both to the original entry and
+// the new version along with a mapping class that can translate any old entry address in the
+// table into the corresponding new address. If you have inter-entry pointer fields this is your
+// chance to fix up those fields with the new location of their target entries.
+//
+// void FixupEntry(DataImage *pImage, VALUE *pEntry, void *pFixupBase, DWORD cbFixupOffset);
+// Similar to SaveEntry but called during BaseFixup. This is your chance to register fixups for
+// any pointer type fields in your entry. Due to the way hash entries are packed during ngen
+// serialization individual hash entries are not saved as separate ngen zap nodes. So this method
+// is passed a pointer to the enclosing zapped data structure (pFixupBase) and the offset of the
+// entry from this base (cbFixupOffset). When calling pImage->FixupPointerField(...) for
+// instance, pass pFixupBase as the first parameter and cbFixupOffset + offsetof(YourEntryClass,
+// yourField) as the second parameter.
+//
+// void EnumMemoryRegionsForEntry(EEClassHashEntry_t *pEntry, CLRDataEnumMemoryFlags flags);
+// Called during BaseEnumMemoryRegions for each entry in the hash. Use to enumerate any memory
+// referenced by the entry (but not the entry itself).
+//
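+// As a hedged illustration of steps 1-5 above (every name here is hypothetical,
+// not something declared by this header), a minimal sub-class might look like:
+//
+//     struct MyEntry { MyPayload *m_pPayload; };
+//
+//     class MyHashTable : public NgenHashTable<MyHashTable, MyEntry, 2>
+//     {
+//     public:
+//         MyHashTable(Module *pModule, LoaderHeap *pHeap, DWORD cBuckets) :
+//             NgenHashTable<MyHashTable, MyEntry, 2>(pModule, pHeap, cBuckets) {}
+//
+//         // The tweak methods required by BaseSave/BaseFixup/BaseEnumMemoryRegions
+//         // (all no-ops in this sketch):
+//         bool ShouldSave(DataImage *pImage, MyEntry *pEntry) { return true; }
+//         bool IsHotEntry(MyEntry *pEntry, CorProfileData *pProfileData) { return false; }
+//         bool SaveEntry(DataImage *pImage, CorProfileData *pProfileData,
+//                        MyEntry *pOldEntry, MyEntry *pNewEntry, EntryMappingTable *pMap) { return false; }
+//         void FixupEntry(DataImage *pImage, MyEntry *pEntry, void *pFixupBase, DWORD cbFixupOffset) {}
+//         void EnumMemoryRegionsForEntry(MyEntry *pEntry, CLRDataEnumMemoryFlags flags) {}
+//     };
+//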
+// USER REQUIREMENTS
+//
+// Synchronization: It is permissible to read data from the hash without taking a lock as long as:
+// 1) Any hash modifications are performed under a lock or otherwise serialized.
+// 2) Any miss on a lookup is handled by taking a lock and retrying the lookup (see the sketch below).
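+//
+// For example, a non-normative sketch of that protocol (Lookup, Add and m_Crst
+// are hypothetical members of a derived hash class):
+//
+//     VALUE *pFound = pTable->Lookup(key);    // lock-free read
+//     if (pFound == NULL)
+//     {
+//         CrstHolder ch(&pTable->m_Crst);     // serialize with writers
+//         pFound = pTable->Lookup(key);       // retry under the lock
+//         if (pFound == NULL)
+//             pFound = pTable->Add(key);      // insert while still holding the lock
+//     }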
+//
+// OVERALL DESIGN
+//
+// The hash contains up to three groups of hash entries. These consist of two groups of entries persisted to
+// disk at ngen time (split into hot and cold based on profile data) and live entries added at runtime (or
+// during the ngen process itself, prior to the save operation).
+//
+// The persisted entries are tightly packed together and can eliminate some pointers and other metadata since
+// we statically know about every entry at the time we format the hash entries (the save phase of ngen
+// generation).
+//
+// Each persisted entry is assigned to a bucket based on its hash code and all entries that collide on a given
+// bucket are placed contiguously in memory. The bucket list itself therefore consists of an array of pairs,
+// each pair containing the count of entries in the bucket and the location of the first entry in the chain.
+// Since all entries are allocated contiguously entry location can be specified by an index into the array of
+// entries.
+//
+// Separate bucket lists and entry arrays are stored for hot and cold entries.
+//
+// The live entries (referred to here as volatile or warm entries) follow a more traditional hash
+// implementation where entries are allocated individually from a loader heap and are chained together with a
+// singly linked list if they collide. Here the bucket list is a simple array of pointers to the first entry
+// in each chain (if any).
+//
+// Unlike the persisted entries, the warm section of the table must cope with entry insertions and growing the
+// bucket list when the table becomes too loaded (too many entries causing excessive bucket collisions). This
+// happens when an entry insertion notes that there are twice as many entries as buckets. The bucket list is
+// then reallocated (from a loader heap, consequently the old one is leaked) and resized based on a scale
+// factor supplied by the hash sub-class.
+//
+// At runtime we look up or enumerate entries by visiting all three sets of entries in the order Hot, Warm and
+// Cold. This imposes a slight but constant time overhead.
+//
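+// As a hedged sketch of the persisted bucket encoding described above, each
+// bucket can be pictured as a (count, first-index) pair; the real field names
+// and packing are implementation details and may differ:
+//
+//     struct PersistedBucket
+//     {
+//         DWORD m_cEntries;    // number of entries that hashed to this bucket
+//         DWORD m_iFirstEntry; // index of the first such entry in the
+//                              // contiguous array of persisted entries
+//     };
+//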
+
+#ifndef __NGEN_HASH_INCLUDED
+#define __NGEN_HASH_INCLUDED
+
+#ifdef FEATURE_PREJIT
+#include "corcompile.h"
+#endif
+
+// The type used to contain an entry hash value. This is not customizable on a per-hash class basis: all
+// NgenHash derived hashes will share the same definition. Note that we only care about the data size, and the
+// fact that it is an unsigned integer value (so we can take a modulus for bucket computation and use bitwise
+// equality checks). The base class does not care about or participate in how these hash values are calculated.
+typedef DWORD NgenHashValue;
+
+// The following code (and code in NgenHash.inl) has to replicate the base class template parameters (and in
+// some cases the arguments) many many times. In the interests of brevity (and to make it a whole lot easier
+// to modify these parameters in the future) we define macro shorthands for them here. Scan through the code
+// to see how these are used.
+#define NGEN_HASH_PARAMS typename FINAL_CLASS, typename VALUE, int SCALE_FACTOR
+#define NGEN_HASH_ARGS FINAL_CLASS, VALUE, SCALE_FACTOR
+
+// Forward definition of NgenHashEntryRef (it takes the same template parameters as NgenHash and simplifies
+// hash entries that need to refer to other hash entries).
+template <NGEN_HASH_PARAMS>
+class NgenHashEntryRef;
+
+// The base hash class itself. It's abstract and exposes its functionality via protected members (nothing is
+// public).
+template <NGEN_HASH_PARAMS>
+class NgenHashTable
+{
+ // NgenHashEntryRef needs access to the base table internal during Fixup in order to compute zap node
+ // bases.
+ friend class NgenHashEntryRef<NGEN_HASH_ARGS>;
+
+#ifdef DACCESS_COMPILE
+ // Nidump knows how to walk this data structure.
+ friend class NativeImageDumper;
+#endif
+#ifdef BINDER
+ template<class HashEntry, class HashTableType> friend class NGenHashTableBuilder;
+ friend class MdilModule;
+#endif
+
+protected:
+ // This opaque structure provides enumeration context when walking the set of entries which share a common
+ // hash code. Initialized by BaseFindFirstEntryByHash and read/updated by BaseFindNextEntryByHash.
+ class LookupContext
+ {
+ friend class NgenHashTable<NGEN_HASH_ARGS>;
+
+ TADDR m_pEntry; // The entry the caller is currently looking at (or NULL to begin
+ // with). This is a VolatileEntry* or PersistedEntry* (depending on
+ // m_eType below) and should always be a target address not a DAC
+ // PTR_.
+ DWORD m_eType; // The entry types we're currently walking (Hot, Warm, Cold in that order)
+ DWORD m_cRemainingEntries; // The remaining entries in the bucket chain (Hot or Cold entries only)
+ };
+
+ // This opaque structure provides enumeration context when walking all entries in the table. Initialized
+ // by BaseInitIterator and updated via the BaseIterator::Next. Note that this structure is somewhat
+ // similar to LookupContext above (though it requires a bit more state). It's possible we could factor
+ // these two iterators into some common base code but the actual implementations have enough differing
+ // requirements that the resultant code could be less readable (and slightly less performant).
+ class BaseIterator
+ {
+ public:
+ // Returns a pointer to the next entry in the hash table, or NULL once all entries have been
+ // enumerated. Once NULL has been returned, the only legal operation is to re-initialize the iterator
+ // with BaseInitIterator.
+ DPTR(VALUE) Next();
+
+ private:
+ friend class NgenHashTable<NGEN_HASH_ARGS>;
+
+ NgenHashTable<NGEN_HASH_ARGS> *m_pTable; // Pointer back to the table being enumerated.
+ TADDR m_pEntry; // The entry the caller is currently looking at (or
+ // NULL to begin with). This is a VolatileEntry* or
+ // PersistedEntry* (depending on m_eType below) and
+ // should always be a target address not a DAC PTR_.
+ DWORD m_eType; // The entry types we're currently walking (Hot, Warm,
+ // Cold in that order).
+ union
+ {
+ DWORD m_dwBucket; // Index of bucket we're currently walking (Warm).
+ DWORD m_cRemainingEntries; // Number of entries remaining in hot/cold section
+ // (Hot, Cold).
+ };
+ };
+
+#ifndef DACCESS_COMPILE
+ // Base constructor. Call this from your derived constructor to provide the owning module, loader heap and
+ // initial number of buckets (which must be non-zero). Module must be provided if this hash is to be
+ // serialized into an ngen image. It is exposed to the derived hash class (many need it) but otherwise is
+ // only used to locate a loader heap for allocating bucket lists and entries unless an alternative heap is
+ // provided. Note that the heap provided is not serialized (so you'll allocate from that heap at
+ // ngen-time, but revert to allocating from the module's heap at runtime). If no Module pointer is
+ // supplied (non-ngen'd hash table) you must provide a direct heap pointer.
+ NgenHashTable(Module *pModule, LoaderHeap *pHeap, DWORD cInitialBuckets);
+
+ // Allocate an uninitialized entry for the hash table (it's not inserted). The AllocMemTracker is optional
+ // and may be specified as NULL for untracked allocations. This is split from the hash insertion logic so
+ // that callers can pre-allocate entries and then perform insertions which cannot fault.
+ VALUE *BaseAllocateEntry(AllocMemTracker *pamTracker);
+
+    // Insert an entry previously allocated via BaseAllocateEntry (you cannot allocate entries in any other
+ // manner) and associated with the given hash value. The entry should have been initialized prior to
+ // insertion.
+ void BaseInsertEntry(NgenHashValue iHash, VALUE *pEntry);
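+
+    // A minimal usage sketch (hypothetical sub-class code, for illustration only; MyEntry and m_dwData are
+    // made-up names). The allocate/insert split lets the insertion itself be fault-free:
+    //
+    //   MyEntry *pEntry = BaseAllocateEntry(pamTracker);   // may throw
+    //   pEntry->m_dwData = 42;                             // initialize before insertion
+    //   BaseInsertEntry(iHash, pEntry);                    // cannot fail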
+#endif // !DACCESS_COMPILE
+
+ // Return the number of entries held in the table (does not include entries allocated but not inserted
+ // yet).
+ DWORD BaseGetElementCount();
+
+ // Initializes the iterator context passed by the caller to make it ready to walk every entry in the table
+ // in an arbitrary order. Call pIterator->Next() to retrieve the first entry.
+ void BaseInitIterator(BaseIterator *pIterator);
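+
+    // For illustration (hypothetical caller code), a full-table walk looks like:
+    //
+    //   BaseIterator sIter;
+    //   BaseInitIterator(&sIter);
+    //   for (DPTR(VALUE) pEntry = sIter.Next(); pEntry; pEntry = sIter.Next())
+    //   {   /* process pEntry */   }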
+
+ // Find first entry matching a given hash value (returns NULL on no match). Call BaseFindNextEntryByHash
+ // to iterate the remaining matches (until it returns NULL). The LookupContext supplied by the caller is
+ // initialized by BaseFindFirstEntryByHash and read/updated by BaseFindNextEntryByHash to keep track of
+ // where we are.
+ DPTR(VALUE) BaseFindFirstEntryByHash(NgenHashValue iHash, LookupContext *pContext);
+ DPTR(VALUE) BaseFindNextEntryByHash(LookupContext *pContext);
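+
+    // For illustration (hypothetical caller code), enumerating every entry with a given hash looks like
+    // the following. Note that a hash match is not a key match; callers compare keys themselves:
+    //
+    //   LookupContext sContext;
+    //   DPTR(VALUE) pEntry = BaseFindFirstEntryByHash(iHash, &sContext);
+    //   while (pEntry)
+    //   {
+    //       /* compare pEntry's key against the lookup key here */
+    //       pEntry = BaseFindNextEntryByHash(&sContext);
+    //   }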
+
+#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
+ // Call during ngen to save hash table data structures into the ngen image. Calls derived-class
+ // implementations of ShouldSave to determine which entries should be serialized, IsHotEntry to hot/cold
+ // split the entries and SaveEntry to allow per-entry extension of the saving process.
+ void BaseSave(DataImage *pImage, CorProfileData *pProfileData);
+
+ // Call during ngen to register fixups for hash table data structure fields. Calls derived-class
+ // implementation of FixupEntry to allow per-entry extension of the fixup process.
+ void BaseFixup(DataImage *pImage);
+
+    // Opaque structure used to store state while BaseSave is re-arranging hash entries and also passed to
+ // sub-classes' SaveEntry method to facilitate mapping old entry addresses to their new locations.
+ class EntryMappingTable
+ {
+ public:
+ ~EntryMappingTable();
+
+ // Given an old entry address (pre-BaseSave) return the address of the entry relocated ready for
+ // saving to disk. Note that this address is the (ngen) runtime address, not the disk image address
+ // you can further obtain by calling DataImage::GetImagePointer().
+ VALUE *GetNewEntryAddress(VALUE *pOldEntry);
+
+ private:
+ friend class NgenHashTable<NGEN_HASH_ARGS>;
+
+ // Each Entry holds the mapping from one old-to-new hash entry.
+ struct Entry
+ {
+ VALUE *m_pOldEntry; // Pointer to the user part of the old entry
+ VALUE *m_pNewEntry; // Pointer to the user part of the new version
+ NgenHashValue m_iHashValue; // The hash code of the entry
+ DWORD m_dwNewBucket; // The new bucket index of the entry
+ DWORD m_dwChainOrdinal; // The 0-based position within the chain of that bucket
+ bool m_fHot; // If true this entry was identified as hot by the sub-class
+ };
+
+ Entry *m_pEntries; // Pointer to array of Entries
+ DWORD m_cEntries; // Count of valid entries in the above (may be smaller than
+ // allocated size)
+ };
+#endif // FEATURE_PREJIT && !DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+    // Call during DAC enumeration of the memory regions to be saved in a mini-dump; enumerates all hash
+    // table data structures. Calls the derived-class implementation of EnumMemoryRegionsForEntry to allow
+    // additional per-entry memory to be reported.
+ void BaseEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif // DACCESS_COMPILE
+
+ // Owning module set at hash creation time (possibly NULL if this hash instance is not to be ngen'd).
+ PTR_Module m_pModule;
+
+private:
+    // Internal implementation details. Nothing of interest to sub-classers from here on.
+
+#ifdef FEATURE_PREJIT
+ // This is the format of each hash entry that is persisted to disk (Hot or Cold entries).
+ struct PersistedEntry
+ {
+ VALUE m_sValue; // The sub-class supplied entry layout
+ NgenHashValue m_iHashValue; // The hash code associated with the entry (comes after m_sValue to
+ // minimize chance of pad bytes on a 64-bit system).
+ };
+ typedef DPTR(PersistedEntry) PTR_PersistedEntry;
+ typedef ArrayDPTR(PersistedEntry) APTR_PersistedEntry;
+
+ // This class encapsulates a bucket list identifying chains of related persisted entries. It's compressed
+ // rather than being a simple array, hence the encapsulation.
+    // A bucket list represents a non-empty sequence of buckets, each bucket identified by a zero-based index.
+ // Each bucket holds the index of the entry at the start of the bucket chain and a count of entries in
+ // that chain. (In persisted form hash entries are collated into hot and cold entries which are then
+ // allocated in contiguous blocks: this allows entries to be identified by an index into the entry block).
+ // Buckets with zero entries have an undefined start index (and a zero count obviously).
+ class PersistedBucketList
+ {
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+#ifdef BINDER
+ template<class HashEntry, class HashTableType> friend class NGenHashTableBuilder;
+ friend class MdilModule;
+#endif
+
+ public:
+        // Allocate and initialize a new list with the given count of buckets, configured to hold no more
+        // than the given number of entries and no bucket chain longer than the specified maximum. These
+ // two maximums allow the implementation to choose an optimal data format for the bucket list at
+ // runtime and are enforced by asserts in the debug build.
+ static PersistedBucketList *CreateList(DWORD cBuckets, DWORD cEntries, DWORD cMaxEntriesInBucket);
+
+ // For the given bucket set the index of the initial entry and the count of entries in the chain. If
+ // the count is zero the initial entry index is meaningless and ignored.
+ void SetBucket(DWORD dwIndex, DWORD dwFirstEntry, DWORD cEntries);
+
+ // Get the size in bytes of this entire bucket list (need to pass in the bucket count since we save
+ // space by not storing it here, but we do validate this in debug mode).
+ size_t GetSize(DWORD cBuckets);
+
+ // Get the initial entry index and entry count for the given bucket. Initial entry index value is
+ // undefined when count comes back as zero.
+ void GetBucket(DWORD dwIndex, DWORD *pdwFirstEntry, DWORD *pdwCount);
+
+ // Simplified initial entry index when you don't need the count (don't call this for buckets with zero
+ // entries).
+ DWORD GetInitialEntry(DWORD dwIndex);
+
+ private:
+ // Return the number of bits required to express a unique ID for the number of entities given.
+ static DWORD BitsRequired(DWORD cEntities);
+
+ // Return the minimum size (in bytes) of each bucket list entry that can express all buckets given the
+ // max count of entries and entries in a single bucket chain.
+ static DWORD GetBucketSize(DWORD cEntries, DWORD cMaxEntriesInBucket);
+
+ // Each bucket is represented by a variable sized bitfield (16, 32 or 64 bits) whose low-order bits
+ // contain the index of the first entry in the chain and higher-order (just above the initial entry
+ // bits) contain the count of entries in the chain.
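+        //
+        // Worked example (illustrative numbers only): with cEntries == 200, BitsRequired(200) == 8, so
+        // m_dwEntryCountShift == 8 and m_dwInitialEntryMask == 0xff. A bucket whose chain starts at entry
+        // index 42 and holds 3 entries is then encoded as (3 << 8) | 42 == 0x032a, which fits the 2-byte
+        // bucket format.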
+
+ DWORD m_cbBucket; // The size in bytes of each bucket descriptor (2, 4 or 8)
+ DWORD m_dwInitialEntryMask; // The bitmask used to extract the initial entry index from a bucket
+ DWORD m_dwEntryCountShift; // The bit shift used to extract the entry count from a bucket
+
+#ifdef _DEBUG
+ // In debug mode we remember more initial state to catch common errors.
+ DWORD m_cBuckets; // Number of buckets in the list
+ DWORD m_cEntries; // Total number of entries mapped by the list
+ DWORD m_cMaxEntriesInBucket; // Largest bucket chain in the list
+#endif
+ };
+ typedef DPTR(PersistedBucketList) PTR_PersistedBucketList;
+
+ // Pointers and counters for entries and buckets persisted to disk during ngen. Collected into a structure
+ // because this logic is replicated for Hot and Cold entries so we can factor some common code.
+ struct PersistedEntries
+ {
+ APTR_PersistedEntry m_pEntries; // Pointer to a contiguous block of PersistedEntry structures
+ // (NULL if zero entries)
+ PTR_PersistedBucketList m_pBuckets; // Pointer to abstracted bucket list mapping above entries
+                                            // into a hash (NULL iff there are zero buckets, which is
+                                            // the case exactly when there are zero entries)
+ DWORD m_cEntries; // Count of entries in the above block
+ DWORD m_cBuckets; // Count of buckets in the above bucket list
+ };
+#endif // FEATURE_PREJIT
+
+    // This is the format of a Warm entry, defined for our purposes to be a non-persisted entry (i.e. one
+    // created at runtime or during the creation of the ngen image itself).
+ struct VolatileEntry;
+ typedef DPTR(struct VolatileEntry) PTR_VolatileEntry;
+ struct VolatileEntry
+ {
+ VALUE m_sValue; // The derived-class format of an entry
+ PTR_VolatileEntry m_pNextEntry; // Pointer to the next entry in the bucket chain (or NULL)
+ NgenHashValue m_iHashValue; // The hash value associated with the entry
+ };
+
+ // Types of hash entry.
+ enum EntryType
+ {
+        Cold, // Persisted, profiling suggests this data is typically not read
+ Warm, // Volatile (in-memory)
+ Hot // Persisted, profiling suggests this data is probably read (or no profiling data was available)
+ };
+
+#ifdef FEATURE_PREJIT
+ // Find the first persisted entry (hot or cold based on pEntries) that matches the given hash. Looks only
+ // in the persisted block given (i.e. searches only hot *or* cold entries). Returns NULL on failure.
+ // Otherwise returns pointer to the derived class portion of the entry and initializes the provided
+ // LookupContext to allow enumeration of any further matches.
+ DPTR(VALUE) FindPersistedEntryByHash(PersistedEntries *pEntries, NgenHashValue iHash, LookupContext *pContext);
+#endif // FEATURE_PREJIT
+
+ // Find the first volatile (warm) entry that matches the given hash. Looks only at warm entries. Returns
+ // NULL on failure. Otherwise returns pointer to the derived class portion of the entry and initializes
+ // the provided LookupContext to allow enumeration of any further matches.
+ DPTR(VALUE) FindVolatileEntryByHash(NgenHashValue iHash, LookupContext *pContext);
+
+#ifndef DACCESS_COMPILE
+ // Determine loader heap to be used for allocation of entries and bucket lists.
+ LoaderHeap *GetHeap();
+
+ // Increase the size of the bucket list in order to reduce the size of bucket chains. Does nothing on
+ // failure to allocate (since this impacts perf, not correctness).
+ void GrowTable();
+
+    // Returns the next prime larger than (or equal to) the number given.
+ DWORD NextLargestPrime(DWORD dwNumber);
+#endif // !DACCESS_COMPILE
+
+ // Loader heap provided at construction time. May be NULL (in which case m_pModule must *not* be NULL).
+ LoaderHeap *m_pHeap;
+
+ // Fields related to the runtime (volatile or warm) part of the hash.
+ DPTR(PTR_VolatileEntry) m_pWarmBuckets; // Pointer to a simple bucket list (array of VolatileEntry pointers)
+ DWORD m_cWarmBuckets; // Count of buckets in the above array (always non-zero)
+ DWORD m_cWarmEntries; // Count of elements in the warm section of the hash
+
+#ifdef FEATURE_PREJIT
+ PersistedEntries m_sHotEntries; // Hot persisted hash entries (if any)
+ PersistedEntries m_sColdEntries; // Cold persisted hash entries (if any)
+
+ DWORD m_cInitialBuckets; // Initial number of warm buckets we started with. Only used
+ // to reset warm bucket count in ngen-persisted table.
+#endif // FEATURE_PREJIT
+};
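+
+// For illustration (hypothetical sub-class, not part of this file; assumes the NGEN_HASH_PARAMS ordering
+// FINAL_CLASS, VALUE, SCALE_FACTOR): a concrete hash passes itself as FINAL_CLASS, its entry layout as
+// VALUE and a bucket growth factor, then supplies the per-entry callbacks the base class statically
+// downcalls into (ShouldSave, IsHotEntry, SaveEntry, FixupEntry and EnumMemoryRegionsForEntry):
+//
+//   class MyHashTable : public NgenHashTable<MyHashTable, MyEntry, 2>
+//   {
+//   public:
+//       MyHashTable(Module *pModule, LoaderHeap *pHeap, DWORD cInitialBuckets) :
+//           NgenHashTable<MyHashTable, MyEntry, 2>(pModule, pHeap, cInitialBuckets) {}
+//       // ... per-entry callbacks go here ...
+//   };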
+
+// Abstraction around cross-hash entry references (e.g. EEClassHashTable, where entries for nested types point
+// to entries for their enclosing types). Under the covers we use a relative pointer which avoids the need to
+// allocate a base relocation fixup and the resulting write into the entry at load time. The abstraction hides
+// some of the complexity needed to achieve this.
+template <NGEN_HASH_PARAMS>
+class NgenHashEntryRef
+{
+public:
+ // Get a pointer to the referenced entry.
+ DPTR(VALUE) Get();
+
+#ifndef DACCESS_COMPILE
+ // Set the reference to point to the given entry.
+ void Set(VALUE *pEntry);
+
+#ifdef FEATURE_PREJIT
+ // Call this during the ngen Fixup phase to adjust the relative pointer to account for ngen image layout.
+ void Fixup(DataImage *pImage, NgenHashTable<NGEN_HASH_ARGS> *pTable);
+#endif // FEATURE_PREJIT
+#endif // !DACCESS_COMPILE
+
+private:
+ RelativePointer<DPTR(VALUE)> m_rpEntryRef; // Entry ref encoded as a delta from this field's location.
+};
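+
+// A note on the encoding (a sketch of the idea, not the precise RelativePointer implementation): the field
+// stores the delta between the referenced entry's address and the field's own address, so Get() amounts
+// conceptually to dac_cast<DPTR(VALUE)>(dac_cast<TADDR>(&m_rpEntryRef) + delta). Since both ends of the
+// reference live in the same image and move together when it is rebased, no base relocation is needed.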
+
+#endif // __NGEN_HASH_INCLUDED
diff --git a/src/vm/ngenhash.inl b/src/vm/ngenhash.inl
new file mode 100644
index 0000000000..02e396e85a
--- /dev/null
+++ b/src/vm/ngenhash.inl
@@ -0,0 +1,1523 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+// Abstract base class implementation of a hash table suitable for efficient serialization into an ngen image.
+// See NgenHash.h for a more detailed description.
+//
+
+// Our implementation embeds entry data supplied by the hash sub-class into a larger entry structure
+// containing NgenHash metadata. We often end up returning pointers to the inner entry to sub-class code and
+// doing this in a DAC-friendly fashion involves some DAC gymnastics. The following couple of macros factor
+// those complexities out.
+#define VALUE_FROM_VOLATILE_ENTRY(_ptr) dac_cast<DPTR(VALUE)>(PTR_TO_MEMBER_TADDR(VolatileEntry, (_ptr), m_sValue))
+#define VALUE_FROM_PERSISTED_ENTRY(_ptr) dac_cast<DPTR(VALUE)>(PTR_TO_MEMBER_TADDR(PersistedEntry, (_ptr), m_sValue))
+
+// We provide a mechanism for the sub-class to extend per-entry operations via a callback mechanism where the
+// sub-class implements methods with a certain name and signature (details in the module header for
+// NgenHash.h). We could have used virtual methods, but this adds a needless indirection since all the details
+// are known statically. In order to have a base class call a method defined only in a sub-class, however,
+// we need a little pointer trickery. The following macro hides that.
+#define DOWNCALL(_method) ((FINAL_CLASS*)this)->_method
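+
+// For illustration (hypothetical names): with FINAL_CLASS == MyHashTable, DOWNCALL(ShouldSave)(pImage, pValue)
+// expands to ((MyHashTable*)this)->ShouldSave(pImage, pValue), binding statically to the sub-class method
+// with no virtual dispatch.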
+
+#ifndef DACCESS_COMPILE
+
+// Base constructor. Call this from your derived constructor to provide the owning module, loader heap and
+// initial number of buckets (which must be non-zero). Module must be provided if this hash is to be
+// serialized into an ngen image. It is exposed to the derived hash class (many need it) but otherwise is only
+// used to locate a loader heap for allocating bucket lists and entries unless an alternative heap is
+// provided. Note that the heap provided is not serialized (so you'll allocate from that heap at ngen-time,
+// but revert to allocating from the module's heap at runtime). If no Module pointer is supplied (non-ngen'd
+// hash table) you must provide a direct heap pointer.
+template <NGEN_HASH_PARAMS>
+NgenHashTable<NGEN_HASH_ARGS>::NgenHashTable(Module *pModule, LoaderHeap *pHeap, DWORD cInitialBuckets)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // An invariant in the code is that we always have a non-zero number of warm buckets.
+ _ASSERTE(cInitialBuckets > 0);
+
+ // At least one of module or heap must have been specified or we won't know how to allocate entries and
+ // buckets.
+ _ASSERTE(pModule || pHeap);
+ m_pModule = pModule;
+ m_pHeap = pHeap;
+
+ S_SIZE_T cbBuckets = S_SIZE_T(sizeof(VolatileEntry*)) * S_SIZE_T(cInitialBuckets);
+
+ m_cWarmEntries = 0;
+ m_cWarmBuckets = cInitialBuckets;
+ m_pWarmBuckets = (PTR_VolatileEntry*)(void*)GetHeap()->AllocMem(cbBuckets);
+
+ // Note: Memory allocated on loader heap is zero filled
+ // memset(m_pWarmBuckets, 0, sizeof(VolatileEntry*) * cInitialBuckets);
+
+#ifdef FEATURE_PREJIT
+ memset(&m_sHotEntries, 0, sizeof(PersistedEntries));
+ memset(&m_sColdEntries, 0, sizeof(PersistedEntries));
+ m_cInitialBuckets = cInitialBuckets;
+#endif // FEATURE_PREJIT
+}
+
+// Allocate an uninitialized entry for the hash table (it's not inserted). The AllocMemTracker is optional and
+// may be specified as NULL for untracked allocations. This is split from the hash insertion logic so that
+// callers can pre-allocate entries and then perform insertions which cannot fault.
+template <NGEN_HASH_PARAMS>
+VALUE *NgenHashTable<NGEN_HASH_ARGS>::BaseAllocateEntry(AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+    // Faults are forbidden in BaseInsertEntry, so make the table writeable now, while faults are still allowed.
+ EnsureWritablePages(this);
+ EnsureWritablePages(this->m_pWarmBuckets, m_cWarmBuckets * sizeof(PTR_VolatileEntry));
+
+ TaggedMemAllocPtr pMemory = GetHeap()->AllocMem(S_SIZE_T(sizeof(VolatileEntry)));
+
+ VolatileEntry *pEntry;
+ if (pamTracker)
+ pEntry = (VolatileEntry*)pamTracker->Track(pMemory);
+ else
+ pEntry = pMemory.cast<VolatileEntry*>();
+
+#ifdef _DEBUG
+ // In debug builds try and catch cases where code attempts to use entries not allocated via this method.
+ pEntry->m_pNextEntry = (VolatileEntry*)0x12345678;
+#endif
+
+ return &pEntry->m_sValue;
+}
+
+// Determine loader heap to be used for allocation of entries and bucket lists.
+template <NGEN_HASH_PARAMS>
+LoaderHeap *NgenHashTable<NGEN_HASH_ARGS>::GetHeap()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Explicitly provided heap takes priority.
+ if (m_pHeap)
+ return m_pHeap;
+
+ // If not specified then we fall back to the owning module's heap (a module must have been specified in
+ // this case).
+ _ASSERTE(m_pModule != NULL);
+ return m_pModule->GetAssembly()->GetLowFrequencyHeap();
+}
+
+// Insert an entry previously allocated via BaseAllocateEntry (you cannot allocate entries in any other
+// manner) and associated with the given hash value. The entry should have been initialized prior to
+// insertion.
+template <NGEN_HASH_PARAMS>
+void NgenHashTable<NGEN_HASH_ARGS>::BaseInsertEntry(NgenHashValue iHash, VALUE *pEntry)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // We are always guaranteed at least one warm bucket (which is important here: some hash table sub-classes
+ // require entry insertion to be fault free).
+ _ASSERTE(m_cWarmBuckets > 0);
+
+ // Recover the volatile entry pointer from the sub-class entry pointer passed to us. In debug builds
+ // attempt to validate that this transform is really valid and the caller didn't attempt to allocate the
+ // entry via some other means than BaseAllocateEntry().
+ PTR_VolatileEntry pVolatileEntry = (PTR_VolatileEntry)((BYTE*)pEntry - offsetof(VolatileEntry, m_sValue));
+ _ASSERTE(pVolatileEntry->m_pNextEntry == (VolatileEntry*)0x12345678);
+
+ // Remember the entry hash code.
+ pVolatileEntry->m_iHashValue = iHash;
+
+ // Compute which bucket the entry belongs in based on the hash.
+ DWORD dwBucket = iHash % m_cWarmBuckets;
+
+ // Prepare to link the new entry at the head of the bucket chain.
+ pVolatileEntry->m_pNextEntry = m_pWarmBuckets[dwBucket];
+
+ // Make sure that all writes to the entry are visible before publishing the entry.
+ MemoryBarrier();
+
+ // Publish the entry by pointing the bucket at it.
+ m_pWarmBuckets[dwBucket] = pVolatileEntry;
+
+ m_cWarmEntries++;
+
+ // If the insertion pushed the table load over our limit then attempt to grow the bucket list. Note that
+ // we ignore any failure (this is a performance operation and is not required for correctness).
+ if (m_cWarmEntries > (2 * m_cWarmBuckets))
+ GrowTable();
+}
+
+// Increase the size of the bucket list in order to reduce the size of bucket chains. Does nothing on failure
+// to allocate (since this impacts perf, not correctness).
+template <NGEN_HASH_PARAMS>
+void NgenHashTable<NGEN_HASH_ARGS>::GrowTable()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // If we can't increase the number of buckets, we lose perf but not correctness. So we won't report this
+ // error to our caller.
+ FAULT_NOT_FATAL();
+
+ // Make the new bucket table larger by the scale factor requested by the subclass (but also prime).
+ DWORD cNewBuckets = NextLargestPrime(m_cWarmBuckets * SCALE_FACTOR);
+ S_SIZE_T cbNewBuckets = S_SIZE_T(cNewBuckets) * S_SIZE_T(sizeof(PTR_VolatileEntry));
+ PTR_VolatileEntry *pNewBuckets = (PTR_VolatileEntry*)(void*)GetHeap()->AllocMem_NoThrow(cbNewBuckets);
+ if (!pNewBuckets)
+ return;
+
+ // All buckets are initially empty.
+ // Note: Memory allocated on loader heap is zero filled
+ // memset(pNewBuckets, 0, cNewBuckets * sizeof(PTR_VolatileEntry));
+
+ // Run through the old table and transfer all the entries. Be sure not to mess with the integrity of the
+ // old table while we are doing this, as there can be concurrent readers! Note that it is OK if the
+ // concurrent reader misses out on a match, though - they will have to acquire the lock on a miss & try
+ // again.
+ for (DWORD i = 0; i < m_cWarmBuckets; i++)
+ {
+ PTR_VolatileEntry pEntry = m_pWarmBuckets[i];
+
+ // Try to lock out readers from scanning this bucket. This is obviously a race which may fail.
+ // However, note that it's OK if somebody is already in the list - it's OK if we mess with the bucket
+ // groups, as long as we don't destroy anything. The lookup function will still do appropriate
+ // comparison even if it wanders aimlessly amongst entries while we are rearranging things. If a
+ // lookup finds a match under those circumstances, great. If not, they will have to acquire the lock &
+ // try again anyway.
+ m_pWarmBuckets[i] = NULL;
+
+ while (pEntry != NULL)
+ {
+ DWORD dwNewBucket = pEntry->m_iHashValue % cNewBuckets;
+ PTR_VolatileEntry pNextEntry = pEntry->m_pNextEntry;
+
+ pEntry->m_pNextEntry = pNewBuckets[dwNewBucket];
+ pNewBuckets[dwNewBucket] = pEntry;
+
+ pEntry = pNextEntry;
+ }
+ }
+
+ // Make sure that all writes are visible before publishing the new array.
+ MemoryBarrier();
+ m_pWarmBuckets = pNewBuckets;
+
+    // The new number of buckets has to be published last (prior to this readers may miscalculate a bucket
+    // index, but the result will always be in range and they'll simply walk the wrong chain and get a miss,
+    // prompting a retry under the lock). If we let the count become visible unordered with respect to the
+    // bucket array itself, a reader could potentially read buckets from beyond the end of the old bucket
+    // list.
+ MemoryBarrier();
+ m_cWarmBuckets = cNewBuckets;
+}
+
+// Returns the next prime larger than (or equal to) the number given.
+template <NGEN_HASH_PARAMS>
+DWORD NgenHashTable<NGEN_HASH_ARGS>::NextLargestPrime(DWORD dwNumber)
+{
+ for (DWORD i = 0; i < COUNTOF(g_rgPrimes); i++)
+ if (g_rgPrimes[i] >= dwNumber)
+ {
+ dwNumber = g_rgPrimes[i];
+ break;
+ }
+
+ return dwNumber;
+}
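+
+// Note that if dwNumber is larger than every entry in g_rgPrimes it is returned unchanged; callers rely
+// only on the result being no smaller than the request, not on it actually being prime.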
+#endif // !DACCESS_COMPILE
+
+// Return the number of entries held in the table (does not include entries allocated but not inserted yet).
+template <NGEN_HASH_PARAMS>
+DWORD NgenHashTable<NGEN_HASH_ARGS>::BaseGetElementCount()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_cWarmEntries
+#ifdef FEATURE_PREJIT
+ + m_sHotEntries.m_cEntries + m_sColdEntries.m_cEntries
+#endif
+ ;
+}
+
+// Find first entry matching a given hash value (returns NULL on no match). Call BaseFindNextEntryByHash to
+// iterate the remaining matches (until it returns NULL). The LookupContext supplied by the caller is
+// initialized by BaseFindFirstEntryByHash and read/updated by BaseFindNextEntryByHash to keep track of where
+// we are.
+template <NGEN_HASH_PARAMS>
+DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::BaseFindFirstEntryByHash(NgenHashValue iHash, LookupContext *pContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pContext));
+ }
+ CONTRACTL_END;
+
+ DPTR(VALUE) pEntry;
+
+#ifdef FEATURE_PREJIT
+ // Look in the hot entries first.
+ pEntry = FindPersistedEntryByHash(&m_sHotEntries, iHash, pContext);
+ if (pEntry)
+ return pEntry;
+#endif // FEATURE_PREJIT
+
+ // Then the warm entries.
+ pEntry = FindVolatileEntryByHash(iHash, pContext);
+ if (pEntry)
+ return pEntry;
+
+#ifdef FEATURE_PREJIT
+ // And finally the cold entries.
+ return FindPersistedEntryByHash(&m_sColdEntries, iHash, pContext);
+#else // FEATURE_PREJIT
+ return NULL;
+#endif // FEATURE_PREJIT
+}
+
+// Find the next entry matching the hash value passed to a prior BaseFindFirstEntryByHash call (returns
+// NULL once all matches have been enumerated). The LookupContext supplied by the caller must be the one
+// initialized by BaseFindFirstEntryByHash; it is read and updated here to keep track of where we are.
+template <NGEN_HASH_PARAMS>
+DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::BaseFindNextEntryByHash(LookupContext *pContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pContext));
+ }
+ CONTRACTL_END;
+
+ NgenHashValue iHash;
+
+ switch (pContext->m_eType)
+ {
+#ifdef FEATURE_PREJIT
+ case Hot:
+ case Cold:
+ {
+ // Fetch the entry we were looking at last from the context and remember the corresponding hash code.
+ PTR_PersistedEntry pPersistedEntry = dac_cast<PTR_PersistedEntry>(pContext->m_pEntry);
+ iHash = pPersistedEntry->m_iHashValue;
+
+ // Iterate while there are still entries left in the bucket chain.
+ while (pContext->m_cRemainingEntries)
+ {
+ // Advance to next entry, reducing the number of entries left to scan.
+ pPersistedEntry++;
+ pContext->m_cRemainingEntries--;
+
+ if (pPersistedEntry->m_iHashValue == iHash)
+ {
+ // Found a match on hash code. Update our find context to indicate where we got to and return
+ // a pointer to the sub-class portion of the entry.
+ pContext->m_pEntry = dac_cast<TADDR>(pPersistedEntry);
+ return VALUE_FROM_PERSISTED_ENTRY(pPersistedEntry);
+ }
+ }
+
+ // We didn't find a match.
+ if (pContext->m_eType == Hot)
+ {
+ // If we were searching the hot entries then we should try the warm entries next.
+ DPTR(VALUE) pNext = FindVolatileEntryByHash(iHash, pContext);
+ if (pNext)
+ return pNext;
+
+ // If that didn't work try the cold entries.
+ return FindPersistedEntryByHash(&m_sColdEntries, iHash, pContext);
+ }
+
+        // We were already searching cold entries, so a failure here means the entry is not in the table.
+ return NULL;
+ }
+#endif // FEATURE_PREJIT
+
+ case Warm:
+ {
+ // Fetch the entry we were looking at last from the context and remember the corresponding hash code.
+ PTR_VolatileEntry pVolatileEntry = dac_cast<PTR_VolatileEntry>(pContext->m_pEntry);
+ iHash = pVolatileEntry->m_iHashValue;
+
+ // Iterate over the bucket chain.
+ while (pVolatileEntry->m_pNextEntry)
+ {
+ // Advance to the next entry.
+ pVolatileEntry = pVolatileEntry->m_pNextEntry;
+ if (pVolatileEntry->m_iHashValue == iHash)
+ {
+ // Found a match on hash code. Update our find context to indicate where we got to and return
+ // a pointer to the sub-class portion of the entry.
+ pContext->m_pEntry = dac_cast<TADDR>(pVolatileEntry);
+ return VALUE_FROM_VOLATILE_ENTRY(pVolatileEntry);
+ }
+ }
+
+        // We didn't find a match; fall through to the cold entries.
+#ifdef FEATURE_PREJIT
+ return FindPersistedEntryByHash(&m_sColdEntries, iHash, pContext);
+#else
+ return NULL;
+#endif
+ }
+
+ default:
+ _ASSERTE(!"Unknown NgenHashTable entry type");
+ return NULL;
+ }
+}
+
+#ifdef FEATURE_PREJIT
+
+// Allocate and initialize a new list with the given count of buckets, configured to hold no more than the
+// given number of entries and no bucket chain longer than the specified maximum. These two maximums allow
+// the implementation to choose an optimal data format for the bucket list at runtime and are enforced by
+// asserts in the debug build.
+// static
+template <NGEN_HASH_PARAMS>
+typename NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList *NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList::CreateList(DWORD cBuckets, DWORD cEntries, DWORD cMaxEntriesInBucket)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // The size of each bucket depends on the number of entries we need to store and how big a bucket chain
+ // ever gets.
+ DWORD cbBucket = GetBucketSize(cEntries, cMaxEntriesInBucket);
+
+ // Allocate enough memory to store the bucket list header and bucket array.
+ S_SIZE_T cbBuckets = S_SIZE_T(sizeof(PersistedBucketList)) + (S_SIZE_T(cbBucket) * S_SIZE_T(cBuckets));
+ if (cbBuckets.IsOverflow())
+ COMPlusThrowOM();
+ PersistedBucketList *pBucketList = (PersistedBucketList*)(new BYTE[cbBuckets.Value()]);
+
+#ifdef _DEBUG
+ // In debug builds we store all the input parameters to validate subsequent requests. In retail none of
+ // this data is needed.
+ pBucketList->m_cBuckets = cBuckets;
+ pBucketList->m_cEntries = cEntries;
+ pBucketList->m_cMaxEntriesInBucket = cMaxEntriesInBucket;
+#endif // _DEBUG
+
+ pBucketList->m_cbBucket = cbBucket;
+ pBucketList->m_dwEntryCountShift = BitsRequired(cEntries);
+ pBucketList->m_dwInitialEntryMask = (1 << pBucketList->m_dwEntryCountShift) - 1;
+
+ // Zero all the buckets (empty all the bucket chains).
+ memset(pBucketList + 1, 0, cBuckets * cbBucket);
+
+ return pBucketList;
+}
+
+// Get the size in bytes of this entire bucket list (need to pass in the bucket count since we save space by
+// not storing it here, but we do validate this in debug mode).
+template <NGEN_HASH_PARAMS>
+size_t NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList::GetSize(DWORD cBuckets)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(cBuckets == m_cBuckets);
+ return sizeof(PersistedBucketList) + (cBuckets * m_cbBucket);
+}
+
+// Get the initial entry index and entry count for the given bucket. Initial entry index value is undefined
+// when count comes back as zero.
+template <NGEN_HASH_PARAMS>
+void NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList::GetBucket(DWORD dwIndex, DWORD *pdwFirstEntry, DWORD *pdwCount)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(dwIndex < m_cBuckets);
+
+ // Find the start of the bucket we're interested in based on the index and size chosen for buckets in this
+ // list instance.
+ TADDR pBucket = dac_cast<TADDR>(this) + sizeof(PersistedBucketList) + (dwIndex * m_cbBucket);
+
+ // Handle each format of bucket separately. In all cases read the correct number of bytes to form one
+ // bitfield, extract the low order bits to retrieve the initial entry index and shift down the remaining
+ // bits to obtain the entry count.
+ switch (m_cbBucket)
+ {
+ case 2:
+ {
+ _ASSERTE(m_dwEntryCountShift < 16 && m_dwInitialEntryMask < 0xffff);
+
+ WORD wBucketContents = *dac_cast<PTR_WORD>(pBucket);
+
+ *pdwFirstEntry = wBucketContents & m_dwInitialEntryMask;
+ *pdwCount = wBucketContents >> m_dwEntryCountShift;
+
+ break;
+ }
+
+ case 4:
+ {
+ _ASSERTE(m_dwEntryCountShift < 32 && m_dwInitialEntryMask < 0xffffffff);
+
+ DWORD dwBucketContents = *dac_cast<PTR_DWORD>(pBucket);
+
+ *pdwFirstEntry = dwBucketContents & m_dwInitialEntryMask;
+ *pdwCount = dwBucketContents >> m_dwEntryCountShift;
+
+ break;
+ }
+
+ case 8:
+ {
+ _ASSERTE(m_dwEntryCountShift < 64);
+
+ ULONG64 qwBucketContents = *dac_cast<PTR_ULONG64>(pBucket);
+
+ *pdwFirstEntry = (DWORD)(qwBucketContents & m_dwInitialEntryMask);
+ *pdwCount = (DWORD)(qwBucketContents >> m_dwEntryCountShift);
+
+ break;
+ }
+
+ default:
+#ifdef DACCESS_COMPILE
+ // Minidumps don't guarantee this will work - memory may not have been dumped, target corrupted, etc.
+ *pdwFirstEntry = 0;
+ *pdwCount = 0;
+#else
+ _ASSERTE(!"Invalid bucket list bucket size");
+#endif
+ }
+
+ _ASSERTE((*pdwFirstEntry < m_cEntries) || (*pdwCount == 0));
+ _ASSERTE(*pdwCount <= m_cMaxEntriesInBucket);
+}
+
+// Simplified initial entry index when you don't need the count (don't call this for buckets with zero
+// entries).
+template <NGEN_HASH_PARAMS>
+DWORD NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList::GetInitialEntry(DWORD dwIndex)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ DWORD dwInitialEntry, dwEntryCount;
+ GetBucket(dwIndex, &dwInitialEntry, &dwEntryCount);
+
+ _ASSERTE(dwEntryCount > 0);
+
+ return dwInitialEntry;
+}
+
+// For the given bucket set the index of the initial entry and the count of entries in the chain. If the count
+// is zero the initial entry index is meaningless and ignored.
+template <NGEN_HASH_PARAMS>
+void NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList::SetBucket(DWORD dwIndex, DWORD dwFirstEntry, DWORD cEntries)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(dwIndex < m_cBuckets);
+ _ASSERTE(cEntries <= m_cMaxEntriesInBucket);
+ if (cEntries > 0)
+ {
+ _ASSERTE(dwFirstEntry < m_cEntries);
+ _ASSERTE(dwFirstEntry <= m_dwInitialEntryMask);
+ }
+
+ // Find the start of the bucket we're interested in based on the index and size chosen for buckets in this
+ // list instance.
+ BYTE *pbBucket = (BYTE*)this + sizeof(PersistedBucketList) + (dwIndex * m_cbBucket);
+
+ // Handle each format of bucket separately. In all cases form a single bitfield with low-order bits
+ // specifying the initial entry index and higher bits containing the entry count. Write this into the
+ // bucket entry using the correct number of bytes.
+    // Widen the count to 64 bits before shifting: for 8-byte buckets the shift amount can exceed 31.
+    ULONG64 qwBucketBits = dwFirstEntry | ((ULONG64)cEntries << m_dwEntryCountShift);
+ switch (m_cbBucket)
+ {
+ case 2:
+ {
+ _ASSERTE(m_dwEntryCountShift < 16 && m_dwInitialEntryMask < 0xffff);
+ *(WORD*)pbBucket = (WORD)qwBucketBits;
+ break;
+ }
+
+ case 4:
+ {
+ _ASSERTE(m_dwEntryCountShift < 32 && m_dwInitialEntryMask < 0xffffffff);
+ *(DWORD*)pbBucket = (DWORD)qwBucketBits;
+ break;
+ }
+
+ case 8:
+ {
+ _ASSERTE(m_dwEntryCountShift < 64);
+ *(ULONG64*)pbBucket = qwBucketBits;
+ break;
+ }
+
+ default:
+ _ASSERTE(!"Invalid bucket list bucket size");
+ }
+}
+
+// Return the number of bits required to express a unique ID for the number of entities given.
+// static
+template <NGEN_HASH_PARAMS>
+DWORD NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList::BitsRequired(DWORD cEntities)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Starting with a bit-mask of the most significant bit and iterating over masks for successively less
+    // significant bits, stop as soon as the mask coincides with a set bit in the value. Simultaneously we're
+ // counting down the bits required to express the range of values implied by seeing the corresponding bit
+ // set in the value (e.g. when we're testing the high bit we know we'd need 32-bits to encode the range of
+ // values that have this bit set). Stop when we get to one bit (we never return 0 bits required, even for
+ // an input value of 0).
+ DWORD dwMask = 0x80000000;
+ DWORD cBits = 32;
+ while (cBits > 1)
+ {
+ if (cEntities & dwMask)
+ return cBits;
+
+ dwMask >>= 1;
+ cBits--;
+ }
+
+ return 1;
+}
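+
+// Worked example: BitsRequired(200) first matches at bit 7 (200 == 0xc8) and returns 8, while
+// BitsRequired(0) and BitsRequired(1) both return the 1-bit minimum.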
+
+// Return the minimum size (in bytes) of each bucket list entry that can express all buckets given the max
+// count of entries and entries in a single bucket chain.
+// static
+template <NGEN_HASH_PARAMS>
+DWORD NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList::GetBucketSize(DWORD cEntries, DWORD cMaxEntriesInBucket)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // We need enough bits to express a start entry index (related to the total number of entries in the
+ // table) and a chain count (so take the maximum chain length into consideration).
+ DWORD cTotalBits = BitsRequired(cEntries) + BitsRequired(cMaxEntriesInBucket);
+
+ // Rather than support complete flexibility (an arbitrary number of bytes to express the combination of
+ // the two bitfields above) we'll just pull out the most useful selection (which simplifies the access
+ // code and potentially might give us a perf edge over the more generalized algorithm).
+
+ // We want naturally aligned bucket entries for access perf, 1 byte entries aren't all that interesting
+ // (most tables won't be small enough to be expressed this way and those that are won't get much benefit
+ // from the extra compression of the bucket list). We also don't believe we'll ever need more than 64
+ // bits. This leaves us with 2, 4 and 8 byte entries. The tables in the current desktop CLR for mscorlib
+ // will fit in the 2-byte category and will give us substantial space saving over the naive implementation
+ // of a bucket with two DWORDs.
+
+ if (cTotalBits <= 16)
+ return 2;
+
+ if (cTotalBits <= 32)
+ return 4;
+
+ // Invariant guaranteed by BitsRequired above.
+ _ASSERTE(cTotalBits <= 64);
+ return 8;
+}
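+
+// Worked example (illustrative numbers only): for 200 total entries with a longest chain of 5,
+// BitsRequired(200) + BitsRequired(5) == 8 + 3 == 11 bits, so the 2-byte bucket format is selected.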
+
+#ifndef DACCESS_COMPILE
+
+// Call during ngen to save hash table data structures into the ngen image. Calls derived-class
+// implementations of ShouldSave to determine which entries should be serialized, IsHotEntry to hot/cold split
+// the entries and SaveEntry to allow per-entry extension of the saving process.
+template <NGEN_HASH_PARAMS>
+void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *pProfileData)
+{
+ STANDARD_VM_CONTRACT;
+
+ // This is a fairly long and complex process but at its heart it's fairly linear. We perform multiple
+ // passes over the data in sequence which might seem slow but everything is arranged to avoid any O(N^2)
+ // algorithms.
+
+ // Persisted hashes had better have supplied an owning module at creation time (otherwise we won't know
+ // how to find a loader heap for further allocations at runtime: we don't know how to serialize a loader
+ // heap pointer).
+ _ASSERTE(m_pModule != NULL);
+
+ // We can only save once during ngen so the hot and cold sections of the hash cannot have been populated
+ // yet.
+ _ASSERTE(m_sHotEntries.m_cEntries == 0 && m_sColdEntries.m_cEntries == 0);
+
+ DWORD i;
+
+ // As we re-arrange volatile warm entries into hot and cold sets of persisted entries we need to keep lots
+ // of intermediate tracking information. We also need to provide a subset of this mapping information to
+ // the sub-class (so it can fix up cross entry references for example). The temporary structure allocated
+ // below performs that function (it will be destructed automatically at the end of this method).
+ EntryMappingTable sEntryMap;
+ sEntryMap.m_cEntries = m_cWarmEntries;
+#ifdef _PREFAST_
+#pragma warning(suppress:6211) // Suppress bogus prefast warning about memory leak (EntryMappingTable acts as a holder)
+#endif
+
+ // The 'typename' keyword shouldn't be necessary, but g++ gets confused without it.
+ sEntryMap.m_pEntries = new typename EntryMappingTable::Entry[m_cWarmEntries];
+
+ //
+ // PHASE 1
+ //
+ // Iterate all the current warm entries, ask the sub-class which of them should be saved into the image
+ // and of those which are hot and which are cold.
+ //
+
+ DWORD cHotEntries = 0;
+ DWORD cColdEntries = 0;
+
+ // Visit each warm bucket.
+ for (i = 0; i < m_cWarmBuckets; i++)
+ {
+ // Iterate through the chain of warm entries for this bucket.
+ VolatileEntry *pOldEntry = m_pWarmBuckets[i];
+ while (pOldEntry)
+ {
+ // Is the current entry being saved into the image?
+ if (DOWNCALL(ShouldSave)(pImage, &pOldEntry->m_sValue))
+ {
+ // Yes, so save the details into the next available slot in the entry map. At this stage we
+ // know the original entry address, the hash value and whether the entry is hot or cold.
+ DWORD dwCurrentEntry = cHotEntries + cColdEntries;
+ sEntryMap.m_pEntries[dwCurrentEntry].m_pOldEntry = &pOldEntry->m_sValue;
+ sEntryMap.m_pEntries[dwCurrentEntry].m_iHashValue = pOldEntry->m_iHashValue;
+
+ // Is the entry hot? When given no profile data we assume cold.
+ if (pProfileData != NULL && DOWNCALL(IsHotEntry)(&pOldEntry->m_sValue, pProfileData))
+ {
+ cHotEntries++;
+ sEntryMap.m_pEntries[dwCurrentEntry].m_fHot = true;
+ }
+ else
+ {
+ cColdEntries++;
+ sEntryMap.m_pEntries[dwCurrentEntry].m_fHot = false;
+ }
+ }
+
+ pOldEntry = pOldEntry->m_pNextEntry;
+ }
+ }
+
+ // Set size of the entry map based on the real number of entries we're going to save.
+ _ASSERTE((cHotEntries + cColdEntries) <= m_cWarmEntries);
+ sEntryMap.m_cEntries = cHotEntries + cColdEntries;
+
+ //
+ // PHASE 2
+ //
+ // Determine the layout of the new hot and cold tables (if applicable). We pick new bucket list sizes
+ // based on the number of entries to go in each table and from that we can calculate the length of each
+    // entry chain off each bucket (which is important both for deriving the maximum chain length used when
+    // picking an optimized encoding for the bucket list and for laying out the new entries in linear
+    // time).
+ //
+ // We need a couple of extra arrays to track bucket chain sizes until we have enough info to allocate the
+ // new bucket lists themselves.
+ //
+
+ // We'll allocate half as many buckets as entries (with at least 1 bucket, or zero if there are no entries
+ // in this section of the hash).
+ DWORD cHotBuckets = cHotEntries ? NextLargestPrime(cHotEntries / 2) : 0;
+ DWORD cColdBuckets = cColdEntries ? NextLargestPrime(cColdEntries / 2) : 0;
+
+ // Allocate arrays to track bucket chain lengths for each hot or cold bucket list (as needed).
+ DWORD *pHotBucketSizes = cHotBuckets ? new DWORD[cHotBuckets] : NULL;
+ memset(pHotBucketSizes, 0, cHotBuckets * sizeof(DWORD));
+
+ DWORD *pColdBucketSizes = cColdBuckets ? new DWORD[cColdBuckets] : NULL;
+ memset(pColdBucketSizes, 0, cColdBuckets * sizeof(DWORD));
+
+ // We'll calculate the maximum bucket chain length separately for hot and cold sections (each has its own
+ // bucket list that might be optimized differently).
+ DWORD cMaxHotChain = 0;
+ DWORD cMaxColdChain = 0;
+
+ // Iterate through all the entries to be saved (linear scan through the entry map we built in phase 1).
+ for (i = 0; i < sEntryMap.m_cEntries; i++)
+ {
+ // The 'typename' keyword shouldn't be necessary, but g++ gets confused without it.
+ typename EntryMappingTable::Entry *pMapEntry = &sEntryMap.m_pEntries[i];
+
+ // For each entry calculate which bucket it will end up in under the revised bucket list. Also record
+ // its order in the bucket chain (first come, first served). Recording this ordinal now is what allows
+ // us to lay out entries into their final order using a linear algorithm in a later phase.
+ if (pMapEntry->m_fHot)
+ {
+ pMapEntry->m_dwNewBucket = pMapEntry->m_iHashValue % cHotBuckets;
+ pMapEntry->m_dwChainOrdinal = pHotBucketSizes[pMapEntry->m_dwNewBucket]++;
+ if (pHotBucketSizes[pMapEntry->m_dwNewBucket] > cMaxHotChain)
+ cMaxHotChain = pHotBucketSizes[pMapEntry->m_dwNewBucket];
+ }
+ else
+ {
+ // The C++ compiler is currently complaining that cColdBuckets could be zero in the modulo
+ // operation below. It cannot due to the logic in this method (if we have a cold entry we'll have
+ // at least one cold bucket, see the assignments above) but the flow is far too complex for the
+ // C++ compiler to follow. Unfortunately it won't be told (the warning can't be disabled and even
+ // an __assume won't work) so we take the hit of generating the useless extra if below.
+ if (cColdBuckets > 0)
+ {
+ pMapEntry->m_dwNewBucket = pMapEntry->m_iHashValue % cColdBuckets;
+ pMapEntry->m_dwChainOrdinal = pColdBucketSizes[pMapEntry->m_dwNewBucket]++;
+ if (pColdBucketSizes[pMapEntry->m_dwNewBucket] > cMaxColdChain)
+ cMaxColdChain = pColdBucketSizes[pMapEntry->m_dwNewBucket];
+ }
+ else
+ _ASSERTE(!"Should be unreachable, see comment above");
+ }
+ }
+
+ //
+ // PHASE 3
+ //
+ // Allocate the new hot and cold bucket lists and entry arrays (as needed). The bucket lists have
+ // optimized layout based on knowledge of the entries they will map (total number of entries and the size
+ // of the largest single bucket chain).
+ //
+
+ if (cHotEntries)
+ {
+ m_sHotEntries.m_cEntries = cHotEntries;
+ m_sHotEntries.m_cBuckets = cHotBuckets;
+ m_sHotEntries.m_pEntries = new PersistedEntry[cHotEntries];
+ m_sHotEntries.m_pBuckets = PersistedBucketList::CreateList(cHotBuckets, cHotEntries, cMaxHotChain);
+ memset(m_sHotEntries.m_pEntries, 0, cHotEntries * sizeof(PersistedEntry)); // NGen determinism
+ }
+
+ if (cColdEntries)
+ {
+ m_sColdEntries.m_cEntries = cColdEntries;
+ m_sColdEntries.m_cBuckets = cColdBuckets;
+ m_sColdEntries.m_pEntries = new PersistedEntry[cColdEntries];
+ m_sColdEntries.m_pBuckets = PersistedBucketList::CreateList(cColdBuckets, cColdEntries, cMaxColdChain);
+ memset(m_sColdEntries.m_pEntries, 0, cColdEntries * sizeof(PersistedEntry)); // NGen determinism
+ }
+
+ //
+ // PHASE 4
+ //
+ // Initialize the bucket lists. We need to set an initial entry index (index into the entry array) and
+ // entry count for each bucket. The counts we already computed in phase 2 and since we're free to order
+ // the entry array however we like, we can compute the initial entry index for each bucket in turn
+ // trivially by laying out all entries for bucket 0 first followed by all entries for bucket 1 etc.
+ //
+ // This also has the nice effect of placing entries in the same bucket chain contiguously (and in the
+ // order that a full hash traversal will take).
+ //
+
+ DWORD dwNextId = 0; // This represents the index of the next entry to start a bucket chain
+ for (i = 0; i < cHotBuckets; i++)
+ {
+ m_sHotEntries.m_pBuckets->SetBucket(i, dwNextId, pHotBucketSizes[i]);
+ dwNextId += pHotBucketSizes[i];
+ }
+ _ASSERTE(dwNextId == m_sHotEntries.m_cEntries);
+
+ dwNextId = 0; // Reset index for the cold entries (remember they have their own table of entries)
+ for (i = 0; i < cColdBuckets; i++)
+ {
+ m_sColdEntries.m_pBuckets->SetBucket(i, dwNextId, pColdBucketSizes[i]);
+ dwNextId += pColdBucketSizes[i];
+ }
+ _ASSERTE(dwNextId == m_sColdEntries.m_cEntries);
+
+ //
+ // PHASE 5
+ //
+ // Determine new addresses for each entry. This is relatively simple since we know the bucket index, the
+ // index of the first entry for that bucket and how far into that chain each entry is located.
+ //
+
+ for (i = 0; i < sEntryMap.m_cEntries; i++)
+ {
+ // The 'typename' keyword shouldn't be necessary, but g++ gets confused without it.
+ typename EntryMappingTable::Entry *pMapEntry = &sEntryMap.m_pEntries[i];
+
+ // Entry block depends on whether this entry is hot or cold.
+ PersistedEntries *pEntries = pMapEntry->m_fHot ? &m_sHotEntries : &m_sColdEntries;
+
+ // We already know the new bucket this entry will go into. Retrieve the index of the first entry in
+ // that bucket chain.
+ DWORD dwBaseChainIndex = pEntries->m_pBuckets->GetInitialEntry(pMapEntry->m_dwNewBucket);
+
+ // This entry will be located at some offset from the index above (we calculated this ordinal in phase
+ // 2).
+ PersistedEntry *pNewEntry = &pEntries->m_pEntries[dwBaseChainIndex + pMapEntry->m_dwChainOrdinal];
+
+ // Record the address of the embedded sub-class hash entry in the map entry (sub-classes will use this
+ // info to map old entry addresses to their new locations).
+ sEntryMap.m_pEntries[i].m_pNewEntry = &pNewEntry->m_sValue;
+
+ // Initialize the new entry. Note that a simple bit-copy is performed on the sub-classes embedded
+ // entry. If fixups are needed they can be performed in the call to SaveEntry in the next phase.
+ pNewEntry->m_sValue = *pMapEntry->m_pOldEntry;
+ pNewEntry->m_iHashValue = pMapEntry->m_iHashValue;
+ }
+
+ //
+ // PHASE 6
+ //
+ // For each entry give the hash sub-class a chance to perform any additional saving or fixups. We pass
+ // both the old and new address of each entry, plus the mapping table so they can map other entry
+ // addresses (if, for example, they have cross-entry pointer fields in their data).
+ //
+ // We ask for each entry whether the saved data will be immutable. This is an optimization: if all
+ // entries turn out to be immutable we will save the entire entry array in a read-only (shareable)
+ // section.
+ //
+
+ bool fAllEntriesImmutable = true;
+ for (i = 0; i < sEntryMap.m_cEntries; i++)
+ if (!DOWNCALL(SaveEntry)(pImage, pProfileData, sEntryMap.m_pEntries[i].m_pOldEntry, sEntryMap.m_pEntries[i].m_pNewEntry, &sEntryMap))
+ fAllEntriesImmutable = false;
+
+ // We're mostly done. Now just some cleanup and the actual DataImage storage operations.
+
+ // We don't need the bucket size tracking arrays any more.
+ delete [] pHotBucketSizes;
+ delete [] pColdBucketSizes;
+
+ // If there are any hot entries store the entry array and bucket list.
+ if (cHotEntries)
+ {
+ pImage->StoreStructure(m_sHotEntries.m_pEntries,
+ static_cast<ULONG>(sizeof(PersistedEntry) * cHotEntries),
+ fAllEntriesImmutable ? DataImage::ITEM_NGEN_HASH_ENTRIES_RO_HOT : DataImage::ITEM_NGEN_HASH_ENTRIES_HOT);
+
+ pImage->StoreStructure(m_sHotEntries.m_pBuckets,
+ static_cast<ULONG>(m_sHotEntries.m_pBuckets->GetSize(m_sHotEntries.m_cBuckets)),
+ DataImage::ITEM_NGEN_HASH_BUCKETLIST_HOT);
+ }
+
+ // If there are any cold entries store the entry array and bucket list.
+ if (cColdEntries)
+ {
+ pImage->StoreStructure(m_sColdEntries.m_pEntries,
+ static_cast<ULONG>(sizeof(PersistedEntry) * cColdEntries),
+ fAllEntriesImmutable ? DataImage::ITEM_NGEN_HASH_ENTRIES_RO_COLD : DataImage::ITEM_NGEN_HASH_ENTRIES_COLD);
+
+ pImage->StoreStructure(m_sColdEntries.m_pBuckets,
+ static_cast<ULONG>(m_sColdEntries.m_pBuckets->GetSize(m_sColdEntries.m_cBuckets)),
+ DataImage::ITEM_NGEN_HASH_BUCKETLIST_COLD);
+ }
+
+ // Store the root data structure itself.
+ pImage->StoreStructure(this, sizeof(FINAL_CLASS), cHotEntries ?
+ DataImage::ITEM_NGEN_HASH_HOT : DataImage::ITEM_NGEN_HASH_COLD);
+
+ // We've moved the warm entries to hot and cold sections, so reset the warm section of the table. We only
+ // do this on the copy of the table that's going to be saved into the ngen image. This is important since
+ // (especially in the case of generics) we might continue to access this table throughout the rest of the
+ // save/arrange/fixup process. Leaving two copies of saved entries in the table (hot or cold plus warm)
+ // doesn't have any real impact, but removing the warm entries could be problematic where the entry was
+ // culled from the ngen image. In those cases we'll get a miss on the lookup with the result that the
+ // caller might try to add the type back to the table, something that is prohibited in the debug build
+ // during the ngen save/arrange/fixup phases.
+
+ // Reset the warm buckets to their original size or a fairly restrictive cap. These (empty) buckets will
+ // be saved into the ngen image and form the basis for further entries added at runtime. Thus we have a
+ // trade-off between storing dead space in the ngen image and having to re-size the bucket list at
+    // runtime. Note that we can't save a zero-sized bucket list: the invariant we have is that there is
+    // always a non-zero number of buckets available when we come to do an insertion (since insertions cannot
+ // fail). An alternative strategy would be to initialize these buckets at ngen image load time.
+ _ASSERTE(m_cWarmBuckets >= m_cInitialBuckets);
+ DWORD cNewWarmBuckets = min(m_cInitialBuckets, 11);
+
+ // Create the ngen version of the warm buckets.
+ pImage->StoreStructure(m_pWarmBuckets,
+ cNewWarmBuckets * sizeof(VolatileEntry*),
+ DataImage::ITEM_NGEN_HASH_HOT);
+
+ // Reset the ngen-version of the table to have no warm entries and the reduced warm bucket count.
+ NgenHashTable<NGEN_HASH_ARGS> *pNewTable = (NgenHashTable<NGEN_HASH_ARGS>*)pImage->GetImagePointer(this);
+ pNewTable->m_cWarmEntries = 0;
+ pNewTable->m_cWarmBuckets = cNewWarmBuckets;
+
+ // Zero-out the ngen version of the warm buckets.
+ VolatileEntry *pNewBuckets = (VolatileEntry*)pImage->GetImagePointer(m_pWarmBuckets);
+ memset(pNewBuckets, 0, cNewWarmBuckets * sizeof(VolatileEntry*));
+}
+
+// Call during ngen to register fixups for hash table data structure fields. Calls derived-class
+// implementation of FixupEntry to allow per-entry extension of the fixup process.
+template <NGEN_HASH_PARAMS>
+void NgenHashTable<NGEN_HASH_ARGS>::BaseFixup(DataImage *pImage)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD i;
+
+ // Fixup the module pointer.
+ pImage->FixupPointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_pModule));
+
+ // Throw away the heap pointer, we can't serialize it into the image. We'll rely on the loader heap
+ // associated with the module above at runtime.
+ pImage->ZeroPointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_pHeap));
+
+ // Give the hash sub-class a chance to fixup any pointers in its entries. We provide the pointer to the
+ // hot or cold entry block and the offset into that block for this entry since we don't save individual
+ // zap nodes for each entry; just a single node covering the entire array. As a result all fixups have to
+ // be relative to the base of this array.
+
+ for (i = 0; i < m_sHotEntries.m_cEntries; i++)
+ DOWNCALL(FixupEntry)(pImage, &m_sHotEntries.m_pEntries[i].m_sValue, m_sHotEntries.m_pEntries, i * sizeof(PersistedEntry));
+
+ for (i = 0; i < m_sColdEntries.m_cEntries; i++)
+ DOWNCALL(FixupEntry)(pImage, &m_sColdEntries.m_pEntries[i].m_sValue, m_sColdEntries.m_pEntries, i * sizeof(PersistedEntry));
+
+ // Fixup the warm (empty) bucket list.
+ pImage->FixupPointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_pWarmBuckets));
+
+ // Fixup the hot entry array and bucket list.
+ pImage->FixupPointerField(this,
+ offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries) +
+ offsetof(PersistedEntries, m_pEntries));
+ pImage->FixupPointerField(this,
+ offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries) +
+ offsetof(PersistedEntries, m_pBuckets));
+
+ // Fixup the cold entry array and bucket list.
+ pImage->FixupPointerField(this,
+ offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries) +
+ offsetof(PersistedEntries, m_pEntries));
+ pImage->FixupPointerField(this,
+ offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries) +
+ offsetof(PersistedEntries, m_pBuckets));
+}
+#endif // !DACCESS_COMPILE
+#endif // FEATURE_PREJIT
+
+#ifdef DACCESS_COMPILE
+
+// Call during DAC enumeration of the memory regions to be saved in a mini-dump; enumerates all hash table
+// data structures. Calls the derived-class implementation of EnumMemoryRegionsForEntry to allow additional
+// per-entry memory to be reported.
+template <NGEN_HASH_PARAMS>
+void NgenHashTable<NGEN_HASH_ARGS>::BaseEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ // Save the base data structure itself (can't use DAC_ENUM_DTHIS() since the size to save is based on a
+ // sub-class).
+ DacEnumMemoryRegion(dac_cast<TADDR>(this), sizeof(FINAL_CLASS));
+
+ // Save the warm bucket list.
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_pWarmBuckets), m_cWarmBuckets * sizeof(VolatileEntry*));
+
+ // Save all the warm entries.
+ if (m_pWarmBuckets.IsValid())
+ {
+ for (DWORD i = 0; i < m_cWarmBuckets; i++)
+ {
+ PTR_VolatileEntry pEntry = m_pWarmBuckets[i];
+ while (pEntry.IsValid())
+ {
+ pEntry.EnumMem();
+
+ // Ask the sub-class whether each entry points to further data to be saved.
+ DOWNCALL(EnumMemoryRegionsForEntry)(VALUE_FROM_VOLATILE_ENTRY(pEntry), flags);
+
+ pEntry = pEntry->m_pNextEntry;
+ }
+ }
+ }
+
+#ifdef FEATURE_PREJIT
+ // Save hot buckets and entries.
+ if (m_sHotEntries.m_cEntries > 0)
+ {
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_sHotEntries.m_pEntries), m_sHotEntries.m_cEntries * sizeof(PersistedEntry));
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_sHotEntries.m_pBuckets), m_sHotEntries.m_pBuckets->GetSize(m_sHotEntries.m_cBuckets));
+ for (DWORD i = 0; i < m_sHotEntries.m_cEntries; i++)
+ DOWNCALL(EnumMemoryRegionsForEntry)(VALUE_FROM_PERSISTED_ENTRY(dac_cast<PTR_PersistedEntry>(&m_sHotEntries.m_pEntries[i])), flags);
+ }
+
+ // Save cold buckets and entries.
+ if (m_sColdEntries.m_cEntries > 0)
+ {
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_sColdEntries.m_pEntries), m_sColdEntries.m_cEntries * sizeof(PersistedEntry));
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_sColdEntries.m_pBuckets), m_sColdEntries.m_pBuckets->GetSize(m_sColdEntries.m_cBuckets));
+ for (DWORD i = 0; i < m_sColdEntries.m_cEntries; i++)
+ DOWNCALL(EnumMemoryRegionsForEntry)(VALUE_FROM_PERSISTED_ENTRY(dac_cast<PTR_PersistedEntry>(&m_sColdEntries.m_pEntries[i])), flags);
+ }
+#endif // FEATURE_PREJIT
+
+ // Save the module if present.
+ if (m_pModule.IsValid())
+ m_pModule->EnumMemoryRegions(flags, true);
+}
+#endif // DACCESS_COMPILE
+
+#ifdef FEATURE_PREJIT
+
+// Find the first persisted entry (hot or cold based on pEntries) that matches the given hash. Looks only in
+// the persisted block given (i.e. searches only hot *or* cold entries). Returns NULL on failure. Otherwise
+// returns pointer to the derived class portion of the entry and initializes the provided LookupContext to
+// allow enumeration of any further matches.
+template <NGEN_HASH_PARAMS>
+DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::FindPersistedEntryByHash(PersistedEntries *pEntries, NgenHashValue iHash, LookupContext *pContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pContext));
+ }
+ CONTRACTL_END;
+
+ // No point looking if there are no entries.
+ if (pEntries->m_cEntries == 0)
+ return NULL;
+
+ // Since there is at least one entry there must be at least one bucket.
+ _ASSERTE(pEntries->m_cBuckets > 0);
+
+ // Get the first entry and count of entries for the bucket which contains all entries with the given hash
+ // code.
+ DWORD dwEntryIndex, cEntriesLeft;
+ pEntries->m_pBuckets->GetBucket(iHash % pEntries->m_cBuckets, &dwEntryIndex, &cEntriesLeft);
+
+ // Determine the address of the first entry in the chain by indexing into the entry array.
+ PTR_PersistedEntry pEntry = dac_cast<PTR_PersistedEntry>(&pEntries->m_pEntries[dwEntryIndex]);
+
+ // Iterate while we've still got entries left to check in this chain.
+ while (cEntriesLeft--)
+ {
+ if (pEntry->m_iHashValue == iHash)
+ {
+ // We've found our match.
+
+ // Record our current search state into the provided context so that a subsequent call to
+ // BaseFindNextEntryByHash can pick up the search where it left off.
+ pContext->m_pEntry = dac_cast<TADDR>(pEntry);
+ pContext->m_eType = pEntries == &m_sHotEntries ? Hot : Cold;
+ pContext->m_cRemainingEntries = cEntriesLeft;
+
+ // Return the address of the sub-class's embedded entry structure.
+ return VALUE_FROM_PERSISTED_ENTRY(pEntry);
+ }
+
+ // Move to the next entry in the chain.
+ pEntry++;
+ }
+
+ // If we get here then none of the entries in the target bucket matched the hash code and we have a miss
+ // (for this section of the table at least).
+ return NULL;
+}
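+// A minimal lookup sketch (illustrative only; Matches and key are hypothetical
+// names, and BaseFindNextEntryByHash is assumed to take the LookupContext):
+//
+//   LookupContext sContext;
+//   DPTR(VALUE) pEntry = FindPersistedEntryByHash(&m_sHotEntries, iHash, &sContext);
+//   while (pEntry && !Matches(pEntry, key))
+//       pEntry = BaseFindNextEntryByHash(&sContext);
+//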
+
+#ifndef DACCESS_COMPILE
+template <NGEN_HASH_PARAMS>
+NgenHashTable<NGEN_HASH_ARGS>::EntryMappingTable::~EntryMappingTable()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ delete [] m_pEntries;
+}
+
+// Given an old entry address (pre-BaseSave), return the address of the relocated entry, ready for saving
+// to disk. Note that this address is the (ngen) runtime address, not the disk image address you can further
+// obtain by calling DataImage::GetImagePointer().
+template <NGEN_HASH_PARAMS>
+VALUE *NgenHashTable<NGEN_HASH_ARGS>::EntryMappingTable::GetNewEntryAddress(VALUE *pOldEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Perform a simple linear search. If this proves to be a bottleneck in ngen production (the only
+ // scenario in which it's called), we can replace this with something faster such as a hash lookup.
+ for (DWORD i = 0; i < m_cEntries; i++)
+ if (m_pEntries[i].m_pOldEntry == pOldEntry)
+ return m_pEntries[i].m_pNewEntry;
+
+ _ASSERTE(!"Couldn't map old hash entry to new entry");
+ return NULL;
+}
+#endif // !DACCESS_COMPILE
+#endif // FEATURE_PREJIT
+
+// Find the first volatile (warm) entry that matches the given hash. Looks only at warm entries. Returns NULL
+// on failure. Otherwise returns pointer to the derived class portion of the entry and initializes the
+// provided LookupContext to allow enumeration of any further matches.
+template <NGEN_HASH_PARAMS>
+DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::FindVolatileEntryByHash(NgenHashValue iHash, LookupContext *pContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pContext));
+ }
+ CONTRACTL_END;
+
+ // No point looking if there are no entries.
+ if (m_cWarmEntries == 0)
+ return NULL;
+
+ // Since there is at least one entry there must be at least one bucket.
+ _ASSERTE(m_cWarmBuckets > 0);
+
+ // Point at the first entry in the bucket chain which would contain any entries with the given hash code.
+ PTR_VolatileEntry pEntry = m_pWarmBuckets[iHash % m_cWarmBuckets];
+
+ // Walk the bucket chain one entry at a time.
+ while (pEntry)
+ {
+ if (pEntry->m_iHashValue == iHash)
+ {
+ // We've found our match.
+
+ // Record our current search state into the provided context so that a subsequent call to
+ // BaseFindNextEntryByHash can pick up the search where it left off.
+ pContext->m_pEntry = dac_cast<TADDR>(pEntry);
+ pContext->m_eType = Warm;
+
+ // Return the address of the sub-class's embedded entry structure.
+ return VALUE_FROM_VOLATILE_ENTRY(pEntry);
+ }
+
+ // Move to the next entry in the chain.
+ pEntry = pEntry->m_pNextEntry;
+ }
+
+ // If we get here then none of the entries in the target bucket matched the hash code and we have a miss
+ // (for this section of the table at least).
+ return NULL;
+}
+
+// Initializes the iterator context passed by the caller to make it ready to walk every entry in the table in
+// an arbitrary order. Call pIterator->Next() to retrieve the first entry.
+template <NGEN_HASH_PARAMS>
+void NgenHashTable<NGEN_HASH_ARGS>::BaseInitIterator(BaseIterator *pIterator)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ pIterator->m_pTable = this;
+ pIterator->m_pEntry = NULL;
+#ifdef FEATURE_PREJIT
+ pIterator->m_eType = Hot;
+ pIterator->m_cRemainingEntries = m_sHotEntries.m_cEntries;
+#else
+ pIterator->m_eType = Warm;
+ pIterator->m_dwBucket = 0;
+#endif
+}
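+// A minimal iteration sketch (illustrative only; ProcessEntry is a
+// hypothetical callback):
+//
+//   BaseIterator sIter;
+//   BaseInitIterator(&sIter);
+//   DPTR(VALUE) pEntry;
+//   while ((pEntry = sIter.Next()) != NULL)
+//       ProcessEntry(pEntry);
+//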
+
+// Returns a pointer to the next entry in the hash table or NULL once all entries have been enumerated. Once
+// NULL has been returned, the only legal operation is to re-initialize the iterator with BaseInitIterator.
+template <NGEN_HASH_PARAMS>
+DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::BaseIterator::Next()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ // We might need to re-iterate our algorithm if we fall off the end of one hash table section (Hot or
+ // Warm) and need to move onto the next.
+ while (true)
+ {
+ // What type of section are we walking (Hot, Warm or Cold)?
+ switch (m_eType)
+ {
+#ifdef FEATURE_PREJIT
+ case Hot:
+ {
+ if (m_cRemainingEntries)
+ {
+ // There's at least one more entry in the hot section to report.
+
+ if (m_pEntry == NULL)
+ {
+ // This is our first lookup in the hot section, return the first entry in the hot array.
+ m_pEntry = dac_cast<TADDR>(m_pTable->m_sHotEntries.m_pEntries);
+ }
+ else
+ {
+ // This is not our first lookup, return the entry immediately after the last one we
+ // reported.
+ m_pEntry = (TADDR)(m_pEntry + sizeof(PersistedEntry));
+ }
+
+ // There's one less entry to report in the future.
+ m_cRemainingEntries--;
+
+ // Return the pointer to the embedded sub-class entry in the entry we found.
+ return VALUE_FROM_PERSISTED_ENTRY(dac_cast<PTR_PersistedEntry>(m_pEntry));
+ }
+
+ // We ran out of hot entries. Set up to search the warm section next and go round the loop again.
+ m_eType = Warm;
+ m_pEntry = NULL;
+ m_dwBucket = 0;
+ break;
+ }
+#endif // FEATURE_PREJIT
+
+ case Warm:
+ {
+ if (m_pEntry == NULL)
+ {
+ // This is our first lookup in the warm section for a particular bucket, return the first
+ // entry in that bucket.
+ m_pEntry = dac_cast<TADDR>(m_pTable->m_pWarmBuckets[m_dwBucket]);
+ }
+ else
+ {
+ // This is not our first lookup, return the entry immediately after the last one we
+ // reported.
+ m_pEntry = dac_cast<TADDR>(dac_cast<PTR_VolatileEntry>(m_pEntry)->m_pNextEntry);
+ }
+
+ // If we found an entry in the last step return with it.
+ if (m_pEntry)
+ return VALUE_FROM_VOLATILE_ENTRY(dac_cast<PTR_VolatileEntry>(m_pEntry));
+
+ // Otherwise we've found the end of a bucket chain. Increment the current bucket and, if there are
+ // buckets left to scan, go back around again.
+ m_dwBucket++;
+ if (m_dwBucket < m_pTable->m_cWarmBuckets)
+ break;
+
+ // Otherwise we should move on to the cold section (if we have one).
+
+#ifdef FEATURE_PREJIT
+ m_eType = Cold;
+ m_pEntry = NULL;
+ m_cRemainingEntries = m_pTable->m_sColdEntries.m_cEntries;
+ break;
+#else
+ return NULL;
+#endif // FEATURE_PREJIT
+ }
+
+#ifdef FEATURE_PREJIT
+ case Cold:
+ {
+ if (m_cRemainingEntries)
+ {
+ // There's at least one more entry in the cold section to report.
+
+ if (m_pEntry == NULL)
+ {
+ // This is our first lookup in the cold section, return the first entry in the cold array.
+ m_pEntry = dac_cast<TADDR>(m_pTable->m_sColdEntries.m_pEntries);
+ }
+ else
+ {
+ // This is not our first lookup, return the entry immediately after the last one we
+ // reported.
+ m_pEntry = (TADDR)(m_pEntry + sizeof(PersistedEntry));
+ }
+
+ // There's one less entry to report in the future.
+ m_cRemainingEntries--;
+
+ // Return the pointer to the embedded sub-class entry in the entry we found.
+ return VALUE_FROM_PERSISTED_ENTRY(dac_cast<PTR_PersistedEntry>(m_pEntry));
+ }
+
+ // If there are no more entries in the cold section that's it, the whole table has been scanned.
+ return NULL;
+ }
+#endif // FEATURE_PREJIT
+
+ default:
+ _ASSERTE(!"Invalid hash entry type");
+ }
+ }
+}
+
+// Get a pointer to the referenced entry.
+template <NGEN_HASH_PARAMS>
+DPTR(VALUE) NgenHashEntryRef<NGEN_HASH_ARGS>::Get()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ // Short-cut the NULL case; it's a lot cheaper than the code below when compiling for DAC.
+ if (m_rpEntryRef.IsNull())
+ return NULL;
+
+ // Note that the following code uses a special DAC lookup for an interior pointer (i.e. "this" isn't a
+ // host address corresponding to a DAC marshalled instance, it's some host address within such an
+ // instance). These lookups are a little slower than the regular kind since we have to search for the
+ // containing instance.
+
+ // @todo: The following causes gcc to choke on Mac 10.4 at least (complains that offsetof is being passed
+ // four arguments instead of two). Expanding the top-level macro manually fixes this.
+ // TADDR pBase = PTR_HOST_INT_MEMBER_TADDR(NgenHashEntryRef<NGEN_HASH_ARGS>, this, m_rpEntryRef);
+ TADDR pBase = PTR_HOST_INT_TO_TADDR(this) + (TADDR)offsetof(NgenHashEntryRef<NGEN_HASH_ARGS>, m_rpEntryRef);
+
+ return m_rpEntryRef.GetValue(pBase);
+}
+
+#ifndef DACCESS_COMPILE
+
+// Set the reference to point to the given entry.
+template <NGEN_HASH_PARAMS>
+void NgenHashEntryRef<NGEN_HASH_ARGS>::Set(VALUE *pEntry)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_rpEntryRef.SetValueMaybeNull(pEntry);
+}
+
+#ifdef FEATURE_PREJIT
+
+// Call this during the ngen Fixup phase to adjust the relative pointer to account for ngen image layout.
+template <NGEN_HASH_PARAMS>
+void NgenHashEntryRef<NGEN_HASH_ARGS>::Fixup(DataImage *pImage, NgenHashTable<NGEN_HASH_ARGS> *pTable)
+{
+ STANDARD_VM_CONTRACT;
+
+ // No fixup required for null pointers.
+ if (m_rpEntryRef.IsNull())
+ return;
+
+ // Location is the field containing the entry reference. We need to determine the ngen zap node that
+ // contains this field (it'll be part of either the hot or cold entry arrays). Then we can determine the
+ // offset of the field from the beginning of the node.
+ BYTE *pLocation = (BYTE*)&m_rpEntryRef;
+ BYTE *pLocationBase;
+ DWORD cbLocationOffset;
+
+ if (pLocation >= (BYTE*)pTable->m_sHotEntries.m_pEntries &&
+ pLocation < (BYTE*)(pTable->m_sHotEntries.m_pEntries + pTable->m_sHotEntries.m_cEntries))
+ {
+ // The field is in a hot entry.
+ pLocationBase = (BYTE*)pTable->m_sHotEntries.m_pEntries;
+ }
+ else if (pLocation >= (BYTE*)pTable->m_sColdEntries.m_pEntries &&
+ pLocation < (BYTE*)(pTable->m_sColdEntries.m_pEntries + pTable->m_sColdEntries.m_cEntries))
+ {
+ // The field is in a cold entry.
+ pLocationBase = (BYTE*)pTable->m_sColdEntries.m_pEntries;
+ }
+ else
+ {
+ // The field doesn't lie in one of the entry arrays. The caller has passed us an NgenHashEntryRef that
+ // wasn't embedded as a field in one of this hash's entries.
+ _ASSERTE(!"NgenHashEntryRef must be a field in an NgenHashTable entry for Fixup to work");
+ return;
+ }
+ cbLocationOffset = static_cast<DWORD>(pLocation - pLocationBase);
+
+ // Target is the address of the entry that this reference points to. Go through the same kind of logic to
+ // determine which section the target entry lives in, hot or cold.
+ BYTE *pTarget = (BYTE*)m_rpEntryRef.GetValue();
+ BYTE *pTargetBase;
+ DWORD cbTargetOffset;
+
+ if (pTarget >= (BYTE*)pTable->m_sHotEntries.m_pEntries &&
+ pTarget < (BYTE*)(pTable->m_sHotEntries.m_pEntries + pTable->m_sHotEntries.m_cEntries))
+ {
+ // The target is a hot entry.
+ pTargetBase = (BYTE*)pTable->m_sHotEntries.m_pEntries;
+ }
+ else if (pTarget >= (BYTE*)pTable->m_sColdEntries.m_pEntries &&
+ pTarget < (BYTE*)(pTable->m_sColdEntries.m_pEntries + pTable->m_sColdEntries.m_cEntries))
+ {
+ // The target is a cold entry.
+ pTargetBase = (BYTE*)pTable->m_sColdEntries.m_pEntries;
+ }
+ else
+ {
+ // The target doesn't lie in one of the entry arrays. The caller has passed us an NgenHashEntryRef that
+ // points to an entry (or other memory) not in our hash table.
+ _ASSERTE(!"NgenHashEntryRef must refer to an entry in the same hash table");
+ return;
+ }
+ cbTargetOffset = static_cast<DWORD>(pTarget - pTargetBase);
+
+ // Now we have enough data to ask for a fixup to be generated for this field. The fixup type
+ // IMAGE_REL_BASED_RELPTR means we won't actually get a base relocation fixup (an entry in the ngen image
+ // that causes a load-time fixup to be applied). Instead this record will just adjust the relative value
+ // in the field once the ngen image layout is finalized and it knows the final locations of the field and
+ // target zap nodes.
+ pImage->FixupField(pLocationBase, cbLocationOffset, pTargetBase, cbTargetOffset, IMAGE_REL_BASED_RELPTR);
+}
+#endif // FEATURE_PREJIT
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/ngenoptout.cpp b/src/vm/ngenoptout.cpp
new file mode 100644
index 0000000000..148bd99485
--- /dev/null
+++ b/src/vm/ngenoptout.cpp
@@ -0,0 +1,38 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ngenoptout.cpp
+//
+
+//
+//
+// Contains functionality to reject native images at runtime
+
+
+#include "common.h"
+#ifndef FEATURE_CORECLR
+#include "ngenoptout.h"
+#include "assemblynamelist.h"
+
+AssemblyNameList g_NgenOptoutList;
+
+BOOL IsNativeImageOptedOut(IAssemblyName* pName)
+{
+ WRAPPER_NO_CONTRACT;
+ return g_NgenOptoutList.Lookup(pName) != NULL;
+}
+
+void AddNativeImageOptOut(IAssemblyName* pName)
+{
+ WRAPPER_NO_CONTRACT;
+ pName->AddRef();
+ g_NgenOptoutList.Add(pName);
+}
+// HRESULT
+HRESULT RuntimeIsNativeImageOptedOut(IAssemblyName* pName)
+{
+ WRAPPER_NO_CONTRACT;
+ return IsNativeImageOptedOut(pName) ? S_OK : S_FALSE;
+}
+#endif // FEATURE_CORECLR
diff --git a/src/vm/ngenoptout.h b/src/vm/ngenoptout.h
new file mode 100644
index 0000000000..d8e2e9e7e6
--- /dev/null
+++ b/src/vm/ngenoptout.h
@@ -0,0 +1,35 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ngenoptout.h
+//
+
+//
+//
+// Contains functionality to reject native images at runtime
+
+
+#ifndef NGENOPTOUT_H
+#define NGENOPTOUT_H
+
+#include "assemblynamesconfigfactory.h"
+
+// throwing
+BOOL IsNativeImageOptedOut(IAssemblyName* pName);
+void AddNativeImageOptOut(IAssemblyName* pName);
+
+// HRESULT
+HRESULT RuntimeIsNativeImageOptedOut(IAssemblyName* pName);
+
+
+class NativeImageOptOutConfigFactory : public AssemblyNamesConfigFactory
+{
+ virtual void AddAssemblyName(IAssemblyName* pName)
+ {
+ WRAPPER_NO_CONTRACT;
+ AddNativeImageOptOut(pName);
+ }
+};
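+
+// A minimal usage sketch (illustrative binder call site; pName would come from
+// the fusion binding code):
+//
+//   if (RuntimeIsNativeImageOptedOut(pName) == S_OK)
+//   {
+//       // Skip the native image and fall back to the IL assembly.
+//   }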
+
+#endif // NGENOPTOUT_H
diff --git a/src/vm/notifyexternals.cpp b/src/vm/notifyexternals.cpp
new file mode 100644
index 0000000000..a08fbc26ec
--- /dev/null
+++ b/src/vm/notifyexternals.cpp
@@ -0,0 +1,282 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: NOTIFYEXTERNALS.CPP
+//
+
+// ===========================================================================
+
+
+#include "common.h"
+
+#include "excep.h"
+#include "interoputil.h"
+#include "comcache.h"
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+#include "olecontexthelpers.h"
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+#define INITGUID
+#include <guiddef.h>
+#include "ctxtcall.h"
+#include "notifyexternals.h"
+#include "mdaassistants.h"
+
+DEFINE_GUID(CLSID_ComApartmentState, 0x00000349, 0, 0, 0xC0,0,0,0,0,0,0,0x46);
+static const GUID IID_ITeardownNotification = { 0xa85e0fb6, 0x8bf4, 0x4614, { 0xb1, 0x64, 0x7b, 0x43, 0xef, 0x43, 0xf5, 0xbe } };
+static const GUID IID_IComApartmentState = { 0x7e220139, 0x8dde, 0x47ef, { 0xb1, 0x81, 0x08, 0xbe, 0x60, 0x3e, 0xfd, 0x75 } };
+
+static IComApartmentState* g_pApartmentState = NULL;
+static ULONG_PTR g_TDCookie = 0;
+
+
+// ---------------------------------------------------------------------------
+// %%Class ApartmentTearDownHandler
+// ITeardownNotification implementation used to observe COM apartment shutdown
+// ---------------------------------------------------------------------------
+class ApartmentTearDownHandler : public ITeardownNotification
+{
+public:
+ ApartmentTearDownHandler(HRESULT& hr)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ m_pMarshalerObj = NULL;
+ m_cbRefCount = 1;
+ hr = CoCreateFreeThreadedMarshaler(this, &m_pMarshalerObj);
+ if (hr == S_OK)
+ m_cbRefCount = 0;
+ else
+ Release();
+ }
+
+ virtual ~ApartmentTearDownHandler()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pMarshalerObj != NULL)
+ {
+ DWORD cbRef = SafeRelease(m_pMarshalerObj);
+ LogInteropRelease(m_pMarshalerObj, cbRef, "pMarshaler object");
+ }
+ }
+
+ STDMETHODIMP QueryInterface( REFIID iid, void **ppv)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (ppv == NULL)
+ return E_POINTER;
+
+ *ppv = NULL;
+
+ if (iid == IID_ITeardownNotification || iid == IID_IUnknown)
+ {
+ *ppv = (ITeardownNotification *)this;
+ AddRef();
+ }
+ else if (iid == IID_IMarshal || iid == IID_IAgileObject)
+ {
+ // delegate the IMarshal and IAgileObject queries
+ return SafeQueryInterface(m_pMarshalerObj, iid, (IUnknown**)ppv);
+ }
+
+ return (*ppv != NULL) ? S_OK : E_NOINTERFACE;
+ }
+
+
+ STDMETHODIMP_(ULONG) AddRef()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ LONG l = FastInterlockIncrement(&m_cbRefCount);
+ return l;
+ }
+ STDMETHODIMP_(ULONG) Release()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ LONG l = FastInterlockDecrement(&m_cbRefCount);
+
+ if (l == 0)
+ delete this;
+
+ return l;
+ }
+
+ STDMETHODIMP TeardownHint(void)
+ {
+ WRAPPER_NO_CONTRACT;
+ return HandleApartmentShutDown();
+ }
+
+ HRESULT HandleApartmentShutDown()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Thread* pThread = GetThread();
+ if (pThread != NULL)
+ {
+ _ASSERTE(!"NYI");
+ // reset the apartment state
+ pThread->ResetApartment();
+ }
+ return S_OK;
+ }
+
+private:
+ LONG m_cbRefCount;
+ IUnknown* m_pMarshalerObj;
+};
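+
+// Note: the handler aggregates the COM free-threaded marshaler created in its
+// constructor and delegates IMarshal/IAgileObject queries to it, which makes
+// the teardown notification registration apartment-agile.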
+
+HRESULT SetupTearDownNotifications()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ static BOOL fTearDownCalled = FALSE;
+
+ // check if we have already set up a notification
+ if (fTearDownCalled == TRUE)
+ return S_OK;
+
+ fTearDownCalled = TRUE;
+
+ GCX_PREEMP();
+
+ // instantiate the notifier
+ SafeComHolderPreemp<IComApartmentState> pAptState = NULL;
+ hr = CoCreateInstance(CLSID_ComApartmentState, NULL, CLSCTX_ALL, IID_IComApartmentState, (VOID **)&pAptState);
+
+ if (hr == S_OK)
+ {
+ IComApartmentState* pPrevAptState = FastInterlockCompareExchangePointer(&g_pApartmentState, pAptState.GetValue(), NULL);
+
+ if (pPrevAptState == NULL)
+ {
+ _ASSERTE(g_pApartmentState);
+ ApartmentTearDownHandler* pTDHandler = new (nothrow) ApartmentTearDownHandler(hr);
+ if (hr == S_OK)
+ {
+ SafeComHolderPreemp<ITeardownNotification> pITD = NULL;
+ hr = SafeQueryInterface(pTDHandler, IID_ITeardownNotification, (IUnknown **)&pITD);
+ _ASSERTE(hr == S_OK && pITD != NULL);
+ g_pApartmentState->RegisterForTeardownHint(pITD, 0, &g_TDCookie);
+ }
+ else
+ {
+ // We couldn't create our handler;
+ // release the global apartment state pointer.
+ if (g_pApartmentState != NULL)
+ {
+ g_pApartmentState->Release();
+ g_pApartmentState = NULL;
+ }
+ }
+
+ // We're either keeping the object alive, or we've already freed it.
+ pAptState.SuppressRelease();
+ }
+ }
+
+ return S_OK;
+}
+
+VOID RemoveTearDownNotifications()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (g_pApartmentState != NULL)
+ {
+ _ASSERTE(g_TDCookie != 0);
+ g_pApartmentState->UnregisterForTeardownHint(g_TDCookie);
+ g_pApartmentState->Release();
+ g_pApartmentState = NULL;
+ g_TDCookie = 0;
+ }
+}
+
+
+// On some platforms, we can detect whether the current thread holds the loader
+// lock. It is unsafe to execute managed code when this is the case.
+BOOL ShouldCheckLoaderLock(BOOL fForMDA /*= TRUE*/)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_CORESYSTEM
+ // CoreSystem does not support this.
+ return FALSE;
+#else
+ // Because of how C++ generates code, we must use default initialization to
+ // 0 here. Any explicit initialization will result in thread-safety problems.
+ static BOOL fInited;
+ static BOOL fShouldCheck;
+ static BOOL fShouldCheck_ForMDA;
+
+ if (VolatileLoad(&fInited) == FALSE)
+ {
+ fShouldCheck_ForMDA = FALSE;
+
+ fShouldCheck = AuxUlibInitialize(); // may fail
+
+#ifdef MDA_SUPPORTED
+ if (fShouldCheck)
+ {
+ MdaLoaderLock* pProbe = MDA_GET_ASSISTANT(LoaderLock);
+ if (pProbe)
+ fShouldCheck_ForMDA = TRUE;
+ }
+#endif // MDA_SUPPORTED
+ VolatileStore(&fInited, TRUE);
+ }
+ return (fForMDA ? fShouldCheck_ForMDA : fShouldCheck);
+#endif // FEATURE_CORESYSTEM
+}
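+
+// A minimal sketch of the intended call pattern (illustrative only; it assumes
+// the aux_ulib API AuxUlibIsDLLSynchronizationHeld is usable once
+// AuxUlibInitialize has succeeded):
+//
+//   if (ShouldCheckLoaderLock())
+//   {
+//       BOOL fHeld = FALSE;
+//       if (AuxUlibIsDLLSynchronizationHeld(&fHeld) && fHeld)
+//       {
+//           // Unsafe to run managed code now; fire the MDA or fail the call.
+//       }
+//   }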
diff --git a/src/vm/notifyexternals.h b/src/vm/notifyexternals.h
new file mode 100644
index 0000000000..4f1a540acf
--- /dev/null
+++ b/src/vm/notifyexternals.h
@@ -0,0 +1,27 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+////////////////////////////////////////////////////////////////////////////////
+
+
+////////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef _NOTIFY_EXTERNALS_H
+#define _NOTIFY_EXTERNALS_H
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+extern BOOL g_fComStarted;
+
+HRESULT SetupTearDownNotifications();
+VOID RemoveTearDownNotifications();
+
+BOOL ShouldCheckLoaderLock(BOOL fForMDA = TRUE);
+
+#include "aux_ulib.h"
+
+#endif
diff --git a/src/vm/nsenumhandleallcases.h b/src/vm/nsenumhandleallcases.h
new file mode 100644
index 0000000000..7a7c2dbe1f
--- /dev/null
+++ b/src/vm/nsenumhandleallcases.h
@@ -0,0 +1,44 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// NSENUMHANDLEALLCASES.H -
+//
+
+//
+// Meta-programming to ensure that all NFT cases are properly handled in switch statements that should handle all NFT types
+//
+// Uses of this header file are done by
+// - #include the header before the case statement, probably at the top of the cpp file
+// - #define NFT_CASE_VERIFICATION_TYPE_NAME(type) before the switch to give a descriptive name based on the
+//   type. The type name string is what identifies the switch statement that has a problem.
+// - Instead of using normal case statements, use NFT_CASE(type). See examples in class.cpp and the sketch below.
+// - In a default: case statement, define NFT_VERIFY_ALL_CASES and then include this file again.
+//
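+// A minimal sketch of the pattern (names hypothetical; see class.cpp for real
+// uses):
+//
+//   #define NFT_CASE_VERIFICATION_TYPE_NAME(type) nftCase_##type
+//   switch (fieldType)
+//   {
+//   NFT_CASE(NFT_COPY1)
+//       /* handle NFT_COPY1 */ break;
+//   default:
+//       #define NFT_VERIFY_ALL_CASES
+//       #include "nsenumhandleallcases.h"
+//       break;
+//   }
+//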
+#ifndef NSENUMHANDLEALLCASES_H
+#define NSENUMHANDLEALLCASES_H
+
+// Requiring all nft types to be handled is done by defining a variable in each case statement, and then in the default: statement
+// computing a value that depends on the value of all of those variables.
+
+#ifdef _DEBUG
+#define NFT_CASE(type) case type: int NFT_CASE_VERIFICATION_TYPE_NAME(type);
+
+#else
+#define NFT_CASE(type) case type:
+#endif
+#endif // NSENUMHANDLEALLCASES_H
+
+#if defined(_DEBUG) && defined(NFT_VERIFY_ALL_CASES)
+
+int *nftAccumulator = nullptr;
+do {
+#undef DEFINE_NFT
+#define DEFINE_NFT(type, size, WinRTSupported) nftAccumulator += (int)&NFT_CASE_VERIFICATION_TYPE_NAME(type),
+#include "nsenums.h"
+nftAccumulator = nullptr;
+} while (false);
+#undef DEFINE_NFT
+#endif // _DEBUG && NFT_VERIFY_ALL_CASES
+#undef NFT_VERIFY_ALL_CASES
+#undef NFT_CASE_VERIFICATION_TYPE_NAME
diff --git a/src/vm/nsenums.h b/src/vm/nsenums.h
new file mode 100644
index 0000000000..cce0c9c19f
--- /dev/null
+++ b/src/vm/nsenums.h
@@ -0,0 +1,77 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// NSENUMS.H -
+//
+
+//
+// Defines NStruct-related enums
+//
+
+// NStruct Field Type's
+//
+// Columns:
+// Name - name of enum
+// Size - the native size (in bytes) of the field.
+// For some fields, this value cannot be computed
+// without more information. If so, put a zero here
+// and make sure CollectNStructFieldMetadata()
+// has code to compute the size.
+// WinRTSupported - true if the field type is supported in WinRT
+// scenarios.
+//
+// PS - Append new entries only at the end of the enum to avoid a phone versioning break.
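+//
+// This file is an X-macro list: consumers #define DEFINE_NFT and then include
+// it. A minimal sketch of a hypothetical consumer:
+//
+//   #undef DEFINE_NFT
+//   #define DEFINE_NFT(name, size, fWinRTSupported) name,
+//   enum NStructFieldType
+//   {
+//   #include "nsenums.h"
+//   };
+//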
+// Name (COM+ - Native) Size WinRTSupported
+
+DEFINE_NFT(NFT_NONE, 0, false)
+
+DEFINE_NFT(NFT_STRINGUNI, sizeof(LPVOID), false)
+DEFINE_NFT(NFT_STRINGANSI, sizeof(LPVOID), false)
+DEFINE_NFT(NFT_FIXEDSTRINGUNI, 0, false)
+DEFINE_NFT(NFT_FIXEDSTRINGANSI, 0, false)
+
+DEFINE_NFT(NFT_FIXEDCHARARRAYANSI, 0, false)
+DEFINE_NFT(NFT_FIXEDARRAY, 0, false)
+
+DEFINE_NFT(NFT_DELEGATE, sizeof(LPVOID), false)
+
+DEFINE_NFT(NFT_COPY1, 1, true)
+DEFINE_NFT(NFT_COPY2, 2, true)
+DEFINE_NFT(NFT_COPY4, 4, true)
+DEFINE_NFT(NFT_COPY8, 8, true)
+
+DEFINE_NFT(NFT_ANSICHAR, 1, false)
+DEFINE_NFT(NFT_WINBOOL, sizeof(BOOL), false)
+
+DEFINE_NFT(NFT_NESTEDLAYOUTCLASS, 0, false)
+DEFINE_NFT(NFT_NESTEDVALUECLASS, 0, true)
+
+DEFINE_NFT(NFT_CBOOL, 1, true)
+
+DEFINE_NFT(NFT_DATE, sizeof(DATE), false)
+DEFINE_NFT(NFT_DECIMAL, sizeof(DECIMAL), false)
+DEFINE_NFT(NFT_INTERFACE, sizeof(IUnknown*), false)
+
+DEFINE_NFT(NFT_SAFEHANDLE, sizeof(LPVOID), false)
+DEFINE_NFT(NFT_CRITICALHANDLE, sizeof(LPVOID), false)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_NFT(NFT_SAFEARRAY, 0, false)
+DEFINE_NFT(NFT_BSTR, sizeof(BSTR), false)
+DEFINE_NFT(NFT_HSTRING, sizeof(HSTRING), true)
+DEFINE_NFT(NFT_VARIANT, sizeof(VARIANT), false)
+DEFINE_NFT(NFT_VARIANTBOOL, sizeof(VARIANT_BOOL), false)
+DEFINE_NFT(NFT_CURRENCY, sizeof(CURRENCY), false)
+DEFINE_NFT(NFT_DATETIMEOFFSET, sizeof(INT64), true)
+DEFINE_NFT(NFT_SYSTEMTYPE, sizeof(TypeNameNative), true) // System.Type -> Windows.UI.Xaml.Interop.TypeName
+DEFINE_NFT(NFT_WINDOWSFOUNDATIONHRESULT, sizeof(int), true) // Windows.Foundation.HResult is marshaled to System.Exception.
+#endif // FEATURE_COMINTEROP
+
+DEFINE_NFT(NFT_ILLEGAL, 1, true)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_NFT(NFT_WINDOWSFOUNDATIONIREFERENCE, sizeof(IUnknown*), true) // Windows.Foundation.IReference`1 is marshaled to System.Nullable`1.
+#endif // FEATURE_COMINTEROP
+
+// Append new entries only at the end of the enum to avoid a phone versioning break.
diff --git a/src/vm/object.cpp b/src/vm/object.cpp
new file mode 100644
index 0000000000..9b576005d5
--- /dev/null
+++ b/src/vm/object.cpp
@@ -0,0 +1,3510 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// OBJECT.CPP
+//
+// Definitions of a Com+ Object
+//
+
+
+
+#include "common.h"
+
+#include "vars.hpp"
+#include "class.h"
+#include "object.h"
+#include "threads.h"
+#include "excep.h"
+#include "eeconfig.h"
+#include "gc.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "field.h"
+#include "gcscan.h"
+
+#ifdef FEATURE_COMPRESSEDSTACK
+void* CompressedStackObject::GetUnmanagedCompressedStack()
+{
+ LIMITED_METHOD_CONTRACT;
+ return ((m_compressedStackHandle != NULL)?m_compressedStackHandle->GetHandle():NULL);
+}
+#endif // FEATURE_COMPRESSEDSTACK
+
+#ifndef FEATURE_PAL
+LPVOID FrameSecurityDescriptorBaseObject::GetCallerToken()
+{
+ LIMITED_METHOD_CONTRACT;
+ return ((m_callerToken!= NULL)?m_callerToken->GetHandle():NULL);
+
+}
+
+LPVOID FrameSecurityDescriptorBaseObject::GetImpersonationToken()
+{
+ LIMITED_METHOD_CONTRACT;
+ return ((m_impToken != NULL)?m_impToken->GetHandle():NULL);
+}
+#endif
+
+SVAL_IMPL(INT32, ArrayBase, s_arrayBoundsZero);
+
+// follow the necessary rules to get a new valid hashcode for an object
+DWORD Object::ComputeHashCode()
+{
+ DWORD hashCode;
+
+ // Note that this algorithm uses at most HASHCODE_BITS bits so that the hashcode will
+ // fit into the objheader if it ever has to be moved back into the objheader,
+ // such as for an object that is being frozen.
+ do
+ {
+ // we use the high order bits in this case because they're more random
+ hashCode = GetThread()->GetNewHashCode() >> (32-HASHCODE_BITS);
+ }
+ while (hashCode == 0); // need to enforce hashCode != 0
+
+ // verify that it really fits into HASHCODE_BITS
+ _ASSERTE((hashCode & ((1<<HASHCODE_BITS)-1)) == hashCode);
+
+ return hashCode;
+}
+
+#ifndef DACCESS_COMPILE
+INT32 Object::GetHashCodeEx()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ // This loop exists because we're inspecting the header dword of the object
+ // and it may change under us because of races with other threads.
+ // On top of that, it may have the spin lock bit set, in which case we're
+ // not supposed to change it.
+ // In all of these cases, we need to retry the operation.
+ DWORD iter = 0;
+ DWORD dwSwitchCount = 0;
+ while (true)
+ {
+ DWORD bits = GetHeader()->GetBits();
+
+ if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
+ {
+ if (bits & BIT_SBLK_IS_HASHCODE)
+ {
+ // Common case: the object already has a hash code
+ return bits & MASK_HASHCODE;
+ }
+ else
+ {
+ // We have a sync block index. This means if we already have a hash code,
+ // it is in the sync block, otherwise we generate a new one and store it there
+ SyncBlock *psb = GetSyncBlock();
+ DWORD hashCode = psb->GetHashCode();
+ if (hashCode != 0)
+ return hashCode;
+
+ hashCode = ComputeHashCode();
+
+ return psb->SetHashCode(hashCode);
+ }
+ }
+ else
+ {
+ // If a thread is holding the thin lock or an appdomain index is set, we need a syncblock
+ if ((bits & (SBLK_MASK_LOCK_THREADID | (SBLK_MASK_APPDOMAININDEX << SBLK_APPDOMAIN_SHIFT))) != 0)
+ {
+ GetSyncBlock();
+ // No need to replicate the above code dealing with sync blocks
+ // here - in the next iteration of the loop, we'll realize
+ // we have a syncblock, and we'll do the right thing.
+ }
+ else
+ {
+ // We want to change the header in this case, so we have to check the BIT_SBLK_SPIN_LOCK bit first
+ if (bits & BIT_SBLK_SPIN_LOCK)
+ {
+ iter++;
+ if ((iter % 1024) != 0 && g_SystemInfo.dwNumberOfProcessors > 1)
+ {
+ YieldProcessor(); // indicate to the processor that we are spinning
+ }
+ else
+ {
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+ continue;
+ }
+
+ DWORD hashCode = ComputeHashCode();
+
+ DWORD newBits = bits | BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE | hashCode;
+
+ if (GetHeader()->SetBits(newBits, bits) == bits)
+ return hashCode;
+ // Header changed under us - let's restart this whole thing.
+ }
+ }
+ }
+}
+#endif // #ifndef DACCESS_COMPILE
+
+BOOL Object::ValidateObjectWithPossibleAV()
+{
+ CANNOT_HAVE_CONTRACT;
+ SUPPORTS_DAC;
+
+ return GetGCSafeMethodTable()->ValidateWithPossibleAV();
+}
+
+
+#ifndef DACCESS_COMPILE
+
+MethodTable *Object::GetTrueMethodTable()
+{
+ CONTRACT(MethodTable*)
+ {
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ NOTHROW;
+ SO_TOLERANT;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ MethodTable *mt = GetMethodTable();
+
+#ifdef FEATURE_REMOTING
+ if(mt->IsTransparentProxy())
+ {
+ mt = ((TransparentProxyObject *)this)->GetMethodTableBeingProxied();
+ }
+ _ASSERTE(!mt->IsTransparentProxy());
+#endif
+
+ RETURN mt;
+}
+
+TypeHandle Object::GetTrueTypeHandle()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (m_pMethTab->IsArray())
+ return ((ArrayBase*) this)->GetTypeHandle();
+ else
+ return TypeHandle(GetTrueMethodTable());
+}
+
+// There are cases where it is not possible to get a type handle during a GC.
+// If we can get the type handle, this method will return it.
+// Otherwise, the method will return NULL.
+TypeHandle Object::GetGCSafeTypeHandleIfPossible() const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ if(!IsGCThread()) { MODE_COOPERATIVE; }
+ }
+ CONTRACTL_END;
+
+ // Although getting the type handle is unsafe and could cause recursive type lookups
+ // in some cases, it's always safe and straightforward to get to the MethodTable.
+ MethodTable * pMT = GetGCSafeMethodTable();
+ _ASSERTE(pMT != NULL);
+
+ // Don't look at types that belong to an unloading AppDomain, or else
+ // pObj->GetGCSafeTypeHandle() can AV. For example, we encountered this AV when pObj
+ // was an array like this:
+ //
+ // MyValueType1<MyValueType2>[] myArray
+ //
+ // where MyValueType1<T> & MyValueType2 are defined in different assemblies. In such
+ // a case, looking up the type handle for myArray requires looking in
+ // MyValueType1<T>'s module's m_AssemblyRefByNameTable, which is garbage if its
+ // AppDomain is unloading.
+ //
+ // Another AV was encountered in a similar case,
+ //
+ // MyRefType1<MyRefType2>[] myArray
+ //
+ // where MyRefType2's module was unloaded by the time the GC occurred. In at least
+ // one case, the GC was caused by the AD unload itself (AppDomain::Unload ->
+ // AppDomain::Exit -> GCInterface::AddMemoryPressure -> WKS::GCHeap::GarbageCollect).
+ //
+ // To protect against all scenarios, verify that
+ //
+ // * The MT of the object is not getting unloaded, OR
+ // * In the case of arrays (potentially of arrays of arrays of arrays ...), the
+ // MT of the innermost element is not getting unloaded. This then ensures the
+ // MT of the original object (i.e., array) itself must not be getting
+ // unloaded either, since the MTs of arrays and of their elements are
+ // allocated on the same loader heap, except the case where the array is
+ // Object[], in which case its MT is in mscorlib and thus doesn't unload.
+
+ MethodTable * pMTToCheck = pMT;
+ if (pMTToCheck->IsArray())
+ {
+ TypeHandle thElem = static_cast<const ArrayBase * const>(this)->GetArrayElementTypeHandle();
+
+ // Ideally, we would just call thElem.GetLoaderModule() here. Unfortunately, the
+ // current TypeDesc::GetLoaderModule() implementation depends on data structures
+ // that might have been unloaded already. So we just simulate
+ // TypeDesc::GetLoaderModule() for the limited array case that we care about. In
+ // case we're dealing with an array of arrays of arrays etc. traverse until we
+ // find the deepest element, and that's the type we'll check
+ while (thElem.HasTypeParam())
+ {
+ thElem = thElem.GetTypeParam();
+ }
+
+ pMTToCheck = thElem.GetMethodTable();
+ }
+
+ Module * pLoaderModule = pMTToCheck->GetLoaderModule();
+
+ BaseDomain * pBaseDomain = pLoaderModule->GetDomain();
+ if ((pBaseDomain != NULL) &&
+ (pBaseDomain->IsAppDomain()) &&
+ (pBaseDomain->AsAppDomain()->IsUnloading()))
+ {
+ return NULL;
+ }
+
+ // Don't look up types that are unloading due to Collectible Assemblies. Haven't been
+ // able to find a case where we actually encounter objects like this that can cause
+ // problems; however, it seems prudent to add this protection just in case.
+ LoaderAllocator * pLoaderAllocator = pLoaderModule->GetLoaderAllocator();
+ _ASSERTE(pLoaderAllocator != NULL);
+ if ((pLoaderAllocator->IsCollectible()) &&
+ (ObjectHandleIsNull(pLoaderAllocator->GetLoaderAllocatorObjectHandle())))
+ {
+ return NULL;
+ }
+
+ // Ok, it should now be safe to get the type handle
+ return GetGCSafeTypeHandle();
+}
+
+/* static */ BOOL Object::SupportsInterface(OBJECTREF pObj, MethodTable* pInterfaceMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pInterfaceMT));
+ PRECONDITION(pObj->GetTrueMethodTable()->IsRestored_NoLogging());
+ PRECONDITION(pInterfaceMT->IsInterface());
+ }
+ CONTRACTL_END
+
+ BOOL bSupportsItf = FALSE;
+
+ GCPROTECT_BEGIN(pObj)
+ {
+ // Make sure the interface method table has been restored.
+ pInterfaceMT->CheckRestore();
+
+ // Check to see if the static class definition indicates we implement the interface.
+ MethodTable * pMT = pObj->GetTrueMethodTable();
+ if (pMT->CanCastToInterface(pInterfaceMT))
+ {
+ bSupportsItf = TRUE;
+ }
+#ifdef FEATURE_COMINTEROP
+ else
+ if (pMT->IsComObjectType())
+ {
+ // If this is a COM object, the static class definition might not be complete so we need
+ // to check if the COM object implements the interface.
+ bSupportsItf = ComObject::SupportsInterface(pObj, pInterfaceMT);
+ }
+#endif // FEATURE_COMINTEROP
+ }
+ GCPROTECT_END();
+
+ return bSupportsItf;
+}
+
+Assembly *AssemblyBaseObject::GetAssembly()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pAssembly->GetAssembly();
+}
+
+#ifdef _DEBUG
+// Object::DEBUG_SetAppDomain specifies DEBUG_ONLY in the contract to disable SO-tolerance
+// checking for paths that are DEBUG-only.
+//
+// NOTE: currently this is only used by WIN64 allocation helpers, but they really should
+// be calling the JIT helper SetObjectAppDomain (which currently only exists for
+// x86).
+void Object::DEBUG_SetAppDomain(AppDomain *pDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pDomain));
+ }
+ CONTRACTL_END;
+
+ /*_ASSERTE(GetThread()->IsSOTolerant());*/
+ SetAppDomain(pDomain);
+}
+#endif
+
+void Object::SetAppDomain(AppDomain *pDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pDomain));
+ }
+ CONTRACTL_END;
+
+#ifndef _DEBUG
+ if (!GetMethodTable()->IsDomainNeutral())
+ {
+ //
+ // If we have a per-app-domain method table, we can
+ // infer the app domain from the method table, so
+ // there is no reason to mark the object.
+ //
+ // But we don't do this in a debug build, because
+ // we want to be able to detect the case when the
+ // domain was unloaded from underneath an object (and
+ // the MethodTable will be toast in that case.)
+ //
+
+ _ASSERTE(pDomain == GetMethodTable()->GetDomain());
+ }
+ else
+#endif
+ {
+ ADIndex index = pDomain->GetIndex();
+ GetHeader()->SetAppDomainIndex(index);
+ }
+
+ _ASSERTE(GetHeader()->GetAppDomainIndex().m_dwIndex != 0);
+}
+
+
+AppDomain *Object::GetAppDomain()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+#ifndef _DEBUG
+ if (!GetMethodTable()->IsDomainNeutral())
+ return (AppDomain*) GetMethodTable()->GetDomain();
+#endif
+
+ ADIndex index = GetHeader()->GetAppDomainIndex();
+
+ if (index.m_dwIndex == 0)
+ return NULL;
+
+ AppDomain *pDomain = SystemDomain::TestGetAppDomainAtIndex(index);
+
+#if CHECK_APP_DOMAIN_LEAKS
+ if (! g_pConfig->AppDomainLeaks())
+ return pDomain;
+
+ if (IsAppDomainAgile())
+ return NULL;
+
+ //
+ // If an object has an index of an unloaded domain (it's ok to be of a
+ // domain where an unload is in progress, though), go ahead
+ // and make it agile. If this fails, we have an invalid reference
+ // to an unloaded domain. If it succeeds, the object is no longer
+ // contained in that app domain so we can continue.
+ //
+
+ if (pDomain == NULL)
+ {
+ if (SystemDomain::IndexOfAppDomainBeingUnloaded() == index) {
+ // If the appdomain is unloading but still alive and may validly have
+ // instances in it, then use it.
+ AppDomain *tmpDomain = SystemDomain::AppDomainBeingUnloaded();
+ if (tmpDomain && tmpDomain->ShouldHaveInstances())
+ pDomain = tmpDomain;
+ }
+ if (!pDomain && ! TrySetAppDomainAgile(FALSE))
+ {
+ _ASSERTE(!"Attempt to reference an object belonging to an unloaded domain");
+ }
+ }
+#endif
+
+ return pDomain;
+}
+
+STRINGREF AllocateString(SString sstr)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ COUNT_T length = sstr.GetCount(); // count of WCHARs excluding terminating NULL
+ STRINGREF strObj = AllocateString(length);
+ memcpyNoGCRefs(strObj->GetBuffer(), sstr.GetUnicode(), length*sizeof(WCHAR));
+
+ return strObj;
+}
+
+CHARARRAYREF AllocateCharArray(DWORD dwArrayLength)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ return (CHARARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_CHAR, dwArrayLength);
+}
+
+#if CHECK_APP_DOMAIN_LEAKS
+
+BOOL Object::IsAppDomainAgile()
+{
+ WRAPPER_NO_CONTRACT;
+ DEBUG_ONLY_FUNCTION;
+
+ SyncBlock *psb = PassiveGetSyncBlock();
+
+ if (psb)
+ {
+ if (psb->IsAppDomainAgile())
+ return TRUE;
+ if (psb->IsCheckedForAppDomainAgile())
+ return FALSE;
+ }
+ return CheckAppDomain(NULL);
+}
+
+BOOL Object::TrySetAppDomainAgile(BOOL raiseAssert)
+{
+ LIMITED_METHOD_CONTRACT;
+ FAULT_NOT_FATAL();
+ DEBUG_ONLY_FUNCTION;
+
+ BOOL ret = TRUE;
+
+ EX_TRY
+ {
+ ret = SetAppDomainAgile(raiseAssert);
+ }
+ EX_CATCH{}
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return ret;
+}
+
+
+BOOL Object::ShouldCheckAppDomainAgile (BOOL raiseAssert, BOOL *pfResult)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ DEBUG_ONLY_FUNCTION;
+
+ if (!g_pConfig->AppDomainLeaks())
+ {
+ *pfResult = TRUE;
+ return FALSE;
+ }
+
+ if (this == NULL)
+ {
+ *pfResult = TRUE;
+ return FALSE;
+ }
+
+ if (IsAppDomainAgile())
+ {
+ *pfResult = TRUE;
+ return FALSE;
+ }
+
+ // if it's not agile and we've already checked it, just bail early
+ if (IsCheckedForAppDomainAgile())
+ {
+ *pfResult = FALSE;
+ return FALSE;
+ }
+
+ if (IsTypeNeverAppDomainAgile())
+ {
+ if (raiseAssert)
+ _ASSERTE(!"Attempt to reference a domain bound object from an agile location");
+ *pfResult = FALSE;
+ return FALSE;
+ }
+
+ //
+ // Do not allow any object to be set to be agile unless we
+ // are compiling field access checking into the class. This
+ // will help guard against unintentional "agile" propagation
+ // as well.
+ //
+
+ if (!IsTypeAppDomainAgile() && !IsTypeCheckAppDomainAgile())
+ {
+ if (raiseAssert)
+ _ASSERTE(!"Attempt to reference a domain bound object from an agile location");
+ *pfResult = FALSE;
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+BOOL Object::SetAppDomainAgile(BOOL raiseAssert, SetAppDomainAgilePendingTable *pTable)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+ BEGIN_DEBUG_ONLY_CODE;
+ BOOL fResult;
+ if (!this->ShouldCheckAppDomainAgile(raiseAssert, &fResult))
+ return fResult;
+
+ //
+ // If a SetAppDomainAgilePendingTable is provided, then SetAppDomainAgile
+ // was called via SetAppDomainAgile. Simply store this object in the
+ // table, and let the calling SetAppDomainAgile process it later in a
+ // non-recursive manner.
+ //
+
+ if (pTable == NULL)
+ {
+ pTable = (SetAppDomainAgilePendingTable *)ClrFlsGetValue(TlsIdx_AppDomainAgilePendingTable);
+ }
+ if (pTable)
+ {
+ //
+ // If the object is already being checked (on this thread or another),
+ // don't duplicate the effort. Return TRUE to tell the caller to
+ // continue processing other references. Since we're just testing
+ // the bit we don't need to take the spin lock.
+ //
+
+ ObjHeader* pOh = this->GetHeader();
+ _ASSERTE(pOh);
+
+ if (pOh->GetBits() & BIT_SBLK_AGILE_IN_PROGRESS)
+ {
+ return TRUE;
+ }
+
+ pTable->PushReference(this);
+ }
+ else
+ {
+ //
+ // Initialize the table of pending objects
+ //
+
+ SetAppDomainAgilePendingTable table;
+ class ResetPendingTable
+ {
+ public:
+ ResetPendingTable(SetAppDomainAgilePendingTable *pTable)
+ {
+ ClrFlsSetValue(TlsIdx_AppDomainAgilePendingTable, pTable);
+ }
+ ~ResetPendingTable()
+ {
+ ClrFlsSetValue(TlsIdx_AppDomainAgilePendingTable, NULL);
+ }
+ };
+
+ ResetPendingTable resetPendingTable(&table);
+
+ //
+ // Iterate over the table, processing all referenced objects until the
+ // entire graph has its sync block marked, or a non-agile object is
+ // found. The loop will start with the current object, as though we
+ // just removed it from the table as a pending reference.
+ //
+
+ Object *pObject = this;
+
+ do
+ {
+ //
+ // Mark the object to identify recursion.
+ // ~SetAppDomainAgilePendingTable will clean up
+ // BIT_SBLK_AGILE_IN_PROGRESS, so attempt to push the object first
+ // in case it needs to throw an exception.
+ //
+
+ table.PushParent(pObject);
+
+ ObjHeader* pOh = pObject->GetHeader();
+ _ASSERTE(pOh);
+
+ bool fInProgress = false;
+
+ {
+ ENTER_SPIN_LOCK(pOh);
+ {
+ if (pOh->GetBits() & BIT_SBLK_AGILE_IN_PROGRESS)
+ {
+ fInProgress = true;
+ }
+ else
+ {
+ pOh->SetBit(BIT_SBLK_AGILE_IN_PROGRESS);
+ }
+ }
+ LEAVE_SPIN_LOCK(pOh);
+ }
+
+ if (fInProgress)
+ {
+ //
+ // Object is already being processed, so just remove it from
+ // the table and look for another object.
+ //
+
+ bool fReturnedToParent = false;
+ Object *pLastObject = table.GetPendingObject(&fReturnedToParent);
+ CONSISTENCY_CHECK(pLastObject == pObject && fReturnedToParent);
+ }
+ else
+ {
+
+ //
+ // Finish processing this object. Any references will be added to
+ // the table.
+ //
+
+ if (!pObject->SetAppDomainAgileWorker(raiseAssert, &table))
+ return FALSE;
+ }
+
+ //
+ // Find the next object to explore.
+ //
+
+ for (;;)
+ {
+ bool fReturnedToParent;
+ pObject = table.GetPendingObject(&fReturnedToParent);
+
+ //
+ // No more objects in the table?
+ //
+
+ if (!pObject)
+ break;
+
+ //
+ // If we've processed all objects reachable through an object,
+ // then clear BIT_SBLK_AGILE_IN_PROGRESS, and look for another
+ // object in the table.
+ //
+
+ if (fReturnedToParent)
+ {
+ pOh = pObject->GetHeader();
+ _ASSERTE(pOh);
+
+ ENTER_SPIN_LOCK(pOh);
+ pOh->ClrBit(BIT_SBLK_AGILE_IN_PROGRESS);
+ LEAVE_SPIN_LOCK(pOh);
+ }
+ else
+ {
+ //
+ // Re-check whether we should explore through this reference.
+ //
+
+ if (pObject->ShouldCheckAppDomainAgile(raiseAssert, &fResult))
+ break;
+
+ if (!fResult)
+ return FALSE;
+ }
+ }
+ }
+ while (pObject);
+ }
+ END_DEBUG_ONLY_CODE;
+ return TRUE;
+}
+
+
+BOOL Object::SetAppDomainAgileWorker(BOOL raiseAssert, SetAppDomainAgilePendingTable *pTable)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ DEBUG_ONLY_FUNCTION;
+
+ BOOL ret = TRUE;
+
+ if (! IsTypeAppDomainAgile() && ! SetFieldsAgile(raiseAssert, pTable))
+ {
+ SetIsCheckedForAppDomainAgile();
+
+ ret = FALSE;
+ }
+
+ if (ret)
+ {
+ SetSyncBlockAppDomainAgile();
+ }
+
+ return ret;
+}
+
+
+SetAppDomainAgilePendingTable::SetAppDomainAgilePendingTable ()
+ : m_Stack(sizeof(PendingEntry))
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ DEBUG_ONLY_FUNCTION;
+}
+
+
+SetAppDomainAgilePendingTable::~SetAppDomainAgilePendingTable ()
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ DEBUG_ONLY_FUNCTION;
+
+ while (TRUE)
+ {
+ Object *pObj;
+ bool fObjMarked;
+ pObj = GetPendingObject(&fObjMarked);
+ if (pObj == NULL)
+ {
+ break;
+ }
+
+ if (fObjMarked)
+ {
+ ObjHeader* pOh = pObj->GetHeader();
+ _ASSERTE(pOh);
+
+ ENTER_SPIN_LOCK(pOh);
+ pOh->ClrBit(BIT_SBLK_AGILE_IN_PROGRESS);
+ LEAVE_SPIN_LOCK(pOh);
+ }
+    }
+}
+
+
+void Object::SetSyncBlockAppDomainAgile()
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ DEBUG_ONLY_FUNCTION;
+
+ SyncBlock *psb = PassiveGetSyncBlock();
+ if (! psb)
+ {
+ psb = GetSyncBlock();
+ }
+ psb->SetIsAppDomainAgile();
+}
+
+#if CHECK_APP_DOMAIN_LEAKS
+BOOL Object::CheckAppDomain(AppDomain *pAppDomain)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ DEBUG_ONLY_FUNCTION;
+
+ if (!g_pConfig->AppDomainLeaks())
+ return TRUE;
+
+ if (this == NULL)
+ return TRUE;
+
+ if (IsAppDomainAgileRaw())
+ return TRUE;
+
+#ifndef _DEBUG
+ MethodTable *pMT = GetGCSafeMethodTable();
+
+ if (!pMT->IsDomainNeutral())
+ return pAppDomain == pMT->GetDomain();
+#endif
+
+ ADIndex index = GetHeader()->GetAppDomainIndex();
+
+ _ASSERTE(index.m_dwIndex != 0);
+
+ return (pAppDomain != NULL && index == pAppDomain->GetIndex());
+}
+#endif
+
+BOOL Object::IsTypeAppDomainAgile()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ DEBUG_ONLY_FUNCTION;
+
+ MethodTable *pMT = GetGCSafeMethodTable();
+
+ if (pMT->IsArray())
+ {
+ TypeHandle th = pMT->GetApproxArrayElementTypeHandle();
+ return th.IsArrayOfElementsAppDomainAgile();
+ }
+ else
+ return pMT->GetClass()->IsAppDomainAgile();
+}
+
+BOOL Object::IsTypeCheckAppDomainAgile()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ DEBUG_ONLY_FUNCTION;
+
+ MethodTable *pMT = GetGCSafeMethodTable();
+
+ if (pMT->IsArray())
+ {
+ TypeHandle th = pMT->GetApproxArrayElementTypeHandle();
+ return th.IsArrayOfElementsCheckAppDomainAgile();
+ }
+ else
+ return pMT->GetClass()->IsCheckAppDomainAgile();
+}
+
+BOOL Object::IsTypeNeverAppDomainAgile()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ DEBUG_ONLY_FUNCTION;
+
+ return !IsTypeAppDomainAgile() && !IsTypeCheckAppDomainAgile();
+}
+
+BOOL Object::IsTypeTypesafeAppDomainAgile()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ DEBUG_ONLY_FUNCTION;
+
+ return IsTypeAppDomainAgile() && !IsTypeCheckAppDomainAgile();
+}
+
+BOOL Object::TryAssignAppDomain(AppDomain *pAppDomain, BOOL raiseAssert)
+{
+ LIMITED_METHOD_CONTRACT;
+ FAULT_NOT_FATAL();
+ DEBUG_ONLY_FUNCTION;
+
+ BOOL ret = TRUE;
+
+ EX_TRY
+ {
+ ret = AssignAppDomain(pAppDomain,raiseAssert);
+ }
+ EX_CATCH{}
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return ret;
+}
+
+BOOL Object::AssignAppDomain(AppDomain *pAppDomain, BOOL raiseAssert)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ DEBUG_ONLY_FUNCTION;
+
+ if (!g_pConfig->AppDomainLeaks())
+ return TRUE;
+
+ if (CheckAppDomain(pAppDomain))
+ return TRUE;
+
+ //
+ // App domain does not match; try to make this object agile
+ //
+
+ if (IsTypeNeverAppDomainAgile())
+ {
+ if (raiseAssert)
+ {
+ if (pAppDomain == NULL)
+ _ASSERTE(!"Attempt to reference a domain bound object from an agile location");
+ else
+ _ASSERTE(!"Attempt to reference a domain bound object from a different domain");
+ }
+ return FALSE;
+ }
+ else
+ {
+ //
+ // Make object agile
+ //
+
+ if (! IsTypeAppDomainAgile() && ! SetFieldsAgile(raiseAssert))
+ {
+ SetIsCheckedForAppDomainAgile();
+ return FALSE;
+ }
+
+ SetSyncBlockAppDomainAgile();
+
+ return TRUE;
+ }
+}
+
+BOOL Object::AssignValueTypeAppDomain(MethodTable *pMT, void *base, AppDomain *pAppDomain, BOOL raiseAssert)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ DEBUG_ONLY_FUNCTION;
+
+ if (!g_pConfig->AppDomainLeaks())
+ return TRUE;
+
+ if (pMT->GetClass()->IsAppDomainAgile())
+ return TRUE;
+
+ if (pAppDomain == NULL)
+ {
+ //
+ // Do not allow any object to be set to be agile unless we
+ // are compiling field access checking into the class. This
+ // will help guard against unintentional "agile" propagation
+ // as well.
+ //
+
+ if (pMT->GetClass()->IsNeverAppDomainAgile())
+ {
+ _ASSERTE(!"Attempt to reference a domain bound object from an agile location");
+ return FALSE;
+ }
+
+ return SetClassFieldsAgile(pMT, base, TRUE/*=baseIsVT*/, raiseAssert);
+ }
+ else
+ {
+ return ValidateClassFields(pMT, base, TRUE/*=baseIsVT*/, pAppDomain, raiseAssert);
+ }
+}
+
+BOOL Object::SetFieldsAgile(BOOL raiseAssert, SetAppDomainAgilePendingTable *pTable)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ BOOL result = TRUE;
+
+ MethodTable *pMT = GetGCSafeMethodTable();
+
+ if (pMT->IsArray())
+ {
+ switch (pMT->GetArrayElementType())
+ {
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ {
+ PtrArray *pArray = (PtrArray *) this;
+
+ DWORD n = pArray->GetNumComponents();
+ OBJECTREF *p = (OBJECTREF *)
+ (((BYTE*)pArray) + ArrayBase::GetDataPtrOffset(GetGCSafeMethodTable()));
+
+ for (DWORD i=0; i<n; i++)
+ {
+ if (!p[i]->SetAppDomainAgile(raiseAssert, pTable))
+ result = FALSE;
+ }
+
+ break;
+ }
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ ArrayBase *pArray = (ArrayBase *) this;
+
+ MethodTable *pElemMT = pMT->GetApproxArrayElementTypeHandle().GetMethodTable();
+
+ BYTE *p = ((BYTE*)pArray) + ArrayBase::GetDataPtrOffset(GetGCSafeMethodTable());
+ SIZE_T size = pArray->GetComponentSize();
+ SIZE_T n = pArray->GetNumComponents();
+
+ for (SIZE_T i=0; i<n; i++)
+ if (!SetClassFieldsAgile(pElemMT, p + i*size, TRUE/*=baseIsVT*/, raiseAssert, pTable))
+ result = FALSE;
+
+ break;
+ }
+
+ default:
+ _ASSERTE(!"Unexpected array type");
+ }
+ }
+ else
+ {
+ if (pMT->GetClass()->IsNeverAppDomainAgile())
+ {
+ _ASSERTE(!"Attempt to reference a domain bound object from an agile location");
+ return FALSE;
+ }
+
+ while (pMT != NULL && !pMT->GetClass()->IsTypesafeAppDomainAgile())
+ {
+ if (!SetClassFieldsAgile(pMT, this, FALSE/*=baseIsVT*/, raiseAssert, pTable))
+ result = FALSE;
+
+ pMT = pMT->GetParentMethodTable();
+
+ if (pMT != NULL && pMT->GetClass()->IsNeverAppDomainAgile())
+ {
+ _ASSERTE(!"Attempt to reference a domain bound object from an agile location");
+ return FALSE;
+ }
+ }
+ }
+
+ return result;
+}
+
+BOOL Object::SetClassFieldsAgile(MethodTable *pMT, void *base, BOOL baseIsVT, BOOL raiseAssert, SetAppDomainAgilePendingTable *pTable)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ BOOL result = TRUE;
+
+ if (pMT->GetClass()->IsNeverAppDomainAgile())
+ {
+ _ASSERTE(!"Attempt to reference a domain bound object from an agile location");
+ return FALSE;
+ }
+
+ // This type approximation is OK since we are only checking some layout information
+ // and all compatible instantiations share the same GC characteristics
+ ApproxFieldDescIterator fdIterator(pMT, ApproxFieldDescIterator::INSTANCE_FIELDS);
+ FieldDesc* pField;
+
+ while ((pField = fdIterator.Next()) != NULL)
+ {
+ if (pField->IsDangerousAppDomainAgileField())
+ {
+ if (pField->GetFieldType() == ELEMENT_TYPE_CLASS)
+ {
+ OBJECTREF ref;
+
+ if (baseIsVT)
+ ref = *(OBJECTREF*) pField->GetAddressNoThrowNoGC(base);
+ else
+ ref = *(OBJECTREF*) pField->GetAddressGuaranteedInHeap(base);
+
+ if (ref != 0 && !ref->IsAppDomainAgile())
+ {
+ if (!ref->SetAppDomainAgile(raiseAssert, pTable))
+ result = FALSE;
+ }
+ }
+ else if (pField->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ {
+ // Be careful here - we may not have loaded a value
+ // type field of a class under prejit, and we don't
+ // want to trigger class loading here.
+
+ TypeHandle th = pField->LookupFieldTypeHandle();
+ if (!th.IsNull())
+ {
+ void *nestedBase;
+
+ if (baseIsVT)
+ nestedBase = pField->GetAddressNoThrowNoGC(base);
+ else
+ nestedBase = pField->GetAddressGuaranteedInHeap(base);
+
+ if (!SetClassFieldsAgile(th.GetMethodTable(),
+ nestedBase,
+ TRUE/*=baseIsVT*/,
+ raiseAssert,
+ pTable))
+ {
+ result = FALSE;
+ }
+ }
+ }
+ else
+ {
+ _ASSERTE(!"Bad field type");
+ }
+ }
+ }
+
+ return result;
+}
+
+BOOL Object::ValidateAppDomain(AppDomain *pAppDomain)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+
+ if (!g_pConfig->AppDomainLeaks())
+ return TRUE;
+
+ if (this == NULL)
+ return TRUE;
+
+ if (CheckAppDomain())
+ return ValidateAppDomainFields(pAppDomain);
+
+ return AssignAppDomain(pAppDomain);
+}
+
+BOOL Object::ValidateAppDomainFields(AppDomain *pAppDomain)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ BOOL result = TRUE;
+
+ MethodTable *pMT = GetGCSafeMethodTable();
+
+ while (pMT != NULL && !pMT->GetClass()->IsTypesafeAppDomainAgile())
+ {
+ if (!ValidateClassFields(pMT, this, FALSE/*=baseIsVT*/, pAppDomain))
+ result = FALSE;
+
+ pMT = pMT->GetParentMethodTable();
+ }
+
+ return result;
+}
+
+BOOL Object::ValidateValueTypeAppDomain(MethodTable *pMT, void *base, AppDomain *pAppDomain, BOOL raiseAssert)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ if (!g_pConfig->AppDomainLeaks())
+ return TRUE;
+
+ if (pAppDomain == NULL)
+ {
+ if (pMT->GetClass()->IsTypesafeAppDomainAgile())
+ return TRUE;
+ else if (pMT->GetClass()->IsNeverAppDomainAgile())
+ {
+ if (raiseAssert)
+ _ASSERTE(!"Value type cannot be app domain agile");
+ return FALSE;
+ }
+ }
+
+ return ValidateClassFields(pMT, base, TRUE/*=baseIsVT*/, pAppDomain, raiseAssert);
+}
+
+BOOL Object::ValidateClassFields(MethodTable *pMT, void *base, BOOL baseIsVT, AppDomain *pAppDomain, BOOL raiseAssert)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ BOOL result = TRUE;
+
+ // This type approximation is OK since we are only checking some layout information
+ // and all compatible instantiations share the same GC characteristics
+ ApproxFieldDescIterator fdIterator(pMT, ApproxFieldDescIterator::INSTANCE_FIELDS);
+ FieldDesc* pField;
+
+ while ((pField = fdIterator.Next()) != NULL)
+ {
+ if (!pMT->GetClass()->IsCheckAppDomainAgile()
+ || pField->IsDangerousAppDomainAgileField())
+ {
+ if (pField->GetFieldType() == ELEMENT_TYPE_CLASS)
+ {
+ OBJECTREF ref;
+
+ if (baseIsVT)
+ ref = ObjectToOBJECTREF(*(Object**) pField->GetAddressNoThrowNoGC(base));
+ else
+ ref = ObjectToOBJECTREF(*(Object**) pField->GetAddressGuaranteedInHeap(base));
+
+ if (ref != 0 && !ref->AssignAppDomain(pAppDomain, raiseAssert))
+ result = FALSE;
+ }
+ else if (pField->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ {
+ // Be careful here - we may not have loaded a value
+ // type field of a class under prejit, and we don't
+ // want to trigger class loading here.
+
+ TypeHandle th = pField->LookupFieldTypeHandle();
+ if (!th.IsNull())
+ {
+ void *nestedBase;
+
+ if (baseIsVT)
+ nestedBase = pField->GetAddressNoThrowNoGC(base);
+ else
+ nestedBase = pField->GetAddressGuaranteedInHeap(base);
+
+ if (!ValidateValueTypeAppDomain(th.GetMethodTable(),
+ nestedBase,
+ pAppDomain,
+ raiseAssert
+ ))
+ result = FALSE;
+
+ }
+ }
+ }
+ }
+
+ return result;
+}
+
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+void Object::ValidatePromote(ScanContext *sc, DWORD flags)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+
+#if defined (VERIFY_HEAP)
+ Validate();
+#endif
+
+#if CHECK_APP_DOMAIN_LEAKS
+ // Do app domain integrity checking here
+ if (g_pConfig->AppDomainLeaks())
+ {
+ AppDomain *pDomain = GetAppDomain();
+
+// This assert will incorrectly trip when
+// InternalCrossContextCallback is on the stack. InternalCrossContextCallback
+// intentionally passes an object across domains on the same thread.
+#if 0
+ if (flags & GC_CALL_CHECK_APP_DOMAIN)
+ _ASSERTE(TryAssignAppDomain(sc->pCurrentDomain));
+#endif
+
+ if ((flags & GC_CALL_CHECK_APP_DOMAIN)
+ && pDomain != NULL
+ && !pDomain->ShouldHaveRoots()
+ && !TrySetAppDomainAgile(FALSE))
+ {
+ _ASSERTE(!"Found GC object which should have been purged during app domain unload.");
+ }
+ }
+#endif
+}
+
+void Object::ValidateHeap(Object *from, BOOL bDeep)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+#if defined (VERIFY_HEAP)
+ //no need to verify the next object's header in this case
+ //since this is called from verify_heap, which will verify every object anyway
+ Validate(bDeep, FALSE);
+#endif
+
+#if CHECK_APP_DOMAIN_LEAKS
+ // Do app domain integrity checking here
+ if (g_pConfig->AppDomainLeaks() && bDeep)
+ {
+ AppDomain *pDomain = from->GetAppDomain();
+
+ //
+ // Don't perform check if we're checking for agility, and the containing type is not
+ // marked checked agile - this will cover "proxy" type agility
+ // where cross references are allowed
+ //
+
+ // Changed the GetMethodTable calls in this function to GetGCSafeMethodTable
+ // because GC could use the mark bit to simulate a mark and can have it set during
+ // verify heap (and would be cleared when verify heap is done).
+ // We'd get an AV pretty soon anyway if it were truly mistakenly set.
+ if (pDomain != NULL || from->GetGCSafeMethodTable()->GetClass()->IsCheckAppDomainAgile())
+ {
+ //special case: thread object is allowed to hold a context belonging to the current domain
+ if (from->GetGCSafeMethodTable() == g_pThreadClass &&
+ (
+#ifdef FEATURE_REMOTING
+ this == OBJECTREFToObject(((ThreadBaseObject *)from)->m_ExposedContext) ||
+#endif
+#ifndef FEATURE_CORECLR
+ this == OBJECTREFToObject(((ThreadBaseObject *)from)->m_ExecutionContext) ||
+#endif
+ false))
+ {
+ if (((ThreadBaseObject *)from)->m_InternalThread)
+ _ASSERTE (CheckAppDomain (((ThreadBaseObject *)from)->m_InternalThread->GetDomain ()));
+ }
+ // special case: Overlapped has a field OverlappedData which may be moved to default domain
+ // during AD unload
+ else if (GetGCSafeMethodTable() == g_pOverlappedDataClass &&
+ GetAppDomainIndex() == SystemDomain::System()->DefaultDomain()->GetIndex())
+ {
+ }
+ else
+ {
+ TryAssignAppDomain(pDomain);
+ }
+ }
+
+ if (pDomain != NULL
+ && !pDomain->ShouldHaveInstances()
+ && !TrySetAppDomainAgile(FALSE))
+ _ASSERTE(!"Found GC object which should have been purged during app domain unload.");
+ }
+#endif
+}
+
+void Object::SetOffsetObjectRef(DWORD dwOffset, size_t dwValue)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ OBJECTREF* location;
+ OBJECTREF o;
+
+ location = (OBJECTREF *) &GetData()[dwOffset];
+ o = ObjectToOBJECTREF(*(Object **) &dwValue);
+
+ SetObjectReference( location, o, GetAppDomain() );
+}
+
+/******************************************************************/
+/*
+ * Write Barrier Helper
+ *
+ * Use this function to assign an object reference into
+ * another object.
+ *
+ * It will set the appropriate GC Write Barrier data
+ */
+
+#if CHECK_APP_DOMAIN_LEAKS
+void SetObjectReferenceChecked(OBJECTREF *dst,OBJECTREF ref,AppDomain *pAppDomain)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+
+ DEBUG_ONLY_FUNCTION;
+
+ ref->TryAssignAppDomain(pAppDomain);
+ return SetObjectReferenceUnchecked(dst,ref);
+}
+#endif
+
+void SetObjectReferenceUnchecked(OBJECTREF *dst,OBJECTREF ref)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+
+ // Assign value. We use casting to avoid going thru the overloaded
+ // OBJECTREF= operator which in this case would trigger a false
+ // write-barrier violation assert.
+ VolatileStore((Object**)dst, OBJECTREFToObject(ref));
+#ifdef _DEBUG
+ Thread::ObjectRefAssign(dst);
+#endif
+ ErectWriteBarrier(dst, ref);
+}
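+
+// A minimal sketch (illustrative, not compiled) of storing a reference field
+// through the helper above; pObj and offset are hypothetical names:
+//
+//   OBJECTREF* pSlot = (OBJECTREF*)(pObj->GetData() + offset);
+//   SetObjectReferenceUnchecked(pSlot, newValue);  // volatile store + write barrier
+//
+// A raw "*pSlot = newValue" would skip ErectWriteBarrier, so a reference from
+// an older generation into a younger one could be missed by an ephemeral GC.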
+
+/******************************************************************/
+ // copies src to dest worrying about write barriers.
+ // Note that it can work on normal objects (but not arrays)
+ // if dest points just after the VTABLE.
+#if CHECK_APP_DOMAIN_LEAKS
+void CopyValueClassChecked(void* dest, void* src, MethodTable *pMT, AppDomain *pDomain)
+{
+ STATIC_CONTRACT_DEBUG_ONLY;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ DEBUG_ONLY_FUNCTION;
+
+ FAULT_NOT_FATAL();
+ EX_TRY
+ {
+ Object::AssignValueTypeAppDomain(pMT, src, pDomain);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ CopyValueClassUnchecked(dest,src,pMT);
+}
+#endif
+
+void STDCALL CopyValueClassUnchecked(void* dest, void* src, MethodTable *pMT)
+{
+
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ _ASSERTE(!pMT->IsArray()); // this code makes assumptions that do not hold for arrays.
+
+ // Copy the bulk of the data, and any non-GC refs.
+ switch (pMT->GetNumInstanceFieldBytes())
+ {
+ case 1:
+ VolatileStore((UINT8*)dest, *(UINT8*)src);
+ break;
+#ifndef ALIGN_ACCESS
+ // we can hit an alignment fault if the value type has multiple
+ // smaller fields. Example: if there are two I4 fields, the
+ // value class can be aligned to 4-byte boundaries, yet the
+ // NumInstanceFieldBytes is 8
+ case 2:
+ VolatileStore((UINT16*)dest, *(UINT16*)src);
+ break;
+ case 4:
+ VolatileStore((UINT32*)dest, *(UINT32*)src);
+ break;
+ case 8:
+ VolatileStore((UINT64*)dest, *(UINT64*)src);
+ break;
+#endif // !ALIGN_ACCESS
+ default:
+ memcpyNoGCRefs(dest, src, pMT->GetNumInstanceFieldBytes());
+ break;
+ }
+
+ // Tell the GC about any copies.
+ if (pMT->ContainsPointers())
+ {
+ CGCDesc* map = CGCDesc::GetCGCDescFromMT(pMT);
+ CGCDescSeries* cur = map->GetHighestSeries();
+ CGCDescSeries* last = map->GetLowestSeries();
+ DWORD size = pMT->GetBaseSize();
+ _ASSERTE(cur >= last);
+ do
+ {
+ // offset to embedded references in this series must be
+ // adjusted by the VTable pointer when in the unboxed state.
+ size_t offset = cur->GetSeriesOffset() - sizeof(void*);
+ OBJECTREF* srcPtr = (OBJECTREF*)(((BYTE*) src) + offset);
+ OBJECTREF* destPtr = (OBJECTREF*)(((BYTE*) dest) + offset);
+ OBJECTREF* srcPtrStop = (OBJECTREF*)((BYTE*) srcPtr + cur->GetSeriesSize() + size);
+ while (srcPtr < srcPtrStop)
+ {
+ SetObjectReferenceUnchecked(destPtr, ObjectToOBJECTREF(*(Object**)srcPtr));
+ srcPtr++;
+ destPtr++;
+ }
+ cur--;
+ } while (cur >= last);
+ }
+}
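+
+// A worked sketch (illustrative, not compiled) of the series arithmetic above.
+// GCDesc offsets are recorded for the boxed layout, which begins with a method
+// table pointer, so the unboxed copy subtracts sizeof(void*); the series size
+// appears to be stored biased by the base size, which is why the stop pointer
+// adds "size" back:
+//
+//   size_t offset = cur->GetSeriesOffset() - sizeof(void*);        // boxed -> unboxed
+//   OBJECTREF* stop = (OBJECTREF*)((BYTE*)srcPtr + cur->GetSeriesSize() + size);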
+
+#if defined (VERIFY_HEAP)
+
+#include "dbginterface.h"
+
+ // make the checking code go as fast as possible!
+#if defined(_MSC_VER)
+#pragma optimize("tgy", on)
+#endif
+
+#define CREATE_CHECK_STRING(x) #x
+#define CHECK_AND_TEAR_DOWN(x) \
+ do{ \
+ if (!(x)) \
+ { \
+ _ASSERTE(!CREATE_CHECK_STRING(x)); \
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); \
+ } \
+ } while (0)
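+
+// A usage sketch (illustrative, not compiled): wrapping a predicate in the
+// macro above fails fast instead of letting verification limp past a
+// corrupted object:
+//
+//   CHECK_AND_TEAR_DOWN(pMT->Validate());  // assert, then fatal EE error, if false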
+
+VOID Object::Validate(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+
+ if (this == NULL)
+ {
+ return; // NULL is ok
+ }
+
+ if (g_IBCLogger.InstrEnabled() && !GCStress<cfg_any>::IsEnabled())
+ {
+ // If we are instrumenting for IBC (and GCStress is not enabled)
+ // then skip these Object::Validate() as they slow down the
+ // instrument phase by an order of magnitude
+ return;
+ }
+
+ if (g_fEEShutDown & ShutDown_Phase2)
+ {
+ // During second phase of shutdown the code below is not guaranteed to work.
+ return;
+ }
+
+#ifdef _DEBUG
+ {
+ BEGIN_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
+ Thread *pThread = GetThread();
+
+ if (pThread != NULL && !(pThread->PreemptiveGCDisabled()))
+ {
+ // Debugger helper threads are special in that they take over for
+ // what would normally be a nonEE thread (the RCThread). If an
+ // EE thread is doing RCThread duty, then it should be treated
+ // as such.
+ //
+ // There are some GC threads in the same kind of category. Note that
+ // GetThread() sometimes returns them, if DLL_THREAD_ATTACH notifications
+ // have run some managed code.
+ if (!dbgOnly_IsSpecialEEThread() && !IsGCSpecialThread())
+ _ASSERTE(!"OBJECTREF being accessed while thread is in preemptive GC mode.");
+ }
+ END_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
+ }
+#endif
+
+
+ { // ValidateInner can throw or fault on failure which violates contract.
+ CONTRACT_VIOLATION(ThrowsViolation | FaultViolation);
+
+ // using inner helper because of TRY and stack objects with destructors.
+ ValidateInner(bDeep, bVerifyNextHeader, bVerifySyncBlock);
+ }
+}
+
+VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock)
+{
+ STATIC_CONTRACT_THROWS; // See CONTRACT_VIOLATION above
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FAULT; // See CONTRACT_VIOLATION above
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+
+ int lastTest = 0;
+
+ EX_TRY
+ {
+ // in order to avoid contract violations in the EH code we'll allow AVs here,
+ // they'll be handled in the catch block
+ AVInRuntimeImplOkayHolder avOk;
+
+ MethodTable *pMT = GetGCSafeMethodTable();
+ lastTest = 1;
+
+ CHECK_AND_TEAR_DOWN(pMT->Validate());
+ lastTest = 2;
+
+ bool noRangeChecks =
+ (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_NO_RANGE_CHECKS) == EEConfig::HEAPVERIFY_NO_RANGE_CHECKS;
+
+ // noRangeChecks depends on initial values being FALSE
+ BOOL bSmallObjectHeapPtr = FALSE, bLargeObjectHeapPtr = FALSE;
+ if (!noRangeChecks)
+ {
+ bSmallObjectHeapPtr = GCHeap::GetGCHeap()->IsHeapPointer(this, TRUE);
+ if (!bSmallObjectHeapPtr)
+ bLargeObjectHeapPtr = GCHeap::GetGCHeap()->IsHeapPointer(this);
+
+ CHECK_AND_TEAR_DOWN(bSmallObjectHeapPtr || bLargeObjectHeapPtr);
+ }
+
+ lastTest = 3;
+
+ if (bDeep)
+ {
+ CHECK_AND_TEAR_DOWN(GetHeader()->Validate(bVerifySyncBlock));
+ }
+
+ lastTest = 4;
+
+ if (bDeep && (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)) {
+ GCHeap::GetGCHeap()->ValidateObjectMember(this);
+ }
+
+ lastTest = 5;
+
+ // bSmallObjectHeapPtr is initialized to FALSE, so there is no need to
+ // re-check noRangeChecks here: when range checks are skipped,
+ // bSmallObjectHeapPtr stays false and this block is never entered.
+ if (bSmallObjectHeapPtr) {
+ CHECK_AND_TEAR_DOWN(!GCHeap::GetGCHeap()->IsObjectInFixedHeap(this));
+ }
+
+ lastTest = 6;
+
+#if CHECK_APP_DOMAIN_LEAKS
+ // when it's not safe to verify the fields, it's not safe to verify AppDomain either
+ // because the process might try to access fields.
+ if (bDeep && g_pConfig->AppDomainLeaks())
+ {
+ //
+ // Check to see that our domain is valid. This will assert if it has been unloaded.
+ //
+ SCAN_IGNORE_FAULT;
+ GetAppDomain();
+ }
+#endif
+
+ lastTest = 7;
+
+ // try to validate next object's header
+ if (bDeep
+ && bVerifyNextHeader
+ && CNameSpace::GetGcRuntimeStructuresValid ()
+ //NextObj could be very slow if concurrent GC is going on
+ && !(GCHeap::IsGCHeapInitialized() && GCHeap::GetGCHeap ()->IsConcurrentGCInProgress ()))
+ {
+ Object * nextObj = GCHeap::GetGCHeap ()->NextObj (this);
+ if ((nextObj != NULL) &&
+ (nextObj->GetGCSafeMethodTable() != g_pFreeObjectMethodTable))
+ {
+ CHECK_AND_TEAR_DOWN(nextObj->GetHeader()->Validate(FALSE));
+ }
+ }
+
+ lastTest = 8;
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+ if (pMT->RequiresAlign8())
+ {
+ CHECK_AND_TEAR_DOWN((((size_t)this) & 0x7) == (pMT->IsValueType()? 4:0));
+ }
+ lastTest = 9;
+#endif // FEATURE_64BIT_ALIGNMENT
+
+ }
+ EX_CATCH
+ {
+ STRESS_LOG3(LF_ASSERT, LL_ALWAYS, "Detected use of corrupted OBJECTREF: %p [MT=%p] (lastTest=%d)", this, lastTest > 0 ? (*(size_t*)this) : 0, lastTest);
+ CHECK_AND_TEAR_DOWN(!"Detected use of a corrupted OBJECTREF. Possible GC hole.");
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+
+#endif // VERIFY_HEAP
+
+/*==================================NewString===================================
+**Action: Creates a System.String object.
+**Returns:
+**Arguments:
+**Exceptions:
+==============================================================================*/
+STRINGREF StringObject::NewString(INT32 length) {
+ CONTRACTL {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(length>=0);
+ } CONTRACTL_END;
+
+ STRINGREF pString;
+
+ if (length<0) {
+ return NULL;
+ } else if (length == 0) {
+ return GetEmptyString();
+ } else {
+ pString = AllocateString(length);
+ _ASSERTE(pString->GetBuffer()[length] == 0);
+
+ return pString;
+ }
+}
+
+
+/*==================================NewString===================================
+**Action: Many years ago, VB didn't have the concept of a byte array, so enterprising
+** users created one by allocating a BSTR with an odd length and using it to
+** store bytes. A generation later, we're still stuck supporting this behavior.
+** The way that we do this is to take advantage of the difference between the
+** array length and the string length. The string length will always be the
+** number of characters between the start of the string and the terminating 0.
+** If we need an odd number of bytes, we'll take one wchar after the terminating 0.
+** (e.g. at position StringLength+1). The high-order byte of this wchar is
+** reserved for flags and the low-order byte is our odd byte. This function is
+** used to allocate a string of that shape, but we don't actually mark the
+** trailing byte as being in use yet.
+**Returns: A newly allocated string. Null if length is less than 0.
+**Arguments: length -- the length of the string to allocate
+** bHasTrailByte -- whether the string also has a trailing byte.
+**Exceptions: OutOfMemoryException if AllocateString fails.
+==============================================================================*/
+STRINGREF StringObject::NewString(INT32 length, BOOL bHasTrailByte) {
+ CONTRACTL {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(length>=0 && length != INT32_MAX);
+ } CONTRACTL_END;
+
+ STRINGREF pString;
+ if (length<0 || length == INT32_MAX) {
+ return NULL;
+ } else if (length == 0) {
+ return GetEmptyString();
+ } else {
+ pString = AllocateString(length);
+ _ASSERTE(pString->GetBuffer()[length]==0);
+ if (bHasTrailByte) {
+ _ASSERTE(pString->GetBuffer()[length+1]==0);
+ }
+ }
+
+ return pString;
+}
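+
+// A minimal sketch (illustrative, not compiled) of how a caller would request
+// the VB byte-array shape described above; cBytes is a hypothetical byte count:
+//
+//   INT32 charLength = cBytes / 2;            // two bytes per WCHAR
+//   BOOL bHasTrail = (cBytes % 2) != 0;       // odd byte lives past the null
+//   STRINGREF s = NewString(charLength, bHasTrail);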
+
+//========================================================================
+// Creates a System.String object and initializes from
+// the supplied null-terminated C string.
+//
+// Maps NULL to null. This function does *not* return null to indicate
+// error situations: it throws an exception instead.
+//========================================================================
+STRINGREF StringObject::NewString(const WCHAR *pwsz)
+{
+ CONTRACTL {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ if (!pwsz)
+ {
+ return NULL;
+ }
+ else
+ {
+
+ DWORD nch = (DWORD)wcslen(pwsz);
+ if (nch==0) {
+ return GetEmptyString();
+ }
+
+#if 0
+ //
+ // This assert is disabled because it is valid for us to get a
+ // pointer from the gc heap here as long as it is pinned. This
+ // can happen when a string is marshalled to unmanaged by
+ // pinning and then later put into a struct and that struct is
+ // then marshalled to managed.
+ //
+ _ASSERTE(!GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) ||
+ !"pwsz can not point to GC Heap");
+#endif // 0
+
+ STRINGREF pString = AllocateString( nch );
+
+ memcpyNoGCRefs(pString->GetBuffer(), pwsz, nch*sizeof(WCHAR));
+ _ASSERTE(pString->GetBuffer()[nch] == 0);
+ return pString;
+ }
+}
+
+#if defined(_MSC_VER) && defined(_TARGET_X86_)
+#pragma optimize("y", on) // Small critical routines, don't put in EBP frame
+#endif
+
+STRINGREF StringObject::NewString(const WCHAR *pwsz, int length) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(length>=0);
+ } CONTRACTL_END;
+
+ if (!pwsz)
+ {
+ return NULL;
+ }
+ else if (length <= 0) {
+ return GetEmptyString();
+ } else {
+#if 0
+ //
+ // This assert is disabled because it is valid for us to get a
+ // pointer from the gc heap here as long as it is pinned. This
+ // can happen when a string is marshalled to unmanaged by
+ // pinning and then later put into a struct and that struct is
+ // then marshalled to managed.
+ //
+ _ASSERTE(!GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) ||
+ !"pwsz can not point to GC Heap");
+#endif // 0
+ STRINGREF pString = AllocateString(length);
+
+ memcpyNoGCRefs(pString->GetBuffer(), pwsz, length*sizeof(WCHAR));
+ _ASSERTE(pString->GetBuffer()[length] == 0);
+ return pString;
+ }
+}
+
+#if defined(_MSC_VER) && defined(_TARGET_X86_)
+#pragma optimize("", on) // Go back to command line default optimizations
+#endif
+
+STRINGREF StringObject::NewString(LPCUTF8 psz)
+{
+ CONTRACTL {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ PRECONDITION(CheckPointer(psz));
+ } CONTRACTL_END;
+
+ int length = (int)strlen(psz);
+ if (length == 0) {
+ return GetEmptyString();
+ }
+ CQuickBytes qb;
+ WCHAR* pwsz = (WCHAR*) qb.AllocThrows((length) * sizeof(WCHAR));
+ length = WszMultiByteToWideChar(CP_UTF8, 0, psz, length, pwsz, length);
+ if (length == 0) {
+ COMPlusThrow(kArgumentException, W("Arg_InvalidUTF8String"));
+ }
+ return NewString(pwsz, length);
+}
+
+STRINGREF StringObject::NewString(LPCUTF8 psz, int cBytes)
+{
+ CONTRACTL {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ PRECONDITION(CheckPointer(psz, NULL_OK));
+ } CONTRACTL_END;
+
+ if (!psz)
+ return NULL;
+
+ _ASSERTE(psz);
+ _ASSERTE(cBytes >= 0);
+ if (cBytes == 0) {
+ return GetEmptyString();
+ }
+ int cWszBytes = 0;
+ if (!ClrSafeInt<int>::multiply(cBytes, sizeof(WCHAR), cWszBytes))
+ COMPlusThrowOM();
+ CQuickBytes qb;
+ WCHAR* pwsz = (WCHAR*) qb.AllocThrows(cWszBytes);
+ int length = WszMultiByteToWideChar(CP_UTF8, 0, psz, cBytes, pwsz, cBytes);
+ if (length == 0) {
+ COMPlusThrow(kArgumentException, W("Arg_InvalidUTF8String"));
+ }
+ return NewString(pwsz, length);
+}
+
+//
+//
+// STATIC MEMBER VARIABLES
+//
+//
+STRINGREF* StringObject::EmptyStringRefPtr=NULL;
+
+//The special string helpers are used as flag bits for weird strings that have bytes
+//after the terminating 0. The only case where we use this right now is the VB BSTR as
+//byte array which is described in MakeStringAsByteArrayFromBytes.
+#define SPECIAL_STRING_VB_BYTE_ARRAY 0x100
+
+FORCEINLINE BOOL MARKS_VB_BYTE_ARRAY(WCHAR x)
+{
+ return static_cast<BOOL>(x & SPECIAL_STRING_VB_BYTE_ARRAY);
+}
+
+FORCEINLINE WCHAR MAKE_VB_TRAIL_BYTE(BYTE x)
+{
+ return static_cast<WCHAR>(x) | SPECIAL_STRING_VB_BYTE_ARRAY;
+}
+
+FORCEINLINE BYTE GET_VB_TRAIL_BYTE(WCHAR x)
+{
+ return static_cast<BYTE>(x & 0xFF);
+}
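+
+// An illustrative round trip through the helpers above (not compiled):
+//
+//   WCHAR w = MAKE_VB_TRAIL_BYTE(0xAB);       // 0x01AB: flag bit | payload
+//   _ASSERTE(MARKS_VB_BYTE_ARRAY(w));         // bit 0x100 is set
+//   _ASSERTE(GET_VB_TRAIL_BYTE(w) == 0xAB);   // low byte recovers the payload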
+
+
+/*==============================InitEmptyStringRefPtr============================
+**Action: Gets an empty string refptr and caches the result.
+**Returns: The retrieved STRINGREF.
+==============================================================================*/
+STRINGREF* StringObject::InitEmptyStringRefPtr() {
+ CONTRACTL {
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ GCX_COOP();
+
+ EEStringData data(0, W(""), TRUE);
+ EmptyStringRefPtr = SystemDomain::System()->DefaultDomain()->GetLoaderAllocator()->GetStringObjRefPtrFromUnicodeString(&data);
+ return EmptyStringRefPtr;
+}
+
+/*=============================StringInitCharHelper=============================
+**Action:
+**Returns:
+**Arguments:
+**Exceptions:
+==============================================================================*/
+STRINGREF __stdcall StringObject::StringInitCharHelper(LPCSTR pszSource, int length) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ STRINGREF pString=NULL;
+ int dwSizeRequired=0;
+ _ASSERTE(length>=-1);
+
+ if (!pszSource || length == 0) {
+ return StringObject::GetEmptyString();
+ }
+ else if ((size_t)pszSource < 64000) {
+ COMPlusThrow(kArgumentException, W("Arg_MustBeStringPtrNotAtom"));
+ }
+
+ // Make sure we can read from the pointer.
+ // This is better than trying to read from the pointer and catching the access violation exceptions.
+ if( length == -1) {
+ length = (INT32)strlen(pszSource);
+ }
+
+ if(length > 0) {
+ dwSizeRequired=WszMultiByteToWideChar(CP_ACP, MB_PRECOMPOSED, pszSource, length, NULL, 0);
+ }
+
+ if (dwSizeRequired == 0) {
+ if (length == 0) {
+ return StringObject::GetEmptyString();
+ }
+ COMPlusThrow(kArgumentException, W("Arg_InvalidANSIString"));
+ }
+
+ pString = AllocateString(dwSizeRequired);
+ dwSizeRequired = WszMultiByteToWideChar(CP_ACP, MB_PRECOMPOSED, (LPCSTR)pszSource, length, pString->GetBuffer(), dwSizeRequired);
+ if (dwSizeRequired == 0) {
+ COMPlusThrow(kArgumentException, W("Arg_InvalidANSIString"));
+ }
+
+ _ASSERTE(dwSizeRequired != INT32_MAX && pString->GetBuffer()[dwSizeRequired]==0);
+
+ return pString;
+}
+
+
+// strAChars must be null-terminated, with an appropriate aLength
+// strBChars must be null-terminated, with an appropriate bLength OR bLength == -1
+// If bLength == -1, we stop on the first null character in strBChars
+BOOL StringObject::CaseInsensitiveCompHelper(__in_ecount(aLength) WCHAR *strAChars, __in_z INT8 *strBChars, INT32 aLength, INT32 bLength, INT32 *result) {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(strAChars));
+ PRECONDITION(CheckPointer(strBChars));
+ PRECONDITION(CheckPointer(result));
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ WCHAR *strAStart = strAChars;
+ INT8 *strBStart = strBChars;
+ unsigned charA;
+ unsigned charB;
+
+ for(;;) {
+ charA = *strAChars;
+ charB = (unsigned) *strBChars;
+
+ //Case-insensitive comparison on chars greater than 0x7F
+ //requires a locale-aware casing operation and we're not going there.
+ if ((charA|charB)>0x7F) {
+ *result = 0;
+ return FALSE;
+ }
+
+ // uppercase both chars.
+ if (charA>='a' && charA<='z') {
+ charA ^= 0x20;
+ }
+ if (charB>='a' && charB<='z') {
+ charB ^= 0x20;
+ }
+
+ //Return the (case-insensitive) difference between them.
+ if (charA!=charB) {
+ *result = (int)(charA-charB);
+ return TRUE;
+ }
+
+
+ if (charA==0) // both strings have null character
+ {
+ if (bLength == -1)
+ {
+ *result = aLength - static_cast<INT32>(strAChars - strAStart);
+ return TRUE;
+ }
+ if (strAChars==strAStart + aLength || strBChars==strBStart + bLength)
+ {
+ *result = aLength - bLength;
+ return TRUE;
+ }
+ // else both embedded zeros
+ }
+
+ // Next char
+ strAChars++; strBChars++;
+ }
+
+}
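+
+// The "^= 0x20" above works because ASCII upper- and lowercase letters differ
+// only in bit 5, so flipping that bit uppercases a known-lowercase character.
+// A quick check (illustrative, not compiled):
+//
+//   _ASSERTE(('a' ^ 0x20) == 'A' && ('z' ^ 0x20) == 'Z');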
+
+INT32 StringObject::FastCompareStringHelper(DWORD* strAChars, INT32 countA, DWORD* strBChars, INT32 countB)
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ INT32 count = (countA < countB) ? countA : countB;
+
+ PREFIX_ASSUME(count >= 0);
+
+ ptrdiff_t diff = (char *)strAChars - (char *)strBChars;
+
+#if defined(_WIN64) || defined(ALIGN_ACCESS)
+ int alignmentA = ((SIZE_T)strAChars) & (sizeof(SIZE_T) - 1);
+ int alignmentB = ((SIZE_T)strBChars) & (sizeof(SIZE_T) - 1);
+#endif // _WIN64 || ALIGN_ACCESS
+
+#if defined(_WIN64)
+ if (alignmentA == alignmentB)
+ {
+ if ((alignmentA == 2 || alignmentA == 6) && (count >= 1))
+ {
+ LPWSTR ptr2 = (WCHAR *)strBChars;
+
+ if (( *((WCHAR*)((char *)ptr2 + diff)) - *ptr2) != 0)
+ {
+ return ((int)*((WCHAR*)((char *)ptr2 + diff)) - (int)*ptr2);
+ }
+ strBChars = (DWORD*)(++ptr2);
+ count -= 1;
+ alignmentA = (alignmentA == 2 ? 4 : 0);
+ }
+
+ if ((alignmentA == 4) && (count >= 2))
+ {
+ DWORD* ptr2 = (DWORD*)strBChars;
+
+ if (( *((DWORD*)((char *)ptr2 + diff)) - *ptr2) != 0)
+ {
+ LPWSTR chkptr1 = (WCHAR*)((char *)strBChars + diff);
+ LPWSTR chkptr2 = (WCHAR*)strBChars;
+
+ if (*chkptr1 != *chkptr2)
+ {
+ return ((int)*chkptr1 - (int)*chkptr2);
+ }
+ return ((int)*(chkptr1+1) - (int)*(chkptr2+1));
+ }
+ strBChars = ++ptr2;
+ count -= 2;
+ alignmentA = 0;
+ }
+
+ if ((alignmentA == 0))
+ {
+ while (count >= 4)
+ {
+ SIZE_T* ptr2 = (SIZE_T*)strBChars;
+
+ if (( *((SIZE_T*)((char *)ptr2 + diff)) - *ptr2) != 0)
+ {
+ if (( *((DWORD*)((char *)ptr2 + diff)) - *(DWORD*)ptr2) != 0)
+ {
+ LPWSTR chkptr1 = (WCHAR*)((char *)strBChars + diff);
+ LPWSTR chkptr2 = (WCHAR*)strBChars;
+
+ if (*chkptr1 != *chkptr2)
+ {
+ return ((int)*chkptr1 - (int)*chkptr2);
+ }
+ return ((int)*(chkptr1+1) - (int)*(chkptr2+1));
+ }
+ else
+ {
+ LPWSTR chkptr1 = (WCHAR*)((DWORD*)((char *)strBChars + diff) + 1);
+ LPWSTR chkptr2 = (WCHAR*)((DWORD*)strBChars + 1);
+
+ if (*chkptr1 != *chkptr2)
+ {
+ return ((int)*chkptr1 - (int)*chkptr2);
+ }
+ return ((int)*(chkptr1+1) - (int)*(chkptr2+1));
+ }
+ }
+ strBChars = (DWORD*)(++ptr2);
+ count -= 4;
+ }
+ }
+
+ LPWSTR ptr2 = (WCHAR*)strBChars;
+ while ((count -= 1) >= 0)
+ {
+ if (( *((WCHAR*)((char *)ptr2 + diff)) - *ptr2) != 0)
+ {
+ return ((int)*((WCHAR*)((char *)ptr2 + diff)) - (int)*ptr2);
+ }
+ ++ptr2;
+ }
+ }
+ else
+#endif // _WIN64
+#if defined(ALIGN_ACCESS)
+ if ( ( !IS_ALIGNED((size_t)strAChars, sizeof(DWORD)) ||
+ !IS_ALIGNED((size_t)strBChars, sizeof(DWORD)) ) &&
+ (abs(alignmentA - alignmentB) != 4) )
+ {
+ _ASSERTE(IS_ALIGNED((size_t)strAChars, sizeof(WCHAR)));
+ _ASSERTE(IS_ALIGNED((size_t)strBChars, sizeof(WCHAR)));
+ LPWSTR ptr2 = (WCHAR *)strBChars;
+
+ while ((count -= 1) >= 0)
+ {
+ if (( *((WCHAR*)((char *)ptr2 + diff)) - *ptr2) != 0)
+ {
+ return ((int)*((WCHAR*)((char *)ptr2 + diff)) - (int)*ptr2);
+ }
+ ++ptr2;
+ }
+ }
+ else
+#endif // ALIGN_ACCESS
+ {
+#if defined(_WIN64) || defined(ALIGN_ACCESS)
+ if (abs(alignmentA - alignmentB) == 4)
+ {
+ if ((alignmentA == 2) || (alignmentB == 2))
+ {
+ LPWSTR ptr2 = (WCHAR *)strBChars;
+
+ if (( *((WCHAR*)((char *)ptr2 + diff)) - *ptr2) != 0)
+ {
+ return ((int)*((WCHAR*)((char *)ptr2 + diff)) - (int)*ptr2);
+ }
+ strBChars = (DWORD*)(++ptr2);
+ count -= 1;
+ }
+ }
+#endif // WIN64 || ALIGN_ACCESS
+
+ // Loop comparing a DWORD at a time.
+ while ((count -= 2) >= 0)
+ {
+ if ((*((DWORD* )((char *)strBChars + diff)) - *strBChars) != 0)
+ {
+ LPWSTR ptr1 = (WCHAR*)((char *)strBChars + diff);
+ LPWSTR ptr2 = (WCHAR*)strBChars;
+ if (*ptr1 != *ptr2) {
+ return ((int)*ptr1 - (int)*ptr2);
+ }
+ return ((int)*(ptr1+1) - (int)*(ptr2+1));
+ }
+ ++strBChars;
+ }
+
+ int c;
+ if (count == -1)
+ if ((c = *((WCHAR *) ((char *)strBChars + diff)) - *((WCHAR *) strBChars)) != 0)
+ return c;
+ }
+
+ return countA - countB;
+}
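+
+// A minimal sketch (illustrative, not compiled) of the DWORD-at-a-time trick
+// used throughout this function; pA and pB are hypothetical cursors:
+//
+//   if (*(DWORD*)pA != *(DWORD*)pB)               // compare two WCHARs at once
+//   {
+//       if (*(WCHAR*)pA != *(WCHAR*)pB)           // first char differs
+//           return (int)*(WCHAR*)pA - (int)*(WCHAR*)pB;
+//       return (int)*((WCHAR*)pA + 1) - (int)*((WCHAR*)pB + 1);
+//   }
+//
+// Only a mismatching pair pays for the narrower loads; equal pairs cost one
+// comparison per two characters.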
+
+
+/*===========================InternalCheckHighChars=============================
+**Action: Checks if the string can be sorted quickly. The requirements are that
+** the string contain no character greater than or equal to 0x80 and that the
+** string not contain an apostrophe or a hyphen. Apostrophe and hyphen are
+** excluded so that words like co-op and coop sort together.
+**Returns: The computed string state. The side effect is to set a bit on the
+** string indicating whether or not the string contains high chars.
+**Arguments: The String to be checked.
+**Exceptions: None
+==============================================================================*/
+DWORD StringObject::InternalCheckHighChars() {
+ WRAPPER_NO_CONTRACT;
+
+ WCHAR *chars;
+ WCHAR c;
+ INT32 length;
+
+ RefInterpretGetStringValuesDangerousForGC((WCHAR **) &chars, &length);
+
+ DWORD stringState = STRING_STATE_FAST_OPS;
+
+ for (int i=0; i<length; i++) {
+ c = chars[i];
+ if (c>=0x80) {
+ SetHighCharState(STRING_STATE_HIGH_CHARS);
+ return STRING_STATE_HIGH_CHARS;
+ } else if (HighCharHelper::IsHighChar((int)c)) {
+ //This means that we have a character which forces special sorting,
+ //but doesn't necessarily force slower casing and indexing. We'll
+ //set a value to remember this, but we need to check the rest of
+ //the string because we may still find a character greater than 0x7f.
+ stringState = STRING_STATE_SPECIAL_SORT;
+ }
+ }
+
+ SetHighCharState(stringState);
+ return stringState;
+}
+
+#ifdef VERIFY_HEAP
+/*=============================ValidateHighChars=============================
+**Action: Validates that the HighChars state bits are set correctly; no side effects
+**Returns: BOOL for result of validation
+**Arguments: The String to be checked.
+**Exceptions: None
+==============================================================================*/
+BOOL StringObject::ValidateHighChars()
+{
+ WRAPPER_NO_CONTRACT;
+ DWORD curStringState = GetHighCharState ();
+ // state could always be undetermined
+ if (curStringState == STRING_STATE_UNDETERMINED)
+ {
+ return TRUE;
+ }
+
+ WCHAR *chars;
+ INT32 length;
+ RefInterpretGetStringValuesDangerousForGC((WCHAR **) &chars, &length);
+
+ DWORD stringState = STRING_STATE_FAST_OPS;
+ for (int i=0; i<length; i++) {
+ WCHAR c = chars[i];
+ if (c>=0x80)
+ {
+ // if there is a high char in the string, the state has to be STRING_STATE_HIGH_CHARS
+ return curStringState == STRING_STATE_HIGH_CHARS;
+ }
+ else if (HighCharHelper::IsHighChar((int)c)) {
+ //This means that we have a character which forces special sorting,
+ //but doesn't necessarily force slower casing and indexing. We'll
+ //set a value to remember this, but we need to check the rest of
+ //the string because we may still find a character greater than 0x7f.
+ stringState = STRING_STATE_SPECIAL_SORT;
+ }
+ }
+
+ return stringState == curStringState;
+}
+
+#endif //VERIFY_HEAP
+
+/*================================HasTrailByte==================================
+**Action: Many years ago, VB didn't have the concept of a byte array, so enterprising
+** users created one by allocating a BSTR with an odd length and using it to
+** store bytes. A generation later, we're still stuck supporting this behavior.
+** The way that we do this is to stick the trail byte in the sync block
+** whenever we encounter such a situation. Since we expect this to be a very
+** corner case, accessing the sync block seems like a good enough solution.
+**
+**Returns: True if <CODE>str</CODE> contains a VB trail byte, false otherwise.
+**Arguments: str -- The string to be examined.
+**Exceptions: None
+==============================================================================*/
+BOOL StringObject::HasTrailByte() {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ SyncBlock * pSyncBlock = PassiveGetSyncBlock();
+ if(pSyncBlock != NULL)
+ {
+ return pSyncBlock->HasCOMBstrTrailByte();
+ }
+
+ return FALSE;
+}
+
+/*=================================GetTrailByte=================================
+**Action: If <CODE>str</CODE> contains a vb trail byte, returns a copy of it.
+**Returns: True if <CODE>str</CODE> contains a trail byte. *bTrailByte is set to
+** the byte in question if <CODE>str</CODE> does have a trail byte, otherwise
+** it's set to 0.
+**Arguments: str -- The string being examined.
+** bTrailByte -- An out param to hold the value of the trail byte.
+**Exceptions: None.
+==============================================================================*/
+BOOL StringObject::GetTrailByte(BYTE *bTrailByte) {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ _ASSERTE(bTrailByte);
+ *bTrailByte=0;
+
+ BOOL retValue = HasTrailByte();
+
+ if(retValue)
+ {
+ *bTrailByte = GET_VB_TRAIL_BYTE(GetHeader()->PassiveGetSyncBlock()->GetCOMBstrTrailByte());
+ }
+
+ return retValue;
+}
+
+/*=================================SetTrailByte=================================
+**Action: Sets the trail byte in the sync block
+**Returns: True.
+**Arguments: str -- The string into which to set the trail byte.
+** bTrailByte -- The trail byte to be added to the string.
+**Exceptions: None.
+==============================================================================*/
+BOOL StringObject::SetTrailByte(BYTE bTrailByte) {
+ WRAPPER_NO_CONTRACT;
+
+ GetHeader()->GetSyncBlock()->SetCOMBstrTrailByte(MAKE_VB_TRAIL_BYTE(bTrailByte));
+ return TRUE;
+}
+
+
+#define DEFAULT_CAPACITY 16
+#define DEFAULT_MAX_CAPACITY 0x7FFFFFFF
+
+/*================================ReplaceBuffer=================================
+**This is a helper function designed to be used by N/Direct; it replaces the entire
+**contents of the String with a new string created by some native method. This
+**will not be exposed through the StringBuilder class.
+==============================================================================*/
+void StringBufferObject::ReplaceBuffer(STRINGBUFFERREF *thisRef, __in_ecount(newLength) WCHAR *newBuffer, INT32 newLength) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(newBuffer));
+ PRECONDITION(newLength>=0);
+ PRECONDITION(CheckPointer(thisRef));
+ PRECONDITION(IsProtectedByGCFrame(thisRef));
+ } CONTRACTL_END;
+
+ if(newLength > (*thisRef)->GetMaxCapacity())
+ {
+ COMPlusThrowArgumentOutOfRange(W("capacity"), W("ArgumentOutOfRange_Capacity"));
+ }
+
+ CHARARRAYREF newCharArray = AllocateCharArray((*thisRef)->GetAllocationLength(newLength+1));
+ (*thisRef)->ReplaceBuffer(&newCharArray, newBuffer, newLength);
+}
+
+
+/*================================ReplaceBufferAnsi=================================
+**This is a helper function designed to be used by N/Direct; it replaces the entire
+**contents of the String with a new string created by some native method. This
+**will not be exposed through the StringBuilder class.
+**
+**This version does Ansi->Unicode conversion along the way. Although
+**making it a member of COMStringBuffer exposes more stringbuffer internals
+**than necessary, it does avoid requiring a temporary buffer to hold
+**the Ansi->Unicode conversion.
+==============================================================================*/
+void StringBufferObject::ReplaceBufferAnsi(STRINGBUFFERREF *thisRef, __in_ecount(newCapacity) CHAR *newBuffer, INT32 newCapacity) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(newBuffer));
+ PRECONDITION(CheckPointer(thisRef));
+ PRECONDITION(IsProtectedByGCFrame(thisRef));
+ PRECONDITION(newCapacity>=0);
+ } CONTRACTL_END;
+
+ if(newCapacity > (*thisRef)->GetMaxCapacity())
+ {
+ COMPlusThrowArgumentOutOfRange(W("capacity"), W("ArgumentOutOfRange_Capacity"));
+ }
+
+ CHARARRAYREF newCharArray = AllocateCharArray((*thisRef)->GetAllocationLength(newCapacity+1));
+ (*thisRef)->ReplaceBufferWithAnsi(&newCharArray, newBuffer, newCapacity);
+}
+
+
+/*==============================LocalIndexOfString==============================
+**Finds search within base and returns the index where it was found. The search
+**starts from startPos and we return -1 if search isn't found. This is a direct
+**copy from COMString::IndexOfString, but doesn't require that we build up
+**an instance of indexOfStringArgs before calling it.
+**
+**Args:
+**base -- the string in which to search
+**search -- the string for which to search
+**strLength -- the length of base
+**patternLength -- the length of search
+**startPos -- the place from which to start searching.
+**
+==============================================================================*/
+/* static */ INT32 StringBufferObject::LocalIndexOfString(__in_ecount(strLength) WCHAR *base, __in_ecount(patternLength) WCHAR *search, int strLength, int patternLength, int startPos) {
+ LIMITED_METHOD_CONTRACT
+ _ASSERTE(base != NULL);
+ _ASSERTE(search != NULL);
+
+ int iThis, iPattern;
+ for (iThis=startPos; iThis < (strLength-patternLength+1); iThis++) {
+ for (iPattern=0; iPattern<patternLength && base[iThis+iPattern]==search[iPattern]; iPattern++);
+ if (iPattern == patternLength) return iThis;
+ }
+ return -1;
+}
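+
+// Illustrative behavior of the naive O(strLength * patternLength) scan above
+// (not compiled):
+//
+//   WCHAR base[]   = { 'a','b','c','a','b' };
+//   WCHAR search[] = { 'c','a' };
+//   // LocalIndexOfString(base, search, 5, 2, 0) == 2   (match at "ca")
+//   // LocalIndexOfString(base, search, 5, 2, 3) == -1  (no match from startPos 3)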
+
+
+#ifdef USE_CHECKED_OBJECTREFS
+
+//-------------------------------------------------------------
+// Default constructor, for non-initializing declarations:
+//
+// OBJECTREF or;
+//-------------------------------------------------------------
+OBJECTREF::OBJECTREF()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ STATIC_CONTRACT_VIOLATION(SOToleranceViolation);
+
+ m_asObj = (Object*)POISONC;
+ Thread::ObjectRefNew(this);
+}
+
+//-------------------------------------------------------------
+// Copy constructor, for passing OBJECTREF's as function arguments.
+//-------------------------------------------------------------
+OBJECTREF::OBJECTREF(const OBJECTREF & objref)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ STATIC_CONTRACT_VIOLATION(SOToleranceViolation);
+
+ VALIDATEOBJECT(objref.m_asObj);
+
+ // !!! If this assert is fired, there are two possibilities:
+ // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj
+ // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj),
+ // !!! or ObjectToSTRINGREF(*(StringObject**)pObj)
+ // !!! 2. There is a real GC hole here.
+ // !!! Either way you need to fix the code.
+ _ASSERTE(Thread::IsObjRefValid(&objref));
+ if ((objref.m_asObj != 0) &&
+ ((GCHeap*)GCHeap::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
+ {
+ _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
+ }
+ m_asObj = objref.m_asObj;
+
+ if (m_asObj != 0) {
+ ENABLESTRESSHEAP();
+ }
+
+ Thread::ObjectRefNew(this);
+}
+
+
+//-------------------------------------------------------------
+// To allow NULL to be used as an OBJECTREF.
+//-------------------------------------------------------------
+OBJECTREF::OBJECTREF(TADDR nul)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ STATIC_CONTRACT_VIOLATION(SOToleranceViolation);
+
+ //_ASSERTE(nul == 0);
+ m_asObj = (Object*)nul;
+ if( m_asObj != NULL)
+ {
+ // REVISIT_TODO: fix this, why is this constructor being used for non-null object refs?
+ STATIC_CONTRACT_VIOLATION(ModeViolation);
+
+ VALIDATEOBJECT(m_asObj);
+ ENABLESTRESSHEAP();
+ }
+ Thread::ObjectRefNew(this);
+}
+
+//-------------------------------------------------------------
+// This is for the GC's use only. Non-GC code should never
+// use the "Object" class directly. The unused "int" argument
+// prevents C++ from using this to implicitly convert Object*'s
+// to OBJECTREF.
+//-------------------------------------------------------------
+OBJECTREF::OBJECTREF(Object *pObject)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ DEBUG_ONLY_FUNCTION;
+
+ if ((pObject != 0) &&
+ ((GCHeap*)GCHeap::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
+ {
+ _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
+ }
+ m_asObj = pObject;
+ VALIDATEOBJECT(m_asObj);
+ if (m_asObj != 0) {
+ ENABLESTRESSHEAP();
+ }
+ Thread::ObjectRefNew(this);
+}
+
+void OBJECTREF::Validate(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_asObj->Validate(bDeep, bVerifyNextHeader, bVerifySyncBlock);
+}
+
+//-------------------------------------------------------------
+// Test against NULL.
+//-------------------------------------------------------------
+int OBJECTREF::operator!() const
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ // We don't do any validation here, as we want to allow zero comparison in preemptive mode
+ return !m_asObj;
+}
+
+//-------------------------------------------------------------
+// Compare two OBJECTREF's.
+//-------------------------------------------------------------
+int OBJECTREF::operator==(const OBJECTREF &objref) const
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ if (objref.m_asObj != NULL) // Allow comparison to zero in preemptive mode
+ {
+ // REVISIT_TODO: Weakening the contract system a little bit here. We should really
+ // add a special NULLOBJECTREF which can be used for these situations and have
+ // a separate code path for that with the correct contract protections.
+ STATIC_CONTRACT_VIOLATION(ModeViolation);
+
+ VALIDATEOBJECT(objref.m_asObj);
+
+ // !!! If this assert is fired, there are two possibilities:
+ // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj
+ // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj),
+ // !!! or ObjectToSTRINGREF(*(StringObject**)pObj)
+ // !!! 2. There is a real GC hole here.
+ // !!! Either way you need to fix the code.
+ _ASSERTE(Thread::IsObjRefValid(&objref));
+ VALIDATEOBJECT(m_asObj);
+ // If this assert fires, you probably did not protect
+ // your OBJECTREF and a GC might have occurred. To find
+ // where the possible GC was, set a breakpoint in Thread::TriggersGC
+ _ASSERTE(Thread::IsObjRefValid(this));
+
+ if (m_asObj != 0 || objref.m_asObj != 0) {
+ ENABLESTRESSHEAP();
+ }
+ }
+ return m_asObj == objref.m_asObj;
+}
+
+//-------------------------------------------------------------
+// Compare two OBJECTREF's.
+//-------------------------------------------------------------
+int OBJECTREF::operator!=(const OBJECTREF &objref) const
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ if (objref.m_asObj != NULL) // Allow comparison to zero in preemptive mode
+ {
+ // REVISIT_TODO: Weakening the contract system a little bit here. We should really
+ // add a special NULLOBJECTREF which can be used for these situations and have
+ // a separate code path for that with the correct contract protections.
+ STATIC_CONTRACT_VIOLATION(ModeViolation);
+
+ VALIDATEOBJECT(objref.m_asObj);
+
+ // !!! If this assert is fired, there are two possibilities:
+ // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj
+ // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj),
+ // !!! or ObjectToSTRINGREF(*(StringObject**)pObj)
+ // !!! 2. There is a real GC hole here.
+ // !!! Either way you need to fix the code.
+ _ASSERTE(Thread::IsObjRefValid(&objref));
+ VALIDATEOBJECT(m_asObj);
+ // If this assert fires, you probably did not protect
+ // your OBJECTREF and a GC might have occurred. To find
+ // where the possible GC was, set a breakpoint in Thread::TriggersGC
+ _ASSERTE(Thread::IsObjRefValid(this));
+
+ if (m_asObj != 0 || objref.m_asObj != 0) {
+ ENABLESTRESSHEAP();
+ }
+ }
+
+ return m_asObj != objref.m_asObj;
+}
+
+
+//-------------------------------------------------------------
+// Forward method calls.
+//-------------------------------------------------------------
+Object* OBJECTREF::operator->()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ VALIDATEOBJECT(m_asObj);
+ // If this assert fires, you probably did not protect
+ // your OBJECTREF and a GC might have occurred. To find
+ // where the possible GC was, set a breakpoint in Thread::TriggersGC
+ _ASSERTE(Thread::IsObjRefValid(this));
+
+ if (m_asObj != 0) {
+ ENABLESTRESSHEAP();
+ }
+
+ // if you are using OBJECTREF directly,
+ // you probably want an Object *
+ return (Object *)m_asObj;
+}
+
+
+//-------------------------------------------------------------
+// Forward method calls.
+//-------------------------------------------------------------
+const Object* OBJECTREF::operator->() const
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ VALIDATEOBJECT(m_asObj);
+ // If this assert fires, you probably did not protect
+ // your OBJECTREF and a GC might have occurred. To find
+ // where the possible GC was, set a breakpoint in Thread::TriggersGC
+ _ASSERTE(Thread::IsObjRefValid(this));
+
+ if (m_asObj != 0) {
+ ENABLESTRESSHEAP();
+ }
+
+ // if you are using OBJECTREF directly,
+ // you probably want an Object *
+ return (Object *)m_asObj;
+}
+
+
+//-------------------------------------------------------------
+// Assignment. We don't validate the destination so as not
+// to break the sequence:
+//
+// OBJECTREF or;
+// or = ...;
+//-------------------------------------------------------------
+OBJECTREF& OBJECTREF::operator=(const OBJECTREF &objref)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ VALIDATEOBJECT(objref.m_asObj);
+
+ // !!! If this assert is fired, there are two possibilities:
+ // !!! 1. You are doing a type cast, e.g. *(OBJECTREF*)pObj
+ // !!! Instead, you should use ObjectToOBJECTREF(*(Object**)pObj),
+ // !!! or ObjectToSTRINGREF(*(StringObject**)pObj)
+ // !!! 2. There is a real GC hole here.
+ // !!! Either way you need to fix the code.
+ _ASSERTE(Thread::IsObjRefValid(&objref));
+
+ if ((objref.m_asObj != 0) &&
+ ((GCHeap*)GCHeap::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
+ {
+ _ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
+ }
+ Thread::ObjectRefAssign(this);
+
+ m_asObj = objref.m_asObj;
+ if (m_asObj != 0) {
+ ENABLESTRESSHEAP();
+ }
+ return *this;
+}
+
+//-------------------------------------------------------------
+// Allows for the assignment of NULL to a OBJECTREF
+//-------------------------------------------------------------
+
+OBJECTREF& OBJECTREF::operator=(TADDR nul)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ _ASSERTE(nul == 0);
+ Thread::ObjectRefAssign(this);
+ m_asObj = (Object*)nul;
+ if (m_asObj != 0) {
+ ENABLESTRESSHEAP();
+ }
+ return *this;
+}
+#endif // USE_CHECKED_OBJECTREFS
+
+#ifdef _DEBUG
+
+void* __cdecl GCSafeMemCpy(void * dest, const void * src, size_t len)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ if (!(((*(BYTE**)&dest) < g_lowest_address ) ||
+ ((*(BYTE**)&dest) >= g_highest_address)))
+ {
+ Thread* pThread = GetThread();
+
+ // GCHeap::IsHeapPointer has a race when called in preemptive mode. It walks the list of segments
+ // that can be modified by GC. Do the check below only if it is safe to do so.
+ if (pThread != NULL && pThread->PreemptiveGCDisabled())
+ {
+ // Note there is memcpyNoGCRefs which will allow you to do a memcpy into the GC
+ // heap if you really know you don't need to call the write barrier
+
+ _ASSERTE(!GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) dest) ||
+ !"using memcpy to copy into the GC heap, use CopyValueClass");
+ }
+ }
+ return memcpyNoGCRefs(dest, src, len);
+}
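+
+// A usage sketch (illustrative, not compiled): debug builds route memcpy here
+// so that a copy whose destination is in the GC heap trips the assert while
+// the thread is in cooperative mode; scratch and obj are hypothetical:
+//
+//   BYTE scratch[64];
+//   GCSafeMemCpy(scratch, src, sizeof(scratch));  // fine: stack destination
+//   GCSafeMemCpy(obj->GetData(), src, len);       // asserts: heap destination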
+
+#endif // _DEBUG
+
+// This function clears a piece of memory in a GC safe way. It makes the guarantee
+// that it will clear memory in at least pointer sized chunks whenever possible.
+// Unaligned memory at the beginning and remaining bytes at the end are written bytewise.
+// We must make this guarantee whenever we clear memory in the GC heap that could contain
+// object references. The GC or other user threads can read object references at any time;
+// clearing them bytewise can result in a read on another thread getting incorrect data.
+void __fastcall ZeroMemoryInGCHeap(void* mem, size_t size)
+{
+ WRAPPER_NO_CONTRACT;
+ BYTE* memBytes = (BYTE*) mem;
+ BYTE* endBytes = &memBytes[size];
+
+ // handle unaligned bytes at the beginning
+ while (!IS_ALIGNED(memBytes, sizeof(PTR_PTR_VOID)) && memBytes < endBytes)
+ *memBytes++ = 0;
+
+ // now write pointer sized pieces
+ size_t nPtrs = (endBytes - memBytes) / sizeof(PTR_PTR_VOID);
+ PTR_PTR_VOID memPtr = (PTR_PTR_VOID) memBytes;
+ for (size_t i = 0; i < nPtrs; i++)
+ *memPtr++ = 0;
+
+ // handle remaining bytes at the end
+ memBytes = (BYTE*) memPtr;
+ while (memBytes < endBytes)
+ *memBytes++ = 0;
+}
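+
+// A worked example of the three phases above (illustrative, assuming 8-byte
+// pointers): clearing 24 bytes starting 2 bytes past an aligned address writes
+// 6 leading bytes bytewise, then two pointer-sized zeros, then 2 trailing
+// bytes bytewise. Object reference slots are always pointer-aligned, so each
+// one is cleared by a single store and never torn.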
+
+void StackTraceArray::Append(StackTraceElement const * begin, StackTraceElement const * end)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // ensure that only one thread can write to the array
+ EnsureThreadAffinity();
+
+ size_t newsize = Size() + (end - begin);
+ Grow(newsize);
+ memcpyNoGCRefs(GetData() + Size(), begin, (end - begin) * sizeof(StackTraceElement));
+ MemoryBarrier(); // prevent the newsize from being reordered with the array copy
+ SetSize(newsize);
+
+#if defined(_DEBUG)
+ CheckState();
+#endif
+}
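+
+// A sketch of why the barrier above matters (illustrative, not compiled): a
+// reader that loads the size first can only index elements whose copy has
+// already been published:
+//
+//   size_t n = Size();                        // reader's snapshot of the size
+//   StackTraceElement e = GetData()[n - 1];   // safe: memcpy preceded SetSize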
+
+void StackTraceArray::AppendSkipLast(StackTraceElement const * begin, StackTraceElement const * end)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // to skip the last element, we need to replace it with the first element
+ // from m_pStackTrace and do it atomically if possible,
+ // otherwise we'll create a copy of the entire array, which is bad for performance,
+ // and so should not be on the main path
+ //
+
+ // ensure that only one thread can write to the array
+ EnsureThreadAffinity();
+
+ assert(Size() > 0);
+
+ StackTraceElement & last = GetData()[Size() - 1];
+ if (last.PartiallyEqual(*begin))
+ {
+ // fast path: atomic update
+ last.PartialAtomicUpdate(*begin);
+
+ // append the rest
+ if (end - begin > 1)
+ Append(begin + 1, end);
+ }
+ else
+ {
+ // slow path: create a copy and append
+ StackTraceArray copy(*this);
+ GCPROTECT_BEGIN(copy);
+ copy.SetSize(copy.Size() - 1);
+ copy.Append(begin, end);
+ this->Swap(copy);
+ GCPROTECT_END();
+ }
+
+#if defined(_DEBUG)
+ CheckState();
+#endif
+}
+
+void StackTraceArray::CheckState() const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (!m_array)
+ return;
+
+ assert(GetObjectThread() == GetThread());
+
+ size_t size = Size();
+ StackTraceElement const * p;
+ p = GetData();
+ for (size_t i = 0; i < size; ++i)
+ assert(p[i].pFunc != NULL);
+}
+
+void StackTraceArray::Grow(size_t grow_size)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(ThrowOutOfMemory(););
+ }
+ CONTRACTL_END;
+
+ size_t raw_size = grow_size * sizeof(StackTraceElement) + sizeof(ArrayHeader);
+
+ if (!m_array)
+ {
+ SetArray(I1ARRAYREF(AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(raw_size))));
+ SetSize(0);
+ SetObjectThread();
+ }
+ else
+ {
+ if (Capacity() >= raw_size)
+ return;
+
+ // allocate a new array, copy the data
+ size_t new_capacity = Max(Capacity() * 2, raw_size);
+
+ _ASSERTE(new_capacity >= grow_size * sizeof(StackTraceElement) + sizeof(ArrayHeader));
+
+ I1ARRAYREF newarr = (I1ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(new_capacity));
+ memcpyNoGCRefs(newarr->GetDirectPointerToNonObjectElements(),
+ GetRaw(),
+ Size() * sizeof(StackTraceElement) + sizeof(ArrayHeader));
+
+ SetArray(newarr);
+ }
+}
+
+void StackTraceArray::EnsureThreadAffinity()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!m_array)
+ return;
+
+ if (GetObjectThread() != GetThread())
+ {
+ // object is being changed by a thread different from the one which created it
+ // make a copy of the array to prevent a race condition when two different threads try to change it
+ StackTraceArray copy(*this);
+ this->Swap(copy);
+ }
+}
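+
+// A minimal sketch (illustrative, not compiled): if thread B mutates a stack
+// trace array created on thread A, the copy above gives B a private array, so
+// A's concurrent readers never observe a half-written element:
+//
+//   EnsureThreadAffinity();  // on thread B: m_array becomes a fresh copy owned by B
+//   Append(begin, end);      // mutates only B's copy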
+
+#ifdef _MSC_VER
+#pragma warning(disable: 4267)
+#endif
+
+StackTraceArray::StackTraceArray(StackTraceArray const & rhs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(ThrowOutOfMemory(););
+ }
+ CONTRACTL_END;
+
+ m_array = (I1ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(rhs.Capacity()));
+
+ GCPROTECT_BEGIN(m_array);
+ Volatile<size_t> size = rhs.Size();
+ memcpyNoGCRefs(GetRaw(), rhs.GetRaw(), size * sizeof(StackTraceElement) + sizeof(ArrayHeader));
+
+ SetSize(size); // set size to the exact value which was used when we copied the data
+ // another thread might have changed it at the time of copying
+ SetObjectThread(); // affinitize the newly created array with the current thread
+ GCPROTECT_END();
+}
+
+// Deep copies the stack trace array
+void StackTraceArray::CopyFrom(StackTraceArray const & src)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(ThrowOutOfMemory(););
+ }
+ CONTRACTL_END;
+
+ m_array = (I1ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_I1, static_cast<DWORD>(src.Capacity()));
+
+ GCPROTECT_BEGIN(m_array);
+ Volatile<size_t> size = src.Size();
+ memcpyNoGCRefs(GetRaw(), src.GetRaw(), size * sizeof(StackTraceElement) + sizeof(ArrayHeader));
+
+ SetSize(size); // set size to the exact value which was used when we copied the data
+ // another thread might have changed it at the time of copying
+ SetObjectThread(); // affinitize the newly created array with the current thread
+ GCPROTECT_END();
+}
+
+#ifdef _MSC_VER
+#pragma warning(default: 4267)
+#endif
+
+
+#ifdef _DEBUG
+//===============================================================================
+// Code that ensures that our unmanaged version of Nullable is consistent with
+// the managed version Nullable<T> for all T.
+
+void Nullable::CheckFieldOffsets(TypeHandle nullableType)
+{
+ LIMITED_METHOD_CONTRACT;
+
+/***
+ // The non-instantiated method tables like List<T> that are used
+ // by reflection and verification do not have correct field offsets
+ // but we never make instances of these anyway.
+ if (nullableMT->ContainsGenericVariables())
+ return;
+***/
+
+ MethodTable* nullableMT = nullableType.GetMethodTable();
+
+ // ensure that the managed version of the table is the same as the
+ // unmanaged. Note that we can't do this in mscorlib.h because this
+ // class is generic and field layout depends on the instantiation.
+
+ _ASSERTE(nullableMT->GetNumInstanceFields() == 2);
+ FieldDesc* field = nullableMT->GetApproxFieldDescListRaw();
+
+ _ASSERTE(strcmp(field->GetDebugName(), "hasValue") == 0);
+// _ASSERTE(field->GetOffset() == offsetof(Nullable, hasValue));
+ field++;
+
+ _ASSERTE(strcmp(field->GetDebugName(), "value") == 0);
+// _ASSERTE(field->GetOffset() == offsetof(Nullable, value));
+}
+#endif
+
+//===============================================================================
+// Returns true if nullableMT is Nullable<T> for a T that is equivalent to paramMT
+
+BOOL Nullable::IsNullableForTypeHelper(MethodTable* nullableMT, MethodTable* paramMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ if (!nullableMT->IsNullable())
+ return FALSE;
+
+ // we require the parameter types to be equivalent
+ return TypeHandle(paramMT).IsEquivalentTo(nullableMT->GetInstantiation()[0]);
+}
+
+//===============================================================================
+// Returns true if nullableMT is Nullable<T> for T == paramMT
+
+BOOL Nullable::IsNullableForTypeHelperNoGC(MethodTable* nullableMT, MethodTable* paramMT)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (!nullableMT->IsNullable())
+ return FALSE;
+
+ // we require an exact match of the parameter types
+ return TypeHandle(paramMT) == nullableMT->GetInstantiation()[0];
+}
+
+//===============================================================================
+CLR_BOOL* Nullable::HasValueAddr(MethodTable* nullableMT) {
+
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(strcmp(nullableMT->GetApproxFieldDescListRaw()[0].GetDebugName(), "hasValue") == 0);
+ _ASSERTE(nullableMT->GetApproxFieldDescListRaw()[0].GetOffset() == 0);
+ return (CLR_BOOL*) this;
+}
+
+//===============================================================================
+void* Nullable::ValueAddr(MethodTable* nullableMT) {
+
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(strcmp(nullableMT->GetApproxFieldDescListRaw()[1].GetDebugName(), "value") == 0);
+ return (((BYTE*) this) + nullableMT->GetApproxFieldDescListRaw()[1].GetOffset());
+}
+
+//===============================================================================
+// Special Logic to box a nullable<T> as a boxed<T>
+
+OBJECTREF Nullable::Box(void* srcPtr, MethodTable* nullableMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ FAULT_NOT_FATAL(); // FIX_NOW: why do we need this?
+
+ Nullable* src = (Nullable*) srcPtr;
+
+ _ASSERTE(IsNullableType(nullableMT));
+ // We better have a concrete instantiation, or our field offset asserts are not useful
+ _ASSERTE(!nullableMT->ContainsGenericVariables());
+
+ if (!*src->HasValueAddr(nullableMT))
+ return NULL;
+
+ OBJECTREF obj = 0;
+ GCPROTECT_BEGININTERIOR (src);
+ MethodTable* argMT = nullableMT->GetInstantiation()[0].GetMethodTable();
+ obj = argMT->Allocate();
+ CopyValueClass(obj->UnBox(), src->ValueAddr(nullableMT), argMT, obj->GetAppDomain());
+ GCPROTECT_END ();
+
+ return obj;
+}
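+
+//===============================================================================
+// Usage sketch for Box (illustrative only; 'srcPtr' points at raw Nullable<T>
+// data and 'pNullableIntMT' is a hypothetical Nullable<int> method table):
+//
+//     OBJECTREF obj = Nullable::Box(srcPtr, pNullableIntMT);
+//     // hasValue == false  =>  obj == NULL
+//     // hasValue == true   =>  obj is a boxed int (never a boxed Nullable<int>)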
+
+//===============================================================================
+// Special Logic to unbox a boxed T as a nullable<T>
+
+BOOL Nullable::UnBox(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+ Nullable* dest = (Nullable*) destPtr;
+ BOOL fRet = TRUE;
+
+ // We should only get here if we are unboxing a T as a Nullable<T>
+ _ASSERTE(IsNullableType(destMT));
+
+ // We better have a concrete instantiation, or our field offset asserts are not useful
+ _ASSERTE(!destMT->ContainsGenericVariables());
+
+ if (boxedVal == NULL)
+ {
+        // Logically we are doing *dest->HasValueAddr(destMT) = false;
+        // We zero out the whole structure because it may contain GC references
+        // and these need to be initialized to zero. (could optimize in the non-GC case)
+ InitValueClass(destPtr, destMT);
+ fRet = TRUE;
+ }
+ else
+ {
+ GCPROTECT_BEGIN(boxedVal);
+ if (!IsNullableForType(destMT, boxedVal->GetMethodTable()))
+ {
+ // For safety's sake, also allow true nullables to be unboxed normally.
+ // This should not happen normally, but we want to be robust
+ if (destMT->IsEquivalentTo(boxedVal->GetMethodTable()))
+ {
+ CopyValueClass(dest, boxedVal->GetData(), destMT, boxedVal->GetAppDomain());
+ fRet = TRUE;
+ }
+ else
+ {
+ fRet = FALSE;
+ }
+ }
+ else
+ {
+ *dest->HasValueAddr(destMT) = true;
+ CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable(), boxedVal->GetAppDomain());
+ fRet = TRUE;
+ }
+ GCPROTECT_END();
+ }
+ return fRet;
+}
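+
+//===============================================================================
+// Usage sketch for UnBox (illustrative only; 'destPtr' points at raw
+// Nullable<T> storage and 'boxedVal' is a GC-protected OBJECTREF):
+//
+//     BOOL ok = Nullable::UnBox(destPtr, boxedVal, pNullableTMT);
+//     // boxedVal == NULL             =>  dest zeroed (hasValue == false), TRUE
+//     // boxedVal is a boxed T        =>  hasValue == true, value copied, TRUE
+//     // unrelated/incompatible type  =>  FALSE (invalid unbox)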
+
+//===============================================================================
+// Special Logic to unbox a boxed T as a nullable<T>
+// Does not handle type equivalence (may conservatively return FALSE)
+BOOL Nullable::UnBoxNoGC(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+ Nullable* dest = (Nullable*) destPtr;
+
+ // We should only get here if we are unboxing a T as a Nullable<T>
+ _ASSERTE(IsNullableType(destMT));
+
+ // We better have a concrete instantiation, or our field offset asserts are not useful
+ _ASSERTE(!destMT->ContainsGenericVariables());
+
+ if (boxedVal == NULL)
+ {
+        // Logically we are doing *dest->HasValueAddr(destMT) = false;
+        // We zero out the whole structure because it may contain GC references
+        // and these need to be initialized to zero. (could optimize in the non-GC case)
+ InitValueClass(destPtr, destMT);
+ }
+ else
+ {
+ if (!IsNullableForTypeNoGC(destMT, boxedVal->GetMethodTable()))
+ {
+ // For safety's sake, also allow true nullables to be unboxed normally.
+ // This should not happen normally, but we want to be robust
+ if (destMT == boxedVal->GetMethodTable())
+ {
+ CopyValueClass(dest, boxedVal->GetData(), destMT, boxedVal->GetAppDomain());
+ return TRUE;
+ }
+ return FALSE;
+ }
+
+ *dest->HasValueAddr(destMT) = true;
+ CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable(), boxedVal->GetAppDomain());
+ }
+ return TRUE;
+}
+
+//===============================================================================
+// Special Logic to unbox a boxed T as a nullable<T>
+// Does not do any type checks.
+void Nullable::UnBoxNoCheck(void* destPtr, OBJECTREF boxedVal, MethodTable* destMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+ Nullable* dest = (Nullable*) destPtr;
+
+ // We should only get here if we are unboxing a T as a Nullable<T>
+ _ASSERTE(IsNullableType(destMT));
+
+ // We better have a concrete instantiation, or our field offset asserts are not useful
+ _ASSERTE(!destMT->ContainsGenericVariables());
+
+ if (boxedVal == NULL)
+ {
+        // Logically we are doing *dest->HasValueAddr(destMT) = false;
+        // We zero out the whole structure because it may contain GC references
+        // and these need to be initialized to zero. (could optimize in the non-GC case)
+ InitValueClass(destPtr, destMT);
+ }
+ else
+ {
+ if (IsNullableType(boxedVal->GetMethodTable()))
+ {
+ // For safety's sake, also allow true nullables to be unboxed normally.
+ // This should not happen normally, but we want to be robust
+ CopyValueClass(dest, boxedVal->GetData(), destMT, boxedVal->GetAppDomain());
+ }
+
+ *dest->HasValueAddr(destMT) = true;
+ CopyValueClass(dest->ValueAddr(destMT), boxedVal->UnBox(), boxedVal->GetMethodTable(), boxedVal->GetAppDomain());
+ }
+}
+
+//===============================================================================
+// A boxed Nullable<T> should either be null or a boxed T, but sometimes it is
+// useful to have a 'true' boxed Nullable<T> (that is, one that has two fields).
+// This function returns a 'normalized' version of the given object.
+
+OBJECTREF Nullable::NormalizeBox(OBJECTREF obj) {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (obj != NULL) {
+ MethodTable* retMT = obj->GetMethodTable();
+ if (Nullable::IsNullableType(retMT))
+ obj = Nullable::Box(obj->GetData(), retMT);
+ }
+ return obj;
+}
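+
+//===============================================================================
+// Illustrative effect of NormalizeBox (a sketch): given a 'true' boxed
+// Nullable<int> (an object whose method table really is Nullable<int>), the
+// object is re-boxed through Nullable::Box, so callers observe only the
+// normal form:
+//
+//     obj = Nullable::NormalizeBox(obj);  // now either NULL or a boxed int
+//
+// Objects of any other type pass through unchanged.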
+
+
+void ThreadBaseObject::SetInternal(Thread *it)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // only allow a transition from NULL to non-NULL
+ _ASSERTE((m_InternalThread == NULL) && (it != NULL));
+ m_InternalThread = it;
+
+ // Now the native Thread will only be destroyed after the managed Thread is collected.
+ // Tell the GC that the managed Thread actually represents much more memory.
+ GCInterface::NewAddMemoryPressure(sizeof(Thread));
+}
+
+void ThreadBaseObject::ClearInternal()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(m_InternalThread != NULL);
+ m_InternalThread = NULL;
+ GCInterface::NewRemoveMemoryPressure(sizeof(Thread));
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+
+StackTraceElement const & StackTraceArray::operator[](size_t index) const
+{
+ WRAPPER_NO_CONTRACT;
+ return GetData()[index];
+}
+
+StackTraceElement & StackTraceArray::operator[](size_t index)
+{
+ WRAPPER_NO_CONTRACT;
+ return GetData()[index];
+}
+
+#if !defined(DACCESS_COMPILE)
+// Define the lock used to access stacktrace from an exception object
+SpinLock g_StackTraceArrayLock;
+
+void ExceptionObject::SetStackTrace(StackTraceArray const & stackTrace, PTRARRAYREF dynamicMethodArray)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Thread *m_pThread = GetThread();
+ SpinLock::AcquireLock(&g_StackTraceArrayLock, SPINLOCK_THREAD_PARAM_ONLY_IN_SOME_BUILDS);
+
+ SetObjectReference((OBJECTREF*)&_stackTrace, (OBJECTREF)stackTrace.Get(), GetAppDomain());
+ SetObjectReference((OBJECTREF*)&_dynamicMethods, (OBJECTREF)dynamicMethodArray, GetAppDomain());
+
+ SpinLock::ReleaseLock(&g_StackTraceArrayLock, SPINLOCK_THREAD_PARAM_ONLY_IN_SOME_BUILDS);
+
+}
+
+void ExceptionObject::SetNullStackTrace()
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Thread *m_pThread = GetThread();
+ SpinLock::AcquireLock(&g_StackTraceArrayLock, SPINLOCK_THREAD_PARAM_ONLY_IN_SOME_BUILDS);
+
+ I1ARRAYREF stackTraceArray = NULL;
+ PTRARRAYREF dynamicMethodArray = NULL;
+
+ SetObjectReference((OBJECTREF*)&_stackTrace, (OBJECTREF)stackTraceArray, GetAppDomain());
+ SetObjectReference((OBJECTREF*)&_dynamicMethods, (OBJECTREF)dynamicMethodArray, GetAppDomain());
+
+ SpinLock::ReleaseLock(&g_StackTraceArrayLock, SPINLOCK_THREAD_PARAM_ONLY_IN_SOME_BUILDS);
+}
+
+#endif // !defined(DACCESS_COMPILE)
+
+void ExceptionObject::GetStackTrace(StackTraceArray & stackTrace, PTRARRAYREF * outDynamicMethodArray /*= NULL*/) const
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#if !defined(DACCESS_COMPILE)
+ Thread *m_pThread = GetThread();
+ SpinLock::AcquireLock(&g_StackTraceArrayLock, SPINLOCK_THREAD_PARAM_ONLY_IN_SOME_BUILDS);
+#endif // !defined(DACCESS_COMPILE)
+
+ StackTraceArray temp(_stackTrace);
+ stackTrace.Swap(temp);
+
+ if (outDynamicMethodArray != NULL)
+ {
+ *outDynamicMethodArray = _dynamicMethods;
+ }
+
+#if !defined(DACCESS_COMPILE)
+ SpinLock::ReleaseLock(&g_StackTraceArrayLock, SPINLOCK_THREAD_PARAM_ONLY_IN_SOME_BUILDS);
+#endif // !defined(DACCESS_COMPILE)
+
+}
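+
+// Illustrative pairing of the accessors above (a sketch; 'pExcep' is a
+// hypothetical reference to an ExceptionObject, already in cooperative mode):
+//
+//     StackTraceArray trace;
+//     pExcep->GetStackTrace(trace);            // snapshot under the spin lock
+//     pExcep->SetStackTrace(trace, dynMeths);  // publish under the same lock
+//
+// g_StackTraceArrayLock only orders readers against writers; it does not make
+// the arrays themselves immutable.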
diff --git a/src/vm/object.h b/src/vm/object.h
new file mode 100644
index 0000000000..95f7fa0c64
--- /dev/null
+++ b/src/vm/object.h
@@ -0,0 +1,4680 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// OBJECT.H
+//
+// Definitions of a Com+ Object
+//
+
+// See code:EEStartup#TableOfContents for overview
+
+
+#ifndef _OBJECT_H_
+#define _OBJECT_H_
+
+#include "util.hpp"
+#include "syncblk.h"
+#include "gcdesc.h"
+#include "specialstatics.h"
+#include "sstring.h"
+#include "daccess.h"
+
+extern "C" void __fastcall ZeroMemoryInGCHeap(void*, size_t);
+
+void ErectWriteBarrierForMT(MethodTable **dst, MethodTable *ref);
+
+/*
+ #ObjectModel
+ * COM+ Internal Object Model
+ *
+ *
+ * Object - This is the common base part to all COM+ objects
+ * | it contains the MethodTable pointer and the
+ * | sync block index, which is at a negative offset
+ * |
+ * +-- code:StringObject - String objects are specialized objects for string
+ * | storage/retrieval for higher performance
+ * |
+ * +-- code:StringBufferObject - StringBuffer instance layout.
+ * |
+ * +-- BaseObjectWithCachedData - Object Plus one object field for caching.
+ * | |
+ * | +- ReflectClassBaseObject - The base object for the RuntimeType class
+ * | +- ReflectMethodObject - The base object for the RuntimeMethodInfo class
+ * | +- ReflectFieldObject - The base object for the RtFieldInfo class
+ * |
+ * +-- code:ArrayBase - Base portion of all arrays
+ * | |
+ * | +- I1Array - Base type arrays
+ * | | I2Array
+ * | | ...
+ * | |
+ * | +- PtrArray - Array of OBJECTREFs, different than base arrays because of pObjectClass
+ * |
+ * +-- code:AppDomainBaseObject - The base object for the class AppDomain
+ * |
+ * +-- code:AssemblyBaseObject - The base object for the class Assembly
+ * |
+ * +-- code:ContextBaseObject - base object for class Context
+ *
+ *
+ * PLEASE NOTE THE FOLLOWING WHEN ADDING A NEW OBJECT TYPE:
+ *
+ * The size of the object in the heap must be able to be computed
+ * very, very quickly for GC purposes. Restrictions on the layout
+ * of the object guarantee this is possible.
+ *
+ * Any object that inherits from Object must be able to
+ * compute its complete size by using the first 4 bytes of
+ * the object following the Object part and constants
+ * reachable from the MethodTable...
+ *
+ * The formula used for this calculation is:
+ *          MT->GetBaseSize() + (OBJECTTYPEREF->GetSizeField() * MT->GetComponentSize())
+ *
+ * So for Object, since this is of fixed size, the ComponentSize is 0, which makes the right side
+ * of the equation above equal to 0 no matter what the value of GetSizeField(), so the size is just the base size.
+ *
+ */
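+
+/* Worked example of the size formula above (a sketch; concrete base sizes are
+ * platform-dependent and are an assumption here): for an int[] with 100
+ * elements,
+ *
+ *     size = MT->GetBaseSize() + 100 * sizeof(INT32) = baseSize + 400 bytes
+ *
+ * For a non-array object the component size is 0, so the product term drops
+ * out and the size is simply GetBaseSize().
+ */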
+
+// <TODO>
+// @TODO: #define COW 0x04
+// @TODO: MOO, MOO - no, not bovine, really Copy On Write bit for StringBuffer, requires 8 byte align MT
+// @TODL: which we don't have yet</TODO>
+
+class MethodTable;
+class Thread;
+class BaseDomain;
+class Assembly;
+class Context;
+class CtxStaticData;
+class DomainAssembly;
+class AssemblyNative;
+class WaitHandleNative;
+struct RCW;
+
+#if CHECK_APP_DOMAIN_LEAKS
+
+class Object;
+
+class SetAppDomainAgilePendingTable
+{
+public:
+
+ SetAppDomainAgilePendingTable ();
+ ~SetAppDomainAgilePendingTable ();
+
+ void PushReference (Object *pObject)
+ {
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ PendingEntry entry;
+ entry.pObject = pObject;
+
+ m_Stack.Push(entry);
+ }
+
+ void PushParent (Object *pObject)
+ {
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ PendingEntry entry;
+ entry.pObject = (Object*)((size_t)pObject | 1);
+
+ m_Stack.Push(entry);
+ }
+
+ Object *GetPendingObject (bool *pfReturnedToParent)
+ {
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ if (!m_Stack.Count())
+ return NULL;
+
+ PendingEntry *pPending = m_Stack.Pop();
+
+ *pfReturnedToParent = pPending->fMarked != 0;
+ return (Object*)((size_t)pPending->pObject & ~1);
+ }
+
+private:
+
+ union PendingEntry
+ {
+ Object *pObject;
+
+ // Indicates whether the current thread set BIT_SBLK_AGILE_IN_PROGRESS
+ // on the object. Entries without this flag set are unexplored
+ // objects.
+ size_t fMarked:1;
+ };
+
+ CStackArray<PendingEntry> m_Stack;
+};
+
+#endif //CHECK_APP_DOMAIN_LEAKS
+
+
+//
+// The generational GC requires that every object be at least 12 bytes
+// in size.
+
+#define MIN_OBJECT_SIZE (2*sizeof(BYTE*) + sizeof(ObjHeader))
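+
+// Illustrative arithmetic (assuming a pointer-sized ObjHeader): on a 32-bit
+// platform MIN_OBJECT_SIZE is 2*4 + 4 = 12 bytes, matching the comment above;
+// on a 64-bit platform it is 2*8 + 8 = 24 bytes.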
+
+#define PTRALIGNCONST (DATA_ALIGNMENT-1)
+
+#ifndef PtrAlign
+#define PtrAlign(size) \
+ ((size + PTRALIGNCONST) & (~PTRALIGNCONST))
+#endif //!PtrAlign
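+
+// Illustrative arithmetic (assuming DATA_ALIGNMENT == 8): PtrAlign(13) ==
+// (13 + 7) & ~7 == 16, and an already-aligned size passes through unchanged,
+// e.g. PtrAlign(16) == 16.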
+
+// code:Object is the representation of a managed object on the GC heap.
+//
+// See code:#ObjectModel for some important subclasses of code:Object
+//
+// The only fields mandated by all objects are
+//
+// * a pointer to the code:MethodTable at offset 0
+//     * a pointer to a code:ObjHeader at a negative offset. This is often zero. It holds
+//       any additional information that we might need to attach to arbitrary objects.
+//
+class Object
+{
+ protected:
+ PTR_MethodTable m_pMethTab;
+
+ protected:
+ Object() { LIMITED_METHOD_CONTRACT; };
+ ~Object() { LIMITED_METHOD_CONTRACT; };
+
+ public:
+ MethodTable *RawGetMethodTable() const
+ {
+ return m_pMethTab;
+ }
+
+#ifndef DACCESS_COMPILE
+ void RawSetMethodTable(MethodTable *pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pMethTab = pMT;
+ }
+
+ VOID SetMethodTable(MethodTable *pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pMethTab = pMT;
+ }
+
+ VOID SetMethodTableForLargeObject(MethodTable *pMT)
+ {
+ // This function must be used if the allocation occurs on the large object heap, and the method table might be a collectible type
+ WRAPPER_NO_CONTRACT;
+ ErectWriteBarrierForMT(&m_pMethTab, pMT);
+ }
+
+#endif //!DACCESS_COMPILE
+
+ // An object might be a proxy of some sort, with a thunking VTable. If so, we can
+ // advance to the true method table or class.
+ BOOL IsTransparentProxy()
+ {
+#ifdef FEATURE_REMOTING
+ WRAPPER_NO_CONTRACT;
+ return( GetMethodTable()->IsTransparentProxy() );
+#else
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+#endif
+ }
+
+#define MARKED_BIT 0x1
+
+ PTR_MethodTable GetMethodTable() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifndef DACCESS_COMPILE
+ // We should always use GetGCSafeMethodTable() if we're running during a GC.
+ // If the mark bit is set then we're running during a GC
+ _ASSERTE((dac_cast<TADDR>(m_pMethTab) & MARKED_BIT) == 0);
+
+ return m_pMethTab;
+#else //DACCESS_COMPILE
+
+ //@dbgtodo dharvey Make this a type which supports bitwise and operations
+ //when available
+ return PTR_MethodTable((dac_cast<TADDR>(m_pMethTab)) & (~MARKED_BIT));
+#endif //DACCESS_COMPILE
+ }
+
+ DPTR(PTR_MethodTable) GetMethodTablePtr() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return dac_cast<DPTR(PTR_MethodTable)>(PTR_HOST_MEMBER_TADDR(Object, this, m_pMethTab));
+ }
+
+ MethodTable *GetTrueMethodTable();
+
+ TypeHandle GetTypeHandle();
+ TypeHandle GetTrueTypeHandle();
+
+ // Methods used to determine if an object supports a given interface.
+ static BOOL SupportsInterface(OBJECTREF pObj, MethodTable *pInterfaceMT);
+
+ inline DWORD GetNumComponents();
+ inline SIZE_T GetSize();
+
+ CGCDesc* GetSlotMap()
+ {
+ WRAPPER_NO_CONTRACT;
+ return( CGCDesc::GetCGCDescFromMT(GetMethodTable()));
+ }
+
+ // Sync Block & Synchronization services
+
+ // Access the ObjHeader which is at a negative offset on the object (because of
+ // cache lines)
+ PTR_ObjHeader GetHeader()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<PTR_ObjHeader>(this) - 1;
+ }
+
+ // Get the current address of the object (works for debug refs, too.)
+ PTR_BYTE GetAddress()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<PTR_BYTE>(this);
+ }
+
+#ifdef _DEBUG
+ // TRUE if the header has a real SyncBlockIndex (i.e. it has an entry in the
+ // SyncTable, though it doesn't necessarily have an entry in the SyncBlockCache)
+ BOOL HasEmptySyncBlockInfo()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetHeader()->HasEmptySyncBlockInfo();
+ }
+#endif
+
+ // retrieve or allocate a sync block for this object
+ SyncBlock *GetSyncBlock()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetHeader()->GetSyncBlock();
+ }
+
+ DWORD GetSyncBlockIndex()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetHeader()->GetSyncBlockIndex();
+ }
+
+#ifndef BINDER
+ ADIndex GetAppDomainIndex();
+
+ // Get app domain of object, or NULL if it is agile
+ AppDomain *GetAppDomain();
+
+#ifndef DACCESS_COMPILE
+ // Set app domain of object to current domain.
+ void SetAppDomain() { WRAPPER_NO_CONTRACT; SetAppDomain(::GetAppDomain()); }
+#endif
+
+ // Set app domain of object to given domain - it can only be set once
+ void SetAppDomain(AppDomain *pDomain);
+#endif // BINDER
+
+#ifdef _DEBUG
+#ifndef DACCESS_COMPILE
+ // For SO-tolerance contract violation purposes, define these DEBUG_ versions to identify
+ // the codepaths to SetAppDomain that are called only from DEBUG code.
+ void DEBUG_SetAppDomain()
+ {
+ WRAPPER_NO_CONTRACT;
+
+#ifndef BINDER
+ DEBUG_SetAppDomain(::GetAppDomain());
+#endif
+ }
+#endif //!DACCESS_COMPILE
+
+ void DEBUG_SetAppDomain(AppDomain *pDomain);
+#endif //_DEBUG
+
+#if CHECK_APP_DOMAIN_LEAKS
+
+ // Mark object as app domain agile
+ BOOL SetAppDomainAgile(BOOL raiseAssert=TRUE, SetAppDomainAgilePendingTable *pTable = NULL);
+ BOOL TrySetAppDomainAgile(BOOL raiseAssert=TRUE);
+
+ // Mark sync block as app domain agile
+ void SetSyncBlockAppDomainAgile();
+
+ // Check if object is app domain agile
+ BOOL IsAppDomainAgile();
+
+ // Check if object is app domain agile
+ BOOL IsAppDomainAgileRaw()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ SyncBlock *psb = PassiveGetSyncBlock();
+
+ return (psb && psb->IsAppDomainAgile());
+ }
+
+ BOOL Object::IsCheckedForAppDomainAgile()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ SyncBlock *psb = PassiveGetSyncBlock();
+ return (psb && psb->IsCheckedForAppDomainAgile());
+ }
+
+ void Object::SetIsCheckedForAppDomainAgile()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ SyncBlock *psb = PassiveGetSyncBlock();
+ if (psb)
+ psb->SetIsCheckedForAppDomainAgile();
+ }
+
+ // Check object to see if it is usable in the current domain
+ BOOL CheckAppDomain() { WRAPPER_NO_CONTRACT; return CheckAppDomain(::GetAppDomain()); }
+
+ //Check object to see if it is usable in the given domain
+ BOOL CheckAppDomain(AppDomain *pDomain);
+
+ // Check if the object's type is app domain agile
+ BOOL IsTypeAppDomainAgile();
+
+ // Check if the object's type is conditionally app domain agile
+ BOOL IsTypeCheckAppDomainAgile();
+
+ // Check if the object's type is naturally app domain agile
+ BOOL IsTypeTypesafeAppDomainAgile();
+
+ // Check if the object's type is possibly app domain agile
+ BOOL IsTypeNeverAppDomainAgile();
+
+ // Validate object & fields to see that it's usable from the current app domain
+ BOOL ValidateAppDomain() { WRAPPER_NO_CONTRACT; return ValidateAppDomain(::GetAppDomain()); }
+
+ // Validate object & fields to see that it's usable from any app domain
+ BOOL ValidateAppDomainAgile() { WRAPPER_NO_CONTRACT; return ValidateAppDomain(NULL); }
+
+ // Validate object & fields to see that it's usable from the given app domain (or null for agile)
+ BOOL ValidateAppDomain(AppDomain *pAppDomain);
+
+ // Validate fields to see that they are usable from the object's app domain
+ // (or from any domain if the object is agile)
+ BOOL ValidateAppDomainFields() { WRAPPER_NO_CONTRACT; return ValidateAppDomainFields(GetAppDomain()); }
+
+ // Validate fields to see that they are usable from the given app domain (or null for agile)
+ BOOL ValidateAppDomainFields(AppDomain *pAppDomain);
+
+ // Validate a value type's fields to see that it's usable from the current app domain
+ static BOOL ValidateValueTypeAppDomain(MethodTable *pMT, void *base, BOOL raiseAssert = TRUE)
+ { WRAPPER_NO_CONTRACT; return ValidateValueTypeAppDomain(pMT, base, ::GetAppDomain(), raiseAssert); }
+
+ // Validate a value type's fields to see that it's usable from any app domain
+ static BOOL ValidateValueTypeAppDomainAgile(MethodTable *pMT, void *base, BOOL raiseAssert = TRUE)
+ { WRAPPER_NO_CONTRACT; return ValidateValueTypeAppDomain(pMT, base, NULL, raiseAssert); }
+
+ // Validate a value type's fields to see that it's usable from the given app domain (or null for agile)
+ static BOOL ValidateValueTypeAppDomain(MethodTable *pMT, void *base, AppDomain *pAppDomain, BOOL raiseAssert = TRUE);
+
+ // Call when we are assigning this object to a dangerous field
+ // in an object in a given app domain (or agile if null)
+ BOOL AssignAppDomain(AppDomain *pAppDomain, BOOL raiseAssert = TRUE);
+ BOOL TryAssignAppDomain(AppDomain *pAppDomain, BOOL raiseAssert = TRUE);
+
+ // Call when we are assigning to a dangerous value type field
+ // in an object in a given app domain (or agile if null)
+ static BOOL AssignValueTypeAppDomain(MethodTable *pMT, void *base, AppDomain *pAppDomain, BOOL raiseAssert = TRUE);
+
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+ // DO NOT ADD ANY ASSERTS TO THIS METHOD.
+ // DO NOT USE THIS METHOD.
+ // Yes folks, for better or worse the debugger pokes supposed object addresses
+ // to try to see if objects are valid, possibly firing an AccessViolation or worse,
+ // and then catches the AV and reports a failure to the debug client. This makes
+ // the debugger slightly more robust should any corrupted object references appear
+ // in a session. Thus it is "correct" behaviour for this to AV when used with
+ // an invalid object pointer, and incorrect behaviour for it to
+ // assert.
+ BOOL ValidateObjectWithPossibleAV();
+
+ // Validate an object ref out of the Promote routine in the GC
+ void ValidatePromote(ScanContext *sc, DWORD flags);
+
+ // Validate an object ref out of the VerifyHeap routine in the GC
+ void ValidateHeap(Object *from, BOOL bDeep=TRUE);
+
+ PTR_SyncBlock PassiveGetSyncBlock()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetHeader()->PassiveGetSyncBlock();
+ }
+
+ static DWORD ComputeHashCode();
+
+#ifndef DACCESS_COMPILE
+ INT32 GetHashCodeEx();
+#endif // #ifndef DACCESS_COMPILE
+
+ // Synchronization
+#ifndef DACCESS_COMPILE
+
+ void EnterObjMonitor()
+ {
+ WRAPPER_NO_CONTRACT;
+ GetHeader()->EnterObjMonitor();
+ }
+
+ BOOL TryEnterObjMonitor(INT32 timeOut = 0)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetHeader()->TryEnterObjMonitor(timeOut);
+ }
+
+ FORCEINLINE AwareLock::EnterHelperResult EnterObjMonitorHelper(Thread* pCurThread)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetHeader()->EnterObjMonitorHelper(pCurThread);
+ }
+
+ FORCEINLINE AwareLock::EnterHelperResult EnterObjMonitorHelperSpin(Thread* pCurThread)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetHeader()->EnterObjMonitorHelperSpin(pCurThread);
+ }
+
+ BOOL LeaveObjMonitor()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetHeader()->LeaveObjMonitor();
+ }
+
+ // should be called only from unwind code; used in the
+ // case where EnterObjMonitor failed to allocate the
+ // sync-object.
+ BOOL LeaveObjMonitorAtException()
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetHeader()->LeaveObjMonitorAtException();
+ }
+
+ FORCEINLINE AwareLock::LeaveHelperAction LeaveObjMonitorHelper(Thread* pCurThread)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetHeader()->LeaveObjMonitorHelper(pCurThread);
+ }
+
+ // Returns TRUE if the lock is owned and FALSE otherwise
+ // threadId is set to the ID (Thread::GetThreadId()) of the thread which owns the lock
+ // acquisitionCount is set to the number of times the lock needs to be released before
+ // it is unowned
+ BOOL GetThreadOwningMonitorLock(DWORD *pThreadId, DWORD *pAcquisitionCount)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return GetHeader()->GetThreadOwningMonitorLock(pThreadId, pAcquisitionCount);
+ }
+
+#endif // #ifndef DACCESS_COMPILE
+
+ BOOL Wait(INT32 timeOut, BOOL exitContext)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetHeader()->Wait(timeOut, exitContext);
+ }
+
+ void Pulse()
+ {
+ WRAPPER_NO_CONTRACT;
+ GetHeader()->Pulse();
+ }
+
+ void PulseAll()
+ {
+ WRAPPER_NO_CONTRACT;
+ GetHeader()->PulseAll();
+ }
+
+ PTR_VOID UnBox(); // if it is a value class, get the pointer to the first field
+
+ PTR_BYTE GetData(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return dac_cast<PTR_BYTE>(this) + sizeof(Object);
+ }
+
+ static UINT GetOffsetOfFirstField()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return sizeof(Object);
+ }
+
+ DWORD GetOffset32(DWORD dwOffset)
+ {
+ WRAPPER_NO_CONTRACT;
+ return * PTR_DWORD(GetData() + dwOffset);
+ }
+
+ USHORT GetOffset16(DWORD dwOffset)
+ {
+ WRAPPER_NO_CONTRACT;
+ return * PTR_USHORT(GetData() + dwOffset);
+ }
+
+ BYTE GetOffset8(DWORD dwOffset)
+ {
+ WRAPPER_NO_CONTRACT;
+ return * PTR_BYTE(GetData() + dwOffset);
+ }
+
+ __int64 GetOffset64(DWORD dwOffset)
+ {
+ WRAPPER_NO_CONTRACT;
+ return (__int64) * PTR_ULONG64(GetData() + dwOffset);
+ }
+
+ void *GetPtrOffset(DWORD dwOffset)
+ {
+ WRAPPER_NO_CONTRACT;
+ return (void *)(TADDR)*PTR_TADDR(GetData() + dwOffset);
+ }
+
+#ifndef DACCESS_COMPILE
+
+ void SetOffsetObjectRef(DWORD dwOffset, size_t dwValue);
+
+ void SetOffsetPtr(DWORD dwOffset, LPVOID value)
+ {
+ WRAPPER_NO_CONTRACT;
+ *(LPVOID *) &GetData()[dwOffset] = value;
+ }
+
+ void SetOffset32(DWORD dwOffset, DWORD dwValue)
+ {
+ WRAPPER_NO_CONTRACT;
+ *(DWORD *) &GetData()[dwOffset] = dwValue;
+ }
+
+ void SetOffset16(DWORD dwOffset, DWORD dwValue)
+ {
+ WRAPPER_NO_CONTRACT;
+ *(USHORT *) &GetData()[dwOffset] = (USHORT) dwValue;
+ }
+
+ void SetOffset8(DWORD dwOffset, DWORD dwValue)
+ {
+ WRAPPER_NO_CONTRACT;
+ *(BYTE *) &GetData()[dwOffset] = (BYTE) dwValue;
+ }
+
+ void SetOffset64(DWORD dwOffset, __int64 qwValue)
+ {
+ WRAPPER_NO_CONTRACT;
+ *(__int64 *) &GetData()[dwOffset] = qwValue;
+ }
+
+#endif // #ifndef DACCESS_COMPILE
+
+ VOID Validate(BOOL bDeep = TRUE, BOOL bVerifyNextHeader = TRUE, BOOL bVerifySyncBlock = TRUE);
+
+ PTR_MethodTable GetGCSafeMethodTable() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ // lose GC marking bit and the pinning bit
+ // A method table pointer should always be aligned. During GC we set the least
+ // significant bit for marked objects and we set the second to least significant
+ // bit for pinned objects. So if we want the actual MT pointer during a GC
+ // we must zero out the lowest 2 bits.
+ return dac_cast<PTR_MethodTable>((dac_cast<TADDR>(m_pMethTab)) & ~((UINT_PTR)3));
+ }
+
+ // There are some cases where it is unsafe to get the type handle during a GC.
+ // This occurs when the type has already been unloaded as part of an in-progress appdomain shutdown.
+ TypeHandle GetGCSafeTypeHandleIfPossible() const;
+
+ inline TypeHandle GetGCSafeTypeHandle() const;
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(void);
+#endif
+
+ private:
+ VOID ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock);
+
+#if CHECK_APP_DOMAIN_LEAKS
+ friend class ObjHeader;
+ BOOL SetFieldsAgile(BOOL raiseAssert = TRUE, SetAppDomainAgilePendingTable *pTable = NULL);
+ static BOOL SetClassFieldsAgile(MethodTable *pMT, void *base, BOOL baseIsVT, BOOL raiseAssert = TRUE, SetAppDomainAgilePendingTable *pTable = NULL);
+ static BOOL ValidateClassFields(MethodTable *pMT, void *base, BOOL baseIsVT, AppDomain *pAppDomain, BOOL raiseAssert = TRUE);
+ BOOL SetAppDomainAgileWorker(BOOL raiseAssert, SetAppDomainAgilePendingTable *pTable);
+ BOOL ShouldCheckAppDomainAgile(BOOL raiseAssert, BOOL *pfResult);
+#endif
+
+};
+
+/*
+ * Object ref setting routines. You must use these to do
+ * proper write barrier support, as well as app domain
+ * leak checking.
+ *
+ * Note that the AppDomain parameter is the app domain affinity
+ * of the object containing the field or value class. It should
+ * be NULL if the containing object is app domain agile. Note that
+ * you typically get this value by calling obj->GetAppDomain() on
+ * the containing object.
+ */
+
+// SetObjectReference sets an OBJECTREF field
+
+void SetObjectReferenceUnchecked(OBJECTREF *dst,OBJECTREF ref);
+
+#ifdef _DEBUG
+void EnableStressHeapHelper();
+#endif
+
+//Used to clear the object reference
+inline void ClearObjectReference(OBJECTREF* dst)
+{
+ LIMITED_METHOD_CONTRACT;
+ *(void**)(dst) = NULL;
+}
+
+// CopyValueClass sets a value class field
+
+void STDCALL CopyValueClassUnchecked(void* dest, void* src, MethodTable *pMT);
+
+inline void InitValueClass(void *dest, MethodTable *pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ ZeroMemoryInGCHeap(dest, pMT->GetNumInstanceFieldBytes());
+}
+
+#if CHECK_APP_DOMAIN_LEAKS
+
+void SetObjectReferenceChecked(OBJECTREF *dst,OBJECTREF ref, AppDomain *pAppDomain);
+void CopyValueClassChecked(void* dest, void* src, MethodTable *pMT, AppDomain *pAppDomain);
+
+#define SetObjectReference(_d,_r,_a) SetObjectReferenceChecked(_d, _r, _a)
+#define CopyValueClass(_d,_s,_m,_a) CopyValueClassChecked(_d,_s,_m,_a)
+
+#else
+
+#define SetObjectReference(_d,_r,_a) SetObjectReferenceUnchecked(_d, _r)
+#define CopyValueClass(_d,_s,_m,_a) CopyValueClassUnchecked(_d,_s,_m)
+
+#endif
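+
+// Illustrative use (a sketch; 'm_field' is a hypothetical OBJECTREF field of a
+// heap object reachable through 'pObj'):
+//
+//     // Wrong: a raw store bypasses the GC write barrier (and, in checked
+//     // builds, the app domain leak checking):
+//     //     pObj->m_field = ref;
+//     // Right:
+//     SetObjectReference(&pObj->m_field, ref, pObj->GetAppDomain());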
+
+#include <pshpack4.h>
+
+
+// There are two basic kinds of array layouts in COM+
+// ELEMENT_TYPE_ARRAY - a multidimensional array with lower bounds on the dims
+// ELEMENT_TYPE_SZARRAY - A zero-based single dimensional array
+//
+// In addition the layout of an array in memory is also affected by
+// whether the method table is shared (eg in the case of arrays of object refs)
+// or not. In the shared case, the array has to hold the type handle of
+// the element type.
+//
+// ArrayBase encapsulates all of these details. In theory you should never
+// have to peek inside this abstraction
+//
+class ArrayBase : public Object
+{
+ friend class GCHeap;
+ friend class CObjectHeader;
+ friend class Object;
+ friend OBJECTREF AllocateArrayEx(TypeHandle arrayClass, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap DEBUG_ARG(BOOL bDontSetAppDomain));
+ friend OBJECTREF FastAllocatePrimitiveArray(MethodTable* arrayType, DWORD cElements, BOOL bAllocateInLargeHeap);
+ friend class JIT_TrialAlloc;
+ friend class CheckAsmOffsets;
+
+private:
+ // This MUST be the first field, so that it directly follows Object. This is because
+ // Object::GetSize() looks at m_NumComponents even though it may not be an array (the
+    // value is shifted out if not an array, so it's OK).
+ DWORD m_NumComponents;
+#ifdef _WIN64
+ DWORD pad;
+#endif // _WIN64
+
+ SVAL_DECL(INT32, s_arrayBoundsZero); // = 0
+
+    // What comes after this conceptually is:
+ // TypeHandle elementType; Only present if the method table is shared among many types (arrays of pointers)
+ // INT32 bounds[rank]; The bounds are only present for Multidimensional arrays
+ // INT32 lowerBounds[rank]; Valid indexes are lowerBounds[i] <= index[i] < lowerBounds[i] + bounds[i]
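+    //
+    // Illustrative layout (a sketch; 64-bit, for a rank-2 int[,] whose method
+    // table is not shared, so no element TypeHandle is stored): after
+    // m_NumComponents and the pad come bounds[0], bounds[1], then
+    // lowerBounds[0], lowerBounds[1]; GetDataPtrOffset(pMT) skips past all of
+    // this to the first element.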
+
+public:
+ // Gets the unique type handle for this array object.
+ // This will call the loader in don't-load mode - the allocator
+ // always makes sure that the particular ArrayTypeDesc for this array
+ // type is available before allocating any instances of this array type.
+ inline TypeHandle GetTypeHandle() const;
+
+ inline static TypeHandle GetTypeHandle(MethodTable * pMT);
+
+    // Get the element type for the array; this works whether or not the
+    // element type is stored in the array
+ inline TypeHandle GetArrayElementTypeHandle() const;
+
+ // Get the CorElementType for the elements in the array. Avoids creating a TypeHandle
+ inline CorElementType GetArrayElementType() const;
+
+ inline unsigned GetRank() const;
+
+ // Total element count for the array
+ inline DWORD GetNumComponents() const;
+
+ // Get pointer to elements, handles any number of dimensions
+ PTR_BYTE GetDataPtr(BOOL inGC = FALSE) const {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+#ifdef _DEBUG
+#ifndef DACCESS_COMPILE
+ EnableStressHeapHelper();
+#endif
+#endif
+ return dac_cast<PTR_BYTE>(this) +
+ GetDataPtrOffset(inGC ? GetGCSafeMethodTable() : GetMethodTable());
+ }
+
+ // The component size is actually 16-bit WORD, but this method is returning SIZE_T to ensure
+ // that SIZE_T is used everywhere for object size computation. It is necessary to support
+ // objects bigger than 2GB.
+ SIZE_T GetComponentSize() const {
+ WRAPPER_NO_CONTRACT;
+ MethodTable * pMT;
+#if CHECK_APP_DOMAIN_LEAKS
+ pMT = GetGCSafeMethodTable();
+#else
+ pMT = GetMethodTable();
+#endif //CHECK_APP_DOMAIN_LEAKS
+ _ASSERTE(pMT->HasComponentSize());
+ return pMT->RawGetComponentSize();
+ }
+
+ // Note that this can be a multidimensional array of rank 1
+    // (for example, if we had a 1-D array with lower bounds)
+ BOOL IsMultiDimArray() const {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return(GetMethodTable()->IsMultiDimArray());
+ }
+
+    // Get pointer to the beginning of the bounds (counts for each dim)
+ // Works for any array type
+ PTR_INT32 GetBoundsPtr() const {
+ WRAPPER_NO_CONTRACT;
+ MethodTable * pMT = GetMethodTable();
+ if (pMT->IsMultiDimArray())
+ {
+ return dac_cast<PTR_INT32>(
+ dac_cast<TADDR>(this) + sizeof(*this));
+ }
+ else
+ {
+ return dac_cast<PTR_INT32>(PTR_HOST_MEMBER_TADDR(ArrayBase, this,
+ m_NumComponents));
+ }
+ }
+
+ // Works for any array type
+ PTR_INT32 GetLowerBoundsPtr() const {
+ WRAPPER_NO_CONTRACT;
+ if (IsMultiDimArray())
+ {
+ // Lower bounds info is after total bounds info
+ // and total bounds info has rank elements
+ return GetBoundsPtr() + GetRank();
+ }
+ else
+ return dac_cast<PTR_INT32>(GVAL_ADDR(s_arrayBoundsZero));
+ }
+
+ static unsigned GetOffsetOfNumComponents() {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(ArrayBase, m_NumComponents);
+ }
+
+ inline static unsigned GetDataPtrOffset(MethodTable* pMT);
+
+ inline static unsigned GetBoundsOffset(MethodTable* pMT);
+ inline static unsigned GetLowerBoundsOffset(MethodTable* pMT);
+};
+
+//
+// Template used to build all the non-object
+// arrays of a single dimension
+//
+
+template < class KIND >
+class Array : public ArrayBase
+{
+ public:
+
+ typedef DPTR(KIND) PTR_KIND;
+ typedef DPTR(const KIND) PTR_CKIND;
+
+ KIND m_Array[1];
+
+ PTR_KIND GetDirectPointerToNonObjectElements()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ // return m_Array;
+ return PTR_KIND(GetDataPtr()); // This also handles arrays of dim 1 with lower bounds present
+
+ }
+
+ PTR_CKIND GetDirectConstPointerToNonObjectElements() const
+ {
+ WRAPPER_NO_CONTRACT;
+ // return m_Array;
+ return PTR_CKIND(GetDataPtr()); // This also handles arrays of dim 1 with lower bounds present
+ }
+};
+
+
+// Warning: Use PtrArray only for single dimensional arrays, not multidim arrays.
+class PtrArray : public ArrayBase
+{
+ friend class GCHeap;
+ friend class ClrDataAccess;
+ friend OBJECTREF AllocateArrayEx(TypeHandle arrayClass, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap);
+ friend class JIT_TrialAlloc;
+ friend class CheckAsmOffsets;
+
+public:
+ TypeHandle GetArrayElementTypeHandle()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetMethodTable()->GetApproxArrayElementTypeHandle();
+ }
+
+ static SIZE_T GetDataOffset()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(PtrArray, m_Array);
+ }
+
+ void SetAt(SIZE_T i, OBJECTREF ref)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ _ASSERTE(i < GetNumComponents());
+ SetObjectReference(m_Array + i, ref, GetAppDomain());
+ }
+
+ void ClearAt(SIZE_T i)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(i < GetNumComponents());
+ ClearObjectReference(m_Array + i);
+ }
+
+ OBJECTREF GetAt(SIZE_T i)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ _ASSERTE(i < GetNumComponents());
+
+// DAC doesn't know the true size of this array
+// the compiler thinks it is size 1, but really it is size N hanging off the structure
+#ifndef DACCESS_COMPILE
+ return m_Array[i];
+#else
+ TADDR arrayTargetAddress = dac_cast<TADDR>(this) + offsetof(PtrArray, m_Array);
+ __ArrayDPtr<OBJECTREF> targetArray = dac_cast< __ArrayDPtr<OBJECTREF> >(arrayTargetAddress);
+ return targetArray[i];
+#endif
+ }
+
+ friend class StubLinkerCPU;
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+ friend class ArrayOpLinker;
+#endif
+public:
+ OBJECTREF m_Array[1];
+};
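+
+// Usage sketch (illustrative; assumes an object-array allocator such as
+// AllocateObjectArray is declared elsewhere in the VM):
+//
+//     PTRARRAYREF arr = (PTRARRAYREF) AllocateObjectArray(10, g_pObjectClass);
+//     arr->SetAt(0, someRef);         // barriered store; asserts index in range
+//     OBJECTREF elem = arr->GetAt(0);
+//     arr->ClearAt(0);                // null store; no write barrier needed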
+
+/* A TypedByRef is a structure that is used to implement VB's BYREF variants.
+   It is basically a tuple of an address of some data along with a TypeHandle
+   that indicates the type of that address */
+class TypedByRef
+{
+public:
+
+ PTR_VOID data;
+ TypeHandle type;
+};
+
+typedef DPTR(TypedByRef) PTR_TypedByRef;
+
+typedef Array<I1> I1Array;
+typedef Array<I2> I2Array;
+typedef Array<I4> I4Array;
+typedef Array<I8> I8Array;
+typedef Array<R4> R4Array;
+typedef Array<R8> R8Array;
+typedef Array<U1> U1Array;
+typedef Array<U1> BOOLArray;
+typedef Array<U2> U2Array;
+typedef Array<WCHAR> CHARArray;
+typedef Array<U4> U4Array;
+typedef Array<U8> U8Array;
+typedef PtrArray PTRArray;
+
+typedef DPTR(I1Array) PTR_I1Array;
+typedef DPTR(I2Array) PTR_I2Array;
+typedef DPTR(I4Array) PTR_I4Array;
+typedef DPTR(I8Array) PTR_I8Array;
+typedef DPTR(R4Array) PTR_R4Array;
+typedef DPTR(R8Array) PTR_R8Array;
+typedef DPTR(U1Array) PTR_U1Array;
+typedef DPTR(BOOLArray) PTR_BOOLArray;
+typedef DPTR(U2Array) PTR_U2Array;
+typedef DPTR(CHARArray) PTR_CHARArray;
+typedef DPTR(U4Array) PTR_U4Array;
+typedef DPTR(U8Array) PTR_U8Array;
+typedef DPTR(PTRArray) PTR_PTRArray;
+
+class StringObject;
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<ArrayBase> BASEARRAYREF;
+typedef REF<I1Array> I1ARRAYREF;
+typedef REF<I2Array> I2ARRAYREF;
+typedef REF<I4Array> I4ARRAYREF;
+typedef REF<I8Array> I8ARRAYREF;
+typedef REF<R4Array> R4ARRAYREF;
+typedef REF<R8Array> R8ARRAYREF;
+typedef REF<U1Array> U1ARRAYREF;
+typedef REF<BOOLArray> BOOLARRAYREF;
+typedef REF<U2Array> U2ARRAYREF;
+typedef REF<U4Array> U4ARRAYREF;
+typedef REF<U8Array> U8ARRAYREF;
+typedef REF<CHARArray> CHARARRAYREF;
+typedef REF<PTRArray> PTRARRAYREF; // Warning: Use PtrArray only for single dimensional arrays, not multidim arrays.
+typedef REF<StringObject> STRINGREF;
+
+#else // USE_CHECKED_OBJECTREFS
+
+typedef PTR_ArrayBase BASEARRAYREF;
+typedef PTR_I1Array I1ARRAYREF;
+typedef PTR_I2Array I2ARRAYREF;
+typedef PTR_I4Array I4ARRAYREF;
+typedef PTR_I8Array I8ARRAYREF;
+typedef PTR_R4Array R4ARRAYREF;
+typedef PTR_R8Array R8ARRAYREF;
+typedef PTR_U1Array U1ARRAYREF;
+typedef PTR_BOOLArray BOOLARRAYREF;
+typedef PTR_U2Array U2ARRAYREF;
+typedef PTR_U4Array U4ARRAYREF;
+typedef PTR_U8Array U8ARRAYREF;
+typedef PTR_CHARArray CHARARRAYREF;
+typedef PTR_PTRArray PTRARRAYREF; // Warning: Use PtrArray only for single dimensional arrays, not multidim arrays.
+typedef PTR_StringObject STRINGREF;
+
+#endif // USE_CHECKED_OBJECTREFS
+
+
+#include <poppack.h>
+
+
+/*
+ * StringObject
+ *
+ * Special String implementation for performance.
+ *
+ * m_ArrayLength - Length of buffer (m_Characters) in number of WCHARs
+ * m_StringLength - Length of string in number of WCHARs, may be smaller
+ * than the m_ArrayLength implying that there is extra
+ * space at the end. The high two bits of this field are used
+ * to indicate if the String has characters higher than 0x7F
+ * m_Characters - The string buffer
+ *
+ */
+
+
+/**
+ * The high bit state can be one of three values:
+ * STRING_STATE_HIGH_CHARS: We've examined the string and determined that it definitely has values greater than 0x80
+ * STRING_STATE_FAST_OPS: We've examined the string and determined that it definitely has no chars greater than 0x80
+ * STRING_STATE_UNDETERMINED: We've never examined this string.
+ * We've also reserved another bit for future use.
+ */
+
+#define STRING_STATE_UNDETERMINED 0x00000000
+#define STRING_STATE_HIGH_CHARS 0x40000000
+#define STRING_STATE_FAST_OPS 0x80000000
+#define STRING_STATE_SPECIAL_SORT 0xC0000000
+
+#ifdef _MSC_VER
+#pragma warning(disable : 4200) // disable zero-sized array warning
+#endif
+class StringObject : public Object
+{
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+#endif
+ friend class GCHeap;
+ friend class JIT_TrialAlloc;
+ friend class CheckAsmOffsets;
+ friend class COMString;
+
+ private:
+ DWORD m_StringLength;
+ WCHAR m_Characters[0];
+ // GC will see a StringObject like this:
+ // DWORD m_StringLength
+ // WCHAR m_Characters[0]
+ // DWORD m_OptionalPadding (this is an optional field and will appear based on need)
+
+ public:
+ VOID SetStringLength(DWORD len) { LIMITED_METHOD_CONTRACT; _ASSERTE(len >= 0); m_StringLength = len; }
+
+ protected:
+ StringObject() {LIMITED_METHOD_CONTRACT; }
+ ~StringObject() {LIMITED_METHOD_CONTRACT; }
+
+ public:
+ static SIZE_T GetSize(DWORD stringLength);
+
+ DWORD GetStringLength() { LIMITED_METHOD_DAC_CONTRACT; return( m_StringLength );}
+ WCHAR* GetBuffer() { LIMITED_METHOD_CONTRACT; _ASSERTE(this); return (WCHAR*)( dac_cast<TADDR>(this) + offsetof(StringObject, m_Characters) ); }
+ WCHAR* GetBuffer(DWORD *pdwSize) { LIMITED_METHOD_CONTRACT; _ASSERTE(this && pdwSize); *pdwSize = GetStringLength(); return GetBuffer(); }
+ WCHAR* GetBufferNullable() { LIMITED_METHOD_CONTRACT; return( (this == 0) ? 0 : (WCHAR*)( dac_cast<TADDR>(this) + offsetof(StringObject, m_Characters) ) ); }
+
+ DWORD GetHighCharState() {
+ WRAPPER_NO_CONTRACT;
+ DWORD ret = GetHeader()->GetBits() & (BIT_SBLK_STRING_HIGH_CHAR_MASK);
+ return ret;
+ }
+
+ VOID SetHighCharState(DWORD value) {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(value==STRING_STATE_HIGH_CHARS || value==STRING_STATE_FAST_OPS
+ || value==STRING_STATE_UNDETERMINED || value==STRING_STATE_SPECIAL_SORT);
+
+ // you need to clear the present state before going to a new state, but we'll allow multiple threads to set it to the same thing.
+ _ASSERTE((GetHighCharState() == STRING_STATE_UNDETERMINED) || (GetHighCharState()==value));
+
+ static_assert_no_msg(BIT_SBLK_STRING_HAS_NO_HIGH_CHARS == STRING_STATE_FAST_OPS &&
+ STRING_STATE_HIGH_CHARS == BIT_SBLK_STRING_HIGH_CHARS_KNOWN &&
+ STRING_STATE_SPECIAL_SORT == BIT_SBLK_STRING_HAS_SPECIAL_SORT);
+
+ GetHeader()->SetBit(value);
+ }
+
+ static UINT GetBufferOffset()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (UINT)(offsetof(StringObject, m_Characters));
+ }
+ static UINT GetStringLengthOffset()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (UINT)(offsetof(StringObject, m_StringLength));
+ }
+ VOID GetSString(SString &result)
+ {
+ WRAPPER_NO_CONTRACT;
+ result.Set(GetBuffer(), GetStringLength());
+ }
+ //========================================================================
+ // Creates a System.String object. All the functions that take a length
+ // or a count of bytes will add the null terminator after length
+ // characters. So this means that if you have a string that has 5
+ // characters and the null terminator you should pass in 5 and NOT 6.
+ //========================================================================
+ static STRINGREF NewString(int length);
+ static STRINGREF NewString(int length, BOOL bHasTrailByte);
+ static STRINGREF NewString(const WCHAR *pwsz);
+ static STRINGREF NewString(const WCHAR *pwsz, int length);
+ static STRINGREF NewString(LPCUTF8 psz);
+ static STRINGREF NewString(LPCUTF8 psz, int cBytes);
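+
+    // Usage sketch (illustrative): per the comment above, pass the character
+    // count excluding the null terminator:
+    //
+    //     STRINGREF s = StringObject::NewString(L"Hello", 5);  // 5, NOT 6
+    //     DWORD len = s->GetStringLength();                    // 5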
+
+ static STRINGREF GetEmptyString();
+ static STRINGREF* GetEmptyStringRefPtr();
+
+ static STRINGREF* InitEmptyStringRefPtr();
+
+ static STRINGREF __stdcall StringInitCharHelper(LPCSTR pszSource, int length);
+ DWORD InternalCheckHighChars();
+
+ BOOL HasTrailByte();
+ BOOL GetTrailByte(BYTE *bTrailByte);
+ BOOL SetTrailByte(BYTE bTrailByte);
+ static BOOL CaseInsensitiveCompHelper(__in_ecount(aLength) WCHAR * strA, __in_z INT8 * strB, int aLength, int bLength, int *result);
+
+#ifdef VERIFY_HEAP
+ //has to use raw object to avoid recursive validation
+ BOOL ValidateHighChars ();
+#endif //VERIFY_HEAP
+
+ /*=================RefInterpretGetStringValuesDangerousForGC======================
+    **N.B.: This performs no range checking and relies on the caller to have done this.
+    **Args: (IN)ref -- the String to be interpreted.
+ ** (OUT)chars -- a pointer to the characters in the buffer.
+ ** (OUT)length -- a pointer to the length of the buffer.
+ **Returns: void.
+ **Exceptions: None.
+ ==============================================================================*/
+ // !!!! If you use this function, you have to be careful because chars is a pointer
+ // !!!! to the data buffer of ref. If GC happens after this call, you need to make
+ // !!!! sure that you have a pin handle on ref, or use GCPROTECT_BEGINPINNING on ref.
+ void RefInterpretGetStringValuesDangerousForGC(__deref_out_ecount(*length + 1) WCHAR **chars, int *length) {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(GetGCSafeMethodTable() == g_pStringClass);
+ *length = GetStringLength();
+ *chars = GetBuffer();
+#ifdef _DEBUG
+ EnableStressHeapHelper();
+#endif
+ }
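+
+    // Illustrative pinning pattern for the call above (a sketch; 'str' is a
+    // hypothetical STRINGREF local that the caller protects):
+    //
+    //     WCHAR* chars; int length;
+    //     GCPROTECT_BEGINPINNING(str);
+    //     str->RefInterpretGetStringValuesDangerousForGC(&chars, &length);
+    //     // ... use chars; str will not be relocated while pinned ...
+    //     GCPROTECT_END();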
+
+
+private:
+ static INT32 FastCompareStringHelper(DWORD* strAChars, INT32 countA, DWORD* strBChars, INT32 countB);
+
+ static STRINGREF* EmptyStringRefPtr;
+};
+
+//The first two macros are essentially the same. I just define both because
+//having both can make the code more readable.
+#define IS_FAST_SORT(state) (((state) == STRING_STATE_FAST_OPS))
+#define IS_SLOW_SORT(state) (((state) != STRING_STATE_FAST_OPS))
+
+//This macro should be used to determine things like indexing, casing, and encoding.
+#define IS_FAST_OPS_EXCEPT_SORT(state) (((state)==STRING_STATE_SPECIAL_SORT) || ((state)==STRING_STATE_FAST_OPS))
+#define IS_ASCII(state) (((state)==STRING_STATE_SPECIAL_SORT) || ((state)==STRING_STATE_FAST_OPS))
+#define IS_FAST_CASING(state) IS_ASCII(state)
+#define IS_FAST_INDEX(state) IS_ASCII(state)
+#define IS_STRING_STATE_UNDETERMINED(state) ((state)==STRING_STATE_UNDETERMINED)
+#define HAS_HIGH_CHARS(state) ((state)==STRING_STATE_HIGH_CHARS)
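+
+// Illustrative use of the state macros above (a sketch; assumes
+// InternalCheckHighChars() returns the freshly computed state):
+//
+//     DWORD state = strRef->GetHighCharState();
+//     if (IS_STRING_STATE_UNDETERMINED(state))
+//         state = strRef->InternalCheckHighChars();
+//     if (IS_FAST_CASING(state)) { /* ASCII-only casing is safe */ }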
+
+/*================================GetEmptyString================================
+**Get a reference to the empty string. If we haven't already gotten one, we
+**query the String class for a pointer to the empty string that we know was
+**created at startup.
+**
+**Args: None
+**Returns: A STRINGREF to the EmptyString
+**Exceptions: None
+==============================================================================*/
+inline STRINGREF StringObject::GetEmptyString() {
+
+ CONTRACTL {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+ STRINGREF* refptr = EmptyStringRefPtr;
+
+ //If we've never gotten a reference to the EmptyString, we need to go get one.
+ if (refptr==NULL) {
+ refptr = InitEmptyStringRefPtr();
+ }
+    //We already have a reference to the EmptyString, so we can just return it.
+ return *refptr;
+}
+
+inline STRINGREF* StringObject::GetEmptyStringRefPtr() {
+
+ CONTRACTL {
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+ STRINGREF* refptr = EmptyStringRefPtr;
+
+ //If we've never gotten a reference to the EmptyString, we need to go get one.
+ if (refptr==NULL) {
+ refptr = InitEmptyStringRefPtr();
+ }
+    //We already have a reference to the EmptyString, so we can just return it.
+ return refptr;
+}
+
+// This is used to account for the remoting cache on RuntimeType,
+// RuntimeMethodInfo, and RtFieldInfo.
+class BaseObjectWithCachedData : public Object
+{
+#ifdef FEATURE_REMOTING
+ protected:
+ OBJECTREF m_CachedData;
+#endif //FEATURE_REMOTING
+};
+
+#ifndef BINDER
+// This is the Class version of the Reflection object.
+// A Class has additional information.
+// For a ReflectClassBaseObject the m_pData is a pointer to a FieldDesc array that
+// contains all of the final static primitives if its defined.
+// m_cnt = the number of elements defined in the m_pData FieldDesc array. -1 means
+// this hasn't yet been defined.
+class ReflectClassBaseObject : public BaseObjectWithCachedData
+{
+ friend class MscorlibBinder;
+
+protected:
+ OBJECTREF m_keepalive;
+ OBJECTREF m_cache;
+ TypeHandle m_typeHandle;
+#ifdef FEATURE_APPX
+ UINT32 m_invocationFlags;
+#endif
+
+#ifdef _DEBUG
+ void TypeCheck()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = GetMethodTable();
+ while (pMT != g_pRuntimeTypeClass && pMT != NULL)
+ {
+ pMT = pMT->GetParentMethodTable();
+ }
+ _ASSERTE(pMT == g_pRuntimeTypeClass);
+ }
+#endif // _DEBUG
+
+public:
+ void SetType(TypeHandle type) {
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ INDEBUG(TypeCheck());
+ m_typeHandle = type;
+ }
+
+ void SetKeepAlive(OBJECTREF keepalive)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ INDEBUG(TypeCheck());
+ SetObjectReference(&m_keepalive, keepalive, GetAppDomain());
+ }
+
+ TypeHandle GetType() {
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ INDEBUG(TypeCheck());
+ return m_typeHandle;
+ }
+
+};
+#endif // BINDER
+
+// This is the Method version of the Reflection object.
+// A Method has additional information.
+// m_pMD - A pointer to the actual MethodDesc of the method.
+// m_object - a field that has a reference type in it. Used only for RuntimeMethodInfoStub to keep the real type alive.
+// This structure matches the structure up to the m_pMD for several different managed types.
+// (RuntimeConstructorInfo, RuntimeMethodInfo, and RuntimeMethodInfoStub). These types are unrelated in the type
+// system except that they all implement a particular interface. It is important that that interface is not attached to any
+// type that does not sufficiently match this data structure.
+class ReflectMethodObject : public BaseObjectWithCachedData
+{
+ friend class MscorlibBinder;
+
+protected:
+ OBJECTREF m_object;
+ OBJECTREF m_empty1;
+ OBJECTREF m_empty2;
+ OBJECTREF m_empty3;
+ OBJECTREF m_empty4;
+ OBJECTREF m_empty5;
+ OBJECTREF m_empty6;
+ OBJECTREF m_empty7;
+ MethodDesc * m_pMD;
+
+public:
+ void SetMethod(MethodDesc *pMethod) {
+ LIMITED_METHOD_CONTRACT;
+ m_pMD = pMethod;
+ }
+
+ // This must only be called on instances of ReflectMethodObject that are actually RuntimeMethodInfoStub
+ void SetKeepAlive(OBJECTREF keepalive)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReference(&m_object, keepalive, GetAppDomain());
+ }
+
+ MethodDesc *GetMethod() {
+ LIMITED_METHOD_CONTRACT;
+ return m_pMD;
+ }
+
+};
+
+// This is the Field version of the Reflection object.
+// A Field has additional information.
+// m_pFD - A pointer to the actual FieldDesc of the field.
+// m_object - a field that has a reference type in it. Used only for RuntimeFieldInfoStub to keep the real type alive.
+// This structure matches the structure up to the m_pFD for several different managed types.
+// (RtFieldInfo and RuntimeFieldInfoStub). These types are unrelated in the type
+// system except that they all implement a particular interface. It is important that that interface is not attached to any
+// type that does not sufficiently match this data structure.
+class ReflectFieldObject : public BaseObjectWithCachedData
+{
+ friend class MscorlibBinder;
+
+protected:
+ OBJECTREF m_object;
+ OBJECTREF m_empty1;
+ INT32 m_empty2;
+ OBJECTREF m_empty3;
+ OBJECTREF m_empty4;
+ FieldDesc * m_pFD;
+
+public:
+ void SetField(FieldDesc *pField) {
+ LIMITED_METHOD_CONTRACT;
+ m_pFD = pField;
+ }
+
+ // This must only be called on instances of ReflectFieldObject that are actually RuntimeFieldInfoStub
+ void SetKeepAlive(OBJECTREF keepalive)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReference(&m_object, keepalive, GetAppDomain());
+ }
+
+ FieldDesc *GetField() {
+ LIMITED_METHOD_CONTRACT;
+ return m_pFD;
+ }
+};
+
+// ReflectModuleBaseObject
+// This class is the base class for managed Module.
+// This class will connect the Object back to the underlying VM representation
+// m_ReflectClass -- This is the real Class that was used for reflection
+// This class was used to get at this object
+// m_pData -- this is a generic pointer which usually points CorModule
+//
+class ReflectModuleBaseObject : public Object
+{
+ friend class MscorlibBinder;
+
+ protected:
+ // READ ME:
+ // Modifying the order or fields of this object may require other changes to the
+ // classlib class definition of this object.
+ OBJECTREF m_runtimeType;
+ OBJECTREF m_runtimeAssembly;
+ void* m_ReflectClass; // Pointer to the ReflectClass structure
+ Module* m_pData; // Pointer to the Module
+ void* m_pGlobals; // Global values....
+ void* m_pGlobalsFlds; // Global Fields....
+
+ protected:
+ ReflectModuleBaseObject() {LIMITED_METHOD_CONTRACT;}
+ ~ReflectModuleBaseObject() {LIMITED_METHOD_CONTRACT;}
+
+ public:
+ void SetModule(Module* p) {
+ LIMITED_METHOD_CONTRACT;
+ m_pData = p;
+ }
+ Module* GetModule() {
+ LIMITED_METHOD_CONTRACT;
+ return m_pData;
+ }
+ void SetAssembly(OBJECTREF assembly)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReference(&m_runtimeAssembly, assembly, GetAppDomain());
+ }
+};
+
+NOINLINE ReflectModuleBaseObject* GetRuntimeModuleHelper(LPVOID __me, Module *pModule, OBJECTREF keepAlive);
+#define FC_RETURN_MODULE_OBJECT(pModule, refKeepAlive) FC_INNER_RETURN(ReflectModuleBaseObject*, GetRuntimeModuleHelper(__me, pModule, refKeepAlive))
+
+class SafeHandle;
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<SafeHandle> SAFEHANDLE;
+typedef REF<SafeHandle> SAFEHANDLEREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef SafeHandle * SAFEHANDLE;
+typedef SafeHandle * SAFEHANDLEREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+class PermissionListSetObject: public Object
+{
+ friend class MscorlibBinder;
+
+private:
+ OBJECTREF _firstPermSetTriple;
+ OBJECTREF _permSetTriples;
+#ifdef FEATURE_COMPRESSEDSTACK
+ OBJECTREF _zoneList;
+ OBJECTREF _originList;
+#endif // FEATURE_COMPRESSEDSTACK
+
+public:
+ BOOL IsEmpty()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (_firstPermSetTriple == NULL &&
+ _permSetTriples == NULL
+#ifdef FEATURE_COMPRESSEDSTACK
+ && _zoneList == NULL &&
+ _originList == NULL
+#endif // FEATURE_COMPRESSEDSTACK
+ );
+ }
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<PermissionListSetObject> PERMISSIONLISTSETREF;
+#else
+typedef PermissionListSetObject* PERMISSIONLISTSETREF;
+#endif
+#ifdef FEATURE_COMPRESSEDSTACK
+class CompressedStackObject: public Object
+{
+ friend class MscorlibBinder;
+
+private:
+    // These fields are also defined in the managed representation (CompressedStack.cs).
+    // If you add or change these fields you must also change the managed code so that
+    // it matches. This is necessary so that the object is the proper size.
+ PERMISSIONLISTSETREF m_pls;
+ SAFEHANDLEREF m_compressedStackHandle;
+
+public:
+ void* GetUnmanagedCompressedStack();
+ BOOL IsEmptyPLS()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_pls == NULL || m_pls->IsEmpty());
+ }
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<CompressedStackObject> COMPRESSEDSTACKREF;
+#else
+typedef CompressedStackObject* COMPRESSEDSTACKREF;
+#endif
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+
+#if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+class SecurityContextObject: public Object
+{
+ friend class MscorlibBinder;
+
+private:
+
+    // These fields are also defined in the managed representation (SecurityContext.cs).
+    // If you add or change these fields you must also change the managed code so that
+    // it matches. This is necessary so that the object is the proper size.
+
+ OBJECTREF _executionContext;
+#ifdef FEATURE_IMPERSONATION
+ OBJECTREF _windowsIdentity;
+#endif // FEATURE_IMPERSONATION
+#ifdef FEATURE_COMPRESSEDSTACK
+ COMPRESSEDSTACKREF _compressedStack;
+#endif // FEATURE_COMPRESSEDSTACK
+ INT32 _disableFlow;
+ CLR_BOOL _isNewCapture;
+public:
+#ifdef FEATURE_COMPRESSEDSTACK
+ COMPRESSEDSTACKREF GetCompressedStack()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _compressedStack;
+ }
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<SecurityContextObject> SECURITYCONTEXTREF;
+#else
+typedef SecurityContextObject* SECURITYCONTEXTREF;
+#endif
+#endif // #if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+#define SYNCCTXPROPS_REQUIRESWAITNOTIFICATION 0x1 // Keep in sync with SynchronizationContext.cs SynchronizationContextFlags
+class ThreadBaseObject;
+class SynchronizationContextObject: public Object
+{
+ friend class MscorlibBinder;
+private:
+    // These fields are also defined in the managed representation (SynchronizationContext.cs). If
+    // you add or change these fields you must also change the managed code so that
+    // it matches these. This is necessary so that the object is the proper
+    // size.
+ INT32 _props;
+public:
+ BOOL IsWaitNotificationRequired()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if ((_props & SYNCCTXPROPS_REQUIRESWAITNOTIFICATION) != 0)
+ return TRUE;
+ return FALSE;
+ }
+};
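+
+// Usage sketch (illustrative only): runtime code that is about to block can
+// consult the flag to decide whether the wait must be routed through the
+// managed SynchronizationContext. 'syncCtxRef' is a hypothetical
+// SYNCHRONIZATIONCONTEXTREF obtained from the current thread.
+//
+//   if (syncCtxRef != NULL && syncCtxRef->IsWaitNotificationRequired())
+//   {
+//       // dispatch the wait through the managed context's Wait override
+//   }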
+#endif // FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+
+#ifdef FEATURE_REMOTING
+class CallContextRemotingDataObject : public Object
+{
+private:
+    // These fields are also defined in the managed representation (CallContext.cs). If you
+    // add or change these fields you must also change the managed code so that
+    // it matches these. This is necessary so that the object is the proper
+    // size.
+ OBJECTREF _logicalCallID;
+public:
+ OBJECTREF GetLogicalCallID()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _logicalCallID;
+ }
+};
+
+class CallContextSecurityDataObject : public Object
+{
+private:
+    // These fields are also defined in the managed representation (CallContext.cs). If you
+    // add or change these fields you must also change the managed code so that
+    // it matches these. This is necessary so that the object is the proper
+    // size.
+ OBJECTREF _principal;
+public:
+ OBJECTREF GetPrincipal()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _principal;
+ }
+
+ void SetPrincipal(OBJECTREF ref)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReferenceUnchecked(&_principal, ref);
+ }
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<CallContextSecurityDataObject> CCSECURITYDATAREF;
+typedef REF<CallContextRemotingDataObject> CCREMOTINGDATAREF;
+#else
+typedef CallContextSecurityDataObject* CCSECURITYDATAREF;
+typedef CallContextRemotingDataObject* CCREMOTINGDATAREF;
+#endif
+
+class LogicalCallContextObject : public Object
+{
+ friend class MscorlibBinder;
+
+    // These fields are also defined in the managed representation (CallContext.cs). If you
+    // add or change these fields you must also change the managed code so that
+ // it matches these. This is necessary so that the object is the proper
+ // size.
+private :
+ OBJECTREF m_Datastore;
+ CCREMOTINGDATAREF m_RemotingData;
+ CCSECURITYDATAREF m_SecurityData;
+ OBJECTREF m_HostContext;
+ OBJECTREF _sendHeaders;
+ OBJECTREF _recvHeaders;
+ CLR_BOOL m_IsCorrelationMgr;
+
+public:
+ CCSECURITYDATAREF GetSecurityData()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_SecurityData;
+ }
+
+ // This is an unmanaged equivalent of System.Runtime.Remoting.Messaging.LogicalCallContext.HasInfo
+ BOOL ContainsDataForSerialization()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ return (ContainsNonSecurityDataForSerialization() ||
+ (m_SecurityData != NULL && m_SecurityData->GetPrincipal() != NULL));
+ }
+
+ BOOL ContainsNonSecurityDataForSerialization()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+        // m_Datastore may contain zero items even when it is non-NULL, in which case it does
+        // not really contain any useful data for serialization and this function could
+        // return FALSE. However, we don't waste time trying to detect this case - it will
+        // be reset to NULL the first time a call is made, due to how LogicalCallContext's
+        // ISerializable implementation works.
+ return (m_Datastore != NULL ||
+ (m_RemotingData != NULL && m_RemotingData->GetLogicalCallID() != NULL) ||
+ m_HostContext != NULL);
+ }
+};
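+
+// Illustrative sketch (not code from this file): remoting code deciding
+// whether the logical call context needs to travel with a cross-appdomain
+// call could use the checks above roughly as follows, where 'lccRef' is a
+// hypothetical LOGICALCALLCONTEXTREF already in hand.
+//
+//   if (lccRef != NULL && lccRef->ContainsDataForSerialization())
+//   {
+//       // serialize the logical call context into the outgoing message
+//   }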
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<LogicalCallContextObject> LOGICALCALLCONTEXTREF;
+#else
+typedef LogicalCallContextObject* LOGICALCALLCONTEXTREF;
+#endif
+
+#endif // FEATURE_REMOTING
+
+#ifndef FEATURE_CORECLR
+class ExecutionContextObject : public Object
+{
+ friend class MscorlibBinder;
+
+ // These fields are also defined in the managed representation. (ExecutionContext.cs) If you
+ // add or change these fields you must also change the managed code so that
+ // it matches these. This is necessary so that the object is the proper
+ // size.
+private :
+#ifdef FEATURE_CAS_POLICY
+ OBJECTREF _hostExecutionContext;
+#endif // FEATURE_CAS_POLICY
+ OBJECTREF _syncContext;
+ OBJECTREF _syncContextNoFlow;
+#if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+ SECURITYCONTEXTREF _securityContext;
+#endif // #if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+#ifdef FEATURE_REMOTING
+ LOGICALCALLCONTEXTREF _logicalCallContext;
+ OBJECTREF _illogicalCallContext;
+#endif // #ifdef FEATURE_REMOTING
+ INT32 _flags;
+ OBJECTREF _localValues;
+ OBJECTREF _localChangeNotifications;
+
+public:
+ OBJECTREF GetSynchronizationContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _syncContext;
+ }
+#if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+ SECURITYCONTEXTREF GetSecurityContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _securityContext;
+ }
+#endif // #if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+#ifdef FEATURE_REMOTING
+ LOGICALCALLCONTEXTREF GetLogicalCallContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _logicalCallContext;
+ }
+ void SetLogicalCallContext(LOGICALCALLCONTEXTREF ref)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReferenceUnchecked((OBJECTREF*)&_logicalCallContext, (OBJECTREF)ref);
+ }
+ OBJECTREF GetIllogicalCallContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _illogicalCallContext;
+ }
+ void SetIllogicalCallContext(OBJECTREF ref)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReferenceUnchecked((OBJECTREF*)&_illogicalCallContext, ref);
+ }
+#endif //#ifdef FEATURE_REMOTING
+#ifdef FEATURE_COMPRESSEDSTACK
+ COMPRESSEDSTACKREF GetCompressedStack()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (_securityContext != NULL)
+ return _securityContext->GetCompressedStack();
+ return NULL;
+ }
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+
+};
+#endif //FEATURE_CORECLR
+
+
+
+typedef DPTR(class CultureInfoBaseObject) PTR_CultureInfoBaseObject;
+
+#ifdef USE_CHECKED_OBJECTREFS
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+typedef REF<SynchronizationContextObject> SYNCHRONIZATIONCONTEXTREF;
+#endif // FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+typedef REF<ExecutionContextObject> EXECUTIONCONTEXTREF;
+typedef REF<CultureInfoBaseObject> CULTUREINFOBASEREF;
+typedef REF<ArrayBase> ARRAYBASEREF;
+
+#else
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+typedef SynchronizationContextObject* SYNCHRONIZATIONCONTEXTREF;
+#endif // FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+#ifndef FEATURE_CORECLR
+typedef ExecutionContextObject* EXECUTIONCONTEXTREF;
+#endif
+typedef CultureInfoBaseObject* CULTUREINFOBASEREF;
+typedef PTR_ArrayBase ARRAYBASEREF;
+#endif
+
+// Note that the name must always be "" or "en-US". Other cases and nulls
+// aren't allowed (we have already checked).
+__inline bool IsCultureEnglishOrInvariant(LPCWSTR localeName)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (localeName != NULL &&
+ (localeName[0] == W('\0') ||
+ wcscmp(localeName, W("en-US")) == 0))
+ {
+ return true;
+ }
+ return false;
+}
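+
+// Usage sketch (illustrative only; sample inputs, not exhaustive):
+//
+//   IsCultureEnglishOrInvariant(W(""));      // true  - invariant culture
+//   IsCultureEnglishOrInvariant(W("en-US")); // true
+//   IsCultureEnglishOrInvariant(W("de-DE")); // false
+//   IsCultureEnglishOrInvariant(NULL);       // false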
+
+class CultureInfoBaseObject : public Object
+{
+ friend class MscorlibBinder;
+
+private:
+ OBJECTREF compareInfo;
+ OBJECTREF textInfo;
+#ifndef FEATURE_CORECLR
+ OBJECTREF regionInfo;
+#endif // !FEATURE_CORECLR
+ OBJECTREF numInfo;
+ OBJECTREF dateTimeInfo;
+ OBJECTREF calendar;
+ OBJECTREF m_cultureData;
+#ifndef FEATURE_CORECLR
+ OBJECTREF m_consoleFallbackCulture;
+#endif // !FEATURE_CORECLR
+ STRINGREF m_name; // "real" name - en-US, de-DE_phoneb or fj-FJ
+ STRINGREF m_nonSortName; // name w/o sort info (de-DE for de-DE_phoneb)
+    STRINGREF     m_sortName;                  // Sort-only name (de-DE_phoneb; en-US for fj-FJ with US sort)
+ CULTUREINFOBASEREF m_parent;
+#ifndef FEATURE_CORECLR
+ INT32 iDataItem; // NEVER USED, DO NOT USE THIS! (Serialized in Whidbey/Everett)
+ INT32 iCultureID; // NEVER USED, DO NOT USE THIS! (Serialized in Whidbey/Everett)
+#endif // !FEATURE_CORECLR
+#ifdef FEATURE_LEAK_CULTURE_INFO
+ INT32 m_createdDomainID;
+#endif // FEATURE_LEAK_CULTURE_INFO
+ CLR_BOOL m_isReadOnly;
+ CLR_BOOL m_isInherited;
+#ifdef FEATURE_LEAK_CULTURE_INFO
+ CLR_BOOL m_isSafeCrossDomain;
+#endif // FEATURE_LEAK_CULTURE_INFO
+#ifndef FEATURE_COREFX_GLOBALIZATION
+ CLR_BOOL m_useUserOverride;
+#endif
+
+public:
+ CULTUREINFOBASEREF GetParent()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_parent;
+ }// GetParent
+
+
+ STRINGREF GetName()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_name;
+ }// GetName
+
+#ifdef FEATURE_LEAK_CULTURE_INFO
+ BOOL IsSafeCrossDomain()
+ {
+ return m_isSafeCrossDomain;
+ }// IsSafeCrossDomain
+
+ ADID GetCreatedDomainID()
+ {
+ return ADID(m_createdDomainID);
+ }// GetCreatedDomain
+#endif // FEATURE_LEAK_CULTURE_INFO
+
+}; // class CultureInfoBaseObject
+
+
+#ifndef FEATURE_COREFX_GLOBALIZATION
+typedef DPTR(class CultureDataBaseObject) PTR_CultureDataBaseObject;
+class CultureDataBaseObject : public Object
+{
+public:
+ // offsets are for Silverlight
+ /* 0x000 */ STRINGREF sRealName ; // Name you passed in (ie: en-US, en, or de-DE_phoneb)
+ /* 0x008 */ STRINGREF sWindowsName ; // Name OS thinks the object is (ie: de-DE_phoneb, or en-US (even if en was passed in))
+ /* 0x010 */ STRINGREF sName ; // locale name (ie: en-us, NO sort info, but could be neutral)
+    /* 0x018 */ STRINGREF sParent ; // Parent name (which may be a custom locale/culture)
+ /* 0x020 */ STRINGREF sLocalizedDisplayName ; // Localized pretty name for this locale
+ /* 0x028 */ STRINGREF sEnglishDisplayName ; // English pretty name for this locale
+ /* 0x030 */ STRINGREF sNativeDisplayName ; // Native pretty name for this locale
+ /* 0x038 */ STRINGREF sSpecificCulture ; // The culture name to be used in CultureInfo.CreateSpecificCulture(), en-US form if neutral, sort name if sort
+ /* 0x040 */ STRINGREF sISO639Language ; // ISO 639 Language Name
+ /* 0x048 */ STRINGREF sLocalizedLanguage ; // Localized name for this language
+ /* 0x050 */ STRINGREF sEnglishLanguage ; // English name for this language
+ /* 0x058 */ STRINGREF sNativeLanguage ; // Native name of this language
+ /* 0x060 */ STRINGREF sRegionName ; // (RegionInfo)
+ /* 0x068 */ STRINGREF sLocalizedCountry ; // localized country name
+ /* 0x070 */ STRINGREF sEnglishCountry ; // english country name (RegionInfo)
+ /* 0x078 */ STRINGREF sNativeCountry ; // native country name
+ /* 0x080 */ STRINGREF sISO3166CountryName ; // (RegionInfo), ie: US
+ /* 0x088 */ STRINGREF sPositiveSign ; // (user can override) positive sign
+ /* 0x090 */ STRINGREF sNegativeSign ; // (user can override) negative sign
+
+ /* 0x098 */ PTRARRAYREF saNativeDigits ; // (user can override) native characters for digits 0-9
+ /* 0x0a0 */ I4ARRAYREF waGrouping ; // (user can override) grouping of digits
+
+ /* 0x0a8 */ STRINGREF sDecimalSeparator ; // (user can override) decimal separator
+ /* 0x0b0 */ STRINGREF sThousandSeparator ; // (user can override) thousands separator
+ /* 0x0b8 */ STRINGREF sNaN ; // Not a Number
+ /* 0x0c0 */ STRINGREF sPositiveInfinity ; // + Infinity
+ /* 0x0c8 */ STRINGREF sNegativeInfinity ; // - Infinity
+ /* 0x0d0 */ STRINGREF sPercent ; // Percent (%) symbol
+ /* 0x0d8 */ STRINGREF sPerMille ; // PerMille (‰) symbol
+ /* 0x0e0 */ STRINGREF sCurrency ; // (user can override) local monetary symbol
+ /* 0x0e8 */ STRINGREF sIntlMonetarySymbol ; // international monetary symbol (RegionInfo)
+ /* 0x0f0 */ STRINGREF sEnglishCurrency ; // English name for this currency
+ /* 0x0f8 */ STRINGREF sNativeCurrency ; // Native name for this currency
+
+ /* 0x100 */ I4ARRAYREF waMonetaryGrouping ; // (user can override) monetary grouping of digits
+
+ /* 0x108 */ STRINGREF sMonetaryDecimal ; // (user can override) monetary decimal separator
+ /* 0x110 */ STRINGREF sMonetaryThousand ; // (user can override) monetary thousands separator
+ /* 0x118 */ STRINGREF sListSeparator ; // (user can override) list separator
+ /* 0x120 */ STRINGREF sAM1159 ; // (user can override) AM designator
+ /* 0x128 */ STRINGREF sPM2359 ; // (user can override) PM designator
+ STRINGREF sTimeSeparator ; // Time Separator
+
+ /* 0x130 */ PTRARRAYREF saLongTimes ; // (user can override) time format
+ /* 0x138 */ PTRARRAYREF saShortTimes ; // short time format
+ /* 0x140 */ PTRARRAYREF saDurationFormats ; // time duration format
+
+ /* 0x148 */ I4ARRAYREF waCalendars ; // all available calendar type(s). The first one is the default calendar
+
+ /* 0x150 */ PTRARRAYREF calendars ; // Store for specific calendar data
+
+ /* 0x158 */ STRINGREF sTextInfo ; // Text info name to use for custom
+ /* 0x160 */ STRINGREF sCompareInfo ; // Compare info name (including sorting key) to use if custom
+ /* 0x168 */ STRINGREF sScripts ; // Typical Scripts for this locale (latn;cyrl; etc)
+
+#if !defined(FEATURE_CORECLR)
+ // desktop only fields - these are ordered correctly
+ /* ????? */ STRINGREF sAbbrevLang ; // abbreviated language name (Windows Language Name) ex: ENU
+ /* ????? */ STRINGREF sAbbrevCountry ; // abbreviated country name (RegionInfo) (Windows Region Name) ex: USA
+ /* ????? */ STRINGREF sISO639Language2 ; // 3 char ISO 639 lang name 2 ex: eng
+    /* ????? */ STRINGREF sISO3166CountryName2 ; // 3 char ISO 3166 country name 2 (RegionInfo) ex: USA (ISO)
+ /* ????? */ STRINGREF sConsoleFallbackName ; // The culture name for the console fallback UI culture
+ /* ????? */ STRINGREF sKeyboardsToInstall ; // Keyboard installation string.
+ /* ????? */ STRINGREF fontSignature ; // Font signature (16 WORDS)
+#endif
+
+// Unused for now: /* ????? */ INT32 iCountry ; // (user can override) country code (RegionInfo)
+ /* 0x170 */ INT32 iGeoId ; // GeoId
+ /* 0x174 */ INT32 iDigitSubstitution ; // (user can override) Digit substitution 0=context, 1=none/arabic, 2=Native/national (2 seems to be unused)
+ /* 0x178 */ INT32 iLeadingZeros ; // (user can override) leading zeros 0 = no leading zeros, 1 = leading zeros
+ /* 0x17c */ INT32 iDigits ; // (user can override) number of fractional digits
+ /* 0x180 */ INT32 iNegativeNumber ; // (user can override) negative number format
+ /* 0x184 */ INT32 iNegativePercent ; // Negative Percent (0-3)
+ /* 0x188 */ INT32 iPositivePercent ; // Positive Percent (0-11)
+ /* 0x18c */ INT32 iCurrencyDigits ; // (user can override) # local monetary fractional digits
+ /* 0x190 */ INT32 iCurrency ; // (user can override) positive currency format
+ /* 0x194 */ INT32 iNegativeCurrency ; // (user can override) negative currency format
+ /* 0x198 */ INT32 iMeasure ; // (user can override) system of measurement 0=metric, 1=US (RegionInfo)
+// Unused for now /* ????? */ INT32 iPaperSize ; // default paper size (RegionInfo)
+ /* 0x19c */ INT32 iFirstDayOfWeek ; // (user can override) first day of week (gregorian really)
+ /* 0x1a0 */ INT32 iFirstWeekOfYear ; // (user can override) first week of year (gregorian really)
+
+ /* ????? */ INT32 iReadingLayout; // Reading Layout Data (0-3)
+#if !defined(FEATURE_CORECLR)
+ // desktop only fields - these are ordered correctly
+ /* ????? */ INT32 iDefaultAnsiCodePage ; // default ansi code page ID (ACP)
+ /* ????? */ INT32 iDefaultOemCodePage ; // default oem code page ID (OCP or OEM)
+ /* ????? */ INT32 iDefaultMacCodePage ; // default macintosh code page
+ /* ????? */ INT32 iDefaultEbcdicCodePage ; // default EBCDIC code page
+ /* ????? */ INT32 iLanguage ; // locale ID (0409) - NO sort information
+ /* ????? */ INT32 iInputLanguageHandle ; // input language handle
+#endif
+ /* 0x1a4 */ CLR_BOOL bUseOverrides ; // use user overrides?
+    /* 0x1a5 */ CLR_BOOL bNeutral ; // Flag for the culture (currently just: neutral or not)
+#if !defined(FEATURE_CORECLR)
+    /* ????? */ CLR_BOOL bWin32Installed ; // Flag indicating whether the culture is Win32-installed
+    /* ????? */ CLR_BOOL bFramework ; // Flag indicating whether the culture is one of the Whidbey cultures
+#endif
+
+}; // class CultureDataBaseObject
+
+
+
+typedef DPTR(class CalendarDataBaseObject) PTR_CalendarDataBaseObject;
+class CalendarDataBaseObject : public Object
+{
+public:
+ /* 0x000 */ STRINGREF sNativeName ; // Calendar Name for the locale
+
+ // Formats
+
+    /* 0x008 */ PTRARRAYREF saShortDates ; // Short Date format, default first
+    /* 0x010 */ PTRARRAYREF saYearMonths ; // Year/Month Date format, default first
+    /* 0x018 */ PTRARRAYREF saLongDates ; // Long Date format, default first
+ /* 0x020 */ STRINGREF sMonthDay ; // Month/Day format
+
+ // Calendar Parts Names
+ /* 0x028 */ PTRARRAYREF saEraNames ; // Names of Eras
+ /* 0x030 */ PTRARRAYREF saAbbrevEraNames ; // Abbreviated Era Names
+ /* 0x038 */ PTRARRAYREF saAbbrevEnglishEraNames ; // Abbreviated Era Names in English
+ /* 0x040 */ PTRARRAYREF saDayNames ; // Day Names, null to use locale data, starts on Sunday
+ /* 0x048 */ PTRARRAYREF saAbbrevDayNames ; // Abbrev Day Names, null to use locale data, starts on Sunday
+ /* 0x050 */ PTRARRAYREF saSuperShortDayNames ; // Super short Day of week names
+ /* 0x058 */ PTRARRAYREF saMonthNames ; // Month Names (13)
+ /* 0x060 */ PTRARRAYREF saAbbrevMonthNames ; // Abbrev Month Names (13)
+ /* 0x068 */ PTRARRAYREF saMonthGenitiveNames ; // Genitive Month Names (13)
+ /* 0x070 */ PTRARRAYREF saAbbrevMonthGenitiveNames ; // Genitive Abbrev Month Names (13)
+ /* 0x078 */ PTRARRAYREF saLeapYearMonthNames ; // Multiple strings for the month names in a leap year.
+
+ // Integers at end to make marshaller happier
+ /* 0x080 */ INT32 iTwoDigitYearMax ; // Max 2 digit year (for Y2K bug data entry)
+ /* 0x084 */ INT32 iCurrentEra ; // current era # (usually 1)
+
+ // Use overrides?
+ /* 0x088 */ CLR_BOOL bUseUserOverrides ; // True if we want user overrides.
+}; // class CalendarDataBaseObject
+#endif
+
+
+typedef DPTR(class ThreadBaseObject) PTR_ThreadBaseObject;
+class ThreadBaseObject : public Object
+{
+ friend class ClrDataAccess;
+ friend class ThreadNative;
+ friend class MscorlibBinder;
+ friend class Object;
+
+private:
+
+    // These fields are also defined in the managed representation. If you
+    // add or change these fields you must also change the managed code so that
+ // it matches these. This is necessary so that the object is the proper
+ // size. The order here must match that order which the loader will choose
+ // when laying out the managed class. Note that the layouts are checked
+ // at run time, not compile time.
+#ifdef FEATURE_REMOTING
+ OBJECTREF m_ExposedContext;
+#endif
+#ifndef FEATURE_CORECLR
+ EXECUTIONCONTEXTREF m_ExecutionContext;
+#endif
+ OBJECTREF m_Name;
+ OBJECTREF m_Delegate;
+#ifdef FEATURE_LEAK_CULTURE_INFO
+ CULTUREINFOBASEREF m_CurrentUserCulture;
+ CULTUREINFOBASEREF m_CurrentUICulture;
+#endif
+#ifdef IO_CANCELLATION_ENABLED
+ OBJECTREF m_CancellationSignals;
+#endif
+ OBJECTREF m_ThreadStartArg;
+
+ // The next field (m_InternalThread) is declared as IntPtr in the managed
+ // definition of Thread. The loader will sort it next.
+
+ // m_InternalThread is always valid -- unless the thread has finalized and been
+ // resurrected. (The thread stopped running before it was finalized).
+ Thread *m_InternalThread;
+ INT32 m_Priority;
+
+    // We need to cache the thread id in managed code for perf reasons.
+ INT32 m_ManagedThreadId;
+
+ CLR_BOOL m_ExecutionContextBelongsToCurrentScope;
+#ifdef _DEBUG
+ CLR_BOOL m_ForbidExecutionContextMutation;
+#endif
+
+protected:
+ // the ctor and dtor can do no useful work.
+ ThreadBaseObject() {LIMITED_METHOD_CONTRACT;};
+ ~ThreadBaseObject() {LIMITED_METHOD_CONTRACT;};
+
+public:
+ Thread *GetInternal()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_InternalThread;
+ }
+
+ void SetInternal(Thread *it);
+ void ClearInternal();
+
+ INT32 GetManagedThreadId()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ManagedThreadId;
+ }
+
+ void SetManagedThreadId(INT32 id)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_ManagedThreadId = id;
+ }
+
+ OBJECTREF GetThreadStartArg() { LIMITED_METHOD_CONTRACT; return m_ThreadStartArg; }
+ void SetThreadStartArg(OBJECTREF newThreadStartArg)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(newThreadStartArg == NULL);
+        // Note: this is an unchecked assignment. We are cleaning out the ThreadStartArg field when
+        // a thread starts so that appdomain unload (ADU) does not cause problems.
+ SetObjectReferenceUnchecked( (OBJECTREF *)&m_ThreadStartArg, newThreadStartArg);
+
+ }
+
+ OBJECTREF GetDelegate() { LIMITED_METHOD_CONTRACT; return m_Delegate; }
+ void SetDelegate(OBJECTREF delegate);
+
+#ifndef FEATURE_LEAK_CULTURE_INFO
+ CULTUREINFOBASEREF GetCurrentUserCulture();
+ CULTUREINFOBASEREF GetCurrentUICulture();
+ OBJECTREF GetManagedThreadCulture(BOOL bUICulture);
+ void ResetManagedThreadCulture(BOOL bUICulture);
+ void ResetCurrentUserCulture();
+ void ResetCurrentUICulture();
+#endif
+
+#ifdef FEATURE_REMOTING
+ // These expose the remoting context (System\Remoting\Context)
+ OBJECTREF GetExposedContext() { LIMITED_METHOD_CONTRACT; return m_ExposedContext; }
+ OBJECTREF SetExposedContext(OBJECTREF newContext)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ OBJECTREF oldContext = m_ExposedContext;
+
+ // Note: this is a very dangerous unchecked assignment. We are taking
+        // responsibility here for cleaning out the ExposedContext field when
+ // an app domain is unloaded.
+ SetObjectReferenceUnchecked( (OBJECTREF *)&m_ExposedContext, newContext );
+
+ return oldContext;
+ }
+#endif
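+
+    // Aside on the sketch below (commentary, not a definition from this
+    // file): SetObjectReference is intended as the checked form of the
+    // reference assignment, while SetObjectReferenceUnchecked skips the
+    // extra validation - which is why the setters above that deliberately
+    // bypass it call the assignment out as dangerous. A minimal
+    // illustration with hypothetical names:
+    //
+    //   SetObjectReference(&m_SomeField, value, GetAppDomain()); // validated
+    //   SetObjectReferenceUnchecked(&m_SomeField, value);        // caller's duty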
+
+#ifdef FEATURE_LEAK_CULTURE_INFO
+ CULTUREINFOBASEREF GetCurrentUserCulture()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_CurrentUserCulture;
+ }
+
+ void ResetCurrentUserCulture()
+ {
+ WRAPPER_NO_CONTRACT;
+ ClearObjectReference((OBJECTREF *)&m_CurrentUserCulture);
+ }
+
+ CULTUREINFOBASEREF GetCurrentUICulture()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_CurrentUICulture;
+ }
+
+ void ResetCurrentUICulture()
+ {
+ WRAPPER_NO_CONTRACT;
+ ClearObjectReference((OBJECTREF *)&m_CurrentUICulture);
+ }
+#endif // FEATURE_LEAK_CULTURE_INFO
+
+#ifndef FEATURE_CORECLR
+ OBJECTREF GetSynchronizationContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (m_ExecutionContext != NULL)
+ return m_ExecutionContext->GetSynchronizationContext();
+ return NULL;
+ }
+ OBJECTREF GetExecutionContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (OBJECTREF)m_ExecutionContext;
+ }
+ void SetExecutionContext(OBJECTREF ref)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetObjectReferenceUnchecked((OBJECTREF*)&m_ExecutionContext, ref);
+ }
+#endif //!FEATURE_CORECLR
+#ifdef FEATURE_COMPRESSEDSTACK
+ COMPRESSEDSTACKREF GetCompressedStack()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (m_ExecutionContext != NULL)
+ return m_ExecutionContext->GetCompressedStack();
+ return NULL;
+ }
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+ // SetDelegate is our "constructor" for the pathway where the exposed object is
+ // created first. InitExisting is our "constructor" for the pathway where an
+ // existing physical thread is later exposed.
+ void InitExisting();
+
+ void ResetCulture()
+ {
+ LIMITED_METHOD_CONTRACT;
+ ResetCurrentUserCulture();
+ ResetCurrentUICulture();
+ }
+
+ void ResetName()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_Name = NULL;
+ }
+
+ void SetPriority(INT32 priority)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_Priority = priority;
+ }
+
+ INT32 GetPriority() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Priority;
+ }
+};
+
+// MarshalByRefObjectBaseObject
+// This class is the base class for MarshalByRefObject
+//
+class MarshalByRefObjectBaseObject : public Object
+{
+#ifdef FEATURE_REMOTING
+ friend class MscorlibBinder;
+
+ public:
+ static int GetOffsetOfServerIdentity() { LIMITED_METHOD_CONTRACT; return offsetof(MarshalByRefObjectBaseObject, m_ServerIdentity); }
+
+ protected:
+ // READ ME:
+ // Modifying the order or fields of this object may require other changes to the
+ // classlib class definition of this object.
+ OBJECTREF m_ServerIdentity;
+
+ protected:
+ MarshalByRefObjectBaseObject() {LIMITED_METHOD_CONTRACT;}
+ ~MarshalByRefObjectBaseObject() {LIMITED_METHOD_CONTRACT;}
+#endif
+};
+
+
+// ContextBaseObject
+// This class is the base class for Contexts
+//
+class ContextBaseObject : public Object
+{
+ friend class Context;
+ friend class MscorlibBinder;
+
+ private:
+ // READ ME:
+ // Modifying the order or fields of this object may require other changes to the
+ // classlib class definition of this object.
+
+ OBJECTREF m_ctxProps; // array of name-value pairs of properties
+ OBJECTREF m_dphCtx; // dynamic property holder
+ OBJECTREF m_localDataStore; // context local store
+ OBJECTREF m_serverContextChain; // server context sink chain
+ OBJECTREF m_clientContextChain; // client context sink chain
+    OBJECTREF m_exposedAppDomain; // the exposed managed AppDomain object
+ PTRARRAYREF m_ctxStatics; // holder for context relative statics
+
+ Context* m_internalContext; // Pointer to the VM context
+
+ INT32 _ctxID;
+ INT32 _ctxFlags;
+ INT32 _numCtxProps; // current count of properties
+
+ INT32 _ctxStaticsCurrentBucket;
+ INT32 _ctxStaticsFreeIndex;
+
+ protected:
+ ContextBaseObject() { LIMITED_METHOD_CONTRACT; }
+ ~ContextBaseObject() { LIMITED_METHOD_CONTRACT; }
+
+ public:
+
+ void SetInternalContext(Context* pCtx)
+ {
+ LIMITED_METHOD_CONTRACT;
+ // either transitioning from NULL to non-NULL or vice versa.
+ // But not setting NULL to NULL or non-NULL to non-NULL.
+ _ASSERTE((m_internalContext == NULL) != (pCtx == NULL));
+ m_internalContext = pCtx;
+ }
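+
+    // Illustrative sketch of the transition rule asserted above (callers
+    // are hypothetical): the field may only flip between NULL and non-NULL.
+    //
+    //   pCtxObj->SetInternalContext(pCtx);  // OK:  NULL     -> non-NULL
+    //   pCtxObj->SetInternalContext(NULL);  // OK:  non-NULL -> NULL
+    //   pCtxObj->SetInternalContext(pCtx2); // asserts: non-NULL -> non-NULL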
+
+ Context* GetInternalContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_internalContext;
+ }
+
+    OBJECTREF GetExposedDomain() { LIMITED_METHOD_CONTRACT; return m_exposedAppDomain; }
+ OBJECTREF SetExposedDomain(OBJECTREF newDomain)
+ {
+ LIMITED_METHOD_CONTRACT;
+ OBJECTREF oldDomain = m_exposedAppDomain;
+ SetObjectReference( (OBJECTREF *)&m_exposedAppDomain, newDomain, GetAppDomain() );
+ return oldDomain;
+ }
+
+ PTRARRAYREF GetContextStaticsHolder()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ // The code that needs this should have faulted it in by now!
+ _ASSERTE(m_ctxStatics != NULL);
+
+ return m_ctxStatics;
+ }
+};
+
+typedef DPTR(ContextBaseObject) PTR_ContextBaseObject;
+
+// AppDomainBaseObject
+// This class is the base class for application domains
+//
+class AppDomainBaseObject : public MarshalByRefObjectBaseObject
+{
+ friend class AppDomain;
+ friend class MscorlibBinder;
+
+ protected:
+ // READ ME:
+ // Modifying the order or fields of this object may require other changes to the
+ // classlib class definition of this object.
+ OBJECTREF m_pDomainManager; // AppDomainManager for host settings.
+ OBJECTREF m_LocalStore;
+ OBJECTREF m_FusionTable;
+ OBJECTREF m_pSecurityIdentity; // Evidence associated with this domain
+ OBJECTREF m_pPolicies; // Array of context policies associated with this domain
+ OBJECTREF m_pAssemblyEventHandler; // Delegate for 'loading assembly' event
+ OBJECTREF m_pTypeEventHandler; // Delegate for 'resolve type' event
+ OBJECTREF m_pResourceEventHandler; // Delegate for 'resolve resource' event
+ OBJECTREF m_pAsmResolveEventHandler; // Delegate for 'resolve assembly' event
+#ifdef FEATURE_REFLECTION_ONLY_LOAD
+ OBJECTREF m_pReflectionAsmResolveEventHandler; //Delegate for 'reflection resolve assembly' event
+#endif
+#ifdef FEATURE_REMOTING
+ OBJECTREF m_pDefaultContext; // Default managed context for this AD.
+#endif
+#ifdef FEATURE_CLICKONCE
+ OBJECTREF m_pActivationContext; // ClickOnce ActivationContext.
+ OBJECTREF m_pApplicationIdentity; // App ApplicationIdentity.
+#endif
+ OBJECTREF m_pApplicationTrust; // App ApplicationTrust.
+#ifdef FEATURE_IMPERSONATION
+    OBJECTREF    m_pDefaultPrincipal;  // Lazily computed default principal object used by threads
+#endif // FEATURE_IMPERSONATION
+#ifdef FEATURE_REMOTING
+ OBJECTREF m_pURITable; // Identity table for remoting
+#endif
+ OBJECTREF m_pProcessExitEventHandler; // Delegate for 'process exit' event. Only used in Default appdomain.
+ OBJECTREF m_pDomainUnloadEventHandler; // Delegate for 'about to unload domain' event
+ OBJECTREF m_pUnhandledExceptionEventHandler; // Delegate for 'unhandled exception' event
+#ifdef FEATURE_APTCA
+ OBJECTREF m_aptcaVisibleAssemblies; // array of conditional APTCA assembly names that should be APTCA visible
+#endif
+
+ OBJECTREF m_compatFlags;
+
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+ OBJECTREF m_pFirstChanceExceptionHandler; // Delegate for 'FirstChance Exception' event
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+
+ AppDomain* m_pDomain; // Pointer to the BaseDomain Structure
+#ifdef FEATURE_CAS_POLICY
+ INT32 m_iPrincipalPolicy; // Type of principal to create by default
+#endif
+ CLR_BOOL m_bHasSetPolicy; // SetDomainPolicy has been called for this domain
+ CLR_BOOL m_bIsFastFullTrustDomain; // We know for sure that this is a homogeneous full trust domain.
+ CLR_BOOL m_compatFlagsInitialized;
+
+ protected:
+ AppDomainBaseObject() { LIMITED_METHOD_CONTRACT; }
+ ~AppDomainBaseObject() { LIMITED_METHOD_CONTRACT; }
+
+ public:
+
+ void SetDomain(AppDomain* p)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pDomain = p;
+ }
+ AppDomain* GetDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pDomain;
+ }
+
+ OBJECTREF GetSecurityIdentity()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pSecurityIdentity;
+ }
+
+ OBJECTREF GetAppDomainManager()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pDomainManager;
+ }
+
+ OBJECTREF GetApplicationTrust()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pApplicationTrust;
+ }
+
+ BOOL GetIsFastFullTrustDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return !!m_bIsFastFullTrustDomain;
+ }
+
+#ifdef FEATURE_APTCA
+ OBJECTREF GetPartialTrustVisibleAssemblies()
+ {
+        LIMITED_METHOD_CONTRACT;
+ return m_aptcaVisibleAssemblies;
+ }
+#endif // FEATURE_APTCA
+
+ // Ref needs to be a PTRARRAYREF
+ void SetPolicies(OBJECTREF ref)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReference(&m_pPolicies, ref, m_pDomain );
+ }
+#ifdef FEATURE_REMOTING
+ void SetDefaultContext(OBJECTREF ref)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReference(&m_pDefaultContext,ref,m_pDomain);
+ }
+#endif
+ BOOL HasSetPolicy()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_bHasSetPolicy;
+ }
+
+#ifdef FEATURE_CLICKONCE
+ BOOL HasActivationContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pActivationContext != NULL;
+ }
+#endif // FEATURE_CLICKONCE
+
+#ifdef FEATURE_EXCEPTION_NOTIFICATIONS
+ // Returns the reference to the delegate of the first chance exception notification handler
+ OBJECTREF GetFirstChanceExceptionNotificationHandler()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pFirstChanceExceptionHandler;
+ }
+#endif // FEATURE_EXCEPTION_NOTIFICATIONS
+};
+
+#ifndef FEATURE_CORECLR
+// The managed definition of AppDomainSortingSetupInfo is in BCL\System\Globalization\AppDomainSortingSetupInfo.cs
+class AppDomainSortingSetupInfoObject : public Object
+{
+ friend class MscorlibBinder;
+
+ protected:
+ INT_PTR m_pfnIsNLSDefinedString;
+ INT_PTR m_pfnCompareStringEx;
+ INT_PTR m_pfnLCMapStringEx;
+ INT_PTR m_pfnFindNLSStringEx;
+ INT_PTR m_pfnCompareStringOrdinal;
+ INT_PTR m_pfnGetNLSVersionEx;
+ INT_PTR m_pfnFindStringOrdinal;
+ CLR_BOOL m_useV2LegacySorting;
+ CLR_BOOL m_useV4LegacySorting;
+
+ protected:
+ AppDomainSortingSetupInfoObject() { LIMITED_METHOD_CONTRACT; }
+ ~AppDomainSortingSetupInfoObject() { LIMITED_METHOD_CONTRACT; }
+
+ public:
+ CLR_BOOL UseV2LegacySorting() { LIMITED_METHOD_CONTRACT; return m_useV2LegacySorting; }
+ CLR_BOOL UseV4LegacySorting() { LIMITED_METHOD_CONTRACT; return m_useV4LegacySorting; }
+
+ INT_PTR GetPFNIsNLSDefinedString() { LIMITED_METHOD_CONTRACT; return m_pfnIsNLSDefinedString; }
+ INT_PTR GetPFNCompareStringEx() { LIMITED_METHOD_CONTRACT; return m_pfnCompareStringEx; }
+ INT_PTR GetPFNLCMapStringEx() { LIMITED_METHOD_CONTRACT; return m_pfnLCMapStringEx; }
+ INT_PTR GetPFNFindNLSStringEx() { LIMITED_METHOD_CONTRACT; return m_pfnFindNLSStringEx; }
+ INT_PTR GetPFNCompareStringOrdinal() { LIMITED_METHOD_CONTRACT; return m_pfnCompareStringOrdinal; }
+ INT_PTR GetPFNGetNLSVersionEx() { LIMITED_METHOD_CONTRACT; return m_pfnGetNLSVersionEx; }
+ INT_PTR GetPFNFindStringOrdinal() { LIMITED_METHOD_CONTRACT; return m_pfnFindStringOrdinal; }
+};
+typedef DPTR(AppDomainSortingSetupInfoObject) PTR_AppDomainSortingSetupInfoObject;
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<AppDomainSortingSetupInfoObject> APPDOMAINSORTINGSETUPINFOREF;
+#else
+typedef AppDomainSortingSetupInfoObject* APPDOMAINSORTINGSETUPINFOREF;
+#endif // USE_CHECKED_OBJECTREFS
+#endif // FEATURE_CORECLR
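+
+// Illustrative sketch (an assumption about consumption, not code from this
+// file): the INT_PTR slots above carry raw NLS entry points, so a consumer
+// would cast a slot back to the matching Win32 signature before calling it,
+// e.g. for CompareStringEx ('sortingSetupRef' is hypothetical):
+//
+//   typedef int (WINAPI *PFN_COMPARESTRINGEX)(LPCWSTR, DWORD, LPCWSTR, int,
+//                                             LPCWSTR, int, LPNLSVERSIONINFO,
+//                                             LPVOID, LPARAM);
+//   PFN_COMPARESTRINGEX pfn =
+//       (PFN_COMPARESTRINGEX)sortingSetupRef->GetPFNCompareStringEx();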
+
+// The managed definition of AppDomainSetup is in BCL\System\AppDomainSetup.cs
+class AppDomainSetupObject : public Object
+{
+ friend class MscorlibBinder;
+
+ protected:
+ PTRARRAYREF m_Entries;
+ STRINGREF m_AppBase;
+ OBJECTREF m_AppDomainInitializer;
+ PTRARRAYREF m_AppDomainInitializerArguments;
+#ifdef FEATURE_CLICKONCE
+ OBJECTREF m_ActivationArguments;
+#endif // FEATURE_CLICKONCE
+ STRINGREF m_ApplicationTrust;
+ I1ARRAYREF m_ConfigurationBytes;
+ STRINGREF m_AppDomainManagerAssembly;
+ STRINGREF m_AppDomainManagerType;
+#ifdef FEATURE_APTCA
+ PTRARRAYREF m_AptcaVisibleAssemblies;
+#endif
+ OBJECTREF m_CompatFlags;
+ STRINGREF m_TargetFrameworkName;
+#ifndef FEATURE_CORECLR
+ APPDOMAINSORTINGSETUPINFOREF m_AppDomainSortingSetupInfo;
+#endif // FEATURE_CORECLR
+ INT32 m_LoaderOptimization;
+#ifdef FEATURE_COMINTEROP
+ CLR_BOOL m_DisableInterfaceCache;
+#endif // FEATURE_COMINTEROP
+ CLR_BOOL m_CheckedForTargetFrameworkName;
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+ CLR_BOOL m_UseRandomizedStringHashing;
+#endif
+
+
+ protected:
+ AppDomainSetupObject() { LIMITED_METHOD_CONTRACT; }
+ ~AppDomainSetupObject() { LIMITED_METHOD_CONTRACT; }
+
+ public:
+#ifndef FEATURE_CORECLR
+ APPDOMAINSORTINGSETUPINFOREF GetAppDomainSortingSetupInfo() { LIMITED_METHOD_CONTRACT; return m_AppDomainSortingSetupInfo; }
+#endif // FEATURE_CORECLR
+#ifdef FEATURE_RANDOMIZED_STRING_HASHING
+ BOOL UseRandomizedStringHashing() { LIMITED_METHOD_CONTRACT; return (BOOL) m_UseRandomizedStringHashing; }
+#endif // FEATURE_RANDOMIZED_STRING_HASHING
+};
+typedef DPTR(AppDomainSetupObject) PTR_AppDomainSetupObject;
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<AppDomainSetupObject> APPDOMAINSETUPREF;
+#else
+typedef AppDomainSetupObject* APPDOMAINSETUPREF;
+#endif
+
+// AssemblyBaseObject
+// This class is the base class for assemblies
+//
+class AssemblyBaseObject : public Object
+{
+ friend class Assembly;
+ friend class MscorlibBinder;
+
+ protected:
+ // READ ME:
+ // Modifying the order or fields of this object may require other changes to the
+ // classlib class definition of this object.
+ OBJECTREF m_pModuleEventHandler; // Delegate for 'resolve module' event
+ STRINGREF m_fullname; // Slot for storing assemblies fullname
+ OBJECTREF m_pSyncRoot; // Pointer to loader allocator to keep collectible types alive, and to serve as the syncroot for assembly building in ref.emit
+ DomainAssembly* m_pAssembly; // Pointer to the Assembly Structure
+#ifdef FEATURE_APPX
+ UINT32 m_flags;
+#endif
+
+ protected:
+ AssemblyBaseObject() { LIMITED_METHOD_CONTRACT; }
+ ~AssemblyBaseObject() { LIMITED_METHOD_CONTRACT; }
+
+ public:
+
+ void SetAssembly(DomainAssembly* p)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pAssembly = p;
+ }
+
+ DomainAssembly* GetDomainAssembly()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pAssembly;
+ }
+
+ Assembly* GetAssembly();
+
+ void SetSyncRoot(OBJECTREF pSyncRoot)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReferenceUnchecked(&m_pSyncRoot, pSyncRoot);
+ }
+};
+NOINLINE AssemblyBaseObject* GetRuntimeAssemblyHelper(LPVOID __me, DomainAssembly *pAssembly, OBJECTREF keepAlive);
+#define FC_RETURN_ASSEMBLY_OBJECT(pAssembly, refKeepAlive) FC_INNER_RETURN(AssemblyBaseObject*, GetRuntimeAssemblyHelper(__me, pAssembly, refKeepAlive))
+
+// AssemblyNameBaseObject
+// This class is the base class for assembly names
+//
+class AssemblyNameBaseObject : public Object
+{
+ friend class AssemblyNative;
+ friend class AppDomainNative;
+ friend class MscorlibBinder;
+
+ protected:
+ // READ ME:
+ // Modifying the order or fields of this object may require other changes to the
+ // classlib class definition of this object.
+
+ OBJECTREF m_pSimpleName;
+ U1ARRAYREF m_pPublicKey;
+ U1ARRAYREF m_pPublicKeyToken;
+ OBJECTREF m_pCultureInfo;
+ OBJECTREF m_pCodeBase;
+ OBJECTREF m_pVersion;
+ OBJECTREF m_StrongNameKeyPair;
+#ifdef FEATURE_SERIALIZATION
+ OBJECTREF m_siInfo;
+#endif
+ U1ARRAYREF m_HashForControl;
+ DWORD m_HashAlgorithm;
+ DWORD m_HashAlgorithmForControl;
+ DWORD m_VersionCompatibility;
+ DWORD m_Flags;
+
+ protected:
+ AssemblyNameBaseObject() { LIMITED_METHOD_CONTRACT; }
+ ~AssemblyNameBaseObject() { LIMITED_METHOD_CONTRACT; }
+
+ public:
+ OBJECTREF GetSimpleName() { LIMITED_METHOD_CONTRACT; return m_pSimpleName; }
+ U1ARRAYREF GetPublicKey() { LIMITED_METHOD_CONTRACT; return m_pPublicKey; }
+ U1ARRAYREF GetPublicKeyToken() { LIMITED_METHOD_CONTRACT; return m_pPublicKeyToken; }
+ OBJECTREF GetStrongNameKeyPair() { LIMITED_METHOD_CONTRACT; return m_StrongNameKeyPair; }
+ OBJECTREF GetCultureInfo() { LIMITED_METHOD_CONTRACT; return m_pCultureInfo; }
+ OBJECTREF GetAssemblyCodeBase() { LIMITED_METHOD_CONTRACT; return m_pCodeBase; }
+ OBJECTREF GetVersion() { LIMITED_METHOD_CONTRACT; return m_pVersion; }
+ DWORD GetAssemblyHashAlgorithm() { LIMITED_METHOD_CONTRACT; return m_HashAlgorithm; }
+ DWORD GetFlags() { LIMITED_METHOD_CONTRACT; return m_Flags; }
+ U1ARRAYREF GetHashForControl() { LIMITED_METHOD_CONTRACT; return m_HashForControl;}
+ DWORD GetHashAlgorithmForControl() { LIMITED_METHOD_CONTRACT; return m_HashAlgorithmForControl; }
+};
+
+// VersionBaseObject
+// This class is the base class for versions
+//
+class VersionBaseObject : public Object
+{
+ friend class MscorlibBinder;
+
+ protected:
+ // READ ME:
+ // Modifying the order or fields of this object may require other changes to the
+ // classlib class definition of this object.
+
+ int m_Major;
+ int m_Minor;
+ int m_Build;
+ int m_Revision;
+
+ VersionBaseObject() {LIMITED_METHOD_CONTRACT;}
+ ~VersionBaseObject() {LIMITED_METHOD_CONTRACT;}
+
+ public:
+ int GetMajor() { LIMITED_METHOD_CONTRACT; return m_Major; }
+ int GetMinor() { LIMITED_METHOD_CONTRACT; return m_Minor; }
+ int GetBuild() { LIMITED_METHOD_CONTRACT; return m_Build; }
+ int GetRevision() { LIMITED_METHOD_CONTRACT; return m_Revision; }
+};
+
+// FrameSecurityDescriptorBaseObject
+// This class is the base class for the frame security descriptor
+//
+
+class FrameSecurityDescriptorBaseObject : public Object
+{
+ friend class MscorlibBinder;
+
+ protected:
+ // READ ME:
+ // Modifying the order or fields of this object may require other changes to the
+ // classlib class definition of this object.
+
+ OBJECTREF m_assertions; // imperative
+ OBJECTREF m_denials; // imperative
+ OBJECTREF m_restriction; // imperative
+ OBJECTREF m_DeclarativeAssertions;
+ OBJECTREF m_DeclarativeDenials;
+ OBJECTREF m_DeclarativeRestrictions;
+#ifndef FEATURE_PAL
+ SAFEHANDLEREF m_callerToken; // the thread token (or process token if there was no thread token) when a call to Impersonate was made ("previous" token)
+ SAFEHANDLEREF m_impToken; // the thread token after a call to Impersonate is made (the "current" impersonation)
+#endif // !FEATURE_PAL
+ CLR_BOOL m_assertFT;
+ CLR_BOOL m_assertAllPossible;
+ CLR_BOOL m_declSecComputed;
+
+
+
+ protected:
+ FrameSecurityDescriptorBaseObject() {LIMITED_METHOD_CONTRACT;}
+ ~FrameSecurityDescriptorBaseObject() {LIMITED_METHOD_CONTRACT;}
+
+ public:
+
+ INT32 GetOverridesCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ INT32 ret =0;
+ if (m_restriction != NULL)
+ ret++;
+ if (m_denials != NULL)
+ ret++;
+ if (m_DeclarativeDenials != NULL)
+ ret++;
+ if (m_DeclarativeRestrictions != NULL)
+ ret++;
+ return ret;
+ }
+
+ INT32 GetAssertCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ INT32 ret =0;
+ if (m_assertions != NULL || m_DeclarativeAssertions != NULL || HasAssertAllPossible())
+ ret++;
+ return ret;
+ }
+
+ BOOL HasAssertFT()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_assertFT;
+ }
+
+ BOOL IsDeclSecComputed()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_declSecComputed;
+ }
+
+ BOOL HasAssertAllPossible()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_assertAllPossible;
+ }
+
+ OBJECTREF GetImperativeAssertions()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_assertions;
+ }
+ OBJECTREF GetDeclarativeAssertions()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_DeclarativeAssertions;
+ }
+ OBJECTREF GetImperativeDenials()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_denials;
+ }
+ OBJECTREF GetDeclarativeDenials()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_DeclarativeDenials;
+ }
+ OBJECTREF GetImperativeRestrictions()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_restriction;
+ }
+ OBJECTREF GetDeclarativeRestrictions()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_DeclarativeRestrictions;
+ }
+ void SetImperativeAssertions(OBJECTREF assertRef)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetObjectReference(&m_assertions, assertRef, this->GetAppDomain());
+ }
+ void SetDeclarativeAssertions(OBJECTREF assertRef)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetObjectReference(&m_DeclarativeAssertions, assertRef, this->GetAppDomain());
+ }
+ void SetImperativeDenials(OBJECTREF denialRef)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetObjectReference(&m_denials, denialRef, this->GetAppDomain());
+ }
+
+ void SetDeclarativeDenials(OBJECTREF denialRef)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetObjectReference(&m_DeclarativeDenials, denialRef, this->GetAppDomain());
+ }
+
+ void SetImperativeRestrictions(OBJECTREF restrictRef)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetObjectReference(&m_restriction, restrictRef, this->GetAppDomain());
+ }
+
+ void SetDeclarativeRestrictions(OBJECTREF restrictRef)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetObjectReference(&m_DeclarativeRestrictions, restrictRef, this->GetAppDomain());
+ }
+ void SetAssertAllPossible(BOOL assertAllPossible)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_assertAllPossible = !!assertAllPossible;
+ }
+
+ void SetAssertFT(BOOL assertFT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_assertFT = !!assertFT;
+ }
+ void SetDeclSecComputed(BOOL declSec)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_declSecComputed = !!declSec;
+ }
+ LPVOID GetCallerToken();
+ LPVOID GetImpersonationToken();
+};
+
+#ifdef FEATURE_COMPRESSEDSTACK
+class FrameSecurityDescriptorWithResolverBaseObject : public FrameSecurityDescriptorBaseObject
+{
+public:
+ OBJECTREF m_resolver;
+
+public:
+ void SetDynamicMethodResolver(OBJECTREF resolver)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetObjectReference(&m_resolver, resolver, this->GetAppDomain());
+ }
+};
+#endif // FEATURE_COMPRESSEDSTACK
+
+class WeakReferenceObject : public Object
+{
+public:
+ Volatile<OBJECTHANDLE> m_Handle;
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+
+typedef REF<ReflectModuleBaseObject> REFLECTMODULEBASEREF;
+
+typedef REF<ReflectClassBaseObject> REFLECTCLASSBASEREF;
+
+typedef REF<ReflectMethodObject> REFLECTMETHODREF;
+
+typedef REF<ReflectFieldObject> REFLECTFIELDREF;
+
+typedef REF<ThreadBaseObject> THREADBASEREF;
+
+typedef REF<AppDomainBaseObject> APPDOMAINREF;
+
+typedef REF<MarshalByRefObjectBaseObject> MARSHALBYREFOBJECTBASEREF;
+
+typedef REF<ContextBaseObject> CONTEXTBASEREF;
+
+typedef REF<AssemblyBaseObject> ASSEMBLYREF;
+
+typedef REF<AssemblyNameBaseObject> ASSEMBLYNAMEREF;
+
+typedef REF<VersionBaseObject> VERSIONREF;
+
+typedef REF<FrameSecurityDescriptorBaseObject> FRAMESECDESCREF;
+
+#ifdef FEATURE_COMPRESSEDSTACK
+typedef REF<FrameSecurityDescriptorWithResolverBaseObject> FRAMESECDESWITHRESOLVERCREF;
+#endif // FEATURE_COMPRESSEDSTACK
+
+typedef REF<WeakReferenceObject> WEAKREFERENCEREF;
+
+inline ARG_SLOT ObjToArgSlot(OBJECTREF objRef)
+{
+ LIMITED_METHOD_CONTRACT;
+ LPVOID v;
+ v = OBJECTREFToObject(objRef);
+ return (ARG_SLOT)(SIZE_T)v;
+}
+
+inline OBJECTREF ArgSlotToObj(ARG_SLOT i)
+{
+ LIMITED_METHOD_CONTRACT;
+ LPVOID v;
+ v = (LPVOID)(SIZE_T)i;
+ return ObjectToOBJECTREF ((Object*)v);
+}
+
+inline ARG_SLOT StringToArgSlot(STRINGREF sr)
+{
+ LIMITED_METHOD_CONTRACT;
+ LPVOID v;
+ v = OBJECTREFToObject(sr);
+ return (ARG_SLOT)(SIZE_T)v;
+}
+
+inline STRINGREF ArgSlotToString(ARG_SLOT s)
+{
+ LIMITED_METHOD_CONTRACT;
+ LPVOID v;
+ v = (LPVOID)(SIZE_T)s;
+ return ObjectToSTRINGREF ((StringObject*)v);
+}
+
+#else // USE_CHECKED_OBJECTREFS
+
+typedef PTR_ReflectModuleBaseObject REFLECTMODULEBASEREF;
+typedef PTR_ReflectClassBaseObject REFLECTCLASSBASEREF;
+typedef PTR_ReflectMethodObject REFLECTMETHODREF;
+typedef PTR_ReflectFieldObject REFLECTFIELDREF;
+typedef PTR_ThreadBaseObject THREADBASEREF;
+typedef PTR_AppDomainBaseObject APPDOMAINREF;
+typedef PTR_AssemblyBaseObject ASSEMBLYREF;
+typedef PTR_AssemblyNameBaseObject ASSEMBLYNAMEREF;
+typedef PTR_ContextBaseObject CONTEXTBASEREF;
+
+#ifndef DACCESS_COMPILE
+typedef MarshalByRefObjectBaseObject* MARSHALBYREFOBJECTBASEREF;
+typedef VersionBaseObject* VERSIONREF;
+typedef FrameSecurityDescriptorBaseObject* FRAMESECDESCREF;
+
+#ifdef FEATURE_COMPRESSEDSTACK
+typedef FrameSecurityDescriptorWithResolverBaseObject* FRAMESECDESWITHRESOLVERCREF;
+#endif // FEATURE_COMPRESSEDSTACK
+
+typedef WeakReferenceObject* WEAKREFERENCEREF;
+#endif // #ifndef DACCESS_COMPILE
+
+#define ObjToArgSlot(objref) ((ARG_SLOT)(SIZE_T)(objref))
+#define ArgSlotToObj(s) ((OBJECTREF)(SIZE_T)(s))
+
+#define StringToArgSlot(objref) ((ARG_SLOT)(SIZE_T)(objref))
+#define ArgSlotToString(s) ((STRINGREF)(SIZE_T)(s))
+
+#endif //USE_CHECKED_OBJECTREFS
+
+#define PtrToArgSlot(ptr) ((ARG_SLOT)(SIZE_T)(ptr))
+#define ArgSlotToPtr(s) ((LPVOID)(SIZE_T)(s))
+
+#define BoolToArgSlot(b) ((ARG_SLOT)(CLR_BOOL)(!!(b)))
+#define ArgSlotToBool(s) ((BOOL)(s))
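+
+// Worked sketch (illustrative only): ARG_SLOT is a uniform 64-bit argument
+// cell, and these helpers round-trip values through it without changing
+// their bits. The locals below are hypothetical.
+//
+//   OBJECTREF obj = /* some object */;
+//   ARG_SLOT slot = ObjToArgSlot(obj);
+//   OBJECTREF back = ArgSlotToObj(slot); // same reference as 'obj'
+//
+//   ARG_SLOT b = BoolToArgSlot(42);      // !! normalizes 42 to 1
+//   BOOL fBack = ArgSlotToBool(b);       // TRUE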
+
+STRINGREF AllocateString(SString sstr);
+CHARARRAYREF AllocateCharArray(DWORD dwArrayLength);
+
+
+class TransparentProxyObject : public Object
+{
+ friend class MscorlibBinder;
+ friend class CheckAsmOffsets;
+
+public:
+ MethodTable * GetMethodTableBeingProxied()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _pMT;
+ }
+ void SetMethodTableBeingProxied(MethodTable * pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _pMT = pMT;
+ }
+
+ MethodTable * GetInterfaceMethodTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _pInterfaceMT;
+ }
+ void SetInterfaceMethodTable(MethodTable * pInterfaceMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _pInterfaceMT = pInterfaceMT;
+ }
+
+ void * GetStub()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _stub;
+ }
+ void SetStub(void * pStub)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _stub = pStub;
+ }
+
+ OBJECTREF GetStubData()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _stubData;
+ }
+ void SetStubData(OBJECTREF stubData)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetObjectReference(&_stubData, stubData, GetAppDomain());
+ }
+
+ OBJECTREF GetRealProxy()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _rp;
+ }
+ void SetRealProxy(OBJECTREF realProxy)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetObjectReference(&_rp, realProxy, GetAppDomain());
+ }
+
+ static int GetOffsetOfRP() { LIMITED_METHOD_CONTRACT; return offsetof(TransparentProxyObject, _rp); }
+
+protected:
+ TransparentProxyObject()
+ {LIMITED_METHOD_CONTRACT;}; // don't instantiate this class directly
+ ~TransparentProxyObject(){LIMITED_METHOD_CONTRACT;};
+
+private:
+ OBJECTREF _rp;
+ OBJECTREF _stubData;
+ MethodTable* _pMT;
+ MethodTable* _pInterfaceMT;
+ void* _stub;
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<TransparentProxyObject> TRANSPARENTPROXYREF;
+#else
+typedef TransparentProxyObject* TRANSPARENTPROXYREF;
+#endif
+
+
+class RealProxyObject : public Object
+{
+ friend class MscorlibBinder;
+
+public:
+ DWORD GetOptFlags()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _optFlags;
+ }
+ VOID SetOptFlags(DWORD flags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _optFlags = flags;
+ }
+
+ DWORD GetDomainID()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _domainID;
+ }
+
+ TRANSPARENTPROXYREF GetTransparentProxy()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (TRANSPARENTPROXYREF&)_tp;
+ }
+ void SetTransparentProxy(TRANSPARENTPROXYREF tp)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetObjectReference(&_tp, (OBJECTREF)tp, GetAppDomain());
+ }
+
+ static int GetOffsetOfIdentity() { LIMITED_METHOD_CONTRACT; return offsetof(RealProxyObject, _identity); }
+ static int GetOffsetOfServerObject() { LIMITED_METHOD_CONTRACT; return offsetof(RealProxyObject, _serverObject); }
+ static int GetOffsetOfServerIdentity() { LIMITED_METHOD_CONTRACT; return offsetof(RealProxyObject, _srvIdentity); }
+
+protected:
+ RealProxyObject()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }; // don't instantiate this class directly
+ ~RealProxyObject(){ LIMITED_METHOD_CONTRACT; };
+
+private:
+ OBJECTREF _tp;
+ OBJECTREF _identity;
+ OBJECTREF _serverObject;
+ DWORD _flags;
+ DWORD _optFlags;
+ DWORD _domainID;
+ OBJECTHANDLE _srvIdentity;
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<RealProxyObject> REALPROXYREF;
+#else
+typedef RealProxyObject* REALPROXYREF;
+#endif
+
+
+#ifndef CLR_STANDALONE_BINDER
+#ifdef FEATURE_COMINTEROP
+
+//-------------------------------------------------------------
+// class ComObject, Exposed class __ComObject
+//
+//
+//-------------------------------------------------------------
+class ComObject : public MarshalByRefObjectBaseObject
+{
+ friend class MscorlibBinder;
+
+protected:
+
+ ComObject()
+ {LIMITED_METHOD_CONTRACT;}; // don't instantiate this class directly
+ ~ComObject(){LIMITED_METHOD_CONTRACT;};
+
+public:
+ OBJECTREF m_ObjectToDataMap;
+
+ //--------------------------------------------------------------------
+ // SupportsInterface
+ static BOOL SupportsInterface(OBJECTREF oref, MethodTable* pIntfTable);
+
+ //--------------------------------------------------------------------
+    // ThrowInvalidCastException
+ static void ThrowInvalidCastException(OBJECTREF *pObj, MethodTable* pCastToMT);
+
+ //-----------------------------------------------------------------
+ // GetComIPFromRCW
+ static IUnknown* GetComIPFromRCW(OBJECTREF *pObj, MethodTable* pIntfTable);
+
+ //-----------------------------------------------------------------
+ // GetComIPFromRCWThrowing
+ static IUnknown* GetComIPFromRCWThrowing(OBJECTREF *pObj, MethodTable* pIntfTable);
+
+ //-----------------------------------------------------------
+ // create an empty ComObjectRef
+ static OBJECTREF CreateComObjectRef(MethodTable* pMT);
+
+ //-----------------------------------------------------------
+ // Release all the data associated with the __ComObject.
+ static void ReleaseAllData(OBJECTREF oref);
+
+ //-----------------------------------------------------------
+ // Redirection for ToString
+ static FCDECL1(MethodDesc *, GetRedirectedToStringMD, Object *pThisUNSAFE);
+ static FCDECL2(StringObject *, RedirectToString, Object *pThisUNSAFE, MethodDesc *pToStringMD);
+
+ //-----------------------------------------------------------
+ // Redirection for GetHashCode
+ static FCDECL1(MethodDesc *, GetRedirectedGetHashCodeMD, Object *pThisUNSAFE);
+ static FCDECL2(int, RedirectGetHashCode, Object *pThisUNSAFE, MethodDesc *pGetHashCodeMD);
+
+ //-----------------------------------------------------------
+ // Redirection for Equals
+ static FCDECL1(MethodDesc *, GetRedirectedEqualsMD, Object *pThisUNSAFE);
+ static FCDECL3(FC_BOOL_RET, RedirectEquals, Object *pThisUNSAFE, Object *pOtherUNSAFE, MethodDesc *pEqualsMD);
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<ComObject> COMOBJECTREF;
+#else
+typedef ComObject* COMOBJECTREF;
+#endif
+
+
+//-------------------------------------------------------------
+// class UnknownWrapper, Exposed class UnknownWrapper
+//
+//
+//-------------------------------------------------------------
+class UnknownWrapper : public Object
+{
+protected:
+
+    UnknownWrapper(UnknownWrapper &wrap) {LIMITED_METHOD_CONTRACT;}; // disallow copy construction.
+ UnknownWrapper() {LIMITED_METHOD_CONTRACT;}; // don't instantiate this class directly
+ ~UnknownWrapper() {LIMITED_METHOD_CONTRACT;};
+
+ OBJECTREF m_WrappedObject;
+
+public:
+ OBJECTREF GetWrappedObject()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_WrappedObject;
+ }
+
+ void SetWrappedObject(OBJECTREF pWrappedObject)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_WrappedObject = pWrappedObject;
+ }
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<UnknownWrapper> UNKNOWNWRAPPEROBJECTREF;
+#else
+typedef UnknownWrapper* UNKNOWNWRAPPEROBJECTREF;
+#endif
+
+
+//-------------------------------------------------------------
+// class DispatchWrapper, Exposed class DispatchWrapper
+//
+//
+//-------------------------------------------------------------
+class DispatchWrapper : public Object
+{
+protected:
+
+    DispatchWrapper(DispatchWrapper &wrap) {LIMITED_METHOD_CONTRACT;}; // disallow copy construction.
+ DispatchWrapper() {LIMITED_METHOD_CONTRACT;}; // don't instantiate this class directly
+ ~DispatchWrapper() {LIMITED_METHOD_CONTRACT;};
+
+ OBJECTREF m_WrappedObject;
+
+public:
+ OBJECTREF GetWrappedObject()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_WrappedObject;
+ }
+
+ void SetWrappedObject(OBJECTREF pWrappedObject)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_WrappedObject = pWrappedObject;
+ }
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<DispatchWrapper> DISPATCHWRAPPEROBJECTREF;
+#else
+typedef DispatchWrapper* DISPATCHWRAPPEROBJECTREF;
+#endif
+
+
+//-------------------------------------------------------------
+// class VariantWrapper, Exposed class VARIANTWRAPPEROBJECTREF
+//
+//
+//-------------------------------------------------------------
+class VariantWrapper : public Object
+{
+protected:
+
+    VariantWrapper(VariantWrapper &wrap) {LIMITED_METHOD_CONTRACT;}; // disallow copy construction.
+    VariantWrapper() {LIMITED_METHOD_CONTRACT;}; // don't instantiate this class directly
+    ~VariantWrapper() {LIMITED_METHOD_CONTRACT;};
+
+ OBJECTREF m_WrappedObject;
+
+public:
+ OBJECTREF GetWrappedObject()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_WrappedObject;
+ }
+
+ void SetWrappedObject(OBJECTREF pWrappedObject)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_WrappedObject = pWrappedObject;
+ }
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<VariantWrapper> VARIANTWRAPPEROBJECTREF;
+#else
+typedef VariantWrapper* VARIANTWRAPPEROBJECTREF;
+#endif
+
+
+//-------------------------------------------------------------
+// class ErrorWrapper, Exposed class ErrorWrapper
+//
+//
+//-------------------------------------------------------------
+class ErrorWrapper : public Object
+{
+protected:
+
+    ErrorWrapper(ErrorWrapper &wrap) {LIMITED_METHOD_CONTRACT;}; // disallow copy construction.
+ ErrorWrapper() {LIMITED_METHOD_CONTRACT;}; // don't instantiate this class directly
+ ~ErrorWrapper() {LIMITED_METHOD_CONTRACT;};
+
+ INT32 m_ErrorCode;
+
+public:
+ INT32 GetErrorCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ErrorCode;
+ }
+
+ void SetErrorCode(int ErrorCode)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_ErrorCode = ErrorCode;
+ }
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<ErrorWrapper> ERRORWRAPPEROBJECTREF;
+#else
+typedef ErrorWrapper* ERRORWRAPPEROBJECTREF;
+#endif
+
+
+//-------------------------------------------------------------
+// class CurrencyWrapper, Exposed class CurrencyWrapper
+//
+//
+//-------------------------------------------------------------
+
+// Keep this in sync with code:MethodTableBuilder.CheckForSystemTypes, where
+// the alignment requirement of the managed System.Decimal structure is computed.
+#if !defined(ALIGN_ACCESS) && !defined(FEATURE_64BIT_ALIGNMENT)
+#include <pshpack4.h>
+#endif // !ALIGN_ACCESS && !FEATURE_64BIT_ALIGNMENT
+
+class CurrencyWrapper : public Object
+{
+protected:
+
+    CurrencyWrapper(CurrencyWrapper &wrap) {LIMITED_METHOD_CONTRACT;}; // disallow copy construction.
+ CurrencyWrapper() {LIMITED_METHOD_CONTRACT;}; // don't instantiate this class directly
+ ~CurrencyWrapper() {LIMITED_METHOD_CONTRACT;};
+
+ DECIMAL m_WrappedObject;
+
+public:
+ DECIMAL GetWrappedObject()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_WrappedObject;
+ }
+
+ void SetWrappedObject(DECIMAL WrappedObj)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_WrappedObject = WrappedObj;
+ }
+};
+
+#if !defined(ALIGN_ACCESS) && !defined(FEATURE_64BIT_ALIGNMENT)
+#include <poppack.h>
+#endif // !ALIGN_ACCESS && !FEATURE_64BIT_ALIGNMENT
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<CurrencyWrapper> CURRENCYWRAPPEROBJECTREF;
+#else
+typedef CurrencyWrapper* CURRENCYWRAPPEROBJECTREF;
+#endif
+
+//-------------------------------------------------------------
+// class BStrWrapper, Exposed class BSTRWRAPPEROBJECTREF
+//
+//
+//-------------------------------------------------------------
+class BStrWrapper : public Object
+{
+protected:
+
+    BStrWrapper(BStrWrapper &wrap) {LIMITED_METHOD_CONTRACT;}; // disallow copy construction.
+    BStrWrapper() {LIMITED_METHOD_CONTRACT;}; // don't instantiate this class directly
+    ~BStrWrapper() {LIMITED_METHOD_CONTRACT;};
+
+ STRINGREF m_WrappedObject;
+
+public:
+ STRINGREF GetWrappedObject()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_WrappedObject;
+ }
+
+ void SetWrappedObject(STRINGREF pWrappedObject)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_WrappedObject = pWrappedObject;
+ }
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<BStrWrapper> BSTRWRAPPEROBJECTREF;
+#else
+typedef BStrWrapper* BSTRWRAPPEROBJECTREF;
+#endif
+
+#endif // FEATURE_COMINTEROP
+#endif // CLR_STANDALONE_BINDER
+
+class StringBufferObject;
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<StringBufferObject> STRINGBUFFERREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef StringBufferObject * STRINGBUFFERREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+//
+// StringBufferObject
+//
+// Note that the "copy on write" bit is buried within the implementation
+// of the object in order to make the implementation smaller.
+//
+
+
+class StringBufferObject : public Object
+{
+ friend class MscorlibBinder;
+
+ private:
+ CHARARRAYREF m_ChunkChars;
+ StringBufferObject *m_ChunkPrevious;
+ UINT32 m_ChunkLength;
+ UINT32 m_ChunkOffset;
+ INT32 m_MaxCapacity;
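+
+    // Chunk-list sketch (illustrative, inferred from the fields above): the
+    // buffer is a backward-linked list of chunks, each owning a char array:
+    //
+    //   "Hello, world" -> [this: m_ChunkChars = "world", m_ChunkOffset = 7]
+    //                       --m_ChunkPrevious--> [m_ChunkChars = "Hello, ", m_ChunkOffset = 0]
+    //
+    // so GetArrayLength() below reports m_ChunkOffset plus this chunk's array length.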
+
+ WCHAR* GetBuffer()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (WCHAR *)m_ChunkChars->GetDirectPointerToNonObjectElements();
+ }
+
+    // This function assumes that requiredLength will be less than
+    // or equal to the max capacity of the StringBufferObject
+ DWORD GetAllocationLength(DWORD dwRequiredLength)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE((INT32)dwRequiredLength <= m_MaxCapacity);
+ DWORD dwCurrentLength = GetArrayLength();
+
+        // grow to the required length, rounded up to the
+        // nearest multiple of 2
+ if(dwCurrentLength < dwRequiredLength)
+ {
+ dwCurrentLength = (dwRequiredLength + 1) & ~1;
+ }
+ return dwCurrentLength;
+ }
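+
+    // Worked example (sketch): with dwRequiredLength = 7 and a smaller current
+    // array, (7 + 1) & ~1 = 8 is returned; dwRequiredLength = 8 also yields 8,
+    // since the mask only rounds odd values up to the next even value.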
+
+ protected:
+ StringBufferObject() { LIMITED_METHOD_CONTRACT; };
+ ~StringBufferObject() { LIMITED_METHOD_CONTRACT; };
+
+ public:
+ INT32 GetMaxCapacity()
+ {
+ return m_MaxCapacity;
+ }
+
+ DWORD GetArrayLength()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_ChunkChars);
+ return m_ChunkOffset + m_ChunkChars->GetNumComponents();
+ }
+
+ // Given an ANSI string, use it to replace the StringBufferObject's internal buffer
+ VOID ReplaceBufferWithAnsi(CHARARRAYREF *newArrayRef, __in CHAR *newChars, DWORD dwNewCapacity)
+ {
+#ifndef DACCESS_COMPILE
+ SetObjectReference((OBJECTREF *)&m_ChunkChars, (OBJECTREF)(*newArrayRef), GetAppDomain());
+#endif //!DACCESS_COMPILE
+ WCHAR *thisChars = GetBuffer();
+        // NOTE: This call to MultiByteToWideChar also writes out the null terminator,
+        // which is currently part of the String representation.
+ INT32 ncWritten = MultiByteToWideChar(CP_ACP,
+ MB_PRECOMPOSED,
+ newChars,
+ -1,
+ (LPWSTR)thisChars,
+ dwNewCapacity+1);
+
+ if (ncWritten == 0)
+ {
+            // Normally, we'd throw an exception if the string couldn't be converted.
+            // In this particular case, we paper over it instead: the most likely
+            // reason a P/Invoke-called API returned a poison string is that the
+            // API failed for some reason and hence exercised its right to leave
+            // the buffer in a poison state. Because P/Invoke cannot discover
+            // whether an API failed, it cannot know to ignore the buffer on the
+            // out-marshaling path. And because the normal P/Invoke procedure is
+            // for the caller to check error codes manually, we don't want to
+            // throw an exception at the caller. We certainly don't want to
+            // randomly throw or not throw based on the nondeterministic contents
+            // of a buffer passed to a failing API.
+ *thisChars = W('\0');
+ ncWritten++;
+ }
+
+ m_ChunkOffset = 0;
+ m_ChunkLength = ncWritten-1;
+ m_ChunkPrevious = NULL;
+ }
+
+ // Given a Unicode string, use it to replace the StringBufferObject's internal buffer
+ VOID ReplaceBuffer(CHARARRAYREF *newArrayRef, __in_ecount(dwNewCapacity) WCHAR *newChars, DWORD dwNewCapacity)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+#ifndef DACCESS_COMPILE
+ SetObjectReference((OBJECTREF *)&m_ChunkChars, (OBJECTREF)(*newArrayRef), GetAppDomain());
+#endif //!DACCESS_COMPILE
+ WCHAR *thisChars = GetBuffer();
+ memcpyNoGCRefs(thisChars, newChars, sizeof(WCHAR)*dwNewCapacity);
+ thisChars[dwNewCapacity] = W('\0');
+ m_ChunkLength = dwNewCapacity;
+ m_ChunkPrevious = NULL;
+ m_ChunkOffset = 0;
+ }
+
+ static void ReplaceBuffer(STRINGBUFFERREF *thisRef, __in_ecount(newLength) WCHAR *newBuffer, INT32 newLength);
+ static void ReplaceBufferAnsi(STRINGBUFFERREF *thisRef, __in_ecount(newCapacity) CHAR *newBuffer, INT32 newCapacity);
+ static INT32 LocalIndexOfString(__in_ecount(strLength) WCHAR *base, __in_ecount(patternLength) WCHAR *search, int strLength, int patternLength, int startPos);
+};
+
+class SafeHandle : public Object
+{
+ friend class MscorlibBinder;
+
+ private:
+ // READ ME:
+ // Modifying the order or fields of this object may require
+ // other changes to the classlib class definition of this
+ // object or special handling when loading this system class.
+#ifdef _DEBUG
+ STRINGREF m_debugStackTrace; // Where we allocated this SafeHandle
+#endif
+ Volatile<LPVOID> m_handle;
+ Volatile<INT32> m_state; // Combined ref count and closed/disposed state (for atomicity)
+ Volatile<CLR_BOOL> m_ownsHandle;
+ Volatile<CLR_BOOL> m_fullyInitialized; // Did constructor finish?
+
+ // Describe the bits in the m_state field above.
+ enum StateBits
+ {
+ SH_State_Closed = 0x00000001,
+ SH_State_Disposed = 0x00000002,
+ SH_State_RefCount = 0xfffffffc,
+ SH_RefCountOne = 4, // Amount to increment state field to yield a ref count increment of 1
+ };
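+
+    // Illustrative decomposition of m_state (sketch, hypothetical values; not
+    // part of this file):
+    //
+    //   INT32 state    = m_state;
+    //   bool  closed   = (state & SH_State_Closed)   != 0;
+    //   bool  disposed = (state & SH_State_Disposed) != 0;
+    //   INT32 refCount = (state & SH_State_RefCount) / SH_RefCountOne;
+    //
+    // e.g. state = 9 (binary 1001) means closed with a ref count of 2,
+    // since 9 & 0xfffffffc = 8 and 8 / 4 = 2.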
+
+ static WORD s_IsInvalidHandleMethodSlot;
+ static WORD s_ReleaseHandleMethodSlot;
+
+ static void RunReleaseMethod(SafeHandle* psh);
+ BOOL IsFullyInitialized() const { LIMITED_METHOD_CONTRACT; return m_fullyInitialized; }
+
+ public:
+ static void Init();
+
+ // To use the SafeHandle from native, look at the SafeHandleHolder, which
+ // will do the AddRef & Release for you.
+ LPVOID GetHandle() const {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(((unsigned int) m_state) >= SH_RefCountOne);
+ return m_handle;
+ }
+
+ BOOL OwnsHandle() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ownsHandle;
+ }
+
+ static size_t GetHandleOffset() { LIMITED_METHOD_CONTRACT; return offsetof(SafeHandle, m_handle); }
+
+ void AddRef();
+ void Release(bool fDispose = false);
+ void Dispose();
+ void SetHandle(LPVOID handle);
+
+ static FCDECL1(void, DisposeNative, SafeHandle* refThisUNSAFE);
+ static FCDECL1(void, Finalize, SafeHandle* refThisUNSAFE);
+ static FCDECL1(void, SetHandleAsInvalid, SafeHandle* refThisUNSAFE);
+ static FCDECL2(void, DangerousAddRef, SafeHandle* refThisUNSAFE, CLR_BOOL *pfSuccess);
+ static FCDECL1(void, DangerousRelease, SafeHandle* refThisUNSAFE);
+};
+
+// SAFEHANDLEREF defined above because CompressedStackObject needs it
+
+void AcquireSafeHandle(SAFEHANDLEREF* s);
+void ReleaseSafeHandle(SAFEHANDLEREF* s);
+
+typedef Holder<SAFEHANDLEREF*, AcquireSafeHandle, ReleaseSafeHandle> SafeHandleHolder;
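+
+// Illustrative usage sketch (hypothetical caller, assuming the standard
+// acquire-on-construct / release-on-destruct Holder pattern):
+//
+//   SAFEHANDLEREF sh = ...;             // GC-protected elsewhere
+//   {
+//       SafeHandleHolder holder(&sh);   // AcquireSafeHandle -> AddRef
+//       LPVOID h = sh->GetHandle();     // safe to use while the holder is live
+//   }                                   // ReleaseSafeHandle -> Release on scope exit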
+
+class CriticalHandle : public Object
+{
+ friend class MscorlibBinder;
+
+ private:
+ // READ ME:
+ // Modifying the order or fields of this object may require
+ // other changes to the classlib class definition of this
+ // object or special handling when loading this system class.
+#ifdef _DEBUG
+ STRINGREF m_debugStackTrace; // Where we allocated this CriticalHandle
+#endif
+ Volatile<LPVOID> m_handle;
+ Volatile<CLR_BOOL> m_isClosed;
+
+ public:
+ LPVOID GetHandle() const { LIMITED_METHOD_CONTRACT; return m_handle; }
+ static size_t GetHandleOffset() { LIMITED_METHOD_CONTRACT; return offsetof(CriticalHandle, m_handle); }
+
+ void SetHandle(LPVOID handle) { LIMITED_METHOD_CONTRACT; m_handle = handle; }
+
+ static FCDECL1(void, FireCustomerDebugProbe, CriticalHandle* refThisUNSAFE);
+};
+
+
+class ReflectClassBaseObject;
+
+class SafeBuffer : SafeHandle
+{
+ private:
+ size_t m_numBytes;
+
+ public:
+ static FCDECL1(UINT, SizeOfType, ReflectClassBaseObject* typeUNSAFE);
+ static FCDECL1(UINT, AlignedSizeOfType, ReflectClassBaseObject* typeUNSAFE);
+ static FCDECL3(void, PtrToStructure, BYTE* ptr, TypedByRef structure, UINT32 sizeofT);
+ static FCDECL3(void, StructureToPtr, TypedByRef structure, BYTE* ptr, UINT32 sizeofT);
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<CriticalHandle> CRITICALHANDLE;
+typedef REF<CriticalHandle> CRITICALHANDLEREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef CriticalHandle * CRITICALHANDLE;
+typedef CriticalHandle * CRITICALHANDLEREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+// WaitHandleBase
+// Base class for WaitHandle
+class WaitHandleBase :public MarshalByRefObjectBaseObject
+{
+ friend class WaitHandleNative;
+ friend class MscorlibBinder;
+
+public:
+ __inline LPVOID GetWaitHandle() {LIMITED_METHOD_CONTRACT; return m_handle;}
+ __inline SAFEHANDLEREF GetSafeHandle() {LIMITED_METHOD_CONTRACT; return m_safeHandle;}
+
+private:
+ SAFEHANDLEREF m_safeHandle;
+ LPVOID m_handle;
+ CLR_BOOL m_hasThreadAffinity;
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<WaitHandleBase> WAITHANDLEREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef WaitHandleBase* WAITHANDLEREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+// This class corresponds to FileStreamAsyncResult on the managed side.
+class AsyncResultBase :public Object
+{
+ friend class MscorlibBinder;
+
+public:
+ WAITHANDLEREF GetWaitHandle() { LIMITED_METHOD_CONTRACT; return _waitHandle;}
+ void SetErrorCode(int errcode) { LIMITED_METHOD_CONTRACT; _errorCode = errcode;}
+ void SetNumBytes(int numBytes) { LIMITED_METHOD_CONTRACT; _numBytes = numBytes;}
+ void SetIsComplete() { LIMITED_METHOD_CONTRACT; _isComplete = TRUE; }
+ void SetCompletedAsynchronously() { LIMITED_METHOD_CONTRACT; _completedSynchronously = FALSE; }
+
+ // README:
+ // If you modify the order of these fields, make sure to update the definition in
+ // BCL for this object.
+private:
+ OBJECTREF _userCallback;
+ OBJECTREF _userStateObject;
+
+ WAITHANDLEREF _waitHandle;
+ SAFEHANDLEREF _fileHandle; // For cancellation.
+ LPOVERLAPPED _overlapped;
+ int _EndXxxCalled; // Whether we've called EndXxx already.
+ int _numBytes; // number of bytes read OR written
+ int _errorCode;
+ int _numBufferedBytes;
+
+ CLR_BOOL _isWrite; // Whether this is a read or a write
+ CLR_BOOL _isComplete;
+ CLR_BOOL _completedSynchronously; // Which thread called callback
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<AsyncResultBase> ASYNCRESULTREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef AsyncResultBase* ASYNCRESULTREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+// This class corresponds to System.MulticastDelegate on the managed side.
+class DelegateObject : public Object
+{
+ friend class CheckAsmOffsets;
+ friend class MscorlibBinder;
+
+public:
+ BOOL IsWrapperDelegate() { LIMITED_METHOD_CONTRACT; return _methodPtrAux == NULL; }
+
+ OBJECTREF GetTarget() { LIMITED_METHOD_CONTRACT; return _target; }
+ void SetTarget(OBJECTREF target) { WRAPPER_NO_CONTRACT; SetObjectReference(&_target, target, GetAppDomain()); }
+ static int GetOffsetOfTarget() { LIMITED_METHOD_CONTRACT; return offsetof(DelegateObject, _target); }
+
+ PCODE GetMethodPtr() { LIMITED_METHOD_CONTRACT; return _methodPtr; }
+ void SetMethodPtr(PCODE methodPtr) { LIMITED_METHOD_CONTRACT; _methodPtr = methodPtr; }
+ static int GetOffsetOfMethodPtr() { LIMITED_METHOD_CONTRACT; return offsetof(DelegateObject, _methodPtr); }
+
+ PCODE GetMethodPtrAux() { LIMITED_METHOD_CONTRACT; return _methodPtrAux; }
+ void SetMethodPtrAux(PCODE methodPtrAux) { LIMITED_METHOD_CONTRACT; _methodPtrAux = methodPtrAux; }
+ static int GetOffsetOfMethodPtrAux() { LIMITED_METHOD_CONTRACT; return offsetof(DelegateObject, _methodPtrAux); }
+
+ OBJECTREF GetInvocationList() { LIMITED_METHOD_CONTRACT; return _invocationList; }
+ void SetInvocationList(OBJECTREF invocationList) { WRAPPER_NO_CONTRACT; SetObjectReference(&_invocationList, invocationList, GetAppDomain()); }
+ static int GetOffsetOfInvocationList() { LIMITED_METHOD_CONTRACT; return offsetof(DelegateObject, _invocationList); }
+
+ INT_PTR GetInvocationCount() { LIMITED_METHOD_CONTRACT; return _invocationCount; }
+ void SetInvocationCount(INT_PTR invocationCount) { LIMITED_METHOD_CONTRACT; _invocationCount = invocationCount; }
+ static int GetOffsetOfInvocationCount() { LIMITED_METHOD_CONTRACT; return offsetof(DelegateObject, _invocationCount); }
+
+ void SetMethodBase(OBJECTREF newMethodBase) { LIMITED_METHOD_CONTRACT; SetObjectReference((OBJECTREF*)&_methodBase, newMethodBase, GetAppDomain()); }
+
+ // README:
+ // If you modify the order of these fields, make sure to update the definition in
+ // BCL for this object.
+private:
+ // System.Delegate
+ OBJECTREF _target;
+ OBJECTREF _methodBase;
+ PCODE _methodPtr;
+ PCODE _methodPtrAux;
+ // System.MulticastDelegate
+ OBJECTREF _invocationList;
+ INT_PTR _invocationCount;
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<DelegateObject> DELEGATEREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef DelegateObject* DELEGATEREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+// This class corresponds to PermissionSet on the managed side.
+class PermissionSetObject : public Object
+{
+ friend class MscorlibBinder;
+
+public:
+ BOOL AllPermissionsDecoded()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _allPermissionsDecoded == TRUE;
+ }
+
+ BOOL ContainsCas()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _ContainsCas == TRUE;
+ }
+
+ BOOL ContainsNonCas()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _ContainsNonCas == TRUE;
+ }
+
+ BOOL CheckedForNonCas()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _CheckedForNonCas == TRUE;
+ }
+
+ BOOL IsUnrestricted()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _Unrestricted == TRUE;
+ }
+
+ OBJECTREF GetTokenBasedSet()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _permSet;
+ }
+
+
+ // README:
+ // If you modify the order of these fields, make sure to update the definition in
+ // BCL for this object.
+private:
+ // Order of the fields is important as it mirrors the layout of PermissionSet
+ // to access the fields directly from unmanaged code given an OBJECTREF.
+ // Please keep them in sync when you make changes to the fields.
+ OBJECTREF _permSet;
+ STRINGREF _serializedPermissionSet;
+ OBJECTREF _permSetSaved;
+ OBJECTREF _unrestrictedPermSet;
+ OBJECTREF _normalPermSet;
+ CLR_BOOL _Unrestricted;
+ CLR_BOOL _allPermissionsDecoded;
+ CLR_BOOL _ignoreTypeLoadFailures;
+ CLR_BOOL _CheckedForNonCas;
+ CLR_BOOL _ContainsCas;
+ CLR_BOOL _ContainsNonCas;
+ CLR_BOOL _Readable;
+#ifdef FEATURE_CAS_POLICY
+ CLR_BOOL _canUnrestrictedOverride;
+#endif // FEATURE_CAS_POLICY
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<PermissionSetObject> PERMISSIONSETREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef PermissionSetObject* PERMISSIONSETREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+// This class corresponds to TokenBasedSet on the managed side.
+class TokenBasedSetObject : public Object
+{
+public:
+ INT32 GetNumElements () {
+ LIMITED_METHOD_CONTRACT;
+ return _cElt;
+ }
+
+ OBJECTREF GetPermSet () {
+ LIMITED_METHOD_CONTRACT;
+ return _Obj;
+ }
+
+private:
+ // If you modify the order of these fields, make sure
+ // to update the definition in BCL for this object.
+ OBJECTREF _objSet;
+ OBJECTREF _Obj;
+ OBJECTREF _Set;
+ INT32 _initSize;
+ INT32 _increment;
+ INT32 _cElt;
+ INT32 _maxIndex;
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<TokenBasedSetObject> TOKENBASEDSETREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef TokenBasedSetObject* TOKENBASEDSETREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+// This class corresponds to PolicyStatement on the managed side.
+class PolicyStatementObject : public Object
+{
+ friend class MscorlibBinder;
+private:
+ PERMISSIONSETREF _permSet;
+#ifdef FEATURE_CAS_POLICY
+ OBJECTREF _dependentEvidence;
+#endif // FEATURE_CAS_POLICY
+ INT32 _attributes;
+
+public:
+ PERMISSIONSETREF GetPermissionSet()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _permSet;
+ }
+};
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<PolicyStatementObject> POLICYSTATEMENTREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef PolicyStatementObject* POLICYSTATEMENTREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+// This class corresponds to ApplicationTrust on the managed side.
+class ApplicationTrustObject : public Object
+{
+ friend class MscorlibBinder;
+private:
+#ifdef FEATURE_CLICKONCE
+ OBJECTREF _appId;
+ OBJECTREF _extraInfo;
+ OBJECTREF _elExtraInfo;
+#endif // FEATURE_CLICKONCE
+ POLICYSTATEMENTREF _psDefaultGrant;
+ OBJECTREF _fullTrustAssemblies;
+ DWORD _grantSetSpecialFlags;
+#ifdef FEATURE_CLICKONCE
+ CLR_BOOL _appTrustedToRun;
+ CLR_BOOL _persist;
+#endif // FEATURE_CLICKONCE
+
+public:
+ POLICYSTATEMENTREF GetPolicyStatement()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _psDefaultGrant;
+ }
+
+ // The grant set special flags are mapped in the BCL for the DefaultGrantSet of the ApplicationTrust.
+ // Since ApplicationTrust provides a reference to its DefaultGrantSet rather than a copy, the flags may
+ // not be in sync if user code can ever get a hold of the ApplicationTrust object. Therefore, these
+ // flags should only be used in code paths where we are sure that only trusted code can ever get a
+    // reference to the ApplicationTrust (such as the ApplicationTrust created when setting up a homogeneous
+    // AppDomain).
+ DWORD GetGrantSetSpecialFlags()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _grantSetSpecialFlags;
+ }
+};
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<ApplicationTrustObject> APPLICATIONTRUSTREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef ApplicationTrustObject* APPLICATIONTRUSTREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+// This class corresponds to SecurityPermission on the managed side.
+class SecurityPermissionObject : public Object
+{
+public:
+ DWORD GetFlags () {
+ LIMITED_METHOD_CONTRACT;
+ return _flags;
+ }
+
+private:
+ // If you modify the order of these fields, make sure
+ // to update the definition in BCL for this object.
+ DWORD _flags;
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<SecurityPermissionObject> SECURITYPERMISSIONREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef SecurityPermissionObject* SECURITYPERMISSIONREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+// This class corresponds to ReflectionPermission on the managed side.
+class ReflectionPermissionObject : public Object
+{
+public:
+ DWORD GetFlags () {
+ LIMITED_METHOD_CONTRACT;
+ return _flags;
+ }
+
+private:
+ DWORD _flags;
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<ReflectionPermissionObject> REFLECTIONPERMISSIONREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef ReflectionPermissionObject* REFLECTIONPERMISSIONREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+struct StackTraceElement;
+class ClrDataAccess;
+
+
+typedef DPTR(StackTraceElement) PTR_StackTraceElement;
+
+class StackTraceArray
+{
+ struct ArrayHeader
+ {
+ size_t m_size;
+ Thread * m_thread;
+ };
+
+ typedef DPTR(ArrayHeader) PTR_ArrayHeader;
+
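+    // Backing-store layout implied by GetHeader()/GetData() below (sketch):
+    //
+    //   m_array (I1[]): [ ArrayHeader: m_size, m_thread ][ StackTraceElement 0 ][ StackTraceElement 1 ] ...
+    //
+    // GetData() therefore returns GetRaw() + sizeof(ArrayHeader), and Size()
+    // reads the element count out of the header rather than from the array length.
+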
+public:
+ StackTraceArray()
+ : m_array(static_cast<I1Array *>(NULL))
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ StackTraceArray(I1ARRAYREF array)
+ : m_array(array)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ void Swap(StackTraceArray & rhs)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ SUPPORTS_DAC;
+ I1ARRAYREF t = m_array;
+ m_array = rhs.m_array;
+ rhs.m_array = t;
+ }
+
+ size_t Size() const
+ {
+ WRAPPER_NO_CONTRACT;
+ if (!m_array)
+ return 0;
+ else
+ return GetSize();
+ }
+
+ StackTraceElement const & operator[](size_t index) const;
+ StackTraceElement & operator[](size_t index);
+
+ void Append(StackTraceElement const * begin, StackTraceElement const * end);
+ void AppendSkipLast(StackTraceElement const * begin, StackTraceElement const * end);
+
+ I1ARRAYREF Get() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_array;
+ }
+
+ // Deep copies the array
+ void CopyFrom(StackTraceArray const & src);
+
+private:
+ StackTraceArray(StackTraceArray const & rhs);
+
+ StackTraceArray & operator=(StackTraceArray const & rhs)
+ {
+ WRAPPER_NO_CONTRACT;
+ StackTraceArray copy(rhs);
+ this->Swap(copy);
+ return *this;
+ }
+
+ void Grow(size_t size);
+ void EnsureThreadAffinity();
+ void CheckState() const;
+
+ size_t Capacity() const
+ {
+ WRAPPER_NO_CONTRACT;
+ assert(!!m_array);
+
+ return m_array->GetNumComponents();
+ }
+
+ size_t GetSize() const
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetHeader()->m_size;
+ }
+
+ void SetSize(size_t size)
+ {
+ WRAPPER_NO_CONTRACT;
+ GetHeader()->m_size = size;
+ }
+
+ Thread * GetObjectThread() const
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetHeader()->m_thread;
+ }
+
+#ifndef BINDER
+ void SetObjectThread()
+ {
+ WRAPPER_NO_CONTRACT;
+ GetHeader()->m_thread = GetThread();
+ }
+#endif //!BINDER
+
+ StackTraceElement const * GetData() const
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return dac_cast<PTR_StackTraceElement>(GetRaw() + sizeof(ArrayHeader));
+ }
+
+ PTR_StackTraceElement GetData()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return dac_cast<PTR_StackTraceElement>(GetRaw() + sizeof(ArrayHeader));
+ }
+
+ I1 const * GetRaw() const
+ {
+ WRAPPER_NO_CONTRACT;
+ assert(!!m_array);
+
+ return const_cast<I1ARRAYREF &>(m_array)->GetDirectPointerToNonObjectElements();
+ }
+
+ PTR_I1 GetRaw()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ assert(!!m_array);
+
+ return dac_cast<PTR_I1>(m_array->GetDirectPointerToNonObjectElements());
+ }
+
+ ArrayHeader const * GetHeader() const
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return dac_cast<PTR_ArrayHeader>(GetRaw());
+ }
+
+ PTR_ArrayHeader GetHeader()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return dac_cast<PTR_ArrayHeader>(GetRaw());
+ }
+
+ void SetArray(I1ARRAYREF const & arr)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_array = arr;
+ }
+
+private:
+ // put only things here that can be protected with GCPROTECT
+ I1ARRAYREF m_array;
+};
+
+#ifdef FEATURE_COLLECTIBLE_TYPES
+
+class LoaderAllocatorScoutObject : public Object
+{
+ friend class MscorlibBinder;
+ friend class LoaderAllocatorObject;
+
+protected:
+ LoaderAllocator * m_nativeLoaderAllocator;
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<LoaderAllocatorScoutObject> LOADERALLOCATORSCOUTREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef LoaderAllocatorScoutObject* LOADERALLOCATORSCOUTREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+class LoaderAllocatorObject : public Object
+{
+ friend class MscorlibBinder;
+
+public:
+ PTRARRAYREF GetHandleTable()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (PTRARRAYREF)m_pSlots;
+ }
+
+ void SetHandleTable(PTRARRAYREF handleTable)
+ {
+ LIMITED_METHOD_CONTRACT;
+ SetObjectReferenceUnchecked(&m_pSlots, (OBJECTREF)handleTable);
+ }
+
+ INT32 GetSlotsUsed()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_slotsUsed;
+ }
+
+ void SetSlotsUsed(INT32 newSlotsUsed)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_slotsUsed = newSlotsUsed;
+ }
+
+ void SetNativeLoaderAllocator(LoaderAllocator * pLoaderAllocator)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pLoaderAllocatorScout->m_nativeLoaderAllocator = pLoaderAllocator;
+ }
+
+ // README:
+ // If you modify the order of these fields, make sure to update the definition in
+ // BCL for this object.
+protected:
+ LOADERALLOCATORSCOUTREF m_pLoaderAllocatorScout;
+ OBJECTREF m_pSlots;
+ INT32 m_slotsUsed;
+ OBJECTREF m_methodInstantiationsTable;
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<LoaderAllocatorObject> LOADERALLOCATORREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef DPTR(LoaderAllocatorObject) PTR_LoaderAllocatorObject;
+typedef PTR_LoaderAllocatorObject LOADERALLOCATORREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+#endif // FEATURE_COLLECTIBLE_TYPES
+
+#if !defined(DACCESS_COMPILE) && !defined(CLR_STANDALONE_BINDER)
+// Define the lock used to access stacktrace from an exception object
+EXTERN_C SpinLock g_StackTraceArrayLock;
+#endif // !defined(DACCESS_COMPILE) && !defined(CLR_STANDALONE_BINDER)
+
+// This class corresponds to Exception on the managed side.
+typedef DPTR(class ExceptionObject) PTR_ExceptionObject;
+#include "pshpack4.h"
+class ExceptionObject : public Object
+{
+ friend class MscorlibBinder;
+
+public:
+ void SetHResult(HRESULT hr)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _HResult = hr;
+ }
+
+ HRESULT GetHResult()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _HResult;
+ }
+
+ void SetXCode(DWORD code)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _xcode = code;
+ }
+
+ DWORD GetXCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _xcode;
+ }
+
+ void SetXPtrs(void* xptrs)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _xptrs = xptrs;
+ }
+
+ void* GetXPtrs()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _xptrs;
+ }
+
+ void SetStackTrace(StackTraceArray const & stackTrace, PTRARRAYREF dynamicMethodArray);
+ void SetNullStackTrace();
+
+ void GetStackTrace(StackTraceArray & stackTrace, PTRARRAYREF * outDynamicMethodArray = NULL) const;
+
+#ifdef DACCESS_COMPILE
+ I1ARRAYREF GetStackTraceArrayObject() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return _stackTrace;
+ }
+#endif // DACCESS_COMPILE
+
+ void SetInnerException(OBJECTREF innerException)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReference((OBJECTREF*)&_innerException, (OBJECTREF)innerException, GetAppDomain());
+ }
+
+ OBJECTREF GetInnerException()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return _innerException;
+ }
+
+ // Returns the innermost exception object - equivalent of the
+ // managed System.Exception.GetBaseException method.
+ OBJECTREF GetBaseException()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Loop and get the innermost exception object
+ OBJECTREF oInnerMostException = NULL;
+ OBJECTREF oCurrent = NULL;
+
+ oCurrent = _innerException;
+ while(oCurrent != NULL)
+ {
+ oInnerMostException = oCurrent;
+ oCurrent = ((ExceptionObject*)(Object *)OBJECTREFToObject(oCurrent))->GetInnerException();
+ }
+
+ // return the innermost exception
+ return oInnerMostException;
+ }
+
+ void SetMessage(STRINGREF message)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReference((OBJECTREF*)&_message, (OBJECTREF)message, GetAppDomain());
+ }
+
+ STRINGREF GetMessage()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return _message;
+ }
+
+ void SetStackTraceString(STRINGREF stackTraceString)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReference((OBJECTREF*)&_stackTraceString, (OBJECTREF)stackTraceString, GetAppDomain());
+ }
+
+ STRINGREF GetStackTraceString()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return _stackTraceString;
+ }
+
+ STRINGREF GetRemoteStackTraceString()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return _remoteStackTraceString;
+ }
+
+ void SetHelpURL(STRINGREF helpURL)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReference((OBJECTREF*)&_helpURL, (OBJECTREF)helpURL, GetAppDomain());
+ }
+
+ void SetSource(STRINGREF source)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReference((OBJECTREF*)&_source, (OBJECTREF)source, GetAppDomain());
+ }
+
+ void ClearStackTraceForThrow()
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReferenceUnchecked((OBJECTREF*)&_remoteStackTraceString, NULL);
+ SetObjectReferenceUnchecked((OBJECTREF*)&_stackTrace, NULL);
+ SetObjectReferenceUnchecked((OBJECTREF*)&_stackTraceString, NULL);
+ }
+
+ void ClearStackTracePreservingRemoteStackTrace()
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReferenceUnchecked((OBJECTREF*)&_stackTrace, NULL);
+ SetObjectReferenceUnchecked((OBJECTREF*)&_stackTraceString, NULL);
+ }
+
+ // This method will set the reference to the array
+ // containing the watson bucket information (in byte[] form).
+ void SetWatsonBucketReference(OBJECTREF oWatsonBucketArray)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetObjectReference((OBJECTREF*)&_watsonBuckets, (OBJECTREF)oWatsonBucketArray, GetAppDomain());
+ }
+
+ // This method will return the reference to the array
+ // containing the watson buckets
+ U1ARRAYREF GetWatsonBucketReference()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _watsonBuckets;
+ }
+
+ // This method will return a BOOL to indicate if the
+ // watson buckets are present or not.
+ BOOL AreWatsonBucketsPresent()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (_watsonBuckets != NULL)?TRUE:FALSE;
+ }
+
+ // This method will save the IP to be used for watson bucketing.
+ void SetIPForWatsonBuckets(UINT_PTR ip)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ipForWatsonBuckets = ip;
+ }
+
+ // This method will return a BOOL to indicate if Watson bucketing IP
+ // is present (or not).
+ BOOL IsIPForWatsonBucketsPresent()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (_ipForWatsonBuckets != NULL);
+ }
+
+ // This method returns the IP for Watson Buckets.
+ UINT_PTR GetIPForWatsonBuckets()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return _ipForWatsonBuckets;
+ }
+
+ // README:
+ // If you modify the order of these fields, make sure to update the definition in
+ // BCL for this object.
+private:
+ STRINGREF _className; //Needed for serialization.
+ OBJECTREF _exceptionMethod; //Needed for serialization.
+ STRINGREF _exceptionMethodString; //Needed for serialization.
+ STRINGREF _message;
+ OBJECTREF _data;
+ OBJECTREF _innerException;
+ STRINGREF _helpURL;
+ I1ARRAYREF _stackTrace;
+ U1ARRAYREF _watsonBuckets;
+ STRINGREF _stackTraceString; //Needed for serialization.
+ STRINGREF _remoteStackTraceString;
+ PTRARRAYREF _dynamicMethods;
+ STRINGREF _source; // Mainly used by VB.
+#ifdef FEATURE_SERIALIZATION
+ OBJECTREF _safeSerializationManager;
+#endif // FEATURE_SERIALIZATION
+ IN_WIN64(void* _xptrs;)
+ IN_WIN64(UINT_PTR _ipForWatsonBuckets;) // Contains the IP of exception for watson bucketing
+ INT32 _remoteStackIndex;
+ INT32 _HResult;
+ IN_WIN32(void* _xptrs;)
+ INT32 _xcode;
+ IN_WIN32(UINT_PTR _ipForWatsonBuckets;) // Contains the IP of exception for watson bucketing
+};
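+
+// Note on the IN_WIN64/IN_WIN32 pairs above (sketch of the intent): the class is
+// packed to 4 bytes (see the pshpack4.h include above), so the pointer-sized
+// fields (_xptrs, _ipForWatsonBuckets) are declared at different points in the
+// sequence on 32-bit and 64-bit builds to keep them naturally aligned while
+// matching the managed System.Exception field layout on each platform.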
+
+// Defined in Contracts.cs
+enum ContractFailureKind
+{
+ CONTRACT_FAILURE_PRECONDITION = 0,
+ CONTRACT_FAILURE_POSTCONDITION,
+ CONTRACT_FAILURE_POSTCONDITION_ON_EXCEPTION,
+ CONTRACT_FAILURE_INVARIANT,
+ CONTRACT_FAILURE_ASSERT,
+ CONTRACT_FAILURE_ASSUME,
+};
+
+typedef DPTR(class ContractExceptionObject) PTR_ContractExceptionObject;
+class ContractExceptionObject : public ExceptionObject
+{
+ friend class MscorlibBinder;
+
+public:
+ ContractFailureKind GetContractFailureKind()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return static_cast<ContractFailureKind>(_Kind);
+ }
+
+private:
+ // keep these in sync with ndp/clr/src/bcl/system/diagnostics/contracts/contractsbcl.cs
+ IN_WIN64(INT32 _Kind;)
+ STRINGREF _UserMessage;
+ STRINGREF _Condition;
+ IN_WIN32(INT32 _Kind;)
+};
+#include "poppack.h"
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<ContractExceptionObject> CONTRACTEXCEPTIONREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef PTR_ContractExceptionObject CONTRACTEXCEPTIONREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+class NumberFormatInfo: public Object
+{
+public:
+ // C++ data members // Corresponding data member in NumberFormatInfo.cs
+ // Also update mscorlib.h when you add/remove fields
+
+ I4ARRAYREF cNumberGroup; // numberGroupSize
+ I4ARRAYREF cCurrencyGroup; // currencyGroupSize
+ I4ARRAYREF cPercentGroup; // percentGroupSize
+
+ STRINGREF sPositive; // positiveSign
+ STRINGREF sNegative; // negativeSign
+ STRINGREF sNumberDecimal; // numberDecimalSeparator
+ STRINGREF sNumberGroup; // numberGroupSeparator
+    STRINGREF sCurrencyGroup; // currencyGroupSeparator
+    STRINGREF sCurrencyDecimal; // currencyDecimalSeparator
+ STRINGREF sCurrency; // currencySymbol
+#ifndef FEATURE_COREFX_GLOBALIZATION
+ STRINGREF sAnsiCurrency; // ansiCurrencySymbol
+#endif
+ STRINGREF sNaN; // nanSymbol
+ STRINGREF sPositiveInfinity; // positiveInfinitySymbol
+ STRINGREF sNegativeInfinity; // negativeInfinitySymbol
+ STRINGREF sPercentDecimal; // percentDecimalSeparator
+ STRINGREF sPercentGroup; // percentGroupSeparator
+ STRINGREF sPercent; // percentSymbol
+ STRINGREF sPerMille; // perMilleSymbol
+
+ PTRARRAYREF sNativeDigits; // nativeDigits (a string array)
+
+#ifndef FEATURE_COREFX_GLOBALIZATION
+ INT32 iDataItem; // Index into the CultureInfo Table. Only used from managed code.
+#endif
+ INT32 cNumberDecimals; // numberDecimalDigits
+ INT32 cCurrencyDecimals; // currencyDecimalDigits
+ INT32 cPosCurrencyFormat; // positiveCurrencyFormat
+ INT32 cNegCurrencyFormat; // negativeCurrencyFormat
+ INT32 cNegativeNumberFormat; // negativeNumberFormat
+ INT32 cPositivePercentFormat; // positivePercentFormat
+ INT32 cNegativePercentFormat; // negativePercentFormat
+ INT32 cPercentDecimals; // percentDecimalDigits
+#ifndef FEATURE_CORECLR
+ INT32 iDigitSubstitution; // digitSubstitution
+#endif
+
+ CLR_BOOL bIsReadOnly; // Is this NumberFormatInfo ReadOnly?
+#ifndef FEATURE_COREFX_GLOBALIZATION
+ CLR_BOOL bUseUserOverride; // Flag to use user override. Only used from managed code.
+#endif
+ CLR_BOOL bIsInvariant; // Is this the NumberFormatInfo for the Invariant Culture?
+#ifndef FEATURE_CORECLR
+ CLR_BOOL bvalidForParseAsNumber; // NEVER USED, DO NOT USE THIS! (Serialized in Whidbey/Everett)
+ CLR_BOOL bvalidForParseAsCurrency; // NEVER USED, DO NOT USE THIS! (Serialized in Whidbey/Everett)
+#endif // !FEATURE_CORECLR
+};
+
+typedef NumberFormatInfo * NUMFMTREF;
+
+//===============================================================================
+// #NullableFeature
+// #NullableArchitecture
+//
+// In a nutshell, it is counterintuitive to have a boxed Nullable<T>, since a boxed
+// object already has a representation for null (the null pointer), and having
+// multiple representations for the 'not present' value just causes grief. Thus the
+// feature is built to make Nullable<T> box to a boxed<T> (not a boxed<Nullable<T>>).
+//
+// We want to do this in a way that does not impact the perf of the runtime in the
+// non-nullable case.
+//
+// To do this we need to
+// * Modify the boxing helper code:JIT_Box (we don't need a special one because
+// the JIT inlines the common case, so this only gets called in uncommon cases)
+// * Make a new helper for the Unbox case (see code:JIT_Unbox_Nullable)
+// * Plumb the JIT to ask for what kind of Boxing helper is needed
+// (see code:CEEInfo.getBoxHelper, code:CEEInfo.getUnBoxHelper)
+// * change all the places in the CLR where we box or unbox by hand, and force
+// them to use code:MethodTable.Box, and code:MethodTable.Unbox which in
+// turn call code:Nullable.Box and code:Nullable.UnBox, most of these
+// are in reflection, and remoting (passing and returning value types).
+//
+// #NullableVerification
+//
+// Sadly, the IL Verifier also needs to know about this change. Basically the 'box'
+// instruction returns a boxed(T) (not a boxed(Nullable<T>)) for the purposes of
+// verification. The JIT finds out what box returns by calling back to the EE with
+// the code:CEEInfo.getTypeForBox API.
+//
+// #NullableDebugging
+//
+// Sadly, because the debugger also does its own boxing 'by hand' for expression
+// evaluation inside Visual Studio, it means that debuggers also need to be aware
+// of the fact that Nullable<T> boxes to a boxed<T>. It is the responsibility of
+// debuggers to follow this convention (which is why this is sad).
+//
+
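+// Illustrative sketch (hypothetical caller, not part of this file) of the
+// resulting boxing behavior, using the helpers declared on code:Nullable below:
+//
+//   OBJECTREF boxed = Nullable::Box(pNullableData, pNullableMT);
+//   // boxed == NULL       if the Nullable<T>'s HasValue field was false
+//   // boxed is a boxed T  (not a boxed Nullable<T>) otherwise
+//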
+//===============================================================================
+// Nullable represents the managed generic value type Nullable<T>
+//
+// The runtime has special logic for this value class. When it is boxed
+// it becomes either null or a boxed T. Similarly a boxed T can be unboxed
+// either as a T (as normal), or as a Nullable<T>
+//
+// See code:Nullable#NullableArchitecture for more.
+//
+class Nullable {
+ Nullable(); // This is purposefully undefined. Do not make instances
+ // of this class.
+public:
+ static void CheckFieldOffsets(TypeHandle nullableType);
+ static BOOL IsNullableType(TypeHandle nullableType);
+ static BOOL IsNullableForType(TypeHandle nullableType, MethodTable* paramMT);
+ static BOOL IsNullableForTypeNoGC(TypeHandle nullableType, MethodTable* paramMT);
+
+ static OBJECTREF Box(void* src, MethodTable* nullable);
+ static BOOL UnBox(void* dest, OBJECTREF boxedVal, MethodTable* destMT);
+ static BOOL UnBoxNoGC(void* dest, OBJECTREF boxedVal, MethodTable* destMT);
+ static void UnBoxNoCheck(void* dest, OBJECTREF boxedVal, MethodTable* destMT);
+ static OBJECTREF BoxedNullableNull(TypeHandle nullableType) { return 0; }
+
+ // if 'Obj' is a true boxed nullable, return the form we want (either null or a boxed T)
+ static OBJECTREF NormalizeBox(OBJECTREF obj);
+
+ static inline CLR_BOOL HasValue(void *src, MethodTable *nullableMT)
+ {
+ Nullable *nullable = (Nullable *)src;
+ return *(nullable->HasValueAddr(nullableMT));
+ }
+
+ static inline void *Value(void *src, MethodTable *nullableMT)
+ {
+ Nullable *nullable = (Nullable *)src;
+ return nullable->ValueAddr(nullableMT);
+ }
+
+private:
+ static BOOL IsNullableForTypeHelper(MethodTable* nullableMT, MethodTable* paramMT);
+ static BOOL IsNullableForTypeHelperNoGC(MethodTable* nullableMT, MethodTable* paramMT);
+
+ CLR_BOOL* HasValueAddr(MethodTable* nullableMT);
+ void* ValueAddr(MethodTable* nullableMT);
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<ExceptionObject> EXCEPTIONREF;
+#else // USE_CHECKED_OBJECTREFS
+typedef PTR_ExceptionObject EXCEPTIONREF;
+#endif // USE_CHECKED_OBJECTREFS
+
+#endif // _OBJECT_H_
diff --git a/src/vm/object.inl b/src/vm/object.inl
new file mode 100644
index 0000000000..d9de9e7c71
--- /dev/null
+++ b/src/vm/object.inl
@@ -0,0 +1,307 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// OBJECT.INL
+//
+// Definitions of inline functions of a COM+ Object
+//
+
+
+#ifndef _OBJECT_INL_
+#define _OBJECT_INL_
+
+#include "object.h"
+
+#if !defined(BINDER)
+inline PTR_VOID Object::UnBox() // if it is a value class, get the pointer to the first field
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(GetMethodTable()->IsValueType());
+ _ASSERTE(!Nullable::IsNullableType(TypeHandle(GetMethodTable())));
+
+ return dac_cast<PTR_BYTE>(this) + sizeof(*this);
+}
+
+inline ADIndex Object::GetAppDomainIndex()
+{
+ WRAPPER_NO_CONTRACT;
+#ifndef _DEBUG
+ // ok to cast to AppDomain because we know it's a real AppDomain if it's not shared
+ if (!GetGCSafeMethodTable()->IsDomainNeutral())
+ return (dac_cast<PTR_AppDomain>(GetGCSafeMethodTable()->GetDomain())->GetIndex());
+#endif
+ return GetHeader()->GetAppDomainIndex();
+}
+
+inline DWORD Object::GetNumComponents()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+    // Yes, we may not even be an array, which means we are reading some other part of the object's
+    // memory - however, the result is only ever used multiplied by ComponentSize, which is zero for
+    // types without components. Therefore, m_NumComponents must be the first field in ArrayBase.
+ return dac_cast<PTR_ArrayBase>(this)->m_NumComponents;
+}
+
+inline SIZE_T Object::GetSize()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+    // mask the alignment bits because this method is called during GC
+ MethodTable *mT = GetGCSafeMethodTable();
+
+    // strings have component size 2; all other non-arrays should have 0
+ _ASSERTE(( mT->GetComponentSize() <= 2) || mT->IsArray());
+
+ size_t s = mT->GetBaseSize();
+ if (mT->HasComponentSize())
+ s += (size_t)GetNumComponents() * mT->RawGetComponentSize();
+ return s;
+}
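+
+// Worked example (sketch): for a string of n characters, this computes
+// GetBaseSize() + n * 2 (strings have component size 2 == sizeof(WCHAR)),
+// which lines up with StringObject::GetSize below, assuming the base size
+// already accounts for the null-terminator slot.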
+
+__forceinline /*static*/ SIZE_T StringObject::GetSize(DWORD strLen)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Extra WCHAR for null terminator
+ return ObjSizeOf(StringObject) + sizeof(WCHAR) + strLen * sizeof(WCHAR);
+}
+
+#ifdef DACCESS_COMPILE
+
+inline void Object::EnumMemoryRegions(void)
+{
+ SUPPORTS_DAC;
+
+ PTR_MethodTable methodTable = GetGCSafeMethodTable();
+
+ TADDR ptr = dac_cast<TADDR>(this) - sizeof(ObjHeader);
+ SIZE_T size = sizeof(ObjHeader) + sizeof(Object);
+
+    // If it is unsafe to touch the MethodTable, just enumerate
+    // the base object.
+ if (methodTable.IsValid())
+ {
+ size = sizeof(ObjHeader) + GetSize();
+ }
+
+#if defined (_DEBUG)
+    // Test hook: when testing on debug builds, we want an easy way to test that the following while
+    // loop correctly terminates in the face of ridiculous stuff from the target.
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DumpGeneration_IntentionallyCorruptDataFromTarget) == 1)
+ {
+ // Pretend all objects are incredibly large.
+ size |= 0xefefefef << 28;
+ }
+#endif // defined (_DEBUG)
+
+    // Unfortunately, DacEnumMemoryRegion takes only a ULONG32 size argument
+    while (size > 0) {
+        // Use 0x10000000 instead of MAX_ULONG32 so that the chunks stay aligned
+ SIZE_T chunk = min(size, 0x10000000);
+ // If for any reason we can't enumerate the memory, stop. This would generally mean
+ // that we have target corruption, or that the target is executing, etc.
+ if (!DacEnumMemoryRegion(ptr, chunk))
+ break;
+ ptr += chunk; size -= chunk;
+ }
+
+    // As an Object is very low-level, don't propagate
+    // the enumeration to the MethodTable.
+}
+
+#endif // #ifdef DACCESS_COMPILE
+
+
+inline TypeHandle ArrayBase::GetTypeHandle() const
+{
+ WRAPPER_NO_CONTRACT;
+ return GetTypeHandle(GetMethodTable());
+}
+
+inline /* static */ TypeHandle ArrayBase::GetTypeHandle(MethodTable * pMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(pMT != NULL);
+
+ // This ensures that we can always get the typehandle for an object in hand
+ // without triggering the noisy parts of the loader.
+ //
+ // The debugger can cause this routine to be called on an unmanaged thread
+ // so this really is important.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ CorElementType kind = pMT->GetInternalCorElementType();
+ unsigned rank = pMT->GetRank();
+ // Note that this load should always succeed because there is an invariant that
+ // if we have allocated an array object of type T then the ArrayTypeDesc
+ // for T[] is available and restored
+
+ // @todo This should be turned into a probe with a hard SO when we have one
+ CONTRACT_VIOLATION(SOToleranceViolation);
+ // == FailIfNotLoadedOrNotRestored
+ TypeHandle arrayType = ClassLoader::LoadArrayTypeThrowing(pMT->GetApproxArrayElementTypeHandle(), kind, rank, ClassLoader::DontLoadTypes);
+ CONSISTENCY_CHECK(!arrayType.IsNull());
+ return(arrayType);
+}
+
+// Get the CorElementType for the elements in the array. Avoids creating a TypeHandle
+inline CorElementType ArrayBase::GetArrayElementType() const
+{
+ WRAPPER_NO_CONTRACT;
+ return GetMethodTable()->GetArrayElementType();
+}
+
+inline unsigned ArrayBase::GetRank() const
+{
+ WRAPPER_NO_CONTRACT;
+ return GetMethodTable()->GetRank();
+}
+
+// Total element count for the array
+inline DWORD ArrayBase::GetNumComponents() const
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_NumComponents;
+}
+#endif //!BINDER
+
+inline /* static */ unsigned ArrayBase::GetDataPtrOffset(MethodTable* pMT)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+#if !defined(DACCESS_COMPILE)
+ _ASSERTE(pMT->IsArray());
+#endif // DACCESS_COMPILE
+ // The -sizeof(ObjHeader) is because of the sync block, which is before "this"
+ return pMT->GetBaseSize() - sizeof(ObjHeader);
+}
+
+inline /* static */ unsigned ArrayBase::GetBoundsOffset(MethodTable* pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(pMT->IsArray());
+ if (!pMT->IsMultiDimArray())
+ return(offsetof(ArrayBase, m_NumComponents));
+ _ASSERTE(pMT->GetInternalCorElementType() == ELEMENT_TYPE_ARRAY);
+ return sizeof(ArrayBase);
+}
+inline /* static */ unsigned ArrayBase::GetLowerBoundsOffset(MethodTable* pMT)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pMT->IsArray());
+ // There is no good offset for this for a SZARRAY.
+ _ASSERTE(pMT->GetInternalCorElementType() == ELEMENT_TYPE_ARRAY);
+ // Lower bounds info is after total bounds info
+ // and total bounds info has rank elements
+ return GetBoundsOffset(pMT) +
+ dac_cast<PTR_ArrayClass>(pMT->GetClass())->GetRank() *
+ sizeof(INT32);
+}
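+
+// Layout of a multi-dimensional (ELEMENT_TYPE_ARRAY) array implied by the
+// offset helpers above (sketch):
+//
+//   [ ArrayBase ][ bounds: rank * INT32 ][ lower bounds: rank * INT32 ][ elements ]
+//
+// For an SZARRAY the bounds collapse to the single m_NumComponents field.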
+#ifndef BINDER
+
+// Get the element type for the array; this works whether the element
+// type is stored in the array or not
+inline TypeHandle ArrayBase::GetArrayElementTypeHandle() const
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+ return GetGCSafeMethodTable()->GetApproxArrayElementTypeHandle();
+}
+
+//===============================================================================
+// Returns true if the given type is Nullable<T> where T is equivalent to paramMT
+
+__forceinline BOOL Nullable::IsNullableForType(TypeHandle type, MethodTable* paramMT)
+{
+ if (type.IsTypeDesc())
+ return FALSE;
+ if (!type.AsMethodTable()->HasInstantiation()) // shortcut, if it is not generic it can't be Nullable<T>
+ return FALSE;
+ return Nullable::IsNullableForTypeHelper(type.AsMethodTable(), paramMT);
+}
+
+//===============================================================================
+// Returns true if the given type is Nullable<T> where T == paramMT
+
+__forceinline BOOL Nullable::IsNullableForTypeNoGC(TypeHandle type, MethodTable* paramMT)
+{
+ if (type.IsTypeDesc())
+ return FALSE;
+ if (!type.AsMethodTable()->HasInstantiation()) // shortcut, if it is not generic it can't be Nullable<T>
+ return FALSE;
+ return Nullable::IsNullableForTypeHelperNoGC(type.AsMethodTable(), paramMT);
+}
+
+//===============================================================================
+// Returns true if this type is Nullable<T> for some T.
+
+inline BOOL Nullable::IsNullableType(TypeHandle type)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ if (type.IsTypeDesc())
+ return FALSE;
+
+ return type.AsMethodTable()->IsNullable();
+}
+
+inline TypeHandle Object::GetTypeHandle()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(m_pMethTab == GetGCSafeMethodTable());
+
+ if (m_pMethTab->IsArray())
+ return (dac_cast<PTR_ArrayBase>(this))->GetTypeHandle();
+ else
+ return TypeHandle(m_pMethTab);
+}
+
+inline TypeHandle Object::GetGCSafeTypeHandle() const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodTable * pMT = GetGCSafeMethodTable();
+ _ASSERTE(pMT != NULL);
+
+ if (pMT->IsArray())
+ return ArrayBase::GetTypeHandle(pMT);
+ else
+ return TypeHandle(pMT);
+}
+
+#endif //!BINDER
+
+#endif // _OBJECT_INL_
diff --git a/src/vm/objectclone.cpp b/src/vm/objectclone.cpp
new file mode 100644
index 0000000000..43ce79f497
--- /dev/null
+++ b/src/vm/objectclone.cpp
@@ -0,0 +1,3866 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: ObjectClone.cpp
+//
+
+//
+
+
+#include "common.h"
+
+#ifdef FEATURE_REMOTING
+#include "objectclone.h"
+#include "frames.h"
+#include "assembly.hpp"
+#include "field.h"
+#include "security.h"
+#include "virtualcallstub.h"
+#include "crossdomaincalls.h"
+#include "callhelpers.h"
+#include "jitinterface.h"
+#include "typestring.h"
+#include "typeparse.h"
+#include "runtimehandles.h"
+#include "appdomain.inl"
+
+// Define the following to re-enable object cloner strict mode (where we require source fields for non-optional destination fields
+// and don't attempt to load assemblies we can't find via display name by using partial names instead).
+//#define OBJECT_CLONER_STRICT_MODE
+
+void MakeIDeserializationCallback(OBJECTREF refTarget);
+
+MethodDesc *GetInterfaceMethodImpl(MethodTable *pMT, MethodTable *pItfMT, WORD wSlot)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ MethodDesc *pMeth = NULL;
+ DispatchSlot slot(pMT->FindDispatchSlot(pItfMT->GetTypeID(), (UINT32)wSlot));
+ CONSISTENCY_CHECK(!slot.IsNull());
+ pMeth = slot.GetMethodDesc();
+ return pMeth;
+}
+
+// Given a FieldDesc which may be representative and an object which contains said field, return the actual type of the field. This
+// works even when called from a different appdomain from which the type was loaded (though naturally it is the caller's
+// responsibility to ensure such an appdomain cannot be unloaded during the processing of this method).
+TypeHandle LoadExactFieldType(FieldDesc *pFD, OBJECTREF orefParent, AppDomain *pDomain)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ MethodTable *pEnclosingMT = orefParent->GetMethodTable();
+
+ // Set up a field signature with the owning type providing a type context for any type variables.
+ MetaSig sig(pFD, TypeHandle(pEnclosingMT));
+ sig.NextArg();
+
+ // If the enclosing type is resident to this domain or domain neutral and loaded in this domain then we can simply go get it.
+ // The logic is trickier (and more expensive to calculate) for generic types, so skip the optimization there.
+ if (pEnclosingMT->GetDomain() == GetAppDomain() ||
+ (pEnclosingMT->IsDomainNeutral() &&
+ !pEnclosingMT->HasInstantiation() &&
+ pEnclosingMT->GetAssembly()->FindDomainAssembly(GetAppDomain())))
+ return sig.GetLastTypeHandleThrowing();
+
+ TypeHandle retTH;
+
+ // Otherwise we have to do this the expensive way -- switch to the home domain for the type lookup.
+ ENTER_DOMAIN_PTR(pDomain, ADV_RUNNINGIN);
+ retTH = sig.GetLastTypeHandleThrowing();
+ END_DOMAIN_TRANSITION;
+
+ return retTH;
+}
+
+extern TypeHandle GetTypeByName( _In_opt_z_ LPUTF8 szFullClassName,
+ BOOL bThrowOnError,
+ BOOL bIgnoreCase,
+ StackCrawlMark *stackMark,
+ BOOL *pbAssemblyIsLoading);
+
+#ifndef DACCESS_COMPILE
+#define CUSTOM_GCPROTECT_BEGIN(context) do { \
+ FrameWithCookie<GCSafeCollectionFrame> __gcframe(context); \
+ /* work around unreachable code warning */ \
+ if (true) { DEBUG_ASSURE_NO_RETURN_BEGIN(GCPROTECT)
+
+#define CUSTOM_GCPROTECT_END() \
+ DEBUG_ASSURE_NO_RETURN_END(GCPROTECT) } \
+ __gcframe.Pop(); } while(0)
+
+#else // #ifndef DACCESS_COMPILE
+
+#define CUSTOM_GCPROTECT_BEGIN(context)
+#define CUSTOM_GCPROTECT_END()
+
+#endif // #ifndef DACCESS_COMPILE
+
+int GCSafeObjectHashTable::HasID(OBJECTREF refObj, OBJECTREF *newObj)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ BOOL seenBefore = FALSE;
+ *newObj = NULL;
+ int index = FindElement(refObj, seenBefore);
+
+ if (seenBefore)
+ {
+ _ASSERTE(index < (int)m_currArraySize);
+ *newObj = m_newObjects[index];
+ return m_ids[index];
+ }
+
+ return -1;
+}
+
+// returns the object id
+int GCSafeObjectHashTable::AddObject(OBJECTREF refObj, OBJECTREF newObj)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ int index = -1;
+ GCPROTECT_BEGIN(refObj);
+ GCPROTECT_BEGIN(newObj);
+
+ if (m_count > m_currArraySize / 2)
+ {
+ Resize();
+ }
+
+ BOOL seenBefore = FALSE;
+ index = FindElement(refObj, seenBefore);
+
+ _ASSERTE(index >= 0 && index < (int)m_currArraySize);
+ if (seenBefore)
+ {
+ _ASSERTE(!"Adding an object thats already present");
+ }
+ else
+ {
+ m_objects[index] = refObj;
+ m_newObjects[index] = newObj;
+ m_ids[index] = ++m_count;
+ }
+
+ GCPROTECT_END();
+ GCPROTECT_END();
+
+ return m_ids[index];
+}
+
+// returns the object id
+int GCSafeObjectHashTable::UpdateObject(OBJECTREF refObj, OBJECTREF newObj)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ int index = -1;
+ GCPROTECT_BEGIN(refObj);
+ GCPROTECT_BEGIN(newObj);
+
+ BOOL seenBefore = FALSE;
+ index = FindElement(refObj, seenBefore);
+
+ _ASSERTE(index >= 0 && index < (int)m_currArraySize);
+ if (!seenBefore)
+ {
+ _ASSERTE(!"An object has to exist in the table, to update it");
+ }
+ else
+ {
+ _ASSERTE(m_objects[index] == refObj);
+ m_newObjects[index] = newObj;
+ }
+
+ GCPROTECT_END();
+ GCPROTECT_END();
+
+ return m_ids[index];
+}
+
+// returns index into array where obj was found or will fit in
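+//
+// Probing sketch (illustrative numbers): the table is a set of buckets of
+// NUM_SLOTS_PER_BUCKET slots each; when a bucket is full, the probe advances by
+// a second hash, increment = 1 + (hashcode % (numBuckets - 2)). E.g. with
+// numBuckets = 11 and hashcode = 25: start bucket 25 % 11 = 3, increment
+// 1 + (25 % 9) = 8, so the probe visits buckets 3, 0, 8, 5, ... -- classic
+// double hashing, which terminates as long as some bucket has a free slot.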
+int GCSafeObjectHashTable::FindElement(OBJECTREF refObj, BOOL &seenBefore)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ int currentNumBuckets = m_currArraySize / NUM_SLOTS_PER_BUCKET;
+ int hashcode = 0;
+ GCPROTECT_BEGIN(refObj);
+ hashcode = refObj->GetHashCodeEx();
+ GCPROTECT_END();
+
+ hashcode &= 0x7FFFFFFF; // ignore sign bit
+ int hashIncrement = (1+((hashcode)%(currentNumBuckets-2)));
+#ifdef _DEBUG
+ int numLoops = 0;
+#endif
+
+ do
+ {
+ int index = ((unsigned)hashcode % currentNumBuckets) * NUM_SLOTS_PER_BUCKET;
+ _ASSERTE(index >= 0 && index < (int)m_currArraySize);
+ for (int i = index; i < index + NUM_SLOTS_PER_BUCKET; i++)
+ {
+ if (m_objects[i] == refObj)
+ {
+ seenBefore = TRUE;
+ return i;
+ }
+
+ if (m_objects[i] == NULL)
+ {
+ seenBefore = FALSE;
+ return i;
+ }
+ }
+ hashcode += hashIncrement;
+#ifdef _DEBUG
+ if (++numLoops > currentNumBuckets)
+ _ASSERTE(!"Looped too many times, trying to find object in hashtable. If hitting ignore doesnt seem to help, then contact Ashok");
+#endif
+ }while (true);
+
+ _ASSERTE(!"Not expected to reach here in GCSafeObjectHashTable::FindElement");
+ return -1;
+}
+
+void GCSafeObjectHashTable::Resize()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+ // Allocate new space
+ DWORD newSize = m_currArraySize * 2;
+ for (int i = 0; (DWORD) i < sizeof(g_rgPrimes)/sizeof(DWORD); i++)
+ {
+ if (g_rgPrimes[i] > newSize)
+ {
+ newSize = g_rgPrimes[i];
+ break;
+ }
+ }
+
+ newSize *= NUM_SLOTS_PER_BUCKET;
+ NewArrayHolder<OBJECTREF> refTemp (new OBJECTREF[newSize]);
+ ZeroMemory((void *)refTemp, sizeof(OBJECTREF) * newSize);
+
+ NewArrayHolder<OBJECTREF> refTempNewObj (new OBJECTREF[newSize]);
+#ifdef USE_CHECKED_OBJECTREFS
+ ZeroMemory((void *)refTempNewObj, sizeof(OBJECTREF) * newSize);
+#endif
+
+ NewArrayHolder<int> bTemp (new int[newSize]);
+ ZeroMemory((void *)bTemp, sizeof(int) * newSize);
+
+ // Copy over objects and data
+ NewArrayHolder<OBJECTREF> refOldObj (m_objects);
+ NewArrayHolder<OBJECTREF> refOldNewObj (m_newObjects);
+ NewArrayHolder<int> oldIds (m_ids);
+ DWORD oldArrSize = m_currArraySize;
+
+ if (oldIds == (int *)&m_dataOnStack[0])
+ {
+ refOldObj.SuppressRelease();
+ refOldNewObj.SuppressRelease();
+ oldIds.SuppressRelease();
+ }
+
+ refTemp.SuppressRelease();
+ refTempNewObj.SuppressRelease();
+ bTemp.SuppressRelease();
+
+ m_ids = bTemp;
+ m_objects = refTemp;
+ m_newObjects = refTempNewObj;
+ m_currArraySize = newSize;
+
+ for (DWORD i = 0; i < oldArrSize; i++)
+ {
+ if (refOldObj[i] == NULL)
+ continue;
+
+ BOOL seenBefore = FALSE;
+ int newIndex = FindElement(refOldObj[i], seenBefore);
+
+ if (!seenBefore)
+ {
+ _ASSERTE(newIndex < (int)m_currArraySize);
+ m_objects[newIndex] = refOldObj[i];
+ m_newObjects[newIndex] = refOldNewObj[i];
+ m_ids[newIndex] = oldIds[i];
+ }
+ else
+ _ASSERTE(!"Object seen twice while rehashing");
+ }
+
+#ifdef USE_CHECKED_OBJECTREFS
+ for(DWORD i = 0; i < m_currArraySize; i++)
+ Thread::ObjectRefProtected(&m_objects[i]);
+ for(DWORD i = 0; i < m_currArraySize; i++)
+ Thread::ObjectRefProtected(&m_newObjects[i]);
+#endif
+
+}
+
+void GCSafeObjectTable::Push(OBJECTREF refObj, OBJECTREF refParent, OBJECTREF refAux, QueuedObjectInfo * pQOI)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+ _ASSERTE(refObj != NULL);
+ _ASSERTE(m_QueueType == LIFO_QUEUE);
+ _ASSERTE(m_head == 0 && m_dataHead == 0);
+
+ // First find the size of the object info
+ DWORD size = pQOI->GetSize();
+
+ // Check if resize is needed
+ EnsureSize(size);
+
+ // Push on the stack, first the objects
+ DWORD index = m_count;
+ if (m_Objects1)
+ m_Objects1[index] = refObj;
+#ifdef _DEBUG
+ else
+ _ASSERTE(refObj == NULL);
+#endif
+ if (m_Objects2)
+ m_Objects2[index] = refParent;
+#ifdef _DEBUG
+ else
+ _ASSERTE(refParent == NULL);
+#endif
+ if (m_Objects3)
+ m_Objects3[index] = refAux;
+#ifdef _DEBUG
+ else
+ _ASSERTE(refAux == NULL);
+#endif
+
+ // then the info
+ if (m_dataIndices)
+ m_dataIndices[index] = m_numDataBytes;
+ BYTE *pData = &m_data[m_numDataBytes];
+ memcpy(pData, (VOID*)pQOI, size);
+
+ m_numDataBytes += size;
+ m_count++;
+}
+
+OBJECTREF GCSafeObjectTable::Pop(OBJECTREF *refParent, OBJECTREF *refAux, QueuedObjectInfo ** pQOI)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_QueueType == LIFO_QUEUE);
+ _ASSERTE(m_head == 0 && m_dataHead == 0);
+ _ASSERTE(m_dataIndices != NULL);
+
+ *pQOI = NULL;
+ OBJECTREF refRet = NULL;
+ *refParent = NULL;
+ *refAux = NULL;
+ if (m_count == 0)
+ return NULL;
+
+ m_count--;
+ refRet = m_Objects1[m_count];
+ if (m_Objects2)
+ *refParent = m_Objects2[m_count];
+ if (m_Objects3)
+ *refAux = m_Objects3[m_count];
+ *pQOI = (QueuedObjectInfo *) &m_data[m_dataIndices[m_count]];
+
+ m_numDataBytes -= (*pQOI)->GetSize();
+ return refRet;
+}
+
+void GCSafeObjectTable::SetAt(DWORD index, OBJECTREF refObj, OBJECTREF refParent, OBJECTREF refAux, QueuedObjectInfo * pQOI)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+ _ASSERTE(refObj != NULL);
+#ifdef _DEBUG
+ if (m_QueueType == LIFO_QUEUE)
+ _ASSERTE(index >= 0 && index < m_count);
+ else
+ _ASSERTE(index < m_currArraySize);
+#endif
+
+ // First find the size of the object info
+ DWORD size = pQOI->GetSize();
+
+ // Push on the stack, first the objects
+ m_Objects1[index] = refObj;
+ if (m_Objects2)
+ m_Objects2[index] = refParent;
+ if (m_Objects3)
+ m_Objects3[index] = refAux;
+
+ // then the info
+ _ASSERTE(m_dataIndices != NULL);
+
+ QueuedObjectInfo *pData = (QueuedObjectInfo *)&m_data[m_dataIndices[index]];
+ _ASSERTE(pData->GetSize() == size);
+
+ memcpy(pData, (VOID*)pQOI, size);
+}
+
+OBJECTREF GCSafeObjectTable::GetAt(DWORD index, OBJECTREF *refParent, OBJECTREF *refAux, QueuedObjectInfo ** pQOI)
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef _DEBUG
+ if (m_QueueType == LIFO_QUEUE)
+ _ASSERTE(index >= 0 && index < m_count);
+ else
+ _ASSERTE(index < m_currArraySize);
+#endif
+
+ OBJECTREF refRet = m_Objects1[index];
+ if (m_Objects2)
+ *refParent = m_Objects2[index];
+ else
+ *refParent = NULL;
+ if (m_Objects3)
+ *refAux = m_Objects3[index];
+ else
+ *refAux = NULL;
+
+ _ASSERTE(m_dataIndices != NULL);
+
+ *pQOI = (QueuedObjectInfo *) &m_data[m_dataIndices[index]];
+
+ return refRet;
+}
+
+void GCSafeObjectTable::Enqueue(OBJECTREF refObj, OBJECTREF refParent, OBJECTREF refAux, QueuedObjectInfo *pQOI)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(refObj != NULL);
+ _ASSERTE(m_QueueType == FIFO_QUEUE);
+
+ // First find the size of the object info
+ DWORD size = pQOI ? pQOI->GetSize() : 0;
+
+ // Check if resize is needed
+ EnsureSize(size);
+
+ // Append to queue, first the objects
+ DWORD index = (m_head + m_count) % m_currArraySize;
+ m_Objects1[index] = refObj;
+ if (m_Objects2)
+ m_Objects2[index] = refParent;
+ if (m_Objects3)
+ m_Objects3[index] = refAux;
+
+ // then the info
+ if (pQOI)
+ {
+ DWORD dataIndex = (m_dataHead + m_numDataBytes) % (m_currArraySize * MAGIC_FACTOR);
+ BYTE *pData = &m_data[dataIndex];
+ memcpy(pData, (VOID*)pQOI, size);
+
+ if (m_dataIndices)
+ m_dataIndices[index] = dataIndex;
+ m_numDataBytes += size;
+ }
+
+ m_count++;
+}
+
+OBJECTREF GCSafeObjectTable::Dequeue(OBJECTREF *refParent, OBJECTREF *refAux, QueuedObjectInfo ** pQOI)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(m_QueueType == FIFO_QUEUE);
+
+ if (pQOI)
+ *pQOI = NULL;
+ OBJECTREF refRet = NULL;
+ *refParent = NULL;
+ *refAux = NULL;
+ if (m_count == 0)
+ return NULL;
+
+ refRet = m_Objects1[m_head];
+ if (m_Objects2)
+ *refParent = m_Objects2[m_head];
+ if (m_Objects3)
+ *refAux = m_Objects3[m_head];
+
+ if (pQOI)
+ {
+ *pQOI = (QueuedObjectInfo *) &m_data[m_dataHead];
+
+ m_dataHead = (m_dataHead + (*pQOI)->GetSize()) % (m_currArraySize * MAGIC_FACTOR);
+
+ m_numDataBytes -= (*pQOI)->GetSize();
+ }
+
+ m_head = (m_head + 1) % m_currArraySize;
+ m_count--;
+ return refRet;
+}
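+
+// Ring-buffer arithmetic sketch (sizes illustrative): with m_currArraySize == 4,
+// m_head == 3 and m_count == 2, the live elements sit at indices 3 and 0, and
+// the next Enqueue writes at (3 + 2) % 4 == 1. The data bytes form a parallel
+// ring of m_currArraySize * MAGIC_FACTOR bytes walked the same way via
+// m_dataHead.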
+
+OBJECTREF GCSafeObjectTable::Peek(OBJECTREF *refParent, OBJECTREF *refAux, QueuedObjectInfo **pQOI)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ *pQOI = NULL;
+ *refParent = NULL;
+ *refAux = NULL;
+ if (m_count == 0)
+ return NULL;
+
+ DWORD indexToPeek;
+ if (m_QueueType == LIFO_QUEUE)
+ {
+ indexToPeek = m_count - 1;
+ return GetAt(indexToPeek, refParent, refAux, pQOI);
+ }
+ else
+ {
+ indexToPeek = m_head;
+ if (m_Objects2)
+ *refParent = m_Objects2[m_head];
+ if (m_Objects3)
+ *refAux = m_Objects3[m_head];
+ *pQOI = (QueuedObjectInfo *) &m_data[m_dataHead];
+ return m_Objects1[m_head];
+ }
+
+}
+
+void GCSafeObjectTable::EnsureSize(DWORD requiredDataSize)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+ // Check whether the object array is large enough
+ if (m_count == m_currArraySize)
+ {
+ Resize();
+ return;
+ }
+
+ // Check whether the data area is large enough
+ if (m_numDataBytes + requiredDataSize > m_currArraySize * MAGIC_FACTOR)
+ {
+ Resize();
+ return;
+ }
+
+ if (m_QueueType == FIFO_QUEUE)
+ {
+ // Will the current QueuedObjectInfo go beyond the end of the array?
+ if (m_dataHead + m_numDataBytes + requiredDataSize > m_currArraySize * MAGIC_FACTOR)
+ {
+ Resize();
+ return;
+ }
+ }
+}
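+
+// EnsureSize grows the table in three cases: the object ring is full, the data
+// ring cannot take requiredDataSize more bytes, or (FIFO only) the new record
+// would run past the end of the data ring. The last check is conservative;
+// e.g. with a 1024-byte data ring, m_dataHead == 1000, m_numDataBytes == 0 and
+// a 40-byte record (numbers illustrative), 1000 + 0 + 40 > 1024 forces a
+// Resize that re-linearizes the queue so records never wrap.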
+
+void GCSafeObjectTable::Resize()
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+ // Allocate new space
+ DWORD newSize = m_currArraySize * 2;
+ NewArrayHolder<OBJECTREF> refTemp (NULL);
+ NewArrayHolder<OBJECTREF> refParentTemp (NULL);
+ NewArrayHolder<OBJECTREF> refAuxTemp (NULL);
+
+ refTemp = new OBJECTREF[newSize];
+ if (m_Objects2)
+ refParentTemp = new OBJECTREF[newSize];
+ if (m_Objects3)
+ refAuxTemp = new OBJECTREF[newSize];
+
+#ifdef USE_CHECKED_OBJECTREFS
+ ZeroMemory((void *)refTemp, sizeof(OBJECTREF) * newSize);
+ if (m_Objects2)
+ ZeroMemory((void *)refParentTemp, sizeof(OBJECTREF) * newSize);
+ if (m_Objects3)
+ ZeroMemory((void *)refAuxTemp, sizeof(OBJECTREF) * newSize);
+#endif
+
+ NewArrayHolder<BYTE> bTemp (NULL);
+ NewArrayHolder<DWORD> dwIndicesTemp (NULL);
+
+ bTemp = new BYTE[newSize * MAGIC_FACTOR];
+ if (m_dataIndices)
+ dwIndicesTemp = new DWORD[newSize];
+
+ // Copy over objects and data
+ if (m_QueueType == LIFO_QUEUE || (m_QueueType == FIFO_QUEUE && m_head == 0))
+ {
+ void *pSrc = (void *)&m_Objects1[0];
+ void *pDest = (void *)&refTemp[0];
+ memcpyUnsafe(pDest, pSrc, m_count * sizeof(OBJECTREF));
+
+ if (m_Objects2)
+ {
+ pSrc = (void *)&m_Objects2[0];
+ pDest = (void *)&refParentTemp[0];
+ memcpyUnsafe(pDest, pSrc, m_count * sizeof(OBJECTREF));
+ }
+
+ if (m_Objects3)
+ {
+ pSrc = (void *)&m_Objects3[0];
+ pDest = (void *)&refAuxTemp[0];
+ memcpyUnsafe(pDest, pSrc, m_count * sizeof(OBJECTREF));
+ }
+
+ pSrc = (void *)&m_data[0];
+ pDest = (void *)&bTemp[0];
+ memcpyNoGCRefs(pDest, pSrc, m_numDataBytes);
+
+ if (m_dataIndices)
+ {
+ pSrc = (void *)&m_dataIndices[0];
+ pDest = (void *)&dwIndicesTemp[0];
+ memcpyNoGCRefs(pDest, pSrc, m_count * sizeof(DWORD));
+ }
+
+ }
+ else
+ {
+ _ASSERTE(m_QueueType == FIFO_QUEUE && m_head != 0);
+ _ASSERTE(m_currArraySize > m_head);
+ DWORD numObjRefsToCopy = (m_count > m_currArraySize - m_head ? m_currArraySize - m_head : m_count);
+
+ void *pSrc = (void *)&m_Objects1[m_head];
+ void *pDest = (void *)&refTemp[0];
+ memcpyUnsafe(pDest, pSrc, numObjRefsToCopy * sizeof(OBJECTREF));
+ pSrc = (void *)&m_Objects1[0];
+ pDest = (void *)&refTemp[numObjRefsToCopy];
+ memcpyUnsafe(pDest, pSrc, (m_count - numObjRefsToCopy) * sizeof(OBJECTREF));
+
+ if (m_Objects2)
+ {
+ pSrc = (void *)&m_Objects2[m_head];
+ pDest = (void *)&refParentTemp[0];
+ memcpyUnsafe(pDest, pSrc, numObjRefsToCopy * sizeof(OBJECTREF));
+ pSrc = (void *)&m_Objects2[0];
+ pDest = (void *)&refParentTemp[numObjRefsToCopy];
+ memcpyUnsafe(pDest, pSrc, (m_count - numObjRefsToCopy) * sizeof(OBJECTREF));
+ }
+
+ if (m_Objects3)
+ {
+ pSrc = (void *)&m_Objects3[m_head];
+ pDest = (void *)&refAuxTemp[0];
+ memcpyUnsafe(pDest, pSrc, numObjRefsToCopy * sizeof(OBJECTREF));
+ pSrc = (void *)&m_Objects3[0];
+ pDest = (void *)&refAuxTemp[numObjRefsToCopy];
+ memcpyUnsafe(pDest, pSrc, (m_count - numObjRefsToCopy) * sizeof(OBJECTREF));
+ }
+
+ if (m_dataIndices)
+ {
+ pSrc = (void *)&m_dataIndices[m_head];
+ pDest = (void *)&dwIndicesTemp[0];
+ memcpyUnsafe(pDest, pSrc, numObjRefsToCopy * sizeof(DWORD));
+ pSrc = (void *)&m_dataIndices[0];
+ pDest = (void *)&dwIndicesTemp[numObjRefsToCopy];
+ memcpyUnsafe(pDest, pSrc, (m_count - numObjRefsToCopy) * sizeof(DWORD));
+ }
+
+ DWORD numBytesToCopy = (m_numDataBytes > ((m_currArraySize * MAGIC_FACTOR) - m_dataHead) ? ((m_currArraySize * MAGIC_FACTOR) - m_dataHead) : m_numDataBytes);
+ memcpyNoGCRefs((void *)bTemp, (void *) &m_data[m_dataHead], numBytesToCopy);
+ memcpyNoGCRefs((void *) &bTemp[numBytesToCopy], (void *)m_data, (m_numDataBytes - numBytesToCopy));
+ }
+
+ // Delete old allocation
+ if (m_usingHeap)
+ {
+ delete[] m_data;
+ delete[] m_Objects1;
+ delete[] m_Objects2;
+ delete[] m_Objects3;
+ delete[] m_dataIndices;
+ }
+
+ refTemp.SuppressRelease();
+ refParentTemp.SuppressRelease();
+ refAuxTemp.SuppressRelease();
+ dwIndicesTemp.SuppressRelease();
+ bTemp.SuppressRelease();
+
+ m_currArraySize = newSize;
+ m_Objects1 = refTemp;
+ m_Objects2 = refParentTemp;
+ m_Objects3 = refAuxTemp;
+ m_dataIndices = dwIndicesTemp;
+ m_data = bTemp;
+ m_head = 0;
+ m_dataHead = 0;
+
+ m_usingHeap = TRUE;
+#ifdef USE_CHECKED_OBJECTREFS
+ for(DWORD i = 0; i < m_currArraySize; i++)
+ {
+ Thread::ObjectRefProtected(&m_Objects1[i]);
+ if (m_Objects2)
+ Thread::ObjectRefProtected(&m_Objects2[i]);
+ if (m_Objects3)
+ Thread::ObjectRefProtected(&m_Objects3[i]);
+ }
+#endif
+}
+
+
+VOID GCScanRootsInCollection(promote_func *fn, ScanContext* sc, void *context)
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ GCSafeCollection *pObjCollection = (GCSafeCollection *)context;
+ pObjCollection->ReportGCRefs(fn, sc);
+}
+
+VOID
+BeginCloning(ObjectClone *pOC)
+{
+ pOC->Init(FALSE);
+}
+
+VOID
+EndCloning(ObjectClone *pOC)
+{
+ pOC->Cleanup(FALSE);
+}
+
+typedef Holder<ObjectClone*, BeginCloning, EndCloning> ObjectCloneHolder;
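+// Holder usage sketch: construction runs BeginCloning (pOC->Init) and the
+// destructor runs EndCloning (pOC->Cleanup), so Cleanup happens even if the
+// clone unwinds on an exception:
+//
+//   ObjectCloneHolder ocHolder(this);  // Init(FALSE) now, Cleanup(FALSE) at scope exit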
+
+
+OBJECTREF ObjectClone::Clone(OBJECTREF refObj, TypeHandle expectedType, AppDomain* fromDomain, AppDomain* toDomain, OBJECTREF refExecutionContext)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ if (refObj == NULL)
+ return NULL;
+
+ if (m_context != ObjectFreezer && refObj->GetMethodTable() == g_pStringClass)
+ return refObj;
+
+ ObjectCloneHolder ocHolder(this);
+
+ m_fromDomain = fromDomain;
+ m_toDomain = toDomain;
+
+ m_currObject = refObj;
+ GCPROTECT_BEGIN(m_currObject);
+ m_topObject = NULL;
+ GCPROTECT_BEGIN(m_topObject);
+ m_fromExecutionContext = refExecutionContext;
+ GCPROTECT_BEGIN(m_fromExecutionContext);
+
+ // Enter the domain we're cloning into, if we're not already there
+ ENTER_DOMAIN_PTR(toDomain,ADV_RUNNINGIN);
+
+ if (!m_securityChecked)
+ {
+ Security::SpecialDemand(SSWT_DEMAND_FROM_NATIVE, SECURITY_SERIALIZATION);
+ m_securityChecked = TRUE;
+ }
+
+#ifdef _DEBUG
+ DefineFullyQualifiedNameForClass();
+ LOG((LF_REMOTING, LL_INFO100, "Clone. Cloning instance of type %s.\n",
+ GetFullyQualifiedNameForClassNestedAware(m_currObject->GetMethodTable())));
+#endif
+
+ m_newObject = NULL;
+ GCPROTECT_BEGIN(m_newObject);
+ PTRARRAYREF refValues = NULL;
+ GCPROTECT_BEGIN(refValues);
+ OBJECTREF refParent = NULL;
+ GCPROTECT_BEGIN(refParent);
+
+ QueuedObjectInfo *currObjFixupInfo = NULL;
+ // For some dynamically sized stack objects
+ void *pTempStackSpace = NULL;
+ DWORD dwCurrStackSpaceSize = 0;
+
+ // Initialize QOM
+ QueuedObjectInfo topObj;
+ OBJECTREF dummy1, dummy2;
+ QOM.Enqueue(m_currObject, NULL, NULL, (QueuedObjectInfo *)&topObj);
+
+ while ((m_currObject = QOM.Dequeue(&dummy1, &dummy2, &currObjFixupInfo)) != NULL)
+ {
+ m_newObject = NULL;
+ MethodTable *newMT = NULL;
+
+ BOOL repeatObject = FALSE;
+ BOOL isISerializable = FALSE, isIObjRef = FALSE, isBoxed = FALSE;
+ DWORD ISerializableTSOIndex = (DWORD) -1;
+ DWORD IObjRefTSOIndex = (DWORD) -1;
+ DWORD BoxedValTSOIndex = (DWORD) -1;
+ m_skipFieldScan = FALSE;
+
+ // ALLOCATE PHASE
+
+ // Was currObject seen before?
+ int currID = TOS.HasID(m_currObject, &m_newObject);
+ if (currID != -1)
+ {
+ // Yes
+ repeatObject = TRUE;
+ m_skipFieldScan = TRUE;
+ newMT = m_newObject->GetMethodTable();
+
+ if (m_cbInterface->IsISerializableType(newMT))
+ {
+ currObjFixupInfo->SetIsISerializableInstance();
+ isISerializable = TRUE;
+ ISerializableTSOIndex = FindObjectInTSO(currID, ISerializable);
+ }
+
+#ifdef _DEBUG
+ LOG((LF_REMOTING, LL_INFO1000, "Clone. Object of type %s with id %d seen before.\n",
+ GetFullyQualifiedNameForClassNestedAware(m_currObject->GetMethodTable()), currID));
+#endif
+ }
+ else
+ {
+#ifdef _DEBUG
+ LOG((LF_REMOTING, LL_INFO1000, "Clone. Object of type %s not seen before.\n",
+ GetFullyQualifiedNameForClassNestedAware(m_currObject->GetMethodTable())));
+#endif
+ // No
+ MethodTable *currMT = m_currObject->GetMethodTable();
+
+ // Check whether object is serializable
+ m_cbInterface->ValidateFromType(currMT);
+
+ // Add current object to table of seen objects and get an id
+ currID = TOS.AddObject(m_currObject, m_newObject);
+ LOG((LF_REMOTING, LL_INFO1000, "Clone. Current object added to Table of Objects Seen. Given id %d.\n", currID));
+
+ if (m_cbInterface->IsRemotedType(currMT, m_fromDomain, m_toDomain))
+ {
+ refValues = AllocateISerializable(currID, TRUE);
+ isISerializable = TRUE;
+ ISerializableTSOIndex = TSO.GetCount() - 1;
+ currObjFixupInfo->SetIsISerializableInstance();
+ if (refValues == NULL)
+ {
+ // We found a smugglable objref. No field scanning needed
+ m_skipFieldScan = TRUE;
+ }
+ }
+ else if( m_cbInterface->IsISerializableType(currMT))
+ {
+ InvokeVtsCallbacks(m_currObject, RemotingVtsInfo::VTS_CALLBACK_ON_SERIALIZING, fromDomain);
+ if (HasVtsCallbacks(m_currObject->GetMethodTable(), RemotingVtsInfo::VTS_CALLBACK_ON_SERIALIZED))
+ VSC.Enqueue(m_currObject, NULL, NULL, NULL);
+
+ refValues = AllocateISerializable(currID, FALSE);
+ isISerializable = TRUE;
+ ISerializableTSOIndex = TSO.GetCount() - 1;
+ currObjFixupInfo->SetIsISerializableInstance();
+ }
+ else if (currMT->IsArray())
+ {
+ AllocateArray();
+ }
+ else
+ {
+ // This is a regular object
+ InvokeVtsCallbacks(m_currObject, RemotingVtsInfo::VTS_CALLBACK_ON_SERIALIZING, fromDomain);
+ if (HasVtsCallbacks(m_currObject->GetMethodTable(), RemotingVtsInfo::VTS_CALLBACK_ON_SERIALIZED))
+ VSC.Enqueue(m_currObject, NULL, NULL, NULL);
+
+ AllocateObject();
+
+ if (m_cbInterface->IsISerializableType(m_newObject->GetMethodTable()))
+ {
+ // We have a situation where the serialized instance was not ISerializable,
+ // but the target instance is. So we make the from object look like an ISerializable
+ refValues = MakeObjectLookLikeISerializable(currID);
+ isISerializable = TRUE;
+ ISerializableTSOIndex = TSO.GetCount() - 1;
+ currObjFixupInfo->SetIsISerializableInstance();
+ }
+ }
+
+ _ASSERTE(m_newObject != NULL);
+ newMT = m_newObject->GetMethodTable();
+
+ // Check whether new object is serializable
+ m_cbInterface->ValidateToType(newMT);
+
+ // Update the TOS, to include the new object
+ int retId;
+ retId = TOS.UpdateObject(m_currObject, m_newObject);
+ _ASSERTE(retId == currID);
+ }
+ _ASSERTE(m_newObject != NULL);
+
+ // FIXUP PHASE
+ // Get parent to be fixed up
+ ParentInfo *parentInfo;
+ refParent = QOF.Peek(&dummy1, &dummy2, (QueuedObjectInfo **)&parentInfo);
+ MethodTable *pParentMT = NULL;
+
+ if (refParent == NULL)
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "Clone. No parent found. This is the top object.\n"));
+ // This is the top object
+ _ASSERTE(m_topObject == NULL);
+ m_topObject = m_newObject;
+ }
+ else
+ {
+#ifdef _DEBUG
+ LOG((LF_REMOTING, LL_INFO1000, "Clone. Parent is of type %s.\n",
+ GetFullyQualifiedNameForClassNestedAware(m_currObject->GetMethodTable())));
+#endif
+ pParentMT = refParent->GetMethodTable();
+ }
+
+ if (IsDelayedFixup(newMT, currObjFixupInfo))
+ {
+ // New object is IObjRef or a boxed object
+ if (m_cbInterface->IsIObjectReferenceType(newMT))
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "Clone. This is an IObjectReference. Delaying fixup.\n"));
+ DWORD size = sizeof(IObjRefInstanceInfo) + (currObjFixupInfo ? currObjFixupInfo->GetSize() : 0);
+ if (size > dwCurrStackSpaceSize)
+ {
+ pTempStackSpace = _alloca(size);
+ dwCurrStackSpaceSize = size;
+ }
+ IObjRefInstanceInfo *pIORInfo = new (pTempStackSpace) IObjRefInstanceInfo(currID, 0, 0);
+ if (currObjFixupInfo)
+ pIORInfo->SetFixupInfo(currObjFixupInfo);
+ // Check if this instance is ISerializable also
+ if (isISerializable)
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "Clone. This is also an ISerializable type at index %d in TSO.\n", ISerializableTSOIndex));
+ _ASSERTE(ISerializableTSOIndex != (DWORD) -1);
+ pIORInfo->SetISerTSOIndex(ISerializableTSOIndex);
+ }
+
+ if (repeatObject)
+ pIORInfo->SetIsRepeatObject();
+
+ // Add to TSO
+ TSO.Push(m_newObject, m_currObject, refParent, pIORInfo);
+
+ isIObjRef = TRUE;
+ IObjRefTSOIndex = TSO.GetCount() - 1;
+
+ LOG((LF_REMOTING, LL_INFO1000, "Clone. Added to TSO at index %d.\n", IObjRefTSOIndex));
+ // Any special-object parent must wait until the current object is resolved
+ if (parentInfo)
+ {
+ parentInfo->IncrementSpecialMembers();
+ TMappings.Add(IObjRefTSOIndex);
+ }
+
+ }
+ if (currObjFixupInfo->NeedsUnboxing())
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "Clone. This is a boxed value type. Delaying fixup.\n"));
+ DWORD size = sizeof(ValueTypeInfo) + currObjFixupInfo->GetSize();
+ if (size > dwCurrStackSpaceSize)
+ {
+ pTempStackSpace = _alloca(size);
+ dwCurrStackSpaceSize = size;
+ }
+ ValueTypeInfo *valInfo = new (pTempStackSpace) ValueTypeInfo(currID, currObjFixupInfo);
+ // If the value type is also ISer or IObj, then it has to wait till those interfaces are addressed
+ if (isISerializable)
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "Clone. This is also an ISerializable type at index %d in TSO.\n", ISerializableTSOIndex));
+ valInfo->SetISerTSOIndex(ISerializableTSOIndex);
+ }
+ if (isIObjRef)
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "Clone. This is also an IObjectReference type at index %d in TSO.\n", IObjRefTSOIndex));
+ valInfo->SetIObjRefTSOIndex(IObjRefTSOIndex);
+ }
+
+ // Add to TSO
+ TSO.Push(m_newObject, refParent, NULL, valInfo);
+
+ isBoxed = TRUE;
+ BoxedValTSOIndex = TSO.GetCount() - 1;
+
+ LOG((LF_REMOTING, LL_INFO1000, "Clone. Added to TSO at index %d.\n", BoxedValTSOIndex));
+ // An IObjRef parent, or a parent that is itself boxed, must wait until the current object is resolved
+ if (parentInfo && (parentInfo->NeedsUnboxing() || parentInfo->IsIObjRefInstance()))
+ {
+ parentInfo->IncrementSpecialMembers();
+ TMappings.Add(BoxedValTSOIndex);
+ }
+ }
+ }
+
+ if (refParent != NULL)
+ {
+ if (!IsDelayedFixup(newMT, currObjFixupInfo))
+ Fixup(m_newObject, refParent, currObjFixupInfo);
+
+ // If currObj is ISer, then an IObjRef parent must wait until the current object is resolved
+ if (currObjFixupInfo->IsISerializableInstance() &&
+ parentInfo->IsIObjRefInstance())
+ {
+ parentInfo->IncrementSpecialMembers();
+ TMappings.Add(ISerializableTSOIndex);
+ }
+ }
+
+ // If we are done with this parent, remove it from QOF
+ if (parentInfo && parentInfo->DecrementFixupCount() == 0)
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "Clone. All children fixed up. Removing parent from QOF.\n", BoxedValTSOIndex));
+ LOG((LF_REMOTING, LL_INFO1000, "Clone. Parent has %d special member objects.\n", parentInfo->GetNumSpecialMembers()));
+ OBJECTREF refTemp;
+ ParentInfo *pFITemp;
+ refTemp = QOF.Dequeue(&dummy1, &dummy2, (QueuedObjectInfo **)&pFITemp);
+ _ASSERTE(refTemp == refParent);
+ _ASSERTE(pFITemp == parentInfo);
+
+ // If parent is a special object, then we need to know how many special members it has
+ if ((parentInfo->IsIObjRefInstance() ||
+ parentInfo->IsISerializableInstance() ||
+ parentInfo->NeedsUnboxing())
+ && parentInfo->GetNumSpecialMembers() > 0)
+ {
+ // Make a note in TSO that this parent has non-zero special members
+ DWORD index[3];
+ index[0] = parentInfo->GetIObjRefIndexIntoTSO();
+ index[1] = parentInfo->GetISerIndexIntoTSO();
+ index[2] = parentInfo->GetBoxedValIndexIntoTSO();
+
+ for (DWORD count = 0; count < 3; count++)
+ {
+ OBJECTREF refIser, refNames, refValuesTemp;
+ SpecialObjectInfo *pISerInfo;
+
+ if (index[count] == (DWORD) -1)
+ continue;
+
+ refIser = TSO.GetAt(index[count], &refNames, &refValuesTemp, (QueuedObjectInfo **)&pISerInfo);
+ _ASSERTE(refIser == refParent);
+
+ DWORD numSpecialObjects = parentInfo->GetNumSpecialMembers();
+ pISerInfo->SetNumSpecialMembers(numSpecialObjects);
+
+ _ASSERTE(TMappings.GetCount() >= numSpecialObjects);
+ pISerInfo->SetMappingTableIndex(TMappings.GetCount() - numSpecialObjects);
+ }
+ }
+ }
+
+ // FIELD SCAN PHASE
+ if (!m_skipFieldScan)
+ {
+ if (m_currObject->GetMethodTable()->IsArray())
+ ScanArrayMembers();
+ else if (isISerializable)
+ ScanISerializableMembers(IObjRefTSOIndex, ISerializableTSOIndex, BoxedValTSOIndex, refValues);
+ else
+ ScanMemberFields(IObjRefTSOIndex, BoxedValTSOIndex);
+ }
+
+ } // While there are objects in QOM
+
+ // OBJECT COMPLETION PHASE
+ CompleteSpecialObjects();
+
+ // Deliver VTS OnDeserialized callbacks.
+ CompleteVtsOnDeserializedCallbacks();
+
+ CompleteIDeserializationCallbacks();
+
+ _ASSERTE(m_topObject != NULL);
+ // If a type check was requested, see if the returned object is of the expected type
+ if (!expectedType.IsNull()
+ && !ObjIsInstanceOf(OBJECTREFToObject(m_topObject), expectedType))
+ COMPlusThrow(kArgumentException,W("Arg_ObjObj"));
+
+ GCPROTECT_END(); // refParent
+ GCPROTECT_END(); // refValues
+
+ GCPROTECT_END(); // m_newObject
+
+ END_DOMAIN_TRANSITION;
+
+ // Deliver VTS OnSerialized callbacks.
+ CompleteVtsOnSerializedCallbacks();
+
+ GCPROTECT_END(); // m_fromExecutionContext
+ GCPROTECT_END(); // m_topObject
+ GCPROTECT_END(); // m_currObject
+
+ return m_topObject;
+}
+
+// IObjRefs and value types boxed by us need to be fixed up towards the end
+BOOL ObjectClone::IsDelayedFixup(MethodTable *newMT, QueuedObjectInfo *pCurrInfo)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+ if (m_cbInterface->IsIObjectReferenceType(newMT) ||
+ pCurrInfo->NeedsUnboxing())
+ return TRUE;
+ else
+ return FALSE;
+}
+
+void ObjectClone::Fixup(OBJECTREF newObj, OBJECTREF refParent, QueuedObjectInfo *pFixupInfo)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+ MethodTable *pParentMT = refParent->GetMethodTable();
+
+ if (pFixupInfo->IsISerializableMember())
+ {
+ HandleISerializableFixup(refParent, pFixupInfo);
+ }
+ else if (pParentMT->IsArray())
+ {
+ HandleArrayFixup(refParent, pFixupInfo);
+ }
+ else
+ {
+ HandleObjectFixup(refParent, pFixupInfo);
+ }
+}
+
+PTRARRAYREF ObjectClone::MakeObjectLookLikeISerializable(int objectId)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(m_context != ObjectFreezer);
+
+ LOG((LF_REMOTING, LL_INFO1000, "MakeObjectLookLikeISerializable. Target object is ISerializable, so making from object look ISerializable\n"));
+ MethodTable *pCurrMT = m_currObject->GetMethodTable();
+ DWORD numFields = pCurrMT->GetNumInstanceFields();
+
+ PTRARRAYREF fieldNames = NULL;
+ PTRARRAYREF fieldValues = NULL;
+
+ GCPROTECT_BEGIN(fieldNames);
+ GCPROTECT_BEGIN(fieldValues);
+
+ // Go back to from domain
+ ENTER_DOMAIN_PTR(m_fromDomain,ADV_RUNNINGIN);
+
+ // Reset the execution context to the original state it was in when we first
+ // left the from domain (this will automatically be popped once we return
+ // from this domain again).
+ Thread *pThread = GetThread();
+ if (pThread->IsExposedObjectSet())
+ {
+ THREADBASEREF refThread = (THREADBASEREF)pThread->GetExposedObjectRaw();
+ refThread->SetExecutionContext(m_fromExecutionContext);
+ }
+
+ fieldNames = (PTRARRAYREF)AllocateObjectArray(numFields, g_pStringClass, FALSE);
+ fieldValues = (PTRARRAYREF)AllocateObjectArray(numFields, g_pObjectClass, FALSE);
+
+ DWORD fieldIndex = 0;
+ while (pCurrMT)
+ {
+
+ DWORD numInstanceFields = pCurrMT->GetNumIntroducedInstanceFields();
+
+ FieldDesc *pFields = pCurrMT->GetApproxFieldDescListRaw();
+
+ for (DWORD i = 0; i < numInstanceFields; i++)
+ {
+ if (pFields[i].IsNotSerialized())
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "MakeObjectLookLikeISerializable. Field %s is marked NonSerialized. Skipping.\n", pFields[i].GetName()));
+ continue;
+ }
+
+ CorElementType typ = pFields[i].GetFieldType();
+ DWORD offset = pFields[i].GetOffset();
+
+ LPCUTF8 szFieldName = pFields[i].GetName();
+ STRINGREF refName = StringObject::NewString(szFieldName);
+ _ASSERTE(refName != NULL);
+
+ fieldNames->SetAt(fieldIndex, refName);
+
+ switch (typ)
+ {
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ {
+ MethodTable *pFldMT = MscorlibBinder::GetElementType(typ);
+ void *pData = m_currObject->GetData() + offset;
+ OBJECTREF refBoxed = pFldMT->Box(pData);
+
+ fieldValues->SetAt(fieldIndex, refBoxed);
+ break;
+ }
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_FNPTR:
+ {
+ TypeHandle th = LoadExactFieldType(&pFields[i], m_currObject, m_fromDomain);
+ _ASSERTE(!th.AsMethodTable()->ContainsStackPtr() && "Field types cannot contain stack pointers.");
+
+ OBJECTREF refBoxed = BoxValueTypeInWrongDomain(m_currObject, offset, th.AsMethodTable());
+
+ fieldValues->SetAt(fieldIndex, refBoxed);
+ break;
+ }
+ case ELEMENT_TYPE_SZARRAY: // Single Dim
+ case ELEMENT_TYPE_ARRAY: // General Array
+ case ELEMENT_TYPE_CLASS: // Class
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_STRING: // System.String
+ case ELEMENT_TYPE_VAR:
+ {
+ OBJECTREF refField = *((OBJECTREF *)(m_currObject->GetData() + offset));
+ fieldValues->SetAt(fieldIndex, refField);
+ break;
+ }
+ default:
+ _ASSERTE(!"Unknown element type in MakeObjectLookLikeISerializalbe");
+ }
+
+ fieldIndex++;
+ }
+
+ pCurrMT = pCurrMT->GetParentMethodTable();
+ }
+
+ // Back to original domain
+ END_DOMAIN_TRANSITION;
+
+ // Add object to TSO
+ ISerializableInstanceInfo iserInfo(objectId, 0);
+ TSO.Push(m_newObject, fieldNames, NULL, (QueuedObjectInfo *)&iserInfo);
+
+ LOG((LF_REMOTING, LL_INFO1000, "MakeObjectLookLikeISerializable. Added to TSO at index %d.\n", TSO.GetCount() - 1));
+ GCPROTECT_END();
+ GCPROTECT_END();
+
+ return fieldValues;
+}
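+
+// The result mimics what GetObjectData would have produced: fieldNames[i] and
+// fieldValues[i] form one name/value pair per serializable instance field;
+// e.g. a field 'int m_id = 5' (illustrative) yields ("m_id", boxed Int32 5)
+// at the same index. The values array is returned for field scanning while
+// the names array travels with the TSO record pushed above.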
+
+PTRARRAYREF ObjectClone::AllocateISerializable(int objectId, BOOL bIsRemotingObject)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(m_context != ObjectFreezer);
+
+ // Go back to from domain
+ StackSString ssAssemName;
+ StackSString ssTypeName;
+
+ struct _gc {
+ STRINGREF typeName;
+ STRINGREF assemblyName;
+ PTRARRAYREF fieldNames;
+ PTRARRAYREF fieldValues;
+ OBJECTREF refObjRef;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ ENTER_DOMAIN_PTR(m_fromDomain,ADV_RUNNINGIN);
+
+ // Reset the execution context to the original state it was in when we first
+ // left the from domain (this will automatically be popped once we return
+ // from this domain again).
+ Thread *pThread = GetThread();
+ if (pThread->IsExposedObjectSet())
+ {
+ THREADBASEREF refThread = (THREADBASEREF)pThread->GetExposedObjectRaw();
+ refThread->SetExecutionContext(m_fromExecutionContext);
+ }
+
+ // Call GetObjectData on the interface
+
+ LOG((LF_REMOTING, LL_INFO1000, "AllocateISerializable. Instance is ISerializable type. Calling GetObjectData.\n"));
+
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__OBJECTCLONEHELPER__GET_OBJECT_DATA);
+
+ DECLARE_ARGHOLDER_ARRAY(args, 5);
+
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(m_currObject);
+ args[ARGNUM_1] = PTR_TO_ARGHOLDER(&gc.typeName);
+ args[ARGNUM_2] = PTR_TO_ARGHOLDER(&gc.assemblyName);
+ args[ARGNUM_3] = PTR_TO_ARGHOLDER(&gc.fieldNames);
+ args[ARGNUM_4] = PTR_TO_ARGHOLDER(&gc.fieldValues);
+
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD_RETREF(gc.refObjRef, OBJECTREF, args);
+
+ if (!bIsRemotingObject || gc.refObjRef == NULL)
+ {
+ ssAssemName.Set(gc.assemblyName->GetBuffer());
+ ssTypeName.Set(gc.typeName->GetBuffer());
+ }
+
+ // Back to original domain
+ END_DOMAIN_TRANSITION;
+
+ // If it's a remoting object we are dealing with, we may already have the smugglable ObjRef
+ if (bIsRemotingObject && gc.refObjRef != NULL)
+ {
+ m_newObject = gc.refObjRef;
+ // Add object to TSO. We don't need an ISerializable record, because we are smuggling the ObjRef,
+ // so technically the ISerializable ctor can be considered already called. But we still make an entry in
+ // TSO and mark it "processed", so that repeat references to the same remoting object work correctly.
+ ISerializableInstanceInfo iserInfo(objectId, 0);
+ iserInfo.SetHasBeenProcessed();
+ TSO.Push(m_newObject, NULL, NULL, (QueuedObjectInfo *)&iserInfo);
+
+ LOG((LF_REMOTING, LL_INFO1000, "AllocateISerializable. GetObjectData returned smugglable ObjRef. Added dummy record to TSO at index %d.\n", TSO.GetCount() - 1));
+ }
+ else
+ {
+ // Find the type (and choke on any exotics such as arrays, function pointers or generic type definitions).
+ TypeHandle th = GetType(ssTypeName, ssAssemName);
+ if (th.IsTypeDesc() || th.ContainsGenericVariables())
+ {
+ StackSString ssBeforeTypeName, ssAfterTypeName;
+ TypeString::AppendType(ssBeforeTypeName, m_currObject->GetTypeHandle(), TypeString::FormatNamespace | TypeString::FormatFullInst);
+ TypeString::AppendType(ssAfterTypeName, th, TypeString::FormatNamespace | TypeString::FormatFullInst);
+ COMPlusThrow(kSerializationException, IDS_SERIALIZATION_BAD_ISER_TYPE, ssBeforeTypeName.GetUnicode(), ssAfterTypeName.GetUnicode());
+ }
+ MethodTable *pSrvMT = th.AsMethodTable();
+ _ASSERTE(pSrvMT);
+
+#ifdef _DEBUG
+ {
+ DefineFullyQualifiedNameForClass();
+ LPCUTF8 __szTypeName = GetFullyQualifiedNameForClassNestedAware(pSrvMT);
+ LOG((LF_REMOTING, LL_INFO1000, "AllocateISerializable. Allocating instance of type %s.\n", &__szTypeName[0]));
+ }
+#endif
+ // Allocate the object
+ m_newObject = m_cbInterface->AllocateObject(m_currObject, pSrvMT);
+
+ // Add object to TSO
+ ISerializableInstanceInfo iserInfo(objectId, 0);
+
+ // Check if the target object is ISerializable. If not, we need to treat construction of this object differently
+ if (!m_cbInterface->IsISerializableType(pSrvMT))
+ {
+ iserInfo.SetTargetNotISerializable();
+ }
+ TSO.Push(m_newObject, gc.fieldNames, NULL, (QueuedObjectInfo *)&iserInfo);
+
+ LOG((LF_REMOTING, LL_INFO1000, "AllocateISerializable. Added to TSO at index %d.\n", TSO.GetCount() - 1));
+ }
+ GCPROTECT_END();
+
+ return gc.fieldValues;
+}
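+
+// The managed helper called above is assumed to have roughly this shape (a
+// sketch only; the real signature is whatever
+// METHOD__OBJECTCLONEHELPER__GET_OBJECT_DATA binds to):
+//
+//   static object GetObjectData(object obj, out string typeName,
+//       out string assemName, out string[] fieldNames, out object[] fieldValues)
+//
+// returning a smugglable ObjRef for remoting objects and null otherwise, which
+// matches the five ARGHOLDERs and the byref out-pointers marshaled above.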
+
+void ObjectClone::AllocateArray()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END
+
+ LOG((LF_REMOTING, LL_INFO1000, "AllocateArray. Instance is an array type.\n"));
+ MethodTable *pCurrMT = m_currObject->GetMethodTable();
+ _ASSERTE(pCurrMT->IsArray());
+
+ BASEARRAYREF refArray = (BASEARRAYREF)m_currObject;
+ GCPROTECT_BEGIN(refArray);
+
+ TypeHandle elemTh = refArray->GetArrayElementTypeHandle();
+ CorElementType elemType = refArray->GetArrayElementType();
+ DWORD numComponents = refArray->GetNumComponents();
+
+ TypeHandle __elemTh = GetCorrespondingTypeForTargetDomain(elemTh);
+ _ASSERTE(!__elemTh.IsNull());
+
+ unsigned __rank = pCurrMT->GetRank();
+ TypeHandle __arrayTh = ClassLoader::LoadArrayTypeThrowing(__elemTh, __rank == 1 ? ELEMENT_TYPE_SZARRAY : ELEMENT_TYPE_ARRAY, __rank);
+
+ DWORD __numArgs = __rank*2;
+ INT32* __args = (INT32*) _alloca(sizeof(INT32)*__numArgs);
+
+ if (__arrayTh.AsArray()->GetInternalCorElementType() == ELEMENT_TYPE_ARRAY)
+ {
+ const INT32* bounds = refArray->GetBoundsPtr();
+ const INT32* lowerBounds = refArray->GetLowerBoundsPtr();
+ for(unsigned int i=0; i < __rank; i++)
+ {
+ __args[2*i] = lowerBounds[i];
+ __args[2*i+1] = bounds[i];
+ }
+ }
+ else
+ {
+ __numArgs = 1;
+ __args[0] = numComponents;
+ }
+ m_newObject = m_cbInterface->AllocateArray(m_currObject, __arrayTh, __args, __numArgs, FALSE);
+
+ // Treat pointer as a primitive type (we shallow copy the bits).
+ if (CorTypeInfo::IsPrimitiveType(elemType) || elemType == ELEMENT_TYPE_PTR)
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "AllocateArray. Instance is an array of primitive type. Copying contents.\n"));
+ // Copy contents.
+ SIZE_T numBytesToCopy = refArray->GetComponentSize() * numComponents;
+ I1ARRAYREF refI1Arr = (I1ARRAYREF)m_newObject;
+ BYTE *pDest = (BYTE *)refI1Arr->GetDirectPointerToNonObjectElements();
+ I1ARRAYREF refFromArr = (I1ARRAYREF)refArray;
+ BYTE *pSrc = (BYTE *)refFromArr->GetDirectPointerToNonObjectElements();
+
+ memcpyNoGCRefs(pDest, pSrc, numBytesToCopy);
+ m_skipFieldScan = TRUE;
+ }
+ else if (elemType == ELEMENT_TYPE_VALUETYPE)
+ {
+ if (!__elemTh.GetMethodTable()->HasFieldsWhichMustBeInited() && RemotableMethodInfo::TypeIsConduciveToBlitting(elemTh.AsMethodTable(), __elemTh.GetMethodTable()))
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "AllocateArray. Instance is an array of value type with no embedded GC type. Copying contents.\n"));
+ // Copy contents.
+ SIZE_T numBytesToCopy = refArray->GetComponentSize() * numComponents;
+ I1ARRAYREF refI1Arr = (I1ARRAYREF)m_newObject;
+ BYTE *pDest = (BYTE *)refI1Arr->GetDirectPointerToNonObjectElements();
+ I1ARRAYREF refFromArr = (I1ARRAYREF)refArray;
+ BYTE *pSrc = (BYTE *)refFromArr->GetDirectPointerToNonObjectElements();
+
+ memcpyNoGCRefs(pDest, pSrc, numBytesToCopy);
+ m_skipFieldScan = TRUE;
+ }
+ }
+ GCPROTECT_END();
+}
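+
+// Bounds-argument sketch: for a rank-2 array with lower bounds {lo0, lo1} and
+// lengths {n0, n1} (illustrative values), the loop above builds
+// __args == { lo0, n0, lo1, n1 }; for an SZARRAY the else-branch collapses
+// this to a single element count.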
+
+void ObjectClone::AllocateObject()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END
+
+ LOG((LF_REMOTING, LL_INFO1000, "AllocateObject. Instance is a regular object.\n"));
+ MethodTable *pCurrMT = m_currObject->GetMethodTable();
+ _ASSERTE(!pCurrMT->IsArray());
+ _ASSERTE(!pCurrMT->IsMarshaledByRef() && !pCurrMT->IsTransparentProxy());
+ _ASSERTE(!m_cbInterface->IsISerializableType(pCurrMT));
+
+ MethodTable *pCorrespondingMT = GetCorrespondingTypeForTargetDomain(pCurrMT);
+ _ASSERTE(pCorrespondingMT);
+
+ pCorrespondingMT->EnsureInstanceActive();
+
+ m_newObject = m_cbInterface->AllocateObject(m_currObject, pCorrespondingMT);
+
+ InvokeVtsCallbacks(m_newObject, RemotingVtsInfo::VTS_CALLBACK_ON_DESERIALIZING, m_toDomain);
+}
+
+// Use this wrapper when the type handle can't be represented as a raw MethodTable (i.e. it's a pointer or array type).
+TypeHandle ObjectClone::GetCorrespondingTypeForTargetDomain(TypeHandle thCli)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END
+
+ TypeHandle thBaseType = thCli;
+ TypeHandle thSrvType;
+
+ // Strip off any pointer information (and record the depth). We'll put this back later (when we've translated the base type).
+ DWORD dwPointerDepth = 0;
+ while (thBaseType.IsPointer())
+ {
+ dwPointerDepth++;
+ thBaseType = thBaseType.AsTypeDesc()->GetTypeParam();
+ }
+
+ // If we hit an array then we'll recursively translate the element type then build an array type out of it.
+ if (thBaseType.IsArray())
+ {
+ ArrayTypeDesc *atd = (ArrayTypeDesc *)thBaseType.AsTypeDesc();
+ thSrvType = GetCorrespondingTypeForTargetDomain(atd->GetArrayElementTypeHandle());
+
+ thSrvType = ClassLoader::LoadArrayTypeThrowing(thSrvType, atd->GetInternalCorElementType(), atd->GetRank());
+ }
+ else
+ {
+ // We should have only unshared types if we get here.
+ _ASSERTE(!thBaseType.IsTypeDesc());
+ thSrvType = GetCorrespondingTypeForTargetDomain(thBaseType.AsMethodTable());
+ }
+
+ // Match the level of pointer indirection from the original client type.
+ while (dwPointerDepth--)
+ {
+ thSrvType = thSrvType.MakePointer();
+ }
+
+ return thSrvType;
+}
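+
+// Peeling example (type shape illustrative): for a client type 'MyStruct**[]'
+// the array case recurses on the element type 'MyStruct**'; that recursion
+// strips two pointer levels (dwPointerDepth == 2), translates 'MyStruct' via
+// the MethodTable overload below, re-applies the two MakePointer levels, and
+// the array branch then rebuilds the array shape for the target domain.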
+
+MethodTable * ObjectClone::GetCorrespondingTypeForTargetDomain(MethodTable *pCliMT)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END
+
+ MethodTable *pSrvMT = NULL;
+ if (m_fromDomain == m_toDomain)
+ return pCliMT;
+
+ _ASSERTE(m_context != ObjectFreezer);
+#ifdef _DEBUG
+ SString __ssTypeName;
+ StackScratchBuffer __scratchBuf;
+ if (pCliMT->IsArray())
+ pCliMT->_GetFullyQualifiedNameForClass(__ssTypeName);
+ else
+ pCliMT->_GetFullyQualifiedNameForClassNestedAware(__ssTypeName);
+#endif
+
+ // Take advantage of shared types. If a type is shared, and its assembly has been loaded
+ // in the target domain, go ahead and use the same MT ptr.
+ // The logic is trickier (and more expensive to calculate) for generic types, so skip the optimization there.
+ if (pCliMT->IsDomainNeutral() && !pCliMT->HasInstantiation())
+ {
+ if (pCliMT->GetAssembly()->FindDomainAssembly(m_toDomain))
+ {
+ LOG((LF_REMOTING, LL_INFO1000,
+ "GetCorrespondingTypeForTargetDomain. Type %s is shared. Using same MethodTable.\n", __ssTypeName.GetUTF8(__scratchBuf)));
+ return pCliMT;
+ }
+ }
+
+ pSrvMT = CrossDomainTypeMap::GetMethodTableForDomain(pCliMT, m_fromDomain, m_toDomain);
+ if (pSrvMT)
+ {
+ LOG((LF_REMOTING, LL_INFO1000,
+ "GetCorrespondingTypeForTargetDomain. Found matching type for %s in domain %d from cache.\n", __ssTypeName.GetUTF8(__scratchBuf), m_toDomain));
+ return pSrvMT;
+ }
+
+ // Need to find the name and lookup in target domain
+ SString ssCliTypeName;
+ if (pCliMT->IsArray())
+ {
+ pCliMT->_GetFullyQualifiedNameForClass(ssCliTypeName);
+ }
+ else if (pCliMT->HasInstantiation())
+ {
+ TypeString::AppendType(ssCliTypeName, TypeHandle(pCliMT), TypeString::FormatNamespace | TypeString::FormatFullInst);
+ }
+ else
+ {
+ pCliMT->_GetFullyQualifiedNameForClassNestedAware(ssCliTypeName);
+ }
+
+
+ SString ssAssemblyName;
+ pCliMT->GetAssembly()->GetDisplayName(ssAssemblyName);
+
+ // Get the assembly
+ TypeHandle th = GetType(ssCliTypeName, ssAssemblyName);
+
+ if (!pCliMT->IsArray())
+ {
+ pSrvMT = th.AsMethodTable();
+ }
+ else
+ {
+ _ASSERTE(th.IsArray());
+ TypeDesc *td = th.AsTypeDesc();
+ pSrvMT = td->GetMethodTable();
+ }
+ CrossDomainTypeMap::SetMethodTableForDomain(pCliMT, m_fromDomain, pSrvMT, m_toDomain);
+ LOG((LF_REMOTING, LL_INFO1000,
+ "GetCorrespondingTypeForTargetDomain. Loaded matching type for %s in domain %d. Added to cache.\n", __ssTypeName.GetUTF8(__scratchBuf), m_toDomain));
+ return pSrvMT;
+}
+
+TypeHandle ObjectClone::GetType(const SString &ssTypeName, const SString &ssAssemName)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ Assembly *pAssembly = NULL;
+
+#ifndef OBJECT_CLONER_STRICT_MODE
+ EX_TRY
+#endif
+ {
+ AssemblySpec spec;
+ StackScratchBuffer scratchBuf;
+ HRESULT hr = spec.Init(ssAssemName.GetUTF8(scratchBuf));
+ if (SUCCEEDED(hr))
+ {
+ pAssembly = spec.LoadAssembly(FILE_ACTIVE);
+ }
+ else
+ {
+ COMPlusThrowHR(hr);
+ }
+ }
+#ifndef OBJECT_CLONER_STRICT_MODE
+ EX_CATCH
+ {
+ if (GET_EXCEPTION()->IsTransient())
+ {
+ EX_RETHROW;
+ }
+
+ DomainAssembly *pDomainAssembly = NULL;
+#ifdef FEATURE_FUSION
+ // If the normal load fails then try loading from a partial assembly name (relaxed serializer rules).
+ pDomainAssembly = LoadAssemblyFromPartialNameHack((SString*)&ssAssemName, TRUE);
+#endif // FEATURE_FUSION
+ if (pDomainAssembly == NULL)
+ COMPlusThrow(kSerializationException, IDS_SERIALIZATION_UNRESOLVED_TYPE,
+ ssTypeName.GetUnicode(), ssAssemName.GetUnicode());
+ else
+ pAssembly = pDomainAssembly->GetAssembly();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+#endif
+
+ _ASSERTE(pAssembly);
+
+ TypeHandle th = TypeName::GetTypeFromAssembly(ssTypeName.GetUnicode(), pAssembly);
+
+ if (th.IsNull())
+ {
+ COMPlusThrow(kSerializationException, IDS_SERIALIZATION_UNRESOLVED_TYPE,
+ ssTypeName.GetUnicode(), ssAssemName.GetUnicode());
+ }
+
+ LOG((LF_REMOTING, LL_INFO1000, "GetType. Loaded type %S from assembly %S in domain %d. \n",
+ ssTypeName.GetUnicode(), ssAssemName.GetUnicode(), m_toDomain->GetId().m_dwId));
+
+ return th;
+}
+
+void ObjectClone::HandleISerializableFixup(OBJECTREF refParent, QueuedObjectInfo *currObjFixupInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_context != ObjectFreezer);
+
+ ISerializableMemberInfo *pIsInfo = (ISerializableMemberInfo *)currObjFixupInfo;
+ OBJECTREF refNames, refValues;
+ ISerializableInstanceInfo *dummy;
+ OBJECTREF parent;
+ parent = TSO.GetAt(pIsInfo->GetTableIndex(), &refNames, &refValues, (QueuedObjectInfo **)&dummy);
+ _ASSERTE(parent == refParent);
+ _ASSERTE(dummy->IsISerializableInstance());
+
+ PTRARRAYREF refFields = (PTRARRAYREF)refValues;
+ _ASSERTE(pIsInfo->GetFieldIndex() < refFields->GetNumComponents());
+ refFields->SetAt(pIsInfo->GetFieldIndex(), m_newObject);
+
+ LOG((LF_REMOTING, LL_INFO1000, "HandleISerializableFixup. Parent is ISerializable. Added field #%d to TSO record at index %d\n", pIsInfo->GetFieldIndex(), pIsInfo->GetTableIndex()));
+}
+
+void ObjectClone::HandleArrayFixup(OBJECTREF refParent, QueuedObjectInfo *currObjFixupInfo)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(refParent->GetMethodTable()->IsArray());
+ BASEARRAYREF refParentArray = (BASEARRAYREF) refParent;
+ GCPROTECT_BEGIN(refParentArray);
+
+ NDimArrayMemberInfo *pArrInfo = (NDimArrayMemberInfo *)currObjFixupInfo;
+ DWORD *pIndices = pArrInfo->GetIndices();
+
+ TypeHandle arrayElementType = refParentArray->GetArrayElementTypeHandle();
+ MethodTable *pArrayMT = refParentArray->GetMethodTable();
+
+ DWORD Rank = pArrayMT->GetRank();
+ SIZE_T Offset = 0;
+ SIZE_T Multiplier = 1;
+
+ _ASSERTE(Rank == pArrInfo->GetNumDimensions());
+
+ for (int i = Rank-1; i >= 0; i--) {
+ INT32 curIndex = pIndices[i];
+ const INT32 *pBoundsPtr = refParentArray->GetBoundsPtr();
+
+ // Bounds check each index
+ // Casting to unsigned allows us to use one compare for [0..limit-1]
+ _ASSERTE((UINT32) curIndex < (UINT32) pBoundsPtr[i]);
+
+ Offset += curIndex * Multiplier;
+ Multiplier *= pBoundsPtr[i];
+ }
+
+ // The following code is loosely based on COMArrayInfo::SetValue
+
+ if (!arrayElementType.IsValueType())
+ {
+ if (!ObjIsInstanceOf(OBJECTREFToObject(m_newObject), arrayElementType))
+ COMPlusThrow(kInvalidCastException,W("InvalidCast_StoreArrayElement"));
+
+ OBJECTREF* pElem = (OBJECTREF*)(refParentArray->GetDataPtr() + (Offset * pArrayMT->GetComponentSize()));
+ SetObjectReference(pElem,m_newObject,GetAppDomain());
+ }
+ else
+ {
+ // value class or primitive type
+ OBJECTREF* pElem = (OBJECTREF*)(refParentArray->GetDataPtr() + (Offset * pArrayMT->GetComponentSize()));
+ if (!arrayElementType.GetMethodTable()->UnBoxInto(pElem, m_newObject))
+ COMPlusThrow(kInvalidCastException, W("InvalidCast_StoreArrayElement"));
+ }
+
+ LOG((LF_REMOTING, LL_INFO1000, "HandleArrayFixup. Parent is an array. Added element at offset %d\n", Offset));
+ GCPROTECT_END();
+}
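+
+// Offset math sketch: for indices {i0, i1} into a [d0 x d1] array (dimensions
+// illustrative), the loop above computes Offset = i1 + i0 * d1 (row-major),
+// which is then scaled by the component size to form the element address.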
+
+void ObjectClone::HandleObjectFixup(OBJECTREF refParent, QueuedObjectInfo *currObjFixupInfo)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END
+ ObjectMemberInfo *pObjInfo = (ObjectMemberInfo *)currObjFixupInfo;
+ FieldDesc *pTargetField = pObjInfo->GetFieldDesc();
+ DWORD offset = pTargetField->GetOffset();
+
+#ifdef _DEBUG
+ MethodTable *pTemp = refParent->GetMethodTable();
+ _ASSERTE(offset < pTemp->GetBaseSize());
+#endif
+
+ GCPROTECT_BEGIN(refParent);
+
+ TypeHandle fldType = LoadExactFieldType(pTargetField, refParent, m_toDomain);
+
+ if (!ObjIsInstanceOf(OBJECTREFToObject(m_newObject), fldType))
+ COMPlusThrow(kArgumentException,W("Arg_ObjObj"));
+
+ OBJECTREF *pDest = (OBJECTREF *) (refParent->GetData() + offset);
+ _ASSERTE(GetAppDomain()==m_toDomain);
+ SetObjectReference(pDest, m_newObject, GetAppDomain());
+
+ GCPROTECT_END();
+
+ LOG((LF_REMOTING, LL_INFO1000, "HandleObjectFixup. Parent is a regular object. Added field at offset %d\n", offset));
+}
+
+#ifdef OBJECT_CLONER_STRICT_MODE
+static void DECLSPEC_NORETURN ThrowMissingFieldException(FieldDesc *pFD)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ StackSString szField(SString::Utf8, pFD->GetName());
+
+ StackSString szType;
+ TypeString::AppendType(szType, TypeHandle(pFD->GetApproxEnclosingMethodTable()));
+
+ COMPlusThrow(kSerializationException,
+ IDS_SERIALIZATION_MISSING_FIELD,
+ szField.GetUnicode(),
+ szType.GetUnicode());
+}
+#endif
+
+void ObjectClone::ScanMemberFields(DWORD IObjRefTSOIndex, DWORD BoxedValTSOIndex)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END
+ _ASSERTE(m_currObject != NULL);
+ _ASSERTE(m_newObject != NULL);
+
+ MethodTable *pMT = m_currObject->GetMethodTable();
+ _ASSERTE(!pMT->IsMarshaledByRef() && !pMT->IsTransparentProxy());
+ _ASSERTE(!pMT->IsArray());
+ MethodTable *pTargetMT = m_newObject->GetMethodTable();
+
+ DWORD numFixupsNeeded = 0;
+
+ if (RemotableMethodInfo::TypeIsConduciveToBlitting(pMT, pTargetMT))
+ {
+ _ASSERTE(pMT->GetAlignedNumInstanceFieldBytes() == pTargetMT->GetAlignedNumInstanceFieldBytes());
+ DWORD numBytes = pMT->GetNumInstanceFieldBytes();
+ BYTE *pFrom = m_currObject->GetData();
+ BYTE *pTo = m_newObject->GetData();
+ memcpyNoGCRefs(pTo, pFrom, numBytes);
+ LOG((LF_REMOTING, LL_INFO1000, "ScanMemberFields. Object has no reference type fields. Blitting contents.\n"));
+ }
+ else if (AreTypesEmittedIdentically(pMT, pTargetMT))
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "ScanMemberFields. Object not blittable but types are layed out for easy cloning .\n"));
+ MethodTable *pCurrMT = pMT;
+ MethodTable *pCurrTargetMT = pTargetMT;
+ while (pCurrMT)
+ {
+ DWORD numInstanceFields = pCurrMT->GetNumIntroducedInstanceFields();
+ _ASSERTE(pCurrTargetMT->GetNumIntroducedInstanceFields() == numInstanceFields);
+
+ FieldDesc *pFields = pCurrMT->GetApproxFieldDescListRaw();
+ FieldDesc *pTargetFields = pCurrTargetMT->GetApproxFieldDescListRaw();
+
+ for (DWORD i = 0; i < numInstanceFields; i++)
+ {
+ if (pFields[i].IsNotSerialized())
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "ScanMemberFields. Field %s is marked NonSerialized. Skipping.\n", pFields[i].GetName()));
+ continue;
+ }
+
+ numFixupsNeeded += CloneField(&pFields[i], &pTargetFields[i]);
+ }
+
+ pCurrMT = pCurrMT->GetParentMethodTable();
+ pCurrTargetMT = pCurrTargetMT->GetParentMethodTable();
+ }
+ }
+ else
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "ScanMemberFields. Object type layout is different.\n"));
+
+ // The object types between source and destination have significant differences (some fields may be added, removed or
+ // re-ordered, the type hierarchy may have had layers added or removed). We can still clone the object if every non-optional
+ // field in the destination object can be found and serialized in a type with the same name in the source object. We ignore
+ // fields and entire type layers that have been added in the source object and also any fields or layers that have been
+ // removed as long as they don't include any fields that are mandatory in the destination object. We allow the fields within
+ // a type layer to move around (we key the field by name only; the later stage of cloning will check type equivalence and,
+ // as above, widen primitive types if necessary). Since it requires significant effort to calculate whether the
+ // objects can be cloned (and then locate corresponding fields in order to do so) we cache a mapping of source object fields
+ // to destination object fields.
+
+ // The following call will return such a mapping (it's an array where each entry is a pointer to a source object field desc
+ // and the entries are in destination field index order, most derived type first, followed by second most derived type
+ // etc.). If a mapping is impossible the method will throw.
+ FieldDesc **pFieldMap = CrossDomainFieldMap::LookupOrCreateFieldMapping(pTargetMT, pMT);
+ DWORD dwMapIndex = 0;
+
+ MethodTable *pDstMT = pTargetMT;
+ while (pDstMT)
+ {
+ FieldDesc *pDstFields = pDstMT->GetApproxFieldDescListRaw();
+ DWORD numInstanceFields = pDstMT->GetNumIntroducedInstanceFields();
+
+ for (DWORD i = 0; i < numInstanceFields; i++)
+ {
+ FieldDesc *pSrcField = pFieldMap[dwMapIndex++];
+
+ // Non-serialized fields in the destination type (or optional fields where the source type doesn't have an
+ // equivalent) don't have a source field desc.
+ if (pSrcField == NULL)
+ continue;
+
+ numFixupsNeeded += CloneField(pSrcField, &pDstFields[i]);
+ }
+
+ pDstMT = pDstMT->GetParentMethodTable();
+ }
+
+ _ASSERTE(dwMapIndex == pTargetMT->GetNumInstanceFields());
+ }
+
+ if (numFixupsNeeded > 0)
+ {
+ ParentInfo fxInfo(numFixupsNeeded);
+ if (IObjRefTSOIndex != (DWORD) -1)
+ {
+ _ASSERTE(m_cbInterface->IsIObjectReferenceType(pMT));
+ fxInfo.SetIsIObjRefInstance();
+ fxInfo.SetIObjRefIndexIntoTSO(IObjRefTSOIndex);
+ }
+ if (BoxedValTSOIndex != (DWORD) -1)
+ {
+ _ASSERTE(pMT->IsValueType());
+ fxInfo.SetNeedsUnboxing();
+ fxInfo.SetBoxedValIndexIntoTSO(BoxedValTSOIndex);
+ }
+ QOF.Enqueue(m_newObject, NULL, NULL, (QueuedObjectInfo *)&fxInfo);
+ LOG((LF_REMOTING, LL_INFO1000, "ScanMemberFields. Current object had total of %d reference type fields. Adding to QOF.\n", numFixupsNeeded));
+ // Delay calling any OnDeserialized callbacks until the end of the cloning operation (it's difficult to tell when all the
+ // children have been deserialized).
+ if (HasVtsCallbacks(m_newObject->GetMethodTable(), RemotingVtsInfo::VTS_CALLBACK_ON_DESERIALIZED))
+ VDC.Enqueue(m_newObject, NULL, NULL, NULL);
+ if (m_cbInterface->RequiresDeserializationCallback(m_newObject->GetMethodTable()))
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "ScanMemberFields. Adding object to Table of IDeserialization Callbacks\n"));
+ QueuedObjectInfo noInfo;
+ TDC.Enqueue(m_newObject, NULL, NULL, &noInfo);
+ }
+ }
+ else
+ {
+ // This is effectively a leaf node (no complex children) so if the type has a callback for OnDeserialized we'll deliver it
+ // now. This fixes callback ordering for a few more edge cases (e.g. VSW 415611) and is reasonably cheap. We can never do a
+ // perfect job (in the presence of object graph cycles) and a near perfect job (intuitively ordered callbacks for acyclic
+ // object graphs) is prohibitively expensive; so we're stuck with workarounds like this.
+ InvokeVtsCallbacks(m_newObject, RemotingVtsInfo::VTS_CALLBACK_ON_DESERIALIZED, m_toDomain);
+ if (m_cbInterface->RequiresDeserializationCallback(m_newObject->GetMethodTable()))
+ MakeIDeserializationCallback(m_newObject);
+ }
+}
+
+DWORD ObjectClone::CloneField(FieldDesc *pSrcField, FieldDesc *pDstField)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ BOOL bFixupNeeded = FALSE;
+
+ CorElementType srcType = pSrcField->GetFieldType();
+ CorElementType dstType = pDstField->GetFieldType();
+ DWORD srcOffset = pSrcField->GetOffset();
+ DWORD dstOffset = pDstField->GetOffset();
+
+ BOOL bUseWidenedValue = FALSE;
+ ARG_SLOT fieldData = 0;
+ if (srcType != dstType)
+ {
+ void *pData = m_currObject->GetData() + srcOffset;
+
+ MethodTable *pSrcFieldMT = NULL;
+ if (CorTypeInfo::IsPrimitiveType(srcType))
+ pSrcFieldMT = MscorlibBinder::GetElementType(srcType);
+ else
+ pSrcFieldMT = LoadExactFieldType(pSrcField, m_currObject, m_fromDomain).AsMethodTable();
+
+ LOG((LF_REMOTING, LL_INFO1000, "ScanMemberFields. Field %s has differing types at source and destination. Will try to convert.\n", pSrcField->GetName()));
+ fieldData = HandleFieldTypeMismatch(dstType, srcType, pData, pSrcFieldMT);
+ bUseWidenedValue = TRUE;
+ }
+
+ switch (dstType)
+ {
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_BOOLEAN:
+ {
+ BYTE *pDest = m_newObject->GetData() + dstOffset;
+ if (bUseWidenedValue)
+ *pDest = (unsigned char) fieldData;
+ else
+ {
+ BYTE *pByte = m_currObject->GetData() + srcOffset;
+ *pDest = *pByte;
+ }
+ }
+ break;
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_CHAR:
+ {
+ WORD *pDest = (WORD*)(m_newObject->GetData() + dstOffset);
+ if (bUseWidenedValue)
+ *pDest = (short) fieldData;
+ else
+ {
+ WORD *pWord = (WORD*)(m_currObject->GetData() + srcOffset);
+ *(pDest) = *pWord;
+ }
+ }
+ break;
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_R4:
+ IN_WIN32(case ELEMENT_TYPE_FNPTR:)
+ IN_WIN32(case ELEMENT_TYPE_I:)
+ IN_WIN32(case ELEMENT_TYPE_U:)
+ {
+ DWORD *pDest = (DWORD*)(m_newObject->GetData() + dstOffset);
+ if (bUseWidenedValue)
+ *pDest = (int) fieldData;
+ else
+ {
+ DWORD *pDword = (DWORD*)(m_currObject->GetData() + srcOffset);
+ *(pDest) = *pDword;
+ }
+ }
+ break;
+ case ELEMENT_TYPE_R8:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ IN_WIN64(case ELEMENT_TYPE_FNPTR:)
+ IN_WIN64(case ELEMENT_TYPE_I:)
+ IN_WIN64(case ELEMENT_TYPE_U:)
+ {
+ INT64 *pDest = (INT64*)(m_newObject->GetData() + dstOffset);
+ if (bUseWidenedValue)
+ *pDest = fieldData;
+ else
+ {
+ INT64 *pLong = (INT64*)(m_currObject->GetData() + srcOffset);
+ *(pDest) = *pLong;
+ }
+ }
+ break;
+ case ELEMENT_TYPE_PTR:
+ {
+ void **pDest = (void**)(m_newObject->GetData() + dstOffset);
+ void **pPtr = (void**)(m_currObject->GetData() + srcOffset);
+ *(pDest) = *pPtr;
+ }
+ break;
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_CLASS: // objectrefs
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_SZARRAY: // single dim, zero
+ case ELEMENT_TYPE_ARRAY: // all other arrays
+ {
+ OBJECTREF *pSrc = (OBJECTREF *)(m_currObject->GetData() + srcOffset);
+ OBJECTREF *pDest = (OBJECTREF *)(m_newObject->GetData() + dstOffset);
+
+ if ((*pSrc) == NULL)
+ break;
+
+ // If no deep copy is required, just copy the reference
+ if (!m_cbInterface->RequiresDeepCopy(*pSrc))
+ {
+ _ASSERTE(GetAppDomain()==m_toDomain);
+ SetObjectReference(pDest, *pSrc, GetAppDomain());
+ break;
+ }
+
+ // Special case String
+ if ((*pSrc)->GetMethodTable() == g_pStringClass)
+ {
+ // Better check the destination really expects a string (or maybe an object).
+ TypeHandle thDstField = LoadExactFieldType(pDstField, m_newObject, m_toDomain);
+ if (thDstField != TypeHandle(g_pStringClass) && thDstField != TypeHandle(g_pObjectClass))
+ COMPlusThrow(kArgumentException, W("Arg_ObjObj"));
+
+ STRINGREF refStr = (STRINGREF) *pSrc;
+ refStr = m_cbInterface->AllocateString(refStr);
+ // Get the dest address again, as a GC might have occurred
+ pDest = (OBJECTREF *)(m_newObject->GetData() + dstOffset);
+ _ASSERTE(GetAppDomain()==m_toDomain);
+ SetObjectReference(pDest, refStr, GetAppDomain());
+
+ break;
+ }
+
+ // Add the object to QOM
+ LOG((LF_REMOTING, LL_INFO1000, "ScanMemberFields. Adding object in field %s to Queue of Objects to be Marshalled.\n", pSrcField->GetName()));
+ ObjectMemberInfo objInfo(pDstField);
+ bFixupNeeded = TRUE;
+ QOM.Enqueue(*pSrc, NULL, NULL, (QueuedObjectInfo *)&objInfo);
+ }
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ TypeHandle th = LoadExactFieldType(pSrcField, m_currObject, m_fromDomain);
+ _ASSERTE(!th.AsMethodTable()->ContainsStackPtr() && "Field types cannot contain stack pointers.");
+
+ TypeHandle thTarget = LoadExactFieldType(pDstField, m_newObject, m_toDomain);
+
+ MethodTable *pValueClassMT = th.AsMethodTable();
+ MethodTable *pValueClassTargetMT = thTarget.AsMethodTable();
+ if (!RemotableMethodInfo::TypeIsConduciveToBlitting(pValueClassMT, pValueClassTargetMT))
+ {
+ // Needs marshalling
+ // We're allocating an object in the "to" domain
+ // using a type from the "from" domain.
+ OBJECTREF refTmpBox = BoxValueTypeInWrongDomain(m_currObject, srcOffset, pValueClassMT);
+
+ // Nullable<T> might return null here. In that case we don't need to do anything,
+ // and a null value would otherwise confuse the fixup queue.
+ if (refTmpBox != NULL)
+ {
+ // Add the object to QOM
+ ObjectMemberInfo objInfo(pDstField);
+ objInfo.SetNeedsUnboxing();
+ bFixupNeeded = TRUE;
+ QOM.Enqueue(refTmpBox, NULL, NULL, (QueuedObjectInfo *)&objInfo);
+ LOG((LF_REMOTING, LL_INFO1000, "ScanMemberFields. Value type field %s has reference type contents. Boxing and adding to QOM.\n", pSrcField->GetName()));
+ }
+ }
+ else
+ {
+ DWORD numBytesToCopy = th.AsMethodTable()->GetNumInstanceFieldBytes();
+ BYTE *pByte = m_currObject->GetData() + srcOffset;
+ BYTE *pDest = m_newObject->GetData() + dstOffset;
+ memcpyNoGCRefs(pDest, pByte, numBytesToCopy);
+ LOG((LF_REMOTING, LL_INFO1000, "ScanMemberFields. Value type field %s has no reference type contents. Blitting.\n", pSrcField->GetName()));
+ }
+ }
+ break;
+ default:
+ _ASSERTE(!"Unknown element type seen in ObjectClone::ScanMemberFields");
+ break;
+ }
+
+ return bFixupNeeded ? 1 : 0;
+}
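+
+// Widening sketch for the type-mismatch path in CloneField (field types
+// illustrative): a source 'short' field cloned into a destination 'int' field
+// takes the bUseWidenedValue route, HandleFieldTypeMismatch returns the value
+// widened into an ARG_SLOT, and the ELEMENT_TYPE_I4 case stores it back with
+// the '(int)' narrowing cast.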
+
+BOOL ObjectClone::AreTypesEmittedIdentically(MethodTable *pMT1, MethodTable *pMT2)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Identical here means that both types have the same hierarchy (depth and names match) and that each level of the hierarchy has
+ // the same fields (by name) at the same index.
+ // We're going to be called quite frequently (once per call to ScanMemberFields) so until we're convinced that caching this
+ // information is worth it we'll just compute the fast cases here and let the rest fall through to the slower technique. The
+ // fast check is that the types are shared and identical or that they're loaded from the same file (in which case we have to be
+ // a little more paranoid and check up the hierarchy).
+ if (pMT1 == pMT2)
+ return TRUE;
+
+ // While the current level of the type is loaded from the same file...
+ // Note that we used to check that the assemblies were the same; now we're more paranoid and check the actual modules scoping
+ // the type are identical. This closes a security hole where identically named types in different modules of the same assembly
+ // could cause the wrong type to be loaded in the server context allowing violation of the type system.
+ while (pMT1->GetModule()->GetFile()->Equals(pMT2->GetModule()->GetFile()))
+ {
+ // Inspect the parents.
+ pMT1 = pMT1->GetParentMethodTable();
+ pMT2 = pMT2->GetParentMethodTable();
+
+ // If the parents are the same shared type (e.g. Object), then we've found a match.
+ if (pMT1 == pMT2)
+ return TRUE;
+
+ // Else check if one of the hierarchies has run out before the other (and therefore can't be equivalent).
+ if (pMT1 == NULL || pMT2 == NULL)
+ return FALSE;
+ }
+
+ return FALSE;
+}
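+
+// Walk example (type names illustrative): if both domains loaded
+// 'Derived : Base' from the same file, the loop compares the modules scoping
+// Derived (equal), steps to each Base, and terminates either at the shared
+// System.Object MethodTable (match) or at a module mismatch or NULL parent
+// (no match).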
+
+BOOL AreTypesEquivalent(MethodTable *pMT1, MethodTable *pMT2)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ // Equivalent here is quite a weak predicate. All it means is that the types have the same (fully assembly qualified) name. The
+ // derivation hierarchy is not inspected at all.
+ StackSString szType1;
+ StackSString szType2;
+
+ TypeString::AppendType(szType1, TypeHandle(pMT1), TypeString::FormatNamespace |
+ TypeString::FormatFullInst |
+ TypeString::FormatAssembly |
+ TypeString::FormatNoVersion);
+ TypeString::AppendType(szType2, TypeHandle(pMT2), TypeString::FormatNamespace |
+ TypeString::FormatFullInst |
+ TypeString::FormatAssembly |
+ TypeString::FormatNoVersion);
+
+ return szType1.Equals(szType2);
+}
+
+PtrHashMap *CrossDomainFieldMap::s_pFieldMap = NULL;
+SimpleRWLock *CrossDomainFieldMap::s_pFieldMapLock = NULL;
+
+BOOL CrossDomainFieldMap::CompareFieldMapEntry(UPTR val1, UPTR val2)
+{
+ CONTRACTL {
+ MODE_ANY;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
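+ // (PtrHashMap hands the stored entry to the compare callback shifted right by one bit,
+ // so shift it back to recover the FieldMapEntry pointer.)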
+ CrossDomainFieldMap::FieldMapEntry *pEntry1 = (CrossDomainFieldMap::FieldMapEntry *)(val1 << 1);
+ CrossDomainFieldMap::FieldMapEntry *pEntry2 = (CrossDomainFieldMap::FieldMapEntry *)val2;
+
+ if (pEntry1->m_pSrcMT == pEntry2->m_pSrcMT &&
+ pEntry1->m_pDstMT == pEntry2->m_pDstMT)
+ return TRUE;
+
+ return FALSE;
+}
+
+CrossDomainFieldMap::FieldMapEntry::FieldMapEntry(MethodTable *pSrcMT, MethodTable *pDstMT, FieldDesc **pFieldMap)
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_pSrcMT = pSrcMT;
+ m_pDstMT = pDstMT;
+ m_pFieldMap = pFieldMap;
+ BaseDomain *pSrcDomain = pSrcMT->GetDomain();
+ m_dwSrcDomain = pSrcDomain->IsAppDomain() ? ((AppDomain*)pSrcDomain)->GetId() : ADID(0);
+ BaseDomain *pDstDomain = pDstMT->GetDomain();
+ m_dwDstDomain = pDstDomain->IsAppDomain() ? ((AppDomain*)pDstDomain)->GetId() : ADID(0);
+}
+
+static BOOL IsOwnerOfRWLock(LPVOID lock)
+{
+ // @TODO - SimpleRWLock does not have knowledge of which thread gets the writer
+ // lock, so there's no way to verify ownership here
+ return TRUE;
+}
+
+// Remove any entries in the table that refer to an appdomain that is no longer live.
+void CrossDomainFieldMap::FlushStaleEntries()
+{
+ if (s_pFieldMapLock == NULL || s_pFieldMap == NULL)
+ return;
+
+ SimpleWriteLockHolder swlh(s_pFieldMapLock);
+
+ bool fDeletedEntry = false;
+ PtrHashMap::PtrIterator iter = s_pFieldMap->begin();
+ while (!iter.end())
+ {
+ FieldMapEntry *pEntry = (FieldMapEntry *)iter.GetValue();
+ AppDomainFromIDHolder adFrom(pEntry->m_dwSrcDomain, TRUE);
+ AppDomainFromIDHolder adTo(pEntry->m_dwDstDomain, TRUE);
+ if (adFrom.IsUnloaded() ||
+ adTo.IsUnloaded()) //we do not use ptr for anything
+ {
+#ifdef _DEBUG
+ LPVOID pDeletedEntry =
+#endif
+ s_pFieldMap->DeleteValue(pEntry->GetHash(), pEntry);
+ _ASSERTE(pDeletedEntry == pEntry);
+ delete pEntry;
+ fDeletedEntry = true;
+ }
+ ++iter;
+ }
+
+ if (fDeletedEntry)
+ s_pFieldMap->Compact();
+}
+
+FieldDesc **CrossDomainFieldMap::LookupOrCreateFieldMapping(MethodTable *pDstMT, MethodTable *pSrcMT)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ // We lazily allocate the reader/writer lock used to synchronize access to the hash.
+ if (s_pFieldMapLock == NULL)
+ {
+ void *pLockSpace = SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(SimpleRWLock)));
+ SimpleRWLock *pLock = new (pLockSpace) SimpleRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT);
+
+ if (FastInterlockCompareExchangePointer(&s_pFieldMapLock, pLock, NULL) != NULL)
+ // We lost the race, give up our copy.
+ SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->BackoutMem(pLockSpace, sizeof(SimpleRWLock));
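+ // (The lock was placement-new'ed on loader-heap memory, so returning the raw
+ // allocation is the only cleanup the losing thread performs.)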
+ }
+
+ // Now we have a lock we can use to synchronize the remainder of the init.
+ if (s_pFieldMap == NULL)
+ {
+ SimpleWriteLockHolder swlh(s_pFieldMapLock);
+
+ if (s_pFieldMap == NULL)
+ {
+ PtrHashMap *pMap = new (SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()) PtrHashMap();
+ LockOwner lock = {s_pFieldMapLock, IsOwnerOfRWLock};
+ pMap->Init(32, CompareFieldMapEntry, TRUE, &lock);
+ s_pFieldMap = pMap;
+ }
+ }
+ else
+ {
+ // Try getting an existing value first.
+
+ FieldMapEntry sEntry(pSrcMT, pDstMT, NULL);
+
+ SimpleReadLockHolder srlh(s_pFieldMapLock);
+ FieldMapEntry *pFound = (FieldMapEntry *)s_pFieldMap->LookupValue(sEntry.GetHash(), (LPVOID)&sEntry);
+ if (pFound != (FieldMapEntry *)INVALIDENTRY)
+ return pFound->m_pFieldMap;
+ }
+
+ // We couldn't find an existing entry in the hash. Now we must go through the painstaking process of matching fields in the
+ // destination object to their counterparts in the source object. We build an array of pointers to source field descs ordered by
+ // destination type field index (all the fields for the most derived type first, then all the fields for the second most derived
+ // type etc.).
+ NewArrayHolder<FieldDesc*> pFieldMap(new FieldDesc*[pDstMT->GetNumInstanceFields()]);
+ DWORD dwMapIndex = 0;
+
+ // We start with the source and destination types for the object (which we know are equivalent at least in type name). For each
+ // layer of the type hierarchy for the destination object (from the instance type through to Object) we attempt to locate the
+ // corresponding source type in the hierarchy. This is non-trivial since either source or destination type hierarchies may have
+ // added or removed layers. We ignore extra type layers in the source hierarchy and just concentrate on destination type layers
+ // that introduce instance fields that are not marked NotSerializable. For each such layer we first locate the corresponding
+ // source layer (via fully qualified type name) and then map each serialized (and possibly optional) destination field to the
+ // corresponding source field (again by name). We don't allow a field to move around the type hierarchy (i.e. a field defined in
+ // the base class in one version can't move to a derived type in later versions and be recognized as the original field).
+ // Allowing this would introduce all sorts of ambiguity problems (consider the case of private fields all with the same name
+ // implemented at every layer of the type hierarchy).
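+ //
+ // Illustrative layout of the map we build (hypothetical two-level hierarchy):
+ // pFieldMap[0..k) = source FieldDescs for the most derived destination type,
+ // pFieldMap[k..n) = source FieldDescs for its base type, with NULL entries for
+ // destination fields that will simply keep their default values.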
+
+ bool fFirstPass = true;
+ MethodTable *pCurrDstMT = pDstMT;
+ MethodTable *pCurrSrcMT = pSrcMT;
+ while (pCurrDstMT)
+ {
+ DWORD numInstanceFields = pCurrDstMT->GetNumIntroducedInstanceFields();
+
+ // Skip destination types with no instance fields to clone.
+ if (numInstanceFields == 0)
+ {
+ pCurrDstMT = pCurrDstMT->GetParentMethodTable();
+ // Only safe to skip the source type as well on the first pass (the source version may have eliminated this level of
+ // the type hierarchy).
+ if (fFirstPass)
+ pCurrSrcMT = pCurrSrcMT->GetParentMethodTable();
+ fFirstPass = false;
+ continue;
+ }
+
+ // We need to synchronize the source type with the destination type. This means skipping any source types in the
+ // hierarchy that the destination doesn't know about.
+ MethodTable *pCandidateMT = pCurrSrcMT;
+ while (pCandidateMT)
+ {
+ if (fFirstPass || pCandidateMT == pCurrDstMT || AreTypesEquivalent(pCandidateMT, pCurrDstMT))
+ {
+ // Skip intermediate source types (the destination type didn't know anything about them, so they're surplus
+ // to requirements).
+ pCurrSrcMT = pCandidateMT;
+ break;
+ }
+
+ pCandidateMT = pCandidateMT->GetParentMethodTable();
+ }
+
+#ifdef OBJECT_CLONER_STRICT_MODE
+ // If there's no candidate source type equivalent to the current destination type we need to prove that the destination
+ // type has no mandatory instance fields or throw an exception (since there's no place to fetch the field values from).
+ if (pCandidateMT == NULL)
+ {
+ FieldDesc *pFields = pCurrDstMT->GetApproxFieldDescListRaw();
+
+ for (DWORD i = 0; i < numInstanceFields; i++)
+ {
+ if (pFields[i].IsNotSerialized() || pFields[i].IsOptionallySerialized())
+ {
+ pFieldMap[dwMapIndex++] = NULL;
+ continue;
+ }
+
+ // We've found a field that must be cloned but have no corresponding source-side type to clone it from. Raise an
+ // exception.
+ ThrowMissingFieldException(&pFields[i]);
+ }
+
+ // If we get here we know the current destination type level was effectively a no-op. Move onto the next level.
+ pCurrDstMT = pCurrDstMT->GetParentMethodTable();
+ fFirstPass = false;
+ continue;
+ }
+#else
+ // In lax matching mode we can ignore all fields, even those not marked optional. So the lack of an equivalent type in the
+ // source hierarchy doesn't bother us. Mark all fields as having a default value and then move onto the next level in the
+ // type hierarchy.
+ if (pCandidateMT == NULL)
+ {
+ for (DWORD i = 0; i < numInstanceFields; i++)
+ pFieldMap[dwMapIndex++] = NULL;
+
+ pCurrDstMT = pCurrDstMT->GetParentMethodTable();
+ fFirstPass = false;
+ continue;
+ }
+#endif
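+ // (To summarize the two modes: strict mode, under OBJECT_CLONER_STRICT_MODE, throws for any
+ // mandatory field with no source counterpart; lax mode silently leaves such fields at their
+ // default values.)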
+
+ // If we get here we have equivalent types in pCurrDstMT and pCurrSrcMT. Now we need to locate the source field desc
+ // corresponding to every mandatory (and possibly optional) field in the destination type and record it in the field map.
+ DWORD numSrcFields = pCurrSrcMT->GetNumIntroducedInstanceFields();
+ DWORD numDstFields = pCurrDstMT->GetNumIntroducedInstanceFields();
+
+ FieldDesc *pDstFields = pCurrDstMT->GetApproxFieldDescListRaw();
+ FieldDesc *pSrcFields = pCurrSrcMT->GetApproxFieldDescListRaw();
+
+ for (DWORD i = 0; i < numDstFields; i++)
+ {
+ // Non-serialized destination fields aren't filled in from source types.
+ if (pDstFields[i].IsNotSerialized())
+ {
+ pFieldMap[dwMapIndex++] = NULL;
+ continue;
+ }
+
+ // Go look for a field in the source type with the same name.
+ LPCUTF8 szDstFieldName = pDstFields[i].GetName();
+ DWORD j;
+ for (j = 0; j < numSrcFields; j++)
+ {
+ LPCUTF8 szSrcFieldName = pSrcFields[j].GetName();
+ if (strcmp(szDstFieldName, szSrcFieldName) == 0)
+ {
+ // Check that the field isn't marked NotSerialized (if it is then it's invisible to the cloner).
+ if (pSrcFields[j].IsNotSerialized())
+ j = numSrcFields;
+ break;
+ }
+ }
+
+#ifdef OBJECT_CLONER_STRICT_MODE
+ // If we didn't find a corresponding field it might not be fatal; the field could be optionally serializable from the
+ // destination type's point of view.
+ if (j == numSrcFields)
+ {
+ if (pDstFields[i].IsOptionallySerialized())
+ {
+ pFieldMap[dwMapIndex++] = NULL;
+ continue;
+ }
+ // The field was required. Throw an exception.
+ ThrowMissingFieldException(&pDstFields[i]);
+ }
+#else
+ // In lax matching mode we can ignore all fields, even those not marked optional. Simply mark this field as having the
+ // default value.
+ if (j == numSrcFields)
+ {
+ pFieldMap[dwMapIndex++] = NULL;
+ continue;
+ }
+#endif
+
+ // Otherwise we found matching fields (in name at least, type processing is done later).
+ pFieldMap[dwMapIndex++] = &pSrcFields[j];
+ }
+
+ pCurrDstMT = pCurrDstMT->GetParentMethodTable();
+ pCurrSrcMT = pCurrSrcMT->GetParentMethodTable();
+ fFirstPass = false;
+ }
+
+ _ASSERTE(dwMapIndex == pDstMT->GetNumInstanceFields());
+
+ // Now we have a field map we should insert it into the hash.
+ NewHolder<FieldMapEntry> pEntry(new FieldMapEntry(pSrcMT, pDstMT, pFieldMap));
+ PREFIX_ASSUME(pEntry != NULL);
+ pFieldMap.SuppressRelease();
+
+ SimpleWriteLockHolder swlh(s_pFieldMapLock);
+
+ UPTR key = pEntry->GetHash();
+
+ FieldMapEntry *pFound = (FieldMapEntry *)s_pFieldMap->LookupValue(key, (LPVOID)pEntry);
+ if (pFound == (FieldMapEntry *)INVALIDENTRY)
+ {
+ s_pFieldMap->InsertValue(key, (LPVOID)pEntry);
+ pEntry.SuppressRelease();
+ return pFieldMap;
+ }
+ else
+ return pFound->m_pFieldMap;
+}
+
+ARG_SLOT ObjectClone::HandleFieldTypeMismatch(CorElementType dstType, CorElementType srcType, void *pData, MethodTable *pSrcMT)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END
+ _ASSERTE(m_context != ObjectFreezer);
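+ // CreatePrimitiveValue performs the legal primitive widening here (e.g. an I4 source
+ // field feeding an I8 destination slot) and rejects invalid coercions.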
+ ARG_SLOT data = 0;
+ InvokeUtil::CreatePrimitiveValue(dstType, srcType, pData, pSrcMT, &data);
+ return data;
+}
+
+void ObjectClone::ScanISerializableMembers(DWORD IObjRefTSOIndex, DWORD ISerTSOIndex, DWORD BoxedValTSOIndex, PTRARRAYREF refValues)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(m_context != ObjectFreezer);
+ // Queue the non-primitive types
+ DWORD numFieldsToBeMarshalled = 0;
+ PTRARRAYREF refNewValues = NULL;
+
+ LOG((LF_REMOTING, LL_INFO1000, "ScanISerializableMembers. Scanning members of ISerializable type object.\n"));
+ GCPROTECT_BEGIN(refValues);
+
+ refNewValues = (PTRARRAYREF) AllocateObjectArray(refValues->GetNumComponents(), g_pObjectClass, FALSE);
+
+ _ASSERTE(refNewValues != NULL);
+
+ for (DWORD index = 0; index < refValues->GetNumComponents(); index++)
+ {
+ OBJECTREF refField = refValues->GetAt(index);
+ if (refField == NULL)
+ continue;
+
+ if (CorTypeInfo::IsPrimitiveType(refField->GetTypeHandle().GetSignatureCorElementType()) ||
+ refField->GetMethodTable() == g_pStringClass)
+ {
+ refNewValues->SetAt(index, refField);
+ continue;
+ }
+
+ ISerializableMemberInfo isInfo(ISerTSOIndex, index);
+ QOM.Enqueue(refField, NULL, NULL, (QueuedObjectInfo *) &isInfo);
+ numFieldsToBeMarshalled++;
+ refNewValues->SetAt(index, NULL);
+ LOG((LF_REMOTING, LL_INFO1000, "ScanISerializableMembers. Member at index %d is reference type. Adding to QOM.\n", index));
+ }
+ GCPROTECT_END();
+
+ // Update TSO
+ OBJECTREF refNames = NULL, refFields = NULL;
+ QueuedObjectInfo *pDummy;
+ OBJECTREF newObj;
+ newObj = TSO.GetAt(ISerTSOIndex, &refNames, &refFields, &pDummy);
+ _ASSERTE(newObj == m_newObject);
+
+ TSO.SetAt(ISerTSOIndex, m_newObject, refNames, refNewValues, pDummy);
+
+ if (numFieldsToBeMarshalled > 0)
+ {
+ ParentInfo fxInfo(numFieldsToBeMarshalled);
+ fxInfo.SetIsISerializableInstance();
+ fxInfo.SetIObjRefIndexIntoTSO(IObjRefTSOIndex);
+ fxInfo.SetISerIndexIntoTSO(ISerTSOIndex);
+ fxInfo.SetBoxedValIndexIntoTSO(BoxedValTSOIndex);
+ QOF.Enqueue(m_newObject, NULL, NULL, (QueuedObjectInfo *) &fxInfo);
+ LOG((LF_REMOTING, LL_INFO1000, "ScanISerializableMembers. Current object had total of %d reference type fields. Adding to QOF.\n", numFieldsToBeMarshalled));
+ // Delay calling any OnDeserialized callbacks until the end of the cloning operation (it's difficult to tell when all the
+ // children have been deserialized).
+ if (HasVtsCallbacks(m_newObject->GetMethodTable(), RemotingVtsInfo::VTS_CALLBACK_ON_DESERIALIZED))
+ VDC.Enqueue(m_newObject, NULL, NULL, NULL);
+ if (m_cbInterface->RequiresDeserializationCallback(m_newObject->GetMethodTable()))
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "ScanISerializableMembers. Adding object to Table of IDeserialization Callbacks\n"));
+ QueuedObjectInfo noInfo;
+ TDC.Enqueue(m_newObject, NULL, NULL, &noInfo);
+ }
+ }
+ else
+ {
+ // This is effectively a leaf node (no complex children) so if the type has a callback for OnDeserialized we'll deliver it
+ // now. This fixes callback ordering for a few more edge cases (e.g. VSW 415611) and is reasonably cheap. We can never do a
+ // perfect job (in the presence of object graph cycles) and a near perfect job (intuitively ordered callbacks for acyclic
+ // object graphs) is prohibitively expensive; so we're stuck with workarounds like this.
+ InvokeVtsCallbacks(m_newObject, RemotingVtsInfo::VTS_CALLBACK_ON_DESERIALIZED, m_toDomain);
+ if (m_cbInterface->RequiresDeserializationCallback(m_newObject->GetMethodTable()))
+ MakeIDeserializationCallback(m_newObject);
+ }
+}
+
+void ObjectClone::ScanArrayMembers()
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END
+#ifdef _DEBUG
+ MethodTable *pCurrMT = m_currObject->GetMethodTable();
+ _ASSERTE(pCurrMT && pCurrMT->IsArray());
+ MethodTable *pNewMT = m_newObject->GetMethodTable();
+ _ASSERTE(pNewMT && pNewMT->IsArray());
+#endif
+
+ LOG((LF_REMOTING, LL_INFO1000, "ScanArrayMembers. Scanning members of array object.\n"));
+ BASEARRAYREF refFromArray = (BASEARRAYREF) m_currObject;
+ BASEARRAYREF refToArray = (BASEARRAYREF) m_newObject;
+
+ GCPROTECT_BEGIN(refFromArray);
+ GCPROTECT_BEGIN(refToArray);
+
+ TypeHandle toArrayElementType = refToArray->GetArrayElementTypeHandle();
+ DWORD numComponents = refFromArray->GetNumComponents();
+ MethodTable *pArrayMT = refFromArray->GetMethodTable();
+
+ DWORD rank = pArrayMT->GetRank();
+ DWORD dwOffset = 0;
+
+ DWORD *pIndices = (DWORD*) _alloca(sizeof(DWORD) * rank);
+ VOID *pTemp = _alloca(sizeof(NDimArrayMemberInfo) + rank * sizeof(DWORD));
+ NDimArrayMemberInfo *pArrInfo = new (pTemp) NDimArrayMemberInfo(rank);
+
+ bool boxingObjects = (pArrayMT->GetArrayElementType() == ELEMENT_TYPE_VALUETYPE);
+
+ // Must enter the from domain if we are going to be allocating any non-agile boxes
+ ENTER_DOMAIN_PTR_PREDICATED(m_fromDomain,ADV_RUNNINGIN,boxingObjects);
+
+ if (boxingObjects)
+ {
+ pArrInfo->SetNeedsUnboxing();
+
+ // We may be required to activate value types of array elements, since we
+ // are going to box them. Hoist out the required domain transition and
+ // activation.
+
+ MethodTable *pMT = ((BASEARRAYREF)m_currObject)->GetArrayElementTypeHandle().GetMethodTable();
+ pMT->EnsureInstanceActive();
+ }
+
+ DWORD numFixupsNeeded = 0;
+ for (DWORD i = 0; i < numComponents; i++)
+ {
+ // The array could be huge. To avoid keeping a pending GC waiting (and maybe timing out) we're going to pulse the GC mode
+ // every so often. Do this more frequently in debug builds, where each iteration through this loop takes considerably
+ // longer.
+#ifdef _DEBUG
+#define COPY_CYCLES 1024
+#else
+#define COPY_CYCLES 8192
+#endif
+ if ((i % COPY_CYCLES) == (COPY_CYCLES - 1))
+ GetThread()->PulseGCMode();
+
+ const INT32 *pBoundsPtr = refFromArray->GetBoundsPtr();
+ DWORD findIndices = i;
+ for (DWORD rankIndex = rank; rankIndex > 0; rankIndex--)
+ {
+ DWORD numElementsInDimension = pBoundsPtr[rankIndex - 1];
+ DWORD quotient = findIndices / numElementsInDimension;
+ DWORD remainder = findIndices % numElementsInDimension;
+ pIndices[rankIndex - 1] = remainder;
+ findIndices = quotient;
+ }
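+ // Worked example (illustrative): for a 2x3 array (bounds {2, 3}) and i == 4, the loop
+ // above yields 4 % 3 == 1 for the last dimension, then 1 % 2 == 1 for the first,
+ // i.e. indices {1, 1}.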
+
+ pArrInfo->SetIndices(pIndices);
+
+ Object *rv = GetObjectFromArray((BASEARRAYREF *)&m_currObject, dwOffset);
+ if (rv != NULL)
+ {
+ OBJECTREF oRef = ObjectToOBJECTREF(rv);
+
+ if (oRef->GetMethodTable() == g_pStringClass && m_context != ObjectFreezer)
+ {
+ OBJECTREF* pElem = (OBJECTREF*)(refToArray->GetDataPtr() + (dwOffset * pArrayMT->GetComponentSize()));
+ SetObjectReference(pElem,oRef,GetAppDomain());
+ }
+ else
+ {
+ // Add the object to QOM
+ numFixupsNeeded++;
+ QOM.Enqueue(oRef, NULL, NULL, pArrInfo);
+ LOG((LF_REMOTING, LL_INFO1000, "ScanArrayMembers. Element at offset %d is reference type. Adding to QOM.\n", dwOffset));
+ }
+ }
+ dwOffset++;
+ }
+
+ if (numFixupsNeeded > 0)
+ {
+ ParentInfo fxInfo(numFixupsNeeded);
+ QOF.Enqueue(m_newObject, NULL, NULL, (QueuedObjectInfo *)&fxInfo);
+ LOG((LF_REMOTING, LL_INFO1000, "ScanArrayMembers. Current object had total of %d reference type fields. Adding to QOF.\n", numFixupsNeeded));
+ }
+
+ END_DOMAIN_TRANSITION;
+
+ GCPROTECT_END();
+ GCPROTECT_END();
+}
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4244)
+#endif // _MSC_VER
+Object *ObjectClone::GetObjectFromArray(BASEARRAYREF* arrObj, DWORD dwOffset)
+{
+ CONTRACTL {
+ THROWS;
+ if ((*arrObj)->GetArrayElementTypeHandle().GetMethodTable()->IsValueType()) GC_TRIGGERS; else GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ // Get the type of the element...
+ switch ((*arrObj)->GetArrayElementType()) {
+
+ case ELEMENT_TYPE_VOID:
+ return NULL;
+
+ case ELEMENT_TYPE_CLASS: // Class
+ case ELEMENT_TYPE_SZARRAY: // Single Dim, Zero
+ case ELEMENT_TYPE_ARRAY: // General Array
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_OBJECT:
+ {
+ _ASSERTE((*arrObj)->GetComponentSize() == sizeof(OBJECTREF));
+ BYTE* pData = ((BYTE*)(*arrObj)->GetDataPtr()) + (dwOffset * sizeof(OBJECTREF));
+ return *(Object **)pData;
+ }
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ MethodTable *pMT = (*arrObj)->GetArrayElementTypeHandle().GetMethodTable();
+ WORD wComponentSize = (*arrObj)->GetComponentSize();
+ BYTE* pData = ((BYTE*)(*arrObj)->GetDataPtr()) + (dwOffset * wComponentSize);
+ return OBJECTREFToObject(pMT->Box(pData));
+ }
+ case ELEMENT_TYPE_BOOLEAN: // boolean
+ case ELEMENT_TYPE_I1: // sbyte
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2: // short
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_CHAR: // char
+ case ELEMENT_TYPE_I4: // int
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8: // long
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4: // float
+ case ELEMENT_TYPE_R8: // double
+ case ELEMENT_TYPE_PTR:
+ {
+ // Note that this is a cloned version of the value class case above for performance
+
+ // Watch for GC here. We allocate the object and then
+ // grab the void* to the data we are going to copy.
+ MethodTable *pMT = (*arrObj)->GetArrayElementTypeHandle().GetMethodTable();
+ OBJECTREF obj = ::AllocateObject(pMT);
+ WORD wComponentSize = (*arrObj)->GetComponentSize();
+ BYTE* pData = ((BYTE*)(*arrObj)->GetDataPtr()) + (dwOffset * wComponentSize);
+ CopyValueClassUnchecked(obj->UnBox(), pData, (*arrObj)->GetArrayElementTypeHandle().GetMethodTable());
+ return OBJECTREFToObject(obj);
+ }
+
+ case ELEMENT_TYPE_END:
+ default:
+ _ASSERTE(!"Unknown array element type");
+ }
+
+ _ASSERTE(!"Should never get here");
+ return NULL;
+}
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif // _MSC_VER: warning C4244
+
+
+void ObjectClone::CompleteValueTypeFields(OBJECTREF newObj, OBJECTREF refParent, QueuedObjectInfo *objInfo)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END
+
+#ifdef _DEBUG
+ {
+ SString ssTypeName;
+ SString ssParentTypeName;
+ newObj->GetMethodTable()->_GetFullyQualifiedNameForClassNestedAware(ssTypeName);
+ refParent->GetMethodTable()->_GetFullyQualifiedNameForClassNestedAware(ssParentTypeName);
+ LOG((LF_REMOTING, LL_INFO1000, "CompleteValueTypeFields. Fixing up value type field of type %S into parent of type %S.\n",
+ ssTypeName.GetUnicode(), ssParentTypeName.GetUnicode()));
+ }
+#endif
+
+ ValueTypeInfo *pValTypeInfo = (ValueTypeInfo *)objInfo;
+ QueuedObjectInfo *pFixupInfo = pValTypeInfo->GetFixupInfo();
+ PREFIX_ASSUME(pFixupInfo != NULL);
+
+ _ASSERTE(pFixupInfo->NeedsUnboxing());
+ if (pFixupInfo->IsArray())
+ {
+ m_newObject = newObj;
+ HandleArrayFixup(refParent, pFixupInfo);
+ }
+ else
+ {
+ GCPROTECT_BEGIN(refParent);
+ GCPROTECT_BEGIN(newObj);
+ ObjectMemberInfo *pObjInfo = (ObjectMemberInfo *)pFixupInfo;
+ FieldDesc *pTargetField = pObjInfo->GetFieldDesc();
+
+ TypeHandle fldType = LoadExactFieldType(pTargetField, refParent, m_toDomain);
+ void *pDest = refParent->GetData() + pTargetField->GetOffset();
+ _ASSERTE(GetAppDomain()==m_toDomain);
+
+ if (!fldType.GetMethodTable()->UnBoxInto(pDest, newObj))
+ COMPlusThrow(kArgumentException,W("Arg_ObjObj"));
+
+ GCPROTECT_END();
+ GCPROTECT_END();
+ }
+ pValTypeInfo->SetHasBeenProcessed();
+}
+
+void ObjectClone::CompleteSpecialObjects()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF nextObj = NULL;
+ OBJECTREF refNames = NULL;
+ OBJECTREF refValues = NULL;
+ SpecialObjectInfo *pObjInfo = NULL;
+
+ GCPROTECT_BEGIN(refNames);
+ GCPROTECT_BEGIN(refValues);
+
+ DWORD skippedObjects = 0;
+ DWORD numLoops = 0;
+
+ if (TSO.GetCount() == 0)
+ goto EarlyExit;
+
+ LOG((LF_REMOTING, LL_INFO1000, "CompleteSpecialObjects. Beginning.\n"));
+ do
+ {
+ skippedObjects = 0;
+ numLoops++;
+ DWORD index = 0;
+ TSO.BeginEnumeration(&index);
+ while((nextObj = TSO.GetNext(&index, &refNames, &refValues, (QueuedObjectInfo **)&pObjInfo)) != NULL)
+ {
+ if (pObjInfo->HasBeenProcessed())
+ continue;
+
+ if (pObjInfo->IsISerializableInstance())
+ {
+ _ASSERTE(m_context != ObjectFreezer);
+
+ LOG((LF_REMOTING, LL_INFO1000, "CompleteSpecialObjects. ISerializable instance at index %d.\n", index));
+ ISerializableInstanceInfo *iserInfo = (ISerializableInstanceInfo *)pObjInfo;
+ if (iserInfo->GetNumSpecialMembers() > 0)
+ {
+ if (CheckForUnresolvedMembers(iserInfo))
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "CompleteSpecialObjects. Skipping ISerializable instance due to unresolved members.\n"));
+ skippedObjects++;
+ continue;
+ }
+ }
+ CompleteISerializableObject(nextObj, refNames, refValues, iserInfo);
+ }
+ else if (pObjInfo->IsIObjRefInstance())
+ {
+ _ASSERTE(m_context != ObjectFreezer);
+
+ LOG((LF_REMOTING, LL_INFO1000, "CompleteSpecialObjects. IObjectReference instance at index %d.\n", index));
+ IObjRefInstanceInfo *iorInfo = (IObjRefInstanceInfo *)pObjInfo;
+ if (iorInfo->GetNumSpecialMembers() > 0 ||
+ iorInfo->GetISerTSOIndex() != (DWORD) -1)
+ {
+ if (CheckForUnresolvedMembers(iorInfo))
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "CompleteSpecialObjects. Skipping IObjectReference instance due to unresolved members.\n"));
+ skippedObjects++;
+ continue;
+ }
+ }
+ if (!CompleteIObjRefObject(nextObj, index, iorInfo))
+ skippedObjects++;
+ }
+ else
+ {
+ _ASSERTE(pObjInfo->IsBoxedObject());
+ LOG((LF_REMOTING, LL_INFO1000, "CompleteSpecialObjects. Boxed valuetype instance at index %d.\n", index));
+ ValueTypeInfo *valTypeInfo = (ValueTypeInfo *)pObjInfo;
+ if (valTypeInfo->GetNumSpecialMembers() > 0 ||
+ valTypeInfo->GetISerTSOIndex() != (DWORD) -1 ||
+ valTypeInfo->GetIObjRefTSOIndex() != (DWORD) -1)
+ {
+ if (CheckForUnresolvedMembers(valTypeInfo))
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "CompleteSpecialObjects. Skipping boxed value instance due to unresolved members.\n"));
+ skippedObjects++;
+ continue;
+ }
+ }
+ // If we were waiting on an IObjRef fixup then the target object will have changed.
+ if (valTypeInfo->GetIObjRefTSOIndex() != (DWORD) -1)
+ {
+ OBJECTREF dummy1, dummy2;
+ QueuedObjectInfo *dummy3;
+ nextObj = TSO.GetAt(valTypeInfo->GetIObjRefTSOIndex(), &dummy1, &dummy2, &dummy3);
+ }
+ CompleteValueTypeFields(nextObj, refNames, valTypeInfo);
+ }
+
+ }
+ } while (skippedObjects > 0 && numLoops < 100);
+
+ if (skippedObjects > 0 && numLoops >= 100)
+ {
+ COMPlusThrow(kSerializationException, IDS_SERIALIZATION_UNRESOLVED_SPECIAL_OBJECT);
+ }
+EarlyExit: ;
+ GCPROTECT_END();
+ GCPROTECT_END();
+}
+
+BOOL ObjectClone::CheckForUnresolvedMembers(SpecialObjectInfo *splInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
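+ // Blocking rules, summarized (matching the checks below):
+ // - an unresolved IObjRef member blocks any special-object parent;
+ // - an unresolved ISerializable member blocks only IObjRef parents;
+ // - an unresolved boxed member blocks IObjRef and boxed parents;
+ // and on the instance itself ISerializable resolves before IObjRef, which in turn
+ // resolves before unboxing.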
+ BOOL foundUnresolvedMember = FALSE;
+
+ DWORD mappingIndex = splInfo->GetMappingTableIndex();
+ for (DWORD count = 0; count < splInfo->GetNumSpecialMembers(); count++)
+ {
+ DWORD memberIndex = TMappings.GetAt(mappingIndex++);
+ SpecialObjectInfo *pMemberInfo;
+ OBJECTREF dummy1, dummy2, dummy3;
+ dummy1 = TSO.GetAt(memberIndex, &dummy2, &dummy3, (QueuedObjectInfo **)&pMemberInfo);
+ // An unresolved IObjRef member is a blocker for any special object parent
+ if (pMemberInfo->IsIObjRefInstance() && !pMemberInfo->HasBeenProcessed())
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "CheckForUnresolvedMembers. Found unresolved IObjectReference member at index %d.\n", memberIndex));
+ foundUnresolvedMember = TRUE;
+ break;
+ }
+
+ // An unresolved ISer member is a blocker for IObjRef parent
+ if (pMemberInfo->IsISerializableInstance() &&
+ !pMemberInfo->HasBeenProcessed() &&
+ splInfo->IsIObjRefInstance())
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "CheckForUnresolvedMembers. Found unresolved ISerializable member at index %d.\n", memberIndex));
+ foundUnresolvedMember = TRUE;
+ break;
+ }
+
+ // An unresolved boxed object is a blocker for a boxed parent or an IObjRef parent
+ if (pMemberInfo->IsBoxedObject() &&
+ !pMemberInfo->HasBeenProcessed() &&
+ (splInfo->IsIObjRefInstance() || splInfo->IsBoxedObject()))
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "CheckForUnresolvedMembers. Found unresolved boxed valuetype member at index %d.\n", memberIndex));
+ foundUnresolvedMember = TRUE;
+ break;
+ }
+ }
+
+ // Done checking members. Now check if this instance itself needs some processing
+ // If an instance is both ISer and IObj, then ISer should be processed before IObjRef
+ if (!foundUnresolvedMember && splInfo->IsIObjRefInstance())
+ {
+ IObjRefInstanceInfo *pObjRefInfo = (IObjRefInstanceInfo *)splInfo;
+ if (pObjRefInfo->GetISerTSOIndex() != (DWORD) -1)
+ {
+ // Check if the ISer requirements have been met
+ SpecialObjectInfo *pMemberInfo;
+ OBJECTREF dummy1, dummy2, dummy3;
+ dummy1 = TSO.GetAt(pObjRefInfo->GetISerTSOIndex(), &dummy2, &dummy3, (QueuedObjectInfo **)&pMemberInfo);
+ if (!pMemberInfo->HasBeenProcessed())
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "CheckForUnresolvedMembers. This instance is also ISerializable at index %d. Not resolved yet.\n", pObjRefInfo->GetISerTSOIndex()));
+ foundUnresolvedMember = TRUE;
+ }
+ }
+ }
+
+ // If an instance is ISer, IObj and a boxed value type, then ISer,IObj should be processed before unboxing
+ if (!foundUnresolvedMember && splInfo->IsBoxedObject())
+ {
+ ValueTypeInfo *pValTypeInfo = (ValueTypeInfo *)splInfo;
+ if (pValTypeInfo->GetISerTSOIndex() != (DWORD) -1)
+ {
+ // Check if the ISer requirements have been met
+ SpecialObjectInfo *pMemberInfo;
+ OBJECTREF dummy1, dummy2, dummy3;
+ dummy1 = TSO.GetAt(pValTypeInfo->GetISerTSOIndex(), &dummy2, &dummy3, (QueuedObjectInfo **)&pMemberInfo);
+ if (!pMemberInfo->HasBeenProcessed())
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "CheckForUnresolvedMembers. This instance is also ISerializable at index %d. Not resolved yet.\n", pValTypeInfo->GetISerTSOIndex()));
+ foundUnresolvedMember = TRUE;
+ }
+ }
+ if (!foundUnresolvedMember && pValTypeInfo->GetIObjRefTSOIndex() != (DWORD) -1)
+ {
+ // Check if the ISer requirements have been met
+ SpecialObjectInfo *pMemberInfo;
+ OBJECTREF dummy1, dummy2, dummy3;
+ dummy1 = TSO.GetAt(pValTypeInfo->GetIObjRefTSOIndex(), &dummy2, &dummy3, (QueuedObjectInfo **)&pMemberInfo);
+ if (!pMemberInfo->HasBeenProcessed())
+ {
+ LOG((LF_REMOTING, LL_INFO1000, "CheckForUnresolvedMembers. This instance is also IObjectReference at index %d. Not resolved yet.\n", pValTypeInfo->GetIObjRefTSOIndex()));
+ foundUnresolvedMember = TRUE;
+ }
+ }
+ }
+ return foundUnresolvedMember;
+}
+
+void ObjectClone::CompleteISerializableObject(OBJECTREF IserObj, OBJECTREF refNames, OBJECTREF refValues, ISerializableInstanceInfo *iserInfo)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(m_context != ObjectFreezer);
+
+ struct _gc {
+ OBJECTREF IserObj;
+ OBJECTREF refNames;
+ OBJECTREF refValues;
+ OBJECTREF refSerInfo;
+ } gc;
+
+ gc.IserObj = IserObj;
+ gc.refNames = refNames;
+ gc.refValues = refValues;
+ gc.refSerInfo = NULL;
+
+ GCPROTECT_BEGIN(gc);
+
+#ifdef _DEBUG
+ {
+ DefineFullyQualifiedNameForClass();
+ LOG((LF_REMOTING, LL_INFO1000, "CompleteISerializableObject. Completing ISerializable object of type %s.\n",
+ GetFullyQualifiedNameForClassNestedAware(gc.IserObj->GetMethodTable())));
+ }
+#endif
+
+ BOOL bIsBoxed = gc.IserObj->GetMethodTable()->IsValueType();
+
+ // StreamingContextData is an out parameter of the managed callback, so it's passed by reference on all platforms.
+ RuntimeMethodHandle::StreamingContextData context = {0};
+
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__OBJECTCLONEHELPER__PREPARE_DATA);
+
+ DECLARE_ARGHOLDER_ARRAY(args, 4);
+
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.IserObj);
+ args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(gc.refNames);
+ args[ARGNUM_2] = OBJECTREF_TO_ARGHOLDER(gc.refValues);
+ args[ARGNUM_3] = PTR_TO_ARGHOLDER(&context);
+
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD_RETREF(gc.refSerInfo, OBJECTREF, args);
+
+ if (iserInfo->IsTargetNotISerializable())
+ {
+ // Prepare data would have constructed the object already
+ _ASSERTE(gc.refSerInfo == NULL);
+ }
+ else
+ {
+ _ASSERTE(gc.refSerInfo != NULL);
+ MethodTable *pMT = gc.IserObj->GetMethodTable();
+ _ASSERTE(pMT);
+
+ MethodDesc * pCtor;
+
+#ifdef FEATURE_IMPERSONATION
+ // Deal with the WindowsIdentity class specially by calling an internal
+ // serialization constructor; the public one has a security demand that
+ // breaks partial trust scenarios and is too expensive to assert for.
+ if (MscorlibBinder::IsClass(pMT, CLASS__WINDOWS_IDENTITY))
+ pCtor = MscorlibBinder::GetMethod(METHOD__WINDOWS_IDENTITY__SERIALIZATION_CTOR);
+ else
+#endif
+ pCtor = MemberLoader::FindConstructor(pMT, &gsig_IM_SerInfo_StrContext_RetVoid);
+
+ if (pCtor == NULL)
+ {
+ DefineFullyQualifiedNameForClassW();
+ COMPlusThrow(kSerializationException, IDS_SERIALIZATION_CTOR_NOT_FOUND,
+ GetFullyQualifiedNameForClassNestedAwareW(pMT));
+ }
+
+ MethodDescCallSite ctor(pCtor);
+
+ ARG_SLOT argSlots[3];
+ // Nullable<T> does not implement ISerializable.
+ _ASSERTE(!Nullable::IsNullableType(gc.IserObj->GetMethodTable()));
+ argSlots[0] = (bIsBoxed ? (ARG_SLOT)(SIZE_T)(gc.IserObj->UnBox()) : ObjToArgSlot(gc.IserObj));
+ argSlots[1] = ObjToArgSlot(gc.refSerInfo);
+#if defined(_TARGET_X86_) || defined(_TARGET_ARM_)
+ static_assert_no_msg(sizeof(context) == sizeof(ARG_SLOT));
+ argSlots[2] = *(ARG_SLOT*)(&context); // StreamingContext is passed by value on x86 and ARM
+#elif defined(_WIN64)
+ static_assert_no_msg(sizeof(context) > sizeof(ARG_SLOT));
+ argSlots[2] = PtrToArgSlot(&context); // StreamingContext is passed by reference on WIN64
+#else // !_TARGET_X86_ && !_WIN64 && !_TARGET_ARM_
+ PORTABILITY_ASSERT("ObjectClone::CompleteISerializableObject() - NYI on this platform");
+#endif // !_TARGET_X86_ && !_WIN64 && !_TARGET_ARM_
+ ctor.CallWithValueTypes(&argSlots[0]);
+ }
+ iserInfo->SetHasBeenProcessed();
+
+ GCPROTECT_END();
+
+}
+
+// FALSE means the object could not be resolved and more iterations are needed
+BOOL ObjectClone::CompleteIObjRefObject(OBJECTREF IObjRef, DWORD tsoIndex, IObjRefInstanceInfo *iorInfo)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+
+ BOOL bResult = FALSE;
+
+ struct _gc {
+ OBJECTREF IObjRef;
+ OBJECTREF newObj;
+ OBJECTREF refParent;
+ OBJECTREF refFromObj;
+ OBJECTREF resolvedObject;
+ } gc;
+
+ gc.IObjRef = IObjRef;
+ gc.newObj = NULL;
+ gc.refParent = NULL;
+ gc.refFromObj = NULL;
+ gc.resolvedObject = NULL;
+
+ GCPROTECT_BEGIN(gc);
+
+ _ASSERTE(m_context != ObjectFreezer);
+ // First check if this is a repeat object
+ if (iorInfo->IsRepeatObject())
+ {
+ OBJECTREF dummy;
+ dummy = TSO.GetAt(tsoIndex, &gc.refFromObj, &gc.refParent, (QueuedObjectInfo **)&iorInfo);
+ PREFIX_ASSUME(gc.refFromObj != NULL);
+
+ // Look in the Table of Seen objects whether this IObjRef has been resolved
+ int currId;
+ currId = TOS.HasID(gc.refFromObj, &gc.resolvedObject);
+ _ASSERTE(currId != -1);
+
+ MethodTable *pResolvedMT = gc.resolvedObject->GetMethodTable();
+ if (!pResolvedMT->IsTransparentProxy() &&
+ m_cbInterface->IsIObjectReferenceType(pResolvedMT))
+ {
+ bResult = FALSE;
+ }
+ else
+ {
+#ifdef _DEBUG
+ {
+ DefineFullyQualifiedNameForClass();
+ LOG((LF_REMOTING, LL_INFO1000, "CompleteIObjRefObject. Found IObjectReference object of type %s already resolved.\n",
+ GetFullyQualifiedNameForClassNestedAware(gc.IObjRef->GetMethodTable())));
+ }
+#endif
+
+ // Yes, it's been resolved.
+ // Fix the object into its parent (unless it requires unboxing, in which case there's another entry in the TSO ready to
+ // do that).
+ QueuedObjectInfo *pFixupInfo = (QueuedObjectInfo *)iorInfo->GetFixupInfo();
+ PREFIX_ASSUME(pFixupInfo != NULL);
+ if (pFixupInfo->NeedsUnboxing())
+ {
+ TSO.SetAt(tsoIndex, gc.resolvedObject, gc.refFromObj, gc.refParent, iorInfo);
+ iorInfo->SetHasBeenProcessed();
+ bResult = TRUE;
+ }
+ else
+ {
+ if (gc.refParent == NULL)
+ m_topObject = gc.resolvedObject;
+ else
+ {
+ m_newObject = gc.resolvedObject;
+ if (pFixupInfo->NeedsUnboxing())
+ CompleteValueTypeFields(gc.resolvedObject, gc.refParent, pFixupInfo);
+ else
+ Fixup(gc.resolvedObject, gc.refParent, pFixupInfo);
+ }
+ iorInfo->SetHasBeenProcessed();
+ bResult = TRUE;
+ }
+ }
+ }
+ else
+ {
+ MethodTable *pMT = gc.IObjRef->GetMethodTable();
+ _ASSERTE(pMT);
+
+ MethodTable *pItf = MscorlibBinder::GetClass(CLASS__IOBJECTREFERENCE);
+ MethodDesc *pMeth = GetInterfaceMethodImpl(pMT, pItf, 0);
+ MethodDescCallSite method(pMeth, &gc.IObjRef);
+
+ // Ensure the StreamingContext type is loaded. Do not delete this line
+ MethodTable *pMTStreamingContext;
+ pMTStreamingContext = MscorlibBinder::GetClass(CLASS__STREAMING_CONTEXT);
+ _ASSERTE(pMTStreamingContext);
+
+ ARG_SLOT arg[2];
+ arg[0] = ObjToArgSlot(gc.IObjRef);
+
+ RuntimeMethodHandle::StreamingContextData context = { NULL, GetStreamingContextState() };
+#ifdef _WIN64
+ static_assert_no_msg(sizeof(context) > sizeof(ARG_SLOT));
+ arg[1] = PtrToArgSlot(&context);
+#else
+ static_assert_no_msg(sizeof(context) <= sizeof(ARG_SLOT));
+ arg[1] = *(ARG_SLOT*)(&context);
+#endif
+
+ gc.newObj = method.CallWithValueTypes_RetOBJECTREF(&arg[0]);
+
+ INDEBUG(DefineFullyQualifiedNameForClass();)
+
+ _ASSERTE(gc.newObj != NULL);
+ MethodTable *pNewMT = gc.newObj->GetMethodTable();
+ if (!pNewMT->IsTransparentProxy() &&
+ gc.newObj != gc.IObjRef &&
+ m_cbInterface->IsIObjectReferenceType(pNewMT))
+ {
+#ifdef _DEBUG
+ LOG((LF_REMOTING, LL_INFO1000,
+ "CompleteIObjRefObject. GetRealObject on object of type %s returned another IObjectReference. Adding back to TSO.\n",
+ GetFullyQualifiedNameForClassNestedAware(gc.IObjRef->GetMethodTable())));
+#endif
+
+ // Put this back into the table
+ OBJECTREF dummy;
+ dummy = TSO.GetAt(tsoIndex, &gc.refFromObj, &gc.refParent, (QueuedObjectInfo **)&iorInfo);
+ TSO.SetAt(tsoIndex, gc.newObj, gc.refFromObj, gc.refParent, iorInfo);
+ bResult = FALSE;
+ }
+ else
+ {
+#ifdef _DEBUG
+ LOG((LF_REMOTING, LL_INFO1000,
+ "CompleteIObjRefObject. Called GetRealObject on object of type %s. Fixing it up into its parent.\n",
+ GetFullyQualifiedNameForClassNestedAware(gc.IObjRef->GetMethodTable())));
+#endif
+ // Fix the object into its parent (unless it requires unboxing, in which case there's another entry in the TSO ready to
+ // do that).
+ QueuedObjectInfo *pFixupInfo = (QueuedObjectInfo *)iorInfo->GetFixupInfo();
+ OBJECTREF dummy;
+ dummy = TSO.GetAt(tsoIndex, &gc.refFromObj, &gc.refParent, (QueuedObjectInfo **)&iorInfo);
+ if (pFixupInfo->NeedsUnboxing())
+ {
+ TSO.SetAt(tsoIndex, gc.newObj, gc.refFromObj, gc.refParent, iorInfo);
+ iorInfo->SetHasBeenProcessed();
+ bResult = TRUE;
+ }
+ else
+ {
+ if (gc.refParent == NULL)
+ m_topObject = gc.newObj;
+ else
+ {
+ m_newObject = gc.newObj;
+ Fixup(gc.newObj, gc.refParent, pFixupInfo);
+ }
+
+ // Update Table of Seen objects, so that any repeat objects can be updated too
+ TOS.UpdateObject(gc.refFromObj, gc.newObj);
+ iorInfo->SetHasBeenProcessed();
+ bResult = TRUE;
+ }
+ }
+ }
+
+ GCPROTECT_END();
+ return bResult;
+}
+
+void MakeIDeserializationCallback(OBJECTREF refTarget)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ struct _gc {
+ OBJECTREF refTarget;
+ } gc;
+ gc.refTarget = refTarget;
+
+ GCPROTECT_BEGIN(gc);
+
+ MethodTable *pMT = gc.refTarget->GetMethodTable();
+ _ASSERTE(pMT);
+
+ MethodTable *pItf = MscorlibBinder::GetClass(CLASS__IDESERIALIZATIONCB);
+ MethodDesc *pMeth = GetInterfaceMethodImpl(pMT, pItf, 0);
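+ // Slot 0 of IDeserializationCallback is its only method, OnDeserialization(object sender);
+ // the null ARGNUM_1 below supplies the sender argument.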
+ PCODE pCode = pMeth->GetSingleCallableAddrOfCode();
+
+ PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(pCode);
+
+ DECLARE_ARGHOLDER_ARRAY(args, 2);
+
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.refTarget);
+ args[ARGNUM_1] = NULL;
+
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD_NORET(args);
+
+ GCPROTECT_END();
+}
+
+void ObjectClone::CompleteIDeserializationCallbacks()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+ OBJECTREF Dummy1 = NULL, Dummy2 = NULL;
+ QueuedObjectInfo *pObjInfo = NULL;
+
+ if (TDC.GetCount() == 0)
+ return;
+
+ LOG((LF_REMOTING, LL_INFO1000, "CompleteIDeserializationCallbacks. Beginning.\n"));
+
+ OBJECTREF nextObj;
+ while ((nextObj = TDC.Dequeue(&Dummy1, &Dummy2, &pObjInfo)) != NULL)
+ {
+ MakeIDeserializationCallback(nextObj);
+ }
+}
+
+void ObjectClone::CompleteVtsOnDeserializedCallbacks()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF nextObj = NULL, Dummy1 = NULL, Dummy2 = NULL;
+
+ if (VDC.GetCount() == 0)
+ return;
+
+ LOG((LF_REMOTING, LL_INFO1000, "CompleteVtsOnDeserializedCallbacks. Beginning.\n"));
+
+ GCPROTECT_BEGIN(nextObj);
+
+ while ((nextObj = VDC.Dequeue(&Dummy1, &Dummy2, NULL)) != NULL)
+ InvokeVtsCallbacks(nextObj, RemotingVtsInfo::VTS_CALLBACK_ON_DESERIALIZED, m_toDomain);
+
+ GCPROTECT_END();
+}
+
+void ObjectClone::CompleteVtsOnSerializedCallbacks()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF nextObj = NULL, Dummy1 = NULL, Dummy2 = NULL;
+
+ if (VSC.GetCount() == 0)
+ return;
+
+ LOG((LF_REMOTING, LL_INFO1000, "CompleteVtsOnSerializedCallbacks. Beginning.\n"));
+
+ GCPROTECT_BEGIN(nextObj);
+
+ while ((nextObj = VSC.Dequeue(&Dummy1, &Dummy2, NULL)) != NULL)
+ InvokeVtsCallbacks(nextObj, RemotingVtsInfo::VTS_CALLBACK_ON_SERIALIZED, m_fromDomain);
+
+ GCPROTECT_END();
+}
+
+// Does a binary search to find the object with the given id and the record of the given kind
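+// Entries are normally appended in ascending object-id order, so the binary search over
+// GetObjectId() usually succeeds; repeated objects can perturb that order, in which case
+// we fall back to the linear scan at the end.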
+DWORD ObjectClone::FindObjectInTSO(int objId, SpecialObjects kind)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END
+
+ DWORD lowIndex = 0;
+ DWORD highIndex = TSO.GetCount();
+ DWORD midIndex = highIndex / 2;
+ DWORD firstMatch;
+
+ if (highIndex == 0)
+ {
+ _ASSERTE(!"Special Object unexpectedly not found for given object id\n");
+ return 0; // throw ?
+ }
+
+ SpecialObjectInfo *splInfo = NULL;
+ while (true)
+ {
+ OBJECTREF refParent, refFromObj;
+ OBJECTREF dummy;
+ dummy = TSO.GetAt(midIndex, &refFromObj, &refParent, (QueuedObjectInfo **)&splInfo);
+
+ if (objId < splInfo->GetObjectId())
+ {
+ highIndex = midIndex;
+ }
+ else
+ {
+ if (objId == splInfo->GetObjectId())
+ break;
+ lowIndex = midIndex;
+ }
+
+ DWORD oldIndex = midIndex;
+ midIndex = lowIndex + (highIndex - lowIndex)/2;
+ if (oldIndex == midIndex)
+ {
+ // Binary search failed. See comments below
+ goto LinearSearch;
+ }
+ }
+
+ // Found match at midIndex
+ // Find the first record for this obj id
+ firstMatch = midIndex;
+ while(midIndex != 0)
+ {
+ midIndex -= 1;
+ SpecialObjectInfo *pTemp;
+ OBJECTREF refParent, refFromObj;
+ OBJECTREF dummy;
+ dummy = TSO.GetAt(midIndex, &refFromObj, &refParent, (QueuedObjectInfo **)&pTemp);
+ if (pTemp->GetObjectId() != objId)
+ break;
+ else
+ firstMatch = midIndex;
+ }
+
+ // Now look for the right kind of record
+ do
+ {
+ OBJECTREF refParent, refFromObj;
+ OBJECTREF dummy;
+ dummy = TSO.GetAt(firstMatch, &refFromObj, &refParent, (QueuedObjectInfo **)&splInfo);
+
+ if (splInfo->GetObjectId() == objId)
+ {
+ switch(kind)
+ {
+ case ISerializable:
+ if (splInfo->IsISerializableInstance())
+ return firstMatch;
+ break;
+ case IObjectReference:
+ if (splInfo->IsIObjRefInstance())
+ return firstMatch;
+ break;
+ case BoxedValueType:
+ if (splInfo->IsBoxedObject())
+ return firstMatch;
+ break;
+ default:
+ _ASSERTE(!"Unknown enum value in FindObjectInTSO");
+ }
+ }
+
+ firstMatch++;
+
+ } while (firstMatch < TSO.GetCount());
+
+LinearSearch:
+ // If there are multiple objects that are ISer/IObj, and some of them repeat in a certain fashion, then the entries in TSO
+ // are not in sorted order and binary search will fail. In that case, fall back to a linear search. This is probably
+ // reasonable since the TSO should usually be short and in-order (and presumably cheaper than trying to keep the list in
+ // sorted order at all times).
+ DWORD currIndex = 0;
+ for (; currIndex < TSO.GetCount(); currIndex++)
+ {
+ OBJECTREF refParent, refFromObj;
+ OBJECTREF dummy;
+ dummy = TSO.GetAt(currIndex, &refFromObj, &refParent, (QueuedObjectInfo **)&splInfo);
+
+ SpecialObjects foundKind = ISerializable;
+ if (splInfo->IsIObjRefInstance())
+ foundKind = IObjectReference;
+ else if (splInfo->IsBoxedObject())
+ foundKind = BoxedValueType;
+ else
+ _ASSERTE(splInfo->IsISerializableInstance());
+
+ if (objId == splInfo->GetObjectId()
+ && kind == foundKind)
+ return currIndex;
+ }
+
+
+ _ASSERTE(!"Special Object unexpectedly not found for given object id\n");
+ return 0; // throw ?
+}
+
+// This function is effectively a replica of MethodTable::Box. It's replicated to avoid the "GCPROTECT_INTERIOR" that Box uses,
+// which causes some leak detection asserts to go off. This is a controlled leak situation, where we know we're leaking stuff
+// and don't want the asserts.
+OBJECTREF ObjectClone::BoxValueTypeInWrongDomain(OBJECTREF refParent, DWORD offset, MethodTable *pValueTypeMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pValueTypeMT->IsValueType());
+ PRECONDITION(!pValueTypeMT->ContainsStackPtr());
+ }
+ CONTRACTL_END;
+
+ OBJECTREF ref = NULL;
+ void* pSrc = refParent->GetData() + offset;
+ GCPROTECT_BEGININTERIOR(pSrc);
+
+ // We must enter the domain the box will live in (m_fromDomain here) if we are boxing a non-agile type. This of course
+ // has some overhead so we want to avoid it if possible. GetLoaderModule() == mscorlib && CanBeBlittedByObjectCloner is a
+ // conservative first approximation of agile types.
+ ENTER_DOMAIN_PTR_PREDICATED(m_fromDomain, ADV_RUNNINGIN,
+ !pValueTypeMT->GetLoaderModule()->IsSystem() || pValueTypeMT->GetClass()->CannotBeBlittedByObjectCloner());
+
+ ref = pValueTypeMT->FastBox(&pSrc);
+
+ END_DOMAIN_TRANSITION;
+
+ GCPROTECT_END();
+ return ref;
+}
+
+// Returns whether or not a given type requires VTS callbacks of the specified kind.
+BOOL ObjectClone::HasVtsCallbacks(MethodTable *pMT, RemotingVtsInfo::VtsCallbackType eCallbackType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ while (pMT)
+ {
+ if (pMT->HasRemotingVtsInfo())
+ {
+ PTR_RemotingVtsInfo pVtsInfo = pMT->GetRemotingVtsInfo();
+ _ASSERTE(pVtsInfo != NULL);
+
+ if (!pVtsInfo->m_pCallbacks[eCallbackType].IsNull())
+ return TRUE;
+ }
+ pMT = pMT->GetParentMethodTable();
+ }
+
+ return FALSE;
+}
+
+// Calls all of the VTS event methods for a given callback type on the object instance provided (starting at the base class).
+void ObjectClone::InvokeVtsCallbacks(OBJECTREF refTarget, RemotingVtsInfo::VtsCallbackType eCallbackType, AppDomain* pDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ GCPROTECT_BEGIN(refTarget);
+
+ // Quickly walk the target's type hierarchy and determine the number of methods we'll need to call.
+ DWORD cMethods = 0;
+ MethodDesc *pLastCallback;
+ MethodTable *pMT = refTarget->GetMethodTable();
+ while (pMT)
+ {
+ if (pMT->HasRemotingVtsInfo())
+ {
+ PTR_RemotingVtsInfo pVtsInfo = pMT->GetRemotingVtsInfo();
+ _ASSERTE(pVtsInfo != NULL);
+
+ if (!pVtsInfo->m_pCallbacks[eCallbackType].IsNull())
+ {
+ cMethods++;
+
+#ifdef FEATURE_PREJIT
+ // Might have to restore cross module method pointers.
+ Module::RestoreMethodDescPointer(&pVtsInfo->m_pCallbacks[eCallbackType]);
+#endif
+
+ pLastCallback = pVtsInfo->m_pCallbacks[eCallbackType].GetValue();
+ }
+ }
+ pMT = pMT->GetParentMethodTable();
+ }
+
+ // Maybe there's no work to do.
+ if (cMethods == 0)
+ goto Done;
+
+ // Allocate an array to hold the methods to invoke (we do this because the invocation order is the opposite way round from the
+ // way we can easily scan for the methods). We can easily optimize this for the single callback case though.
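+ // (Illustrative: for Derived : Base where both levels register a callback, the upward
+ // scan finds Derived's first, but the table is filled from the back, so pCallbacks ends
+ // up as { Base, Derived } and base-class callbacks run first.)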
+ MethodDesc **pCallbacks = cMethods == 1 ? &pLastCallback : (MethodDesc**)_alloca(cMethods * sizeof(MethodDesc*));
+
+ if (cMethods > 1)
+ {
+ // Walk the type hierarchy again, and this time fill in the methods to call in the correct slot of our callback table.
+ DWORD dwSlotIndex = cMethods;
+ pMT = refTarget->GetMethodTable();
+ while (pMT)
+ {
+ if (pMT->HasRemotingVtsInfo())
+ {
+ PTR_RemotingVtsInfo pVtsInfo = pMT->GetRemotingVtsInfo();
+ _ASSERTE(pVtsInfo != NULL);
+
+ if (!pVtsInfo->m_pCallbacks[eCallbackType].IsNull())
+ pCallbacks[--dwSlotIndex] = pVtsInfo->m_pCallbacks[eCallbackType].GetValue();
+ }
+ pMT = pMT->GetParentMethodTable();
+ }
+ _ASSERTE(dwSlotIndex == 0);
+ }
+
+ bool fSwitchDomains = pDomain != GetAppDomain();
+
+ ENTER_DOMAIN_PTR(pDomain,ADV_RUNNINGIN);
+
+ // If we're calling back into the from domain then reset the execution context to its original state (this will automatically be
+ // popped once we return from this domain again).
+ if (pDomain == m_fromDomain && fSwitchDomains)
+ {
+ Thread *pThread = GetThread();
+ if (pThread->IsExposedObjectSet())
+ {
+ THREADBASEREF refThread = (THREADBASEREF)pThread->GetExposedObjectRaw();
+ refThread->SetExecutionContext(m_fromExecutionContext);
+ }
+ }
+
+ // Remember to adjust this pointer for boxed value types.
+ BOOL bIsBoxed = refTarget->GetMethodTable()->IsValueType();
+
+ RuntimeMethodHandle::StreamingContextData sContext = { NULL, GetStreamingContextState() };
+
+ // Ensure the StreamingContext type is loaded. Do not delete this line
+ MethodTable *pMTStreamingContext;
+ pMTStreamingContext = MscorlibBinder::GetClass(CLASS__STREAMING_CONTEXT);
+ _ASSERTE(pMTStreamingContext);
+
+ // Now go and call each method in order.
+ for (DWORD i = 0; i < cMethods; i++)
+ {
+ MethodDescCallSite callback(pCallbacks[i], &refTarget);
+
+ ARG_SLOT argSlots[2];
+
+ // Nullable<T> does not have any VTS functions
+ _ASSERTE(!Nullable::IsNullableType(refTarget->GetMethodTable()));
+
+ argSlots[0] = (bIsBoxed ? (ARG_SLOT)(SIZE_T)(refTarget->UnBox()) : ObjToArgSlot(refTarget));
+#if defined(_TARGET_X86_) || defined(_TARGET_ARM_)
+ static_assert_no_msg(sizeof(sContext) == sizeof(ARG_SLOT));
+ argSlots[1] = *(ARG_SLOT*)(&sContext); // StreamingContext is passed by value on x86 and ARM
+#elif defined(_WIN64)
+ static_assert_no_msg(sizeof(sContext) > sizeof(ARG_SLOT));
+ argSlots[1] = PtrToArgSlot(&sContext); // StreamingContext is passed by reference on WIN64
+#else // !_TARGET_X86_ && !_WIN64 && !_TARGET_ARM_
+ PORTABILITY_ASSERT("ObjectClone::InvokeVtsCallbacks() - NYI on this platform");
+#endif // !_TARGET_X86_ && !_WIN64 && !_TARGET_ARM_
+
+ callback.CallWithValueTypes(&argSlots[0]);
+ }
+
+ END_DOMAIN_TRANSITION;
+
+Done: ;
+ GCPROTECT_END();
+}
+
+#endif // FEATURE_REMOTING
diff --git a/src/vm/objectclone.h b/src/vm/objectclone.h
new file mode 100644
index 0000000000..26e10555ce
--- /dev/null
+++ b/src/vm/objectclone.h
@@ -0,0 +1,1269 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: ObjectClone.h
+//
+
+//
+
+
+#ifndef _OBJECTCLONE_H_
+#define _OBJECTCLONE_H_
+
+#ifndef FEATURE_REMOTING
+#error FEATURE_REMOTING is not set, please do not include objectclone.h
+#endif
+
+#include "invokeutil.h"
+#include "runtimehandles.h"
+
+enum QueueType
+{
+ FIFO,
+ LIFO
+};
+
+enum ObjectProperties
+{
+ enum_Array = 0x01,
+ enum_NeedsUnboxing = 0x02,
+ enum_ISerializableMember = 0x04, // This is set on a member of an ISerializable instance
+ enum_Iserializable = 0x08, // This is set on an ISerializable instance
+ enum_IObjRef = 0x10, // This is set on an IObjRef instance
+};
+
+// This is the base class of all the different records that get
+// stored in different tables during cloning
+class QueuedObjectInfo
+{
+protected:
+ BYTE m_properties;
+public:
+ QueuedObjectInfo() { LIMITED_METHOD_CONTRACT; m_properties = 0; }
+ BOOL IsArray() { LIMITED_METHOD_CONTRACT; return m_properties & enum_Array; }
+ BOOL NeedsUnboxing() { LIMITED_METHOD_CONTRACT; return m_properties & enum_NeedsUnboxing; }
+ void SetIsArray() { LIMITED_METHOD_CONTRACT; m_properties |= enum_Array; }
+ void SetNeedsUnboxing() { LIMITED_METHOD_CONTRACT; m_properties |= enum_NeedsUnboxing; }
+ BOOL IsISerializableMember() { LIMITED_METHOD_CONTRACT; return m_properties & enum_ISerializableMember; }
+ void SetIsISerializableMember() { LIMITED_METHOD_CONTRACT; m_properties |= enum_ISerializableMember; }
+ BOOL IsISerializableInstance() { LIMITED_METHOD_CONTRACT; return m_properties & enum_Iserializable; }
+ void SetIsISerializableInstance() { LIMITED_METHOD_CONTRACT; m_properties |= enum_Iserializable; }
+ BOOL IsIObjRefInstance() { LIMITED_METHOD_CONTRACT; return m_properties & enum_IObjRef; }
+ void SetIsIObjRefInstance() { LIMITED_METHOD_CONTRACT; m_properties |= enum_IObjRef; }
+ virtual DWORD GetSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ DWORD size = sizeof(QueuedObjectInfo);
+#if defined(_WIN64) || defined(ALIGN_ACCESS)
+ size = (DWORD)ALIGN_UP(size, sizeof(SIZE_T));
+#endif // _WIN64 || ALIGN_ACCESS
+ return size;
+ }
+};
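+// Note: these records are stored inline, back-to-back, in the cloning queues and tables;
+// that is presumably why every GetSize() override rounds its size up to pointer alignment
+// on platforms that require it.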
+
+// These are records in QOF. Each represents a parent object which has at least one member to
+// be marshalled and fixed up.
+class ParentInfo : public QueuedObjectInfo
+{
+ DWORD m_fixupCount;
+ DWORD m_numSpecialMembers;
+ DWORD m_IserIndexInTSOTable;
+ DWORD m_IObjRefIndexInTSOTable;
+ DWORD m_BoxedValIndexIntoTSOTable;
+public:
+ ParentInfo(DWORD count)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_fixupCount = count;
+ m_numSpecialMembers = 0;
+ m_IserIndexInTSOTable = (DWORD) -1;
+ m_IObjRefIndexInTSOTable = (DWORD) -1;
+ m_BoxedValIndexIntoTSOTable = (DWORD) -1;
+ }
+ DWORD DecrementFixupCount() { LIMITED_METHOD_CONTRACT; return --m_fixupCount; }
+ DWORD GetNumSpecialMembers() { LIMITED_METHOD_CONTRACT; return m_numSpecialMembers; }
+ DWORD IncrementSpecialMembers() { LIMITED_METHOD_CONTRACT; return ++m_numSpecialMembers; }
+ DWORD GetISerIndexIntoTSO() { LIMITED_METHOD_CONTRACT; return m_IserIndexInTSOTable; }
+ void SetISerIndexIntoTSO(DWORD index) { LIMITED_METHOD_CONTRACT; m_IserIndexInTSOTable = index; }
+ DWORD GetIObjRefIndexIntoTSO() { LIMITED_METHOD_CONTRACT; return m_IObjRefIndexInTSOTable; }
+ void SetIObjRefIndexIntoTSO(DWORD index) { LIMITED_METHOD_CONTRACT; m_IObjRefIndexInTSOTable = index; }
+ DWORD GetBoxedValIndexIntoTSO() { LIMITED_METHOD_CONTRACT; return m_BoxedValIndexIntoTSOTable; }
+ void SetBoxedValIndexIntoTSO(DWORD index) { LIMITED_METHOD_CONTRACT; m_BoxedValIndexIntoTSOTable = index; }
+ virtual DWORD GetSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ DWORD size = sizeof(ParentInfo);
+#if defined(_WIN64) || defined(ALIGN_ACCESS)
+ size = (DWORD)ALIGN_UP(size, sizeof(SIZE_T));
+#endif // _WIN64 || ALIGN_ACCESS
+ return size;
+ }
+};
+
+// Represents an object whose parent is a regular object (not an array, not ISerializable etc)
+// Contains enough information to fix this object into its parent
+class ObjectMemberInfo : public QueuedObjectInfo
+{
+ FieldDesc *m_fieldDesc;
+public:
+ ObjectMemberInfo(FieldDesc *field) { LIMITED_METHOD_CONTRACT; m_fieldDesc = field; }
+ FieldDesc *GetFieldDesc() { LIMITED_METHOD_CONTRACT; return m_fieldDesc; }
+ VOID SetFieldDesc(FieldDesc* field) { LIMITED_METHOD_CONTRACT; m_fieldDesc = field; }
+ virtual DWORD GetSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ DWORD size = sizeof(ObjectMemberInfo);
+#if defined(_WIN64) || defined(ALIGN_ACCESS)
+ size = (DWORD)ALIGN_UP(size, sizeof(SIZE_T));
+#endif // _WIN64 || ALIGN_ACCESS
+ return size;
+ }
+};
+
+// Represents an object whose parent is an array
+// Contains index information to fix this object into its parent
+class NDimArrayMemberInfo : public QueuedObjectInfo
+{
+ DWORD m_numDimensions;
+ DWORD m_index[0];
+public:
+ NDimArrayMemberInfo(DWORD rank)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_numDimensions = rank;
+ SetIsArray();
+ }
+ virtual DWORD GetSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ DWORD size = sizeof(NDimArrayMemberInfo) + (sizeof(DWORD) * (m_numDimensions));
+#if defined(_WIN64) || defined(ALIGN_ACCESS)
+ size = (DWORD)ALIGN_UP(size, sizeof(SIZE_T));
+#endif // _WIN64 || ALIGN_ACCESS
+ return size;
+ }
+ DWORD *GetIndices()
+ { LIMITED_METHOD_CONTRACT; return &m_index[0]; }
+ void SetIndices(DWORD* indices)
+ {
+ LIMITED_METHOD_CONTRACT;
+ memcpy(GetIndices(), indices, GetNumDimensions() * sizeof(DWORD));
+ }
+ DWORD GetNumDimensions()
+ { LIMITED_METHOD_CONTRACT; return m_numDimensions; }
+ void SetNumDimensions(DWORD rank)
+ { LIMITED_METHOD_CONTRACT; m_numDimensions = rank; }
+};
+
+// Represents an object whose parent is an ISerializable object
+// Contains index information to fix this object into its parent
+class ISerializableMemberInfo : public QueuedObjectInfo
+{
+ DWORD m_TIOIndex;
+ DWORD m_fieldIndex;
+public:
+ ISerializableMemberInfo(DWORD tableIndex, DWORD fieldIndex)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_TIOIndex = tableIndex;
+ m_fieldIndex = fieldIndex;
+ SetIsISerializableMember();
+ }
+ DWORD GetTableIndex()
+ { LIMITED_METHOD_CONTRACT; return m_TIOIndex; }
+ DWORD GetFieldIndex()
+ { LIMITED_METHOD_CONTRACT; STATIC_CONTRACT_SO_TOLERANT; return m_fieldIndex; }
+ virtual DWORD GetSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ DWORD size = sizeof(ISerializableMemberInfo);
+#if defined(_WIN64) || defined(ALIGN_ACCESS)
+ size = (DWORD)ALIGN_UP(size, sizeof(SIZE_T));
+#endif // _WIN64 || ALIGN_ACCESS
+ return size;
+ }
+};
+
+// Represents a special object (ISerializable, Boxed value type, IObjectReference)
+// Entries in TSO are of this type
+class SpecialObjectInfo : public QueuedObjectInfo
+{
+protected:
+ DWORD m_specialObjectProperties;
+ int m_objectId;
+ DWORD m_numSpecialMembers;
+ DWORD m_mappingTableIndex;
+public:
+ SpecialObjectInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_specialObjectProperties = 0;
+ m_mappingTableIndex = 0;
+ m_numSpecialMembers = 0;
+ m_objectId = 0;
+ }
+ void SetHasBeenProcessed() { LIMITED_METHOD_CONTRACT; m_specialObjectProperties |= 0x01; }
+ DWORD HasBeenProcessed() { LIMITED_METHOD_CONTRACT; return m_specialObjectProperties & 0x01; }
+ void SetHasFixupInfo() { LIMITED_METHOD_CONTRACT; m_specialObjectProperties |= 0x02; }
+ DWORD HasFixupInfo() { LIMITED_METHOD_CONTRACT; return m_specialObjectProperties & 0x02; }
+ void SetIsRepeatObject() { LIMITED_METHOD_CONTRACT; m_specialObjectProperties |= 0x04; }
+ DWORD IsRepeatObject() { LIMITED_METHOD_CONTRACT; return m_specialObjectProperties & 0x04; }
+ void SetIsBoxedObject() { LIMITED_METHOD_CONTRACT; m_specialObjectProperties |= 0x08; }
+ DWORD IsBoxedObject() { LIMITED_METHOD_CONTRACT; return m_specialObjectProperties & 0x08; }
+ void SetTargetNotISerializable() { LIMITED_METHOD_CONTRACT; m_specialObjectProperties |= 0x10; }
+ DWORD IsTargetNotISerializable() { LIMITED_METHOD_CONTRACT; return m_specialObjectProperties & 0x10; }
+
+ void SetMappingTableIndex(DWORD index) { LIMITED_METHOD_CONTRACT; m_mappingTableIndex = index; }
+ DWORD GetMappingTableIndex() { LIMITED_METHOD_CONTRACT; return m_mappingTableIndex; }
+ DWORD GetNumSpecialMembers() { LIMITED_METHOD_CONTRACT; return m_numSpecialMembers; }
+ void SetNumSpecialMembers(DWORD numSpecialMembers) { LIMITED_METHOD_CONTRACT; m_numSpecialMembers = numSpecialMembers;}
+ void SetObjectId(int id) { LIMITED_METHOD_CONTRACT; m_objectId = id; }
+ int GetObjectId() { LIMITED_METHOD_CONTRACT; return m_objectId; }
+};
+
+// Represents a special object (ISerializable)
+// Contains the number of IObjRef members it has
+class ISerializableInstanceInfo : public SpecialObjectInfo
+{
+public:
+ ISerializableInstanceInfo(int objectId, DWORD numIObjRefMembers)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_numSpecialMembers = numIObjRefMembers;
+ m_objectId = objectId;
+ SetIsISerializableInstance();
+ }
+ virtual DWORD GetSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ DWORD size = sizeof(ISerializableInstanceInfo);
+#if defined(_WIN64) || defined(ALIGN_ACCESS)
+ size = (DWORD)ALIGN_UP(size, sizeof(SIZE_T));
+#endif // _WIN64 || ALIGN_ACCESS
+ return size;
+ }
+};
+
+// Represents a special object (IObjectReference)
+// Contains fixup information to fix the completed object into its parent
+class IObjRefInstanceInfo : public SpecialObjectInfo
+{
+ DWORD m_ISerTSOIndex; // If this is also an ISerializable instance, index of the ISerializable entry in TSO
+#if defined(_WIN64) || defined(ALIGN_ACCESS)
+ DWORD m_padding;
+#endif // _WIN64 || ALIGN_ACCESS
+ BYTE m_fixupData[0];
+public:
+ IObjRefInstanceInfo(int objectId, DWORD numIObjRefMembers, DWORD numISerMembers)
+ {
+ WRAPPER_NO_CONTRACT;
+ static_assert_no_msg((offsetof(IObjRefInstanceInfo, m_fixupData) % sizeof(SIZE_T)) == 0);
+ m_numSpecialMembers = numIObjRefMembers + numISerMembers;
+ m_ISerTSOIndex = (DWORD) -1;
+ m_objectId = objectId;
+ SetIsIObjRefInstance();
+ }
+ DWORD GetISerTSOIndex() {LIMITED_METHOD_CONTRACT; return m_ISerTSOIndex; }
+ void SetISerTSOIndex(DWORD index)
+ { LIMITED_METHOD_CONTRACT; m_ISerTSOIndex = index; }
+ void SetFixupInfo(QueuedObjectInfo *pData)
+ {
+ WRAPPER_NO_CONTRACT;
+ if (pData->GetSize() > 0)
+ {
+ SetHasFixupInfo();
+ memcpy(m_fixupData, pData, pData->GetSize());
+ }
+ }
+ QueuedObjectInfo *GetFixupInfo()
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return (HasFixupInfo() ? (QueuedObjectInfo *)&m_fixupData[0] : NULL);
+ }
+ virtual DWORD GetSize()
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ DWORD size = sizeof(IObjRefInstanceInfo) + (HasFixupInfo() ? ((QueuedObjectInfo *)&m_fixupData[0])->GetSize() : 0);
+#if defined(_WIN64) || defined(ALIGN_ACCESS)
+ size = (DWORD)ALIGN_UP(size, sizeof(SIZE_T));
+#endif // _WIN64 || ALIGN_ACCESS
+ return size;
+ }
+};
+
+// Represents a special object (Boxed value type)
+// Contains fixup information to fix the completed object into its parent
+class ValueTypeInfo : public SpecialObjectInfo
+{
+protected:
+ DWORD m_ISerTSOIndex; // If this is also an ISerializable instance, index of the ISerializable entry in TSO
+ DWORD m_IObjRefTSOIndex; // If this is also an IObjRef instance, index of the IObjRef entry in TSO
+ BYTE m_fixupData[0];
+public:
+ ValueTypeInfo(int objectId, QueuedObjectInfo *pFixupInfo)
+ {
+ WRAPPER_NO_CONTRACT;
+ static_assert_no_msg((offsetof(ValueTypeInfo, m_fixupData) % sizeof(SIZE_T)) == 0);
+ m_ISerTSOIndex = (DWORD) -1;
+ m_IObjRefTSOIndex = (DWORD) -1;
+ m_objectId = objectId;
+ SetNeedsUnboxing();
+ SetIsBoxedObject();
+ SetFixupInfo(pFixupInfo);
+ }
+ DWORD GetISerTSOIndex() {LIMITED_METHOD_CONTRACT; return m_ISerTSOIndex; }
+ void SetISerTSOIndex(DWORD index)
+ { LIMITED_METHOD_CONTRACT; m_ISerTSOIndex = index; }
+ DWORD GetIObjRefTSOIndex() {LIMITED_METHOD_CONTRACT; return m_IObjRefTSOIndex; }
+ void SetIObjRefTSOIndex(DWORD index)
+ { LIMITED_METHOD_CONTRACT; m_IObjRefTSOIndex = index; }
+ void SetFixupInfo(QueuedObjectInfo *pData)
+ {
+ WRAPPER_NO_CONTRACT;
+ if (pData->GetSize() > 0)
+ {
+ SetHasFixupInfo();
+ memcpy(m_fixupData, pData, pData->GetSize());
+ }
+ }
+ QueuedObjectInfo *GetFixupInfo()
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return (HasFixupInfo() ? (QueuedObjectInfo *)&m_fixupData[0] : NULL);
+ }
+ virtual DWORD GetSize()
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ DWORD size = sizeof(ValueTypeInfo) + (HasFixupInfo() ? ((QueuedObjectInfo *)&m_fixupData[0])->GetSize() : 0);
+#if defined(_WIN64) || defined(ALIGN_ACCESS)
+ size = (DWORD)ALIGN_UP(size, sizeof(SIZE_T));
+#endif // _WIN64 || ALIGN_ACCESS
+ return size;
+ }
+};
+
+// Threshold beyond which the collections switch to using the heap
+// STACK_TO_HEAP_THRESHOLD/NUM_SLOTS_PER_BUCKET must be 1 or a prime, because
+// that quotient is used in GCSafeObjectHashTable as the number of hash table buckets.
+#ifdef _DEBUG
+#define STACK_TO_HEAP_THRESHOLD 5
+#define QOM_STACK_TO_HEAP_THRESHOLD 5
+#define QOF_STACK_TO_HEAP_THRESHOLD 5
+#define TSO_STACK_TO_HEAP_THRESHOLD 5
+#define TDC_STACK_TO_HEAP_THRESHOLD 5
+#define VSC_STACK_TO_HEAP_THRESHOLD 5
+#define VDC_STACK_TO_HEAP_THRESHOLD 5
+#else
+#define STACK_TO_HEAP_THRESHOLD (NUM_SLOTS_PER_BUCKET * 29)
+#define QOM_STACK_TO_HEAP_THRESHOLD 100
+#define QOF_STACK_TO_HEAP_THRESHOLD 16
+#define TSO_STACK_TO_HEAP_THRESHOLD 8
+#define TDC_STACK_TO_HEAP_THRESHOLD 8
+#define VSC_STACK_TO_HEAP_THRESHOLD 8
+#define VDC_STACK_TO_HEAP_THRESHOLD 8
+#endif
+
+#define NUM_SLOTS_PER_BUCKET 4
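+
+// Editor's note: a sketch (not original source) of why the bucket count above
+// must be 1 or a prime. With a prime bucket count p, every probe stride s in
+// [1, p-1] is coprime to p, so the sequence (h + k*s) % p visits all p buckets
+// before repeating. The actual probing lives in GCSafeObjectHashTable::FindElement
+// (declared below, defined elsewhere); the stride here is only a stand-in:
+//
+//   const unsigned p = 29;                // release quotient: (4 * 29) / 4
+//   bool seen[29] = {};
+//   for (unsigned k = 0; k < p; k++)
+//       seen[(7 + k * 12) % p] = true;    // h = 7, s = 12: all 29 slots visited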
+
+#define MAGIC_FACTOR 12
+
+#define LIFO_QUEUE 1
+#define FIFO_QUEUE 2
+
+
+class GCSafeCollection
+{
+ VPTR_BASE_VTABLE_CLASS(GCSafeCollection)
+protected:
+ // AppDomain object leak protection: pointer to predicate which flips to false once we should stop reporting GC references.
+ PTR_BOOL m_pfReportRefs;
+
+public:
+ GCSafeCollection(){}
+ virtual void Cleanup() = 0;
+ virtual void ReportGCRefs(promote_func *fn, ScanContext* sc) = 0;
+};
+
+typedef VPTR(GCSafeCollection) PTR_GCSafeCollection;
+
+class GCSafeObjectTable : public GCSafeCollection
+{
+ VPTR_VTABLE_CLASS(GCSafeObjectTable, GCSafeCollection);
+protected:
+
+ PTR_OBJECTREF m_Objects1;
+ PTR_OBJECTREF m_Objects2;
+ PTR_OBJECTREF m_Objects3;
+
+ PTR_DWORD m_dataIndices;
+ PTR_BYTE m_data;
+
+ DWORD m_currArraySize;
+ // Objects
+ DWORD m_count;
+ DWORD m_head;
+ // Data
+ DWORD m_numDataBytes;
+ DWORD m_dataHead;
+
+ // LIFO/FIFO
+ DWORD m_QueueType;
+ BOOL m_usingHeap;
+
+ BOOL m_fCleanedUp;
+
+#ifndef DACCESS_COMPILE
+ void EnsureSize(DWORD requiredDataSize);
+ void Resize();
+#endif
+
+public:
+#ifndef DACCESS_COMPILE
+ void Init(OBJECTREF *ref1, OBJECTREF *ref2, OBJECTREF *ref3, DWORD *dwIndices, BYTE *bData, DWORD currArraySize, DWORD qType, BOOL *pfReportRefs)
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_INTOLERANT;
+ m_Objects1 = ref1;
+ m_Objects2 = ref2;
+ m_Objects3 = ref3;
+ m_dataIndices = dwIndices;
+ m_data = bData;
+ m_QueueType = qType;
+ m_currArraySize = currArraySize;
+ m_usingHeap = FALSE;
+ m_count = 0;
+ m_head = 0;
+ m_numDataBytes = 0;
+ m_dataHead = 0;
+ _ASSERTE(m_QueueType == LIFO_QUEUE || m_QueueType == FIFO_QUEUE);
+ // If this is a lifo queue, then the data indices are definitely needed
+ _ASSERTE(m_QueueType != LIFO_QUEUE || m_dataIndices != NULL);
+ m_pfReportRefs = pfReportRefs;
+ m_fCleanedUp = FALSE;
+#ifdef USE_CHECKED_OBJECTREFS
+ ZeroMemory(m_Objects1, sizeof(OBJECTREF) * m_currArraySize);
+ if (m_Objects2 != NULL)
+ ZeroMemory(m_Objects2, sizeof(OBJECTREF) * m_currArraySize);
+ if (m_Objects3 != NULL)
+ ZeroMemory(m_Objects3, sizeof(OBJECTREF) * m_currArraySize);
+ for(DWORD i = 0; i < m_currArraySize; i++)
+ {
+ Thread::ObjectRefProtected(&m_Objects1[i]);
+ if (m_Objects2)
+ Thread::ObjectRefProtected(&m_Objects2[i]);
+ if (m_Objects3)
+ Thread::ObjectRefProtected(&m_Objects3[i]);
+ }
+#endif
+ }
+
+ void Init(OBJECTREF *ref1, BYTE *bData, DWORD currArraySize, DWORD qType, BOOL *pfReportRefs)
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ m_Objects1 = ref1;
+ m_Objects2 = NULL;
+ m_Objects3 = NULL;
+ m_dataIndices = NULL;
+ m_data = bData;
+ m_QueueType = qType;
+ m_currArraySize = currArraySize;
+ m_usingHeap = FALSE;
+ m_count = 0;
+ m_head = 0;
+ m_numDataBytes = 0;
+ m_dataHead = 0;
+ _ASSERTE(m_QueueType == LIFO_QUEUE || m_QueueType == FIFO_QUEUE);
+ // If this is a lifo queue, then the data indices are definitely needed
+ _ASSERTE(m_QueueType != LIFO_QUEUE || m_dataIndices != NULL);
+ m_pfReportRefs = pfReportRefs;
+ m_fCleanedUp = FALSE;
+#ifdef USE_CHECKED_OBJECTREFS
+ ZeroMemory(m_Objects1, sizeof(OBJECTREF) * m_currArraySize);
+ if (m_Objects2 != NULL)
+ ZeroMemory(m_Objects2, sizeof(OBJECTREF) * m_currArraySize);
+ if (m_Objects3 != NULL)
+ ZeroMemory(m_Objects3, sizeof(OBJECTREF) * m_currArraySize);
+ for(DWORD i = 0; i < m_currArraySize; i++)
+ {
+ Thread::ObjectRefProtected(&m_Objects1[i]);
+ if (m_Objects2)
+ Thread::ObjectRefProtected(&m_Objects2[i]);
+ if (m_Objects3)
+ Thread::ObjectRefProtected(&m_Objects3[i]);
+ }
+#endif
+ }
+
+#endif // !DACCESS_COMPILE
+
+ virtual void Cleanup()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#ifndef DACCESS_COMPILE
+ // Set this first, we must disable GC reporting of our objects before we start ripping down the data structures that record
+ // those objects. See ReportGCRefs for a more detailed explanation.
+ m_fCleanedUp = TRUE;
+
+ if (m_usingHeap == TRUE)
+ {
+ if (m_Objects1)
+ delete[] m_Objects1;
+ m_Objects1 = NULL;
+ if (m_Objects2)
+ delete[] m_Objects2;
+ m_Objects2 = NULL;
+ if (m_Objects3)
+ delete[] m_Objects3;
+ m_Objects3 = NULL;
+ if (m_data)
+ delete[] m_data;
+ m_data = NULL;
+ if (m_dataIndices)
+ delete[] m_dataIndices;
+ m_dataIndices = NULL;
+ }
+
+#endif // !DACCESS_COMPILE
+ }
+
+ virtual void ReportGCRefs(promote_func *fn, ScanContext* sc)
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ // Due to the wacky way that frames are cleaned up (they're popped en masse by exception handling code in finally blocks)
+ // it's possible that this object may be destructed before the custom GC frame is popped. If there's a GC in the timing
+ // window then we end up here with the object destructed. It happens that the underlying storage (the stack) is still valid
+ // since the exception handling code that's doing all this unwinding hasn't actually physically unwound the stack yet, but
+ // the destructor has been run and may have left the object in an inconsistent state. To solve this in a really obvious
+ // manner (less likely to be broken by a random change in the future) we keep a boolean that's set to true once the object
+ // has been destructed (actually, once Cleanup has been called, because that's the destructive part). We don't need to
+ // report anything beyond this point (and to do so would be dangerous given the state of the collection).
+ if (m_fCleanedUp)
+ return;
+
+ // We track a predicate (actually embedded in the cloner which owns us) that tells us whether to report any GC refs at all.
+ // This is used as a rip cord if the server appdomain is unloaded while we're processing in it (because this collection can
+ // survive for a little while after that, which is long enough to cause server objects to outlive their domain).
+ if (!*m_pfReportRefs)
+ {
+ m_count = 0;
+ if (m_Objects1)
+ ZeroMemory(m_Objects1, m_currArraySize * sizeof(OBJECTREF));
+ if (m_Objects2)
+ ZeroMemory(m_Objects2, m_currArraySize * sizeof(OBJECTREF));
+ if (m_Objects3)
+ ZeroMemory(m_Objects3, m_currArraySize * sizeof(OBJECTREF));
+ return;
+ }
+
+ PTR_PTR_Object pRefs1 = dac_cast<PTR_PTR_Object>(m_Objects1);
+ PTR_PTR_Object pRefs2 = dac_cast<PTR_PTR_Object>(m_Objects2);
+ PTR_PTR_Object pRefs3 = dac_cast<PTR_PTR_Object>(m_Objects3);
+
+ if (m_QueueType == LIFO_QUEUE)
+ {
+ for (DWORD i = 0; i < m_count; i++)
+ {
+ _ASSERTE(i < m_currArraySize);
+ if (m_Objects1)
+ (*fn)(pRefs1 + i, sc, 0);
+ if (m_Objects2)
+ (*fn)(pRefs2 + i, sc, 0);
+ if (m_Objects3)
+ (*fn)(pRefs3 + i, sc, 0);
+ }
+ }
+ else
+ {
+ for (DWORD i = m_head, count = 0; count < m_count; i++, count++)
+ {
+ i = i % m_currArraySize;
+ if (m_Objects1)
+ (*fn)(pRefs1 + i, sc, 0);
+ if (m_Objects2)
+ (*fn)(pRefs2 + i, sc, 0);
+ if (m_Objects3)
+ (*fn)(pRefs3 + i, sc, 0);
+ }
+ }
+ }
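+
+ // Editor's note: in the FIFO branch above, the live entries form a ring that
+ // can wrap past the end of the array, which is why the index is reduced modulo
+ // m_currArraySize on each iteration. A stand-alone sketch of the traversal:
+ //
+ //   unsigned size = 8, head = 6, count = 4;  // live slots: 6, 7, 0, 1
+ //   for (unsigned i = head, c = 0; c < count; i++, c++) {
+ //       unsigned slot = i % size;            // visits 6, 7, 0, 1
+ //   }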
+
+#ifndef DACCESS_COMPILE
+ void Enqueue(OBJECTREF refObj, OBJECTREF refParent, OBJECTREF refAux, QueuedObjectInfo *pQOI);
+ void Push(OBJECTREF refObj, OBJECTREF refParent, OBJECTREF refAux, QueuedObjectInfo *pQOI);
+ void SetAt(DWORD index, OBJECTREF refObj, OBJECTREF refParent, OBJECTREF refAux, QueuedObjectInfo *pQOI);
+ OBJECTREF Dequeue(OBJECTREF *refParent, OBJECTREF *refAux, QueuedObjectInfo **pQOI);
+ OBJECTREF Pop(OBJECTREF *refParent, OBJECTREF *refAux, QueuedObjectInfo **pQOI);
+ OBJECTREF GetAt(DWORD index, OBJECTREF *refParent, OBJECTREF *refAux, QueuedObjectInfo **pQOI);
+ OBJECTREF Peek(OBJECTREF *refParent, OBJECTREF *refAux, QueuedObjectInfo **pQOI);
+ void BeginEnumeration(DWORD *dwIndex)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (m_QueueType == LIFO_QUEUE)
+ *dwIndex = m_count;
+ else
+ *dwIndex = 0;
+ }
+
+ OBJECTREF GetNext(DWORD *dwIndex, OBJECTREF *refParent, OBJECTREF *refAux, QueuedObjectInfo **pQOI)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF refRet = NULL;
+ if (m_QueueType == LIFO_QUEUE)
+ {
+ if (*dwIndex == 0)
+ return NULL;
+
+ (*dwIndex)--;
+ refRet = GetAt(*dwIndex, refParent, refAux, pQOI);
+ }
+ else
+ {
+ if (*dwIndex == m_count)
+ return NULL;
+
+ refRet = GetAt(*dwIndex, refParent, refAux, pQOI);
+ (*dwIndex)++;
+ }
+ return refRet;
+ }
+
+ DWORD GetCount() { LIMITED_METHOD_CONTRACT; return m_count; }
+#endif // !DACCESS_COMPILE
+};
+
+#ifndef DACCESS_COMPILE
+class DwordArrayList
+{
+ DWORD m_dwordsOnStack[STACK_TO_HEAP_THRESHOLD];
+ DWORD *m_dwords;
+
+ DWORD m_count;
+ DWORD m_currSize;
+public:
+
+ void Init()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_dwords = &m_dwordsOnStack[0];
+ m_currSize = STACK_TO_HEAP_THRESHOLD;
+ m_count = 0;
+ }
+
+ void Add(DWORD i)
+ {
+ WRAPPER_NO_CONTRACT;
+ EnsureSize();
+ m_dwords[m_count++] = i;
+ }
+
+ void EnsureSize()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_INTOLERANT;
+ }
+ CONTRACTL_END
+ if (m_count < m_currSize)
+ return;
+
+ DWORD newSize = m_currSize * 2;
+ // Does not need a holder because this is the only allocation in this method
+ DWORD *pTemp = new DWORD[newSize];
+ ZeroMemory(pTemp, sizeof(DWORD) * newSize);
+
+ memcpy((BYTE*)pTemp, m_dwords, sizeof(DWORD) * m_currSize);
+ if (m_dwords != &m_dwordsOnStack[0])
+ {
+ delete[] m_dwords;
+ }
+ m_dwords = pTemp;
+ m_count = m_currSize;
+ m_currSize = newSize;
+ }
+
+ void Cleanup()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ if (m_dwords != &m_dwordsOnStack[0])
+ {
+ delete[] m_dwords;
+ m_dwords = &m_dwordsOnStack[0];
+ }
+ }
+
+ DWORD GetCount() { LIMITED_METHOD_CONTRACT; return m_count; }
+ DWORD GetAt(DWORD index)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(index < m_count); // index is unsigned, so only the upper bound needs checking
+ return m_dwords[index];
+ }
+};
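+
+// Editor's note: a usage sketch (hypothetical call site, not original source)
+// of the inline-to-heap growth above. The first STACK_TO_HEAP_THRESHOLD entries
+// live in the embedded array; adding past that doubles into a heap block:
+//
+//   DwordArrayList list;
+//   list.Init();                     // points m_dwords at m_dwordsOnStack
+//   for (DWORD i = 0; i < 100; i++)
+//       list.Add(i);                 // first Add past the threshold moves to the heap
+//   DWORD last = list.GetAt(list.GetCount() - 1);
+//   list.Cleanup();                  // frees the heap block, if one was allocated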
+#endif // !DACCESS_COMPILE
+
+class GCSafeObjectHashTable : public GCSafeCollection
+{
+ VPTR_VTABLE_CLASS(GCSafeObjectHashTable, GCSafeCollection);
+private:
+ OBJECTREF m_objectsOnStack[STACK_TO_HEAP_THRESHOLD];
+ OBJECTREF m_newObjectsOnStack[STACK_TO_HEAP_THRESHOLD];
+ DWORD m_dataOnStack[STACK_TO_HEAP_THRESHOLD];
+ DWORD m_count;
+ DWORD m_currArraySize;
+ PTR_int m_ids;
+ PTR_OBJECTREF m_objects;
+ PTR_OBJECTREF m_newObjects;
+ BOOL m_fCleanedUp;
+
+#ifndef DACCESS_COMPILE
+ void Resize();
+ int FindElement(OBJECTREF refObj, BOOL &seenBefore);
+#endif // !DACCESS_COMPILE
+
+public:
+
+#ifndef DACCESS_COMPILE
+ virtual void Init(BOOL *pfReportRefs)
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ ZeroMemory(&m_objectsOnStack[0], sizeof(m_objectsOnStack));
+ ZeroMemory(&m_dataOnStack[0], sizeof(m_dataOnStack));
+ m_objects = &m_objectsOnStack[0];
+ m_newObjects = &m_newObjectsOnStack[0];
+ m_currArraySize = STACK_TO_HEAP_THRESHOLD;
+ m_count = 0;
+ m_ids = (int *) &m_dataOnStack[0];
+ m_pfReportRefs = pfReportRefs;
+ m_fCleanedUp = FALSE;
+#ifdef USE_CHECKED_OBJECTREFS
+ ZeroMemory(&m_newObjects[0], sizeof(m_newObjectsOnStack));
+ for(DWORD i = 0; i < m_currArraySize; i++)
+ {
+ Thread::ObjectRefProtected(&m_objects[i]);
+ Thread::ObjectRefProtected(&m_newObjects[i]);
+ }
+#endif
+ }
+#endif // !DACCESS_COMPILE
+
+ virtual void Cleanup()
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#ifndef DACCESS_COMPILE
+ // Set this first, we must disable GC reporting of our objects before we start ripping down the data structures that record
+ // those objects. See ReportGCRefs for a more detailed explanation.
+ m_fCleanedUp = TRUE;
+
+ if (m_newObjects != &m_newObjectsOnStack[0])
+ {
+ delete[] m_ids;
+ m_ids = (int *) &m_dataOnStack[0];
+ delete[] m_newObjects;
+ m_newObjects = &m_newObjectsOnStack[0];
+ delete[] m_objects;
+ m_objects = &m_objectsOnStack[0];
+ m_currArraySize = STACK_TO_HEAP_THRESHOLD;
+ }
+#endif // !DACCESS_COMPILE
+ }
+
+ virtual void ReportGCRefs(promote_func *fn, ScanContext* sc)
+ {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ // Due to the wacky way that frames are cleaned up (they're popped en masse by exception handling code in finally blocks)
+ // it's possible that this object may be destructed before the custom GC frame is popped. If there's a GC in the timing
+ // window then we end up here with the object destructed. It happens that the underlying storage (the stack) is still valid
+ // since the exception handling code that's doing all this unwinding hasn't actually physically unwound the stack yet, but
+ // the destructor has been run and may have left the object in an inconsistent state. To solve this in a really obvious
+ // manner (less likely to be broken by a random change in the future) we keep a boolean that's set to true once the object
+ // has been destructed (actually, once Cleanup has been called, because that's the destructive part). We don't need to
+ // report anything beyond this point (and to do so would be dangerous given the state of the collection).
+ if (m_fCleanedUp)
+ return;
+
+ // We track a predicate (actually embedded in the cloner which owns us) that tells us whether to report any GC refs at all.
+ // This is used as a rip cord if the server appdomain is unloaded while we're processing in it (because this collection can
+ // survive for a little while after that, which is long enough to cause server objects to outlive their domain).
+ if (!*m_pfReportRefs)
+ {
+ m_count = 0;
+ ZeroMemory(m_ids, m_currArraySize * sizeof(int));
+ ZeroMemory(m_objects, m_currArraySize * sizeof(OBJECTREF));
+ ZeroMemory(m_newObjects, m_currArraySize * sizeof(OBJECTREF));
+ return;
+ }
+
+ PTR_PTR_Object pRefs = dac_cast<PTR_PTR_Object>(m_objects);
+ PTR_PTR_Object pNewRefs = dac_cast<PTR_PTR_Object>(m_newObjects);
+
+ for (DWORD i = 0; i < m_currArraySize; i++)
+ {
+ if (m_ids[i] != 0)
+ {
+ (*fn)(pRefs + i, sc, 0);
+ (*fn)(pNewRefs + i, sc, 0);
+ }
+ }
+ }
+
+#ifndef DACCESS_COMPILE
+ int HasID(OBJECTREF refObj, OBJECTREF *newObj);
+ int AddObject(OBJECTREF refObj, OBJECTREF newObj);
+ int UpdateObject(OBJECTREF refObj, OBJECTREF newObj);
+#endif // !DACCESS_COMPILE
+};
+
+#ifndef DACCESS_COMPILE
+
+enum SpecialObjects
+{
+ ISerializable = 1,
+ IObjectReference,
+ BoxedValueType
+};
+
+enum CloningContext
+{
+ CrossAppDomain = 1,
+ ObjectFreezer
+};
+
+class CrossAppDomainClonerCallback
+{
+ public:
+ OBJECTREF AllocateObject(OBJECTREF, MethodTable * pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ return pMT->Allocate();
+ }
+
+ OBJECTREF AllocateArray(OBJECTREF, TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap)
+ {
+ WRAPPER_NO_CONTRACT;
+ return ::AllocateArrayEx(arrayType, pArgs, dwNumArgs, bAllocateInLargeHeap DEBUG_ARG(FALSE));
+ }
+
+ STRINGREF AllocateString(STRINGREF refSrc)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return refSrc;
+ }
+
+ void ValidateFromType(MethodTable *pFromMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ CheckSerializable(pFromMT);
+ }
+
+ void ValidateToType(MethodTable *pToMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ CheckSerializable(pToMT);
+ }
+
+ BOOL IsRemotedType(MethodTable *pMT, AppDomain* pFromAD, AppDomain* pToDomain)
+ {
+ WRAPPER_NO_CONTRACT;
+ if ((pMT->IsMarshaledByRef() && pFromAD != pToDomain) ||
+ pMT->IsTransparentProxy())
+ return TRUE;
+
+ return FALSE;
+ }
+
+ BOOL IsISerializableType(MethodTable *pMT)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ return pMT->CanCastToNonVariantInterface(MscorlibBinder::GetClass(CLASS__ISERIALIZABLE));
+ }
+
+ BOOL IsIObjectReferenceType(MethodTable *pMT)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ return pMT->CanCastToNonVariantInterface(MscorlibBinder::GetClass(CLASS__IOBJECTREFERENCE));
+ }
+
+ BOOL RequiresDeserializationCallback(MethodTable *pMT)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ return pMT->CanCastToNonVariantInterface(MscorlibBinder::GetClass(CLASS__IDESERIALIZATIONCB));
+ }
+
+ BOOL RequiresDeepCopy(OBJECTREF refObj)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return TRUE;
+ }
+
+ private:
+ void CheckSerializable(MethodTable *pCurrMT)
+ {
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ // Checking whether the type is marked as Serializable is not enough; all of its ancestor types must be marked this
+ // way as well. The only exception is that any type that also implements ISerializable doesn't require a serializable parent.
+ if (pCurrMT->IsSerializable())
+ {
+ MethodTable *pISerializableMT = MscorlibBinder::GetClass(CLASS__ISERIALIZABLE);
+ MethodTable *pMT = pCurrMT;
+ for (;;)
+ {
+ // We've already checked this particular type is marked Serializable, so if it implements ISerializable then
+ // we're done.
+ if (pMT->ImplementsInterface(pISerializableMT))
+ return;
+
+ // Else we get the parent type and check it is marked Serializable as well.
+ pMT = pMT->GetParentMethodTable();
+
+ // If we've run out of parents we're done and the type is serializable.
+ if (pMT == NULL)
+ return;
+
+ // Otherwise check for the attribute.
+ if (!pMT->IsSerializable())
+ break;
+ }
+ }
+
+ if (pCurrMT->IsMarshaledByRef())
+ return;
+
+ if (pCurrMT->IsTransparentProxy())
+ return;
+
+ if (pCurrMT->IsEnum())
+ return;
+
+ if (pCurrMT->IsDelegate())
+ return;
+
+ DefineFullyQualifiedNameForClassW();
+ LPCWSTR wszCliTypeName = GetFullyQualifiedNameForClassNestedAwareW(pCurrMT);
+
+ SString ssAssemblyName;
+ pCurrMT->GetAssembly()->GetDisplayName(ssAssemblyName);
+ COMPlusThrow(kSerializationException, IDS_SERIALIZATION_NONSERTYPE, wszCliTypeName, ssAssemblyName.GetUnicode());
+ }
+};
+
+class CrossDomainFieldMap
+{
+ struct FieldMapEntry
+ {
+ ADID m_dwSrcDomain;
+ ADID m_dwDstDomain;
+ MethodTable *m_pSrcMT;
+ MethodTable *m_pDstMT;
+ FieldDesc **m_pFieldMap;
+
+ FieldMapEntry(MethodTable *pSrcMT, MethodTable *pDstMT, FieldDesc **pFieldMap);
+ ~FieldMapEntry()
+ {
+ LIMITED_METHOD_CONTRACT;
+ delete [] m_pFieldMap;
+ }
+
+ UPTR GetHash()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (UINT)(SIZE_T)m_pSrcMT + ((UINT)(SIZE_T)m_pDstMT >> 2);
+ }
+ };
+
+ static PtrHashMap *s_pFieldMap;
+ static SimpleRWLock *s_pFieldMapLock;
+
+ static BOOL CompareFieldMapEntry(UPTR val1, UPTR val2);
+
+public:
+ static void FlushStaleEntries();
+ static FieldDesc **LookupOrCreateFieldMapping(MethodTable *pDstMT, MethodTable *pSrcMT);
+};
+
+// Currently the object cloner uses DWORDs as indices. We may have to use QWORDs instead
+// if we start to have extremely large object graphs.
+class ObjectClone
+{
+ OBJECTREF m_QOMObjects[QOM_STACK_TO_HEAP_THRESHOLD];
+ BYTE m_QOMData[QOM_STACK_TO_HEAP_THRESHOLD * MAGIC_FACTOR];
+
+ OBJECTREF m_QOFObjects[QOF_STACK_TO_HEAP_THRESHOLD];
+ BYTE m_QOFData[QOF_STACK_TO_HEAP_THRESHOLD * MAGIC_FACTOR];
+
+ OBJECTREF m_TSOObjects1[TSO_STACK_TO_HEAP_THRESHOLD];
+ OBJECTREF m_TSOObjects2[TSO_STACK_TO_HEAP_THRESHOLD];
+ OBJECTREF m_TSOObjects3[TSO_STACK_TO_HEAP_THRESHOLD];
+ BYTE m_TSOData[TSO_STACK_TO_HEAP_THRESHOLD * MAGIC_FACTOR];
+ DWORD m_TSOIndices[TSO_STACK_TO_HEAP_THRESHOLD];
+
+ OBJECTREF m_TDCObjects[TDC_STACK_TO_HEAP_THRESHOLD];
+ BYTE m_TDCData[TDC_STACK_TO_HEAP_THRESHOLD * MAGIC_FACTOR];
+
+ OBJECTREF m_VSCObjects[VSC_STACK_TO_HEAP_THRESHOLD];
+
+ OBJECTREF m_VDCObjects[VDC_STACK_TO_HEAP_THRESHOLD];
+
+ GCSafeObjectTable QOM; // Queue_of_Objects_to_be_Marshalled
+ GCSafeObjectTable QOF; // Queue_of_Objects_to_be_Fixed_Up
+ GCSafeObjectHashTable TOS; // Table_of_Objects_Seen
+ GCSafeObjectTable TSO; // Table_of_Special_Objects
+ GCSafeObjectTable TDC; // Table_of_Deserialization_Callbacks
+ GCSafeObjectTable VSC; // Vts_Serialization_Callbacks
+ GCSafeObjectTable VDC; // Vts_Deserialization_Callbacks
+ DwordArrayList TMappings;
+
+ FrameWithCookie<GCSafeCollectionFrame> QOM_Protector;
+ FrameWithCookie<GCSafeCollectionFrame> QOF_Protector;
+ FrameWithCookie<GCSafeCollectionFrame> TOS_Protector;
+ FrameWithCookie<GCSafeCollectionFrame> TSO_Protector;
+ FrameWithCookie<GCSafeCollectionFrame> TDC_Protector;
+ FrameWithCookie<GCSafeCollectionFrame> VSC_Protector;
+ FrameWithCookie<GCSafeCollectionFrame> VDC_Protector;
+
+ BOOL m_skipFieldScan;
+
+ AppDomain* m_fromDomain;
+ AppDomain* m_toDomain;
+
+ OBJECTREF m_currObject; // Updated within the loop in Clone method
+ OBJECTREF m_newObject; // Updated within the loop in Clone method
+ OBJECTREF m_topObject;
+ OBJECTREF m_fromExecutionContext; // Copy of the execution context on the way in (used during callbacks to the from domain)
+
+ BOOL m_securityChecked;
+
+ // AppDomain object leak protection: predicate which flips to false once we should stop reporting GC references in the
+ // collections this cloner owns.
+ BOOL m_fReportRefs;
+
+ CrossAppDomainClonerCallback *m_cbInterface;
+ CloningContext m_context;
+
+ PTRARRAYREF AllocateISerializable(int objectId, BOOL bIsRemotingObject);
+ void AllocateArray();
+ void AllocateObject();
+
+ PTRARRAYREF MakeObjectLookLikeISerializable(int objectId);
+
+ void HandleISerializableFixup(OBJECTREF refParent, QueuedObjectInfo *currObjFixupInfo);
+ void HandleArrayFixup(OBJECTREF refParent, QueuedObjectInfo *currObjFixupInfo);
+ void HandleObjectFixup(OBJECTREF refParent, QueuedObjectInfo *currObjFixupInfo);
+ void Fixup(OBJECTREF newObj, OBJECTREF refParent, QueuedObjectInfo *currObjFixupInfo);
+
+ void ScanMemberFields(DWORD IObjRefTSOIndex, DWORD BoxedValTSOIndex);
+ DWORD CloneField(FieldDesc *pSrcField, FieldDesc *pDstField);
+ static BOOL AreTypesEmittedIdentically(MethodTable *pMT1, MethodTable *pMT2);
+ void ScanISerializableMembers(DWORD IObjRefTSOIndex, DWORD ISerTSOIndex, DWORD BoxedValTSOIndex, PTRARRAYREF refValues);
+ void ScanArrayMembers();
+ Object *GetObjectFromArray(BASEARRAYREF* arrObj, DWORD dwOffset);
+
+ void CompleteValueTypeFields(OBJECTREF newObj, OBJECTREF refParent, QueuedObjectInfo *valTypeInfo);
+ void CompleteSpecialObjects();
+ void CompleteISerializableObject(OBJECTREF IserObj, OBJECTREF refNames, OBJECTREF refValues, ISerializableInstanceInfo *);
+ BOOL CompleteIObjRefObject(OBJECTREF IObjRef, DWORD index, IObjRefInstanceInfo *iorInfo);
+ void CompleteIDeserializationCallbacks();
+ void CompleteVtsOnDeserializedCallbacks();
+ void CompleteVtsOnSerializedCallbacks();
+ BOOL CheckForUnresolvedMembers(SpecialObjectInfo *splInfo);
+
+ TypeHandle GetCorrespondingTypeForTargetDomain(TypeHandle thCli);
+ MethodTable * GetCorrespondingTypeForTargetDomain(MethodTable * pCliMT);
+ TypeHandle GetType(const SString &ssTypeName, const SString &ssAssemName);
+
+ DWORD FindObjectInTSO(int objId, SpecialObjects kind);
+ ARG_SLOT HandleFieldTypeMismatch(CorElementType srcTy, CorElementType destTy, void *pData, MethodTable *pSrcMT);
+ BOOL IsDelayedFixup(MethodTable *newMT, QueuedObjectInfo *);
+ OBJECTREF BoxValueTypeInWrongDomain(OBJECTREF refParent, DWORD offset, MethodTable *pValueTypeMT);
+
+ BOOL HasVtsCallbacks(MethodTable *pMT, RemotingVtsInfo::VtsCallbackType eCallbackType);
+ void InvokeVtsCallbacks(OBJECTREF refTarget, RemotingVtsInfo::VtsCallbackType eCallbackType, AppDomain* pDomain);
+
+ RuntimeMethodHandle::StreamingContextStates GetStreamingContextState()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_context == CrossAppDomain)
+ return RuntimeMethodHandle::CONTEXTSTATE_CrossAppDomain;
+
+ if (m_context == ObjectFreezer)
+ return RuntimeMethodHandle::CONTEXTSTATE_Other;
+
+ _ASSERTE(!"Should not get here; using the cloner with a context we don't understand");
+ return RuntimeMethodHandle::CONTEXTSTATE_Other;
+ }
+public:
+
+ void Init(BOOL bInitialInit)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (bInitialInit)
+ {
+ TOS.Init(&m_fReportRefs);
+ TSO.Init(&m_TSOObjects1[0], &m_TSOObjects2[0], &m_TSOObjects3[0], &m_TSOIndices[0], &m_TSOData[0], TSO_STACK_TO_HEAP_THRESHOLD, LIFO_QUEUE, &m_fReportRefs);
+ }
+ QOM.Init(&m_QOMObjects[0], &m_QOMData[0], QOM_STACK_TO_HEAP_THRESHOLD, FIFO_QUEUE, &m_fReportRefs);
+ QOF.Init(&m_QOFObjects[0], &m_QOFData[0], QOF_STACK_TO_HEAP_THRESHOLD, FIFO_QUEUE, &m_fReportRefs);
+ TDC.Init(&m_TDCObjects[0], &m_TDCData[0], TDC_STACK_TO_HEAP_THRESHOLD, FIFO_QUEUE, &m_fReportRefs);
+ VSC.Init(&m_VSCObjects[0], NULL, VSC_STACK_TO_HEAP_THRESHOLD, FIFO_QUEUE, &m_fReportRefs);
+ VDC.Init(&m_VDCObjects[0], NULL, VDC_STACK_TO_HEAP_THRESHOLD, FIFO_QUEUE, &m_fReportRefs);
+ TMappings.Init();
+ }
+ void Cleanup(BOOL bFinalCleanup)
+ {
+ WRAPPER_NO_CONTRACT;
+ if (bFinalCleanup)
+ {
+ TOS.Cleanup();
+ TSO.Cleanup();
+ }
+
+ QOM.Cleanup();
+ QOF.Cleanup();
+ TDC.Cleanup();
+ VSC.Cleanup();
+ VDC.Cleanup();
+ TMappings.Cleanup();
+ }
+
+ ObjectClone(CrossAppDomainClonerCallback *cbInterface, CloningContext cc=CrossAppDomain, BOOL bNeedSecurityCheck = TRUE)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ static_assert_no_msg((offsetof(ObjectClone, m_QOMObjects) % sizeof(SIZE_T)) == 0);
+ static_assert_no_msg((offsetof(ObjectClone, m_QOFObjects) % sizeof(SIZE_T)) == 0);
+ static_assert_no_msg((offsetof(ObjectClone, m_TSOData) % sizeof(SIZE_T)) == 0);
+ static_assert_no_msg((offsetof(ObjectClone, m_TDCData) % sizeof(SIZE_T)) == 0);
+ static_assert_no_msg((offsetof(ObjectClone, m_VSCObjects) % sizeof(SIZE_T)) == 0);
+ static_assert_no_msg((offsetof(ObjectClone, m_VDCObjects) % sizeof(SIZE_T)) == 0);
+
+ m_securityChecked = !bNeedSecurityCheck;
+ m_context = cc;
+ m_cbInterface = cbInterface;
+ m_fReportRefs = true;
+
+ Init(TRUE);
+
+ // Order of these is important. The frame lowest on the stack (i.e. declared last inside ObjectClone) has to be pushed first
+ (void)new (VDC_Protector.GetGSCookiePtr()) FrameWithCookie<GCSafeCollectionFrame>(&VDC);
+ (void)new (VSC_Protector.GetGSCookiePtr()) FrameWithCookie<GCSafeCollectionFrame>(&VSC);
+ (void)new (TDC_Protector.GetGSCookiePtr()) FrameWithCookie<GCSafeCollectionFrame>(&TDC);
+ (void)new (TSO_Protector.GetGSCookiePtr()) FrameWithCookie<GCSafeCollectionFrame>(&TSO);
+ (void)new (TOS_Protector.GetGSCookiePtr()) FrameWithCookie<GCSafeCollectionFrame>(&TOS);
+ (void)new (QOF_Protector.GetGSCookiePtr()) FrameWithCookie<GCSafeCollectionFrame>(&QOF);
+ (void)new (QOM_Protector.GetGSCookiePtr()) FrameWithCookie<GCSafeCollectionFrame>(&QOM);
+ }
+
+ void RemoveGCFrames()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // Order of these is important. The frame highest on the stack has to be popped first
+ QOM_Protector.Pop();
+ QOF_Protector.Pop();
+ TOS_Protector.Pop();
+ TSO_Protector.Pop();
+ TDC_Protector.Pop();
+ VSC_Protector.Pop();
+ VDC_Protector.Pop();
+ }
+
+ ~ObjectClone()
+ {
+ WRAPPER_NO_CONTRACT;
+ Cleanup(TRUE);
+ }
+
+ OBJECTREF Clone(OBJECTREF refObj, AppDomain* fromDomain, AppDomain* toDomain, OBJECTREF refExecutionContext)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ TypeHandle thDummy;
+ OBJECTREF refResult = Clone(refObj, thDummy, fromDomain, toDomain, refExecutionContext);
+ return refResult;
+ }
+
+ OBJECTREF Clone(OBJECTREF refObj,
+ TypeHandle expectedType,
+ AppDomain *fromDomain,
+ AppDomain *toDomain,
+ OBJECTREF refExecutionContext);
+
+ static void StopReportingRefs(ObjectClone *pThis)
+ {
+ pThis->m_fReportRefs = false;
+ }
+};
+
+typedef Holder<ObjectClone *, DoNothing<ObjectClone*>, ObjectClone::StopReportingRefs> ReportClonerRefsHolder;
+
+#endif
+#endif
diff --git a/src/vm/objecthandle.h b/src/vm/objecthandle.h
new file mode 100644
index 0000000000..3d1c5fac50
--- /dev/null
+++ b/src/vm/objecthandle.h
@@ -0,0 +1,6 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+#include "../gc/objecthandle.h"
diff --git a/src/vm/objectlist.cpp b/src/vm/objectlist.cpp
new file mode 100644
index 0000000000..4ee3342a63
--- /dev/null
+++ b/src/vm/objectlist.cpp
@@ -0,0 +1,210 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#include "objectlist.h"
+
+#ifndef DACCESS_COMPILE
+
+ObjectList::ObjectList( void )
+: freeIndexHead_( INVALID_COMPRESSEDSTACK_INDEX ),
+ listLock_( CrstObjectList, CrstFlags(CRST_UNSAFE_SAMELEVEL | CRST_UNSAFE_ANYMODE) )
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+}
+
+#endif
+
+#define MAX_LOOP 2
+
+DWORD
+ObjectList::AddToList( PVOID ptr )
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ptr));
+ } CONTRACTL_END;
+
+
+ // sanity check that the pointer low bit is not set
+ _ASSERTE( (((DWORD)(size_t)ptr & 0x1) == 0) && "Invalid pointer" );
+
+ DWORD retval = INVALID_COMPRESSEDSTACK_INDEX;
+
+ CrstHolder ch( &listLock_ );
+
+ // If there is an entry in the free list, simply use it.
+
+ if (this->freeIndexHead_ != INVALID_COMPRESSEDSTACK_INDEX)
+ {
+ _ASSERTE( this->listLock_.OwnedByCurrentThread() );
+
+ // grab the head of the list
+ retval = (this->freeIndexHead_ >> 1);
+
+ DWORD nextFreeIndex = (DWORD)(size_t)this->allEntries_.Get( retval );
+
+ // free-list links have the low bit set; in-use slots hold pointers, whose low bit is 0
+ _ASSERTE( ((nextFreeIndex & 0x01) == 1) && "The free list points to an index that is in use" );
+ // update the head of the list with the next free index stored in the array list
+ this->freeIndexHead_ = nextFreeIndex;
+
+ // store the pointer
+ this->allEntries_.Set( retval, ptr);
+ }
+ // Otherwise we place this new entry at the end of the list.
+ else
+ {
+ _ASSERTE( this->listLock_.OwnedByCurrentThread() );
+ retval = this->allEntries_.GetCount();
+ IfFailThrow(this->allEntries_.Append(ptr));
+ }
+
+ _ASSERTE( retval != INVALID_COMPRESSEDSTACK_INDEX );
+
+ return retval;
+}
+
+void
+ObjectList::RemoveFromList( PVOID ptr )
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ptr));
+ } CONTRACTL_END;
+
+ // sanity check that the pointer low bit is not set
+ _ASSERTE( (((DWORD)(size_t)ptr & 0x1) == 0) && "Invalid pointer" );
+
+ DWORD index = INVALID_COMPRESSEDSTACK_INDEX;
+
+ CrstHolder ch( &listLock_ );
+
+ ObjectList::Iterator iter = Iterate();
+
+ while (iter.Next())
+ {
+ if (iter.GetElement() == ptr)
+ {
+ index = iter.GetIndex();
+ break;
+ }
+ }
+
+ if (index == INVALID_COMPRESSEDSTACK_INDEX)
+ {
+ _ASSERTE( FALSE && "Unable to find object" );
+ }
+ else
+ {
+ // add the index to the free list ( shift the freeIndex left and set the low bit)
+ this->allEntries_.Set( index, (PVOID)(size_t)(this->freeIndexHead_));
+ this->freeIndexHead_ = ((index<<1) | 0x1);
+ }
+}
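+
+// Editor's note: a sketch (not original source) of the tagged encoding shared by
+// AddToList and RemoveFromList above. In-use slots hold real pointers, whose low
+// bit is always clear; a free slot holds the previous free-list head, and the
+// head itself stores (index << 1) | 1 so the two cases are distinguishable:
+//
+//   DWORD EncodeFreeLink(DWORD index) { return (index << 1) | 0x1; }
+//   DWORD DecodeFreeLink(DWORD link)  { return link >> 1; }
+//   // EncodeFreeLink(5) == 11; DecodeFreeLink(11) == 5
+//   bool IsFreeLink(SIZE_T slot)      { return (slot & 0x1) != 0; }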
+
+
+
+void
+ObjectList::RemoveFromList( DWORD index, PVOID ptr )
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ptr));
+ } CONTRACTL_END;
+
+ CrstHolder ch( &listLock_ );
+
+ // sanity check that the pointer low bit is not set
+ _ASSERTE( (((DWORD)(size_t)ptr & 0x1) == 0) && "Invalid pointer" );
+
+ _ASSERTE( index < this->allEntries_.GetCount() );
+ _ASSERTE( this->allEntries_.Get( index ) == ptr && "Index tracking failed for this object" );
+
+ // add the index to the free list ( shift the freeIndex left and set the low bit)
+ this->allEntries_.Set( index, (PVOID)(size_t)(this->freeIndexHead_));
+ this->freeIndexHead_ = ((index<<1) | 0x1);
+
+}
+
+PVOID
+ObjectList::Get( DWORD index )
+{
+ LIMITED_METHOD_CONTRACT;
+ return this->allEntries_.Get( index );
+}
+
+
+UnsynchronizedBlockAllocator::UnsynchronizedBlockAllocator( size_t blockSize )
+: blockSize_( blockSize ),
+ offset_( blockSize ),
+ index_( INVALID_COMPRESSEDSTACK_INDEX )
+{
+ LIMITED_METHOD_CONTRACT;
+ // We start off the offset at the block size to force the first
+ // allocation to create a new (first) block
+}
+
+UnsynchronizedBlockAllocator::~UnsynchronizedBlockAllocator( void )
+{
+ LIMITED_METHOD_CONTRACT;
+ ArrayList::Iterator iter = this->blockList_.Iterate();
+
+ while (iter.Next())
+ {
+ delete [] (BYTE *) iter.GetElement();
+ }
+}
+
+
+PVOID
+UnsynchronizedBlockAllocator::Allocate( size_t size )
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ _ASSERTE( size <= this->blockSize_ );
+
+ NewHolder<BYTE> buffer;
+
+ S_SIZE_T sizecheck = S_SIZE_T(this->offset_) + S_SIZE_T(size) ;
+ if( sizecheck.IsOverflow() )
+ {
+ ThrowOutOfMemory();
+ }
+
+ if (sizecheck.Value() > this->blockSize_)
+ {
+ buffer.Assign( new BYTE[this->blockSize_] );
+ IfFailThrow(this->blockList_.Append( buffer ));
+ buffer.SuppressRelease();
+ ++this->index_;
+ this->offset_ = 0;
+ }
+ else
+ {
+ buffer.Assign( (BYTE*)this->blockList_.Get( index_ ) );
+ buffer.SuppressRelease();
+ }
+
+ void* retval = buffer.GetValue() + this->offset_;
+ this->offset_ += size;
+
+ return retval;
+}
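+
+// Editor's note: a usage sketch (hypothetical call site, not original source) of
+// the bump allocator above. Requests are carved sequentially from fixed-size
+// blocks; starting offset_ at blockSize_ forces the first Allocate to create the
+// first block, and nothing is freed until the destructor releases whole blocks:
+//
+//   UnsynchronizedBlockAllocator alloc(4096);
+//   void *a = alloc.Allocate(24);   // creates block 0, returns its start
+//   void *b = alloc.Allocate(40);   // bumped 24 bytes further into block 0
+//   // ~UnsynchronizedBlockAllocator deletes every block in blockList_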
diff --git a/src/vm/objectlist.h b/src/vm/objectlist.h
new file mode 100644
index 0000000000..8d296852d7
--- /dev/null
+++ b/src/vm/objectlist.h
@@ -0,0 +1,101 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef __objectlist_h__
+#define __objectlist_h__
+
+
+#include "arraylist.h"
+#include "holder.h"
+
+#define INVALID_COMPRESSEDSTACK_INDEX ((DWORD)-1)
+#ifdef _DEBUG
+#define FREE_LIST_SIZE 128
+#else
+#define FREE_LIST_SIZE 1024
+#endif
+
+
+
+class ObjectList
+{
+public:
+ class Iterator
+ {
+ friend class ObjectList;
+
+ protected:
+ ArrayList::Iterator _iter;
+
+ public:
+
+ PTR_VOID GetElement()
+ {
+ LIMITED_METHOD_CONTRACT;
+ PTR_VOID ptr = _iter.GetElement();
+ if (((DWORD)(size_t)(dac_cast<TADDR>(ptr)) & 0x1) == 0)
+ {
+ return ptr;
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+
+ DWORD GetIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _iter.GetIndex();
+ }
+
+ BOOL Next()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return _iter.Next();
+ }
+ };
+
+ ObjectList() DAC_EMPTY();
+
+ DWORD AddToList( PVOID ptr );
+ void RemoveFromList( PVOID ptr );
+ void RemoveFromList( DWORD index, PVOID ptr );
+ PVOID Get( DWORD index );
+
+ ObjectList::Iterator Iterate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ ObjectList::Iterator i;
+ i._iter = this->allEntries_.Iterate();
+ return i;
+ }
+
+private:
+ ArrayList allEntries_;
+ DWORD freeIndexHead_;
+ Crst listLock_;
+};
+
+class UnsynchronizedBlockAllocator
+{
+public:
+ UnsynchronizedBlockAllocator( size_t blockSize );
+ ~UnsynchronizedBlockAllocator( void );
+
+ PVOID Allocate( size_t size );
+
+private:
+ ArrayList blockList_;
+
+ size_t blockSize_;
+ size_t offset_;
+ DWORD index_;
+
+};
+
+#endif // __objectlist_h__
diff --git a/src/vm/olecontexthelpers.cpp b/src/vm/olecontexthelpers.cpp
new file mode 100644
index 0000000000..72a29c2e6d
--- /dev/null
+++ b/src/vm/olecontexthelpers.cpp
@@ -0,0 +1,172 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+#include "mtx.h"
+#include "oletls.h"
+#include "contxt.h"
+#include "ctxtcall.h"
+
+HRESULT GetCurrentObjCtx(IUnknown **ppObjCtx)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ppObjCtx));
+#ifdef FEATURE_COMINTEROP
+ PRECONDITION(TRUE == g_fComStarted);
+#endif // FEATURE_COMINTEROP
+ }
+ CONTRACTL_END;
+
+ return CoGetObjectContext(IID_IUnknown, (void **)ppObjCtx);
+}
+
+//=====================================================================
+// LPVOID SetupOleContext()
+LPVOID SetupOleContext()
+{
+ CONTRACT (LPVOID)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ ENTRY_POINT;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ IUnknown* pObjCtx = NULL;
+
+ BEGIN_ENTRYPOINT_VOIDRET;
+
+#ifdef FEATURE_COMINTEROP
+ if (g_fComStarted)
+ {
+ HRESULT hr = GetCurrentObjCtx(&pObjCtx);
+ if (hr == S_OK)
+ {
+ SOleTlsData* _pData = (SOleTlsData *) ClrTeb::GetOleReservedPtr();
+ if (_pData && _pData->pCurrentCtx == NULL)
+ {
+ _pData->pCurrentCtx = (CObjectContext*)pObjCtx; // no release !!!!
+ }
+ else
+ {
+ // We can't call SafeRelease here since that would transition
+ // to preemptive GC mode which is bad since SetupOleContext is called
+ // from places where we can't take a GC.
+ ULONG cbRef = pObjCtx->Release();
+ }
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ END_ENTRYPOINT_VOIDRET;
+
+ RETURN pObjCtx;
+}
+
+//================================================================
+// LPVOID GetCurrentCtxCookie()
+LPVOID GetCurrentCtxCookie()
+{
+ CONTRACT (LPVOID)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+#ifdef FEATURE_COMINTEROP
+ // check if com is started
+ if (!g_fComStarted)
+ RETURN NULL;
+#endif // FEATURE_COMINTEROP
+
+ ULONG_PTR ctxptr = 0;
+
+ if (CoGetContextToken(&ctxptr) != S_OK)
+ ctxptr = 0;
+
+ RETURN (LPVOID)ctxptr;
+}
+
+//+-------------------------------------------------------------------------
+//
+// HRESULT GetCurrentThreadTypeNT5(THDTYPE* pType)
+//
+HRESULT GetCurrentThreadTypeNT5(THDTYPE* pType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pType));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+
+ IObjectContext *pObjCurrCtx = (IObjectContext *)GetCurrentCtxCookie();
+ if(pObjCurrCtx)
+ {
+ GCX_PREEMP();
+
+ SafeComHolderPreemp<IComThreadingInfo> pThreadInfo;
+ hr = SafeQueryInterface(pObjCurrCtx, IID_IComThreadingInfo, (IUnknown **)&pThreadInfo);
+ if(hr == S_OK)
+ {
+ _ASSERTE(pThreadInfo);
+ hr = pThreadInfo->GetCurrentThreadType(pType);
+ }
+ }
+ return hr;
+}
+
+//+-------------------------------------------------------------------------
+//
+// HRESULT GetCurrentApartmentTypeNT5(IObjectContext *pObjCurrCtx, APTTYPE* pType)
+//
+HRESULT GetCurrentApartmentTypeNT5(IObjectContext *pObjCurrCtx, APTTYPE* pType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pType));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+ if(pObjCurrCtx)
+ {
+ GCX_PREEMP();
+
+ SafeComHolderPreemp<IComThreadingInfo> pThreadInfo;
+ hr = SafeQueryInterface(pObjCurrCtx, IID_IComThreadingInfo, (IUnknown **)&pThreadInfo);
+ if(hr == S_OK)
+ {
+ _ASSERTE(pThreadInfo);
+ hr = pThreadInfo->GetCurrentApartmentType(pType);
+ }
+ }
+ return hr;
+}
+
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
diff --git a/src/vm/olecontexthelpers.h b/src/vm/olecontexthelpers.h
new file mode 100644
index 0000000000..8af53538fb
--- /dev/null
+++ b/src/vm/olecontexthelpers.h
@@ -0,0 +1,30 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// OleContextHelpers.h
+//
+
+//
+// Helper APIs for interacting with Ole32 contexts & apartments.
+
+#ifndef _H_OLECONTEXTHELPERS
+#define _H_OLECONTEXTHELPERS
+
+#ifndef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+#error FEATURE_COMINTEROP_APARTMENT_SUPPORT is required for this file
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+#include "contxt.h"
+#include "mtx.h"
+#include "ctxtcall.h"
+
+//================================================================
+// OLE32 Context helpers.
+LPVOID GetCurrentCtxCookie();
+HRESULT GetCurrentObjCtx(IUnknown** ppObjCtx);
+LPVOID SetupOleContext();
+HRESULT GetCurrentThreadTypeNT5(THDTYPE* pType);
+HRESULT GetCurrentApartmentTypeNT5(IObjectContext *pObjCurrCtx, APTTYPE* pType);
+
+#endif // _H_OLECONTEXTHELPERS
diff --git a/src/vm/oletls.h b/src/vm/oletls.h
new file mode 100644
index 0000000000..41bb3e45b0
--- /dev/null
+++ b/src/vm/oletls.h
@@ -0,0 +1,211 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//+---------------------------------------------------------------------------
+//
+// File: oletls.h
+//
+
+//
+// Purpose: manage thread local storage for OLE
+//
+// Notes: The gTlsIndex is initialized at process attach time.
+// The per-thread data is allocated in CoInitialize in
+// single-threaded apartments or on first use in
+// multi-threaded apartments.
+//
+//----------------------------------------------------------------------------
+
+#ifndef _OLETLS_H_
+#define _OLETLS_H_
+
+#ifndef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+#error FEATURE_COMINTEROP_APARTMENT_SUPPORT is required for this file
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+//+---------------------------------------------------------------------------
+//
+// forward declarations (in order to avoid type casting when accessing
+// data members of the SOleTlsData structure).
+//
+//+---------------------------------------------------------------------------
+
+class CAptCallCtrl; // see callctrl.hxx
+class CSrvCallState; // see callctrl.hxx
+class CObjServer; // see sobjact.hxx
+class CSmAllocator; // see stg\h\smalloc.hxx
+class CMessageCall; // see call.hxx
+class CClientCall; // see call.hxx
+class CAsyncCall; // see call.hxx
+class CClipDataObject; // see ole232\clipbrd\clipdata.h
+class CSurrogatedObjectList; // see com\inc\comsrgt.hxx
+class CCtxCall; // see PSTable.hxx
+class CPolicySet; // see PSTable.hxx
+class CObjectContext; // see context.hxx
+class CComApartment; // see aprtmnt.hxx
+
+//+-------------------------------------------------------------------
+//
+// Struct: CallEntry
+//
+// Synopsis: Call Table Entry.
+//
+//+-------------------------------------------------------------------
+typedef struct tagCallEntry
+{
+ void *pNext; // ptr to next entry
+ void *pvObject; // Entry object
+} CallEntry;
+
+
+
+//+---------------------------------------------------------------------------
+//
+// Enum: OLETLSFLAGS
+//
+// Synopsis: bit values for dwFlags field of SOleTlsData. If you just want
+// to store a BOOL in TLS, use this enum and the dwFlag field.
+//
+//+---------------------------------------------------------------------------
+typedef enum tagOLETLSFLAGS
+{
+ OLETLS_LOCALTID = 0x01, // This TID is in the current process.
+ OLETLS_UUIDINITIALIZED = 0x02, // This Logical thread is init'd.
+ OLETLS_INTHREADDETACH = 0x04, // This is in thread detach. Needed
+ // due to NT's special thread detach
+ // rules.
+ OLETLS_CHANNELTHREADINITIALZED = 0x08,// This channel has been init'd
+ OLETLS_WOWTHREAD = 0x10, // This thread is a 16-bit WOW thread.
+ OLETLS_THREADUNINITIALIZING = 0x20, // This thread is in CoUninitialize.
+ OLETLS_DISABLE_OLE1DDE = 0x40, // This thread can't use a DDE window.
+ OLETLS_APARTMENTTHREADED = 0x80, // This is an STA apartment thread
+ OLETLS_MULTITHREADED = 0x100, // This is an MTA apartment thread
+ OLETLS_IMPERSONATING = 0x200, // This thread is impersonating
+ OLETLS_DISABLE_EVENTLOGGER = 0x400, // Prevent recursion in event logger
+ OLETLS_INNEUTRALAPT = 0x800, // This thread is in the NTA
+ OLETLS_DISPATCHTHREAD = 0x1000, // This is a dispatch thread
+ OLETLS_HOSTTHREAD = 0x2000, // This is a host thread
+ OLETLS_ALLOWCOINIT = 0x4000, // This thread allows inits
+ OLETLS_PENDINGUNINIT = 0x8000, // This thread has pending uninit
+ OLETLS_FIRSTMTAINIT = 0x10000,// First thread to attempt an MTA init
+ OLETLS_FIRSTNTAINIT = 0x20000,// First thread to attempt an NTA init
+ OLETLS_APTINITIALIZING = 0x40000 // Apartment Object is initializing
+} OLETLSFLAGS;
+
+
+//+---------------------------------------------------------------------------
+//
+// Structure: SOleTlsData
+//
+// Synopsis: structure holding per thread state needed by OLE32
+//
+//+---------------------------------------------------------------------------
+typedef struct tagSOleTlsData
+{
+#if !defined(_CHICAGO_)
+ // Docfile multiple allocator support
+ void *pvThreadBase; // per thread base pointer
+ CSmAllocator *pSmAllocator; // per thread docfile allocator
+#endif
+ DWORD dwApartmentID; // Per thread "process ID"
+ DWORD dwFlags; // see OLETLSFLAGS above
+
+ LONG TlsMapIndex; // index in the global TLSMap
+ void **ppTlsSlot; // Back pointer to the thread tls slot
+ DWORD cComInits; // number of per-thread inits
+ DWORD cOleInits; // number of per-thread OLE inits
+
+ DWORD cCalls; // number of outstanding calls
+ CMessageCall *pCallInfo; // channel call info
+ CAsyncCall *pFreeAsyncCall; // ptr to available call object for this thread.
+ CClientCall *pFreeClientCall; // ptr to available call object for this thread.
+
+ CObjServer *pObjServer; // Activation Server Object for this apartment.
+ DWORD dwTIDCaller; // TID of current calling app
+ CObjectContext *pCurrentCtx; // Current context
+ CObjectContext *pEmptyCtx; // Empty context
+
+ CObjectContext *pNativeCtx; // Native context
+ CComApartment *pNativeApt; // Native apartment for the thread.
+ IUnknown *pCallContext; // call context object
+ CCtxCall *pCtxCall; // Context call object
+
+ CPolicySet *pPS; // Policy set
+ PVOID pvPendingCallsFront;// Per Apt pending async calls
+ PVOID pvPendingCallsBack;
+ CAptCallCtrl *pCallCtrl; // call control for RPC for this apartment
+
+ CSrvCallState *pTopSCS; // top server-side callctrl state
+ IMessageFilter *pMsgFilter; // temp storage for App MsgFilter
+ HWND hwndSTA; // STA server window same as poxid->hServerSTA
+ // ...needed on Win95 before oxid registration
+ LONG cORPCNestingLevel; // call nesting level (DBG only)
+
+ DWORD cDebugData; // count of bytes of debug data in call
+ ULONG cPreRegOidsAvail; // count of server-side OIDs avail
+ unsigned hyper *pPreRegOids; // ptr to array of pre-reg OIDs
+
+ UUID LogicalThreadId; // current logical thread id
+
+ HANDLE hThread; // Thread handle used for cancel
+ HANDLE hRevert; // Token before first impersonate.
+ IUnknown *pAsyncRelease; // Controlling unknown for async release
+ // DDE data
+ HWND hwndDdeServer; // Per thread Common DDE server
+
+ HWND hwndDdeClient; // Per thread Common DDE client
+ ULONG cServeDdeObjects; // non-zero if objects DDE should serve
+ // ClassCache data
+ LPVOID pSTALSvrsFront; // Chain of LServers registers in this thread if STA
+ // upper layer data
+ HWND hwndClip; // Clipboard window
+
+ IDataObject *pDataObjClip; // Current Clipboard DataObject
+ DWORD dwClipSeqNum; // Clipboard Sequence # for the above DataObject
+ DWORD fIsClipWrapper; // Did we hand out the wrapper Clipboard DataObject?
+ IUnknown *punkState; // Per thread "state" object
+ // cancel data
+ DWORD cCallCancellation; // count of CoEnableCallCancellation
+ // async sends data
+ DWORD cAsyncSends; // count of async sends outstanding
+
+ CAsyncCall* pAsyncCallList; // async calls outstanding
+ CSurrogatedObjectList *pSurrogateList; // Objects in the surrogate
+
+ LockEntry lockEntry; // Locks currently held by the thread
+ CallEntry CallEntry; // client-side call chain for this thread
+
+#ifdef WX86OLE
+ IUnknown *punkStateWx86; // Per thread "state" object for Wx86
+#endif
+ void *pDragCursors; // Per thread drag cursor table.
+
+#ifdef _CHICAGO_
+ LPVOID pWcstokContext; // Scan context for wcstok
+#endif
+
+ IUnknown *punkError; // Per thread error object.
+ ULONG cbErrorData; // Maximum size of error data.
+
+#if(_WIN32_WINNT >= 0x0500)
+ IUnknown *punkActiveXSafetyProvider;
+#endif //(_WIN32_WINNT >= 0x0500)
+
+#if DBG==1
+ LONG cTraceNestingLevel; // call nesting level for OLETRACE
+#endif
+
+} SOleTlsData;
+
+#ifdef INITGUID
+#include "initguid.h"
+#endif
+
+#define DEFINE_OLEGUID(name, l, w1, w2) \
+ DEFINE_GUID(name, l, w1, w2, 0xC0,0,0,0,0,0,0,0x46)
+
+DEFINE_OLEGUID(IID_IStdIdentity, 0x0000001bL, 0, 0);
+DEFINE_OLEGUID(IID_IStdWrapper, 0x000001caL, 0, 0);
+
+#endif // _OLETLS_H_
diff --git a/src/vm/olevariant.cpp b/src/vm/olevariant.cpp
new file mode 100644
index 0000000000..0aa56e5908
--- /dev/null
+++ b/src/vm/olevariant.cpp
@@ -0,0 +1,5272 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: OleVariant.cpp
+//
+
+//
+
+
+#include "common.h"
+
+#include "object.h"
+#include "excep.h"
+#include "frames.h"
+#include "vars.hpp"
+#include "security.h"
+#include "olevariant.h"
+#include "comdatetime.h"
+#include "fieldmarshaler.h"
+#include "mdaassistants.h"
+
+/* ------------------------------------------------------------------------- *
+ * Local constants
+ * ------------------------------------------------------------------------- */
+
+#define NO_MAPPING ((BYTE) -1)
+
+#define GCPROTECT_BEGIN_VARIANTDATA(/*VARIANTDATA*/vd) do { \
+ FrameWithCookie<GCFrame> __gcframe(vd.GetObjRefPtr(), 1, FALSE); \
+ /* work around unreachable code warning */ \
+ if (true) { DEBUG_ASSURE_NO_RETURN_BEGIN(GCPROTECT);
+
+
+#define GCPROTECT_END_VARIANTDATA() \
+ DEBUG_ASSURE_NO_RETURN_END(GCPROTECT); } \
+ __gcframe.Pop(); } while(0)
+
+
+// Mapping from CVTypes to BinderClassID. Used for conversion between the two internally.
+const BinderClassID CVTypeToBinderClassID[] =
+{
+ CLASS__EMPTY, //CV_EMPTY
+ CLASS__VOID, //CV_VOID, Changing this to object messes up signature resolution very badly.
+ CLASS__BOOLEAN, //CV_BOOLEAN
+ CLASS__CHAR, //CV_CHAR
+ CLASS__SBYTE, //CV_I1
+ CLASS__BYTE, //CV_U1
+ CLASS__INT16, //CV_I2
+ CLASS__UINT16, //CV_U2
+ CLASS__INT32, //CV_I4
+ CLASS__UINT32, //CV_UI4
+ CLASS__INT64, //CV_I8
+ CLASS__UINT64, //CV_UI8
+ CLASS__SINGLE, //CV_R4
+ CLASS__DOUBLE, //CV_R8
+ CLASS__STRING, //CV_STRING
+ CLASS__VOID, //CV_PTR...We treat this as void
+ CLASS__DATE_TIME, //CV_DATETIME
+ CLASS__TIMESPAN, //CV_TIMESPAN
+ CLASS__OBJECT, //CV_OBJECT
+ CLASS__DECIMAL, //CV_DECIMAL
+ CLASS__CURRENCY, //CV_CURRENCY
+ CLASS__OBJECT, //ENUM...We treat this as OBJECT
+ CLASS__MISSING, //CV_MISSING
+ CLASS__NULL, //CV_NULL
+ CLASS__NIL, //CV_LAST
+};
+
+// Use this very carefully. There is no direct mapping between
+// CorElementType and CVTypes for a number of element types; in those
+// cases we return CV_LAST, and you need to check for it at the call site.
+CVTypes CorElementTypeToCVTypes(CorElementType type)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (type <= ELEMENT_TYPE_STRING)
+ return (CVTypes) type;
+
+ if (type == ELEMENT_TYPE_CLASS || type == ELEMENT_TYPE_OBJECT)
+ return (CVTypes) ELEMENT_TYPE_CLASS;
+
+ return CV_LAST;
+}
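+
+// Editor's note: a sketch (not original source) of the call-site check requested
+// by the comment above, since CV_LAST is a "no mapping" sentinel rather than a
+// usable CVTypes value:
+//
+//   CVTypes cv = CorElementTypeToCVTypes(et);
+//   if (cv == CV_LAST)
+//   {
+//       // no direct mapping for this element type; handle it separately
+//   }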
+
+/* ------------------------------------------------------------------------- *
+ * Mapping routines
+ * ------------------------------------------------------------------------- */
+
+VARTYPE OleVariant::GetVarTypeForCVType(CVTypes type)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ static const BYTE map[] =
+ {
+ VT_EMPTY, // CV_EMPTY
+ VT_VOID, // CV_VOID
+ VT_BOOL, // CV_BOOLEAN
+ VT_UI2, // CV_CHAR
+ VT_I1, // CV_I1
+ VT_UI1, // CV_U1
+ VT_I2, // CV_I2
+ VT_UI2, // CV_U2
+ VT_I4, // CV_I4
+ VT_UI4, // CV_U4
+ VT_I8, // CV_I8
+ VT_UI8, // CV_U8
+ VT_R4, // CV_R4
+ VT_R8, // CV_R8
+ VT_BSTR, // CV_STRING
+ NO_MAPPING, // CV_PTR
+ VT_DATE, // CV_DATETIME
+ NO_MAPPING, // CV_TIMESPAN
+ VT_DISPATCH, // CV_OBJECT
+ VT_DECIMAL, // CV_DECIMAL
+ VT_CY, // CV_CURRENCY
+ VT_I4, // CV_ENUM
+ VT_ERROR, // CV_MISSING
+ VT_NULL // CV_NULL
+ };
+
+ _ASSERTE(type < (CVTypes) (sizeof(map) / sizeof(map[0])));
+
+ VARTYPE vt = VARTYPE(map[type]);
+
+ if (vt == NO_MAPPING)
+ COMPlusThrow(kArgumentException, IDS_EE_COM_UNSUPPORTED_SIG);
+
+ return vt;
+}
+
+//
+// GetCVTypeForVarType returns the COM+ variant type for a given
+// VARTYPE. This is called by the marshaller in the context of
+// a function call.
+//
+
+CVTypes OleVariant::GetCVTypeForVarType(VARTYPE vt)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ static const BYTE map[] =
+ {
+ CV_EMPTY, // VT_EMPTY
+ CV_NULL, // VT_NULL
+ CV_I2, // VT_I2
+ CV_I4, // VT_I4
+ CV_R4, // VT_R4
+ CV_R8, // VT_R8
+ CV_DECIMAL, // VT_CY
+ CV_DATETIME, // VT_DATE
+ CV_STRING, // VT_BSTR
+ CV_OBJECT, // VT_DISPATCH
+ CV_I4, // VT_ERROR
+ CV_BOOLEAN, // VT_BOOL
+ NO_MAPPING, // VT_VARIANT
+ CV_OBJECT, // VT_UNKNOWN
+ CV_DECIMAL, // VT_DECIMAL
+ NO_MAPPING, // unused
+ CV_I1, // VT_I1
+ CV_U1, // VT_UI1
+ CV_U2, // VT_UI2
+ CV_U4, // VT_UI4
+ CV_I8, // VT_I8
+ CV_U8, // VT_UI8
+ CV_I4, // VT_INT
+ CV_U4, // VT_UINT
+ CV_VOID, // VT_VOID
+ NO_MAPPING, // VT_HRESULT
+ NO_MAPPING, // VT_PTR
+ NO_MAPPING, // VT_SAFEARRAY
+ NO_MAPPING, // VT_CARRAY
+ NO_MAPPING, // VT_USERDEFINED
+ NO_MAPPING, // VT_LPSTR
+ NO_MAPPING, // VT_LPWSTR
+ NO_MAPPING, // unused
+ NO_MAPPING, // unused
+ NO_MAPPING, // unused
+ NO_MAPPING, // unused
+ CV_OBJECT, // VT_RECORD
+ };
+
+ CVTypes type = CV_LAST;
+
+ // Validate the arguments.
+ _ASSERTE((vt & VT_BYREF) == 0);
+
+    // Arrays map to CV_OBJECT.
+ if (vt & VT_ARRAY)
+ return CV_OBJECT;
+
+    // This is pretty much a workaround: you cannot cast a CorElementType directly into a CVTypes value.
+ if (vt > VT_RECORD || (BYTE)(type = (CVTypes) map[vt]) == NO_MAPPING)
+ COMPlusThrow(kArgumentException, IDS_EE_COM_UNSUPPORTED_TYPE);
+
+ return type;
+} // CVTypes OleVariant::GetCVTypeForVarType()
+
+#ifdef FEATURE_COMINTEROP
+
+// GetVarTypeForComVariant returns the VARTYPE for the contents
+// of a COM+ variant.
+//
+VARTYPE OleVariant::GetVarTypeForComVariant(VariantData *pComVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ CVTypes type = pComVariant->GetType();
+ VARTYPE vt;
+
+ vt = pComVariant->GetVT();
+ if (vt != VT_EMPTY)
+ {
+ // This variant was originally unmarshaled from unmanaged, and had the original VT recorded in it.
+ // We'll always use that over inference.
+ return vt;
+ }
+
+ if (type == CV_OBJECT)
+ {
+ OBJECTREF obj = pComVariant->GetObjRef();
+
+ // Null objects will be converted to VT_DISPATCH variants with a null
+ // IDispatch pointer.
+ if (obj == NULL)
+ return VT_DISPATCH;
+
+ // Retrieve the object's method table.
+ MethodTable *pMT = obj->GetMethodTable();
+
+ // Handle the value class case.
+ if (pMT->IsValueType())
+ return VT_RECORD;
+
+ // Handle the array case.
+ if (pMT->IsArray())
+ {
+ vt = GetElementVarTypeForArrayRef((BASEARRAYREF)obj);
+ if (vt == VT_ARRAY)
+ vt = VT_VARIANT;
+
+ return vt | VT_ARRAY;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // SafeHandle's or CriticalHandle's cannot be stored in VARIANT's.
+ if (pMT->CanCastToClass(MscorlibBinder::GetClass(CLASS__SAFE_HANDLE)))
+ COMPlusThrow(kArgumentException, IDS_EE_SH_IN_VARIANT_NOT_SUPPORTED);
+ if (pMT->CanCastToClass(MscorlibBinder::GetClass(CLASS__CRITICAL_HANDLE)))
+ COMPlusThrow(kArgumentException, IDS_EE_CH_IN_VARIANT_NOT_SUPPORTED);
+
+ // VariantWrappers cannot be stored in VARIANT's.
+ if (MscorlibBinder::IsClass(pMT, CLASS__VARIANT_WRAPPER))
+ COMPlusThrow(kArgumentException, IDS_EE_VAR_WRAP_IN_VAR_NOT_SUPPORTED);
+
+ // We are dealing with a normal object (not a wrapper) so we will
+ // leave the VT as VT_DISPATCH for now and we will determine the actual
+ // VT when we convert the object to a COM IP.
+ return VT_DISPATCH;
+#else // FEATURE_COMINTEROP
+ return VT_UNKNOWN;
+#endif // FEATURE_COMINTEROP
+ }
+
+ return GetVarTypeForCVType(type);
+}
+
+#endif // FEATURE_COMINTEROP
+
+VARTYPE OleVariant::GetVarTypeForTypeHandle(TypeHandle type)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Handle primitive types.
+ CorElementType elemType = type.GetSignatureCorElementType();
+ if (elemType <= ELEMENT_TYPE_R8)
+ return GetVarTypeForCVType(CorElementTypeToCVTypes(elemType));
+
+ // Handle objects.
+ if (!type.IsTypeDesc())
+ {
+ MethodTable * pMT = type.AsMethodTable();
+
+ if (pMT == g_pStringClass)
+ return VT_BSTR;
+ if (pMT == g_pObjectClass)
+ return VT_VARIANT;
+
+ // We need to make sure the CVClasses table is populated.
+ if(MscorlibBinder::IsClass(pMT, CLASS__DATE_TIME))
+ return VT_DATE;
+ if(MscorlibBinder::IsClass(pMT, CLASS__DECIMAL))
+ return VT_DECIMAL;
+
+#ifdef _WIN64
+ if (MscorlibBinder::IsClass(pMT, CLASS__INTPTR))
+ return VT_I8;
+ if (MscorlibBinder::IsClass(pMT, CLASS__UINTPTR))
+ return VT_UI8;
+#else
+ if (MscorlibBinder::IsClass(pMT, CLASS__INTPTR))
+ return VT_INT;
+ if (MscorlibBinder::IsClass(pMT, CLASS__UINTPTR))
+ return VT_UINT;
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ if (MscorlibBinder::IsClass(pMT, CLASS__DISPATCH_WRAPPER))
+ return VT_DISPATCH;
+ if (MscorlibBinder::IsClass(pMT, CLASS__UNKNOWN_WRAPPER))
+ return VT_UNKNOWN;
+ if (MscorlibBinder::IsClass(pMT, CLASS__ERROR_WRAPPER))
+ return VT_ERROR;
+ if (MscorlibBinder::IsClass(pMT, CLASS__CURRENCY_WRAPPER))
+ return VT_CY;
+ if (MscorlibBinder::IsClass(pMT, CLASS__BSTR_WRAPPER))
+ return VT_BSTR;
+
+ // VariantWrappers cannot be stored in VARIANT's.
+ if (MscorlibBinder::IsClass(pMT, CLASS__VARIANT_WRAPPER))
+ COMPlusThrow(kArgumentException, IDS_EE_COM_UNSUPPORTED_SIG);
+#endif // FEATURE_COMINTEROP
+
+ if (pMT->IsEnum())
+ return GetVarTypeForCVType((CVTypes)type.GetInternalCorElementType());
+
+ if (pMT->IsValueType())
+ return VT_RECORD;
+
+#ifdef FEATURE_COMINTEROP
+ // There is no VT corresponding to SafeHandles as they cannot be stored in
+ // VARIANTs or Arrays. The same applies to CriticalHandle.
+ if (type.CanCastTo(TypeHandle(MscorlibBinder::GetClass(CLASS__SAFE_HANDLE))))
+ COMPlusThrow(kArgumentException, IDS_EE_COM_UNSUPPORTED_SIG);
+ if (type.CanCastTo(TypeHandle(MscorlibBinder::GetClass(CLASS__CRITICAL_HANDLE))))
+ COMPlusThrow(kArgumentException, IDS_EE_COM_UNSUPPORTED_SIG);
+
+ if (pMT->IsInterface())
+ {
+ CorIfaceAttr ifaceType = pMT->GetComInterfaceType();
+ return static_cast<VARTYPE>(IsDispatchBasedItf(ifaceType) ? VT_DISPATCH : VT_UNKNOWN);
+ }
+
+ TypeHandle hndDefItfClass;
+ DefaultInterfaceType DefItfType = GetDefaultInterfaceForClassWrapper(type, &hndDefItfClass);
+ switch (DefItfType)
+ {
+ case DefaultInterfaceType_Explicit:
+ {
+ CorIfaceAttr ifaceType = hndDefItfClass.GetMethodTable()->GetComInterfaceType();
+ return static_cast<VARTYPE>(IsDispatchBasedItf(ifaceType) ? VT_DISPATCH : VT_UNKNOWN);
+ }
+
+ case DefaultInterfaceType_AutoDual:
+ {
+ return VT_DISPATCH;
+ }
+
+ case DefaultInterfaceType_IUnknown:
+ case DefaultInterfaceType_BaseComClass:
+ {
+ return VT_UNKNOWN;
+ }
+
+ case DefaultInterfaceType_AutoDispatch:
+ {
+ return VT_DISPATCH;
+ }
+
+ default:
+ {
+ _ASSERTE(!"Invalid default interface type!");
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ return VT_UNKNOWN;
+ }
+
+    // Handle arrays.
+ if (!CorTypeInfo::IsArray(elemType))
+ {
+ // Non interop compatible type.
+ COMPlusThrow(kArgumentException, IDS_EE_COM_UNSUPPORTED_SIG);
+ }
+
+ return VT_ARRAY;
+}
+
+//
+// GetElementVarTypeForArrayRef returns the safearray variant type for the
+// underlying elements in the array.
+//
+
+VARTYPE OleVariant::GetElementVarTypeForArrayRef(BASEARRAYREF pArrayRef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ TypeHandle elemTypeHnd = pArrayRef->GetArrayElementTypeHandle();
+    return GetVarTypeForTypeHandle(elemTypeHnd);
+}
+
+#ifdef FEATURE_COMINTEROP
+
+BOOL OleVariant::IsValidArrayForSafeArrayElementType(BASEARRAYREF *pArrayRef, VARTYPE vtExpected)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Retrieve the VARTYPE for the managed array.
+ VARTYPE vtActual = GetElementVarTypeForArrayRef(*pArrayRef);
+
+ // If the actual type is the same as the expected type, then the array is valid.
+ if (vtActual == vtExpected)
+ return TRUE;
+
+ // Check for additional supported VARTYPES.
+ switch (vtExpected)
+ {
+ case VT_I4:
+ return vtActual == VT_INT;
+
+ case VT_INT:
+ return vtActual == VT_I4;
+
+ case VT_UI4:
+ return vtActual == VT_UINT;
+
+ case VT_UINT:
+ return vtActual == VT_UI4;
+
+ case VT_UNKNOWN:
+ return vtActual == VT_VARIANT || vtActual == VT_DISPATCH;
+
+ case VT_DISPATCH:
+ return vtActual == VT_VARIANT;
+
+ case VT_CY:
+ return vtActual == VT_DECIMAL;
+
+ default:
+ return FALSE;
+ }
+}
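+
+// Illustrative only: a hedged sketch (not part of the original source) of a
+// typical validation a caller might perform before copying a SAFEARRAY's
+// contents; the locals and the exception kind shown are assumptions.
+//
+//   if (!OleVariant::IsValidArrayForSafeArrayElementType(&arrayRef, vtExpected))
+//       COMPlusThrow(kSafeArrayTypeMismatchException);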
+
+#endif // FEATURE_COMINTEROP
+
+//
+// GetArrayForVarType returns the type handle of the managed array type
+// used to represent an array with the given variant type, element type, and rank.
+//
+
+TypeHandle OleVariant::GetArrayForVarType(VARTYPE vt, TypeHandle elemType, unsigned rank)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ CorElementType baseElement = ELEMENT_TYPE_END;
+ TypeHandle baseType;
+
+ if (!elemType.IsNull() && elemType.IsEnum())
+ {
+ baseType = elemType;
+ }
+ else
+ {
+ switch (vt)
+ {
+ case VT_BOOL:
+ case VTHACK_WINBOOL:
+ case VTHACK_CBOOL:
+ baseElement = ELEMENT_TYPE_BOOLEAN;
+ break;
+
+ case VTHACK_ANSICHAR:
+ baseElement = ELEMENT_TYPE_CHAR;
+ break;
+
+ case VT_UI1:
+ baseElement = ELEMENT_TYPE_U1;
+ break;
+
+ case VT_I1:
+ baseElement = ELEMENT_TYPE_I1;
+ break;
+
+ case VT_UI2:
+ baseElement = ELEMENT_TYPE_U2;
+ break;
+
+ case VT_I2:
+ baseElement = ELEMENT_TYPE_I2;
+ break;
+
+ case VT_UI4:
+ case VT_UINT:
+ case VT_ERROR:
+ if (vt == VT_UI4)
+ {
+ if (elemType.IsNull() || elemType == TypeHandle(g_pObjectClass))
+ {
+ baseElement = ELEMENT_TYPE_U4;
+ }
+ else
+ {
+ switch (elemType.AsMethodTable()->GetInternalCorElementType())
+ {
+ case ELEMENT_TYPE_U4:
+ baseElement = ELEMENT_TYPE_U4;
+ break;
+ case ELEMENT_TYPE_U:
+ baseElement = ELEMENT_TYPE_U;
+ break;
+ default:
+ _ASSERTE(0);
+ }
+ }
+ }
+ else
+ {
+ baseElement = ELEMENT_TYPE_U4;
+ }
+ break;
+
+ case VT_I4:
+ case VT_INT:
+ if (vt == VT_I4)
+ {
+ if (elemType.IsNull() || elemType == TypeHandle(g_pObjectClass))
+ {
+ baseElement = ELEMENT_TYPE_I4;
+ }
+ else
+ {
+ switch (elemType.AsMethodTable()->GetInternalCorElementType())
+ {
+ case ELEMENT_TYPE_I4:
+ baseElement = ELEMENT_TYPE_I4;
+ break;
+ case ELEMENT_TYPE_I:
+ baseElement = ELEMENT_TYPE_I;
+ break;
+ default:
+ _ASSERTE(0);
+ }
+ }
+ }
+ else
+ {
+ baseElement = ELEMENT_TYPE_I4;
+ }
+ break;
+
+ case VT_I8:
+ if (elemType.IsNull() || elemType == TypeHandle(g_pObjectClass))
+ {
+ baseElement = ELEMENT_TYPE_I8;
+ }
+ else
+ {
+ switch (elemType.AsMethodTable()->GetInternalCorElementType())
+ {
+ case ELEMENT_TYPE_I8:
+ baseElement = ELEMENT_TYPE_I8;
+ break;
+ case ELEMENT_TYPE_I:
+ baseElement = ELEMENT_TYPE_I;
+ break;
+ default:
+ _ASSERTE(0);
+ }
+ }
+ break;
+
+ case VT_UI8:
+ if (elemType.IsNull() || elemType == TypeHandle(g_pObjectClass))
+ {
+ baseElement = ELEMENT_TYPE_U8;
+ }
+ else
+ {
+ switch (elemType.AsMethodTable()->GetInternalCorElementType())
+ {
+ case ELEMENT_TYPE_U8:
+ baseElement = ELEMENT_TYPE_U8;
+ break;
+ case ELEMENT_TYPE_U:
+ baseElement = ELEMENT_TYPE_U;
+ break;
+ default:
+ _ASSERTE(0);
+ }
+ }
+ break;
+
+ case VT_R4:
+ baseElement = ELEMENT_TYPE_R4;
+ break;
+
+ case VT_R8:
+ baseElement = ELEMENT_TYPE_R8;
+ break;
+
+ case VT_CY:
+ baseType = TypeHandle(MscorlibBinder::GetClass(CLASS__DECIMAL));
+ break;
+
+ case VT_DATE:
+ baseType = TypeHandle(MscorlibBinder::GetClass(CLASS__DATE_TIME));
+ break;
+
+ case VT_DECIMAL:
+ baseType = TypeHandle(MscorlibBinder::GetClass(CLASS__DECIMAL));
+ break;
+
+ case VT_VARIANT:
+
+ //
+ // It would be nice if our conversion between SAFEARRAY and
+ // array ref were symmetric. Right now it is not, because a
+ // jagged array converted to a SAFEARRAY and back will come
+ // back as an array of variants.
+ //
+        // We could try to detect the case where we can turn a
+        // safearray of variants into a jagged array. Basically we
+        // need to make sure that all of the variants in the array
+        // have the same array type. (And if that is array of
+        // variant, we need to look recursively for another layer.)
+        //
+        // We also need to check the dimensions of each array stored
+        // in the variant to make sure they have the same rank, and
+        // this rank is needed to build the correct array class name.
+ // (Note that it will be impossible to tell the rank if all
+ // elements in the array are NULL.)
+ //
+
+ // <TODO>@nice: implement this functionality if we decide it really makes sense
+ // For now, just live with the asymmetry</TODO>
+
+ baseType = TypeHandle(g_pObjectClass);
+ break;
+
+ case VT_BSTR:
+ case VT_LPWSTR:
+ case VT_LPSTR:
+ baseElement = ELEMENT_TYPE_STRING;
+ break;
+
+ case VT_DISPATCH:
+ case VT_UNKNOWN:
+ if (elemType.IsNull())
+ baseType = TypeHandle(g_pObjectClass);
+ else
+ baseType = elemType;
+ break;
+
+ case VT_RECORD:
+ _ASSERTE(!elemType.IsNull());
+ baseType = elemType;
+ break;
+
+ default:
+ COMPlusThrow(kArgumentException, IDS_EE_COM_UNSUPPORTED_SIG);
+ }
+ }
+
+ if (baseType.IsNull())
+ baseType = TypeHandle(MscorlibBinder::GetElementType(baseElement));
+
+ _ASSERTE(!baseType.IsNull());
+
+ return ClassLoader::LoadArrayTypeThrowing(baseType, rank == 0 ? ELEMENT_TYPE_SZARRAY : ELEMENT_TYPE_ARRAY, rank == 0 ? 1 : rank);
+}
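+
+// Illustrative only: a minimal, hedged sketch (not part of the original
+// source) of mapping a VARTYPE to a managed array type. A rank of 0 yields
+// a vector (SZARRAY); any other rank yields a general array of that rank.
+//
+//   // int[] for a vector of VT_I4 (no explicit element type)
+//   TypeHandle thVector = OleVariant::GetArrayForVarType(VT_I4, TypeHandle(), 0);
+//   // object[,] for a rank-2 safearray of VT_VARIANT
+//   TypeHandle thMatrix = OleVariant::GetArrayForVarType(VT_VARIANT, TypeHandle(), 2);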
+
+//
+// GetElementSizeForVarType returns the array element size for the given variant type.
+//
+
+UINT OleVariant::GetElementSizeForVarType(VARTYPE vt, MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ static const BYTE map[] =
+ {
+ 0, // VT_EMPTY
+ 0, // VT_NULL
+ 2, // VT_I2
+ 4, // VT_I4
+ 4, // VT_R4
+ 8, // VT_R8
+ sizeof(CURRENCY), // VT_CY
+ sizeof(DATE), // VT_DATE
+ sizeof(BSTR), // VT_BSTR
+ sizeof(IDispatch*), // VT_DISPATCH
+ sizeof(SCODE), // VT_ERROR
+ sizeof(VARIANT_BOOL), // VT_BOOL
+ sizeof(VARIANT), // VT_VARIANT
+ sizeof(IUnknown*), // VT_UNKNOWN
+ sizeof(DECIMAL), // VT_DECIMAL
+ 0, // unused
+ 1, // VT_I1
+ 1, // VT_UI1
+ 2, // VT_UI2
+ 4, // VT_UI4
+ 8, // VT_I8
+ 8, // VT_UI8
+ 4, // VT_INT
+ 4, // VT_UINT
+ 0, // VT_VOID
+ sizeof(HRESULT), // VT_HRESULT
+ sizeof(void*), // VT_PTR
+ sizeof(SAFEARRAY*), // VT_SAFEARRAY
+ sizeof(void*), // VT_CARRAY
+ sizeof(void*), // VT_USERDEFINED
+ sizeof(LPSTR), // VT_LPSTR
+ sizeof(LPWSTR), // VT_LPWSTR
+ };
+
+ // Special cases
+ switch (vt)
+ {
+        case VTHACK_WINBOOL:
+            return sizeof(BOOL);
+        case VTHACK_ANSICHAR:
+            return GetMaxDBCSCharByteSize(); // Multi-byte characters.
+        case VTHACK_CBOOL:
+            return sizeof(BYTE);
+ default:
+ break;
+ }
+
+ // VT_ARRAY indicates a safe array which is always sizeof(SAFEARRAY *).
+ if (vt & VT_ARRAY)
+ return sizeof(SAFEARRAY*);
+
+ if (vt == VTHACK_NONBLITTABLERECORD || vt == VTHACK_BLITTABLERECORD || vt == VT_RECORD)
+ {
+ PREFIX_ASSUME(pInterfaceMT != NULL);
+ return pInterfaceMT->GetNativeSize();
+ }
+ else if (vt > VT_LPWSTR)
+ return 0;
+ else
+ return map[vt];
+}
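+
+// Illustrative only: a hedged sketch (not part of the original source)
+// combining GetElementVarTypeForArrayRef and GetElementSizeForVarType to
+// size the native buffer for a copy of a managed array; the locals are
+// hypothetical and overflow checking is omitted.
+//
+//   VARTYPE vtElem   = OleVariant::GetElementVarTypeForArrayRef(arrayRef);
+//   UINT    cbElem   = OleVariant::GetElementSizeForVarType(vtElem, pElemMT);
+//   SIZE_T  cbBuffer = cbElem * arrayRef->GetNumComponents();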
+
+//
+// GetMarshalerForVarType returns the marshaler for the
+// given VARTYPE.
+//
+
+const OleVariant::Marshaler *OleVariant::GetMarshalerForVarType(VARTYPE vt, BOOL fThrow)
+{
+ CONTRACT (const OleVariant::Marshaler*)
+ {
+ if (fThrow) THROWS; else NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+#ifdef FEATURE_COMINTEROP
+
+#ifdef CROSSGEN_COMPILE
+#define RETURN_MARSHALER(OleToCom, ComToOle, OleRefToCom, ArrayOleToCom, ArrayComToOle, ClearArray) \
+ { static const Marshaler marshaler = { NULL, NULL, NULL, NULL, NULL, NULL }; RETURN &marshaler; }
+#else
+#define RETURN_MARSHALER(OleToCom, ComToOle, OleRefToCom, ArrayOleToCom, ArrayComToOle, ClearArray) \
+ { static const Marshaler marshaler = { OleToCom, ComToOle, OleRefToCom, ArrayOleToCom, ArrayComToOle, ClearArray }; RETURN &marshaler; }
+#endif
+
+#else // FEATURE_COMINTEROP
+
+#ifdef CROSSGEN_COMPILE
+#define RETURN_MARSHALER(OleToCom, ComToOle, OleRefToCom, ArrayOleToCom, ArrayComToOle, ClearArray) \
+ { static const Marshaler marshaler = { NULL, NULL, NULL }; RETURN &marshaler; }
+#else
+#define RETURN_MARSHALER(OleToCom, ComToOle, OleRefToCom, ArrayOleToCom, ArrayComToOle, ClearArray) \
+ { static const Marshaler marshaler = { ArrayOleToCom, ArrayComToOle, ClearArray }; RETURN &marshaler; }
+#endif
+
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+ if (vt & VT_ARRAY)
+ {
+VariantArray:
+ RETURN_MARSHALER(
+ MarshalArrayVariantOleToCom,
+ MarshalArrayVariantComToOle,
+ MarshalArrayVariantOleRefToCom,
+ NULL,
+ NULL,
+ ClearVariantArray
+ );
+ }
+#endif // FEATURE_CLASSIC_COMINTEROP
+
+ switch (vt)
+ {
+ case VT_BOOL:
+ RETURN_MARSHALER(
+ MarshalBoolVariantOleToCom,
+ NULL,
+ NULL,
+ MarshalBoolArrayOleToCom,
+ MarshalBoolArrayComToOle,
+ NULL
+ );
+
+ case VT_DATE:
+ RETURN_MARSHALER(
+ MarshalDateVariantOleToCom,
+ MarshalDateVariantComToOle,
+ MarshalDateVariantOleRefToCom,
+ MarshalDateArrayOleToCom,
+ MarshalDateArrayComToOle,
+ NULL
+ );
+
+ case VT_DECIMAL:
+ RETURN_MARSHALER(
+ MarshalDecimalVariantOleToCom,
+ MarshalDecimalVariantComToOle,
+ MarshalDecimalVariantOleRefToCom,
+ NULL, NULL, NULL
+ );
+
+#ifdef FEATURE_COMINTEROP
+ case VT_CY:
+ RETURN_MARSHALER(
+ MarshalCurrencyVariantOleToCom,
+ MarshalCurrencyVariantComToOle,
+ MarshalCurrencyVariantOleRefToCom,
+ MarshalCurrencyArrayOleToCom,
+ MarshalCurrencyArrayComToOle,
+ NULL
+ );
+
+ case VT_BSTR:
+ RETURN_MARSHALER(
+ MarshalBSTRVariantOleToCom,
+ MarshalBSTRVariantComToOle,
+ NULL,
+ MarshalBSTRArrayOleToCom,
+ MarshalBSTRArrayComToOle,
+ ClearBSTRArray
+ );
+
+ case VT_UNKNOWN:
+ RETURN_MARSHALER(
+ MarshalInterfaceVariantOleToCom,
+ MarshalInterfaceVariantComToOle,
+ MarshalInterfaceVariantOleRefToCom,
+ MarshalInterfaceArrayOleToCom,
+ MarshalIUnknownArrayComToOle,
+ ClearInterfaceArray
+ );
+
+ case VT_DISPATCH:
+ RETURN_MARSHALER(
+ MarshalInterfaceVariantOleToCom,
+ MarshalInterfaceVariantComToOle,
+ MarshalInterfaceVariantOleRefToCom,
+ MarshalInterfaceArrayOleToCom,
+ MarshalIDispatchArrayComToOle,
+ ClearInterfaceArray
+ );
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+ case VT_SAFEARRAY:
+ goto VariantArray;
+#endif
+
+ case VT_VARIANT:
+ RETURN_MARSHALER(
+ NULL, NULL, NULL,
+ MarshalVariantArrayOleToCom,
+ MarshalVariantArrayComToOle,
+ ClearVariantArray
+ );
+
+ case VT_ERROR:
+ RETURN_MARSHALER(
+ MarshalErrorVariantOleToCom,
+ MarshalErrorVariantComToOle,
+ MarshalErrorVariantOleRefToCom,
+ NULL, NULL, NULL
+ );
+#endif // FEATURE_COMINTEROP
+
+ case VTHACK_NONBLITTABLERECORD:
+ RETURN_MARSHALER(
+ NULL, NULL, NULL,
+ MarshalNonBlittableRecordArrayOleToCom,
+ MarshalNonBlittableRecordArrayComToOle,
+ ClearNonBlittableRecordArray
+ );
+
+ case VTHACK_BLITTABLERECORD:
+ RETURN NULL; // Requires no marshaling
+
+ case VTHACK_WINBOOL:
+ RETURN_MARSHALER(
+ MarshalWinBoolVariantOleToCom,
+ MarshalWinBoolVariantComToOle,
+ MarshalWinBoolVariantOleRefToCom,
+ MarshalWinBoolArrayOleToCom,
+ MarshalWinBoolArrayComToOle,
+ NULL
+ );
+
+ case VTHACK_CBOOL:
+ RETURN_MARSHALER(
+ MarshalCBoolVariantOleToCom,
+ MarshalCBoolVariantComToOle,
+ MarshalCBoolVariantOleRefToCom,
+ MarshalCBoolArrayOleToCom,
+ MarshalCBoolArrayComToOle,
+ NULL
+ );
+
+ case VTHACK_ANSICHAR:
+ RETURN_MARSHALER(
+ MarshalAnsiCharVariantOleToCom,
+ MarshalAnsiCharVariantComToOle,
+ MarshalAnsiCharVariantOleRefToCom,
+ MarshalAnsiCharArrayOleToCom,
+ MarshalAnsiCharArrayComToOle,
+ NULL
+ );
+
+ case VT_LPSTR:
+ RETURN_MARSHALER(
+ NULL, NULL, NULL,
+ MarshalLPSTRArrayOleToCom,
+ MarshalLPSTRRArrayComToOle,
+ ClearLPSTRArray
+ );
+
+ case VT_LPWSTR:
+ RETURN_MARSHALER(
+ NULL, NULL, NULL,
+ MarshalLPWSTRArrayOleToCom,
+ MarshalLPWSTRRArrayComToOle,
+ ClearLPWSTRArray
+ );
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+ case VT_RECORD:
+ RETURN_MARSHALER(
+ MarshalRecordVariantOleToCom,
+ MarshalRecordVariantComToOle,
+ MarshalRecordVariantOleRefToCom,
+ MarshalRecordArrayOleToCom,
+ MarshalRecordArrayComToOle,
+ ClearRecordArray
+ );
+#endif
+
+ case VT_CARRAY:
+ case VT_USERDEFINED:
+ if (fThrow)
+ {
+ COMPlusThrow(kArgumentException, IDS_EE_COM_UNSUPPORTED_SIG);
+ }
+ else
+ {
+ RETURN NULL;
+ }
+
+ default:
+ RETURN NULL;
+ }
+} // OleVariant::Marshaler *OleVariant::GetMarshalerForVarType()
+
+#ifndef CROSSGEN_COMPILE
+
+#ifdef FEATURE_COMINTEROP
+
+/*==================================NewVariant==================================
+**N.B.: This method does a GC Allocation. Any method calling it is required to
+** GC_PROTECT the OBJECTREF.
+**
+**Actions: Allocates a new Variant and fills it with the appropriate data.
+**Returns: A new Variant with all of the appropriate fields filled out.
+**Exceptions: OutOfMemoryException if the new Variant cannot be allocated.
+==============================================================================*/
+void VariantData::NewVariant(VariantData * const& dest, const CVTypes type, INT64 data
+ DEBUG_ARG(BOOL bDestIsInterior))
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ // Don't pass an object in for Empty.
+ PRECONDITION(CheckPointer(dest));
+ PRECONDITION((bDestIsInterior && IsProtectedByGCFrame ((OBJECTREF *) &dest))
+ || (!bDestIsInterior && IsProtectedByGCFrame (dest->GetObjRefPtr ())));
+ PRECONDITION((type == CV_EMPTY) || (type == CV_NULL) || (type == CV_U4) || (type == CV_U8));
+ }
+ CONTRACTL_END;
+
+    //Only CV_EMPTY, CV_NULL, CV_U4 and CV_U8 are supported here; bail on
+    //anything else.
+ if ( (type != CV_EMPTY) && (type != CV_NULL) && (type != CV_U4) && (type != CV_U8) )
+ {
+ COMPlusThrow(kArgumentException);
+ }
+
+ //Fill in the data.
+ dest->SetType(type);
+
+ switch (type)
+ {
+ case CV_U4:
+ dest->SetObjRef(NULL);
+ dest->SetDataAsUInt32((UINT32)data);
+ break;
+
+ case CV_U8:
+ dest->SetObjRef(NULL);
+ dest->SetDataAsInt64(data);
+ break;
+
+ case CV_NULL:
+ {
+ FieldDesc * pFD = MscorlibBinder::GetField(FIELD__NULL__VALUE);
+ _ASSERTE(pFD);
+
+ pFD->CheckRunClassInitThrowing();
+
+ OBJECTREF obj = pFD->GetStaticOBJECTREF();
+ _ASSERTE(obj!=NULL);
+
+ dest->SetObjRef(obj);
+ dest->SetDataAsInt64(0);
+ break;
+ }
+
+ case CV_EMPTY:
+ {
+ dest->SetObjRef(NULL);
+ break;
+ }
+
+ default:
+ // Did you add any new CVTypes?
+ COMPlusThrow(kNotSupportedException, W("Arg_InvalidOleVariantTypeException"));
+ }
+}
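+
+// Illustrative only: the header comment above requires callers to GC-protect
+// the destination before calling NewVariant, since it performs a GC
+// allocation. A minimal, hedged sketch of a conforming caller ("vd" and
+// "value" are hypothetical locals):
+//
+//   VariantData vd;
+//   GCPROTECT_BEGIN_VARIANTDATA(vd);
+//   VariantData::NewVariant(&vd, CV_U4, (INT64)value DEBUG_ARG(FALSE));
+//   // ... use vd while it is still protected ...
+//   GCPROTECT_END_VARIANTDATA();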
+
+void SafeVariantClearHelper(VARIANT* pVar)
+{
+ STATIC_CONTRACT_SO_INTOLERANT;
+ WRAPPER_NO_CONTRACT;
+
+ BEGIN_SO_TOLERANT_CODE(GetThread());
+ VariantClear(pVar);
+ END_SO_TOLERANT_CODE;
+}
+
+class OutOfMemoryException;
+
+void SafeVariantClear(VARIANT* pVar)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pVar)
+ {
+ GCX_PREEMP();
+ SCAN_EHMARKER();
+ PAL_CPP_TRY
+ {
+ // These are holders to tell the contract system that we're catching all exceptions.
+ SCAN_EHMARKER_TRY();
+ CLR_TRY_MARKER();
+
+            // Most of the time, oleaut32.dll is already loaded when we get here.
+            // Sometimes the CLR initializes a Variant without loading oleaut32.dll, e.g. VT_BOOL.
+            // Catching with PAL_CPP_TRY here is cheaper than an EX_TRY for the
+            // common, non-throwing case.
+
+ SafeVariantClearHelper(pVar);
+
+ SCAN_EHMARKER_END_TRY();
+ }
+ PAL_CPP_CATCH_DERIVED(OutOfMemoryException, obj)
+ {
+ SCAN_EHMARKER_CATCH();
+
+#if defined(STACK_GUARDS_DEBUG)
+ // Catching and just swallowing an exception means we need to tell
+ // the SO code that it should go back to normal operation, as it
+ // currently thinks that the exception is still on the fly.
+ GetThread()->GetCurrentStackGuard()->RestoreCurrentGuard();
+#endif
+
+ SCAN_EHMARKER_END_CATCH();
+ }
+ PAL_CPP_ENDTRY;
+
+ FillMemory(pVar, sizeof(VARIANT), 0x00);
+ }
+}
+
+FORCEINLINE void EmptyVariant(VARIANT* value)
+{
+ WRAPPER_NO_CONTRACT;
+ SafeVariantClear(value);
+}
+
+class VariantEmptyHolder : public Wrapper<VARIANT*, ::DoNothing<VARIANT*>, EmptyVariant, NULL>
+{
+public:
+ VariantEmptyHolder(VARIANT* p = NULL) :
+ Wrapper<VARIANT*, ::DoNothing<VARIANT*>, EmptyVariant, NULL>(p)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ FORCEINLINE void operator=(VARIANT* p)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ Wrapper<VARIANT*, ::DoNothing<VARIANT*>, EmptyVariant, NULL>::operator=(p);
+ }
+};
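+
+// Illustrative only: a minimal, hedged sketch (not part of the original
+// source) of the holder pattern above. When the holder goes out of scope it
+// runs EmptyVariant (and thus SafeVariantClear) on the VARIANT, including on
+// exception paths.
+//
+//   VARIANT var;
+//   VariantInit(&var);
+//   VariantEmptyHolder holder(&var);
+//   // ... fill in and use var; it is cleared automatically on scope exit ...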
+
+FORCEINLINE void RecordVariantRelease(VARIANT* value)
+{
+ if (value)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (V_RECORD(value))
+ V_RECORDINFO(value)->RecordDestroy(V_RECORD(value));
+ if (V_RECORDINFO(value))
+ V_RECORDINFO(value)->Release();
+ }
+}
+
+class RecordVariantHolder : public Wrapper<VARIANT*, ::DoNothing<VARIANT*>, RecordVariantRelease, NULL>
+{
+public:
+ RecordVariantHolder(VARIANT* p = NULL)
+ : Wrapper<VARIANT*, ::DoNothing<VARIANT*>, RecordVariantRelease, NULL>(p)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ FORCEINLINE void operator=(VARIANT* p)
+ {
+ WRAPPER_NO_CONTRACT;
+ Wrapper<VARIANT*, ::DoNothing<VARIANT*>, RecordVariantRelease, NULL>::operator=(p);
+ }
+};
+#endif // FEATURE_COMINTEROP
+
+/* ------------------------------------------------------------------------- *
+ * Boolean marshaling routines
+ * ------------------------------------------------------------------------- */
+
+#ifdef FEATURE_COMINTEROP
+
+void OleVariant::MarshalBoolVariantOleToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pComVariant));
+ PRECONDITION(CheckPointer(pOleVariant));
+ }
+ CONTRACTL_END;
+
+ *(INT64*)pComVariant->GetData() = V_BOOL(pOleVariant) ? 1 : 0;
+}
+
+#endif // FEATURE_COMINTEROP
+
+void OleVariant::MarshalBoolArrayOleToCom(void *oleArray, BASEARRAYREF *pComArray,
+ MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ VARIANT_BOOL *pOle = (VARIANT_BOOL *) oleArray;
+ VARIANT_BOOL *pOleEnd = pOle + elementCount;
+
+ UCHAR *pCom = (UCHAR *) (*pComArray)->GetDataPtr();
+
+ while (pOle < pOleEnd)
+ {
+ static_assert_no_msg(sizeof(VARIANT_BOOL) == sizeof(UINT16));
+ (*(pCom++)) = MAYBE_UNALIGNED_READ(pOle, 16) ? 1 : 0;
+ pOle++;
+ }
+}
+
+void OleVariant::MarshalBoolArrayComToOle(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ VARIANT_BOOL *pOle = (VARIANT_BOOL *) oleArray;
+ VARIANT_BOOL *pOleEnd = pOle + elementCount;
+
+ UCHAR *pCom = (UCHAR *) (*pComArray)->GetDataPtr();
+
+ while (pOle < pOleEnd)
+ {
+ static_assert_no_msg(sizeof(VARIANT_BOOL) == sizeof(UINT16));
+ MAYBE_UNALIGNED_WRITE(pOle, 16, *pCom ? VARIANT_TRUE : VARIANT_FALSE);
+ pOle++; pCom++;
+ }
+}
+
+/* ------------------------------------------------------------------------- *
+ * WinBoolean marshaling routines
+ * ------------------------------------------------------------------------- */
+
+#ifdef FEATURE_COMINTEROP
+void OleVariant::MarshalWinBoolVariantOleToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"Not supposed to get here.");
+}
+
+void OleVariant::MarshalWinBoolVariantComToOle(VariantData *pComVariant,
+ VARIANT *pOleVariant)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"Not supposed to get here.");
+}
+
+void OleVariant::MarshalWinBoolVariantOleRefToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"Not supposed to get here.");
+}
+#endif // FEATURE_COMINTEROP
+
+void OleVariant::MarshalWinBoolArrayOleToCom(void *oleArray, BASEARRAYREF *pComArray,
+ MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ BOOL *pOle = (BOOL *) oleArray;
+ BOOL *pOleEnd = pOle + elementCount;
+
+ UCHAR *pCom = (UCHAR *) (*pComArray)->GetDataPtr();
+
+ while (pOle < pOleEnd)
+ {
+ static_assert_no_msg(sizeof(BOOL) == sizeof(UINT32));
+ (*(pCom++)) = MAYBE_UNALIGNED_READ(pOle, 32) ? 1 : 0;
+ pOle++;
+ }
+}
+
+void OleVariant::MarshalWinBoolArrayComToOle(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ BOOL *pOle = (BOOL *) oleArray;
+ BOOL *pOleEnd = pOle + elementCount;
+
+ UCHAR *pCom = (UCHAR *) (*pComArray)->GetDataPtr();
+
+ while (pOle < pOleEnd)
+ {
+ static_assert_no_msg(sizeof(BOOL) == sizeof(UINT32));
+ MAYBE_UNALIGNED_WRITE(pOle, 32, *pCom ? 1 : 0);
+ pOle++; pCom++;
+ }
+}
+
+/* ------------------------------------------------------------------------- *
+ * CBool marshaling routines
+ * ------------------------------------------------------------------------- */
+
+#ifdef FEATURE_COMINTEROP
+void OleVariant::MarshalCBoolVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"Not supposed to get here.");
+}
+
+void OleVariant::MarshalCBoolVariantComToOle(VariantData* pComVariant, VARIANT* pOleVariant)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"Not supposed to get here.");
+}
+
+void OleVariant::MarshalCBoolVariantOleRefToCom(VARIANT* pOleVariant, VariantData* pComVariant)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"Not supposed to get here.");
+}
+#endif // FEATURE_COMINTEROP
+
+void OleVariant::MarshalCBoolArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
+ MethodTable* pInterfaceMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+
+ _ASSERTE((*pComArray)->GetArrayElementType() == ELEMENT_TYPE_BOOLEAN);
+
+ SIZE_T cbArray = (*pComArray)->GetNumComponents();
+
+ BYTE *pOle = (BYTE *) oleArray;
+ BYTE *pOleEnd = pOle + cbArray;
+
+ UCHAR *pCom = (UCHAR *) (*pComArray)->GetDataPtr();
+
+ while (pOle < pOleEnd)
+ {
+ (*pCom) = (*pOle ? 1 : 0);
+ pOle++; pCom++;
+ }
+}
+
+void OleVariant::MarshalCBoolArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+
+ _ASSERTE((*pComArray)->GetArrayElementType() == ELEMENT_TYPE_BOOLEAN);
+
+ SIZE_T cbArray = (*pComArray)->GetNumComponents();
+ BYTE *pOle = (BYTE *) oleArray;
+ BYTE *pOleEnd = pOle + cbArray;
+
+ UCHAR *pCom = (UCHAR *) (*pComArray)->GetDataPtr();
+
+ while (pOle < pOleEnd)
+ {
+ *pOle = (*pCom ? 1 : 0);
+ pOle++; pCom++;
+ }
+}
+
+/* ------------------------------------------------------------------------- *
+ * Ansi char marshaling routines
+ * ------------------------------------------------------------------------- */
+
+#ifdef FEATURE_COMINTEROP
+void OleVariant::MarshalAnsiCharVariantOleToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"Not supposed to get here.");
+}
+
+void OleVariant::MarshalAnsiCharVariantComToOle(VariantData *pComVariant,
+ VARIANT *pOleVariant)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"Not supposed to get here.");
+}
+
+void OleVariant::MarshalAnsiCharVariantOleRefToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"Not supposed to get here.");
+}
+#endif // FEATURE_COMINTEROP
+
+void OleVariant::MarshalAnsiCharArrayOleToCom(void *oleArray, BASEARRAYREF *pComArray,
+ MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ WCHAR *pCom = (WCHAR *) (*pComArray)->GetDataPtr();
+
+ if (0 == elementCount)
+ {
+ *pCom = '\0';
+ return;
+ }
+
+ if (0 == MultiByteToWideChar(CP_ACP,
+ MB_PRECOMPOSED,
+ (const CHAR *)oleArray,
+ (int)elementCount,
+ pCom,
+ (int)elementCount))
+ {
+ COMPlusThrowWin32();
+ }
+}
+
+void OleVariant::MarshalAnsiCharArrayComToOle(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ const WCHAR *pCom = (const WCHAR *) (*pComArray)->GetDataPtr();
+
+ if (!FitsIn<int>(elementCount))
+ COMPlusThrowHR(COR_E_OVERFLOW);
+
+ int cchCount = (int)elementCount;
+ int cbBuffer;
+
+ if (!ClrSafeInt<int>::multiply(cchCount, GetMaxDBCSCharByteSize(), cbBuffer))
+ COMPlusThrowHR(COR_E_OVERFLOW);
+
+ InternalWideToAnsi((WCHAR*)pCom, cchCount, (CHAR*)oleArray, cbBuffer,
+ fBestFitMapping, fThrowOnUnmappableChar);
+}
+
+/* ------------------------------------------------------------------------- *
+ * Interface marshaling routines
+ * ------------------------------------------------------------------------- */
+
+#ifdef FEATURE_COMINTEROP
+void OleVariant::MarshalInterfaceVariantOleToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ IUnknown *unk = V_UNKNOWN(pOleVariant);
+
+ OBJECTREF obj = NULL;
+ if (unk != NULL)
+ {
+ GCPROTECT_BEGIN(obj);
+ GetObjectRefFromComIP(&obj, V_UNKNOWN(pOleVariant));
+ GCPROTECT_END();
+ }
+
+ pComVariant->SetObjRef(obj);
+}
+
+void OleVariant::MarshalInterfaceVariantComToOle(VariantData *pComVariant,
+ VARIANT *pOleVariant)
+
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF *obj = pComVariant->GetObjRefPtr();
+ VARTYPE vt = pComVariant->GetVT();
+
+ ASSERT_PROTECTED(obj);
+
+ if (*obj == NULL)
+ {
+ // If there is no VT set in the managed variant, then default to VT_UNKNOWN.
+ if (vt == VT_EMPTY)
+ vt = VT_UNKNOWN;
+
+ V_UNKNOWN(pOleVariant) = NULL;
+ V_VT(pOleVariant) = vt;
+ }
+ else
+ {
+#ifdef FEATURE_COMINTEROP
+ ComIpType FetchedIpType = ComIpType_None;
+ ComIpType ReqIpType;
+
+ if (vt != VT_EMPTY)
+ {
+ // We are dealing with an UnknownWrapper or DispatchWrapper.
+ // In this case, we need to respect the VT.
+ _ASSERTE(vt == VT_DISPATCH || vt == VT_UNKNOWN);
+ ReqIpType = vt == VT_DISPATCH ? ComIpType_Dispatch : ComIpType_Unknown;
+ }
+ else
+ {
+ // We are dealing with a normal object so we can give either
+ // IDispatch or IUnknown out depending on what it supports.
+ ReqIpType = ComIpType_Both;
+ }
+
+ IUnknown *unk = GetComIPFromObjectRef(obj, ReqIpType, &FetchedIpType);
+ BOOL ItfIsDispatch = FetchedIpType == ComIpType_Dispatch;
+
+ V_UNKNOWN(pOleVariant) = unk;
+ V_VT(pOleVariant) = static_cast<VARTYPE>(ItfIsDispatch ? VT_DISPATCH : VT_UNKNOWN);
+#else // FEATURE_COMINTEROP
+ V_UNKNOWN(pOleVariant) = GetComIPFromObjectRef(obj);
+ V_VT(pOleVariant) = VT_UNKNOWN;
+#endif // FEATURE_COMINTEROP
+ }
+}
+
+void OleVariant::MarshalInterfaceVariantOleRefToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ IUnknown *unk = V_UNKNOWN(pOleVariant);
+
+ OBJECTREF obj = NULL;
+ if (unk != NULL)
+ {
+ GCPROTECT_BEGIN(obj);
+ GetObjectRefFromComIP(&obj, *V_UNKNOWNREF(pOleVariant));
+ GCPROTECT_END();
+ }
+
+ pComVariant->SetObjRef(obj);
+}
+
+void OleVariant::MarshalInterfaceArrayOleToCom(void *oleArray, BASEARRAYREF *pComArray,
+ MethodTable *pElementMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ IUnknown **pOle = (IUnknown **) oleArray;
+ IUnknown **pOleEnd = pOle + elementCount;
+
+ BASEARRAYREF unprotectedArray = *pComArray;
+ OBJECTREF *pCom = (OBJECTREF *) unprotectedArray->GetDataPtr();
+
+#if CHECK_APP_DOMAIN_LEAKS
+ AppDomain *pDomain = unprotectedArray->GetAppDomain();
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+ OBJECTREF obj = NULL;
+ GCPROTECT_BEGIN(obj)
+ {
+ while (pOle < pOleEnd)
+ {
+ IUnknown *unk = *pOle++;
+
+ if (unk == NULL)
+ obj = NULL;
+ else
+ GetObjectRefFromComIP(&obj, unk);
+
+ //
+ // Make sure the object can be cast to the destination type.
+ //
+
+ if (pElementMT != NULL && !CanCastComObject(obj, pElementMT))
+ {
+ StackSString ssObjClsName;
+ StackSString ssDestClsName;
+ obj->GetMethodTable()->_GetFullyQualifiedNameForClass(ssObjClsName);
+ pElementMT->_GetFullyQualifiedNameForClass(ssDestClsName);
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOTCAST,
+ ssObjClsName.GetUnicode(), ssDestClsName.GetUnicode());
+ }
+
+ //
+ // Reset pCom pointer only if array object has moved, rather than
+ // recomputing every time through the loop. Beware implicit calls to
+ // ValidateObject inside OBJECTREF methods.
+ //
+
+ if (*(void **)&unprotectedArray != *(void **)&*pComArray)
+ {
+ SIZE_T currentOffset = ((BYTE *)pCom) - (*(Object **) &unprotectedArray)->GetAddress();
+ unprotectedArray = *pComArray;
+ pCom = (OBJECTREF *) (unprotectedArray->GetAddress() + currentOffset);
+ }
+
+ SetObjectReference(pCom++, obj, pDomain);
+ }
+ }
+ GCPROTECT_END();
+}
+
+void OleVariant::MarshalIUnknownArrayComToOle(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pElementMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid)
+{
+ WRAPPER_NO_CONTRACT;
+
+ MarshalInterfaceArrayComToOleHelper(pComArray, oleArray, pElementMT, FALSE);
+}
+
+void OleVariant::ClearInterfaceArray(void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(oleArray));
+ }
+ CONTRACTL_END;
+
+ IUnknown **pOle = (IUnknown **) oleArray;
+ IUnknown **pOleEnd = pOle + cElements;
+
+ GCX_PREEMP();
+ while (pOle < pOleEnd)
+ {
+ IUnknown *pUnk = *pOle++;
+
+ if (pUnk != NULL)
+ {
+ ULONG cbRef = SafeReleasePreemp(pUnk);
+            LogInteropRelease(pUnk, cbRef, "VariantClearInterfaceArray");
+ }
+ }
+}
+
+
+/* ------------------------------------------------------------------------- *
+ * BSTR marshaling routines
+ * ------------------------------------------------------------------------- */
+
+void OleVariant::MarshalBSTRVariantOleToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ BSTR bstr = V_BSTR(pOleVariant);
+
+ STRINGREF stringObj = NULL;
+ GCPROTECT_BEGIN(stringObj)
+ {
+ ConvertBSTRToString(bstr, &stringObj);
+ pComVariant->SetObjRef((OBJECTREF) stringObj);
+ }
+ GCPROTECT_END();
+}
+
+void OleVariant::MarshalBSTRVariantComToOle(VariantData *pComVariant,
+ VARIANT *pOleVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ STRINGREF stringObj = (STRINGREF) pComVariant->GetObjRef();
+ GCPROTECT_BEGIN(stringObj)
+ {
+ V_BSTR(pOleVariant) = ConvertStringToBSTR(&stringObj);
+ }
+ GCPROTECT_END();
+}
+
+void OleVariant::MarshalBSTRArrayOleToCom(void *oleArray, BASEARRAYREF *pComArray,
+ MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ STRINGREF stringObj = NULL;
+ GCPROTECT_BEGIN(stringObj)
+ {
+ ASSERT_PROTECTED(pComArray);
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ BSTR *pOle = (BSTR *) oleArray;
+ BSTR *pOleEnd = pOle + elementCount;
+
+ BASEARRAYREF unprotectedArray = *pComArray;
+ STRINGREF *pCom = (STRINGREF *) unprotectedArray->GetDataPtr();
+
+#if CHECK_APP_DOMAIN_LEAKS
+ AppDomain *pDomain = unprotectedArray->GetAppDomain();
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+ while (pOle < pOleEnd)
+ {
+ BSTR bstr = *pOle++;
+
+ ConvertBSTRToString(bstr, &stringObj);
+
+ //
+ // Reset pCom pointer only if array object has moved, rather than
+ // recomputing it every time through the loop. Beware implicit calls to
+ // ValidateObject inside OBJECTREF methods.
+ //
+
+ if (*(void **)&unprotectedArray != *(void **)&*pComArray)
+ {
+ SIZE_T currentOffset = ((BYTE *)pCom) - (*(Object **) &unprotectedArray)->GetAddress();
+ unprotectedArray = *pComArray;
+ pCom = (STRINGREF *) (unprotectedArray->GetAddress() + currentOffset);
+ }
+
+ SetObjectReference((OBJECTREF*) pCom++, (OBJECTREF) stringObj, pDomain);
+ }
+ }
+ GCPROTECT_END();
+}
+
+void OleVariant::MarshalBSTRArrayComToOle(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ STRINGREF stringObj = NULL;
+ GCPROTECT_BEGIN(stringObj)
+ {
+ ASSERT_PROTECTED(pComArray);
+
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ BSTR *pOle = (BSTR *) oleArray;
+ BSTR *pOleEnd = pOle + elementCount;
+
+ STRINGREF *pCom = (STRINGREF *) (*pComArray)->GetDataPtr();
+
+ while (pOle < pOleEnd)
+ {
+ stringObj = *pCom++;
+ BSTR bstr = ConvertStringToBSTR(&stringObj);
+
+ //
+ // We aren't calling anything which might cause a GC, so don't worry about
+ // the array moving here.
+ //
+
+ *pOle++ = bstr;
+ }
+ }
+ GCPROTECT_END();
+}
+
+void OleVariant::ClearBSTRArray(void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(oleArray));
+ }
+ CONTRACTL_END;
+
+ BSTR *pOle = (BSTR *) oleArray;
+ BSTR *pOleEnd = pOle + cElements;
+
+ while (pOle < pOleEnd)
+ {
+ BSTR bstr = *pOle++;
+
+ if (bstr != NULL)
+ SysFreeString(bstr);
+ }
+}
+#endif // FEATURE_COMINTEROP
+
+
+
+/* ------------------------------------------------------------------------- *
+ * Structure marshaling routines
+ * ------------------------------------------------------------------------- */
+void OleVariant::MarshalNonBlittableRecordArrayOleToCom(void *oleArray, BASEARRAYREF *pComArray,
+ MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ PRECONDITION(CheckPointer(pInterfaceMT));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+ SIZE_T elemSize = pInterfaceMT->GetNativeSize();
+
+ BYTE *pOle = (BYTE *) oleArray;
+ BYTE *pOleEnd = pOle + elemSize * elementCount;
+
+ SIZE_T dstofs = ArrayBase::GetDataPtrOffset( (*pComArray)->GetMethodTable() );
+ while (pOle < pOleEnd)
+ {
+ LayoutUpdateCLR( (LPVOID*)pComArray, dstofs, pInterfaceMT, pOle );
+ dstofs += (*pComArray)->GetComponentSize();
+ pOle += elemSize;
+ }
+}
+
+void OleVariant::MarshalNonBlittableRecordArrayComToOle(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ PRECONDITION(CheckPointer(pInterfaceMT));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+ SIZE_T elemSize = pInterfaceMT->GetNativeSize();
+
+ BYTE *pOle = (BYTE *) oleArray;
+ BYTE *pOleEnd = pOle + elemSize * elementCount;
+
+ if (!fOleArrayIsValid)
+ {
+ // field marshalers assume that the native structure is valid
+ FillMemory(pOle, pOleEnd - pOle, 0);
+ }
+
+ SIZE_T srcofs = ArrayBase::GetDataPtrOffset( (*pComArray)->GetMethodTable() );
+ while (pOle < pOleEnd)
+ {
+ LayoutUpdateNative( (LPVOID*)pComArray, srcofs, pInterfaceMT, pOle, NULL );
+ pOle += elemSize;
+ srcofs += (*pComArray)->GetComponentSize();
+ }
+}
+
+void OleVariant::ClearNonBlittableRecordArray(void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pInterfaceMT));
+ }
+ CONTRACTL_END;
+
+ SIZE_T elemSize = pInterfaceMT->GetNativeSize();
+ BYTE *pOle = (BYTE *) oleArray;
+ BYTE *pOleEnd = pOle + elemSize * cElements;
+ while (pOle < pOleEnd)
+ {
+ LayoutDestroyNative(pOle, pInterfaceMT);
+ pOle += elemSize;
+ }
+}
+
+
+/* ------------------------------------------------------------------------- *
+ * LPWSTR marshaling routines
+ * ------------------------------------------------------------------------- */
+
+void OleVariant::MarshalLPWSTRArrayOleToCom(void *oleArray, BASEARRAYREF *pComArray,
+ MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ LPWSTR *pOle = (LPWSTR *) oleArray;
+ LPWSTR *pOleEnd = pOle + elementCount;
+
+ BASEARRAYREF unprotectedArray = *pComArray;
+ STRINGREF *pCom = (STRINGREF *) unprotectedArray->GetDataPtr();
+
+#if CHECK_APP_DOMAIN_LEAKS
+ AppDomain *pDomain = unprotectedArray->GetAppDomain();
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+ while (pOle < pOleEnd)
+ {
+ LPWSTR lpwstr = *pOle++;
+
+ STRINGREF string;
+ if (lpwstr == NULL)
+ string = NULL;
+ else
+ string = StringObject::NewString(lpwstr);
+
+ //
+ // Reset pCom pointer only if array object has moved, rather than
+ // recomputing it every time through the loop. Beware implicit calls to
+ // ValidateObject inside OBJECTREF methods.
+ //
+
+ if (*(void **)&unprotectedArray != *(void **)&*pComArray)
+ {
+ SIZE_T currentOffset = ((BYTE *)pCom) - (*(Object **) &unprotectedArray)->GetAddress();
+ unprotectedArray = *pComArray;
+ pCom = (STRINGREF *) (unprotectedArray->GetAddress() + currentOffset);
+ }
+
+ SetObjectReference((OBJECTREF*) pCom++, (OBJECTREF) string, pDomain);
+ }
+}
+
+void OleVariant::MarshalLPWSTRRArrayComToOle(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ LPWSTR *pOle = (LPWSTR *) oleArray;
+ LPWSTR *pOleEnd = pOle + elementCount;
+
+ STRINGREF *pCom = (STRINGREF *) (*pComArray)->GetDataPtr();
+
+ while (pOle < pOleEnd)
+ {
+ //
+ // We aren't calling anything which might cause a GC, so don't worry about
+ // the array moving here.
+ //
+
+ STRINGREF stringRef = *pCom++;
+
+ LPWSTR lpwstr;
+ if (stringRef == NULL)
+ {
+ lpwstr = NULL;
+ }
+ else
+ {
+ // Retrieve the length of the string.
+ int Length = stringRef->GetStringLength();
+ int allocLength = (Length + 1) * sizeof(WCHAR);
+ if (allocLength < Length)
+ ThrowOutOfMemory();
+
+ // Allocate the string using CoTaskMemAlloc.
+ lpwstr = (LPWSTR)CoTaskMemAlloc(allocLength);
+ if (lpwstr == NULL)
+ ThrowOutOfMemory();
+
+ // Copy the COM+ string into the newly allocated LPWSTR.
+ memcpyNoGCRefs(lpwstr, stringRef->GetBuffer(), (Length + 1) * sizeof(WCHAR));
+ lpwstr[Length] = 0;
+ }
+
+ *pOle++ = lpwstr;
+ }
+}
+
+void OleVariant::ClearLPWSTRArray(void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(oleArray));
+ }
+ CONTRACTL_END;
+
+ LPWSTR *pOle = (LPWSTR *) oleArray;
+ LPWSTR *pOleEnd = pOle + cElements;
+
+ while (pOle < pOleEnd)
+ {
+ LPWSTR lpwstr = *pOle++;
+
+ if (lpwstr != NULL)
+ CoTaskMemFree(lpwstr);
+ }
+}
+
+/* ------------------------------------------------------------------------- *
+ * LPSTR marshaling routines
+ * ------------------------------------------------------------------------- */
+
+void OleVariant::MarshalLPSTRArrayOleToCom(void *oleArray, BASEARRAYREF *pComArray,
+ MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ LPSTR *pOle = (LPSTR *) oleArray;
+ LPSTR *pOleEnd = pOle + elementCount;
+
+ BASEARRAYREF unprotectedArray = *pComArray;
+ STRINGREF *pCom = (STRINGREF *) unprotectedArray->GetDataPtr();
+
+#if CHECK_APP_DOMAIN_LEAKS
+ AppDomain *pDomain = unprotectedArray->GetAppDomain();
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+ while (pOle < pOleEnd)
+ {
+ LPSTR lpstr = *pOle++;
+
+ STRINGREF string;
+ if (lpstr == NULL)
+ string = NULL;
+ else
+ string = StringObject::NewString(lpstr);
+
+ //
+ // Reset pCom pointer only if array object has moved, rather than
+ // recomputing it every time through the loop. Beware implicit calls to
+ // ValidateObject inside OBJECTREF methods.
+ //
+
+ if (*(void **)&unprotectedArray != *(void **)&*pComArray)
+ {
+ SIZE_T currentOffset = ((BYTE *)pCom) - (*(Object **) &unprotectedArray)->GetAddress();
+ unprotectedArray = *pComArray;
+ pCom = (STRINGREF *) (unprotectedArray->GetAddress() + currentOffset);
+ }
+
+ SetObjectReference((OBJECTREF*) pCom++, (OBJECTREF) string, pDomain);
+ }
+}
+
+void OleVariant::MarshalLPSTRRArrayComToOle(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ LPSTR *pOle = (LPSTR *) oleArray;
+ LPSTR *pOleEnd = pOle + elementCount;
+
+ STRINGREF *pCom = (STRINGREF *) (*pComArray)->GetDataPtr();
+
+ while (pOle < pOleEnd)
+ {
+ //
+ // We aren't calling anything which might cause a GC, so don't worry about
+ // the array moving here.
+ //
+ STRINGREF stringRef = *pCom++;
+
+ CoTaskMemHolder<CHAR> lpstr(NULL);
+ if (stringRef == NULL)
+ {
+ lpstr = NULL;
+ }
+ else
+ {
+ // Retrieve the length of the string.
+ int Length = stringRef->GetStringLength();
+ int allocLength = Length * GetMaxDBCSCharByteSize() + 1;
+ if (allocLength < Length)
+ ThrowOutOfMemory();
+
+ // Allocate the string using CoTaskMemAlloc.
+ lpstr = (LPSTR)CoTaskMemAlloc(allocLength);
+ if (lpstr == NULL)
+ ThrowOutOfMemory();
+
+ // Convert the unicode string to an ansi string.
+ InternalWideToAnsi(stringRef->GetBuffer(), Length, lpstr, allocLength, fBestFitMapping, fThrowOnUnmappableChar);
+ lpstr[Length] = 0;
+ }
+
+ *pOle++ = lpstr;
+ lpstr.SuppressRelease();
+ }
+}
+
+void OleVariant::ClearLPSTRArray(void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(oleArray));
+ }
+ CONTRACTL_END;
+
+ LPSTR *pOle = (LPSTR *) oleArray;
+ LPSTR *pOleEnd = pOle + cElements;
+
+ while (pOle < pOleEnd)
+ {
+ LPSTR lpstr = *pOle++;
+
+ if (lpstr != NULL)
+ CoTaskMemFree(lpstr);
+ }
+}
+
+/* ------------------------------------------------------------------------- *
+ * Date marshaling routines
+ * ------------------------------------------------------------------------- */
+
+#ifdef FEATURE_COMINTEROP
+void OleVariant::MarshalDateVariantOleToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ WRAPPER_NO_CONTRACT;
+
+ *(INT64*)pComVariant->GetData() = COMDateTime::DoubleDateToTicks(V_DATE(pOleVariant));
+}
+
+void OleVariant::MarshalDateVariantComToOle(VariantData *pComVariant,
+ VARIANT *pOleVariant)
+{
+ WRAPPER_NO_CONTRACT;
+
+ V_DATE(pOleVariant) = COMDateTime::TicksToDoubleDate(*(INT64*)pComVariant->GetData());
+}
+
+void OleVariant::MarshalDateVariantOleRefToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ WRAPPER_NO_CONTRACT;
+
+ *(INT64*)pComVariant->GetData() = COMDateTime::DoubleDateToTicks(*V_DATEREF(pOleVariant));
+}
+#endif // FEATURE_COMINTEROP
+
+void OleVariant::MarshalDateArrayOleToCom(void *oleArray, BASEARRAYREF *pComArray,
+ MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ DATE *pOle = (DATE *) oleArray;
+ DATE *pOleEnd = pOle + elementCount;
+
+ INT64 *pCom = (INT64 *) (*pComArray)->GetDataPtr();
+
+ //
+ // We aren't calling anything which might cause a GC, so don't worry about
+ // the array moving here.
+ //
+
+ while (pOle < pOleEnd)
+ *pCom++ = COMDateTime::DoubleDateToTicks(*pOle++);
+}
+
+void OleVariant::MarshalDateArrayComToOle(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ DATE *pOle = (DATE *) oleArray;
+ DATE *pOleEnd = pOle + elementCount;
+
+ INT64 *pCom = (INT64 *) (*pComArray)->GetDataPtr();
+
+ //
+ // We aren't calling anything which might cause a GC, so don't worry about
+ // the array moving here.
+ //
+
+ while (pOle < pOleEnd)
+ *pOle++ = COMDateTime::TicksToDoubleDate(*pCom++);
+}
+
+/* ------------------------------------------------------------------------- *
+ * Decimal marshaling routines
+ * ------------------------------------------------------------------------- */
+
+#ifdef FEATURE_COMINTEROP
+
+void OleVariant::MarshalDecimalVariantOleToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pDecimalRef = AllocateObject(MscorlibBinder::GetClass(CLASS__DECIMAL));
+
+ DECIMAL* pDecimal = (DECIMAL *) pDecimalRef->UnBox();
+ *pDecimal = V_DECIMAL(pOleVariant);
+    // Marshaling uses the reserved field to store the variant type, so clear it out when marshaling back.
+ pDecimal->wReserved = 0;
+
+ pComVariant->SetObjRef(pDecimalRef);
+}
+
+void OleVariant::MarshalDecimalVariantComToOle(VariantData *pComVariant,
+ VARIANT *pOleVariant)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ VARTYPE vt = V_VT(pOleVariant);
+ _ASSERTE(vt == VT_DECIMAL);
+ V_DECIMAL(pOleVariant) = * (DECIMAL*) pComVariant->GetObjRef()->UnBox();
+ V_VT(pOleVariant) = vt;
+}
+
+void OleVariant::MarshalDecimalVariantOleRefToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pDecimalRef = AllocateObject(MscorlibBinder::GetClass(CLASS__DECIMAL));
+
+ DECIMAL* pDecimal = (DECIMAL *) pDecimalRef->UnBox();
+ *pDecimal = *V_DECIMALREF(pOleVariant);
+ // Marshaling uses the reserved value to store the variant type, so clear it out when marshaling back.
+ pDecimal->wReserved = 0;
+
+ pComVariant->SetObjRef(pDecimalRef);
+}
+#endif // FEATURE_COMINTEROP
+
+/* ------------------------------------------------------------------------- *
+ * Record marshaling routines
+ * ------------------------------------------------------------------------- */
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+void OleVariant::MarshalRecordVariantOleToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ IRecordInfo *pRecInfo = V_RECORDINFO(pOleVariant);
+ if (!pRecInfo)
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_OLE_VARIANT);
+
+ OBJECTREF BoxedValueClass = NULL;
+ GCPROTECT_BEGIN(BoxedValueClass)
+ {
+ LPVOID pvRecord = V_RECORD(pOleVariant);
+ if (pvRecord)
+ {
+ // Go to the registry to find the value class associated
+ // with the record's guid.
+ GUID guid;
+ IfFailThrow(pRecInfo->GetGuid(&guid));
+ MethodTable *pValueClass = GetValueTypeForGUID(guid);
+ if (!pValueClass)
+ COMPlusThrow(kArgumentException, IDS_EE_CANNOT_MAP_TO_MANAGED_VC);
+
+ Module* pModule = pValueClass->GetModule();
+ if (!Security::CanCallUnmanagedCode(pModule))
+ {
+ COMPlusThrow(kArgumentException, IDS_EE_VTRECORD_SECURITY);
+ }
+
+ // Now that we have the value class, allocate an instance of the
+ // boxed value class and copy the contents of the record into it.
+ BoxedValueClass = AllocateObject(pValueClass);
+ FmtClassUpdateCLR(&BoxedValueClass, (BYTE*)pvRecord);
+ }
+
+ pComVariant->SetObjRef(BoxedValueClass);
+ }
+ GCPROTECT_END();
+}
+
+void OleVariant::MarshalRecordVariantComToOle(VariantData *pComVariant,
+ VARIANT *pOleVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF BoxedValueClass = pComVariant->GetObjRef();
+ GCPROTECT_BEGIN(BoxedValueClass)
+ {
+ _ASSERTE(BoxedValueClass != NULL);
+ Module* pModule = BoxedValueClass->GetMethodTable()->GetModule();
+ if (!Security::CanCallUnmanagedCode(pModule))
+ {
+ COMPlusThrow(kArgumentException, IDS_EE_VTRECORD_SECURITY);
+ }
+
+ ConvertValueClassToVariant(&BoxedValueClass, pOleVariant);
+ }
+ GCPROTECT_END();
+}
+
+void OleVariant::MarshalRecordVariantOleRefToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // The representations of a VT_RECORD and a VT_BYREF | VT_RECORD VARIANT are
+ // the same, so we can simply forward the call to the non-byref API.
+ MarshalRecordVariantOleToCom(pOleVariant, pComVariant);
+}
+#endif // FEATURE_CLASSIC_COMINTEROP
+
+void OleVariant::MarshalRecordArrayOleToCom(void *oleArray, BASEARRAYREF *pComArray,
+ MethodTable *pElementMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ PRECONDITION(CheckPointer(pElementMT));
+ }
+ CONTRACTL_END;
+
+ Module* pModule = pElementMT->GetModule();
+ if (!Security::CanCallUnmanagedCode(pModule))
+ {
+ COMPlusThrow(kArgumentException, IDS_EE_VTRECORD_SECURITY);
+ }
+
+ if (pElementMT->IsBlittable())
+ {
+ // The array is blittable so we can simply copy it.
+ _ASSERTE(pComArray);
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+ SIZE_T elemSize = pElementMT->GetNativeSize();
+ memcpyNoGCRefs((*pComArray)->GetDataPtr(), oleArray, elementCount * elemSize);
+ }
+ else
+ {
+ // The array is non-blittable, so we need to marshal the elements.
+ _ASSERTE(pElementMT->HasLayout());
+ MarshalNonBlittableRecordArrayOleToCom(oleArray, pComArray, pElementMT);
+ }
+}
+
+void OleVariant::MarshalRecordArrayComToOle(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pElementMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ PRECONDITION(CheckPointer(pElementMT));
+ }
+ CONTRACTL_END;
+
+ Module* pModule = pElementMT->GetModule();
+ if (!Security::CanCallUnmanagedCode(pModule))
+ {
+ COMPlusThrow(kArgumentException, IDS_EE_VTRECORD_SECURITY);
+ }
+
+ if (pElementMT->IsBlittable())
+ {
+ // The array is blittable so we can simply copy it.
+ _ASSERTE(pComArray);
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+ SIZE_T elemSize = pElementMT->GetNativeSize();
+ memcpyNoGCRefs(oleArray, (*pComArray)->GetDataPtr(), elementCount * elemSize);
+ }
+ else
+ {
+ // The array is non-blittable, so we need to marshal the elements.
+ _ASSERTE(pElementMT->HasLayout());
+ MarshalNonBlittableRecordArrayComToOle(pComArray, oleArray, pElementMT, fBestFitMapping, fThrowOnUnmappableChar, fOleArrayIsValid);
+ }
+}
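+
+// Note (general interop background): a blittable type has identical managed
+// and native layout, which is what makes the memcpyNoGCRefs fast path above
+// safe. For example, a value type with only int fields is blittable, while one
+// containing a string field is not and must take the field-by-field
+// MarshalNonBlittableRecordArray* path instead.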
+
+
+void OleVariant::ClearRecordArray(void *oleArray, SIZE_T cElements, MethodTable *pElementMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pElementMT));
+ }
+ CONTRACTL_END;
+
+ if (!pElementMT->IsBlittable())
+ {
+ _ASSERTE(pElementMT->HasLayout());
+ ClearNonBlittableRecordArray(oleArray, cElements, pElementMT);
+ }
+}
+
+#ifdef FEATURE_COMINTEROP
+
+// Warning! This VariantClears the previous contents of pOle.
+void OleVariant::MarshalOleVariantForObject(OBJECTREF * const & pObj, VARIANT *pOle)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pObj));
+ PRECONDITION(*pObj == NULL || (IsProtectedByGCFrame (pObj)));
+
+ PRECONDITION(CheckPointer(pOle));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_CORECLR
+ if (AppX::IsAppXProcess())
+ {
+ COMPlusThrow(kPlatformNotSupportedException, IDS_EE_BADMARSHAL_TYPE_VARIANTASOBJECT);
+ }
+#endif // FEATURE_CORECLR
+
+ SafeVariantClear(pOle);
+
+#ifdef _DEBUG
+ FillMemory(pOle, sizeof(VARIANT),0xdd);
+ V_VT(pOle) = VT_EMPTY;
+#endif
+
+ // For perf reasons, let's handle the more common and easy cases
+ // without transitioning to managed code.
+ if (*pObj == NULL)
+ {
+ // null maps to VT_EMPTY - nothing to do here.
+ }
+ else
+ {
+ MethodTable *pMT = (*pObj)->GetMethodTable();
+ if (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_I4))
+ {
+ V_I4(pOle) = *(LONG*)( (*pObj)->GetData() );
+ V_VT(pOle) = VT_I4;
+ }
+ else if (pMT == g_pStringClass)
+ {
+ if (*(pObj) == NULL)
+ {
+ V_BSTR(pOle) = NULL;
+ }
+ else
+ {
+ STRINGREF stringRef = (STRINGREF)(*pObj);
+ V_BSTR(pOle) = SysAllocStringLen(stringRef->GetBuffer(), stringRef->GetStringLength());
+ if (NULL == V_BSTR(pOle))
+ COMPlusThrowOM();
+ }
+
+ V_VT(pOle) = VT_BSTR;
+ }
+ else if (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_I2))
+ {
+ V_I2(pOle) = *(SHORT*)( (*pObj)->GetData() );
+ V_VT(pOle) = VT_I2;
+ }
+ else if (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_I1))
+ {
+ V_I1(pOle) = *(CHAR*)( (*pObj)->GetData() );
+ V_VT(pOle) = VT_I1;
+ }
+ else if (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_U4))
+ {
+ V_UI4(pOle) = *(ULONG*)( (*pObj)->GetData() );
+ V_VT(pOle) = VT_UI4;
+ }
+ else if (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_U2))
+ {
+ V_UI2(pOle) = *(USHORT*)( (*pObj)->GetData() );
+ V_VT(pOle) = VT_UI2;
+ }
+ else if (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_U1))
+ {
+ V_UI1(pOle) = *(BYTE*)( (*pObj)->GetData() );
+ V_VT(pOle) = VT_UI1;
+ }
+ else if (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_R4))
+ {
+ V_R4(pOle) = *(FLOAT*)( (*pObj)->GetData() );
+ V_VT(pOle) = VT_R4;
+ }
+ else if (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_R8))
+ {
+ V_R8(pOle) = *(DOUBLE*)( (*pObj)->GetData() );
+ V_VT(pOle) = VT_R8;
+ }
+ else if (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_BOOLEAN))
+ {
+ V_BOOL(pOle) = *(U1*)( (*pObj)->GetData() ) ? VARIANT_TRUE : VARIANT_FALSE;
+ V_VT(pOle) = VT_BOOL;
+ }
+ else if (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_I))
+ {
+ *(LPVOID*)&(V_INT(pOle)) = *(LPVOID*)( (*pObj)->GetData() );
+ V_VT(pOle) = VT_INT;
+ }
+ else if (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_U))
+ {
+ *(LPVOID*)&(V_UINT(pOle)) = *(LPVOID*)( (*pObj)->GetData() );
+ V_VT(pOle) = VT_UINT;
+ }
+ else
+ {
+ MethodDescCallSite convertObjectToVariant(METHOD__VARIANT__CONVERT_OBJECT_TO_VARIANT);
+
+ VariantData managedVariant;
+ FillMemory(&managedVariant, sizeof(managedVariant), 0);
+ GCPROTECT_BEGIN_VARIANTDATA(managedVariant)
+ {
+ ARG_SLOT args[] = {
+ ObjToArgSlot(*pObj),
+ PtrToArgSlot(&managedVariant),
+ };
+
+ convertObjectToVariant.Call(args);
+
+ OleVariant::MarshalOleVariantForComVariant(&managedVariant, pOle);
+ }
+ GCPROTECT_END_VARIANTDATA();
+ }
+ }
+}
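+
+// Usage sketch (hypothetical caller, for illustration only): the object must be
+// protected by a GC frame and the thread must be in cooperative mode, and the
+// caller owns the resulting VARIANT's contents.
+//
+//     OBJECTREF obj = ...;
+//     GCPROTECT_BEGIN(obj);
+//     VARIANT v;
+//     VariantInit(&v);
+//     OleVariant::MarshalOleVariantForObject(&obj, &v);
+//     // ... use v ...
+//     SafeVariantClear(&v);
+//     GCPROTECT_END();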
+
+void OleVariant::MarshalOleRefVariantForObject(OBJECTREF *pObj, VARIANT *pOle)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pObj));
+ PRECONDITION(IsProtectedByGCFrame (pObj));
+ PRECONDITION(CheckPointer(pOle));
+ PRECONDITION(V_VT(pOle) & VT_BYREF);
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_CORECLR
+ if (AppX::IsAppXProcess())
+ {
+ COMPlusThrow(kPlatformNotSupportedException, IDS_EE_BADMARSHAL_TYPE_VARIANTASOBJECT);
+ }
+#endif // FEATURE_CORECLR
+
+ HRESULT hr = MarshalCommonOleRefVariantForObject(pObj, pOle);
+
+ if (FAILED(hr))
+ {
+ if (hr == DISP_E_BADVARTYPE)
+ {
+ COMPlusThrow(kInvalidOleVariantTypeException, IDS_EE_INVALID_OLE_VARIANT);
+ }
+ else if (hr == DISP_E_TYPEMISMATCH)
+ {
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOT_COERCE_BYREF_VARIANT);
+ }
+ else
+ {
+ MethodDescCallSite castVariant(METHOD__VARIANT__CAST_VARIANT);
+
+ // MarshalCommonOleRefVariantForObject has already rejected array variants,
+ // so we can use the marshal cast helper to coerce the object to the proper type.
+ VariantData vd;
+ FillMemory(&vd, sizeof(vd), 0);
+ VARTYPE vt = V_VT(pOle) & ~VT_BYREF;
+
+ GCPROTECT_BEGIN_VARIANTDATA(vd);
+ {
+ ARG_SLOT args[3];
+ args[0] = ObjToArgSlot(*pObj);
+ args[1] = (ARG_SLOT)vt;
+ args[2] = PtrToArgSlot(&vd);
+ castVariant.Call(args);
+ VARIANT vtmp;
+ VariantInit(&vtmp);
+ OleVariant::MarshalOleVariantForComVariant(&vd, &vtmp);
+
+ // If the variant types are still not the same then call VariantChangeType to
+ // try to coerce them.
+ if (V_VT(&vtmp) != vt)
+ {
+ VARIANT vtmp2;
+ memset(&vtmp2, 0, sizeof(VARIANT));
+
+ // The type of the variant has changed so attempt to change
+ // the type back.
+ hr = SafeVariantChangeType(&vtmp2, &vtmp, 0, vt);
+ if (FAILED(hr))
+ {
+ if (hr == DISP_E_TYPEMISMATCH)
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOT_COERCE_BYREF_VARIANT);
+ else
+ COMPlusThrowHR(hr);
+ }
+
+ // Copy the converted variant back into the original variant and clear the temp.
+ InsertContentsIntoByrefVariant(&vtmp2, pOle);
+ SafeVariantClear(&vtmp);
+ }
+ else
+ {
+ InsertContentsIntoByrefVariant(&vtmp, pOle);
+ }
+ }
+ GCPROTECT_END_VARIANTDATA();
+ }
+ }
+}
+
+HRESULT OleVariant::MarshalCommonOleRefVariantForObject(OBJECTREF *pObj, VARIANT *pOle)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pObj));
+ PRECONDITION(IsProtectedByGCFrame (pObj));
+ PRECONDITION(CheckPointer(pOle));
+ PRECONDITION(V_VT(pOle) & VT_BYREF);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Let's try to handle the common trivial cases quickly first before
+ // running the generalized stuff.
+ MethodTable *pMT = (*pObj) == NULL ? NULL : (*pObj)->GetMethodTable();
+ if ( (V_VT(pOle) == (VT_BYREF | VT_I4) || V_VT(pOle) == (VT_BYREF | VT_UI4)) && (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_I4) || pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_U4)) )
+ {
+ // deallocation of old value optimized away since there's nothing to
+ // deallocate for this vartype.
+
+ *(V_I4REF(pOle)) = *(LONG*)( (*pObj)->GetData() );
+ }
+ else if ( (V_VT(pOle) == (VT_BYREF | VT_I2) || V_VT(pOle) == (VT_BYREF | VT_UI2)) && (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_I2) || pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_U2)) )
+ {
+ // deallocation of old value optimized away since there's nothing to
+ // deallocate for this vartype.
+
+ *(V_I2REF(pOle)) = *(SHORT*)( (*pObj)->GetData() );
+ }
+ else if ( (V_VT(pOle) == (VT_BYREF | VT_I1) || V_VT(pOle) == (VT_BYREF | VT_UI1)) && (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_I1) || pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_U1)) )
+ {
+ // deallocation of old value optimized away since there's nothing to
+ // deallocate for this vartype.
+
+ *(V_I1REF(pOle)) = *(CHAR*)( (*pObj)->GetData() );
+ }
+ else if ( V_VT(pOle) == (VT_BYREF | VT_R4) && pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_R4) )
+ {
+ // deallocation of old value optimized away since there's nothing to
+ // deallocate for this vartype.
+
+ *(V_R4REF(pOle)) = *(FLOAT*)( (*pObj)->GetData() );
+ }
+ else if ( V_VT(pOle) == (VT_BYREF | VT_R8) && pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_R8) )
+ {
+ // deallocation of old value optimized away since there's nothing to
+ // deallocate for this vartype.
+
+ *(V_R8REF(pOle)) = *(DOUBLE*)( (*pObj)->GetData() );
+ }
+ else if ( V_VT(pOle) == (VT_BYREF | VT_BOOL) && pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_BOOLEAN) )
+ {
+ // deallocation of old value optimized away since there's nothing to
+ // deallocate for this vartype.
+
+ *(V_BOOLREF(pOle)) = ( *(U1*)( (*pObj)->GetData() ) ) ? VARIANT_TRUE : VARIANT_FALSE;
+ }
+ else if ( (V_VT(pOle) == (VT_BYREF | VT_INT) || V_VT(pOle) == (VT_BYREF | VT_UINT)) && (pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_I4) || pMT == MscorlibBinder::GetElementType(ELEMENT_TYPE_U4)) )
+ {
+ // deallocation of old value optimized away since there's nothing to
+ // deallocate for this vartype.
+
+ *(V_INTREF(pOle)) = *(LONG*)( (*pObj)->GetData() );
+ }
+ else if ( V_VT(pOle) == (VT_BYREF | VT_BSTR) && pMT == g_pStringClass )
+ {
+ if (*(V_BSTRREF(pOle)))
+ {
+ SysFreeString(*(V_BSTRREF(pOle)));
+ *(V_BSTRREF(pOle)) = NULL;
+ }
+
+ *(V_BSTRREF(pOle)) = ConvertStringToBSTR((STRINGREF*)pObj);
+ }
+ // Special case VT_BYREF|VT_RECORD
+ else if (V_VT(pOle) == (VT_BYREF | VT_RECORD))
+ {
+ // We have a special BYREF RECORD - we cannot call VariantClear on this one, because the caller owns the memory,
+ // so we will call RecordClear, then write our data into the same location.
+ hr = ClearAndInsertContentsIntoByrefRecordVariant(pOle, pObj);
+ goto Exit;
+ }
+ else
+ {
+ VARIANT vtmp;
+ VARTYPE vt = V_VT(pOle) & ~VT_BYREF;
+
+ ExtractContentsFromByrefVariant(pOle, &vtmp);
+ SafeVariantClear(&vtmp);
+
+ if (vt == VT_VARIANT)
+ {
+ // Since variants can contain any VARTYPE we simply convert the object to
+ // a variant and stuff it back into the byref variant.
+ MarshalOleVariantForObject(pObj, &vtmp);
+ InsertContentsIntoByrefVariant(&vtmp, pOle);
+ }
+ else if (vt & VT_ARRAY)
+ {
+ // Since the marshal cast helper does not support array's the best we can do
+ // is marshal the object back to a variant and hope it is of the right type.
+ // If it is not then we must throw an exception.
+ MarshalOleVariantForObject(pObj, &vtmp);
+ if (V_VT(&vtmp) != vt)
+ {
+ hr = DISP_E_TYPEMISMATCH;
+ goto Exit;
+ }
+ InsertContentsIntoByrefVariant(&vtmp, pOle);
+ }
+ else if ( (*pObj) == NULL &&
+ (vt == VT_BSTR ||
+ vt == VT_DISPATCH ||
+ vt == VT_UNKNOWN ||
+ vt == VT_PTR ||
+ vt == VT_CARRAY ||
+ vt == VT_SAFEARRAY ||
+ vt == VT_LPSTR ||
+ vt == VT_LPWSTR) )
+ {
+ // Have to handle this specially since the managed variant
+ // conversion will return a VT_EMPTY which isn't what we want.
+ V_VT(&vtmp) = vt;
+ V_UNKNOWN(&vtmp) = NULL;
+ InsertContentsIntoByrefVariant(&vtmp, pOle);
+ }
+ else
+ {
+ hr = E_FAIL;
+ }
+ }
+Exit:
+ return hr;
+}
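+
+// For illustration: a VT_BYREF variant does not own its payload - it points at
+// caller-owned storage - which is why the fast paths above write through the
+// reference instead of clearing and re-allocating. A minimal sketch of the
+// layout this relies on (standard OLE automation, hypothetical caller):
+//
+//     LONG slot = 0;
+//     VARIANT v;
+//     VariantInit(&v);
+//     V_VT(&v) = VT_BYREF | VT_I4;
+//     V_I4REF(&v) = &slot;     // the variant references caller-owned 'slot'
+//     // marshaling a boxed Int32 into 'v' stores its value into 'slot'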
+
+void OleVariant::MarshalObjectForOleVariant(const VARIANT * pOle, OBJECTREF * const & pObj)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pOle));
+ PRECONDITION(CheckPointer(pObj));
+ PRECONDITION(*pObj == NULL || (IsProtectedByGCFrame (pObj)));
+ }
+ CONTRACT_END;
+
+#ifdef FEATURE_CORECLR
+ if (AppX::IsAppXProcess())
+ {
+ COMPlusThrow(kPlatformNotSupportedException, IDS_EE_BADMARSHAL_TYPE_VARIANTASOBJECT);
+ }
+#endif // FEATURE_CORECLR
+
+#ifdef MDA_SUPPORTED
+ MdaInvalidVariant* pProbe = MDA_GET_ASSISTANT(InvalidVariant);
+ if (pProbe && !CheckVariant((VARIANT*)pOle))
+ pProbe->ReportViolation();
+#endif
+
+ // If V_ISBYREF(pOle) is set but V_BYREF(pOle) is null then the variant is
+ // invalid, unless we're dealing with VT_EMPTY or VT_NULL, in which case a
+ // null byref pointer is acceptable.
+ VARTYPE vt = V_VT(pOle) & ~VT_BYREF;
+ if (V_ISBYREF(pOle) && !V_BYREF(pOle) && !(vt == VT_EMPTY || vt == VT_NULL))
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_OLE_VARIANT);
+
+ switch (V_VT(pOle))
+ {
+ case VT_EMPTY:
+ SetObjectReference( pObj,
+ NULL,
+ GetAppDomain() );
+ break;
+
+ case VT_I4:
+ case VT_INT:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_I4)),
+ GetAppDomain() );
+ *(LONG*)((*pObj)->GetData()) = V_I4(pOle);
+ break;
+
+ case VT_BYREF|VT_I4:
+ case VT_BYREF|VT_INT:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_I4)),
+ GetAppDomain() );
+ *(LONG*)((*pObj)->GetData()) = *(V_I4REF(pOle));
+ break;
+
+ case VT_UI4:
+ case VT_UINT:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_U4)),
+ GetAppDomain() );
+ *(ULONG*)((*pObj)->GetData()) = V_UI4(pOle);
+ break;
+
+ case VT_BYREF|VT_UI4:
+ case VT_BYREF|VT_UINT:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_U4)),
+ GetAppDomain() );
+ *(ULONG*)((*pObj)->GetData()) = *(V_UI4REF(pOle));
+ break;
+
+ case VT_I2:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_I2)),
+ GetAppDomain() );
+ (*(SHORT*)((*pObj)->GetData())) = V_I2(pOle);
+ break;
+
+ case VT_BYREF|VT_I2:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_I2)),
+ GetAppDomain() );
+ *(SHORT*)((*pObj)->GetData()) = *(V_I2REF(pOle));
+ break;
+
+ case VT_UI2:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_U2)),
+ GetAppDomain() );
+ *(USHORT*)((*pObj)->GetData()) = V_UI2(pOle);
+ break;
+
+ case VT_BYREF|VT_UI2:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_U2)),
+ GetAppDomain() );
+ *(USHORT*)((*pObj)->GetData()) = *(V_UI2REF(pOle));
+ break;
+
+ case VT_I1:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_I1)),
+ GetAppDomain() );
+ *(CHAR*)((*pObj)->GetData()) = V_I1(pOle);
+ break;
+
+ case VT_BYREF|VT_I1:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_I1)),
+ GetAppDomain() );
+ *(CHAR*)((*pObj)->GetData()) = *(V_I1REF(pOle));
+ break;
+
+ case VT_UI1:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_U1)),
+ GetAppDomain() );
+ *(BYTE*)((*pObj)->GetData()) = V_UI1(pOle);
+ break;
+
+ case VT_BYREF|VT_UI1:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_U1)),
+ GetAppDomain() );
+ *(BYTE*)((*pObj)->GetData()) = *(V_UI1REF(pOle));
+ break;
+
+ case VT_R4:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_R4)),
+ GetAppDomain() );
+ *(FLOAT*)((*pObj)->GetData()) = V_R4(pOle);
+ break;
+
+ case VT_BYREF|VT_R4:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_R4)),
+ GetAppDomain() );
+ *(FLOAT*)((*pObj)->GetData()) = *(V_R4REF(pOle));
+ break;
+
+ case VT_R8:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_R8)),
+ GetAppDomain() );
+ *(DOUBLE*)((*pObj)->GetData()) = V_R8(pOle);
+ break;
+
+ case VT_BYREF|VT_R8:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_R8)),
+ GetAppDomain() );
+ *(DOUBLE*)((*pObj)->GetData()) = *(V_R8REF(pOle));
+ break;
+
+ case VT_BOOL:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_BOOLEAN)),
+ GetAppDomain() );
+ *(VARIANT_BOOL*)((*pObj)->GetData()) = V_BOOL(pOle) ? 1 : 0;
+ break;
+
+ case VT_BYREF|VT_BOOL:
+ SetObjectReference( pObj,
+ AllocateObject(MscorlibBinder::GetElementType(ELEMENT_TYPE_BOOLEAN)),
+ GetAppDomain() );
+ *(VARIANT_BOOL*)((*pObj)->GetData()) = *(V_BOOLREF(pOle)) ? 1 : 0;
+ break;
+
+ case VT_BSTR:
+ ConvertBSTRToString(V_BSTR(pOle), (STRINGREF*)pObj);
+ break;
+
+ case VT_BYREF|VT_BSTR:
+ ConvertBSTRToString(*(V_BSTRREF(pOle)), (STRINGREF*)pObj);
+ break;
+
+ default:
+ {
+ MethodDescCallSite convertVariantToObject(METHOD__VARIANT__CONVERT_VARIANT_TO_OBJECT);
+
+ VariantData managedVariant;
+ FillMemory(&managedVariant, sizeof(managedVariant), 0);
+ GCPROTECT_BEGIN_VARIANTDATA(managedVariant)
+ {
+ OleVariant::MarshalComVariantForOleVariant((VARIANT*)pOle, &managedVariant);
+ ARG_SLOT args[] = { PtrToArgSlot(&managedVariant) };
+ SetObjectReference( pObj,
+ convertVariantToObject.Call_RetOBJECTREF(args),
+ GetAppDomain() );
+ }
+ GCPROTECT_END_VARIANTDATA();
+ }
+ }
+ RETURN;
+ }
+
+/* ------------------------------------------------------------------------- *
+ * Byref variant manipulation helpers.
+ * ------------------------------------------------------------------------- */
+
+void OleVariant::ExtractContentsFromByrefVariant(VARIANT *pByrefVar, VARIANT *pDestVar)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pByrefVar));
+ PRECONDITION(CheckPointer(pDestVar));
+ }
+ CONTRACT_END;
+
+ VARTYPE vt = V_VT(pByrefVar) & ~VT_BYREF;
+
+ // VT_BYREF | VT_EMPTY is not a valid combination.
+ if (vt == 0 || vt == VT_EMPTY)
+ COMPlusThrow(kInvalidOleVariantTypeException, IDS_EE_INVALID_OLE_VARIANT);
+
+ switch (vt)
+ {
+ case VT_RECORD:
+ {
+ // VT_RECORDs are unusual in that they have the same internal
+ // representation regardless of whether the VT_BYREF flag is set.
+ V_RECORD(pDestVar) = V_RECORD(pByrefVar);
+ V_RECORDINFO(pDestVar) = V_RECORDINFO(pByrefVar);
+
+ // Set the variant type of the destination variant.
+ V_VT(pDestVar) = vt;
+
+ break;
+ }
+
+ case VT_VARIANT:
+ {
+ // A byref variant is not allowed to contain a byref variant.
+ if (V_ISBYREF(V_VARIANTREF(pByrefVar)))
+ COMPlusThrow(kInvalidOleVariantTypeException, IDS_EE_INVALID_OLE_VARIANT);
+
+ // Copy the variant that the byref variant points to into the destination variant.
+ // This will replace the VARTYPE of pDestVar with the VARTYPE of the VARIANT being
+ // pointed to.
+ memcpyNoGCRefs(pDestVar, V_VARIANTREF(pByrefVar), sizeof(VARIANT));
+ break;
+ }
+
+ case VT_DECIMAL:
+ {
+ // Copy the value that the byref variant points to into the destination variant.
+ // Decimals are special in that they occupy the 16 bits of padding between the
+ // VARTYPE and the intVal field.
+ memcpyNoGCRefs(&V_DECIMAL(pDestVar), V_DECIMALREF(pByrefVar), sizeof(DECIMAL));
+
+ // Set the variant type of the destination variant.
+ V_VT(pDestVar) = vt;
+
+ break;
+ }
+
+ default:
+ {
+ // Copy the value that the byref variant points to into the destination variant.
+ SIZE_T sz = OleVariant::GetElementSizeForVarType(vt, NULL);
+ memcpyNoGCRefs(&V_INT(pDestVar), V_INTREF(pByrefVar), sz);
+
+ // Set the variant type of the destination variant.
+ V_VT(pDestVar) = vt;
+
+ break;
+ }
+ }
+
+ RETURN;
+}
+
+void OleVariant::InsertContentsIntoByrefVariant(VARIANT *pSrcVar, VARIANT *pByrefVar)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pByrefVar));
+ PRECONDITION(CheckPointer(pSrcVar));
+ }
+ CONTRACT_END;
+
+ _ASSERTE(V_VT(pSrcVar) == (V_VT(pByrefVar) & ~VT_BYREF) || V_VT(pByrefVar) == (VT_BYREF | VT_VARIANT));
+
+
+ VARTYPE vt = V_VT(pByrefVar) & ~VT_BYREF;
+
+ // VT_BYREF | VT_EMPTY is not a valid combination.
+ if (vt == 0 || vt == VT_EMPTY)
+ COMPlusThrow(kInvalidOleVariantTypeException, IDS_EE_INVALID_OLE_VARIANT);
+
+ switch (vt)
+ {
+ case VT_RECORD:
+ {
+ // VT_RECORDs are unusual in that they have the same internal
+ // representation regardless of whether the VT_BYREF flag is set.
+ V_RECORD(pByrefVar) = V_RECORD(pSrcVar);
+ V_RECORDINFO(pByrefVar) = V_RECORDINFO(pSrcVar);
+ break;
+ }
+
+ case VT_VARIANT:
+ {
+ // Copy the source variant into the variant that the byref variant points to.
+ memcpyNoGCRefs(V_VARIANTREF(pByrefVar), pSrcVar, sizeof(VARIANT));
+ break;
+ }
+
+ case VT_DECIMAL:
+ {
+ // Copy the value inside the source variant into the location pointed to by the byref variant.
+ memcpyNoGCRefs(V_DECIMALREF(pByrefVar), &V_DECIMAL(pSrcVar), sizeof(DECIMAL));
+ break;
+ }
+
+ default:
+ {
+ // Copy the value inside the source variant into the location pointed to by the byref variant.
+
+ SIZE_T sz = OleVariant::GetElementSizeForVarType(vt, NULL);
+ memcpyNoGCRefs(V_INTREF(pByrefVar), &V_INT(pSrcVar), sz);
+ break;
+ }
+ }
+ RETURN;
+}
+
+void OleVariant::CreateByrefVariantForVariant(VARIANT *pSrcVar, VARIANT *pByrefVar)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pByrefVar));
+ PRECONDITION(CheckPointer(pSrcVar));
+ }
+ CONTRACT_END;
+
+ // Set the type of the byref variant based on the type of the source variant.
+ VARTYPE vt = V_VT(pSrcVar);
+
+ // VT_BYREF | VT_EMPTY is not a valid combination.
+ if (vt == VT_EMPTY)
+ COMPlusThrow(kInvalidOleVariantTypeException, IDS_EE_INVALID_OLE_VARIANT);
+
+ if (vt == VT_NULL)
+ {
+ // VT_BYREF | VT_NULL is not a valid combination either, but we'll allow VT_NULL
+ // to be passed this way (meaning that the callee can change the type and return
+ // data); note that the VT_BYREF flag is not added.
+ V_VT(pByrefVar) = vt;
+ }
+ else
+ {
+ switch (vt)
+ {
+ case VT_RECORD:
+ {
+ // VT_RECORDs are unusual in that they have the same internal
+ // representation regardless of whether the VT_BYREF flag is set.
+ V_RECORD(pByrefVar) = V_RECORD(pSrcVar);
+ V_RECORDINFO(pByrefVar) = V_RECORDINFO(pSrcVar);
+ break;
+ }
+
+ case VT_VARIANT:
+ {
+ V_VARIANTREF(pByrefVar) = pSrcVar;
+ break;
+ }
+
+ case VT_DECIMAL:
+ {
+ V_DECIMALREF(pByrefVar) = &V_DECIMAL(pSrcVar);
+ break;
+ }
+
+ default:
+ {
+ V_INTREF(pByrefVar) = &V_INT(pSrcVar);
+ break;
+ }
+ }
+
+ V_VT(pByrefVar) = vt | VT_BYREF;
+ }
+
+ RETURN;
+}
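+
+// Sketch of the byref round trip these three helpers implement (illustrative
+// only, assuming a caller that keeps pSrcVar alive for the duration):
+//
+//     VARIANT v, byref, result;
+//     VariantInit(&v); VariantInit(&byref); VariantInit(&result);
+//     V_VT(&v) = VT_I4; V_I4(&v) = 42;
+//     OleVariant::CreateByrefVariantForVariant(&v, &byref);  // byref -> &v
+//     // ... the callee may overwrite the referenced data in place ...
+//     OleVariant::ExtractContentsFromByrefVariant(&byref, &result); // copy out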
+
+/* ------------------------------------------------------------------------- *
+ * Variant marshaling
+ * ------------------------------------------------------------------------- */
+
+//
+// MarshalComVariantForOleVariant copies the contents of the OLE variant into
+// the COM variant.
+//
+
+void OleVariant::MarshalComVariantForOleVariant(VARIANT *pOle, VariantData *pCom)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pOle));
+ PRECONDITION(CheckPointer(pCom));
+ }
+ CONTRACTL_END;
+
+ BOOL byref = V_ISBYREF(pOle);
+ VARTYPE vt = V_VT(pOle) & ~VT_BYREF;
+
+ // Note that the following check also covers VT_ILLEGAL.
+ if ((vt & ~VT_ARRAY) >= 128 )
+ COMPlusThrow(kInvalidOleVariantTypeException, IDS_EE_INVALID_OLE_VARIANT);
+
+ if (byref && !V_BYREF(pOle) && !(vt == VT_EMPTY || vt == VT_NULL))
+ COMPlusThrow(kArgumentException, IDS_EE_INVALID_OLE_VARIANT);
+
+ if (byref && vt == VT_VARIANT)
+ {
+ pOle = V_VARIANTREF(pOle);
+ byref = V_ISBYREF(pOle);
+ vt = V_VT(pOle) & ~VT_BYREF;
+
+ // Byref VARIANTS are not allowed to be nested.
+ if (byref)
+ COMPlusThrow(kInvalidOleVariantTypeException, IDS_EE_INVALID_OLE_VARIANT);
+ }
+
+ CVTypes cvt = GetCVTypeForVarType(vt);
+ const Marshaler *marshal = GetMarshalerForVarType(vt, TRUE);
+
+ pCom->SetType(cvt);
+ pCom->SetVT(vt); // store away VT for return trip.
+ if (marshal == NULL || (byref
+ ? marshal->OleRefToComVariant == NULL
+ : marshal->OleToComVariant == NULL))
+ {
+ if (cvt==CV_EMPTY)
+ {
+ if (V_ISBYREF(pOle))
+ {
+ // Must set ObjectRef field of Variant to a specific instance.
+#ifdef _WIN64
+ VariantData::NewVariant(pCom, CV_U8, (INT64)(size_t)V_BYREF(pOle));
+#else // _WIN64
+ VariantData::NewVariant(pCom, CV_U4, (INT32)(size_t)V_BYREF(pOle));
+#endif // _WIN64
+ }
+ else
+ {
+ VariantData::NewVariant(pCom, cvt, NULL);
+ }
+ }
+ else if (cvt==CV_NULL)
+ {
+ VariantData::NewVariant(pCom, cvt, NULL);
+ }
+ else
+ {
+ pCom->SetObjRef(NULL);
+ if (byref)
+ {
+ INT64 data = 0;
+ CopyMemory(&data, V_R8REF(pOle), GetElementSizeForVarType(vt, NULL));
+ pCom->SetData(&data);
+ }
+ else
+ pCom->SetData(&V_R8(pOle));
+ }
+ }
+ else
+ {
+ if (byref)
+ marshal->OleRefToComVariant(pOle, pCom);
+ else
+ marshal->OleToComVariant(pOle, pCom);
+ }
+}
+
+//
+// MarshalOleVariantForComVariant copies the contents of the OLE variant from
+// the COM variant.
+//
+
+void OleVariant::MarshalOleVariantForComVariant(VariantData *pCom, VARIANT *pOle)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pCom));
+ PRECONDITION(CheckPointer(pOle));
+ }
+ CONTRACTL_END;
+
+ SafeVariantClear(pOle);
+
+ VariantEmptyHolder veh;
+ veh = pOle;
+
+ VARTYPE vt = GetVarTypeForComVariant(pCom);
+ V_VT(pOle) = vt;
+
+ const Marshaler *marshal = GetMarshalerForVarType(vt, TRUE);
+
+ if (marshal == NULL || marshal->ComToOleVariant == NULL)
+ {
+ *(INT64*)&V_R8(pOle) = *(INT64*)pCom->GetData();
+ }
+ else
+ {
+ marshal->ComToOleVariant(pCom, pOle);
+ }
+
+ veh.SuppressRelease();
+}
+
+void OleVariant::MarshalInterfaceArrayComToOleHelper(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pElementMT, BOOL bDefaultIsDispatch)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pComArray));
+ PRECONDITION(CheckPointer(oleArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ BOOL bDispatch = bDefaultIsDispatch;
+ BOOL bHeterogenous = (pElementMT == NULL);
+
+ // If the method table is for Object then don't consider it.
+ if (pElementMT == g_pObjectClass)
+ pElementMT = NULL;
+
+ // If the element MT represents a class, then we need to determine the default
+ // interface to use to expose the object out to COM.
+ if (pElementMT && !pElementMT->IsInterface())
+ {
+ pElementMT = GetDefaultInterfaceMTForClass(pElementMT, &bDispatch);
+ }
+
+ // Determine the start and the end of the data in the OLE array.
+ IUnknown **pOle = (IUnknown **) oleArray;
+ IUnknown **pOleEnd = pOle + elementCount;
+
+ // Retrieve the start of the data in the managed array.
+ BASEARRAYREF unprotectedArray = *pComArray;
+ OBJECTREF *pCom = (OBJECTREF *) unprotectedArray->GetDataPtr();
+
+ OBJECTREF TmpObj = NULL;
+ GCPROTECT_BEGIN(TmpObj)
+ {
+ MethodTable *pLastElementMT = NULL;
+
+ while (pOle < pOleEnd)
+ {
+ TmpObj = *pCom++;
+
+ IUnknown *unk;
+ if (TmpObj == NULL)
+ unk = NULL;
+ else
+ {
+ if (bHeterogenous)
+ {
+ // Inspect the type of each element separately (cache the last type for perf).
+ if (TmpObj->GetMethodTable() != pLastElementMT)
+ {
+ pLastElementMT = TmpObj->GetMethodTable();
+ pElementMT = GetDefaultInterfaceMTForClass(pLastElementMT, &bDispatch);
+ }
+ }
+
+ if (pElementMT)
+ {
+ // Convert to COM IP based on an interface MT (a specific interface will be exposed).
+ unk = GetComIPFromObjectRef(&TmpObj, pElementMT);
+ }
+ else
+ {
+ // Convert to COM IP exposing either IDispatch or IUnknown.
+ unk = GetComIPFromObjectRef(&TmpObj, (bDispatch ? ComIpType_Dispatch : ComIpType_Unknown), NULL);
+ }
+ }
+
+ *pOle++ = unk;
+
+ if (*(void **)&unprotectedArray != *(void **)&*pComArray)
+ {
+ SIZE_T currentOffset = ((BYTE *)pCom) - (*(Object **) &unprotectedArray)->GetAddress();
+ unprotectedArray = *pComArray;
+ pCom = (OBJECTREF *) (unprotectedArray->GetAddress() + currentOffset);
+ }
+ }
+ }
+ GCPROTECT_END();
+}
+
+// Used by customer checked build to test validity of VARIANT
+
+BOOL OleVariant::CheckVariant(VARIANT* pOle)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pOle));
+ }
+ CONTRACTL_END;
+
+ BOOL bValidVariant = FALSE;
+
+ // We need a try/catch here since VariantCopy could cause an AV if the VARIANT isn't valid.
+ EX_TRY
+ {
+ VARIANT pOleCopy;
+ SafeVariantInit(&pOleCopy);
+
+ GCX_PREEMP();
+ if (SUCCEEDED(VariantCopy(&pOleCopy, pOle)))
+ {
+ SafeVariantClear(&pOleCopy);
+ bValidVariant = TRUE;
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return bValidVariant;
+}
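+
+// Usage sketch (illustrative): CheckVariant is a yes/no probe with no further
+// diagnostics - it attempts a VariantCopy under EX_TRY and reports whether the
+// copy succeeded. A hypothetical caller:
+//
+//     if (!OleVariant::CheckVariant(pOle))
+//         COMPlusThrow(kArgumentException, IDS_EE_INVALID_OLE_VARIANT);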
+
+HRESULT OleVariant::ClearAndInsertContentsIntoByrefRecordVariant(VARIANT* pOle, OBJECTREF* pObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (V_VT(pOle) != (VT_BYREF | VT_RECORD))
+ return DISP_E_BADVARTYPE;
+
+ // Clear the current contents of the record.
+ {
+ GCX_PREEMP();
+ V_RECORDINFO(pOle)->RecordClear(V_RECORD(pOle));
+ }
+
+ // Ok - let's marshal the returned object into a VT_RECORD.
+ if ((*pObj) != NULL)
+ {
+ VARIANT vtmp;
+ SafeVariantInit(&vtmp);
+
+ MarshalOleVariantForObject(pObj, &vtmp);
+
+ {
+ GCX_PREEMP();
+
+ // Verify that we have a VT_RECORD.
+ if (V_VT(&vtmp) != VT_RECORD)
+ {
+ SafeVariantClear(&vtmp);
+ return DISP_E_TYPEMISMATCH;
+ }
+
+ // Verify that we have the same type of record.
+ if (! V_RECORDINFO(pOle)->IsMatchingType(V_RECORDINFO(&vtmp)))
+ {
+ SafeVariantClear(&vtmp);
+ return DISP_E_TYPEMISMATCH;
+ }
+
+ // Now copy the contents of the new variant back into the old variant.
+ HRESULT hr = V_RECORDINFO(pOle)->RecordCopy(V_RECORD(&vtmp), V_RECORD(pOle));
+ if (hr != S_OK)
+ {
+ SafeVariantClear(&vtmp);
+ return DISP_E_TYPEMISMATCH;
+ }
+ }
+ }
+ return S_OK;
+}
+
+void OleVariant::MarshalIDispatchArrayComToOle(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pElementMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid)
+{
+ WRAPPER_NO_CONTRACT;
+
+ MarshalInterfaceArrayComToOleHelper(pComArray, oleArray, pElementMT, TRUE);
+}
+
+
+/* ------------------------------------------------------------------------- *
+ * Currency marshaling routines
+ * ------------------------------------------------------------------------- */
+
+void OleVariant::MarshalCurrencyVariantOleToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pDecimalRef = AllocateObject(MscorlibBinder::GetClass(CLASS__DECIMAL));
+ DECIMAL DecVal;
+
+ // Convert the currency to a decimal.
+ HRESULT hr = VarDecFromCy(V_CY(pOleVariant), &DecVal);
+ IfFailThrow(hr);
+
+ if (FAILED(DecimalCanonicalize(&DecVal)))
+ COMPlusThrow(kOverflowException, W("Overflow_Currency"));
+
+ // Store the value into the unboxed decimal and store the decimal in the variant.
+ *(DECIMAL *) pDecimalRef->UnBox() = DecVal;
+ pComVariant->SetObjRef(pDecimalRef);
+}
+
+void OleVariant::MarshalCurrencyVariantComToOle(VariantData *pComVariant,
+ VARIANT *pOleVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ CURRENCY CyVal;
+
+ // Convert the decimal to a currency.
+ HRESULT hr = VarCyFromDec((DECIMAL*)pComVariant->GetObjRef()->UnBox(), &CyVal);
+ IfFailThrow(hr);
+
+ // Store the currency in the VARIANT and set the VT.
+ V_CY(pOleVariant) = CyVal;
+}
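+
+// Background (standard OLE automation, stated for clarity): CURRENCY is a
+// 64-bit fixed-point value scaled by 10,000, so the conversions above go
+// through VarDecFromCy/VarCyFromDec rather than a raw bit copy. For example:
+//
+//     CY cy; cy.int64 = 123456;   // represents the currency value 12.3456
+//     DECIMAL dec;
+//     VarDecFromCy(cy, &dec);     // dec now holds 12.3456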
+
+void OleVariant::MarshalCurrencyVariantOleRefToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pDecimalRef = AllocateObject(MscorlibBinder::GetClass(CLASS__DECIMAL));
+ DECIMAL DecVal;
+
+ // Convert the currency to a decimal.
+ HRESULT hr = VarDecFromCy(*V_CYREF(pOleVariant), &DecVal);
+ IfFailThrow(hr);
+
+ if (FAILED(DecimalCanonicalize(&DecVal)))
+ COMPlusThrow(kOverflowException, W("Overflow_Currency"));
+
+ // Store the value into the unboxed decimal and store the decimal in the variant.
+ *(DECIMAL *) pDecimalRef->UnBox() = DecVal;
+ pComVariant->SetObjRef(pDecimalRef);
+}
+
+void OleVariant::MarshalCurrencyArrayOleToCom(void *oleArray, BASEARRAYREF *pComArray,
+ MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ CURRENCY *pOle = (CURRENCY *) oleArray;
+ CURRENCY *pOleEnd = pOle + elementCount;
+
+ DECIMAL *pCom = (DECIMAL *) (*pComArray)->GetDataPtr();
+
+ while (pOle < pOleEnd)
+ {
+ IfFailThrow(VarDecFromCy(*pOle++, pCom));
+ if (FAILED(DecimalCanonicalize(pCom)))
+ COMPlusThrow(kOverflowException, W("Overflow_Currency"));
+
+ pCom++;
+ }
+}
+
+void OleVariant::MarshalCurrencyArrayComToOle(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ CURRENCY *pOle = (CURRENCY *) oleArray;
+ CURRENCY *pOleEnd = pOle + elementCount;
+
+ DECIMAL *pCom = (DECIMAL *) (*pComArray)->GetDataPtr();
+
+ while (pOle < pOleEnd)
+ IfFailThrow(VarCyFromDec(pCom++, pOle++));
+}
+
+
+/* ------------------------------------------------------------------------- *
+ * Variant marshaling routines
+ * ------------------------------------------------------------------------- */
+
+void OleVariant::MarshalVariantArrayOleToCom(void *oleArray, BASEARRAYREF *pComArray,
+ MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ VARIANT *pOle = (VARIANT *) oleArray;
+ VARIANT *pOleEnd = pOle + elementCount;
+
+ BASEARRAYREF unprotectedArray = *pComArray;
+ OBJECTREF *pCom = (OBJECTREF *) unprotectedArray->GetDataPtr();
+
+ AppDomain *pDomain = unprotectedArray->GetAppDomain();
+
+ OBJECTREF TmpObj = NULL;
+ GCPROTECT_BEGIN(TmpObj)
+ {
+ while (pOle < pOleEnd)
+ {
+ // Marshal the OLE variant into a temp managed variant.
+ MarshalObjectForOleVariant(pOle++, &TmpObj);
+
+ // Reset pCom pointer only if array object has moved, rather than
+ // recomputing it every time through the loop. Beware implicit calls to
+ // ValidateObject inside OBJECTREF methods.
+ if (*(void **)&unprotectedArray != *(void **)&*pComArray)
+ {
+ SIZE_T currentOffset = ((BYTE *)pCom) - (*(Object **) &unprotectedArray)->GetAddress();
+ unprotectedArray = *pComArray;
+ pCom = (OBJECTREF *) (unprotectedArray->GetAddress() + currentOffset);
+ }
+ SetObjectReference(pCom++, TmpObj, pDomain);
+ }
+ }
+ GCPROTECT_END();
+}
+
+void OleVariant::MarshalVariantArrayComToOle(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+
+ MarshalVariantArrayComToOle(pComArray, oleArray, pInterfaceMT, fBestFitMapping, fThrowOnUnmappableChar, FALSE, fOleArrayIsValid);
+}
+
+
+void OleVariant::MarshalVariantArrayComToOle(BASEARRAYREF *pComArray, void *oleArray,
+ MethodTable *pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fMarshalByrefArgOnly,
+ BOOL fOleArrayIsValid, int nOleArrayStepLength)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(oleArray));
+ PRECONDITION(CheckPointer(pComArray));
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pComArray);
+
+ SIZE_T elementCount = (*pComArray)->GetNumComponents();
+
+ VARIANT *pOle = (VARIANT *) oleArray;
+ VARIANT *pOleEnd = pOle + elementCount * nOleArrayStepLength;
+
+ BASEARRAYREF unprotectedArray = *pComArray;
+ OBJECTREF *pCom = (OBJECTREF *) unprotectedArray->GetDataPtr();
+
+ OBJECTREF TmpObj = NULL;
+ GCPROTECT_BEGIN(TmpObj)
+ {
+ while (pOle != pOleEnd)
+ {
+ // Reset pCom pointer only if array object has moved, rather than
+ // recomputing it every time through the loop. Beware implicit calls to
+ // ValidateObject inside OBJECTREF methods.
+ if (*(void **)&unprotectedArray != *(void **)&*pComArray)
+ {
+ SIZE_T currentOffset = ((BYTE *)pCom) - (*(Object **) &unprotectedArray)->GetAddress();
+ unprotectedArray = *pComArray;
+ pCom = (OBJECTREF *) (unprotectedArray->GetAddress() + currentOffset);
+ }
+ TmpObj = *pCom++;
+
+ // Marshal the temp managed variant into the OLE variant.
+ if (fOleArrayIsValid)
+ {
+ // We first try MarshalCommonOleRefVariantForObject for VT_BYREF variants because
+ // MarshalOleVariantForObject() calls VariantClear on the variant and does not
+ // preserve the VT_BYREF. To remain compatible with the old behavior (previous
+ // versions used MarshalOleVariantForObject), which casts the managed object to a
+ // variant based on the object's MethodTable, MarshalCommonOleRefVariantForObject
+ // is used instead of MarshalOleRefVariantForObject so that the cast is not done
+ // based on the VT of the variant.
+ if (!((pOle->vt & VT_BYREF) &&
+ SUCCEEDED(MarshalCommonOleRefVariantForObject(&TmpObj, pOle))))
+ if (pOle->vt & VT_BYREF || !fMarshalByrefArgOnly)
+ MarshalOleVariantForObject(&TmpObj, pOle);
+ }
+ else
+ {
+ // The contents of pOle are undefined, so don't try to handle byrefs.
+ MarshalOleVariantForObject(&TmpObj, pOle);
+ }
+
+ pOle += nOleArrayStepLength;
+ }
+ }
+ GCPROTECT_END();
+}
+
+void OleVariant::ClearVariantArray(void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(oleArray));
+ }
+ CONTRACTL_END;
+
+ VARIANT *pOle = (VARIANT *) oleArray;
+ VARIANT *pOleEnd = pOle + cElements;
+
+ while (pOle < pOleEnd)
+ SafeVariantClear(pOle++);
+}
+
+
+/* ------------------------------------------------------------------------- *
+ * Array marshaling routines
+ * ------------------------------------------------------------------------- */
+#ifdef FEATURE_CLASSIC_COMINTEROP
+
+void OleVariant::MarshalArrayVariantOleToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ SAFEARRAY *pSafeArray = V_ARRAY(pOleVariant);
+
+ VARTYPE vt = V_VT(pOleVariant) & ~VT_ARRAY;
+
+ if (pSafeArray)
+ {
+ if (vt == VT_EMPTY)
+ COMPlusThrow(kInvalidOleVariantTypeException, IDS_EE_INVALID_OLE_VARIANT);
+
+ MethodTable *pElemMT = NULL;
+ if (vt == VT_RECORD)
+ pElemMT = GetElementTypeForRecordSafeArray(pSafeArray).GetMethodTable();
+
+ BASEARRAYREF pArrayRef = CreateArrayRefForSafeArray(pSafeArray, vt, pElemMT);
+ pComVariant->SetObjRef((OBJECTREF) pArrayRef);
+ MarshalArrayRefForSafeArray(pSafeArray, (BASEARRAYREF *) pComVariant->GetObjRefPtr(), vt, pElemMT);
+ }
+ else
+ {
+ pComVariant->SetObjRef(NULL);
+ }
+}
+
+void OleVariant::MarshalArrayVariantComToOle(VariantData *pComVariant,
+ VARIANT *pOleVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ SafeArrayPtrHolder pSafeArray = NULL;
+ BASEARRAYREF *pArrayRef = (BASEARRAYREF *) pComVariant->GetObjRefPtr();
+ MethodTable *pElemMT = NULL;
+
+ _ASSERTE(pArrayRef);
+
+ VARTYPE vt = GetElementVarTypeForArrayRef(*pArrayRef);
+ if (vt == VT_ARRAY)
+ vt = VT_VARIANT;
+
+ pElemMT = GetArrayElementTypeWrapperAware(pArrayRef).GetMethodTable();
+
+ if (*pArrayRef != NULL)
+ {
+ pSafeArray = CreateSafeArrayForArrayRef(pArrayRef, vt, pElemMT);
+ MarshalSafeArrayForArrayRef(pArrayRef, pSafeArray, vt, pElemMT);
+ }
+ V_ARRAY(pOleVariant) = pSafeArray;
+ pSafeArray.SuppressRelease();
+}
+
+void OleVariant::MarshalArrayVariantOleRefToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ SAFEARRAY *pSafeArray = *V_ARRAYREF(pOleVariant);
+
+ VARTYPE vt = V_VT(pOleVariant) & ~(VT_ARRAY|VT_BYREF);
+
+ if (pSafeArray)
+ {
+ MethodTable *pElemMT = NULL;
+ if (vt == VT_RECORD)
+ pElemMT = GetElementTypeForRecordSafeArray(pSafeArray).GetMethodTable();
+
+ BASEARRAYREF pArrayRef = CreateArrayRefForSafeArray(pSafeArray, vt, pElemMT);
+ pComVariant->SetObjRef((OBJECTREF) pArrayRef);
+ MarshalArrayRefForSafeArray(pSafeArray, (BASEARRAYREF *) pComVariant->GetObjRefPtr(), vt, pElemMT);
+ }
+ else
+ {
+ pComVariant->SetObjRef(NULL);
+ }
+}
+#endif //FEATURE_CLASSIC_COMINTEROP
+
+
+/* ------------------------------------------------------------------------- *
+ * Error marshaling routines
+ * ------------------------------------------------------------------------- */
+
+void OleVariant::MarshalErrorVariantOleToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ // Check to see if the variant represents a missing argument.
+ if (V_ERROR(pOleVariant) == DISP_E_PARAMNOTFOUND)
+ {
+ pComVariant->SetType(CV_MISSING);
+ }
+ else
+ {
+ pComVariant->SetDataAsInt32(V_ERROR(pOleVariant));
+ }
+}
+
+void OleVariant::MarshalErrorVariantOleRefToCom(VARIANT *pOleVariant,
+ VariantData *pComVariant)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ // Check to see if the variant represents a missing argument.
+ if (*V_ERRORREF(pOleVariant) == DISP_E_PARAMNOTFOUND)
+ {
+ pComVariant->SetType(CV_MISSING);
+ }
+ else
+ {
+ pComVariant->SetDataAsInt32(*V_ERRORREF(pOleVariant));
+ }
+}
+
+void OleVariant::MarshalErrorVariantComToOle(VariantData *pComVariant,
+ VARIANT *pOleVariant)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pOleVariant));
+ PRECONDITION(CheckPointer(pComVariant));
+ }
+ CONTRACTL_END;
+
+ if (pComVariant->GetType() == CV_MISSING)
+ {
+ V_ERROR(pOleVariant) = DISP_E_PARAMNOTFOUND;
+ }
+ else
+ {
+ V_ERROR(pOleVariant) = pComVariant->GetDataAsInt32();
+ }
+}
+
+
+/* ------------------------------------------------------------------------- *
+ * Safearray allocation & conversion
+ * ------------------------------------------------------------------------- */
+
+//
+// CreateSafeArrayDescriptorForArrayRef creates a SAFEARRAY descriptor with the
+// appropriate type & dimensions for the given array ref. No memory is
+// allocated.
+//
+// This function is useful when you want to allocate the data specially using
+// a fixed buffer or pinning.
+//
+
+SAFEARRAY *OleVariant::CreateSafeArrayDescriptorForArrayRef(BASEARRAYREF *pArrayRef, VARTYPE vt,
+ MethodTable *pInterfaceMT)
+{
+ CONTRACT (SAFEARRAY*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pArrayRef));
+ PRECONDITION(!(vt & VT_ARRAY));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ ASSERT_PROTECTED(pArrayRef);
+
+ ULONG nElem = (*pArrayRef)->GetNumComponents();
+ ULONG nRank = (*pArrayRef)->GetRank();
+
+ SafeArrayPtrHolder pSafeArray = NULL;
+ SafeComHolder<ITypeInfo> pITI = NULL;
+ SafeComHolder<IRecordInfo> pRecInfo = NULL;
+
+ IfFailThrow(SafeArrayAllocDescriptorEx(vt, nRank, &pSafeArray));
+
+ switch (vt)
+ {
+ case VT_VARIANT:
+ {
+ // OleAut32.dll only sets FADF_HASVARTYPE, but VB says we also need to set
+ // the FADF_VARIANT bit for this safearray to destruct properly. OleAut32
+ // doesn't want to change their code unless there's a strong reason, since
+ // it's all "black magic" anyway.
+ pSafeArray->fFeatures |= FADF_VARIANT;
+ break;
+ }
+
+ case VT_BSTR:
+ {
+ pSafeArray->fFeatures |= FADF_BSTR;
+ break;
+ }
+
+ case VT_UNKNOWN:
+ {
+ pSafeArray->fFeatures |= FADF_UNKNOWN;
+ break;
+ }
+
+ case VT_DISPATCH:
+ {
+ pSafeArray->fFeatures |= FADF_DISPATCH;
+ break;
+ }
+
+ case VT_RECORD:
+ {
+ pSafeArray->fFeatures |= FADF_RECORD;
+ break;
+ }
+ }
+
+ //
+ // Fill in bounds
+ //
+
+ SAFEARRAYBOUND *bounds = pSafeArray->rgsabound;
+ SAFEARRAYBOUND *boundsEnd = bounds + nRank;
+ SIZE_T cElements;
+
+ if (!(*pArrayRef)->IsMultiDimArray())
+ {
+ bounds[0].cElements = nElem;
+ bounds[0].lLbound = 0;
+ cElements = nElem;
+ }
+ else
+ {
+ const INT32 *count = (*pArrayRef)->GetBoundsPtr() + nRank - 1;
+ const INT32 *lower = (*pArrayRef)->GetLowerBoundsPtr() + nRank - 1;
+
+ cElements = 1;
+ while (bounds < boundsEnd)
+ {
+ bounds->lLbound = *lower--;
+ bounds->cElements = *count--;
+ cElements *= bounds->cElements;
+ bounds++;
+ }
+ }
+
+ pSafeArray->cbElements = (unsigned)GetElementSizeForVarType(vt, pInterfaceMT);
+
+ // If the SAFEARRAY contains VT_RECORD's, then we need to set the
+ // IRecordInfo.
+ if (vt == VT_RECORD)
+ {
+ IfFailThrow(GetITypeInfoForEEClass(pInterfaceMT, &pITI));
+ IfFailThrow(GetRecordInfoFromTypeInfo(pITI, &pRecInfo));
+ IfFailThrow(SafeArraySetRecordInfo(pSafeArray, pRecInfo));
+ }
+
+ pSafeArray.SuppressRelease();
+ RETURN pSafeArray;
+}
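+
+// Usage sketch (hypothetical, illustrating the "fixed buffer or pinning" case
+// described above): the descriptor comes back with pvData unset, so a caller
+// holding a GC-protected array ref can point it at storage it manages itself.
+//
+//     SAFEARRAY *psa = OleVariant::CreateSafeArrayDescriptorForArrayRef(&arrayRef, VT_I4, NULL);
+//     psa->pvData = pPinnedBuffer;   // caller-supplied storage (assumed pinned)
+//     // ... hand psa out ...
+//     psa->pvData = NULL;            // detach before destroying the descriptor
+//     SafeArrayDestroyDescriptor(psa);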
+
+//
+// CreateSafeArrayForArrayRef creates a SAFEARRAY with the appropriate
+// type & dimensions & data for the given array ref. The data is initialized to
+// zero if necessary for safe destruction.
+//
+
+SAFEARRAY *OleVariant::CreateSafeArrayForArrayRef(BASEARRAYREF *pArrayRef, VARTYPE vt,
+ MethodTable *pInterfaceMT)
+{
+ CONTRACT (SAFEARRAY*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pArrayRef));
+// PRECONDITION(CheckPointer(*pArrayRef));
+ PRECONDITION(vt != VT_EMPTY);
+ }
+ CONTRACT_END;
+ ASSERT_PROTECTED(pArrayRef);
+
+ // Validate that the type of the managed array is the expected type.
+ if (!IsValidArrayForSafeArrayElementType(pArrayRef, vt))
+ COMPlusThrow(kSafeArrayTypeMismatchException);
+
+ // For structs and interfaces, verify that the array is of the valid type.
+ if (vt == VT_RECORD || vt == VT_UNKNOWN || vt == VT_DISPATCH)
+ {
+ if (pInterfaceMT && !GetArrayElementTypeWrapperAware(pArrayRef).CanCastTo(TypeHandle(pInterfaceMT)))
+ COMPlusThrow(kSafeArrayTypeMismatchException);
+ }
+
+ SAFEARRAY *pSafeArray = CreateSafeArrayDescriptorForArrayRef(pArrayRef, vt, pInterfaceMT);
+
+ HRESULT hr = SafeArrayAllocData(pSafeArray);
+ if (FAILED(hr))
+ {
+ SafeArrayDestroy(pSafeArray);
+ COMPlusThrowHR(hr);
+ }
+
+ RETURN pSafeArray;
+}
+
+//
+// CreateArrayRefForSafeArray creates an array object with the same layout and type
+// as the given safearray. The variant type of the safearray must be passed in.
+// The underlying element method table may also be specified (or NULL may be passed in
+// to use the base class method table for the VARTYPE).
+//
+
+BASEARRAYREF OleVariant::CreateArrayRefForSafeArray(SAFEARRAY *pSafeArray, VARTYPE vt,
+ MethodTable *pElementMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pSafeArray));
+ PRECONDITION(vt != VT_EMPTY);
+ }
+ CONTRACTL_END;
+
+ TypeHandle arrayType;
+ INT32 *pAllocateArrayArgs;
+ int cAllocateArrayArgs;
+ int Rank;
+ VARTYPE SafeArrayVT;
+
+ // Validate that the type of the SAFEARRAY is the expected type.
+ if (SUCCEEDED(ClrSafeArrayGetVartype(pSafeArray, &SafeArrayVT)) && (SafeArrayVT != VT_EMPTY))
+ {
+ if ((SafeArrayVT != vt) &&
+ !(vt == VT_INT && SafeArrayVT == VT_I4) &&
+ !(vt == VT_UINT && SafeArrayVT == VT_UI4) &&
+ !(vt == VT_I4 && SafeArrayVT == VT_INT) &&
+ !(vt == VT_UI4 && SafeArrayVT == VT_UINT) &&
+ !(vt == VT_UNKNOWN && SafeArrayVT == VT_DISPATCH) &&
+ !(SafeArrayVT == VT_RECORD)) // Add this to allowed values as a VT_RECORD might represent a
+ // valuetype with a single field that we'll just treat as a primitive type if possible.
+ {
+ COMPlusThrow(kSafeArrayTypeMismatchException);
+ }
+ }
+ else
+ {
+ UINT ArrayElemSize = SafeArrayGetElemsize(pSafeArray);
+ if (ArrayElemSize != GetElementSizeForVarType(vt, NULL))
+ {
+ COMPlusThrow(kSafeArrayTypeMismatchException, IDS_EE_SAFEARRAYTYPEMISMATCH);
+ }
+ }
+
+ // Determine if the input SAFEARRAY can be converted to an SZARRAY.
+ if ((pSafeArray->cDims == 1) && (pSafeArray->rgsabound->lLbound == 0))
+ {
+ // The SAFEARRAY maps to an SZARRAY. For SZARRAYs, AllocateArrayEx()
+ // expects the arguments to be a pointer to the count of elements in the array,
+ // and the size of the args must be set to 1.
+ Rank = 1;
+ cAllocateArrayArgs = 1;
+ pAllocateArrayArgs = (INT32 *) &pSafeArray->rgsabound[0].cElements;
+ }
+ else
+ {
+ // The SAFEARRAY maps to a general array. For general arrays, AllocateArrayEx()
+ // expects the arguments to be composed of the lower bound / element count pairs
+ // for each of the dimensions. We need to reverse the order in which the lower
+ // bounds and element counts are presented before we call AllocateArrayEx().
+ Rank = pSafeArray->cDims;
+ cAllocateArrayArgs = Rank * 2;
+ pAllocateArrayArgs = (INT32*)_alloca(sizeof(INT32) * Rank * 2);
+ INT32 * pBoundsPtr = pAllocateArrayArgs;
+
+ // Copy the lower bounds and counts of elements for the dimensions. These
+ // need to be copied in reverse order.
+ for (int i = Rank - 1; i >= 0; i--)
+ {
+ *pBoundsPtr++ = pSafeArray->rgsabound[i].lLbound;
+ *pBoundsPtr++ = pSafeArray->rgsabound[i].cElements;
+ }
+ }
+
+ // Retrieve the type of the array.
+ arrayType = GetArrayForVarType(vt, pElementMT, Rank);
+
+ // Allocate the array.
+ return (BASEARRAYREF) AllocateArrayEx(arrayType, pAllocateArrayArgs, cAllocateArrayArgs);
+}
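+
+// Worked example of the bounds reversal above (illustrative): for a
+// two-dimensional SAFEARRAY with rgsabound[0] = { cElements = 3, lLbound = 0 }
+// (the right-most dimension) and rgsabound[1] = { cElements = 2, lLbound = 0 },
+// the loop builds the argument list { 0, 2, 0, 3 }, i.e. lower-bound/count
+// pairs starting with the left-most dimension, and AllocateArrayEx() produces
+// a managed array shaped like int[2,3].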
+
+/* ------------------------------------------------------------------------- *
+ * Safearray marshaling
+ * ------------------------------------------------------------------------- */
+
+//
+// MarshalSafeArrayForArrayRef marshals the contents of the array ref into the given
+// safe array. It is assumed that the type & dimensions of the arrays are compatible.
+//
+void OleVariant::MarshalSafeArrayForArrayRef(BASEARRAYREF *pArrayRef,
+ SAFEARRAY *pSafeArray,
+ VARTYPE vt,
+ MethodTable *pInterfaceMT,
+ BOOL fSafeArrayIsValid /*= TRUE*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pSafeArray));
+ PRECONDITION(CheckPointer(pArrayRef));
+// PRECONDITION(CheckPointer(*pArrayRef));
+ PRECONDITION(vt != VT_EMPTY);
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pArrayRef);
+
+ // Retrieve the size and number of components.
+ SIZE_T dwComponentSize = GetElementSizeForVarType(vt, pInterfaceMT);
+ SIZE_T dwNumComponents = (*pArrayRef)->GetNumComponents();
+ BASEARRAYREF Array = NULL;
+
+ GCPROTECT_BEGIN(Array)
+ {
+ // Retrieve the marshaler to use to convert the contents.
+ const Marshaler *marshal = GetMarshalerForVarType(vt, TRUE);
+
+ // If the array is an array of wrappers, then we need to extract the objects
+ // being wrapped and create an array of those.
+ BOOL bArrayOfInterfaceWrappers;
+ if (IsArrayOfWrappers(pArrayRef, &bArrayOfInterfaceWrappers))
+ {
+ Array = ExtractWrappedObjectsFromArray(pArrayRef);
+ }
+ else
+ {
+ Array = *pArrayRef;
+ }
+
+ if (marshal == NULL || marshal->ComToOleArray == NULL)
+ {
+ if (pSafeArray->cDims == 1)
+ {
+ // If the array is single-dimensional then we can simply copy it over.
+ memcpyNoGCRefs(pSafeArray->pvData, Array->GetDataPtr(), dwNumComponents * dwComponentSize);
+ }
+ else
+ {
+ // Copy and transpose the data.
+ TransposeArrayData((BYTE*)pSafeArray->pvData, Array->GetDataPtr(), dwNumComponents, dwComponentSize, pSafeArray, FALSE);
+ }
+ }
+ else
+ {
+ {
+ PinningHandleHolder handle = GetAppDomain()->CreatePinningHandle((OBJECTREF)Array);
+
+ if (bArrayOfInterfaceWrappers)
+ {
+ _ASSERTE(vt == VT_UNKNOWN || vt == VT_DISPATCH);
+ // Signal to code:OleVariant::MarshalInterfaceArrayComToOleHelper that this was an array
+ // of UnknownWrapper or DispatchWrapper. It will use different logic and marshal each
+ // element according to its specific default interface.
+ pInterfaceMT = NULL;
+ }
+ marshal->ComToOleArray(&Array, pSafeArray->pvData, pInterfaceMT, TRUE, FALSE, fSafeArrayIsValid);
+ }
+
+ if (pSafeArray->cDims != 1)
+ {
+ // The array is multidimensional, so we need to transpose it.
+ TransposeArrayData((BYTE*)pSafeArray->pvData, (BYTE*)pSafeArray->pvData, dwNumComponents, dwComponentSize, pSafeArray, FALSE);
+ }
+ }
+ }
+ GCPROTECT_END();
+}
+
+//
+// MarshalArrayRefForSafeArray marshals the contents of the safe array into the given
+// array ref. It is assumed that the type & dimensions of the arrays are compatible.
+//
+
+void OleVariant::MarshalArrayRefForSafeArray(SAFEARRAY *pSafeArray,
+ BASEARRAYREF *pArrayRef,
+ VARTYPE vt,
+ MethodTable *pInterfaceMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pSafeArray));
+ PRECONDITION(CheckPointer(pArrayRef));
+ PRECONDITION(*pArrayRef != NULL);
+ PRECONDITION(vt != VT_EMPTY);
+ }
+ CONTRACTL_END;
+
+ ASSERT_PROTECTED(pArrayRef);
+
+ // Retrieve the number of components.
+ SIZE_T dwNumComponents = (*pArrayRef)->GetNumComponents();
+
+ // Retrieve the marshaler to use to convert the contents.
+ const Marshaler *marshal = GetMarshalerForVarType(vt, TRUE);
+
+ if (marshal == NULL || marshal->OleToComArray == NULL)
+ {
+ SIZE_T dwManagedComponentSize = (*pArrayRef)->GetComponentSize();
+
+#ifdef _DEBUG
+ {
+ // If we're blasting bits, this had better be a primitive type. Currency is
+ // an I8 on both the managed and unmanaged sides, so it's good enough.
+ TypeHandle th = (*pArrayRef)->GetArrayElementTypeHandle();
+
+ if (!CorTypeInfo::IsPrimitiveType(th.GetInternalCorElementType()))
+ {
+ _ASSERTE(!strcmp(th.AsMethodTable()->GetDebugClassName(),
+ "System.Currency"));
+ }
+ }
+#endif
+ if (pSafeArray->cDims == 1)
+ {
+ // If the array is single-dimensional then we can simply copy it over.
+ memcpyNoGCRefs((*pArrayRef)->GetDataPtr(), pSafeArray->pvData, dwNumComponents * dwManagedComponentSize);
+ }
+ else
+ {
+ // Copy and transpose the data.
+ TransposeArrayData((*pArrayRef)->GetDataPtr(), (BYTE*)pSafeArray->pvData, dwNumComponents, dwManagedComponentSize, pSafeArray, TRUE);
+ }
+ }
+ else
+ {
+ CQuickArray<BYTE> TmpArray;
+ BYTE* pSrcData = NULL;
+ SIZE_T dwNativeComponentSize = GetElementSizeForVarType(vt, pInterfaceMT);
+
+ if (pSafeArray->cDims != 1)
+ {
+ TmpArray.ReSizeThrows(dwNumComponents * dwNativeComponentSize);
+ pSrcData = TmpArray.Ptr();
+ TransposeArrayData(pSrcData, (BYTE*)pSafeArray->pvData, dwNumComponents, dwNativeComponentSize, pSafeArray, TRUE);
+ }
+ else
+ {
+ pSrcData = (BYTE*)pSafeArray->pvData;
+ }
+
+ PinningHandleHolder handle = GetAppDomain()->CreatePinningHandle((OBJECTREF)*pArrayRef);
+
+ marshal->OleToComArray(pSrcData, pArrayRef, pInterfaceMT);
+ }
+}
+
+void OleVariant::ConvertValueClassToVariant(OBJECTREF *pBoxedValueClass, VARIANT *pOleVariant)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pBoxedValueClass));
+ PRECONDITION(CheckPointer(pOleVariant));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ SafeComHolder<ITypeInfo> pTypeInfo = NULL;
+ RecordVariantHolder pRecHolder = pOleVariant;
+
+ BOOL bSuccess = FALSE;
+
+ // Initialize the OLE variant's VT_RECORD fields to NULL.
+ V_RECORDINFO(pRecHolder) = NULL;
+ V_RECORD(pRecHolder) = NULL;
+
+ // Retrieve the ITypeInfo for the value class.
+ MethodTable *pValueClassMT = (*pBoxedValueClass)->GetMethodTable();
+ IfFailThrow(GetITypeInfoForEEClass(pValueClassMT, &pTypeInfo, TRUE, TRUE, 0));
+
+ // Convert the ITypeInfo to an IRecordInfo.
+ hr = GetRecordInfoFromTypeInfo(pTypeInfo, &V_RECORDINFO(pRecHolder));
+ if (FAILED(hr))
+ {
+ // An HRESULT of TYPE_E_UNSUPFORMAT really means that the struct contains
+ // fields that aren't supported inside an OLEAUT record.
+ if (TYPE_E_UNSUPFORMAT == hr)
+ COMPlusThrow(kArgumentException, IDS_EE_RECORD_NON_SUPPORTED_FIELDS);
+ else
+ COMPlusThrowHR(hr);
+ }
+
+ // Allocate an instance of the record.
+ V_RECORD(pRecHolder) = V_RECORDINFO(pRecHolder)->RecordCreate();
+ IfNullThrow(V_RECORD(pRecHolder));
+
+ // Marshal the contents of the value class into the record.
+ FmtClassUpdateNative(pBoxedValueClass, (BYTE*)V_RECORD(pRecHolder), NULL);
+
+ pRecHolder.SuppressRelease();
+}
+
+void OleVariant::TransposeArrayData(BYTE *pDestData, BYTE *pSrcData, SIZE_T dwNumComponents, SIZE_T dwComponentSize, SAFEARRAY *pSafeArray, BOOL bSafeArrayToMngArray)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pDestData));
+ PRECONDITION(CheckPointer(pSrcData));
+ PRECONDITION(CheckPointer(pSafeArray));
+ }
+ CONTRACTL_END;
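+
+ // Illustration (informative): SAFEARRAY data is laid out column-major (the
+ // leftmost index varies fastest) while managed arrays are row-major (the
+ // rightmost index varies fastest). For a 2x3 array this routine converts
+ // between the element orders
+ //     a[0][0], a[1][0], a[0][1], a[1][1], a[0][2], a[1][2]   and
+ //     a[0][0], a[0][1], a[0][2], a[1][0], a[1][1], a[1][2].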
+
+ int iDims;
+ DWORD *aDestElemCount = (DWORD*)_alloca(pSafeArray->cDims * sizeof(DWORD));
+ DWORD *aDestIndex = (DWORD*)_alloca(pSafeArray->cDims * sizeof(DWORD));
+ BYTE **aDestDataPos = (BYTE **)_alloca(pSafeArray->cDims * sizeof(BYTE *));
+ SIZE_T *aDestDelta = (SIZE_T*)_alloca(pSafeArray->cDims * sizeof(SIZE_T));
+ CQuickArray<BYTE> TmpArray;
+
+ // If there are no components, then we are done.
+ if (dwNumComponents == 0)
+ return;
+
+ // Check to see if we are transposing in place or copying and transposing.
+ if (pSrcData == pDestData)
+ {
+ TmpArray.ReSizeThrows(dwNumComponents * dwComponentSize);
+ memcpyNoGCRefs(TmpArray.Ptr(), pSrcData, dwNumComponents * dwComponentSize);
+ pSrcData = TmpArray.Ptr();
+ }
+
+ // Copy the element counts in reverse order if we are copying from a safe array
+ // to a managed array, and in their original order otherwise.
+ if (bSafeArrayToMngArray)
+ {
+ for (iDims = 0; iDims < pSafeArray->cDims; iDims++)
+ aDestElemCount[iDims] = pSafeArray->rgsabound[pSafeArray->cDims - iDims - 1].cElements;
+ }
+ else
+ {
+ for (iDims = 0; iDims < pSafeArray->cDims; iDims++)
+ aDestElemCount[iDims] = pSafeArray->rgsabound[iDims].cElements;
+ }
+
+ // Initialize the indices for each dimension to 0.
+ memset(aDestIndex, 0, pSafeArray->cDims * sizeof(DWORD));
+
+ // Set all the destination data positions to the start of the array.
+ for (iDims = 0; iDims < pSafeArray->cDims; iDims++)
+ aDestDataPos[iDims] = (BYTE*)pDestData;
+
+ // Calculate the destination delta for each of the dimensions.
+ aDestDelta[pSafeArray->cDims - 1] = dwComponentSize;
+ for (iDims = pSafeArray->cDims - 2; iDims >= 0; iDims--)
+ aDestDelta[iDims] = aDestDelta[iDims + 1] * aDestElemCount[iDims + 1];
+
+ // Calculate the source data end pointer.
+ BYTE *pSrcDataEnd = pSrcData + dwNumComponents * dwComponentSize;
+ _ASSERTE(pDestData < pSrcData || pDestData >= pSrcDataEnd);
+
+ // Copy and transpose the data.
+ while (TRUE)
+ {
+ // Copy one component.
+ memcpyNoGCRefs(aDestDataPos[0], pSrcData, dwComponentSize);
+
+ // Update the source position.
+ pSrcData += dwComponentSize;
+
+ // Check to see if we have reached the end of the array.
+ if (pSrcData >= pSrcDataEnd)
+ break;
+
+ // Update the destination position: find the first dimension counter that has
+ // not yet reached its limit.
+ for (iDims = 0; aDestIndex[iDims] >= aDestElemCount[iDims] - 1; iDims++);
+
+ _ASSERTE(iDims < pSafeArray->cDims);
+
+ aDestIndex[iDims]++;
+ aDestDataPos[iDims] += aDestDelta[iDims];
+ for (--iDims; iDims >= 0; iDims--)
+ {
+ aDestIndex[iDims] = 0;
+ aDestDataPos[iDims] = aDestDataPos[iDims + 1];
+ }
+ }
+}
+
+BOOL OleVariant::IsArrayOfWrappers(BASEARRAYREF *pArray, BOOL *pbOfInterfaceWrappers)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ TypeHandle hndElemType = (*pArray)->GetArrayElementTypeHandle();
+
+ if (!hndElemType.IsTypeDesc())
+ {
+ if (hndElemType == TypeHandle(MscorlibBinder::GetClass(CLASS__DISPATCH_WRAPPER)) ||
+ hndElemType == TypeHandle(MscorlibBinder::GetClass(CLASS__UNKNOWN_WRAPPER)))
+ {
+ *pbOfInterfaceWrappers = TRUE;
+ return TRUE;
+ }
+
+ if (hndElemType == TypeHandle(MscorlibBinder::GetClass(CLASS__ERROR_WRAPPER)) ||
+ hndElemType == TypeHandle(MscorlibBinder::GetClass(CLASS__CURRENCY_WRAPPER)) ||
+ hndElemType == TypeHandle(MscorlibBinder::GetClass(CLASS__BSTR_WRAPPER)))
+ {
+ *pbOfInterfaceWrappers = FALSE;
+ return TRUE;
+ }
+ }
+
+ *pbOfInterfaceWrappers = FALSE;
+ return FALSE;
+}
+
+BASEARRAYREF OleVariant::ExtractWrappedObjectsFromArray(BASEARRAYREF *pArray)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pArray));
+ }
+ CONTRACTL_END;
+
+ TypeHandle hndWrapperType = (*pArray)->GetArrayElementTypeHandle();
+ TypeHandle hndElemType;
+ TypeHandle hndArrayType;
+ BOOL bIsMDArray = (*pArray)->IsMultiDimArray();
+ unsigned rank = (*pArray)->GetRank();
+ BASEARRAYREF RetArray = NULL;
+
+ // Retrieve the element type handle for the array to create.
+ if (hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__DISPATCH_WRAPPER)))
+ hndElemType = TypeHandle(g_pObjectClass);
+
+ else if (hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__UNKNOWN_WRAPPER)))
+ hndElemType = TypeHandle(g_pObjectClass);
+
+ else if (hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__BSTR_WRAPPER)))
+ hndElemType = TypeHandle(g_pStringClass);
+
+ else if (hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__ERROR_WRAPPER)))
+ hndElemType = TypeHandle(MscorlibBinder::GetClass(CLASS__INT32));
+
+ else if (hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__CURRENCY_WRAPPER)))
+ hndElemType = TypeHandle(MscorlibBinder::GetClass(CLASS__DECIMAL));
+
+ else
+ _ASSERTE(!"Invalid wrapper type");
+
+ // Retrieve the type handle that represents the array.
+ if (bIsMDArray)
+ {
+ hndArrayType = ClassLoader::LoadArrayTypeThrowing(hndElemType, ELEMENT_TYPE_ARRAY, rank);
+ }
+ else
+ {
+ hndArrayType = ClassLoader::LoadArrayTypeThrowing(hndElemType, ELEMENT_TYPE_SZARRAY);
+ }
+ _ASSERTE(!hndArrayType.IsNull());
+
+ // Set up the bounds arguments.
+ DWORD numArgs = rank*2;
+ INT32* args = (INT32*) _alloca(sizeof(INT32)*numArgs);
+
+ if (bIsMDArray)
+ {
+ const INT32* bounds = (*pArray)->GetBoundsPtr();
+ const INT32* lowerBounds = (*pArray)->GetLowerBoundsPtr();
+ for(unsigned int i=0; i < rank; i++)
+ {
+ args[2*i] = lowerBounds[i];
+ args[2*i+1] = bounds[i];
+ }
+ }
+ else
+ {
+ numArgs = 1;
+ args[0] = (*pArray)->GetNumComponents();
+ }
+
+ // Extract the values from the source array and copy them into the destination array.
+ BASEARRAYREF DestArray = (BASEARRAYREF)AllocateArrayEx(hndArrayType, args, numArgs);
+ GCPROTECT_BEGIN(DestArray)
+ {
+ SIZE_T NumComponents = (*pArray)->GetNumComponents();
+ AppDomain *pDomain = DestArray->GetAppDomain();
+
+ if (hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__DISPATCH_WRAPPER)))
+ {
+ DISPATCHWRAPPEROBJECTREF *pSrc = (DISPATCHWRAPPEROBJECTREF *)(*pArray)->GetDataPtr();
+ DISPATCHWRAPPEROBJECTREF *pSrcEnd = pSrc + NumComponents;
+ OBJECTREF *pDest = (OBJECTREF *)DestArray->GetDataPtr();
+ for (; pSrc < pSrcEnd; pSrc++, pDest++)
+ SetObjectReference(pDest, (*pSrc) != NULL ? (*pSrc)->GetWrappedObject() : NULL, pDomain);
+ }
+ else if (hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__UNKNOWN_WRAPPER)))
+ {
+ UNKNOWNWRAPPEROBJECTREF *pSrc = (UNKNOWNWRAPPEROBJECTREF *)(*pArray)->GetDataPtr();
+ UNKNOWNWRAPPEROBJECTREF *pSrcEnd = pSrc + NumComponents;
+ OBJECTREF *pDest = (OBJECTREF *)DestArray->GetDataPtr();
+ for (; pSrc < pSrcEnd; pSrc++, pDest++)
+ SetObjectReference(pDest, (*pSrc) != NULL ? (*pSrc)->GetWrappedObject() : NULL, pDomain);
+ }
+ else if (hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__ERROR_WRAPPER)))
+ {
+ ERRORWRAPPEROBJECTREF *pSrc = (ERRORWRAPPEROBJECTREF *)(*pArray)->GetDataPtr();
+ ERRORWRAPPEROBJECTREF *pSrcEnd = pSrc + NumComponents;
+ INT32 *pDest = (INT32 *)DestArray->GetDataPtr();
+ for (; pSrc < pSrcEnd; pSrc++, pDest++)
+ *pDest = (*pSrc) != NULL ? (*pSrc)->GetErrorCode() : NULL;
+ }
+ else if (hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__CURRENCY_WRAPPER)))
+ {
+ CURRENCYWRAPPEROBJECTREF *pSrc = (CURRENCYWRAPPEROBJECTREF *)(*pArray)->GetDataPtr();
+ CURRENCYWRAPPEROBJECTREF *pSrcEnd = pSrc + NumComponents;
+ DECIMAL *pDest = (DECIMAL *)DestArray->GetDataPtr();
+ for (; pSrc < pSrcEnd; pSrc++, pDest++)
+ {
+ if (*pSrc != NULL)
+ memcpyNoGCRefs(pDest, &(*pSrc)->GetWrappedObject(), sizeof(DECIMAL));
+ else
+ memset(pDest, 0, sizeof(DECIMAL));
+ }
+ }
+ else if (hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__BSTR_WRAPPER)))
+ {
+ BSTRWRAPPEROBJECTREF *pSrc = (BSTRWRAPPEROBJECTREF *)(*pArray)->GetDataPtr();
+ BSTRWRAPPEROBJECTREF *pSrcEnd = pSrc + NumComponents;
+ OBJECTREF *pDest = (OBJECTREF *)DestArray->GetDataPtr();
+ for (; pSrc < pSrcEnd; pSrc++, pDest++)
+ SetObjectReference(pDest, (*pSrc) != NULL ? (*pSrc)->GetWrappedObject() : NULL, pDomain);
+ }
+ else
+ {
+ _ASSERTE(!"Invalid wrapper type");
+ }
+
+ // GCPROTECT_END() will trash DestArray so we need to copy the OBJECTREF into
+ // a temp to be able to return it.
+ RetArray = DestArray;
+ }
+ GCPROTECT_END();
+
+ return RetArray;
+}
+
+TypeHandle OleVariant::GetWrappedArrayElementType(BASEARRAYREF *pArray)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pArray));
+ }
+ CONTRACTL_END;
+
+ TypeHandle hndWrapperType = (*pArray)->GetArrayElementTypeHandle();
+ TypeHandle pWrappedObjType;
+
+ if (hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__DISPATCH_WRAPPER)) ||
+ hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__UNKNOWN_WRAPPER)))
+ {
+ // There's no need to traverse the array up front. We'll use the default interface
+ // for each element in code:OleVariant::MarshalInterfaceArrayComToOleHelper.
+ pWrappedObjType = TypeHandle(g_pObjectClass);
+ }
+ else if (hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__ERROR_WRAPPER)))
+ {
+ pWrappedObjType = TypeHandle(MscorlibBinder::GetClass(CLASS__INT32));
+ }
+ else if (hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__CURRENCY_WRAPPER)))
+ {
+ pWrappedObjType = TypeHandle(MscorlibBinder::GetClass(CLASS__DECIMAL));
+ }
+ else if (hndWrapperType == TypeHandle(MscorlibBinder::GetClass(CLASS__BSTR_WRAPPER)))
+ {
+ pWrappedObjType = TypeHandle(g_pStringClass);
+ }
+ else
+ {
+ _ASSERTE(!"Invalid wrapper type");
+ }
+
+ return pWrappedObjType;
+}
+
+
+TypeHandle OleVariant::GetArrayElementTypeWrapperAware(BASEARRAYREF *pArray)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pArray));
+ }
+ CONTRACTL_END;
+
+ BOOL bArrayOfInterfaceWrappers;
+ if (IsArrayOfWrappers(pArray, &bArrayOfInterfaceWrappers))
+ {
+ return GetWrappedArrayElementType(pArray);
+ }
+ else
+ {
+ return (*pArray)->GetArrayElementTypeHandle();
+ }
+}
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+TypeHandle OleVariant::GetElementTypeForRecordSafeArray(SAFEARRAY* pSafeArray)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pSafeArray));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ SafeComHolder<IRecordInfo> pRecInfo = NULL;
+
+ GUID guid;
+ IfFailThrow(SafeArrayGetRecordInfo(pSafeArray, &pRecInfo));
+ IfFailThrow(pRecInfo->GetGuid(&guid));
+ MethodTable *pValueClass = GetValueTypeForGUID(guid);
+ if (!pValueClass)
+ COMPlusThrow(kArgumentException, IDS_EE_CANNOT_MAP_TO_MANAGED_VC);
+
+ return TypeHandle(pValueClass);
+}
+#endif //FEATURE_CLASSIC_COMINTEROP
+
+void OleVariant::AllocateEmptyStringForBSTR(BSTR bstr, STRINGREF *pStringObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(bstr));
+ PRECONDITION(CheckPointer(pStringObj));
+ }
+ CONTRACTL_END;
+
+ // The BSTR isn't null so allocate a managed string of the appropriate length.
+ ULONG length = SysStringByteLen(bstr);
+
+ if (length > MAX_SIZE_FOR_INTEROP)
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_STRING_TOOLONG);
+
+ // Check to see if the BSTR has a trailing odd byte.
+ BOOL bHasTrailByte = ((length%sizeof(WCHAR)) != 0);
+ length = length / sizeof(WCHAR);
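+ // For example, a 5-byte BSTR yields a 2-character string with a saved trail byte.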
+ SetObjectReference((OBJECTREF*)pStringObj, (OBJECTREF)StringObject::NewString(length, bHasTrailByte), GetAppDomain());
+}
+
+void OleVariant::ConvertContentsBSTRToString(BSTR bstr, STRINGREF *pStringObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(bstr));
+ PRECONDITION(CheckPointer(pStringObj));
+ }
+ CONTRACTL_END;
+
+ // This is the right thing to do, but note that we can sometimes end up
+ // thinking we're marshaling a BSTR when we're not, because BSTR is the
+ // default type.
+ ULONG length = SysStringByteLen((BSTR)bstr);
+ if (length > MAX_SIZE_FOR_INTEROP)
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_STRING_TOOLONG);
+
+ ULONG charLength = length/sizeof(WCHAR);
+ BOOL hasTrailByte = (length%sizeof(WCHAR) != 0);
+
+ memcpyNoGCRefs((*pStringObj)->GetBuffer(), bstr, charLength*sizeof(WCHAR));
+
+ if (hasTrailByte)
+ {
+ BYTE* buff = (BYTE*)bstr;
+ //set the trail byte
+ (*pStringObj)->SetTrailByte(buff[length-1]);
+ }
+
+ // null terminate the StringRef
+ WCHAR* wstr = (WCHAR *)(*pStringObj)->GetBuffer();
+ wstr[charLength] = '\0';
+}
+
+void OleVariant::ConvertBSTRToString(BSTR bstr, STRINGREF *pStringObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(bstr, NULL_OK));
+ PRECONDITION(CheckPointer(pStringObj));
+ }
+ CONTRACTL_END;
+
+ // Initialize the output string object to null to start.
+ *pStringObj = NULL;
+
+ // If the BSTR is null then we leave the output string object set to null.
+ if (bstr == NULL)
+ return;
+
+ AllocateEmptyStringForBSTR(bstr, pStringObj);
+ ConvertContentsBSTRToString(bstr, pStringObj);
+}
+
+BSTR OleVariant::AllocateEmptyBSTRForString(STRINGREF *pStringObj)
+{
+ CONTRACT(BSTR)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pStringObj));
+ PRECONDITION(*pStringObj != NULL);
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+ ULONG length = (*pStringObj)->GetStringLength();
+ if (length > MAX_SIZE_FOR_INTEROP)
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_STRING_TOOLONG);
+
+ length = length*sizeof(WCHAR);
+ if ((*pStringObj)->HasTrailByte())
+ {
+ length += 1;
+ }
+ BSTR bstr = SysAllocStringByteLen(NULL, length);
+ if (bstr == NULL)
+ ThrowOutOfMemory();
+
+ RETURN bstr;
+}
+
+void OleVariant::ConvertContentsStringToBSTR(STRINGREF *pStringObj, BSTR bstr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pStringObj));
+ PRECONDITION(*pStringObj != NULL);
+ PRECONDITION(CheckPointer(bstr));
+ }
+ CONTRACTL_END;
+
+ DWORD length = (DWORD)(*pStringObj)->GetStringLength();
+ if (length > MAX_SIZE_FOR_INTEROP)
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_STRING_TOOLONG);
+
+ BYTE *buff = (BYTE*)bstr;
+ ULONG byteLen = length * sizeof(WCHAR);
+
+ memcpyNoGCRefs(bstr, (*pStringObj)->GetBuffer(), byteLen);
+
+ if ((*pStringObj)->HasTrailByte())
+ {
+ BYTE b;
+ BOOL hasTrailB;
+ hasTrailB = (*pStringObj)->GetTrailByte(&b);
+ _ASSERTE(hasTrailB);
+ buff[byteLen] = b;
+ }
+ else
+ {
+ // copy the null terminator
+ bstr[length] = W('\0');
+ }
+}
+
+BSTR OleVariant::ConvertStringToBSTR(STRINGREF *pStringObj)
+{
+ CONTRACT(BSTR)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pStringObj));
+
+ // A null BSTR should only be returned if the input string is null.
+ POSTCONDITION(RETVAL != NULL || *pStringObj == NULL);
+ }
+ CONTRACT_END;
+
+ // Initialize the return BSTR value to null.
+ BSTR bstr = NULL;
+
+ // If the string object isn't null then we convert it to a BSTR. Otherwise we will return null.
+ if (*pStringObj != NULL)
+ {
+ bstr = AllocateEmptyBSTRForString(pStringObj);
+ ConvertContentsStringToBSTR(pStringObj, bstr);
+ }
+
+ RETURN bstr;
+}
+#endif // FEATURE_COMINTEROP
+
+#endif // CROSSGEN_COMPILE
diff --git a/src/vm/olevariant.h b/src/vm/olevariant.h
new file mode 100644
index 0000000000..debaf15817
--- /dev/null
+++ b/src/vm/olevariant.h
@@ -0,0 +1,606 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: OleVariant.h
+//
+
+//
+
+
+#ifndef _H_OLEVARIANT_
+#define _H_OLEVARIANT_
+
+
+// The COM interop native array marshaler is built on top of VT_* types.
+// The P/Invoke marshaler supports marshaling to WINBOOLs and ANSICHARs.
+// This is an annoying workaround to shoehorn these non-OleAut types into
+// the COM interop marshaler.
+#define VTHACK_INSPECTABLE 247
+#define VTHACK_HSTRING 248
+#define VTHACK_REDIRECTEDTYPE 249
+#define VTHACK_CBOOL 250
+#define VTHACK_NONBLITTABLERECORD 251
+#define VTHACK_BLITTABLERECORD 252
+#define VTHACK_ANSICHAR 253
+#define VTHACK_WINBOOL 254
+
+
+//These types must be kept in sync with the CorElementTypes defined in cor.h
+//NOTE: If you add values to this enum you need to look at COMOAVariant.cpp. There is
+// a mapping between CV type and VT types found there.
+//NOTE: This is also found in a table in OleVariant.cpp.
+//NOTE: These are also found in Variant.cs
+typedef enum
+{
+ CV_EMPTY = 0x0, // CV_EMPTY
+ CV_VOID = ELEMENT_TYPE_VOID,
+ CV_BOOLEAN = ELEMENT_TYPE_BOOLEAN,
+ CV_CHAR = ELEMENT_TYPE_CHAR,
+ CV_I1 = ELEMENT_TYPE_I1,
+ CV_U1 = ELEMENT_TYPE_U1,
+ CV_I2 = ELEMENT_TYPE_I2,
+ CV_U2 = ELEMENT_TYPE_U2,
+ CV_I4 = ELEMENT_TYPE_I4,
+ CV_U4 = ELEMENT_TYPE_U4,
+ CV_I8 = ELEMENT_TYPE_I8,
+ CV_U8 = ELEMENT_TYPE_U8,
+ CV_R4 = ELEMENT_TYPE_R4,
+ CV_R8 = ELEMENT_TYPE_R8,
+ CV_STRING = ELEMENT_TYPE_STRING,
+
+ // For the remaining types, we map directly when a value is defined in
+ // CorHdr.h and fill in holes otherwise.
+ CV_PTR = ELEMENT_TYPE_PTR,
+ CV_DATETIME = 0x10, // ELEMENT_TYPE_BYREF
+ CV_TIMESPAN = 0x11, // ELEMENT_TYPE_VALUETYPE
+ CV_OBJECT = ELEMENT_TYPE_CLASS,
+ CV_DECIMAL = 0x13, // ELEMENT_TYPE_UNUSED1
+ CV_CURRENCY = 0x14, // ELEMENT_TYPE_ARRAY
+ CV_ENUM = 0x15, //
+ CV_MISSING = 0x16, //
+ CV_NULL = 0x17, //
+ CV_LAST = 0x18, //
+} CVTypes;
+
+//Mapping from CVType to type handle. Used for conversion between the two internally.
+extern const BinderClassID CVTypeToBinderClassID[];
+
+inline TypeHandle GetTypeHandleForCVType(CVTypes elemType)
+{
+ CONTRACT (TypeHandle)
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ PRECONDITION(elemType < CV_LAST);
+ }
+ CONTRACT_END;
+
+ RETURN TypeHandle(MscorlibBinder::GetClass(CVTypeToBinderClassID[elemType]));
+}
+
+// Use this very carefully. There is no direct mapping between
+// CorElementType and CVTypes for a number of types. In those cases
+// we return CV_LAST, which you must check for at the call site.
+extern CVTypes CorElementTypeToCVTypes(CorElementType type);
+
+
+#ifdef FEATURE_COMINTEROP
+
+#include <pshpack1.h>
+
+
+/*** Variant Design Restrictions (i.e., decisions we've had to make differently):
+ 1) A Variant containing all zeros should be a valid Variant of type empty.
+ 2) Variant must contain an OBJECTREF field for Objects, etc. Since we
+ have no way of expressing a union between an OBJECTREF and an int, we
+ always box Decimals in a Variant.
+ 3) The m_type field is not a CVType and will contain extra bits. People
+ should use VariantData::GetType() to get the CVType.
+ 4) You should use SetObjRef and GetObjRef to manipulate the OBJECTREF field.
+ These will handle write barriers correctly, as well as CV_EMPTY.
+
+
+ Empty, Missing & Null:
+ Variants of type CV_EMPTY will be all zeros. This forces us to add in
+ special cases for all functions that convert a Variant into an object (such
+ as copying a Variant into an Object[]).
+
+ Variants of type Missing and Null will have their objectref field set to
+ Missing.Value and Null.Value respectively. This simplifies the code in
+ Variant.cs and the code strewn throughout the EE.
+*/
+
+#define VARIANT_TYPE_MASK 0xFFFF
+#define VT_MASK 0xFF000000
+#define VT_BITSHIFT 24
+
+struct VariantData
+{
+public:
+ static void NewVariant(VariantData * const& dest, const CVTypes type, INT64 data
+ DEBUG_ARG(BOOL bDestIsInterior = FALSE));
+
+ FORCEINLINE CVTypes GetType() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (CVTypes)(m_type & VARIANT_TYPE_MASK);
+ }
+
+ FORCEINLINE void SetType(INT32 in)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_type = in;
+ }
+
+ FORCEINLINE VARTYPE GetVT() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ VARTYPE vt = (m_type & VT_MASK) >> VT_BITSHIFT;
+ if (vt & 0x80)
+ {
+ vt &= ~0x80;
+ vt |= VT_ARRAY;
+ }
+ return vt;
+ }
+
+ FORCEINLINE void SetVT(VARTYPE vt)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION( !(vt & VT_BYREF) );
+ PRECONDITION( (vt & ~VT_ARRAY) < 128 );
+ }
+ CONTRACTL_END;
+
+ if (vt & VT_ARRAY)
+ {
+ vt &= ~VT_ARRAY;
+ vt |= 0x80;
+ }
+ m_type = (m_type & ~((INT32)VT_MASK)) | (vt << VT_BITSHIFT);
+ }
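+
+ // Worked example (illustrative): for vt == (VT_ARRAY | VT_I4) == 0x2003,
+ // SetVT() folds VT_ARRAY into bit 7 and stores 0x83 in the top byte of
+ // m_type; GetVT() strips bit 7 and ORs VT_ARRAY back in, returning 0x2003.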
+
+
+ FORCEINLINE OBJECTREF GetObjRef() const
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return (OBJECTREF)m_or;
+ }
+
+ OBJECTREF* GetObjRefPtr()
+ {
+ CONTRACT (OBJECTREF*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN (OBJECTREF*)&m_or;
+ }
+
+ void SetObjRef(OBJECTREF objRef)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (objRef!=NULL)
+ {
+ SetObjectReferenceUnchecked((OBJECTREF*)&m_or, objRef);
+ }
+ else
+ {
+ // Casting trick to avoid going through the overloaded operator= (which
+ // in this case would trigger a false write barrier violation assert).
+ }
+ }
+
+ FORCEINLINE void* GetData() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (void *)(&m_data);
+ }
+
+ FORCEINLINE INT8 GetDataAsInt8() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (INT8)m_data;
+ }
+
+ FORCEINLINE UINT8 GetDataAsUInt8() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (UINT8)m_data;
+ }
+
+ FORCEINLINE INT16 GetDataAsInt16() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (INT16)m_data;
+ }
+
+ FORCEINLINE UINT16 GetDataAsUInt16() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (UINT16)m_data;
+ }
+
+ FORCEINLINE INT32 GetDataAsInt32() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (INT32)m_data;
+ }
+
+ FORCEINLINE UINT32 GetDataAsUInt32() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (UINT32)m_data;
+ }
+
+ FORCEINLINE INT64 GetDataAsInt64() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (INT64)m_data;
+ }
+
+ FORCEINLINE UINT64 GetDataAsUInt64() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (UINT64)m_data;
+ }
+
+ FORCEINLINE void SetData(void *in)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (!in)
+ m_data=0;
+ else
+ m_data = *(INT64 *)in;
+ }
+
+ // When possible, please use the most specific SetDataAsXxx function.
+ // This is necessary to guarantee we do sign extension correctly
+ // for all types smaller than 32 bits. R4s, R8s, U8s, DateTimes,
+ // Currencies, and TimeSpans can all be treated as ints of the appropriate
+ // size - sign extension is irrelevant in those cases.
+ FORCEINLINE void SetDataAsInt8(INT8 data)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_data=data;
+ }
+
+ FORCEINLINE void SetDataAsUInt8(UINT8 data)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_data=data;
+ }
+
+ FORCEINLINE void SetDataAsInt16(INT16 data)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_data=data;
+ }
+
+ FORCEINLINE void SetDataAsUInt16(UINT16 data)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_data=data;
+ }
+
+ FORCEINLINE void SetDataAsInt32(INT32 data)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_data=data;
+ }
+
+ FORCEINLINE void SetDataAsUInt32(UINT32 data)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_data=data;
+ }
+
+ FORCEINLINE void SetDataAsInt64(INT64 data)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_data=data;
+ }
+
+private:
+ Object* m_or;
+ INT64 m_data;
+ INT32 m_type;
+};
+
+#include <poppack.h>
+
+#endif // FEATURE_COMINTEROP
+
+
+class OleVariant
+{
+ public:
+
+#ifdef FEATURE_COMINTEROP
+ // New variant conversion
+ static void MarshalOleVariantForObject(OBJECTREF * const & pObj, VARIANT *pOle);
+ static void MarshalObjectForOleVariant(const VARIANT *pOle, OBJECTREF * const & pObj);
+ static void MarshalOleRefVariantForObject(OBJECTREF *pObj, VARIANT *pOle);
+
+ // Helper functions to convert BSTR to managed strings.
+ static void AllocateEmptyStringForBSTR(BSTR bstr, STRINGREF *pStringObj);
+ static void ConvertContentsBSTRToString(BSTR bstr, STRINGREF *pStringObj);
+ static void ConvertBSTRToString(BSTR bstr, STRINGREF *pStringObj);
+
+ // Helper functions to convert managed strings to BSTRs.
+ static BSTR AllocateEmptyBSTRForString(STRINGREF *pStringObj);
+ static void ConvertContentsStringToBSTR(STRINGREF *pStringObj, BSTR bstr);
+ static BSTR ConvertStringToBSTR(STRINGREF *pStringObj);
+ static void MarshalComVariantForOleVariant(VARIANT *pOle, VariantData *pCom);
+ static void MarshalOleVariantForComVariant(VariantData *pCom, VARIANT *pOle);
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP
+ // Safearray conversion
+
+ static SAFEARRAY* CreateSafeArrayDescriptorForArrayRef(BASEARRAYREF* pArrayRef, VARTYPE vt,
+ MethodTable* pInterfaceMT = NULL);
+
+ static SAFEARRAY* CreateSafeArrayForArrayRef(BASEARRAYREF* pArrayRef, VARTYPE vt,
+ MethodTable* pInterfaceMT = NULL);
+
+ static BASEARRAYREF CreateArrayRefForSafeArray(SAFEARRAY* pSafeArray, VARTYPE vt,
+ MethodTable* pElementMT);
+
+ static void MarshalSafeArrayForArrayRef(BASEARRAYREF* pArrayRef,
+ SAFEARRAY* pSafeArray,
+ VARTYPE vt,
+ MethodTable* pInterfaceMT,
+ BOOL fSafeArrayIsValid = TRUE);
+
+ static void MarshalArrayRefForSafeArray(SAFEARRAY* pSafeArray,
+ BASEARRAYREF* pArrayRef,
+ VARTYPE vt,
+ MethodTable* pInterfaceMT);
+
+ // Helper function to convert a boxed value class to an OLE variant.
+ static void ConvertValueClassToVariant(OBJECTREF *pBoxedValueClass, VARIANT *pOleVariant);
+
+ // Helper function to transpose the data in a multidimensionnal array.
+ static void TransposeArrayData(BYTE *pDestData, BYTE *pSrcData, SIZE_T dwNumComponents, SIZE_T dwComponentSize, SAFEARRAY *pSafeArray, BOOL bSafeArrayToMngArray);
+
+ // Helper to determine if an array is an array of wrappers.
+ static BOOL IsArrayOfWrappers(BASEARRAYREF *pArray, BOOL *pbOfInterfaceWrappers);
+
+ // Helper to extract the wrapped objects from an array.
+ static BASEARRAYREF ExtractWrappedObjectsFromArray(BASEARRAYREF *pArray);
+
+ static HRESULT ClearAndInsertContentsIntoByrefRecordVariant(VARIANT* pOle, OBJECTREF* pObj);
+
+ static BOOL IsValidArrayForSafeArrayElementType(BASEARRAYREF* pArrayRef, VARTYPE vtExpected);
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP
+ static BOOL CheckVariant(VARIANT *pOle);
+
+ // Type conversion utilities
+ static void ExtractContentsFromByrefVariant(VARIANT* pByrefVar, VARIANT* pDestVar);
+ static void InsertContentsIntoByrefVariant(VARIANT* pSrcVar, VARIANT* pByrefVar);
+ static void CreateByrefVariantForVariant(VARIANT* pSrcVar, VARIANT* pByrefVar);
+
+ static VARTYPE GetVarTypeForComVariant(VariantData* pComVariant);
+#endif // FEATURE_COMINTEROP
+
+ static CVTypes GetCVTypeForVarType(VARTYPE vt);
+ static VARTYPE GetVarTypeForCVType(CVTypes);
+ static VARTYPE GetVarTypeForTypeHandle(TypeHandle typeHnd);
+
+ static VARTYPE GetVarTypeForValueClassArrayName(LPCUTF8 pArrayClassName);
+ static VARTYPE GetElementVarTypeForArrayRef(BASEARRAYREF pArrayRef);
+
+ // Note that Rank == 0 means SZARRAY (that is, rank 1 with no lower bounds)
+ static TypeHandle GetArrayForVarType(VARTYPE vt, TypeHandle elemType, unsigned rank=0);
+ static UINT GetElementSizeForVarType(VARTYPE vt, MethodTable* pInterfaceMT);
+
+#ifdef FEATURE_COMINTEROP
+ // Determine the element type of the objects being wrapped by an array of wrappers.
+ static TypeHandle GetWrappedArrayElementType(BASEARRAYREF* pArray);
+
+ // Determines the element type of an array taking wrappers into account. This means
+ // that if an array of wrappers is passed in, the returned element type will be that
+ // of the wrapped objects, not of the wrappers.
+ static TypeHandle GetArrayElementTypeWrapperAware(BASEARRAYREF* pArray);
+
+ // Determine the type of the elements for a safe array of records.
+ static TypeHandle GetElementTypeForRecordSafeArray(SAFEARRAY* pSafeArray);
+
+ // Helper called from MarshalIUnknownArrayComToOle and MarshalIDispatchArrayComToOle.
+ static void MarshalInterfaceArrayComToOleHelper(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pElementMT, BOOL bDefaultIsDispatch);
+#endif // FEATURE_COMINTEROP
+
+ struct Marshaler
+ {
+#ifdef FEATURE_COMINTEROP
+ void (*OleToComVariant)(VARIANT* pOleVariant, VariantData* pComVariant);
+ void (*ComToOleVariant)(VariantData* pComVariant, VARIANT* pOleVariant);
+ void (*OleRefToComVariant)(VARIANT* pOleVariant, VariantData* pComVariant);
+#endif // FEATURE_COMINTEROP
+ void (*OleToComArray)(void* oleArray, BASEARRAYREF* pComArray, MethodTable* pInterfaceMT);
+ void (*ComToOleArray)(BASEARRAYREF* pComArray, void* oleArray, MethodTable* pInterfaceMT,
+ BOOL fBestFitMapping, BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid);
+ void (*ClearOleArray)(void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT);
+ };
+
+ static const Marshaler* GetMarshalerForVarType(VARTYPE vt, BOOL fThrow);
+
+ static void MarshalVariantArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fMarshalByrefArgOnly,
+ BOOL fOleArrayIsValid, int nOleArrayStepLength = 1);
+
+private:
+
+
+ // Specific marshaler functions
+
+ static void MarshalBoolArrayOleToCom(void *oleArray, BASEARRAYREF* pComArray,
+ MethodTable* pInterfaceMT);
+ static void MarshalBoolArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayIsValid);
+
+ static void MarshalWinBoolArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
+ MethodTable* pInterfaceMT);
+ static void MarshalWinBoolArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid);
+ static void MarshalCBoolVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+ static void MarshalCBoolVariantComToOle(VariantData* pComVariant, VARIANT* pOleVariant);
+ static void MarshalCBoolVariantOleRefToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+ static void MarshalCBoolArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
+ MethodTable* pInterfaceMT);
+ static void MarshalCBoolArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid);
+
+ static void MarshalAnsiCharArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
+ MethodTable* pInterfaceMT);
+ static void MarshalAnsiCharArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid);
+
+#ifdef FEATURE_COMINTEROP
+ static void MarshalIDispatchArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid);
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP
+ static void MarshalBSTRArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
+ MethodTable* pInterfaceMT);
+ static void MarshalBSTRArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid);
+ static void ClearBSTRArray(void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT);
+#endif // FEATURE_COMINTEROP
+
+ static void MarshalNonBlittableRecordArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
+ MethodTable* pInterfaceMT);
+ static void MarshalNonBlittableRecordArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid);
+ static void ClearNonBlittableRecordArray(void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT);
+
+ static void MarshalLPWSTRArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
+ MethodTable* pInterfaceMT);
+ static void MarshalLPWSTRRArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid);
+ static void ClearLPWSTRArray(void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT);
+
+ static void MarshalLPSTRArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
+ MethodTable* pInterfaceMT);
+ static void MarshalLPSTRRArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid);
+ static void ClearLPSTRArray(void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT);
+
+ static void MarshalDateArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
+ MethodTable* pInterfaceMT);
+ static void MarshalDateArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid);
+
+ static void MarshalRecordArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray, MethodTable* pElementMT);
+ static void MarshalRecordArrayComToOle(BASEARRAYREF* pComArray, void* oleArray, MethodTable* pElementMT,
+ BOOL fBestFitMapping, BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid);
+ static void ClearRecordArray(void* oleArray, SIZE_T cElements, MethodTable* pElementMT);
+
+#ifdef FEATURE_COMINTEROP
+ static HRESULT MarshalCommonOleRefVariantForObject(OBJECTREF *pObj, VARIANT *pOle);
+ static void MarshalInterfaceArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
+ MethodTable* pInterfaceMT);
+ static void MarshalIUnknownArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid);
+ static void ClearInterfaceArray(void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT);
+
+ static void MarshalBoolVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+
+ static void MarshalWinBoolVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+ static void MarshalWinBoolVariantComToOle(VariantData* pComVariant, VARIANT* pOleVariant);
+ static void MarshalWinBoolVariantOleRefToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+
+ static void MarshalAnsiCharVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+ static void MarshalAnsiCharVariantComToOle(VariantData* pComVariant, VARIANT* pOleVariant);
+ static void MarshalAnsiCharVariantOleRefToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+
+ static void MarshalInterfaceVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+ static void MarshalInterfaceVariantComToOle(VariantData* pComVariant, VARIANT* pOleVariant);
+ static void MarshalInterfaceVariantOleRefToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+
+ static void MarshalBSTRVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+ static void MarshalBSTRVariantComToOle(VariantData* pComVariant, VARIANT* pOleVariant);
+
+ static void MarshalDateVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+ static void MarshalDateVariantComToOle(VariantData* pComVariant, VARIANT* pOleVariant);
+ static void MarshalDateVariantOleRefToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+
+ static void MarshalDecimalVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+ static void MarshalDecimalVariantComToOle(VariantData* pComVariant, VARIANT* pOleVariant);
+ static void MarshalDecimalVariantOleRefToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+ static void MarshalRecordVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+ static void MarshalRecordVariantComToOle(VariantData* pComVariant, VARIANT* pOleVariant);
+ static void MarshalRecordVariantOleRefToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+#endif
+
+ static void MarshalCurrencyVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+ static void MarshalCurrencyVariantComToOle(VariantData* pComVariant, VARIANT* pOleVariant);
+ static void MarshalCurrencyVariantOleRefToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+ static void MarshalCurrencyArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
+ MethodTable* pInterfaceMT);
+ static void MarshalCurrencyArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid);
+
+ static void MarshalVariantArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
+ MethodTable* pInterfaceMT);
+ static void MarshalVariantArrayComToOle(BASEARRAYREF* pComArray, void* oleArray,
+ MethodTable* pInterfaceMT, BOOL fBestFitMapping,
+ BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid);
+ static void ClearVariantArray(void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT);
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+ static void MarshalArrayVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+ static void MarshalArrayVariantComToOle(VariantData* pComVariant, VARIANT* pOleVariant);
+ static void MarshalArrayVariantOleRefToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+#endif
+
+ static void MarshalErrorVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+ static void MarshalErrorVariantOleRefToCom(VARIANT* pOleVariant, VariantData* pComVariant);
+ static void MarshalErrorVariantComToOle(VariantData* pComVariant, VARIANT* pOleVariant);
+#endif // FEATURE_COMINTEROP
+};
+
+#endif
diff --git a/src/vm/packedfields.inl b/src/vm/packedfields.inl
new file mode 100644
index 0000000000..f854f788b1
--- /dev/null
+++ b/src/vm/packedfields.inl
@@ -0,0 +1,346 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+//
+// Provides a mechanism to store an array of DWORD-typed fields in a space-efficient manner. There are some
+// caveats:
+// 1) Fields can be written and read in an uncompressed form (a simple array of DWORD values) until the
+// PackFields() method is invoked. Once this method has been invoked (and returned true) fields have been
+// compacted and must not be modified again. That is, the primary usage of this class is to store a set of
+// initialized-once fields.
+// 2) The compaction algorithm relies on the fields containing small values (such as counts). Avoid storing
+// fields that have special sentinel values (such as all bits set) which will frequently set high order
+// bits.
+// 3) An instance of PackedDWORDFields will take up a fixed quantity of memory equivalent to an array of
+// DWORD fields. If PackFields() returns true then the field values frozen at the time of the call have
+// been compressed into a fewer number of bytes in-place. This smaller size will always be a multiple of
+// sizeof(DWORD) in length and is reported by GetPackedSize(). If a PackedDWORDFields structure is being
+// declared as a field inside another structure it is typically wise to place the field last to take
+// advantage of this size reduction (e.g. when saving the outer structure into an ngen image). If
+// PackFields() returns false then the fields remain unpacked and unchanged.
+// 4) The caller retains the responsibility of recording whether an instance of PackedDWORDFields is in the
+// packed or unpacked state. This is important since incorrect behavior will result if the wrong methods
+// are used for the current state (e.g. calling GetUnpackedField() on a packed instance). This is not done
+// automatically since there are no bits free to store the state. However, under a debug build correct
+// usage will be checked (at the expense of extra storage space).
+// 5) The space savings come at a runtime CPU cost to access the fields. Do not use this mechanism to
+// compact fields that must be read on a performance-critical path. If unsure, measure the performance of
+// this solution before committing to it.
+//
+// ============================================================================
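+
+// Illustrative usage sketch (added for clarity; the field count and values below
+// are hypothetical, not taken from any real caller):
+//
+//     PackedDWORDFields<3> fields;
+//     fields.SetUnpackedField(0, 5);
+//     fields.SetUnpackedField(1, 0);
+//     fields.SetUnpackedField(2, 12);
+//     if (fields.PackFields())
+//     {
+//         // Packed: from here on only GetPackedField() may be used.
+//         DWORD dwValue = fields.GetPackedField(2);   // == 12
+//     }
+//     else
+//     {
+//         // Values were too large to compress; keep using GetUnpackedField().
+//         DWORD dwValue = fields.GetUnpackedField(2); // == 12
+//     }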
+
+// Describe an array of FIELD_COUNT DWORDs. Each entry is addressed via a zero-based index and is expected to
+// frequently contain a small integer and remain frozen after initialization.
+template <DWORD FIELD_COUNT>
+class PackedDWORDFields
+{
+ // Some constants to make the code a little more readable.
+ enum Constants
+ {
+ kMaxLengthBits = 5, // Number of bits needed to express the maximum length of a field (32-bits)
+ kBitsPerDWORD = 32, // Number of bits in a DWORD
+ };
+
+public:
+ // Fields are all initialized to zero.
+ PackedDWORDFields()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ memset(m_rgUnpackedFields, 0, sizeof(m_rgUnpackedFields));
+#ifdef _DEBUG
+ memset(m_rgDebugUnpackedFields, 0, sizeof(m_rgDebugUnpackedFields));
+ m_fFieldsPacked = false;
+#endif // _DEBUG
+ }
+
+ // Get the value of the given field when the structure is in its unpacked state.
+ DWORD GetUnpackedField(DWORD dwFieldIndex)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(dwFieldIndex < FIELD_COUNT);
+ _ASSERTE(!m_fFieldsPacked);
+
+ return m_rgUnpackedFields[dwFieldIndex];
+ }
+
+ // Set the value of the given field when the structure is in its unpacked state. Setting field values
+ // multiple times is allowed but only until a successful call to PackFields is made.
+ void SetUnpackedField(DWORD dwFieldIndex, DWORD dwValue)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(dwFieldIndex < FIELD_COUNT);
+ _ASSERTE(!m_fFieldsPacked);
+
+ m_rgUnpackedFields[dwFieldIndex] = dwValue;
+
+#ifdef _DEBUG
+ m_rgDebugUnpackedFields[dwFieldIndex] = dwValue;
+#endif // _DEBUG
+ }
+
+ // Attempt to optimize the set of fields given their current values. Returns false if compaction wouldn't
+ // achieve any space savings (in this case the structure remains in the unpacked state and the caller can
+ // continue to use the *UnpackedField methods above or even re-attempt PackFields() with different field
+ // values). If true is returned the data has been compacted into a smaller amount of space (this will
+ // always be a multiple of sizeof(DWORD) in size). This size can be queried using GetPackedSize() below.
+ // Once PackFields() has returned true fields can no longer be modified and field values must be retrieved
+ // via GetPackedField() rather than GetUnpackedField().
+ bool PackFields()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ // Can't re-pack a packed structure.
+ _ASSERTE(!m_fFieldsPacked);
+
+ // First compute the number of bits of space we'd need for a packed representation. Do this before
+ // making any changes since sometimes we'd end up expanding the data instead and in this case we wish
+ // to return false and make no updates to the structure.
+
+ // There's a fixed overhead of kMaxLengthBits for each field (we store the packed fields as a
+ // bit-stream that alternates between a field length of kMaxLengthBits and a variable-length
+ // bitfield containing the field value).
+ DWORD dwTotalPackedBits = FIELD_COUNT * kMaxLengthBits;
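+
+ // For example: with FIELD_COUNT == 8 the length headers alone cost 8 * 5 = 40
+ // bits; if every field value fits in 3 bits the packed total is 40 + 8 * 3 = 64
+ // bits (2 DWORDs), versus 8 DWORDs for the unpacked representation.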
+
+ // For each field calculate exactly how many bits we'd need to store the field value and add this to
+ // the total.
+ for (DWORD i = 0; i < FIELD_COUNT; i++)
+ dwTotalPackedBits += BitsRequired(m_rgUnpackedFields[i]);
+
+ // Now that we have the total, is it smaller than a simple array of DWORDs?
+ if (dwTotalPackedBits >= (FIELD_COUNT * kBitsPerDWORD))
+ return false;
+
+ // Compaction will save us space. We're committed to implementing that compaction now.
+
+ // Work from a copy of the unpacked fields since we're about to start modifying the space in which
+ // they're currently stored.
+ DWORD rgUnpackedFields[FIELD_COUNT];
+ memcpy(rgUnpackedFields, m_rgUnpackedFields, sizeof(rgUnpackedFields));
+
+ // Start writing a stream of bits. For each field write a fixed sized header describing the number of
+ // bits required to express the field followed by the field value itself.
+ DWORD dwOffset = 0;
+ for (DWORD i = 0; i < FIELD_COUNT; i++)
+ {
+ // Find the minimal number of bits required to encode the current field's value.
+ DWORD dwFieldLength = BitsRequired(rgUnpackedFields[i]);
+ _ASSERTE(dwFieldLength > 0 && dwFieldLength <= kBitsPerDWORD);
+
+ // Write the size field. Note that we store the size biased by one. That is, a field length of one
+ // is encoded as zero. We do this so we can express a range of field sizes from 1 through 32,
+ // encompassing the worst case scenario (a full 32 bits). It comes at the cost of not being able
+ // to encode zero-valued fields with zero bits. If this is deemed an important optimization in the
+ // future we could always give up on a simple linear mapping of the size field and use a lookup
+ // table to map encoded values to the real sizes. Experiments with EEClass packed fields over
+ // mscorlib show that this currently doesn't yield us much benefit, primarily due to the DWORD
+ // round-up size semantic, which implies we'd need a lot more optimization than this to reduce the
+ // average structure size below the next DWORD threshold.
+ BitVectorSet(dwOffset, kMaxLengthBits, dwFieldLength - 1);
+ dwOffset += kMaxLengthBits;
+
+ // Write the field value itself.
+ BitVectorSet(dwOffset, dwFieldLength, rgUnpackedFields[i]);
+ dwOffset += dwFieldLength;
+ }
+
+#ifdef _DEBUG
+ m_fFieldsPacked = true;
+#endif // _DEBUG
+
+ // Compaction was successful.
+ return true;
+ }
+
+ // Return the size in bytes of a compacted structure (it is illegal to call this on an uncompacted
+ // structure).
+ DWORD GetPackedSize()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(m_fFieldsPacked);
+
+ // Walk the field stream, reading each header (which is fixed size) and then using the value of the
+ // header to skip the field value.
+ DWORD cBits = 0;
+ for (DWORD i = 0; i < FIELD_COUNT; i++)
+ cBits += kMaxLengthBits + BitVectorGet(cBits, kMaxLengthBits) + 1; // +1 since size is [1,32] not [0,31]
+
+ // Compute the number of DWORDs needed to store the bits of the encoding.
+ // static_cast would not be necessary if ALIGN_UP were templated like FitsIn.
+ DWORD cDWORDs = static_cast<DWORD>(ALIGN_UP(cBits, kBitsPerDWORD)) / kBitsPerDWORD;
+
+ // Return the total structure size.
+ return offsetof(PackedDWORDFields<FIELD_COUNT>, m_rgPackedFields) + (cDWORDs * sizeof(DWORD));
+ }
+
+ // Get the value of a packed field. Illegal to call this on an uncompacted structure.
+ DWORD GetPackedField(DWORD dwFieldIndex)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(dwFieldIndex < FIELD_COUNT);
+ _ASSERTE(m_fFieldsPacked);
+
+ // Walk past all the predecessor fields.
+ DWORD dwOffset = 0;
+ for (DWORD i = 0; i < dwFieldIndex; i++)
+ dwOffset += kMaxLengthBits + BitVectorGet(dwOffset, kMaxLengthBits) + 1; // +1 since size is [1,32] not [0,31]
+
+ // The next kMaxLengthBits bits contain the length in bits of the field we want (-1 due to the way we
+ // encode the length).
+ DWORD dwFieldLength = BitVectorGet(dwOffset, kMaxLengthBits) + 1;
+ dwOffset += kMaxLengthBits;
+
+ // Grab the field value.
+ DWORD dwReturn = BitVectorGet(dwOffset, dwFieldLength);
+
+ // On debug builds ensure the encoded field value is the same as the original unpacked version.
+ _ASSERTE(dwReturn == m_rgDebugUnpackedFields[dwFieldIndex]);
+ return dwReturn;
+ }
+
+private:
+ // Return the minimum number of bits required to encode a DWORD value by stripping out the
+ // most-significant leading zero bits. Returns a value between 1 and 32 inclusive (we never encode
+ // anything with zero bits).
+ DWORD BitsRequired(DWORD dwValue)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Starting with a bit-mask of the most significant bit and iterating over masks for successively less
+ // significant bits, stop as soon as the mask coincides with a set bit in the value. Simultaneously
+ // we're counting down the bits required to express the range of values implied by seeing the
+ // corresponding bit set in the value (e.g. when we're testing the high bit we know we'd need 32-bits
+ // to encode the range of values that have this bit set). Stop when we get to one bit (we never return
+ // 0 bits required, even for an input value of 0).
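+ // For example: BitsRequired(5) == 3 (101 in binary) and BitsRequired(0) == 1.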
+ DWORD dwMask = 0x80000000;
+ DWORD cBits = 32;
+ while (cBits > 1)
+ {
+ if (dwValue & dwMask)
+ return cBits;
+
+ dwMask >>= 1;
+ cBits--;
+ }
+
+ return 1;
+ }
+
+ // Set the dwLength bits at m_rgPackedFields + dwOffset bits to the value dwValue.
+ void BitVectorSet(DWORD dwOffset, DWORD dwLength, DWORD dwValue)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(dwLength > 0 && dwLength <= kBitsPerDWORD); // Can set at most one DWORD at a time
+ _ASSERTE((dwLength == kBitsPerDWORD) || (dwValue < (1U << dwLength))); // Value had better fit in the given length
+
+ // Calculate the start and end naturally aligned DWORDs into which the value will go.
+ DWORD dwStartBlock = dwOffset / kBitsPerDWORD;
+ DWORD dwEndBlock = (dwOffset + dwLength - 1) / kBitsPerDWORD;
+ if (dwStartBlock == dwEndBlock)
+ {
+ // Easy case: the new value fits entirely within one aligned DWORD. Compute the number of bits
+ // we'll need to shift the input value (to the left) and a mask of the bits that will be set in
+ // the destination DWORD.
+ DWORD dwValueShift = dwOffset % kBitsPerDWORD;
+ DWORD dwValueMask = ((1U << dwLength) - 1) << dwValueShift;
+
+ m_rgPackedFields[dwStartBlock] &= ~dwValueMask; // Zero the target bits
+ m_rgPackedFields[dwStartBlock] |= dwValue << dwValueShift; // Or in the new value (suitably shifted)
+ }
+ else
+ {
+ // Hard case: the new value is split across two DWORDs (two DWORDs is the max as the new value can
+ // be at most DWORD-sized itself). For simplicity we'll simply break this into two separate
+ // non-spanning sets. We can revisit this in the future if the perf is a problem.
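+ // (Illustrative example: setting a 12-bit value at bit offset 26 writes its
+ // low 6 bits into m_rgPackedFields[0] and its high 6 bits into
+ // m_rgPackedFields[1].)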
+ DWORD dwInitialBits = kBitsPerDWORD - (dwOffset % kBitsPerDWORD); // Number of bits to set in the first DWORD
+ DWORD dwInitialMask = (1U << dwInitialBits) - 1; // Mask covering those value bits
+
+ // Set the portion of the value residing in the first DWORD.
+ BitVectorSet(dwOffset, dwInitialBits, dwValue & dwInitialMask);
+
+ // And then the remainder in the second DWORD.
+ BitVectorSet(dwOffset + dwInitialBits, dwLength - dwInitialBits, dwValue >> dwInitialBits);
+ }
+
+ _ASSERTE(BitVectorGet(dwOffset, dwLength) == dwValue);
+ }
+
+ // Get the dwLength bits at m_rgPackedFields + dwOffset bits. Value is zero-extended to DWORD size.
+ DWORD BitVectorGet(DWORD dwOffset, DWORD dwLength)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(dwLength > 0 && dwLength <= kBitsPerDWORD); // Can get at most one DWORD at a time
+
+ // Calculate the start and end naturally aligned DWORDs from which the value will come.
+ DWORD dwStartBlock = dwOffset / kBitsPerDWORD;
+ DWORD dwEndBlock = (dwOffset + dwLength - 1) / kBitsPerDWORD;
+ if (dwStartBlock == dwEndBlock)
+ {
+ // Easy case: the value fits entirely within one aligned DWORD. Compute the number of bits
+ // we'll need to shift the extracted value (to the right) and a mask of the bits that will be
+ // extracted from the source DWORD.
+ DWORD dwValueShift = dwOffset % kBitsPerDWORD;
+ DWORD dwValueMask = ((1U << dwLength) - 1) << dwValueShift;
+
+ // Mask out the bits we want and shift them down into the bottom of the result DWORD.
+ return (m_rgPackedFields[dwStartBlock] & dwValueMask) >> dwValueShift;
+ }
+ else
+ {
+ // Hard case: the return value is split across two DWORDs (two DWORDs is the max as the value
+ // can be at most DWORD-sized itself). For simplicity we'll simply break this into two separate
+ // non-spanning gets and stitch the result together from that. We can revisit this in the future
+ // if the perf is a problem.
+ DWORD dwInitialBits = kBitsPerDWORD - (dwOffset % kBitsPerDWORD); // Number of bits to get in the first DWORD
+ DWORD dwReturn;
+
+ // Get the initial (low-order) bits from the first DWORD.
+ dwReturn = BitVectorGet(dwOffset, dwInitialBits);
+
+ // Get the remaining bits from the second DWORD. These bits will need to be shifted to the left
+ // (past the bits we've already read) before being OR'd into the result.
+ dwReturn |= BitVectorGet(dwOffset + dwInitialBits, dwLength - dwInitialBits) << dwInitialBits;
+
+ return dwReturn;
+ }
+ }
+
+#ifdef _DEBUG
+ DWORD m_rgDebugUnpackedFields[FIELD_COUNT]; // A copy of the unpacked fields so we can validate
+ // packed reads
+ bool m_fFieldsPacked; // The current packed/unpacked state so we can check
+ // the right methods are being called
+#endif // _DEBUG
+
+ union
+ {
+ DWORD m_rgUnpackedFields[FIELD_COUNT]; // The fields in their unpacked state
+ DWORD m_rgPackedFields[1]; // The first DWORD block of fields in the packed state
+ };
+};
diff --git a/src/vm/pefile.cpp b/src/vm/pefile.cpp
new file mode 100644
index 0000000000..4b20970bb3
--- /dev/null
+++ b/src/vm/pefile.cpp
@@ -0,0 +1,5310 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// --------------------------------------------------------------------------------
+// PEFile.cpp
+//
+
+// --------------------------------------------------------------------------------
+
+
+#include "common.h"
+#include "pefile.h"
+#include "strongname.h"
+#include "corperm.h"
+#include "eecontract.h"
+#include "apithreadstress.h"
+#include "eeconfig.h"
+#ifdef FEATURE_FUSION
+#include "fusionpriv.h"
+#include "shlwapi.h"
+#endif
+#include "product_version.h"
+#include "eventtrace.h"
+#include "security.h"
+#include "corperm.h"
+#include "dbginterface.h"
+#include "peimagelayout.inl"
+#include "dlwrap.h"
+#include "invokeutil.h"
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif
+#include "strongnameinternal.h"
+
+#ifdef FEATURE_VERSIONING
+#include "../binder/inc/applicationcontext.hpp"
+#endif
+
+#ifndef FEATURE_FUSION
+#include "clrprivbinderutil.h"
+#include "../binder/inc/coreclrbindercommon.h"
+#endif
+
+#ifdef FEATURE_CAS_POLICY
+#include <wintrust.h>
+#endif
+
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+
+#ifdef DEBUGGING_SUPPORTED
+SVAL_IMPL_INIT(DWORD, PEFile, s_NGENDebugFlags, 0);
+#endif
+#endif
+
+#include "sha1.h"
+
+#if defined(FEATURE_HOSTED_BINDER) && defined(FEATURE_FUSION)
+#include "clrprivbinderfusion.h"
+#include "clrprivbinderappx.h"
+#include "clrprivbinderloadfile.h"
+#endif
+
+#ifndef DACCESS_COMPILE
+
+// ================================================================================
+// PEFile class - this is an abstract base class for PEModule and PEAssembly
+// <TODO>@todo: rename TargetFile</TODO>
+// ================================================================================
+
+PEFile::PEFile(PEImage *identity, BOOL fCheckAuthenticodeSignature/*=TRUE*/) :
+#if _DEBUG
+ m_pDebugName(NULL),
+#endif
+ m_identity(NULL),
+ m_openedILimage(NULL),
+#ifdef FEATURE_PREJIT
+ m_nativeImage(NULL),
+ m_fCanUseNativeImage(TRUE),
+#endif
+ m_MDImportIsRW_Debugger_Use_Only(FALSE),
+ m_bHasPersistentMDImport(FALSE),
+ m_pMDImport(NULL),
+ m_pImporter(NULL),
+ m_pEmitter(NULL),
+#ifndef FEATURE_CORECLR
+ m_pAssemblyImporter(NULL),
+ m_pAssemblyEmitter(NULL),
+#endif
+ m_pMetadataLock(::new SimpleRWLock(PREEMPTIVE, LOCK_TYPE_DEFAULT)),
+ m_refCount(1),
+ m_hash(NULL),
+ m_flags(0),
+ m_fStrongNameVerified(FALSE)
+#ifdef FEATURE_CAS_POLICY
+ ,m_certificate(NULL),
+ m_fCheckedCertificate(FALSE)
+ ,m_pSecurityManager(NULL)
+ ,m_securityManagerLock(CrstPEFileSecurityManager)
+#endif // FEATURE_CAS_POLICY
+#ifdef FEATURE_HOSTED_BINDER
+ ,m_pHostAssembly(nullptr)
+#endif // FEATURE_HOSTED_BINDER
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (identity)
+ {
+ identity->AddRef();
+ m_identity = identity;
+
+ if(identity->IsOpened())
+ {
+ //already opened, prepopulate
+ identity->AddRef();
+ m_openedILimage = identity;
+ }
+ }
+
+
+#ifdef FEATURE_CAS_POLICY
+ if (fCheckAuthenticodeSignature)
+ {
+ CheckAuthenticodeSignature();
+ }
+#endif // FEATURE_CAS_POLICY
+}
+
+
+
+PEFile::~PEFile()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ReleaseMetadataInterfaces(TRUE);
+
+ if (m_hash != NULL)
+ delete m_hash;
+
+#ifdef FEATURE_PREJIT
+ if (m_nativeImage != NULL)
+ {
+ MarkNativeImageInvalidIfOwned();
+
+ m_nativeImage->Release();
+ }
+#endif //FEATURE_PREJIT
+
+
+ if (m_openedILimage != NULL)
+ m_openedILimage->Release();
+ if (m_identity != NULL)
+ m_identity->Release();
+ if (m_pMetadataLock)
+ delete m_pMetadataLock;
+#ifdef FEATURE_CAS_POLICY
+ if (m_pSecurityManager) {
+ m_pSecurityManager->Release();
+ m_pSecurityManager = NULL;
+ }
+ if (m_certificate && !g_pCertificateCache->Contains(m_certificate))
+ CoTaskMemFree(m_certificate);
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_HOSTED_BINDER
+ if (m_pHostAssembly != NULL)
+ {
+ m_pHostAssembly->Release();
+ }
+#endif
+}
+
+#ifndef DACCESS_COMPILE
+void PEFile::ReleaseIL()
+{
+ WRAPPER_NO_CONTRACT;
+ if (m_openedILimage!=NULL )
+ {
+ ReleaseMetadataInterfaces(TRUE, TRUE);
+ if (m_identity != NULL)
+ {
+ m_identity->Release();
+ m_identity=NULL;
+ }
+ m_openedILimage->Release();
+ m_openedILimage = NULL;
+ }
+}
+#endif
+
+/* static */
+PEFile *PEFile::Open(PEImage *image)
+{
+ CONTRACT(PEFile *)
+ {
+ PRECONDITION(image != NULL);
+ PRECONDITION(image->CheckFormat());
+ POSTCONDITION(RETVAL != NULL);
+ POSTCONDITION(!RETVAL->IsModule());
+ POSTCONDITION(!RETVAL->IsAssembly());
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ PEFile *pFile = new PEFile(image, FALSE);
+
+ if (image->HasNTHeaders() && image->HasCorHeader())
+ pFile->OpenMDImport_Unsafe(); //no one else can see the object yet
+
+#if _DEBUG
+ pFile->m_debugName = image->GetPath();
+ pFile->m_debugName.Normalize();
+ pFile->m_pDebugName = pFile->m_debugName;
+#endif
+
+ RETURN pFile;
+}
+
+// ------------------------------------------------------------
+// Loader support routines
+// ------------------------------------------------------------
+
+template<class T> void CoTaskFree(T *p)
+{
+ if (p != NULL)
+ {
+ p->T::~T();
+
+ CoTaskMemFree(p);
+ }
+}
+
+
+NEW_WRAPPER_TEMPLATE1(CoTaskNewHolder, CoTaskFree<_TYPE>);
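+
+// CoTaskNewHolder<T> couples explicit destruction via CoTaskFree with automatic cleanup,
+// so memory from the COM task allocator is released exactly once on all paths. A minimal
+// usage sketch (mirroring CheckAuthenticodeSignature later in this file):
+//
+//   DWORD size;
+//   CoTaskNewHolder<COR_TRUST> pCor = NULL;
+//   if (SUCCEEDED(::GetPublisher(pFileName, NULL, COR_NOUI | COR_NOPOLICY, &pCor, &size)))
+//       pCor.SuppressRelease();   // ownership transferred; skip the automatic CoTaskFree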
+
+BOOL PEFile::CanLoadLibrary()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Dynamic and resource modules don't need LoadLibrary.
+ if (IsDynamic() || IsResource()||IsLoaded())
+ return TRUE;
+
+    // If we've been granted skip verification, we're OK
+ if (HasSkipVerification())
+ return TRUE;
+
+ // Otherwise, we can only load if IL only.
+ return IsILOnly();
+}
+
+
+#ifdef FEATURE_CORECLR
+void PEFile::ValidateImagePlatformNeutrality()
+{
+ STANDARD_VM_CONTRACT;
+
+ //--------------------------------------------------------------------------------
+    // There are no useful applications of the "/platform" switch for CoreCLR.
+    // CoreCLR will do the conservative thing and by default only accept appbase assemblies
+    // compiled with "/platform:anycpu" (or with no "/platform" switch at all).
+    // However, with hosting flags it is possible to suppress this check and allow
+    // platform-specific assemblies. This was primarily added to support C++/CLI
+    // assemblies generated and built with the /CLR:PURE flag; the need arose from the
+    // CoreSystem server work.
+    //
+    // We do allow Platform assemblies to have platform-specific code (because they
+    // in fact do have such code).
+    //--------------------------------------------------------------------------------
+ if (!(GetAssembly()->IsProfileAssembly()) && !GetAppDomain()->AllowPlatformSpecificAppAssemblies())
+ {
+
+ DWORD machine, kind;
+ BOOL fMachineOk,fPlatformFlagsOk;
+
+#ifdef FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS
+ if (ShouldTreatNIAsMSIL() && GetILimage()->HasNativeHeader())
+ {
+ GetILimage()->GetNativeILPEKindAndMachine(&kind, &machine);
+ }
+ else
+#endif // FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS
+ {
+            // The following call gets the kind and machine reported by the IL image.
+            // For NGen images, it gets the original kind and machine of the IL image
+            // from the copy maintained in the native image.
+ GetPEKindAndMachine(&kind, &machine);
+ }
+
+ fMachineOk = (machine == IMAGE_FILE_MACHINE_I386);
+ fPlatformFlagsOk = ((kind & (peILonly | pe32Plus | pe32BitRequired)) == peILonly);
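+
+        // For reference (a sketch of common compiler settings): "/platform:anycpu" marks
+        // the image peILonly and passes this check; "/platform:x86" adds pe32BitRequired
+        // and "/platform:x64" adds pe32Plus, so both fail here unless the host opted in
+        // to platform-specific assemblies above.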
+
+#ifdef FEATURE_LEGACYNETCF
+ if (GetAppDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ fPlatformFlagsOk = ((kind & (peILonly | pe32Plus)) == peILonly);
+#endif
+
+ if (!(fMachineOk &&
+ fPlatformFlagsOk))
+ {
+ // This exception matches what the desktop OS hook throws - unfortunate that this is so undescriptive.
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ }
+}
+#endif
+
+#ifdef FEATURE_MIXEDMODE
+
+#ifndef CROSSGEN_COMPILE
+
+// Returns TRUE if this file references managed CRT (msvcmNN*).
+BOOL PEFile::ReferencesManagedCRT()
+{
+ STANDARD_VM_CONTRACT;
+
+ IMDInternalImportHolder pImport = GetMDImport();
+ MDEnumHolder hEnum(pImport);
+
+ IfFailThrow(pImport->EnumInit(mdtModuleRef, mdTokenNil, &hEnum));
+
+ mdModuleRef tk;
+ while (pImport->EnumNext(&hEnum, &tk))
+ {
+ // we are looking for "msvcmNN*"
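+        // e.g. the module refs for "msvcm80" / "msvcm90" (the VC8/VC9 managed CRT) match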
+ LPCSTR szName;
+ IfFailThrow(pImport->GetModuleRefProps(tk, &szName));
+
+ if (_strnicmp(szName, "msvcm", 5) == 0 && isdigit(szName[5]) && isdigit(szName[6]))
+ {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+void PEFile::CheckForDisallowedInProcSxSLoadWorker()
+{
+ STANDARD_VM_CONTRACT;
+
+ // provide an opt-out switch for now
+ if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_DisableIJWVersionCheck) != 0)
+ return;
+
+ // ************************************************************************************************
+ // 1. See if this file should be checked
+ // The following checks filter out non-mixed mode assemblies that don't reference msvcmNN*. We only
+ // care about non-ILONLY images (IJW) or 2.0 C++/CLI pure images.
+ if (IsResource() || IsDynamic())
+ return;
+
+ // check the metadata version string
+ COUNT_T size;
+ PVOID pMetaData = (PVOID)GetMetadata(&size);
+ if (!pMetaData)
+ {
+ // No metadata section? Well somebody should have caught this earlier so report as
+ // ExecutionEngine rather than BIF.
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+
+ LPCSTR pVersion = NULL;
+ IfFailThrow(GetImageRuntimeVersionString(pMetaData, &pVersion));
+
+ char chV;
+ unsigned uiMajor, uiMinor;
+ BOOL fLegacyImage = (sscanf_s(pVersion, "%c%u.%u", &chV, 1, &uiMajor, &uiMinor) == 3 && (chV == W('v') || chV == W('V')) && uiMajor <= 2);
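+
+    // For example, a runtime version string of "v2.0.50727" parses as chV = 'v',
+    // uiMajor = 2, uiMinor = 0, making fLegacyImage TRUE, while "v4.0.30319" gives
+    // uiMajor = 4 and is treated as a non-legacy image.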
+
+    // Note that VTFixups only really work properly in non-ILONLY images. In particular,
+    // the shim does not even attempt to patch ILONLY images in any way with bootstrap thunks.
+ if (IsILOnly())
+ {
+ // all >2.0 ILONLY images are fine because >2.0 managed CRTs can be loaded in multiple runtimes
+ if (!fLegacyImage)
+ return;
+
+ // legacy ILONLY images that don't reference the managed CRT are fine
+ if (!ReferencesManagedCRT())
+ return;
+ }
+
+ // get the version of this runtime
+ WCHAR wzThisRuntimeVersion[_MAX_PATH];
+ DWORD cchVersion = COUNTOF(wzThisRuntimeVersion);
+ IfFailThrow(g_pCLRRuntime->GetVersionString(wzThisRuntimeVersion, &cchVersion));
+
+ // ************************************************************************************************
+ // 2. For legacy assemblies, verify that legacy APIs are/would be bound to this runtime
+ if (fLegacyImage)
+ {
+ WCHAR wzAPIVersion[_MAX_PATH];
+ bool fLegacyAPIsAreBound = false;
+
+ { // Check if the legacy APIs have already been bound to us using the new hosting APIs.
+ ReleaseHolder<ICLRMetaHost> pMetaHost;
+ IfFailThrow(CLRCreateInstance(CLSID_CLRMetaHost, IID_ICLRMetaHost, (LPVOID*)&pMetaHost));
+
+ ReleaseHolder<ICLRRuntimeInfo> pInfo;
+ // Returns S_FALSE when no runtime is currently bound, S_OK when one is.
+ HRESULT hr = pMetaHost->QueryLegacyV2RuntimeBinding(IID_ICLRRuntimeInfo, (LPVOID*)&pInfo);
+ IfFailThrow(hr);
+
+ if (hr == S_OK)
+ { // Legacy APIs are bound, now check if they are bound to us.
+ fLegacyAPIsAreBound = true;
+
+ cchVersion = COUNTOF(wzAPIVersion);
+ IfFailThrow(pInfo->GetVersionString(wzAPIVersion, &cchVersion));
+
+ if (SString::_wcsicmp(wzThisRuntimeVersion, wzAPIVersion) == 0)
+ { // This runtime is the one bound to the legacy APIs, ok to load legacy assembly.
+ return;
+ }
+ }
+ }
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4996) // we are going to call deprecated APIs
+#endif
+ // We need the above QueryLegacyV2RuntimeBinding check because GetRequestedRuntimeInfo will not take into
+ // account the current binding, which could have been set by the host rather than through an EXE config.
+ // If, however, the legacy APIs are not bound (indicated in fLegacyAPIsAreBound) then we can assume that
+ // the legacy APIs would bind using the equivalent of CorBindToRuntime(NULL) as a result of loading this
+ // legacy IJW assembly, and so we use GetRequestedRuntimeInfo to check without actually causing the bind.
+ // By avoiding causing the bind, we avoid a binding side effect in the failure case.
+ if (!fLegacyAPIsAreBound &&
+ SUCCEEDED(GetRequestedRuntimeInfo(NULL, NULL, NULL, 0, // pExe, pwszVersion, pConfigurationFile, startupFlags
+ RUNTIME_INFO_UPGRADE_VERSION | RUNTIME_INFO_DONT_RETURN_DIRECTORY | RUNTIME_INFO_DONT_SHOW_ERROR_DIALOG,
+ NULL, 0, NULL, // pDirectory, dwDirectory, pdwDirectoryLength
+ wzAPIVersion, COUNTOF(wzAPIVersion), &cchVersion))) // pVersion, cchBuffer, pdwLength
+ {
+ if (SString::_wcsicmp(wzThisRuntimeVersion, wzAPIVersion) == 0)
+ {
+ // it came back as this version - call CorBindToRuntime to actually bind it
+ ReleaseHolder<ICLRRuntimeHost> pHost;
+ IfFailThrow(CorBindToRuntime(wzAPIVersion, NULL, CLSID_CLRRuntimeHost, IID_ICLRRuntimeHost, (LPVOID *)&pHost));
+
+ // and verify that nobody beat us to it
+ IfFailThrow(GetCORVersion(wzAPIVersion, COUNTOF(wzAPIVersion), &cchVersion));
+
+ if (SString::_wcsicmp(wzThisRuntimeVersion, wzAPIVersion) == 0)
+ {
+ // we have verified that when the assembly calls CorBindToRuntime(NULL),
+ // it will get this runtime, so we allow it to be loaded
+ return;
+ }
+ }
+ }
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+ MAKE_WIDEPTR_FROMUTF8(pwzVersion, pVersion);
+
+ ExternalLog(LF_LOADER, LL_ERROR, W("ERR: Rejecting IJW module built against %s because it could be loaded into another runtime in this process."), pwzVersion);
+ COMPlusThrow(kFileLoadException, IDS_EE_IJWLOAD_CROSSVERSION_DISALLOWED, pwzVersion, QUOTE_MACRO_L(VER_MAJORVERSION.VER_MINORVERSION));
+ }
+
+ // ************************************************************************************************
+ // 3. For 4.0+ assemblies, verify that it hasn't been loaded into another runtime
+ ReleaseHolder<ICLRRuntimeHostInternal> pRuntimeHostInternal;
+ IfFailThrow(g_pCLRRuntime->GetInterface(CLSID_CLRRuntimeHostInternal,
+ IID_ICLRRuntimeHostInternal,
+ &pRuntimeHostInternal));
+
+ PTR_VOID pModuleBase = GetLoadedIL()->GetBase();
+
+ ReleaseHolder<ICLRRuntimeInfo> pRuntimeInfo;
+ HRESULT hr = pRuntimeHostInternal->LockModuleForRuntime((BYTE *)pModuleBase, IID_ICLRRuntimeInfo, &pRuntimeInfo);
+
+ IfFailThrow(hr);
+
+ if (hr == S_OK)
+ {
+ // this runtime was the first one to lock the module
+ return;
+ }
+
+ // another runtime has loaded this module so we have to block the load
+ WCHAR wzLoadedRuntimeVersion[_MAX_PATH];
+ cchVersion = COUNTOF(wzLoadedRuntimeVersion);
+ IfFailThrow(pRuntimeInfo->GetVersionString(wzLoadedRuntimeVersion, &cchVersion));
+
+ ExternalLog(LF_LOADER, LL_ERROR, W("ERR: Rejecting IJW module because it is already loaded into runtime version %s in this process."), wzLoadedRuntimeVersion);
+ COMPlusThrow(kFileLoadException, IDS_EE_IJWLOAD_MULTIRUNTIME_DISALLOWED, wzThisRuntimeVersion, wzLoadedRuntimeVersion);
+}
+
+// We don't allow loading IJW and C++/CLI pure images built against <=2.0 if legacy APIs are not bound to this
+// runtime. For IJW images built against >2.0, we don't allow the load if the image has already been loaded by
+// another runtime in this process.
+void PEFile::CheckForDisallowedInProcSxSLoad()
+{
+ STANDARD_VM_CONTRACT;
+
+ // have we checked this one before?
+ if (!IsInProcSxSLoadVerified())
+ {
+ CheckForDisallowedInProcSxSLoadWorker();
+
+ // if no exception was thrown, remember the fact that we don't have to do the check again
+ SetInProcSxSLoadVerified();
+ }
+}
+
+#else // CROSSGEN_COMPILE
+
+void PEFile::CheckForDisallowedInProcSxSLoad()
+{
+ // Noop for crossgen
+}
+
+#endif // CROSSGEN_COMPILE
+
+#endif // FEATURE_MIXEDMODE
+
+
+//-----------------------------------------------------------------------------------------------------
+// Catch attempts to load x64 assemblies on x86, etc.
+//-----------------------------------------------------------------------------------------------------
+static void ValidatePEFileMachineType(PEFile *peFile)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (peFile->IsIntrospectionOnly())
+        return; // ReflectionOnly assemblies are permitted to violate CPU restrictions
+
+ if (peFile->IsDynamic())
+ return; // PEFiles for ReflectionEmit assemblies don't cache the machine type.
+
+ if (peFile->IsResource())
+ return; // PEFiles for resource assemblies don't cache the machine type.
+
+ if (peFile->HasNativeImage())
+ return; // If it passed the native binder, no need to do the check again esp. at the risk of inviting an IL page-in.
+
+ DWORD peKind;
+ DWORD actualMachineType;
+ peFile->GetPEKindAndMachine(&peKind, &actualMachineType);
+
+ if (actualMachineType == IMAGE_FILE_MACHINE_I386 && ((peKind & (peILonly | pe32BitRequired)) == peILonly))
+ return; // Image is marked CPU-agnostic.
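+
+    // Past this point the image demands a specific CPU. IMAGE_FILE_MACHINE_NATIVE is the
+    // machine type of the CLR flavor we are running (e.g. IMAGE_FILE_MACHINE_I386 on x86,
+    // IMAGE_FILE_MACHINE_AMD64 on x64), so an x64-only assembly loaded on an x86 CLR
+    // takes the throw path below.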
+
+ if (actualMachineType != IMAGE_FILE_MACHINE_NATIVE)
+ {
+#ifdef FEATURE_LEGACYNETCF
+ if (GetAppDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8)
+ {
+ if (actualMachineType == IMAGE_FILE_MACHINE_I386 && ((peKind & peILonly)) == peILonly)
+ return;
+ }
+#endif
+
+#ifdef _TARGET_AMD64_
+        // v4.0 64-bit compatibility workaround. The 64-bit v4.0 CLR's Reflection.Load(byte[]) api did not detect CPU mismatches. We should consider fixing that in
+        // the next SxS release. In the meantime, this bypass will retain compat for 64-bit v4.0 CLR for target platforms that existed at the time.
+        //
+        // Though this bypass kicks in for all Load() flavors, the other Load() flavors did detect CPU mismatches through various other code paths that still exist.
+        // Or to put it another way, this #ifdef makes the (4.5 only) ValidatePEFileMachineType() a NOP for x64, hence preserving 4.0 compatibility.
+        if (actualMachineType == IMAGE_FILE_MACHINE_I386 || actualMachineType == IMAGE_FILE_MACHINE_IA64)
+            return;
+#endif // _TARGET_AMD64_
+
+ // Image has required machine that doesn't match the CLR.
+ StackSString name;
+ if (peFile->IsAssembly())
+ ((PEAssembly*)peFile)->GetDisplayName(name);
+ else
+ name = StackSString(SString::Utf8, peFile->GetSimpleName());
+
+ COMPlusThrow(kBadImageFormatException, IDS_CLASSLOAD_WRONGCPU, name.GetUnicode());
+ }
+
+ return; // If we got here, all is good.
+}
+
+void PEFile::LoadLibrary(BOOL allowNativeSkip/*=TRUE*/) // if allowNativeSkip==FALSE force IL image load
+{
+ CONTRACT_VOID
+ {
+ INSTANCE_CHECK;
+ POSTCONDITION(CheckLoaded());
+ STANDARD_VM_CHECK;
+ }
+ CONTRACT_END;
+
+ // Catch attempts to load x64 assemblies on x86, etc.
+ ValidatePEFileMachineType(this);
+
+ // See if we've already loaded it.
+ if (CheckLoaded(allowNativeSkip))
+ {
+#ifdef FEATURE_CORECLR
+ if (!IsResource() && !IsDynamic())
+ ValidateImagePlatformNeutrality();
+#endif //FEATURE_CORECLR
+
+#ifdef FEATURE_MIXEDMODE
+ // Prevent loading C++/CLI images into multiple runtimes in the same process. Note that if ILOnly images
+ // stop being LoadLibrary'ed, the check for pure 2.0 C++/CLI images will need to be done somewhere else.
+ if (!IsIntrospectionOnly())
+ CheckForDisallowedInProcSxSLoad();
+#endif // FEATURE_MIXEDMODE
+ RETURN;
+ }
+
+ // Note that we may be racing other threads here, in the case of domain neutral files
+
+ // Resource images are always flat.
+ if (IsResource())
+ {
+ GetILimage()->LoadNoMetaData(IsIntrospectionOnly());
+ RETURN;
+ }
+
+#ifdef FEATURE_CORECLR
+ ValidateImagePlatformNeutrality();
+#endif //FEATURE_CORECLR
+
+#if !defined(_WIN64)
+ if (!HasNativeImage() && (!GetILimage()->Has32BitNTHeaders()) && !IsIntrospectionOnly())
+ {
+ // Tried to load 64-bit assembly on 32-bit platform.
+ EEFileLoadException::Throw(this, COR_E_BADIMAGEFORMAT, NULL);
+ }
+#endif
+
+ // Don't do this if we are unverifiable
+ if (!CanLoadLibrary())
+ ThrowHR(SECURITY_E_UNVERIFIABLE);
+
+
+ // We need contents now
+ if (!HasNativeImage())
+ {
+ EnsureImageOpened();
+ }
+
+ if (IsIntrospectionOnly())
+ {
+ GetILimage()->LoadForIntrospection();
+ RETURN;
+ }
+
+
+ //---- Below this point, only do the things necessary for execution ----
+ _ASSERTE(!IsIntrospectionOnly());
+
+#ifdef FEATURE_PREJIT
+ // For on-disk Dlls, we can call LoadLibrary
+ if (IsDll() && !((HasNativeImage()?m_nativeImage:GetILimage())->GetPath().IsEmpty()))
+ {
+ // Note that we may get a DllMain notification inside here.
+ if (allowNativeSkip && HasNativeImage())
+ {
+ m_nativeImage->Load();
+ if(!m_nativeImage->IsNativeILILOnly())
+ GetILimage()->Load(); // For IJW we have to load IL also...
+ }
+ else
+ GetILimage()->Load();
+ }
+ else
+#endif // FEATURE_PREJIT
+ {
+
+        // Since we couldn't call LoadLibrary, we must either be an IL-only image
+        // or an image that may still contain unfixed-up stuff.
+        // Note that we make an exception for CompilationDomains, since PEImage
+        // will map non-ILOnly images in a compilation domain.
+ if (!GetILimage()->IsILOnly() && !GetAppDomain()->IsCompilationDomain())
+ {
+ if (!GetILimage()->HasV1Metadata())
+ ThrowHR(COR_E_FIXUPSINEXE); // <TODO>@todo: better error</TODO>
+ }
+
+
+
+ // If we are already mapped, we can just use the current image.
+#ifdef FEATURE_PREJIT
+ if (allowNativeSkip && HasNativeImage())
+ {
+ m_nativeImage->LoadFromMapped();
+
+ if( !m_nativeImage->IsNativeILILOnly())
+ GetILimage()->LoadFromMapped(); // For IJW we have to load IL also...
+ }
+ else
+#endif
+ {
+ if (GetILimage()->IsFile())
+ GetILimage()->LoadFromMapped();
+ else
+ GetILimage()->LoadNoFile();
+ }
+ }
+
+#ifdef FEATURE_MIXEDMODE
+ // Prevent loading C++/CLI images into multiple runtimes in the same process. Note that if ILOnly images
+ // stop being LoadLibrary'ed, the check for pure 2.0 C++/CLI images will need to be done somewhere else.
+ CheckForDisallowedInProcSxSLoad();
+#endif // FEATURE_MIXEDMODE
+
+ RETURN;
+}
+
+void PEFile::SetLoadedHMODULE(HMODULE hMod)
+{
+ CONTRACT_VOID
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(hMod));
+ PRECONDITION(CanLoadLibrary());
+ POSTCONDITION(CheckLoaded());
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ // See if the image is an internal PEImage.
+ GetILimage()->SetLoadedHMODULE(hMod);
+
+ RETURN;
+}
+
+/* static */
+void PEFile::DefineEmitScope(
+ GUID iid,
+ void **ppEmit)
+{
+ CONTRACT_VOID
+ {
+ PRECONDITION(CheckPointer(ppEmit));
+ POSTCONDITION(CheckPointer(*ppEmit));
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ SafeComHolder<IMetaDataDispenserEx> pDispenser;
+
+ // Get the Dispenser interface.
+ MetaDataGetDispenser(
+ CLSID_CorMetaDataDispenser,
+ IID_IMetaDataDispenserEx,
+ (void **)&pDispenser);
+ if (pDispenser == NULL)
+ {
+ ThrowOutOfMemory();
+ }
+
+    // Set the option on the dispenser to turn on duplicate checking for TypeDef and ModuleRef
+ VARIANT varOption;
+ V_VT(&varOption) = VT_UI4;
+ V_I4(&varOption) = MDDupDefault | MDDupTypeDef | MDDupModuleRef | MDDupExportedType | MDDupAssemblyRef | MDDupPermission | MDDupFile;
+ IfFailThrow(pDispenser->SetOption(MetaDataCheckDuplicatesFor, &varOption));
+
+ // Set minimal MetaData size
+ V_VT(&varOption) = VT_UI4;
+ V_I4(&varOption) = MDInitialSizeMinimal;
+ IfFailThrow(pDispenser->SetOption(MetaDataInitialSize, &varOption));
+
+ // turn on the thread safety!
+ V_I4(&varOption) = MDThreadSafetyOn;
+ IfFailThrow(pDispenser->SetOption(MetaDataThreadSafetyOptions, &varOption));
+
+ IfFailThrow(pDispenser->DefineScope(CLSID_CorMetaDataRuntime, 0, iid, (IUnknown **)ppEmit));
+
+ RETURN;
+} // PEFile::DefineEmitScope
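+
+// A hypothetical caller sketch for DefineEmitScope (the names below are illustrative,
+// not taken from this file):
+//
+//   SafeComHolder<IMetaDataEmit> pEmit;
+//   PEFile::DefineEmitScope(IID_IMetaDataEmit, (void **)&pEmit);
+//   // pEmit now refers to a fresh in-memory scope with duplicate checking and
+//   // thread safety enabled, per the dispenser options set above.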
+
+// ------------------------------------------------------------
+// Identity
+// ------------------------------------------------------------
+
+BOOL PEFile::Equals(PEFile *pFile)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pFile));
+ GC_NOTRIGGER;
+ NOTHROW;
+ CANNOT_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Same object is equal
+ if (pFile == this)
+ return TRUE;
+
+
+ // Execution and introspection files are NOT equal
+ if ( (!IsIntrospectionOnly()) != !(pFile->IsIntrospectionOnly()) )
+ {
+ return FALSE;
+ }
+
+#ifdef FEATURE_HOSTED_BINDER
+ // Different host assemblies cannot be equal unless they are associated with the same host binder
+ // It's ok if only one has a host binder because multiple threads can race to load the same assembly
+ // and that may cause temporary candidate PEAssembly objects that never get bound to a host assembly
+ // because another thread beats it; the losing thread will pick up the PEAssembly in the cache.
+ if (pFile->HasHostAssembly() && this->HasHostAssembly())
+ {
+ UINT_PTR fileBinderId = 0;
+ if (FAILED(pFile->GetHostAssembly()->GetBinderID(&fileBinderId)))
+ return FALSE;
+
+ UINT_PTR thisBinderId = 0;
+ if (FAILED(this->GetHostAssembly()->GetBinderID(&thisBinderId)))
+ return FALSE;
+
+ if (fileBinderId != thisBinderId)
+ return FALSE;
+
+ }
+#endif // FEATURE_HOSTED_BINDER
+
+
+ // Same identity is equal
+ if (m_identity != NULL && pFile->m_identity != NULL
+ && m_identity->Equals(pFile->m_identity))
+ return TRUE;
+
+ // Same image is equal
+ if (m_openedILimage != NULL && pFile->m_openedILimage != NULL
+ && m_openedILimage->Equals(pFile->m_openedILimage))
+ return TRUE;
+
+ return FALSE;
+}
+
+BOOL PEFile::Equals(PEImage *pImage)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pImage));
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Same object is equal
+ if (pImage == m_identity || pImage == m_openedILimage)
+ return TRUE;
+
+#ifdef FEATURE_PREJIT
+ if(pImage == m_nativeImage)
+ return TRUE;
+#endif
+ // Same identity is equal
+ if (m_identity != NULL
+ && m_identity->Equals(pImage))
+ return TRUE;
+
+ // Same image is equal
+ if (m_openedILimage != NULL
+ && m_openedILimage->Equals(pImage))
+ return TRUE;
+
+
+ return FALSE;
+}
+
+// ------------------------------------------------------------
+// Descriptive strings
+// ------------------------------------------------------------
+
+void PEFile::GetCodeBaseOrName(SString &result)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (m_identity != NULL && !m_identity->GetPath().IsEmpty())
+ {
+ result.Set(m_identity->GetPath());
+ }
+ else if (IsAssembly())
+ {
+ ((PEAssembly*)this)->GetCodeBase(result);
+ }
+ else
+ result.SetUTF8(GetSimpleName());
+}
+
+#ifdef FEATURE_CAS_POLICY
+
+// Returns security information for the assembly based on the codebase
+void PEFile::GetSecurityIdentity(SString &codebase, SecZone *pdwZone, DWORD dwFlags, BYTE *pbUniqueID, DWORD *pcbUniqueID)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pdwZone));
+ PRECONDITION(CheckPointer(pbUniqueID));
+ PRECONDITION(CheckPointer(pcbUniqueID));
+ }
+ CONTRACTL_END;
+
+ if (IsAssembly())
+ {
+ ((PEAssembly*)this)->GetCodeBase(codebase);
+ }
+ else if (m_identity != NULL && !m_identity->GetPath().IsEmpty())
+ {
+ codebase.Set(W("file:///"));
+ codebase.Append(m_identity->GetPath());
+ }
+ else
+ {
+ _ASSERTE( !"Unable to determine security identity" );
+ }
+
+ GCX_PREEMP();
+
+ if(!codebase.IsEmpty())
+ {
+ *pdwZone = NoZone;
+
+ InitializeSecurityManager();
+
+        // Map the codebase URL to its security zone.
+ _ASSERTE(sizeof(SecZone) == sizeof(DWORD));
+ IfFailThrow(m_pSecurityManager->MapUrlToZone(codebase,
+ reinterpret_cast<DWORD *>(pdwZone),
+ dwFlags));
+
+ if (*pdwZone>=NumZones)
+ IfFailThrow(SecurityPolicy::ApplyCustomZoneOverride(pdwZone));
+
+ IfFailThrow(m_pSecurityManager->GetSecurityId(codebase,
+ pbUniqueID,
+ pcbUniqueID,
+ 0));
+ }
+}
+
+void PEFile::InitializeSecurityManager()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ CAN_TAKE_LOCK;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ if(m_pSecurityManager == NULL)
+ {
+ CrstHolder holder(&m_securityManagerLock);
+ if (m_pSecurityManager == NULL)
+ {
+ IfFailThrow(CoInternetCreateSecurityManager(NULL,
+ &m_pSecurityManager,
+ 0));
+ }
+ }
+}
+
+#endif // FEATURE_CAS_POLICY
+
+// ------------------------------------------------------------
+// Checks
+// ------------------------------------------------------------
+
+
+
+CHECK PEFile::CheckLoaded(BOOL bAllowNativeSkip/*=TRUE*/)
+{
+ CONTRACT_CHECK
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACT_CHECK_END;
+
+ CHECK(IsLoaded(bAllowNativeSkip)
+ // We are allowed to skip LoadLibrary in most cases for ngen'ed IL only images
+ || (bAllowNativeSkip && HasNativeImage() && IsILOnly()));
+
+ CHECK_OK;
+}
+
+#ifndef FEATURE_CORECLR
+// ------------------------------------------------------------
+// Hash support
+// ------------------------------------------------------------
+
+#ifndef SHA1_HASH_SIZE
+#define SHA1_HASH_SIZE 20
+#endif
+
+void PEFile::GetSHA1Hash(SBuffer &result)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckValue(result));
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Cache the SHA1 hash in a buffer
+ if (m_hash == NULL)
+ {
+ // We shouldn't have to compute a SHA1 hash in any scenarios
+ // where the image opening should be suppressed.
+ EnsureImageOpened();
+
+ m_hash = new InlineSBuffer<SHA1_HASH_SIZE>();
+ GetILimage()->ComputeHash(CALG_SHA1, *m_hash);
+ }
+
+ result.Set(*m_hash);
+}
+#endif // FEATURE_CORECLR
+
+// ------------------------------------------------------------
+// Metadata access
+// ------------------------------------------------------------
+
+PTR_CVOID PEFile::GetMetadata(COUNT_T *pSize)
+{
+ CONTRACT(PTR_CVOID)
+ {
+ INSTANCE_CHECK;
+ POSTCONDITION(CheckPointer(pSize, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+#ifdef FEATURE_PREJIT
+ if (HasNativeImageMetadata())
+ {
+ RETURN m_nativeImage->GetMetadata(pSize);
+ }
+#endif
+
+ if (IsDynamic()
+ || !GetILimage()->HasNTHeaders()
+ || !GetILimage()->HasCorHeader())
+ {
+ if (pSize != NULL)
+ *pSize = 0;
+ RETURN NULL;
+ }
+ else
+ {
+ RETURN GetILimage()->GetMetadata(pSize);
+ }
+}
+#endif // #ifndef DACCESS_COMPILE
+
+PTR_CVOID PEFile::GetLoadedMetadata(COUNT_T *pSize)
+{
+ CONTRACT(PTR_CVOID)
+ {
+ INSTANCE_CHECK;
+ POSTCONDITION(CheckPointer(pSize, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+#ifdef FEATURE_PREJIT
+ if (HasNativeImageMetadata())
+ {
+ RETURN GetLoadedNative()->GetMetadata(pSize);
+ }
+#endif
+
+ if (!HasLoadedIL()
+ || !GetLoadedIL()->HasNTHeaders()
+ || !GetLoadedIL()->HasCorHeader())
+ {
+ if (pSize != NULL)
+ *pSize = 0;
+ RETURN NULL;
+ }
+ else
+ {
+ RETURN GetLoadedIL()->GetMetadata(pSize);
+ }
+}
+
+TADDR PEFile::GetIL(RVA il)
+{
+ CONTRACT(TADDR)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(il != 0);
+ PRECONDITION(!IsDynamic());
+ PRECONDITION(!IsResource());
+#ifndef DACCESS_COMPILE
+ PRECONDITION(CheckLoaded());
+#endif
+ POSTCONDITION(RETVAL != NULL);
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ PEImageLayout *image = NULL;
+
+#ifdef FEATURE_PREJIT
+ // Note it is important to get the IL from the native image if
+ // available, since we are using the metadata from the native image
+ // which has different IL rva's.
+ if (HasNativeImageMetadata())
+ {
+ image = GetLoadedNative();
+
+#ifndef DACCESS_COMPILE
+ // NGen images are trusted to be well-formed.
+ _ASSERTE(image->CheckILMethod(il));
+#endif
+ }
+ else
+#endif // FEATURE_PREJIT
+ {
+ image = GetLoadedIL();
+
+#ifndef DACCESS_COMPILE
+ // Verify that the IL blob is valid before giving it out
+ if (!image->CheckILMethod(il))
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL_RANGE);
+#endif
+ }
+
+ RETURN image->GetRvaData(il);
+}
+
+#ifndef DACCESS_COMPILE
+
+void PEFile::OpenImporter()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Make sure internal MD is in RW format.
+ ConvertMDInternalToReadWrite();
+
+ IMetaDataImport2 *pIMDImport = NULL;
+ IfFailThrow(GetMetaDataPublicInterfaceFromInternal((void*)GetPersistentMDImport(),
+ IID_IMetaDataImport2,
+ (void **)&pIMDImport));
+
+ // Atomically swap it into the field (release it if we lose the race)
+ if (FastInterlockCompareExchangePointer(&m_pImporter, pIMDImport, NULL) != NULL)
+ pIMDImport->Release();
+}
+
+void PEFile::ConvertMDInternalToReadWrite()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(EX_THROW(EEMessageException, (E_OUTOFMEMORY)););
+ }
+ CONTRACTL_END;
+
+ IMDInternalImport *pOld; // Old (current RO) value of internal import.
+ IMDInternalImport *pNew = NULL; // New (RW) value of internal import.
+
+    // Take a local copy of m_pMDImport. This may be a pointer to an RO
+    // or to an RW MDInternalXX.
+ pOld = m_pMDImport;
+ IMetaDataImport *pIMDImport = m_pImporter;
+ if (pIMDImport != NULL)
+ {
+ HRESULT hr = GetMetaDataInternalInterfaceFromPublic(pIMDImport, IID_IMDInternalImport, (void **)&pNew);
+ if (FAILED(hr))
+ {
+ EX_THROW(EEMessageException, (hr));
+ }
+ if (pNew == pOld)
+ {
+ pNew->Release();
+ return;
+ }
+ }
+ else
+ {
+ // If an RO, convert to an RW, return S_OK. If already RW, no conversion
+ // needed, return S_FALSE.
+ HRESULT hr = ConvertMDInternalImport(pOld, &pNew);
+
+ if (FAILED(hr))
+ {
+ EX_THROW(EEMessageException, (hr));
+ }
+
+ // If no conversion took place, don't change pointers.
+ if (hr == S_FALSE)
+ return;
+ }
+
+    // Swap the pointers in a thread-safe manner. If the contents of m_pMDImport
+    // equal pOld then no other thread got here first, and the old contents are
+    // replaced with pNew. The compare-exchange returns the old contents.
+ _ASSERTE(m_bHasPersistentMDImport);
+ if (FastInterlockCompareExchangePointer(&m_pMDImport, pNew, pOld) == pOld)
+ {
+ //if the debugger queries, it will now see that we have RW metadata
+ m_MDImportIsRW_Debugger_Use_Only = TRUE;
+
+ // Swapped -- get the metadata to hang onto the old Internal import.
+ HRESULT hr=m_pMDImport->SetUserContextData(pOld);
+ _ASSERTE(SUCCEEDED(hr)||!"Leaking old MDImport");
+ IfFailThrow(hr);
+ }
+ else
+ { // Some other thread finished first. Just free the results of this conversion.
+ pNew->Release();
+ }
+}
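+
+// The publication idiom used above (and in OpenImporter/OpenEmitter), reduced to a
+// sketch: build the new object off to the side, attempt a single atomic swap, and have
+// the losing thread discard its copy. CreateNew/m_field below are hypothetical names.
+//
+//   T *pNew = CreateNew();
+//   if (FastInterlockCompareExchangePointer(&m_field, pNew, NULL) != NULL)
+//       pNew->Release();   // another thread published first; use theirs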
+
+void PEFile::ConvertMetadataToRWForEnC()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // This should only ever be called on EnC capable files.
+ _ASSERTE(Module::IsEditAndContinueCapable(this));
+
+ // This should only be called if we're debugging, stopped, and on the helper thread.
+ _ASSERTE(CORDebuggerAttached());
+ _ASSERTE((g_pDebugInterface != NULL) && g_pDebugInterface->ThisIsHelperThread());
+ _ASSERTE((g_pDebugInterface != NULL) && g_pDebugInterface->IsStopped());
+
+ // Convert the metadata to RW for Edit and Continue, properly replacing the metadata import interface pointer and
+ // properly preserving the old importer. This will be called before the EnC system tries to apply a delta to the module's
+ // metadata. ConvertMDInternalToReadWrite() does that quite nicely for us.
+ ConvertMDInternalToReadWrite();
+}
+
+void PEFile::OpenMDImport_Unsafe()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (m_pMDImport != NULL)
+ return;
+#ifdef FEATURE_PREJIT
+ if (m_nativeImage != NULL
+#ifdef FEATURE_CORECLR
+ && m_nativeImage->GetMDImport() != NULL
+#endif
+ )
+ {
+ // Use native image for metadata
+ m_flags |= PEFILE_HAS_NATIVE_IMAGE_METADATA;
+ m_pMDImport=m_nativeImage->GetMDImport();
+ }
+ else
+#endif
+ {
+#ifdef FEATURE_PREJIT
+ m_flags &= ~PEFILE_HAS_NATIVE_IMAGE_METADATA;
+#endif
+ if (!IsDynamic()
+ && GetILimage()->HasNTHeaders()
+ && GetILimage()->HasCorHeader())
+ {
+ m_pMDImport=GetILimage()->GetMDImport();
+ }
+ else
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+
+ m_bHasPersistentMDImport=TRUE;
+ }
+ _ASSERTE(m_pMDImport);
+ m_pMDImport->AddRef();
+}
+
+void PEFile::OpenEmitter()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Make sure internal MD is in RW format.
+ ConvertMDInternalToReadWrite();
+
+ IMetaDataEmit *pIMDEmit = NULL;
+ IfFailThrow(GetMetaDataPublicInterfaceFromInternal((void*)GetPersistentMDImport(),
+ IID_IMetaDataEmit,
+ (void **)&pIMDEmit));
+
+ // Atomically swap it into the field (release it if we lose the race)
+ if (FastInterlockCompareExchangePointer(&m_pEmitter, pIMDEmit, NULL) != NULL)
+ pIMDEmit->Release();
+}
+
+#ifndef FEATURE_CORECLR
+void PEFile::OpenAssemblyImporter()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Make sure internal MD is in RW format.
+ ConvertMDInternalToReadWrite();
+
+ // Get the interface
+ IMetaDataAssemblyImport *pIMDAImport = NULL;
+ IfFailThrow(GetMetaDataPublicInterfaceFromInternal((void*)GetPersistentMDImport(),
+ IID_IMetaDataAssemblyImport,
+ (void **)&pIMDAImport));
+
+ // Atomically swap it into the field (release it if we lose the race)
+ if (FastInterlockCompareExchangePointer(&m_pAssemblyImporter, pIMDAImport, NULL) != NULL)
+ pIMDAImport->Release();
+}
+
+void PEFile::OpenAssemblyEmitter()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Make sure internal MD is in RW format.
+ ConvertMDInternalToReadWrite();
+
+ IMetaDataAssemblyEmit *pIMDAssemblyEmit = NULL;
+ IfFailThrow(GetMetaDataPublicInterfaceFromInternal((void*)GetPersistentMDImport(),
+ IID_IMetaDataAssemblyEmit,
+ (void **)&pIMDAssemblyEmit));
+
+ // Atomically swap it into the field (release it if we lose the race)
+ if (FastInterlockCompareExchangePointer(&m_pAssemblyEmitter, pIMDAssemblyEmit, NULL) != NULL)
+ pIMDAssemblyEmit->Release();
+}
+#endif // FEATURE_CORECLR
+
+void PEFile::ReleaseMetadataInterfaces(BOOL bDestructor, BOOL bKeepNativeData/*=FALSE*/)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(bDestructor||m_pMetadataLock->IsWriterLock());
+ }
+ CONTRACTL_END;
+ _ASSERTE(bDestructor || !m_bHasPersistentMDImport);
+#ifndef FEATURE_CORECLR
+ if (m_pAssemblyImporter != NULL)
+ {
+ m_pAssemblyImporter->Release();
+ m_pAssemblyImporter = NULL;
+ }
+ if(m_pAssemblyEmitter)
+ {
+ m_pAssemblyEmitter->Release();
+ m_pAssemblyEmitter=NULL;
+ }
+#endif
+
+ if (m_pImporter != NULL)
+ {
+ m_pImporter->Release();
+ m_pImporter = NULL;
+ }
+ if (m_pEmitter != NULL)
+ {
+ m_pEmitter->Release();
+ m_pEmitter = NULL;
+ }
+
+ if (m_pMDImport != NULL && (!bKeepNativeData || !HasNativeImage()))
+ {
+ m_pMDImport->Release();
+ m_pMDImport=NULL;
+ }
+}
+
+#ifdef FEATURE_CAS_POLICY
+
+void PEFile::CheckAuthenticodeSignature()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Check any security signature in the header.
+
+ // This publisher data can potentially be cached and passed back in via
+ // PEAssembly::CreateDelayed.
+ //
+ // HOWEVER - even if we cache it, the certificate still may need to be verified at
+ // load time. The only real caching can be done when the COR_TRUST certificate is
+ // ABSENT.
+ //
+ // (In the case where it is present, we could still theoretically
+ // cache the certificate and re-verify it and at least avoid touching the image
+ // again, however this path is not implemented yet, so this is TBD if we decide
+ // it is an important case to optimize for.)
+
+ if (!HasSecurityDirectory())
+ {
+ LOG((LF_SECURITY, LL_INFO1000, "No certificates found in module\n"));
+ }
+ else if(g_pConfig->GeneratePublisherEvidence())
+ {
+        // <TODO>@todo: Just because we don't have a file path doesn't mean we can't have a certificate (does it?)</TODO>
+ if (!GetPath().IsEmpty())
+ {
+ GCX_PREEMP();
+
+ // Ignore any errors here - if we fail to validate a certificate, we just don't
+ // include it as evidence.
+
+ DWORD size;
+ CoTaskNewHolder<COR_TRUST> pCor = NULL;
+ // Failing to find a signature is OK.
+ LPWSTR pFileName = (LPWSTR) GetPath().GetUnicode();
+ DWORD dwAuthFlags = COR_NOUI|COR_NOPOLICY;
+#ifndef FEATURE_CORECLR
+ // Authenticode Verification Start
+ FireEtwAuthenticodeVerificationStart_V1(dwAuthFlags, 0, pFileName, GetClrInstanceId());
+#endif // !FEATURE_CORECLR
+
+ HRESULT hr = ::GetPublisher(pFileName,
+ NULL,
+ dwAuthFlags,
+ &pCor,
+ &size);
+
+#ifndef FEATURE_CORECLR
+ // Authenticode Verification End
+ FireEtwAuthenticodeVerificationStop_V1(dwAuthFlags, (ULONG)hr, pFileName, GetClrInstanceId());
+#endif // !FEATURE_CORECLR
+
+ if( SUCCEEDED(hr) ) {
+ DWORD index = 0;
+ EnumCertificateAdditionFlags dwFlags = g_pCertificateCache->AddEntry(pCor, &index);
+ switch (dwFlags) {
+ case CacheSaturated:
+ pCor.SuppressRelease();
+ m_certificate = pCor.GetValue();
+ break;
+
+ case Success:
+ pCor.SuppressRelease();
+ // falling through
+ case AlreadyExists:
+ m_certificate = g_pCertificateCache->GetEntry(index);
+ _ASSERTE(m_certificate);
+ break;
+ }
+ }
+ }
+ }
+ else
+ {
+ LOG((LF_SECURITY, LL_INFO1000, "Assembly has an Authenticode signature, but Publisher evidence has been disabled.\n"));
+ }
+
+ m_fCheckedCertificate = TRUE;
+}
+
+HRESULT STDMETHODCALLTYPE
+GetPublisher(__in __in_z IN LPWSTR pwsFileName, // File name, this is required even with the handle
+             IN HANDLE hFile, // Optional file handle
+ IN DWORD dwFlags, // COR_NOUI or COR_NOPOLICY
+ OUT PCOR_TRUST *pInfo, // Returns a PCOR_TRUST (Use FreeM)
+ OUT DWORD *dwInfo) // Size of pInfo.
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ GUID gV2 = COREE_POLICY_PROVIDER;
+ COR_POLICY_PROVIDER sCorPolicy;
+
+ WINTRUST_DATA sWTD;
+ WINTRUST_FILE_INFO sWTFI;
+
+ // Set up the COR trust provider
+ memset(&sCorPolicy, 0, sizeof(COR_POLICY_PROVIDER));
+ sCorPolicy.cbSize = sizeof(COR_POLICY_PROVIDER);
+
+ // Set up the winverify provider structures
+ memset(&sWTD, 0x00, sizeof(WINTRUST_DATA));
+ memset(&sWTFI, 0x00, sizeof(WINTRUST_FILE_INFO));
+
+ sWTFI.cbStruct = sizeof(WINTRUST_FILE_INFO);
+ sWTFI.hFile = hFile;
+ sWTFI.pcwszFilePath = pwsFileName;
+
+ sWTD.cbStruct = sizeof(WINTRUST_DATA);
+ sWTD.pPolicyCallbackData = &sCorPolicy; // Add in the cor trust information!!
+ if (dwFlags & COR_NOUI)
+ {
+ sWTD.dwUIChoice = WTD_UI_NONE; // No bad UI is overridden in COR TRUST provider
+ }
+ else
+ {
+ sWTD.dwUIChoice = WTD_UI_ALL; // No bad UI is overridden in COR TRUST provider
+ }
+ sWTD.dwUnionChoice = WTD_CHOICE_FILE;
+ sWTD.pFile = &sWTFI;
+
+ // Set the policies for the VM (we have stolen VMBased and use it like a flag)
+ if (dwFlags != 0)
+ sCorPolicy.VMBased = dwFlags;
+
+ LeaveRuntimeHolder holder((size_t)WinVerifyTrust);
+
+ // WinVerifyTrust calls mscorsecimpl.dll to do the policy check
+ hr = WinVerifyTrust(GetFocus(), &gV2, &sWTD);
+
+ *pInfo = sCorPolicy.pbCorTrust;
+ *dwInfo = sCorPolicy.cbCorTrust;
+
+ return hr;
+} // GetPublisher
+
+#endif // FEATURE_CAS_POLICY
+
+// ------------------------------------------------------------
+// PE file access
+// ------------------------------------------------------------
+
+// Note that most of these APIs are currently passed through
+// to the main image. However, in the near future they will
+// be rerouted to the native image in the prejitted case so
+// we can avoid using the original IL image.
+
+#endif //!DACCESS_COMPILE
+
+#ifdef FEATURE_PREJIT
+#ifndef DACCESS_COMPILE
+// ------------------------------------------------------------
+// Native image access
+// ------------------------------------------------------------
+
+void PEFile::SetNativeImage(PEImage *image)
+{
+ CONTRACT_VOID
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!HasNativeImage());
+ STANDARD_VM_CHECK;
+ }
+ CONTRACT_END;
+
+ _ASSERTE(image != NULL);
+ PREFIX_ASSUME(image != NULL);
+
+ if (image->GetLoadedLayout()->GetBase() != image->GetLoadedLayout()->GetPreferredBase())
+ {
+ ExternalLog(LL_WARNING,
+ W("Native image loaded at base address") LFMT_ADDR
+ W("rather than preferred address:") LFMT_ADDR ,
+ DBG_ADDR(image->GetLoadedLayout()->GetBase()),
+ DBG_ADDR(image->GetLoadedLayout()->GetPreferredBase()));
+ }
+
+#ifdef FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS
+ // In Apollo, first ask if we're supposed to be ignoring the prejitted code &
+ // structures in NGENd images. If so, bail now and do not set m_nativeImage. We've
+ // already set m_identity & m_openedILimage (possibly even pointing to the
+ // NGEN/Triton image), and will use those PEImages to find and JIT IL (even if they
+ // point to an NGENd/Tritonized image).
+ if (ShouldTreatNIAsMSIL())
+ RETURN;
+#endif
+
+ m_nativeImage = image;
+ m_nativeImage->AddRef();
+ m_nativeImage->Load();
+ m_nativeImage->AllocateLazyCOWPages();
+
+ ExternalLog(LL_INFO100, W("Attempting to use native image %s."), image->GetPath().GetUnicode());
+ RETURN;
+}
+
+void PEFile::ClearNativeImage()
+{
+ CONTRACT_VOID
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(HasNativeImage());
+ POSTCONDITION(!HasNativeImage());
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ ExternalLog(LL_WARNING, "Discarding native image.");
+
+
+ MarkNativeImageInvalidIfOwned();
+
+ {
+ GCX_PREEMP();
+ SafeComHolderPreemp<IMDInternalImport> pOldImport=GetMDImportWithRef();
+ SimpleWriteLockHolder lock(m_pMetadataLock);
+
+ EX_TRY
+ {
+ ReleaseMetadataInterfaces(FALSE);
+ m_flags &= ~PEFILE_HAS_NATIVE_IMAGE_METADATA;
+ if (m_nativeImage)
+ m_nativeImage->Release();
+ m_nativeImage = NULL;
+ // Make sure our normal image is open
+ EnsureImageOpened();
+
+ // Reopen metadata from normal image
+ OpenMDImport();
+ }
+ EX_HOOK
+ {
+ RestoreMDImport(pOldImport);
+ }
+ EX_END_HOOK;
+ }
+
+ RETURN;
+}
+
+
+extern DWORD g_dwLogLevel;
+
+//===========================================================================================================
+// Encapsulates CLR and Fusion logging for runtime verification of native images.
+//===========================================================================================================
+static void RuntimeVerifyVLog(DWORD level, LoggableAssembly *pLogAsm, const WCHAR *fmt, va_list args)
+{
+ STANDARD_VM_CONTRACT;
+
+ BOOL fOutputToDebugger = (level == LL_ERROR && IsDebuggerPresent());
+ BOOL fOutputToLogging = LoggingOn(LF_ZAP, level);
+
+ StackSString message;
+ message.VPrintf(fmt, args);
+
+ if (fOutputToLogging)
+ {
+ SString displayString = pLogAsm->DisplayString();
+ LOG((LF_ZAP, level, "%s: \"%S\"\n", "ZAP", displayString.GetUnicode()));
+ LOG((LF_ZAP, level, "%S", message.GetUnicode()));
+ LOG((LF_ZAP, level, "\n"));
+ }
+
+ if (fOutputToDebugger)
+ {
+ SString displayString = pLogAsm->DisplayString();
+ WszOutputDebugString(W("CLR:("));
+ WszOutputDebugString(displayString.GetUnicode());
+ WszOutputDebugString(W(") "));
+ WszOutputDebugString(message);
+ WszOutputDebugString(W("\n"));
+ }
+
+#ifdef FEATURE_FUSION
+ IFusionBindLog *pFusionBindLog = pLogAsm->FusionBindLog();
+ if (pFusionBindLog)
+ {
+ pFusionBindLog->LogMessage(0, FUSION_BIND_LOG_CATEGORY_NGEN, message);
+
+ if (level == LL_ERROR) {
+ pFusionBindLog->SetResultCode(FUSION_BIND_LOG_CATEGORY_NGEN, E_FAIL);
+ pFusionBindLog->Flush(g_dwLogLevel, FUSION_BIND_LOG_CATEGORY_NGEN);
+ pFusionBindLog->Flush(g_dwLogLevel, FUSION_BIND_LOG_CATEGORY_DEFAULT);
+ }
+ }
+#endif //FEATURE_FUSION
+}
+
+
+//===========================================================================================================
+// Encapsulates CLR and Fusion logging for runtime verification of native images.
+//===========================================================================================================
+static void RuntimeVerifyLog(DWORD level, LoggableAssembly *pLogAsm, const WCHAR *fmt, ...)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Avoid calling RuntimeVerifyVLog unless logging is on
+ if ( ((level == LL_ERROR) && IsDebuggerPresent())
+ || LoggingOn(LF_ZAP, level)
+#ifdef FEATURE_FUSION
+ || (pLogAsm->FusionBindLog() != NULL)
+#endif
+ )
+ {
+ va_list args;
+ va_start(args, fmt);
+
+ RuntimeVerifyVLog(level, pLogAsm, fmt, args);
+
+ va_end(args);
+ }
+}
+
+//==============================================================================
+
+static const LPCWSTR CorCompileRuntimeDllNames[NUM_RUNTIME_DLLS] =
+{
+#ifdef FEATURE_CORECLR
+ MAKEDLLNAME_W(W("CORECLR"))
+#else
+ MAKEDLLNAME_W(W("CLR")),
+ MAKEDLLNAME_W(W("CLRJIT"))
+#endif
+};
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+static LPCWSTR s_ngenCompilerDllName = NULL;
+#endif //!FEATURE_CORECLR && !CROSSGEN_COMPILE
+
+LPCWSTR CorCompileGetRuntimeDllName(CorCompileRuntimeDlls id)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+ if (id == NGEN_COMPILER_INFO)
+ {
+ // The NGen compiler needs to be handled differently as it can be customized,
+ // unlike the other runtime DLLs.
+
+ if (s_ngenCompilerDllName == NULL)
+ {
+ // Check if there is an override for the compiler DLL
+ LPCWSTR ngenCompilerOverride = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_NGen_JitName);
+
+ if (ngenCompilerOverride == NULL)
+ {
+ s_ngenCompilerDllName = DEFAULT_NGEN_COMPILER_DLL_NAME;
+ }
+ else
+ {
+ if (wcsstr(ngenCompilerOverride, W(".dll")) == NULL)
+ {
+ EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE,
+ NGEN_COMPILER_OVERRIDE_KEY W(" should have a .DLL suffix"));
+ }
+
+ s_ngenCompilerDllName = ngenCompilerOverride;
+ }
+ }
+
+ return s_ngenCompilerDllName;
+ }
+#endif //!FEATURE_CORECLR && !CROSSGEN_COMPILE
+
+ return CorCompileRuntimeDllNames[id];
+}
+
+#ifndef CROSSGEN_COMPILE
+
+//==============================================================================
+// Will always return a valid HMODULE for CLR_INFO, but will return NULL for NGEN_COMPILER_INFO
+// if the DLL has not yet been loaded (it does not try to cause a load).
+
+// Gets set by IJitManager::LoadJit (yes, this breaks the abstraction boundary).
+HMODULE s_ngenCompilerDll = NULL;
+
+extern HMODULE CorCompileGetRuntimeDll(CorCompileRuntimeDlls id)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Currently special cased for every entry.
+#ifdef FEATURE_CORECLR
+ static_assert_no_msg(NUM_RUNTIME_DLLS == 1);
+ static_assert_no_msg(CORECLR_INFO == 0);
+#else // !FEATURE_CORECLR
+ static_assert_no_msg(NUM_RUNTIME_DLLS == 2);
+ static_assert_no_msg(CLR_INFO == 0);
+ static_assert_no_msg(NGEN_COMPILER_INFO == 1);
+#endif // else FEATURE_CORECLR
+
+ HMODULE hMod = NULL;
+
+ // Try to load the correct DLL
+ switch (id)
+ {
+#ifdef FEATURE_CORECLR
+ case CORECLR_INFO:
+ hMod = GetCLRModule();
+ break;
+#else // !FEATURE_CORECLR
+ case CLR_INFO:
+ hMod = GetCLRModule();
+ break;
+
+ case NGEN_COMPILER_INFO:
+ hMod = s_ngenCompilerDll;
+ break;
+#endif // else FEATURE_CORECLR
+
+ default:
+ COMPlusThrowNonLocalized(kExecutionEngineException,
+ W("Invalid runtime DLL ID"));
+ break;
+ }
+
+ return hMod;
+}
+#endif // CROSSGEN_COMPILE
+
+//===========================================================================================================
+// Helper for RuntimeVerifyNativeImageVersion(). Compares the loaded clr.dll and clrjit.dll's against
+// the ones the native image was compiled against.
+//===========================================================================================================
+static BOOL RuntimeVerifyNativeImageTimestamps(const CORCOMPILE_VERSION_INFO *info, LoggableAssembly *pLogAsm)
+{
+ STANDARD_VM_CONTRACT;
+
+#if !defined(CROSSGEN_COMPILE) && !defined(FEATURE_CORECLR)
+ //
+ // We will automatically fail any zap files which were compiled with different runtime dlls.
+ // This is so that we don't load bad ngen images after recompiling or patching the runtime.
+ //
+
+ for (DWORD index = 0; index < NUM_RUNTIME_DLLS; index++)
+ {
+ HMODULE hMod = CorCompileGetRuntimeDll((CorCompileRuntimeDlls)index);
+
+ if (hMod == NULL)
+ {
+ if (!IsCompilationProcess())
+ continue;
+
+ // If we are doing ngen, then eagerly make sure all the system
+ // dependencies are loaded. Else ICorCompileInfo::CheckAssemblyZap()
+ // will not work correctly.
+
+ LPCWSTR wszDllName = CorCompileGetRuntimeDllName((CorCompileRuntimeDlls)index);
+ if (FAILED(g_pCLRRuntime->LoadLibrary(wszDllName, &hMod)))
+ {
+ EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Unable to load CLR DLL during ngen"));
+ }
+ }
+
+ _ASSERTE(hMod != NULL);
+
+ PEDecoder pe(hMod);
+
+        // Match NT header timestamp and virtual size to test DLL identity
+
+ if ((info->runtimeDllInfo[index].timeStamp == pe.GetTimeDateStamp()
+ || info->runtimeDllInfo[index].timeStamp == 0)
+ && (info->runtimeDllInfo[index].virtualSize == pe.GetVirtualSize()
+ || info->runtimeDllInfo[index].virtualSize == 0))
+ {
+ continue;
+ }
+
+ {
+ // set "ComPlus_CheckNGenImageTimeStamp" to 0 to ignore time-stamp-checking
+ static ConfigDWORD checkNGenImageTimeStamp;
+ BOOL enforceCheck = checkNGenImageTimeStamp.val(CLRConfig::EXTERNAL_CheckNGenImageTimeStamp);
+
+ RuntimeVerifyLog(enforceCheck ? LL_ERROR : LL_WARNING,
+ pLogAsm,
+ W("Compiled with different CLR DLL (%s). Exact match expected."),
+ CorCompileGetRuntimeDllName((CorCompileRuntimeDlls)index));
+
+ if (enforceCheck)
+ return FALSE;
+ }
+ }
+#endif // !CROSSGEN_COMPILE && !FEATURE_CORECLR
+
+ return TRUE;
+}
+
+//===========================================================================================================
+// Validates that an NI matches the running CLR, OS, CPU, etc. This is the entrypoint used by the CLR loader.
+//
+//===========================================================================================================
+BOOL PEAssembly::CheckNativeImageVersion(PEImage *peimage)
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+ // Get the zap version header. Note that modules will not have version
+    // headers - they add no additional versioning constraints beyond those of
+    // their assemblies.
+ //
+ PEImageLayoutHolder image=peimage->GetLayout(PEImageLayout::LAYOUT_ANY,PEImage::LAYOUT_CREATEIFNEEDED);
+
+ if (!image->HasNativeHeader())
+ return FALSE;
+
+ if (!image->CheckNativeHeaderVersion())
+ {
+#ifdef FEATURE_CORECLR
+ // Wrong native image version is fatal error on CoreCLR
+ ThrowHR(COR_E_NI_AND_RUNTIME_VERSION_MISMATCH);
+#else
+ return FALSE;
+#endif
+ }
+
+ CORCOMPILE_VERSION_INFO *info = image->GetNativeVersionInfo();
+ if (info == NULL)
+ return FALSE;
+
+ LoggablePEAssembly logAsm(this);
+ if (!RuntimeVerifyNativeImageVersion(info, &logAsm))
+ {
+#ifdef FEATURE_CORECLR
+ // Wrong native image version is fatal error on CoreCLR
+ ThrowHR(COR_E_NI_AND_RUNTIME_VERSION_MISMATCH);
+#else
+ return FALSE;
+#endif
+ }
+
+#ifdef FEATURE_CORECLR
+ CorCompileConfigFlags configFlags = PEFile::GetNativeImageConfigFlagsWithOverrides();
+
+ if (IsSystem())
+ {
+ // Require instrumented flags for mscorlib when collecting IBC data
+ CorCompileConfigFlags instrumentationConfigFlags = (CorCompileConfigFlags) (configFlags & CORCOMPILE_CONFIG_INSTRUMENTATION);
+ if ((info->wConfigFlags & instrumentationConfigFlags) != instrumentationConfigFlags)
+ {
+ ExternalLog(LL_ERROR, "Instrumented native image for Mscorlib.dll expected.");
+ ThrowHR(COR_E_NI_AND_RUNTIME_VERSION_MISMATCH);
+ }
+ }
+
+ // Otherwise, match regardless of the instrumentation flags
+ configFlags = (CorCompileConfigFlags) (configFlags & ~(CORCOMPILE_CONFIG_INSTRUMENTATION_NONE | CORCOMPILE_CONFIG_INSTRUMENTATION));
+
+ if ((info->wConfigFlags & configFlags) != configFlags)
+ {
+ return FALSE;
+ }
+#else
+ //
+ // Check image flavor. Skip this check in RuntimeVerifyNativeImageVersion called from fusion - fusion is responsible for choosing the right flavor.
+ //
+ if (!RuntimeVerifyNativeImageFlavor(info, &logAsm))
+ {
+ return FALSE;
+ }
+#endif
+
+ return TRUE;
+}
+
+#ifndef FEATURE_CORECLR
+//===========================================================================================================
+// Validates that an NI matches the required flavor (debug, instrumented, etc.)
+//
+//===========================================================================================================
+BOOL RuntimeVerifyNativeImageFlavor(const CORCOMPILE_VERSION_INFO *info, LoggableAssembly *pLogAsm)
+{
+ STANDARD_VM_CONTRACT;
+
+ CorCompileConfigFlags configFlags = PEFile::GetNativeImageConfigFlagsWithOverrides();
+
+ if ((info->wConfigFlags & configFlags) != configFlags)
+ return FALSE;
+
+ return TRUE;
+}
+#endif
+
+//===========================================================================================================
+// Validates that an NI matches the running CLR, OS, CPU, etc.
+//
+// For historical reasons, some versions of the runtime perform this check at native bind time (preferred),
+// while others check at CLR load time.
+//
+// This is the common funnel for both versions and is agnostic to whether the "assembly" is represented
+// by a CLR object or Fusion object.
+//===========================================================================================================
+BOOL RuntimeVerifyNativeImageVersion(const CORCOMPILE_VERSION_INFO *info, LoggableAssembly *pLogAsm)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!RuntimeVerifyNativeImageTimestamps(info, pLogAsm))
+ return FALSE;
+
+ //
+ // Check that the EE version numbers are the same.
+ //
+
+ if (info->wVersionMajor != VER_MAJORVERSION
+ || info->wVersionMinor != VER_MINORVERSION
+ || info->wVersionBuildNumber != VER_PRODUCTBUILD
+ || info->wVersionPrivateBuildNumber != VER_PRODUCTBUILD_QFE)
+ {
+ RuntimeVerifyLog(LL_ERROR, pLogAsm, W("CLR version recorded in native image doesn't match the current CLR."));
+ return FALSE;
+ }
+
+ //
+ // Check checked/free status
+ //
+
+ if (info->wBuild !=
+#if _DEBUG
+ CORCOMPILE_BUILD_CHECKED
+#else
+ CORCOMPILE_BUILD_FREE
+#endif
+ )
+ {
+ RuntimeVerifyLog(LL_ERROR, pLogAsm, W("Checked/free mismatch with native image."));
+ return FALSE;
+ }
+
+ //
+ // Check processor
+ //
+
+ if (info->wMachine != IMAGE_FILE_MACHINE_NATIVE)
+ {
+ RuntimeVerifyLog(LL_ERROR, pLogAsm, W("Processor type recorded in native image doesn't match this machine's processor."));
+ return FALSE;
+ }
+
+#ifndef CROSSGEN_COMPILE
+ //
+ // Check the processor specific ID
+ //
+
+ CORINFO_CPU cpuInfo;
+ GetSpecificCpuInfo(&cpuInfo);
+
+ if (!IsCompatibleCpuInfo(&cpuInfo, &info->cpuInfo))
+ {
+ RuntimeVerifyLog(LL_ERROR, pLogAsm, W("Required CPU features recorded in native image don't match this machine's processor."));
+ return FALSE;
+ }
+#endif // CROSSGEN_COMPILE
+
+ //
+ // The zap is up to date.
+ //
+
+ RuntimeVerifyLog(LL_INFO100, pLogAsm, W("Native image has correct version information."));
+ return TRUE;
+}
+
+#endif // !DACCESS_COMPILE
+
+/* static */
+CorCompileConfigFlags PEFile::GetNativeImageConfigFlags(BOOL fForceDebug/*=FALSE*/,
+ BOOL fForceProfiling/*=FALSE*/,
+ BOOL fForceInstrument/*=FALSE*/)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ CorCompileConfigFlags result = (CorCompileConfigFlags)0;
+
+ // Debugging
+
+#ifdef DEBUGGING_SUPPORTED
+ // if these have been set, they take precedence over anything else
+ if (s_NGENDebugFlags)
+ {
+ if ((s_NGENDebugFlags & CORCOMPILE_CONFIG_DEBUG_NONE) != 0)
+ {
+ result = (CorCompileConfigFlags) (result|CORCOMPILE_CONFIG_DEBUG_NONE);
+ }
+ else
+ {
+ if ((s_NGENDebugFlags & CORCOMPILE_CONFIG_DEBUG) != 0)
+ {
+ result = (CorCompileConfigFlags) (result|CORCOMPILE_CONFIG_DEBUG);
+ }
+ }
+ }
+ else
+#endif // DEBUGGING_SUPPORTED
+ {
+ if (fForceDebug)
+ {
+ result = (CorCompileConfigFlags) (result|CORCOMPILE_CONFIG_DEBUG);
+ }
+ else
+ {
+ result = (CorCompileConfigFlags) (result|CORCOMPILE_CONFIG_DEBUG_DEFAULT);
+ }
+ }
+
+ // Profiling
+
+#ifdef PROFILING_SUPPORTED
+ if (fForceProfiling || CORProfilerUseProfileImages())
+ {
+ result = (CorCompileConfigFlags) (result|CORCOMPILE_CONFIG_PROFILING);
+
+ result = (CorCompileConfigFlags) (result & ~(CORCOMPILE_CONFIG_DEBUG_NONE|
+ CORCOMPILE_CONFIG_DEBUG|
+ CORCOMPILE_CONFIG_DEBUG_DEFAULT));
+ }
+ else
+#endif //PROFILING_SUPPORTED
+ result = (CorCompileConfigFlags) (result|CORCOMPILE_CONFIG_PROFILING_NONE);
+
+ // Instrumentation
+#ifndef DACCESS_COMPILE
+ BOOL instrumented = (!IsCompilationProcess() && g_pConfig->GetZapBBInstr());
+#else
+ BOOL instrumented = FALSE;
+#endif
+ if (instrumented || fForceInstrument)
+ {
+ result = (CorCompileConfigFlags) (result|CORCOMPILE_CONFIG_INSTRUMENTATION);
+ }
+ else
+ {
+ result = (CorCompileConfigFlags) (result|CORCOMPILE_CONFIG_INSTRUMENTATION_NONE);
+ }
+
+ // NOTE: Right now we are not taking instrumentation into account when binding.
+
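+ // Example (illustrative, assuming no debugger overrides, no profiler, and no IBC
+ // instrumentation): the returned mask is CORCOMPILE_CONFIG_DEBUG_DEFAULT |
+ // CORCOMPILE_CONFIG_PROFILING_NONE | CORCOMPILE_CONFIG_INSTRUMENTATION_NONE.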
+ return result;
+}
+
+CorCompileConfigFlags PEFile::GetNativeImageConfigFlagsWithOverrides()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ BOOL fForceDebug, fForceProfiling, fForceInstrument;
+ SystemDomain::GetCompilationOverrides(&fForceDebug,
+ &fForceProfiling,
+ &fForceInstrument);
+ return PEFile::GetNativeImageConfigFlags(fForceDebug,
+ fForceProfiling,
+ fForceInstrument);
+}
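+// Usage sketch (illustrative only): callers compare these flags against the
+// CORCOMPILE_VERSION_INFO recorded in a candidate native image, e.g.:
+//
+//   CorCompileConfigFlags flags = PEFile::GetNativeImageConfigFlagsWithOverrides();
+//   if ((pVersionInfo->wConfigFlags & flags) != flags)
+//       /* reject the native image */;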
+
+#ifndef DACCESS_COMPILE
+
+
+
+//===========================================================================================================
+// Validates that a hard-dep matches a parent NI's compile-time hard-dep.
+//
+// For historical reasons, some versions of the runtime perform this check at native bind time (preferred),
+// while others check at CLR load time.
+//
+// This is the common funnel for both versions and is agnostic to whether the "assembly" is represented
+// by a CLR object or Fusion object.
+//
+//===========================================================================================================
+BOOL RuntimeVerifyNativeImageDependency(const CORCOMPILE_NGEN_SIGNATURE &ngenSigExpected,
+ const CORCOMPILE_VERSION_INFO *pActual,
+ LoggableAssembly *pLogAsm)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (ngenSigExpected != pActual->signature)
+ {
+ // Signature did not match
+ SString displayString = pLogAsm->DisplayString();
+ RuntimeVerifyLog(LL_ERROR,
+ pLogAsm,
+ W("Rejecting native image because native image dependency %s ")
+ W("had a different identity than expected"),
+ displayString.GetUnicode());
+#if (defined FEATURE_PREJIT) && (defined FEATURE_FUSION)
+ if (pLogAsm->FusionBindLog())
+ {
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_PRIVATEFUSION_KEYWORD))
+ {
+ pLogAsm->FusionBindLog()->ETWTraceLogMessage(ETW::BinderLog::BinderStructs::NGEN_BIND_DEPENDENCY_HAS_DIFFERENT_IDENTITY, pLogAsm->FusionAssemblyName());
+ }
+ }
+#endif
+
+ return FALSE;
+ }
+ return TRUE;
+}
+
+// Wrapper function for use by parts of the runtime that actually have a CORCOMPILE_DEPENDENCY to work with.
+BOOL RuntimeVerifyNativeImageDependency(const CORCOMPILE_DEPENDENCY *pExpected,
+ const CORCOMPILE_VERSION_INFO *pActual,
+ LoggableAssembly *pLogAsm)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return RuntimeVerifyNativeImageDependency(pExpected->signNativeImage,
+ pActual,
+ pLogAsm);
+}
+
+#endif // !DACCESS_COMPILE
+
+#ifdef DEBUGGING_SUPPORTED
+//
+// Called through ICorDebugAppDomain2::SetDesiredNGENCompilerFlags to specify
+// which kinds of ngen'd images fusion should load wrt debugging support
+// Overrides any previous settings
+//
+void PEFile::SetNGENDebugFlags(BOOL fAllowOpt)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (fAllowOpt)
+ s_NGENDebugFlags = CORCOMPILE_CONFIG_DEBUG_NONE;
+ else
+ s_NGENDebugFlags = CORCOMPILE_CONFIG_DEBUG;
+}
+
+//
+// Called through ICorDebugAppDomain2::GetDesiredNGENCompilerFlags to determine
+// which kinds of ngen'd images fusion should load wrt debugging support
+//
+void PEFile::GetNGENDebugFlags(BOOL *fAllowOpt)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ CorCompileConfigFlags configFlags = PEFile::GetNativeImageConfigFlagsWithOverrides();
+
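+ // Optimized native images are acceptable unless debuggable images were explicitly requested.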
+ *fAllowOpt = ((configFlags & CORCOMPILE_CONFIG_DEBUG) == 0);
+}
+#endif // DEBUGGING_SUPPORTED
+
+
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS
+
+//---------------------------------------------------------------------------------------
+//
+// Used in Apollo, this method determines whether profiling or debugging has requested
+// the runtime to provide debuggable / profileable code. In other CLR builds, this would
+// normally result in requiring the appropriate NGEN scenario be loaded (/Debug or
+// /Profile) and to JIT if unavailable. In Apollo, however, these NGEN scenarios are
+// never available, and even MSIL assemblies are often not available. So this function
+// tells its caller to use the NGENd assembly as if it were an MSIL assembly--ignore the
+// prejitted code and prebaked structures, and just JIT code and load classes from
+// scratch.
+//
+// Return Value:
+// nonzero iff NGENd images should be treated as MSIL images.
+//
+
+// static
+BOOL PEFile::ShouldTreatNIAsMSIL()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Ask profiling API & config vars whether NGENd images should be avoided
+ // completely.
+ if (!NGENImagesAllowed())
+ return TRUE;
+
+ // Ask profiling and debugging if they're requesting us to use ngen /Debug or
+ // /Profile images (which aren't available under Apollo)
+
+ CorCompileConfigFlags configFlags = PEFile::GetNativeImageConfigFlagsWithOverrides();
+
+ if ((configFlags & (CORCOMPILE_CONFIG_DEBUG | CORCOMPILE_CONFIG_PROFILING)) != 0)
+ return TRUE;
+
+ return FALSE;
+}
+
+#endif // FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS
+
+#endif //!DACCESS_COMPILE
+#endif // FEATURE_PREJIT
+
+#ifndef DACCESS_COMPILE
+
+// ------------------------------------------------------------
+// Resource access
+// ------------------------------------------------------------
+
+void PEFile::GetEmbeddedResource(DWORD dwOffset, DWORD *cbResource, PBYTE *pbInMemoryResource)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(ThrowOutOfMemory(););
+ }
+ CONTRACTL_END;
+
+ // NOTE: it's not clear whether to load this from m_image or m_loadedImage.
+ // m_loadedImage is probably preferable, but this may be called by security
+ // before the image is loaded.
+
+ PEImage *image;
+
+#ifdef FEATURE_PREJIT
+ if (m_nativeImage != NULL)
+ image = m_nativeImage;
+ else
+#endif
+ {
+ EnsureImageOpened();
+ image = GetILimage();
+ }
+
+ PEImageLayoutHolder theImage(image->GetLayout(PEImageLayout::LAYOUT_ANY,PEImage::LAYOUT_CREATEIFNEEDED));
+ if (!theImage->CheckResource(dwOffset))
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+
+ COUNT_T size;
+ const void *resource = theImage->GetResource(dwOffset, &size);
+
+ *cbResource = size;
+ *pbInMemoryResource = (PBYTE) resource;
+}
+
+// ------------------------------------------------------------
+// File loading
+// ------------------------------------------------------------
+
+PEAssembly *
+PEFile::LoadAssembly(
+ mdAssemblyRef kAssemblyRef,
+ IMDInternalImport * pImport, // = NULL
+ LPCUTF8 szWinRtTypeNamespace, // = NULL
+ LPCUTF8 szWinRtTypeClassName) // = NULL
+{
+ CONTRACT(PEAssembly *)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ if (pImport == NULL)
+ pImport = GetPersistentMDImport();
+
+ if (((TypeFromToken(kAssemblyRef) != mdtAssembly) &&
+ (TypeFromToken(kAssemblyRef) != mdtAssemblyRef)) ||
+ (!pImport->IsValidToken(kAssemblyRef)))
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ AssemblySpec spec;
+
+ spec.InitializeSpec(kAssemblyRef, pImport, GetAppDomain()->FindAssembly(GetAssembly()), IsIntrospectionOnly());
+ if (szWinRtTypeClassName != NULL)
+ spec.SetWindowsRuntimeType(szWinRtTypeNamespace, szWinRtTypeClassName);
+
+ RETURN GetAppDomain()->BindAssemblySpec(&spec, TRUE, IsIntrospectionOnly());
+}
+
+// ------------------------------------------------------------
+// Logging
+// ------------------------------------------------------------
+#ifdef FEATURE_PREJIT
+void PEFile::ExternalLog(DWORD facility, DWORD level, const WCHAR *fmt, ...)
+{
+ WRAPPER_NO_CONTRACT;
+
+ va_list args;
+ va_start(args, fmt);
+
+ ExternalVLog(facility, level, fmt, args);
+
+ va_end(args);
+}
+
+void PEFile::ExternalLog(DWORD level, const WCHAR *fmt, ...)
+{
+ WRAPPER_NO_CONTRACT;
+
+ va_list args;
+ va_start(args, fmt);
+
+ ExternalVLog(LF_ZAP, level, fmt, args);
+
+ va_end(args);
+}
+
+void PEFile::ExternalLog(DWORD level, const char *msg)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // It is OK to use %S here. We know that msg is ASCII-only.
+ ExternalLog(level, W("%S"), msg);
+}
+
+void PEFile::ExternalVLog(DWORD facility, DWORD level, const WCHAR *fmt, va_list args)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACT_END;
+
+ BOOL fOutputToDebugger = (level == LL_ERROR && IsDebuggerPresent());
+ BOOL fOutputToLogging = LoggingOn(facility, level);
+
+ if (!fOutputToDebugger && !fOutputToLogging)
+ return;
+
+ StackSString message;
+ message.VPrintf(fmt, args);
+
+ if (fOutputToLogging)
+ {
+ if (GetMDImport() != NULL)
+ LOG((facility, level, "%s: \"%s\"\n", (facility == LF_ZAP ? "ZAP" : "LOADER"), GetSimpleName()));
+ else
+ LOG((facility, level, "%s: \"%S\"\n", (facility == LF_ZAP ? "ZAP" : "LOADER"), ((const WCHAR *)GetPath())));
+
+ LOG((facility, level, "%S", message.GetUnicode()));
+ LOG((facility, level, "\n"));
+ }
+
+ if (fOutputToDebugger)
+ {
+ WszOutputDebugString(W("CLR:("));
+
+ StackSString codebase;
+ GetCodeBaseOrName(codebase);
+ WszOutputDebugString(codebase);
+
+ WszOutputDebugString(W(") "));
+
+ WszOutputDebugString(message);
+ WszOutputDebugString(W("\n"));
+ }
+
+ RETURN;
+}
+
+void PEFile::FlushExternalLog()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+#endif
+
+BOOL PEFile::GetResource(LPCSTR szName, DWORD *cbResource,
+ PBYTE *pbInMemoryResource, DomainAssembly** pAssemblyRef,
+ LPCSTR *szFileName, DWORD *dwLocation,
+ StackCrawlMark *pStackMark, BOOL fSkipSecurityCheck,
+ BOOL fSkipRaiseResolveEvent, DomainAssembly* pDomainAssembly, AppDomain* pAppDomain)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ WRAPPER(GC_TRIGGERS);
+ }
+ CONTRACTL_END;
+
+
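+ // Note: the dwLocation bits set below mirror the managed System.Reflection.ResourceLocation
+ // enum: Embedded = 1, ContainedInAnotherAssembly = 2, ContainedInManifestFile = 4.
+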
+ mdToken mdLinkRef;
+ DWORD dwResourceFlags;
+ DWORD dwOffset;
+ mdManifestResource mdResource;
+ Assembly* pAssembly = NULL;
+ PEFile* pPEFile = NULL;
+ ReleaseHolder<IMDInternalImport> pImport (GetMDImportWithRef());
+ if (SUCCEEDED(pImport->FindManifestResourceByName(szName, &mdResource)))
+ {
+ pPEFile = this;
+ IfFailThrow(pImport->GetManifestResourceProps(
+ mdResource,
+ NULL, //&szName,
+ &mdLinkRef,
+ &dwOffset,
+ &dwResourceFlags));
+ }
+ else
+ {
+ if (fSkipRaiseResolveEvent || pAppDomain == NULL)
+ return FALSE;
+
+ DomainAssembly* pParentAssembly = GetAppDomain()->FindAssembly(GetAssembly());
+ pAssembly = pAppDomain->RaiseResourceResolveEvent(pParentAssembly, szName);
+ if (pAssembly == NULL)
+ return FALSE;
+
+ pDomainAssembly = pAssembly->GetDomainAssembly(pAppDomain);
+ pPEFile = pDomainAssembly->GetFile();
+
+ if (FAILED(pAssembly->GetManifestImport()->FindManifestResourceByName(
+ szName,
+ &mdResource)))
+ {
+ return FALSE;
+ }
+
+ if (dwLocation != 0)
+ {
+ if (pAssemblyRef != NULL)
+ *pAssemblyRef = pDomainAssembly;
+
+ *dwLocation = *dwLocation | 2; // ResourceLocation.containedInAnotherAssembly
+ }
+ IfFailThrow(pPEFile->GetPersistentMDImport()->GetManifestResourceProps(
+ mdResource,
+ NULL, //&szName,
+ &mdLinkRef,
+ &dwOffset,
+ &dwResourceFlags));
+ }
+
+
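+ // mdLinkRef identifies where the resource lives: an mdtAssemblyRef token means another
+ // assembly; an mdtFile token means a file of this assembly, with mdFileNil denoting the
+ // manifest file itself (i.e. an embedded resource).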
+ switch(TypeFromToken(mdLinkRef)) {
+ case mdtAssemblyRef:
+ {
+ if (pDomainAssembly == NULL)
+ return FALSE;
+
+ AssemblySpec spec;
+ spec.InitializeSpec(mdLinkRef, GetPersistentMDImport(), pDomainAssembly, pDomainAssembly->GetFile()->IsIntrospectionOnly());
+ pDomainAssembly = spec.LoadDomainAssembly(FILE_LOADED);
+
+ if (dwLocation) {
+ if (pAssemblyRef)
+ *pAssemblyRef = pDomainAssembly;
+
+ *dwLocation = *dwLocation | 2; // ResourceLocation.containedInAnotherAssembly
+ }
+
+ return pDomainAssembly->GetResource(szName,
+ cbResource,
+ pbInMemoryResource,
+ pAssemblyRef,
+ szFileName,
+ dwLocation,
+ pStackMark,
+ fSkipSecurityCheck,
+ fSkipRaiseResolveEvent);
+ }
+
+ case mdtFile:
+ if (mdLinkRef == mdFileNil)
+ {
+ // The resource is embedded in the manifest file
+
+#ifndef CROSSGEN_COMPILE
+ if (!IsMrPublic(dwResourceFlags) && pStackMark && !fSkipSecurityCheck)
+ {
+ Assembly *pCallersAssembly = SystemDomain::GetCallersAssembly(pStackMark);
+
+ if (pCallersAssembly && // full trust for interop
+ (!pCallersAssembly->GetManifestFile()->Equals(this)))
+ {
+ RefSecContext sCtx(AccessCheckOptions::kMemberAccess);
+
+ AccessCheckOptions accessCheckOptions(
+ AccessCheckOptions::kMemberAccess, /*accessCheckType*/
+ NULL, /*pAccessContext*/
+ FALSE, /*throwIfTargetIsInaccessible*/
+ (MethodTable *) NULL /*pTargetMT*/
+ );
+
+ // SL: return TRUE only if the caller is critical
+ // Desktop: return TRUE only if demanding MemberAccess succeeds
+ if (!accessCheckOptions.DemandMemberAccessOrFail(&sCtx, NULL, TRUE /*visibilityCheck*/))
+ return FALSE;
+ }
+ }
+#endif // CROSSGEN_COMPILE
+
+ if (dwLocation) {
+ *dwLocation = *dwLocation | 5; // ResourceLocation.embedded | ResourceLocation.containedInManifestFile
+ return TRUE;
+ }
+
+ pPEFile->GetEmbeddedResource(dwOffset, cbResource, pbInMemoryResource);
+
+ return TRUE;
+ }
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ // The resource is either linked or embedded in a non-manifest-containing file
+ if (pDomainAssembly == NULL)
+ return FALSE;
+
+ return pDomainAssembly->GetModuleResource(mdLinkRef, szName, cbResource,
+ pbInMemoryResource, szFileName,
+ dwLocation, IsMrPublic(dwResourceFlags),
+ pStackMark, fSkipSecurityCheck);
+#else
+ return FALSE;
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+ default:
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_INVALID_TOKEN_IN_MANIFESTRES);
+ }
+}
+
+void PEFile::GetPEKindAndMachine(DWORD* pdwKind, DWORD* pdwMachine)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (IsResource() || IsDynamic())
+ {
+ if (pdwKind)
+ *pdwKind = 0;
+ if (pdwMachine)
+ *pdwMachine = 0;
+ return;
+ }
+
+#ifdef FEATURE_PREJIT
+ if (IsNativeLoaded())
+ {
+ CONSISTENCY_CHECK(HasNativeImage());
+
+ m_nativeImage->GetNativeILPEKindAndMachine(pdwKind, pdwMachine);
+ return;
+ }
+#ifndef DACCESS_COMPILE
+ if (!HasOpenedILimage())
+ {
+ //don't want to touch the IL image unless we have already opened it
+ ReleaseHolder<PEImage> pNativeImage = GetNativeImageWithRef();
+ if (pNativeImage)
+ {
+ pNativeImage->GetNativeILPEKindAndMachine(pdwKind, pdwMachine);
+ return;
+ }
+ }
+#endif // DACCESS_COMPILE
+#endif // FEATURE_PREJIT
+
+ GetILimage()->GetPEKindAndMachine(pdwKind, pdwMachine);
+ return;
+}
+
+ULONG PEFile::GetILImageTimeDateStamp()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_PREJIT
+ if (IsNativeLoaded())
+ {
+ CONSISTENCY_CHECK(HasNativeImage());
+
+ // The IL image's time stamp is copied to the native image.
+ CORCOMPILE_VERSION_INFO* pVersionInfo = GetLoadedNative()->GetNativeVersionInfoMaybeNull();
+ if (pVersionInfo == NULL)
+ {
+ return 0;
+ }
+ else
+ {
+ return pVersionInfo->sourceAssembly.timeStamp;
+ }
+ }
+#endif // FEATURE_PREJIT
+
+ return GetLoadedIL()->GetTimeDateStamp();
+}
+
+#ifdef FEATURE_CAS_POLICY
+
+//---------------------------------------------------------------------------------------
+//
+// Get a SafePEFileHandle for this PEFile
+//
+
+SAFEHANDLE PEFile::GetSafeHandle()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ SAFEHANDLE objSafeHandle = NULL;
+
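+ // Keep the managed SafeHandle object reported to the GC across the allocation and calls below.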
+ GCPROTECT_BEGIN(objSafeHandle);
+
+ objSafeHandle = (SAFEHANDLE)AllocateObject(MscorlibBinder::GetClass(CLASS__SAFE_PEFILE_HANDLE));
+ CallDefaultConstructor(objSafeHandle);
+
+ this->AddRef();
+ objSafeHandle->SetHandle(this);
+
+ GCPROTECT_END();
+
+ return objSafeHandle;
+}
+
+#endif // FEATURE_CAS_POLICY
+
+// ================================================================================
+// PEAssembly class - a PEFile which represents an assembly
+// ================================================================================
+
+// Statics initialization.
+/* static */
+void PEAssembly::Attach()
+{
+ STANDARD_VM_CONTRACT;
+}
+
+#ifdef FEATURE_FUSION
+PEAssembly::PEAssembly(PEImage *image,
+ IMetaDataEmit *pEmit,
+ IAssembly *pIAssembly,
+ IBindResult *pNativeFusionAssembly,
+ PEImage *pPEImageNI,
+ IFusionBindLog *pFusionLog,
+ IHostAssembly *pIHostAssembly,
+ PEFile *creator,
+ BOOL system,
+ BOOL introspectionOnly/*=FALSE*/,
+ ICLRPrivAssembly * pHostAssembly)
+ : PEFile(image, FALSE),
+ m_creator(NULL),
+ m_pFusionAssemblyName(NULL),
+ m_pFusionAssembly(NULL),
+ m_pFusionLog(NULL),
+ m_bFusionLogEnabled(TRUE),
+ m_pIHostAssembly(NULL),
+ m_pNativeAssemblyLocation(NULL),
+ m_pNativeImageClosure(NULL),
+ m_fStrongNameBypassed(FALSE)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ PRECONDITION(CheckPointer(image, NULL_OK));
+ PRECONDITION(CheckPointer(pEmit, NULL_OK));
+ PRECONDITION(image != NULL || pEmit != NULL);
+ PRECONDITION(CheckPointer(pIAssembly, NULL_OK));
+ PRECONDITION(CheckPointer(pFusionLog, NULL_OK));
+ PRECONDITION(CheckPointer(pIHostAssembly, NULL_OK));
+ PRECONDITION(CheckPointer(creator, NULL_OK));
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ if (introspectionOnly)
+ {
+ if (!system) // Implementation restriction: mscorlib.dll cannot be loaded for introspection. The architecture depends on there being exactly one mscorlib.
+ {
+ m_flags |= PEFILE_INTROSPECTIONONLY;
+#ifdef FEATURE_PREJIT
+ SetCannotUseNativeImage();
+#endif // FEATURE_PREJIT
+ }
+ }
+
+ if (pIAssembly)
+ {
+ m_pFusionAssembly = pIAssembly;
+ pIAssembly->AddRef();
+
+ IfFailThrow(pIAssembly->GetAssemblyNameDef(&m_pFusionAssemblyName));
+ }
+ else if (pIHostAssembly)
+ {
+ m_flags |= PEFILE_ISTREAM;
+#ifdef FEATURE_PREJIT
+ m_fCanUseNativeImage = FALSE;
+#endif // FEATURE_PREJIT
+
+ m_pIHostAssembly = pIHostAssembly;
+ pIHostAssembly->AddRef();
+
+ IfFailThrow(pIHostAssembly->GetAssemblyNameDef(&m_pFusionAssemblyName));
+ }
+
+ if (pFusionLog)
+ {
+ m_pFusionLog = pFusionLog;
+ pFusionLog->AddRef();
+ }
+
+ if (creator)
+ {
+ m_creator = creator;
+ creator->AddRef();
+ }
+
+ m_flags |= PEFILE_ASSEMBLY;
+ if (system)
+ m_flags |= PEFILE_SYSTEM;
+
+#ifdef FEATURE_PREJIT
+ // Find the native image
+ if (pIAssembly)
+ {
+ if (pNativeFusionAssembly != NULL)
+ SetNativeImage(pNativeFusionAssembly);
+ }
+ // Only one of pNativeFusionAssembly and pPEImageNI may be set.
+ _ASSERTE(!(pNativeFusionAssembly && pPEImageNI));
+
+ if (pPEImageNI != NULL)
+ this->PEFile::SetNativeImage(pPEImageNI);
+#endif // FEATURE_PREJIT
+
+ // If we have no native image, we require a mapping for the file.
+ if (!HasNativeImage() || !IsILOnly())
+ EnsureImageOpened();
+
+ // Open metadata eagerly to minimize failure windows
+ if (pEmit == NULL)
+ OpenMDImport_Unsafe(); //constructor, cannot race with anything
+ else
+ {
+ _ASSERTE(!m_bHasPersistentMDImport);
+ IfFailThrow(GetMetaDataInternalInterfaceFromPublic(pEmit, IID_IMDInternalImport,
+ (void **)&m_pMDImport));
+ m_pEmitter = pEmit;
+ pEmit->AddRef();
+ m_bHasPersistentMDImport=TRUE;
+ m_MDImportIsRW_Debugger_Use_Only = TRUE;
+ }
+
+ // m_pMDImport can be external
+ // Make sure this is an assembly
+ if (!m_pMDImport->IsValidToken(TokenFromRid(1, mdtAssembly)))
+ ThrowHR(COR_E_ASSEMBLYEXPECTED);
+
+ // Make sure we perform security checks after we've obtained IMDInternalImport interface
+ DoLoadSignatureChecks();
+
+ // Verify name eagerly
+ LPCUTF8 szName = GetSimpleName();
+ if (!*szName)
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_EMPTY_ASSEMDEF_NAME);
+ }
+
+#ifdef FEATURE_PREJIT
+ if (IsResource() || IsDynamic())
+ m_fCanUseNativeImage = FALSE;
+#endif // FEATURE_PREJIT
+
+ if (m_pFusionAssembly)
+ {
+ m_loadContext = m_pFusionAssembly->GetFusionLoadContext();
+ m_pFusionAssembly->GetAssemblyLocation(&m_dwLocationFlags);
+ }
+ else if (pHostAssembly != nullptr)
+ {
+ m_loadContext = LOADCTX_TYPE_HOSTED;
+ m_dwLocationFlags = ASMLOC_UNKNOWN;
+ m_pHostAssembly = clr::SafeAddRef(pHostAssembly); // Should use SetHostAssembly(pHostAssembly) here
+ }
+ else
+ {
+ m_loadContext = LOADCTX_TYPE_UNKNOWN;
+ m_dwLocationFlags = ASMLOC_UNKNOWN;
+ }
+
+ TESTHOOKCALL(CompletedNativeImageBind(image,szName,HasNativeImage()));
+
+#if _DEBUG
+ GetCodeBaseOrName(m_debugName);
+ m_debugName.Normalize();
+ m_pDebugName = m_debugName;
+#endif
+}
+
+#else // FEATURE_FUSION
+
+PEAssembly::PEAssembly(
+ CoreBindResult* pBindResultInfo,
+ IMetaDataEmit* pEmit,
+ PEFile *creator,
+ BOOL system,
+ BOOL introspectionOnly/*=FALSE*/
+#ifdef FEATURE_HOSTED_BINDER
+ ,
+ PEImage * pPEImageIL /*= NULL*/,
+ PEImage * pPEImageNI /*= NULL*/,
+ ICLRPrivAssembly * pHostAssembly /*= NULL*/
+#endif
+ )
+
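+ // The base PEFile is initialized with the IL image from the bind result when available,
+ // falling back to the bind result's native image, or to the explicitly supplied IL/NI
+ // images when no bind result is given.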
+ : PEFile(pBindResultInfo ? (pBindResultInfo->GetPEImage() ? pBindResultInfo->GetPEImage() :
+ (pBindResultInfo->HasNativeImage() ? pBindResultInfo->GetNativeImage() : NULL)
+#ifdef FEATURE_HOSTED_BINDER
+ ): pPEImageIL? pPEImageIL:(pPEImageNI? pPEImageNI:NULL), FALSE),
+#else
+ ): NULL, FALSE),
+#endif
+ m_creator(clr::SafeAddRef(creator)),
+ m_bIsFromGAC(FALSE),
+ m_bIsOnTpaList(FALSE)
+#ifdef FEATURE_CORECLR
+ ,m_fProfileAssembly(0)
+#else
+ ,m_fStrongNameBypassed(FALSE)
+#endif
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ PRECONDITION(CheckPointer(pEmit, NULL_OK));
+ PRECONDITION(CheckPointer(creator, NULL_OK));
+#ifdef FEATURE_HOSTED_BINDER
+ PRECONDITION(pBindResultInfo == NULL || (pPEImageIL == NULL && pPEImageNI == NULL));
+#endif
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ if (introspectionOnly)
+ {
+ if (!system) // Implementation restriction: mscorlib.dll cannot be loaded for introspection. The architecture depends on there being exactly one mscorlib.
+ {
+ m_flags |= PEFILE_INTROSPECTIONONLY;
+ }
+ }
+
+ m_flags |= PEFILE_ASSEMBLY;
+ if (system)
+ m_flags |= PEFILE_SYSTEM;
+
+ // We check the precondition above that either pBindResultInfo is null or both pPEImageIL and pPEImageNI are,
+ // so we'll only get a max of one native image passed in.
+#ifdef FEATURE_HOSTED_BINDER
+ if (pPEImageNI != NULL)
+ {
+ SetNativeImage(pPEImageNI);
+ }
+#endif
+
+#ifdef FEATURE_PREJIT
+ if (pBindResultInfo && pBindResultInfo->HasNativeImage())
+ SetNativeImage(pBindResultInfo->GetNativeImage());
+#endif
+
+ // If we have no native image, we require a mapping for the file.
+ if (!HasNativeImage() || !IsILOnly())
+ EnsureImageOpened();
+
+ // Initialize the status of the assembly being in the GAC, or being part of the TPA list, before
+ // we start to do work (like strong name verification) that relies on those states to be valid.
+ if(pBindResultInfo != nullptr)
+ {
+ m_bIsFromGAC = pBindResultInfo->IsFromGAC();
+ m_bIsOnTpaList = pBindResultInfo->IsOnTpaList();
+ }
+
+ // Check security related stuff
+ VerifyStrongName();
+
+ // Open metadata eagerly to minimize failure windows
+ if (pEmit == NULL)
+ OpenMDImport_Unsafe(); //constructor, cannot race with anything
+ else
+ {
+ _ASSERTE(!m_bHasPersistentMDImport);
+ IfFailThrow(GetMetaDataInternalInterfaceFromPublic(pEmit, IID_IMDInternalImport,
+ (void **)&m_pMDImport));
+ m_pEmitter = pEmit;
+ pEmit->AddRef();
+ m_bHasPersistentMDImport=TRUE;
+ m_MDImportIsRW_Debugger_Use_Only = TRUE;
+ }
+
+ // m_pMDImport can be external
+ // Make sure this is an assembly
+ if (!m_pMDImport->IsValidToken(TokenFromRid(1, mdtAssembly)))
+ ThrowHR(COR_E_ASSEMBLYEXPECTED);
+
+ // Verify name eagerly
+ LPCUTF8 szName = GetSimpleName();
+ if (!*szName)
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_EMPTY_ASSEMDEF_NAME);
+ }
+
+#ifdef FEATURE_HOSTED_BINDER
+ // Set the host assembly and binding context as the AssemblySpec initialization
+ // for CoreCLR will expect to have it set.
+ if (pHostAssembly != nullptr)
+ {
+ m_pHostAssembly = clr::SafeAddRef(pHostAssembly);
+ }
+
+ if(pBindResultInfo != nullptr)
+ {
+ // Cannot have both pHostAssembly and a coreclr based bind
+ _ASSERTE(pHostAssembly == nullptr);
+ pBindResultInfo->GetBindAssembly(&m_pHostAssembly);
+ }
+#endif // FEATURE_HOSTED_BINDER
+
+#if _DEBUG
+ GetCodeBaseOrName(m_debugName);
+ m_debugName.Normalize();
+ m_pDebugName = m_debugName;
+
+ AssemblySpec spec;
+ spec.InitializeSpec(this);
+
+ spec.GetFileOrDisplayName(ASM_DISPLAYF_VERSION |
+ ASM_DISPLAYF_CULTURE |
+ ASM_DISPLAYF_PUBLIC_KEY_TOKEN,
+ m_sTextualIdentity);
+#endif
+}
+#endif // FEATURE_FUSION
+
+
+#if defined(FEATURE_HOSTED_BINDER)
+
+#ifdef FEATURE_FUSION
+
+PEAssembly *PEAssembly::Open(
+ PEAssembly *pParentAssembly,
+ PEImage *pPEImageIL,
+ BOOL isIntrospectionOnly)
+{
+ STANDARD_VM_CONTRACT;
+ PEAssembly * pPEAssembly = new PEAssembly(
+ pPEImageIL, // PEImage
+ nullptr, // IMetaDataEmit
+ nullptr, // IAssembly
+ nullptr, // IBindResult pNativeFusionAssembly
+ nullptr, // PEImage *pNIImage
+ nullptr, // IFusionBindLog
+ nullptr, // IHostAssembly
+ pParentAssembly, // creator
+ FALSE, // isSystem
+ isIntrospectionOnly, // isIntrospectionOnly
+ NULL);
+
+ return pPEAssembly;
+}
+
+PEAssembly *PEAssembly::Open(
+ PEAssembly * pParent,
+ PEImage * pPEImageIL,
+ PEImage * pPEImageNI,
+ ICLRPrivAssembly * pHostAssembly,
+ BOOL fIsIntrospectionOnly)
+{
+ STANDARD_VM_CONTRACT;
+ PEAssembly * pPEAssembly = new PEAssembly(
+ pPEImageIL, // PEImage
+ nullptr, // IMetaDataEmit
+ nullptr, // IAssembly
+ nullptr, // IBindResult pNativeFusionAssembly
+ pPEImageNI, // Native Image PEImage
+ nullptr, // IFusionBindLog
+ nullptr, // IHostAssembly
+ pParent, // creator
+ FALSE, // isSystem
+ fIsIntrospectionOnly,
+ pHostAssembly);
+
+ return pPEAssembly;
+}
+
+#else //FEATURE_FUSION
+
+PEAssembly *PEAssembly::Open(
+ PEAssembly * pParent,
+ PEImage * pPEImageIL,
+ PEImage * pPEImageNI,
+ ICLRPrivAssembly * pHostAssembly,
+ BOOL fIsIntrospectionOnly)
+{
+ STANDARD_VM_CONTRACT;
+
+ PEAssembly * pPEAssembly = new PEAssembly(
+ nullptr, // BindResult
+ nullptr, // IMetaDataEmit
+ pParent, // PEFile creator
+ FALSE, // isSystem
+ fIsIntrospectionOnly,
+ pPEImageIL,
+ pPEImageNI,
+ pHostAssembly);
+
+ return pPEAssembly;
+}
+
+#endif // FEATURE_FUSION
+
+#endif // FEATURE_HOSTED_BINDER
+
+
+PEAssembly::~PEAssembly()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ GC_TRIGGERS; // Fusion uses crsts on AddRef/Release
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+#ifdef FEATURE_FUSION
+ if (m_pFusionAssemblyName != NULL)
+ m_pFusionAssemblyName->Release();
+ if (m_pFusionAssembly != NULL)
+ m_pFusionAssembly->Release();
+ if (m_pIHostAssembly != NULL)
+ m_pIHostAssembly->Release();
+ if (m_pNativeAssemblyLocation != NULL)
+ {
+ m_pNativeAssemblyLocation->Release();
+ }
+ if (m_pNativeImageClosure!=NULL)
+ m_pNativeImageClosure->Release();
+ if (m_pFusionLog != NULL)
+ m_pFusionLog->Release();
+#endif // FEATURE_FUSION
+ if (m_creator != NULL)
+ m_creator->Release();
+
+}
+
+#ifndef DACCESS_COMPILE
+void PEAssembly::ReleaseIL()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+#ifdef FEATURE_FUSION
+ if (m_pFusionAssemblyName != NULL)
+ {
+ m_pFusionAssemblyName->Release();
+ m_pFusionAssemblyName=NULL;
+ }
+ if (m_pFusionAssembly != NULL)
+ {
+ m_pFusionAssembly->Release();
+ m_pFusionAssembly=NULL;
+ }
+ if (m_pIHostAssembly != NULL)
+ {
+ m_pIHostAssembly->Release();
+ m_pIHostAssembly=NULL;
+ }
+ if (m_pNativeAssemblyLocation != NULL)
+ {
+ m_pNativeAssemblyLocation->Release();
+ m_pNativeAssemblyLocation=NULL;
+ }
+ _ASSERTE(m_pNativeImageClosure==NULL);
+
+ if (m_pFusionLog != NULL)
+ {
+ m_pFusionLog->Release();
+ m_pFusionLog=NULL;
+ }
+#endif // FEATURE_FUSION
+ if (m_creator != NULL)
+ {
+ m_creator->Release();
+ m_creator=NULL;
+ }
+
+ PEFile::ReleaseIL();
+}
+#endif
+
+/* static */
+
+#ifdef FEATURE_FUSION
+PEAssembly *PEAssembly::OpenSystem(IApplicationContext * pAppCtx)
+#else
+PEAssembly *PEAssembly::OpenSystem(IUnknown * pAppCtx)
+#endif
+{
+ STANDARD_VM_CONTRACT;
+
+ PEAssembly *result = NULL;
+
+ EX_TRY
+ {
+ result = DoOpenSystem(pAppCtx);
+ }
+ EX_HOOK
+ {
+ Exception *ex = GET_EXCEPTION();
+
+ // Rethrow non-transient exceptions as file load exceptions with proper
+ // context
+
+ if (!ex->IsTransient())
+ EEFileLoadException::Throw(SystemDomain::System()->BaseLibrary(), ex->GetHR(), ex);
+ }
+ EX_END_HOOK;
+ return result;
+}
+
+/* static */
+#ifdef FEATURE_FUSION
+PEAssembly *PEAssembly::DoOpenSystem(IApplicationContext * pAppCtx)
+#else
+PEAssembly *PEAssembly::DoOpenSystem(IUnknown * pAppCtx)
+#endif
+{
+ CONTRACT(PEAssembly *)
+ {
+ POSTCONDITION(CheckPointer(RETVAL));
+ STANDARD_VM_CHECK;
+ }
+ CONTRACT_END;
+
+#ifdef FEATURE_FUSION
+ SafeComHolder<IAssemblyName> pName;
+ IfFailThrow(CreateAssemblyNameObject(&pName, W("mscorlib"), 0, NULL));
+
+ UINT64 publicKeyValue = I64(CONCAT_MACRO(0x, VER_ECMA_PUBLICKEY));
+ BYTE publicKeyToken[8] =
+ {
+ (BYTE) (publicKeyValue>>56),
+ (BYTE) (publicKeyValue>>48),
+ (BYTE) (publicKeyValue>>40),
+ (BYTE) (publicKeyValue>>32),
+ (BYTE) (publicKeyValue>>24),
+ (BYTE) (publicKeyValue>>16),
+ (BYTE) (publicKeyValue>>8),
+ (BYTE) (publicKeyValue),
+ };
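+ // The shifts above unpack the 64-bit ECMA public key constant into the big-endian byte
+ // order expected for the ASM_NAME_PUBLIC_KEY_TOKEN property.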
+
+ IfFailThrow(pName->SetProperty(ASM_NAME_PUBLIC_KEY_TOKEN, publicKeyToken, sizeof(publicKeyToken)));
+
+ USHORT version = VER_ASSEMBLYMAJORVERSION;
+ IfFailThrow(pName->SetProperty(ASM_NAME_MAJOR_VERSION, &version, sizeof(version)));
+ version = VER_ASSEMBLYMINORVERSION;
+ IfFailThrow(pName->SetProperty(ASM_NAME_MINOR_VERSION, &version, sizeof(version)));
+ version = VER_ASSEMBLYBUILD;
+ IfFailThrow(pName->SetProperty(ASM_NAME_BUILD_NUMBER, &version, sizeof(version)));
+ version = VER_ASSEMBLYBUILD_QFE;
+ IfFailThrow(pName->SetProperty(ASM_NAME_REVISION_NUMBER, &version, sizeof(version)));
+
+ IfFailThrow(pName->SetProperty(ASM_NAME_CULTURE, W(""), sizeof(WCHAR)));
+
+#ifdef FEATURE_PREJIT
+#ifdef PROFILING_SUPPORTED
+ if (NGENImagesAllowed())
+ {
+ // Binding flags, zap string
+ CorCompileConfigFlags configFlags = PEFile::GetNativeImageConfigFlagsWithOverrides();
+ IfFailThrow(pName->SetProperty(ASM_NAME_CONFIG_MASK, &configFlags, sizeof(configFlags)));
+
+ LPCWSTR configString = g_pConfig->ZapSet();
+ IfFailThrow(pName->SetProperty(ASM_NAME_CUSTOM, (PVOID)configString,
+ (DWORD) (wcslen(configString)+1)*sizeof(WCHAR)));
+
+ // @TODO: Need some fuslogvw logging here
+ }
+#endif //PROFILING_SUPPORTED
+#endif // FEATURE_PREJIT
+
+ SafeComHolder<IAssembly> pIAssembly;
+ SafeComHolder<IBindResult> pNativeFusionAssembly;
+ SafeComHolder<IFusionBindLog> pFusionLog;
+
+ {
+ ETWOnStartup (FusionBinding_V1, FusionBindingEnd_V1);
+ IfFailThrow(BindToSystem(pName, SystemDomain::System()->SystemDirectory(), NULL, pAppCtx, &pIAssembly, &pNativeFusionAssembly, &pFusionLog));
+ }
+
+ StackSString path;
+ FusionBind::GetAssemblyManifestModulePath(pIAssembly, path);
+
+ // Open the image with no required mapping. This will be
+ // promoted to a real open if we don't have a native image.
+ PEImageHolder image (PEImage::OpenImage(path));
+
+ PEAssembly* pPEAssembly = new PEAssembly(image, NULL, pIAssembly,pNativeFusionAssembly, NULL, pFusionLog, NULL, NULL, TRUE, FALSE);
+
+#ifdef FEATURE_APPX_BINDER
+ if (AppX::IsAppXProcess())
+ {
+ // Since mscorlib is loaded as a special case, create and assign an ICLRPrivAssembly for the new PEAssembly here.
+ CLRPrivBinderAppX * pBinder = CLRPrivBinderAppX::GetOrCreateBinder();
+ CLRPrivBinderFusion * pFusionBinder = pBinder->GetFusionBinder();
+
+ pFusionBinder->BindMscorlib(pPEAssembly);
+ }
+#endif
+
+ RETURN pPEAssembly;
+#else // FEATURE_FUSION
+ ETWOnStartup (FusionBinding_V1, FusionBindingEnd_V1);
+ CoreBindResult bindResult;
+ ReleaseHolder<ICLRPrivAssembly> pPrivAsm;
+ IfFailThrow(CCoreCLRBinderHelper::BindToSystem(&pPrivAsm, !IsCompilationProcess() || g_fAllowNativeImages));
+ if(pPrivAsm != NULL)
+ {
+ bindResult.Init(pPrivAsm, TRUE, TRUE);
+ }
+
+ RETURN new PEAssembly(&bindResult, NULL, NULL, TRUE, FALSE);
+#endif // FEATURE_FUSION
+}
+
+#ifdef FEATURE_FUSION
+/* static */
+PEAssembly *PEAssembly::Open(IAssembly *pIAssembly,
+ IBindResult *pNativeFusionAssembly,
+ IFusionBindLog *pFusionLog/*=NULL*/,
+ BOOL isSystemAssembly/*=FALSE*/,
+ BOOL isIntrospectionOnly/*=FALSE*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ PEAssembly *result = NULL;
+ EX_TRY
+ {
+ result = DoOpen(pIAssembly, pNativeFusionAssembly, pFusionLog, isSystemAssembly, isIntrospectionOnly);
+ }
+ EX_HOOK
+ {
+ Exception *ex = GET_EXCEPTION();
+
+ // Rethrow non-transient exceptions as file load exceptions with proper
+ // context
+ if (!ex->IsTransient())
+ EEFileLoadException::Throw(pIAssembly, NULL, ex->GetHR(), ex);
+ }
+ EX_END_HOOK;
+
+ return result;
+}
+
+// Thread stress
+class DoOpenIAssemblyStress : APIThreadStress
+{
+public:
+ IAssembly *pIAssembly;
+ IBindResult *pNativeFusionAssembly;
+ IFusionBindLog *pFusionLog;
+ DoOpenIAssemblyStress(IAssembly *pIAssembly, IBindResult *pNativeFusionAssembly, IFusionBindLog *pFusionLog)
+ : pIAssembly(pIAssembly), pNativeFusionAssembly(pNativeFusionAssembly),pFusionLog(pFusionLog) {LIMITED_METHOD_CONTRACT;}
+ void Invoke()
+ {
+ WRAPPER_NO_CONTRACT;
+ PEAssemblyHolder result (PEAssembly::Open(pIAssembly, pNativeFusionAssembly, pFusionLog, FALSE, FALSE));
+ }
+};
+
+/* static */
+PEAssembly *PEAssembly::DoOpen(IAssembly *pIAssembly,
+ IBindResult *pNativeFusionAssembly,
+ IFusionBindLog *pFusionLog,
+ BOOL isSystemAssembly,
+ BOOL isIntrospectionOnly/*=FALSE*/)
+{
+ CONTRACT(PEAssembly *)
+ {
+ PRECONDITION(CheckPointer(pIAssembly));
+ POSTCONDITION(CheckPointer(RETVAL));
+ STANDARD_VM_CHECK;
+ }
+ CONTRACT_END;
+
+ DoOpenIAssemblyStress ts(pIAssembly,pNativeFusionAssembly,pFusionLog);
+
+ PEImageHolder image;
+
+ StackSString path;
+ FusionBind::GetAssemblyManifestModulePath(pIAssembly, path);
+
+ // Open the image with no required mapping. This will be
+ // promoted to a real open if we don't have a native image.
+ image = PEImage::OpenImage(path, MDInternalImport_NoCache); // "identity" does not need to be cached
+
+ PEAssemblyHolder assembly (new PEAssembly(image, NULL, pIAssembly, pNativeFusionAssembly, NULL, pFusionLog,
+ NULL, NULL, isSystemAssembly, isIntrospectionOnly));
+
+ RETURN assembly.Extract();
+}
+
+/* static */
+PEAssembly *PEAssembly::Open(IHostAssembly *pIHostAssembly, BOOL isSystemAssembly, BOOL isIntrospectionOnly)
+{
+ STANDARD_VM_CONTRACT;
+
+ PEAssembly *result = NULL;
+
+ EX_TRY
+ {
+ result = DoOpen(pIHostAssembly, isSystemAssembly, isIntrospectionOnly);
+ }
+ EX_HOOK
+ {
+ Exception *ex = GET_EXCEPTION();
+
+ // Rethrow non-transient exceptions as file load exceptions with proper
+ // context
+
+ if (!ex->IsTransient())
+ EEFileLoadException::Throw(NULL, pIHostAssembly, ex->GetHR(), ex);
+ }
+ EX_END_HOOK;
+ return result;
+}
+
+// Thread stress
+class DoOpenIHostAssemblyStress : APIThreadStress
+{
+public:
+ IHostAssembly *pIHostAssembly;
+ DoOpenIHostAssemblyStress(IHostAssembly *pIHostAssembly) :
+ pIHostAssembly(pIHostAssembly) {LIMITED_METHOD_CONTRACT;}
+ void Invoke()
+ {
+ WRAPPER_NO_CONTRACT;
+ PEAssemblyHolder result (PEAssembly::Open(pIHostAssembly, FALSE, FALSE));
+ }
+};
+
+/* static */
+PEAssembly *PEAssembly::DoOpen(IHostAssembly *pIHostAssembly, BOOL isSystemAssembly,
+ BOOL isIntrospectionOnly)
+{
+ CONTRACT(PEAssembly *)
+ {
+ PRECONDITION(CheckPointer(pIHostAssembly));
+ POSTCONDITION(CheckPointer(RETVAL));
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ DoOpenIHostAssemblyStress ts(pIHostAssembly);
+
+ UINT64 AssemblyId;
+ IfFailThrow(pIHostAssembly->GetAssemblyId(&AssemblyId));
+
+ PEImageHolder image(PEImage::FindById(AssemblyId, 0));
+
+ PEAssemblyHolder assembly (new PEAssembly(image, NULL, NULL, NULL, NULL, NULL,
+ pIHostAssembly, NULL, isSystemAssembly, isIntrospectionOnly));
+
+ RETURN assembly.Extract();
+}
+#endif // FEATURE_FUSION
+
+#ifndef CROSSGEN_COMPILE
+/* static */
+PEAssembly *PEAssembly::OpenMemory(PEAssembly *pParentAssembly,
+ const void *flat, COUNT_T size,
+ BOOL isIntrospectionOnly/*=FALSE*/,
+ CLRPrivBinderLoadFile* pBinderToUse)
+{
+ STANDARD_VM_CONTRACT;
+
+ PEAssembly *result = NULL;
+
+ EX_TRY
+ {
+ result = DoOpenMemory(pParentAssembly, flat, size, isIntrospectionOnly, pBinderToUse);
+ }
+ EX_HOOK
+ {
+ Exception *ex = GET_EXCEPTION();
+
+ // Rethrow non-transient exceptions as file load exceptions with proper
+ // context
+
+ if (!ex->IsTransient())
+ EEFileLoadException::Throw(pParentAssembly, flat, size, ex->GetHR(), ex);
+ }
+ EX_END_HOOK;
+
+ return result;
+}
+
+
+// Thread stress
+
+class DoOpenFlatStress : APIThreadStress
+{
+public:
+ PEAssembly *pParentAssembly;
+ const void *flat;
+ COUNT_T size;
+ DoOpenFlatStress(PEAssembly *pParentAssembly, const void *flat, COUNT_T size)
+ : pParentAssembly(pParentAssembly), flat(flat), size(size) {LIMITED_METHOD_CONTRACT;}
+ void Invoke()
+ {
+ WRAPPER_NO_CONTRACT;
+ PEAssemblyHolder result(PEAssembly::OpenMemory(pParentAssembly, flat, size, FALSE));
+ }
+};
+
+/* static */
+PEAssembly *PEAssembly::DoOpenMemory(
+ PEAssembly *pParentAssembly,
+ const void *flat,
+ COUNT_T size,
+ BOOL isIntrospectionOnly,
+ CLRPrivBinderLoadFile* pBinderToUse)
+{
+ CONTRACT(PEAssembly *)
+ {
+ PRECONDITION(CheckPointer(flat));
+ PRECONDITION(CheckOverflow(flat, size));
+ PRECONDITION(CheckPointer(pParentAssembly));
+ STANDARD_VM_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // Thread stress
+ DoOpenFlatStress ts(pParentAssembly, flat, size);
+
+ // Note that we must have a flat image stashed away for two reasons.
+ // First, we need a private copy of the data which we can verify
+ // before doing the mapping. Second, we can only compute
+ // the strong name hash on a flat image.
+
+ PEImageHolder image(PEImage::LoadFlat(flat, size));
+
+ // Need to verify that this is a CLR assembly
+ if (!image->CheckILFormat())
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
+
+#if defined(FEATURE_HOSTED_BINDER) && !defined(FEATURE_CORECLR)
+ if(pBinderToUse != NULL && !isIntrospectionOnly)
+ {
+ ReleaseHolder<ICLRPrivAssembly> pAsm;
+ ReleaseHolder<IAssemblyName> pAssemblyName;
+ IfFailThrow(pBinderToUse->BindAssemblyExplicit(image, &pAssemblyName, &pAsm));
+ PEAssembly* pFile = nullptr;
+ IfFailThrow(GetAppDomain()->BindHostedPrivAssembly(pParentAssembly, pAsm, pAssemblyName, &pFile));
+ _ASSERTE(pFile);
+ RETURN pFile;
+ }
+#endif // FEATURE_HOSTED_BINDER && !FEATURE_CORECLR
+
+#ifdef FEATURE_FUSION
+ RETURN new PEAssembly(image, NULL, NULL, NULL, NULL, NULL, NULL, pParentAssembly, FALSE, isIntrospectionOnly);
+#else
+ CoreBindResult bindResult;
+ ReleaseHolder<ICLRPrivAssembly> assembly;
+ IfFailThrow(CCoreCLRBinderHelper::GetAssemblyFromImage(image, NULL, &assembly));
+ bindResult.Init(assembly,FALSE,FALSE);
+
+ RETURN new PEAssembly(&bindResult, NULL, pParentAssembly, FALSE, isIntrospectionOnly);
+#endif
+}
+#endif // !CROSSGEN_COMPILE
+
+#if defined(FEATURE_MIXEDMODE) && !defined(CROSSGEN_COMPILE)
+// Use for main exe loading
+// This is also used for "spontaneous" (IJW) dll loading where
+// we need to deliver DllMain callbacks, but we should eliminate this case
+
+/* static */
+PEAssembly *PEAssembly::OpenHMODULE(HMODULE hMod,
+ IAssembly *pFusionAssembly,
+ IBindResult *pNativeFusionAssembly,
+ IFusionBindLog *pFusionLog/*=NULL*/,
+ BOOL isIntrospectionOnly/*=FALSE*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ PEAssembly *result = NULL;
+
+ ETWOnStartup (OpenHModule_V1, OpenHModuleEnd_V1);
+
+ EX_TRY
+ {
+ result = DoOpenHMODULE(hMod, pFusionAssembly, pNativeFusionAssembly, pFusionLog, isIntrospectionOnly);
+ }
+ EX_HOOK
+ {
+ Exception *ex = GET_EXCEPTION();
+
+ // Rethrow non-transient exceptions as file load exceptions with proper
+ // context
+ if (!ex->IsTransient())
+ EEFileLoadException::Throw(pFusionAssembly, NULL, ex->GetHR(), ex);
+ }
+ EX_END_HOOK;
+ return result;
+}
+
+// Thread stress
+class DoOpenHMODULEStress : APIThreadStress
+{
+public:
+ HMODULE hMod;
+ IAssembly *pFusionAssembly;
+ IBindResult *pNativeFusionAssembly;
+ IFusionBindLog *pFusionLog;
+ DoOpenHMODULEStress(HMODULE hMod, IAssembly *pFusionAssembly, IBindResult *pNativeFusionAssembly, IFusionBindLog *pFusionLog)
+ : hMod(hMod), pFusionAssembly(pFusionAssembly), pNativeFusionAssembly(pNativeFusionAssembly),pFusionLog(pFusionLog) {LIMITED_METHOD_CONTRACT;}
+ void Invoke()
+ {
+ WRAPPER_NO_CONTRACT;
+ PEAssemblyHolder result(PEAssembly::OpenHMODULE(hMod, pFusionAssembly,pNativeFusionAssembly, pFusionLog, FALSE));
+ }
+};
+
+/* static */
+PEAssembly *PEAssembly::DoOpenHMODULE(HMODULE hMod,
+ IAssembly *pFusionAssembly,
+ IBindResult *pNativeFusionAssembly,
+ IFusionBindLog *pFusionLog,
+ BOOL isIntrospectionOnly/*=FALSE*/)
+{
+ CONTRACT(PEAssembly *)
+ {
+ PRECONDITION(CheckPointer(hMod));
+ PRECONDITION(CheckPointer(pFusionAssembly));
+ PRECONDITION(CheckPointer(pNativeFusionAssembly,NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL));
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ DoOpenHMODULEStress ts(hMod, pFusionAssembly, pNativeFusionAssembly, pFusionLog);
+
+ PEImageHolder image(PEImage::LoadImage(hMod));
+
+ RETURN new PEAssembly(image, NULL, pFusionAssembly, pNativeFusionAssembly, NULL, pFusionLog, NULL, NULL, FALSE, isIntrospectionOnly);
+}
+#endif // FEATURE_MIXEDMODE && !CROSSGEN_COMPILE
+
+
+#ifndef FEATURE_FUSION
+PEAssembly* PEAssembly::Open(CoreBindResult* pBindResult,
+ BOOL isSystem, BOOL isIntrospectionOnly)
+{
+ return new PEAssembly(pBindResult, NULL, NULL, isSystem, isIntrospectionOnly);
+}
+#endif
+
+/* static */
+PEAssembly *PEAssembly::Create(PEAssembly *pParentAssembly,
+ IMetaDataAssemblyEmit *pAssemblyEmit,
+ BOOL bIsIntrospectionOnly)
+{
+ CONTRACT(PEAssembly *)
+ {
+ PRECONDITION(CheckPointer(pParentAssembly));
+ PRECONDITION(CheckPointer(pAssemblyEmit));
+ STANDARD_VM_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // Set up the metadata pointers in the PEAssembly. (This is the only identity
+ // we have.)
+ SafeComHolder<IMetaDataEmit> pEmit;
+ pAssemblyEmit->QueryInterface(IID_IMetaDataEmit, (void **)&pEmit);
+#ifdef FEATURE_FUSION
+ ReleaseHolder<ICLRPrivAssembly> pPrivAssembly;
+ if (pParentAssembly->HasHostAssembly())
+ {
+ // Dynamic assemblies in AppX use their parent's ICLRPrivAssembly as the binding context.
+ pPrivAssembly = clr::SafeAddRef(new CLRPrivBinderUtil::CLRPrivBinderAsAssemblyWrapper(
+ pParentAssembly->GetHostAssembly()));
+ }
+
+ PEAssemblyHolder pFile(new PEAssembly(
+ NULL, pEmit, NULL, NULL, NULL, NULL, NULL, pParentAssembly,
+ FALSE, bIsIntrospectionOnly,
+ pPrivAssembly));
+#else
+ PEAssemblyHolder pFile(new PEAssembly(NULL, pEmit, pParentAssembly, FALSE, bIsIntrospectionOnly));
+#endif
+ RETURN pFile.Extract();
+}
+
+
+#ifdef FEATURE_PREJIT
+
+#ifdef FEATURE_FUSION
+BOOL PEAssembly::HasEqualNativeClosure(DomainAssembly * pDomainAssembly)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDomainAssembly));
+ }
+ CONTRACTL_END;
+ if (IsSystem())
+ return TRUE;
+ HRESULT hr = S_OK;
+
+
+ if (m_pNativeImageClosure == NULL)
+ return FALSE;
+
+ // ensure the closures are walked
+ IAssemblyBindingClosure * pClosure = pDomainAssembly->GetAssemblyBindingClosure(LEVEL_COMPLETE);
+ _ASSERTE(pClosure != NULL);
+
+ if (m_pNativeImageClosure->HasBeenWalked(LEVEL_COMPLETE) != S_OK )
+ {
+ GCX_COOP();
+
+ ENTER_DOMAIN_PTR(SystemDomain::System()->DefaultDomain(),ADV_DEFAULTAD);
+ {
+ GCX_PREEMP();
+ IfFailThrow(m_pNativeImageClosure->EnsureWalked(GetFusionAssembly(),GetAppDomain()->GetFusionContext(),LEVEL_COMPLETE));
+ }
+ END_DOMAIN_TRANSITION;
+ }
+
+
+ hr = pClosure->IsEqual(m_pNativeImageClosure);
+ IfFailThrow(hr);
+ return (hr == S_OK);
+}
+#endif //FEATURE_FUSION
+
+#ifdef FEATURE_FUSION
+void PEAssembly::SetNativeImage(IBindResult *pNativeFusionAssembly)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ StackSString path;
+ WCHAR pwzPath[MAX_PATH];
+ DWORD dwCCPath = MAX_PATH;
+ ReleaseHolder<IAssemblyLocation> pIAssemblyLocation;
+
+ IfFailThrow(pNativeFusionAssembly->GetAssemblyLocation(&pIAssemblyLocation));
+ IfFailThrow(pIAssemblyLocation->GetPath(pwzPath, &dwCCPath));
+ path.Set(pwzPath);
+
+ PEImageHolder image(PEImage::OpenImage(path));
+ image->Load();
+
+ // For desktop dev11, this verification is now done at native binding time.
+ _ASSERTE(CheckNativeImageVersion(image));
+
+ PEFile::SetNativeImage(image);
+ IfFailThrow(pNativeFusionAssembly->GetAssemblyLocation(&m_pNativeAssemblyLocation));
+}
+#else //FEATURE_FUSION
+void PEAssembly::SetNativeImage(PEImage * image)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ image->Load();
+
+ if (CheckNativeImageVersion(image))
+ {
+ PEFile::SetNativeImage(image);
+#if 0
+ //Enable this code (e.g. by changing the "#if 0" above to "#if defined(_DEBUG)") if you want to make
+ //sure we never touch the flat layout in the presence of the ngen image.
+ //find all the layouts in the il image and make sure we never touch them.
+ unsigned ignored = 0;
+ PTR_PEImageLayout layout = m_ILimage->GetLayout(PEImageLayout::LAYOUT_FLAT, 0);
+ if (layout != NULL)
+ {
+ //cache a bunch of PE metadata in the PEDecoder
+ m_ILimage->CheckILFormat();
+
+ //we also need some of metadata (for the public key), so cache this too
+ DWORD verifyOutputFlags;
+ m_ILimage->VerifyStrongName(&verifyOutputFlags);
+ //fudge this by a few pages to make sure we can still mess with the PE headers
+ const size_t fudgeSize = 4096 * 4;
+ ClrVirtualProtect((void*)(((char *)layout->GetBase()) + fudgeSize),
+ layout->GetSize() - fudgeSize, 0, &ignored);
+ layout->Release();
+ }
+#endif
+ }
+ else
+ {
+ ExternalLog(LL_WARNING, "Native image is not correct version.");
+ }
+}
+#endif //FEATURE_FUSION
+
+#ifdef FEATURE_FUSION
+void PEAssembly::ClearNativeImage()
+{
+ CONTRACT_VOID
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(HasNativeImage());
+ POSTCONDITION(!HasNativeImage());
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ PEFile::ClearNativeImage();
+
+ if (m_pNativeAssemblyLocation != NULL)
+ m_pNativeAssemblyLocation->Release();
+ m_pNativeAssemblyLocation = NULL;
+ if (m_pNativeImageClosure != NULL)
+ m_pNativeImageClosure->Release();
+ m_pNativeImageClosure = NULL;
+ RETURN;
+}
+#endif //FEATURE_FUSION
+#endif // FEATURE_PREJIT
+
+
+#ifdef FEATURE_FUSION
+BOOL PEAssembly::IsBindingCodeBase()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (m_pIHostAssembly != NULL)
+ return FALSE;
+
+ if (m_pFusionAssembly == NULL)
+ return (!GetPath().IsEmpty());
+
+ if (m_dwLocationFlags == ASMLOC_UNKNOWN)
+ return FALSE;
+
+ return ((m_dwLocationFlags & ASMLOC_CODEBASE_HINT) != 0);
+}
+
+BOOL PEAssembly::IsSourceGAC()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if ((m_pIHostAssembly != NULL) || (m_pFusionAssembly == NULL))
+ {
+ return FALSE;
+ }
+
+ return ((m_dwLocationFlags & ASMLOC_LOCATION_MASK) == ASMLOC_GAC);
+}
+
+BOOL PEAssembly::IsSourceDownloadCache()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if ((m_pIHostAssembly != NULL) || (m_pFusionAssembly == NULL))
+ {
+ return FALSE;
+ }
+
+ return ((m_dwLocationFlags & ASMLOC_LOCATION_MASK) == ASMLOC_DOWNLOAD_CACHE);
+}
+
+#else // FEATURE_FUSION
+BOOL PEAssembly::IsSourceGAC()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_bIsFromGAC;
+};
+
+#endif // FEATURE_FUSION
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_FUSION
+BOOL PEAssembly::IsContextLoad()
+{
+ LIMITED_METHOD_CONTRACT;
+ if ((m_pIHostAssembly != NULL) || (m_pFusionAssembly == NULL))
+ {
+ return FALSE;
+ }
+ return (IsSystem() || (m_loadContext == LOADCTX_TYPE_DEFAULT));
+}
+
+LOADCTX_TYPE PEAssembly::GetLoadContext()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_loadContext;
+}
+
+DWORD PEAssembly::GetLocationFlags()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_dwLocationFlags;
+}
+
+#endif
+
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_FUSION
+PEKIND PEAssembly::GetFusionProcessorArchitecture()
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ PEImage * pImage = NULL;
+
+#ifdef FEATURE_PREJIT
+ pImage = m_nativeImage;
+#endif
+
+ if (pImage == NULL)
+ pImage = GetILimage();
+
+ return pImage->GetFusionProcessorArchitecture();
+}
+
+IAssemblyName * PEAssembly::GetFusionAssemblyName()
+{
+ CONTRACT(IAssemblyName *)
+ {
+ INSTANCE_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL));
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ if (m_pFusionAssemblyName == NULL)
+ {
+ AssemblySpec spec;
+ spec.InitializeSpec(this);
+ PEImage * pImage = GetILimage();
+
+#ifdef FEATURE_PREJIT
+ if ((pImage != NULL) && !pImage->MDImportLoaded())
+ pImage = m_nativeImage;
+#endif
+
+ if (pImage != NULL)
+ {
+ spec.SetPEKIND(pImage->GetFusionProcessorArchitecture());
+ }
+
+ GCX_PREEMP();
+
+ IfFailThrow(spec.CreateFusionName(&m_pFusionAssemblyName, FALSE));
+ }
+
+ RETURN m_pFusionAssemblyName;
+}
+
+// This version of GetFusionAssemblyName can be used to return the reference in a
+// NOTHROW/NOTRIGGER fashion. This is useful for scenarios where you don't want to invoke the
+// THROWS/GCTRIGGERS version because you know the name has already been created and is available.
+IAssemblyName * PEAssembly::GetFusionAssemblyNameNoCreate()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pFusionAssemblyName;
+}
+
+IAssembly *PEAssembly::GetFusionAssembly()
+{
+ CONTRACT(IAssembly *)
+ {
+ INSTANCE_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ RETURN m_pFusionAssembly;
+}
+
+IHostAssembly *PEAssembly::GetIHostAssembly()
+{
+ CONTRACT(IHostAssembly *)
+ {
+ INSTANCE_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ RETURN m_pIHostAssembly;
+}
+
+IAssemblyLocation *PEAssembly::GetNativeAssemblyLocation()
+{
+ CONTRACT(IAssemblyLocation *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(HasNativeImage());
+ POSTCONDITION(CheckPointer(RETVAL));
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ RETURN m_pNativeAssemblyLocation;
+}
+#endif // FEATURE_FUSION
+
+// ------------------------------------------------------------
+// Hash support
+// ------------------------------------------------------------
+
+void PEAssembly::VerifyStrongName()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // If we've already done the signature checks, we don't need to do them again.
+ if (m_fStrongNameVerified)
+ {
+ return;
+ }
+
+#ifdef FEATURE_FUSION
+ // System and dynamic assemblies don't need hash checks
+ if (IsSystem() || IsDynamic())
+#else
+ // Without FUSION/GAC, we need to verify SN on all assemblies, except dynamic assemblies.
+ if (IsDynamic())
+#endif
+ {
+
+ m_flags |= PEFILE_SKIP_MODULE_HASH_CHECKS;
+ m_fStrongNameVerified = TRUE;
+ return;
+ }
+
+ // Next, verify the strong name, if necessary
+#ifdef FEATURE_FUSION
+ // See if the assembly comes from a secure location
+ IAssembly *pFusionAssembly = GetAssembly()->GetFusionAssembly();
+ if (pFusionAssembly)
+ {
+ DWORD dwLocation;
+ IfFailThrow(pFusionAssembly->GetAssemblyLocation(&dwLocation));
+
+ switch (dwLocation & ASMLOC_LOCATION_MASK)
+ {
+ case ASMLOC_GAC:
+ case ASMLOC_DOWNLOAD_CACHE:
+ case ASMLOC_DEV_OVERRIDE:
+ // Assemblies from the GAC or download cache have
+ // already been verified by Fusion.
+ m_flags |= PEFILE_SKIP_MODULE_HASH_CHECKS;
+ m_fStrongNameVerified = TRUE;
+ return;
+
+ case ASMLOC_RUN_FROM_SOURCE:
+ case ASMLOC_UNKNOWN:
+ // For now, just verify these every time, we need to
+ // cache the fact that at least one verification has
+ // been performed (if strong name policy permits
+ // caching of verification results)
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+#endif
+
+ // Check format of image. Note we must delay this until after the GAC status has been
+ // checked, to handle the case where we are not loading m_image.
+ EnsureImageOpened();
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+ if (IsWindowsRuntime())
+ {
+ // Winmd files are always loaded in full trust.
+ m_flags |= PEFILE_SKIP_MODULE_HASH_CHECKS;
+ m_fStrongNameVerified = TRUE;
+ return;
+ }
+#endif
+
+#if defined(FEATURE_CORECLR) || defined(CROSSGEN_COMPILE)
+ if (m_nativeImage == NULL && !GetILimage()->IsTrustedNativeImage())
+#else
+ if (!GetILimage()->IsTrustedNativeImage())
+#endif
+ {
+ if (!GetILimage()->CheckILFormat())
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+#if defined(CROSSGEN_COMPILE) && !defined(FEATURE_CORECLR)
+ // Do not validate the strong name signature during CrossGen. This is necessary
+ // to make build-lab scenarios work.
+ if (IsCompilationProcess())
+ {
+ m_flags |= PEFILE_SKIP_MODULE_HASH_CHECKS;
+ }
+ else
+#endif
+ // Check the strong name if present.
+ if (IsIntrospectionOnly())
+ {
+ // For introspection assemblies, we don't need to check strong names and we don't
+ // need to do module hash checks.
+ m_flags |= PEFILE_SKIP_MODULE_HASH_CHECKS;
+ }
+#if !defined(FEATURE_CORECLR)
+ //We do this to early out for WinMD files that are unsigned but have NI images as well.
+ else if (!HasStrongNameSignature())
+ {
+#ifdef FEATURE_CAS_POLICY
+ // We only check module hashes if there is a strong name or Authenticode signature
+ if (m_certificate == NULL)
+ {
+ m_flags |= PEFILE_SKIP_MODULE_HASH_CHECKS;
+ }
+#endif
+ }
+#endif // !defined(FEATURE_CORECLR)
+ else
+ {
+#if defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+ // Runtime policy on CoreCLR is to skip verification of ALL assemblies
+ m_flags |= PEFILE_SKIP_MODULE_HASH_CHECKS;
+ m_fStrongNameVerified = TRUE;
+#else
+
+#ifdef FEATURE_CORECLR
+ BOOL skip = FALSE;
+
+ // Skip verification for assemblies from the trusted path
+ if (IsSystem() || m_bIsOnTpaList)
+ skip = TRUE;
+
+#ifdef FEATURE_LEGACYNETCF
+ // crossgen should skip verification for Mango
+ if (RuntimeIsLegacyNetCF(0))
+ skip = TRUE;
+#endif
+
+ if (skip)
+ {
+ m_flags |= PEFILE_SKIP_MODULE_HASH_CHECKS;
+ m_fStrongNameVerified = TRUE;
+ return;
+ }
+#endif // FEATURE_CORECLR
+
+ DWORD verifyOutputFlags = 0;
+ HRESULT hr = GetILimage()->VerifyStrongName(&verifyOutputFlags);
+
+ if (SUCCEEDED(hr))
+ {
+ // Strong name verified or delay sign OK'ed.
+ // We will skip verification of modules in the delay signed case.
+
+ if ((verifyOutputFlags & SN_OUTFLAG_WAS_VERIFIED) == 0)
+ m_flags |= PEFILE_SKIP_MODULE_HASH_CHECKS;
+ }
+ else
+ {
+ // Strong name missing or error. Throw in the latter case.
+ if (hr != CORSEC_E_MISSING_STRONGNAME)
+ ThrowHR(hr);
+
+#ifdef FEATURE_CAS_POLICY
+ // Since we are not strong named, don't check module hashes.
+ // (Unless we have a security certificate, in which case check anyway.)
+
+ if (m_certificate == NULL)
+ m_flags |= PEFILE_SKIP_MODULE_HASH_CHECKS;
+#endif
+ }
+
+#endif // FEATURE_CORECLR && !CROSSGEN_COMPILE
+ }
+
+ m_fStrongNameVerified = TRUE;
+}
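+
+// Illustrative caller sketch (informational, not from the original sources;
+// "pAsm" is a hypothetical PEAssembly*): callers typically gate the check on
+// the cached flag to avoid redundant work:
+//
+// if (!pAsm->IsStrongNameVerified())
+// pAsm->VerifyStrongName(); // throws on an invalid signature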
+
+#ifdef FEATURE_CORECLR
+BOOL PEAssembly::IsProfileAssembly()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //
+ // For now, cache the result of the check below. This cache should be removed once/if the check below
+ // becomes cheap (e.g. does not access metadata anymore). The cached value is tri-state:
+ // 0 = not yet computed, 1 = profile assembly, -1 = not a profile assembly.
+ //
+ if (VolatileLoadWithoutBarrier(&m_fProfileAssembly) != 0)
+ {
+ return m_fProfileAssembly > 0;
+ }
+
+ //
+ // In order to be a platform (profile) assembly, you must come from a trusted location (the TPA list).
+ // If we are binding by TPA list and this assembly is on it, IsSourceGAC is true => the assembly is Profile.
+ // If the assembly is a WinMD, it is automatically trusted, since all WinMD scenarios are full-trust scenarios.
+ //
+ // The check for Silverlight strong-name platform assemblies is a legacy backdoor. It was introduced by an
+ // accidental abstraction leak from the old Silverlight binder; people took advantage of it, and we cannot easily get rid of it now. See DevDiv #710462.
+ //
+ BOOL bProfileAssembly = IsSourceGAC() && (IsSystem() || m_bIsOnTpaList);
+ if (!AppX::IsAppXProcess())
+ {
+ bProfileAssembly |= IsSourceGAC() && IsSilverlightPlatformStrongNameSignature();
+ }
+
+ m_fProfileAssembly = bProfileAssembly ? 1 : -1;
+ return bProfileAssembly;
+}
+
+BOOL PEAssembly::IsSilverlightPlatformStrongNameSignature()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (IsDynamic())
+ return FALSE;
+
+ DWORD cbPublicKey;
+ const BYTE *pbPublicKey = static_cast<const BYTE *>(GetPublicKey(&cbPublicKey));
+ if (pbPublicKey == nullptr)
+ {
+ return FALSE;
+ }
+
+ if (StrongNameIsSilverlightPlatformKey(pbPublicKey, cbPublicKey))
+ return TRUE;
+
+#ifdef FEATURE_STRONGNAME_TESTKEY_ALLOWED
+ if (StrongNameIsTestKey(pbPublicKey, cbPublicKey))
+ return TRUE;
+#endif
+
+ return FALSE;
+}
+
+#ifdef FEATURE_STRONGNAME_TESTKEY_ALLOWED
+BOOL PEAssembly::IsProfileTestAssembly()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return IsSourceGAC() && IsTestKeySignature();
+}
+
+BOOL PEAssembly::IsTestKeySignature()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (IsDynamic())
+ return FALSE;
+
+ DWORD cbPublicKey;
+ const BYTE *pbPublicKey = static_cast<const BYTE *>(GetPublicKey(&cbPublicKey));
+ if (pbPublicKey == nullptr)
+ {
+ return FALSE;
+ }
+
+ return StrongNameIsTestKey(pbPublicKey, cbPublicKey);
+}
+#endif // FEATURE_STRONGNAME_TESTKEY_ALLOWED
+
+#endif // FEATURE_CORECLR
+
+// ------------------------------------------------------------
+// Descriptive strings
+// ------------------------------------------------------------
+
+ // Effective path is the path of the nearest parent (creator) assembly that has a nonempty path.
+
+const SString &PEAssembly::GetEffectivePath()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PEAssembly *pAssembly = this;
+
+ while (pAssembly->m_identity == NULL
+ || pAssembly->m_identity->GetPath().IsEmpty())
+ {
+ if (pAssembly->m_creator)
+ pAssembly = pAssembly->m_creator->GetAssembly();
+ else // Unmanaged exe which loads byte[]/IStream assemblies
+ return SString::Empty();
+ }
+
+ return pAssembly->m_identity->GetPath();
+}
+
+
+// Codebase is the fusion codebase or path for the assembly. It is in URL format.
+// Note this may be obtained from the parent PEFile if we don't have a path or fusion
+// assembly.
+//
+// fCopiedName means to get the "shadow copied" path rather than the original path, if applicable
+void PEAssembly::GetCodeBase(SString &result, BOOL fCopiedName/*=FALSE*/)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+#ifdef FEATURE_FUSION
+ // For a copied name, we always use the actual file path rather than the fusion info
+ if (!fCopiedName && m_pFusionAssembly)
+ {
+ if ( ((m_dwLocationFlags & ASMLOC_LOCATION_MASK) == ASMLOC_RUN_FROM_SOURCE) ||
+ ((m_dwLocationFlags & ASMLOC_LOCATION_MASK) == ASMLOC_DOWNLOAD_CACHE) )
+ {
+ // Assemblies in the download cache or run from source should have
+ // a proper codebase set in them.
+ FusionBind::GetAssemblyNameStringProperty(GetFusionAssemblyName(),
+ ASM_NAME_CODEBASE_URL,
+ result);
+ return;
+ }
+ }
+ else if (m_pIHostAssembly)
+ {
+ FusionBind::GetAssemblyNameStringProperty(GetFusionAssemblyName(),
+ ASM_NAME_CODEBASE_URL,
+ result);
+ return;
+ }
+#endif
+
+ // All other cases use the file path.
+ result.Set(GetEffectivePath());
+ if (!result.IsEmpty())
+ PathToUrl(result);
+}
+
+/* static */
+void PEAssembly::PathToUrl(SString &string)
+{
+ CONTRACTL
+ {
+ PRECONDITION(PEImage::CheckCanonicalFullPath(string));
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ SString::Iterator i = string.Begin();
+
+#if !defined(PLATFORM_UNIX)
+ if (i[0] == W('\\'))
+ {
+ // Network path
+ string.Insert(i, SL("file://"));
+ string.Skip(i, SL("file://"));
+ }
+ else
+ {
+ // Disk path
+ string.Insert(i, SL("file:///"));
+ string.Skip(i, SL("file:///"));
+ }
+#else
+ // Unix doesn't have a distinction between a network and a local path
+ _ASSERTE( i[0] == W('\\') || i[0] == W('/'));
+ SString sss(SString::Literal, W("file://"));
+ string.Insert(i, sss);
+ string.Skip(i, sss);
+#endif
+
+ while (string.Find(i, W('\\')))
+ {
+ string.Replace(i, W('/'));
+ }
+}
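+
+// Illustrative conversions performed by PathToUrl (informational; assumes
+// canonical full paths, per the precondition above):
+//
+// C:\app\a.dll -> file:///C:/app/a.dll (Windows disk path)
+// \\server\share\a.dll -> file:////server/share/a.dll (Windows network path)
+// /usr/lib/a.dll -> file:///usr/lib/a.dll (Unix)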
+
+void PEAssembly::UrlToPath(SString &string)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACT_END;
+
+ SString::Iterator i = string.Begin();
+
+ SString sss2(SString::Literal, W("file://"));
+#if !defined(PLATFORM_UNIX)
+ SString sss3(SString::Literal, W("file:///"));
+ if (string.MatchCaseInsensitive(i, sss3))
+ string.Delete(i, 8);
+ else
+#endif
+ if (string.MatchCaseInsensitive(i, sss2))
+ string.Delete(i, 7);
+
+ while (string.Find(i, W('/')))
+ {
+ string.Replace(i, W('\\'));
+ }
+
+ RETURN;
+}
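+
+// Informational: UrlToPath is the inverse of PathToUrl for the common cases,
+// e.g. file:///C:/app/a.dll -> C:\app\a.dll. The prefix match is
+// case-insensitive, and remaining forward slashes become backslashes.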
+
+BOOL PEAssembly::FindLastPathSeparator(const SString &path, SString::Iterator &i)
+{
+#ifdef PLATFORM_UNIX
+ SString::Iterator slash = i;
+ SString::Iterator backSlash = i;
+ BOOL foundSlash = path.FindBack(slash, '/');
+ BOOL foundBackSlash = path.FindBack(backSlash, '\\');
+ if (!foundSlash && !foundBackSlash)
+ return FALSE;
+ else if (foundSlash && !foundBackSlash)
+ i = slash;
+ else if (!foundSlash && foundBackSlash)
+ i = backSlash;
+ else
+ i = (backSlash > slash) ? backSlash : slash;
+ return TRUE;
+#else
+ return path.FindBack(i, '\\');
+#endif //PLATFORM_UNIX
+}
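+
+// Example (informational): on PLATFORM_UNIX both separator kinds are honored
+// and the later one wins, so for "dir\sub/file" the iterator lands on '/'.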
+
+
+// ------------------------------------------------------------
+// Logging
+// ------------------------------------------------------------
+#ifdef FEATURE_PREJIT
+void PEAssembly::ExternalVLog(DWORD facility, DWORD level, const WCHAR *fmt, va_list args)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACT_END;
+
+ PEFile::ExternalVLog(facility, level, fmt, args);
+
+#ifdef FEATURE_FUSION
+ if (FusionLoggingEnabled())
+ {
+ DWORD dwLogCategory = (facility == LF_ZAP ? FUSION_BIND_LOG_CATEGORY_NGEN : FUSION_BIND_LOG_CATEGORY_DEFAULT);
+
+ StackSString message;
+ message.VPrintf(fmt, args);
+ m_pFusionLog->LogMessage(0, dwLogCategory, message);
+
+ if (level == LL_ERROR) {
+ m_pFusionLog->SetResultCode(dwLogCategory, E_FAIL);
+ FlushExternalLog();
+ }
+ }
+#endif //FEATURE_FUSION
+
+ RETURN;
+}
+
+void PEAssembly::FlushExternalLog()
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACT_END;
+
+#ifdef FEATURE_FUSION
+ if (FusionLoggingEnabled()) {
+ m_pFusionLog->Flush(g_dwLogLevel, FUSION_BIND_LOG_CATEGORY_NGEN);
+ m_pFusionLog->Flush(g_dwLogLevel, FUSION_BIND_LOG_CATEGORY_DEFAULT);
+ }
+#endif //FEATURE_FUSION
+
+ RETURN;
+}
+#endif //FEATURE_PREJIT
+// ------------------------------------------------------------
+// Metadata access
+// ------------------------------------------------------------
+
+HRESULT PEFile::GetVersion(USHORT *pMajor, USHORT *pMinor, USHORT *pBuild, USHORT *pRevision)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pMajor, NULL_OK));
+ PRECONDITION(CheckPointer(pMinor, NULL_OK));
+ PRECONDITION(CheckPointer(pBuild, NULL_OK));
+ PRECONDITION(CheckPointer(pRevision, NULL_OK));
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ AssemblyMetaDataInternal md;
+ HRESULT hr = S_OK;
+ if (m_bHasPersistentMDImport)
+ {
+ _ASSERTE(GetPersistentMDImport()->IsValidToken(TokenFromRid(1, mdtAssembly)));
+ IfFailRet(GetPersistentMDImport()->GetAssemblyProps(TokenFromRid(1, mdtAssembly), NULL, NULL, NULL, NULL, &md, NULL));
+ }
+ else
+ {
+ ReleaseHolder<IMDInternalImport> pImport(GetMDImportWithRef());
+ _ASSERTE(pImport->IsValidToken(TokenFromRid(1, mdtAssembly)));
+ IfFailRet(pImport->GetAssemblyProps(TokenFromRid(1, mdtAssembly), NULL, NULL, NULL, NULL, &md, NULL));
+ }
+
+ if (pMajor != NULL)
+ *pMajor = md.usMajorVersion;
+ if (pMinor != NULL)
+ *pMinor = md.usMinorVersion;
+ if (pBuild != NULL)
+ *pBuild = md.usBuildNumber;
+ if (pRevision != NULL)
+ *pRevision = md.usRevisionNumber;
+
+ return hr;
+}
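+
+// Minimal usage sketch (informational; "pFile", "major" etc. are hypothetical
+// caller-side names):
+//
+// USHORT major, minor, build, revision;
+// IfFailThrow(pFile->GetVersion(&major, &minor, &build, &revision));
+//
+// Any of the out parameters may be NULL if the caller does not need them.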
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+// ================================================================================
+// PEModule class - a PEFile which represents a satellite module
+// ================================================================================
+
+PEModule::PEModule(PEImage *image, PEAssembly *assembly, mdFile token, IMetaDataEmit *pEmit)
+ : PEFile(image),
+ m_assembly(NULL),
+ m_token(token),
+ m_bIsResource(-1)
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckPointer(image, NULL_OK));
+ PRECONDITION(CheckPointer(assembly));
+ PRECONDITION(!IsNilToken(token));
+ PRECONDITION(CheckPointer(pEmit, NULL_OK));
+ PRECONDITION(image != NULL || pEmit != NULL);
+ CONSTRUCTOR_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ DWORD flags;
+
+ // Get only the data that is required here (the flags);
+ // this helps avoid unnecessary memory touches.
+ IfFailThrow(assembly->GetPersistentMDImport()->GetFileProps(token, NULL, NULL, NULL, &flags));
+
+ if (image != NULL)
+ {
+ if (IsFfContainsMetaData(flags) && !image->CheckILFormat())
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+
+ if (assembly->IsIStream())
+ {
+ m_flags |= PEFILE_ISTREAM;
+#ifdef FEATURE_PREJIT
+ m_fCanUseNativeImage = FALSE;
+#endif
+ }
+ }
+
+ assembly->AddRef();
+
+ m_assembly = assembly;
+
+ m_flags |= PEFILE_MODULE;
+ if (assembly->IsSystem())
+ {
+ m_flags |= PEFILE_SYSTEM;
+ }
+ else
+ {
+ if (assembly->IsIntrospectionOnly())
+ {
+ m_flags |= PEFILE_INTROSPECTIONONLY;
+#ifdef FEATURE_PREJIT
+ SetCannotUseNativeImage();
+#endif
+ }
+ }
+
+
+ // Verify the module format. Note that some things have already happened:
+ // - Fusion has verified that the name matches the metadata
+ // - PEImage has performed PE file format validation
+
+ if (assembly->NeedsModuleHashChecks())
+ {
+ ULONG size;
+ const void *hash;
+ IfFailThrow(assembly->GetPersistentMDImport()->GetFileProps(token, NULL, &hash, &size, NULL));
+
+ if (!CheckHash(assembly->GetHashAlgId(), hash, size))
+ ThrowHR(COR_E_MODULE_HASH_CHECK_FAILED);
+ }
+
+#if defined(FEATURE_PREJIT) && !defined(CROSSGEN_COMPILE)
+ // Find the native image
+ if (IsFfContainsMetaData(flags)
+ && m_fCanUseNativeImage
+ && assembly->HasNativeImage()
+ && assembly->GetFusionAssembly() != NULL)
+ {
+ IAssemblyLocation *pIAssemblyLocation = assembly->GetNativeAssemblyLocation();
+
+ WCHAR wzPath[MAX_PATH];
+ WCHAR *pwzTemp = NULL;
+ DWORD dwCCPath = MAX_PATH;
+ SString path;
+ SString moduleName(SString::Utf8, GetSimpleName());
+
+ // Compute the module path from the manifest module path
+ IfFailThrow(pIAssemblyLocation->GetPath(wzPath, &dwCCPath));
+ pwzTemp = PathFindFileName(wzPath);
+ *pwzTemp = (WCHAR) 0x00;
+
+ // <TODO>@todo: GetAppDomain????</TODO>
+ path.Set(wzPath);
+ path.Append((LPCWSTR) moduleName);
+
+ SetNativeImage(path);
+ }
+#endif // FEATURE_PREJIT && !CROSSGEN_COMPILE
+
+#if _DEBUG
+ GetCodeBaseOrName(m_debugName);
+ m_pDebugName = m_debugName;
+#endif
+
+ if (IsFfContainsMetaData(flags))
+ {
+ if (image != NULL)
+ {
+ OpenMDImport_Unsafe(); //constructor. cannot race with anything
+ }
+ else
+ {
+ _ASSERTE(!m_bHasPersistentMDImport);
+ IfFailThrow(GetMetaDataInternalInterfaceFromPublic(pEmit, IID_IMDInternalImport,
+ (void **)&m_pMDImport));
+ m_pEmitter = pEmit;
+ pEmit->AddRef();
+ m_bHasPersistentMDImport=TRUE;
+ m_MDImportIsRW_Debugger_Use_Only = TRUE;
+ }
+
+ // Fusion probably checks this, but we need to check this ourselves if
+ // this file didn't come from Fusion
+ if (!m_pMDImport->IsValidToken(m_pMDImport->GetModuleFromScope()))
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ else
+ {
+ // Go ahead and "load" the image since it is essentially a noop, but will enable
+ // more operations on the module earlier in the loading process.
+ LoadLibrary();
+ }
+#ifdef FEATURE_PREJIT
+ if (IsResource() || IsDynamic())
+ m_fCanUseNativeImage = FALSE;
+#endif
+}
+
+PEModule::~PEModule()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_assembly->Release();
+}
+
+/* static */
+PEModule *PEModule::Open(PEAssembly *assembly, mdFile token,
+ const SString &fileName)
+{
+ STANDARD_VM_CONTRACT;
+
+ PEModule *result = NULL;
+
+ EX_TRY
+ {
+ result = DoOpen(assembly, token, fileName);
+ }
+ EX_HOOK
+ {
+ Exception *ex = GET_EXCEPTION();
+
+ // Rethrow non-transient exceptions as file load exceptions with proper
+ // context
+
+ if (!ex->IsTransient())
+ EEFileLoadException::Throw(fileName, ex->GetHR(), ex);
+ }
+ EX_END_HOOK;
+
+ return result;
+}
+// Thread stress
+class DoOpenPathStress : APIThreadStress
+{
+public:
+ PEAssembly *assembly;
+ mdFile token;
+ const SString &fileName;
+ DoOpenPathStress(PEAssembly *assembly, mdFile token,
+ const SString &fileName)
+ : assembly(assembly), token(token), fileName(fileName)
+ {
+ WRAPPER_NO_CONTRACT;
+ fileName.Normalize();
+ }
+ void Invoke()
+ {
+ WRAPPER_NO_CONTRACT;
+ PEModuleHolder result(PEModule::Open(assembly, token, fileName));
+ }
+};
+
+/* static */
+PEModule *PEModule::DoOpen(PEAssembly *assembly, mdFile token,
+ const SString &fileName)
+{
+ CONTRACT(PEModule *)
+ {
+ PRECONDITION(CheckPointer(assembly));
+ PRECONDITION(CheckValue(fileName));
+ PRECONDITION(!IsNilToken(token));
+ PRECONDITION(!fileName.IsEmpty());
+ POSTCONDITION(CheckPointer(RETVAL));
+ STANDARD_VM_CHECK;
+ }
+ CONTRACT_END;
+
+ DoOpenPathStress ts(assembly, token, fileName);
+
+ // If this is a resource module, we must explicitly request a flat mapping
+ DWORD flags;
+ IfFailThrow(assembly->GetPersistentMDImport()->GetFileProps(token, NULL, NULL, NULL, &flags));
+
+ PEImageHolder image;
+#ifdef FEATURE_FUSION
+ if (assembly->IsIStream())
+ {
+ SafeComHolder<IHostAssemblyModuleImport> pModuleImport;
+ IfFailThrow(assembly->GetIHostAssembly()->GetModuleByName(fileName, &pModuleImport));
+
+ SafeComHolder<IStream> pIStream;
+ IfFailThrow(pModuleImport->GetModuleStream(&pIStream));
+
+ DWORD dwModuleId;
+ IfFailThrow(pModuleImport->GetModuleId(&dwModuleId));
+ image = PEImage::OpenImage(pIStream, assembly->m_identity->m_StreamAsmId,
+ dwModuleId, (flags & ffContainsNoMetaData));
+ }
+ else
+#endif
+ {
+ image = PEImage::OpenImage(fileName);
+ }
+
+ if (flags & ffContainsNoMetaData)
+ image->LoadNoMetaData(assembly->IsIntrospectionOnly());
+
+ PEModuleHolder module(new PEModule(image, assembly, token, NULL));
+
+ RETURN module.Extract();
+}
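+
+// Note (informational): the PEModuleHolder above releases the new PEModule if
+// anything throws before the end of DoOpen; Extract() then transfers ownership
+// to the caller without an extra AddRef.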
+
+/* static */
+PEModule *PEModule::OpenMemory(PEAssembly *assembly, mdFile token,
+ const void *flat, COUNT_T size)
+{
+ STANDARD_VM_CONTRACT;
+
+ PEModule *result = NULL;
+
+ EX_TRY
+ {
+ result = DoOpenMemory(assembly, token, flat, size);
+ }
+ EX_HOOK
+ {
+ Exception *ex = GET_EXCEPTION();
+
+ // Rethrow non-transient exceptions as file load exceptions with proper
+ // context
+ if (!ex->IsTransient())
+ EEFileLoadException::Throw(assembly, flat, size, ex->GetHR(), ex);
+ }
+ EX_END_HOOK;
+ return result;
+}
+
+// Thread stress
+class DoOpenTokenStress : APIThreadStress
+{
+public:
+ PEAssembly *assembly;
+ mdFile token;
+ const void *flat;
+ COUNT_T size;
+ DoOpenTokenStress(PEAssembly *assembly, mdFile token,
+ const void *flat, COUNT_T size)
+ : assembly(assembly), token(token), flat(flat), size(size) {LIMITED_METHOD_CONTRACT;}
+ void Invoke()
+ {
+ WRAPPER_NO_CONTRACT;
+ PEModuleHolder result(PEModule::OpenMemory(assembly, token, flat, size));
+ }
+};
+
+// REVIEW: do we need to know the creator module which emitted the module (separately
+// from the assembly parent) for security reasons?
+/* static */
+PEModule *PEModule::DoOpenMemory(PEAssembly *assembly, mdFile token,
+ const void *flat, COUNT_T size)
+{
+ CONTRACT(PEModule *)
+ {
+ PRECONDITION(CheckPointer(assembly));
+ PRECONDITION(!IsNilToken(token));
+ PRECONDITION(CheckPointer(flat));
+ POSTCONDITION(CheckPointer(RETVAL));
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ DoOpenTokenStress ts(assembly, token, flat, size);
+
+ PEImageHolder image(PEImage::LoadFlat(flat, size));
+
+ RETURN new PEModule(image, assembly, token, NULL);
+}
+
+/* static */
+PEModule *PEModule::Create(PEAssembly *assembly, mdFile token, IMetaDataEmit *pEmit)
+{
+ CONTRACT(PEModule *)
+ {
+ PRECONDITION(CheckPointer(assembly));
+ PRECONDITION(!IsNilToken(token));
+ STANDARD_VM_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN new PEModule(NULL, assembly, token, pEmit);
+}
+
+// ------------------------------------------------------------
+// Logging
+// ------------------------------------------------------------
+#ifdef FEATURE_PREJIT
+void PEModule::ExternalVLog(DWORD facility, DWORD level, const WCHAR *fmt, va_list args)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACT_END;
+
+ m_assembly->ExternalVLog(facility, level, fmt, args);
+
+ RETURN;
+}
+
+void PEModule::FlushExternalLog()
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACT_END;
+
+ m_assembly->FlushExternalLog();
+
+ RETURN;
+}
+
+// ------------------------------------------------------------
+// Loader support routines
+// ------------------------------------------------------------
+void PEModule::SetNativeImage(const SString &fullPath)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckValue(fullPath));
+ PRECONDITION(!fullPath.IsEmpty());
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ PEImageHolder image(PEImage::OpenImage(fullPath));
+ image->Load();
+
+ PEFile::SetNativeImage(image);
+}
+#endif // FEATURE_PREJIT
+
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+
+void PEFile::EnsureImageOpened()
+{
+ WRAPPER_NO_CONTRACT;
+ if (IsDynamic())
+ return;
+#ifdef FEATURE_PREJIT
+ if(HasNativeImage())
+ m_nativeImage->GetLayout(PEImageLayout::LAYOUT_ANY,PEImage::LAYOUT_CREATEIFNEEDED)->Release();
+ else
+#endif
+ GetILimage()->GetLayout(PEImageLayout::LAYOUT_ANY,PEImage::LAYOUT_CREATEIFNEEDED)->Release();
+}
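+
+// Note (informational): GetLayout(..., LAYOUT_CREATEIFNEEDED) creates and
+// caches the layout inside the PEImage; the immediate Release() drops only the
+// reference handed back to us. Opening the image is the side effect this
+// routine exists for.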
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void
+PEFile::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ // sizeof(PEFile) == 0xb8
+ DAC_ENUM_VTHIS();
+ EMEM_OUT(("MEM: %p PEFile\n", dac_cast<TADDR>(this)));
+
+#ifdef _DEBUG
+ // Not a big deal if it's NULL or fails.
+ m_debugName.EnumMemoryRegions(flags);
+#endif
+
+ if (m_identity.IsValid())
+ {
+ m_identity->EnumMemoryRegions(flags);
+ }
+ if (GetILimage().IsValid())
+ {
+ GetILimage()->EnumMemoryRegions(flags);
+ }
+#ifdef FEATURE_PREJIT
+ if (m_nativeImage.IsValid())
+ {
+ m_nativeImage->EnumMemoryRegions(flags);
+ DacEnumHostDPtrMem(m_nativeImage->GetLoadedLayout()->GetNativeVersionInfo());
+ }
+#endif
+}
+
+void
+PEAssembly::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ WRAPPER_NO_CONTRACT;
+
+ PEFile::EnumMemoryRegions(flags);
+
+ if (m_creator.IsValid())
+ {
+ m_creator->EnumMemoryRegions(flags);
+ }
+}
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+void
+PEModule::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ WRAPPER_NO_CONTRACT;
+
+ PEFile::EnumMemoryRegions(flags);
+
+ if (m_assembly.IsValid())
+ {
+ m_assembly->EnumMemoryRegions(flags);
+ }
+}
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+#endif // #ifdef DACCESS_COMPILE
+
+
+//-------------------------------------------------------------------------------
+// Make a best-effort attempt to obtain an image name for use in an error message.
+//
+// This routine must expect to be called before this object is fully loaded.
+// It can return an empty string if the name isn't available or the object isn't
+// initialized enough to get a name, but it mustn't crash.
+//-------------------------------------------------------------------------------
+LPCWSTR PEFile::GetPathForErrorMessages()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ SUPPORTS_DAC_HOST_ONLY;
+ }
+ CONTRACTL_END
+
+ if (!IsDynamic())
+ {
+ return m_identity->GetPathForErrorMessages();
+ }
+ else
+ {
+ return W("");
+ }
+}
+
+#ifndef FEATURE_CORECLR
+BOOL PEAssembly::IsReportedToUsageLog()
+{
+ LIMITED_METHOD_CONTRACT;
+ BOOL fReported = TRUE;
+
+ if (!IsDynamic())
+ fReported = m_identity->IsReportedToUsageLog();
+
+ return fReported;
+}
+
+void PEAssembly::SetReportedToUsageLog()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!IsDynamic())
+ m_identity->SetReportedToUsageLog();
+}
+#endif // !FEATURE_CORECLR
+
+#ifdef DACCESS_COMPILE
+TADDR PEFile::GetMDInternalRWAddress()
+{
+ if (!m_MDImportIsRW_Debugger_Use_Only)
+ return 0;
+ else
+ {
+ // This line of code is a bit scary, but it is correct for now at least...
+ // 1) We are using 'm_pMDImport_UseAccessor' directly, and not the accessor. The field is
+ // named this way to prevent debugger code that wants a host implementation of IMDInternalImport
+ // from accidentally trying to use this pointer. This pointer is a target pointer, not
+ // a host pointer. However in this function we do want the target pointer, so the usage is
+ // accurate.
+ // 2) ASSUMPTION: We are assuming that the only valid implementation of RW metadata is
+ // MDInternalRW. If that ever changes we would need some way to disambiguate, and
+ // probably this entire code path would need to be redesigned.
+ // 3) ASSUMPTION: We are assuming that no pointer adjustment is required to convert between
+ // IMDInternalImport*, IMDInternalImportENC* and MDInternalRW*. Ideally I was hoping to do this with a
+ // static_cast<> but the compiler complains that the ENC<->RW is an unrelated conversion.
+ return (TADDR) m_pMDImport_UseAccessor;
+ }
+}
+#endif
+
+#if defined(FEATURE_HOSTED_BINDER)
+// Returns the ICLRPrivBinder* instance associated with the PEFile
+PTR_ICLRPrivBinder PEFile::GetBindingContext()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PTR_ICLRPrivBinder pBindingContext = NULL;
+
+#if defined(FEATURE_CORECLR)
+ // Mscorlib is always bound in the context of the TPA binder. However, since it gets loaded and published
+ // during EEStartup *before* the TPA binder is initialized, we don't have a binding context to publish against.
+ // Thus, we will always return NULL for its binding context.
+ if (!IsSystem())
+#endif // defined(FEATURE_CORECLR)
+ {
+ pBindingContext = dac_cast<PTR_ICLRPrivBinder>(GetHostAssembly());
+ }
+
+ return pBindingContext;
+}
+#endif // FEATURE_HOSTED_BINDER
+
diff --git a/src/vm/pefile.h b/src/vm/pefile.h
new file mode 100644
index 0000000000..a62ba774c8
--- /dev/null
+++ b/src/vm/pefile.h
@@ -0,0 +1,1239 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// --------------------------------------------------------------------------------
+// PEFile.h
+//
+
+// --------------------------------------------------------------------------------
+
+
+#ifndef PEFILE_H_
+#define PEFILE_H_
+
+// --------------------------------------------------------------------------------
+// Required headers
+// --------------------------------------------------------------------------------
+
+#include <windef.h>
+
+#include <corpolicy.h>
+#include "sstring.h"
+#include "peimage.h"
+#include "metadata.h"
+#include "corhlpr.h"
+#include "utilcode.h"
+#include "loaderheap.h"
+#include "sstring.h"
+#include "ex.h"
+#ifdef FEATURE_FUSION
+#include <fusion.h>
+#include <fusionbind.h>
+#include "binderngen.h"
+#endif
+#include "assemblyspecbase.h"
+#include "eecontract.h"
+#include "metadatatracker.h"
+#include "stackwalktypes.h"
+#include <specstrings.h>
+#include "slist.h"
+#include "corperm.h"
+#include "eventtrace.h"
+
+#ifdef FEATURE_HOSTED_BINDER
+#include "clrprivbinderutil.h"
+#endif
+
+// --------------------------------------------------------------------------------
+// Forward declared classes
+// --------------------------------------------------------------------------------
+
+class Module;
+class EditAndContinueModule;
+
+class PEFile;
+class PEModule;
+class PEAssembly;
+class SimpleRWLock;
+
+class CLRPrivBinderLoadFile;
+
+typedef VPTR(PEModule) PTR_PEModule;
+typedef VPTR(PEAssembly) PTR_PEAssembly;
+
+// --------------------------------------------------------------------------------
+// Types
+// --------------------------------------------------------------------------------
+
+// --------------------------------------------------------------------------------
+// A PEFile is an input to the CLR loader. It is produced as a result of
+// binding, usually through fusion (although there are a few less common methods to
+// obtain one which do not go through fusion, e.g. IJW loads)
+//
+// Although a PEFile is usually a disk-based PE file (hence the name), this is not
+// always the case. Thus it is a conscious decision not to export access to the PE
+// file directly; rather, the specific information required should be provided via
+// individual query APIs.
+//
+// There are multiple "flavors" of PEFiles:
+//
+// 1. HMODULE - these PE Files are loaded in response to "spontaneous" OS callbacks.
+// These should only occur for .exe main modules and IJW dlls loaded via LoadLibrary
+// or static imports in unmanaged code.
+//
+// 2. Fusion loads - these are the most common case. A path is obtained from fusion and
+// the result is loaded via PEImage.
+// a. Display name loads - these are metadata-based binds
+// b. Path loads - these are loaded from an explicit path
+//
+// 3. Byte arrays - loaded explicitly by user code. These also go through PEImage.
+//
+// 4. Dynamic - these are not actual PE images at all, but are placeholders
+// for reflection-based modules.
+//
+// PEFiles are segmented into two subtypes: PEAssembly and PEModule. The former
+// is a file to be loaded as an assembly, and the latter is to be loaded as a module.
+//
+// See also file:..\inc\corhdr.h#ManagedHeader for more on the format of managed images.
+// See code:Module for more on modules
+// --------------------------------------------------------------------------------
+
+typedef VPTR(class PEFile) PTR_PEFile;
+
+typedef ReleaseHolder<IMDInternalImport> IMDInternalImportHolder;
+
+class PEFile
+{
+ // ------------------------------------------------------------
+ // SOS support
+ // ------------------------------------------------------------
+
+ VPTR_BASE_CONCRETE_VTABLE_CLASS(PEFile)
+
+public:
+
+ // ------------------------------------------------------------
+ // Public API
+ // ------------------------------------------------------------
+
+ STDMETHOD_(ULONG, AddRef)();
+ STDMETHOD_(ULONG, Release)();
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+#if CHECK_INVARIANTS
+ CHECK Invariant();
+#endif
+
+private:
+ // ------------------------------------------------------------
+ // Loader access API
+ // ------------------------------------------------------------
+
+ friend class DomainFile;
+ friend class PEModule;
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+ // Load actually triggers the loading side effects of the module. This should ONLY
+ // be done after validation has passed.
+ BOOL CanLoadLibrary();
+public:
+ void LoadLibrary(BOOL allowNativeSkip = TRUE);
+
+#ifdef FEATURE_MIXEDMODE
+protected:
+ // Returns TRUE if this file references managed CRT (msvcmNN*).
+ BOOL ReferencesManagedCRT();
+
+ // Checks for unsupported loads of C++/CLI assemblies into multiple runtimes in this process.
+ void CheckForDisallowedInProcSxSLoad();
+#endif // FEATURE_MIXEDMODE
+
+private:
+ void CheckForDisallowedInProcSxSLoadWorker();
+ void ValidateImagePlatformNeutrality();
+
+ // For use inside LoadLibrary callback
+ friend HRESULT ExecuteDLLForAttach(HINSTANCE hInst,
+ DWORD dwReason,
+ LPVOID lpReserved,
+ BOOL fFromThunk);
+ void SetLoadedHMODULE(HMODULE hMod);
+
+ BOOL HasSkipVerification();
+ void SetSkipVerification();
+
+ // DO NOT USE !!! this is to be removed when we move to new fusion binding API
+ friend class DomainAssembly;
+
+ // Helper for creating metadata for CreateDynamic
+ friend class Assembly;
+ friend class COMDynamicWrite;
+ friend class AssemblyNative;
+ static void DefineEmitScope(
+ GUID iid,
+ void **ppEmit);
+
+protected:
+ IMDInternalImportHolder GetMDImport();
+
+public:
+ // ------------------------------------------------------------
+ // Generic PEFile - can be used to access metadata
+ // ------------------------------------------------------------
+
+ static PEFile *Open(PEImage *image);
+
+ // ------------------------------------------------------------
+ // Identity
+ // ------------------------------------------------------------
+
+#ifndef DACCESS_COMPILE
+ BOOL Equals(PEFile *pFile);
+ BOOL Equals(PEImage *pImage);
+#endif // DACCESS_COMPILE
+
+#ifndef FEATURE_CORECLR
+ BOOL IsShareable();
+#endif
+
+ void GetMVID(GUID *pMvid);
+
+ // ------------------------------------------------------------
+ // Descriptive strings
+ // ------------------------------------------------------------
+
+ // Path is the path to the file; empty if not file-based.
+ const SString &GetPath();
+
+#ifdef DACCESS_COMPILE
+ // This is the metadata module name. Used as a hint for the file name.
+ const SString &GetModuleFileNameHint();
+#endif // DACCESS_COMPILE
+
+ // Full name is the most descriptive name available (path, codebase, or name as appropriate)
+ void GetCodeBaseOrName(SString &result);
+
+ // Returns security information for the assembly based on the codebase
+ void GetSecurityIdentity(SString &codebase, SecZone *pdwZone, DWORD dwFlags, BYTE *pbUniqueID, DWORD *pcbUniqueID);
+ void InitializeSecurityManager();
+
+#ifdef LOGGING
+ // This is useful for log messages
+ LPCWSTR GetDebugName();
+#endif
+
+ // ------------------------------------------------------------
+ // Checks
+ // ------------------------------------------------------------
+
+ CHECK CheckLoaded(BOOL allowNativeSkip = TRUE);
+ void ValidateForExecution();
+ BOOL IsMarkedAsNoPlatform();
+ BOOL IsMarkedAsContentTypeWindowsRuntime();
+
+
+ // ------------------------------------------------------------
+ // Classification
+ // ------------------------------------------------------------
+
+ BOOL IsAssembly() const;
+ PTR_PEAssembly AsAssembly();
+ BOOL IsModule() const;
+ PTR_PEModule AsModule();
+ BOOL IsSystem() const;
+ BOOL IsDynamic() const;
+ BOOL IsResource() const;
+ BOOL IsIStream() const;
+ BOOL IsIntrospectionOnly() const;
+ // Returns self (if assembly) or containing assembly (if module)
+ PEAssembly *GetAssembly() const;
+
+ // ------------------------------------------------------------
+ // Hash support
+ // ------------------------------------------------------------
+
+#ifndef DACCESS_COMPILE
+ void GetImageBits(SBuffer &result);
+ void GetHash(ALG_ID algorithm, SBuffer &result);
+#endif // DACCESS_COMPILE
+
+ void GetSHA1Hash(SBuffer &result);
+ CHECK CheckHash(ALG_ID algorithm, const void *hash, COUNT_T size);
+
+ // ------------------------------------------------------------
+ // Metadata access
+ // ------------------------------------------------------------
+
+ BOOL HasMetadata();
+
+ IMDInternalImport *GetPersistentMDImport();
+ IMDInternalImport *GetMDImportWithRef();
+ void MakeMDImportPersistent() {m_bHasPersistentMDImport=TRUE;};
+
+#ifndef DACCESS_COMPILE
+ IMetaDataEmit *GetEmitter();
+ IMetaDataAssemblyEmit *GetAssemblyEmitter();
+ IMetaDataImport2 *GetRWImporter();
+ IMetaDataAssemblyImport *GetAssemblyImporter();
+#else
+ TADDR GetMDInternalRWAddress();
+#endif // DACCESS_COMPILE
+
+ LPCUTF8 GetSimpleName();
+ HRESULT GetScopeName(LPCUTF8 * pszName);
+ BOOL IsStrongNameVerified();
+ BOOL IsStrongNamed();
+ const void *GetPublicKey(DWORD *pcbPK);
+ ULONG GetHashAlgId();
+ HRESULT GetVersion(USHORT *pMajor, USHORT *pMinor, USHORT *pBuild, USHORT *pRevision);
+ LPCSTR GetLocale();
+ DWORD GetFlags();
+ HRESULT GetFlagsNoTrigger(DWORD * pdwFlags);
+#ifdef FEATURE_CAS_POLICY
+ COR_TRUST *GetAuthenticodeSignature();
+#endif
+ // ------------------------------------------------------------
+ // PE file access
+ // ------------------------------------------------------------
+
+ BOOL HasSecurityDirectory();
+ BOOL IsIbcOptimized();
+ WORD GetSubsystem();
+ mdToken GetEntryPointToken(
+#ifdef _DEBUG
+ BOOL bAssumeLoaded = FALSE
+#endif //_DEBUG
+ );
+ BOOL IsILOnly();
+ BOOL IsDll();
+
+ TADDR GetIL(RVA il);
+
+ PTR_VOID GetRvaField(RVA field);
+ CHECK CheckRvaField(RVA field);
+ CHECK CheckRvaField(RVA field, COUNT_T size);
+
+ PCCOR_SIGNATURE GetSignature(RVA signature);
+ RVA GetSignatureRva(PCCOR_SIGNATURE signature);
+ CHECK CheckSignature(PCCOR_SIGNATURE signature);
+ CHECK CheckSignatureRva(RVA signature);
+
+ BOOL HasTls();
+ BOOL IsRvaFieldTls(RVA field);
+ UINT32 GetFieldTlsOffset(RVA field);
+ UINT32 GetTlsIndex();
+
+ const void *GetInternalPInvokeTarget(RVA target);
+ CHECK CheckInternalPInvokeTarget(RVA target);
+
+ IMAGE_COR_VTABLEFIXUP *GetVTableFixups(COUNT_T *pCount = NULL);
+ void *GetVTable(RVA rva);
+
+ BOOL GetResource(LPCSTR szName, DWORD *cbResource,
+ PBYTE *pbInMemoryResource, DomainAssembly** pAssemblyRef,
+ LPCSTR *szFileName, DWORD *dwLocation,
+ StackCrawlMark *pStackMark, BOOL fSkipSecurityCheck,
+ BOOL fSkipRaiseResolveEvent, DomainAssembly* pDomainAssembly,
+ AppDomain* pAppDomain);
+#ifndef DACCESS_COMPILE
+ PTR_CVOID GetMetadata(COUNT_T *pSize);
+#endif
+ PTR_CVOID GetLoadedMetadata(COUNT_T *pSize);
+
+ void GetPEKindAndMachine(DWORD* pdwKind, DWORD* pdwMachine);
+
+ ULONG GetILImageTimeDateStamp();
+
+#ifdef FEATURE_CAS_POLICY
+ SAFEHANDLE GetSafeHandle();
+#endif // FEATURE_CAS_POLICY
+
+ // ------------------------------------------------------------
+ // Image memory access
+ //
+ // WARNING: do not abuse these. There are scenarios where the image
+ // is not in memory as an optimization.
+ //
+ // In general, you should add an entry point to get the specific info
+ // you are interested in, rather than using these general purpose
+ // entry points. The info can then be extracted from the native image
+ // in the no-IL image case.
+ // ------------------------------------------------------------
+
+ // For IJW purposes only - this asserts that we have an IJW image.
+ HMODULE GetIJWBase();
+
+ // The debugger can tolerate a null value here for native-only loading cases
+ PTR_VOID GetDebuggerContents(COUNT_T *pSize = NULL);
+
+#ifndef DACCESS_COMPILE
+ // Returns the IL image range; may force a LoadLibrary
+ const void *GetManagedFileContents(COUNT_T *pSize = NULL);
+#endif // DACCESS_COMPILE
+
+ PTR_CVOID GetLoadedImageContents(COUNT_T *pSize = NULL);
+
+ // SetInProcSxSLoadVerified can run concurrently, as we don't hold locks during LoadLibrary;
+ // it is the only flag that can be set during this phase, so no mutual exclusion is necessary.
+ void SetInProcSxSLoadVerified() { LIMITED_METHOD_CONTRACT; m_flags |= PEFILE_SXS_LOAD_VERIFIED; }
+ BOOL IsInProcSxSLoadVerified() { LIMITED_METHOD_CONTRACT; return m_flags & PEFILE_SXS_LOAD_VERIFIED; }
+
+ // ------------------------------------------------------------
+ // Native image access
+ // ------------------------------------------------------------
+
+ // Does the loader support using a native image for this file?
+ // Some implementation restrictions prevent native images from being used
+ // in some cases.
+#ifdef FEATURE_PREJIT
+ BOOL CanUseNativeImage() { LIMITED_METHOD_CONTRACT; return m_fCanUseNativeImage; }
+ void SetCannotUseNativeImage() { LIMITED_METHOD_CONTRACT; m_fCanUseNativeImage = FALSE; }
+ void SetNativeImageUsedExclusively() { LIMITED_METHOD_CONTRACT; m_flags|=PEFILE_NATIVE_IMAGE_USED_EXCLUSIVELY; }
+ BOOL IsNativeImageUsedExclusively() { LIMITED_METHOD_CONTRACT; return m_flags&PEFILE_NATIVE_IMAGE_USED_EXCLUSIVELY; }
+ void SetSafeToHardBindTo() { LIMITED_METHOD_CONTRACT; m_flags|=PEFILE_SAFE_TO_HARDBINDTO; }
+ BOOL IsSafeToHardBindTo() { LIMITED_METHOD_CONTRACT; return m_flags&PEFILE_SAFE_TO_HARDBINDTO; }
+
+ BOOL IsNativeLoaded();
+ PEImage *GetNativeImageWithRef();
+ PEImage *GetPersistentNativeImage();
+#endif
+ BOOL HasNativeImage();
+ PTR_PEImageLayout GetLoaded();
+ PTR_PEImageLayout GetLoadedNative();
+ PTR_PEImageLayout GetLoadedIL();
+ PTR_PEImageLayout GetAnyILWithRef(); //AddRefs!
+ IStream * GetPdbStream();
+ void ClearPdbStream();
+ BOOL IsLoaded(BOOL bAllowNativeSkip=TRUE);
+ BOOL PassiveDomainOnly();
+ BOOL IsPtrInILImage(PTR_CVOID data);
+
+#ifdef DACCESS_COMPILE
+ PEImage *GetNativeImage()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifdef FEATURE_PREJIT
+ return m_nativeImage;
+#else
+ return NULL;
+#endif
+ }
+#endif
+
+#ifdef FEATURE_PREJIT
+ // ------------------------------------------------------------
+ // Native image config utilities
+ // ------------------------------------------------------------
+
+ static CorCompileConfigFlags GetNativeImageConfigFlags(BOOL fForceDebug = FALSE,
+ BOOL fForceProfiling = FALSE,
+ BOOL fForceInstrument = FALSE);
+
+ static CorCompileConfigFlags GetNativeImageConfigFlagsWithOverrides();
+
+#ifdef DEBUGGING_SUPPORTED
+ static void SetNGENDebugFlags(BOOL fAllowOpt);
+ static void GetNGENDebugFlags(BOOL *fAllowOpt);
+#endif
+
+#ifdef FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS
+ static BOOL ShouldTreatNIAsMSIL();
+#endif // FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS
+
+#endif // FEATURE_PREJIT
+
+ // ------------------------------------------------------------
+ // Resource access
+ // ------------------------------------------------------------
+
+ void GetEmbeddedResource(DWORD dwOffset, DWORD *cbResource, PBYTE *pbInMemoryResource);
+
+ // ------------------------------------------------------------
+ // File loading
+ // ------------------------------------------------------------
+
+ PEAssembly * LoadAssembly(
+ mdAssemblyRef kAssemblyRef,
+ IMDInternalImport * pImport = NULL,
+ LPCUTF8 szWinRtTypeNamespace = NULL,
+ LPCUTF8 szWinRtTypeClassName = NULL);
+
+ // ------------------------------------------------------------
+ // Logging
+ // ------------------------------------------------------------
+
+ // The format string is intentionally unicode to avoid globalization bugs
+#ifdef FEATURE_PREJIT
+ void ExternalLog(DWORD facility, DWORD level, const WCHAR *fmt, ...) DAC_EMPTY();
+ void ExternalLog(DWORD level, const WCHAR *fmt, ...) DAC_EMPTY();
+ void ExternalLog(DWORD level, const char *msg) DAC_EMPTY();
+ virtual void ExternalVLog(DWORD facility, DWORD level, const WCHAR *fmt, va_list args) DAC_EMPTY();
+ virtual void FlushExternalLog() DAC_EMPTY();
+#endif
+
+protected:
+ // ------------------------------------------------------------
+ // Internal constants
+ // ------------------------------------------------------------
+
+ enum
+ {
+ PEFILE_SYSTEM = 0x01,
+ PEFILE_ASSEMBLY = 0x02,
+ PEFILE_MODULE = 0x04,
+ PEFILE_SKIP_VERIFICATION = 0x08,
+ PEFILE_SKIP_MODULE_HASH_CHECKS= 0x10,
+ PEFILE_ISTREAM = 0x100,
+#ifdef FEATURE_PREJIT
+ PEFILE_HAS_NATIVE_IMAGE_METADATA = 0x200,
+ PEFILE_NATIVE_IMAGE_USED_EXCLUSIVELY =0x1000,
+ PEFILE_SAFE_TO_HARDBINDTO = 0x4000, // NGEN-only flag
+#endif
+ PEFILE_INTROSPECTIONONLY = 0x400,
+ PEFILE_SXS_LOAD_VERIFIED = 0x2000
+ };
+
+ // ------------------------------------------------------------
+ // Internal routines
+ // ------------------------------------------------------------
+
+#ifndef DACCESS_COMPILE
+ PEFile(PEImage *image, BOOL fCheckAuthenticodeSignature = TRUE);
+ virtual ~PEFile();
+
+ virtual void ReleaseIL();
+#endif
+
+ void OpenMDImport();
+ void RestoreMDImport(IMDInternalImport* pImport);
+ void OpenMDImport_Unsafe();
+ void OpenImporter();
+ void OpenAssemblyImporter();
+ void OpenEmitter();
+ void OpenAssemblyEmitter();
+
+ void ConvertMDInternalToReadWrite();
+ void ReleaseMetadataInterfaces(BOOL bDestructor, BOOL bKeepNativeData=FALSE);
+
+#ifdef FEATURE_CAS_POLICY
+ // Check the Authenticode signature of a PE file
+ void CheckAuthenticodeSignature();
+#endif // FEATURE_CAS_POLICY
+
+ friend class Module;
+#ifdef FEATURE_PREJIT
+ void SetNativeImage(PEImage *nativeImage);
+#ifndef DACCESS_COMPILE
+ virtual void ClearNativeImage();
+#endif
+#endif
+
+#ifndef DACCESS_COMPILE
+ void EnsureImageOpened();
+#endif // DACCESS_COMPILE
+
+ friend class ClrDataAccess;
+ BOOL HasNativeImageMetadata();
+
+ // ------------------------------------------------------------
+ // Instance fields
+ // ------------------------------------------------------------
+
+#ifdef _DEBUG
+ LPCWSTR m_pDebugName;
+ SString m_debugName;
+#endif
+
+ // Identity image
+ PTR_PEImage m_identity;
+ // IL image, NULL if we didn't need to open the file
+ PTR_PEImage m_openedILimage;
+#ifdef FEATURE_PREJIT
+ // Native image
+ PTR_PEImage m_nativeImage;
+
+ BOOL m_fCanUseNativeImage;
+#endif
+ // This flag is not updated atomically with m_pMDImport. It's fine for debugger usage,
+ // but don't rely on it in the runtime. In the runtime, try QI'ing m_pMDImport for
+ // IID_IMDInternalImportENC instead.
+ BOOL m_MDImportIsRW_Debugger_Use_Only;
+ Volatile<BOOL> m_bHasPersistentMDImport;
+
+#ifndef DACCESS_COMPILE
+ IMDInternalImport *m_pMDImport;
+#else
+ IMDInternalImport *m_pMDImport_UseAccessor;
+#endif
+ IMetaDataImport2 *m_pImporter;
+ IMetaDataEmit *m_pEmitter;
+#ifndef FEATURE_CORECLR
+ IMetaDataAssemblyImport *m_pAssemblyImporter;
+ IMetaDataAssemblyEmit *m_pAssemblyEmitter;
+#endif
+ SimpleRWLock *m_pMetadataLock;
+ Volatile<LONG> m_refCount;
+ SBuffer *m_hash; // cached SHA1 hash value
+ int m_flags;
+ BOOL m_fStrongNameVerified;
+#ifdef FEATURE_CAS_POLICY
+ COR_TRUST *m_certificate;
+ BOOL m_fCheckedCertificate;
+ IInternetSecurityManager *m_pSecurityManager;
+ Crst m_securityManagerLock;
+#endif // FEATURE_CAS_POLICY
+
+#ifdef DEBUGGING_SUPPORTED
+#ifdef FEATURE_PREJIT
+ SVAL_DECL(DWORD, s_NGENDebugFlags);
+#endif
+#endif
+public:
+
+ PTR_PEImage GetILimage()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+#ifndef DACCESS_COMPILE
+ if (m_openedILimage == NULL && m_identity != NULL)
+ {
+ PEImage* pOpenedILimage;
+ m_identity->Clone(MDInternalImport_Default,&pOpenedILimage);
+ if (InterlockedCompareExchangeT(&m_openedILimage,pOpenedILimage,NULL) != NULL)
+ pOpenedILimage->Release();
+ }
+#endif
+ return m_openedILimage;
+ }
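+
+ // Note (informational): the InterlockedCompareExchangeT in GetILimage lets
+ // concurrent callers race to clone the identity image; the loser releases
+ // its clone, so exactly one copy is ever published in m_openedILimage.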
+
+ PEImage *GetOpenedILimage()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(HasOpenedILimage());
+ return m_openedILimage;
+ }
+
+
+ BOOL HasOpenedILimage()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_openedILimage != NULL;
+ }
+
+ BOOL HasLoadedIL()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return HasOpenedILimage() && GetOpenedILimage()->HasLoadedLayout();
+ }
+
+ BOOL IsDesignerBindingContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_HOSTED_BINDER
+ DWORD binderFlags = BINDER_NONE;
+
+ HRESULT hr = E_FAIL;
+ if (HasHostAssembly())
+ hr = GetHostAssembly()->GetBinderFlags(&binderFlags);
+
+ return hr == S_OK ? binderFlags & BINDER_DESIGNER_BINDING_CONTEXT : FALSE;
+#else
+ return FALSE;
+#endif
+ }
+
+ LPCWSTR GetPathForErrorMessages();
+
+ static PEFile* Dummy();
+ void MarkNativeImageInvalidIfOwned();
+ void ConvertMetadataToRWForEnC();
+
+#if defined(FEATURE_VERSIONING) || defined(FEATURE_HOSTED_BINDER)
+protected:
+ PTR_ICLRPrivAssembly m_pHostAssembly;
+#endif
+
+#ifdef FEATURE_HOSTED_BINDER
+protected:
+
+ friend class CLRPrivBinderFusion;
+#ifndef DACCESS_COMPILE
+ // CLRPrivBinderFusion calls this for Fusion-bound assemblies in AppX processes.
+ void SetHostAssembly(ICLRPrivAssembly * pHostAssembly)
+ { LIMITED_METHOD_CONTRACT; m_pHostAssembly = clr::SafeAddRef(pHostAssembly); }
+#endif //DACCESS_COMPILE
+
+public:
+ // Returns a non-AddRef'ed ICLRPrivAssembly*
+ PTR_ICLRPrivAssembly GetHostAssembly()
+ {
+ STATIC_CONTRACT_LIMITED_METHOD;
+ return m_pHostAssembly;
+ }
+
+ // Returns the ICLRPrivBinder* instance associated with the PEFile
+ PTR_ICLRPrivBinder GetBindingContext();
+
+ bool HasHostAssembly()
+ { STATIC_CONTRACT_WRAPPER; return GetHostAssembly() != nullptr; }
+
+ bool CanUseWithBindingCache()
+ { LIMITED_METHOD_CONTRACT; return !HasHostAssembly(); }
+#endif // FEATURE_HOSTED_BINDER
+}; // class PEFile
+
+
+class PEAssembly : public PEFile
+{
+ VPTR_VTABLE_CLASS(PEAssembly, PEFile)
+
+ public:
+ // ------------------------------------------------------------
+ // Statics initialization.
+ // ------------------------------------------------------------
+ static
+ void Attach();
+
+ // ------------------------------------------------------------
+ // Public API
+ // ------------------------------------------------------------
+
+#if defined(FEATURE_HOSTED_BINDER)
+#if !defined(FEATURE_CORECLR)
+ static PEAssembly * Open(
+ PEAssembly * pParentAssembly,
+ PEImage * pPEImageIL,
+ PEImage * pPEImageNI,
+ ICLRPrivAssembly * pHostAssembly,
+ BOOL fIsIntrospectionOnly);
+
+ static PEAssembly * Open(
+ PEAssembly * pParentAssembly,
+ PEImage * pPEImageIL,
+ BOOL isIntrospectionOnly = FALSE);
+#else //!FEATURE_CORECLR
+ // CoreCLR's PrivBinder PEAssembly creation entrypoint
+ static PEAssembly * Open(
+ PEAssembly * pParent,
+ PEImage * pPEImageIL,
+ PEImage * pPEImageNI,
+ ICLRPrivAssembly * pHostAssembly,
+ BOOL fIsIntrospectionOnly = FALSE);
+#endif //!FEATURE_CORECLR
+#endif //FEATURE_HOSTED_BINDER
+
+ // This opens the canonical mscorlib.dll
+#ifdef FEATURE_FUSION
+ static PEAssembly *OpenSystem(IApplicationContext *pAppCtx);
+#else
+ static PEAssembly *OpenSystem(IUnknown *pAppCtx);
+#endif
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+#ifdef FEATURE_FUSION
+ static PEAssembly *Open(
+ IAssembly *pIAssembly,
+ IBindResult *pNativeFusionAssembly,
+ IFusionBindLog *pFusionLog = NULL,
+ BOOL isSystemAssembly = FALSE,
+ BOOL isIntrospectionOnly = FALSE);
+
+ static PEAssembly *Open(
+ IHostAssembly *pIHostAssembly,
+ BOOL isSystemAssembly = FALSE,
+ BOOL isIntrospectionOnly = FALSE);
+
+#ifdef FEATURE_MIXEDMODE
+ // Use for main exe loading
+ // NOTE: This may also be used for "spontaneous" (IJW) dll loading where
+ // we need to deliver DllMain callbacks, but we should eliminate this case
+
+ static PEAssembly *OpenHMODULE(
+ HMODULE hMod,
+ IAssembly *pFusionAssembly,
+ IBindResult *pNativeFusionAssembly,
+ IFusionBindLog *pFusionLog = NULL,
+ BOOL isIntrospectionOnly = FALSE);
+#endif // FEATURE_MIXEDMODE
+
+ static PEAssembly *DoOpen(
+ IAssembly *pIAssembly,
+ IBindResult *pNativeFusionAssembly,
+ IFusionBindLog *pFusionLog,
+ BOOL isSystemAssembly,
+ BOOL isIntrospectionOnly = FALSE);
+
+ static PEAssembly *DoOpen(
+ IHostAssembly *pIHostAssembly,
+ BOOL isSystemAssembly,
+ BOOL isIntrospectionOnly = FALSE);
+#ifdef FEATURE_MIXEDMODE
+ static PEAssembly *DoOpenHMODULE(
+ HMODULE hMod,
+ IAssembly *pFusionAssembly,
+ IBindResult *pNativeFusionAssembly,
+ IFusionBindLog *pFusionLog,
+ BOOL isIntrospectionOnly = FALSE);
+#endif // FEATURE_MIXEDMODE
+#else
+ static PEAssembly *Open(
+ CoreBindResult* pBindResult,
+ BOOL isSystem,
+ BOOL isIntrospectionOnly);
+#endif // FEATURE_FUSION
+
+ static PEAssembly *Create(
+ PEAssembly *pParentAssembly,
+ IMetaDataAssemblyEmit *pEmit,
+ BOOL isIntrospectionOnly);
+
+ static PEAssembly *OpenMemory(
+ PEAssembly *pParentAssembly,
+ const void *flat,
+ COUNT_T size,
+ BOOL isIntrospectionOnly = FALSE,
+ CLRPrivBinderLoadFile* pBinderToUse = NULL);
+
+ static PEAssembly *DoOpenMemory(
+ PEAssembly *pParentAssembly,
+ const void *flat,
+ COUNT_T size,
+ BOOL isIntrospectionOnly,
+ CLRPrivBinderLoadFile* pBinderToUse);
+
+ private:
+ // Private helpers for crufty exception handling reasons
+#ifdef FEATURE_FUSION
+ static PEAssembly *DoOpenSystem(IApplicationContext *pAppCtx);
+#else
+ static PEAssembly *DoOpenSystem(IUnknown *pAppCtx);
+#endif
+
+ public:
+
+ // ------------------------------------------------------------
+ // binding & source
+ // ------------------------------------------------------------
+
+ BOOL IsSourceGAC();
+#ifdef FEATURE_CORECLR
+ BOOL IsProfileAssembly();
+ BOOL IsSilverlightPlatformStrongNameSignature();
+ BOOL IsProfileTestAssembly();
+ BOOL IsTestKeySignature();
+#endif // FEATURE_CORECLR
+
+ ULONG HashIdentity();
+#ifdef FEATURE_FUSION
+
+ BOOL FusionLoggingEnabled()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_bFusionLogEnabled && (m_pFusionLog != NULL);
+ };
+ void DisableFusionLogging()
+ {
+ m_bFusionLogEnabled = FALSE;
+ };
+
+ IFusionBindLog *GetFusionBindLog()
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_FUSION
+ return (m_bFusionLogEnabled && (m_pFusionLog != NULL)) ? m_pFusionLog : NULL;
+#else
+ return NULL;
+#endif
+ }
+
+
+ BOOL IsBindingCodeBase();
+
+ BOOL IsSourceDownloadCache();
+
+ LOADCTX_TYPE GetLoadContext();
+ BOOL IsContextLoad();
+
+ // Can we avoid exposing these?
+ IAssembly *GetFusionAssembly();
+ IHostAssembly *GetIHostAssembly();
+ IAssemblyName *GetFusionAssemblyName();
+ IAssemblyName *GetFusionAssemblyNameNoCreate();
+ IAssemblyLocation* GetNativeAssemblyLocation();
+ DWORD GetLocationFlags();
+ PEKIND GetFusionProcessorArchitecture();
+#endif
+
+#ifndef DACCESS_COMPILE
+ virtual void ReleaseIL();
+#endif
+
+ // ------------------------------------------------------------
+ // Hash support
+ // ------------------------------------------------------------
+
+ BOOL NeedsModuleHashChecks();
+
+ BOOL HasStrongNameSignature();
+ BOOL IsFullySigned();
+
+ void SetStrongNameBypassed();
+ void VerifyStrongName();
+
+ // ------------------------------------------------------------
+ // Descriptive strings
+ // ------------------------------------------------------------
+
+ // This returns a non-empty path representing the source of the assembly; it may
+ // come from the parent assembly for dynamic or in-memory assemblies
+ const SString &GetEffectivePath();
+
+ // Codebase is the fusion codebase or path for the assembly. It is in URL format.
+ // Note this may be obtained from the parent PEFile if we don't have a path or fusion
+ // assembly.
+ //
+ // fCopiedName means to get the "shadow copied" path rather than the original path, if applicable
+ void GetCodeBase(SString &result, BOOL fCopiedName = FALSE);
+ // Get the fully qualified assembly name from its metadata token
+ static void GetFullyQualifiedAssemblyName(IMDInternalImport* pImport, mdAssembly mda, SString &result, DWORD flags = 0);
+
+ // Display name is the fusion binding name for an assembly
+ void GetDisplayName(SString &result, DWORD flags = 0);
+
+ // ------------------------------------------------------------
+ // Metadata access
+ // ------------------------------------------------------------
+
+ LPCUTF8 GetSimpleName();
+
+ // ------------------------------------------------------------
+ // Utility functions
+ // ------------------------------------------------------------
+
+ static void PathToUrl(SString &string);
+ static void UrlToPath(SString &string);
+ static BOOL FindLastPathSeparator(const SString &path, SString::Iterator &i);
+
+ // ------------------------------------------------------------
+ // Logging
+ // ------------------------------------------------------------
+#ifdef FEATURE_PREJIT
+ void ExternalVLog(DWORD facility, DWORD level, const WCHAR *fmt, va_list args) DAC_EMPTY();
+ void FlushExternalLog() DAC_EMPTY();
+#ifdef FEATURE_FUSION
+ void ETWTraceLogMessage(DWORD dwETWLogCategory, PEAssembly *pAsm)
+ {
+ LIMITED_METHOD_CONTRACT
+ if (FusionLoggingEnabled() &&
+ (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_PRIVATEFUSION_KEYWORD)))
+ {
+ m_pFusionLog->ETWTraceLogMessage(dwETWLogCategory, (pAsm?pAsm->m_pFusionAssemblyName:NULL));
+ }
+ }
+ ULONGLONG GetBindingID()
+ {
+ LIMITED_METHOD_CONTRACT;
+ ULONGLONG ullBindingID = 0;
+ if (FusionLoggingEnabled())
+ m_pFusionLog->GetBindingID(&ullBindingID);
+ return ullBindingID;
+ }
+#endif
+#endif
+
+#ifndef FEATURE_CORECLR
+ BOOL IsReportedToUsageLog();
+ void SetReportedToUsageLog();
+#endif // !FEATURE_CORECLR
+
+ protected:
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_FUSION
+ PEAssembly(
+ PEImage *image,
+ IMetaDataEmit *pEmit,
+ IAssembly *pIAssembly,
+ IBindResult *pNativeFusionAssembly,
+ PEImage *pNIImage,
+ IFusionBindLog *pFusionLog,
+ IHostAssembly *pIHostAssembly,
+ PEFile *creator,
+ BOOL system,
+ BOOL introspectionOnly = FALSE,
+ ICLRPrivAssembly * pHostAssembly = NULL);
+#else
+ PEAssembly(
+ CoreBindResult* pBindResultInfo,
+ IMetaDataEmit *pEmit,
+ PEFile *creator,
+ BOOL system,
+ BOOL introspectionOnly = FALSE
+#ifdef FEATURE_HOSTED_BINDER
+ ,
+ PEImage * pPEImageIL = NULL,
+ PEImage * pPEImageNI = NULL,
+ ICLRPrivAssembly * pHostAssembly = NULL
+#endif
+ );
+#endif
+ virtual ~PEAssembly();
+#endif
+
+ // ------------------------------------------------------------
+ // Loader access API
+ // ------------------------------------------------------------
+
+ friend class DomainAssembly;
+#ifdef FEATURE_PREJIT
+
+#ifdef FEATURE_FUSION
+ void SetNativeImage(IBindResult *pNativeFusionAssembly);
+#else
+ void SetNativeImage(PEImage *image);
+#endif
+
+ BOOL CheckNativeImageVersion(PEImage *image);
+
+
+#ifdef FEATURE_FUSION
+ void ClearNativeImage();
+ void SetNativeImageClosure(IAssemblyBindingClosure *pClosure)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (m_pNativeImageClosure!=NULL)
+ m_pNativeImageClosure->Release();
+ if (pClosure)
+ pClosure->AddRef();
+ m_pNativeImageClosure=pClosure;
+ };
+ BOOL HasEqualNativeClosure(DomainAssembly* pDomainAssembly);
+#endif //FEATURE_FUSION
+
+#endif // FEATURE_PREJIT
+
+ private:
+ // Check both the StrongName and Authenticode signatures of an assembly. If the application is using
+ // strong name bypass, then this call may not result in a strong name verification. VerifyStrongName
+ // should be called if strong name verification must be forced.
+ void DoLoadSignatureChecks();
+
+
+ private:
+ // ------------------------------------------------------------
+ // Instance fields
+ // ------------------------------------------------------------
+
+ PTR_PEFile m_creator;
+#ifdef FEATURE_FUSION
+ IAssemblyName *m_pFusionAssemblyName;
+ IAssembly *m_pFusionAssembly;
+ IFusionBindLog *m_pFusionLog;
+ BOOL m_bFusionLogEnabled;
+ IHostAssembly *m_pIHostAssembly;
+ IAssemblyLocation *m_pNativeAssemblyLocation;
+ IAssemblyBindingClosure *m_pNativeImageClosure; //present only for shared
+ LOADCTX_TYPE m_loadContext;
+ DWORD m_dwLocationFlags;
+#else
+ BOOL m_bIsFromGAC;
+ BOOL m_bIsOnTpaList;
+ // Using a separate entry and not m_pHostAssembly because otherwise
+ // HasHostAssembly becomes true, which trips various other code paths
+ // and results in bad behavior.
+ SString m_sTextualIdentity;
+#endif
+#ifdef FEATURE_CORECLR
+ int m_fProfileAssembly; // Tri-state cache: 0 = not yet computed, 1 = profile assembly, -1 = not a profile assembly
+#else
+ BOOL m_fStrongNameBypassed;
+#endif
+
+ public:
+ PTR_PEFile GetCreator()
+ { LIMITED_METHOD_CONTRACT; return m_creator; }
+
+ // Returns TRUE if the assembly is a .winmd file (WinRT assembly)
+ bool IsWindowsRuntime();
+
+ // Used to determine if this assembly has an identity that may be used for
+ // binding purposes. Currently this is true for standard .NET assemblies
+ // and false for WinRT assemblies (where assemblies are identified by their
+ // member types).
+ bool HasBindableIdentity();
+
+ // Indicates if the assembly can be cached in a binding cache such as AssemblySpecBindingCache.
+ inline bool CanUseWithBindingCache()
+ {
+#if defined(FEATURE_HOSTED_BINDER)
+ STATIC_CONTRACT_WRAPPER;
+#if !defined(FEATURE_APPX_BINDER)
+ return (HasBindableIdentity());
+#else
+ return (PEFile::CanUseWithBindingCache() && HasBindableIdentity());
+#endif // FEATURE_APPX_BINDER
+#else
+ STATIC_CONTRACT_LIMITED_METHOD;
+ return true;
+#endif // FEATURE_HOSTED_BINDER
+ }
+};
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+
+class PEModule : public PEFile
+{
+ VPTR_VTABLE_CLASS(PEModule, PEFile)
+
+ public:
+
+ // ------------------------------------------------------------
+ // Public API
+ // ------------------------------------------------------------
+
+ static PEModule *Open(PEAssembly *assembly, mdFile token,
+ const SString &fileName);
+
+ static PEModule *OpenMemory(PEAssembly *assembly, mdFile kToken,
+ const void *flat, COUNT_T size);
+
+ static PEModule *Create(PEAssembly *assembly, mdFile kToken, IMetaDataEmit *pEmit);
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ private:
+ // Private helpers for crufty exception handling reasons
+ static PEModule *DoOpen(PEAssembly *assembly, mdFile token,
+ const SString &fileName);
+
+ static PEModule *DoOpenMemory(PEAssembly *assembly, mdFile kToken,
+ const void *flat, COUNT_T size);
+ public:
+
+ // ------------------------------------------------------------
+ // Metadata access
+ // ------------------------------------------------------------
+
+ PEAssembly *GetAssembly();
+ mdFile GetToken();
+ BOOL IsResource();
+ BOOL IsIStream();
+ LPCUTF8 GetSimpleName();
+
+ // ------------------------------------------------------------
+ // Logging
+ // ------------------------------------------------------------
+#ifdef FEATURE_PREJIT
+ void ExternalVLog(DWORD facility, DWORD level, const WCHAR *fmt, va_list args) DAC_EMPTY();
+ void FlushExternalLog() DAC_EMPTY();
+#endif
+private:
+ // ------------------------------------------------------------
+ // Loader access API
+ // ------------------------------------------------------------
+
+ friend class DomainModule;
+#ifdef FEATURE_PREJIT
+ void SetNativeImage(const SString &fullPath);
+#endif // FEATURE_PREJIT
+
+private:
+
+#ifndef DACCESS_COMPILE
+ PEModule(PEImage *image, PEAssembly *assembly, mdFile token, IMetaDataEmit *pEmit);
+ virtual ~PEModule();
+#endif
+
+ // ------------------------------------------------------------
+ // Instance fields
+ // ------------------------------------------------------------
+
+ PTR_PEAssembly m_assembly;
+ mdFile m_token;
+ BOOL m_bIsResource;
+};
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+typedef ReleaseHolder<PEFile> PEFileHolder;
+
+typedef ReleaseHolder<PEAssembly> PEAssemblyHolder;
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+typedef ReleaseHolder<PEModule> PEModuleHolder;
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
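+
+// Illustrative usage sketch (not part of the original sources): the holder typedefs
+// above give exception-safe reference counting. 'pFile' here is a hypothetical
+// PEAssembly* whose reference the holder takes over; Release() runs when the holder
+// leaves scope, even if an intervening call throws.
+//
+//     {
+//         PEAssemblyHolder holder(pFile);     // assumes ownership of the reference
+//         holder->ValidateForExecution();     // may throw; the holder still releases
+//     }                                       // holder calls pFile->Release() here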
+
+
+// A small shim around PEAssemblies/IBindResult that allows us to write Fusion/CLR-agnostic
+// code for logging native bind failures to the Fusion log/CLR log.
+//
+// These structures are stack-based, non-thread-safe and created for the duration of a single RuntimeVerify call.
+// The methods are expected to compute their data lazily, as they are only used on bind failures or in checked builds.
+class LoggablePEAssembly : public LoggableAssembly
+{
+ public:
+ virtual SString DisplayString()
+ {
+ STANDARD_VM_CONTRACT;
+
+ return m_peAssembly->GetPath();
+ }
+
+#ifdef FEATURE_FUSION
+ virtual IAssemblyName* FusionAssemblyName()
+ {
+ STANDARD_VM_CONTRACT;
+
+ return m_peAssembly->GetFusionAssemblyName();
+ }
+
+ virtual IFusionBindLog* FusionBindLog()
+ {
+ STANDARD_VM_CONTRACT;
+
+ return m_peAssembly->GetFusionBindLog();
+ }
+#endif // FEATURE_FUSION
+
+ LoggablePEAssembly(PEAssembly *peAssembly)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_peAssembly = peAssembly;
+ peAssembly->AddRef();
+ }
+
+ ~LoggablePEAssembly()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_peAssembly->Release();
+ }
+
+ private:
+ PEAssembly *m_peAssembly;
+};
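+
+// Illustrative usage sketch (hypothetical caller, not part of the original sources):
+// a RuntimeVerify-style bind-failure path can wrap the PEAssembly for the duration
+// of one call and hand the wrapper to the Fusion/CLR-agnostic logging code.
+//
+//     LoggablePEAssembly logAsm(pAssembly);   // AddRefs pAssembly until scope exit
+//     SString name(logAsm.DisplayString());   // path-based identity for the log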
+
+
+// ================================================================================
+// Inline definitions
+// ================================================================================
+
+
+#endif // PEFILE_H_
diff --git a/src/vm/pefile.inl b/src/vm/pefile.inl
new file mode 100644
index 0000000000..e5c95f7621
--- /dev/null
+++ b/src/vm/pefile.inl
@@ -0,0 +1,2137 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// --------------------------------------------------------------------------------
+// PEFile.inl
+//
+
+// --------------------------------------------------------------------------------
+
+#ifndef PEFILE_INL_
+#define PEFILE_INL_
+
+#include "strongname.h"
+#include "strongnameholders.h"
+#ifdef FEATURE_FUSION
+#include "fusionbind.h"
+#endif
+#include "check.h"
+#include "simplerwlock.hpp"
+#include "eventtrace.h"
+#include "peimagelayout.inl"
+
+#if CHECK_INVARIANTS
+inline CHECK PEFile::Invariant()
+{
+ CONTRACT_CHECK
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACT_CHECK_END;
+
+ if (IsDynamic())
+ {
+ // dynamic module case
+ CHECK(m_openedILimage == NULL);
+#ifdef FEATURE_PREJIT
+ CHECK(m_nativeImage == NULL);
+#endif
+ CHECK(CheckPointer(m_pEmitter));
+ }
+ else
+ {
+ // If m_image is null, then we should have a native image. However, this is not valid initially
+ // during construction. We should find a way to assert this.
+ CHECK(CheckPointer((PEImage*) m_openedILimage, NULL_OK));
+#ifdef FEATURE_PREJIT
+ CHECK(CheckPointer((PEImage*) m_nativeImage, NULL_OK));
+#endif
+ }
+ CHECK_OK;
+}
+#endif // CHECK_INVARIANTS
+
+// ------------------------------------------------------------
+// AddRef/Release
+// ------------------------------------------------------------
+
+inline ULONG PEFile::AddRef()
+{
+ CONTRACTL
+ {
+ PRECONDITION(m_refCount < COUNT_T_MAX);
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return FastInterlockIncrement(&m_refCount);
+}
+
+inline ULONG PEFile::Release()
+{
+ CONTRACT(COUNT_T)
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ LONG result = FastInterlockDecrement(&m_refCount);
+ _ASSERTE(result >= 0);
+ if (result == 0)
+ delete this;
+
+ RETURN result;
+}
+
+// ------------------------------------------------------------
+// Identity
+// ------------------------------------------------------------
+
+inline ULONG PEAssembly::HashIdentity()
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckPointer(m_identity));
+ MODE_ANY;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+#ifdef FEATURE_VERSIONING
+ return BINDER_SPACE::GetAssemblyFromPrivAssemblyFast(m_pHostAssembly)->GetAssemblyName()->Hash(BINDER_SPACE::AssemblyName::INCLUDE_VERSION);
+#else
+ if (!m_identity->HasID())
+ {
+ if (!IsLoaded())
+ return 0;
+ else
+ return (ULONG) dac_cast<TADDR>(GetLoaded()->GetBase());
+ }
+ else
+ return m_identity->GetIDHash();
+#endif
+}
+
+inline void PEFile::ValidateForExecution()
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ // We do not need to check NGen images; if the image had the attribute, it would have
+ // failed to load at NGen time, and so there would be no NGen image.
+ if (HasNativeImage() || IsIntrospectionOnly())
+ return;
+
+ //
+ // Ensure reference assemblies are not loaded for execution
+ //
+ ReleaseHolder<IMDInternalImport> mdImport(this->GetMDImportWithRef());
+ if (mdImport->GetCustomAttributeByName(TokenFromRid(1, mdtAssembly),
+ g_ReferenceAssemblyAttribute,
+ NULL,
+ NULL) == S_OK) {
+ ThrowHR(COR_E_LOADING_REFERENCE_ASSEMBLY, BFA_REFERENCE_ASSEMBLY);
+ }
+
+ //
+ // Ensure platform is valid for execution
+ //
+ if (!IsDynamic() && !IsResource())
+ {
+ if (IsMarkedAsNoPlatform())
+ {
+ if (IsMarkedAsContentTypeWindowsRuntime())
+ {
+ ThrowHR(COR_E_LOADING_WINMD_REFERENCE_ASSEMBLY);
+ }
+ else
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ }
+ }
+}
+
+
+inline BOOL PEFile::IsMarkedAsNoPlatform()
+{
+ WRAPPER_NO_CONTRACT;
+ return (IsAfPA_NoPlatform(GetFlags()));
+}
+
+inline BOOL PEFile::IsMarkedAsContentTypeWindowsRuntime()
+{
+ WRAPPER_NO_CONTRACT;
+ return (IsAfContentType_WindowsRuntime(GetFlags()));
+}
+
+#ifndef FEATURE_CORECLR
+inline BOOL PEFile::IsShareable()
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckPointer(m_identity));
+ MODE_ANY;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if (!m_identity->HasID())
+ return FALSE;
+ return TRUE;
+}
+#endif
+
+inline void PEFile::GetMVID(GUID *pMvid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ IfFailThrow(GetPersistentMDImport()->GetScopeProps(NULL, pMvid));
+}
+
+inline BOOL PEFile::PassiveDomainOnly()
+{
+ WRAPPER_NO_CONTRACT;
+ return HasOpenedILimage() && GetOpenedILimage()->PassiveDomainOnly();
+}
+
+// ------------------------------------------------------------
+// Loader support routines
+// ------------------------------------------------------------
+
+inline void PEFile::SetSkipVerification()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_flags |= PEFILE_SKIP_VERIFICATION;
+}
+
+inline BOOL PEFile::HasSkipVerification()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_flags & (PEFILE_SKIP_VERIFICATION | PEFILE_SYSTEM)) != 0;
+}
+
+// ------------------------------------------------------------
+// Descriptive strings
+// ------------------------------------------------------------
+
+inline const SString &PEFile::GetPath()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ GC_NOTRIGGER;
+ NOTHROW;
+ CANNOT_TAKE_LOCK;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (IsDynamic())
+ {
+ return SString::Empty();
+ }
+ else
+ return m_identity->GetPath();
+}
+
+
+#ifdef DACCESS_COMPILE
+inline const SString &PEFile::GetModuleFileNameHint()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (IsDynamic())
+ {
+ return SString::Empty();
+ }
+ else
+ return m_identity->GetModuleFileNameHintForDAC();
+}
+#endif // DACCESS_COMPILE
+
+#ifdef LOGGING
+inline LPCWSTR PEFile::GetDebugName()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ return m_pDebugName;
+#else
+ return GetPath();
+#endif
+}
+#endif
+
+// ------------------------------------------------------------
+// Classification
+// ------------------------------------------------------------
+
+inline BOOL PEFile::IsAssembly() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (m_flags & PEFILE_ASSEMBLY) != 0;
+}
+
+inline PTR_PEAssembly PEFile::AsAssembly()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (this == NULL)
+ return dac_cast<PTR_PEAssembly>(NULL);
+ if (IsAssembly())
+ return dac_cast<PTR_PEAssembly>(this);
+ else
+ return dac_cast<PTR_PEAssembly>(NULL);
+}
+
+inline BOOL PEFile::IsModule() const
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return (m_flags & PEFILE_MODULE) != 0;
+}
+
+inline PTR_PEModule PEFile::AsModule()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (this == NULL)
+ return dac_cast<PTR_PEModule>(NULL);
+ if (IsModule())
+ return dac_cast<PTR_PEModule>(this);
+ else
+ return dac_cast<PTR_PEModule>(NULL);
+}
+
+inline BOOL PEFile::IsSystem() const
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return (m_flags & PEFILE_SYSTEM) != 0;
+}
+
+inline BOOL PEFile::IsDynamic() const
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return m_identity == NULL;
+}
+
+inline BOOL PEFile::IsResource() const
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ return IsModule() && dac_cast<PTR_PEModule>(this)->IsResource();
+#else
+ return FALSE;
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+}
+
+inline BOOL PEFile::IsIStream() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_flags & PEFILE_ISTREAM) != 0;
+}
+
+inline BOOL PEFile::IsIntrospectionOnly() const
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ if (IsModule())
+ {
+ return dac_cast<PTR_PEModule>(this)->GetAssembly()->IsIntrospectionOnly();
+ }
+ else
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+ {
+ return (m_flags & PEFILE_INTROSPECTIONONLY) != 0;
+ }
+}
+
+
+inline PEAssembly *PEFile::GetAssembly() const
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ if (IsAssembly())
+ return dac_cast<PTR_PEAssembly>(this);
+ else
+ return dac_cast<PTR_PEModule>(this)->GetAssembly();
+#else
+ _ASSERTE(IsAssembly());
+ return dac_cast<PTR_PEAssembly>(this);
+
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+}
+
+// ------------------------------------------------------------
+// Hash support
+// ------------------------------------------------------------
+
+#ifndef DACCESS_COMPILE
+inline void PEFile::GetImageBits(SBuffer &result)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckValue(result));
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EnsureImageOpened();
+ // We don't cache any other hashes right now.
+ if (!IsDynamic())
+ GetILimage()->GetImageBits(PEImageLayout::LAYOUT_FLAT,result);
+}
+
+inline void PEFile::GetHash(ALG_ID algorithm, SBuffer &result)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckValue(result));
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (algorithm == CALG_SHA1)
+ {
+ GetSHA1Hash(result);
+ }
+ else
+ {
+ EnsureImageOpened();
+ // We don't cache any other hashes right now.
+ GetILimage()->ComputeHash(algorithm, result);
+ }
+}
+
+inline CHECK PEFile::CheckHash(ALG_ID algorithm, const void *hash, COUNT_T size)
+{
+ CONTRACT_CHECK
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(hash));
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACT_CHECK_END;
+
+ StackSBuffer hashBuffer;
+ GetHash(algorithm, hashBuffer);
+
+ CHECK(hashBuffer.Equals((const BYTE *)hash, size));
+
+ CHECK_OK;
+}
+#endif // DACCESS_COMPILE
+
+// ------------------------------------------------------------
+// Metadata access
+// ------------------------------------------------------------
+
+inline BOOL PEFile::HasMetadata()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ return !IsResource();
+}
+
+inline IMDInternalImportHolder PEFile::GetMDImport()
+{
+ WRAPPER_NO_CONTRACT;
+ if (m_bHasPersistentMDImport)
+ return IMDInternalImportHolder(GetPersistentMDImport(),FALSE);
+ else
+ return IMDInternalImportHolder(GetMDImportWithRef(),TRUE);
+};
+
+inline IMDInternalImport* PEFile::GetPersistentMDImport()
+{
+/*
+ CONTRACT(IMDInternalImport *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!IsResource());
+ POSTCONDITION(CheckPointer(RETVAL));
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+*/
+ SUPPORTS_DAC;
+#ifndef FEATURE_CORECLR
+ _ASSERTE(m_bHasPersistentMDImport);
+#endif
+#if !defined(__GNUC__)
+
+ _ASSERTE(!IsResource());
+#endif
+#ifdef DACCESS_COMPILE
+ WRAPPER_NO_CONTRACT;
+ return DacGetMDImport(this, true);
+#else
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pMDImport;
+#endif
+}
+
+inline IMDInternalImport *PEFile::GetMDImportWithRef()
+{
+/*
+ CONTRACT(IMDInternalImport *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!IsResource());
+ POSTCONDITION(CheckPointer(RETVAL));
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACT_END;
+*/
+#if !defined(__GNUC__)
+ _ASSERTE(!IsResource());
+#endif
+#ifdef DACCESS_COMPILE
+ WRAPPER_NO_CONTRACT;
+ return DacGetMDImport(this, true);
+#else
+ CONTRACTL
+ {
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+ SimpleReadLockHolder lock(m_pMetadataLock);
+ if(m_pMDImport)
+ m_pMDImport->AddRef();
+ return m_pMDImport;
+#endif
+}
+
+#ifndef DACCESS_COMPILE
+
+inline IMetaDataImport2 *PEFile::GetRWImporter()
+{
+ CONTRACT(IMetaDataImport2 *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!IsResource());
+ POSTCONDITION(CheckPointer(RETVAL));
+ PRECONDITION(m_bHasPersistentMDImport);
+ GC_NOTRIGGER;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ if (m_pImporter == NULL)
+ OpenImporter();
+
+ RETURN m_pImporter;
+}
+
+inline IMetaDataEmit *PEFile::GetEmitter()
+{
+ CONTRACT(IMetaDataEmit *)
+ {
+ INSTANCE_CHECK;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ PRECONDITION(!IsResource());
+ POSTCONDITION(CheckPointer(RETVAL));
+ PRECONDITION(m_bHasPersistentMDImport);
+ THROWS;
+ }
+ CONTRACT_END;
+
+ if (m_pEmitter == NULL)
+ OpenEmitter();
+
+ RETURN m_pEmitter;
+}
+
+#ifndef FEATURE_CORECLR
+inline IMetaDataAssemblyImport *PEFile::GetAssemblyImporter()
+{
+ CONTRACT(IMetaDataAssemblyImport *)
+ {
+ INSTANCE_CHECK;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ PRECONDITION(!IsResource());
+ POSTCONDITION(CheckPointer(RETVAL));
+ PRECONDITION(m_bHasPersistentMDImport);
+ THROWS;
+ }
+ CONTRACT_END;
+
+ if (m_pAssemblyImporter == NULL)
+ OpenAssemblyImporter();
+
+ RETURN m_pAssemblyImporter;
+}
+
+inline IMetaDataAssemblyEmit *PEFile::GetAssemblyEmitter()
+{
+ CONTRACT(IMetaDataAssemblyEmit *)
+ {
+ INSTANCE_CHECK;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ PRECONDITION(!IsResource());
+ POSTCONDITION(CheckPointer(RETVAL));
+ PRECONDITION(m_bHasPersistentMDImport);
+ }
+ CONTRACT_END;
+
+ if (m_pAssemblyEmitter == NULL)
+ OpenAssemblyEmitter();
+
+ RETURN m_pAssemblyEmitter;
+}
+#endif // FEATURE_CORECLR
+
+#endif // DACCESS_COMPILE
+
+// The simple name is not actually very simple. The name returned comes from one of
+// various metadata tables, depending on whether this is a manifest module,
+// a non-manifest module, or something else.
+inline LPCUTF8 PEFile::GetSimpleName()
+{
+ CONTRACT(LPCUTF8)
+ {
+ INSTANCE_CHECK;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ NOTHROW;
+ SUPPORTS_DAC;
+ WRAPPER(GC_TRIGGERS);
+ }
+ CONTRACT_END;
+
+ if (IsAssembly())
+ RETURN dac_cast<PTR_PEAssembly>(this)->GetSimpleName();
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+ else if (IsModule())
+ RETURN dac_cast<PTR_PEModule>(this)->GetSimpleName();
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+ else
+ {
+ LPCUTF8 szScopeName;
+ if (FAILED(GetScopeName(&szScopeName)))
+ {
+ szScopeName = "";
+ }
+ RETURN szScopeName;
+ }
+}
+
+
+// Same as the managed Module.ScopeName property, this unconditionally looks in the
+// metadata Module table to get the name. Useful for profilers and others who don't
+// like sugar coating on their names.
+inline HRESULT PEFile::GetScopeName(LPCUTF8 * pszName)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ MODE_ANY;
+ NOTHROW;
+ SUPPORTS_DAC;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return GetMDImport()->GetScopeProps(pszName, NULL);
+}
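+
+// Illustrative usage sketch (hypothetical caller): GetScopeName follows the usual
+// HRESULT convention rather than throwing, so callers check for failure first.
+//
+//     LPCUTF8 szScope;
+//     if (SUCCEEDED(pFile->GetScopeName(&szScope)))
+//         LOG((LF_LOADER, LL_INFO100, "scope name: %s\n", szScope)); // hypothetical trace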
+
+
+// ------------------------------------------------------------
+// PE file access
+// ------------------------------------------------------------
+
+inline BOOL PEFile::HasSecurityDirectory()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (IsResource() || IsDynamic())
+ return FALSE;
+
+#ifdef FEATURE_PREJIT
+ if (IsNativeLoaded())
+ {
+ CONSISTENCY_CHECK(HasNativeImage());
+
+ return m_nativeImage->GetNativeILHasSecurityDirectory();
+ }
+#ifndef DACCESS_COMPILE
+ if (!HasOpenedILimage())
+ {
+ //don't want to touch the IL image unless we already have
+ ReleaseHolder<PEImage> pNativeImage = GetNativeImageWithRef();
+ if (pNativeImage)
+ return pNativeImage->GetNativeILHasSecurityDirectory();
+ }
+#endif // DACCESS_COMPILE
+#endif // FEATURE_PREJIT
+
+ if (!GetILimage()->HasNTHeaders())
+ return FALSE;
+
+ return GetOpenedILimage()->HasDirectoryEntry(IMAGE_DIRECTORY_ENTRY_SECURITY);
+}
+
+inline BOOL PEFile::IsIbcOptimized()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_PREJIT
+ if (IsNativeLoaded())
+ {
+ CONSISTENCY_CHECK(HasNativeImage());
+
+ return m_nativeImage->IsIbcOptimized();
+ }
+#endif
+
+ return FALSE;
+}
+
+
+inline WORD PEFile::GetSubsystem()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (IsResource() || IsDynamic())
+ return 0;
+
+#ifdef FEATURE_PREJIT
+ if (IsNativeLoaded())
+ {
+ CONSISTENCY_CHECK(HasNativeImage());
+
+ return GetLoadedNative()->GetSubsystem();
+ }
+#ifndef DACCESS_COMPILE
+ if (!HasOpenedILimage())
+ {
+ //don't want to touch the IL image unless we already have
+ ReleaseHolder<PEImage> pNativeImage = GetNativeImageWithRef();
+ if (pNativeImage)
+ return pNativeImage->GetSubsystem();
+ }
+#endif // DACCESS_COMPILE
+#endif // FEATURE_PREJIT
+
+ return GetLoadedIL()->GetSubsystem();
+}
+
+inline mdToken PEFile::GetEntryPointToken(
+#ifdef _DEBUG
+ BOOL bAssumeLoaded
+#endif //_DEBUG
+ )
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (IsResource() || IsDynamic())
+ return mdTokenNil;
+
+
+#ifdef FEATURE_PREJIT
+ if (IsNativeLoaded())
+ {
+ CONSISTENCY_CHECK(HasNativeImage());
+ _ASSERTE (!bAssumeLoaded || m_nativeImage->HasLoadedLayout ());
+ return m_nativeImage->GetEntryPointToken();
+ }
+#ifndef DACCESS_COMPILE
+ if (!HasOpenedILimage())
+ {
+ //don't want to touch the IL image unless we already have
+ ReleaseHolder<PEImage> pNativeImage = GetNativeImageWithRef();
+ if (pNativeImage) {
+ _ASSERTE (!bAssumeLoaded || pNativeImage->HasLoadedLayout ());
+ return pNativeImage->GetEntryPointToken();
+ }
+ }
+#endif // DACCESS_COMPILE
+#endif // FEATURE_PREJIT
+ _ASSERTE (!bAssumeLoaded || HasLoadedIL ());
+ return GetOpenedILimage()->GetEntryPointToken();
+}
+
+#ifdef FEATURE_PREJIT
+inline BOOL PEFile::IsNativeLoaded()
+{
+ WRAPPER_NO_CONTRACT;
+ return (m_nativeImage && m_bHasPersistentMDImport && m_nativeImage->HasLoadedLayout());
+}
+inline void PEFile::MarkNativeImageInvalidIfOwned()
+{
+ WRAPPER_NO_CONTRACT;
+ // If owned, mark the PEFile as dummy, so the image does not get reused
+ PEImageHolder nativeImage(GetNativeImageWithRef());
+ Module * pNativeModule = nativeImage->GetLoadedLayout()->GetPersistedModuleImage();
+ PEFile ** ppNativeFile = (PEFile**) (PBYTE(pNativeModule) + Module::GetFileOffset());
+
+ // Attempt to write only if we claimed the ownership.
+ if (*ppNativeFile == this)
+ FastInterlockCompareExchangePointer(ppNativeFile, Dummy(), this);
+}
+
+
+#endif
+
+inline BOOL PEFile::IsILOnly()
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ CONTRACT_VIOLATION(ThrowsViolation|GCViolation|FaultViolation);
+
+ if (IsResource() || IsDynamic())
+ return FALSE;
+
+#ifdef FEATURE_PREJIT
+ if (IsNativeLoaded())
+ {
+ CONSISTENCY_CHECK(HasNativeImage());
+
+ return m_nativeImage->IsNativeILILOnly();
+ }
+#ifndef DACCESS_COMPILE
+ if (!HasOpenedILimage())
+ {
+ BOOL retVal = FALSE;
+
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+
+ //don't want to touch the IL image unless we already have
+ ReleaseHolder<PEImage> pNativeImage = GetNativeImageWithRef();
+ if (pNativeImage)
+ {
+ retVal = pNativeImage->IsNativeILILOnly();
+ }
+
+ END_SO_INTOLERANT_CODE;
+
+ return retVal;
+ }
+#endif // DACCESS_COMPILE
+#endif // FEATURE_PREJIT
+
+ return GetOpenedILimage()->IsILOnly();
+}
+
+inline BOOL PEFile::IsDll()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (IsResource() || IsDynamic())
+ return TRUE;
+
+#ifdef FEATURE_PREJIT
+ if (IsNativeLoaded())
+ {
+ CONSISTENCY_CHECK(HasNativeImage());
+
+ return m_nativeImage->IsNativeILDll();
+ }
+#ifndef DACCESS_COMPILE
+ if (!HasOpenedILimage())
+ {
+ //don't want to touch the IL image unless we already have
+ ReleaseHolder<PEImage> pNativeImage = GetNativeImageWithRef();
+ if (pNativeImage)
+ return pNativeImage->IsNativeILDll();
+ }
+ EnsureImageOpened();
+#endif // DACCESS_COMPILE
+#endif // FEATURE_PREJIT
+
+ return GetOpenedILimage()->IsDll();
+}
+
+inline PTR_VOID PEFile::GetRvaField(RVA field)
+{
+ CONTRACT(void *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!IsDynamic());
+ PRECONDITION(!IsResource());
+ PRECONDITION(CheckRvaField(field));
+ PRECONDITION(CheckLoaded());
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // Note that the native image Rva fields are currently cut off before
+ // this point. We should not get here for an IL only native image.
+
+ RETURN dac_cast<PTR_VOID>(GetLoadedIL()->GetRvaData(field,NULL_OK));
+}
+
+inline CHECK PEFile::CheckRvaField(RVA field)
+{
+ CONTRACT_CHECK
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!IsDynamic());
+ PRECONDITION(!IsResource());
+ PRECONDITION(CheckLoaded());
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_CHECK_END;
+
+ // Note that the native image Rva fields are currently cut off before
+ // this point. We should not get here for an IL only native image.
+
+ CHECK(GetLoadedIL()->CheckRva(field,NULL_OK));
+ CHECK_OK;
+}
+
+inline CHECK PEFile::CheckRvaField(RVA field, COUNT_T size)
+{
+ CONTRACT_CHECK
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!IsDynamic());
+ PRECONDITION(!IsResource());
+ PRECONDITION(CheckLoaded());
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_CHECK_END;
+
+ // Note that the native image Rva fields are currently cut off before
+ // this point. We should not get here for an IL only native image.
+
+ CHECK(GetLoadedIL()->CheckRva(field, size,0,NULL_OK));
+ CHECK_OK;
+}
+
+inline BOOL PEFile::HasTls()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckLoaded());
+ }
+ CONTRACTL_END;
+
+ // Resource modules do not contain TLS data.
+ if (IsResource())
+ return FALSE;
+ // Dynamic modules do not contain TLS data.
+ else if (IsDynamic())
+ return FALSE;
+ // ILOnly modules do not contain TLS data.
+ else if (IsILOnly())
+ return FALSE;
+ else
+ return GetLoadedIL()->HasTls();
+}
+
+inline BOOL PEFile::IsRvaFieldTls(RVA field)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckLoaded());
+ }
+ CONTRACTL_END;
+
+ if (!HasTls())
+ return FALSE;
+
+ PTR_VOID address = PTR_VOID(GetLoadedIL()->GetRvaData(field));
+
+ COUNT_T tlsSize;
+ PTR_VOID tlsRange = GetLoadedIL()->GetTlsRange(&tlsSize);
+
+ return (address >= tlsRange
+ && address < (dac_cast<PTR_BYTE>(tlsRange)+tlsSize));
+}
+
+inline UINT32 PEFile::GetFieldTlsOffset(RVA field)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckRvaField(field));
+ PRECONDITION(IsRvaFieldTls(field));
+ PRECONDITION(CheckLoaded());
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return (UINT32)(dac_cast<PTR_BYTE>(GetRvaField(field)) -
+ dac_cast<PTR_BYTE>(GetLoadedIL()->GetTlsRange()));
+}
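+
+// Worked example (illustrative addresses): if the loaded image's TLS template spans
+// [0x5000, 0x5100) and GetRvaField(field) resolves to 0x5010, the value returned
+// above is 0x10 -- the field's position within each thread's copy of the TLS block.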
+
+inline UINT32 PEFile::GetTlsIndex()
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckLoaded());
+ INSTANCE_CHECK;
+ PRECONDITION(HasTls());
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return GetLoadedIL()->GetTlsIndex();
+}
+
+inline const void *PEFile::GetInternalPInvokeTarget(RVA target)
+{
+ CONTRACT(void *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!IsDynamic());
+ PRECONDITION(!IsResource());
+ PRECONDITION(CheckInternalPInvokeTarget(target));
+ PRECONDITION(CheckLoaded());
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN (void*)GetLoadedIL()->GetRvaData(target);
+}
+
+inline CHECK PEFile::CheckInternalPInvokeTarget(RVA target)
+{
+ CONTRACT_CHECK
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!IsDynamic());
+ PRECONDITION(!IsResource());
+ PRECONDITION(CheckLoaded());
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_CHECK_END;
+
+ CHECK(!IsILOnly());
+ CHECK(GetLoadedIL()->CheckRva(target));
+
+ CHECK_OK;
+}
+
+inline PCCOR_SIGNATURE PEFile::GetSignature(RVA signature)
+{
+ CONTRACT(PCCOR_SIGNATURE)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!IsDynamic() || signature == 0);
+ PRECONDITION(!IsResource());
+ PRECONDITION(CheckSignatureRva(signature));
+ POSTCONDITION(CheckSignature(RETVAL));
+ PRECONDITION(CheckLoaded());
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ if (signature == 0)
+ RETURN NULL;
+ else
+ RETURN (PCCOR_SIGNATURE) GetLoadedIL()->GetRvaData(signature);
+}
+
+inline RVA PEFile::GetSignatureRva(PCCOR_SIGNATURE signature)
+{
+ CONTRACT(RVA)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!IsDynamic() || signature == NULL);
+ PRECONDITION(!IsResource());
+ PRECONDITION(CheckSignature(signature));
+ POSTCONDITION(CheckSignatureRva(RETVAL));
+ PRECONDITION(CheckLoaded());
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ if (signature == NULL)
+ RETURN 0;
+ else
+ RETURN GetLoadedIL()->GetDataRva(
+ dac_cast<TADDR>(signature));
+}
+
+inline CHECK PEFile::CheckSignature(PCCOR_SIGNATURE signature)
+{
+ CONTRACT_CHECK
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!IsDynamic() || signature == NULL);
+ PRECONDITION(!IsResource());
+ PRECONDITION(CheckLoaded());
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_CHECK_END;
+
+ CHECK(GetLoadedIL()->CheckData(signature,NULL_OK));
+ CHECK_OK;
+}
+
+inline CHECK PEFile::CheckSignatureRva(RVA signature)
+{
+ CONTRACT_CHECK
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!IsDynamic() || signature == NULL);
+ PRECONDITION(!IsResource());
+ PRECONDITION(CheckLoaded());
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_CHECK_END;
+
+ CHECK(GetLoadedIL()->CheckRva(signature,NULL_OK));
+ CHECK_OK;
+}
+
+inline IMAGE_COR_VTABLEFIXUP *PEFile::GetVTableFixups(COUNT_T *pCount/*=NULL*/)
+{
+ CONTRACT(IMAGE_COR_VTABLEFIXUP *)
+ {
+ PRECONDITION(CheckLoaded());
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ if (IsResource() || IsDynamic() || IsILOnly())
+ {
+ if (pCount != NULL)
+ *pCount = 0;
+ RETURN NULL;
+ }
+ else
+ RETURN GetLoadedIL()->GetVTableFixups(pCount);
+}
+
+inline void *PEFile::GetVTable(RVA rva)
+{
+ CONTRACT(void *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!IsDynamic());
+ PRECONDITION(!IsResource());
+ PRECONDITION(CheckLoaded());
+ PRECONDITION(!IsILOnly());
+ PRECONDITION(GetLoadedIL()->CheckRva(rva));
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN (void *)GetLoadedIL()->GetRvaData(rva);
+}
+
+// @todo: this is bad to expose. But it is needed to support current IJW thunks
+inline HMODULE PEFile::GetIJWBase()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(!IsDynamic());
+ PRECONDITION(!IsResource());
+ PRECONDITION(CheckLoaded());
+ PRECONDITION(!IsILOnly());
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return (HMODULE) dac_cast<TADDR>(GetLoadedIL()->GetBase());
+}
+
+inline PTR_VOID PEFile::GetDebuggerContents(COUNT_T *pSize/*=NULL*/)
+{
+ CONTRACT(PTR_VOID)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pSize, NULL_OK));
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // We cannot in general force a LoadLibrary; we might be in the
+ // helper thread. The debugger will have to expect a zero base
+ // in some circumstances.
+
+ if (IsLoaded())
+ {
+ if (pSize != NULL)
+ *pSize = GetLoaded()->GetSize();
+
+ RETURN GetLoaded()->GetBase();
+ }
+ else
+ {
+ if (pSize != NULL)
+ *pSize = 0;
+
+ RETURN NULL;
+ }
+}
+
+inline PTR_CVOID PEFile::GetLoadedImageContents(COUNT_T *pSize/*=NULL*/)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (IsLoaded() && !IsDynamic())
+ {
+ if (pSize != NULL)
+ {
+ *pSize = GetLoaded()->GetSize();
+ }
+ return GetLoaded()->GetBase();
+ }
+ else
+ {
+ if (pSize != NULL)
+ {
+ *pSize = 0;
+ }
+ return NULL;
+ }
+}
+
+#ifndef DACCESS_COMPILE
+inline const void *PEFile::GetManagedFileContents(COUNT_T *pSize/*=NULL*/)
+{
+ CONTRACT(const void *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckLoaded());
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ POSTCONDITION((!GetLoaded()->GetSize()) || CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // Right now, we will trigger a LoadLibrary for the caller's sake,
+ // even if we are in a scenario where we could normally avoid it.
+ LoadLibrary(FALSE);
+
+ if (pSize != NULL)
+ *pSize = GetLoadedIL()->GetSize();
+
+
+ RETURN GetLoadedIL()->GetBase();
+}
+#endif // DACCESS_COMPILE
+
+inline BOOL PEFile::IsPtrInILImage(PTR_CVOID data)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (HasOpenedILimage())
+ {
+#if defined(FEATURE_PREJIT) && defined(FEATURE_CORECLR)
+ if (m_openedILimage == m_nativeImage)
+ {
+ // On Apollo builds, we sometimes open the native image into the slot
+ // normally reserved for the IL image (as the IL image is often not available
+ // on the disk at all). In such a case, data is not coming directly from an
+ // actual IL image, but see if it's coming from the metadata that we copied
+ // from the IL image into the NI.
+ TADDR taddrData = dac_cast<TADDR>(data);
+ PEDecoder * pDecoder = m_nativeImage->GetLoadedLayout();
+ COUNT_T cbILMetadata;
+ TADDR taddrILMetadata = dac_cast<TADDR>(pDecoder->GetMetadata(&cbILMetadata));
+ return ((taddrILMetadata <= taddrData) && (taddrData < taddrILMetadata + cbILMetadata));
+ }
+#endif // defined(FEATURE_PREJIT) && defined(FEATURE_CORECLR)
+ return GetOpenedILimage()->IsPtrInImage(data);
+ }
+ else
+ return FALSE;
+}
+// ------------------------------------------------------------
+// Native image access
+// ------------------------------------------------------------
+inline BOOL PEFile::HasNativeImage()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ CANNOT_TAKE_LOCK;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_PREJIT
+ return (m_nativeImage != NULL);
+#else
+ return FALSE;
+#endif
+}
+
+inline PTR_PEImageLayout PEFile::GetLoadedIL()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(HasOpenedILimage());
+ if(IsIntrospectionOnly())
+ return GetOpenedILimage()->GetLoadedIntrospectionLayout();
+
+ return GetOpenedILimage()->GetLoadedLayout();
+};
+
+inline PTR_PEImageLayout PEFile::GetAnyILWithRef()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetILimage()->GetLayout(PEImageLayout::LAYOUT_ANY,PEImage::LAYOUT_CREATEIFNEEDED);
+};
+
+
+inline BOOL PEFile::IsLoaded(BOOL bAllowNative/*=TRUE*/)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ if(IsDynamic())
+ return TRUE;
+ if(IsIntrospectionOnly())
+ {
+ return HasOpenedILimage() && GetOpenedILimage()->HasLoadedIntrospectionLayout();
+ }
+#ifdef FEATURE_PREJIT
+ if (bAllowNative && HasNativeImage())
+ {
+ PEImage *pNativeImage = GetPersistentNativeImage();
+ return pNativeImage->HasLoadedLayout() && (pNativeImage->GetLoadedLayout()->IsNativeILILOnly() || (HasLoadedIL()));
+ }
+ else
+#endif
+ return HasLoadedIL();
+};
+
+
+inline PTR_PEImageLayout PEFile::GetLoaded()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return HasNativeImage()?GetLoadedNative():GetLoadedIL();
+};
+
+inline PTR_PEImageLayout PEFile::GetLoadedNative()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_PREJIT
+ PEImage* pImage=GetPersistentNativeImage();
+ _ASSERTE(pImage && pImage->GetLoadedLayout());
+ return pImage->GetLoadedLayout();
+#else
+ // Should never get here
+ PRECONDITION(HasNativeImage());
+ return NULL;
+#endif
+};
+
+#ifdef FEATURE_PREJIT
+inline PEImage *PEFile::GetPersistentNativeImage()
+{
+ CONTRACT(PEImage *)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(HasNativeImage());
+ POSTCONDITION(CheckPointer(RETVAL));
+ PRECONDITION(m_bHasPersistentMDImport);
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ CANNOT_TAKE_LOCK;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ RETURN m_nativeImage;
+}
+
+#ifndef DACCESS_COMPILE
+inline PEImage *PEFile::GetNativeImageWithRef()
+{
+ CONTRACT(PEImage *)
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL,NULL_OK));
+ }
+ CONTRACT_END;
+ GCX_PREEMP();
+ SimpleReadLockHolder mdlock(m_pMetadataLock);
+ if(m_nativeImage)
+ m_nativeImage->AddRef();
+ RETURN m_nativeImage;
+}
+#endif // DACCESS_COMPILE
+
+inline BOOL PEFile::HasNativeImageMetadata()
+{
+ CONTRACT(BOOL)
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ RETURN ((m_flags & PEFILE_HAS_NATIVE_IMAGE_METADATA) != 0);
+}
+#endif
+
+// Function to get the fully qualified name of an assembly
+inline void PEAssembly::GetFullyQualifiedAssemblyName(IMDInternalImport* pImport, mdAssembly mda, SString &result, DWORD flags)
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckValue(result));
+#ifndef DACCESS_COMPILE
+ THROWS;
+#else
+ NOTHROW;
+#endif // !DACCESS_COMPILE
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(pImport != NULL)
+ {
+ // This is for DAC, ONLY for the binding tool. Don't use for other
+ // purposes, since this is not canonicalized through Fusion.
+ LPCSTR name;
+ AssemblyMetaDataInternal context;
+ DWORD dwFlags;
+ PBYTE pbPublicKey;
+ DWORD cbPublicKey;
+ if (FAILED(pImport->GetAssemblyProps(
+ mda,
+ (const void **) &pbPublicKey,
+ &cbPublicKey,
+ NULL,
+ &name,
+ &context,
+ &dwFlags)))
+ {
+ _ASSERTE(!"If this fires, then we have to throw for corrupted images");
+ result.SetUTF8("");
+ return;
+ }
+
+ result.SetUTF8(name);
+
+ result.AppendPrintf(W(", Version=%u.%u.%u.%u"),
+ context.usMajorVersion, context.usMinorVersion,
+ context.usBuildNumber, context.usRevisionNumber);
+
+ result.Append(W(", Culture="));
+ if (!*context.szLocale)
+ {
+ result.Append(W("neutral"));
+ }
+ else
+ {
+ result.AppendUTF8(context.szLocale);
+ }
+
+ if (cbPublicKey != 0)
+ {
+#ifndef DACCESS_COMPILE
+
+ StrongNameBufferHolder<BYTE> pbToken;
+ DWORD cbToken;
+ CQuickBytes qb;
+
+ if (StrongNameTokenFromPublicKey(pbPublicKey, cbPublicKey,
+ &pbToken, &cbToken))
+ {
+ // two hex digits per byte
+ WCHAR* szToken = (WCHAR*) qb.AllocNoThrow(sizeof(WCHAR) * (cbToken*2+1));
+ if (szToken)
+ {
+#define TOHEX(a) ((a)>=10 ? L'a'+(a)-10 : L'0'+(a))
+ UINT x;
+ UINT y;
+ for ( x = 0, y = 0; x < cbToken; ++x )
+ {
+ WCHAR v = static_cast<WCHAR>(pbToken[x] >> 4);
+ szToken[y++] = TOHEX( v );
+ v = static_cast<WCHAR>(pbToken[x] & 0x0F);
+ szToken[y++] = TOHEX( v );
+ }
+ szToken[y] = L'\0';
+
+ result.Append(W(", PublicKeyToken="));
+ result.Append(szToken);
+#undef TOHEX
+ }
+ }
+#endif
+
+ }
+ else
+ {
+ result.Append(W(", PublicKeyToken=null"));
+ }
+
+ if (dwFlags & afPA_Mask)
+ {
+ result.Append(W(", ProcessorArchitecture="));
+
+ if (dwFlags & afPA_MSIL)
+ result.Append(W("MSIL"));
+ else if (dwFlags & afPA_x86)
+ result.Append(W("x86"));
+ else if (dwFlags & afPA_IA64)
+ result.Append(W("IA64"));
+ else if (dwFlags & afPA_AMD64)
+ result.Append(W("AMD64"));
+ else if (dwFlags & afPA_ARM)
+ result.Append(W("ARM"));
+ }
+ }
+}
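+
+// For reference, the string assembled above has the standard four-part display-name
+// shape, e.g. (illustrative values):
+//
+//     MyLib, Version=1.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089
+//
+// plus an optional ", ProcessorArchitecture=..." suffix when afPA_Mask bits are set.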
+
+
+// ------------------------------------------------------------
+// Descriptive strings
+// ------------------------------------------------------------
+inline void PEAssembly::GetDisplayName(SString &result, DWORD flags)
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckValue(result));
+#ifndef DACCESS_COMPILE
+ THROWS;
+#else
+ NOTHROW;
+#endif // DACCESS_COMPILE
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_FUSION
+ FusionBind::GetAssemblyNameDisplayName(GetFusionAssemblyName(), result, flags);
+#else
+ if ((flags == (ASM_DISPLAYF_VERSION | ASM_DISPLAYF_CULTURE | ASM_DISPLAYF_PUBLIC_KEY_TOKEN)) &&
+ !m_sTextualIdentity.IsEmpty())
+ {
+ result.Set(m_sTextualIdentity);
+ }
+ else
+ {
+ AssemblySpec spec;
+ spec.InitializeSpec(this);
+ spec.GetFileOrDisplayName(flags, result);
+ }
+#endif // FEATURE_FUSION
+
+#else
+ IMDInternalImport *pImport = GetMDImport();
+ GetFullyQualifiedAssemblyName(pImport, TokenFromRid(1, mdtAssembly), result, flags);
+#endif //DACCESS_COMPILE
+}
+
+// ------------------------------------------------------------
+// Metadata access
+// ------------------------------------------------------------
+
+inline LPCSTR PEAssembly::GetSimpleName()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (!m_bHasPersistentMDImport) { GC_TRIGGERS;} else {DISABLED(GC_TRIGGERS);};
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ LPCSTR name = "";
+ IMDInternalImportHolder pImport = GetMDImport();
+ if (pImport != NULL)
+ {
+ if (FAILED(pImport->GetAssemblyProps(TokenFromRid(1, mdtAssembly), NULL, NULL, NULL, &name, NULL, NULL)))
+ {
+ _ASSERTE(!"If this fires, then we have to throw for corrupted images");
+ name = "";
+ }
+ }
+ return name;
+}
+
+inline BOOL PEFile::IsStrongNamed()
+{
+ CONTRACTL
+ {
+ THROWS;
+ WRAPPER(GC_NOTRIGGER);
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DWORD flags = 0;
+ IfFailThrow(GetMDImport()->GetAssemblyProps(TokenFromRid(1, mdtAssembly), NULL, NULL, NULL, NULL, NULL, &flags));
+ return (flags & afPublicKey) != 0;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Check to see if this assembly has had its strong name signature verified yet.
+//
+
+inline BOOL PEFile::IsStrongNameVerified()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fStrongNameVerified;
+}
+
+inline const void *PEFile::GetPublicKey(DWORD *pcbPK)
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckPointer(pcbPK, NULL_OK));
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ const void *pPK;
+ IfFailThrow(GetMDImport()->GetAssemblyProps(TokenFromRid(1, mdtAssembly), &pPK, pcbPK, NULL, NULL, NULL, NULL));
+ return pPK;
+}
+
+inline ULONG PEFile::GetHashAlgId()
+{
+ CONTRACTL
+ {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ULONG hashAlgId;
+ IfFailThrow(GetMDImport()->GetAssemblyProps(TokenFromRid(1, mdtAssembly), NULL, NULL, &hashAlgId, NULL, NULL, NULL));
+ return hashAlgId;
+}
+
+inline LPCSTR PEFile::GetLocale()
+{
+ CONTRACTL
+ {
+ THROWS;
+ WRAPPER(GC_NOTRIGGER);
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ AssemblyMetaDataInternal md;
+ IfFailThrow(GetMDImport()->GetAssemblyProps(TokenFromRid(1, mdtAssembly), NULL, NULL, NULL, NULL, &md, NULL));
+ return md.szLocale;
+}
+
+inline DWORD PEFile::GetFlags()
+{
+ CONTRACTL
+ {
+ PRECONDITION(IsAssembly());
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DWORD flags;
+ IfFailThrow(GetMDImport()->GetAssemblyProps(TokenFromRid(1, mdtAssembly), NULL, NULL, NULL, NULL, NULL, &flags));
+ return flags;
+}
+
+// In cases where you know the module is already loaded and cannot tolerate triggering or
+// loading, this alternative to PEFile::GetFlags is useful. The profiling API uses this.
+inline HRESULT PEFile::GetFlagsNoTrigger(DWORD * pdwFlags)
+{
+ CONTRACTL
+ {
+ PRECONDITION(IsAssembly());
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (pdwFlags != NULL);
+
+ if (!m_bHasPersistentMDImport)
+ return E_FAIL;
+
+ return GetPersistentMDImport()->GetAssemblyProps(TokenFromRid(1, mdtAssembly), NULL, NULL, NULL, NULL, NULL, pdwFlags);
+}
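+
+// Illustrative usage sketch (hypothetical profiler-side caller): unlike GetFlags,
+// this variant reports failure instead of triggering or loading, so the HRESULT
+// must be checked.
+//
+//     DWORD dwFlags;
+//     if (SUCCEEDED(pFile->GetFlagsNoTrigger(&dwFlags)) &&
+//         IsAfContentType_WindowsRuntime(dwFlags))
+//     {
+//         // treat the module as WinRT metadata (hypothetical branch)
+//     }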
+
+#ifdef FEATURE_CAS_POLICY
+inline COR_TRUST *PEFile::GetAuthenticodeSignature()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ if (!m_fCheckedCertificate && HasSecurityDirectory())
+ {
+ CheckAuthenticodeSignature();
+ }
+
+ return m_certificate;
+}
+#endif
+
+// ------------------------------------------------------------
+// Hash support
+// ------------------------------------------------------------
+
+inline BOOL PEAssembly::HasStrongNameSignature()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (IsDynamic())
+ return FALSE;
+
+#ifdef FEATURE_PREJIT
+ if (IsNativeLoaded())
+ {
+ CONSISTENCY_CHECK(HasNativeImage());
+
+ // NGen images do not have a strong name signature.
+ return FALSE;
+ }
+#endif // FEATURE_PREJIT
+
+ return GetILimage()->HasStrongNameSignature();
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Check to see that an assembly is not delay or test signed
+//
+
+inline BOOL PEAssembly::IsFullySigned()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_PREJIT
+ if (IsNativeLoaded())
+ {
+ CONSISTENCY_CHECK(HasNativeImage());
+
+ // If we are strong named, then we consider ourselves fully signed: either our
+ // signature was verified at NGen time, or skip verification was in effect. The only
+ // code that differentiates between skip verification and fully signed is the strong
+ // name verification path itself, and therefore we abstract that away at this level.
+ //
+ // Note that this is consistent with other abstractions at the PEFile level, such as
+ // HasStrongNameSignature().
+ return IsStrongNamed();
+ } else
+#endif // FEATURE_PREJIT
+ if (HasOpenedILimage())
+ {
+ return GetOpenedILimage()->IsStrongNameSigned();
+ }
+ else
+ {
+ return FALSE;
+ }
+}
+
+#ifndef FEATURE_CORECLR
+//---------------------------------------------------------------------------------------
+//
+// Mark that an assembly has had its strong name verification bypassed
+//
+
+inline void PEAssembly::SetStrongNameBypassed()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fStrongNameBypassed = TRUE;
+}
+
+inline BOOL PEAssembly::NeedsModuleHashChecks()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return ((m_flags & PEFILE_SKIP_MODULE_HASH_CHECKS) == 0) && !m_fStrongNameBypassed;
+}
+#endif // FEATURE_CORECLR
+
+#ifdef FEATURE_CAS_POLICY
+//---------------------------------------------------------------------------------------
+//
+// Verify the Authenticode and strong name signatures of an assembly during the assembly
+// load code path. To verify the strong name signature outside of assembly load, use the
+// VerifyStrongName method instead.
+//
+// If the application is using strong name bypass, then this method may not perform a real
+// strong name verification, delaying the assembly's strong name verification until we know
+// that it is required. If the assembly must be forced to have its strong name verified,
+// VerifyStrongName should be called explicitly.
+//
+// See code:AssemblySecurityDescriptor::ResolveWorker#StrongNameBypass
+//
+
+inline void PEAssembly::DoLoadSignatureChecks()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS; // Fusion uses crsts on AddRef/Release
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ETWOnStartup(SecurityCatchCall_V1, SecurityCatchCallEnd_V1);
+
+ // Dynamic modules and mscorlib are exempt from module hash checks, so flag them
+ // as such here.
+ if (IsSystem() || IsDynamic())
+ {
+ m_flags |= PEFILE_SKIP_MODULE_HASH_CHECKS;
+ }
+
+ // Check strong name signature. We only want to do this now if the application is not using the strong
+ // name bypass feature. Otherwise we'll delay strong name verification until we figure out how trusted
+ // the assembly is.
+ //
+ // For more information see code:AssemblySecurityDescriptor::ResolveWorker#StrongNameBypass
+
+ // Make sure m_pMDImport is initialized, as we need to call VerifyStrongName, which calls
+ // GetFlags. BypassTrustedAppStrongNames = false is a relatively uncommon scenario, so we
+ // need to make sure the initialization order is always correct and we don't miss this
+ // uncommon case.
+ _ASSERTE(GetMDImport());
+
+ if (!g_pConfig->BypassTrustedAppStrongNames())
+ {
+ VerifyStrongName();
+ }
+}
+#endif // FEATURE_CAS_POLICY
+
+// ------------------------------------------------------------
+// Metadata access
+// ------------------------------------------------------------
+#ifdef FEATURE_MULTIMODULE_ASSEMBLIES
+inline PEAssembly *PEModule::GetAssembly()
+{
+ CONTRACT(PEAssembly *)
+ {
+ POSTCONDITION(CheckPointer(RETVAL));
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ RETURN m_assembly;
+}
+
+inline BOOL PEModule::IsResource()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+#ifdef DACCESS_COMPILE
+ _ASSERTE(m_bIsResource!=-1);
+#else
+ if (m_bIsResource==-1)
+ {
+ DWORD flags;
+ if (FAILED(m_assembly->GetPersistentMDImport()->GetFileProps(m_token, NULL, NULL, NULL, &flags)))
+ {
+ _ASSERTE(!"If this fires, then we have to throw for corrupted images");
+ flags = 0;
+ }
+ m_bIsResource=((flags & ffContainsNoMetaData) != 0);
+ }
+#endif
+
+ return m_bIsResource;
+}
+
+inline LPCUTF8 PEModule::GetSimpleName()
+{
+ CONTRACT(LPCUTF8)
+ {
+ POSTCONDITION(CheckPointer(RETVAL));
+ POSTCONDITION(strlen(RETVAL) > 0);
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END;
+
+ LPCUTF8 name;
+
+ if (FAILED(m_assembly->GetPersistentMDImport()->GetFileProps(m_token, &name, NULL, NULL, NULL)))
+ {
+ _ASSERTE(!"If this fires, then we have to throw for corrupted images");
+ name = "";
+ }
+
+ RETURN name;
+}
+
+inline mdFile PEModule::GetToken()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_token;
+}
+#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+
+#ifndef DACCESS_COMPILE
+inline void PEFile::RestoreMDImport(IMDInternalImport* pImport)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pMetadataLock->LockTaken() && m_pMetadataLock->IsWriterLock());
+ if (m_pMDImport != NULL)
+ return;
+ m_pMDImport=pImport;
+ if(m_pMDImport)
+ m_pMDImport->AddRef();
+}
+#endif
+inline void PEFile::OpenMDImport()
+{
+ WRAPPER_NO_CONTRACT;
+ //need synchronization
+ _ASSERTE(m_pMetadataLock->LockTaken() && m_pMetadataLock->IsWriterLock());
+ OpenMDImport_Unsafe();
+}
+
+inline PEFile* PEFile::Dummy()
+{
+ return (PEFile*)(-1);
+}
+
+inline bool PEAssembly::HasBindableIdentity()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ return !IsAfContentType_WindowsRuntime(GetFlags());
+}
+
+inline bool PEAssembly::IsWindowsRuntime()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return IsAfContentType_WindowsRuntime(GetFlags());
+}
+
+#endif // PEFILE_INL_
diff --git a/src/vm/pefingerprint.cpp b/src/vm/pefingerprint.cpp
new file mode 100644
index 0000000000..10d28b2dfb
--- /dev/null
+++ b/src/vm/pefingerprint.cpp
@@ -0,0 +1,624 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// --------------------------------------------------------------------------------
+// PEFingerprint.cpp
+//
+
+//
+//
+// Dev11 note on timing of torn state detection:
+//
+// This implementation of PEFingerprint contains a known flaw: The MVID/SNHash/TPBand
+// torn state test only occurs after the file is already opened and made available
+// for the runtime to use. In fact, we don't do it until someone asks for a Commit
+// on the fingerprint.
+//
+// This is clearly a perversion of the design: however, it was not feasible
+// to do the check beforehand within the current codebase without incurring
+// severe performance costs or major code surgery.
+//
+// For Dev11, however, we accept this because of two things:
+//
+// - GAC assemblies are installed through gacutil.exe which always timestamps
+// the assembly based on the time of install. Thus, timestamp collisions
+// inside the GAC should not happen unless someone manually tampers with the GAC.
+// Since we do verify the timestamp and lock the file before opening it,
+// it is not a problem that the actual mvid/snhash check happens later than it should.
+// --------------------------------------------------------------------------------
+
+
+
+#include "common.h"
+#include "pefile.h"
+#include "pefingerprint.h"
+
+#ifdef FEATURE_FUSION
+
+static VOID ThrowTornState(LPCWSTR path);
+static void FetchILTimestampAndSize(LPCWSTR path, FILETIME *pTimestamp, DWORD *pSize, HANDLE hFileHandleIfOpen = INVALID_HANDLE_VALUE);
+
+
+const size_t PEFingerprint::s_offsets[] =
+{
+ offsetof(PEFingerprint, m_timeStamp),
+ offsetof(PEFingerprint, m_size),
+ offsetof(PEFingerprint, m_mvid),
+};
+
+const DWORD PEFingerprint::s_sizes[] =
+{
+ sizeof(((PEFingerprint *)NULL)->m_timeStamp),
+ sizeof(((PEFingerprint *)NULL)->m_size),
+ sizeof(((PEFingerprint *)NULL)->m_mvid),
+};
+
+
+
+//---------------------------------------------------------------
+// Ctor
+//---------------------------------------------------------------
+PEFingerprint::PEFingerprint(PEImage *owner) :
+ m_pcrst(NULL)
+ ,m_peimage(owner)
+ ,m_commitMask(0)
+ ,m_alreadyLoaded(FALSE)
+ ,m_priorLockAndLoadFailure(S_OK)
+{
+
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(owner);
+
+ memset(&m_timeStamp, 0xcc, sizeof(m_timeStamp));
+ memset(&m_size, 0xcc, sizeof(m_size));
+ memset(&m_mvid, 0xcc, sizeof(m_mvid));
+
+ return;
+}
+
+
+//---------------------------------------------------------------
+// PEFingerprint factory
+//---------------------------------------------------------------
+/*static*/ PEFingerprint *PEFingerprint::CreatePEFingerprint(PEImage *owner)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ NewHolder<PEFingerprint> pPEFingerprint = new PEFingerprint(owner);
+ pPEFingerprint->m_pcrst = new Crst(CrstLeafLock);
+
+ //---------------------------------------------------------------
+ // Since obtaining the timestamp is cheap and doesn't need to open the
+ // file, go ahead and get it now and commit into the fingerprint.
+ //
+ // @review: Would it be better to lock the file right now to
+ // prevent overwrites for the life of the fingerprint?
+ //---------------------------------------------------------------
+ LPCWSTR path = pPEFingerprint->m_peimage->GetPath();
+ _ASSERTE(path);
+
+ FILETIME lastWriteTime;
+ DWORD size;
+ FetchILTimestampAndSize(path, &lastWriteTime, &size);
+
+ ILFingerprintComponent components[] =
+ {
+ { ILFTagTimestamp, &lastWriteTime },
+ { ILFTagSize, &size },
+ };
+ BOOL success = pPEFingerprint->CommitAndCompareMulti(COUNTOF(components), components);
+ _ASSERTE(success); // No way this commit can fail - we own the only pointer!
+ return pPEFingerprint.Extract();
+}
+
+
+
+//---------------------------------------------------------------
+// Dtor
+//---------------------------------------------------------------
+PEFingerprint::~PEFingerprint()
+{
+ LIMITED_METHOD_CONTRACT;
+ delete m_pcrst;
+ return;
+}
+
+//---------------------------------------------------------------
+// AddRef
+//---------------------------------------------------------------
+ULONG PEFingerprint::AddRef()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_peimage->AddRef();
+}
+
+//---------------------------------------------------------------
+// Release
+//---------------------------------------------------------------
+ULONG PEFingerprint::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ return m_peimage->Release();
+}
+
+//---------------------------------------------------------------------------------------------
+// Convenience fcn: equivalent to calling CommitAndCompareMulti() with one component.
+//---------------------------------------------------------------------------------------------
+BOOL PEFingerprint::CommitAndCompare(ILFingerprintTag componentType, LPCVOID data)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ ILFingerprintComponent c = {componentType, data};
+ return CommitAndCompareMulti(1, &c);
+}
+
+
+//---------------------------------------------------------------------------------------------
+// CommitAndCompareMulti(): Atomically commits one or more fingerprint components into
+// the fingerprint. Once a component is committed, its value can never change.
+//
+// An attempt to commit a component succeeds only if the component was not already committed
+// or the prior value matches the new one exactly.
+//
+// Calling CommitAndCompare() multiple times is not equivalent to calling CommitAndCompareMulti().
+// CommitAndCompareMulti() is atomic - either all the commits happen or none of them do.
+//
+// Returns:
+//   TRUE:  All passed components committed successfully.
+//   FALSE: At least one component failed to commit successfully.
+//---------------------------------------------------------------------------------------------
+BOOL PEFingerprint::CommitAndCompareMulti(UINT numComponents, const ILFingerprintComponent *pComponents)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ //------------------------------------------------------------------------------
+ // See "Dev11 note on timing of torn state detection". This step should not be
+ // here but this is how we "verify" the MVID/SNHash on IL open. We wait until
+ // the first time someone attempts a commit on an opened file to do the check.
+ // The caller will think we did the check at file open time, even though we
+ // actually left a window of vulnerability.
+ //------------------------------------------------------------------------------
+ if (!m_alreadyLoaded)
+ {
+ PEImageHolder pOpenedILimage;
+ m_peimage->Clone(MDInternalImport_OnlyLookInCache,&pOpenedILimage);
+
+ if(pOpenedILimage != NULL && pOpenedILimage->IsOpened())
+ {
+
+ for (UINT j = 0; j < numComponents; j++)
+ {
+ // Don't open if we're just checking timestamp (forecloses possible reentrancy problems
+ // due to timestamp commits occurring within PEImage itself.)
+ ILFingerprintTag tag = pComponents[j]._tag;
+ if (tag == ILFTagMvid)
+ {
+ this->LockAndLoadIL();
+ break;
+ }
+
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------------
+ // Inside the crit section, make sure all the components can successfully commit
+ // before committing any of them.
+ //------------------------------------------------------------------------------
+ CrstHolder ch(m_pcrst);
+ UINT i;
+ for (i = 0; i < numComponents; i++)
+ {
+ ILFingerprintTag tag = pComponents[i]._tag;
+ if (IsComponentCommitted(tag))
+ {
+ if (0 != memcmp(pComponents[i]._data, TagDataStart(tag), TagDataSize(tag)))
+ return FALSE;
+ }
+ }
+ for (i = 0; i < numComponents; i++)
+ {
+ ILFingerprintTag tag = pComponents[i]._tag;
+ if (!IsComponentCommitted(tag))
+ {
+ memcpy(TagDataStart(tag), pComponents[i]._data, TagDataSize(tag));
+ SetComponentCommitted(tag);
+ }
+ }
+
+ return TRUE;
+}
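+
+// A minimal usage sketch (not shipping code): a caller commits a timestamp/size pair
+// atomically and treats a FALSE return as torn state. The tags and COUNTOF mirror the
+// real usage in PEFingerprintVerificationHolder below; the surrounding variables are
+// hypothetical.
+//
+//   FILETIME lastWriteTime;
+//   DWORD size;
+//   FetchILTimestampAndSize(path, &lastWriteTime, &size);
+//   ILFingerprintComponent components[] =
+//   {
+//       { ILFTagTimestamp, &lastWriteTime },
+//       { ILFTagSize,      &size          },
+//   };
+//   if (!pFingerprint->CommitAndCompareMulti(COUNTOF(components), components))
+//       ThrowTornState(path);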
+
+
+
+//---------------------------------------------------------------------------------------------
+// LockAndLoadIL()
+//
+// Forces the runtime to open the IL file and lock it against future overwrites. This
+// is bad for working set, so it should be avoided where possible.
+//
+// Once opened and locked, this method extracts the actual fingerprint from the IL file
+// and attempts to commit it into the ILFingerprint. If successful, all future commits
+// will now be compared against this trusted data. If unsuccessful, this is a torn state
+// situation and LockAndLoadIL() throws the torn state exception.
+//---------------------------------------------------------------------------------------------
+void PEFingerprint::LockAndLoadIL()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ //----------------------------------------------------------------------------------
+ // If already loaded, return the prior result.
+ //----------------------------------------------------------------------------------
+ if (m_alreadyLoaded)
+ {
+ if (FAILED(m_priorLockAndLoadFailure))
+ {
+ ThrowHR(m_priorLockAndLoadFailure);
+ }
+ else
+ {
+ return;
+ }
+ }
+ PEImageHolder pOpenedILimage;
+ m_peimage->Clone(MDInternalImport_Default,&pOpenedILimage);
+ HRESULT hr = S_OK;
+ {
+ GCX_PREEMP();
+ IfFailThrow(m_peimage->TryOpenFile());
+ }
+ //----------------------------------------------------------------------------------
+ // Force the file open (by requesting a metadata pointer to it.)
+ //----------------------------------------------------------------------------------
+ IMDInternalImport *pMDImport = NULL;
+ EX_TRY
+ {
+ pMDImport = pOpenedILimage->GetMDImport();
+ hr = S_OK;
+ }
+ EX_CATCH_HRESULT(hr);
+ if (Exception::IsTransient(hr))
+ ThrowHR(hr);
+ if (FAILED(hr))
+ {
+ m_priorLockAndLoadFailure = hr;
+ m_alreadyLoaded = TRUE;
+ ThrowHR(hr);
+ }
+
+ m_alreadyLoaded = TRUE;
+
+ //------------------------------------------------------------------------------
+ // See "Dev11 note on timing of torn state detection". This step should not be
+ // here as the "right" design is to extract the actual MVID before we officially
+ // open the file. But since we don't do that in the current implementation, we do
+ // it now.
+ //------------------------------------------------------------------------------
+ GUID mvid;
+ pOpenedILimage->GetMVID(&mvid);
+
+ BOOL success = this->CommitAndCompare(ILFTagMvid, &mvid);
+ if (!success)
+ ThrowTornState(m_peimage->GetPath());
+}
+
+
+//==================================================================================
+// Helper for throwing a torn state exception.
+//==================================================================================
+static VOID ThrowTornState(LPCWSTR path)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ COMPlusThrow(kFileLoadException, IDS_EE_TORNSTATE, path);
+}
+
+#endif // FEATURE_FUSION
+
+
+
+
+//==================================================================================
+// This holder must be wrapped around any code that opens an IL image.
+// It will verify that the actual fingerprint doesn't conflict with the stored
+// assumptions in the PEFingerprint. (If it does, the holder constructor throws
+// a torn state exception.)
+//
+// It is a holder because it needs to keep a file handle open to prevent
+// anyone from overwriting the IL after the check has been done. Once
+// you've opened the "real" handle to the IL (i.e. LoadLibrary/CreateFile),
+// you can safely destruct the holder.
+//==================================================================================
+PEFingerprintVerificationHolder::PEFingerprintVerificationHolder(PEImage *owner)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+#ifdef FEATURE_FUSION
+ if (owner->IsTrustedNativeImage())
+ return; // Waste of cycles to check timestamps for NI images.
+
+
+ LPCWSTR path = owner->GetPath();
+ _ASSERTE(path);
+
+ if (owner->IsOpened())
+ return; // Not the first layout to be opened - no need to repeat the work in that case.
+
+ // First, lock the file and verify that the timestamp hasn't changed.
+ TESTHOOKCALL(AboutToLockImage(path, IsCompilationProcess()));
+ m_fileHandle = WszCreateFile(path, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (m_fileHandle == INVALID_HANDLE_VALUE)
+ {
+ // @review: If this call to open the file fails, it sounds a bit risky to fail the PE open altogether
+ // just to do a torn state check. Let the torn state detection bake a bit before we take this step.
+ return;
+ }
+
+ FILETIME lastWriteTime;
+ DWORD size;
+ FetchILTimestampAndSize(path, &lastWriteTime, &size, m_fileHandle);
+ ReleaseHolder<IILFingerprint> fingerPrint;
+ ILFingerprintComponent components[] =
+ {
+ { ILFTagTimestamp, &lastWriteTime },
+ { ILFTagSize, &size },
+ };
+ IfFailThrow(owner->GetILFingerprint(&fingerPrint));
+ if (!fingerPrint->CommitAndCompareMulti(COUNTOF(components), components))
+ ThrowTornState(path);
+
+
+ // Now, verify that the MVID/SNHash/TPBand hasn't changed.
+ // Oh wait, where'd that code go? See "Dev11 note on timing of torn state detection".
+#endif // FEATURE_FUSION
+ return;
+}
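+
+// A sketch of the intended pattern (hypothetical call site): construct the holder
+// before opening the "real" OS handle to the IL, let the constructor throw on torn
+// state, and destruct it once that handle exists.
+//
+//   {
+//       PEFingerprintVerificationHolder verifier(pImage);  // throws on torn state
+//       hMod = CLRLoadLibrary(pImage->GetPath());          // the "real" handle
+//   }  // verifier's file handle is released here; hMod now pins the file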
+
+#ifdef FEATURE_FUSION
+#ifndef DACCESS_COMPILE
+class CachingILFingerprintFactory : public IILFingerprintFactory
+{
+private:
+ LONG m_refCount;
+ Crst m_lock;
+
+ // Hash Type ... NOTE! This is a case-sensitive hash of a filename to an IL fingerprint.
+ // This is acceptable because duplicates are not errors. A case-sensitive hash was chosen
+ // because case-insensitive hashes are somewhat slower and most lookups match in case
+ // anyway; if that assumption stops holding, converting to a case-insensitive hash
+ // should be trivial.
+ typedef StringSHashWithCleanup< IILFingerprint, WCHAR > ILFingerprintHash;
+ typedef StringHashElement< IILFingerprint, WCHAR > ILFingerprintHashElement;
+
+ ILFingerprintHash m_hash;
+
+ ~CachingILFingerprintFactory()
+ {
+ }
+
+public:
+
+ CachingILFingerprintFactory() : m_refCount(1), m_lock(CrstILFingerprintCache)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+ }
+
+ STDMETHOD_(ULONG, AddRef)()
+ {
+ CONTRACT(ULONG)
+ {
+ PRECONDITION(m_refCount>0 && m_refCount < COUNT_T_MAX);
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACT_END;
+
+ RETURN (static_cast<ULONG>(FastInterlockIncrement(&m_refCount)));
+ }
+
+ STDMETHOD_(ULONG, Release)()
+ {
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ ULONG result = 0;
+ result=FastInterlockDecrement(&m_refCount);
+ if (result == 0)
+ delete this;
+
+ return result;
+ }
+
+ STDMETHOD(GetILFingerprintForPath)(
+ LPCWSTR pwzPath,
+ IILFingerprint **ppFingerprint)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ CrstHolder ch(&m_lock);
+ // Lookup in cache
+ ILFingerprintHashElement *pCacheElement = m_hash.Lookup(pwzPath);
+
+ // On a cache miss, create the fingerprint and populate the cache
+ if (pCacheElement != NULL)
+ {
+ *ppFingerprint = clr::SafeAddRef(pCacheElement->Object);
+ }
+ else
+ {
+ // Create a new fingerprint object
+ ReleaseHolder<IILFingerprint> pFingerprint;
+ NewArrayHolder<WCHAR> pwzPathCopy;
+ IfFailThrow(RuntimeGetILFingerprintForPath(pwzPath, &pFingerprint));
+
+ // Create hash element object
+ NewHolder<ILFingerprintHashElement> pHashElem = new ILFingerprintHashElement();
+ pwzPathCopy = DuplicateStringThrowing(pwzPath);
+ pHashElem->String = pwzPathCopy;
+ pHashElem->Object = pFingerprint;
+
+ // Insert into hash table
+ m_hash.Add(pHashElem);
+
+ *ppFingerprint = clr::SafeAddRef(pFingerprint);
+
+ // Prevent disastrous cleanup
+ pwzPathCopy.SuppressRelease();
+ pHashElem.SuppressRelease();
+ pFingerprint.SuppressRelease();
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+ }
+};
+
+HRESULT RuntimeCreateCachingILFingerprintFactory(IILFingerprintFactory **ppILFingerprintFactory)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ *ppILFingerprintFactory = new CachingILFingerprintFactory();
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
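+
+// Sketch of intended usage (hypothetical caller): the factory caches one fingerprint
+// per path (case-sensitively), so repeated lookups for the same file are cheap.
+//
+//   ReleaseHolder<IILFingerprintFactory> pFactory;
+//   IfFailThrow(RuntimeCreateCachingILFingerprintFactory(&pFactory));
+//   ReleaseHolder<IILFingerprint> pFingerprint;
+//   IfFailThrow(pFactory->GetILFingerprintForPath(wszAssemblyPath, &pFingerprint));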
+
+//-------------------------------------------------------------------------------------------------------------
+// Common routine to fetch the IL file's timestamp and size. If the caller already has an open file handle, it should
+// pass that as "hFileHandleIfOpen" to avoid the overhead of opening the file again.
+//-------------------------------------------------------------------------------------------------------------
+static void FetchILTimestampAndSize(LPCWSTR path, FILETIME *pTimestamp, DWORD *pSize, HANDLE hFileHandleIfOpen /* = INVALID_HANDLE_VALUE*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ _ASSERTE(pTimestamp != NULL && pSize != NULL);
+
+ if (hFileHandleIfOpen != INVALID_HANDLE_VALUE)
+ {
+ BY_HANDLE_FILE_INFORMATION info;
+ if (!GetFileInformationByHandle(hFileHandleIfOpen, &info))
+ ThrowLastError();
+ *pTimestamp = info.ftLastWriteTime;
+ *pSize = info.nFileSizeLow;
+ return;
+ }
+
+ // For normal files, we can obtain the timestamp without opening the file - attempt to do so.
+ WIN32_FILE_ATTRIBUTE_DATA wfd;
+ if (!WszGetFileAttributesEx(path, GetFileExInfoStandard, &wfd))
+ ThrowLastError();
+ if (!(wfd.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT))
+ {
+ *pTimestamp = wfd.ftLastWriteTime;
+ *pSize = wfd.nFileSizeLow;
+ return;
+ }
+
+ // If we got here, the original path pointed to a symbolic link or some other form of reparse point. In such cases, GetFileAttributesEx
+ // may not return the same timestamp as GetFileInformationByHandle. (E.g. in the symbolic link case, GetFileAttributesEx returns
+ // the symbolic link's timestamp rather than the target's timestamp.)
+ //
+ // Since this is the uncommon case, we can justify the perf hit of opening the file so we get the timestamp
+ // on the actual target.
+ HandleHolder hFile(WszCreateFile(path, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL));
+ if (hFile == INVALID_HANDLE_VALUE)
+ ThrowLastError();
+ BY_HANDLE_FILE_INFORMATION info;
+ if (!GetFileInformationByHandle(hFile, &info))
+ ThrowLastError();
+ *pTimestamp = info.ftLastWriteTime;
+ *pSize = info.nFileSizeLow;
+ return;
+}
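+
+// Sketch (hypothetical caller): pass an already-open handle when available to avoid a
+// second CreateFile; without one, the function opens the file only for reparse points.
+//
+//   FILETIME ft; DWORD cb;
+//   FetchILTimestampAndSize(path, &ft, &cb, hOpenHandle); // reuses hOpenHandle
+//   FetchILTimestampAndSize(path, &ft, &cb);              // GetFileAttributesEx fast path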
+
+#endif // !DACCESS_COMPILE
+#endif // FEATURE_FUSION
+
diff --git a/src/vm/pefingerprint.h b/src/vm/pefingerprint.h
new file mode 100644
index 0000000000..19de312ac0
--- /dev/null
+++ b/src/vm/pefingerprint.h
@@ -0,0 +1,127 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// --------------------------------------------------------------------------------
+// PEFingerprint.h
+//
+
+// --------------------------------------------------------------------------------
+
+#ifndef PEFINGERPRINT_H_
+#define PEFINGERPRINT_H_
+
+
+#ifdef FEATURE_FUSION
+
+#include "corcompile.h"
+
+class PEImage;
+
+//==================================================================================
+// This is the implementation of IILFingerprint object maintained by PEImage objects.
+// IILFingerprint is described in detail in IILFingerprint.h
+//==================================================================================
+class PEFingerprint : public IILFingerprint
+{
+ public:
+ //----------------------------------------------------------------
+ // IILFingerprint methods
+ //----------------------------------------------------------------
+ STDMETHOD_(ULONG, AddRef)();
+ STDMETHOD_(ULONG, Release)();
+ STDMETHOD_(BOOL, CommitAndCompare)(ILFingerprintTag componentType, LPCVOID data);
+ STDMETHOD_(BOOL, CommitAndCompareMulti)(UINT numComponents, const ILFingerprintComponent *pComponents);
+ STDMETHOD_(void, LockAndLoadIL)();
+
+ //----------------------------------------------------------------
+ // Non-interface public methods.
+ //----------------------------------------------------------------
+ public:
+ static PEFingerprint* CreatePEFingerprint(PEImage *owner);
+ virtual ~PEFingerprint();
+
+ private:
+ PEFingerprint(PEImage *owner);
+
+ //----------------------------------------------------------------
+ // Private methods.
+ //----------------------------------------------------------------
+ private:
+
+ BOOL IsComponentCommitted(ILFingerprintTag tag)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(tag < ILFTagCount);
+ return 0 != (m_commitMask & (1 << tag));
+ }
+
+ void SetComponentCommitted(ILFingerprintTag tag)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(tag < ILFTagCount);
+ m_commitMask |= (1 << tag);
+ }
+
+ LPVOID TagDataStart(ILFingerprintTag tag)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(tag < ILFTagCount);
+ return (LPVOID)(((LPBYTE)this) + s_offsets[tag]);
+ }
+
+ DWORD TagDataSize(ILFingerprintTag tag)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(tag < ILFTagCount);
+ return s_sizes[tag];
+ }
+
+
+ //----------------------------------------------------------------
+ // Private instance data
+ //----------------------------------------------------------------
+ private:
+ Crst *m_pcrst; // Synchronizes updates to fingerprint
+ PEImage *m_peimage; // Backpointer to PEImage (for ref-counting purposes, the PEImage and PEFingerprint have the same identity)
+ DWORD m_commitMask; // Bitmask to indicate which components have been committed ( fCommitted = (m_commitMask & (1 << tag)) )
+ FILETIME m_timeStamp; // Component: File system lastwrite Timestamp
+ DWORD m_size; // Component: File size
+ GUID m_mvid; // Component: Mvid
+
+ BOOL m_alreadyLoaded; // Turns repeated attempts to LockAndLoadIL() into NOP's
+ HRESULT m_priorLockAndLoadFailure; // If LockAndLoadIL() failed the first time, return the same failure on subsequent attempts.
+
+ //----------------------------------------------------------------
+ // Private static data
+ //----------------------------------------------------------------
+ private:
+ const static size_t s_offsets[ILFTagCount]; // static: Maps tags to offsets within PEFingerprint
+ const static DWORD s_sizes[ILFTagCount]; // static: Maps tag to expected data size
+};
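+
+// For reference, a minimal sketch of the static maps pefingerprint.cpp is expected to
+// define (assuming the tag order Timestamp, Size, Mvid; the .cpp definitions are
+// authoritative):
+//
+//   const size_t PEFingerprint::s_offsets[ILFTagCount] =
+//   {
+//       offsetof(PEFingerprint, m_timeStamp),  // ILFTagTimestamp
+//       offsetof(PEFingerprint, m_size),       // ILFTagSize
+//       offsetof(PEFingerprint, m_mvid),       // ILFTagMvid
+//   };
+//   const DWORD PEFingerprint::s_sizes[ILFTagCount] =
+//       { sizeof(FILETIME), sizeof(DWORD), sizeof(GUID) };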
+
+#endif // FEATURE_FUSION
+
+
+//==================================================================================
+// This holder must be wrapped around any code that opens an IL image.
+// It will verify that the actual fingerprint doesn't conflict with the stored
+// assumptions in the PEFingerprint. (If it does, the holder constructor throws
+// a torn state exception.)
+//
+// It is a holder because it needs to keep a file handle open to prevent
+// anyone from overwriting the IL after the check has been done. Once
+// you've opened the "real" handle to the IL (i.e. LoadLibrary/CreateFile),
+// you can safely destruct the holder.
+//==================================================================================
+class PEFingerprintVerificationHolder
+{
+ public:
+ PEFingerprintVerificationHolder(PEImage *owner);
+
+ private:
+ FileHandleHolder m_fileHandle;
+};
+
+
+#endif // PEFINGERPRINT_H_
diff --git a/src/vm/peimage.cpp b/src/vm/peimage.cpp
new file mode 100644
index 0000000000..66f89db6e0
--- /dev/null
+++ b/src/vm/peimage.cpp
@@ -0,0 +1,2154 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// --------------------------------------------------------------------------------
+// PEImage.cpp
+//
+
+// --------------------------------------------------------------------------------
+
+
+#include "common.h"
+
+#include "peimage.h"
+#include "eeconfig.h"
+#include "apithreadstress.h"
+#include <objbase.h>
+
+#include "sha1.h"
+#include "eventtrace.h"
+#include "peimagelayout.inl"
+
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif
+
+#ifndef DACCESS_COMPILE
+
+
+CrstStatic PEImage::s_hashLock;
+PtrHashMap *PEImage::s_Images = NULL;
+#ifdef FEATURE_MIXEDMODE
+CrstStatic PEImage::s_ijwHashLock;
+PtrHashMap *PEImage::s_ijwFixupDataHash;
+#endif
+
+extern LocaleID g_lcid; // fusion path comparison lcid
+
+/* static */
+void PEImage::Startup()
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ POSTCONDITION(CheckStartup());
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ if (CheckStartup())
+ RETURN;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
+
+ s_hashLock.Init(CrstPEImage, (CrstFlags)(CRST_REENTRANCY|CRST_TAKEN_DURING_SHUTDOWN));
+ LockOwner lock = { &s_hashLock, IsOwnerOfCrst };
+ s_Images = ::new PtrHashMap;
+ s_Images->Init(CompareImage, FALSE, &lock);
+#ifdef FEATURE_MIXEDMODE
+ s_ijwHashLock.Init(CrstIJWHash, CRST_REENTRANCY);
+ LockOwner ijwLock = { &s_ijwHashLock, IsOwnerOfCrst };
+ s_ijwFixupDataHash = ::new PtrHashMap;
+ s_ijwFixupDataHash->Init(CompareIJWDataBase, FALSE, &ijwLock);
+#endif
+ PEImageLayout::Startup();
+#ifndef FEATURE_FUSION
+#ifdef FEATURE_USE_LCID
+ g_lcid = MAKELCID(LOCALE_INVARIANT, SORT_DEFAULT);
+#else // FEATURE_USE_LCID
+ g_lcid = NULL; // invariant
+#endif //FEATURE_USE_LCID
+#endif
+ END_SO_INTOLERANT_CODE;
+
+ RETURN;
+}
+
+/* static */
+CHECK PEImage::CheckStartup()
+{
+ WRAPPER_NO_CONTRACT;
+ CHECK(s_Images != NULL);
+ CHECK_OK;
+}
+
+/* static */
+CHECK PEImage::CheckLayoutFormat(PEDecoder *pe)
+{
+ CONTRACT_CHECK
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_CHECK_END;
+
+ // If we are in a compilation domain, we will allow
+ // non-IL only files to be treated as IL only
+
+ // <TODO>@todo: this is not really the right model here. This is a per-app domain
+ // choice, but an image created this way would become available globally.
+ // (Also, this call prevents us from moving peimage into utilcode.)</TODO>
+
+ if (GetAppDomain() == NULL ||
+ (!GetAppDomain()->IsCompilationDomain()))
+ {
+ CHECK(pe->IsILOnly());
+ }
+
+ CHECK(!pe->HasNativeHeader());
+ CHECK_OK;
+}
+
+CHECK PEImage::CheckILFormat()
+{
+ WRAPPER_NO_CONTRACT;
+
+ PTR_PEImageLayout pLayoutToCheck;
+ PEImageLayoutHolder pLayoutHolder;
+
+ if (HasLoadedLayout())
+ {
+ pLayoutToCheck = GetLoadedLayout();
+ }
+ else
+ {
+ pLayoutHolder = GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED);
+ pLayoutToCheck = pLayoutHolder;
+ }
+
+#ifdef FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS
+ if (PEFile::ShouldTreatNIAsMSIL())
+ {
+ // This PEImage may intentionally be an NI image, being used as if it were an
+ // MSIL image. In that case, rather than using CheckILFormat on its layout,
+ // do CheckCORFormat(), which is the same as CheckILFormat, except it allows for
+ // a native header. (CheckILFormat() fails if it finds a native header.)
+ CHECK(pLayoutToCheck->CheckCORFormat());
+ }
+ else
+#endif // FEATURE_TREAT_NI_AS_MSIL_DURING_DIAGNOSTICS
+ {
+ CHECK(pLayoutToCheck->CheckILFormat());
+ }
+
+ CHECK_OK;
+};
+
+/* static */
+// This method is only intended to be called during NGen. It doesn't AddRef to the objects it returns,
+// and can be unsafe for general use.
+void PEImage::GetAll(SArray<PEImage*> &images)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CrstHolder holder(&s_hashLock);
+
+ for (PtrHashMap::PtrIterator i = s_Images->begin(); !i.end(); ++i)
+ {
+ PEImage *image = (PEImage*) i.GetValue();
+ images.Append(image);
+ }
+}
+
+/* static */
+ULONG PEImage::HashStreamIds(UINT64 id1, DWORD id2)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ULONG hash = 5381;
+
+ hash ^= id2;
+ hash = _rotl(hash, 4);
+
+ void *data = &id1;
+ hash ^= *(INT32 *) data;
+
+ hash = _rotl(hash, 4);
+ ((INT32 *&)data)++;
+ hash ^= *(INT32 *) data;
+
+ return hash;
+}
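+
+// Example: the 64-bit stream id is folded in as two 32-bit halves around the module id,
+// so two modules from the same stream (same id1, different id2) still hash apart, as do
+// modules with the same id2 from different streams.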
+
+PEImage::~PEImage()
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckStartup());
+ PRECONDITION(m_refCount == 0);
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ if (m_pLayoutLock)
+ delete m_pLayoutLock;
+ if(m_hFile!=INVALID_HANDLE_VALUE && m_bOwnHandle)
+ CloseHandle(m_hFile);
+
+ for (unsigned int i=0;i<COUNTOF(m_pLayouts);i++)
+ {
+ if (m_pLayouts[i]!=NULL)
+ m_pLayouts[i]->Release();
+ }
+
+ if (m_pMDImport)
+ m_pMDImport->Release();
+ if(m_pNativeMDImport)
+ m_pNativeMDImport->Release();
+#ifdef METADATATRACKER_ENABLED
+ if (m_pMDTracker != NULL)
+ m_pMDTracker->Deactivate();
+#endif // METADATATRACKER_ENABLED
+
+#ifdef FEATURE_FUSION
+ delete m_pILFingerprint;
+#endif // FEATURE_FUSION
+}
+
+#ifdef FEATURE_MIXEDMODE
+/* static */
+BOOL PEImage::CompareIJWDataBase(UPTR base, UPTR mapping)
+{
+ CONTRACTL {
+ PRECONDITION(CheckStartup());
+ PRECONDITION(CheckPointer((BYTE *) (base<<1)));
+ PRECONDITION(CheckPointer((IJWFixupData *)mapping));
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ return ((BYTE *) (base<<1) == ((IJWFixupData*)mapping)->GetBase());
+}
+#endif // FEATURE_MIXEDMODE
+
+// Thread stress
+#if 0
+class OpenFileStress : APIThreadStress
+{
+ public:
+ const SString &path;
+ PEImage::Layout layout;
+ OpenFileStress(const SString &path, PEImage::Layout layout)
+ : path(path), layout(layout)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ path.Normalize();
+ }
+ void Invoke()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ PEImageHolder result(PEImage::Open(path, layout));
+ }
+};
+#endif
+
+ULONG PEImage::Release()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ CONTRACT_VIOLATION(FaultViolation|ThrowsViolation);
+ COUNT_T result = 0;
+ {
+ // Use scoping to hold the hash lock
+ CrstHolder holder(&s_hashLock);
+
+ // Decrement and check the refcount - if we hit 0, remove it from the hash and delete it.
+ result=FastInterlockDecrement(&m_refCount);
+ if (result == 0 )
+ {
+ LOG((LF_LOADER, LL_INFO100, "PEImage: Closing Image %S\n", (LPCWSTR) m_path));
+ if(m_bInHashMap)
+ {
+ PEImageLocator locator(this);
+ PEImage* deleted = (PEImage *)s_Images->DeleteValue(GetIDHash(), &locator);
+ _ASSERTE(deleted == this);
+ }
+ }
+ }
+
+#ifdef FEATURE_LAZY_COW_PAGES
+ if (result == 0 && m_bAllocatedLazyCOWPages)
+ ::FreeLazyCOWPages(GetLoadedLayout());
+#endif
+
+ // This needs to be done outside of the hash lock, since this can call FreeLibrary,
+ // which can cause _CorDllMain to be executed, which can cause the hash lock to be
+ // taken again because we need to release the IJW fixup data in another PEImage hash.
+ if (result == 0)
+ delete this;
+
+ return result;
+}
+
+/* static */
+CHECK PEImage::CheckCanonicalFullPath(const SString &path)
+{
+ CONTRACT_CHECK
+ {
+ PRECONDITION(CheckValue(path));
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_CHECK_END;
+
+ CCHECK_START
+ {
+ // This is not intended to be an exhaustive test, just to provide a sanity check
+
+ SString::CIterator i = path.Begin();
+
+ SString sNetworkPathPrefix(SString::Literal, W("\\\\"));
+ if (path.Skip(i, sNetworkPathPrefix))
+ {
+ // Network path
+ }
+ else if (iswalpha(*i))
+ {
+ // Drive path
+ i++;
+ SString sDrivePath(SString::Literal, ":\\");
+ CCHECK(path.Skip(i, sDrivePath));
+ }
+ else
+ {
+ CCHECK_FAIL("Not a full path");
+ }
+
+ while (i != path.End())
+ {
+ // Check for multiple slashes
+ if(*i != '\\')
+ {
+
+ // Check for . or ..
+ SString sParentDir(SString::Ascii, "..");
+ SString sCurrentDir(SString::Ascii, ".");
+ if ((path.Skip(i, sParentDir) || path.Skip(i, sCurrentDir))
+ && (path.Match(i, '\\')))
+ {
+ CCHECK_FAIL("Illegal . or ..");
+ }
+
+ if (!path.Find(i, '\\'))
+ break;
+ }
+
+ i++;
+ }
+ }
+ CCHECK_END;
+
+ CHECK_OK;
+}
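+
+// Examples of what this sanity check accepts and rejects:
+//   accepted:  W("C:\\windows\\foo.dll"), W("\\\\server\\share\\foo.dll")
+//   rejected:  W("foo.dll") (not a full path), W("C:\\a\\..\\b.dll") (. or .. segment)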
+
+#ifdef FEATURE_USE_LCID
+LCID g_lcid =0; // fusion path comparison lcid
+#else
+LPCWSTR g_lcid=NULL;
+#endif
+/* static */
+LocaleID PEImage::GetFileSystemLocale()
+{
+ LIMITED_METHOD_CONTRACT;
+ return g_lcid;
+}
+
+BOOL PEImage::PathEquals(const SString &p1, const SString &p2)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_CASE_SENSITIVE_FILESYSTEM
+ return p1.Equals(p2);
+#else
+ return p1.EqualsCaseInsensitive(p2, g_lcid);
+#endif
+}
+
+#ifndef FEATURE_PAL
+/* static */
+void PEImage::GetPathFromDll(HINSTANCE hMod, SString &result)
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckStartup());
+ PRECONDITION(CheckPointer(hMod));
+ PRECONDITION(CheckValue(result));
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ DWORD ret;
+ DWORD length = MAX_PATH;
+ do
+ {
+ WCHAR *buffer = result.OpenUnicodeBuffer(length);
+ ret = WszGetModuleFileName(hMod, buffer, length);
+ result.CloseBuffer(ret);
+ length *= 2;
+ } while (ret == 0);
+}
+#endif // !FEATURE_PAL
+
+/* static */
+BOOL PEImage::CompareImage(UPTR u1, UPTR u2)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // This is the input to the lookup
+ PEImageLocator *pLocator = (PEImageLocator *) (u1<<1);
+
+ // This is the value stored in the table
+ PEImage *pImage = (PEImage *) u2;
+
+#ifdef FEATURE_FUSION
+ if (pLocator->m_fIsIStream)
+ {
+ return pImage->m_fIsIStream && (pLocator->m_StreamAsmId == pImage->m_StreamAsmId) && (pLocator->m_dwStreamModuleId == pImage->m_dwStreamModuleId);
+ }
+#endif
+
+ BOOL ret = FALSE;
+ HRESULT hr;
+ EX_TRY
+ {
+ SString path(SString::Literal, pLocator->m_pPath);
+ if (PathEquals(path, pImage->GetPath()))
+ ret = TRUE;
+ }
+ EX_CATCH_HRESULT(hr); //<TODO>ignores failure!</TODO>
+ return ret;
+}
+
+BOOL PEImage::Equals(PEImage *pImage)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pImage));
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_CORECLR
+ // PEImage is always unique on CoreCLR so a simple pointer check is sufficient
+ _ASSERTE(m_bInHashMap || GetPath().IsEmpty());
+ _ASSERTE(pImage->m_bInHashMap || pImage->GetPath().IsEmpty());
+
+ return dac_cast<TADDR>(pImage) == dac_cast<TADDR>(this);
+#else // FEATURE_CORECLR
+ if (pImage == this)
+ return TRUE;
+
+ if (GetPath().IsEmpty())
+ {
+#ifdef FEATURE_FUSION
+ if (m_fIsIStream && pImage->m_fIsIStream)
+ {
+ return (m_StreamAsmId == pImage->m_StreamAsmId) && (m_dwStreamModuleId == pImage->m_dwStreamModuleId);
+ }
+#endif
+
+ return FALSE;
+ }
+ else
+ {
+ BOOL ret = FALSE;
+ HRESULT hr;
+ EX_TRY
+ {
+ if (PathEquals(GetPath(), pImage->GetPath()))
+ ret = TRUE;
+ }
+ EX_CATCH_HRESULT(hr); //<TODO>ignores failure!</TODO>
+ return ret;
+ }
+#endif // FEATURE_CORECLR
+}
+
+#ifndef FEATURE_CORECLR
+void PEImage::ComputeHash(ALG_ID algorithm, SBuffer &result)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckStartup());
+ PRECONDITION(CheckValue(result));
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ PEImageLayoutHolder pview(GetLayout(PEImageLayout::LAYOUT_FLAT,PEImage::LAYOUT_CREATEIFNEEDED));
+
+ if (algorithm == CALG_SHA1)
+ {
+ SHA1Hash hasher;
+ hasher.AddData((BYTE *) pview->GetBase(), pview->GetSize());
+ result.Set(hasher.GetHash(), SHA1_HASH_SIZE);
+ return;
+ }
+
+ DWORD size = 0;
+ if(!StrongNameHashSize(algorithm, &size))
+ {
+ ThrowHR(StrongNameErrorInfo());
+ }
+
+ BYTE *buffer = result.OpenRawBuffer(size);
+
+ DWORD hashSize;
+ IfFailThrow(GetHashFromBlob((BYTE *) pview->GetBase(), pview->GetSize(), &algorithm, buffer, size, &hashSize));
+
+ _ASSERTE(size == hashSize);
+
+ result.CloseRawBuffer(hashSize);
+}
+
+CHECK PEImage::CheckHash(ALG_ID algorithm, const void *pbHash, COUNT_T cbHash)
+{
+ CONTRACT_CHECK
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckStartup());
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACT_CHECK_END;
+
+ StackSBuffer hash;
+ ComputeHash(algorithm, hash);
+
+ CHECK(hash.Equals((const BYTE *) pbHash, cbHash));
+
+ CHECK_OK;
+}
+#endif // FEATURE_CORECLR
+
+IMDInternalImport* PEImage::GetMDImport()
+{
+ WRAPPER_NO_CONTRACT;
+ if (!m_pMDImport)
+ OpenMDImport();
+ return m_pMDImport;
+}
+
+#ifdef FEATURE_PREJIT
+IMDInternalImport* PEImage::GetNativeMDImport(BOOL loadAllowed)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(HasNativeHeader());
+ if (loadAllowed) GC_TRIGGERS; else GC_NOTRIGGER;
+ if (loadAllowed) THROWS; else NOTHROW;
+ if (loadAllowed) INJECT_FAULT(COMPlusThrowOM()); else FORBID_FAULT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pNativeMDImport == NULL)
+ {
+ if (loadAllowed)
+ OpenNativeMDImport();
+ else
+ return NULL;
+ }
+
+ _ASSERTE(m_pNativeMDImport);
+ return m_pNativeMDImport;
+}
+
+void PEImage::OpenNativeMDImport()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(HasNativeHeader());
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+ if (m_pNativeMDImport==NULL)
+ {
+ IMDInternalImport* pNewImport;
+ COUNT_T cMeta=0;
+ const void* pMeta=GetNativeManifestMetadata(&cMeta);
+
+ if(pMeta==NULL)
+ return;
+
+ IfFailThrow(GetMetaDataInternalInterface((void *) pMeta,
+ cMeta,
+ ofRead,
+ IID_IMDInternalImport,
+ (void **) &pNewImport));
+
+ if(FastInterlockCompareExchangePointer(&m_pNativeMDImport, pNewImport, NULL))
+ pNewImport->Release();
+ }
+ _ASSERTE(m_pNativeMDImport);
+}
+#endif
+
+void PEImage::OpenMDImport()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(HasCorHeader());
+ PRECONDITION(HasContents());
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+ if (m_pMDImport==NULL)
+ {
+ IMDInternalImport* pNewImport;
+ const void* pMeta=NULL;
+ COUNT_T cMeta=0;
+ if(HasNTHeaders() && HasCorHeader())
+ pMeta=GetMetadata(&cMeta);
+
+ if(pMeta==NULL)
+ return;
+
+#if METADATATRACKER_ENABLED
+ m_pMDTracker = MetaDataTracker::GetOrCreateMetaDataTracker((BYTE *)pMeta,
+ cMeta,
+ GetPath().GetUnicode());
+#endif // METADATATRACKER_ENABLED
+
+ IfFailThrow(GetMetaDataInternalInterface((void *) pMeta,
+ cMeta,
+ ofRead,
+ IID_IMDInternalImport,
+ (void **) &pNewImport));
+
+ if(FastInterlockCompareExchangePointer(&m_pMDImport, pNewImport, NULL))
+ {
+ pNewImport->Release();
+ }
+ else
+ {
+ // Grab the module name. This information is only used by DAC, but we need to
+ // capture it while the module is instantiated in the managed process. The module
+ // name is stored in the metadata's module table in UTF8; convert it to Unicode.
+ //
+ if (m_path.IsEmpty())
+ {
+ // No need to check for errors here since this info is only used by DAC when
+ // inspecting a dump file.
+ //
+ LPCSTR strModuleName;
+ IfFailThrow(m_pMDImport->GetScopeProps(&strModuleName, NULL));
+ m_sModuleFileNameHintUsedByDac.SetUTF8(strModuleName);
+ m_sModuleFileNameHintUsedByDac.Normalize();
+ }
+ }
+
+ if (IsCompilationProcess())
+ {
+ m_pMDImport->SetOptimizeAccessForSpeed(TRUE);
+ }
+ }
+ _ASSERTE(m_pMDImport);
+
+}
+
+void PEImage::GetMVID(GUID *pMvid)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pMvid));
+ PRECONDITION(HasCorHeader());
+ PRECONDITION(HasContents());
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ IfFailThrow(GetMDImport()->GetScopeProps(NULL, pMvid));
+
+#ifdef _DEBUG
+ COUNT_T cMeta;
+ const void *pMeta = GetMetadata(&cMeta);
+ GUID MvidDEBUG;
+
+ if (pMeta == NULL)
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+
+ SafeComHolder<IMDInternalImport> pMDImport;
+
+ IfFailThrow(GetMetaDataInternalInterface((void *) pMeta,
+ cMeta,
+ ofRead,
+ IID_IMDInternalImport,
+ (void **) &pMDImport));
+
+ pMDImport->GetScopeProps(NULL, &MvidDEBUG);
+
+ _ASSERTE(memcmp(pMvid, &MvidDEBUG, sizeof(GUID)) == 0);
+
+#endif // _DEBUG
+}
+
+void PEImage::GetHashedStrongNameSignature(SBuffer &result)
+{
+ COUNT_T size;
+ const void *sig = GetStrongNameSignature(&size);
+
+ SHA1Hash hasher;
+ hasher.AddData((BYTE *) sig, size);
+ result.Set(hasher.GetHash(), SHA1_HASH_SIZE);
+}
+
+
+void PEImage::VerifyIsAssembly()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ VerifyIsILOrNIAssembly(TRUE);
+}
+
+void PEImage::VerifyIsNIAssembly()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ VerifyIsILOrNIAssembly(FALSE);
+}
+
+void PEImage::VerifyIsILOrNIAssembly(BOOL fIL)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // bunch of legacy stuff here w.r.t. the error codes...
+
+ if (!HasNTHeaders())
+ ThrowFormat(COR_E_BADIMAGEFORMAT);
+
+ if(!HasCorHeader())
+ ThrowFormat(COR_E_ASSEMBLYEXPECTED);
+
+ CHECK checkGoodFormat;
+ if (fIL)
+ {
+ checkGoodFormat = CheckILFormat();
+ }
+ else
+ {
+ checkGoodFormat = CheckNativeFormat();
+ }
+ if (!checkGoodFormat)
+ ThrowFormat(COR_E_BADIMAGEFORMAT);
+
+ mdAssembly a;
+ if (FAILED(GetMDImport()->GetAssemblyFromScope(&a)))
+ ThrowFormat(COR_E_ASSEMBLYEXPECTED);
+}
+
+void DECLSPEC_NORETURN PEImage::ThrowFormat(HRESULT hrError)
+{
+ WRAPPER_NO_CONTRACT;
+ EEFileLoadException::Throw(m_path, hrError);
+}
+
+#ifdef FEATURE_FUSION
+// --------------------------------------------------------------------------------
+// Exports for the metadata APIs for fusion.
+// --------------------------------------------------------------------------------
+
+HRESULT STDMETHODCALLTYPE RuntimeOpenImage(LPCWSTR pszFileName, HCORMODULE* hHandle)
+{
+ WRAPPER_NO_CONTRACT;
+ return RuntimeOpenImageInternal(pszFileName, hHandle, NULL, MDInternalImport_Default);
+}
+
+HRESULT STDMETHODCALLTYPE RuntimeOpenImageInternal(LPCWSTR pszFileName, HCORMODULE* hHandle, DWORD *pdwLength, MDInternalImportFlags flags, HANDLE hFile)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_SO_INTOLERANT;
+ HRESULT hr = S_OK;
+ ETWOnStartup (LoaderCatchCall_V1,LoaderCatchCallEnd_V1);
+ EX_TRY
+ {
+ PEImage::Startup();
+ PEImageHolder pFile(PEImage::OpenImage(pszFileName, flags));
+ if (hFile != INVALID_HANDLE_VALUE)
+ {
+ pFile->SetFileHandle(hFile);
+ }
+ if (pdwLength)
+ {
+ PEImageLayoutHolder pLayout(pFile->GetLayout(PEImageLayout::LAYOUT_MAPPED,PEImage::LAYOUT_CREATEIFNEEDED));
+ pFile->CachePEKindAndMachine();
+ *pdwLength = pLayout->GetSize();
+ }
+ *hHandle = (HCORMODULE)pFile.Extract();
+ }
+ EX_CATCH_HRESULT(hr);
+ return hr;
+}
+
+HRESULT STDMETHODCALLTYPE RuntimeOpenImageByStream(IStream* pIStream, UINT64 AssemblyId,
+ DWORD dwModuleId,
+ HCORMODULE* hHandle, DWORD *pdwLength, MDInternalImportFlags flags)
+{
+ STATIC_CONTRACT_NOTHROW;
+ HRESULT hr = S_OK;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ EX_TRY
+ {
+ PEImage::Startup();
+
+ PEImageHolder pFile(PEImage::OpenImage(pIStream, AssemblyId, dwModuleId, FALSE, flags));
+ *hHandle = (HCORMODULE) pFile.Extract();
+ if (pdwLength)
+ {
+ PEImageLayoutHolder pImage(pFile->GetLayout(PEImageLayout::LAYOUT_ANY,0));
+ pFile->CachePEKindAndMachine();
+ *pdwLength = pImage->GetSize();
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+ END_SO_INTOLERANT_CODE;
+
+ return hr;
+}
+
+HRESULT STDMETHODCALLTYPE RuntimeReleaseHandle(HCORMODULE hHandle)
+{
+ STATIC_CONTRACT_NOTHROW;
+ HRESULT hr = S_OK;
+
+ PEImage *pImage = (PEImage*)hHandle;
+
+ if (pImage != NULL)
+ pImage->Release();
+
+ return hr;
+}
+
+void RuntimeAddRefHandle(HCORMODULE hHandle)
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ PEImage *pImage = (PEImage*)hHandle;
+
+ if (pImage != NULL)
+ pImage->AddRef();
+}
+
+HRESULT STDMETHODCALLTYPE RuntimeGetMDInternalImport(HCORMODULE hHandle, MDInternalImportFlags flags, IMDInternalImport** ppMDImport)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+ PEImage* pImage=(PEImage*)hHandle;
+ HRESULT hr=S_OK;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ EX_TRY
+ {
+ if (!pImage->HasNTHeaders() || !pImage->HasCorHeader())
+ hr=HRESULT_FROM_WIN32(ERROR_FILE_INVALID);
+ else
+ {
+#ifdef FEATURE_PREJIT
+ if (pImage->HasNativeHeader())
+ {
+ if (!pImage->CheckNativeFormat())
+ hr=COR_E_BADIMAGEFORMAT;
+ else
+ {
+ if (flags & MDInternalImport_ILMetaData)
+ goto OPEN_IL_METADATA;
+
+ *ppMDImport=pImage->GetNativeMDImport();
+ if (*ppMDImport)
+ (*ppMDImport)->AddRef();
+ else
+ hr=COR_E_BADIMAGEFORMAT;
+ }
+ }
+ else
+#endif //FEATURE_PREJIT
+ {
+ if (!pImage->CheckILFormat())
+ hr=COR_E_BADIMAGEFORMAT;
+ else
+ {
+#ifdef FEATURE_PREJIT
+ OPEN_IL_METADATA:
+#endif
+ *ppMDImport=pImage->GetMDImport();
+ if (*ppMDImport)
+ (*ppMDImport)->AddRef();
+ else
+ hr=COR_E_BADIMAGEFORMAT;
+ }
+ }
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+ END_SO_INTOLERANT_CODE;
+
+ return hr;
+}
+
+HRESULT STDMETHODCALLTYPE RuntimeGetImageBase(HCORMODULE hHandle,LPVOID* base, BOOL bMapped, COUNT_T* dwSize)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+ HRESULT hr=S_FALSE;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ EX_TRY
+ {
+ PEImage* pImage=(PEImage*)hHandle;
+ *base=NULL;
+ if (!pImage->HasLoadedLayout())
+ {
+ PEImageLayoutHolder pLayout(pImage->GetLayout(bMapped
+ ?PEImageLayout::LAYOUT_MAPPED
+ :PEImageLayout::LAYOUT_FLAT,0));
+ if (pLayout!=NULL)
+ {
+ if(dwSize)
+ *dwSize=pLayout->GetSize();
+ *base=pLayout->GetBase();
+ hr=S_OK;
+ }
+ }
+
+ if (hr==S_FALSE && pImage->HasLoadedLayout())
+ {
+ BOOL bIsMapped=pImage->GetLoadedLayout()->IsMapped();
+ if ((bIsMapped && bMapped) || (!bIsMapped && !bMapped))
+ {
+ //the one we want
+ *base=pImage->GetLoadedLayout()->GetBase();
+ if (dwSize)
+ *dwSize=pImage->GetLoadedLayout()->GetSize();
+ hr=S_OK;
+ }
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+ END_SO_INTOLERANT_CODE;
+
+ return hr;
+}
+
+HRESULT STDMETHODCALLTYPE RuntimeGetImageKind(HCORMODULE hHandle,DWORD* pdwKind, DWORD* pdwMachine)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+ HRESULT hr=S_FALSE;
+
+ PEImage* pImage=(PEImage*)hHandle;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ EX_TRY
+ {
+ pImage->GetPEKindAndMachine(pdwKind, pdwMachine);
+ hr = S_OK;
+ }
+ EX_CATCH_HRESULT(hr);
+ END_SO_INTOLERANT_CODE;
+ return hr;
+}
+
+HRESULT STDMETHODCALLTYPE RuntimeOSHandle(HCORMODULE hHandle, HMODULE* hModule)
+{
+ LIMITED_METHOD_CONTRACT;
+ if(hHandle==NULL || hModule == NULL)
+ return E_POINTER;
+ PEImage* pImage= (PEImage*) hHandle;
+ if (!pImage->HasLoadedLayout())
+ return HRESULT_FROM_WIN32(ERROR_NOT_FOUND);
+ *hModule=(HMODULE)pImage->GetLoadedLayout()->GetBase();
+ return S_OK;
+}
+
+HRESULT RuntimeGetAssemblyStrongNameHashForModule(HCORMODULE hModule,
+ IMetaDataImport * pMDImport,
+ BYTE *pbSNHash,
+ DWORD *pcbSNHash)
+{
+ STATIC_CONTRACT_NOTHROW;
+ HRESULT hr = S_OK;
+
+ PEImage* pImage = (PEImage*)hModule;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ EX_TRY
+ {
+
+ if (pImage->HasStrongNameSignature())
+ {
+ if (pImage->IsStrongNameSigned())
+ {
+ SBuffer signature;
+ pImage->GetHashedStrongNameSignature(signature);
+ *pcbSNHash = min(signature.GetSize(), *pcbSNHash);
+ signature.Copy(pbSNHash, signature.Begin(), *pcbSNHash);
+ }
+ else
+ {
+ // This assembly is delay signed (in this limited scenario).
+ // We'll use the assembly MVID as the hash and leave assembly verification
+ // up to the loader to determine if delay signed assemblies are allowed.
+ // This allows us to fix the perf degrade observed with the hashing code and
+ // detailed in BUG 126760.
+
+ // <TODO>@TODO:workaround: This is a workaround because Fusion is expecting at least 20 bytes of data.</TODO>
+ if (max(sizeof(GUID), 20) <= *pcbSNHash)
+ {
+ memset(pbSNHash, 0, *pcbSNHash);
+ hr = pMDImport->GetScopeProps(NULL, 0, NULL, (GUID *) pbSNHash);
+ }
+ else
+ hr = HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+
+ *pcbSNHash = max(sizeof(GUID), 20);
+ }
+ }
+ else
+ {
+ hr = CORSEC_E_MISSING_STRONGNAME;
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+ END_SO_INTOLERANT_CODE;
+
+ return hr;
+}
+
+#endif // FEATURE_FUSION
+
+
+#if defined(FEATURE_MIXEDMODE) && !defined(CROSSGEN_COMPILE)
+
+//may outlive PEImage
+PEImage::IJWFixupData::IJWFixupData(void *pBase)
+ : m_lock(CrstIJWFixupData),
+ m_base(pBase), m_flags(0),m_DllThunkHeap(NULL),m_iNextFixup(0),m_iNextMethod(0)
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+PEImage::IJWFixupData::~IJWFixupData()
+{
+ WRAPPER_NO_CONTRACT;
+ if (m_DllThunkHeap)
+ delete m_DllThunkHeap;
+}
+
+
+// Self-initializing accessor for m_DllThunkHeap
+LoaderHeap *PEImage::IJWFixupData::GetThunkHeap()
+{
+ CONTRACT (LoaderHeap *)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END
+
+ if (!m_DllThunkHeap)
+ {
+ size_t * pPrivatePCLBytes = NULL;
+ size_t * pGlobalPCLBytes = NULL;
+
+#ifdef PROFILING_SUPPORTED
+ pPrivatePCLBytes = &(GetPerfCounters().m_Loading.cbLoaderHeapSize);
+#endif
+
+ LoaderHeap *pNewHeap = new LoaderHeap(VIRTUAL_ALLOC_RESERVE_GRANULARITY, // DWORD dwReserveBlockSize
+ 0, // DWORD dwCommitBlockSize
+ pPrivatePCLBytes,
+ ThunkHeapStubManager::g_pManager->GetRangeList(),
+ TRUE); // BOOL fMakeExecutable
+
+ if (FastInterlockCompareExchangePointer((PVOID*)&m_DllThunkHeap, (VOID*)pNewHeap, (VOID*)0) != 0)
+ {
+ delete pNewHeap;
+ }
+ }
+
+ RETURN m_DllThunkHeap;
+}
+
+void PEImage::IJWFixupData::MarkMethodFixedUp(COUNT_T iFixup, COUNT_T iMethod)
+{
+ LIMITED_METHOD_CONTRACT;
+ // supports only sequential fixup/method
+ _ASSERTE( (iFixup == m_iNextFixup+1 && iMethod ==0) || //first method of the next fixup or
+ (iFixup == m_iNextFixup && iMethod == m_iNextMethod) ); //the method that was next to fixup
+
+ m_iNextFixup = iFixup;
+ m_iNextMethod = iMethod+1;
+}
+
+BOOL PEImage::IJWFixupData::IsMethodFixedUp(COUNT_T iFixup, COUNT_T iMethod)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (iFixup < m_iNextFixup)
+ return TRUE;
+ if (iFixup > m_iNextFixup)
+ return FALSE;
+ if (iMethod < m_iNextMethod)
+ return TRUE;
+
+ return FALSE;
+}
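+
+// Worked example of the sequential model: after MarkMethodFixedUp(2, 5),
+// IsMethodFixedUp(1, n) is TRUE for any n and IsMethodFixedUp(2, 4) is TRUE,
+// while IsMethodFixedUp(2, 6) and IsMethodFixedUp(3, 0) are FALSE - fixups
+// are assumed to be applied strictly in order.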
+
+/*static */
+PTR_LoaderHeap PEImage::GetDllThunkHeap(void *pBase)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ return GetIJWData(pBase)->GetThunkHeap();
+}
+
+/* static */
+PEImage::IJWFixupData *PEImage::GetIJWData(void *pBase)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END
+
+ // Take the IJW hash lock
+ CrstHolder hashLockHolder(&s_ijwHashLock);
+
+ // Try to find the data
+ IJWFixupData *pData = (IJWFixupData *)s_ijwFixupDataHash->LookupValue((UPTR) pBase, pBase);
+
+ // No data, must create
+ if ((UPTR)pData == (UPTR)INVALIDENTRY)
+ {
+ pData = new IJWFixupData(pBase);
+ s_ijwFixupDataHash->InsertValue((UPTR) pBase, pData);
+ }
+
+ // Return the new data
+ return (pData);
+}
+
+/* static */
+void PEImage::UnloadIJWModule(void *pBase)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END
+
+ // Take the IJW hash lock
+ CrstHolder hashLockHolder(&s_ijwHashLock);
+
+ // Try to delete the hash entry
+ IJWFixupData *pData = (IJWFixupData *)s_ijwFixupDataHash->DeleteValue((UPTR) pBase, pBase);
+
+ // Now delete the data
+ if ((UPTR)pData != (UPTR)INVALIDENTRY)
+ delete pData;
+
+}
+
+#endif // FEATURE_MIXEDMODE && !CROSSGEN_COMPILE
+
+#ifdef FEATURE_FUSION
+void PEImage::Init(IStream* pIStream, UINT64 uAsmStreamId,
+ DWORD dwModuleId, BOOL resourceFile)
+{
+ CONTRACT_VOID
+ {
+ CONSTRUCTOR_CHECK;
+ PRECONDITION(CheckStartup());
+ STANDARD_VM_CHECK;
+ }
+ CONTRACT_END;
+
+ m_StreamAsmId = uAsmStreamId;
+ m_dwStreamModuleId = dwModuleId;
+ m_fIsIStream = TRUE;
+
+ LOG((LF_LOADER, LL_INFO100, "PEImage: Opening flat stream\n"));
+
+ if (!pIStream)
+ ThrowHR(COR_E_FILELOAD);
+
+ // Just copy bytes.
+
+ PEImageLayoutHolder pFlatLayout(PEImageLayout::CreateFromStream(pIStream, this));
+
+ if (!resourceFile) {
+ if (!pFlatLayout->CheckCORFormat())
+ ThrowFormat(COR_E_BADIMAGEFORMAT);
+
+ if (!CheckLayoutFormat(pFlatLayout))
+ ThrowHR(COR_E_NOTSUPPORTED);
+ }
+
+ pFlatLayout.SuppressRelease();
+ SetLayout(IMAGE_FLAT, pFlatLayout);
+
+ RETURN;
+}
+#endif // FEATURE_FUSION
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void PEImage::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ // There are codepaths that will enumerate the PEImage without
+ // calling EnumMemoryRegions; ensure that we will still get
+ // these necessary fields enumerated no matter what.
+ m_path.EnumMemoryRegions(flags);
+
+ // We always want this field in mini/triage/heap dumps.
+ m_sModuleFileNameHintUsedByDac.EnumMemoryRegions(CLRDATA_ENUM_MEM_DEFAULT);
+
+
+ EX_TRY
+ {
+ if (HasLoadedLayout() && HasNTHeaders() && HasDirectoryEntry(IMAGE_DIRECTORY_ENTRY_DEBUG))
+ {
+ // Get a pointer to the contents and size of the debug directory and report it
+ COUNT_T cbDebugDir;
+ TADDR taDebugDir = GetLoadedLayout()->GetDirectoryEntryData(IMAGE_DIRECTORY_ENTRY_DEBUG, &cbDebugDir);
+ DacEnumMemoryRegion(taDebugDir, cbDebugDir);
+
+ // Report the memory that each debug directory entry points to
+ UINT cNumEntries = cbDebugDir / sizeof(IMAGE_DEBUG_DIRECTORY);
+ PTR_IMAGE_DEBUG_DIRECTORY pDebugEntry = dac_cast<PTR_IMAGE_DEBUG_DIRECTORY>(taDebugDir);
+ for (UINT iIndex = 0; iIndex < cNumEntries; iIndex++)
+ {
+ TADDR taEntryAddr = GetLoadedLayout()->GetRvaData(pDebugEntry[iIndex].AddressOfRawData);
+ DacEnumMemoryRegion(taEntryAddr, pDebugEntry[iIndex].SizeOfData);
+
+ // Triage dumps must not dump full paths as they may contain PII data.
+ // Thus, we replace the PDB's full path in the debug directory with the filename only.
+ if (flags == CLRDATA_ENUM_MEM_TRIAGE &&
+ pDebugEntry[iIndex].Type == IMAGE_DEBUG_TYPE_CODEVIEW)
+ {
+ DWORD CvSignature = *(dac_cast<PTR_DWORD>(taEntryAddr));
+ if(CvSignature == CV_SIGNATURE_RSDS)
+ {
+ CV_INFO_PDB70* pCvInfo = (CV_INFO_PDB70*)DacInstantiateTypeByAddressNoReport(taEntryAddr, sizeof(CV_INFO_PDB70), false);
+
+ if (pCvInfo == NULL || pCvInfo->path == NULL)
+ {
+ continue;
+ }
+ // Because data may be corrupted make sure we null terminate the string.
+ pCvInfo->path[MAX_PATH - 1] = '\0';
+
+ // Find the filename within the PDB's full path
+ char* fileName = strrchr(pCvInfo->path, '\\');
+ if (fileName != NULL)
+ fileName++;
+ else
+ fileName = pCvInfo->path;
+
+ size_t fileNameLength = strlen(fileName);
+ memmove(pCvInfo->path, fileName, fileNameLength);
+
+ // NULL out the rest of the path buffer.
+ for (size_t i = fileNameLength; i < MAX_PATH - 1; i++)
+ {
+ pCvInfo->path[i] = '\0';
+ }
+
+ DacUpdateMemoryRegion( taEntryAddr + offsetof(CV_INFO_PDB70, path), sizeof(pCvInfo->path), (PBYTE)pCvInfo->path );
+ }
+ }
+ }
+ }
+ }
+ EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED
+
+ DAC_ENUM_DTHIS();
+
+ EMEM_OUT(("MEM: %p PEImage\n", dac_cast<TADDR>(this)));
+
+ // This just gets the image headers into the dump.
+ // This is used, for example, for ngen images to ensure we have the debug directory so we
+ // can find the managed PDBs.
+ // No lock here as the process should be suspended.
+ if (m_pLayouts[IMAGE_FLAT].IsValid() && m_pLayouts[IMAGE_FLAT]!=NULL)
+ m_pLayouts[IMAGE_FLAT]->EnumMemoryRegions(flags);
+ if (m_pLayouts[IMAGE_MAPPED].IsValid() && m_pLayouts[IMAGE_MAPPED]!=NULL)
+ m_pLayouts[IMAGE_MAPPED]->EnumMemoryRegions(flags);
+ if (m_pLayouts[IMAGE_LOADED].IsValid() && m_pLayouts[IMAGE_LOADED]!=NULL)
+ m_pLayouts[IMAGE_LOADED]->EnumMemoryRegions(flags);
+ if (m_pLayouts[IMAGE_LOADED_FOR_INTROSPECTION].IsValid() && m_pLayouts[IMAGE_LOADED_FOR_INTROSPECTION]!=NULL)
+ m_pLayouts[IMAGE_LOADED_FOR_INTROSPECTION]->EnumMemoryRegions(flags);
+}
+
+#endif // #ifdef DACCESS_COMPILE
+
+
+PEImage::PEImage():
+ m_refCount(1),
+ m_bIsTrustedNativeImage(FALSE),
+ m_bIsNativeImageInstall(FALSE),
+ m_bPassiveDomainOnly(FALSE),
+#ifndef FEATURE_CORECLR
+ m_fReportedToUsageLog(FALSE),
+#endif // !FEATURE_CORECLR
+ m_bInHashMap(FALSE),
+#ifdef METADATATRACKER_DATA
+ m_pMDTracker(NULL),
+#endif // METADATATRACKER_DATA
+ m_pMDImport(NULL),
+ m_pNativeMDImport(NULL),
+#ifdef FEATURE_FUSION
+ m_StreamAsmId(0),
+ m_dwStreamModuleId(0),
+ m_fIsIStream(FALSE),
+#endif
+ m_hFile(INVALID_HANDLE_VALUE),
+ m_bOwnHandle(true),
+ m_bSignatureInfoCached(FALSE),
+ m_hrSignatureInfoStatus(E_UNEXPECTED),
+ m_dwSignatureInfo(0),
+#ifdef FEATURE_FUSION
+ m_pILFingerprint(NULL),
+#endif //FEATURE_FUSION
+ m_dwPEKind(0),
+ m_dwMachine(0),
+ m_fCachedKindAndMachine(FALSE)
+#ifdef FEATURE_APTCA
+ , m_fMayBeConditionalAptca(TRUE)
+#endif // FEATURE_APTCA
+#ifdef FEATURE_LAZY_COW_PAGES
+ ,m_bAllocatedLazyCOWPages(FALSE)
+#endif // FEATURE_LAZY_COW_PAGES
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ for (DWORD i=0;i<COUNTOF(m_pLayouts);i++)
+ m_pLayouts[i]=NULL ;
+ m_pLayoutLock=new SimpleRWLock(PREEMPTIVE,LOCK_TYPE_DEFAULT);
+}
+
+PTR_PEImageLayout PEImage::GetLayout(DWORD imageLayoutMask,DWORD flags)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ PTR_PEImageLayout pRetVal;
+
+#ifndef DACCESS_COMPILE
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ // First attempt to find an existing layout matching imageLayoutMask. If that fails,
+ // and the caller has asked us to create layouts if needed, then try again passing
+ // the create flag to GetLayoutInternal. We need this to be synchronized, but the common
+ // case is that the layout already exists, so use a reader-writer lock.
+ GCX_PREEMP();
+ {
+ SimpleReadLockHolder lock(m_pLayoutLock);
+ pRetVal=GetLayoutInternal(imageLayoutMask,flags&(~LAYOUT_CREATEIFNEEDED));
+ }
+
+ if (pRetVal == NULL && (flags&LAYOUT_CREATEIFNEEDED) != 0)
+ {
+ SimpleWriteLockHolder lock(m_pLayoutLock);
+ pRetVal = GetLayoutInternal(imageLayoutMask,flags);
+ }
+ END_SO_INTOLERANT_CODE;
+
+ return pRetVal;
+
+#else
+ // In DAC builds, we can't create any layouts - we must require that they already exist.
+ // We also don't take any AddRefs or locks in DAC builds - it's inspection-only.
+ pRetVal = GetExistingLayoutInternal(imageLayoutMask);
+ if ((pRetVal==NULL) && (flags & LAYOUT_CREATEIFNEEDED))
+ {
+ _ASSERTE_MSG(false, "DACization error - caller expects PEImage layout to exist and it doesn't");
+ DacError(E_UNEXPECTED);
+ }
+ return pRetVal;
+#endif
+}
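+
+// Sketch (hypothetical caller): request a mapped layout, creating it on demand; the
+// holder releases the AddRef taken by GetLayout, mirroring the pattern used by
+// RuntimeOpenImageInternal above.
+//
+//   PEImageLayoutHolder pLayout(pImage->GetLayout(PEImageLayout::LAYOUT_MAPPED,
+//                                                 PEImage::LAYOUT_CREATEIFNEEDED));
+//   COUNT_T cb = pLayout->GetSize();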
+
+#ifndef DACCESS_COMPILE
+
+PTR_PEImageLayout PEImage::GetLayoutInternal(DWORD imageLayoutMask,DWORD flags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PTR_PEImageLayout pRetVal=GetExistingLayoutInternal(imageLayoutMask);
+
+ if (pRetVal==NULL && (flags&LAYOUT_CREATEIFNEEDED))
+ {
+ _ASSERTE(HasID());
+
+ if (imageLayoutMask&PEImageLayout::LAYOUT_MAPPED)
+ {
+ PEImageLayout * pLoadLayout = NULL;
+
+#ifdef FEATURE_CORECLR
+ if (m_bIsTrustedNativeImage || IsFile())
+ {
+ // For CoreCLR, try to load all files via LoadLibrary first. If LoadLibrary did not work, retry using
+ // regular mapping - but not for native images.
+ pLoadLayout = PEImageLayout::Load(this, TRUE /* bNTSafeLoad */, m_bIsTrustedNativeImage /* bThrowOnError */);
+ }
+#else
+ if (m_bIsTrustedNativeImage)
+ {
+ pLoadLayout = PEImageLayout::Load(this, FALSE);
+ }
+ else if (m_bIsNativeImageInstall)
+ {
+ // When ESB (extended secure boot) is enabled, a native image that is being installed can
+ // only be loaded flat.
+ PEImageLayout* pFlatLayout=PEImageLayout::LoadFlat(GetFileHandle(),this);
+ SetLayout(IMAGE_FLAT,pFlatLayout);
+ pLoadLayout = new ConvertedImageLayout(pFlatLayout);
+ }
+#endif
+
+ if (pLoadLayout != NULL)
+ {
+ SetLayout(IMAGE_MAPPED,pLoadLayout);
+ pLoadLayout->AddRef();
+ SetLayout(IMAGE_LOADED,pLoadLayout);
+ pRetVal=pLoadLayout;
+ }
+ else
+ if (IsFile())
+ {
+ PEImageLayoutHolder pLayout(PEImageLayout::Map(GetFileHandle(),this));
+
+ bool fMarkAnyCpuImageAsLoaded = false;
+ // Avoid mapping another image if we can. We can only do this for IL-ONLY images
+ // since LoadLibrary is needed if we are to actually load code.
+ if (pLayout->HasCorHeader() && pLayout->IsILOnly())
+ {
+#ifdef FEATURE_CORECLR
+ // For CoreCLR, IL-only images will always be mapped. We also don't bother doing the conversion of the PE header on 64bit,
+ // as done below for the desktop case, as there is no appcompat burden for CoreCLR on 64bit to have that conversion done.
+ fMarkAnyCpuImageAsLoaded = true;
+#else // !FEATURE_CORECLR
+
+#ifdef _WIN64
+ // When attempting to load an assembly using LoadLibrary on x64,
+ // the execution will go via the shell-shim that will try to determine
+ // if the assembly is IL-only with a PE32 header (i.e. built as anycpu). If it is,
+ // it will convert the in-memory PE header of the image to be PE32+ (i.e. mark it as a 64bit image).
+ //
+ // Since we are trying to avoid mapping twice for IL-only images by simply memory mapping them,
+ // we should emulate the shell-shim behaviour for 64bit. This will allow inproc components (e.g. ASP.NET),
+ // which check for a PE32+ header, to continue working as expected.
+ //
+ // If we fail for some reason to change the header, in retail build, we will simply fallback to the double-loading behaviour without
+ // any functional problems.
+ if (pLayout->Has32BitNTHeaders())
+ {
+ fMarkAnyCpuImageAsLoaded = pLayout->ConvertILOnlyPE32ToPE64();
+ }
+ else
+ {
+ // Before assuming that PE32+ file can be loaded, confirm that
+ // it is the expected machine type. This will ensure AMD64 does not load ARM64 or IA64 assemblies (and likewise).
+ // If the machine type does not match, the Loader will fail the load at a later point.
+ if (pLayout->GetMachine() == IMAGE_FILE_MACHINE_NATIVE)
+ {
+ fMarkAnyCpuImageAsLoaded = true; // PE32+ (aka native 64bit) binaries dont require any extra processing.
+ }
+ }
+#else // !_WIN64
+ // Why can't we blindly assume that on a 32bit OS the image can always be marked as loaded? Because it is
+ // possible to load a PE32+ image and map it into a 32bit WOW64 process.
+ if (pLayout->Has32BitNTHeaders())
+ fMarkAnyCpuImageAsLoaded = true;
+#endif // _WIN64
+
+#endif // FEATURE_CORECLR
+ }
+
+ pLayout.SuppressRelease();
+
+ SetLayout(IMAGE_MAPPED,pLayout);
+ if (fMarkAnyCpuImageAsLoaded)
+ {
+ pLayout->AddRef();
+ SetLayout(IMAGE_LOADED, pLayout);
+ }
+ pRetVal=pLayout;
+ }
+ else
+ {
+ PEImageLayoutHolder flatPE(GetLayoutInternal(PEImageLayout::LAYOUT_FLAT,LAYOUT_CREATEIFNEEDED));
+ if (!flatPE->CheckFormat())
+ ThrowFormat(COR_E_BADIMAGEFORMAT);
+ pRetVal=PEImageLayout::LoadFromFlat(flatPE);
+ SetLayout(IMAGE_MAPPED,pRetVal);
+ }
+ }
+ else
+ if (imageLayoutMask&PEImageLayout::LAYOUT_FLAT)
+ {
+#ifdef FEATURE_FUSION
+ _ASSERTE(!m_fIsIStream); // images created from streams should always already have a flat layout
+#endif
+ pRetVal=PEImageLayout::LoadFlat(GetFileHandle(),this);
+ m_pLayouts[IMAGE_FLAT]=pRetVal;
+ }
+
+ }
+ if (pRetVal)
+ {
+ pRetVal->AddRef();
+ }
+ return pRetVal;
+}
+
+/* static */
+PTR_PEImage PEImage::LoadFlat(const void *flat, COUNT_T size)
+{
+ CONTRACT(PTR_PEImage)
+ {
+ STANDARD_VM_CHECK;
+ }
+ CONTRACT_END;
+
+ PEImageHolder pImage(new PEImage());
+ PTR_PEImageLayout pLayout = PEImageLayout::CreateFlat(flat,size,pImage);
+ _ASSERTE(!pLayout->IsMapped());
+ pImage->SetLayout(IMAGE_FLAT,pLayout);
+ RETURN dac_cast<PTR_PEImage>(pImage.Extract());
+}
+
+#ifndef FEATURE_PAL
+/* static */
+PTR_PEImage PEImage::LoadImage(HMODULE hMod)
+{
+ CONTRACT(PTR_PEImage)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(hMod!=NULL);
+ POSTCONDITION(RETVAL->HasLoadedLayout());
+ }
+ CONTRACT_END;
+
+ StackSString path;
+ GetPathFromDll(hMod, path);
+ PEImageHolder pImage(PEImage::OpenImage(path,(MDInternalImportFlags)(MDInternalImport_CheckLongPath|MDInternalImport_CheckShortPath)));
+ if (pImage->HasLoadedLayout())
+ RETURN dac_cast<PTR_PEImage>(pImage.Extract());
+
+ SimpleWriteLockHolder lock(pImage->m_pLayoutLock);
+
+ if(pImage->m_pLayouts[IMAGE_LOADED]==NULL)
+ pImage->SetLayout(IMAGE_LOADED,PEImageLayout::CreateFromHMODULE(hMod,pImage,WszGetModuleHandle(NULL)!=hMod));
+
+ if(pImage->m_pLayouts[IMAGE_MAPPED]==NULL)
+ {
+ pImage->m_pLayouts[IMAGE_LOADED]->AddRef();
+ pImage->SetLayout(IMAGE_MAPPED,pImage->m_pLayouts[IMAGE_LOADED]);
+ }
+
+ RETURN dac_cast<PTR_PEImage>(pImage.Extract());
+}
+#endif // !FEATURE_PAL
+
+void PEImage::Load()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (HasLoadedLayout())
+ {
+ _ASSERTE(GetLoadedLayout()->IsMapped()||GetLoadedLayout()->IsILOnly());
+ return;
+ }
+
+ SimpleWriteLockHolder lock(m_pLayoutLock);
+ if(!IsFile())
+ {
+ if (!m_pLayouts[IMAGE_FLAT]->CheckILOnly())
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ if(m_pLayouts[IMAGE_LOADED]==NULL)
+ SetLayout(IMAGE_LOADED,PEImageLayout::LoadFromFlat(m_pLayouts[IMAGE_FLAT]));
+ }
+ else
+ {
+#ifdef FEATURE_CORECLR
+ if(m_pLayouts[IMAGE_LOADED]==NULL)
+ SetLayout(IMAGE_LOADED,PEImageLayout::Load(this,TRUE));
+#else
+
+ // As part of the Load() call we may initialize the loaded image in DllMain,
+ // so we have to release the lock and be prepared that, by the time
+ // PEImageLayout::Load returns, m_pLayouts[IMAGE_LOADED] may already be set to something else.
+ lock.Release();
+
+ FileHandleHolder pProtect=GetProtectingFileHandle(FALSE);
+
+ // if the image is IL-only, try to load it in a safe manner
+
+ // using the Internal function here because we are under the writer lock
+ PEImageLayoutHolder pLayout=GetLayoutInternal(PEImageLayout::LAYOUT_ANY,0);
+ BOOL bPreferSafeLoad=(pLayout && pLayout->IsILOnly());
+
+ // Always use safe load during NGen to avoid running unmanaged code in IJW assemblies
+ if (IsCompilationProcess())
+ bPreferSafeLoad = TRUE;
+
+ PEImageLayoutHolder pLoaded(PEImageLayout::Load(this,bPreferSafeLoad));
+
+ lock.Acquire();
+
+ if(m_pLayouts[IMAGE_LOADED]==NULL)
+ SetLayout(IMAGE_LOADED,pLoaded.Extract());
+
+#endif // FEATURE_CORECLR
+ }
+}
+
+void PEImage::SetLoadedHMODULE(HMODULE hMod)
+{
+ WRAPPER_NO_CONTRACT;
+ SimpleWriteLockHolder lock(m_pLayoutLock);
+ if(m_pLayouts[IMAGE_LOADED])
+ {
+ _ASSERTE(m_pLayouts[IMAGE_LOADED]->GetBase()==hMod);
+ return;
+ }
+ SetLayout(IMAGE_LOADED,PEImageLayout::CreateFromHMODULE(hMod,this,TRUE));
+}
+
+void PEImage::LoadFromMapped()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (HasLoadedLayout())
+ {
+ _ASSERTE(GetLoadedLayout()->IsMapped());
+ return;
+ }
+
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_MAPPED,LAYOUT_CREATEIFNEEDED));
+ SimpleWriteLockHolder lock(m_pLayoutLock);
+ if(m_pLayouts[IMAGE_LOADED]==NULL)
+ SetLayout(IMAGE_LOADED,pLayout.Extract());
+}
+
+void PEImage::LoadForIntrospection()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (HasLoadedIntrospectionLayout())
+ return;
+
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ SimpleWriteLockHolder lock(m_pLayoutLock);
+ if(m_pLayouts[IMAGE_LOADED_FOR_INTROSPECTION]==NULL)
+ SetLayout(IMAGE_LOADED_FOR_INTROSPECTION,pLayout.Extract());
+}
+
+void PEImage::LoadNoFile()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!IsFile());
+ }
+ CONTRACTL_END;
+ if (HasLoadedLayout())
+ return;
+
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,0));
+ if (!pLayout->CheckILOnly())
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ SimpleWriteLockHolder lock(m_pLayoutLock);
+ if(m_pLayouts[IMAGE_LOADED]==NULL)
+ SetLayout(IMAGE_LOADED,pLayout.Extract());
+}
+
+
+void PEImage::LoadNoMetaData(BOOL bIntrospection)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (bIntrospection)
+ {
+ if (HasLoadedIntrospectionLayout())
+ return;
+ }
+ else
+ if (HasLoadedLayout())
+ return;
+
+ SimpleWriteLockHolder lock(m_pLayoutLock);
+ int layoutKind=bIntrospection?IMAGE_LOADED_FOR_INTROSPECTION:IMAGE_LOADED;
+ if (m_pLayouts[layoutKind]!=NULL)
+ return;
+ if (m_pLayouts[IMAGE_FLAT]!=NULL)
+ {
+ m_pLayouts[IMAGE_FLAT]->AddRef();
+ SetLayout(layoutKind,m_pLayouts[IMAGE_FLAT]);
+ }
+ else
+ {
+ _ASSERTE(!m_path.IsEmpty());
+ SetLayout(layoutKind,PEImageLayout::LoadFlat(GetFileHandle(),this));
+ }
+}
+
+
+#endif //DACCESS_COMPILE
+
+//-------------------------------------------------------------------------------
+// Make a best-effort attempt to obtain an image name for use in an error message.
+//
+// This routine must expect to be called before this object is fully loaded.
+// It can return an empty string if the name isn't available or the object isn't
+// initialized enough to get a name, but it mustn't crash.
+//-------------------------------------------------------------------------------
+LPCWSTR PEImage::GetPathForErrorMessages()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ SUPPORTS_DAC_HOST_ONLY;
+ }
+ CONTRACTL_END
+
+ return m_path;
+}
+
+#ifdef FEATURE_FUSION
+PEKIND PEImage::GetFusionProcessorArchitecture()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DWORD dwPEKind, dwMachine;
+ GetPEKindAndMachine(&dwPEKind, &dwMachine);
+
+ DWORD dwAssemblyFlags = 0;
+
+ IfFailThrow(m_pMDImport->GetAssemblyProps(TokenFromRid(1, mdtAssembly),
+ NULL, NULL, NULL,
+ NULL, NULL, &dwAssemblyFlags));
+
+ PEKIND retval;
+ if (FAILED(TranslatePEToArchitectureType(
+ (CorPEKind)dwPEKind,
+ dwMachine,
+ dwAssemblyFlags,
+ &retval)))
+ {
+ return peInvalid;
+ }
+ return retval;
+}
+#endif //FEATURE_FUSION
+
+HANDLE PEImage::GetFileHandle()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(m_pLayoutLock->IsWriterLock());
+ }
+ CONTRACTL_END;
+
+ if (m_hFile!=INVALID_HANDLE_VALUE)
+ return m_hFile;
+
+ {
+ ErrorModeHolder mode(SEM_NOOPENFILEERRORBOX|SEM_FAILCRITICALERRORS);
+ m_hFile=WszCreateFile((LPCWSTR) m_path,
+ GENERIC_READ,
+ FILE_SHARE_READ|FILE_SHARE_DELETE,
+ NULL,
+ OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL,
+ NULL);
+ }
+
+ if (m_hFile == INVALID_HANDLE_VALUE)
+ ThrowLastError();
+
+ return m_hFile;
+}
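+// Callers must already hold the layout writer lock (see the precondition above);
+// a typical call site therefore looks like this sketch:
+//
+//   SimpleWriteLockHolder lock(m_pLayoutLock);   // writer lock first
+//   HANDLE hFile = GetFileHandle();              // opened on demand, then cached
+//
+// GetFileHandleLocking() below packages exactly this pattern for callers that
+// do not already hold the lock.
+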
+
+// Like GetFileHandle, but can be called without the PEImage being locked for writing.
+// Only intended to be called by NGen.
+HANDLE PEImage::GetFileHandleLocking()
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ if (m_hFile!=INVALID_HANDLE_VALUE)
+ return m_hFile;
+
+ SimpleWriteLockHolder lock(m_pLayoutLock);
+ return GetFileHandle();
+}
+
+void PEImage::SetFileHandle(HANDLE hFile)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ SimpleWriteLockHolder lock(m_pLayoutLock);
+ if (m_hFile == INVALID_HANDLE_VALUE)
+ {
+ m_hFile = hFile;
+ m_bOwnHandle = false;
+ }
+}
+
+HRESULT PEImage::TryOpenFile()
+{
+ STANDARD_VM_CONTRACT;
+
+ SimpleWriteLockHolder lock(m_pLayoutLock);
+
+ if (m_hFile!=INVALID_HANDLE_VALUE)
+ return S_OK;
+ {
+ ErrorModeHolder mode(SEM_NOOPENFILEERRORBOX|SEM_FAILCRITICALERRORS);
+ m_hFile=WszCreateFile((LPCWSTR) m_path,
+ GENERIC_READ,
+ FILE_SHARE_READ|FILE_SHARE_DELETE,
+ NULL,
+ OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL,
+ NULL);
+ }
+ if (m_hFile != INVALID_HANDLE_VALUE)
+ return S_OK;
+ if (GetLastError())
+ return HRESULT_FROM_WIN32(GetLastError());
+ return HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
+}
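+// Note: the trailing ERROR_FILE_NOT_FOUND above covers the unusual case where
+// CreateFile fails without setting a last error, so TryOpenFile never reports
+// success while leaving m_hFile invalid.
+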
+
+
+
+HANDLE PEImage::GetProtectingFileHandle(BOOL bProtectIfNotOpenedYet)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (m_hFile==INVALID_HANDLE_VALUE && !bProtectIfNotOpenedYet)
+ return INVALID_HANDLE_VALUE;
+
+ HANDLE hRet=INVALID_HANDLE_VALUE;
+ {
+ ErrorModeHolder mode(SEM_NOOPENFILEERRORBOX|SEM_FAILCRITICALERRORS);
+ hRet=WszCreateFile((LPCWSTR) m_path,
+ GENERIC_READ,
+ FILE_SHARE_READ,
+ NULL,
+ OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL,
+ NULL);
+ }
+ if (hRet == INVALID_HANDLE_VALUE)
+ ThrowLastError();
+ if (m_hFile!=INVALID_HANDLE_VALUE && !CompareFiles(m_hFile,hRet))
+ ThrowHR(FUSION_E_REF_DEF_MISMATCH);
+
+ return hRet;
+}
+
+BOOL PEImage::IsPtrInImage(PTR_CVOID data)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ for (int i = 0; i < IMAGE_COUNT; i++)
+ {
+ if (m_pLayouts[i] != NULL)
+ {
+ if (m_pLayouts[i]->PointerInPE(data))
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+#ifdef FEATURE_FUSION
+#ifndef DACCESS_COMPILE
+HRESULT PEImage::GetILFingerprint(IILFingerprint **ppFingerprint)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+ *ppFingerprint = NULL;
+ if (m_pILFingerprint == NULL)
+ {
+ HRESULT hr = S_OK;
+ NewHolder<PEFingerprint> pNewFingerprint;
+ EX_TRY
+ {
+ pNewFingerprint = PEFingerprint::CreatePEFingerprint(this);
+ hr = S_OK;
+ }
+ EX_CATCH_HRESULT(hr);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ if (InterlockedCompareExchangeT(&m_pILFingerprint, (PEFingerprint*)(pNewFingerprint.GetValue()), NULL) == NULL)
+ {
+ pNewFingerprint.SuppressRelease(); // Won the race
+ }
+ }
+
+ *ppFingerprint = m_pILFingerprint;
+ (*ppFingerprint)->AddRef();
+ return S_OK;
+}
+
+// NOTE: Performance-critical code paths should cache the result of this function.
+HRESULT RuntimeGetILFingerprintForPath(LPCWSTR path, IILFingerprint **ppFingerprint)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ HCORMODULE hCorModule;
+ IfFailGo(RuntimeOpenImageInternal(path, &hCorModule, NULL, MDInternalImport_NoCache));
+ {
+ ReleaseHolder<PEImage> peImage((PEImage*)hCorModule);
+ IfFailGo(peImage->GetILFingerprint(ppFingerprint));
+ }
+ hr = S_OK;
+ ErrExit:
+ return hr;
+}
+
+#endif //!DACCESS_COMPILE
+#endif //FEATURE_FUSION
+
+#if defined(FEATURE_HOSTED_BINDER) && !defined(DACCESS_COMPILE)
+PEImage * PEImage::OpenImage(
+ ICLRPrivResource * pIResource,
+ MDInternalImportFlags flags)
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr = S_OK;
+
+ PEImageHolder pPEImage;
+
+
+ IID iidResource;
+ IfFailThrow(pIResource->GetResourceType(&iidResource));
+
+ if (iidResource == __uuidof(ICLRPrivResourcePath))
+ {
+ ReleaseHolder<ICLRPrivResourcePath> pIResourcePath;
+ IfFailThrow(pIResource->QueryInterface(__uuidof(ICLRPrivResourcePath), (LPVOID*)&pIResourcePath));
+ WCHAR wzPath[_MAX_PATH];
+ DWORD cchPath = NumItems(wzPath);
+ IfFailThrow(pIResourcePath->GetPath(cchPath, &cchPath, wzPath));
+ pPEImage = PEImage::OpenImage(wzPath, flags);
+ }
+#ifndef FEATURE_PAL
+ else if (iidResource ==__uuidof(ICLRPrivResourceHMODULE))
+ {
+ ReleaseHolder<ICLRPrivResourceHMODULE> pIResourceHMODULE;
+ _ASSERTE(flags == MDInternalImport_Default);
+ IfFailThrow(pIResource->QueryInterface(__uuidof(ICLRPrivResourceHMODULE), (LPVOID*)&pIResourceHMODULE));
+ HMODULE hMod;
+ IfFailThrow(pIResourceHMODULE->GetHMODULE(&hMod));
+ pPEImage = PEImage::LoadImage(hMod);
+ }
+#endif // !FEATURE_PAL
+#ifdef FEATURE_FUSION
+ else if (iidResource == __uuidof(ICLRPrivResourceStream))
+ {
+ ReleaseHolder<ICLRPrivResourceStream> pIResourceStream;
+ IfFailThrow(pIResource->QueryInterface(__uuidof(ICLRPrivResourceStream), (LPVOID*)&pIResourceStream));
+ ReleaseHolder<IStream> pStream;
+ IfFailThrow(pIResourceStream->GetStream(__uuidof(IStream), (LPVOID*)&pStream));
+ UINT64 i64AssemblyId = static_cast<UINT64>(reinterpret_cast<UINT_PTR>(reinterpret_cast<ICLRPrivAssembly*>(pIResource)));
+ DWORD dwModuleId = static_cast<DWORD>(i64AssemblyId);
+ pPEImage = PEImage::OpenImage(pStream, i64AssemblyId, FALSE, dwModuleId, flags);
+ }
+#endif
+ else
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ return pPEImage.Extract();
+}
+#endif
+
+
diff --git a/src/vm/peimage.h b/src/vm/peimage.h
new file mode 100644
index 0000000000..8fb4efe850
--- /dev/null
+++ b/src/vm/peimage.h
@@ -0,0 +1,504 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// --------------------------------------------------------------------------------
+// PEImage.h
+//
+
+// --------------------------------------------------------------------------------
+
+
+#ifndef PEIMAGE_H_
+#define PEIMAGE_H_
+
+// --------------------------------------------------------------------------------
+// Required headers
+// --------------------------------------------------------------------------------
+
+#include "clrtypes.h"
+#include "peimagelayout.h"
+#include "sstring.h"
+#include "holder.h"
+#include "pefingerprint.h"
+
+class SimpleRWLock;
+// --------------------------------------------------------------------------------
+// Forward declarations
+// --------------------------------------------------------------------------------
+
+class Crst;
+class Thread;
+
+Thread* GetThreadNULLOk();
+
+// --------------------------------------------------------------------------------
+// PEImage is a PE file loaded by our "simulated LoadLibrary" mechanism. A PEImage
+// can be loaded either FLAT (same layout as on disk) or MAPPED (PE sections
+// mapped into virtual addresses.)
+//
+// The MAPPED format is currently limited to "IL only" images - this can be checked
+// for via PEDecoder::IsILOnlyImage.
+//
+// NOTE: PEImage will NEVER call LoadLibrary.
+// --------------------------------------------------------------------------------
+
+
+
+#define CV_SIGNATURE_RSDS 0x53445352
+
+// CodeView RSDS debug information -> PDB 7.00
+struct CV_INFO_PDB70
+{
+ DWORD magic;
+ GUID signature; // unique identifier
+ DWORD age; // an always-incrementing value
+ char path[MAX_PATH]; // zero terminated string with the name of the PDB file
+};
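+
+// For reference: CV_SIGNATURE_RSDS is just the ASCII bytes "RSDS" read as a
+// little-endian DWORD ('R'=0x52 'S'=0x53 'D'=0x44 'S'=0x53 => 0x53445352), so a
+// debug-directory reader can match it with a plain integer compare, e.g.
+// (pDebugData being a hypothetical pointer to the CodeView record):
+//
+//   if (((CV_INFO_PDB70 *)pDebugData)->magic == CV_SIGNATURE_RSDS) { /* PDB 7.0 */ }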
+
+typedef DPTR(class PEImage) PTR_PEImage;
+
+class PEImage
+{
+ friend class PEModule;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ friend class CCLRDebugManager;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+public:
+ // ------------------------------------------------------------
+ // Public constants
+ // ------------------------------------------------------------
+
+ enum
+ {
+ LAYOUT_CREATEIFNEEDED=1
+ };
+ PTR_PEImageLayout GetLayout(DWORD imageLayoutMask,DWORD flags); // AddRefs the returned layout
+ PTR_PEImageLayout GetLoadedLayout(); // no AddRef
+ PTR_PEImageLayout GetLoadedIntrospectionLayout(); // no AddRef; introspection only
+ BOOL IsOpened();
+ BOOL HasLoadedLayout();
+ BOOL HasLoadedIntrospectionLayout();
+
+
+public:
+ // ------------------------------------------------------------
+ // Public API
+ // ------------------------------------------------------------
+
+ static void Startup();
+
+ // Normally constructed PEImages do NOT share images between calls and
+ // cannot be accessed by Get methods.
+ //
+ // DO NOT USE these unless you want a private copy-on-write mapping of
+ // the file.
+
+
+
+public:
+ ~PEImage();
+ PEImage();
+
+#ifndef DACCESS_COMPILE
+ static PTR_PEImage LoadFlat(
+ const void *flat,
+ COUNT_T size);
+#ifndef FEATURE_PAL
+ static PTR_PEImage LoadImage(
+ HMODULE hMod);
+#endif // !FEATURE_PAL
+ static PTR_PEImage OpenImage(
+ LPCWSTR pPath,
+ MDInternalImportFlags flags = MDInternalImport_Default);
+
+#ifdef FEATURE_FUSION
+ static PTR_PEImage OpenImage(
+ IStream *pIStream,
+ UINT64 uStreamAsmId,
+ DWORD dwModuleId,
+ BOOL resourceFile,
+ MDInternalImportFlags flags = MDInternalImport_Default);
+#endif
+
+ // Clones the image with new flags (this is mostly about the cached / non-cached distinction)
+ void Clone(MDInternalImportFlags flags, PTR_PEImage* ppImage)
+ {
+ if (GetPath().IsEmpty())
+ {
+ AddRef();
+ *ppImage = this;
+ }
+ else
+ *ppImage = PEImage::OpenImage(GetPath(), flags);
+
+ };
+
+#ifdef FEATURE_HOSTED_BINDER
+ // pUnkResource must be one of the ICLRPrivResource* interfaces defined in CLRPrivBinding.IDL.
+ // pUnkResource will be queried for each of these to find a match, and the image
+ // will be opened from the matching resource type (path, HMODULE, or stream).
+ static PEImage * OpenImage(
+ ICLRPrivResource * pIResource,
+ MDInternalImportFlags flags = MDInternalImport_Default);
+#endif
+
+ static PTR_PEImage FindById(UINT64 uStreamAsmId, DWORD dwModuleId);
+ static PTR_PEImage FindByPath(LPCWSTR pPath);
+ static PTR_PEImage FindByShortPath(LPCWSTR pPath);
+ static PTR_PEImage FindByLongPath(LPCWSTR pPath);
+ void AddToHashMap();
+
+ void Load();
+ void SetLoadedHMODULE(HMODULE hMod);
+ void LoadNoMetaData(BOOL bIntrospection);
+ void LoadNoFile();
+ void LoadFromMapped();
+ void LoadForIntrospection();
+
+ void AllocateLazyCOWPages();
+#endif
+
+ BOOL HasID();
+ ULONG GetIDHash();
+
+ PTR_CVOID GetStrongNameSignature(COUNT_T *pSize = NULL);
+
+
+ // Reference counting for the image.
+ ULONG AddRef();
+ ULONG Release();
+
+ // Accessors
+ const SString &GetPath();
+ BOOL IsFile();
+ HANDLE GetFileHandle();
+ HANDLE GetFileHandleLocking();
+ void SetFileHandle(HANDLE hFile);
+ HRESULT TryOpenFile();
+
+ HANDLE GetProtectingFileHandle(BOOL bProtectIfNotOpenedYet);
+
+ LPCWSTR GetPathForErrorMessages();
+
+ // Equality
+ BOOL Equals(PEImage *pImage);
+ static ULONG HashStreamIds(UINT64 id1, DWORD id2);
+
+ // Hashing utilities. (These require a flat version of the file, and
+ // will open one if necessary.)
+
+#ifndef DACCESS_COMPILE
+ void GetImageBits(DWORD layout, SBuffer &result);
+#endif
+
+ void ComputeHash(ALG_ID algorithm, SBuffer &result);
+ CHECK CheckHash(ALG_ID algorithm, const void *pbHash, COUNT_T cbHash);
+
+ void GetMVID(GUID *pMvid);
+ const BOOL HasV1Metadata();
+ IMDInternalImport* GetMDImport();
+ BOOL MDImportLoaded();
+ IMDInternalImport* GetNativeMDImport(BOOL loadAllowed = TRUE);
+
+ BOOL HasSecurityDirectory();
+ BOOL HasContents() ;
+ BOOL HasNativeHeader() ;
+ BOOL IsPtrInImage(PTR_CVOID data);
+ CHECK CheckFormat();
+
+ // Check utilities
+ CHECK CheckILFormat();
+#ifdef FEATURE_PREJIT
+ CHECK CheckNativeFormat();
+#endif // FEATURE_PREJIT
+ static CHECK CheckCanonicalFullPath(const SString &path);
+ static CHECK CheckStartup();
+ PTR_CVOID GetMetadata(COUNT_T *pSize = NULL);
+ void GetHashedStrongNameSignature(SBuffer &result);
+
+#ifndef FEATURE_PAL
+ static void GetPathFromDll(HINSTANCE hMod, SString &result);
+#endif // !FEATURE_PAL
+ static LocaleID GetFileSystemLocale();
+ static BOOL PathEquals(const SString &p1, const SString &p2);
+ BOOL IsTrustedNativeImage(){LIMITED_METHOD_CONTRACT; return m_bIsTrustedNativeImage;};
+ void SetIsTrustedNativeImage(){LIMITED_METHOD_CONTRACT; m_bIsTrustedNativeImage=TRUE;};
+ BOOL IsNativeImageInstall(){LIMITED_METHOD_CONTRACT; return m_bIsNativeImageInstall;}
+ void SetIsNativeImageInstall(){LIMITED_METHOD_CONTRACT; m_bIsNativeImageInstall=TRUE;};
+
+ void SetModuleFileNameHintForDAC();
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ const SString &GetModuleFileNameHintForDAC();
+#endif
+
+ const BOOL HasNTHeaders();
+ const BOOL HasCorHeader();
+ const BOOL HasReadyToRunHeader();
+ void SetPassiveDomainOnly();
+ BOOL PassiveDomainOnly();
+ BOOL IsReferenceAssembly();
+#ifdef FEATURE_PREJIT
+ const BOOL GetNativeILHasSecurityDirectory();
+ const BOOL IsNativeILILOnly();
+ const BOOL IsNativeILDll();
+ void GetNativeILPEKindAndMachine(DWORD* pdwKind, DWORD* pdwMachine);
+ PTR_CVOID GetNativeManifestMetadata(COUNT_T *pSize = NULL);
+#endif
+ const BOOL HasDirectoryEntry(int entry);
+ const mdToken GetEntryPointToken();
+ const DWORD GetCorHeaderFlags();
+ const BOOL IsILOnly();
+ const BOOL IsDll();
+ const WORD GetSubsystem();
+ BOOL IsFileLocked();
+ const BOOL HasStrongNameSignature();
+#ifndef DACCESS_COMPILE
+ const HRESULT VerifyStrongName(DWORD* verifyOutputFlags);
+#endif
+
+ BOOL IsStrongNameSigned();
+ BOOL IsIbcOptimized();
+ BOOL Has32BitNTHeaders();
+
+ void VerifyIsAssembly();
+ void VerifyIsNIAssembly();
+
+#ifndef FEATURE_CORECLR
+ BOOL IsReportedToUsageLog();
+ void SetReportedToUsageLog();
+#ifndef DACCESS_COMPILE
+ HRESULT GetILFingerprint(IILFingerprint **ppFingerprint);
+#endif //!DACCESS_COMPILE
+#endif //!FEATURE_CORECLR
+
+ static void GetAll(SArray<PEImage*> &images);
+
+private:
+#ifndef DACCESS_COMPILE
+ // Get or create the layout corresponding to the mask, with an AddRef
+ PTR_PEImageLayout GetLayoutInternal(DWORD imageLayoutMask, DWORD flags);
+#endif
+ // Get an existing layout corresponding to the mask, no AddRef
+ PTR_PEImageLayout GetExistingLayoutInternal(DWORD imageLayoutMask);
+
+ void OpenMDImport();
+ void OpenNativeMDImport();
+ // ------------------------------------------------------------
+ // Private routines
+ // ------------------------------------------------------------
+
+ void Init(LPCWSTR pPath);
+ void Init(IStream* pStream, UINT64 uStreamAsmId,
+ DWORD dwModuleId, BOOL resourceFile);
+
+ void VerifyIsILOrNIAssembly(BOOL fIL);
+
+ struct PEImageLocator
+ {
+#ifdef FEATURE_FUSION
+ BOOL m_fIsIStream;
+ DWORD m_dwStreamModuleId;
+ UINT64 m_StreamAsmId;
+#endif
+
+ LPCWSTR m_pPath;
+
+#ifdef FEATURE_FUSION
+ PEImageLocator(LPCWSTR pPath)
+ : m_fIsIStream(FALSE), m_pPath(pPath)
+ {
+ }
+
+ PEImageLocator(UINT64 uStreamAsmId, DWORD dwModuleId)
+ : m_fIsIStream(TRUE), m_dwStreamModuleId(dwModuleId), m_StreamAsmId(uStreamAsmId)
+ {
+ }
+
+ PEImageLocator(PEImage * pImage)
+ : m_fIsIStream(pImage->m_fIsIStream),
+ m_dwStreamModuleId(pImage->m_dwStreamModuleId),
+ m_StreamAsmId(pImage->m_StreamAsmId),
+ m_pPath(pImage->m_path.GetUnicode())
+ {
+ }
+#else // FEATURE_FUSION
+ PEImageLocator(LPCWSTR pPath)
+ : m_pPath(pPath)
+ {
+ }
+
+ PEImageLocator(PEImage * pImage)
+ : m_pPath(pImage->m_path.GetUnicode())
+ {
+ }
+#endif // FEATURE_FUSION
+ };
+
+ static BOOL CompareImage(UPTR image1, UPTR image2);
+#ifdef FEATURE_MIXEDMODE
+ static BOOL CompareIJWDataBase(UPTR base, UPTR mapping);
+#endif // FEATURE_MIXEDMODE
+
+ void DECLSPEC_NORETURN ThrowFormat(HRESULT hr);
+
+ static CHECK CheckLayoutFormat(PEDecoder *pe);
+
+ // ------------------------------------------------------------
+ // Instance members
+ // ------------------------------------------------------------
+
+ SString m_path;
+ LONG m_refCount;
+
+ // This variable holds the module name.
+ // It is only used by the DAC to remap fusion-loaded modules back to
+ // disk IL. This really is a workaround; the real fix is for the fusion loader
+ // hook (a public hosting API) to take an additional file name hint.
+ // We are piggybacking on the fact that the module name is the same as the file name!
+ //
+ SString m_sModuleFileNameHintUsedByDac; // This is only used by DAC
+private:
+ BOOL m_bIsTrustedNativeImage;
+ BOOL m_bIsNativeImageInstall;
+ BOOL m_bPassiveDomainOnly;
+#ifndef FEATURE_CORECLR
+ BOOL m_fReportedToUsageLog;
+#endif // !FEATURE_CORECLR
+#ifdef FEATURE_LAZY_COW_PAGES
+ BOOL m_bAllocatedLazyCOWPages;
+#endif // FEATURE_LAZY_COW_PAGES
+
+protected:
+
+ enum
+ {
+ IMAGE_FLAT=0,
+ IMAGE_MAPPED=1,
+ IMAGE_LOADED=2,
+ IMAGE_LOADED_FOR_INTROSPECTION=3,
+ IMAGE_COUNT=4
+ };
+
+ SimpleRWLock *m_pLayoutLock;
+ PTR_PEImageLayout m_pLayouts[IMAGE_COUNT] ;
+ BOOL m_bInHashMap;
+#ifndef DACCESS_COMPILE
+ void SetLayout(DWORD dwLayout, PTR_PEImageLayout pLayout);
+#endif // DACCESS_COMPILE
+
+
+#ifdef METADATATRACKER_DATA
+ class MetaDataTracker *m_pMDTracker;
+#endif // METADATATRACKER_DATA
+
+ IMDInternalImport* m_pMDImport;
+ IMDInternalImport* m_pNativeMDImport;
+
+#ifdef FEATURE_FUSION
+ UINT64 m_StreamAsmId;
+ DWORD m_dwStreamModuleId;
+ BOOL m_fIsIStream;
+#endif
+
+private:
+
+
+ // ------------------------------------------------------------
+ // Static members
+ // ------------------------------------------------------------
+
+ static CrstStatic s_hashLock;
+
+ static PtrHashMap *s_Images;
+
+ HANDLE m_hFile;
+ bool m_bOwnHandle;
+
+ BOOL m_bSignatureInfoCached;
+ HRESULT m_hrSignatureInfoStatus;
+ DWORD m_dwSignatureInfo;
+#ifdef FEATURE_MIXEDMODE
+ //@TODO:workaround: Remove this when we have one PEImage per mapped image,
+ //@TODO:workaround: and move the lock there
+ // This is for IJW thunk initialization, as it is no longer guaranteed
+ // that the initialization will occur under the loader lock.
+ static CrstStatic s_ijwHashLock;
+ static PtrHashMap *s_ijwFixupDataHash;
+
+public:
+ class IJWFixupData
+ {
+ private:
+ Crst m_lock;
+ void *m_base;
+ DWORD m_flags;
+ PTR_LoaderHeap m_DllThunkHeap;
+
+ // the fixup for the next iteration in FixupVTables
+ // we use it to make sure that we do not try to fix up the same entry twice
+ // if there was a pass that was aborted in the middle
+ COUNT_T m_iNextFixup;
+ COUNT_T m_iNextMethod;
+
+ enum {
+ e_FIXED_UP = 0x1
+ };
+
+ public:
+ IJWFixupData(void *pBase);
+ ~IJWFixupData();
+ void *GetBase() { LIMITED_METHOD_CONTRACT; return m_base; }
+ Crst *GetLock() { LIMITED_METHOD_CONTRACT; return &m_lock; }
+ BOOL IsFixedUp() { LIMITED_METHOD_CONTRACT; return m_flags & e_FIXED_UP; }
+ void SetIsFixedUp() { LIMITED_METHOD_CONTRACT; m_flags |= e_FIXED_UP; }
+ PTR_LoaderHeap GetThunkHeap();
+ void MarkMethodFixedUp(COUNT_T iFixup, COUNT_T iMethod);
+ BOOL IsMethodFixedUp(COUNT_T iFixup, COUNT_T iMethod);
+ };
+
+ static IJWFixupData *GetIJWData(void *pBase);
+ static PTR_LoaderHeap GetDllThunkHeap(void *pBase);
+ static void UnloadIJWModule(void *pBase);
+#endif //FEATURE_MIXEDMODE
+private:
+ DWORD m_dwPEKind;
+ DWORD m_dwMachine;
+ BOOL m_fCachedKindAndMachine;
+
+#ifdef FEATURE_APTCA
+ BOOL m_fMayBeConditionalAptca;
+#endif // FEATURE_APTCA
+
+#ifdef FEATURE_FUSION
+ PEFingerprint *m_pILFingerprint; // has to be the real type (as opposed to an interface) so we can delete it
+#endif // FEATURE_FUSION
+
+public:
+ void CachePEKindAndMachine();
+ void GetPEKindAndMachine(DWORD* pdwKind, DWORD* pdwMachine);
+#ifdef FEATURE_FUSION
+ PEKIND GetFusionProcessorArchitecture();
+#endif
+
+#ifdef FEATURE_APTCA
+ inline BOOL MayBeConditionalAptca();
+ inline void SetIsNotConditionalAptca();
+#endif // FEATURE_APTCA
+};
+
+FORCEINLINE void PEImageRelease(PEImage *i)
+{
+ WRAPPER_NO_CONTRACT;
+ i->Release();
+}
+
+typedef Wrapper<PEImage *, DoNothing, PEImageRelease> PEImageHolder;
+
+// ================================================================================
+// Inline definitions
+// ================================================================================
+
+#include "peimage.inl"
+
+#endif // PEIMAGE_H_
diff --git a/src/vm/peimage.inl b/src/vm/peimage.inl
new file mode 100644
index 0000000000..3014c96a1c
--- /dev/null
+++ b/src/vm/peimage.inl
@@ -0,0 +1,952 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// --------------------------------------------------------------------------------
+// PEImage.inl
+//
+
+// --------------------------------------------------------------------------------
+
+#ifndef PEIMAGE_INL_
+#define PEIMAGE_INL_
+
+#include "peimage.h"
+#include "../dlls/mscorrc/resource.h"
+
+inline ULONG PEImage::AddRef()
+{
+ CONTRACT(ULONG)
+ {
+ PRECONDITION(m_refCount>0 && m_refCount < COUNT_T_MAX);
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACT_END;
+
+ RETURN (static_cast<ULONG>(FastInterlockIncrement(&m_refCount)));
+}
+
+inline const SString &PEImage::GetPath()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ return m_path;
+}
+
+inline void PEImage::SetModuleFileNameHintForDAC()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ // Grab the module name only for triage dumps, where full paths are excluded
+ // because they may contain PII data.
+ // m_sModuleFileNameHintUsedByDac will just point at the first character of the module name.
+ const WCHAR* pStartPath = m_path.GetUnicode();
+ COUNT_T nChars = m_path.GetCount();
+ if (pStartPath != NULL && nChars > 0 && nChars <= MAX_PATH)
+ {
+ const WCHAR* pChar = pStartPath + nChars;
+ nChars = 0;
+ while ((pChar >= pStartPath) && (*pChar != L'\\'))
+ {
+ pChar--;
+ nChars++;
+ }
+ pChar++;
+ m_sModuleFileNameHintUsedByDac.SetPreallocated(pChar, nChars);
+ }
+}
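+// For example, with m_path == W("C:\\Windows\\Foo\\Bar.dll") the backwards scan
+// above leaves m_sModuleFileNameHintUsedByDac aliasing the "Bar.dll" tail of
+// m_path (SetPreallocated points at the existing buffer rather than copying).
+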
+
+#ifdef DACCESS_COMPILE
+inline const SString &PEImage::GetModuleFileNameHintForDAC()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_sModuleFileNameHintUsedByDac;
+}
+#endif
+
+
+
+inline BOOL PEImage::IsFile()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return !m_path.IsEmpty();
+}
+
+#ifndef DACCESS_COMPILE
+inline void PEImage::SetLayout(DWORD dwLayout, PEImageLayout* pLayout)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(dwLayout<IMAGE_COUNT);
+ _ASSERTE(m_pLayouts[dwLayout]==NULL);
+ FastInterlockExchangePointer((m_pLayouts+dwLayout),pLayout);
+}
+#endif // DACCESS_COMPILE
+inline PTR_PEImageLayout PEImage::GetLoadedLayout()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(m_pLayouts[IMAGE_LOADED]!=NULL);
+ return m_pLayouts[IMAGE_LOADED]; //no addref
+}
+
+inline PTR_PEImageLayout PEImage::GetLoadedIntrospectionLayout()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(m_pLayouts[IMAGE_LOADED_FOR_INTROSPECTION]!=NULL);
+ return m_pLayouts[IMAGE_LOADED_FOR_INTROSPECTION]; //no addref
+}
+
+
+//
+// GetExistingLayoutInternal - get a layout corresponding to the specified mask, or NULL if none.
+// Does not take any locks or call AddRef.
+//
+// Arguments:
+// imageLayoutMask - bits from PEImageLayout specifying which layouts the caller would be
+// interested in getting
+//
+// Return value:
+// a PEImageLayout of a type matching one of the bits specified in the mask, or NULL if
+// none exists yet. Does not call AddRef on the returned value.
+//
+inline PTR_PEImageLayout PEImage::GetExistingLayoutInternal(DWORD imageLayoutMask)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ PTR_PEImageLayout pRetVal = NULL;
+
+ if (imageLayoutMask&PEImageLayout::LAYOUT_LOADED)
+ pRetVal=m_pLayouts[IMAGE_LOADED];
+ if (pRetVal==NULL && (imageLayoutMask & PEImageLayout::LAYOUT_LOADED_FOR_INTROSPECTION))
+ pRetVal=m_pLayouts[IMAGE_LOADED_FOR_INTROSPECTION];
+ if (pRetVal==NULL && (imageLayoutMask & PEImageLayout::LAYOUT_MAPPED))
+ pRetVal=m_pLayouts[IMAGE_MAPPED];
+ if (pRetVal==NULL && (imageLayoutMask & PEImageLayout::LAYOUT_FLAT))
+ pRetVal=m_pLayouts[IMAGE_FLAT];
+
+ return pRetVal;
+}
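+// In other words, when several mask bits are set the lookup prefers the "most
+// loaded" form: LOADED, then LOADED_FOR_INTROSPECTION, then MAPPED, then FLAT.
+// So, assuming LAYOUT_ANY sets all of these bits, GetExistingLayoutInternal(
+// PEImageLayout::LAYOUT_ANY) returns the loaded layout when one exists and only
+// falls back to mapped or flat otherwise.
+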
+
+
+inline BOOL PEImage::HasLoadedLayout()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_pLayouts[IMAGE_LOADED]!=NULL;
+}
+
+inline BOOL PEImage::IsOpened()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pLayouts[IMAGE_LOADED]!=NULL ||m_pLayouts[IMAGE_MAPPED]!=NULL || m_pLayouts[IMAGE_FLAT] !=NULL || m_pLayouts[IMAGE_LOADED_FOR_INTROSPECTION]!=NULL;
+}
+
+
+inline BOOL PEImage::HasLoadedIntrospectionLayout() //introspection only!!!
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pLayouts[IMAGE_LOADED_FOR_INTROSPECTION]!=NULL;
+}
+
+
+#ifdef FEATURE_PREJIT
+inline CHECK PEImage::CheckNativeFormat()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ CHECK(GetLoadedLayout()->CheckNativeFormat());
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ CHECK(pLayout->CheckNativeFormat());
+ }
+ CHECK_OK;
+};
+#endif // FEATURE_PREJIT
+
+inline BOOL PEImage::IsReferenceAssembly()
+{
+ CONTRACTL
+ {
+ PRECONDITION(HasCorHeader());
+ }
+ CONTRACTL_END;
+
+ IMDInternalImport* mdImport = this->GetMDImport();
+ HRESULT hr = mdImport->GetCustomAttributeByName(TokenFromRid(1, mdtAssembly),
+ g_ReferenceAssemblyAttribute,
+ NULL,
+ NULL);
+ IfFailThrow(hr);
+ if (hr == S_OK) {
+ return TRUE;
+ }
+ _ASSERTE(hr == S_FALSE);
+ return FALSE;
+}
+
+
+inline const BOOL PEImage::HasNTHeaders()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->HasNTHeaders();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->HasNTHeaders();
+ }
+}
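+
+// The if/else shape above repeats for most accessors in this file: prefer the
+// already-loaded layout (cheap, no AddRef), otherwise materialize a layout via
+// GetLayout(..., LAYOUT_CREATEIFNEEDED), whose holder releases it on scope exit.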
+
+inline const BOOL PEImage::HasCorHeader()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->HasCorHeader();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->HasCorHeader();
+ }
+}
+
+inline const BOOL PEImage::HasReadyToRunHeader()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->HasReadyToRunHeader();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->HasReadyToRunHeader();
+ }
+}
+
+inline void PEImage::SetPassiveDomainOnly()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_bPassiveDomainOnly=TRUE;
+}
+
+inline BOOL PEImage::PassiveDomainOnly()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_bPassiveDomainOnly;
+}
+
+#ifdef FEATURE_PREJIT
+
+inline const BOOL PEImage::GetNativeILHasSecurityDirectory()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->GetNativeILHasSecurityDirectory();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->GetNativeILHasSecurityDirectory();
+ }
+}
+#endif
+
+inline const BOOL PEImage::HasDirectoryEntry(int entry)
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->HasDirectoryEntry(entry);
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->HasDirectoryEntry(entry);
+ }
+}
+
+inline const mdToken PEImage::GetEntryPointToken()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ {
+ PTR_PEImageLayout pLayout = GetLoadedLayout();
+ if (!pLayout->HasManagedEntryPoint())
+ return mdTokenNil;
+ return pLayout->GetEntryPointToken();
+ }
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ if (!pLayout->HasManagedEntryPoint())
+ return mdTokenNil;
+ return pLayout->GetEntryPointToken();
+ }
+}
+
+inline const DWORD PEImage::GetCorHeaderFlags()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (HasLoadedLayout())
+ {
+ PTR_PEImageLayout pLayout = GetLoadedLayout();
+ return VAL32(pLayout->GetCorHeader()->Flags);
+ }
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return VAL32(pLayout->GetCorHeader()->Flags);
+ }
+}
+
+inline BOOL PEImage::MDImportLoaded()
+{
+ return m_pMDImport != NULL;
+}
+
+inline const BOOL PEImage::HasV1Metadata()
+{
+ WRAPPER_NO_CONTRACT;
+ return GetMDImport()->GetMetadataStreamVersion()==MD_STREAM_VER_1X;
+}
+
+inline const BOOL PEImage::IsILOnly()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->IsILOnly();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->IsILOnly();
+ }
+}
+
+inline const WORD PEImage::GetSubsystem()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->GetSubsystem();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->GetSubsystem();
+ }
+}
+
+#ifdef FEATURE_PREJIT
+inline const BOOL PEImage::IsNativeILILOnly()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->IsNativeILILOnly();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->IsNativeILILOnly();
+ }
+}
+
+inline void PEImage::GetNativeILPEKindAndMachine(DWORD* pdwKind, DWORD* pdwMachine)
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ GetLoadedLayout()->GetNativeILPEKindAndMachine(pdwKind, pdwMachine);
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ pLayout->GetNativeILPEKindAndMachine(pdwKind, pdwMachine);
+ }
+}
+
+inline const BOOL PEImage::IsNativeILDll()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->IsNativeILDll();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->IsNativeILDll();
+ }
+}
+#endif // FEATURE_PREJIT
+
+inline const BOOL PEImage::IsDll()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->IsDll();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->IsDll();
+ }
+}
+
+inline const BOOL PEImage::HasStrongNameSignature()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->HasStrongNameSignature();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->HasStrongNameSignature();
+ }
+}
+
+#ifndef DACCESS_COMPILE
+
+#if !defined(FEATURE_CORECLR) || defined(CROSSGEN_COMPILE)
+inline const HRESULT PEImage::VerifyStrongName(DWORD* verifyOutputFlags)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(verifyOutputFlags);
+ if (m_bSignatureInfoCached)
+ {
+ if (SUCCEEDED(m_hrSignatureInfoStatus))
+ *verifyOutputFlags=m_dwSignatureInfo;
+ return m_hrSignatureInfoStatus;
+ }
+
+ BOOL result = FALSE;
+
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_FLAT,0));
+ if(pLayout!=NULL)
+ {
+ result = StrongNameSignatureVerificationFromImage((BYTE *) pLayout->GetBase(), pLayout->GetSize(),
+ SN_INFLAG_INSTALL|SN_INFLAG_ALL_ACCESS,
+ verifyOutputFlags);
+ }
+ else
+ {
+ CONSISTENCY_CHECK(!GetPath().IsEmpty());
+ _ASSERTE(IsFileLocked());
+ result = StrongNameSignatureVerification(GetPath(),
+ SN_INFLAG_INSTALL|SN_INFLAG_ALL_ACCESS|SN_INFLAG_RUNTIME,
+ verifyOutputFlags);
+ }
+
+ HRESULT hr=result?S_OK: StrongNameErrorInfo();
+
+ if (SUCCEEDED(hr) || !Exception::IsTransient(hr))
+ {
+ m_hrSignatureInfoStatus=hr;
+ m_dwSignatureInfo=*verifyOutputFlags;
+ m_bSignatureInfoCached=TRUE;
+ }
+ return hr;
+}
+#endif // !FEATURE_CORECLR || CROSSGEN_COMPILE
+
+#endif // !DACCESS_COMPILE
+
+inline BOOL PEImage::IsStrongNameSigned()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->IsStrongNameSigned();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->IsStrongNameSigned();
+ }
+}
+
+inline BOOL PEImage::IsIbcOptimized()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->GetNativeILIsIbcOptimized();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->GetNativeILIsIbcOptimized();
+ }
+}
+
+#ifndef DACCESS_COMPILE
+
+inline void PEImage::GetImageBits(DWORD layout, SBuffer &result)
+{
+ WRAPPER_NO_CONTRACT;
+ PEImageLayoutHolder pLayout(GetLayout(layout,LAYOUT_CREATEIFNEEDED));
+ BYTE* buffer=result.OpenRawBuffer(pLayout->GetSize());
+ PREFIX_ASSUME(buffer != NULL);
+ memcpyNoGCRefs(buffer,pLayout->GetBase(),pLayout->GetSize());
+ result.CloseRawBuffer(pLayout->GetSize());
+}
+
+#endif
+
+
+
+#ifdef FEATURE_PREJIT
+inline PTR_CVOID PEImage::GetNativeManifestMetadata(COUNT_T *pSize)
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->GetNativeManifestMetadata(pSize);
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->GetNativeManifestMetadata(pSize);
+ }
+}
+#endif
+
+inline PTR_CVOID PEImage::GetMetadata(COUNT_T *pSize)
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->GetMetadata(pSize);
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->GetMetadata(pSize);
+ }
+}
+
+inline BOOL PEImage::HasNativeHeader()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->HasNativeHeader();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->HasNativeHeader();
+ }
+}
+
+inline BOOL PEImage::HasContents()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->HasContents();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->HasContents();
+ }
+}
+
+
+inline CHECK PEImage::CheckFormat()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ CHECK(GetLoadedLayout()->CheckFormat());
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ CHECK(pLayout->CheckFormat());
+ }
+ CHECK_OK;
+}
+inline PTR_CVOID PEImage::GetStrongNameSignature(COUNT_T *pSize)
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->GetStrongNameSignature(pSize);
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->GetStrongNameSignature(pSize);
+ }
+}
+
+inline void PEImage::Init(LPCWSTR pPath)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ m_path = pPath;
+ m_path.Normalize();
+ SetModuleFileNameHintForDAC();
+}
+#ifndef DACCESS_COMPILE
+
+#if !defined(FEATURE_CORECLR)
+/*static*/
+inline PTR_PEImage PEImage::FindByLongPath(LPCWSTR pPath)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(s_hashLock.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ InlineSString<MAX_PATH> sLongPath;
+ // Note: GetLongPathName returns the number of characters written NOT INCLUDING the
+ // null character on success, and on failure returns the buffer size required
+ // INCLUDING the null. This means the result can never equal MAX_PATH -
+ // it must be either greater or less than MAX_PATH.
+ COUNT_T nLen = WszGetLongPathName(pPath, sLongPath.OpenUnicodeBuffer(MAX_PATH-1), MAX_PATH);
+ CONSISTENCY_CHECK(nLen != MAX_PATH);
+
+ // If the buffer was insufficient, try again with a reallocated buffer
+ if (nLen > MAX_PATH)
+ {
+ // Close the buffer before reopening
+ sLongPath.CloseBuffer();
+ INDEBUG(SIZE_T nOldLen = nLen;)
+ nLen = WszGetLongPathName(pPath, sLongPath.OpenUnicodeBuffer(nLen-1), nLen);
+ CONSISTENCY_CHECK(nLen == (nOldLen - 1));
+ }
+ sLongPath.CloseBuffer(nLen);
+
+ // Check for any kind of error other than an insufficient buffer result.
+ if (nLen == 0)
+ {
+ HRESULT hr=HRESULT_FROM_WIN32(GetLastError());
+ if(Exception::IsTransient(hr))
+ ThrowHR(hr);
+ return (PEImage*)INVALIDENTRY;
+ }
+ return FindByPath(sLongPath);
+}
+
+/*static*/
+inline PTR_PEImage PEImage::FindByShortPath(LPCWSTR pPath)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(s_hashLock.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ InlineSString<MAX_PATH> sShortPath;
+ // Note: GetShortPathName returns the number of characters written NOT INCLUDING the
+ // null character on success, and on failure returns the buffer size required
+ // INCLUDING the null. This means the result can never equal MAX_PATH -
+ // it must be either greater or less than MAX_PATH.
+ COUNT_T nLen = WszGetShortPathName(pPath, sShortPath.OpenUnicodeBuffer(MAX_PATH-1), MAX_PATH);
+ CONSISTENCY_CHECK(nLen != MAX_PATH);
+
+ // If the buffer was insufficient, try again with a reallocated buffer
+ if (nLen > MAX_PATH)
+ {
+ // Close the buffer before reopening
+ sShortPath.CloseBuffer();
+ INDEBUG(SIZE_T nOldLen = nLen;)
+ nLen = WszGetShortPathName(pPath, sShortPath.OpenUnicodeBuffer(nLen-1), nLen);
+ CONSISTENCY_CHECK(nLen == (nOldLen - 1));
+ }
+ sShortPath.CloseBuffer(nLen);
+
+ // Check for any kind of error other than an insufficient buffer result.
+ if (nLen == 0)
+ {
+ HRESULT hr=HRESULT_FROM_WIN32(GetLastError());
+ if(Exception::IsTransient(hr))
+ ThrowHR(hr);
+ return (PEImage*)INVALIDENTRY;
+ }
+ return FindByPath(sShortPath);
+}
+#endif // !FEATURE_CORECLR
+
+/*static*/
+inline PTR_PEImage PEImage::FindByPath(LPCWSTR pPath)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(s_hashLock.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ int CaseHashHelper(const WCHAR *buffer, COUNT_T count, LocaleID lcid);
+
+ PEImageLocator locator(pPath);
+#ifdef FEATURE_CASE_SENSITIVE_FILESYSTEM
+ DWORD dwHash=path.Hash();
+#else
+ DWORD dwHash = CaseHashHelper(pPath, (COUNT_T) wcslen(pPath), PEImage::GetFileSystemLocale());
+#endif
+ return (PEImage *) s_Images->LookupValue(dwHash, &locator);
+
+}
+
+/* static */
+inline PTR_PEImage PEImage::OpenImage(LPCWSTR pPath, MDInternalImportFlags flags /* = MDInternalImport_Default */)
+{
+ BOOL fUseCache = !((flags & MDInternalImport_NoCache) == MDInternalImport_NoCache);
+
+ if (!fUseCache)
+ {
+ PEImageHolder pImage(new PEImage);
+ pImage->Init(pPath);
+ return dac_cast<PTR_PEImage>(pImage.Extract());
+ }
+
+ CrstHolder holder(&s_hashLock);
+
+ PEImage* found = FindByPath(pPath);
+
+#if !defined(FEATURE_CORECLR)
+ if(found == (PEImage*) INVALIDENTRY && (flags & MDInternalImport_CheckLongPath))
+ found=FindByLongPath(pPath);
+
+ if(found == (PEImage*) INVALIDENTRY && (flags & MDInternalImport_CheckShortPath))
+ found=FindByShortPath(pPath);
+#endif
+
+ if (found == (PEImage*) INVALIDENTRY)
+ {
+ // We did not find the entry in the Cache, and we've been asked to only use the cache.
+ if ((flags & MDInternalImport_OnlyLookInCache) == MDInternalImport_OnlyLookInCache)
+ {
+ return NULL;
+ }
+
+ PEImageHolder pImage(new PEImage);
+#ifdef FEATURE_PREJIT
+ if (flags & MDInternalImport_TrustedNativeImage)
+ pImage->SetIsTrustedNativeImage();
+ if (flags & MDInternalImport_NativeImageInstall)
+ pImage->SetIsNativeImageInstall();
+#endif
+ pImage->Init(pPath);
+
+ pImage->AddToHashMap();
+ return dac_cast<PTR_PEImage>(pImage.Extract());
+ }
+
+ found->AddRef();
+
+ return dac_cast<PTR_PEImage>(found);
+}
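+
+// Illustrative caching behavior (the path below is hypothetical): two
+// default-flag opens of the same path return the same cached PEImage with its
+// refcount bumped, while MDInternalImport_NoCache always creates a private
+// instance:
+//
+//   PEImageHolder a(PEImage::OpenImage(W("C:\\x\\a.dll")));
+//   PEImageHolder b(PEImage::OpenImage(W("C:\\x\\a.dll")));    // same object as a
+//   PEImageHolder c(PEImage::OpenImage(W("C:\\x\\a.dll"),
+//                                      MDInternalImport_NoCache)); // fresh, uncached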
+#endif
+
+inline BOOL PEImage::IsFileLocked()
+{
+ WRAPPER_NO_CONTRACT;
+ return (m_pLayouts[IMAGE_FLAT])!=NULL || (m_pLayouts[IMAGE_MAPPED])!=NULL ;
+}
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_FUSION
+/* static */
+inline PTR_PEImage PEImage::FindById(UINT64 uStreamAsmId, DWORD dwModuleId)
+{
+ PEImageLocator locator(uStreamAsmId, dwModuleId);
+ CrstHolder holder(&s_hashLock);
+ PEImage* found = (PEImage *) s_Images->LookupValue(HashStreamIds(uStreamAsmId, dwModuleId), &locator);
+ if (found == (PEImage*) INVALIDENTRY)
+ return NULL;
+ found->AddRef();
+ return dac_cast<PTR_PEImage>(found);
+}
+
+/* static */
+inline PTR_PEImage PEImage::OpenImage(IStream *pIStream, UINT64 uStreamAsmId,
+ DWORD dwModuleId, BOOL resourceFile, MDInternalImportFlags flags /* = MDInternalImport_Default */)
+{
+ BOOL fUseCache = !((flags & MDInternalImport_NoCache) == MDInternalImport_NoCache);
+
+ if (!fUseCache)
+ {
+ PEImageHolder pImage(new PEImage());
+ pImage->Init(pIStream, uStreamAsmId, dwModuleId, resourceFile);
+ return dac_cast<PTR_PEImage>(pImage.Extract());
+ }
+
+
+ DWORD hash = HashStreamIds(uStreamAsmId, dwModuleId);
+ PEImageLocator locator(uStreamAsmId,dwModuleId);
+ CrstHolder holder(&s_hashLock);
+ PEImage* found = (PEImage *) s_Images->LookupValue(hash, &locator);
+ if (found != (PEImage*) INVALIDENTRY)
+ {
+ found->AddRef();
+ return dac_cast<PTR_PEImage>(found);
+ }
+ PEImageHolder pImage(new PEImage());
+ pImage->Init(pIStream, uStreamAsmId, dwModuleId, resourceFile);
+
+ pImage->AddToHashMap();
+ return dac_cast<PTR_PEImage>(pImage.Extract());
+}
+#endif // FEATURE_FUSION
+
+inline void PEImage::AddToHashMap()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(s_hashLock.OwnedByCurrentThread());
+ s_Images->InsertValue(GetIDHash(),this);
+ m_bInHashMap=TRUE;
+}
+
+#endif
+
+
+
+
+inline BOOL PEImage::Has32BitNTHeaders()
+{
+ WRAPPER_NO_CONTRACT;
+ if (HasLoadedLayout())
+ return GetLoadedLayout()->Has32BitNTHeaders();
+ else
+ {
+ PEImageLayoutHolder pLayout(GetLayout(PEImageLayout::LAYOUT_ANY,LAYOUT_CREATEIFNEEDED));
+ return pLayout->Has32BitNTHeaders();
+ }
+}
+
+inline BOOL PEImage::HasID()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_FUSION
+ if (m_fIsIStream)
+ return TRUE;
+#endif
+
+ return !GetPath().IsEmpty();
+}
+
+inline ULONG PEImage::GetIDHash()
+{
+ CONTRACT(ULONG)
+ {
+ PRECONDITION(HasID());
+ MODE_ANY;
+ GC_NOTRIGGER;
+ THROWS;
+ }
+ CONTRACT_END;
+
+#ifdef FEATURE_FUSION
+ if (m_fIsIStream)
+ RETURN HashStreamIds(m_StreamAsmId, m_dwStreamModuleId);
+#endif
+
+#ifdef FEATURE_CASE_SENSITIVE_FILESYSTEM
+ RETURN m_path.Hash();
+#else
+ RETURN m_path.HashCaseInsensitive(PEImage::GetFileSystemLocale());
+#endif
+}
+
+inline void PEImage::CachePEKindAndMachine()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Do nothing if we have cached the information already
+ if(m_fCachedKindAndMachine)
+ return;
+
+ PEImageLayoutHolder pLayout;
+ if (HasLoadedLayout())
+ {
+ pLayout.Assign(GetLoadedLayout(), false);
+ }
+ else
+ {
+ pLayout.Assign(GetLayout(PEImageLayout::LAYOUT_MAPPED|PEImageLayout::LAYOUT_FLAT,
+ PEImage::LAYOUT_CREATEIFNEEDED));
+ }
+
+ // Compute the result into local variables first
+ DWORD dwPEKind, dwMachine;
+ pLayout->GetPEKindAndMachine(&dwPEKind, &dwMachine);
+
+ // Write the final result into the lock-free cache.
+ m_dwPEKind = dwPEKind;
+ m_dwMachine = dwMachine;
+ MemoryBarrier();
+ m_fCachedKindAndMachine = TRUE;
+}
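+// The store / MemoryBarrier / flag sequence above is intended as a one-way
+// publication: the writer fills m_dwPEKind and m_dwMachine, fences, and only
+// then sets m_fCachedKindAndMachine, so a reader that sees the flag as TRUE
+// should also see fully written values. Racing writers are harmless because
+// every writer computes identical values for the same image.
+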
+
+inline void PEImage::GetPEKindAndMachine(DWORD* pdwKind, DWORD* pdwMachine)
+{
+ WRAPPER_NO_CONTRACT;
+ CachePEKindAndMachine();
+ if (pdwKind)
+ *pdwKind = m_dwPEKind;
+ if (pdwMachine)
+ *pdwMachine = m_dwMachine;
+}
+
+#ifdef FEATURE_APTCA
+inline BOOL PEImage::MayBeConditionalAptca()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fMayBeConditionalAptca;
+}
+
+inline void PEImage::SetIsNotConditionalAptca()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fMayBeConditionalAptca = FALSE;
+}
+#endif // FEATURE_APTCA
+
+#ifndef FEATURE_CORECLR
+inline BOOL PEImage::IsReportedToUsageLog()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fReportedToUsageLog;
+}
+
+inline void PEImage::SetReportedToUsageLog()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fReportedToUsageLog = TRUE;
+}
+#endif // !FEATURE_CORECLR
+
+#ifndef DACCESS_COMPILE
+inline void PEImage::AllocateLazyCOWPages()
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_LAZY_COW_PAGES
+ if (!m_bAllocatedLazyCOWPages && CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ZapLazyCOWPagesEnabled))
+ {
+ ::AllocateLazyCOWPages(GetLoadedLayout());
+ m_bAllocatedLazyCOWPages = TRUE;
+ }
+#endif
+}
+#endif
+
+#endif // PEIMAGE_INL_
diff --git a/src/vm/peimagelayout.cpp b/src/vm/peimagelayout.cpp
new file mode 100644
index 0000000000..0178664ebd
--- /dev/null
+++ b/src/vm/peimagelayout.cpp
@@ -0,0 +1,855 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+//
+
+#include "common.h"
+#include "peimagelayout.h"
+#include "peimagelayout.inl"
+#include "pefingerprint.h"
+
+#ifndef DACCESS_COMPILE
+PEImageLayout* PEImageLayout::CreateFlat(const void *flat, COUNT_T size,PEImage* pOwner)
+{
+ STANDARD_VM_CONTRACT;
+ return new RawImageLayout(flat,size,pOwner);
+}
+
+#ifdef FEATURE_FUSION
+PEImageLayout* PEImageLayout::CreateFromStream(IStream* pIStream,PEImage* pOwner)
+{
+ STANDARD_VM_CONTRACT;
+ return new StreamImageLayout(pIStream,pOwner);
+}
+#endif
+
+PEImageLayout* PEImageLayout::CreateFromHMODULE(HMODULE hModule,PEImage* pOwner, BOOL bTakeOwnership)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ return new RawImageLayout(hModule,pOwner,bTakeOwnership,TRUE);
+}
+
+PEImageLayout* PEImageLayout::LoadFromFlat(PEImageLayout* pflatimage)
+{
+ STANDARD_VM_CONTRACT;
+ return new ConvertedImageLayout(pflatimage);
+}
+
+PEImageLayout* PEImageLayout::Load(PEImage* pOwner, BOOL bNTSafeLoad, BOOL bThrowOnError)
+{
+ STANDARD_VM_CONTRACT;
+
+#if defined(CROSSGEN_COMPILE) || defined(FEATURE_PAL)
+ return PEImageLayout::Map(pOwner->GetFileHandle(), pOwner);
+#else
+ PEImageLayoutHolder pAlloc(new LoadedImageLayout(pOwner,bNTSafeLoad,bThrowOnError));
+ if (pAlloc->GetBase()==NULL)
+ return NULL;
+ return pAlloc.Extract();
+#endif
+}
+
+PEImageLayout* PEImageLayout::LoadFlat(HANDLE hFile,PEImage* pOwner)
+{
+ STANDARD_VM_CONTRACT;
+ return new FlatImageLayout(hFile,pOwner);
+}
+
+PEImageLayout* PEImageLayout::Map(HANDLE hFile, PEImage* pOwner)
+{
+ CONTRACT(PEImageLayout*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pOwner));
+ POSTCONDITION(CheckPointer(RETVAL));
+ POSTCONDITION(RETVAL->CheckFormat());
+ }
+ CONTRACT_END;
+
+ PEImageLayoutHolder pAlloc(new MappedImageLayout(hFile,pOwner));
+ if (pAlloc->GetBase()==NULL)
+ {
+ // cross-platform image or a bad image
+ PEImageLayoutHolder pFlat(new FlatImageLayout(hFile, pOwner));
+ if (!pFlat->CheckFormat())
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+
+ pAlloc=new ConvertedImageLayout(pFlat);
+ }
+ else
+ if(!pAlloc->CheckFormat())
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ RETURN pAlloc.Extract();
+}
+
+#ifdef FEATURE_PREJIT
+//To force base relocation on Vista (which uses ASLR), unmask IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE
+//(0x40) for OptionalHeader.DllCharacteristics
+void PEImageLayout::ApplyBaseRelocations()
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+ // Note that this is not a universal routine for applying relocations. It handles only the subset
+ // required by NGen images. Also, it assumes that the image format is valid.
+ //
+
+ SSIZE_T delta = (SIZE_T) GetBase() - (SIZE_T) GetPreferredBase();
+
+ // Nothing to do - image is loaded at preferred base
+ if (delta == 0)
+ return;
+
+ LOG((LF_LOADER, LL_INFO100, "PEImage: Applying base relocations (preferred: %x, actual: %x)\n",
+ GetPreferredBase(), GetBase()));
+
+ COUNT_T dirSize;
+ TADDR dir = GetDirectoryEntryData(IMAGE_DIRECTORY_ENTRY_BASERELOC, &dirSize);
+
+ // Minimize number of calls to VirtualProtect by keeping a whole section unprotected at a time.
+ BYTE * pWriteableRegion = NULL;
+ SIZE_T cbWriteableRegion = 0;
+ DWORD dwOldProtection = 0;
+
+ COUNT_T dirPos = 0;
+ while (dirPos < dirSize)
+ {
+ PIMAGE_BASE_RELOCATION r = (PIMAGE_BASE_RELOCATION)(dir + dirPos);
+
+ DWORD rva = VAL32(r->VirtualAddress);
+
+ BYTE * pageAddress = (BYTE *)GetBase() + rva;
+
+ // Check whether the page is outside the unprotected region
+ if ((SIZE_T)(pageAddress - pWriteableRegion) >= cbWriteableRegion)
+ {
+ // Restore the protection
+ if (dwOldProtection != 0)
+ {
+ if (!ClrVirtualProtect(pWriteableRegion, cbWriteableRegion,
+ dwOldProtection, &dwOldProtection))
+ ThrowLastError();
+
+ dwOldProtection = 0;
+ }
+
+ IMAGE_SECTION_HEADER *pSection = RvaToSection(rva);
+ PREFIX_ASSUME(pSection != NULL);
+
+ pWriteableRegion = (BYTE*)GetRvaData(VAL32(pSection->VirtualAddress));
+ cbWriteableRegion = VAL32(pSection->SizeOfRawData);
+
+ // Unprotect the section if it is not writable
+ if (((pSection->Characteristics & VAL32(IMAGE_SCN_MEM_WRITE)) == 0))
+ {
+ if (!ClrVirtualProtect(pWriteableRegion, cbWriteableRegion,
+ PAGE_READWRITE, &dwOldProtection))
+ ThrowLastError();
+ }
+ }
+
+ COUNT_T fixupsSize = VAL32(r->SizeOfBlock);
+
+ USHORT *fixups = (USHORT *) (r + 1);
+
+ _ASSERTE(fixupsSize > sizeof(IMAGE_BASE_RELOCATION));
+ _ASSERTE((fixupsSize - sizeof(IMAGE_BASE_RELOCATION)) % 2 == 0);
+
+ COUNT_T fixupsCount = (fixupsSize - sizeof(IMAGE_BASE_RELOCATION)) / 2;
+
+ _ASSERTE((BYTE *)(fixups + fixupsCount) <= (BYTE *)(dir + dirSize));
+
+ for (COUNT_T fixupIndex = 0; fixupIndex < fixupsCount; fixupIndex++)
+ {
+ USHORT fixup = VAL16(fixups[fixupIndex]);
+
+ BYTE * address = pageAddress + (fixup & 0xfff);
+
+ switch (fixup>>12)
+ {
+ case IMAGE_REL_BASED_PTR:
+ *(TADDR *)address += delta;
+ break;
+
+#ifdef _TARGET_ARM_
+ case IMAGE_REL_BASED_THUMB_MOV32:
+ PutThumb2Mov32((UINT16 *)address, GetThumb2Mov32((UINT16 *)address) + delta);
+ break;
+#endif
+
+ case IMAGE_REL_BASED_ABSOLUTE:
+ //no adjustment
+ break;
+
+ default:
+ _ASSERTE(!"Unhandled reloc type!");
+ }
+ }
+
+ dirPos += fixupsSize;
+ }
+ _ASSERTE(dirSize == dirPos);
+
+ if (dwOldProtection != 0)
+ {
+ // Restore the protection
+ if (!ClrVirtualProtect(pWriteableRegion, cbWriteableRegion,
+ dwOldProtection, &dwOldProtection))
+ ThrowLastError();
+ }
+}
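+
+// Worked example with illustrative numbers: if an NGen image prefers base
+// 0x64000000 but is actually mapped at 0x71000000, then delta == 0x0D000000.
+// An IMAGE_REL_BASED_PTR entry whose block has VirtualAddress 0x2000 and whose
+// fixup word encodes offset 0x123 rewrites the pointer at GetBase()+0x2123 by
+// adding 0x0D000000 to it.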
+#endif // FEATURE_PREJIT
+
+#ifndef FEATURE_CORECLR
+// Event Tracing for Windows is used to log data for performance and functional testing purposes.
+// The events in this structure are used to measure the time taken by PE image mapping. This is useful to reliably measure the
+// performance of the assembly loader by subtracting the time taken by the possibly I/O-intensive work of PE image mapping.
+struct ETWLoaderMappingPhaseHolder { // Special-purpose holder structure to ensure the LoaderMappingPhaseEnd ETW event is fired when returning from a function.
+ StackSString ETWCodeBase;
+ DWORD _dwAppDomainId;
+ BOOL initialized;
+
+ ETWLoaderMappingPhaseHolder(){
+ LIMITED_METHOD_CONTRACT;
+ _dwAppDomainId = ETWAppDomainIdNotAvailable;
+ initialized = FALSE;
+ }
+
+ void Init(DWORD dwAppDomainId, SString wszCodeBase) {
+ _dwAppDomainId = dwAppDomainId;
+
+ EX_TRY
+ {
+ ETWCodeBase.Append(wszCodeBase);
+ ETWCodeBase.Normalize(); // Ensures that the later cast to LPCWSTR does not throw.
+ }
+ EX_CATCH
+ {
+ ETWCodeBase.Clear();
+ }
+ EX_END_CATCH(RethrowTransientExceptions)
+
+ FireEtwLoaderMappingPhaseStart(_dwAppDomainId, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable, ETWCodeBase.IsEmpty() ? NULL : (LPCWSTR)ETWCodeBase, NULL, GetClrInstanceId());
+
+ initialized = TRUE;
+ }
+
+ ~ETWLoaderMappingPhaseHolder() {
+ if (initialized) {
+ FireEtwLoaderMappingPhaseEnd(_dwAppDomainId, ETWLoadContextNotAvailable, ETWFieldUnused, ETWLoaderLoadTypeNotAvailable, ETWCodeBase.IsEmpty() ? NULL : (LPCWSTR)ETWCodeBase, NULL, GetClrInstanceId());
+ }
+ }
+};
+#endif // FEATURE_CORECLR
+
+RawImageLayout::RawImageLayout(const void *flat, COUNT_T size,PEImage* pOwner)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+ m_pOwner=pOwner;
+ m_Layout=LAYOUT_FLAT;
+
+ PEFingerprintVerificationHolder verifyHolder(pOwner); // Do not remove: This holder ensures the IL file hasn't changed since the runtime started making assumptions about it.
+
+#ifndef FEATURE_CORECLR
+ ETWLoaderMappingPhaseHolder loaderMappingPhaseHolder;
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_PRIVATEBINDING_KEYWORD)) {
+ loaderMappingPhaseHolder.Init(GetAppDomain() ? GetAppDomain()->GetId().m_dwId : ETWAppDomainIdNotAvailable, GetPath());
+ }
+#endif // FEATURE_CORECLR
+
+ if (size)
+ {
+ HandleHolder mapping(WszCreateFileMapping(INVALID_HANDLE_VALUE, NULL,
+ PAGE_READWRITE, 0,
+ size, NULL));
+ if (mapping==NULL)
+ ThrowLastError();
+ m_DataCopy.Assign(CLRMapViewOfFile(mapping, FILE_MAP_ALL_ACCESS, 0, 0, 0));
+ if(m_DataCopy==NULL)
+ ThrowLastError();
+ memcpy(m_DataCopy,flat,size);
+ flat=m_DataCopy;
+ }
+ TESTHOOKCALL(ImageMapped(GetPath(),flat,IM_FLAT));
+ Init((void*)flat,size);
+}
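+
+// Note on the flat constructor above: the caller's buffer is copied into a fresh anonymous,
+// pagefile-backed mapping (CreateFileMapping over INVALID_HANDLE_VALUE), so the resulting
+// layout's lifetime is independent of whatever memory the caller passed in.
+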
+RawImageLayout::RawImageLayout(const void *mapped, PEImage* pOwner, BOOL bTakeOwnership, BOOL bFixedUp)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+ m_pOwner=pOwner;
+ m_Layout=LAYOUT_MAPPED;
+
+ PEFingerprintVerificationHolder verifyHolder(pOwner); // Do not remove: This holder ensures the IL file hasn't changed since the runtime started making assumptions about it.
+
+#ifndef FEATURE_CORECLR
+ ETWLoaderMappingPhaseHolder loaderMappingPhaseHolder;
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_PRIVATEBINDING_KEYWORD)) {
+ loaderMappingPhaseHolder.Init(GetAppDomain() ? GetAppDomain()->GetId().m_dwId : ETWAppDomainIdNotAvailable, GetPath());
+ }
+#endif // FEATURE_CORECLR
+
+ if (bTakeOwnership)
+ {
+#ifndef FEATURE_PAL
+ WCHAR wszDllName[MAX_PATH];
+ WszGetModuleFileName((HMODULE)mapped, wszDllName, MAX_PATH);
+ wszDllName[MAX_PATH - 1] = W('\0');
+ m_LibraryHolder=CLRLoadLibraryEx(wszDllName,NULL,GetLoadWithAlteredSearchPathFlag());
+#else // !FEATURE_PAL
+ _ASSERTE(!"bTakeOwnership Should not be used on FEATURE_PAL");
+#endif // !FEATURE_PAL
+ }
+
+ TESTHOOKCALL(ImageMapped(GetPath(),mapped,bFixedUp?IM_IMAGEMAP|IM_FIXEDUP:IM_IMAGEMAP));
+ IfFailThrow(Init((void*)mapped,(bool)(bFixedUp!=FALSE)));
+}
+
+ConvertedImageLayout::ConvertedImageLayout(PEImageLayout* source)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+ m_Layout=LAYOUT_LOADED;
+ m_pOwner=source->m_pOwner;
+ _ASSERTE(!source->IsMapped());
+
+ PEFingerprintVerificationHolder verifyHolder(source->m_pOwner); // Do not remove: This holder ensures the IL file hasn't changed since the runtime started making assumptions about it.
+
+#ifndef FEATURE_CORECLR
+ ETWLoaderMappingPhaseHolder loaderMappingPhaseHolder;
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_PRIVATEBINDING_KEYWORD)) {
+ loaderMappingPhaseHolder.Init(GetAppDomain() ? GetAppDomain()->GetId().m_dwId : ETWAppDomainIdNotAvailable, GetPath());
+ }
+#endif // FEATURE_CORECLR
+
+ if (!source->HasNTHeaders())
+ EEFileLoadException::Throw(GetPath(), COR_E_BADIMAGEFORMAT);
+ LOG((LF_LOADER, LL_INFO100, "PEImage: Opening manually mapped stream\n"));
+
+
+ m_FileMap.Assign(WszCreateFileMapping(INVALID_HANDLE_VALUE, NULL,
+ PAGE_READWRITE, 0,
+ source->GetVirtualSize(), NULL));
+ if (m_FileMap == NULL)
+ ThrowLastError();
+
+
+ m_FileView.Assign(CLRMapViewOfFileEx(m_FileMap, FILE_MAP_ALL_ACCESS, 0, 0, 0,
+ (void *) source->GetPreferredBase()));
+ if (m_FileView == NULL)
+ m_FileView.Assign(CLRMapViewOfFile(m_FileMap, FILE_MAP_ALL_ACCESS, 0, 0, 0));
+
+ if (m_FileView == NULL)
+ ThrowLastError();
+
+ source->LayoutILOnly(m_FileView, TRUE); //@TODO should be false for streams
+ TESTHOOKCALL(ImageMapped(GetPath(),m_FileView,IM_IMAGEMAP));
+ IfFailThrow(Init(m_FileView));
+
+#ifdef CROSSGEN_COMPILE
+ if (HasNativeHeader())
+ ApplyBaseRelocations();
+#endif
+}
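+
+// In outline, the conversion above: (1) create an anonymous read-write mapping sized to the
+// image's virtual size, (2) try to place it at the image's preferred base and fall back to
+// any address, (3) let LayoutILOnly copy the headers and sections from the flat source to
+// their virtual addresses. Base relocations are applied afterwards only when crossgen is
+// compiling a native image; for a plain IL-only image the preferred base appears to be just
+// an optimization here.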
+
+MappedImageLayout::MappedImageLayout(HANDLE hFile, PEImage* pOwner)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+ m_Layout=LAYOUT_MAPPED;
+ m_pOwner=pOwner;
+
+ // If mapping was requested, try to do SEC_IMAGE mapping
+ LOG((LF_LOADER, LL_INFO100, "PEImage: Opening OS mapped %S (hFile %p)\n", (LPCWSTR) GetPath(), hFile));
+
+ PEFingerprintVerificationHolder verifyHolder(pOwner); // Do not remove: This holder ensures the IL file hasn't changed since the runtime started making assumptions about it.
+
+#ifndef FEATURE_PAL
+#ifndef FEATURE_CORECLR
+ ETWLoaderMappingPhaseHolder loaderMappingPhaseHolder;
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_PRIVATEBINDING_KEYWORD)) {
+ loaderMappingPhaseHolder.Init(GetAppDomain() ? GetAppDomain()->GetId().m_dwId : ETWAppDomainIdNotAvailable, GetPath());
+ }
+#endif // FEATURE_CORECLR
+
+ // Let OS map file for us
+
+ // This may fail on e.g. cross-platform (32/64) loads.
+ m_FileMap.Assign(WszCreateFileMapping(hFile, NULL, PAGE_READONLY | SEC_IMAGE, 0, 0, NULL));
+ if (m_FileMap == NULL)
+ {
+#ifndef CROSSGEN_COMPILE
+#ifdef FEATURE_CORECLR
+
+ // There is no reflection-only load on CoreCLR and so we can always throw an error here.
+ // It is important on Windows Phone. All assemblies that we load must have SEC_IMAGE set
+ // so that the OS can perform signature verification.
+ ThrowLastError();
+
+#else // FEATURE_CORECLR
+
+ // We need to ensure any signature validation errors are caught if Extended Secure Boot (ESB) is on.
+ // Also, we have to always throw here during NGen to ensure that the signature validation is never skipped.
+ if (GetLastError() != ERROR_BAD_EXE_FORMAT || IsCompilationProcess())
+ {
+ ThrowLastError();
+ }
+
+#endif // FEATURE_CORECLR
+#endif // CROSSGEN_COMPILE
+
+ return;
+ }
+
+#ifdef _DEBUG
+ // Force relocs by occupying the preferred base while the actual mapping is performed
+ CLRMapViewHolder forceRelocs;
+ if (PEDecoder::GetForceRelocs())
+ {
+ forceRelocs.Assign(CLRMapViewOfFile(m_FileMap, 0, 0, 0, 0));
+ }
+#endif // _DEBUG
+
+#ifdef FEATURE_MIXEDMODE
+ //
+ // For our preliminary loads, we don't want to take the preferred base address. We want to leave
+ // that open for a LoadLibrary. So, we first do a phony MapViewOfFile to occupy the base
+ // address temporarily.
+ //
+ // Note that this is bad if we are racing another thread which is doing a LoadLibrary. We
+ // may want to tweak this logic, but it's pretty difficult to tell MapViewOfFileEx to map
+ // a file NOT at its preferred base address. Hopefully the ultimate solution here will be
+ // just mapping the file once.
+ //
+ // There are two distinct cases that this code takes care of:
+ //
+ // * NGened IL-only assembly: The IL image will get mapped here and LoadLibrary will be called
+ // on the NGen image later. If we need to, we can avoid creating the fake view on VISTA in this
+ // case. ASLR will map the IL image and NGen image at different addresses for free.
+ //
+ // * Mixed-mode assembly (either NGened or not): The mixed-mode image will get mapped here and
+ // LoadLibrary will be called on the same image again later. Note that ASLR does not help
+ // in this case. The fake view has to be created even on VISTA in this case to avoid relocations.
+ //
+ CLRMapViewHolder temp;
+
+ // We don't want to map at the preferred address, so have the temporary view take it.
+ temp.Assign(CLRMapViewOfFile(m_FileMap, 0, 0, 0, 0));
+ if (temp == NULL)
+ ThrowLastError();
+#endif // FEATURE_MIXEDMODE
+ m_FileView.Assign(CLRMapViewOfFile(m_FileMap, 0, 0, 0, 0));
+ if (m_FileView == NULL)
+ ThrowLastError();
+ TESTHOOKCALL(ImageMapped(GetPath(),m_FileView,IM_IMAGEMAP));
+ IfFailThrow(Init((void *) m_FileView));
+
+#ifdef CROSSGEN_COMPILE
+ //Do base relocation for PE. Unlike LoadLibrary, MapViewOfFile will not do that for us even with SEC_IMAGE
+ if (pOwner->IsTrustedNativeImage())
+ {
+ // This should never happen on a correctly set up system, but do a quick check anyway to
+ // avoid running too far with bogus data
+
+ if (!HasCorHeader())
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+
+ // For phone, we need to be permissive of MSIL assemblies pretending to be native images,
+ // to support forced fall back to JIT
+ // if (!HasNativeHeader())
+ // ThrowHR(COR_E_BADIMAGEFORMAT);
+
+ if (HasNativeHeader())
+ {
+ if (!IsNativeMachineFormat())
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+
+ ApplyBaseRelocations();
+ }
+ }
+ else
+#endif
+ if (!IsNativeMachineFormat() && !IsI386())
+ {
+ //can't rely on the image
+ Reset();
+ return;
+ }
+
+#ifdef _DEBUG
+ if (forceRelocs != NULL)
+ {
+ forceRelocs.Release();
+
+ if (CheckNTHeaders()) {
+ // Reserve the space so nobody can use it. A potential bug is likely to
+ // result in a plain AV this way. It is not a good idea to use the original
+ // mapping for the reservation since it would lock the file on the disk.
+
+ // ignore any errors
+ ClrVirtualAlloc((void*)GetPreferredBase(), GetVirtualSize(), MEM_RESERVE, PAGE_NOACCESS);
+ }
+ }
+#endif // _DEBUG
+
+#else //!FEATURE_PAL
+
+#ifdef FEATURE_PREJIT
+ if (pOwner->IsTrustedNativeImage())
+ {
+ m_FileView = PAL_LOADLoadPEFile(hFile);
+ if (m_FileView == NULL)
+ ThrowHR(E_FAIL); // we don't have any indication of what kind of failure. Possibly a corrupt image.
+
+ LOG((LF_LOADER, LL_INFO1000, "PEImage: image %S (hFile %p) mapped @ %p\n",
+ (LPCWSTR) GetPath(), hFile, (void*)m_FileView));
+
+ TESTHOOKCALL(ImageMapped(GetPath(),m_FileView,IM_IMAGEMAP));
+ IfFailThrow(Init((void *) m_FileView));
+
+ // This should never happen on a correctly set up system, but do a quick check anyway to
+ // avoid running too far with bogus data
+#ifdef MDIL
+ // In MDIL we need to be permissive of MSIL assemblies pretending to be native images,
+ // to support forced fall back to JIT
+ if ((HasNativeHeader() && !IsNativeMachineFormat()) || !HasCorHeader())
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+
+ if (HasNativeHeader())
+ ApplyBaseRelocations();
+#else
+ if (!IsNativeMachineFormat() || !HasCorHeader() || !HasNativeHeader())
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+
+ //Do base relocation for PE, if necessary.
+ ApplyBaseRelocations();
+#endif // MDIL
+ }
+#else //FEATURE_PREJIT
+ //Do nothing. The file cannot be mapped unless it is an ngen image.
+#endif //FEATURE_PREJIT
+
+#endif // !FEATURE_PAL
+}
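+
+// Summary of the strategy above: on Windows, map with SEC_IMAGE so the OS itself lays out
+// the sections, applies relocations and (on CoreCLR) verifies the signature; when the
+// SEC_IMAGE mapping cannot be created and we do not throw, the constructor returns with an
+// empty layout, which callers evidently treat as a signal to fall back to another layout
+// kind. On the PAL side only trusted native images are mapped, via PAL_LOADLoadPEFile, and
+// base relocations are applied manually since nothing else will do it for us there.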
+
+#if !defined(CROSSGEN_COMPILE) && !defined(FEATURE_PAL)
+LoadedImageLayout::LoadedImageLayout(PEImage* pOwner, BOOL bNTSafeLoad, BOOL bThrowOnError)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pOwner));
+ }
+ CONTRACTL_END;
+
+ m_Layout=LAYOUT_LOADED;
+ m_pOwner=pOwner;
+
+ PEFingerprintVerificationHolder verifyHolder(pOwner); // Do not remove: This holder ensures the IL file hasn't changed since the runtime started making assumptions about it.
+
+#ifndef FEATURE_CORECLR
+ ETWLoaderMappingPhaseHolder loaderMappingPhaseHolder;
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_PRIVATEBINDING_KEYWORD)) {
+ loaderMappingPhaseHolder.Init(GetAppDomain() ? GetAppDomain()->GetId().m_dwId : ETWAppDomainIdNotAvailable, GetPath());
+ }
+#endif // FEATURE_CORECLR
+
+ DWORD dwFlags = GetLoadWithAlteredSearchPathFlag();
+ if (bNTSafeLoad)
+ dwFlags|=DONT_RESOLVE_DLL_REFERENCES;
+
+ m_Module = CLRLoadLibraryEx(pOwner->GetPath(), NULL, dwFlags);
+ if (m_Module == NULL)
+ {
+ if (!bThrowOnError)
+ return;
+
+ // Fetch the HRESULT upfront before anybody gets a chance to corrupt it
+ HRESULT hr = HRESULT_FROM_GetLastError();
+ EEFileLoadException::Throw(pOwner->GetPath(), hr, NULL);
+ }
+ TESTHOOKCALL(ImageMapped(GetPath(),m_Module,IM_LOADLIBRARY));
+ IfFailThrow(Init(m_Module,true));
+
+ LOG((LF_LOADER, LL_INFO1000, "PEImage: Opened HMODULE %S\n", (LPCWSTR) GetPath()));
+}
+#endif // !CROSSGEN_COMPILE && !FEATURE_PAL
+
+FlatImageLayout::FlatImageLayout(HANDLE hFile, PEImage* pOwner)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pOwner));
+ }
+ CONTRACTL_END;
+ m_Layout=LAYOUT_FLAT;
+ m_pOwner=pOwner;
+ LOG((LF_LOADER, LL_INFO100, "PEImage: Opening flat %S\n", (LPCWSTR) GetPath()));
+
+ PEFingerprintVerificationHolder verifyHolder(pOwner); // Do not remove: This holder ensures the IL file hasn't changed since the runtime started making assumptions about it.
+
+#ifndef FEATURE_CORECLR
+ ETWLoaderMappingPhaseHolder loaderMappingPhaseHolder;
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_PRIVATEBINDING_KEYWORD)) {
+ loaderMappingPhaseHolder.Init(GetAppDomain() ? GetAppDomain()->GetId().m_dwId : ETWAppDomainIdNotAvailable, GetPath());
+ }
+#endif // FEATURE_CORECLR
+
+ COUNT_T size = SafeGetFileSize(hFile, NULL);
+ if (size == 0xffffffff && GetLastError() != NOERROR)
+ {
+ ThrowLastError();
+ }
+
+ // It's okay if resource files are length zero
+ if (size > 0)
+ {
+ m_FileMap.Assign(WszCreateFileMapping(hFile, NULL, PAGE_READONLY, 0, 0, NULL));
+ if (m_FileMap == NULL)
+ ThrowLastError();
+
+ m_FileView.Assign(CLRMapViewOfFile(m_FileMap, FILE_MAP_READ, 0, 0, 0));
+ if (m_FileView == NULL)
+ ThrowLastError();
+ }
+ TESTHOOKCALL(ImageMapped(GetPath(),m_FileView,IM_FLAT));
+ Init(m_FileView, size);
+}
+
+#ifdef FEATURE_FUSION
+StreamImageLayout::StreamImageLayout(IStream* pIStream,PEImage* pOwner)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ m_Layout=LAYOUT_FLAT;
+ m_pOwner=pOwner;
+
+ PEFingerprintVerificationHolder verifyHolder(pOwner); // Do not remove: This holder ensures the IL file hasn't changed since the runtime started making assumptions about it.
+
+#ifndef FEATURE_CORECLR
+ ETWLoaderMappingPhaseHolder loaderMappingPhaseHolder;
+ if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_PRIVATEBINDING_KEYWORD)) {
+ loaderMappingPhaseHolder.Init(GetAppDomain() ? GetAppDomain()->GetId().m_dwId : ETWAppDomainIdNotAvailable, GetPath());
+ }
+#endif // FEATURE_CORECLR
+
+ STATSTG statStg;
+ IfFailThrow(pIStream->Stat(&statStg, STATFLAG_NONAME));
+ if (statStg.cbSize.u.HighPart > 0)
+ ThrowHR(COR_E_FILELOAD);
+
+ DWORD cbRead = 0;
+
+ // Resources files may have zero length (and would be mapped as FLAT)
+ if (statStg.cbSize.u.LowPart) {
+ m_FileMap.Assign(WszCreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, 0,
+ statStg.cbSize.u.LowPart, NULL));
+ if (m_FileMap == NULL)
+ ThrowWin32(GetLastError());
+
+ m_FileView.Assign(CLRMapViewOfFile(m_FileMap, FILE_MAP_ALL_ACCESS, 0, 0, 0));
+
+ if (m_FileView == NULL)
+ ThrowWin32(GetLastError());
+
+ HRESULT hr = pIStream->Read(m_FileView, statStg.cbSize.u.LowPart, &cbRead);
+ if (hr == S_FALSE)
+ hr = COR_E_FILELOAD;
+
+ IfFailThrow(hr);
+ }
+ TESTHOOKCALL(ImageMapped(GetPath(),m_FileView,IM_FLAT));
+ Init(m_FileView,(COUNT_T)cbRead);
+}
+#endif // FEATURE_FUSION
+
+#ifdef MDIL
+BOOL PEImageLayout::GetILSizeFromMDILCLRCtlData(DWORD* pdwActualILSize)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ IMAGE_SECTION_HEADER* pMDILSection = FindSection(".mdil");
+ if (pMDILSection)
+ {
+ TADDR pMDILSectionStart = GetRvaData(VAL32(pMDILSection->VirtualAddress));
+ MDILHeader* mdilHeader = (MDILHeader*)pMDILSectionStart;
+ ClrCtlData* pClrCtlData = (ClrCtlData*)(pMDILSectionStart + mdilHeader->hdrSize);
+ *pdwActualILSize = pClrCtlData->ilImageSize;
+ return TRUE;
+ }
+ return FALSE;
+}
+#endif // MDIL
+
+#endif // !DACCESS_COMPILE
+
+
+
+#ifdef DACCESS_COMPILE
+void
+PEImageLayout::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ WRAPPER_NO_CONTRACT;
+ DAC_ENUM_VTHIS();
+ EMEM_OUT(("MEM: %p PEFile\n", dac_cast<TADDR>(this)));
+ PEDecoder::EnumMemoryRegions(flags,false);
+}
+#endif //DACCESS_COMPILE
+
+#if defined(_WIN64) && !defined(DACCESS_COMPILE)
+
+#define IMAGE_HEADER_3264_SIZE_DIFF (sizeof(IMAGE_NT_HEADERS64) - sizeof(IMAGE_NT_HEADERS32))
+
+// This function is expected to be in sync with LdrpCorFixupImage in the OS loader implementation (//depot/winmain/minkernel/ntdll/ldrcor.c).
+bool PEImageLayout::ConvertILOnlyPE32ToPE64Worker()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsILOnly()); // This should be called for IL-Only images
+ PRECONDITION(Has32BitNTHeaders()); // Image should be marked to have a PE32 header only.
+ PRECONDITION(IsPlatformNeutral());
+ }
+ CONTRACTL_END;
+
+ PBYTE pImage = (PBYTE)GetBase();
+
+ IMAGE_DOS_HEADER *pDosHeader = (IMAGE_DOS_HEADER*)pImage;
+ IMAGE_NT_HEADERS32 *pHeader32 = GetNTHeaders32();
+ IMAGE_NT_HEADERS64 *pHeader64 = GetNTHeaders64();
+
+ _ASSERTE(&pHeader32->OptionalHeader.Magic == &pHeader64->OptionalHeader.Magic);
+ _ASSERTE(pHeader32->OptionalHeader.Magic == VAL16(IMAGE_NT_OPTIONAL_HDR32_MAGIC));
+
+ // Move the data directory and section headers down IMAGE_HEADER_3264_SIZE_DIFF bytes.
+ PBYTE pStart32 = (PBYTE) &pHeader32->OptionalHeader.DataDirectory[0];
+ PBYTE pStart64 = (PBYTE) &pHeader64->OptionalHeader.DataDirectory[0];
+ _ASSERTE(pStart64 - pStart32 == IMAGE_HEADER_3264_SIZE_DIFF);
+
+ PBYTE pEnd32 = (PBYTE) (IMAGE_FIRST_SECTION(pHeader32)
+ + VAL16(pHeader32->FileHeader.NumberOfSections));
+
+ // On AMD64, used for a 12-byte jump thunk + the original entry point offset.
+ if (((pEnd32 + IMAGE_HEADER_3264_SIZE_DIFF /* delta in headers to compute end of 64bit header */) - pImage) > OS_PAGE_SIZE ) {
+ // This should never happen. An IL_ONLY image should have at most 3 sections.
+ _ASSERTE(!"ConvertILOnlyPE32ToPE64Worker: Insufficient room to rewrite headers as PE64");
+ return false;
+ }
+
+ memmove(pStart64, pStart32, pEnd32 - pStart32);
+
+ // Move the tail fields in reverse order.
+ pHeader64->OptionalHeader.NumberOfRvaAndSizes = pHeader32->OptionalHeader.NumberOfRvaAndSizes;
+ pHeader64->OptionalHeader.LoaderFlags = pHeader32->OptionalHeader.LoaderFlags;
+ pHeader64->OptionalHeader.SizeOfHeapCommit = VAL64(VAL32(pHeader32->OptionalHeader.SizeOfHeapCommit));
+ pHeader64->OptionalHeader.SizeOfHeapReserve = VAL64(VAL32(pHeader32->OptionalHeader.SizeOfHeapReserve));
+ pHeader64->OptionalHeader.SizeOfStackCommit = VAL64(VAL32(pHeader32->OptionalHeader.SizeOfStackCommit));
+ pHeader64->OptionalHeader.SizeOfStackReserve = VAL64(VAL32(pHeader32->OptionalHeader.SizeOfStackReserve));
+
+ // One more field that's not the same
+ pHeader64->OptionalHeader.ImageBase = VAL64(VAL32(pHeader32->OptionalHeader.ImageBase));
+
+ // The optional header changed size.
+ pHeader64->FileHeader.SizeOfOptionalHeader = VAL16(VAL16(pHeader64->FileHeader.SizeOfOptionalHeader) + 16);
+ pHeader64->OptionalHeader.Magic = VAL16(IMAGE_NT_OPTIONAL_HDR64_MAGIC);
+
+ // Now we just have to make a new 16-byte PPLABEL_DESCRIPTOR for the new entry point address & gp
+ PBYTE pEnd64 = (PBYTE) (IMAGE_FIRST_SECTION(pHeader64) + VAL16(pHeader64->FileHeader.NumberOfSections));
+ pHeader64->OptionalHeader.AddressOfEntryPoint = VAL32((ULONG) (pEnd64 - pImage));
+
+ // Should be PE32+ now
+ _ASSERTE(!Has32BitNTHeaders());
+
+ return true;
+}
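+
+// Why the two header flavors differ by exactly 16 bytes (IMAGE_HEADER_3264_SIZE_DIFF):
+// relative to the PE32 optional header, PE32+ drops the 4-byte BaseOfData field and widens
+// ImageBase plus the four SizeOfStack*/SizeOfHeap* fields from 4 to 8 bytes each:
+//
+//     -4 (BaseOfData) + 5 * 4 (widened fields) = +16
+//
+// Everything from DataDirectory onward keeps its shape, which is why one memmove of the
+// directories and section headers plus the reverse-order field fixups above suffices.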
+
+bool PEImageLayout::ConvertILOnlyPE32ToPE64()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsILOnly()); // This should be called for IL-Only images
+ PRECONDITION(Has32BitNTHeaders());
+ }
+ CONTRACTL_END;
+
+ bool fConvertedToPE64 = false;
+
+ // Only handle platform neutral IL assemblies
+ if (!IsPlatformNeutral())
+ {
+ return false;
+ }
+
+ PBYTE pageBase = (PBYTE)GetBase();
+ DWORD oldProtect;
+
+ if (!ClrVirtualProtect(pageBase, OS_PAGE_SIZE, PAGE_READWRITE, &oldProtect))
+ {
+ // We are not going to be able to update header.
+ return false;
+ }
+
+ fConvertedToPE64 = ConvertILOnlyPE32ToPE64Worker();
+
+ DWORD ignore;
+ if (!ClrVirtualProtect(pageBase, OS_PAGE_SIZE, oldProtect, &ignore))
+ {
+ // This is not so bad; just ignore it
+ }
+
+ return fConvertedToPE64;
+}
+#endif // defined(_WIN64) && !defined(DACCESS_COMPILE)
diff --git a/src/vm/peimagelayout.h b/src/vm/peimagelayout.h
new file mode 100644
index 0000000000..51830f8f91
--- /dev/null
+++ b/src/vm/peimagelayout.h
@@ -0,0 +1,200 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// --------------------------------------------------------------------------------
+// PEImageLayout.h
+//
+
+// --------------------------------------------------------------------------------
+
+
+#ifndef PEIMAGELAYOUT_H_
+#define PEIMAGELAYOUT_H_
+
+// --------------------------------------------------------------------------------
+// Required headers
+// --------------------------------------------------------------------------------
+
+#include "clrtypes.h"
+#include "pedecoder.h"
+#include "holder.h"
+
+// --------------------------------------------------------------------------------
+// Forward declarations
+// --------------------------------------------------------------------------------
+
+class Crst;
+class PEImage;
+
+
+typedef VPTR(class PEImageLayout) PTR_PEImageLayout;
+
+class PEImageLayout : public PEDecoder
+{
+ VPTR_BASE_CONCRETE_VTABLE_CLASS(PEImageLayout)
+ friend class PEModule;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ friend class CCLRDebugManager;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+public:
+ // ------------------------------------------------------------
+ // Public constants
+ // ------------------------------------------------------------
+ enum
+ {
+ LAYOUT_MAPPED =1,
+ LAYOUT_FLAT =2,
+ LAYOUT_LOADED =4,
+ LAYOUT_LOADED_FOR_INTROSPECTION =8,
+ LAYOUT_ANY =0xf
+ };
+
+
+public:
+#ifndef DACCESS_COMPILE
+ static PEImageLayout* CreateFlat(const void *flat, COUNT_T size,PEImage* pOwner);
+ static PEImageLayout* CreateFromStream(IStream* pIStream, PEImage* pOwner);
+ static PEImageLayout* CreateFromHMODULE(HMODULE mappedbase,PEImage* pOwner, BOOL bTakeOwnership);
+ static PEImageLayout* LoadFromFlat(PEImageLayout* pflatimage);
+ static PEImageLayout* Load(PEImage* pOwner, BOOL bNTSafeLoad, BOOL bThrowOnError = TRUE);
+ static PEImageLayout* LoadFlat(HANDLE hFile, PEImage* pOwner);
+ static PEImageLayout* Map (HANDLE hFile, PEImage* pOwner);
+#endif
+ PEImageLayout();
+ virtual ~PEImageLayout();
+ static void Startup();
+ static CHECK CheckStartup();
+ static BOOL CompareBase(UPTR path, UPTR mapping);
+
+ // Refcount above images.
+ void AddRef();
+ ULONG Release();
+ const SString& GetPath();
+#ifdef MDIL
+ BOOL GetILSizeFromMDILCLRCtlData(DWORD* pdwActualILSize);
+#endif // MDIL
+
+#ifdef FEATURE_PREJIT
+ void ApplyBaseRelocations();
+#endif
+
+public:
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+#if defined(_WIN64) && !defined(DACCESS_COMPILE)
+ bool ConvertILOnlyPE32ToPE64();
+private:
+ bool ConvertILOnlyPE32ToPE64Worker();
+#endif // defined(_WIN64) && !defined(DACCESS_COMPILE)
+
+private:
+ Volatile<LONG> m_refCount;
+public:
+ PEImage* m_pOwner;
+ DWORD m_Layout;
+};
+
+typedef ReleaseHolder<PEImageLayout> PEImageLayoutHolder;
+
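+// How the layout kinds map onto the concrete subclasses below, going by their constructors:
+//   LAYOUT_FLAT    - RawImageLayout (flat ctor), FlatImageLayout, StreamImageLayout
+//   LAYOUT_MAPPED  - RawImageLayout (mapped ctor), MappedImageLayout
+//   LAYOUT_LOADED  - ConvertedImageLayout, LoadedImageLayout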
+
+// RawImageLayout is built on external data and does not need cleanup
+class RawImageLayout: public PEImageLayout
+{
+ VPTR_VTABLE_CLASS(RawImageLayout,PEImageLayout)
+protected:
+ CLRMapViewHolder m_DataCopy;
+#ifndef FEATURE_PAL
+ HModuleHolder m_LibraryHolder;
+#endif // !FEATURE_PAL
+
+public:
+ RawImageLayout(const void *flat, COUNT_T size,PEImage* pOwner);
+ RawImageLayout(const void *mapped, PEImage* pOwner, BOOL bTakeOwnership, BOOL bFixedUp);
+};
+
+// ConvertedImageView is for the case when we manually layout a flat image
+class ConvertedImageLayout: public PEImageLayout
+{
+ VPTR_VTABLE_CLASS(ConvertedImageLayout,PEImageLayout)
+protected:
+ HandleHolder m_FileMap;
+ CLRMapViewHolder m_FileView;
+public:
+#ifndef DACCESS_COMPILE
+ ConvertedImageLayout(PEImageLayout* source);
+#endif
+};
+
+class MappedImageLayout: public PEImageLayout
+{
+ VPTR_VTABLE_CLASS(MappedImageLayout,PEImageLayout)
+ VPTR_UNIQUE(0x15)
+protected:
+ HandleHolder m_FileMap;
+ CLRMapViewHolder m_FileView;
+public:
+#ifndef DACCESS_COMPILE
+ MappedImageLayout(HANDLE hFile, PEImage* pOwner);
+#endif
+};
+
+#if !defined(CROSSGEN_COMPILE) && !defined(FEATURE_PAL)
+class LoadedImageLayout: public PEImageLayout
+{
+ VPTR_VTABLE_CLASS(LoadedImageLayout,PEImageLayout)
+protected:
+ HINSTANCE m_Module;
+public:
+#ifndef DACCESS_COMPILE
+ LoadedImageLayout(PEImage* pOwner, BOOL bNTSafeLoad, BOOL bThrowOnError);
+ ~LoadedImageLayout()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ if (m_Module)
+ CLRFreeLibrary(m_Module);
+ }
+#endif // !DACCESS_COMPILE
+};
+#endif // !CROSSGEN_COMPILE && !FEATURE_PAL
+
+class FlatImageLayout: public PEImageLayout
+{
+ VPTR_VTABLE_CLASS(FlatImageLayout,PEImageLayout)
+ VPTR_UNIQUE(0x59)
+protected:
+ HandleHolder m_FileMap;
+ CLRMapViewHolder m_FileView;
+public:
+#ifndef DACCESS_COMPILE
+ FlatImageLayout(HANDLE hFile, PEImage* pOwner);
+#endif
+
+};
+
+#ifdef FEATURE_FUSION
+class StreamImageLayout: public PEImageLayout
+{
+ VPTR_VTABLE_CLASS(StreamImageLayout,PEImageLayout)
+ VPTR_UNIQUE(0x71)
+protected:
+ HandleHolder m_FileMap;
+ CLRMapViewHolder m_FileView;
+public:
+#ifndef DACCESS_COMPILE
+ StreamImageLayout(IStream* pIStream,PEImage* pOwner);
+#endif
+};
+#endif // FEATURE_FUSION
+
+
+#endif // PEIMAGELAYOUT_H_
+
diff --git a/src/vm/peimagelayout.inl b/src/vm/peimagelayout.inl
new file mode 100644
index 0000000000..2450d3f43a
--- /dev/null
+++ b/src/vm/peimagelayout.inl
@@ -0,0 +1,121 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+
+#ifndef PEIMAGEVIEW_INL_
+#define PEIMAGEVIEW_INL_
+
+#include "util.hpp"
+#include "peimage.h"
+
+inline const SString &PEImageLayout::GetPath()
+{
+ return m_pOwner?m_pOwner->GetPath():SString::Empty();
+}
+
+inline void PEImageLayout::AddRef()
+{
+ CONTRACT_VOID
+ {
+ PRECONDITION(m_refCount>0 && m_refCount < COUNT_T_MAX);
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACT_END;
+
+ FastInterlockIncrement(&m_refCount);
+
+ RETURN;
+}
+
+inline ULONG PEImageLayout::Release()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+#ifdef DACCESS_COMPILE
+ // when DAC accesses layouts via PEImage it does not addref
+ if (m_pOwner)
+ return m_refCount;
+#endif
+
+ ULONG result=FastInterlockDecrement(&m_refCount);
+ if (result == 0 )
+ {
+ delete this;
+ }
+ return result;
+}
+
+
+inline PEImageLayout::~PEImageLayout()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+}
+
+inline PEImageLayout::PEImageLayout()
+ : m_refCount(1)
+ , m_pOwner(NULL)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+inline void PEImageLayout::Startup()
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckStartup());
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACT_END;
+
+ if (CheckStartup())
+ RETURN;
+
+ RETURN;
+}
+
+inline CHECK PEImageLayout::CheckStartup()
+{
+ WRAPPER_NO_CONTRACT;
+ CHECK_OK;
+}
+
+inline BOOL PEImageLayout::CompareBase(UPTR base, UPTR mapping)
+{
+ CONTRACTL
+ {
+ PRECONDITION(CheckPointer((PEImageLayout *)mapping));
+ PRECONDITION(CheckPointer((PEImageLayout *)(base<<1),NULL_OK));
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ if (base==NULL) //we were searching for 'Any'
+ return TRUE;
+ return ((PEImageLayout*)mapping)->GetBase()==((PEImageLayout*)(base<<1))->GetBase();
+
+}
+
+#endif //PEIMAGEVIEW_INL_
diff --git a/src/vm/pendingload.cpp b/src/vm/pendingload.cpp
new file mode 100644
index 0000000000..77f441aa9f
--- /dev/null
+++ b/src/vm/pendingload.cpp
@@ -0,0 +1,255 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: pendingload.cpp
+//
+
+//
+
+#include "common.h"
+#include "excep.h"
+#include "pendingload.h"
+
+#ifndef DACCESS_COMPILE
+
+
+// ============================================================================
+// Pending type load hash table methods
+// ============================================================================
+/*static */ PendingTypeLoadTable* PendingTypeLoadTable::Create(LoaderHeap *pHeap,
+ DWORD dwNumBuckets,
+ AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pHeap));
+ }
+ CONTRACTL_END;
+
+ size_t size = sizeof(PendingTypeLoadTable);
+ BYTE * pMem;
+ PendingTypeLoadTable * pThis;
+
+ _ASSERT( dwNumBuckets >= 0 );
+ S_SIZE_T allocSize = S_SIZE_T( dwNumBuckets )
+ * S_SIZE_T( sizeof(PendingTypeLoadTable::TableEntry*) )
+ + S_SIZE_T( size );
+ if( allocSize.IsOverflow() )
+ {
+ ThrowHR(E_INVALIDARG);
+ }
+ pMem = (BYTE *) pamTracker->Track(pHeap->AllocMem( allocSize ));
+
+ pThis = (PendingTypeLoadTable *) pMem;
+
+#ifdef _DEBUG
+ pThis->m_dwDebugMemory = (DWORD)(size + dwNumBuckets*sizeof(PendingTypeLoadTable::TableEntry*));
+#endif
+
+ pThis->m_dwNumBuckets = dwNumBuckets;
+ pThis->m_pBuckets = (PendingTypeLoadTable::TableEntry**) (pMem + size);
+
+ // Don't need to memset() since this was ClrVirtualAlloc()'d memory
+ // memset(pThis->m_pBuckets, 0, dwNumBuckets*sizeof(PendingTypeLoadTable::TableEntry*));
+
+ return pThis;
+}
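+
+// The single allocation in Create carves the table header and its bucket array out of one
+// LoaderHeap block:
+//
+//   pMem: [ PendingTypeLoadTable ][ TableEntry* x dwNumBuckets ]
+//                                 ^ m_pBuckets = pMem + sizeof(PendingTypeLoadTable)
+//
+// so no separate allocation (or separate cleanup) is needed for the buckets.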
+
+
+
+PendingTypeLoadTable::TableEntry *PendingTypeLoadTable::AllocNewEntry()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT( return NULL; );
+ }
+ CONTRACTL_END
+
+#ifdef _DEBUG
+ m_dwDebugMemory += (DWORD) (sizeof(PendingTypeLoadTable::TableEntry));
+#endif
+
+ return (PendingTypeLoadTable::TableEntry *) new (nothrow) BYTE[sizeof(PendingTypeLoadTable::TableEntry)];
+}
+
+
+void PendingTypeLoadTable::FreeEntry(PendingTypeLoadTable::TableEntry * pEntry)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ // keep in sync with the allocator used in AllocNewEntry
+ delete[] ((BYTE*)pEntry);
+
+#ifdef _DEBUG
+ m_dwDebugMemory -= (DWORD) (sizeof(PendingTypeLoadTable::TableEntry));
+#endif
+}
+
+
+//
+// Does not handle duplicates!
+//
+BOOL PendingTypeLoadTable::InsertValue(PendingTypeLoadEntry *pData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT( return FALSE; );
+ PRECONDITION(CheckPointer(pData));
+ PRECONDITION(FindItem(&pData->GetTypeKey()) == NULL);
+ }
+ CONTRACTL_END
+
+ _ASSERTE(m_dwNumBuckets != 0);
+
+ DWORD dwHash = pData->GetTypeKey().ComputeHash();
+ DWORD dwBucket = dwHash % m_dwNumBuckets;
+ PendingTypeLoadTable::TableEntry * pNewEntry = AllocNewEntry();
+ if (pNewEntry == NULL)
+ return FALSE;
+
+ // Insert at head of bucket
+ pNewEntry->pNext = m_pBuckets[dwBucket];
+ pNewEntry->pData = pData;
+ pNewEntry->dwHashValue = dwHash;
+
+ m_pBuckets[dwBucket] = pNewEntry;
+
+ return TRUE;
+}
+
+
+BOOL PendingTypeLoadTable::DeleteValue(TypeKey *pKey)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(pKey));
+ }
+ CONTRACTL_END
+
+ _ASSERTE(m_dwNumBuckets != 0);
+
+ DWORD dwHash = pKey->ComputeHash();
+ DWORD dwBucket = dwHash % m_dwNumBuckets;
+ PendingTypeLoadTable::TableEntry * pSearch;
+ PendingTypeLoadTable::TableEntry **ppPrev = &m_pBuckets[dwBucket];
+
+ for (pSearch = m_pBuckets[dwBucket]; pSearch; pSearch = pSearch->pNext)
+ {
+ TypeKey entryTypeKey = pSearch->pData->GetTypeKey();
+ if (pSearch->dwHashValue == dwHash && TypeKey::Equals(pKey, &entryTypeKey))
+ {
+ *ppPrev = pSearch->pNext;
+ FreeEntry(pSearch);
+ return TRUE;
+ }
+
+ ppPrev = &pSearch->pNext;
+ }
+
+ return FALSE;
+}
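+
+// DeleteValue uses the classic pointer-to-pointer unlink: ppPrev always addresses the link
+// that points at pSearch (initially &m_pBuckets[dwBucket], thereafter &prev->pNext), so a
+// match is removed with the single store "*ppPrev = pSearch->pNext" and the bucket head
+// needs no special case.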
+
+
+PendingTypeLoadTable::TableEntry *PendingTypeLoadTable::FindItem(TypeKey *pKey)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(pKey));
+ }
+ CONTRACTL_END
+
+ _ASSERTE(m_dwNumBuckets != 0);
+
+
+ DWORD dwHash = pKey->ComputeHash();
+ DWORD dwBucket = dwHash % m_dwNumBuckets;
+ PendingTypeLoadTable::TableEntry * pSearch;
+
+ for (pSearch = m_pBuckets[dwBucket]; pSearch; pSearch = pSearch->pNext)
+ {
+ TypeKey entryTypeKey = pSearch->pData->GetTypeKey();
+ if (pSearch->dwHashValue == dwHash && TypeKey::Equals(pKey, &entryTypeKey))
+ {
+ return pSearch;
+ }
+ }
+
+ return NULL;
+}
+
+
+#ifdef _DEBUG
+void PendingTypeLoadTable::Dump()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ LOG((LF_CLASSLOADER, LL_INFO10000, "PHASEDLOAD: table contains:\n"));
+ for (DWORD i = 0; i < m_dwNumBuckets; i++)
+ {
+ for (TableEntry *pSearch = m_pBuckets[i]; pSearch; pSearch = pSearch->pNext)
+ {
+ SString name;
+ TypeKey entryTypeKey = pSearch->pData->GetTypeKey();
+ TypeString::AppendTypeKeyDebug(name, &entryTypeKey);
+ LOG((LF_CLASSLOADER, LL_INFO10000, " Entry %S with handle %p at level %s\n", name.GetUnicode(), pSearch->pData->m_typeHandle.AsPtr(),
+ pSearch->pData->m_typeHandle.IsNull() ? "not-applicable" : classLoadLevelName[pSearch->pData->m_typeHandle.GetLoadLevel()]));
+ }
+ }
+}
+#endif
+
+PendingTypeLoadEntry* PendingTypeLoadTable::GetValue(TypeKey *pKey)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(pKey));
+ }
+ CONTRACTL_END
+
+ PendingTypeLoadTable::TableEntry *pItem = FindItem(pKey);
+
+ if (pItem != NULL)
+ {
+ return pItem->pData;
+ }
+ else
+ {
+ return NULL;
+ }
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
diff --git a/src/vm/pendingload.h b/src/vm/pendingload.h
new file mode 100644
index 0000000000..389336b8a9
--- /dev/null
+++ b/src/vm/pendingload.h
@@ -0,0 +1,259 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// pendingload.h
+//
+
+//
+
+#ifndef _H_PENDINGLOAD
+#define _H_PENDINGLOAD
+
+#include "crst.h"
+#include "class.h"
+#include "typekey.h"
+#include "typehash.h"
+#include "vars.hpp"
+#include "shash.h"
+#include "typestring.h"
+
+//
+// A temporary structure used when loading and resolving classes
+//
+class PendingTypeLoadEntry
+{
+ friend class ClassLoader; // workaround: really need to beef up the API below
+
+public:
+ PendingTypeLoadEntry(TypeKey typeKey, TypeHandle typeHnd)
+ : m_Crst(
+ CrstPendingTypeLoadEntry,
+ CrstFlags(CRST_HOST_BREAKABLE|CRST_UNSAFE_SAMELEVEL)
+ ),
+ m_typeKey(typeKey)
+
+ {
+ WRAPPER_NO_CONTRACT;
+
+ m_typeHandle = typeHnd;
+ m_dwWaitCount = 1;
+ m_hrResult = S_OK;
+ m_pException = NULL;
+#ifdef _DEBUG
+ if (LoggingOn(LF_CLASSLOADER, LL_INFO10000))
+ {
+ SString name;
+ TypeString::AppendTypeKeyDebug(name, &m_typeKey);
+ LOG((LF_CLASSLOADER, LL_INFO10000, "PHASEDLOAD: Creating loading entry for type %S\n", name.GetUnicode()));
+ }
+#endif
+
+ m_fLockAcquired = TRUE;
+
+ //---------------------------------------------------------------------------
+ // The PendingTypeLoadEntry() lock has a higher level than UnresolvedClassLock.
+ // But whenever we create one, we have to acquire it while holding the UnresolvedClassLock.
+ // This is safe since we're the ones that created the lock and are guaranteed to acquire
+ // it without blocking. But to prevent the crstlevel system from asserting, we
+ // must acquire using a special method.
+ //---------------------------------------------------------------------------
+ m_Crst.Enter(INDEBUG(Crst::CRST_NO_LEVEL_CHECK));
+ }
+
+ ~PendingTypeLoadEntry()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_fLockAcquired)
+ m_Crst.Leave();
+
+ if (m_pException && !m_pException->IsPreallocatedException()) {
+ delete m_pException;
+ }
+ }
+
+#ifdef _DEBUG
+ BOOL HasLock()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Crst.OwnedByCurrentThread();
+ }
+#endif
+
+#ifndef DACCESS_COMPILE
+ VOID DECLSPEC_NORETURN ThrowException()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (m_pException)
+ PAL_CPP_THROW(Exception *, m_pException->Clone());
+
+ _ASSERTE(FAILED(m_hrResult));
+
+ if (m_hrResult == COR_E_TYPELOAD)
+ {
+ TypeKey typeKey = GetTypeKey();
+ ClassLoader::ThrowTypeLoadException(&typeKey,
+ IDS_CLASSLOAD_GENERAL);
+
+ }
+ else
+ EX_THROW(EEMessageException, (m_hrResult));
+ }
+
+ void SetException(Exception *pException)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ PRECONDITION(HasLock());
+ PRECONDITION(m_pException == NULL);
+ PRECONDITION(m_dwWaitCount > 0);
+ }
+ CONTRACTL_END;
+
+ m_typeHandle = TypeHandle();
+ m_hrResult = COR_E_TYPELOAD;
+
+ // We don't care if this fails: we already know the HRESULT, so if
+ // we can't store the details, so be it.
+ EX_TRY
+ {
+ FAULT_NOT_FATAL();
+ m_pException = pException->Clone();
+ }
+ EX_CATCH
+ {
+ m_pException=NULL;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ _ASSERTE(m_fLockAcquired);
+ m_Crst.Leave();
+ m_fLockAcquired = FALSE;
+ }
+
+ void SetResult(TypeHandle typeHnd)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ PRECONDITION(HasLock());
+ PRECONDITION(m_pException == NULL);
+ PRECONDITION(m_dwWaitCount > 0);
+ }
+ CONTRACTL_END;
+
+ m_typeHandle = typeHnd;
+
+ _ASSERTE(m_fLockAcquired);
+ m_Crst.Leave();
+ m_fLockAcquired = FALSE;
+ }
+#endif //DACCESS_COMPILE
+
+ TypeKey GetTypeKey()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_typeKey;
+ }
+
+ void AddRef()
+ {
+ LIMITED_METHOD_CONTRACT;
+ InterlockedIncrement(&m_dwWaitCount);
+ }
+
+ void Release()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (InterlockedDecrement(&m_dwWaitCount) == 0)
+ delete this;
+ }
+
+ BOOL HasWaiters()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwWaitCount > 1;
+ }
+
+ private:
+ Crst m_Crst;
+
+ public:
+ // Result of loading; this is first created in the CREATE stage of class loading
+ TypeHandle m_typeHandle;
+
+ private:
+ // Type that we're loading
+ TypeKey m_typeKey;
+
+ // Number of threads waiting for this type
+ LONG m_dwWaitCount;
+
+ // Error result, propagated to all threads loading this class
+ HRESULT m_hrResult;
+
+ // Exception object to throw
+ Exception *m_pException;
+
+ // m_Crst was acquired
+ BOOL m_fLockAcquired;
+};
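+
+// A rough sketch of the lifecycle, as implied by the methods above: the creating thread
+// constructs the entry (which enters m_Crst) and publishes it while holding the
+// UnresolvedClassLock; other threads wanting the same type AddRef the entry and block
+// acquiring m_Crst; the creator finishes with SetResult or SetException, which leaves
+// m_Crst and lets the waiters in to read m_typeHandle or rethrow via ThrowException;
+// everyone calls Release when done and the last reference deletes the entry.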
+
+// Hash table used to hold pending type loads
+// @todo: use shash.h when Rotor build problems are fixed and it supports LoaderHeap/AllocMemTracker
+class PendingTypeLoadTable
+{
+protected:
+ struct TableEntry
+ {
+ TableEntry* pNext;
+ DWORD dwHashValue;
+ PendingTypeLoadEntry* pData;
+ };
+
+ TableEntry **m_pBuckets; // Pointer to first entry for each bucket
+ DWORD m_dwNumBuckets;
+
+public:
+
+#ifdef _DEBUG
+ DWORD m_dwDebugMemory;
+#endif
+
+ static PendingTypeLoadTable *Create(LoaderHeap *pHeap, DWORD dwNumBuckets, AllocMemTracker *pamTracker);
+
+private:
+ // These functions don't actually exist - declared private to prevent bypassing PendingTypeLoadTable::Create
+ void * operator new(size_t size);
+ void operator delete(void *p);
+
+ PendingTypeLoadTable();
+ ~PendingTypeLoadTable();
+
+public:
+ BOOL InsertValue(PendingTypeLoadEntry* pEntry);
+ BOOL DeleteValue(TypeKey *pKey);
+ PendingTypeLoadEntry* GetValue(TypeKey *pKey);
+ TableEntry* AllocNewEntry();
+ void FreeEntry(TableEntry* pEntry);
+#ifdef _DEBUG
+ void Dump();
+#endif
+
+private:
+ TableEntry* FindItem(TypeKey *pKey);
+};
+
+
+#endif // _H_PENDINGLOAD
diff --git a/src/vm/perfdefaults.cpp b/src/vm/perfdefaults.cpp
new file mode 100644
index 0000000000..ee16e2df06
--- /dev/null
+++ b/src/vm/perfdefaults.cpp
@@ -0,0 +1,148 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*************************************************************************************************
+// PerfDefaults.cpp
+//
+
+//
+// Implementation of the "PerformanceScenario" config option, which defines a workload-specific
+// set of performance defaults. The key point of the code below is that it be clear exactly what
+// gets enabled for each scenario -- see the switch statements
+//
+// for host STARTUP_FLAGS, in GetModifiedStartupFlags
+// for environment, registry, or config CLRConfig values, in LookupConfigValue
+// for other global initialization, in InitializeForScenario
+//
+//*************************************************************************************************
+
+#include "common.h"
+#include "perfdefaults.h"
+
+// Useful to the readability of lists of settings below
+#define MATCHES(a,b) (SString::_wcsicmp((a),(b)) == 0)
+
+// The scenario we have been asked to run under
+PerformanceDefaults::PerformanceScenario PerformanceDefaults::s_Scenario = Uninitialized;
+
+// See use in code:PerformanceDefaults:InitializeForScenario
+extern LONG g_bLowMemoryFromHost;
+
+
+//
+// Initialize our system to provide performance defaults for a given scenario.
+// If the scenario name is not recognized, intentionally ignore it and operate as if not specified.
+//
+void PerformanceDefaults::InitializeForScenario(__in_opt LPWSTR scenarioName)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // First convert the scenario name to the corresponding enum value
+ s_Scenario = None;
+ if (scenarioName != NULL)
+ {
+ if (MATCHES(scenarioName, W("HighDensityWebHosting"))) s_Scenario = HighDensityWebHosting;
+ }
+
+ // Next do any scenario-specific initialization
+ switch (s_Scenario)
+ {
+ case None:
+ break;
+
+ case HighDensityWebHosting:
+ // Tell the hosting API that we want the GC to operate as if the machine is under memory pressure.
+ // This is a workaround because we do not want to expose "force low memory mode" as either a CLR
+ // config option or a host startup flag (the two types of knob that PerformanceDefaults can alter) and
+ // ASP.Net has not yet become a memory host. When it does, this can be removed and ASP.Net can
+ // make an initial call to ICLRMemoryNotificationCallback::OnMemoryNotification(eMemoryAvailableLow).
+ //
+ // Note that in order for ASP.Net to become a memory host the CLR will need to support profiler attach
+ // in that condition.
+ g_bLowMemoryFromHost = 1;
+ break;
+
+ case Uninitialized:
+ // Unreachable, but make GCC happy
+ break;
+ }
+
+ // Finally register our lookup function with the CLRConfig system so we get called when a 'MayHavePerformanceDefault'
+ // config option is about to resolve to its runtime default
+ if (s_Scenario != None)
+ {
+ CLRConfig::RegisterGetPerformanceDefaultValueCallback(&LookupConfigValue);
+ }
+}
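+
+// Illustrative only -- the plumbing that calls InitializeForScenario lives outside this
+// file. Assuming the usual CLRConfig lookup conventions (environment variable, registry,
+// or application config file) apply to this option, one hypothetical way for scenarioName
+// to arrive here would be:
+//
+//     set COMPlus_PerformanceScenario=HighDensityWebHosting
+//
+// before the process starts; an unrecognized name is deliberately treated as None.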
+
+
+//
+// Called at runtime startup to allow host-specified STARTUP_FLAGS to be overridden when running
+// under a scenario.
+//
+// Note that we are comfortable overriding the values the host has asked us to use (see
+// file:PerfDefaults.h), but we never want to override a value specified by a user to CLRConfig.
+// This comes up here because there are settings that are configurable through both systems.
+// So where an option has both STARTUP_FLAG A and CLRConfig value B, first check whether B has
+// been specified before altering the value of A. This way, we maintain complete compatibility
+// with whatever the interaction of A and B has produced in the past.
+//
+STARTUP_FLAGS PerformanceDefaults::GetModifiedStartupFlags(STARTUP_FLAGS originalFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD newFlags = (DWORD)originalFlags;
+
+ switch (s_Scenario)
+ {
+ case None:
+ break;
+
+ case HighDensityWebHosting:
+ if (!CLRConfig::IsConfigOptionSpecified(W("gcServer"))) newFlags &= ~STARTUP_SERVER_GC;
+ if (!CLRConfig::IsConfigOptionSpecified(W("gcConcurrent"))) newFlags &= ~STARTUP_CONCURRENT_GC;
+ if (!CLRConfig::IsConfigOptionSpecified(W("gcTrimCommitOnLowMemory"))) newFlags |= STARTUP_TRIM_GC_COMMIT;
+
+ break;
+
+ case Uninitialized:
+ // Check that no request for startup flags happens before our code has been initialized
+ // and given a chance to modify them.
+ _ASSERTE(!"PerformanceDefaults::InitializeForScenario should have already been called");
+ break;
+ }
+
+ return (STARTUP_FLAGS)newFlags;
+}
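+
+// Worked example for the HighDensityWebHosting arm above: if the host passes
+// STARTUP_SERVER_GC | STARTUP_CONCURRENT_GC and the user has specified none of gcServer,
+// gcConcurrent, or gcTrimCommitOnLowMemory, the returned flags have both GC flags cleared
+// and STARTUP_TRIM_GC_COMMIT set -- i.e. workstation, non-concurrent GC that trims its
+// committed pages, a reasonable fit for many small sites packed onto one machine.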
+
+
+//
+// Called by the CLRConfig system whenever a 'MayHavePerformanceDefault' config option is about
+// to resolve to its runtime default (i.e. none of the corresponding environment variable, registry, or
+// config file values were specified).
+//
+BOOL PerformanceDefaults::LookupConfigValue(LPCWSTR name, DWORD *pValue)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pValue != NULL);
+
+ switch (s_Scenario)
+ {
+ case None:
+ return FALSE;
+
+ case HighDensityWebHosting:
+ if (MATCHES(name, W("shadowCopyVerifyByTimestamp"))) { *pValue = 1; return TRUE; }
+ return FALSE;
+
+ case Uninitialized:
+ // Check that no request for a MayHavePerformanceDefault CLRConfig option happens
+ // before our code has been initialized and given a chance to provide a default value.
+ _ASSERTE(!"PerformanceDefaults::InitializeForScenario should have already been called");
+ break;
+ }
+
+ return FALSE;
+}
+
diff --git a/src/vm/perfdefaults.h b/src/vm/perfdefaults.h
new file mode 100644
index 0000000000..c040cf69f4
--- /dev/null
+++ b/src/vm/perfdefaults.h
@@ -0,0 +1,91 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// --------------------------------------------------------------------------------------------------
+// PerfDefaults.h
+//
+
+//
+// Implementation of the "PerformanceScenario" config option, which defines a workload-specific
+// set of performance defaults.
+//
+// The motivation is that every release we work closely with a partner to understand the performance
+// of their scenario and provide CLR behavior to improve it. Sometimes we are able to give them
+// an entirely different build of the CLR (because it happens to be on a different architecture, or
+// it needs to have a small size, etc), but not always. Sometimes we are able to change the
+// runtime to have the new behavior by default, but not always. When neither applies, we fall back to
+// the ugly option of a set of one-off config values for the scenario (or machine administrators)
+// to opt in to. This then creates the problem of a huge number of generic knobs that any application
+// can use in any combination. It also means that as we add additional improvements for the same
+// scenario, we need to ask developers or machine administrators to keep up with the growing list.
+//
+// Our solution is to opt-in to a set of performance settings all at once and based on the workload.
+// When a recognized 'PerformanceScenario' is specified in a config file, we want the runtime to
+// operate with a defined set of performance settings. This set may evolve over time to
+// automatically provide additional behavior to the same workload, just as we would for runtime
+// defaults. Note however that a key design point is that in any conflict between these performance
+// settings and one that is explicitly set by the user, the explicit user setting wins. Hence the
+// name PerformanceDefaults -- it is as if we had created a separate build of the runtime for a
+// particular scenario, changing only the default performance behavior, and continued to let people
+// opt in or opt-out individually to those settings that are important and general-purpose enough
+// to have an individual setting.
+//
+// To add a scenario,
+// Update the PerformanceScenario enum and InitializeForScenario so that it is recognized
+//
+// To add an overridden host STARTUP_FLAG,
+// Update the switch statement in GetModifiedStartupFlags; see note about IsConfigOptionSpecified
+//
+// To add an overridden CLRConfig default value,
+// Mark it with LookupOption CLRConfig::MayHavePerformanceDefault in clrconfigvalues.h
+// Update the switch statement in LookupConfigValue
+// If a new CLRConfig option, decide if it needs to be a general-purpose switch (hopefully no)
+// and if not create it as INTERNAL_
+//
+// --------------------------------------------------------------------------------------------------
+
+
+#ifndef __PerfDefaults_h__
+#define __PerfDefaults_h__
+
+class PerformanceDefaults
+{
+public:
+
+ static void InitializeForScenario(__in_opt LPWSTR scenarioName);
+
+ // Called at runtime startup to allow host-specified STARTUP_FLAGS to be overridden when running
+ // under a scenario.
+ //
+ // Note that this does not follow the model of "only override if not specified." By the nature
+ // of the startup flags being bits, they are all specified (as on or off). We have no way of
+ // knowing which flags the host actually cares about and which were left at the runtime default
+ // out of lack of concern. However, we have found that so far the specified startup flags are
+ // hard-coded by the host (with no way for the user to override them at run time), and so we can
+ // build in knowledge of the set of defaults which the host allows us to override.
+ //
+ // In the future, if a host needs us to be able to override startup flags conditionally, the
+ // solution would be a new hosting API ICLRRuntimeInfo::SetOverridableStartupFlags.
+ static STARTUP_FLAGS GetModifiedStartupFlags(STARTUP_FLAGS originalFlags);
+
+ // Called by the CLRConfig system whenever a 'MayHavePerformanceDefault' config option is about
+ // to resolve to its runtime default (i.e. none of the corresponding environment variable,
+ // registry, or config file values were specified).
+ static BOOL LookupConfigValue(LPCWSTR name, DWORD *pValue);
+
+private:
+
+ enum PerformanceScenario
+ {
+ Uninitialized,
+ None,
+ HighDensityWebHosting,
+ };
+
+ static PerformanceScenario s_Scenario;
+
+};
+
+#endif //__PerfDefaults_h__
diff --git a/src/vm/precode.cpp b/src/vm/precode.cpp
new file mode 100644
index 0000000000..c214c4fea3
--- /dev/null
+++ b/src/vm/precode.cpp
@@ -0,0 +1,795 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// precode.cpp
+//
+
+//
+// Stub that runs before the actual native code
+//
+
+
+#include "common.h"
+
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif
+
+//==========================================================================================
+// class Precode
+//==========================================================================================
+BOOL Precode::IsValidType(PrecodeType t)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ switch (t) {
+ case PRECODE_STUB:
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ case PRECODE_NDIRECT_IMPORT:
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+#ifdef HAS_REMOTING_PRECODE
+ case PRECODE_REMOTING:
+#endif // HAS_REMOTING_PRECODE
+#ifdef HAS_FIXUP_PRECODE
+ case PRECODE_FIXUP:
+#endif // HAS_FIXUP_PRECODE
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+ case PRECODE_THISPTR_RETBUF:
+#endif // HAS_THISPTR_RETBUF_PRECODE
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}
+
+SIZE_T Precode::SizeOf(PrecodeType t)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ switch (t)
+ {
+ case PRECODE_STUB:
+ return sizeof(StubPrecode);
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ case PRECODE_NDIRECT_IMPORT:
+ return sizeof(NDirectImportPrecode);
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+#ifdef HAS_REMOTING_PRECODE
+ case PRECODE_REMOTING:
+ return sizeof(RemotingPrecode);
+#endif // HAS_REMOTING_PRECODE
+#ifdef HAS_FIXUP_PRECODE
+ case PRECODE_FIXUP:
+ return sizeof(FixupPrecode);
+#endif // HAS_FIXUP_PRECODE
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+ case PRECODE_THISPTR_RETBUF:
+ return sizeof(ThisPtrRetBufPrecode);
+#endif // HAS_THISPTR_RETBUF_PRECODE
+
+ default:
+ UnexpectedPrecodeType("Precode::SizeOf", t);
+ break;
+ }
+ return 0;
+}
+
+// Note: This is the immediate target of the precode. It does not follow the jump stub if there is one.
+PCODE Precode::GetTarget()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ PCODE target = NULL;
+
+ PrecodeType precodeType = GetType();
+ switch (precodeType)
+ {
+ case PRECODE_STUB:
+ target = AsStubPrecode()->GetTarget();
+ break;
+#ifdef HAS_REMOTING_PRECODE
+ case PRECODE_REMOTING:
+ target = AsRemotingPrecode()->GetTarget();
+ break;
+#endif // HAS_REMOTING_PRECODE
+#ifdef HAS_FIXUP_PRECODE
+ case PRECODE_FIXUP:
+ target = AsFixupPrecode()->GetTarget();
+ break;
+#endif // HAS_FIXUP_PRECODE
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+ case PRECODE_THISPTR_RETBUF:
+ target = AsThisPtrRetBufPrecode()->GetTarget();
+ break;
+#endif // HAS_THISPTR_RETBUF_PRECODE
+
+ default:
+ UnexpectedPrecodeType("Precode::GetTarget", precodeType);
+ break;
+ }
+ return target;
+}
+
+MethodDesc* Precode::GetMethodDesc(BOOL fSpeculative /*= FALSE*/)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ TADDR pMD = NULL;
+
+ PrecodeType precodeType = GetType();
+ switch (precodeType)
+ {
+ case PRECODE_STUB:
+ pMD = AsStubPrecode()->GetMethodDesc();
+ break;
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ case PRECODE_NDIRECT_IMPORT:
+ pMD = AsNDirectImportPrecode()->GetMethodDesc();
+ break;
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+#ifdef HAS_REMOTING_PRECODE
+ case PRECODE_REMOTING:
+ pMD = AsRemotingPrecode()->GetMethodDesc();
+ break;
+#endif // HAS_REMOTING_PRECODE
+#ifdef HAS_FIXUP_PRECODE
+ case PRECODE_FIXUP:
+ pMD = AsFixupPrecode()->GetMethodDesc();
+ break;
+#endif // HAS_FIXUP_PRECODE
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+ case PRECODE_THISPTR_RETBUF:
+ pMD = AsThisPtrRetBufPrecode()->GetMethodDesc();
+ break;
+#endif // HAS_THISPTR_RETBUF_PRECODE
+
+ default:
+ break;
+ }
+
+ if (pMD == NULL)
+ {
+ if (fSpeculative)
+ return NULL;
+ else
+ UnexpectedPrecodeType("Precode::GetMethodDesc", precodeType);
+ }
+
+ // GetMethodDesc() on platform-specific precode types returns a TADDR. It should return
+ // PTR_MethodDesc instead; this is a workaround for a cyclic dependency between headers.
+ // Once the factoring of headers is cleaned up, we should be able to get rid of it.
+
+ // For speculative calls, pMD can be garbage that causes IBC logging to crash
+ if (!fSpeculative)
+ g_IBCLogger.LogMethodPrecodeAccess((PTR_MethodDesc)pMD);
+
+ return (PTR_MethodDesc)pMD;
+}
+
+BOOL Precode::IsCorrectMethodDesc(MethodDesc * pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ MethodDesc * pMDfromPrecode = GetMethodDesc(TRUE);
+
+ if (pMDfromPrecode == pMD)
+ return TRUE;
+
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ if (pMDfromPrecode == NULL)
+ {
+ PrecodeType precodeType = GetType();
+
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ // We do not keep track of the MethodDesc in every kind of fixup precode
+ if (precodeType == PRECODE_FIXUP)
+ return TRUE;
+#endif
+ }
+#endif // HAS_FIXUP_PRECODE_CHUNKS
+
+ return FALSE;
+}
+
+BOOL Precode::IsPointingToPrestub(PCODE target)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (IsPointingTo(target, GetPreStubEntryPoint()))
+ return TRUE;
+
+#ifdef HAS_FIXUP_PRECODE
+ if (IsPointingTo(target, GetEEFuncEntryPoint(PrecodeFixupThunk)))
+ return TRUE;
+#endif
+
+#ifdef FEATURE_PREJIT
+ Module *pZapModule = GetMethodDesc()->GetZapModule();
+ if (pZapModule != NULL)
+ {
+ if (IsPointingTo(target, pZapModule->GetPrestubJumpStub()))
+ return TRUE;
+
+#ifdef HAS_FIXUP_PRECODE
+ if (IsPointingTo(target, pZapModule->GetPrecodeFixupJumpStub()))
+ return TRUE;
+#endif
+ }
+#endif // FEATURE_PREJIT
+
+ return FALSE;
+}
+
+// If addr is patched fixup precode, returns address that it points to. Otherwise returns NULL.
+PCODE Precode::TryToSkipFixupPrecode(PCODE addr)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ PCODE pTarget = NULL;
+
+#if defined(FEATURE_PREJIT) && defined(HAS_FIXUP_PRECODE)
+ // Early out for common cases
+ if (!FixupPrecode::IsFixupPrecodeByASM(addr))
+ return NULL;
+
+ // This optimization makes sense in NGened code only.
+ Module * pModule = ExecutionManager::FindZapModule(addr);
+ if (pModule == NULL)
+ return NULL;
+
+ // Verify that the address is in precode section
+ if (!pModule->IsZappedPrecode(addr))
+ return NULL;
+
+ pTarget = GetPrecodeFromEntryPoint(addr)->GetTarget();
+
+ // Verify that the target is in code section
+ if (!pModule->IsZappedCode(pTarget))
+ return NULL;
+
+#if defined(_DEBUG)
+ MethodDesc * pMD_orig = MethodTable::GetMethodDescForSlotAddress(addr);
+ MethodDesc * pMD_direct = MethodTable::GetMethodDescForSlotAddress(pTarget);
+
+ // Both the original and direct entrypoint should map to same MethodDesc
+ // Some FCalls are remapped to private methods (see System.String.CtorCharArrayStartLength)
+ _ASSERTE((pMD_orig == pMD_direct) || pMD_orig->IsRuntimeSupplied());
+#endif
+
+#endif // defined(FEATURE_PREJIT) && defined(HAS_FIXUP_PRECODE)
+
+ return pTarget;
+}
+
+Precode* Precode::GetPrecodeForTemporaryEntryPoint(TADDR temporaryEntryPoints, int index)
+{
+ WRAPPER_NO_CONTRACT;
+ PrecodeType t = PTR_Precode(temporaryEntryPoints)->GetType();
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ if (t == PRECODE_FIXUP)
+ {
+ return PTR_Precode(temporaryEntryPoints + index * sizeof(FixupPrecode));
+ }
+#endif
+ SIZE_T oneSize = SizeOfTemporaryEntryPoint(t);
+ return PTR_Precode(temporaryEntryPoints + index * oneSize);
+}
+
+SIZE_T Precode::SizeOfTemporaryEntryPoints(PrecodeType t, int count)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ if (t == PRECODE_FIXUP)
+ {
+ return count * (sizeof(FixupPrecode) + sizeof(PCODE)) + sizeof(PTR_MethodDesc);
+ }
+#endif
+ SIZE_T oneSize = SizeOfTemporaryEntryPoint(t);
+ return count * oneSize;
+}
+
+#ifndef DACCESS_COMPILE
+
+Precode* Precode::Allocate(PrecodeType t, MethodDesc* pMD,
+ LoaderAllocator * pLoaderAllocator,
+ AllocMemTracker * pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SIZE_T size;
+
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ if (t == PRECODE_FIXUP)
+ {
+ size = sizeof(FixupPrecode) + sizeof(PTR_MethodDesc);
+ }
+ else
+#endif
+ {
+ size = Precode::SizeOf(t);
+ }
+
+ Precode* pPrecode = (Precode*)pamTracker->Track(pLoaderAllocator->GetPrecodeHeap()->AllocAlignedMem(size, AlignOf(t)));
+ pPrecode->Init(t, pMD, pLoaderAllocator);
+
+#ifndef CROSSGEN_COMPILE
+ ClrFlushInstructionCache(pPrecode, size);
+
+ _ASSERTE(PrecodeStubManager::IsPrecodeByAsm(pPrecode->GetEntryPoint()));
+#endif
+
+ return pPrecode;
+}
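+
+// Illustrative sketch (not exercised here) of the typical allocation pattern,
+// using names from this file:
+//
+//     AllocMemTracker amt;
+//     Precode * pPrecode = Precode::Allocate(PRECODE_STUB, pMD,
+//                                            pMD->GetLoaderAllocator(), &amt);
+//     amt.SuppressRelease();   // commit the memory once publishing succeeds
+//
+// The new precode initially points at the prestub; it is retargeted later with
+// SetTargetInterlocked once real code exists.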
+
+void Precode::Init(PrecodeType t, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch (t) {
+ case PRECODE_STUB:
+ ((StubPrecode*)this)->Init(pMD, pLoaderAllocator);
+ break;
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ case PRECODE_NDIRECT_IMPORT:
+ ((NDirectImportPrecode*)this)->Init(pMD, pLoaderAllocator);
+ break;
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+#ifdef HAS_REMOTING_PRECODE
+ case PRECODE_REMOTING:
+ ((RemotingPrecode*)this)->Init(pMD, pLoaderAllocator);
+ break;
+#endif // HAS_REMOTING_PRECODE
+#ifdef HAS_FIXUP_PRECODE
+ case PRECODE_FIXUP:
+ ((FixupPrecode*)this)->Init(pMD, pLoaderAllocator);
+ break;
+#endif // HAS_FIXUP_PRECODE
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+ case PRECODE_THISPTR_RETBUF:
+ ((ThisPtrRetBufPrecode*)this)->Init(pMD, pLoaderAllocator);
+ break;
+#endif // HAS_THISPTR_RETBUF_PRECODE
+ default:
+ UnexpectedPrecodeType("Precode::Init", t);
+ break;
+ }
+
+ _ASSERTE(IsValidType(GetType()));
+}
+
+BOOL Precode::SetTargetInterlocked(PCODE target)
+{
+ WRAPPER_NO_CONTRACT;
+
+ PCODE expected = GetTarget();
+ BOOL ret = FALSE;
+
+ if (!IsPointingToPrestub(expected))
+ return FALSE;
+
+ g_IBCLogger.LogMethodPrecodeWriteAccess(GetMethodDesc());
+
+ PrecodeType precodeType = GetType();
+ switch (precodeType)
+ {
+ case PRECODE_STUB:
+ ret = AsStubPrecode()->SetTargetInterlocked(target, expected);
+ break;
+
+#ifdef HAS_REMOTING_PRECODE
+ case PRECODE_REMOTING:
+ ret = AsRemotingPrecode()->SetTargetInterlocked(target, expected);
+ break;
+#endif // HAS_REMOTING_PRECODE
+
+#ifdef HAS_FIXUP_PRECODE
+ case PRECODE_FIXUP:
+ ret = AsFixupPrecode()->SetTargetInterlocked(target, expected);
+ break;
+#endif // HAS_FIXUP_PRECODE
+
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+ case PRECODE_THISPTR_RETBUF:
+ ret = AsThisPtrRetBufPrecode()->SetTargetInterlocked(target, expected);
+ break;
+#endif // HAS_THISPTR_RETBUF_PRECODE
+
+ default:
+ UnexpectedPrecodeType("Precode::SetTargetInterlocked", precodeType);
+ break;
+ }
+
+ //
+ // SetTargetInterlocked does not modify code on ARM, so flushing the instruction cache
+ // is not necessary there.
+ //
+#if !defined(_TARGET_ARM_)
+ if (ret) {
+ FlushInstructionCache(GetCurrentProcess(),this,SizeOf());
+ }
+#endif
+
+ _ASSERTE(!IsPointingToPrestub());
+ return ret;
+}
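+
+// Illustrative sketch (not exercised here): SetTargetInterlocked is a one-shot
+// publish. It only succeeds while the precode still points at the prestub, so
+// concurrent publishers race safely:
+//
+//     if (pPrecode->SetTargetInterlocked(pNewCode))
+//     {   /* we won the race; pNewCode is now the target */   }
+//     else
+//     {   /* someone published first; pPrecode->GetTarget() has the winner */  }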
+
+void Precode::Reset()
+{
+ WRAPPER_NO_CONTRACT;
+
+ MethodDesc* pMD = GetMethodDesc();
+ Init(GetType(), pMD, pMD->GetLoaderAllocatorForCode());
+ FlushInstructionCache(GetCurrentProcess(),this,SizeOf());
+}
+
+/* static */
+TADDR Precode::AllocateTemporaryEntryPoints(MethodDescChunk * pChunk,
+ LoaderAllocator * pLoaderAllocator,
+ AllocMemTracker * pamTracker)
+{
+ WRAPPER_NO_CONTRACT;
+
+ MethodDesc* pFirstMD = pChunk->GetFirstMethodDesc();
+
+ int count = pChunk->GetCount();
+
+ PrecodeType t = PRECODE_STUB;
+
+#ifdef HAS_FIXUP_PRECODE
+ // Default to faster fixup precode if possible
+ if (!pFirstMD->RequiresMethodDescCallingConvention(count > 1))
+ {
+ t = PRECODE_FIXUP;
+ }
+#endif // HAS_FIXUP_PRECODE
+
+ SIZE_T totalSize = SizeOfTemporaryEntryPoints(t, count);
+
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ // Note that these are just best guesses to save memory. If we guessed wrong,
+ // we will allocate the exact type of precode needed in GetOrCreatePrecode.
+ BOOL fForcedPrecode = pFirstMD->RequiresStableEntryPoint(count > 1);
+ if (!fForcedPrecode && (totalSize > MethodDescChunk::SizeOfCompactEntryPoints(count)))
+ return NULL;
+#endif
+
+ TADDR temporaryEntryPoints = (TADDR)pamTracker->Track(pLoaderAllocator->GetPrecodeHeap()->AllocAlignedMem(totalSize, AlignOf(t)));
+
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ if (t == PRECODE_FIXUP)
+ {
+ TADDR entryPoint = temporaryEntryPoints;
+ MethodDesc * pMD = pChunk->GetFirstMethodDesc();
+ for (int i = 0; i < count; i++)
+ {
+ ((FixupPrecode *)entryPoint)->Init(pMD, pLoaderAllocator, pMD->GetMethodDescIndex(), (count - 1) - i);
+
+ _ASSERTE((Precode *)entryPoint == GetPrecodeForTemporaryEntryPoint(temporaryEntryPoints, i));
+ entryPoint += sizeof(FixupPrecode);
+
+ pMD = (MethodDesc *)(dac_cast<TADDR>(pMD) + pMD->SizeOf());
+ }
+
+ ClrFlushInstructionCache((LPVOID)temporaryEntryPoints, count * sizeof(FixupPrecode));
+
+ return temporaryEntryPoints;
+ }
+#endif
+
+ SIZE_T oneSize = SizeOfTemporaryEntryPoint(t);
+ TADDR entryPoint = temporaryEntryPoints;
+ MethodDesc * pMD = pChunk->GetFirstMethodDesc();
+ for (int i = 0; i < count; i++)
+ {
+ ((Precode *)entryPoint)->Init(t, pMD, pLoaderAllocator);
+
+ _ASSERTE((Precode *)entryPoint == GetPrecodeForTemporaryEntryPoint(temporaryEntryPoints, i));
+ entryPoint += oneSize;
+
+ pMD = (MethodDesc *)(dac_cast<TADDR>(pMD) + pMD->SizeOf());
+ }
+
+ ClrFlushInstructionCache((LPVOID)temporaryEntryPoints, count * oneSize);
+
+ return temporaryEntryPoints;
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+
+static DataImage::ItemKind GetPrecodeItemKind(DataImage * image, MethodDesc * pMD, BOOL fIsPrebound = FALSE)
+{
+ STANDARD_VM_CONTRACT;
+
+ DataImage::ItemKind kind = DataImage::ITEM_METHOD_PRECODE_COLD_WRITEABLE;
+
+ DWORD flags = image->GetMethodProfilingFlags(pMD);
+
+ if (flags & (1 << WriteMethodPrecode))
+ {
+ kind = fIsPrebound ? DataImage::ITEM_METHOD_PRECODE_HOT : DataImage::ITEM_METHOD_PRECODE_HOT_WRITEABLE;
+ }
+ else
+ if (flags & (1 << ReadMethodPrecode))
+ {
+ kind = DataImage::ITEM_METHOD_PRECODE_HOT;
+ }
+ else
+ if (
+ fIsPrebound ||
+ // Generic method definitions get a precode to make GetMethodDescForSlot work.
+ // This precode should never be written to.
+ pMD->ContainsGenericVariables() ||
+ // Interface MDs are run only for remoting and COM interop, which is pretty rare. Make them cold.
+ pMD->IsInterface()
+ )
+ {
+ kind = DataImage::ITEM_METHOD_PRECODE_COLD;
+ }
+
+ return kind;
+}
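+
+// Summary of the mapping implemented above:
+//
+//   WriteMethodPrecode profiled -> ITEM_METHOD_PRECODE_HOT_WRITEABLE
+//                                  (ITEM_METHOD_PRECODE_HOT if prebound)
+//   ReadMethodPrecode profiled  -> ITEM_METHOD_PRECODE_HOT
+//   prebound, generic method definition, or interface MD
+//                               -> ITEM_METHOD_PRECODE_COLD
+//   everything else             -> ITEM_METHOD_PRECODE_COLD_WRITEABLE (default)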
+
+void Precode::Save(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc * pMD = GetMethodDesc();
+ PrecodeType t = GetType();
+
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ _ASSERTE(GetType() != PRECODE_FIXUP);
+#endif
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ // StubPrecode and RemotingPrecode may have straddlers (relocations crossing pages) on x86 and x64. We need
+ // to insert padding to eliminate them. To do that, we need to save these using a custom ZapNode that can only
+ // be implemented in dataimage.cpp or the zapper due to the factoring of the header files.
+ BOOL fIsPrebound = IsPrebound(image);
+ image->SavePrecode(this,
+ pMD,
+ t,
+ GetPrecodeItemKind(image, pMD, fIsPrebound),
+ fIsPrebound);
+#else
+ _ASSERTE(FitsIn<ULONG>(SizeOf(t)));
+ image->StoreStructure((void*)GetStart(),
+ static_cast<ULONG>(SizeOf(t)),
+ GetPrecodeItemKind(image, pMD, IsPrebound(image)),
+ AlignOf(t));
+#endif // _TARGET_X86_ || _TARGET_AMD64_
+}
+
+void Precode::Fixup(DataImage *image, MethodDesc * pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ PrecodeType precodeType = GetType();
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+#if defined(HAS_FIXUP_PRECODE)
+ if (precodeType == PRECODE_FIXUP)
+ {
+ AsFixupPrecode()->Fixup(image, pMD);
+ }
+#endif
+#else // _TARGET_X86_ || _TARGET_AMD64_
+ ZapNode * pCodeNode = NULL;
+
+ if (IsPrebound(image))
+ {
+ pCodeNode = image->GetCodeAddress(pMD);
+ }
+
+ switch (precodeType) {
+ case PRECODE_STUB:
+ AsStubPrecode()->Fixup(image);
+ break;
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ case PRECODE_NDIRECT_IMPORT:
+ AsNDirectImportPrecode()->Fixup(image);
+ break;
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+#ifdef HAS_REMOTING_PRECODE
+ case PRECODE_REMOTING:
+ AsRemotingPrecode()->Fixup(image, pCodeNode);
+ break;
+#endif // HAS_REMOTING_PRECODE
+#ifdef HAS_FIXUP_PRECODE
+ case PRECODE_FIXUP:
+ AsFixupPrecode()->Fixup(image, pMD);
+ break;
+#endif // HAS_FIXUP_PRECODE
+ default:
+ UnexpectedPrecodeType("Precode::Save", precodeType);
+ break;
+ }
+#endif // _TARGET_X86_ || _TARGET_AMD64_
+}
+
+BOOL Precode::IsPrebound(DataImage *image)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef HAS_REMOTING_PRECODE
+ // This makes sure that when IBC logging is on, the precode goes through the prestub.
+ if (GetAppDomain()->ToCompilationDomain()->m_fForceInstrument)
+ return FALSE;
+
+ if (GetType() != PRECODE_REMOTING)
+ return FALSE;
+
+ // Prebind the remoting precode if possible
+ return image->CanDirectCall(GetMethodDesc(), CORINFO_ACCESS_THIS);
+#else
+ return FALSE;
+#endif
+}
+
+void Precode::SaveChunk::Save(DataImage* image, MethodDesc * pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ PrecodeType precodeType = pMD->GetPrecodeType();
+
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ if (precodeType == PRECODE_FIXUP)
+ {
+ m_rgPendingChunk.Append(pMD);
+ return;
+ }
+#endif // HAS_FIXUP_PRECODE_CHUNKS
+
+ SIZE_T size = Precode::SizeOf(precodeType);
+ Precode* pPrecode = (Precode *)new (image->GetHeap()) BYTE[size];
+ pPrecode->Init(precodeType, pMD, NULL);
+ pPrecode->Save(image);
+
+ // Alias the temporary entrypoint
+ image->RegisterSurrogate(pMD, pPrecode);
+}
+
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+static void SaveFixupPrecodeChunk(DataImage * image, MethodDesc ** rgMD, COUNT_T count, DataImage::ItemKind kind)
+{
+ STANDARD_VM_CONTRACT;
+
+ ULONG size = sizeof(FixupPrecode) * count + sizeof(PTR_MethodDesc);
+ FixupPrecode * pBase = (FixupPrecode *)new (image->GetHeap()) BYTE[size];
+
+ ZapStoredStructure * pNode = image->StoreStructure(NULL, size, kind,
+ Precode::AlignOf(PRECODE_FIXUP));
+
+ for (COUNT_T i = 0; i < count; i++)
+ {
+ MethodDesc * pMD = rgMD[i];
+ FixupPrecode * pPrecode = pBase + i;
+
+ pPrecode->InitForSave((count - 1) - i);
+
+ image->BindPointer(pPrecode, pNode, i * sizeof(FixupPrecode));
+
+ // Alias the temporary entrypoint
+ image->RegisterSurrogate(pMD, pPrecode);
+ }
+
+ image->CopyData(pNode, pBase, size);
+}
+#endif // HAS_FIXUP_PRECODE_CHUNKS
+
+void Precode::SaveChunk::Flush(DataImage * image)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ if (m_rgPendingChunk.GetCount() == 0)
+ return;
+
+ // Sort MethodDescs by item kind for hot-cold splitting
+ struct SortMethodDesc : CQuickSort< MethodDesc * >
+ {
+ DataImage * m_image;
+
+ SortMethodDesc(DataImage *image, MethodDesc **pBase, SSIZE_T iCount)
+ : CQuickSort< MethodDesc * >(pBase, iCount),
+ m_image(image)
+ {
+ }
+
+ int Compare(MethodDesc ** ppMD1, MethodDesc ** ppMD2)
+ {
+ MethodDesc * pMD1 = *ppMD1;
+ MethodDesc * pMD2 = *ppMD2;
+
+ // Compare item kind
+ DataImage::ItemKind kind1 = GetPrecodeItemKind(m_image, pMD1);
+ DataImage::ItemKind kind2 = GetPrecodeItemKind(m_image, pMD2);
+
+ return kind1 - kind2;
+ }
+ };
+
+ SortMethodDesc sort(image, &(m_rgPendingChunk[0]), m_rgPendingChunk.GetCount());
+ sort.Sort();
+
+ DataImage::ItemKind pendingKind = DataImage::ITEM_METHOD_PRECODE_COLD_WRITEABLE;
+ COUNT_T pendingCount = 0;
+
+ COUNT_T i;
+ for (i = 0; i < m_rgPendingChunk.GetCount(); i++)
+ {
+ MethodDesc * pMD = m_rgPendingChunk[i];
+
+ DataImage::ItemKind kind = GetPrecodeItemKind(image, pMD);
+ if (kind != pendingKind)
+ {
+ if (pendingCount != 0)
+ SaveFixupPrecodeChunk(image, &(m_rgPendingChunk[i-pendingCount]), pendingCount, pendingKind);
+
+ pendingKind = kind;
+ pendingCount = 0;
+ }
+
+ pendingCount++;
+ }
+
+ // Flush the remaining items
+ SaveFixupPrecodeChunk(image, &(m_rgPendingChunk[i-pendingCount]), pendingCount, pendingKind);
+#endif // HAS_FIXUP_PRECODE_CHUNKS
+}
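+
+// Worked example of the chunking above: after sorting, a pending list with item
+// kinds [COLD, COLD, HOT, HOT, HOT] is flushed as two chunks: one chunk of two
+// COLD precodes followed by one chunk of three HOT precodes. A chunk boundary
+// is emitted whenever the item kind changes, plus one final flush for the tail.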
+
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#endif // !DACCESS_COMPILE
+
+
+#ifdef DACCESS_COMPILE
+void Precode::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ PrecodeType t = GetType();
+
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ if (t == PRECODE_FIXUP)
+ {
+ AsFixupPrecode()->EnumMemoryRegions(flags);
+ return;
+ }
+#endif
+
+ DacEnumMemoryRegion(GetStart(), SizeOf(t));
+}
+#endif
+
diff --git a/src/vm/precode.h b/src/vm/precode.h
new file mode 100644
index 0000000000..dcbb7f3867
--- /dev/null
+++ b/src/vm/precode.h
@@ -0,0 +1,370 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// precode.h
+//
+
+//
+// Stub that runs before the actual native code
+
+#ifndef __PRECODE_H__
+#define __PRECODE_H__
+
+typedef DPTR(class Precode) PTR_Precode;
+
+#ifndef PRECODE_ALIGNMENT
+#define PRECODE_ALIGNMENT sizeof(void*)
+#endif
+
+enum PrecodeType {
+ PRECODE_INVALID = InvalidPrecode::Type,
+ PRECODE_STUB = StubPrecode::Type,
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ PRECODE_NDIRECT_IMPORT = NDirectImportPrecode::Type,
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+#ifdef HAS_REMOTING_PRECODE
+ PRECODE_REMOTING = RemotingPrecode::Type,
+#endif // HAS_REMOTING_PRECODE
+#ifdef HAS_FIXUP_PRECODE
+ PRECODE_FIXUP = FixupPrecode::Type,
+#endif // HAS_FIXUP_PRECODE
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+ PRECODE_THISPTR_RETBUF = ThisPtrRetBufPrecode::Type,
+#endif // HAS_THISPTR_RETBUF_PRECODE
+};
+
+// For more details see file:../../doc/BookOfTheRuntime/ClassLoader/MethodDescDesign.doc
+class Precode {
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+ BYTE m_data[SIZEOF_PRECODE_BASE];
+
+ StubPrecode* AsStubPrecode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return dac_cast<PTR_StubPrecode>(this);
+ }
+
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+public:
+ // Fake precodes have to be exposed
+ NDirectImportPrecode* AsNDirectImportPrecode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return dac_cast<PTR_NDirectImportPrecode>(this);
+ }
+
+private:
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+
+#ifdef HAS_REMOTING_PRECODE
+ RemotingPrecode* AsRemotingPrecode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return dac_cast<PTR_RemotingPrecode>(this);
+ }
+#endif // HAS_REMOTING_PRECODE
+
+#ifdef HAS_FIXUP_PRECODE
+ FixupPrecode* AsFixupPrecode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return dac_cast<PTR_FixupPrecode>(this);
+ }
+#endif // HAS_FIXUP_PRECODE
+
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+ ThisPtrRetBufPrecode* AsThisPtrRetBufPrecode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return dac_cast<PTR_ThisPtrRetBufPrecode>(this);
+ }
+#endif // HAS_THISPTR_RETBUF_PRECODE
+
+ TADDR GetStart()
+ {
+ SUPPORTS_DAC;
+ LIMITED_METHOD_CONTRACT;
+ return dac_cast<TADDR>(this);
+ }
+
+ static void UnexpectedPrecodeType(const char * originator, PrecodeType precodeType)
+ {
+ SUPPORTS_DAC;
+#ifdef DACCESS_COMPILE
+ DacError(E_UNEXPECTED);
+#else
+#ifdef _PREFIX_
+ // We only use __UNREACHABLE here since otherwise it would be a hint
+ // for the compiler to fold this case with the other cases in a switch
+ // statement. However, we would rather have this case be a separate
+ // code path so that we will get a clean crash sooner.
+ __UNREACHABLE("Unexpected precode type");
+#endif
+ CONSISTENCY_CHECK_MSGF(false, ("%s: Unexpected precode type: 0x%02x.", originator, precodeType));
+#endif
+ }
+
+public:
+ PrecodeType GetType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+#ifdef OFFSETOF_PRECODE_TYPE
+
+ BYTE type = m_data[OFFSETOF_PRECODE_TYPE];
+#ifdef _TARGET_X86_
+ if (type == X86_INSTR_MOV_RM_R)
+ type = m_data[OFFSETOF_PRECODE_TYPE_MOV_RM_R];
+#endif // _TARGET_X86_
+
+#ifdef _TARGET_AMD64_
+ if (type == (X86_INSTR_MOV_R10_IMM64 & 0xFF))
+ type = m_data[OFFSETOF_PRECODE_TYPE_MOV_R10];
+ else if ((type == (X86_INSTR_CALL_REL32 & 0xFF)) || (type == (X86_INSTR_JMP_REL32 & 0xFF)))
+ type = m_data[OFFSETOF_PRECODE_TYPE_CALL_OR_JMP];
+#endif // _TARGET_AMD64_
+
+#if defined(HAS_FIXUP_PRECODE) && (defined(_TARGET_X86_) || defined(_TARGET_AMD64_))
+ if (type == FixupPrecode::TypePrestub)
+ type = FixupPrecode::Type;
+#endif
+
+#ifdef _TARGET_ARM_
+ static_assert_no_msg(offsetof(StubPrecode, m_pTarget) == offsetof(NDirectImportPrecode, m_pMethodDesc));
+ // If the precode does not have thumb bit on target, it must be NDirectImportPrecode.
+ if (type == StubPrecode::Type && ((AsStubPrecode()->m_pTarget & THUMB_CODE) == 0))
+ type = NDirectImportPrecode::Type;
+#endif
+
+ return (PrecodeType)type;
+
+#else // OFFSETOF_PRECODE_TYPE
+ return PRECODE_STUB;
+#endif // OFFSETOF_PRECODE_TYPE
+ }
+
+ static BOOL IsValidType(PrecodeType t);
+
+ static int AlignOf(PrecodeType t)
+ {
+ SUPPORTS_DAC;
+ int align = PRECODE_ALIGNMENT;
+
+#if defined(_TARGET_X86_) && defined(HAS_FIXUP_PRECODE)
+ // Fixup precodes have to be aligned to allow atomic patching
+ if (t == PRECODE_FIXUP)
+ align = 8;
+#endif // _TARGET_X86_ && HAS_FIXUP_PRECODE
+
+ return align;
+ }
+
+ static SIZE_T SizeOf(PrecodeType t);
+
+ SIZE_T SizeOf()
+ {
+ WRAPPER_NO_CONTRACT;
+ return SizeOf(GetType());
+ }
+
+ // Note: This is the immediate target of the precode. It does not follow the jump stub if there is one.
+ PCODE GetTarget();
+
+ BOOL IsPointingTo(PCODE target, PCODE addr)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+#ifdef CROSSGEN_COMPILE
+ // Crossgen does not create jump stubs on AMD64, so just always return FALSE here to
+ // avoid non-deterministic behavior.
+ return FALSE;
+#else // CROSSGEN_COMPILE
+ if (target == addr)
+ return TRUE;
+
+#ifdef _TARGET_AMD64_
+ // Handle jump stubs
+ if (isJumpRel64(target)) {
+ target = decodeJump64(target);
+ if (target == addr)
+ return TRUE;
+ }
+#endif // _TARGET_AMD64_
+
+ return FALSE;
+#endif // CROSSGEN_COMPILE
+ }
+
+ BOOL IsPointingToNativeCode(PCODE pNativeCode)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+#ifdef HAS_REMOTING_PRECODE
+ // Remoting precode is a special case
+ if (GetType() == PRECODE_REMOTING)
+ return FALSE;
+#endif
+
+ return IsPointingTo(GetTarget(), pNativeCode);
+ }
+
+ BOOL IsPointingToPrestub(PCODE target);
+
+ BOOL IsPointingToPrestub()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsPointingToPrestub(GetTarget());
+ }
+
+ PCODE GetEntryPoint()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return dac_cast<TADDR>(this) + GetEntryPointOffset();
+ }
+
+ static SIZE_T GetEntryPointOffset()
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef _TARGET_ARM_
+ return THUMB_CODE;
+#else
+ return 0;
+#endif
+ }
+
+ MethodDesc * GetMethodDesc(BOOL fSpeculative = FALSE);
+ BOOL IsCorrectMethodDesc(MethodDesc * pMD);
+
+ static Precode* Allocate(PrecodeType t, MethodDesc* pMD,
+ LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker);
+ void Init(PrecodeType t, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
+
+#ifndef DACCESS_COMPILE
+ BOOL SetTargetInterlocked(PCODE target);
+
+ // Reset precode to point to prestub
+ void Reset();
+#endif // DACCESS_COMPILE
+
+ static Precode* GetPrecodeFromEntryPoint(PCODE addr, BOOL fSpeculative = FALSE)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#ifdef DACCESS_COMPILE
+ // Always use speculative checks with DAC
+ fSpeculative = TRUE;
+#endif
+
+ TADDR pInstr = PCODEToPINSTR(addr);
+
+ // Always do consistency check in debug
+ if (fSpeculative INDEBUG(|| TRUE))
+ {
+ if (!IS_ALIGNED(pInstr, PRECODE_ALIGNMENT) || !IsValidType(PTR_Precode(pInstr)->GetType()))
+ {
+ if (fSpeculative) return NULL;
+ _ASSERTE(!"Precode::GetPrecodeFromEntryPoint: Unexpected code in precode");
+ }
+ }
+
+ Precode* pPrecode = PTR_Precode(pInstr);
+
+ if (!fSpeculative)
+ {
+ g_IBCLogger.LogMethodPrecodeAccess(pPrecode->GetMethodDesc());
+ }
+
+ return pPrecode;
+ }
+
+ // If addr is patched fixup precode, returns address that it points to. Otherwise returns NULL.
+ static PCODE TryToSkipFixupPrecode(PCODE addr);
+
+ //
+ // Precode as temporary entrypoint
+ //
+
+ static SIZE_T SizeOfTemporaryEntryPoint(PrecodeType t)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ _ASSERTE(t != PRECODE_FIXUP);
+#endif
+ return ALIGN_UP(SizeOf(t), AlignOf(t));
+ }
+
+ static Precode * GetPrecodeForTemporaryEntryPoint(TADDR temporaryEntryPoints, int index);
+
+ static SIZE_T SizeOfTemporaryEntryPoints(PrecodeType t, int count);
+ static SIZE_T SizeOfTemporaryEntryPoints(TADDR temporaryEntryPoints, int count)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return SizeOfTemporaryEntryPoints(PTR_Precode(temporaryEntryPoints)->GetType(), count);
+ }
+
+ static TADDR AllocateTemporaryEntryPoints(MethodDescChunk* pChunk,
+ LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker);
+
+#ifdef FEATURE_PREJIT
+ //
+ // NGEN stuff
+ //
+
+ void Save(DataImage *image);
+ void Fixup(DataImage *image, MethodDesc * pMD);
+
+ BOOL IsPrebound(DataImage *image);
+
+ // Helper class for saving precodes in chunks
+ class SaveChunk
+ {
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ // Array of methods to be saved in the method desc chunk
+ InlineSArray<MethodDesc *, 20> m_rgPendingChunk;
+#endif // HAS_FIXUP_PRECODE_CHUNKS
+
+ public:
+ void Save(DataImage * image, MethodDesc * pMD);
+ void Flush(DataImage * image);
+ };
+#endif // FEATURE_PREJIT
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+#ifdef HAS_FIXUP_PRECODE_CHUNKS
+ static DWORD GetOffsetOfBase(PrecodeType t, DWORD count)
+ {
+ assert(t == PRECODE_FIXUP);
+ return (DWORD)(count * sizeof(FixupPrecode));
+ }
+
+ static DWORD GetOffset(PrecodeType t, DWORD index, DWORD count)
+ {
+ assert(t == PRECODE_FIXUP);
+ assert(index < count);
+ return (DWORD)((count - index - 1)* sizeof(FixupPrecode));
+ }
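+
+ // Worked example: within a chunk of count = 4 fixup precodes, the precodes
+ // are laid out in reverse index order, so
+ //   GetOffset(PRECODE_FIXUP, 0, 4) == 3 * sizeof(FixupPrecode)
+ //   GetOffset(PRECODE_FIXUP, 3, 4) == 0
+ // and the shared MethodDesc base pointer lives at
+ //   GetOffsetOfBase(PRECODE_FIXUP, 4) == 4 * sizeof(FixupPrecode).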
+#endif
+};
+
+#endif // __PRECODE_H__
diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
new file mode 100644
index 0000000000..a2e6fd3b37
--- /dev/null
+++ b/src/vm/prestub.cpp
@@ -0,0 +1,2657 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: Prestub.cpp
+//
+
+// ===========================================================================
+// This file contains the implementation for creating and using prestubs
+// ===========================================================================
+//
+
+
+#include "common.h"
+#include "vars.hpp"
+#include "security.h"
+#include "eeconfig.h"
+#include "dllimport.h"
+#include "comdelegate.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "dbginterface.h"
+#include "listlock.inl"
+#include "stubgen.h"
+#include "eventtrace.h"
+#include "constrainedexecutionregion.h"
+#include "array.h"
+#include "compile.h"
+#include "ecall.h"
+#include "virtualcallstub.h"
+
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif
+
+#ifdef FEATURE_INTERPRETER
+#include "interpreter.h"
+#endif
+
+#ifdef FEATURE_COMINTEROP
+#include "clrtocomcall.h"
+#endif
+
+#include "mdaassistants.h"
+
+#ifdef FEATURE_STACK_SAMPLING
+#include "stacksampler.h"
+#endif
+
+#ifndef DACCESS_COMPILE
+
+EXTERN_C void STDCALL ThePreStub();
+EXTERN_C void STDCALL ThePreStubPatch();
+
+//==========================================================================
+
+PCODE MethodDesc::DoBackpatch(MethodTable * pMT, MethodTable *pDispatchingMT, BOOL fFullBackPatch)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(!ContainsGenericVariables());
+#ifndef FEATURE_INTERPRETER
+ PRECONDITION(HasStableEntryPoint());
+#endif // FEATURE_INTERPRETER
+ PRECONDITION(pMT == GetMethodTable());
+ }
+ CONTRACTL_END;
+#ifdef FEATURE_INTERPRETER
+ PCODE pTarget = GetMethodEntryPoint();
+#else
+ PCODE pTarget = GetStableEntryPoint();
+#endif
+
+ if (!HasTemporaryEntryPoint())
+ return pTarget;
+
+ PCODE pExpected = GetTemporaryEntryPoint();
+
+ if (pExpected == pTarget)
+ return pTarget;
+
+ // True interface methods are never backpatched
+ if (pMT->IsInterface() && !IsStatic())
+ return pTarget;
+
+ if (fFullBackPatch)
+ {
+ FuncPtrStubs * pFuncPtrStubs = GetLoaderAllocator()->GetFuncPtrStubsNoCreate();
+ if (pFuncPtrStubs != NULL)
+ {
+ Precode* pFuncPtrPrecode = pFuncPtrStubs->Lookup(this);
+ if (pFuncPtrPrecode != NULL)
+ {
+ // If there is a funcptr precode to patch, we are done for this round.
+ if (pFuncPtrPrecode->SetTargetInterlocked(pTarget))
+ return pTarget;
+ }
+ }
+
+#ifndef HAS_COMPACT_ENTRYPOINTS
+ // Patch the fake entrypoint if necessary
+ Precode::GetPrecodeFromEntryPoint(pExpected)->SetTargetInterlocked(pTarget);
+#endif // HAS_COMPACT_ENTRYPOINTS
+ }
+
+ if (HasNonVtableSlot())
+ return pTarget;
+
+ BOOL fBackpatched = FALSE;
+
+#define BACKPATCH(pPatchedMT) \
+ do \
+ { \
+ if (pPatchedMT->GetSlot(dwSlot) == pExpected) \
+ { \
+ pPatchedMT->SetSlot(dwSlot, pTarget); \
+ fBackpatched = TRUE; \
+ } \
+ } \
+ while(0)
+
+ // The owning slot has been updated already, so there is no need to backpatch it
+ _ASSERTE(pMT->GetSlot(GetSlot()) == pTarget);
+
+ if (pDispatchingMT != NULL && pDispatchingMT != pMT)
+ {
+ DWORD dwSlot = GetSlot();
+
+ BACKPATCH(pDispatchingMT);
+
+ if (fFullBackPatch)
+ {
+ //
+ // Backpatch the MethodTable that code:MethodTable::GetRestoredSlot() reads the value from.
+ // VSD reads the slot value using code:MethodTable::GetRestoredSlot(), and so we need to make sure
+ // that it returns the stable entrypoint eventually to avoid going through the slow path all the time.
+ //
+ MethodTable * pRestoredSlotMT = pDispatchingMT->GetRestoredSlotMT(dwSlot);
+
+ BACKPATCH(pRestoredSlotMT);
+ }
+ }
+
+ if (IsMethodImpl())
+ {
+ MethodImpl::Iterator it(this);
+ while (it.IsValid())
+ {
+ DWORD dwSlot = it.GetSlot();
+
+ BACKPATCH(pMT);
+
+ if (pDispatchingMT != NULL)
+ {
+ BACKPATCH(pDispatchingMT);
+ }
+
+ it.Next();
+ }
+ }
+
+ if (fFullBackPatch && !fBackpatched && IsDuplicate())
+ {
+ // If this is a duplicate, let's scan the rest of the VTable hunting for other hits.
+ unsigned numSlots = pMT->GetNumVirtuals();
+ for (DWORD dwSlot=0; dwSlot<numSlots; dwSlot++)
+ {
+ BACKPATCH(pMT);
+
+ if (pDispatchingMT != NULL)
+ {
+ BACKPATCH(pDispatchingMT);
+ }
+ }
+ }
+
+#undef BACKPATCH
+
+ return pTarget;
+}
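+
+// In summary, DoBackpatch publishes the stable entry point to every place that
+// may still hold the temporary entry point: a funcptr-stub precode (if one was
+// created), the temporary-entrypoint precode itself, the slot in pDispatchingMT
+// together with the MethodTable that GetRestoredSlot() reads from, any
+// MethodImpl slots, and (for duplicates) any other vtable slots that still
+// match. The owning slot is asserted to have been patched already by the caller.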
+
+// <TODO> FIX IN BETA 2
+//
+// g_pNotificationTable is only modified by the DAC and therefore the
+// optimizer can assume that it will always be its default value and has
+// been seen to (on IA64 free builds) eliminate the code in DACNotifyCompilationFinished
+// such that DAC notifications are no longer sent.
+//
+// TODO: fix this in Beta 2
+// the RIGHT fix is to make g_pNotificationTable volatile, but currently
+// we don't have DAC macros to do that. Additionally, there are a number
+// of other places we should look at DAC definitions to determine if they
+// should be also declared volatile.
+//
+// for now we just turn off optimization for these guys
+#ifdef _MSC_VER
+#pragma optimize("", off)
+#endif
+
+void DACNotifyCompilationFinished(MethodDesc *methodDesc)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ // Is the list active?
+ JITNotifications jn(g_pNotificationTable);
+ if (jn.IsActive())
+ {
+ // Get Module and mdToken
+ mdToken t = methodDesc->GetMemberDef();
+ Module *modulePtr = methodDesc->GetModule();
+
+ _ASSERTE(modulePtr);
+
+ // Are we listed?
+ USHORT jnt = jn.Requested((TADDR) modulePtr, t);
+ if (jnt & CLRDATA_METHNOTIFY_GENERATED)
+ {
+ // If so, throw an exception!
+ DACNotify::DoJITNotification(methodDesc);
+ }
+ }
+}
+
+#ifdef _MSC_VER
+#pragma optimize("", on)
+#endif
+// </TODO>
+
+
+// ********************************************************************
+// README!!
+// ********************************************************************
+
+// MakeJitWorker is the thread safe way to invoke the JIT compiler
+// If multiple threads get in here for the same pMD, ALL of them
+// MUST return the SAME value for pstub.
+//
+// This function creates a DeadlockAware list of methods being jitted
+// which prevents us from trying to JIT the same method more than once.
+
+
+PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, DWORD flags, DWORD flags2)
+{
+ STANDARD_VM_CONTRACT;
+
+ BOOL fIsILStub = IsILStub(); // @TODO: understand the need for this special case
+
+ LOG((LF_JIT, LL_INFO1000000,
+ "MakeJitWorker(" FMT_ADDR ", %s) for %s:%s\n",
+ DBG_ADDR(this),
+ fIsILStub ? " TRUE" : "FALSE",
+ GetMethodTable()->GetDebugClassName(),
+ m_pszDebugMethodName));
+
+ PCODE pCode = NULL;
+#ifdef FEATURE_INTERPRETER
+ PCODE pPreviousInterpStub = NULL;
+ BOOL fInterpreted = FALSE;
+ BOOL fStable = TRUE; // True iff the new code address (to be stored in pCode), is a stable entry point.
+#endif
+
+#ifdef FEATURE_MULTICOREJIT
+ MulticoreJitManager & mcJitManager = GetAppDomain()->GetMulticoreJitManager();
+
+ bool fBackgroundThread = (flags & CORJIT_FLG_MCJIT_BACKGROUND) != 0;
+#endif
+
+ {
+ // Enter the global lock which protects the list of all functions being JITd
+ ListLockHolder pJitLock (GetDomain()->GetJitLock());
+
+ // It is possible that another thread stepped in before we entered the global lock for the first time.
+ pCode = GetNativeCode();
+ if (pCode != NULL)
+ {
+#ifdef FEATURE_INTERPRETER
+ if (Interpreter::InterpretationStubToMethodInfo(pCode) == this)
+ {
+ pPreviousInterpStub = pCode;
+ }
+ else
+#endif // FEATURE_INTERPRETER
+ goto Done;
+ }
+
+ const char *description = "jit lock";
+ INDEBUG(description = m_pszDebugMethodName;)
+ ListLockEntryHolder pEntry(ListLockEntry::Find(pJitLock, this, description));
+
+ // We have an entry now, we can release the global lock
+ pJitLock.Release();
+
+ // Take the entry lock
+ {
+ ListLockEntryLockHolder pEntryLock(pEntry, FALSE);
+
+ if (pEntryLock.DeadlockAwareAcquire())
+ {
+ if (pEntry->m_hrResultCode == S_FALSE)
+ {
+ // Nobody has jitted the method yet
+ }
+ else
+ {
+ // We came in to jit but someone beat us so return the
+ // jitted method!
+
+ // We can just fall through because we will notice below that
+ // the method has code.
+
+ // @todo: Note that we may have a failed HRESULT here -
+ // we might want to return an early error rather than
+ // repeatedly failing the jit.
+ }
+ }
+ else
+ {
+ // Taking this lock would cause a deadlock (presumably because we
+ // are involved in a class constructor circular dependency.) For
+ // instance, another thread may be waiting to run the class constructor
+ // that we are jitting, but is currently jitting this function.
+ //
+ // To remedy this, we want to go ahead and do the jitting anyway.
+ // The other threads contending for the lock will then notice that
+ // the jit finished while they were running class constructors, and abort their
+ // current jit effort.
+ //
+ // We don't have to do anything special right here since we
+ // can check HasNativeCode() to detect this case later.
+ //
+ // Note that at this point we don't have the lock, but that's OK because the
+ // thread which does have the lock is blocked waiting for us.
+ }
+
+ // It is possible that another thread stepped in before we entered the lock.
+ pCode = GetNativeCode();
+#ifdef FEATURE_INTERPRETER
+ if (pCode != NULL && (pCode != pPreviousInterpStub))
+#else
+ if (pCode != NULL)
+#endif // FEATURE_INTERPRETER
+ {
+ goto Done;
+ }
+
+ SString namespaceOrClassName, methodName, methodSignature;
+
+ PCODE pOtherCode = NULL; // Need to move here due to 'goto GotNewCode'
+
+#ifdef FEATURE_MULTICOREJIT
+
+ bool fCompiledInBackground = false;
+
+ // If not called from multi-core JIT thread,
+ if (! fBackgroundThread)
+ {
+ // Quick check, before calling the expensive out-of-line function, whether this method's domain has code JITted by the background thread
+ if (mcJitManager.GetMulticoreJitCodeStorage().GetRemainingMethodCount() > 0)
+ {
+ if (MulticoreJitManager::IsMethodSupported(this))
+ {
+ pCode = mcJitManager.RequestMethodCode(this); // Query multi-core JIT manager for compiled code
+
+ // The multicore JIT manager starts a background thread to pre-compile methods, but it does not back-patch, notify the profiler, or notify the DAC.
+ // Jump to GotNewCode to do so.
+ if (pCode != NULL)
+ {
+ fCompiledInBackground = true;
+
+#ifdef DEBUGGING_SUPPORTED
+ // Notify the debugger of the jitted function
+ if (g_pDebugInterface != NULL)
+ {
+ g_pDebugInterface->JITComplete(this, pCode);
+ }
+#endif
+
+ goto GotNewCode;
+ }
+ }
+ }
+ }
+#endif
+
+ if (fIsILStub)
+ {
+ // we race with other threads to JIT the code for an IL stub and the
+ // IL header is released once one of the threads completes. As a result
+ // we must be inside the lock to reliably get the IL header for the
+ // stub.
+
+ ILStubResolver* pResolver = AsDynamicMethodDesc()->GetILStubResolver();
+ ILHeader = pResolver->GetILHeader();
+ }
+
+#ifdef MDA_SUPPORTED
+ MdaJitCompilationStart* pProbe = MDA_GET_ASSISTANT(JitCompilationStart);
+ if (pProbe)
+ pProbe->NowCompiling(this);
+#endif // MDA_SUPPORTED
+
+#ifdef PROFILING_SUPPORTED
+ // If profiling, need to give a chance for a tool to examine and modify
+ // the IL before it gets to the JIT. This allows one to add probe calls for
+ // things like code coverage, performance, or whatever.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+
+ // Multicore JIT should be disabled when CORProfilerTrackJITInfo is on
+ // But there could be a corner case in which the profiler is attached while the multicore background thread is calling MakeJitWorker
+ // Disable this block when calling from multicore JIT background thread
+ if (!IsNoMetadata()
+#ifdef FEATURE_MULTICOREJIT
+
+ && (! fBackgroundThread)
+#endif
+ )
+ {
+ g_profControlBlock.pProfInterface->JITCompilationStarted((FunctionID) this, TRUE);
+ // The profiler may have changed the code on the callback. Need to
+ // pick up the new code. Note that you have to be fully trusted in
+ // this mode and the code will not be verified.
+ COR_ILMETHOD *pilHeader = GetILHeader(TRUE);
+ new (ILHeader) COR_ILMETHOD_DECODER(pilHeader, GetMDImport(), NULL);
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+#ifdef FEATURE_INTERPRETER
+ // We move the ETW event for start of JITting inward, after we make the decision
+ // to JIT rather than interpret.
+#else // FEATURE_INTERPRETER
+ // Fire an ETW event to mark the beginning of JIT'ing
+ ETW::MethodLog::MethodJitting(this, &namespaceOrClassName, &methodName, &methodSignature);
+#endif // FEATURE_INTERPRETER
+
+#ifdef FEATURE_STACK_SAMPLING
+#ifdef FEATURE_MULTICOREJIT
+ if (!fBackgroundThread)
+#endif // FEATURE_MULTICOREJIT
+ {
+ StackSampler::RecordJittingInfo(this, flags, flags2);
+ }
+#endif // FEATURE_STACK_SAMPLING
+
+ EX_TRY
+ {
+ pCode = UnsafeJitFunction(this, ILHeader, flags, flags2);
+ }
+ EX_CATCH
+ {
+ // If the current thread threw an exception, but a competing thread
+ // somehow succeeded at JITting the same function (e.g., out of memory
+ // encountered on current thread but not competing thread), then go ahead
+ // and swallow this current thread's exception, since we somehow managed
+ // to successfully JIT the code on the other thread.
+ //
+ // Note that if a deadlock cycle is broken, that does not result in an
+ // exception--the thread would just pass through the lock and JIT the
+ // function in competition with the other thread (with the winner of the
+ // race decided later on when we do SetNativeCodeInterlocked). This
+ // try/catch is purely to deal with the (unusual) case where a competing
+ // thread succeeded where we aborted.
+
+ pOtherCode = GetNativeCode();
+
+ if (pOtherCode == NULL)
+ {
+ pEntry->m_hrResultCode = E_FAIL;
+ EX_RETHROW;
+ }
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+
+ if (pOtherCode != NULL)
+ {
+ // Somebody finished jitting recursively while we were jitting the method.
+ // Just use their method & leak the one we finished. (Normally we hope
+ // not to finish our JIT in this case, as we will abort early if we notice
+ // a reentrant jit has occurred. But we may not catch every place so we
+ // do a definitive final check here.)
+ pCode = pOtherCode;
+ goto Done;
+ }
+
+ _ASSERTE(pCode != NULL);
+
+#ifdef HAVE_GCCOVER
+ if (GCStress<cfg_instr_jit>::IsEnabled())
+ {
+ SetupGcCoverage(this, (BYTE*) pCode);
+ }
+#endif // HAVE_GCCOVER
+
+#ifdef FEATURE_INTERPRETER
+ // Determine whether the new code address is "stable"...= is not an interpreter stub.
+ fInterpreted = (Interpreter::InterpretationStubToMethodInfo(pCode) == this);
+ fStable = !fInterpreted;
+#endif // FEATURE_INTERPRETER
+
+#ifdef FEATURE_MULTICOREJIT
+
+ // If called from multi-core JIT background thread, store code under lock, delay patching until code is queried from application threads
+ if (fBackgroundThread)
+ {
+ // Fire an ETW event to mark the end of JIT'ing
+ ETW::MethodLog::MethodJitted(this, &namespaceOrClassName, &methodName, &methodSignature, pCode, 0 /* ReJITID */);
+
+ mcJitManager.GetMulticoreJitCodeStorage().StoreMethodCode(this, pCode);
+
+ goto Done;
+ }
+
+GotNewCode:
+#endif
+ // If this function had already been requested for rejit (before its original
+ // code was jitted), then give the rejit manager a chance to jump-stamp the
+ // code we just compiled so the first thread entering the function will jump
+ // to the prestub and trigger the rejit. Note that the PublishMethodHolder takes
+ // a lock to avoid a particular kind of rejit race. See
+ // code:ReJitManager::PublishMethodHolder::PublishMethodHolder#PublishCode for
+ // details on the rejit race.
+ //
+ // Aside from rejit, performing a SetNativeCodeInterlocked at this point
+ // generally ensures that there is only one winning version of the native
+ // code. This also avoids races with the profiler overriding NGen'ed code (see
+ // matching SetNativeCodeInterlocked done after
+ // JITCachedFunctionSearchStarted)
+#ifdef FEATURE_INTERPRETER
+ PCODE pExpected = pPreviousInterpStub;
+ if (pExpected == NULL) pExpected = GetTemporaryEntryPoint();
+#endif
+ {
+ ReJitPublishMethodHolder publishWorker(this, pCode);
+ if (!SetNativeCodeInterlocked(pCode
+#ifdef FEATURE_INTERPRETER
+ , pExpected, fStable
+#endif
+ ))
+ {
+ // Another thread beat us to publishing its copy of the JITted code.
+ pCode = GetNativeCode();
+ goto Done;
+ }
+ }
+
+#ifdef FEATURE_INTERPRETER
+ // State for dynamic methods cannot be freed if the method was ever interpreted,
+ // since there is no way to ensure that it is not in use at the moment.
+ if (IsDynamicMethod() && !fInterpreted && (pPreviousInterpStub == NULL))
+ {
+ AsDynamicMethodDesc()->GetResolver()->FreeCompileTimeState();
+ }
+#endif // FEATURE_INTERPRETER
+
+ // We succeeded in jitting the code, and our jitted code is the one that's going to run now.
+ pEntry->m_hrResultCode = S_OK;
+
+ #ifdef PROFILING_SUPPORTED
+ // Notify the profiler that JIT completed.
+ // Must do this after the address has been set.
+ // @ToDo: Why must we set the address before notifying the profiler ??
+ // Note that if IsInterceptedForDeclSecurity is set no one should access the jitted code address anyway.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+ if (!IsNoMetadata())
+ {
+ g_profControlBlock.pProfInterface->
+ JITCompilationFinished((FunctionID) this,
+ pEntry->m_hrResultCode,
+ TRUE);
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+#ifdef FEATURE_MULTICOREJIT
+ if (! fCompiledInBackground)
+#endif
+#ifdef FEATURE_INTERPRETER
+ // If we didn't JIT, but rather, created an interpreter stub (i.e., fStable is false), don't tell ETW that we did.
+ if (fStable)
+#endif // FEATURE_INTERPRETER
+ {
+ // Fire an ETW event to mark the end of JIT'ing
+ ETW::MethodLog::MethodJitted(this, &namespaceOrClassName, &methodName, &methodSignature, pCode, 0 /* ReJITID */);
+ }
+
+
+#ifdef FEATURE_MULTICOREJIT
+
+ // If not called from the multi-core JIT thread and code did not come from storage, do a quick check before calling the out-of-line function
+ if (! fBackgroundThread && ! fCompiledInBackground && mcJitManager.IsRecorderActive())
+ {
+ if (MulticoreJitManager::IsMethodSupported(this))
+ {
+ mcJitManager.RecordMethodJit(this); // Tell multi-core JIT manager to record method on successful JITting
+ }
+ }
+#endif
+
+ if (!fIsILStub)
+ {
+ // The notification will only occur if someone has registered for this method.
+ DACNotifyCompilationFinished(this);
+ }
+ }
+ }
+
+Done:
+
+ // We must have code by now.
+ _ASSERTE(pCode != NULL);
+
+ LOG((LF_CORDB, LL_EVERYTHING, "MethodDesc::MakeJitWorker finished. Stub is" FMT_ADDR "\n",
+ DBG_ADDR(pCode)));
+
+ return pCode;
+}
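+
+// In summary, the locking protocol above is:
+//   1. Take the per-domain JIT list lock just long enough to find or create a
+//      ListLockEntry for this MethodDesc.
+//   2. Take the per-method entry lock with DeadlockAwareAcquire; on a detected
+//      deadlock (e.g. a class-constructor cycle), proceed to JIT anyway.
+//   3. Re-check GetNativeCode() after each lock acquisition, since another
+//      thread may have published code in the meantime.
+//   4. Publish via SetNativeCodeInterlocked so that exactly one copy of the
+//      code wins, and all racing threads return that same PCODE.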
+
+#ifdef FEATURE_STUBS_AS_IL
+
+// CreateInstantiatingILStubTargetSig:
+// This method is used to create the signature of the target of the ILStub
+// for instantiating and unboxing stubs, when/where we need to introduce a generic context.
+// And since the generic context is a hidden parameter, we're creating a signature that
+// looks like a non-generic signature, but has one additional parameter right after the thisptr.
+void CreateInstantiatingILStubTargetSig(MethodDesc *pBaseMD,
+ SigTypeContext &typeContext,
+ SigBuilder *stubSigBuilder)
+{
+ STANDARD_VM_CONTRACT;
+
+ MetaSig msig(pBaseMD);
+ BYTE callingConvention = IMAGE_CEE_CS_CALLCONV_DEFAULT;
+ if (msig.HasThis())
+ callingConvention |= IMAGE_CEE_CS_CALLCONV_HASTHIS;
+ // CallingConvention
+ stubSigBuilder->AppendByte(callingConvention);
+
+ // ParamCount
+ stubSigBuilder->AppendData(msig.NumFixedArgs() + 1); // +1 is for context param
+
+ // Return type
+ SigPointer pReturn = msig.GetReturnProps();
+ pReturn.ConvertToInternalExactlyOne(msig.GetModule(), &typeContext, stubSigBuilder, FALSE);
+
+ // The hidden context parameter
+ stubSigBuilder->AppendElementType(ELEMENT_TYPE_I);
+
+ // Copy rest of the arguments
+ msig.NextArg();
+ SigPointer pArgs = msig.GetArgProps();
+ for (unsigned i = 0; i < msig.NumFixedArgs(); i++)
+ {
+ pArgs.ConvertToInternalExactlyOne(msig.GetModule(), &typeContext, stubSigBuilder);
+ }
+
+}
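+
+// Worked example (a sketch; the exact element types come from the metadata):
+// for an instance method on a shared generic type with managed signature
+//
+//     int Foo(string s)
+//
+// the stub target signature built above is effectively
+//
+//     int Foo(this, native int /* hidden context */, string s)
+//
+// i.e. the HASTHIS calling convention with NumFixedArgs() + 1 parameters, and
+// the ELEMENT_TYPE_I context parameter inserted right after the this pointer.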
+
+Stub * CreateUnboxingILStubForSharedGenericValueTypeMethods(MethodDesc* pTargetMD)
+{
+
+ CONTRACT(Stub*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ SigTypeContext typeContext(pTargetMD);
+
+ MetaSig msig(pTargetMD);
+
+ _ASSERTE(msig.HasThis());
+
+ ILStubLinker sl(pTargetMD->GetModule(),
+ pTargetMD->GetSignature(),
+ &typeContext,
+ pTargetMD,
+ TRUE, // fTargetHasThis
+ TRUE, // fStubHasThis
+ FALSE // fIsNDirectStub
+ );
+
+ ILCodeStream *pCode = sl.NewCodeStream(ILStubLinker::kDispatch);
+
+ // 1. Build the new signature
+ SigBuilder stubSigBuilder;
+ CreateInstantiatingILStubTargetSig(pTargetMD, typeContext, &stubSigBuilder);
+
+ // 2. Emit the method body
+ mdToken tokPinningHelper = pCode->GetToken(MscorlibBinder::GetField(FIELD__PINNING_HELPER__M_DATA));
+
+ // 2.1 Push the thisptr
+ // We need to skip over the MethodTable*
+ // The trick below will do that.
+ pCode->EmitLoadThis();
+ pCode->EmitLDFLDA(tokPinningHelper);
+
+ // 2.2 Push the hidden context param
+ // The context is going to be captured from the thisptr
+ pCode->EmitLoadThis();
+ pCode->EmitLDFLDA(tokPinningHelper);
+ pCode->EmitLDC(Object::GetOffsetOfFirstField());
+ pCode->EmitSUB();
+ pCode->EmitLDIND_I();
+
+ // 2.3 Push the rest of the arguments
+ for (unsigned i = 0; i < msig.NumFixedArgs();i++)
+ {
+ pCode->EmitLDARG(i);
+ }
+
+ // 2.4 Push the target address
+ pCode->EmitLDC((TADDR)pTargetMD->GetMultiCallableAddrOfCode(CORINFO_ACCESS_ANY));
+
+ // 2.5 Do the calli
+ pCode->EmitCALLI(TOKEN_ILSTUB_TARGET_SIG, msig.NumFixedArgs() + 1, msig.IsReturnTypeVoid() ? 0 : 1);
+ pCode->EmitRET();
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cbSig;
+ pTargetMD->GetSig(&pSig,&cbSig);
+ PTR_Module pLoaderModule = pTargetMD->GetLoaderModule();
+ MethodDesc * pStubMD = ILStubCache::CreateAndLinkNewILStubMethodDesc(pTargetMD->GetLoaderAllocator(),
+ pLoaderModule->GetILStubCache()->GetOrCreateStubMethodTable(pLoaderModule),
+ ILSTUB_UNBOXINGILSTUB,
+ pTargetMD->GetModule(),
+ pSig, cbSig,
+ &typeContext,
+ &sl);
+
+ ILStubResolver *pResolver = pStubMD->AsDynamicMethodDesc()->GetILStubResolver();
+
+ DWORD cbTargetSig = 0;
+ PCCOR_SIGNATURE pTargetSig = (PCCOR_SIGNATURE) stubSigBuilder.GetSignature(&cbTargetSig);
+ pResolver->SetStubTargetMethodSig(pTargetSig, cbTargetSig);
+ pResolver->SetStubTargetMethodDesc(pTargetMD);
+
+ RETURN Stub::NewStub(JitILStub(pStubMD));
+
+}
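+
+// Schematically, the IL emitted above looks like this (a sketch, not a
+// verbatim dump; <pinning helper field> is the field token resolved above):
+//
+//     ldarg.0
+//     ldflda  <pinning helper field>     // byref just past the MethodTable*
+//     ldarg.0
+//     ldflda  <pinning helper field>
+//     ldc.i4  Object::GetOffsetOfFirstField()
+//     sub                                // back up to the start of the object
+//     ldind.i                            // load the MethodTable* as the hidden context
+//     <ldarg for each fixed argument>
+//     ldc.i   <address of the shared code>
+//     calli   <instantiating target signature>
+//     ret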
+
+Stub * CreateInstantiatingILStub(MethodDesc* pTargetMD, void* pHiddenArg)
+{
+
+ CONTRACT(Stub*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pHiddenArg));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ SigTypeContext typeContext;
+ if (pTargetMD->HasMethodInstantiation())
+ {
+ // The pHiddenArg shall be a MethodDesc*
+ SigTypeContext::InitTypeContext(static_cast<MethodDesc *>(pHiddenArg), &typeContext);
+ }
+ else
+ {
+ // The pHiddenArg shall be a MethodTable*
+ SigTypeContext::InitTypeContext(TypeHandle::FromPtr(pHiddenArg), &typeContext);
+ }
+
+ MetaSig msig(pTargetMD);
+
+ ILStubLinker sl(pTargetMD->GetModule(),
+ pTargetMD->GetSignature(),
+ &typeContext,
+ pTargetMD,
+ msig.HasThis(), // fTargetHasThis
+ msig.HasThis(), // fStubHasThis
+ FALSE // fIsNDirectStub
+ );
+
+ ILCodeStream *pCode = sl.NewCodeStream(ILStubLinker::kDispatch);
+
+ // 1. Build the new signature
+ SigBuilder stubSigBuilder;
+ CreateInstantiatingILStubTargetSig(pTargetMD, typeContext, &stubSigBuilder);
+
+ // 2. Emit the method body
+ if (msig.HasThis())
+ {
+ // 2.1 Push the thisptr
+ pCode->EmitLoadThis();
+ }
+
+ // 2.2 Push the hidden context param
+ // InstantiatingStub
+ pCode->EmitLDC((TADDR)pHiddenArg);
+
+ // 2.3 Push the rest of the arguments
+ for (unsigned i = 0; i < msig.NumFixedArgs();i++)
+ {
+ pCode->EmitLDARG(i);
+ }
+
+ // 2.4 Push the target address
+ pCode->EmitLDC((TADDR)pTargetMD->GetMultiCallableAddrOfCode(CORINFO_ACCESS_ANY));
+
+ // 2.5 Do the calli
+ pCode->EmitCALLI(TOKEN_ILSTUB_TARGET_SIG, msig.NumFixedArgs() + 1, msig.IsReturnTypeVoid() ? 0 : 1);
+ pCode->EmitRET();
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cbSig;
+ pTargetMD->GetSig(&pSig,&cbSig);
+ PTR_Module pLoaderModule = pTargetMD->GetLoaderModule();
+ MethodDesc * pStubMD = ILStubCache::CreateAndLinkNewILStubMethodDesc(pTargetMD->GetLoaderAllocator(),
+ pLoaderModule->GetILStubCache()->GetOrCreateStubMethodTable(pLoaderModule),
+ ILSTUB_INSTANTIATINGSTUB,
+ pTargetMD->GetModule(),
+ pSig, cbSig,
+ &typeContext,
+ &sl);
+
+ ILStubResolver *pResolver = pStubMD->AsDynamicMethodDesc()->GetILStubResolver();
+
+ DWORD cbTargetSig = 0;
+ PCCOR_SIGNATURE pTargetSig = (PCCOR_SIGNATURE) stubSigBuilder.GetSignature(&cbTargetSig);
+ pResolver->SetStubTargetMethodSig(pTargetSig, cbTargetSig);
+ pResolver->SetStubTargetMethodDesc(pTargetMD);
+
+ RETURN Stub::NewStub(JitILStub(pStubMD));
+}
+#endif
+
+/* Make a stub for a value class method that expects a BOXed this pointer */
+Stub * MakeUnboxingStubWorker(MethodDesc *pMD)
+{
+ CONTRACT(Stub*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ Stub *pstub = NULL;
+
+ _ASSERTE (pMD->GetMethodTable()->IsValueType());
+ _ASSERTE(!pMD->ContainsGenericVariables());
+ MethodDesc *pUnboxedMD = pMD->GetWrappedMethodDesc();
+
+ _ASSERTE(pUnboxedMD != NULL && pUnboxedMD != pMD);
+
+#ifdef FEATURE_STUBS_AS_IL
+ if (pUnboxedMD->RequiresInstMethodTableArg())
+ {
+ pstub = CreateUnboxingILStubForSharedGenericValueTypeMethods(pUnboxedMD);
+ }
+ else
+#endif
+ {
+ CPUSTUBLINKER sl;
+ sl.EmitUnboxMethodStub(pUnboxedMD);
+ pstub = sl.Link(pMD->GetLoaderAllocator()->GetStubHeap());
+ }
+ RETURN pstub;
+}
+
+#if defined(FEATURE_SHARE_GENERIC_CODE)
+Stub * MakeInstantiatingStubWorker(MethodDesc *pMD)
+{
+ CONTRACT(Stub*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(pMD->IsInstantiatingStub());
+ PRECONDITION(!pMD->RequiresInstArg());
+ PRECONDITION(!pMD->IsSharedByGenericMethodInstantiations());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // Note: this should be kept idempotent ... in the sense that
+ // if multiple threads get in here for the same pMD
+ // it should not matter whose stuff finally gets used.
+
+ MethodDesc *pSharedMD = NULL;
+ void* extraArg = NULL;
+
+ // It's an instantiated generic method
+ // Fetch the shared code associated with this instantiation
+ pSharedMD = pMD->GetWrappedMethodDesc();
+ _ASSERTE(pSharedMD != NULL && pSharedMD != pMD);
+
+ if (pMD->HasMethodInstantiation())
+ {
+ extraArg = pMD;
+ }
+ else
+ {
+ // It's a per-instantiation static method
+ extraArg = pMD->GetMethodTable();
+ }
+ Stub *pstub = NULL;
+
+#ifdef FEATURE_STUBS_AS_IL
+ pstub = CreateInstantiatingILStub(pSharedMD, extraArg);
+#else
+ CPUSTUBLINKER sl;
+ _ASSERTE(pSharedMD != NULL && pSharedMD != pMD);
+ sl.EmitInstantiatingMethodStub(pSharedMD, extraArg);
+
+ pstub = sl.Link(pMD->GetLoaderAllocator()->GetStubHeap());
+#endif
+
+ RETURN pstub;
+}
+#endif // defined(FEATURE_SHARE_GENERIC_CODE)
+
+//=============================================================================
+// This function generates the real code for a method and installs it into
+// the methoddesc. Usually ***BUT NOT ALWAYS***, this function runs only once
+// per methoddesc. In addition to installing the new code, this function
+// returns a pointer to the new code for the prestub's convenience.
+//=============================================================================
+extern "C" PCODE STDCALL PreStubWorker(TransitionBlock * pTransitionBlock, MethodDesc * pMD)
+{
+ PCODE pbRetVal = NULL;
+
+ BEGIN_PRESERVE_LAST_ERROR;
+
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ MAKE_CURRENT_THREAD_AVAILABLE();
+
+#ifdef _DEBUG
+ Thread::ObjectRefFlush(CURRENT_THREAD);
+#endif
+
+ FrameWithCookie<PrestubMethodFrame> frame(pTransitionBlock, pMD);
+ PrestubMethodFrame * pPFrame = &frame;
+
+ pPFrame->Push(CURRENT_THREAD);
+
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ ETWOnStartup (PrestubWorker_V1,PrestubWorkerEnd_V1);
+
+ _ASSERTE(!NingenEnabled() && "You cannot invoke managed code inside the ngen compilation process.");
+
+ // Running the PreStubWorker on a method causes us to access its MethodTable
+ g_IBCLogger.LogMethodDescAccess(pMD);
+
+ // Make sure the method table is restored, and method instantiation if present
+ pMD->CheckRestore();
+
+ CONSISTENCY_CHECK(GetAppDomain()->CheckCanExecuteManagedCode(pMD));
+
+ // Note this is redundant with the above check but we do it anyway for safety
+ //
+ // This has been disabled so we have a better chance of catching these. Note that this check is
+ // NOT sufficient for domain neutral and ngen cases.
+ //
+ // pMD->EnsureActive();
+
+ MethodTable *pDispatchingMT = NULL;
+
+ if (pMD->IsVtableMethod())
+ {
+ OBJECTREF curobj = pPFrame->GetThis();
+
+ if (curobj != NULL) // Check for virtual function called non-virtually on a NULL object
+ {
+ pDispatchingMT = curobj->GetTrueMethodTable();
+
+ // For value types, the only virtual methods are interface implementations.
+ // Thus pDispatchingMT == pMT because there
+ // is no inheritance in value types. Note the BoxedEntryPointStubs are shared
+ // between all sharable generic instantiations, so the == test is on
+ // canonical method tables.
+#ifdef _DEBUG
+ MethodTable *pMDMT = pMD->GetMethodTable(); // put this here to see what the MT is in debug mode
+ _ASSERTE(!pMD->GetMethodTable()->IsValueType() ||
+ (pMD->IsUnboxingStub() && (pDispatchingMT->GetCanonicalMethodTable() == pMDMT->GetCanonicalMethodTable())));
+#endif // _DEBUG
+ }
+ }
+
+ GCX_PREEMP_THREAD_EXISTS(CURRENT_THREAD);
+ pbRetVal = pMD->DoPrestub(pDispatchingMT);
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ // Give debugger opportunity to stop here
+ ThePreStubPatch();
+
+ pPFrame->Pop(CURRENT_THREAD);
+
+ POSTCONDITION(pbRetVal != NULL);
+
+ END_PRESERVE_LAST_ERROR;
+
+ return pbRetVal;
+}
+
+#ifdef _DEBUG
+//
+// These are two functions for testing purposes only, in debug builds only. They can be used by setting
+// InjectFatalError to 3. They ensure that we really can restore the guard page for SEH try/catch clauses.
+//
+// @todo: Do we use this for anything anymore?
+//
+static void TestSEHGuardPageRestoreOverflow()
+{
+}
+
+static void TestSEHGuardPageRestore()
+{
+ PAL_TRY(void *, unused, NULL)
+ {
+ TestSEHGuardPageRestoreOverflow();
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ _ASSERTE(!"Got first overflow.");
+ }
+ PAL_ENDTRY;
+
+ PAL_TRY(void *, unused, NULL)
+ {
+ TestSEHGuardPageRestoreOverflow();
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ // If you get two asserts, then it works!
+ _ASSERTE(!"Got second overflow.");
+ }
+ PAL_ENDTRY;
+}
+#endif // _DEBUG
+
+// Separated out the body of PreStubWorker for the case where we don't have a frame.
+//
+// Note that pDispatchingMT may not actually be the MT that is indirected through.
+// If a virtual method is called non-virtually, pMT will be used to indirect through.
+//
+// This returns a pointer to the stable entrypoint for the jitted method. Typically, this
+// is the same as the pointer to the top of the JITted code of the method. However, in
+// the case of methods that require stubs to be executed first (e.g., remoted methods
+// that require remoting stubs to be executed first), this stable entrypoint would be a
+// pointer to the stub, and not a pointer directly to the JITted code.
+PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
+{
+ CONTRACT(PCODE)
+ {
+ STANDARD_VM_CHECK;
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+ Stub *pStub = NULL;
+ PCODE pCode = NULL;
+
+ Thread *pThread = GetThread();
+
+ MethodTable *pMT = GetMethodTable();
+
+ // Running a prestub on a method causes us to access its MethodTable
+ g_IBCLogger.LogMethodDescAccess(this);
+
+ // A secondary layer of defense against executing code in inspection-only assembly.
+ // This should already have been taken care of by not allowing inspection assemblies
+ // to be activated. However, this is a very inexpensive piece of insurance in the name
+ // of security.
+ if (IsIntrospectionOnly())
+ {
+ _ASSERTE(!"A ReflectionOnly assembly reached the prestub. This should not have happened.");
+ COMPlusThrow(kInvalidOperationException, IDS_EE_CODEEXECUTION_IN_INTROSPECTIVE_ASSEMBLY);
+ }
+
+ if (ContainsGenericVariables())
+ {
+ COMPlusThrow(kInvalidOperationException, IDS_EE_CODEEXECUTION_CONTAINSGENERICVAR);
+ }
+
+ /************************** DEBUG CHECKS *************************/
+ /*-----------------------------------------------------------------
+ // Halt if needed, GC stress, check the sharing count etc.
+ */
+
+#ifdef _DEBUG
+ static unsigned ctr = 0;
+ ctr++;
+
+ if (g_pConfig->ShouldPrestubHalt(this))
+ {
+ _ASSERTE(!"PreStubHalt");
+ }
+
+ LOG((LF_CLASSLOADER, LL_INFO10000, "In PreStubWorker for %s::%s\n",
+ m_pszDebugClassName, m_pszDebugMethodName));
+
+ // This is a nice place to test out having some fatal EE errors. We do this only in a checked build, and only
+ // under the InjectFatalError key.
+ if (g_pConfig->InjectFatalError() == 1)
+ {
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+ else if (g_pConfig->InjectFatalError() == 2)
+ {
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_STACKOVERFLOW);
+ }
+ else if (g_pConfig->InjectFatalError() == 3)
+ {
+ TestSEHGuardPageRestore();
+ }
+
+ // Useful to test GC with the prestub on the call stack
+ if (g_pConfig->ShouldPrestubGC(this))
+ {
+ GCX_COOP();
+ GCHeap::GetGCHeap()->GarbageCollect(-1);
+ }
+#endif // _DEBUG
+
+ STRESS_LOG1(LF_CLASSLOADER, LL_INFO10000, "Prestubworker: method %pM\n", this);
+
+
+ GCStress<cfg_any, EeconfigFastGcSPolicy, CoopGcModePolicy>::MaybeTrigger();
+
+ // Are we in the prestub because of a rejit request? If so, let the ReJitManager
+ // take it from here.
+ pCode = ReJitManager::DoReJitIfNecessary(this);
+ if (pCode != NULL)
+ {
+ // A ReJIT was performed, so nothing left for DoPrestub() to do. Return now.
+ //
+ // The stable entrypoint will either be a pointer to the original JITted code
+ // (with a jmp at the top to jump to the newly-rejitted code) OR a pointer to any
+ // stub code that must be executed first (e.g., a remoting stub), which in turn
+ // will call the original JITted code (which then jmps to the newly-rejitted
+ // code).
+ RETURN GetStableEntryPoint();
+ }
+
+#ifdef FEATURE_PREJIT
+ // If this method is the root of a CER call graph and we've recorded this fact in the ngen image then we're in the prestub in
+ // order to trip any runtime level preparation needed for this graph (P/Invoke stub generation/library binding, generic
+ // dictionary prepopulation etc.).
+ GetModule()->RestoreCer(this);
+#endif // FEATURE_PREJIT
+
+#ifdef FEATURE_COMINTEROP
+ /************************** INTEROP *************************/
+ /*-----------------------------------------------------------------
+ // Some method descriptors are COMPLUS-to-COM call descriptors
+ // they are not your every day method descriptors, for example
+ // they don't have an IL or code.
+ */
+ if (IsComPlusCall() || IsGenericComPlusCall())
+ {
+ pCode = GetStubForInteropMethod(this);
+
+ GetPrecode()->SetTargetInterlocked(pCode);
+
+ RETURN GetStableEntryPoint();
+ }
+#endif // FEATURE_COMINTEROP
+
+ // workaround: This is to handle a punted work item dealing with a skipped module constructor
+ // due to appdomain unload. Basically shared code was JITted in domain A, and then
+ // this caused a link to another shared module with a module CCTOR, which was skipped
+ // or aborted in another appdomain we were trying to propagate the activation to.
+ //
+ // Note that this is not a fix; it just minimizes the window in which the
+ // issue can occur.
+ if (pThread->IsAbortRequested())
+ {
+ pThread->HandleThreadAbort();
+ }
+
+ /************************** CLASS CONSTRUCTOR ********************/
+ // Make sure .cctor has been run
+
+ if (IsClassConstructorTriggeredViaPrestub())
+ {
+ pMT->CheckRunClassInitThrowing();
+ }
+
+ /************************** BACKPATCHING *************************/
+ // See if the addr of code has changed from the pre-stub
+#ifdef FEATURE_INTERPRETER
+ if (!IsReallyPointingToPrestub())
+#else
+ if (!IsPointingToPrestub())
+#endif
+ {
+ LOG((LF_CLASSLOADER, LL_INFO10000,
+ " In PreStubWorker, method already jitted, backpatching call point\n"));
+
+ RETURN DoBackpatch(pMT, pDispatchingMT, TRUE);
+ }
+
+ // record if remoting needs to intercept this call
+ BOOL fRemotingIntercepted = IsRemotingInterceptedViaPrestub();
+
+ /************************** CODE CREATION *************************/
+ if (IsUnboxingStub())
+ {
+ pStub = MakeUnboxingStubWorker(this);
+ }
+#ifdef FEATURE_REMOTING
+ else if (pMT->IsInterface() && !IsStatic() && !IsFCall())
+ {
+ pCode = CRemotingServices::GetDispatchInterfaceHelper(this);
+ GetOrCreatePrecode();
+ }
+#endif // FEATURE_REMOTING
+#if defined(FEATURE_SHARE_GENERIC_CODE)
+ else if (IsInstantiatingStub())
+ {
+ pStub = MakeInstantiatingStubWorker(this);
+ }
+#endif // defined(FEATURE_SHARE_GENERIC_CODE)
+ else if (IsIL() || IsNoMetadata())
+ {
+ // remember if we need to backpatch the MethodTable slot
+ BOOL fBackpatch = !fRemotingIntercepted
+ && !IsEnCMethod();
+
+#ifdef FEATURE_PREJIT
+ //
+ // See if we have any prejitted code to use.
+ //
+
+ pCode = GetPreImplementedCode();
+
+#ifdef PROFILING_SUPPORTED
+ if (pCode != NULL)
+ {
+ BOOL fShouldSearchCache = TRUE;
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
+ g_profControlBlock.pProfInterface->
+ JITCachedFunctionSearchStarted((FunctionID) this,
+ &fShouldSearchCache);
+ END_PIN_PROFILER();
+ }
+
+ if (!fShouldSearchCache)
+ {
+#ifdef FEATURE_INTERPRETER
+ SetNativeCodeInterlocked(NULL, pCode, FALSE);
+#else
+ SetNativeCodeInterlocked(NULL, pCode);
+#endif
+ _ASSERTE(!IsPreImplemented());
+ pCode = NULL;
+ }
+ }
+#endif // PROFILING_SUPPORTED
+
+ if (pCode != NULL)
+ {
+ LOG((LF_ZAP, LL_INFO10000,
+ "ZAP: Using code" FMT_ADDR "for %s.%s sig=\"%s\" (token %x).\n",
+ DBG_ADDR(pCode),
+ m_pszDebugClassName,
+ m_pszDebugMethodName,
+ m_pszDebugMethodSignature,
+ GetMemberDef()));
+
+ TADDR pFixupList = GetFixupList();
+ if (pFixupList != NULL)
+ {
+ Module *pZapModule = GetZapModule();
+ _ASSERTE(pZapModule != NULL);
+ if (!pZapModule->FixupDelayList(pFixupList))
+ {
+ _ASSERTE(!"FixupDelayList failed");
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ }
+
+#ifdef HAVE_GCCOVER
+ if (GCStress<cfg_instr_ngen>::IsEnabled())
+ SetupGcCoverage(this, (BYTE*) pCode);
+#endif // HAVE_GCCOVER
+
+#ifdef PROFILING_SUPPORTED
+ /*
+ * This notifies the profiler that a search to find a
+ * cached jitted function has been made.
+ */
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
+ g_profControlBlock.pProfInterface->
+ JITCachedFunctionSearchFinished((FunctionID) this, COR_PRF_CACHED_FUNCTION_FOUND);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+ }
+
+ //
+ // If not, try to jit it
+ //
+
+#endif // FEATURE_PREJIT
+
+#ifdef FEATURE_READYTORUN
+ if (pCode == NULL)
+ {
+ Module * pModule = GetModule();
+ if (pModule->IsReadyToRun())
+ pCode = pModule->GetReadyToRunInfo()->GetEntryPoint(this);
+ }
+#endif // FEATURE_READYTORUN
+
+ if (pCode == NULL)
+ {
+ NewHolder<COR_ILMETHOD_DECODER> pHeader(NULL);
+ // Get the information on the method
+ if (!IsNoMetadata())
+ {
+ COR_ILMETHOD* ilHeader = GetILHeader(TRUE);
+ if(ilHeader == NULL)
+ {
+#ifdef FEATURE_COMINTEROP
+ // Abstract methods can be called through WinRT derivation if the deriving type
+ // is not implemented in managed code and the call comes in through the CCW to the
+ // abstract method. Throw a sensible exception in that case.
+ if (pMT->IsExportedToWinRT() && IsAbstract())
+ {
+ COMPlusThrowHR(E_NOTIMPL);
+ }
+#endif // FEATURE_COMINTEROP
+
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
+ }
+
+ COR_ILMETHOD_DECODER::DecoderStatus status = COR_ILMETHOD_DECODER::FORMAT_ERROR;
+
+ {
+ // Decoder ctor can AV on a malformed method header
+ AVInRuntimeImplOkayHolder AVOkay;
+ pHeader = new COR_ILMETHOD_DECODER(ilHeader, GetMDImport(), &status);
+ if(pHeader == NULL)
+ status = COR_ILMETHOD_DECODER::FORMAT_ERROR;
+ }
+
+ if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR &&
+ Security::CanSkipVerification(GetModule()->GetDomainAssembly()))
+ {
+ status = COR_ILMETHOD_DECODER::SUCCESS;
+ }
+
+ if (status != COR_ILMETHOD_DECODER::SUCCESS)
+ {
+ if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR)
+ {
+ // Throw a verification HR
+ COMPlusThrowHR(COR_E_VERIFICATION);
+ }
+ else
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
+ }
+ }
+
+#ifdef _VER_EE_VERIFICATION_ENABLED
+ static ConfigDWORD peVerify;
+
+ if (peVerify.val(CLRConfig::EXTERNAL_PEVerify))
+ Verify(pHeader, TRUE, FALSE); // Throws a VerifierException if verification fails
+#endif // _VER_EE_VERIFICATION_ENABLED
+ } // end if (!IsNoMetadata())
+
+ // JIT it
+ LOG((LF_CLASSLOADER, LL_INFO1000000,
+ " In PreStubWorker, calling MakeJitWorker\n"));
+
+ // Create the precode eagerly if it is going to be needed later.
+ if (!fBackpatch)
+ {
+ GetOrCreatePrecode();
+ }
+
+ // Mark the code as hot in case the method ends up in the native image
+ g_IBCLogger.LogMethodCodeAccess(this);
+
+ pCode = MakeJitWorker(pHeader, 0, 0);
+
+#ifdef FEATURE_INTERPRETER
+ if ((pCode != NULL) && !HasStableEntryPoint())
+ {
+ // We don't yet have a stable entry point, so don't do backpatching yet.
+ // But we do have to handle some extra cases that occur in backpatching.
+ // (Perhaps I *should* get to the backpatching code, but in a mode where we know
+ // we're not dealing with the stable entry point...)
+ if (HasNativeCodeSlot())
+ {
+ // We called "SetNativeCodeInterlocked" in MakeJitWorker, which updated the native
+ // code slot, but I think we also want to update the regular slot...
+ PCODE tmpEntry = GetTemporaryEntryPoint();
+ PCODE pFound = FastInterlockCompareExchangePointer(GetAddrOfSlot(), pCode, tmpEntry);
+ // Doesn't matter if we failed -- if we did, it's because somebody else made progress.
+ if (pFound != tmpEntry) pCode = pFound;
+ }
+
+ // Now we handle the case of a FuncPtrPrecode.
+ FuncPtrStubs * pFuncPtrStubs = GetLoaderAllocator()->GetFuncPtrStubsNoCreate();
+ if (pFuncPtrStubs != NULL)
+ {
+ Precode* pFuncPtrPrecode = pFuncPtrStubs->Lookup(this);
+ if (pFuncPtrPrecode != NULL)
+ {
+ // If there is a funcptr precode to patch, attempt to patch it. If we lose, that's OK,
+ // somebody else made progress.
+ pFuncPtrPrecode->SetTargetInterlocked(pCode);
+ }
+ }
+ }
+#endif // FEATURE_INTERPRETER
+ } // end if (pCode == NULL)
+ } // end else if (IsIL() || IsNoMetadata())
+ else if (IsNDirect())
+ {
+ if (!GetModule()->GetSecurityDescriptor()->CanCallUnmanagedCode())
+ Security::ThrowSecurityException(g_SecurityPermissionClassName, SPFLAGSUNMANAGEDCODE);
+
+ pCode = GetStubForInteropMethod(this);
+ GetOrCreatePrecode();
+ }
+ else if (IsFCall())
+ {
+ // Get the fcall implementation
+ BOOL fSharedOrDynamicFCallImpl;
+ pCode = ECall::GetFCallImpl(this, &fSharedOrDynamicFCallImpl);
+
+ if (fSharedOrDynamicFCallImpl)
+ {
+ // Fake ctors share one implementation that has to be wrapped by prestub
+ GetOrCreatePrecode();
+ }
+ }
+ else if (IsArray())
+ {
+ pStub = GenerateArrayOpStub((ArrayMethodDesc*)this);
+ }
+ else if (IsEEImpl())
+ {
+ _ASSERTE(GetMethodTable()->IsDelegate());
+ pCode = COMDelegate::GetInvokeMethodStub((EEImplMethodDesc*)this);
+ GetOrCreatePrecode();
+ }
+ else
+ {
+ // This is a method type we don't handle yet
+ _ASSERTE(!"Unknown Method Type");
+ }
+
+ /************************** POSTJIT *************************/
+#ifndef FEATURE_INTERPRETER
+ _ASSERTE(pCode == NULL || GetNativeCode() == NULL || pCode == GetNativeCode());
+#else // FEATURE_INTERPRETER
+ // The interpreter adds a new possibility: someone else beat us to installing an interpreter stub.
+ _ASSERTE(pCode == NULL || GetNativeCode() == NULL || pCode == GetNativeCode()
+ || Interpreter::InterpretationStubToMethodInfo(pCode) == this);
+#endif // FEATURE_INTERPRETER
+
+ // At this point we must have either a pointer to managed code or to a stub. All of the above code
+ // should have thrown an exception if it couldn't make a stub.
+ _ASSERTE((pStub != NULL) ^ (pCode != NULL));
+
+ /************************** SECURITY *************************/
+
+ // Let's check to see if we need declarative security on this stub. If we have
+ // security checks on this method or class, then we need to add an intermediate
+ // stub that performs declarative checks prior to calling the real stub.
+ // Record whether security needs to intercept this call (this also depends on whether we plan to use stubs for declarative security).
+
+#if !defined(HAS_REMOTING_PRECODE) && defined(FEATURE_REMOTING)
+ /************************** REMOTING *************************/
+
+ // Check for MarshalByRef scenarios ... we need to intercept
+ // non-virtual calls on MarshalByRef types.
+ if (fRemotingIntercepted)
+ {
+ // Let us set up a remoting stub to intercept all the calls.
+ Stub *pRemotingStub = CRemotingServices::GetStubForNonVirtualMethod(this,
+ (pStub != NULL) ? (LPVOID)pStub->GetEntryPoint() : (LPVOID)pCode, pStub);
+
+ if (pRemotingStub != NULL)
+ {
+ pStub = pRemotingStub;
+ pCode = NULL;
+ }
+ }
+#endif // !HAS_REMOTING_PRECODE && FEATURE_REMOTING
+
+ _ASSERTE((pStub != NULL) ^ (pCode != NULL));
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ //
+ // We are seeing memory reordering race around fixups (see DDB 193514 and related bugs). We get into
+ // situation where the patched precode is visible by other threads, but the resolved fixups
+ // are not. IT SHOULD NEVER HAPPEN according to our current understanding of x86/x64 memory model.
+ // (see email thread attached to the bug for details).
+ //
+ // We suspect that there may be a bug in the hardware, or that the hardware may have shortcuts that are
+ // causing grief. We will try to avoid the race by executing an extra memory barrier.
+ //
+ MemoryBarrier();
+#endif
+
+ if (pCode != NULL)
+ {
+ if (HasPrecode())
+ GetPrecode()->SetTargetInterlocked(pCode);
+ else
+ if (!HasStableEntryPoint())
+ {
+ // Is the result an interpreter stub?
+#ifdef FEATURE_INTERPRETER
+ if (Interpreter::InterpretationStubToMethodInfo(pCode) == this)
+ {
+ SetEntryPointInterlocked(pCode);
+ }
+ else
+#endif // FEATURE_INTERPRETER
+ {
+ SetStableEntryPointInterlocked(pCode);
+ }
+ }
+ }
+ else
+ {
+ if (!GetOrCreatePrecode()->SetTargetInterlocked(pStub->GetEntryPoint()))
+ {
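+ // Lost the race: another thread already installed the target first,
+ // so drop the reference to the stub we created.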
+ pStub->DecRef();
+ }
+ else
+ if (pStub->HasExternalEntryPoint())
+ {
+ // If the Stub wraps code that is outside of the Stub allocation, then we
+ // need to free the Stub allocation now.
+ pStub->DecRef();
+ }
+ }
+
+#ifdef FEATURE_INTERPRETER
+ _ASSERTE(!IsReallyPointingToPrestub());
+#else // FEATURE_INTERPRETER
+ _ASSERTE(!IsPointingToPrestub());
+ _ASSERTE(HasStableEntryPoint());
+#endif // FEATURE_INTERPRETER
+
+ RETURN DoBackpatch(pMT, pDispatchingMT, FALSE);
+}
+
+#endif // !DACCESS_COMPILE
+
+//==========================================================================
+// The following code manages the PreStub. All method stubs initially
+// use the prestub.
+//==========================================================================
+
+#ifdef _TARGET_X86_
+static PCODE g_UMThunkPreStub;
+#endif
+
+#ifndef DACCESS_COMPILE
+
+void ThePreStubManager::Init(void)
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+ // Add the prestub manager
+ //
+
+ StubManager::AddStubManager(new ThePreStubManager());
+}
+
+//-----------------------------------------------------------
+// Initialize the prestub.
+//-----------------------------------------------------------
+void InitPreStubManager(void)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (NingenEnabled())
+ {
+ return;
+ }
+
+#ifdef _TARGET_X86_
+ g_UMThunkPreStub = GenerateUMThunkPrestub()->GetEntryPoint();
+#endif
+
+ ThePreStubManager::Init();
+}
+
+PCODE TheUMThunkPreStub()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _TARGET_X86_
+ return g_UMThunkPreStub;
+#else
+ return GetEEFuncEntryPoint(TheUMEntryPrestub);
+#endif
+}
+
+PCODE TheVarargNDirectStub(BOOL hasRetBuffArg)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_)
+ if (hasRetBuffArg)
+ {
+ return GetEEFuncEntryPoint(VarargPInvokeStub_RetBuffArg);
+ }
+ else
+#endif
+ {
+ return GetEEFuncEntryPoint(VarargPInvokeStub);
+ }
+}
+
+static PCODE PatchNonVirtualExternalMethod(MethodDesc * pMD, PCODE pCode, PTR_CORCOMPILE_IMPORT_SECTION pImportSection, TADDR pIndirection)
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+ // Skip the fixup precode jump for better perf. Since we have the MethodDesc available, we can use a cheaper
+ // method than code:Precode::TryToSkipFixupPrecode.
+ //
+#ifdef HAS_FIXUP_PRECODE
+ if (pMD->HasPrecode() && pMD->GetPrecode()->GetType() == PRECODE_FIXUP
+ && !pMD->IsEnCMethod()
+#ifndef HAS_REMOTING_PRECODE
+ && !pMD->IsRemotingInterceptedViaPrestub()
+#endif
+ )
+ {
+ PCODE pDirectTarget = pMD->IsFCall() ? ECall::GetFCallImpl(pMD) : pMD->GetNativeCode();
+ if (pDirectTarget != NULL)
+ pCode = pDirectTarget;
+ }
+#endif //HAS_FIXUP_PRECODE
+
+ if (pImportSection->Flags & CORCOMPILE_IMPORT_FLAGS_CODE)
+ {
+ CORCOMPILE_EXTERNAL_METHOD_THUNK * pThunk = (CORCOMPILE_EXTERNAL_METHOD_THUNK *)pIndirection;
+
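+ // Sketch of the patch below (x86/AMD64), assuming the standard opcode
+ // encodings: the thunk initially reads
+ //     E8 <rel32>    ; call (the external method fixup stub)
+ // and is rewritten to
+ //     E9 <rel32>    ; jmp (the method body, possibly via a jump stub)
+ // The 8-byte interlocked compare-exchange guarantees that other threads
+ // observe either the old or the new instruction in full.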
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ INT64 oldValue = *(INT64*)pThunk;
+ BYTE* pOldValue = (BYTE*)&oldValue;
+
+ if (pOldValue[0] == X86_INSTR_CALL_REL32)
+ {
+ INT64 newValue = oldValue;
+ BYTE* pNewValue = (BYTE*)&newValue;
+ pNewValue[0] = X86_INSTR_JMP_REL32;
+
+ *(INT32 *)(pNewValue+1) = rel32UsingJumpStub((INT32*)(&pThunk->callJmp[1]), pCode, pMD, NULL);
+
+ _ASSERTE(IS_ALIGNED((size_t)pThunk, sizeof(INT64)));
+ EnsureWritableExecutablePages(pThunk, sizeof(INT64));
+ FastInterlockCompareExchangeLong((INT64*)pThunk, newValue, oldValue);
+
+ FlushInstructionCache(GetCurrentProcess(), pThunk, 8);
+ }
+#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ // Patch up the thunk to point to the actual implementation of the cross-module external method.
+ EnsureWritableExecutablePages(&pThunk->m_pTarget);
+ pThunk->m_pTarget = pCode;
+
+ #if defined(_TARGET_ARM_)
+ // ThumbBit must be set on the target address
+ _ASSERTE(pCode & THUMB_CODE);
+ #endif
+#else
+ PORTABILITY_ASSERT("ExternalMethodFixupWorker");
+#endif
+ }
+ else
+ {
+ *EnsureWritableExecutablePages((TADDR *)pIndirection) = pCode;
+ }
+
+ return pCode;
+}
+
+//==========================================================================================
+// In NGen images, calls to external methods start out pointing to jump thunks.
+// These jump thunks initially point to the assembly code _ExternalMethodFixupStub,
+// which transfers control to ExternalMethodFixupWorker. The worker patches the jump
+// thunk to point to the actual cross-module address of the method body.
+// Some methods also have one-time prestubs; for those we defer the patching until
+// we have the final stable method entry point.
+//
+EXTERN_C PCODE STDCALL ExternalMethodFixupWorker(TransitionBlock * pTransitionBlock, TADDR pIndirection, DWORD sectionIndex, Module * pModule)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ // We must save (and restore) the Last Error code before we call anything
+ // that could overwrite it. Any callsite that leads to TlsGetValue will
+ // potentially overwrite the Last Error code.
+
+ //
+ // In Dev10 bug 837293 we were overwriting the Last Error code on the first
+ // call to a PInvoke method. This occurred when we were running a
+ // (precompiled) PInvoke IL stub implemented in the ngen image.
+ //
+ // In this IL stub implementation we call the native method kernel32!GetFileAttributes,
+ // and then we immediately try to save the Last Error code by calling the
+ // mscorlib method System.StubHelpers.StubHelpers.SetLastError().
+ //
+ // However when we are coming from a precompiled IL Stub in an ngen image
+ // we must use an ExternalMethodFixup to find the target address of
+ // System.StubHelpers.StubHelpers.SetLastError() and this was overwriting
+ // the value of the Last Error before it could be retrieved and saved.
+ //
+
+ PCODE pCode = NULL;
+
+ BEGIN_PRESERVE_LAST_ERROR;
+
+ MAKE_CURRENT_THREAD_AVAILABLE();
+
+ FrameWithCookie<ExternalMethodFrame> frame(pTransitionBlock);
+ ExternalMethodFrame * pEMFrame = &frame;
+
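+ // Illustrative note on the decoding below, assuming the usual encodings:
+ // on x86 an indirect call through a cell is FF 15 <abs32> ("call [cell]"),
+ // so the absolute cell address sits in the 4 bytes preceding the return
+ // address. On AMD64 the same call is RIP-relative ("call [rip+rel32]"),
+ // so the cell address is the 32-bit displacement preceding the return
+ // address added to the return address itself.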
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ // Decode indirection cell from callsite if it is not present
+ if (pIndirection == NULL)
+ {
+ // Assume that the callsite is call [xxxxxxxx]
+ PCODE retAddr = pEMFrame->GetReturnAddress();
+#ifdef _TARGET_X86_
+ pIndirection = *(((TADDR *)retAddr) - 1);
+#else
+ pIndirection = *(((INT32 *)retAddr) - 1) + retAddr;
+#endif
+ }
+#endif
+
+ // FUTURE: Consider always passing in module and section index to avoid the lookups
+ if (pModule == NULL)
+ {
+ pModule = ExecutionManager::FindZapModule(pIndirection);
+ sectionIndex = (DWORD)-1;
+ }
+ _ASSERTE(pModule != NULL);
+
+ pEMFrame->SetCallSite(pModule, pIndirection);
+
+ pEMFrame->Push(CURRENT_THREAD); // Push the new ExternalMethodFrame onto the frame stack
+
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ bool fVirtual = false;
+ MethodDesc * pMD = NULL;
+ MethodTable * pMT = NULL;
+ DWORD slot = 0;
+
+ {
+ GCX_PREEMP_THREAD_EXISTS(CURRENT_THREAD);
+
+ PEImageLayout *pNativeImage = pModule->GetNativeOrReadyToRunImage();
+
+ RVA rva = pNativeImage->GetDataRva(pIndirection);
+
+ PTR_CORCOMPILE_IMPORT_SECTION pImportSection;
+ if (sectionIndex != (DWORD)-1)
+ {
+ pImportSection = pModule->GetImportSectionFromIndex(sectionIndex);
+ _ASSERTE(pImportSection == pModule->GetImportSectionForRVA(rva));
+ }
+ else
+ {
+ pImportSection = pModule->GetImportSectionForRVA(rva);
+ }
+ _ASSERTE(pImportSection != NULL);
+
+ COUNT_T index;
+ if (pImportSection->Flags & CORCOMPILE_IMPORT_FLAGS_CODE)
+ {
+ _ASSERTE(pImportSection->EntrySize == sizeof(CORCOMPILE_EXTERNAL_METHOD_THUNK));
+ index = (rva - pImportSection->Section.VirtualAddress) / sizeof(CORCOMPILE_EXTERNAL_METHOD_THUNK);
+ }
+ else
+ {
+ _ASSERTE(pImportSection->EntrySize == sizeof(TADDR));
+ index = (rva - pImportSection->Section.VirtualAddress) / sizeof(TADDR);
+ }
+
+ PTR_DWORD pSignatures = dac_cast<PTR_DWORD>(pNativeImage->GetRvaData(pImportSection->Signatures));
+
+ PCCOR_SIGNATURE pBlob = (BYTE *)pNativeImage->GetRvaData(pSignatures[index]);
+
+ BYTE kind = *pBlob++;
+
+ Module * pInfoModule = pModule;
+ if (kind & ENCODE_MODULE_OVERRIDE)
+ {
+ DWORD moduleIndex = CorSigUncompressData(pBlob);
+ pInfoModule = pModule->GetModuleFromIndex(moduleIndex);
+ kind &= ~ENCODE_MODULE_OVERRIDE;
+ }
+
+ TypeHandle th;
+ switch (kind)
+ {
+ case ENCODE_METHOD_ENTRY:
+ {
+ pMD = ZapSig::DecodeMethod(pModule,
+ pInfoModule,
+ pBlob);
+
+ if (pModule->IsReadyToRun())
+ {
+ // We do not emit activation fixups for version resilient references. Activate the target explicitly.
+ pMD->EnsureActive();
+ }
+
+ break;
+ }
+
+ case ENCODE_METHOD_ENTRY_DEF_TOKEN:
+ {
+ mdToken MethodDef = TokenFromRid(CorSigUncompressData(pBlob), mdtMethodDef);
+ pMD = MemberLoader::GetMethodDescFromMethodDef(pInfoModule, MethodDef, FALSE);
+
+ pMD->PrepareForUseAsADependencyOfANativeImage();
+
+ if (pModule->IsReadyToRun())
+ {
+ // We do not emit activation fixups for version resilient references. Activate the target explicitly.
+ pMD->EnsureActive();
+ }
+
+ break;
+ }
+
+ case ENCODE_METHOD_ENTRY_REF_TOKEN:
+ {
+ SigTypeContext typeContext;
+ mdToken MemberRef = TokenFromRid(CorSigUncompressData(pBlob), mdtMemberRef);
+ FieldDesc * pFD = NULL;
+
+ MemberLoader::GetDescFromMemberRef(pInfoModule, MemberRef, &pMD, &pFD, &typeContext, FALSE /* strict metadata checks */, &th);
+ _ASSERTE(pMD != NULL);
+
+ pMD->PrepareForUseAsADependencyOfANativeImage();
+
+ if (pModule->IsReadyToRun())
+ {
+ // We do not emit activation fixups for version resilient references. Activate the target explicitly.
+ pMD->EnsureActive();
+ }
+ else
+ {
+#ifdef FEATURE_WINMD_RESILIENT
+ // We do not emit activation fixups for version resilient references. Activate the target explicitly.
+ pMD->EnsureActive();
+#endif
+ }
+
+ break;
+ }
+
+ case ENCODE_VIRTUAL_ENTRY:
+ {
+ pMD = ZapSig::DecodeMethod(pModule, pInfoModule, pBlob, &th);
+
+ VirtualEntry:
+ pMD->PrepareForUseAsADependencyOfANativeImage();
+
+ if (pMD->IsVtableMethod())
+ {
+ slot = pMD->GetSlot();
+ pMT = th.IsNull() ? pMD->GetMethodTable() : th.GetMethodTable();
+
+ fVirtual = true;
+ }
+ else
+ if (pModule->IsReadyToRun())
+ {
+ // We do not emit activation fixups for version resilient references. Activate the target explicitly.
+ pMD->EnsureActive();
+ }
+ break;
+ }
+
+ case ENCODE_VIRTUAL_ENTRY_DEF_TOKEN:
+ {
+ mdToken MethodDef = TokenFromRid(CorSigUncompressData(pBlob), mdtMethodDef);
+ pMD = MemberLoader::GetMethodDescFromMethodDef(pInfoModule, MethodDef, FALSE);
+
+ goto VirtualEntry;
+ }
+
+ case ENCODE_VIRTUAL_ENTRY_REF_TOKEN:
+ {
+ mdToken MemberRef = TokenFromRid(CorSigUncompressData(pBlob), mdtMemberRef);
+
+ FieldDesc * pFD = NULL;
+
+ SigTypeContext typeContext;
+ MemberLoader::GetDescFromMemberRef(pInfoModule, MemberRef, &pMD, &pFD, &typeContext, FALSE /* strict metadata checks */, &th, TRUE /* actual type required */);
+ _ASSERTE(pMD != NULL);
+
+ goto VirtualEntry;
+ }
+
+ case ENCODE_VIRTUAL_ENTRY_SLOT:
+ {
+ slot = CorSigUncompressData(pBlob);
+ pMT = ZapSig::DecodeType(pModule, pInfoModule, pBlob).GetMethodTable();
+
+ fVirtual = true;
+ break;
+ }
+
+ default:
+ _ASSERTE(!"Unexpected CORCOMPILE_FIXUP_BLOB_KIND");
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ if (fVirtual)
+ {
+ GCX_COOP_THREAD_EXISTS(CURRENT_THREAD);
+
+ // Get the stub manager for this module
+ VirtualCallStubManager *pMgr = pModule->GetLoaderAllocator()->GetVirtualCallStubManager();
+
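+ // Build the dispatch token for this call: interface dispatch needs the
+ // interface's type ID combined with the slot, while ordinary virtual
+ // dispatch is identified by the slot alone.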
+ DispatchToken token;
+ if (pMT->IsInterface())
+ token = pMT->GetLoaderAllocator()->GetDispatchToken(pMT->GetTypeID(), slot);
+ else
+ token = DispatchToken::CreateDispatchToken(slot);
+
+ OBJECTREF pObj = pEMFrame->GetThis();
+ if (pObj == NULL) {
+ COMPlusThrow(kNullReferenceException);
+ }
+
+ StubCallSite callSite(pIndirection, pEMFrame->GetReturnAddress());
+ pCode = pMgr->ResolveWorker(&callSite, pObj, token, VirtualCallStubManager::SK_LOOKUP);
+ _ASSERTE(pCode != NULL);
+ }
+ else
+ {
+ _ASSERTE(pMD != NULL);
+
+ {
+ // Switch to cooperative mode to avoid racing with GC stackwalk
+ GCX_COOP_THREAD_EXISTS(CURRENT_THREAD);
+ pEMFrame->SetFunction(pMD);
+ }
+
+ pCode = pMD->GetMethodEntryPoint();
+
+ //
+ // Note that we do not want to call code:MethodDesc::IsPointingToPrestub() here. It does not take remoting interception
+ // into account and so it would cause otherwise intercepted methods to be JITed. It is a compat issue if the JITing fails.
+ //
+ if (DoesSlotCallPrestub(pCode))
+ {
+ ETWOnStartup(PrestubWorker_V1, PrestubWorkerEnd_V1);
+ pCode = pMD->DoPrestub(NULL);
+ }
+
+ pCode = PatchNonVirtualExternalMethod(pMD, pCode, pImportSection, pIndirection);
+ }
+ }
+
+ // Force a GC on every jit if the stress level is high enough
+ GCStress<cfg_any>::MaybeTrigger();
+
+ // Ready to return
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ pEMFrame->Pop(CURRENT_THREAD); // Pop the ExternalMethodFrame from the frame stack
+
+ END_PRESERVE_LAST_ERROR;
+
+ return pCode;
+}
+
+
+#if !defined(_TARGET_X86_) && !defined(_TARGET_AMD64_)
+
+//==========================================================================================
+// In NGen images, virtual slots inherited from cross-module dependencies point to jump thunks.
+// These jump thunks initially point to VirtualMethodFixupStub, which transfers control here.
+// This method, 'VirtualMethodFixupWorker', patches the jump thunk to point to the actual
+// inherited method body once the precode has executed and we have a stable entry point.
+//
+EXTERN_C PCODE VirtualMethodFixupWorker(Object * pThisPtr, CORCOMPILE_VIRTUAL_IMPORT_THUNK *pThunk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pThisPtr != NULL);
+ VALIDATEOBJECT(pThisPtr);
+
+ MethodTable * pMT = pThisPtr->GetTrueMethodTable();
+
+ WORD slotNumber = pThunk->slotNum;
+ _ASSERTE(slotNumber != (WORD)-1);
+
+ PCODE pCode = pMT->GetRestoredSlot(slotNumber);
+
+ if (!DoesSlotCallPrestub(pCode))
+ {
+ // Skip fixup precode jump for better perf
+ PCODE pDirectTarget = Precode::TryToSkipFixupPrecode(pCode);
+ if (pDirectTarget != NULL)
+ pCode = pDirectTarget;
+
+ // Patch the thunk to the actual method body
+ if (EnsureWritableExecutablePagesNoThrow(&pThunk->m_pTarget, sizeof(pThunk->m_pTarget)))
+ pThunk->m_pTarget = pCode;
+ }
+#if defined(_TARGET_ARM_)
+ // The target address should have the thumb bit set
+ _ASSERTE(pCode & THUMB_CODE);
+#endif
+ return pCode;
+}
+#endif // !defined(_TARGET_X86_) && !defined(_TARGET_AMD64_)
+
+#ifdef FEATURE_READYTORUN
+
+static PCODE getHelperForInitializedStatic(Module * pModule, CORCOMPILE_FIXUP_BLOB_KIND kind, MethodTable * pMT, FieldDesc * pFD)
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pHelper = NULL;
+
+ switch (kind)
+ {
+ case ENCODE_STATIC_BASE_NONGC_HELPER:
+ {
+ PVOID baseNonGC;
+ {
+ GCX_COOP();
+ baseNonGC = pMT->GetNonGCStaticsBasePointer();
+ }
+ pHelper = DynamicHelpers::CreateReturnConst(pModule->GetLoaderAllocator(), (TADDR)baseNonGC);
+ }
+ break;
+ case ENCODE_STATIC_BASE_GC_HELPER:
+ {
+ PVOID baseGC;
+ {
+ GCX_COOP();
+ baseGC = pMT->GetGCStaticsBasePointer();
+ }
+ pHelper = DynamicHelpers::CreateReturnConst(pModule->GetLoaderAllocator(), (TADDR)baseGC);
+ }
+ break;
+ case ENCODE_CCTOR_TRIGGER:
+ pHelper = DynamicHelpers::CreateReturn(pModule->GetLoaderAllocator());
+ break;
+ case ENCODE_FIELD_ADDRESS:
+ {
+ _ASSERTE(pFD->IsStatic());
+
+ PTR_VOID pAddress;
+
+ {
+ GCX_COOP();
+
+ PTR_BYTE base = 0;
+ if (!pFD->IsRVA()) // for RVA the base is ignored
+ base = pFD->GetBase();
+ pAddress = pFD->GetStaticAddressHandle((void *)dac_cast<TADDR>(base));
+ }
+
+ // The following code assumes that the statics are pinned, which is not the case for collectible types.
+ _ASSERTE(!pFD->GetEnclosingMethodTable()->Collectible());
+
+ // Unbox valuetype fields
+ if (pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE && !pFD->IsRVA())
+ pHelper = DynamicHelpers::CreateReturnIndirConst(pModule->GetLoaderAllocator(), (TADDR)pAddress, (INT8)Object::GetOffsetOfFirstField());
+ else
+ pHelper = DynamicHelpers::CreateReturnConst(pModule->GetLoaderAllocator(), (TADDR)pAddress);
+ }
+ break;
+ default:
+ _ASSERTE(!"Unexpected statics CORCOMPILE_FIXUP_BLOB_KIND");
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ return pHelper;
+}
+
+static PCODE getHelperForSharedStatic(Module * pModule, CORCOMPILE_FIXUP_BLOB_KIND kind, MethodTable * pMT, FieldDesc * pFD)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(kind == ENCODE_FIELD_ADDRESS);
+
+ CorInfoHelpFunc helpFunc = CEEInfo::getSharedStaticsHelper(pFD, pMT);
+
+ TADDR moduleID = pMT->GetModuleForStatics()->GetModuleID();
+
+ TADDR classID = 0;
+ if (helpFunc != CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR && helpFunc != CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR)
+ {
+ if (pMT->IsDynamicStatics())
+ {
+ classID = pMT->GetModuleDynamicEntryID();
+ }
+ else
+ {
+ classID = pMT->GetClassIndex();
+ }
+ }
+
+ bool fUnbox = (pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE);
+
+ AllocMemTracker amTracker;
+
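+ // The helper argument block is allocated on the loader allocator's
+ // high-frequency heap so that it lives as long as the helper stub itself;
+ // amTracker.SuppressRelease() below commits the allocation once the helper
+ // has been created successfully.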
+ StaticFieldAddressArgs * pArgs = (StaticFieldAddressArgs *)amTracker.Track(
+ pModule->GetLoaderAllocator()->GetHighFrequencyHeap()->
+ AllocMem(S_SIZE_T(sizeof(StaticFieldAddressArgs))));
+
+ pArgs->staticBaseHelper = (FnStaticBaseHelper)CEEJitInfo::getHelperFtnStatic((CorInfoHelpFunc)helpFunc);
+ pArgs->arg0 = moduleID;
+ pArgs->arg1 = classID;
+ pArgs->offset = pFD->GetOffset();
+
+ PCODE pHelper = DynamicHelpers::CreateHelper(pModule->GetLoaderAllocator(), (TADDR)pArgs,
+ fUnbox ? GetEEFuncEntryPoint(JIT_StaticFieldAddressUnbox_Dynamic) : GetEEFuncEntryPoint(JIT_StaticFieldAddress_Dynamic));
+
+ amTracker.SuppressRelease();
+
+ return pHelper;
+}
+
+static PCODE getHelperForStaticBase(Module * pModule, CORCOMPILE_FIXUP_BLOB_KIND kind, MethodTable * pMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ int helpFunc = CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE;
+
+ if (kind == ENCODE_STATIC_BASE_GC_HELPER || kind == ENCODE_THREAD_STATIC_BASE_GC_HELPER)
+ {
+ helpFunc = CORINFO_HELP_GETSHARED_GCSTATIC_BASE;
+ }
+
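+ // The delta arithmetic below relies on the CORINFO_HELP_GETSHARED_* enum
+ // values being laid out in parallel groups: the distance from the GC base
+ // helper to its DYNAMICCLASS / NOCTOR / THREADSTATIC variant matches the
+ // distance from the non-GC base helper to its corresponding variant, so a
+ // delta computed from the GC pair can be applied to either base helper.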
+ if (pMT->IsDynamicStatics())
+ {
+ const int delta = CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS - CORINFO_HELP_GETSHARED_GCSTATIC_BASE;
+ helpFunc += delta;
+ }
+ else
+ if (!pMT->HasClassConstructor() && !pMT->HasBoxedRegularStatics())
+ {
+ const int delta = CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR - CORINFO_HELP_GETSHARED_GCSTATIC_BASE;
+ helpFunc += delta;
+ }
+
+ if (kind == ENCODE_THREAD_STATIC_BASE_NONGC_HELPER || kind == ENCODE_THREAD_STATIC_BASE_GC_HELPER)
+ {
+ const int delta = CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE - CORINFO_HELP_GETSHARED_GCSTATIC_BASE;
+ helpFunc += delta;
+ }
+
+ PCODE pHelper;
+ if (helpFunc == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR || helpFunc == CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR)
+ {
+ pHelper = DynamicHelpers::CreateHelper(pModule->GetLoaderAllocator(), pMT->GetModule()->GetModuleID(), CEEJitInfo::getHelperFtnStatic((CorInfoHelpFunc)helpFunc));
+ }
+ else
+ {
+ TADDR moduleID = pMT->GetModuleForStatics()->GetModuleID();
+
+ TADDR classID;
+ if (pMT->IsDynamicStatics())
+ {
+ classID = pMT->GetModuleDynamicEntryID();
+ }
+ else
+ {
+ classID = pMT->GetClassIndex();
+ }
+
+ pHelper = DynamicHelpers::CreateHelper(pModule->GetLoaderAllocator(), moduleID, classID, CEEJitInfo::getHelperFtnStatic((CorInfoHelpFunc)helpFunc));
+ }
+
+ return pHelper;
+}
+
+PCODE DynamicHelperFixup(TransitionBlock * pTransitionBlock, TADDR * pCell, DWORD sectionIndex, Module * pModule, CORCOMPILE_FIXUP_BLOB_KIND * pKind, TypeHandle * pTH, MethodDesc ** ppMD, FieldDesc ** ppFD)
+{
+ STANDARD_VM_CONTRACT;
+
+ PEImageLayout *pNativeImage = pModule->GetNativeOrReadyToRunImage();
+
+ RVA rva = pNativeImage->GetDataRva((TADDR)pCell);
+
+ PTR_CORCOMPILE_IMPORT_SECTION pImportSection = pModule->GetImportSectionFromIndex(sectionIndex);
+ _ASSERTE(pImportSection == pModule->GetImportSectionForRVA(rva));
+
+ _ASSERTE(pImportSection->EntrySize == sizeof(TADDR));
+
+ COUNT_T index = (rva - pImportSection->Section.VirtualAddress) / sizeof(TADDR);
+
+ PTR_DWORD pSignatures = dac_cast<PTR_DWORD>(pNativeImage->GetRvaData(pImportSection->Signatures));
+
+ PCCOR_SIGNATURE pBlob = (BYTE *)pNativeImage->GetRvaData(pSignatures[index]);
+
+ BYTE kind = *pBlob++;
+
+ Module * pInfoModule = pModule;
+ if (kind & ENCODE_MODULE_OVERRIDE)
+ {
+ DWORD moduleIndex = CorSigUncompressData(pBlob);
+ pInfoModule = pModule->GetModuleFromIndex(moduleIndex);
+ kind &= ~ENCODE_MODULE_OVERRIDE;
+ }
+
+ bool fReliable = false;
+ TypeHandle th;
+ MethodDesc * pMD = NULL;
+ FieldDesc * pFD = NULL;
+
+ switch (kind)
+ {
+ case ENCODE_NEW_HELPER:
+ th = ZapSig::DecodeType(pModule, pInfoModule, pBlob);
+ th.AsMethodTable()->EnsureInstanceActive();
+ break;
+ case ENCODE_ISINSTANCEOF_HELPER:
+ case ENCODE_CHKCAST_HELPER:
+ fReliable = true;
+ case ENCODE_NEW_ARRAY_HELPER:
+ th = ZapSig::DecodeType(pModule, pInfoModule, pBlob);
+ break;
+ case ENCODE_THREAD_STATIC_BASE_NONGC_HELPER:
+ case ENCODE_THREAD_STATIC_BASE_GC_HELPER:
+ case ENCODE_STATIC_BASE_NONGC_HELPER:
+ case ENCODE_STATIC_BASE_GC_HELPER:
+ case ENCODE_CCTOR_TRIGGER:
+ th = ZapSig::DecodeType(pModule, pInfoModule, pBlob);
+ Statics:
+ th.AsMethodTable()->EnsureInstanceActive();
+ th.AsMethodTable()->CheckRunClassInitThrowing();
+ fReliable = true;
+ break;
+ case ENCODE_FIELD_ADDRESS:
+ pFD = ZapSig::DecodeField(pModule, pInfoModule, pBlob, &th);
+ _ASSERTE(pFD->IsStatic());
+ goto Statics;
+ case ENCODE_VIRTUAL_ENTRY:
+ // case ENCODE_VIRTUAL_ENTRY_DEF_TOKEN:
+ // case ENCODE_VIRTUAL_ENTRY_REF_TOKEN:
+ // case ENCODE_VIRTUAL_ENTRY_SLOT:
+ fReliable = true;
+ case ENCODE_DELEGATE_CTOR:
+ pMD = ZapSig::DecodeMethod(pModule, pInfoModule, pBlob, &th);
+ pMD->EnsureActive();
+ break;
+ default:
+ _ASSERTE(!"Unexpected CORCOMPILE_FIXUP_BLOB_KIND");
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ PCODE pHelper = NULL;
+
+ if (fReliable)
+ {
+ // For reliable helpers, exceptions in creating the optimized helper are non-fatal. Swallow them to make CER work well.
+ EX_TRY
+ {
+ switch (kind)
+ {
+ case ENCODE_ISINSTANCEOF_HELPER:
+ case ENCODE_CHKCAST_HELPER:
+ {
+ bool fClassMustBeRestored;
+ CorInfoHelpFunc helpFunc = CEEInfo::getCastingHelperStatic(th, /* throwing */ (kind == ENCODE_CHKCAST_HELPER), &fClassMustBeRestored);
+ pHelper = DynamicHelpers::CreateHelperArgMove(pModule->GetLoaderAllocator(), th.AsTAddr(), CEEJitInfo::getHelperFtnStatic(helpFunc));
+ }
+ break;
+ case ENCODE_THREAD_STATIC_BASE_NONGC_HELPER:
+ case ENCODE_THREAD_STATIC_BASE_GC_HELPER:
+ case ENCODE_STATIC_BASE_NONGC_HELPER:
+ case ENCODE_STATIC_BASE_GC_HELPER:
+ case ENCODE_CCTOR_TRIGGER:
+ case ENCODE_FIELD_ADDRESS:
+ {
+ MethodTable * pMT = th.AsMethodTable();
+
+ bool fNeedsNonTrivialHelper = false;
+
+ if (pMT->IsDomainNeutral() && !IsSingleAppDomain())
+ {
+ fNeedsNonTrivialHelper = true;
+ }
+ else
+ if (pMT->Collectible() && (kind != ENCODE_CCTOR_TRIGGER))
+ {
+ // Collectible statics are not pinned - the fast getters expect statics to be pinned
+ fNeedsNonTrivialHelper = true;
+ }
+ else
+ {
+ if (pFD != NULL)
+ {
+ fNeedsNonTrivialHelper = !!pFD->IsSpecialStatic();
+ }
+ else
+ {
+ fNeedsNonTrivialHelper = (kind == ENCODE_THREAD_STATIC_BASE_NONGC_HELPER) || (kind == ENCODE_THREAD_STATIC_BASE_GC_HELPER);
+ }
+ }
+
+ if (fNeedsNonTrivialHelper)
+ {
+ if (pFD != NULL)
+ {
+ if (pFD->IsRVA() || pFD->IsContextStatic())
+ {
+ _ASSERTE(!"Fast getter for rare kinds of static fields");
+ }
+ else
+ {
+ pHelper = getHelperForSharedStatic(pModule, (CORCOMPILE_FIXUP_BLOB_KIND)kind, pMT, pFD);
+ }
+ }
+ else
+ {
+ pHelper = getHelperForStaticBase(pModule, (CORCOMPILE_FIXUP_BLOB_KIND)kind, pMT);
+ }
+ }
+ else
+ {
+ // Delay the creation of the helper until the type is initialized
+ if (pMT->IsClassInited())
+ pHelper = getHelperForInitializedStatic(pModule, (CORCOMPILE_FIXUP_BLOB_KIND)kind, pMT, pFD);
+ }
+ }
+ break;
+
+ case ENCODE_VIRTUAL_ENTRY:
+ // case ENCODE_VIRTUAL_ENTRY_DEF_TOKEN:
+ // case ENCODE_VIRTUAL_ENTRY_REF_TOKEN:
+ // case ENCODE_VIRTUAL_ENTRY_SLOT:
+ {
+ if (!pMD->IsVtableMethod())
+ {
+ pHelper = DynamicHelpers::CreateReturnConst(pModule->GetLoaderAllocator(), pMD->GetMultiCallableAddrOfCode());
+ }
+ else
+ {
+ AllocMemTracker amTracker;
+
+ VirtualFunctionPointerArgs * pArgs = (VirtualFunctionPointerArgs *)amTracker.Track(
+ pModule->GetLoaderAllocator()->GetHighFrequencyHeap()->
+ AllocMem(S_SIZE_T(sizeof(VirtualFunctionPointerArgs))));
+
+ pArgs->classHnd = (CORINFO_CLASS_HANDLE)th.AsPtr();
+ pArgs->methodHnd = (CORINFO_METHOD_HANDLE)pMD;
+
+ pHelper = DynamicHelpers::CreateHelperWithArg(pModule->GetLoaderAllocator(), (TADDR)pArgs,
+ GetEEFuncEntryPoint(JIT_VirtualFunctionPointer_Dynamic));
+
+ amTracker.SuppressRelease();
+ }
+ }
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ if (pHelper != NULL)
+ {
+ *EnsureWritableExecutablePages((TADDR *)pCell) = pHelper;
+ }
+
+#ifdef _DEBUG
+ // Always execute the reliable fallback in debug builds
+ pHelper = NULL;
+#endif
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH (SwallowAllExceptions);
+ }
+ else
+ {
+ switch (kind)
+ {
+ case ENCODE_NEW_HELPER:
+ {
+ CorInfoHelpFunc helpFunc = CEEInfo::getNewHelperStatic(th.AsMethodTable());
+ pHelper = DynamicHelpers::CreateHelper(pModule->GetLoaderAllocator(), th.AsTAddr(), CEEJitInfo::getHelperFtnStatic(helpFunc));
+ }
+ break;
+ case ENCODE_NEW_ARRAY_HELPER:
+ {
+ CorInfoHelpFunc helpFunc = CEEInfo::getNewArrHelperStatic(th);
+ pHelper = DynamicHelpers::CreateHelperArgMove(pModule->GetLoaderAllocator(), th.AsTAddr(), CEEJitInfo::getHelperFtnStatic(helpFunc));
+ }
+ break;
+
+ case ENCODE_DELEGATE_CTOR:
+ {
+ MethodTable * pDelegateType = NULL;
+
+ {
+ GCX_COOP();
+
+ TADDR pArgument = (TADDR)pTransitionBlock + TransitionBlock::GetOffsetOfArgumentRegisters();
+#ifdef _TARGET_X86_
+ // x86 is special as always
+ pArgument += offsetof(ArgumentRegisters, ECX);
+#endif
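+ // pArgument now points at the saved register slot holding the first
+ // argument (the delegate 'this' object); the exact delegate type is
+ // recovered from it below.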
+
+ if (pArgument != NULL)
+ {
+ pDelegateType = (*(Object **)pArgument)->GetMethodTable();
+ _ASSERTE(pDelegateType->IsDelegate());
+ }
+ }
+
+ DelegateCtorArgs ctorData;
+ ctorData.pMethod = NULL;
+ ctorData.pArg3 = NULL;
+ ctorData.pArg4 = NULL;
+ ctorData.pArg5 = NULL;
+
+ MethodDesc * pDelegateCtor = NULL;
+
+ if (pDelegateType != NULL)
+ {
+ pDelegateCtor = COMDelegate::GetDelegateCtor(TypeHandle(pDelegateType), pMD, &ctorData);
+
+ if (ctorData.pArg4 != NULL || ctorData.pArg5 != NULL)
+ {
+ // This should never happen - we should never get collectible or secure delegates here
+ _ASSERTE(false);
+ pDelegateCtor = NULL;
+ }
+ }
+
+ TADDR target = NULL;
+
+ if (pDelegateCtor != NULL)
+ {
+ target = pDelegateCtor->GetMultiCallableAddrOfCode();
+ }
+ else
+ {
+ target = ECall::GetFCallImpl(MscorlibBinder::GetMethod(METHOD__DELEGATE__CONSTRUCT_DELEGATE));
+ ctorData.pArg3 = NULL;
+ }
+
+ if (ctorData.pArg3 != NULL)
+ {
+ pHelper = DynamicHelpers::CreateHelperWithTwoArgs(pModule->GetLoaderAllocator(), pMD->GetMultiCallableAddrOfCode(), (TADDR)ctorData.pArg3, target);
+ }
+ else
+ {
+ pHelper = DynamicHelpers::CreateHelperWithTwoArgs(pModule->GetLoaderAllocator(), pMD->GetMultiCallableAddrOfCode(), target);
+ }
+ }
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ if (pHelper != NULL)
+ {
+ *EnsureWritableExecutablePages((TADDR *)pCell) = pHelper;
+ }
+ }
+
+ *pKind = (CORCOMPILE_FIXUP_BLOB_KIND)kind;
+ *pTH = th;
+ *ppMD = pMD;
+ *ppFD = pFD;
+
+ return pHelper;
+}
+
+extern "C" SIZE_T STDCALL DynamicHelperWorker(TransitionBlock * pTransitionBlock, TADDR * pCell, DWORD sectionIndex, Module * pModule, INT frameFlags)
+{
+ PCODE pHelper = NULL;
+ SIZE_T result = NULL;
+
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ MAKE_CURRENT_THREAD_AVAILABLE();
+
+#ifdef _DEBUG
+ Thread::ObjectRefFlush(CURRENT_THREAD);
+#endif
+
+ FrameWithCookie<DynamicHelperFrame> frame(pTransitionBlock, frameFlags);
+ DynamicHelperFrame * pFrame = &frame;
+
+ pFrame->Push(CURRENT_THREAD);
+
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
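+ // Same callsite decoding as in ExternalMethodFixupWorker above: recover the
+ // indirection cell address from the bytes preceding the return address
+ // (an absolute address on x86, a RIP-relative displacement on AMD64).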
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ // Decode indirection cell from callsite if it is not present
+ if (pCell == NULL)
+ {
+ // Assume that the callsite is call [xxxxxxxx]
+ PCODE retAddr = pFrame->GetReturnAddress();
+#ifdef _TARGET_X86_
+ pCell = *(((TADDR **)retAddr) - 1);
+#else
+ pCell = (TADDR *)(*(((INT32 *)retAddr) - 1) + retAddr);
+#endif
+ }
+#endif
+ _ASSERTE(pCell != NULL);
+
+ TypeHandle th;
+ MethodDesc * pMD = NULL;
+ FieldDesc * pFD = NULL;
+ CORCOMPILE_FIXUP_BLOB_KIND kind = (CORCOMPILE_FIXUP_BLOB_KIND)0;
+
+ {
+ GCX_PREEMP_THREAD_EXISTS(CURRENT_THREAD);
+
+ pHelper = DynamicHelperFixup(pTransitionBlock, pCell, sectionIndex, pModule, &kind, &th, &pMD, &pFD);
+ }
+
+ if (pHelper == NULL)
+ {
+ TADDR pArgument = (TADDR)pTransitionBlock + TransitionBlock::GetOffsetOfArgumentRegisters();
+#ifdef _TARGET_X86_
+ // x86 is special as always
+ pArgument += offsetof(ArgumentRegisters, ECX);
+#endif
+
+ switch (kind)
+ {
+ case ENCODE_ISINSTANCEOF_HELPER:
+ case ENCODE_CHKCAST_HELPER:
+ if (*(Object **)pArgument == NULL || ObjIsInstanceOf(*(Object **)pArgument, th))
+ {
+ result = (SIZE_T)(*(Object **)pArgument);
+ }
+ else
+ {
+ if (kind == ENCODE_CHKCAST_HELPER)
+ {
+ OBJECTREF obj = ObjectToOBJECTREF(*(Object **)pArgument);
+ GCPROTECT_BEGIN(obj);
+ COMPlusThrowInvalidCastException(&obj, th);
+ GCPROTECT_END();
+ }
+
+ result = NULL;
+ }
+ break;
+ case ENCODE_STATIC_BASE_NONGC_HELPER:
+ result = (SIZE_T)th.AsMethodTable()->GetNonGCStaticsBasePointer();
+ break;
+ case ENCODE_STATIC_BASE_GC_HELPER:
+ result = (SIZE_T)th.AsMethodTable()->GetGCStaticsBasePointer();
+ break;
+ case ENCODE_THREAD_STATIC_BASE_NONGC_HELPER:
+ ThreadStatics::GetTLM(th.AsMethodTable())->EnsureClassAllocated(th.AsMethodTable());
+ result = (SIZE_T)th.AsMethodTable()->GetNonGCThreadStaticsBasePointer();
+ break;
+ case ENCODE_THREAD_STATIC_BASE_GC_HELPER:
+ ThreadStatics::GetTLM(th.AsMethodTable())->EnsureClassAllocated(th.AsMethodTable());
+ result = (SIZE_T)th.AsMethodTable()->GetGCThreadStaticsBasePointer();
+ break;
+ case ENCODE_CCTOR_TRIGGER:
+ break;
+ case ENCODE_FIELD_ADDRESS:
+ result = (SIZE_T)pFD->GetCurrentStaticAddress();
+ break;
+ case ENCODE_VIRTUAL_ENTRY:
+ // case ENCODE_VIRTUAL_ENTRY_DEF_TOKEN:
+ // case ENCODE_VIRTUAL_ENTRY_REF_TOKEN:
+ // case ENCODE_VIRTUAL_ENTRY_SLOT:
+ {
+ OBJECTREF objRef = ObjectToOBJECTREF(*(Object **)pArgument);
+
+ GCPROTECT_BEGIN(objRef);
+
+ if (objRef == NULL)
+ COMPlusThrow(kNullReferenceException);
+
+ // Duplicated logic from JIT_VirtualFunctionPointer_Framed
+ if (!pMD->IsVtableMethod())
+ {
+ result = pMD->GetMultiCallableAddrOfCode();
+ }
+ else
+ {
+ result = pMD->GetMultiCallableAddrOfVirtualizedCode(&objRef, th);
+ }
+
+ GCPROTECT_END();
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ pFrame->Pop(CURRENT_THREAD);
+
+ if (pHelper == NULL)
+ *(SIZE_T *)((TADDR)pTransitionBlock + TransitionBlock::GetOffsetOfArgumentRegisters()) = result;
+ return pHelper;
+}
+
+#endif // FEATURE_READYTORUN
+
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/profattach.cpp b/src/vm/profattach.cpp
new file mode 100644
index 0000000000..0703606dc6
--- /dev/null
+++ b/src/vm/profattach.cpp
@@ -0,0 +1,1337 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ProfAttach.cpp
+//
+
+//
+// Definitions of functions that help with attaching and detaching profilers
+//
+
+// ======================================================================================
+
+#include "common.h"
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+
+#include <sddl.h> // Windows security descriptor language
+#include <SecurityUtil.h>
+#include "eeprofinterfaces.h"
+#include "eetoprofinterfaceimpl.h"
+#include "corprof.h"
+#include "proftoeeinterfaceimpl.h"
+#include "proftoeeinterfaceimpl.inl"
+#include "profilinghelper.h"
+#include "profilinghelper.inl"
+#include "profattach.h"
+#include "profattach.inl"
+#include "securitywrapper.h"
+#include "profattachserver.h"
+#include "profattachserver.inl"
+#include "profattachclient.h"
+#include "profdetach.h"
+
+PSECURITY_DESCRIPTOR ProfilingAPIAttachDetach::s_pSecurityDescriptor = NULL;
+HANDLE ProfilingAPIAttachDetach::s_hAttachEvent = NULL;
+ProfilingAPIAttachDetach::AttachThreadingMode ProfilingAPIAttachDetach::s_attachThreadingMode =
+ ProfilingAPIAttachDetach::kUninitialized;
+BOOL ProfilingAPIAttachDetach::s_fInitializeCalled = FALSE;
+
+// Both the trigger (via code:ProfilingAPIAttachClient) and the target profilee (via
+// code:ProfilingAPIAttachServer) use this constant to identify their own version.
+const VersionBlock ProfilingAPIAttachDetach::kCurrentProcessVersion(
+ VER_MAJORVERSION,
+ VER_MINORVERSION,
+ VER_PRODUCTBUILD,
+ VER_PRODUCTBUILD_QFE);
+
+// Note that the following two VersionBlocks are initialized with static numerals rather
+// than using the VER_* preproc defines, as we don't want these VersionBlocks to change
+// on us from version to version unless we explicitly make a choice to begin breaking
+// compatibility between triggers and profilees (and hopefully we won't need to do this
+// ever!).
+
+// A profilee compiled into this mscorwks.dll states that it can only interoperate with
+// triggers (i.e., AttachProfiler() implementations (pipe clients)) whose runtime version
+// is >= this constant.
+//
+// This value should not change as new runtimes are released unless
+// code:ProfilingAPIAttachServer is modified to accept newer requests or send newer
+// response messages in a way incompatible with older code:ProfilingAPIAttachClient
+// objects implementing AttachProfiler(). And that is generally discouraged anyway.
+const VersionBlock ProfilingAPIAttachDetach::kMinimumAllowableTriggerVersion(
+ 4,
+ 0,
+ 0,
+ 0);
+
+// An AttachProfiler() implementation compiled into this mscorwks.dll, and called within
+// a trigger process, can only interoperate with target profilee apps (pipe servers)
+// whose runtime version is >= this constant.
+//
+// This value should not change as new runtimes are released unless
+// code:ProfilingAPIAttachClient is modified to send newer request or interpret newer
+// response messages in a way incompatible with older code:ProfilingAPIAttachServer
+// objects implementing the pipe server. And that is generally discouraged anyway.
+const VersionBlock ProfilingAPIAttachDetach::kMinimumAllowableProfileeVersion(
+ 4,
+ 0,
+ 0,
+ 0);
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::OverlappedResultHolder implementation. See
+// code:ProfilingAPIAttachDetach::OverlappedResultHolder for more information
+//
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::OverlappedResultHolder::Initialize
+//
+// Description:
+// Call this first! This initializes the contained OVERLAPPED structure
+//
+// Return Value:
+// Returns E_OUTOFMEMORY if OVERLAPPED structure could not be allocated.
+// Else S_OK.
+//
+
+HRESULT ProfilingAPIAttachDetach::OverlappedResultHolder::Initialize()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Assign(new (nothrow) OVERLAPPED);
+ if (m_value == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ memset(m_value, 0, sizeof(OVERLAPPED));
+ return S_OK;
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::OverlappedResultHolder::Wait
+//
+// Description:
+// Uses the contained OVERLAPPED structure (pointed to by m_value) to call
+// WaitForSingleObject to wait for an overlapped read or write on the pipe to complete
+// (or timeout).
+//
+// Arguments:
+// * dwMillisecondsMax - [in] Timeout for the wait
+// * hPipe - [in] Handle to the pipe object carrying out the request (may be either a
+// server or client pipe handle).
+// * pcbReceived - [out] Number of bytes received from the overlapped request
+//
+// Return Value:
+// HRESULT indicating success or failure
+//
+// Assumptions:
+// * Must call code:ProfilingAPIAttachDetach::OverlappedResultHolder::Initialize first
+
+HRESULT ProfilingAPIAttachDetach::OverlappedResultHolder::Wait(
+ DWORD dwMillisecondsMax,
+ HANDLE hPipe,
+ DWORD * pcbReceived)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsValidHandle(hPipe));
+ _ASSERTE(m_value != NULL);
+ _ASSERTE(pcbReceived != NULL);
+
+ HRESULT hr = E_UNEXPECTED;
+
+ // Since the OVERLAPPED structure referenced by m_value contains a NULL event, the OS
+ // will signal hPipe itself when the operation is complete
+ switch (WaitForSingleObject(hPipe, dwMillisecondsMax))
+ {
+ default:
+ _ASSERTE(!"Unexpected return from WaitForSingleObject()");
+ hr = E_UNEXPECTED;
+ break;
+
+ case WAIT_FAILED:
+ hr = HRESULT_FROM_GetLastError();
+ break;
+
+ case WAIT_TIMEOUT:
+ hr = HRESULT_FROM_WIN32(ERROR_TIMEOUT);
+ break;
+
+ case WAIT_OBJECT_0:
+ // Operation finished in time. Get the results
+ if (!GetOverlappedResult(
+ hPipe,
+ m_value,
+ pcbReceived,
+ TRUE)) // bWait: operation is done, so this returns immediately anyway
+ {
+ hr = HRESULT_FROM_GetLastError();
+ }
+ else
+ {
+ hr = S_OK;
+ }
+ break;
+ }
+
+ // The gymnastics below are to ensure that Windows is done with the overlapped
+ // structure, so we know it's safe to allow the base class (NewHolder) to free it
+ // when the destructor is called.
+
+ if (SUCCEEDED(hr))
+ {
+ // Operation successful, so we're done with the OVERLAPPED structure pointed to
+ // by m_value and may return
+ return hr;
+ }
+
+ _ASSERTE(FAILED(hr));
+
+ // There was a failure waiting for or retrieving the result. Cancel the operation and
+ // wait again for verification that the operation is completed or canceled.
+
+ // Note that we're ignoring whether CancelIo succeeds or fails, as our action is the
+ // same either way: Wait on the pipe again to verify that no active operation remains.
+ CancelIo(hPipe);
+
+ if (WaitForSingleObject(hPipe, dwMillisecondsMax) == WAIT_OBJECT_0)
+ {
+ // Typical case: The wait returns successfully and quickly, so we have
+ // verification that the OVERLAPPED structure pointed to by m_value is done
+ // being used.
+ return hr;
+ }
+
+ // Atypical case: For all our trying, we're unable to force this request to end
+ // before returning. Therefore, we're intentionally leaking the OVERLAPPED structure
+ // pointed to by m_value, as Windows may write to it at a later time.
+ SuppressRelease();
+ return hr;
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::ProfilingAPIAttachThreadStart
+//
+// Description:
+// Thread proc for AttachThread. Serves as simple try/catch wrapper around
+// ProfilingAPIAttachThreadMain
+//
+// Arguments:
+// * LPVOID thread proc param is ignored
+//
+// Return Value:
+// Just returns 0 always.
+//
+
+// static
+DWORD WINAPI ProfilingAPIAttachDetach::ProfilingAPIAttachThreadStart(LPVOID)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // At start of this thread, set its type so SOS !threads and anyone else knows who we
+ // are.
+ ClrFlsSetThreadType(ThreadType_ProfAPI_Attach);
+
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF: AttachThread created and executing.\n"));
+
+ // This try block is a last-ditch stop-gap to prevent an unhandled exception on the
+ // AttachThread from bringing down the process. Note that if the unhandled
+ // exception is a terminal one, then hey, sure, let's tear everything down. Also
+ // note that any naughtiness in the profiler (e.g., throwing an exception from its
+ // Initialize callback) should already be handled before we pop back to here, so this
+ // is just being super paranoid.
+ EX_TRY
+ {
+ // Don't care about return value, thread proc will just return 0 regardless
+ ProfilingAPIAttachThreadMain();
+ }
+ EX_CATCH
+ {
+ _ASSERTE(!"Unhandled exception on profiling API attach / detach thread");
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF: AttachThread exiting.\n"));
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::ProfilingAPIAttachThreadMain
+//
+// Description:
+// Main code for AttachThread. Includes all attach functionality.
+//
+// Return Value:
+// S_OK if a profiler ever attached, error HRESULT otherwise
+//
+
+// static
+HRESULT ProfilingAPIAttachDetach::ProfilingAPIAttachThreadMain()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+
+ ProfilingAPIAttachServer attachServer;
+ hr = attachServer.ExecutePipeRequests();
+ if (FAILED(hr))
+ {
+ // No profiler got attached, so we're done
+ return hr;
+ }
+
+ // If we made it here, a profiler was successfully attached. It would be nice to be
+ // able to assert g_profControlBlock.curProfStatus.Get() == kProfStatusActive, but
+ // that's prone to a theoretical race: the profiler might have attached and detached
+ // by the time we get here.
+
+ return S_OK;
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::InitSecurityAttributes
+//
+// Description:
+// Initializes a SECURITY_ATTRIBUTES struct using the result of
+// code:ProfilingAPIAttachDetach::GetSecurityDescriptor
+//
+// Arguments:
+// * pSecAttrs - [in/out] SECURITY_ATTRIBUTES struct to initialize
+// * cbSecAttrs - Size in bytes of *pSecAttrs
+//
+// Return Value:
+// HRESULT indicating success or failure
+//
+
+// static
+HRESULT ProfilingAPIAttachDetach::InitSecurityAttributes(
+ SECURITY_ATTRIBUTES * pSecAttrs,
+ DWORD cbSecAttrs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ PSECURITY_DESCRIPTOR psd = NULL;
+ HRESULT hr = GetSecurityDescriptor(&psd);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ _ASSERTE(psd != NULL);
+ memset(pSecAttrs, 0, cbSecAttrs);
+ pSecAttrs->nLength = cbSecAttrs;
+ pSecAttrs->lpSecurityDescriptor = psd;
+ pSecAttrs->bInheritHandle = FALSE;
+
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Helper function that gets the string (SDDL) form of the mandatory SID for this
+// process. This encodes the integrity level of the process for use in security
+// descriptors. The integrity level is capped at "high". See code:#HighGoodEnough.
+//
+// Arguments:
+// * pwszIntegritySidString - [out] On return will point to a buffer allocated by
+// Windows that contains the string representation of the SID. If
+// GetIntegritySidString succeeds, the caller is responsible for freeing
+// *pwszIntegritySidString via LocalFree().
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+//
+
+static HRESULT GetIntegritySidString(__out LPWSTR * pwszIntegritySidString)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ _ASSERTE(pwszIntegritySidString != NULL);
+
+ NewArrayHolder<BYTE> pbLabel;
+
+ // This grabs the mandatory label SID of the current process. We will write this
+ // SID into the security descriptor, to ensure that triggers of lower integrity
+ // levels may NOT access the object... with one exception. See code:#HighGoodEnough
+ hr = SecurityUtil::GetMandatoryLabelFromProcess(GetCurrentProcess(), &pbLabel);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ TOKEN_MANDATORY_LABEL * ptml = (TOKEN_MANDATORY_LABEL *) pbLabel.GetValue();
+
+ // #HighGoodEnough:
+ // The mandatory label SID we write into the security descriptor is the same as that
+ // of the current process, with one exception. If the current process's integrity
+ // level > high (e.g., ASP.NET running at "system" integrity level), then write
+ // "high" into the security descriptor instead of the current process's actual
+ // integrity level. This allows a high integrity trigger to access the object. This
+ // implements the policy that a high integrity level is "good enough" to profile any
+ // process, even if the target process is at an even higher integrity level than
+ // "high". Why have this policy:
+ // * A high integrity process represents an elevated admin, which morally equates
+ // to a principal that should have complete control over the machine. This
+ // includes debugging or profiling any process.
+ // * According to a security expert dev on Windows, integrity level is not a
+ // "security feature". It's mainly useful as defense-in-depth or to protect
+ // IE users and admins from themselves in most cases.
+ // * It's impossible to spawn a system integrity trigger process outside of
+ // session 0 services. So profiling ASP.NET would be crazy hard without this
+ // policy.
+ DWORD * pdwIntegrityLevel = SecurityUtil::GetIntegrityLevelFromMandatorySID(ptml->Label.Sid);
+ if (*pdwIntegrityLevel > SECURITY_MANDATORY_HIGH_RID)
+ {
+ *pdwIntegrityLevel = SECURITY_MANDATORY_HIGH_RID;
+ }
+
+ if (!ConvertSidToStringSid(ptml->Label.Sid, pwszIntegritySidString))
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+ return S_OK;
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::GetSecurityDescriptor
+//
+// Description:
+// Generates a security descriptor based on an ACL containing (1) an ACE that allows
+// the current user read / write and (2) an ACE that allows admins read / write.
+// Resulting security descriptor is returned in an [out] param, and is also cached for
+// future use.
+//
+// Arguments:
+// * ppsd - [out] Generated (or cached) security descriptor
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+// static
+HRESULT ProfilingAPIAttachDetach::GetSecurityDescriptor(PSECURITY_DESCRIPTOR * ppsd)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(ppsd != NULL);
+
+ if (s_pSecurityDescriptor != NULL)
+ {
+ *ppsd = s_pSecurityDescriptor;
+ return S_OK;
+ }
+
+ // Get the user SID for the DACL
+
+ PSID psidUser = NULL;
+ HRESULT hr = ProfilingAPIUtility::GetCurrentProcessUserSid(&psidUser);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ WinAllocatedBlockHolder pvCurrentUserSidString;
+
+ if (!ConvertSidToStringSid(psidUser, (LPWSTR *)(LPVOID *) &pvCurrentUserSidString))
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+ // Get the integrity / mandatory SID for the SACL, if Vista+
+
+ LPCWSTR pwszIntegritySid = NULL;
+ WinAllocatedBlockHolder pvIntegritySidString;
+
+ hr = GetIntegritySidString((LPWSTR *) (LPVOID *) &pvIntegritySidString);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ pwszIntegritySid = (LPCWSTR) pvIntegritySidString.GetValue();
+
+ ULONG cbsd;
+ StackSString sddlSecurityDescriptor;
+ WinAllocatedBlockHolder pvSecurityDescriptor;
+
+ // The following API (ConvertStringSecurityDescriptorToSecurityDescriptorW) takes a
+ // string representation of a security descriptor (using the SDDL language), and
+ // returns back the security descriptor object to be used when defining the globally
+ // named event or pipe object. For a description of this language, go to the help on
+ // the API, and click on "string-format security descriptor":
+ // http://msdn.microsoft.com/library/default.asp?url=/library/en-us/secauthz/security/security_descriptor_string_format.asp
+ // or look through sddl.h.
+
+ // Cheat sheet for the subset of the format that we're using:
+ //
+ // Security Descriptor string:
+ // D:dacl_flags(string_ace1)(string_ace2)... (string_acen)
+ // Security SACL string:
+ // S:sacl_flags(string_ace1)(string_ace2)... (string_acen)
+ // Each string_ace:
+ // ace_type;ace_flags;rights;object_guid;inherit_object_guid;account_sid
+ //
+ // The following portions of the security descriptor string are NOT used:
+ // O:owner_sid (b/c we want current user to be the owner)
+ // G:group_sid (b/c not setting the primary group of the object)
+
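+    // For a concrete (hypothetical) example, with the current user's SID being
+    // S-1-5-21-1-2-3-1000 and the process at high integrity, the string built
+    // below comes out as:
+    //     D:P(A;;GRGW;;;BA)(A;;GRGW;;;S-1-5-21-1-2-3-1000)S:(ML;;NWNR;;;S-1-16-12288)
+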
+ // This reusable chunk defines the "(string_ace)" portion of the DACL. Given
+ // a SID, this makes an ACE for the SID with GENERIC_READ | GENERIC_WRITE access
+ #define ACE_STRING(AccountSidString) \
+ \
+ SDDL_ACE_BEGIN \
+ \
+ /* ace_type: "A;" An "allow" DACL (not "deny") */ \
+ SDDL_ACCESS_ALLOWED SDDL_SEPERATOR \
+ \
+ /* (skipping ace_flags, so that no child auto-inherits from this object) */ \
+ SDDL_SEPERATOR \
+ \
+ /* rights: "GRGW": GENERIC_READ | GENERIC_WRITE access allowed */ \
+ SDDL_GENERIC_READ SDDL_GENERIC_WRITE SDDL_SEPERATOR \
+ \
+ /* (skipping object_guid) */ \
+ SDDL_SEPERATOR \
+ \
+ /* (skipping inherit_object_guid) */ \
+ SDDL_SEPERATOR \
+ \
+ /* account_sid (filled in by macro user) */ \
+ AccountSidString \
+ \
+ SDDL_ACE_END
+
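+    // For example, ACE_STRING(SDDL_BUILTIN_ADMINISTRATORS) expands (once the
+    // preprocessor pastes the SDDL_* string fragments together) to "(A;;GRGW;;;BA)".
+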
+
+ // First, construct the DACL
+
+ sddlSecurityDescriptor.Printf(
+ // "D:" This is a DACL
+ SDDL_DACL SDDL_DELIMINATOR
+
+ // dacl_flags:
+
+ // "P" This is protected (i.e., don't allow security descriptor to be modified
+ // by inheritable ACEs)
+ SDDL_PROTECTED
+
+ // (string_ace1)
+ // account_sid: "BA" built-in local administrators group
+ ACE_STRING(SDDL_BUILTIN_ADMINISTRATORS)
+
+ // (string_ace2)
+ // account_sid: to be filled in with the current process token's primary SID
+ ACE_STRING(W("%s")),
+
+ // current process token's primary SID
+ (LPCWSTR) (LPVOID) pvCurrentUserSidString);
+
+ // Next, add the SACL (Vista+ only)
+
+ if (pwszIntegritySid != NULL)
+ {
+ sddlSecurityDescriptor.AppendPrintf(
+ // "S:" This is a SACL -- for the integrity level of the current process
+ SDDL_SACL SDDL_DELIMINATOR
+
+ // The SACL ACE begins here
+ SDDL_ACE_BEGIN
+
+ // ace_type: "ML;" A Mandatory Label ACE (i.e., integrity level)
+ SDDL_MANDATORY_LABEL SDDL_SEPERATOR
+
+ // (skipping ace_flags, so that no child auto-inherits from this object)
+ SDDL_SEPERATOR
+
+ // rights: "NWNR;" If the trigger's integrity level is lower than the
+ // integrity level we're writing into this security descriptor, then that
+ // trigger may not read or write to this object.
+ SDDL_NO_WRITE_UP SDDL_NO_READ_UP SDDL_SEPERATOR
+
+ // (skipping object_guid)
+ SDDL_SEPERATOR
+
+ // (skipping inherit_object_guid)
+ SDDL_SEPERATOR
+
+ // To be filled in with the current process's mandatory label SID (which
+ // describes the current process's integrity level, capped at "high integrity")
+ W("%s")
+
+ SDDL_ACE_END,
+
+ // current process's mandatory label SID
+ pwszIntegritySid);
+ }
+
+ if (!ConvertStringSecurityDescriptorToSecurityDescriptorW(
+ sddlSecurityDescriptor.GetUnicode(),
+ SDDL_REVISION_1,
+ (PSECURITY_DESCRIPTOR *) (LPVOID *) &pvSecurityDescriptor,
+ &cbsd))
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+ if (FastInterlockCompareExchangePointer(
+ &s_pSecurityDescriptor,
+ (PSECURITY_DESCRIPTOR) pvSecurityDescriptor,
+ NULL) == NULL)
+ {
+ // Ownership transferred to s_pSecurityDescriptor, so don't free it here
+ pvSecurityDescriptor.SuppressRelease();
+ }
+
+ _ASSERTE(s_pSecurityDescriptor != NULL);
+ *ppsd = s_pSecurityDescriptor;
+ return S_OK;
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::Initialize
+//
+// Description:
+// Perform startup (one-time-only) initialization for attach / detach infrastructure.
+// This includes the Global Attach Event, but does NOT include the Global Attach Pipe
+// (which is created only on demand). This is lazily called the first time the
+// finalizer asks for the attach event.
+//
+// Return Value:
+// S_OK: Attach / detach infrastructure initialized ok
+// S_FALSE: Attach / detach infrastructure not initialized, but for an acceptable reason
+//        (e.g., executing memory- or sync-hosted)
+// else: error HRESULT indicating an unacceptable failure that prevented attach /
+// detach infrastructure from initializing (e.g., security problem, OOM, etc.)
+//
+// Assumptions:
+// * By the time this is called:
+// * Configuration must have been read from the registry
+// * If there is a host, it has already initialized its state, including its
+// intent to memory-host or sync-host.
+// * Finalizer thread is initializing and is first asking for the attach event.
+//
+
+// static
+HRESULT ProfilingAPIAttachDetach::Initialize()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // This one assert verifies two things:
+ // * 1. Configuration has been read from the registry, AND
+ // * 2. If there is a host, it has already initialized its state.
+ // #2 is implied by this assert, because the host initializes its state before
+ // EEStartup is even called: Host directly calls CorHost2::SetHostControl to
+ // initialize itself, announce whether the CLR will be memory hosted, sync hosted,
+ // etc., and then host calls CorHost2::Start, which calls EEStartup, which
+ // initializes configuration information. So if configuration information is
+ // available, the host must have already initialized itself.
+ //
+ // The reason we care is that, for profiling API attach to be enabled during this
+ // run, we need to have the finalizer thread wait on multiple sync objects. And
+ // waiting on multiple objects is disallowed if we're memory / sync-hosted. So we
+ // need to know now whether waiting on multiple objects is allowed, so we know
+ // whether we can initialize the Attach support objects.
+ _ASSERTE(g_pConfig != NULL);
+
+ // Even if we fail to create the event, this BOOL indicates we at least
+ // tried to.
+ _ASSERTE(!s_fInitializeCalled);
+ s_fInitializeCalled = TRUE;
+
+ INDEBUG(VerifyMessageStructureLayout());
+
+ // If the CLR is being memory- or sync-hosted, then attach is not supported
+ // (see comments above)
+ if (CLRMemoryHosted() || CLRSyncHosted())
+ {
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF: Process is running with a host that implements custom memory or "
+ "synchronization management. So it will not be possible to attach a "
+ "profiler to this process.\n"));
+
+ // NOTE: Intentionally not logging this to the event log, as it would be
+ // obnoxious to see such a message every time SQL started up
+
+ return S_FALSE;
+ }
+
+ InitializeAttachThreadingMode();
+
+ if (s_attachThreadingMode == kOnDemand)
+ {
+ return InitializeForOnDemandMode();
+ }
+
+ _ASSERTE(s_attachThreadingMode == kAlwaysOn);
+ return InitializeForAlwaysOnMode();
+}
+
+#ifdef _DEBUG
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::VerifyMessageStructureLayout
+//
+// Description:
+// Debug-only function that asserts if there appear to be changes to structures that
+// are not allowed to change (for backward-compatibility reasons). In particular:
+// * VersionBlock must not change
+// * BaseRequestMessage must not change
+//
+
+// static
+void ProfilingAPIAttachDetach::VerifyMessageStructureLayout()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // If any of these asserts fire, then VersionBlock is changing its binary
+ // layout in an incompatible way. Bad!
+ _ASSERTE(sizeof(VersionBlock) == 16);
+ _ASSERTE(offsetof(VersionBlock, m_dwMajor) == 0);
+ _ASSERTE(offsetof(VersionBlock, m_dwMinor) == 4);
+ _ASSERTE(offsetof(VersionBlock, m_dwBuild) == 8);
+ _ASSERTE(offsetof(VersionBlock, m_dwQFE) == 12);
+
+ // If any of these asserts fire, then GetVersionRequestMessage is changing its binary
+ // layout in an incompatible way. Bad!
+ _ASSERTE(sizeof(GetVersionRequestMessage) == 8);
+ _ASSERTE(offsetof(GetVersionRequestMessage, m_cbMessage) == 0);
+ _ASSERTE(offsetof(GetVersionRequestMessage, m_requestMessageType) == 4);
+
+ // If any of these asserts fire, then GetVersionResponseMessage is changing its binary
+ // layout in an incompatible way. Bad!
+ _ASSERTE(sizeof(GetVersionResponseMessage) == 36);
+ _ASSERTE(offsetof(GetVersionResponseMessage, m_hr) == 0);
+ _ASSERTE(offsetof(GetVersionResponseMessage, m_profileeVersion) == 4);
+ _ASSERTE(offsetof(GetVersionResponseMessage, m_minimumAllowableTriggerVersion) == 20);
+}
+
+#endif //_DEBUG
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::InitializeAttachThreadingMode
+//
+// Description:
+// Looks at environment and GC mode to determine whether the AttachThread should
+// always be around, or created only on demand. See
+// code:ProfilingAPIAttachDetach::AttachThreadingMode.
+//
+
+// static
+void ProfilingAPIAttachDetach::InitializeAttachThreadingMode()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(s_attachThreadingMode == kUninitialized);
+
+ // Environment variable trumps all, so check it first
+ DWORD dwAlwaysOn = g_pConfig->GetConfigDWORD_DontUse_(
+ CLRConfig::EXTERNAL_AttachThreadAlwaysOn,
+ GCHeap::IsServerHeap() ? 1 : 0); // Default depends on GC server mode
+
+ if (dwAlwaysOn == 0)
+ {
+ s_attachThreadingMode = kOnDemand;
+ }
+ else
+ {
+ s_attachThreadingMode = kAlwaysOn;
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::InitializeForAlwaysOnMode
+//
+// Description:
+// Performs initialization specific to running in Always On mode. Specifically, this
+// means creating the AttachThread. The attach event is not created in this case.
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+// static
+HRESULT ProfilingAPIAttachDetach::InitializeForAlwaysOnMode()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(s_attachThreadingMode == kAlwaysOn);
+
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Attach AlwaysOn mode invoked; creating new AttachThread.\n"));
+
+ CreateAttachThread();
+
+ return S_OK;
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::InitializeForOnDemandMode
+//
+// Description:
+// Performs initialization specific to running in On Demand mode. Specifically, this
+// means creating the attach event. (The AttachThread will only be created when this
+// event is signaled by a trigger process.)
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+// static
+HRESULT ProfilingAPIAttachDetach::InitializeForOnDemandMode()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(s_attachThreadingMode == kOnDemand);
+
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Attach OnDemand mode invoked; creating attach event.\n"));
+
+ // The only part of attach that gets initialized before a profiler has
+ // actually requested to attach is the single global event that gets
+ // signaled from out-of-process.
+
+ StackSString attachEventName;
+ HRESULT hr;
+ hr = GetAttachEventName(::GetCurrentProcess(), &attachEventName);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ // Deliberately NOT using CLREvent, as it does not have support for a global name.
+ // It's ok not to use CLREvent, as we're assured above that we're not sync-hosted,
+ // which means CLREvent would just use raw Windows events anyway.
+
+ SECURITY_ATTRIBUTES *psa = NULL;
+
+ SECURITY_ATTRIBUTES sa;
+
+    // Only assign security attributes for the non-app-container scenario.
+    // We are assuming the default (blocking everything) is good enough for the
+    // app container scenario.
+ if (!IsAppContainerProcess(::GetCurrentProcess()))
+ {
+ hr = InitSecurityAttributes(&sa, sizeof(sa));
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ psa = &sa;
+ }
+
+ _ASSERTE(s_hAttachEvent == NULL);
+ s_hAttachEvent = WszCreateEvent(
+ psa, // security attributes
+ FALSE, // bManualReset = FALSE: autoreset after waiting thread is unblocked
+ FALSE, // initial state = FALSE, i.e., unsignaled
+ attachEventName.GetUnicode() // Global name seen out-of-proc
+ );
+ if (s_hAttachEvent == NULL)
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+ return S_OK;
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::GetAttachEvent
+//
+// Description:
+// Used by finalizer thread to get the profiling API attach event. First time this is
+// called, the event and other supporting objects will be created.
+//
+// Return Value:
+// The attach event or NULL if attach event creation failed during startup. In either
+// case, do NOT call CloseHandle on the returned event handle.
+//
+// Assumptions:
+// * ProfilingAPIUtility::InitializeProfiling should already have been called before
+// this is called. That ensures that, if a profiler was configured to load on
+// startup, then that load has already occurred by now.
+// * The event's HANDLE refcount is managed solely by ProfilingAPIAttachDetach. So do
+// not call CloseHandle() on the HANDLE returned.
+//
+// Notes:
+// * If the attach event was not created on startup, then this will return NULL.
+// Possible reasons why this can occur:
+// * The current process is the NGEN service, OR
+//        * The process is sync- or memory-hosted, OR
+// * Attach is running in "always on" mode, meaning we always have an AttachThread
+// with a pipe, so there's no need for an event.
+//
+
+// static
+HANDLE ProfilingAPIAttachDetach::GetAttachEvent()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ if (IsCompilationProcess())
+ {
+ // No profiler attach on NGEN!
+ return NULL;
+ }
+
+ if (!s_fInitializeCalled)
+ {
+ // If a profiler was supposed to load on startup, it's already happened
+ // now. So it's safe to set up the attach support objects, and allow
+ // an attaching profiler to make an attempt (which can now gracefully fail
+ // if a startup profiler has loaded).
+
+ HRESULT hr = Initialize();
+ if (FAILED(hr))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: ProfilingAPIAttachDetach::Initialize failed, so this process will not "
+ "be able to attach a profiler. hr=0x%x.\n",
+ hr));
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_ATTACH_INIT, hr);
+
+ return NULL;
+ }
+ }
+
+ if (s_attachThreadingMode == kAlwaysOn)
+ {
+ // In always-on mode, we always have an AttachThread listening on the pipe, so
+ // there's no need for an event.
+ _ASSERTE(s_hAttachEvent == NULL);
+ }
+
+ return s_hAttachEvent;
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::ProcessSignaledAttachEvent
+//
+// Description:
+// Called by finalizer thread when the finalizer thread detects that the globally
+// named Profiler Attach Event is signaled. This simply spins up the AttachThread
+// (starting in ProfilingAPIAttachThreadStart) and returns.
+//
+
+// static
+void ProfilingAPIAttachDetach::ProcessSignaledAttachEvent()
+{
+ // This function is practically a leaf (though not quite), and is called from the
+ // finalizer thread at various points, so keeping the contract strict to allow for
+    // maximum flexibility on when this may be called.
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Attach event signaled; creating new AttachThread.\n"));
+
+ CreateAttachThread();
+}
+
+typedef BOOL
+(WINAPI *PFN_GetAppContainerNamedObjectPath)(
+ HANDLE Token,
+ PSID AppContainerSid,
+ ULONG ObjectPathLength,
+ WCHAR * ObjectPath,
+ PULONG ReturnLength
+ );
+
+static Volatile<PFN_GetAppContainerNamedObjectPath> g_pfnGetAppContainerNamedObjectPath = NULL;
+
+// ----------------------------------------------------------------------------
+// GetAppContainerNamedObjectPath
+//
+// Description:
+// Retrieve named object path for the specified app container process
+// The name looks something like the following:
+// LowBoxNamedObjects\<AppContainer_SID>
+// AppContainer_SID is the SID for the app container, for example: S-1-15-2-3-4-5-6-7-8
+//
+// Arguments:
+//    * hProcess - handle of the app container process
+// * wszObjectPath - [out] Buffer to fill in
+// * dwObjectPathSizeInChar - Size of buffer
+//
+HRESULT ProfilingAPIAttachDetach::GetAppContainerNamedObjectPath(HANDLE hProcess, __out_ecount(dwObjectPathSizeInChar) WCHAR * wszObjectPath, DWORD dwObjectPathSizeInChar)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(wszObjectPath != NULL);
+
+ HandleHolder hToken;
+
+ if (!OpenProcessToken(hProcess, TOKEN_QUERY, &hToken))
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+ if (g_pfnGetAppContainerNamedObjectPath.Load() == NULL)
+ {
+ HMODULE hMod = WszGetModuleHandle(W("kernel32.dll"));
+ if (hMod == NULL)
+ {
+ // This should never happen but I'm checking it anyway
+ return HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
+ }
+
+ PFN_GetAppContainerNamedObjectPath pfnGetAppContainerNamedObjectPath = (PFN_GetAppContainerNamedObjectPath)
+ ::GetProcAddress(
+ hMod,
+ "GetAppContainerNamedObjectPath");
+
+ if (!pfnGetAppContainerNamedObjectPath)
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+        // We should always get the same address back from GetProcAddress, so there is
+        // no concern about a race condition.
+ g_pfnGetAppContainerNamedObjectPath = pfnGetAppContainerNamedObjectPath;
+ }
+
+ DWORD dwBufferLength;
+ if (!g_pfnGetAppContainerNamedObjectPath(
+ hToken, // Process token
+ NULL, // AppContainer package SID optional.
+ dwObjectPathSizeInChar, // Object path length
+ wszObjectPath, // Object path
+ &dwBufferLength // return length
+ ))
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+ return S_OK;
+}
+
+
+// @TODO: Update this once the Windows header files are updated for Win8
+#ifndef TokenIsAppContainer
+ #define TokenIsAppContainer ((TOKEN_INFORMATION_CLASS) 29)
+#endif
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::IsAppContainerProcess
+//
+// Description:
+//    Return whether the specified process is an app container process
+//
+
+// static
+BOOL ProfilingAPIAttachDetach::IsAppContainerProcess(HANDLE hProcess)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ HandleHolder hToken;
+
+ if(!::OpenProcessToken(hProcess, TOKEN_QUERY, &hToken))
+ {
+ return FALSE;
+ }
+
+ BOOL fIsAppContainerProcess;
+ DWORD dwReturnLength;
+ if (!::GetTokenInformation(
+ hToken,
+ TokenIsAppContainer,
+ &fIsAppContainerProcess,
+ sizeof(BOOL),
+ &dwReturnLength) ||
+ dwReturnLength != sizeof(BOOL))
+ {
+ return FALSE;
+ }
+ else
+ {
+ return fIsAppContainerProcess;
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Called by other points in the runtime (e.g., finalizer thread) to create a new thread
+// to fill the role of the AttachThread.
+//
+
+// static
+void ProfilingAPIAttachDetach::CreateAttachThread()
+{
+ // This function is practically a leaf (though not quite), and is called from the
+ // finalizer thread at various points, so keeping the contract strict to allow for
+    // maximum flexibility on when this may be called.
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ HandleHolder hAttachThread;
+
+ // The AttachThread is intentionally not an EE Thread-object thread
+ hAttachThread = ::CreateThread(
+ NULL, // lpThreadAttributes; don't want child processes inheriting this handle
+ 0, // dwStackSize (0 = use default)
+ ProfilingAPIAttachThreadStart,
+ NULL, // lpParameter (none to pass)
+ 0, // dwCreationFlags (0 = use default flags, start thread immediately)
+        NULL            // lpThreadId (don't need thread ID)
+ );
+ if (hAttachThread == NULL)
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: Failed to create AttachThread. GetLastError=%d.\n",
+ GetLastError()));
+
+ // No other error-specific code really makes much sense here. An error here is
+ // probably due to serious OOM issues which would also probably prevent logging
+ // an event. A trigger process will report that it waited for the pipe to be
+ // created, and timed out during the wait. That should be enough for the user.
+ }
+}
+
+// ----------------------------------------------------------------------------
+// CLRProfilingClassFactoryImpl::CreateInstance
+//
+// Description:
+// A standard IClassFactory interface function to allow a profiling trigger
+// to query for IID_ICLRProfiling interface
+//
+HRESULT CLRProfilingClassFactoryImpl::CreateInstance(IUnknown * pUnkOuter, REFIID riid, void ** ppv)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ if (ppv == NULL)
+ return E_POINTER;
+
+ *ppv = NULL;
+
+ NewHolder<CLRProfilingImpl> pProfilingImpl = new (nothrow) CLRProfilingImpl();
+ if (pProfilingImpl == NULL)
+ return E_OUTOFMEMORY;
+
+ HRESULT hr = pProfilingImpl->QueryInterface(riid, ppv);
+ if (SUCCEEDED(hr))
+ {
+ pProfilingImpl.SuppressRelease();
+ }
+
+ return hr;
+}
+
+// ----------------------------------------------------------------------------
+// CLRProfilingClassFactoryImpl::LockServer
+//
+// Description:
+// A standard IClassFactory interface function that doesn't do anything interesting here
+//
+HRESULT CLRProfilingClassFactoryImpl::LockServer(BOOL fLock)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return S_OK;
+}
+
+// ----------------------------------------------------------------------------
+// CLRProfilingImpl::AttachProfiler
+//
+// Description:
+// A wrapper COM function to invoke AttachProfiler with parameters from
+// profiling trigger along with a runtime version string
+//
+HRESULT CLRProfilingImpl::AttachProfiler(DWORD dwProfileeProcessID,
+ DWORD dwMillisecondsMax,
+ const CLSID *pClsidProfiler,
+ LPCWSTR wszProfilerPath,
+ void *pvClientData,
+ UINT cbClientData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ WCHAR wszRuntimeVersion[MAX_PATH];
+ DWORD dwSize = _countof(wszRuntimeVersion);
+ HRESULT hr = GetCORVersionInternal(wszRuntimeVersion, dwSize, &dwSize);
+ if (FAILED(hr))
+ return hr;
+
+ return ::AttachProfiler(dwProfileeProcessID,
+ dwMillisecondsMax,
+ pClsidProfiler,
+ wszProfilerPath,
+ pvClientData,
+ cbClientData,
+ wszRuntimeVersion);
+}
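+
+// Illustrative sketch (not authoritative) of how a trigger process typically reaches
+// the method above, per the public profiling-attach documentation; variable names are
+// hypothetical and error handling is omitted:
+//
+//     ICLRProfiling * pCLRProfiling = NULL;
+//     // pRuntimeInfo is an ICLRRuntimeInfo for the target runtime version
+//     hr = pRuntimeInfo->GetInterface(CLSID_CLRProfiling,
+//                                     IID_ICLRProfiling,
+//                                     (LPVOID *) &pCLRProfiling);
+//     hr = pCLRProfiling->AttachProfiler(dwProfileePid,
+//                                        10000,            // dwMillisecondsMax
+//                                        &clsidProfiler,
+//                                        wszProfilerPath,
+//                                        NULL, 0);         // no client data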
+
+// ----------------------------------------------------------------------------
+// ICLRProfilingGetClassObject
+//
+// Description:
+// A wrapper to create a CLRProfilingImpl object and to QueryInterface on the CLRProfilingImpl object
+//
+HRESULT ICLRProfilingGetClassObject(REFCLSID rclsid, REFIID riid, void **ppv)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ SO_NOT_MAINLINE;
+ PRECONDITION(rclsid == CLSID_CLRProfiling);
+ }
+ CONTRACTL_END;
+
+ if (ppv == NULL)
+ return E_POINTER;
+
+ *ppv = NULL;
+
+ NewHolder<CLRProfilingClassFactoryImpl> pCLRProfilingClassFactoryImpl = new (nothrow) CLRProfilingClassFactoryImpl();
+ if (pCLRProfilingClassFactoryImpl == NULL)
+ return E_OUTOFMEMORY;
+
+ HRESULT hr = pCLRProfilingClassFactoryImpl->QueryInterface(riid, ppv);
+ if (SUCCEEDED(hr))
+ {
+ pCLRProfilingClassFactoryImpl.SuppressRelease();
+ }
+
+ return hr;
+}
+
+
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
diff --git a/src/vm/profattach.h b/src/vm/profattach.h
new file mode 100644
index 0000000000..8811479e09
--- /dev/null
+++ b/src/vm/profattach.h
@@ -0,0 +1,426 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ProfAttach.h
+//
+
+//
+// Declaration of functions that help with attaching and detaching profilers, including
+// message structures that are passed back and forth between the trigger (client) and
+// the target profilee (server). For code specific to triggers and profilees, see
+// code:ProfilingAPIAttachClient and code:ProfilingAPIAttachServer, respectively.
+//
+
+// ======================================================================================
+
+#ifndef __PROF_ATTACH_H__
+#define __PROF_ATTACH_H__
+
+#include "internalunknownimpl.h"
+
+//---------------------------------------------------------------------------------------
+// Structure representing the runtime's version. Used to negotiate versions between the
+// trigger and profilee.
+//
+// **** COMPATIBILITY WARNING ***
+//
+// You are not allowed to change the binary layout of this structure, or else the
+// trigger & profilee will be unable to negotiate version information. Asserts in
+// code:ProfilingAPIAttachDetach::VerifyMessageStructureLayout attempt to enforce this.
+//
+// **** COMPATIBILITY WARNING ***
+//
+struct VersionBlock
+{
+public:
+ DWORD m_dwMajor;
+ DWORD m_dwMinor;
+ DWORD m_dwBuild;
+ DWORD m_dwQFE;
+
+ VersionBlock(DWORD dwMajor, DWORD dwMinor, DWORD dwBuild, DWORD dwQFE);
+ VersionBlock();
+ BOOL operator <(const VersionBlock & otherVersionBlock) const;
+};
+
+
+//---------------------------------------------------------------------------------------
+// Types of request messages that may be sent from trigger across the pipe
+//
+enum RequestMessageType
+{
+ // Client (trigger) asks server (profilee) for server's version information.
+ // The message type must be code:ProfilingAPIAttachDetach::BaseRequestMessage
+ kMsgGetVersion,
+
+ // Client (trigger) asks server (profilee) to attach the profiler. The message
+ // type must be code:ProfilingAPIAttachDetach::AttachRequestMessage or AttachRequestMessageV2
+ kMsgAttach,
+
+ kMsgCount
+};
+
+
+// ---------------------------------------------------------------------------------------
+// Base request message format. All request messages sent by trigger across pipe derive
+// from this.
+//
+// **** COMPATIBILITY WARNING ***
+//
+// You are not allowed to change this structure in such a way as would modify the binary
+// layout of derived type GetVersionRequestMessage, or else the trigger & profilee will
+// be unable to negotiate version information. Asserts in
+// code:ProfilingAPIAttachDetach::VerifyMessageStructureLayout attempt to enforce this.
+//
+// **** COMPATIBILITY WARNING ***
+//
+struct BaseRequestMessage
+{
+public:
+ // Total size of the message (including size of derived type, client data, etc., if
+ // present in the message)
+ DWORD m_cbMessage;
+
+ // What kind of message is this?
+ RequestMessageType m_requestMessageType;
+
+ BaseRequestMessage(DWORD cbMessage, RequestMessageType requestMessageType);
+
+private:
+ // Use parameterized constructor above to initialize this struct
+ BaseRequestMessage();
+};
+
+
+// ---------------------------------------------------------------------------------------
+// Message format for requesting version information from the target profilee
+//
+// **** COMPATIBILITY WARNING ***
+//
+// You are not allowed to change the binary layout of this structure, or else the trigger
+// & profilee will be unable to negotiate version information. Asserts in
+// code:ProfilingAPIAttachDetach::VerifyMessageStructureLayout attempt to enforce this.
+//
+// **** COMPATIBILITY WARNING ***
+//
+struct GetVersionRequestMessage : public BaseRequestMessage
+{
+public:
+ GetVersionRequestMessage();
+};
+
+
+//---------------------------------------------------------------------------------------
+// Attach request message format. A kMsgAttach message sent by trigger must be of this
+// type.
+struct AttachRequestMessage : public BaseRequestMessage
+{
+public:
+ // Trigger sends its version info here. This allows the target profilee to
+ // customize its response for the format expected by the trigger.
+ VersionBlock m_triggerVersion;
+
+ // The GUID of the profiler’s COM object to load
+ CLSID m_clsidProfiler;
+
+ // The path to the profiler’s COM object to load
+ WCHAR m_wszProfilerPath[MAX_PATH];
+
+ // Client data is custom data that the profiler’s
+ // trigger-process wishes to copy into this process.
+ // Profiler authors will typically use this as a way to
+ // communicate to the profiler DLL what options the profiler
+ // user has chosen. This will help the profiler DLL configure
+ // itself (e.g., to determine which callbacks to request).
+ //
+ // Since the client data is variable length, and we may
+ // want to tail-extend this structure in the future, we use
+ // an offset to point to the client data. Client data
+ // begins at this + m_dwClientDataStartOffset bytes.
+ DWORD m_dwClientDataStartOffset;
+ DWORD m_cbClientDataLength;
+
+ AttachRequestMessage(
+ DWORD cbMessage,
+ const VersionBlock & triggerVersion,
+ const CLSID * pClsidProfiler,
+ LPCWSTR wszProfilerPath,
+ DWORD dwClientDataStartOffset,
+ DWORD cbClientDataLength);
+
+private:
+ // Use parameterized constructor above to initialize this struct
+ AttachRequestMessage();
+};
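+
+// Illustrative wire layout of a kMsgAttach request that carries client data (a sketch,
+// not normative; the actual offsets are whatever the trigger wrote into the header):
+//
+//     offset 0                         : AttachRequestMessage (fixed-size fields)
+//     offset m_dwClientDataStartOffset : client data (m_cbClientDataLength bytes)
+//     offset m_cbMessage               : end of message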
+
+//---------------------------------------------------------------------------------------
+// Attach request message V2
+// Pass the timeout information from client (the trigger process) to server (the profilee)
+struct AttachRequestMessageV2 : public AttachRequestMessage
+{
+
+public :
+ // Timeout for the wait operation for concurrent GC in server side
+ // Basically time out passed from AttachProfiler API minus the amount of time already
+ // elapsed in client side
+ DWORD m_dwConcurrentGCWaitTimeoutInMs;
+
+public :
+ AttachRequestMessageV2(
+ DWORD cbMessage,
+ const VersionBlock & triggerVersion,
+ const CLSID * pClsidProfiler,
+ LPCWSTR wszProfilerPath,
+ DWORD dwClientDataStartOffset,
+ DWORD cbClientDataLength,
+ DWORD dwConcurrentGCWaitTimeoutInMs);
+
+ // Whether the attach request message is a V2 message (including V2+)
+ static BOOL CanCastTo(const AttachRequestMessage * pMsg);
+
+private:
+ // Use parameterized constructor above to initialize this struct
+ AttachRequestMessageV2();
+};
+
+// ---------------------------------------------------------------------------------------
+// Base response message format. All response messages returned by profilee across the
+// pipe to the trigger derive from this.
+//
+// **** COMPATIBILITY WARNING ***
+//
+// You are not allowed to change this structure in such a way as would change the binary
+// layout of derived type GetVersionResponseMessage, or else the trigger & profilee will
+// be unable to negotiate version information. Asserts in
+// code:ProfilingAPIAttachDetach::VerifyMessageStructureLayout attempt to enforce this.
+//
+// **** COMPATIBILITY WARNING ***
+//
+struct BaseResponseMessage
+{
+public:
+ // HRESULT indicating success or failure of carrying out the request
+ HRESULT m_hr;
+
+ BaseResponseMessage(HRESULT hr);
+
+protected:
+ // Use parameterized constructor above to initialize this struct
+ BaseResponseMessage();
+};
+
+// ---------------------------------------------------------------------------------------
+// GetVersion response message format. The server responds to a kMsgGetVersion message
+// request with a message of this type.
+//
+// **** COMPATIBILITY WARNING ***
+//
+// You are not allowed to change the binary layout of this structure, or else the trigger
+// & profilee will be unable to negotiate version information. Asserts in
+// code:ProfilingAPIAttachDetach::VerifyMessageStructureLayout attempt to enforce this.
+//
+// **** COMPATIBILITY WARNING ***
+//
+struct GetVersionResponseMessage : public BaseResponseMessage
+{
+public:
+ // The target profilee constructs this response by filling out the following two
+ // values. The trigger process uses these values to determine whether it's compatible
+ // with the target profilee.
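+    //
+    // For example (illustrative version numbers only): if the profilee reports
+    // m_profileeVersion = 4.0.100.0 and m_minimumAllowableTriggerVersion = 4.0.0.0,
+    // then a trigger at version 4.0.50.0 may proceed: the profilee is new enough for
+    // the trigger, and the trigger is new enough for the profilee.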
+
+ // Target profilee provides its version info here. If trigger determines that
+ // this number is too small, then trigger refuses the profilee as being too old.
+ VersionBlock m_profileeVersion;
+
+ // Target profilee provides here the oldest version of a trigger process that it
+ // can communicate with. If trigger determines that this number is too big,
+ // then trigger refuses the profilee as being too new.
+ VersionBlock m_minimumAllowableTriggerVersion;
+
+ GetVersionResponseMessage(
+ HRESULT hr,
+ const VersionBlock & profileeVersion,
+ const VersionBlock & minimumAllowableTriggerVersion);
+
+ GetVersionResponseMessage();
+};
+
+
+// ---------------------------------------------------------------------------------------
+// Attach response message format. The server responds to a kMsgAttach message
+// request with a message of this type.
+//
+struct AttachResponseMessage : public BaseResponseMessage
+{
+public:
+ AttachResponseMessage(HRESULT hr);
+};
+
+// ---------------------------------------------------------------------------------------
+// Static-only class to handle attach request communication and detach functionality
+//
+// The target profilee app generally calls functions in ProfilingAPIAttachServer, while
+// the trigger process (by way of the AttachProfiler API) generally calls functions in
+// ProfilingAPIAttachClient. ProfilingAPIAttachDetach contains functionality common to
+// target profilees and triggers, as well as initialization and other routines exposed to
+// other parts of the EE.
+//
+class ProfilingAPIAttachDetach
+{
+public:
+ // ---------------------------------------------------------------------------------------
+ // Indicates whether AttachThread is always available without the need for an event
+ // (that the finalizer thread listens to), or whether the AttachThread is only
+ // available on demand (when finalizer thread detects the attach event has been
+ // signaled). The mode used by default is determined by the gc mode (server vs.
+ // workstation). But this can be overridden in either case by setting
+ // COMPLUS_AttachThreadAlwaysOn: 0=kOnDemand, nonzero=kAlwaysOn.
+ enum AttachThreadingMode
+ {
+ // Too early in startup to know the mode yet
+ kUninitialized,
+
+ // Default GC-workstation mode: AttachThread is only created when the attach
+ // event is signaled. AttachThread automatically exits when pipe requests quiet
+ // down.
+ kOnDemand,
+
+ // Default GC-server mode: AttachThread and attach pipe are created on startup,
+ // and they never go away. There is no need for an attach event in this mode, so
+ // the attach event is never created.
+ kAlwaysOn,
+ };
+
+ // ---------------------------------------------------------------------------------------
+ // Helper class used by both the target profilee app (server) and the trigger process
+ // (client) to create and dispose of an OVERLAPPED structure and to use it in a call
+ // to the OS API GetOverlappedResult (wrapped via
+ // code:ProfilingAPIAttachDetach::OverlappedResultHolder::Wait). The point of having
+ // this holder is to encapsulate the code that verifies when the OS is finished with
+ // the OVERLAPPED structure (usually when OverlappedResultHolder goes out of scope).
+ // See code:ProfilingAPIAttachDetach::OverlappedResultHolder::Wait for details. Since
+ // this class derives from NewHolder<OVERLAPPED>, users may automagically cast
+ // instances to OVERLAPPED* for use in passing to Windows OS APIs
+ class OverlappedResultHolder : public NewHolder<OVERLAPPED>
+ {
+ public:
+ HRESULT Initialize();
+ HRESULT Wait(
+ DWORD dwMillisecondsMax,
+ HANDLE hPipe,
+ DWORD * pcbReceived);
+ };
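+
+    // Typical usage (an illustrative sketch; error handling and buffer setup elided):
+    //
+    //     OverlappedResultHolder overlapped;
+    //     hr = overlapped.Initialize();
+    //     ReadFile(hPipe, pBuffer, cbBuffer, NULL, overlapped); // implicit cast to OVERLAPPED *
+    //     hr = overlapped.Wait(dwMillisecondsMax, hPipe, &cbReceived);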
+
+ static const VersionBlock kCurrentProcessVersion;
+ static const VersionBlock kMinimumAllowableTriggerVersion;
+ static const VersionBlock kMinimumAllowableProfileeVersion;
+
+ static DWORD WINAPI ProfilingAPIAttachThreadStart(LPVOID lpParameter);
+ static void ProcessSignaledAttachEvent();
+ static HANDLE GetAttachEvent();
+ static HRESULT Initialize();
+ static HRESULT InitSecurityAttributes(SECURITY_ATTRIBUTES * pSecAttrs, DWORD cbSecAttrs);
+ static AttachThreadingMode GetAttachThreadingMode();
+
+ static HRESULT GetAttachPipeName(HANDLE hProfileeProcess, SString * pAttachPipeName);
+ static void GetAttachPipeNameForPidAndVersion(HANDLE hProfileeProcess, LPCWSTR wszRuntimeVersion, SString * pAttachPipeName);
+ static HRESULT GetAttachEventName(HANDLE hProfileeProcess, SString * pAttachEventName);
+ static void GetAttachEventNameForPidAndVersion(HANDLE hProfileeProcess, LPCWSTR wszRuntimeVersion, SString * pAttachEventName);
+ static HRESULT GetAppContainerNamedObjectPath(HANDLE hProcess, __out_ecount(dwObjectPathSizeInChar) WCHAR * wszObjectPath, DWORD dwObjectPathSizeInChar);
+ static BOOL IsAppContainerProcess(HANDLE hProcess);
+
+private:
+ // This caches the security descriptor to be used when generating the
+ // SECURITY_ATTRIBUTES structure for the event and pipe objects.
+ //
+ // Technically, this should be freed via LocalFree() or HeapFree (with the current
+ // process heap), but there's only one of these per runtime, and it is used
+ // throughout the process's lifetime, and there isn't much point to freeing it when
+ // the process shuts down since the OS does that automatically.
+ static PSECURITY_DESCRIPTOR s_pSecurityDescriptor;
+
+ // HANDLE to event object created on startup and listened to by finalizer thread
+ // (when running in code:ProfilingAPIAttachDetach::kOnDemand mode)
+ //
+ // Technically, this should be freed via CloseHandle(), but there's only one of these
+ // per runtime, and it is used throughout the process's lifetime, and there isn't
+ // much point to freeing it when the process shuts down since the OS does that
+ // automatically.
+ static HANDLE s_hAttachEvent;
+
+ // See code:ProfilingAPIAttachDetach::AttachThreadingMode
+ static AttachThreadingMode s_attachThreadingMode;
+
+ static BOOL s_fInitializeCalled;
+
+ // Static-only class. Private constructor enforces you don't try to make an instance
+ ProfilingAPIAttachDetach() {}
+
+ INDEBUG(static void VerifyMessageStructureLayout());
+ static void InitializeAttachThreadingMode();
+ static HRESULT InitializeForOnDemandMode();
+ static HRESULT InitializeForAlwaysOnMode();
+ static HRESULT ProfilingAPIAttachThreadMain();
+ static void CreateAttachThread();
+
+ static HRESULT GetSecurityDescriptor(PSECURITY_DESCRIPTOR * ppsd);
+};
+
+// IClassFactory implementation for the ICLRProfiling interface.
+class CLRProfilingClassFactoryImpl : public IUnknownCommon<IClassFactory>
+{
+public:
+ CLRProfilingClassFactoryImpl()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ ~CLRProfilingClassFactoryImpl()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ //
+ // IClassFactory methods
+ //
+ STDMETHOD(CreateInstance(
+ IUnknown *pUnkOuter,
+ REFIID riid,
+ void **ppv));
+
+ STDMETHOD(LockServer(
+ BOOL fLock));
+};
+
+// CLRProfiling implementation.
+class CLRProfilingImpl : public IUnknownCommon<ICLRProfiling>
+{
+public:
+ CLRProfilingImpl()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ ~CLRProfilingImpl()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ //
+ // ICLRProfiling method
+ //
+ STDMETHOD(AttachProfiler(
+ DWORD dwProfileeProcessID,
+ DWORD dwMillisecondsMax,
+ const CLSID * pClsidProfiler,
+ LPCWSTR wszProfilerPath,
+ void * pvClientData,
+ UINT cbClientData));
+};
+
+
+HRESULT ICLRProfilingGetClassObject(REFCLSID rclsid, REFIID riid, void **ppv);
+
+#endif // __PROF_ATTACH_H__
diff --git a/src/vm/profattach.inl b/src/vm/profattach.inl
new file mode 100644
index 0000000000..490a6a847f
--- /dev/null
+++ b/src/vm/profattach.inl
@@ -0,0 +1,567 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ProfAttach.inl
+//
+
+//
+// Implementation of inlineable functions that help with attaching and detaching
+// profilers
+//
+
+// ======================================================================================
+
+#ifndef __PROF_ATTACH_INL__
+#define __PROF_ATTACH_INL__
+
+
+// ----------------------------------------------------------------------------
+// VersionBlock::VersionBlock
+//
+// Description:
+// VersionBlock constructor with no arguments; just zeroes out fields
+//
+
+inline VersionBlock::VersionBlock()
+{
+ LIMITED_METHOD_CONTRACT;
+ memset(this, 0, sizeof(*this));
+}
+
+// ----------------------------------------------------------------------------
+// VersionBlock::VersionBlock
+//
+// Description:
+// VersionBlock constructor with version number parameters
+//
+// Arguments:
+// * dwMajor - Major version number
+// * dwMinor - Minor version number
+// * dwBuild - Product build number
+// * dwQFE - Product build QFE number
+//
+
+inline VersionBlock::VersionBlock(
+ DWORD dwMajor,
+ DWORD dwMinor,
+ DWORD dwBuild,
+ DWORD dwQFE) :
+ m_dwMajor(dwMajor),
+ m_dwMinor(dwMinor),
+ m_dwBuild(dwBuild),
+ m_dwQFE(dwQFE)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+// ----------------------------------------------------------------------------
+// VersionBlock::operator <
+//
+// Description:
+// Allows for in-fix comparison operator between two VersionBlocks. Compares fields
+// from most-significant to least-significant: m_dwMajor, m_dwMinor, m_dwBuild,
+// m_dwQFE
+//
+// Arguments:
+// * otherVersionBlock - VersionBlock to compare against this (shown on RHS of <
+// operator)
+//
+// Return Value:
+// Nonzero if this is strictly before otherVersionBlock, else 0.
+//
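+// For example, VersionBlock(4, 0, 100, 0) < VersionBlock(4, 1, 0, 0) is TRUE (the
+// minor number decides), while VersionBlock(4, 1, 0, 0) < VersionBlock(4, 1, 0, 0)
+// is FALSE, since equal blocks are not strictly ordered.
+//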
+
+inline BOOL VersionBlock::operator <(const VersionBlock & otherVersionBlock) const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // COMPARE MAJOR
+ if (m_dwMajor < otherVersionBlock.m_dwMajor)
+ {
+ return TRUE;
+ }
+ if (m_dwMajor > otherVersionBlock.m_dwMajor)
+ {
+ return FALSE;
+ }
+ _ASSERTE(m_dwMajor == otherVersionBlock.m_dwMajor);
+
+ // COMPARE MINOR
+ if (m_dwMinor < otherVersionBlock.m_dwMinor)
+ {
+ return TRUE;
+ }
+ if (m_dwMinor > otherVersionBlock.m_dwMinor)
+ {
+ return FALSE;
+ }
+ _ASSERTE(m_dwMinor == otherVersionBlock.m_dwMinor);
+
+ // COMPARE BUILD
+ if (m_dwBuild < otherVersionBlock.m_dwBuild)
+ {
+ return TRUE;
+ }
+ if (m_dwBuild > otherVersionBlock.m_dwBuild)
+ {
+ return FALSE;
+ }
+ _ASSERTE(m_dwBuild == otherVersionBlock.m_dwBuild);
+
+ // COMPARE QFE
+ if (m_dwQFE < otherVersionBlock.m_dwQFE)
+ {
+ return TRUE;
+ }
+ if (m_dwQFE > otherVersionBlock.m_dwQFE)
+ {
+ return FALSE;
+ }
+ _ASSERTE(m_dwQFE == otherVersionBlock.m_dwQFE);
+
+ return FALSE;
+}
+
+// ----------------------------------------------------------------------------
+// BaseRequestMessage::BaseRequestMessage
+//
+// Description:
+// Constructor for base class of all request messages sent from trigger (client) to
+// profilee (server).
+//
+// Arguments:
+// * cbMessage - Size, in bytes, of the entire request message (including size of
+// derived type, client data, etc., if present in the message)
+// * requestMessageType - Enum representing type of request this constitutes
+//
+
+inline BaseRequestMessage::BaseRequestMessage(
+ DWORD cbMessage,
+ RequestMessageType requestMessageType) :
+ m_cbMessage(cbMessage),
+ m_requestMessageType(requestMessageType)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+// ----------------------------------------------------------------------------
+// GetVersionRequestMessage::GetVersionRequestMessage
+//
+// Description:
+// Constructor to create a fully initialized GetVersionRequestMessage
+//
+
+inline GetVersionRequestMessage::GetVersionRequestMessage()
+ : BaseRequestMessage(sizeof(GetVersionRequestMessage), kMsgGetVersion)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+// ----------------------------------------------------------------------------
+// AttachRequestMessage::AttachRequestMessage
+//
+// Description:
+// Constructor for request message of type kMsgAttach sent from trigger (client) to
+// profilee (server)
+//
+// Arguments:
+// * cbMessage - Size, in bytes, of the entire request message (including size of
+// derived type, client data, etc., if present in the message)
+// * triggerVersion - VersionBlock representing runtime version used by trigger
+// * pClsidProfiler - CLSID of profiler to attach
+// * wszProfilerPath - path to profiler DLL
+// * dwClientDataStartOffset - see code:AttachRequestMessage
+// * cbClientDataLength - see code:AttachRequestMessage
+//
+
+inline AttachRequestMessage::AttachRequestMessage(
+ DWORD cbMessage,
+ const VersionBlock & triggerVersion,
+ const CLSID * pClsidProfiler,
+ LPCWSTR wszProfilerPath,
+ DWORD dwClientDataStartOffset,
+ DWORD cbClientDataLength) :
+ BaseRequestMessage(cbMessage, kMsgAttach),
+ m_triggerVersion(triggerVersion),
+ m_dwClientDataStartOffset(dwClientDataStartOffset),
+ m_cbClientDataLength(cbClientDataLength)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERT(cbMessage >= sizeof(AttachRequestMessage) + cbClientDataLength);
+ memcpy(&m_clsidProfiler, pClsidProfiler, sizeof(m_clsidProfiler));
+ if (wszProfilerPath != NULL)
+ {
+ _ASSERTE(wcslen(wszProfilerPath) < _countof(m_wszProfilerPath));
+ wcscpy_s(m_wszProfilerPath, _countof(m_wszProfilerPath), wszProfilerPath);
+ }
+ else
+ {
+ m_wszProfilerPath[0] = L'\0';
+ }
+}
+
+// ----------------------------------------------------------------------------
+// AttachRequestMessageV2::AttachRequestMessageV2
+//
+// Description:
+// Constructor for request message V2 of type kMsgAttach sent from trigger (client) to
+// profilee (server)
+//
+// Arguments:
+// * cbMessage - Size, in bytes, of the entire request message (including size of
+// derived type, client data, etc., if present in the message)
+// * triggerVersion - VersionBlock representing runtime version used by trigger
+// * pClsidProfiler - CLSID of profiler to attach
+// * wszProfilerPath - path to profiler DLL
+// * dwClientDataStartOffset - see code:AttachRequestMessage
+// * cbClientDataLength - see code:AttachRequestMessage
+// * dwConcurrentGCWaitTimeoutInMs - the time out for wait operation on concurrent GC to finish.
+// Attach scenario only.
+//
+inline AttachRequestMessageV2::AttachRequestMessageV2(
+ DWORD cbMessage,
+ const VersionBlock & triggerVersion,
+ const CLSID * pClsidProfiler,
+ LPCWSTR wszProfilerPath,
+ DWORD dwClientDataStartOffset,
+ DWORD cbClientDataLength,
+ DWORD dwConcurrentGCWaitTimeoutInMs)
+ :AttachRequestMessage(
+ cbMessage,
+ triggerVersion,
+ pClsidProfiler,
+ wszProfilerPath,
+ dwClientDataStartOffset,
+ cbClientDataLength),
+ m_dwConcurrentGCWaitTimeoutInMs(dwConcurrentGCWaitTimeoutInMs)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERT(cbMessage >= sizeof(AttachRequestMessageV2) + cbClientDataLength);
+}
+
+// ----------------------------------------------------------------------------
+// AttachRequestMessageV2::CanCastTo
+inline BOOL AttachRequestMessageV2::CanCastTo(const AttachRequestMessage *pMsg)
+{
+ LIMITED_METHOD_CONTRACT;
+
+    // We already have checks that the client data doesn't go beyond the message body.
+    // If someone creates a bad message that pretends to be a V2 message, the worst
+    // case is that we end up with a bad timeout value.
+ if (pMsg->m_cbMessage >= sizeof(AttachRequestMessageV2) + pMsg->m_cbClientDataLength)
+ return TRUE;
+
+ return FALSE;
+}
+
+// ----------------------------------------------------------------------------
+// BaseResponseMessage::BaseResponseMessage
+//
+// Description:
+// Constructor for base class of all response messages returned by profilee (server)
+// to trigger (client)
+//
+// Arguments:
+// * hr - HRESULT indicating success or failure of executing the request that the
+// trigger had made to the profilee
+//
+
+inline BaseResponseMessage::BaseResponseMessage(HRESULT hr) :
+ m_hr(hr)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+// ----------------------------------------------------------------------------
+// BaseResponseMessage::BaseResponseMessage
+//
+// Description:
+// Zero-parameter constructor for BaseResponseMessage for use when hr is not yet
+// known.
+//
+
+inline BaseResponseMessage::BaseResponseMessage() :
+ m_hr(E_UNEXPECTED)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+// ----------------------------------------------------------------------------
+// GetVersionResponseMessage::GetVersionResponseMessage
+//
+// Description:
+// Constructor to create a fully initialized GetVersionResponseMessage
+//
+// Arguments:
+// * hr - Success / failure of carrying out the GetVersion request
+// * profileeVersion - Version of the target profilee app's runtime (server)
+// * minimumAllowableTriggerVersion - Oldest version of a trigger process that this
+// target profilee app is willing to talk to.
+//
+
+inline GetVersionResponseMessage::GetVersionResponseMessage(
+ HRESULT hr,
+ const VersionBlock & profileeVersion,
+ const VersionBlock & minimumAllowableTriggerVersion) :
+ BaseResponseMessage(hr),
+ m_profileeVersion(profileeVersion),
+ m_minimumAllowableTriggerVersion(minimumAllowableTriggerVersion)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+// ----------------------------------------------------------------------------
+// GetVersionResponseMessage::GetVersionResponseMessage
+//
+// Description:
+// Constructor to use for GetVersionResponseMessage when the data is not known yet.
+// The trigger will typically use this constructor to create an empty
+// GetVersionResponseMessage as storage to receive the GetVersionResponseMessage data
+// that will come in over the pipe from the target profilee app.
+//
+
+inline GetVersionResponseMessage::GetVersionResponseMessage()
+{
+ LIMITED_METHOD_CONTRACT;
+ memset(this, 0, sizeof(*this));
+ m_hr = E_UNEXPECTED;
+}
+
+// ----------------------------------------------------------------------------
+// AttachResponseMessage::AttachResponseMessage
+//
+// Description:
+// Constructor for AttachResponseMessage
+//
+// Arguments:
+// * hr - Success / failure of carrying out the attach request
+//
+
+inline AttachResponseMessage::AttachResponseMessage(HRESULT hr)
+ : BaseResponseMessage(hr)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::GetAttachThreadingMode
+//
+// Description:
+// Returns the profiling attach threading mode for this runtime instance. See
+// code:ProfilingAPIAttachDetach::AttachThreadingMode.
+//
+// Return Value:
+// The profiling attach threading mode
+//
+// Assumptions:
+// * code:ProfilingAPIAttachDetach::Initialize must be called before this function.
+//
+
+// static
+inline ProfilingAPIAttachDetach::AttachThreadingMode ProfilingAPIAttachDetach::GetAttachThreadingMode()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // ProfilingAPIAttachDetach::Initialize must be called before this function.
+ _ASSERTE(s_fInitializeCalled);
+ return s_attachThreadingMode;
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::GetAttachEventNameForPidAndVersion
+//
+// Description:
+// Generates name for Globally Named Attach Event, based on PID and the runtime version
+// Name looks like this:
+// CPFATE_nnnn_RuntimeVersion
+// CPFATE stands for CLR Profiling API attach trigger event
+// nnnn is decimal process ID
+// RuntimeVersion is the string of the runtime version
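+//    For example (hypothetical PID and version string), a non-app-container profilee
+//    with PID 1234 would yield:  Global\CPFATE_1234_v4.0.30319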
+//
+// Arguments:
+// * hProfileeProcess - The profilee process we want to attach to
+// * wszRuntimeVersion - runtime version string
+// * pAttachEventName - [in/out] SString to hold the generated name
+//
+
+// static
+inline void ProfilingAPIAttachDetach::GetAttachEventNameForPidAndVersion(HANDLE hProfileeProcess, LPCWSTR wszRuntimeVersion, SString * pAttachEventName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // Convert to lower case using invariant culture
+ SString strRuntimeVersion(wszRuntimeVersion);
+ strRuntimeVersion.LowerCase();
+
+ DWORD dwProfileeProcessPid = ::GetProcessId(hProfileeProcess);
+
+ if (IsAppContainerProcess(hProfileeProcess))
+ {
+ HANDLE hCurrentProcess = ::GetCurrentProcess();
+        if (hProfileeProcess == hCurrentProcess || IsAppContainerProcess(hCurrentProcess))
+ {
+            // Either app container to app container, or the current process is the
+            // profilee process. In either case, use a local name.
+ pAttachEventName->Printf(L"CPFATE_%d_%s", dwProfileeProcessPid, strRuntimeVersion.GetUnicode());
+ }
+ else
+ {
+            // Otherwise, we'll assume it is full-trust to lowbox; in this case we need to
+            // prefix the name with the app container's named object path
+ WCHAR wszObjectPath[MAX_PATH];
+ HRESULT hr = GetAppContainerNamedObjectPath(hProfileeProcess, wszObjectPath, sizeof(wszObjectPath)/sizeof(WCHAR));
+ IfFailThrow(hr);
+
+ //
+ // Retrieve the session ID
+ //
+ DWORD dwSessionId;
+ if (!ProcessIdToSessionId(dwProfileeProcessPid, &dwSessionId))
+ {
+ COMPlusThrowHR(HRESULT_FROM_GetLastError());
+ }
+
+ pAttachEventName->Printf(L"Session\\%d\\%s\\CPFATE_%d_%s", dwSessionId, wszObjectPath, dwProfileeProcessPid, strRuntimeVersion.GetUnicode());
+ }
+ }
+ else
+ {
+        // Non-app-container scenario:
+        // create in the global namespace
+ pAttachEventName->Printf(L"Global\\CPFATE_%d_%s", dwProfileeProcessPid, strRuntimeVersion.GetUnicode());
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachDetach::GetAttachPipeNameForPidAndVersion
+//
+// Description:
+// Generates name for Globally Named Attach Pipe, based on PID and the runtime version
+// Name looks like this:
+// \\.\pipe\CPFATP_nnnn_RuntimeVersion
+// CPFATP stands for CLR Profiling API attach trigger pipe
+// nnnn is decimal process ID
+// RuntimeVersion is the string of the runtime version
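+//    For example (hypothetical PID and version string), a non-app-container profilee
+//    with PID 1234 would yield:  \\.\pipe\CPFATP_1234_v4.0.30319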
+//
+// Arguments:
+// * hProfileeProcess - The profilee process we want to attach to
+// * wszRuntimeVersion - runtime version string
+// * pAttachPipeName - [in/out] SString to hold the generated name
+//
+
+// static
+inline void ProfilingAPIAttachDetach::GetAttachPipeNameForPidAndVersion(HANDLE hProfileeProcess, LPCWSTR wszRuntimeVersion, SString * pAttachPipeName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // Convert to lower case using invariant culture
+ SString strRuntimeVersion(wszRuntimeVersion);
+ strRuntimeVersion.LowerCase();
+
+ DWORD dwProfileeProcessPid = ::GetProcessId(hProfileeProcess);
+
+ if (IsAppContainerProcess(hProfileeProcess))
+ {
+
+ //
+ // Retrieve low object path
+ //
+ WCHAR wszObjectPath[MAX_PATH];
+ HRESULT hr = GetAppContainerNamedObjectPath(hProfileeProcess, wszObjectPath, sizeof(wszObjectPath)/sizeof(WCHAR));
+ IfFailThrow(hr);
+
+ //
+ // Retrieve the session ID
+ //
+ DWORD dwSessionId;
+ if (!ProcessIdToSessionId(dwProfileeProcessPid, &dwSessionId))
+ {
+ COMPlusThrowHR(HRESULT_FROM_GetLastError());
+ }
+
+ pAttachPipeName->Printf(L"\\\\.\\pipe\\Sessions\\%d\\%s\\CPFATP_%d_%s", dwSessionId, wszObjectPath, dwProfileeProcessPid, strRuntimeVersion.GetUnicode());
+ }
+ else
+ {
+ pAttachPipeName->Printf(L"\\\\.\\pipe\\CPFATP_%d_%s", dwProfileeProcessPid, strRuntimeVersion.GetUnicode());
+ }
+}
+
+// Simple wrapper around code:ProfilingAPIAttachDetach::GetAttachEventNameForPidAndVersion using
+// current process's PID and current runtime directory
+// static
+inline HRESULT ProfilingAPIAttachDetach::GetAttachEventName(HANDLE hProfileeProcess, SString * pAttachEventName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ WCHAR wszRuntimeVersion[MAX_PATH];
+ wszRuntimeVersion[0] = L'\0';
+
+    // Note: CoreCLR can have the same version as the Desktop CLR, and it's possible to
+    // have multiple instances of the same version of CoreCLR in the process. We need to
+    // come up with something other than the version when attach is enabled for CoreCLR.
+ DWORD dwSize = _countof(wszRuntimeVersion);
+ HRESULT hr = GetCORVersionInternal(wszRuntimeVersion, dwSize, &dwSize);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ GetAttachEventNameForPidAndVersion(hProfileeProcess, wszRuntimeVersion, pAttachEventName);
+ return S_OK;
+}
+
+// Simple wrapper around code:ProfilingAPIAttachDetach::GetAttachPipeNameForPidAndVersion using
+// the given profilee process and the current runtime's version
+// static
+inline HRESULT ProfilingAPIAttachDetach::GetAttachPipeName(HANDLE hProfileeProcess, SString * pAttachPipeName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ WCHAR wszRuntimeVersion[MAX_PATH];
+ wszRuntimeVersion[0] = L'\0';
+
+ // Note: CoreCLR can have the same version as the Desktop CLR, and it's possible to have multiple
+ // instances of the same version of the CoreCLR in the process. We need to come up with
+ // something other than the version when Attach is enabled for CoreCLR.
+ DWORD dwSize = _countof(wszRuntimeVersion);
+ HRESULT hr = GetCORVersionInternal(wszRuntimeVersion, dwSize, &dwSize);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ GetAttachPipeNameForPidAndVersion(hProfileeProcess, wszRuntimeVersion, pAttachPipeName);
+ return S_OK;
+}
+
+#endif // __PROF_ATTACH_INL__
diff --git a/src/vm/profattachclient.cpp b/src/vm/profattachclient.cpp
new file mode 100644
index 0000000000..b406a68118
--- /dev/null
+++ b/src/vm/profattachclient.cpp
@@ -0,0 +1,949 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ProfAttachClient.cpp
+//
+
+//
+// Implementation of the AttachProfiler() API, used by CLRProfilingImpl::AttachProfiler.
+//
+// CLRProfilingImpl::AttachProfiler (in ndp\clr\src\DLLS\shim\shimapi.cpp) just thunks down
+// to mscorwks!AttachProfiler (below), which calls other functions in this file, all of
+// which are in mscorwks.dll. The AttachProfiler() API is consumed by trigger processes
+// in order to force the runtime of a target process to load a profiler. The prime
+// portion of this implementation lives in ProfilingAPIAttachClient, which handles
+// opening a client connection to the pipe created by the target profilee, and sending
+// requests across that pipe to force the target profilee (which acts as the pipe server)
+// to attach a profiler.
+
+//
+// Since these functions are executed by the trigger process, they intentionally seek the
+// event and pipe objects by names based on the PID of the target app to profile (which
+// is NOT the PID of the current process, as the current process is just the trigger
+// process). This implies, for example, that the variable
+// ProfilingAPIAttachDetach::s_hAttachEvent is of no use to the current process, as
+// s_hAttachEvent is only applicable to the target profilee app's process.
+//
+// Most of the contracts in this file follow the lead of default contracts throughout the
+// CLR (triggers, throws, etc.). Since AttachProfiler() is called by native code either
+// on a native thread created by the trigger process, or via a P/Invoke, these functions
+// will all run on threads in MODE_PREEMPTIVE.
+// * MODE_PREEMPTIVE also allows for GetThread() == NULL, which will be the case for
+// a native-only thread calling AttachProfiler()
+//
+
+// ======================================================================================
+
+#include "common.h"
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+#include "tlhelp32.h" // For CreateToolhelp32Snapshot, etc. in MightProcessExist()
+#include "profilinghelper.h"
+#include "profattach.h"
+#include "profattach.inl"
+#include "profattachclient.h"
+
+// CLRProfilingImpl::AttachProfiler calls this, which itself is just a simple wrapper around
+// code:ProfilingAPIAttachClient::AttachProfiler. See public documentation for a
+// description of the parameters, return value, etc.
+extern "C" HRESULT STDMETHODCALLTYPE AttachProfiler(
+ DWORD dwProfileeProcessID,
+ DWORD dwMillisecondsMax,
+ const CLSID * pClsidProfiler,
+ LPCWSTR wszProfilerPath,
+ void * pvClientData,
+ UINT cbClientData,
+ LPCWSTR wszRuntimeVersion)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+
+ // This is the entrypoint into the EE by a trigger process. As such, this
+ // is profiling-specific and not considered mainline EE code.
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_UNEXPECTED;
+
+ EX_TRY
+ {
+ ProfilingAPIAttachClient attachClient;
+ hr = attachClient.AttachProfiler(
+ dwProfileeProcessID,
+ dwMillisecondsMax,
+ pClsidProfiler,
+ wszProfilerPath,
+ pvClientData,
+ cbClientData,
+ wszRuntimeVersion);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ _ASSERTE(!"Unhandled exception executing AttachProfiler API");
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ // For ease-of-use by profilers, normalize similar HRESULTs down.
+ if ((hr == HRESULT_FROM_WIN32(ERROR_BROKEN_PIPE)) ||
+ (hr == HRESULT_FROM_WIN32(ERROR_PIPE_NOT_CONNECTED)) ||
+ (hr == HRESULT_FROM_WIN32(ERROR_BAD_PIPE)))
+ {
+ hr = CORPROF_E_IPC_FAILED;
+ }
+
+ return hr;
+}
+
+
+// ----------------------------------------------------------------------------
+// AdjustRemainingMs
+//
+// Description:
+// Simple helper to do timeout arithmetic. Timeout arithmetic is based on
+// CLRGetTickCount64, which returns an unsigned 64-bit int representing the number of
+// milliseconds transpired since the machine has been up. Since a machine is unlikely
+// to be up for > 500 million years, wraparound issues may be ignored.
+//
+// Caller repeatedly calls this function (usually once before a lengthy operation
+// with a timeout) to check on its remaining time allotment and get alerted when time
+// runs out.
+//
+// Arguments:
+// * ui64StartTimeMs - [in] When did caller begin, in tick counts (ms)?
+// * dwMillisecondsMax - [in] How much time does caller have, total?
+// * pdwMillisecondsRemaining - [out] Remaining ms caller has before exceeding its
+// timeout.
+//
+// Return Value:
+// HRESULT_FROM_WIN32(ERROR_TIMEOUT) if caller is out of time; else S_OK
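+//
+// Worked example (numbers illustrative): if the caller started at tick
+// ui64StartTimeMs = 100000 with dwMillisecondsMax = 5000, and CLRGetTickCount64()
+// now returns 103000, then 3000 ms have elapsed, *pdwMillisecondsRemaining is set
+// to 2000, and S_OK is returned. Once more than 5000 ms have elapsed, the function
+// returns HRESULT_FROM_WIN32(ERROR_TIMEOUT) instead.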
+//
+
+static HRESULT AdjustRemainingMs(
+ ULONGLONG ui64StartTimeMs,
+ DWORD dwMillisecondsMax,
+ DWORD * pdwMillisecondsRemaining)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pdwMillisecondsRemaining != NULL);
+
+ ULONGLONG ui64NowMs = CLRGetTickCount64();
+
+ if (ui64NowMs - ui64StartTimeMs > dwMillisecondsMax)
+ {
+ // Out of time!
+ return HRESULT_FROM_WIN32(ERROR_TIMEOUT);
+ }
+
+ // How much of dwMillisecondsMax remain to be used?
+ *pdwMillisecondsRemaining = dwMillisecondsMax - static_cast<DWORD>(ui64NowMs - ui64StartTimeMs);
+ return S_OK;
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachClient::AttachProfiler
+//
+// Description:
+// Main worker for AttachProfiler API. Trigger process calls mscoree!AttachProfiler
+// which just defers to this function to do all the work.
+//
+// ** See public API docs for description of params / return value. **
+//
+// Note that, in the trigger process, the dwMillisecondsMax timeouts are cumulative:
+// the caller specifies a single timeout value for the entire AttachProfiler API call.
+// So we must constantly adjust the timeouts we use so they're based on the time
+// remaining from the original dwMillisecondsMax specified by the AttachProfiler API
+// client.
+//
+
+HRESULT ProfilingAPIAttachClient::AttachProfiler(
+ DWORD dwProfileeProcessID,
+ DWORD dwMillisecondsMax,
+ const CLSID * pClsidProfiler,
+ LPCWSTR wszProfilerPath,
+ void * pvClientData,
+ UINT cbClientData,
+ LPCWSTR wszRuntimeVersion)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ InitializeLogging();
+
+ HRESULT hr;
+
+ // Is cbClientData just crazy-sick-overflow big?
+ if (cbClientData >= 0xFFFFffffUL - sizeof(AttachRequestMessage))
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ if ((pvClientData == NULL) && (cbClientData != 0))
+ {
+ return E_INVALIDARG;
+ }
+
+ if (pClsidProfiler == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ if ((wszProfilerPath != NULL) && (wcslen(wszProfilerPath) >= MAX_PATH))
+ {
+ return E_INVALIDARG;
+ }
+
+ // See if we can early-out due to the profilee process ID not existing.
+ // MightProcessExist() only returns FALSE if it has positively verified the process
+ // ID didn't exist when MightProcessExist() was called. So it might incorrectly
+ // return TRUE (if it hit an error trying to determine whether the process exists).
+ // But that's ok, as we'll catch a nonexistent process later on when we try to fiddle
+ // with its event & pipe. MightProcessExist() is used strictly as an optional
+ // optimization to early-out before waiting for the event to appear.
+ if (!MightProcessExist(dwProfileeProcessID))
+ {
+ return CORPROF_E_PROFILEE_PROCESS_NOT_FOUND;
+ }
+
+ // Adjust the timeout value according to the env var COMPLUS_ProfAPI_AttachProfilerMinTimeoutInMs.
+ // The default is 10 seconds, as we want to avoid the client (trigger process) timing out too early
+ // due to a wait for concurrent GC in the server (profilee side)
+ DWORD dwMillisecondsMinFromEnv = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ProfAPI_AttachProfilerMinTimeoutInMs);
+
+ if (dwMillisecondsMax < dwMillisecondsMinFromEnv)
+ dwMillisecondsMax = dwMillisecondsMinFromEnv;
+
+#ifdef _DEBUG
+ {
+ WCHAR wszClsidProfiler[40];
+ if (!StringFromGUID2(*pClsidProfiler, wszClsidProfiler, _countof(wszClsidProfiler)))
+ {
+ wcscpy_s(&wszClsidProfiler[0], _countof(wszClsidProfiler), W("(error)"));
+ }
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF TRIGGER: mscorwks!AttachProfiler invoked with Trigger Process ID: '%d', "
+ "Target Profilee Process ID: '%d', dwMillisecondsMax: '%d', pClsidProfiler: '%S',"
+ "wszProfilerPath: '%S'\n",
+ GetProcessId(GetCurrentProcess()),
+ dwProfileeProcessID,
+ dwMillisecondsMax,
+ wszClsidProfiler,
+ wszProfilerPath == NULL ? W("") : wszProfilerPath));
+ }
+#endif // _DEBUG
+
+ // See code:AdjustRemainingMs
+ ULONGLONG ui64StartTimeMs = CLRGetTickCount64();
+ DWORD dwMillisecondsRemaining = dwMillisecondsMax;
+
+ HandleHolder hProfileeProcess = ::OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, dwProfileeProcessID);
+ if (!hProfileeProcess)
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF TRIGGER: OpenProcess failed. LastError=0x%x.\n",
+ ::GetLastError()));
+ return HRESULT_FROM_GetLastError();
+ }
+
+ StackSString attachPipeName;
+ ProfilingAPIAttachDetach::GetAttachPipeNameForPidAndVersion(hProfileeProcess, wszRuntimeVersion, &attachPipeName);
+
+ // Try to open pipe with 0ms timeout in case the pipe is still around from
+ // a previous attach request
+ hr = OpenPipeClient(attachPipeName.GetUnicode(), 0);
+ if (hr == HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND))
+ {
+ // Pipe doesn't exist, so signal attach event and retry. Note that any other
+ // failure from the above OpenPipeClient call will NOT cause us to signal
+ // the attach event, as signaling the attach event can only help with making
+ // sure the pipe gets created, and nothing else.
+ StackSString attachEventName;
+ ProfilingAPIAttachDetach::GetAttachEventNameForPidAndVersion(hProfileeProcess, wszRuntimeVersion, &attachEventName);
+ hr = SignalAttachEvent(attachEventName.GetUnicode());
+ if (FAILED(hr))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF TRIGGER: Unable to signal the global attach event. hr=0x%x.\n",
+ hr));
+
+ // It's reasonable for SignalAttachEvent to err out if the event
+ // simply doesn't exist. This happens on server apps that just circumvent
+ // using an event. They just create the AttachThread and attach pipe on
+ // startup, and are always listening on the pipe. So if event signaling
+ // failed due to nonexistent event, keep on going and try connecting to the
+ // pipe again. But if event signaling failed for any other reason, that's
+ // unexpected so give up.
+ if (hr != HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND))
+ {
+ return hr;
+ }
+ }
+
+ hr = AdjustRemainingMs(ui64StartTimeMs, dwMillisecondsMax, &dwMillisecondsRemaining);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ hr = OpenPipeClient(attachPipeName.GetUnicode(), dwMillisecondsRemaining);
+ }
+
+ // hr now holds the result of either the original OpenPipeClient call (if it
+ // failed for a reason other than ERROR_FILE_NOT_FOUND) or the 2nd
+ // OpenPipeClient call (if the first call yielded ERROR_FILE_NOT_FOUND and we
+ // signaled the event and retried).
+ if (FAILED(hr))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF TRIGGER: Unable to open a client connection to the pipe. hr=0x%x.\n",
+ hr));
+ return hr;
+ }
+
+ // At this point the pipe is definitely open
+ _ASSERTE(IsValidHandle(m_hPipeClient));
+
+ hr = AdjustRemainingMs(ui64StartTimeMs, dwMillisecondsMax, &dwMillisecondsRemaining);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ // Send the GetVersion message and verify we're talking the same language
+ hr = VerifyVersionIsCompatible(dwMillisecondsRemaining);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ hr = AdjustRemainingMs(ui64StartTimeMs, dwMillisecondsMax, &dwMillisecondsRemaining);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ // Send the attach message!
+ HRESULT hrAttach;
+ hr = SendAttachRequest(
+ dwMillisecondsRemaining,
+ pClsidProfiler,
+ wszProfilerPath,
+ pvClientData,
+ cbClientData,
+ &hrAttach);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF TRIGGER: AttachProfiler succeeded sending attach request. Trigger Process ID: '%d', "
+ "Target Profilee Process ID: '%d', Attach HRESULT: '0x%x'\n",
+ GetProcessId(GetCurrentProcess()),
+ dwProfileeProcessID,
+ hrAttach));
+
+ return hrAttach;
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachClient::MightProcessExist
+//
+// Description:
+// Returns BOOL indicating whether a process with the specified process ID might exist
+// on the local computer.
+//
+// Arguments:
+// * dwProcessID - Process ID to look up
+//
+// Return Value:
+// nonzero if process might possibly exist; FALSE if not
+//
+// Notes:
+// * Since processes come and go while this function executes, this should only be
+// used on a process ID that is supposed to exist both before and after this
+// function returns. A return of FALSE reliably tells you that supposition is
+// wrong. A return of TRUE, however, only means the process ID existed when this
+// function did its search. It's still possible the process has exited by the time
+// this function returns.
+// * If this function is unsure of a process's existence (e.g., if it encounters an
+// error while trying to find out), it errs on the side of optimism and returns
+// TRUE.
+//
+
+BOOL ProfilingAPIAttachClient::MightProcessExist(DWORD dwProcessID)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // There are a few ways to check whether a process exists. Some dismissed
+ // alternatives:
+ //
+ // * OpenProcess() with a "limited" access right.
+ // * Even relatively limited access rights such as SYNCHRONIZE and
+ // PROCESS_QUERY_INFORMATION often fail with ERROR_ACCESS_DENIED, even if
+ // the caller is running as administrator.
+ //
+ // * EnumProcesses() + search through returned PIDs
+ // * EnumProcesses() requires psychic powers to know how big to allocate the
+ // array of PIDs to receive (EnumProcesses() won't give you a hint if
+ // you're wrong).
+ //
+ // Method of choice is CreateToolhelp32Snapshot, which gives an enumerator to iterate
+ // through all processes.
+
+ // Take a snapshot of all processes in the system.
+ HandleHolder hProcessSnap = CreateToolhelp32Snapshot(
+ TH32CS_SNAPPROCESS,
+ 0 // Unused when snap type is TH32CS_SNAPPROCESS
+ );
+ if (hProcessSnap == INVALID_HANDLE_VALUE)
+ {
+ // Dunno if process exists. Err on the side of optimism
+ return TRUE;
+ }
+
+ // Set the size of the structure before using it.
+ PROCESSENTRY32 entry;
+ ZeroMemory(&entry, sizeof(entry));
+ entry.dwSize = sizeof(PROCESSENTRY32);
+
+ // Start enumeration with Process32First. It will set dwSize to tell us how many
+ // members of PROCESSENTRY32 we can trust. We only need th32ProcessID.
+ if (!Process32First(hProcessSnap, &entry) ||
+ (offsetof(PROCESSENTRY32, th32ProcessID) + sizeof(entry.th32ProcessID) > entry.dwSize))
+ {
+ // Can't tell if process exists, so assume it might
+ return TRUE;
+ }
+
+ do
+ {
+ if (entry.th32ProcessID == dwProcessID)
+ {
+ // Definitely exists
+ return TRUE;
+ }
+ } while (Process32Next(hProcessSnap, &entry));
+
+ // Process32Next() failed. Return FALSE only if we exhausted our search
+ return (GetLastError() != ERROR_NO_MORE_FILES);
+}
+
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachClient::OpenPipeClient
+//
+// Description:
+// Attempts to create a client connection to the remote server pipe
+//
+// Arguments:
+// * wszPipeName - Name of pipe to connect to.
+// * dwMillisecondsMax - Total ms to spend trying to connect to the pipe.
+//
+// Return Value:
+// HRESULT indicating success / failure
+//
+
+HRESULT ProfilingAPIAttachClient::OpenPipeClient(
+ LPCWSTR wszPipeName,
+ DWORD dwMillisecondsMax)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ const DWORD kSleepMsUntilRetryCreateFile = 100;
+ HRESULT hr;
+ DWORD dwErr;
+
+ // See code:AdjustRemainingMs
+ ULONGLONG ui64StartTimeMs = CLRGetTickCount64();
+ DWORD dwMillisecondsRemaining = dwMillisecondsMax;
+
+ HandleHolder hPipeClient;
+
+ // We need to wait until the pipe is both CREATED (i.e., target profilee app has
+ // created the server end of the pipe) and AVAILABLE (i.e., no other trigger has opened
+ // the client end to the pipe). There is no Win32 API to wait until the pipe is
+ // CREATED, so we must make our own retry loop that calls CreateFileW. Once the pipe
+ // is known to be CREATED, we can use WaitNamedPipe to wait until the pipe is
+ // AVAILABLE. (Note: It would have been nice if we could use WaitNamedPipe to wait
+ // until the pipe is both CREATED and AVAILABLE. But WaitNamedPipe just returns an
+ // error immediately if the pipe is not yet CREATED, regardless of the timeout value
+ // specified.)
+ while (TRUE)
+ {
+ // This CreateFile call doesn't create the pipe. The pipe must be created by the
+ // target profilee. This CreateFile call attempts to open a client connection to
+ // the pipe. If CreateFile succeeds, that implies the pipe had already been
+ // successfully CREATED by the target profilee, and is AVAILABLE, and we now have
+ // a client connection to the pipe ready to go.
+ hPipeClient = CreateFileW(
+ wszPipeName,
+ GENERIC_READ | GENERIC_WRITE,
+ 0, // dwShareMode (i.e., no sharing)
+ NULL, // lpSecurityAttributes (i.e., handle not inheritable and
+ // only current user may access this handle)
+ OPEN_EXISTING, // Only open (don't create) the pipe
+ FILE_FLAG_OVERLAPPED, // Using overlapped I/O allows async ops w/ timeout
+ NULL); // hTemplateFile
+
+ if (hPipeClient != INVALID_HANDLE_VALUE)
+ {
+ // CreateFile succeeded! Pipe is CREATED (by target profilee)
+ // and AVAILABLE and we're connected
+ break;
+ }
+
+ // Opening the pipe failed. Why?
+ dwErr = GetLastError();
+ switch(dwErr)
+ {
+ default:
+ // Any error other than the ones specifically brought out below isn't
+ // retry-able (e.g., security failure)
+ return HRESULT_FROM_WIN32(dwErr);
+
+ case ERROR_FILE_NOT_FOUND:
+ // Pipe not CREATED yet. Can we retry?
+ if (dwMillisecondsRemaining <= kSleepMsUntilRetryCreateFile)
+ {
+ // No time left, gotta bail!
+ return HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
+ }
+
+ // Sleep and retry
+ // (bAlertable=FALSE: don't wake up due to overlapped I/O)
+ ClrSleepEx(kSleepMsUntilRetryCreateFile, FALSE);
+ dwMillisecondsRemaining -= kSleepMsUntilRetryCreateFile;
+ break;
+
+ case ERROR_PIPE_BUSY:
+ // Pipe CREATED, but it's not AVAILABLE. Wait until it's AVAILABLE
+
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF TRIGGER: Found pipe, but pipe is busy. Waiting until pipe is available.\n"));
+
+ hr = AdjustRemainingMs(ui64StartTimeMs, dwMillisecondsMax, &dwMillisecondsRemaining);
+ if (FAILED(hr))
+ {
+ return HRESULT_FROM_WIN32(ERROR_PIPE_BUSY);
+ }
+
+ if (!WaitNamedPipeW(wszPipeName, dwMillisecondsRemaining))
+ {
+ // If we timeout here, convert the error into something more useful
+ dwErr = GetLastError();
+ if ((dwErr == ERROR_TIMEOUT) || (dwErr == ERROR_SEM_TIMEOUT))
+ {
+ return HRESULT_FROM_WIN32(ERROR_PIPE_BUSY);
+ }
+
+ // Failed for a reason other than timeout. Send that reason back to the caller
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF TRIGGER: WaitNamedPipe failed for a reason other timeout. hr=0x%x.\n",
+ HRESULT_FROM_WIN32(dwErr)));
+ return HRESULT_FROM_WIN32(dwErr);
+ }
+
+ // Pipe should be ready to open now, so retry. Note that it's still
+ // possible that another client sneaks in and connects before we get a
+ // chance to. If that happens, CreateFile will fail again, and we'll end up
+ // here waiting again (until we timeout).
+ break;
+ }
+ }
+
+ // Only way to exit loop above is if pipe is CREATED and AVAILABLE.
+ _ASSERTE(IsValidHandle(hPipeClient));
+
+ // We now have a valid handle on the pipe, which means we're connected
+ // to the pipe, and no one else is
+
+ // change to message-read mode.
+ DWORD dwMode = PIPE_READMODE_MESSAGE;
+ if (!SetNamedPipeHandleState(
+ hPipeClient, // pipe handle
+ &dwMode, // new pipe mode (PIPE_READMODE_MESSAGE)
+ NULL, // lpMaxCollectionCount, must be NULL when client & server on same box
+ NULL)) // lpCollectDataTimeout, must be NULL when client & server on same box
+ {
+ hr = HRESULT_FROM_GetLastError();
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF TRIGGER: SetNamedPipeHandleState failed. hr=0x%x.\n",
+ hr));
+ return hr;
+ }
+
+ // Pipe's client handle is now ready for use by this class
+ m_hPipeClient = (HANDLE) hPipeClient;
+
+ // Ownership transferred to this class, so this function shouldn't call CloseHandle()
+ hPipeClient.SuppressRelease();
+
+ return S_OK;
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachClient::SignalAttachEvent
+//
+// Description:
+// Trigger process calls this (indirectly via AttachProfiler()) to find, open, and
+// signal the Globally Named Attach Event.
+//
+// Arguments:
+// * wszEventName - Name of event to signal
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+HRESULT ProfilingAPIAttachClient::SignalAttachEvent(LPCWSTR wszEventName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ HandleHolder hAttachEvent;
+
+ hAttachEvent = OpenEventW(
+ EVENT_MODIFY_STATE, // dwDesiredAccess
+ FALSE, // bInheritHandle
+ wszEventName);
+ if (hAttachEvent == NULL)
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+ // Dealing directly with Windows event objects, not CLR event cookies, so we use the
+ // Win32 API directly. Note that none of this code executes on Rotor or if we're
+ // memory- / sync-hosted, so the CLR wrapper is of no use to us anyway.
+#pragma push_macro("SetEvent")
+#undef SetEvent
+ if (!SetEvent(hAttachEvent))
+#pragma pop_macro("SetEvent")
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+ return S_OK;
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachClient::VerifyVersionIsCompatible
+//
+// Description:
+// Sends a GetVersion request message across the pipe to the target profilee, reads
+// the response, and determines if the response allows for compatible communication.
+//
+// Arguments:
+// * dwMillisecondsMax - How much time do we have left to wait for the response?
+//
+// Return Value:
+// HRESULT indicating success or failure. If pipe communication succeeds, but we
+// determine that the response doesn't allow for compatible communication, return
+// CORPROF_E_PROFILEE_INCOMPATIBLE_WITH_TRIGGER.
+//
+// Assumptions:
+// * Client connection should be established before calling this function (or a
+// callee will assert).
+//
+
+HRESULT ProfilingAPIAttachClient::VerifyVersionIsCompatible(
+ DWORD dwMillisecondsMax)
+{
+ STANDARD_VM_CONTRACT;
+ HRESULT hr;
+ DWORD cbReceived;
+ GetVersionRequestMessage requestMsg;
+ GetVersionResponseMessage responseMsg;
+
+ hr = SendAndReceive(
+ dwMillisecondsMax,
+ reinterpret_cast<LPVOID>(&requestMsg),
+ static_cast<DWORD>(sizeof(requestMsg)),
+ reinterpret_cast<LPVOID>(&responseMsg),
+ static_cast<DWORD>(sizeof(responseMsg)),
+ &cbReceived);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ // Did profilee successfully carry out the GetVersion request?
+ if (FAILED(responseMsg.m_hr))
+ {
+ return responseMsg.m_hr;
+ }
+
+ // We should have valid version info for the target profilee. Now do the
+ // comparisons to determine if we're compatible.
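+ // Worked example (version numbers illustrative): if this trigger reports
+ // kCurrentProcessVersion = 4 but the profilee replied with
+ // m_minimumAllowableTriggerVersion = 6, the first comparison below fails and we
+ // return CORPROF_E_PROFILEE_INCOMPATIBLE_WITH_TRIGGER.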
+ if (
+ // Am I too old (i.e., profilee requires a newer trigger)?
+ (ProfilingAPIAttachDetach::kCurrentProcessVersion <
+ responseMsg.m_minimumAllowableTriggerVersion) ||
+
+ // Is the profilee too old (i.e., this trigger requires a newer profilee)?
+ (responseMsg.m_profileeVersion <
+ ProfilingAPIAttachDetach::kMinimumAllowableProfileeVersion))
+ {
+ return CORPROF_E_PROFILEE_INCOMPATIBLE_WITH_TRIGGER;
+ }
+
+ return S_OK;
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachClient::SendAttachRequest
+//
+// Description:
+// Sends an Attach request message across the pipe to the target profilee, and returns
+// the response.
+//
+// Arguments:
+// * dwMillisecondsMax - [in] How much time is left to wait for response?
+// * pClsidProfiler - [in] CLSID of profiler to attach
+// * pvClientData - [in] Client data to pass to profiler's InitializeForAttach
+// callback
+// * cbClientData - [in] Size of client data
+// * phrAttach - [out] Response HRESULT sent back by target profilee
+//
+// Return Value:
+// HRESULT indicating success / failure with sending request & receiving response. If
+// S_OK is returned, consult phrAttach to determine success / failure of the actual
+// attach operation.
+//
+// Assumptions:
+// * Client connection should be established before calling this function (or a callee
+// will assert).
+//
+
+HRESULT ProfilingAPIAttachClient::SendAttachRequest(
+ DWORD dwMillisecondsMax,
+ const CLSID * pClsidProfiler,
+ LPCWSTR wszProfilerPath,
+ void * pvClientData,
+ UINT cbClientData,
+ HRESULT * phrAttach)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(phrAttach != NULL);
+
+ // These were already verified early on
+ _ASSERTE(cbClientData < 0xFFFFffffUL - sizeof(AttachRequestMessageV2));
+ _ASSERTE((pvClientData != NULL) || (cbClientData == 0));
+
+ // Allocate enough space for the message, including the variable-length client data.
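+ // The resulting buffer layout is (sizes illustrative):
+ //
+ //   [ AttachRequestMessageV2 struct | cbClientData bytes of client data ]
+ //   ^ pbMessageStart                ^ pbMessageStart + sizeof(AttachRequestMessageV2)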
+ DWORD cbMessage = sizeof(AttachRequestMessageV2) + cbClientData;
+ _ASSERTE(cbMessage >= sizeof(AttachRequestMessageV2));
+ NewHolder<BYTE> pbMessageStart(new (nothrow) BYTE[cbMessage]);
+ if (pbMessageStart == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ // Initialize the message. First the client data at the tail end...
+ memcpy(pbMessageStart + sizeof(AttachRequestMessageV2), pvClientData, cbClientData);
+
+ // ...then the message struct fields (use constructor in-place)
+ new ((void *) pbMessageStart) AttachRequestMessageV2(
+ cbMessage,
+ ProfilingAPIAttachDetach::kCurrentProcessVersion, // Version of the trigger process
+ pClsidProfiler,
+ wszProfilerPath,
+ sizeof(AttachRequestMessageV2), // dwClientDataStartOffset
+ cbClientData,
+ dwMillisecondsMax
+ );
+
+ HRESULT hr;
+ DWORD cbReceived;
+ AttachResponseMessage attachResponseMessage(E_UNEXPECTED);
+
+ hr = SendAndReceive(
+ dwMillisecondsMax,
+ (LPVOID) pbMessageStart,
+ cbMessage,
+ reinterpret_cast<LPVOID>(&attachResponseMessage),
+ static_cast<DWORD>(sizeof(attachResponseMessage)),
+ &cbReceived);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ // Successfully got a response from target. The response contained the HRESULT
+ // indicating whether the attach was successful, so return that HRESULT in the [out]
+ // param.
+ *phrAttach = attachResponseMessage.m_hr;
+ return S_OK;
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachClient::SendAndReceive
+//
+// Description:
+// Used in trigger process to send a request and receive the response.
+//
+// Arguments:
+// * dwMillisecondsMax - [in] Timeout for entire send/receive operation
+// * pvInBuffer - [in] Buffer containing the request message
+// * cbInBuffer - [in] Number of bytes in the request message
+// * pvOutBuffer - [in/out] Buffer to write the response into
+// * cbOutBuffer - [in] Size of the response buffer
+// * pcbReceived - [out] Number of bytes actually written into response buffer
+//
+// Return Value:
+// HRESULT indicating success or failure
+//
+// Notes:
+// * The [out] parameters may be written to even if this function fails. But their
+// contents should be ignored by the caller in this case.
+//
+
+HRESULT ProfilingAPIAttachClient::SendAndReceive(
+ DWORD dwMillisecondsMax,
+ LPVOID pvInBuffer,
+ DWORD cbInBuffer,
+ LPVOID pvOutBuffer,
+ DWORD cbOutBuffer,
+ DWORD * pcbReceived)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsValidHandle(m_hPipeClient));
+ _ASSERTE(pvInBuffer != NULL);
+ _ASSERTE(pvOutBuffer != NULL);
+ _ASSERTE(pcbReceived != NULL);
+
+ HRESULT hr;
+ DWORD dwErr;
+ ProfilingAPIAttachDetach::OverlappedResultHolder overlapped;
+ hr = overlapped.Initialize();
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ if (TransactNamedPipe(
+ m_hPipeClient,
+ pvInBuffer,
+ cbInBuffer,
+ pvOutBuffer,
+ cbOutBuffer,
+ pcbReceived,
+ overlapped))
+ {
+ // Hot dog! Send and receive succeeded immediately! All done.
+ return S_OK;
+ }
+
+ dwErr = GetLastError();
+ if (dwErr != ERROR_IO_PENDING)
+ {
+ // An unexpected error. Caller has to deal with it
+ hr = HRESULT_FROM_WIN32(dwErr);
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF TRIGGER: TransactNamedPipe failed. hr=0x%x.\n",
+ hr));
+ return hr;
+ }
+
+ // Typical case=ERROR_IO_PENDING: TransactNamedPipe has begun the transaction, and
+ // it's still in progress. Wait until it's done (or timeout expires).
+ hr = overlapped.Wait(
+ dwMillisecondsMax,
+ m_hPipeClient,
+ pcbReceived);
+ if (FAILED(hr))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF TRIGGER: Waiting for overlapped result for TransactNamedPipe failed. hr=0x%x.\n",
+ hr));
+ return hr;
+ }
+
+ return S_OK;
+}
+
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
diff --git a/src/vm/profattachclient.h b/src/vm/profattachclient.h
new file mode 100644
index 0000000000..69a8fb53dc
--- /dev/null
+++ b/src/vm/profattachclient.h
@@ -0,0 +1,79 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ProfAttachClient.h
+//
+
+//
+// Definition of ProfilingAPIAttachClient, which houses the prime portion of the
+// implementation of the AttachProfiler() API, exported by mscoree.dll, and consumed by
+// trigger processes in order to force the runtime of a target process to load a
+// profiler. This handles opening a client connection to the pipe created by the target
+// profilee, and sending requests across that pipe to force the target profilee (which
+// acts as the pipe server) to attach a profiler.
+//
+
+// ======================================================================================
+
+#ifndef __PROF_ATTACH_CLIENT_H__
+#define __PROF_ATTACH_CLIENT_H__
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+extern "C" HRESULT STDMETHODCALLTYPE AttachProfiler(
+ DWORD dwProfileeProcessID,
+ DWORD dwMillisecondsMax,
+ const CLSID * pClsidProfiler,
+ LPCWSTR wszProfilerPath,
+ void * pvClientData,
+ UINT cbClientData,
+ LPCWSTR wszRuntimeVersion);
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+// ---------------------------------------------------------------------------------------
+// Here's the beef. All the pipe client stuff running in the trigger process (via call to
+// AttachProfiler()) is housed in this class. Note that these functions cannot assume a
+// fully initialized runtime (e.g., it would be nonsensical for these functions to
+// reference ProfilingAPIAttachDetach::s_hAttachEvent). These functions operate solely by
+// finding the attach event & pipes by name, and using them to communicate with the
+// target profilee app.
+
+class ProfilingAPIAttachClient
+{
+public:
+ HRESULT AttachProfiler(
+ DWORD dwProfileeProcessID,
+ DWORD dwMillisecondsMax,
+ const CLSID * pClsidProfiler,
+ LPCWSTR wszProfilerPath,
+ void * pvClientData,
+ UINT cbClientData,
+ LPCWSTR wszRuntimeVersion);
+
+protected:
+ // Client connection to the pipe that connects to the target profilee (server)
+ HandleHolder m_hPipeClient;
+
+ BOOL MightProcessExist(DWORD dwProcessID);
+ HRESULT SignalAttachEvent(LPCWSTR wszEventName);
+ HRESULT OpenPipeClient(
+ LPCWSTR wszPipeName,
+ DWORD dwMillisecondsMax);
+ HRESULT VerifyVersionIsCompatible(DWORD dwMillisecondsMax);
+ HRESULT SendAttachRequest(
+ DWORD dwMillisecondsMax,
+ const CLSID * pClsidProfiler,
+ LPCWSTR wszProfilerPath,
+ void * pvClientData,
+ UINT cbClientData,
+ HRESULT * phrAttach);
+ HRESULT SendAndReceive(
+ DWORD dwMillisecondsMax,
+ LPVOID pvInBuffer,
+ DWORD cbInBuffer,
+ LPVOID pvOutBuffer,
+ DWORD cbOutBuffer,
+ DWORD * pcbReceived);
+};
+
+#endif //__PROF_ATTACH_CLIENT_H__
diff --git a/src/vm/profattachserver.cpp b/src/vm/profattachserver.cpp
new file mode 100644
index 0000000000..0ee72201b6
--- /dev/null
+++ b/src/vm/profattachserver.cpp
@@ -0,0 +1,1297 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ProfAttachServer.cpp
+//
+
+//
+// Implementation of ProfilingAPIAttachServer, which is instantiated on the stack of the
+// AttachThread running in the target profilee (server end of the pipe) to receive and
+// carry out requests that are sent by the trigger (client end of the pipe).
+//
+// Most of the contracts in this file follow the lead of default contracts throughout the
+// CLR (triggers, throws, etc.) and many are marked as CAN_TAKE_LOCK, as event logging
+// happens all over the place, and that loads resource strings, which takes locks. Some
+// notes:
+// * MODE_PREEMPTIVE also allows for GetThread() == NULL, which will be the case for
+// most of these functions most of the time (as most are called on the
+// AttachThread).
+// * NOTHROW is used at the root of the AttachThread (to protect AttachThread from
+// unhandled exceptions which would tear down the entire process), and at the
+// root of the AttachProfiler() API (to protect trigger processes from unhandled
+// exceptions).
+//
+
+// ======================================================================================
+
+#include "common.h"
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+
+#include "profilinghelper.h"
+#include "profilinghelper.inl"
+#include "profattach.h"
+#include "profattach.inl"
+#include "profattachserver.h"
+#include "profattachserver.inl"
+
+
+// ----------------------------------------------------------------------------
+// Implementation of RequestMessageVerifier; a helper to verify incoming messages to the
+// target profilee.
+//
+
+// ----------------------------------------------------------------------------
+// RequestMessageVerifier::Verify
+//
+// Description:
+// Verifies self-consistency of a request message expressed as a byte array from
+// the pipe. This also calls the appropriate helper to check consistency of the
+// derived request message type, based on the kind of request this is.
+//
+// Return Value:
+// S_OK or CORPROF_E_UNRECOGNIZED_PIPE_MSG_FORMAT
+//
+
+HRESULT RequestMessageVerifier::Verify()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // In the beginning, the message is not yet verified
+ _ASSERTE(!m_fVerified);
+
+ HRESULT hr = CORPROF_E_UNRECOGNIZED_PIPE_MSG_FORMAT;
+
+ // First, do we have something big enough to fit in a BaseRequestMessage?
+ if (m_cbRequestMessage < sizeof(BaseRequestMessage))
+ {
+ return CORPROF_E_UNRECOGNIZED_PIPE_MSG_FORMAT;
+ }
+
+ // Yes, but do the fields lie?
+ const BaseRequestMessage * pUnverifiedBaseRequestMessage
+ = (const BaseRequestMessage *) m_pbRequestMessage;
+
+ // Does the struct claim a size different than the entire message?
+ if (pUnverifiedBaseRequestMessage->m_cbMessage != m_cbRequestMessage)
+ {
+ return CORPROF_E_UNRECOGNIZED_PIPE_MSG_FORMAT;
+ }
+
+ // Check for an unknown type, or a known type but with invalid subclass fields
+ switch(pUnverifiedBaseRequestMessage->m_requestMessageType)
+ {
+ default:
+ // Unknown message type
+ hr = CORPROF_E_UNRECOGNIZED_PIPE_MSG_FORMAT;
+ break;
+
+ case kMsgGetVersion:
+ hr = VerifyGetVersionRequestMessage();
+ break;
+
+ case kMsgAttach:
+ hr = VerifyAttachRequestMessage();
+ break;
+ }
+
+ // For debug builds, remember whether we successfully verified the message
+ INDEBUG(m_fVerified = SUCCEEDED(hr));
+ return hr;
+}
+
+// ----------------------------------------------------------------------------
+// RequestMessageVerifier::VerifyGetVersionRequestMessage
+//
+// Description:
+// Once a BaseRequestMessage has been verified as self-consistent, and is of type
+// kMsgGetVersion, this helper is called to verify consistency as a Get Version
+// message
+//
+// Return Value:
+// S_OK or CORPROF_E_UNRECOGNIZED_PIPE_MSG_FORMAT
+//
+// Assumptions:
+// * Verify() calls this, but only after it has verified base type
+//
+
+HRESULT RequestMessageVerifier::VerifyGetVersionRequestMessage()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ const BaseRequestMessage * pBaseRequestMessage =
+ (const BaseRequestMessage *) m_pbRequestMessage;
+
+ // Not much to verify here, since the get version request message is simply a
+ // BaseRequestMessage (no subtype)
+
+ // Not allowed to call this unless you checked the m_requestMessageType first!
+ _ASSERTE(pBaseRequestMessage->m_requestMessageType == kMsgGetVersion);
+
+ if (pBaseRequestMessage->m_cbMessage != sizeof(BaseRequestMessage))
+ {
+ return CORPROF_E_UNRECOGNIZED_PIPE_MSG_FORMAT;
+ }
+
+ return S_OK;
+}
+
+// ----------------------------------------------------------------------------
+// RequestMessageVerifier::VerifyAttachRequestMessage
+//
+// Description:
+// Once a BaseRequestMessage has been verified as self-consistent, and is of type
+// kMsgAttach, this helper is called to verify consistency of derived type
+// AttachRequestMessage
+//
+// Return Value:
+// S_OK or CORPROF_E_UNRECOGNIZED_PIPE_MSG_FORMAT
+//
+// Assumptions:
+// * Verify() calls this, but only after it has verified base type
+//
+
+HRESULT RequestMessageVerifier::VerifyAttachRequestMessage()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ const BaseRequestMessage * pBaseRequestMessage =
+ (const BaseRequestMessage *) m_pbRequestMessage;
+
+ // Not allowed to call this unless you checked the m_requestMessageType first!
+ _ASSERTE(pBaseRequestMessage->m_requestMessageType == kMsgAttach);
+
+ // Enough memory to cast to AttachRequestMessage?
+ if (pBaseRequestMessage->m_cbMessage < sizeof(AttachRequestMessage))
+ {
+ return CORPROF_E_UNRECOGNIZED_PIPE_MSG_FORMAT;
+ }
+
+ AttachRequestMessage * pUnverifiedAttachRequestMessage =
+ (AttachRequestMessage *) pBaseRequestMessage;
+
+ // Is client data properly contained inside message? Use 64-bit arithmetic to
+ // detect overflow
+ UINT64 ui64TotalMsgLength = (UINT64) pUnverifiedAttachRequestMessage->m_cbMessage;
+ UINT64 ui64ClientDataStartOffset = (UINT64) pUnverifiedAttachRequestMessage->m_dwClientDataStartOffset;
+ UINT64 ui64ClientDataLength = (UINT64) pUnverifiedAttachRequestMessage->m_cbClientDataLength;
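+
+ // Why 64-bit: with 32-bit arithmetic, a hostile message could pick (illustratively)
+ // m_dwClientDataStartOffset = 0xFFFFFFF0 and m_cbClientDataLength = 0x20, so that
+ // their 32-bit sum wraps around to 0x10 and would pass a naive containment check.
+ // In 64-bit arithmetic the sum (0x100000010) correctly exceeds the message length.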
+
+ // Client data must occur AFTER struct
+ if (ui64ClientDataStartOffset < sizeof(AttachRequestMessage))
+ {
+ return CORPROF_E_UNRECOGNIZED_PIPE_MSG_FORMAT;
+ }
+
+ // Client data should be wholly contained inside the message
+ if (ui64ClientDataStartOffset + ui64ClientDataLength > ui64TotalMsgLength)
+ {
+ return CORPROF_E_UNRECOGNIZED_PIPE_MSG_FORMAT;
+ }
+
+ // m_wszProfilerPath must be a NULL-terminated string.
+ if (wmemchr(pUnverifiedAttachRequestMessage->m_wszProfilerPath,
+ W('\0'),
+ _countof(pUnverifiedAttachRequestMessage->m_wszProfilerPath)) == NULL)
+ {
+ return CORPROF_E_UNRECOGNIZED_PIPE_MSG_FORMAT;
+ }
+
+ return S_OK;
+}
+
+
+// ----------------------------------------------------------------------------
+// RequestMessageVerifier::GetBaseRequestMessage
+//
+// Description:
+// After you've called code:RequestMessageVerifier::Verify, this function will hand
+// you a pointer to the verified request message. (If you call this before verifying
+// the message, it'll assert.)
+//
+// Return Value:
+// Pointer to the verified message
+//
+// Assumptions:
+// * Call code:RequestMessageVerifier::Verify first!
+//
+
+const BaseRequestMessage * RequestMessageVerifier::GetBaseRequestMessage()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Not allowed to ask for the message unless it's been successfully verified!
+ _ASSERTE(m_fVerified);
+
+ return (const BaseRequestMessage *) m_pbRequestMessage;
+}
+
+
+//---------------------------------------------------------------------------------------
+// #ConnectedPipeHolder
+//
+// Simple holder that ensures a connected pipe disconnects its client when the scope is
+// over. User of the class is responsible for creating the pipe and connecting the pipe,
+// before using this holder. The user of the class is responsible for closing the pipe
+// after this holder goes away.
+//
+
+// ----------------------------------------------------------------------------
+// AcquireConnectedPipe
+//
+// Description:
+// Used for ConnectedPipeHolder when acquiring a pipe HANDLE. Does nothing but
+// assert that the handle is valid.
+//
+// Arguments:
+// * hConnectedPipe - HANDLE being acquired
+//
+
+void AcquireConnectedPipe(HANDLE hConnectedPipe)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(IsValidHandle(hConnectedPipe));
+}
+
+// ----------------------------------------------------------------------------
+// ReleaseConnectedPipe
+//
+// Description:
+// Used for ConnectedPipeHolder when releasing a pipe HANDLE. Disconnects the pipe
+// from its client, but leaves the pipe open and ready for the next client connection.
+//
+// Arguments:
+// * hConnectedPipe - HANDLE to pipe being disconnected
+//
+
+void ReleaseConnectedPipe(HANDLE hConnectedPipe)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsValidHandle(hConnectedPipe));
+
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: Disconnecting pipe from current client.\n"));
+
+ if (!DisconnectNamedPipe(hConnectedPipe))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: DisconnectNamedPipe failed with %d.\n",
+ GetLastError()));
+ }
+}
+
+// See code:#ConnectedPipeHolder
+typedef Wrapper<HANDLE, AcquireConnectedPipe, ReleaseConnectedPipe,
+ (UINT_PTR) INVALID_HANDLE_VALUE> ConnectedPipeHolder;
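+
+// Usage sketch (illustrative; mirrors the use in ServiceOneClient further below):
+//
+//     {
+//         ConnectedPipeHolder connectedPipeHolder(m_hPipeServer);
+//         // ... receive and execute this client's requests ...
+//     }   // scope exit: DisconnectNamedPipe is called on the handle, kicking the
+//         // client off the pipe while leaving the pipe open for the next client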
+
+
+// ----------------------------------------------------------------------------
+// Implementation of ProfilingAPIAttachServer: the primary class that handles the server
+// end of the pipe by receiving trigger requests, carrying them out, and then sending
+// responses back to the trigger (client end of pipe).
+//
+// This is the meat. Savor its juices.
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachServer::ExecutePipeRequests
+//
+// Description:
+// The AttachThread is responsible for performing attach and detach operations. This
+// function comprises the main loop for the "attach" operations. Creates the pipe
+// server, and repeatedly connects to clients (i.e., trigger processes calling
+// AttachProfiler() API), services them, and disconnects them. Once client connections
+// stop arriving for a while (default is 5 minutes), the loop ends, the pipe server is
+// destroyed, and this function returns. (Note: the exception is when running in
+// code:ProfilingAPIAttachDetach::kAlwaysOn mode. In that case, this function loops
+// forever over all clients, without timing out and returning if it takes a long time
+// for the next connection request to come in.)
+//
+// Return Value:
+// Any success code implies one client successfully attached a profiler, else, error
+// HRESULT indicating the last error encountered with a client.
+//
+
+HRESULT ProfilingAPIAttachServer::ExecutePipeRequests()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ AttachStatus attachStatusOverall = kNoAttachRequested;
+
+ // First create the pipe server. If this fails, all is lost
+ hr = CreateAttachPipe();
+ if (FAILED(hr))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: Failed trying to create attach pipe server. hr=0x%x.\n",
+ hr));
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_ATTACHTHREAD_INIT, hr);
+ return hr;
+ }
+
+ // Thank you, CreateAttachPipe()!
+ _ASSERTE(IsValidHandle(m_hPipeServer));
+
+ // Now loop until there are no more clients to service. Remember if any of the
+ // clients got a profiler to attach, so we can return the appropriate HRESULT.
+ //
+ // Note that we intentionally keep on looping even after a profiler is attached, just
+ // in case there are any extra client requests coming in (e.g., the user launched a
+ // couple of triggers simultaneously, or a single trigger retried the AttachProfiler API a
+ // couple of times). Once client connections stop coming in for a while,
+ // ServiceOneClient will fail with a timeout, and we'll break out of the loop.
+ //
+ // Also note that, in kAlwaysOn mode, we loop forever until the thread naturally dies
+ // during app shutdown
+
+ while (SUCCEEDED(hr) ||
+ (ProfilingAPIAttachDetach::GetAttachThreadingMode() ==
+ ProfilingAPIAttachDetach::kAlwaysOn))
+ {
+ AttachStatus attachStatusForThisClient = kNoAttachRequested;
+
+ hr = ServiceOneClient(&attachStatusForThisClient);
+
+ // #AttachStatusOrder
+ // Here's where the order of the AttachStatus enum is important. Any given client
+ // must have an attach status "better" than the current overall attach status,
+ // for us to want the overall attach status to change (to match the client's
+ // status). See code:ProfilingAPIAttachServer::AttachStatus
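+ // (Illustratively, assuming the enum is declared in ascending order
+ // kNoAttachRequested < kAttachFailed < kAttachSucceeded, a failed attach upgrades
+ // an overall status of kNoAttachRequested but never downgrades an earlier
+ // client's success.)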
+ if ((int) attachStatusForThisClient > (int) attachStatusOverall)
+ {
+ attachStatusOverall = attachStatusForThisClient;
+ }
+ }
+
+ // We reach this point only when we're in kOnDemand mode, and a failure is causing
+ // us to destroy the pipe (usually the failure is simply a timeout waiting for the
+ // next client to come along).
+ _ASSERTE(FAILED(hr) &&
+ (ProfilingAPIAttachDetach::GetAttachThreadingMode() ==
+ ProfilingAPIAttachDetach::kOnDemand));
+
+ // This switch statement can forgive, but it will never forget. We went through all
+ // the trouble of making an AttachThread and a pipe, and now we're destroying them.
+ // If no one even asked to attach a profiler in the meantime, this switch notes that
+ // in the event log. Conversely, if at least some client successfully attached a
+ // profiler, return S_OK.
+
+ switch(attachStatusOverall)
+ {
+ default:
+ _ASSERTE(!"Unknown AttachStatus value!");
+ return E_UNEXPECTED;
+
+ case kNoAttachRequested:
+ // All this time, and no one even asked for an attach? Wack. Log and return the
+ // last error we got
+ _ASSERTE(FAILED(hr));
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_NO_ATTACH_REQ, hr);
+ return hr;
+
+ case kAttachFailed:
+ // Someone tried to attach and failed. Event was already logged at that time
+ _ASSERTE(FAILED(hr));
+ return hr;
+
+ case kAttachSucceeded:
+ // At least one of the clients managed to get a profiler successfully attached
+ // (info event was logged at that time), so all is well
+ return S_OK;
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachServer::CreateAttachPipe
+//
+// Description:
+// Creates a new pipe server, that is not yet connected to a client
+//
+// Return Value:
+// HRESULT indicating success or failure
+//
+
+HRESULT ProfilingAPIAttachServer::CreateAttachPipe()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ SECURITY_ATTRIBUTES *psa = NULL;
+ SECURITY_ATTRIBUTES sa;
+
+ // Only assign security attributes for non-app container scenario
+ // We are assuming the default for app container scenario is good enough
+ if (!ProfilingAPIAttachDetach::IsAppContainerProcess(GetCurrentProcess()))
+ {
+ hr = ProfilingAPIAttachDetach::InitSecurityAttributes(&sa, sizeof(sa));
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ psa = &sa;
+ }
+
+ StackSString attachPipeName;
+ hr = ProfilingAPIAttachDetach::GetAttachPipeName(::GetCurrentProcess(), &attachPipeName);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ m_hPipeServer = CreateNamedPipeW(
+ attachPipeName.GetUnicode(),
+ PIPE_ACCESS_DUPLEX | // server and client read/write to pipe
+ FILE_FLAG_OVERLAPPED, // server may read asynchronously & use a timeout
+ PIPE_TYPE_MESSAGE | // pipe data written as stream of messages
+ PIPE_READMODE_MESSAGE, // pipe data read as stream of messages
+ 1, // Only one instance of the pipe is allowed
+ sizeof(GetVersionResponseMessage), // Hint of typical response size (GetVersion is the biggest)
+ sizeof(AttachRequestMessage) +
+ 0x100, // Hint of typical request size (attach requests are the
+ // biggest, plus figure 0x100 for client data)
+ 1000, // nDefaultTimeOut: unused. Clients will always
+ // specify their own timeout when waiting
+ // for the pipe to appear
+ psa // lpSecurityAttributes
+ );
+ if (m_hPipeServer == INVALID_HANDLE_VALUE)
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+ _ASSERTE(IsValidHandle(m_hPipeServer));
+
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF: Successfully created attach pipe server. Name: '%S'.\n",
+ attachPipeName.GetUnicode()));
+
+ return S_OK;
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachServer::ServiceOneClient
+//
+// Description:
+// Awaits a connection from a client, receives the client's requests, executes and
+// responds to those requests, and then disconnects the client on error or once a
+// profiler has been attached as a result. If any blocking operation takes too long,
+// this will disconnect the client as well.
+//
+// Arguments:
+// * pAttachStatusForClient - [out] enum indicating whether an attach request was
+// received and processed successfully. NOTE: This out param is always set
+// properly, even if this function returns an error.
+//
+// Return Value:
+// * error HRESULT: something bad happened with the pipe itself (e.g., couldn't
+// connect to a new client due to timeout or something worse). When in kOnDemand
+// mode, an error return from this function indicates the entire AttachThread
+// should go away.
+// * S_OK: Pipe is fine and connected to at least one client. That connection may or
+// may not have resulted in successful communication or a profiler attach. But in
+// any case, the pipe is still intact, and the caller should connect with the next
+// client.
+//
+// Notes:
+// * A failure event will be logged for any kind of user-actionable failure that
+// occurs in this function or callees.
+// * A failure event is NOT logged for a NON-actionable failure such as failure in
+// communicating a response message back to the trigger (client). See comment at
+// top of code:ProfilingAPIAttachServer::WriteResponseToPipe
+
+HRESULT ProfilingAPIAttachServer::ServiceOneClient(
+ AttachStatus * pAttachStatusForClient)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsValidHandle(m_hPipeServer));
+ _ASSERTE(pAttachStatusForClient != NULL);
+
+ HRESULT hr = E_UNEXPECTED;
+ *pAttachStatusForClient = kNoAttachRequested;
+
+ // What is the max timeout for each blocking wait for the trigger? Examples of
+ // blocking waits: wait for a pipe client to show up, or for the client to send a
+ // request, or for the pipe to transfer our response to the client.
+ //
+ // If any blocking operation takes longer than this, the current function will
+ // timeout.
+ // * While in kOnDemand mode, a timeout waiting for a client to connect will
+ // cause the AttachThread to give up, go away, and the app reverts to
+ // non-attach performance characteristics. The Global Attach Event will need
+ // to be signaled again by a trigger process (via AttachProfiler API) before
+ // a new AttachThread gets created and tries again.
+ // * Once a client is connected, timeouts from this function simply cause that
+ // client to be disconnected, and this function will be called again to wait
+ // (with timeout!) for the next client to connect.
+ m_dwMillisecondsMaxPerWait = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ProfAPIMaxWaitForTriggerMs);
+
+ hr = ConnectToClient();
+ if (FAILED(hr))
+ {
+ if (hr != HRESULT_FROM_WIN32(ERROR_TIMEOUT))
+ {
+ // Any error other than timeout is unexpected and should be logged. Timeouts,
+ // however, are expected, as eventually clients will stop trying to connect
+ // to the pipe, so no need to log that.
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_CONNECT_TO_TRIGGER, hr);
+ }
+ return hr;
+ }
+
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF: Pipe server is now connected to a new client.\n"));
+
+ // This forces a call to DisconnectNamedPipe before we return. That kicks the current
+ // client off of the pipe, and leaves the pipe available for the next client
+ // connection.
+ ConnectedPipeHolder connectedPipeHolder(m_hPipeServer);
+
+ // Keep executing requests from this client until it asks for (and we attempt) an
+ // attach. Whether the attach succeeds or fails, that's the end of this client, and
+ // we'll fall out of the loop and return.
+ while (*pAttachStatusForClient == kNoAttachRequested)
+ {
+ hr = ServiceOneRequest(pAttachStatusForClient);
+ if (FAILED(hr))
+ {
+ // Low-level error on the pipe itself indicating that we should disconnect
+ // from this client, and try connecting to a new one. Typical errors you
+ // might see here:
+ // * HRESULT_FROM_WIN32(ERROR_BROKEN_PIPE)
+ // * Someone killed the trigger process (or it timed out) before an
+ // attach could be requested.
+ // * HRESULT_FROM_WIN32(ERROR_TIMEOUT)
+ // * HRESULT_FROM_WIN32(ERROR_SEM_TIMEOUT)
+ // * Client's taking too long to send a request
+ //
+ // Since a failure here indicates a problem with this particular client, and
+ // not a global problem with the pipe, just convert to S_OK and return so we
+ // disconnect this client, and the caller knows to try connecting to another
+ // client. Note that ServiceOneRequest() has already reported any actionable
+ // problem into the event log.
+ return S_OK;
+ }
+ }
+
+ // A trigger finally managed to request an attach (success of the attach may be
+ // found in pAttachStatusForClient). So we can return to disconnect this client and
+ // poll for the next client.
+ return S_OK;
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachServer::ConnectToClient
+//
+// Description:
+// Waits until a client connects to the pipe server, or until timeout.
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+HRESULT ProfilingAPIAttachServer::ConnectToClient()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsValidHandle(m_hPipeServer));
+
+ HRESULT hr;
+ BOOL fRet;
+ DWORD dwErr;
+ DWORD cbReceived;
+ ProfilingAPIAttachDetach::OverlappedResultHolder overlapped;
+ hr = overlapped.Initialize();
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ // Start an overlapped connection for this pipe instance.
+ fRet = ConnectNamedPipe(m_hPipeServer, overlapped);
+ if (fRet)
+ {
+ // No need to wait, pipe connected already
+ return S_OK;
+ }
+
+ dwErr = GetLastError();
+ if (dwErr == ERROR_PIPE_CONNECTED)
+ {
+ // In true Windows style, a "failure" with ERROR_PIPE_CONNECTED is
+ // actually a success case: a client tried to connect before we (the
+ // server) called ConnectNamedPipe, so that we're now connected
+ // just fine
+ return S_OK;
+ }
+
+ if (dwErr != ERROR_IO_PENDING)
+ {
+ // An error we cannot recover from
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: ConnectNamedPipe failed. hr=0x%x.\n",
+ HRESULT_FROM_WIN32(dwErr)));
+ return HRESULT_FROM_WIN32(dwErr);
+ }
+
+ // Typical case: ERROR_IO_PENDING. ConnectNamedPipe is waiting (in overlapped mode)
+ // for a client to connect. Block until this happens (or we timeout)
+
+ hr = overlapped.Wait(
+
+ // How long we wait for the next client to show up depends on our threading mode
+ (ProfilingAPIAttachDetach::GetAttachThreadingMode() ==
+ ProfilingAPIAttachDetach::kAlwaysOn) ?
+
+ // In always-on mode, we're willing to wait forever until the next client
+ // shows up.
+ INFINITE :
+
+ // In on-demand mode, we want the AttachThread to exit if there aren't
+ // any new clients in a reasonable amount of time.
+ m_dwMillisecondsMaxPerWait,
+
+ m_hPipeServer,
+ &cbReceived);
+ if (FAILED(hr))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: Waiting for overlapped result for ConnectNamedPipe failed. hr=0x%x.\n",
+ hr));
+ return hr;
+ }
+
+ return S_OK;
+}
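+
+// For orientation only (a sketch, not part of this change): the trigger (client) end of
+// this handshake is the standard Win32 named-pipe client pattern against the same pipe
+// name. Here wszPipeName stands in for the attach pipe name of the target process:
+//
+//     if (WaitNamedPipeW(wszPipeName, dwTimeoutMs))
+//     {
+//         HANDLE hPipe = CreateFileW(wszPipeName, GENERIC_READ | GENERIC_WRITE,
+//                                    0, NULL, OPEN_EXISTING, 0, NULL);
+//         // ... write request / read response, then CloseHandle(hPipe) ...
+//     }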
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachServer::ServiceOneRequest
+//
+// Description:
+// Receives, executes, and responds to a single request from a single client.
+//
+// Arguments:
+// * pAttachStatus - [out] enum indicating whether an attach request was received and
+// processed successfully. NOTE: This out param is always set properly, even if
+// this function returns an error.
+//
+// Return Value:
+// * S_OK: Request was received. It may or may not have been processed successfully.
+// Any processing failure would be due to a high level problem, like an unknown
+// request format, or a CLR problem in handling the request ("can't attach
+// profiler cuz profiler already loaded"). In any case, the caller may leave the
+// pipe connection to this client open, as the connection is valid.
+// * error: Low-level error (e.g., OS pipe failure or timeout) trying to receive the
+// request or send a response. Such an error is generally unexpected and will
+// cause the caller to close the connection to the client (though the pipe will
+// remain up for the next client to try connecting).
+//
+// Notes:
+// * A failure event will be logged for any kind of user-actionable failure that
+// occurs in this function or callees.
+// * A failure event is NOT logged for a NON-actionable failure such as failure in
+// communicating a response message back to the trigger (client). See comment at
+// top of code:ProfilingAPIAttachServer::WriteResponseToPipe
+//
+
+HRESULT ProfilingAPIAttachServer::ServiceOneRequest(AttachStatus * pAttachStatus)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsValidHandle(m_hPipeServer));
+ _ASSERTE(pAttachStatus != NULL);
+
+ HRESULT hr;
+ DWORD cbRequestMessageRead;
+ *pAttachStatus = kNoAttachRequested;
+
+ // Reading from the pipe is a 3-step process.
+ //
+ // * 1. Read into a 0-sized buffer. This causes us to block (with timeout) until the
+ // message is in the pipe and ready to be analyzed. Since the buffer is 0-sized,
+ // the message is not actually read out of the pipe yet.
+ // * 2. Now that we know the message is available, peek into the pipe to extract the
+ // size of the message
+ // * 3. Now that we know the size, allocate a sufficient buffer, and repeat step 1,
+ // but with the appropriately sized buffer. This time the data is emptied out of
+ // the pipe.
+
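+    // As a minimal sketch (illustrative only, not part of this change), the same
+    // three-step pattern against a synchronous message-mode pipe handle hPipe would be:
+    //
+    //     DWORD cbRead, cbMsg;
+    //     ReadFile(hPipe, NULL, 0, &cbRead, NULL);            // 1. blocks until a message
+    //                                                         //    arrives; "fails" with
+    //                                                         //    ERROR_MORE_DATA
+    //     PeekNamedPipe(hPipe, NULL, 0, NULL, NULL, &cbMsg);  // 2. cbMsg = message size
+    //     BYTE * pbMsg = new (nothrow) BYTE[cbMsg];
+    //     ReadFile(hPipe, pbMsg, cbMsg, &cbRead, NULL);       // 3. drains the message
+    //
+    // The code below follows this pattern, except each read goes through overlapped I/O
+    // (see code:ProfilingAPIAttachServer::ReadRequestFromPipe) so it can time out.
+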
+ // Step 1: Read request once w/ 0-sized buffer so we know when the message is ready;
+ // at that point we can ask how long the message is
+ hr = ReadRequestFromPipe(
+ NULL, // Request buffer
+ 0, // Size of request buffer
+ &cbRequestMessageRead);
+ if (FAILED(hr) && (hr != HRESULT_FROM_WIN32(ERROR_MORE_DATA)))
+ {
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_PIPE_RCV, hr);
+ return hr;
+ }
+
+ // Step 2: Message is ready. How big is it?
+ DWORD cbRequestMessage;
+ if (!PeekNamedPipe(
+ m_hPipeServer,
+ NULL, // Request buffer (0-size for now)
+ 0, // Size of request buffer
+ NULL, // lpBytesRead (NULL cuz message shan't be read)
+ NULL, // lpTotalBytesAvail (NULL cuz don't care)
+ &cbRequestMessage))
+    {
+        // Convert the Win32 failure to an HRESULT before logging / returning; at this
+        // point hr still holds the (tolerated) result of the 0-sized read above, not
+        // the PeekNamedPipe error.
+        hr = HRESULT_FROM_WIN32(GetLastError());
+        ProfilingAPIUtility::LogProfError(IDS_E_PROF_PIPE_RCV, hr);
+        return hr;
+    }
+
+ // 0-sized requests are invalid. Something wrong with the pipe?
+ if (cbRequestMessage == 0)
+ {
+ hr = E_UNEXPECTED;
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_PIPE_RCV, hr);
+ return hr;
+ }
+
+ // Step 3: message is ready and we know the size. Make the buffer, and read it in.
+
+ NewHolder<BYTE> pbRequestMessage(new (nothrow) BYTE[cbRequestMessage]);
+ if (pbRequestMessage == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_PIPE_RCV, hr);
+ return hr;
+ }
+
+ hr = ReadRequestFromPipe(
+ pbRequestMessage,
+ cbRequestMessage,
+ &cbRequestMessageRead);
+ if (FAILED(hr))
+ {
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_PIPE_RCV, hr);
+ return hr;
+ }
+
+ if (cbRequestMessage != cbRequestMessageRead)
+ {
+ // Somehow we read a different number of bytes than we were told was in the pipe
+ // buffer. Pipe having problems?
+ hr = E_UNEXPECTED;
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_PIPE_RCV, hr);
+ return hr;
+ }
+
+ // Request successfully read! Now figure out what the request is, carry it out, and
+ // send a response. This function will report to the event log any user-actionable
+ // error.
+ return InterpretAndExecuteRequestMessage(pbRequestMessage, cbRequestMessage, pAttachStatus);
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachServer::ReadRequestFromPipe
+//
+// Description:
+// Performs a ReadFile with timeout on the pipe server to read the client's request
+// message.
+//
+// Arguments:
+// * pvRequestBuffer - [out] Buffer into which the request will be placed
+// * cbRequestBuffer - [in] Size, in bytes, of the request buffer
+// * pcbActualRequest - [out] Actual number of bytes placed into the request buffer.
+//
+// Return Value:
+// HRESULT indicating success or failure
+//
+// Assumptions:
+// * m_hPipeServer must be connected to a client.
+//
+// Notes:
+// * The [out] parameters may be written to even if this function fails. But their
+// contents should be ignored by the caller in this case.
+//
+
+HRESULT ProfilingAPIAttachServer::ReadRequestFromPipe(
+ LPVOID pvRequestBuffer,
+ DWORD cbRequestBuffer,
+ DWORD * pcbActualRequest)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsValidHandle(m_hPipeServer));
+
+ // NULL buffer implies zero size!
+ _ASSERTE((pvRequestBuffer != NULL) || (cbRequestBuffer == 0));
+
+ _ASSERTE(pcbActualRequest != NULL);
+
+ HRESULT hr;
+ DWORD dwErr;
+ ProfilingAPIAttachDetach::OverlappedResultHolder overlapped;
+ hr = overlapped.Initialize();
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ if (ReadFile(
+ m_hPipeServer,
+ pvRequestBuffer,
+ cbRequestBuffer,
+ pcbActualRequest,
+ overlapped))
+ {
+ // Quick read, no waiting
+ return S_OK;
+ }
+
+ dwErr = GetLastError();
+ if (dwErr != ERROR_IO_PENDING)
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: ReadFile on the pipe failed. hr=0x%x.\n",
+ HRESULT_FROM_WIN32(dwErr)));
+ return HRESULT_FROM_WIN32(dwErr);
+ }
+
+ // Typical case=ERROR_IO_PENDING: gotta wait until request comes in (or we timeout)
+
+ hr = overlapped.Wait(
+ m_dwMillisecondsMaxPerWait,
+ m_hPipeServer,
+ pcbActualRequest);
+ if (FAILED(hr))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: Waiting for overlapped result for ReadFile on the pipe failed. hr=0x%x.\n",
+ hr));
+ return hr;
+ }
+
+ return S_OK;
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachServer::InterpretAndExecuteRequestMessage
+//
+// Description:
+// Takes an unverified stream of bytes read from the pipe, and then verifies the bytes
+// as a self-consistent message and executes the request (either get version or
+// attach). Once the request has been executed, a response is sent back across the
+// pipe.
+//
+// Arguments:
+// * pbRequestMessage - [in] Bytes read from pipe
+// * cbRequestMessage - [in] Count of bytes read from pipe
+// * pAttachStatus - [out] (see comment header for
+// code:ProfilingAPIAttachServer::ServiceOneRequest)
+//
+// Return Value:
+// HRESULT indicating success or failure with low-level reading / writing operations
+// on the pipe that indicate whether the caller should abandon this client connection.
+// Higher-level failures (e.g., bogus request messages, or failure performing the
+// actual attach) do not cause an error to be returned from this function. Caller may
+// use pAttachStatus to determine whether this request resulted in a successful
+// profiler attach.
+//
+// Notes:
+// * This (or callee) will log an event on actionable failures. (Failure to send a
+// response back to the trigger is not considered actionable. See comment at top
+// of code:ProfilingAPIAttachServer::WriteResponseToPipe.)
+//
+
+HRESULT ProfilingAPIAttachServer::InterpretAndExecuteRequestMessage(
+ LPCBYTE pbRequestMessage,
+ DWORD cbRequestMessage,
+ AttachStatus * pAttachStatus)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+
+ // This causes events to be logged, which loads resource strings,
+ // which takes locks.
+ CAN_TAKE_LOCK;
+
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pbRequestMessage != NULL);
+ _ASSERTE(pAttachStatus != NULL);
+
+ HRESULT hr;
+
+ *pAttachStatus = kNoAttachRequested;
+
+ // Message bytes have not been verified, so none of the contents (such as sizes or
+ // offsets) may be trusted until they're all verified.
+ RequestMessageVerifier messageVerifier(pbRequestMessage, cbRequestMessage);
+ hr = messageVerifier.Verify();
+ if (FAILED(hr))
+ {
+ // Bogus request message. Log to event log
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_INVALID_MSG);
+
+ // And send complaint back to trigger
+ BaseResponseMessage responseMsg(hr);
+ return WriteResponseToPipe(&responseMsg, sizeof(responseMsg));
+ }
+
+ // Yay! Message is valid
+ const BaseRequestMessage * pBaseRequestMessage = messageVerifier.GetBaseRequestMessage();
+
+ // Execute request based on its type
+ switch(pBaseRequestMessage->m_requestMessageType)
+ {
+ default:
+ // RequestMessageVerifier should have verified no unexpected request message
+ // types slipped through.
+ _ASSERTE(!"Unexpected m_requestMessageType");
+ return E_UNEXPECTED;
+
+ case kMsgGetVersion:
+ return ExecuteGetVersionRequestMessage();
+
+ case kMsgAttach:
+ return ExecuteAttachRequestMessage(
+ (const AttachRequestMessage *) pBaseRequestMessage,
+ pAttachStatus);
+ }
+}
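+
+// For reference, a sketch of the request shapes implied by the fields consumed above.
+// The authoritative struct definitions live in the attach message header (not in this
+// file); layout details such as array sizes are elided here:
+//
+//     struct BaseRequestMessage
+//     {
+//         ...
+//         RequestMessageType m_requestMessageType;   // kMsgGetVersion or kMsgAttach
+//     };
+//
+//     struct AttachRequestMessage : BaseRequestMessage
+//     {
+//         CLSID m_clsidProfiler;               // CLSID of the profiler to load
+//         WCHAR m_wszProfilerPath[...];        // path to the profiler DLL
+//         DWORD m_dwClientDataStartOffset;     // offset of client data from message start
+//         DWORD m_cbClientDataLength;          // size of client data in bytes
+//     };
+//
+// AttachRequestMessageV2 additionally carries m_dwConcurrentGCWaitTimeoutInMs (see
+// code:ProfilingAPIAttachServer::ExecuteAttachRequestMessage).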
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachServer::ExecuteAttachRequestMessage
+//
+// Description:
+// Once an attach request message has been verified as self-consistent (see
+// code:RequestMessageVerifier), call this function to actually attach the profiler
+// using data from the message
+//
+// Arguments:
+// * pAttachRequestMessage - [in] An already-verified attach request message that was
+// received from trigger.
+// * pAttachStatus - [out] (see comment header for
+// code:ProfilingAPIAttachServer::ServiceOneRequest)
+//
+// Return Value:
+// HRESULT indicating success or failure in sending response over the pipe back to the
+// trigger. Note that a failure to perform the attach does not necessarily cause a
+// failure HRESULT to be returned by this function (only low-level pipe problems will
+// cause this function to fail). A failure performing the attach is noted in
+// pAttachStatus.
+//
+// Notes:
+// * This (or a callee) will log an event on failure or success of performing the
+// attach. However, once the attach is complete (failed or succeeded), no event
+// will be logged if there is a communication error sending the response back to
+// the trigger. (See comment at top of
+// code:ProfilingAPIAttachServer::WriteResponseToPipe)
+//
+
+HRESULT ProfilingAPIAttachServer::ExecuteAttachRequestMessage(
+ const AttachRequestMessage * pAttachRequestMessage,
+ AttachStatus * pAttachStatus)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+
+ // This causes events to be logged, which loads resource strings,
+ // which takes locks.
+ CAN_TAKE_LOCK;
+
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pAttachRequestMessage != NULL);
+ _ASSERTE(pAttachStatus != NULL);
+
+ // Start off pessimistic
+ *pAttachStatus = kAttachFailed;
+
+ if (g_profControlBlock.curProfStatus.Get() != kProfStatusNone)
+ {
+ // Sorry, profiler's already here.
+ //
+ // Note: It might appear that there's a race here (i.e.,
+ // g_profControlBlock.curProfStatus.Get() == kProfStatusNone so we try to load the
+ // profiler, but another profiler is already getting loaded somehow, and
+ // g_profControlBlock.curProfStatus.Get() just hasn't been updated yet. So we end
+ // up loading two profilers at once.) But there is actually no race here for a
+ // couple reasons:
+ // * 1. Startup load of profiler occurs before the pipe is even constructed. So
+ // we won't get an attach request while a startup load is in progress
+ // * 2. Pipe requests are serialized. OS handles this for us because:
+ // * a. Only one instance of the attach pipe is allowed at a time, because
+ // our call to CreateNamedPipeW specifies only 1 instance is allowed, and
+ // * b. Within that single pipe instance, messages are processed serially,
+ // from the single AttachThread that successfully created the pipe in the
+ // first place.
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_PROFILER_ALREADY_ACTIVE);
+
+ _ASSERTE(*pAttachStatus == kAttachFailed);
+
+ // Inform trigger that attach cannot happen now
+ AttachResponseMessage responseMsg(CORPROF_E_PROFILER_ALREADY_ACTIVE);
+ return WriteResponseToPipe(&responseMsg, sizeof(responseMsg));
+ }
+
+    // If the client sends us a V2 message, retrieve the timeout value.
+    // In theory both client and server should be on v4.5+, but assign a default value
+    // just in case.
+    DWORD dwConcurrentGCWaitTimeoutInMs = INFINITE;
+    if (AttachRequestMessageV2::CanCastTo(pAttachRequestMessage))
+    {
+        dwConcurrentGCWaitTimeoutInMs =
+            static_cast<const AttachRequestMessageV2 *>(pAttachRequestMessage)->m_dwConcurrentGCWaitTimeoutInMs;
+    }
+
+ // LoadProfilerForAttach & callees ensure an event is logged on error.
+ HRESULT hrAttach = ProfilingAPIUtility::LoadProfilerForAttach(
+
+ // Profiler's CLSID
+ &(pAttachRequestMessage->m_clsidProfiler),
+
+ // wszProfilerDLL
+ pAttachRequestMessage->m_wszProfilerPath,
+
+ // Client data ptr
+ (pAttachRequestMessage->m_cbClientDataLength == 0) ?
+ // No client data: use NULL
+ NULL :
+ // Else, follow offset to find client data
+ (LPVOID) (((LPBYTE) pAttachRequestMessage) +
+ pAttachRequestMessage->m_dwClientDataStartOffset),
+
+ // Client data size
+ pAttachRequestMessage->m_cbClientDataLength,
+
+        // Timeout for the wait on any concurrent GC that is in progress
+ dwConcurrentGCWaitTimeoutInMs);
+
+ // Inform caller if attach succeeded
+ if (SUCCEEDED(hrAttach))
+ {
+ *pAttachStatus = kAttachSucceeded;
+ }
+ else
+ {
+ _ASSERTE(*pAttachStatus == kAttachFailed);
+ }
+
+ // Inform trigger about how the attach went
+ AttachResponseMessage responseMsg(hrAttach);
+ return WriteResponseToPipe(&responseMsg, sizeof(responseMsg));
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachServer::ExecuteGetVersionRequestMessage
+//
+// Description:
+// Composes a response message to the "GetVersion" request message. Response contains
+// the version of the profilee (server), and the minimum allowable version of a
+// trigger (client) we're willing to talk to.
+//
+// Return Value:
+//    HRESULT indicating success or failure.
+//
+// Notes:
+//    * Composing the response cannot fail, and we are not logging communication failures
+// in sending response messages (see comment at top of
+// code:ProfilingAPIAttachServer::WriteResponseToPipe), so no event will be logged
+// by this function or callees.
+//
+
+HRESULT ProfilingAPIAttachServer::ExecuteGetVersionRequestMessage()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ GetVersionResponseMessage responseMsg(
+ // S_OK means we successfully carried out the "GetVersion" request
+ S_OK,
+
+ // This is the version of the target profilee app
+ ProfilingAPIAttachDetach::kCurrentProcessVersion,
+
+ // This is the oldest trigger that we allow communicating with
+ ProfilingAPIAttachDetach::kMinimumAllowableTriggerVersion);
+
+ return WriteResponseToPipe(&responseMsg, sizeof(responseMsg));
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachServer::WriteResponseToPipeNoBufferSizeCheck
+//
+// Description:
+// Performs a WriteFile with timeout on the pipe server to write the specified
+// response back to the client. This is an internal helper used by
+// code:ProfilingAPIAttachServer::WriteResponseToPipe
+//
+// Arguments:
+// * pvResponse - [in] Buffer containing the response to be sent to the client
+// * cbResponse - [in] Size, in bytes, of the response to send.
+// * pcbWritten - [out] Actual number of bytes sent to client
+//
+// Return Value:
+// HRESULT indicating success or failure
+//
+// Assumptions:
+// * m_hPipeServer must be connected to a client.
+//
+// Notes:
+// * The [out] parameter may be written to even if this function fails. But its
+// contents should be ignored by the caller in this case.
+//
+
+HRESULT ProfilingAPIAttachServer::WriteResponseToPipeNoBufferSizeCheck(
+ LPVOID pvResponse,
+ DWORD cbResponse,
+ DWORD * pcbWritten)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsValidHandle(m_hPipeServer));
+ _ASSERTE(pvResponse != NULL);
+ _ASSERTE(pcbWritten != NULL);
+
+ HRESULT hr;
+ DWORD dwErr;
+ ProfilingAPIAttachDetach::OverlappedResultHolder overlapped;
+ hr = overlapped.Initialize();
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ if (WriteFile(
+ m_hPipeServer,
+ pvResponse,
+ cbResponse,
+ pcbWritten,
+ overlapped))
+ {
+ // Quick write, no waiting
+ return S_OK;
+ }
+
+ dwErr = GetLastError();
+ if (dwErr != ERROR_IO_PENDING)
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: WriteFile on the pipe failed. hr=0x%x.\n",
+ HRESULT_FROM_WIN32(dwErr)));
+ return HRESULT_FROM_WIN32(dwErr);
+ }
+
+ // Typical case=ERROR_IO_PENDING: gotta wait until response is sent (or we timeout)
+
+ hr = overlapped.Wait(
+ m_dwMillisecondsMaxPerWait,
+ m_hPipeServer,
+ pcbWritten);
+ if (FAILED(hr))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: Waiting for overlapped result for WriteFile on the pipe failed. hr=0x%x.\n",
+ hr));
+ return hr;
+ }
+
+ return S_OK;
+}
+
+#endif //FEATURE_PROFAPI_ATTACH_DETACH
diff --git a/src/vm/profattachserver.h b/src/vm/profattachserver.h
new file mode 100644
index 0000000000..6cae18a71c
--- /dev/null
+++ b/src/vm/profattachserver.h
@@ -0,0 +1,110 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ProfAttachServer.h
+//
+
+//
+// Definitions of ProfilingAPIAttachServer and helpers, which are used by the
+// AttachThread running in the target profilee (server end of the pipe) to receive and
+// carry out requests that are sent by the trigger (client end of the pipe).
+//
+
+// ======================================================================================
+
+#ifndef __PROF_ATTACH_SERVER_H__
+#define __PROF_ATTACH_SERVER_H__
+
+
+//---------------------------------------------------------------------------------------
+// Helper to verify any messages received by the target profilee, before the target
+// profilee is allowed to trust any of the message contents.
+
+class RequestMessageVerifier
+{
+public:
+ RequestMessageVerifier(LPCBYTE pbRequestMessage, DWORD cbRequestMessage);
+ HRESULT Verify();
+ const BaseRequestMessage * GetBaseRequestMessage();
+
+protected:
+ LPCBYTE m_pbRequestMessage;
+ DWORD m_cbRequestMessage;
+ INDEBUG(BOOL m_fVerified);
+
+ HRESULT VerifyGetVersionRequestMessage();
+ HRESULT VerifyAttachRequestMessage();
+};
+
+//---------------------------------------------------------------------------------------
+// Here's the beef. All the pipe server stuff running on the AttachThread is housed in
+// this class.
+
+class ProfilingAPIAttachServer
+{
+public:
+ ProfilingAPIAttachServer();
+ ~ProfilingAPIAttachServer();
+
+ HRESULT ExecutePipeRequests();
+
+protected:
+ //---------------------------------------------------------------------------------------
+ // Notes whether an attach was requested, and whether the request was serviced
+ // successfully. Primarily used to aggregate status across multiple trigger processes
+ // that connect over the pipe, so we know what we've logged to the event log.
+ //
+ // Notes:
+ // * The order is important! Overall attach status may change only in ascending
+ // order of the values of this enum. See
+    //         code:ProfilingAPIAttachServer::ExecutePipeRequests#AttachStatusOrder
+ enum AttachStatus
+ {
+ // Default, and worst of all: No one requested a profiler attach
+ kNoAttachRequested = 0,
+
+ // Slightly better: someone figured out how to ask for an attach, but it failed.
+ kAttachFailed = 1,
+
+ // Bestest of all: someone requested an attach, and it worked
+ kAttachSucceeded = 2,
+ };
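+
+    // Given that ordering, aggregating status across successive clients can be (for
+    // example) a simple ascending update, sketched as:
+    //     attachStatusOverall = max(attachStatusOverall, attachStatusForClient);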
+
+ // Server end of the pipe created by the current process (which is the target
+ // profilee).
+ HandleHolder m_hPipeServer;
+
+ // Most blocking operations on the server end of the pipe (i.e., this process), use
+ // this as the timeout. The exception is waiting for new connections when in
+ // code:ProfilingAPIAttachDetach::kAlwaysOn mode (which waits with INFINITE timeout).
+ DWORD m_dwMillisecondsMaxPerWait;
+
+ HRESULT CreateAttachPipe();
+ HRESULT ServiceOneClient(AttachStatus * pAttachStatusForClient);
+ HRESULT ConnectToClient();
+ HRESULT ServiceOneRequest(
+ AttachStatus * pAttachStatus);
+ HRESULT ReadRequestFromPipe(
+ LPVOID pvRequestBuffer,
+ DWORD cbRequestBuffer,
+ DWORD * pcbActualRequest);
+ HRESULT InterpretAndExecuteRequestMessage(
+ LPCBYTE pbRequestMessage,
+ DWORD cbRequestMessage,
+ AttachStatus * pAttachStatus);
+ HRESULT WriteResponseToPipeNoBufferSizeCheck(
+ LPVOID pvResponse,
+ DWORD cbResponse,
+ DWORD * pcbWritten);
+ HRESULT WriteResponseToPipe(
+ LPVOID pvResponse,
+ DWORD cbResponse);
+ HRESULT ExecuteGetVersionRequestMessage();
+ HRESULT ExecuteAttachRequestMessage(
+ const AttachRequestMessage * pAttachRequestMessage,
+ AttachStatus * pAttachStatus);
+};
+
+#endif // __PROF_ATTACH_SERVER_H__
diff --git a/src/vm/profattachserver.inl b/src/vm/profattachserver.inl
new file mode 100644
index 0000000000..5e80b7ecc1
--- /dev/null
+++ b/src/vm/profattachserver.inl
@@ -0,0 +1,130 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ProfAttachServer.inl
+//
+
+//
+// Inlined implementations of portions of ProfilingAPIAttachServer and helpers, which are
+// used by the AttachThread running in the target profilee (server end of the pipe) to
+// receive and carry out requests that are sent by the trigger (client end of the pipe).
+//
+
+// ======================================================================================
+
+
+// ----------------------------------------------------------------------------
+// RequestMessageVerifier::RequestMessageVerifier()
+//
+// Description:
+//    Constructor that takes the stream of bytes read by the target profilee on its pipe.
+// After construction, call Verify() to verify the stream of bytes makes a
+// well-formed message.
+//
+// Arguments:
+// * pbRequestMessage - Bytes read from pipe
+// * cbRequestMessage - Number of bytes read from pipe.
+//
+
+inline RequestMessageVerifier::RequestMessageVerifier(
+ LPCBYTE pbRequestMessage,
+ DWORD cbRequestMessage) :
+ m_pbRequestMessage(pbRequestMessage),
+ m_cbRequestMessage(cbRequestMessage)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ INDEBUG(m_fVerified = FALSE);
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachServer::ProfilingAPIAttachServer()
+//
+// Description:
+// Constructor for ProfilingAPIAttachServer, which owns the server end of the pipe
+// running in the target profilee
+
+inline ProfilingAPIAttachServer::ProfilingAPIAttachServer() :
+ m_dwMillisecondsMaxPerWait(0)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+inline ProfilingAPIAttachServer::~ProfilingAPIAttachServer()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ if (IsValidHandle(m_hPipeServer))
+ {
+ // m_hPipeServer's destructor is about to destroy the pipe
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF: Finished communication; closing attach pipe server.\n"));
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIAttachServer::WriteResponseToPipe
+//
+// Description:
+// Sends response bytes across pipe to trigger process.
+//
+// Arguments:
+// * pvResponse - Pointer to bytes to send
+// * cbResponse - How many bytes to send
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+// Notes:
+// * Purposely does NOT log an event on failure, as an event at this stage would be
+// confusing to the user. The requested operation (e.g., Attach) has already been
+// performed; this is just the part that communicates the result back to the
+// trigger. There's nothing the user could (or would want to) do if response
+// communication failed. Either the attach worked or not, and that's already been
+// logged to the event log.
+//
+
+inline HRESULT ProfilingAPIAttachServer::WriteResponseToPipe(
+ LPVOID pvResponse,
+ DWORD cbResponse)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsValidHandle(m_hPipeServer));
+ _ASSERTE(pvResponse != NULL);
+
+ DWORD cbWritten;
+
+ HRESULT hr = WriteResponseToPipeNoBufferSizeCheck(
+ pvResponse,
+ cbResponse,
+ &cbWritten);
+
+ // Check the buffer size against what was written
+ if (SUCCEEDED(hr) && (cbResponse != cbWritten))
+ {
+ // Partial response sent. Be sure hr reflects there was a problem
+ hr = E_UNEXPECTED;
+ }
+
+ return hr;
+}
diff --git a/src/vm/profdetach.cpp b/src/vm/profdetach.cpp
new file mode 100644
index 0000000000..868917942a
--- /dev/null
+++ b/src/vm/profdetach.cpp
@@ -0,0 +1,714 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ProfDetach.cpp
+//
+
+//
+// Implementation of helper classes and structures used for Profiling API Detaching
+//
+// ======================================================================================
+
+#include "common.h"
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+
+#include "profdetach.h"
+#include "profilinghelper.h"
+
+// Class static member variables
+ProfilerDetachInfo ProfilingAPIDetach::s_profilerDetachInfo;
+CLREvent ProfilingAPIDetach::s_eventDetachWorkAvailable;
+
+
+// ---------------------------------------------------------------------------------------
+// ProfilerDetachInfo constructor
+//
+// Description:
+// Set every member variable to NULL or 0. They'll get initialized to real values
+// in ProfilingAPIDetach::RequestProfilerDetach.
+//
+
+ProfilerDetachInfo::ProfilerDetachInfo()
+{
+ // Executed during construction of a global object, therefore we cannot
+ // use real contracts, as this requires that utilcode has been initialized.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+
+ Init();
+}
+
+void ProfilerDetachInfo::Init()
+{
+ // Executed during construction of a global object, therefore we cannot
+ // use real contracts, as this requires that utilcode has been initialized.
+ STATIC_CONTRACT_LEAF;
+
+ m_pEEToProf = NULL;
+ m_ui64DetachStartTime = 0;
+ m_dwExpectedCompletionMilliseconds = 0;
+}
+
+
+// ----------------------------------------------------------------------------
+// Implementation of ProfilingAPIDetach statics
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIDetach::Initialize
+//
+// Description:
+// Initialize static event
+
+// static
+HRESULT ProfilingAPIDetach::Initialize()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if (!s_eventDetachWorkAvailable.IsValid())
+ {
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ s_eventDetachWorkAvailable.CreateAutoEvent(FALSE);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ if (SUCCEEDED(hr))
+ {
+ // For exceptions that give us useless hr's, just use E_FAIL
+ hr = E_FAIL;
+ }
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ }
+
+ return S_OK;
+}
+
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIDetach::RequestProfilerDetach
+//
+// Description:
+// Initialize ProfilerDetachInfo structures with parameters passed from
+// ICorProfilerInfo3::RequestProfilerDetach
+//
+// Arguments:
+// * dwExpectedCompletionMilliseconds - A hint to the CLR as to how long it should
+// wait before checking to see if execution has evacuated the profiler and all
+// profiler-instrumented code. If this is 0, the CLR will select a default.
+//
+// Notes:
+//
+// Invariants maintained by profiler:
+// * Before calling RequestProfilerDetach, the profiler must turn off all hijacking.
+// * If RequestProfilerDetach is called from a thread created by the CLR (i.e., from
+// within a callback), the profiler must first have exited all threads of its own
+// creation
+// * If RequestProfilerDetach is called from a thread of the profiler's own creation,
+// then
+// * The profiler must first have exited all OTHER threads of its own creation,
+// AND
+// * The profiler must immediately call FreeLibraryAndExitThread() after
+// RequestProfilerDetach returns.
+//
+// The above invariants result in the following possibilities:
+// * RequestProfilerDetach() may be called multi-threaded, but only from within
+// profiler callbacks. As such, evacuation counters will have been incremented
+// before entry into RequestProfilerDetach(), so the DetachThread will be
+// blocked until all such threads have returned from RequestProfilerDetach and
+// the callback from which RequestProfilerDetach was called. OR
+// * RequestProfilerDetach() is called single-threaded, from a thread of the
+// profiler's creation, which promises not to make any more calls into the CLR
+// afterward. In this case, the DetachThread will be blocked until
+// RequestProfilerDetach signals s_eventDetachWorkAvailable at the end.
+//
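+//
+// For reference, a profiler honoring the second invariant (detaching from a thread of
+// its own creation) would end that thread with something like the following sketch,
+// where s_hProfilerModule is an assumed handle the profiler saved when it was loaded:
+//
+//     pCorProfilerInfo3->RequestProfilerDetach(5000);   // hint: ~5 seconds to evacuate
+//     FreeLibraryAndExitThread(s_hProfilerModule, 0);   // no further calls into the CLR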
+
+// static
+HRESULT ProfilingAPIDetach::RequestProfilerDetach(DWORD dwExpectedCompletionMilliseconds)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ // Crst is used so GC may be triggered
+ GC_TRIGGERS;
+ MODE_ANY;
+ EE_THREAD_NOT_REQUIRED;
+ // Crst is used to synchronize the initialization of ProfilingAPIDetach internal structure
+ CAN_TAKE_LOCK;
+ PRECONDITION(ProfilingAPIUtility::GetStatusCrst() != NULL);
+ PRECONDITION(s_eventDetachWorkAvailable.IsValid());
+ }
+ CONTRACTL_END;
+
+ // Runtime must be fully started, or else CpuStoreBufferControl used below may not
+ // be initialized yet.
+ if (!g_fEEStarted)
+ {
+ return CORPROF_E_RUNTIME_UNINITIALIZED;
+ }
+
+ if (dwExpectedCompletionMilliseconds == 0)
+ {
+ // Pick suitable default if the profiler just leaves this at 0. 5 seconds is
+ // reasonable.
+ dwExpectedCompletionMilliseconds = 5000;
+ }
+
+ {
+ CRITSEC_Holder csh(ProfilingAPIUtility::GetStatusCrst());
+
+ // return immediately if detach is in progress
+
+ if (s_profilerDetachInfo.m_pEEToProf != NULL)
+ {
+ return CORPROF_E_PROFILER_DETACHING;
+ }
+
+ ProfilerStatus curProfStatus = g_profControlBlock.curProfStatus.Get();
+
+ if ((curProfStatus == kProfStatusInitializingForStartupLoad) ||
+ (curProfStatus == kProfStatusInitializingForAttachLoad))
+ {
+ return CORPROF_E_PROFILER_NOT_YET_INITIALIZED;
+ }
+
+ if (curProfStatus != kProfStatusActive)
+ {
+ // Before we acquired the lock, someone else must have unloaded the profiler
+ // for us (e.g., shutdown or the DetachThread in response to a prior
+ // RequestProfilerDetach call).
+ return CORPROF_E_PROFILER_DETACHING;
+ }
+
+ EEToProfInterfaceImpl * pEEToProf = g_profControlBlock.pProfInterface;
+
+ // Since prof status was active after entering the lock, the profiler must not
+ // have unloaded out from under us.
+ _ASSERTE(pEEToProf != NULL);
+
+ if (!pEEToProf->IsCallback3Supported())
+ {
+ return CORPROF_E_CALLBACK3_REQUIRED;
+ }
+
+ // Did the profiler do anything immutable? That will prevent us from allowing it to
+ // detach.
+ HRESULT hr = pEEToProf->EnsureProfilerDetachable();
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ s_profilerDetachInfo.m_pEEToProf = pEEToProf;
+ s_profilerDetachInfo.m_ui64DetachStartTime = CLRGetTickCount64();
+ s_profilerDetachInfo.m_dwExpectedCompletionMilliseconds = dwExpectedCompletionMilliseconds;
+
+ // Ok, time to seal the profiler from receiving or making calls with the CLR.
+ // (This will force a FlushStoreBuffers().)
+ g_profControlBlock.curProfStatus.Set(kProfStatusDetaching);
+ }
+
+ // Sealing done. Wake up the DetachThread so it can loop until the profiler code is
+ // fully evacuated off of all stacks.
+ if (!s_eventDetachWorkAvailable.Set())
+ {
+ return HRESULT_FROM_WIN32(GetLastError());
+ }
+
+ // FUTURE: Currently, kProfStatusDetaching prevents callbacks from being sent to the
+ // profiler AND prevents another profiler from attaching. In the future, when
+ // implementing the reattach-with-neutered-profilers feature crew, we may want to add
+ // another block here to call ProfilingAPIUtility::SetProfStatus(kProfStatusNone), so callbacks are
+ // prevented but a new profiler may attempt to attach.
+
+ EX_TRY
+ {
+ ProfilingAPIUtility::LogProfInfo(IDS_PROF_DETACH_INITIATED);
+ }
+ EX_CATCH
+ {
+ // Oh well, rest of detach succeeded, so we should still return success to the
+ // profiler.
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// This is where the DetachThread spends its life. This waits until there's a profiler
+// to detach, then loops until the profiler code is completely evacuated off all stacks.
+// This will then unload the profiler.
+//
+
+// static
+void ProfilingAPIDetach::ExecuteEvacuationLoop()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // Wait until there's a profiler to detach (or until this thread should "wake up"
+ // for some other reason, such as exiting due to an unsuccessful startup-load of a
+ // profiler).
+ DWORD dwRet = s_eventDetachWorkAvailable.Wait(INFINITE, FALSE /* alertable */);
+ if (dwRet != WAIT_OBJECT_0)
+ {
+ // The wait ended due to a failure or a reason other than the event getting
+ // signaled (e.g., WAIT_ABANDONED)
+ DWORD dwErr;
+ if (dwRet == WAIT_FAILED)
+ {
+ dwErr = GetLastError();
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: DetachThread wait for s_eventDetachWorkAvailable failed with GetLastError = %d.\n",
+ dwErr));
+ }
+ else
+ {
+ dwErr = dwRet; // No extra error info available beyond the return code
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: DetachThread wait for s_eventDetachWorkAvailable terminated with %d.\n",
+ dwErr));
+ }
+
+ ProfilingAPIUtility::LogProfError(IDS_PROF_DETACH_THREAD_ERROR, dwErr);
+ return;
+ }
+
+ // Peek to make sure there's actually a profiler to detach
+ {
+ CRITSEC_Holder csh(ProfilingAPIUtility::GetStatusCrst());
+
+ if (s_profilerDetachInfo.m_pEEToProf == NULL)
+ {
+ // Nothing to detach. This can happen if the DetachThread (i.e., current
+ // thread) was created but then the profiler failed to load.
+ return;
+ }
+ }
+
+ do
+ {
+ // Give profiler a chance to return from its procs
+ SleepWhileProfilerEvacuates();
+ }
+ while (!IsProfilerEvacuated());
+
+ UnloadProfiler();
+}
+
+//---------------------------------------------------------------------------------------
+//
+// This is called in between evacuation counter checks. This calculates how long to
+// sleep, and then sleeps.
+//
+
+// static
+void ProfilingAPIDetach::SleepWhileProfilerEvacuates()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // Don't want to check evacuation any more frequently than every 300ms
+ const DWORD kdwDefaultMinSleepMs = 300;
+
+ // The default "steady state" max sleep is how long we'll wait if, after a couple
+ // tries the profiler still hasn't evacuated. Default to every 10 minutes
+ const DWORD kdwDefaultMaxSleepMs = 600000;
+
+ static DWORD s_dwMinSleepMs = 0;
+ static DWORD s_dwMaxSleepMs = 0;
+
+ // First time through, initialize the static min / max sleep times. Normally, we'll
+ // just use the constants above, but the user may customize these (within reason).
+
+ // They should either both be uninitialized or both initialized
+ _ASSERTE(
+ ((s_dwMinSleepMs == 0) && (s_dwMaxSleepMs == 0)) ||
+ ((s_dwMinSleepMs != 0) && (s_dwMaxSleepMs != 0)));
+
+ if (s_dwMaxSleepMs == 0)
+ {
+ // No race here, since only the DetachThread runs this code
+
+ s_dwMinSleepMs = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ProfAPI_DetachMinSleepMs);
+ s_dwMaxSleepMs = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ProfAPI_DetachMaxSleepMs);
+
+ // Here's the "within reason" part: the user may not customize these values to
+ // be more "extreme" than the constants, or to be 0 (which would confuse the
+    // issue of whether these statics were initialized yet).
+ if ((s_dwMinSleepMs < kdwDefaultMinSleepMs) || (s_dwMinSleepMs > kdwDefaultMaxSleepMs))
+ {
+ // Sleeping less than 300ms between evac checks could negatively affect the
+ // app by having the DetachThread execute too often. And a min sleep time
+ // that's too high could result in a profiler hanging around way too long
+ // when it's actually ready to be unloaded.
+ s_dwMinSleepMs = kdwDefaultMinSleepMs;
+ }
+ if ((s_dwMaxSleepMs < kdwDefaultMinSleepMs) || (s_dwMaxSleepMs > kdwDefaultMaxSleepMs))
+ {
+ // A steady state that's too small would retry the evac checks too often on
+ // an ongoing basis. A steady state that's too high could result in a
+ // profiler hanging around way too long when it's actually ready to be
+ // unloaded.
+ s_dwMaxSleepMs = kdwDefaultMaxSleepMs;
+ }
+ }
+
+ // Take note of when the detach was requested and how long to sleep for
+ ULONGLONG ui64ExpectedCompletionMilliseconds;
+ ULONGLONG ui64DetachStartTime;
+ {
+ CRITSEC_Holder csh(ProfilingAPIUtility::GetStatusCrst());
+
+ _ASSERTE(s_profilerDetachInfo.m_pEEToProf != NULL);
+ ui64ExpectedCompletionMilliseconds = s_profilerDetachInfo.m_dwExpectedCompletionMilliseconds;
+ ui64DetachStartTime = s_profilerDetachInfo.m_ui64DetachStartTime;
+ }
+
+ // ui64SleepMilliseconds is calculated to ensure that CLR checks evacuation status roughly:
+ // * After profiler's ui64ExpectedCompletionMilliseconds hint has elapsed (but not
+ // too soon)
+ // * At least once more after 2*ui64ExpectedCompletionMilliseconds have elapsed
+ // (but not too soon)
+ // * Occasionally thereafter (steady state)
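+    //
+    // Worked example: with the defaults above and a 5000ms hint, the first check lands
+    // at ~5s (sleep 5000ms), the second at ~10s (sleep whatever remains until the 10s
+    // mark), and each check thereafter sleeps s_dwMaxSleepMs. Every sleep is clamped to
+    // [s_dwMinSleepMs, s_dwMaxSleepMs] below.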
+
+ ULONGLONG ui64ElapsedMilliseconds = CLRGetTickCount64() - ui64DetachStartTime;
+ ULONGLONG ui64SleepMilliseconds;
+ if (ui64ExpectedCompletionMilliseconds > ui64ElapsedMilliseconds)
+ {
+ // Haven't hit ui64ExpectedCompletionMilliseconds yet, so sleep for remainder
+ ui64SleepMilliseconds = ui64ExpectedCompletionMilliseconds - ui64ElapsedMilliseconds;
+ }
+ else if ((2*ui64ExpectedCompletionMilliseconds) > ui64ElapsedMilliseconds)
+ {
+ // We're between ui64ExpectedCompletionMilliseconds &
+ // 2*ui64ExpectedCompletionMilliseconds, so sleep until
+ // 2*ui64ExpectedCompletionMilliseconds have transpired
+ ui64SleepMilliseconds = (2*ui64ExpectedCompletionMilliseconds) - ui64ElapsedMilliseconds;
+ }
+ else
+ {
+ // Steady state
+ ui64SleepMilliseconds = s_dwMaxSleepMs;
+ }
+
+ // ...but keep it in bounds!
+ ui64SleepMilliseconds = min(
+ max(ui64SleepMilliseconds, s_dwMinSleepMs),
+ s_dwMaxSleepMs);
+
+ // At this point it's safe to cast ui64SleepMilliseconds down to a DWORD since we
+ // know it's between s_dwMinSleepMs & s_dwMaxSleepMs
+ _ASSERTE(ui64SleepMilliseconds <= 0xFFFFffff);
+ ClrSleepEx((DWORD) ui64SleepMilliseconds, FALSE /* alertable */);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Performs the evacuation checks by grabbing the thread store lock, iterating through
+// all EE Threads, and querying each one's evacuation counter. If they're all 0, the
+// profiler is ready to be unloaded.
+//
+// Return Value:
+// Nonzero iff the profiler is fully evacuated and ready to be unloaded.
+//
+
+// static
+BOOL ProfilingAPIDetach::IsProfilerEvacuated()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(g_profControlBlock.curProfStatus.Get() == kProfStatusDetaching);
+
+ // Check evacuation counters on all the threads (see
+ // code:ProfilingAPIUtility::InitializeProfiling#LoadUnloadCallbackSynchronization
+ // for details). Doing this under the thread store lock not only ensures we can
+ // iterate through the Thread objects safely, but also forces us to serialize with
+ // the GC. The latter is important, as server GC enters the profiler on non-EE
+    // Threads, and so evacuation counters might not be incremented during server GC even
+    // though control could be entering the profiler.
+ {
+ ThreadStoreLockHolder TSLockHolder;
+
+ Thread * pThread = ThreadStore::GetAllThreadList(
+ NULL, // cursor thread; always NULL to begin with
+ 0, // mask to AND with Thread::m_State to filter returned threads
+ 0); // bits to match the result of the above AND. (m_State & 0 == 0,
+ // so we won't filter out any threads)
+
+ // Note that, by not filtering out any of the threads, we're intentionally including
+ // stuff like TS_Dead or TS_Unstarted. But that keeps us on the safe
+ // side. If an EE Thread object exists, we want to check its counters to be
+ // absolutely certain it isn't executing in a profiler.
+
+ while (pThread != NULL)
+ {
+ // Note that pThread is still in motion as we check its evacuation counter.
+ // This is ok, because we've already changed the profiler status to
+ // kProfStatusDetaching and flushed CPU buffers. So at this point the counter
+ // will typically only go down to 0 (and not increment anymore), with one
+ // small exception (below). So if we get a read of 0 below, the counter will
+ // typically stay there. Specifically:
+ // * pThread is most likely not about to increment its evacuation counter
+ // from 0 to 1 because pThread sees that the status is
+ // kProfStatusDetaching.
+ // * Note that there is a small race where pThread might actually
+ // increment its evac counter from 0 to 1 (if it dirty-read the
+ // profiler status a tad too early), but that implies that when
+ // pThread rechecks the profiler status (clean read) then pThread
+ // will immediately decrement the evac counter back to 0 and avoid
+ // calling into the EEToProfInterfaceImpl pointer.
+ //
+ // (see
+ // code:ProfilingAPIUtility::InitializeProfiling#LoadUnloadCallbackSynchronization
+ // for details)
+ DWORD dwEvacCounter = pThread->GetProfilerEvacuationCounter();
+ if (dwEvacCounter != 0)
+ {
+ LOG((
+ LF_CORPROF,
+ LL_INFO100,
+ "**PROF: Profiler not yet evacuated because OS Thread ID 0x%x has evac counter of %d (decimal).\n",
+ pThread->GetOSThreadId(),
+ dwEvacCounter));
+ return FALSE;
+ }
+
+ pThread = ThreadStore::GetAllThreadList(pThread, 0, 0);
+ }
+ }
+
+ // FUTURE: When rejit feature crew complete, add code to verify all rejitted
+ // functions are fully reverted and off of all stacks. If this is very easy to
+ // verify (e.g., checking a single value), consider putting it above the loop
+ // above so we can early-out quicker if rejitted code is still around.
+
+ // We got this far without returning, so the profiler is fully evacuated
+ return TRUE;
+}
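+
+// For context, a sketch of the other half of this handshake (the real mechanics are
+// described at
+// code:ProfilingAPIUtility::InitializeProfiling#LoadUnloadCallbackSynchronization; the
+// increment / decrement helper names here are assumed for illustration): each EE Thread
+// brackets its calls into the profiler roughly like
+//
+//     pThread->IncProfilerEvacuationCounter();
+//     if (profiler status is still active) { /* issue the callback */ }
+//     pThread->DecProfilerEvacuationCounter();
+//
+// which is why a sweep above that reads all counters as 0 means no thread can still be
+// executing inside the profiler.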
+
+// ---------------------------------------------------------------------------------------
+// After we've verified a detaching profiler has fully evacuated, call this to unload the
+// profiler and clean up state.
+//
+// Assumptions:
+// Since this is called well after the profiler called RequestProfilerDetach, the
+// profiler must not have any other threads in use. Also, now that the profiler has
+// been evacuated, no CLR threads will be calling into the profiler (thus the
+// profiler will not gain control via CLR threads either). That means the profiler
+// may not call back into the CLR on any other threads.
+//
+
+// static
+void ProfilingAPIDetach::UnloadProfiler()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(g_profControlBlock.curProfStatus.Get() == kProfStatusDetaching);
+
+ {
+ CRITSEC_Holder csh(ProfilingAPIUtility::GetStatusCrst());
+
+ // Notify profiler it's about to be unloaded
+ _ASSERTE(s_profilerDetachInfo.m_pEEToProf != NULL);
+ s_profilerDetachInfo.m_pEEToProf->ProfilerDetachSucceeded();
+
+ // Reset detach state.
+ s_profilerDetachInfo.Init();
+
+ // This deletes the EEToProfInterfaceImpl object managing the detaching profiler,
+ // releases the profiler's callback interfaces, unloads the profiler DLL, sets
+ // the status to kProfStatusNone, and resets g_profControlBlock for use next time
+ // a profiler tries to attach.
+ //
+ // Note that s_profilerDetachInfo.Init() has already NULL'd out
+ // s_profilerDetachInfo.m_pEEToProf, so we won't have a dangling pointer to the
+ // EEToProfInterfaceImpl that's about to be destroyed.
+ ProfilingAPIUtility::TerminateProfiling();
+ }
+
+ ProfilingAPIUtility::LogProfInfo(IDS_PROF_DETACH_COMPLETE);
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIDetach::ProfilingAPIDetachThreadStart
+//
+// Description:
+// Thread proc for DetachThread. Serves as a simple try/catch wrapper around a call to
+// ProfilingAPIDetach::ExecuteEvacuationLoop. This thread proc is specified by
+// code:ProfilingAPIDetach::CreateDetachThread when it spins up the new DetachThread.
+// This occurs when a profiler is either startup-loaded or attach-loaded.
+//
+// Arguments:
+// * LPVOID thread proc param is ignored
+//
+// Return Value:
+// Just returns 0 always.
+//
+
+// static
+DWORD WINAPI ProfilingAPIDetach::ProfilingAPIDetachThreadStart(LPVOID)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // At start of this thread, set its type so SOS !threads and anyone else knows who we
+ // are.
+ ClrFlsSetThreadType(ThreadType_ProfAPI_Detach);
+
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF: DetachThread created and executing.\n"));
+
+ // This try block is a last-ditch stop-gap to prevent an unhandled exception on the
+ // DetachThread from bringing down the process. Note that if the unhandled
+ // exception is a terminal one, then hey, sure, let's tear everything down. Also
+ // note that any naughtiness in the profiler (e.g., throwing an exception from its
+ // Initialize callback) should already be handled before we pop back to here, so this
+ // is just being super paranoid.
+ EX_TRY
+ {
+ // Don't care about return value, thread proc will just return 0 regardless
+ ExecuteEvacuationLoop();
+ }
+ EX_CATCH
+ {
+ _ASSERTE(!"Unhandled exception on profiling API detach thread");
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF: DetachThread exiting.\n"));
+
+ return 0;
+}
+
+// ---------------------------------------------------------------------------------------
+// Called during startup or attach load of a profiler to create a new thread to fill the role of
+// the DetachThread.
+//
+
+// static
+HRESULT ProfilingAPIDetach::CreateDetachThread()
+{
+ // This function is practically a leaf (though not quite), so keeping the contract
+    // strict to allow for maximum flexibility on when this may be called.
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // FUTURE: When reattach with neutered profilers is implemented, this
+ // function should check if a DetachThread already exists (use synchronization
+ // to prevent race), and just return if so.
+
+ HandleHolder hDetachThread;
+
+ // The DetachThread is intentionally not an EE Thread-object thread (it won't
+ // execute managed code).
+ hDetachThread = ::CreateThread(
+ NULL, // lpThreadAttributes; don't want child processes inheriting this handle
+ 0, // dwStackSize (0 = use default)
+ ProfilingAPIDetachThreadStart,
+ NULL, // lpParameter (none to pass)
+ 0, // dwCreationFlags (0 = use default flags, start thread immediately)
+        NULL // lpThreadId (don't need thread ID)
+ );
+ if (hDetachThread == NULL)
+ {
+ DWORD dwErr = GetLastError();
+
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: Failed to create DetachThread. GetLastError=%d.\n",
+ dwErr));
+
+ return HRESULT_FROM_WIN32(dwErr);
+ }
+
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Accessor for ProfilingAPIDetach::s_profilerDetachInfo.m_pEEToProf, which is the
+// profiler being detached (or NULL if no profiler is being detached).
+//
+// Return Value:
+// EEToProfInterfaceImpl * for the profiler being detached.
+//
+
+// static
+EEToProfInterfaceImpl * ProfilingAPIDetach::GetEEToProfPtr()
+{
+ LIMITED_METHOD_CONTRACT;
+ return s_profilerDetachInfo.m_pEEToProf;
+}
+
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
diff --git a/src/vm/profdetach.h b/src/vm/profdetach.h
new file mode 100644
index 0000000000..5c3f55c29f
--- /dev/null
+++ b/src/vm/profdetach.h
@@ -0,0 +1,79 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ProfDetach.h
+//
+
+//
+// Declaration of helper classes and structures used for Profiling API Detaching
+//
+// ======================================================================================
+
+#ifndef __PROFDETACH_H__
+#define __PROFDETACH_H__
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+
+// The struct below is the medium by which RequestProfilerDetach communicates with
+// the DetachThread about a profiler being detached. Initial core attach /
+// detach feature crew will have only one global instance of this struct.
+// When we allow re-attach with neutered profilers, there will likely be a
+// linked list of these, one per profiler in the act of being detached.
+struct ProfilerDetachInfo
+{
+ ProfilerDetachInfo();
+ void Init();
+
+ // NULL if we're not trying to detach a profiler. Otherwise, this is the
+ // EEToProfInterfaceImpl instance we're detaching.
+ //
+ // FUTURE: Although m_pEEToProf, when non-NULL, is always the same as
+ // g_profControlBlock.pProfInterface, that will no longer be the case once we allow
+ // re-attach with neutered profilers.
+ EEToProfInterfaceImpl * m_pEEToProf;
+
+ // Time when profiler originally called RequestProfilerDetach()
+ ULONGLONG m_ui64DetachStartTime;
+
+ // # milliseconds hint profiler specified in RequestProfilerDetach()
+ DWORD m_dwExpectedCompletionMilliseconds;
+};
+
+//--------------------------------------------------------------------------
+// Static-only class to coordinate initialization of the various profiling
+// API detaching structures, plus other utility stuff.
+//
+class ProfilingAPIDetach
+{
+public:
+ static HRESULT Initialize();
+
+ static HRESULT RequestProfilerDetach(DWORD dwExpectedCompletionMilliseconds);
+
+ static HRESULT CreateDetachThread();
+ static DWORD WINAPI ProfilingAPIDetachThreadStart(LPVOID lpParameter);
+ static void ExecuteEvacuationLoop();
+ static BOOL IsProfilerEvacuated();
+
+ static EEToProfInterfaceImpl * GetEEToProfPtr();
+
+private:
+ static ProfilerDetachInfo s_profilerDetachInfo;
+
+ // Signaled by RequestProfilerDetach() when there is detach work ready to be
+ // done by the DetachThread
+ static CLREvent s_eventDetachWorkAvailable;
+
+ static void SleepWhileProfilerEvacuates();
+ static void UnloadProfiler();
+
+ // Prevent instantiation of ProfilingAPIDetach objects (should be static-only)
+ ProfilingAPIDetach();
+ ~ProfilingAPIDetach();
+};
+
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+#endif //__PROFDETACH_H__
diff --git a/src/vm/profilermetadataemitvalidator.cpp b/src/vm/profilermetadataemitvalidator.cpp
new file mode 100644
index 0000000000..bcd573a938
--- /dev/null
+++ b/src/vm/profilermetadataemitvalidator.cpp
@@ -0,0 +1,1788 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#include "common.h"
+#include "profilermetadataemitvalidator.h"
+
+ProfilerMetadataEmitValidator::ProfilerMetadataEmitValidator(IMetaDataEmit* pInnerEmit) :
+m_cRefCount(0)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ReleaseHolder<IGetIMDInternalImport> pGetIMDInternalImport;
+ pInnerEmit->QueryInterface(IID_IGetIMDInternalImport, (void**)&pGetIMDInternalImport);
+ pGetIMDInternalImport->GetIMDInternalImport(&m_pInnerInternalImport);
+
+ pInnerEmit->QueryInterface(IID_IMetaDataImport2, (void**)&m_pInnerImport);
+ pInnerEmit->QueryInterface(IID_IMetaDataAssemblyImport, (void**)&m_pInnerAssemblyImport);
+ pInnerEmit->QueryInterface(IID_IMetaDataEmit2, (void**) &m_pInner);
+ pInnerEmit->QueryInterface(IID_IMetaDataAssemblyEmit, (void**) &m_pInnerAssembly);
+
+    // GetCountWithTokenKind does not count the 0 RID token, thus the max valid RID = count.
+    // Confusingly, the method treats TypeDef specially by ignoring 0x02000001 (the global
+    // <Module> type) as well, so for TypeDef the max valid RID is count+1.
+ maxInitialTypeDef = TokenFromRid(m_pInnerInternalImport->GetCountWithTokenKind(mdtTypeDef) + 1, mdtTypeDef);
+ maxInitialMethodDef = TokenFromRid(m_pInnerInternalImport->GetCountWithTokenKind(mdtMethodDef), mdtMethodDef);
+ maxInitialFieldDef = TokenFromRid(m_pInnerInternalImport->GetCountWithTokenKind(mdtFieldDef), mdtFieldDef);
+ maxInitialMemberRef = TokenFromRid(m_pInnerInternalImport->GetCountWithTokenKind(mdtMemberRef), mdtMemberRef);
+ maxInitialParamDef = TokenFromRid(m_pInnerInternalImport->GetCountWithTokenKind(mdtParamDef), mdtParamDef);
+ maxInitialCustomAttribute = TokenFromRid(m_pInnerInternalImport->GetCountWithTokenKind(mdtCustomAttribute), mdtCustomAttribute);
+ maxInitialEvent = TokenFromRid(m_pInnerInternalImport->GetCountWithTokenKind(mdtEvent), mdtEvent);
+ maxInitialProperty = TokenFromRid(m_pInnerInternalImport->GetCountWithTokenKind(mdtProperty), mdtProperty);
+ maxInitialGenericParam = TokenFromRid(m_pInnerInternalImport->GetCountWithTokenKind(mdtGenericParam), mdtGenericParam);
+}
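+
+// Worked example (illustrative): suppose the module's TypeDef table initially holds the
+// global 0x02000001 entry plus three ordinary types, tokens 0x02000002 - 0x02000004.
+// GetCountWithTokenKind(mdtTypeDef) returns 3, so maxInitialTypeDef = 0x02000004, the
+// highest pre-existing TypeDef token. A later DefineMethod(0x02000004, ...) is rejected
+// with COR_E_NOTSUPPORTED (it would modify a pre-existing type), whereas
+// DefineMethod(0x02000005, ...) targets a type the profiler itself defined and is passed
+// through to the inner emitter.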
+
+ //IUnknown
+HRESULT ProfilerMetadataEmitValidator::QueryInterface(REFIID riid, void** ppInterface)
+{
+ if(riid == IID_IUnknown)
+ {
+ *ppInterface = static_cast<IUnknown*>(static_cast<IMetaDataEmit*>(this));
+ AddRef();
+ }
+ else if(riid == IID_IMetaDataEmit)
+ {
+ *ppInterface = static_cast<IMetaDataEmit*>(this);
+ AddRef();
+ }
+ else if(riid == IID_IMetaDataEmit2)
+ {
+ *ppInterface = static_cast<IMetaDataEmit2*>(this);
+ AddRef();
+ }
+ else if(riid == IID_IMetaDataAssemblyEmit)
+ {
+ *ppInterface = static_cast<IMetaDataAssemblyEmit*>(this);
+ AddRef();
+ }
+ else if (riid == IID_IMetaDataImport)
+ {
+ *ppInterface = static_cast<IMetaDataImport*>(this);
+ AddRef();
+ }
+ else if (riid == IID_IMetaDataImport2)
+ {
+ *ppInterface = static_cast<IMetaDataImport2*>(this);
+ AddRef();
+ }
+ else if (riid == IID_IMetaDataAssemblyImport)
+ {
+ *ppInterface = static_cast<IMetaDataAssemblyImport*>(this);
+ AddRef();
+ }
+ else
+ {
+ return E_NOINTERFACE;
+ }
+
+ return S_OK;
+}
+
+ULONG ProfilerMetadataEmitValidator::AddRef()
+{
+ return InterlockedIncrement(&m_cRefCount);
+}
+
+ULONG ProfilerMetadataEmitValidator::Release()
+{
+ ULONG ret = InterlockedDecrement(&m_cRefCount);
+ if(ret == 0)
+ {
+ delete this;
+ }
+ return ret;
+}
+
+ //IMetaDataEmit
+HRESULT ProfilerMetadataEmitValidator::SetModuleProps(
+ LPCWSTR szName)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::Save(
+ LPCWSTR szFile,
+ DWORD dwSaveFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::SaveToStream(
+ IStream *pIStream,
+ DWORD dwSaveFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetSaveSize(
+ CorSaveSize fSave,
+ DWORD *pdwSaveSize)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineTypeDef(
+ LPCWSTR szTypeDef,
+ DWORD dwTypeDefFlags,
+ mdToken tkExtends,
+ mdToken rtkImplements[],
+ mdTypeDef *ptd)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInner->DefineTypeDef(szTypeDef, dwTypeDefFlags, tkExtends, rtkImplements, ptd);
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineNestedType(
+ LPCWSTR szTypeDef,
+ DWORD dwTypeDefFlags,
+ mdToken tkExtends,
+ mdToken rtkImplements[],
+ mdTypeDef tdEncloser,
+ mdTypeDef *ptd)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInner->DefineNestedType(szTypeDef, dwTypeDefFlags, tkExtends, rtkImplements, tdEncloser, ptd);
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetHandler(
+ IUnknown *pUnk)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineMethod(
+ mdTypeDef td,
+ LPCWSTR szName,
+ DWORD dwMethodFlags,
+ PCCOR_SIGNATURE pvSigBlob,
+ ULONG cbSigBlob,
+ ULONG ulCodeRVA,
+ DWORD dwImplFlags,
+ mdMethodDef *pmd)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing types is not allowed
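+ // (the same baseline guard repeats below for each token kind: tokens at or below the counts
+ // captured in the constructor belong to the original module and must remain immutable, while
+ // profiler-created tokens pass through to the inner emitter)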
+ if (td <= maxInitialTypeDef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->DefineMethod(td, szName, dwMethodFlags, pvSigBlob, cbSigBlob, ulCodeRVA, dwImplFlags, pmd);
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineMethodImpl(
+ mdTypeDef td,
+ mdToken tkBody,
+ mdToken tkDecl)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing types is not allowed
+ if (td <= maxInitialTypeDef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->DefineMethodImpl(td, tkBody, tkDecl);
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineTypeRefByName(
+ mdToken tkResolutionScope,
+ LPCWSTR szName,
+ mdTypeRef *ptr)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInner->DefineTypeRefByName(tkResolutionScope, szName, ptr);
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineImportType(
+ IMetaDataAssemblyImport *pAssemImport,
+ const void *pbHashValue,
+ ULONG cbHashValue,
+ IMetaDataImport *pImport,
+ mdTypeDef tdImport,
+ IMetaDataAssemblyEmit *pAssemEmit,
+ mdTypeRef *ptr)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineMemberRef(
+ mdToken tkImport,
+ LPCWSTR szName,
+ PCCOR_SIGNATURE pvSigBlob,
+ ULONG cbSigBlob,
+ mdMemberRef *pmr)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInner->DefineMemberRef(tkImport, szName, pvSigBlob, cbSigBlob, pmr);
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineImportMember(
+ IMetaDataAssemblyImport *pAssemImport,
+ const void *pbHashValue,
+ ULONG cbHashValue,
+ IMetaDataImport *pImport,
+ mdToken mbMember,
+ IMetaDataAssemblyEmit *pAssemEmit,
+ mdToken tkParent,
+ mdMemberRef *pmr)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineEvent(
+ mdTypeDef td,
+ LPCWSTR szEvent,
+ DWORD dwEventFlags,
+ mdToken tkEventType,
+ mdMethodDef mdAddOn,
+ mdMethodDef mdRemoveOn,
+ mdMethodDef mdFire,
+ mdMethodDef rmdOtherMethods[],
+ mdEvent *pmdEvent)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing types is not allowed
+ if (td <= maxInitialTypeDef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->DefineEvent(td, szEvent, dwEventFlags, tkEventType, mdAddOn, mdRemoveOn, mdFire, rmdOtherMethods, pmdEvent);
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetClassLayout(
+ mdTypeDef td,
+ DWORD dwPackSize,
+ COR_FIELD_OFFSET rFieldOffsets[],
+ ULONG ulClassSize)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing types is not allowed
+ if (td <= maxInitialTypeDef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->SetClassLayout(td, dwPackSize, rFieldOffsets, ulClassSize);
+}
+
+HRESULT ProfilerMetadataEmitValidator::DeleteClassLayout(
+ mdTypeDef td)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetFieldMarshal(
+ mdToken tk,
+ PCCOR_SIGNATURE pvNativeType,
+ ULONG cbNativeType)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing field/property is not allowed
+ if ((TypeFromToken(tk) == mdtProperty && tk <= maxInitialProperty) ||
+ (TypeFromToken(tk) == mdtFieldDef && tk <= maxInitialFieldDef))
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ //if the token wasn't a field/property we let it through just to get
+ //the appropriate error behavior from the inner emitter
+ return m_pInner->SetFieldMarshal(tk, pvNativeType, cbNativeType);
+}
+
+HRESULT ProfilerMetadataEmitValidator::DeleteFieldMarshal(
+ mdToken tk)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefinePermissionSet(
+ mdToken tk,
+ DWORD dwAction,
+ void const *pvPermission,
+ ULONG cbPermission,
+ mdPermission *ppm)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetRVA(
+ mdMethodDef md,
+ ULONG ulRVA)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetTokenFromSig(
+ PCCOR_SIGNATURE pvSig,
+ ULONG cbSig,
+ mdSignature *pmsig)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInner->GetTokenFromSig(pvSig, cbSig, pmsig);
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineModuleRef(
+ LPCWSTR szName,
+ mdModuleRef *pmur)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInner->DefineModuleRef(szName, pmur);
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetParent(
+ mdMemberRef mr,
+ mdToken tk)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing memberref is not allowed
+ if (mr <= maxInitialMemberRef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->SetParent(mr, tk);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetTokenFromTypeSpec(
+ PCCOR_SIGNATURE pvSig,
+ ULONG cbSig,
+ mdTypeSpec *ptypespec)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInner->GetTokenFromTypeSpec(pvSig, cbSig, ptypespec);
+}
+
+HRESULT ProfilerMetadataEmitValidator::SaveToMemory(
+ void *pbData,
+ ULONG cbData)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineUserString(
+ LPCWSTR szString,
+ ULONG cchString,
+ mdString *pstk)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInner->DefineUserString(szString, cchString, pstk);
+}
+
+HRESULT ProfilerMetadataEmitValidator::DeleteToken(
+ mdToken tkObj)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetMethodProps(
+ mdMethodDef md,
+ DWORD dwMethodFlags,
+ ULONG ulCodeRVA,
+ DWORD dwImplFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing methods is not allowed
+ if (md <= maxInitialMethodDef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->SetMethodProps(md, dwMethodFlags, ulCodeRVA, dwImplFlags);
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetTypeDefProps(
+ mdTypeDef td,
+ DWORD dwTypeDefFlags,
+ mdToken tkExtends,
+ mdToken rtkImplements[])
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing types is not allowed
+ if (td <= maxInitialTypeDef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->SetTypeDefProps(td, dwTypeDefFlags, tkExtends, rtkImplements);
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetEventProps(
+ mdEvent ev,
+ DWORD dwEventFlags,
+ mdToken tkEventType,
+ mdMethodDef mdAddOn,
+ mdMethodDef mdRemoveOn,
+ mdMethodDef mdFire,
+ mdMethodDef rmdOtherMethods[])
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing events is not allowed
+ if (ev <= maxInitialEvent)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->SetEventProps(ev, dwEventFlags, tkEventType, mdAddOn, mdRemoveOn, mdFire, rmdOtherMethods);
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetPermissionSetProps(
+ mdToken tk,
+ DWORD dwAction,
+ void const *pvPermission,
+ ULONG cbPermission,
+ mdPermission *ppm)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefinePinvokeMap(
+ mdToken tk,
+ DWORD dwMappingFlags,
+ LPCWSTR szImportName,
+ mdModuleRef mrImportDLL)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing methods is not allowed
+ if (tk <= maxInitialMethodDef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->DefinePinvokeMap(tk, dwMappingFlags, szImportName, mrImportDLL);
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetPinvokeMap(
+ mdToken tk,
+ DWORD dwMappingFlags,
+ LPCWSTR szImportName,
+ mdModuleRef mrImportDLL)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing types is not allowed
+ if (tk <= maxInitialTypeDef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->SetPinvokeMap(tk, dwMappingFlags, szImportName, mrImportDLL);
+}
+
+HRESULT ProfilerMetadataEmitValidator::DeletePinvokeMap(
+ mdToken tk)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineCustomAttribute(
+ mdToken tkOwner,
+ mdToken tkCtor,
+ void const *pCustomAttribute,
+ ULONG cbCustomAttribute,
+ mdCustomAttribute *pcv)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInner->DefineCustomAttribute(tkOwner, tkCtor, pCustomAttribute, cbCustomAttribute, pcv);
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetCustomAttributeValue(
+ mdCustomAttribute pcv,
+ void const *pCustomAttribute,
+ ULONG cbCustomAttribute)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing CAs is not allowed
+ if (pcv <= maxInitialCustomAttribute)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->SetCustomAttributeValue(pcv, pCustomAttribute, cbCustomAttribute);
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineField(
+ mdTypeDef td,
+ LPCWSTR szName,
+ DWORD dwFieldFlags,
+ PCCOR_SIGNATURE pvSigBlob,
+ ULONG cbSigBlob,
+ DWORD dwCPlusTypeFlag,
+ void const *pValue,
+ ULONG cchValue,
+ mdFieldDef *pmd)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing types is not allowed
+ if (td <= maxInitialTypeDef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->DefineField(td, szName, dwFieldFlags, pvSigBlob, cbSigBlob, dwCPlusTypeFlag, pValue, cchValue, pmd);
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineProperty(
+ mdTypeDef td,
+ LPCWSTR szProperty,
+ DWORD dwPropFlags,
+ PCCOR_SIGNATURE pvSig,
+ ULONG cbSig,
+ DWORD dwCPlusTypeFlag,
+ void const *pValue,
+ ULONG cchValue,
+ mdMethodDef mdSetter,
+ mdMethodDef mdGetter,
+ mdMethodDef rmdOtherMethods[],
+ mdProperty *pmdProp)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing types is not allowed
+ if (td <= maxInitialTypeDef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->DefineProperty(td, szProperty, dwPropFlags, pvSig, cbSig, dwCPlusTypeFlag, pValue, cchValue, mdSetter, mdGetter, rmdOtherMethods, pmdProp);
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineParam(
+ mdMethodDef md,
+ ULONG ulParamSeq,
+ LPCWSTR szName,
+ DWORD dwParamFlags,
+ DWORD dwCPlusTypeFlag,
+ void const *pValue,
+ ULONG cchValue,
+ mdParamDef *ppd)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing methods is not allowed
+ if (md <= maxInitialMethodDef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->DefineParam(md, ulParamSeq, szName, dwParamFlags, dwCPlusTypeFlag, pValue, cchValue, ppd);
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetFieldProps(
+ mdFieldDef fd,
+ DWORD dwFieldFlags,
+ DWORD dwCPlusTypeFlag,
+ void const *pValue,
+ ULONG cchValue)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing fields is not allowed
+ if (fd <= maxInitialFieldDef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->SetFieldProps(fd, dwFieldFlags, dwCPlusTypeFlag, pValue, cchValue);
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetPropertyProps(
+ mdProperty pr,
+ DWORD dwPropFlags,
+ DWORD dwCPlusTypeFlag,
+ void const *pValue,
+ ULONG cchValue,
+ mdMethodDef mdSetter,
+ mdMethodDef mdGetter,
+ mdMethodDef rmdOtherMethods[])
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing properties is not allowed
+ if (pr <= maxInitialProperty)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->SetPropertyProps(pr, dwPropFlags, dwCPlusTypeFlag, pValue, cchValue, mdSetter, mdGetter, rmdOtherMethods);
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetParamProps(
+ mdParamDef pd,
+ LPCWSTR szName,
+ DWORD dwParamFlags,
+ DWORD dwCPlusTypeFlag,
+ void const *pValue,
+ ULONG cchValue)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing params is not allowed
+ if (pd <= maxInitialParamDef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->SetParamProps(pd, szName, dwParamFlags, dwCPlusTypeFlag, pValue, cchValue);
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineSecurityAttributeSet(
+ mdToken tkObj,
+ COR_SECATTR rSecAttrs[],
+ ULONG cSecAttrs,
+ ULONG *pulErrorAttr)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::ApplyEditAndContinue(
+ IUnknown *pImport)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::TranslateSigWithScope(
+ IMetaDataAssemblyImport *pAssemImport,
+ const void *pbHashValue,
+ ULONG cbHashValue,
+ IMetaDataImport *import,
+ PCCOR_SIGNATURE pbSigBlob,
+ ULONG cbSigBlob,
+ IMetaDataAssemblyEmit *pAssemEmit,
+ IMetaDataEmit *emit,
+ PCOR_SIGNATURE pvTranslatedSig,
+ ULONG cbTranslatedSigMax,
+ ULONG *pcbTranslatedSig)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetMethodImplFlags(
+ mdMethodDef md,
+ DWORD dwImplFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing methods is not allowed
+ if (md <= maxInitialMethodDef)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->SetMethodImplFlags(md, dwImplFlags);
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetFieldRVA(
+ mdFieldDef fd,
+ ULONG ulRVA)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::Merge(
+ IMetaDataImport *pImport,
+ IMapToken *pHostMapToken,
+ IUnknown *pHandler)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::MergeEnd()
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+ // IMetaDataEmit2
+HRESULT ProfilerMetadataEmitValidator::DefineMethodSpec(
+ mdToken tkParent,
+ PCCOR_SIGNATURE pvSigBlob,
+ ULONG cbSigBlob,
+ mdMethodSpec *pmi)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInner->DefineMethodSpec(tkParent, pvSigBlob, cbSigBlob, pmi);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetDeltaSaveSize(
+ CorSaveSize fSave,
+ DWORD *pdwSaveSize)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::SaveDelta(
+ LPCWSTR szFile,
+ DWORD dwSaveFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::SaveDeltaToStream(
+ IStream *pIStream,
+ DWORD dwSaveFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::SaveDeltaToMemory(
+ void *pbData,
+ ULONG cbData)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineGenericParam(
+ mdToken tk,
+ ULONG ulParamSeq,
+ DWORD dwParamFlags,
+ LPCWSTR szname,
+ DWORD reserved,
+ mdToken rtkConstraints[],
+ mdGenericParam *pgp)
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing methods/types is not allowed
+ if ((TypeFromToken(tk) == mdtTypeDef && tk <= maxInitialTypeDef) ||
+ (TypeFromToken(tk) == mdtMethodDef && tk <= maxInitialMethodDef))
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->DefineGenericParam(tk, ulParamSeq, dwParamFlags, szname, reserved, rtkConstraints, pgp);
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetGenericParamProps(
+ mdGenericParam gp,
+ DWORD dwParamFlags,
+ LPCWSTR szName,
+ DWORD reserved,
+ mdToken rtkConstraints[])
+{
+ LIMITED_METHOD_CONTRACT;
+ //modifying pre-existing generic param is not allowed
+ if (gp <= maxInitialGenericParam)
+ {
+ return COR_E_NOTSUPPORTED;
+ }
+ return m_pInner->SetGenericParamProps(gp, dwParamFlags, szName, reserved, rtkConstraints);
+}
+
+HRESULT ProfilerMetadataEmitValidator::ResetENCLog()
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+ //IMetaDataAssemblyEmit
+HRESULT ProfilerMetadataEmitValidator::DefineAssembly(
+ const void *pbPublicKey,
+ ULONG cbPublicKey,
+ ULONG ulHashAlgId,
+ LPCWSTR szName,
+ const ASSEMBLYMETADATA *pMetaData,
+ DWORD dwAssemblyFlags,
+ mdAssembly *pma)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineAssemblyRef(
+ const void *pbPublicKeyOrToken,
+ ULONG cbPublicKeyOrToken,
+ LPCWSTR szName,
+ const ASSEMBLYMETADATA *pMetaData,
+ const void *pbHashValue,
+ ULONG cbHashValue,
+ DWORD dwAssemblyRefFlags,
+ mdAssemblyRef *pmdar)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineFile(
+ LPCWSTR szName,
+ const void *pbHashValue,
+ ULONG cbHashValue,
+ DWORD dwFileFlags,
+ mdFile *pmdf)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineExportedType(
+ LPCWSTR szName,
+ mdToken tkImplementation,
+ mdTypeDef tkTypeDef,
+ DWORD dwExportedTypeFlags,
+ mdExportedType *pmdct)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::DefineManifestResource(
+ LPCWSTR szName,
+ mdToken tkImplementation,
+ DWORD dwOffset,
+ DWORD dwResourceFlags,
+ mdManifestResource *pmdmr)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetAssemblyProps(
+ mdAssembly pma,
+ const void *pbPublicKey,
+ ULONG cbPublicKey,
+ ULONG ulHashAlgId,
+ LPCWSTR szName,
+ const ASSEMBLYMETADATA *pMetaData,
+ DWORD dwAssemblyFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetAssemblyRefProps(
+ mdAssemblyRef ar,
+ const void *pbPublicKeyOrToken,
+ ULONG cbPublicKeyOrToken,
+ LPCWSTR szName,
+ const ASSEMBLYMETADATA *pMetaData,
+ const void *pbHashValue,
+ ULONG cbHashValue,
+ DWORD dwAssemblyRefFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetFileProps(
+ mdFile file,
+ const void *pbHashValue,
+ ULONG cbHashValue,
+ DWORD dwFileFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetExportedTypeProps(
+ mdExportedType ct,
+ mdToken tkImplementation,
+ mdTypeDef tkTypeDef,
+ DWORD dwExportedTypeFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+HRESULT ProfilerMetadataEmitValidator::SetManifestResourceProps(
+ mdManifestResource mr,
+ mdToken tkImplementation,
+ DWORD dwOffset,
+ DWORD dwResourceFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return COR_E_NOTSUPPORTED;
+}
+
+//IMetaDataImport
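+// All of the import methods below forward unfiltered to the inner import: only mutation needs
+// validation; reading the existing metadata is always safe.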
+void ProfilerMetadataEmitValidator::CloseEnum(HCORENUM hEnum)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pInnerImport->CloseEnum(hEnum);
+}
+
+HRESULT ProfilerMetadataEmitValidator::CountEnum(HCORENUM hEnum, ULONG *pulCount)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->CountEnum(hEnum, pulCount);
+}
+
+HRESULT ProfilerMetadataEmitValidator::ResetEnum(HCORENUM hEnum, ULONG ulPos)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->ResetEnum(hEnum, ulPos);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumTypeDefs(HCORENUM *phEnum, mdTypeDef rTypeDefs[],
+ ULONG cMax, ULONG *pcTypeDefs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumTypeDefs(phEnum, rTypeDefs, cMax, pcTypeDefs);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumInterfaceImpls(HCORENUM *phEnum, mdTypeDef td,
+ mdInterfaceImpl rImpls[], ULONG cMax,
+ ULONG* pcImpls)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumInterfaceImpls(phEnum, td, rImpls, cMax, pcImpls);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumTypeRefs(HCORENUM *phEnum, mdTypeRef rTypeRefs[],
+ ULONG cMax, ULONG* pcTypeRefs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumTypeRefs(phEnum, rTypeRefs, cMax, pcTypeRefs);
+}
+
+HRESULT ProfilerMetadataEmitValidator::FindTypeDefByName(
+ LPCWSTR szTypeDef,
+ mdToken tkEnclosingClass,
+ mdTypeDef *ptd)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->FindTypeDefByName(szTypeDef, tkEnclosingClass, ptd);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetScopeProps(
+ LPWSTR szName,
+ ULONG cchName,
+ ULONG *pchName,
+ GUID *pmvid)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetScopeProps(szName, cchName, pchName, pmvid);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetModuleFromScope(
+ mdModule *pmd)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetModuleFromScope(pmd);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetTypeDefProps(
+ mdTypeDef td,
+ LPWSTR szTypeDef,
+ ULONG cchTypeDef,
+ ULONG *pchTypeDef,
+ DWORD *pdwTypeDefFlags,
+ mdToken *ptkExtends)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetTypeDefProps(td, szTypeDef, cchTypeDef, pchTypeDef, pdwTypeDefFlags, ptkExtends);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetInterfaceImplProps(
+ mdInterfaceImpl iiImpl,
+ mdTypeDef *pClass,
+ mdToken *ptkIface)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetInterfaceImplProps(iiImpl, pClass, ptkIface);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetTypeRefProps(
+ mdTypeRef tr,
+ mdToken *ptkResolutionScope,
+ LPWSTR szName,
+ ULONG cchName,
+ ULONG *pchName)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetTypeRefProps(tr, ptkResolutionScope, szName, cchName, pchName);
+}
+
+HRESULT ProfilerMetadataEmitValidator::ResolveTypeRef(mdTypeRef tr, REFIID riid, IUnknown **ppIScope, mdTypeDef *ptd)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->ResolveTypeRef(tr, riid, ppIScope, ptd);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumMembers(
+ HCORENUM *phEnum,
+ mdTypeDef cl,
+ mdToken rMembers[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumMembers(phEnum, cl, rMembers, cMax, pcTokens);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumMembersWithName(
+ HCORENUM *phEnum,
+ mdTypeDef cl,
+ LPCWSTR szName,
+ mdToken rMembers[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumMembersWithName(phEnum, cl, szName, rMembers, cMax, pcTokens);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumMethods(
+ HCORENUM *phEnum,
+ mdTypeDef cl,
+ mdMethodDef rMethods[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumMethods(phEnum, cl, rMethods, cMax, pcTokens);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumMethodsWithName(
+ HCORENUM *phEnum,
+ mdTypeDef cl,
+ LPCWSTR szName,
+ mdMethodDef rMethods[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumMethodsWithName(phEnum, cl, szName, rMethods, cMax, pcTokens);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumFields(
+ HCORENUM *phEnum,
+ mdTypeDef cl,
+ mdFieldDef rFields[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumFields(phEnum, cl, rFields, cMax, pcTokens);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumFieldsWithName(
+ HCORENUM *phEnum,
+ mdTypeDef cl,
+ LPCWSTR szName,
+ mdFieldDef rFields[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumFieldsWithName(phEnum, cl, szName, rFields, cMax, pcTokens);
+}
+
+
+HRESULT ProfilerMetadataEmitValidator::EnumParams(
+ HCORENUM *phEnum,
+ mdMethodDef mb,
+ mdParamDef rParams[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumParams(phEnum, mb, rParams, cMax, pcTokens);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumMemberRefs(
+ HCORENUM *phEnum,
+ mdToken tkParent,
+ mdMemberRef rMemberRefs[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumMemberRefs(phEnum, tkParent, rMemberRefs, cMax, pcTokens);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumMethodImpls(
+ HCORENUM *phEnum,
+ mdTypeDef td,
+ mdToken rMethodBody[],
+ mdToken rMethodDecl[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumMethodImpls(phEnum, td, rMethodBody, rMethodDecl, cMax, pcTokens);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumPermissionSets(
+ HCORENUM *phEnum,
+ mdToken tk,
+ DWORD dwActions,
+ mdPermission rPermission[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumPermissionSets(phEnum, tk, dwActions, rPermission, cMax, pcTokens);
+}
+
+HRESULT ProfilerMetadataEmitValidator::FindMember(
+ mdTypeDef td,
+ LPCWSTR szName,
+ PCCOR_SIGNATURE pvSigBlob,
+ ULONG cbSigBlob,
+ mdToken *pmb)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->FindMember(td, szName, pvSigBlob, cbSigBlob, pmb);
+}
+
+HRESULT ProfilerMetadataEmitValidator::FindMethod(
+ mdTypeDef td,
+ LPCWSTR szName,
+ PCCOR_SIGNATURE pvSigBlob,
+ ULONG cbSigBlob,
+ mdMethodDef *pmb)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->FindMethod(td, szName, pvSigBlob, cbSigBlob, pmb);
+}
+
+HRESULT ProfilerMetadataEmitValidator::FindField(
+ mdTypeDef td,
+ LPCWSTR szName,
+ PCCOR_SIGNATURE pvSigBlob,
+ ULONG cbSigBlob,
+ mdFieldDef *pmb)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->FindField(td, szName, pvSigBlob, cbSigBlob, pmb);
+}
+
+HRESULT ProfilerMetadataEmitValidator::FindMemberRef(
+ mdTypeRef td,
+ LPCWSTR szName,
+ PCCOR_SIGNATURE pvSigBlob,
+ ULONG cbSigBlob,
+ mdMemberRef *pmr)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->FindMemberRef(td, szName, pvSigBlob, cbSigBlob, pmr);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetMethodProps(
+ mdMethodDef mb,
+ mdTypeDef *pClass,
+ LPWSTR szMethod,
+ ULONG cchMethod,
+ ULONG *pchMethod,
+ DWORD *pdwAttr,
+ PCCOR_SIGNATURE *ppvSigBlob,
+ ULONG *pcbSigBlob,
+ ULONG *pulCodeRVA,
+ DWORD *pdwImplFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetMethodProps(mb, pClass, szMethod, cchMethod, pchMethod, pdwAttr, ppvSigBlob, pcbSigBlob, pulCodeRVA, pdwImplFlags);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetMemberRefProps(
+ mdMemberRef mr,
+ mdToken *ptk,
+ LPWSTR szMember,
+ ULONG cchMember,
+ ULONG *pchMember,
+ PCCOR_SIGNATURE *ppvSigBlob,
+ ULONG *pbSig)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetMemberRefProps(mr, ptk, szMember, cchMember, pchMember, ppvSigBlob, pbSig);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumProperties(
+ HCORENUM *phEnum,
+ mdTypeDef td,
+ mdProperty rProperties[],
+ ULONG cMax,
+ ULONG *pcProperties)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumProperties(phEnum, td, rProperties, cMax, pcProperties);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumEvents(
+ HCORENUM *phEnum,
+ mdTypeDef td,
+ mdEvent rEvents[],
+ ULONG cMax,
+ ULONG *pcEvents)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumEvents(phEnum, td, rEvents, cMax, pcEvents);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetEventProps(
+ mdEvent ev,
+ mdTypeDef *pClass,
+ LPCWSTR szEvent,
+ ULONG cchEvent,
+ ULONG *pchEvent,
+ DWORD *pdwEventFlags,
+ mdToken *ptkEventType,
+ mdMethodDef *pmdAddOn,
+ mdMethodDef *pmdRemoveOn,
+ mdMethodDef *pmdFire,
+ mdMethodDef rmdOtherMethod[],
+ ULONG cMax,
+ ULONG *pcOtherMethod)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetEventProps(ev, pClass, szEvent, cchEvent, pchEvent, pdwEventFlags, ptkEventType, pmdAddOn, pmdRemoveOn, pmdFire, rmdOtherMethod, cMax, pcOtherMethod);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumMethodSemantics(
+ HCORENUM *phEnum,
+ mdMethodDef mb,
+ mdToken rEventProp[],
+ ULONG cMax,
+ ULONG *pcEventProp)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumMethodSemantics(phEnum, mb, rEventProp, cMax, pcEventProp);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetMethodSemantics(
+ mdMethodDef mb,
+ mdToken tkEventProp,
+ DWORD *pdwSemanticsFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetMethodSemantics(mb, tkEventProp, pdwSemanticsFlags);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetClassLayout(
+ mdTypeDef td,
+ DWORD *pdwPackSize,
+ COR_FIELD_OFFSET rFieldOffset[],
+ ULONG cMax,
+ ULONG *pcFieldOffset,
+ ULONG *pulClassSize)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetClassLayout(td, pdwPackSize, rFieldOffset, cMax, pcFieldOffset, pulClassSize);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetFieldMarshal(
+ mdToken tk,
+ PCCOR_SIGNATURE *ppvNativeType,
+ ULONG *pcbNativeType)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetFieldMarshal(tk, ppvNativeType, pcbNativeType);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetRVA(
+ mdToken tk,
+ ULONG *pulCodeRVA,
+ DWORD *pdwImplFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetRVA(tk, pulCodeRVA, pdwImplFlags);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetPermissionSetProps(
+ mdPermission pm,
+ DWORD *pdwAction,
+ void const **ppvPermission,
+ ULONG *pcbPermission)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetPermissionSetProps(pm, pdwAction, ppvPermission, pcbPermission);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetSigFromToken(
+ mdSignature mdSig,
+ PCCOR_SIGNATURE *ppvSig,
+ ULONG *pcbSig)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetSigFromToken(mdSig, ppvSig, pcbSig);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetModuleRefProps(
+ mdModuleRef mur,
+ LPWSTR szName,
+ ULONG cchName,
+ ULONG *pchName)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetModuleRefProps(mur, szName, cchName, pchName);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumModuleRefs(
+ HCORENUM *phEnum,
+ mdModuleRef rModuleRefs[],
+ ULONG cmax,
+ ULONG *pcModuleRefs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumModuleRefs(phEnum, rModuleRefs, cmax, pcModuleRefs);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetTypeSpecFromToken(
+ mdTypeSpec typespec,
+ PCCOR_SIGNATURE *ppvSig,
+ ULONG *pcbSig)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetTypeSpecFromToken(typespec, ppvSig, pcbSig);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetNameFromToken(
+ mdToken tk,
+ MDUTF8CSTR *pszUtf8NamePtr)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetNameFromToken(tk, pszUtf8NamePtr);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumUnresolvedMethods(
+ HCORENUM *phEnum,
+ mdToken rMethods[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumUnresolvedMethods(phEnum, rMethods, cMax, pcTokens);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetUserString(
+ mdString stk,
+ LPWSTR szString,
+ ULONG cchString,
+ ULONG *pchString)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetUserString(stk, szString, cchString, pchString);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetPinvokeMap(
+ mdToken tk,
+ DWORD *pdwMappingFlags,
+ LPWSTR szImportName,
+ ULONG cchImportName,
+ ULONG *pchImportName,
+ mdModuleRef *pmrImportDLL)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetPinvokeMap(tk, pdwMappingFlags, szImportName, cchImportName, pchImportName, pmrImportDLL);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumSignatures(
+ HCORENUM *phEnum,
+ mdSignature rSignatures[],
+ ULONG cmax,
+ ULONG *pcSignatures)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumSignatures(phEnum, rSignatures, cmax, pcSignatures);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumTypeSpecs(
+ HCORENUM *phEnum,
+ mdTypeSpec rTypeSpecs[],
+ ULONG cmax,
+ ULONG *pcTypeSpecs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumTypeSpecs(phEnum, rTypeSpecs, cmax, pcTypeSpecs);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumUserStrings(
+ HCORENUM *phEnum,
+ mdString rStrings[],
+ ULONG cmax,
+ ULONG *pcStrings)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumUserStrings(phEnum, rStrings, cmax, pcStrings);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetParamForMethodIndex(
+ mdMethodDef md,
+ ULONG ulParamSeq,
+ mdParamDef *ppd)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetParamForMethodIndex(md, ulParamSeq, ppd);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumCustomAttributes(
+ HCORENUM *phEnum,
+ mdToken tk,
+ mdToken tkType,
+ mdCustomAttribute rCustomAttributes[],
+ ULONG cMax,
+ ULONG *pcCustomAttributes)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumCustomAttributes(phEnum, tk, tkType, rCustomAttributes, cMax, pcCustomAttributes);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetCustomAttributeProps(
+ mdCustomAttribute cv,
+ mdToken *ptkObj,
+ mdToken *ptkType,
+ void const **ppBlob,
+ ULONG *pcbSize)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetCustomAttributeProps(cv, ptkObj, ptkType, ppBlob, pcbSize);
+}
+
+HRESULT ProfilerMetadataEmitValidator::FindTypeRef(
+ mdToken tkResolutionScope,
+ LPCWSTR szName,
+ mdTypeRef *ptr)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->FindTypeRef(tkResolutionScope, szName, ptr);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetMemberProps(
+ mdToken mb,
+ mdTypeDef *pClass,
+ LPWSTR szMember,
+ ULONG cchMember,
+ ULONG *pchMember,
+ DWORD *pdwAttr,
+ PCCOR_SIGNATURE *ppvSigBlob,
+ ULONG *pcbSigBlob,
+ ULONG *pulCodeRVA,
+ DWORD *pdwImplFlags,
+ DWORD *pdwCPlusTypeFlag,
+ UVCP_CONSTANT *ppValue,
+ ULONG *pcchValue)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetMemberProps(mb, pClass, szMember, cchMember, pchMember, pdwAttr, ppvSigBlob, pcbSigBlob, pulCodeRVA, pdwImplFlags, pdwCPlusTypeFlag, ppValue, pcchValue);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetFieldProps(
+ mdFieldDef mb,
+ mdTypeDef *pClass,
+ LPWSTR szField,
+ ULONG cchField,
+ ULONG *pchField,
+ DWORD *pdwAttr,
+ PCCOR_SIGNATURE *ppvSigBlob,
+ ULONG *pcbSigBlob,
+ DWORD *pdwCPlusTypeFlag,
+ UVCP_CONSTANT *ppValue,
+ ULONG *pcchValue)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetFieldProps(mb, pClass, szField, cchField, pchField, pdwAttr, ppvSigBlob, pcbSigBlob, pdwCPlusTypeFlag, ppValue, pcchValue);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetPropertyProps(
+ mdProperty prop,
+ mdTypeDef *pClass,
+ LPCWSTR szProperty,
+ ULONG cchProperty,
+ ULONG *pchProperty,
+ DWORD *pdwPropFlags,
+ PCCOR_SIGNATURE *ppvSig,
+ ULONG *pbSig,
+ DWORD *pdwCPlusTypeFlag,
+ UVCP_CONSTANT *ppDefaultValue,
+ ULONG *pcchDefaultValue,
+ mdMethodDef *pmdSetter,
+ mdMethodDef *pmdGetter,
+ mdMethodDef rmdOtherMethod[],
+ ULONG cMax,
+ ULONG *pcOtherMethod)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetPropertyProps(prop, pClass, szProperty, cchProperty, pchProperty, pdwPropFlags, ppvSig, pbSig, pdwCPlusTypeFlag, ppDefaultValue, pcchDefaultValue, pmdSetter, pmdGetter,
+ rmdOtherMethod, cMax, pcOtherMethod);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetParamProps(
+ mdParamDef tk,
+ mdMethodDef *pmd,
+ ULONG *pulSequence,
+ LPWSTR szName,
+ ULONG cchName,
+ ULONG *pchName,
+ DWORD *pdwAttr,
+ DWORD *pdwCPlusTypeFlag,
+ UVCP_CONSTANT *ppValue,
+ ULONG *pcchValue)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetParamProps(tk, pmd, pulSequence, szName, cchName, pchName, pdwAttr, pdwCPlusTypeFlag, ppValue, pcchValue);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetCustomAttributeByName(
+ mdToken tkObj,
+ LPCWSTR szName,
+ const void **ppData,
+ ULONG *pcbData)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetCustomAttributeByName(tkObj, szName, ppData, pcbData);
+}
+
+BOOL ProfilerMetadataEmitValidator::IsValidToken(
+ mdToken tk)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->IsValidToken(tk);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetNestedClassProps(
+ mdTypeDef tdNestedClass,
+ mdTypeDef *ptdEnclosingClass)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetNestedClassProps(tdNestedClass, ptdEnclosingClass);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetNativeCallConvFromSig(
+ void const *pvSig,
+ ULONG cbSig,
+ ULONG *pCallConv)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetNativeCallConvFromSig(pvSig, cbSig, pCallConv);
+}
+
+HRESULT ProfilerMetadataEmitValidator::IsGlobal(
+ mdToken pd,
+ int *pbGlobal)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->IsGlobal(pd, pbGlobal);
+}
+
+//IMetaDataImport2
+HRESULT ProfilerMetadataEmitValidator::EnumGenericParams(
+ HCORENUM *phEnum,
+ mdToken tk,
+ mdGenericParam rGenericParams[],
+ ULONG cMax,
+ ULONG *pcGenericParams)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumGenericParams(phEnum, tk, rGenericParams, cMax, pcGenericParams);
+}
+
+
+HRESULT ProfilerMetadataEmitValidator::GetGenericParamProps(
+ mdGenericParam gp,
+ ULONG *pulParamSeq,
+ DWORD *pdwParamFlags,
+ mdToken *ptOwner,
+ DWORD *reserved,
+ LPWSTR wzname,
+ ULONG cchName,
+ ULONG *pchName)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetGenericParamProps(gp, pulParamSeq, pdwParamFlags, ptOwner, reserved, wzname, cchName, pchName);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetMethodSpecProps(
+ mdMethodSpec mi,
+ mdToken *tkParent,
+ PCCOR_SIGNATURE *ppvSigBlob,
+ ULONG *pcbSigBlob)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetMethodSpecProps(mi, tkParent, ppvSigBlob, pcbSigBlob);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumGenericParamConstraints(
+ HCORENUM *phEnum,
+ mdGenericParam tk,
+ mdGenericParamConstraint rGenericParamConstraints[],
+ ULONG cMax,
+ ULONG *pcGenericParamConstraints)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumGenericParamConstraints(phEnum, tk, rGenericParamConstraints, cMax, pcGenericParamConstraints);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetGenericParamConstraintProps(
+ mdGenericParamConstraint gpc,
+ mdGenericParam *ptGenericParam,
+ mdToken *ptkConstraintType)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetGenericParamConstraintProps(gpc, ptGenericParam, ptkConstraintType);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetPEKind(
+ DWORD* pdwPEKind,
+ DWORD* pdwMachine)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetPEKind(pdwPEKind, pdwMachine);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetVersionString(
+ LPWSTR pwzBuf,
+ DWORD ccBufSize,
+ DWORD *pccBufSize)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->GetVersionString(pwzBuf, ccBufSize, pccBufSize);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumMethodSpecs(
+ HCORENUM *phEnum,
+ mdToken tk,
+ mdMethodSpec rMethodSpecs[],
+ ULONG cMax,
+ ULONG *pcMethodSpecs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerImport->EnumMethodSpecs(phEnum, tk, rMethodSpecs, cMax, pcMethodSpecs);
+}
+
+
+// IMetaDataAssemblyImport
+HRESULT ProfilerMetadataEmitValidator::GetAssemblyProps(
+ mdAssembly mda,
+ const void **ppbPublicKey,
+ ULONG *pcbPublicKey,
+ ULONG *pulHashAlgId,
+ LPWSTR szName,
+ ULONG cchName,
+ ULONG *pchName,
+ ASSEMBLYMETADATA *pMetaData,
+ DWORD *pdwAssemblyFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerAssemblyImport->GetAssemblyProps(mda, ppbPublicKey, pcbPublicKey, pulHashAlgId, szName, cchName, pchName, pMetaData, pdwAssemblyFlags);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetAssemblyRefProps(
+ mdAssemblyRef mdar,
+ const void **ppbPublicKeyOrToken,
+ ULONG *pcbPublicKeyOrToken,
+ LPWSTR szName,
+ ULONG cchName,
+ ULONG *pchName,
+ ASSEMBLYMETADATA *pMetaData,
+ const void **ppbHashValue,
+ ULONG *pcbHashValue,
+ DWORD *pdwAssemblyRefFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerAssemblyImport->GetAssemblyRefProps(mdar, ppbPublicKeyOrToken, pcbPublicKeyOrToken, szName, cchName, pchName, pMetaData, ppbHashValue, pcbHashValue, pdwAssemblyRefFlags);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetFileProps(
+ mdFile mdf,
+ LPWSTR szName,
+ ULONG cchName,
+ ULONG *pchName,
+ const void **ppbHashValue,
+ ULONG *pcbHashValue,
+ DWORD *pdwFileFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerAssemblyImport->GetFileProps(mdf, szName, cchName, pchName, ppbHashValue, pcbHashValue, pdwFileFlags);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetExportedTypeProps(
+ mdExportedType mdct,
+ LPWSTR szName,
+ ULONG cchName,
+ ULONG *pchName,
+ mdToken *ptkImplementation,
+ mdTypeDef *ptkTypeDef,
+ DWORD *pdwExportedTypeFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerAssemblyImport->GetExportedTypeProps(mdct, szName, cchName, pchName, ptkImplementation, ptkTypeDef, pdwExportedTypeFlags);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetManifestResourceProps(
+ mdManifestResource mdmr,
+ LPWSTR szName,
+ ULONG cchName,
+ ULONG *pchName,
+ mdToken *ptkImplementation,
+ DWORD *pdwOffset,
+ DWORD *pdwResourceFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerAssemblyImport->GetManifestResourceProps(mdmr, szName, cchName, pchName, ptkImplementation, pdwOffset, pdwResourceFlags);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumAssemblyRefs(
+ HCORENUM *phEnum,
+ mdAssemblyRef rAssemblyRefs[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerAssemblyImport->EnumAssemblyRefs(phEnum, rAssemblyRefs, cMax, pcTokens);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumFiles(
+ HCORENUM *phEnum,
+ mdFile rFiles[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerAssemblyImport->EnumFiles(phEnum, rFiles, cMax, pcTokens);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumExportedTypes(
+ HCORENUM *phEnum,
+ mdExportedType rExportedTypes[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerAssemblyImport->EnumExportedTypes(phEnum, rExportedTypes, cMax, pcTokens);
+}
+
+HRESULT ProfilerMetadataEmitValidator::EnumManifestResources(
+ HCORENUM *phEnum,
+ mdManifestResource rManifestResources[],
+ ULONG cMax,
+ ULONG *pcTokens)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerAssemblyImport->EnumManifestResources(phEnum, rManifestResources, cMax, pcTokens);
+}
+
+HRESULT ProfilerMetadataEmitValidator::GetAssemblyFromScope(
+ mdAssembly *ptkAssembly)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerAssemblyImport->GetAssemblyFromScope(ptkAssembly);
+}
+
+HRESULT ProfilerMetadataEmitValidator::FindExportedTypeByName(
+ LPCWSTR szName,
+ mdToken mdtExportedType,
+ mdExportedType *ptkExportedType)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerAssemblyImport->FindExportedTypeByName(szName, mdtExportedType, ptkExportedType);
+}
+
+HRESULT ProfilerMetadataEmitValidator::FindManifestResourceByName(
+ LPCWSTR szName,
+ mdManifestResource *ptkManifestResource)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerAssemblyImport->FindManifestResourceByName(szName, ptkManifestResource);
+}
+
+HRESULT ProfilerMetadataEmitValidator::FindAssembliesByName(
+ LPCWSTR szAppBase,
+ LPCWSTR szPrivateBin,
+ LPCWSTR szAssemblyName,
+ IUnknown *ppIUnk[],
+ ULONG cMax,
+ ULONG *pcAssemblies)
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pInnerAssemblyImport->FindAssembliesByName(szAppBase, szPrivateBin, szAssemblyName, ppIUnk, cMax, pcAssemblies);
+}
diff --git a/src/vm/profilermetadataemitvalidator.h b/src/vm/profilermetadataemitvalidator.h
new file mode 100644
index 0000000000..3c0306c24e
--- /dev/null
+++ b/src/vm/profilermetadataemitvalidator.h
@@ -0,0 +1,1027 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef _PROFILER_METADATA_EMIT_VALIDATOR_H_
+#define _PROFILER_METADATA_EMIT_VALIDATOR_H_
+
+// This is a wrapper over the IMetaDataEmit interfaces that prevents profilers from making changes the runtime can't support.
+// Annoyingly, it is legal to QI for IMetaDataImport from the emitter and vice versa, so the wrapper proxies those interfaces as well.
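+// Intended usage (a sketch, not the only possible call pattern): construct the wrapper around the
+// real emitter before exposing it to the profiler, e.g. pEmit = new ProfilerMetadataEmitValidator(pInnerEmit);
+// any call that would rewrite a token at or below the captured baseline then fails with
+// COR_E_NOTSUPPORTED rather than corrupting metadata the runtime has already consumed.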
+
+class ProfilerMetadataEmitValidator : public IMetaDataEmit2, public IMetaDataAssemblyEmit, public IMetaDataImport2, public IMetaDataAssemblyImport
+{
+public:
+ ProfilerMetadataEmitValidator(IMetaDataEmit* pInnerEmit);
+
+ //IUnknown
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void** ppInterface);
+ virtual ULONG STDMETHODCALLTYPE AddRef();
+ virtual ULONG STDMETHODCALLTYPE Release();
+
+ //IMetaDataEmit
+ virtual HRESULT STDMETHODCALLTYPE SetModuleProps(
+ LPCWSTR szName);
+
+ virtual HRESULT STDMETHODCALLTYPE Save(
+ LPCWSTR szFile,
+ DWORD dwSaveFlags);
+
+ virtual HRESULT STDMETHODCALLTYPE SaveToStream(
+ IStream *pIStream,
+ DWORD dwSaveFlags);
+
+ virtual HRESULT STDMETHODCALLTYPE GetSaveSize(
+ CorSaveSize fSave,
+ DWORD *pdwSaveSize);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineTypeDef(
+ LPCWSTR szTypeDef,
+ DWORD dwTypeDefFlags,
+ mdToken tkExtends,
+ mdToken rtkImplements[],
+ mdTypeDef *ptd);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineNestedType(
+ LPCWSTR szTypeDef,
+ DWORD dwTypeDefFlags,
+ mdToken tkExtends,
+ mdToken rtkImplements[],
+ mdTypeDef tdEncloser,
+ mdTypeDef *ptd);
+
+ virtual HRESULT STDMETHODCALLTYPE SetHandler(
+ IUnknown *pUnk);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineMethod(
+ mdTypeDef td,
+ LPCWSTR szName,
+ DWORD dwMethodFlags,
+ PCCOR_SIGNATURE pvSigBlob,
+ ULONG cbSigBlob,
+ ULONG ulCodeRVA,
+ DWORD dwImplFlags,
+ mdMethodDef *pmd);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineMethodImpl(
+ mdTypeDef td,
+ mdToken tkBody,
+ mdToken tkDecl);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineTypeRefByName(
+ mdToken tkResolutionScope,
+ LPCWSTR szName,
+ mdTypeRef *ptr);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineImportType(
+ IMetaDataAssemblyImport *pAssemImport,
+ const void *pbHashValue,
+ ULONG cbHashValue,
+ IMetaDataImport *pImport,
+ mdTypeDef tdImport,
+ IMetaDataAssemblyEmit *pAssemEmit,
+ mdTypeRef *ptr);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineMemberRef(
+ mdToken tkImport,
+ LPCWSTR szName,
+ PCCOR_SIGNATURE pvSigBlob,
+ ULONG cbSigBlob,
+ mdMemberRef *pmr);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineImportMember(
+ IMetaDataAssemblyImport *pAssemImport,
+ const void *pbHashValue,
+ ULONG cbHashValue,
+ IMetaDataImport *pImport,
+ mdToken mbMember,
+ IMetaDataAssemblyEmit *pAssemEmit,
+ mdToken tkParent,
+ mdMemberRef *pmr);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineEvent(
+ mdTypeDef td,
+ LPCWSTR szEvent,
+ DWORD dwEventFlags,
+ mdToken tkEventType,
+ mdMethodDef mdAddOn,
+ mdMethodDef mdRemoveOn,
+ mdMethodDef mdFire,
+ mdMethodDef rmdOtherMethods[],
+ mdEvent *pmdEvent);
+
+ virtual HRESULT STDMETHODCALLTYPE SetClassLayout(
+ mdTypeDef td,
+ DWORD dwPackSize,
+ COR_FIELD_OFFSET rFieldOffsets[],
+ ULONG ulClassSize);
+
+ virtual HRESULT STDMETHODCALLTYPE DeleteClassLayout(
+ mdTypeDef td);
+
+ virtual HRESULT STDMETHODCALLTYPE SetFieldMarshal(
+ mdToken tk,
+ PCCOR_SIGNATURE pvNativeType,
+ ULONG cbNativeType);
+
+ virtual HRESULT STDMETHODCALLTYPE DeleteFieldMarshal(
+ mdToken tk);
+
+ virtual HRESULT STDMETHODCALLTYPE DefinePermissionSet(
+ mdToken tk,
+ DWORD dwAction,
+ void const *pvPermission,
+ ULONG cbPermission,
+ mdPermission *ppm);
+
+ virtual HRESULT STDMETHODCALLTYPE SetRVA(
+ mdMethodDef md,
+ ULONG ulRVA);
+
+ virtual HRESULT STDMETHODCALLTYPE GetTokenFromSig(
+ PCCOR_SIGNATURE pvSig,
+ ULONG cbSig,
+ mdSignature *pmsig);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineModuleRef(
+ LPCWSTR szName,
+ mdModuleRef *pmur);
+
+ virtual HRESULT STDMETHODCALLTYPE SetParent(
+ mdMemberRef mr,
+ mdToken tk);
+
+ virtual HRESULT STDMETHODCALLTYPE GetTokenFromTypeSpec(
+ PCCOR_SIGNATURE pvSig,
+ ULONG cbSig,
+ mdTypeSpec *ptypespec);
+
+ virtual HRESULT STDMETHODCALLTYPE SaveToMemory(
+ void *pbData,
+ ULONG cbData);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineUserString(
+ LPCWSTR szString,
+ ULONG cchString,
+ mdString *pstk);
+
+ virtual HRESULT STDMETHODCALLTYPE DeleteToken(
+ mdToken tkObj);
+
+ virtual HRESULT STDMETHODCALLTYPE SetMethodProps(
+ mdMethodDef md,
+ DWORD dwMethodFlags,
+ ULONG ulCodeRVA,
+ DWORD dwImplFlags);
+
+ virtual HRESULT STDMETHODCALLTYPE SetTypeDefProps(
+ mdTypeDef td,
+ DWORD dwTypeDefFlags,
+ mdToken tkExtends,
+ mdToken rtkImplements[]);
+
+ virtual HRESULT STDMETHODCALLTYPE SetEventProps(
+ mdEvent ev,
+ DWORD dwEventFlags,
+ mdToken tkEventType,
+ mdMethodDef mdAddOn,
+ mdMethodDef mdRemoveOn,
+ mdMethodDef mdFire,
+ mdMethodDef rmdOtherMethods[]);
+
+ virtual HRESULT STDMETHODCALLTYPE SetPermissionSetProps(
+ mdToken tk,
+ DWORD dwAction,
+ void const *pvPermission,
+ ULONG cbPermission,
+ mdPermission *ppm);
+
+ virtual HRESULT STDMETHODCALLTYPE DefinePinvokeMap(
+ mdToken tk,
+ DWORD dwMappingFlags,
+ LPCWSTR szImportName,
+ mdModuleRef mrImportDLL);
+
+ virtual HRESULT STDMETHODCALLTYPE SetPinvokeMap(
+ mdToken tk,
+ DWORD dwMappingFlags,
+ LPCWSTR szImportName,
+ mdModuleRef mrImportDLL);
+
+ virtual HRESULT STDMETHODCALLTYPE DeletePinvokeMap(
+ mdToken tk);
+
+ // New CustomAttribute functions.
+ virtual HRESULT STDMETHODCALLTYPE DefineCustomAttribute(
+ mdToken tkOwner,
+ mdToken tkCtor,
+ void const *pCustomAttribute,
+ ULONG cbCustomAttribute,
+ mdCustomAttribute *pcv);
+
+ virtual HRESULT STDMETHODCALLTYPE SetCustomAttributeValue(
+ mdCustomAttribute pcv,
+ void const *pCustomAttribute,
+ ULONG cbCustomAttribute);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineField(
+ mdTypeDef td,
+ LPCWSTR szName,
+ DWORD dwFieldFlags,
+ PCCOR_SIGNATURE pvSigBlob,
+ ULONG cbSigBlob,
+ DWORD dwCPlusTypeFlag,
+ void const *pValue,
+ ULONG cchValue,
+ mdFieldDef *pmd);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineProperty(
+ mdTypeDef td,
+ LPCWSTR szProperty,
+ DWORD dwPropFlags,
+ PCCOR_SIGNATURE pvSig,
+ ULONG cbSig,
+ DWORD dwCPlusTypeFlag,
+ void const *pValue,
+ ULONG cchValue,
+ mdMethodDef mdSetter,
+ mdMethodDef mdGetter,
+ mdMethodDef rmdOtherMethods[],
+ mdProperty *pmdProp);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineParam(
+ mdMethodDef md,
+ ULONG ulParamSeq,
+ LPCWSTR szName,
+ DWORD dwParamFlags,
+ DWORD dwCPlusTypeFlag,
+ void const *pValue,
+ ULONG cchValue,
+ mdParamDef *ppd);
+
+ virtual HRESULT STDMETHODCALLTYPE SetFieldProps(
+ mdFieldDef fd,
+ DWORD dwFieldFlags,
+ DWORD dwCPlusTypeFlag,
+ void const *pValue,
+ ULONG cchValue);
+
+ virtual HRESULT STDMETHODCALLTYPE SetPropertyProps(
+ mdProperty pr,
+ DWORD dwPropFlags,
+ DWORD dwCPlusTypeFlag,
+ void const *pValue,
+ ULONG cchValue,
+ mdMethodDef mdSetter,
+ mdMethodDef mdGetter,
+ mdMethodDef rmdOtherMethods[]);
+
+ virtual HRESULT STDMETHODCALLTYPE SetParamProps(
+ mdParamDef pd,
+ LPCWSTR szName,
+ DWORD dwParamFlags,
+ DWORD dwCPlusTypeFlag,
+ void const *pValue,
+ ULONG cchValue);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineSecurityAttributeSet(
+ mdToken tkObj,
+ COR_SECATTR rSecAttrs[],
+ ULONG cSecAttrs,
+ ULONG *pulErrorAttr);
+
+ virtual HRESULT STDMETHODCALLTYPE ApplyEditAndContinue(
+ IUnknown *pImport);
+
+ virtual HRESULT STDMETHODCALLTYPE TranslateSigWithScope(
+ IMetaDataAssemblyImport *pAssemImport,
+ const void *pbHashValue,
+ ULONG cbHashValue,
+ IMetaDataImport *import,
+ PCCOR_SIGNATURE pbSigBlob,
+ ULONG cbSigBlob,
+ IMetaDataAssemblyEmit *pAssemEmit,
+ IMetaDataEmit *emit,
+ PCOR_SIGNATURE pvTranslatedSig,
+ ULONG cbTranslatedSigMax,
+ ULONG *pcbTranslatedSig);
+
+ virtual HRESULT STDMETHODCALLTYPE SetMethodImplFlags(
+ mdMethodDef md,
+ DWORD dwImplFlags);
+
+ virtual HRESULT STDMETHODCALLTYPE SetFieldRVA(
+ mdFieldDef fd,
+ ULONG ulRVA);
+
+ virtual HRESULT STDMETHODCALLTYPE Merge(
+ IMetaDataImport *pImport,
+ IMapToken *pHostMapToken,
+ IUnknown *pHandler);
+
+ virtual HRESULT STDMETHODCALLTYPE MergeEnd();
+
+ // IMetaDataEmit2
+ virtual HRESULT STDMETHODCALLTYPE DefineMethodSpec(
+ mdToken tkParent,
+ PCCOR_SIGNATURE pvSigBlob,
+ ULONG cbSigBlob,
+ mdMethodSpec *pmi);
+
+ virtual HRESULT STDMETHODCALLTYPE GetDeltaSaveSize(
+ CorSaveSize fSave,
+ DWORD *pdwSaveSize);
+
+ virtual HRESULT STDMETHODCALLTYPE SaveDelta(
+ LPCWSTR szFile,
+ DWORD dwSaveFlags);
+
+ virtual HRESULT STDMETHODCALLTYPE SaveDeltaToStream(
+ IStream *pIStream,
+ DWORD dwSaveFlags);
+
+ virtual HRESULT STDMETHODCALLTYPE SaveDeltaToMemory(
+ void *pbData,
+ ULONG cbData);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineGenericParam(
+ mdToken tk,
+ ULONG ulParamSeq,
+ DWORD dwParamFlags,
+ LPCWSTR szname,
+ DWORD reserved,
+ mdToken rtkConstraints[],
+ mdGenericParam *pgp);
+
+ virtual HRESULT STDMETHODCALLTYPE SetGenericParamProps(
+ mdGenericParam gp,
+ DWORD dwParamFlags,
+ LPCWSTR szName,
+ DWORD reserved,
+ mdToken rtkConstraints[]);
+
+ virtual HRESULT STDMETHODCALLTYPE ResetENCLog();
+
+ //IMetaDataAssemblyEmit
+ virtual HRESULT STDMETHODCALLTYPE DefineAssembly(
+ const void *pbPublicKey,
+ ULONG cbPublicKey,
+ ULONG ulHashAlgId,
+ LPCWSTR szName,
+ const ASSEMBLYMETADATA *pMetaData,
+ DWORD dwAssemblyFlags,
+ mdAssembly *pma);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineAssemblyRef(
+ const void *pbPublicKeyOrToken,
+ ULONG cbPublicKeyOrToken,
+ LPCWSTR szName,
+ const ASSEMBLYMETADATA *pMetaData,
+ const void *pbHashValue,
+ ULONG cbHashValue,
+ DWORD dwAssemblyRefFlags,
+ mdAssemblyRef *pmdar);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineFile(
+ LPCWSTR szName,
+ const void *pbHashValue,
+ ULONG cbHashValue,
+ DWORD dwFileFlags,
+ mdFile *pmdf);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineExportedType(
+ LPCWSTR szName,
+ mdToken tkImplementation,
+ mdTypeDef tkTypeDef,
+ DWORD dwExportedTypeFlags,
+ mdExportedType *pmdct);
+
+ virtual HRESULT STDMETHODCALLTYPE DefineManifestResource(
+ LPCWSTR szName,
+ mdToken tkImplementation,
+ DWORD dwOffset,
+ DWORD dwResourceFlags,
+ mdManifestResource *pmdmr);
+
+ virtual HRESULT STDMETHODCALLTYPE SetAssemblyProps(
+ mdAssembly pma,
+ const void *pbPublicKey,
+ ULONG cbPublicKey,
+ ULONG ulHashAlgId,
+ LPCWSTR szName,
+ const ASSEMBLYMETADATA *pMetaData,
+ DWORD dwAssemblyFlags);
+
+ virtual HRESULT STDMETHODCALLTYPE SetAssemblyRefProps(
+ mdAssemblyRef ar,
+ const void *pbPublicKeyOrToken,
+ ULONG cbPublicKeyOrToken,
+ LPCWSTR szName,
+ const ASSEMBLYMETADATA *pMetaData,
+ const void *pbHashValue,
+ ULONG cbHashValue,
+ DWORD dwAssemblyRefFlags);
+
+ virtual HRESULT STDMETHODCALLTYPE SetFileProps(
+ mdFile file,
+ const void *pbHashValue,
+ ULONG cbHashValue,
+ DWORD dwFileFlags);
+
+ virtual HRESULT STDMETHODCALLTYPE SetExportedTypeProps(
+ mdExportedType ct,
+ mdToken tkImplementation,
+ mdTypeDef tkTypeDef,
+ DWORD dwExportedTypeFlags);
+
+ virtual HRESULT STDMETHODCALLTYPE SetManifestResourceProps(
+ mdManifestResource mr,
+ mdToken tkImplementation,
+ DWORD dwOffset,
+ DWORD dwResourceFlags);
+
+ //IMetaDataImport
+ virtual void STDMETHODCALLTYPE CloseEnum(HCORENUM hEnum);
+ virtual HRESULT STDMETHODCALLTYPE CountEnum(HCORENUM hEnum, ULONG *pulCount);
+ virtual HRESULT STDMETHODCALLTYPE ResetEnum(HCORENUM hEnum, ULONG ulPos);
+ virtual HRESULT STDMETHODCALLTYPE EnumTypeDefs(HCORENUM *phEnum, mdTypeDef rTypeDefs[],
+ ULONG cMax, ULONG *pcTypeDefs);
+ virtual HRESULT STDMETHODCALLTYPE EnumInterfaceImpls(HCORENUM *phEnum, mdTypeDef td,
+ mdInterfaceImpl rImpls[], ULONG cMax,
+ ULONG* pcImpls);
+ virtual HRESULT STDMETHODCALLTYPE EnumTypeRefs(HCORENUM *phEnum, mdTypeRef rTypeRefs[],
+ ULONG cMax, ULONG* pcTypeRefs);
+
+ virtual HRESULT STDMETHODCALLTYPE FindTypeDefByName( // S_OK or error.
+ LPCWSTR szTypeDef, // [IN] Name of the Type.
+ mdToken tkEnclosingClass, // [IN] TypeDef/TypeRef for Enclosing class.
+ mdTypeDef *ptd); // [OUT] Put the TypeDef token here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetScopeProps( // S_OK or error.
+ LPWSTR szName, // [OUT] Put the name here.
+ ULONG cchName, // [IN] Size of name buffer in wide chars.
+ ULONG *pchName, // [OUT] Put size of name (wide chars) here.
+ GUID *pmvid); // [OUT, OPTIONAL] Put MVID here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetModuleFromScope( // S_OK.
+ mdModule *pmd); // [OUT] Put mdModule token here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetTypeDefProps( // S_OK or error.
+ mdTypeDef td, // [IN] TypeDef token for inquiry.
+ LPWSTR szTypeDef, // [OUT] Put name here.
+ ULONG cchTypeDef, // [IN] size of name buffer in wide chars.
+ ULONG *pchTypeDef, // [OUT] put size of name (wide chars) here.
+ DWORD *pdwTypeDefFlags, // [OUT] Put flags here.
+ mdToken *ptkExtends); // [OUT] Put base class TypeDef/TypeRef here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetInterfaceImplProps( // S_OK or error.
+ mdInterfaceImpl iiImpl, // [IN] InterfaceImpl token.
+ mdTypeDef *pClass, // [OUT] Put implementing class token here.
+ mdToken *ptkIface); // [OUT] Put implemented interface token here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetTypeRefProps( // S_OK or error.
+ mdTypeRef tr, // [IN] TypeRef token.
+ mdToken *ptkResolutionScope, // [OUT] Resolution scope, ModuleRef or AssemblyRef.
+ LPWSTR szName, // [OUT] Name of the TypeRef.
+ ULONG cchName, // [IN] Size of buffer.
+ ULONG *pchName); // [OUT] Size of Name.
+
+ virtual HRESULT STDMETHODCALLTYPE ResolveTypeRef(mdTypeRef tr, REFIID riid, IUnknown **ppIScope, mdTypeDef *ptd);
+
+ virtual HRESULT STDMETHODCALLTYPE EnumMembers( // S_OK, S_FALSE, or error.
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdTypeDef cl, // [IN] TypeDef to scope the enumeration.
+ mdToken rMembers[], // [OUT] Put MemberDefs here.
+ ULONG cMax, // [IN] Max MemberDefs to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumMembersWithName( // S_OK, S_FALSE, or error.
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdTypeDef cl, // [IN] TypeDef to scope the enumeration.
+ LPCWSTR szName, // [IN] Limit results to those with this name.
+ mdToken rMembers[], // [OUT] Put MemberDefs here.
+ ULONG cMax, // [IN] Max MemberDefs to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumMethods( // S_OK, S_FALSE, or error.
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdTypeDef cl, // [IN] TypeDef to scope the enumeration.
+ mdMethodDef rMethods[], // [OUT] Put MethodDefs here.
+ ULONG cMax, // [IN] Max MethodDefs to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumMethodsWithName( // S_OK, S_FALSE, or error.
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdTypeDef cl, // [IN] TypeDef to scope the enumeration.
+ LPCWSTR szName, // [IN] Limit results to those with this name.
+        mdMethodDef rMethods[], // [OUT] Put MethodDefs here.
+ ULONG cMax, // [IN] Max MethodDefs to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumFields( // S_OK, S_FALSE, or error.
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdTypeDef cl, // [IN] TypeDef to scope the enumeration.
+ mdFieldDef rFields[], // [OUT] Put FieldDefs here.
+ ULONG cMax, // [IN] Max FieldDefs to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumFieldsWithName( // S_OK, S_FALSE, or error.
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdTypeDef cl, // [IN] TypeDef to scope the enumeration.
+ LPCWSTR szName, // [IN] Limit results to those with this name.
+ mdFieldDef rFields[], // [OUT] Put MemberDefs here.
+ ULONG cMax, // [IN] Max MemberDefs to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+
+ virtual HRESULT STDMETHODCALLTYPE EnumParams( // S_OK, S_FALSE, or error.
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdMethodDef mb, // [IN] MethodDef to scope the enumeration.
+ mdParamDef rParams[], // [OUT] Put ParamDefs here.
+ ULONG cMax, // [IN] Max ParamDefs to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumMemberRefs( // S_OK, S_FALSE, or error.
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdToken tkParent, // [IN] Parent token to scope the enumeration.
+ mdMemberRef rMemberRefs[], // [OUT] Put MemberRefs here.
+ ULONG cMax, // [IN] Max MemberRefs to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumMethodImpls( // S_OK, S_FALSE, or error
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdTypeDef td, // [IN] TypeDef to scope the enumeration.
+ mdToken rMethodBody[], // [OUT] Put Method Body tokens here.
+ mdToken rMethodDecl[], // [OUT] Put Method Declaration tokens here.
+ ULONG cMax, // [IN] Max tokens to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumPermissionSets( // S_OK, S_FALSE, or error.
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdToken tk, // [IN] if !NIL, token to scope the enumeration.
+ DWORD dwActions, // [IN] if !0, return only these actions.
+ mdPermission rPermission[], // [OUT] Put Permissions here.
+ ULONG cMax, // [IN] Max Permissions to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE FindMember(
+ mdTypeDef td, // [IN] given typedef
+ LPCWSTR szName, // [IN] member name
+ PCCOR_SIGNATURE pvSigBlob, // [IN] point to a blob value of CLR signature
+ ULONG cbSigBlob, // [IN] count of bytes in the signature blob
+ mdToken *pmb); // [OUT] matching memberdef
+
+ virtual HRESULT STDMETHODCALLTYPE FindMethod(
+ mdTypeDef td, // [IN] given typedef
+ LPCWSTR szName, // [IN] member name
+ PCCOR_SIGNATURE pvSigBlob, // [IN] point to a blob value of CLR signature
+ ULONG cbSigBlob, // [IN] count of bytes in the signature blob
+ mdMethodDef *pmb); // [OUT] matching memberdef
+
+ virtual HRESULT STDMETHODCALLTYPE FindField(
+ mdTypeDef td, // [IN] given typedef
+ LPCWSTR szName, // [IN] member name
+ PCCOR_SIGNATURE pvSigBlob, // [IN] point to a blob value of CLR signature
+ ULONG cbSigBlob, // [IN] count of bytes in the signature blob
+ mdFieldDef *pmb); // [OUT] matching memberdef
+
+ virtual HRESULT STDMETHODCALLTYPE FindMemberRef(
+ mdTypeRef td, // [IN] given typeRef
+ LPCWSTR szName, // [IN] member name
+ PCCOR_SIGNATURE pvSigBlob, // [IN] point to a blob value of CLR signature
+ ULONG cbSigBlob, // [IN] count of bytes in the signature blob
+ mdMemberRef *pmr); // [OUT] matching memberref
+
+ virtual HRESULT STDMETHODCALLTYPE GetMethodProps(
+ mdMethodDef mb, // The method for which to get props.
+ mdTypeDef *pClass, // Put method's class here.
+ LPWSTR szMethod, // Put method's name here.
+ ULONG cchMethod, // Size of szMethod buffer in wide chars.
+ ULONG *pchMethod, // Put actual size here
+ DWORD *pdwAttr, // Put flags here.
+ PCCOR_SIGNATURE *ppvSigBlob, // [OUT] point to the blob value of meta data
+ ULONG *pcbSigBlob, // [OUT] actual size of signature blob
+ ULONG *pulCodeRVA, // [OUT] codeRVA
+ DWORD *pdwImplFlags); // [OUT] Impl. Flags
+
+ virtual HRESULT STDMETHODCALLTYPE GetMemberRefProps( // S_OK or error.
+ mdMemberRef mr, // [IN] given memberref
+ mdToken *ptk, // [OUT] Put classref or classdef here.
+ LPWSTR szMember, // [OUT] buffer to fill for member's name
+ ULONG cchMember, // [IN] the count of char of szMember
+ ULONG *pchMember, // [OUT] actual count of char in member name
+ PCCOR_SIGNATURE *ppvSigBlob, // [OUT] point to meta data blob value
+ ULONG *pbSig); // [OUT] actual size of signature blob
+
+ virtual HRESULT STDMETHODCALLTYPE EnumProperties( // S_OK, S_FALSE, or error.
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdTypeDef td, // [IN] TypeDef to scope the enumeration.
+ mdProperty rProperties[], // [OUT] Put Properties here.
+ ULONG cMax, // [IN] Max properties to put.
+ ULONG *pcProperties); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumEvents( // S_OK, S_FALSE, or error.
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdTypeDef td, // [IN] TypeDef to scope the enumeration.
+ mdEvent rEvents[], // [OUT] Put events here.
+ ULONG cMax, // [IN] Max events to put.
+ ULONG *pcEvents); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetEventProps( // S_OK, S_FALSE, or error.
+ mdEvent ev, // [IN] event token
+        mdTypeDef   *pClass, // [OUT] typedef containing the event declaration.
+ LPCWSTR szEvent, // [OUT] Event name
+ ULONG cchEvent, // [IN] the count of wchar of szEvent
+ ULONG *pchEvent, // [OUT] actual count of wchar for event's name
+ DWORD *pdwEventFlags, // [OUT] Event flags.
+ mdToken *ptkEventType, // [OUT] EventType class
+ mdMethodDef *pmdAddOn, // [OUT] AddOn method of the event
+ mdMethodDef *pmdRemoveOn, // [OUT] RemoveOn method of the event
+ mdMethodDef *pmdFire, // [OUT] Fire method of the event
+ mdMethodDef rmdOtherMethod[], // [OUT] other method of the event
+ ULONG cMax, // [IN] size of rmdOtherMethod
+ ULONG *pcOtherMethod); // [OUT] total number of other method of this event
+
+ virtual HRESULT STDMETHODCALLTYPE EnumMethodSemantics( // S_OK, S_FALSE, or error.
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdMethodDef mb, // [IN] MethodDef to scope the enumeration.
+ mdToken rEventProp[], // [OUT] Put Event/Property here.
+ ULONG cMax, // [IN] Max properties to put.
+ ULONG *pcEventProp); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetMethodSemantics( // S_OK, S_FALSE, or error.
+ mdMethodDef mb, // [IN] method token
+ mdToken tkEventProp, // [IN] event/property token.
+ DWORD *pdwSemanticsFlags); // [OUT] the role flags for the method/propevent pair
+
+ virtual HRESULT STDMETHODCALLTYPE GetClassLayout(
+ mdTypeDef td, // [IN] give typedef
+ DWORD *pdwPackSize, // [OUT] 1, 2, 4, 8, or 16
+ COR_FIELD_OFFSET rFieldOffset[], // [OUT] field offset array
+ ULONG cMax, // [IN] size of the array
+ ULONG *pcFieldOffset, // [OUT] needed array size
+ ULONG *pulClassSize); // [OUT] the size of the class
+
+ virtual HRESULT STDMETHODCALLTYPE GetFieldMarshal(
+ mdToken tk, // [IN] given a field's memberdef
+ PCCOR_SIGNATURE *ppvNativeType, // [OUT] native type of this field
+ ULONG *pcbNativeType); // [OUT] the count of bytes of *ppvNativeType
+
+ virtual HRESULT STDMETHODCALLTYPE GetRVA( // S_OK or error.
+        mdToken tk, // Member for which to get the offset
+ ULONG *pulCodeRVA, // The offset
+ DWORD *pdwImplFlags); // the implementation flags
+
+ virtual HRESULT STDMETHODCALLTYPE GetPermissionSetProps(
+ mdPermission pm, // [IN] the permission token.
+ DWORD *pdwAction, // [OUT] CorDeclSecurity.
+ void const **ppvPermission, // [OUT] permission blob.
+ ULONG *pcbPermission); // [OUT] count of bytes of pvPermission.
+
+ virtual HRESULT STDMETHODCALLTYPE GetSigFromToken( // S_OK or error.
+ mdSignature mdSig, // [IN] Signature token.
+        PCCOR_SIGNATURE *ppvSig, // [OUT] return pointer to signature.
+ ULONG *pcbSig); // [OUT] return size of signature.
+
+ virtual HRESULT STDMETHODCALLTYPE GetModuleRefProps( // S_OK or error.
+ mdModuleRef mur, // [IN] moduleref token.
+ __out_ecount_part_opt(cchName, *pchName)
+ LPWSTR szName, // [OUT] buffer to fill with the moduleref name.
+ ULONG cchName, // [IN] size of szName in wide characters.
+ ULONG *pchName); // [OUT] actual count of characters in the name.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumModuleRefs( // S_OK or error.
+ HCORENUM *phEnum, // [IN|OUT] pointer to the enum.
+ mdModuleRef rModuleRefs[], // [OUT] put modulerefs here.
+ ULONG cmax, // [IN] max memberrefs to put.
+ ULONG *pcModuleRefs); // [OUT] put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetTypeSpecFromToken( // S_OK or error.
+ mdTypeSpec typespec, // [IN] TypeSpec token.
+ PCCOR_SIGNATURE *ppvSig, // [OUT] return pointer to TypeSpec signature
+ ULONG *pcbSig); // [OUT] return size of signature.
+
+ virtual HRESULT STDMETHODCALLTYPE GetNameFromToken( // Not Recommended! May be removed!
+ mdToken tk, // [IN] Token to get name from. Must have a name.
+ MDUTF8CSTR *pszUtf8NamePtr); // [OUT] Return pointer to UTF8 name in heap.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumUnresolvedMethods( // S_OK, S_FALSE, or error.
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdToken rMethods[], // [OUT] Put MemberDefs here.
+ ULONG cMax, // [IN] Max MemberDefs to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetUserString( // S_OK or error.
+ mdString stk, // [IN] String token.
+ LPWSTR szString, // [OUT] Copy of string.
+ ULONG cchString, // [IN] Max chars of room in szString.
+ ULONG *pchString); // [OUT] How many chars in actual string.
+
+ virtual HRESULT STDMETHODCALLTYPE GetPinvokeMap( // S_OK or error.
+ mdToken tk, // [IN] FieldDef or MethodDef.
+ DWORD *pdwMappingFlags, // [OUT] Flags used for mapping.
+ LPWSTR szImportName, // [OUT] Import name.
+ ULONG cchImportName, // [IN] Size of the name buffer.
+ ULONG *pchImportName, // [OUT] Actual number of characters stored.
+ mdModuleRef *pmrImportDLL); // [OUT] ModuleRef token for the target DLL.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumSignatures( // S_OK or error.
+ HCORENUM *phEnum, // [IN|OUT] pointer to the enum.
+ mdSignature rSignatures[], // [OUT] put signatures here.
+ ULONG cmax, // [IN] max signatures to put.
+ ULONG *pcSignatures); // [OUT] put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumTypeSpecs( // S_OK or error.
+ HCORENUM *phEnum, // [IN|OUT] pointer to the enum.
+ mdTypeSpec rTypeSpecs[], // [OUT] put TypeSpecs here.
+ ULONG cmax, // [IN] max TypeSpecs to put.
+ ULONG *pcTypeSpecs); // [OUT] put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumUserStrings( // S_OK or error.
+ HCORENUM *phEnum, // [IN/OUT] pointer to the enum.
+ mdString rStrings[], // [OUT] put Strings here.
+ ULONG cmax, // [IN] max Strings to put.
+ ULONG *pcStrings); // [OUT] put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetParamForMethodIndex( // S_OK or error.
+ mdMethodDef md, // [IN] Method token.
+ ULONG ulParamSeq, // [IN] Parameter sequence.
+        mdParamDef *ppd); // [OUT] Put Param token here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumCustomAttributes( // S_OK or error.
+ HCORENUM *phEnum, // [IN, OUT] COR enumerator.
+ mdToken tk, // [IN] Token to scope the enumeration, 0 for all.
+ mdToken tkType, // [IN] Type of interest, 0 for all.
+ mdCustomAttribute rCustomAttributes[], // [OUT] Put custom attribute tokens here.
+ ULONG cMax, // [IN] Size of rCustomAttributes.
+ ULONG *pcCustomAttributes); // [OUT, OPTIONAL] Put count of token values here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetCustomAttributeProps( // S_OK or error.
+ mdCustomAttribute cv, // [IN] CustomAttribute token.
+ mdToken *ptkObj, // [OUT, OPTIONAL] Put object token here.
+ mdToken *ptkType, // [OUT, OPTIONAL] Put AttrType token here.
+ void const **ppBlob, // [OUT, OPTIONAL] Put pointer to data here.
+        ULONG *pcbSize); // [OUT, OPTIONAL] Put size of data here.
+
+ virtual HRESULT STDMETHODCALLTYPE FindTypeRef(
+ mdToken tkResolutionScope, // [IN] ModuleRef, AssemblyRef or TypeRef.
+ LPCWSTR szName, // [IN] TypeRef Name.
+ mdTypeRef *ptr); // [OUT] matching TypeRef.
+
+ virtual HRESULT STDMETHODCALLTYPE GetMemberProps(
+ mdToken mb, // The member for which to get props.
+ mdTypeDef *pClass, // Put member's class here.
+ LPWSTR szMember, // Put member's name here.
+ ULONG cchMember, // Size of szMember buffer in wide chars.
+ ULONG *pchMember, // Put actual size here
+ DWORD *pdwAttr, // Put flags here.
+ PCCOR_SIGNATURE *ppvSigBlob, // [OUT] point to the blob value of meta data
+ ULONG *pcbSigBlob, // [OUT] actual size of signature blob
+ ULONG *pulCodeRVA, // [OUT] codeRVA
+ DWORD *pdwImplFlags, // [OUT] Impl. Flags
+ DWORD *pdwCPlusTypeFlag, // [OUT] flag for value type. selected ELEMENT_TYPE_*
+ UVCP_CONSTANT *ppValue, // [OUT] constant value
+ ULONG *pcchValue); // [OUT] size of constant string in chars, 0 for non-strings.
+
+ virtual HRESULT STDMETHODCALLTYPE GetFieldProps(
+ mdFieldDef mb, // The field for which to get props.
+ mdTypeDef *pClass, // Put field's class here.
+ LPWSTR szField, // Put field's name here.
+ ULONG cchField, // Size of szField buffer in wide chars.
+ ULONG *pchField, // Put actual size here
+ DWORD *pdwAttr, // Put flags here.
+ PCCOR_SIGNATURE *ppvSigBlob, // [OUT] point to the blob value of meta data
+ ULONG *pcbSigBlob, // [OUT] actual size of signature blob
+ DWORD *pdwCPlusTypeFlag, // [OUT] flag for value type. selected ELEMENT_TYPE_*
+ UVCP_CONSTANT *ppValue, // [OUT] constant value
+ ULONG *pcchValue); // [OUT] size of constant string in chars, 0 for non-strings.
+
+ virtual HRESULT STDMETHODCALLTYPE GetPropertyProps( // S_OK, S_FALSE, or error.
+ mdProperty prop, // [IN] property token
+        mdTypeDef   *pClass, // [OUT] typedef containing the property declaration.
+ LPCWSTR szProperty, // [OUT] Property name
+ ULONG cchProperty, // [IN] the count of wchar of szProperty
+ ULONG *pchProperty, // [OUT] actual count of wchar for property name
+ DWORD *pdwPropFlags, // [OUT] property flags.
+ PCCOR_SIGNATURE *ppvSig, // [OUT] property type. pointing to meta data internal blob
+ ULONG *pbSig, // [OUT] count of bytes in *ppvSig
+ DWORD *pdwCPlusTypeFlag, // [OUT] flag for value type. selected ELEMENT_TYPE_*
+ UVCP_CONSTANT *ppDefaultValue, // [OUT] constant value
+ ULONG *pcchDefaultValue, // [OUT] size of constant string in chars, 0 for non-strings.
+ mdMethodDef *pmdSetter, // [OUT] setter method of the property
+ mdMethodDef *pmdGetter, // [OUT] getter method of the property
+ mdMethodDef rmdOtherMethod[], // [OUT] other method of the property
+ ULONG cMax, // [IN] size of rmdOtherMethod
+ ULONG *pcOtherMethod); // [OUT] total number of other method of this property
+
+ virtual HRESULT STDMETHODCALLTYPE GetParamProps( // S_OK or error.
+ mdParamDef tk, // [IN]The Parameter.
+ mdMethodDef *pmd, // [OUT] Parent Method token.
+ ULONG *pulSequence, // [OUT] Parameter sequence.
+ LPWSTR szName, // [OUT] Put name here.
+ ULONG cchName, // [OUT] Size of name buffer.
+ ULONG *pchName, // [OUT] Put actual size of name here.
+ DWORD *pdwAttr, // [OUT] Put flags here.
+ DWORD *pdwCPlusTypeFlag, // [OUT] Flag for value type. selected ELEMENT_TYPE_*.
+ UVCP_CONSTANT *ppValue, // [OUT] Constant value.
+ ULONG *pcchValue); // [OUT] size of constant string in chars, 0 for non-strings.
+
+ virtual HRESULT STDMETHODCALLTYPE GetCustomAttributeByName( // S_OK or error.
+ mdToken tkObj, // [IN] Object with Custom Attribute.
+ LPCWSTR szName, // [IN] Name of desired Custom Attribute.
+ const void **ppData, // [OUT] Put pointer to data here.
+ ULONG *pcbData); // [OUT] Put size of data here.
+
+ virtual BOOL STDMETHODCALLTYPE IsValidToken( // True or False.
+ mdToken tk); // [IN] Given token.
+
+ virtual HRESULT STDMETHODCALLTYPE GetNestedClassProps( // S_OK or error.
+ mdTypeDef tdNestedClass, // [IN] NestedClass token.
+ mdTypeDef *ptdEnclosingClass); // [OUT] EnclosingClass token.
+
+ virtual HRESULT STDMETHODCALLTYPE GetNativeCallConvFromSig( // S_OK or error.
+ void const *pvSig, // [IN] Pointer to signature.
+ ULONG cbSig, // [IN] Count of signature bytes.
+ ULONG *pCallConv); // [OUT] Put calling conv here (see CorPinvokemap).
+
+ virtual HRESULT STDMETHODCALLTYPE IsGlobal( // S_OK or error.
+ mdToken pd, // [IN] Type, Field, or Method token.
+ int *pbGlobal); // [OUT] Put 1 if global, 0 otherwise.
+
+    // IMetaDataImport2
+ virtual HRESULT STDMETHODCALLTYPE EnumGenericParams(
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdToken tk, // [IN] TypeDef or MethodDef whose generic parameters are requested
+ mdGenericParam rGenericParams[], // [OUT] Put GenericParams here.
+ ULONG cMax, // [IN] Max GenericParams to put.
+ ULONG *pcGenericParams); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetGenericParamProps( // S_OK or error.
+ mdGenericParam gp, // [IN] GenericParam
+ ULONG *pulParamSeq, // [OUT] Index of the type parameter
+ DWORD *pdwParamFlags, // [OUT] Flags, for future use (e.g. variance)
+ mdToken *ptOwner, // [OUT] Owner (TypeDef or MethodDef)
+ DWORD *reserved, // [OUT] For future use (e.g. non-type parameters)
+ LPWSTR wzname, // [OUT] Put name here
+ ULONG cchName, // [IN] Size of buffer
+ ULONG *pchName); // [OUT] Put size of name (wide chars) here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetMethodSpecProps(
+ mdMethodSpec mi, // [IN] The method instantiation
+ mdToken *tkParent, // [OUT] MethodDef or MemberRef
+ PCCOR_SIGNATURE *ppvSigBlob, // [OUT] point to the blob value of meta data
+ ULONG *pcbSigBlob); // [OUT] actual size of signature blob
+
+ virtual HRESULT STDMETHODCALLTYPE EnumGenericParamConstraints(
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdGenericParam tk, // [IN] GenericParam whose constraints are requested
+ mdGenericParamConstraint rGenericParamConstraints[], // [OUT] Put GenericParamConstraints here.
+ ULONG cMax, // [IN] Max GenericParamConstraints to put.
+ ULONG *pcGenericParamConstraints); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetGenericParamConstraintProps( // S_OK or error.
+ mdGenericParamConstraint gpc, // [IN] GenericParamConstraint
+ mdGenericParam *ptGenericParam, // [OUT] GenericParam that is constrained
+ mdToken *ptkConstraintType); // [OUT] TypeDef/Ref/Spec constraint
+
+ virtual HRESULT STDMETHODCALLTYPE GetPEKind( // S_OK or error.
+ DWORD* pdwPEKind, // [OUT] The kind of PE (0 - not a PE)
+        DWORD* pdwMachine); // [OUT] Machine as defined in NT header
+
+ virtual HRESULT STDMETHODCALLTYPE GetVersionString( // S_OK or error.
+ LPWSTR pwzBuf, // [OUT] Put version string here.
+ DWORD ccBufSize, // [IN] size of the buffer, in wide chars
+ DWORD *pccBufSize); // [OUT] Size of the version string, wide chars, including terminating nul.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumMethodSpecs(
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdToken tk, // [IN] MethodDef or MemberRef whose MethodSpecs are requested
+ mdMethodSpec rMethodSpecs[], // [OUT] Put MethodSpecs here.
+ ULONG cMax, // [IN] Max tokens to put.
+ ULONG *pcMethodSpecs); // [OUT] Put actual count here.
+
+
+ // IMetaDataAssemblyImport
+ virtual HRESULT STDMETHODCALLTYPE GetAssemblyProps( // S_OK or error.
+ mdAssembly mda, // [IN] The Assembly for which to get the properties.
+ const void **ppbPublicKey, // [OUT] Pointer to the public key.
+ ULONG *pcbPublicKey, // [OUT] Count of bytes in the public key.
+ ULONG *pulHashAlgId, // [OUT] Hash Algorithm.
+        LPWSTR szName, // [OUT] Buffer to fill with assembly's simple name.
+ ULONG cchName, // [IN] Size of buffer in wide chars.
+ ULONG *pchName, // [OUT] Actual # of wide chars in name.
+ ASSEMBLYMETADATA *pMetaData, // [OUT] Assembly MetaData.
+ DWORD *pdwAssemblyFlags); // [OUT] Flags.
+
+ virtual HRESULT STDMETHODCALLTYPE GetAssemblyRefProps( // S_OK or error.
+ mdAssemblyRef mdar, // [IN] The AssemblyRef for which to get the properties.
+ const void **ppbPublicKeyOrToken, // [OUT] Pointer to the public key or token.
+ ULONG *pcbPublicKeyOrToken, // [OUT] Count of bytes in the public key or token.
+ LPWSTR szName, // [OUT] Buffer to fill with name.
+ ULONG cchName, // [IN] Size of buffer in wide chars.
+ ULONG *pchName, // [OUT] Actual # of wide chars in name.
+ ASSEMBLYMETADATA *pMetaData, // [OUT] Assembly MetaData.
+ const void **ppbHashValue, // [OUT] Hash blob.
+ ULONG *pcbHashValue, // [OUT] Count of bytes in the hash blob.
+ DWORD *pdwAssemblyRefFlags); // [OUT] Flags.
+
+ virtual HRESULT STDMETHODCALLTYPE GetFileProps( // S_OK or error.
+ mdFile mdf, // [IN] The File for which to get the properties.
+ LPWSTR szName, // [OUT] Buffer to fill with name.
+ ULONG cchName, // [IN] Size of buffer in wide chars.
+ ULONG *pchName, // [OUT] Actual # of wide chars in name.
+ const void **ppbHashValue, // [OUT] Pointer to the Hash Value Blob.
+ ULONG *pcbHashValue, // [OUT] Count of bytes in the Hash Value Blob.
+ DWORD *pdwFileFlags); // [OUT] Flags.
+
+ virtual HRESULT STDMETHODCALLTYPE GetExportedTypeProps( // S_OK or error.
+ mdExportedType mdct, // [IN] The ExportedType for which to get the properties.
+ LPWSTR szName, // [OUT] Buffer to fill with name.
+ ULONG cchName, // [IN] Size of buffer in wide chars.
+ ULONG *pchName, // [OUT] Actual # of wide chars in name.
+ mdToken *ptkImplementation, // [OUT] mdFile or mdAssemblyRef or mdExportedType.
+ mdTypeDef *ptkTypeDef, // [OUT] TypeDef token within the file.
+ DWORD *pdwExportedTypeFlags); // [OUT] Flags.
+
+ virtual HRESULT STDMETHODCALLTYPE GetManifestResourceProps( // S_OK or error.
+ mdManifestResource mdmr, // [IN] The ManifestResource for which to get the properties.
+ LPWSTR szName, // [OUT] Buffer to fill with name.
+ ULONG cchName, // [IN] Size of buffer in wide chars.
+ ULONG *pchName, // [OUT] Actual # of wide chars in name.
+ mdToken *ptkImplementation, // [OUT] mdFile or mdAssemblyRef that provides the ManifestResource.
+ DWORD *pdwOffset, // [OUT] Offset to the beginning of the resource within the file.
+ DWORD *pdwResourceFlags);// [OUT] Flags.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumAssemblyRefs( // S_OK or error
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdAssemblyRef rAssemblyRefs[], // [OUT] Put AssemblyRefs here.
+ ULONG cMax, // [IN] Max AssemblyRefs to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumFiles( // S_OK or error
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdFile rFiles[], // [OUT] Put Files here.
+ ULONG cMax, // [IN] Max Files to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumExportedTypes( // S_OK or error
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdExportedType rExportedTypes[], // [OUT] Put ExportedTypes here.
+ ULONG cMax, // [IN] Max ExportedTypes to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE EnumManifestResources( // S_OK or error
+ HCORENUM *phEnum, // [IN|OUT] Pointer to the enum.
+ mdManifestResource rManifestResources[], // [OUT] Put ManifestResources here.
+ ULONG cMax, // [IN] Max Resources to put.
+ ULONG *pcTokens); // [OUT] Put # put here.
+
+ virtual HRESULT STDMETHODCALLTYPE GetAssemblyFromScope( // S_OK or error
+ mdAssembly *ptkAssembly); // [OUT] Put token here.
+
+ virtual HRESULT STDMETHODCALLTYPE FindExportedTypeByName( // S_OK or error
+ LPCWSTR szName, // [IN] Name of the ExportedType.
+ mdToken mdtExportedType, // [IN] ExportedType for the enclosing class.
+ mdExportedType *ptkExportedType); // [OUT] Put the ExportedType token here.
+
+ virtual HRESULT STDMETHODCALLTYPE FindManifestResourceByName( // S_OK or error
+ LPCWSTR szName, // [IN] Name of the ManifestResource.
+ mdManifestResource *ptkManifestResource); // [OUT] Put the ManifestResource token here.
+
+ //STDMETHOD_(void, CloseEnum(
+ // HCORENUM hEnum); // Enum to be closed.
+
+ virtual HRESULT STDMETHODCALLTYPE FindAssembliesByName( // S_OK or error
+ LPCWSTR szAppBase, // [IN] optional - can be NULL
+ LPCWSTR szPrivateBin, // [IN] optional - can be NULL
+ LPCWSTR szAssemblyName, // [IN] required - this is the assembly you are requesting
+ IUnknown *ppIUnk[], // [OUT] put IMetaDataAssemblyImport pointers here
+ ULONG cMax, // [IN] The max number to put
+ ULONG *pcAssemblies); // [OUT] The number of assemblies returned.
+
+private:
+ Volatile<LONG> m_cRefCount;
+ ReleaseHolder<IMetaDataImport2> m_pInnerImport;
+ ReleaseHolder<IMDInternalImport> m_pInnerInternalImport;
+ ReleaseHolder<IMetaDataAssemblyImport> m_pInnerAssemblyImport;
+ ReleaseHolder<IMetaDataEmit2> m_pInner;
+ ReleaseHolder<IMetaDataAssemblyEmit> m_pInnerAssembly;
+
+    // all tokens with values <= these maximums are considered pre-existing content
+    // and cannot be altered using this emitter
+ mdTypeDef maxInitialTypeDef;
+ mdMethodDef maxInitialMethodDef;
+ mdFieldDef maxInitialFieldDef;
+ mdMemberRef maxInitialMemberRef;
+ mdParamDef maxInitialParamDef;
+ mdCustomAttribute maxInitialCustomAttribute;
+ mdEvent maxInitialEvent;
+ mdProperty maxInitialProperty;
+ mdGenericParam maxInitialGenericParam;
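+
+    // Illustrative sketch (an assumption about the implementation, not a quote
+    // of it): each emitter method presumably guards edits to pre-existing
+    // content with a check along these lines, where the failure code chosen
+    // here is hypothetical:
+    //
+    //   if (td <= maxInitialTypeDef)
+    //       return E_INVALIDARG;    // refuse to alter pre-existing content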
+};
+
+
+
+#endif
diff --git a/src/vm/profilingenumerators.cpp b/src/vm/profilingenumerators.cpp
new file mode 100644
index 0000000000..4466f6ad59
--- /dev/null
+++ b/src/vm/profilingenumerators.cpp
@@ -0,0 +1,693 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// FILE: ProfilingEnumerators.cpp
+//
+// All enumerators returned by the profiling API to enumerate objects or to catch up on
+// the current CLR state (usually for attaching profilers) are defined in
+// ProfilingEnumerators.h and ProfilingEnumerators.cpp.
+//
+// This cpp file contains implementations specific to the derived enumerator classes, as
+// well as helpers for iterating over AppDomains, assemblies, modules, etc., that have
+// been loaded enough that they may be made visible to profilers.
+//
+
+//
+
+#include "common.h"
+
+#ifdef PROFILING_SUPPORTED
+
+#include "proftoeeinterfaceimpl.h"
+#include "profilingenumerators.h"
+
+// ---------------------------------------------------------------------------------------
+// ProfilerFunctionEnum/ICorProfilerFunctionEnum implementation
+// ---------------------------------------------------------------------------------------
+
+BOOL ProfilerFunctionEnum::Init(BOOL fWithReJITIDs)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+        // If we need to get rejit IDs, that requires a lock (which, in turn, may
+        // switch us to preemptive mode).
+ if (fWithReJITIDs) GC_TRIGGERS; else GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Depending on our GC mode, the jit manager may have to take a
+ // reader lock to prevent things from changing while reading...
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ } CONTRACTL_END;
+
+ EEJitManager::CodeHeapIterator heapIterator;
+ while(heapIterator.Next())
+ {
+ MethodDesc *pMD = heapIterator.GetMethod();
+
+        // On AMD64, a JumpStub is used to call functions that are more than 2GB away.
+        // JumpStubs have a CodeHeader with a NULL MethodDesc, are stored in the code
+        // heap, and are reported by EEJitManager::EnumCode.
+ if (pMD == NULL)
+ continue;
+
+ // There are two possible reasons to skip this MD.
+ //
+ // 1) If it has no metadata (i.e., LCG / IL stubs), then skip it
+ //
+ // 2) If it has no code compiled yet for it, then skip it.
+ //
+ if (pMD->IsNoMetadata() || !pMD->HasNativeCode())
+ {
+ continue;
+ }
+
+ COR_PRF_FUNCTION * element = m_elements.Append();
+ if (element == NULL)
+ {
+ return FALSE;
+ }
+ element->functionId = (FunctionID) pMD;
+
+ if (fWithReJITIDs)
+ {
+            // This call can trigger a GC and take locks, while the non-rejit-ID case does not.
+ element->reJitId = pMD->GetReJitManager()->GetReJitId(pMD, heapIterator.GetMethodCode());
+ }
+ else
+ {
+ element->reJitId = 0;
+ }
+ }
+
+ return TRUE;
+}
+
+// ---------------------------------------------------------------------------------------
+// Catch-up helpers
+//
+// #ProfilerEnumGeneral
+//
+// The following functions factor out the iteration code to ensure we only consider
+// AppDomains, assemblies, modules, etc., that the profiler can safely query about. The
+// parameters to these functions are of types that may have confusing syntax, but all
+// that's going on is that the caller may supply an object instance and a member function
+// on that object (non-static) to be called for each iterated item. This is just a
+// statically-typed way of doing the usual pattern of providing a function pointer for
+// the callback plus a void * context object to pass to the function. If the
+// caller-supplied callback returns anything other than S_OK, the iteration code will
+// stop iterating, and immediately propagate the callback's return value to the original
+// caller. Start looking at code:ProfilerModuleEnum::Init for an example of how these
+// helpers get used.
+//
+// The reason we have helpers to begin with is so we can centralize the logic that
+// enforces the following rather subtle invariants:
+//
+// * Provide enough entities that the profiler gets a complete set of entities from
+// the union of catch-up enumeration and "callbacks" (e.g., ModuleLoadFinished).
+// * Exclude entities that have unloaded to the point where it's no longer safe to
+// query information about them.
+//
+// The catch-up spec summarizes this via the following timeline for any given entity:
+//
+// Entity available in catch-up enumeration
+// < Entity's LoadFinished (or equivalent) callback is issued
+// < Entity NOT available from catch-up enumeration
+// < Entity's UnloadStarted (or equivalent) callback is issued
+//
+// These helpers avoid duplicate code in the ProfilerModuleEnum implementation, and will
+// also help avoid future duplicate code should we decide to provide more catch-up
+// enumerations for attaching profilers to find currently loaded AppDomains, Classes,
+// etc.
+//
+// Note: The debugging API has similar requirements around which entities at which stage
+// of loading are permitted to be enumerated over. See code:IDacDbiInterface#Enumeration
+// for debugger details. Note that profapi's needs are not exactly the same. For example,
+// Assemblies appear in the debugging API enumerations as soon as they begin to load,
+// whereas Assemblies (like all other entities) appear in the profiling API enumerations
+// once their load is complete (i.e., just before AssemblyLoadFinished). Also,
+// debuggers enumerate DomainModules and DomainAssemblies, whereas profilers enumerate
+// Modules and Assemblies.
+//
+// For information about other synchronization issues with profiler catch-up, see
+// code:ProfilingAPIUtility::LoadProfiler#ProfCatchUpSynchronization
+//
+// ---------------------------------------------------------------------------------------
+
+
+//---------------------------------------------------------------------------------------
+//
+// Iterates through exactly those AppDomains that should be visible to the profiler, and
+// calls a caller-supplied function to operate on each iterated AppDomain
+//
+// Arguments:
+// * callbackObj - Caller-supplied object containing the callback method to call for
+// each AppDomain
+// * callbackMethod - Caller-supplied method to call for each AppDomain. If this
+// method returns anything other than S_OK, then the iteration is aborted, and
+// callbackMethod's return value is returned to our caller.
+//
+
+template<typename CallbackObject>
+HRESULT IterateAppDomains(CallbackObject * callbackObj,
+ HRESULT (CallbackObject:: * callbackMethod)(AppDomain *))
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ // (See comments in code:ProfToEEInterfaceImpl::EnumModules for info about contracts.)
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // #ProfilerEnumAppDomains (See also code:#ProfilerEnumGeneral)
+ //
+ // When enumerating AppDomains, ensure this timeline:
+ // AD available in catch-up enumeration
+ // < AppDomainCreationFinished issued
+ // < AD NOT available from catch-up enumeration
+ // < AppDomainShutdownStarted issued
+ //
+ // The AppDomainIterator constructor parameter m_bActive is set to be TRUE below,
+ // meaning only AppDomains in the range [STAGE_ACTIVE;STAGE_CLOSED) will be included
+ // in the iteration.
+ // * AppDomainCreationFinished (with S_OK hrStatus) is issued once the AppDomain
+ // reaches STAGE_ACTIVE.
+ // * AppDomainShutdownStarted is issued while the AppDomain is in STAGE_EXITED,
+ // just before it hits STAGE_FINALIZING. (STAGE_EXITED < STAGE_CLOSED)
+ // * To prevent AppDomains from appearing in the enumeration after we would have
+ // sent the AppDomainShutdownStarted event for them, we must add an
+ // additional check in the enumeration loop to exclude ADs such that
+ // pAppDomain->IsUnloading() (i.e., > STAGE_UNLOAD_REQUESTED). Thus, for an
+ // AD for which AppDomainShutdownStarted callback is issued, we have AD >=
+ // STAGE_EXITED > STAGE_UNLOAD_REQUESTED, and thus, that AD will be excluded
+ // by the pAppDomain->IsUnloading() check.
+ AppDomainIterator appDomainIterator(TRUE);
+ while (appDomainIterator.Next())
+ {
+ AppDomain * pAppDomain = appDomainIterator.GetDomain();
+ if (pAppDomain->IsUnloading())
+ {
+ // Must skip app domains that are in the process of unloading, to ensure
+ // the rules around which entities the profiler should find in the
+ // enumeration. See code:#ProfilerEnumAppDomains for details.
+ continue;
+ }
+
+ // Of course, the AD could start unloading here, but if it does we're guaranteed
+ // the profiler has had a chance to see the Unload callback for the AD, and thus
+ // the profiler can block in that callback until it's done with the enumerator
+ // we provide.
+
+ // Call user-supplied callback, and cancel iteration if requested
+ HRESULT hr = (callbackObj->*callbackMethod)(pAppDomain);
+ if (hr != S_OK)
+ {
+ return hr;
+ }
+ }
+
+ return S_OK;
+}
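+
+// Illustrative usage (a minimal sketch; the callback object and member names
+// below are hypothetical and exist only for this example):
+//
+//   struct AppDomainCounter
+//   {
+//       ULONG m_count;
+//
+//       HRESULT CountAppDomain(AppDomain * pAppDomain)
+//       {
+//           m_count++;
+//           return S_OK;    // any HRESULT other than S_OK aborts the iteration
+//       }
+//   };
+//
+//   AppDomainCounter counter = { 0 };
+//   HRESULT hr = IterateAppDomains<AppDomainCounter>(
+//       &counter,
+//       &AppDomainCounter::CountAppDomain);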
+
+
+//---------------------------------------------------------------------------------------
+//
+// Iterates through exactly those Modules that should be visible to the profiler, and
+// calls a caller-supplied function to operate on each iterated Module. Any module that
+// is loaded domain-neutral is skipped.
+//
+// Arguments:
+// * pAppDomain - Only unshared modules loaded into this AppDomain will be iterated
+// * callbackObj - Caller-supplied object containing the callback method to call for
+// each Module
+// * callbackMethod - Caller-supplied method to call for each Module. If this
+// method returns anything other than S_OK, then the iteration is aborted, and
+// callbackMethod's return value is returned to our caller.
+//
+// Notes:
+// * In theory, this could be broken down into an unshared assembly iterator that
+// takes a callback, and an unshared module iterator (based on an input
+// assembly) that takes a callback. But that kind of granularity is unnecessary
+// now, and probably not useful in the future. If that turns out to be wrong,
+// this can still be broken down that way later on.
+//
+
+template<typename CallbackObject>
+HRESULT IterateUnsharedModules(AppDomain * pAppDomain,
+ CallbackObject * callbackObj,
+ HRESULT (CallbackObject:: * callbackMethod)(Module *))
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // #ProfilerEnumAssemblies (See also code:#ProfilerEnumGeneral)
+ //
+ // When enumerating assemblies, ensure this timeline:
+ // Assembly available in catch-up enumeration
+ // < AssemblyLoadFinished issued
+ // < Assembly NOT available from catch-up enumeration
+ // < AssemblyUnloadStarted issued
+ //
+ // The IterateAssembliesEx parameter below ensures we will only include assemblies at
+ // load level >= FILE_LOAD_LOADLIBRARY.
+ // * AssemblyLoadFinished is issued once the Assembly reaches
+ // code:FILE_LOAD_LOADLIBRARY
+ // * AssemblyUnloadStarted is issued as a result of either:
+ // * AppDomain unloading. In this case such assemblies / modules would be
+ // excluded by the AD iterator above, because it excludes ADs if
+ // pAppDomain->IsUnloading()
+ // * Collectible assemblies unloading. Such assemblies will no longer be
+ // enumerable.
+ //
+ // Note: To determine what happens in a given load stage of a module or assembly,
+ // look at the switch statement in code:DomainFile::DoIncrementalLoad, and keep in
+ // mind that it takes cases on the *next* load stage; in other words, the actions
+ // that appear in a case for a given load stage are actually executed as we attempt
+ // to transition TO that load stage, and thus they actually execute while the module
+ // / assembly is still in the previous load stage.
+ //
+ // Note that the CLR may issue ModuleLoadFinished / AssemblyLoadFinished later, at
+ // FILE_LOAD_EAGER_FIXUPS stage, if for some reason MLF/ALF hadn't been issued
+ // earlier during FILE_LOAD_LOADLIBRARY. This does not affect the timeline, as either
+ // way the profiler receives the notification AFTER the assembly would appear in the
+ // enumeration.
+ //
+ // Although it's called an "AssemblyIterator", it actually iterates over
+ // DomainAssembly instances.
+ AppDomain::AssemblyIterator domainAssemblyIterator =
+ pAppDomain->IterateAssembliesEx(
+ (AssemblyIterationFlags) (kIncludeAvailableToProfilers | kIncludeExecution | kIncludeIntrospection));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+
+ while (domainAssemblyIterator.Next(pDomainAssembly.This()))
+ {
+ _ASSERTE(pDomainAssembly != NULL);
+ _ASSERTE(pDomainAssembly->GetAssembly() != NULL);
+
+ // We're only adding unshared assemblies / modules
+ if (pDomainAssembly->GetAssembly()->IsDomainNeutral())
+ {
+ continue;
+ }
+
+ // #ProfilerEnumModules (See also code:#ProfilerEnumGeneral)
+ //
+ // When enumerating modules, ensure this timeline:
+ // Module available in catch-up enumeration
+ // < ModuleLoadFinished issued
+ // < Module NOT available from catch-up enumeration
+ // < ModuleUnloadStarted issued
+ //
+ // The IterateModules parameter below ensures only modules at level >=
+ // code:FILE_LOAD_LOADLIBRARY will be included in the iteration.
+ //
+ // Details for module callbacks are the same as those for assemblies, so see
+ // code:#ProfilerEnumAssemblies for info on how the timing works.
+ DomainModuleIterator domainModuleIterator =
+ pDomainAssembly->IterateModules(kModIterIncludeAvailableToProfilers);
+ while (domainModuleIterator.Next())
+ {
+ // Call user-supplied callback, and cancel iteration if requested
+ HRESULT hr = (callbackObj->*callbackMethod)(domainModuleIterator.GetModule());
+ if (hr != S_OK)
+ {
+ return hr;
+ }
+ }
+ }
+
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+// ProfilerModuleEnum implementation
+//---------------------------------------------------------------------------------------
+
+
+//---------------------------------------------------------------------------------------
+// This is a helper class used by ProfilerModuleEnum when determining which shared
+// modules should be added to the enumerator. See code:ProfilerModuleEnum::Init for how
+// this gets used
+
+class IterateAppDomainsForSharedModule
+{
+public:
+ IterateAppDomainsForSharedModule(CDynArray< ModuleID > * pElements, Module * pModule)
+ : m_pElements(pElements), m_pModule(pModule)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ //---------------------------------------------------------------------------------------
+ // Callback passed to IterateAppDomains, that takes the currently iterated AppDomain,
+ // and adds m_pModule to the enumerator if it's loaded into the AppDomain. See
+ // code:ProfilerModuleEnum::Init for how this gets used.
+ //
+ // Arguments:
+ // * pAppDomain - Current AppDomain being iterated.
+ //
+ // Return Value:
+ // * S_OK = the iterator should continue after we return.
+ // * S_FALSE = we verified m_pModule is loaded into this AppDomain, so no need
+ // for the iterator to continue with the next AppDomain
+ // * error indicating a failure
+ //
+ HRESULT AddSharedModuleForAppDomain(AppDomain * pAppDomain)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ DomainFile * pDomainFile = m_pModule->FindDomainFile(pAppDomain);
+ if ((pDomainFile == NULL) || !pDomainFile->IsAvailableToProfilers())
+ {
+ // This AD doesn't contain a fully loaded DomainFile for m_pModule. So continue
+ // iterating with the next AD
+ return S_OK;
+ }
+
+ ModuleID * pElement = m_pElements->Append();
+ if (pElement == NULL)
+ {
+ // Stop iteration with error
+ return E_OUTOFMEMORY;
+ }
+
+ // If we're here, we found a fully loaded DomainFile for m_pModule. So add
+        // m_pModule to our array, and there's no need to look at other ADs for this
+ // m_pModule.
+ *pElement = (ModuleID) m_pModule;
+ return S_FALSE;
+ }
+
+private:
+ // List of ModuleIDs in the enumerator we're building
+ CDynArray< ModuleID > * m_pElements;
+
+ // Shared Module we're testing for load status in the iterated ADs.
+ Module * m_pModule;
+};
+
+
+//---------------------------------------------------------------------------------------
+//
+// Callback passed to IterateAppDomains, that takes the currently iterated AppDomain,
+// and then iterates through the unshared modules loaded into that AD. See
+// code:ProfilerModuleEnum::Init for how this gets used.
+//
+// Arguments:
+// * pAppDomain - Current AppDomain being iterated.
+//
+// Return Value:
+// * S_OK = the iterator should continue after we return.
+// * error indicating a failure
+//
+
+HRESULT ProfilerModuleEnum::AddUnsharedModulesFromAppDomain(AppDomain * pAppDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ return IterateUnsharedModules<ProfilerModuleEnum>(
+ pAppDomain,
+ this,
+ &ProfilerModuleEnum::AddUnsharedModule);
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Callback passed to IterateUnsharedModules, that takes the currently iterated unshared
+// Module, and adds it to the enumerator. See code:ProfilerModuleEnum::Init for how this
+// gets used.
+//
+// Arguments:
+// * pModule - Current Module being iterated.
+//
+// Return Value:
+// * S_OK = the iterator should continue after we return.
+// * error indicating a failure
+//
+HRESULT ProfilerModuleEnum::AddUnsharedModule(Module * pModule)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ ModuleID * pElement = m_elements.Append();
+ if (pElement == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ *pElement = (ModuleID) pModule;
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Populate the module enumerator that's about to be given to the profiler. This is
+// called from the ICorProfilerInfo3::EnumModules implementation.
+//
+// This code controls how the above iterator helpers and callbacks are used, so you might
+// want to look here first to understand how the helpers and callbacks fit together.
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+HRESULT ProfilerModuleEnum::Init()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ // (See comments in code:ProfToEEInterfaceImpl::EnumModules for info about contracts.)
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // When an assembly or module is loaded into an AppDomain, a separate DomainFile is
+ // created (one per pairing of the AppDomain with the module or assembly). This means
+ // that we'll create multiple DomainFiles for the same module if it is loaded
+ // domain-neutral (i.e., "shared"). The profiling API callbacks shield the profiler
+ // from this, and only report a given module the first time it's loaded. So a
+ // profiler sees only one ModuleLoadFinished for a module loaded domain-neutral, even
+ // though the module may be used by multiple AppDomains. The module enumerator must
+ // mirror the behavior of the profiling API callbacks, by avoiding duplicate Modules
+ // in the module list we return to the profiler. So first add unshared modules (non
+ // domain-neutral) to the enumerator, and then separately add any shared modules that
+ // were loaded into at least one AD.
+
+ // First, iterate through all ADs. For each one, call
+ // AddUnsharedModulesFromAppDomain, which iterates through all UNSHARED modules and
+ // adds them to the enumerator.
+ hr = IterateAppDomains<ProfilerModuleEnum>(
+ this,
+ &ProfilerModuleEnum::AddUnsharedModulesFromAppDomain);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ // Next, find all SHARED modules that have a corresponding DomainModule loaded into
+ // at least one AppDomain with a load level high enough that it should be visible to
+ // profilers. For each such shared module, add it once to the enumerator. Note that
+ // enumerating assemblies/modules from the SharedDomain uses different internal CLR
+    // iterators than enumerating DomainAssemblies/DomainModules from AppDomains. So we
+    // need to special-case the iteration here. We could probably factor the following
+ // into yet more iterator helpers the same way we've already done for the
+ // DomainAssembly/DomainModule iterators above, but it's unclear how useful that
+ // would be.
+ SharedDomain::SharedAssemblyIterator sharedAssemblyIterator;
+ while (sharedAssemblyIterator.Next())
+ {
+ Assembly * pAssembly = sharedAssemblyIterator.GetAssembly();
+ Assembly::ModuleIterator moduleIterator = pAssembly->IterateModules();
+ while (moduleIterator.Next())
+ {
+ Module * pModule = moduleIterator.GetModule();
+
+ // Create an instance of this helper class (IterateAppDomainsForSharedModule)
+ // to remember which Module we're testing. This will be used as our callback
+ // for when we iterate AppDomains trying to find at least one AD that has loaded
+ // pModule enough that pModule would be made visible to profilers.
+ IterateAppDomainsForSharedModule iterateAppDomainsForSharedModule(&m_elements, pModule);
+ hr = IterateAppDomains<IterateAppDomainsForSharedModule>(
+ &iterateAppDomainsForSharedModule,
+ &IterateAppDomainsForSharedModule::AddSharedModuleForAppDomain);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ }
+ }
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Callback passed to IterateAppDomains, that takes the currently iterated AppDomain,
+// and adds it to the enumerator if it has loaded the given module. See
+// code:IterateAppDomainContainingModule::PopulateArray for how this gets used.
+//
+// Arguments:
+// * pAppDomain - Current AppDomain being iterated.
+//
+// Return Value:
+// * S_OK = the iterator should continue after we return.
+// * error indicating a failure
+//
+HRESULT IterateAppDomainContainingModule::AddAppDomainContainingModule(AppDomain * pAppDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ // This method iterates over AppDomains, which adds, then releases, a reference on
+ // each AppDomain iterated. This causes locking, and can cause triggering if the
+ // AppDomain gets destroyed as a result of the release. (See code:AppDomainIterator::Next
+ // and its call to code:AppDomain::Release.)
+ GC_TRIGGERS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ DomainFile * pDomainFile = m_pModule->FindDomainFile(pAppDomain);
+ if ((pDomainFile != NULL) && (pDomainFile->IsAvailableToProfilers()))
+ {
+ if (m_index < m_cAppDomainIds)
+ {
+ m_rgAppDomainIds[m_index] = reinterpret_cast<AppDomainID>(pAppDomain);
+ }
+
+ m_index++;
+ }
+
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Populate the array with AppDomains in which the given module has been loaded
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+HRESULT IterateAppDomainContainingModule::PopulateArray()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ // This method iterates over AppDomains, which adds, then releases, a reference on
+ // each AppDomain iterated. This causes locking, and can cause triggering if the
+ // AppDomain gets destroyed as a result of the release. (See code:AppDomainIterator::Next
+ // and its call to code:AppDomain::Release.)
+ GC_TRIGGERS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = IterateAppDomains<IterateAppDomainContainingModule>(
+ this,
+ &IterateAppDomainContainingModule::AddAppDomainContainingModule);
+
+ *m_pcAppDomainIds = m_index;
+
+ return hr;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Populate the thread enumerator that's about to be given to the profiler. This is
+// called from the ICorProfilerInfo4::EnumThreads implementation.
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+HRESULT ProfilerThreadEnum::Init()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ ThreadStoreLockHolder tsLock;
+
+ Thread * pThread = NULL;
+
+ //
+ // Walk through all the threads with the lock taken
+    // Because the thread enumeration status needs to change before the ThreadCreated/ThreadDestroyed
+ // callback, we need to:
+ // 1. Include Thread::TS_FullyInitialized threads for ThreadCreated
+ // 2. Exclude Thread::TS_Dead | Thread::TS_ReportDead for ThreadDestroyed
+ //
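+    // (GetAllThreadList walks the store, returning each thread whose state,
+    // masked with the second argument, matches the third -- here, fully
+    // initialized threads that are not dead or pending-dead. This reading of
+    // the mask/bits arguments is an inference, not a documented contract.)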
+ while((pThread = ThreadStore::GetAllThreadList(
+ pThread,
+ Thread::TS_Dead | Thread::TS_ReportDead | Thread::TS_FullyInitialized,
+ Thread::TS_FullyInitialized
+ )))
+ {
+ if (pThread->IsGCSpecial())
+ continue;
+
+ *m_elements.Append() = (ThreadID) pThread;
+ }
+
+ return S_OK;
+}
+
+
+#endif // PROFILING_SUPPORTED
diff --git a/src/vm/profilingenumerators.h b/src/vm/profilingenumerators.h
new file mode 100644
index 0000000000..5f5d4f487e
--- /dev/null
+++ b/src/vm/profilingenumerators.h
@@ -0,0 +1,531 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// FILE: ProfilingEnumerators.h
+//
+// All enumerators returned by the profiling API to enumerate objects or to catch up on
+// the current CLR state (usually for attaching profilers) are defined in
+// ProfilingEnumerators.h and ProfilingEnumerators.cpp.
+//
+// This header file contains the base enumerator template implementation, plus the
+// definitions of the derived enumerators.
+//
+
+
+#ifndef __PROFILINGENUMERATORS_H__
+#define __PROFILINGENUMERATORS_H__
+
+
+//---------------------------------------------------------------------------------------
+//
+// ProfilerEnum
+//
+// This class is a one-size-fits-all implementation for COM style enumerators
+//
+// Template parameters:
+// EnumInterface -- the parent interface for this enumerator
+// (e.g., ICorProfilerObjectEnum)
+// Element -- the type of the objects this enumerator returns.
+//
+//     pEnumInterfaceIID -- pointer to the interface ID (IID) for this interface
+// (you probably don't need to use this)
+//
+//
+template< typename EnumInterface, typename Element, const IID* pEnumInterfaceIID = &__uuidof(EnumInterface) >
+class ProfilerEnum : public EnumInterface
+{
+public:
+ ProfilerEnum(CDynArray< Element >* elements);
+ ProfilerEnum();
+ ~ProfilerEnum();
+
+ // IUnknown functions
+
+ virtual HRESULT __stdcall QueryInterface(REFIID id, void** pInterface);
+ virtual ULONG __stdcall AddRef();
+ virtual ULONG __stdcall Release();
+
+
+    // This template assumes that the enumerator conforms to the interface below
+    // (this matches the IEnumXXX interface pattern documented in MSDN)
+
+ virtual HRESULT __stdcall Skip(ULONG count);
+ virtual HRESULT __stdcall Reset();
+ virtual HRESULT __stdcall Clone(EnumInterface** ppEnum);
+ virtual HRESULT __stdcall GetCount(ULONG *count);
+ virtual HRESULT __stdcall Next(ULONG count,
+ Element elements[],
+ ULONG* elementsFetched);
+
+
+protected:
+ ULONG m_currentElement;
+
+ CDynArray< Element > m_elements;
+
+ LONG m_refCount;
+};
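+
+// Illustrative sketch of how a concrete enumerator builds on this template:
+// ProfilerFunctionEnum (implemented in ProfilingEnumerators.cpp) derives
+// roughly as follows -- "roughly" because the exact declaration may differ:
+//
+//   class ProfilerFunctionEnum :
+//       public ProfilerEnum< ICorProfilerFunctionEnum, COR_PRF_FUNCTION >
+//   {
+//   public:
+//       BOOL Init(BOOL fWithReJITIDs = FALSE);   // populates m_elements
+//   };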
+
+//
+//
+// ProfilerEnum implementation
+//
+//
+
+
+//
+// ProfilerEnum::ProfilerEnum
+//
+// Description
+// The enumerator constructor
+//
+// Parameters
+// elements -- the array of elements in the enumeration.
+//
+// Notes
+// The enumerator does NOT take ownership of data in the array of elements;
+// it maintains its own private copy.
+//
+// <TODO>
+// nickbe 12/12/2003 11:31:34
+//
+// If someone comes back and complains that the enumerators are too slow or use
+// too much memory, I can reference count or otherwise garbage collect the data
+// used by the enumerators
+// </TODO>
+//
+//
+template< typename EnumInterface, typename Element, const IID* pEnumInterfaceIID >
+ProfilerEnum< EnumInterface, Element, pEnumInterfaceIID >::ProfilerEnum(CDynArray< Element >* elements) :
+ m_currentElement(0),
+ m_refCount(1)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ const ULONG count = elements->Count();
+ m_elements.AllocateBlockThrowing(count);
+
+ for (ULONG i = 0; i < count; ++i)
+ {
+ m_elements[i] = (*elements)[i];
+ }
+}
+
+template< typename EnumInterface, typename Element, const IID* pEnumInterfaceIID >
+ProfilerEnum< EnumInterface, Element, pEnumInterfaceIID >::ProfilerEnum() :
+ m_currentElement(0),
+ m_refCount(1)
+{
+}
+
+
+//
+// ProfilerEnum::~ProfilerEnum
+//
+// Description
+// Destructor for enumerators
+//
+// Parameters
+// None
+//
+// Returns
+// None
+//
+template< typename EnumInterface, typename Element, const IID* pEnumInterfaceIID >
+ProfilerEnum< EnumInterface, Element, pEnumInterfaceIID >::~ProfilerEnum()
+{
+}
+
+//
+// ProfilerEnum::QueryInterface
+//
+// Description
+// dynamically cast this object to a specific interface.
+//
+// Parameters
+// id -- the interface ID requested
+//      pInterface -- [out] pointer to the appropriate interface
+//
+// Returns
+// S_OK -- if the QueryInterface succeeded
+// E_NOINTERFACE -- if the enumerator does not implement the requested interface
+//
+
+template< typename EnumInterface, typename Element, const IID* pEnumInterfaceIID >
+HRESULT
+ProfilerEnum< EnumInterface, Element, pEnumInterfaceIID >::QueryInterface(REFIID id, void** pInterface)
+{
+ if (*pEnumInterfaceIID == id)
+ {
+ *pInterface = static_cast< EnumInterface* >(this);
+ }
+ else if (IID_IUnknown == id)
+ {
+ *pInterface = static_cast< IUnknown* >(this);
+ }
+ else
+ {
+ *pInterface = NULL;
+ return E_NOINTERFACE;
+ }
+
+ this->AddRef();
+ return S_OK;
+}
+
+template< typename EnumInterface, typename Element, const IID* pEnumInterfaceIID >
+ULONG
+ProfilerEnum< EnumInterface, Element, pEnumInterfaceIID >::AddRef()
+{
+ return InterlockedIncrement(&m_refCount);
+}
+
+template< typename EnumInterface, typename Element, const IID* pEnumInterfaceIID >
+ULONG
+ProfilerEnum< EnumInterface, Element, pEnumInterfaceIID >::Release()
+{
+ ULONG refCount = InterlockedDecrement(&m_refCount);
+
+ if (0 == refCount)
+ {
+ delete this;
+ }
+
+ return refCount;
+}
+
+//
+// ProfilerEnum::Next
+//
+// Description
+// Retrieves elements from the enumeration and advances the enumerator
+//
+// Parameters
+// elementsRequested -- the number of elements to read
+// elements -- [out] an array to store the retrieved elements
+// elementsFetched -- [out] the number of elements actually retrieved
+//
+//
+// Returns
+//    S_OK -- elementsRequested was fully satisfied
+// S_FALSE -- less than elementsRequested were returned
+// E_INVALIDARG
+//
+// Notes
+// if elementsRequested is 1 and elementsFetched is NULL, the enumerator will
+// try to advance 1 item and return S_OK if it is successful
+//
+
+template< typename EnumInterface, typename Element, const IID* pEnumInterfaceIID >
+HRESULT
+ProfilerEnum< EnumInterface, Element, pEnumInterfaceIID >::Next(ULONG elementsRequested,
+ Element elements[],
+ ULONG* elementsFetched)
+{
+ // sanity check the location of the iterator
+ _ASSERTE(0 <= m_currentElement);
+ _ASSERTE(m_currentElement <= static_cast< ULONG >(m_elements.Count()));
+
+    // It's illegal to try to advance more than one element without giving a
+    // legitimate pointer for elementsFetched
+ if ((NULL == elementsFetched) && (1 < elementsRequested))
+ {
+ return E_INVALIDARG;
+ }
+
+ // If, for some reason, you ask for zero elements, well, we'll just tell
+ // you that's fine.
+ if (0 == elementsRequested)
+ {
+ if (NULL != elementsFetched)
+ {
+ *elementsFetched = 0;
+ }
+
+ return S_OK;
+ }
+
+ if (elements == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ // okay, enough with the corner cases.
+
+ // We don't want to walk past the end of our array, so figure out how far we
+ // need to walk.
+ const ULONG elementsToCopy = min(elementsRequested, m_elements.Count() - m_currentElement);
+
+ for (ULONG i = 0; i < elementsToCopy; ++i)
+ {
+ elements[i] = m_elements[m_currentElement + i];
+ }
+
+ // advance the enumerator
+ m_currentElement += elementsToCopy;
+
+ // sanity check that we haven't gone any further than we were supposed to
+ _ASSERTE(0 <= m_currentElement);
+ _ASSERTE(m_currentElement <= static_cast< ULONG >(m_elements.Count()));
+
+
+ if (NULL != elementsFetched)
+ {
+ *elementsFetched = elementsToCopy;
+ }
+
+ if (elementsToCopy < elementsRequested)
+ {
+ return S_FALSE;
+ }
+
+ return S_OK;
+}
+
+
+//
+// ProfilerEnum::GetCount
+//
+// Description
+// Computes the number of elements remaining in the enumeration
+//
+// Parameters
+//    count -- [out] the number of elements remaining in the enumeration
+//
+// Returns
+// S_OK
+// E_INVALIDARG -- if count is an invalid pointer
+//
+//
+
+template< typename EnumInterface, typename Element, const IID* pEnumInterfaceIID >
+HRESULT
+ProfilerEnum< EnumInterface, Element, pEnumInterfaceIID >::GetCount(ULONG* count)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ if (NULL == count)
+ {
+ return E_INVALIDARG;
+ }
+
+ *count = m_elements.Count() - m_currentElement;
+
+ return S_OK;
+}
+
+//
+// ProfilerEnum::Skip
+//
+// Description
+// Advances the enumerator without retrieving any elements.
+//
+// Parameters
+// count -- number of elements to skip
+//
+// Returns
+// S_OK -- if the number of elements skipped was equal to count
+// S_FALSE -- if the number of elements skipped was less than count
+//
+//
+// TODO
+//
+// The IEnumXXX API as documented on MSDN is flawed here. We should really have an
+// out parameter that represents the number of elements actually skipped, although
+// you could theoretically work that number out by calling GetCount() before and
+// after calling Skip(), as sketched below.
+//
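+// In the meantime, a caller can recover that number itself (illustrative only;
+// "pEnum" is a hypothetical enumerator interface pointer):
+//
+//     ULONG before = 0, after = 0;
+//     pEnum->GetCount(&before);
+//     pEnum->Skip(n);
+//     pEnum->GetCount(&after);
+//     ULONG actuallySkipped = before - after;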
+//
+template< typename EnumInterface, typename Element, const IID* pEnumInterfaceIID >
+HRESULT
+ProfilerEnum< EnumInterface, Element, pEnumInterfaceIID >::Skip(ULONG count)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ const ULONG elementsToSkip = min(count, m_elements.Count() - m_currentElement);
+ m_currentElement += elementsToSkip;
+
+ if (elementsToSkip < count)
+ {
+ return S_FALSE;
+ }
+
+ return S_OK;
+}
+
+
+
+//
+// ProfilerEnum::Reset
+//
+// Description
+// Returns the enumerator to the beginning of the enumeration
+//
+// Parameters
+// None
+//
+// Returns
+// S_OK -- always (function never fails)
+//
+//
+
+template< typename EnumInterface, typename Element, const IID* pEnumInterfaceIID >
+HRESULT
+ProfilerEnum< EnumInterface, Element, pEnumInterfaceIID >::Reset()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ m_currentElement = 0;
+ return S_OK;
+}
+
+//
+// ProfilerEnum::Clone
+//
+// Description
+// Creates a copy of this enumerator.
+//
+// Parameters
+// None
+//
+// Returns
+// S_OK -- if copying is successful
+// E_OUTOFMEMORY -- if OOM occurs
+// E_INVALIDARG -- if pInterface is an invalid pointer
+//
+
+template< typename EnumInterface, typename Element, const IID* pEnumInterfaceIID >
+HRESULT
+ProfilerEnum< EnumInterface, Element, pEnumInterfaceIID >::Clone(EnumInterface** pInterface)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ if (pInterface == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ *pInterface = new ProfilerEnum< EnumInterface, Element, pEnumInterfaceIID >(&m_elements);
+ }
+ EX_CATCH
+ {
+ *pInterface = NULL;
+ hr = E_OUTOFMEMORY;
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+
+ return hr;
+}
+
+// ---------------------------------------------------------------------------------------
+// Enumerators have their base class defined here, as an instantiation of ProfilerEnum
+// ---------------------------------------------------------------------------------------
+
+typedef ProfilerEnum< ICorProfilerObjectEnum, ObjectID, &IID_ICorProfilerObjectEnum > ProfilerObjectEnum;
+typedef ProfilerEnum< ICorProfilerFunctionEnum, COR_PRF_FUNCTION, &IID_ICorProfilerFunctionEnum > ProfilerFunctionEnumBase;
+typedef ProfilerEnum< ICorProfilerModuleEnum, ModuleID, &IID_ICorProfilerModuleEnum > ProfilerModuleEnumBase;
+typedef ProfilerEnum< ICorProfilerThreadEnum, ThreadID, &IID_ICorProfilerThreadEnum > ProfilerThreadEnumBase;
+typedef ProfilerEnum< ICorProfilerMethodEnum, COR_PRF_METHOD, &IID_ICorProfilerMethodEnum > ProfilerMethodEnum;
+
+// ---------------------------------------------------------------------------------------
+// This class derives from the template enumerator instantiation, and provides specific
+// code to populate the enumerator with the function list
+
+class ProfilerFunctionEnum : public ProfilerFunctionEnumBase
+{
+public:
+ BOOL Init(BOOL fWithReJITIDs = FALSE);
+};
+
+
+// ---------------------------------------------------------------------------------------
+// This class derives from the template enumerator instantiation, and provides specific
+// code to populate the enumerator with the module list
+
+class ProfilerModuleEnum : public ProfilerModuleEnumBase
+{
+public:
+ HRESULT Init();
+ HRESULT AddUnsharedModulesFromAppDomain(AppDomain * pAppDomain);
+ HRESULT AddUnsharedModule(Module * pModule);
+};
+
+
+class IterateAppDomainContainingModule
+{
+public:
+ IterateAppDomainContainingModule(Module * pModule, ULONG32 cAppDomainIds, ULONG32 * pcAppDomainIds, AppDomainID * pAppDomainIds)
+ : m_pModule(pModule), m_cAppDomainIds(cAppDomainIds), m_pcAppDomainIds(pcAppDomainIds), m_rgAppDomainIds(pAppDomainIds), m_index(0)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE((pModule != NULL) &&
+ ((m_rgAppDomainIds != NULL) || (m_cAppDomainIds == 0)) &&
+ (m_pcAppDomainIds != NULL));
+ }
+
+ HRESULT PopulateArray();
+
+ HRESULT AddAppDomainContainingModule(AppDomain * pAppDomain);
+
+private:
+ Module * m_pModule;
+ ULONG32 m_cAppDomainIds;
+ ULONG32 * m_pcAppDomainIds;
+ AppDomainID * m_rgAppDomainIds;
+ ULONG32 m_index;
+};
+
+
+// ---------------------------------------------------------------------------------------
+// This class derives from the template enumerator instantiation, and provides specific
+// code to populate the enumerator with the thread store
+class ProfilerThreadEnum : public ProfilerThreadEnumBase
+{
+
+public :
+ HRESULT Init();
+};
+
+#endif //__PROFILINGENUMERATORS_H__
diff --git a/src/vm/profilinghelper.cpp b/src/vm/profilinghelper.cpp
new file mode 100644
index 0000000000..1a4f2f4184
--- /dev/null
+++ b/src/vm/profilinghelper.cpp
@@ -0,0 +1,1479 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ProfilingHelper.cpp
+//
+
+//
+// Implementation of helper classes used for miscellaneous purposes within the profiling
+// API
+//
+// ======================================================================================
+
+//
+// #LoadUnloadCallbackSynchronization
+//
+// There is synchronization around loading profilers, unloading profilers, and issuing
+// callbacks to profilers, to ensure that we know when it's safe to detach profilers or
+// to call into profilers. The synchronization scheme is intentionally lockless on the
+// mainline path (issuing callbacks into the profiler), with heavy locking on the
+// non-mainline path (loading / unloading profilers).
+//
+// PROTECTED DATA
+//
+// The synchronization protects the following data:
+//
+// * ProfilingAPIDetach::s_profilerDetachInfo
+// * (volatile) g_profControlBlock.curProfStatus.m_profStatus
+// * (volatile) g_profControlBlock.pProfInterface
+//         * the latter implies the profiler DLL's load status is protected as well, as
+// pProfInterface changes between non-NULL and NULL as a profiler DLL is
+// loaded and unloaded, respectively.
+//
+// SYNCHRONIZATION COMPONENTS
+//
+// * Simple Crst: code:ProfilingAPIUtility::s_csStatus
+// * Lockless, volatile per-thread counters: code:EvacuationCounterHolder
+// * Profiler status transition invariants and CPU buffer flushing:
+// code:CurrentProfilerStatus::Set
+//
+// WRITERS
+//
+// The above data is considered to be "written to" when a profiler is loaded or unloaded,
+// or the status changes (see code:ProfilerStatus), or a request to detach the profiler
+// is received (see code:ProfilingAPIDetach::RequestProfilerDetach), or the DetachThread
+// consumes or modifies the contents of code:ProfilingAPIDetach::s_profilerDetachInfo.
+// All these cases are serialized with each other by the simple Crst:
+// code:ProfilingAPIUtility::s_csStatus
+//
+// READERS
+//
+// Readers are the mainline case and are lockless. A "reader" is anyone who wants to
+// issue a profiler callback. Readers are scattered throughout the runtime, and have the
+// following format:
+// {
+// BEGIN_PIN_PROFILER(CORProfilerTrackAppDomainLoads());
+// g_profControlBlock.pProfInterface->AppDomainCreationStarted(MyAppDomainID);
+// END_PIN_PROFILER();
+// }
+// The BEGIN / END macros do the following:
+// * Evaluate the expression argument (e.g., CORProfilerTrackAppDomainLoads()). This is a
+// "dirty read" as the profiler could be detached at any moment during or after that
+// evaluation.
+// * If true, push a code:EvacuationCounterHolder on the stack, which increments the
+// per-thread evacuation counter (not interlocked).
+// * Re-evaluate the expression argument. This time, it's a "clean read" (see below for
+// why).
+// * If still true, execute the statements inside the BEGIN/END block. Inside that block,
+// the profiler is guaranteed to remain loaded, because the evacuation counter
+// remains nonzero (again, see below).
+// * Once the BEGIN/END block is exited, the evacuation counter is decremented, and the
+// profiler is unpinned and allowed to detach.
+//
+// READER / WRITER COORDINATION
+//
+// The above ensures that a reader never touches g_profControlBlock.pProfInterface and
+// all it embodies (including the profiler DLL code and callback implementations) unless
+// the reader was able to increment its thread's evacuation counter AND re-verify that
+// the profiler's status is still active (the status check is included in the macro's
+// expression argument, such as CORProfilerTrackAppDomainLoads()).
+//
+// At the same time, a profiler DLL is never unloaded (nor
+// g_profControlBlock.pProfInterface deleted and NULLed out) UNLESS the writer performs
+// these actions:
+// * (a) Set the profiler's status to a non-active state like kProfStatusDetaching or
+// kProfStatusNone
+// * (b) Call FlushProcessWriteBuffers()
+// * (c) Grab thread store lock, iterate through all threads, and verify each per-thread
+// evacuation counter is zero.
+//
+// The above steps are why it's considered a "clean read" if a reader first increments
+// its evacuation counter and then checks the profiler status. Once the writer flushes
+// the CPU buffers (b), the reader will see the updated status (from a) and know not to
+// use g_profControlBlock.pProfInterface. And if the reader clean-reads the status before
+// the buffers were flushed, then the reader will have incremented its evacuation counter
+// first, which the writer will be sure to see in (c). For more details about how the
+// evacuation counters work, see code:ProfilingAPIDetach::IsProfilerEvacuated.
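+//
+// A sketch of the writer-side evacuation check described above (illustrative
+// pseudocode only; the real logic lives in
+// code:ProfilingAPIDetach::IsProfilerEvacuated, and the accessor name below is a
+// placeholder):
+//
+//     g_profControlBlock.curProfStatus.Set(kProfStatusDetaching);  // step (a)
+//     ::FlushProcessWriteBuffers();                                // step (b)
+//     // step (c): with the thread store lock held...
+//     for (each Thread * pThread in the thread store)
+//         if (pThread->GetProfilerEvacuationCounter() != 0)
+//             // a reader may still be inside a BEGIN/END_PIN_PROFILER block;
+//             // the profiler is not yet evacuated, so try again later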
+//
+// WHEN ARE BEGIN/END_PIN_PROFILER REQUIRED?
+//
+// In general, any time you access g_profControlBlock.pProfInterface, you must be inside
+// a BEGIN/END_PIN_PROFILER block. This is pretty much always true throughout the EE, but
+// there are some exceptions inside the profiling API code itself, where the BEGIN / END
+// macros are unnecessary:
+// * If you are inside a public ICorProfilerInfo function's implementation, the
+// profiler is already pinned. This is because the profiler called the Info
+// function from either:
+// * a callback implemented inside of g_profControlBlock.pProfInterface, in which
+// case the BEGIN/END macros are already in place around the call to that
+// callback, OR
+// * a hijacked thread or a thread of the profiler's own creation. In either
+// case, it's the profiler's responsibility to end hijacking and end its own
+// threads before requesting a detach. So the profiler DLL is guaranteed not
+// to disappear while hijacking or profiler-created threads are in action.
+// * If you're executing while code:ProfilingAPIUtility::s_csStatus is held, then
+// you're explicitly serialized against all code that might unload the profiler's
+// DLL and delete g_profControlBlock.pProfInterface. So the profiler is therefore
+// still guaranteed not to disappear.
+// * If slow ELT helpers, fast ELT hooks, or profiler-instrumented code is on the
+// stack, then the profiler cannot be detached yet anyway. Today, we outright
+// refuse a detach request from a profiler that instrumented code or enabled ELT.
+//        Once rejit / revert is implemented, the evacuation checks will ensure all
+//        instrumented code (including ELT) is reverted and off all stacks before
+//        attempting to unload the profiler.
+
+
+#include "common.h"
+
+#ifdef PROFILING_SUPPORTED
+
+#include "eeprofinterfaces.h"
+#include "eetoprofinterfaceimpl.h"
+#include "eetoprofinterfaceimpl.inl"
+#include "corprof.h"
+#include "proftoeeinterfaceimpl.h"
+#include "proftoeeinterfaceimpl.inl"
+#include "profilinghelper.h"
+#include "profilinghelper.inl"
+#include "eemessagebox.h"
+
+#if defined(FEATURE_PROFAPI_EVENT_LOGGING) && !defined(FEATURE_CORECLR)
+#include <eventmsg.h>
+#endif // defined(FEATURE_PROFAPI_EVENT_LOGGING) && !defined(FEATURE_CORECLR)
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+#include "profattach.h"
+#include "profdetach.h"
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+#include "utilcode.h"
+
+#ifndef FEATURE_PAL
+#include "securitywrapper.h"
+#endif // !FEATURE_PAL
+
+//---------------------------------------------------------------------------------------
+// Normally, this would go in profilepriv.inl, but it's not easily inlineable because of
+// the use of BEGIN/END_PIN_PROFILER
+//
+// Return Value:
+// TRUE iff security transparency checks in full trust assemblies should be disabled
+// due to the profiler.
+//
+BOOL CORProfilerBypassSecurityChecks()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+
+ // V2 profiler binaries, for compatibility purposes, should bypass transparency
+ // checks in full trust assemblies.
+ if (!(&g_profControlBlock)->pProfInterface->IsCallback3Supported())
+ return TRUE;
+
+        // V4 profiler binaries must opt in to bypassing transparency checks in full trust
+ // assemblies.
+ if (((&g_profControlBlock)->dwEventMask & COR_PRF_DISABLE_TRANSPARENCY_CHECKS_UNDER_FULL_TRUST) != 0)
+ return TRUE;
+
+ END_PIN_PROFILER();
+ }
+
+ // All other cases, including no profiler loaded at all: Don't bypass
+ return FALSE;
+}
+
+// ----------------------------------------------------------------------------
+// CurrentProfilerStatus methods
+
+
+//---------------------------------------------------------------------------------------
+//
+// Updates the value indicating the profiler's current status
+//
+// Arguments:
+// profStatus - New value (from enum ProfilerStatus) to set.
+//
+// Notes:
+// Sets the status under a lock, and performs a debug-only check to verify that the
+// status transition is a legal one. Also performs a FlushStoreBuffers() after
+// changing the status when necessary.
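+//
+//    The debug-only checks below encode this transition diagram (a sketch, derived
+//    from the switch statement in the function body):
+//
+//        kProfStatusNone                 -> kProfStatusInitializingForStartupLoad
+//                                         | kProfStatusInitializingForAttachLoad
+//        kProfStatusInitializingFor*Load -> kProfStatusActive | kProfStatusNone
+//        kProfStatusActive               -> kProfStatusDetaching | kProfStatusNone
+//        kProfStatusDetaching            -> kProfStatusNone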
+//
+
+void CurrentProfilerStatus::Set(ProfilerStatus newProfStatus)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(ProfilingAPIUtility::GetStatusCrst() != NULL);
+
+ {
+ // Need to serialize attempts to transition the profiler status. For example, a
+ // profiler in one thread could request a detach, while the CLR in another
+ // thread is transitioning the profiler from kProfStatusInitializing* to
+ // kProfStatusActive
+ CRITSEC_Holder csh(ProfilingAPIUtility::GetStatusCrst());
+
+ // Based on what the old status is, verify the new status is a legal transition.
+ switch(m_profStatus)
+ {
+ default:
+ _ASSERTE(!"Unknown ProfilerStatus");
+ break;
+
+ case kProfStatusNone:
+ _ASSERTE((newProfStatus == kProfStatusInitializingForStartupLoad) ||
+ (newProfStatus == kProfStatusInitializingForAttachLoad));
+ break;
+
+ case kProfStatusDetaching:
+ _ASSERTE(newProfStatus == kProfStatusNone);
+ break;
+
+ case kProfStatusInitializingForStartupLoad:
+ case kProfStatusInitializingForAttachLoad:
+ _ASSERTE((newProfStatus == kProfStatusActive) ||
+ (newProfStatus == kProfStatusNone));
+ break;
+
+ case kProfStatusActive:
+ _ASSERTE((newProfStatus == kProfStatusNone) ||
+ (newProfStatus == kProfStatusDetaching));
+ break;
+ }
+
+ m_profStatus = newProfStatus;
+ }
+
+#if !defined(DACCESS_COMPILE)
+ if (((newProfStatus == kProfStatusNone) ||
+ (newProfStatus == kProfStatusDetaching) ||
+ (newProfStatus == kProfStatusActive)))
+ {
+ // Flush the store buffers on all CPUs, to ensure other threads see that
+ // g_profControlBlock.curProfStatus has changed. The important status changes to
+ // flush are:
+ // * to kProfStatusNone or kProfStatusDetaching so other threads know to stop
+ // making calls into the profiler
+ // * to kProfStatusActive, to ensure callbacks can be issued by the time an
+ // attaching profiler receives ProfilerAttachComplete(), so the profiler
+ // can safely perform catchup at that time (see
+ // code:#ProfCatchUpSynchronization).
+ //
+ ::FlushProcessWriteBuffers();
+ }
+#endif // !defined(DACCESS_COMPILE)
+}
+
+
+//---------------------------------------------------------------------------------------
+// ProfilingAPIUtility members
+
+
+// See code:#LoadUnloadCallbackSynchronization.
+CRITSEC_COOKIE ProfilingAPIUtility::s_csStatus = NULL;
+
+#ifndef FEATURE_PAL
+
+SidBuffer * ProfilingAPIUtility::s_pSidBuffer = NULL;
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIUtility::AppendSupplementaryInformation
+//
+// Description:
+// Helper to the event logging functions to append the process ID and string
+// resource ID to the end of the message.
+//
+// Arguments:
+// * iStringResource - [in] String resource ID to append to message.
+// * pString - [in/out] On input, the string to log so far. On output, the original
+// string with the process ID info appended.
+//
+
+// static
+void ProfilingAPIUtility::AppendSupplementaryInformation(int iStringResource, SString * pString)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ // This loads resource strings, which takes locks.
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ StackSString supplementaryInformation;
+
+ if (!supplementaryInformation.LoadResource(
+ CCompRC::Debugging,
+ IDS_PROF_SUPPLEMENTARY_INFO
+ ))
+ {
+ // Resource not found; should never happen.
+ return;
+ }
+
+ pString->Append(W(" "));
+ pString->AppendPrintf(
+ supplementaryInformation,
+ GetCurrentProcessId(),
+ iStringResource);
+}
+
+#endif // !FEATURE_PAL
+
+//---------------------------------------------------------------------------------------
+//
+// Helper function to log publicly-viewable errors about profiler loading and
+// initialization.
+//
+//
+// Arguments:
+// * iStringResourceID - resource ID of string containing message to log
+// * wEventType - same constant used in win32 to specify the type of event:
+// usually EVENTLOG_ERROR_TYPE, EVENTLOG_WARNING_TYPE, or
+// EVENTLOG_INFORMATION_TYPE
+// * insertionArgs - 0 or more values to be inserted into the string to be logged
+// (>0 only if iStringResourceID contains format arguments (%)).
+//
+
+// static
+void ProfilingAPIUtility::LogProfEventVA(
+ int iStringResourceID,
+ WORD wEventType,
+ va_list insertionArgs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ // This loads resource strings, which takes locks.
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PROFAPI_EVENT_LOGGING
+
+
+ // Rotor messages go to message boxes
+
+ EEMessageBoxCatastrophic(
+ iStringResourceID, // Text message to display
+ IDS_EE_PROFILING_FAILURE, // Titlebar of message box
+ insertionArgs); // Insertion strings for text message
+
+#else // FEATURE_PROFAPI_EVENT_LOGGING
+
+ // Non-rotor messages go to the event log
+
+ StackSString messageFromResource;
+ StackSString messageToLog;
+
+ if (!messageFromResource.LoadResource(
+ CCompRC::Debugging,
+ iStringResourceID
+ ))
+ {
+ // Resource not found; should never happen.
+ return;
+ }
+
+ messageToLog.VPrintf(messageFromResource, insertionArgs);
+
+ AppendSupplementaryInformation(iStringResourceID, &messageToLog);
+
+#if defined(FEATURE_CORECLR)
+    // CoreCLR on Windows outputs debug strings for diagnostic messages.
+ WszOutputDebugString(messageToLog);
+#else
+ // Get the user SID for the current process, so it can be provided to the event
+ // logging API, which will then fill out the "User" field in the event log entry. If
+ // this fails, that's not fatal. We can just pass NULL for the PSID, and the "User"
+ // field will be left blank.
+ PSID psid = NULL;
+ HRESULT hr = GetCurrentProcessUserSid(&psid);
+ if (FAILED(hr))
+ {
+ // No biggie. Just pass in a NULL psid, and the User field will be empty
+ _ASSERTE(psid == NULL);
+ }
+
+ // On desktop CLR builds, the profiling API uses the event log for end-user-friendly
+ // diagnostic messages.
+ ReportEventCLR(wEventType, // wType
+ 0, // wCategory
+ COR_Profiler, // dwEventID
+ psid, // lpUserSid
+                   &messageToLog); // message to log
+#endif // FEATURE_CORECLR
+
+#endif // FEATURE_PROFAPI_EVENT_LOGGING
+}
+
+// See code:ProfilingAPIUtility.LogProfEventVA for description of arguments.
+// static
+void ProfilingAPIUtility::LogProfError(int iStringResourceID, ...)
+{
+ CONTRACTL
+    {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ // This loads resource strings, which takes locks.
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ va_list insertionArgs;
+ va_start(insertionArgs, iStringResourceID);
+ LogProfEventVA(
+ iStringResourceID,
+ EVENTLOG_ERROR_TYPE,
+ insertionArgs);
+ va_end(insertionArgs);
+}
+
+// See code:ProfilingAPIUtility.LogProfEventVA for description of arguments.
+// static
+void ProfilingAPIUtility::LogProfInfo(int iStringResourceID, ...)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ // This loads resource strings, which takes locks.
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+// Rotor uses message boxes instead of the event log, and it would be disruptive to
+// pop a message box in the user's face every time an app runs with a profiler
+// configured to load. So only log this when we don't do a pop-up.
+#ifdef FEATURE_PROFAPI_EVENT_LOGGING
+ va_list insertionArgs;
+ va_start(insertionArgs, iStringResourceID);
+ LogProfEventVA(
+ iStringResourceID,
+ EVENTLOG_INFORMATION_TYPE,
+ insertionArgs);
+ va_end(insertionArgs);
+#endif //FEATURE_PROFAPI_EVENT_LOGGING
+}
+
+#ifdef PROF_TEST_ONLY_FORCE_ELT
+// Special forward-declarations of the profiling API's slow-path enter/leave/tailcall
+// hooks. These need to be forward-declared here so that they may be referenced in
+// InitializeProfiling() below solely for the debug-only, test-only code to allow
+// enter/leave/tailcall to be turned on at startup without a profiler. See
+// code:ProfControlBlock#TestOnlyELT
+EXTERN_C void __stdcall ProfileEnterNaked(UINT_PTR clientData);
+EXTERN_C void __stdcall ProfileLeaveNaked(UINT_PTR clientData);
+EXTERN_C void __stdcall ProfileTailcallNaked(UINT_PTR clientData);
+#endif //PROF_TEST_ONLY_FORCE_ELT
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIUtility::InitializeProfiling
+//
+// This is the top-most level of profiling API initialization, and is called directly by
+// EEStartupHelper() (in ceemain.cpp). This initializes internal structures relating to the
+// Profiling API. This also orchestrates loading the profiler and initializing it (if
+// its GUID is specified in the environment).
+//
+// Return Value:
+// HRESULT indicating success or failure. This is generally very lenient about internal
+// failures, as we don't want them to prevent the startup of the app:
+// S_OK = Environment didn't request a profiler, or
+// Environment did request a profiler, and it was loaded successfully
+// S_FALSE = There was a problem loading the profiler, but that shouldn't prevent the app
+// from starting up
+// else (failure) = There was a serious problem that should be dealt with by the caller
+//
+// Notes:
+// This function (or one of its callees) will log an error to the event log
+// if there is a failure
+//
+// Assumptions:
+// InitializeProfiling is called during startup, AFTER the host has initialized its
+// settings and the config variables have been read, but BEFORE the finalizer thread
+// has entered its first wait state. ASSERTs are placed in
+// code:ProfilingAPIAttachDetach::Initialize (which is called by this function, and
+// which depends on these assumptions) to verify.
+
+// static
+HRESULT ProfilingAPIUtility::InitializeProfiling()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+
+ // This causes events to be logged, which loads resource strings,
+ // which takes locks.
+ CAN_TAKE_LOCK;
+
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ InitializeLogging();
+
+ // NULL out / initialize members of the global profapi structure
+ g_profControlBlock.Init();
+
+ if (IsCompilationProcess())
+ {
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling disabled for ngen process.\n"));
+ return S_OK;
+ }
+
+ AttemptLoadProfilerForStartup();
+ // For now, the return value from AttemptLoadProfilerForStartup is of no use to us.
+ // Any event has been logged already by AttemptLoadProfilerForStartup, and
+ // regardless of whether a profiler got loaded, we still need to continue.
+
+
+#ifdef PROF_TEST_ONLY_FORCE_ELT
+ // Test-only, debug-only code to enable ELT on startup regardless of whether a
+ // startup profiler is loaded. See code:ProfControlBlock#TestOnlyELT.
+ DWORD dwEnableSlowELTHooks = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TestOnlyEnableSlowELTHooks);
+ if (dwEnableSlowELTHooks != 0)
+ {
+ (&g_profControlBlock)->fTestOnlyForceEnterLeave = TRUE;
+ SetJitHelperFunction(CORINFO_HELP_PROF_FCN_ENTER, (void *) ProfileEnterNaked);
+ SetJitHelperFunction(CORINFO_HELP_PROF_FCN_LEAVE, (void *) ProfileLeaveNaked);
+ SetJitHelperFunction(CORINFO_HELP_PROF_FCN_TAILCALL, (void *) ProfileTailcallNaked);
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Enabled test-only slow ELT hooks.\n"));
+ }
+#endif //PROF_TEST_ONLY_FORCE_ELT
+
+#ifdef PROF_TEST_ONLY_FORCE_OBJECT_ALLOCATED
+ // Test-only, debug-only code to enable ObjectAllocated callbacks on startup regardless of whether a
+ // startup profiler is loaded. See code:ProfControlBlock#TestOnlyObjectAllocated.
+ DWORD dwEnableObjectAllocated = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TestOnlyEnableObjectAllocatedHook);
+ if (dwEnableObjectAllocated != 0)
+ {
+ (&g_profControlBlock)->fTestOnlyForceObjectAllocated = TRUE;
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Enabled test-only object ObjectAllocated hooks.\n"));
+ }
+#endif //PROF_TEST_ONLY_FORCE_OBJECT_ALLOCATED
+
+
+#ifdef _DEBUG
+    // Test-only, debug-only code to allow attaching profilers to call the ICorProfilerInfo
+    // interface, which would otherwise be disallowed for attaching profilers
+ DWORD dwTestOnlyEnableICorProfilerInfo = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TestOnlyEnableICorProfilerInfo);
+ if (dwTestOnlyEnableICorProfilerInfo != 0)
+ {
+ (&g_profControlBlock)->fTestOnlyEnableICorProfilerInfo = TRUE;
+ }
+#endif // _DEBUG
+
+ return S_OK;
+}
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIUtility::ProfilerCLSIDFromString
+//
+// Description:
+// Takes a string form of a CLSID (or progid, believe it or not), and returns the
+// corresponding CLSID structure.
+//
+// Arguments:
+//    * wszClsid - [in / out] CLSID string to convert. This may also be a progid, which
+//        keeps our behavior backward-compatible with previous CLR versions, which allowed
+//        the user to set a progid in the environment. On [out], this string is normalized
+//        in-place (e.g., double-quotes around a progid are removed).
+// * pClsid - [out] CLSID structure corresponding to wszClsid
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+// Notes:
+// * An event is logged if there is a failure.
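+//    * For example (illustrative inputs only): a braced string such as
+//        W("{01234567-89AB-CDEF-0123-456789ABCDEF}") is parsed directly with
+//        IIDFromString, while a quoted progid such as W("\"MyCompany.Profiler\"")
+//        first has its double-quotes stripped in place and is then resolved with
+//        CLSIDFromProgID.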
+//
+
+// static
+HRESULT ProfilingAPIUtility::ProfilerCLSIDFromString(
+ __inout_z LPWSTR wszClsid,
+ CLSID * pClsid)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+
+ // This causes events to be logged, which loads resource strings,
+ // which takes locks.
+ CAN_TAKE_LOCK;
+
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(wszClsid != NULL);
+ _ASSERTE(pClsid != NULL);
+
+ HRESULT hr;
+
+ // Translate the string into a CLSID
+ if (*wszClsid == W('{'))
+ {
+ hr = IIDFromString(wszClsid, pClsid);
+ }
+ else
+ {
+#ifndef FEATURE_PAL
+ WCHAR *szFrom, *szTo;
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:26000) // "espX thinks there is an overflow here, but there isn't any"
+#endif
+ for (szFrom=szTo=wszClsid; *szFrom; )
+ {
+ if (*szFrom == W('"'))
+ {
+ ++szFrom;
+ continue;
+ }
+ *szTo++ = *szFrom++;
+ }
+ *szTo = 0;
+ hr = CLSIDFromProgID(wszClsid, pClsid);
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif /*_PREFAST_*/
+
+#else // !FEATURE_PAL
+ // ProgID not supported on FEATURE_PAL
+ hr = E_INVALIDARG;
+#endif // !FEATURE_PAL
+ }
+
+ if (FAILED(hr))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF: Invalid CLSID or ProgID (%S). hr=0x%x.\n",
+ wszClsid,
+ hr));
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_BAD_CLSID, wszClsid, hr);
+ return hr;
+ }
+
+ return S_OK;
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIUtility::AttemptLoadProfilerForStartup
+//
+// Description:
+// Checks environment or registry to see if the app is configured to run with a
+// profiler loaded on startup. If so, this calls LoadProfiler() to load it up.
+//
+// Arguments:
+//    None
+//
+// Return Value:
+// * S_OK: Startup-profiler has been loaded
+// * S_FALSE: No profiler is configured for startup load
+// * else, HRESULT indicating failure that occurred
+//
+// Assumptions:
+// * This should be called on startup, after g_profControlBlock is initialized, but
+// before any attach infrastructure is initialized. This ensures we don't receive
+// an attach request while startup-loading a profiler.
+//
+// Notes:
+//    * This or its callees will ensure an event is logged on failure (though they
+//      will be silent if no profiler is configured for startup load, which causes
+//      S_FALSE to be returned).
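+//    * A typical startup configuration looks like the following (illustrative
+//      values only; the CLSID and path are placeholders):
+//
+//          set CORECLR_ENABLE_PROFILING=1
+//          set CORECLR_PROFILER={01234567-89AB-CDEF-0123-456789ABCDEF}
+//          set CORECLR_PROFILER_PATH=C:\profilers\MyProfiler.dll
+//
+//      (Desktop CLR builds read COR_ENABLE_PROFILING / COR_PROFILER /
+//      COR_PROFILER_PATH instead.)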
+//
+
+// static
+HRESULT ProfilingAPIUtility::AttemptLoadProfilerForStartup()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+
+ // This causes events to be logged, which loads resource strings,
+ // which takes locks.
+ CAN_TAKE_LOCK;
+
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+
+ // Find out if profiling is enabled
+ DWORD fProfEnabled = 0;
+
+#ifdef FEATURE_CORECLR
+ fProfEnabled = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_ENABLE_PROFILING);
+#else //FEATURE_CORECLR
+ fProfEnabled = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_COR_ENABLE_PROFILING);
+#endif //FEATURE_CORECLR
+
+ // If profiling is not enabled, return.
+ if (fProfEnabled == 0)
+ {
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling not enabled.\n"));
+ return S_FALSE;
+ }
+
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Initializing Profiling Services.\n"));
+
+ // Get the CLSID of the profiler to CoCreate
+ NewArrayHolder<WCHAR> wszClsid(NULL);
+ NewArrayHolder<WCHAR> wszProfilerDLL(NULL);
+
+#ifdef FEATURE_CORECLR
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER, &wszClsid));
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_CORECLR_PROFILER_PATH, &wszProfilerDLL));
+#else // FEATURE_CORECLR
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_COR_PROFILER, &wszClsid));
+ IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_COR_PROFILER_PATH, &wszProfilerDLL));
+#endif // FEATURE_CORECLR
+
+ // If the environment variable doesn't exist, profiling is not enabled.
+ if (wszClsid == NULL)
+ {
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling flag set, but required "
+ "environment variable does not exist.\n"));
+
+ LogProfError(IDS_E_PROF_NO_CLSID);
+
+ return S_FALSE;
+ }
+
+ if ((wszProfilerDLL != NULL) && (wcslen(wszProfilerDLL) >= MAX_PATH))
+ {
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling flag set, but COR_PROFILER_PATH was not set properly.\n"));
+
+ LogProfError(IDS_E_PROF_BAD_PATH);
+
+ return S_FALSE;
+ }
+
+#ifdef FEATURE_PAL
+ // If the environment variable doesn't exist, profiling is not enabled.
+ if (wszProfilerDLL == NULL)
+ {
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling flag set, but required "
+ "environment variable does not exist.\n"));
+
+ LogProfError(IDS_E_PROF_BAD_PATH);
+
+ return S_FALSE;
+ }
+#endif // FEATURE_PAL
+
+ CLSID clsid;
+ hr = ProfilingAPIUtility::ProfilerCLSIDFromString(wszClsid, &clsid);
+ if (FAILED(hr))
+ {
+ // ProfilerCLSIDFromString already logged an event if there was a failure
+ return hr;
+ }
+
+ hr = LoadProfiler(
+ kStartupLoad,
+ &clsid,
+ wszClsid,
+ wszProfilerDLL,
+ NULL, // No client data for startup load
+ 0); // No client data for startup load
+ if (FAILED(hr))
+ {
+ // A failure in either the CLR or the profiler prevented it from
+ // loading. Event has been logged. Propagate hr
+ return hr;
+ }
+
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Performs lazy initialization that need not occur on startup, but does need to occur
+// before trying to load a profiler.
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+HRESULT ProfilingAPIUtility::PerformDeferredInit()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ CAN_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ // Initialize internal resources for detaching
+ HRESULT hr = ProfilingAPIDetach::Initialize();
+ if (FAILED(hr))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: Unable to initialize resources for detaching. hr=0x%x.\n",
+ hr));
+ return hr;
+ }
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+ if (s_csStatus == NULL)
+ {
+ s_csStatus = ClrCreateCriticalSection(
+ CrstProfilingAPIStatus,
+ (CrstFlags) (CRST_REENTRANCY | CRST_TAKEN_DURING_SHUTDOWN));
+ if (s_csStatus == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ }
+
+ return S_OK;
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIUtility::LoadProfiler
+//
+// Description:
+// Outermost common code for loading the profiler DLL. Both startup and attach code
+// paths use this.
+//
+// Arguments:
+// * loadType - Startup load or attach load?
+// * pClsid - Profiler's CLSID
+// * wszClsid - Profiler's CLSID (or progid) in string form, for event log messages
+// * wszProfilerDLL - Profiler's DLL path
+// * pvClientData - For attach loads, this is the client data the trigger wants to
+// pass to the profiler DLL
+// * cbClientData - For attach loads, size of client data in bytes
+// * dwConcurrentGCWaitTimeoutInMs - Time out for wait operation on concurrent GC. Attach scenario only
+//
+// Return Value:
+// HRESULT indicating success or failure of the load
+//
+// Notes:
+// * On failure, this function or a callee will have logged an event
+//
+
+// static
+HRESULT ProfilingAPIUtility::LoadProfiler(
+ LoadType loadType,
+ const CLSID * pClsid,
+ LPCWSTR wszClsid,
+ LPCWSTR wszProfilerDLL,
+ LPVOID pvClientData,
+ UINT cbClientData,
+ DWORD dwConcurrentGCWaitTimeoutInMs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+
+ // This causes events to be logged, which loads resource strings,
+ // which takes locks.
+ CAN_TAKE_LOCK;
+
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (g_fEEShutDown)
+ {
+ return CORPROF_E_RUNTIME_UNINITIALIZED;
+ }
+
+ enum ProfilerCompatibilityFlag
+ {
+ // Default: disable V2 profiler
+ kDisableV2Profiler = 0x0,
+
+ // Enable V2 profilers
+ kEnableV2Profiler = 0x1,
+
+ // Disable Profiling
+ kPreventLoad = 0x2,
+ };
+
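+    // For startup loads, this flag is read from configuration just below; for
+    // example (illustrative): set COMPLUS_ProfAPI_ProfilerCompatibilitySetting=EnableV2Profiler
+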
+ ProfilerCompatibilityFlag profilerCompatibilityFlag = kDisableV2Profiler;
+ NewArrayHolder<WCHAR> wszProfilerCompatibilitySetting(NULL);
+
+ if (loadType == kStartupLoad)
+ {
+ CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ProfAPI_ProfilerCompatibilitySetting, &wszProfilerCompatibilitySetting);
+ if (wszProfilerCompatibilitySetting != NULL)
+ {
+ if (SString::_wcsicmp(wszProfilerCompatibilitySetting, W("EnableV2Profiler")) == 0)
+ {
+ profilerCompatibilityFlag = kEnableV2Profiler;
+ }
+ else if (SString::_wcsicmp(wszProfilerCompatibilitySetting, W("PreventLoad")) == 0)
+ {
+ profilerCompatibilityFlag = kPreventLoad;
+ }
+ }
+
+ if (profilerCompatibilityFlag == kPreventLoad)
+ {
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: COMPLUS_ProfAPI_ProfilerCompatibilitySetting is set to PreventLoad. "
+ "Profiler will not be loaded.\n"));
+
+ LogProfInfo(IDS_PROF_PROFILER_DISABLED,
+ CLRConfig::EXTERNAL_ProfAPI_ProfilerCompatibilitySetting.name,
+ wszProfilerCompatibilitySetting.GetValue(),
+ wszClsid);
+
+ return S_OK;
+ }
+ }
+
+ HRESULT hr;
+
+ hr = PerformDeferredInit();
+ if (FAILED(hr))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: ProfilingAPIUtility::PerformDeferredInit failed. hr=0x%x.\n",
+ hr));
+ LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, hr);
+ return hr;
+ }
+
+ // Valid loadType?
+ _ASSERTE((loadType == kStartupLoad) || (loadType == kAttachLoad));
+
+ // If a nonzero client data size is reported, there'd better be client data!
+ _ASSERTE((cbClientData == 0) || (pvClientData != NULL));
+
+ // Client data is currently only specified on attach
+ _ASSERTE((pvClientData == NULL) || (loadType == kAttachLoad));
+
+ // Don't be telling me to load a profiler if there already is one.
+ _ASSERTE(g_profControlBlock.curProfStatus.Get() == kProfStatusNone);
+
+ // Create the ProfToEE interface to provide to the profiling services
+ NewHolder<ProfToEEInterfaceImpl> pProfEE(new (nothrow) ProfToEEInterfaceImpl());
+ if (pProfEE == NULL)
+ {
+ LOG((LF_CORPROF, LL_ERROR, "**PROF: Unable to allocate ProfToEEInterfaceImpl.\n"));
+ LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, E_OUTOFMEMORY);
+ return E_OUTOFMEMORY;
+ }
+
+ // Initialize the interface
+ hr = pProfEE->Init();
+ if (FAILED(hr))
+ {
+ LOG((LF_CORPROF, LL_ERROR, "**PROF: ProfToEEInterface::Init failed.\n"));
+ LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, hr);
+ return hr;
+ }
+
+ // Provide the newly created and inited interface
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiling code being provided with EE interface.\n"));
+
+ // Create a new EEToProf object
+ NewHolder<EEToProfInterfaceImpl> pEEProf(new (nothrow) EEToProfInterfaceImpl());
+ if (pEEProf == NULL)
+ {
+ LOG((LF_CORPROF, LL_ERROR, "**PROF: Unable to allocate EEToProfInterfaceImpl.\n"));
+ LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, E_OUTOFMEMORY);
+ return E_OUTOFMEMORY;
+ }
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ // We're about to load the profiler, so first make sure we successfully create the
+ // DetachThread and abort the load of the profiler if we can't. This ensures we don't
+ // load a profiler unless we're prepared to detach it later.
+ hr = ProfilingAPIDetach::CreateDetachThread();
+ if (FAILED(hr))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_ERROR,
+ "**PROF: Unable to create DetachThread. hr=0x%x.\n",
+ hr));
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_INTERNAL_INIT, wszClsid, hr);
+ return hr;
+ }
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+ // Initialize internal state of our EEToProfInterfaceImpl. This also loads the
+    // profiler itself, but does not yet call its Initialize() callback
+ hr = pEEProf->Init(pProfEE, pClsid, wszClsid, wszProfilerDLL, (loadType == kAttachLoad), dwConcurrentGCWaitTimeoutInMs);
+ if (FAILED(hr))
+ {
+ LOG((LF_CORPROF, LL_ERROR, "**PROF: EEToProfInterfaceImpl::Init failed.\n"));
+ // EEToProfInterfaceImpl::Init logs an event log error on failure
+ return hr;
+ }
+
+    // EEToProfInterfaceImpl::Init takes over ownership of pProfEE when Init succeeds, and
+    // EEToProfInterfaceImpl::~EEToProfInterfaceImpl is responsible for releasing the resource
+    // pointed to by pProfEE. Calling SuppressRelease here is necessary to avoid a double
+    // release, where the resource pointed to by pProfEE would be released by both pProfEE's
+    // and pEEProf's destructors.
+ pProfEE.SuppressRelease();
+ pProfEE = NULL;
+
+ if (loadType == kAttachLoad) // V4 profiler from attach
+ {
+ // Profiler must support ICorProfilerCallback3 to be attachable
+ if (!pEEProf->IsCallback3Supported())
+ {
+ LogProfError(IDS_E_PROF_NOT_ATTACHABLE, wszClsid);
+ return CORPROF_E_PROFILER_NOT_ATTACHABLE;
+ }
+ }
+ else if (!pEEProf->IsCallback3Supported()) // V2 profiler from startup
+ {
+ if (profilerCompatibilityFlag == kDisableV2Profiler)
+ {
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: COMPLUS_ProfAPI_ProfilerCompatibilitySetting is set to DisableV2Profiler (the default). "
+ "V2 profilers are not allowed, so that the configured V2 profiler is going to be unloaded.\n"));
+
+ LogProfInfo(IDS_PROF_V2PROFILER_DISABLED, wszClsid);
+ return S_OK;
+ }
+
+ _ASSERTE(profilerCompatibilityFlag == kEnableV2Profiler);
+
+        // To prevent V2 profilers from AVing, once a V2 profiler has already been loaded by a
+        // V2 runtime in the process, the V4 runtime will not try to load the V2 profiler again.
+ if (IsV2RuntimeLoaded())
+ {
+ LogProfInfo(IDS_PROF_V2PROFILER_ALREADY_LOADED, wszClsid);
+ return S_OK;
+ }
+
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: COMPLUS_ProfAPI_ProfilerCompatibilitySetting is set to EnableV2Profiler. "
+ "The configured V2 profiler is going to be initialized.\n"));
+
+ LogProfInfo(IDS_PROF_V2PROFILER_ENABLED,
+ CLRConfig::EXTERNAL_ProfAPI_ProfilerCompatibilitySetting.name,
+ wszProfilerCompatibilitySetting.GetValue(),
+ wszClsid);
+ }
+
+ _ASSERTE(s_csStatus != NULL);
+ {
+ // All modification of the profiler's status and
+ // g_profControlBlock.pProfInterface need to be serialized against each other,
+ // in particular, this code should be serialized against detach and unloading
+ // code.
+ CRITSEC_Holder csh(s_csStatus);
+
+ // We've successfully allocated and initialized the callback wrapper object and the
+ // Info interface implementation objects. The profiler DLL is therefore also
+ // successfully loaded (but not yet Initialized). Transfer ownership of the
+ // callback wrapper object to globals (thus suppress a release when the local
+ // vars go out of scope).
+ //
+ // Setting this state now enables us to call into the profiler's Initialize()
+ // callback (which we do immediately below), and have it successfully call
+ // back into us via the Info interface (ProfToEEInterfaceImpl) to perform its
+ // initialization.
+ g_profControlBlock.pProfInterface = pEEProf.GetValue();
+ pEEProf.SuppressRelease();
+ pEEProf = NULL;
+
+ // Set global status to reflect the proper type of Init we're doing (attach vs
+ // startup)
+ g_profControlBlock.curProfStatus.Set(
+ (loadType == kStartupLoad) ?
+ kProfStatusInitializingForStartupLoad :
+ kProfStatusInitializingForAttachLoad);
+ }
+
+ // Now that the profiler is officially loaded and in Init status, call into the
+ // profiler's appropriate Initialize() callback. Note that if the profiler fails this
+ // call, we should abort the rest of the profiler loading, and reset our state so we
+ // appear as if we never attempted to load the profiler.
+
+ if (loadType == kStartupLoad)
+ {
+ hr = g_profControlBlock.pProfInterface->Initialize();
+ }
+ else
+ {
+ _ASSERTE(loadType == kAttachLoad);
+ _ASSERTE(g_profControlBlock.pProfInterface->IsCallback3Supported());
+ hr = g_profControlBlock.pProfInterface->InitializeForAttach(pvClientData, cbClientData);
+ }
+
+ if (FAILED(hr))
+ {
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF: Profiler failed its Initialize callback. hr=0x%x.\n",
+ hr));
+
+        // If we timed out waiting for concurrent GC to finish, it is very likely that this
+        // is the reason the InitializeForAttach callback failed, even though we cannot be
+        // sure, and we cannot assume hr is going to be CORPROF_E_TIMEOUT_WAITING_FOR_CONCURRENT_GC.
+        // The best we can do in this case is to report this failure anyway.
+ if (g_profControlBlock.pProfInterface->HasTimedOutWaitingForConcurrentGC())
+ {
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_TIMEOUT_WAITING_FOR_CONCURRENT_GC, dwConcurrentGCWaitTimeoutInMs, wszClsid);
+ }
+
+ // Check for known failure types, to customize the event we log
+ if ((loadType == kAttachLoad) &&
+ ((hr == CORPROF_E_PROFILER_NOT_ATTACHABLE) || (hr == E_NOTIMPL)))
+ {
+ _ASSERTE(g_profControlBlock.pProfInterface->IsCallback3Supported());
+
+ // Profiler supports ICorProfilerCallback3, but explicitly doesn't support
+ // Attach loading. So log specialized event
+ LogProfError(IDS_E_PROF_NOT_ATTACHABLE, wszClsid);
+
+ // Normalize (CORPROF_E_PROFILER_NOT_ATTACHABLE || E_NOTIMPL) down to
+ // CORPROF_E_PROFILER_NOT_ATTACHABLE
+ hr = CORPROF_E_PROFILER_NOT_ATTACHABLE;
+ }
+ else if (hr == CORPROF_E_PROFILER_CANCEL_ACTIVATION)
+ {
+ // Profiler didn't encounter a bad error, but is voluntarily choosing not to
+ // profile this runtime. Profilers that need to set system environment
+ // variables to be able to profile services may use this HRESULT to avoid
+ // profiling all the other managed apps on the box.
+ LogProfInfo(IDS_PROF_CANCEL_ACTIVATION, wszClsid);
+ }
+ else
+ {
+ LogProfError(IDS_E_PROF_INIT_CALLBACK_FAILED, wszClsid, hr);
+ }
+
+ // Profiler failed; reset everything. This will automatically reset
+ // g_profControlBlock and will unload the profiler's DLL.
+ TerminateProfiling();
+ return hr;
+ }
+
+#ifdef FEATURE_MULTICOREJIT
+
+ // Disable multicore JIT when profiling is enabled
+ if (g_profControlBlock.dwEventMask & COR_PRF_MONITOR_JIT_COMPILATION)
+ {
+ MulticoreJitManager::DisableMulticoreJit();
+ }
+
+#endif
+
+ // Indicate that profiling is properly initialized. On an attach-load, this will
+ // force a FlushStoreBuffers(), which is important for catch-up synchronization (see
+ // code:#ProfCatchUpSynchronization)
+ g_profControlBlock.curProfStatus.Set(kProfStatusActive);
+
+ LOG((
+ LF_CORPROF,
+ LL_INFO10,
+ "**PROF: Profiler successfully loaded and initialized.\n"));
+
+ LogProfInfo(IDS_PROF_LOAD_COMPLETE, wszClsid);
+
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Profiler created and enabled.\n"));
+
+ if (loadType == kStartupLoad)
+ {
+ // For startup profilers only: If the profiler is interested in tracking GC
+ // events, then we must disable concurrent GC since concurrent GC can allocate
+        // and kill objects without relocating, and thus without doing a heap walk.
+ if (CORProfilerTrackGC())
+ {
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Turning off concurrent GC at startup.\n"));
+ g_pConfig->SetGCconcurrent(0);
+ LOG((LF_CORPROF, LL_INFO10, "**PROF: Concurrent GC has been turned off at startup.\n"));
+ }
+ }
+
+ if (loadType == kAttachLoad)
+ {
+ // #ProfCatchUpSynchronization
+ //
+ // Now that callbacks are enabled (and all threads are aware), tell an attaching
+ // profiler that it's safe to request catchup information.
+ //
+ // There's a race we're preventing that's worthwhile to spell out. An attaching
+ // profiler should be able to get a COMPLETE set of data through the use of
+ // callbacks unioned with the use of catch-up enumeration Info functions. To
+ // achieve this, we must ensure that there is no "hole"--any new data the
+ // profiler seeks must be available from a callback or a catch-up info function
+ // (or both, as dupes are ok). That means that:
+ //
+ // * callbacks must be enabled on other threads NO LATER THAN the profiler begins
+ // requesting catch-up information on this thread
+ // * Abbreviate: callbacks <= catch-up.
+ //
+ // Otherwise, if catch-up < callbacks, then it would be possible to have this:
+ //
+ // * catch-up < new data arrives < callbacks.
+ //
+        // In this nightmare scenario, the new data would not be accessible from the
+        // catch-up calls made by the profiler (because the profiler made the calls too
+        // early) or from the callbacks made into the profiler (because the callbacks were
+        // enabled too late). That's a hole, and that's bad. So we ensure callbacks <= catch-up
+ // by the following order of operations:
+ //
+ // * This thread:
+ // * a: Set (volatile) currentProfStatus = kProfStatusActive (done above) and
+ // event mask bits (profiler did this in Initialize() callback above,
+ // when it called SetEventMask)
+ // * b: Flush CPU buffers (done automatically when we set status to
+ // kProfStatusActive)
+ // * c: CLR->Profiler call: ProfilerAttachComplete() (below). Inside this
+ // call:
+ // * Profiler->CLR calls: Catch-up Info functions
+ // * Other threads:
+ // * a: New data (thread, JIT info, etc.) is created
+ // * b: This new data is now available to a catch-up Info call
+ // * c: currentProfStatus & event mask bits are accurately visible to thread
+ // in determining whether to make a callback
+ // * d: Read currentProfStatus & event mask bits and make callback
+ // (CLR->Profiler) if necessary
+ //
+ // So as long as OtherThreads.c <= ThisThread.c we're ok. This means other
+ // threads must be able to get a clean read of the (volatile) currentProfStatus &
+ // event mask bits BEFORE this thread calls ProfilerAttachComplete(). Use of the
+ // "volatile" keyword ensures that compiler optimizations and (w/ VC2005+
+ // compilers) the CPU's instruction reordering optimizations at runtime are
+ // disabled enough such that they do not hinder the order above. Use of
+ // FlushStoreBuffers() ensures that multiple caches on multiple CPUs do not
+ // hinder the order above (by causing other threads to get stale reads of the
+ // volatiles).
+ //
+ // For more information about catch-up enumerations and exactly which entities,
+ // and which stage of loading, are permitted to appear in the enumerations, see
+ // code:ProfilerFunctionEnum::Init#ProfilerEnumGeneral
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ g_profControlBlock.pProfInterface->ProfilerAttachComplete();
+ END_PIN_PROFILER();
+ }
+ }
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// This is the top-most level of profiling API teardown, and is called directly by
+// EEShutDownHelper() (in ceemain.cpp). This cleans up internal structures relating to
+// the Profiling API. If we're not in process teardown, then this also releases the
+// profiler COM object and frees the profiler DLL
+//
+
+// static
+void ProfilingAPIUtility::TerminateProfiling()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ if (IsAtProcessExit())
+ {
+ // We're tearing down the process so don't bother trying to clean everything up.
+ // There's no reliable way to verify other threads won't be trying to re-enter
+ // the profiler anyway, so cleaning up here could cause AVs.
+ return;
+ }
+
+ _ASSERTE(s_csStatus != NULL);
+ {
+ // We're modifying status and possibly unloading the profiler DLL below, so
+ // serialize this code with any other loading / unloading / detaching code.
+ CRITSEC_Holder csh(s_csStatus);
+
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ if (ProfilingAPIDetach::GetEEToProfPtr() != NULL)
+ {
+ // The profiler is still being referenced by
+ // ProfilingAPIDetach::s_profilerDetachInfo, so don't try to release and
+ // unload it. This can happen if Shutdown and Detach race, and Shutdown wins.
+ // For example, we could be called as part of Shutdown, but the profiler
+ // called RequestProfilerDetach near shutdown time as well (or even earlier
+ // but remains un-evacuated as shutdown begins). Whatever the cause, just
+ // don't unload the profiler here (as part of shutdown), and let the Detach
+ // Thread deal with it (if it gets the chance).
+ //
+ // Note: Since this check occurs inside s_csStatus, we don't have to worry
+ // that ProfilingAPIDetach::GetEEToProfPtr() will suddenly change during the
+ // code below.
+ //
+ // FUTURE: For reattach-with-neutered-profilers feature crew, change the
+ // above to scan through list of detaching profilers to make sure none of
+ // them give a GetEEToProfPtr() equal to g_profControlBlock.pProfInterface.
+ return;
+ }
+
+ if (g_profControlBlock.curProfStatus.Get() == kProfStatusActive)
+ {
+ g_profControlBlock.curProfStatus.Set(kProfStatusDetaching);
+
+ // Profiler was active when TerminateProfiling() was called, so we're unloading
+ // it due to shutdown. But other threads may still be trying to enter profiler
+ // callbacks (e.g., ClassUnloadStarted() can get called during shutdown). Now
+ // that the status has been changed to kProfStatusDetaching, no new threads will
+ // attempt to enter the profiler. But use the detach evacuation counters to see
+ // if other threads already began to enter the profiler.
+ if (!ProfilingAPIDetach::IsProfilerEvacuated())
+ {
+ // Other threads might be entering the profiler, so just skip cleanup
+ return;
+ }
+ }
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+ // If we have a profiler callback wrapper and / or info implementation
+ // active, then terminate them.
+
+ if (g_profControlBlock.pProfInterface.Load() != NULL)
+ {
+ // This destructor takes care of releasing the profiler's ICorProfilerCallback*
+ // interface, and unloading the DLL when we're not in process teardown.
+ delete g_profControlBlock.pProfInterface;
+ g_profControlBlock.pProfInterface.Store(NULL);
+ }
+
+ // NOTE: Intentionally not deleting s_pSidBuffer. Doing so can cause annoying races
+ // with other threads that lazily create and initialize it when needed. (Example:
+ // it's used to fill out the "User" field of profiler event log entries.) Keeping
+ // s_pSidBuffer around after a profiler detaches and before a new one attaches
+ // consumes a bit more memory unnecessarily, but it'll get paged out if another
+ // profiler doesn't attach.
+
+ // NOTE: Similarly, intentionally not destroying / NULLing s_csStatus. If
+ // s_csStatus is already initialized, we can reuse it each time we do another
+ // attach / detach, so no need to destroy it.
+
+ // If we disabled concurrent GC and somehow failed later during the initialization
+ if (g_profControlBlock.fConcurrentGCDisabledForAttach)
+ {
+ // We know for sure GC has been fully initialized as we've turned off concurrent GC before
+ _ASSERTE(IsGarbageCollectorFullyInitialized());
+ GCHeap::GetGCHeap()->TemporaryEnableConcurrentGC();
+ g_profControlBlock.fConcurrentGCDisabledForAttach = FALSE;
+ }
+
+ // #ProfileResetSessionStatus Reset all the status variables that are for the current
+ // profiling attach session.
+ // When you are adding new status in g_profControlBlock, you need to think about whether
+ // your new status is per-session, or consistent across sessions
+ g_profControlBlock.ResetPerSessionStatus();
+
+ g_profControlBlock.curProfStatus.Set(kProfStatusNone);
+ }
+}
+
+#ifndef FEATURE_PAL
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIUtility::GetCurrentProcessUserSid
+//
+// Description:
+// Generates a SID of the current user from the current process's token. SID is
+// returned in an [out] param, and is also cached for future use. The SID is used for
+// two purposes: event log entries (for filling out the User field) and the ACL used
+// on the globally named pipe object for attaching profilers.
+//
+// Arguments:
+// * ppsid - [out] Generated (or cached) SID
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+// static
+HRESULT ProfilingAPIUtility::GetCurrentProcessUserSid(PSID * ppsid)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (s_pSidBuffer == NULL)
+ {
+ HRESULT hr;
+ NewHolder<SidBuffer> pSidBuffer(new (nothrow) SidBuffer);
+ if (pSidBuffer == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ // This gets the SID of the user from the process token
+ hr = pSidBuffer->InitFromProcessUserNoThrow(GetCurrentProcessId());
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ if (FastInterlockCompareExchangePointer(
+ &s_pSidBuffer,
+ pSidBuffer.GetValue(),
+ NULL) == NULL)
+ {
+ // Lifetime successfully transferred to s_pSidBuffer, so don't delete it here
+ pSidBuffer.SuppressRelease();
+ }
+ }
+
+ _ASSERTE(s_pSidBuffer != NULL);
+ _ASSERTE(s_pSidBuffer->GetSid().RawSid() != NULL);
+ *ppsid = s_pSidBuffer->GetSid().RawSid();
+ return S_OK;
+}
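+// Usage sketch (illustrative only, not in the original source): a typical
+// caller just borrows the cached SID; the returned PSID is owned by
+// s_pSidBuffer and must not be freed by the caller:
+//
+//     PSID psid = NULL;
+//     HRESULT hr = ProfilingAPIUtility::GetCurrentProcessUserSid(&psid);
+//     if (SUCCEEDED(hr))
+//     {
+//         // use psid to fill out an event log entry or to build an ACL
+//     }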
+
+#endif // !FEATURE_PAL
+
+#endif // PROFILING_SUPPORTED
diff --git a/src/vm/profilinghelper.h b/src/vm/profilinghelper.h
new file mode 100644
index 0000000000..ce35d6f418
--- /dev/null
+++ b/src/vm/profilinghelper.h
@@ -0,0 +1,137 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ProfilingHelper.h
+//
+
+//
+// Declaration of helper classes used for miscellaneous purposes within the
+// profiling API
+//
+
+// ======================================================================================
+
+#ifndef __PROFILING_HELPER_H__
+#define __PROFILING_HELPER_H__
+
+#ifndef PROFILING_SUPPORTED
+#error PROFILING_SUPPORTED is not set. Do not include ProfilingHelper.h.
+#endif
+
+#include <windows.h>
+
+#include "corprof.h"
+#include "eeprofinterfaces.h"
+
+#define COM_METHOD HRESULT STDMETHODCALLTYPE
+
+#ifdef _DEBUG
+// On DEBUG builds, setting the COMPLUS_ProfAPIFault to a bitmask of the flags
+// below forces the Profiling API to return failures at various points.
+// Useful for event log testing. Also see code:ProfilingAPIUtility.ShouldInjectProfAPIFault
+enum ProfAPIFaultFlags
+{
+ // Forces the startup path to log an IDS_E_PROF_INTERNAL_INIT error
+ kProfAPIFault_StartupInternal = 0x00001,
+};
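+// Usage sketch (illustrative only): to force the startup-internal failure path
+// on a DEBUG build, set the environment variable before launching the process:
+//
+//     set COMPLUS_ProfAPIFault=0x1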
+#endif // _DEBUG
+
+#ifndef FEATURE_PAL
+class SidBuffer;
+#endif // !FEATURE_PAL
+
+//---------------------------------------------------------------------------------------
+// Static-only class to coordinate initialization of the various profiling API
+// structures, plus other utility stuff.
+//
+class ProfilingAPIUtility
+{
+public:
+ static HRESULT InitializeProfiling();
+ static HRESULT LoadProfilerForAttach(
+ const CLSID * pClsid,
+ LPCWSTR wszProfilerDLL,
+ LPVOID pvClientData,
+ UINT cbClientData,
+ DWORD dwConcurrentGCWaitTimeoutInMs);
+
+ static void TerminateProfiling();
+ static void LogProfError(int iStringResourceID, ...);
+ static void LogProfInfo(int iStringResourceID, ...);
+ static void LogNoInterfaceError(REFIID iidRequested, LPCWSTR wszClsid);
+ INDEBUG(static BOOL ShouldInjectProfAPIFault(ProfAPIFaultFlags faultFlag);)
+#ifndef FEATURE_PAL
+ static HRESULT GetCurrentProcessUserSid(PSID * ppsid);
+#endif // !FEATURE_PAL
+
+ // helper functions for profiler evacuation counter holder
+ static void IncEvacuationCounter(Thread * pThread);
+ static void DecEvacuationCounter(Thread * pThread);
+
+ // See code:ProfilingAPIUtility::InitializeProfiling#LoadUnloadCallbackSynchronization
+ static CRITSEC_COOKIE GetStatusCrst();
+
+private:
+ // ---------------------------------------------------------------------------------------
+ // Enum used in LoadProfiler() to differentiate whether we're loading the profiler
+ // for startup or for attach
+ enum LoadType
+ {
+ kStartupLoad,
+ kAttachLoad,
+ };
+
+#ifndef FEATURE_PAL
+ // Allocated lazily the first time it's needed, and then remains allocated until the
+ // process exits.
+ static SidBuffer * s_pSidBuffer;
+#endif // !FEATURE_PAL
+
+ // See code:ProfilingAPIUtility::InitializeProfiling#LoadUnloadCallbackSynchronization
+ static CRITSEC_COOKIE s_csStatus;
+
+ // Static-only class. Private constructor enforces you don't try to make an instance
+ ProfilingAPIUtility() {}
+
+ static HRESULT PerformDeferredInit();
+ static HRESULT LoadProfiler(
+ LoadType loadType,
+ const CLSID * pClsid,
+ LPCWSTR wszClsid,
+ LPCWSTR wszProfilerDLL,
+ LPVOID pvClientData,
+ UINT cbClientData,
+ DWORD dwConcurrentGCWaitTimeoutInMs = INFINITE);
+ static HRESULT ProfilerCLSIDFromString(__inout_z LPWSTR wszClsid, CLSID * pClsid);
+ static HRESULT AttemptLoadProfilerForStartup();
+
+#ifndef FEATURE_PAL
+ static void AppendSupplementaryInformation(int iStringResource, SString * pString);
+#endif // !FEATURE_PAL
+
+ static void LogProfEventVA(
+ int iStringResourceID,
+ WORD wEventType,
+ va_list insertionArgs);
+};
+
+
+//---------------------------------------------------------------------------------------
+// When we call into profiler code, we push one of these holders onto the stack to
+// remember on the Thread how the profiler was called. If the profiler calls back into
+// us, we use the flags this holder set to authorize the call.
+//
+class SetCallbackStateFlagsHolder
+{
+public:
+ SetCallbackStateFlagsHolder(DWORD dwFlags);
+ ~SetCallbackStateFlagsHolder();
+
+private:
+ Thread * m_pThread;
+ DWORD m_dwOriginalFullState;
+};
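+// Usage sketch (illustrative only; the callback name below is hypothetical): a
+// CLR-to-profiler callback wrapper constructs the holder for the duration of
+// the profiler call, so re-entrant Info calls can check the recorded state:
+//
+//     {
+//         SetCallbackStateFlagsHolder csf(COR_PRF_CALLBACKSTATE_INCALLBACK);
+//         pProfilerCallback->SomeCallback();   // hypothetical callback
+//     }   // destructor restores the thread's previous flag state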
+
+#endif //__PROFILING_HELPER_H__
diff --git a/src/vm/profilinghelper.inl b/src/vm/profilinghelper.inl
new file mode 100644
index 0000000000..853a34c834
--- /dev/null
+++ b/src/vm/profilinghelper.inl
@@ -0,0 +1,277 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ProfilingHelper.inl
+//
+
+//
+// Inlined implementation of some helper class methods used for
+// miscellaneous purposes within the profiling API
+//
+
+// ======================================================================================
+
+#ifndef __PROFILING_HELPER_INL__
+#define __PROFILING_HELPER_INL__
+
+FORCEINLINE SetCallbackStateFlagsHolder::SetCallbackStateFlagsHolder(DWORD dwFlags)
+{
+ // This is called before entering a profiler. We set the specified dwFlags on
+ // the Thread object, and remember the previous flags for later.
+ BEGIN_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
+ m_pThread = GetThread();
+ if (m_pThread != NULL)
+ {
+ m_dwOriginalFullState = m_pThread->SetProfilerCallbackStateFlags(dwFlags);
+ }
+ else
+ {
+ m_dwOriginalFullState = 0;
+ }
+ END_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
+}
+
+FORCEINLINE SetCallbackStateFlagsHolder::~SetCallbackStateFlagsHolder()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ // This is called after the profiler returns to us. We reinstate the
+ // original flag set here.
+ if (m_pThread != NULL)
+ {
+ BEGIN_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
+ m_pThread->SetProfilerCallbackFullState(m_dwOriginalFullState);
+ END_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
+ }
+}
+
+#ifdef ENABLE_CONTRACTS
+//---------------------------------------------------------------------------------------
+//
+// This function, used only on debug builds, fetches the triggers bits from the contract
+// to help verify that the contract is compatible with the flags passed in to the
+// entrypoint macros.
+//
+// Arguments:
+// * fTriggers - If nonzero, this function asserts the contract says GC_TRIGGERS,
+// else this function asserts the contract says GC_NOTRIGGER
+//
+inline void AssertTriggersContract(BOOL fTriggers)
+{
+ // NOTE: This function cannot have contract, as this function needs to inspect the
+ // contract of the calling function
+
+ ClrDebugState * pClrDbgState = GetClrDebugState(FALSE);
+ if ((pClrDbgState == NULL) || (pClrDbgState->GetContractStackTrace() == NULL))
+ {
+ return;
+ }
+
+ UINT testMask = pClrDbgState->GetContractStackTrace()->m_testmask;
+
+ if (fTriggers)
+ {
+ // If this assert fires, the contract says GC_NOTRIGGER (or is disabled), but the
+ // PROFILER_TO_CLR_ENTRYPOINT* / CLR_TO_PROFILER_ENTRYPOINT* macro implies triggers
+ _ASSERTE((testMask & Contract::GC_Mask) == Contract::GC_Triggers);
+ }
+ else
+ {
+ // If this assert fires, the contract says GC_TRIGGERS, but the
+ // PROFILER_TO_CLR_ENTRYPOINT* / CLR_TO_PROFILER_ENTRYPOINT* macro implies no
+ // trigger
+ _ASSERTE(((testMask & Contract::GC_Mask) == Contract::GC_NoTrigger) ||
+ ((testMask & Contract::GC_Disabled) != 0));
+
+ }
+}
+#endif //ENABLE_CONTRACTS
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIUtility::LogNoInterfaceError
+//
+// Description:
+// Simple helper to log an IDS_E_PROF_NO_CALLBACK_IFACE event
+//
+// Arguments:
+// * iidRequested - IID to convert to string and log (as insertion string)
+// * wszCLSID - CLSID to log (as insertion string)
+//
+
+// static
+inline void ProfilingAPIUtility::LogNoInterfaceError(REFIID iidRequested, LPCWSTR wszCLSID)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ WCHAR wszIidRequested[39];
+ if (StringFromGUID2(iidRequested, wszIidRequested, lengthof(wszIidRequested)) == 0)
+ {
+        // This is super-paranoid, but just use an empty string if GUIDs
+        // ever get bigger than we expect.
+ _ASSERTE(!"IID buffer too small.");
+ wszIidRequested[0] = L'\0';
+ }
+ ProfilingAPIUtility::LogProfError(IDS_E_PROF_NO_CALLBACK_IFACE, wszCLSID, wszIidRequested);
+}
+
+#ifdef _DEBUG
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIUtility::ShouldInjectProfAPIFault
+//
+// Description:
+// Determines whether COMPLUS_ProfAPIFault is set to a bitmask value
+// with the specified flag set
+//
+// Return Value:
+// Nonzero if the specified fault flag is set; 0 otherwise.
+//
+
+// static
+inline BOOL ProfilingAPIUtility::ShouldInjectProfAPIFault(ProfAPIFaultFlags faultFlag)
+{
+ return ((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ProfAPIFault) & faultFlag) != 0);
+}
+
+#endif // _DEBUG
+
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIUtility::LoadProfilerForAttach
+//
+// Description:
+// Simple, public wrapper around code:ProfilingAPIUtility::LoadProfiler to load a
+// profiler in response to an Attach request.
+//
+// Arguments:
+// * pClsid - Profiler's CLSID
+// * wszProfilerDLL - Profiler's DLL
+// * pvClientData - Client data received from trigger, to send to profiler DLL
+// * cbClientData - Size of client data
+//
+// Return Value:
+// HRESULT indicating success or failure
+//
+
+// static
+inline HRESULT ProfilingAPIUtility::LoadProfilerForAttach(
+ const CLSID * pClsid,
+ LPCWSTR wszProfilerDLL,
+ LPVOID pvClientData,
+ UINT cbClientData,
+ DWORD dwConcurrentGCWaitTimeoutInMs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+
+ // This causes events to be logged, which loads resource strings,
+ // which takes locks.
+ CAN_TAKE_LOCK;
+
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ // Need string version of CLSID for event log messages
+ WCHAR wszClsid[40];
+ if (StringFromGUID2(*pClsid, wszClsid, _countof(wszClsid)) == 0)
+ {
+ _ASSERTE(!"StringFromGUID2 failed!");
+ return E_UNEXPECTED;
+ }
+
+ // Inform user we're about to try attaching the profiler
+ ProfilingAPIUtility::LogProfInfo(IDS_PROF_ATTACH_REQUEST_RECEIVED, wszClsid);
+
+ return LoadProfiler(
+ kAttachLoad,
+ pClsid,
+ wszClsid,
+ wszProfilerDLL,
+ pvClientData,
+ cbClientData,
+ dwConcurrentGCWaitTimeoutInMs);
+}
+
+inline /* static */ CRITSEC_COOKIE ProfilingAPIUtility::GetStatusCrst()
+{
+ LIMITED_METHOD_CONTRACT;
+ return s_csStatus;
+}
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIUtility::IncEvacuationCounter
+//
+// Description:
+// Simple helper to increase the evacuation counter inside an EE thread by one
+//
+// Arguments:
+// * pThread - pointer to an EE Thread
+//
+// static
+FORCEINLINE void ProfilingAPIUtility::IncEvacuationCounter(Thread * pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ if (pThread)
+ pThread->IncProfilerEvacuationCounter();
+}
+
+// ----------------------------------------------------------------------------
+// ProfilingAPIUtility::DecEvacuationCounter
+//
+// Description:
+// Simple helper to decrease the evacuation counter inside an EE thread by one
+//
+// Arguments:
+// * pThread - pointer to an EE Thread
+//
+// static
+FORCEINLINE void ProfilingAPIUtility::DecEvacuationCounter(Thread * pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ if (pThread)
+ pThread->DecProfilerEvacuationCounter();
+}
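+// Usage sketch (illustrative only; this holder type is hypothetical): the
+// helpers above are meant to be paired by a RAII holder around each entry into
+// profiler code, so the detach logic can tell when all threads have evacuated:
+//
+//     class EvacuationCounterHolder
+//     {
+//     public:
+//         EvacuationCounterHolder(Thread * pThread) : m_pThread(pThread)
+//         {
+//             ProfilingAPIUtility::IncEvacuationCounter(m_pThread);
+//         }
+//         ~EvacuationCounterHolder()
+//         {
+//             ProfilingAPIUtility::DecEvacuationCounter(m_pThread);
+//         }
+//     private:
+//         Thread * m_pThread;
+//     };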
+
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+#endif //__PROFILING_HELPER_INL__
diff --git a/src/vm/proftoeeinterfaceimpl.cpp b/src/vm/proftoeeinterfaceimpl.cpp
new file mode 100644
index 0000000000..b12185c6e8
--- /dev/null
+++ b/src/vm/proftoeeinterfaceimpl.cpp
@@ -0,0 +1,9888 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// FILE: ProfToEEInterfaceImpl.cpp
+//
+// This module implements the ICorProfilerInfo* interfaces, which allow the
+// Profiler to communicate with the EE. This allows the Profiler DLL to get
+// access to private EE data structures and other things that should never be
+// exported outside of the EE.
+//
+
+//
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE!
+//
+// PLEASE READ!
+//
+// There are strict rules for how to implement ICorProfilerInfo* methods. Please read
+// http://devdiv/sites/CLR/ProfilingAPI/Shared%20Documents/ImplementingProfilability.doc
+// to understand the rules and why they exist.
+//
+// As a reminder, here is a short summary of your responsibilities. Every PUBLIC
+// ENTRYPOINT (from profiler to EE) must have:
+//
+// - An entrypoint macro at the top (see code:#P2CLRRestrictionsOverview). Your choices are:
+// PROFILER_TO_CLR_ENTRYPOINT_SYNC (typical choice):
+// Indicates the method may only be called by the profiler from within
+// a callback (from EE to profiler).
+// PROFILER_TO_CLR_ENTRYPOINT_CALLABLE_ON_INIT_ONLY
+// Even more restrictive, this indicates the method may only be called
+// from within the Initialize() callback
+// PROFILER_TO_CLR_ENTRYPOINT_ASYNC
+// Indicates this method may be called anytime.
+// THIS IS DANGEROUS. PLEASE READ ABOVE DOC FOR GUIDANCE ON HOW TO SAFELY
+// CODE AN ASYNCHRONOUS METHOD.
+// You may use variants of these macros ending in _EX that accept bit flags (see
+// code:ProfToClrEntrypointFlags) if you need to specify additional parameters to how
+// the entrypoint should behave, though typically you can omit the flags and the
+// default (kP2EENone) will be used.
+//
+// - A complete contract block with comments over every contract choice. Wherever
+// possible, use the preferred contracts (if not possible, you must comment why):
+// NOTHROW
+// GC_NOTRIGGER
+// MODE_ANY
+// CANNOT_TAKE_LOCK
+// SO_NOT_MAINLINE
+// (EE_THREAD_(NOT)_REQUIRED are unenforced and are thus optional. If you wish
+// to specify these, EE_THREAD_NOT_REQUIRED is preferred.)
+//          Note that the preferred contracts in this file are DIFFERENT from the preferred
+// contracts for eetoprofinterfaceimpl.cpp.
+//
+// Private helper functions in this file do not have the same preferred contracts as
+// public entrypoints, and they should be contracted following the same guidelines
+// as per the rest of the EE.
+//
+// NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE!
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+//
+//
+// #P2CLRRestrictionsOverview
+//
+// The public ICorProfilerInfo(N) functions below have different restrictions on when
+// they're allowed to be called. Listed roughly in order from most to least restrictive:
+// * PROFILER_TO_CLR_ENTRYPOINT_CALLABLE_ON_INIT_ONLY: Functions that are only
+// allowed to be called while the profiler is initializing on startup, from
+// inside the profiler's ICorProfilerCallback::Initialize method
+// * PROFILER_TO_CLR_ENTRYPOINT_SYNC: Functions that may be called from within any of
+// the profiler's callbacks, or anytime from a thread created by the profiler.
+// These functions may only be called by profilers loaded on startup
+// * PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach): Same as above,
+// except these may be called by startup AND attaching profilers.
+// * PROFILER_TO_CLR_ENTRYPOINT_ASYNC: Functions that may be called at any time and
+// from any thread by a profiler loaded on startup
+// * PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach): Same as above,
+// except these may be called by startup AND attaching profilers.
+//
+// The above restrictions are lifted for certain tests that run with these environment
+// variables set. (These are only available on DEBUG builds--including chk--not retail
+// builds.)
+// * COMPLUS_TestOnlyEnableSlowELTHooks:
+// * If nonzero, then on startup the runtime will act as if a profiler was loaded
+// on startup and requested ELT slow-path (even if no profiler is loaded on
+// startup). This will also allow the SetEnterLeaveFunctionHooks(2) info
+// functions to be called outside of Initialize(). If a profiler later
+// attaches and calls these functions, then the slow-path wrapper will call
+//        into the profiler's ELT hooks.
+// * COMPLUS_TestOnlyEnableObjectAllocatedHook:
+// * If nonzero, then on startup the runtime will act as if a profiler was loaded
+// on startup and requested ObjectAllocated callback (even if no profiler is loaded
+// on startup). If a profiler later attaches and calls these functions, then the
+//        ObjectAllocated notifications will call into the profiler's ObjectAllocated callback.
+// * COMPLUS_TestOnlyEnableICorProfilerInfo:
+//     * If nonzero, then attaching profilers are allowed to call the ICorProfilerInfo interface,
+// which would otherwise be disallowed for attaching profilers
+// * COMPLUS_TestOnlyAllowedEventMask
+// * If a profiler needs to work around the restrictions of either
+// COR_PRF_ALLOWABLE_AFTER_ATTACH or COR_PRF_MONITOR_IMMUTABLE it may set
+// this environment variable. Its value should be a bitmask containing all
+// the flags that are:
+// * normally immutable or disallowed after attach, AND
+// * that the test plans to set after startup and / or by an attaching
+// profiler.
+//
+//
+
+//
+// ======================================================================================
+
+#include "common.h"
+#include <posterror.h>
+#include "proftoeeinterfaceimpl.h"
+#include "proftoeeinterfaceimpl.inl"
+#include "dllimport.h"
+#include "threads.h"
+#include "method.hpp"
+#include "vars.hpp"
+#include "dbginterface.h"
+#include "corprof.h"
+#include "class.h"
+#include "object.h"
+#include "ceegen.h"
+#include "eeconfig.h"
+#include "generics.h"
+#include "gcinfo.h"
+#include "safemath.h"
+#include "threadsuspend.h"
+#include "inlinetracking.h"
+
+#ifdef PROFILING_SUPPORTED
+#include "profilinghelper.h"
+#include "profilinghelper.inl"
+#include "eetoprofinterfaceimpl.inl"
+#include "profilingenumerators.h"
+#endif
+
+#include "profdetach.h"
+
+#include "metadataexports.h"
+
+//---------------------------------------------------------------------------------------
+// Helpers
+
+// An OR'd combination of these flags may be specified in the _EX entrypoint macros to
+// customize the behavior.
+enum ProfToClrEntrypointFlags
+{
+ // Just use the default behavior (this one is used if the non-_EX entrypoint macro is
+ // specified without any flags).
+ kP2EENone = 0x00000000,
+
+ // By default, Info functions are not allowed to be used by an attaching profiler.
+ // Specify this flag to override the default.
+ kP2EEAllowableAfterAttach = 0x00000001,
+
+ // This info method has a GC_TRIGGERS contract. Whereas contracts are debug-only,
+ // this flag is used in retail builds as well.
+ kP2EETriggers = 0x00000002,
+};
+
+// Default versions of the entrypoint macros use kP2EENone if no
+// ProfToClrEntrypointFlags are specified
+
+#define PROFILER_TO_CLR_ENTRYPOINT_ASYNC(logParams) \
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EENone, logParams)
+
+#define PROFILER_TO_CLR_ENTRYPOINT_SYNC(logParams) \
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EENone, logParams)
+
+// ASYNC entrypoints log and ensure an attaching profiler isn't making a call that's
+// only supported by startup profilers.
+
+#define CHECK_IF_ATTACHING_PROFILER_IS_ALLOWED_HELPER(p2eeFlags) \
+ do \
+ { \
+ if ((((p2eeFlags) & kP2EEAllowableAfterAttach) == 0) && \
+ (g_profControlBlock.pProfInterface->IsLoadedViaAttach())) \
+ { \
+ LOG((LF_CORPROF, \
+ LL_ERROR, \
+ "**PROF: ERROR: Returning CORPROF_E_UNSUPPORTED_FOR_ATTACHING_PROFILER " \
+ "due to a call illegally made by an attaching profiler \n")); \
+ return CORPROF_E_UNSUPPORTED_FOR_ATTACHING_PROFILER; \
+ } \
+ } while(0)
+
+#ifdef _DEBUG
+
+#define CHECK_IF_ATTACHING_PROFILER_IS_ALLOWED(p2eeFlags) \
+ do \
+ { \
+ if (!((&g_profControlBlock)->fTestOnlyEnableICorProfilerInfo)) \
+ { \
+ CHECK_IF_ATTACHING_PROFILER_IS_ALLOWED_HELPER(p2eeFlags); \
+ } \
+ } while(0)
+
+
+
+#else //_DEBUG
+
+#define CHECK_IF_ATTACHING_PROFILER_IS_ALLOWED(p2eeFlags) \
+ do \
+ { \
+ CHECK_IF_ATTACHING_PROFILER_IS_ALLOWED_HELPER(p2eeFlags); \
+ } while(0)
+
+#endif //_DEBUG
+
+#define PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(p2eeFlags, logParams) \
+ do \
+ { \
+ INCONTRACT(AssertTriggersContract(((p2eeFlags) & kP2EETriggers))); \
+ _ASSERTE(g_profControlBlock.curProfStatus.Get() != kProfStatusNone); \
+ LOG(logParams); \
+ /* If profiler was neutered, disallow call */ \
+ if (g_profControlBlock.curProfStatus.Get() == kProfStatusDetaching) \
+ { \
+ LOG((LF_CORPROF, \
+ LL_ERROR, \
+ "**PROF: ERROR: Returning CORPROF_E_PROFILER_DETACHING " \
+ "due to a post-neutered profiler call\n")); \
+ return CORPROF_E_PROFILER_DETACHING; \
+ } \
+ CHECK_IF_ATTACHING_PROFILER_IS_ALLOWED(p2eeFlags); \
+ } while(0)
+
+// SYNC entrypoints must ensure the current EE Thread shows evidence that we're
+// inside a callback. If there's no EE Thread, then we automatically "pass"
+// the check, and the SYNC call is allowed.
+#define PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(p2eeFlags, logParams) \
+ do \
+ { \
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(p2eeFlags, logParams); \
+ DWORD __dwExpectedCallbackState = COR_PRF_CALLBACKSTATE_INCALLBACK; \
+ if (((p2eeFlags) & kP2EETriggers) != 0) \
+ { \
+ __dwExpectedCallbackState |= COR_PRF_CALLBACKSTATE_IN_TRIGGERS_SCOPE; \
+ } \
+ if (!AreCallbackStateFlagsSet(__dwExpectedCallbackState)) \
+ { \
+ LOG((LF_CORPROF, \
+ LL_ERROR, \
+ "**PROF: ERROR: Returning CORPROF_E_UNSUPPORTED_CALL_SEQUENCE " \
+ "due to illegal asynchronous profiler call\n")); \
+ return CORPROF_E_UNSUPPORTED_CALL_SEQUENCE; \
+ } \
+ } while(0)
+
+// INIT_ONLY entrypoints must ensure we're executing inside the profiler's
+// Initialize() implementation on startup (attach init doesn't count!).
+#define PROFILER_TO_CLR_ENTRYPOINT_CALLABLE_ON_INIT_ONLY(logParams) \
+ do \
+ { \
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC(logParams); \
+ if (g_profControlBlock.curProfStatus.Get() != kProfStatusInitializingForStartupLoad && \
+ g_profControlBlock.curProfStatus.Get() != kProfStatusInitializingForAttachLoad) \
+ { \
+ return CORPROF_E_CALL_ONLY_FROM_INIT; \
+ } \
+ } while(0)
+
+// This macro is used to ensure that the current thread is not in a forbid
+// suspend region. Some methods are allowed to be called asynchronously,
+// but some of them call JIT functions that take a reader lock. So we need to ensure
+// the current thread hasn't been hijacked by a profiler while it was holding the writer lock.
+// Checking the ForbidSuspendThread region is a sufficient test for this
+#define FAIL_IF_IN_FORBID_SUSPEND_REGION() \
+ do \
+ { \
+ Thread * __pThread = GetThreadNULLOk(); \
+ if ((__pThread != NULL) && (__pThread->IsInForbidSuspendRegion())) \
+ { \
+ return CORPROF_E_ASYNCHRONOUS_UNSAFE; \
+ } \
+ } while(0)
+
+//
+// This type is an overlay onto the exported type COR_PRF_FRAME_INFO.
+// The first four fields *must* line up with the same fields in the
+// exported type. After that, we can add to the end as we wish.
+//
+typedef struct _COR_PRF_FRAME_INFO_INTERNAL {
+ USHORT size;
+ USHORT version;
+ FunctionID funcID;
+ UINT_PTR IP;
+ void *extraArg;
+ LPVOID thisArg;
+} COR_PRF_FRAME_INFO_INTERNAL, *PCOR_PRF_FRAME_INFO_INTERNAL;
+
+//
+// After we ship a product with a certain struct type for COR_PRF_FRAME_INFO_INTERNAL
+// we have that as a version. If we change that in a later product, we can increment
+// the counter below and then we can properly do versioning.
+//
+#define COR_PRF_FRAME_INFO_INTERNAL_CURRENT_VERSION 1
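+// Layout sketch (illustrative only; COR_PRF_FRAME_INFO_EXPORTED is a
+// hypothetical name standing in for the exported struct): the overlay
+// invariant above could be checked at compile time with offset assertions:
+//
+//     static_assert(offsetof(COR_PRF_FRAME_INFO_INTERNAL, funcID) ==
+//                   offsetof(COR_PRF_FRAME_INFO_EXPORTED, funcID),
+//                   "overlay fields must line up");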
+
+
+//---------------------------------------------------------------------------------------
+//
+// Converts TypeHandle to a ClassID
+//
+// Arguments:
+// th - TypeHandle to convert
+//
+// Return Value:
+// Requested ClassID.
+//
+
+ClassID TypeHandleToClassID(TypeHandle th)
+{
+ WRAPPER_NO_CONTRACT;
+ return reinterpret_cast<ClassID> (th.AsPtr());
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Converts TypeHandle for a non-generic type to a ClassID
+//
+// Arguments:
+// th - TypeHandle to convert
+//
+// Return Value:
+// Requested ClassID. NULL if th represents a generic type
+//
+#ifdef PROFILING_SUPPORTED
+
+static ClassID NonGenericTypeHandleToClassID(TypeHandle th)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+    if ((!th.IsNull()) && (th.HasInstantiation()))
+    {
+        return NULL;
+    }
+
+ return TypeHandleToClassID(th);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Converts MethodDesc * to FunctionID
+//
+// Arguments:
+// pMD - MethodDesc * to convert
+//
+// Return Value:
+// Requested FunctionID
+//
+
+static FunctionID MethodDescToFunctionID(MethodDesc * pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+ return reinterpret_cast< FunctionID > (pMD);
+}
+
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+// Converts FunctionID to MethodDesc *
+//
+// Arguments:
+// functionID - FunctionID to convert
+//
+// Return Value:
+// MethodDesc * requested
+//
+
+MethodDesc *FunctionIdToMethodDesc(FunctionID functionID)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ MethodDesc *pMethodDesc;
+
+ pMethodDesc = reinterpret_cast< MethodDesc* >(functionID);
+
+ _ASSERTE(pMethodDesc != NULL);
+ return pMethodDesc;
+}
+
+// (See comments for ArrayKindFromTypeHandle below.)
+typedef enum
+{
+ ARRAY_KIND_TYPEDESC, // Normal, garden-variety typedesc array
+ ARRAY_KIND_METHODTABLE, // Weirdo array with its own unshared methodtable (e.g., System.Object[])
+ ARRAY_KIND_NOTARRAY, // Not an array
+} ARRAY_KIND;
+
+//---------------------------------------------------------------------------------------
+//
+// A couple Info calls need to understand what constitutes an "array", and what
+// kinds of arrays there are. ArrayKindFromTypeHandle tries to put some of this
+// knowledge in a single place
+//
+// Arguments:
+// th - TypeHandle to inspect
+//
+// Return Value:
+// ARRAY_KIND describing th
+//
+
+inline ARRAY_KIND ArrayKindFromTypeHandle(TypeHandle th)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (th.IsArray())
+ {
+ return ARRAY_KIND_TYPEDESC;
+ }
+
+ if (!th.IsTypeDesc() && th.GetMethodTable()->IsArray())
+ {
+ return ARRAY_KIND_METHODTABLE;
+ }
+
+ return ARRAY_KIND_NOTARRAY;
+}
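+// Usage sketch (illustrative only): Info calls that care about arrays
+// typically branch on the result:
+//
+//     switch (ArrayKindFromTypeHandle(th))
+//     {
+//         case ARRAY_KIND_TYPEDESC:    /* use the array TypeDesc */   break;
+//         case ARRAY_KIND_METHODTABLE: /* use th.GetMethodTable() */  break;
+//         case ARRAY_KIND_NOTARRAY:    /* not an array */             break;
+//     }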
+
+#ifdef PROFILING_SUPPORTED
+
+//---------------------------------------------------------------------------------------
+// ModuleILHeap IUnknown implementation
+//
+// Function headers unnecessary, as MSDN adequately documents IUnknown
+//
+
+ULONG ModuleILHeap::AddRef()
+{
+ // Lifetime of this object is controlled entirely by the CLR. This
+ // is created on first request, and is automatically destroyed when
+ // the profiler is detached.
+ return 1;
+}
+
+
+ULONG ModuleILHeap::Release()
+{
+ // Lifetime of this object is controlled entirely by the CLR. This
+ // is created on first request, and is automatically destroyed when
+ // the profiler is detached.
+ return 1;
+}
+
+
+HRESULT ModuleILHeap::QueryInterface(REFIID riid, void ** pp)
+{
+ HRESULT hr = S_OK;
+
+ if (pp == NULL)
+ {
+ return E_POINTER;
+ }
+
+ *pp = 0;
+ if (riid == IID_IUnknown)
+ {
+ *pp = static_cast<IUnknown *>(this);
+ }
+ else if (riid == IID_IMethodMalloc)
+ {
+ *pp = static_cast<IMethodMalloc *>(this);
+ }
+ else
+ {
+ hr = E_NOINTERFACE;
+ }
+
+ if (hr == S_OK)
+ {
+ // CLR manages lifetime of this object, but in case that changes (or
+ // this code gets copied/pasted elsewhere), we'll still AddRef here so
+ // QI remains a good citizen either way.
+ AddRef();
+ }
+ return hr;
+}
+
+//---------------------------------------------------------------------------------------
+// Profiler entrypoint to allocate space from this module's heap.
+//
+// Arguments
+// cb - size in bytes of allocation request
+//
+// Return value
+// pointer to allocated memory, or NULL if there was an error
+
+void * STDMETHODCALLTYPE ModuleILHeap::Alloc(ULONG cb)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // (see GC_TRIGGERS comment below)
+ CAN_TAKE_LOCK;
+
+ // Allocations using loader heaps below enter a critsec, which switches
+ // to preemptive, which is effectively a GC trigger
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORPROF, LL_INFO1000, "**PROF: ModuleILHeap::Alloc 0x%08xp.\n", cb));
+
+ if (cb == 0)
+ {
+ return NULL;
+ }
+
+ return new (nothrow) BYTE[cb];
+}
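+// Usage sketch (illustrative only, profiler side): a profiler reaches this
+// allocator through ICorProfilerInfo::GetILFunctionBodyAllocator, then
+// allocates space for a rewritten method body:
+//
+//     IMethodMalloc * pMalloc = NULL;
+//     if (SUCCEEDED(pInfo->GetILFunctionBodyAllocator(moduleId, &pMalloc)))
+//     {
+//         void * pNewBody = pMalloc->Alloc(cbNewMethodBody);
+//         // ... copy in the new IL, then call SetILFunctionBody ...
+//     }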
+
+//---------------------------------------------------------------------------------------
+// The one and only instance of the IL heap
+
+ModuleILHeap ModuleILHeap::s_Heap;
+
+//---------------------------------------------------------------------------------------
+// Implementation of ProfToEEInterfaceImpl's IUnknown
+
+//
+// The VM controls the lifetime of ProfToEEInterfaceImpl, not the
+// profiler. We'll automatically take care of cleanup when profilers
+// unload and detach.
+//
+
+ULONG STDMETHODCALLTYPE ProfToEEInterfaceImpl::AddRef()
+{
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+}
+
+ULONG STDMETHODCALLTYPE ProfToEEInterfaceImpl::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+}
+
+COM_METHOD ProfToEEInterfaceImpl::QueryInterface(REFIID id, void ** pInterface)
+{
+ if (pInterface == NULL)
+ {
+ return E_POINTER;
+ }
+
+ if (id == IID_ICorProfilerInfo)
+ {
+ *pInterface = static_cast<ICorProfilerInfo *>(this);
+ }
+ else if (id == IID_ICorProfilerInfo2)
+ {
+ *pInterface = static_cast<ICorProfilerInfo2 *>(this);
+ }
+ else if (id == IID_ICorProfilerInfo3)
+ {
+ *pInterface = static_cast<ICorProfilerInfo3 *>(this);
+ }
+ else if (id == IID_ICorProfilerInfo4)
+ {
+ *pInterface = static_cast<ICorProfilerInfo4 *>(this);
+ }
+ else if (id == IID_ICorProfilerInfo5)
+ {
+ *pInterface = static_cast<ICorProfilerInfo5 *>(this);
+ }
+ else if (id == IID_ICorProfilerInfo6)
+ {
+ *pInterface = static_cast<ICorProfilerInfo6 *>(this);
+ }
+ else if (id == IID_IUnknown)
+ {
+ *pInterface = static_cast<IUnknown *>(static_cast<ICorProfilerInfo *>(this));
+ }
+ else
+ {
+ *pInterface = NULL;
+ return E_NOINTERFACE;
+ }
+
+ // CLR manages lifetime of this object, but in case that changes (or
+ // this code gets copied/pasted elsewhere), we'll still AddRef here so
+ // QI remains a good citizen either way.
+ AddRef();
+
+ return S_OK;
+}
+#endif // PROFILING_SUPPORTED
+
+//---------------------------------------------------------------------------------------
+//
+// GC-related helpers. These are called from elsewhere in the EE to determine profiler
+// state, and to update the profiling API with info from the GC.
+//
+
+//---------------------------------------------------------------------------------------
+//
+// ProfilerObjectAllocatedCallback is called if an attached profiler has requested
+// ObjectAllocated callbacks.
+//
+// Arguments:
+// objref - Reference to newly-allocated object
+// classId - ClassID of newly-allocated object
+//
+
+void __stdcall ProfilerObjectAllocatedCallback(OBJECTREF objref, ClassID classId)
+{
+ CONTRACTL
+    {
+        THROWS;
+        GC_TRIGGERS;
+        MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ TypeHandle th = OBJECTREFToObject(objref)->GetTypeHandle();
+
+ // WARNING: objref can move as a result of the ObjectAllocated() call below if
+ // the profiler causes a GC, so any operations on the objref should occur above
+ // this comment (unless you're prepared to add a GCPROTECT around the objref).
+
+#ifdef PROFILING_SUPPORTED
+ // Notify the profiler of the allocation
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackAllocations());
+ // Note that for generic code we always return uninstantiated ClassIDs and FunctionIDs.
+ // Thus we strip any instantiations of the ClassID (which is really a type handle) here.
+ g_profControlBlock.pProfInterface->ObjectAllocated(
+ (ObjectID) OBJECTREFToObject(objref),
+ classId);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Wrapper around the GC Started callback
+//
+// Arguments:
+// generation - Generation being collected
+// induced - Was this GC induced by GC.Collect?
+//
+
+void __stdcall GarbageCollectionStartedCallback(int generation, BOOL induced)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY; // can be called even on GC threads
+ }
+ CONTRACTL_END;
+
+#ifdef PROFILING_SUPPORTED
+ //
+ // Mark that we are starting a GC. This will allow profilers to do limited object inspection
+ // during callbacks that occur while a GC is happening.
+ //
+ g_profControlBlock.fGCInProgress = TRUE;
+
+ // Notify the profiler of start of the collection
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackGC());
+ BOOL generationCollected[COR_PRF_GC_LARGE_OBJECT_HEAP+1];
+ if (generation == COR_PRF_GC_GEN_2)
+ generation = COR_PRF_GC_LARGE_OBJECT_HEAP;
+ for (int gen = 0; gen <= COR_PRF_GC_LARGE_OBJECT_HEAP; gen++)
+ generationCollected[gen] = gen <= generation;
+
+ g_profControlBlock.pProfInterface->GarbageCollectionStarted(
+ COR_PRF_GC_LARGE_OBJECT_HEAP+1,
+ generationCollected,
+ induced ? COR_PRF_GC_INDUCED : COR_PRF_GC_OTHER);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Wrapper around the GC Finished callback
+//
+
+void __stdcall GarbageCollectionFinishedCallback()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY; // can be called even on GC threads
+ }
+ CONTRACTL_END;
+
+#ifdef PROFILING_SUPPORTED
+ // Notify the profiler of end of the collection
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackGC());
+ g_profControlBlock.pProfInterface->GarbageCollectionFinished();
+ END_PIN_PROFILER();
+ }
+
+ // Mark that GC is finished.
+ g_profControlBlock.fGCInProgress = FALSE;
+#endif // PROFILING_SUPPORTED
+}
+
+#ifdef PROFILING_SUPPORTED
+//---------------------------------------------------------------------------------------
+//
+// Describes a GC generation by number and address range
+//
+
+struct GenerationDesc
+{
+ int generation;
+ BYTE *rangeStart;
+ BYTE *rangeEnd;
+ BYTE *rangeEndReserved;
+};
+
+struct GenerationTable
+{
+ ULONG count;
+ ULONG capacity;
+    static const ULONG defaultCapacity = 4; // that's the minimum for 3 generations plus the large object heap
+ GenerationTable *prev;
+ GenerationDesc *genDescTable;
+#ifdef _DEBUG
+ ULONG magic;
+#define GENERATION_TABLE_MAGIC 0x34781256
+#define GENERATION_TABLE_BAD_MAGIC 0x55aa55aa
+#endif
+};
+
+
+//---------------------------------------------------------------------------------------
+//
+// This is a callback used by the GC when we call GCHeap::DescrGenerationsToProfiler
+// (from UpdateGenerationBounds() below). The GC gives us generation information through
+// this callback, which we use to update the GenerationDesc in the corresponding
+// GenerationTable
+//
+// Arguments:
+// context - The containing GenerationTable
+// generation - Generation number
+// rangeStart - Address where generation starts
+// rangeEnd - Address where generation ends
+// rangeEndReserved - Address where generation reserved space ends
+//
+
+// static
+static void GenWalkFunc(void * context,
+ int generation,
+ BYTE * rangeStart,
+ BYTE * rangeEnd,
+ BYTE * rangeEndReserved)
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY; // can be called even on GC threads
+ PRECONDITION(CheckPointer(context));
+ PRECONDITION(0 <= generation && generation <= 3);
+ PRECONDITION(CheckPointer(rangeStart));
+ PRECONDITION(CheckPointer(rangeEnd));
+ PRECONDITION(CheckPointer(rangeEndReserved));
+ } CONTRACT_END;
+
+ GenerationTable *generationTable = (GenerationTable *)context;
+
+ _ASSERTE(generationTable->magic == GENERATION_TABLE_MAGIC);
+
+ ULONG count = generationTable->count;
+ if (count >= generationTable->capacity)
+ {
+ ULONG newCapacity = generationTable->capacity == 0 ? GenerationTable::defaultCapacity : generationTable->capacity * 2;
+ GenerationDesc *newGenDescTable = new (nothrow) GenerationDesc[newCapacity];
+ if (newGenDescTable == NULL)
+ {
+ // if we can't allocate a bigger table, we'll have to ignore this call
+ RETURN;
+ }
+ memcpy(newGenDescTable, generationTable->genDescTable, sizeof(generationTable->genDescTable[0]) * generationTable->count);
+ delete[] generationTable->genDescTable;
+ generationTable->genDescTable = newGenDescTable;
+ generationTable->capacity = newCapacity;
+ }
+ _ASSERTE(count < generationTable->capacity);
+
+ GenerationDesc *genDescTable = generationTable->genDescTable;
+
+ genDescTable[count].generation = generation;
+ genDescTable[count].rangeStart = rangeStart;
+ genDescTable[count].rangeEnd = rangeEnd;
+ genDescTable[count].rangeEndReserved = rangeEndReserved;
+
+ generationTable->count = count + 1;
+}
+
+// This is the table of generation bounds updated by the gc
+// and read by the profiler. So this is a single writer,
+// multiple readers scenario.
+static GenerationTable *s_currentGenerationTable;
+
+// The generation table is updated atomically by replacing the
+// pointer to it. The only tricky part is knowing when
+// the old table can be deleted.
+static Volatile<LONG> s_generationTableLock;
+
+// This is just so we can assert there's a single writer
+#ifdef ENABLE_CONTRACTS
+static Volatile<LONG> s_generationTableWriterCount;
+#endif
+#endif // PROFILING_SUPPORTED
+
+//---------------------------------------------------------------------------------------
+//
+// This is called from the gc to push a new set of generation bounds
+//
+
+void __stdcall UpdateGenerationBounds()
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY; // can be called even on GC threads
+#ifdef PROFILING_SUPPORTED
+ PRECONDITION(FastInterlockIncrement(&s_generationTableWriterCount) == 1);
+ POSTCONDITION(FastInterlockDecrement(&s_generationTableWriterCount) == 0);
+#endif // PROFILING_SUPPORTED
+ } CONTRACT_END;
+
+#ifdef PROFILING_SUPPORTED
+ // Notify the profiler of start of the collection
+ if (CORProfilerTrackGC())
+ {
+ // generate a new generation table
+ GenerationTable *newGenerationTable = new (nothrow) GenerationTable();
+ if (newGenerationTable == NULL)
+ RETURN;
+ newGenerationTable->count = 0;
+ newGenerationTable->capacity = GenerationTable::defaultCapacity;
+ // if there is already a current table, use its count as a guess for the capacity
+ if (s_currentGenerationTable != NULL)
+ newGenerationTable->capacity = s_currentGenerationTable->count;
+ newGenerationTable->prev = NULL;
+ newGenerationTable->genDescTable = new (nothrow) GenerationDesc[newGenerationTable->capacity];
+ if (newGenerationTable->genDescTable == NULL)
+ newGenerationTable->capacity = 0;
+
+#ifdef _DEBUG
+ newGenerationTable->magic = GENERATION_TABLE_MAGIC;
+#endif
+ // fill in the values by calling back into the gc, which will report
+ // the ranges by calling GenWalkFunc for each one
+ GCHeap *hp = GCHeap::GetGCHeap();
+ hp->DescrGenerationsToProfiler(GenWalkFunc, newGenerationTable);
+
+ // remember the old table and plug in the new one
+ GenerationTable *oldGenerationTable = s_currentGenerationTable;
+ s_currentGenerationTable = newGenerationTable;
+
+ // WARNING: tricky code!
+ //
+ // We sample the generation table lock *after* plugging in the new table
+ // We do so using an interlocked operation so the cpu can't reorder
+ // the write to the s_currentGenerationTable with the increment.
+ // If the interlocked increment returns 1, we know nobody can be using
+ // the old table (readers increment the lock before using the table,
+ // and decrement it afterwards). Any new readers coming in
+ // will use the new table. So it's safe to delete the old
+ // table.
+ // On the other hand, if the interlocked increment returns
+ // something other than one, we put the old table on a list
+ // dangling off of the new one. Next time around, we'll try again
+ // deleting any old tables.
+ if (FastInterlockIncrement(&s_generationTableLock) == 1)
+ {
+ // We know nobody can be using any of the old tables
+ while (oldGenerationTable != NULL)
+ {
+ _ASSERTE(oldGenerationTable->magic == GENERATION_TABLE_MAGIC);
+#ifdef _DEBUG
+ oldGenerationTable->magic = GENERATION_TABLE_BAD_MAGIC;
+#endif
+ GenerationTable *temp = oldGenerationTable;
+ oldGenerationTable = oldGenerationTable->prev;
+ delete[] temp->genDescTable;
+ delete temp;
+ }
+ }
+ else
+ {
+ // put the old table on a list
+ newGenerationTable->prev = oldGenerationTable;
+ }
+ FastInterlockDecrement(&s_generationTableLock);
+ }
+#endif // PROFILING_SUPPORTED
+ RETURN;
+}
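+// Reader-side sketch (illustrative only): per the protocol above, a reader
+// must bracket its use of the table with the same interlocked counter so the
+// writer can tell when it is safe to delete old tables:
+//
+//     FastInterlockIncrement(&s_generationTableLock);
+//     GenerationTable *pTable = s_currentGenerationTable;
+//     // ... read pTable->genDescTable[0 .. pTable->count - 1] ...
+//     FastInterlockDecrement(&s_generationTableLock);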
+
+#ifdef PROFILING_SUPPORTED
+
+//---------------------------------------------------------------------------------------
+//
+// Determines whether we are in a window to allow object inspection.
+//
+// Return Value:
+// Returns S_OK if we can determine that we are in a window to allow object
+// inspection. Otherwise a failure HRESULT is returned
+//
+
+HRESULT AllowObjectInspection()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+        MODE_ANY; // this function's main purpose is to test for preemptive mode dynamically, so static contract enforcement is not appropriate
+ }
+ CONTRACTL_END;
+
+ //
+ // Check first to see if we are in the process of doing a GC and presume that the profiler
+ // is making this object inspection from the same thread that notified of a valid ObjectID.
+ //
+ if (g_profControlBlock.fGCInProgress)
+ {
+ return S_OK;
+ }
+
+ //
+ // Thus we must have a managed thread, and it must be in coop mode.
+ // (That will also guarantee we're in a callback).
+ //
+ Thread * pThread = GetThreadNULLOk();
+
+ if (pThread == NULL)
+ {
+ return CORPROF_E_NOT_MANAGED_THREAD;
+ }
+
+    // Note: this is why we don't enforce a cooperative-mode contract. The whole point
+    // is that clients of this function want to return a robust error when the thread
+    // is not in cooperative mode. So technically they are MODE_ANY, although the only
+    // true preemptive-mode support they offer is graceful failure in that case.
+ if (!pThread->PreemptiveGCDisabled())
+ {
+ return CORPROF_E_UNSUPPORTED_CALL_SEQUENCE;
+ }
+
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// It's generally unsafe for profiling API code to call Get(GCSafe)TypeHandle() on
+// objects, since we can encounter objects on the heap whose types belong to unloading
+// AppDomains. In such cases, getting the type handle of the object could AV. Use this
+// function instead, which will return NULL for potentially unloaded types.
+//
+// Arguments:
+// pObj - Object * whose ClassID is desired
+//
+// Return Value:
+// ClassID of the object, if it's safe to look it up. Else NULL.
+//
+
+ClassID SafeGetClassIDFromObject(Object * pObj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ if (!NativeThreadInGC()) { MODE_COOPERATIVE; }
+ }
+ CONTRACTL_END;
+
+ TypeHandle th = pObj->GetGCSafeTypeHandleIfPossible();
+ if(th == NULL)
+ {
+ return NULL;
+ }
+
+ return TypeHandleToClassID(th);
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// helper functions for the GC events
+//
+
+
+//---------------------------------------------------------------------------------------
+//
+// Callback of type walk_fn used by GCHeap::WalkObject. Keeps a count of each
+// object reference found.
+//
+// Arguments:
+// pBO - Object reference encountered in walk
+// context - running count of object references encountered
+//
+// Return Value:
+// Always returns TRUE to object walker so it walks the entire object
+//
+
+BOOL CountContainedObjectRef(Object * pBO, void * context)
+{
+ LIMITED_METHOD_CONTRACT;
+ // Increase the count
+ (*((size_t *)context))++;
+
+ return TRUE;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Callback of type walk_fn used by GCHeap::WalkObject. Stores each object reference
+// encountered into an array.
+//
+// Arguments:
+// pBO - Object reference encountered in walk
+// context - Array of locations within the walked object that point to other
+// objects. On entry, (*context) points to the next unfilled array
+// entry. On exit, that location is filled, and (*context) is incremented
+// to point to the next entry.
+//
+// Return Value:
+// Always returns TRUE to object walker so it walks the entire object
+//
+
+BOOL SaveContainedObjectRef(Object * pBO, void * context)
+{
+ LIMITED_METHOD_CONTRACT;
+ // Assign the value
+ **((Object ***)context) = pBO;
+
+ // Now increment the array pointer
+ //
+ // Note that HeapWalkHelper has already walked the references once to count them up,
+ // and then allocated an array big enough to hold those references. First time this
+ // callback is called for a given object, (*context) points to the first entry in the
+ // array. So "blindly" incrementing (*context) here and using it next time around
+ // for the next reference, over and over again, should be safe.
+ (*((Object ***)context))++;
+
+ return TRUE;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Callback of type walk_fn used by the GC when walking the heap, to help profapi and ETW
+// track objects. It orchestrates the use of the above callbacks, which dig into
+// the object references contained in each object this callback encounters.
+//
+// Arguments:
+// pBO - Object reference encountered on the heap
+// pvContext - Pointer to ProfilerWalkHeapContext, containing ETW context built up
+// during this GC, and which remembers if profapi-profiler is supposed to be called.
+//
+// Return Value:
+// BOOL indicating whether the heap walk should continue.
+// TRUE=continue
+// FALSE=stop
+//
+
+extern bool s_forcedGCInProgress;
+
+BOOL HeapWalkHelper(Object * pBO, void * pvContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF * arrObjRef = NULL;
+ size_t cNumRefs = 0;
+ bool bOnStack = false;
+ MethodTable * pMT = pBO->GetMethodTable();
+
+ ProfilerWalkHeapContext * pProfilerWalkHeapContext = (ProfilerWalkHeapContext *) pvContext;
+
+ if (pMT->ContainsPointersOrCollectible())
+ {
+ // First round through calculates the number of object refs for this class
+ GCHeap::GetGCHeap()->WalkObject(pBO, &CountContainedObjectRef, (void *)&cNumRefs);
+
+ if (cNumRefs > 0)
+ {
+ // Create an array to contain all of the refs for this object
+            bOnStack = (cNumRefs <= 32);
+
+ if (bOnStack)
+ {
+ // It's small enough, so just allocate on the stack
+ arrObjRef = (OBJECTREF *)_alloca(cNumRefs * sizeof(OBJECTREF));
+ }
+ else
+ {
+ // Otherwise, allocate from the heap
+ arrObjRef = new (nothrow) OBJECTREF[cNumRefs];
+
+ if (!arrObjRef)
+ {
+ return FALSE;
+ }
+ }
+
+ // Second round saves off all of the ref values
+ OBJECTREF * pCurObjRef = arrObjRef;
+ GCHeap::GetGCHeap()->WalkObject(pBO, &SaveContainedObjectRef, (void *)&pCurObjRef);
+ }
+ }
+
+ HRESULT hr = E_FAIL;
+
+ if (pProfilerWalkHeapContext->fProfilerPinned)
+ {
+        // Downcasting size_t to ULONG is not safe on WIN64 and could overflow.
+ // However, we have to do this dangerous downcast here to comply with the existing Profiling COM interface.
+ // We are currently evaluating ways to fix this potential overflow issue.
+ hr = g_profControlBlock.pProfInterface->ObjectReference(
+ (ObjectID) pBO,
+ SafeGetClassIDFromObject(pBO),
+ (ULONG) cNumRefs,
+ (ObjectID *) arrObjRef);
+ }
+
+ if (s_forcedGCInProgress &&
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_GCHEAPDUMP_KEYWORD))
+ {
+ ETW::GCLog::ObjectReference(
+ pProfilerWalkHeapContext,
+ pBO,
+ (ULONGLONG) SafeGetClassIDFromObject(pBO),
+ cNumRefs,
+ (Object **) arrObjRef);
+
+ }
+
+ // If the data was not allocated on the stack, need to clean it up.
+ if ((arrObjRef != NULL) && !bOnStack)
+ {
+ delete [] arrObjRef;
+ }
+
+    // Return TRUE iff we want the heap walk to continue. The only way we'd abort the
+ // heap walk is if we're issuing profapi callbacks, and the profapi profiler
+ // intentionally returned a failed HR (as its request that we stop the walk). There's
+ // a potential conflict here. If a profapi profiler and an ETW profiler are both
+ // monitoring the heap dump, and the profapi profiler requests to abort the walk (but
+ // the ETW profiler may not want to abort the walk), then what do we do? The profapi
+ // profiler gets precedence. We don't want to accidentally send more callbacks to a
+ // profapi profiler that explicitly requested an abort. The ETW profiler will just
+ // have to deal. In theory, I could make the code more complex by remembering that a
+ // profapi profiler requested to abort the dump but an ETW profiler is still
+ // attached, and then intentionally inhibit the remainder of the profapi callbacks
+ // for this GC. But that's unnecessary complexity. In practice, it should be
+ // extremely rare that a profapi profiler is monitoring heap dumps AND an ETW
+ // profiler is also monitoring heap dumps.
+ return (pProfilerWalkHeapContext->fProfilerPinned) ? SUCCEEDED(hr) : TRUE;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Callback of type walk_fn used by the GC when walking the heap, to help profapi
+// track objects. This is really just a wrapper around
+// EEToProfInterfaceImpl::AllocByClass, which does the real work
+//
+// Arguments:
+// pBO - Object reference encountered on the heap
+// pv - Structure used by EEToProfInterfaceImpl::AllocByClass to do its work.
+//
+// Return Value:
+// BOOL indicating whether the heap walk should continue.
+// TRUE=continue
+// FALSE=stop
+// Currently always returns TRUE
+//
+
+BOOL AllocByClassHelper(Object * pBO, void * pv)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ _ASSERTE(pv != NULL);
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ // Pass along the call
+ g_profControlBlock.pProfInterface->AllocByClass(
+ (ObjectID) pBO,
+ SafeGetClassIDFromObject(pBO),
+ pv);
+ END_PIN_PROFILER();
+ }
+
+ return TRUE;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Callback of type promote_func called by GC while scanning roots (in GCProfileWalkHeap,
+// called after the collection). Wrapper around EEToProfInterfaceImpl::RootReference2,
+// which does the real work.
+//
+// Arguments:
+//    ppObject - Address of the object reference encountered
+// pSC - ProfilingScanContext * containing the root kind and GCReferencesData used
+// by RootReference2
+// dwFlags - Properties of the root as GC_CALL* constants (this function converts
+//        them to COR_PRF_GC_ROOT_FLAGS).
+//
+
+void ScanRootsHelper(Object** ppObject, ScanContext *pSC, DWORD dwFlags)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // RootReference2 can return E_OUTOFMEMORY, and we're swallowing that.
+ // Furthermore, we can't really handle it because we're callable during GC promotion.
+ // On the other hand, this only means profiling information will be incomplete,
+ // so it's ok to swallow E_OUTOFMEMORY.
+ //
+ FAULT_NOT_FATAL();
+
+ ProfilingScanContext *pPSC = (ProfilingScanContext *)pSC;
+
+ DWORD dwEtwRootFlags = 0;
+ if (dwFlags & GC_CALL_INTERIOR)
+ dwEtwRootFlags |= kEtwGCRootFlagsInterior;
+ if (dwFlags & GC_CALL_PINNED)
+ dwEtwRootFlags |= kEtwGCRootFlagsPinning;
+ void *rootID = NULL;
+ switch (pPSC->dwEtwRootKind)
+ {
+ case kEtwGCRootKindStack:
+ rootID = pPSC->pMD;
+ break;
+
+ case kEtwGCRootKindHandle:
+ _ASSERT(!"Shouldn't see handle here");
+
+ case kEtwGCRootKindFinalizer:
+ default:
+ break;
+ }
+
+ // Notify profiling API of the root
+ if (pPSC->fProfilerPinned)
+ {
+ // Let the profiling code know about this root reference
+ g_profControlBlock.pProfInterface->
+ RootReference2((BYTE *)*ppObject, pPSC->dwEtwRootKind, (EtwGCRootFlags)dwEtwRootFlags, (BYTE *)rootID, &((pPSC)->pHeapId));
+ }
+
+ // Notify ETW of the root
+ if (s_forcedGCInProgress &&
+ ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_INFORMATION,
+ CLR_GCHEAPDUMP_KEYWORD))
+ {
+ ETW::GCLog::RootReference(
+ NULL, // handle is NULL, cuz this is a non-HANDLE root
+ *ppObject, // object being rooted
+ NULL, // pSecondaryNodeForDependentHandle is NULL, cuz this isn't a dependent handle
+ FALSE, // is dependent handle
+ pPSC,
+ dwFlags, // dwGCFlags
+ dwEtwRootFlags);
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Private ProfToEEInterfaceImpl maintenance functions
+//
+
+
+//---------------------------------------------------------------------------------------
+//
+// Initialize ProfToEEInterfaceImpl (including ModuleILHeap statics)
+//
+// Return Value:
+// HRESULT indicating success
+//
+
+HRESULT ProfToEEInterfaceImpl::Init()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ CANNOT_TAKE_LOCK;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORPROF, LL_INFO1000, "**PROF: Init.\n"));
+
+#ifdef _DEBUG
+ if (ProfilingAPIUtility::ShouldInjectProfAPIFault(kProfAPIFault_StartupInternal))
+ {
+ return E_OUTOFMEMORY;
+ }
+#endif //_DEBUG
+
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Destroy ProfToEEInterfaceImpl (including ModuleILHeap statics)
+//
+
+ProfToEEInterfaceImpl::~ProfToEEInterfaceImpl()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORPROF, LL_INFO1000, "**PROF: Terminate.\n"));
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Obsolete info functions
+//
+
+HRESULT ProfToEEInterfaceImpl::GetInprocInspectionInterface(IUnknown **)
+{
+ LIMITED_METHOD_CONTRACT;
+ return E_NOTIMPL;
+}
+
+HRESULT ProfToEEInterfaceImpl::GetInprocInspectionIThisThread(IUnknown **)
+{
+ LIMITED_METHOD_CONTRACT;
+ return E_NOTIMPL;
+}
+
+HRESULT ProfToEEInterfaceImpl::BeginInprocDebugging(BOOL, DWORD *)
+{
+ LIMITED_METHOD_CONTRACT;
+ return E_NOTIMPL;
+}
+
+HRESULT ProfToEEInterfaceImpl::EndInprocDebugging(DWORD)
+{
+ LIMITED_METHOD_CONTRACT;
+ return E_NOTIMPL;
+}
+
+HRESULT ProfToEEInterfaceImpl::SetFunctionReJIT(FunctionID)
+{
+ LIMITED_METHOD_CONTRACT;
+ return E_NOTIMPL;
+}
+
+
+
+
+//---------------------------------------------------------------------------------------
+//
+// *******************************
+// Public Profiler->EE entrypoints
+// *******************************
+//
+// ProfToEEInterfaceImpl implementation of public ICorProfilerInfo* methods
+//
+// NOTE: All ICorProfilerInfo* method implementations must follow the rules stated
+// at the top of this file!
+//
+
+// See corprof.idl / MSDN for detailed comments about each of these public
+// functions, their parameters, return values, etc.
+
+HRESULT ProfToEEInterfaceImpl::SetEventMask(DWORD dwEventMask)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: SetEventMask 0x%08x.\n",
+ dwEventMask));
+
+ _ASSERTE(CORProfilerPresentOrInitializing());
+
+ return g_profControlBlock.pProfInterface->SetEventMask(dwEventMask, 0 /* No high bits */);
+}
+
+HRESULT ProfToEEInterfaceImpl::SetEventMask2(DWORD dwEventsLow, DWORD dwEventsHigh)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: SetEventMask2 0x%08x, 0x%08x.\n",
+ dwEventsLow, dwEventsHigh));
+
+ _ASSERTE(CORProfilerPresentOrInitializing());
+
+ return g_profControlBlock.pProfInterface->SetEventMask(dwEventsLow, dwEventsHigh);
+}
+
+
+HRESULT ProfToEEInterfaceImpl::GetHandleFromThread(ThreadID threadId, HANDLE *phThread)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetHandleFromThread 0x%p.\n",
+ threadId));
+
+ if (!IsManagedThread(threadId))
+ {
+ return E_INVALIDARG;
+ }
+
+ HRESULT hr = S_OK;
+
+ HANDLE hThread = ((Thread *)threadId)->GetThreadHandle();
+
+ if (hThread == INVALID_HANDLE_VALUE)
+ hr = E_INVALIDARG;
+
+ else if (phThread)
+ *phThread = hThread;
+
+ return (hr);
+}
+
+HRESULT ProfToEEInterfaceImpl::GetObjectSize(ObjectID objectId, ULONG *pcSize)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay! Fail at runtime if in preemptive mode via AllowObjectInspection()
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetObjectSize 0x%p.\n",
+ objectId));
+
+ if (objectId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ HRESULT hr = AllowObjectInspection();
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ // Get the object pointer
+ Object *pObj = reinterpret_cast<Object *>(objectId);
+
+ // Get the size
+ if (pcSize)
+ {
+ SIZE_T size = pObj->GetSize();
+
+ if (size < MIN_OBJECT_SIZE)
+ {
+ size = PtrAlign(size);
+ }
+
+ if (size > ULONG_MAX)
+ {
+ *pcSize = ULONG_MAX;
+ return COR_E_OVERFLOW;
+ }
+ *pcSize = (ULONG)size;
+ }
+
+ // Indicate success
+ return (S_OK);
+}
+
+HRESULT ProfToEEInterfaceImpl::GetObjectSize2(ObjectID objectId, SIZE_T *pcSize)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay! Fail at runtime if in preemptive mode via AllowObjectInspection()
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetObjectSize2 0x%p.\n",
+ objectId));
+
+ if (objectId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ HRESULT hr = AllowObjectInspection();
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ // Get the object pointer
+ Object *pObj = reinterpret_cast<Object *>(objectId);
+
+ // Get the size
+ if (pcSize)
+ {
+ SIZE_T size = pObj->GetSize();
+
+ if (size < MIN_OBJECT_SIZE)
+ {
+ size = PtrAlign(size);
+ }
+
+ *pcSize = size;
+ }
+
+ // Indicate success
+ return (S_OK);
+}
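+
+// Caller-side sketch (hypothetical profiler code; pInfo4 is an assumed
+// ICorProfilerInfo4 pointer): objects larger than ULONG_MAX bytes make the
+// original GetObjectSize return COR_E_OVERFLOW, so profilers that may see
+// very large arrays should prefer GetObjectSize2:
+//
+//     SIZE_T cbObject = 0;
+//     HRESULT hr = pInfo4->GetObjectSize2(objectId, &cbObject);
+//     if (SUCCEEDED(hr))
+//     {
+//         AccumulateHeapBytes(cbObject);    // hypothetical helper
+//     }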
+
+
+HRESULT ProfToEEInterfaceImpl::IsArrayClass(
+ /* [in] */ ClassID classId,
+ /* [out] */ CorElementType *pBaseElemType,
+ /* [out] */ ClassID *pBaseClassId,
+ /* [out] */ ULONG *pcRank)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: IsArrayClass 0x%p.\n",
+ classId));
+
+ HRESULT hr;
+
+ if (classId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ TypeHandle th = TypeHandle::FromPtr((void *)classId);
+
+ ARRAY_KIND arrayKind = ArrayKindFromTypeHandle(th);
+
+ // If this is indeed an array class, get some info about it
+ switch (arrayKind)
+ {
+ default:
+ {
+ _ASSERTE(!"Unexpected return from ArrayKindFromTypeHandle()");
+ hr = E_UNEXPECTED;
+ break;
+ }
+
+ case ARRAY_KIND_TYPEDESC:
+ {
+ // This is actually an array, so cast it up
+ ArrayTypeDesc *pArr = th.AsArray();
+
+ // Fill in the type if they want it
+ if (pBaseElemType != NULL)
+ {
+ *pBaseElemType = pArr->GetArrayElementTypeHandle().GetVerifierCorElementType();
+ }
+
+ // If this is an array of classes and the caller wants the base type:
+ // if there is no class associated with this type, there's no problem,
+ // because GetClass returns NULL, which is exactly the default we want
+ // to return in this case.
+ // Note that for generic code we always return uninstantiated ClassIDs and FunctionIDs
+ if (pBaseClassId != NULL)
+ {
+ *pBaseClassId = TypeHandleToClassID(pArr->GetTypeParam());
+ }
+
+ // If they want the number of dimensions of the array
+ if (pcRank != NULL)
+ {
+ *pcRank = (ULONG) pArr->GetRank();
+ }
+
+ // S_OK indicates that this was indeed an array
+ hr = S_OK;
+ break;
+ }
+ case ARRAY_KIND_METHODTABLE:
+ {
+ MethodTable *pArrMT = th.GetMethodTable();
+
+ // Fill in the type if they want it
+ if (pBaseElemType != NULL)
+ {
+ *pBaseElemType = pArrMT->GetArrayElementType();
+ }
+
+ // If this is an array of classes and they wish to have the base type.
+ if (pBaseClassId != NULL)
+ {
+ *pBaseClassId = TypeHandleToClassID(pArrMT->GetApproxArrayElementTypeHandle());
+ }
+
+ // If they want the number of dimensions of the array
+ if (pcRank != NULL)
+ {
+ *pcRank = (ULONG) pArrMT->GetRank();
+ }
+
+ // S_OK indicates that this was indeed an array
+ hr = S_OK;
+ break;
+ }
+ case ARRAY_KIND_NOTARRAY:
+ {
+ if (pBaseClassId != NULL)
+ {
+ *pBaseClassId = NULL;
+ }
+
+ // This is not an array, S_FALSE indicates so.
+ hr = S_FALSE;
+ break;
+ }
+ }
+
+ return hr;
+}
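+
+// Caller-side sketch (hypothetical profiler code; pInfo is an assumed
+// ICorProfilerInfo pointer): because *pBaseClassId for an array of arrays is
+// itself an array class, a caller can loop on S_OK to find the ultimate
+// element type. S_FALSE means the ClassID is not an array at all:
+//
+//     ClassID GetUltimateElementClass(ICorProfilerInfo * pInfo, ClassID classId)
+//     {
+//         CorElementType elemType;
+//         ClassID elemClassId;
+//         ULONG rank;
+//         while ((pInfo->IsArrayClass(classId, &elemType, &elemClassId, &rank) == S_OK) &&
+//                (elemClassId != NULL))
+//         {
+//             classId = elemClassId;    // peel one level of array-ness
+//         }
+//         return classId;
+//     }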
+
+HRESULT ProfToEEInterfaceImpl::GetThreadInfo(ThreadID threadId, DWORD *pdwWin32ThreadId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetThreadInfo 0x%p.\n",
+ threadId));
+
+ if (!IsManagedThread(threadId))
+ {
+ return E_INVALIDARG;
+ }
+
+ if (pdwWin32ThreadId)
+ {
+ *pdwWin32ThreadId = ((Thread *)threadId)->GetOSThreadId();
+ }
+
+ return S_OK;
+}
+
+HRESULT ProfToEEInterfaceImpl::GetCurrentThreadID(ThreadID *pThreadId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetCurrentThreadID.\n"));
+
+ HRESULT hr = S_OK;
+
+ // No longer assert that GetThread doesn't return NULL, since callbacks
+ // can now occur on non-managed threads (such as the GC helper threads)
+ Thread * pThread = GetThreadNULLOk();
+
+ // If pThread is null, then the thread has never run managed code and
+ // so has no ThreadID
+ if (!IsManagedThread(pThread))
+ hr = CORPROF_E_NOT_MANAGED_THREAD;
+
+ // Only provide value if they want it
+ else if (pThreadId)
+ *pThreadId = (ThreadID) pThread;
+
+ return (hr);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Internal helper function to wrap a call into the JIT manager to get information about
+// a managed function based on IP
+//
+// Arguments:
+// ip - IP address inside managed function of interest
+// ppCodeInfo - [out] information about the managed function based on IP
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+//
+
+HRESULT GetFunctionInfoInternal(LPCBYTE ip, EECodeInfo * pCodeInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+
+ GC_NOTRIGGER;
+ EE_THREAD_NOT_REQUIRED;
+ CAN_TAKE_LOCK;
+ CANNOT_RETAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
+ // host (SQL). Corners will be cut to ensure this is the case
+ if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; }
+ }
+ CONTRACTL_END;
+
+ // Before calling into the code manager, ensure the GC heap has been
+ // initialized--else the code manager will assert trying to get info from the heap.
+ if (!IsGarbageCollectorFullyInitialized())
+ {
+ return CORPROF_E_NOT_YET_AVAILABLE;
+ }
+
+ if (ShouldAvoidHostCalls())
+ {
+ ExecutionManager::ReaderLockHolder rlh(NoHostCalls);
+ if (!rlh.Acquired())
+ {
+ // Couldn't get the info. Try again later
+ return CORPROF_E_ASYNCHRONOUS_UNSAFE;
+ }
+
+ pCodeInfo->Init((PCODE)ip, ExecutionManager::ScanNoReaderLock);
+ }
+ else
+ {
+ pCodeInfo->Init((PCODE)ip);
+ }
+
+ if (!pCodeInfo->IsValid())
+ {
+ return E_FAIL;
+ }
+
+ return S_OK;
+}
+
+
+HRESULT GetFunctionFromIPInternal(LPCBYTE ip, EECodeInfo * pCodeInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ EE_THREAD_NOT_REQUIRED;
+ CAN_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (pCodeInfo != NULL);
+
+ HRESULT hr = GetFunctionInfoInternal(ip, pCodeInfo);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ // never return a method that the user of the profiler API cannot use
+ if (pCodeInfo->GetMethodDesc()->IsNoMetadata())
+ {
+ return E_FAIL;
+ }
+
+ return S_OK;
+}
+
+
+HRESULT ProfToEEInterfaceImpl::GetFunctionFromIP(LPCBYTE ip, FunctionID * pFunctionId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Querying the code manager requires a reader lock. However, see
+ // code:#DisableLockOnAsyncCalls
+ DISABLED(CAN_TAKE_LOCK);
+
+ // Asynchronous functions can be called at arbitrary times when runtime
+ // is holding locks that cannot be reentered without causing deadlock.
+ // This contract detects any attempts to reenter locks held at the time
+ // this function was called.
+ CANNOT_RETAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
+ // host (SQL). Corners will be cut to ensure this is the case
+ if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; }
+ }
+ CONTRACTL_END;
+
+ // See code:#DisableLockOnAsyncCalls
+ PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetFunctionFromIP 0x%p.\n",
+ ip));
+
+ // This call is allowed asynchronously, but the JIT functions take a reader lock.
+ // So we need to ensure the current thread hasn't been hijacked by a profiler while
+ // it was holding the writer lock. Checking the ForbidSuspendThread region is a
+ // sufficient test for this
+ FAIL_IF_IN_FORBID_SUSPEND_REGION();
+
+ HRESULT hr = S_OK;
+
+ EECodeInfo codeInfo;
+
+ hr = GetFunctionFromIPInternal(ip, &codeInfo);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ if (pFunctionId)
+ {
+ *pFunctionId = MethodDescToFunctionID(codeInfo.GetMethodDesc());
+ }
+
+ return S_OK;
+}
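+
+// Usage sketch (hypothetical profiler code): a sampling profiler that has
+// captured an instruction pointer from a hijacked thread can map it back to
+// a FunctionID like this; a failing HRESULT simply means the IP cannot be
+// attributed to profiler-visible managed code, so the sample is treated as
+// native. RecordManagedSample / RecordNativeSample are hypothetical helpers:
+//
+//     FunctionID functionId = NULL;
+//     HRESULT hr = pInfo->GetFunctionFromIP((LPCBYTE)sampledIP, &functionId);
+//     if (SUCCEEDED(hr))
+//     {
+//         RecordManagedSample(functionId);
+//     }
+//     else
+//     {
+//         RecordNativeSample(sampledIP);
+//     }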
+
+
+HRESULT ProfToEEInterfaceImpl::GetFunctionFromIP2(LPCBYTE ip, FunctionID * pFunctionId, ReJITID * pReJitId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Grabbing the rejitid requires entering the rejit manager's hash table & lock,
+ // which can switch us to preemptive mode and trigger GCs
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Grabbing the rejitid requires entering the rejit manager's hash table & lock,
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // See code:#DisableLockOnAsyncCalls
+ PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EEAllowableAfterAttach | kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetFunctionFromIP2 0x%p.\n",
+ ip));
+
+ HRESULT hr = S_OK;
+
+ EECodeInfo codeInfo;
+
+ hr = GetFunctionFromIPInternal(ip, &codeInfo);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ if (pFunctionId)
+ {
+ *pFunctionId = MethodDescToFunctionID(codeInfo.GetMethodDesc());
+ }
+
+ if (pReJitId != NULL)
+ {
+ MethodDesc * pMD = codeInfo.GetMethodDesc();
+ *pReJitId = pMD->GetReJitManager()->GetReJitId(pMD, codeInfo.GetStartAddress());
+ }
+
+ return S_OK;
+}
+
+//*****************************************************************************
+// Given a function ID, retrieve the metadata token and a reader API that
+// can be used against the token.
+//*****************************************************************************
+HRESULT ProfToEEInterfaceImpl::GetTokenAndMetaDataFromFunction(
+ FunctionID functionId,
+ REFIID riid,
+ IUnknown **ppOut,
+ mdToken *pToken)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // PEFile::GetRWImporter and GetReadablePublicMetaDataInterface take locks
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetTokenAndMetaDataFromFunction 0x%p.\n",
+ functionId));
+
+ if (functionId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ HRESULT hr = S_OK;
+
+ MethodDesc *pMD = FunctionIdToMethodDesc(functionId);
+
+ // it's not safe to examine a methoddesc that has not been restored so do not do so
+ if (!pMD->IsRestored())
+ return CORPROF_E_DATAINCOMPLETE;
+
+ if (pToken)
+ {
+ *pToken = pMD->GetMemberDef();
+ }
+
+ // don't bother with any of this module fetching if the metadata access isn't requested
+ if (ppOut)
+ {
+ Module * pMod = pMD->GetModule();
+ hr = pMod->GetReadablePublicMetaDataInterface(ofRead, riid, (LPVOID *) ppOut);
+ }
+
+ return hr;
+}
+
+//---------------------------------------------------------------------------------------
+// What follows are the GetCodeInfo* APIs and their helpers. The two helpers factor out
+// some of the common code to validate parameters and then determine the code info from
+// the start of the code. Each individual GetCodeInfo* API differs in how it uses these
+ // helpers, particularly in how it determines the start of the code (GetCodeInfo3 needs
+// to use the rejit manager to determine the code start, whereas the others do not).
+// Factoring out like this allows us to have statically determined contracts that differ
+// based on whether we need to use the rejit manager, which requires locking and
+// may trigger GCs.
+//---------------------------------------------------------------------------------------
+
+
+HRESULT ValidateParametersForGetCodeInfo(
+ MethodDesc * pMethodDesc,
+ ULONG32 cCodeInfos,
+ COR_PRF_CODE_INFO codeInfos[])
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (pMethodDesc == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ if ((cCodeInfos != 0) && (codeInfos == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ // it's not safe to examine a methoddesc that has not been restored so do not do so
+ if (!pMethodDesc->IsRestored())
+ return CORPROF_E_DATAINCOMPLETE;
+
+ if (pMethodDesc->HasClassOrMethodInstantiation() && pMethodDesc->IsTypicalMethodDefinition())
+ {
+ // In this case, we used to replace pMethodDesc with its canonical instantiation
+ // (FindOrCreateTypicalSharedInstantiation). However, a profiler should never be able
+ // to get to this point anyway, since any MethodDesc a profiler gets from us
+ // cannot be typical (i.e., cannot be a generic with types still left uninstantiated).
+ // We assert here just in case a test proves me wrong, but generally we will
+ // disallow this code path.
+ _ASSERTE(!"Profiler passed a typical method desc (a generic with types still left uninstantiated) to GetCodeInfo2");
+ return E_INVALIDARG;
+ }
+
+ return S_OK;
+}
+
+HRESULT GetCodeInfoFromCodeStart(
+ PCODE start,
+ ULONG32 cCodeInfos,
+ ULONG32 * pcCodeInfos,
+ COR_PRF_CODE_INFO codeInfos[])
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ // We need to take the ExecutionManager reader lock to find the
+ // appropriate jit manager.
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
+ // host (SQL). Corners will be cut to ensure this is the case
+ if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; }
+ }
+ CONTRACTL_END;
+
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ ///////////////////////////////////
+ // Get the code region info for this function. This is a multi step process.
+ //
+ // MethodDesc ==> Code Address ==> JitManager ==>
+ // MethodToken ==> MethodRegionInfo
+ //
+ // (Our caller handled the first step: MethodDesc ==> Code Address.)
+ //
+ // <WIN64-ONLY>
+ //
+ // On WIN64 we have a choice of where to go to find out the function address range size:
+ // GC info (which is what we're doing below on all architectures) or the OS unwind
+ // info, stored in the RUNTIME_FUNCTION structure. The latter produces
+ // a SMALLER size than the former, because the latter excludes some data from
+ // the set we report to the OS for unwind info. For example, switch tables can be
+ // separated out from the regular code and not be reported as OS unwind info, and thus
+ // those addresses will not appear in the range reported by the RUNTIME_FUNCTION gotten via:
+ //
+ // IJitManager* pJitMan = ExecutionManager::FindJitMan((PBYTE)codeInfos[0].startAddress);
+ // PRUNTIME_FUNCTION pfe = pJitMan->GetUnwindInfo((PBYTE)codeInfos[0].startAddress);
+ // *pcCodeInfos = (ULONG) (pfe->EndAddress - pfe->BeginAddress);
+ //
+ // (Note that GCInfo & OS unwind info report the same start address--it's the size that's
+ // different.)
+ //
+ // The advantage of using the GC info is that it's available on all architectures,
+ // and it gives you a more complete picture of the addresses belonging to the function.
+ //
+ // A disadvantage of using GC info is we'll report those extra addresses (like switch
+ // tables) that a profiler might turn back around and use in a call to
+ // GetFunctionFromIP. A profiler may expect we'd be able to map back any address
+ // in the function's GetCodeInfo ranges back to that function's FunctionID (methoddesc). But
+ // querying these extra addresses will cause GetFunctionFromIP to fail, as they're not
+ // actually valid instruction addresses that the IP register can be set to.
+ //
+ // The advantage wins out, so we're going with GC info everywhere.
+ //
+ // </WIN64-ONLY>
+
+ HRESULT hr;
+
+ if (start == NULL)
+ {
+ return CORPROF_E_FUNCTION_NOT_COMPILED;
+ }
+
+ EECodeInfo codeInfo;
+ hr = GetFunctionInfoInternal(
+ (LPCBYTE) start,
+ &codeInfo);
+ if (hr == CORPROF_E_ASYNCHRONOUS_UNSAFE)
+ {
+ _ASSERTE(ShouldAvoidHostCalls());
+ return hr;
+ }
+ if (FAILED(hr))
+ {
+ return CORPROF_E_FUNCTION_NOT_COMPILED;
+ }
+
+ IJitManager::MethodRegionInfo methodRegionInfo;
+ codeInfo.GetMethodRegionInfo(&methodRegionInfo);
+
+ //
+ // Fill out the codeInfo structures with values from the
+ // methodRegionInfo
+ //
+ // Note that we're assuming that a method will never be split into
+ // more than two regions ... this is unlikely to change any time in
+ // the near future.
+ //
+ if (NULL != codeInfos)
+ {
+ if (cCodeInfos > 0)
+ {
+ //
+ // We have to return the two regions in the order that they would appear
+ // if straight-line compiled
+ //
+ if (PCODEToPINSTR(start) == methodRegionInfo.hotStartAddress)
+ {
+ codeInfos[0].startAddress =
+ (UINT_PTR)methodRegionInfo.hotStartAddress;
+ codeInfos[0].size = methodRegionInfo.hotSize;
+ }
+ else
+ {
+ _ASSERTE(methodRegionInfo.coldStartAddress != NULL);
+ codeInfos[0].startAddress =
+ (UINT_PTR)methodRegionInfo.coldStartAddress;
+ codeInfos[0].size = methodRegionInfo.coldSize;
+ }
+
+ if (NULL != methodRegionInfo.coldStartAddress)
+ {
+ if (cCodeInfos > 1)
+ {
+ if (PCODEToPINSTR(start) == methodRegionInfo.hotStartAddress)
+ {
+ codeInfos[1].startAddress =
+ (UINT_PTR)methodRegionInfo.coldStartAddress;
+ codeInfos[1].size = methodRegionInfo.coldSize;
+ }
+ else
+ {
+ codeInfos[1].startAddress =
+ (UINT_PTR)methodRegionInfo.hotStartAddress;
+ codeInfos[1].size = methodRegionInfo.hotSize;
+ }
+ }
+ }
+ }
+ }
+
+ if (NULL != pcCodeInfos)
+ {
+ *pcCodeInfos = (NULL != methodRegionInfo.coldStartAddress) ? 2 : 1;
+ }
+
+
+ return S_OK;
+}
+
+//*****************************************************************************
+// Gets the location and size of a jitted function
+//*****************************************************************************
+
+HRESULT ProfToEEInterfaceImpl::GetCodeInfo(FunctionID functionId, LPCBYTE * pStart, ULONG * pcSize)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // (See locking contract comment in GetCodeInfoHelper.)
+ DISABLED(CAN_TAKE_LOCK);
+
+ // (See locking contract comment in GetCodeInfoHelper.)
+ CANNOT_RETAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
+ // host (SQL). Corners will be cut to ensure this is the case
+ if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; }
+ }
+ CONTRACTL_END;
+
+ // See code:#DisableLockOnAsyncCalls
+ PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
+
+ // This is called asynchronously, but GetCodeInfoHelper() will
+ // ensure we're not called at a dangerous time
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetCodeInfo 0x%p.\n",
+ functionId));
+
+ // GetCodeInfo may be called asynchronously, and the JIT functions take a reader
+ // lock. So we need to ensure the current thread hasn't been hijacked by a profiler while
+ // it was holding the writer lock. Checking the ForbidSuspendThread region is a sufficient test for this
+ FAIL_IF_IN_FORBID_SUSPEND_REGION();
+
+ if (functionId == 0)
+ {
+ return E_INVALIDARG;
+ }
+
+ MethodDesc * pMethodDesc = FunctionIdToMethodDesc(functionId);
+
+ COR_PRF_CODE_INFO codeInfos[2];
+ ULONG32 cCodeInfos;
+
+ HRESULT hr = GetCodeInfoFromCodeStart(
+ pMethodDesc->GetNativeCode(),
+ _countof(codeInfos),
+ &cCodeInfos,
+ codeInfos);
+
+ if ((FAILED(hr)) || (0 == cCodeInfos))
+ {
+ return hr;
+ }
+
+ if (NULL != pStart)
+ {
+ *pStart = reinterpret_cast< LPCBYTE >(codeInfos[0].startAddress);
+ }
+
+ if (NULL != pcSize)
+ {
+ if (!FitsIn<ULONG>(codeInfos[0].size))
+ {
+ return E_UNEXPECTED;
+ }
+ *pcSize = static_cast<ULONG>(codeInfos[0].size);
+ }
+
+ return hr;
+}
+
+HRESULT ProfToEEInterfaceImpl::GetCodeInfo2(FunctionID functionId,
+ ULONG32 cCodeInfos,
+ ULONG32 * pcCodeInfos,
+ COR_PRF_CODE_INFO codeInfos[])
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // (See locking contract comment in GetCodeInfoHelper.)
+ DISABLED(CAN_TAKE_LOCK);
+
+ // (See locking contract comment in GetCodeInfoHelper.)
+ CANNOT_RETAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
+ // host (SQL). Corners will be cut to ensure this is the case
+ if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; }
+
+ PRECONDITION(CheckPointer(pcCodeInfos, NULL_OK));
+ PRECONDITION(CheckPointer(codeInfos, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // See code:#DisableLockOnAsyncCalls
+ PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetCodeInfo2 0x%p.\n",
+ functionId));
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ MethodDesc * pMethodDesc = FunctionIdToMethodDesc(functionId);
+
+ hr = ValidateParametersForGetCodeInfo(pMethodDesc, cCodeInfos, codeInfos);
+ if (SUCCEEDED(hr))
+ {
+ hr = GetCodeInfoFromCodeStart(
+ pMethodDesc->GetNativeCode(),
+ cCodeInfos,
+ pcCodeInfos,
+ codeInfos);
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
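+
+// The hot/cold split handled above is why GetCodeInfo2 follows the usual
+// two-call COM pattern. A minimal caller-side sketch (hypothetical profiler
+// code; pInfo2 is an assumed ICorProfilerInfo2 pointer):
+//
+//     ULONG32 cRegions = 0;
+//     HRESULT hr = pInfo2->GetCodeInfo2(functionId, 0, &cRegions, NULL);
+//     if (SUCCEEDED(hr) && (cRegions > 0))
+//     {
+//         COR_PRF_CODE_INFO regions[2];    // never more than 2 regions today
+//         hr = pInfo2->GetCodeInfo2(functionId, 2, &cRegions, regions);
+//         // regions[0] is the region containing the code start; regions[1],
+//         // if present, is the other (hot or cold) half of the method.
+//     }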
+
+
+HRESULT ProfToEEInterfaceImpl::GetCodeInfo3(FunctionID functionId,
+ ReJITID reJitId,
+ ULONG32 cCodeInfos,
+ ULONG32* pcCodeInfos,
+ COR_PRF_CODE_INFO codeInfos[])
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // We need to access the rejitmanager, which means taking locks, which means we
+ // may trigger a GC
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // We need to access the rejitmanager, which means taking locks
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(pcCodeInfos, NULL_OK));
+ PRECONDITION(CheckPointer(codeInfos, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // See code:#DisableLockOnAsyncCalls
+ PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EEAllowableAfterAttach | kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetCodeInfo3 0x%p 0x%p.\n",
+ functionId, reJitId));
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ MethodDesc * pMethodDesc = FunctionIdToMethodDesc(functionId);
+
+ hr = ValidateParametersForGetCodeInfo(pMethodDesc, cCodeInfos, codeInfos);
+ if (SUCCEEDED(hr))
+ {
+ hr = GetCodeInfoFromCodeStart(
+ // Note here that we must consult the rejit manager to determine the code
+ // start address
+ pMethodDesc->GetReJitManager()->GetCodeStart(pMethodDesc, reJitId),
+ cCodeInfos,
+ pcCodeInfos,
+ codeInfos);
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+
+HRESULT ProfToEEInterfaceImpl::GetEventMask(DWORD * pdwEvents)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO10,
+ "**PROF: GetEventMask.\n"));
+
+ if (pdwEvents == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ *pdwEvents = g_profControlBlock.dwEventMask;
+ return S_OK;
+}
+
+HRESULT ProfToEEInterfaceImpl::GetEventMask2(DWORD *pdwEventsLow, DWORD *pdwEventsHigh)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO10,
+ "**PROF: GetEventMask2.\n"));
+
+ if ((pdwEventsLow == NULL) || (pdwEventsHigh == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ *pdwEventsLow = g_profControlBlock.dwEventMask;
+ *pdwEventsHigh = g_profControlBlock.dwEventMaskHigh;
+ return S_OK;
+}
+
+// static
+void ProfToEEInterfaceImpl::MethodTableCallback(void* context, void* objectUNSAFE)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_NOT_MAINLINE;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // each callback identifies the address of a method table within the frozen object segment
+ // that pointer is an object ID by definition -- object references point to the method table
+ CDynArray< ObjectID >* objects = reinterpret_cast< CDynArray< ObjectID >* >(context);
+
+ *objects->Append() = reinterpret_cast< ObjectID >(objectUNSAFE);
+}
+
+// static
+void ProfToEEInterfaceImpl::ObjectRefCallback(void* context, void* objectUNSAFE)
+{
+ // we don't care about embedded object references, ignore them
+}
+
+
+HRESULT ProfToEEInterfaceImpl::EnumModuleFrozenObjects(ModuleID moduleID,
+ ICorProfilerObjectEnum** ppEnum)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: EnumModuleFrozenObjects 0x%p.\n",
+ moduleID));
+
+ if (NULL == ppEnum)
+ {
+ return E_INVALIDARG;
+ }
+
+ Module* pModule = reinterpret_cast< Module* >(moduleID);
+ if (pModule == NULL || pModule->IsBeingUnloaded())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ // If we don't support frozen objects at all, then just return empty
+ // enumerator.
+ *ppEnum = new ProfilerObjectEnum();
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+
+
+/*
+ * GetArrayObjectInfo
+ *
+ * This function returns information about array objects. In particular, the dimensions
+ * and where the data buffer is stored.
+ *
+ */
+HRESULT ProfToEEInterfaceImpl::GetArrayObjectInfo(ObjectID objectId,
+ ULONG32 cDimensionSizes,
+ ULONG32 pDimensionSizes[],
+ int pDimensionLowerBounds[],
+ BYTE **ppData)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay! Fail at runtime if in preemptive mode via AllowObjectInspection()
+ MODE_ANY;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetArrayObjectInfo 0x%p.\n",
+ objectId));
+
+ if (objectId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ if ((pDimensionSizes == NULL) ||
+ (pDimensionLowerBounds == NULL) ||
+ (ppData == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ HRESULT hr = AllowObjectInspection();
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ Object * pObj = reinterpret_cast<Object *>(objectId);
+
+ // GC callbacks may come from a non-EE thread, which is considered permanently preemptive.
+ // We are about to call some object inspection functions, which require co-op mode.
+ // Given that no managed objects can be changed by managed code until the GC resumes
+ // the runtime, it is safe to violate the mode contract and to inspect managed objects
+ // from a non-EE thread when GetArrayObjectInfo is called within GC callbacks.
+ if (NativeThreadInGC())
+ {
+ CONTRACT_VIOLATION(ModeViolation);
+ return GetArrayObjectInfoHelper(pObj, cDimensionSizes, pDimensionSizes, pDimensionLowerBounds, ppData);
+ }
+
+ return GetArrayObjectInfoHelper(pObj, cDimensionSizes, pDimensionSizes, pDimensionLowerBounds, ppData);
+}
+
+HRESULT ProfToEEInterfaceImpl::GetArrayObjectInfoHelper(Object * pObj,
+ ULONG32 cDimensionSizes,
+ __out_ecount(cDimensionSizes) ULONG32 pDimensionSizes[],
+ __out_ecount(cDimensionSizes) int pDimensionLowerBounds[],
+ BYTE **ppData)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Because of the object pointer parameter, we must be either in CO-OP mode,
+ // or on a non-EE thread in the process of doing a GC.
+ if (!NativeThreadInGC()) { MODE_COOPERATIVE; }
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // Must have an array.
+ MethodTable * pMT = pObj->GetTrueMethodTable();
+ if (!pMT->IsArray())
+ {
+ return E_INVALIDARG;
+ }
+
+ ArrayBase * pArray = static_cast<ArrayBase*> (pObj);
+
+ unsigned rank = pArray->GetRank();
+
+ if (cDimensionSizes < rank)
+ {
+ return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+
+ // Copy range for each dimension (rank)
+ int * pBounds = pArray->GetBoundsPtr();
+ int * pLowerBounds = pArray->GetLowerBoundsPtr();
+
+ unsigned i;
+ for (i = 0; i < rank; i++)
+ {
+ pDimensionSizes[i] = pBounds[i];
+ pDimensionLowerBounds[i] = pLowerBounds[i];
+ }
+
+ // Pointer to data.
+ *ppData = pArray->GetDataPtr();
+
+ return S_OK;
+}
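+
+// Caller-side sketch (hypothetical profiler code; pInfo2 is an assumed
+// ICorProfilerInfo2 pointer, and the array is assumed to have rank <= 8,
+// with rank obtained beforehand from IsArrayClass). The total element count
+// is simply the product of the dimension sizes, and pData addresses the
+// first element:
+//
+//     ULONG32 dimSizes[8];
+//     int dimLowerBounds[8];
+//     BYTE * pData = NULL;
+//     HRESULT hr = pInfo2->GetArrayObjectInfo(
+//         objectId, 8, dimSizes, dimLowerBounds, &pData);
+//     if (SUCCEEDED(hr))
+//     {
+//         SIZE_T cElements = 1;
+//         for (ULONG32 i = 0; i < rank; i++)
+//             cElements *= dimSizes[i];
+//     }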
+
+/*
+ * GetBoxClassLayout
+ *
+ * Returns information about how a particular value type is laid out.
+ *
+ */
+HRESULT ProfToEEInterfaceImpl::GetBoxClassLayout(ClassID classId,
+ ULONG32 *pBufferOffset)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetBoxClassLayout 0x%p.\n",
+ classId));
+
+ if (pBufferOffset == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ if (classId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
+
+ //
+ // This is the incorrect API for arrays. Use GetArrayInfo and GetArrayLayout.
+ //
+ if (!typeHandle.IsValueType())
+ {
+ return E_INVALIDARG;
+ }
+
+ *pBufferOffset = Object::GetOffsetOfFirstField();
+
+ return S_OK;
+}
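+
+// Caller-side sketch (hypothetical profiler code): the returned offset lets a
+// profiler locate the raw value inside a boxed value type. Note this is only
+// safe while the object cannot move, e.g. inside a GC callback. Here objectId
+// is assumed (by other means) to be a boxed Int32:
+//
+//     ULONG32 offsetOfValue = 0;
+//     if (SUCCEEDED(pInfo2->GetBoxClassLayout(classId, &offsetOfValue)))
+//     {
+//         INT32 value = *(INT32 *)((BYTE *)objectId + offsetOfValue);
+//     }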
+
+/*
+ * GetThreadAppDomain
+ *
+ * Returns the app domain currently associated with the given thread.
+ *
+ */
+HRESULT ProfToEEInterfaceImpl::GetThreadAppDomain(ThreadID threadId,
+ AppDomainID *pAppDomainId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetThreadAppDomain 0x%p.\n",
+ threadId));
+
+ if (pAppDomainId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ Thread *pThread;
+
+ if (threadId == NULL)
+ {
+ pThread = GetThreadNULLOk();
+ }
+ else
+ {
+ pThread = (Thread *)threadId;
+ }
+
+ //
+ // If pThread is null, then the thread has never run managed code and
+ // so has no ThreadID.
+ //
+ if (!IsManagedThread(pThread))
+ {
+ return CORPROF_E_NOT_MANAGED_THREAD;
+ }
+
+ *pAppDomainId = (AppDomainID)pThread->GetDomain();
+
+ return S_OK;
+}
+
+
+/*
+ * GetRVAStaticAddress
+ *
+ * This function returns the absolute address of the given field in the given
+ * class. The field must be an RVA Static token.
+ *
+ * Parameters:
+ * classId - the containing class.
+ * fieldToken - the field we are querying.
+ * pAddress - location for storing the resulting address location.
+ *
+ * Returns:
+ * S_OK on success,
+ * E_INVALIDARG if not an RVA static,
+ * CORPROF_E_DATAINCOMPLETE if not yet initialized.
+ *
+ */
+HRESULT ProfToEEInterfaceImpl::GetRVAStaticAddress(ClassID classId,
+ mdFieldDef fieldToken,
+ void **ppAddress)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // FieldDesc::GetStaticAddress takes a lock
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetRVAStaticAddress 0x%p, 0x%08x.\n",
+ classId,
+ fieldToken));
+
+ //
+ // Check for NULL parameters
+ //
+ if ((classId == NULL) || (ppAddress == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ if (GetThread() == NULL)
+ {
+ return CORPROF_E_NOT_MANAGED_THREAD;
+ }
+
+ if (GetAppDomain() == NULL)
+ {
+ return E_FAIL;
+ }
+
+ TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
+
+ //
+ // If this class is not fully restored, that is all the information we can get at this time.
+ //
+ if (!typeHandle.IsRestored())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ //
+ // Get the field descriptor object
+ //
+ FieldDesc *pFieldDesc = typeHandle.GetModule()->LookupFieldDef(fieldToken);
+
+ if (pFieldDesc == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ //
+ // Verify this field is of the right type
+ //
+ if (!pFieldDesc->IsStatic() ||
+ !pFieldDesc->IsRVA() ||
+ pFieldDesc->IsThreadStatic() ||
+ pFieldDesc->IsContextStatic())
+ {
+ return E_INVALIDARG;
+ }
+
+ // It may seem redundant to try to retrieve the same method table from GetEnclosingMethodTable, but classId
+ // leads to the instantiated method table while GetEnclosingMethodTable returns the uninstantiated one.
+ MethodTable *pMethodTable = pFieldDesc->GetEnclosingMethodTable();
+
+ //
+ // Check that the data is available
+ //
+ if (!IsClassOfMethodTableInited(pMethodTable, GetAppDomain()))
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ //
+ // Store the result and return
+ //
+ PTR_VOID pAddress = pFieldDesc->GetStaticAddress(NULL);
+ if (pAddress == NULL)
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ *ppAddress = pAddress;
+
+ return S_OK;
+}
+
+
+/*
+ * GetAppDomainStaticAddress
+ *
+ * This function returns the absolute address of the given field in the given
+ * class in the given app domain. The field must be an App Domain Static token.
+ *
+ * Parameters:
+ * classId - the containing class.
+ * fieldToken - the field we are querying.
+ * appDomainId - the app domain container.
+ * pAddress - location for storing the resulting address location.
+ *
+ * Returns:
+ * S_OK on success,
+ * E_INVALIDARG if not an app domain static,
+ * CORPROF_E_DATAINCOMPLETE if not yet initialized.
+ *
+ */
+HRESULT ProfToEEInterfaceImpl::GetAppDomainStaticAddress(ClassID classId,
+ mdFieldDef fieldToken,
+ AppDomainID appDomainId,
+ void **ppAddress)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // FieldDesc::GetStaticAddress & FieldDesc::GetBaseInDomain take locks
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetAppDomainStaticAddress 0x%p, 0x%08x, 0x%p.\n",
+ classId,
+ fieldToken,
+ appDomainId));
+
+ //
+ // Check for NULL parameters
+ //
+ if ((classId == NULL) || (appDomainId == NULL) || (ppAddress == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ // Some domains, like the system domain, aren't APP domains, and thus don't contain any
+ // statics. See if the profiler is trying to be naughty.
+ if (!((BaseDomain*) appDomainId)->IsAppDomain())
+ {
+ return E_INVALIDARG;
+ }
+
+ TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
+
+ //
+ // If this class is not fully restored, that is all the information we can get at this time.
+ //
+ if (!typeHandle.IsRestored())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ //
+ // Get the field descriptor object
+ //
+ FieldDesc *pFieldDesc = typeHandle.GetModule()->LookupFieldDef(fieldToken);
+
+ if (pFieldDesc == NULL)
+ {
+ //
+ // Give specific error code for literals.
+ //
+ DWORD dwFieldAttrs;
+ if (FAILED(typeHandle.GetModule()->GetMDImport()->GetFieldDefProps(fieldToken, &dwFieldAttrs)))
+ {
+ return E_INVALIDARG;
+ }
+
+ if (IsFdLiteral(dwFieldAttrs))
+ {
+ return CORPROF_E_LITERALS_HAVE_NO_ADDRESS;
+ }
+
+ return E_INVALIDARG;
+ }
+
+ //
+ // Verify this field is of the right type
+ //
+ if (!pFieldDesc->IsStatic() ||
+ pFieldDesc->IsRVA() ||
+ pFieldDesc->IsThreadStatic() ||
+ pFieldDesc->IsContextStatic())
+ {
+ return E_INVALIDARG;
+ }
+
+ // It may seem redundant to try to retrieve the same method table from GetEnclosingMethodTable, but classId
+ // leads to the instantiated method table while GetEnclosingMethodTable returns the uninstantiated one.
+ MethodTable *pMethodTable = pFieldDesc->GetEnclosingMethodTable();
+ AppDomain * pAppDomain = (AppDomain *)appDomainId;
+
+ //
+ // Check that the data is available
+ //
+ if (!IsClassOfMethodTableInited(pMethodTable, pAppDomain))
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ //
+ // Get the address
+ //
+ void *base = (void*)pFieldDesc->GetBaseInDomain(pAppDomain);
+
+ if (base == NULL)
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ //
+ // Store the result and return
+ //
+ PTR_VOID pAddress = pFieldDesc->GetStaticAddress(base);
+ if (pAddress == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ *ppAddress = pAddress;
+
+ return S_OK;
+}
+
+/*
+ * GetThreadStaticAddress
+ *
+ * This function returns the absolute address of the given field in the given
+ * class on the given thread. The field must be a Thread Static token. threadId
+ * must be the current thread ID or NULL, which means to use the current thread ID.
+ *
+ * Parameters:
+ * classId - the containing class.
+ * fieldToken - the field we are querying.
+ * threadId - the thread container, which has to be the current managed thread or
+ * NULL, which means to use the current managed thread.
+ * pAddress - location for storing the resulting address location.
+ *
+ * Returns:
+ * S_OK on success,
+ * E_INVALIDARG if not a thread static,
+ * CORPROF_E_DATAINCOMPLETE if not yet initialized.
+ *
+ */
+HRESULT ProfToEEInterfaceImpl::GetThreadStaticAddress(ClassID classId,
+ mdFieldDef fieldToken,
+ ThreadID threadId,
+ void **ppAddress)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetThreadStaticAddress 0x%p, 0x%08x, 0x%p.\n",
+ classId,
+ fieldToken,
+ threadId));
+
+ //
+ // Verify the value of threadId: it must be the current thread ID or NULL, which means to use the current thread ID.
+ //
+ if ((threadId != NULL) && (threadId != ((ThreadID)GetThread())))
+ {
+ return E_INVALIDARG;
+ }
+
+ threadId = reinterpret_cast<ThreadID>(GetThread());
+ AppDomainID appDomainId = reinterpret_cast<AppDomainID>(GetAppDomain());
+
+ //
+ // Check for NULL parameters
+ //
+ if ((classId == NULL) || (ppAddress == NULL) || !IsManagedThread(threadId) || (appDomainId == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ return GetThreadStaticAddress2(classId,
+ fieldToken,
+ appDomainId,
+ threadId,
+ ppAddress);
+}
+
+/*
+ * GetThreadStaticAddress2
+ *
+ * This function returns the absolute address of the given field in the given
+ * class on the given thread. The field must be a Thread Static token.
+ *
+ * Parameters:
+ * classId - the containing class.
+ * fieldToken - the field we are querying.
+ * appDomainId - the AppDomain container.
+ * threadId - the thread container.
+ * pAddress - location for storing the resulting address location.
+ *
+ * Returns:
+ * S_OK on success,
+ * E_INVALIDARG if not a thread static,
+ * CORPROF_E_DATAINCOMPLETE if not yet initialized.
+ *
+ */
+HRESULT ProfToEEInterfaceImpl::GetThreadStaticAddress2(ClassID classId,
+ mdFieldDef fieldToken,
+ AppDomainID appDomainId,
+ ThreadID threadId,
+ void **ppAddress)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetThreadStaticAddress2 0x%p, 0x%08x, 0x%p, 0x%p.\n",
+ classId,
+ fieldToken,
+ appDomainId,
+ threadId));
+
+
+ if (threadId == NULL)
+ {
+ if (GetThread() == NULL)
+ {
+ return CORPROF_E_NOT_MANAGED_THREAD;
+ }
+
+ threadId = reinterpret_cast<ThreadID>(GetThread());
+ }
+
+ //
+ // Check for NULL parameters
+ //
+ if ((classId == NULL) || (ppAddress == NULL) || !IsManagedThread(threadId) || (appDomainId == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ // Some domains, like the system domain, aren't APP domains, and thus don't contain any
+ // statics. See if the profiler is trying to be naughty.
+ if (!((BaseDomain*) appDomainId)->IsAppDomain())
+ {
+ return E_INVALIDARG;
+ }
+
+ TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
+
+ //
+ // If this class is not fully restored, that is all the information we can get at this time.
+ //
+ if (!typeHandle.IsRestored())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ //
+ // Get the field descriptor object
+ //
+ FieldDesc *pFieldDesc = typeHandle.GetModule()->LookupFieldDef(fieldToken);
+
+ if (pFieldDesc == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ //
+ // Verify this field is of the right type
+ //
+ if (!pFieldDesc->IsStatic() ||
+ !pFieldDesc->IsThreadStatic() ||
+ pFieldDesc->IsRVA() ||
+ pFieldDesc->IsContextStatic())
+ {
+ return E_INVALIDARG;
+ }
+
+ // It may seem redundant to try to retrieve the same method table from GetEnclosingMethodTable, but classId
+ // leads to the instantiated method table while GetEnclosingMethodTable returns the uninstantiated one.
+ MethodTable *pMethodTable = pFieldDesc->GetEnclosingMethodTable();
+ AppDomain * pAppDomain = (AppDomain *)appDomainId;
+
+ //
+ // Check that the data is available
+ //
+ if (!IsClassOfMethodTableInited(pMethodTable, pAppDomain))
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ //
+ // Store the result and return
+ //
+ PTR_VOID pAddress = (void *)(((Thread *)threadId)->GetStaticFieldAddrNoCreate(pFieldDesc, pAppDomain));
+ if (pAddress == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ *ppAddress = pAddress;
+
+ return S_OK;
+}
+
+/*
+ * GetContextStaticAddress
+ *
+ * This function returns the absolute address of the given field in the given
+ * class in the given context. The field must be a Context Static token.
+ *
+ * Parameters:
+ * classId - the containing class.
+ * fieldToken - the field we are querying.
+ * contextId - the context container.
+ * pAddress - location for storing the resulting address location.
+ *
+ * Returns:
+ * S_OK on success,
+ * E_INVALIDARG if not a context static,
+ * CORPROF_E_DATAINCOMPLETE if not yet initialized.
+ *
+ */
+HRESULT ProfToEEInterfaceImpl::GetContextStaticAddress(ClassID classId,
+ mdFieldDef fieldToken,
+ ContextID contextId,
+ void **ppAddress)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetContextStaticAddress 0x%p, 0x%08x, 0x%p.\n",
+ classId,
+ fieldToken,
+ contextId));
+
+#ifdef FEATURE_REMOTING
+
+ //
+ // Check for NULL parameters
+ //
+ if ((classId == NULL) || (contextId == NULL) || (ppAddress == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ if (GetThread() == NULL)
+ {
+ return CORPROF_E_NOT_MANAGED_THREAD;
+ }
+
+ if (GetAppDomain() == NULL)
+ {
+ return E_FAIL;
+ }
+
+ TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
+
+ //
+ // If this class is not fully restored, that is all the information we can get at this time.
+ //
+ if (!typeHandle.IsRestored())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ //
+ // Get the field descriptor object
+ //
+ FieldDesc *pFieldDesc = typeHandle.GetModule()->LookupFieldDef(fieldToken);
+
+ if (pFieldDesc == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ //
+ // Verify this field is of the right type
+ //
+ if (!pFieldDesc->IsStatic() ||
+ !pFieldDesc->IsContextStatic() ||
+ pFieldDesc->IsRVA() ||
+ pFieldDesc->IsThreadStatic())
+ {
+ return E_INVALIDARG;
+ }
+
+ // It may seem redundant to try to retrieve the same method table from GetEnclosingMethodTable, but classId
+ // leads to the instantiated method table while GetEnclosingMethodTable returns the uninstantiated one.
+ MethodTable *pMethodTable = pFieldDesc->GetEnclosingMethodTable();
+
+ //
+ // Check that the data is available
+ //
+ if (!IsClassOfMethodTableInited(pMethodTable, GetAppDomain()))
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ //
+ // Get the context
+ //
+ Context *pContext = reinterpret_cast<Context *>(contextId);
+
+ //
+ // Store the result and return
+ //
+ PTR_VOID pAddress = pContext->GetStaticFieldAddrNoCreate(pFieldDesc);
+ if (pAddress == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ *ppAddress = pAddress;
+
+ return S_OK;
+
+#else // FEATURE_REMOTING
+ return E_NOTIMPL;
+#endif // FEATURE_REMOTING
+}
+
+/*
+ * GetAppDomainsContainingModule
+ *
+ * This function returns the AppDomains in which the given module has been loaded
+ *
+ * Parameters:
+ * moduleId - the module with static variables.
+ * cAppDomainIds - the input size of appDomainIds array.
+ * pcAppDomainIds - the output size of appDomainIds array.
+ * appDomainIds - the array to be filled up with AppDomainIDs containing initialized
+ * static variables from the moduleId's module.
+ *
+ * Returns:
+ * S_OK on success,
+ * E_INVALIDARG for invalid parameters,
+ * CORPROF_E_DATAINCOMPLETE if moduleId's module is not yet initialized.
+ *
+ */
+HRESULT ProfToEEInterfaceImpl::GetAppDomainsContainingModule(ModuleID moduleId,
+ ULONG32 cAppDomainIds,
+ ULONG32 * pcAppDomainIds,
+ AppDomainID appDomainIds[])
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // This method iterates over AppDomains, which adds, then releases, a reference on
+ // each AppDomain iterated. This causes locking, and can cause triggering if the
+ // AppDomain gets destroyed as a result of the release. (See code:AppDomainIterator::Next
+ // and its call to code:AppDomain::Release.)
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // (See comment above GC_TRIGGERS.)
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EEAllowableAfterAttach | kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetAppDomainsContainingModule 0x%p, 0x%08x, 0x%p, 0x%p.\n",
+ moduleId,
+ cAppDomainIds,
+ pcAppDomainIds,
+ appDomainIds));
+
+
+ //
+ // Check for NULL parameters
+ //
+ if ((moduleId == NULL) || ((appDomainIds == NULL) && (cAppDomainIds != 0)) || (pcAppDomainIds == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ Module* pModule = reinterpret_cast< Module* >(moduleId);
+ if (pModule->IsBeingUnloaded())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ // IterateAppDomainContainingModule uses AppDomainIterator, which cannot be called while the current thread
+ // is holding the ThreadStore lock.
+ if (ThreadStore::HoldingThreadStore())
+ {
+ return CORPROF_E_UNSUPPORTED_CALL_SEQUENCE;
+ }
+
+ IterateAppDomainContainingModule iterateAppDomainContainingModule(pModule, cAppDomainIds, pcAppDomainIds, appDomainIds);
+
+ return iterateAppDomainContainingModule.PopulateArray();
+}
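+
+// Caller-side sketch (hypothetical profiler code; pInfo3 is an assumed
+// ICorProfilerInfo3 pointer), using the usual two-call pattern to size the
+// AppDomainID array first:
+//
+//     ULONG32 cAppDomains = 0;
+//     HRESULT hr = pInfo3->GetAppDomainsContainingModule(
+//         moduleId, 0, &cAppDomains, NULL);
+//     if (SUCCEEDED(hr) && (cAppDomains > 0))
+//     {
+//         AppDomainID * appDomainIds = new (std::nothrow) AppDomainID[cAppDomains];
+//         if (appDomainIds != NULL)
+//         {
+//             hr = pInfo3->GetAppDomainsContainingModule(
+//                 moduleId, cAppDomains, &cAppDomains, appDomainIds);
+//             // use appDomainIds[0 .. cAppDomains-1], then free the array
+//             delete[] appDomainIds;
+//         }
+//     }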
+
+
+
+/*
+ * GetStaticFieldInfo
+ *
+ * This function returns a bit mask of the type of statics the
+ * given field is.
+ *
+ * Parameters:
+ * classId - the containing class.
+ * fieldToken - the field we are querying.
+ * pFieldInfo - location for storing the resulting bit mask.
+ *
+ * Returns:
+ * S_OK on success,
+ * E_INVALIDARG if pFieldInfo is NULL
+ *
+ */
+HRESULT ProfToEEInterfaceImpl::GetStaticFieldInfo(ClassID classId,
+ mdFieldDef fieldToken,
+ COR_PRF_STATIC_TYPE *pFieldInfo)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetStaticFieldInfo 0x%p, 0x%08x.\n",
+ classId,
+ fieldToken));
+
+ //
+ // Check for NULL parameters
+ //
+ if ((classId == NULL) || (pFieldInfo == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
+
+ //
+ // If this class is not fully restored, that is all the information we can get at this time.
+ //
+ if (!typeHandle.IsRestored())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ //
+ // Get the field descriptor object
+ //
+ FieldDesc *pFieldDesc = typeHandle.GetModule()->LookupFieldDef(fieldToken);
+
+ if (pFieldDesc == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ *pFieldInfo = COR_PRF_FIELD_NOT_A_STATIC;
+
+ if (pFieldDesc->IsContextStatic())
+ {
+ *pFieldInfo = (COR_PRF_STATIC_TYPE)(*pFieldInfo | COR_PRF_FIELD_CONTEXT_STATIC);
+ }
+
+ if (pFieldDesc->IsRVA())
+ {
+ *pFieldInfo = (COR_PRF_STATIC_TYPE)(*pFieldInfo | COR_PRF_FIELD_RVA_STATIC);
+ }
+
+ if (pFieldDesc->IsThreadStatic())
+ {
+ *pFieldInfo = (COR_PRF_STATIC_TYPE)(*pFieldInfo | COR_PRF_FIELD_THREAD_STATIC);
+ }
+
+ if ((*pFieldInfo == COR_PRF_FIELD_NOT_A_STATIC) && pFieldDesc->IsStatic())
+ {
+ *pFieldInfo = (COR_PRF_STATIC_TYPE)(*pFieldInfo | COR_PRF_FIELD_APP_DOMAIN_STATIC);
+ }
+
+ return S_OK;
+}
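+
+// Caller-side sketch (hypothetical profiler code; classId, fieldToken,
+// appDomainId, and threadId are assumed to come from earlier callbacks): the
+// returned bit mask tells the profiler which Get*StaticAddress variant
+// applies to the field:
+//
+//     COR_PRF_STATIC_TYPE staticType;
+//     if (SUCCEEDED(pInfo2->GetStaticFieldInfo(classId, fieldToken, &staticType)))
+//     {
+//         void * pAddress = NULL;
+//         if (staticType & COR_PRF_FIELD_APP_DOMAIN_STATIC)
+//             pInfo2->GetAppDomainStaticAddress(classId, fieldToken, appDomainId, &pAddress);
+//         else if (staticType & COR_PRF_FIELD_THREAD_STATIC)
+//             pInfo2->GetThreadStaticAddress(classId, fieldToken, threadId, &pAddress);
+//         else if (staticType & COR_PRF_FIELD_RVA_STATIC)
+//             pInfo2->GetRVAStaticAddress(classId, fieldToken, &pAddress);
+//     }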
+
+
+
+/*
+ * GetClassIDInfo2
+ *
+ * This function generalizes GetClassIDInfo for all types, both generic and non-generic. It returns
+ * the module, type token, and an array of instantiation classIDs that were used to instantiate the
+ * given classId.
+ *
+ * Parameters:
+ * classId - The classId (TypeHandle) to query information about.
+ * pParentClassId - The ClassID (TypeHandle) of the parent class.
+ * pModuleId - An optional parameter for returning the module of the class.
+ * pTypeDefToken - An optional parameter for returning the metadata token of the class.
+ * cNumTypeArgs - The count of the size of the array typeArgs
+ * pcNumTypeArgs - Returns the number of elements of typeArgs filled in, or if typeArgs is NULL
+ * the number that would be needed.
+ * typeArgs - An array to store generic type parameters for the class.
+ *
+ * Returns:
+ * S_OK if successful.
+ */
+HRESULT ProfToEEInterfaceImpl::GetClassIDInfo2(ClassID classId,
+ ModuleID *pModuleId,
+ mdTypeDef *pTypeDefToken,
+ ClassID *pParentClassId,
+ ULONG32 cNumTypeArgs,
+ ULONG32 *pcNumTypeArgs,
+ ClassID typeArgs[])
+{
+
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(pParentClassId, NULL_OK));
+ PRECONDITION(CheckPointer(pModuleId, NULL_OK));
+ PRECONDITION(CheckPointer(pTypeDefToken, NULL_OK));
+ PRECONDITION(CheckPointer(pcNumTypeArgs, NULL_OK));
+ PRECONDITION(CheckPointer(typeArgs, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetClassIDInfo2 0x%p.\n",
+ classId));
+
+ //
+ // Verify parameters.
+ //
+ if (classId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ if ((cNumTypeArgs != 0) && (typeArgs == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
+
+ //
+ // If this class is not fully restored, that is all the information we can get at this time.
+ //
+ if (!typeHandle.IsRestored())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ //
+ // Handle globals which don't have the instances.
+ //
+ if (classId == PROFILER_GLOBAL_CLASS)
+ {
+ if (pParentClassId != NULL)
+ {
+ *pParentClassId = NULL;
+ }
+
+ if (pModuleId != NULL)
+ {
+ *pModuleId = PROFILER_GLOBAL_MODULE;
+ }
+
+ if (pTypeDefToken != NULL)
+ {
+ *pTypeDefToken = mdTokenNil;
+ }
+
+ return S_OK;
+ }
+
+ //
+ // Do not do arrays via this API
+ //
+ ARRAY_KIND arrayKind = ArrayKindFromTypeHandle(typeHandle);
+ if (arrayKind == ARRAY_KIND_TYPEDESC || arrayKind == ARRAY_KIND_METHODTABLE)
+ {
+ return CORPROF_E_CLASSID_IS_ARRAY;
+ }
+
+ _ASSERTE (arrayKind == ARRAY_KIND_NOTARRAY);
+
+ if (typeHandle.IsTypeDesc())
+ {
+ // Not an array, but still a typedesc? We don't know how to
+ // deal with those.
+ return CORPROF_E_CLASSID_IS_COMPOSITE;
+ }
+
+ //
+ // Fill in the basic information
+ //
+ if (pParentClassId != NULL)
+ {
+ TypeHandle parentTypeHandle = typeHandle.GetParent();
+ if (!parentTypeHandle.IsNull())
+ {
+ *pParentClassId = TypeHandleToClassID(parentTypeHandle);
+ }
+ else
+ {
+ *pParentClassId = NULL;
+ }
+ }
+
+ if (pModuleId != NULL)
+ {
+ *pModuleId = (ModuleID) typeHandle.GetModule();
+ _ASSERTE(*pModuleId != NULL);
+ }
+
+ if (pTypeDefToken != NULL)
+ {
+ *pTypeDefToken = typeHandle.GetCl();
+ _ASSERTE(*pTypeDefToken != NULL);
+ }
+
+ //
+ // See if they are just looking to get the buffer size.
+ //
+ if (cNumTypeArgs == 0)
+ {
+ if (pcNumTypeArgs != NULL)
+ {
+ *pcNumTypeArgs = typeHandle.GetMethodTable()->GetNumGenericArgs();
+ }
+ return S_OK;
+ }
+
+ //
+ // Adjust the count for the size of the given array.
+ //
+ if (cNumTypeArgs > typeHandle.GetMethodTable()->GetNumGenericArgs())
+ {
+ cNumTypeArgs = typeHandle.GetMethodTable()->GetNumGenericArgs();
+ }
+
+ if (pcNumTypeArgs != NULL)
+ {
+ *pcNumTypeArgs = cNumTypeArgs;
+ }
+
+ //
+ // Copy over the instantiating types.
+ //
+ ULONG32 count;
+ Instantiation inst = typeHandle.GetMethodTable()->GetInstantiation();
+
+ for (count = 0; count < cNumTypeArgs; count ++)
+ {
+ typeArgs[count] = TypeHandleToClassID(inst[count]);
+ }
+
+ return S_OK;
+}
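+
+// Caller-side sketch (hypothetical profiler code): as with the other
+// count/buffer APIs, call once with cNumTypeArgs == 0 to learn the number of
+// type arguments, then again to fetch the instantiation (here capped at an
+// assumed maximum of 8):
+//
+//     ULONG32 cTypeArgs = 0;
+//     HRESULT hr = pInfo2->GetClassIDInfo2(classId, NULL, NULL, NULL,
+//                                          0, &cTypeArgs, NULL);
+//     if (SUCCEEDED(hr) && (cTypeArgs > 0))
+//     {
+//         ClassID typeArgs[8];
+//         if (cTypeArgs > 8)
+//             cTypeArgs = 8;
+//         hr = pInfo2->GetClassIDInfo2(classId, NULL, NULL, NULL,
+//                                      cTypeArgs, &cTypeArgs, typeArgs);
+//     }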
+
+HRESULT ProfToEEInterfaceImpl::GetModuleInfo(ModuleID moduleId,
+ LPCBYTE * ppBaseLoadAddress,
+ ULONG cchName,
+ ULONG * pcchName,
+ __out_ecount_part_opt(cchName, *pcchName) WCHAR wszName[],
+ AssemblyID * pAssemblyId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // See comment in code:ProfToEEInterfaceImpl::GetModuleInfo2
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer((Module *)moduleId, NULL_OK));
+ PRECONDITION(CheckPointer(ppBaseLoadAddress, NULL_OK));
+ PRECONDITION(CheckPointer(pcchName, NULL_OK));
+ PRECONDITION(CheckPointer(wszName, NULL_OK));
+ PRECONDITION(CheckPointer(pAssemblyId, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetModuleInfo 0x%p.\n",
+ moduleId));
+
+ // Parameter validation is taken care of in GetModuleInfo2.
+
+ return GetModuleInfo2(
+ moduleId,
+ ppBaseLoadAddress,
+ cchName,
+ pcchName,
+ wszName,
+ pAssemblyId,
+ NULL); // Don't need module type
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Helper used by GetModuleInfo2 to determine the bitmask of COR_PRF_MODULE_FLAGS for
+// the specified module.
+//
+// Arguments:
+// pModule - Module to get the flags for
+//
+// Return Value:
+// Bitmask of COR_PRF_MODULE_FLAGS corresponding to pModule
+//
+
+DWORD ProfToEEInterfaceImpl::GetModuleFlags(Module * pModule)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK; // IsWindowsRuntimeModule accesses metadata directly, which takes locks
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PEFile * pPEFile = pModule->GetFile();
+ if (pPEFile == NULL)
+ {
+ // This should never happen, but just in case, don't try to determine the
+ // flags without a PEFile.
+ return 0;
+ }
+
+ DWORD dwRet = 0;
+
+ // First, set the flags that are dependent on which PEImage / layout we look at
+ // inside the Module (disk/ngen/flat)
+
+ if (pModule->HasNativeImage())
+ {
+ // NGEN
+ dwRet |= (COR_PRF_MODULE_DISK | COR_PRF_MODULE_NGEN);
+
+ // Intentionally not checking for flat, since NGEN PEImages never have flat
+ // layouts.
+ }
+ else
+ {
+ // Not NGEN.
+
+ if (pPEFile->HasOpenedILimage())
+ {
+ PEImage * pILImage = pPEFile->GetOpenedILimage();
+ if (pILImage->IsFile())
+ {
+ dwRet |= COR_PRF_MODULE_DISK;
+ }
+ if (pPEFile->GetLoadedIL()->IsFlat())
+ {
+ dwRet |= COR_PRF_MODULE_FLAT_LAYOUT;
+ }
+ }
+ }
+
+ if (pModule->IsReflection())
+ {
+ dwRet |= COR_PRF_MODULE_DYNAMIC;
+ }
+
+ if (pModule->IsCollectible())
+ {
+ dwRet |= COR_PRF_MODULE_COLLECTIBLE;
+ }
+
+ if (pModule->IsResource())
+ {
+ dwRet |= COR_PRF_MODULE_RESOURCE;
+ }
+
+ if (pModule->IsWindowsRuntimeModule())
+ {
+ dwRet |= COR_PRF_MODULE_WINDOWS_RUNTIME;
+ }
+
+ return dwRet;
+}
+
+HRESULT ProfToEEInterfaceImpl::GetModuleInfo2(ModuleID moduleId,
+ LPCBYTE * ppBaseLoadAddress,
+ ULONG cchName,
+ ULONG * pcchName,
+ __out_ecount_part_opt(cchName, *pcchName) WCHAR wszName[],
+ AssemblyID * pAssemblyId,
+ DWORD * pdwModuleFlags)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // The pModule->GetScopeName() call below can result in locks getting taken to
+ // access the metadata implementation. However, these locks do not do a mode
+ // change.
+ CAN_TAKE_LOCK;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer((Module *)moduleId, NULL_OK));
+ PRECONDITION(CheckPointer(ppBaseLoadAddress, NULL_OK));
+ PRECONDITION(CheckPointer(pcchName, NULL_OK));
+ PRECONDITION(CheckPointer(wszName, NULL_OK));
+ PRECONDITION(CheckPointer(pAssemblyId, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetModuleInfo2 0x%p.\n",
+ moduleId));
+
+ if (moduleId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ Module * pModule = (Module *) moduleId;
+ if (pModule->IsBeingUnloaded())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+
+ PEFile * pFile = pModule->GetFile();
+
+ // Pick some safe defaults to begin with.
+ if (ppBaseLoadAddress != NULL)
+ *ppBaseLoadAddress = 0;
+ if (wszName != NULL)
+ *wszName = 0;
+ if (pcchName != NULL)
+ *pcchName = 0;
+ if (pAssemblyId != NULL)
+ *pAssemblyId = PROFILER_PARENT_UNKNOWN;
+
+ // Module flags can be determined first without fear of error
+ if (pdwModuleFlags != NULL)
+ *pdwModuleFlags = GetModuleFlags(pModule);
+
+ // Get the module file name
+ LPCWSTR wszFileName = pFile->GetPath();
+ _ASSERTE(wszFileName != NULL);
+ PREFIX_ASSUME(wszFileName != NULL);
+
+ // If there is no filename, which is the case for RefEmit modules and for SQL
+ // modules, then rather than returning an empty string for the name, just use the
+ // module name from metadata (a.k.a. Module.ScopeName). This is required to
+ // support SQL F1 sampling profiling.
+ StackSString strScopeName;
+ LPCUTF8 szScopeName = NULL;
+ if ((*wszFileName == W('\0')) && SUCCEEDED(pModule->GetScopeName(&szScopeName)))
+ {
+ strScopeName.SetUTF8(szScopeName);
+ strScopeName.Normalize();
+ wszFileName = strScopeName.GetUnicode();
+ }
+
+ ULONG trueLen = (ULONG)(wcslen(wszFileName) + 1);
+
+ // Return name of module as required.
+ if (wszName && cchName > 0)
+ {
+ if (cchName < trueLen)
+ {
+ hr = HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+ else
+ {
+ wcsncpy_s(wszName, cchName, wszFileName, trueLen);
+ }
+ }
+
+ // If they request the actual length of the name
+ if (pcchName)
+ *pcchName = trueLen;
+
+ if (ppBaseLoadAddress != NULL && !pFile->IsDynamic())
+ {
+ if (pModule->IsProfilerNotified())
+ {
+ // Set the base load address -- this could be null in certain error conditions
+ *ppBaseLoadAddress = pModule->GetProfilerBase();
+ }
+ else
+ {
+ *ppBaseLoadAddress = NULL;
+ }
+
+ if (*ppBaseLoadAddress == NULL)
+ {
+ hr = CORPROF_E_DATAINCOMPLETE;
+ }
+ }
+
+ // Return the parent assembly for this module if desired.
+ if (pAssemblyId != NULL)
+ {
+ // Lie and say the assembly isn't available until we are loaded (even though it is).
+ // This is for backward compatibility - we may want to change it.
+ if (pModule->IsProfilerNotified())
+ {
+ Assembly *pAssembly = pModule->GetAssembly();
+ _ASSERTE(pAssembly);
+
+ *pAssemblyId = (AssemblyID) pAssembly;
+ }
+ else
+ {
+ hr = CORPROF_E_DATAINCOMPLETE;
+ }
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return (hr);
+}
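+
+// A minimal sketch of the usual two-call buffer pattern against this API
+// (hypothetical profiler-side code, not part of this file; pInfo3 is assumed
+// to be the profiler's cached ICorProfilerInfo3 pointer):
+//
+//   ULONG cchName = 0;
+//   pInfo3->GetModuleInfo2(moduleId, NULL, 0, &cchName, NULL, NULL, NULL);
+//   WCHAR * wszName = new (nothrow) WCHAR[cchName];
+//   LPCBYTE pBase = NULL;
+//   AssemblyID asmId = 0;
+//   DWORD dwFlags = 0;
+//   HRESULT hr = pInfo3->GetModuleInfo2(moduleId, &pBase, cchName, &cchName,
+//                                       wszName, &asmId, &dwFlags);
+//   // hr may be CORPROF_E_DATAINCOMPLETE until ModuleLoadFinished has fired.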
+
+
+/*
+ * Get a metadata interface instance which maps to the given module.
+ * One may ask for the metadata to be opened in read+write mode, but
+ * this will result in slower execution of the program, because
+ * changes made to the metadata cannot be optimized as they were by
+ * the compiler.
+ */
+HRESULT ProfToEEInterfaceImpl::GetModuleMetaData(ModuleID moduleId,
+ DWORD dwOpenFlags,
+ REFIID riid,
+ IUnknown **ppOut)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Currently, this function is technically EE_THREAD_REQUIRED because
+ // some functions in synch.cpp assert that there is a Thread object,
+ // but we might be able to lift that restriction and make this be
+ // EE_THREAD_NOT_REQUIRED.
+
+ // PEFile::GetRWImporter & PEFile::GetEmitter &
+ // GetReadablePublicMetaDataInterface take locks
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetModuleMetaData 0x%p, 0x%08x.\n",
+ moduleId,
+ dwOpenFlags));
+
+ if (moduleId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ // Check for unsupported bits, and return E_INVALIDARG if present
+ if ((dwOpenFlags & ~(ofNoTransform | ofRead | ofWrite)) != 0)
+ {
+ return E_INVALIDARG;
+ }
+
+ Module * pModule;
+ HRESULT hr = S_OK;
+
+ pModule = (Module *) moduleId;
+ _ASSERTE(pModule != NULL);
+ if (pModule->IsBeingUnloaded())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ // Make sure we can get the importer first
+ if (pModule->IsResource())
+ {
+ if (ppOut)
+ *ppOut = NULL;
+ return S_FALSE;
+ }
+
+ // Decide which kind of open mode was requested, and hand back the matching interface.
+ if ((dwOpenFlags & ofWrite) == 0)
+ {
+ // Readable interface
+ return pModule->GetReadablePublicMetaDataInterface(dwOpenFlags, riid, (LPVOID *) ppOut);
+ }
+
+ // Writeable interface
+ IUnknown *pObj = NULL;
+ EX_TRY
+ {
+ pObj = pModule->GetValidatedEmitter();
+ }
+ EX_CATCH_HRESULT_NO_ERRORINFO(hr);
+
+ // Ask for the interface the caller wanted, only if they provide an out param
+ if (SUCCEEDED(hr) && ppOut)
+ hr = pObj->QueryInterface(riid, (void **) ppOut);
+
+ return (hr);
+}
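+
+// A minimal sketch of how a profiler might use this API (hypothetical
+// caller-side code, not part of this file; pInfo is the profiler's cached
+// ICorProfilerInfo pointer):
+//
+//   IMetaDataImport * pImport = NULL;
+//   HRESULT hr = pInfo->GetModuleMetaData(moduleId, ofRead, IID_IMetaDataImport,
+//                                         (IUnknown **)&pImport);
+//   if (SUCCEEDED(hr) && (pImport != NULL)) // S_FALSE/NULL for resource modules
+//   {
+//       // ... enumerate typedefs / methoddefs through pImport ...
+//       pImport->Release();
+//   }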
+
+
+/*
+ * Retrieve a pointer to the body of a method starting at its header.
+ * A method is scoped by the module it lives in. Because this function
+ * is designed to give a tool access to IL before it has been loaded
+ * by the Runtime, it uses the metadata token of the method to find
+ * the instance desired. Note that this function has no effect on
+ * already compiled code.
+ */
+HRESULT ProfToEEInterfaceImpl::GetILFunctionBody(ModuleID moduleId,
+ mdMethodDef methodId,
+ LPCBYTE *ppMethodHeader,
+ ULONG *pcbMethodSize)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // PEFile::CheckLoaded & Module::GetDynamicIL both take a lock
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetILFunctionBody 0x%p, 0x%08x.\n",
+ moduleId,
+ methodId));
+
+ Module * pModule; // Working pointer for real class.
+ ULONG RVA; // Return RVA of the method body.
+ DWORD dwImplFlags; // Flags for the item.
+
+ if ((moduleId == NULL) ||
+ (methodId == mdMethodDefNil) ||
+ (methodId == 0) ||
+ (TypeFromToken(methodId) != mdtMethodDef))
+ {
+ return E_INVALIDARG;
+ }
+
+ pModule = (Module *) moduleId;
+ _ASSERTE(pModule != NULL && methodId != mdMethodDefNil);
+ if (pModule->IsBeingUnloaded())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ // Find the method body based on metadata.
+ IMDInternalImport *pImport = pModule->GetMDImport();
+ _ASSERTE(pImport);
+
+ PEFile *pFile = pModule->GetFile();
+
+ if (!pFile->CheckLoaded())
+ return (CORPROF_E_DATAINCOMPLETE);
+
+ LPCBYTE pbMethod = NULL;
+
+ // Don't return rewritten IL, use the new API to get that.
+ pbMethod = (LPCBYTE) pModule->GetDynamicIL(methodId, FALSE);
+
+ // Method not overridden - get the original copy of the IL by going to metadata
+ if (pbMethod == NULL)
+ {
+ HRESULT hr = S_OK;
+ IfFailRet(pImport->GetMethodImplProps(methodId, &RVA, &dwImplFlags));
+
+ // Check to see if the method has associated IL
+ if ((RVA == 0 && !pFile->IsDynamic()) || !(IsMiIL(dwImplFlags) || IsMiOPTIL(dwImplFlags) || IsMiInternalCall(dwImplFlags)))
+ {
+ return (CORPROF_E_FUNCTION_NOT_IL);
+ }
+
+ EX_TRY
+ {
+ // Get the location of the IL
+ pbMethod = (LPCBYTE) (pModule->GetIL(RVA));
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ }
+
+ // Fill out param if provided
+ if (ppMethodHeader)
+ *ppMethodHeader = pbMethod;
+
+ // Calculate the size of the method itself.
+ if (pcbMethodSize)
+ {
+ if (!FitsIn<ULONG>(PEDecoder::ComputeILMethodSize((TADDR)pbMethod)))
+ {
+ return E_UNEXPECTED;
+ }
+ *pcbMethodSize = static_cast<ULONG>(PEDecoder::ComputeILMethodSize((TADDR)pbMethod));
+ }
+ return (S_OK);
+}
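+
+// A minimal sketch (hypothetical profiler-side code, not part of this file):
+//
+//   LPCBYTE pbMethodHeader = NULL;
+//   ULONG cbMethodSize = 0;
+//   HRESULT hr = pInfo->GetILFunctionBody(moduleId, methodDef,
+//                                         &pbMethodHeader, &cbMethodSize);
+//   // On success, pbMethodHeader points at the COR_ILMETHOD header (tiny or
+//   // fat) and cbMethodSize is the size of the whole method blob, header
+//   // included.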
+
+//---------------------------------------------------------------------------------------
+// Retrieves an IMethodMalloc pointer around a ModuleILHeap instance that will handle
+// allocating heap space for this module (for IL rewriting).
+//
+// Arguments:
+// moduleId - ModuleID this allocator shall allocate for
+// ppMalloc - [out] IMethodMalloc pointer the profiler will use for allocation requests
+// against this module
+//
+// Return value
+// HRESULT indicating success / failure
+//
+// Notes
+// IL method bodies used to have the requirement that they must be referenced as
+// RVA's to the loaded module, which means they come after the module within
+ // METHOD_MAX_RVA. In order to make it easier for a tool to swap out the body of
+ // a method, this allocator ensured that memory was allocated after that point.
+ //
+ // That requirement is now completely gone, so there's nothing terribly special
+ // about this allocator; we just keep it around for legacy purposes.
+
+HRESULT ProfToEEInterfaceImpl::GetILFunctionBodyAllocator(ModuleID moduleId,
+ IMethodMalloc ** ppMalloc)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // ModuleILHeap::FindOrCreateHeap may take a Crst if it
+ // needs to create a new allocator and add it to the list. Taking a crst
+ // switches to preemptive, which is effectively a GC trigger
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // (see GC_TRIGGERS comment)
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetILFunctionBodyAllocator 0x%p.\n",
+ moduleId));
+
+ if ((moduleId == NULL) || (ppMalloc == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ Module * pModule = (Module *) moduleId;
+
+ if (pModule->IsBeingUnloaded() ||
+ !pModule->GetFile()->CheckLoaded())
+ {
+ return (CORPROF_E_DATAINCOMPLETE);
+ }
+
+ *ppMalloc = &ModuleILHeap::s_Heap;
+ return S_OK;
+}
+
+/*
+ * Replaces the method body for a function in a module. This will replace
+ * the RVA of the method in the metadata to point to this new method body,
+ * and adjust any internal data structures as required. This function can
+ * only be called on methods that have never been compiled by the JIT.
+ * Please use GetILFunctionBodyAllocator to allocate space for the new method
+ * body to ensure the buffer is compatible.
+ */
+HRESULT ProfToEEInterfaceImpl::SetILFunctionBody(ModuleID moduleId,
+ mdMethodDef methodId,
+ LPCBYTE pbNewILMethodHeader)
+{
+ CONTRACTL
+ {
+ // PEFile::GetEmitter, Module::SetDynamicIL all throw
+ THROWS;
+
+ // Locks are taken (see CAN_TAKE_LOCK below), which may cause mode switch to
+ // preemptive, which is effectively a GC trigger.
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // Module::SetDynamicIL & PEFile::CheckLoaded & PEFile::GetEmitter take locks
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: SetILFunctionBody 0x%p, 0x%08x.\n",
+ moduleId,
+ methodId));
+
+ if ((moduleId == NULL) ||
+ (methodId == mdMethodDefNil) ||
+ (TypeFromToken(methodId) != mdtMethodDef) ||
+ (pbNewILMethodHeader == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ Module *pModule; // Working pointer for real class.
+ HRESULT hr = S_OK;
+
+ // Cannot set the body for anything other than a method def
+ if (TypeFromToken(methodId) != mdtMethodDef)
+ return (E_INVALIDARG);
+
+ // Cast module to appropriate type
+ pModule = (Module *) moduleId;
+ _ASSERTE (pModule != NULL); // Enforced in CorProfInfo::SetILFunctionBody
+ if (pModule->IsBeingUnloaded())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ // Remember the profiler is doing this, as that means we must never detach it!
+ g_profControlBlock.pProfInterface->SetUnrevertiblyModifiedILFlag();
+
+ // This action is not temporary!
+ // If the profiler wants to be able to revert, it needs to use
+ // the new ReJIT APIs.
+ pModule->SetDynamicIL(methodId, (TADDR)pbNewILMethodHeader, FALSE);
+
+ return (hr);
+}
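+
+// A minimal sketch of the intended IL-rewriting flow (hypothetical
+// profiler-side code, not part of this file; pbRewrittenMethod/cbNewBody are
+// assumed to describe a method body the profiler built elsewhere):
+//
+//   IMethodMalloc * pMalloc = NULL;
+//   if (SUCCEEDED(pInfo->GetILFunctionBodyAllocator(moduleId, &pMalloc)))
+//   {
+//       LPBYTE pbNewBody = (LPBYTE)pMalloc->Alloc(cbNewBody);
+//       memcpy(pbNewBody, pbRewrittenMethod, cbNewBody); // header + IL
+//       pInfo->SetILFunctionBody(moduleId, methodDef, pbNewBody);
+//       pMalloc->Release();
+//   }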
+
+/*
+ * Sets the codemap for the replaced IL function body
+ */
+HRESULT ProfToEEInterfaceImpl::SetILInstrumentedCodeMap(FunctionID functionId,
+ BOOL fStartJit,
+ ULONG cILMapEntries,
+ COR_IL_MAP rgILMapEntries[])
+{
+ CONTRACTL
+ {
+ // Debugger::SetILInstrumentedCodeMap throws
+ THROWS;
+
+ // Debugger::SetILInstrumentedCodeMap triggers
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Debugger::SetILInstrumentedCodeMap takes a lock when it calls Debugger::GetOrCreateMethodInfo
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: SetILInstrumentedCodeMap 0x%p, %d.\n",
+ functionId,
+ fStartJit));
+
+ if (functionId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ if (cILMapEntries >= (MAXULONG / sizeof(COR_IL_MAP)))
+ {
+ // Too big! The allocation below would overflow when calculating the size.
+ return E_INVALIDARG;
+ }
+
+
+#ifdef DEBUGGING_SUPPORTED
+
+ MethodDesc *pMethodDesc = FunctionIdToMethodDesc(functionId);
+
+ // it's not safe to examine a methoddesc that has not been restored so do not do so
+ if (!pMethodDesc->IsRestored())
+ return CORPROF_E_DATAINCOMPLETE;
+
+#ifdef FEATURE_CORECLR
+ if (g_pDebugInterface == NULL)
+ {
+ return CORPROF_E_DEBUGGING_DISABLED;
+ }
+#else
+ // g_pDebugInterface is initialized on startup on desktop CLR, regardless of whether a debugger
+ // or profiler is loaded. So it should always be available.
+ _ASSERTE(g_pDebugInterface != NULL);
+#endif // FEATURE_CORECLR
+
+ COR_IL_MAP * rgNewILMapEntries = new (nothrow) COR_IL_MAP[cILMapEntries];
+
+ if (rgNewILMapEntries == NULL)
+ return E_OUTOFMEMORY;
+
+ memcpy_s(rgNewILMapEntries, sizeof(COR_IL_MAP) * cILMapEntries, rgILMapEntries, sizeof(COR_IL_MAP) * cILMapEntries);
+
+ return g_pDebugInterface->SetILInstrumentedCodeMap(pMethodDesc,
+ fStartJit,
+ cILMapEntries,
+ rgNewILMapEntries);
+
+#else //DEBUGGING_SUPPORTED
+ return E_NOTIMPL;
+#endif //DEBUGGING_SUPPORTED
+}
+
+HRESULT ProfToEEInterfaceImpl::ForceGC()
+{
+ CONTRACTL
+ {
+ // GC calls "new" which throws
+ THROWS;
+
+ // Uh duh, look at the name of the function, dude
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Initiating a GC causes a runtime suspension which requires the
+ // mother of all locks: the thread store lock.
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ ASSERT_NO_EE_LOCKS_HELD();
+
+ // We need to use IsGarbageCollectorFullyInitialized() instead of IsGCHeapInitialized() because
+ // additional GC initialization is done after IsGCHeapInitialized() becomes TRUE,
+ // and before IsGarbageCollectorFullyInitialized() becomes TRUE.
+ if (!IsGarbageCollectorFullyInitialized())
+ {
+ return CORPROF_E_NOT_YET_AVAILABLE;
+ }
+
+ // Disallow the cases where a profiler calls this off a hijacked CLR thread
+ // or inside a profiler callback. (Allow cases where this is a native thread, or a
+ // thread which previously successfully called ForceGC.)
+ Thread * pThread = GetThreadNULLOk();
+ if ((pThread != NULL) &&
+ (!AreCallbackStateFlagsSet(COR_PRF_CALLBACKSTATE_FORCEGC_WAS_CALLED)) &&
+ (pThread->GetFrame() != FRAME_TOP
+ || AreCallbackStateFlagsSet(COR_PRF_CALLBACKSTATE_INCALLBACK)))
+ {
+ LOG((LF_CORPROF,
+ LL_ERROR,
+ "**PROF: ERROR: Returning CORPROF_E_UNSUPPORTED_CALL_SEQUENCE "
+ "due to illegal hijacked profiler call or call from inside another callback\n"));
+ return CORPROF_E_UNSUPPORTED_CALL_SEQUENCE;
+ }
+
+ // NOTE: We cannot use the standard macro PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX
+ // here because the macro ensures that either the current thread is not an
+ // EE thread, or, if it is, that the CALLBACK flag is set. In classic apps
+ // a profiler-owned native thread will not get an EE thread associated with
+ // it, however, in AppX apps, during the first call into the GC on a
+ // profiler-owned thread, the EE will associate an EE-thread with the profiler
+ // thread. As a consequence the second call to ForceGC on the same thread
+ // would fail, since this is now an EE thread and this API is not called from
+ // a callback.
+
+ // First part of the PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX macro:
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(
+ kP2EEAllowableAfterAttach | kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: ForceGC.\n"));
+
+ // This helper, used by ETW and profAPI, ensures a managed thread gets created for
+ // this thread before forcing the GC (to work around Jupiter issues where it's
+ // expected this thread is already managed before starting the GC).
+ HRESULT hr = ETW::GCLog::ForceGCForDiagnostics();
+
+ // If a Thread object was just created for this thread, remember the fact that it
+ // was a ForceGC() thread, so we can be more lenient when doing
+ // COR_PRF_CALLBACKSTATE_INCALLBACK later on from other APIs
+ pThread = GetThreadNULLOk();
+ if (pThread != NULL)
+ {
+ pThread->SetProfilerCallbackStateFlags(COR_PRF_CALLBACKSTATE_FORCEGC_WAS_CALLED);
+ }
+
+ return hr;
+}
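+
+// A minimal usage sketch (hypothetical profiler-side code, not part of this
+// file): ForceGC is meant to be called from a profiler-owned native thread,
+// never from inside a profiler callback:
+//
+//   DWORD WINAPI ForceGCThreadProc(LPVOID) // hypothetical thread entry point
+//   {
+//       HRESULT hr = g_pProfilerInfo->ForceGC(); // profiler's cached
+//       return SUCCEEDED(hr) ? 0 : 1;            // ICorProfilerInfo pointer
+//   }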
+
+
+/*
+ * Returns the ContextID for the current thread.
+ */
+HRESULT ProfToEEInterfaceImpl::GetThreadContext(ThreadID threadId,
+ ContextID *pContextId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetThreadContext 0x%p.\n",
+ threadId));
+
+ if (!IsManagedThread(threadId))
+ {
+ return E_INVALIDARG;
+ }
+
+ // Cast to right type
+ Thread *pThread = reinterpret_cast<Thread *>(threadId);
+
+ // Get the context for the Thread* provided
+ Context *pContext = pThread->GetContext();
+ _ASSERTE(pContext);
+
+ // If there's no current context, return incomplete info
+ if (!pContext)
+ return (CORPROF_E_DATAINCOMPLETE);
+
+ // Set the result and return
+ if (pContextId)
+ *pContextId = reinterpret_cast<ContextID>(pContext);
+
+ return (S_OK);
+}
+
+HRESULT ProfToEEInterfaceImpl::GetClassIDInfo(ClassID classId,
+ ModuleID *pModuleId,
+ mdTypeDef *pTypeDefToken)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetClassIDInfo 0x%p.\n",
+ classId));
+
+ if (classId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ if (pModuleId != NULL)
+ {
+ *pModuleId = NULL;
+ }
+
+ if (pTypeDefToken != NULL)
+ {
+ *pTypeDefToken = NULL;
+ }
+
+ // Handle the global class, which doesn't have instances.
+ if (classId == PROFILER_GLOBAL_CLASS)
+ {
+ if (pModuleId != NULL)
+ {
+ *pModuleId = PROFILER_GLOBAL_MODULE;
+ }
+
+ if (pTypeDefToken != NULL)
+ {
+ *pTypeDefToken = mdTokenNil;
+ }
+ }
+ else if (classId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+ // Get specific data.
+ else
+ {
+ TypeHandle th = TypeHandle::FromPtr((void *)classId);
+
+ if (!th.IsTypeDesc())
+ {
+ if (!th.IsArray())
+ {
+ //
+ // If this class is not fully restored, that is all the information we can get at this time.
+ //
+ if (!th.IsRestored())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ if (pModuleId != NULL)
+ {
+ *pModuleId = (ModuleID) th.GetModule();
+ _ASSERTE(*pModuleId != NULL);
+ }
+
+ if (pTypeDefToken != NULL)
+ {
+ *pTypeDefToken = th.GetCl();
+ _ASSERTE(*pTypeDefToken != NULL);
+ }
+ }
+ }
+ }
+
+ return (S_OK);
+}
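+
+// A minimal sketch (hypothetical profiler-side code, not part of this file):
+//
+//   ModuleID moduleId = NULL;
+//   mdTypeDef tdToken = mdTypeDefNil;
+//   if (SUCCEEDED(pInfo->GetClassIDInfo(classId, &moduleId, &tdToken)))
+//   {
+//       // moduleId + tdToken can be resolved to a type name through the
+//       // metadata interface returned by GetModuleMetaData.
+//   }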
+
+
+HRESULT ProfToEEInterfaceImpl::GetFunctionInfo(FunctionID functionId,
+ ClassID *pClassId,
+ ModuleID *pModuleId,
+ mdToken *pToken)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetFunctionInfo 0x%p.\n",
+ functionId));
+
+ if (functionId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ MethodDesc *pMDesc = (MethodDesc *) functionId;
+ if (!pMDesc->IsRestored())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ MethodTable *pMT = pMDesc->GetMethodTable();
+ if (!pMT->IsRestored())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ ClassID classId = PROFILER_GLOBAL_CLASS;
+
+ if (pMT != NULL)
+ {
+ classId = NonGenericTypeHandleToClassID(TypeHandle(pMT));
+ }
+
+ if (pClassId != NULL)
+ {
+ *pClassId = classId;
+ }
+
+ if (pModuleId != NULL)
+ {
+ *pModuleId = (ModuleID) pMDesc->GetModule();
+ }
+
+ if (pToken != NULL)
+ {
+ *pToken = pMDesc->GetMemberDef();
+ }
+
+ return (S_OK);
+}
+
+/*
+ * GetILToNativeMapping returns a map from IL offsets to native
+ * offsets for this code. An array of COR_DEBUG_IL_TO_NATIVE_MAP
+ * structs will be returned, and some of the ilOffsets in this array
+ * may be the values specified in CorDebugIlToNativeMappingTypes.
+ */
+HRESULT ProfToEEInterfaceImpl::GetILToNativeMapping(FunctionID functionId,
+ ULONG32 cMap,
+ ULONG32 * pcMap, // [out]
+ COR_DEBUG_IL_TO_NATIVE_MAP map[]) // [out]
+{
+ CONTRACTL
+ {
+ // MethodDesc::FindOrCreateTypicalSharedInstantiation throws
+ THROWS;
+
+ // MethodDesc::FindOrCreateTypicalSharedInstantiation triggers, but shouldn't trigger when
+ // called from here. Since the profiler has a valid functionId, the methoddesc for
+ // this code will already have been created. We should be able to enforce this by
+ // passing allowCreate=FALSE to FindOrCreateTypicalSharedInstantiation.
+ DISABLED(GC_NOTRIGGER);
+
+ // Yay!
+ MODE_ANY;
+
+ // The call to g_pDebugInterface->GetILToNativeMapping() below may call
+ // Debugger::AcquireDebuggerLock
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetILToNativeMapping 0x%p.\n",
+ functionId));
+
+ return GetILToNativeMapping2(functionId, 0, cMap, pcMap, map);
+}
+
+HRESULT ProfToEEInterfaceImpl::GetILToNativeMapping2(FunctionID functionId,
+ ReJITID reJitId,
+ ULONG32 cMap,
+ ULONG32 * pcMap, // [out]
+ COR_DEBUG_IL_TO_NATIVE_MAP map[]) // [out]
+ {
+ CONTRACTL
+ {
+ // MethodDesc::FindOrCreateTypicalSharedInstantiation throws
+ THROWS;
+
+ // MethodDesc::FindOrCreateTypicalSharedInstantiation triggers, but shouldn't trigger when
+ // called from here. Since the profiler has a valid functionId, the methoddesc for
+ // this code will already have been created. We should be able to enforce this by
+ // passing allowCreate=FALSE to FindOrCreateTypicalSharedInstantiation.
+ DISABLED(GC_NOTRIGGER);
+
+ // Yay!
+ MODE_ANY;
+
+ // The call to g_pDebugInterface->GetILToNativeMapping() below may call
+ // Debugger::AcquireDebuggerLock
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetILToNativeMapping2 0x%p 0x%p.\n",
+ functionId, reJitId));
+
+ if (functionId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ if ((cMap > 0) &&
+ ((pcMap == NULL) || (map == NULL)))
+ {
+ return E_INVALIDARG;
+ }
+
+ if (reJitId != 0)
+ {
+ return E_NOTIMPL;
+ }
+
+#ifdef DEBUGGING_SUPPORTED
+ // Cast to proper type
+ MethodDesc * pMD = FunctionIdToMethodDesc(functionId);
+
+ if (pMD->HasClassOrMethodInstantiation() && pMD->IsTypicalMethodDefinition())
+ {
+ // In this case, we used to replace pMD with its canonical instantiation
+ // (FindOrCreateTypicalSharedInstantiation). However, a profiler should never be able
+ // to get to this point anyway, since any MethodDesc a profiler gets from us
+ // cannot be typical (i.e., cannot be a generic with types still left uninstantiated).
+ // We assert here just in case a test proves me wrong, but generally we will
+ // disallow this code path.
+ _ASSERTE(!"Profiler passed a typical method desc (a generic with types still left uninstantiated) to GetILToNativeMapping2");
+ return E_INVALIDARG;
+ }
+
+#ifdef FEATURE_CORECLR
+ if (g_pDebugInterface == NULL)
+ {
+ return CORPROF_E_DEBUGGING_DISABLED;
+ }
+#else
+ // g_pDebugInterface is initialized on startup on desktop CLR, regardless of whether a debugger
+ // or profiler is loaded. So it should always be available.
+ _ASSERTE(g_pDebugInterface != NULL);
+#endif // FEATURE_CORECLR
+
+ return (g_pDebugInterface->GetILToNativeMapping(pMD, cMap, pcMap, map));
+#else
+ return E_NOTIMPL;
+#endif
+}
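+
+// A minimal sketch of the two-call pattern (hypothetical profiler-side code,
+// not part of this file; pInfo4 is assumed to be an ICorProfilerInfo4 pointer):
+//
+//   ULONG32 cMap = 0;
+//   pInfo4->GetILToNativeMapping2(functionId, 0, 0, &cMap, NULL); // count only
+//   COR_DEBUG_IL_TO_NATIVE_MAP * pMap = new (nothrow) COR_DEBUG_IL_TO_NATIVE_MAP[cMap];
+//   if ((pMap != NULL) &&
+//       SUCCEEDED(pInfo4->GetILToNativeMapping2(functionId, 0, cMap, &cMap, pMap)))
+//   {
+//       // pMap[i].ilOffset maps to [nativeStartOffset, nativeEndOffset).
+//   }
+//   delete [] pMap;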
+
+
+
+//*****************************************************************************
+// Given an ObjectID, go get the EE ClassID for it.
+//*****************************************************************************
+HRESULT ProfToEEInterfaceImpl::GetClassFromObject(ObjectID objectId,
+ ClassID * pClassId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay! Fail at runtime if in preemptive mode via AllowObjectInspection()
+ MODE_ANY;
+
+ // Object::GetTypeHandle takes a lock
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetClassFromObject 0x%p.\n",
+ objectId));
+
+ if (objectId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ HRESULT hr = AllowObjectInspection();
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ // Cast the ObjectID as a Object
+ Object *pObj = reinterpret_cast<Object *>(objectId);
+
+ // Set the out param and indicate success
+ // Note that for generic code we always return uninstantiated ClassIDs and FunctionIDs
+ if (pClassId)
+ {
+ *pClassId = SafeGetClassIDFromObject(pObj);
+ }
+
+ return S_OK;
+}
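+
+// A minimal sketch (hypothetical profiler-side code, not part of this file),
+// e.g. for an ObjectID received from a GC callback such as ObjectReferences:
+//
+//   ClassID classId = NULL;
+//   if (SUCCEEDED(pInfo->GetClassFromObject(objectId, &classId)) &&
+//       (classId != NULL))
+//   {
+//       // classId can be fed back into GetClassIDInfo / GetClassIDInfo2.
+//   }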
+
+//*****************************************************************************
+// Given a module and a token for a class, go get the EE data structure for it.
+//*****************************************************************************
+HRESULT ProfToEEInterfaceImpl::GetClassFromToken(ModuleID moduleId,
+ mdTypeDef typeDef,
+ ClassID *pClassId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // ClassLoader::LoadTypeDefOrRefThrowing triggers
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // ClassLoader::LoadTypeDefOrRefThrowing takes a lock
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EEAllowableAfterAttach | kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetClassFromToken 0x%p, 0x%08x.\n",
+ moduleId,
+ typeDef));
+
+ if ((moduleId == NULL) || (typeDef == mdTypeDefNil) || (typeDef == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ if (!g_profControlBlock.fBaseSystemClassesLoaded)
+ {
+ return CORPROF_E_RUNTIME_UNINITIALIZED;
+ }
+
+ // Get the module
+ Module *pModule = (Module *) moduleId;
+
+ // No module, or it's disassociated from metadata
+ if ((pModule == NULL) || (pModule->IsBeingUnloaded()))
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ // First, check the RID map. This is important since it
+ // works during teardown (and the below doesn't)
+ TypeHandle th;
+ th = pModule->LookupTypeDef(typeDef);
+ if (th.IsNull())
+ {
+ HRESULT hr = S_OK;
+
+ EX_TRY {
+ th = ClassLoader::LoadTypeDefOrRefThrowing(pModule, typeDef,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ }
+
+ if (!th.GetMethodTable())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ //
+ // Check if it is generic
+ //
+ ClassID classId = NonGenericTypeHandleToClassID(th);
+
+ if (classId == NULL)
+ {
+ return CORPROF_E_TYPE_IS_PARAMETERIZED;
+ }
+
+ // Return value if necessary
+ if (pClassId)
+ {
+ *pClassId = classId;
+ }
+
+ return S_OK;
+}
+
+
+HRESULT ProfToEEInterfaceImpl::GetClassFromTokenAndTypeArgs(ModuleID moduleID,
+ mdTypeDef typeDef,
+ ULONG32 cTypeArgs,
+ ClassID typeArgs[],
+ ClassID* pClassID)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // LoadGenericInstantiationThrowing may load
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // ClassLoader::LoadGenericInstantiationThrowing takes a lock
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EEAllowableAfterAttach | kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetClassFromTokenAndTypeArgs 0x%p, 0x%08x.\n",
+ moduleID,
+ typeDef));
+
+ if (!g_profControlBlock.fBaseSystemClassesLoaded)
+ {
+ return CORPROF_E_RUNTIME_UNINITIALIZED;
+ }
+
+ Module* pModule = reinterpret_cast< Module* >(moduleID);
+
+ if (pModule == NULL || pModule->IsBeingUnloaded())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ // This array needs to be accessible at least until the call to
+ // ClassLoader::LoadGenericInstantiationThrowing
+ TypeHandle* genericParameters = new (nothrow) TypeHandle[cTypeArgs];
+ NewArrayHolder< TypeHandle > holder(genericParameters);
+
+ if (NULL == genericParameters)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ for (ULONG32 i = 0; i < cTypeArgs; ++i)
+ {
+ genericParameters[i] = TypeHandle(reinterpret_cast< MethodTable* >(typeArgs[i]));
+ }
+
+ //
+ // nickbe 11/24/2003 10:12:56
+ //
+ // In RTM/Everett we decided to load the class if it hadn't been loaded yet
+ // (see ProfToEEInterfaceImpl::GetClassFromToken). For compatibility we're
+ // going to make the same decision here. It's potentially confusing to tell
+ // someone a type doesn't exist at one point in time, but does exist later,
+ // and there is no good way for us to determine that a class may eventually
+ // be loaded without going ahead and loading it.
+ //
+ TypeHandle th;
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ // Not sure if this is a valid override or not - making this a VIOLATION
+ // until we're sure.
+ CONTRACT_VIOLATION(LoadsTypeViolation);
+
+ if (GetThreadNULLOk() == NULL)
+ {
+ // Type system will try to validate as part of its contract if the current
+ // AppDomain returned by GetAppDomain can load types in the specified module's
+ // assembly. On a non-EE thread it results in an AV in a check build
+ // since the type system tries to dereference NULL returned by GetAppDomain.
+ // More importantly, loading a type on a non-EE thread is not allowed.
+ //
+ // ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE() states that callers will not
+ // try to load a type, so that the type system will not try to test type
+ // loadability in the current AppDomain. However,
+ // ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE does not prevent callers from
+ // loading a type. It is the profiler's responsibility not to attempt to load
+ // a type in unsupported ways (e.g. from a non-EE thread). It doesn't
+ // impact retail builds, in which contracts are not available.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ // ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE also defines FAULT_FORBID, which
+ // causes Scanruntime to flag a fault violation in AssemblySpec::InitializeSpec,
+ // which is defined as FAULTS. It only happens in a type-loading path, which
+ // is not supported on a non-EE thread. Suppressing a contract violation in an
+ // unsupported execution path is preferable to causing an AV when calling
+ // GetClassFromTokenAndTypeArgs on a non-EE thread in a check build. See Dev10
+ // 682526 for more details.
+ FAULT_NOT_FATAL();
+
+ th = ClassLoader::LoadGenericInstantiationThrowing(pModule,
+ typeDef,
+ Instantiation(genericParameters, cTypeArgs),
+ ClassLoader::LoadTypes);
+ }
+ else
+ {
+ th = ClassLoader::LoadGenericInstantiationThrowing(pModule,
+ typeDef,
+ Instantiation(genericParameters, cTypeArgs),
+ ClassLoader::LoadTypes);
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ if (th.IsNull())
+ {
+ // Hmm, the type isn't loaded yet.
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ *pClassID = TypeHandleToClassID(th);
+
+ return S_OK;
+}
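+
+// A minimal sketch (hypothetical profiler-side code, not part of this file):
+// given a typedef token for a generic type and the ClassID of its type
+// argument (both assumed to have been obtained earlier), get the ClassID of
+// the instantiation:
+//
+//   ClassID typeArgs[1] = { int32ClassId }; // hypothetical ClassID for Int32
+//   ClassID instClassId = NULL;
+//   HRESULT hr = pInfo2->GetClassFromTokenAndTypeArgs(moduleId, tdGenericType,
+//                                                     1, typeArgs,
+//                                                     &instClassId);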
+
+//*****************************************************************************
+// Given the token for a method, return the function id.
+//*****************************************************************************
+HRESULT ProfToEEInterfaceImpl::GetFunctionFromToken(ModuleID moduleId,
+ mdToken typeDef,
+ FunctionID *pFunctionId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetFunctionFromToken 0x%p, 0x%08x.\n",
+ moduleId,
+ typeDef));
+
+ if ((moduleId == NULL) || (typeDef == mdTokenNil))
+ {
+ return E_INVALIDARG;
+ }
+
+ if (!g_profControlBlock.fBaseSystemClassesLoaded)
+ {
+ return CORPROF_E_RUNTIME_UNINITIALIZED;
+ }
+
+ // Default HRESULT
+ HRESULT hr = S_OK;
+
+ // Get the module
+ Module *pModule = (Module *) moduleId;
+
+ // No module, or disassociated from metadata
+ if (pModule == NULL || pModule->IsBeingUnloaded())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ // Default return value of NULL
+ MethodDesc *pDesc = NULL;
+
+ // Different lookup depending on whether it's a Def or Ref
+ if (TypeFromToken(typeDef) == mdtMethodDef)
+ {
+ pDesc = pModule->LookupMethodDef(typeDef);
+ }
+ else if (TypeFromToken(typeDef) == mdtMemberRef)
+ {
+ pDesc = pModule->LookupMemberRefAsMethod(typeDef);
+ }
+ else
+ {
+ return E_INVALIDARG;
+ }
+
+ if (NULL == pDesc)
+ {
+ return E_INVALIDARG;
+ }
+
+ //
+ // Check that this is a non-generic method
+ //
+ if (pDesc->HasClassOrMethodInstantiation())
+ {
+ return CORPROF_E_FUNCTION_IS_PARAMETERIZED;
+ }
+
+ if (pFunctionId && SUCCEEDED(hr))
+ {
+ *pFunctionId = MethodDescToFunctionID(pDesc);
+ }
+
+ return (hr);
+}
+
+HRESULT ProfToEEInterfaceImpl::GetFunctionFromTokenAndTypeArgs(ModuleID moduleID,
+ mdMethodDef funcDef,
+ ClassID classId,
+ ULONG32 cTypeArgs,
+ ClassID typeArgs[],
+ FunctionID* pFunctionID)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // It can trigger type loads
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // MethodDesc::FindOrCreateAssociatedMethodDesc enters a Crst
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EEAllowableAfterAttach | kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetFunctionFromTokenAndTypeArgs 0x%p, 0x%08x, 0x%p.\n",
+ moduleID,
+ funcDef,
+ classId));
+
+ TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
+ Module* pModule = reinterpret_cast< Module* >(moduleID);
+
+ if ((pModule == NULL) || typeHandle.IsNull())
+ {
+ return E_INVALIDARG;
+ }
+
+ if (!g_profControlBlock.fBaseSystemClassesLoaded)
+ {
+ return CORPROF_E_RUNTIME_UNINITIALIZED;
+ }
+
+ if (pModule->IsBeingUnloaded())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ MethodDesc* pMethodDesc = NULL;
+
+ if (mdtMethodDef == TypeFromToken(funcDef))
+ {
+ pMethodDesc = pModule->LookupMethodDef(funcDef);
+ }
+ else if (mdtMemberRef == TypeFromToken(funcDef))
+ {
+ pMethodDesc = pModule->LookupMemberRefAsMethod(funcDef);
+ }
+ else
+ {
+ return E_INVALIDARG;
+ }
+
+ MethodTable* pMethodTable = typeHandle.GetMethodTable();
+
+ if (pMethodTable == NULL || !pMethodTable->IsRestored() ||
+ pMethodDesc == NULL || !pMethodDesc->IsRestored())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ // This array needs to be accessible at least until the call to
+ // MethodDesc::FindOrCreateAssociatedMethodDesc
+ TypeHandle* genericParameters = new (nothrow) TypeHandle[cTypeArgs];
+ NewArrayHolder< TypeHandle > holder(genericParameters);
+
+ if (NULL == genericParameters)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ for (ULONG32 i = 0; i < cTypeArgs; ++i)
+ {
+ genericParameters[i] = TypeHandle(reinterpret_cast< MethodTable* >(typeArgs[i]));
+ }
+
+ MethodDesc* result = NULL;
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ result = MethodDesc::FindOrCreateAssociatedMethodDesc(pMethodDesc,
+ pMethodTable,
+ FALSE,
+ Instantiation(genericParameters, cTypeArgs),
+ TRUE);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (NULL != result)
+ {
+ *pFunctionID = MethodDescToFunctionID(result);
+ }
+
+ return hr;
+}
+
+//*****************************************************************************
+// Retrieve information about a given application domain, which is like a
+// sub-process.
+//*****************************************************************************
+HRESULT ProfToEEInterfaceImpl::GetAppDomainInfo(AppDomainID appDomainId,
+ ULONG cchName,
+ ULONG *pcchName,
+ __out_ecount_part_opt(cchName, *pcchName) WCHAR szName[],
+ ProcessID *pProcessId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // AppDomain::GetFriendlyNameForDebugger triggers
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // AppDomain::GetFriendlyNameForDebugger takes a lock
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EEAllowableAfterAttach | kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetAppDomainInfo 0x%p.\n",
+ appDomainId));
+
+ if (appDomainId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ BaseDomain *pDomain; // Internal data structure.
+ HRESULT hr = S_OK;
+
+ // <TODO>@todo:
+ // Right now, this ID is not a true AppDomain, since we use the old
+ // AppDomain/SystemDomain model in the profiling API. This means that
+ // the profiler exposes the SharedDomain and the SystemDomain to the
+ // outside world. It's not clear whether this is actually the right thing
+ // to do or not. - seantrow
+ //
+ // Postponed to V2.
+ // </TODO>
+
+ pDomain = (BaseDomain *) appDomainId;
+
+ // Make sure they've passed in a valid appDomainId
+ if (pDomain == NULL)
+ return (E_INVALIDARG);
+
+ // Pick sensible defaults.
+ if (pcchName)
+ *pcchName = 0;
+ if (szName)
+ *szName = 0;
+ if (pProcessId)
+ *pProcessId = 0;
+
+ LPCWSTR szFriendlyName;
+ if (pDomain == SystemDomain::System())
+ szFriendlyName = g_pwBaseLibrary;
+ else if (pDomain == SharedDomain::GetDomain())
+ szFriendlyName = W("EE Shared Assembly Repository");
+ else
+ szFriendlyName = ((AppDomain*)pDomain)->GetFriendlyNameForDebugger();
+
+ if (szFriendlyName != NULL)
+ {
+ // Get the module file name
+ ULONG trueLen = (ULONG)(wcslen(szFriendlyName) + 1);
+
+ // Return name of module as required.
+ if (szName && cchName > 0)
+ {
+ ULONG copyLen = trueLen;
+
+ if (copyLen >= cchName)
+ {
+ copyLen = cchName - 1;
+ }
+
+ wcsncpy_s(szName, cchName, szFriendlyName, copyLen);
+ }
+
+ // If they request the actual length of the name
+ if (pcchName)
+ *pcchName = trueLen;
+ }
+
+ // If we don't have a friendly name but the call was requesting it, then return incomplete data HR
+ else
+ {
+ if ((szName != NULL && cchName > 0) || pcchName)
+ hr = CORPROF_E_DATAINCOMPLETE;
+ }
+
+ if (pProcessId)
+ *pProcessId = (ProcessID) GetCurrentProcessId();
+
+ return (hr);
+}
+
+
+//*****************************************************************************
+// Retrieve information about an assembly, which is a collection of DLLs.
+//*****************************************************************************
+HRESULT ProfToEEInterfaceImpl::GetAssemblyInfo(AssemblyID assemblyId,
+ ULONG cchName,
+ ULONG *pcchName,
+ __out_ecount_part_opt(cchName, *pcchName) WCHAR szName[],
+ AppDomainID *pAppDomainId,
+ ModuleID *pModuleId)
+{
+ CONTRACTL
+ {
+ // SString::SString throws
+ THROWS;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // PEAssembly::GetSimpleName() enters a lock via use of the metadata interface
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetAssemblyInfo 0x%p.\n",
+ assemblyId));
+
+ if (assemblyId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ HRESULT hr = S_OK;
+
+ Assembly *pAssembly; // Internal data structure for assembly.
+
+ pAssembly = (Assembly *) assemblyId;
+ _ASSERTE(pAssembly != NULL);
+
+ if (pcchName || szName)
+ {
+ // Get the friendly name of the assembly
+ SString name(SString::Utf8, pAssembly->GetSimpleName());
+
+ const COUNT_T nameLength = name.GetCount() + 1;
+
+ if ((NULL != szName) && (cchName > 0))
+ {
+ wcsncpy_s(szName, cchName, name.GetUnicode(), min(nameLength, cchName - 1));
+ }
+
+ if (NULL != pcchName)
+ {
+ *pcchName = nameLength;
+ }
+ }
+
+ // Get the parent application domain.
+ if (pAppDomainId)
+ {
+ *pAppDomainId = (AppDomainID) pAssembly->GetDomain();
+ _ASSERTE(*pAppDomainId != NULL);
+ }
+
+ // Find the module the manifest lives in.
+ if (pModuleId)
+ {
+ *pModuleId = (ModuleID) pAssembly->GetManifestModule();
+
+ // This is the case where the profiler has called GetAssemblyInfo
+ // on an assembly that hasn't been completely created yet.
+ if (!*pModuleId)
+ hr = CORPROF_E_DATAINCOMPLETE;
+ }
+
+ return (hr);
+}
+
+// Setting ELT hooks is only allowed from within Initialize(). However, test-only
+// profilers may need to set those hooks from an attaching profiler. See
+// code:ProfControlBlock#TestOnlyELT
+#ifdef PROF_TEST_ONLY_FORCE_ELT
+#define PROFILER_TO_CLR_ENTRYPOINT_SET_ELT(logParams) \
+ do \
+ { \
+ if (g_profControlBlock.fTestOnlyForceEnterLeave) \
+ { \
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach, logParams); \
+ } \
+ else \
+ { \
+ PROFILER_TO_CLR_ENTRYPOINT_CALLABLE_ON_INIT_ONLY(logParams); \
+ } \
+ } while(0)
+#else // PROF_TEST_ONLY_FORCE_ELT
+#define PROFILER_TO_CLR_ENTRYPOINT_SET_ELT \
+ PROFILER_TO_CLR_ENTRYPOINT_CALLABLE_ON_INIT_ONLY
+#endif // PROF_TEST_ONLY_FORCE_ELT
+
+
+HRESULT ProfToEEInterfaceImpl::SetEnterLeaveFunctionHooks(FunctionEnter * pFuncEnter,
+ FunctionLeave * pFuncLeave,
+ FunctionTailcall * pFuncTailcall)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // The profiler must call SetEnterLeaveFunctionHooks during initialization, since
+ // the enter/leave events are immutable and must also be set during initialization.
+ PROFILER_TO_CLR_ENTRYPOINT_SET_ELT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: SetEnterLeaveFunctionHooks 0x%p, 0x%p, 0x%p.\n",
+ pFuncEnter,
+ pFuncLeave,
+ pFuncTailcall));
+
+ return g_profControlBlock.pProfInterface->SetEnterLeaveFunctionHooks(pFuncEnter, pFuncLeave, pFuncTailcall);
+}
+
+
+HRESULT ProfToEEInterfaceImpl::SetEnterLeaveFunctionHooks2(FunctionEnter2 * pFuncEnter,
+ FunctionLeave2 * pFuncLeave,
+ FunctionTailcall2 * pFuncTailcall)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // The profiler must call SetEnterLeaveFunctionHooks2 during initialization, since
+ // the enter/leave events are immutable and must also be set during initialization.
+ PROFILER_TO_CLR_ENTRYPOINT_SET_ELT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: SetEnterLeaveFunctionHooks2 0x%p, 0x%p, 0x%p.\n",
+ pFuncEnter,
+ pFuncLeave,
+ pFuncTailcall));
+
+ return
+ g_profControlBlock.pProfInterface->SetEnterLeaveFunctionHooks2(pFuncEnter, pFuncLeave, pFuncTailcall);
+}
+
+
+HRESULT ProfToEEInterfaceImpl::SetEnterLeaveFunctionHooks3(FunctionEnter3 * pFuncEnter3,
+ FunctionLeave3 * pFuncLeave3,
+ FunctionTailcall3 * pFuncTailcall3)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // The profiler must call SetEnterLeaveFunctionHooks3 during initialization, since
+ // the enter/leave events are immutable and must also be set during initialization.
+ PROFILER_TO_CLR_ENTRYPOINT_SET_ELT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: SetEnterLeaveFunctionHooks3 0x%p, 0x%p, 0x%p.\n",
+ pFuncEnter3,
+ pFuncLeave3,
+ pFuncTailcall3));
+
+ return
+ g_profControlBlock.pProfInterface->SetEnterLeaveFunctionHooks3(pFuncEnter3,
+ pFuncLeave3,
+ pFuncTailcall3);
+}
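+
+// A minimal sketch (hypothetical profiler-side code, not part of this file):
+// the hooks must be registered from ICorProfilerCallback::Initialize, and the
+// enter/leave/tailcall stubs are assumed to be defined elsewhere by the
+// profiler (typically as naked assembly routines):
+//
+//   HRESULT MyProfiler::Initialize(IUnknown * pICorProfilerInfoUnk)
+//   {
+//       // ... QI for ICorProfilerInfo3, request COR_PRF_MONITOR_ENTERLEAVE
+//       //     in the event mask, then:
+//       return m_pInfo3->SetEnterLeaveFunctionHooks3(MyEnterStub, MyLeaveStub,
+//                                                    MyTailcallStub);
+//   }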
+
+
+
+HRESULT ProfToEEInterfaceImpl::SetEnterLeaveFunctionHooks3WithInfo(FunctionEnter3WithInfo * pFuncEnter3WithInfo,
+ FunctionLeave3WithInfo * pFuncLeave3WithInfo,
+ FunctionTailcall3WithInfo * pFuncTailcall3WithInfo)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // The profiler must call SetEnterLeaveFunctionHooks3WithInfo during initialization, since
+ // the enter/leave events are immutable and must also be set during initialization.
+ PROFILER_TO_CLR_ENTRYPOINT_SET_ELT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: SetEnterLeaveFunctionHooks3WithInfo 0x%p, 0x%p, 0x%p.\n",
+ pFuncEnter3WithInfo,
+ pFuncLeave3WithInfo,
+ pFuncTailcall3WithInfo));
+
+ return
+ g_profControlBlock.pProfInterface->SetEnterLeaveFunctionHooks3WithInfo(pFuncEnter3WithInfo,
+ pFuncLeave3WithInfo,
+ pFuncTailcall3WithInfo);
+}
+
+
+HRESULT ProfToEEInterfaceImpl::SetFunctionIDMapper(FunctionIDMapper *pFunc)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: SetFunctionIDMapper 0x%p.\n",
+ pFunc));
+
+ g_profControlBlock.pProfInterface->SetFunctionIDMapper(pFunc);
+
+ return (S_OK);
+}
+
+HRESULT ProfToEEInterfaceImpl::SetFunctionIDMapper2(FunctionIDMapper2 *pFunc, void * clientData)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: SetFunctionIDMapper2. pFunc: 0x%p. clientData: 0x%p.\n",
+ pFunc,
+ clientData));
+
+ g_profControlBlock.pProfInterface->SetFunctionIDMapper2(pFunc, clientData);
+
+ return (S_OK);
+}
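+
+// A minimal sketch (hypothetical profiler-side code, not part of this file):
+// a FunctionIDMapper2 lets the profiler remap IDs and opt out of ELT hooks
+// per function via *pbHookFunction:
+//
+//   UINT_PTR STDMETHODCALLTYPE MyMapper2(FunctionID funcId, void * clientData,
+//                                        BOOL * pbHookFunction)
+//   {
+//       *pbHookFunction = ShouldInstrument(funcId); // hypothetical predicate
+//       return funcId; // value handed to the ELT hooks as the client ID
+//   }
+//
+//   pInfo3->SetFunctionIDMapper2(MyMapper2, /* clientData */ NULL);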
+
+/*
+ * GetFunctionInfo2
+ *
+ * This function takes the frameInfo returned from a profiler callback and splays it
+ * out into as much information as possible.
+ *
+ * Parameters:
+ * funcId - The function that is being requested.
+ * frameInfo - Frame specific information from a callback (for resolving generics).
+ * pClassId - An optional parameter for returning the class id of the function.
+ * pModuleId - An optional parameter for returning the module of the function.
+ * pToken - An optional parameter for returning the metadata token of the function.
+ * cTypeArgs - The count of the size of the array typeArgs
+ * pcTypeArgs - Returns the number of elements of typeArgs filled in, or if typeArgs is NULL
+ * the number that would be needed.
+ * typeArgs - An array to store generic type parameters for the function.
+ *
+ * Returns:
+ * S_OK if successful.
+ */
+HRESULT ProfToEEInterfaceImpl::GetFunctionInfo2(FunctionID funcId,
+ COR_PRF_FRAME_INFO frameInfo,
+ ClassID *pClassId,
+ ModuleID *pModuleId,
+ mdToken *pToken,
+ ULONG32 cTypeArgs,
+ ULONG32 *pcTypeArgs,
+ ClassID typeArgs[])
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation eventually
+ // reads metadata which causes us to take a reader lock. However, see
+ // code:#DisableLockOnAsyncCalls
+ DISABLED(CAN_TAKE_LOCK);
+
+ // Asynchronous functions can be called at arbitrary times when runtime
+ // is holding locks that cannot be reentered without causing deadlock.
+ // This contract detects any attempts to reenter locks held at the time
+ // this function was called.
+ CANNOT_RETAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(pClassId, NULL_OK));
+ PRECONDITION(CheckPointer(pModuleId, NULL_OK));
+ PRECONDITION(CheckPointer(pToken, NULL_OK));
+ PRECONDITION(CheckPointer(pcTypeArgs, NULL_OK));
+ PRECONDITION(CheckPointer(typeArgs, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // See code:#DisableLockOnAsyncCalls
+ PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetFunctionInfo2 0x%p.\n",
+ funcId));
+
+ //
+ // Verify parameters.
+ //
+ COR_PRF_FRAME_INFO_INTERNAL *pFrameInfo = (COR_PRF_FRAME_INFO_INTERNAL *)frameInfo;
+
+ if ((funcId == NULL) ||
+ ((pFrameInfo != NULL) && (pFrameInfo->funcID != funcId)))
+ {
+ return E_INVALIDARG;
+ }
+
+ MethodDesc *pMethDesc = FunctionIdToMethodDesc(funcId);
+
+ if (pMethDesc == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ if ((cTypeArgs != 0) && (typeArgs == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ // it's not safe to examine a methoddesc that has not been restored so do not do so
+ if (!pMethDesc->IsRestored())
+ return CORPROF_E_DATAINCOMPLETE;
+
+ //
+ // Find the exact instantiation of this function.
+ //
+ TypeHandle specificClass;
+ MethodDesc* pActualMethod;
+
+ ClassID classId = NULL;
+
+ if (pMethDesc->IsSharedByGenericInstantiations())
+ {
+ BOOL exactMatch;
+ OBJECTREF pThis = NULL;
+
+ if (pFrameInfo != NULL)
+ {
+ // If FunctionID represents a generic methoddesc on a struct, then pFrameInfo->thisArg
+ // isn't an Object*. It's a pointer directly into the struct's members (i.e., it's not pointing at the
+ // method table). That means pFrameInfo->thisArg cannot be cast to an OBJECTREF for
+ // use by Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation. However,
+ // Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation won't even need a this pointer
+ // for the methoddesc it's processing if the methoddesc is on a value type. So we
+ // can safely pass NULL for the methoddesc's this in such a case.
+ if (pMethDesc->GetMethodTable()->IsValueType())
+ {
+ _ASSERTE(!pMethDesc->AcquiresInstMethodTableFromThis());
+ _ASSERTE(pThis == NULL);
+ }
+ else
+ {
+ pThis = ObjectToOBJECTREF((PTR_Object)(pFrameInfo->thisArg));
+ }
+ }
+
+ exactMatch = Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation(
+ pMethDesc,
+ pThis,
+ PTR_VOID((pFrameInfo != NULL) ? pFrameInfo->extraArg : NULL),
+ &specificClass,
+ &pActualMethod);
+
+ if (exactMatch)
+ {
+ classId = TypeHandleToClassID(specificClass);
+ }
+ else if (!specificClass.HasInstantiation() || !specificClass.IsSharedByGenericInstantiations())
+ {
+ //
+ // In this case we could not get the type args for the method, but if the class
+ // is not a generic class or is instantiated with value types, this value is correct.
+ //
+ classId = TypeHandleToClassID(specificClass);
+ }
+ else
+ {
+ //
+ // We could not get any class information.
+ //
+ classId = NULL;
+ }
+ }
+ else
+ {
+ TypeHandle typeHandle(pMethDesc->GetMethodTable());
+ classId = TypeHandleToClassID(typeHandle);
+ pActualMethod = pMethDesc;
+ }
+
+
+ //
+ // Fill in the ClassId, if desired
+ //
+ if (pClassId != NULL)
+ {
+ *pClassId = classId;
+ }
+
+ //
+ // Fill in the ModuleId, if desired.
+ //
+ if (pModuleId != NULL)
+ {
+ *pModuleId = (ModuleID)pMethDesc->GetModule();
+ }
+
+ //
+ // Fill in the token, if desired.
+ //
+ if (pToken != NULL)
+ {
+ *pToken = (mdToken)pMethDesc->GetMemberDef();
+ }
+
+ if ((cTypeArgs == 0) && (pcTypeArgs != NULL))
+ {
+ //
+ // They are searching for the size of the array needed, we can return that now and
+ // short-circuit all the work below.
+ //
+ if (pcTypeArgs != NULL)
+ {
+ *pcTypeArgs = pActualMethod->GetNumGenericMethodArgs();
+ }
+ return S_OK;
+ }
+
+ //
+ // If no place to store resulting count, quit now.
+ //
+ if (pcTypeArgs == NULL)
+ {
+ return S_OK;
+ }
+
+ //
+ // Fill in the type args
+ //
+ DWORD cArgsToFill = pActualMethod->GetNumGenericMethodArgs();
+
+ if (cArgsToFill > cTypeArgs)
+ {
+ cArgsToFill = cTypeArgs;
+ }
+
+ *pcTypeArgs = cArgsToFill;
+
+ if (cArgsToFill == 0)
+ {
+ return S_OK;
+ }
+
+ Instantiation inst = pActualMethod->GetMethodInstantiation();
+
+ for (DWORD i = 0; i < cArgsToFill; i++)
+ {
+ typeArgs[i] = TypeHandleToClassID(inst[i]);
+ }
+
+ return S_OK;
+}
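+
+// A minimal sketch (hypothetical profiler-side code, not part of this file),
+// e.g. from an ELT enter hook that captured a COR_PRF_FRAME_INFO:
+//
+//   ClassID classId = NULL;
+//   ModuleID moduleId = NULL;
+//   mdToken token = mdTokenNil;
+//   ULONG32 cTypeArgs = 0;
+//   pInfo2->GetFunctionInfo2(funcId, frameInfo, &classId, &moduleId, &token,
+//                            0, &cTypeArgs, NULL); // first call: count only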
+
+/*
+ * GetStringLayout
+ *
+ * This function describes to a profiler the internal layout of a string.
+ *
+ * Parameters:
+ * pBufferLengthOffset - Offset within an OBJECTREF of a string of the ArrayLength field.
+ * pStringLengthOffset - Offset within an OBJECTREF of a string of the StringLength field.
+ * pBufferOffset - Offset within an OBJECTREF of a string of the Buffer field.
+ *
+ * Returns:
+ * S_OK if successful.
+ */
+HRESULT ProfToEEInterfaceImpl::GetStringLayout(ULONG *pBufferLengthOffset,
+ ULONG *pStringLengthOffset,
+ ULONG *pBufferOffset)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(pBufferLengthOffset, NULL_OK));
+ PRECONDITION(CheckPointer(pStringLengthOffset, NULL_OK));
+ PRECONDITION(CheckPointer(pBufferOffset, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetStringLayout.\n"));
+
+ return this->GetStringLayoutHelper(pBufferLengthOffset, pStringLengthOffset, pBufferOffset);
+}
+
+/*
+ * GetStringLayout2
+ *
+ * This function describes to a profiler the internal layout of a string.
+ *
+ * Parameters:
+ * pStringLengthOffset - Offset within an OBJECTREF of a string of the StringLength field.
+ * pBufferOffset - Offset within an OBJECTREF of a string of the Buffer field.
+ *
+ * Returns:
+ * S_OK if successful.
+ */
+HRESULT ProfToEEInterfaceImpl::GetStringLayout2(ULONG *pStringLengthOffset,
+ ULONG *pBufferOffset)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(pStringLengthOffset, NULL_OK));
+ PRECONDITION(CheckPointer(pBufferOffset, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetStringLayout2.\n"));
+
+ ULONG dummyBufferLengthOffset;
+ return this->GetStringLayoutHelper(&dummyBufferLengthOffset, pStringLengthOffset, pBufferOffset);
+}
+
+/*
+ * GetStringLayoutHelper
+ *
+ * This function describes to a profiler the internal layout of a string.
+ *
+ * Parameters:
+ * pBufferLengthOffset - Offset within an OBJECTREF of a string of the ArrayLength field.
+ * pStringLengthOffset - Offset within an OBJECTREF of a string of the StringLength field.
+ * pBufferOffset - Offset within an OBJECTREF of a string of the Buffer field.
+ *
+ * Returns:
+ * S_OK if successful.
+ */
+HRESULT ProfToEEInterfaceImpl::GetStringLayoutHelper(ULONG *pBufferLengthOffset,
+ ULONG *pStringLengthOffset,
+ ULONG *pBufferOffset)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(pBufferLengthOffset, NULL_OK));
+ PRECONDITION(CheckPointer(pStringLengthOffset, NULL_OK));
+ PRECONDITION(CheckPointer(pBufferOffset, NULL_OK));
+ }
+ CONTRACTL_END;
+
+    // The String class no longer has a bufferLength field in it.
+    // We return the offset of the stringLength field because that is the closest we can get.
+    // This is a breaking change, so a new method, GetStringLayout2, has been
+    // added on the ICorProfilerInfo3 interface.
+ if (pBufferLengthOffset != NULL)
+ {
+ *pBufferLengthOffset = StringObject::GetStringLengthOffset();
+ }
+
+ if (pStringLengthOffset != NULL)
+ {
+ *pStringLengthOffset = StringObject::GetStringLengthOffset();
+ }
+
+ if (pBufferOffset != NULL)
+ {
+ *pBufferOffset = StringObject::GetBufferOffset();
+ }
+
+ return S_OK;
+}
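+
+// Example (illustrative sketch, not part of the runtime): given the offsets
+// returned by GetStringLayout2, a profiler could read a string's contents from
+// an ObjectID roughly like this ("pInfo" and "objectId" are hypothetical
+// profiler-side variables; error handling omitted):
+//
+//     ULONG stringLengthOffset, bufferOffset;
+//     pInfo->GetStringLayout2(&stringLengthOffset, &bufferOffset);
+//     DWORD cch = *(DWORD *)((BYTE *)objectId + stringLengthOffset);
+//     WCHAR *pChars = (WCHAR *)((BYTE *)objectId + bufferOffset);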
+
+/*
+ * GetClassLayout
+ *
+ * This function describes to a profiler the internal layout of a class.
+ *
+ * Parameters:
+ * classID - The class that is being queried. It is really a TypeHandle.
+ * rFieldOffset - An array to store information about each field in the class.
+ * cFieldOffset - Count of the number of elements in rFieldOffset.
+ * pcFieldOffset - Upon return contains the number of elements filled in, or if
+ * cFieldOffset is zero, the number of elements needed.
+ *    pulClassSize - Optional parameter that receives the size in bytes of the underlying
+ *        internal class structure.
+ *
+ * Returns:
+ * S_OK if successful.
+ */
+HRESULT ProfToEEInterfaceImpl::GetClassLayout(ClassID classID,
+ COR_FIELD_OFFSET rFieldOffset[],
+ ULONG cFieldOffset,
+ ULONG *pcFieldOffset,
+ ULONG *pulClassSize)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(rFieldOffset, NULL_OK));
+ PRECONDITION(CheckPointer(pcFieldOffset));
+ PRECONDITION(CheckPointer(pulClassSize, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetClassLayout 0x%p.\n",
+ classID));
+
+ //
+ // Verify parameters
+ //
+ if ((pcFieldOffset == NULL) || (classID == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ if ((cFieldOffset != 0) && (rFieldOffset == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ TypeHandle typeHandle = TypeHandle::FromPtr((void *)classID);
+
+ //
+    // This is the wrong API for arrays and strings. Use GetArrayObjectInfo and GetStringLayout instead.
+ //
+ if (typeHandle.IsTypeDesc() || typeHandle.AsMethodTable()->IsArray())
+ {
+ return E_INVALIDARG;
+ }
+
+ //
+    // We used to have a bug where this API incorrectly succeeded for strings during startup. Profilers
+    // took a dependency on this bug. Let the API succeed for strings during startup for backward compatibility.
+ //
+ if (typeHandle.AsMethodTable()->IsString() && g_profControlBlock.fBaseSystemClassesLoaded)
+ {
+ return E_INVALIDARG;
+ }
+
+ //
+ // If this class is not fully restored, that is all the information we can get at this time.
+ //
+ if (!typeHandle.IsRestored())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+    // Types can be pre-restored, but they still aren't expected to handle queries before
+    // eager fixups have run. This is a targeted band-aid for a bug IntelliTrace was
+    // running into - attempting to get the class layout for all types at module load time.
+    // If we don't detect this, the runtime will AV during the field iteration below. Feel
+    // free to eliminate this check when a more complete solution is available.
+ if (CORCOMPILE_IS_POINTER_TAGGED(*(typeHandle.AsMethodTable()->GetParentMethodTablePtr())))
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+    // !IsValueType == IsArray || IsReferenceType. Since IsArray has been ruled out above, the type
+    // must be a reference type if !IsValueType.
+ BOOL fReferenceType = !typeHandle.IsValueType();
+
+ //
+ // Fill in class size now
+ //
+ // Move after the check for typeHandle.GetMethodTable()->IsRestored()
+ // because an unrestored MethodTable may have a bad EE class pointer
+ // which will be used by MethodTable::GetNumInstanceFieldBytes
+ //
+ if (pulClassSize != NULL)
+ {
+ if (fReferenceType)
+ {
+ // aligned size including the object header for reference types
+ *pulClassSize = typeHandle.GetMethodTable()->GetBaseSize();
+ }
+ else
+ {
+ // unboxed and unaligned size for value types
+ *pulClassSize = typeHandle.GetMethodTable()->GetNumInstanceFieldBytes();
+ }
+ }
+
+ ApproxFieldDescIterator fieldDescIterator(typeHandle.GetMethodTable(), ApproxFieldDescIterator::INSTANCE_FIELDS);
+
+ ULONG cFields = fieldDescIterator.Count();
+
+ //
+ // If they are looking to just get the count, return that.
+ //
+ if ((cFieldOffset == 0) || (rFieldOffset == NULL))
+ {
+ *pcFieldOffset = cFields;
+ return S_OK;
+ }
+
+ //
+    // Don't put too many in the array.
+ //
+ if (cFields > cFieldOffset)
+ {
+ cFields = cFieldOffset;
+ }
+
+ *pcFieldOffset = cFields;
+
+ //
+ // Now fill in the array
+ //
+ ULONG i;
+ FieldDesc *pField;
+
+ for (i = 0; i < cFields; i++)
+ {
+ pField = fieldDescIterator.Next();
+ rFieldOffset[i].ridOfField = (ULONG)pField->GetMemberDef();
+ rFieldOffset[i].ulOffset = (ULONG)pField->GetOffset() + (fReferenceType ? Object::GetOffsetOfFirstField() : 0);
+ }
+
+ return S_OK;
+}
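+
+// Example (illustrative sketch, not part of the runtime): profilers typically
+// call GetClassLayout twice -- once to learn the field count, once to fill the
+// array ("pInfo" and "classId" are hypothetical profiler-side variables):
+//
+//     ULONG cFieldOffset = 0;
+//     pInfo->GetClassLayout(classId, NULL, 0, &cFieldOffset, NULL);
+//     COR_FIELD_OFFSET *rFieldOffset = new COR_FIELD_OFFSET[cFieldOffset];
+//     ULONG ulClassSize = 0;
+//     pInfo->GetClassLayout(classId, rFieldOffset, cFieldOffset,
+//                           &cFieldOffset, &ulClassSize);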
+
+
+typedef struct _PROFILER_STACK_WALK_DATA
+{
+ StackSnapshotCallback *callback;
+ ULONG32 infoFlags;
+ ULONG32 contextFlags;
+ void *clientData;
+
+#ifdef WIN64EXCEPTIONS
+ StackFrame sfParent;
+#endif
+} PROFILER_STACK_WALK_DATA;
+
+
+/*
+ * ProfilerStackWalkCallback
+ *
+ * This routine is used as the callback from the general stack walker for
+ * doing snapshot stack walks
+ *
+ */
+StackWalkAction ProfilerStackWalkCallback(CrawlFrame *pCf, PROFILER_STACK_WALK_DATA *pData)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+        NOTHROW;            // Throwing is right out: a throw at minimum allocates the thrown object, which we *must* not do.
+        GC_NOTRIGGER;       // The stack is not necessarily crawlable in this state, so we must not induce a GC.
+ }
+ CONTRACTL_END;
+
+ MethodDesc *pFunc = pCf->GetFunction();
+
+ COR_PRF_FRAME_INFO_INTERNAL frameInfo;
+ ULONG32 contextSize = 0;
+ BYTE *context = NULL;
+
+ UINT_PTR currentIP = 0;
+ REGDISPLAY *pRegDisplay = pCf->GetRegisterSet();
+#if defined(_TARGET_X86_)
+ CONTEXT builtContext;
+#endif
+
+ //
+ // For Unmanaged-to-managed transitions we get a NativeMarker back, which we want
+ // to return to the profiler as the context seed if it wants to walk the unmanaged
+ // stack frame, so we report the functionId as NULL to indicate this.
+ //
+ if (pCf->IsNativeMarker())
+ {
+ pFunc = NULL;
+ }
+
+ //
+ // Skip all Lightweight reflection/emit functions
+ //
+ if ((pFunc != NULL) && pFunc->IsNoMetadata())
+ {
+ return SWA_CONTINUE;
+ }
+
+ //
+ // If this is not a transition of any sort and not a managed
+ // method, ignore it.
+ //
+ if (!pCf->IsNativeMarker() && !pCf->IsFrameless())
+ {
+ return SWA_CONTINUE;
+ }
+
+ currentIP = (UINT_PTR)pRegDisplay->ControlPC;
+
+ frameInfo.size = sizeof(COR_PRF_FRAME_INFO_INTERNAL);
+ frameInfo.version = COR_PRF_FRAME_INFO_INTERNAL_CURRENT_VERSION;
+
+ if (pFunc != NULL)
+ {
+ frameInfo.funcID = MethodDescToFunctionID(pFunc);
+ frameInfo.extraArg = NULL;
+ }
+ else
+ {
+ frameInfo.funcID = NULL;
+ frameInfo.extraArg = NULL;
+ }
+
+ frameInfo.IP = currentIP;
+ frameInfo.thisArg = NULL;
+
+ if (pData->infoFlags & COR_PRF_SNAPSHOT_REGISTER_CONTEXT)
+ {
+#if defined(_TARGET_X86_)
+ //
+ // X86 stack walking does not keep the context up-to-date during the
+ // walk. Instead it keeps the REGDISPLAY up-to-date. Thus, we need to
+ // build a CONTEXT from the REGDISPLAY.
+ //
+
+ memset(&builtContext, 0, sizeof(builtContext));
+ builtContext.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;
+ CopyRegDisplay(pRegDisplay, NULL, &builtContext);
+ context = (BYTE *)(&builtContext);
+#else
+ context = (BYTE *)pRegDisplay->pCurrentContext;
+#endif
+ contextSize = sizeof(CONTEXT);
+ }
+
+ // NOTE: We are intentionally not setting any callback state flags here (i.e., not using
+ // SetCallbackStateFlagsHolder), as we want the DSS callback to "inherit" the
+ // same callback state that DSS has: if DSS was called asynchronously, then consider
+ // the DSS callback to be called asynchronously.
+ if (pData->callback(frameInfo.funcID,
+ frameInfo.IP,
+ (COR_PRF_FRAME_INFO)&frameInfo,
+ contextSize,
+ context,
+ pData->clientData) == S_OK)
+ {
+ return SWA_CONTINUE;
+ }
+
+ return SWA_ABORT;
+}
+
+//---------------------------------------------------------------------------------------
+// Normally, calling GetFunction() on the frame is sufficient to ensure
+// HelperMethodFrames are initialized. However, sometimes we need to be able to specify
+// that we should not enter the host while initializing, so we need to initialize such
+// frames more directly. This small helper function directly forces the initialization,
+// and ensures we don't enter the host as a result if we're executing in an asynchronous
+// call (i.e., hijacked thread)
+//
+// Arguments:
+// pFrame - Frame to initialize.
+//
+// Return Value:
+// TRUE iff pFrame was successfully initialized (or was already initialized). If
+// pFrame is not a HelperMethodFrame (or derived type), this returns TRUE
+//     immediately. FALSE indicates we tried to initialize without entering the host, and
+// had to abort as a result when a reader lock was needed but unavailable.
+//
+
+static BOOL EnsureFrameInitialized(Frame * pFrame)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
+ // host (SQL). Corners will be cut to ensure this is the case
+ if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; }
+
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (pFrame->GetFrameType() != Frame::TYPE_HELPER_METHOD_FRAME)
+ {
+ // This frame is not a HelperMethodFrame or a frame derived from
+ // HelperMethodFrame, so HMF-specific lazy initialization is not an issue.
+ return TRUE;
+ }
+
+ HelperMethodFrame * pHMF = (HelperMethodFrame *) pFrame;
+
+ if (pHMF->InsureInit(
+ false, // initialInit
+ NULL, // unwindState
+ (ShouldAvoidHostCalls() ?
+ NoHostCalls :
+ AllowHostCalls)
+ ) != NULL)
+ {
+ // InsureInit() succeeded and found the return address
+ return TRUE;
+ }
+
+ // No return address was found. It must be because we asked InsureInit() to bail if
+ // it would have entered the host
+ _ASSERTE(ShouldAvoidHostCalls());
+ return FALSE;
+}
+
+
+#ifdef _TARGET_X86_
+//---------------------------------------------------------------------------------------
+//
+// Implements the COR_PRF_SNAPSHOT_X86_OPTIMIZED algorithm called by DoStackSnapshot.
+// Does a simple EBP walk, rather than invoking all of StackWalkFramesEx.
+//
+// Arguments:
+// pThreadToSnapshot - Thread whose stack should be walked
+// pctxSeed - Register context with which to seed the walk
+// callback - Function to call at each frame found during the walk
+// clientData - Parameter to pass through to callback
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+HRESULT ProfToEEInterfaceImpl::ProfilerEbpWalker(
+ Thread * pThreadToSnapshot,
+ LPCONTEXT pctxSeed,
+ StackSnapshotCallback * callback,
+ void * clientData)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ EE_THREAD_NOT_REQUIRED;
+
+ // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
+ // host (SQL). Corners will be cut to ensure this is the case
+ if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; }
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+
+    // We haven't set the stackwalker thread type flag yet (see next line), so it shouldn't be set. The
+    // only exception is if the current call is made by a hijacking profiler that
+    // redirected this thread while it was previously in the middle of another stack walk.
+ _ASSERTE(IsCalledAsynchronously() || !IsStackWalkerThread());
+
+ // Remember that we're walking the stack. This holder will reinstate the original
+ // value of the stackwalker flag (from the thread type mask) in its destructor.
+ ClrFlsValueSwitch _threadStackWalking(TlsIdx_StackWalkerWalkingThread, pThreadToSnapshot);
+
+ // This flag remembers if we reported a managed frame since the last unmanaged block
+ // we reported. It's used to avoid reporting two unmanaged blocks in a row.
+ BOOL fReportedAtLeastOneManagedFrameSinceLastUnmanagedBlock = FALSE;
+
+ Frame * pFrameCur = pThreadToSnapshot->GetFrame();
+
+ CONTEXT ctxCur;
+ ZeroMemory(&ctxCur, sizeof(ctxCur));
+
+    // Use the seed if we got one. Otherwise, the EE explicit Frame chain will seed the walk.
+ if (pctxSeed != NULL)
+ {
+ ctxCur.Ebp = pctxSeed->Ebp;
+ ctxCur.Eip = pctxSeed->Eip;
+ ctxCur.Esp = pctxSeed->Esp;
+ }
+
+ while (TRUE)
+ {
+ // At each iteration of the loop:
+ // * Analyze current frame (get managed data if it's a managed frame)
+ // * Report current frame via callback()
+ // * Walk down to next frame
+
+ // **** Managed or unmanaged frame? ****
+
+ EECodeInfo codeInfo;
+ MethodDesc * pMethodDescCur = NULL;
+
+ if (ctxCur.Eip != 0)
+ {
+ hr = GetFunctionInfoInternal(
+ (LPCBYTE) ctxCur.Eip,
+ &codeInfo);
+ if (hr == CORPROF_E_ASYNCHRONOUS_UNSAFE)
+ {
+ _ASSERTE(ShouldAvoidHostCalls());
+ return hr;
+ }
+ if (SUCCEEDED(hr))
+ {
+ pMethodDescCur = codeInfo.GetMethodDesc();
+ }
+ }
+
+ // **** Report frame to profiler ****
+
+ if (
+ // Make sure the frame gave us an IP
+ (ctxCur.Eip != 0) &&
+
+ // Make sure any managed frame isn't for an IL stub or LCG
+ ((pMethodDescCur == NULL) || !pMethodDescCur->IsNoMetadata()) &&
+
+ // Only report unmanaged frames if the last frame we reported was managed
+ // (avoid reporting two unmanaged blocks in a row)
+ ((pMethodDescCur != NULL) || fReportedAtLeastOneManagedFrameSinceLastUnmanagedBlock))
+ {
+ // Around the call to the profiler, temporarily clear the
+ // ThreadType_StackWalker type flag, as we have no control over what the
+ // profiler may do inside its callback (it could theoretically attempt to
+ // load other types, though I don't personally know of profilers that
+ // currently do this).
+
+ CLEAR_THREAD_TYPE_STACKWALKER();
+ hr = callback(
+ (FunctionID) pMethodDescCur,
+ ctxCur.Eip,
+ NULL, // COR_PRF_FRAME_INFO
+ sizeof(ctxCur), // contextSize,
+ (LPBYTE) &ctxCur, // context,
+ clientData);
+ SET_THREAD_TYPE_STACKWALKER(pThreadToSnapshot);
+
+ if (hr != S_OK)
+ {
+ return hr;
+ }
+ if (pMethodDescCur == NULL)
+ {
+ // Just reported an unmanaged block, so reset the flag
+ fReportedAtLeastOneManagedFrameSinceLastUnmanagedBlock = FALSE;
+ }
+ else
+ {
+ // Just reported a managed block, so remember it
+ fReportedAtLeastOneManagedFrameSinceLastUnmanagedBlock = TRUE;
+ }
+ }
+
+ // **** Walk down to next frame ****
+
+ // Is current frame managed or unmanaged?
+ if (pMethodDescCur == NULL)
+ {
+ // Unmanaged frame. Use explicit EE Frame chain to help
+
+ REGDISPLAY frameRD;
+ ZeroMemory(&frameRD, sizeof(frameRD));
+
+ while (pFrameCur != FRAME_TOP)
+ {
+ // Frame is only useful if it will contain register context info
+ if (!pFrameCur->NeedsUpdateRegDisplay())
+ {
+ goto Loop;
+ }
+
+
+                // This should be the first call we make to the Frame, as it
+                // forces lazy initialization of HelperMethodFrames.
+ if (!EnsureFrameInitialized(pFrameCur))
+ {
+ return CORPROF_E_ASYNCHRONOUS_UNSAFE;
+ }
+
+ // This frame is only useful if it gives us an actual return address,
+ // and is situated on the stack at or below our current ESP (stack
+ // grows up)
+ if ((pFrameCur->GetReturnAddress() != NULL) &&
+ (dac_cast<TADDR>(pFrameCur) >= dac_cast<TADDR>(ctxCur.Esp)))
+ {
+ pFrameCur->UpdateRegDisplay(&frameRD);
+ break;
+ }
+
+Loop:
+ pFrameCur = pFrameCur->PtrNextFrame();
+ }
+
+ if (pFrameCur == FRAME_TOP)
+ {
+ // No more frames. Stackwalk is over
+ return S_OK;
+ }
+
+ // Update ctxCur based on frame
+ ctxCur.Eip = pFrameCur->GetReturnAddress();
+ ctxCur.Ebp = GetRegdisplayFP(&frameRD);
+ ctxCur.Esp = GetRegdisplaySP(&frameRD);
+ }
+ else
+ {
+ // Managed frame.
+
+ // GC info will assist us in determining whether this is a non-EBP frame and
+ // info about pushed arguments.
+ PTR_VOID gcInfo = codeInfo.GetGCInfo();
+ InfoHdr header;
+ unsigned uiMethodSizeDummy;
+ PTR_CBYTE table = PTR_CBYTE(gcInfo);
+ table += decodeUnsigned(table, &uiMethodSizeDummy);
+ table = decodeHeader(table, &header);
+
+ // Ok, GCInfo, can we do a simple EBP walk or what?
+
+ if ((codeInfo.GetRelOffset() < header.prologSize) ||
+ (!header.ebpFrame && !header.doubleAlign))
+ {
+ // We're either in the prolog or we're not in an EBP frame, in which case
+ // we'll just defer to the code manager to unwind for us. This condition
+ // is relatively rare, but can occur if:
+ //
+ // * The profiler did a DSS from its Enter hook, in which case we're
+ // still inside the prolog, OR
+ // * The seed context or explicit EE Frame chain seeded us with a
+ // non-EBP frame function. In this case, using a naive EBP
+ // unwinding algorithm would actually skip over the next EBP
+ // frame, and would get SP all wrong as we try skipping over
+ // the pushed parameters. So let's just ask the code manager for
+ // help.
+ //
+ // Note that there are yet more conditions (much more rare) where the EBP
+ // walk could get lost (e.g., we're inside an epilog). But we only care
+ // about the most likely cases, and it's ok if the unlikely cases result
+ // in truncated stacks, as unlikely cases will be statistically
+ // irrelevant to CPU performance sampling profilers
+ CodeManState codeManState;
+ codeManState.dwIsSet = 0;
+ REGDISPLAY rd;
+ ZeroMemory(&rd, sizeof(rd));
+
+ rd.pEbp = &ctxCur.Ebp;
+ rd.Esp = ctxCur.Esp;
+ rd.ControlPC = ctxCur.Eip;
+
+ codeInfo.GetCodeManager()->UnwindStackFrame(
+ &rd,
+ &codeInfo,
+ SpeculativeStackwalk,
+ &codeManState,
+ NULL);
+
+ ctxCur.Ebp = *(rd.pEbp);
+ ctxCur.Esp = rd.Esp;
+ ctxCur.Eip = rd.ControlPC;
+ }
+ else
+ {
+ // We're in an actual EBP frame, so we can simplistically walk down to
+ // the next frame using EBP.
+
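+            // Illustrative x86 EBP frame layout assumed by this walk (an
+            // editorial sketch; higher addresses are toward the caller):
+            //
+            //     [EBP + 8 + 4*i]  arguments pushed by the caller
+            //     [EBP + 4]        return address
+            //     [EBP + 0]        caller's saved EBP  <-- EBP points here
+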
+ // Return address is stored just below saved EBP (stack grows up)
+ ctxCur.Eip = *(DWORD *) (ctxCur.Ebp + sizeof(DWORD));
+
+ ctxCur.Esp =
+ // Stack location where current function pushed its EBP
+ ctxCur.Ebp +
+
+ // Skip past that EBP
+ sizeof(DWORD) +
+
+ // Skip past return address pushed by caller
+ sizeof(DWORD) +
+
+ // Skip past arguments to current function that were pushed by caller.
+ // (Caller will pop varargs, so don't count those.)
+ (header.varargs ? 0 : (header.argCount * sizeof(DWORD)));
+
+ // EBP for frame below us (stack grows up) has been saved onto our own
+ // frame. Dereference it now.
+ ctxCur.Ebp = *(DWORD *) ctxCur.Ebp;
+ }
+ }
+ }
+}
+#endif // _TARGET_X86_
+
+//*****************************************************************************
+// The profiler stackwalk Wrapper
+//*****************************************************************************
+HRESULT ProfToEEInterfaceImpl::ProfilerStackWalkFramesWrapper(Thread * pThreadToSnapshot, PROFILER_STACK_WALK_DATA * pData, unsigned flags)
+{
+ STATIC_CONTRACT_WRAPPER;
+
+ StackWalkAction swaRet = pThreadToSnapshot->StackWalkFrames(
+ (PSTACKWALKFRAMESCALLBACK)ProfilerStackWalkCallback,
+ pData,
+ flags,
+ NULL);
+
+ switch (swaRet)
+ {
+ default:
+ _ASSERTE(!"Unexpected StackWalkAction returned from Thread::StackWalkFrames");
+ return E_FAIL;
+
+ case SWA_FAILED:
+ return E_FAIL;
+
+ case SWA_ABORT:
+ return CORPROF_E_STACKSNAPSHOT_ABORTED;
+
+ case SWA_DONE:
+ return S_OK;
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// DoStackSnapshot helper that uses ExecutionManager::IsManagedCode to determine
+// whether the specified context is in managed code.
+//
+// Arguments:
+// pCtx - Context to look at
+// hostCallPreference - Describes how to acquire the reader lock--either AllowHostCalls
+// or NoHostCalls (see code:HostCallPreference).
+//
+// Return Value:
+// S_OK: The context is in managed code
+// S_FALSE: The context is not in managed code.
+// Error: Unable to determine (typically because hostCallPreference was NoHostCalls
+// and the reader lock was unattainable without yielding)
+//
+
+HRESULT IsContextInManagedCode(const CONTEXT * pCtx, HostCallPreference hostCallPreference)
+{
+ WRAPPER_NO_CONTRACT;
+ BOOL fFailedReaderLock = FALSE;
+
+ // if there's no Jit Manager for the IP, it's not managed code.
+ BOOL fIsManagedCode = ExecutionManager::IsManagedCode(GetIP(pCtx), hostCallPreference, &fFailedReaderLock);
+ if (fFailedReaderLock)
+ {
+ return CORPROF_E_ASYNCHRONOUS_UNSAFE;
+ }
+
+ return fIsManagedCode ? S_OK : S_FALSE;
+}
+
+//*****************************************************************************
+// Perform a stack walk, calling back to callback at each managed frame.
+//*****************************************************************************
+HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread,
+ StackSnapshotCallback *callback,
+ ULONG32 infoFlags,
+ void *clientData,
+ BYTE * pbContext,
+ ULONG32 contextSize)
+{
+
+#ifdef _TARGET_ARM_
+    // DoStackSnapshot is not supported on ARM. Profilers can use OS APIs to get the call stack.
+ return E_NOTIMPL;
+#endif
+
+#ifndef FEATURE_HIJACK
+
+ // DoStackSnapshot needs Thread::Suspend/ResumeThread functionality.
+ // On platforms w/o support for these APIs return E_NOTIMPL.
+ return E_NOTIMPL;
+
+#else // FEATURE_HIJACK
+
+ CONTRACTL
+ {
+ // Yay! (Note: NOTHROW is vital. The throw at minimum allocates
+ // the thrown object which we *must* not do.)
+ NOTHROW;
+
+ // Yay! (Note: this is called asynchronously to view the stack at arbitrary times,
+ // so the stack is not necessarily crawlable for GC at this state!)
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // #DisableLockOnAsyncCalls
+        // This call is allowed asynchronously; however, it does take locks. Therefore,
+ // we will hit contract asserts if we happen to be in a CANNOT_TAKE_LOCK zone when
+ // a hijacking profiler hijacks this thread to run DoStackSnapshot. CANNOT_RETAKE_LOCK
+ // is a more granular locking contract that says "I promise that if I take locks, I
+ // won't reenter any locks that were taken before this function was called".
+ DISABLED(CAN_TAKE_LOCK);
+
+ // Asynchronous functions can be called at arbitrary times when runtime
+ // is holding locks that cannot be reentered without causing deadlock.
+ // This contract detects any attempts to reenter locks held at the time
+ // this function was called.
+ CANNOT_RETAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // This CONTRACT_VIOLATION is still needed because DISABLED(CAN_TAKE_LOCK) does not
+ // turn off contract violations.
+ PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
+
+ LPCONTEXT pctxSeed = reinterpret_cast<LPCONTEXT> (pbContext);
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: DoStackSnapshot 0x%p, 0x%p, 0x%08x, 0x%p, 0x%p, 0x%08x.\n",
+ thread,
+ callback,
+ infoFlags,
+ clientData,
+ pctxSeed,
+ contextSize));
+
+ HRESULT hr = E_UNEXPECTED;
+ // (hr assignment is to appease the rotor compiler; we won't actually return without explicitly setting hr again)
+
+ Thread *pThreadToSnapshot = NULL;
+ Thread * pCurrentThread = GetThreadNULLOk();
+ BOOL fResumeThread = FALSE;
+ INDEBUG(ULONG ulForbidTypeLoad = 0;)
+ BOOL fResetSnapshotThreadExternalCount = FALSE;
+ int cRefsSnapshotThread = 0;
+
+ // Remember whether we've already determined the current context of the target thread
+ // is in managed (S_OK), not in managed (S_FALSE), or unknown (error).
+ HRESULT hrCurrentContextIsManaged = E_FAIL;
+
+ CONTEXT ctxCurrent;
+ memset(&ctxCurrent, 0, sizeof(ctxCurrent));
+
+ REGDISPLAY rd;
+
+ PROFILER_STACK_WALK_DATA data;
+
+ if (!g_fEEStarted )
+ {
+        // No managed code has run, and things are likely in a very bad, partially loaded state;
+        // this is a bad time to try to walk the stack.
+
+ // Returning directly as there is nothing to cleanup yet
+ return CORPROF_E_STACKSNAPSHOT_UNSAFE;
+ }
+
+ if (!CORProfilerStackSnapshotEnabled())
+ {
+ // Returning directly as there is nothing to cleanup yet, and can't skip gcholder ctor
+ return CORPROF_E_INCONSISTENT_WITH_FLAGS;
+ }
+
+ if (thread == NULL)
+ {
+ pThreadToSnapshot = pCurrentThread;
+ }
+ else
+ {
+ pThreadToSnapshot = (Thread *)thread;
+ }
+
+#ifdef _TARGET_X86_
+ if ((infoFlags & ~(COR_PRF_SNAPSHOT_REGISTER_CONTEXT | COR_PRF_SNAPSHOT_X86_OPTIMIZED)) != 0)
+#else
+ if ((infoFlags & ~(COR_PRF_SNAPSHOT_REGISTER_CONTEXT)) != 0)
+#endif
+ {
+ // Returning directly as there is nothing to cleanup yet, and can't skip gcholder ctor
+ return E_INVALIDARG;
+ }
+
+ if (!IsManagedThread(pThreadToSnapshot) || !IsGarbageCollectorFullyInitialized())
+ {
+ //
+ // No managed frames, return now.
+ //
+ // Returning directly as there is nothing to cleanup yet, and can't skip gcholder ctor
+ return S_OK;
+ }
+
+ // We must make sure no other thread tries to hijack the thread we're about to walk
+ // Hijacking means Thread::HijackThread, i.e. bashing return addresses which would break the stack walk
+ Thread::HijackLockHolder hijackLockHolder(pThreadToSnapshot);
+ if (!hijackLockHolder.Acquired())
+ {
+ // Returning directly as there is nothing to cleanup yet, and can't skip gcholder ctor
+ return CORPROF_E_STACKSNAPSHOT_UNSAFE;
+ }
+
+ if (pThreadToSnapshot != pCurrentThread // Walking separate thread
+ && pCurrentThread != NULL // Walker (current) thread is a managed / VM thread
+        && ThreadSuspend::SysIsSuspendInProgress()) // EE is trying to suspend itself
+ {
+ // Since we're walking a separate thread, we'd have to suspend it first (see below).
+ // And since the current thread is a VM thread, that means the current thread's
+ // m_dwForbidSuspendThread count will go up while it's trying to suspend the
+ // target thread (see Thread::SuspendThread). THAT means no one will be able
+ // to suspend the current thread until its m_dwForbidSuspendThread is decremented
+ // (which happens as soon as the target thread of DoStackSnapshot has been suspended).
+ // Since we're in the process of suspending the entire runtime, now would be a bad time to
+ // make the walker thread un-suspendable (see VsWhidbey bug 454936). So let's just abort
+        // now. Note that there is no synchronization around calling ThreadSuspend::SysIsSuspendInProgress().
+ // So we will get occasional false positives or false negatives. But that's benign, as the worst
+ // that might happen is we might occasionally delay the EE suspension a little bit, or we might
+ // too eagerly fail from ProfToEEInterfaceImpl::DoStackSnapshot sometimes. But there won't
+ // be any corruption or AV.
+ //
+ // Returning directly as there is nothing to cleanup yet, and can't skip gcholder ctor
+ return CORPROF_E_STACKSNAPSHOT_UNSAFE;
+ }
+
+ // We only allow stackwalking if:
+ // 1) Target thread to walk == current thread OR Target thread is suspended, AND
+ // 2) Target thread to walk is currently executing JITted / NGENd code, AND
+ // 3) Target thread to walk is seeded OR currently NOT unwinding the stack, AND
+ // 4) Target thread to walk != current thread OR current thread is NOT in a can't stop or forbid suspend region
+
+ // If the thread is in a forbid suspend region, it's dangerous to do anything:
+ // - The code manager datastructures accessed during the stackwalk may be in inconsistent state.
+ // - Thread::Suspend won't be able to suspend the thread.
+ if (pThreadToSnapshot->IsInForbidSuspendRegion())
+ {
+ hr = CORPROF_E_STACKSNAPSHOT_UNSAFE;
+ goto Cleanup;
+ }
+
+ HostCallPreference hostCallPreference;
+
+ // First, check "1) Target thread to walk == current thread OR Target thread is suspended"
+ if (pThreadToSnapshot != pCurrentThread)
+ {
+ // Walking separate thread, so it must be suspended. First, ensure that
+ // target thread exists.
+ //
+ // NOTE: We're using the "dangerous" variant of this refcount function, because we
+ // rely on the profiler to ensure it never tries to walk a thread being destroyed.
+ // (Profiler must block in its ThreadDestroyed() callback until all uses of that thread,
+ // such as walking its stack, are complete.)
+ cRefsSnapshotThread = pThreadToSnapshot->IncExternalCountDANGEROUSProfilerOnly();
+ fResetSnapshotThreadExternalCount = TRUE;
+
+ if (cRefsSnapshotThread == 1 || !pThreadToSnapshot->HasValidThreadHandle())
+ {
+ // At this point, we've modified the VM state based on bad input
+ // (pThreadToSnapshot) from the profiler. This could cause
+ // memory corruption and leave us vulnerable to security problems.
+ // So destroy the process.
+ _ASSERTE(!"Profiler trying to walk destroyed thread");
+ EEPOLICY_HANDLE_FATAL_ERROR(CORPROF_E_STACKSNAPSHOT_INVALID_TGT_THREAD);
+ }
+
+ // Thread::SuspendThread() ensures that no one else should try to suspend us
+ // while we're suspending pThreadToSnapshot.
+ //
+ // TRUE: OneTryOnly. Don't loop waiting for others to get out of our way in
+ // order to suspend the thread. If it's not safe, just return an error immediately.
+ Thread::SuspendThreadResult str = pThreadToSnapshot->SuspendThread(TRUE);
+ if (str == Thread::STR_Success)
+ {
+ fResumeThread = TRUE;
+ }
+ else
+ {
+ hr = CORPROF_E_STACKSNAPSHOT_UNSAFE;
+ goto Cleanup;
+ }
+ }
+
+ hostCallPreference =
+ ShouldAvoidHostCalls() ?
+ NoHostCalls : // Async call: Ensure this thread won't yield & re-enter host
+ AllowHostCalls; // Synchronous calls may re-enter host just fine
+
+ // If target thread is in pre-emptive mode, the profiler's seed context is unnecessary
+ // because our frame chain is good enough: it will give us at least as accurate a
+ // starting point as the profiler could. Also, since profiler contexts cannot be
+ // trusted, we don't want to set the thread's profiler filter context to this, as a GC
+ // that interrupts the profiler's stackwalk will end up using the profiler's (potentially
+ // bogus) filter context.
+ if (!pThreadToSnapshot->PreemptiveGCDisabledOther())
+ {
+ // Thread to be walked is in preemptive mode. Throw out seed.
+ pctxSeed = NULL;
+ }
+ else if (pThreadToSnapshot != pCurrentThread)
+ {
+ // With cross-thread stack-walks, the target thread's context could be unreliable.
+ // That would shed doubt on either a profiler-provided context, or a default
+ // context we chose. So check if we're in a potentially unreliable case, and return
+ // an error if so.
+ //
+        // These heuristics are based on an actual bug where GetThreadContext returned a
+ // self-consistent, but stale, context for a thread suspended after being redirected by
+ // the GC (TFS Dev 10 bug # 733263).
+ //
+ // (Note that this whole block is skipped if pThreadToSnapshot is in preemptive mode (the IF
+ // above), as the context is unused in such a case--the EE Frame chain is used
+ // to seed the walk instead.)
+
+ if (!pThreadToSnapshot->GetSafelyRedirectableThreadContext(Thread::kDefaultChecks, &ctxCurrent, &rd))
+ {
+ LOG((LF_CORPROF, LL_INFO100, "**PROF: GetSafelyRedirectableThreadContext failure leads to CORPROF_E_STACKSNAPSHOT_UNSAFE.\n"));
+ hr = CORPROF_E_STACKSNAPSHOT_UNSAFE;
+ goto Cleanup;
+ }
+
+ hrCurrentContextIsManaged = IsContextInManagedCode(&ctxCurrent, hostCallPreference);
+ if (FAILED(hrCurrentContextIsManaged))
+ {
+ // Couldn't get the info. Try again later
+ _ASSERTE(ShouldAvoidHostCalls());
+ hr = CORPROF_E_ASYNCHRONOUS_UNSAFE;
+ goto Cleanup;
+ }
+
+ if ((hrCurrentContextIsManaged == S_OK) &&
+ (!pThreadToSnapshot->PreemptiveGCDisabledOther()))
+ {
+ // Thread is in preemptive mode while executing managed code?! This lie is
+ // an early warning sign that the context is bogus. Bail.
+ LOG((LF_CORPROF, LL_INFO100, "**PROF: Target thread context is likely bogus. Returning CORPROF_E_STACKSNAPSHOT_UNSAFE.\n"));
+ hr = CORPROF_E_STACKSNAPSHOT_UNSAFE;
+ goto Cleanup;
+ }
+
+ Frame * pFrame = pThreadToSnapshot->GetFrame();
+ if (pFrame != FRAME_TOP)
+ {
+ TADDR spTargetThread = GetSP(&ctxCurrent);
+ if (dac_cast<TADDR>(pFrame) < spTargetThread)
+ {
+ // An Explicit EE Frame is more recent on the stack than the current
+ // stack pointer itself? This lie is an early warning sign that the
+ // context is bogus. Bail.
+ LOG((LF_CORPROF, LL_INFO100, "**PROF: Target thread context is likely bogus. Returning CORPROF_E_STACKSNAPSHOT_UNSAFE.\n"));
+ hr = CORPROF_E_STACKSNAPSHOT_UNSAFE;
+ goto Cleanup;
+ }
+ }
+
+ // If the profiler did not specify a seed context of its own, use the current one we
+ // just produced.
+ //
+        // Failing to seed the walk can cause us to "miss" functions on the stack. This is
+ // because StackWalkFrames(), when doing an unseeded stackwalk, sets the
+ // starting regdisplay's IP/SP to 0. This, in turn causes StackWalkFramesEx
+ // to set cf.isFrameless = (pEEJM != NULL); (which is FALSE, since we have no
+ // jit manager, since we have no IP). Once frameless is false, we look solely to
+ // the Frame chain for our goodies, rather than looking at the code actually
+ // being executed by the thread. The problem with the frame chain is that some
+ // frames (e.g., GCFrame) don't point to any functions being executed. So
+ // StackWalkFramesEx just skips such frames and moves to the next one. That
+ // can cause a chunk of calls to be skipped. To prevent this from happening, we
+ // "fake" a seed by just seeding the thread with its current context. This forces
+ // StackWalkFramesEx() to look at the IP rather than just the frame chain.
+ if (pctxSeed == NULL)
+ {
+ pctxSeed = &ctxCurrent;
+ }
+ }
+
+ // Second, check "2) Target thread to walk is currently executing JITted / NGENd code"
+ // To do this, we need to find the proper context to investigate. Start with
+ // the seeded context, if available. If not, use the target thread's current context.
+ if (pctxSeed != NULL)
+ {
+ BOOL fSeedIsManaged;
+
+ // Short cut: If we're just using the current context as the seed, we may
+ // already have determined whether it's in managed code. If so, just use that
+ // result rather than calculating it again
+ if ((pctxSeed == &ctxCurrent) && SUCCEEDED(hrCurrentContextIsManaged))
+ {
+ fSeedIsManaged = (hrCurrentContextIsManaged == S_OK);
+ }
+ else
+ {
+ hr = IsContextInManagedCode(pctxSeed, hostCallPreference);
+ if (FAILED(hr))
+ {
+ hr = CORPROF_E_ASYNCHRONOUS_UNSAFE;
+ goto Cleanup;
+ }
+ fSeedIsManaged = (hr == S_OK);
+ }
+
+ if (!fSeedIsManaged)
+ {
+ hr = CORPROF_E_STACKSNAPSHOT_UNMANAGED_CTX;
+ goto Cleanup;
+ }
+ }
+
+#ifdef _DEBUG
+ //
+ // Sanity check: If we are doing a cross-thread walk and there is no seed context, then
+    // we better not be in managed code; otherwise we do not have a Frame on the stack from which to start
+    // walking and we may miss the leaf-most chain of managed calls due to the way StackWalkFrames
+    // is implemented. However, there is an exception when the leaf-most EE frame of pThreadToSnapshot
+    // is an InlinedCallFrame, which has an active call, implying pThreadToSnapshot is inside an
+    // inlined P/Invoke. In this case, the InlinedCallFrame will be used to help start off our
+ // stackwalk at the top of the stack.
+ //
+ if (pThreadToSnapshot != pCurrentThread)
+ {
+ if (pctxSeed == NULL)
+ {
+ if (pThreadToSnapshot->GetSafelyRedirectableThreadContext(Thread::kDefaultChecks, &ctxCurrent, &rd))
+ {
+ BOOL fFailedReaderLock = FALSE;
+ BOOL fIsManagedCode = ExecutionManager::IsManagedCode(GetIP(&ctxCurrent), hostCallPreference, &fFailedReaderLock);
+
+ if (!fFailedReaderLock)
+ {
+                    // not in JITted or NGENd code, or inside an inlined P/Invoke (the leaf-most EE Frame is
+ // an InlinedCallFrame with an active call)
+ _ASSERTE(!fIsManagedCode ||
+ (InlinedCallFrame::FrameHasActiveCall(pThreadToSnapshot->GetFrame())));
+ }
+ }
+ }
+ }
+#endif
+
+ // Third, verify the target thread is seeded or not in the midst of an unwind.
+ if (pctxSeed == NULL)
+ {
+ ThreadExceptionState* pExState = pThreadToSnapshot->GetExceptionState();
+
+ // this tests to see if there is an exception in flight
+ if (pExState->IsExceptionInProgress() && pExState->GetFlags()->UnwindHasStarted())
+ {
+ EHClauseInfo *pCurrentEHClauseInfo = pThreadToSnapshot->GetExceptionState()->GetCurrentEHClauseInfo();
+
+ // if the exception code is telling us that we have entered a managed context then all is well
+ if (!pCurrentEHClauseInfo->IsManagedCodeEntered())
+ {
+ hr = CORPROF_E_STACKSNAPSHOT_UNMANAGED_CTX;
+ goto Cleanup;
+ }
+ }
+ }
+
+ // Check if the exception state is consistent. See the comment for ThreadExceptionFlag for more information.
+ if (pThreadToSnapshot->GetExceptionState()->HasThreadExceptionFlag(ThreadExceptionState::TEF_InconsistentExceptionState))
+ {
+ hr = CORPROF_E_STACKSNAPSHOT_UNSAFE;
+ goto Cleanup;
+ }
+
+ data.callback = callback;
+ data.infoFlags = infoFlags;
+ data.contextFlags = 0;
+ data.clientData = clientData;
+#ifdef WIN64EXCEPTIONS
+ data.sfParent.Clear();
+#endif
+
+    // Workaround: the ForbidTypeLoad bookkeeping in the stackwalker is not robust against exceptions.
+    // Unfortunately, it is hard to get it right in the stackwalker since it has to be exception-
+    // handling free (frame unwinding may never return). We restore the ForbidTypeLoad counter here
+    // in case it got messed up by an exception thrown during the stackwalk.
+ INDEBUG(if (pCurrentThread) ulForbidTypeLoad = pCurrentThread->m_ulForbidTypeLoad;)
+
+ {
+ // An AV during a profiler stackwalk is an isolated event and shouldn't bring
+ // down the runtime. Need to place the holder here, outside of ProfilerStackWalkFramesWrapper
+ // since ProfilerStackWalkFramesWrapper uses __try, which doesn't like objects
+ // with destructors.
+ AVInRuntimeImplOkayHolder AVOkay;
+
+ hr = DoStackSnapshotHelper(
+ pThreadToSnapshot,
+ &data,
+ HANDLESKIPPEDFRAMES |
+ FUNCTIONSONLY |
+ NOTIFY_ON_U2M_TRANSITIONS |
+ ((pThreadToSnapshot == pCurrentThread) ?
+ 0 :
+ ALLOW_ASYNC_STACK_WALK | THREAD_IS_SUSPENDED) |
+ THREAD_EXECUTING_MANAGED_CODE |
+ PROFILER_DO_STACK_SNAPSHOT |
+ ALLOW_INVALID_OBJECTS, // stack walk logic should not look at objects - we could be in the middle of a gc.
+ pctxSeed);
+ }
+
+ INDEBUG(if (pCurrentThread) pCurrentThread->m_ulForbidTypeLoad = ulForbidTypeLoad;)
+
+
+Cleanup:
+ if (fResumeThread)
+ {
+ pThreadToSnapshot->ResumeThread();
+ }
+ if (fResetSnapshotThreadExternalCount)
+ {
+ pThreadToSnapshot->DecExternalCountDANGEROUSProfilerOnly();
+ }
+
+ return hr;
+
+#endif // FEATURE_HIJACK
+}
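+
+// Example (illustrative sketch, not part of the runtime): a sampling profiler
+// typically drives DoStackSnapshot from its own sampler thread against a
+// target thread ("pInfo" and "targetThreadId" are hypothetical):
+//
+//     HRESULT __stdcall MyStackCallback(FunctionID funcId, UINT_PTR ip,
+//         COR_PRF_FRAME_INFO frameInfo, ULONG32 cbContext, BYTE context[],
+//         void *clientData)
+//     {
+//         // funcId == 0 marks an unmanaged block; record ip either way.
+//         return S_OK;    // S_OK continues the walk; anything else aborts it.
+//     }
+//
+//     pInfo->DoStackSnapshot(targetThreadId, &MyStackCallback,
+//                            COR_PRF_SNAPSHOT_DEFAULT, NULL, NULL, 0);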
+
+
+//---------------------------------------------------------------------------------------
+//
+// Exception swallowing wrapper around the profiler stackwalk
+//
+// Arguments:
+// pThreadToSnapshot - Thread whose stack should be walked
+// pData - data for stack walker
+// flags - flags parameter to pass to StackWalkFramesEx, and StackFrameIterator
+// pctxSeed - Register context with which to seed the walk
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+HRESULT ProfToEEInterfaceImpl::DoStackSnapshotHelper(Thread * pThreadToSnapshot,
+ PROFILER_STACK_WALK_DATA * pData,
+ unsigned flags,
+ LPCONTEXT pctxSeed)
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ // We want to catch and swallow AVs here. For example, if the profiler gives
+ // us a bogus seed context (this happens), we could AV when inspecting memory pointed to
+ // by the (bogus) EBP register.
+ //
+    // EX_TRY/EX_CATCH does a lot of extra work that we do not need and that can go wrong for us.
+    // E.g., it asserts in debug builds for AVs in mscorwks, and it synthesizes an object for the exception.
+    // We use a plain PAL_TRY/PAL_EXCEPT since that is all we need.
+ struct Param {
+ HRESULT hr;
+ Thread * pThreadToSnapshot;
+ PROFILER_STACK_WALK_DATA * pData;
+ unsigned flags;
+ ProfToEEInterfaceImpl * pProfToEE;
+ LPCONTEXT pctxSeed;
+ BOOL fResetProfilerFilterContext;
+ };
+
+ Param param;
+ param.hr = E_UNEXPECTED;
+ param.pThreadToSnapshot = pThreadToSnapshot;
+ param.pData = pData;
+ param.flags = flags;
+ param.pProfToEE = this;
+ param.pctxSeed = pctxSeed;
+ param.fResetProfilerFilterContext = FALSE;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ if ((pParam->pData->infoFlags & COR_PRF_SNAPSHOT_X86_OPTIMIZED) != 0)
+ {
+#ifndef _TARGET_X86_
+            // The check at the beginning of DoStackSnapshot (which returns E_INVALIDARG) should
+            // make this unreachable
+ _ASSERTE(!"COR_PRF_SNAPSHOT_X86_OPTIMIZED on non-X86 should be unreachable!");
+#else
+ // New, simple EBP walker
+ pParam->hr = pParam->pProfToEE->ProfilerEbpWalker(
+ pParam->pThreadToSnapshot,
+ pParam->pctxSeed,
+ pParam->pData->callback,
+ pParam->pData->clientData);
+#endif // _TARGET_X86_
+ }
+ else
+ {
+ // We're now fairly confident the stackwalk should be ok, so set
+ // the context seed, if one was provided or cooked up.
+ if (pParam->pctxSeed != NULL)
+ {
+ pParam->pThreadToSnapshot->SetProfilerFilterContext(pParam->pctxSeed);
+ pParam->fResetProfilerFilterContext = TRUE;
+ }
+
+ // Whidbey-style walker, uses StackWalkFramesEx
+ pParam->hr = pParam->pProfToEE->ProfilerStackWalkFramesWrapper(
+ pParam->pThreadToSnapshot,
+ pParam->pData,
+ pParam->flags);
+ }
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ param.hr = E_UNEXPECTED;
+ }
+ PAL_ENDTRY;
+
+    // Undo the context seeding we did above (if any); resuming the walked
+    // thread (if we suspended it) is handled by DoStackSnapshot's cleanup path.
+ if (param.fResetProfilerFilterContext)
+ {
+ pThreadToSnapshot->SetProfilerFilterContext(NULL);
+ }
+
+ return param.hr;
+}
+
+
+HRESULT ProfToEEInterfaceImpl::GetGenerationBounds(ULONG cObjectRanges,
+ ULONG *pcObjectRanges,
+ COR_PRF_GC_GENERATION_RANGE ranges[])
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(pcObjectRanges));
+ PRECONDITION(cObjectRanges <= 0 || ranges != NULL);
+ PRECONDITION(s_generationTableLock >= 0);
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetGenerationBounds.\n"));
+
+ // Announce we are using the generation table now
+ CounterHolder genTableLock(&s_generationTableLock);
+
+ GenerationTable *generationTable = s_currentGenerationTable;
+
+ if (generationTable == NULL)
+ {
+ return E_FAIL;
+ }
+
+ _ASSERTE(generationTable->magic == GENERATION_TABLE_MAGIC);
+
+ GenerationDesc *genDescTable = generationTable->genDescTable;
+ ULONG count = min(generationTable->count, cObjectRanges);
+ for (ULONG i = 0; i < count; i++)
+ {
+ ranges[i].generation = (COR_PRF_GC_GENERATION)genDescTable[i].generation;
+ ranges[i].rangeStart = (ObjectID)genDescTable[i].rangeStart;
+ ranges[i].rangeLength = genDescTable[i].rangeEnd - genDescTable[i].rangeStart;
+ ranges[i].rangeLengthReserved = genDescTable[i].rangeEndReserved - genDescTable[i].rangeStart;
+ }
+
+ *pcObjectRanges = generationTable->count;
+
+ return S_OK;
+}
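+
+// Example (illustrative sketch, not part of the runtime): the usual two-call
+// pattern for sizing and filling the ranges array ("pInfo" is hypothetical):
+//
+//     ULONG cRanges = 0;
+//     pInfo->GetGenerationBounds(0, &cRanges, NULL);
+//     COR_PRF_GC_GENERATION_RANGE *pRanges =
+//         new COR_PRF_GC_GENERATION_RANGE[cRanges];
+//     pInfo->GetGenerationBounds(cRanges, &cRanges, pRanges);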
+
+
+HRESULT ProfToEEInterfaceImpl::GetNotifiedExceptionClauseInfo(COR_PRF_EX_CLAUSE_INFO * pinfo)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(pinfo));
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetNotifiedExceptionClauseInfo.\n"));
+
+ HRESULT hr = S_OK;
+
+ ThreadExceptionState* pExState = NULL;
+ EHClauseInfo* pCurrentEHClauseInfo = NULL;
+
+ // notification requires that we are on a managed thread with an exception in flight
+ Thread *pThread = GetThread();
+
+ // If pThread is null, then the thread has never run managed code
+ if (pThread == NULL)
+ {
+ hr = CORPROF_E_NOT_MANAGED_THREAD;
+ goto NullReturn;
+ }
+
+ pExState = pThread->GetExceptionState();
+ if (!pExState->IsExceptionInProgress())
+ {
+ // no exception is in flight -- successful failure
+ hr = S_FALSE;
+ goto NullReturn;
+ }
+
+ pCurrentEHClauseInfo = pExState->GetCurrentEHClauseInfo();
+ if (pCurrentEHClauseInfo->GetClauseType() == COR_PRF_CLAUSE_NONE)
+ {
+ // no exception is in flight -- successful failure
+ hr = S_FALSE;
+ goto NullReturn;
+ }
+
+ pinfo->clauseType = pCurrentEHClauseInfo->GetClauseType();
+ pinfo->programCounter = pCurrentEHClauseInfo->GetIPForEHClause();
+ pinfo->framePointer = pCurrentEHClauseInfo->GetFramePointerForEHClause();
+ pinfo->shadowStackPointer = 0;
+
+ return S_OK;
+
+NullReturn:
+ memset(pinfo, 0, sizeof(*pinfo));
+ return hr;
+}
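+
+// Example (illustrative sketch, not part of the runtime): this is typically
+// called from an exception-phase callback such as ExceptionSearchFilterEnter
+// or ExceptionUnwindFinallyEnter ("pInfo" is hypothetical):
+//
+//     COR_PRF_EX_CLAUSE_INFO info;
+//     if (pInfo->GetNotifiedExceptionClauseInfo(&info) == S_OK)
+//     {
+//         // info.programCounter / info.framePointer locate the clause.
+//     }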
+
+
+HRESULT ProfToEEInterfaceImpl::GetObjectGeneration(ObjectID objectId,
+ COR_PRF_GC_GENERATION_RANGE *range)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(objectId != NULL);
+ PRECONDITION(CheckPointer(range));
+ PRECONDITION(s_generationTableLock >= 0);
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetObjectGeneration 0x%p.\n",
+ objectId));
+
+ BEGIN_GETTHREAD_ALLOWED;
+ _ASSERTE((GetThread() == NULL) || (GetThread()->PreemptiveGCDisabled()));
+ END_GETTHREAD_ALLOWED;
+
+ // Announce we are using the generation table now
+ CounterHolder genTableLock(&s_generationTableLock);
+
+ GenerationTable *generationTable = s_currentGenerationTable;
+
+ if (generationTable == NULL)
+ {
+ return E_FAIL;
+ }
+
+ _ASSERTE(generationTable->magic == GENERATION_TABLE_MAGIC);
+
+ GenerationDesc *genDescTable = generationTable->genDescTable;
+ ULONG count = generationTable->count;
+ for (ULONG i = 0; i < count; i++)
+ {
+ if (genDescTable[i].rangeStart <= (BYTE *)objectId && (BYTE *)objectId < genDescTable[i].rangeEndReserved)
+ {
+ range->generation = (COR_PRF_GC_GENERATION)genDescTable[i].generation;
+ range->rangeStart = (ObjectID)genDescTable[i].rangeStart;
+ range->rangeLength = genDescTable[i].rangeEnd - genDescTable[i].rangeStart;
+ range->rangeLengthReserved = genDescTable[i].rangeEndReserved - genDescTable[i].rangeStart;
+
+ return S_OK;
+ }
+ }
+
+ return E_FAIL;
+}
+
+HRESULT ProfToEEInterfaceImpl::GetReJITIDs(
+ FunctionID functionId, // in
+ ULONG cReJitIds, // in
+ ULONG * pcReJitIds, // out
+ ReJITID reJitIds[]) // out
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+        // Taking a lock can trigger a GC
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // The rejit tables use a lock
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(pcReJitIds, NULL_OK));
+ PRECONDITION(CheckPointer(reJitIds, NULL_OK));
+
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetReJITIDs 0x%p.\n",
+ functionId));
+
+ if (functionId == 0)
+ {
+ return E_INVALIDARG;
+ }
+
+ if ((cReJitIds == 0) || (pcReJitIds == NULL) || (reJitIds == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ MethodDesc * pMD = FunctionIdToMethodDesc(functionId);
+
+ return pMD->GetReJitManager()->GetReJITIDs(pMD, cReJitIds, pcReJitIds, reJitIds);
+}
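+
+// Example (illustrative sketch, not part of the runtime): note that, per the
+// checks above, this implementation rejects cReJitIds == 0 and NULL arrays, so
+// a caller sizes the array up front ("pInfo" and "functionId" are hypothetical):
+//
+//     ReJITID rejitIds[16];
+//     ULONG cRejitIds = 0;
+//     pInfo->GetReJITIDs(functionId, 16, &cRejitIds, rejitIds);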
+
+HRESULT ProfToEEInterfaceImpl::RequestReJIT(ULONG cFunctions, // in
+ ModuleID moduleIds[], // in
+ mdMethodDef methodIds[]) // in
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+        // When we suspend the runtime, we drop into preemptive mode
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+        // We need to suspend the runtime; this takes a lot of locks!
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(moduleIds, NULL_OK));
+ PRECONDITION(CheckPointer(methodIds, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: RequestReJIT.\n"));
+
+ if (!g_profControlBlock.pProfInterface->IsCallback4Supported())
+ {
+ return CORPROF_E_CALLBACK4_REQUIRED;
+ }
+
+ if (!CORProfilerEnableRejit())
+ {
+ return CORPROF_E_REJIT_NOT_ENABLED;
+ }
+
+ // Request at least 1 method to reJIT!
+ if ((cFunctions == 0) || (moduleIds == NULL) || (methodIds == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ // Remember the profiler is doing this, as that means we must never detach it!
+ g_profControlBlock.pProfInterface->SetUnrevertiblyModifiedILFlag();
+
+ GCX_PREEMP();
+ return ReJitManager::RequestReJIT(cFunctions, moduleIds, methodIds);
+}
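+
+// Example (illustrative sketch, not part of the runtime): RequestReJIT takes
+// parallel arrays, where moduleIds[i] / methodIds[i] together identify one
+// method ("pInfo", "moduleId", and "methodDef" are hypothetical):
+//
+//     ModuleID    modules[1] = { moduleId };
+//     mdMethodDef methods[1] = { methodDef };
+//     pInfo->RequestReJIT(1, modules, methods);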
+
+HRESULT ProfToEEInterfaceImpl::RequestRevert(ULONG cFunctions, // in
+ ModuleID moduleIds[], // in
+ mdMethodDef methodIds[], // in
+ HRESULT rgHrStatuses[]) // out
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // The rejit manager requires a lock to iterate through methods to revert, and
+ // taking the lock can drop us into preemptive mode.
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // The rejit manager requires a lock to iterate through methods to revert
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(moduleIds, NULL_OK));
+ PRECONDITION(CheckPointer(methodIds, NULL_OK));
+ PRECONDITION(CheckPointer(rgHrStatuses, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: RequestRevert.\n"));
+
+ if (!CORProfilerEnableRejit())
+ {
+ return CORPROF_E_REJIT_NOT_ENABLED;
+ }
+
+ // Request at least 1 method to revert!
+ if ((cFunctions == 0) || (moduleIds == NULL) || (methodIds == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ // Remember the profiler is doing this, as that means we must never detach it!
+ g_profControlBlock.pProfInterface->SetUnrevertiblyModifiedILFlag();
+
+ // Initialize the status array
+ if (rgHrStatuses != NULL)
+ {
+ memset(rgHrStatuses, 0, sizeof(HRESULT) * cFunctions);
+ _ASSERTE(S_OK == rgHrStatuses[0]);
+ }
+
+ GCX_PREEMP();
+ return ReJitManager::RequestRevert(cFunctions, moduleIds, methodIds, rgHrStatuses);
+}
+
+
+HRESULT ProfToEEInterfaceImpl::EnumJITedFunctions(ICorProfilerFunctionEnum ** ppEnum)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // If we're in preemptive mode we need to take a read lock to safely walk
+ // the JIT data structures.
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(ppEnum, NULL_OK));
+
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO10,
+ "**PROF: EnumJITedFunctions.\n"));
+
+ if (ppEnum == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ *ppEnum = NULL;
+
+ NewHolder<ProfilerFunctionEnum> pJitEnum(new (nothrow) ProfilerFunctionEnum());
+ if (pJitEnum == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ if (!pJitEnum->Init())
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ // Ownership transferred to [out] param. Caller must Release() when done with this.
+ *ppEnum = (ICorProfilerFunctionEnum *)pJitEnum.Extract();
+
+ return S_OK;
+}
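+
+// Example (illustrative sketch, not part of the runtime): the returned
+// enumerator follows the usual ICorProfiler*Enum pattern ("pInfo" is
+// hypothetical):
+//
+//     ICorProfilerFunctionEnum *pEnum = NULL;
+//     if (SUCCEEDED(pInfo->EnumJITedFunctions(&pEnum)))
+//     {
+//         COR_PRF_FUNCTION func;
+//         ULONG cFetched = 0;
+//         while (pEnum->Next(1, &func, &cFetched) == S_OK && cFetched == 1)
+//         {
+//             // func.functionId (and func.reJitId from EnumJITedFunctions2)
+//         }
+//         pEnum->Release();
+//     }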
+
+HRESULT ProfToEEInterfaceImpl::EnumJITedFunctions2(ICorProfilerFunctionEnum ** ppEnum)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+        // Gathering ReJIT IDs requires taking a lock, and that lock might switch us to
+        // preemptive mode...
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // If we're in preemptive mode we need to take a read lock to safely walk
+ // the JIT data structures.
+ // Gathering RejitIDs also takes a lock.
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(ppEnum, NULL_OK));
+
+ }
+ CONTRACTL_END;
+
+    PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+        kP2EEAllowableAfterAttach | kP2EETriggers,
+        (LF_CORPROF,
+        LL_INFO10,
+        "**PROF: EnumJITedFunctions2.\n"));
+
+ if (ppEnum == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ *ppEnum = NULL;
+
+ NewHolder<ProfilerFunctionEnum> pJitEnum(new (nothrow) ProfilerFunctionEnum());
+ if (pJitEnum == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ if (!pJitEnum->Init(TRUE /* fWithReJITIDs */))
+ {
+ // If it fails, it's because of OOM.
+ return E_OUTOFMEMORY;
+ }
+
+ // Ownership transferred to [out] param. Caller must Release() when done with this.
+ *ppEnum = (ICorProfilerFunctionEnum *)pJitEnum.Extract();
+
+ return S_OK;
+}
+
+HRESULT ProfToEEInterfaceImpl::EnumModules(ICorProfilerModuleEnum ** ppEnum)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // This method populates the enumerator, which requires iterating over
+ // AppDomains, which adds, then releases, a reference on each AppDomain iterated.
+ // This causes locking, and can cause triggering if the AppDomain gets destroyed
+ // as a result of the release. (See code:AppDomainIterator::Next and its call to
+ // code:AppDomain::Release.)
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // (See comment above GC_TRIGGERS.)
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(ppEnum, NULL_OK));
+
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EEAllowableAfterAttach | kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO10,
+ "**PROF: EnumModules.\n"));
+
+ HRESULT hr;
+
+ if (ppEnum == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ *ppEnum = NULL;
+
+    // ProfilerModuleEnum uses AppDomainIterator, which cannot be called while the current thread
+    // is holding the ThreadStore lock.
+ if (ThreadStore::HoldingThreadStore())
+ {
+ return CORPROF_E_UNSUPPORTED_CALL_SEQUENCE;
+ }
+
+ NewHolder<ProfilerModuleEnum> pModuleEnum(new (nothrow) ProfilerModuleEnum);
+ if (pModuleEnum == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ hr = pModuleEnum->Init();
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ // Ownership transferred to [out] param. Caller must Release() when done with this.
+ *ppEnum = (ICorProfilerModuleEnum *) pModuleEnum.Extract();
+
+ return S_OK;
+}
+
+HRESULT ProfToEEInterfaceImpl::GetRuntimeInformation(USHORT * pClrInstanceId,
+ COR_PRF_RUNTIME_TYPE * pRuntimeType,
+ USHORT * pMajorVersion,
+ USHORT * pMinorVersion,
+ USHORT * pBuildNumber,
+ USHORT * pQFEVersion,
+ ULONG cchVersionString,
+ ULONG * pcchVersionString,
+ __out_ecount_part_opt(cchVersionString, *pcchVersionString) WCHAR szVersionString[])
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Yay!
+ CANNOT_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetRuntimeInformation.\n"));
+
+ if ((szVersionString != NULL) && (pcchVersionString == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ if (pcchVersionString != NULL)
+ {
+ HRESULT hr = GetCORVersionInternal(szVersionString, (DWORD)cchVersionString, (DWORD *)pcchVersionString);
+ if (FAILED(hr))
+ return hr;
+ }
+
+ if (pClrInstanceId != NULL)
+ *pClrInstanceId = static_cast<USHORT>(GetClrInstanceId());
+
+ if (pRuntimeType != NULL)
+ {
+#ifdef FEATURE_CORECLR
+ *pRuntimeType = COR_PRF_CORE_CLR;
+#else // FEATURE_CORECLR
+ *pRuntimeType = COR_PRF_DESKTOP_CLR;
+#endif // FEATURE_CORECLR
+ }
+
+ if (pMajorVersion != NULL)
+ *pMajorVersion = VER_MAJORVERSION;
+
+ if (pMinorVersion != NULL)
+ *pMinorVersion = VER_MINORVERSION;
+
+ if (pBuildNumber != NULL)
+ *pBuildNumber = VER_PRODUCTBUILD;
+
+ if (pQFEVersion != NULL)
+ *pQFEVersion = VER_PRODUCTBUILD_QFE;
+
+ return S_OK;
+}
+
+
+HRESULT ProfToEEInterfaceImpl::RequestProfilerDetach(DWORD dwExpectedCompletionMilliseconds)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Crst is used in ProfilingAPIDetach::RequestProfilerDetach so GC may be triggered
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_ANY;
+
+ // Yay!
+ EE_THREAD_NOT_REQUIRED;
+
+ // Crst is used in ProfilingAPIDetach::RequestProfilerDetach
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EEAllowableAfterAttach | kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: RequestProfilerDetach.\n"));
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ return ProfilingAPIDetach::RequestProfilerDetach(dwExpectedCompletionMilliseconds);
+#else // FEATURE_PROFAPI_ATTACH_DETACH
+ return E_NOTIMPL;
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+}
+
+typedef struct _COR_PRF_ELT_INFO_INTERNAL
+{
+    // Points to a platform-dependent structure that the ASM helper pushes on the stack
+ void * platformSpecificHandle;
+
+    // The startAddress of the COR_PRF_FUNCTION_ARGUMENT_RANGE structure needs to point
+    // TO the argument value, not BE the argument value. So, when the argument
+    // is 'this', we need to point TO 'this'. Because of the calling-sequence change
+    // in ELT3, we need to reserve the pointer here instead of using one of our
+    // stack variables.
+ void * pThis;
+
+ // Reserve space for output parameter COR_PRF_FRAME_INFO of
+ // GetFunctionXXXX3Info functions
+ COR_PRF_FRAME_INFO_INTERNAL frameInfo;
+
+} COR_PRF_ELT_INFO_INTERNAL;
+
+//---------------------------------------------------------------------------------------
+//
+// ProfilingGetFunctionEnter3Info provides frame information and argument information for
+// the function the ELT callback is inspecting. It is called either by the profiler or by the
+// C helper function.
+//
+// Arguments:
+// * functionId - [in] FunctionId of the function being inspected by ELT3
+//    * eltInfo - [in] The opaque pointer the FunctionEnter3WithInfo callback passed to the profiler
+//    * pFrameInfo - [out] Pointer to a COR_PRF_FRAME_INFO the profiler can later use to inspect
+//        generic types
+//    * pcbArgumentInfo - [in, out] Pointer to a ULONG that specifies the size of the structure
+//        pointed to by pArgumentInfo
+//    * pArgumentInfo - [out] Pointer to a COR_PRF_FUNCTION_ARGUMENT_INFO structure; the profiler
+//        must reserve enough space for the function it is inspecting
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+HRESULT ProfilingGetFunctionEnter3Info(FunctionID functionId, // in
+ COR_PRF_ELT_INFO eltInfo, // in
+ COR_PRF_FRAME_INFO * pFrameInfo, // out
+ ULONG * pcbArgumentInfo, // in, out
+ COR_PRF_FUNCTION_ARGUMENT_INFO * pArgumentInfo) // out
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // ProfileArgIterator::ProfileArgIterator may take locks
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ }
+ CONTRACTL_END;
+
+ if ((functionId == NULL) || (eltInfo == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ COR_PRF_ELT_INFO_INTERNAL * pELTInfo = (COR_PRF_ELT_INFO_INTERNAL *)eltInfo;
+ ProfileSetFunctionIDInPlatformSpecificHandle(pELTInfo->platformSpecificHandle, functionId);
+
+ // The loader won't trigger a GC or throw for already loaded argument types.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ //
+ // Find the method this is referring to, so we can get the signature
+ //
+ MethodDesc * pMethodDesc = FunctionIdToMethodDesc(functionId);
+ MetaSig metaSig(pMethodDesc);
+
+ NewHolder<ProfileArgIterator> pProfileArgIterator;
+
+ {
+ // Can handle E_OUTOFMEMORY from ProfileArgIterator.
+ FAULT_NOT_FATAL();
+
+ pProfileArgIterator = new (nothrow) ProfileArgIterator(&metaSig, pELTInfo->platformSpecificHandle);
+
+ if (pProfileArgIterator == NULL)
+ {
+ return E_UNEXPECTED;
+ }
+ }
+
+ if (CORProfilerFrameInfoEnabled())
+ {
+ if (pFrameInfo == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ //
+ // Setup the COR_PRF_FRAME_INFO structure first.
+ //
+ COR_PRF_FRAME_INFO_INTERNAL * pCorPrfFrameInfo = &(pELTInfo->frameInfo);
+
+ pCorPrfFrameInfo->size = sizeof(COR_PRF_FRAME_INFO_INTERNAL);
+ pCorPrfFrameInfo->version = COR_PRF_FRAME_INFO_INTERNAL_CURRENT_VERSION;
+ pCorPrfFrameInfo->funcID = functionId;
+ pCorPrfFrameInfo->IP = ProfileGetIPFromPlatformSpecificHandle(pELTInfo->platformSpecificHandle);
+ pCorPrfFrameInfo->extraArg = pProfileArgIterator->GetHiddenArgValue();
+ pCorPrfFrameInfo->thisArg = pProfileArgIterator->GetThis();
+
+ *pFrameInfo = (COR_PRF_FRAME_INFO)pCorPrfFrameInfo;
+ }
+
+ //
+ // Do argument processing if desired.
+ //
+ if (CORProfilerFunctionArgsEnabled())
+ {
+ if (pcbArgumentInfo == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ if ((*pcbArgumentInfo != 0) && (pArgumentInfo == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ ULONG32 count = pProfileArgIterator->GetNumArgs();
+
+ if (metaSig.HasThis())
+ {
+ count++;
+ }
+
+ ULONG ulArgInfoSize = sizeof(COR_PRF_FUNCTION_ARGUMENT_INFO) + (count * sizeof(COR_PRF_FUNCTION_ARGUMENT_RANGE));
+
+ if (*pcbArgumentInfo < ulArgInfoSize)
+ {
+ *pcbArgumentInfo = ulArgInfoSize;
+ return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+
+ _ASSERTE(pArgumentInfo != NULL);
+
+ pArgumentInfo->numRanges = count;
+ pArgumentInfo->totalArgumentSize = 0;
+
+ count = 0;
+
+ if (metaSig.HasThis())
+ {
+ pELTInfo->pThis = pProfileArgIterator->GetThis();
+ pArgumentInfo->ranges[count].startAddress = (UINT_PTR) (&(pELTInfo->pThis));
+
+ UINT length = sizeof(pELTInfo->pThis);
+ pArgumentInfo->ranges[count].length = length;
+ pArgumentInfo->totalArgumentSize += length;
+ count++;
+ }
+
+ while (count < pArgumentInfo->numRanges)
+ {
+ pArgumentInfo->ranges[count].startAddress = (UINT_PTR)(pProfileArgIterator->GetNextArgAddr());
+
+ UINT length = pProfileArgIterator->GetArgSize();
+ pArgumentInfo->ranges[count].length = length;
+ pArgumentInfo->totalArgumentSize += length;
+ count++;
+ }
+ }
+
+ return S_OK;
+}
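+
+// A minimal sketch of the sizing protocol above, from a hypothetical profiler
+// FunctionEnter3WithInfo hook (profiler-side code, not part of this file; pInfo is
+// assumed to be an ICorProfilerInfo3 pointer, and the profiler is assumed to have
+// set COR_PRF_ENABLE_FRAME_INFO and COR_PRF_ENABLE_FUNCTION_ARGS):
+//
+//     void STDMETHODCALLTYPE MyEnter3WithInfo(FunctionIDOrClientID funcId,
+//                                             COR_PRF_ELT_INFO eltInfo)
+//     {
+//         COR_PRF_FRAME_INFO frameInfo;
+//         ULONG cbArgInfo = 0;
+//
+//         // First call with a zero-sized buffer: expect ERROR_INSUFFICIENT_BUFFER
+//         // plus the required size in cbArgInfo.
+//         HRESULT hr = pInfo->GetFunctionEnter3Info(funcId.functionID, eltInfo,
+//                                                   &frameInfo, &cbArgInfo, NULL);
+//         if (hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+//         {
+//             COR_PRF_FUNCTION_ARGUMENT_INFO * pArgInfo =
+//                 (COR_PRF_FUNCTION_ARGUMENT_INFO *)_alloca(cbArgInfo);
+//             hr = pInfo->GetFunctionEnter3Info(funcId.functionID, eltInfo,
+//                                               &frameInfo, &cbArgInfo, pArgInfo);
+//             // On success, pArgInfo->ranges[] points at the argument values.
+//         }
+//     }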
+
+
+
+HRESULT ProfToEEInterfaceImpl::GetFunctionEnter3Info(FunctionID functionId, // in
+ COR_PRF_ELT_INFO eltInfo, // in
+ COR_PRF_FRAME_INFO * pFrameInfo, // out
+ ULONG * pcbArgumentInfo, // in, out
+ COR_PRF_FUNCTION_ARGUMENT_INFO * pArgumentInfo) // out
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // ProfilingGetFunctionEnter3Info may take locks
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetFunctionEnter3Info.\n"));
+
+ _ASSERTE(g_profControlBlock.pProfInterface->GetEnter3WithInfoHook() != NULL);
+
+ if (!CORProfilerELT3SlowPathEnterEnabled())
+ {
+ return CORPROF_E_INCONSISTENT_WITH_FLAGS;
+ }
+
+ return ProfilingGetFunctionEnter3Info(functionId, eltInfo, pFrameInfo, pcbArgumentInfo, pArgumentInfo);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// ProfilingGetFunctionLeave3Info provides frame information and return value
+// information about the function the ELT callback is inspecting. It is called
+// either by the profiler or by the C helper function.
+//
+// Arguments:
+// * functionId - [in] FunctionId of the function being inspected by ELT3
+// * eltInfo - [in] The opaque pointer that the FunctionLeave3WithInfo callback passed to the profiler
+// * pFrameInfo - [out] Pointer to a COR_PRF_FRAME_INFO the profiler can later use to inspect
+// generic types
+// * pRetvalRange - [out] Pointer to a COR_PRF_FUNCTION_ARGUMENT_RANGE in which to store the return value
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+HRESULT ProfilingGetFunctionLeave3Info(FunctionID functionId, // in
+ COR_PRF_ELT_INFO eltInfo, // in
+ COR_PRF_FRAME_INFO * pFrameInfo, // out
+ COR_PRF_FUNCTION_ARGUMENT_RANGE * pRetvalRange) // out
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // ProfileArgIterator::ProfileArgIterator may take locks
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ if ((pFrameInfo == NULL) || (eltInfo == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ COR_PRF_ELT_INFO_INTERNAL * pELTInfo = (COR_PRF_ELT_INFO_INTERNAL *)eltInfo;
+ ProfileSetFunctionIDInPlatformSpecificHandle(pELTInfo->platformSpecificHandle, functionId);
+
+ // The loader won't trigger a GC or throw for already loaded argument types.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ //
+ // Find the method this is referring to, so we can get the signature
+ //
+ MethodDesc * pMethodDesc = FunctionIdToMethodDesc(functionId);
+ MetaSig metaSig(pMethodDesc);
+
+ NewHolder<ProfileArgIterator> pProfileArgIterator;
+
+ {
+ // Can handle E_OUTOFMEMORY from ProfileArgIterator.
+ FAULT_NOT_FATAL();
+
+ pProfileArgIterator = new (nothrow) ProfileArgIterator(&metaSig, pELTInfo->platformSpecificHandle);
+
+ if (pProfileArgIterator == NULL)
+ {
+ return E_UNEXPECTED;
+ }
+ }
+
+ if (CORProfilerFrameInfoEnabled())
+ {
+ if (pFrameInfo == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ COR_PRF_FRAME_INFO_INTERNAL * pCorPrfFrameInfo = &(pELTInfo->frameInfo);
+
+ //
+ // Setup the COR_PRF_FRAME_INFO structure first.
+ //
+ pCorPrfFrameInfo->size = sizeof(COR_PRF_FRAME_INFO_INTERNAL);
+ pCorPrfFrameInfo->version = COR_PRF_FRAME_INFO_INTERNAL_CURRENT_VERSION;
+ pCorPrfFrameInfo->funcID = functionId;
+ pCorPrfFrameInfo->IP = ProfileGetIPFromPlatformSpecificHandle(pELTInfo->platformSpecificHandle);
+
+ // By the time the Leave hook runs, the register assigned to hold the 'this' pointer
+ // on function calls may already have been reused, so it is unlikely to still contain 'this'.
+ pCorPrfFrameInfo->extraArg = NULL;
+ pCorPrfFrameInfo->thisArg = NULL;
+
+ *pFrameInfo = (COR_PRF_FRAME_INFO)pCorPrfFrameInfo;
+ }
+
+ //
+ // Do argument processing if desired.
+ //
+ if (CORProfilerFunctionReturnValueEnabled())
+ {
+ if (pRetvalRange == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ if (!metaSig.IsReturnTypeVoid())
+ {
+ pRetvalRange->length = metaSig.GetReturnTypeSize();
+ pRetvalRange->startAddress = (UINT_PTR)pProfileArgIterator->GetReturnBufferAddr();
+ }
+ else
+ {
+ pRetvalRange->length = 0;
+ pRetvalRange->startAddress = 0;
+ }
+ }
+
+ return S_OK;
+}
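+
+// A minimal sketch from a hypothetical profiler FunctionLeave3WithInfo hook reading
+// the return value range (profiler-side code, not part of this file; pInfo assumed
+// as in the previous sketch):
+//
+//     void STDMETHODCALLTYPE MyLeave3WithInfo(FunctionIDOrClientID funcId,
+//                                             COR_PRF_ELT_INFO eltInfo)
+//     {
+//         COR_PRF_FRAME_INFO frameInfo;
+//         COR_PRF_FUNCTION_ARGUMENT_RANGE retvalRange;
+//         if (SUCCEEDED(pInfo->GetFunctionLeave3Info(funcId.functionID, eltInfo,
+//                                                    &frameInfo, &retvalRange))
+//             && (retvalRange.length > 0))
+//         {
+//             // retvalRange.startAddress points at the raw return value bytes; a
+//             // zero length indicates a void return, per the implementation above.
+//         }
+//     }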
+
+
+HRESULT ProfToEEInterfaceImpl::GetFunctionLeave3Info(FunctionID functionId, // in
+ COR_PRF_ELT_INFO eltInfo, // in
+ COR_PRF_FRAME_INFO * pFrameInfo, // out
+ COR_PRF_FUNCTION_ARGUMENT_RANGE * pRetvalRange) // out
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // ProfilingGetFunctionLeave3Info may take locks
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetFunctionLeave3Info.\n"));
+
+ _ASSERTE(g_profControlBlock.pProfInterface->GetLeave3WithInfoHook() != NULL);
+
+ if (!CORProfilerELT3SlowPathLeaveEnabled())
+ {
+ return CORPROF_E_INCONSISTENT_WITH_FLAGS;
+ }
+
+ return ProfilingGetFunctionLeave3Info(functionId, eltInfo, pFrameInfo, pRetvalRange);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// ProfilingGetFunctionTailcall3Info provides frame information about the function the
+// ELT callback is inspecting. It is called either by the profiler or by the C helper function.
+//
+// Arguments:
+// * functionId - [in] FunctionId of the function being inspected by ELT3
+// * eltInfo - [in] The opaque pointer that the FunctionTailcall3WithInfo callback passed to the
+// profiler
+// * pFrameInfo - [out] Pointer to a COR_PRF_FRAME_INFO the profiler can later use to inspect
+// generic types
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+
+HRESULT ProfilingGetFunctionTailcall3Info(FunctionID functionId, // in
+ COR_PRF_ELT_INFO eltInfo, // in
+ COR_PRF_FRAME_INFO * pFrameInfo) // out
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // ProfileArgIterator::ProfileArgIterator may take locks
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ }
+ CONTRACTL_END;
+
+ if ((functionId == NULL) || (eltInfo == NULL) || (pFrameInfo == NULL))
+ {
+ return E_INVALIDARG;
+ }
+
+ COR_PRF_ELT_INFO_INTERNAL * pELTInfo = (COR_PRF_ELT_INFO_INTERNAL *)eltInfo;
+ ProfileSetFunctionIDInPlatformSpecificHandle(pELTInfo->platformSpecificHandle, functionId);
+
+ // The loader won't trigger a GC or throw for already loaded argument types.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ //
+ // Find the method this is referring to, so we can get the signature
+ //
+ MethodDesc * pMethodDesc = FunctionIdToMethodDesc(functionId);
+ MetaSig metaSig(pMethodDesc);
+
+ NewHolder<ProfileArgIterator> pProfileArgIterator;
+
+ {
+ // Can handle E_OUTOFMEMORY from ProfileArgIterator.
+ FAULT_NOT_FATAL();
+
+ pProfileArgIterator = new (nothrow) ProfileArgIterator(&metaSig, pELTInfo->platformSpecificHandle);
+
+ if (pProfileArgIterator == NULL)
+ {
+ return E_UNEXPECTED;
+ }
+ }
+
+ COR_PRF_FRAME_INFO_INTERNAL * pCorPrfFrameInfo = &(pELTInfo->frameInfo);
+
+ //
+ // Setup the COR_PRF_FRAME_INFO structure first.
+ //
+ pCorPrfFrameInfo->size = sizeof(COR_PRF_FRAME_INFO_INTERNAL);
+ pCorPrfFrameInfo->version = COR_PRF_FRAME_INFO_INTERNAL_CURRENT_VERSION;
+ pCorPrfFrameInfo->funcID = functionId;
+ pCorPrfFrameInfo->IP = ProfileGetIPFromPlatformSpecificHandle(pELTInfo->platformSpecificHandle);
+
+ // Tailcall is designed to report the caller, not the callee. But the tailcall hook is
+ // invoked with the registers already containing the parameters passed to the callee,
+ // so the 'this' pointer we can get here is the callee's. Because of the constraints
+ // imposed on tailcall optimization, that 'this' pointer happens to be the same 'this'
+ // pointer that was passed to the caller.
+ //
+ // This is a fragile coincidence we should not depend on, because the JIT is free to
+ // change the implementation details in the future.
+ pCorPrfFrameInfo->extraArg = NULL;
+ pCorPrfFrameInfo->thisArg = NULL;
+
+ *pFrameInfo = (COR_PRF_FRAME_INFO)pCorPrfFrameInfo;
+
+ return S_OK;
+}
+
+
+HRESULT ProfToEEInterfaceImpl::GetFunctionTailcall3Info(FunctionID functionId, // in
+ COR_PRF_ELT_INFO eltInfo, // in
+ COR_PRF_FRAME_INFO * pFrameInfo) // out
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // ProfilingGetFunctionTailcall3Info may take locks
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetFunctionTailcall3Info.\n"));
+
+ _ASSERTE(g_profControlBlock.pProfInterface->GetTailcall3WithInfoHook() != NULL);
+
+ if (!CORProfilerELT3SlowPathTailcallEnabled())
+ {
+ return CORPROF_E_INCONSISTENT_WITH_FLAGS;
+ }
+
+ return ProfilingGetFunctionTailcall3Info(functionId, eltInfo, pFrameInfo);
+}
+
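+// A minimal sketch from a hypothetical profiler FunctionTailcall3WithInfo hook,
+// feeding the returned COR_PRF_FRAME_INFO into GetFunctionInfo2 (profiler-side
+// code, not part of this file; pInfo assumed as in the previous sketches):
+//
+//     void STDMETHODCALLTYPE MyTailcall3WithInfo(FunctionIDOrClientID funcId,
+//                                                COR_PRF_ELT_INFO eltInfo)
+//     {
+//         COR_PRF_FRAME_INFO frameInfo;
+//         if (SUCCEEDED(pInfo->GetFunctionTailcall3Info(funcId.functionID, eltInfo,
+//                                                       &frameInfo)))
+//         {
+//             ClassID classId;
+//             ModuleID moduleId;
+//             mdToken token;
+//             ULONG32 cTypeArgs = 0;
+//             pInfo->GetFunctionInfo2(funcId.functionID, frameInfo, &classId,
+//                                     &moduleId, &token, 0, &cTypeArgs, NULL);
+//         }
+//     }
+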
+HRESULT ProfToEEInterfaceImpl::EnumThreads(
+ /* out */ ICorProfilerThreadEnum ** ppEnum)
+{
+
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // Need to acquire the thread store lock
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(ppEnum, NULL_OK));
+
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO10,
+ "**PROF: EnumThreads.\n"));
+
+ HRESULT hr;
+
+ if (ppEnum == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ *ppEnum = NULL;
+
+ NewHolder<ProfilerThreadEnum> pThreadEnum(new (nothrow) ProfilerThreadEnum);
+ if (pThreadEnum == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ hr = pThreadEnum->Init();
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ // Ownership transferred to [out] param. Caller must Release() when done with this.
+ *ppEnum = (ICorProfilerThreadEnum *) pThreadEnum.Extract();
+
+ return S_OK;
+}
+
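+// A minimal sketch of consuming the enumerator (profiler-side code, not part of
+// this file; pInfo is assumed to be an ICorProfilerInfo4 pointer):
+//
+//     ICorProfilerThreadEnum * pThreadEnum = NULL;
+//     if (SUCCEEDED(pInfo->EnumThreads(&pThreadEnum)))
+//     {
+//         ThreadID threadIds[32];
+//         ULONG cFetched = 0;
+//         while (SUCCEEDED(pThreadEnum->Next(_countof(threadIds), threadIds,
+//                                            &cFetched))
+//                && (cFetched > 0))
+//         {
+//             // Inspect each ThreadID here.
+//         }
+//         pThreadEnum->Release();   // Caller owns the enumerator (see above).
+//     }
+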
+// This function needs to be called on any thread before making any ICorProfilerInfo* calls, and
+// must be called before any thread is suspended by this profiler.
+// As you might have already figured out, this is done to avoid deadlocks when
+// the suspended thread holds the loader lock / heap lock while the current thread is trying to
+// obtain the same lock.
+HRESULT ProfToEEInterfaceImpl::InitializeCurrentThread()
+{
+
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_NOTRIGGER;
+
+ // Yay!
+ MODE_ANY;
+
+ // May take thread store lock and OS APIs may also take locks
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO10,
+ "**PROF: InitializeCurrentThread.\n"));
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ CExecutionEngine::SetupTLSForThread(GetThread());
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (FAILED(hr))
+ return hr;
+
+ return S_OK;
+}
+
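+// A minimal sketch of the intended call pattern from a profiler's own sampling
+// thread (profiler-side code, not part of this file; pInfo is assumed to be an
+// ICorProfilerInfo4 pointer and MyStackSnapshotCallback a hypothetical
+// StackSnapshotCallback implementation):
+//
+//     // Once, on the sampling thread itself, before it suspends anything:
+//     pInfo->InitializeCurrentThread();
+//
+//     // Later, repeatedly, for each target thread to be sampled:
+//     pInfo->DoStackSnapshot(targetThreadId, &MyStackSnapshotCallback,
+//                            COR_PRF_SNAPSHOT_DEFAULT, NULL, NULL, 0);
+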
+struct InternalProfilerModuleEnum : public ProfilerModuleEnum
+{
+ CDynArray<ModuleID> *GetRawElementsArray()
+ {
+ return &m_elements;
+ }
+};
+
+HRESULT ProfToEEInterfaceImpl::EnumNgenModuleMethodsInliningThisMethod(
+ ModuleID inlinersModuleId,
+ ModuleID inlineeModuleId,
+ mdMethodDef inlineeMethodId,
+ BOOL *incompleteData,
+ ICorProfilerMethodEnum** ppEnum)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ CAN_TAKE_LOCK;
+ PRECONDITION(CheckPointer(ppEnum));
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EETriggers, (LF_CORPROF, LL_INFO1000, "**PROF: EnumNgenModuleMethodsInliningThisMethod.\n"));
+
+ typedef DPTR(class MethodDesc) PTR_MethodDesc;
+
+ if (ppEnum == NULL)
+ {
+ return E_INVALIDARG;
+ }
+ *ppEnum = NULL;
+ HRESULT hr = S_OK;
+
+ Module *inlineeOwnerModule = reinterpret_cast<Module *>(inlineeModuleId);
+ if (inlineeOwnerModule == NULL)
+ {
+ return E_INVALIDARG;
+ }
+ if (inlineeOwnerModule->IsBeingUnloaded())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ Module *inlinersModule = reinterpret_cast<Module *>(inlinersModuleId);
+ if (inlinersModule == NULL)
+ {
+ return E_INVALIDARG;
+ }
+ if(inlinersModule->IsBeingUnloaded())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ PersistentInlineTrackingMap *inliningMap = inlinersModule->GetNgenInlineTrackingMap();
+ if (inliningMap == NULL)
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ CDynArray<COR_PRF_METHOD> results;
+ const COUNT_T staticBufferSize = 10;
+ MethodInModule staticBuffer[staticBufferSize];
+ NewArrayHolder<MethodInModule> dynamicBuffer;
+ MethodInModule *methodsBuffer = staticBuffer;
+ EX_TRY
+ {
+ // Trying to use static buffer
+ COUNT_T methodsAvailable = inliningMap->GetInliners(inlineeOwnerModule, inlineeMethodId, staticBufferSize, staticBuffer, incompleteData);
+
+ // If static buffer is not enough, allocate an array.
+ if (methodsAvailable > staticBufferSize)
+ {
+ DWORD dynamicBufferSize = methodsAvailable;
+ dynamicBuffer = methodsBuffer = new MethodInModule[dynamicBufferSize];
+ methodsAvailable = inliningMap->GetInliners(inlineeOwnerModule, inlineeMethodId, dynamicBufferSize, dynamicBuffer, incompleteData);
+ if (methodsAvailable > dynamicBufferSize)
+ {
+ _ASSERTE(!"Ngen image inlining info changed, this shouldn't be possible.");
+ methodsAvailable = dynamicBufferSize;
+ }
+ }
+
+ // Go through all inliners found in the inlinersModule and prepare them for export via results.
+ results.AllocateBlockThrowing(methodsAvailable);
+ for (COUNT_T j = 0; j < methodsAvailable; j++)
+ {
+ COR_PRF_METHOD *newPrfMethod = &results[j];
+ newPrfMethod->moduleId = reinterpret_cast<ModuleID>(methodsBuffer[j].m_module);
+ newPrfMethod->methodId = methodsBuffer[j].m_methodDef;
+ }
+ *ppEnum = new ProfilerMethodEnum(&results);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
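+// A minimal sketch of consuming the inliner enumeration, e.g. to decide which
+// additional methods need a ReJIT request after instrumenting the inlinee
+// (profiler-side code, not part of this file; pInfo is assumed to be an
+// ICorProfilerInfo6 pointer, and the module/method IDs are placeholders):
+//
+//     BOOL incomplete = FALSE;
+//     ICorProfilerMethodEnum * pMethodEnum = NULL;
+//     if (SUCCEEDED(pInfo->EnumNgenModuleMethodsInliningThisMethod(
+//             inlinersModuleId, inlineeModuleId, inlineeMethodId,
+//             &incomplete, &pMethodEnum)))
+//     {
+//         COR_PRF_METHOD methods[16];
+//         ULONG cFetched = 0;
+//         while (SUCCEEDED(pMethodEnum->Next(_countof(methods), methods,
+//                                            &cFetched))
+//                && (cFetched > 0))
+//         {
+//             // Each entry identifies an inliner by (moduleId, methodId).
+//         }
+//         pMethodEnum->Release();
+//         // If incomplete is TRUE, some modules were still being loaded and the
+//         // enumeration may be missing inliners.
+//     }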
+
+
+//---------------------------------------------------------------------------------------
+//
+// Simple wrapper around EEToProfInterfaceImpl::ManagedToUnmanagedTransition. This
+// can be called by C++ code and directly by generated stubs.
+//
+// Arguments:
+// pMD - MethodDesc for the managed function involved in the transition
+// reason - Passed on to profiler to indicate why the transition is occurring
+//
+
+void __stdcall ProfilerManagedToUnmanagedTransitionMD(MethodDesc *pMD,
+ COR_PRF_TRANSITION_REASON reason)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // This function is called within the runtime, not directly from managed code.
+ // Also, the only case where pMD is NULL is the calli P/Invoke case, and we still
+ // want to notify the profiler in that case.
+
+ // Do not notify the profiler about QCalls
+ if (pMD == NULL || !pMD->IsQCall())
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ g_profControlBlock.pProfInterface->ManagedToUnmanagedTransition(MethodDescToFunctionID(pMD),
+ reason);
+ END_PIN_PROFILER();
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Simple wrapper around EEToProfInterfaceImpl::UnmanagedToManagedTransition. This
+// can be called by C++ code and directly by generated stubs.
+//
+// Arguments:
+// pMD - MethodDesc for the managed function involved in the transition
+// reason - Passed on to profiler to indicate why the transition is occurring
+//
+
+void __stdcall ProfilerUnmanagedToManagedTransitionMD(MethodDesc *pMD,
+ COR_PRF_TRANSITION_REASON reason)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // This function is called within the runtime, not directly from managed code.
+ // Also, the only case where pMD is NULL is the calli P/Invoke case, and we still
+ // want to notify the profiler in that case.
+
+ // Do not notify the profiler about QCalls
+ if (pMD == NULL || !pMD->IsQCall())
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ g_profControlBlock.pProfInterface->UnmanagedToManagedTransition(MethodDescToFunctionID(pMD),
+ reason);
+ END_PIN_PROFILER();
+ }
+}
+
+
+
+#endif // PROFILING_SUPPORTED
+
+
+FCIMPL0(FC_BOOL_RET, ProfilingFCallHelper::FC_TrackRemoting)
+{
+ FCALL_CONTRACT;
+
+#ifdef PROFILING_SUPPORTED
+ FC_RETURN_BOOL(CORProfilerTrackRemoting());
+#else // !PROFILING_SUPPORTED
+ FC_RETURN_BOOL(FALSE);
+#endif // !PROFILING_SUPPORTED
+}
+FCIMPLEND
+
+FCIMPL0(FC_BOOL_RET, ProfilingFCallHelper::FC_TrackRemotingCookie)
+{
+ FCALL_CONTRACT;
+
+#ifdef PROFILING_SUPPORTED
+ FC_RETURN_BOOL(CORProfilerTrackRemotingCookie());
+#else // !PROFILING_SUPPORTED
+ FC_RETURN_BOOL(FALSE);
+#endif // !PROFILING_SUPPORTED
+}
+FCIMPLEND
+
+FCIMPL0(FC_BOOL_RET, ProfilingFCallHelper::FC_TrackRemotingAsync)
+{
+ FCALL_CONTRACT;
+
+#ifdef PROFILING_SUPPORTED
+ FC_RETURN_BOOL(CORProfilerTrackRemotingAsync());
+#else // !PROFILING_SUPPORTED
+ FC_RETURN_BOOL(FALSE);
+#endif // !PROFILING_SUPPORTED
+}
+FCIMPLEND
+
+FCIMPL2(void, ProfilingFCallHelper::FC_RemotingClientSendingMessage, GUID *pId, CLR_BOOL fIsAsync)
+{
+ FCALL_CONTRACT;
+
+#ifdef PROFILING_SUPPORTED
+ // Need to erect a GC frame so that GCs can occur without a problem
+ // within the profiler code.
+
+ // Note that we don't need to worry about pId moving around since
+ // it is a value class declared on the stack and so GC doesn't
+ // know about it.
+
+ _ASSERTE (!GCHeap::GetGCHeap()->IsHeapPointer(pId)); // should be on the stack, not in the heap
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ GCX_PREEMP();
+ if (CORProfilerTrackRemotingCookie())
+ {
+ g_profControlBlock.pProfInterface->GetGUID(pId);
+ _ASSERTE(pId->Data1);
+
+ g_profControlBlock.pProfInterface->RemotingClientSendingMessage(pId, fIsAsync);
+ }
+ else
+ {
+ g_profControlBlock.pProfInterface->RemotingClientSendingMessage(NULL, fIsAsync);
+ }
+ END_PIN_PROFILER();
+ }
+ HELPER_METHOD_FRAME_END_POLL();
+#endif // PROFILING_SUPPORTED
+}
+FCIMPLEND
+
+
+FCIMPL2_VI(void, ProfilingFCallHelper::FC_RemotingClientReceivingReply, GUID id, CLR_BOOL fIsAsync)
+{
+ FCALL_CONTRACT;
+
+#ifdef PROFILING_SUPPORTED
+ // Need to erect a GC frame so that GCs can occur without a problem
+ // within the profiler code.
+
+ // Note that we don't need to worry about the GUID moving around since
+ // it is a value class passed on the stack, and so the GC doesn't
+ // know about it.
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ GCX_PREEMP();
+ if (CORProfilerTrackRemotingCookie())
+ {
+ g_profControlBlock.pProfInterface->RemotingClientReceivingReply(&id, fIsAsync);
+ }
+ else
+ {
+ g_profControlBlock.pProfInterface->RemotingClientReceivingReply(NULL, fIsAsync);
+ }
+ END_PIN_PROFILER();
+ }
+
+ HELPER_METHOD_FRAME_END_POLL();
+#endif // PROFILING_SUPPORTED
+}
+FCIMPLEND
+
+
+FCIMPL2_VI(void, ProfilingFCallHelper::FC_RemotingServerReceivingMessage, GUID id, CLR_BOOL fIsAsync)
+{
+ FCALL_CONTRACT;
+
+#ifdef PROFILING_SUPPORTED
+ // Need to erect a GC frame so that GCs can occur without a problem
+ // within the profiler code.
+
+ // Note that we don't need to worry about the GUID moving around since
+ // it is a value class passed on the stack, and so the GC doesn't
+ // know about it.
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ GCX_PREEMP();
+ if (CORProfilerTrackRemotingCookie())
+ {
+ g_profControlBlock.pProfInterface->RemotingServerReceivingMessage(&id, fIsAsync);
+ }
+ else
+ {
+ g_profControlBlock.pProfInterface->RemotingServerReceivingMessage(NULL, fIsAsync);
+ }
+ END_PIN_PROFILER();
+ }
+
+ HELPER_METHOD_FRAME_END_POLL();
+#endif // PROFILING_SUPPORTED
+}
+FCIMPLEND
+
+FCIMPL2(void, ProfilingFCallHelper::FC_RemotingServerSendingReply, GUID *pId, CLR_BOOL fIsAsync)
+{
+ FCALL_CONTRACT;
+
+#ifdef PROFILING_SUPPORTED
+ // Need to erect a GC frame so that GCs can occur without a problem
+ // within the profiler code.
+
+ // Note that we don't need to worry about pId moving around since
+ // it is a value class declared on the stack and so GC doesn't
+ // know about it.
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ GCX_PREEMP();
+ if (CORProfilerTrackRemotingCookie())
+ {
+ g_profControlBlock.pProfInterface->GetGUID(pId);
+ _ASSERTE(pId->Data1);
+
+ g_profControlBlock.pProfInterface->RemotingServerSendingReply(pId, fIsAsync);
+ }
+ else
+ {
+ g_profControlBlock.pProfInterface->RemotingServerSendingReply(NULL, fIsAsync);
+ }
+ END_PIN_PROFILER();
+ }
+
+ HELPER_METHOD_FRAME_END_POLL();
+#endif // PROFILING_SUPPORTED
+}
+FCIMPLEND
+
+//
+// Define wrapper functions for Rotor.
+//
+// NOTE: These do not currently pass the platform-specific handle correctly. The
+// Rotor people need to implement these correctly if they care to support getting
+// arguments, return values, and generic information.
+//
+#if !defined(_TARGET_X86_) && !defined(_WIN64) && !defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED)
+
+FCIMPL1(EXTERN_C void, ProfileEnterWrapper, FunctionIDOrClientID functionIDOrClientID)
+{
+ FCALL_CONTRACT;
+
+ //
+ // Create some empty buffer space for the platformSpecificHandle
+ //
+ BYTE buffer[60] = {0};
+
+ PORTABILITY_ASSERT("ProfileEnterWrapper not implemented");
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ ProfileEnter(functionIDOrClientID.clientID, &buffer);
+}
+FCIMPLEND
+
+FCIMPL1(EXTERN_C void, ProfileLeaveWrapper, FunctionIDOrClientID functionIDOrClientID)
+{
+ FCALL_CONTRACT;
+
+ //
+ // Create some empty buffer space for the platformSpecificHandle
+ //
+ BYTE buffer[60] = {0};
+
+ PORTABILITY_ASSERT("ProfileLeaveWrapper not implemented");
+
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ ProfileLeave(functionIDOrClientID.clientID, &buffer);
+}
+FCIMPLEND
+
+FCIMPL1(EXTERN_C void, ProfileTailcallWrapper, FunctionIDOrClientID functionIDOrClientID)
+{
+ FCALL_CONTRACT;
+
+ //
+ // Create some empty buffer space for the platformSpecificHandle
+ //
+ BYTE buffer[60] = {0};
+
+ PORTABILITY_ASSERT("ProfileTailcallWrapper not implemented");
+
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ ProfileTailcall(functionIDOrClientID.clientID, &buffer);
+}
+FCIMPLEND
+
+#endif
+
+
+//*******************************************************************************************
+// These do a lot of work for us, setting up Frames, gathering arg info and resolving generics.
+//*******************************************************************************************
+
+HCIMPL2(EXTERN_C void, ProfileEnter, UINT_PTR clientData, void * platformSpecificHandle)
+{
+ FCALL_CONTRACT;
+
+#ifdef PROFILING_SUPPORTED
+
+#ifdef PROF_TEST_ONLY_FORCE_ELT
+ // If this test-only flag is set, it's possible we might not have a profiler
+ // attached, or might not have any of the hooks set. See
+ // code:ProfControlBlock#TestOnlyELT
+ if (g_profControlBlock.fTestOnlyForceEnterLeave)
+ {
+ if ((g_profControlBlock.pProfInterface.Load() == NULL) ||
+ (
+ (g_profControlBlock.pProfInterface->GetEnterHook() == NULL) &&
+ (g_profControlBlock.pProfInterface->GetEnter2Hook() == NULL) &&
+ (g_profControlBlock.pProfInterface->GetEnter3Hook() == NULL) &&
+ (g_profControlBlock.pProfInterface->GetEnter3WithInfoHook() == NULL)
+ )
+ )
+ {
+ return;
+ }
+ }
+#endif // PROF_TEST_ONLY_FORCE_ELT
+
+ // ELT3 Fast-Path hooks should be NULL when ELT intermediary is used.
+ _ASSERTE(g_profControlBlock.pProfInterface->GetEnter3Hook() == NULL);
+ _ASSERTE(GetThread()->PreemptiveGCDisabled());
+ _ASSERTE(platformSpecificHandle != NULL);
+
+ // Set up a frame
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+
+ // Our contract is FCALL_CONTRACT, which is considered triggers if you set up a
+ // frame, like we're about to do.
+ SetCallbackStateFlagsHolder csf(
+ COR_PRF_CALLBACKSTATE_INCALLBACK | COR_PRF_CALLBACKSTATE_IN_TRIGGERS_SCOPE);
+
+ COR_PRF_ELT_INFO_INTERNAL eltInfo;
+ eltInfo.platformSpecificHandle = platformSpecificHandle;
+
+ //
+ // CLR v4 Slow-Path ELT
+ //
+ if (g_profControlBlock.pProfInterface->GetEnter3WithInfoHook() != NULL)
+ {
+ FunctionIDOrClientID functionIDOrClientID;
+ functionIDOrClientID.clientID = clientData;
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ g_profControlBlock.pProfInterface->GetEnter3WithInfoHook()(
+ functionIDOrClientID,
+ (COR_PRF_ELT_INFO)&eltInfo);
+ goto LExit;
+ }
+
+ if (g_profControlBlock.pProfInterface->GetEnter2Hook() != NULL)
+ {
+ // If we have run out of heap memory, the content of the mapping table has become
+ // stale and all Whidbey ELT hooks must be turned off.
+ if (!g_profControlBlock.pProfInterface->IsClientIDToFunctionIDMappingEnabled())
+ {
+ goto LExit;
+ }
+
+ // If ELT2 is in use, the JIT embeds the FunctionID into the ELT3 probes rather than
+ // the clientID, because the profiler may map several FunctionIDs to one clientID to
+ // do things like code coverage analysis. FunctionID to clientID is a one-to-one
+ // mapping, while the reverse may not be. Therefore, FunctionID is used as the key to
+ // retrieve the corresponding clientID from the internal FunctionID hash table.
+ FunctionID functionId = clientData;
+ _ASSERTE(functionId != NULL);
+ clientData = g_profControlBlock.pProfInterface->LookupClientIDFromCache(functionId);
+
+ //
+ // Whidbey Fast-Path ELT
+ //
+ if (CORProfilerELT2FastPathEnterEnabled())
+ {
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ g_profControlBlock.pProfInterface->GetEnter2Hook()(
+ functionId,
+ clientData,
+ NULL,
+ NULL);
+ goto LExit;
+ }
+
+ //
+ // Whidbey Slow-Path ELT
+ //
+ ProfileSetFunctionIDInPlatformSpecificHandle(platformSpecificHandle, functionId);
+
+ COR_PRF_FRAME_INFO frameInfo = NULL;
+ COR_PRF_FUNCTION_ARGUMENT_INFO * pArgumentInfo = NULL;
+ ULONG ulArgInfoSize = 0;
+
+ if (CORProfilerFunctionArgsEnabled())
+ {
+ // The loader won't trigger a GC or throw for already loaded argument types.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ //
+ // Find the method this is referring to, so we can get the signature
+ //
+ MethodDesc * pMethodDesc = FunctionIdToMethodDesc(functionId);
+ MetaSig metaSig(pMethodDesc);
+
+ NewHolder<ProfileArgIterator> pProfileArgIterator;
+
+ {
+ // Can handle E_OUTOFMEMORY from ProfileArgIterator.
+ FAULT_NOT_FATAL();
+
+ pProfileArgIterator = new (nothrow) ProfileArgIterator(&metaSig, platformSpecificHandle);
+
+ if (pProfileArgIterator == NULL)
+ {
+ goto LExit;
+ }
+ }
+
+ ULONG32 count = pProfileArgIterator->GetNumArgs();
+
+ if (metaSig.HasThis())
+ {
+ count++;
+ }
+
+ ulArgInfoSize = sizeof(COR_PRF_FUNCTION_ARGUMENT_INFO) + count * sizeof(COR_PRF_FUNCTION_ARGUMENT_RANGE);
+ pArgumentInfo = (COR_PRF_FUNCTION_ARGUMENT_INFO *)_alloca(ulArgInfoSize);
+ }
+
+ HRESULT hr = ProfilingGetFunctionEnter3Info(functionId, (COR_PRF_ELT_INFO)&eltInfo, &frameInfo, &ulArgInfoSize, pArgumentInfo);
+
+ _ASSERTE(hr == S_OK);
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ g_profControlBlock.pProfInterface->GetEnter2Hook()(functionId, clientData, frameInfo, pArgumentInfo);
+
+ goto LExit;
+ }
+
+
+ // We will not be here unless the jit'd or ngen'd function we're about to enter
+ // was backpatched with this wrapper around the profiler's hook, and that
+ // wouldn't have happened unless the profiler supplied us with a hook
+ // in the first place. (Note that SetEnterLeaveFunctionHooks* will return
+ // an error unless it's called in the profiler's Initialize(), so a profiler can't change
+ // its mind about where the hooks are.)
+ _ASSERTE(g_profControlBlock.pProfInterface->GetEnterHook() != NULL);
+
+ // Note that we cannot assert CORProfilerTrackEnterLeave() (i.e., profiler flag
+ // COR_PRF_MONITOR_ENTERLEAVE), because the profiler may decide whether
+ // to enable the jitter to add enter/leave callouts independently of whether
+ // the profiler actually has enter/leave hooks. (If the profiler has no such hooks,
+ // the callouts quickly return and do nothing.)
+
+ //
+ // Everett ELT
+ //
+ {
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ g_profControlBlock.pProfInterface->GetEnterHook()((FunctionID)clientData);
+ }
+
+LExit:
+ ;
+
+ HELPER_METHOD_FRAME_END(); // Un-link the frame
+
+#endif // PROFILING_SUPPORTED
+}
+HCIMPLEND
+
+HCIMPL2(EXTERN_C void, ProfileLeave, UINT_PTR clientData, void * platformSpecificHandle)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED(); // we pulse GC mode, so we are doing a poll
+
+#ifdef PROFILING_SUPPORTED
+
+#ifdef PROF_TEST_ONLY_FORCE_ELT
+ // If this test-only flag is set, it's possible we might not have a profiler
+ // attached, or might not have any of the hooks set. See
+ // code:ProfControlBlock#TestOnlyELT
+ if (g_profControlBlock.fTestOnlyForceEnterLeave)
+ {
+ if ((g_profControlBlock.pProfInterface.Load() == NULL) ||
+ (
+ (g_profControlBlock.pProfInterface->GetLeaveHook() == NULL) &&
+ (g_profControlBlock.pProfInterface->GetLeave2Hook() == NULL) &&
+ (g_profControlBlock.pProfInterface->GetLeave3Hook() == NULL) &&
+ (g_profControlBlock.pProfInterface->GetLeave3WithInfoHook() == NULL)
+ )
+ )
+ {
+ return;
+ }
+ }
+#endif // PROF_TEST_ONLY_FORCE_ELT
+
+ // ELT3 Fast-Path hooks should be NULL when ELT intermediary is used.
+ _ASSERTE(g_profControlBlock.pProfInterface->GetLeave3Hook() == NULL);
+ _ASSERTE(GetThread()->PreemptiveGCDisabled());
+ _ASSERTE(platformSpecificHandle != NULL);
+
+ // Set up a frame
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+
+ // Our contract is FCALL_CONTRACT, which is considered triggers if you set up a
+ // frame, like we're about to do.
+ SetCallbackStateFlagsHolder csf(
+ COR_PRF_CALLBACKSTATE_INCALLBACK | COR_PRF_CALLBACKSTATE_IN_TRIGGERS_SCOPE);
+
+ COR_PRF_ELT_INFO_INTERNAL eltInfo;
+ eltInfo.platformSpecificHandle = platformSpecificHandle;
+
+ //
+ // CLR v4 Slow-Path ELT
+ //
+ if (g_profControlBlock.pProfInterface->GetLeave3WithInfoHook() != NULL)
+ {
+ FunctionIDOrClientID functionIDOrClientID;
+ functionIDOrClientID.clientID = clientData;
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ g_profControlBlock.pProfInterface->GetLeave3WithInfoHook()(
+ functionIDOrClientID,
+ (COR_PRF_ELT_INFO)&eltInfo);
+ goto LExit;
+ }
+
+ if (g_profControlBlock.pProfInterface->GetLeave2Hook() != NULL)
+ {
+ // If we have run out of heap memory, the content of the mapping table has become
+ // stale and all Whidbey ELT hooks must be turned off.
+ if (!g_profControlBlock.pProfInterface->IsClientIDToFunctionIDMappingEnabled())
+ {
+ goto LExit;
+ }
+
+ // If ELT2 is in use, the JIT embeds the FunctionID into the ELT3 probes rather than
+ // the clientID, because the profiler may map several FunctionIDs to one clientID to
+ // do things like code coverage analysis. FunctionID to clientID is a one-to-one
+ // mapping, while the reverse may not be. Therefore, FunctionID is used as the key to
+ // retrieve the corresponding clientID from the internal FunctionID hash table.
+ FunctionID functionId = clientData;
+ _ASSERTE(functionId != NULL);
+ clientData = g_profControlBlock.pProfInterface->LookupClientIDFromCache(functionId);
+
+ //
+ // Whidbey Fast-Path ELT
+ //
+ if (CORProfilerELT2FastPathLeaveEnabled())
+ {
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ g_profControlBlock.pProfInterface->GetLeave2Hook()(
+ functionId,
+ clientData,
+ NULL,
+ NULL);
+ goto LExit;
+ }
+
+ //
+ // Whidbey Slow-Path ELT
+ //
+ COR_PRF_FRAME_INFO frameInfo = NULL;
+ COR_PRF_FUNCTION_ARGUMENT_RANGE argumentRange;
+
+ HRESULT hr = ProfilingGetFunctionLeave3Info(functionId, (COR_PRF_ELT_INFO)&eltInfo, &frameInfo, &argumentRange);
+ _ASSERTE(hr == S_OK);
+
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ g_profControlBlock.pProfInterface->GetLeave2Hook()(functionId, clientData, frameInfo, &argumentRange);
+ goto LExit;
+ }
+
+ // We will not be here unless the jit'd or ngen'd function we're about to leave
+ // was backpatched with this wrapper around the profiler's hook, and that
+ // wouldn't have happened unless the profiler supplied us with a hook
+ // in the first place. (Note that SetEnterLeaveFunctionHooks* will return
+ // an error unless it's called in the profiler's Initialize(), so a profiler can't change
+ // its mind about where the hooks are.)
+ _ASSERTE(g_profControlBlock.pProfInterface->GetLeaveHook() != NULL);
+
+ // Note that we cannot assert CORProfilerTrackEnterLeave() (i.e., profiler flag
+ // COR_PRF_MONITOR_ENTERLEAVE), because the profiler may decide whether
+ // to enable the jitter to add enter/leave callouts independently of whether
+ // the profiler actually has enter/leave hooks. (If the profiler has no such hooks,
+ // the callouts quickly return and do nothing.)
+
+ //
+ // Everett ELT
+ //
+ {
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ g_profControlBlock.pProfInterface->GetLeaveHook()((FunctionID)clientData);
+ }
+
+LExit:
+
+ ;
+
+ HELPER_METHOD_FRAME_END(); // Un-link the frame
+
+#endif // PROFILING_SUPPORTED
+}
+HCIMPLEND
+
+HCIMPL2(EXTERN_C void, ProfileTailcall, UINT_PTR clientData, void * platformSpecificHandle)
+{
+ FCALL_CONTRACT;
+
+ FC_GC_POLL_NOT_NEEDED(); // we pulse GC mode, so we are doing a poll
+
+#ifdef PROFILING_SUPPORTED
+
+#ifdef PROF_TEST_ONLY_FORCE_ELT
+ // If this test-only flag is set, it's possible we might not have a profiler
+ // attached, or might not have any of the hooks set. See
+ // code:ProfControlBlock#TestOnlyELT
+ if (g_profControlBlock.fTestOnlyForceEnterLeave)
+ {
+ if ((g_profControlBlock.pProfInterface.Load() == NULL) ||
+ (
+ (g_profControlBlock.pProfInterface->GetTailcallHook() == NULL) &&
+ (g_profControlBlock.pProfInterface->GetTailcall2Hook() == NULL) &&
+ (g_profControlBlock.pProfInterface->GetTailcall3Hook() == NULL) &&
+ (g_profControlBlock.pProfInterface->GetTailcall3WithInfoHook() == NULL)
+ )
+ )
+ {
+ return;
+ }
+ }
+#endif // PROF_TEST_ONLY_FORCE_ELT
+
+ // ELT3 fast-path hooks should be NULL when ELT intermediary is used.
+ _ASSERTE(g_profControlBlock.pProfInterface->GetTailcall3Hook() == NULL);
+ _ASSERTE(GetThread()->PreemptiveGCDisabled());
+ _ASSERTE(platformSpecificHandle != NULL);
+
+ // Set up a frame
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+
+ // Our contract is FCALL_CONTRACT, which is considered triggers if you set up a
+ // frame, like we're about to do.
+ SetCallbackStateFlagsHolder csf(
+ COR_PRF_CALLBACKSTATE_INCALLBACK | COR_PRF_CALLBACKSTATE_IN_TRIGGERS_SCOPE);
+
+ COR_PRF_ELT_INFO_INTERNAL eltInfo;
+ eltInfo.platformSpecificHandle = platformSpecificHandle;
+
+ //
+ // CLR v4 Slow-Path ELT
+ //
+ if (g_profControlBlock.pProfInterface->GetTailcall3WithInfoHook() != NULL)
+ {
+ FunctionIDOrClientID functionIDOrClientID;
+ functionIDOrClientID.clientID = clientData;
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ g_profControlBlock.pProfInterface->GetTailcall3WithInfoHook()(
+ functionIDOrClientID,
+ (COR_PRF_ELT_INFO)&eltInfo);
+ goto LExit;
+ }
+
+ if (g_profControlBlock.pProfInterface->GetTailcall2Hook() != NULL)
+ {
+ // If we have run out of heap memory, the content of the mapping table has become
+ // stale and all Whidbey ELT hooks must be turned off.
+ if (!g_profControlBlock.pProfInterface->IsClientIDToFunctionIDMappingEnabled())
+ {
+ goto LExit;
+ }
+
+ // If ELT2 is in use, the JIT embeds the FunctionID into the ELT3 probes rather than
+ // the clientID, because the profiler may map several FunctionIDs to one clientID to
+ // do things like code coverage analysis. FunctionID to clientID is a one-to-one
+ // mapping, while the reverse may not be. Therefore, FunctionID is used as the key to
+ // retrieve the corresponding clientID from the internal FunctionID hash table.
+ FunctionID functionId = clientData;
+ _ASSERTE(functionId != NULL);
+ clientData = g_profControlBlock.pProfInterface->LookupClientIDFromCache(functionId);
+
+ //
+ // Whidbey Fast-Path ELT
+ //
+ if (CORProfilerELT2FastPathTailcallEnabled())
+ {
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ g_profControlBlock.pProfInterface->GetTailcall2Hook()(
+ functionId,
+ clientData,
+ NULL);
+ goto LExit;
+ }
+
+ //
+ // Whidbey Slow-Path ELT
+ //
+ COR_PRF_FRAME_INFO frameInfo = NULL;
+
+ HRESULT hr = ProfilingGetFunctionTailcall3Info(functionId, (COR_PRF_ELT_INFO)&eltInfo, &frameInfo);
+ _ASSERTE(hr == S_OK);
+
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ g_profControlBlock.pProfInterface->GetTailcall2Hook()(functionId, clientData, frameInfo);
+ goto LExit;
+ }
+
+ // We will not be here unless the jit'd or ngen'd function we're about to tailcall
+ // was backpatched with this wrapper around the profiler's hook, and that
+ // wouldn't have happened unless the profiler supplied us with a hook
+ // in the first place. (Note that SetEnterLeaveFunctionHooks* will return
+ // an error unless it's called in the profiler's Initialize(), so a profiler can't change
+ // its mind about where the hooks are.)
+ _ASSERTE(g_profControlBlock.pProfInterface->GetTailcallHook() != NULL);
+
+ // Note that we cannot assert CORProfilerTrackEnterLeave() (i.e., profiler flag
+ // COR_PRF_MONITOR_ENTERLEAVE), because the profiler may decide whether
+ // to enable the jitter to add enter/leave callouts independently of whether
+ // the profiler actually has enter/leave hooks. (If the profiler has no such hooks,
+ // the callouts quickly return and do nothing.)
+
+ //
+ // Everett ELT
+ //
+ {
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ g_profControlBlock.pProfInterface->GetTailcallHook()((FunctionID)clientData);
+ }
+
+LExit:
+
+ ;
+
+ HELPER_METHOD_FRAME_END(); // Un-link the frame
+
+#endif // PROFILING_SUPPORTED
+}
+HCIMPLEND
+
diff --git a/src/vm/proftoeeinterfaceimpl.h b/src/vm/proftoeeinterfaceimpl.h
new file mode 100644
index 0000000000..cae25d9dae
--- /dev/null
+++ b/src/vm/proftoeeinterfaceimpl.h
@@ -0,0 +1,617 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// FILE: ProfToEEInterfaceImpl.h
+//
+// Declaration of class that implements the ICorProfilerInfo* interfaces, which allow the
+// Profiler to communicate with the EE. This allows the Profiler DLL to get
+// access to private EE data structures and other things that should never be exported
+// outside of the EE.
+//
+
+//
+
+// ======================================================================================
+
+
+#ifndef __PROFTOEEINTERFACEIMPL_H__
+#define __PROFTOEEINTERFACEIMPL_H__
+
+#ifdef PROFILING_SUPPORTED
+
+#include "eeprofinterfaces.h"
+#include "vars.hpp"
+#include "threads.h"
+#include "codeman.h"
+#include "cor.h"
+#include "callingconvention.h"
+
+
+#include "profilinghelper.h"
+
+
+class ProfilerFunctionEnum;
+
+//
+// Helper routines.
+//
+extern MethodDesc *FunctionIdToMethodDesc(FunctionID functionID);
+extern ClassID TypeHandleToClassID(TypeHandle th);
+
+
+//
+// Function declarations for those functions that are platform specific.
+//
+extern UINT_PTR ProfileGetIPFromPlatformSpecificHandle(void * handle);
+
+extern void ProfileSetFunctionIDInPlatformSpecificHandle(void * pPlatformSpecificHandle, FunctionID functionID);
+
+//
+// The following class is implemented differently on each platform, using
+// the PlatformSpecificHandle to initialize an ArgIterator.
+//
+class ProfileArgIterator
+{
+private:
+ void *m_handle;
+ ArgIterator m_argIterator;
+
+public:
+ ProfileArgIterator(MetaSig * pMetaSig, void* platformSpecificHandle);
+
+ ~ProfileArgIterator();
+
+ //
+ // Returns the number of arguments that GetNextArgAddr will iterate over
+ //
+ UINT GetNumArgs()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_argIterator.NumFixedArgs();
+ }
+
+ //
+ // After initialization, this method is called repeatedly until it
+ // returns NULL to get the address of each arg.
+ //
+ // Note: this address could be anywhere on the stack.
+ //
+ LPVOID GetNextArgAddr();
+
+ //
+ // Returns argument size
+ //
+ UINT GetArgSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_argIterator.GetArgSize();
+ }
+
+ //
+ // Called after initialization, any number of times, to retrieve any
+ // hidden argument, so that resolution for Generics can be done.
+ //
+ LPVOID GetHiddenArgValue(void);
+
+ //
+ // Called after initialization, any number of times, to retrieve the
+ // value of 'this'.
+ //
+ LPVOID GetThis(void);
+
+ //
+ // Called after initialization, any number of times, to retrieve the
+ // address of the return buffer, if there is one. NULL indicates no
+ // return buffer.
+ //
+ LPVOID GetReturnBufferAddr(void);
+};
+
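+// A minimal sketch of the usage pattern, mirroring the ELT3 helpers in
+// proftoeeinterfaceimpl.cpp (EE-internal code; pMethodDesc and
+// platformSpecificHandle are assumed to come from the ELT machinery):
+//
+//     MetaSig metaSig(pMethodDesc);
+//     ProfileArgIterator argIter(&metaSig, platformSpecificHandle);
+//     UINT numArgs = argIter.GetNumArgs();
+//     for (UINT i = 0; i < numArgs; i++)
+//     {
+//         LPVOID pArgAddr = argIter.GetNextArgAddr();  // address of argument i
+//         UINT cbArg = argIter.GetArgSize();
+//         // ... record the (pArgAddr, cbArg) range ...
+//     }
+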
+//---------------------------------------------------------------------------------------
+// This helper class wraps a loader heap which can be used to allocate
+// memory for new IL method bodies in the current module.
+
+class ModuleILHeap : public IMethodMalloc
+{
+public:
+ // IUnknown
+ virtual ULONG STDMETHODCALLTYPE AddRef();
+ virtual ULONG STDMETHODCALLTYPE Release();
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void ** pp);
+
+ // IMethodMalloc
+ virtual void *STDMETHODCALLTYPE Alloc(ULONG cb);
+
+ static ModuleILHeap s_Heap;
+};
+
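+// A minimal sketch of the profiler-side IL-rewriting pattern this allocator backs
+// (profiler-side code, not part of this file; pInfo is assumed to be an
+// ICorProfilerInfo pointer, and cbNewMethod the size of the rewritten method,
+// header included):
+//
+//     IMethodMalloc * pMalloc = NULL;
+//     if (SUCCEEDED(pInfo->GetILFunctionBodyAllocator(moduleId, &pMalloc)))
+//     {
+//         LPBYTE pNewIL = (LPBYTE)pMalloc->Alloc(cbNewMethod);
+//         if (pNewIL != NULL)
+//         {
+//             // ... copy the new IL method header and body into pNewIL ...
+//             pInfo->SetILFunctionBody(moduleId, methodDef, pNewIL);
+//         }
+//         pMalloc->Release();
+//     }
+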
+typedef struct _PROFILER_STACK_WALK_DATA PROFILER_STACK_WALK_DATA;
+
+//---------------------------------------------------------------------------------------
+// One of these is allocated per EE instance. A pointer is cached to this
+// from the profiler implementation. The profiler will call back on the v-table
+// to get at EE internals as required.
+
+class ProfToEEInterfaceImpl : public ICorProfilerInfo6
+{
+public:
+
+ // Internal Housekeeping
+
+ static void MethodTableCallback(void* context, void* methodTable);
+ static void ObjectRefCallback(void* context, void* objectRefUNSAFE);
+
+ ProfToEEInterfaceImpl();
+ ~ProfToEEInterfaceImpl();
+ HRESULT Init();
+
+ // IUnknown
+ ULONG STDMETHODCALLTYPE AddRef();
+ ULONG STDMETHODCALLTYPE Release();
+ COM_METHOD QueryInterface(REFIID id, void ** pInterface);
+
+ // ICorProfilerInfo2
+
+ COM_METHOD GetEventMask(DWORD * pdwEvents);
+ COM_METHOD SetEventMask(DWORD dwEventMask);
+
+ COM_METHOD GetHandleFromThread(ThreadID threadId, HANDLE * phThread);
+
+ COM_METHOD GetObjectSize(ObjectID objectId, ULONG * pcSize);
+
+ COM_METHOD GetObjectSize2(ObjectID objectId, SIZE_T * pcSize);
+
+ COM_METHOD IsArrayClass(
+ /* [in] */ ClassID classId,
+ /* [out] */ CorElementType * pBaseElemType,
+ /* [out] */ ClassID * pBaseClassId,
+ /* [out] */ ULONG * pcRank);
+
+ COM_METHOD GetThreadInfo(ThreadID threadId, DWORD * pdwWin32ThreadId);
+
+ COM_METHOD GetCurrentThreadID(ThreadID * pThreadId);
+
+ COM_METHOD GetFunctionFromIP(LPCBYTE ip, FunctionID * pFunctionId);
+
+ COM_METHOD GetTokenAndMetaDataFromFunction(
+ FunctionID functionId,
+ REFIID riid,
+ IUnknown ** ppOut,
+ mdToken * pToken);
+
+ COM_METHOD GetCodeInfo(FunctionID functionId, LPCBYTE * pStart, ULONG * pcSize);
+
+ COM_METHOD GetModuleInfo(
+ ModuleID moduleId,
+ LPCBYTE * ppBaseLoadAddress,
+ ULONG cchName,
+ ULONG * pcchName,
+ __out_ecount_part_opt(cchName, *pcchName) WCHAR szName[],
+ AssemblyID * pAssemblyId);
+
+ COM_METHOD GetModuleMetaData(
+ ModuleID moduleId,
+ DWORD dwOpenFlags,
+ REFIID riid,
+ IUnknown ** ppOut);
+
+ COM_METHOD GetILFunctionBody(
+ ModuleID moduleId,
+ mdMethodDef methodid,
+ LPCBYTE * ppMethodHeader,
+ ULONG * pcbMethodSize);
+
+ COM_METHOD GetILFunctionBodyAllocator(
+ ModuleID moduleId,
+ IMethodMalloc ** ppMalloc);
+
+ COM_METHOD SetILFunctionBody(
+ ModuleID moduleId,
+ mdMethodDef methodid,
+ LPCBYTE pbNewILMethodHeader);
+
+ COM_METHOD SetILInstrumentedCodeMap(
+ FunctionID functionId,
+ BOOL fStartJit,
+ ULONG cILMapEntries,
+ COR_IL_MAP rgILMapEntries[]);
+
+ COM_METHOD ForceGC();
+
+ COM_METHOD GetClassIDInfo(
+ ClassID classId,
+ ModuleID * pModuleId,
+ mdTypeDef * pTypeDefToken);
+
+ COM_METHOD GetFunctionInfo(
+ FunctionID functionId,
+ ClassID * pClassId,
+ ModuleID * pModuleId,
+ mdToken * pToken);
+
+ COM_METHOD GetClassFromObject(
+ ObjectID objectId,
+ ClassID * pClassId);
+
+ COM_METHOD GetClassFromToken(
+ ModuleID moduleId,
+ mdTypeDef typeDef,
+ ClassID * pClassId);
+
+ COM_METHOD GetFunctionFromToken(
+ ModuleID moduleId,
+ mdToken typeDef,
+ FunctionID * pFunctionId);
+
+ COM_METHOD GetAppDomainInfo(
+ AppDomainID appDomainId,
+ ULONG cchName,
+ ULONG * pcchName,
+ __out_ecount_part_opt(cchName, *pcchName) WCHAR szName[],
+ ProcessID * pProcessId);
+
+ COM_METHOD GetAssemblyInfo(
+ AssemblyID assemblyId,
+ ULONG cchName,
+ ULONG * pcchName,
+ __out_ecount_part_opt(cchName, *pcchName) WCHAR szName[],
+ AppDomainID * pAppDomainId,
+ ModuleID * pModuleId);
+
+ COM_METHOD SetEnterLeaveFunctionHooks(
+ FunctionEnter * pFuncEnter,
+ FunctionLeave * pFuncLeave,
+ FunctionTailcall * pFuncTailcall);
+
+ COM_METHOD SetEnterLeaveFunctionHooks2(
+ FunctionEnter2 * pFuncEnter,
+ FunctionLeave2 * pFuncLeave,
+ FunctionTailcall2 * pFuncTailcall);
+
+ COM_METHOD SetFunctionIDMapper(
+ FunctionIDMapper * pFunc);
+
+ COM_METHOD GetThreadContext(
+ ThreadID threadId,
+ ContextID * pContextId);
+
+ COM_METHOD GetILToNativeMapping(
+ /* [in] */ FunctionID functionId,
+ /* [in] */ ULONG32 cMap,
+ /* [out] */ ULONG32 * pcMap,
+ /* [out, size_is(cMap), length_is(*pcMap)] */
+ COR_DEBUG_IL_TO_NATIVE_MAP map[]);
+
+ COM_METHOD GetFunctionInfo2(
+ /* in */ FunctionID funcId,
+ /* in */ COR_PRF_FRAME_INFO frameInfo,
+ /* out */ ClassID * pClassId,
+ /* out */ ModuleID * pModuleId,
+ /* out */ mdToken * pToken,
+ /* in */ ULONG32 cTypeArgs,
+ /* out */ ULONG32 * pcTypeArgs,
+ /* out */ ClassID typeArgs[]);
+
+ COM_METHOD GetStringLayout(
+ /* out */ ULONG * pBufferLengthOffset,
+ /* out */ ULONG * pStringLengthOffset,
+ /* out */ ULONG * pBufferOffset);
+
+ COM_METHOD GetClassLayout(
+ /* in */ ClassID classID,
+ /* in, out */ COR_FIELD_OFFSET rFieldOffset[],
+ /* in */ ULONG cFieldOffset,
+ /* out */ ULONG * pcFieldOffset,
+ /* out */ ULONG * pulClassSize);
+
+ COM_METHOD DoStackSnapshot(
+ ThreadID thread,
+ StackSnapshotCallback *callback,
+ ULONG32 infoFlags,
+ void * clientData,
+ BYTE * pctx,
+ ULONG32 contextSize);
+
+ COM_METHOD GetCodeInfo2(FunctionID functionId,
+ ULONG32 cCodeInfos,
+ ULONG32 * pcCodeInfos,
+ COR_PRF_CODE_INFO codeInfos[]);
+
+ COM_METHOD GetArrayObjectInfo(ObjectID objectId,
+ ULONG32 cDimensionSizes,
+ ULONG32 pDimensionSizes[],
+ int pDimensionLowerBounds[],
+ BYTE ** ppData);
+
+ COM_METHOD GetBoxClassLayout(ClassID classId,
+ ULONG32 * pBufferOffset);
+
+ COM_METHOD GetClassIDInfo2(ClassID classId,
+ ModuleID * pModuleId,
+ mdTypeDef * pTypeDefToken,
+ ClassID * pParentClassId,
+ ULONG32 cNumTypeArgs,
+ ULONG32 * pcNumTypeArgs,
+ ClassID typeArgs[]);
+
+ COM_METHOD GetThreadAppDomain(ThreadID threadId,
+ AppDomainID * pAppDomainId);
+
+ COM_METHOD GetRVAStaticAddress(ClassID classId,
+ mdFieldDef fieldToken,
+ void ** ppAddress);
+
+ COM_METHOD GetAppDomainStaticAddress(ClassID classId,
+ mdFieldDef fieldToken,
+ AppDomainID appDomainId,
+ void ** ppAddress);
+
+ COM_METHOD GetThreadStaticAddress(ClassID classId,
+ mdFieldDef fieldToken,
+ ThreadID threadId,
+ void ** ppAddress);
+
+ COM_METHOD GetContextStaticAddress(ClassID classId,
+ mdFieldDef fieldToken,
+ ContextID contextId,
+ void ** ppAddress);
+
+ COM_METHOD GetStaticFieldInfo(ClassID classId,
+ mdFieldDef fieldToken,
+ COR_PRF_STATIC_TYPE * pFieldInfo);
+
+ COM_METHOD GetClassFromTokenAndTypeArgs(ModuleID moduleID,
+ mdTypeDef typeDef,
+ ULONG32 cTypeArgs,
+ ClassID typeArgs[],
+ ClassID* pClassID);
+
+ COM_METHOD EnumModuleFrozenObjects(ModuleID moduleID,
+ ICorProfilerObjectEnum** ppEnum);
+
+
+
+ COM_METHOD GetFunctionFromTokenAndTypeArgs(ModuleID moduleID,
+ mdMethodDef funcDef,
+ ClassID classId,
+ ULONG32 cTypeArgs,
+ ClassID typeArgs[],
+ FunctionID* pFunctionID);
+
+ COM_METHOD GetGenerationBounds(ULONG cObjectRanges,
+ ULONG * pcObjectRanges,
+ COR_PRF_GC_GENERATION_RANGE ranges[]);
+
+ COM_METHOD GetObjectGeneration(ObjectID objectId,
+ COR_PRF_GC_GENERATION_RANGE *range);
+
+ COM_METHOD GetNotifiedExceptionClauseInfo(COR_PRF_EX_CLAUSE_INFO * pinfo);
+
+ COM_METHOD SetFunctionReJIT(FunctionID);
+ COM_METHOD GetInprocInspectionInterface(IUnknown **);
+ COM_METHOD GetInprocInspectionIThisThread(IUnknown **);
+ COM_METHOD BeginInprocDebugging(BOOL,DWORD *);
+ COM_METHOD EndInprocDebugging(DWORD);
+
+ // ICorProfilerInfo3
+
+ COM_METHOD EnumJITedFunctions(ICorProfilerFunctionEnum** ppEnum);
+ COM_METHOD EnumModules(ICorProfilerModuleEnum ** ppEnum);
+
+ COM_METHOD RequestProfilerDetach(
+ /* in */ DWORD dwExpectedCompletionMilliseconds);
+
+ COM_METHOD SetFunctionIDMapper2(
+ FunctionIDMapper2 * pFunc, // in
+ void * clientData); // in
+
+ COM_METHOD SetEnterLeaveFunctionHooks3(
+ FunctionEnter3 * pFuncEnter3, // in
+ FunctionLeave3 * pFuncLeave3, // in
+ FunctionTailcall3 * pFuncTailcall3); // in
+
+ COM_METHOD SetEnterLeaveFunctionHooks3WithInfo(
+ FunctionEnter3WithInfo * pFuncEnter3WithInfo, // in
+ FunctionLeave3WithInfo * pFuncLeave3WithInfo, // in
+ FunctionTailcall3WithInfo * pFuncTailcall3WithInfo); // in
+
+ COM_METHOD GetFunctionEnter3Info(
+ FunctionID functionId, // in
+ COR_PRF_ELT_INFO eltInfo, // in
+ COR_PRF_FRAME_INFO * pFrameInfo, // out
+ ULONG * pcbArgumentInfo, // in, out
+ COR_PRF_FUNCTION_ARGUMENT_INFO * pArgumentInfo); // out
+
+ COM_METHOD GetFunctionLeave3Info(
+ FunctionID functionId, // in
+ COR_PRF_ELT_INFO eltInfo, // in
+ COR_PRF_FRAME_INFO * pFrameInfo, // out
+ COR_PRF_FUNCTION_ARGUMENT_RANGE * pRetvalRange); // out
+
+ COM_METHOD GetFunctionTailcall3Info(
+ FunctionID functionId, // in
+ COR_PRF_ELT_INFO eltInfo, // in
+ COR_PRF_FRAME_INFO * pFrameInfo); // out
+
+ COM_METHOD GetStringLayout2(
+ /* out */ ULONG * pStringLengthOffset,
+ /* out */ ULONG * pBufferOffset);
+
+ COM_METHOD GetRuntimeInformation(USHORT * pClrInstanceId, // out
+ COR_PRF_RUNTIME_TYPE * pRuntimeType, // out
+ USHORT * pMajorVersion, // out
+ USHORT * pMinorVersion, // out
+ USHORT * pBuildNumber, // out
+ USHORT * pQFEVersion, // out
+ ULONG cchVersionString, // in
+ ULONG * pcchVersionString, // out
+ __out_ecount_part_opt(cchVersionString, *pcchVersionString) WCHAR szVersionString[]); // out
+
+ COM_METHOD GetThreadStaticAddress2(ClassID classId, // in
+ mdFieldDef fieldToken, // in
+ AppDomainID appDomainId, // in
+ ThreadID threadId, // in
+ void ** ppAddress); // out
+
+ COM_METHOD GetAppDomainsContainingModule(ModuleID moduleId, // in
+ ULONG32 cAppDomainIds, // in
+ ULONG32 *pcAppDomainIds, // out
+ AppDomainID appDomainIds[]); // out
+
+ COM_METHOD GetModuleInfo2(
+ ModuleID moduleId,
+ LPCBYTE * ppBaseLoadAddress,
+ ULONG cchName,
+ ULONG * pcchName,
+ __out_ecount_part_opt(cchName, *pcchName) WCHAR szName[],
+ AssemblyID * pAssemblyId,
+ DWORD * pdwModuleFlags);
+
+ // end ICorProfilerInfo3
+
+ // ICorProfilerInfo4
+
+ COM_METHOD EnumThreads(
+ /* out */ ICorProfilerThreadEnum ** ppEnum);
+
+ COM_METHOD InitializeCurrentThread();
+
+ COM_METHOD RequestReJIT(ULONG cFunctions, // in
+ ModuleID moduleIds[], // in
+ mdMethodDef methodIds[]); // in
+
+ COM_METHOD RequestRevert(ULONG cFunctions, // in
+ ModuleID moduleIds[], // in
+ mdMethodDef methodIds[], // in
+ HRESULT status[]); // out
+
+ COM_METHOD GetCodeInfo3(FunctionID functionID, // in
+ ReJITID reJitId, // in
+ ULONG32 cCodeInfos, // in
+ ULONG32 * pcCodeInfos, // out
+ COR_PRF_CODE_INFO codeInfos[]); // out
+
+ COM_METHOD GetFunctionFromIP2(LPCBYTE ip, // in
+ FunctionID * pFunctionId, // out
+ ReJITID * pReJitId); // out
+
+ COM_METHOD GetReJITIDs(FunctionID functionId, // in
+ ULONG cReJitIds, // in
+ ULONG * pcReJitIds, // out
+ ReJITID reJitIds[]); // out
+
+ COM_METHOD GetILToNativeMapping2(
+ FunctionID functionId, // in
+ ReJITID reJitId, // in
+ ULONG32 cMap, // in
+ ULONG32 * pcMap, // out
+ COR_DEBUG_IL_TO_NATIVE_MAP map[]); // out
+
+ COM_METHOD EnumJITedFunctions2(ICorProfilerFunctionEnum** ppEnum);
+
+ // end ICorProfilerInfo4
+
+
+ // begin ICorProfilerInfo5
+
+ COM_METHOD SetEventMask2(
+ DWORD dwEventsLow,
+ DWORD dwEventsHigh);
+
+ COM_METHOD GetEventMask2(DWORD *pdwEventsLow, DWORD *pdwEventsHigh);
+
+ // end ICorProfilerInfo5
+
+ // begin ICorProfilerInfo6
+
+ COM_METHOD EnumNgenModuleMethodsInliningThisMethod(
+ ModuleID inlinersModuleId,
+ ModuleID inlineeModuleId,
+ mdMethodDef inlineeMethodId,
+ BOOL *incompleteData,
+ ICorProfilerMethodEnum** ppEnum);
+
+
+ // end ICorProfilerInfo6
+
+protected:
+
+ // Internal Helper Functions
+
+ HRESULT GetCodeInfoHelper(FunctionID functionId,
+ ReJITID reJitId,
+ ULONG32 cCodeInfos,
+ ULONG32 * pcCodeInfos,
+ COR_PRF_CODE_INFO codeInfos[]);
+
+ HRESULT GetStringLayoutHelper(ULONG * pBufferLengthOffset,
+ ULONG * pStringLengthOffset,
+ ULONG * pBufferOffset);
+
+ HRESULT GetArrayObjectInfoHelper(Object * pObj,
+ ULONG32 cDimensionSizes,
+ __out_ecount(cDimensionSizes) ULONG32 pDimensionSizes[],
+ __out_ecount(cDimensionSizes) int pDimensionLowerBounds[],
+ BYTE ** ppData);
+
+ DWORD GetModuleFlags(Module * pModule);
+
+ HRESULT DoStackSnapshotHelper(Thread * pThreadToSnapshot,
+ PROFILER_STACK_WALK_DATA * pData,
+ unsigned flags,
+ LPCONTEXT pctxSeed);
+
+ HRESULT ProfilerStackWalkFramesWrapper(Thread * pThreadToSnapshot, PROFILER_STACK_WALK_DATA * pData, unsigned flags);
+
+ HRESULT EnumJITedFunctionsHelper(ProfilerFunctionEnum ** ppEnum, IJitManager ** ppJitMgr);
+
+#ifdef _TARGET_X86_
+ HRESULT ProfilerEbpWalker(Thread * pThreadToSnapshot, LPCONTEXT pctxSeed, StackSnapshotCallback * callback, void * clientData);
+#endif //_TARGET_X86_
+};
+
+#endif // PROFILING_SUPPORTED
+
+//---------------------------------------------------------------------------------------
+// This provides the implementations for FCALLs in managed code related to profiling
+
+class ProfilingFCallHelper
+{
+public:
+ // This is a high-efficiency way for managed profiler code to determine if
+ // profiling of remoting is active.
+ static FCDECL0(FC_BOOL_RET, FC_TrackRemoting);
+
+ // This is a high-efficiency way for managed profiler code to determine if
+ // profiling of remoting with RPC cookie IDs is active.
+ static FCDECL0(FC_BOOL_RET, FC_TrackRemotingCookie);
+
+    // This is a high-efficiency way for managed profiler code to determine if
+    // profiling of asynchronous remote calls is active.
+ static FCDECL0(FC_BOOL_RET, FC_TrackRemotingAsync);
+
+ // This will let the profiler know that the client side is sending a message to
+    // the server side.
+ static FCDECL2(void, FC_RemotingClientSendingMessage, GUID * pId, CLR_BOOL fIsAsync);
+
+    // For the __cdecl calling convention, both arguments end up on
+    // the stack, but the order in which the JIT puts them there needs to be reversed.
+    // For the __fastcall calling convention, the reversal has no effect because the GUID doesn't
+    // fit in a register. On IA64 the macro is different.
+
+ // This will let the profiler know that the client side is receiving a reply
+ // to a message that it sent
+ static FCDECL2_VI(void, FC_RemotingClientReceivingReply, GUID id, CLR_BOOL fIsAsync);
+
+ // This will let the profiler know that the server side is receiving a message
+ // from a client
+ static FCDECL2_VI(void, FC_RemotingServerReceivingMessage, GUID id, CLR_BOOL fIsAsync);
+
+ // This will let the profiler know that the server side is sending a reply to
+ // a received message.
+ static FCDECL2(void, FC_RemotingServerSendingReply, GUID * pId, CLR_BOOL fIsAsync);
+};
+
+#endif // __PROFTOEEINTERFACEIMPL_H__
+
+
diff --git a/src/vm/proftoeeinterfaceimpl.inl b/src/vm/proftoeeinterfaceimpl.inl
new file mode 100644
index 0000000000..4103f83e12
--- /dev/null
+++ b/src/vm/proftoeeinterfaceimpl.inl
@@ -0,0 +1,196 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// FILE: ProfToEEInterfaceImpl.inl
+//
+// Inline implementation of portions of the code used to help implement the
+// ICorProfilerInfo* interfaces, which allow the Profiler to communicate with the EE.
+//
+
+//
+// ======================================================================================
+
+#ifndef __PROFTOEEINTERFACEIMPL_INL__
+#define __PROFTOEEINTERFACEIMPL_INL__
+
+#ifdef PROFILING_SUPPORTED
+
+//---------------------------------------------------------------------------------------
+// Helpers
+
+
+//---------------------------------------------------------------------------------------
+//
+// "Callback flags" are typically set on the current EE Thread object just before we
+// call into a profiler (see SetCallbackStateFlagsHolder). This helps us remember that
+// we deliberately called into the profiler, as opposed to the profiler gaining control
+// by hijacking a thread. This helper function is used in PROFILER_TO_CLR_ENTRYPOINT_SYNC
+// to test the flags in order to authorize a profiler's call into us. The macro is
+// placed at the top of any call that's supposed to be synchronous-only. If no flags are
+// set, that implies the profiler hijacked the thread, so we reject the call. In
+// contrast, PROFILER_TO_CLR_ENTRYPOINT_ASYNC does NOT call this helper function, and
+// thus deliberately allows the hijacked thread to continue calling back into the runtime.
+//
+// Arguments:
+// dwFlags - Flags to test
+//
+// Return Value:
+// If no EE Thread object: nonzero
+// If EE Thread object AND any of the specified flags are set on it: nonzero
+// Else zero (FALSE)
+//
+
+inline BOOL AreCallbackStateFlagsSet(DWORD dwFlags)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ EE_THREAD_NOT_REQUIRED;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ Thread * pThread = GetThreadNULLOk();
+ if (pThread == NULL)
+ {
+ // Not a managed thread; profiler can do whatever it wants
+ return TRUE;
+ }
+
+ BOOL fRet;
+ BEGIN_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
+ DWORD dwProfilerCallbackFullStateFlags = pThread->GetProfilerCallbackFullState();
+ if ((dwProfilerCallbackFullStateFlags & COR_PRF_CALLBACKSTATE_FORCEGC_WAS_CALLED) != 0)
+ {
+ // Threads on which ForceGC() was successfully called should be treated just
+ // like native threads. Profiler can do whatever it wants
+ return TRUE;
+ }
+
+ fRet = ((dwProfilerCallbackFullStateFlags & dwFlags) == dwFlags);
+ END_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
+ return fRet;
+}
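+
+//---------------------------------------------------------------------------------------
+//
+// Illustrative sketch (not the actual macro body): a synchronous-only entrypoint
+// guard built on this helper might look like the following; the real
+// PROFILER_TO_CLR_ENTRYPOINT_SYNC expansion differs in its details.
+//
+//     if (!AreCallbackStateFlagsSet(COR_PRF_CALLBACKSTATE_INCALLBACK))
+//     {
+//         return CORPROF_E_UNSUPPORTED_CALL_SEQUENCE;
+//     }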
+
+
+//---------------------------------------------------------------------------------------
+//
+// Simple helper that returns nonzero iff the currently-executing function was called
+// asynchronously (i.e., from outside of a callback, as hijacking profilers do)
+//
+
+inline BOOL IsCalledAsynchronously()
+{
+ LIMITED_METHOD_CONTRACT;
+ return !(AreCallbackStateFlagsSet(COR_PRF_CALLBACKSTATE_INCALLBACK));
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Simple helper that decides whether we should avoid calling into the host. Generally,
+// host calls should be avoided if the current Info method was called asynchronously
+// (i.e., from an F1-style hijack), for fear of re-entering the host (mainly SQL).
+//
+// Server GC threads are native (non-EE) threads, which therefore do not track enough
+// state for us to determine if a call is made asynchronously on those threads. So we
+// pessimistically assume that the current call on a server GC thread is from a hijack
+// for the purposes of determining whether we may enter the host. Reasoning for this:
+// * SQL enables server-mode GC
+// * server GC threads are responsible for performing runtime suspension, and thus
+// call Thread::SuspendThread() which yields/sleeps and thus enters the host. So
+// server GC threads are examples of non-EE Threads that actually do spend time
+// in the host (this otherwise almost never happens for other non-EE threads).
+// * In spite of this pessimism, the effect on the profiler should be minimal. The
+// host calls we're avoiding are from the code manager's lock, which:
+// * a) Is only used when doing stack walks or translating IPs to functions
+// * b) Is only affected if it tries to yield/sleep when the code manager
+// writer lock is taken, and that happens for incredibly tiny windows of
+// time.
+//
+
+inline BOOL ShouldAvoidHostCalls()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return
+ (
+ IsCalledAsynchronously() ||
+ (
+ (GetThreadNULLOk() == NULL) && IsGCSpecialThread()
+ )
+ );
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Simple helper that returns nonzero iff the current thread is a non-EE thread in the
+// process of doing a GC
+//
+
+inline BOOL NativeThreadInGC()
+{
+ LIMITED_METHOD_CONTRACT;
+ return ((g_profControlBlock.fGCInProgress) && (GetThreadNULLOk() == NULL));
+}
+
+//---------------------------------------------------------------------------------------
+//
+// ProfToEE functions can use these overloads to determine whether a Thread should be
+// visible to a profiler and thus be suitable for querying information about, by a
+// profiler. If the Thread is non-NULL and is NOT a GCSpecial thread, then it's
+// considered "managed", and is thus visible to the profiler.
+//
+// Arguments:
+// pThread or threadId - Thread to check
+//
+// Return Value:
+// nonzero iff the thread can run managed code
+//
+// Notes:
+// See code:Thread::m_fGCSpecial for more information
+//
+
+inline BOOL IsManagedThread(Thread * pThread)
+{
+ LIMITED_METHOD_CONTRACT;
+ return ((pThread != NULL) && (!pThread->IsGCSpecial()));
+}
+
+inline BOOL IsManagedThread(ThreadID threadId)
+{
+ LIMITED_METHOD_CONTRACT;
+ return IsManagedThread(reinterpret_cast<Thread *>(threadId));
+}
+
+//---------------------------------------------------------------------------------------
+//
+// ProfToEEInterfaceImpl ctor.
+//
+
+inline ProfToEEInterfaceImpl::ProfToEEInterfaceImpl()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+
+inline BOOL IsClassOfMethodTableInited(MethodTable * pMethodTable, AppDomain * pAppDomain)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (pMethodTable->IsRestored() &&
+ (pMethodTable->GetModuleForStatics() != NULL) &&
+ (pMethodTable->GetDomainLocalModule(pAppDomain) != NULL) &&
+ pMethodTable->IsClassInited(pAppDomain));
+}
+
+
+#endif // PROFILING_SUPPORTED
+
+#endif // __PROFTOEEINTERFACEIMPL_INL__
diff --git a/src/vm/qcall.cpp b/src/vm/qcall.cpp
new file mode 100644
index 0000000000..c134a98d6e
--- /dev/null
+++ b/src/vm/qcall.cpp
@@ -0,0 +1,108 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// QCALL.CPP
+//
+
+
+
+#include "common.h"
+
+//
+// Helpers for returning managed string from QCall
+//
+
+void QCall::StringHandleOnStack::Set(const SString& value)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ Set(StringObject::NewString(value));
+}
+
+void QCall::StringHandleOnStack::Set(LPCWSTR pwzValue)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ Set(StringObject::NewString(pwzValue));
+}
+
+void QCall::StringHandleOnStack::Set(LPCUTF8 pszValue)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ Set(StringObject::NewString(pszValue));
+}
+
+//
+// Helpers for returning common managed types from QCall
+//
+
+void QCall::ObjectHandleOnStack::SetByteArray(const BYTE * p, COUNT_T length)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ BASEARRAYREF arr = (BASEARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_U1, length);
+ memcpyNoGCRefs(arr->GetDataPtr(), p, length * sizeof(BYTE));
+ Set(arr);
+}
+
+void QCall::ObjectHandleOnStack::SetIntPtrArray(const PVOID * p, COUNT_T length)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ BASEARRAYREF arr = (BASEARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_I, length);
+ memcpyNoGCRefs(arr->GetDataPtr(), p, length * sizeof(PVOID));
+ Set(arr);
+}
+
+void QCall::ObjectHandleOnStack::SetGuidArray(const GUID * p, COUNT_T length)
+{
+ STANDARD_VM_CONTRACT;
+
+ GCX_COOP();
+
+ TypeHandle typeHandle = MscorlibBinder::GetClass(CLASS__GUID);
+ BASEARRAYREF arr = (BASEARRAYREF) AllocateValueSzArray(typeHandle, length);
+ memcpyNoGCRefs(arr->GetDataPtr(), p, length * sizeof(GUID));
+ Set(arr);
+}
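+
+//---------------------------------------------------------------------------------------
+//
+// Illustrative sketch (hypothetical QCall, following the pattern documented in
+// qcall.h): returning a byte array to managed code via ObjectHandleOnStack.
+// FooNative::GetBlob is not a real entrypoint; it only shows the intended usage:
+//
+//     void QCALLTYPE FooNative::GetBlob(QCall::ObjectHandleOnStack retBlob)
+//     {
+//         QCALL_CONTRACT;
+//
+//         BEGIN_QCALL;
+//
+//         BYTE data[16] = { 0 };
+//         retBlob.SetByteArray(data, sizeof(data));
+//
+//         END_QCALL;
+//     }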
+
+//
+// Helpers for passing an AppDomain to a QCall
+//
+
+#ifdef _DEBUG
+
+//---------------------------------------------------------------------------------------
+//
+// Verify that the AppDomain being passed from the BCL is valid for use in a QCall. Note: some additional
+// checks are in System.AppDomain.GetNativeHandle()
+//
+
+void QCall::AppDomainHandle::VerifyDomainHandle() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // System.AppDomain.GetNativeHandle() should ensure that we're not calling through with a null AppDomain pointer.
+ _ASSERTE(m_pAppDomain);
+
+ // QCalls should only be made with pointers to the current domain
+ _ASSERTE(GetAppDomain() == m_pAppDomain);
+
+    // We should not have a QCall made on an invalid AppDomain. Once a domain is unloaded, we won't let anyone else
+    // in, and any threads that are already in will be unwound.
+ _ASSERTE(SystemDomain::GetAppDomainAtIndex(m_pAppDomain->GetIndex()) != NULL);
+}
+
+#endif // _DEBUG
diff --git a/src/vm/qcall.h b/src/vm/qcall.h
new file mode 100644
index 0000000000..2cb55f483c
--- /dev/null
+++ b/src/vm/qcall.h
@@ -0,0 +1,348 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// QCall.H
+
+
+
+#ifndef __QCall_h__
+#define __QCall_h__
+
+#include "clr_std/type_traits"
+
+//
+// QCALLS
+//
+
+// QCalls are internal calls from managed code in mscorlib.dll to unmanaged code in mscorwks.dll. QCalls are very much like
+// a normal P/Invoke from mscorlib.dll to mscorwks.dll.
+//
+// Unlike FCalls, QCalls will marshal all arguments as unmanaged types like a normal P/Invoke. QCalls also switch to preemptive
+// GC mode like a normal P/Invoke. These two features should make QCalls easier to write reliably compared to FCalls.
+// QCalls are not prone to GC holes and GC starvation bugs that are common with FCalls.
+//
+// QCalls perform better than FCalls w/ HelperMethodFrame. The QCall overhead is about 1.4x lower than the
+// FCall w/ HelperMethodFrame overhead on x86. The performance is about the same on x64. However, the implementation
+// of P/Invoke marshaling on x64 is not tuned for performance yet. QCalls should become significantly faster than
+// FCalls w/ HelperMethodFrame on x64 as we do performance tuning of P/Invoke marshaling on x64.
+//
+//
+// The preferred types for QCall arguments are primitive types that are efficiently handled by the P/Invoke marshaler (INT32, LPCWSTR, BOOL).
+// (Notice that BOOL is the correct boolean flavor for QCall arguments. CLR_BOOL is the correct boolean flavor for FCall arguments.)
+//
+// The pointers to common unmanaged EE structures should be wrapped into helper handle types. This is to make the managed implementation
+// type safe and avoid falling into unsafe C# everywhere. See the AssemblyHandle below for a good example.
+//
+// There is a way to pass raw object references in and out of QCalls. It is done by wrapping a pointer to
+// a local variable in a handle. It is intentionally cumbersome and should be avoided if reasonably possible.
+// See the StringHandleOnStack in the example below. String arguments will get marshaled in as LPCWSTR.
+// Returning objects, especially strings, from QCalls is the only common pattern
+// where returning the raw objects (as an OUT argument) is widely acceptable.
+//
+//
+// QCall example - managed part (do not replicate the comments into your actual QCall implementation):
+// ---------------------------------------------------------------------------------------------------
+//
+// class Foo {
+//
+// // All QCalls should have the following DllImport and SuppressUnmanagedCodeSecurity attributes
+// [DllImport(JitHelpers.QCall, CharSet = CharSet.Unicode)]
+// [SuppressUnmanagedCodeSecurity]
+// // QCalls should always be static extern.
+// private static extern bool Bar(int flags, string inString, StringHandleOnStack retString);
+//
+//     // Many QCalls have a thin managed wrapper around them to expose them to the world in a more meaningful way.
+// public string Bar(int flags)
+// {
+// string retString = null;
+//
+//            // Strings are returned from QCalls by taking the address
+//            // of a local variable using the JitHelpers.GetStringHandleOnStack method
+// if (!Bar(flags, this.Id, JitHelpers.GetStringHandleOnStack(ref retString)))
+// FatalError();
+//
+// return retString;
+// }
+//
+// Every QCall currently produces a couple of bogus FXCop warnings. Just add them to the FXCop exclusion list for now.
+//
+//
+// QCall example - unmanaged part (do not replicate the comments into your actual QCall implementation):
+// -----------------------------------------------------------------------------------------------------
+//
+// The entrypoints of all QCalls have to be registered in tables in vm\ecall.cpp using the QCFuncElement macro,
+// for example: QCFuncElement("Bar", FooNative::Bar)
+//
+// class FooNative {
+// public:
+// // All QCalls should be static and should be tagged with QCALLTYPE
+// static
+// BOOL QCALLTYPE Bar(int flags, LPCWSTR wszString, QCall::StringHandleOnStack retString);
+// };
+//
+// BOOL QCALLTYPE FooNative::Bar(int flags, LPCWSTR wszString, QCall::StringHandleOnStack retString)
+// {
+//        // All QCalls should have QCALL_CONTRACT. It is an alias for: THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; SO_TOLERANT.
+// QCALL_CONTRACT;
+//
+// // Optionally, use QCALL_CHECK instead and the expanded form of the contract if you want to specify preconditions:
+// // CONTRACTL {
+// // QCALL_CHECK;
+// // PRECONDITION(wszString != NULL);
+// // } CONTRACTL_END;
+//
+// // The only line between QCALL_CONTRACT and BEGIN_QCALL
+// // should be the return value declaration if there is one.
+// BOOL retVal = FALSE;
+//
+// // The body has to be enclosed in BEGIN_QCALL/END_QCALL macro. It is necessary to make the exception handling work.
+// BEGIN_QCALL;
+//
+// // Validate arguments if necessary and throw exceptions like anywhere else in the EE. There is no convention currently
+// // on whether the argument validation should be done in managed or unmanaged code.
+// if (flags != 0)
+// COMPlusThrow(kArgumentException, L"InvalidFlags");
+//
+// // No need to worry about GC moving strings passed into QCall. Marshalling pins them for us.
+// printf("%S", wszString);
+//
+//        // This is the most efficient way to return strings back to managed code. No need to use StringBuilder.
+// retString.Set(L"Hello");
+//
+//        // You cannot return from inside a BEGIN_QCALL/END_QCALL pair. The return value has to be passed out in a helper variable.
+// retVal = TRUE;
+//
+// END_QCALL;
+//
+// return retVal;
+// }
+
+
+#define QCALLTYPE __stdcall
+
+#define BEGIN_QCALL INSTALL_UNWIND_AND_CONTINUE_HANDLER
+#define END_QCALL UNINSTALL_UNWIND_AND_CONTINUE_HANDLER
+
+#define BEGIN_QCALL_SO_TOLERANT INSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE
+#define END_QCALL_SO_TOLERANT UNINSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE
+
+
+#define QCALL_CHECK \
+ THROWS; \
+ GC_TRIGGERS; \
+ MODE_PREEMPTIVE; \
+ SO_TOLERANT; \
+
+#define QCALL_CONTRACT CONTRACTL { QCALL_CHECK; } CONTRACTL_END;
+
+//
+// Scope class for QCall helper methods and types
+//
+class QCall
+{
+public:
+
+ //
+    // Helper types to aid marshaling of QCall arguments in a type-safe manner
+ //
+ // The C/C++ compiler has to treat these types as POD (plain old data) to generate
+ // a calling convention compatible with P/Invoke marshaling. This means that:
+ // NONE OF THESE HELPER TYPES CAN HAVE CONSTRUCTOR OR DESTRUCTOR!
+ // THESE HELPER TYPES CAN NOT BE IMPLEMENTED USING INHERITANCE OR TEMPLATES!
+ //
+
+ //
+ // StringHandleOnStack is used for managed strings
+ //
+ struct StringHandleOnStack
+ {
+ StringObject ** m_ppStringObject;
+
+#ifndef DACCESS_COMPILE
+ //
+ // Helpers for returning managed string from QCall
+ //
+
+ // Raw setter - note that you need to be in cooperative mode
+ void Set(STRINGREF s)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // The space for the return value has to be on the stack
+ _ASSERTE(GetThread()->IsAddressInStack(m_ppStringObject));
+
+ *m_ppStringObject = STRINGREFToObject(s);
+ }
+
+ void Set(const SString& value);
+ void Set(LPCWSTR pwzValue);
+ void Set(LPCUTF8 pszValue);
+#endif // !DACCESS_COMPILE
+ };
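+
+    //
+    // Usage note: the out-of-line setters above (implemented in qcall.cpp) switch
+    // to cooperative GC mode before allocating the managed string, e.g.:
+    //
+    //     GCX_COOP();
+    //     Set(StringObject::NewString(pwzValue));
+    //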
+
+ //
+ // ObjectHandleOnStack type is used for managed objects
+ //
+ struct ObjectHandleOnStack
+ {
+ Object ** m_ppObject;
+
+#ifndef DACCESS_COMPILE
+ //
+ // Helpers for returning common managed types from QCall
+ //
+ void Set(OBJECTREF o)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // The space for the return value has to be on the stack
+ _ASSERTE(GetThread()->IsAddressInStack(m_ppObject));
+
+ *m_ppObject = OBJECTREFToObject(o);
+ }
+
+ void SetByteArray(const BYTE * p, COUNT_T length);
+ void SetIntPtrArray(const PVOID * p, COUNT_T length);
+ void SetGuidArray(const GUID * p, COUNT_T length);
+
+ // Do not add operator overloads to convert this object into a stack reference to a specific object type
+ // such as OBJECTREF *. While such things are correct, our debug checking logic is unable to verify that
+        // the object reference is actually protected from access and therefore will assert.
+ // See bug 254159 for details.
+
+#endif // !DACCESS_COMPILE
+ };
+
+ //
+ // StackCrawlMarkHandle is used for passing StackCrawlMark into QCalls
+ //
+ struct StackCrawlMarkHandle
+ {
+ StackCrawlMark * m_pMark;
+
+ operator StackCrawlMark * ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pMark;
+ }
+ };
+
+    // AppDomainHandle is used for passing AppDomains into QCalls via System.AppDomainHandle
+ struct AppDomainHandle
+ {
+ AppDomain *m_pAppDomain;
+
+ operator AppDomain *()
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef _DEBUG
+ VerifyDomainHandle();
+#endif // _DEBUG
+ return m_pAppDomain;
+ }
+
+ AppDomain *operator->() const
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef _DEBUG
+ VerifyDomainHandle();
+#endif // _DEBUG
+ return m_pAppDomain;
+ }
+
+ private:
+#ifdef _DEBUG
+ void VerifyDomainHandle() const;
+#endif // _DEBUG
+ };
+
+ struct AssemblyHandle
+ {
+ DomainAssembly * m_pAssembly;
+
+ operator DomainAssembly * ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pAssembly;
+ }
+
+ DomainAssembly * operator->() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pAssembly;
+ }
+ };
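+
+    // Illustrative (hypothetical) QCall signature showing how these handle
+    // wrappers keep a QCall type-safe; GetAssemblyName is not a real entrypoint:
+    //
+    //     static void QCALLTYPE GetAssemblyName(QCall::AssemblyHandle pAssembly,
+    //                                           QCall::StringHandleOnStack retName);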
+
+ struct ModuleHandle
+ {
+ Module * m_pModule;
+
+ operator Module * ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pModule;
+ }
+
+ Module * operator->() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pModule;
+ }
+ };
+
+ struct LoaderAllocatorHandle
+ {
+ LoaderAllocator * m_pLoaderAllocator;
+
+ operator LoaderAllocator * ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pLoaderAllocator;
+ }
+
+ LoaderAllocator * operator -> () const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pLoaderAllocator;
+ }
+
+ static LoaderAllocatorHandle From(LoaderAllocator * pLoaderAllocator)
+ {
+ LoaderAllocatorHandle h;
+ h.m_pLoaderAllocator = pLoaderAllocator;
+ return h;
+ }
+ };
+
+    // The lifetime management between managed and native Thread objects is broken. There is a resurrection
+    // race where one can get a dangling pointer to the unmanaged Thread object. Once this race is fixed,
+    // we may need to revisit how the unmanaged thread handles are passed around.
+ struct ThreadHandle
+ {
+ Thread * m_pThread;
+
+ operator Thread * ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pThread;
+ }
+
+ Thread * operator->() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pThread;
+ }
+ };
+};
+
+typedef void* EnregisteredTypeHandle;
+
+#endif //__QCall_h__
diff --git a/src/vm/rcwrefcache.cpp b/src/vm/rcwrefcache.cpp
new file mode 100644
index 0000000000..83f27960ad
--- /dev/null
+++ b/src/vm/rcwrefcache.cpp
@@ -0,0 +1,291 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+/*============================================================
+**
+** Class: RCWRefCache
+**
+**
+** Purpose: The implementation of the RCWRefCache class.
+**          This class maintains a per-AppDomain cache that can be used
+**          by RCWs to reference other CCWs
+===========================================================*/
+
+#include "common.h"
+
+#include "objecthandle.h"
+#include "rcwrefcache.h"
+#include "comcallablewrapper.h"
+#include "runtimecallablewrapper.h"
+
+// SHRINK_TOTAL_THRESHOLD - only shrink when the total number of handles exceeds this number
+#ifdef _DEBUG
+// Exercise the shrink path more often
+#define SHRINK_TOTAL_THRESHOLD 4
+#else
+#define SHRINK_TOTAL_THRESHOLD 100
+#endif // _DEBUG
+
+// SHRINK_HINT_THRESHOLD - only shrink when we consistently see a hint that we probably need to shrink
+#ifdef _DEBUG
+// Exercise the shrink path more often
+#define SHRINK_HINT_THRESHOLD 0
+#else
+#define SHRINK_HINT_THRESHOLD 10
+#endif // _DEBUG
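+
+// Worked example of the shrink policy below (retail thresholds): with 128 handles
+// in the cache, using fewer than 64 of them during a walk counts as a shrink hint
+// (since 128 > SHRINK_TOTAL_THRESHOLD). Once more than SHRINK_HINT_THRESHOLD (10)
+// consecutive hints accumulate, ShrinkDependentHandles() halves the cache to 64.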
+
+RCWRefCache::RCWRefCache(AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pAppDomain != NULL);
+
+ m_pAppDomain = pAppDomain;
+
+ m_dwDepHndListFreeIndex = 0;
+}
+
+RCWRefCache::~RCWRefCache()
+{
+
+ LIMITED_METHOD_CONTRACT;
+
+ // We don't need to clear the handles because this AppDomain will go away
+ // and GC will clean everything for us
+}
+
+//
+// Reset dependent handle cache by assigning 0 to m_dwDepHndListFreeIndex.
+//
+void RCWRefCache::ResetDependentHandles()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_INTEROP, LL_INFO100, "\t[RCWRefCache 0x%p] ----- RCWRefCache::ResetDependentHandles BEGINS -----\n", this));
+ LOG((LF_INTEROP, LL_INFO100,
+ "\t[RCWRefCache 0x%p] Dependent handle cache status: Total SLOTs = %d, Next free SLOT index = %d\n",
+ this, (ULONG) m_depHndList.Size(), m_dwDepHndListFreeIndex
+ ));
+
+ // Reset the index - now every handle in the cache can be reused
+ m_dwDepHndListFreeIndex = 0;
+
+ LOG((LF_INTEROP, LL_INFO100, "\t[RCWRefCache 0x%p] ----- RCWRefCache::ResetDependentHandles ENDS -----\n", this));
+}
+
+//
+// Shrink the dependent handle cache if necessary (will destroy handles) and clear unused handles.
+//
+void RCWRefCache::ShrinkDependentHandles()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_dwDepHndListFreeIndex <= m_depHndList.Size());
+
+ LOG((LF_INTEROP, LL_INFO100, "\t[RCWRefCache 0x%p] ----- RCWRefCache::ShrinkDependentHandles BEGINS -----\n", this));
+ LOG((LF_INTEROP, LL_INFO100,
+ "\t[RCWRefCache 0x%p] Dependent handle cache status: Total SLOTs = %d, Next free SLOT index = %d\n",
+ this, (ULONG) m_depHndList.Size(), m_dwDepHndListFreeIndex
+ ));
+
+ SIZE_T depHndListSize = m_depHndList.Size();
+
+
+ //
+    // If we used less than half of the handles last time, it is a hint that we probably need to shrink
+ //
+ _ASSERTE(SHRINK_TOTAL_THRESHOLD / 2 > 0);
+ if (m_dwDepHndListFreeIndex < depHndListSize / 2 && depHndListSize > SHRINK_TOTAL_THRESHOLD)
+ {
+ m_dwShrinkHint++;
+ LOG((LF_INTEROP, LL_INFO100, "\t[RCWRefCache 0x%p] m_dwShrinkHint = %d\n", this, m_dwShrinkHint));
+
+ //
+        // Only shrink if we've consistently seen such a hint more than SHRINK_HINT_THRESHOLD times
+ //
+ if (m_dwShrinkHint > SHRINK_HINT_THRESHOLD)
+ {
+
+ LOG((LF_INTEROP, LL_INFO100,
+ "\t[RCWRefCache 0x%p] Shrinking dependent handle cache. Total SLOTS = %d\n",
+ this, m_depHndList.Size()
+ ));
+
+ //
+ // Destroy the handles we don't need and resize
+ //
+ SIZE_T newSize = depHndListSize / 2;
+
+ _ASSERTE(newSize > 0);
+
+ for (SIZE_T i = newSize; i < depHndListSize; ++i)
+ {
+ OBJECTHANDLE hnd = m_depHndList.Pop();
+ DestroyDependentHandle(hnd);
+ LOG((LF_INTEROP, LL_INFO1000,
+ "\t[RCWRefCache 0x%p] DependentHandle 0x%p destroyed @ index %d\n",
+ this, hnd, (ULONG)(depHndListSize - (i - newSize + 1))));
+ }
+
+ // Try realloc - I don't expect this to fail but who knows
+ // If it fails, we just don't care
+ m_depHndList.ReSizeNoThrow(newSize);
+
+ //
+            // Reset the shrink hint as we've just shrunk
+ //
+ m_dwShrinkHint = 0;
+ LOG((LF_INTEROP, LL_INFO100, "\t[RCWRefCache 0x%p] Reset m_dwShrinkHint = 0\n", this));
+ }
+ }
+ else
+ {
+ //
+ // Reset shrink hint and start over
+ //
+ m_dwShrinkHint = 0;
+        LOG((LF_INTEROP, LL_INFO100, "\t[RCWRefCache 0x%p] Reset m_dwShrinkHint = 0\n", this));
+ }
+
+ //
+ // Iterate through the list and clear the unused handles
+ //
+ for (SIZE_T i = m_dwDepHndListFreeIndex; i < m_depHndList.Size(); ++i)
+ {
+ OBJECTHANDLE depHnd = m_depHndList[i];
+
+ HndAssignHandle(depHnd, NULL);
+ SetDependentHandleSecondary(depHnd, NULL);
+
+ LOG((LF_INTEROP, LL_INFO1000, "\t[RCWRefCache 0x%p] DependentHandle 0x%p cleared @ index %d\n", this, depHnd, (ULONG) i));
+ }
+
+ LOG((LF_INTEROP, LL_INFO100,
+ "\t[RCWRefCache 0x%p] Dependent handle cache status: Total SLOTs = %d, Next free SLOT index = %d\n",
+ this, (ULONG) m_depHndList.Size(), m_dwDepHndListFreeIndex
+ ));
+
+ LOG((LF_INTEROP, LL_INFO100, "\t[RCWRefCache 0x%p] ----- RCWRefCache::ShrinkDependentHandles ENDS -----\n", this));
+}
+
+//
+// Add a reference from RCW to CCW
+//
+HRESULT RCWRefCache::AddReferenceFromRCWToCCW(RCW *pRCW, ComCallWrapper *pCCW)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pRCW));
+ PRECONDITION(CheckPointer(pCCW));
+ PRECONDITION(CheckPointer(OBJECTREFToObject(pCCW->GetObjectRef())));
+ PRECONDITION(pRCW->IsJupiterObject());
+ PRECONDITION(pCCW->GetJupiterRefCount() > 0);
+ }
+ CONTRACTL_END;
+
+ // Try adding reference using dependent handles
+ return AddReferenceUsingDependentHandle(pRCW, pCCW);
+}
+
+//
+// Add RCW -> CCW reference using dependent handle
+// May fail if OOM
+//
+HRESULT RCWRefCache::AddReferenceUsingDependentHandle(RCW *pRCW, ComCallWrapper *pCCW)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pRCW));
+ PRECONDITION(CheckPointer(pCCW));
+ PRECONDITION(CheckPointer(OBJECTREFToObject(pCCW->GetObjectRef())));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ LOG((LF_INTEROP, LL_INFO1000,
+ "\t[RCWRefCache 0x%p] Dependent handle cache status: Total SLOTs = %d, Next free SLOT index = %d\n",
+ this, (ULONG) m_depHndList.Size(), m_dwDepHndListFreeIndex
+ ));
+
+    // Is there a valid DependentHandle in the array?
+ if (m_dwDepHndListFreeIndex >= m_depHndList.Size())
+ {
+ // No, we need to create a new handle
+ EX_TRY
+ {
+ OBJECTHANDLE depHnd = m_pAppDomain->CreateDependentHandle(pRCW->GetExposedObject(), pCCW->GetObjectRef());
+ m_depHndList.Push(depHnd);
+
+ STRESS_LOG2(
+ LF_INTEROP, LL_INFO1000,
+ "\t[RCWRefCache] Created DependentHandle 0x%p @ appended SLOT %d\n",
+ depHnd,
+ m_dwDepHndListFreeIndex
+ );
+
+            // Increment the index; the list is still full, so the next add will append again
+ m_dwDepHndListFreeIndex++;
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+ else
+ {
+ // Yes, there is a valid DependentHandle entry on the list, use that
+ OBJECTHANDLE depHnd = (OBJECTHANDLE) m_depHndList[m_dwDepHndListFreeIndex];
+
+ HndAssignHandle(depHnd, pRCW->GetExposedObject());
+ SetDependentHandleSecondary(depHnd, pCCW->GetObjectRef());
+
+ STRESS_LOG3(
+ LF_INTEROP, LL_INFO1000,
+ "\t[RCWRefCache 0x%p] Reused DependentHandle 0x%p @ valid SLOT %d\n",
+ this, depHnd, m_dwDepHndListFreeIndex);
+
+        // Increment the index so the next slot is used next time
+ m_dwDepHndListFreeIndex++;
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ LOG((LF_INTEROP, LL_INFO1000,
+ "\t[RCWRefCache 0x%p] Dependent handle cache status: Total SLOTs = %d, Next free SLOT index = %d\n",
+ this, (ULONG) m_depHndList.Size(), m_dwDepHndListFreeIndex
+ ));
+ }
+
+ return hr;
+}
diff --git a/src/vm/rcwrefcache.h b/src/vm/rcwrefcache.h
new file mode 100644
index 0000000000..ca79f86388
--- /dev/null
+++ b/src/vm/rcwrefcache.h
@@ -0,0 +1,104 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+/*============================================================
+**
+** Header: RCWRefCache.h
+**
+**
+** Purpose: Defines the RCWRefCache class.
+**          This class maintains a per-AppDomain cache that can be used
+**          by RCWs to reference other CCWs
+===========================================================*/
+
+#ifndef _H_RCWREFCACHE_
+#define _H_RCWREFCACHE_
+
+#ifdef FEATURE_COMINTEROP
+
+class RCWRefCache
+{
+public :
+ RCWRefCache(AppDomain *pAppDomain);
+ ~RCWRefCache();
+
+ //
+ // Add a reference from RCW to CCW
+ //
+ HRESULT AddReferenceFromRCWToCCW(RCW *pRCW, ComCallWrapper *pCCW);
+
+ //
+ // Enumerate all Jupiter RCWs in the RCW cache and do the callback
+    // I'm using a template here so there is no perf penalty
+ //
+ template<class Function, class T> HRESULT EnumerateAllJupiterRCWs(Function fn, T userData)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+
+ // Go through the RCW cache and call the callback for all Jupiter objects
+ RCWCache *pRCWCache = m_pAppDomain->GetRCWCacheNoCreate();
+ if (pRCWCache != NULL)
+ {
+ SHash<RCWCache::RCWCacheTraits> *pHashMap = &pRCWCache->m_HashMap;
+
+ for (SHash<RCWCache::RCWCacheTraits>::Iterator it = pHashMap->Begin(); it != pHashMap->End(); it++)
+ {
+ RCW *pRCW = *it;
+ _ASSERTE(pRCW != NULL);
+
+ if (pRCW->IsJupiterObject())
+ {
+ hr = fn(pRCW, userData);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ }
+ }
+ }
+
+ return S_OK;
+ }
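+
+    //
+    // Usage (see RCWWalker::WalkRCWs in rcwwalker.cpp): the callback receives each
+    // Jupiter RCW plus the user data and returns an HRESULT; a failure stops the walk:
+    //
+    //     hr = pRCWRefCache->EnumerateAllJupiterRCWs(RCWWalker::WalkOneRCW, pRCWRefCache);
+    //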
+
+ //
+ // Reset dependent handle cache by assigning 0 to m_dwDepHndListFreeIndex.
+ //
+ void ResetDependentHandles();
+
+ //
+ // Shrink the dependent handle cache if necessary (will destroy handles) and clear unused handles.
+ //
+ void ShrinkDependentHandles();
+
+private :
+ //
+ // Add RCW -> CCW reference using dependent handle
+ // May fail if OOM
+ //
+ HRESULT AddReferenceUsingDependentHandle(RCW *pRCW, ComCallWrapper *pCCW);
+
+private :
+ AppDomain *m_pAppDomain; // Domain
+
+ CQuickArrayList<OBJECTHANDLE> m_depHndList; // Internal DependentHandle cache
+ // non-NULL dependent handles followed by NULL slots
+ DWORD m_dwDepHndListFreeIndex; // The starting index where m_depHndList has available slots
+ DWORD m_dwShrinkHint; // Keep track of how many times we use less than half handles
+};
+
+#endif // FEATURE_COMINTEROP
+
+#endif // _H_RCWREFCACHE_
diff --git a/src/vm/rcwwalker.cpp b/src/vm/rcwwalker.cpp
new file mode 100644
index 0000000000..ea4de627c9
--- /dev/null
+++ b/src/vm/rcwwalker.cpp
@@ -0,0 +1,983 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+/*============================================================
+**
+** Class: RCWWalker
+**
+**
+** Purpose: The implementation of RCWWalker class which walks
+** RCW objects
+===========================================================*/
+
+#include "common.h"
+
+#include "runtimecallablewrapper.h"
+#include "comcallablewrapper.h"
+#include "rcwwalker.h"
+#include "olecontexthelpers.h"
+#include "rcwrefcache.h"
+#include "cominterfacemarshaler.h"
+#include "excep.h"
+#include "finalizerthread.h"
+
+const IID IID_ICLRServices = __uuidof(ICLRServices);
+
+const IID IID_ICCW = __uuidof(ICCW);
+
+const IID IID_IJupiterObject = __uuidof(IJupiterObject);
+
+const IID IID_IJupiterGCManager = __uuidof(IJupiterGCManager);
+
+const IID IID_IFindDependentWrappersCallback = __uuidof(IFindDependentWrappersCallback);
+
+VolatilePtr<IJupiterGCManager> RCWWalker::s_pGCManager = NULL; // Global GC manager pointer
+BOOL RCWWalker::s_bGCStarted = FALSE; // Has GC started?
+SVAL_IMPL_INIT(BOOL, RCWWalker, s_bIsGlobalPeggingOn, TRUE); // Do we need to peg every jupiter CCW?
+
+#ifndef DACCESS_COMPILE
+
+// Our implementation of ICLRServices provided to Jupiter via IJupiterGCManager::SetCLRServices.
+class CLRServicesImpl : public IUnknownCommon<ICLRServices>
+{
+private:
+ // flags for CollectGarbage(DWORD dwFlags)
+ enum {
+ GC_FOR_APPX_SUSPEND = 0x00000001
+ };
+public:
+ STDMETHOD(GarbageCollect)(DWORD dwFlags);
+ STDMETHOD(FinalizerThreadWait)();
+ STDMETHOD(DisconnectRCWsInCurrentApartment)();
+ STDMETHOD(CreateManagedReference)(IUnknown *pJupiterObject, ICCW **ppNewReference);
+ STDMETHOD(AddMemoryPressure)(UINT64 bytesAllocated);
+ STDMETHOD(RemoveMemoryPressure)(UINT64 bytesAllocated);
+};
+
+#pragma warning(push)
+#pragma warning(disable : 4702) // Disable unreachable code warning for RCWWalker_UnhandledExceptionFilter
+
+//
+// We never expect exceptions to be thrown outside of RCWWalker
+// So make sure we fail fast here, instead of going through normal
+// exception processing and failing later
+// This will make analyzing dumps much easier
+//
+inline LONG RCWWalker_UnhandledExceptionFilter(EXCEPTION_POINTERS* pExceptionPointers, PVOID pv)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if ((pExceptionPointers->ExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) ||
+ (pExceptionPointers->ExceptionRecord->ExceptionCode == STATUS_SINGLE_STEP))
+ {
+ // We don't want to fail fast on debugger exceptions
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ // Exceptions here are considered fatal - just fail fast
+ EEPolicy::HandleFatalError(COR_E_EXECUTIONENGINE, (UINT_PTR)GetIP(pExceptionPointers->ContextRecord), NULL, pExceptionPointers);
+
+ // We may trigger C4702 warning as we'll never reach here
+ // I've temporarily disabled the warning. See #pragma above
+ UNREACHABLE();
+
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+#pragma warning(pop)
+
+//
+// Release context-bound RCWs and Jupiter RCWs (which are free-threaded but context-bound)
+// in the current apartment
+//
+STDMETHODIMP CLRServicesImpl::DisconnectRCWsInCurrentApartment()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ ReleaseRCWsInCaches(GetCurrentCtxCookie());
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ return hr;
+}
+
+STDMETHODIMP CLRServicesImpl::GarbageCollect(DWORD dwFlags)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+ if (dwFlags & GC_FOR_APPX_SUSPEND) {
+ GCHeap::GetGCHeap()->GarbageCollect(2, TRUE, collection_blocking | collection_optimized);
+ }
+ else
+ GCHeap::GetGCHeap()->GarbageCollect();
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ return hr;
+}
+
+STDMETHODIMP CLRServicesImpl::AddMemoryPressure(UINT64 bytesAllocated)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ HRESULT hr = S_OK;
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCInterface::NewAddMemoryPressure(bytesAllocated);
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ return hr;
+}
+
+STDMETHODIMP CLRServicesImpl::RemoveMemoryPressure(UINT64 bytesAllocated)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ HRESULT hr = S_OK;
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCInterface::NewRemoveMemoryPressure(bytesAllocated);
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ return hr;
+}
+
+
+STDMETHODIMP CLRServicesImpl::FinalizerThreadWait()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ FinalizerThread::FinalizerThreadWait();
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ return hr;
+}
+
+//
+// Creates a proxy object that points to the given RCW
+// The proxy
+// 1. Has a managed reference pointing to the RCW, and therefore forms a cycle that can be resolved by GC
+// 2. Forwards data binding requests
+// For example:
+//
+// Grid <---- RCW Grid <------RCW
+// | ^ | ^
+// | | Becomes | |
+// v | v |
+// Rectangle Rectangle ----->Proxy
+//
+// Arguments
+// pTarget - The identity IUnknown* where a RCW points to (Grid, in this case)
+// Note that
+// 1) we can either create a new RCW or get back an old one from cache
+// 2) This pTarget could be a regular WinRT object (such as WinRT collection) for data binding
+// ppNewReference - The ICCW* for the proxy created
+// Jupiter will call ICCW to establish a jupiter reference
+//
+STDMETHODIMP CLRServicesImpl::CreateManagedReference(IUnknown *pTarget, ICCW **ppNewReference)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pTarget));
+ PRECONDITION(CheckPointer(ppNewReference));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ //
+ // QI for IUnknown to get the identity unknown
+ //
+ SafeComHolderPreemp<IUnknown> pIdentity;
+ IfFailThrow(SafeQueryInterfacePreemp(pTarget, IID_IUnknown, &pIdentity));
+
+ //
+        // Get an RCW for the target object
+ //
+ COMInterfaceMarshaler marshaler;
+ marshaler.Init(
+ pIdentity,
+ g_pBaseCOMObject,
+ GET_THREAD(),
+ RCW::CF_SupportsIInspectable // Returns a WinRT RCW
+ );
+
+ //
+ // Then create a proxy based on the RCW
+ //
+ {
+ GCX_COOP();
+
+ struct _gc {
+ OBJECTREF TargetObj;
+ OBJECTREF RetVal;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.TargetObj = marshaler.FindOrCreateObjectRef(&pTarget);
+
+ //
+ // Figure out the right IVector<T1>/IVectorView<T2>
+ //
+ MethodTable *pMT = gc.TargetObj->GetTrueMethodTable();
+
+ TypeHandle thArgs[2];
+
+ //
+ // This RCW could be strongly typed - figure out T1/T2 using metadata
+ //
+ MethodTable::InterfaceMapIterator it = pMT->IterateInterfaceMap();
+ while (it.Next())
+ {
+ MethodTable *pItfMT = it.GetInterface();
+ if (thArgs[0].IsNull() && pItfMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__ILISTGENERIC)))
+ {
+ thArgs[0] = pItfMT->GetInstantiation()[0];
+
+ // Are we done?
+ if (!thArgs[1].IsNull())
+ break;
+ }
+
+ if (thArgs[1].IsNull() && pItfMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__IREADONLYLISTGENERIC)))
+ {
+ thArgs[1] = pItfMT->GetInstantiation()[0];
+
+ // Are we done?
+ if (!thArgs[0].IsNull())
+ break;
+ }
+ }
+
+ if (thArgs[0].IsNull() || thArgs[1].IsNull())
+ {
+ //
+            // Try the RCW cache if we didn't find a match for both types and this is an RCW
+ //
+ if (pMT->IsComObjectType())
+ {
+ RCWHolder pRCW(GET_THREAD());
+ pRCW.Init(gc.TargetObj);
+
+ RCW::CachedInterfaceEntryIterator it = pRCW->IterateCachedInterfacePointers();
+ while (it.Next())
+ {
+ MethodTable *pItfMT = (MethodTable *)it.GetEntry()->m_pMT;
+
+                        // Unfortunately the iterator could return a NULL entry
+ if (pItfMT == NULL) continue;
+
+ if (thArgs[0].IsNull() && pItfMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__ILISTGENERIC)))
+ {
+ thArgs[0] = pItfMT->GetInstantiation()[0];
+
+ // Are we done?
+ if (!thArgs[1].IsNull())
+ break;
+ }
+
+ if (thArgs[1].IsNull() && pItfMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__IREADONLYLISTGENERIC)))
+ {
+ thArgs[1] = pItfMT->GetInstantiation()[0];
+
+ // Are we done?
+ if (!thArgs[0].IsNull())
+ break;
+ }
+ }
+ }
+ }
+
+ //
+ // If not found, use object (IInspectable*) as the last resort
+ //
+ if (thArgs[0].IsNull())
+ thArgs[0] = TypeHandle(g_pObjectClass);
+ if (thArgs[1].IsNull())
+ thArgs[1] = TypeHandle(g_pObjectClass);
+
+ //
+ // Instantiate ICustomPropertyProviderProxy<T1, T2>.CreateInstance
+ //
+ TypeHandle thCustomPropertyProviderProxy = TypeHandle(MscorlibBinder::GetClass(CLASS__ICUSTOMPROPERTYPROVIDERPROXY));
+
+ MethodTable *pthCustomPropertyProviderProxyExactMT = thCustomPropertyProviderProxy.Instantiate(Instantiation(thArgs, 2)).GetMethodTable();
+
+ MethodDesc *pCreateInstanceMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ MscorlibBinder::GetMethod(METHOD__ICUSTOMPROPERTYPROVIDERPROXY__CREATE_INSTANCE),
+ pthCustomPropertyProviderProxyExactMT,
+ FALSE,
+ Instantiation(),
+ FALSE);
+
+ //
+ // Call ICustomPropertyProviderProxy.CreateInstance
+ //
+ PREPARE_NONVIRTUAL_CALLSITE_USING_METHODDESC(pCreateInstanceMD);
+ DECLARE_ARGHOLDER_ARRAY(args, 1);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.TargetObj);
+
+ CALL_MANAGED_METHOD_RETREF(gc.RetVal, OBJECTREF, args);
+
+ CCWHolder pCCWHold = ComCallWrapper::InlineGetWrapper(&gc.RetVal);
+ *ppNewReference = (ICCW *)ComCallWrapper::GetComIPFromCCW(pCCWHold, IID_ICCW, /* pIntfMT = */ NULL);
+ GCPROTECT_END();
+ }
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ return hr;
+}
+
+//
+// Called when a Jupiter RCW is being created
+// We do one-time initialization of RCWWalker-related state here
+// This could throw
+//
+void RCWWalker::OnJupiterRCWCreated(RCW *pRCW, IJupiterObject *pJupiterObject)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pRCW));
+ PRECONDITION(CheckPointer(pJupiterObject));
+ }
+ CONTRACTL_END;
+
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] ----- RCWWalker::OnJupiterRCWCreated (RCW = 0x%p) BEGINS -----\n", pRCW));
+
+ //
+ // Retrieve IJupiterGCManager
+ //
+ if (!s_pGCManager)
+ {
+ SafeComHolderPreemp<IJupiterGCManager> pGCManager;
+ HRESULT hr = pJupiterObject->GetJupiterGCManager(&pGCManager);
+ if (SUCCEEDED(hr))
+ {
+ if (pGCManager == NULL)
+ {
+ LOG((LF_INTEROP, LL_INFO100, "\t[RCW Walker] ERROR: Failed to Retrieve IGCManager, IGCManager = NULL\n"));
+ COMPlusThrowHR(E_POINTER);
+ }
+
+ //
+            // Perform all operations that could fail here
+ //
+ NewHolder<CLRServicesImpl> pCLRServicesImpl = new CLRServicesImpl();
+ ReleaseHolder<ICLRServices> pCLRServices;
+ IfFailThrow(pCLRServicesImpl->QueryInterface(IID_ICLRServices, (void **)&pCLRServices));
+
+ // Temporarily switch back to coop and disable GC to avoid racing with the very first RCW walk
+ GCX_COOP();
+ GCX_FORBID();
+
+ if (FastInterlockCompareExchangePointer((IJupiterGCManager **)&s_pGCManager, (IJupiterGCManager *)pGCManager, NULL) == NULL)
+ {
+ //
+ // OK. It is time to do our RCWWalker initialization
+ // It's safe to do it here because we are in COOP and only one thread wins the race
+ //
+ LOG((LF_INTEROP, LL_INFO100, "\t[RCW Walker] Assigning RCWWalker::s_pIGCManager = 0x%p\n", (void *)pGCManager));
+
+ pGCManager.SuppressRelease();
+ pCLRServicesImpl.SuppressRelease();
+ pCLRServices.SuppressRelease();
+
+ LOG((LF_INTEROP, LL_INFO100, "\t[RCW Walker] Calling IGCManager::SetCLRServices(0x%p)\n", (void *)pCLRServices));
+ pGCManager->SetCLRServices(pCLRServices);
+ }
+ }
+ else
+ {
+ LOG((LF_INTEROP, LL_INFO100, "\t[RCW Walker] ERROR: Failed to Retrieve IGCManager, hr = 0x%x\n", hr));
+ COMPlusThrowHR(hr);
+ }
+ }
+
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] ----- RCWWalker::OnJupiterRCWCreated (RCW = 0x%p) ENDS ----- \n", pRCW));
+}
+
+//
+// Called after a Jupiter RCW has been created
+// This should never throw
+//
+void RCWWalker::AfterJupiterRCWCreated(RCW *pRCW)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pRCW));
+ PRECONDITION(pRCW->IsJupiterObject());
+ }
+ CONTRACTL_END;
+
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] ----- RCWWalker::AfterJupiterRCWCreated (RCW = 0x%p) BEGINS ----- \n", pRCW));
+
+ IJupiterObject *pJupiterObject = pRCW->GetJupiterObject();
+
+ //
+ // Notify Jupiter that we've created a new RCW for this Jupiter object
+ // To avoid surprises, we should notify them before we fire the first AfterAddRef
+ //
+ STRESS_LOG2(LF_INTEROP, LL_INFO100, "[RCW Walker] Calling IJupiterObject::Connect (IJupiterObject = 0x%p, RCW = 0x%p)\n", pJupiterObject, pRCW);
+ pJupiterObject->Connect();
+
+ //
+ // Send out AfterAddRef callbacks to notify Jupiter we've done AddRef for certain interfaces
+    // We should do this *after* we've made an AddRef because we should never
+    // be in a state where reported refs > actual refs
+ //
+
+ // Send out AfterAddRef for cached IUnknown
+ RCWWalker::AfterInterfaceAddRef(pRCW);
+
+ if (!pRCW->IsURTAggregated())
+ {
+ // Send out AfterAddRef for cached IJupiterObject
+ RCWWalker::AfterInterfaceAddRef(pRCW);
+ }
+
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] ----- RCWWalker::AfterJupiterRCWCreated (RCW = 0x%p) ENDS ----- \n", pRCW));
+}
+
+//
+// Called before a Jupiter RCW is destroyed (the same lifetime as a short weak handle)
+//
+void RCWWalker::BeforeJupiterRCWDestroyed(RCW *pRCW)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pRCW));
+ PRECONDITION(pRCW->IsJupiterObject());
+ }
+ CONTRACTL_END;
+
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] ----- RCWWalker::BeforeJupiterRCWDestroyed (RCW = 0x%p) BEGINS ----- \n", pRCW));
+
+ IJupiterObject *pJupiterObject = pRCW->GetJupiterObject();
+ _ASSERTE(pJupiterObject != NULL);
+
+ //
+    // Notify Jupiter that we are about to destroy an RCW (same timing as short weak handle)
+ // for this Jupiter object.
+ // They need this information to disconnect weak refs and stop firing events,
+ // so that they can avoid resurrecting the Jupiter object (not the RCW - we prevent that)
+ // We only call this inside GC, so don't need to switch to preemptive here
+ // Ignore the failure as there is no way we can handle that failure during GC
+ //
+ STRESS_LOG2(LF_INTEROP, LL_INFO100, "[RCW Walker] Calling IJupiterObject::Disconnect (IJupiterObject = 0x%p, RCW = 0x%p)\n", pJupiterObject, pRCW);
+ pJupiterObject->Disconnect();
+
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] ----- RCWWalker::BeforeJupiterRCWDestroyed (RCW = 0x%p) ENDS ----- \n", pRCW));
+}
+
+//
+// Cleanup stuff when EE is about to unload
+//
+void RCWWalker::OnEEShutdown()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (s_pGCManager)
+ {
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] Releasing RCWWalker::s_pIGCManager 0x%p\n", s_pGCManager));
+
+ // Make sure s_pGCManager is always either NULL or a valid IJupiterGCManager *
+ // this will make crash easier to diagnose
+ IJupiterGCManager *pGCManager = FastInterlockExchangePointer((IJupiterGCManager **)&s_pGCManager, NULL);
+ if (pGCManager != NULL)
+ pGCManager->Release();
+ }
+}
+
+//
+// Walk all the Jupiter RCWs in all AppDomains and build references from RCW -> CCW as we go
+//
+void RCWWalker::WalkRCWs()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ BOOL bWalkFailed = FALSE;
+
+ //
+ // Walk every AppDomain
+ // Use UnsafeAppDomain iterator to avoid taking locks
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ UnsafeAppDomainIterator appDomainIterator(TRUE);
+ appDomainIterator.Init();
+ while (appDomainIterator.Next())
+ {
+ AppDomain *pDomain = appDomainIterator.GetDomain();
+
+ RCWRefCache *pRCWRefCache = pDomain->GetRCWRefCache();
+ _ASSERTE(pRCWRefCache != NULL);
+
+ STRESS_LOG2(LF_INTEROP, LL_INFO100, "[RCW Walker] Walking all Jupiter RCWs in AppDomain 0x%p, RCWRefCache 0x%p\n", pDomain, pRCWRefCache);
+
+ //
+ // Reset the cache
+ //
+ pRCWRefCache->ResetDependentHandles();
+
+ //
+ // Enumerate all Jupiter RCWs in that AppDomain
+ //
+ hr = pRCWRefCache->EnumerateAllJupiterRCWs(RCWWalker::WalkOneRCW, pRCWRefCache);
+
+ //
+ // Shrink the dependent handle cache if necessary and clear unused handles.
+ //
+ pRCWRefCache->ShrinkDependentHandles();
+
+ if (FAILED(hr))
+ {
+ break;
+ }
+ }
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(RethrowCorruptingExceptions) // Make sure we crash on AV (instead of swallowing everything)
+
+ if (FAILED(hr))
+ {
+ // Remember the fact that we've failed and stop walking
+        STRESS_LOG1(LF_INTEROP, LL_INFO100, "[RCW Walker] RCW walk failed, hr = 0x%x\n", hr);
+ bWalkFailed = TRUE;
+
+ STRESS_LOG0(LF_INTEROP, LL_INFO100, "[RCW Walker] Turning on global pegging flag as fail-safe\n");
+ VolatileStore(&s_bIsGlobalPeggingOn, TRUE);
+ }
+
+ //
+ // Let Jupiter know RCW walk is done and they need to:
+    // 1. Unpeg all CCWs if the CCW needs to be unpegged (when the CCW is only reachable by other Jupiter RCWs)
+ // 2. Peg all CCWs if the CCW needs to be pegged (when the above condition is not true)
+ // 3. Unlock reference cache when they are done
+ //
+ // If the walk has failed - Jupiter doesn't need to do anything and could just return immediately
+ //
+    // Note: IGCManager should be free-threaded as it will be called on arbitrary threads
+ //
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] Calling IGCManager::OnRCWWalkFinished on 0x%p, bWalkFailed = %d\n", s_pGCManager, bWalkFailed));
+ _ASSERTE(s_pGCManager);
+ s_pGCManager->OnRCWWalkFinished(bWalkFailed);
+
+ STRESS_LOG0 (LF_INTEROP, LL_INFO100, "[RCW Walker] RCW Walk finished\n");
+}
+
+//
+// Callback implementation of IFindDependentWrappersCallback
+//
+class CFindDependentWrappersCallback : public IFindDependentWrappersCallback
+{
+public :
+ CFindDependentWrappersCallback(RCW *pRCW, RCWRefCache*pRCWRefCache)
+ :m_pRCW(pRCW), m_pRCWRefCache(pRCWRefCache)
+ {
+#ifdef _DEBUG
+ m_hr = S_OK;
+ m_dwCreatedRefs = 0;
+#endif // _DEBUG
+ }
+
+ STDMETHOD_(ULONG, AddRef)()
+ {
+
+ // Lifetime maintained by stack - we don't care about ref counts
+ return 1;
+ }
+
+ STDMETHOD_(ULONG, Release)()
+ {
+ // Lifetime maintained by stack - we don't care about ref counts
+ return 1;
+ }
+
+ STDMETHOD(QueryInterface)(REFIID riid, void **ppvObject)
+ {
+ if (IsEqualIID(riid, IID_IUnknown) || IsEqualIID(riid, IID_IFindDependentWrappersCallback))
+ {
+ *ppvObject = this;
+ return S_OK;
+ }
+ else
+ {
+ *ppvObject = NULL;
+ return E_NOINTERFACE;
+ }
+ }
+
+
+ STDMETHOD(OnFoundDependentWrapper)(ICCW *pUnk)
+ {
+#ifdef _DEBUG
+ _ASSERTE(SUCCEEDED(m_hr) && W("Should not receive OnFoundDependentWrapper again if failed"));
+#endif // _DEBUG
+ _ASSERTE(pUnk != NULL);
+
+ ComCallWrapper *pCCW = MapIUnknownToWrapper(pUnk);
+ _ASSERTE(pCCW != NULL);
+
+ LOG((LF_INTEROP, LL_INFO1000, "\t[RCW Walker] IFindDependentWrappersCallback::OnFoundDependentWrapper being called: RCW 0x%p, CCW 0x%p\n", m_pRCW, pCCW));
+
+ //
+ // Skip dependent handle creation if RCW/CCW points to the same managed object
+ //
+ if (m_pRCW->GetSyncBlock() == pCCW->GetSyncBlock())
+ return S_OK;
+
+ //
+ // Jupiter might return CCWs with outstanding references that are either :
+ // 1. Neutered - in this case it is unsafe to touch m_ppThis
+ // 2. RefCounted handle NULLed out by GC
+ //
+ // Skip those to avoid crashes
+ //
+ if (pCCW->GetSimpleWrapper()->IsNeutered() ||
+ pCCW->GetObjectRef() == NULL)
+ return S_OK;
+
+ //
+ // Add a reference from pRCW -> pCCW so that GC knows about this reference
+ //
+ STRESS_LOG4(
+ LF_INTEROP, LL_INFO1000,
+ "\t[RCW Walker] Adding reference: RCW 0x%p (Managed Object = 0x%p) -> CCW 0x%p (Managed Object = 0x%p)\n",
+ m_pRCW, OBJECTREFToObject(m_pRCW->GetExposedObject()), pCCW, OBJECTREFToObject(pCCW->GetObjectRef())
+ );
+
+ HRESULT hr = m_pRCWRefCache->AddReferenceFromRCWToCCW(m_pRCW, pCCW);
+
+#ifdef _DEBUG
+ m_dwCreatedRefs++;
+#endif // _DEBUG
+
+ if (FAILED(hr))
+ {
+#ifdef _DEBUG
+ m_hr = hr;
+#endif // _DEBUG
+ STRESS_LOG1(LF_INTEROP, LL_INFO1000, "[RCW Walker] Adding reference failed, hr = 0x%x", hr);
+
+ return E_FAIL;
+ }
+
+ return S_OK;
+ }
+
+#ifdef _DEBUG
+ HRESULT GetHRESULT()
+ {
+
+ return m_hr;
+ }
+
+ DWORD GetCreatedRefs()
+ {
+
+ return m_dwCreatedRefs;
+ }
+#endif // _DEBUG
+
+private :
+ RCW *m_pRCW;
+ RCWRefCache *m_pRCWRefCache;
+
+#ifdef _DEBUG
+ HRESULT m_hr; // Holds the last failed HRESULT to make sure our contract with Jupiter is correctly honored
+ DWORD m_dwCreatedRefs; // Total number of refs created from this RCW
+#endif // _DEBUG
+};
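+
+//
+// Usage (see RCWWalker::WalkOneRCW below): the callback is stack-allocated and
+// handed to Jupiter, which invokes OnFoundDependentWrapper once per dependent CCW:
+//
+//     CFindDependentWrappersCallback callback(pRCW, pRCWRefCache);
+//     hr = pJupiterObject->FindDependentWrappers(&callback);
+//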
+
+//
+// Ask Jupiter all the CCWs referenced (through native code) by this RCW and build reference for RCW -> CCW
+// so that GC knows about this reference
+//
+HRESULT RCWWalker::WalkOneRCW(RCW *pRCW, RCWRefCache *pRCWRefCache)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pRCW));
+ }
+ CONTRACTL_END;
+
+ LOG((LF_INTEROP, LL_INFO1000, "\t[RCW Walker] ----- RCWWalker::WalkOneRCW (RCW = 0x%p) BEGINS ----- \n", pRCW));
+
+ _ASSERTE(pRCW->IsJupiterObject());
+
+ HRESULT hr = S_OK;
+
+ // Get IJupiterObject * from RCW - we can call IJupiterObject* from any thread and it won't be a proxy
+ IJupiterObject *pJupiterObject = pRCW->GetJupiterObject();
+ _ASSERTE(pJupiterObject);
+
+ _ASSERTE(pRCW->GetExposedObject() != NULL);
+
+ CFindDependentWrappersCallback callback(pRCW, pRCWRefCache);
+
+ STRESS_LOG2 (LF_INTEROP, LL_INFO1000, "\t[RCW Walker] Walking RCW 0x%p (Managed Object = 0x%p)\n", pRCW, OBJECTREFToObject(pRCW->GetExposedObject()));
+
+    LOG((LF_INTEROP, LL_INFO1000, "\t[RCW Walker] Calling IJupiterObject::FindDependentWrappers on RCW 0x%p\n", pRCW));
+ hr = pJupiterObject->FindDependentWrappers(&callback);
+
+#ifdef _DEBUG
+ if (FAILED(callback.GetHRESULT()))
+ {
+        _ASSERTE(callback.GetHRESULT() == hr && W("FindDependentWrappers should return the failed result from the callback method OnFoundDependentWrapper"));
+ }
+
+ LOG((LF_INTEROP, LL_INFO1000, "\t[RCW Walker] Total %d refs created for RCW 0x%p\n", callback.GetCreatedRefs(), pRCW));
+#endif // _DEBUG
+
+ LOG((LF_INTEROP, LL_INFO1000, "\t[RCW Walker] ----- RCWWalker::WalkOneRCW (RCW = 0x%p) ENDS -----\n", pRCW));
+ return hr;
+}
+
+typedef void (*OnGCEventProc)();
+inline void SetupFailFastFilterAndCall(OnGCEventProc pGCEventProc)
+{
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_MODE_ANY;
+
+ //
+ // Use RCWWalker_UnhandledExceptionFilter to fail fast and early in case any exception is thrown
+ // See code:RCWWalker_UnhandledExceptionFilter for more details why we need this
+ //
+ PAL_TRY_NAKED
+ {
+ // Call the internal worker function which has the runtime contracts
+ pGCEventProc();
+ }
+ PAL_EXCEPT_FILTER_NAKED(RCWWalker_UnhandledExceptionFilter, NULL)
+ {
+ _ASSERT(!W("Should not get here"));
+ }
+ PAL_ENDTRY_NAKED
+}
+
+//
+// Called when GC started
+// We do most of our work here
+//
+// Note that we could get nested GCStart/GCEnd calls, such as :
+// GCStart for Gen 2 background GC
+//         GCStart for Gen 0/1 foreground GC
+// GCEnd for Gen 0/1 foreground GC
+// ....
+// GCEnd for Gen 2 background GC
+//
+// The nCondemnedGeneration >= 2 check takes care of this nesting problem
+//
+void RCWWalker::OnGCStarted(int nCondemnedGeneration)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] ----- RCWWalker::OnGCStarted (nCondemnedGeneration = %d) BEGINS ----- \n", nCondemnedGeneration));
+
+ if (RCWWalker::NeedToWalkRCWs()) // Have we seen Jupiter RCWs?
+ {
+ if (nCondemnedGeneration >= 2) // We are only doing walk in Gen2 GC
+ {
+ // Make sure we fail fast if anything goes wrong when we interact with Jupiter
+ SetupFailFastFilterAndCall(RCWWalker::OnGCStartedWorker);
+ }
+ else
+ {
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] GC skipped: Not a Gen2 GC \n"));
+ }
+ }
+ else
+ {
+
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] GC skipped: No Jupiter RCWs seen \n"));
+ }
+
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] ----- RCWWalker::OnGCStarted (nCondemnedGeneration = %d) ENDS -----\n", nCondemnedGeneration));
+}
+
+//
+// Called when GC finished
+//
+// Note that we could get nested GCStart/GCEnd calls, such as :
+// GCStart for Gen 2 background GC
+// GCStart for Gen 0/1 foreground GC
+// GCEnd for Gen 0/1 foreground GC
+// ....
+// GCEnd for Gen 2 background GC
+//
+// The nCondemnedGeneration >= 2 check takes care of this nesting problem
+//
+void RCWWalker::OnGCFinished(int nCondemnedGeneration)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] ----- RCWWalker::OnGCFinished(nCondemnedGeneration = %d) BEGINS ----- \n", nCondemnedGeneration));
+
+ //
+ // Note that we need to check in both OnGCFinished and OnGCStarted,
+ // as there could be multiple OnGCFinished calls with nCondemnedGeneration < 2 in the case of a Gen 2 GC
+ //
+ // Also, if this is a background GC, the NeedToWalkRCWs predicate may change from FALSE to TRUE while
+ // the GC is running. We don't want to do any work in that case (i.e. if s_bGCStarted is FALSE).
+ //
+ if (RCWWalker::NeedToWalkRCWs() && // Have we seen Jupiter RCWs?
+ s_bGCStarted && // Had we seen Jupiter RCWs when the GC started?
+ nCondemnedGeneration >= 2 // We are only doing walk in Gen2 GC
+ )
+ {
+ // Make sure we fail fast if anything goes wrong when we interact with Jupiter
+ SetupFailFastFilterAndCall(RCWWalker::OnGCFinishedWorker);
+ }
+
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] ----- RCWWalker::OnGCFinished(nCondemnedGeneration = %d) ENDS ----- \n", nCondemnedGeneration));
+}
+
+void RCWWalker::OnGCStartedWorker()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ STRESS_LOG0 (LF_INTEROP, LL_INFO100, "[RCW Walker] Gen 2 GC Started - Ready to walk Jupiter RCWs\n");
+
+ // Due to the nesting GCStart/GCEnd pairs (see comment for this function), we need to check
+ // those flags inside nCondemnedGeneration >= 2 check
+ _ASSERTE(!s_bGCStarted);
+ _ASSERTE(VolatileLoad(&s_bIsGlobalPeggingOn));
+
+ s_bGCStarted = TRUE;
+
+ _ASSERTE(s_pGCManager);
+
+ //
+ // Let Jupiter know we are about to walk RCWs so that they can lock their reference cache
+ // Note that Jupiter doesn't need to unpeg all CCWs at this point and they can do the pegging/unpegging in OnRCWWalkFinished
+ //
+ // Note: IGCManager should be free-threaded as it will be called on arbitrary threads
+ //
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] Calling IGCManager::OnGCStarted on 0x%p\n", s_pGCManager));
+ s_pGCManager->OnGCStarted();
+
+ // From this point on, Jupiter decides whether a CCW should be pegged or not, as the global pegging flag is now off
+ s_bIsGlobalPeggingOn = FALSE;
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] Global pegging flag is off\n"));
+
+ //
+ // OK. Time to walk all the Jupiter RCWs
+ //
+ WalkRCWs();
+}
+
+void RCWWalker::OnGCFinishedWorker()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //
+ // Let Jupiter know RCW walk is done and they need to:
+ // 1. Unpeg all CCWs if the CCW needs to be unpegged (when the CCW is only reachable by other Jupiter RCWs)
+ // 2. Peg all CCWs if the CCW needs to be pegged (when the above condition is not true)
+ // 3. Unlock reference cache when they are done
+ //
+ // If the walk has failed - Jupiter doesn't need to do anything and could just return immediately
+ //
+ // Note: We can call IJupiterGCManager from any thread; this is guaranteed by Jupiter
+ //
+ _ASSERTE(s_pGCManager);
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] Calling IGCManager::OnGCFinished on 0x%p\n", s_pGCManager));
+ s_pGCManager->OnGCFinished();
+
+ s_bIsGlobalPeggingOn = TRUE;
+ LOG((LF_INTEROP, LL_INFO100, "[RCW Walker] Global pegging flag is on\n"));
+
+ s_bGCStarted = FALSE;
+
+ STRESS_LOG0 (LF_INTEROP, LL_INFO100, "[RCW Walker] Gen 2 GC Finished\n");
+}
+
+#endif // DACCESS_COMPILE
diff --git a/src/vm/rcwwalker.h b/src/vm/rcwwalker.h
new file mode 100644
index 0000000000..a2272eafbb
--- /dev/null
+++ b/src/vm/rcwwalker.h
@@ -0,0 +1,151 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+/*============================================================
+**
+** Header: RCWWalker.h
+**
+**
+** Purpose: Solve the native/managed cyclic reference issue by
+** walking RCW objects
+**
+==============================================================*/
+
+#ifndef _H_RCWWALKER_
+#define _H_RCWWALKER_
+
+#ifdef FEATURE_COMINTEROP
+
+#include "internalunknownimpl.h"
+#include "utilcode.h"
+#include "runtimecallablewrapper.h"
+
+
+//
+// RCW Walker
+// Walks Jupiter RCW objects and creates references from each RCW to the CCWs it references (on the native side)
+//
+class RCWWalker
+{
+private :
+ static VolatilePtr<IJupiterGCManager> s_pGCManager; // The one and only GCManager instance
+ static BOOL s_bGCStarted; // Has GC started?
+ SVAL_DECL(BOOL, s_bIsGlobalPeggingOn); // Do we need to peg every CCW?
+
+public :
+#ifndef DACCESS_COMPILE
+ static void OnJupiterRCWCreated(RCW *pRCW, IJupiterObject *pJupiterObject);
+ static void AfterJupiterRCWCreated(RCW *pRCW);
+ static void BeforeJupiterRCWDestroyed(RCW *pRCW);
+ static void OnEEShutdown();
+
+ //
+ // Send out an AfterAddRef callback to notify Jupiter we've done an AddRef
+ // We should do this *after* the AddRef because we should never
+ // be in a state where reported refs > actual refs
+ //
+ FORCEINLINE static void AfterInterfaceAddRef(RCW *pRCW)
+ {
+
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ IJupiterObject *pJupiterObject = pRCW->GetJupiterObject();
+ if (pJupiterObject)
+ {
+ STRESS_LOG2(LF_INTEROP, LL_INFO100, "[RCW Walker] Calling IJupiterObject::AfterAddRef (IJupiterObject = 0x%p, RCW = 0x%p)\n", pJupiterObject, pRCW);
+ pJupiterObject->AfterAddRef();
+ }
+ }
+
+ //
+ // Send out a BeforeRelease callback for every cached interface pointer
+ // This must happen before calling Release because we should never be in a
+ // state where reported refs > actual refs
+ //
+ FORCEINLINE static void BeforeInterfaceRelease(RCW *pRCW)
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ IJupiterObject *pJupiterObject = pRCW->GetJupiterObject();
+ if (pJupiterObject)
+ {
+ STRESS_LOG2(LF_INTEROP, LL_INFO100, "[RCW Walker] Calling IJupiterObject::BeforeRelease before Release (IJupiterObject = 0x%p, RCW = 0x%p)\n", pJupiterObject, pRCW);
+ pJupiterObject->BeforeRelease();
+ }
+ }
+
+
+#endif // !DACCESS_COMPILE
+
+
+public :
+ //
+ // Called in ComCallableWrapper::IsWrapperActive
+ // Used to override the individual pegging flag on CCWs and force pegging of every Jupiter-referenced CCW
+ // See IsWrapperActive for more details
+ //
+ static FORCEINLINE BOOL IsGlobalPeggingOn()
+ {
+ // We need this weird cast because s_bIsGlobalPeggingOn is used in DAC and defined as
+ // __GlobalVal in DAC builds
+ // C++'s operator magic doesn't work when two levels of operator overloading are involved...
+ return VolatileLoad((BOOL *)&s_bIsGlobalPeggingOn);
+ }
+
+#ifndef DACCESS_COMPILE
+ //
+ // Tells the GC whether walking all the Jupiter RCWs is necessary, which should only happen
+ // if we have seen Jupiter RCWs
+ //
+ static FORCEINLINE BOOL NeedToWalkRCWs()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (((IJupiterGCManager *)s_pGCManager) != NULL);
+ }
+
+ //
+ // Whether a GC has been started and we need to do an RCW walk
+ //
+ static FORCEINLINE BOOL HasGCStarted()
+ {
+ return s_bGCStarted;
+ }
+
+ //
+ // Called when GC started
+ // We do most of our work here
+ //
+ static void OnGCStarted(int nCondemnedGeneration);
+
+ //
+ // Called when GC finished
+ //
+ static void OnGCFinished(int nCondemnedGeneration);
+
+private :
+ static void OnGCStartedWorker();
+ static void OnGCFinishedWorker();
+ static void WalkRCWs();
+ static HRESULT WalkOneRCW(RCW *pRCW, RCWRefCache *pRCWRefCache);
+#endif // DACCESS_COMPILE
+};
+
+#endif // FEATURE_COMINTEROP
+
+#endif // _H_RCWWALKER_
diff --git a/src/vm/readytoruninfo.cpp b/src/vm/readytoruninfo.cpp
new file mode 100644
index 0000000000..df659582d2
--- /dev/null
+++ b/src/vm/readytoruninfo.cpp
@@ -0,0 +1,317 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: ReadyToRunInfo.cpp
+//
+
+//
+// Runtime support for Ready to Run
+// ===========================================================================
+
+#include "common.h"
+
+#include "dbginterface.h"
+#include "compile.h"
+
+using namespace NativeFormat;
+
+IMAGE_DATA_DIRECTORY * ReadyToRunInfo::FindSection(DWORD type)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ PTR_READYTORUN_SECTION pSections = dac_cast<PTR_READYTORUN_SECTION>(dac_cast<TADDR>(m_pHeader) + sizeof(READYTORUN_HEADER));
+ for (DWORD i = 0; i < m_pHeader->NumberOfSections; i++)
+ {
+ // Verify that section types are sorted
+ _ASSERTE(i == 0 || (pSections[i-1].Type < pSections[i].Type));
+
+ READYTORUN_SECTION * pSection = pSections + i;
+ if (pSection->Type == type)
+ return &pSection->Section;
+ }
+ return NULL;
+}
+
+MethodDesc * ReadyToRunInfo::GetMethodDescForEntryPoint(PCODE entryPoint)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+#ifdef _TARGET_AMD64_
+ // A normal method entry point is always 8 byte aligned, but a funclet can start at an odd address.
+ // Since PtrHashMap can't handle odd pointers, check for this case and return NULL.
+ if ((entryPoint & 0x1) != 0)
+ return NULL;
+#endif
+
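+ // The hash map is keyed on the entry point address itself (the address is
+ // passed as both key and comparison value), so a hit returns the MethodDesc
+ // previously recorded for this exact entry point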
+ TADDR val = (TADDR)m_entryPointToMethodDescMap.LookupValue(PCODEToPINSTR(entryPoint), (LPVOID)PCODEToPINSTR(entryPoint));
+ if (val == (TADDR)INVALIDENTRY)
+ return NULL;
+ return dac_cast<PTR_MethodDesc>(val);
+}
+
+PTR_BYTE ReadyToRunInfo::GetDebugInfo(PTR_RUNTIME_FUNCTION pRuntimeFunction)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ THROWS;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ IMAGE_DATA_DIRECTORY * pDebugInfoDir = FindSection(READYTORUN_SECTION_DEBUG_INFO);
+ if (pDebugInfoDir == NULL)
+ return NULL;
+
+ SIZE_T methodIndex = pRuntimeFunction - m_pRuntimeFunctions;
+ _ASSERTE(methodIndex < m_nRuntimeFunctions);
+
+ NativeArray debugInfoIndex(&m_nativeReader, pDebugInfoDir->VirtualAddress);
+
+ uint offset;
+ if (!debugInfoIndex.TryGetAt((DWORD)methodIndex, &offset))
+ return NULL;
+
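+ // Each index entry starts with an unsigned 'look back' delta: zero means the
+ // debug info bytes immediately follow it, while a non-zero value means this
+ // method shares debug info stored lookBack bytes before this entry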
+ uint lookBack;
+ uint debugInfoOffset = m_nativeReader.DecodeUnsigned(offset, &lookBack);
+
+ if (lookBack != 0)
+ debugInfoOffset = offset - lookBack;
+
+ return dac_cast<PTR_BYTE>(m_pLayout->GetBase()) + debugInfoOffset;
+}
+
+#ifndef DACCESS_COMPILE
+
+BOOL ReadyToRunInfo::IsReadyToRunEnabled()
+{
+ STANDARD_VM_CONTRACT;
+
+ static ConfigDWORD configReadyToRun;
+ return configReadyToRun.val(CLRConfig::EXTERNAL_ReadyToRun);
+}
+
+PTR_ReadyToRunInfo ReadyToRunInfo::Initialize(Module * pModule, AllocMemTracker *pamTracker)
+{
+ STANDARD_VM_CONTRACT;
+
+ PEFile * pFile = pModule->GetFile();
+
+ // Ignore ReadyToRun for introspection-only loads
+ if (pFile->IsIntrospectionOnly())
+ return NULL;
+
+ if (!pFile->HasLoadedIL())
+ return NULL;
+
+ PEImageLayout * pLayout = pFile->GetLoadedIL();
+ if (!pLayout->HasReadyToRunHeader())
+ return NULL;
+
+ if (!IsReadyToRunEnabled())
+ return NULL;
+
+ if (!pLayout->IsNativeMachineFormat())
+ {
+#ifdef FEATURE_CORECLR
+ // For CoreCLR, be strict about disallowing machine mismatches.
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+#else
+ return NULL;
+#endif
+ }
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ // Ignore ReadyToRun during NGen
+ if (IsCompilationProcess() && !IsNgenPDBCompilationProcess())
+ return NULL;
+#endif
+
+#ifndef CROSSGEN_COMPILE
+ // The file must have been loaded using LoadLibrary
+ if (!pLayout->IsRelocated())
+ return NULL;
+#endif
+
+ READYTORUN_HEADER * pHeader = pLayout->GetReadyToRunHeader();
+
+ // Ignore the content if the image major version is higher than the major version currently supported by the runtime
+ if (pHeader->MajorVersion > READYTORUN_MAJOR_VERSION)
+ return NULL;
+
+ LoaderHeap *pHeap = pModule->GetLoaderAllocator()->GetHighFrequencyHeap();
+ void * pMemory = pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(ReadyToRunInfo)));
+
+ return new (pMemory) ReadyToRunInfo(pModule, pLayout, pHeader);
+}
+
+ReadyToRunInfo::ReadyToRunInfo(Module * pModule, PEImageLayout * pLayout, READYTORUN_HEADER * pHeader)
+ : m_pModule(pModule), m_pLayout(pLayout), m_pHeader(pHeader), m_Crst(CrstLeafLock)
+{
+ STANDARD_VM_CONTRACT;
+
+ IMAGE_DATA_DIRECTORY * pRuntimeFunctionsDir = FindSection(READYTORUN_SECTION_RUNTIME_FUNCTIONS);
+ if (pRuntimeFunctionsDir != NULL)
+ {
+ m_pRuntimeFunctions = (RUNTIME_FUNCTION *)pLayout->GetDirectoryData(pRuntimeFunctionsDir);
+ m_nRuntimeFunctions = pRuntimeFunctionsDir->Size / sizeof(RUNTIME_FUNCTION);
+ }
+ else
+ {
+ m_nRuntimeFunctions = 0;
+ }
+
+ IMAGE_DATA_DIRECTORY * pImportSectionsDir = FindSection(READYTORUN_SECTION_IMPORT_SECTIONS);
+ if (pImportSectionsDir != NULL)
+ {
+ m_pImportSections = (CORCOMPILE_IMPORT_SECTION*)pLayout->GetDirectoryData(pImportSectionsDir);
+ m_nImportSections = pImportSectionsDir->Size / sizeof(CORCOMPILE_IMPORT_SECTION);
+ }
+ else
+ {
+ m_nImportSections = 0;
+ }
+
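+ // The native reader spans the whole loaded image, so the offsets decoded from
+ // the sections below are RVAs relative to the image base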
+ m_nativeReader = NativeReader((byte *)pLayout->GetBase(), pLayout->GetVirtualSize());
+
+ IMAGE_DATA_DIRECTORY * pEntryPointsDir = FindSection(READYTORUN_SECTION_METHODDEF_ENTRYPOINTS);
+ if (pEntryPointsDir != NULL)
+ {
+ m_methodDefEntryPoints = NativeArray(&m_nativeReader, pEntryPointsDir->VirtualAddress);
+ }
+
+ {
+ LockOwner lock = {&m_Crst, IsOwnerOfCrst};
+ m_entryPointToMethodDescMap.Init(TRUE, &lock);
+ }
+}
+
+PCODE ReadyToRunInfo::GetEntryPoint(MethodDesc * pMD, BOOL fFixups /*=TRUE*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ // READYTORUN: FUTURE: Support for generics
+ if (pMD->HasClassOrMethodInstantiation())
+ return NULL;
+
+ mdToken token = pMD->GetMemberDef();
+ int rid = RidFromToken(token);
+ if (rid == 0)
+ return NULL;
+
+ uint offset;
+ if (!m_methodDefEntryPoints.TryGetAt(rid - 1, &offset))
+ return NULL;
+
+ uint id;
+ offset = m_nativeReader.DecodeUnsigned(offset, &id);
+
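+ // The decoded value packs flags into its low bit(s):
+ //   bit 0 - set if the method has a fixup list that must be applied first
+ //   bit 1 - (only meaningful when bit 0 is set) the fixup list offset is
+ //           encoded as an extra unsigned delta subtracted from the current offset
+ // The remaining bits form the index into the runtime function table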
+ if (id & 1)
+ {
+ if (id & 2)
+ {
+ uint val;
+ m_nativeReader.DecodeUnsigned(offset, &val);
+ offset -= val;
+ }
+
+ if (fFixups)
+ {
+ if (!m_pModule->FixupDelayList(dac_cast<TADDR>(m_pLayout->GetBase()) + offset))
+ return NULL;
+ }
+
+ id >>= 2;
+ }
+ else
+ {
+ id >>= 1;
+ }
+
+ _ASSERTE(id < m_nRuntimeFunctions);
+ PCODE pEntryPoint = dac_cast<TADDR>(m_pLayout->GetBase()) + m_pRuntimeFunctions[id].BeginAddress;
+
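+ // Cache the entry point -> MethodDesc mapping so that
+ // GetMethodDescForEntryPoint can perform the reverse lookup later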
+ {
+ CrstHolder ch(&m_Crst);
+
+ if (m_entryPointToMethodDescMap.LookupValue(PCODEToPINSTR(pEntryPoint), (LPVOID)PCODEToPINSTR(pEntryPoint)) == (LPVOID)INVALIDENTRY)
+ m_entryPointToMethodDescMap.InsertValue(PCODEToPINSTR(pEntryPoint), pMD);
+ }
+
+ if (g_pDebugInterface != NULL)
+ {
+ g_pDebugInterface->JITComplete(pMD, pEntryPoint);
+ }
+
+ return pEntryPoint;
+}
+
+BOOL ReadyToRunInfo::MethodIterator::Next()
+{
+ STANDARD_VM_CONTRACT;
+
+ while (++m_methodDefIndex < (int)m_pInfo->m_methodDefEntryPoints.GetCount())
+ {
+ uint offset;
+ if (m_pInfo->m_methodDefEntryPoints.TryGetAt(m_methodDefIndex, &offset))
+ return TRUE;
+ }
+ return FALSE;
+}
+
+MethodDesc * ReadyToRunInfo::MethodIterator::GetMethodDesc()
+{
+ STANDARD_VM_CONTRACT;
+
+ return MemberLoader::GetMethodDescFromMethodDef(m_pInfo->m_pModule, mdtMethodDef | (m_methodDefIndex + 1), FALSE);
+}
+
+PCODE ReadyToRunInfo::MethodIterator::GetMethodStartAddress()
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE ret = m_pInfo->GetEntryPoint(GetMethodDesc(), FALSE);
+ _ASSERTE(ret != NULL);
+ return ret;
+}
+
+DWORD ReadyToRunInfo::GetFieldBaseOffset(MethodTable * pMT)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD dwAlignment = DATA_ALIGNMENT;
+ DWORD dwOffsetBias = 0;
+#ifdef FEATURE_64BIT_ALIGNMENT
+ dwOffsetBias = 4;
+ if (pMT->RequiresAlign8())
+ dwAlignment = 8;
+#endif
+
+ MethodTable * pParentMT = pMT->GetParentMethodTable();
+ DWORD dwCumulativeInstanceFieldPos = (pParentMT != NULL) ? pParentMT->GetNumInstanceFieldBytes() : 0;
+
+ dwCumulativeInstanceFieldPos += dwOffsetBias;
+
+ dwCumulativeInstanceFieldPos = (DWORD)ALIGN_UP(dwCumulativeInstanceFieldPos, dwAlignment);
+
+ return (DWORD)sizeof(Object) + dwCumulativeInstanceFieldPos - dwOffsetBias;
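+ // Illustrative example: on a 32-bit platform (where sizeof(Object) is 4) with
+ // an align-8 type and no parent field bytes, this computes
+ // ALIGN_UP(0 + 4, 8) - 4 + 4 == 8, so the first field lands on an
+ // 8-byte-aligned offset from the start of the object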
+}
+
+#endif // DACCESS_COMPILE
diff --git a/src/vm/readytoruninfo.h b/src/vm/readytoruninfo.h
new file mode 100644
index 0000000000..8ddebcb735
--- /dev/null
+++ b/src/vm/readytoruninfo.h
@@ -0,0 +1,131 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: ReadyToRunInfo.h
+//
+
+//
+// Runtime support for Ready to Run
+// ===========================================================================
+
+#ifndef _READYTORUNINFO_H_
+#define _READYTORUNINFO_H_
+
+#include "nativeformatreader.h"
+
+typedef DPTR(struct READYTORUN_SECTION) PTR_READYTORUN_SECTION;
+
+typedef DPTR(class ReadyToRunInfo) PTR_ReadyToRunInfo;
+class ReadyToRunInfo
+{
+ friend class ReadyToRunJitManager;
+
+ PTR_Module m_pModule;
+
+ PTR_PEImageLayout m_pLayout;
+ PTR_READYTORUN_HEADER m_pHeader;
+
+ PTR_RUNTIME_FUNCTION m_pRuntimeFunctions;
+ DWORD m_nRuntimeFunctions;
+
+ PTR_CORCOMPILE_IMPORT_SECTION m_pImportSections;
+ DWORD m_nImportSections;
+
+ NativeFormat::NativeReader m_nativeReader;
+ NativeFormat::NativeArray m_methodDefEntryPoints;
+
+ Crst m_Crst;
+ PtrHashMap m_entryPointToMethodDescMap;
+
+ ReadyToRunInfo(Module * pModule, PEImageLayout * pLayout, READYTORUN_HEADER * pHeader);
+
+public:
+ static BOOL IsReadyToRunEnabled();
+
+ static PTR_ReadyToRunInfo Initialize(Module * pModule, AllocMemTracker *pamTracker);
+
+ PCODE GetEntryPoint(MethodDesc * pMD, BOOL fFixups = TRUE);
+
+ MethodDesc * GetMethodDescForEntryPoint(PCODE entryPoint);
+
+ BOOL SkipTypeValidation()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pHeader->Flags & READYTORUN_FLAG_SKIP_TYPE_VALIDATION;
+ }
+
+ PTR_PEImageLayout GetImage()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pLayout;
+ }
+
+ IMAGE_DATA_DIRECTORY * FindSection(DWORD type);
+
+ PTR_CORCOMPILE_IMPORT_SECTION GetImportSections(COUNT_T * pCount)
+ {
+ LIMITED_METHOD_CONTRACT;
+ *pCount = m_nImportSections;
+ return m_pImportSections;
+ }
+
+ PTR_CORCOMPILE_IMPORT_SECTION GetImportSectionFromIndex(COUNT_T index)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(index < m_nImportSections);
+ return m_pImportSections + index;
+ }
+
+ PTR_CORCOMPILE_IMPORT_SECTION GetImportSectionForRVA(RVA rva)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ PTR_CORCOMPILE_IMPORT_SECTION pEnd = m_pImportSections + m_nImportSections;
+ for (PTR_CORCOMPILE_IMPORT_SECTION pSection = m_pImportSections; pSection < pEnd; pSection++)
+ {
+ if (rva >= VAL32(pSection->Section.VirtualAddress) && rva < VAL32(pSection->Section.VirtualAddress) + VAL32(pSection->Section.Size))
+ return pSection;
+ }
+
+ return NULL;
+ }
+
+ PTR_BYTE GetDebugInfo(PTR_RUNTIME_FUNCTION pRuntimeFunction);
+
+ class MethodIterator
+ {
+ ReadyToRunInfo * m_pInfo;
+ int m_methodDefIndex;
+
+ public:
+ MethodIterator(ReadyToRunInfo * pInfo)
+ : m_pInfo(pInfo), m_methodDefIndex(-1)
+ {
+ }
+
+ BOOL Next();
+
+ MethodDesc * GetMethodDesc();
+ PCODE GetMethodStartAddress();
+ };
+
+ static DWORD GetFieldBaseOffset(MethodTable * pMT);
+};
+
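+//
+// Factories for small dynamically-emitted helper stubs used by ReadyToRun code;
+// each Create* method returns the entry point of a freshly allocated stub
+//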
+class DynamicHelpers
+{
+public:
+ static PCODE CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target);
+ static PCODE CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR arg, PCODE target);
+ static PCODE CreateHelper(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target);
+ static PCODE CreateHelperArgMove(LoaderAllocator * pAllocator, TADDR arg, PCODE target);
+ static PCODE CreateReturn(LoaderAllocator * pAllocator);
+ static PCODE CreateReturnConst(LoaderAllocator * pAllocator, TADDR arg);
+ static PCODE CreateReturnIndirConst(LoaderAllocator * pAllocator, TADDR arg, INT8 offset);
+ static PCODE CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, PCODE target);
+ static PCODE CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target);
+};
+
+#endif // _READYTORUNINFO_H_
diff --git a/src/vm/reflectclasswriter.cpp b/src/vm/reflectclasswriter.cpp
new file mode 100644
index 0000000000..b4cc48af47
--- /dev/null
+++ b/src/vm/reflectclasswriter.cpp
@@ -0,0 +1,248 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+#include "common.h"
+#include "reflectclasswriter.h"
+
+// Forward declaration.
+STDAPI GetMetaDataInternalInterfaceFromPublic(
+ IUnknown *pv, // [IN] Given interface.
+ REFIID riid, // [IN] desired interface
+ void **ppv); // [OUT] returned interface
+
+//******************************************************
+//*
+//* constructor for RefClassWriter
+//*
+//******************************************************
+HRESULT RefClassWriter::Init(ICeeGen *pCeeGen, IUnknown *pUnk, LPCWSTR szName)
+{
+ CONTRACT(HRESULT) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // we know that the COM implementation is ours so we use mode-any to avoid
+ // having to switch modes
+ MODE_ANY;
+ INJECT_FAULT(CONTRACT_RETURN(E_OUTOFMEMORY));
+
+ PRECONDITION(CheckPointer(pCeeGen));
+ PRECONDITION(CheckPointer(pUnk));
+
+ POSTCONDITION(SUCCEEDED(RETVAL) ? CheckPointer(m_emitter) : TRUE);
+ POSTCONDITION(SUCCEEDED(RETVAL) ? CheckPointer(m_importer) : TRUE);
+ POSTCONDITION(SUCCEEDED(RETVAL) ? CheckPointer(m_pEmitHelper) : TRUE);
+ POSTCONDITION(SUCCEEDED(RETVAL) ? CheckPointer(m_internalimport) : TRUE);
+ }
+ CONTRACT_END;
+
+ // Initialize the Import and Emitter interfaces
+ m_emitter = NULL;
+ m_importer = NULL;
+ m_internalimport = NULL;
+ m_pCeeFileGen = NULL;
+ m_ceeFile = NULL;
+ m_ulResourceSize = 0;
+ m_tkFile = mdFileNil;
+
+ m_pCeeGen = pCeeGen;
+ pCeeGen->AddRef();
+
+ // Get the interfaces
+ HRESULT hr = pUnk->QueryInterface(IID_IMetaDataEmit2, (void**)&m_emitter);
+ if (FAILED(hr))
+ RETURN(hr);
+
+ hr = pUnk->QueryInterface(IID_IMetaDataImport, (void**)&m_importer);
+ if (FAILED(hr))
+ RETURN(hr);
+
+ hr = pUnk->QueryInterface(IID_IMetaDataEmitHelper, (void**)&m_pEmitHelper);
+ if (FAILED(hr))
+ RETURN(hr);
+
+ hr = GetMetaDataInternalInterfaceFromPublic(pUnk, IID_IMDInternalImport, (void**)&m_internalimport);
+ if (FAILED(hr))
+ RETURN(hr);
+
+ // <TODO> We will need to set this at some point.</TODO>
+ hr = m_emitter->SetModuleProps(szName);
+ if (FAILED(hr))
+ RETURN(hr);
+
+ RETURN(S_OK);
+}
+
+
+//******************************************************
+//*
+//* destructor for RefClassWriter
+//*
+//******************************************************
+RefClassWriter::~RefClassWriter()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ // we know that the COM implementation is ours so we use mode-any to avoid
+ // having to switch modes
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ if (m_emitter) {
+ m_emitter->Release();
+ }
+
+ if (m_importer) {
+ m_importer->Release();
+ }
+
+ if (m_pEmitHelper) {
+ m_pEmitHelper->Release();
+ }
+
+ if (m_internalimport) {
+ m_internalimport->Release();
+ }
+
+ if (m_pCeeGen) {
+ m_pCeeGen->Release();
+ m_pCeeGen = NULL;
+ }
+
+ if (m_pOnDiskEmitter) {
+ m_pOnDiskEmitter->Release();
+ m_pOnDiskEmitter = NULL;
+ }
+
+
+#ifndef FEATURE_CORECLR
+ DestroyCeeFileGen();
+#endif // FEATURE_CORECLR
+}
+
+#ifndef FEATURE_CORECLR
+
+#include <MscorpeSxSWrapper.h>
+
+// Loads mscorpe.dll (uses shim hosting API)
+HRESULT
+LoadMscorpeDll(HMODULE * phModule)
+{
+ // Load SxS version of mscorpe.dll (i.e. mscorpehost.dll) and initialize it
+ return g_pCLRRuntime->LoadLibrary(W("mscorpe.dll"), phModule);
+}
+
+// Wrapper for mscorpe.dll calls
+typedef MscorpeSxSWrapper<LoadMscorpeDll> MscorpeSxS;
+
+//******************************************************
+//*
+//* Make sure that CeeFileGen for this module is created for emitting to disk
+//*
+//******************************************************
+HRESULT
+RefClassWriter::EnsureCeeFileGenCreated(
+ DWORD corhFlags,
+ DWORD peFlags)
+{
+ CONTRACT(HRESULT) {
+ NOTHROW;
+ GC_TRIGGERS;
+ // we know that the COM implementation is ours so we use mode-any to avoid
+ // having to switch modes
+ MODE_ANY;
+ INJECT_FAULT(CONTRACT_RETURN(E_OUTOFMEMORY));
+
+ POSTCONDITION(SUCCEEDED(RETVAL) ? CheckPointer(m_pCeeFileGen) : (int)(m_pCeeFileGen == NULL));
+ POSTCONDITION(SUCCEEDED(RETVAL) ? CheckPointer(m_ceeFile) : (int)(m_pCeeFileGen == NULL));
+ }
+ CONTRACT_END;
+
+ HRESULT hr = NOERROR;
+
+ if (m_pCeeFileGen == NULL)
+ {
+ EX_TRY
+ {
+ IfFailGo(MscorpeSxS::CreateICeeFileGen(&m_pCeeFileGen));
+
+ IfFailGo(m_pCeeFileGen->CreateCeeFileFromICeeGen(m_pCeeGen, &m_ceeFile, peFlags));
+
+ IfFailGo(m_pCeeFileGen->ClearComImageFlags(m_ceeFile, COMIMAGE_FLAGS_ILONLY));
+
+ IfFailGo(m_pCeeFileGen->SetComImageFlags(m_ceeFile, corhFlags));
+ ErrExit:
+ ;
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (FAILED(hr))
+ {
+ DestroyCeeFileGen();
+ }
+ }
+
+ RETURN(hr);
+} // RefClassWriter::EnsureCeeFileGenCreated
+
+
+//******************************************************
+//*
+//* Destroy the instance of CeeFileGen that we created
+//*
+//******************************************************
+HRESULT RefClassWriter::DestroyCeeFileGen()
+{
+ CONTRACT(HRESULT) {
+ NOTHROW;
+ GC_TRIGGERS;
+ // we know that the COM implementation is ours so we use mode-any to avoid
+ // having to switch modes
+ MODE_ANY;
+ FORBID_FAULT;
+
+ POSTCONDITION(m_pCeeFileGen == NULL);
+ POSTCONDITION(m_ceeFile == NULL);
+ }
+ CONTRACT_END;
+
+ HRESULT hr = NOERROR;
+
+ if (m_pCeeFileGen != NULL)
+ {
+ //Cleanup the HCEEFILE.
+ if (m_ceeFile != NULL)
+ {
+ hr = m_pCeeFileGen->DestroyCeeFile(&m_ceeFile);
+ _ASSERTE_MSG(SUCCEEDED(hr), "Destroy CeeFile");
+ m_ceeFile = NULL;
+ }
+
+ //Cleanup the ICeeFileGen.
+ {
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ // code:EnsureCeeFileGenCreated already loaded the DLL
+ _ASSERTE(MscorpeSxS::Debug_IsLoaded());
+
+ hr = MscorpeSxS::DestroyICeeFileGen(&m_pCeeFileGen);
+ }
+ _ASSERTE_MSG(SUCCEEDED(hr), "Destroy ICeeFileGen");
+ m_pCeeFileGen = NULL;
+ }
+
+ RETURN(hr);
+} // RefClassWriter::DestroyCeeFileGen
+
+#endif //!FEATURE_CORECLR
diff --git a/src/vm/reflectclasswriter.h b/src/vm/reflectclasswriter.h
new file mode 100644
index 0000000000..f0a2bf142d
--- /dev/null
+++ b/src/vm/reflectclasswriter.h
@@ -0,0 +1,104 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+#ifndef _REFCLASSWRITER_H_
+#define _REFCLASSWRITER_H_
+
+#include "iceefilegen.h"
+
+// RefClassWriter
+// This will create a Class
+class RefClassWriter {
+protected:
+ friend class COMDynamicWrite;
+ IMetaDataEmit2* m_emitter; // Emit interface.
+ IMetaDataImport* m_importer; // Import interface.
+ IMDInternalImport* m_internalimport; // Scopeless internal import interface
+ ICeeGen* m_pCeeGen;
+ ICeeFileGen* m_pCeeFileGen;
+ HCEEFILE m_ceeFile;
+ IMetaDataEmitHelper* m_pEmitHelper;
+ ULONG m_ulResourceSize;
+ mdFile m_tkFile;
+ IMetaDataEmit* m_pOnDiskEmitter;
+
+public:
+ RefClassWriter() {
+ LIMITED_METHOD_CONTRACT;
+ m_pOnDiskEmitter = NULL;
+ }
+
+ HRESULT Init(ICeeGen *pCeeGen, IUnknown *pUnk, LPCWSTR szName);
+
+ IMetaDataEmit2* GetEmitter() {
+ LIMITED_METHOD_CONTRACT;
+ return m_emitter;
+ }
+
+ IMetaDataEmitHelper* GetEmitHelper() {
+ LIMITED_METHOD_CONTRACT;
+ return m_pEmitHelper;
+ }
+
+ IMetaDataImport* GetRWImporter() {
+ LIMITED_METHOD_CONTRACT;
+ return m_importer;
+ }
+
+ IMDInternalImport* GetMDImport() {
+ LIMITED_METHOD_CONTRACT;
+ return m_internalimport;
+ }
+
+ ICeeGen* GetCeeGen() {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCeeGen;
+ }
+
+ ICeeFileGen* GetCeeFileGen() {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCeeFileGen;
+ }
+
+ HCEEFILE GetHCEEFILE() {
+ LIMITED_METHOD_CONTRACT;
+ return m_ceeFile;
+ }
+
+ IMetaDataEmit* GetOnDiskEmitter() {
+ LIMITED_METHOD_CONTRACT;
+ return m_pOnDiskEmitter;
+ }
+
+ void SetOnDiskEmitter(IMetaDataEmit *pOnDiskEmitter) {
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ // we know that the COM implementation is ours so we use mode-any to avoid
+ // having to switch modes
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+ if (pOnDiskEmitter)
+ pOnDiskEmitter->AddRef();
+ if (m_pOnDiskEmitter)
+ m_pOnDiskEmitter->Release();
+ m_pOnDiskEmitter = pOnDiskEmitter;
+ }
+
+#ifndef FEATURE_CORECLR
+ //HRESULT EnsureCeeFileGenCreated(DWORD corhFlags = COMIMAGE_FLAGS_ILONLY, DWORD peFlags = ICEE_CREATE_FILE_PURE_IL);
+ HRESULT EnsureCeeFileGenCreated(DWORD corhFlags, DWORD peFlags);
+ HRESULT DestroyCeeFileGen();
+#endif
+
+ ~RefClassWriter();
+};
+
+#endif // _REFCLASSWRITER_H_
diff --git a/src/vm/reflectioninvocation.cpp b/src/vm/reflectioninvocation.cpp
new file mode 100644
index 0000000000..da0f51c7ce
--- /dev/null
+++ b/src/vm/reflectioninvocation.cpp
@@ -0,0 +1,3902 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#include "common.h"
+#include "reflectioninvocation.h"
+#include "invokeutil.h"
+#include "object.h"
+#include "class.h"
+#include "method.hpp"
+#include "typehandle.h"
+#include "field.h"
+#include "security.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "eeconfig.h"
+#include "vars.hpp"
+#include "jitinterface.h"
+#include "contractimpl.h"
+#include "virtualcallstub.h"
+#include "comdelegate.h"
+#include "constrainedexecutionregion.h"
+#include "generics.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "interoputil.h"
+#include "runtimecallablewrapper.h"
+#endif
+
+#include "dbginterface.h"
+
+// these flags are defined in XXXInfo.cs and only those that are used are replicated here
+#define INVOCATION_FLAGS_UNKNOWN 0x00000000
+#define INVOCATION_FLAGS_INITIALIZED 0x00000001
+
+// it's used for both methods and fields to signify that no access is allowed
+#define INVOCATION_FLAGS_NO_INVOKE 0x00000002
+
+#define INVOCATION_FLAGS_NEED_SECURITY 0x00000004
+
+// because fields and methods are different we can reuse the same bits
+// method
+#define INVOCATION_FLAGS_IS_CTOR 0x00000010
+#define INVOCATION_FLAGS_RISKY_METHOD 0x00000020
+#define INVOCATION_FLAGS_W8P_API 0x00000040
+#define INVOCATION_FLAGS_IS_DELEGATE_CTOR 0x00000080
+#define INVOCATION_FLAGS_CONTAINS_STACK_POINTERS 0x00000100
+// field
+#define INVOCATION_FLAGS_SPECIAL_FIELD 0x00000010
+#define INVOCATION_FLAGS_FIELD_SPECIAL_CAST 0x00000020
+
+// temporary flag used for flagging invocation of method vs ctor
+#define INVOCATION_FLAGS_CONSTRUCTOR_INVOKE 0x10000000
+
+/**************************************************************************/
+/* if the type handle 'th' is a byref to a nullable type, return the
+ type handle to the nullable type in the byref. Otherwise return
+ the null type handle */
+static TypeHandle NullableTypeOfByref(TypeHandle th) {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (th.GetVerifierCorElementType() != ELEMENT_TYPE_BYREF)
+ return TypeHandle();
+
+ TypeHandle subType = th.AsTypeDesc()->GetTypeParam();
+ if (!Nullable::IsNullableType(subType))
+ return TypeHandle();
+
+ return subType;
+}
+
+static void TryDemand(DWORD whatPermission, RuntimeExceptionKind reKind, LPCWSTR wszTag) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+
+ EX_TRY {
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, whatPermission);
+ }
+ EX_CATCH {
+ COMPlusThrow(reKind, wszTag);
+ }
+ EX_END_CATCH_UNREACHABLE
+}
+
+static void TryCallMethodWorker(MethodDescCallSite* pMethodCallSite, ARG_SLOT* args, Frame* pDebuggerCatchFrame)
+{
+ // Use static contracts b/c we have SEH.
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ struct Param: public NotifyOfCHFFilterWrapperParam
+ {
+ MethodDescCallSite * pMethodCallSite;
+ ARG_SLOT* args;
+ } param;
+
+ param.pFrame = pDebuggerCatchFrame;
+ param.pMethodCallSite = pMethodCallSite;
+ param.args = args;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ pParam->pMethodCallSite->CallWithValueTypes(pParam->args);
+ }
+ PAL_EXCEPT_FILTER(NotifyOfCHFFilterWrapper)
+ {
+ // Should never reach here b/c handler should always continue search.
+ _ASSERTE(false);
+ }
+ PAL_ENDTRY
+}
+
+// Warning: This method has subtle differences from CallDescrWorkerReflectionWrapper
+// In particular that one captures watson bucket data and corrupting exception severity,
+// then transfers that data to the newly produced TargetInvocationException. This one
+// doesn't take those same steps.
+//
+static void TryCallMethod(MethodDescCallSite* pMethodCallSite, ARG_SLOT* args) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF ppException = NULL;
+ GCPROTECT_BEGIN(ppException);
+
+ // The sole purpose of having this frame is to tell the debugger that we have a catch handler here
+ // which may swallow managed exceptions. The debugger needs this in order to send a
+ // CatchHandlerFound (CHF) notification.
+ FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame;
+ EX_TRY {
+ TryCallMethodWorker(pMethodCallSite, args, &catchFrame);
+ }
+ EX_CATCH {
+ ppException = GET_THROWABLE();
+ _ASSERTE(ppException);
+ }
+ EX_END_CATCH(RethrowTransientExceptions)
+ catchFrame.Pop();
+
+ // It is important to re-throw outside the catch block because re-throwing will invoke
+ // the jitter and managed code and will cause us to use more than the backout stack limit.
+ if (ppException != NULL)
+ {
+ // If we get here we need to throw a TargetInvocationException
+ OBJECTREF except = InvokeUtil::CreateTargetExcept(&ppException);
+ COMPlusThrow(except);
+ }
+ GCPROTECT_END();
+}
+
+
+
+
+FCIMPL5(Object*, RuntimeFieldHandle::GetValue, ReflectFieldObject *pFieldUNSAFE, Object *instanceUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE, ReflectClassBaseObject *pDeclaringTypeUNSAFE, CLR_BOOL *pDomainInitialized) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ struct _gc
+ {
+ OBJECTREF target;
+ REFLECTCLASSBASEREF pFieldType;
+ REFLECTCLASSBASEREF pDeclaringType;
+ REFLECTFIELDREF refField;
+ }gc;
+
+ gc.target = ObjectToOBJECTREF(instanceUNSAFE);
+ gc.pFieldType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE);
+ gc.pDeclaringType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pDeclaringTypeUNSAFE);
+ gc.refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE);
+
+ if ((gc.pFieldType == NULL) || (gc.refField == NULL))
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle fieldType = gc.pFieldType->GetType();
+ TypeHandle declaringType = (gc.pDeclaringType != NULL) ? gc.pDeclaringType->GetType() : TypeHandle();
+
+ Assembly *pAssem;
+ if (declaringType.IsNull())
+ {
+ // global field
+ pAssem = gc.refField->GetField()->GetModule()->GetAssembly();
+ }
+ else
+ {
+ pAssem = declaringType.GetAssembly();
+ }
+
+ if (pAssem->IsIntrospectionOnly())
+ FCThrowEx(kInvalidOperationException, IDS_EE_CODEEXECUTION_IN_INTROSPECTIVE_ASSEMBLY, NULL, NULL, NULL);
+
+ // We should throw NotSupportedException here.
+ // But for backward compatibility we are throwing FieldAccessException instead.
+ if (pAssem->IsDynamic() && !pAssem->HasRunAccess())
+ FCThrow(kFieldAccessException);
+
+ OBJECTREF rv = NULL; // not protected
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+ // There can be no GC after this until the Object is returned.
+ rv = InvokeUtil::GetFieldValue(gc.refField->GetField(), fieldType, &gc.target, declaringType, pDomainInitialized);
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(rv);
+}
+FCIMPLEND
+
+FCIMPL5(void, ReflectionInvocation::PerformVisibilityCheckOnField, FieldDesc *pFieldDesc, Object *target, ReflectClassBaseObject *pDeclaringTypeUNSAFE, DWORD attr, DWORD invocationFlags) {
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pFieldDesc));
+ PRECONDITION(CheckPointer(pDeclaringTypeUNSAFE));
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_CORECLR
+ // Security checks are expensive as they involve stack walking. Avoid them if we can.
+ // In immersive we don't allow private reflection to framework code. So we need to perform
+ // the access check even if all the domains on the stack are fully trusted.
+ if (Security::AllDomainsOnStackFullyTrusted() && !AppX::IsAppXProcess())
+ return;
+#endif
+
+ REFLECTCLASSBASEREF refDeclaringType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pDeclaringTypeUNSAFE);
+
+ TypeHandle declaringType = refDeclaringType->GetType();
+ OBJECTREF targetObj = ObjectToOBJECTREF(target);
+
+ HELPER_METHOD_FRAME_BEGIN_2(targetObj, refDeclaringType);
+
+ if ((invocationFlags & INVOCATION_FLAGS_SPECIAL_FIELD) != 0) {
+ // Verify that this is not a Final Field
+ if (IsFdInitOnly(attr))
+ TryDemand(SECURITY_SERIALIZATION, kFieldAccessException, W("Acc_ReadOnly"));
+ if (IsFdHasFieldRVA(attr))
+ TryDemand(SECURITY_SKIP_VER, kFieldAccessException, W("Acc_RvaStatic"));
+ }
+
+ if ((invocationFlags & INVOCATION_FLAGS_NEED_SECURITY) != 0) {
+ // Verify the callee/caller access
+
+ bool targetRemoted = false;
+
+#ifndef FEATURE_CORECLR
+ targetRemoted = targetObj != NULL && InvokeUtil::IsTargetRemoted(pFieldDesc, targetObj->GetMethodTable());
+#endif //FEATURE_CORECLR
+
+ RefSecContext sCtx(InvokeUtil::GetInvocationAccessCheckType(targetRemoted));
+
+ MethodTable* pInstanceMT = NULL;
+ if (targetObj != NULL && !pFieldDesc->IsStatic()) {
+ TypeHandle targetType = targetObj->GetTypeHandle();
+ if (!targetType.IsTypeDesc())
+ pInstanceMT = targetType.AsMethodTable();
+ }
+
+ // Perform the normal access check (caller vs field).
+ InvokeUtil::CanAccessField(&sCtx,
+ declaringType.GetMethodTable(),
+ pInstanceMT,
+ pFieldDesc);
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL2(FC_BOOL_RET, ReflectionInvocation::CanValueSpecialCast, ReflectClassBaseObject *pValueTypeUNSAFE, ReflectClassBaseObject *pTargetTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pValueTypeUNSAFE));
+ PRECONDITION(CheckPointer(pTargetTypeUNSAFE));
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refValueType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pValueTypeUNSAFE);
+ REFLECTCLASSBASEREF refTargetType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTargetTypeUNSAFE);
+
+ TypeHandle valueType = refValueType->GetType();
+ TypeHandle targetType = refTargetType->GetType();
+
+ // we are here only if the target type is a primitive, an enum or a pointer
+
+ CorElementType targetCorElement = targetType.GetVerifierCorElementType();
+
+ BOOL ret = TRUE;
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refValueType, refTargetType);
+ // the field type is a pointer
+ if (targetCorElement == ELEMENT_TYPE_PTR || targetCorElement == ELEMENT_TYPE_FNPTR) {
+ // the object must be an IntPtr or a System.Reflection.Pointer
+ if (valueType == TypeHandle(MscorlibBinder::GetClass(CLASS__INTPTR))) {
+ //
+ // it's an IntPtr, it's good. Demand SkipVerification and proceed
+
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
+ }
+ //
+ // it's a System.Reflection.Pointer object
+
+ // void* assigns to any pointer. Otherwise the type of the pointer must match
+ else if (!InvokeUtil::IsVoidPtr(targetType)) {
+ if (!valueType.CanCastTo(targetType))
+ ret = FALSE;
+ else
+ // demand SkipVerification and proceed
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
+ }
+ else
+ // demand SkipVerification and proceed
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
+ } else {
+ // the field type is an enum or a primitive. To have any chance of assignment the object type must
+ // be an enum or primitive as well.
+ // So get the internal cor element type; it must be the same or widenable
+ CorElementType valueCorElement = valueType.GetVerifierCorElementType();
+ if (InvokeUtil::IsPrimitiveType(valueCorElement))
+ ret = (InvokeUtil::CanPrimitiveWiden(targetCorElement, valueCorElement)) ? TRUE : FALSE;
+ else
+ ret = FALSE;
+ }
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(ret);
+}
+FCIMPLEND
+
+FCIMPL3(Object*, ReflectionInvocation::AllocateValueType, ReflectClassBaseObject *pTargetTypeUNSAFE, Object *valueUNSAFE, CLR_BOOL fForceTypeChange) {
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pTargetTypeUNSAFE));
+ PRECONDITION(CheckPointer(valueUNSAFE, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ struct _gc
+ {
+ REFLECTCLASSBASEREF refTargetType;
+ OBJECTREF value;
+ OBJECTREF obj;
+ }gc;
+
+ gc.value = ObjectToOBJECTREF(valueUNSAFE);
+ gc.obj = gc.value;
+ gc.refTargetType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTargetTypeUNSAFE);
+
+ TypeHandle targetType = gc.refTargetType->GetType();
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+ CorElementType targetElementType = targetType.GetSignatureCorElementType();
+ if (InvokeUtil::IsPrimitiveType(targetElementType) || targetElementType == ELEMENT_TYPE_VALUETYPE)
+ {
+ MethodTable* allocMT = targetType.AsMethodTable();
+ if (gc.value != NULL)
+ {
+ // ignore the type of the incoming box if fForceTypeChange is set
+ // and the target type is not nullable
+ if (!fForceTypeChange || Nullable::IsNullableType(targetType))
+ allocMT = gc.value->GetMethodTable();
+ }
+
+ // for null Nullable<T> we don't want a default value being created.
+ // just allow the null value to be passed, as it will be converted to
+ // a true nullable
+ if (!(gc.value == NULL && Nullable::IsNullableType(targetType)))
+ {
+ // boxed value types are 'read-only' in the sense that only the
+ // implementor of the value type can expose mutators.
+ // To ensure byrefs don't mutate value classes in place, we make
+ // a copy (and if we were not given one, we create a default value
+ // type instance).
+ gc.obj = allocMT->Allocate();
+
+ if (gc.value != NULL)
+ CopyValueClassUnchecked(gc.obj->UnBox(), gc.value->UnBox(), allocMT);
+ }
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(gc.obj);
+}
+FCIMPLEND
+
+FCIMPL7(void, RuntimeFieldHandle::SetValue, ReflectFieldObject *pFieldUNSAFE, Object *targetUNSAFE, Object *valueUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE, DWORD attr, ReflectClassBaseObject *pDeclaringTypeUNSAFE, CLR_BOOL *pDomainInitialized) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ struct _gc {
+ OBJECTREF target;
+ OBJECTREF value;
+ REFLECTCLASSBASEREF fieldType;
+ REFLECTCLASSBASEREF declaringType;
+ REFLECTFIELDREF refField;
+ } gc;
+
+ gc.target = ObjectToOBJECTREF(targetUNSAFE);
+ gc.value = ObjectToOBJECTREF(valueUNSAFE);
+ gc.fieldType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE);
+ gc.declaringType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pDeclaringTypeUNSAFE);
+ gc.refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE);
+
+ if ((gc.fieldType == NULL) || (gc.refField == NULL))
+ FCThrowResVoid(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle fieldType = gc.fieldType->GetType();
+ TypeHandle declaringType = gc.declaringType != NULL ? gc.declaringType->GetType() : TypeHandle();
+
+ Assembly *pAssem;
+ if (declaringType.IsNull())
+ {
+ // global field
+ pAssem = gc.refField->GetField()->GetModule()->GetAssembly();
+ }
+ else
+ {
+ pAssem = declaringType.GetAssembly();
+ }
+
+ if (pAssem->IsIntrospectionOnly())
+ FCThrowExVoid(kInvalidOperationException, IDS_EE_CODEEXECUTION_IN_INTROSPECTIVE_ASSEMBLY, NULL, NULL, NULL);
+
+ // We should throw NotSupportedException here.
+ // But for backward compatibility we are throwing FieldAccessException instead.
+ if (pAssem->IsDynamic() && !pAssem->HasRunAccess())
+ FCThrowVoid(kFieldAccessException);
+
+ FC_GC_POLL_NOT_NEEDED();
+
+ FieldDesc* pFieldDesc = gc.refField->GetField();
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ //TODO: cleanup this function
+ InvokeUtil::SetValidField(fieldType.GetSignatureCorElementType(), fieldType, pFieldDesc, &gc.target, &gc.value, declaringType, pDomainInitialized);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+//A.CI work
+FCIMPL1(Object*, RuntimeTypeHandle::Allocate, ReflectClassBaseObject* pTypeUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pTypeUNSAFE));
+ }
+ CONTRACTL_END
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+ TypeHandle type = refType->GetType();
+
+ // Handle the nullable<T> special case
+ if (Nullable::IsNullableType(type)) {
+ return OBJECTREFToObject(Nullable::BoxedNullableNull(type));
+ }
+
+ OBJECTREF rv = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refType);
+ rv = AllocateObject(type.GetMethodTable());
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(rv);
+
+}//Allocate
+FCIMPLEND
+
+FCIMPL6(Object*, RuntimeTypeHandle::CreateInstance, ReflectClassBaseObject* refThisUNSAFE,
+ CLR_BOOL publicOnly,
+ CLR_BOOL securityOff,
+ CLR_BOOL* pbCanBeCached,
+ MethodDesc** pConstructor,
+ CLR_BOOL *pbNeedSecurityCheck) {
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(refThisUNSAFE));
+ PRECONDITION(CheckPointer(pbCanBeCached));
+ PRECONDITION(CheckPointer(pbNeedSecurityCheck));
+ PRECONDITION(CheckPointer(pConstructor));
+ PRECONDITION(*pbCanBeCached == false);
+ PRECONDITION(*pConstructor == NULL);
+ PRECONDITION(*pbNeedSecurityCheck == true);
+ }
+ CONTRACTL_END;
+
+ if (refThisUNSAFE == NULL)
+ FCThrow(kNullReferenceException);
+
+ MethodDesc* pMeth;
+
+ OBJECTREF rv = NULL;
+ REFLECTCLASSBASEREF refThis = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(refThisUNSAFE);
+ TypeHandle thisTH = refThis->GetType();
+
+ Assembly *pAssem = thisTH.GetAssembly();
+
+ if (pAssem->IsIntrospectionOnly())
+ FCThrowEx(kInvalidOperationException, IDS_EE_CODEEXECUTION_IN_INTROSPECTIVE_ASSEMBLY, NULL, NULL, NULL);
+
+ if (pAssem->IsDynamic() && !pAssem->HasRunAccess())
+ FCThrowRes(kNotSupportedException, W("NotSupported_DynamicAssemblyNoRunAccess"));
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(rv, refThis);
+
+#ifdef FEATURE_LEGACYNETCF
+ BOOL fNetCFCompat = GetAppDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8;
+#else
+ const BOOL fNetCFCompat = FALSE;
+#endif
+
+ MethodTable* pVMT;
+ bool bNeedAccessCheck;
+
+ if (fNetCFCompat && !thisTH.IsNull() && thisTH.IsArray())
+ {
+ ArrayTypeDesc *atd = thisTH.AsArray();
+ if (atd->GetTypeParam().IsArray())
+ {
+ // We could do this, but Mango doesn't support creating
+ // arrays of arrays here
+ COMPlusThrow(kMissingMethodException,W("Arg_NoDefCTor"));
+ }
+
+ INT32 rank = atd->GetRank();
+ INT32* lengths = (INT32*) _alloca(sizeof(INT32) * rank);
+ for (INT32 i = 0; i < rank; ++i)
+ {
+ lengths[i] = 0;
+ }
+ rv = AllocateArrayEx(thisTH, lengths, rank);
+ goto Exit;
+ }
+
+ // Get the type information associated with refThis
+ if (thisTH.IsNull() || thisTH.IsTypeDesc())
+ COMPlusThrow(kMissingMethodException,W("Arg_NoDefCTor"));
+
+ pVMT = thisTH.AsMethodTable();
+
+ pVMT->EnsureInstanceActive();
+
+ bNeedAccessCheck = false;
+
+#ifdef FEATURE_COMINTEROP
+ // If this is __ComObject then create the underlying COM object.
+ if (IsComObjectClass(refThis->GetType())) {
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ SyncBlock* pSyncBlock = refThis->GetSyncBlock();
+
+ void* pClassFactory = (void*)pSyncBlock->GetInteropInfo()->GetComClassFactory();
+ if (!pClassFactory)
+ COMPlusThrow(kInvalidComObjectException, IDS_EE_NO_BACKING_CLASS_FACTORY);
+
+ // Check for the required permissions (SecurityPermission.UnmanagedCode),
+ // since arbitrary unmanaged code in the class factory will execute below.
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_UNMANAGED_CODE);
+
+ // create an instance of the Com Object
+ rv = ((ComClassFactory*)pClassFactory)->CreateInstance(NULL);
+
+#else // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+ COMPlusThrow(kInvalidComObjectException, IDS_EE_NO_BACKING_CLASS_FACTORY);
+
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ // If we are creating a COM object which has backing metadata we still
+ // need to ensure that the caller has unmanaged code access permission.
+ if (pVMT->IsComObjectType())
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_UNMANAGED_CODE);
+
+ // if this is an abstract class then we will fail this
+ if (pVMT->IsAbstract()) {
+ if (pVMT->IsInterface())
+ COMPlusThrow(kMissingMethodException,W("Acc_CreateInterface"));
+ else
+ COMPlusThrow(kMissingMethodException,W("Acc_CreateAbst"));
+ }
+ else if (pVMT->ContainsGenericVariables()) {
+ COMPlusThrow(kArgumentException,W("Acc_CreateGeneric"));
+ }
+
+ if (pVMT->ContainsStackPtr())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_ContainsStackPtr"));
+
+ if (pVMT->IsSharedByGenericInstantiations())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_Type"));
+
+ if (!pVMT->HasDefaultConstructor())
+ {
+ // We didn't find the parameterless constructor,
+ // if this is a Value class we can simply allocate one and return it
+
+ if (!pVMT->IsValueType()) {
+ COMPlusThrow(kMissingMethodException,W("Arg_NoDefCTor"));
+ }
+
+ if (!securityOff)
+ {
+#ifndef FEATURE_CORECLR
+ // Security checks are expensive as they involve stack walking. Avoid them if we can.
+ // In immersive we don't allow private reflection to framework code. So we need to perform
+ // the access check even if all the domains on the stack are fully trusted.
+ if (Security::AllDomainsOnStackFullyTrusted() && !AppX::IsAppXProcess())
+ {
+ bNeedAccessCheck = false;
+ }
+ else
+#endif //FEATURE_CORECLR
+ {
+ // Public critical types cannot be accessed by transparent callers
+ bNeedAccessCheck = !pVMT->IsExternallyVisible() || Security::TypeRequiresTransparencyCheck(pVMT);
+ }
+
+ if (bNeedAccessCheck)
+ {
+ RefSecContext sCtx(InvokeUtil::GetInvocationAccessCheckType());
+ InvokeUtil::CanAccessClass(&sCtx, pVMT, TRUE);
+ }
+ }
+
+ // Handle the nullable<T> special case
+ if (Nullable::IsNullableType(thisTH)) {
+ rv = Nullable::BoxedNullableNull(thisTH);
+ }
+ else
+ rv = pVMT->Allocate();
+
+ // Since no security checks will be performed on cached value types without default ctors,
+ // we cannot cache those types that require access checks.
+ // In fact, we don't even need to set pbNeedSecurityCheck to false here.
+ if (!pVMT->Collectible() && !bNeedAccessCheck)
+ {
+ *pbCanBeCached = true;
+ *pbNeedSecurityCheck = false;
+ }
+ }
+ else // !pVMT->HasDefaultConstructor()
+ {
+ pMeth = pVMT->GetDefaultConstructor();
+
+ // Validate the method can be called by this caller
+ DWORD attr = pMeth->GetAttrs();
+
+ if (!IsMdPublic(attr) && publicOnly)
+ COMPlusThrow(kMissingMethodException,W("Arg_NoDefCTor"));
+
+ if (!securityOff)
+ {
+ // If the type is critical or the constructor we're using is critical, we need to ensure that
+ // the caller is allowed to invoke it.
+ bool needsTransparencyCheck = Security::TypeRequiresTransparencyCheck(pVMT) ||
+ (Security::IsMethodCritical(pMeth) && !Security::IsMethodSafeCritical(pMeth));
+
+ // We also need to do a check if the method or type is not public
+ bool needsVisibilityCheck = !IsMdPublic(attr) || !pVMT->IsExternallyVisible();
+
+ // If the visibility, transparency, or legacy LinkDemands on the type or constructor dictate that
+ // we need to check the caller, then do that now.
+ bNeedAccessCheck = needsTransparencyCheck ||
+ needsVisibilityCheck ||
+ pMeth->RequiresLinktimeCheck();
+
+ if (bNeedAccessCheck)
+ {
+ // this security context will be used in cast checking as well
+ RefSecContext sCtx(InvokeUtil::GetInvocationAccessCheckType());
+ InvokeUtil::CanAccessMethod(pMeth, pVMT, NULL, &sCtx);
+ }
+ }
+
+ // We've got the class, let's allocate it and call the constructor
+ OBJECTREF o;
+ bool remoting = false;
+
+#ifdef FEATURE_REMOTING
+ if (pVMT->IsTransparentProxy())
+ COMPlusThrow(kMissingMethodException,W("NotSupported_Constructor"));
+
+ if (pVMT->MayRequireManagedActivation())
+ {
+ o = CRemotingServices::CreateProxyOrObject(pVMT);
+ remoting = true;
+ }
+ else
+ o = AllocateObject(pVMT);
+
+#else
+ o = AllocateObject(pVMT);
+#endif
+ GCPROTECT_BEGIN(o);
+
+ MethodDescCallSite ctor(pMeth, &o);
+
+ // Copy "this" pointer
+ ARG_SLOT arg;
+ if (pVMT->IsValueType())
+ arg = PtrToArgSlot(o->UnBox());
+ else
+ arg = ObjToArgSlot(o);
+
+ // Call the method
+ TryCallMethod(&ctor, &arg);
+
+ rv = o;
+ GCPROTECT_END();
+
+ // No need to set these if they cannot be cached
+ if (!remoting && !pVMT->Collectible())
+ {
+ *pbCanBeCached = true;
+ *pConstructor = pMeth;
+ *pbNeedSecurityCheck = bNeedAccessCheck;
+ }
+ }
+ }
+
+Exit:
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(rv);
+}
+FCIMPLEND
+
+FCIMPL2(Object*, RuntimeTypeHandle::CreateInstanceForGenericType, ReflectClassBaseObject* pTypeUNSAFE, ReflectClassBaseObject* pParameterTypeUNSAFE) {
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ OBJECTREF rv;
+ REFLECTCLASSBASEREF refType;
+ REFLECTCLASSBASEREF refParameterType;
+ } gc;
+
+ gc.rv = NULL;
+ gc.refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+ gc.refParameterType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pParameterTypeUNSAFE);
+
+ MethodDesc* pMeth;
+ TypeHandle genericType = gc.refType->GetType();
+
+ TypeHandle parameterHandle = gc.refParameterType->GetType();
+
+ _ASSERTE (genericType.HasInstantiation());
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
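+ // Instantiate the canonical generic type over the single supplied type
+ // argument (this path only handles one-parameter instantiations)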
+ TypeHandle instantiatedType = ((TypeHandle)genericType.GetCanonicalMethodTable()).Instantiate(Instantiation(&parameterHandle, 1));
+
+ // Get the type information associated with refThis
+ MethodTable* pVMT = instantiatedType.GetMethodTable();
+ _ASSERTE (pVMT != 0 && !instantiatedType.IsTypeDesc());
+ _ASSERTE(!(pVMT->GetAssembly()->IsDynamic() && !pVMT->GetAssembly()->HasRunAccess()));
+ _ASSERTE(!pVMT->IsAbstract() || !instantiatedType.ContainsGenericVariables());
+ _ASSERTE(!pVMT->ContainsStackPtr() && pVMT->HasDefaultConstructor());
+
+ pMeth = pVMT->GetDefaultConstructor();
+ MethodDescCallSite ctor(pMeth);
+
+ // We've got the class, let's allocate it and call the constructor
+#ifdef FEATURE_REMOTING
+ _ASSERTE(!pVMT->IsTransparentProxy());
+ _ASSERTE(!pVMT->MayRequireManagedActivation());
+#endif
+
+ // Nullables don't take this path, if they do we need special logic to make an instance
+ _ASSERTE(!Nullable::IsNullableType(instantiatedType));
+ gc.rv = instantiatedType.GetMethodTable()->Allocate();
+
+ ARG_SLOT arg = ObjToArgSlot(gc.rv);
+
+ // Call the method
+ TryCallMethod(&ctor, &arg);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(gc.rv);
+}
+FCIMPLEND
+
+NOINLINE FC_BOOL_RET IsInstanceOfTypeHelper(OBJECTREF obj, REFLECTCLASSBASEREF refType)
+{
+ FCALL_CONTRACT;
+
+ BOOL canCast = false;
+
+ FC_INNER_PROLOG(RuntimeTypeHandle::IsInstanceOfType);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, obj, refType);
+ canCast = ObjIsInstanceOf(OBJECTREFToObject(obj), refType->GetType());
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(canCast);
+}
+
+FCIMPL2(FC_BOOL_RET, RuntimeTypeHandle::IsInstanceOfType, ReflectClassBaseObject* pTypeUNSAFE, Object *objectUNSAFE) {
+ FCALL_CONTRACT;
+
+ OBJECTREF obj = ObjectToOBJECTREF(objectUNSAFE);
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ // Null is not an instance of anything in the reflection world
+ if (obj == NULL)
+ FC_RETURN_BOOL(false);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
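+ // Fast path: ObjIsInstanceOfNoGC tries to decide the cast without setting up
+ // a helper frame (and without triggering a GC); an indeterminate answer falls
+ // through to the frame-establishing helper below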
+ switch (ObjIsInstanceOfNoGC(objectUNSAFE, refType->GetType())) {
+ case TypeHandle::CanCast:
+ FC_RETURN_BOOL(true);
+ case TypeHandle::CannotCast:
+ FC_RETURN_BOOL(false);
+ default:
+ // fall through to the slow helper
+ break;
+ }
+
+ FC_INNER_RETURN(FC_BOOL_RET, IsInstanceOfTypeHelper(obj, refType));
+}
+FCIMPLEND
+
+FCIMPL1(DWORD, ReflectionInvocation::GetSpecialSecurityFlags, ReflectMethodObject *pMethodUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ DWORD dwFlags = 0;
+
+ struct
+ {
+ REFLECTMETHODREF refMethod;
+ }
+ gc;
+
+ gc.refMethod = (REFLECTMETHODREF)ObjectToOBJECTREF(pMethodUNSAFE);
+
+ if (!gc.refMethod)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ MethodDesc* pMethod = gc.refMethod->GetMethod();
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ // this information is critical for ctors but otherwise unimportant;
+ // we get it here anyway to simplify the code
+ MethodTable *pMT = pMethod->GetMethodTable();
+ _ASSERTE(pMT);
+
+ // We should also check the return type here.
+ // Is there an easier way to get the return type of a method?
+ MetaSig metaSig(pMethod);
+ TypeHandle retTH = metaSig.GetRetTypeHandleThrowing();
+ MethodTable *pRetMT = retTH.GetMethodTable();
+
+ // If either the declaring type or the return type contains stack pointers (ByRef or typedbyref),
+ // the type cannot be boxed and thus cannot be invoked through reflection invocation.
+ if ( pMT->ContainsStackPtr() || (pRetMT != NULL && pRetMT->ContainsStackPtr()) )
+ dwFlags |= INVOCATION_FLAGS_CONTAINS_STACK_POINTERS;
+
+ // Is this a call to a potentially dangerous method? (If so, we're going
+ // to demand additional permission).
+ if (InvokeUtil::IsDangerousMethod(pMethod))
+ dwFlags |= INVOCATION_FLAGS_RISKY_METHOD;
+
+ // Is there a link demand?
+ if (pMethod->RequiresLinktimeCheck()) {
+ dwFlags |= INVOCATION_FLAGS_NEED_SECURITY;
+ }
+ else
+ if (Security::IsMethodCritical(pMethod) && !Security::IsMethodSafeCritical(pMethod)) {
+ dwFlags |= INVOCATION_FLAGS_NEED_SECURITY;
+ }
+
+ HELPER_METHOD_FRAME_END();
+ return dwFlags;
+}
+FCIMPLEND
+
+// Cannot inline this function.
+#ifdef _MSC_VER
+__declspec(noinline)
+#endif
+void PerformSecurityCheckHelper(Object *targetUnsafe, MethodDesc *pMeth, MethodTable* pParentMT, DWORD dwFlags)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+
+ PRECONDITION(CheckPointer(pMeth));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF target (targetUnsafe);
+ GCPROTECT_BEGIN (target);
+ FrameWithCookie<DebuggerSecurityCodeMarkFrame> __dbgSecFrame;
+
+ bool targetRemoted = false;
+
+#ifndef FEATURE_CORECLR
+ targetRemoted = target != NULL && InvokeUtil::IsTargetRemoted(pMeth, target->GetMethodTable());
+#endif //FEATURE_CORECLR
+
+ RefSecContext sCtx(InvokeUtil::GetInvocationAccessCheckType(targetRemoted));
+
+ MethodTable* pInstanceMT = NULL;
+ if (target != NULL) {
+ if (!target->GetTypeHandle().IsTypeDesc())
+ pInstanceMT = target->GetTypeHandle().AsMethodTable();
+ }
+
+#ifdef FEATURE_CORECLR
+ if (dwFlags & (INVOCATION_FLAGS_RISKY_METHOD|INVOCATION_FLAGS_IS_DELEGATE_CTOR))
+ {
+ // On CoreCLR we assert that "dangerous" methods (see IsDangerousMethods) can only
+ // be reflection-invoked by platform code (C or SC).
+
+ // Also, for delegates, on desktop we used to demand unmanaged code
+ // permission for this since it's hard to validate the target address.
+ // Here we just restrict access to Critical code.
+ MethodDesc *pCallerMD = sCtx.GetCallerMethod();
+
+ if (pCallerMD && Security::IsMethodTransparent(pCallerMD))
+ {
+ ThrowMethodAccessException(pCallerMD, pMeth, FALSE, IDS_E_TRANSPARENT_REFLECTION);
+ }
+ }
+
+ if (dwFlags & (INVOCATION_FLAGS_NEED_SECURITY|INVOCATION_FLAGS_CONSTRUCTOR_INVOKE))
+#endif
+ {
+
+ if (dwFlags & INVOCATION_FLAGS_CONSTRUCTOR_INVOKE)
+ InvokeUtil::CanAccessMethod(pMeth,
+ pParentMT,
+ pInstanceMT,
+ &sCtx,
+ TRUE /*fCriticalToFullDemand*/);
+ else
+ InvokeUtil::CanAccessMethod(pMeth,
+ pParentMT,
+ pInstanceMT,
+ &sCtx,
+ TRUE /*fCriticalToFullDemand*/,
+ (dwFlags & INVOCATION_FLAGS_IS_CTOR) != 0 /*checkSkipVer*/);
+ }
+
+ __dbgSecFrame.Pop();
+ GCPROTECT_END();
+}
+
+FCIMPL4(void, ReflectionInvocation::PerformSecurityCheck, Object *target, MethodDesc *pMeth, ReflectClassBaseObject *pParentUNSAFE, DWORD dwFlags) {
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMeth));
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_CORECLR
+ // Security checks are expensive as they involve stack walking. Avoid them if we can.
+ // In immersive we don't allow private reflection to framework code. So we need to perform
+ // the access check even if all the domains on the stack are fully trusted.
+ if (Security::AllDomainsOnStackFullyTrusted() && !AppX::IsAppXProcess())
+ return;
+#endif
+
+ REFLECTCLASSBASEREF refParent = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pParentUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_1(refParent);
+ // CAUTION: PerformSecurityCheckHelper could trigger GC!
+
+ TypeHandle parent = refParent != NULL ? refParent->GetType() : TypeHandle();
+ PerformSecurityCheckHelper(target, pMeth, parent.GetMethodTable(), dwFlags);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+/****************************************************************************/
+/* A boxed Nullable<T> is represented as a boxed T, so there is no unboxed
+   Nullable<T> inside it to point at by reference. Because of this, byref
+   parameters of type Nullable<T> are copied out of the boxed instance
+   (to a place on the stack) before the call is made (and this copy is
+   pointed at). After the call returns, the copy must be copied back to
+   the original argument array. ByRefToNullable is a simple linked list
+   that remembers what copy-backs are needed. */
+
+struct ByRefToNullable {
+ unsigned argNum; // The argument number for this byref Nullable argument
+ void* data; // The data to copy back to the ByRefNullable. This points to the stack
+ TypeHandle type; // The type of Nullable for this argument
+ ByRefToNullable* next; // list of these
+
+ ByRefToNullable(unsigned aArgNum, void* aData, TypeHandle aType, ByRefToNullable* aNext) {
+ argNum = aArgNum;
+ data = aData;
+ type = aType;
+ next = aNext;
+ }
+};
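+
+// A sketch of how the list is used below (illustrative names): nodes are
+// placement-new'ed into _alloca storage so they live exactly as long as the
+// invocation frame, each new node is pushed onto the head of the list as a
+// byref Nullable<T> argument is copied out, and the list is walked after the
+// call to write every stack copy back into the caller's argument array.
+//
+//   ByRefToNullable* head = NULL;
+//   head = new (_alloca(sizeof(ByRefToNullable))) ByRefToNullable(i, pCopy, th, head);
+//   ...
+//   for (ByRefToNullable* p = head; p != NULL; p = p->next) { /* box p->data back */ }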
+
+void CallDescrWorkerReflectionWrapper(CallDescrData * pCallDescrData, Frame * pFrame)
+{
+ // Use static contracts b/c we have SEH.
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ struct Param: public NotifyOfCHFFilterWrapperParam
+ {
+ CallDescrData * pCallDescrData;
+ } param;
+
+ param.pFrame = pFrame;
+ param.pCallDescrData = pCallDescrData;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ CallDescrWorkerWithHandler(pParam->pCallDescrData);
+ }
+ PAL_EXCEPT_FILTER(ReflectionInvocationExceptionFilter)
+ {
+ // Should never reach here b/c handler should always continue search.
+ _ASSERTE(false);
+ }
+ PAL_ENDTRY
+} // CallDescrWorkerReflectionWrapper
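+
+// A note on the shape of the wrapper above (descriptive only): PAL_TRY cannot
+// capture locals implicitly, so everything the filter needs is packed into a
+// Param struct whose address is passed through PAL_TRY; the filter receives it
+// via its void* argument. Since ReflectionInvocationExceptionFilter is expected
+// to always continue the search, the handler body should be unreachable.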
+
+OBJECTREF InvokeArrayConstructor(ArrayTypeDesc* arrayDesc, MethodDesc* pMeth, PTRARRAYREF* objs, int argCnt)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ DWORD i;
+
+ // If we're trying to create an array of pointers or function pointers,
+ // check that the caller has skip verification permission.
+ CorElementType et = arrayDesc->GetArrayElementTypeHandle().GetVerifierCorElementType();
+ if (et == ELEMENT_TYPE_PTR || et == ELEMENT_TYPE_FNPTR)
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
+
+ // Validate the argCnt and the rank. Also allow nested SZARRAYs.
+ _ASSERTE(argCnt == (int) arrayDesc->GetRank() || argCnt == (int) arrayDesc->GetRank() * 2 ||
+ arrayDesc->GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY);
+
+ // Validate all of the parameters. These are all typed as integers.
+ int allocSize = 0;
+ if (!ClrSafeInt<int>::multiply(sizeof(INT32), argCnt, allocSize))
+ COMPlusThrow(kArgumentException, IDS_EE_SIGTOOCOMPLEX);
+
+ INT32* indexes = (INT32*) _alloca((size_t)allocSize);
+ ZeroMemory(indexes, allocSize);
+
+ for (i=0; i<(DWORD)argCnt; i++)
+ {
+ if (!(*objs)->m_Array[i])
+ COMPlusThrowArgumentException(W("parameters"), W("Arg_NullIndex"));
+
+ MethodTable* pMT = ((*objs)->m_Array[i])->GetMethodTable();
+ CorElementType oType = TypeHandle(pMT).GetVerifierCorElementType();
+
+ if (!InvokeUtil::IsPrimitiveType(oType) || !InvokeUtil::CanPrimitiveWiden(ELEMENT_TYPE_I4,oType))
+ COMPlusThrow(kArgumentException,W("Arg_PrimWiden"));
+
+ memcpy(&indexes[i],(*objs)->m_Array[i]->UnBox(),pMT->GetNumInstanceFieldBytes());
+ }
+
+ return AllocateArrayEx(TypeHandle(arrayDesc), indexes, argCnt);
+}
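+
+// Shapes accepted above, by example (a sketch of the conventions, not new
+// checks): for a rank-2 array, argCnt may be 2 (lengths only) or 4 (lower
+// bound/length pairs); for a jagged array such as int[][] (nested SZARRAY),
+// each argument supplies the length of one nesting level.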
+
+static BOOL IsActivationNeededForMethodInvoke(MethodDesc * pMD)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // The activation for non-generic instance methods is covered by the non-null "this" pointer
+ if (!pMD->IsStatic() && !pMD->HasMethodInstantiation() && !pMD->IsInterface())
+ return FALSE;
+
+ // We need to activate each time for domain neutral types
+ if (pMD->IsDomainNeutral())
+ return TRUE;
+
+ // We need to activate the instance at least once
+ pMD->EnsureActive();
+ return FALSE;
+}
+
+class ArgIteratorBaseForMethodInvoke
+{
+protected:
+ SIGNATURENATIVEREF * m_ppNativeSig;
+
+ FORCEINLINE CorElementType GetReturnType(TypeHandle * pthValueType)
+ {
+ WRAPPER_NO_CONTRACT;
+ return (*pthValueType = (*m_ppNativeSig)->GetReturnTypeHandle()).GetInternalCorElementType();
+ }
+
+ FORCEINLINE CorElementType GetNextArgumentType(DWORD iArg, TypeHandle * pthValueType)
+ {
+ WRAPPER_NO_CONTRACT;
+ return (*pthValueType = (*m_ppNativeSig)->GetArgumentAt(iArg)).GetInternalCorElementType();
+ }
+
+ FORCEINLINE void Reset()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+public:
+ BOOL HasThis()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (*m_ppNativeSig)->HasThis();
+ }
+
+ BOOL HasParamType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // param type methods are not supported for reflection invoke, so HasParamType is always false for them
+ return FALSE;
+ }
+
+ BOOL IsVarArg()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // vararg methods are not supported for reflection invoke, so IsVarArg is always false for them
+ return FALSE;
+ }
+
+ DWORD NumFixedArgs()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (*m_ppNativeSig)->NumFixedArgs();
+ }
+
+#ifdef FEATURE_INTERPRETER
+ BYTE CallConv()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return IMAGE_CEE_CS_CALLCONV_DEFAULT;
+ }
+#endif // FEATURE_INTERPRETER
+};
+
+class ArgIteratorForMethodInvoke : public ArgIteratorTemplate<ArgIteratorBaseForMethodInvoke>
+{
+public:
+ ArgIteratorForMethodInvoke(SIGNATURENATIVEREF * ppNativeSig)
+ {
+ m_ppNativeSig = ppNativeSig;
+
+ DWORD dwFlags = (*m_ppNativeSig)->GetArgIteratorFlags();
+
+ // Use the cached values if they are available
+ if (dwFlags & SIZE_OF_ARG_STACK_COMPUTED)
+ {
+ m_dwFlags = dwFlags;
+ m_nSizeOfArgStack = (*m_ppNativeSig)->GetSizeOfArgStack();
+ return;
+ }
+
+ //
+ // Compute flags and stack argument size, and cache them for next invocation
+ //
+
+ ForceSigWalk();
+
+ if (IsActivationNeededForMethodInvoke((*m_ppNativeSig)->GetMethod()))
+ {
+ m_dwFlags |= METHOD_INVOKE_NEEDS_ACTIVATION;
+ }
+
+ (*m_ppNativeSig)->SetSizeOfArgStack(m_nSizeOfArgStack);
+ _ASSERTE((*m_ppNativeSig)->GetSizeOfArgStack() == m_nSizeOfArgStack);
+
+ // This has to be last
+ (*m_ppNativeSig)->SetArgIteratorFlags(m_dwFlags);
+ _ASSERTE((*m_ppNativeSig)->GetArgIteratorFlags() == m_dwFlags);
+ }
+
+ BOOL IsActivationNeeded()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_dwFlags & METHOD_INVOKE_NEEDS_ACTIVATION) != 0;
+ }
+};
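+
+// Typical use, mirroring InvokeMethod below (a sketch): construct the iterator
+// once per invocation. After the first call, the flags and stack size are
+// cached on the SignatureNative object, so subsequent constructions are cheap.
+//
+//   ArgIteratorForMethodInvoke argit(&gc.pSig);
+//   if (argit.IsActivationNeeded())
+//       pMeth->EnsureActive();
+//   UINT nStackBytes = argit.SizeOfFrameArgumentArray();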
+
+
+void DECLSPEC_NORETURN ThrowInvokeMethodException(MethodDesc * pMethod, OBJECTREF targetException)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ GCPROTECT_BEGIN(targetException);
+
+#if defined(_DEBUG) && !defined(FEATURE_PAL)
+#ifdef FEATURE_CORECLR
+ if (IsWatsonEnabled())
+#endif // FEATURE_CORECLR
+ {
+ if (!CLRException::IsPreallocatedExceptionObject(targetException))
+ {
+ // If the exception is not preallocated, we should be having the
+ // watson buckets in the throwable already.
+ if(!((EXCEPTIONREF)targetException)->AreWatsonBucketsPresent())
+ {
+ // If an exception is raised by the VM (e.g. type load exception by the JIT) and it comes
+ // across the reflection invocation boundary before CLR's personality routine for managed
+ // code has been invoked, then no buckets would be available for us at this point.
+ //
+ // Since we cannot assert this, better log it for diagnosis if required.
+ LOG((LF_EH, LL_INFO100, "InvokeImpl - No watson buckets available - regular exception likely raised within VM and not seen by managed code.\n"));
+ }
+ }
+ else
+ {
+ // Exception is preallocated.
+ PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = GetThread()->GetExceptionState()->GetUEWatsonBucketTracker();
+ if ((IsThrowableThreadAbortException(targetException) && pUEWatsonBucketTracker->CapturedForThreadAbort())||
+ (pUEWatsonBucketTracker->CapturedAtReflectionInvocation()))
+ {
+ // ReflectionInvocationExceptionFilter would have captured
+ // the watson bucket details for preallocated exceptions
+ // in the UE watson bucket tracker.
+
+ if(pUEWatsonBucketTracker->RetrieveWatsonBuckets() == NULL)
+ {
+ // See comment above
+ LOG((LF_EH, LL_INFO100, "InvokeImpl - No watson buckets available - preallocated exception likely raised within VM and not seen by managed code.\n"));
+ }
+ }
+ }
+ }
+#endif // _DEBUG && !FEATURE_PAL
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // Get the corruption severity of the exception that came in through reflection invocation.
+ CorruptionSeverity severity = GetThread()->GetExceptionState()->GetLastActiveExceptionCorruptionSeverity();
+
+ // Since we are dealing with an exception, set the flag indicating if the target of Reflection can handle exception or not.
+ // This flag is used in CEHelper::CanIDispatchTargetHandleException.
+ GetThread()->GetExceptionState()->SetCanReflectionTargetHandleException(CEHelper::CanMethodHandleException(severity, pMethod));
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ OBJECTREF except = InvokeUtil::CreateTargetExcept(&targetException);
+
+#ifndef FEATURE_PAL
+#ifdef FEATURE_CORECLR
+ if (IsWatsonEnabled())
+#endif // FEATURE_CORECLR
+ {
+ struct
+ {
+ OBJECTREF oExcept;
+ } gcTIE;
+ ZeroMemory(&gcTIE, sizeof(gcTIE));
+ GCPROTECT_BEGIN(gcTIE);
+
+ gcTIE.oExcept = except;
+
+ _ASSERTE(!CLRException::IsPreallocatedExceptionObject(gcTIE.oExcept));
+
+ // If the original exception was preallocated, then copy over the captured
+ // watson buckets to the TargetInvocationException object, if available.
+ //
+ // We don't need to do this if the original exception was not preallocated,
+ // since it already contains the watson buckets inside the object.
+ if (CLRException::IsPreallocatedExceptionObject(targetException))
+ {
+ PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = GetThread()->GetExceptionState()->GetUEWatsonBucketTracker();
+ BOOL fCopyWatsonBuckets = TRUE;
+ PTR_VOID pBuckets = pUEWatsonBucketTracker->RetrieveWatsonBuckets();
+ if (pBuckets != NULL)
+ {
+ // Copy the buckets to the exception object
+ CopyWatsonBucketsToThrowable(pBuckets, gcTIE.oExcept);
+
+ // Confirm that they are present.
+ _ASSERTE(((EXCEPTIONREF)gcTIE.oExcept)->AreWatsonBucketsPresent());
+ }
+
+ // Clear the UE watson bucket tracker since the bucketing
+ // details are now in the TargetInvocationException object.
+ pUEWatsonBucketTracker->ClearWatsonBucketDetails();
+ }
+
+ // update "except" incase the reference to the object
+ // was updated by the GC
+ except = gcTIE.oExcept;
+ GCPROTECT_END();
+ }
+#endif // !FEATURE_PAL
+
+ // Since the original exception is inner of target invocation exception,
+ // when TIE is seen to be raised for the first time, we will end up
+ // using the inner exception buckets automatically.
+
+ // Since VM is throwing the exception, we set it to use the same corruption severity
+ // that the original exception came in with from reflection invocation.
+ COMPlusThrow(except
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+
+ GCPROTECT_END();
+}
+
+FCIMPL4(Object*, RuntimeMethodHandle::InvokeMethod,
+ Object *target, PTRArray *objs, SignatureNative* pSigUNSAFE, CLR_BOOL fConstructor)
+{
+ FCALL_CONTRACT;
+
+ struct {
+ OBJECTREF target;
+ PTRARRAYREF args;
+ SIGNATURENATIVEREF pSig;
+ OBJECTREF retVal;
+ } gc;
+
+ gc.target = ObjectToOBJECTREF(target);
+ gc.args = (PTRARRAYREF)objs;
+ gc.pSig = (SIGNATURENATIVEREF)pSigUNSAFE;
+ gc.retVal = NULL;
+
+ MethodDesc* pMeth = gc.pSig->GetMethod();
+ TypeHandle ownerType = gc.pSig->GetDeclaringType();
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ Assembly *pAssem = pMeth->GetAssembly();
+
+ if (pAssem->IsIntrospectionOnly())
+ COMPlusThrow(kInvalidOperationException, IDS_EE_CODEEXECUTION_IN_INTROSPECTIVE_ASSEMBLY);
+
+ // We should throw NotSupportedException here.
+ // But for backward compatibility we are throwing TargetException instead.
+ if (pAssem->IsDynamic() && !pAssem->HasRunAccess())
+ COMPlusThrow(kTargetException);
+
+ if (ownerType.IsSharedByGenericInstantiations())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_Type"));
+
+#ifdef _DEBUG
+ if (g_pConfig->ShouldInvokeHalt(pMeth))
+ {
+ _ASSERTE(!"InvokeHalt");
+ }
+#endif
+
+ // Skip the activation optimization for remoting because the remoting proxy is not always activated.
+ // It would be nice to clean this up and get remoting to always activate the MethodTable behind the proxy.
+ BOOL fForceActivationForRemoting = FALSE;
+
+ if (fConstructor)
+ {
+ // If we are invoking a constructor on an array then we must
+ // handle this specially. String objects allocate themselves
+ // so they are a special case.
+ if (ownerType.IsArray()) {
+ gc.retVal = InvokeArrayConstructor(ownerType.AsArray(),
+ pMeth,
+ &gc.args,
+ gc.pSig->NumFixedArgs());
+ goto Done;
+ }
+
+ MethodTable * pMT = ownerType.AsMethodTable();
+
+#ifdef FEATURE_REMOTING
+ if (pMT->MayRequireManagedActivation())
+ {
+ gc.retVal = CRemotingServices::CreateProxyOrObject(pMT);
+ fForceActivationForRemoting = TRUE;
+ }
+ else
+#endif
+ {
+ if (pMT != g_pStringClass)
+ gc.retVal = pMT->Allocate();
+ }
+ }
+ else
+ {
+#ifdef FEATURE_REMOTING
+ if (gc.target != NULL)
+ {
+ fForceActivationForRemoting = gc.target->IsTransparentProxy();
+ }
+#endif
+ }
+
+ {
+ ArgIteratorForMethodInvoke argit(&gc.pSig);
+
+ if (argit.IsActivationNeeded() || fForceActivationForRemoting)
+ pMeth->EnsureActive();
+ CONSISTENCY_CHECK(pMeth->CheckActivated());
+
+ UINT nStackBytes = argit.SizeOfFrameArgumentArray();
+
+ // Note that SizeOfFrameArgumentArray does overflow checks with sufficient margin to prevent overflows here
+ SIZE_T nAllocaSize = TransitionBlock::GetNegSpaceSize() + sizeof(TransitionBlock) + nStackBytes;
+
+ Thread * pThread = GET_THREAD();
+
+ // Make sure we have enough room on the stack for this. Note that we will need the stack amount twice - once to build the stack
+ // and a second time to actually make the call.
+ INTERIOR_STACK_PROBE_FOR(pThread, 1 + static_cast<UINT>((2 * nAllocaSize) / OS_PAGE_SIZE) + static_cast<UINT>(HOLDER_CODE_NORMAL_STACK_LIMIT));
+
+ LPBYTE pAlloc = (LPBYTE)_alloca(nAllocaSize);
+
+ LPBYTE pTransitionBlock = pAlloc + TransitionBlock::GetNegSpaceSize();
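+
+ // Layout of the _alloca'd block built here (sketch):
+ //   pAlloc ............................. negative space below the block
+ //   pAlloc + GetNegSpaceSize() ......... TransitionBlock (argument registers etc.)
+ //   pTransitionBlock + sizeof(TB) ...... outgoing stack arguments (nStackBytes)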
+
+ CallDescrData callDescrData;
+
+ callDescrData.pSrc = pTransitionBlock + sizeof(TransitionBlock);
+ callDescrData.numStackSlots = nStackBytes / STACK_ELEM_SIZE;
+#ifdef CALLDESCR_ARGREGS
+ callDescrData.pArgumentRegisters = (ArgumentRegisters*)(pTransitionBlock + TransitionBlock::GetOffsetOfArgumentRegisters());
+#endif
+#ifdef CALLDESCR_FPARGREGS
+ callDescrData.pFloatArgumentRegisters = NULL;
+#endif
+#ifdef CALLDESCR_REGTYPEMAP
+ callDescrData.dwRegTypeMap = 0;
+#endif
+ callDescrData.fpReturnSize = argit.GetFPReturnSize();
+
+ // This is duplicated logic from MethodDesc::GetCallTarget
+ PCODE pTarget;
+ if (pMeth->IsVtableMethod())
+ {
+ pTarget = pMeth->GetSingleCallableAddrOfVirtualizedCode(&gc.target, ownerType);
+ }
+ else
+ {
+ pTarget = pMeth->GetSingleCallableAddrOfCode();
+ }
+ callDescrData.pTarget = pTarget;
+
+ // Build the arguments on the stack
+
+ GCStress<cfg_any>::MaybeTrigger();
+
+ FrameWithCookie<ProtectValueClassFrame> *pProtectValueClassFrame = NULL;
+ ValueClassInfo *pValueClasses = NULL;
+ ByRefToNullable* byRefToNullables = NULL;
+
+ // if we have the magic Value Class return, we need to allocate that class
+ // and place a pointer to it on the stack.
+
+ TypeHandle retTH = gc.pSig->GetReturnTypeHandle();
+ BOOL fHasRetBuffArg = argit.HasRetBuffArg();
+ CorElementType retType = retTH.GetInternalCorElementType();
+ if (retType == ELEMENT_TYPE_VALUETYPE || fHasRetBuffArg) {
+ gc.retVal = retTH.GetMethodTable()->Allocate();
+ }
+
+ // Copy "this" pointer
+ if (!pMeth->IsStatic()) {
+ PVOID pThisPtr;
+
+ if (fConstructor)
+ {
+ // Copy "this" pointer: only unbox if type is value type and method is not unboxing stub
+ if (ownerType.IsValueType() && !pMeth->IsUnboxingStub()) {
+ // Note that we create a true boxed Nullable<T> and then convert it to a T below
+ pThisPtr = gc.retVal->GetData();
+ }
+ else
+ pThisPtr = OBJECTREFToObject(gc.retVal);
+ }
+ else
+ if (!pMeth->GetMethodTable()->IsValueType())
+ pThisPtr = OBJECTREFToObject(gc.target);
+ else {
+ if (pMeth->IsUnboxingStub())
+ pThisPtr = OBJECTREFToObject(gc.target);
+ else {
+ // Create a true boxed Nullable<T> and use that as the 'this' pointer,
+ // since what is passed in is just a boxed T.
+ MethodTable* pMT = pMeth->GetMethodTable();
+ if (Nullable::IsNullableType(pMT)) {
+ OBJECTREF bufferObj = pMT->Allocate();
+ void* buffer = bufferObj->GetData();
+ Nullable::UnBox(buffer, gc.target, pMT);
+ pThisPtr = buffer;
+ }
+ else
+ pThisPtr = gc.target->UnBox();
+ }
+ }
+
+ *((LPVOID*) (pTransitionBlock + argit.GetThisOffset())) = pThisPtr;
+ }
+
+ // NO GC AFTER THIS POINT. The object references in the method frame are not protected.
+ //
+ // We have already copied "this" pointer so we do not want GC to happen even sooner. Unfortunately,
+ // we may allocate in the process of copying this pointer that makes it hard to express using contracts.
+ //
+ // If an exception occurs a gc may happen but we are going to dump the stack anyway and we do
+ // not need to protect anything.
+
+ PVOID pRetBufStackCopy = NULL;
+
+ {
+ BEGINFORBIDGC();
+#ifdef _DEBUG
+ GCForbidLoaderUseHolder forbidLoaderUse;
+#endif
+
+ // Take care of any return arguments
+ if (fHasRetBuffArg)
+ {
+ // We stack-allocate this ret buff, to preserve the invariant that ret-buffs are always in the
+ // caller's stack frame. We'll copy into gc.retVal later.
+ TypeHandle retTH = gc.pSig->GetReturnTypeHandle();
+ MethodTable* pMT = retTH.GetMethodTable();
+ if (pMT->IsStructRequiringStackAllocRetBuf())
+ {
+ SIZE_T sz = pMT->GetNumInstanceFieldBytes();
+ pRetBufStackCopy = _alloca(sz);
+ memset(pRetBufStackCopy, 0, sz);
+
+ pValueClasses = new (_alloca(sizeof(ValueClassInfo))) ValueClassInfo(pRetBufStackCopy, pMT, pValueClasses);
+ *((LPVOID*) (pTransitionBlock + argit.GetRetBuffArgOffset())) = pRetBufStackCopy;
+ }
+ else
+ {
+ PVOID pRetBuff = gc.retVal->GetData();
+ *((LPVOID*) (pTransitionBlock + argit.GetRetBuffArgOffset())) = pRetBuff;
+ }
+ }
+
+ // copy args
+ UINT nNumArgs = gc.pSig->NumFixedArgs();
+ for (UINT i = 0 ; i < nNumArgs; i++) {
+
+ TypeHandle th = gc.pSig->GetArgumentAt(i);
+
+ int ofs = argit.GetNextOffset();
+ _ASSERTE(ofs != TransitionBlock::InvalidOffset);
+
+#ifdef CALLDESCR_REGTYPEMAP
+ FillInRegTypeMap(ofs, argit.GetArgType(), (BYTE *)&callDescrData.dwRegTypeMap);
+#endif
+
+#ifdef CALLDESCR_FPARGREGS
+ // Under CALLDESCR_FPARGREGS, negative offsets indicate arguments in floating point registers. If we have at
+ // least one such argument, we point the call worker at the floating point area of the frame (we leave
+ // it null otherwise, since the worker can perform a useful optimization if it knows no floating point
+ // registers need to be set up).
+ if ((ofs < 0) && (callDescrData.pFloatArgumentRegisters == NULL))
+ callDescrData.pFloatArgumentRegisters = (FloatArgumentRegisters*) (pTransitionBlock +
+ TransitionBlock::GetOffsetOfFloatArgumentRegisters());
+#endif
+
+ UINT structSize = argit.GetArgSize();
+
+ bool needsStackCopy = false;
+ PVOID pArgDst = pTransitionBlock + ofs;
+
+ TypeHandle nullableType = NullableTypeOfByref(th);
+ if (!nullableType.IsNull()) {
+ th = nullableType;
+ structSize = th.GetSize();
+ needsStackCopy = true;
+ }
+#ifdef ENREGISTERED_PARAMTYPE_MAXSIZE
+ else
+ if (argit.IsArgPassedByRef()) {
+ needsStackCopy = true;
+ }
+#endif
+
+ if(needsStackCopy)
+ {
+ MethodTable * pMT = th.GetMethodTable();
+ _ASSERTE(pMT && pMT->IsValueType());
+
+ PVOID pStackCopy = _alloca(structSize);
+ *(PVOID *)pArgDst = pStackCopy;
+ pArgDst = pStackCopy;
+
+ if (!nullableType.IsNull())
+ {
+ byRefToNullables = new(_alloca(sizeof(ByRefToNullable))) ByRefToNullable(i, pStackCopy, nullableType, byRefToNullables);
+ }
+
+ // save the info into ValueClassInfo
+ if (pMT->ContainsPointers())
+ {
+ pValueClasses = new (_alloca(sizeof(ValueClassInfo))) ValueClassInfo(pStackCopy, pMT, pValueClasses);
+ }
+ }
+
+ InvokeUtil::CopyArg(th, &(gc.args->m_Array[i]), pArgDst);
+ }
+
+ ENDFORBIDGC();
+ }
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ // By default, set the flag in TES indicating the reflection target can handle CSE.
+ // This flag is used in CEHelper::CanIDispatchTargetHandleException.
+ pThread->GetExceptionState()->SetCanReflectionTargetHandleException(TRUE);
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ if (pValueClasses != NULL)
+ {
+ pProtectValueClassFrame = new (_alloca (sizeof (FrameWithCookie<ProtectValueClassFrame>)))
+ FrameWithCookie<ProtectValueClassFrame>(pThread, pValueClasses);
+ }
+
+ // The sole purpose of having this frame is to tell the debugger that we have a catch handler here
+ // which may swallow managed exceptions. The debugger needs this in order to send a
+ // CatchHandlerFound (CHF) notification.
+ FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame(pThread);
+
+ // Call the method
+ bool fExceptionThrown = false;
+ EX_TRY_THREAD(pThread) {
+ CallDescrWorkerReflectionWrapper(&callDescrData, &catchFrame);
+ } EX_CATCH {
+ // Rethrow transient exceptions for constructors for backward compatibility
+ if (fConstructor && GET_EXCEPTION()->IsTransient())
+ {
+ EX_RETHROW;
+ }
+
+ // Abuse retval to store the exception object
+ gc.retVal = GET_THROWABLE();
+ _ASSERTE(gc.retVal);
+
+ fExceptionThrown = true;
+ } EX_END_CATCH(SwallowAllExceptions);
+
+ catchFrame.Pop(pThread);
+
+ // Now that we are safely out of the catch block, we can create and raise the
+ // TargetInvocationException.
+ if (fExceptionThrown)
+ {
+ ThrowInvokeMethodException(pMeth, gc.retVal);
+ }
+
+ // It is still illegal to do a GC here. The return type might have/contain GC pointers.
+ if (fConstructor)
+ {
+ // We have a special case for Strings... the object itself is returned.
+ if (ownerType == TypeHandle(g_pStringClass)) {
+ PVOID pReturnValue = &callDescrData.returnValue;
+ gc.retVal = *(OBJECTREF *)pReturnValue;
+ }
+
+ // If it is a Nullable<T>, box it using Nullable<T> conventions.
+ // TODO: this double-allocates on construction, which is wasteful.
+ gc.retVal = Nullable::NormalizeBox(gc.retVal);
+ }
+ else
+ if (retType == ELEMENT_TYPE_VALUETYPE)
+ {
+ _ASSERTE(gc.retVal != NULL);
+
+ // if the structure is returned by value, then we need to copy it into the boxed object
+ // we have allocated for this purpose.
+ if (!fHasRetBuffArg)
+ {
+ CopyValueClass(gc.retVal->GetData(), &callDescrData.returnValue, gc.retVal->GetMethodTable(), gc.retVal->GetAppDomain());
+ }
+ else if (pRetBufStackCopy)
+ {
+ CopyValueClass(gc.retVal->GetData(), pRetBufStackCopy, gc.retVal->GetMethodTable(), gc.retVal->GetAppDomain());
+ }
+ // From here on out, it is OK to have GCs since the return object (which may have had
+ // GC pointers) has been put into a GC object and is thus protected.
+
+ // TODO: this creates two objects, which is inefficient.
+ // If the return type is a Nullable<T>, box it into the correct form.
+ gc.retVal = Nullable::NormalizeBox(gc.retVal);
+ }
+ else
+ {
+ gc.retVal = InvokeUtil::CreateObject(retTH, &callDescrData.returnValue);
+ }
+
+ while (byRefToNullables != NULL) {
+ OBJECTREF obj = Nullable::Box(byRefToNullables->data, byRefToNullables->type.GetMethodTable());
+ SetObjectReference(&gc.args->m_Array[byRefToNullables->argNum], obj, gc.args->GetAppDomain());
+ byRefToNullables = byRefToNullables->next;
+ }
+
+ if (pProtectValueClassFrame != NULL)
+ pProtectValueClassFrame->Pop(pThread);
+
+ END_INTERIOR_STACK_PROBE;
+ }
+
+Done:
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(gc.retVal);
+}
+FCIMPLEND
+
+#ifdef FEATURE_SERIALIZATION
+FCIMPL4(void, RuntimeMethodHandle::SerializationInvoke,
+ ReflectMethodObject *pMethodUNSAFE, Object* targetUNSAFE, Object* serializationInfoUNSAFE, struct StreamingContextData * pContext) {
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ OBJECTREF target;
+ OBJECTREF serializationInfo;
+ REFLECTMETHODREF refMethod;
+ } gc;
+
+ gc.target = (OBJECTREF) targetUNSAFE;
+ gc.serializationInfo = (OBJECTREF) serializationInfoUNSAFE;
+ gc.refMethod = (REFLECTMETHODREF)ObjectToOBJECTREF(pMethodUNSAFE);
+
+ MethodDesc* pMethod = pMethodUNSAFE->GetMethod();
+
+ Assembly *pAssem = pMethod->GetAssembly();
+
+ if (pAssem->IsIntrospectionOnly())
+ FCThrowExVoid(kInvalidOperationException, IDS_EE_CODEEXECUTION_IN_INTROSPECTIVE_ASSEMBLY, NULL, NULL, NULL);
+
+ if (pAssem->IsDynamic() && !pAssem->HasRunAccess())
+ FCThrowResVoid(kNotSupportedException, W("NotSupported_DynamicAssemblyNoRunAccess"));
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ {
+ ARG_SLOT newArgs[3];
+
+ // Nullable<T> does not support the ISerializable constructor, so we should never get here.
+ _ASSERTE(!Nullable::IsNullableType(gc.target->GetMethodTable()));
+
+ if (pMethod == MscorlibBinder::GetMethod(METHOD__WINDOWS_IDENTITY__SERIALIZATION_CTOR))
+ {
+ // WindowsIdentity.ctor takes only one argument
+ MethodDescCallSite method(pMethod, &gsig_IM_SerInfo_RetVoid, &gc.target);
+
+ // NO GC AFTER THIS POINT
+ // Copy "this" pointer: only unbox if type is value type and method is not unboxing stub
+ if (pMethod->GetMethodTable()->IsValueType() && !pMethod->IsUnboxingStub())
+ newArgs[0] = PtrToArgSlot(gc.target->UnBox());
+ else
+ newArgs[0] = ObjToArgSlot(gc.target);
+
+ newArgs[1] = ObjToArgSlot(gc.serializationInfo);
+
+ TryCallMethod(&method, newArgs);
+ }
+ else
+ {
+ //
+ // Use hardcoded sig for performance
+ //
+ MethodDescCallSite method(pMethod, &gsig_IM_SerInfo_StrContext_RetVoid, &gc.target);
+
+ // NO GC AFTER THIS POINT
+ // Copy "this" pointer: only unbox if type is value type and method is not unboxing stub
+ if (pMethod->GetMethodTable()->IsValueType() && !pMethod->IsUnboxingStub())
+ newArgs[0] = PtrToArgSlot(gc.target->UnBox());
+ else
+ newArgs[0] = ObjToArgSlot(gc.target);
+
+ newArgs[1] = ObjToArgSlot(gc.serializationInfo);
+
+#ifdef _WIN64
+ //
+ // on win64 the struct does not fit in an ARG_SLOT, so we pass it by reference
+ //
+ static_assert_no_msg(sizeof(*pContext) > sizeof(ARG_SLOT));
+ newArgs[2] = PtrToArgSlot(pContext);
+#else // _WIN64
+ //
+ // on x86 the struct fits in an ARG_SLOT, so we pass it by value
+ //
+ static_assert_no_msg(sizeof(*pContext) == sizeof(ARG_SLOT));
+ newArgs[2] = *(ARG_SLOT*)pContext;
+#endif // _WIN64
+
+ TryCallMethod(&method, newArgs);
+ }
+ }
+
+ HELPER_METHOD_FRAME_END_POLL();
+}
+FCIMPLEND
+#endif // FEATURE_SERIALIZATION
+
+struct SkipStruct {
+ StackCrawlMark* pStackMark;
+ MethodDesc* pMeth;
+};
+
+// This method is called by the GetCurrentMethod function and will crawl backward
+// up the stack to locate the caller's method.
+static StackWalkAction SkipMethods(CrawlFrame* frame, VOID* data) {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SkipStruct* pSkip = (SkipStruct*) data;
+
+ MethodDesc *pFunc = frame->GetFunction();
+
+ /* We asked to be called back only for functions */
+ _ASSERTE(pFunc);
+
+ // The check here is between the address of a local variable
+ // (the stack mark) and a pointer to the EIP for a frame
+ // (which is actually the pointer to the return address to the
+ // function from the previous frame). So we'll actually notice
+ // which frame the stack mark was in one frame later. This is
+ // fine since we only implement LookForMyCaller.
+ _ASSERTE(*pSkip->pStackMark == LookForMyCaller);
+ if (!frame->IsInCalleesFrames(pSkip->pStackMark))
+ return SWA_CONTINUE;
+
+ if (pFunc->RequiresInstMethodDescArg())
+ {
+ pSkip->pMeth = (MethodDesc *) frame->GetParamTypeArg();
+ if (pSkip->pMeth == NULL)
+ pSkip->pMeth = pFunc;
+ }
+ else
+ pSkip->pMeth = pFunc;
+ return SWA_ABORT;
+}
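+
+// Usage sketch: the managed caller hands us a stack mark initialized to
+// LookForMyCaller; the walk skips frames until the mark lies in a callee's
+// frame and then records that method (see GetCurrentMethod below).
+//
+//   SkipStruct skip = { pStackMark, NULL };
+//   StackWalkFunctions(GetThread(), SkipMethods, &skip);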
+
+// Return the MethodInfo that represents the current method (two above this one)
+FCIMPL1(ReflectMethodObject*, RuntimeMethodHandle::GetCurrentMethod, StackCrawlMark* stackMark) {
+ FCALL_CONTRACT;
+ REFLECTMETHODREF pRet = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+ SkipStruct skip;
+ skip.pStackMark = stackMark;
+ skip.pMeth = 0;
+ StackWalkFunctions(GetThread(), SkipMethods, &skip);
+
+ // If C<Foo>.m<Bar> was called, the stack walker returns C<object>.m<object>. We cannot
+ // know whether the instantiation used Foo or Bar at that point. So the next best thing
+ // is to return C<T>.m<P>, and that's what LoadTypicalMethodDefinition will do for us.
+
+ if (skip.pMeth != NULL)
+ pRet = skip.pMeth->LoadTypicalMethodDefinition()->GetStubMethodInfo();
+ else
+ pRet = NULL;
+
+ HELPER_METHOD_FRAME_END();
+
+ return (ReflectMethodObject*)OBJECTREFToObject(pRet);
+}
+FCIMPLEND
+
+static OBJECTREF DirectObjectFieldGet(FieldDesc *pField, TypeHandle fieldType, TypeHandle enclosingType, TypedByRef *pTarget, CLR_BOOL *pDomainInitialized) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+
+ PRECONDITION(CheckPointer(pField));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF refRet;
+ OBJECTREF objref = NULL;
+ GCPROTECT_BEGIN(objref);
+ if (!pField->IsStatic()) {
+ objref = ObjectToOBJECTREF(*((Object**)pTarget->data));
+ }
+
+ InvokeUtil::ValidateObjectTarget(pField, enclosingType, &objref);
+ refRet = InvokeUtil::GetFieldValue(pField, fieldType, &objref, enclosingType, pDomainInitialized);
+ GCPROTECT_END();
+ return refRet;
+}
+
+FCIMPL4(Object*, RuntimeFieldHandle::GetValueDirect, ReflectFieldObject *pFieldUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE, TypedByRef *pTarget, ReflectClassBaseObject *pDeclaringTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ struct
+ {
+ REFLECTCLASSBASEREF refFieldType;
+ REFLECTCLASSBASEREF refDeclaringType;
+ REFLECTFIELDREF refField;
+ }gc;
+ gc.refFieldType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE);
+ gc.refDeclaringType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pDeclaringTypeUNSAFE);
+ gc.refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE);
+
+ if ((gc.refFieldType == NULL) || (gc.refField == NULL))
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle fieldType = gc.refFieldType->GetType();
+
+ FieldDesc *pField = gc.refField->GetField();
+
+ Assembly *pAssem = pField->GetModule()->GetAssembly();
+
+ if (pAssem->IsIntrospectionOnly())
+ FCThrowEx(kInvalidOperationException, IDS_EE_CODEEXECUTION_IN_INTROSPECTIVE_ASSEMBLY, NULL, NULL, NULL);
+
+ // We should throw NotSupportedException here.
+ // But for backward compatibility we are throwing FieldAccessException instead.
+ if (pAssem->IsDynamic() && !pAssem->HasRunAccess())
+ FCThrow(kFieldAccessException);
+
+ OBJECTREF refRet = NULL;
+ CorElementType fieldElType;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ // Find the Object and its type
+ TypeHandle targetType = pTarget->type;
+ _ASSERTE(gc.refDeclaringType == NULL || !gc.refDeclaringType->GetType().IsTypeDesc());
+ MethodTable *pEnclosingMT = (gc.refDeclaringType != NULL ? gc.refDeclaringType->GetType() : TypeHandle()).AsMethodTable();
+
+ // Verify the callee/caller access
+ if (!pField->IsPublic() || (pEnclosingMT != NULL && !pEnclosingMT->IsExternallyVisible()))
+ {
+
+ bool targetRemoted = false;
+
+#ifndef FEATURE_CORECLR
+ targetRemoted = !targetType.IsNull() && InvokeUtil::IsTargetRemoted(pField, targetType.AsMethodTable());
+#endif //FEATURE_CORECLR
+
+ RefSecContext sCtx(InvokeUtil::GetInvocationAccessCheckType(targetRemoted));
+
+ MethodTable* pInstanceMT = NULL;
+ if (!pField->IsStatic())
+ {
+ if (!targetType.IsTypeDesc())
+ pInstanceMT = targetType.AsMethodTable();
+ }
+
+ // TODO: missing check that the field is consistent
+
+ // Perform the normal access check (caller vs field).
+ InvokeUtil::CanAccessField(&sCtx,
+ pEnclosingMT,
+ pInstanceMT,
+ pField);
+ }
+
+ CLR_BOOL domainInitialized = FALSE;
+ if (pField->IsStatic() || !targetType.IsValueType()) {
+ refRet = DirectObjectFieldGet(pField, fieldType, TypeHandle(pEnclosingMT), pTarget, &domainInitialized);
+ goto lExit;
+ }
+
+ // Validate that the target type can be cast to the type that owns this field info.
+ if (!targetType.CanCastTo(TypeHandle(pEnclosingMT)))
+ COMPlusThrowArgumentException(W("obj"), NULL);
+
+ // This is a workaround: the previous case may leave us with an enum,
+ // which we want to process here.
+ // Get the value from the field.
+ void* p;
+ fieldElType = fieldType.GetSignatureCorElementType();
+ switch (fieldElType) {
+ case ELEMENT_TYPE_VOID:
+ _ASSERTE(!"Void used as Field Type!");
+ COMPlusThrow(kInvalidProgramException);
+
+ case ELEMENT_TYPE_BOOLEAN: // boolean
+ case ELEMENT_TYPE_I1: // byte
+ case ELEMENT_TYPE_U1: // unsigned byte
+ case ELEMENT_TYPE_I2: // short
+ case ELEMENT_TYPE_U2: // unsigned short
+ case ELEMENT_TYPE_CHAR: // char
+ case ELEMENT_TYPE_I4: // int
+ case ELEMENT_TYPE_U4: // unsigned int
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_R4: // float
+ case ELEMENT_TYPE_I8: // long
+ case ELEMENT_TYPE_U8: // unsigned long
+ case ELEMENT_TYPE_R8: // double
+ case ELEMENT_TYPE_VALUETYPE:
+ _ASSERTE(!fieldType.IsTypeDesc());
+ p = ((BYTE*) pTarget->data) + pField->GetOffset();
+ refRet = fieldType.AsMethodTable()->Box(p);
+ break;
+
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_SZARRAY: // Single Dim, Zero
+ case ELEMENT_TYPE_ARRAY: // general array
+ p = ((BYTE*) pTarget->data) + pField->GetOffset();
+ refRet = ObjectToOBJECTREF(*(Object**) p);
+ break;
+
+ case ELEMENT_TYPE_PTR:
+ {
+ p = ((BYTE*) pTarget->data) + pField->GetOffset();
+
+ refRet = InvokeUtil::CreatePointer(fieldType, *(void **)p);
+
+ break;
+ }
+
+ default:
+ _ASSERTE(!"Unknown Type");
+ // this is really an impossible condition
+ COMPlusThrow(kNotSupportedException);
+ }
+
+lExit: ;
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(refRet);
+}
+FCIMPLEND
+
+static void DirectObjectFieldSet(FieldDesc *pField, TypeHandle fieldType, TypeHandle enclosingType, TypedByRef *pTarget, OBJECTREF *pValue, CLR_BOOL *pDomainInitialized) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+
+ PRECONDITION(CheckPointer(pField));
+ PRECONDITION(!fieldType.IsNull());
+ }
+ CONTRACTL_END;
+
+ OBJECTREF objref = NULL;
+ GCPROTECT_BEGIN(objref);
+ if (!pField->IsStatic()) {
+ objref = ObjectToOBJECTREF(*((Object**)pTarget->data));
+ }
+ // Validate the target/fld type relationship
+ InvokeUtil::ValidateObjectTarget(pField, enclosingType, &objref);
+
+ InvokeUtil::ValidField(fieldType, pValue);
+ InvokeUtil::SetValidField(pField->GetFieldType(), fieldType, pField, &objref, pValue, enclosingType, pDomainInitialized);
+ GCPROTECT_END();
+}
+
+FCIMPL5(void, RuntimeFieldHandle::SetValueDirect, ReflectFieldObject *pFieldUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE, TypedByRef *pTarget, Object *valueUNSAFE, ReflectClassBaseObject *pContextTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ struct _gc
+ {
+ OBJECTREF oValue;
+ REFLECTCLASSBASEREF pFieldType;
+ REFLECTCLASSBASEREF pContextType;
+ REFLECTFIELDREF refField;
+ }gc;
+
+ gc.oValue = ObjectToOBJECTREF(valueUNSAFE);
+ gc.pFieldType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE);
+ gc.pContextType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pContextTypeUNSAFE);
+ gc.refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE);
+
+ if ((gc.pFieldType == NULL) || (gc.refField == NULL))
+ FCThrowResVoid(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle fieldType = gc.pFieldType->GetType();
+ TypeHandle contextType = (gc.pContextType != NULL) ? gc.pContextType->GetType() : NULL;
+
+ FieldDesc *pField = gc.refField->GetField();
+
+ Assembly *pAssem = pField->GetModule()->GetAssembly();
+
+ if (pAssem->IsIntrospectionOnly())
+ FCThrowExVoid(kInvalidOperationException, IDS_EE_CODEEXECUTION_IN_INTROSPECTIVE_ASSEMBLY, NULL, NULL, NULL);
+
+ // We should throw NotSupportedException here.
+ // But for backward compatibility we are throwing FieldAccessException instead.
+ if (pAssem->IsDynamic() && !pAssem->HasRunAccess())
+ FCThrowVoid(kFieldAccessException);
+
+ BYTE *pDst = NULL;
+ ARG_SLOT value = NULL;
+ CorElementType fieldElType;
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ // Find the Object and its type
+ TypeHandle targetType = pTarget->type;
+ MethodTable *pEnclosingMT = contextType.GetMethodTable();
+
+ {
+ // Verify that the value passed can be widened into the target
+ InvokeUtil::ValidField(fieldType, &gc.oValue);
+
+ // Verify that this is not a Final Field
+ DWORD attr = pField->GetAttributes(); // should we cache?
+ if (IsFdInitOnly(attr)) {
+ TryDemand(SECURITY_SERIALIZATION, kFieldAccessException, W("Acc_ReadOnly"));
+ }
+ if (IsFdHasFieldRVA(attr)) {
+ TryDemand(SECURITY_SKIP_VER, kFieldAccessException, W("Acc_RvaStatic"));
+ }
+ if (IsFdLiteral(attr))
+ COMPlusThrow(kFieldAccessException,W("Acc_ReadOnly"));
+
+ // Verify the callee/caller access
+ if (!pField->IsPublic() || (pEnclosingMT != NULL && !pEnclosingMT->IsExternallyVisible()))
+ {
+ // security and consistency checks
+
+ bool targetRemoted = false;
+#ifndef FEATURE_CORECLR
+ targetRemoted = !targetType.IsNull() && InvokeUtil::IsTargetRemoted(pField, targetType.AsMethodTable());
+#endif //FEATURE_CORECLR
+
+ RefSecContext sCtx(InvokeUtil::GetInvocationAccessCheckType(targetRemoted));
+
+ MethodTable* pInstanceMT = NULL;
+ if (!pField->IsStatic()) {
+ if (!targetType.IsTypeDesc())
+ pInstanceMT = targetType.AsMethodTable();
+ }
+
+ // TODO: missing check that the field is consistent
+
+ // Perform the normal access check (caller vs field).
+ InvokeUtil::CanAccessField(&sCtx,
+ pEnclosingMT,
+ pInstanceMT,
+ pField);
+ }
+
+ }
+
+ CLR_BOOL domainInitialized = FALSE;
+ if (pField->IsStatic() || !targetType.IsValueType()) {
+ DirectObjectFieldSet(pField, fieldType, TypeHandle(pEnclosingMT), pTarget, &gc.oValue, &domainInitialized);
+ goto lExit;
+ }
+
+ if (gc.oValue == NULL && fieldType.IsValueType() && !Nullable::IsNullableType(fieldType))
+ COMPlusThrowArgumentNull(W("value"));
+
+ // Validate that the target type can be cast to the type that owns this field info.
+ if (!targetType.CanCastTo(TypeHandle(pEnclosingMT)))
+ COMPlusThrowArgumentException(W("obj"), NULL);
+
+ // Set the field
+ fieldElType = fieldType.GetInternalCorElementType();
+ if (ELEMENT_TYPE_BOOLEAN <= fieldElType && fieldElType <= ELEMENT_TYPE_R8) {
+ CorElementType objType = gc.oValue->GetTypeHandle().GetInternalCorElementType();
+ if (objType != fieldElType)
+ InvokeUtil::CreatePrimitiveValue(fieldElType, objType, gc.oValue, &value);
+ else
+ value = *(ARG_SLOT*)gc.oValue->UnBox();
+ }
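+ // Example (a sketch of the widening path): setting an I8 field from a boxed
+ // I4 takes the CreatePrimitiveValue branch above, which widens the 32-bit
+ // payload into the 64-bit ARG_SLOT before the VolatileStore below writes it.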
+ pDst = ((BYTE*) pTarget->data) + pField->GetOffset();
+
+ switch (fieldElType) {
+ case ELEMENT_TYPE_VOID:
+ _ASSERTE(!"Void used as Field Type!");
+ COMPlusThrow(kInvalidProgramException);
+
+ case ELEMENT_TYPE_BOOLEAN: // boolean
+ case ELEMENT_TYPE_I1: // byte
+ case ELEMENT_TYPE_U1: // unsigned byte
+ VolatileStore((UINT8*)pDst, *(UINT8*)&value);
+ break;
+
+ case ELEMENT_TYPE_I2: // short
+ case ELEMENT_TYPE_U2: // unsigned short
+ case ELEMENT_TYPE_CHAR: // char
+ VolatileStore((UINT16*)pDst, *(UINT16*)&value);
+ break;
+
+ case ELEMENT_TYPE_I4: // int
+ case ELEMENT_TYPE_U4: // unsigned int
+ case ELEMENT_TYPE_R4: // float
+ VolatileStore((UINT32*)pDst, *(UINT32*)&value);
+ break;
+
+ case ELEMENT_TYPE_I8: // long
+ case ELEMENT_TYPE_U8: // unsigned long
+ case ELEMENT_TYPE_R8: // double
+ VolatileStore((UINT64*)pDst, *(UINT64*)&value);
+ break;
+
+ case ELEMENT_TYPE_I:
+ {
+ INT_PTR valuePtr = (INT_PTR) InvokeUtil::GetIntPtrValue(gc.oValue);
+ VolatileStore((INT_PTR*) pDst, valuePtr);
+ }
+ break;
+ case ELEMENT_TYPE_U:
+ {
+ UINT_PTR valuePtr = (UINT_PTR) InvokeUtil::GetIntPtrValue(gc.oValue);
+ VolatileStore((UINT_PTR*) pDst, valuePtr);
+ }
+ break;
+
+ case ELEMENT_TYPE_PTR: // pointers
+ if (gc.oValue != 0) {
+ value = 0;
+ if (MscorlibBinder::IsClass(gc.oValue->GetMethodTable(), CLASS__POINTER)) {
+ value = (size_t) InvokeUtil::GetPointerValue(gc.oValue);
+#ifdef _MSC_VER
+#pragma warning(disable: 4267) //work-around for compiler
+#endif
+ VolatileStore((size_t*) pDst, (size_t) value);
+#ifdef _MSC_VER
+#pragma warning(default: 4267)
+#endif
+ break;
+ }
+ }
+ // fall through
+ case ELEMENT_TYPE_FNPTR:
+ {
+ value = 0;
+ if (gc.oValue != 0) {
+ CorElementType objType = gc.oValue->GetTypeHandle().GetInternalCorElementType();
+ InvokeUtil::CreatePrimitiveValue(objType, objType, gc.oValue, &value);
+ }
+#ifdef _MSC_VER
+#pragma warning(disable: 4267) //work-around for compiler
+#endif
+ VolatileStore((size_t*) pDst, (size_t) value);
+#ifdef _MSC_VER
+#pragma warning(default: 4267)
+#endif
+ }
+ break;
+
+ case ELEMENT_TYPE_SZARRAY: // Single Dim, Zero
+ case ELEMENT_TYPE_ARRAY: // General Array
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_OBJECT:
+ SetObjectReferenceUnchecked((OBJECTREF*)pDst, gc.oValue);
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ _ASSERTE(!fieldType.IsTypeDesc());
+ MethodTable* pMT = fieldType.AsMethodTable();
+
+ // If we have a null value then we must create an empty field
+ if (gc.oValue == 0)
+ InitValueClass(pDst, pMT);
+ else {
+ pMT->UnBoxIntoUnchecked(pDst, gc.oValue);
+ }
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Unknown Type");
+ // this is really an impossible condition
+ COMPlusThrow(kNotSupportedException);
+ }
+
+lExit: ;
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+void QCALLTYPE ReflectionInvocation::CompileMethod(MethodDesc * pMD)
+{
+ QCALL_CONTRACT;
+
+ // Argument is checked on the managed side
+ PRECONDITION(pMD != NULL);
+
+ if (!pMD->IsPointingToPrestub())
+ return;
+
+ BEGIN_QCALL;
+ pMD->DoPrestub(NULL);
+ END_QCALL;
+}
+
+// This method triggers the class constructor for a given type
+FCIMPL1(void, ReflectionInvocation::RunClassConstructor, ReflectClassBaseObject *pTypeUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowArgumentVoidEx(kArgumentException, NULL, W("InvalidOperation_HandleIsNotInitialized"));
+
+ TypeHandle typeHnd = refType->GetType();
+ if (typeHnd.IsTypeDesc())
+ return;
+
+ MethodTable *pMT = typeHnd.AsMethodTable();
+
+ Assembly *pAssem = pMT->GetAssembly();
+
+ if (pAssem->IsIntrospectionOnly())
+ FCThrowExVoid(kInvalidOperationException, IDS_EE_CODEEXECUTION_IN_INTROSPECTIVE_ASSEMBLY, NULL, NULL, NULL);
+
+ if (pAssem->IsDynamic() && !pAssem->HasRunAccess())
+ {
+ FCThrowResVoid(kNotSupportedException, W("NotSupported_DynamicAssemblyNoRunAccess"));
+ }
+
+ if (!pMT->IsClassInited())
+ {
+ HELPER_METHOD_FRAME_BEGIN_1(refType);
+
+ // We perform the access check only on CoreCLR for backward compatibility.
+#ifdef FEATURE_CORECLR
+ RefSecContext sCtx(InvokeUtil::GetInvocationAccessCheckType());
+ InvokeUtil::CanAccessClass(&sCtx, pMT);
+#endif //FEATURE_CORECLR
+
+ pMT->CheckRestore();
+ pMT->EnsureInstanceActive();
+ pMT->CheckRunClassInitThrowing();
+
+ HELPER_METHOD_FRAME_END();
+ }
+}
+FCIMPLEND
+
+// This method triggers the module constructor for a given module
+FCIMPL1(void, ReflectionInvocation::RunModuleConstructor, ReflectModuleBaseObject *pModuleUNSAFE) {
+ FCALL_CONTRACT;
+
+ REFLECTMODULEBASEREF refModule = (REFLECTMODULEBASEREF)ObjectToOBJECTREF(pModuleUNSAFE);
+
+ if(refModule == NULL)
+ FCThrowArgumentVoidEx(kArgumentException, NULL, W("InvalidOperation_HandleIsNotInitialized"));
+
+ Module *pModule = refModule->GetModule();
+
+ Assembly *pAssem = pModule->GetAssembly();
+
+ if (pAssem->IsIntrospectionOnly())
+ FCThrowExVoid(kInvalidOperationException, IDS_EE_CODEEXECUTION_IN_INTROSPECTIVE_ASSEMBLY, NULL, NULL, NULL);
+
+ if (pAssem->IsDynamic() && !pAssem->HasRunAccess())
+ FCThrowResVoid(kNotSupportedException, W("NotSupported_DynamicAssemblyNoRunAccess"));
+
+ DomainFile *pDomainFile = pModule->FindDomainFile(GetAppDomain());
+ if (pDomainFile == NULL || !pDomainFile->IsActive())
+ {
+ HELPER_METHOD_FRAME_BEGIN_1(refModule);
+ if (pDomainFile == NULL)
+ pDomainFile = pModule->GetDomainFile();
+ pDomainFile->EnsureActive();
+ HELPER_METHOD_FRAME_END();
+ }
+}
+FCIMPLEND
+
+#ifndef FEATURE_CORECLR
+// This method triggers a given method to be jitted
+FCIMPL3(void, ReflectionInvocation::PrepareMethod, ReflectMethodObject* pMethodUNSAFE, TypeHandle *pInstantiation, UINT32 cInstantiation)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMethodUNSAFE, NULL_OK));
+ PRECONDITION(CheckPointer(pInstantiation, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ REFLECTMETHODREF refMethod = (REFLECTMETHODREF)ObjectToOBJECTREF(pMethodUNSAFE);
+
+ if (refMethod == NULL)
+ FCThrowArgumentVoidEx(kArgumentException, NULL, W("InvalidOperation_HandleIsNotInitialized"));
+
+ MethodDesc *pMD = refMethod->GetMethod();
+
+ HELPER_METHOD_FRAME_BEGIN_1(refMethod);
+
+ if (pMD->IsAbstract())
+ COMPlusThrowArgumentNull(W("method"), W("Argument_CannotPrepareAbstract"));
+
+ pMD->CheckRestore();
+
+ MethodTable * pExactMT = pMD->GetMethodTable();
+ if (pInstantiation != NULL)
+ {
+ // We were handed an instantiation, check that the method expects it and the right number of types has been provided (the
+ // caller supplies one array containing the class instantiation immediately followed by the method instantiation).
+ if (cInstantiation != (pMD->GetNumGenericMethodArgs() + pMD->GetNumGenericClassArgs()))
+ COMPlusThrow(kArgumentException, W("Argument_InvalidGenericInstantiation"));
+
+ // We need to find the actual class and/or method instantiations, even though we've been passed them. This is an issue of
+ // lifetime -- the instantiation passed in will go away at some point whereas preparation of the method has the potential to
+ // persist a copy of the instantiation pointer. By finding the actual instantiation we get a stable pointer whose lifetime
+ // is at least as long as the data generated by preparation.
+
+ // Check we've got a reasonable looking instantiation.
+ if (!Generics::CheckInstantiation(Instantiation(pInstantiation, cInstantiation)))
+ COMPlusThrow(kArgumentException, W("Argument_InvalidGenericInstantiation"));
+ for (ULONG i = 0; i < cInstantiation; i++)
+ if (pInstantiation[i].ContainsGenericVariables())
+ COMPlusThrow(kArgumentException, W("Argument_InvalidGenericInstantiation"));
+
+ // Load the exact type of the method if it needs to be instantiated (because it's a generic type definition, e.g. C<T>, or a
+ // shared type instantiation, e.g. C<Object>).
+ if (pExactMT->IsGenericTypeDefinition() || pExactMT->IsSharedByGenericInstantiations())
+ {
+ TypeHandle thExactType = ClassLoader::LoadGenericInstantiationThrowing(pMD->GetModule(),
+ pMD->GetMethodTable()->GetCl(),
+ Instantiation(pInstantiation, pMD->GetNumGenericClassArgs()));
+ pExactMT = thExactType.AsMethodTable();
+ }
+
+ // As for the class we might need to find a method desc with an exact instantiation if the one we have is too vague.
+ // Note: IsGenericMethodDefinition implies ContainsGenericVariables so there's no need to check it separately.
+ if (pMD->IsSharedByGenericInstantiations() || pMD->ContainsGenericVariables())
+ pMD = MethodDesc::FindOrCreateAssociatedMethodDesc(pMD,
+ pExactMT,
+ FALSE,
+ Instantiation(&pInstantiation[pMD->GetNumGenericClassArgs()], pMD->GetNumGenericMethodArgs()),
+ FALSE);
+ }
+ else
+ {
+ // No instantiation provided; the method had better not be expecting one.
+
+ // Methods that are generic definitions (e.g. C.Foo<U>) and those that are shared (e.g. C<Object>.Foo, C.Foo<Object>) need
+ // extra instantiation data.
+ // Note: IsGenericMethodDefinition implies ContainsGenericVariables so there's no need to check it separately.
+ if (pMD->IsSharedByGenericInstantiations() || pMD->ContainsGenericVariables())
+ COMPlusThrow(kArgumentException, W("Argument_InvalidGenericInstantiation"));
+
+ // The rest of the cases (non-generics related methods, instantiating stubs, methods instantiated over non-shared types
+ // etc.) should be able to provide their instantiation for us as necessary.
+ }
+
+ // Go prepare the method at the specified instantiation.
+ PrepareMethodDesc(pMD, pExactMT->GetInstantiation(), pMD->GetMethodInstantiation());
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+// This method triggers a given delegate to be prepared. This involves preparing the
+// delegate's Invoke method and preparing the target of that Invoke. In the case of
+// a multi-cast delegate, we rely on the fact that each individual component was prepared
+// prior to the Combine. If our event sinks perform the Combine, this is always true.
+// If the client calls Combine himself, he is responsible for his own preparation.
+FCIMPL1(void, ReflectionInvocation::PrepareDelegate, Object* delegateUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(delegateUNSAFE, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (delegateUNSAFE == NULL)
+ return;
+
+ OBJECTREF delegate = ObjectToOBJECTREF(delegateUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_1(delegate);
+
+ PrepareDelegateHelper(&delegate, FALSE);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
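+
+// Usage note (hedged): managed callers typically reach the entry point above
+// through RuntimeHelpers.PrepareDelegate before entering a constrained
+// execution region, so that the Invoke path and its eventual target are jitted
+// and restored up front rather than failing lazily inside the CER.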
+#endif // !FEATURE_CORECLR
+
+FCIMPL1(void, ReflectionInvocation::PrepareContractedDelegate, Object * delegateUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(delegateUNSAFE, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (delegateUNSAFE == NULL)
+ return;
+
+ OBJECTREF delegate = ObjectToOBJECTREF(delegateUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_1(delegate);
+
+ PrepareDelegateHelper(&delegate, TRUE);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+void ReflectionInvocation::PrepareDelegateHelper(OBJECTREF *pDelegate, BOOL onlyContractedMethod)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDelegate));
+ PRECONDITION(CheckPointer(OBJECTREFToObject(*pDelegate)));
+ }
+ CONTRACTL_END;
+
+ // Make sure the delegate subsystem itself is prepared.
+ // Force the immediate creation of any global stubs required. This is platform specific.
+#ifdef _TARGET_X86_
+ {
+ GCX_PREEMP();
+ COMDelegate::TheDelegateInvokeStub();
+ }
+#endif
+
+ MethodDesc *pMDTarget = COMDelegate::GetMethodDesc(*pDelegate);
+ MethodDesc *pMDInvoke = COMDelegate::FindDelegateInvokeMethod((*pDelegate)->GetMethodTable());
+
+ // If someone does give us a multicast delegate, then both MDs will be the same -- they
+ // will both be the Delegate's Invoke member. Normally, pMDTarget points at the method
+ // the delegate is wrapping, of course.
+ if (pMDTarget == pMDInvoke)
+ {
+ pMDTarget->CheckRestore();
+
+ // The invoke method itself is never generic, but the delegate class itself might be.
+ PrepareMethodDesc(pMDInvoke,
+ pMDInvoke->GetExactClassInstantiation((*pDelegate)->GetTypeHandle()),
+ Instantiation(),
+ onlyContractedMethod);
+ }
+ else
+ {
+ pMDTarget->CheckRestore();
+ pMDInvoke->CheckRestore();
+
+ // Prepare the eventual target method first.
+
+ // Load the exact type of the method if it needs to be instantiated (because it's a generic type definition, e.g. C<T>, or a
+ // shared type instantiation, e.g. C<Object>).
+ MethodTable *pExactMT = pMDTarget->GetMethodTable();
+ if (pExactMT->IsGenericTypeDefinition() || pExactMT->IsSharedByGenericInstantiations())
+ {
+ OBJECTREF targetObj = COMDelegate::GetTargetObject(*pDelegate);
+
+#ifdef FEATURE_REMOTING
+ // We prepare the delegate for the sole purpose of reliability (CER).
+ // If the target is a transparent proxy, we cannot guarantee reliability anyway.
+ if (CRemotingServices::IsTransparentProxy(OBJECTREFToObject(targetObj)))
+ return;
+#endif //FEATURE_REMOTING
+
+ pExactMT = targetObj->GetMethodTable();
+ }
+
+
+ // For delegates with generic target methods it must be the case that we are passed an instantiating stub -- there's no
+ // other way the necessary method instantiation information can be passed to us.
+ // The target MD may be shared by generic instantiations as long as it does not require extra instantiation arguments.
+ // We have the actual target object so we can extract the exact class instantiation from it.
+ _ASSERTE(!pMDTarget->RequiresInstArg() &&
+ !pMDTarget->ContainsGenericVariables());
+
+ PrepareMethodDesc(pMDTarget,
+ pMDTarget->GetExactClassInstantiation(TypeHandle(pExactMT)),
+ pMDTarget->GetMethodInstantiation(),
+ onlyContractedMethod);
+
+ // Now prepare the delegate invoke method.
+ // The invoke method itself is never generic, but the delegate class itself might be.
+ PrepareMethodDesc(pMDInvoke,
+ pMDInvoke->GetExactClassInstantiation((*pDelegate)->GetTypeHandle()),
+ Instantiation(),
+ onlyContractedMethod);
+ }
+}
+
+FCIMPL0(void, ReflectionInvocation::ProbeForSufficientStack)
+{
+ FCALL_CONTRACT;
+
+#ifdef FEATURE_STACK_PROBE
+ // Probe for our entry-point amount and throw if there is not enough stack.
+ RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT));
+#else
+ FCUnique(0x69);
+#endif
+
+}
+FCIMPLEND
+
+// This method checks to see if there is sufficient stack to execute the average Framework method.
+// If there is not, then it throws System.InsufficientExecutionStackException. The limit for each
+// thread is precomputed when the thread is created.
+FCIMPL0(void, ReflectionInvocation::EnsureSufficientExecutionStack)
+{
+ FCALL_CONTRACT;
+
+ Thread *pThread = GetThread();
+
+ // We use the address of a local variable as our "current stack pointer", which is
+ // plenty close enough for the purposes of this method.
+ UINT_PTR current = reinterpret_cast<UINT_PTR>(&pThread);
+ UINT_PTR limit = pThread->GetCachedStackSufficientExecutionLimit();
+
+ if (current < limit)
+ {
+ FCThrowVoid(kInsufficientExecutionStackException);
+ }
+}
+FCIMPLEND
+
+#ifdef FEATURE_CORECLR
+// As with EnsureSufficientExecutionStack, this method checks and returns whether there is
+// sufficient stack to execute the average Framework method, but rather than throwing,
+// it simply returns a Boolean: true for sufficient stack space, otherwise false.
+FCIMPL0(FC_BOOL_RET, ReflectionInvocation::TryEnsureSufficientExecutionStack)
+{
+ FCALL_CONTRACT;
+
+ Thread *pThread = GetThread();
+
+ // Same logic as EnsureSufficientExecutionStack
+ UINT_PTR current = reinterpret_cast<UINT_PTR>(&pThread);
+ UINT_PTR limit = pThread->GetCachedStackSufficientExecutionLimit();
+
+ FC_RETURN_BOOL(current >= limit);
+}
+FCIMPLEND
+#endif // FEATURE_CORECLR
+
+struct ECWGCFContext
+{
+ BOOL fHandled;
+ Frame *pStartFrame;
+};
+
+// Crawl the stack looking for Thread Abort related information (whether we're executing inside a CER or an error handling clause
+// of some sort).
+StackWalkAction ECWGCFCrawlCallBack(CrawlFrame* pCf, void* data)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ ECWGCFContext *pData = (ECWGCFContext *)data;
+
+ Frame *pFrame = pCf->GetFrame();
+ if (pFrame && pFrame->GetFunction() != NULL && pFrame != pData->pStartFrame)
+ {
+ // We walked through a transition frame, but it is not our start frame.
+ // This means ExecuteCodeWithGuaranteedCleanup is not at the bottom of the stack.
+ pData->fHandled = TRUE;
+ return SWA_ABORT;
+ }
+
+ MethodDesc *pMD = pCf->GetFunction();
+
+ // Non-method frames don't interest us.
+ if (pMD == NULL)
+ return SWA_CONTINUE;
+
+ if (!pMD->GetModule()->IsSystem())
+ {
+ // We walked through some user code. This means that ExecuteCodeWithGuaranteedCleanup is not at the bottom of the stack.
+ pData->fHandled = TRUE;
+ return SWA_ABORT;
+ }
+
+ return SWA_CONTINUE;
+}
+
+struct ECWGC_Param
+{
+ BOOL fExceptionThrownInTryCode;
+ BOOL fStackOverflow;
+ struct ECWGC_GC *gc;
+ ECWGC_Param()
+ {
+ fExceptionThrownInTryCode = FALSE;
+ fStackOverflow = FALSE;
+ }
+};
+
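+// Exception filter used around the user's try delegate below. It returns
+// EXCEPTION_CONTINUE_EXECUTION when the default filter resumes execution, unwinds
+// (EXCEPTION_EXECUTE_HANDLER) only for stack overflow so that the backout code runs
+// with stack available, and otherwise lets the exception propagate
+// (EXCEPTION_CONTINUE_SEARCH) after recording that the try code threw.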
+LONG SODetectionFilter(EXCEPTION_POINTERS *ep, void* pv)
+{
+ WRAPPER_NO_CONTRACT;
+ DefaultCatchFilterParam param(COMPLUS_EXCEPTION_EXECUTE_HANDLER);
+ if (DefaultCatchFilter(ep, &param) == EXCEPTION_CONTINUE_EXECUTION)
+ {
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+
+ // Record the fact that an exception occurred while running the try code.
+ ECWGC_Param *pParam = (ECWGC_Param *)pv;
+ pParam->fExceptionThrownInTryCode = TRUE;
+
+ // We unwind the stack only in the case of a stack overflow.
+ if (ep->ExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW)
+ {
+ pParam->fStackOverflow = TRUE;
+ return EXCEPTION_EXECUTE_HANDLER;
+ }
+
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+struct ECWGC_GC
+{
+ DELEGATEREF codeDelegate;
+ DELEGATEREF backoutDelegate;
+ OBJECTREF userData;
+};
+
+void ExecuteCodeWithGuaranteedCleanupBackout(ECWGC_GC *gc, BOOL fExceptionThrownInTryCode)
+{
+ // We need to prevent thread aborts from occurring for the duration of the call to the backout code.
+ // Once we enter managed code, the CER will take care of it as well; however without this holder,
+ // MethodDesc::Call would raise a thread abort exception if the thread is currently requesting one.
+ ThreadPreventAbortHolder preventAbort;
+
+#ifdef _DEBUG
+ // We have prevented abort on this thread. Normally we don't allow
+ // a thread to enter managed code if abort is prevented. But here the code
+ // requires the thread not be aborted.
+ Thread::DisableAbortCheckHolder dach;
+#endif
+
+ GCX_COOP();
+
+ PREPARE_NONVIRTUAL_CALLSITE_USING_METHODDESC(g_pExecuteBackoutCodeHelperMethod);
+
+ DECLARE_ARGHOLDER_ARRAY(args, 3);
+
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc->backoutDelegate);
+ args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(gc->userData);
+ args[ARGNUM_2] = DWORD_TO_ARGHOLDER(fExceptionThrownInTryCode);
+
+ CRITICAL_CALLSITE;
+ CALL_MANAGED_METHOD_NORET(args);
+}
+
+void ExecuteCodeWithGuaranteedCleanupHelper (ECWGC_GC *gc)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ ECWGC_Param param;
+ param.gc = gc;
+
+ PAL_TRY(ECWGC_Param *, pParamOuter, &param)
+ {
+ PAL_TRY(ECWGC_Param *, pParam, pParamOuter)
+ {
+ PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(pParam->gc->codeDelegate->GetMethodPtr());
+
+ DECLARE_ARGHOLDER_ARRAY(args, 2);
+
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(pParam->gc->codeDelegate->GetTarget());
+ args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(pParam->gc->userData);
+
+ CALL_MANAGED_METHOD_NORET(args);
+ }
+ PAL_EXCEPT_FILTER(SODetectionFilter)
+ {
+ }
+ PAL_ENDTRY;
+
+ if (pParamOuter->fStackOverflow)
+ {
+ GCX_COOP_NO_DTOR();
+ }
+ }
+ PAL_FINALLY
+ {
+ ExecuteCodeWithGuaranteedCleanupBackout(gc, param.fExceptionThrownInTryCode);
+ }
+ PAL_ENDTRY;
+
+#ifdef FEATURE_STACK_PROBE
+ if (param.fStackOverflow)
+ COMPlusThrowSO();
+#else
+ // This will not be set, as the CLR-to-managed transition code will terminate the
+ // process if a stack overflow occurs before SODetectionFilter() is called.
+ _ASSERTE(!param.fStackOverflow);
+#endif
+}
+
+//
+// ExecuteCodeWithGuaranteedCleanup ensures that we will call the backout code delegate even if an SO occurs. We do this by calling the
+// try delegate from within a PAL_TRY/PAL_EXCEPT_FILTER block that unwinds the stack in the stack overflow case. This
+// guarantees that the backout delegate is called with at least DEFAULT_ENTRY_PROBE_AMOUNT pages of stack. After the backout delegate is called,
+// we re-raise any exceptions that occurred inside the try delegate. Note that any CER that uses large or arbitrary amounts of stack in
+// its try block must use ExecuteCodeWithGuaranteedCleanup.
+//
+// ExecuteCodeWithGuaranteedCleanup also guarantees that the backout code will be run before any filters higher up on the stack. This
+// is important to prevent security exploits.
+//
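+// Illustrative managed-side usage (a hypothetical C# sketch; the managed entry point is
+// RuntimeHelpers.ExecuteCodeWithGuaranteedCleanup):
+//   RuntimeHelpers.ExecuteCodeWithGuaranteedCleanup(
+//       (userData) => DoWork(userData),                   // try delegate (DoWork is hypothetical)
+//       (userData, exceptionThrown) => Cleanup(userData), // backout delegate (Cleanup is hypothetical)
+//       state);
+//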
+FCIMPL3(void, ReflectionInvocation::ExecuteCodeWithGuaranteedCleanup, Object* codeDelegateUNSAFE, Object* backoutDelegateUNSAFE, Object* userDataUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(codeDelegateUNSAFE, NULL_OK));
+ PRECONDITION(CheckPointer(backoutDelegateUNSAFE, NULL_OK));
+ PRECONDITION(CheckPointer(userDataUNSAFE, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ ECWGC_GC gc;
+
+ gc.codeDelegate = (DELEGATEREF)ObjectToOBJECTREF(codeDelegateUNSAFE);
+ gc.backoutDelegate = (DELEGATEREF)ObjectToOBJECTREF(backoutDelegateUNSAFE);
+ gc.userData = ObjectToOBJECTREF(userDataUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ if (gc.codeDelegate == NULL)
+ COMPlusThrowArgumentNull(W("code"));
+ if (gc.backoutDelegate == NULL)
+ COMPlusThrowArgumentNull(W("backoutCode"));
+
+ if (!IsCompilationProcess())
+ {
+ // Delegates are prepared as part of the ngen process, so only prepare the backout
+ // delegate for non-ngen processes.
+ PrepareDelegateHelper((OBJECTREF *)&gc.backoutDelegate, FALSE);
+
+ // Make sure the managed backout code helper function has been prepared before we
+ // attempt to run the backout code.
+ PrepareMethodDesc(g_pExecuteBackoutCodeHelperMethod, Instantiation(), Instantiation(), FALSE, TRUE);
+ }
+
+ ExecuteCodeWithGuaranteedCleanupHelper(&gc);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+FCIMPL4(void, ReflectionInvocation::MakeTypedReference, TypedByRef * value, Object* targetUNSAFE, ArrayBase* fldsUNSAFE, ReflectClassBaseObject *pFieldTypeUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(targetUNSAFE));
+ PRECONDITION(CheckPointer(fldsUNSAFE));
+ }
+ CONTRACTL_END;
+
+ DWORD offset = 0;
+
+ struct _gc
+ {
+ OBJECTREF target;
+ BASEARRAYREF flds;
+ REFLECTCLASSBASEREF refFieldType;
+ } gc;
+ gc.target = (OBJECTREF) targetUNSAFE;
+ gc.flds = (BASEARRAYREF) fldsUNSAFE;
+ gc.refFieldType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pFieldTypeUNSAFE);
+
+ TypeHandle fieldType = gc.refFieldType->GetType();
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+ GCPROTECT_BEGININTERIOR (value)
+
+ DWORD cnt = gc.flds->GetNumComponents();
+ FieldDesc** fields = (FieldDesc**)gc.flds->GetDataPtr();
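+ // Accumulate the offsets of each field in the chain to compute the byte offset
+ // of the innermost field within the target object.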
+ for (DWORD i = 0; i < cnt; i++) {
+ FieldDesc* pField = fields[i];
+ offset += pField->GetOffset();
+ }
+
+ // Fields are already prohibited from having ArgIterator and RuntimeArgumentHandle types.
+ _ASSERTE(!gc.target->GetTypeHandle().GetMethodTable()->ContainsStackPtr());
+
+ // Create the ByRef
+ value->data = ((BYTE *)(gc.target->GetAddress() + offset)) + sizeof(Object);
+ value->type = fieldType;
+
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL2(void, ReflectionInvocation::SetTypedReference, TypedByRef * target, Object* objUNSAFE) {
+ FCALL_CONTRACT;
+
+ // <TODO>@TODO: We fixed serious bugs in this method very late in the endgame
+ // for V1 RTM. So it was decided to disable this API (nobody would seem to
+ // be using it anyway). If this API is enabled again, the implementation should
+ // be similar to COMArrayInfo::SetValue.
+ // </TODO>
+ HELPER_METHOD_FRAME_BEGIN_0();
+ COMPlusThrow(kNotSupportedException);
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+// This is an internal helper function for the TypedReference class.
+// It extracts the object from the typed reference.
+FCIMPL1(Object*, ReflectionInvocation::TypedReferenceToObject, TypedByRef * value) {
+ FCALL_CONTRACT;
+
+ OBJECTREF Obj = NULL;
+
+ TypeHandle th(value->type);
+
+ if (th.IsNull())
+ FCThrowRes(kArgumentNullException, W("ArgumentNull_TypedRefType"));
+
+ MethodTable* pMT = th.GetMethodTable();
+ PREFIX_ASSUME(NULL != pMT);
+
+ if (pMT->IsValueType())
+ {
+ // value->data is protected by the caller
+ HELPER_METHOD_FRAME_BEGIN_RET_1(Obj);
+
+ Obj = pMT->Box(value->data);
+
+ HELPER_METHOD_FRAME_END();
+ }
+ else {
+ Obj = ObjectToOBJECTREF(*((Object**)value->data));
+ }
+
+ return OBJECTREFToObject(Obj);
+}
+FCIMPLEND
+
+#ifdef _DEBUG
+FCIMPL1(FC_BOOL_RET, ReflectionInvocation::IsAddressInStack, void * ptr)
+{
+ FCALL_CONTRACT;
+ FC_RETURN_BOOL(GetThread()->IsAddressInStack(ptr));
+}
+FCIMPLEND
+#endif
+
+FCIMPL2_IV(Object*, ReflectionInvocation::CreateEnum, ReflectClassBaseObject *pTypeUNSAFE, INT64 value) {
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ TypeHandle typeHandle = refType->GetType();
+ _ASSERTE(typeHandle.IsEnum());
+ OBJECTREF obj = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refType);
+ MethodTable *pEnumMT = typeHandle.AsMethodTable();
+ obj = pEnumMT->Box(ArgSlotEndianessFixup ((ARG_SLOT*)&value,
+ pEnumMT->GetNumInstanceFieldBytes()));
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(obj);
+}
+FCIMPLEND
+
+#ifdef FEATURE_COMINTEROP
+
+static void TryGetClassFromProgID(STRINGREF className, STRINGREF server, OBJECTREF* pRefClass, DWORD bThrowOnError) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ // NOTE: this call enables GC
+ GetComClassFromProgID(className, server, pRefClass);
+ }
+ EX_CATCH
+ {
+ if (bThrowOnError)
+ {
+ EX_RETHROW;
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+// GetClassFromProgID
+// This method will return a Class object for a COM Classic object based
+// upon its ProgID. The COM Classic object is found and a wrapper object is created.
+FCIMPL3(Object*, ReflectionInvocation::GetClassFromProgID, StringObject* classNameUNSAFE,
+ StringObject* serverUNSAFE,
+ CLR_BOOL bThrowOnError) {
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF refClass = NULL;
+ STRINGREF className = (STRINGREF) classNameUNSAFE;
+ STRINGREF server = (STRINGREF) serverUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(className, server);
+
+ GCPROTECT_BEGIN(refClass)
+
+ // Since we will be returning a type that represents a COM component, we need
+ // to make sure COM is started before we return it.
+ EnsureComStarted();
+
+ // Make sure a prog id was provided
+ if (className == NULL)
+ COMPlusThrowArgumentNull(W("progID"),W("ArgumentNull_String"));
+
+ TryGetClassFromProgID(className, server, (OBJECTREF*) &refClass, bThrowOnError);
+ GCPROTECT_END();
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(refClass);
+}
+FCIMPLEND
+
+static void TryGetClassFromCLSID(GUID clsid, STRINGREF server, OBJECTREF* pRefClass, DWORD bThrowOnError) {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ EX_TRY
+ {
+ // NOTE: this call enables GC
+ GetComClassFromCLSID(clsid, server, pRefClass);
+ }
+ EX_CATCH
+ {
+ if (bThrowOnError)
+ {
+ EX_RETHROW;
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+// GetClassFromCLSID
+// This method will return a Class object for a COM Classic object based
+// upon its CLSID. The COM Classic object is found and a wrapper object is created.
+FCIMPL3(Object*, ReflectionInvocation::GetClassFromCLSID, GUID clsid, StringObject* serverUNSAFE, CLR_BOOL bThrowOnError) {
+ FCALL_CONTRACT;
+
+ struct _gc {
+ REFLECTCLASSBASEREF refClass;
+ STRINGREF server;
+ } gc;
+
+ gc.refClass = NULL;
+ gc.server = (STRINGREF) serverUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc.server);
+
+ // Since we will be returning a type that represents a COM component, we need
+ // to make sure COM is started before we return it.
+ EnsureComStarted();
+
+ TryGetClassFromCLSID(clsid, gc.server, (OBJECTREF*) &gc.refClass, bThrowOnError);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(gc.refClass);
+}
+FCIMPLEND
+
+
+FCIMPL8(Object*, ReflectionInvocation::InvokeDispMethod, ReflectClassBaseObject* refThisUNSAFE,
+ StringObject* nameUNSAFE,
+ INT32 invokeAttr,
+ Object* targetUNSAFE,
+ PTRArray* argsUNSAFE,
+ PTRArray* byrefModifiersUNSAFE,
+ LCID lcid,
+ PTRArray* namedParametersUNSAFE) {
+ FCALL_CONTRACT;
+
+ struct _gc
+ {
+ REFLECTCLASSBASEREF refThis;
+ STRINGREF name;
+ OBJECTREF target;
+ PTRARRAYREF args;
+ PTRARRAYREF byrefModifiers;
+ PTRARRAYREF namedParameters;
+ OBJECTREF RetObj;
+ } gc;
+
+ gc.refThis = (REFLECTCLASSBASEREF) refThisUNSAFE;
+ gc.name = (STRINGREF) nameUNSAFE;
+ gc.target = (OBJECTREF) targetUNSAFE;
+ gc.args = (PTRARRAYREF) argsUNSAFE;
+ gc.byrefModifiers = (PTRARRAYREF) byrefModifiersUNSAFE;
+ gc.namedParameters = (PTRARRAYREF) namedParametersUNSAFE;
+ gc.RetObj = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ _ASSERTE(gc.target != NULL);
+ _ASSERTE(gc.target->GetMethodTable()->IsComObjectType());
+
+ // Unless security is turned off, we need to validate that the calling code
+ // has unmanaged code access privilege.
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_UNMANAGED_CODE);
+
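+ // Map the BINDER_* invoke attributes onto IDispatch::Invoke dispatch flags. Note that the
+ // property-put, property-putref and construct cases intentionally replace (rather than
+ // OR into) any flags accumulated above.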
+ WORD flags = 0;
+ if (invokeAttr & BINDER_InvokeMethod)
+ flags |= DISPATCH_METHOD;
+ if (invokeAttr & BINDER_GetProperty)
+ flags |= DISPATCH_PROPERTYGET;
+ if (invokeAttr & BINDER_SetProperty)
+ flags = DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF;
+ if (invokeAttr & BINDER_PutDispProperty)
+ flags = DISPATCH_PROPERTYPUT;
+ if (invokeAttr & BINDER_PutRefDispProperty)
+ flags = DISPATCH_PROPERTYPUTREF;
+ if (invokeAttr & BINDER_CreateInstance)
+ flags = DISPATCH_CONSTRUCT;
+
+ IUInvokeDispMethod(&gc.refThis,
+ &gc.target,
+ (OBJECTREF*)&gc.name,
+ NULL,
+ (OBJECTREF*)&gc.args,
+ (OBJECTREF*)&gc.byrefModifiers,
+ (OBJECTREF*)&gc.namedParameters,
+ &gc.RetObj,
+ lcid,
+ flags,
+ invokeAttr & BINDER_IgnoreReturn,
+ invokeAttr & BINDER_IgnoreCase);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(gc.RetObj);
+}
+FCIMPLEND
+#endif // FEATURE_COMINTEROP
+
+FCIMPL2(void, ReflectionInvocation::GetGUID, ReflectClassBaseObject* refThisUNSAFE, GUID * result) {
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF refThis = (REFLECTCLASSBASEREF) refThisUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_1(refThis);
+ GCPROTECT_BEGININTERIOR (result);
+
+ if (result == NULL || refThis == NULL)
+ COMPlusThrow(kNullReferenceException);
+
+ TypeHandle type = refThis->GetType();
+ if (type.IsTypeDesc()) {
+ memset(result,0,sizeof(GUID));
+ goto lExit;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (IsComObjectClass(type))
+ {
+ SyncBlock* pSyncBlock = refThis->GetSyncBlock();
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ ComClassFactory* pComClsFac = pSyncBlock->GetInteropInfo()->GetComClassFactory();
+ if (pComClsFac)
+ {
+ memcpyNoGCRefs(result, &pComClsFac->m_rclsid, sizeof(GUID));
+ }
+ else
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ {
+ memset(result, 0, sizeof(GUID));
+ }
+
+ goto lExit;
+ }
+#endif // FEATURE_COMINTEROP
+
+ GUID guid;
+ type.AsMethodTable()->GetGuid(&guid, TRUE);
+ memcpyNoGCRefs(result, &guid, sizeof(GUID));
+
+lExit: ;
+ GCPROTECT_END();
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+//*************************************************************************************************
+//*************************************************************************************************
+//*************************************************************************************************
+// ReflectionSerialization
+//*************************************************************************************************
+//*************************************************************************************************
+//*************************************************************************************************
+FCIMPL1(Object*, ReflectionSerialization::GetUninitializedObject, ReflectClassBaseObject* objTypeUNSAFE) {
+ FCALL_CONTRACT;
+
+ OBJECTREF retVal = NULL;
+ REFLECTCLASSBASEREF objType = (REFLECTCLASSBASEREF) objTypeUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
+
+ if (objType == NULL) {
+ COMPlusThrowArgumentNull(W("type"), W("ArgumentNull_Type"));
+ }
+
+ TypeHandle type = objType->GetType();
+
+ // Don't allow arrays, pointers, byrefs or function pointers.
+ if (type.IsTypeDesc())
+ COMPlusThrow(kArgumentException, W("Argument_InvalidValue"));
+
+ MethodTable *pMT = type.GetMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+
+ // We don't allow uninitialized strings.
+ if (pMT == g_pStringClass) {
+ COMPlusThrow(kArgumentException, W("Argument_NoUninitializedStrings"));
+ }
+
+ // If this is an abstract class or an interface type, then we will
+ // fail this call.
+ if (pMT->IsAbstract()) {
+ COMPlusThrow(kMemberAccessException,W("Acc_CreateAbst"));
+ }
+ else if (pMT->ContainsGenericVariables()) {
+ COMPlusThrow(kMemberAccessException,W("Acc_CreateGeneric"));
+ }
+ // Never allow allocation of generics actually instantiated over __Canon
+ else if (pMT->IsSharedByGenericInstantiations()) {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_Type"));
+ }
+
+ // Never allow the allocation of an uninitialized ContextBoundObject-derived type; these must always be created with a paired
+ // transparent proxy or the JIT will get confused.
+#ifdef FEATURE_REMOTING
+ if (pMT->IsContextful())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_ManagedActivation"));
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ // Also do not allow allocation of uninitialized RCWs (COM objects).
+ if (pMT->IsComObjectType())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_ManagedActivation"));
+#endif // FEATURE_COMINTEROP
+
+ // If it is a nullable, return the underlying type instead.
+ if (Nullable::IsNullableType(pMT))
+ pMT = pMT->GetInstantiation()[0].GetMethodTable();
+
+ retVal = pMT->Allocate();
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(retVal);
+}
+FCIMPLEND
+
+FCIMPL1(Object*, ReflectionSerialization::GetSafeUninitializedObject, ReflectClassBaseObject* objTypeUNSAFE) {
+ FCALL_CONTRACT;
+
+ OBJECTREF retVal = NULL;
+ REFLECTCLASSBASEREF objType = (REFLECTCLASSBASEREF) objTypeUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(objType);
+
+ if (objType == NULL)
+ COMPlusThrowArgumentNull(W("type"), W("ArgumentNull_Type"));
+
+ TypeHandle type = objType->GetType();
+
+ // Don't allow arrays, pointers, byrefs or function pointers.
+ if (type.IsTypeDesc())
+ COMPlusThrow(kArgumentException, W("Argument_InvalidValue"));
+
+ MethodTable *pMT = type.GetMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+
+ // We don't allow uninitialized strings.
+ if (pMT == g_pStringClass)
+ COMPlusThrow(kArgumentException, W("Argument_NoUninitializedStrings"));
+
+
+ // If this is an abstract class or an interface type, then we will
+ // fail this call.
+ if (pMT->IsAbstract())
+ COMPlusThrow(kMemberAccessException,W("Acc_CreateAbst"));
+ else if (pMT->ContainsGenericVariables()) {
+ COMPlusThrow(kMemberAccessException,W("Acc_CreateGeneric"));
+ }
+
+ // Never allow the allocation of an uninitialized ContextBoundObject-derived type; these must always be created with a paired
+ // transparent proxy or the JIT will get confused.
+#ifdef FEATURE_REMOTING
+ if (pMT->IsContextful())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_ManagedActivation"));
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ // Also do not allow allocation of uninitialized RCWs (COM objects).
+ if (pMT->IsComObjectType())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_ManagedActivation"));
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_APTCA
+ if (!pMT->GetAssembly()->AllowUntrustedCaller()) {
+ OBJECTREF permSet = NULL;
+ Security::GetPermissionInstance(&permSet, SECURITY_FULL_TRUST);
+ Security::DemandSet(SSWT_LATEBOUND_LINKDEMAND, permSet);
+ }
+#endif // FEATURE_APTCA
+
+#ifdef FEATURE_CAS_POLICY
+ if (pMT->GetClass()->RequiresLinktimeCheck()) {
+ OBJECTREF refClassNonCasDemands = NULL;
+ OBJECTREF refClassCasDemands = NULL;
+
+ refClassCasDemands = TypeSecurityDescriptor::GetLinktimePermissions(pMT, &refClassNonCasDemands);
+
+ if (refClassCasDemands != NULL)
+ Security::DemandSet(SSWT_LATEBOUND_LINKDEMAND, refClassCasDemands);
+
+ }
+#endif // FEATURE_CAS_POLICY
+
+ // If it is a nullable, return the underlying type instead.
+ if (Nullable::IsNullableType(pMT))
+ pMT = pMT->GetInstantiation()[0].GetMethodTable();
+
+ retVal = pMT->Allocate();
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(retVal);
+}
+FCIMPLEND
+
+FCIMPL0(FC_BOOL_RET, ReflectionSerialization::GetEnableUnsafeTypeForwarders)
+{
+ FCALL_CONTRACT;
+ FC_RETURN_BOOL(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_Serialization_UnsafeTypeForwarding));
+}
+FCIMPLEND
+
+
+//*************************************************************************************************
+//*************************************************************************************************
+//*************************************************************************************************
+// ReflectionEnum
+//*************************************************************************************************
+//*************************************************************************************************
+//*************************************************************************************************
+
+FCIMPL1(Object *, ReflectionEnum::InternalGetEnumUnderlyingType, ReflectClassBaseObject *target) {
+ FCALL_CONTRACT;
+
+ VALIDATEOBJECT(target);
+ TypeHandle th = target->GetType();
+ if (!th.IsEnum())
+ FCThrowArgument(NULL, NULL);
+
+ OBJECTREF result = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+ MethodTable *pMT = MscorlibBinder::GetElementType(th.AsMethodTable()->GetInternalCorElementType());
+ result = pMT->GetManagedClassObject();
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(result);
+}
+FCIMPLEND
+
+FCIMPL1(INT32, ReflectionEnum::InternalGetCorElementType, Object *pRefThis) {
+ FCALL_CONTRACT;
+
+ VALIDATEOBJECT(pRefThis);
+ if (pRefThis == NULL)
+ FCThrowArgumentNull(NULL);
+
+ return pRefThis->GetMethodTable()->GetInternalCorElementType();
+}
+FCIMPLEND
+
+//*******************************************************************************
+struct TempEnumValue
+{
+ LPCUTF8 name;
+ UINT64 value;
+};
+
+//*******************************************************************************
+class TempEnumValueSorter : public CQuickSort<TempEnumValue>
+{
+public:
+ TempEnumValueSorter(TempEnumValue *pArray, SSIZE_T iCount)
+ : CQuickSort<TempEnumValue>(pArray, iCount) { LIMITED_METHOD_CONTRACT; }
+
+ int Compare(TempEnumValue *pFirst, TempEnumValue *pSecond)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (pFirst->value == pSecond->value)
+ return 0;
+ if (pFirst->value > pSecond->value)
+ return 1;
+ else
+ return -1;
+ }
+};
+
+void QCALLTYPE ReflectionEnum::GetEnumValuesAndNames(EnregisteredTypeHandle pEnumType, QCall::ObjectHandleOnStack pReturnValues, QCall::ObjectHandleOnStack pReturnNames, BOOL fGetNames)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ TypeHandle th = TypeHandle::FromPtr(pEnumType);
+
+ if (!th.IsEnum())
+ COMPlusThrow(kArgumentException, W("Arg_MustBeEnum"));
+
+ MethodTable *pMT = th.AsMethodTable();
+
+ IMDInternalImport *pImport = pMT->GetMDImport();
+
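+ // Walk the enum type's field defs in metadata: each static field is a named literal
+ // whose default value (widened to UINT64 below) is that enum member's value.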
+ StackSArray<TempEnumValue> temps;
+ UINT64 previousValue = 0;
+
+ HENUMInternalHolder fieldEnum(pImport);
+ fieldEnum.EnumInit(mdtFieldDef, pMT->GetCl());
+
+ //
+ // Note that we're fine treating signed types as unsigned, because all we really
+ // want to do is sort them based on a convenient strong ordering.
+ //
+
+ BOOL sorted = TRUE;
+
+ CorElementType type = pMT->GetInternalCorElementType();
+
+ mdFieldDef field;
+ while (pImport->EnumNext(&fieldEnum, &field))
+ {
+ DWORD dwFlags;
+ IfFailThrow(pImport->GetFieldDefProps(field, &dwFlags));
+ if (IsFdStatic(dwFlags))
+ {
+ TempEnumValue temp;
+
+ if (fGetNames)
+ IfFailThrow(pImport->GetNameOfFieldDef(field, &temp.name));
+
+ UINT64 value = 0;
+
+ MDDefaultValue defaultValue;
+ IfFailThrow(pImport->GetDefaultValue(field, &defaultValue));
+
+ // The following code assumes that the address of all union members is the same.
+ static_assert_no_msg(offsetof(MDDefaultValue, m_byteValue) == offsetof(MDDefaultValue, m_usValue));
+ static_assert_no_msg(offsetof(MDDefaultValue, m_ulValue) == offsetof(MDDefaultValue, m_ullValue));
+ PVOID pValue = &defaultValue.m_byteValue;
+
+ switch (type) {
+ case ELEMENT_TYPE_I1:
+ value = *((INT8 *)pValue);
+ break;
+
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_BOOLEAN:
+ value = *((UINT8 *)pValue);
+ break;
+
+ case ELEMENT_TYPE_I2:
+ value = *((INT16 *)pValue);
+ break;
+
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_CHAR:
+ value = *((UINT16 *)pValue);
+ break;
+
+ case ELEMENT_TYPE_I4:
+ IN_WIN32(case ELEMENT_TYPE_I:)
+ value = *((INT32 *)pValue);
+ break;
+
+ case ELEMENT_TYPE_U4:
+ IN_WIN32(case ELEMENT_TYPE_U:)
+ value = *((UINT32 *)pValue);
+ break;
+
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ IN_WIN64(case ELEMENT_TYPE_I:)
+ IN_WIN64(case ELEMENT_TYPE_U:)
+ value = *((INT64 *)pValue);
+ break;
+
+ default:
+ break;
+ }
+
+ temp.value = value;
+
+ //
+ // Check to see if we are already sorted. This may seem extraneous, but is
+ // actually probably the normal case.
+ //
+
+ if (previousValue > value)
+ sorted = FALSE;
+ previousValue = value;
+
+ temps.Append(temp);
+ }
+ }
+
+ TempEnumValue * pTemps = &(temps[0]);
+ DWORD cFields = temps.GetCount();
+
+ if (!sorted)
+ {
+ TempEnumValueSorter sorter(pTemps, cFields);
+ sorter.Sort();
+ }
+
+ {
+ GCX_COOP();
+
+ struct gc {
+ I8ARRAYREF values;
+ PTRARRAYREF names;
+ } gc;
+ gc.values = NULL;
+ gc.names = NULL;
+
+ GCPROTECT_BEGIN(gc);
+
+ {
+ gc.values = (I8ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_U8, cFields);
+
+ INT64 *pToValues = gc.values->GetDirectPointerToNonObjectElements();
+
+ for (DWORD i = 0; i < cFields; i++) {
+ pToValues[i] = pTemps[i].value;
+ }
+
+ pReturnValues.Set(gc.values);
+ }
+
+ if (fGetNames)
+ {
+ gc.names = (PTRARRAYREF) AllocateObjectArray(cFields, g_pStringClass);
+
+ for (DWORD i = 0; i < cFields; i++) {
+ STRINGREF str = StringObject::NewString(pTemps[i].name);
+ gc.names->SetAt(i, str);
+ }
+
+ pReturnNames.Set(gc.names);
+ }
+
+ GCPROTECT_END();
+ }
+
+ END_QCALL;
+}
+
+FCIMPL2_IV(Object*, ReflectionEnum::InternalBoxEnum, ReflectClassBaseObject* target, INT64 value) {
+ FCALL_CONTRACT;
+
+ VALIDATEOBJECT(target);
+ OBJECTREF ret = NULL;
+
+ MethodTable* pMT = target->GetType().AsMethodTable();
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ ret = pMT->Box(ArgSlotEndianessFixup((ARG_SLOT*)&value, pMT->GetNumInstanceFieldBytes()));
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(ret);
+}
+FCIMPLEND
+
+//*************************************************************************************************
+//*************************************************************************************************
+//*************************************************************************************************
+// ReflectionBinder
+//*************************************************************************************************
+//*************************************************************************************************
+//*************************************************************************************************
+
+FCIMPL2(FC_BOOL_RET, ReflectionBinder::DBCanConvertPrimitive, ReflectClassBaseObject* source, ReflectClassBaseObject* target) {
+ FCALL_CONTRACT;
+
+ VALIDATEOBJECT(source);
+ VALIDATEOBJECT(target);
+
+ CorElementType tSRC = source->GetType().GetSignatureCorElementType();
+ CorElementType tTRG = target->GetType().GetSignatureCorElementType();
+
+ FC_RETURN_BOOL(InvokeUtil::IsPrimitiveType(tTRG) && InvokeUtil::CanPrimitiveWiden(tTRG, tSRC));
+}
+FCIMPLEND
+
+FCIMPL2(FC_BOOL_RET, ReflectionBinder::DBCanConvertObjectPrimitive, Object* sourceObj, ReflectClassBaseObject* target) {
+ FCALL_CONTRACT;
+
+ VALIDATEOBJECT(sourceObj);
+ VALIDATEOBJECT(target);
+
+ if (sourceObj == 0)
+ FC_RETURN_BOOL(true);
+
+ TypeHandle th(sourceObj->GetMethodTable());
+ CorElementType tSRC = th.GetVerifierCorElementType();
+
+ CorElementType tTRG = target->GetType().GetSignatureCorElementType();
+ FC_RETURN_BOOL(InvokeUtil::IsPrimitiveType(tTRG) && InvokeUtil::CanPrimitiveWiden(tTRG, tSRC));
+}
+FCIMPLEND
+
+FCIMPL2(FC_BOOL_RET, ReflectionEnum::InternalEquals, Object *pRefThis, Object* pRefTarget)
+{
+ FCALL_CONTRACT;
+
+ VALIDATEOBJECT(pRefThis);
+ BOOL ret = false;
+ if (pRefTarget == NULL) {
+ FC_RETURN_BOOL(ret);
+ }
+
+ if( pRefThis == pRefTarget)
+ FC_RETURN_BOOL(true);
+
+ //Make sure we are comparing same type.
+ MethodTable* pMTThis = pRefThis->GetMethodTable();
+ _ASSERTE(!pMTThis->IsArray()); // the assumptions below do not hold for arrays
+ if ( pMTThis != pRefTarget->GetMethodTable()) {
+ FC_RETURN_BOOL(ret);
+ }
+
+ void * pThis = pRefThis->UnBox();
+ void * pTarget = pRefTarget->UnBox();
+ switch (pMTThis->GetNumInstanceFieldBytes()) {
+ case 1:
+ ret = (*(UINT8*)pThis == *(UINT8*)pTarget);
+ break;
+ case 2:
+ ret = (*(UINT16*)pThis == *(UINT16*)pTarget);
+ break;
+ case 4:
+ ret = (*(UINT32*)pThis == *(UINT32*)pTarget);
+ break;
+ case 8:
+ ret = (*(UINT64*)pThis == *(UINT64*)pTarget);
+ break;
+ default:
+ // should not reach here.
+ UNREACHABLE_MSG("Incorrect Enum Type size!");
+ break;
+ }
+
+ FC_RETURN_BOOL(ret);
+}
+FCIMPLEND
+
+// Perform (this & flags) == flags, i.e. test whether every bit set in flags is also set in this.
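+// e.g. for a hypothetical [Flags] enum Access, (Access.Read | Access.Write).HasFlag(Access.Read)
+// is true because all bits of the argument are set in the receiver.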
+FCIMPL2(FC_BOOL_RET, ReflectionEnum::InternalHasFlag, Object *pRefThis, Object* pRefFlags)
+{
+ FCALL_CONTRACT;
+
+ VALIDATEOBJECT(pRefThis);
+
+ BOOL cmp = false;
+
+ _ASSERTE(pRefFlags != NULL); // Enum.cs would have thrown ArgumentNullException before calling into InternalHasFlag
+
+ VALIDATEOBJECT(pRefFlags);
+
+ void * pThis = pRefThis->UnBox();
+ void * pFlags = pRefFlags->UnBox();
+
+ MethodTable* pMTThis = pRefThis->GetMethodTable();
+
+ _ASSERTE(!pMTThis->IsArray()); // the assumptions below do not hold for arrays
+ _ASSERTE(pMTThis->GetNumInstanceFieldBytes() == pRefFlags->GetMethodTable()->GetNumInstanceFieldBytes()); // Enum.cs verifies that the types are Equivalent
+
+ switch (pMTThis->GetNumInstanceFieldBytes()) {
+ case 1:
+ cmp = ((*(UINT8*)pThis & *(UINT8*)pFlags) == *(UINT8*)pFlags);
+ break;
+ case 2:
+ cmp = ((*(UINT16*)pThis & *(UINT16*)pFlags) == *(UINT16*)pFlags);
+ break;
+ case 4:
+ cmp = ((*(UINT32*)pThis & *(UINT32*)pFlags) == *(UINT32*)pFlags);
+ break;
+ case 8:
+ cmp = ((*(UINT64*)pThis & *(UINT64*)pFlags) == *(UINT64*)pFlags);
+ break;
+ default:
+ // should not reach here.
+ UNREACHABLE_MSG("Incorrect Enum Type size!");
+ break;
+ }
+
+ FC_RETURN_BOOL(cmp);
+}
+FCIMPLEND
+
+// compare two boxed enums using their underlying enum type
+FCIMPL2(int, ReflectionEnum::InternalCompareTo, Object *pRefThis, Object* pRefTarget)
+{
+ FCALL_CONTRACT;
+
+ const int retIncompatibleMethodTables = 2; // indicates that the method tables did not match
+ const int retInvalidEnumType = 3; // indicates that the enum was of an unknown/unsupported underlying type
+
+ VALIDATEOBJECT(pRefThis);
+
+ if (pRefTarget == NULL) {
+ return 1; // all values are greater than null
+ }
+
+ if( pRefThis == pRefTarget)
+ return 0;
+
+ VALIDATEOBJECT(pRefTarget);
+
+ //Make sure we are comparing same type.
+ MethodTable* pMTThis = pRefThis->GetMethodTable();
+
+ _ASSERTE(pMTThis->IsEnum());
+
+ if ( pMTThis != pRefTarget->GetMethodTable()) {
+ return retIncompatibleMethodTables; // error case, types incompatible
+ }
+
+ void * pThis = pRefThis->UnBox();
+ void * pTarget = pRefTarget->UnBox();
+
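+ // Three-way comparison yielding -1/0/1, matching IComparable.CompareTo conventions.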
+ #define CMPEXPR(x1,x2) (((x1) == (x2)) ? 0 : (((x1) < (x2)) ? -1 : 1))
+
+ switch (pMTThis->GetInternalCorElementType()) {
+
+ case ELEMENT_TYPE_I1:
+ {
+ INT8 i1 = *(INT8*)pThis;
+ INT8 i2 = *(INT8*)pTarget;
+
+ return CMPEXPR(i1,i2);
+ }
+ break;
+
+ case ELEMENT_TYPE_I2:
+ {
+ INT16 i1 = *(INT16*)pThis;
+ INT16 i2 = *(INT16*)pTarget;
+
+ return CMPEXPR(i1,i2);
+ }
+ break;
+
+
+ case ELEMENT_TYPE_I4:
+ IN_WIN32(case ELEMENT_TYPE_I:)
+ {
+ INT32 i1 = *(INT32*)pThis;
+ INT32 i2 = *(INT32*)pTarget;
+
+ return CMPEXPR(i1,i2);
+ }
+ break;
+
+
+ case ELEMENT_TYPE_I8:
+ IN_WIN64(case ELEMENT_TYPE_I:)
+ {
+ INT64 i1 = *(INT64*)pThis;
+ INT64 i2 = *(INT64*)pTarget;
+
+ return CMPEXPR(i1,i2);
+ }
+ break;
+
+ case ELEMENT_TYPE_BOOLEAN:
+ {
+ bool b1 = !!*(UINT8 *)pThis;
+ bool b2 = !!*(UINT8 *)pTarget;
+
+ return CMPEXPR(b1,b2);
+ }
+ break;
+
+ case ELEMENT_TYPE_U1:
+ {
+ UINT8 u1 = *(UINT8 *)pThis;
+ UINT8 u2 = *(UINT8 *)pTarget;
+
+ return CMPEXPR(u1,u2);
+ }
+ break;
+
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_CHAR:
+ {
+ UINT16 u1 = *(UINT16 *)pThis;
+ UINT16 u2 = *(UINT16 *)pTarget;
+
+ return CMPEXPR(u1,u2);
+ }
+ break;
+
+ case ELEMENT_TYPE_U4:
+ IN_WIN32(case ELEMENT_TYPE_U:)
+ {
+ UINT32 u1 = *(UINT32 *)pThis;
+ UINT32 u2 = *(UINT32 *)pTarget;
+
+ return CMPEXPR(u1,u2);
+ }
+ break;
+
+ case ELEMENT_TYPE_U8:
+ IN_WIN64(case ELEMENT_TYPE_U:)
+ {
+ UINT64 u1 = *(UINT64*)pThis;
+ UINT64 u2 = *(UINT64*)pTarget;
+
+ return CMPEXPR(u1,u2);
+ }
+ break;
+
+ case ELEMENT_TYPE_R4:
+ {
+ static_assert_no_msg(sizeof(float) == 4);
+
+ float f1 = *(float*)pThis;
+ float f2 = *(float*)pTarget;
+
+ return CMPEXPR(f1,f2);
+ }
+ break;
+
+ case ELEMENT_TYPE_R8:
+ {
+ static_assert_no_msg(sizeof(double) == 8);
+
+ double d1 = *(double*)pThis;
+ double d2 = *(double*)pTarget;
+
+ return CMPEXPR(d1,d2);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return retInvalidEnumType; // second error case -- unsupported enum type
+}
+FCIMPLEND
+
diff --git a/src/vm/reflectioninvocation.h b/src/vm/reflectioninvocation.h
new file mode 100644
index 0000000000..a87ffc1654
--- /dev/null
+++ b/src/vm/reflectioninvocation.h
@@ -0,0 +1,125 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+#ifndef _REFLECTIONINVOCATION_H_
+#define _REFLECTIONINVOCATION_H_
+
+#include "object.h"
+#include "fcall.h"
+#include "field.h"
+#include "stackwalktypes.h"
+#include "runtimehandles.h"
+#include "invokeutil.h"
+
+// NOTE: The following constants are defined in BindingFlags.cs
+#define BINDER_IgnoreCase 0x01
+#define BINDER_DeclaredOnly 0x02
+#define BINDER_Instance 0x04
+#define BINDER_Static 0x08
+#define BINDER_Public 0x10
+#define BINDER_NonPublic 0x20
+#define BINDER_FlattenHierarchy 0x40
+
+#define BINDER_InvokeMethod 0x00100
+#define BINDER_CreateInstance 0x00200
+#define BINDER_GetField 0x00400
+#define BINDER_SetField 0x00800
+#define BINDER_GetProperty 0x01000
+#define BINDER_SetProperty 0x02000
+#define BINDER_PutDispProperty 0x04000
+#define BINDER_PutRefDispProperty 0x08000
+
+#define BINDER_ExactBinding 0x010000
+#define BINDER_SuppressChangeType 0x020000
+#define BINDER_OptionalParamBinding 0x040000
+
+#define BINDER_IgnoreReturn 0x1000000
+
+#define BINDER_DefaultLookup (BINDER_Instance | BINDER_Static | BINDER_Public)
+#define BINDER_AllLookup (BINDER_Instance | BINDER_Static | BINDER_Public | BINDER_NonPublic)
+
+class ReflectionInvocation {
+
+public:
+ static
+ void QCALLTYPE CompileMethod(MethodDesc * pMD);
+
+ static FCDECL1(void, RunClassConstructor, ReflectClassBaseObject *pTypeUNSAFE);
+ static FCDECL1(void, RunModuleConstructor, ReflectModuleBaseObject *pModuleUNSAFE);
+#ifndef FEATURE_CORECLR
+ static FCDECL3(void, PrepareMethod, ReflectMethodObject* pMethodUNSAFE, TypeHandle *pInstantiation, UINT32 cInstantiation);
+ static FCDECL1(void, PrepareDelegate, Object* delegateUNSAFE);
+#endif // !FEATURE_CORECLR
+ static FCDECL1(void, PrepareContractedDelegate, Object* delegateUNSAFE);
+ static FCDECL0(void, ProbeForSufficientStack);
+ static FCDECL0(void, EnsureSufficientExecutionStack);
+#ifdef FEATURE_CORECLR // currently only used from mscorlib in FEATURE_CORECLR
+ static FCDECL0(FC_BOOL_RET, TryEnsureSufficientExecutionStack);
+#endif // FEATURE_CORECLR
+ static FCDECL3(void, ExecuteCodeWithGuaranteedCleanup, Object* pCodeDelegateUNSAFE, Object* pBackoutDelegateUNSAFE, Object* pUserDataUNSAFE);
+
+ // TypedReference functions, should go somewhere else
+ static FCDECL4(void, MakeTypedReference, TypedByRef * value, Object* targetUNSAFE, ArrayBase* fldsUNSAFE, ReflectClassBaseObject *pFieldType);
+ static FCDECL2(void, SetTypedReference, TypedByRef * target, Object* objUNSAFE);
+ static FCDECL1(Object*, TypedReferenceToObject, TypedByRef * value);
+#ifdef _DEBUG
+ static FCDECL1(FC_BOOL_RET, IsAddressInStack, void * ptr);
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ static FCDECL3(Object*, GetClassFromProgID, StringObject* classNameUNSAFE, StringObject* serverUNSAFE, CLR_BOOL bThrowOnError);
+ static FCDECL3(Object*, GetClassFromCLSID, GUID clsid, StringObject* serverUNSAFE, CLR_BOOL bThrowOnError);
+ static FCDECL8(Object*, InvokeDispMethod, ReflectClassBaseObject* refThisUNSAFE, StringObject* nameUNSAFE, INT32 invokeAttr, Object* targetUNSAFE, PTRArray* argsUNSAFE, PTRArray* byrefModifiersUNSAFE, LCID lcid, PTRArray* namedParametersUNSAFE);
+#endif // FEATURE_COMINTEROP
+ static FCDECL2(void, GetGUID, ReflectClassBaseObject* refThisUNSAFE, GUID * result);
+ static FCDECL2_IV(Object*, CreateEnum, ReflectClassBaseObject *pTypeUNSAFE, INT64 value);
+
+ // helper fcalls for invocation
+ static FCDECL1(DWORD, GetSpecialSecurityFlags, ReflectMethodObject *pMethodUNSAFE);
+ static FCDECL2(FC_BOOL_RET, CanValueSpecialCast, ReflectClassBaseObject *valueType, ReflectClassBaseObject *targetType);
+ static FCDECL3(Object*, AllocateValueType, ReflectClassBaseObject *targetType, Object *valueUNSAFE, CLR_BOOL fForceTypeChange);
+
+ static FCDECL4(void, PerformSecurityCheck, Object *target, MethodDesc *pMeth, ReflectClassBaseObject *pParent, DWORD dwFlags);
+ static FCDECL2(void, CheckArgs, PTRArray *objs, SignatureNative sig);
+
+ static FCDECL5(void, PerformVisibilityCheckOnField, FieldDesc *fieldDesc, Object *target, ReflectClassBaseObject *pDeclaringType, DWORD attr, DWORD invocationFlags);
+
+ static void PrepareDelegateHelper(OBJECTREF* pDelegate, BOOL onlyContractedMethod);
+ static void CanCacheTargetAndCrackedSig(MethodDesc* pMD);
+};
+
+class ReflectionSerialization {
+public:
+ static FCDECL1(Object*, GetUninitializedObject, ReflectClassBaseObject* objTypeUNSAFE);
+ static FCDECL1(Object*, GetSafeUninitializedObject, ReflectClassBaseObject* objTypeUNSAFE);
+ static FCDECL0(FC_BOOL_RET, GetEnableUnsafeTypeForwarders);
+};
+
+class ReflectionEnum {
+public:
+ static FCDECL1(Object *, InternalGetEnumUnderlyingType, ReflectClassBaseObject *target);
+ static FCDECL1(INT32, InternalGetCorElementType, Object *pRefThis);
+
+ static
+ void QCALLTYPE GetEnumValuesAndNames(EnregisteredTypeHandle pEnumType, QCall::ObjectHandleOnStack pReturnValues, QCall::ObjectHandleOnStack pReturnNames, BOOL fGetNames);
+
+ static FCDECL2_IV(Object*, InternalBoxEnum, ReflectClassBaseObject* pEnumType, INT64 value);
+ static FCDECL2(FC_BOOL_RET, InternalEquals, Object *pRefThis, Object* pRefTarget);
+ static FCDECL2(FC_BOOL_RET, InternalHasFlag, Object *pRefThis, Object* pRefFlags);
+ static FCDECL2(int, InternalCompareTo, Object *pRefThis, Object* pRefTarget);
+};
+
+class ReflectionBinder {
+public:
+ static FCDECL2(FC_BOOL_RET, DBCanConvertPrimitive, ReflectClassBaseObject* vSource, ReflectClassBaseObject* vTarget);
+ static FCDECL2(FC_BOOL_RET, DBCanConvertObjectPrimitive, Object* vSourceObj, ReflectClassBaseObject* vTarget);
+
+
+};
+
+#endif // _REFLECTIONINVOCATION_H_
diff --git a/src/vm/registration.h b/src/vm/registration.h
new file mode 100644
index 0000000000..4504be6fd6
--- /dev/null
+++ b/src/vm/registration.h
@@ -0,0 +1,26 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: Registration.h
+**
+**
+** Purpose: Native methods on System.Runtime.InteropServices.RegistrationServices
+**
+
+**
+===========================================================*/
+#ifndef __REGISTRATION_H
+#define __REGISTRATION_H
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+FCDECL2(VOID, RegisterTypeForComClientsNative, ReflectClassBaseObject* pTypeUNSAFE, GUID* pGuid);
+FCDECL3(DWORD, RegisterTypeForComClientsExNative, ReflectClassBaseObject* pTypeUNSAFE, CLSCTX clsContext, REGCLS flags);
+
+#endif
diff --git a/src/vm/rejit.cpp b/src/vm/rejit.cpp
new file mode 100644
index 0000000000..9da9118df2
--- /dev/null
+++ b/src/vm/rejit.cpp
@@ -0,0 +1,3989 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ReJit.cpp
+//
+
+//
+// This module implements the tracking and execution of rejit requests. In order to avoid
+// any overhead on the non-profiled case we don't intrude on any 'normal' data structures
+// except one member on the AppDomain to hold our main hashtable and crst (the
+// ReJitManager). See comments in rejit.h to understand relationships between ReJitInfo,
+// SharedReJitInfo, and ReJitManager, particularly SharedReJitInfo::InternalFlags which
+// capture the state of a rejit request, and ReJitInfo::InternalFlags which captures the
+// state of a particular MethodDesc from a rejit request.
+//
+// A ReJIT request (tracked via SharedReJitInfo) is made at the level of a (Module *,
+// methodDef) pair, and thus affects all instantiations of a generic. Each MethodDesc
+// affected by a ReJIT request has its state tracked via a ReJitInfo instance. A
+// ReJitInfo can represent a rejit request against an already-jitted MethodDesc, or a
+// rejit request against a not-yet-jitted MethodDesc (called a "pre-rejit" request). A
+// Pre-ReJIT request happens when a profiler specifies a (Module *, methodDef) pair that
+// has not yet been JITted, or that represents a generic function which always has the
+// potential to JIT new instantiations in the future.
+//
+// Top-level functions in this file of most interest are:
+//
+// * (static) code:ReJitManager::RequestReJIT:
+// Profiling API just delegates all rejit requests directly to this function. It is
+// responsible for recording the request into the appropriate ReJITManagers and for
+// jump-stamping any already-JITted functions affected by the request (so that future
+// calls hit the prestub)
+//
+// * code:ReJitManager::DoReJitIfNecessary:
+// MethodDesc::DoPrestub calls this to determine whether it's been invoked to do a rejit.
+// If so, ReJitManager::DoReJitIfNecessary is responsible for (indirectly) gathering the
+// appropriate IL and codegen flags, calling UnsafeJitFunction(), and redirecting the
+// jump-stamp from the prestub to the newly-rejitted code.
+//
+// * code:ReJitPublishMethodHolder::ReJitPublishMethodHolder
+// MethodDesc::MakeJitWorker() calls this to determine if there's an outstanding
+// "pre-rejit" request for a MethodDesc that has just been jitted for the first time. We
+// also call this from MethodDesc::CheckRestore when restoring generic methods.
+// The holder applies the jump-stamp to the
+// top of the originally JITted code, with the jump target being the prestub.
+// When ReJIT is enabled this holder enters the ReJIT
+// lock to enforce atomicity of doing the pre-rejit-jmp-stamp & publishing/restoring
+// the PCODE, which is required to avoid races with a profiler that calls RequestReJIT
+// just as the method finishes compiling/restoring.
+//
+// * code:ReJitPublishMethodTableHolder::ReJitPublishMethodTableHolder
+// Does the same thing as ReJitPublishMethodHolder except iterating over every
+// method in the MethodTable. This is called from MethodTable::SetIsRestored.
+//
+// * code:ReJitManager::GetCurrentReJitFlags:
+// CEEInfo::canInline() calls this as part of its calculation of whether it may inline a
+// given method. (Profilers may specify on a per-rejit-request basis whether the rejit of
+// a method may inline callees.)
+//
+//
+// #Invariants:
+//
+// For a given Module/MethodDef there is at most 1 SharedReJitInfo that is not Reverted,
+// though there may be many that are in the Reverted state. If a method is rejitted
+// multiple times, with multiple versions actively in use on the stacks, then all but the
+// most recent are put into the Reverted state even though they may not yet be physically
+// reverted and pitched yet.
+//
+// For a given MethodDesc there is at most 1 ReJitInfo in the kJumpToPrestub or kJumpToRejittedCode
+// state.
+//
+// The ReJitManager::m_crstTable lock is held whenever reading or writing to that
+// ReJitManager instance's table (including state transitions applied to the ReJitInfo &
+// SharedReJitInfo instances stored in that table).
+//
+// The ReJitManager::m_crstTable lock is never held during callbacks to the profiler
+// such as GetReJITParameters, ReJITStarted, JITComplete, ReportReJITError
+//
+// Any thread holding the ReJitManager::m_crstTable lock can't block during runtime suspension
+// therefore it can't call any GC_TRIGGERS functions
+//
+// Transitions between SharedRejitInfo states happen only in the following circumstances:
+// 1) New SharedRejitInfo added to table (Requested State)
+// Inside RequestRejit
+// Global Crst held, table Crst held
+//
+// 2) Requested -> GettingReJITParameters
+// Inside DoRejitIfNecessary
+// Global Crst NOT held, table Crst held
+//
+// 3) GettingReJITParameters -> Active
+// Inside DoRejitIfNecessary
+// Global Crst NOT held, table Crst held
+//
+// 4) * -> Reverted
+// Inside RequestRejit or RequestRevert
+// Global Crst held, table Crst held
+//
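+// Summary sketch of the SharedReJitInfo transitions listed above:
+//
+//   Requested --> GettingReJITParameters --> Active
+//       |                   |                  |
+//       +-------------------+------------------+--> Reverted
+//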
+//
+// Transitions between RejitInfo states happen only in the following circumstances:
+// 1) New RejitInfo added to table (kJumpNone state)
+// Inside RequestRejit, DoJumpStampIfNecessary
+// Global Crst MAY/MAY NOT be held, table Crst held
+// Allowed SharedReJit states: Requested, GettingReJITParameters, Active
+//
+// 2) kJumpNone -> kJumpToPrestub
+// Inside RequestRejit, DoJumpStampIfNecessary
+// Global Crst MAY/MAY NOT be held, table Crst held
+// Allowed SharedReJit states: Requested, GettingReJITParameters, Active
+//
+// 3) kJumpToPreStub -> kJumpToRejittedCode
+// Inside DoReJitIfNecessary
+// Global Crst NOT held, table Crst held
+// Allowed SharedReJit states: Active
+//
+// 4) * -> kJumpNone
+// Inside RequestRevert, RequestRejit
+// Global Crst held, table crst held
+// Allowed SharedReJit states: Reverted
+//
+//
+// #Beware Invariant misconceptions - don't make bad assumptions!
+// Even if a SharedReJitInfo is in the Reverted state:
+// a) RejitInfos may still be in the kJumpToPreStub or kJumpToRejittedCode state
+// Reverted really just means the runtime has started reverting, but it may not
+// be complete yet on the thread executing Revert or RequestRejit.
+// b) The code for this version of the method may be executing on any number of
+// threads. Even after transitioning all rejit infos to kJumpNone state we
+// have no power to abort or hijack threads already running the rejitted code.
+//
+// Even if a SharedReJitInfo is in the Active state:
+// a) The corresponding ReJitInfos may not be jump-stamped yet.
+// Some thread is still in the process of getting this method jump-stamped,
+// OR it is a place-holder ReJitInfo.
+// b) An older ReJitInfo linked to a reverted SharedReJitInfo could still be
+// in kJumpToPreStub or kJumpToReJittedCode state. RequestRejit is still in
+// progress on some thread.
+//
+//
+// #Known issues with REJIT at this time:
+// NGEN inlined methods will not be properly rejitted
+// Exception callstacks through rejitted code do not produce correct StackTraces
+// Live debugging is not supported when rejit is enabled
+// Rejit leaks rejitted methods, RejitInfos, and SharedRejitInfos until AppDomain unload
+// Dump debugging doesn't correctly locate RejitInfos that are keyed by MethodDesc
+// Metadata update creates large memory increase switching to RW (not specifically a rejit issue)
+//
+// ======================================================================================
+
+#include "common.h"
+#include "rejit.h"
+#include "method.hpp"
+#include "eeconfig.h"
+#include "methoditer.h"
+#include "dbginterface.h"
+#include "threadsuspend.h"
+
+#ifdef FEATURE_REJIT
+
+#include "../debug/ee/debugger.h"
+#include "../debug/ee/walker.h"
+#include "../debug/ee/controller.h"
+
+// This HRESULT is only used as a private implementation detail. If it escapes functions
+// defined in this file it is a bug. Corerror.xml has a comment in it reserving this
+// value for our use but it doesn't appear in the public headers.
+#define CORPROF_E_RUNTIME_SUSPEND_REQUIRED 0x80131381
+
+// This is just used as a unique id. Overflow is OK. If we happen to have more than 4 billion rejits
+// and somehow manage to not run out of memory, we'll just have to redefine ReJITID as size_t.
+/* static */
+ReJITID SharedReJitInfo::s_GlobalReJitId = 1;
+
+/* static */
+CrstStatic ReJitManager::s_csGlobalRequest;
+
+
+//---------------------------------------------------------------------------------------
+// Helpers
+
+static DWORD JitFlagsFromProfCodegenFlags(DWORD dwCodegenFlags)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ DWORD jitFlags = 0;
+
+ // Note: COR_PRF_CODEGEN_DISABLE_INLINING is checked in
+ // code:CEEInfo::canInline#rejit (it has no equivalent CORJIT flag).
+
+ if ((dwCodegenFlags & COR_PRF_CODEGEN_DISABLE_ALL_OPTIMIZATIONS) != 0)
+ {
+ jitFlags |= CORJIT_FLG_DEBUG_CODE;
+ }
+
+ // In the future more flags may be added that need to be converted here (e.g.,
+ // COR_PRF_CODEGEN_ENTERLEAVE / CORJIT_FLG_PROF_ENTERLEAVE)
+
+ return jitFlags;
+}
+
+//---------------------------------------------------------------------------------------
+// Allocation helpers used by ReJitInfo / SharedReJitInfo to ensure they
+// stick stuff on the appropriate loader heap.
+
+void * LoaderHeapAllocatedRejitStructure::operator new (size_t size, LoaderHeap * pHeap, const NoThrow&)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(return NULL;);
+ PRECONDITION(CheckPointer(pHeap));
+ }
+ CONTRACTL_END;
+
+#ifdef DACCESS_COMPILE
+ return ::operator new(size, nothrow);
+#else
+ return pHeap->AllocMem_NoThrow(S_SIZE_T(size));
+#endif
+}
+
+void * LoaderHeapAllocatedRejitStructure::operator new (size_t size, LoaderHeap * pHeap)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pHeap));
+ }
+ CONTRACTL_END;
+
+#ifdef DACCESS_COMPILE
+ return ::operator new(size);
+#else
+ return pHeap->AllocMem(S_SIZE_T(size));
+#endif
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Simple, thin abstraction of debugger breakpoint patching. Given an address and a
+// previously procured DebuggerControllerPatch governing the code address, this decides
+// whether the code address is patched. If so, it returns a pointer to the debugger's
+// buffer (of what's "underneath" the int 3 patch); otherwise, it returns the code
+// address itself.
+//
+// Arguments:
+// * pbCode - Code address to return if unpatched
+// * dbgpatch - DebuggerControllerPatch to test
+//
+// Return Value:
+// Either pbCode or the debugger's patch buffer, as per description above.
+//
+// Assumptions:
+// Caller must manually grab (and hold) the ControllerLockHolder and get the
+// DebuggerControllerPatch before calling this helper.
+//
+// Notes:
+// pbCode need not equal the code address governed by dbgpatch, but is always
+// "related" (and sometimes really is equal). For example, this helper may be used
+// when writing a code byte to an internal rejit buffer (e.g., in preparation for an
+// eventual 64-bit interlocked write into the code stream), and thus pbCode would
+// point into the internal rejit buffer whereas dbgpatch governs the corresponding
+// code byte in the live code stream. This function would then be used to determine
+// whether a byte should be written into the internal rejit buffer OR into the
+// debugger controller's breakpoint buffer.
+//
+
+LPBYTE FirstCodeByteAddr(LPBYTE pbCode, DebuggerControllerPatch * dbgpatch)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (dbgpatch != NULL && dbgpatch->IsActivated())
+ {
+ // Debugger has patched the code, so return the address of the buffer
+ return LPBYTE(&(dbgpatch->opcode));
+ }
+
+ // no active patch, just return the direct code address
+ return pbCode;
+}
+
+
+//---------------------------------------------------------------------------------------
+// ProfilerFunctionControl implementation
+
+ProfilerFunctionControl::ProfilerFunctionControl(LoaderHeap * pHeap) :
+ m_refCount(1),
+ m_pHeap(pHeap),
+ m_dwCodegenFlags(0),
+ m_cbIL(0),
+ m_pbIL(NULL),
+ m_cInstrumentedMapEntries(0),
+ m_rgInstrumentedMapEntries(NULL)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+ProfilerFunctionControl::~ProfilerFunctionControl()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Intentionally not deleting m_pbIL or m_rgInstrumentedMapEntries, as their ownership gets transferred to the
+ // SharedReJitInfo that manages that rejit request.
+}
+
+
+HRESULT ProfilerFunctionControl::QueryInterface(REFIID id, void** pInterface)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if ((id != IID_IUnknown) &&
+ (id != IID_ICorProfilerFunctionControl))
+ {
+ *pInterface = NULL;
+ return E_NOINTERFACE;
+ }
+
+ *pInterface = this;
+ this->AddRef();
+ return S_OK;
+}
+
+ULONG ProfilerFunctionControl::AddRef()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return InterlockedIncrement(&m_refCount);
+}
+
+ULONG ProfilerFunctionControl::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ULONG refCount = InterlockedDecrement(&m_refCount);
+
+ if (0 == refCount)
+ {
+ delete this;
+ }
+
+ return refCount;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Profiler calls this to specify a set of flags from COR_PRF_CODEGEN_FLAGS
+// to control rejitting a particular methodDef.
+//
+// Arguments:
+// * flags - set of flags from COR_PRF_CODEGEN_FLAGS
+//
+// Return Value:
+// Always S_OK;
+//
+
+HRESULT ProfilerFunctionControl::SetCodegenFlags(DWORD flags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwCodegenFlags = flags;
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Profiler calls this to specify the IL to use when rejitting a particular methodDef.
+//
+// Arguments:
+// * cbNewILMethodHeader - Size in bytes of pbNewILMethodHeader
+// * pbNewILMethodHeader - Pointer to beginning of IL header + IL bytes.
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+// Notes:
+// Caller owns allocating and freeing pbNewILMethodHeader as expected.
+// SetILFunctionBody copies pbNewILMethodHeader into a separate buffer.
+//
+
+HRESULT ProfilerFunctionControl::SetILFunctionBody(ULONG cbNewILMethodHeader, LPCBYTE pbNewILMethodHeader)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (cbNewILMethodHeader == 0)
+ {
+ return E_INVALIDARG;
+ }
+
+ if (pbNewILMethodHeader == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ _ASSERTE(m_cbIL == 0);
+ _ASSERTE(m_pbIL == NULL);
+
+#ifdef DACCESS_COMPILE
+ m_pbIL = new (nothrow) BYTE[cbNewILMethodHeader];
+#else
+ // IL is stored on the appropriate loader heap, and its memory will be owned by the
+ // SharedReJitInfo we copy the pointer to.
+ m_pbIL = (LPBYTE) (void *) m_pHeap->AllocMem_NoThrow(S_SIZE_T(cbNewILMethodHeader));
+#endif
+ if (m_pbIL == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ m_cbIL = cbNewILMethodHeader;
+ memcpy(m_pbIL, pbNewILMethodHeader, cbNewILMethodHeader);
+
+ return S_OK;
+}
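+
+// Hypothetical profiler-side sketch of providing a replacement body. The buffer
+// must begin with a valid IL method header; a tiny-format header packs the code
+// size into the upper six bits of the first byte, with CorILMethod_TinyFormat in
+// the low bits. Since SetILFunctionBody copies the buffer, the caller may free or
+// reuse rgNewBody afterwards. pControl is an assumed ICorProfilerFunctionControl*.
+//
+// BYTE rgNewBody[] =
+// {
+//     (1 << 2) | CorILMethod_TinyFormat, // tiny header: code size = 1 byte
+//     CEE_RET                            // 0x2A: empty (void) method body
+// };
+// HRESULT hr = pControl->SetILFunctionBody(sizeof(rgNewBody), rgNewBody);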
+
+HRESULT ProfilerFunctionControl::SetILInstrumentedCodeMap(ULONG cILMapEntries, COR_IL_MAP * rgILMapEntries)
+{
+#ifdef DACCESS_COMPILE
+ // I'm not sure why any of these methods would need to be compiled in DAC. Could we remove the
+ // entire class from the DAC'ized code build?
+ _ASSERTE(!"This shouldn't be called in DAC");
+ return E_NOTIMPL;
+#else
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (cILMapEntries >= (MAXULONG / sizeof(COR_IL_MAP)))
+ {
+ // Too big! The allocation below would overflow when calculating the size.
+ return E_INVALIDARG;
+ }
+
+#ifdef FEATURE_CORECLR
+ if (g_pDebugInterface == NULL)
+ {
+ return CORPROF_E_DEBUGGING_DISABLED;
+ }
+#else
+ // g_pDebugInterface is initialized on startup on desktop CLR, regardless of whether a debugger
+ // or profiler is loaded. So it should always be available.
+ _ASSERTE(g_pDebugInterface != NULL);
+#endif // FEATURE_CORECLR
+
+
+ // Copy the IL map entry count and the entries themselves into the corresponding fields.
+ m_cInstrumentedMapEntries = cILMapEntries;
+
+ // The map is stored on the appropriate loader heap, and its memory will be owned by the
+ // SharedReJitInfo we copy the pointer to.
+ m_rgInstrumentedMapEntries = (COR_IL_MAP*) (void *) m_pHeap->AllocMem_NoThrow(S_SIZE_T(cILMapEntries * sizeof(COR_IL_MAP)));
+
+ if (m_rgInstrumentedMapEntries == NULL)
+ return E_OUTOFMEMORY;
+
+
+ memcpy_s(m_rgInstrumentedMapEntries, sizeof(COR_IL_MAP) * cILMapEntries, rgILMapEntries, sizeof(COR_IL_MAP) * cILMapEntries);
+
+ return S_OK;
+#endif // DACCESS_COMPILE
+}
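+
+// Hypothetical profiler-side sketch of reporting an IL map for instrumented code.
+// COR_IL_MAP's fields (oldOffset, newOffset, fAccurate) are the real layout; the
+// offsets below are invented, describing a 5-byte probe injected at the start of
+// the method, which shifts the original IL down by 5 bytes.
+//
+// COR_IL_MAP rgMap[] =
+// {
+//     { 0, 5, TRUE } // original IL offset 0 now lives at offset 5
+// };
+// HRESULT hr = pControl->SetILInstrumentedCodeMap(_countof(rgMap), rgMap);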
+
+//---------------------------------------------------------------------------------------
+//
+// ReJitManager may use this to access the codegen flags the profiler had set on this
+// ICorProfilerFunctionControl.
+//
+// Return Value:
+// * codegen flags previously set via SetCodegenFlags; 0 if none were set.
+//
+DWORD ProfilerFunctionControl::GetCodegenFlags()
+{
+ return m_dwCodegenFlags;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// ReJitManager may use this to access the IL header + instructions the
+// profiler had set on this ICorProfilerFunctionControl via SetILFunctionBody.
+//
+// Return Value:
+// * Pointer to ProfilerFunctionControl-allocated buffer containing the
+// IL header and instructions the profiler had provided.
+//
+LPBYTE ProfilerFunctionControl::GetIL()
+{
+ return m_pbIL;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// ReJitManager may use this to access the count of instrumented map entries the
+// profiler had set on this ICorProfilerFunctionControl.
+//
+// Return Value:
+// * size of the instrumented map entry array
+//
+ULONG ProfilerFunctionControl::GetInstrumentedMapEntryCount()
+{
+ return m_cInstrumentedMapEntries;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// ReJitManager may use this to access the instrumented map entries the
+// profiler had set on this ICorProfilerFunctionControl.
+//
+// Return Value:
+// * the array of instrumented map entries
+//
+COR_IL_MAP* ProfilerFunctionControl::GetInstrumentedMapEntries()
+{
+ return m_rgInstrumentedMapEntries;
+}
+
+//---------------------------------------------------------------------------------------
+// ReJitManager implementation
+
+// All the state-changey stuff is kept up here in the !DACCESS_COMPILE block.
+// The more read-only inspection-y stuff follows the block.
+
+#ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+// Called by the prestub worker, this function is a simple wrapper which determines the
+// appropriate ReJitManager, and then calls DoReJitIfNecessaryWorker() on it. See the
+// comment at the top of code:ReJitManager::DoReJitIfNecessaryWorker for more info,
+// including parameter & return value descriptions.
+
+// static
+PCODE ReJitManager::DoReJitIfNecessary(PTR_MethodDesc pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!pMD->HasNativeCode())
+ {
+ // If the method hasn't been jitted yet, the prestub worker should just continue as
+ // usual.
+ return NULL;
+ }
+
+ // We've already published the JITted code for this MethodDesc, and yet we're
+ // back in the prestub (which called us). Ask the appropriate rejit manager whether
+ // that's because of a rejit request. If so, the ReJitManager will take care of the rejit now.
+ return pMD->GetReJitManager()->DoReJitIfNecessaryWorker(pMD);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// ICorProfilerInfo4::RequestReJIT calls into this guy to do most of the
+// work. Takes care of finding the appropriate ReJitManager instances to
+// record the rejit requests and perform jmp-stamping.
+//
+// Arguments:
+// * cFunctions - Element count of rgModuleIDs & rgMethodDefs
+// * rgModuleIDs - Parallel array of ModuleIDs to rejit
+// * rgMethodDefs - Parallel array of methodDefs to rejit
+//
+// Return Value:
+// HRESULT indicating success or failure of the overall operation. Each
+// individual methodDef (or MethodDesc associated with the methodDef)
+// may encounter its own failure, which is reported by the ReJITError()
+// callback, which is called into the profiler directly.
+//
+
+// static
+HRESULT ReJitManager::RequestReJIT(
+ ULONG cFunctions,
+ ModuleID rgModuleIDs[],
+ mdMethodDef rgMethodDefs[])
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ CAN_TAKE_LOCK;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ // Serialize all RequestReJIT() and Revert() calls against each other (even across AppDomains)
+ CrstHolder ch(&(s_csGlobalRequest));
+
+ HRESULT hr = S_OK;
+
+ // Request at least 1 method to reJIT!
+ _ASSERTE ((cFunctions != 0) && (rgModuleIDs != NULL) && (rgMethodDefs != NULL));
+
+ // Temporary storage to batch up all the ReJitInfos that will get jump stamped
+ // later when the runtime is suspended.
+ //
+ //BUGBUG: It's not clear to me why it is safe to hold ReJitInfo* lists
+ // outside the table locks. If an AppDomain unload occurred I don't see anything
+ // that prevents them from being deleted. If this is a bug it is a pre-existing
+ // condition and nobody has reported it as an issue yet. AppDomainExit probably
+ // needs to synchronize with something.
+ // Jan also pointed out the ModuleIDs have the same issue: in order to use this
+ // function safely the profiler needs to prevent the AppDomain which contains the
+ // modules from being unloaded. I doubt any profilers are doing this intentionally,
+ // but calling from within typical callbacks like ModuleLoadFinished or
+ // JIT events would do it for the current domain, I think. Of course RequestReJIT
+ // could always be called with ModuleIDs in some other AppDomain.
+ //END BUGBUG
+ SHash<ReJitManagerJumpStampBatchTraits> mgrToJumpStampBatch;
+ CDynArray<ReJitReportErrorWorkItem> errorRecords;
+ for (ULONG i = 0; i < cFunctions; i++)
+ {
+ Module * pModule = reinterpret_cast< Module * >(rgModuleIDs[i]);
+ if (pModule == NULL || TypeFromToken(rgMethodDefs[i]) != mdtMethodDef)
+ {
+ ReportReJITError(pModule, rgMethodDefs[i], NULL, E_INVALIDARG);
+ continue;
+ }
+
+ if (pModule->IsBeingUnloaded())
+ {
+ ReportReJITError(pModule, rgMethodDefs[i], NULL, CORPROF_E_DATAINCOMPLETE);
+ continue;
+ }
+
+ if (pModule->IsReflection())
+ {
+ ReportReJITError(pModule, rgMethodDefs[i], NULL, CORPROF_E_MODULE_IS_DYNAMIC);
+ continue;
+ }
+
+ if (!pModule->GetMDImport()->IsValidToken(rgMethodDefs[i]))
+ {
+ ReportReJITError(pModule, rgMethodDefs[i], NULL, E_INVALIDARG);
+ continue;
+ }
+
+ MethodDesc * pMD = pModule->LookupMethodDef(rgMethodDefs[i]);
+
+ if (pMD != NULL)
+ {
+ _ASSERTE(!pMD->IsNoMetadata());
+
+ // Weird, non-user functions can't be rejitted
+ if (!pMD->IsIL())
+ {
+ // Intentionally not reporting an error in this case, to be consistent
+ // with the pre-rejit case: we have no opportunity to report an error
+ // in a pre-rejit request for a non-IL method, since the rejit manager
+ // never gets a call from the prestub worker for non-IL methods. Since
+ // pre-rejit requests silently ignore non-IL methods, regular rejit
+ // requests silently ignore them as well.
+ continue;
+ }
+ }
+
+ ReJitManager * pReJitMgr = pModule->GetReJitManager();
+ _ASSERTE(pReJitMgr != NULL);
+ ReJitManagerJumpStampBatch * pJumpStampBatch = mgrToJumpStampBatch.Lookup(pReJitMgr);
+ if (pJumpStampBatch == NULL)
+ {
+ pJumpStampBatch = new (nothrow) ReJitManagerJumpStampBatch(pReJitMgr);
+ if (pJumpStampBatch == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ hr = S_OK;
+ EX_TRY
+ {
+ // This guy throws when out of memory, but remains internally
+ // consistent (without adding the new element)
+ mgrToJumpStampBatch.Add(pJumpStampBatch);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ _ASSERT(hr == S_OK || hr == E_OUTOFMEMORY);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ }
+
+
+ // At this stage, pMD may be NULL or non-NULL, and the specified function may or
+ // may not be a generic (or a function on a generic class). The operations
+ // below depend on these conditions as follows:
+ //
+ // (1) If pMD == NULL || pMD has no code || pMD is generic
+ // Do a "PRE-REJIT" (add a placeholder ReJitInfo that points to module/token;
+ // there's nothing to jump-stamp)
+ //
+ // (2) If pMD != NULL, but not generic (or function on generic class)
+ // Do a REAL REJIT (add a real ReJitInfo that points to pMD and jump-stamp)
+ //
+ // (3) If pMD != NULL, and is a generic (or function on generic class)
+ // Do a real rejit (including jump-stamp) for all already-jitted instantiations.
+
+ BaseDomain * pBaseDomainFromModule = pModule->GetDomain();
+ SharedReJitInfo * pSharedInfo = NULL;
+ {
+ CrstHolder ch(&(pReJitMgr->m_crstTable));
+
+ // Do a PRE-rejit
+ if (pMD == NULL || !pMD->HasNativeCode() || pMD->HasClassOrMethodInstantiation())
+ {
+ hr = pReJitMgr->MarkForReJit(
+ pModule,
+ rgMethodDefs[i],
+ pJumpStampBatch,
+ &errorRecords,
+ &pSharedInfo);
+ if (FAILED(hr))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ }
+
+ if (pMD == NULL)
+ {
+ // nothing is loaded yet so only the pre-rejit placeholder is needed. We're done for this method.
+ continue;
+ }
+
+ if (!pMD->HasClassOrMethodInstantiation() && pMD->HasNativeCode())
+ {
+ // We have a JITted non-generic. Easy case. Just mark the JITted method
+ // desc as needing to be rejitted
+ hr = pReJitMgr->MarkForReJit(
+ pMD,
+ pSharedInfo,
+ pJumpStampBatch,
+ &errorRecords,
+ NULL); // Don't need the SharedReJitInfo to be returned
+
+ if (FAILED(hr))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ }
+
+ if (!pMD->HasClassOrMethodInstantiation())
+ {
+ // not generic, we're done for this method
+ continue;
+ }
+
+ // Ok, now the case of a generic function (or function on generic class), which
+ // is loaded, and may thus have compiled instantiations.
+ // It's impossible to get to any other kind of domain from the profiling API
+ _ASSERTE(pBaseDomainFromModule->IsAppDomain() ||
+ pBaseDomainFromModule->IsSharedDomain());
+
+ if (pBaseDomainFromModule->IsSharedDomain())
+ {
+ // Iterate through all modules loaded into the shared domain, to
+ // find all instantiations living in the shared domain. This will
+ // include orphaned code (i.e., shared code used by ADs that have
+ // all unloaded), which is good, because orphaned code could get
+ // re-adopted if a new AD is created that can use that shared code
+ hr = pReJitMgr->MarkAllInstantiationsForReJit(
+ pSharedInfo,
+ NULL, // NULL means to search SharedDomain instead of an AD
+ pModule,
+ rgMethodDefs[i],
+ pJumpStampBatch,
+ &errorRecords);
+ }
+ else
+ {
+ // Module is unshared, so just use the module's domain to find instantiations.
+ hr = pReJitMgr->MarkAllInstantiationsForReJit(
+ pSharedInfo,
+ pBaseDomainFromModule->AsAppDomain(),
+ pModule,
+ rgMethodDefs[i],
+ pJumpStampBatch,
+ &errorRecords);
+ }
+ if (FAILED(hr))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ }
+
+ // We want to iterate through all compilations of existing instantiations to
+ // ensure they get marked for rejit. Note: There may be zero instantiations,
+ // but we won't know until we try.
+ if (pBaseDomainFromModule->IsSharedDomain())
+ {
+ // Iterate through all real domains, to find shared instantiations.
+ AppDomainIterator appDomainIterator(TRUE);
+ while (appDomainIterator.Next())
+ {
+ AppDomain * pAppDomain = appDomainIterator.GetDomain();
+ if (pAppDomain->IsUnloading())
+ {
+ continue;
+ }
+ CrstHolder ch(&(pReJitMgr->m_crstTable));
+ hr = pReJitMgr->MarkAllInstantiationsForReJit(
+ pSharedInfo,
+ pAppDomain,
+ pModule,
+ rgMethodDefs[i],
+ pJumpStampBatch,
+ &errorRecords);
+ if (FAILED(hr))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ }
+ }
+ } // for (ULONG i = 0; i < cFunctions; i++)
+
+ // For each rejit mgr, if there's work to do, suspend EE if needed,
+ // enter the rejit mgr's crst, and do the batched work.
+ BOOL fEESuspended = FALSE;
+ SHash<ReJitManagerJumpStampBatchTraits>::Iterator beginIter = mgrToJumpStampBatch.Begin();
+ SHash<ReJitManagerJumpStampBatchTraits>::Iterator endIter = mgrToJumpStampBatch.End();
+ for (SHash<ReJitManagerJumpStampBatchTraits>::Iterator iter = beginIter; iter != endIter; iter++)
+ {
+ ReJitManagerJumpStampBatch * pJumpStampBatch = *iter;
+ ReJitManager * pMgr = pJumpStampBatch->pReJitManager;
+
+ int cBatchedPreStubMethods = pJumpStampBatch->preStubMethods.Count();
+ if (cBatchedPreStubMethods == 0)
+ {
+ continue;
+ }
+ if(!fEESuspended)
+ {
+ // As a potential future optimization we could speculatively try to update the jump stamps without
+ // suspending the runtime. That needs to be plumbed through BatchUpdateJumpStamps though.
+
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
+ fEESuspended = TRUE;
+ }
+
+ CrstHolder ch(&(pMgr->m_crstTable));
+ _ASSERTE(ThreadStore::HoldingThreadStore());
+ hr = pMgr->BatchUpdateJumpStamps(&(pJumpStampBatch->undoMethods), &(pJumpStampBatch->preStubMethods), &errorRecords);
+ if (FAILED(hr))
+ break;
+ }
+ if (fEESuspended)
+ {
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+ }
+
+ if (FAILED(hr))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+
+ // Report any errors that were batched up
+ for (int i = 0; i < errorRecords.Count(); i++)
+ {
+ ReportReJITError(&(errorRecords[i]));
+ }
+
+ INDEBUG(SharedDomain::GetDomain()->GetReJitManager()->Dump(
+ "Finished RequestReJIT(). Dumping Shared ReJitManager\n"));
+
+ // We got through processing everything, but the profiler will need to see the
+ // individual ReJITError callbacks to know what, if anything, failed.
+ return S_OK;
+}
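+
+// Hypothetical profiler-side sketch of what triggers the above. RequestReJIT takes
+// parallel ModuleID / mdMethodDef arrays; per-method failures are reported through
+// ICorProfilerCallback4::ReJITError rather than through this HRESULT. m_pProfilerInfo
+// is an assumed ICorProfilerInfo4* obtained during Initialize().
+//
+// ModuleID rgModules[] = { moduleId };
+// mdMethodDef rgMethods[] = { methodDef };
+// HRESULT hr = m_pProfilerInfo->RequestReJIT(1, rgModules, rgMethods);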
+
+//---------------------------------------------------------------------------------------
+//
+// Helper used by ReJitManager::RequestReJIT to jump stamp all the methods that were
+// specified by the caller. Also used by ReJitManager::DoJumpStampForAssemblyIfNecessary
+// when rejitting a batch of generic method instantiations in a newly loaded NGEN assembly.
+//
+// This method is responsible for calling ReJITError on the profiler if anything goes
+// wrong.
+//
+// Arguments:
+// * pUndoMethods - array containing the methods that need the jump stamp removed
+// * pPreStubMethods - array containing the methods that need to be jump stamped to prestub
+// * pErrors - any errors will be appended to this array
+//
+// Returns:
+// S_OK - all methods were either updated or had an error appended to the pErrors array
+// E_OUTOFMEMORY - some methods were neither updated nor had an error appended to the
+// pErrors array; in either case the ReJitInfo state remains consistent
+//
+// Assumptions:
+// 1) Caller prevents contention by either:
+// a) Suspending the runtime
+// b) Ensuring all methods being updated haven't been published
+//
+HRESULT ReJitManager::BatchUpdateJumpStamps(CDynArray<ReJitInfo *> * pUndoMethods, CDynArray<ReJitInfo *> * pPreStubMethods, CDynArray<ReJitReportErrorWorkItem> * pErrors)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUndoMethods));
+ PRECONDITION(CheckPointer(pPreStubMethods));
+ PRECONDITION(CheckPointer(pErrors));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_crstTable.OwnedByCurrentThread());
+ HRESULT hr = S_OK;
+
+ ReJitInfo ** ppInfoEnd = pUndoMethods->Ptr() + pUndoMethods->Count();
+ for (ReJitInfo ** ppInfoCur = pUndoMethods->Ptr(); ppInfoCur < ppInfoEnd; ppInfoCur++)
+ {
+ // If we are undoing jumpstamps they have been published already
+ // and our caller is holding the EE suspended
+ _ASSERTE(ThreadStore::HoldingThreadStore());
+ if (FAILED(hr = (*ppInfoCur)->UndoJumpStampNativeCode(TRUE)))
+ {
+ if (FAILED(hr = AddReJITError(*ppInfoCur, hr, pErrors)))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ }
+ }
+
+ ppInfoEnd = pPreStubMethods->Ptr() + pPreStubMethods->Count();
+ for (ReJitInfo ** ppInfoCur = pPreStubMethods->Ptr(); ppInfoCur < ppInfoEnd; ppInfoCur++)
+ {
+ if (FAILED(hr = (*ppInfoCur)->JumpStampNativeCode()))
+ {
+ if (FAILED(hr = AddReJITError(*ppInfoCur, hr, pErrors)))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ }
+ }
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Helper used by ReJitManager::RequestReJIT to iterate through any generic
+// instantiations of a function in a given AppDomain, and to create the corresponding
+// ReJitInfos for those MethodDescs. This also adds corresponding entries to a temporary
+// dynamic array created by our caller for batching up the jump-stamping we'll need to do
+// later.
+//
+// This method is responsible for calling ReJITError on the profiler if anything goes
+// wrong.
+//
+// Arguments:
+// * pSharedForAllGenericInstantiations - The SharedReJitInfo for this mdMethodDef's
+// rejit request. This is what we must associate any newly-created ReJitInfo with.
+// * pAppDomainToSearch - AppDomain in which to search for generic instantiations
+// matching the specified methodDef. If it is NULL, then we'll search for all
+// MethodDescs whose metadata definition appears in a Module loaded into the
+// SharedDomain (regardless of which ADs--if any--are using those MethodDescs).
+// This captures the case of domain-neutral code that was in use by an AD that
+// unloaded, and may come into use again once a new AD loads that can use the
+// shared code.
+// * pModuleContainingMethodDef - Module* containing the specified methodDef token.
+// * methodDef - Token for the method for which we're searching for MethodDescs.
+// * pJumpStampBatch - Batch we're responsible for placing ReJitInfo's into, on which
+// the caller will update the jump stamps.
+// * pRejitErrors - Dynamic array we're responsible for adding error records into.
+// The caller will report them to the profiler outside the table lock
+//
+// Returns:
+// S_OK - all methods were either marked for rejit OR have appropriate error records
+// in pRejitErrors
+// E_OUTOFMEMORY - some methods weren't marked for rejit AND we didn't have enough
+// memory to create the error records
+//
+// Assumptions:
+// * This function should only be called on the ReJitManager that owns the (generic)
+// definition of methodDef
+// * If pModuleContainingMethodDef is loaded into the SharedDomain, then
+// pAppDomainToSearch may be NULL (to search all instantiations loaded shared),
+// or may be non-NULL (to search all instantiations loaded into
+// pAppDomainToSearch)
+// * If pModuleContainingMethodDef is not loaded domain-neutral, then
+// pAppDomainToSearch must be non-NULL (and, indeed, must be the very AD that
+// pModuleContainingMethodDef is loaded into).
+//
+
+HRESULT ReJitManager::MarkAllInstantiationsForReJit(
+ SharedReJitInfo * pSharedForAllGenericInstantiations,
+ AppDomain * pAppDomainToSearch,
+ PTR_Module pModuleContainingMethodDef,
+ mdMethodDef methodDef,
+ ReJitManagerJumpStampBatch* pJumpStampBatch,
+ CDynArray<ReJitReportErrorWorkItem> * pRejitErrors)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ PRECONDITION(CheckPointer(pSharedForAllGenericInstantiations));
+ PRECONDITION(CheckPointer(pAppDomainToSearch, NULL_OK));
+ PRECONDITION(CheckPointer(pModuleContainingMethodDef));
+ PRECONDITION(CheckPointer(pJumpStampBatch));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_crstTable.OwnedByCurrentThread());
+ _ASSERTE(methodDef != mdTokenNil);
+ _ASSERTE(pJumpStampBatch->pReJitManager == this);
+
+ HRESULT hr;
+
+ BaseDomain * pDomainContainingGenericDefinition = pModuleContainingMethodDef->GetDomain();
+
+#ifdef _DEBUG
+ // This function should only be called on the ReJitManager that owns the (generic)
+ // definition of methodDef
+ _ASSERTE(this == pDomainContainingGenericDefinition->GetReJitManager());
+
+ // If the generic definition is not loaded domain-neutral, then all its
+ // instantiations will also be non-domain-neutral and loaded into the same
+ // domain as the generic definition. So the caller may only pass the
+ // domain containing the generic definition as pAppDomainToSearch
+ if (!pDomainContainingGenericDefinition->IsSharedDomain())
+ {
+ _ASSERTE(pDomainContainingGenericDefinition == pAppDomainToSearch);
+ }
+#endif //_DEBUG
+
+ // If pAppDomainToSearch is NULL, iterate through all existing
+ // instantiations loaded into the SharedDomain. If pAppDomainToSearch is non-NULL,
+ // iterate through all existing instantiations in pAppDomainToSearch, and only consider
+ // instantiations in non-domain-neutral assemblies (as we already covered domain
+ // neutral assemblies when we searched the SharedDomain).
+ LoadedMethodDescIterator::AssemblyIterationMode mode = LoadedMethodDescIterator::kModeSharedDomainAssemblies;
+ // these are the default flags which won't actually be used in shared mode other than
+ // asserting they were specified with their default values
+ AssemblyIterationFlags assemFlags = (AssemblyIterationFlags) (kIncludeLoaded | kIncludeExecution);
+ ModuleIterationOption moduleFlags = (ModuleIterationOption) kModIterIncludeLoaded;
+ if (pAppDomainToSearch != NULL)
+ {
+ mode = LoadedMethodDescIterator::kModeUnsharedADAssemblies;
+ assemFlags = (AssemblyIterationFlags)(kIncludeAvailableToProfilers | kIncludeExecution);
+ moduleFlags = (ModuleIterationOption)kModIterIncludeAvailableToProfilers;
+ }
+ LoadedMethodDescIterator it(
+ pAppDomainToSearch,
+ pModuleContainingMethodDef,
+ methodDef,
+ mode,
+ assemFlags,
+ moduleFlags);
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+ while (it.Next(pDomainAssembly.This()))
+ {
+ MethodDesc * pLoadedMD = it.Current();
+
+ if (!pLoadedMD->HasNativeCode())
+ {
+ // Skip MethodDescs that haven't been JITted yet. The placeholder added by our caller
+ // is sufficient to ensure they'll eventually be rejitted when they get
+ // compiled.
+ continue;
+ }
+
+ if (FAILED(hr = IsMethodSafeForReJit(pLoadedMD)))
+ {
+ if (FAILED(hr = AddReJITError(pModuleContainingMethodDef, methodDef, pLoadedMD, hr, pRejitErrors)))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ continue;
+ }
+
+#ifdef _DEBUG
+ if (!pDomainContainingGenericDefinition->IsSharedDomain())
+ {
+ // Method is defined outside of the shared domain, so its instantiation must
+ // be defined in the AD we're iterating over (pAppDomainToSearch, which, as
+ // asserted above, must be the same domain as the generic's definition)
+ _ASSERTE(pLoadedMD->GetDomain() == pAppDomainToSearch);
+ }
+#endif // _DEBUG
+
+ // This will queue up the MethodDesc for rejitting and create all the
+ // look-aside tables needed.
+ SharedReJitInfo * pSharedUsed = NULL;
+ hr = MarkForReJit(
+ pLoadedMD,
+ pSharedForAllGenericInstantiations,
+ pJumpStampBatch,
+ pRejitErrors,
+ &pSharedUsed);
+ if (FAILED(hr))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ }
+
+ return S_OK;
+}
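+
+// Illustrative example of what the iteration above covers (type names invented):
+// if methodDef refers to Lookup<T>.Find and the domain being searched has JITted
+// Lookup<int>.Find and Lookup<string>.Find, both MethodDescs come back from the
+// iterator and are marked here. A Lookup<double>.Find that was never JITted is
+// instead covered by the placeholder ReJitInfo our caller added, and will be
+// jump-stamped when it is first compiled.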
+
+
+//---------------------------------------------------------------------------------------
+//
+// Helper used by ReJitManager::MarkAllInstantiationsForReJit and
+// ReJitManager::RequestReJIT to do the actual ReJitInfo allocation and
+// placement inside m_table. Note that callers don't use MarkForReJitHelper
+// directly. Instead, callers actually use the inlined overloaded wrappers
+// ReJitManager::MarkForReJit (one for placeholder (i.e., methodDef pre-rejit)
+// ReJitInfos and one for regular (i.e., MethodDesc) ReJitInfos). When the
+// overloaded MarkForReJit wrappers call this, they ensure that either pMD is
+// valid XOR (pModule, methodDef) is valid.
+//
+// Arguments:
+// * pMD - MethodDesc for which to find / create ReJitInfo. Only used if
+// we're creating a regular ReJitInfo
+// * pModule - Module for which to find / create ReJitInfo. Only used if
+// we're creating a placeholder ReJitInfo
+// * methodDef - methodDef for which to find / create ReJitInfo. Only used
+// if we're creating a placeholder ReJitInfo
+// * pSharedToReuse - SharedReJitInfo to associate any newly created
+// ReJitInfo with. If NULL, we'll create a new one.
+// * pJumpStampBatch - a batch of methods that need to have jump stamps added
+// or removed. This method will add new ReJitInfos to the batch as needed.
+// * pRejitErrors - An array of rejit errors that this call will append to
+// if there is an error marking
+// * ppSharedUsed - [out]: SharedReJitInfo used for this request. If
+// pSharedToReuse is non-NULL, *ppSharedUsed == pSharedToReuse. Else,
+// *ppSharedUsed is the SharedReJitInfo newly-created to associate with
+// the ReJitInfo used for this request.
+//
+// Return Value:
+// * S_OK: Successfully created a new ReJitInfo to manage this request
+// * S_FALSE: An existing ReJitInfo was already available to manage this
+// request, so we didn't need to create a new one.
+// * E_OUTOFMEMORY
+// * Else, a failure HRESULT indicating what went wrong.
+//
+
+HRESULT ReJitManager::MarkForReJitHelper(
+ PTR_MethodDesc pMD,
+ PTR_Module pModule,
+ mdMethodDef methodDef,
+ SharedReJitInfo * pSharedToReuse,
+ ReJitManagerJumpStampBatch* pJumpStampBatch,
+ CDynArray<ReJitReportErrorWorkItem> * pRejitErrors,
+ /* out */ SharedReJitInfo ** ppSharedUsed)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ PRECONDITION(CheckPointer(pMD, NULL_OK));
+ PRECONDITION(CheckPointer(pModule, NULL_OK));
+ PRECONDITION(CheckPointer(pJumpStampBatch));
+ PRECONDITION(CheckPointer(pRejitErrors));
+ PRECONDITION(CheckPointer(ppSharedUsed, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ CrstHolder ch(&m_crstTable);
+
+ // Either pMD is valid, xor (pModule,methodDef) is valid
+ _ASSERTE(
+ ((pMD != NULL) && (pModule == NULL) && (methodDef == mdTokenNil)) ||
+ ((pMD == NULL) && (pModule != NULL) && (methodDef != mdTokenNil)));
+ _ASSERTE(pJumpStampBatch->pReJitManager == this);
+
+ if (ppSharedUsed != NULL)
+ *ppSharedUsed = NULL;
+ HRESULT hr = S_OK;
+
+ // Check if there was a previous rejit request for pMD
+
+ ReJitInfoHash::KeyIterator beginIter(&m_table, TRUE /* begin */);
+ ReJitInfoHash::KeyIterator endIter(&m_table, FALSE /* begin */);
+
+ if (pMD != NULL)
+ {
+ beginIter = GetBeginIterator(pMD);
+ endIter = GetEndIterator(pMD);
+ }
+ else
+ {
+ beginIter = GetBeginIterator(pModule, methodDef);
+ endIter = GetEndIterator(pModule, methodDef);
+ }
+
+ for (ReJitInfoHash::KeyIterator iter = beginIter;
+ iter != endIter;
+ iter++)
+ {
+ ReJitInfo * pInfo = *iter;
+ _ASSERTE(pInfo->m_pShared != NULL);
+
+#ifdef _DEBUG
+ if (pMD != NULL)
+ {
+ _ASSERTE(pInfo->GetMethodDesc() == pMD);
+ }
+ else
+ {
+ Module * pModuleTest = NULL;
+ mdMethodDef methodDefTest = mdTokenNil;
+ pInfo->GetModuleAndToken(&pModuleTest, &methodDefTest);
+ _ASSERTE((pModule == pModuleTest) && (methodDef == methodDefTest));
+ }
+#endif //_DEBUG
+
+ SharedReJitInfo * pShared = pInfo->m_pShared;
+
+ switch (pShared->GetState())
+ {
+ case SharedReJitInfo::kStateRequested:
+ // We can 'reuse' this instance because the profiler doesn't know about
+ // it yet. (This likely happened because a profiler called RequestReJIT
+ // twice in a row without us having a chance to jmp-stamp the code yet, OR
+ // because, while iterating through instantiations of a generic, the iterator
+ // found duplicate entries for the same instantiation.)
+ _ASSERTE(pShared->m_pbIL == NULL);
+ _ASSERTE(pInfo->m_pCode == NULL);
+
+ if (ppSharedUsed != NULL)
+ *ppSharedUsed = pShared;
+
+ INDEBUG(AssertRestOfEntriesAreReverted(iter, endIter));
+ return S_FALSE;
+
+ case SharedReJitInfo::kStateGettingReJITParameters:
+ case SharedReJitInfo::kStateActive:
+ {
+ // Profiler has already requested to rejit this guy, AND we've already
+ // at least started getting the rejit parameters from the profiler. We need to revert this
+ // instance (this will put back the original code).
+
+ INDEBUG(AssertRestOfEntriesAreReverted(iter, endIter));
+ hr = Revert(pShared, pJumpStampBatch);
+ if (FAILED(hr))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ _ASSERTE(pShared->GetState() == SharedReJitInfo::kStateReverted);
+
+ // No need to continue looping. Break out of loop to create a new
+ // ReJitInfo to service the request.
+ goto EXIT_LOOP;
+ }
+ case SharedReJitInfo::kStateReverted:
+ // just ignore this guy
+ continue;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+EXIT_LOOP:
+
+ // Either there was no ReJitInfo yet for this MethodDesc OR whatever we've found
+ // couldn't be reused (and needed to be reverted). Create a new ReJitInfo to return
+ // to the caller.
+ //
+ // If the caller gave us a pMD that is a new generic instantiation, then the caller
+ // may also have provided a pSharedToReuse for the generic. Use that instead of
+ // creating a new one.
+
+ SharedReJitInfo * pShared = NULL;
+
+ if (pSharedToReuse != NULL)
+ {
+ pShared = pSharedToReuse;
+ }
+ else
+ {
+ PTR_LoaderHeap pHeap = NULL;
+ if (pModule != NULL)
+ {
+ pHeap = pModule->GetLoaderAllocator()->GetLowFrequencyHeap();
+ }
+ else
+ {
+ pHeap = pMD->GetLoaderAllocator()->GetLowFrequencyHeap();
+ }
+ pShared = new (pHeap, nothrow) SharedReJitInfo;
+ if (pShared == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ }
+
+ _ASSERTE(pShared != NULL);
+
+ // ReJitInfos with MethodDescs need to be jump-stamped, while
+ // ReJitInfos with Module/MethodDef are placeholders that don't need a stamp.
+ ReJitInfo * pInfo = NULL;
+ ReJitInfo ** ppInfo = &pInfo;
+ if (pMD != NULL)
+ {
+ ppInfo = pJumpStampBatch->preStubMethods.Append();
+ if (ppInfo == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ }
+ hr = AddNewReJitInfo(pMD, pModule, methodDef, pShared, ppInfo);
+ if (FAILED(hr))
+ {
+ // NOTE: We could consider using an AllocMemTracker or AllocMemHolder
+ // here to back out the allocation of pShared, but it probably
+ // wouldn't make much of a difference. We'll only get here if we ran
+ // out of memory allocating the pInfo, so our memory has already been
+ // blown. We can't cause much leaking due to this error path.
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+
+ _ASSERTE(*ppInfo != NULL);
+
+ if (ppSharedUsed != NULL)
+ *ppSharedUsed = pShared;
+
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Helper used by the above helpers (and also during jump-stamping) to
+// allocate and store a new ReJitInfo.
+//
+// Arguments:
+// * pMD - MethodDesc for which to create ReJitInfo. Only used if we're
+// creating a regular ReJitInfo
+// * pModule - Module for which to create ReJitInfo. Only used if we're
+// creating a placeholder ReJitInfo
+// * methodDef - methodDef for which to create ReJitInfo. Only used if
+// we're creating a placeholder ReJitInfo
+// * pShared - SharedReJitInfo to associate the newly created ReJitInfo
+// with.
+// * ppInfo - [out]: ReJitInfo created
+//
+// Return Value:
+// * S_OK: ReJitInfo successfully created & stored.
+// * Else, failure indicating the problem. Currently only E_OUTOFMEMORY.
+//
+// Assumptions:
+// * Caller should be holding this ReJitManager's table crst.
+//
+
+HRESULT ReJitManager::AddNewReJitInfo(
+ PTR_MethodDesc pMD,
+ PTR_Module pModule,
+ mdMethodDef methodDef,
+ SharedReJitInfo * pShared,
+ ReJitInfo ** ppInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ PRECONDITION(CheckPointer(pMD, NULL_OK));
+ PRECONDITION(CheckPointer(pModule, NULL_OK));
+ PRECONDITION(CheckPointer(pShared));
+ PRECONDITION(CheckPointer(ppInfo));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_crstTable.OwnedByCurrentThread());
+ _ASSERTE(pShared->GetState() != SharedReJitInfo::kStateReverted);
+
+ // Either pMD is valid, xor (pModule,methodDef) is valid
+ _ASSERTE(
+ ((pMD != NULL) && (pModule == NULL) && (methodDef == mdTokenNil)) ||
+ ((pMD == NULL) && (pModule != NULL) && (methodDef != mdTokenNil)));
+
+ HRESULT hr;
+ ReJitInfo * pInfo = NULL;
+
+ if (pMD != NULL)
+ {
+ PTR_LoaderHeap pHeap = pMD->GetLoaderAllocator()->GetLowFrequencyHeap();
+ pInfo = new (pHeap, nothrow) ReJitInfo(pMD, pShared);
+ }
+ else
+ {
+ PTR_LoaderHeap pHeap = pModule->GetLoaderAllocator()->GetLowFrequencyHeap();
+ pInfo = new (pHeap, nothrow) ReJitInfo(pModule, methodDef, pShared);
+ }
+ if (pInfo == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ hr = S_OK;
+ EX_TRY
+ {
+ // This guy throws when out of memory, but remains internally
+ // consistent (without adding the new element)
+ m_table.Add(pInfo);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ _ASSERT(hr == S_OK || hr == E_OUTOFMEMORY);
+ if (FAILED(hr))
+ {
+ pInfo = NULL;
+ return hr;
+ }
+
+ *ppInfo = pInfo;
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Given a MethodDesc, call ReJitInfo::JumpStampNativeCode to stamp the top of its
+// originally-jitted-code with a jmp that goes to the prestub. This is called by the
+// prestub worker after jitting the original code of a function (i.e., the "pre-rejit"
+// scenario). In this case, the EE is not suspended. But that's ok, because the PCODE has
+// not yet been published to the MethodDesc, and no thread can be executing inside the
+// originally JITted function yet.
+//
+// Arguments:
+// * pMD - MethodDesc to jmp-stamp
+// * pCode - Top of the code that was just jitted (using original IL).
+//
+//
+// Return value:
+// * S_OK: Either we successfully did the jmp-stamp, or we didn't have to (e.g., there
+// was no outstanding pre-rejit request for this MethodDesc, or a racing thread
+// took care of it for us).
+// * Else, HRESULT indicating failure.
+
+// Assumptions:
+// The caller has not yet published pCode to the MethodDesc, so no threads can be
+// executing inside pMD's code yet. Thus, we don't need to suspend the runtime while
+// applying the jump-stamp like we usually do for rejit requests that are made after
+// a function has been JITted.
+//
+
+HRESULT ReJitManager::DoJumpStampIfNecessary(MethodDesc* pMD, PCODE pCode)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(pCode != NULL);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+
+ _ASSERTE(IsTableCrstOwnedByCurrentThread());
+
+ ReJitInfo * pInfoToJumpStamp = NULL;
+
+ // First, try looking up ReJitInfo by MethodDesc. A "regular" MethodDesc-based
+ // ReJitInfo already exists for "case 1" (see comment above
+ // code:ReJitInfo::JumpStampNativeCode), and could even exist for "case 2"
+ // (pre-rejit), if either:
+ // * The pre-rejit was requested after the MD had already been loaded (though
+ // before it had been jitted) OR
+ // * there was a race to JIT the original code for the MD, and another thread got
+ // here before us and already added the ReJitInfo for that MD.
+
+ ReJitInfoHash::KeyIterator beginIter = GetBeginIterator(pMD);
+ ReJitInfoHash::KeyIterator endIter = GetEndIterator(pMD);
+
+ pInfoToJumpStamp = FindPreReJittedReJitInfo(beginIter, endIter);
+ if (pInfoToJumpStamp != NULL)
+ {
+ // Found it. Jump-stamp, SetNativeCode, and we're done.
+ _ASSERTE(pInfoToJumpStamp->GetMethodDesc() == pMD);
+ return pInfoToJumpStamp->JumpStampNativeCode(pCode);
+ }
+
+ // Failing that, try looking up by module / metadata token. This is the case where
+ // the pre-rejit request occurred before the MD was loaded.
+
+ Module * pModule = pMD->GetModule();
+ _ASSERTE(pModule != NULL);
+ mdMethodDef methodDef = pMD->GetMemberDef();
+
+ beginIter = GetBeginIterator(pModule, methodDef);
+ endIter = GetEndIterator(pModule, methodDef);
+ ReJitInfo * pInfoPlaceholder = NULL;
+
+ pInfoPlaceholder = FindPreReJittedReJitInfo(beginIter, endIter);
+ if (pInfoPlaceholder == NULL)
+ {
+ // No jump stamping to do.
+ return S_OK;
+ }
+
+#ifdef _DEBUG
+ {
+ Module * pModuleTest = NULL;
+ mdMethodDef methodDefTest = mdTokenNil;
+ INDEBUG(pInfoPlaceholder->GetModuleAndToken(&pModuleTest, &methodDefTest));
+ _ASSERTE((pModule == pModuleTest) && (methodDef == methodDefTest));
+ }
+#endif //_DEBUG
+
+ // We have finished JITting the original code for a function that had been
+ // "pre-rejitted" (i.e., requested to be rejitted before it was first compiled). So
+ // now is the first time where we know the MethodDesc of the request.
+ if (FAILED(hr = IsMethodSafeForReJit(pMD)))
+ {
+ // No jump stamping to do.
+ return hr;
+ }
+
+ // Create the ReJitInfo associated with the MethodDesc now (pInfoToJumpStamp), and
+ // jump-stamp the original code.
+ pInfoToJumpStamp = NULL;
+ hr = AddNewReJitInfo(pMD, NULL /*pModule*/, NULL /*methodDef*/, pInfoPlaceholder->m_pShared, &pInfoToJumpStamp);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ _ASSERTE(pInfoToJumpStamp != NULL);
+ return pInfoToJumpStamp->JumpStampNativeCode(pCode);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// ICorProfilerInfo4::RequestRevert calls into this guy to do most of the
+// work. Takes care of finding the appropriate ReJitManager instances to
+// perform the revert
+//
+// Arguments:
+// * cFunctions - Element count of rgModuleIDs & rgMethodDefs
+// * rgModuleIDs - Parallel array of ModuleIDs to revert
+// * rgMethodDefs - Parallel array of methodDefs to revert
+// * rgHrStatuses - [out] Parallel array of HRESULTs indicating success/failure
+// of reverting each (ModuleID, methodDef).
+//
+// Return Value:
+// HRESULT indicating success or failure of the overall operation. Each
+// individual methodDef (or MethodDesc associated with the methodDef)
+// may encounter its own failure, which is reported by the rgHrStatuses
+// [out] parameter.
+//
+
+// static
+HRESULT ReJitManager::RequestRevert(
+ ULONG cFunctions,
+ ModuleID rgModuleIDs[],
+ mdMethodDef rgMethodDefs[],
+ HRESULT rgHrStatuses[])
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ CAN_TAKE_LOCK;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ // Serialize all RequestReJIT() and Revert() calls against each other (even across AppDomains)
+ CrstHolder ch(&(s_csGlobalRequest));
+
+ // Request at least 1 method to revert!
+ _ASSERTE ((cFunctions != 0) && (rgModuleIDs != NULL) && (rgMethodDefs != NULL));
+
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
+ for (ULONG i = 0; i < cFunctions; i++)
+ {
+ HRESULT hr = E_UNEXPECTED;
+ Module * pModule = reinterpret_cast< Module * >(rgModuleIDs[i]);
+ if (pModule == NULL || TypeFromToken(rgMethodDefs[i]) != mdtMethodDef)
+ {
+ hr = E_INVALIDARG;
+ }
+ else if (pModule->IsBeingUnloaded())
+ {
+ hr = CORPROF_E_DATAINCOMPLETE;
+ }
+ else if (pModule->IsReflection())
+ {
+ hr = CORPROF_E_MODULE_IS_DYNAMIC;
+ }
+ else
+ {
+ hr = pModule->GetReJitManager()->RequestRevertByToken(pModule, rgMethodDefs[i]);
+ }
+
+ if (rgHrStatuses != NULL)
+ {
+ rgHrStatuses[i] = hr;
+ }
+ }
+
+ ThreadSuspend::RestartEE(FALSE /* bFinishedGC */, TRUE /* SuspendSucceded */);
+
+ return S_OK;
+}
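+
+// Hypothetical profiler-side sketch of the corresponding revert call. The out-array
+// receives one HRESULT per (ModuleID, methodDef) pair; as the loop above shows,
+// passing NULL for the status array is tolerated. Array names match the RequestReJIT
+// sketch earlier and are assumptions.
+//
+// HRESULT rgStatuses[1] = { E_UNEXPECTED };
+// HRESULT hr = m_pProfilerInfo->RequestRevert(1, rgModules, rgMethods, rgStatuses);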
+
+//---------------------------------------------------------------------------------------
+//
+// Called by AppDomain::Exit() to notify the SharedDomain's ReJitManager that this
+// AppDomain is exiting. The SharedDomain's ReJitManager will then remove any
+// ReJitInfos relating to MethodDescs owned by that AppDomain. This is how we remove
+// non-domain-neutral instantiations of domain-neutral generics from the SharedDomain's
+// ReJitManager.
+//
+// Arguments:
+// pAppDomain - AppDomain that is exiting.
+//
+
+// static
+void ReJitManager::OnAppDomainExit(AppDomain * pAppDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // All ReJitInfos and SharedReJitInfos for this AD's ReJitManager automatically get
+ // cleaned up as they're allocated on the AD's loader heap.
+
+ // We explicitly clean up the SHash here, as its entries get allocated using regular
+ // "new"
+ pAppDomain->GetReJitManager()->m_table.RemoveAll();
+
+ // We need to ensure that any MethodDescs from pAppDomain that are stored on the
+ // SharedDomain's ReJitManager get removed from the SharedDomain's ReJitManager's
+ // hash table, and from the linked lists tied to their owning SharedReJitInfo. (This
+ // covers the case of non-domain-neutral instantiations of domain-neutral generics.)
+ SharedDomain::GetDomain()->GetReJitManager()->RemoveReJitInfosFromDomain(pAppDomain);
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Small helper to determine whether a given (possibly instantiated generic) MethodDesc
+// is safe to rejit. If not, this function is responsible for calling into the
+// profiler's ReJITError()
+//
+// Arguments:
+// pMD - MethodDesc to test
+// Return Value:
+// S_OK iff pMD is safe to rejit
+// CORPROF_E_FUNCTION_IS_COLLECTIBLE - function can't be rejitted because it is collectible
+//
+
+// static
+HRESULT ReJitManager::IsMethodSafeForReJit(PTR_MethodDesc pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pMD != NULL);
+
+ // Weird, non-user functions were already weeded out in RequestReJIT(), and will
+ // also never be passed to us by the prestub worker (for the pre-rejit case).
+ _ASSERTE(pMD->IsIL());
+
+ // Any MethodDescs that could be collected are not currently supported. Although we
+ // rule out all Ref.Emit modules in RequestReJIT(), there can still exist types defined
+ // in a non-reflection module and instantiated into a collectible assembly
+ // (e.g., List<MyCollectibleStruct>). In the future we may lift this
+ // restriction by updating the ReJitManager when the collectible assemblies
+ // owning the instantiations get collected.
+ if (pMD->GetLoaderAllocator()->IsCollectible())
+ {
+ return CORPROF_E_FUNCTION_IS_COLLECTIBLE;
+ }
+
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Simple wrapper around GetCurrentReJitFlagsWorker. See
+// code:ReJitManager::GetCurrentReJitFlagsWorker for information about parameters, return
+// values, etc.
+
+// static
+DWORD ReJitManager::GetCurrentReJitFlags(PTR_MethodDesc pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ return pMD->GetReJitManager()->GetCurrentReJitFlagsWorker(pMD);
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Given a methodDef token, finds the corresponding ReJitInfo, and asks the
+// ReJitInfo to perform a revert.
+//
+// Arguments:
+// * pModule - Module to revert
+// * methodDef - methodDef token to revert
+//
+// Return Value:
+// HRESULT indicating success or failure. If the method was never
+// rejitted in the first place, this method returns a special error code
+// (CORPROF_E_ACTIVE_REJIT_REQUEST_NOT_FOUND). E_OUTOFMEMORY is returned if
+// an allocation fails along the way.
+//
+
+HRESULT ReJitManager::RequestRevertByToken(PTR_Module pModule, mdMethodDef methodDef)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(ThreadStore::HoldingThreadStore());
+ CrstHolder ch(&m_crstTable);
+
+ _ASSERTE(pModule != NULL);
+ _ASSERTE(methodDef != mdTokenNil);
+
+ ReJitInfo * pInfo = NULL;
+ MethodDesc * pMD = NULL;
+
+ pInfo = FindNonRevertedReJitInfo(pModule, methodDef);
+ if (pInfo == NULL)
+ {
+ pMD = pModule->LookupMethodDef(methodDef);
+ pInfo = FindNonRevertedReJitInfo(pMD);
+ if (pInfo == NULL)
+ return CORPROF_E_ACTIVE_REJIT_REQUEST_NOT_FOUND;
+ }
+
+ _ASSERTE (pInfo != NULL);
+ _ASSERTE (pInfo->m_pShared != NULL);
+ _ASSERTE (pInfo->m_pShared->GetState() != SharedReJitInfo::kStateReverted);
+ ReJitManagerJumpStampBatch batch(this);
+ HRESULT hr = Revert(pInfo->m_pShared, &batch);
+ if (FAILED(hr))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ CDynArray<ReJitReportErrorWorkItem> errorRecords;
+ hr = BatchUpdateJumpStamps(&(batch.undoMethods), &(batch.preStubMethods), &errorRecords);
+ if (FAILED(hr))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ // If there were any errors, return the first one. This matches previous error handling
+ // behavior that only returned the first error encountered within Revert().
+ for (int i = 0; i < errorRecords.Count(); i++)
+ {
+ _ASSERTE(FAILED(errorRecords[i].hrStatus));
+ return errorRecords[i].hrStatus;
+ }
+ return S_OK;
+}
+
+
+
+
+//---------------------------------------------------------------------------------------
+//
+// Called by the prestub worker, this function decides if the MethodDesc needs to be
+// rejitted, and if so, this will call the profiler to get the rejit parameters (if they
+// are not yet stored), and then perform the actual re-JIT (by calling, indirectly,
+// UnsafeJitFunction).
+//
+// In order to allow the re-JIT to occur outside of any locks, the following sequence is
+// performed:
+//
+// * Enter this ReJitManager's table crst
+// * Find the single ReJitInfo (if any) in the table matching the input pMD. This
+// represents the outstanding rejit request against this pMD
+// * If necessary, ask profiler for IL & codegen flags (by calling
+// GetReJITParameters()), thus transitioning the corresponding SharedReJitInfo
+// state kStateRequested-->kStateActive
+// * Exit this ReJitManager's table crst
+// * (following steps occur when DoReJitIfNecessary() calls DoReJit())
+// * Call profiler's ReJitCompilationStarted()
+// * Call UnsafeJitFunction with the IL / codegen flags provided by profiler, as stored
+// on the SharedReJitInfo. Note that if another Rejit request came in, then we would
+// create new SharedReJitInfo & ReJitInfo structures to track it, rather than
+// modifying the ReJitInfo / SharedReJitInfo we found above. So the ReJitInfo we're
+// using here (outside the lock), is "fixed" in the sense that its IL / codegen flags
+// will not change.
+// * (below is where we handle any races that might have occurred between threads
+// simultaneously rejitting this function)
+// * Enter this ReJitManager's table crst
+// * Check to see if another thread has already published the rejitted PCODE to
+// ReJitInfo::m_pCode. If so, bail.
+// * If we're the winner, publish our rejitted PCODE to ReJitInfo::m_pCode...
+// * ...and update the jump-stamp at the top of the originally JITted code so that it
+// now points to our rejitted code (instead of the prestub)
+// * Exit this ReJitManager's table crst
+// * Call profiler's ReJitCompilationFinished()
+// * Fire relevant ETW events
+//
+// Arguments:
+// pMD - MethodDesc to decide whether to rejit
+//
+// Return Value:
+// * If a rejit was performed, the PCODE of the generated code.
+// * If the ReJitManager changed its mind and chose not to do a rejit (e.g., a
+// revert request raced with this rejit request, and the revert won), just
+// return the PCODE of the originally JITted code (pMD->GetNativeCode())
+// * Else, NULL (which means the ReJitManager doesn't know or care about this
+// MethodDesc)
+//
+
+PCODE ReJitManager::DoReJitIfNecessaryWorker(PTR_MethodDesc pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(!IsTableCrstOwnedByCurrentThread());
+
+ // Fast-path: If the rejit map is empty, no need to look up anything. Do this outside
+ // of a lock to impact our caller (the prestub worker) as little as possible. If the
+ // map is nonempty, we'll acquire the lock at that point and do the lookup for real.
+ if (m_table.GetCount() == 0)
+ {
+ return NULL;
+ }
+
+ HRESULT hr = S_OK;
+ ReJitInfo * pInfoToRejit = NULL;
+ Module* pModule = NULL;
+ mdMethodDef methodDef = mdTokenNil;
+ BOOL fNeedsParameters = FALSE;
+ BOOL fWaitForParameters = FALSE;
+
+ {
+ // Serialize access to the rejit table. Once we find the ReJitInfo we want,
+ // we exit the Crst so we can ReJIT the method without holding a lock.
+ CrstHolder ch(&m_crstTable);
+
+ ReJitInfoHash::KeyIterator iter = GetBeginIterator(pMD);
+ ReJitInfoHash::KeyIterator end = GetEndIterator(pMD);
+
+ if (iter == end)
+ {
+ // No rejit actions necessary
+ return NULL;
+ }
+
+
+ for (; iter != end; iter++)
+ {
+ ReJitInfo * pInfo = *iter;
+ _ASSERTE(pInfo->GetMethodDesc() == pMD);
+ _ASSERTE(pInfo->m_pShared != NULL);
+ SharedReJitInfo * pShared = pInfo->m_pShared;
+
+ switch (pShared->GetState())
+ {
+ case SharedReJitInfo::kStateRequested:
+ if (pInfo->GetState() == ReJitInfo::kJumpNone)
+ {
+ // We haven't actually suspended threads and jump-stamped the
+ // method's prolog so just ignore this guy
+ INDEBUG(AssertRestOfEntriesAreReverted(iter, end));
+ return NULL;
+ }
+ // When the SharedReJitInfo is still in the requested state, we haven't
+ // gathered IL & codegen flags from the profiler yet, so we can't be
+ // pointing to rejitted code already. We must be pointing to the prestub.
+ _ASSERTE(pInfo->GetState() == ReJitInfo::kJumpToPrestub);
+
+ pInfo->GetModuleAndTokenRegardlessOfKeyType(&pModule, &methodDef);
+ pShared->m_dwInternalFlags &= ~SharedReJitInfo::kStateMask;
+ pShared->m_dwInternalFlags |= SharedReJitInfo::kStateGettingReJITParameters;
+ pInfoToRejit = pInfo;
+ fNeedsParameters = TRUE;
+ break;
+
+ case SharedReJitInfo::kStateGettingReJITParameters:
+ if (pInfo->GetState() == ReJitInfo::kJumpNone)
+ {
+ // We haven't actually suspended threads and jump-stamped the
+ // method's prolog so just ignore this guy
+ INDEBUG(AssertRestOfEntriesAreReverted(iter, end));
+ return NULL;
+ }
+ pInfoToRejit = pInfo;
+ fWaitForParameters = TRUE;
+ break;
+
+ case SharedReJitInfo::kStateActive:
+ INDEBUG(AssertRestOfEntriesAreReverted(iter, end));
+ if (pInfo->GetState() == ReJitInfo::kJumpNone)
+ {
+ // We haven't actually suspended threads and jump-stamped the
+ // method's prolog so just ignore this guy
+ return NULL;
+ }
+ if (pInfo->GetState() == ReJitInfo::kJumpToRejittedCode)
+ {
+ // Looks like another thread beat us in a race to rejit, so ignore.
+ return NULL;
+ }
+
+ // Found a ReJitInfo to actually rejit.
+ _ASSERTE(pInfo->GetState() == ReJitInfo::kJumpToPrestub);
+ pInfoToRejit = pInfo;
+ goto ExitLoop;
+
+ case SharedReJitInfo::kStateReverted:
+ // just ignore this guy
+ continue;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+ ExitLoop:
+ ;
+ }
+
+ if (pInfoToRejit == NULL)
+ {
+ // Didn't find the requested MD to rejit.
+ return NULL;
+ }
+
+ if (fNeedsParameters)
+ {
+ // Here's where we give the rejit requestor a chance to
+ // examine and modify the IL & codegen flags before they get to
+ // the JIT. This allows one to add probe calls for things like
+ // code coverage, performance, or whatever. These will be
+ // stored in pShared.
+ _ASSERTE(pModule != NULL);
+ _ASSERTE(methodDef != mdTokenNil);
+ ReleaseHolder<ProfilerFunctionControl> pFuncControl =
+ new (nothrow) ProfilerFunctionControl(pModule->GetLoaderAllocator()->GetLowFrequencyHeap());
+ HRESULT hr = S_OK;
+ if (pFuncControl == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ else
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ hr = g_profControlBlock.pProfInterface->GetReJITParameters(
+ (ModuleID)pModule,
+ methodDef,
+ pFuncControl);
+ END_PIN_PROFILER();
+ }
+
+ if (FAILED(hr))
+ {
+ {
+ CrstHolder ch(&m_crstTable);
+ if (pInfoToRejit->m_pShared->m_dwInternalFlags == SharedReJitInfo::kStateGettingReJITParameters)
+ {
+ pInfoToRejit->m_pShared->m_dwInternalFlags &= ~SharedReJitInfo::kStateMask;
+ pInfoToRejit->m_pShared->m_dwInternalFlags |= SharedReJitInfo::kStateRequested;
+ }
+ }
+ ReportReJITError(pModule, methodDef, pMD, hr);
+ return NULL;
+ }
+
+ {
+ CrstHolder ch(&m_crstTable);
+ if (pInfoToRejit->m_pShared->m_dwInternalFlags == SharedReJitInfo::kStateGettingReJITParameters)
+ {
+ // Inside the above call to ICorProfilerCallback4::GetReJITParameters, the profiler
+ // will have used the specified pFuncControl to provide its IL and codegen flags.
+ // So now we transfer it out to the SharedReJitInfo.
+ pInfoToRejit->m_pShared->m_dwCodegenFlags = pFuncControl->GetCodegenFlags();
+ pInfoToRejit->m_pShared->m_pbIL = pFuncControl->GetIL();
+ // pShared is now the owner of the memory for the IL buffer
+ pInfoToRejit->m_pShared->m_instrumentedILMap.SetMappingInfo(pFuncControl->GetInstrumentedMapEntryCount(),
+ pFuncControl->GetInstrumentedMapEntries());
+ pInfoToRejit->m_pShared->m_dwInternalFlags &= ~SharedReJitInfo::kStateMask;
+ pInfoToRejit->m_pShared->m_dwInternalFlags |= SharedReJitInfo::kStateActive;
+ _ASSERTE(pInfoToRejit->m_pCode == NULL);
+ _ASSERTE(pInfoToRejit->GetState() == ReJitInfo::kJumpToPrestub);
+ }
+ }
+ }
+ else if (fWaitForParameters)
+ {
+ // This feels lame, but it doesn't appear that we have the right threading primitives
+ // for this. What I would like is an AutoResetEvent that atomically exits the table
+ // Crst when I wait on it. From what I can tell our AutoResetEvent doesn't have
+ // that atomic transition, which means this ordering could occur:
+ // [Thread 1] detect kStateGettingParameters and exit table lock
+ // [Thread 2] enter table lock, transition kStateGettingParameters -> kStateActive
+ // [Thread 2] signal AutoResetEvent
+ // [Thread 2] exit table lock
+ // [Thread 1] wait on AutoResetEvent (which may never be signaled again)
+ //
+ // Another option would be ManualResetEvents, one for each SharedReJitInfo, but
+ // that feels like a lot of memory overhead to handle a case which occurs rarely.
+ // A third option would be dynamically creating ManualResetEvents in a side
+ // dictionary on demand, but that feels like a lot of complexity for an event
+ // that occurs rarely.
+ //
+ // I just ended up with this simple polling loop. Assuming profiler
+ // writers implement GetReJITParameters performantly, we will only iterate
+ // this loop once, and even then only in the rare case of threads racing
+ // to JIT the same IL. If this really winds up causing performance issues,
+ // we can build something more sophisticated.
+ while (true)
+ {
+ {
+ CrstHolder ch(&m_crstTable);
+ if (pInfoToRejit->m_pShared->GetState() == SharedReJitInfo::kStateActive)
+ {
+ break; // the other thread got the parameters successfully, go race to rejit
+ }
+ else if (pInfoToRejit->m_pShared->GetState() == SharedReJitInfo::kStateRequested)
+ {
+ return NULL; // the other thread had an error getting parameters and went
+ // back to requested
+ }
+ else if (pInfoToRejit->m_pShared->GetState() == SharedReJitInfo::kStateReverted)
+ {
+ break; // we got reverted; enter DoReJit anyway and it will detect this and
+ // bail out.
+ }
+ }
+ ClrSleepEx(1, FALSE);
+ }
+ }
+
+ // We've got the info from the profiler, so JIT the method. This is also
+ // responsible for updating the jump target from the prestub to the newly
+ // rejitted code AND for publishing the top of the newly rejitted code to
+ // pInfoToRejit->m_pCode. If two threads race to rejit, DoReJit handles the
+ // race, and ensures the winner publishes his result to pInfoToRejit->m_pCode.
+ return DoReJit(pInfoToRejit);
+
+}
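+
+// Informal summary of the SharedReJitInfo state transitions exercised above (no new
+// behavior; this just restates the switch and the parameter-gathering code paths):
+//
+// kStateRequested --(this thread claims parameter gathering)--> kStateGettingReJITParameters
+// kStateGettingReJITParameters --(GetReJITParameters succeeds)--> kStateActive
+// kStateGettingReJITParameters --(GetReJITParameters fails)-----> kStateRequested
+// kStateRequested / kStateGettingReJITParameters / kStateActive --(Revert)--> kStateReverted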
+
+
+//---------------------------------------------------------------------------------------
+//
+// Called by DoReJitIfNecessaryWorker(), this function assumes the IL & codegen flags have
+// already been gathered from the profiler, and then calls UnsafeJitFunction to perform
+// the re-JIT (bracketing that with profiler callbacks to announce the start/finish of
+// the rejit).
+//
+// This is also responsible for handling any races between multiple threads
+// simultaneously rejitting a function. See the comment at the top of
+// code:ReJitManager::DoReJitIfNecessaryWorker for details.
+//
+// Arguments:
+// pInfo - ReJitInfo tracking this MethodDesc's rejit request
+//
+// Return Value:
+// * Generally, return the PCODE of the start of the rejitted code. However,
+// depending on the result of races determined by DoReJit(), the return value
+// can be different:
+// * If the current thread races with another thread to do the rejit, return the
+// PCODE generated by the winner.
+// * If the current thread races with another thread doing a revert, and the revert
+// wins, then return the PCODE of the start of the originally JITted code
+// (i.e., pInfo->GetMethodDesc()->GetNativeCode())
+//
+
+PCODE ReJitManager::DoReJit(ReJitInfo * pInfo)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef PROFILING_SUPPORTED
+
+    INDEBUG(Dump("Inside DoReJit(). Dumping this ReJitManager\n"));
+
+ _ASSERTE(!pInfo->GetMethodDesc()->IsNoMetadata());
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+ g_profControlBlock.pProfInterface->ReJITCompilationStarted((FunctionID)pInfo->GetMethodDesc(),
+ pInfo->m_pShared->GetId(),
+ TRUE);
+ END_PIN_PROFILER();
+ }
+
+ COR_ILMETHOD_DECODER ILHeader(pInfo->GetIL(), pInfo->GetMethodDesc()->GetMDImport(), NULL);
+ PCODE pCodeOfRejittedCode = NULL;
+
+ // Note that we're intentionally not enclosing UnsafeJitFunction in a try block
+ // to swallow exceptions. It's expected that any exception thrown is fatal and
+ // should pass through. This is in contrast to MethodDesc::MakeJitWorker, which
+ // does enclose UnsafeJitFunction in a try block, and attempts to swallow an
+ // exception that occurs on the current thread when another thread has
+ // simultaneously attempted (and provably succeeded in) the JITting of the same
+ // function. This is a very unusual case (likely due to an out of memory error
+ // encountered on the current thread and not on the competing thread), which is
+ // not worth attempting to cover.
+ pCodeOfRejittedCode = UnsafeJitFunction(
+ pInfo->GetMethodDesc(),
+ &ILHeader,
+ JitFlagsFromProfCodegenFlags(pInfo->m_pShared->m_dwCodegenFlags),
+ 0);
+
+ _ASSERTE(pCodeOfRejittedCode != NULL);
+
+ // This atomically updates the jmp target (from prestub to top of rejitted code) and publishes
+ // the top of rejitted code into pInfo, all inside the same acquisition of this
+ // ReJitManager's table Crst.
+ HRESULT hr = S_OK;
+ BOOL fEESuspended = FALSE;
+ BOOL fNotify = FALSE;
+ PCODE ret = NULL;
+ while (true)
+ {
+ if (fEESuspended)
+ {
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
+ }
+ CrstHolder ch(&m_crstTable);
+
+ // Now that we're under the lock, recheck whether pInfo->m_pCode has been filled
+ // in...
+ if (pInfo->m_pCode != NULL)
+ {
+ // Yup, another thread rejitted this request at the same time as us, and beat
+ // us to publishing the result. Intentionally skip the rest of this, and do
+ // not issue a ReJITCompilationFinished from this thread.
+ ret = pInfo->m_pCode;
+ break;
+ }
+
+ // BUGBUG: This revert check below appears to introduce behavior we probably don't want.
+        // This is a pre-existing issue and I don't have time to create a test for it
+        // right now, but I wanted to capture the issue in a comment for future work.
+ // Imagine the profiler has one thread which is calling RequestReJIT periodically
+ // updating the method's IL:
+ // 1) RequestReJit (table lock keeps these atomic)
+ // 1.1) Revert old shared rejit info
+ // 1.2) Create new shared rejit info
+ // 2) RequestReJit (table lock keeps these atomic)
+ // 2.1) Revert old shared rejit info
+ // 2.2) Create new shared rejit info
+ // ...
+ // On a second thread we keep calling the method which needs to periodically rejit
+ // to update to the newest version:
+ // a) [DoReJitIfNecessaryWorker] detects active rejit request
+ // b) [DoReJit] if shared rejit info is reverted, execute original method code.
+ //
+ // Because (a) and (b) are not under the same lock acquisition this ordering is possible:
+ // (1), (a), (2), (b)
+ // The result is that (b) sees the shared rejit is reverted and the method executes its
+        // original code. As a profiler using rejit, I would expect that either the IL
+        // specified in (1) or the IL specified in (2) would be used, but never the
+        // original IL.
+ //
+ // I think the correct behavior is to bind a method execution to the current rejit
+ // version at some point, and from then on we guarantee to execute that version of the
+        // code, regardless of reverts or re-rejit requests.
+ //
+ // There is also a related issue with GetCurrentReJitFlagsWorker which assumes jitting
+ // always corresponds to the most recent version of the method. If we start pinning
+ // method invocations to particular versions then that method can't be allowed to
+ // float forward to the newest version, nor can it abort if the most recent version
+ // is reverted.
+ // END BUGBUG
+ //
+ // And recheck whether some other thread tried to revert this method in the
+ // meantime (this check would also include an attempt to re-rejit the method
+ // (i.e., calling RequestReJIT on the method multiple times), which would revert
+ // this pInfo before creating a new one to track the latest rejit request).
+ if (pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted)
+ {
+ // Yes, we've been reverted, so the jmp-to-prestub has already been removed,
+ // and we should certainly not attempt to redirect that nonexistent jmp to
+ // the code we just rejitted
+ _ASSERTE(pInfo->GetMethodDesc()->GetNativeCode() != NULL);
+ ret = pInfo->GetMethodDesc()->GetNativeCode();
+ break;
+ }
+
+#ifdef DEBUGGING_SUPPORTED
+ // Notify the debugger of the rejitted function, so it can generate
+ // DebuggerMethodInfo / DebugJitInfo for it. Normally this is done inside
+ // UnsafeJitFunction (via CallCompileMethodWithSEHWrapper), but it skips this
+ // when it detects the MethodDesc was already jitted. Since we know here that
+ // we're rejitting it (and this is not just some sort of multi-thread JIT race),
+ // now is a good place to notify the debugger.
+ if (g_pDebugInterface != NULL)
+ {
+ g_pDebugInterface->JITComplete(pInfo->GetMethodDesc(), pCodeOfRejittedCode);
+ }
+
+#endif // DEBUGGING_SUPPORTED
+
+ _ASSERTE(pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive);
+ _ASSERTE(pInfo->GetState() == ReJitInfo::kJumpToPrestub);
+
+ // Atomically publish the PCODE and update the jmp stamp (to go to the rejitted
+ // code) under the lock
+ hr = pInfo->UpdateJumpTarget(fEESuspended, pCodeOfRejittedCode);
+ if (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED)
+ {
+ _ASSERTE(!fEESuspended);
+ fEESuspended = TRUE;
+ continue;
+ }
+ if (FAILED(hr))
+ {
+ break;
+ }
+ pInfo->m_pCode = pCodeOfRejittedCode;
+ fNotify = TRUE;
+ ret = pCodeOfRejittedCode;
+
+ _ASSERTE(pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive);
+ _ASSERTE(pInfo->GetState() == ReJitInfo::kJumpToRejittedCode);
+ break;
+ }
+
+ if (fEESuspended)
+ {
+ ThreadSuspend::RestartEE(FALSE /* bFinishedGC */, TRUE /* SuspendSucceded */);
+ fEESuspended = FALSE;
+ }
+
+ if (FAILED(hr))
+ {
+ Module* pModule = NULL;
+ mdMethodDef methodDef = mdTokenNil;
+ pInfo->GetModuleAndTokenRegardlessOfKeyType(&pModule, &methodDef);
+ ReportReJITError(pModule, methodDef, pInfo->GetMethodDesc(), hr);
+ }
+
+ // Notify the profiler that JIT completed.
+ if (fNotify)
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+ g_profControlBlock.pProfInterface->ReJITCompilationFinished((FunctionID)pInfo->GetMethodDesc(),
+ pInfo->m_pShared->GetId(),
+ S_OK,
+ TRUE);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ // Fire relevant ETW events
+ if (fNotify)
+ {
+ ETW::MethodLog::MethodJitted(
+ pInfo->GetMethodDesc(),
+ NULL, // namespaceOrClassName
+ NULL, // methodName
+ NULL, // methodSignature
+ pCodeOfRejittedCode,
+ pInfo->m_pShared->GetId());
+ }
+ return ret;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Transition SharedReJitInfo to Reverted state and add all associated ReJitInfos to the
+// undo list in the method batch
+//
+// Arguments:
+// pShared - SharedReJitInfo to revert
+// pJumpStampBatch - a batch of methods that need their jump stamps reverted. This method
+// is responsible for adding additional ReJitInfos to the list.
+//
+// Return Value:
+// S_OK if all MDs are batched and the SharedReJitInfo is marked reverted
+// E_OUTOFMEMORY (MDs couldn't be added to batch, SharedReJitInfo is not reverted)
+//
+// Assumptions:
+// Caller must be holding this ReJitManager's table crst.
+//
+
+HRESULT ReJitManager::Revert(SharedReJitInfo * pShared, ReJitManagerJumpStampBatch* pJumpStampBatch)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_crstTable.OwnedByCurrentThread());
+ _ASSERTE((pShared->GetState() == SharedReJitInfo::kStateRequested) ||
+ (pShared->GetState() == SharedReJitInfo::kStateGettingReJITParameters) ||
+ (pShared->GetState() == SharedReJitInfo::kStateActive));
+ _ASSERTE(pShared->GetMethods() != NULL);
+ _ASSERTE(pJumpStampBatch->pReJitManager == this);
+
+ HRESULT hrReturn = S_OK;
+ for (ReJitInfo * pInfo = pShared->GetMethods(); pInfo != NULL; pInfo = pInfo->m_pNext)
+ {
+ if (pInfo->GetState() == ReJitInfo::kJumpNone)
+ {
+ // Nothing to revert for this MethodDesc / instantiation.
+ continue;
+ }
+
+ ReJitInfo** ppInfo = pJumpStampBatch->undoMethods.Append();
+ if (ppInfo == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ *ppInfo = pInfo;
+ }
+
+ pShared->m_dwInternalFlags &= ~SharedReJitInfo::kStateMask;
+ pShared->m_dwInternalFlags |= SharedReJitInfo::kStateReverted;
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Removes any ReJitInfos relating to MDs for the specified AppDomain from this
+// ReJitManager. This is used to remove non-domain-neutral instantiations of
+// domain-neutral generics from the SharedDomain's ReJitManager, when the AppDomain
+// containing those non-domain-neutral instantiations is unloaded.
+//
+// Arguments:
+// * pAppDomain - AppDomain that is exiting, and is thus the one for which we should
+// find ReJitInfos to remove
+//
+//
+
+void ReJitManager::RemoveReJitInfosFromDomain(AppDomain * pAppDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CrstHolder ch(&m_crstTable);
+
+ INDEBUG(Dump("Dumping SharedDomain rejit manager BEFORE AD Unload"));
+
+ for (ReJitInfoHash::Iterator iterCur = m_table.Begin(), iterEnd = m_table.End();
+ iterCur != iterEnd;
+ iterCur++)
+ {
+ ReJitInfo * pInfo = *iterCur;
+
+ if (pInfo->m_key.m_keyType != ReJitInfo::Key::kMethodDesc)
+ {
+ // Skip all "placeholder" ReJitInfos--they'll always be allocated on a
+ // loader heap for the shared domain.
+ _ASSERTE(pInfo->m_key.m_keyType == ReJitInfo::Key::kMetadataToken);
+ _ASSERTE(PTR_Module(pInfo->m_key.m_pModule)->GetDomain()->IsSharedDomain());
+ continue;
+ }
+
+ if (pInfo->GetMethodDesc()->GetDomain() != pAppDomain)
+ {
+ // We only care about non-domain-neutral instantiations that live in
+ // pAppDomain.
+ continue;
+ }
+
+ // Remove this ReJitInfo from the linked-list of ReJitInfos associated with its
+ // SharedReJitInfo.
+ pInfo->m_pShared->RemoveMethod(pInfo);
+
+ // Remove this ReJitInfo from the ReJitManager's hash table.
+ m_table.Remove(iterCur);
+
+ // pInfo is not deallocated yet. That will happen when pAppDomain finishes
+ // unloading and its loader heaps get freed.
+ }
+ INDEBUG(Dump("Dumping SharedDomain rejit manager AFTER AD Unload"));
+}
+
+#endif // DACCESS_COMPILE
+// The rest of the ReJitManager methods are safe to compile for DAC
+
+
+//---------------------------------------------------------------------------------------
+//
+// Helper to iterate through m_table, finding the single matching non-reverted ReJitInfo.
+// The caller may search either by MethodDesc * or by (Module *, methodDef) pair, but
+// not both.
+//
+// Arguments:
+//     * pMD - MethodDesc * to search for (NULL if caller is searching by
+//         (Module *, methodDef) pair)
+//     * pModule - Module * to search for (NULL if caller is searching by MethodDesc *)
+//     * methodDef - methodDef to search for (mdTokenNil if caller is searching by
+//         MethodDesc *)
+//
+// Return Value:
+// ReJitInfo * requested, or NULL if none is found
+//
+// Assumptions:
+// Caller should be holding this ReJitManager's table crst.
+//
+
+PTR_ReJitInfo ReJitManager::FindNonRevertedReJitInfoHelper(
+ PTR_MethodDesc pMD,
+ PTR_Module pModule,
+ mdMethodDef methodDef)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INSTANCE_CHECK;
+ }
+ CONTRACTL_END;
+
+ // Either pMD is valid, xor (pModule,methodDef) is valid
+ _ASSERTE(
+ ((pMD != NULL) && (pModule == NULL) && (methodDef == mdTokenNil)) ||
+ ((pMD == NULL) && (pModule != NULL) && (methodDef != mdTokenNil)));
+
+ // Caller should hold the Crst around calling this function and using the ReJitInfo.
+#ifndef DACCESS_COMPILE
+ _ASSERTE(m_crstTable.OwnedByCurrentThread());
+#endif
+
+ ReJitInfoHash::KeyIterator beginIter(&m_table, TRUE /* begin */);
+ ReJitInfoHash::KeyIterator endIter(&m_table, FALSE /* begin */);
+
+ if (pMD != NULL)
+ {
+ beginIter = GetBeginIterator(pMD);
+ endIter = GetEndIterator(pMD);
+ }
+ else
+ {
+ beginIter = GetBeginIterator(pModule, methodDef);
+ endIter = GetEndIterator(pModule, methodDef);
+ }
+
+ for (ReJitInfoHash::KeyIterator iter = beginIter;
+ iter != endIter;
+ iter++)
+ {
+ PTR_ReJitInfo pInfo = *iter;
+ _ASSERTE(pInfo->m_pShared != NULL);
+
+ if (pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted)
+ continue;
+
+ INDEBUG(AssertRestOfEntriesAreReverted(iter, endIter));
+ return pInfo;
+ }
+
+ return NULL;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// ReJitManager instance constructor--for now, does nothing
+//
+
+ReJitManager::ReJitManager()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Called from BaseDomain::BaseDomain to do any constructor-time initialization.
+// Presently, this takes care of initializing the Crst, choosing the type based on
+// whether this ReJitManager belongs to the SharedDomain.
+//
+// Arguments:
+// * fSharedDomain - nonzero iff this ReJitManager belongs to the SharedDomain.
+//
+
+void ReJitManager::PreInit(BOOL fSharedDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ CAN_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ m_crstTable.Init(
+ fSharedDomain ? CrstReJITSharedDomainTable : CrstReJITDomainTable,
+ CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD | CRST_REENTRANCY | CRST_TAKEN_DURING_SHUTDOWN));
+#endif // DACCESS_COMPILE
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Finds the ReJitInfo tracking a pre-rejit request.
+//
+// Arguments:
+// * beginIter - Iterator to start search
+// * endIter - Iterator to end search
+//
+// Return Value:
+//     NULL if no such ReJitInfo exists. This can occur if two threads race
+//     to JIT the original code and we're the loser. Else, the ReJitInfo * found.
+//
+// Assumptions:
+// Caller must be holding this ReJitManager's table lock.
+//
+
+ReJitInfo * ReJitManager::FindPreReJittedReJitInfo(
+ ReJitInfoHash::KeyIterator beginIter,
+ ReJitInfoHash::KeyIterator endIter)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+    // Caller shouldn't be handing out iterators unless it already holds the table lock.
+#ifndef DACCESS_COMPILE
+ _ASSERTE(m_crstTable.OwnedByCurrentThread());
+#endif
+
+ for (ReJitInfoHash::KeyIterator iter = beginIter;
+ iter != endIter;
+ iter++)
+ {
+ ReJitInfo * pInfo = *iter;
+ SharedReJitInfo * pShared = pInfo->m_pShared;
+ _ASSERTE(pShared != NULL);
+
+ switch (pShared->GetState())
+ {
+ case SharedReJitInfo::kStateRequested:
+ case SharedReJitInfo::kStateGettingReJITParameters:
+ case SharedReJitInfo::kStateActive:
+ if (pInfo->GetState() == ReJitInfo::kJumpToRejittedCode)
+ {
+ // There was a race for the original JIT, and we're the loser. (The winner
+ // has already published the original JIT's pcode, jump-stamped, and begun
+ // the rejit!)
+ return NULL;
+ }
+
+ // Otherwise, either we have a rejit request that has not yet been
+ // jump-stamped, or there was a race for the original JIT, and another
+ // thread jump-stamped its copy of the originally JITted code already. In
+ // that case, we still don't know who the winner or loser will be (PCODE may
+ // not yet be published), so we'll have to jump-stamp our copy just in case
+ // we win.
+ _ASSERTE((pInfo->GetState() == ReJitInfo::kJumpNone) ||
+ (pInfo->GetState() == ReJitInfo::kJumpToPrestub));
+ INDEBUG(AssertRestOfEntriesAreReverted(iter, endIter));
+ return pInfo;
+
+
+ case SharedReJitInfo::kStateReverted:
+ // just ignore this guy
+ continue;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ return NULL;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Used by the profiler to get the ReJITID corresponding to a (MethodDesc *, PCODE) pair.
+// Can also be used to determine whether (MethodDesc *, PCODE) corresponds to a rejit
+// (vs. a regular JIT) for the purposes of deciding whether to notify the debugger about
+// the rejit (and building the debugger JIT info structure).
+//
+// Arguments:
+//     * pMD - MethodDesc * of interest
+//     * pCodeStart - PCODE of the particular JITting of interest for that MethodDesc *
+//
+// Return Value:
+// 0 if no such ReJITID found (e.g., PCODE is from a JIT and not a rejit), else the
+// ReJITID requested.
+//
+
+ReJITID ReJitManager::GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ CAN_TAKE_LOCK;
+ GC_TRIGGERS;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(pCodeStart != NULL);
+ }
+ CONTRACTL_END;
+
+ // Fast-path: If the rejit map is empty, no need to look up anything. Do this outside
+    // of a lock to impact our caller (e.g., the profiler) as little as possible. If the
+ // map is nonempty, we'll acquire the lock at that point and do the lookup for real.
+ if (m_table.GetCount() == 0)
+ {
+ return 0;
+ }
+
+ CrstHolder ch(&m_crstTable);
+
+ return GetReJitIdNoLock(pMD, pCodeStart);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// See comment above code:ReJitManager::GetReJitId for main details of what this does.
+//
+// This function is basically the same as GetReJitId, except the caller is expected to
+// take the ReJitManager lock directly (via ReJitManager::TableLockHolder). This exists
+// so that ETW can explicitly take the triggering ReJitManager lock up front, and in the
+// proper order, to avoid lock-leveling issues and GC-trigger issues with other locks it
+// takes that are CRST_UNSAFE_ANYMODE.
+//
+
+ReJITID ReJitManager::GetReJitIdNoLock(PTR_MethodDesc pMD, PCODE pCodeStart)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ CANNOT_TAKE_LOCK;
+ GC_NOTRIGGER;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(pCodeStart != NULL);
+ }
+ CONTRACTL_END;
+
+ // Caller must ensure this lock is taken!
+ _ASSERTE(m_crstTable.OwnedByCurrentThread());
+
+ ReJitInfo * pInfo = FindReJitInfo(pMD, pCodeStart, 0);
+ if (pInfo == NULL)
+ {
+ return 0;
+ }
+
+ _ASSERTE(pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive ||
+ pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted);
+ return pInfo->m_pShared->GetId();
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Used by profilers to map a (MethodDesc *, ReJITID) pair to the corresponding PCODE for
+// that rejit attempt. This can also be used for reverted methods, as the PCODE may still
+// be available and in use even after a rejitted function has been reverted.
+//
+// Arguments:
+// * pMD - MethodDesc * of interest
+// * reJitId - ReJITID of interest
+//
+// Return Value:
+// Corresponding PCODE of the rejit attempt, or NULL if no such rejit attempt can be
+// found.
+//
+
+PCODE ReJitManager::GetCodeStart(PTR_MethodDesc pMD, ReJITID reJitId)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ CAN_TAKE_LOCK;
+ GC_NOTRIGGER;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(reJitId != 0);
+ }
+ CONTRACTL_END;
+
+ // Fast-path: If the rejit map is empty, no need to look up anything. Do this outside
+    // of a lock to impact our caller (e.g., the profiler) as little as possible. If the
+ // map is nonempty, we'll acquire the lock at that point and do the lookup for real.
+ if (m_table.GetCount() == 0)
+ {
+ return NULL;
+ }
+
+ CrstHolder ch(&m_crstTable);
+
+ ReJitInfo * pInfo = FindReJitInfo(pMD, NULL, reJitId);
+ if (pInfo == NULL)
+ {
+ return NULL;
+ }
+
+ _ASSERTE(pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive ||
+ pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted);
+
+ return pInfo->m_pCode;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// If a function has been requested to be rejitted, finds the one current
+// SharedReJitInfo (ignoring all that are in the reverted state) and returns the codegen
+// flags recorded on it (which were thus used to rejit the MD). CEEInfo::canInline() calls
+// this as part of its calculation of whether it may inline a given method. (Profilers
+// may specify on a per-rejit-request basis whether the rejit of a method may inline
+// callees.)
+//
+// Arguments:
+// * pMD - MethodDesc * of interest.
+//
+// Return Value:
+// Returns the requested codegen flags, or 0 (i.e., no flags set) if no rejit attempt
+// can be found for the MD.
+//
+
+DWORD ReJitManager::GetCurrentReJitFlagsWorker(PTR_MethodDesc pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ // Fast-path: If the rejit map is empty, no need to look up anything. Do this outside
+ // of a lock to impact our caller (e.g., the JIT asking if it can inline) as little as possible. If the
+ // map is nonempty, we'll acquire the lock at that point and do the lookup for real.
+ if (m_table.GetCount() == 0)
+ {
+ return 0;
+ }
+
+ CrstHolder ch(&m_crstTable);
+
+ for (ReJitInfoHash::KeyIterator iter = GetBeginIterator(pMD), end = GetEndIterator(pMD);
+ iter != end;
+ iter++)
+ {
+ ReJitInfo * pInfo = *iter;
+ _ASSERTE(pInfo->GetMethodDesc() == pMD);
+ _ASSERTE(pInfo->m_pShared != NULL);
+
+ DWORD dwState = pInfo->m_pShared->GetState();
+
+ if (dwState != SharedReJitInfo::kStateActive)
+ {
+            // Not active means we never asked the profiler for the codegen flags OR the
+            // rejit request has been reverted. So this one is useless.
+ continue;
+ }
+
+ // Found it!
+#ifdef _DEBUG
+ // This must be the only such ReJitInfo for this MethodDesc. Check the rest and
+ // assert otherwise.
+ {
+ ReJitInfoHash::KeyIterator iterTest = iter;
+ iterTest++;
+
+ while(iterTest != end)
+ {
+ ReJitInfo * pInfoTest = *iterTest;
+ _ASSERTE(pInfoTest->GetMethodDesc() == pMD);
+ _ASSERTE(pInfoTest->m_pShared != NULL);
+
+ DWORD dwStateTest = pInfoTest->m_pShared->GetState();
+
+ if (dwStateTest == SharedReJitInfo::kStateActive)
+ {
+ _ASSERTE(!"Multiple active ReJitInfos for same MethodDesc");
+ break;
+ }
+ iterTest++;
+ }
+ }
+#endif //_DEBUG
+ return pInfo->m_pShared->m_dwCodegenFlags;
+ }
+
+ return 0;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Helper to find the matching ReJitInfo by MethodDesc, paired with either pCodeStart or
+// reJitId (exactly one should be non-zero, and will be used as the key for the lookup)
+//
+// Arguments:
+// * pMD - MethodDesc * to look up
+// * pCodeStart - PCODE of the particular rejit attempt to look up. NULL if looking
+// up by ReJITID.
+//     * reJitId - ReJITID of the particular rejit attempt to look up. 0 if looking
+//         up by PCODE.
+//
+// Return Value:
+// ReJitInfo * matching input parameters, or NULL if no such ReJitInfo could be
+// found.
+//
+// Assumptions:
+// Caller must be holding this ReJitManager's table lock.
+//
+
+PTR_ReJitInfo ReJitManager::FindReJitInfo(PTR_MethodDesc pMD, PCODE pCodeStart, ReJITID reJitId)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ // Caller should hold the Crst around calling this function and using the ReJitInfo.
+#ifndef DACCESS_COMPILE
+ _ASSERTE(m_crstTable.OwnedByCurrentThread());
+#endif
+
+ // One of these two keys should be used, but not both!
+ _ASSERTE(
+ ((pCodeStart != NULL) || (reJitId != 0)) &&
+ !((pCodeStart != NULL) && (reJitId != 0)));
+
+ for (ReJitInfoHash::KeyIterator iter = GetBeginIterator(pMD), end = GetEndIterator(pMD);
+ iter != end;
+ iter++)
+ {
+ PTR_ReJitInfo pInfo = *iter;
+ _ASSERTE(pInfo->GetMethodDesc() == pMD);
+ _ASSERTE(pInfo->m_pShared != NULL);
+
+ if ((pCodeStart != NULL && pInfo->m_pCode == pCodeStart) || // pCodeStart is key
+ (reJitId != 0 && pInfo->m_pShared->GetId() == reJitId)) // reJitId is key
+ {
+ return pInfo;
+ }
+ }
+
+ return NULL;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Called by profiler to retrieve an array of ReJITIDs corresponding to a MethodDesc *
+//
+// Arguments:
+// * pMD - MethodDesc * to look up
+// * cReJitIds - Element count capacity of reJitIds
+// * pcReJitIds - [out] Place total count of ReJITIDs found here; may be more than
+//         cReJitIds if the profiler passed an array that's too small to hold them all
+// * reJitIds - [out] Place ReJITIDs found here. Count of ReJITIDs returned here is
+// min(cReJitIds, *pcReJitIds)
+//
+// Return Value:
+// * S_OK: ReJITIDs successfully returned, array is big enough
+//     * S_FALSE: ReJITIDs successfully found, but the array was not big enough. Only
+//         cReJitIds were returned and cReJitIds < *pcReJitIds (the latter being the total
+// number of ReJITIDs available).
+//
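+// Notes:
+//     A typical caller uses a fixed-size buffer and retries with a larger one on
+//     S_FALSE. A minimal, hypothetical sketch (pMgr, pMD, and the buffer size are
+//     illustrative only):
+//
+//         ReJITID rgIds[4];
+//         ULONG cTotal = 0;
+//         HRESULT hr = pMgr->GetReJITIDs(pMD, _countof(rgIds), &cTotal, rgIds);
+//         if (hr == S_FALSE)
+//         {
+//             // rgIds holds only the first 4 IDs; cTotal has the full count.
+//             // Allocate cTotal entries and call again.
+//         }
+//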
+
+HRESULT ReJitManager::GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * pcReJitIds, ReJITID reJitIds[])
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ CAN_TAKE_LOCK;
+ GC_NOTRIGGER;
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(pcReJitIds != NULL);
+ PRECONDITION(reJitIds != NULL);
+ }
+ CONTRACTL_END;
+
+ CrstHolder ch(&m_crstTable);
+
+ ULONG cnt = 0;
+
+ for (ReJitInfoHash::KeyIterator iter = GetBeginIterator(pMD), end = GetEndIterator(pMD);
+ iter != end;
+ iter++)
+ {
+ ReJitInfo * pInfo = *iter;
+ _ASSERTE(pInfo->GetMethodDesc() == pMD);
+ _ASSERTE(pInfo->m_pShared != NULL);
+
+ if (pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive ||
+ pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted)
+ {
+ if (cnt < cReJitIds)
+ {
+ reJitIds[cnt] = pInfo->m_pShared->GetId();
+ }
+ ++cnt;
+
+ // no overflow
+ _ASSERTE(cnt != 0);
+ }
+ }
+ *pcReJitIds = cnt;
+
+ return (cnt > cReJitIds) ? S_FALSE : S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Helper that inits a new ReJitReportErrorWorkItem and adds it to the pErrors array
+//
+// Arguments:
+// * pModule - The module in the module/MethodDef identifier pair for the method which
+// had an error during rejit
+// * methodDef - The MethodDef in the module/MethodDef identifier pair for the method which
+// had an error during rejit
+// * pMD - If available, the specific method instance which had an error during rejit
+//     * hrStatus - HRESULT for the rejit error that occurred
+// * pErrors - the list of error records that this method will append to
+//
+// Return Value:
+// * S_OK: error was appended
+// * E_OUTOFMEMORY: Not enough memory to create the new error item. The array is unchanged.
+//
+
+//static
+HRESULT ReJitManager::AddReJITError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus, CDynArray<ReJitReportErrorWorkItem> * pErrors)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ReJitReportErrorWorkItem* pError = pErrors->Append();
+ if (pError == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ pError->pModule = pModule;
+ pError->methodDef = methodDef;
+ pError->pMethodDesc = pMD;
+ pError->hrStatus = hrStatus;
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Helper that inits a new ReJitReportErrorWorkItem and adds it to the pErrors array
+//
+// Arguments:
+// * pReJitInfo - The method which had an error during rejit
+//     * hrStatus - HRESULT for the rejit error that occurred
+// * pErrors - the list of error records that this method will append to
+//
+// Return Value:
+// * S_OK: error was appended
+// * E_OUTOFMEMORY: Not enough memory to create the new error item. The array is unchanged.
+//
+
+//static
+HRESULT ReJitManager::AddReJITError(ReJitInfo* pReJitInfo, HRESULT hrStatus, CDynArray<ReJitReportErrorWorkItem> * pErrors)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Module * pModule = NULL;
+ mdMethodDef methodDef = mdTokenNil;
+ pReJitInfo->GetModuleAndTokenRegardlessOfKeyType(&pModule, &methodDef);
+ return AddReJITError(pModule, methodDef, pReJitInfo->GetMethodDesc(), hrStatus, pErrors);
+}
+
+#ifdef _DEBUG
+//---------------------------------------------------------------------------------------
+//
+// Debug-only helper used while iterating through the hash table of
+// ReJitInfos to verify that all entries between the specified iterators are
+// reverted. Asserts if it finds any non-reverted entries.
+//
+// Arguments:
+// * iter - Iterator to start verifying at
+// * end - Iterator to stop verifying at
+//
+//
+
+void ReJitManager::AssertRestOfEntriesAreReverted(
+ ReJitInfoHash::KeyIterator iter,
+ ReJitInfoHash::KeyIterator end)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // All other rejits should be in the reverted state
+ while (++iter != end)
+ {
+ _ASSERTE((*iter)->m_pShared->GetState() == SharedReJitInfo::kStateReverted);
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Debug-only helper to dump ReJitManager contents to stdout. Only used if
+// COMPLUS_ProfAPI_EnableRejitDiagnostics is set.
+//
+// Arguments:
+// * szIntroText - Intro text passed by caller to be output before this ReJitManager
+// is dumped.
+//
+//
+
+void ReJitManager::Dump(LPCSTR szIntroText)
+{
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ProfAPI_EnableRejitDiagnostics) == 0)
+ return;
+
+    printf("%s", szIntroText);
+ fflush(stdout);
+
+ CrstHolder ch(&m_crstTable);
+
+ printf("BEGIN ReJitManager::Dump: 0x%p\n", this);
+
+ for (ReJitInfoHash::Iterator iterCur = m_table.Begin(), iterEnd = m_table.End();
+ iterCur != iterEnd;
+ iterCur++)
+ {
+ ReJitInfo * pInfo = *iterCur;
+ printf(
+ "\tInfo 0x%p: State=0x%x, Next=0x%p, Shared=%p, SharedState=0x%x\n",
+ pInfo,
+ pInfo->GetState(),
+ pInfo->m_pNext,
+ pInfo->m_pShared,
+ pInfo->m_pShared->GetState());
+
+ switch(pInfo->m_key.m_keyType)
+ {
+ case ReJitInfo::Key::kMethodDesc:
+ printf(
+ "\t\tMD=0x%p, %s.%s (%s)\n",
+ pInfo->GetMethodDesc(),
+ pInfo->GetMethodDesc()->m_pszDebugClassName,
+ pInfo->GetMethodDesc()->m_pszDebugMethodName,
+ pInfo->GetMethodDesc()->m_pszDebugMethodSignature);
+ break;
+
+ case ReJitInfo::Key::kMetadataToken:
+ Module * pModule;
+ mdMethodDef methodDef;
+ pInfo->GetModuleAndToken(&pModule, &methodDef);
+ printf(
+ "\t\tModule=0x%p, Token=0x%x\n",
+ pModule,
+ methodDef);
+ break;
+
+ case ReJitInfo::Key::kUninitialized:
+ printf("\t\tUNINITIALIZED\n");
+ break;
+
+ default:
+ _ASSERTE(!"Unrecognized pInfo key type");
+ }
+ fflush(stdout);
+ }
+ printf("END ReJitManager::Dump: 0x%p\n", this);
+ fflush(stdout);
+}
+
+#endif // _DEBUG
+
+//---------------------------------------------------------------------------------------
+// ReJitInfo implementation
+
+// All the state-changey stuff is kept up here in the !DACCESS_COMPILE block.
+// The more read-only inspection-y stuff follows the block.
+
+
+#ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// Do the actual work of stamping the top of originally-jitted-code with a jmp that goes
+// to the prestub. This can be called in one of three ways:
+// * Case 1: By RequestReJIT against an already-jitted function, in which case the
+// PCODE may be inferred by the MethodDesc, and our caller will have suspended
+// the EE for us, OR
+// * Case 2: By the prestub worker after jitting the original code of a function
+// (i.e., the "pre-rejit" scenario). In this case, the EE is not suspended. But
+// that's ok, because the PCODE has not yet been published to the MethodDesc, and
+// no thread can be executing inside the originally JITted function yet.
+// * Case 3: At type/method restore time for an NGEN'ed assembly. This is also the pre-rejit
+// scenario because we are guaranteed to do this before the code in the module
+// is executable. EE suspend is not required.
+//
+// Arguments:
+// * pCode - Case 1 (above): will be NULL, and we can infer the PCODE from the
+// MethodDesc; Case 2+3 (above, pre-rejit): will be non-NULL, and we'll need to use
+// this to find the code to stamp on top of.
+//
+// Return Value:
+// * S_OK: Either we successfully did the jmp-stamp, or a racing thread took care of
+// it for us.
+// * Else, HRESULT indicating failure.
+//
+// Assumptions:
+// The caller will have suspended the EE if necessary (case 1), before this is
+// called.
+//
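+// A rough sketch of case 2 (the pre-rejit path), heavily simplified; the locals are
+// hypothetical and the real path runs through ReJitPublishMethodHolder and
+// ReJitManager::DoJumpStampIfNecessary rather than calling this directly:
+//
+//     PCODE pCode = UnsafeJitFunction(pMD, &ilHeader, flags, 0); // not yet published
+//     pInfo->JumpStampNativeCode(pCode); // stamp before any thread can execute it
+//     // ... only now is the PCODE published to the MethodDesc ...
+//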
+HRESULT ReJitInfo::JumpStampNativeCode(PCODE pCode /* = NULL */)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ // It may seem dangerous to be stamping jumps over code while a GC is going on,
+ // but we're actually safe. As we assert below, either we're holding the thread
+ // store lock (and thus preventing a GC) OR we're stamping code that has not yet
+        // been published (and will thus not be executed by managed threads or examined
+ // by the GC).
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PCODE pCodePublished = GetMethodDesc()->GetNativeCode();
+
+ _ASSERTE((pCode != NULL) || (pCodePublished != NULL));
+ _ASSERTE(GetMethodDesc()->GetReJitManager()->IsTableCrstOwnedByCurrentThread());
+
+ HRESULT hr = S_OK;
+
+ // We'll jump-stamp over pCode, or if pCode is NULL, jump-stamp over the published
+ // code for this's MethodDesc.
+ LPBYTE pbCode = (LPBYTE) pCode;
+ if (pbCode == NULL)
+ {
+ // If caller didn't specify a pCode, just use the one that was published after
+ // the original JIT. (A specific pCode would be passed in the pre-rejit case,
+ // to jump-stamp the original code BEFORE the PCODE gets published.)
+ pbCode = (LPBYTE) pCodePublished;
+ }
+ _ASSERTE (pbCode != NULL);
+
+ // The debugging API may also try to write to the very top of this function (though
+ // with an int 3 for breakpoint purposes). Coordinate with the debugger so we know
+ // whether we can safely patch the actual code, or instead write to the debugger's
+ // buffer.
+ DebuggerController::ControllerLockHolder lockController;
+
+    // We could be in a race: either two threads simultaneously JITting the same
+    // method for the first time, or two threads restoring NGEN'ed code.
+    // Another thread may (or may not) have jump-stamped its copy of the code already.
+ _ASSERTE((GetState() == kJumpNone) || (GetState() == kJumpToPrestub));
+
+ if (GetState() == kJumpToPrestub)
+ {
+ // The method has already been jump stamped so nothing left to do
+ _ASSERTE(CodeIsSaved());
+ return S_OK;
+ }
+
+ // Remember what we're stamping our jump on top of, so we can replace it during a
+ // revert.
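+    // (FirstCodeByteAddr chooses where to read each byte from: if the debugger has a
+    // breakpoint patch at that address, use the debugger's saved copy of the opcode
+    // byte rather than the int 3 sitting in the code stream.)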
+ for (int i = 0; i < sizeof(m_rgSavedCode); i++)
+ {
+ m_rgSavedCode[i] = *FirstCodeByteAddr(pbCode+i, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)(pbCode+i)));
+ }
+
+ EX_TRY
+ {
+ AllocMemTracker amt;
+
+        // Precode::Allocate might throw on out-of-memory, so rely on the tracker to clean up
+ Precode * pPrecode = Precode::Allocate(PRECODE_STUB, GetMethodDesc(), GetMethodDesc()->GetLoaderAllocator(), &amt);
+ PCODE target = pPrecode->GetEntryPoint();
+
+#if defined(_X86_) || defined(_AMD64_)
+
+ // Normal unpatched code never starts with a jump
+ // so make sure this code isn't already patched
+ _ASSERTE(*FirstCodeByteAddr(pbCode, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)pbCode)) != X86_INSTR_JMP_REL32);
+
+ INT64 i64OldCode = *(INT64*)pbCode;
+ INT64 i64NewCode = i64OldCode;
+ LPBYTE pbNewValue = (LPBYTE)&i64NewCode;
+ *pbNewValue = X86_INSTR_JMP_REL32;
+ INT32 UNALIGNED * pOffset = reinterpret_cast<INT32 UNALIGNED *>(&pbNewValue[1]);
+        // rel32UsingJumpStub will throw on out-of-memory, so don't write anything
+        // until after it succeeds. It will also leak/cache/reuse the jump stub.
+ *pOffset = rel32UsingJumpStub(reinterpret_cast<INT32 UNALIGNED *>(pbCode + 1), target, GetMethodDesc(), GetMethodDesc()->GetLoaderAllocator());
+
+ // If we have the EE suspended or the code is unpublished there won't be contention on this code
+ hr = UpdateJumpStampHelper(pbCode, i64OldCode, i64NewCode, FALSE);
+ if (FAILED(hr))
+ {
+ ThrowHR(hr);
+ }
+
+ //
+ // No failure point after this!
+ //
+ amt.SuppressRelease();
+
+#else // _X86_ || _AMD64_
+#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
+
+#endif // _X86_ || _AMD64_
+
+ m_dwInternalFlags &= ~kStateMask;
+ m_dwInternalFlags |= kJumpToPrestub;
+ }
+ EX_CATCH_HRESULT(hr);
+ _ASSERT(hr == S_OK || hr == E_OUTOFMEMORY);
+
+ if (SUCCEEDED(hr))
+ {
+ _ASSERTE(GetState() == kJumpToPrestub);
+ _ASSERTE(m_rgSavedCode[0] != 0); // saved code should not start with 0
+ }
+
+ return hr;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Poke the JITted code to satisfy a revert request (or to perform an implicit revert as
+// part of a second, third, etc. rejit request). Reinstates the originally JITted code
+// that had been jump-stamped over to perform a prior rejit.
+//
+// Arguments
+// fEESuspended - TRUE if the caller keeps the EE suspended during this call
+//
+//
+// Return Value:
+// S_OK to indicate the revert succeeded,
+// CORPROF_E_RUNTIME_SUSPEND_REQUIRED to indicate the jumpstamp hasn't been reverted
+// and EE suspension will be needed for success
+// other failure HRESULT indicating what went wrong.
+//
+// Assumptions:
+// Caller must be holding the owning ReJitManager's table crst.
+//
+
+HRESULT ReJitInfo::UndoJumpStampNativeCode(BOOL fEESuspended)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(GetMethodDesc()->GetReJitManager()->IsTableCrstOwnedByCurrentThread());
+ _ASSERTE((m_pShared->GetState() == SharedReJitInfo::kStateReverted));
+ _ASSERTE((GetState() == kJumpToPrestub) || (GetState() == kJumpToRejittedCode));
+ _ASSERTE(m_rgSavedCode[0] != 0); // saved code should not start with 0 (see above test)
+
+ BYTE * pbCode = (BYTE*)GetMethodDesc()->GetNativeCode();
+ DebuggerController::ControllerLockHolder lockController;
+
+#if defined(_X86_) || defined(_AMD64_)
+ _ASSERTE(m_rgSavedCode[0] != X86_INSTR_JMP_REL32);
+ _ASSERTE(*FirstCodeByteAddr(pbCode, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)pbCode)) == X86_INSTR_JMP_REL32);
+#else
+#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
+#endif // _X86_ || _AMD64_
+
+ // For the interlocked compare, remember what pbCode is right now
+ INT64 i64OldValue = *(INT64 *)pbCode;
+ // Assemble the INT64 of the new code bytes to write. Start with what's there now
+ INT64 i64NewValue = i64OldValue;
+ memcpy(LPBYTE(&i64NewValue), m_rgSavedCode, sizeof(m_rgSavedCode));
+ HRESULT hr = UpdateJumpStampHelper(pbCode, i64OldValue, i64NewValue, !fEESuspended);
+ _ASSERTE(hr == S_OK || (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED && !fEESuspended));
+ if (hr != S_OK)
+ return hr;
+
+ // Transition state of this ReJitInfo to indicate the MD no longer has any jump stamp
+ m_dwInternalFlags &= ~kStateMask;
+ m_dwInternalFlags |= kJumpNone;
+ return S_OK;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// After code has been rejitted, this is called to update the jump-stamp to go from
+// pointing to the prestub, to pointing to the newly rejitted code.
+//
+// Arguments:
+// fEESuspended - TRUE if the caller keeps the EE suspended during this call
+// pRejittedCode - jitted code for the updated IL this method should execute
+//
+// Assumptions:
+// This rejit manager's table crst should be held by the caller
+//
+// Returns - S_OK if the jump target is updated
+// CORPROF_E_RUNTIME_SUSPEND_REQUIRED if the ee isn't suspended and it
+// will need to be in order to do the update safely
+HRESULT ReJitInfo::UpdateJumpTarget(BOOL fEESuspended, PCODE pRejittedCode)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ MethodDesc * pMD = GetMethodDesc();
+ _ASSERTE(pMD->GetReJitManager()->IsTableCrstOwnedByCurrentThread());
+ _ASSERTE(m_pShared->GetState() == SharedReJitInfo::kStateActive);
+ _ASSERTE(GetState() == kJumpToPrestub);
+ _ASSERTE(m_pCode == NULL);
+
+ // Beginning of originally JITted code containing the jmp that we will redirect.
+ BYTE * pbCode = (BYTE*)pMD->GetNativeCode();
+
+#if defined(_X86_) || defined(_AMD64_)
+
+ HRESULT hr = S_OK;
+ {
+ DebuggerController::ControllerLockHolder lockController;
+
+        // rel32UsingJumpStub will throw for out-of-memory, so don't write anything
+        // until after it succeeds. It will also leak/cache/reuse the jump stub.
+ INT32 offset = 0;
+ EX_TRY
+ {
+ offset = rel32UsingJumpStub(
+ reinterpret_cast<INT32 UNALIGNED *>(&pbCode[1]), // base of offset
+ pRejittedCode, // target of jump
+ pMD,
+ pMD->GetLoaderAllocator());
+ }
+ EX_CATCH_HRESULT(hr);
+ _ASSERT(hr == S_OK || hr == E_OUTOFMEMORY);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ // For validation later, remember what pbCode is right now
+ INT64 i64OldValue = *(INT64 *)pbCode;
+
+ // Assemble the INT64 of the new code bytes to write. Start with what's there now
+ INT64 i64NewValue = i64OldValue;
+ LPBYTE pbNewValue = (LPBYTE)&i64NewValue;
+
+ // First byte becomes a rel32 jmp instruction (should be a no-op as asserted
+ // above, but can't hurt)
+ *pbNewValue = X86_INSTR_JMP_REL32;
+ // Next 4 bytes are the jmp target (offset to jmp stub)
+ INT32 UNALIGNED * pnOffset = reinterpret_cast<INT32 UNALIGNED *>(&pbNewValue[1]);
+ *pnOffset = offset;
+
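+        // For reference, the resulting jump-stamp layout on x86/amd64 (a 5-byte jmp
+        // assembled into an 8-byte value so it can be written atomically when needed):
+        //   pbCode[0]    : 0xE9 (X86_INSTR_JMP_REL32)
+        //   pbCode[1..4] : rel32 displacement to the jump stub
+        //   pbCode[5..7] : original code bytes, unchanged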
+ hr = UpdateJumpStampHelper(pbCode, i64OldValue, i64NewValue, !fEESuspended);
+ _ASSERTE(hr == S_OK || (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED && !fEESuspended));
+ }
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+#else // _X86_ || _AMD64_
+#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
+#endif // _X86_ || _AMD64_
+
+ // State transition
+ m_dwInternalFlags &= ~kStateMask;
+ m_dwInternalFlags |= kJumpToRejittedCode;
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// This is called to modify the jump-stamp area, the first ReJitInfo::JumpStubSize bytes
+// in the method's code.
+//
+// Notes:
+// Callers use this method in a variety of circumstances:
+// a) when the code is unpublished (fContentionPossible == FALSE)
+// b) when the caller has taken the ThreadStoreLock and suspended the EE
+// (fContentionPossible == FALSE)
+// c) when the code is published, the EE isn't suspended, and the jumpstamp
+// area consists of a single 5 byte long jump instruction
+// (fContentionPossible == TRUE)
+// This method will attempt to alter the jump-stamp even if the caller has not prevented
+// contention, but there is no guarantee it will be successful. When the caller has
+// prevented contention, success is assured. Callers may opportunistically try without
+// EE suspension, and then upgrade to EE suspension if the first attempt fails.
+//
+// Assumptions:
+// This rejit manager's table crst should be held by the caller or fContentionPossible==FALSE
+// The debugger patch table lock should be held by the caller
+//
+// Arguments:
+// pbCode - pointer to the code where the jump stamp is placed
+// i64OldValue - the bytes which should currently be at the start of the method code
+// i64NewValue - the new bytes which should be written at the start of the method code
+// fContentionPossible - See the Notes section above.
+//
+// Returns:
+//   S_OK => the jumpstamp has been successfully updated.
+// CORPROF_E_RUNTIME_SUSPEND_REQUIRED => the jumpstamp remains unchanged (preventing contention will be necessary)
+// other failing HR => VirtualProtect failed, the jumpstamp remains unchanged
+//
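+// A sketch of the opportunistic upgrade pattern described above; this mirrors the
+// retry loop already present in ReJitManager::DoReJit (names taken from that code):
+//
+//     hr = pInfo->UpdateJumpTarget(fEESuspended, pCodeOfRejittedCode);
+//     if (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED)
+//     {
+//         _ASSERTE(!fEESuspended);
+//         fEESuspended = TRUE; // suspend the EE and retry; with contention prevented,
+//         continue;            // the helper cannot fail this way again
+//     }
+//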
+HRESULT ReJitInfo::UpdateJumpStampHelper(BYTE* pbCode, INT64 i64OldValue, INT64 i64NewValue, BOOL fContentionPossible)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodDesc * pMD = GetMethodDesc();
+ _ASSERTE(pMD->GetReJitManager()->IsTableCrstOwnedByCurrentThread() || !fContentionPossible);
+
+ // When ReJIT is enabled, method entrypoints are always at least 8-byte aligned (see
+ // code:EEJitManager::allocCode), so we can do a single 64-bit interlocked operation
+ // to update the jump target. However, some code may have gotten compiled before
+ // the profiler had a chance to enable ReJIT (e.g., NGENd code, or code JITted
+ // before a profiler attaches). In such cases, we cannot rely on a simple
+ // interlocked operation, and instead must suspend the runtime to ensure we can
+ // safely update the jmp instruction.
+ //
+    // This method doesn't verify that the method is actually safe to rejit; we expect
+    // callers to do that. At the moment NGEN'ed code is safe to rejit even if
+    // it is unaligned, but code generated before the profiler attaches is not.
+ if (fContentionPossible && !(IS_ALIGNED(pbCode, sizeof(INT64))))
+ {
+ return CORPROF_E_RUNTIME_SUSPEND_REQUIRED;
+ }
+
+ // The debugging API may also try to write to this function (though
+ // with an int 3 for breakpoint purposes). Coordinate with the debugger so we know
+ // whether we can safely patch the actual code, or instead write to the debugger's
+ // buffer.
+ if (fContentionPossible)
+ {
+ for (CORDB_ADDRESS_TYPE* pbProbeAddr = pbCode; pbProbeAddr < pbCode + ReJitInfo::JumpStubSize; pbProbeAddr++)
+ {
+ if (NULL != DebuggerController::GetPatchTable()->GetPatch(pbProbeAddr))
+ {
+ return CORPROF_E_RUNTIME_SUSPEND_REQUIRED;
+ }
+ }
+ }
+
+#if defined(_X86_) || defined(_AMD64_)
+
+ DWORD oldProt;
+ if (!ClrVirtualProtect((LPVOID)pbCode, 8, PAGE_EXECUTE_READWRITE, &oldProt))
+ {
+ return HRESULT_FROM_WIN32(GetLastError());
+ }
+
+ if (fContentionPossible)
+ {
+ INT64 i64InterlockReportedOldValue = FastInterlockCompareExchangeLong((INT64 *)pbCode, i64NewValue, i64OldValue);
+ // Since changes to these bytes are protected by this rejitmgr's m_crstTable, we
+ // shouldn't have two writers conflicting.
+ _ASSERTE(i64InterlockReportedOldValue == i64OldValue);
+ }
+ else
+ {
+ // In this path the caller ensures:
+ // a) no thread will execute through the prologue area we are modifying
+ // b) no thread is stopped in a prologue such that it resumes in the middle of code we are modifying
+ // c) no thread is doing a debugger patch skip operation in which an unmodified copy of the method's
+ // code could be executed from a patch skip buffer.
+
+        // PERF: if we aren't debugging, we might still want a faster path through
+        // here that skips all the patch checks
+ for (int i = 0; i < ReJitInfo::JumpStubSize; i++)
+ {
+ *FirstCodeByteAddr(pbCode+i, DebuggerController::GetPatchTable()->GetPatch(pbCode+i)) = ((BYTE*)&i64NewValue)[i];
+ }
+ }
+
+ if (oldProt != PAGE_EXECUTE_READWRITE)
+ {
+        // The CLR codebase in many locations simply ignores failures to restore page
+        // protections. It's true that this isn't a problem functionally, but it seems
+        // questionable; I am following the existing convention for now.
+ ClrVirtualProtect((LPVOID)pbCode, 8, oldProt, &oldProt);
+ }
+
+ FlushInstructionCache(GetCurrentProcess(), pbCode, ReJitInfo::JumpStubSize);
+ return S_OK;
+
+#else // _X86_ || _AMD64_
+#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
+#endif // _X86_ || _AMD64_
+}
+
+
+#endif // DACCESS_COMPILE
+// The rest of the ReJitInfo methods are safe to compile for DAC
+
+
+
+//---------------------------------------------------------------------------------------
+//
+// ReJitInfos can be constructed in two ways: As a "regular" ReJitInfo indexed by
+// MethodDesc *, or as a "placeholder" ReJitInfo (to satisfy pre-rejit requests) indexed
+// by (Module *, methodDef). Both constructors call this helper to do all the common
+// code for initializing the ReJitInfo.
+//
+
+void ReJitInfo::CommonInit()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pCode = NULL;
+ m_pNext = NULL;
+ m_dwInternalFlags = kJumpNone;
+ m_pShared->AddMethod(this);
+ ZeroMemory(m_rgSavedCode, sizeof(m_rgSavedCode));
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Regardless of which kind of ReJitInfo this is, this will always return its
+// corresponding Module * & methodDef
+//
+// Arguments:
+// * ppModule - [out] Module * related to this ReJitInfo (which contains the
+// returned methodDef)
+// * pMethodDef - [out] methodDef related to this ReJitInfo
+//
+
+void ReJitInfo::GetModuleAndTokenRegardlessOfKeyType(Module ** ppModule, mdMethodDef * pMethodDef)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(ppModule != NULL);
+ _ASSERTE(pMethodDef != NULL);
+
+ if (m_key.m_keyType == Key::kMetadataToken)
+ {
+ GetModuleAndToken(ppModule, pMethodDef);
+ }
+ else
+ {
+ MethodDesc * pMD = GetMethodDesc();
+ _ASSERTE(pMD != NULL);
+ _ASSERTE(pMD->IsRestored());
+
+ *ppModule = pMD->GetModule();
+ *pMethodDef = pMD->GetMemberDef();
+ }
+
+ _ASSERTE(*ppModule != NULL);
+ _ASSERTE(*pMethodDef != mdTokenNil);
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Used as part of the hash table implementation in the containing ReJitManager, this
+// hashes a ReJitInfo by MethodDesc * when available, else by (Module *, methodDef)
+//
+// Arguments:
+// key - Key representing the ReJitInfo to hash
+//
+// Return Value:
+// Hash value of the ReJitInfo represented by the specified key
+//
+
+// static
+COUNT_T ReJitInfo::Hash(Key key)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (key.m_keyType == Key::kMethodDesc)
+ {
+ return HashPtr(0, PTR_MethodDesc(key.m_pMD));
+ }
+
+ _ASSERTE (key.m_keyType == Key::kMetadataToken);
+
+ return HashPtr(key.m_methodDef, PTR_Module(key.m_pModule));
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Return the IL to compile for a given ReJitInfo
+//
+// Return Value:
+// Pointer to IL buffer to compile. If the profiler has specified IL to rejit,
+// this will be our copy of the IL buffer specified by the profiler. Else, this
+// points to the original IL for the method from its module's metadata.
+//
+// Notes:
+// IL memory is managed by us, not the caller. Caller must not free the buffer.
+//
+
+COR_ILMETHOD * ReJitInfo::GetIL()
+{
+ CONTRACTL
+ {
+ THROWS; // Getting original IL via PEFile::GetIL can throw
+ CAN_TAKE_LOCK; // Looking up dynamically overridden IL takes a lock
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pShared->m_pbIL != NULL)
+ {
+ return reinterpret_cast<COR_ILMETHOD *>(m_pShared->m_pbIL);
+ }
+
+    // If the user hasn't overridden us, get whatever the original IL had
+ return GetMethodDesc()->GetILHeader(TRUE);
+}
+
+
+//---------------------------------------------------------------------------------------
+// SharedReJitInfo implementation
+
+
+SharedReJitInfo::SharedReJitInfo()
+ : m_reJitId(InterlockedIncrement(reinterpret_cast<LONG*>(&s_GlobalReJitId))),
+ m_dwInternalFlags(kStateRequested),
+ m_pbIL(NULL),
+ m_dwCodegenFlags(0),
+ m_pInfoList(NULL)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Link in the specified ReJitInfo to the list maintained by this SharedReJitInfo
+//
+// Arguments:
+// pInfo - ReJitInfo being added
+//
+
+void SharedReJitInfo::AddMethod(ReJitInfo * pInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pInfo->m_pShared == this);
+
+ // Push it on the head of our list
+ _ASSERTE(pInfo->m_pNext == NULL);
+ pInfo->m_pNext = PTR_ReJitInfo(m_pInfoList);
+ m_pInfoList = pInfo;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Unlink the specified ReJitInfo from the list maintained by this SharedReJitInfo.
+// Currently this is only used on AD unload to remove ReJitInfos of non-domain-neutral instantiations
+// of domain-neutral generics (which are tracked in the SharedDomain's ReJitManager).
+// This may be used in the future once we implement memory reclamation on revert().
+//
+// Arguments:
+// pInfo - ReJitInfo being removed
+//
+
+void SharedReJitInfo::RemoveMethod(ReJitInfo * pInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifndef DACCESS_COMPILE
+
+ // Find it
+ ReJitInfo ** ppEntry = &m_pInfoList;
+ while (*ppEntry != pInfo)
+ {
+ ppEntry = &(*ppEntry)->m_pNext;
+ _ASSERTE(*ppEntry != NULL);
+ }
+
+ // Remove it
+ _ASSERTE((*ppEntry)->m_pShared == this);
+ *ppEntry = (*ppEntry)->m_pNext;
+
+#endif // DACCESS_COMPILE
+}
+
+//---------------------------------------------------------------------------------------
+//
+// MethodDesc::MakeJitWorker() calls this to determine if there's an outstanding
+// "pre-rejit" request for a MethodDesc that has just been jitted for the first time.
+// This is also called when methods are being restored in NGEN images. The sequence looks like:
+// *Enter holder
+// Enter Rejit table lock
+// DoJumpStampIfNecessary
+// *Runtime code publishes/restores method
+// *Exit holder
+// Leave rejit table lock
+// Send rejit error callbacks if needed
+//
+// This also has a non-locking early-out if ReJIT is not enabled.
+//
+// #PublishCode:
+// Note that the runtime needs to publish/restore the PCODE while this holder is
+// on the stack, so it can happen under the ReJitManager's lock.
+// This prevents a "lost pre-rejit" race with a profiler that calls
+// RequestReJIT just as the method finishes compiling. In particular, the locking ensures
+// atomicity between this set of steps (performed in DoJumpStampIfNecessary):
+// * (1) Checking whether there is a pre-rejit request for this MD
+// * (2) If not, skip doing the pre-rejit-jmp-stamp
+// * (3) Publishing the PCODE
+//
+// with respect to these steps performed in RequestReJIT:
+// * (a) Is PCODE published yet?
+// * (b) If not, create pre-rejit (placeholder) ReJitInfo which the prestub will
+// consult when it JITs the original IL
+//
+// Without this atomicity, we could get the ordering (1), (2), (a), (b), (3), resulting
+// in the rejit request getting completely ignored (i.e., we file away the pre-rejit
+// placeholder AFTER the prestub checks for it).
+//
+// A similar race is possible for code being restored. In that case the restoring thread
+// does:
+// * (1) Check if there is a pre-rejit request for this MD
+// * (2) If not, no need to jmp-stamp
+// * (3) Restore the MD
+//
+// And RequestReJIT does:
+// * (a) [In LoadedMethodDescIterator] Is a potential MD restored yet?
+// * (b) [In MarkInstantiationsForReJit] If not, don't queue it for jump-stamping
+//
+// Same ordering (1), (2), (a), (b), (3) results in missing both opportunities to jump
+// stamp.
+
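+// A minimal usage sketch (the call site shown is hypothetical; the real callers are
+// the prestub worker and MethodDesc::CheckRestore):
+//
+//     {
+//         ReJitPublishMethodHolder publisher(pMD, pCode); // takes the table lock and
+//                                                         // jump-stamps if a pre-rejit
+//                                                         // request exists
+//         // ... publish/restore the PCODE here, while still under the lock ...
+//     } // destructor releases the lock, then reports any rejit errors
+//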
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+ReJitPublishMethodHolder::ReJitPublishMethodHolder(MethodDesc* pMethodDesc, PCODE pCode) :
+m_pMD(NULL), m_hr(S_OK)
+{
+ // This method can't have a contract because entering the table lock
+ // below increments GCNoTrigger count. Contracts always revert these changes
+ // at the end of the method but we need the incremented count to flow out of the
+ // method. The balancing decrement occurs in the destructor.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ STATIC_CONTRACT_MODE_ANY;
+
+ // We come here from the PreStub and from MethodDesc::CheckRestore
+ // The method should be effectively restored, but we haven't yet
+ // cleared the unrestored bit so we can't assert pMethodDesc->IsRestored()
+ // We can assert:
+ _ASSERTE(pMethodDesc->GetMethodTable()->IsRestored());
+
+ if (ReJitManager::IsReJITEnabled() && (pCode != NULL))
+ {
+ m_pMD = pMethodDesc;
+ ReJitManager* pReJitManager = pMethodDesc->GetReJitManager();
+ pReJitManager->m_crstTable.Enter();
+ m_hr = pReJitManager->DoJumpStampIfNecessary(pMethodDesc, pCode);
+ }
+}
+
+
+ReJitPublishMethodHolder::~ReJitPublishMethodHolder()
+{
+ // This method can't have a contract because leaving the table lock
+ // below decrements GCNoTrigger count. Contracts always revert these changes
+ // at the end of the method but we need the decremented count to flow out of the
+    // method. The balancing increment occurred in the constructor.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS; // NOTRIGGER until we leave the lock
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ STATIC_CONTRACT_MODE_ANY;
+
+ if (m_pMD)
+ {
+ ReJitManager* pReJitManager = m_pMD->GetReJitManager();
+ pReJitManager->m_crstTable.Leave();
+ if (FAILED(m_hr))
+ {
+ ReJitManager::ReportReJITError(m_pMD->GetModule(), m_pMD->GetMemberDef(), m_pMD, m_hr);
+ }
+ }
+}
+
+ReJitPublishMethodTableHolder::ReJitPublishMethodTableHolder(MethodTable* pMethodTable) :
+m_pMethodTable(NULL)
+{
+ // This method can't have a contract because entering the table lock
+ // below increments GCNoTrigger count. Contracts always revert these changes
+ // at the end of the method but we need the incremented count to flow out of the
+ // method. The balancing decrement occurs in the destructor.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ STATIC_CONTRACT_MODE_ANY;
+
+ // We come here from MethodTable::SetIsRestored
+ // The method table should be effectively restored, but we haven't yet
+ // cleared the unrestored bit so we can't assert pMethodTable->IsRestored()
+
+ if (ReJitManager::IsReJITEnabled())
+ {
+ m_pMethodTable = pMethodTable;
+ ReJitManager* pReJitManager = pMethodTable->GetModule()->GetReJitManager();
+ pReJitManager->m_crstTable.Enter();
+ MethodTable::IntroducedMethodIterator itMethods(pMethodTable, FALSE);
+ for (; itMethods.IsValid(); itMethods.Next())
+ {
+ // Although the MethodTable is restored, the methods might not be.
+ // We need to be careful to only query portions of the MethodDesc
+ // that work in a partially restored state. The only methods that need
+ // further restoration are IL stubs (which aren't rejittable) and
+            // generic methods. The only generic methods directly accessible from
+            // the MethodTable are definitions. GetNativeCode() on generic defs
+            // will run successfully and return NULL, which short-circuits the
+            // rest of the logic.
+ MethodDesc * pMD = itMethods.GetMethodDesc();
+ PCODE pCode = pMD->GetNativeCode();
+ if (pCode != NULL)
+ {
+ HRESULT hr = pReJitManager->DoJumpStampIfNecessary(pMD, pCode);
+ if (FAILED(hr))
+ {
+ ReJitManager::AddReJITError(pMD->GetModule(), pMD->GetMemberDef(), pMD, hr, &m_errors);
+ }
+ }
+ }
+ }
+}
+
+
+ReJitPublishMethodTableHolder::~ReJitPublishMethodTableHolder()
+{
+ // This method can't have a contract because leaving the table lock
+ // below decrements GCNoTrigger count. Contracts always revert these changes
+ // at the end of the method but we need the decremented count to flow out of the
+    // method. The balancing increment occurred in the constructor.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS; // NOTRIGGER until we leave the lock
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ STATIC_CONTRACT_MODE_ANY;
+
+ if (m_pMethodTable)
+ {
+ ReJitManager* pReJitManager = m_pMethodTable->GetModule()->GetReJitManager();
+ pReJitManager->m_crstTable.Leave();
+ for (int i = 0; i < m_errors.Count(); i++)
+ {
+ ReJitManager::ReportReJITError(&(m_errors[i]));
+ }
+ }
+}
+#endif // !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+
+#else // FEATURE_REJIT
+
+// On architectures that don't support rejit, just keep around some do-nothing
+// stubs so the rest of the VM doesn't have to be littered with #ifdef FEATURE_REJIT
+
+// static
+HRESULT ReJitManager::RequestReJIT(
+ ULONG cFunctions,
+ ModuleID rgModuleIDs[],
+ mdMethodDef rgMethodDefs[])
+{
+ return E_NOTIMPL;
+}
+
+// static
+HRESULT ReJitManager::RequestRevert(
+ ULONG cFunctions,
+ ModuleID rgModuleIDs[],
+ mdMethodDef rgMethodDefs[],
+ HRESULT rgHrStatuses[])
+{
+ return E_NOTIMPL;
+}
+
+// static
+void ReJitManager::OnAppDomainExit(AppDomain * pAppDomain)
+{
+}
+
+ReJitManager::ReJitManager()
+{
+}
+
+void ReJitManager::PreInit(BOOL fSharedDomain)
+{
+}
+
+ReJITID ReJitManager::GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart)
+{
+ return 0;
+}
+
+ReJITID ReJitManager::GetReJitIdNoLock(PTR_MethodDesc pMD, PCODE pCodeStart)
+{
+ return 0;
+}
+
+PCODE ReJitManager::GetCodeStart(PTR_MethodDesc pMD, ReJITID reJitId)
+{
+ return NULL;
+}
+
+HRESULT ReJitManager::GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * pcReJitIds, ReJITID reJitIds[])
+{
+ return E_NOTIMPL;
+}
+
+#endif // FEATURE_REJIT
diff --git a/src/vm/rejit.h b/src/vm/rejit.h
new file mode 100644
index 0000000000..1b4040dcd0
--- /dev/null
+++ b/src/vm/rejit.h
@@ -0,0 +1,570 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ===========================================================================
+// File: REJIT.H
+//
+
+//
+// REJIT.H defines the class and structures used to store info about rejitted
+// methods. See comment at top of rejit.cpp for more information on how
+// rejit works.
+//
+// ===========================================================================
+#ifndef _REJIT_H_
+#define _REJIT_H_
+
+#include "common.h"
+#include "contractimpl.h"
+#include "shash.h"
+#include "corprof.h"
+
+struct ReJitInfo;
+struct SharedReJitInfo;
+class ReJitManager;
+class MethodDesc;
+class ClrDataAccess;
+
+#ifdef FEATURE_REJIT
+
+//---------------------------------------------------------------------------------------
+// The CLR's implementation of ICorProfilerFunctionControl, which is passed
+// to the profiler. The profiler calls methods on this to specify the IL and
+// codegen flags for a given rejit request.
+//
+class ProfilerFunctionControl : public ICorProfilerFunctionControl
+{
+public:
+ ProfilerFunctionControl(LoaderHeap * pHeap);
+ ~ProfilerFunctionControl();
+
+ // IUnknown functions
+ virtual HRESULT __stdcall QueryInterface(REFIID id, void** pInterface);
+ virtual ULONG __stdcall AddRef();
+ virtual ULONG __stdcall Release();
+
+ // ICorProfilerFunctionControl functions
+ virtual HRESULT __stdcall SetCodegenFlags(DWORD flags);
+ virtual HRESULT __stdcall SetILFunctionBody(ULONG cbNewILMethodHeader, LPCBYTE pbNewILMethodHeader);
+ virtual HRESULT __stdcall SetILInstrumentedCodeMap(ULONG cILMapEntries, COR_IL_MAP * rgILMapEntries);
+
+ // Accessors
+ DWORD GetCodegenFlags();
+ LPBYTE GetIL();
+ ULONG GetInstrumentedMapEntryCount();
+ COR_IL_MAP* GetInstrumentedMapEntries();
+
+
+protected:
+ Volatile<LONG> m_refCount;
+ LoaderHeap * m_pHeap;
+ DWORD m_dwCodegenFlags;
+ ULONG m_cbIL;
+
+ // This pointer will get copied into SharedReJitInfo::m_pbIL and owned there.
+ LPBYTE m_pbIL;
+ ULONG m_cInstrumentedMapEntries;
+ COR_IL_MAP * m_rgInstrumentedMapEntries;
+};
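+
+// Expected call sequence from a profiler (illustrative only; the argument
+// names here are placeholders): inside its GetReJITParameters callback the
+// profiler receives this object and calls, for example:
+//
+// pFunctionControl->SetCodegenFlags(COR_PRF_CODEGEN_DISABLE_INLINING);
+// pFunctionControl->SetILFunctionBody(cbNewIL, pbNewIL);          // the profiler's replacement IL
+// pFunctionControl->SetILInstrumentedCodeMap(cMapEntries, rgMapEntries);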
+
+//---------------------------------------------------------------------------------------
+// Helper base class used by the structures below to enforce that their
+// pieces get allocated on the appropriate loader heaps
+//
+struct LoaderHeapAllocatedRejitStructure
+{
+public:
+ void * operator new (size_t size, LoaderHeap * pHeap, const NoThrow&);
+ void * operator new (size_t size, LoaderHeap * pHeap);
+};
+
+//---------------------------------------------------------------------------------------
+// One instance of this per rejit request for each mdMethodDef. Contains IL and
+// compilation flags. This is used primarily as a structure, so most of its
+// members are left public.
+//
+struct SharedReJitInfo : public LoaderHeapAllocatedRejitStructure
+{
+private:
+ // This determines what to use next as the value of the profiling API's ReJITID.
+ static ReJITID s_GlobalReJitId;
+
+public:
+ // These represent the various states a SharedReJitInfo can be in.
+ enum InternalFlags
+ {
+ // The profiler has requested a ReJit, so we've allocated stuff, but we haven't
+ // called back to the profiler to get any info or indicate that the ReJit has
+ // started. (This Info can be 'reused' for a new ReJit if the
+ // profiler calls RequestRejit again before we transition to the next state.)
+ kStateRequested = 0x00000000,
+
+ // The CLR has initiated the call to the profiler's GetReJITParameters() callback
+ // but it hasn't completed yet. At this point we have to assume the profiler has
+        // committed to a specific IL body, even if the CLR doesn't know what it is yet.
+ // If the profiler calls RequestRejit we need to allocate a new SharedReJitInfo
+ // and call GetReJITParameters() again.
+ kStateGettingReJITParameters = 0x00000001,
+
+ // We have asked the profiler about this method via ICorProfilerFunctionControl,
+ // and have thus stored the IL and codegen flags the profiler specified. Can only
+ // transition to kStateReverted from this state.
+ kStateActive = 0x00000002,
+
+ // The methoddef has been reverted, but not freed yet. It (or its instantiations
+ // for generics) *MAY* still be active on the stack someplace or have outstanding
+ // memory references.
+ kStateReverted = 0x00000003,
+
+
+ kStateMask = 0x0000000F,
+ };
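+
+    // Typical progression through the states above (kStateRequested may be
+    // reused for a fresh request, as noted):
+    // kStateRequested -> kStateGettingReJITParameters -> kStateActive -> kStateReverted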
+
+ DWORD m_dwInternalFlags;
+
+ // Data
+ LPBYTE m_pbIL;
+ DWORD m_dwCodegenFlags;
+ InstrumentedILOffsetMapping m_instrumentedILMap;
+
+private:
+ // This is the value of the profiling API's ReJITID for this particular
+ // rejit request.
+ const ReJITID m_reJitId;
+
+ // Children
+ ReJitInfo * m_pInfoList;
+
+public:
+ // Constructor
+ SharedReJitInfo();
+
+ // Intentionally no destructor. SharedReJitInfo and its contents are
+ // allocated on a loader heap, so SharedReJitInfo and its contents will be
+ // freed when the AD is unloaded.
+
+    // Read-Only Identification
+ ReJITID GetId() { return m_reJitId; }
+
+ void AddMethod(ReJitInfo * pInfo);
+
+ void RemoveMethod(ReJitInfo * pInfo);
+
+ ReJitInfo * GetMethods() { return m_pInfoList; }
+
+ InternalFlags GetState();
+};
+
+//---------------------------------------------------------------------------------------
+// One instance of this per rejit request for each MethodDesc*. One SharedReJitInfo
+// corresponds to many ReJitInfos, as the SharedReJitInfo tracks the rejit request for
+// the methodDef token whereas the ReJitInfo tracks the rejit request for each
+// corresponding MethodDesc* (instantiation). Points to the actual generated code.
+//
+// In the case of "pre-rejit" (see comment at top of rejit.cpp), a special "placeholder"
+// instance of ReJitInfo is used to "remember" to jmp-stamp a not-yet-jitted-method once
+// it finally gets jitted the first time.
+//
+// Each ReJitManager contains a hash table of ReJitInfo instances, keyed by
+// ReJitManager::m_key.
+//
+// This is used primarily as a structure, so most of its members are left public.
+//
+struct ReJitInfo : public LoaderHeapAllocatedRejitStructure
+{
+public:
+ // The size of the code used to jump stamp the prolog
+ static const size_t JumpStubSize =
+#if defined(_X86_) || defined(_AMD64_)
+ 5;
+#else
+#error "Need to define size of rejit jump-stamp for this platform"
+ 1;
+#endif
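+
+    // (On x86/amd64 the jump stamp is a near relative jmp: opcode 0xE9
+    // followed by a rel32 displacement, hence the 5 bytes above.)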
+
+ // Used by PtrSHash template as the key for this ReJitInfo. For regular
+ // ReJitInfos, the key is the MethodDesc*. For placeholder ReJitInfos
+ // (to facilitate pre-rejit), the key is (Module*, mdMethodDef).
+ struct Key
+ {
+ public:
+ enum
+ {
+ // The key has not yet had its values initialized
+ kUninitialized = 0x0,
+
+ // The key represents a loaded MethodDesc, and is identified by the m_pMD
+ // field
+ kMethodDesc = 0x1,
+
+ // The key represents a "placeholder" ReJitInfo identified not by loaded
+ // MethodDesc, but by the module and metadata token (m_pModule,
+ // m_methodDef).
+ kMetadataToken = 0x2,
+ };
+
+ // Storage consists of a discriminated union between MethodDesc* or
+ // (Module*, mdMethodDef), with the key type as the discriminator.
+ union
+ {
+ TADDR m_pMD;
+ TADDR m_pModule;
+ };
+ ULONG32 m_methodDef : 28;
+ ULONG32 m_keyType : 2;
+
+ Key();
+ Key(PTR_MethodDesc pMD);
+ Key(PTR_Module pModule, mdMethodDef methodDef);
+ };
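+
+    // The two key forms in use (illustrative only):
+    //     ReJitInfo::Key(pMD)                 // regular entry, keyed by a loaded MethodDesc
+    //     ReJitInfo::Key(pModule, methodDef)  // pre-rejit placeholder entry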
+
+ static COUNT_T Hash(Key key);
+
+ enum InternalFlags
+ {
+ // This ReJitInfo is either a placeholder (identified by module and
+ // metadata token, rather than loaded MethodDesc) OR this ReJitInfo is
+        // identified by a loaded MethodDesc that has been reverted OR has not
+        // yet been jump-stamped. In the last case, the time window where this
+ // ReJitInfo would stay in kJumpNone is rather small, as
+ // RequestReJIT() will immediately cause the originally JITted code to
+ // be jump-stamped.
+ kJumpNone = 0x00000000,
+
+ // This ReJitInfo is identified by a loaded MethodDesc that has been compiled and
+ // jump-stamped, with the target being the prestub. The MethodDesc has not yet
+ // been rejitted
+ kJumpToPrestub = 0x00000001,
+
+ // This ReJitInfo is identified by a loaded MethodDesc that has been compiled AND
+ // rejitted. The top of the originally JITted code has been jump-stamped, with
+ // the target being the latest version of the rejitted code.
+ kJumpToRejittedCode = 0x00000002,
+
+ kStateMask = 0x0000000F,
+ };
+
+ Key m_key;
+ DWORD m_dwInternalFlags;
+
+ // The beginning of the rejitted code
+ PCODE m_pCode;
+
+ // The parent SharedReJitInfo, which manages the rejit request for all
+ // instantiations.
+ PTR_SharedReJitInfo const m_pShared;
+
+ // My next sibling ReJitInfo for this rejit request (e.g., another
+ // generic instantiation of the same method)
+ PTR_ReJitInfo m_pNext;
+
+ // The originally JITted code that was overwritten with the jmp stamp.
+ BYTE m_rgSavedCode[JumpStubSize];
+
+
+ ReJitInfo(PTR_MethodDesc pMD, SharedReJitInfo * pShared);
+ ReJitInfo(PTR_Module pModule, mdMethodDef methodDef, SharedReJitInfo * pShared);
+
+ // Intentionally no destructor. ReJitInfo is allocated on a loader heap,
+ // and will be freed (along with its associated SharedReJitInfo) when the
+ // AD is unloaded.
+
+ Key GetKey();
+ PTR_MethodDesc GetMethodDesc();
+ void GetModuleAndToken(Module ** ppModule, mdMethodDef * pMethodDef);
+ void GetModuleAndTokenRegardlessOfKeyType(Module ** ppModule, mdMethodDef * pMethodDef);
+ InternalFlags GetState();
+
+ COR_ILMETHOD * GetIL();
+
+ HRESULT JumpStampNativeCode(PCODE pCode = NULL);
+ HRESULT UndoJumpStampNativeCode(BOOL fEESuspended);
+ HRESULT UpdateJumpTarget(BOOL fEESuspended, PCODE pRejittedCode);
+ HRESULT UpdateJumpStampHelper(BYTE* pbCode, INT64 i64OldValue, INT64 i64newValue, BOOL fContentionPossible);
+
+
+protected:
+ void CommonInit();
+ INDEBUG(BOOL CodeIsSaved();)
+};
+
+//---------------------------------------------------------------------------------------
+// Used by the SHash inside ReJitManager which maintains the set of ReJitInfo instances.
+//
+class ReJitInfoTraits : public DefaultSHashTraits<PTR_ReJitInfo>
+{
+public:
+
+ // explicitly declare local typedefs for these traits types, otherwise
+ // the compiler may get confused
+ typedef DefaultSHashTraits<PTR_ReJitInfo> PARENT;
+ typedef PARENT::element_t element_t;
+ typedef PARENT::count_t count_t;
+
+ typedef ReJitInfo::Key key_t;
+
+ static key_t GetKey(const element_t &e);
+ static BOOL Equals(key_t k1, key_t k2);
+ static count_t Hash(key_t k);
+ static bool IsNull(const element_t &e);
+};
+
+// RequestRejit and RequestRevert use these batches to accumulate ReJitInfos that need their
+// jump stamps updated
+class ReJitManager;
+struct ReJitManagerJumpStampBatch
+{
+ ReJitManagerJumpStampBatch(ReJitManager * pReJitManager) : undoMethods(), preStubMethods()
+ {
+ LIMITED_METHOD_CONTRACT;
+ this->pReJitManager = pReJitManager;
+ }
+
+ ReJitManager* pReJitManager;
+ CDynArray<ReJitInfo *> undoMethods;
+ CDynArray<ReJitInfo *> preStubMethods;
+};
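+
+// Illustrative flow (simplified; the real logic lives in RequestReJIT and
+// BatchUpdateJumpStamps): one batch is accumulated per ReJitManager, and all
+// jump-stamp changes for that manager are then applied together:
+//
+// ReJitManagerJumpStampBatch batch(pMgr);
+// // ... MarkForReJit fills batch.preStubMethods / batch.undoMethods ...
+// hr = pMgr->BatchUpdateJumpStamps(&batch.undoMethods, &batch.preStubMethods, &errors);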
+
+class ReJitManagerJumpStampBatchTraits : public DefaultSHashTraits<ReJitManagerJumpStampBatch *>
+{
+public:
+
+ // explicitly declare local typedefs for these traits types, otherwise
+ // the compiler may get confused
+ typedef DefaultSHashTraits<ReJitManagerJumpStampBatch *> PARENT;
+ typedef PARENT::element_t element_t;
+ typedef PARENT::count_t count_t;
+
+ typedef ReJitManager * key_t;
+
+ static key_t GetKey(const element_t &e)
+ {
+ return e->pReJitManager;
+ }
+
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ return (k1 == k2);
+ }
+
+ static count_t Hash(key_t k)
+ {
+ return (count_t)k;
+ }
+
+ static bool IsNull(const element_t &e)
+ {
+ return (e == NULL);
+ }
+};
+
+struct ReJitReportErrorWorkItem
+{
+ Module* pModule;
+ mdMethodDef methodDef;
+ MethodDesc* pMethodDesc;
+ HRESULT hrStatus;
+};
+
+
+#endif // FEATURE_REJIT
+
+//
+// These holders are used by runtime code that is making new code
+// available for execution, either by publishing jitted code
+// or restoring NGEN code. It ensures the publishing is synchronized
+// with rejit requests
+//
+class ReJitPublishMethodHolder
+{
+public:
+#if !defined(FEATURE_REJIT) || defined(DACCESS_COMPILE) || defined(CROSSGEN_COMPILE)
+ ReJitPublishMethodHolder(MethodDesc* pMethod, PCODE pCode) { }
+#else
+ ReJitPublishMethodHolder(MethodDesc* pMethod, PCODE pCode);
+ ~ReJitPublishMethodHolder();
+#endif
+
+private:
+#if defined(FEATURE_REJIT)
+ MethodDesc * m_pMD;
+ HRESULT m_hr;
+#endif
+};
+
+class ReJitPublishMethodTableHolder
+{
+public:
+#if !defined(FEATURE_REJIT) || defined(DACCESS_COMPILE) || defined(CROSSGEN_COMPILE)
+ ReJitPublishMethodTableHolder(MethodTable* pMethodTable) { }
+#else
+ ReJitPublishMethodTableHolder(MethodTable* pMethodTable);
+ ~ReJitPublishMethodTableHolder();
+#endif
+
+private:
+#if defined(FEATURE_REJIT)
+ MethodTable* m_pMethodTable;
+ CDynArray<ReJitReportErrorWorkItem> m_errors;
+#endif
+};
+
+//---------------------------------------------------------------------------------------
+// The big honcho. One of these per AppDomain, plus one for the
+// SharedDomain. Contains the hash table of ReJitInfo structures to manage
+// every rejit and revert request for its owning domain.
+//
+class ReJitManager
+{
+ friend class ClrDataAccess;
+ friend class DacDbiInterfaceImpl;
+
+    // I would have preferred to make these inner classes, but
+    // then I can't friend them from crst easily.
+ friend class ReJitPublishMethodHolder;
+ friend class ReJitPublishMethodTableHolder;
+
+private:
+
+#ifdef FEATURE_REJIT
+
+ // Hash table mapping MethodDesc* (or (ModuleID, mdMethodDef)) to its
+ // ReJitInfos. One key may map to multiple ReJitInfos if there have been
+ // multiple rejit requests made for the same MD. See
+ // code:ReJitManager::ReJitManager#Invariants for more information.
+ typedef SHash<ReJitInfoTraits> ReJitInfoHash;
+
+ // One global crst (for the entire CLR instance) to synchronize
+ // cross-ReJitManager operations, such as batch calls to RequestRejit and
+ // RequestRevert (which modify multiple ReJitManager instances).
+ static CrstStatic s_csGlobalRequest;
+
+    // All the ReJitInfos (and their linked SharedReJitInfos) for this domain.
+ ReJitInfoHash m_table;
+
+ // The crst that synchronizes the data in m_table, including
+ // adding/removing to m_table, as well as state changes made to
+ // individual ReJitInfos & SharedReJitInfos in m_table.
+ CrstExplicitInit m_crstTable;
+
+#endif //FEATURE_REJIT
+
+public:
+    // The ReJitManager takes care of grabbing its m_crstTable when necessary. However,
+ // for clients who need to do this explicitly (like ETW rundown), this holder may be
+ // used.
+ class TableLockHolder
+#ifdef FEATURE_REJIT
+ : public CrstHolder
+#endif
+ {
+ public:
+ TableLockHolder(ReJitManager * pReJitManager);
+ };
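+
+    // For example (a hypothetical rundown-style caller):
+    //
+    //     ReJitManager::TableLockHolder lock(pReJitManager);
+    //     // ... enumerate rejitted methods while m_crstTable is held ...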
+
+ static void InitStatic();
+
+ static BOOL IsReJITEnabled();
+
+ static void OnAppDomainExit(AppDomain * pAppDomain);
+
+ static HRESULT RequestReJIT(
+ ULONG cFunctions,
+ ModuleID rgModuleIDs[],
+ mdMethodDef rgMethodDefs[]);
+
+ static HRESULT RequestRevert(
+ ULONG cFunctions,
+ ModuleID rgModuleIDs[],
+ mdMethodDef rgMethodDefs[],
+ HRESULT rgHrStatuses[]);
+
+ static PCODE DoReJitIfNecessary(PTR_MethodDesc pMD); // Invokes the jit, or returns previously rejitted code
+
+ static void DoJumpStampForAssemblyIfNecessary(Assembly* pAssemblyToSearch);
+
+ static DWORD GetCurrentReJitFlags(PTR_MethodDesc pMD);
+
+ ReJitManager();
+
+ void PreInit(BOOL fSharedDomain);
+
+ ReJITID GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart);
+
+ ReJITID GetReJitIdNoLock(PTR_MethodDesc pMD, PCODE pCodeStart);
+
+ PCODE GetCodeStart(PTR_MethodDesc pMD, ReJITID reJitId);
+
+ HRESULT GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * pcReJitIds, ReJITID reJitIds[]);
+
+#ifdef FEATURE_REJIT
+
+
+ INDEBUG(BOOL IsTableCrstOwnedByCurrentThread());
+
+private:
+ static HRESULT IsMethodSafeForReJit(PTR_MethodDesc pMD);
+ static void ReportReJITError(ReJitReportErrorWorkItem* pErrorRecord);
+ static void ReportReJITError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus);
+ static HRESULT AddReJITError(ReJitInfo* pReJitInfo, HRESULT hrStatus, CDynArray<ReJitReportErrorWorkItem> * pErrors);
+ static HRESULT AddReJITError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus, CDynArray<ReJitReportErrorWorkItem> * pErrors);
+ HRESULT BatchUpdateJumpStamps(CDynArray<ReJitInfo *> * pUndoMethods, CDynArray<ReJitInfo *> * pPreStubMethods, CDynArray<ReJitReportErrorWorkItem> * pErrors);
+
+ PCODE DoReJitIfNecessaryWorker(PTR_MethodDesc pMD); // Invokes the jit, or returns previously rejitted code
+ DWORD GetCurrentReJitFlagsWorker(PTR_MethodDesc pMD);
+
+ HRESULT MarkAllInstantiationsForReJit(
+ SharedReJitInfo * pSharedForAllGenericInstantiations,
+ AppDomain * pAppDomainToSearch,
+ PTR_Module pModuleContainingGenericDefinition,
+ mdMethodDef methodDef,
+ ReJitManagerJumpStampBatch* pJumpStampBatch,
+ CDynArray<ReJitReportErrorWorkItem> * pRejitErrors);
+
+ INDEBUG(BaseDomain * m_pDomain;)
+ INDEBUG(void Dump(LPCSTR szIntroText);)
+ INDEBUG(void AssertRestOfEntriesAreReverted(
+ ReJitInfoHash::KeyIterator iter,
+ ReJitInfoHash::KeyIterator end);)
+
+
+ HRESULT DoJumpStampIfNecessary(MethodDesc* pMD, PCODE pCode);
+ HRESULT MarkForReJit(PTR_MethodDesc pMD, SharedReJitInfo * pSharedToReuse, ReJitManagerJumpStampBatch* pJumpStampBatch, CDynArray<ReJitReportErrorWorkItem> * pRejitErrors, SharedReJitInfo ** ppSharedUsed);
+ HRESULT MarkForReJit(PTR_Module pModule, mdMethodDef methodDef, ReJitManagerJumpStampBatch* pJumpStampBatch, CDynArray<ReJitReportErrorWorkItem> * pRejitErrors, SharedReJitInfo ** ppSharedUsed);
+ HRESULT MarkForReJitHelper(
+ PTR_MethodDesc pMD,
+ PTR_Module pModule,
+ mdMethodDef methodDef,
+ SharedReJitInfo * pSharedToReuse,
+ ReJitManagerJumpStampBatch* pJumpStampBatch,
+ CDynArray<ReJitReportErrorWorkItem> * pRejitErrors,
+ /* out */ SharedReJitInfo ** ppSharedUsed);
+ HRESULT AddNewReJitInfo(
+ PTR_MethodDesc pMD,
+ PTR_Module pModule,
+ mdMethodDef methodDef,
+ SharedReJitInfo * pShared,
+ ReJitInfo ** ppInfo);
+ HRESULT RequestRevertByToken(PTR_Module pModule, mdMethodDef methodDef);
+ PTR_ReJitInfo FindReJitInfo(PTR_MethodDesc pMD, PCODE pCodeStart, ReJITID reJitId);
+ PTR_ReJitInfo FindNonRevertedReJitInfo(PTR_Module pModule, mdMethodDef methodDef);
+ PTR_ReJitInfo FindNonRevertedReJitInfo(PTR_MethodDesc pMD);
+ PTR_ReJitInfo FindNonRevertedReJitInfoHelper(PTR_MethodDesc pMD, PTR_Module pModule, mdMethodDef methodDef);
+ ReJitInfo* FindPreReJittedReJitInfo(ReJitInfoHash::KeyIterator beginIter, ReJitInfoHash::KeyIterator endIter);
+ HRESULT Revert(SharedReJitInfo * pShared, ReJitManagerJumpStampBatch* pJumpStampBatch);
+ PCODE DoReJit(ReJitInfo * pInfo);
+ ReJitInfoHash::KeyIterator GetBeginIterator(PTR_MethodDesc pMD);
+ ReJitInfoHash::KeyIterator GetEndIterator(PTR_MethodDesc pMD);
+ ReJitInfoHash::KeyIterator GetBeginIterator(PTR_Module pModule, mdMethodDef methodDef);
+ ReJitInfoHash::KeyIterator GetEndIterator(PTR_Module pModule, mdMethodDef methodDef);
+ void RemoveReJitInfosFromDomain(AppDomain * pAppDomain);
+
+#endif // FEATURE_REJIT
+
+};
+
+#include "rejit.inl"
+
+#endif // _REJIT_H_
diff --git a/src/vm/rejit.inl b/src/vm/rejit.inl
new file mode 100644
index 0000000000..5434752ef4
--- /dev/null
+++ b/src/vm/rejit.inl
@@ -0,0 +1,346 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: REJIT.INL
+//
+
+//
+// Inline definitions of various items declared in REJIT.H
+// ===========================================================================
+#ifndef _REJIT_INL_
+#define _REJIT_INL_
+
+#ifdef FEATURE_REJIT
+
+inline SharedReJitInfo::InternalFlags SharedReJitInfo::GetState()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (InternalFlags)(m_dwInternalFlags & kStateMask);
+}
+
+inline ReJitInfo::ReJitInfo(PTR_MethodDesc pMD, SharedReJitInfo * pShared) :
+ m_key(pMD),
+ m_pShared(pShared)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ CommonInit();
+}
+
+inline ReJitInfo::ReJitInfo(PTR_Module pModule, mdMethodDef methodDef, SharedReJitInfo * pShared) :
+ m_key(pModule, methodDef),
+ m_pShared(pShared)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ CommonInit();
+}
+
+inline ReJitInfo::Key::Key() :
+ m_keyType(kUninitialized),
+ m_pMD(NULL),
+ m_methodDef(mdTokenNil)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+inline ReJitInfo::Key::Key(PTR_MethodDesc pMD) :
+ m_keyType(kMethodDesc),
+ m_pMD(dac_cast<TADDR>(pMD)),
+ m_methodDef(mdTokenNil)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+inline ReJitInfo::Key::Key(PTR_Module pModule, mdMethodDef methodDef) :
+ m_keyType(kMetadataToken),
+ m_pModule(dac_cast<TADDR>(pModule)),
+ m_methodDef(methodDef)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+inline ReJitInfo::Key ReJitInfo::GetKey()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_key;
+}
+
+inline ReJitInfo::InternalFlags ReJitInfo::GetState()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (InternalFlags)(m_dwInternalFlags & kStateMask);
+}
+
+inline PTR_MethodDesc ReJitInfo::GetMethodDesc()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(m_key.m_keyType == Key::kMethodDesc);
+ return PTR_MethodDesc(m_key.m_pMD);
+}
+
+inline void ReJitInfo::GetModuleAndToken(Module ** ppModule, mdMethodDef * pMethodDef)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(ppModule != NULL);
+ _ASSERTE(pMethodDef != NULL);
+ _ASSERTE(m_key.m_keyType == Key::kMetadataToken);
+
+ *ppModule = PTR_Module(m_key.m_pModule);
+ *pMethodDef = (mdMethodDef) m_key.m_methodDef;
+}
+
+#ifdef _DEBUG
+inline BOOL ReJitInfo::CodeIsSaved()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ for (size_t i=0; i < sizeof(m_rgSavedCode); i++)
+ {
+ if (m_rgSavedCode[i] != 0)
+ return TRUE;
+ }
+ return FALSE;
+}
+#endif //_DEBUG
+
+// static
+inline ReJitInfoTraits::key_t ReJitInfoTraits::GetKey(const element_t &e)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return e->GetKey();
+}
+
+// static
+inline BOOL ReJitInfoTraits::Equals(key_t k1, key_t k2)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Always use the values of the TADDRs of the MethodDesc * and Module * when treating
+ // them as lookup keys into the SHash.
+
+ if (k1.m_keyType == ReJitInfo::Key::kMethodDesc)
+ {
+ return ((k2.m_keyType == ReJitInfo::Key::kMethodDesc) &&
+ (dac_cast<TADDR>(PTR_MethodDesc(k1.m_pMD)) ==
+ dac_cast<TADDR>(PTR_MethodDesc(k2.m_pMD))));
+ }
+
+ _ASSERTE(k1.m_keyType == ReJitInfo::Key::kMetadataToken);
+ return ((k2.m_keyType == ReJitInfo::Key::kMetadataToken) &&
+ (dac_cast<TADDR>(PTR_Module(k1.m_pModule)) ==
+ dac_cast<TADDR>(PTR_Module(k2.m_pModule))) &&
+ (k1.m_methodDef == k2.m_methodDef));
+}
+
+// static
+inline ReJitInfoTraits::count_t ReJitInfoTraits::Hash(key_t k)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return ReJitInfo::Hash(k);
+}
+
+// static
+inline bool ReJitInfoTraits::IsNull(const element_t &e)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return e == NULL;
+}
+
+// static
+inline void ReJitManager::InitStatic()
+{
+ STANDARD_VM_CONTRACT;
+
+ s_csGlobalRequest.Init(CrstReJITGlobalRequest);
+}
+
+// static
+inline BOOL ReJitManager::IsReJITEnabled()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return CORProfilerEnableRejit();
+}
+
+inline ReJitManager::ReJitInfoHash::KeyIterator ReJitManager::GetBeginIterator(PTR_MethodDesc pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+#ifndef DACCESS_COMPILE
+ _ASSERTE(m_crstTable.OwnedByCurrentThread());
+#endif
+ return m_table.Begin(ReJitInfo::Key(pMD));
+}
+
+inline ReJitManager::ReJitInfoHash::KeyIterator ReJitManager::GetEndIterator(PTR_MethodDesc pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+#ifndef DACCESS_COMPILE
+ _ASSERTE(m_crstTable.OwnedByCurrentThread());
+#endif
+ return m_table.End(ReJitInfo::Key(pMD));
+}
+
+inline ReJitManager::ReJitInfoHash::KeyIterator ReJitManager::GetBeginIterator(PTR_Module pModule, mdMethodDef methodDef)
+{
+ LIMITED_METHOD_CONTRACT;
+#ifndef DACCESS_COMPILE
+ _ASSERTE(m_crstTable.OwnedByCurrentThread());
+#endif
+ return m_table.Begin(ReJitInfo::Key(pModule, methodDef));
+}
+
+inline ReJitManager::ReJitInfoHash::KeyIterator ReJitManager::GetEndIterator(PTR_Module pModule, mdMethodDef methodDef)
+{
+ LIMITED_METHOD_CONTRACT;
+#ifndef DACCESS_COMPILE
+ _ASSERTE(m_crstTable.OwnedByCurrentThread());
+#endif
+ return m_table.End(ReJitInfo::Key(pModule, methodDef));
+}
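+
+// Typical use of the iterator pairs above, under m_crstTable (an illustrative
+// sketch; real callers assert lock ownership just as the functions above do):
+//
+// for (ReJitInfoHash::KeyIterator iter = GetBeginIterator(pMD),
+//                                 end  = GetEndIterator(pMD);
+//      iter != end;
+//      iter++)
+// {
+//     ReJitInfo * pInfo = *iter;
+//     // ... examine pInfo->GetState(), pInfo->m_pShared, etc. ...
+// }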
+
+#ifdef _DEBUG
+inline BOOL ReJitManager::IsTableCrstOwnedByCurrentThread()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_crstTable.OwnedByCurrentThread();
+}
+#endif //_DEBUG
+
+
+inline HRESULT ReJitManager::MarkForReJit(
+ PTR_MethodDesc pMD,
+ SharedReJitInfo * pSharedToReuse,
+ ReJitManagerJumpStampBatch* pJumpStampBatch,
+ CDynArray<ReJitReportErrorWorkItem> * pRejitErrors,
+ /* out */ SharedReJitInfo ** ppSharedUsed)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return MarkForReJitHelper(pMD, NULL, mdTokenNil, pSharedToReuse, pJumpStampBatch, pRejitErrors, ppSharedUsed);
+}
+
+inline HRESULT ReJitManager::MarkForReJit(
+ PTR_Module pModule,
+ mdMethodDef methodDef,
+ ReJitManagerJumpStampBatch* pJumpStampBatch,
+ CDynArray<ReJitReportErrorWorkItem> * pRejitErrors,
+ /* out */ SharedReJitInfo ** ppSharedUsed)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return MarkForReJitHelper(NULL, pModule, methodDef, NULL, pJumpStampBatch, pRejitErrors, ppSharedUsed);
+}
+
+inline PTR_ReJitInfo ReJitManager::FindNonRevertedReJitInfo(PTR_Module pModule, mdMethodDef methodDef)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return FindNonRevertedReJitInfoHelper(NULL, pModule, methodDef);
+}
+
+inline PTR_ReJitInfo ReJitManager::FindNonRevertedReJitInfo(PTR_MethodDesc pMD)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return FindNonRevertedReJitInfoHelper(pMD, NULL, NULL);
+}
+
+//static
+inline void ReJitManager::ReportReJITError(ReJitReportErrorWorkItem* pErrorRecord)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ CAN_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ ReportReJITError(pErrorRecord->pModule, pErrorRecord->methodDef, pErrorRecord->pMethodDesc, pErrorRecord->hrStatus);
+}
+
+// static
+inline void ReJitManager::ReportReJITError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus)
+{
+#ifdef PROFILING_SUPPORTED
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ CAN_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ _ASSERTE(CORProfilerEnableRejit());
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->ReJITError(
+ reinterpret_cast< ModuleID > (pModule),
+ methodDef,
+ reinterpret_cast< FunctionID > (pMD),
+ hrStatus);
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+}
+
+inline ReJitManager::TableLockHolder::TableLockHolder(ReJitManager * pReJitManager)
+#ifdef FEATURE_REJIT
+ : CrstHolder(&pReJitManager->m_crstTable)
+#endif // FEATURE_REJIT
+{
+ WRAPPER_NO_CONTRACT;
+}
+
+#else // FEATURE_REJIT
+
+// On architectures that don't support rejit, just keep around some do-nothing
+// stubs so the rest of the VM doesn't have to be littered with #ifdef FEATURE_REJIT
+
+// static
+inline PCODE ReJitManager::DoReJitIfNecessary(PTR_MethodDesc)
+{
+ return NULL;
+}
+
+// static
+inline BOOL ReJitManager::IsReJITEnabled()
+{
+ return FALSE;
+}
+
+// static
+inline DWORD ReJitManager::GetCurrentReJitFlags(PTR_MethodDesc)
+{
+ return 0;
+}
+
+// static
+inline void ReJitManager::InitStatic()
+{
+}
+
+inline ReJitManager::TableLockHolder::TableLockHolder(ReJitManager *)
+{
+}
+
+#endif // FEATURE_REJIT
+
+
+#endif // _REJIT_INL_
diff --git a/src/vm/remoting.cpp b/src/vm/remoting.cpp
new file mode 100644
index 0000000000..99f19db042
--- /dev/null
+++ b/src/vm/remoting.cpp
@@ -0,0 +1,3774 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: remoting.cpp
+//
+
+//
+// Purpose: Defines various remoting-related objects such as
+//          proxies.
+//
+
+//
+
+
+#include "common.h"
+
+#ifdef FEATURE_REMOTING
+#include "virtualcallstub.h"
+#include "excep.h"
+#include "comdelegate.h"
+#include "remoting.h"
+#include "field.h"
+#include "siginfo.hpp"
+#include "stackbuildersink.h"
+#include "eehash.h"
+#include "profilepriv.h"
+#include "message.h"
+#include "eeconfig.h"
+#include "comcallablewrapper.h"
+#include "interopconverter.h"
+#include "asmconstants.h"
+#include "crossdomaincalls.h"
+#include "contractimpl.h"
+#include "typestring.h"
+#include "generics.h"
+#include "appdomain.inl"
+#include "dbginterface.h"
+
+#ifndef DACCESS_COMPILE
+
+// These hold label offsets into non-virtual thunks. They are used by
+// CNonVirtualThunkMgr::DoTraceStub and ::TraceManager to help the
+// debugger figure out where the thunk is going to go.
+DWORD g_dwNonVirtualThunkRemotingLabelOffset = 0;
+DWORD g_dwNonVirtualThunkReCheckLabelOffset = 0;
+
+// Statics
+
+MethodTable *CRemotingServices::s_pMarshalByRefObjectClass;
+MethodTable *CRemotingServices::s_pServerIdentityClass;
+
+MethodDesc *CRemotingServices::s_pRPPrivateInvoke;
+MethodDesc *CRemotingServices::s_pRPInvokeStatic;
+MethodDesc *CRemotingServices::s_pWrapMethodDesc;
+MethodDesc *CRemotingServices::s_pIsCurrentContextOK;
+MethodDesc *CRemotingServices::s_pCheckCast;
+MethodDesc *CRemotingServices::s_pFieldSetterDesc;
+MethodDesc *CRemotingServices::s_pFieldGetterDesc;
+MethodDesc *CRemotingServices::s_pObjectGetTypeDesc;
+MethodDesc *CRemotingServices::s_pGetTypeDesc;
+MethodDesc *CRemotingServices::s_pProxyForDomainDesc;
+MethodDesc *CRemotingServices::s_pServerContextForProxyDesc;
+MethodDesc *CRemotingServices::s_pServerDomainIdForProxyDesc;
+DWORD CRemotingServices::s_dwServerOffsetInRealProxy;
+DWORD CRemotingServices::s_dwSrvIdentityOffsetInRealProxy;
+DWORD CRemotingServices::s_dwIdOffset;
+DWORD CRemotingServices::s_dwTPOrObjOffsetInIdentity;
+DWORD CRemotingServices::s_dwMBRIDOffset;
+DWORD CRemotingServices::s_dwLeaseOffsetInIdentity;
+DWORD CRemotingServices::s_dwURIOffsetInIdentity;
+CrstStatic CRemotingServices::s_RemotingCrst;
+BOOL CRemotingServices::s_fRemotingStarted;
+MethodDesc *CRemotingServices::s_pRenewLeaseOnCallDesc;
+
+
+#ifdef FEATURE_COMINTEROP
+MethodDesc *CRemotingServices::s_pCreateObjectForCom;
+#endif
+
+// CTPMethodTable Statics
+DWORD CTPMethodTable::s_dwCommitedTPSlots;
+DWORD CTPMethodTable::s_dwReservedTPSlots;
+DWORD CTPMethodTable::s_dwReservedTPIndirectionSlotSize;
+DWORD CTPMethodTable::s_dwGCInfoBytes;
+DWORD CTPMethodTable::s_dwMTDataSlots;
+MethodTable *CTPMethodTable::s_pRemotingProxyClass;
+CrstStatic CTPMethodTable::s_TPMethodTableCrst;
+EEThunkHashTable *CTPMethodTable::s_pThunkHashTable;
+BOOL CTPMethodTable::s_fTPTableFieldsInitialized;
+
+#endif // !DACCESS_COMPILE
+
+
+SPTR_IMPL(MethodTable, CTPMethodTable, s_pThunkTable);
+
+#ifndef DACCESS_COMPILE
+
+// CVirtualThunks statics
+CVirtualThunks *CVirtualThunks::s_pVirtualThunks;
+
+// CVirtualThunkMgr statics
+CVirtualThunkMgr *CVirtualThunkMgr::s_pVirtualThunkMgr;
+
+#ifndef HAS_REMOTING_PRECODE
+// CNonVirtualThunk statics
+CNonVirtualThunk *CNonVirtualThunk::s_pNonVirtualThunks;
+SimpleRWLock* CNonVirtualThunk::s_pNonVirtualThunksListLock;
+
+// CNonVirtualThunkMgr statics
+CNonVirtualThunkMgr *CNonVirtualThunkMgr::s_pNonVirtualThunkMgr;
+#endif
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::Initialize public
+//
+// Synopsis: Initializes remoting state
+//
+//+----------------------------------------------------------------------------
+VOID CRemotingServices::Initialize()
+{
+ STANDARD_VM_CONTRACT;
+
+ // Initialize the remoting services critical section
+ s_RemotingCrst.Init(CrstRemoting, CrstFlags(CRST_REENTRANCY|CRST_HOST_BREAKABLE));
+
+ CTPMethodTable::Initialize();
+}
+
+INT32 CRemotingServices::IsTransparentProxy(Object* orTP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ INT32 fIsTPMT = FALSE;
+
+ if(orTP != NULL)
+ {
+ // Check if the supplied object has transparent proxy method table
+ MethodTable *pMT = orTP->GetMethodTable();
+ fIsTPMT = pMT->IsTransparentProxy() ? TRUE : FALSE;
+ }
+
+    LOG((LF_REMOTING, LL_EVERYTHING, "!IsTransparentProxy(0x%x) returning %s",
+ orTP, fIsTPMT ? "TRUE" : "FALSE"));
+
+ return(fIsTPMT);
+}
+
+
+Object* CRemotingServices::GetRealProxy(Object* objTP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF rv = NULL;
+
+ if ((objTP != NULL) && (IsTransparentProxy(objTP)))
+ {
+ _ASSERTE(s_fRemotingStarted);
+ rv = CTPMethodTable::GetRP(OBJECTREF(objTP));
+ }
+
+ LOG((LF_REMOTING, LL_INFO100, "!GetRealProxy(0x%x) returning 0x%x\n", objTP, OBJECTREFToObject(rv)));
+
+ return OBJECTREFToObject(rv);
+}
+
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::EnsureRemotingStarted
+//
+// Synopsis: Startup the remoting services.
+//
+//
+//+----------------------------------------------------------------------------
+VOID CRemotingServices::EnsureRemotingStarted()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!CRemotingServices::s_fRemotingStarted)
+ CRemotingServices::StartRemoting();
+
+ if (!CTPMethodTable::s_fTPTableFieldsInitialized)
+ CTPMethodTable::EnsureFieldsInitialized();
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::StartRemoting private
+//
+// Synopsis: Initialize the static fields of CRemotingServices class
+//
+//
+//+----------------------------------------------------------------------------
+VOID CRemotingServices::StartRemoting()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Acquire the remoting lock before initializing fields
+ GCX_PREEMP();
+
+ CrstHolder ch(&s_RemotingCrst);
+
+ // Make sure that no other thread has initialized the fields
+ if (!s_fRemotingStarted)
+ {
+ InitActivationServicesClass();
+ InitRealProxyClass();
+ InitRemotingProxyClass();
+ InitIdentityClass();
+ InitServerIdentityClass();
+ InitMarshalByRefObjectClass();
+ InitRemotingServicesClass();
+ InitObjectClass();
+ InitLeaseClass();
+
+ // ********* NOTE ************
+ // This must always be the last statement in this block to prevent races
+ //
+ VolatileStore(&s_fRemotingStarted, TRUE);
+ // ********* END NOTE ************
+ }
+}
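+
+// Note on the pattern above: EnsureRemotingStarted reads s_fRemotingStarted
+// without taking the lock, so the VolatileStore is the publication point;
+// everything the Init* routines wrote must be visible before the flag flips
+// to TRUE. In outline (an illustrative sketch; InitEverything stands in for
+// the Init* calls):
+//
+// if (!s_fRemotingStarted) {               // cheap unsynchronized check
+//     CrstHolder ch(&s_RemotingCrst);      // synchronize
+//     if (!s_fRemotingStarted) {           // re-check under the lock
+//         InitEverything();
+//         VolatileStore(&s_fRemotingStarted, TRUE);
+//     }
+// }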
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::InitActivationServicesClass private
+//
+// Synopsis: Extract the method descriptors and fields of ActivationServices class
+//
+//
+//+----------------------------------------------------------------------------
+VOID CRemotingServices::InitActivationServicesClass()
+{
+ STANDARD_VM_CONTRACT;
+
+ s_pIsCurrentContextOK = MscorlibBinder::GetMethod(METHOD__ACTIVATION_SERVICES__IS_CURRENT_CONTEXT_OK);
+#ifdef FEATURE_COMINTEROP
+ s_pCreateObjectForCom = MscorlibBinder::GetMethod(METHOD__ACTIVATION_SERVICES__CREATE_OBJECT_FOR_COM);
+#endif
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::InitRealProxyClass private
+//
+// Synopsis: Extract the method descriptors and fields of Real Proxy class
+//
+//
+//+----------------------------------------------------------------------------
+VOID CRemotingServices::InitRealProxyClass()
+{
+ STANDARD_VM_CONTRACT;
+
+ // Now store the methoddesc of the PrivateInvoke method on the RealProxy class
+ s_pRPPrivateInvoke = MscorlibBinder::GetMethod(METHOD__REAL_PROXY__PRIVATE_INVOKE);
+
+ // Now find the offset to the _identity field inside the
+ // RealProxy class
+ s_dwIdOffset = RealProxyObject::GetOffsetOfIdentity() - Object::GetOffsetOfFirstField();
+
+ s_dwServerOffsetInRealProxy = RealProxyObject::GetOffsetOfServerObject() - Object::GetOffsetOfFirstField();
+
+ s_dwSrvIdentityOffsetInRealProxy = RealProxyObject::GetOffsetOfServerIdentity() - Object::GetOffsetOfFirstField();
+
+ return;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::InitRemotingProxyClass private
+//
+// Synopsis: Extract the method descriptors and fields of RemotingProxy class
+//
+//
+//+----------------------------------------------------------------------------
+VOID CRemotingServices::InitRemotingProxyClass()
+{
+ STANDARD_VM_CONTRACT;
+
+ s_pRPInvokeStatic = MscorlibBinder::GetMethod(METHOD__REMOTING_PROXY__INVOKE);
+
+    // Note: We cannot do this inside CTPMethodTable::InitializeFields;
+    // that causes recursion if, in some situations, only the latter is called.
+    // If you do this you will see asserts when running any process under CorDbg.
+    // This is because jitting of NV methods on MBR objects calls
+    // InitializeFields, and while actually doing that we should not need to
+    // JIT another NV method on some MBR object.
+ CTPMethodTable::s_pRemotingProxyClass = MscorlibBinder::GetClass(CLASS__REMOTING_PROXY);
+ _ASSERTE(CTPMethodTable::s_pRemotingProxyClass);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::InitServerIdentityClass private
+//
+// Synopsis: Extract the method descriptors and fields of ServerIdentity class
+//
+//
+//+----------------------------------------------------------------------------
+VOID CRemotingServices::InitServerIdentityClass()
+{
+ STANDARD_VM_CONTRACT;
+
+ s_pServerIdentityClass = MscorlibBinder::GetClass(CLASS__SERVER_IDENTITY);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::InitIdentityClass private
+//
+// Synopsis: Extract the method descriptors and fields of Identity class
+//
+//
+//+----------------------------------------------------------------------------
+VOID CRemotingServices::InitIdentityClass()
+{
+ STANDARD_VM_CONTRACT;
+
+ s_dwTPOrObjOffsetInIdentity = MscorlibBinder::GetFieldOffset(FIELD__IDENTITY__TP_OR_OBJECT);
+
+ s_dwLeaseOffsetInIdentity = MscorlibBinder::GetFieldOffset(FIELD__IDENTITY__LEASE);
+
+ s_dwURIOffsetInIdentity = MscorlibBinder::GetFieldOffset(FIELD__IDENTITY__OBJURI);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::InitMarshalByRefObjectClass private
+//
+// Synopsis: Extract the method descriptors and fields of MarshalByRefObject class
+//
+//
+//+----------------------------------------------------------------------------
+VOID CRemotingServices::InitMarshalByRefObjectClass()
+{
+ STANDARD_VM_CONTRACT;
+
+ s_pMarshalByRefObjectClass = MscorlibBinder::GetClass(CLASS__MARSHAL_BY_REF_OBJECT);
+ s_dwMBRIDOffset = MarshalByRefObjectBaseObject::GetOffsetOfServerIdentity() - Object::GetOffsetOfFirstField();
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::InitRemotingServicesClass private
+//
+// Synopsis: Extract the method descriptors and fields of RemotingServices class
+//
+//
+//+----------------------------------------------------------------------------
+VOID CRemotingServices::InitRemotingServicesClass()
+{
+ STANDARD_VM_CONTRACT;
+
+ s_pCheckCast = MscorlibBinder::GetMethod(METHOD__REMOTING_SERVICES__CHECK_CAST);
+
+ // Need these to call wrap/unwrap from the VM (message.cpp).
+ // Also used by JIT helpers to wrap/unwrap
+ s_pWrapMethodDesc = MscorlibBinder::GetMethod(METHOD__REMOTING_SERVICES__WRAP);
+ s_pProxyForDomainDesc = MscorlibBinder::GetMethod(METHOD__REMOTING_SERVICES__CREATE_PROXY_FOR_DOMAIN);
+ s_pServerContextForProxyDesc = MscorlibBinder::GetMethod(METHOD__REMOTING_SERVICES__GET_SERVER_CONTEXT_FOR_PROXY);
+ s_pServerDomainIdForProxyDesc = MscorlibBinder::GetMethod(METHOD__REMOTING_SERVICES__GET_SERVER_DOMAIN_ID_FOR_PROXY);
+ s_pGetTypeDesc = MscorlibBinder::GetMethod(METHOD__REMOTING_SERVICES__GET_TYPE);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::InitObjectClass private
+//
+// Synopsis: Extract the method descriptors and fields of Object class
+//
+//
+//+----------------------------------------------------------------------------
+VOID CRemotingServices::InitObjectClass()
+{
+ STANDARD_VM_CONTRACT;
+
+ s_pFieldSetterDesc = MscorlibBinder::GetMethod(METHOD__OBJECT__FIELD_SETTER);
+ s_pFieldGetterDesc = MscorlibBinder::GetMethod(METHOD__OBJECT__FIELD_GETTER);
+ s_pObjectGetTypeDesc = MscorlibBinder::GetMethod(METHOD__OBJECT__GET_TYPE);
+}
+
+VOID CRemotingServices::InitLeaseClass()
+{
+ STANDARD_VM_CONTRACT;
+
+ s_pRenewLeaseOnCallDesc = MscorlibBinder::GetMethod(METHOD__LEASE__RENEW_ON_CALL);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::RequiresManagedActivation private
+//
+// Synopsis: Determine if a config file has been parsed or if there
+// are any attributes on the class that would require us
+// to go into the managed activation codepath.
+//
+//
+// Note: Called by CreateProxyOrObject (JIT_NewCrossContext)
+//
+//+----------------------------------------------------------------------------
+ManagedActivationType __stdcall CRemotingServices::RequiresManagedActivation(TypeHandle ty)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(!ty.IsNull());
+ }
+ CONTRACTL_END;
+
+ MethodTable* pMT = ty.GetMethodTable();
+
+ PREFIX_ASSUME(pMT != NULL);
+ if (!pMT->MayRequireManagedActivation())
+ return NoManagedActivation;
+
+#ifdef _DEBUG
+
+ ManagedActivationType bManaged = NoManagedActivation;
+ if (pMT->IsRemotingConfigChecked())
+ {
+ // We have done work to figure this out in the past ...
+ // use the cached result
+ bManaged = pMT->RequiresManagedActivation() ? ManagedActivation : NoManagedActivation;
+ }
+ else if (pMT->IsContextful() || pMT->GetClass()->HasRemotingProxyAttribute())
+ {
+        // Contextful classes and classes that have a remoting proxy attribute
+        // (whether they are MarshalByRef or Contextful) always take the slow
+        // path of managed activation
+ bManaged = ManagedActivation;
+ }
+ else
+ {
+ // If we have parsed a config file that might have configured
+ // this Type to be activated remotely
+ if (GetAppDomain()->IsRemotingConfigured())
+ {
+ bManaged = ManagedActivation;
+            // We will remember whether the activation actually goes
+            // remote based on whether the managed call to IsCurrentContextOK
+            // returned us a proxy or not
+ }
+
+#ifdef FEATURE_COMINTEROP
+ else if (pMT->IsComObjectType())
+ {
+ bManaged = ComObjectType;
+ }
+#endif // FEATURE_COMINTEROP
+
+ }
+
+#endif // _DEBUG
+
+ if (pMT->RequiresManagedActivation())
+ {
+        // Contextful classes and classes that have a remoting proxy attribute
+        // (whether they are MarshalByRef or Contextful) always take the slow
+        // path of managed activation
+ _ASSERTE(bManaged == ManagedActivation);
+ return ManagedActivation;
+ }
+
+ ManagedActivationType bMng = NoManagedActivation;
+ if (!pMT->IsRemotingConfigChecked())
+ {
+ g_IBCLogger.LogMethodTableAccess(pMT);
+
+ // If we have parsed a config file that might have configured
+ // this Type to be activated remotely
+ if (GetAppDomain()->IsRemotingConfigured())
+ {
+ bMng = ManagedActivation;
+            // We will remember whether the activation actually goes
+            // remote based on whether the managed call to IsCurrentContextOK
+            // returned us a proxy or not
+ }
+
+#ifdef FEATURE_COMINTEROP
+ else if (pMT->IsComObjectType())
+ {
+ bMng = ComObjectType;
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (bMng == NoManagedActivation)
+ {
+ pMT->TrySetRemotingConfigChecked();
+ }
+ }
+
+ _ASSERTE(bManaged == bMng);
+ return bMng;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::CreateProxyOrObject public
+//
+// Synopsis: Determine if the current context is appropriate
+// for activation. If the current context is OK then it creates
+// an object else it creates a proxy.
+//
+//
+// Note: Called by JIT_NewCrossContext
+//
+//+----------------------------------------------------------------------------
+OBJECTREF CRemotingServices::CreateProxyOrObject(MethodTable* pMT,
+ BOOL fIsCom /*default:FALSE*/, BOOL fIsNewObj /*default:FALSE*/)
+ /* fIsCom == Did we come here through CoCreateInstance */
+ /* fIsNewObj == Did we come here through Jit_NewCrossContext (newObj) */
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(!pMT->IsTransparentProxy());
+
+ // By the time we reach here, we have already checked that the class may require
+ // managed activation. This check is made either through the JIT_NewCrossContext helper
+ // or Activator.CreateInstance codepath.
+ PRECONDITION(pMT->MayRequireManagedActivation());
+ }
+ CONTRACTL_END;
+
+ // Ensure remoting has been started.
+ EnsureRemotingStarted();
+
+ // Get the address of IsCurrentContextOK in managed code
+ MethodDesc* pTargetMD = NULL;
+ Object *pServer = NULL;
+
+#ifdef FEATURE_COMINTEROP
+ if(fIsCom)
+ {
+ pTargetMD = CRemotingServices::MDofCreateObjectForCom();
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ pTargetMD = CRemotingServices::MDofIsCurrentContextOK();
+ }
+
+ // Arrays are not created by JIT_NewCrossContext
+ _ASSERTE(!pMT->IsArray());
+
+ // Get the type seen by reflection
+ REFLECTCLASSBASEREF reflectType = (REFLECTCLASSBASEREF) pMT->GetManagedClassObject();
+ LPVOID pvType = NULL;
+ *(REFLECTCLASSBASEREF *)&pvType = reflectType;
+
+ // This will return either an uninitialized object or a proxy
+ pServer = (Object *)CTPMethodTable::CallTarget(pTargetMD, pvType, NULL, (LPVOID)(size_t)(fIsNewObj?1:0));
+
+ if (!pMT->IsContextful() && !pMT->IsComObjectType())
+ {
+ // Cache the result of the activation attempt ...
+ // if a strictly MBR class is not configured for remote
+ // activation we will not go
+ // through this slow path next time!
+ // (see RequiresManagedActivation)
+ if (IsTransparentProxy(pServer))
+ {
+            // Set the flag that this class is remotely activated,
+            // which means activation will go to managed code.
+ pMT->SetRequiresManagedActivation();
+ }
+ else
+ {
+ // Set only the flag that no managed checks are required
+ // for this class next time.
+ pMT->SetRemotingConfigChecked();
+ }
+ }
+
+ LOG((LF_REMOTING, LL_INFO1000, "CreateProxyOrObject returning 0x%p\n", pServer));
+ if (pMT->IsContextful())
+ {
+ COUNTER_ONLY(GetPerfCounters().m_Context.cObjAlloc++);
+ }
+ return ObjectToOBJECTREF(pServer);
+}
+
+
+#ifndef HAS_REMOTING_PRECODE
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::GetStubForNonVirtualMethod public
+//
+// Synopsis: Get a stub for a non virtual method.
+//
+//
+//+----------------------------------------------------------------------------
+Stub* CRemotingServices::GetStubForNonVirtualMethod(MethodDesc* pMD, LPVOID pvAddrOfCode, Stub* pInnerStub)
+{
+ CONTRACT (Stub*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(CheckPointer(pvAddrOfCode));
+ PRECONDITION(CheckPointer(pInnerStub, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ CPUSTUBLINKER sl;
+ Stub* pStub = CTPMethodTable::CreateStubForNonVirtualMethod(pMD, &sl, pvAddrOfCode, pInnerStub);
+
+ RETURN pStub;
+}
+#endif // HAS_REMOTING_PRECODE
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::GetNonVirtualEntryPointForVirtualMethod public
+//
+// Synopsis: Get a thunk for a non-virtual call to a virtual method.
+// Virtual methods do not normally get thunked in the vtable. This
+// is because virtual calls use the object's vtable, and proxied objects
+//           would use the proxy's vtable. Hence a local object (which would
+// have the real vtable) can make virtual calls without going through
+// the thunk.
+// However, if the virtual function is called non-virtually, we have
+// a problem (since this would bypass the proxy's vtable). Since this
+// is not a common case, we fix it by using a stub in such cases.
+//
+//
+//+----------------------------------------------------------------------------
+PCODE CRemotingServices::GetNonVirtualEntryPointForVirtualMethod(MethodDesc* pMD)
+{
+ CONTRACT (PCODE)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(pMD->IsRemotingInterceptedViaVirtualDispatch());
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+#ifdef HAS_REMOTING_PRECODE
+ RETURN pMD->GetLoaderAllocator()->GetFuncPtrStubs()->GetFuncPtrStub(pMD, PRECODE_REMOTING);
+#else
+ GCX_PREEMP();
+ RETURN *CTPMethodTable::GetOrCreateNonVirtualSlotForVirtualMethod(pMD);
+#endif
+}
+
+#ifndef HAS_REMOTING_PRECODE
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::DestroyThunk public
+//
+// Synopsis: Destroy the thunk for the non virtual method.
+//
+//
+//+----------------------------------------------------------------------------
+void CRemotingServices::DestroyThunk(MethodDesc* pMD)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Delegate to a helper routine
+ CTPMethodTable::DestroyThunk(pMD);
+}
+#endif // HAS_REMOTING_PRECODE
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::GetDispatchInterfaceHelper public
+//
+// Synopsis: Returns helper for dispatching interface call into the remoting system
+// with exact MethodDesc. Used for remoting of calls on generic interfaces.
+// The returned helper has MethodDesc calling convention
+//+----------------------------------------------------------------------------
+PCODE CRemotingServices::GetDispatchInterfaceHelper(MethodDesc* pMD)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetEEFuncEntryPoint(CRemotingServices__DispatchInterfaceCall);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::CheckCast public
+//
+// Synopsis: Checks either
+// (1) If the object type supports the given interface OR
+// (2) If the given type is present in the hierarchy of the
+// object type
+//
+//+----------------------------------------------------------------------------
+BOOL CRemotingServices::CheckCast(OBJECTREF orTP, TypeHandle objTy, TypeHandle ty)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(orTP != NULL);
+ PRECONDITION(!objTy.IsNull());
+ PRECONDITION(!ty.IsNull());
+
+ // Object class can never be an interface. We use a separate cached
+ // entry for storing interfaces that the proxy supports.
+ PRECONDITION(!objTy.IsInterface());
+ }
+ CONTRACTL_END;
+
+ // Early out if someone's trying to cast us to a type desc (such as a byref,
+ // array or function pointer).
+ if (ty.IsTypeDesc())
+ return FALSE;
+
+ BOOL fCastOK = FALSE;
+
+ // (1) We are trying to cast to an interface
+ if (ty.IsInterface())
+ {
+ // Do a quick check for interface cast by comparing it against the
+ // cached entry
+ MethodTable *pItfMT = ((TRANSPARENTPROXYREF)orTP)->GetInterfaceMethodTable();
+ if (NULL != pItfMT)
+ {
+ if(pItfMT == ty.GetMethodTable())
+ fCastOK = TRUE;
+ else
+ fCastOK = pItfMT->CanCastToInterface(ty.GetMethodTable());
+ }
+
+ if(!fCastOK)
+ fCastOK = objTy.GetMethodTable()->CanCastToInterface(ty.GetMethodTable());
+ }
+ // (2) Everything else...
+ else
+ {
+ // Walk up the class hierarchy and find a matching class
+ while (ty != objTy)
+ {
+ if (objTy.IsNull())
+ {
+ // Oh-oh, the cast did not succeed. Maybe we have to refine
+                // the proxy to match the client's view
+ break;
+ }
+
+ // Continue searching
+ objTy = objTy.GetParent();
+ }
+
+ if(objTy == ty)
+ fCastOK = TRUE;
+ }
+
+ return fCastOK;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::CheckCast public
+//
+// Synopsis: Refine the type hierarchy that the proxy represents to match
+//           the client's view. If the client is trying to cast the proxy
+//           to a type not supported by the server object then the check
+//           fails and we return FALSE.
+//
+//
+//+----------------------------------------------------------------------------
+BOOL CRemotingServices::CheckCast(OBJECTREF orTP, TypeHandle ty)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(orTP != NULL);
+ PRECONDITION(!ty.IsNull());
+ }
+ CONTRACTL_END;
+
+ BOOL fCastOK = FALSE;
+
+ GCPROTECT_BEGIN(orTP);
+
+ // Make sure the type being cast to has been restored.
+ ty.CheckRestore();
+
+ MethodTable *pMT = orTP->GetMethodTable();
+
+ // Make sure that we have a transparent proxy
+ _ASSERTE(pMT->IsTransparentProxy());
+
+ pMT = orTP->GetTrueMethodTable();
+
+ // Do a cast check without taking a lock
+ fCastOK = CheckCast(orTP, TypeHandle(pMT), ty);
+
+ if (!fCastOK && !ty.IsTypeDesc())
+ {
+ // We reach here only if any of the types in the current type hierarchy
+ // represented by the proxy does not match the given type.
+ // Call a helper routine in managed RemotingServices to find out
+ // whether the server object supports the given type
+ MethodDesc* pTargetMD = MDofCheckCast();
+ fCastOK = CTPMethodTable::CheckCast(pTargetMD, (TRANSPARENTPROXYREF)orTP, ty);
+ }
+
+ if (fCastOK)
+ {
+ // Do the type equivalence tests
+ CRealProxy::UpdateOptFlags(orTP);
+ }
+
+ GCPROTECT_END();
+
+ LOG((LF_REMOTING, LL_INFO100, "CheckCast returning %s\n", (fCastOK ? "TRUE" : "FALSE")));
+
+ return (fCastOK);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::FieldAccessor public
+//
+// Synopsis: Sets/Gets the value of the field given an instance or a proxy
+//
+//+----------------------------------------------------------------------------
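+// Example (illustrative): for a MarshalByRefObject in this appdomain the
+// field is read or written directly at its address; for a transparent proxy
+// the access is remoted through the managed field accessor path (see
+// CallFieldAccessor below).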
+void CRemotingServices::FieldAccessor(FieldDesc* pFD, OBJECTREF o, LPVOID pVal, BOOL fIsGetter)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pFD));
+ PRECONDITION(o != NULL);
+ PRECONDITION(CheckPointer(pVal, NULL_OK));
+ PRECONDITION(o->IsTransparentProxy() || o->GetMethodTable()->IsMarshaledByRef());
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = o->GetMethodTable();
+ TypeHandle fldClass;
+ TypeHandle thRealObjectType;
+
+ GCPROTECT_BEGIN(o);
+ GCPROTECT_BEGININTERIOR(pVal);
+
+ // If the field descriptor type is not exact (i.e. it's a representative
+ // descriptor for a generic field) then we need to be more careful
+ // determining the properties of the field.
+ if (pFD->IsSharedByGenericInstantiations())
+ {
+ // We need to resolve the field type in the context of the actual object
+ // it belongs to. If we've been handed a proxy we have to go grab the
+ // proxied type for this to work.
+ thRealObjectType = o->GetTrueTypeHandle();
+
+ // Evaluate the field signature in the type context of the parent object.
+ MetaSig sig(pFD, thRealObjectType);
+ sig.NextArg();
+ fldClass = sig.GetLastTypeHandleThrowing();
+ }
+ else
+ {
+ fldClass = pFD->GetFieldTypeHandleThrowing();
+ }
+
+ GCPROTECT_END();
+ GCPROTECT_END();
+
+ CorElementType fieldType = fldClass.GetSignatureCorElementType();
+ UINT cbSize = GetSizeForCorElementType(fieldType);
+ BOOL fIsGCRef = CorTypeInfo::IsObjRef(fieldType);
+ BOOL fIsByValue = fieldType == ELEMENT_TYPE_VALUETYPE;
+
+ if(pMT->IsMarshaledByRef())
+ {
+ GCX_FORBID();
+
+ _ASSERTE(!o->IsTransparentProxy());
+
+ // This is a reference to a real object. Get/Set the field value
+ // and return
+ LPVOID pFieldAddress = pFD->GetAddress((LPVOID)OBJECTREFToObject(o));
+ LPVOID pDest = (fIsGetter ? pVal : pFieldAddress);
+ LPVOID pSrc = (fIsGetter ? pFieldAddress : pVal);
+ if(fIsGCRef && !fIsGetter)
+ {
+ SetObjectReference((OBJECTREF*)pDest, ObjectToOBJECTREF(*(Object **)pSrc), o->GetAppDomain());
+ }
+ else if(fIsByValue)
+ {
+ CopyValueClass(pDest, pSrc, fldClass.AsMethodTable(), o->GetAppDomain());
+ }
+ else
+ {
+ CopyDestToSrc(pDest, pSrc, cbSize);
+ }
+ }
+ else
+ {
+ // Call the managed code to start the field access call
+ CallFieldAccessor(pFD, o, pVal, fIsGetter, fIsByValue, fIsGCRef, thRealObjectType, fldClass, fieldType, cbSize);
+ }
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::CopyDestToSrc private
+//
+// Synopsis: Copies the specified number of bytes from pSrc to pDest
+// (note: the direction is the reverse of what the name suggests)
+//
+//
+//+----------------------------------------------------------------------------
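+// Example (illustrative): an 8-byte field is copied with a single
+// VolatileStore of an INT64, so concurrent readers can never observe a torn
+// value. The UNREACHABLE() default documents that callers route GC-ref and
+// value-type fields through dedicated paths, leaving only the primitive
+// widths 1, 2, 4 and 8 here.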
+VOID CRemotingServices::CopyDestToSrc(LPVOID pDest, LPVOID pSrc, UINT cbSize)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDest));
+ PRECONDITION(CheckPointer(pSrc));
+ }
+ CONTRACTL_END;
+
+ switch (cbSize)
+ {
+ case 1:
+ VolatileStore((INT8*)pDest, *(INT8*)pSrc);
+ break;
+
+ case 2:
+ VolatileStore((INT16*)pDest, *(INT16*)pSrc);
+ break;
+
+ case 4:
+ VolatileStore((INT32*)pDest, *(INT32*)pSrc);
+ break;
+
+ case 8:
+ VolatileStore((INT64*)pDest, *(INT64*)pSrc);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::CallFieldAccessor private
+//
+// Synopsis: Sets up the arguments and calls RealProxy::FieldAccessor
+//
+//
+//+----------------------------------------------------------------------------
+VOID CRemotingServices::CallFieldAccessor(FieldDesc* pFD,
+ OBJECTREF o,
+ VOID* pVal,
+ BOOL fIsGetter,
+ BOOL fIsByValue,
+ BOOL fIsGCRef,
+ TypeHandle ty,
+ TypeHandle fldTy,
+ CorElementType fieldType,
+ UINT cbSize)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pFD));
+ PRECONDITION(o != NULL);
+ PRECONDITION(CheckPointer(pVal));
+ }
+ CONTRACTL_END;
+
+ //****************************WARNING******************************
+ // GC Protect all non-primitive variables
+ //*****************************************************************
+
+ FieldArgs fieldArgs;
+ fieldArgs.obj = NULL;
+ fieldArgs.val = NULL;
+ fieldArgs.typeName = NULL;
+ fieldArgs.fieldName = NULL;
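+ // Every member is nulled before the GCPROTECT_BEGIN below so that the GC
+ // never scans an uninitialized reference once the struct is reported.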
+
+ GCPROTECT_BEGIN(fieldArgs);
+ GCPROTECT_BEGININTERIOR(pVal);
+
+ fieldArgs.obj = o;
+
+ // protect the field value if it is a gc-ref type
+ if(fIsGCRef)
+ fieldArgs.val = ObjectToOBJECTREF(*(Object **)pVal);
+
+
+ // Set up the arguments
+
+ // Argument 1: String typeName
+ // Argument 2: String fieldName
+ // Get the type name and field name strings
+ GetTypeAndFieldName(&fieldArgs, pFD, ty);
+
+ // Argument 3: Object val
+ OBJECTREF val = NULL;
+ if(!fIsGetter)
+ {
+ // If we are setting a field value then we create a variant data
+ // structure to hold the field value
+ // Extract the field from the gc protected structure if it is an object
+ // else use the value passed to the function
+ LPVOID pvFieldVal = (fIsGCRef ? (LPVOID)&(fieldArgs.val) : pVal);
+ // <REVISIT_TODO>: This can cause a GC. We need some way to protect the variant
+ // data</REVISIT_TODO>
+ OBJECTREF *lpVal = &val;
+ GCPROTECT_BEGININTERIOR (pvFieldVal);
+ CMessage::GetObjectFromStack(lpVal, &pvFieldVal, fieldType, fldTy, TRUE);
+ GCPROTECT_END ();
+ }
+
+ // Get the method descriptor of the call
+ MethodDesc *pMD = (fIsGetter ? MDofFieldGetter() : MDofFieldSetter());
+
+ // Call the field accessor function
+ //////////////////////////////// GETTER ///////////////////////////////////
+ if(fIsGetter)
+ {
+ // Set up the return value
+ OBJECTREF oRet = NULL;
+
+ GCPROTECT_BEGIN (oRet);
+ CRemotingServices__CallFieldGetter(pMD,
+ (LPVOID)OBJECTREFToObject(fieldArgs.obj),
+ (LPVOID)OBJECTREFToObject(fieldArgs.typeName),
+ (LPVOID)OBJECTREFToObject(fieldArgs.fieldName),
+ (LPVOID)&(oRet));
+
+ // If we are getting a field value then extract the field value
+ // based on the type of the field
+ if(fIsGCRef)
+ {
+ // Do a check cast to ensure that the field type and the
+ // return value are compatible
+ OBJECTREF orRet = oRet;
+ OBJECTREF orSaved = orRet;
+ if(IsTransparentProxy(OBJECTREFToObject(orRet)))
+ {
+ GCPROTECT_BEGIN(orRet);
+
+ if(!CheckCast(orRet, fldTy))
+ COMPlusThrow(kInvalidCastException, W("Arg_ObjObj"));
+
+ orSaved = orRet;
+
+ GCPROTECT_END();
+ }
+
+ *(OBJECTREF *)pVal = orSaved;
+ }
+ else if (fIsByValue)
+ {
+ // Copy from the source to the destination
+ if (oRet != NULL)
+ {
+ fldTy.GetMethodTable()->UnBoxIntoUnchecked(pVal, oRet);
+ }
+ }
+ else
+ {
+ if (oRet != NULL)
+ CopyDestToSrc(pVal, oRet->UnBox(), cbSize);
+ }
+ GCPROTECT_END ();
+ }
+ ///////////////////////// SETTER //////////////////////////////////////////
+ else
+ {
+ CRemotingServices__CallFieldSetter(pMD,
+ (LPVOID)OBJECTREFToObject(fieldArgs.obj),
+ (LPVOID)OBJECTREFToObject(fieldArgs.typeName),
+ (LPVOID)OBJECTREFToObject(fieldArgs.fieldName),
+ (LPVOID)OBJECTREFToObject(val));
+ }
+
+ GCPROTECT_END(); // pVal
+ GCPROTECT_END(); // fieldArgs
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::GetTypeAndFieldName private
+//
+// Synopsis: Get the declaring type name and field name of the given
+// field descriptor
+//
+//
+//+----------------------------------------------------------------------------
+VOID CRemotingServices::GetTypeAndFieldName(FieldArgs *pArgs, FieldDesc *pFD, TypeHandle thEnclosingClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pArgs));
+ PRECONDITION(CheckPointer(pFD));
+ }
+ CONTRACTL_END;
+
+ TypeHandle thDeclaringType = !thEnclosingClass.IsNull() ?
+ pFD->GetExactDeclaringType(thEnclosingClass.AsMethodTable()) : pFD->GetEnclosingMethodTable();
+ _ASSERTE(!thDeclaringType.IsNull());
+
+ // Extract the type name and field name string
+ // <REVISIT_TODO>FUTURE: Put this in the reflection data structure cache TarunA 11/26/00</REVISIT_TODO>
+ StackSString ss;
+ TypeString::AppendType(ss, thDeclaringType, TypeString::FormatNamespace | TypeString::FormatFullInst);
+ pArgs->typeName = StringObject::NewString(ss);
+
+ pArgs->fieldName = StringObject::NewString(pFD->GetName());
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::MatchField private
+//
+// Synopsis: Find out whether the given field name is the same as the
+// name of the given field descriptor.
+//
+//
+//+----------------------------------------------------------------------------
+BOOL CRemotingServices::MatchField(FieldDesc* pCurField, LPCUTF8 szFieldName)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCurField));
+ PRECONDITION(CheckPointer(szFieldName));
+ }
+ CONTRACTL_END;
+
+ // Get the name of the field
+ LPCUTF8 szCurFieldName;
+ if (FAILED(pCurField->GetName_NoThrow(&szCurFieldName)))
+ {
+ return FALSE;
+ }
+
+ return strcmp(szCurFieldName, szFieldName) == 0;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::Wrap public
+//
+// Synopsis: Wrap a contextful object to create a proxy
+// Delegates to a helper method to do the actual work
+//
+//
+//+----------------------------------------------------------------------------
+OBJECTREF CRemotingServices::Wrap(OBJECTREF obj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Basic sanity check
+ VALIDATEOBJECTREF(obj);
+
+ // ******************* WARNING ********************************************
+ // Do not throw any exceptions or provoke GC without setting up a frame.
+ // At present it is the caller's responsibility to set up a frame that can
+ // handle exceptions.
+ // ************************************************************************
+ OBJECTREF orProxy = obj;
+ if(obj != NULL && (obj->GetMethodTable()->IsContextful()))
+ {
+ if(!IsTransparentProxy(OBJECTREFToObject(obj)))
+ {
+ // See if we can extract the proxy from the object
+ orProxy = GetProxyFromObject(obj);
+ if(orProxy == NULL)
+ {
+ // ask the remoting services to wrap the object
+ orProxy = CRemotingServices::WrapHelper(obj);
+ }
+ }
+ }
+
+ return orProxy;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::WrapHelper public
+//
+// Synopsis: Wrap an object to return a proxy. This function assumes that
+// an fcall frame is already set up.
+//
+//+----------------------------------------------------------------------------
+OBJECTREF CRemotingServices::WrapHelper(OBJECTREF obj)
+{
+ // Basic sanity check
+ VALIDATEOBJECTREF(obj);
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(obj != NULL);
+ PRECONDITION(!IsTransparentProxy(OBJECTREFToObject(obj)));
+ PRECONDITION(obj->GetMethodTable()->IsContextful());
+ }
+ CONTRACTL_END;
+
+
+ // Default return value indicates an error
+ OBJECTREF newobj = NULL;
+ MethodDesc* pTargetMD = NULL;
+
+ // Ensure remoting has been started.
+ EnsureRemotingStarted();
+
+ // Get the address of wrap in managed code
+ pTargetMD = CRemotingServices::MDofWrap();
+
+ // call the managed method to wrap
+ newobj = ObjectToOBJECTREF( (Object *)CTPMethodTable::CallTarget(pTargetMD,
+ (LPVOID)OBJECTREFToObject(obj),
+ NULL));
+
+ return newobj;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::GetProxyFromObject public
+//
+// Synopsis: Extract the proxy from the field in the
+// ContextBoundObject class
+//
+//
+//+----------------------------------------------------------------------------
+OBJECTREF CRemotingServices::GetProxyFromObject(OBJECTREF obj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(obj != NULL);
+ }
+ CONTRACTL_END;
+
+ // Basic sanity check
+ VALIDATEOBJECTREF(obj);
+
+ // We can derive a proxy for contextful types only.
+ _ASSERTE(obj->GetMethodTable()->IsContextful());
+
+ OBJECTREF srvID = (OBJECTREF)(Object*)obj->GetPtrOffset(s_dwMBRIDOffset);
+ OBJECTREF orProxy = NULL;
+
+ if (srvID != NULL)
+ orProxy = (OBJECTREF)(Object*)srvID->GetPtrOffset(s_dwTPOrObjOffsetInIdentity);
+
+ // This should either be null or a proxy type
+ _ASSERTE((orProxy == NULL) || IsTransparentProxy(OBJECTREFToObject(orProxy)));
+
+ return orProxy;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::IsProxyToRemoteObject public
+//
+// Synopsis: Check whether the proxy refers to a remote object:
+// (1) TRUE : if the object is non-local (i.e. outside this process)
+// (2) FALSE : otherwise
+//
+//+----------------------------------------------------------------------------
+BOOL CRemotingServices::IsProxyToRemoteObject(OBJECTREF obj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(obj != NULL);
+ }
+ CONTRACTL_END;
+
+ // Basic sanity check
+ VALIDATEOBJECTREF(obj);
+
+ // If remoting is not started, for now let us just return FALSE
+ if(!s_fRemotingStarted)
+ return FALSE;
+
+ if(!obj->IsTransparentProxy())
+ return FALSE;
+
+ // so it is a transparent proxy
+ AppDomain *pDomain = GetServerDomainForProxy(obj);
+ if(pDomain != NULL)
+ return TRUE;
+
+ return FALSE;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::GetObjectFromProxy public
+//
+// Synopsis: Extract the object given a proxy.
+//
+//
+//+----------------------------------------------------------------------------
+OBJECTREF CRemotingServices::GetObjectFromProxy(OBJECTREF obj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(obj != NULL);
+ PRECONDITION(s_fRemotingStarted);
+ PRECONDITION(IsTransparentProxy(OBJECTREFToObject(obj)));
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // Basic sanity check
+ VALIDATEOBJECTREF(obj);
+
+ OBJECTREF oref = NULL;
+ if (CTPMethodTable__GenericCheckForContextMatch(OBJECTREFToObject(obj)))
+ {
+ OBJECTREF objRef = ObjectToOBJECTREF(GetRealProxy(OBJECTREFToObject(obj)));
+ oref = (OBJECTREF)(Object*)objRef->GetPtrOffset(s_dwServerOffsetInRealProxy);
+ if (oref != NULL)
+ obj = oref;
+ }
+
+ return obj;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::GetServerIdentityFromProxy private
+//
+// Synopsis: Gets the server identity (if one exists) from a proxy
+//
+//
+//
+//
+//+----------------------------------------------------------------------------
+OBJECTREF CRemotingServices::GetServerIdentityFromProxy(OBJECTREF obj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(obj != NULL);
+ PRECONDITION(IsTransparentProxy(OBJECTREFToObject(obj)));
+ }
+ CONTRACTL_END;
+
+
+ // Extract the real proxy underlying the transparent proxy
+ OBJECTREF pObj = ObjectToOBJECTREF(GetRealProxy(OBJECTREFToObject(obj)));
+
+ OBJECTREF id = NULL;
+
+ // Extract the identity object
+ pObj = (OBJECTREF)(Object*)pObj->GetPtrOffset(s_dwIdOffset);
+
+ // Extract the _identity from the real proxy only if it is an instance of
+ // ServerIdentity
+ if((pObj != NULL) && IsInstanceOfServerIdentity(pObj->GetMethodTable()))
+ id = pObj;
+
+ return id;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::GetServerDomainForProxy public
+//
+// Synopsis: Returns the AppDomain corresponding to the server
+// if the proxy and the server are in the same process.
+//
+//
+//+----------------------------------------------------------------------------
+AppDomain *CRemotingServices::GetServerDomainForProxy(OBJECTREF proxy)
+{
+ CONTRACT (AppDomain*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(proxy != NULL);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // call the managed method
+ Context *pContext = (Context *)GetServerContextForProxy(proxy);
+ if (pContext)
+ RETURN pContext->GetDomain();
+ else
+ RETURN NULL;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::GetServerDomainIdForProxy public
+//
+// Synopsis: Returns the AppDomain ID corresponding to the server
+// if the proxy and the server are in the same process.
+// Returns 0 if it cannot be determined.
+//
+//
+//+----------------------------------------------------------------------------
+int CRemotingServices::GetServerDomainIdForProxy(OBJECTREF proxy)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(proxy != NULL);
+ PRECONDITION(IsTransparentProxy(OBJECTREFToObject(proxy)));
+ }
+ CONTRACTL_END;
+
+ // Get the address of GetDomainIdForProxy in managed code
+ MethodDesc* pTargetMD = CRemotingServices::MDofGetServerDomainIdForProxy();
+
+ // This will just read the appDomain ID from the marshaled data
+ // for the proxy. It returns 0 if the proxy is to a server in another
+ // process. It may also return 0 if it cannot determine the server
+ // domain ID (e.g. for well-known object proxies).
+
+ // call the managed method
+ // <REVISIT_TODO>This cast to Int32 actually causes a potential loss
+ // of data.</REVISIT_TODO>
+ return (int)(INT_PTR)CTPMethodTable::CallTarget(
+ pTargetMD,
+ (LPVOID)OBJECTREFToObject(proxy),
+ NULL);
+}
+
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::GetServerContextForProxy public
+//
+// Synopsis: Returns the Context corresponding to the server
+// if the proxy and the server are in the same process.
+//
+//
+//+----------------------------------------------------------------------------
+Context *CRemotingServices::GetServerContextForProxy(OBJECTREF proxy)
+{
+ CONTRACT (Context*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(proxy != NULL);
+ PRECONDITION(IsTransparentProxy(OBJECTREFToObject(proxy)));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // Get the address of GetAppDomainForProxy in managed code
+ MethodDesc* pTargetMD = CRemotingServices::MDofGetServerContextForProxy();
+
+ // This will return the correct VM Context object for the server if
+ // the proxy is a true cross-domain proxy to a server in another domain
+ // in the same process. The managed method will assert if called on a proxy
+ // which is either half-built or does not have an ObjRef ... which may
+ // happen, e.g., if the proxy and the server are in the same appdomain.
+
+ // we return NULL if the server object for the proxy is in another
+ // process or if the appDomain for the server is invalid or if we cannot
+ // determine the context (eg. well known object proxies).
+
+ // call the managed method
+ RETURN (Context *)CTPMethodTable::CallTarget(
+ pTargetMD,
+ (LPVOID)OBJECTREFToObject(proxy),
+ NULL);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::CreateProxyForDomain public
+//
+// Synopsis: Create a proxy for the app domain object by calling marshal
+// inside the newly created domain and unmarshaling in the old
+// domain
+//
+//
+//+----------------------------------------------------------------------------
+OBJECTREF CRemotingServices::CreateProxyForDomain(AppDomain* pDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pDomain));
+ }
+ CONTRACTL_END;
+
+ // Ensure remoting has been started.
+ EnsureRemotingStarted();
+
+ MethodDesc* pTargetMD = MDOfCreateProxyForDomain();
+
+ // Call the managed method which will marshal and unmarshal the
+ // appdomain object to create the proxy
+
+ // We pass the ContextID of the default context of the new appDomain
+ // object. This helps the boot-strapping! (i.e. entering the new domain
+ // to marshal itself out).
+
+ Object *proxy = (Object *)CTPMethodTable::CallTarget(
+ pTargetMD,
+ (LPVOID)(DWORD_PTR)pDomain->GetId().m_dwId,
+ (LPVOID)pDomain->GetDefaultContext());
+ return ObjectToOBJECTREF(proxy);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::GetClass public
+//
+// Synopsis: Extract the true class of the object whose proxy is given.
+//
+//
+//
+//+----------------------------------------------------------------------------
+REFLECTCLASSBASEREF CRemotingServices::GetClass(OBJECTREF pThis)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(pThis != NULL);
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refClass = NULL;
+ MethodTable *pMT = NULL;
+
+ GCPROTECT_BEGIN(pThis);
+
+ // For proxies to objects in the same appdomain, we always know the
+ // correct type
+ if(GetServerIdentityFromProxy(pThis) != NULL)
+ {
+ pMT = pThis->GetTrueMethodTable();
+ }
+ else
+ {
+ // For everything else either we have refined the proxy to its correct type
+ // or we have to consult the objref to get the true type
+
+ MethodDesc* pTargetMD = CRemotingServices::MDofGetType();
+
+ refClass = (REFLECTCLASSBASEREF)(ObjectToOBJECTREF((Object *)CTPMethodTable::CallTarget(pTargetMD,
+ (LPVOID)OBJECTREFToObject(pThis), NULL)));
+
+ if(refClass == NULL)
+ {
+ // There was no objref associated with the proxy or it is a proxy
+ // that we do not understand.
+ // In this case, we return the class that is stored in the proxy
+ pMT = pThis->GetTrueMethodTable();
+ }
+
+ _ASSERTE(refClass != NULL || pMT != NULL);
+
+ // Refine the proxy to the class just retrieved
+ if(refClass != NULL)
+ {
+ CTPMethodTable::RefineProxy((TRANSPARENTPROXYREF)pThis, refClass->GetType());
+ }
+ }
+
+ if (refClass == NULL)
+ {
+ PREFIX_ASSUME(pMT != NULL);
+ refClass = (REFLECTCLASSBASEREF)pMT->GetManagedClassObject();
+ }
+
+ GCPROTECT_END();
+
+ _ASSERTE(refClass != NULL);
+ return refClass;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRealProxy::SetStubData public
+//
+// Synopsis: Set the stub data in the transparent proxy
+//
+//+----------------------------------------------------------------------------
+FCIMPL2(VOID, CRealProxy::SetStubData, Object* orRPUNSAFE, Object* orStubDataUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ BOOL fThrow = FALSE;
+ REALPROXYREF orRP = (REALPROXYREF)ObjectToOBJECTREF(orRPUNSAFE);
+ OBJECTREF orStubData = ObjectToOBJECTREF(orStubDataUNSAFE);
+
+ if (orRP != NULL && orStubData != NULL)
+ {
+ TRANSPARENTPROXYREF orTP = orRP->GetTransparentProxy();
+ if (orTP != NULL)
+ {
+ orTP->SetStubData(orStubData);
+ }
+ else
+ {
+ fThrow = TRUE;
+ }
+ }
+ else
+ {
+ fThrow = TRUE;
+ }
+
+ if(fThrow)
+ FCThrowVoid(kArgumentNullException);
+}
+FCIMPLEND
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRealProxy::GetStubData public
+//
+// Synopsis: Get the stub data in the transparent proxy
+//
+//+----------------------------------------------------------------------------
+FCIMPL1(Object*, CRealProxy::GetStubData, Object* orRPUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ BOOL fThrow = FALSE;
+ REALPROXYREF orRP = (REALPROXYREF)ObjectToOBJECTREF(orRPUNSAFE);
+ OBJECTREF orRet = NULL;
+
+ if (orRP != NULL)
+ {
+ TRANSPARENTPROXYREF orTP = orRP->GetTransparentProxy();
+ if (orTP != NULL)
+ orRet = orTP->GetStubData();
+ else
+ fThrow = TRUE;
+ }
+ else
+ {
+ fThrow = TRUE;
+ }
+
+ if(fThrow)
+ FCThrow(kArgumentNullException);
+
+ return OBJECTREFToObject(orRet);
+}
+FCIMPLEND
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRealProxy::GetDefaultStub public
+//
+// Synopsis: Get the default stub implemented by us which matches contexts
+//
+//+----------------------------------------------------------------------------
+FCIMPL0(LPVOID, CRealProxy::GetDefaultStub)
+{
+ FCALL_CONTRACT;
+
+ return (LPVOID)CRemotingServices__CheckForContextMatch;
+}
+FCIMPLEND
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRealProxy::GetStub public
+//
+// Synopsis: Get the stub pointer in the transparent proxy
+//
+//+----------------------------------------------------------------------------
+FCIMPL1(LPVOID, CRealProxy::GetStub, Object* orRPUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(orRPUNSAFE));
+ }
+ CONTRACTL_END;
+
+ REALPROXYREF orRP = (REALPROXYREF)ObjectToOBJECTREF(orRPUNSAFE);
+ TRANSPARENTPROXYREF orTP = orRP->GetTransparentProxy();
+
+ return orTP->GetStub();
+}
+FCIMPLEND
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRealProxy::GetProxiedType public
+//
+// Synopsis: Get the type that is represented by the transparent proxy
+//
+//+----------------------------------------------------------------------------
+FCIMPL1(Object*, CRealProxy::GetProxiedType, Object* orRPUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF refClass = NULL;
+ REALPROXYREF orRP = (REALPROXYREF)ObjectToOBJECTREF(orRPUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_RET_1(orRP);
+
+ TRANSPARENTPROXYREF orTP = orRP->GetTransparentProxy();
+
+ refClass = CRemotingServices::GetClass(orTP);
+ _ASSERTE(refClass != NULL);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(refClass);
+}
+FCIMPLEND
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::Initialize public
+//
+// Synopsis: Initializes data structures needed for managing transparent
+// proxies
+//
+//+----------------------------------------------------------------------------
+VOID CTPMethodTable::Initialize()
+{
+ STANDARD_VM_CONTRACT;
+
+ s_TPMethodTableCrst.Init(CrstTPMethodTable);
+}
+
+//+----------------------------------------------------------------------------
+
+PCODE CTPMethodTable::GetTPStubEntryPoint()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetEEFuncEntryPoint(TransparentProxyStub);
+}
+
+PCODE CTPMethodTable::GetDelegateStubEntryPoint()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetEEFuncEntryPoint(TransparentProxyStub_CrossContext);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::EnsureFieldsInitialized private
+//
+// Synopsis: Initialize the static fields of CTPMethodTable class
+// and the thunk manager classes
+//
+//
+//+----------------------------------------------------------------------------
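+// (This is a double-checked locking pattern: the flag is tested outside
+// s_TPMethodTableCrst, re-tested under it, and set only as the very last
+// statement so that a racing reader which sees the flag set also sees a
+// fully initialized thunk table.)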
+void CTPMethodTable::EnsureFieldsInitialized()
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(s_fTPTableFieldsInitialized);
+ }
+ CONTRACT_END;
+
+ if (!s_fTPTableFieldsInitialized)
+ {
+ GCX_PREEMP();
+
+ // Load the Transparent Proxy class (do this before we enter the critical section)
+ MethodTable* pTPMT = MscorlibBinder::GetClass(CLASS__TRANSPARENT_PROXY);
+ _ASSERTE(pTPMT->IsTransparentProxy());
+
+ CrstHolder ch(&s_TPMethodTableCrst);
+
+ if(!s_fTPTableFieldsInitialized)
+ {
+ // Obtain size of GCInfo stored above the method table
+ CGCDesc *pGCDesc = CGCDesc::GetCGCDescFromMT(pTPMT);
+ BYTE *pGCTop = (BYTE *) pGCDesc->GetLowestSeries();
+ s_dwGCInfoBytes = (DWORD)(((BYTE *) pTPMT) - pGCTop);
+ _ASSERTE((s_dwGCInfoBytes & 3) == 0);
+
+ // Obtain the number of bytes to be copied for creating the TP
+ // method tables containing thunks
+ _ASSERTE(((s_dwGCInfoBytes + sizeof(MethodTable)) & (sizeof(PCODE)-1)) == 0);
+ s_dwMTDataSlots = ((s_dwGCInfoBytes + sizeof(MethodTable)) / sizeof(PCODE));
+ _ASSERTE(sizeof(MethodTable) == MethodTable::GetVtableOffset());
+
+ // We rely on the number of interfaces implemented by the
+ // Transparent proxy being 0, so that InterfaceInvoke hints
+ // fail and trap to InnerFailStub which also fails and
+ // in turn traps to FailStubWorker. In FailStubWorker, we
+ // determine the class being proxied and return the correct slot.
+ _ASSERTE(pTPMT->GetNumInterfaces() == 0);
+
+ CVirtualThunkMgr::InitVirtualThunkManager();
+
+ // Create the global thunk table and set the cycle between
+ // the transparent proxy class and the global thunk table
+ CreateTPMethodTable(pTPMT);
+
+#ifdef HAS_REMOTING_PRECODE
+ // Activate the remoting precode helper
+ ActivatePrecodeRemotingThunk();
+#endif // HAS_REMOTING_PRECODE
+
+ // NOTE: Setting this flag must always be the last statement in this
+ // block to prevent races
+ s_fTPTableFieldsInitialized = TRUE;
+ }
+ }
+
+ RETURN;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::GetRP public
+//
+// Synopsis: Get the real proxy backing the transparent proxy
+//
+//+----------------------------------------------------------------------------
+REALPROXYREF CTPMethodTable::GetRP(OBJECTREF orTP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(orTP != NULL);
+ PRECONDITION(orTP->IsTransparentProxy());
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ return (REALPROXYREF)(((TRANSPARENTPROXYREF)orTP)->GetRealProxy());
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::GetMethodTableBeingProxied public
+//
+// Synopsis: Get the real type backing the transparent proxy
+//
+//+----------------------------------------------------------------------------
+MethodTable * CTPMethodTable::GetMethodTableBeingProxied(OBJECTREF orTP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ PRECONDITION(orTP != NULL);
+ PRECONDITION(orTP->IsTransparentProxy());
+ }
+ CONTRACTL_END;
+
+ return ((TRANSPARENTPROXYREF)orTP)->GetMethodTableBeingProxied();
+}
+
+#define PAGE_ROUND_UP(cb) (((cb) + g_SystemInfo.dwAllocationGranularity) & ~(g_SystemInfo.dwAllocationGranularity - 1))
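+// Example (illustrative): with dwAllocationGranularity = 0x10000,
+// PAGE_ROUND_UP(0x12345) == 0x20000; note that an exact multiple is also
+// bumped a full granule (PAGE_ROUND_UP(0x10000) == 0x20000), which merely
+// over-reserves slightly below.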
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::CreateTPMethodTable private
+//
+// Synopsis: (1) Reserves a transparent proxy method table that is large
+// enough to support the largest vtable
+// (2) Commits memory for the GC info of the global thunk table and
+// sets the cycle between the transparent proxy class and the
+// global thunk table.
+//
+//+----------------------------------------------------------------------------
+
+void CTPMethodTable::CreateTPMethodTable(MethodTable* pTPMT)
+{
+ CONTRACT_VOID {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(s_pThunkTable));
+ } CONTRACT_END;
+
+ // The largest possible vtable size is 64K slots
+ DWORD dwMaxSlots = 64*1024;
+
+ // Allocate virtual memory that is big enough to hold a method table
+ // of the maximum possible size
+ DWORD dwReserveSize = 0;
+ DWORD dwMethodTableReserveSize = (DWORD)(s_dwMTDataSlots * sizeof(PCODE));
+ s_dwReservedTPIndirectionSlotSize = MethodTable::GetNumVtableIndirections(dwMaxSlots) * sizeof(PTR_PCODE);
+ dwMethodTableReserveSize += s_dwReservedTPIndirectionSlotSize;
+
+ dwMethodTableReserveSize += (DWORD)(dwMaxSlots * sizeof(PCODE));
+ dwReserveSize = PAGE_ROUND_UP(dwMethodTableReserveSize);
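+
+ // Resulting reservation layout (illustrative):
+ //   [GC info | MethodTable header][vtable indirection slots][up to 64K slots]
+ // Only the first region is committed here; the indirection slots and the
+ // vtable slots are committed on demand by ExtendCommitedSlots.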
+
+ void *pAlloc = ::ClrVirtualAlloc(0, dwReserveSize, MEM_RESERVE | MEM_TOP_DOWN, PAGE_EXECUTE_READWRITE);
+
+ if (pAlloc)
+ {
+ BOOL bFailed = TRUE;
+
+ // Make sure that we have not created the one and only
+ // transparent proxy method table before
+ _ASSERTE(NULL == s_pThunkTable);
+
+ // Commit the required amount of memory
+ DWORD dwCommitSize = 0;
+
+ // MethodTable memory
+ DWORD dwMethodTableCommitSize = (s_dwMTDataSlots) * sizeof(PCODE);
+ if (!ClrSafeInt<DWORD>::addition(0, dwMethodTableCommitSize, dwCommitSize))
+ {
+ COMPlusThrowHR(COR_E_OVERFLOW);
+ }
+
+ if (::ClrVirtualAlloc(pAlloc, dwCommitSize, MEM_COMMIT, PAGE_EXECUTE_READWRITE))
+ {
+ // Copy the fixed portion from the true TP Method Table
+ memcpy(pAlloc,MTToAlloc(pTPMT, s_dwGCInfoBytes), (dwMethodTableCommitSize));
+
+ // Initialize the transparent proxy method table
+ InitThunkTable(0, dwMaxSlots, AllocToMT((BYTE *) pAlloc, s_dwGCInfoBytes));
+
+ // At this point the transparent proxy class points to the
+ // true TP Method Table and not the transparent
+ // proxy method table. We do not use the true method table
+ // any more. Instead we use the transparent proxy method table
+ // for allocating transparent proxies. So, we have to make the
+ // transparent proxy class point to the one and only transparent
+ // proxy method table
+ pTPMT->GetClass()->SetMethodTableForTransparentProxy(s_pThunkTable);
+
+ // Allocate the slots of the Object class method table because
+ // we can reflect on the __TransparentProxy class even though
+ // we never intend to use remoting.
+ _ASSERTE(NULL != g_pObjectClass);
+ _ASSERTE(0 == GetCommitedTPSlots());
+ if(ExtendCommitedSlots(g_pObjectClass->GetNumMethods()))
+ bFailed = FALSE;
+ }
+ else
+ {
+ ClrVirtualFree(pAlloc, 0, MEM_RELEASE);
+ }
+
+ if(bFailed)
+ DestroyThunkTable();
+ }
+ // (If the reservation itself failed, pAlloc is NULL and there is nothing
+ // to free; we fall through to the OOM throw below.)
+
+ // Note that the thunk table is set to null on any failure path
+ // via DestroyThunkTable
+ if (!s_pThunkTable)
+ COMPlusThrowOM();
+
+ RETURN;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::ExtendCommitedSlots private
+//
+// Synopsis: Extends the committed slots of the transparent proxy method table to
+// the desired number
+//
+//+----------------------------------------------------------------------------
+BOOL CTPMethodTable::ExtendCommitedSlots(_In_range_(1,64*1024) DWORD dwSlots)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(return FALSE);
+ PRECONDITION(s_dwCommitedTPSlots <= dwSlots);
+ PRECONDITION(dwSlots <= s_dwReservedTPSlots);
+ PRECONDITION((CVirtualThunks::GetVirtualThunks() == NULL) ||
+ (s_dwCommitedTPSlots == CVirtualThunks::GetVirtualThunks()->_dwCurrentThunk));
+
+ // Either we have initialized everything or we are asked to allocate
+ // some slots during initialization
+ PRECONDITION(s_fTPTableFieldsInitialized || (0 == s_dwCommitedTPSlots));
+ }
+ CONTRACTL_END;
+
+ // Commit memory for TPMethodTable
+ BOOL bAlloc = FALSE;
+ void *pAlloc = MTToAlloc(s_pThunkTable, s_dwGCInfoBytes);
+ ClrSafeInt<DWORD> dwCommitSize;
+ dwCommitSize += s_dwMTDataSlots * sizeof(PCODE);
+ dwCommitSize += MethodTable::GetNumVtableIndirections(dwSlots) * sizeof(PTR_PCODE);
+
+ DWORD dwLastIndirectionSlot = s_pThunkTable->GetIndexOfVtableIndirection(s_pThunkTable->GetNumVirtuals() - 1);
+ DWORD dwSlotsCommitSize = dwSlots * sizeof(PCODE);
+ PCODE *pAllocSlots = (PCODE*)(((BYTE*)s_pThunkTable) + s_dwMTDataSlots * sizeof(PCODE) + s_dwReservedTPIndirectionSlotSize);
+
+ if (dwCommitSize.IsOverflow())
+ {
+ return FALSE; // error condition
+ }
+
+ if (::ClrVirtualAlloc(pAlloc, dwCommitSize.Value(), MEM_COMMIT, PAGE_EXECUTE_READWRITE) &&
+ ::ClrVirtualAlloc(pAllocSlots, dwSlotsCommitSize, MEM_COMMIT, PAGE_EXECUTE_READWRITE))
+ {
+ _ASSERTE(FitsIn<WORD>(dwSlots));
+ s_pThunkTable->SetNumVirtuals((WORD)dwSlots);
+
+ MethodTable::VtableIndirectionSlotIterator it = s_pThunkTable->IterateVtableIndirectionSlotsFrom(dwLastIndirectionSlot);
+ do
+ {
+ it.SetIndirectionSlot(&pAllocSlots[it.GetStartSlot()]);
+ }
+ while (it.Next());
+
+ bAlloc = AllocateThunks(dwSlots, dwCommitSize.Value());
+ }
+
+ return bAlloc;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::AllocateThunks private
+//
+// Synopsis: Allocates the desired number of thunks for virtual methods
+//
+//+----------------------------------------------------------------------------
+BOOL CTPMethodTable::AllocateThunks(DWORD dwSlots, DWORD dwCommitSize)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ // Check for existing thunks
+ DWORD dwCommitThunks = 0;
+ DWORD dwAllocThunks = dwSlots;
+ MethodTable *pThunkTable = s_pThunkTable;
+
+ CVirtualThunks* pThunks = CVirtualThunks::GetVirtualThunks();
+ if (pThunks)
+ {
+ // Compute the sizes of memory to be commited and allocated
+ BOOL fCommit;
+ if (dwSlots < pThunks->_dwReservedThunks)
+ {
+ fCommit = TRUE;
+ dwCommitThunks = dwSlots;
+ dwAllocThunks = 0;
+ }
+ else
+ {
+ fCommit = (pThunks->_dwCurrentThunk != pThunks->_dwReservedThunks);
+ dwCommitThunks = pThunks->_dwReservedThunks;
+ dwAllocThunks = dwSlots - pThunks->_dwReservedThunks;
+ }
+
+ // Commit memory if needed
+ if (fCommit)
+ {
+ DWORD dwCommitSizeTmp = (sizeof(CVirtualThunks) - ConstVirtualThunkSize) +
+ ((dwCommitThunks - pThunks->_dwStartThunk) * ConstVirtualThunkSize);
+
+ if (!::ClrVirtualAlloc(pThunks, dwCommitSizeTmp, MEM_COMMIT, PAGE_EXECUTE_READWRITE))
+ return FALSE;
+
+ // Generate thunks that push slot number and jump to TP stub
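+ // (Illustrative shape on x86: "push <slot>; jmp TransparentProxyStub";
+ // the actual encoding is architecture-specific and is produced by
+ // CreateThunkForVirtualMethod.)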
+ DWORD dwStartSlot = pThunks->_dwStartThunk;
+ DWORD dwCurrentSlot = pThunks->_dwCurrentThunk;
+ while (dwCurrentSlot < dwCommitThunks)
+ {
+ PCODE pCode = CreateThunkForVirtualMethod(dwCurrentSlot, (BYTE *)&pThunks->ThunkCode[dwCurrentSlot-dwStartSlot]);
+ pThunkTable->SetSlot(dwCurrentSlot, pCode);
+ ++dwCurrentSlot;
+ }
+
+ ClrFlushInstructionCache(&pThunks->ThunkCode[pThunks->_dwCurrentThunk-dwStartSlot],
+ (dwCommitThunks-pThunks->_dwCurrentThunk)*ConstVirtualThunkSize);
+
+ s_dwCommitedTPSlots = dwCommitThunks;
+ pThunks->_dwCurrentThunk = dwCommitThunks;
+ }
+ }
+
+ // <REVISIT_TODO>
+ // Check for the availability of a TP method table that is no longer being
+ // reused </REVISIT_TODO>
+
+ // Allocate memory if necessary
+ if (dwAllocThunks)
+ {
+ DWORD dwReserveSize = ((sizeof(CVirtualThunks) - ConstVirtualThunkSize) +
+ ((dwAllocThunks << 1) * ConstVirtualThunkSize) +
+ g_SystemInfo.dwAllocationGranularity) & ~((size_t) g_SystemInfo.dwAllocationGranularity - 1);
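+
+ // Note (illustrative): twice the requested thunk space (dwAllocThunks << 1)
+ // is reserved, rounded up to the allocation granularity, so that later
+ // growth can usually be satisfied by committing within this reservation.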
+
+ void *pAlloc = ::ClrVirtualAlloc(0, dwReserveSize,
+ MEM_RESERVE | MEM_TOP_DOWN,
+ PAGE_EXECUTE_READWRITE);
+ if (pAlloc)
+ {
+ // Commit the required amount of memory
+ DWORD dwCommitSizeTmp = (sizeof(CVirtualThunks) - ConstVirtualThunkSize) +
+ (dwAllocThunks * ConstVirtualThunkSize);
+
+ if (::ClrVirtualAlloc(pAlloc, dwCommitSizeTmp, MEM_COMMIT, PAGE_EXECUTE_READWRITE))
+ {
+ ((CVirtualThunks *) pAlloc)->_pNext = pThunks;
+ pThunks = CVirtualThunks::SetVirtualThunks((CVirtualThunks *) pAlloc);
+ pThunks->_dwReservedThunks = (dwReserveSize -
+ (sizeof(CVirtualThunks) - ConstVirtualThunkSize)) /
+ ConstVirtualThunkSize;
+ pThunks->_dwStartThunk = dwCommitThunks;
+ pThunks->_dwCurrentThunk = dwCommitThunks;
+
+ // Generate thunks that push slot number and jump to TP stub
+ DWORD dwStartSlot = pThunks->_dwStartThunk;
+ DWORD dwCurrentSlot = pThunks->_dwCurrentThunk;
+ while (dwCurrentSlot < dwSlots)
+ {
+ PCODE pCode = CreateThunkForVirtualMethod(dwCurrentSlot, (BYTE *)&pThunks->ThunkCode[dwCurrentSlot-dwStartSlot]);
+ pThunkTable->SetSlot(dwCurrentSlot, pCode);
+ ++dwCurrentSlot;
+ }
+
+ ClrFlushInstructionCache(&pThunks->ThunkCode[pThunks->_dwCurrentThunk-dwStartSlot],
+ (dwSlots-pThunks->_dwCurrentThunk)*ConstVirtualThunkSize);
+
+ s_dwCommitedTPSlots = dwSlots;
+ pThunks->_dwCurrentThunk = dwSlots;
+ }
+ else
+ {
+ ::ClrVirtualFree(pAlloc, 0, MEM_RELEASE);
+ return FALSE;
+ }
+ }
+ else
+ {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::CreateTPOfClassForRP private
+//
+// Synopsis: Creates a transparent proxy that behaves as an object of the
+// supplied class
+//
+//+----------------------------------------------------------------------------
+void CTPMethodTable::CreateTPOfClassForRP(TypeHandle ty, REALPROXYREF *pRP, TRANSPARENTPROXYREF *pTP)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(!ty.IsNull());
+ PRECONDITION(pRP != NULL);
+ PRECONDITION(*pRP != NULL);
+ PRECONDITION(pTP != NULL);
+ POSTCONDITION(*pTP != NULL);
+ }
+ CONTRACT_END;
+
+ // Ensure the transparent proxy method table fields are initialized.
+ EnsureFieldsInitialized();
+
+ MethodTable * pMT = ty.GetMethodTable();
+
+ // Get the size of the VTable for the class to proxy
+ DWORD dwSlots = pMT->GetNumVirtuals();
+
+ if (dwSlots == 0)
+ dwSlots = 1;
+
+ // The global thunk table must have been initialized
+ _ASSERTE(s_pThunkTable != NULL);
+
+ // Check for the need to extend existing TP method table
+ if (dwSlots > GetCommitedTPSlots())
+ {
+ CrstHolder ch(&s_TPMethodTableCrst);
+
+ if (dwSlots > GetCommitedTPSlots())
+ {
+ if (!ExtendCommitedSlots(dwSlots))
+ COMPlusThrowOM();
+ }
+ }
+
+ // Create a TP Object
+ IfNullThrow(*pTP = (TRANSPARENTPROXYREF) AllocateObject(GetMethodTable()));
+
+ // Create the cycle between TP and RP
+ (*pRP)->SetTransparentProxy(*pTP);
+
+ // Make the TP behave as an object of supplied class
+ (*pTP)->SetRealProxy(*pRP);
+
+ // If we are creating a proxy for an interface then the class
+ // is the object class else it is the class supplied
+ if (pMT->IsInterface())
+ {
+ _ASSERTE(NULL != g_pObjectClass);
+
+ (*pTP)->SetMethodTableBeingProxied(CRemotingServices::GetMarshalByRefClass());
+
+ // Set the cached interface method table to the given interface
+ // method table
+ (*pTP)->SetInterfaceMethodTable(pMT);
+ }
+ else
+ {
+ (*pTP)->SetMethodTableBeingProxied(pMT);
+ }
+
+ RETURN;
+}
+
+Signature InitMessageData(messageData *msgData,
+ FramedMethodFrame *pFrame,
+ Module **ppModule,
+ SigTypeContext *pTypeContext)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(msgData));
+ PRECONDITION(CheckPointer(pFrame));
+ PRECONDITION(CheckPointer(ppModule));
+ PRECONDITION(CheckPointer(pTypeContext));
+ }
+ CONTRACTL_END;
+
+ msgData->pFrame = pFrame;
+ msgData->iFlags = 0;
+
+ MethodDesc *pMD = pFrame->GetFunction();
+ _ASSERTE(!pMD->ContainsGenericVariables());
+ _ASSERTE(pMD->IsRuntimeMethodHandle());
+
+ TypeHandle thGoverningType;
+ BOOL fIsDelegate = pMD->GetMethodTable()->IsDelegate();
+
+ // We want to calculate and store a governing type for the method since
+ // sometimes the parent method table might be representative. We get the
+ // exact type context from the this reference we're calling on (adjusting
+ // for the fact it's a TP).
+
+ // But cope with the common cases first for speed:
+ // * If the method is not on a generic type and this is not the async
+ // delegate case (which requires us to unwrap the delegate and have a
+ // look) then we know the method desc's parent method table will be exact.
+ // * We require method descs to be exact for the interface case as well (since
+ // the target object doesn't help us resolve the interface type at all).
+ // * COM interop can use this code path, but that doesn't support generics so
+ // we can use the quick logic for that too.
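+ //
+ // Example (illustrative, with a hypothetical type): a call to List<int>.Add
+ // through a proxy may arrive with a MethodDesc whose parent is the
+ // representative List<T>; the exact instantiation List<int> is recovered
+ // below from the method table of the proxied 'this' (or of the delegate
+ // target).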
+ if ((!pMD->HasClassInstantiation() && !fIsDelegate) ||
+ pMD->IsInterface() ||
+ pMD->IsComPlusCall())
+ {
+ thGoverningType = TypeHandle(pMD->GetMethodTable());
+ }
+ else
+ {
+ MethodDesc *pTargetMD;
+ MethodTable *pTargetMT;
+ if (fIsDelegate)
+ {
+ // Async delegates are also handled differently in that the method and the
+ // this are delegate wrappers around the real method and target.
+ pTargetMD = COMDelegate::GetMethodDesc(pFrame->GetThis());
+
+ // Delegates on static methods don't have a useful target instance.
+ // But in that case the target method is guaranteed to have exact
+ // type information.
+ if (pTargetMD->IsStatic())
+ pTargetMT = pTargetMD->GetMethodTable();
+ else
+ {
+ OBJECTREF refDelegateTarget = COMDelegate::GetTargetObject(pFrame->GetThis());
+ pTargetMT = refDelegateTarget->GetTrueMethodTable();
+ }
+ }
+ else
+ {
+ pTargetMD = pMD;
+ pTargetMT = CTPMethodTable::GetMethodTableBeingProxied(pFrame->GetThis());
+ }
+
+ // One last check to see if we can optimize the delegate case now we've
+ // unwrapped it.
+ if (fIsDelegate && !pTargetMD->HasClassInstantiation() && !pTargetMT->IsDelegate())
+ {
+ thGoverningType = TypeHandle(pTargetMD->GetMethodTable());
+ }
+ else
+ {
+ // Not quite done yet, we need to get the type that declares the method,
+ // which may be a superclass of the type we're calling on.
+ MethodTable *pDeclaringMT = pTargetMD->GetMethodTable();
+ thGoverningType = ClassLoader::LoadGenericInstantiationThrowing(pDeclaringMT->GetModule(),
+ pDeclaringMT->GetCl(),
+ pTargetMD->GetExactClassInstantiation(TypeHandle(pTargetMT)));
+ }
+ }
+
+ msgData->thGoverningType = thGoverningType;
+
+ if (fIsDelegate)
+ {
+ DelegateEEClass* delegateCls = (DelegateEEClass*) pMD->GetMethodTable()->GetClass();
+
+ _ASSERTE(pFrame->GetThis()->GetMethodTable()->IsDelegate());
+
+ msgData->pDelegateMD = pMD;
+ msgData->pMethodDesc = COMDelegate::GetMethodDesc(pFrame->GetThis());
+
+ _ASSERTE(msgData->pMethodDesc != NULL);
+ _ASSERTE(!msgData->pMethodDesc->ContainsGenericVariables());
+ _ASSERTE(msgData->pMethodDesc->IsRuntimeMethodHandle());
+
+ if (pMD == delegateCls->m_pBeginInvokeMethod)
+ {
+ msgData->iFlags |= MSGFLG_BEGININVOKE;
+ }
+ else
+ {
+ _ASSERTE(pMD == delegateCls->m_pEndInvokeMethod);
+ msgData->iFlags |= MSGFLG_ENDINVOKE;
+ }
+ }
+ else
+ {
+ msgData->pDelegateMD = NULL;
+ msgData->pMethodDesc = pMD;
+ _ASSERTE(msgData->pMethodDesc->IsRuntimeMethodHandle());
+ }
+
+ if (msgData->pMethodDesc->IsOneWay())
+ {
+ msgData->iFlags |= MSGFLG_ONEWAY;
+ }
+
+ if (msgData->pMethodDesc->IsCtor())
+ {
+ msgData->iFlags |= MSGFLG_CTOR;
+ }
+
+ Signature signature;
+ Module *pModule;
+
+ if (msgData->pDelegateMD)
+ {
+ signature = msgData->pDelegateMD->GetSignature();
+ pModule = msgData->pDelegateMD->GetModule();
+
+ // If the delegate is generic, pDelegateMD may not represent the exact instantiation so we recover it from 'this'.
+ SigTypeContext::InitTypeContext(pFrame->GetThis()->GetMethodTable()->GetInstantiation(), Instantiation(), pTypeContext);
+ }
+ else if (msgData->pMethodDesc->IsVarArg())
+ {
+ VASigCookie *pVACookie = pFrame->GetVASigCookie();
+ signature = pVACookie->signature;
+ pModule = pVACookie->pModule;
+ SigTypeContext::InitTypeContext(pTypeContext);
+
+ }
+ else
+ {
+ signature = msgData->pMethodDesc->GetSignature();
+ pModule = msgData->pMethodDesc->GetModule();
+ SigTypeContext::InitTypeContext(msgData->pMethodDesc, thGoverningType, pTypeContext);
+ }
+
+ *ppModule = pModule;
+ return signature;
+}
+
+VOID CRealProxy::UpdateOptFlags(OBJECTREF refTP)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+
+ DWORD hierarchyDepth = 0;
+ REALPROXYREF refRP = CTPMethodTable::GetRP(refTP);
+
+ OBJECTHANDLE hServerIdentity = (OBJECTHANDLE)refRP->GetPtrOffset(CRemotingServices::GetOffsetOfSrvIdentityInRP());
+ if (hServerIdentity == NULL)
+ return;
+
+ // Check if the proxy has already been marked as not equivalent,
+ // in which case it can never be marked as anything else
+ RealProxyObject *rpTemp = (RealProxyObject *)OBJECTREFToObject(refRP);
+
+ DWORD domainID = rpTemp->GetDomainID();
+ AppDomainFromIDHolder ad((ADID)domainID, TRUE);
+ if (domainID == 0 || ad.IsUnloaded()) // we do not use the pointer
+ return; // The appdomain the server belongs to has been unloaded
+ ad.Release();
+ DWORD optFlag = rpTemp->GetOptFlags();
+ if ((optFlag & OPTIMIZATION_FLAG_INITTED) &&
+ !(optFlag & OPTIMIZATION_FLAG_PROXY_EQUIVALENT))
+ return;
+
+ OBJECTREF refSrvIdentity = ObjectFromHandle(hServerIdentity);
+ // Is this a disconnected proxy ?
+ if (refSrvIdentity == NULL)
+ return;
+
+ OBJECTREF refSrvObject = ObjectToOBJECTREF((Object *)refSrvIdentity->GetPtrOffset(CRemotingServices::GetOffsetOfTPOrObjInIdentity()));
+
+ MethodTable *pCliMT = CTPMethodTable::GetMethodTableBeingProxied(refTP);
+
+ BOOL bProxyQualifies = FALSE;
+ BOOL bCastToSharedType = FALSE;
+
+ // Check if modules are physically the same
+
+ // Check the inheritance hierarchy of the server object, to find the type
+ // that corresponds to the type the proxy is being cast to
+ // @TODO - If being cast to an interface, currently the proxy doesn't get marked equivalent
+ // @TODO - Need to check equivalency of the interface being cast to, and then reuse interface slot # on other side
+ LPCUTF8 szCliTypeName, szCliNameSpace;
+ szCliTypeName = pCliMT->GetFullyQualifiedNameInfo(&szCliNameSpace);
+ PREFIX_ASSUME(szCliTypeName != NULL);
+
+ MethodTable *pSrvHierarchy = refSrvObject->GetMethodTable();
+
+ GCPROTECT_BEGIN(refRP);
+ while (pSrvHierarchy)
+ {
+ LPCUTF8 szSrvTypeName, szSrvNameSpace;
+ szSrvTypeName = pSrvHierarchy->GetFullyQualifiedNameInfo(&szSrvNameSpace);
+ PREFIX_ASSUME(szSrvNameSpace != NULL);
+
+ if (!strcmp(szCliTypeName, szSrvTypeName) && !strcmp(szCliNameSpace, szSrvNameSpace))
+ {
+ // Check if the types are shared. If they are, no further check is necessary
+ if (pSrvHierarchy == pCliMT)
+ {
+ bProxyQualifies = TRUE;
+ bCastToSharedType = TRUE;
+ }
+ else
+ {
+ bProxyQualifies = CRealProxy::ProxyTypeIdentityCheck(pCliMT, pSrvHierarchy);
+ }
+ break;
+ }
+
+ pSrvHierarchy = pSrvHierarchy->GetParentMethodTable();
+ hierarchyDepth++;
+ }
+ GCPROTECT_END();
+
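+ // Pack the result (illustrative): a proxy found equivalent to a server
+ // type two levels up the hierarchy yields
+ // OPTIMIZATION_FLAG_INITTED | OPTIMIZATION_FLAG_PROXY_EQUIVALENT | 2,
+ // while a non-equivalent proxy records only OPTIMIZATION_FLAG_INITTED so
+ // the check is not repeated.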
+ optFlag = 0;
+ if (bProxyQualifies && hierarchyDepth < OPTIMIZATION_FLAG_DEPTH_MASK)
+ {
+ optFlag = OPTIMIZATION_FLAG_INITTED | OPTIMIZATION_FLAG_PROXY_EQUIVALENT;
+ if (bCastToSharedType)
+ optFlag |= OPTIMIZATION_FLAG_PROXY_SHARED_TYPE;
+ optFlag |= (hierarchyDepth & OPTIMIZATION_FLAG_DEPTH_MASK);
+ }
+ else
+ optFlag = OPTIMIZATION_FLAG_INITTED;
+
+ RealProxyObject *rpUNSAFE = (RealProxyObject *)OBJECTREFToObject(refRP);
+ rpUNSAFE->SetOptFlags(optFlag);
+}
+
+BOOL CRealProxy::ProxyTypeIdentityCheck(MethodTable *pCliHierarchy, MethodTable *pSrvHierarchy)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ THROWS;
+ }
+ CONTRACTL_END
+ // We have found the server-side type that corresponds to the most derived
+ // client-side type that the proxy is cast to.
+ // Now do an identity check on the server type hierarchy to see if there is an exact match
+
+ BOOL bProxyQualifies = FALSE;
+ do
+ {
+ LPCUTF8 szCliTypeName, szCliNameSpace;
+ LPCUTF8 szSrvTypeName, szSrvNameSpace;
+ szCliTypeName = pCliHierarchy->GetFullyQualifiedNameInfo(&szCliNameSpace);
+ szSrvTypeName = pSrvHierarchy->GetFullyQualifiedNameInfo(&szSrvNameSpace);
+ PREFIX_ASSUME(szCliTypeName != NULL);
+ PREFIX_ASSUME(szSrvNameSpace != NULL);
+
+ // If type names are different, there is no match
+ if (strcmp(szCliTypeName, szSrvTypeName) ||
+ strcmp(szCliNameSpace, szSrvNameSpace))
+ {
+ bProxyQualifies = FALSE;
+ return bProxyQualifies;
+ }
+
+ PEAssembly *pClientPE = pCliHierarchy->GetAssembly()->GetManifestFile();
+ PEAssembly *pServerPE = pSrvHierarchy->GetAssembly()->GetManifestFile();
+ // If the PE files are different, there is no match
+ if (!pClientPE->Equals(pServerPE))
+ {
+ bProxyQualifies = FALSE;
+ return bProxyQualifies;
+ }
+
+ // If the number of interfaces implemented is different, there is no match
+ if (pSrvHierarchy->GetNumInterfaces() != pCliHierarchy->GetNumInterfaces())
+ {
+ bProxyQualifies = FALSE;
+ return bProxyQualifies;
+ }
+
+ MethodTable::InterfaceMapIterator srvItfIt = pSrvHierarchy->IterateInterfaceMap();
+ MethodTable::InterfaceMapIterator cliItfIt = pCliHierarchy->IterateInterfaceMap();
+ while (srvItfIt.Next())
+ {
+ BOOL succeeded;
+ succeeded = cliItfIt.Next();
+ CONSISTENCY_CHECK(succeeded);
+ if (!ProxyTypeIdentityCheck(srvItfIt.GetInterface(), cliItfIt.GetInterface()))
+ {
+ bProxyQualifies = FALSE;
+ return bProxyQualifies;
+ }
+ }
+
+ pSrvHierarchy = pSrvHierarchy->GetParentMethodTable();
+ pCliHierarchy = pCliHierarchy->GetParentMethodTable();
+ }
+ while (pSrvHierarchy && pCliHierarchy);
+
+ if (pSrvHierarchy || pCliHierarchy)
+ {
+ bProxyQualifies = FALSE;
+ return bProxyQualifies;
+ }
+
+ bProxyQualifies = TRUE;
+ return bProxyQualifies;
+
+}
+
+ProfilerRemotingClientCallbackHolder::ProfilerRemotingClientCallbackHolder()
+{
+#ifdef PROFILING_SUPPORTED
+ // If profiling is active, notify the profiler that a remoting client call is starting
+ BEGIN_PIN_PROFILER(CORProfilerTrackRemoting());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RemotingClientInvocationStarted();
+ END_PIN_PROFILER();
+#endif // PROFILING_SUPPORTED
+}
+
+ProfilerRemotingClientCallbackHolder::~ProfilerRemotingClientCallbackHolder()
+{
+#ifdef PROFILING_SUPPORTED
+ // If profiling is active, tell profiler we've made the call, received the
+ // return value, done any processing necessary, and now remoting is done.
+ BEGIN_PIN_PROFILER(CORProfilerTrackRemoting());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RemotingClientInvocationFinished();
+ END_PIN_PROFILER();
+#endif // PROFILING_SUPPORTED
+}
+
+enum
+{
+ CALLTYPE_INVALIDCALL = 0x0, // Important: sync this with RealProxy.cs
+ CALLTYPE_METHODCALL = 0x1, // Important: sync this with RealProxy.cs
+ CALLTYPE_CONSTRUCTORCALL = 0x2 // Important: sync this with RealProxy.cs
+};
+
+extern "C" void STDCALL TransparentProxyStubPatch();
+
+//+----------------------------------------------------------------------------
+//
+// Method: TransparentProxyStubWorker
+//
+// Synopsis: This function gets control in two situations
+// (1) When a call is made on the transparent proxy it delegates to the
+// PrivateInvoke method on the real proxy
+// (2) When a call is made on the constructor it again delegates to the
+// PrivateInvoke method on the real proxy.
+//
+//
+//+----------------------------------------------------------------------------
+extern "C" UINT32 STDCALL TransparentProxyStubWorker(TransitionBlock * pTransitionBlock, TADDR pMethodDescOrSlot)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ ENTRY_POINT;
+ PRECONDITION(CheckPointer(pTransitionBlock));
+ }
+ CONTRACTL_END;
+
+ UINT fpRetSize = 0;
+
+ FrameWithCookie<TPMethodFrame> frame(pTransitionBlock);
+ TPMethodFrame * pFrame = &frame;
+
+ // We need to zero out the return value buffer because we will report it during GC
+#ifdef ENREGISTERED_RETURNTYPE_MAXSIZE
+ ZeroMemory (pFrame->GetReturnValuePtr(), ENREGISTERED_RETURNTYPE_MAXSIZE);
+#else
+ *(ARG_SLOT *)pFrame->GetReturnValuePtr() = 0;
+#endif
+
+ // For virtual calls the slot number is pushed, but for
+ // non-virtual calls/interface invokes the method descriptor is already
+ // pushed
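+ // (Slot numbers are bounded by the 64K maximum vtable size, so any value
+ // whose bits above the low 16 are zero is taken to be a slot index; a real
+ // MethodDesc pointer never lies in the first 64K of address space.)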
+ MethodDesc * pMD;
+ if ((pMethodDescOrSlot >> 16) == 0)
+ {
+ // The frame is not completely set up at this point.
+ // Do not throw exceptions or provoke GC
+ MethodTable* pMT = CTPMethodTable::GetMethodTableBeingProxied(pFrame->GetThis());
+ _ASSERTE(pMT);
+
+ // Map the slot number to the corresponding method descriptor
+ pMD = pMT->GetMethodDescForSlot((WORD)pMethodDescOrSlot);
+ }
+ else
+ {
+ pMD = dac_cast<PTR_MethodDesc>(pMethodDescOrSlot);
+ }
+ pFrame->SetFunction(pMD);
+
+ pFrame->Push();
+
+ // Give debugger opportunity to stop here now that we know the MethodDesc *
+ TransparentProxyStubPatch();
+
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ if (g_pConfig->UseNewCrossDomainRemoting())
+ {
+ BOOL bOptSuccess = FALSE;
+ CrossDomainChannel cdc;
+ bOptSuccess = cdc.CheckCrossDomainCall(pFrame);
+ if (bOptSuccess)
+ {
+ fpRetSize = cdc.GetFPReturnSize();
+ goto Done;
+ }
+ }
+
+ {
+ messageData msgData;
+ Module *pModule = NULL;
+ SigTypeContext inst;
+ Signature signature = InitMessageData(&msgData, pFrame, &pModule, &inst);
+
+ _ASSERTE(!signature.IsEmpty() && pModule);
+
+ // Allocate metasig on the stack
+ MetaSig mSig(signature, pModule, &inst);
+ msgData.pSig = &mSig;
+
+ MethodDesc *pMD = pFrame->GetFunction();
+ if (pMD->GetMethodTable()->IsDelegate())
+ {
+ // check that there is only one target
+ if (COMDelegate::IsTrueMulticastDelegate(pFrame->GetThis()))
+ {
+ COMPlusThrow(kArgumentException, W("Remoting_Delegate_TooManyTargets"));
+ }
+ }
+
+ {
+ ProfilerRemotingClientCallbackHolder profilerHolder;
+
+ OBJECTREF pThisPointer = NULL;
+
+ if (pMD->GetMethodTable()->IsDelegate())
+ {
+ // this is an async call
+ _ASSERTE(pFrame->GetThis()->GetMethodTable()->IsDelegate());
+
+ pThisPointer = COMDelegate::GetTargetObject(pFrame->GetThis());
+ }
+ else
+ {
+ pThisPointer = pFrame->GetThis();
+ }
+
+ OBJECTREF firstParameter;
+ MethodDesc* pTargetMD = NULL;
+ size_t callType = CALLTYPE_INVALIDCALL;
+
+ // We are invoking either the constructor or a method on the object
+ if(pMD->IsCtor())
+ {
+ // Get the address of PrivateInvoke in managed code
+ pTargetMD = CRemotingServices::MDofPrivateInvoke();
+ _ASSERTE(pThisPointer->IsTransparentProxy());
+
+ firstParameter = CTPMethodTable::GetRP(pThisPointer);
+
+ // Set a field to indicate that it is a constructor call
+ callType = CALLTYPE_CONSTRUCTORCALL;
+ }
+ else
+ {
+ // Set a field to indicate that it is a method call
+ callType = CALLTYPE_METHODCALL;
+
+ if (pThisPointer->IsTransparentProxy())
+ {
+ // Extract the real proxy underlying the transparent proxy
+ firstParameter = CTPMethodTable::GetRP(pThisPointer);
+
+ // Get the address of PrivateInvoke in managed code
+ pTargetMD = CRemotingServices::MDofPrivateInvoke();
+ _ASSERTE(pTargetMD);
+ }
+ else
+ {
+ // must be async if this is not a TP
+ _ASSERTE(pMD->GetMethodTable()->IsDelegate());
+ firstParameter = NULL;
+
+ // Get the address of PrivateInvoke in managed code
+ pTargetMD = CRemotingServices::MDofInvokeStatic();
+ }
+
+ // Go ahead and call PrivateInvoke on Real proxy. There is no need to
+ // catch exceptions thrown by it
+ // See RealProxy.cs
+ }
+
+ _ASSERTE(pTargetMD);
+
+ // Call the appropriate target
+ CTPMethodTable::CallTarget(pTargetMD, (LPVOID)OBJECTREFToObject(firstParameter), (LPVOID)&msgData, (LPVOID)callType);
+
+ // Check for the need to trip thread
+ if (GetThread()->CatchAtSafePointOpportunistic())
+ {
+ // There is no need to GC protect the return object as
+ // TPFrame is GC protecting it
+ CommonTripThread();
+ }
+ } // ProfilerClientCallbackHolder
+
+ {
+ mSig.Reset();
+
+ ArgIterator argit(&mSig);
+
+#ifdef _TARGET_X86_
+ // Set the number of bytes to pop for x86
+ pFrame->SetCbStackPop(argit.CbStackPop());
+#endif // _TARGET_X86_
+
+ fpRetSize = argit.GetFPReturnSize();
+ }
+ }
+
+Done: ;
+
+ pFrame->Pop();
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ return fpRetSize;
+}
+
+
+// Helper due to inability to combine SEH with anything interesting.
+BOOL CTPMethodTable::CheckCastHelper(MethodDesc* pTargetMD, LPVOID pFirst, LPVOID pSecond)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pTargetMD));
+ PRECONDITION(CheckPointer(pFirst, NULL_OK));
+ PRECONDITION(CheckPointer(pSecond, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Actual return type is a managed 'bool', so only look at a CLR_BOOL-sized
+ // result. The high bits are undefined on AMD64. (Note that a narrowing
+ // cast to CLR_BOOL will not work since it is the same as checking the
+ // size_t result != 0.)
+ LPVOID ret = CallTarget(pTargetMD, pFirst, pSecond);
+ return *(CLR_BOOL*)StackElemEndianessFixup(&ret, sizeof(CLR_BOOL));
+}
+
+
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::CheckCast private
+//
+// Synopsis: Call the managed checkcast method to determine whether the
+// server type can be cast to the given type
+//
+//
+//
+//+----------------------------------------------------------------------------
+BOOL CTPMethodTable::CheckCast(MethodDesc* pTargetMD, TRANSPARENTPROXYREF orTP, TypeHandle ty)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pTargetMD));
+ PRECONDITION(orTP != NULL);
+ PRECONDITION(!ty.IsNull());
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF reflectType = NULL;
+ LPVOID pvType = NULL;
+ BOOL fCastOK = FALSE;
+
+ typedef struct _GCStruct
+ {
+ TRANSPARENTPROXYREF orTP;
+ REALPROXYREF orRP;
+ } GCStruct;
+
+ GCStruct gcValues;
+ gcValues.orTP = orTP;
+ gcValues.orRP = GetRP(orTP);
+
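+ // Protect both references across the managed call-outs below.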
+ GCPROTECT_BEGIN (gcValues);
+
+ reflectType = (REFLECTCLASSBASEREF) ty.GetMethodTable()->GetManagedClassObject();
+ *(REFLECTCLASSBASEREF *)&pvType = reflectType;
+
+ fCastOK = CheckCastHelper(pTargetMD,
+ (LPVOID)OBJECTREFToObject(gcValues.orRP),
+ pvType);
+
+ if (fCastOK)
+ {
+ _ASSERTE(s_fTPTableFieldsInitialized);
+
+ // The cast succeeded. Replace the current type in the proxy
+ // with the given type.
+
+ CrstHolder ch(&s_TPMethodTableCrst);
+
+ if (ty.IsInterface())
+ {
+ // We replace the cached interface method table with the interface
+ // method table that we are trying to cast to. This will ensure that
+ // casts to this interface, which are likely to happen, will succeed.
+ gcValues.orTP->SetInterfaceMethodTable(ty.GetMethodTable());
+ }
+ else
+ {
+ MethodTable *pCurrent = gcValues.orTP->GetMethodTableBeingProxied();
+
+ BOOL fDerivedClass = FALSE;
+ // Check whether this class derives from the current class
+ fDerivedClass = CRemotingServices::CheckCast(gcValues.orTP, ty,
+ TypeHandle(pCurrent));
+ // We replace the current method table only if we cast to a more
+ // derived class
+ if (fDerivedClass)
+ {
+ // Set the method table in the proxy to the given method table
+ RefineProxy(gcValues.orTP, ty);
+ }
+ }
+ }
+
+ GCPROTECT_END();
+ return fCastOK;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::RefineProxy public
+//
+// Synopsis: Set the method table in the proxy to the given class' method table.
+// Additionally, expand the TP method table to the required number of slots.
+//
+//
+//+----------------------------------------------------------------------------
+void CTPMethodTable::RefineProxy(TRANSPARENTPROXYREF orTP, TypeHandle ty)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(orTP != NULL);
+ PRECONDITION(!ty.IsNull());
+ }
+ CONTRACTL_END;
+
+ // Do the expansion only if necessary
+ MethodTable *pMT = ty.GetMethodTable();
+
+ if (pMT != orTP->GetMethodTableBeingProxied())
+ {
+ orTP->SetMethodTableBeingProxied(pMT);
+
+ // Extend the vtable if necessary
+ DWORD dwSlots = pMT->GetNumVirtuals();
+
+ if (dwSlots == 0)
+ dwSlots = 1;
+
+ if((dwSlots > GetCommitedTPSlots()) && !ExtendCommitedSlots(dwSlots))
+ {
+ // We failed to extend the committed slots. Out of memory.
+ COMPlusThrowOM();
+ }
+
+ }
+}
+
+#ifndef HAS_REMOTING_PRECODE
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::GetOrCreateNonVirtualSlotForVirtualMethod private
+//
+// Synopsis: Get a slot for a non-virtual call to a virtual method.
+//
+//+----------------------------------------------------------------------------
+PTR_PCODE CTPMethodTable::GetOrCreateNonVirtualSlotForVirtualMethod(MethodDesc* pMD)
+{
+ CONTRACT (PTR_PCODE)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(pMD->IsRemotingInterceptedViaVirtualDispatch());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // Ensure the TP MethodTable's fields have been initialized.
+ EnsureFieldsInitialized();
+
+ PTR_PCODE pSlot;
+
+ {
+ // Create the thunk in a thread safe manner
+ CrstHolder ch(&s_TPMethodTableCrst);
+
+ // NOTE: CNonVirtualThunk::SetNonVirtualThunks() depends on the lock being initialized
+ CNonVirtualThunk::InitializeListLock();
+
+ // Create hash table if we do not have one yet
+ if (s_pThunkHashTable == NULL)
+ {
+ NewHolder <EEThunkHashTable> pTempHash(new EEThunkHashTable());
+
+ LockOwner lock = {&s_TPMethodTableCrst, IsOwnerOfCrst};
+ IfNullThrow(pTempHash->Init(23,&lock));
+
+ s_pThunkHashTable = pTempHash.Extract();
+ }
+
+ if (!s_pThunkHashTable->GetValue(pMD, (HashDatum *)&pSlot))
+ {
+ PCODE pThunkCode = CreateNonVirtualThunkForVirtualMethod(pMD);
+
+ _ASSERTE(CNonVirtualThunkMgr::IsThunkByASM(pThunkCode));
+ _ASSERTE(CNonVirtualThunkMgr::GetMethodDescByASM(pThunkCode));
+
+ // Set the generated thunk once and for all.
+ CNonVirtualThunk *pThunk = CNonVirtualThunk::SetNonVirtualThunks((BYTE*)pThunkCode);
+
+ // Remember the thunk address in a hash table
+ // so that we don't generate it again
+ pSlot = (PTR_PCODE)pThunk->GetAddrOfCode();
+ s_pThunkHashTable->InsertValue(pMD, (HashDatum)pSlot);
+ }
+ }
+
+ RETURN pSlot;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::DestroyThunk public
+//
+// Synopsis: Destroy the thunk for the non virtual method.
+//
+//
+//+----------------------------------------------------------------------------
+void CTPMethodTable::DestroyThunk(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ if(s_pThunkHashTable)
+ {
+ CrstHolder ch(&s_TPMethodTableCrst);
+
+ LPVOID pvCode = NULL;
+ s_pThunkHashTable->GetValue(pMD, (HashDatum *)&pvCode);
+ CNonVirtualThunk *pThunk = NULL;
+ if(NULL != pvCode)
+ {
+ pThunk = CNonVirtualThunk::AddrToThunk(pvCode);
+ delete pThunk;
+ s_pThunkHashTable->DeleteValue(pMD);
+ }
+ }
+}
+#endif // HAS_REMOTING_PRECODE
+
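+// Worker for CTPMethodTable::CallTarget below; kept in a separate function so
+// that the SEH region set up by BEGIN/END_CALL_TO_MANAGED does not share a
+// frame with code that needs C++ unwinding (the same restriction noted for
+// CheckCastHelper above).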
+static LPVOID CallTargetWorker1(MethodDesc* pTargetMD,
+ LPVOID pvFirst,
+ LPVOID pvSecond)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ LPVOID ret = NULL;
+ PCODE pTarget = pTargetMD->GetSingleCallableAddrOfCode();
+
+#if defined(DEBUGGING_SUPPORTED)
+ if (CORDebuggerTraceCall())
+ {
+ g_pDebugInterface->TraceCall((const BYTE*)pTarget);
+ }
+#endif // DEBUGGING_SUPPORTED
+
+
+ BEGIN_CALL_TO_MANAGED();
+
+ ret = CTPMethodTable__CallTargetHelper2((const BYTE*)pTarget, pvFirst, pvSecond);
+
+ END_CALL_TO_MANAGED();
+
+ return ret;
+}
+
+
+//+----------------------------------------------------------------------------
+//
+// Method: CTPMethodTable::CallTarget private
+//
+// Synopsis: Calls the target method on the given object
+//
+//+----------------------------------------------------------------------------
+LPVOID __stdcall CTPMethodTable::CallTarget (MethodDesc* pTargetMD,
+ LPVOID pvFirst,
+ LPVOID pvSecond)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pTargetMD));
+ PRECONDITION(CheckPointer(pvFirst, NULL_OK));
+ PRECONDITION(CheckPointer(pvSecond, NULL_OK));
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+
+ Thread* curThread = GetThread();
+
+ Object* ObjRefTable[OBJREF_TABSIZE];
+
+ if (curThread)
+ memcpy(ObjRefTable, curThread->dangerousObjRefs, sizeof(curThread->dangerousObjRefs));
+
+#endif // _DEBUG
+
+ LPVOID ret = CallTargetWorker1(pTargetMD, pvFirst, pvSecond);
+
+#ifdef _DEBUG
+ // Restore dangerousObjRefs when we return back to EE after call
+ if (curThread)
+ memcpy(curThread->dangerousObjRefs, ObjRefTable, sizeof(curThread->dangerousObjRefs));
+
+ ENABLESTRESSHEAP ();
+#endif // _DEBUG
+
+ return ret;
+}
+
+
+static LPVOID CallTargetWorker2(MethodDesc* pTargetMD,
+ LPVOID pvFirst,
+ LPVOID pvSecond,
+ LPVOID pvThird)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ LPVOID ret = NULL;
+ PCODE pTarget = pTargetMD->GetSingleCallableAddrOfCode();
+
+#if defined(DEBUGGING_SUPPORTED)
+ if (CORDebuggerTraceCall())
+ {
+ g_pDebugInterface->TraceCall((const BYTE*)pTarget);
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ BEGIN_CALL_TO_MANAGED();
+
+ ret = CTPMethodTable__CallTargetHelper3((const BYTE*)pTarget, pvFirst, pvSecond, pvThird);
+
+ END_CALL_TO_MANAGED();
+
+ return ret;
+}
+
+LPVOID __stdcall CTPMethodTable::CallTarget (MethodDesc* pTargetMD,
+ LPVOID pvFirst,
+ LPVOID pvSecond,
+ LPVOID pvThird)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pTargetMD));
+ PRECONDITION(CheckPointer(pvFirst, NULL_OK));
+ PRECONDITION(CheckPointer(pvSecond, NULL_OK));
+ PRECONDITION(CheckPointer(pvThird, NULL_OK));
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ Thread* curThread = GetThread();
+
+ Object* ObjRefTable[OBJREF_TABSIZE];
+ if (curThread)
+ memcpy(ObjRefTable, curThread->dangerousObjRefs, sizeof(curThread->dangerousObjRefs));
+
+#endif // _DEBUG
+
+ LPVOID ret = CallTargetWorker2(pTargetMD, pvFirst, pvSecond, pvThird);
+
+#ifdef _DEBUG
+ // Restore dangerousObjRefs when we return back to EE after call
+ if (curThread)
+ memcpy(curThread->dangerousObjRefs, ObjRefTable, sizeof(curThread->dangerousObjRefs));
+
+ ENABLESTRESSHEAP ();
+#endif // _DEBUG
+
+ return ret;
+}
+
+
+#ifndef HAS_REMOTING_PRECODE
+//+----------------------------------------------------------------------------
+//
+// Method: CNonVirtualThunk::SetNextThunk public
+//
+// Synopsis: Creates a thunk for the given address and adds it to the global
+// list
+//
+//+----------------------------------------------------------------------------
+CNonVirtualThunk* CNonVirtualThunk::SetNonVirtualThunks(const BYTE* pbCode)
+{
+ CONTRACT (CNonVirtualThunk*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pbCode));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ CNonVirtualThunk *pThunk = new CNonVirtualThunk(pbCode);
+
+ // Put the generated thunk in a global list
+ // Note: this is called while a non-virtual thunk is being created;
+ // the TPMethodTable critical section is held at this point.
+ pThunk->SetNextThunk();
+
+ // Set up the stub manager if necessary
+ CNonVirtualThunkMgr::InitNonVirtualThunkManager();
+
+ RETURN pThunk;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CNonVirtualThunk::~CNonVirtualThunk public
+//
+// Synopsis: Deletes the thunk from the global list of thunks
+//
+//
+//+----------------------------------------------------------------------------
+CNonVirtualThunk::~CNonVirtualThunk()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(s_pNonVirtualThunks));
+ }
+ CONTRACTL_END;
+
+ CNonVirtualThunk* pCurr = s_pNonVirtualThunks;
+ CNonVirtualThunk* pPrev = NULL;
+ BOOL found = FALSE;
+
+ // Note: This is called with the TPMethodTable critsec held
+ while(!found && (NULL != pCurr))
+ {
+ if(pCurr == this)
+ {
+ found = TRUE;
+ SimpleRWLock::SimpleWriteLockHolder swlh(s_pNonVirtualThunksListLock);
+
+ // Unlink from the chain
+ if(NULL != pPrev)
+ {
+ pPrev->_pNext = pCurr->_pNext;
+ }
+ else
+ {
+ // First entry needs to be deleted
+ s_pNonVirtualThunks = pCurr->_pNext;
+ }
+ }
+ pPrev = pCurr;
+ pCurr = pCurr->_pNext;
+ }
+
+ _ASSERTE(found);
+}
+#endif // HAS_REMOTING_PRECODE
+
+//+----------------------------------------------------------------------------
+//
+// Method: CVirtualThunkMgr::InitVirtualThunkManager public
+//
+// Synopsis: Adds the stub manager to aid debugger in stepping into calls
+//
+//
+//+----------------------------------------------------------------------------
+void CVirtualThunkMgr::InitVirtualThunkManager()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ // This function is already thread safe since it is called from within a
+ // critical section.
+ if(NULL == s_pVirtualThunkMgr)
+ {
+ // Add the stub manager for vtable calls
+ s_pVirtualThunkMgr = new CVirtualThunkMgr();
+
+ StubManager::AddStubManager(s_pVirtualThunkMgr);
+ }
+
+}
+
+#endif // !DACCESS_COMPILE
+
+//+----------------------------------------------------------------------------
+//
+// Method: CVirtualThunkMgr::CheckIsStub_Internal public
+//
+// Synopsis: Returns TRUE if the given address is the starting address of
+// the transparent proxy stub
+//
+//+----------------------------------------------------------------------------
+BOOL CVirtualThunkMgr::CheckIsStub_Internal(PCODE stubStartAddress)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ BOOL bIsStub = FALSE;
+
+#ifndef DACCESS_COMPILE
+ if (!IsThunkByASM(stubStartAddress))
+ return FALSE;
+ if(NULL != FindThunk((const BYTE *) stubStartAddress))
+ bIsStub = TRUE;
+#endif // !DACCESS_COMPILE
+
+ return bIsStub;
+}
+
+#ifndef DACCESS_COMPILE
+
+//+----------------------------------------------------------------------------
+//
+// Method: CVirtualThunkMgr::Entry2MethodDesc public
+//
+// Synopsis: Convert a starting address to a MethodDesc
+//
+//+----------------------------------------------------------------------------
+MethodDesc *CVirtualThunkMgr::Entry2MethodDesc(PCODE StubStartAddress, MethodTable *pMT)
+{
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ if (s_pVirtualThunkMgr == NULL)
+ RETURN NULL;
+
+ if (!pMT)
+ RETURN NULL;
+
+ if (!s_pVirtualThunkMgr->CheckIsStub_Internal(StubStartAddress))
+ RETURN NULL;
+
+ RETURN GetMethodDescByASM(StubStartAddress, pMT);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CVirtualThunkMgr::FindThunk private
+//
+// Synopsis: Finds a thunk that matches the given starting address
+//
+//+----------------------------------------------------------------------------
+LPBYTE CVirtualThunkMgr::FindThunk(const BYTE *stubStartAddress)
+{
+ CONTRACT (LPBYTE)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(stubStartAddress, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ CVirtualThunks* pThunks = CVirtualThunks::GetVirtualThunks();
+ LPBYTE pThunkAddr = NULL;
+
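+ // Each CVirtualThunks block covers the slot range [_dwStartThunk,
+ // _dwCurrentThunk); an address matches if it falls anywhere inside one of
+ // the generated thunks in that range.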
+ while(NULL != pThunks)
+ {
+ DWORD dwStartSlot = pThunks->_dwStartThunk;
+ DWORD dwCurrSlot = pThunks->_dwStartThunk;
+ DWORD dwMaxSlot = pThunks->_dwCurrentThunk;
+ while (dwCurrSlot < dwMaxSlot)
+ {
+ LPBYTE pStartAddr = pThunks->ThunkCode[dwCurrSlot-dwStartSlot].pCode;
+ if((stubStartAddress >= pStartAddr) &&
+ (stubStartAddress < (pStartAddr + ConstVirtualThunkSize)))
+ {
+ pThunkAddr = pStartAddr;
+ break;
+ }
+ ++dwCurrSlot;
+ }
+
+ pThunks = pThunks->GetNextThunk();
+ }
+
+ RETURN pThunkAddr;
+}
+
+#endif // !DACCESS_COMPILE
+
+#ifndef HAS_REMOTING_PRECODE
+
+#ifndef DACCESS_COMPILE
+
+//+----------------------------------------------------------------------------
+//
+// Method: CNonVirtualThunkMgr::InitNonVirtualThunkManager public
+//
+// Synopsis: Adds the stub manager to aid debugger in stepping into calls
+//
+//
+//+----------------------------------------------------------------------------
+void CNonVirtualThunkMgr::InitNonVirtualThunkManager()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ // This function is already thread safe since this method is called from within a
+ // critical section
+ if(NULL == s_pNonVirtualThunkMgr)
+ {
+ // Add the stub manager for non vtable calls
+ s_pNonVirtualThunkMgr = new CNonVirtualThunkMgr();
+
+ StubManager::AddStubManager(s_pNonVirtualThunkMgr);
+ }
+}
+
+#endif // !DACCESS_COMPILE
+
+//+----------------------------------------------------------------------------
+//
+// Method: CNonVirtualThunkMgr::CheckIsStub_Internal public
+//
+// Synopsis: Returns TRUE if the given address is the starting address of
+// one of our thunks
+//
+//+----------------------------------------------------------------------------
+BOOL CNonVirtualThunkMgr::CheckIsStub_Internal(PCODE stubStartAddress)
+{
+ WRAPPER_NO_CONTRACT;
+
+ BOOL bIsStub = FALSE;
+
+#ifndef DACCESS_COMPILE
+ if (!IsThunkByASM(stubStartAddress))
+ return FALSE;
+ if(NULL != FindThunk((const BYTE *) stubStartAddress))
+ bIsStub = TRUE;
+#endif // !DACCESS_COMPILE
+
+ return bIsStub;
+}
+
+#ifndef DACCESS_COMPILE
+
+//+----------------------------------------------------------------------------
+//
+// Method: CNonVirtualThunkMgr::Entry2MethodDesc public
+//
+// Synopsis: Convert a starting address to a MethodDesc
+//
+//+----------------------------------------------------------------------------
+MethodDesc *CNonVirtualThunkMgr::Entry2MethodDesc(PCODE StubStartAddress, MethodTable *pMT)
+{
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ if (s_pNonVirtualThunkMgr == NULL)
+ RETURN NULL;
+
+ if (!s_pNonVirtualThunkMgr->CheckIsStub_Internal(StubStartAddress))
+ RETURN NULL;
+
+ RETURN GetMethodDescByASM(StubStartAddress);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: CNonVirtualThunkMgr::FindThunk private
+//
+// Synopsis: Finds a thunk that matches the given starting address
+//
+//+----------------------------------------------------------------------------
+CNonVirtualThunk* CNonVirtualThunkMgr::FindThunk(const BYTE *stubStartAddress)
+{
+ CONTRACT (CNonVirtualThunk*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(stubStartAddress, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ SimpleRWLock::SimpleReadLockHolder srlh(CNonVirtualThunk::GetThunksListLock());
+ CNonVirtualThunk* pThunk = CNonVirtualThunk::GetNonVirtualThunks();
+
+ while(NULL != pThunk)
+ {
+ if(stubStartAddress == pThunk->GetThunkCode())
+ break;
+
+ pThunk = pThunk->GetNextThunk();
+ }
+
+ RETURN pThunk;
+}
+
+#endif // !DACCESS_COMPILE
+
+#endif // HAS_REMOTING_PRECODE
+
+
+#ifndef DACCESS_COMPILE
+
+//+----------------------------------------------------------------------------
+//+- HRESULT MethodDescDispatchHelper(MethodDescCallSite* pMethodCallSite, ARG_SLOT args[], ARG_SLOT *pret)
+//
+//   Invokes the call site and converts any thrown exception into a failure HRESULT.
+//+----------------------------------------------------------------------------
+HRESULT MethodDescDispatchHelper(MethodDescCallSite* pMethodCallSite, ARG_SLOT args[], ARG_SLOT *pret)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMethodCallSite));
+ PRECONDITION(CheckPointer(args));
+ PRECONDITION(CheckPointer(pret));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ *pret = pMethodCallSite->Call_RetArgSlot(args);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+
+#ifdef FEATURE_COMINTEROP
+
+//+----------------------------------------------------------------------------
+//
+// Method: VOID CRemotingServices::CallSetDCOMProxy(OBJECTREF realProxy, IUnknown* pUnk)
+//
+//+----------------------------------------------------------------------------
+
+VOID CRemotingServices::CallSetDCOMProxy(OBJECTREF realProxy, IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(realProxy != NULL);
+ PRECONDITION(CheckPointer(pUnk, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ GCPROTECT_BEGIN(realProxy);
+
+ MethodDescCallSite setDCOMProxy(METHOD__REAL_PROXY__SETDCOMPROXY, &realProxy);
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(realProxy),
+ (ARG_SLOT)pUnk
+ };
+
+ ARG_SLOT ret;
+ MethodDescDispatchHelper(&setDCOMProxy, args, &ret);
+
+ GCPROTECT_END();
+}
+
+
+BOOL CRemotingServices::CallSupportsInterface(OBJECTREF realProxy, REFIID iid, ARG_SLOT* pret)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(realProxy != NULL);
+ PRECONDITION(CheckPointer(pret));
+ }
+ CONTRACTL_END;
+
+ BOOL fResult = TRUE;
+
+ GCPROTECT_BEGIN(realProxy);
+
+ MethodDescCallSite supportsInterface(METHOD__REAL_PROXY__SUPPORTSINTERFACE, &realProxy);
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(realProxy),
+ (ARG_SLOT)&iid
+ };
+
+ HRESULT hr = MethodDescDispatchHelper(&supportsInterface, args, pret);
+
+ // It is allowed for the managed code to return a NULL interface pointer without returning
+ // a failure HRESULT. This is done for performance to avoid having to throw an exception.
+ // If this occurs, we need to return E_NOINTERFACE.
+ if ((*(IUnknown**)pret) == NULL)
+ hr = E_NOINTERFACE;
+
+ if (FAILED(hr))
+ fResult = FALSE;
+
+ GCPROTECT_END();
+ return fResult;
+}
+#endif // FEATURE_COMINTEROP
+
+//+----------------------------------------------------------------------------
+//
+// Method: CRemotingServices::GetStubForInterfaceMethod
+//
+// Synopsis: Given the exact interface method we wish to invoke, return
+// the entry point of a stub that will correctly transition into
+// the remoting system, passing it this method.
+// The stubs are just another kind of precode; they are cached
+// in a per-appdomain hash.
+//
+//
+//+----------------------------------------------------------------------------
+PCODE CRemotingServices::GetStubForInterfaceMethod(MethodDesc *pItfMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pItfMD));
+ PRECONDITION(pItfMD->IsInterface() && !pItfMD->IsStatic());
+ }
+ CONTRACTL_END;
+
+ return pItfMD->GetLoaderAllocator()->GetFuncPtrStubs()->GetFuncPtrStub(pItfMD, PRECODE_STUB);
+}
+
+#endif // !DACCESS_COMPILE
+#endif // FEATURE_REMOTING
diff --git a/src/vm/remoting.h b/src/vm/remoting.h
new file mode 100644
index 0000000000..f2e0d0f561
--- /dev/null
+++ b/src/vm/remoting.h
@@ -0,0 +1,958 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: remoting.h
+//
+
+//
+// Purpose: Defines various remoting-related objects, such as
+// proxies.
+//
+
+//
+
+
+#ifndef __REMOTING_H__
+#define __REMOTING_H__
+
+#ifndef FEATURE_REMOTING
+#error FEATURE_REMOTING is not set, please do not include remoting.h
+#endif
+
+#include "fcall.h"
+#include "stubmgr.h"
+
+// Forward declaration
+class TPMethodFrame;
+
+// <TODO>@TODO: Set the hashtable to delete the data.</TODO>
+
+// Thunk hash table - the keys are MethodDesc
+typedef EEHashTable<MethodDesc *, EEPtrHashTableHelper<MethodDesc *>, FALSE> EEThunkHashTable;
+
+// ConstVirtualThunkSize declares the size of the code generated by
+// CTPMethodTable::CreateThunkForVirtualMethod
+#ifdef _TARGET_X86_
+
+static const DWORD ConstVirtualThunkSize = sizeof(BYTE) + sizeof(DWORD) + sizeof(BYTE) + sizeof(LONG);
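+// (An opcode byte with a DWORD operand followed by an opcode byte with a LONG
+// operand -- consistent with a push-immediate / jump-relative pair, though the
+// exact encoding is whatever CreateThunkForVirtualMethod emits.)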
+
+#elif defined(_TARGET_AMD64_)
+
+static const DWORD ConstVirtualThunkSize = sizeof(DWORD) + sizeof(UINT64) + 7;
+
+#elif defined(_TARGET_ARM_)
+
+static const DWORD ConstVirtualThunkSize = 12;
+
+#else
+PORTABILITY_WARNING("Remoting thunk size not defined for this platform.")
+static const DWORD ConstVirtualThunkSize = sizeof(LPVOID);
+#endif
+
+extern "C"
+{
+ UINT_PTR __stdcall CRemotingServices__CheckForContextMatch(Object* pStubData);
+ void __stdcall CRemotingServices__DispatchInterfaceCall();
+ void __stdcall CRemotingServices__CallFieldGetter(MethodDesc *pMD, LPVOID pThis, LPVOID pFirst, LPVOID pSecond, LPVOID pThird);
+ void __stdcall CRemotingServices__CallFieldSetter(MethodDesc *pMD, LPVOID pThis, LPVOID pFirst, LPVOID pSecond, LPVOID pThird);
+}
+
+extern "C" LPVOID __stdcall CTPMethodTable__CallTargetHelper2(const void *pTarget, LPVOID pvFirst, LPVOID pvSecond);
+extern "C" LPVOID __stdcall CTPMethodTable__CallTargetHelper3(const void *pTarget, LPVOID pvFirst, LPVOID pvSecond, LPVOID pvThird);
+extern "C" BOOL __stdcall CTPMethodTable__GenericCheckForContextMatch(Object* orTP);
+
+
+// These are the values returned by RequiresManagedActivation
+enum ManagedActivationType
+{
+ NoManagedActivation = 0,
+ ManagedActivation = 0x1,
+#ifdef FEATURE_COMINTEROP
+ ComObjectType = 0x2,
+#endif // FEATURE_COMINTEROP
+};
+
+
+struct timingData
+{
+ DWORD threadId;
+ BYTE stage;
+ __int64 cycleCount;
+};
+
+
+// This struct is also accessed from managed world
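+// (keep the field layout in sync with the managed counterpart)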
+struct messageData
+{
+ PVOID pFrame;
+ MethodDesc *pMethodDesc;
+ MethodDesc *pDelegateMD;
+ MetaSig *pSig;
+ TypeHandle thGoverningType;
+ INT32 iFlags;
+};
+
+
+// The real proxy class is the class behind the
+// transparent proxy class
+class CRealProxy
+{
+public:
+ // Native helpers
+ static FCDECL2(VOID, SetStubData, Object* orRPUNSAFE, Object* orStubDataUNSAFE);
+ static FCDECL1(Object*, GetStubData, Object* orRPUNSAFE);
+ static FCDECL1(LPVOID, GetStub, Object* orRPUNSAFE);
+ static FCDECL0(LPVOID, GetDefaultStub);
+ static FCDECL1(Object*, GetProxiedType, Object* orRPUNSAFE);
+
+ static VOID UpdateOptFlags(OBJECTREF refTP);
+ static BOOL ProxyTypeIdentityCheck(MethodTable *pCliHierarchy, MethodTable *pSrvHierarchy);
+};
+
+// Forward declarations
+class CVirtualThunkMgr;
+class CNonVirtualThunkMgr;
+
+
+
+
+// Class that provides various remoting services
+// to the exposed world
+class CRemotingServices
+{
+private:
+ //+-------------------------------------------------------------------
+ //
+ // Struct: FieldArgs
+ //
+ // Synopsis: Structure to GC protect arguments for a field accessor call.
+ // DO NOT add non-OBJECTREF data types to the structure;
+ // see GCPROTECT_BEGIN() for an explanation.
+ //
+ //+-------------------------------------------------------------------
+ typedef struct _FieldArgs
+ {
+ OBJECTREF obj;
+ OBJECTREF val;
+ STRINGREF typeName;
+ STRINGREF fieldName;
+ } FieldArgs;
+
+public:
+
+ // Methods related to interception of non virtual methods & virtual methods called
+ // non virtually
+ static PCODE GetNonVirtualEntryPointForVirtualMethod(MethodDesc* pMD);
+
+#ifndef HAS_REMOTING_PRECODE
+ static Stub* GetStubForNonVirtualMethod(MethodDesc* pMD, LPVOID pvAddrOfCode, Stub* pInnerStub);
+#endif
+
+ static void DestroyThunk(MethodDesc* pMD);
+
+ // Methods related to interception of interface calls
+ static PCODE GetDispatchInterfaceHelper(MethodDesc* pMD);
+
+ static OBJECTREF CreateProxyOrObject(MethodTable *pMT, BOOL fIsCom = FALSE, BOOL fIsNewObj = FALSE);
+
+ // Methods related to field accessors
+ static void FieldAccessor(FieldDesc* pFD, OBJECTREF o, LPVOID pVal, BOOL fIsGetter);
+
+ // Methods related to wrapping/unwrapping of objects
+ static OBJECTREF WrapHelper(OBJECTREF obj);
+
+ static OBJECTREF Wrap(OBJECTREF obj);
+ static OBJECTREF GetProxyFromObject(OBJECTREF obj);
+ static OBJECTREF GetObjectFromProxy(OBJECTREF obj);
+ static BOOL IsProxyToRemoteObject(OBJECTREF obj);
+ static OBJECTREF GetServerContext(OBJECTREF obj);
+
+ // Methods related to creation and marshaling of appdomains
+ static OBJECTREF CreateProxyForDomain(AppDomain *pDomain);
+
+ // Extract the true class of a proxy
+ static REFLECTCLASSBASEREF GetClass(OBJECTREF pThis);
+
+ // Initialization function.
+ static VOID Initialize();
+
+ // Start up function. This actually starts up the remoting services.
+ static void EnsureRemotingStarted();
+
+ // Other helper functions.
+ inline static MethodDesc *MDofPrivateInvoke()
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN s_pRPPrivateInvoke;
+ }
+
+ inline static MethodDesc *MDofInvokeStatic()
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN s_pRPInvokeStatic;
+ }
+
+ inline static MethodDesc *MDofIsCurrentContextOK()
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN s_pIsCurrentContextOK;
+ }
+
+ inline static MethodDesc *MDofCheckCast()
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN s_pCheckCast;
+ }
+
+ inline static MethodDesc *MDofWrap()
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN s_pWrapMethodDesc;
+ }
+
+ inline static MethodDesc *MDofFieldSetter()
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN s_pFieldSetterDesc;
+ }
+
+ inline static MethodDesc *MDofFieldGetter()
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN s_pFieldGetterDesc;
+ }
+
+ inline static MethodDesc *MDofGetType()
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN s_pGetTypeDesc;
+ }
+
+ inline static MethodDesc *MDofObjectGetType()
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN s_pObjectGetTypeDesc;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ inline static MethodDesc *MDofCreateObjectForCom()
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN s_pCreateObjectForCom;
+ }
+
+
+ static BOOL CallSupportsInterface(OBJECTREF realProxy, REFIID iid, ARG_SLOT *pret);
+
+ // helpers to call methods in real proxy
+ static VOID CallSetDCOMProxy(OBJECTREF realProxy, IUnknown* pUnk);
+
+#endif // FEATURE_COMINTEROP
+
+ inline static BOOL IsInstanceOfServerIdentity(MethodTable* pMT)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ return s_pServerIdentityClass == pMT;
+ }
+
+ inline static MethodTable *GetMarshalByRefClass()
+ {
+ CONTRACT (MethodTable*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN s_pMarshalByRefObjectClass;
+ }
+
+ static INT32 IsTransparentProxy(Object* obj);
+ static Object* GetRealProxy(Object* obj);
+
+ static BOOL CheckCast(OBJECTREF orTP, TypeHandle ty);
+ static BOOL CheckCast(OBJECTREF orTP, TypeHandle objTy, TypeHandle ty);
+ static OBJECTREF GetExposedContext();
+ static AppDomain *GetServerDomainForProxy(OBJECTREF orTP);
+ static Context *GetServerContextForProxy(OBJECTREF orTP);
+ static int GetServerDomainIdForProxy(OBJECTREF orTP);
+ static UINT_PTR CheckForContextMatch(Object* pStubData);
+
+ static ManagedActivationType __stdcall RequiresManagedActivation(TypeHandle ty);
+ static BOOL IsRemotingStarted()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return s_fRemotingStarted;
+ };
+
+ static DWORD GetOffsetOfSrvIdentityInRP() { return s_dwSrvIdentityOffsetInRealProxy; }
+ static DWORD GetOffsetOfCliIdentityInRP() { return s_dwIdOffset; }
+ static DWORD GetOffsetOfTPOrObjInIdentity() { return s_dwTPOrObjOffsetInIdentity; }
+ static DWORD GetOffsetOfLeaseInIdentity() { return s_dwLeaseOffsetInIdentity; }
+ static DWORD GetOffsetOfURIInIdentity() { return s_dwURIOffsetInIdentity; }
+ inline static MethodDesc *MDofRenewLeaseOnCall() { return s_pRenewLeaseOnCallDesc; }
+
+ static PCODE GetStubForInterfaceMethod(MethodDesc *pItfMD);
+private:
+ static void StartRemoting();
+ static void CopyDestToSrc(LPVOID pDest, LPVOID pSrc, UINT cbSize);
+ static void CallFieldAccessor(FieldDesc* pFD, OBJECTREF o, VOID * pVal,
+ BOOL fIsGetter, BOOL fIsByValue, BOOL fIsGCRef,
+ TypeHandle ty, TypeHandle fldTy,
+ CorElementType fieldType, UINT cbSize);
+
+ static void GetTypeAndFieldName(FieldArgs *pArgs, FieldDesc *pFD, TypeHandle thEnclosingClass);
+ static BOOL MatchField(FieldDesc* pCurField, LPCUTF8 szFieldName);
+ static OBJECTREF SetExposedContext(OBJECTREF newContext);
+ static OBJECTREF GetServerIdentityFromProxy(OBJECTREF obj);
+ inline static MethodDesc *MDOfCreateProxyForDomain()
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN s_pProxyForDomainDesc;
+ }
+
+ inline static MethodDesc *MDofGetServerContextForProxy()
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN s_pServerContextForProxyDesc;
+ }
+
+ inline static MethodDesc *MDofGetServerDomainIdForProxy()
+ {
+ CONTRACT (MethodDesc*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN s_pServerDomainIdForProxyDesc;
+ }
+
+ static VOID InitActivationServicesClass();
+ static VOID InitRealProxyClass();
+ static VOID InitRemotingProxyClass();
+ static VOID InitServerIdentityClass();
+ static VOID InitIdentityClass();
+ static VOID InitMarshalByRefObjectClass();
+ static VOID InitRemotingServicesClass();
+ static VOID InitObjectClass();
+ static VOID InitLeaseClass();
+
+ static MethodTable *s_pMarshalByRefObjectClass;
+ static MethodTable *s_pServerIdentityClass;
+ static MethodTable *s_pContextClass;
+
+ static MethodDesc *s_pRPPrivateInvoke;
+ static MethodDesc *s_pRPInvokeStatic;
+ static MethodDesc *s_pIsCurrentContextOK;
+ static MethodDesc *s_pCheckCast;
+ static MethodDesc *s_pWrapMethodDesc;
+ static MethodDesc *s_pFieldSetterDesc;
+ static MethodDesc *s_pFieldGetterDesc;
+ static MethodDesc *s_pObjectGetTypeDesc;
+ static MethodDesc *s_pGetTypeDesc;
+ static MethodDesc *s_pProxyForDomainDesc;
+ static MethodDesc *s_pServerContextForProxyDesc;
+ static MethodDesc *s_pServerDomainIdForProxyDesc;
+ static MethodDesc *s_pRenewLeaseOnCallDesc;
+ static DWORD s_dwIdOffset;
+ static DWORD s_dwServerOffsetInRealProxy;
+ static DWORD s_dwSrvIdentityOffsetInRealProxy;
+ static DWORD s_dwTPOrObjOffsetInIdentity;
+ static DWORD s_dwLeaseOffsetInIdentity;
+ static DWORD s_dwURIOffsetInIdentity;
+ static DWORD s_dwMBRIDOffset;
+ static CrstStatic s_RemotingCrst;
+ static BOOL s_fRemotingStarted;
+
+#ifdef FEATURE_COMINTEROP
+ static MethodDesc *s_pCreateObjectForCom;
+#endif // FEATURE_COMINTEROP
+
+};
+
+// Class that manages transparent proxy thunks
+class CVirtualThunks
+{
+public:
+ inline static CVirtualThunks* GetVirtualThunks()
+ {
+ CONTRACT (CVirtualThunks*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ RETURN s_pVirtualThunks;
+ }
+
+ inline static CVirtualThunks* SetVirtualThunks(CVirtualThunks* pThunks)
+ {
+ CONTRACT (CVirtualThunks*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pThunks));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN (s_pVirtualThunks = pThunks);
+ }
+
+ inline CVirtualThunks* GetNextThunk()
+ {
+ CONTRACT (CVirtualThunks*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ RETURN _pNext;
+ }
+
+ // Public member variables
+ CVirtualThunks *_pNext;
+ DWORD _dwReservedThunks;
+ DWORD _dwStartThunk;
+ DWORD _dwCurrentThunk;
+
+#ifdef CVIRTUALTHUNKS_ALIGNPAD_BYTES
+ BYTE pad[CVIRTUALTHUNKS_ALIGNPAD_BYTES];
+#endif
+
+ struct tagThunkCode
+ {
+ BYTE pCode[ConstVirtualThunkSize];
+ } ThunkCode[1];
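+ // Declared with one element; the allocation actually reserves
+ // _dwReservedThunks entries, indexed relative to _dwStartThunk (see
+ // CVirtualThunkMgr::FindThunk).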
+
+private:
+ // Cannot be created
+ CVirtualThunks(CVirtualThunks *pNext, DWORD dwCommitedSlots, DWORD dwReservedSlots, DWORD dwStartSlot, DWORD dwCurrentSlot)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ // Private statics
+ static CVirtualThunks *s_pVirtualThunks;
+};
+
+
+#ifndef HAS_REMOTING_PRECODE
+
+class CNonVirtualThunk
+{
+public:
+ // Constructor
+ CNonVirtualThunk(const BYTE* pbCode)
+ : _addrOfCode(pbCode), _pNext(NULL)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ ~CNonVirtualThunk();
+
+ inline LPVOID* GetAddrOfCode()
+ {
+ CONTRACT (LPVOID*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN (LPVOID*)&_addrOfCode;
+ }
+
+ inline const BYTE* GetThunkCode()
+ {
+ CONTRACT (const BYTE*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ RETURN _addrOfCode;
+ }
+
+ inline CNonVirtualThunk* GetNextThunk()
+ {
+ CONTRACT (CNonVirtualThunk*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ RETURN _pNext;
+ }
+
+ static void InitializeListLock();
+ static CNonVirtualThunk* AddrToThunk(LPVOID pAddr);
+ inline static CNonVirtualThunk* GetNonVirtualThunks()
+ {
+ CONTRACT (CNonVirtualThunk*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ RETURN s_pNonVirtualThunks;
+ }
+
+ inline static SimpleRWLock* GetThunksListLock()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return s_pNonVirtualThunksListLock;
+ }
+
+ static CNonVirtualThunk* SetNonVirtualThunks(const BYTE* pbCode);
+
+ const BYTE* _addrOfCode;
+
+private:
+
+ void SetNextThunk();
+
+ // Private statics
+ static CNonVirtualThunk *s_pNonVirtualThunks;
+
+ // reader/writer lock to be taken when manipulating s_pNonVirtualThunks
+ static SimpleRWLock* s_pNonVirtualThunksListLock;
+
+ // Private members
+ CNonVirtualThunk* _pNext;
+};
+
+inline void CNonVirtualThunk::InitializeListLock()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (s_pNonVirtualThunksListLock == NULL)
+ s_pNonVirtualThunksListLock = new SimpleRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT);
+}
+
+inline void CNonVirtualThunk::SetNextThunk()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ SimpleRWLock::SimpleWriteLockHolder swlh(s_pNonVirtualThunksListLock);
+
+ _pNext = s_pNonVirtualThunks;
+ s_pNonVirtualThunks = this;
+}
+
+inline CNonVirtualThunk* CNonVirtualThunk::AddrToThunk(LPVOID pAddr)
+{
+ CONTRACT (CNonVirtualThunk*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pAddr));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN (CNonVirtualThunk *)((size_t)pAddr - (size_t)offsetof(CNonVirtualThunk, _addrOfCode));
+}
+
+#endif // HAS_REMOTING_PRECODE
+
+
+class CTPMethodTable
+{
+ friend class CRemotingServices;
+ friend class RemotingNative;
+
+public:
+ // Public statics
+ static DWORD GetCommitedTPSlots()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return s_dwCommitedTPSlots;
+ }
+
+ static MethodTable *GetMethodTable()
+ {
+ CONTRACT (MethodTable*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ SO_TOLERANT;
+ }
+ CONTRACT_END;
+
+ RETURN s_pThunkTable;
+ }
+
+ static VOID Initialize();
+
+#ifndef HAS_REMOTING_PRECODE
+ static PTR_PCODE GetOrCreateNonVirtualSlotForVirtualMethod(MethodDesc* pMD);
+ static PCODE CreateNonVirtualThunkForVirtualMethod(MethodDesc* pMD);
+ static Stub* CreateStubForNonVirtualMethod(MethodDesc* pMD, CPUSTUBLINKER *psl, LPVOID pvAddrOfCode, Stub* pInnerStub);
+#endif // HAS_REMOTING_PRECODE
+
+ static REALPROXYREF GetRP(OBJECTREF orTP);
+ static MethodTable * GetMethodTableBeingProxied(OBJECTREF orTP);
+
+ static LPVOID __stdcall CallTarget(MethodDesc* pTargetMD, LPVOID pvFirst, LPVOID pvSecond);
+ static LPVOID __stdcall CallTarget(MethodDesc* pTargetMD, LPVOID pvFirst, LPVOID pvSecond, LPVOID pvThird);
+ static BOOL CheckCastHelper(MethodDesc* pTargetMD, LPVOID pvFirst, LPVOID pvSecond);
+ static BOOL CheckCast(MethodDesc* pTargetMD, TRANSPARENTPROXYREF orTP, TypeHandle ty);
+ static void RefineProxy(TRANSPARENTPROXYREF orTP, TypeHandle ty);
+
+ static PCODE GetTPStubEntryPoint();
+ static PCODE GetDelegateStubEntryPoint();
+
+ static void DestroyThunk(MethodDesc* pMD);
+
+ // Interpretation of __TransparentProxy._stub
+ typedef UINT_PTR CheckContextCrossingProc (Object*);
+
+ inline static BOOL IsInstanceOfRemotingProxy(MethodTable *pMT)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ return s_pRemotingProxyClass == pMT;
+ }
+
+private:
+
+#ifndef DACCESS_COMPILE
+
+ // Private statics
+ static void InitThunkTable(DWORD dwCommitedTPSlots, DWORD dwReservedTPSlots, MethodTable* pTPMethodTable)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTPMethodTable));
+ }
+ CONTRACTL_END;
+
+ s_dwCommitedTPSlots = dwCommitedTPSlots;
+ s_dwReservedTPSlots = dwReservedTPSlots;
+ s_pThunkTable = pTPMethodTable;
+ }
+
+
+ static void DestroyThunkTable()
+ {
+ WRAPPER_NO_CONTRACT;
+ ::ClrVirtualFree(MTToAlloc(s_pThunkTable, s_dwGCInfoBytes), 0, MEM_RELEASE);
+ s_pThunkTable = NULL;
+ s_dwCommitedTPSlots = 0;
+ s_dwReservedTPSlots = 0;
+ }
+
+#endif // #ifndef DACCESS_COMPILE
+
+ static void EnsureFieldsInitialized();
+
+ static void CreateTPOfClassForRP(TypeHandle ty, REALPROXYREF *pRP, TRANSPARENTPROXYREF *pTP);
+ static void CreateTPMethodTable(MethodTable* pTPMT);
+ static BOOL ExtendCommitedSlots(_In_range_(1,64*1024) DWORD dwSlots);
+ static BOOL AllocateThunks(DWORD dwSlots, DWORD dwCommitSize);
+#ifdef HAS_REMOTING_PRECODE
+ static void ActivatePrecodeRemotingThunk();
+#endif // HAS_REMOTING_PRECODE
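+ // AllocToMT/MTToAlloc convert between the raw allocation and the
+ // MethodTable embedded 'off' bytes into it; the GC info that precedes
+ // the method table accounts for the offset (see DestroyThunkTable).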
+ static MethodTable *AllocToMT(BYTE *Alloc, LONG off)
+ {
+ CONTRACT (MethodTable*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(Alloc));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN (MethodTable *) (Alloc + off);
+ }
+
+ static BYTE *MTToAlloc(MethodTable *MT, LONG off)
+ {
+ CONTRACT (BYTE*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(MT));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN (((BYTE *) MT) - off);
+ }
+
+ static PCODE CreateThunkForVirtualMethod(DWORD dwSlot, BYTE *bCode);
+
+ // Static members
+ static DWORD s_dwCommitedTPSlots;
+ static DWORD s_dwReservedTPSlots;
+ static DWORD s_dwReservedTPIndirectionSlotSize;
+ SPTR_DECL(MethodTable, s_pThunkTable);
+ static MethodTable* s_pRemotingProxyClass;
+ static DWORD s_dwGCInfoBytes;
+ static DWORD s_dwMTDataSlots;
+ static CrstStatic s_TPMethodTableCrst;
+ static EEThunkHashTable *s_pThunkHashTable;
+ static BOOL s_fTPTableFieldsInitialized;
+};
+
+extern "C" UINT32 STDCALL TransparentProxyStubWorker(TransitionBlock * pTransitionBlock, TADDR pMethodDescOrSlot);
+
+// Holder for remoting profiler notifications
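+// (the constructor/destructor presumably raise the matching client-side
+// invocation started/finished callbacks; the definitions live in the .cpp file)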
+class ProfilerRemotingClientCallbackHolder
+{
+public:
+ ProfilerRemotingClientCallbackHolder();
+ ~ProfilerRemotingClientCallbackHolder();
+};
+
+// These stub manager classes help the debugger to step
+// through the various stubs and thunks generated by the
+// remoting infrastructure
+class CVirtualThunkMgr :public StubManager
+{
+ friend class CTPMethodTable;
+
+ VPTR_VTABLE_CLASS(CVirtualThunkMgr, StubManager)
+
+public:
+ static void InitVirtualThunkManager();
+#ifndef DACCESS_COMPILE
+ CVirtualThunkMgr()
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+#endif
+
+public:
+#ifdef _DEBUG
+ virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "CVirtualThunkMgr"; }
+#endif
+
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
+
+ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) DAC_EMPTY_RET(FALSE);
+
+ static MethodDesc *Entry2MethodDesc(PCODE StubStartAddress, MethodTable *pMT);
+
+private:
+ // Private methods
+ LPBYTE FindThunk(const BYTE *stubStartAddress);
+ static MethodDesc *GetMethodDescByASM(PCODE startaddr, MethodTable *pMT);
+ static BOOL IsThunkByASM(PCODE startaddr);
+
+ // Private statics
+ static CVirtualThunkMgr *s_pVirtualThunkMgr;
+
+#ifdef DACCESS_COMPILE
+protected:
+ virtual LPCWSTR GetStubManagerName(PCODE addr)
+ { LIMITED_METHOD_CONTRACT; return W("CVirtualThunk"); }
+#endif
+};
+
+
+#ifndef HAS_REMOTING_PRECODE
+
+class CNonVirtualThunkMgr :public StubManager
+{
+ friend class CTPMethodTable;
+
+ VPTR_VTABLE_CLASS(CNonVirtualThunkMgr, StubManager)
+
+public:
+ static void InitNonVirtualThunkManager();
+
+public:
+#ifdef _DEBUG
+ virtual const char * DbgGetName() { return "CNonVirtualThunkMgr"; }
+#endif
+
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
+
+ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) DAC_EMPTY_RET(FALSE);
+
+ virtual BOOL TraceManager(Thread *thread,
+ TraceDestination *trace,
+ CONTEXT *pContext,
+ BYTE **pRetAddr) DAC_EMPTY_RET(FALSE);
+
+ static MethodDesc *Entry2MethodDesc(PCODE StubStartAddress, MethodTable *pMT);
+
+private:
+ // Private methods
+ CNonVirtualThunk* FindThunk(const BYTE *stubStartAddress);
+ static MethodDesc *GetMethodDescByASM(PCODE startaddr);
+ static BOOL IsThunkByASM(PCODE startaddr);
+
+ // Private statics
+ static CNonVirtualThunkMgr *s_pNonVirtualThunkMgr;
+
+#ifdef DACCESS_COMPILE
+protected:
+ virtual LPCWSTR GetStubManagerName(PCODE addr)
+ { LIMITED_METHOD_CONTRACT; return W("CNonVirtualThunk"); }
+#endif
+};
+
+#endif // HAS_REMOTING_PRECODE
+
+#endif // __REMOTING_H__
diff --git a/src/vm/rexcep.h b/src/vm/rexcep.h
new file mode 100644
index 0000000000..84ea535b8f
--- /dev/null
+++ b/src/vm/rexcep.h
@@ -0,0 +1,356 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//====================================================================
+
+//
+// Purpose: Lists the commonly-used Runtime Exceptions visible to users.
+//
+
+//
+//====================================================================
+
+// If you add an exception, modify CorError.h to add an HResult there.
+// (Guidelines for picking a unique number for your HRESULT are in CorError.h)
+// Also modify your managed Exception class to include its HResult.
+// Modify __HResults in the same directory as your exception, to include
+// your new HResult. And of course, add your exception and symbolic
+// name for your HResult to the list below so it can be thrown from
+// within the EE and recognized in Interop scenarios.
+
+
+// This is an exhaustive list of all exceptions that can be
+// thrown by the EE itself. If you add to this list, the IL spec
+// needs to be updated!
+
+// Note: When multiple exceptions map to the same HRESULT, it is very important
+// that the exception that should be created when that HRESULT is returned
+// by a function comes FIRST in the list.
+//
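+// (For example, NullReferenceException is listed before AccessViolationException
+// below so that E_POINTER maps to a NullReferenceException.)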
+
+
+//
+// These are the macros that must be defined before this file is included.
+//
+
+//
+// EXCEPTION_BEGIN_DEFINE(ns, reKind, bHRformessage, ...)
+//
+// This macro starts an exception definition.
+//
+// ns Namespace of the exception.
+// reKind Name of the exception.
+// bHRformessage When the exception is thrown from the EE, if this argument is true
+// the EE will create a string with the HRESULT, so that you get a more
+// meaningful error message than, say, a bare AssemblyLoadException.
+// Usually you want to set this to true if your exception corresponds to
+// more than one HRESULT.
+// ... The list of HRESULTs that map to this exception. The first of the list
+// is used as the representative HRESULT for the reKind value.
+//
+
+//
+// #define EXCEPTION_ADD_HR(hr)
+//
+// This macro adds an additional HRESULT that maps to the exception.
+//
+// hr Additional HRESULT that maps to the exception.
+//
+
+//
+// #define EXCEPTION_END_DEFINE()
+//
+// This macro terminates the exception definition.
+//
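+//
+// The exception entries below are written in terms of a DEFINE_EXCEPTION macro
+// that the including file is expected to build from the primitives above; a
+// minimal sketch (the real definition lives in each includer):
+//
+//   #define DEFINE_EXCEPTION(ns, reKind, bHRformessage, ...)           \
+//       EXCEPTION_BEGIN_DEFINE(ns, reKind, bHRformessage, __VA_ARGS__) \
+//       EXCEPTION_END_DEFINE()
+//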
+
+
+//
+// Namespaces used to define the exceptions.
+//
+
+
+
+#include "namespace.h"
+
+
+// Temporary workaround - adding some HRESULTs that the Jupiter team will define and
+// add to one of their header files for errors. Once these have been RI'ed into
+// Windows and are in a file adjacent to winerror.h, we can remove these constants.
+// Given integration time, remove this by August 2011 at the very latest.
+#ifndef E_XAMLPARSEFAILED
+#define E_XAMLPARSEFAILED _HRESULT_TYPEDEF_(0x802B000AL)
+#endif
+#ifndef E_LAYOUTCYCLE
+#define E_LAYOUTCYCLE _HRESULT_TYPEDEF_(0x802B0014L)
+#endif
+#ifndef E_ELEMENTNOTENABLED
+#define E_ELEMENTNOTENABLED _HRESULT_TYPEDEF_(0x802B001EL)
+#endif
+#ifndef E_ELEMENTNOTAVAILABLE
+#define E_ELEMENTNOTAVAILABLE _HRESULT_TYPEDEF_(0x802B001FL)
+#endif
+#ifndef RO_E_CLOSED
+#define RO_E_CLOSED _HRESULT_TYPEDEF_(0x80000013L)
+#endif
+#ifndef APPMODEL_ERROR_NO_PACKAGE
+#define APPMODEL_ERROR_NO_PACKAGE 15700L
+#endif
+
+//
+// Actual definition of the exceptions and their matching HRESULT's.
+// HRESULTs are expected to be defined in CorError.h, and must also be
+// redefined in managed code in an __HResults class. The managed exception
+// object MUST use the same HRESULT in all of its constructors for COM Interop.
+// Read comments near top of this file.
+//
+//
+// NOTE: Please keep this list sorted according to the name of the exception.
+//
+//
+
+DEFINE_EXCEPTION(g_ReflectionNS, AmbiguousMatchException, false, COR_E_AMBIGUOUSMATCH)
+#ifdef FEATURE_CORECLR
+// ApplicationException is removed in CoreCLR
+#define kApplicationException kException
+#else
+DEFINE_EXCEPTION(g_SystemNS, ApplicationException, false, COR_E_APPLICATION)
+#endif // FEATURE_CORECLR
+DEFINE_EXCEPTION(g_SystemNS, AppDomainUnloadedException, false, COR_E_APPDOMAINUNLOADED)
+DEFINE_EXCEPTION(g_SystemNS, ArithmeticException, false, COR_E_ARITHMETIC)
+
+DEFINE_EXCEPTION(g_SystemNS, ArgumentException, false,
+ COR_E_ARGUMENT, STD_CTL_SCODE(449), STD_CTL_SCODE(450), CLR_E_BIND_UNRECOGNIZED_IDENTITY_FORMAT)
+
+DEFINE_EXCEPTION(g_SystemNS, ArgumentOutOfRangeException, false, COR_E_ARGUMENTOUTOFRANGE, HRESULT_FROM_WIN32(ERROR_NO_UNICODE_TRANSLATION))
+DEFINE_EXCEPTION(g_SystemNS, ArrayTypeMismatchException, false, COR_E_ARRAYTYPEMISMATCH)
+
+// Keep in sync with the list in EEFileLoadException::GetFileLoadKind in clrex.cpp
+DEFINE_EXCEPTION(g_SystemNS, BadImageFormatException, true,
+ COR_E_BADIMAGEFORMAT, CLDB_E_FILE_OLDVER,
+ CLDB_E_INDEX_NOTFOUND,
+ CLDB_E_FILE_CORRUPT, COR_E_NEWER_RUNTIME,
+ COR_E_ASSEMBLYEXPECTED,
+ HRESULT_FROM_WIN32(ERROR_BAD_EXE_FORMAT),
+ HRESULT_FROM_WIN32(ERROR_EXE_MARKED_INVALID),
+ CORSEC_E_INVALID_IMAGE_FORMAT,
+ HRESULT_FROM_WIN32(ERROR_NOACCESS),
+ HRESULT_FROM_WIN32(ERROR_INVALID_ORDINAL),
+ HRESULT_FROM_WIN32(ERROR_INVALID_DLL),
+ HRESULT_FROM_WIN32(ERROR_FILE_CORRUPT),
+ IDS_CLASSLOAD_32BITCLRLOADING64BITASSEMBLY,
+ COR_E_LOADING_REFERENCE_ASSEMBLY,
+ META_E_BAD_SIGNATURE,
+ COR_E_LOADING_WINMD_REFERENCE_ASSEMBLY)
+
+DEFINE_EXCEPTION(g_SystemNS, CannotUnloadAppDomainException, false, COR_E_CANNOTUNLOADAPPDOMAIN)
+DEFINE_EXCEPTION(g_CodeContractsNS, ContractException, false, COR_E_CODECONTRACTFAILED)
+DEFINE_EXCEPTION(g_SystemNS, ContextMarshalException, false, COR_E_CONTEXTMARSHAL)
+DEFINE_EXCEPTION(g_ReflectionNS, CustomAttributeFormatException, false, COR_E_CUSTOMATTRIBUTEFORMAT)
+
+#if defined(FEATURE_X509) || defined(FEATURE_CRYPTO) || defined(FEATURE_LEGACYNETCFCRYPTO)
+DEFINE_EXCEPTION(g_CryptographyNS, CryptographicException, false, CORSEC_E_CRYPTO)
+#endif // FEATURE_X509 || FEATURE_CRYPTO || FEATURE_LEGACYNETCFCRYPTO
+#ifndef FEATURE_CORECLR
+DEFINE_EXCEPTION(g_CryptographyNS, CryptographicUnexpectedOperationException, false, CORSEC_E_CRYPTO_UNEX_OPER)
+#endif // FEATURE_CORECLR
+
+DEFINE_EXCEPTION(g_SystemNS, DataMisalignedException, false, COR_E_DATAMISALIGNED)
+
+DEFINE_EXCEPTION(g_IONS, DirectoryNotFoundException, true, COR_E_DIRECTORYNOTFOUND, STG_E_PATHNOTFOUND, CTL_E_PATHNOTFOUND)
+
+DEFINE_EXCEPTION(g_SystemNS, DivideByZeroException, false, COR_E_DIVIDEBYZERO, CTL_E_DIVISIONBYZERO)
+
+DEFINE_EXCEPTION(g_SystemNS, DllNotFoundException, false, COR_E_DLLNOTFOUND)
+DEFINE_EXCEPTION(g_SystemNS, DuplicateWaitObjectException, false, COR_E_DUPLICATEWAITOBJECT)
+
+DEFINE_EXCEPTION(g_IONS, EndOfStreamException, false, COR_E_ENDOFSTREAM, STD_CTL_SCODE(62))
+
+DEFINE_EXCEPTION(g_SystemNS, EntryPointNotFoundException, false, COR_E_ENTRYPOINTNOTFOUND)
+DEFINE_EXCEPTION(g_SystemNS, Exception, false, COR_E_EXCEPTION)
+DEFINE_EXCEPTION(g_SystemNS, ExecutionEngineException, false, COR_E_EXECUTIONENGINE)
+
+DEFINE_EXCEPTION(g_SystemNS, FieldAccessException, false, COR_E_FIELDACCESS)
+
+DEFINE_EXCEPTION(g_IONS, FileLoadException, true,
+ COR_E_FILELOAD, FUSION_E_INVALID_PRIVATE_ASM_LOCATION,
+ FUSION_E_SIGNATURE_CHECK_FAILED,
+ FUSION_E_LOADFROM_BLOCKED, FUSION_E_CACHEFILE_FAILED,
+ FUSION_E_ASM_MODULE_MISSING, FUSION_E_INVALID_NAME,
+ FUSION_E_PRIVATE_ASM_DISALLOWED, FUSION_E_HOST_GAC_ASM_MISMATCH,
+ COR_E_MODULE_HASH_CHECK_FAILED, FUSION_E_REF_DEF_MISMATCH,
+ SECURITY_E_INCOMPATIBLE_SHARE, SECURITY_E_INCOMPATIBLE_EVIDENCE,
+ SECURITY_E_UNVERIFIABLE, COR_E_FIXUPSINEXE, HRESULT_FROM_WIN32(ERROR_TOO_MANY_OPEN_FILES),
+ HRESULT_FROM_WIN32(ERROR_SHARING_VIOLATION), HRESULT_FROM_WIN32(ERROR_LOCK_VIOLATION),
+ HRESULT_FROM_WIN32(ERROR_OPEN_FAILED), HRESULT_FROM_WIN32(ERROR_DISK_CORRUPT),
+ HRESULT_FROM_WIN32(ERROR_UNRECOGNIZED_VOLUME),
+ HRESULT_FROM_WIN32(ERROR_DLL_INIT_FAILED),
+ FUSION_E_CODE_DOWNLOAD_DISABLED, CORSEC_E_MISSING_STRONGNAME,
+ MSEE_E_ASSEMBLYLOADINPROGRESS,
+ HRESULT_FROM_WIN32(ERROR_FILE_INVALID))
+
+DEFINE_EXCEPTION(g_IONS, FileNotFoundException, true,
+ HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND), HRESULT_FROM_WIN32(ERROR_MOD_NOT_FOUND),
+ HRESULT_FROM_WIN32(ERROR_INVALID_NAME), CTL_E_FILENOTFOUND,
+ HRESULT_FROM_WIN32(ERROR_PATH_NOT_FOUND), HRESULT_FROM_WIN32(ERROR_BAD_NET_NAME),
+ HRESULT_FROM_WIN32(ERROR_BAD_NETPATH), HRESULT_FROM_WIN32(ERROR_NOT_READY),
+ HRESULT_FROM_WIN32(ERROR_WRONG_TARGET_NAME), INET_E_UNKNOWN_PROTOCOL,
+ INET_E_CONNECTION_TIMEOUT, INET_E_CANNOT_CONNECT, INET_E_RESOURCE_NOT_FOUND,
+ INET_E_OBJECT_NOT_FOUND, INET_E_DOWNLOAD_FAILURE, INET_E_DATA_NOT_AVAILABLE,
+ HRESULT_FROM_WIN32(ERROR_DLL_NOT_FOUND),
+ CLR_E_BIND_ASSEMBLY_VERSION_TOO_LOW, CLR_E_BIND_ASSEMBLY_PUBLIC_KEY_MISMATCH,
+ CLR_E_BIND_ASSEMBLY_NOT_FOUND)
+
+DEFINE_EXCEPTION(g_SystemNS, FormatException, false, COR_E_FORMAT)
+
+DEFINE_EXCEPTION(g_SystemNS, IndexOutOfRangeException, false, COR_E_INDEXOUTOFRANGE, 0x800a0009 /*Subscript out of range*/)
+DEFINE_EXCEPTION(g_SystemNS, InsufficientExecutionStackException, false, COR_E_INSUFFICIENTEXECUTIONSTACK)
+DEFINE_EXCEPTION(g_SystemNS, InvalidCastException, false, COR_E_INVALIDCAST)
+#ifdef FEATURE_COMINTEROP
+DEFINE_EXCEPTION(g_InteropNS, InvalidComObjectException, false, COR_E_INVALIDCOMOBJECT)
+#endif //FEATURE_COMINTEROP
+DEFINE_EXCEPTION(g_ReflectionNS, InvalidFilterCriteriaException, false, COR_E_INVALIDFILTERCRITERIA)
+DEFINE_EXCEPTION(g_InteropNS, InvalidOleVariantTypeException, false, COR_E_INVALIDOLEVARIANTTYPE)
+
+DEFINE_EXCEPTION(g_SystemNS, InvalidOperationException, false, COR_E_INVALIDOPERATION)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_EXCEPTION_HR_WINRT_ONLY(g_SystemNS, InvalidOperationException, COR_E_INVALIDOPERATION,
+ E_ILLEGAL_STATE_CHANGE,
+ E_ILLEGAL_METHOD_CALL,
+ E_ILLEGAL_DELEGATE_ASSIGNMENT,
+ HRESULT_FROM_WIN32(APPMODEL_ERROR_NO_PACKAGE))
+#endif // FEATURE_COMINTEROP
+
+DEFINE_EXCEPTION(g_SystemNS, InvalidProgramException, false, COR_E_INVALIDPROGRAM)
+
+DEFINE_EXCEPTION(g_IONS, IOException, false, COR_E_IO, CTL_E_DEVICEIOERROR, STD_CTL_SCODE(31036), STD_CTL_SCODE(31037))
+
+DEFINE_EXCEPTION(g_InteropNS, MarshalDirectiveException, false, COR_E_MARSHALDIRECTIVE)
+DEFINE_EXCEPTION(g_SystemNS, MethodAccessException, false, COR_E_METHODACCESS, META_E_CA_FRIENDS_SN_REQUIRED)
+DEFINE_EXCEPTION(g_SystemNS, MemberAccessException, false, COR_E_MEMBERACCESS)
+DEFINE_EXCEPTION(g_SystemNS, MissingFieldException, false, COR_E_MISSINGFIELD)
+DEFINE_EXCEPTION(g_ResourcesNS, MissingManifestResourceException, false,COR_E_MISSINGMANIFESTRESOURCE)
+
+DEFINE_EXCEPTION(g_SystemNS, MissingMemberException, false, COR_E_MISSINGMEMBER, STD_CTL_SCODE(461))
+
+DEFINE_EXCEPTION(g_SystemNS, MissingMethodException, false, COR_E_MISSINGMETHOD)
+DEFINE_EXCEPTION(g_SystemNS, MulticastNotSupportedException, false, COR_E_MULTICASTNOTSUPPORTED)
+
+DEFINE_EXCEPTION(g_SystemNS, NotFiniteNumberException, false, COR_E_NOTFINITENUMBER)
+
+DEFINE_EXCEPTION(g_SystemNS, NotSupportedException, false, COR_E_NOTSUPPORTED, STD_CTL_SCODE(438), STD_CTL_SCODE(445), STD_CTL_SCODE(458), STD_CTL_SCODE(459))
+
+DEFINE_EXCEPTION(g_SystemNS, NullReferenceException, false, COR_E_NULLREFERENCE)
+// Note: this has to come after NullReferenceException since we want NullReferenceException to be created
+// when E_POINTER is returned from COM interfaces.
+DEFINE_EXCEPTION(g_SystemNS, AccessViolationException, false, E_POINTER)
+
+DEFINE_EXCEPTION(g_SystemNS, ObjectDisposedException, false, COR_E_OBJECTDISPOSED, RO_E_CLOSED)
+
+DEFINE_EXCEPTION(g_SystemNS, OperationCanceledException, false, COR_E_OPERATIONCANCELED)
+
+DEFINE_EXCEPTION(g_SystemNS, OverflowException, false, COR_E_OVERFLOW, CTL_E_OVERFLOW)
+
+DEFINE_EXCEPTION(g_IONS, PathTooLongException, false, COR_E_PATHTOOLONG)
+
+DEFINE_EXCEPTION(g_SystemNS, PlatformNotSupportedException, false, COR_E_PLATFORMNOTSUPPORTED)
+
+DEFINE_EXCEPTION(g_SystemNS, RankException, false, COR_E_RANK)
+DEFINE_EXCEPTION(g_ReflectionNS, ReflectionTypeLoadException, false, COR_E_REFLECTIONTYPELOAD)
+#ifdef FEATURE_REMOTING
+DEFINE_EXCEPTION(g_RemotingNS, RemotingException, false, COR_E_REMOTING)
+#endif // FEATURE_REMOTING
+DEFINE_EXCEPTION(g_CompilerServicesNS, RuntimeWrappedException, false, COR_E_RUNTIMEWRAPPED)
+
+#ifdef FEATURE_REMOTING
+DEFINE_EXCEPTION(g_RemotingNS, ServerException, false, COR_E_SERVER)
+#endif // FEATURE_REMOTING
+
+DEFINE_EXCEPTION(g_SecurityNS, SecurityException, true,
+ COR_E_SECURITY, CORSEC_E_INVALID_STRONGNAME,
+ CTL_E_PERMISSIONDENIED, STD_CTL_SCODE(419),
+ CORSEC_E_INVALID_PUBLICKEY, CORSEC_E_SIGNATURE_MISMATCH)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_EXCEPTION(g_InteropNS, SafeArrayRankMismatchException, false, COR_E_SAFEARRAYRANKMISMATCH)
+DEFINE_EXCEPTION(g_InteropNS, SafeArrayTypeMismatchException, false, COR_E_SAFEARRAYTYPEMISMATCH)
+#endif //FEATURE_COMINTEROP
+DEFINE_EXCEPTION(g_SerializationNS, SerializationException, false, COR_E_SERIALIZATION)
+
+DEFINE_EXCEPTION(g_SystemNS, StackOverflowException, false, COR_E_STACKOVERFLOW, CTL_E_OUTOFSTACKSPACE)
+
+DEFINE_EXCEPTION(g_ThreadingNS, SynchronizationLockException, false, COR_E_SYNCHRONIZATIONLOCK)
+DEFINE_EXCEPTION(g_SystemNS, SystemException, false, COR_E_SYSTEM)
+
+DEFINE_EXCEPTION(g_ReflectionNS, TargetException, false, COR_E_TARGET)
+DEFINE_EXCEPTION(g_ReflectionNS, TargetInvocationException, false, COR_E_TARGETINVOCATION)
+DEFINE_EXCEPTION(g_ReflectionNS, TargetParameterCountException, false, COR_E_TARGETPARAMCOUNT)
+DEFINE_EXCEPTION(g_ThreadingNS, ThreadAbortException, false, COR_E_THREADABORTED)
+DEFINE_EXCEPTION(g_ThreadingNS, ThreadInterruptedException, false, COR_E_THREADINTERRUPTED)
+DEFINE_EXCEPTION(g_ThreadingNS, ThreadStateException, false, COR_E_THREADSTATE)
+DEFINE_EXCEPTION(g_ThreadingNS, ThreadStartException, false, COR_E_THREADSTART)
+DEFINE_EXCEPTION(g_SystemNS, TypeAccessException, false, COR_E_TYPEACCESS)
+DEFINE_EXCEPTION(g_SystemNS, TypeInitializationException, false, COR_E_TYPEINITIALIZATION)
+
+#ifdef FEATURE_COMINTEROP
+DEFINE_EXCEPTION(g_SystemNS, TypeLoadException, false, COR_E_TYPELOAD,
+ RO_E_METADATA_NAME_NOT_FOUND, CLR_E_BIND_TYPE_NOT_FOUND)
+#else
+DEFINE_EXCEPTION(g_SystemNS, TypeLoadException, false, COR_E_TYPELOAD)
+#endif
+
+DEFINE_EXCEPTION(g_SystemNS, TypeUnloadedException, false, COR_E_TYPEUNLOADED)
+
+DEFINE_EXCEPTION(g_SystemNS, UnauthorizedAccessException, true, COR_E_UNAUTHORIZEDACCESS, CTL_E_PATHFILEACCESSERROR, STD_CTL_SCODE(335))
+
+DEFINE_EXCEPTION(g_SecurityNS, VerificationException, false, COR_E_VERIFICATION)
+
+#ifdef FEATURE_CAS_POLICY
+DEFINE_EXCEPTION(g_PolicyNS, PolicyException, true, CORSEC_E_POLICY_EXCEPTION, CORSEC_E_NO_EXEC_PERM, CORSEC_E_MIN_GRANT_FAIL)
+DEFINE_EXCEPTION(g_SecurityNS, XmlSyntaxException, false, CORSEC_E_XMLSYNTAX)
+#endif // FEATURE_CAS_POLICY
+
+DEFINE_EXCEPTION(g_InteropNS, COMException, false, E_FAIL)
+DEFINE_EXCEPTION(g_InteropNS, ExternalException, false, E_FAIL)
+DEFINE_EXCEPTION(g_InteropNS, SEHException, false, E_FAIL)
+DEFINE_EXCEPTION(g_SystemNS, NotImplementedException, false, E_NOTIMPL)
+
+DEFINE_EXCEPTION(g_SystemNS, OutOfMemoryException, false, E_OUTOFMEMORY, CTL_E_OUTOFMEMORY, STD_CTL_SCODE(31001))
+
+#ifdef FEATURE_CORECLR
+DEFINE_EXCEPTION(g_SystemNS, CrossAppDomainMarshaledException, false, E_FAIL)
+#endif //FEATURE_CORECLR
+
+#ifdef FEATURE_ISOSTORE
+DEFINE_EXCEPTION(g_IsolatedStorageNS, IsolatedStorageException, true,
+ ISS_E_ISOSTORE, ISS_E_ISOSTORE, ISS_E_OPEN_STORE_FILE,
+ ISS_E_OPEN_FILE_MAPPING, ISS_E_MAP_VIEW_OF_FILE, ISS_E_GET_FILE_SIZE, ISS_E_CREATE_MUTEX, ISS_E_LOCK_FAILED,
+ ISS_E_FILE_WRITE, ISS_E_SET_FILE_POINTER, ISS_E_CREATE_DIR,
+ ISS_E_CORRUPTED_STORE_FILE, ISS_E_STORE_VERSION, ISS_E_FILE_NOT_MAPPED, ISS_E_BLOCK_SIZE_TOO_SMALL,
+ ISS_E_ALLOC_TOO_LARGE, ISS_E_USAGE_WILL_EXCEED_QUOTA, ISS_E_TABLE_ROW_NOT_FOUND, ISS_E_DEPRECATE, ISS_E_CALLER,
+ ISS_E_PATH_LENGTH, ISS_E_MACHINE, ISS_E_STORE_NOT_OPEN, ISS_E_MACHINE_DACL)
+#endif // FEATURE_ISOSTORE
+
+DEFINE_EXCEPTION(g_SystemNS, ArgumentNullException, false, E_POINTER)
+
+#define kLastExceptionInMscorlib kArgumentNullException
+
+//
+// All exceptions defined in other .NET Framework assemblies have to be at the end
+//
+
+#ifdef FEATURE_COMINTEROP
+// Jupiter needs some HRESULTs mapped to exceptions in .NET Framework assemblies other than mscorlib.
+DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY(g_MarkupNS, XamlParseException, "System.Runtime.WindowsRuntime.UI.Xaml", ECMA_PUBLICKEY_STR, false, E_XAMLPARSEFAILED)
+DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY(g_AutomationNS, ElementNotAvailableException, "System.Runtime.WindowsRuntime.UI.Xaml", ECMA_PUBLICKEY_STR, false, E_ELEMENTNOTAVAILABLE)
+DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY(g_AutomationNS, ElementNotEnabledException, "System.Runtime.WindowsRuntime.UI.Xaml", ECMA_PUBLICKEY_STR, false, E_ELEMENTNOTENABLED)
+DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY(g_DirectUINS, LayoutCycleException, "System.Runtime.WindowsRuntime.UI.Xaml", ECMA_PUBLICKEY_STR, false, E_LAYOUTCYCLE)
+#endif // FEATURE_COMINTEROP
+
+
+// Please see the comments at the top of this list
+
+#undef DEFINE_EXCEPTION
+#undef DEFINE_EXCEPTION_HR_WINRT_ONLY
+#undef DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY
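+
+// Illustrative sketch (assumed consumer pattern, not taken from an actual
+// consumer): this file is an X-macro table, so a consumer defines
+// DEFINE_EXCEPTION to extract the column it needs and then #includes the file,
+// e.g. to build an exception-kind enumeration:
+//
+//   #define DEFINE_EXCEPTION(ns, reKind, bHRformessage, ...) k##reKind,
+//   enum RuntimeExceptionKind {
+//   #include "rexcep.h"
+//       kLastException
+//   };
+//   #undef DEFINE_EXCEPTION
+//
+// The kLastExceptionInMscorlib/kArgumentNullException definition above relies
+// on exactly this k##reKind naming scheme.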
diff --git a/src/vm/rtlfunctions.cpp b/src/vm/rtlfunctions.cpp
new file mode 100644
index 0000000000..d2c5b54785
--- /dev/null
+++ b/src/vm/rtlfunctions.cpp
@@ -0,0 +1,117 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// RtlFunctions.CPP
+//
+
+//
+// Various functions for interacting with ntdll.
+//
+//
+
+// Precompiled Header
+
+#include "common.h"
+
+#include "rtlfunctions.h"
+
+
+#ifdef _TARGET_AMD64_
+
+RtlVirtualUnwindFn* RtlVirtualUnwind_Unsafe = NULL;
+
+HRESULT EnsureRtlFunctions()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HMODULE hModuleNtDll = CLRLoadLibrary(W("ntdll"));
+
+ if (hModuleNtDll == NULL)
+ return E_FAIL;
+
+#define ENSURE_FUNCTION_RENAME(clrname, ntname) \
+ if (NULL == clrname) { clrname = (ntname##Fn*)GetProcAddress(hModuleNtDll, #ntname); } \
+ if (NULL == clrname) { return E_FAIL; } \
+ { }
+
+ ENSURE_FUNCTION_RENAME(RtlVirtualUnwind_Unsafe, RtlVirtualUnwind );
+
+ return S_OK;
+}
+
+#else // _TARGET_AMD64_
+
+HRESULT EnsureRtlFunctions()
+{
+ LIMITED_METHOD_CONTRACT;
+ return S_OK;
+}
+
+#endif // _TARGET_AMD64_
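+
+// Usage sketch (assumed call pattern, not taken from a real call site):
+//
+//   if (FAILED(EnsureRtlFunctions()))
+//       return;  // ntdll, or its RtlVirtualUnwind export, is unavailable
+//
+//   // On _TARGET_AMD64_ builds, RtlVirtualUnwind_Unsafe is now non-NULL and
+//   // safe to call; on other targets EnsureRtlFunctions is a no-op.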
+
+#if defined(WIN64EXCEPTIONS)
+
+VOID InstallEEFunctionTable (
+ PVOID pvTableID,
+ PVOID pvStartRange,
+ ULONG cbRange,
+ PGET_RUNTIME_FUNCTION_CALLBACK pfnGetRuntimeFunctionCallback,
+ PVOID pvContext,
+ EEDynamicFunctionTableType TableType)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(cbRange <= DYNAMIC_FUNCTION_TABLE_MAX_RANGE);
+ }
+ CONTRACTL_END;
+
+ static LPWSTR wszModuleName = NULL;
+ static WCHAR rgwModuleName[MAX_PATH] = {0};
+
+ if (wszModuleName == NULL)
+ {
+ WCHAR rgwTempName[MAX_PATH] = {0};
+ DWORD dwTempNameSize = MAX_PATH;
+
+ // Leaves trailing backslash on path, producing something like "c:\windows\microsoft.net\framework\v4.0.x86dbg\"
+ HRESULT hr = GetInternalSystemDirectory(rgwTempName, &dwTempNameSize);
+
+ //finish creating complete path and copy to buffer if we can
+ if (FAILED(hr) ||
+ (wcscat_s(rgwTempName, MAX_PATH, MAIN_DAC_MODULE_DLL_NAME_W) != 0) ||
+ (wcscpy_s(rgwModuleName, MAX_PATH, rgwTempName) != 0))
+ { // The CLR should become unavailable in this case.
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+
+ // publish result
+ InterlockedExchangeT(&wszModuleName, rgwModuleName);
+ }
+
+ if (!RtlInstallFunctionTableCallback(
+ ((ULONG_PTR)pvTableID) | 3, // the low 2 bits must be set so NT knows
+ // it's not really a pointer. See
+ // DeleteEEFunctionTable.
+ (ULONG_PTR)pvStartRange,
+ cbRange,
+ pfnGetRuntimeFunctionCallback,
+ EncodeDynamicFunctionTableContext(pvContext, TableType),
+ wszModuleName))
+ {
+ COMPlusThrowOM();
+ }
+}
+
+#endif // WIN64EXCEPTIONS
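+
+// Pairing sketch (pvTableID, pvStart, cb, MyGetRuntimeFunctionCallback, and
+// pvContext are hypothetical names, shown only to illustrate the low-bit
+// tagging): the same table ID must be passed to both calls, because both
+// InstallEEFunctionTable and DeleteEEFunctionTable re-apply the "| 3" tag
+// before handing the value to ntdll.
+//
+//   InstallEEFunctionTable(pvTableID, pvStart, cb,
+//                          &MyGetRuntimeFunctionCallback, pvContext,
+//                          DYNFNTABLE_JIT);
+//   ...
+//   DeleteEEFunctionTable(pvTableID);  // must be the same cookie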
+
diff --git a/src/vm/rtlfunctions.h b/src/vm/rtlfunctions.h
new file mode 100644
index 0000000000..70ffcea5f8
--- /dev/null
+++ b/src/vm/rtlfunctions.h
@@ -0,0 +1,80 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef __RTLFUNCTIONS_H__
+#define __RTLFUNCTIONS_H__
+
+#ifdef WIN64EXCEPTIONS
+
+enum EEDynamicFunctionTableType
+{
+ DYNFNTABLE_JIT = 0,
+ DYNFNTABLE_STUB = 1,
+ DYNFNTABLE_INVALID = -1,
+
+ DYNFNTABLE_FIRST = DYNFNTABLE_JIT,
+ DYNFNTABLE_LAST = DYNFNTABLE_STUB,
+};
+
+// Used by OutOfProcessFunctionTableCallback in DLLS\mscordbg\DebugSupport.cpp
+// to figure out how to parse a dynamic function table that was registered
+// with a callback.
+inline
+EEDynamicFunctionTableType IdentifyDynamicFunctionTableTypeFromContext (PVOID pvContext)
+{
+ EEDynamicFunctionTableType type = (EEDynamicFunctionTableType)((SIZE_T)pvContext & 3);
+ if (type < DYNFNTABLE_FIRST || type > DYNFNTABLE_LAST)
+ type = DYNFNTABLE_INVALID;
+ return type;
+}
+
+inline
+PVOID EncodeDynamicFunctionTableContext (PVOID pvContext, EEDynamicFunctionTableType type)
+{
+ _ASSERTE(type >= DYNFNTABLE_FIRST && type <= DYNFNTABLE_LAST);
+ return (PVOID)((SIZE_T)pvContext | type);
+}
+
+inline
+PVOID DecodeDynamicFunctionTableContext (PVOID pvContext)
+{
+ return (PVOID)((SIZE_T)pvContext & ~3);
+}
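+
+// Worked example (illustrative values): a 4-byte-aligned context pointer such
+// as 0x10F0 tagged with DYNFNTABLE_STUB encodes to 0x10F1;
+// IdentifyDynamicFunctionTableTypeFromContext recovers DYNFNTABLE_STUB from
+// the low two bits, and DecodeDynamicFunctionTableContext masks them off to
+// recover 0x10F0. This works only because contexts are at least 4-byte aligned.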
+
+
+#define DYNAMIC_FUNCTION_TABLE_MAX_RANGE LONG_MAX
+
+#endif // WIN64EXCEPTIONS
+
+
+#if defined(WIN64EXCEPTIONS) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) && !defined(FEATURE_PAL)
+
+// Wrapper for RtlInstallFunctionTableCallback.
+VOID InstallEEFunctionTable(
+ PVOID pvTableID,
+ PVOID pvStartRange,
+ ULONG cbRange,
+ PGET_RUNTIME_FUNCTION_CALLBACK pfnGetRuntimeFunctionCallback,
+ PVOID pvContext,
+ EEDynamicFunctionTableType TableType);
+
+inline
+VOID DeleteEEFunctionTable(
+ PVOID pvTableID)
+{
+ RtlDeleteFunctionTable((PRUNTIME_FUNCTION)((ULONG64)pvTableID | 3));
+}
+
+#else // WIN64EXCEPTIONS && !DACCESS_COMPILE && !CROSSGEN_COMPILE && !FEATURE_PAL
+
+#define InstallEEFunctionTable(pvTableID, pvStartRange, cbRange, pfnGetRuntimeFunctionCallback, pvContext, TableType) do { } while (0)
+#define DeleteEEFunctionTable(pvTableID) do { } while (0)
+
+#endif // WIN64EXCEPTIONS && !DACCESS_COMPILE && !CROSSGEN_COMPILE && !FEATURE_PAL
+
+
+#endif // !__RTLFUNCTIONS_H__
diff --git a/src/vm/runtimecallablewrapper.cpp b/src/vm/runtimecallablewrapper.cpp
new file mode 100644
index 0000000000..ef267c2201
--- /dev/null
+++ b/src/vm/runtimecallablewrapper.cpp
@@ -0,0 +1,5616 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Class: RCW
+**
+**
+** Purpose: The implementation of the ComObject class
+**
+
+===========================================================*/
+
+#include "common.h"
+
+#include <ole2.h>
+#include <inspectable.h>
+
+class Object;
+#include "vars.hpp"
+#include "object.h"
+#include "excep.h"
+#include "frames.h"
+#include "vars.hpp"
+#include "threads.h"
+#include "field.h"
+#include "runtimecallablewrapper.h"
+#include "hash.h"
+#include "interoputil.h"
+#include "comcallablewrapper.h"
+#include "eeconfig.h"
+#include "comdelegate.h"
+#include "comcache.h"
+#include "notifyexternals.h"
+#include "winrttypenameconverter.h"
+#include "../md/compiler/custattr.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "mdaassistants.h"
+#include "olevariant.h"
+#include "interopconverter.h"
+#include "constrainedexecutionregion.h"
+#ifdef FEATURE_REMOTING
+#include "crossdomaincalls.h"
+#endif
+#include "caparser.h"
+#include "classnames.h"
+#include "objectnative.h"
+#include "rcwwalker.h"
+#include "finalizerthread.h"
+
+// static
+SLIST_HEADER RCW::s_RCWStandbyList;
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+#include "olecontexthelpers.h"
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+#ifndef CROSSGEN_COMPILE
+
+void ComClassFactory::ThrowHRMsg(HRESULT hr, DWORD dwMsgResID)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ SString strMessage;
+ SString strResource;
+ WCHAR strClsid[39];
+ SString strHRDescription;
+
+    // Obtain the textual representation of the CLSID.
+ StringFromGUID2(m_rclsid, strClsid, sizeof(strClsid) / sizeof(WCHAR));
+
+ SString strHRHex;
+ strHRHex.Printf("%.8x", hr);
+
+ // Obtain the description of the HRESULT.
+ GetHRMsg(hr, strHRDescription);
+
+ // Load the appropriate resource and throw
+ COMPlusThrowHR(hr, dwMsgResID, strHRHex, strClsid, strHRDescription.GetUnicode());
+}
+
+//-------------------------------------------------------------
+// Common code for licensing
+//
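+// The flow below, summarized: QI the factory for IClassFactory2; if that fails,
+// the class is not licensed and we go through plain IClassFactory::CreateInstance.
+// If it succeeds, ask the managed LicenseInteropHelper for the current licensing
+// context: at design time, create the instance normally and stash the runtime
+// license key (RequestLicKey) back into the context; at runtime, if the context
+// carries a key, create the instance via IClassFactory2::CreateInstanceLic.
+// In every branch, a failed aggregated creation (punkOuter != NULL) is retried
+// as containment.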
+IUnknown *ComClassFactory::CreateInstanceFromClassFactory(IClassFactory *pClassFact, IUnknown *punkOuter, BOOL *pfDidContainment)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pClassFact));
+ PRECONDITION(CheckPointer(punkOuter, NULL_OK));
+ PRECONDITION(CheckPointer(pfDidContainment, NULL_OK));
+ PRECONDITION(CheckPointer(m_pClassMT, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ HRESULT hr = S_OK;
+ SafeComHolder<IClassFactory2> pClassFact2 = NULL;
+ SafeComHolder<IUnknown> pUnk = NULL;
+ BSTRHolder bstrKey = NULL;
+
+ Thread *pThread = GetThread();
+
+ // Does this support licensing?
+ if (FAILED(SafeQueryInterface(pClassFact, IID_IClassFactory2, (IUnknown**)&pClassFact2)))
+ {
+        // not a licensed class - just call CreateInstance the usual way.
+ // Create an instance of the object.
+ FrameWithCookie<DebuggerExitFrame> __def;
+ {
+ GCX_PREEMP();
+ {
+ LeaveRuntimeHolder lrh(**(size_t**)(IUnknown*)pClassFact);
+ hr = pClassFact->CreateInstance(punkOuter, IID_IUnknown, (void **)&pUnk);
+ }
+ if (FAILED(hr) && punkOuter)
+ {
+ LeaveRuntimeHolder lrh(**(size_t**)(IUnknown*)pClassFact);
+ hr = pClassFact->CreateInstance(NULL, IID_IUnknown, (void**)&pUnk);
+ if (pfDidContainment)
+ *pfDidContainment = TRUE;
+ }
+ }
+ __def.Pop();
+ }
+ else
+ {
+ if (m_pClassMT == NULL)
+ {
+ // Create an instance of the object.
+ FrameWithCookie<DebuggerExitFrame> __def;
+ {
+ GCX_PREEMP();
+ LeaveRuntimeHolder lrh(**(size_t**)(IUnknown*)pClassFact);
+ hr = pClassFact->CreateInstance(punkOuter, IID_IUnknown, (void **)&pUnk);
+ if (FAILED(hr) && punkOuter)
+ {
+ hr = pClassFact->CreateInstance(NULL, IID_IUnknown, (void**)&pUnk);
+ if (pfDidContainment)
+ *pfDidContainment = TRUE;
+ }
+ }
+ __def.Pop();
+ }
+ else
+ {
+ MethodTable *pHelperMT = pThread->GetDomain()->GetLicenseInteropHelperMethodTable();
+ MethodDesc *pMD = MemberLoader::FindMethod(pHelperMT, "GetCurrentContextInfo", &gsig_IM_LicenseInteropHelper_GetCurrentContextInfo);
+ MethodDescCallSite getCurrentContextInfo(pMD);
+
+ TypeHandle rth = TypeHandle(m_pClassMT);
+
+ struct _gc {
+ OBJECTREF pHelper;
+ OBJECTREF pType;
+ } gc;
+ gc.pHelper = NULL; // LicenseInteropHelper
+ gc.pType = NULL;
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.pHelper = pHelperMT->Allocate();
+ gc.pType = rth.GetManagedClassObject();
+
+ // First, crack open the current licensing context.
+ INT32 fDesignTime = 0;
+ ARG_SLOT args[4];
+ args[0] = ObjToArgSlot(gc.pHelper);
+ args[1] = (ARG_SLOT)&fDesignTime;
+ args[2] = (ARG_SLOT)(BSTR*)&bstrKey;
+ args[3] = ObjToArgSlot(gc.pType);
+
+ getCurrentContextInfo.Call(args);
+
+ if (fDesignTime)
+ {
+                // If design-time, we're supposed to obtain the runtime license key
+                // from the component and save it away in the license context
+                // (the design tool can then grab it and embed it into the
+                // app it's creating.)
+
+ if (bstrKey != NULL)
+ {
+ // It's illegal for our helper to return a non-null bstrKey
+ // when the context is design-time. But we'll try to do the
+                        // right thing anyway.
+ _ASSERTE(!"We're not supposed to get here, but we'll try to cope anyway.");
+ SysFreeString(bstrKey);
+ bstrKey = NULL;
+ }
+
+ {
+ GCX_PREEMP();
+ hr = pClassFact2->RequestLicKey(0, &bstrKey);
+ }
+
+ // E_NOTIMPL is not a true failure. It simply indicates that
+ // the component doesn't support a runtime license key.
+ if (hr == E_NOTIMPL)
+ hr = S_OK;
+
+ if (SUCCEEDED(hr))
+ {
+ MethodDesc *pMDSaveKey = MemberLoader::FindMethod(pHelperMT, "SaveKeyInCurrentContext", &gsig_IM_LicenseInteropHelper_SaveKeyInCurrentContext);
+ MethodDescCallSite saveKeyInCurrentContext(pMDSaveKey);
+
+ args[0] = ObjToArgSlot(gc.pHelper);
+ args[1] = (ARG_SLOT)(BSTR)bstrKey;
+ saveKeyInCurrentContext.Call(args);
+ }
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ FrameWithCookie<DebuggerExitFrame> __def;
+ {
+ GCX_PREEMP();
+
+ if (fDesignTime || bstrKey == NULL)
+ {
+ // Either it's design time, or the current context doesn't
+ // supply a runtime license key.
+ LeaveRuntimeHolder lrh(**(size_t**)(IUnknown*)pClassFact);
+ hr = pClassFact->CreateInstance(punkOuter, IID_IUnknown, (void **)&pUnk);
+ if (FAILED(hr) && punkOuter)
+ {
+ hr = pClassFact->CreateInstance(NULL, IID_IUnknown, (void**)&pUnk);
+ if (pfDidContainment)
+ *pfDidContainment = TRUE;
+ }
+ }
+ else
+ {
+ // It's runtime, and we do have a non-null license key.
+ _ASSERTE(bstrKey != NULL);
+ LeaveRuntimeHolder lrh(**(size_t**)(IUnknown*)pClassFact);
+ hr = pClassFact2->CreateInstanceLic(punkOuter, NULL, IID_IUnknown, bstrKey, (void**)&pUnk);
+ if (FAILED(hr) && punkOuter)
+ {
+ hr = pClassFact2->CreateInstanceLic(NULL, NULL, IID_IUnknown, bstrKey, (void**)&pUnk);
+ if (pfDidContainment)
+ *pfDidContainment = TRUE;
+ }
+
+ }
+ }
+ __def.Pop();
+ }
+
+ GCPROTECT_END();
+ }
+ }
+
+ if (FAILED(hr))
+ {
+ if (bstrKey == NULL)
+ ThrowHRMsg(hr, IDS_EE_CREATEINSTANCE_FAILED);
+ else
+ ThrowHRMsg(hr, IDS_EE_CREATEINSTANCE_LIC_FAILED);
+ }
+
+ pUnk.SuppressRelease();
+ RETURN pUnk;
+}
+
+
+//-------------------------------------------------------------
+// ComClassFactory::CreateAggregatedInstance(MethodTable* pMTClass, BOOL ForManaged)
+// create a COM+ instance that aggregates a COM instance
+
+OBJECTREF ComClassFactory::CreateAggregatedInstance(MethodTable* pMTClass, BOOL ForManaged)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pMTClass));
+ }
+ CONTRACTL_END;
+
+ BOOL fDidContainment = FALSE;
+
+#ifdef _DEBUG
+ // verify the class extends a COM import class
+ MethodTable * pMT = pMTClass;
+ do
+ {
+ pMT = pMT->GetParentMethodTable();
+ }
+    while (pMT != NULL && !pMT->IsComImport());
+ _ASSERTE(pMT != NULL);
+#endif
+
+ SafeComHolder<IUnknown> pOuter = NULL;
+ SafeComHolder<IClassFactory> pClassFact = NULL;
+ SafeComHolder<IUnknown> pUnk = NULL;
+
+ HRESULT hr = S_OK;
+ NewRCWHolder pNewRCW;
+ BOOL bUseDelegate = FALSE;
+
+ MethodTable *pCallbackMT = NULL;
+
+ OBJECTREF oref = NULL;
+ COMOBJECTREF cref = NULL;
+ GCPROTECT_BEGIN(cref)
+ {
+ cref = (COMOBJECTREF)ComObject::CreateComObjectRef(pMTClass);
+
+ //get wrapper for the object, this could enable GC
+ CCWHolder pComWrap = ComCallWrapper::InlineGetWrapper((OBJECTREF *)&cref);
+
+ // Make sure the ClassInitializer has run, since the user might have
+ // wanted to set up a COM object creation callback.
+ pMTClass->CheckRunClassInitThrowing();
+
+ // If the user is going to use a delegate to allocate the COM object
+ // (rather than CoCreateInstance), we need to know now, before we enable
+ // preemptive GC mode (since we touch object references in the
+ // determination).
+        // We don't just check the current class to see if it has a callback
+        // registered; we check up the class chain to see if any of our parents
+        // did.
+
+ pCallbackMT = pMTClass;
+ while ((pCallbackMT != NULL) &&
+ (pCallbackMT->GetObjCreateDelegate() == NULL) &&
+ !pCallbackMT->IsComImport())
+ {
+ pCallbackMT = pCallbackMT->GetParentMethodTable();
+ }
+
+ if (pCallbackMT && !pCallbackMT->IsComImport())
+ bUseDelegate = TRUE;
+
+ FrameWithCookie<DebuggerExitFrame> __def;
+
+ // get the IUnknown interface for the managed object
+ pOuter = ComCallWrapper::GetComIPFromCCW(pComWrap, IID_IUnknown, NULL);
+ _ASSERTE(pOuter != NULL);
+
+ // If the user has set a delegate to allocate the COM object, use it.
+ // Otherwise we just CoCreateInstance it.
+ if (bUseDelegate)
+ {
+ ARG_SLOT args[2];
+
+ OBJECTREF orDelegate = pCallbackMT->GetObjCreateDelegate();
+ MethodDesc *pMeth = COMDelegate::GetMethodDesc(orDelegate);
+
+ GCPROTECT_BEGIN(orDelegate)
+ {
+ _ASSERTE(pMeth);
+ MethodDescCallSite delegateMethod(pMeth, &orDelegate);
+
+ // Get the OR on which we are going to invoke the method and set it
+ // as the first parameter in arg above.
+ args[0] = (ARG_SLOT)OBJECTREFToObject(COMDelegate::GetTargetObject(orDelegate));
+
+ // Pass the IUnknown of the aggregator as the second argument.
+ args[1] = (ARG_SLOT)(IUnknown*)pOuter;
+
+ // Call the method...
+ pUnk = (IUnknown *)delegateMethod.Call_RetArgSlot(args);
+ if (!pUnk)
+ COMPlusThrowHR(E_FAIL);
+ }
+ GCPROTECT_END();
+ }
+ else
+ {
+ _ASSERTE(m_pClassMT);
+ pUnk = CreateInstanceInternal(pOuter, &fDidContainment);
+ }
+
+ __def.Pop();
+
+ // give up the extra addref that we did in our QI and suppress the auto-release.
+ pComWrap->Release();
+ pComWrap.SuppressRelease();
+
+ // Here's the scary part. If we are doing a managed 'new' of the aggregator,
+ // then COM really isn't involved. We should not be counting for our caller
+ // because our caller relies on GC references rather than COM reference counting
+ // to keep us alive.
+ //
+ // Drive the instances count down to 0 -- and rely on the GCPROTECT to keep us
+ // alive until we get back to our caller.
+ if (ForManaged)
+ pComWrap->Release();
+
+ RCWCache* pCache = RCWCache::GetRCWCache();
+
+ _ASSERTE(cref->GetSyncBlock()->IsPrecious()); // the object already has a CCW
+ DWORD dwSyncBlockIndex = cref->GetSyncBlockIndex();
+
+ // create a wrapper for this COM object
+ pNewRCW = RCW::CreateRCW(pUnk, dwSyncBlockIndex, RCW::CF_None, pMTClass);
+
+ RCWHolder pRCW(GetThread());
+ pRCW.InitNoCheck(pNewRCW);
+
+ // we used containment
+ // we need to store this wrapper in our hash table
+ {
+ RCWCache::LockHolder lh(pCache);
+
+ GCX_FORBID();
+
+ BOOL fInserted = pCache->FindOrInsertWrapper_NoLock(pUnk, &pRCW, /* fAllowReInit = */ FALSE);
+ if (!fInserted)
+ {
+            // OK. Looks like the factory returned a singleton on us and the cache already
+            // has an entry for this pIdentity. This can only happen in the containment
+            // scenario, never for aggregation.
+            // In this case, we should insert the new RCW into the cache as a unique RCW,
+            // because these are separate objects and we need two separate RCWs with
+            // different flags (contained, impossible to be aggregated) pointing
+            // to them separately.
+ pNewRCW->m_pIdentity = pNewRCW;
+
+ fInserted = pCache->FindOrInsertWrapper_NoLock((IUnknown*)pNewRCW->m_pIdentity, &pRCW, /* fAllowReInit = */ FALSE);
+ _ASSERTE(fInserted);
+ }
+ }
+
+ if (fDidContainment)
+ {
+ // mark the wrapper as contained
+ pRCW->MarkURTContained();
+ }
+ else
+ {
+ // mark the wrapper as aggregated
+ pRCW->MarkURTAggregated();
+ }
+
+        // pUnk has to be released inside the GC-protected block and before oref
+        // is assigned its value, because the release could trigger a GC
+ SafeRelease(pUnk);
+ pUnk.SuppressRelease();
+
+ // If the object was created successfully then we need to copy the OBJECTREF
+ // to oref because the GCPROTECT_END() will destroy the contents of cref.
+ oref = ObjectToOBJECTREF(*(Object **)&cref);
+ }
+ GCPROTECT_END();
+
+ if (oref != NULL)
+ {
+ pOuter.SuppressRelease();
+ pClassFact.SuppressRelease();
+ pNewRCW.SuppressRelease();
+ }
+
+ return oref;
+}
+
+//--------------------------------------------------------------
+// Create instance using IClassFactory
+// Overridable
+IUnknown *ComClassFactory::CreateInstanceInternal(IUnknown *pOuter, BOOL *pfDidContainment)
+{
+ CONTRACT(IUnknown *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pOuter, NULL_OK));
+ PRECONDITION(CheckPointer(pfDidContainment, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ SafeComHolder<IClassFactory> pClassFactory = GetIClassFactory();
+ RETURN CreateInstanceFromClassFactory(pClassFactory, pOuter, pfDidContainment);
+}
+
+IClassFactory *ComClassFactory::GetIClassFactory()
+{
+ CONTRACT(IClassFactory *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ HRESULT hr = S_OK;
+ IClassFactory *pClassFactory = NULL;
+
+ GCX_PREEMP();
+
+ // If a server name is specified, then first try CLSCTX_REMOTE_SERVER.
+ if (m_pwszServer)
+ {
+ // Set up the COSERVERINFO struct.
+ COSERVERINFO ServerInfo;
+ memset(&ServerInfo, 0, sizeof(COSERVERINFO));
+ ServerInfo.pwszName = m_pwszServer;
+
+ // Try to retrieve the IClassFactory passing in CLSCTX_REMOTE_SERVER.
+ LeaveRuntimeHolder lrh((size_t)CoGetClassObject);
+ hr = CoGetClassObject(m_rclsid, CLSCTX_REMOTE_SERVER, &ServerInfo, IID_IClassFactory, (void**)&pClassFactory);
+ }
+ else
+ {
+ // No server name is specified so we use CLSCTX_SERVER.
+ LeaveRuntimeHolder lrh((size_t)CoGetClassObject);
+
+#ifdef FEATURE_CLASSIC_COMINTEROP
+ // If the CLSID is hosted by the CLR itself, then we do not want to go through the COM registration
+ // entries, as this will trigger our COM activation code that may not activate against this runtime.
+ // In this scenario, we want to get the address of the DllGetClassObject method on this CLR or a DLL
+ // that lives in the same directory as the CLR and use it directly. The code falls back to
+ // CoGetClassObject if we fail on the call to DllGetClassObject, but it might be better to fail outright.
+ if (Clr::Util::Com::CLSIDHasMscoreeAsInprocServer32(m_rclsid))
+ {
+ typedef HRESULT (STDMETHODCALLTYPE *PDllGetClassObject)(REFCLSID rclsid, REFIID riid, LPVOID FAR *ppv);
+
+ StackSString ssServer;
+ if (FAILED(Clr::Util::Com::FindServerUsingCLSID(m_rclsid, ssServer)))
+ {
+#ifndef FEATURE_CORECLR
+ // If there is no server entry, then that implies the CLSID could be implemented by CLR.DLL itself,
+ // if the CLSID is one of the special ones implemented by the CLR. We need to check against the
+ // specific list of CLSIDs here because CLR.DLL-implemented CLSIDs and managed class-implemented
+            // CLSIDs look the same until you start iterating the subkeys. For now, the set of CLSIDs implemented
+ // by CLR.DLL is a short and tractable list, but at some point it might become worthwhile to move over
+ // to the more generalized solution of looking for the entries that identify when the CLSID is
+ // implemented by a managed type to avoid having to maintain the hardcoded list.
+ if (IsClrHostedLegacyComObject(m_rclsid))
+ {
+ PDllGetClassObject pFN = NULL;
+ hr = g_pCLRRuntime->GetProcAddress("DllGetClassObjectInternal", reinterpret_cast<void**>(&pFN));
+
+ if (FAILED(hr))
+ hr = g_pCLRRuntime->GetProcAddress("DllGetClassObject", reinterpret_cast<void**>(&pFN));
+
+ if (SUCCEEDED(hr))
+ hr = pFN(m_rclsid, IID_IClassFactory, (void**)&pClassFactory);
+ }
+#endif
+ }
+ else
+ {
+#ifndef FEATURE_CORECLR
+ // @CORESYSTODO: ?
+
+ // There is a SxS DLL that implements this CLSID.
+            // NOTE: It is standard practice for RCWs and P/Invokes to leak their module handles,
+ // as there is no automated mechanism for the runtime to call CanUnloadDllNow.
+ HMODULE hServer = NULL;
+ if (SUCCEEDED(hr = g_pCLRRuntime->LoadLibrary(ssServer.GetUnicode(), &hServer)))
+ {
+ PDllGetClassObject pFN = reinterpret_cast<PDllGetClassObject>(GetProcAddress(hServer, "DllGetClassObject"));
+ if (pFN != NULL)
+ {
+ hr = pFN(m_rclsid, IID_IClassFactory, (void**)&pClassFactory);
+ }
+ else
+ {
+ hr = HRESULT_FROM_GetLastError();
+ }
+ }
+#endif
+ }
+ }
+#endif // FEATURE_CLASSIC_COMINTEROP
+
+ if (pClassFactory == NULL)
+ hr = CoGetClassObject(m_rclsid, CLSCTX_SERVER, NULL, IID_IClassFactory, (void**)&pClassFactory);
+ }
+
+ // If we failed to obtain the IClassFactory, throw an exception with rich information
+ // explaining the failure.
+ if (FAILED(hr))
+ {
+ SString strMessage;
+ SString strResource;
+ WCHAR strClsid[39];
+ SString strHRDescription;
+
+        // Obtain the textual representation of the CLSID.
+ StringFromGUID2(m_rclsid, strClsid, sizeof(strClsid) / sizeof(WCHAR));
+
+ SString strHRHex;
+ strHRHex.Printf("%.8x", hr);
+
+ // Obtain the description of the HRESULT.
+ GetHRMsg(hr, strHRDescription);
+
+ // Throw the actual exception indicating we couldn't find the class factory.
+ if (m_pwszServer == NULL)
+ COMPlusThrowHR(hr, IDS_EE_LOCAL_COGETCLASSOBJECT_FAILED, strHRHex, strClsid, strHRDescription.GetUnicode());
+ else
+ COMPlusThrowHR(hr, IDS_EE_REMOTE_COGETCLASSOBJECT_FAILED, strHRHex, strClsid, m_pwszServer, strHRDescription.GetUnicode());
+ }
+
+ RETURN pClassFactory;
+}
+
+//-------------------------------------------------------------
+// ComClassFactory::CreateInstance()
+// create instance, calls IClassFactory::CreateInstance
+OBJECTREF ComClassFactory::CreateInstance(MethodTable* pMTClass, BOOL ForManaged)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pMTClass, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Check for aggregates
+ if (pMTClass != NULL && !pMTClass->IsComImport())
+ return CreateAggregatedInstance(pMTClass, ForManaged);
+
+ HRESULT hr = S_OK;
+ OBJECTREF coref = NULL;
+ OBJECTREF RetObj = NULL;
+
+ GCPROTECT_BEGIN(coref)
+ {
+ {
+ SafeComHolder<IUnknown> pUnk = NULL;
+ SafeComHolder<IClassFactory> pClassFact = NULL;
+
+ // Create the instance
+ pUnk = CreateInstanceInternal(NULL, NULL);
+
+ // Even though we just created the object, it's possible that we got back a context
+ // wrapper from the COM side. For instance, it could have been an existing object
+ // or it could have been created in a different context than we are running in.
+
+            // pMTClass is the class that wraps the COM interface pointer.
+            // If a class was passed in, use it;
+            // otherwise use the class that we know.
+ if (pMTClass == NULL)
+ pMTClass = m_pClassMT;
+
+ GetObjectRefFromComIP(&coref, pUnk, pMTClass);
+
+ if (coref == NULL)
+ COMPlusThrowOM();
+ }
+
+ // Set the value of the return object after the COM guys are cleaned up.
+ RetObj = coref;
+ }
+ GCPROTECT_END();
+
+ return RetObj;
+}
+#endif //#ifndef CROSSGEN_COMPILE
+
+//--------------------------------------------------------------
+// Init the ComClassFactory.
+void ComClassFactory::Init(__in_opt __in_z WCHAR* pwszProgID, __in_opt __in_z WCHAR* pwszServer, MethodTable* pClassMT)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pwszProgID = pwszProgID;
+ m_pwszServer = pwszServer;
+ _ASSERTE(pClassMT == NULL || !pClassMT->Collectible());
+ m_pClassMT = pClassMT;
+}
+
+//-------------------------------------------------------------
+void ComClassFactory::Cleanup()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_bManagedVersion)
+ return;
+
+ if (m_pwszProgID != NULL)
+ delete [] m_pwszProgID;
+
+ if (m_pwszServer != NULL)
+ delete [] m_pwszServer;
+
+ delete this;
+}
+
+#if defined(FEATURE_APPX) && !defined(CROSSGEN_COMPILE)
+//-------------------------------------------------------------
+// Create instance using CoCreateInstanceFromApp.
+// CoCreateInstanceFromApp is a new Windows 8 API that only
+// allows creating COM objects (not WinRT objects) that are on
+// the allow list.
+// Note: We don't QI for IClassFactory2 in this case as it is not
+// supported in the modern SDK.
+IUnknown *AppXComClassFactory::CreateInstanceInternal(IUnknown *pOuter, BOOL *pfDidContainment)
+{
+ CONTRACT(IUnknown *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pOuter, NULL_OK));
+ PRECONDITION(CheckPointer(pfDidContainment, NULL_OK));
+ PRECONDITION(AppX::IsAppXProcess());
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ GCX_PREEMP();
+
+ MULTI_QI multiQI;
+ ::ZeroMemory(&multiQI, sizeof(MULTI_QI));
+ multiQI.pIID = &IID_IUnknown;
+
+ HRESULT hr;
+
+#ifdef FEATURE_CORESYSTEM
+ // This works around a bug in the Windows 7 loader that prevents us from loading the
+ // forwarder for this function
+ typedef HRESULT (*CoCreateInstanceFromAppFnPtr) (REFCLSID rclsid, IUnknown *punkOuter, DWORD dwClsCtx,
+ void *reserved, DWORD dwCount, MULTI_QI *pResults);
+
+ static CoCreateInstanceFromAppFnPtr CoCreateInstanceFromApp = NULL;
+ if (NULL == CoCreateInstanceFromApp)
+ {
+ HMODULE hmod = LoadLibraryExW(W("api-ms-win-core-com-l1-1-1.dll"), NULL, 0);
+
+ if (hmod)
+ CoCreateInstanceFromApp = (CoCreateInstanceFromAppFnPtr)GetProcAddress(hmod, "CoCreateInstanceFromApp");
+ }
+
+ if (NULL == CoCreateInstanceFromApp)
+ {
+ // This shouldn't happen
+ _ASSERTE(false);
+ IfFailThrow(E_FAIL);
+ }
+#endif
+
+ LeaveRuntimeHolder lrh((size_t)CoCreateInstanceFromApp);
+
+ if (m_pwszServer)
+ {
+ //
+ // Remote server activation
+ //
+ COSERVERINFO ServerInfo;
+ ::ZeroMemory(&ServerInfo, sizeof(COSERVERINFO));
+ ServerInfo.pwszName = m_pwszServer;
+
+ hr = CoCreateInstanceFromApp(
+ m_rclsid,
+ pOuter,
+ CLSCTX_REMOTE_SERVER,
+ &ServerInfo,
+ 1,
+ &multiQI);
+ if (FAILED(hr) && pOuter)
+ {
+ //
+ // Aggregation attempt failed. Retry containment
+ //
+ hr = CoCreateInstanceFromApp(
+ m_rclsid,
+ NULL,
+ CLSCTX_REMOTE_SERVER,
+ &ServerInfo,
+ 1,
+ &multiQI);
+ if (pfDidContainment)
+ *pfDidContainment = TRUE;
+ }
+ }
+ else
+ {
+ //
+ // Normal activation
+ //
+ hr = CoCreateInstanceFromApp(
+ m_rclsid,
+ pOuter,
+ CLSCTX_SERVER,
+ NULL,
+ 1,
+ &multiQI);
+ if (FAILED(hr) && pOuter)
+ {
+ //
+ // Aggregation attempt failed. Retry containment
+ //
+ hr = CoCreateInstanceFromApp(
+ m_rclsid,
+ NULL,
+ CLSCTX_SERVER,
+ NULL,
+ 1,
+ &multiQI);
+ if (pfDidContainment)
+ *pfDidContainment = TRUE;
+ }
+ }
+
+ if (FAILED(hr))
+ ThrowHRMsg(hr, IDS_EE_CREATEINSTANCEFROMAPP_FAILED);
+ if (FAILED(multiQI.hr))
+ ThrowHRMsg(multiQI.hr, IDS_EE_CREATEINSTANCEFROMAPP_FAILED);
+
+ RETURN multiQI.pItf;
+}
+#endif //FEATURE_APPX
+
+//-------------------------------------------------------------
+MethodTable *WinRTClassFactory::GetTypeFromAttribute(IMDInternalImport *pImport, mdCustomAttribute tkAttribute)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // get raw custom attribute
+ const BYTE *pbAttr = NULL;
+ ULONG cbAttr = 0;
+ IfFailThrowBF(pImport->GetCustomAttributeAsBlob(tkAttribute, (const void **)&pbAttr, &cbAttr), BFA_INVALID_TOKEN, m_pClassMT->GetModule());
+
+ CustomAttributeParser cap(pbAttr, cbAttr);
+ IfFailThrowBF(cap.ValidateProlog(), BFA_BAD_CA_HEADER, m_pClassMT->GetModule());
+
+ // retrieve the factory interface name
+ LPCUTF8 szName;
+ ULONG cbName;
+ IfFailThrow(cap.GetNonNullString(&szName, &cbName));
+
+ // copy the name to a temporary buffer and NULL terminate it
+ StackSString ss(SString::Utf8, szName, cbName);
+
+ // load the factory interface
+ return TypeName::GetTypeUsingCASearchRules(ss.GetUnicode(), m_pClassMT->GetAssembly()).GetMethodTable();
+}
+
+//-------------------------------------------------------------
+// Returns true if the first parameter of the CA's method ctor is a System.Type
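+// The parser walks the ctor signature blob in order: calling convention (throws
+// unless IMAGE_CEE_CS_CALLCONV_HASTHIS), parameter count (returns FALSE if zero),
+// return type (throws unless ELEMENT_TYPE_VOID), then the first parameter, which
+// must be ELEMENT_TYPE_CLASS referring via a TypeRef to "System"."Type" for the
+// function to return TRUE.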
+static BOOL AttributeFirstParamIsSystemType(mdCustomAttribute tkAttribute, IMDInternalImport *pImport)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pImport));
+ }
+ CONTRACTL_END;
+
+ mdToken ctorToken;
+ IfFailThrow(pImport->GetCustomAttributeProps(tkAttribute, &ctorToken));
+
+ LPCSTR ctorName;
+ PCCOR_SIGNATURE ctorSig;
+ ULONG cbCtorSig;
+
+ if (TypeFromToken(ctorToken) == mdtMemberRef)
+ {
+ IfFailThrow(pImport->GetNameAndSigOfMemberRef(ctorToken, &ctorSig, &cbCtorSig, &ctorName));
+ }
+ else if (TypeFromToken(ctorToken) == mdtMethodDef)
+ {
+ IfFailThrow(pImport->GetNameAndSigOfMethodDef(ctorToken, &ctorSig, &cbCtorSig, &ctorName));
+ }
+ else
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ SigParser sigParser(ctorSig, cbCtorSig);
+
+ ULONG callingConvention;
+ IfFailThrow(sigParser.GetCallingConvInfo(&callingConvention));
+ if (callingConvention != IMAGE_CEE_CS_CALLCONV_HASTHIS)
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+
+ ULONG cParameters;
+ IfFailThrow(sigParser.GetData(&cParameters));
+ if (cParameters < 1)
+ {
+ return FALSE;
+ }
+
+    BYTE returnElementType;
+    IfFailThrow(sigParser.GetByte(&returnElementType));
+    if (returnElementType != ELEMENT_TYPE_VOID)
+    {
+        ThrowHR(COR_E_BADIMAGEFORMAT);
+    }
+
+ BYTE paramElementType;
+ IfFailThrow(sigParser.GetByte(&paramElementType));
+ if (paramElementType != ELEMENT_TYPE_CLASS)
+ {
+ return FALSE;
+ }
+
+ mdToken paramTypeToken;
+ IfFailThrow(sigParser.GetToken(&paramTypeToken));
+
+ if (TypeFromToken(paramTypeToken) != mdtTypeRef)
+ {
+ return FALSE;
+ }
+
+ LPCSTR paramTypeNamespace;
+ LPCSTR paramTypeName;
+ IfFailThrow(pImport->GetNameOfTypeRef(paramTypeToken, &paramTypeNamespace, &paramTypeName));
+ if (strcmp("System", paramTypeNamespace) != 0 || strcmp("Type", paramTypeName) != 0)
+ {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+//-------------------------------------------------------------
+void WinRTClassFactory::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ IMDInternalImport *pImport = m_pClassMT->GetMDImport();
+
+ {
+ // Sealed classes may have Windows.Foundation.Activatable attributes. Such classes must be sealed, because we'd
+ // have no way to use their ctor from a derived class (no composition)
+ // Unsealed classes may have Windows.Foundation.Composable attributes. These are currently mutually exclusive, but we
+ // may need to relax this in the future for versioning reasons (so a class can be unsealed in a new version without
+ // being binary breaking).
+ // Note that we just ignore activation attributes if they occur on the wrong type of class
+        LPCSTR attributeName;
+ if (IsComposition())
+ {
+ attributeName = g_WindowsFoundationComposableAttributeClassName;
+ }
+ else
+ {
+ attributeName = g_WindowsFoundationActivatableAttributeClassName;
+ }
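+
+        // For reference, these activation attributes have roughly the following
+        // shape in metadata (illustrative only; IFooFactory is a hypothetical
+        // factory interface):
+        //   [Windows.Foundation.Metadata.Activatable(typeof(IFooFactory), 1)]
+        //   [Windows.Foundation.Metadata.Composable(typeof(IFooFactory), ...)]
+        // Only instances whose first ctor argument is a System.Type name a
+        // factory interface; other overloads are skipped by the
+        // AttributeFirstParamIsSystemType check below.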
+
+ MDEnumHolder hEnum(pImport);
+
+ // find and parse all WindowsFoundationActivatableAttribute/WindowsFoundationComposableAttribute attributes
+ hr = pImport->EnumCustomAttributeByNameInit(m_pClassMT->GetCl(), attributeName, &hEnum);
+ IfFailThrow(hr);
+
+ if (hr == S_OK) // there are factory interfaces
+ {
+ mdCustomAttribute tkAttribute;
+ while (pImport->EnumNext(&hEnum, &tkAttribute))
+ {
+ if (!AttributeFirstParamIsSystemType(tkAttribute, pImport))
+ {
+ // The first parameter of the Composable/Activatable attribute is not a System.Type
+ // and therefore the attribute does not specify a factory interface so we ignore the attribute
+ continue;
+ }
+ // get raw custom attribute
+ const BYTE *pbAttr = NULL;
+ ULONG cbAttr = 0;
+ IfFailThrowBF(pImport->GetCustomAttributeAsBlob(tkAttribute, (const void **)&pbAttr, &cbAttr), BFA_INVALID_TOKEN, m_pClassMT->GetModule());
+ CustomAttributeParser cap(pbAttr, cbAttr);
+ IfFailThrowBF(cap.ValidateProlog(), BFA_BAD_CA_HEADER, m_pClassMT->GetModule());
+
+ // The activation factory interface is stored in the attribute by type name
+ LPCUTF8 szFactoryInterfaceName;
+ ULONG cbFactoryInterfaceName;
+ IfFailThrow(cap.GetNonNullString(&szFactoryInterfaceName, &cbFactoryInterfaceName));
+
+ StackSString strFactoryInterface(SString::Utf8, szFactoryInterfaceName, cbFactoryInterfaceName);
+ MethodTable *pMTFactoryInterface = GetWinRTType(&strFactoryInterface, /* bThrowIfNotFound = */ TRUE).GetMethodTable();
+
+ _ASSERTE(pMTFactoryInterface);
+ m_factoryInterfaces.Append(pMTFactoryInterface);
+ }
+ }
+ }
+
+ {
+ // find and parse all Windows.Foundation.Static attributes
+ MDEnumHolder hEnum(pImport);
+ hr = pImport->EnumCustomAttributeByNameInit(m_pClassMT->GetCl(), g_WindowsFoundationStaticAttributeClassName, &hEnum);
+ IfFailThrow(hr);
+
+ if (hr == S_OK) // there are static interfaces
+ {
+ mdCustomAttribute tkAttribute;
+ while (pImport->EnumNext(&hEnum, &tkAttribute))
+ {
+ if (!AttributeFirstParamIsSystemType(tkAttribute, pImport))
+ {
+ // The first parameter of the Static attribute is not a System.Type
+ // and therefore the attribute does not specify a factory interface so we ignore the attribute
+ continue;
+ }
+
+ const BYTE *pbAttr = NULL;
+ ULONG cbAttr = 0;
+ IfFailThrowBF(pImport->GetCustomAttributeAsBlob(tkAttribute, (const void **)&pbAttr, &cbAttr), BFA_INVALID_TOKEN, m_pClassMT->GetModule());
+
+ CustomAttributeParser cap(pbAttr, cbAttr);
+ IfFailThrowBF(cap.ValidateProlog(), BFA_BAD_CA_HEADER, m_pClassMT->GetModule());
+
+ // retrieve the factory interface name
+ LPCUTF8 szName;
+ ULONG cbName;
+ IfFailThrow(cap.GetNonNullString(&szName, &cbName));
+
+ // copy the name to a temporary buffer and NULL terminate it
+ StackSString ss(SString::Utf8, szName, cbName);
+ TypeHandle th = GetWinRTType(&ss, /* bThrowIfNotFound = */ TRUE);
+
+ MethodTable *pMTStaticInterface = th.GetMethodTable();
+ m_staticInterfaces.Append(pMTStaticInterface);
+ }
+ }
+ }
+
+ {
+
+        // Special case (not pretty): WinMD types require you to put DefaultAttribute on an
+        // interfaceImpl to mark the interface as the default interface. But C# doesn't allow
+        // you to do that, so we have to do it manually here.
+ MethodTable* pAsyncTracingEventArgsMT = MscorlibBinder::GetClass(CLASS__ASYNC_TRACING_EVENT_ARGS);
+ if(pAsyncTracingEventArgsMT == m_pClassMT)
+ {
+ m_pDefaultItfMT = MscorlibBinder::GetClass(CLASS__IASYNC_TRACING_EVENT_ARGS);
+ }
+ else
+ {
+ // parse the DefaultAttribute to figure out the default interface of the class
+ HENUMInternalHolder hEnumInterfaceImpl(pImport);
+ hEnumInterfaceImpl.EnumInit(mdtInterfaceImpl, m_pClassMT->GetCl());
+
+ DWORD cInterfaces = pImport->EnumGetCount(&hEnumInterfaceImpl);
+ if (cInterfaces != 0)
+ {
+ mdInterfaceImpl ii;
+ while (pImport->EnumNext(&hEnumInterfaceImpl, &ii))
+ {
+ const BYTE *pbAttr;
+ ULONG cbAttr;
+ HRESULT hr = pImport->GetCustomAttributeByName(ii, g_WindowsFoundationDefaultClassName, (const void **)&pbAttr, &cbAttr);
+ IfFailThrow(hr);
+ if (hr == S_OK)
+ {
+ mdToken typeRefOrDefOrSpec;
+ IfFailThrow(pImport->GetTypeOfInterfaceImpl(ii, &typeRefOrDefOrSpec));
+
+ TypeHandle th = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(
+ m_pClassMT->GetModule(),
+ typeRefOrDefOrSpec,
+ NULL,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef,
+ ClassLoader::LoadTypes, CLASS_LOAD_EXACTPARENTS);
+
+ m_pDefaultItfMT = th.GetMethodTable();
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ // initialize m_hClassName
+ InlineSString<DEFAULT_NONSTACK_CLASSNAME_SIZE> ssClassName;
+ m_pClassMT->_GetFullyQualifiedNameForClass(ssClassName);
+
+#ifndef CROSSGEN_COMPILE
+ if (!GetAppDomain()->IsCompilationDomain())
+ {
+ // don't bother creating the HSTRING when NGENing - we may run on downlevel
+ IfFailThrow(WindowsCreateString(ssClassName.GetUnicode(), ssClassName.GetCount(), &m_hClassName));
+ }
+#endif
+
+ if (ssClassName.BeginsWith(SL(W("Windows."))))
+ {
+ // parse the GCPressureAttribute only on first party runtime classes
+ const BYTE *pVal = NULL;
+ ULONG cbVal = 0;
+
+ if (S_OK == pImport->GetCustomAttributeByName(m_pClassMT->GetCl(), g_WindowsFoundationGCPressureAttributeClassName, (const void **)&pVal, &cbVal))
+ {
+ CustomAttributeParser cap(pVal, cbVal);
+ CaNamedArg namedArgs[1];
+
+ // First, the void constructor
+ IfFailThrow(ParseKnownCaArgs(cap, NULL, 0));
+
+ // Then, find the named argument
+ namedArgs[0].InitI4FieldEnum("amount", "Windows.Foundation.Metadata.GCPressureAmount", -1);
+
+ IfFailThrow(ParseKnownCaNamedArgs(cap, namedArgs, lengthof(namedArgs)));
+
+ static_assert(RCW::GCPressureSize_WinRT_Medium == RCW::GCPressureSize_WinRT_Low + 1, "RCW::GCPressureSize does not match Windows.Foundation.Metadata.GCPressureAmount");
+ static_assert(RCW::GCPressureSize_WinRT_High == RCW::GCPressureSize_WinRT_Medium + 1, "RCW::GCPressureSize does not match Windows.Foundation.Metadata.GCPressureAmount");
+
+ int amount = namedArgs[0].val.i4;
+ if (amount >= 0 && amount < (RCW::GCPressureSize_COUNT - RCW::GCPressureSize_WinRT_Low))
+ {
+ m_GCPressure = (RCW::GCPressureSize)(amount + RCW::GCPressureSize_WinRT_Low);
+ }
+ }
+ }
+}
+
+//-------------------------------------------------------------
+MethodDesc *WinRTClassFactory::FindFactoryMethod(PCCOR_SIGNATURE pSig, DWORD cSig, Module *pModule)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pSig));
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ COUNT_T count = m_factoryInterfaces.GetCount();
+ for (UINT i = 0; i < count; i++)
+ {
+ MethodTable *pMT = m_factoryInterfaces[i];
+
+ MethodDesc *pMD = MemberLoader::FindMethod(pMT, "", pSig, cSig, pModule, MemberLoader::FM_IgnoreName);
+ if (pMD != NULL)
+ {
+ return pMD;
+ }
+ }
+
+ return NULL;
+}
+
+//-------------------------------------------------------------
+MethodDesc *WinRTClassFactory::FindStaticMethod(LPCUTF8 pszName, PCCOR_SIGNATURE pSig, DWORD cSig, Module *pModule)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pszName));
+ PRECONDITION(CheckPointer(pSig));
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ COUNT_T count = m_staticInterfaces.GetCount();
+ for (UINT i = 0; i < count; i++)
+ {
+ MethodTable *pMT = m_staticInterfaces[i];
+
+ MethodDesc *pMD = MemberLoader::FindMethod(pMT, pszName, pSig, cSig, pModule);
+ if (pMD != NULL)
+ {
+ return pMD;
+ }
+ }
+
+ return NULL;
+}
+
+//-------------------------------------------------------------
+void WinRTClassFactory::Cleanup()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_hClassName != NULL)
+ {
+ // HSTRING has been created, which means combase should have been loaded.
+ // Delay load will not fail.
+ _ASSERTE(WszGetModuleHandle(W("combase.dll")) != NULL);
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+#ifndef CROSSGEN_COMPILE
+ WindowsDeleteString(m_hClassName);
+#endif
+ }
+ delete m_pWinRTOverrideInfo;
+ delete this;
+}
+
+//-------------------------------------------------------------
+void WinRTManagedClassFactory::Cleanup()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pCCWTemplate != NULL)
+ {
+ m_pCCWTemplate->Release();
+ m_pCCWTemplate = NULL;
+ }
+
+ WinRTClassFactory::Cleanup(); // deletes 'this'
+}
+#ifndef CROSSGEN_COMPILE
+//-------------------------------------------------------------
+ComCallWrapperTemplate *WinRTManagedClassFactory::GetOrCreateComCallWrapperTemplate(MethodTable *pFactoryMT)
+{
+ CONTRACT (ComCallWrapperTemplate *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pFactoryMT));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ if (m_pCCWTemplate == NULL)
+ {
+ ComCallWrapperTemplate::CreateTemplate(TypeHandle(pFactoryMT), this);
+ }
+
+ RETURN m_pCCWTemplate;
+}
+#endif // CROSSGEN_COMPILE
+
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+#ifndef CROSSGEN_COMPILE
+//---------------------------------------------------------------------
+// RCW cache, act as the manager for the RCWs
+// uses a hash table to map IUnknown to the corresponding wrappers
+//---------------------------------------------------------------------
+
+// Obtain the appropriate wrapper cache from the current context.
+RCWCache* RCWCache::GetRCWCache()
+{
+ CONTRACT (RCWCache*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ AppDomain * pDomain = GetAppDomain();
+ RETURN (pDomain ? pDomain->GetRCWCache() : NULL);
+}
+
+RCWCache* RCWCache::GetRCWCacheNoCreate()
+{
+ CONTRACT (RCWCache*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ AppDomain * pDomain = GetAppDomain();
+ RETURN (pDomain ? pDomain->GetRCWCacheNoCreate() : NULL);
+}
+
+
+//---------------------------------------------------------------------
+// Constructor.
+RCWCache::RCWCache(AppDomain *pDomain)
+ : m_lock(CrstRCWCache, CRST_UNSAFE_COOPGC)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pDomain));
+ }
+ CONTRACTL_END;
+
+ m_pDomain = pDomain;
+}
+
+// Look up to see if we already have a valid wrapper in the cache for this IUnk.
+// DOES NOT hold a lock inside the function - locking on the caller side IS REQUIRED.
+void RCWCache::FindWrapperInCache_NoLock(IUnknown* pIdentity, RCWHolder* pRCW)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pIdentity));
+ PRECONDITION(CheckPointer(pRCW));
+ }
+ CONTRACTL_END;
+
+ // lookup in our hash table
+ LookupWrapper(pIdentity, pRCW);
+
+ // check if we found the wrapper,
+ if (!pRCW->IsNull())
+ {
+ if ((*pRCW)->IsValid())
+ {
+ if ((*pRCW)->IsDetached())
+ {
+ _ASSERTE((LPVOID)pIdentity != (LPVOID)pRCW->GetRawRCWUnsafe()); // we should never find "unique" RCWs
+
+ // remove and re-insert the RCW using its unique identity
+ RemoveWrapper(pRCW);
+ (*pRCW)->m_pIdentity = (LPVOID)pRCW->GetRawRCWUnsafe();
+ InsertWrapper(pRCW);
+
+ pRCW->UnInit();
+ }
+ else
+ {
+ // addref the wrapper
+ (*pRCW)->AddRef(this);
+ }
+ }
+ else
+ {
+ pRCW->UnInit();
+ }
+ }
+
+ return;
+}
+
+BOOL RCWCache::FindOrInsertWrapper_NoLock(IUnknown* pIdentity, RCWHolder* pRCW, BOOL fAllowReinit)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pIdentity));
+ PRECONDITION(pIdentity != (IUnknown*)-1);
+ PRECONDITION(CheckPointer(pRCW));
+ PRECONDITION(CheckPointer(pRCW->GetRawRCWUnsafe()));
+ }
+ CONTRACTL_END;
+
+ BOOL fInserted = FALSE;
+
+ // we have created a wrapper, let us insert it into the hash table
+ // but we need to check if somebody beat us to it
+ {
+ // see if somebody beat us to it
+ // perf: unfold LookupWrapper to avoid creating RCWHolder in common cases
+ RCW *pRawRCW = LookupWrapperUnsafe(pIdentity);
+ if (pRawRCW == NULL)
+ {
+ InsertWrapper(pRCW);
+ fInserted = TRUE;
+ }
+ else
+ {
+ RCWHolder pTempRCW(GetThread());
+
+ // Assume that we already have a sync block for this object.
+ pTempRCW.InitNoCheck(pRawRCW);
+
+ // if we didn't find a valid wrapper, Insert our own wrapper
+ if (pTempRCW.IsNull() || !pTempRCW->IsValid())
+ {
+                // if we found a bogus wrapper, get rid of it so that when we
+                // insert, we insert a valid wrapper instead of a duplicate
+ if (!pTempRCW.IsNull())
+ {
+ _ASSERTE(!pTempRCW->IsValid());
+ RemoveWrapper(&pTempRCW);
+ }
+
+ InsertWrapper(pRCW);
+ fInserted = TRUE;
+ }
+ else
+ {
+ _ASSERTE(!pTempRCW.IsNull() && pTempRCW->IsValid());
+ // okay we found a valid wrapper,
+
+ if (pTempRCW->IsDetached())
+ {
+ _ASSERTE((LPVOID)pIdentity != (LPVOID)pTempRCW.GetRawRCWUnsafe()); // we should never find "unique" RCWs
+
+ // remove and re-insert the RCW using its unique identity
+ RemoveWrapper(&pTempRCW);
+ pTempRCW->m_pIdentity = (LPVOID)pTempRCW.GetRawRCWUnsafe();
+ InsertWrapper(&pTempRCW);
+
+ // and insert the new incoming RCW
+ InsertWrapper(pRCW);
+ fInserted = TRUE;
+ }
+ else if (fAllowReinit)
+ {
+ // addref the wrapper
+ pTempRCW->AddRef(this);
+
+ // Initialize the holder with the rcw we're going to return.
+ OBJECTREF objref = pTempRCW->GetExposedObject();
+ pTempRCW.UnInit();
+ pRCW->UnInit();
+ pRCW->InitNoCheck(objref);
+ }
+ }
+ }
+ }
+
+ return fInserted;
+}
+
+//--------------------------------------------------------------------------------
+// void RCWCache::ReleaseWrappersWorker(LPVOID pCtxCookie)
+// Helper to release the COM+ wrappers in the cache that live in the specified
+// context (including Jupiter RCWs), or all the wrappers in the cache if pCtxCookie is null.
+void RCWCache::ReleaseWrappersWorker(LPVOID pCtxCookie)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCtxCookie, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ RCWCleanupList CleanupList;
+ RCWCleanupList AggregatedCleanupList;
+
+ struct RCWInterfacePointer
+ {
+ IUnknown *m_pUnk;
+ RCW *m_pRCW;
+ CtxEntry *m_pCtxEntry;
+ };
+
+ // Arrays of individual interface pointers to call Release on
+ CQuickArrayList<RCWInterfacePointer> InterfacePointerList;
+ CQuickArrayList<RCWInterfacePointer> AggregatedInterfacePointerList;
+
+ // Switch to cooperative GC mode before we take the lock.
+ GCX_COOP();
+ {
+ {
+ RCWCache::LockHolder lh(this);
+
+ // Go through the hash table and add the wrappers to the cleanup lists.
+ for (SHash<RCWCacheTraits>::Iterator it = m_HashMap.Begin(); it != m_HashMap.End(); it++)
+ {
+ RCW *pWrap = *it;
+ _ASSERTE(pWrap != NULL);
+
+ // If a context cookie was specified, then only clean up wrappers that
+ // are in that context, including non-FTM regular RCWs, and FTM Jupiter objects
+ // Otherwise clean up all the wrappers.
+ // Ignore RCWs that aggregate the FTM if we are cleaning up context
+ // specific RCWs (note that we rely on this behavior in WinRT factory cache code)
+ // Note that Jupiter RCWs are special and they are considered to be context-bound
+ if (!pCtxCookie || ((pWrap->GetWrapperCtxCookie() == pCtxCookie) && (pWrap->IsJupiterObject() || !pWrap->IsFreeThreaded())))
+ {
+ if (!pWrap->IsURTAggregated())
+ CleanupList.AddWrapper_NoLock(pWrap);
+ else
+ AggregatedCleanupList.AddWrapper_NoLock(pWrap);
+
+ pWrap->DecoupleFromObject();
+ RemoveWrapper(pWrap);
+ }
+ else if (!pWrap->IsFreeThreaded())
+ {
+ // We have a non-zero pCtxCookie but this RCW was not created in that context. We still
+ // need to take a closer look at the RCW because its interface pointer cache may contain
+ // pointers acquired in the given context - and those need to be released here.
+ if (pWrap->m_pAuxiliaryData != NULL)
+ {
+ RCWAuxiliaryData::InterfaceEntryIterator it = pWrap->m_pAuxiliaryData->IterateInterfacePointers();
+ while (it.Next())
+ {
+ InterfaceEntry *pEntry = it.GetEntry();
+ if (!pEntry->IsFree() && it.GetCtxCookie() == pCtxCookie)
+ {
+ RCWInterfacePointer intfPtr;
+ intfPtr.m_pUnk = pEntry->m_pUnknown;
+ intfPtr.m_pRCW = pWrap;
+ intfPtr.m_pCtxEntry = it.GetCtxEntryNoAddRef();
+
+ if (!pWrap->IsURTAggregated())
+ InterfacePointerList.Push(intfPtr);
+ else
+ AggregatedInterfacePointerList.Push(intfPtr);
+
+ // Reset the CtxEntry first, so we don't race with RCWAuxiliaryData::CacheInterfacePointer
+ // which may try to reuse the InterfaceEntry for another (pUnk, MT, CtxEntry) triplet.
+ it.ResetCtxEntry();
+ pEntry->Free();
+ }
+ }
+ }
+ }
+ }
+ }
+
+        // Clean up the non-URT-aggregated RCWs first, then clean up the URT-aggregated RCWs.
+ CleanupList.CleanupAllWrappers();
+
+ for (SIZE_T i = 0; i < InterfacePointerList.Size(); i++)
+ {
+ RCWInterfacePointer &intfPtr = InterfacePointerList[i];
+
+ RCW_VTABLEPTR(intfPtr.m_pRCW);
+ SafeRelease(intfPtr.m_pUnk, intfPtr.m_pRCW);
+
+ intfPtr.m_pCtxEntry->Release();
+ }
+
+ AggregatedCleanupList.CleanupAllWrappers();
+
+ for (SIZE_T i = 0; i < AggregatedInterfacePointerList.Size(); i++)
+ {
+ RCWInterfacePointer &intfPtr = AggregatedInterfacePointerList[i];
+
+ RCW_VTABLEPTR(intfPtr.m_pRCW);
+ SafeRelease(intfPtr.m_pUnk, intfPtr.m_pRCW);
+
+ intfPtr.m_pCtxEntry->Release();
+ }
+
+ }
+
+ if (!CleanupList.IsEmpty() || !AggregatedCleanupList.IsEmpty())
+ {
+ _ASSERTE(!"Cannot cleanup RCWs in cleanup list. Most likely because the RCW is disabled for eager cleanup.");
+ LOG((LF_INTEROP, LL_INFO1000, "Cannot cleanup RCWs in cleanup list. Most likely because the RCW is disabled for eager cleanup."));
+ }
+}
+
+//--------------------------------------------------------------------------------
+// void RCWCache::DetachWrappersWorker()
+// Helper to mark RCWs that are not GC-promoted at this point as detached.
+class DetachWrappersFunctor
+{
+public:
+ FORCEINLINE void operator() (RCW *pRCW)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (pRCW->IsValid())
+ {
+ if (!GCHeap::GetGCHeap()->IsPromoted(OBJECTREFToObject(pRCW->GetExposedObject())) &&
+ !pRCW->IsDetached())
+ {
+ // No need to use InterlockedOr here since every other place that modifies the flags
+ // runs in cooperative GC mode (i.e. definitely not concurrently with this function).
+ pRCW->m_Flags.m_Detached = 1;
+
+ if (pRCW->IsJupiterObject())
+ RCWWalker::BeforeJupiterRCWDestroyed(pRCW);
+ }
+ }
+ }
+};
+
+void RCWCache::DetachWrappersWorker()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(GCHeap::IsGCInProgress()); // GC is in progress and the runtime is suspended
+ }
+ CONTRACTL_END;
+
+ DetachWrappersFunctor functor;
+ m_HashMap.ForEach(functor);
+}
+
+VOID RCWCleanupList::AddWrapper(RCW* pRCW)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // For the global cleanup list, this is called only from the finalizer thread
+ _ASSERTE(this != g_pRCWCleanupList || GetThread() == FinalizerThread::GetFinalizerThread());
+
+ {
+ CrstHolder ch(&m_lock);
+
+ AddWrapper_NoLock(pRCW);
+ }
+}
+
+VOID RCWCleanupList::AddWrapper_NoLock(RCW* pRCW)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+    // Traverse the list for a match - when found, insert as the matching bucket head.
+ RCW *pBucket = m_pFirstBucket;
+ RCW *pPrevBucket = NULL;
+ while (pBucket != NULL)
+ {
+ if (pRCW->MatchesCleanupBucket(pBucket))
+ {
+ // Insert as bucket head.
+ pRCW->m_pNextRCW = pBucket;
+ pRCW->m_pNextCleanupBucket = pBucket->m_pNextCleanupBucket;
+
+ // Not necessary but makes it clearer that pBucket is no longer a bucket head.
+ pBucket->m_pNextCleanupBucket = NULL;
+ break;
+ }
+ pPrevBucket = pBucket;
+ pBucket = pBucket->m_pNextCleanupBucket;
+ }
+
+ // If we didn't find a match, insert as a new bucket.
+ if (pBucket == NULL)
+ {
+ pRCW->m_pNextRCW = NULL;
+ pRCW->m_pNextCleanupBucket = NULL;
+ }
+
+ // pRCW is now a bucket head - the only thing missing is a link from the previous bucket head.
+ if (pPrevBucket != NULL)
+ pPrevBucket->m_pNextCleanupBucket = pRCW;
+ else
+ m_pFirstBucket = pRCW;
+}
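+
+// Illustrative sketch of the resulting structure (a rough picture derived from the code
+// above, not exhaustive): bucket heads are chained via m_pNextCleanupBucket, and RCWs
+// that share a bucket (i.e. that match per MatchesCleanupBucket) hang off their head
+// via m_pNextRCW:
+//
+//   m_pFirstBucket --> [RCW A1] --m_pNextCleanupBucket--> [RCW B1] --> NULL
+//                         |                                  |
+//                      m_pNextRCW                         m_pNextRCW
+//                         v                                  v
+//                      [RCW A2]                           [RCW B2]
+//
+// New wrappers are inserted as the head of their matching bucket, so insertion cost is
+// proportional to the number of buckets, not the number of wrappers.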
+
+VOID RCWCleanupList::CleanupAllWrappers()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ // For the global cleanup list, this is called only from the finalizer thread
+ PRECONDITION( (this != g_pRCWCleanupList) || (GetThread() == FinalizerThread::GetFinalizerThread()));
+ }
+ CONTRACTL_END;
+
+ RemovedBuckets NonSTABuckets;
+ RemovedBuckets STABuckets;
+
+    // We sweep the cleanup list once and remove all MTA/free-threaded buckets as well as STA buckets,
+    // leaving only those with disabled eager cleanup in the list. Then we drop the lock, walk the
+    // removed buckets, and perform the actual release. We cannot release during the initial sweep
+    // because we would need to drop and reacquire the lock for each bucket, which would invalidate
+    // the entire linked list and force us to restart the enumeration after each bucket.
+ {
+ // Take the lock
+ CrstHolder ch(&m_lock);
+
+ RCW *pBucket = m_pFirstBucket;
+ RCW *pPrevBucket = NULL;
+ while (pBucket != NULL)
+ {
+ RCW *pNextBucket = pBucket->m_pNextCleanupBucket;
+ Thread *pSTAThread = pBucket->GetSTAThread();
+
+ if (pSTAThread == NULL || pBucket->AllowEagerSTACleanup())
+ {
+ // Remove the list from the CleanupList structure
+ if (pPrevBucket != NULL)
+ pPrevBucket->m_pNextCleanupBucket = pBucket->m_pNextCleanupBucket;
+ else
+ m_pFirstBucket = pBucket->m_pNextCleanupBucket;
+
+ if (pSTAThread == NULL)
+ {
+ // and add it to the local MTA/Free-Threaded chain
+ NonSTABuckets.Append(pBucket);
+ }
+ else
+ {
+ // or to the local STA chain
+ STABuckets.Append(pBucket);
+ }
+ }
+ else
+ {
+ // move the 'previous' pointer only if we didn't remove the current bucket
+ pPrevBucket = pBucket;
+ }
+ pBucket = pNextBucket;
+ }
+ // Release the lock so we can correctly transition to cleanup.
+ }
+
+ // Request help from other threads
+ m_doCleanupInContexts = TRUE;
+
+ // First, cleanup the MTA/Free-Threaded buckets
+ RCW *pRCWToCleanup;
+ while ((pRCWToCleanup = NonSTABuckets.PopHead()) != NULL)
+ {
+ ReleaseRCWList_Args args;
+ args.pHead = pRCWToCleanup;
+ args.ctxTried = FALSE;
+ args.ctxBusy = FALSE;
+
+ ReleaseRCWListInCorrectCtx(&args);
+ }
+
+ // Now, cleanup the STA buckets
+ while ((pRCWToCleanup = STABuckets.PopHead()) != NULL)
+ {
+ //
+        // CAUTION: DO NOT access pSTAThread fields here, as pSTAThread
+        // could've already been deleted if
+        // 1) the RCW is a free-threaded RCW, or
+        // 2) the RCW is a regular RCW that was marked by the GC but not finalized yet.
+ // Only pointer comparison is allowed.
+ //
+ ReleaseRCWList_Args args;
+ args.pHead = pRCWToCleanup;
+ args.ctxTried = FALSE;
+ args.ctxBusy = FALSE;
+
+ // Advertise the fact that we're cleaning up this thread.
+ m_pCurCleanupThread = pRCWToCleanup->GetSTAThread();
+ _ASSERTE(pRCWToCleanup->GetSTAThread() != NULL);
+
+ ReleaseRCWListInCorrectCtx(&args);
+
+ // Done cleaning this thread for now...reset
+ m_pCurCleanupThread = NULL;
+ }
+
+ // No more stuff for other threads to help with
+ m_doCleanupInContexts = FALSE;
+}
+
+
+VOID RCWCleanupList::CleanupWrappersInCurrentCtxThread(BOOL fWait, BOOL fManualCleanupRequested, BOOL bIgnoreComObjectEagerCleanupSetting)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!m_doCleanupInContexts && !fManualCleanupRequested)
+ return;
+
+ // Find out our STA (if any)
+ Thread *pThread = GetThread();
+ LPVOID pCurrCtxCookie = GetCurrentCtxCookie();
+
+ Thread::ApartmentState aptState = pThread->GetApartment();
+
+ RemovedBuckets BucketsToCleanup;
+
+ {
+ // Take the lock
+ CrstHolder ch(&m_lock);
+
+ RCW *pBucket = m_pFirstBucket;
+ RCW *pPrevBucket = NULL;
+ while (pBucket != NULL)
+ {
+ BOOL fMatch = FALSE;
+ RCW *pNextBucket = pBucket->m_pNextCleanupBucket;
+
+ if (aptState != Thread::AS_InSTA)
+ {
+                // If we're in an MTA, just look for a matching context (including free-threaded and non-free-threaded)
+ if (pBucket->GetSTAThread() == NULL &&
+ (pCurrCtxCookie == NULL || pBucket->GetWrapperCtxCookie() == pCurrCtxCookie))
+ {
+ fMatch = TRUE;
+ }
+ }
+ else
+ {
+ // If we're in an STA, clean all matching STA contexts (including free-threaded and non-free threaded)
+ if (pBucket->GetWrapperCtxCookie() == pCurrCtxCookie &&
+ (bIgnoreComObjectEagerCleanupSetting || pBucket->AllowEagerSTACleanup()))
+ {
+ fMatch = TRUE;
+ }
+ }
+
+ if (fMatch)
+ {
+ // Remove the list from the CleanupList structure
+ if (pPrevBucket != NULL)
+ pPrevBucket->m_pNextCleanupBucket = pBucket->m_pNextCleanupBucket;
+ else
+ m_pFirstBucket = pBucket->m_pNextCleanupBucket;
+
+ // and add it to the local cleanup chain
+ BucketsToCleanup.Append(pBucket);
+ }
+ else
+ {
+ // move the 'previous' pointer only if we didn't remove the current bucket
+ pPrevBucket = pBucket;
+ }
+ pBucket = pNextBucket;
+ }
+ }
+
+ // Clean it up
+ RCW *pRCWToCleanup;
+ while ((pRCWToCleanup = BucketsToCleanup.PopHead()) != NULL)
+ {
+ if (pRCWToCleanup->GetSTAThread() == NULL)
+ {
+ // We're already in the correct context, just clean it.
+ ReleaseRCWListRaw(pRCWToCleanup);
+ }
+ else
+ {
+ ReleaseRCWList_Args args;
+ args.pHead = pRCWToCleanup;
+ args.ctxTried = FALSE;
+ args.ctxBusy = FALSE;
+
+ ReleaseRCWListInCorrectCtx(&args);
+ }
+ }
+
+ if (aptState == Thread::AS_InSTA)
+ {
+ if (fWait && m_pCurCleanupThread == pThread)
+ {
+ // The finalizer thread may be trying to enter our STA -
+ // make sure it can get in.
+
+ LOG((LF_INTEROP, LL_INFO1000, "Thread %p: Yielding to finalizer thread.\n", pThread));
+
+ // Do a noop wait just to make sure we are cooperating
+ // with the finalizer thread
+ pThread->Join(1, TRUE);
+ }
+ }
+}
+
+BOOL RCWCleanupList::IsEmpty()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_pFirstBucket == NULL);
+}
+
+// static
+HRESULT RCWCleanupList::ReleaseRCWListInCorrectCtx(LPVOID pData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pData));
+ }
+ CONTRACTL_END;
+
+ ReleaseRCWList_Args* args = (ReleaseRCWList_Args*)pData;
+
+#ifdef FEATURE_REMOTING
+ if (InSendMessage())
+ {
+ args->ctxBusy = TRUE;
+ return S_OK;
+ }
+#endif
+
+ RCW* pHead = (RCW *)args->pHead;
+
+ LPVOID pCurrCtxCookie = GetCurrentCtxCookie();
+
+ // If we are releasing our IP's as a result of shutdown, we MUST not transition
+ // into cooperative GC mode. This "fix" will prevent us from doing so.
+ if (g_fEEShutDown & ShutDown_Finalize2)
+ {
+ Thread *pThread = GetThread();
+ if (pThread && !FinalizerThread::IsCurrentThreadFinalizer())
+ pThread->SetThreadStateNC(Thread::TSNC_UnsafeSkipEnterCooperative);
+ }
+
+
+ // Make sure we're in the right context / apartment.
+ // Also - if we've already transitioned once, we don't want to do so again.
+ // If the cookie exists in multiple MTA apartments, and the STA has gone away
+    // (leaving the old STA thread in an unknown state with a context value equal to
+    // the MTA context), we would loop infinitely. So we short-circuit this with ctxTried.
+
+ Thread *pHeadThread = pHead->GetSTAThread();
+ BOOL fCorrectThread = (pHeadThread == NULL) ? TRUE : (pHeadThread == GetThread());
+ BOOL fCorrectCookie = (pCurrCtxCookie == NULL) ? TRUE : (pHead->GetWrapperCtxCookie() == pCurrCtxCookie);
+
+ if ( pHead->IsFreeThreaded() || // Avoid context transition if the list is for free threaded RCW
+ (fCorrectThread && fCorrectCookie) || args->ctxTried )
+ {
+ ReleaseRCWListRaw(pHead);
+ }
+ else
+ {
+ // Mark that we're trying a context transition
+ args->ctxTried = TRUE;
+
+ // Transition into the context to release the interfaces.
+ HRESULT hr = pHead->EnterContext(ReleaseRCWListInCorrectCtx, args);
+ if (FAILED(hr) || args->ctxBusy)
+ {
+ // We are having trouble transitioning into the context (typically because the context is disconnected)
+ // or the context is busy so we cannot transition into it to clean up.
+ // The only option we have left is to try and clean up the RCW's from the current context.
+ ReleaseRCWListRaw(pHead);
+ }
+ }
+
+ // Reset the bit indicating we cannot transition into cooperative GC mode.
+ if (g_fEEShutDown & ShutDown_Finalize2)
+ {
+ Thread *pThread = GetThread();
+ if (pThread && !FinalizerThread::IsCurrentThreadFinalizer())
+ pThread->ResetThreadStateNC(Thread::TSNC_UnsafeSkipEnterCooperative);
+ }
+
+ return S_OK;
+}
+
+// static
+VOID RCWCleanupList::ReleaseRCWListRaw(RCW* pRCW)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pRCW));
+ }
+ CONTRACTL_END;
+
+ // Release all these RCWs
+ RCW* pNext = NULL;
+ while (pRCW != NULL)
+ {
+ pNext = pRCW->m_pNextRCW;
+ pRCW->Cleanup();
+ pRCW = pNext;
+ }
+}
+
+// Destroys RCWAuxiliaryData. Note that we do not release interface pointers stored in the
+// auxiliary interface pointer cache here. That needs to be done in the right COM context
+// (see code:RCW::ReleaseAuxInterfacesCallBack).
+RCWAuxiliaryData::~RCWAuxiliaryData()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_prVariantInterfaces != NULL)
+ {
+ delete m_prVariantInterfaces;
+ }
+
+ InterfaceEntryEx *pEntry = m_pInterfaceCache;
+ while (pEntry)
+ {
+ InterfaceEntryEx *pNextEntry = pEntry->m_pNext;
+
+ delete pEntry;
+ pEntry = pNextEntry;
+ }
+
+ if (VARIANCE_STUB_TARGET_IS_HANDLE(m_ohObjectVariantCallTarget_IEnumerable))
+ {
+ DestroyHandle(m_ohObjectVariantCallTarget_IEnumerable);
+ }
+ if (VARIANCE_STUB_TARGET_IS_HANDLE(m_ohObjectVariantCallTarget_IReadOnlyList))
+ {
+ DestroyHandle(m_ohObjectVariantCallTarget_IReadOnlyList);
+ }
+}
+
+// Inserts variant interfaces into the cache.
+void RCWAuxiliaryData::CacheVariantInterface(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CrstHolder ch(&m_VarianceCacheCrst);
+
+ if (m_prVariantInterfaces == NULL)
+ {
+ m_prVariantInterfaces = new ArrayList();
+ }
+
+ if (pMT->HasVariance() && m_prVariantInterfaces->FindElement(0, pMT) == ArrayList::NOT_FOUND)
+ {
+ m_prVariantInterfaces->Append(pMT);
+ }
+
+ // check implemented interfaces as well
+ MethodTable::InterfaceMapIterator it = pMT->IterateInterfaceMap();
+ while (it.Next())
+ {
+ MethodTable *pItfMT = it.GetInterface();
+ if (pItfMT->HasVariance() && m_prVariantInterfaces->FindElement(0, pItfMT) == ArrayList::NOT_FOUND)
+ {
+ m_prVariantInterfaces->Append(pItfMT);
+ }
+ }
+}
+
+// Inserts an interface pointer in the cache.
+void RCWAuxiliaryData::CacheInterfacePointer(MethodTable *pMT, IUnknown *pUnk, LPVOID pCtxCookie)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ // first, try to find a free entry to reuse
+ InterfaceEntryIterator it = IterateInterfacePointers();
+ while (it.Next())
+ {
+ InterfaceEntry *pEntry = it.GetEntry();
+ if (pEntry->IsFree() && pEntry->Init(pMT, pUnk))
+ {
+ // setting the cookie after "publishing" the entry is fine, at worst
+ // we may miss the cache if someone looks for this pMT concurrently
+ _ASSERTE_MSG(it.GetCtxCookie() == NULL, "Race condition detected, we are supposed to own the InterfaceEntry at this point");
+ it.SetCtxCookie(pCtxCookie);
+ return;
+ }
+ }
+
+ // create a new entry if a free one was not found
+ InterfaceEntryEx *pEntryEx = new InterfaceEntryEx();
+ ZeroMemory(pEntryEx, sizeof(InterfaceEntryEx));
+
+ pEntryEx->m_BaseEntry.Init(pMT, pUnk);
+
+ if (pCtxCookie != NULL)
+ {
+ pEntryEx->m_pCtxEntry = CtxEntryCache::GetCtxEntryCache()->FindCtxEntry(pCtxCookie, GetThread());
+ }
+ else
+ {
+ pEntryEx->m_pCtxEntry = NULL;
+ }
+
+ // and insert it into the linked list (the interlocked operation ensures that
+ // the list is walkable by other threads at all times)
+ InterfaceEntryEx *pNext;
+ do
+ {
+ pNext = VolatileLoad(&m_pInterfaceCache); // our candidate "next"
+ pEntryEx->m_pNext = pNext;
+ }
+ while (FastInterlockCompareExchangePointer(&m_pInterfaceCache, pEntryEx, pNext) != pNext);
+}
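+
+// The interlocked insert above is the standard lock-free "prepend" idiom. A minimal
+// standalone sketch of the same pattern (illustrative only - Node and g_pHead are
+// hypothetical names, not types used by this file):
+//
+//   Node *pNode = new Node();
+//   Node *pNext;
+//   do
+//   {
+//       pNext = VolatileLoad(&g_pHead);  // candidate "next"
+//       pNode->m_pNext = pNext;          // fully link the node before publishing it
+//   }
+//   while (FastInterlockCompareExchangePointer(&g_pHead, pNode, pNext) != pNext);
+//
+// Because a node is completely initialized before the CAS publishes it, and nodes are
+// never unlinked, concurrent readers can walk the list safely at any time.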
+
+// Returns a cached interface pointer or NULL if there was no match.
+IUnknown *RCWAuxiliaryData::FindInterfacePointer(MethodTable *pMT, LPVOID pCtxCookie)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ InterfaceEntryIterator it = IterateInterfacePointers();
+ while (it.Next())
+ {
+ InterfaceEntry *pEntry = it.GetEntry();
+ if (!pEntry->IsFree() && pEntry->m_pMT == (IE_METHODTABLE_PTR)pMT && it.GetCtxCookie() == pCtxCookie)
+ {
+ return pEntry->m_pUnknown;
+ }
+ }
+
+ return NULL;
+}
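+
+// Hedged sketch of the intended caller pattern (reconstructed from the two functions
+// above, not quoted from a specific call site; pAuxData, pItfMT and pCtxCookie are
+// placeholder names):
+//
+//   IUnknown *pUnk = pAuxData->FindInterfacePointer(pItfMT, pCtxCookie);
+//   if (pUnk == NULL)
+//   {
+//       /* cache miss: QI the underlying COM object for pItfMT's IID into pUnk ... */
+//       pAuxData->CacheInterfacePointer(pItfMT, pUnk, pCtxCookie);
+//   }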
+
+const int RCW::s_rGCPressureTable[GCPressureSize_COUNT] =
+{
+ 0, // GCPressureSize_None
+ GC_PRESSURE_PROCESS_LOCAL, // GCPressureSize_ProcessLocal
+ GC_PRESSURE_MACHINE_LOCAL, // GCPressureSize_MachineLocal
+ GC_PRESSURE_REMOTE, // GCPressureSize_Remote
+ GC_PRESSURE_WINRT_BASE, // GCPressureSize_WinRT_Base
+ GC_PRESSURE_WINRT_LOW, // GCPressureSize_WinRT_Low
+ GC_PRESSURE_WINRT_MEDIUM, // GCPressureSize_WinRT_Medium
+ GC_PRESSURE_WINRT_HIGH, // GCPressureSize_WinRT_High
+};
+
+// Deletes all items in code:s_RCWStandbyList.
+void RCW::FlushStandbyList()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PSLIST_ENTRY pEntry = InterlockedFlushSList(&RCW::s_RCWStandbyList);
+ while (pEntry)
+ {
+ PSLIST_ENTRY pNextEntry = pEntry->Next;
+ delete (RCW *)pEntry;
+ pEntry = pNextEntry;
+ }
+}
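+
+// For context, FlushStandbyList drains the entire lock-free stack with one interlocked
+// operation. A minimal sketch of the Win32 SList API involved (standard OS primitives;
+// head, pEntry and pChain are placeholder names shown only for illustration):
+//
+//   SLIST_HEADER head;
+//   InitializeSListHead(&head);                  // empty stack
+//   InterlockedPushEntrySList(&head, pEntry);    // recycle an RCW (push)
+//   pEntry = InterlockedPopEntrySList(&head);    // reuse one (pop) - see CreateRCWInternal
+//   pChain = InterlockedFlushSList(&head);       // detach the whole chain, as done above
+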
+//--------------------------------------------------------------------------------
+// The IUnknown passed in is AddRef'ed if we succeed in creating the wrapper unless
+// the CF_SuppressAddRef flag is set.
+RCW* RCW::CreateRCW(IUnknown *pUnk, DWORD dwSyncBlockIndex, DWORD flags, MethodTable *pClassMT)
+{
+ CONTRACT (RCW*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACT_END;
+
+ RCW *pRCW = NULL;
+
+ {
+ GCX_PREEMP();
+ pRCW = RCW::CreateRCWInternal(pUnk, dwSyncBlockIndex, flags, pClassMT);
+ }
+
+ // No exception after this point
+ if (pRCW->IsJupiterObject())
+ RCWWalker::AfterJupiterRCWCreated(pRCW);
+
+ RETURN pRCW;
+}
+
+RCW* RCW::CreateRCWInternal(IUnknown *pUnk, DWORD dwSyncBlockIndex, DWORD flags, MethodTable *pClassMT)
+{
+ CONTRACT (RCW*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(dwSyncBlockIndex != 0);
+ PRECONDITION(CheckPointer(pClassMT));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // now allocate the wrapper
+ RCW *pWrap = (RCW *)InterlockedPopEntrySList(&RCW::s_RCWStandbyList);
+ if (pWrap != NULL)
+ {
+ // cache hit - reinitialize the data structure
+ new (pWrap) RCW();
+ }
+ else
+ {
+ pWrap = new RCW();
+ }
+
+ AppDomain * pAppDomain = GetAppDomain();
+ if((flags & CF_QueryForIdentity) ||
+ (pAppDomain && pAppDomain->GetDisableInterfaceCache()))
+ {
+ IUnknown *pUnkTemp = NULL;
+ HRESULT hr = SafeQueryInterfacePreemp(pUnk, IID_IUnknown, &pUnkTemp);
+ LogInteropQI(pUnk, IID_IUnknown, hr, "QI for IID_IUnknown in RCW::CreateRCW");
+        if (SUCCEEDED(hr))
+        {
+            pUnk = pUnkTemp;
+        }
+ }
+ else
+ {
+ ULONG cbRef = SafeAddRefPreemp(pUnk);
+ LogInteropAddRef(pUnk, cbRef, "RCWCache::CreateRCW: Addref pUnk because creating new RCW");
+ }
+
+ // Make sure we release AddRef-ed pUnk in case of exceptions
+ SafeComHolderPreemp<IUnknown> pUnkHolder = pUnk;
+
+ // Log the creation
+ LogRCWCreate(pWrap, pUnk);
+
+ // Remember that the object is known to support IInspectable
+ pWrap->m_Flags.m_fSupportsIInspectable = !!(flags & CF_SupportsIInspectable);
+
+ // Initialize wrapper
+ pWrap->Initialize(pUnk, dwSyncBlockIndex, pClassMT);
+
+ if (flags & CF_SupportsIInspectable)
+ {
+ // WinRT objects always apply some GC pressure
+ GCPressureSize pressureSize = GCPressureSize_WinRT_Base;
+
+ // if we have a strongly-typed non-delegate RCW, we may have read the GC pressure amount from metadata
+ if (pClassMT->IsProjectedFromWinRT() && !pClassMT->IsDelegate())
+ {
+ WinRTClassFactory *pFactory = GetComClassFactory(pClassMT)->AsWinRTClassFactory();
+ pressureSize = pFactory->GetGCPressure();
+ }
+
+ pWrap->AddMemoryPressure(pressureSize);
+ }
+
+    // Check to see whether this is a DCOM proxy, either because we've been explicitly
+    // asked to, or because we're talking to a non-WinRT object and may need to add
+    // memory pressure
+ const bool checkForDCOMProxy = (flags & CF_DetectDCOMProxy) ||
+ !(flags & CF_SupportsIInspectable);
+
+ if (checkForDCOMProxy)
+ {
+ // If the object is a DCOM proxy...
+ SafeComHolderPreemp<IRpcOptions> pRpcOptions = NULL;
+ GCPressureSize pressureSize = GCPressureSize_None;
+ HRESULT hr = pWrap->SafeQueryInterfaceRemoteAware(IID_IRpcOptions, (IUnknown**)&pRpcOptions);
+ LogInteropQI(pUnk, IID_IRpcOptions, hr, "QI for IRpcOptions");
+ if (S_OK == hr)
+ {
+ ULONG_PTR dwValue = 0;
+ hr = pRpcOptions->Query(pUnk, COMBND_SERVER_LOCALITY, &dwValue);
+
+ if (SUCCEEDED(hr))
+ {
+ if (dwValue == SERVER_LOCALITY_MACHINE_LOCAL || dwValue == SERVER_LOCALITY_REMOTE)
+ {
+ pWrap->m_Flags.m_fIsDCOMProxy = 1;
+ }
+
+ // Only add memory pressure for proxies for non-WinRT objects
+ if (!(flags & CF_SupportsIInspectable))
+ {
+ switch(dwValue)
+ {
+ case SERVER_LOCALITY_PROCESS_LOCAL:
+ pressureSize = GCPressureSize_ProcessLocal;
+ break;
+ case SERVER_LOCALITY_MACHINE_LOCAL:
+ pressureSize = GCPressureSize_MachineLocal;
+ break;
+ case SERVER_LOCALITY_REMOTE:
+ pressureSize = GCPressureSize_Remote;
+ break;
+ default:
+ pressureSize = GCPressureSize_None;
+ break;
+ }
+ }
+ }
+ }
+
+ // ...add the appropriate amount of memory pressure to the GC.
+ if (pressureSize != GCPressureSize_None)
+ {
+ pWrap->AddMemoryPressure(pressureSize);
+ }
+ }
+
+ pUnkHolder.SuppressRelease();
+
+ RETURN pWrap;
+}
+
+//----------------------------------------------------------
+// Init IUnknown and IDispatch cookies with the pointers, and associate the COMOBJECTREF with this RCW
+void RCW::Initialize(IUnknown* pUnk, DWORD dwSyncBlockIndex, MethodTable *pClassMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(ThrowOutOfMemory());
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(dwSyncBlockIndex != 0);
+ PRECONDITION(CheckPointer(pClassMT));
+ }
+ CONTRACTL_END;
+
+ m_cbRefCount = 1;
+
+ // Start with use count 1 (this is counteracted in RCW::Cleanup)
+ m_cbUseCount = 1;
+
+ // Cache the IUnk and thread
+ m_pIdentity = pUnk;
+
+ // Remember the VTable pointer of the COM IP.
+ // This is very helpful for tracking down early released COM objects
+ // that AV when you call IUnknown::Release.
+ m_vtablePtr = *(LPVOID*)pUnk;
+
+    // Track the thread that created this wrapper:
+    // if this thread is an STA thread, then when the STA dies
+    // we need to clean up this wrapper
+ m_pCreatorThread = GetThread();
+ _ASSERTE(m_pCreatorThread != NULL);
+
+ m_pRCWCache = RCWCache::GetRCWCache();
+
+ m_Flags.m_MarshalingType = GetMarshalingType(pUnk, pClassMT);
+
+ // Initialize the IUnkEntry
+ m_UnkEntry.Init(pUnk, IsFreeThreaded(), m_pCreatorThread DEBUGARG(this));
+
+    // Determine the AllowEagerSTACleanup setting right now.
+    // We don't want to access the thread object to get the status later,
+    // as m_pCreatorThread could already be dead at RCW cleanup time.
+    //
+    // Free-threaded RCWs created from an STA "survive" even after the pSTAThread is terminated
+    // and destroyed. For free-threaded objects there is no pumping at all, and users should always
+    // expect the object to be accessed concurrently.
+    //
+    // So, only disallow eager STA cleanup for non-free-threaded RCWs. Free-threaded RCWs
+    // should be cleaned up regardless of the setting on the thread.
+ bool disableEagerCleanup = m_pCreatorThread->IsDisableComObjectEagerCleanup();
+
+ if (disableEagerCleanup && !IsFreeThreaded())
+ m_Flags.m_fAllowEagerSTACleanup = 0;
+ else
+ m_Flags.m_fAllowEagerSTACleanup = 1;
+
+    // Store the wrapper in the sync block; that is the only way we can get cleaned up.
+    // The syncblock is guaranteed to be present.
+ SyncBlock *pSyncBlock = g_pSyncTable[(int)dwSyncBlockIndex].m_SyncBlock;
+ InteropSyncBlockInfo *pInteropInfo = pSyncBlock->GetInteropInfo();
+ pInteropInfo->SetRawRCW(this);
+
+ // Store the sync block index.
+ m_SyncBlockIndex = dwSyncBlockIndex;
+
+ // Check if this object is a Jupiter object (only for WinRT scenarios)
+ _ASSERTE(m_Flags.m_fIsJupiterObject == 0);
+ if (SupportsIInspectable())
+ {
+ SafeComHolderPreemp<IJupiterObject> pJupiterObject = NULL;
+ HRESULT hr = SafeQueryInterfacePreemp(pUnk, IID_IJupiterObject, (IUnknown **)&pJupiterObject);
+ LogInteropQI(pUnk, IID_IJupiterObject, hr, "QI for IJupiterObject");
+
+ if (SUCCEEDED(hr))
+ {
+ // A Jupiter object that is not free threaded is not allowed
+ if (!IsFreeThreaded())
+ {
+ StackSString ssObjClsName;
+ StackSString ssDestClsName;
+
+ pClassMT->_GetFullyQualifiedNameForClass(ssObjClsName);
+
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOTCAST,
+ ssObjClsName.GetUnicode(), W("IAgileObject"));
+ }
+
+ RCWWalker::OnJupiterRCWCreated(this, pJupiterObject);
+
+ SetJupiterObject(pJupiterObject);
+
+ if (!IsURTAggregated())
+ {
+ pJupiterObject.SuppressRelease();
+ }
+ }
+ }
+
+ // Log the wrapper initialization.
+ LOG((LF_INTEROP, LL_INFO100, "Initializing RCW %p with SyncBlock index %d\n", this, dwSyncBlockIndex));
+
+ // To help combat finalizer thread starvation, we check to see if there are any wrappers
+ // scheduled to be cleaned up for our context. If so, we'll do them here to avoid making
+ // the finalizer thread do a transition.
+ // @perf: This may need a bit of tuning.
+ // Note: This will enter a message pump in order to synchronize with the finalizer thread.
+
+ // We can't safely pump here for Releasing (or directly release)
+ // if we're currently in a SendMessage.
+    // Also, clients can opt out of this. The option is a per-thread flag which they can
+    // set by calling DisableComObjectEagerCleanup on the appropriate thread. Why would they
+    // want to opt out? Because pumping can lead to re-entrancy in unexpected places.
+ // If a client decides to opt out, they are required to cleanup RCWs themselves by
+ // calling Marshal.CleanupUnusedObjectsInCurrentContext periodically. The best place
+ // to make that call is within their own message pump.
+ if (!disableEagerCleanup
+#ifdef FEATURE_REMOTING
+ && !InSendMessage()
+#endif
+ )
+ {
+ _ASSERTE(g_pRCWCleanupList != NULL);
+ g_pRCWCleanupList->CleanupWrappersInCurrentCtxThread();
+ }
+}
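+
+// Assuming this corresponds to the public managed surface (hedged - inferred from the
+// comments in Initialize rather than from the managed sources), the opt-out works like
+// this: a client calls Thread.CurrentThread.DisableComObjectEagerCleanup() on the thread
+// in question, and then periodically calls Marshal.CleanupUnusedObjectsInCurrentContext()
+// from its own message pump to clean up RCWs itself.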
+
+VOID RCW::MarkURTAggregated()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_Flags.m_fURTContained == 0);
+ }
+ CONTRACTL_END;
+
+ if (!m_Flags.m_fURTAggregated && m_Flags.m_fIsJupiterObject)
+ {
+ // Notify Jupiter that we are about to release IJupiterObject
+ RCWWalker::BeforeInterfaceRelease(this);
+
+ // If we mark this RCW as aggregated and we've done a QI for IJupiterObject,
+ // release it to account for the extra ref
+ // Note that this is a quick fix for PDC-2 and eventually we should replace
+ // this with a better fix
+ SafeRelease(GetJupiterObject());
+ }
+
+ m_Flags.m_fURTAggregated = 1;
+}
+
+RCW::MarshalingType RCW::GetMarshalingType(IUnknown* pUnk, MethodTable *pClassMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(pClassMT));
+ }
+ CONTRACTL_END;
+
+ PTR_EEClass pClass = pClassMT->GetClass();
+
+    // Skip attributes on interfaces, as any object could implement those interfaces
+ if (!pClass->IsInterface() && pClass->IsMarshalingTypeSet())
+ {
+        MarshalingType mType;
+        if (pClass->IsMarshalingTypeFreeThreaded())
+            mType = MarshalingType_FreeThreaded;
+        else if (pClass->IsMarshalingTypeInhibit())
+            mType = MarshalingType_Inhibit;
+        else
+            mType = MarshalingType_Standard;
+        return mType;
+ }
+    // MarshalingBehavior is not set, hence we will have to determine the behavior using QI
+ else
+ {
+ // Check whether the COM object can be marshaled. Hence we query for INoMarshal
+ SafeComHolderPreemp<INoMarshal> pNoMarshal;
+ HRESULT hr = SafeQueryInterfacePreemp(pUnk, IID_INoMarshal, (IUnknown**)&pNoMarshal);
+ LogInteropQI(pUnk, IID_INoMarshal, hr, "RCW::GetMarshalingType: QI for INoMarshal");
+
+ if (SUCCEEDED(hr))
+ return MarshalingType_Inhibit;
+ if (IUnkEntry::IsComponentFreeThreaded(pUnk))
+ return MarshalingType_FreeThreaded;
+ }
+ return MarshalingType_Unknown;
+}
+
+void RCW::AddMemoryPressure(GCPressureSize pressureSize)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ int pressure = s_rGCPressureTable[pressureSize];
+
+ if (pressureSize >= GCPressureSize_WinRT_Base)
+ {
+ // use the new implementation for WinRT RCWs
+ GCInterface::NewAddMemoryPressure(pressure);
+ }
+ else
+ {
+ // use the old implementation for classic COM interop
+ GCInterface::AddMemoryPressure(pressure);
+ }
+
+ // Remember the pressure we set.
+ m_Flags.m_GCPressure = pressureSize;
+}
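+
+// For example (reading straight off s_rGCPressureTable above): an RCW wrapping a
+// machine-local DCOM proxy gets AddMemoryPressure(GCPressureSize_MachineLocal), which
+// reports GC_PRESSURE_MACHINE_LOCAL bytes through the classic COM path, while any WinRT
+// RCW reports at least GC_PRESSURE_WINRT_BASE through the WinRT path.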
+
+
+void RCW::RemoveMemoryPressure()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION((GetThread()->m_StateNC & Thread::TSNC_UnsafeSkipEnterCooperative) == 0);
+ }
+ CONTRACTL_END;
+
+ if (GCPressureSize_None == m_Flags.m_GCPressure)
+ return;
+
+ int pressure = s_rGCPressureTable[m_Flags.m_GCPressure];
+
+ if (m_Flags.m_GCPressure >= GCPressureSize_WinRT_Base)
+ {
+ // use the new implementation for WinRT RCWs
+ GCInterface::NewRemoveMemoryPressure(pressure);
+ }
+ else
+ {
+ // use the old implementation for classic COM interop
+ GCInterface::RemoveMemoryPressure(pressure);
+ }
+
+ m_Flags.m_GCPressure = GCPressureSize_None;
+}
+
+
+//--------------------------------------------------------------------------------
+// AddRef is called only from within the runtime, when we look up a wrapper in our hash
+// table
+LONG RCW::AddRef(RCWCache* pWrapCache)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pWrapCache));
+ PRECONDITION(pWrapCache->LOCKHELD());
+ }
+ CONTRACTL_END;
+
+ LONG cbRef = ++m_cbRefCount;
+ return cbRef;
+}
+
+AppDomain* RCW::GetDomain()
+{
+ CONTRACT (AppDomain*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+ RETURN m_pRCWCache->GetDomain();
+}
+
+//--------------------------------------------------------------------------------
+// Used to facilitate the ReleaseComObject API.
+// Ensures that the RCW is not in use before attempting to release it.
+//
+INT32 RCW::ExternalRelease(OBJECTREF* pObjPROTECTED)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pObjPROTECTED != NULL);
+ PRECONDITION(*pObjPROTECTED != NULL);
+ }
+ CONTRACTL_END;
+
+ COMOBJECTREF* cref = (COMOBJECTREF*)pObjPROTECTED;
+
+ INT32 cbRef = -1;
+ BOOL fCleanupWrapper = FALSE;
+ RCW* pRCW = NULL;
+
+ // Lock
+ RCWCache* pCache = RCWCache::GetRCWCache();
+ _ASSERTE(pCache);
+
+ {
+ RCWCache::LockHolder lh(pCache);
+
+        // Now check whether the wrapper is still valid:
+        // if there is another ReleaseComObject on this object,
+        // or if an STA thread death decides to clean up this wrapper,
+        // then the object will be disconnected from the wrapper.
+ pRCW = (*cref)->GetSyncBlock()->GetInteropInfoNoCreate()->GetRawRCW();
+
+ if (pRCW)
+ {
+ // check for invalid case
+ if ((LONG)pRCW->m_cbRefCount > 0)
+ {
+ cbRef = (INT32) (--(pRCW->m_cbRefCount));
+ if (cbRef == 0)
+ {
+ pCache->RemoveWrapper(pRCW);
+ fCleanupWrapper = TRUE;
+ }
+ }
+ }
+ }
+
+ // do cleanup after releasing the lock
+ if (fCleanupWrapper)
+ {
+#ifdef MDA_SUPPORTED
+ MdaRaceOnRCWCleanup* mda = MDA_GET_ASSISTANT(RaceOnRCWCleanup);
+ if (mda)
+ {
+ BOOL fIsInUse = FALSE;
+
+ // Walk the thread tables, looking for this RCW in use.
+ {
+ // Take the threadstore lock
+ ThreadStoreLockHolder tslh;
+
+ Thread* pThread = NULL;
+
+ // walk each thread's table
+ while (NULL != (pThread = ThreadStore::GetThreadList(pThread)) )
+ {
+ if (pThread->RCWIsInUse(pRCW))
+ {
+ // found a match!
+ fIsInUse = TRUE;
+ break;
+ }
+ }
+ }
+
+ // If we found one, bail.
+ if (fIsInUse)
+ {
+ // Cannot decrement the counter if it's in use.
+ ++(pRCW->m_cbRefCount);
+ mda->ReportViolation();
+ }
+ }
+#endif // MDA_SUPPORTED
+
+ // Release all the data associated with the __ComObject.
+ ComObject::ReleaseAllData(pRCW->GetExposedObject());
+
+ pRCW->DecoupleFromObject();
+ pRCW->Cleanup();
+ }
+
+ return cbRef;
+}
+
+
+//--------------------------------------------------------------------------------
+// Used to facilitate the FinalReleaseComObject API.
+// Ensures that the RCW is not in use before attempting to release it.
+//
+void RCW::FinalExternalRelease(OBJECTREF* pObjPROTECTED)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pObjPROTECTED != NULL);
+ PRECONDITION(*pObjPROTECTED != NULL);
+ }
+ CONTRACTL_END;
+
+ COMOBJECTREF* cref = (COMOBJECTREF*)pObjPROTECTED;
+ BOOL fCleanupWrapper = FALSE;
+ RCW* pRCW = NULL;
+
+ // Lock
+ RCWCache* pCache = RCWCache::GetRCWCache();
+ _ASSERTE(pCache);
+
+ {
+ RCWCache::LockHolder lh(pCache);
+
+ pRCW = (*cref)->GetSyncBlock()->GetInteropInfoNoCreate()->GetRawRCW();
+
+ if (pRCW && pRCW->m_cbRefCount > 0)
+ {
+ pRCW->m_cbRefCount = 0;
+ pCache->RemoveWrapper(pRCW);
+ fCleanupWrapper = TRUE;
+ }
+ }
+
+ // do cleanup after releasing the lock
+ if (fCleanupWrapper)
+ {
+#ifdef MDA_SUPPORTED
+ MdaRaceOnRCWCleanup* mda = MDA_GET_ASSISTANT(RaceOnRCWCleanup);
+ if (mda)
+ {
+ BOOL fIsInUse = FALSE;
+
+ // Walk the thread tables, looking for this RCW in use.
+ {
+ // Take the threadstore lock
+ ThreadStoreLockHolder tslh;
+
+ Thread* pThread = NULL;
+
+ // walk each thread's table
+ while (NULL != (pThread = ThreadStore::GetThreadList(pThread)) )
+ {
+ if (pThread->RCWIsInUse(pRCW))
+ {
+ // found a match!
+ fIsInUse = TRUE;
+ break;
+ }
+ }
+ }
+
+ // If we found one, bail.
+ if (fIsInUse)
+ {
+ // Cannot zero the counter if it's in use.
+ pRCW->m_cbRefCount = 1;
+ mda->ReportViolation();
+ }
+ }
+#endif // MDA_SUPPORTED
+
+ // Release all the data associated with the __ComObject.
+ ComObject::ReleaseAllData(pRCW->GetExposedObject());
+
+ pRCW->DecoupleFromObject();
+ pRCW->Cleanup();
+ }
+}
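+
+// Taken together, ExternalRelease and FinalExternalRelease back the managed
+// Marshal.ReleaseComObject and Marshal.FinalReleaseComObject APIs respectively:
+// the former decrements the RCW ref-count and returns the new value (cleaning up
+// once it hits zero), while the latter unconditionally drops the count to zero
+// and cleans up immediately.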
+
+
+//--------------------------------------------------------------------------------
+// Schedule all interface pointers to be freed; called during GC to
+// do minimal work
+void RCW::MinorCleanup()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(GCHeap::IsGCInProgress() || ( (g_fEEShutDown & ShutDown_SyncBlock) && g_fProcessDetach ));
+ }
+ CONTRACTL_END;
+
+ // Log the wrapper minor cleanup.
+ LogRCWMinorCleanup(this);
+
+    // Remove the wrapper from the cache, so that
+    // other threads won't find this invalid wrapper.
+    // NOTE: we don't need to LOCK because we make sure
+    // everyone else touches this hash table
+    // with preemptive GC disabled (i.e. in cooperative mode)
+ RCWCache* pCache = m_pRCWCache;
+ _ASSERTE(pCache);
+
+    // On the server build, multiple threads may be removing
+    // wrappers from the wrapper cache.
+ pCache->RemoveWrapper(this);
+
+ if (IsJupiterObject() && !IsDetached())
+ RCWWalker::BeforeJupiterRCWDestroyed(this);
+
+ // Clear the SyncBlockIndex as the object is being GC'd and the index will become
+ // invalid as soon as the object is collected.
+ m_SyncBlockIndex = 0;
+}
+
+//--------------------------------------------------------------------------------
+// Cleanup - free all interface pointers
+void RCW::Cleanup()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Log the destruction of the RCW.
+ LogRCWDestroy(this);
+
+    // If we can't switch to cooperative mode, then we need to skip the check of
+    // whether the wrapper is still in the cache. Also, if we can't switch to coop
+    // mode, we're guaranteed to have already decoupled the RCW from its object.
+#ifdef _DEBUG
+ if (!(GetThread()->m_StateNC & Thread::TSNC_UnsafeSkipEnterCooperative))
+ {
+ GCX_COOP();
+
+ // make sure this wrapper is not in the hash table
+ RCWCache::LockHolder lh(m_pRCWCache);
+ _ASSERTE(m_pRCWCache->LookupWrapperUnsafe(m_pIdentity) != this);
+ }
+#endif
+
+ // Switch to preemptive GC mode before we release the interfaces.
+ {
+ GCX_PREEMP();
+
+ // Release the IUnkEntry and the InterfaceEntries.
+ ReleaseAllInterfacesCallBack(this);
+
+ // Remove the memory pressure caused by this RCW (if present)
+ // If we're in a shutdown situation, we can ignore the memory pressure.
+ if ((GetThread()->m_StateNC & Thread::TSNC_UnsafeSkipEnterCooperative) == 0 && !g_fForbidEnterEE)
+ RemoveMemoryPressure();
+ }
+
+ if (m_pAuxiliaryData != NULL)
+ {
+ delete m_pAuxiliaryData;
+ }
+
+#ifdef _DEBUG
+ m_cbRefCount = 0;
+ m_SyncBlockIndex = 0;
+#endif
+
+ // If there's no thread currently working with the RCW, this call will release helper fields on IUnkEntry
+ // and recycle the entire RCW structure, i.e. insert it in the standby list to be reused or free the memory.
+ // If a thread still keeps a ref-count on the RCW, it will release it when it's done. Keeping the structure
+ // and the helper fields alive reduces the chances of memory corruption in race scenarios.
+ DecrementUseCount();
+}
+
+
+//--------------------------------------------------------------------------------
+// Create a new wrapper for a different method table that represents the same
+// COM object as the original wrapper.
+void RCW::CreateDuplicateWrapper(MethodTable *pNewMT, RCWHolder* pNewRCW)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pNewMT));
+ PRECONDITION(pNewMT->IsComObjectType());
+ PRECONDITION(CheckPointer(pNewRCW));
+ //POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACTL_END;
+
+ NewRCWHolder pNewWrap;
+
+ // Validate that there exists a default constructor for the new wrapper class.
+ if (!pNewMT->HasDefaultConstructor())
+ COMPlusThrow(kArgumentException, IDS_EE_WRAPPER_MUST_HAVE_DEF_CONS);
+
+ // Allocate the wrapper COM object.
+ COMOBJECTREF NewWrapperObj = (COMOBJECTREF)ComObject::CreateComObjectRef(pNewMT);
+ GCPROTECT_BEGIN(NewWrapperObj)
+ {
+ SafeComHolder<IUnknown> pAutoUnk = NULL;
+
+ // Retrieve the RCWCache to use.
+ RCWCache* pCache = RCWCache::GetRCWCache();
+
+ // Create the new RCW associated with the COM object. We need
+ // to set the identity to some default value so we don't remove the original
+ // wrapper from the hash table when this wrapper goes away.
+ pAutoUnk = GetIUnknown();
+
+ DWORD flags = 0;
+ if (SupportsIInspectable())
+ flags |= CF_SupportsIInspectable;
+
+ // make sure we "pin" the syncblock before switching to preemptive mode
+ SyncBlock *pSB = NewWrapperObj->GetSyncBlock();
+ pSB->SetPrecious();
+ DWORD dwSyncBlockIndex = pSB->GetSyncBlockIndex();
+
+ pNewWrap = RCW::CreateRCW((IUnknown *)pAutoUnk, dwSyncBlockIndex, flags, pNewMT);
+
+ // Reset the Identity to be the RCW* as we don't want to create a duplicate entry
+ pNewWrap->m_pIdentity = (LPVOID)pNewWrap;
+
+ // Run the class constructor if it has not run yet.
+ pNewMT->CheckRunClassInitThrowing();
+
+ CallDefaultConstructor(ObjectToOBJECTREF(NewWrapperObj));
+
+ pNewRCW->InitNoCheck(NewWrapperObj);
+
+        // Insert the wrapper into the hashtable. The wrapper will be a duplicate; however,
+        // we fix the identity to ensure there is no collision in the hash table. This is required
+        // since the hashtable is used on appdomain unload to determine which RCWs need to be released.
+ {
+ RCWCache::LockHolder lh(pCache);
+ pCache->InsertWrapper(pNewRCW);
+ }
+ }
+ GCPROTECT_END();
+
+ pNewWrap.SuppressRelease();
+}
+
+//--------------------------------------------------------------------------------
+// Calling this is relatively slow since it can't take advantage of the cache:
+// there is no longer any way to go from an IID to a MethodTable.
+// If at all possible you should use the version that takes a MethodTable.
+// This usually means calling GetComIPFromObjectRef passing in a MethodTable
+// instead of an IID.
+IUnknown* RCW::GetComIPFromRCW(REFIID iid)
+{
+ CONTRACT(IUnknown *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ SafeComHolder<IUnknown> pRet = NULL;
+ HRESULT hr = S_OK;
+
+ hr = SafeQueryInterfaceRemoteAware(iid, (IUnknown**)&pRet);
+ if (hr != E_NOINTERFACE)
+ {
+ // We simply return NULL on E_NOINTERFACE which is much better for perf than throwing exceptions. Note
+ // that we can hit this code path often in aggregation scenarios where we forward QI's to the COM base class.
+ IfFailThrow(hr);
+ }
+ else
+ {
+ // Clear the return value in case we got E_NOINTERFACE but a non-NULL pUnk.
+ pRet.Clear();
+ }
+
+ pRet.SuppressRelease();
+ RETURN pRet;
+}
+
+//--------------------------------------------------------------------------------
+// check the local cache, out of line cache
+// if not found QI for the interface and store it
+IUnknown* RCW::GetComIPFromRCW(MethodTable* pMT)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ if (pMT == NULL || pMT->IsObjectClass())
+ {
+        // give out the IUnknown
+ IUnknown *result = GetIUnknown();
+ _ASSERTE(result != NULL);
+ RETURN result;
+ }
+
+ //
+    // Collectible types do not support COM interop
+ //
+ if (pMT->Collectible())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleCOM"));
+ }
+
+ // returns an AddRef'ed IP
+ RETURN GetComIPForMethodTableFromCache(pMT);
+}
+
+
+//-----------------------------------------------------------------
+// Get the IUnknown pointer for the wrapper,
+// making sure it is on the right thread
+IUnknown* RCW::GetIUnknown()
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // Try to retrieve the IUnknown in the current context.
+ RETURN m_UnkEntry.GetIUnknownForCurrContext(false);
+}
+
+//-----------------------------------------------------------------
+// Get the IUnknown pointer for the wrapper, non-AddRef'ed.
+// Generally this will work only if we are on the right thread,
+// otherwise NULL will be returned.
+IUnknown* RCW::GetIUnknown_NoAddRef()
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // Retrieve the IUnknown in the current context.
+ RETURN m_UnkEntry.GetIUnknownForCurrContext(true);
+}
+
+IUnknown *RCW::GetWellKnownInterface(REFIID riid)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ IUnknown *pUnk = NULL;
+
+ // QI for riid.
+ HRESULT hr = SafeQueryInterfaceRemoteAware(riid, &pUnk);
+ if ( S_OK != hr )
+ {
+        // If anything goes wrong, simply set pUnk to NULL to indicate that
+        // the wrapper does not support the given riid.
+ pUnk = NULL;
+ }
+
+    // Return the interface pointer, which is guaranteed to be valid on the current thread.
+ RETURN pUnk;
+}
+
+//-----------------------------------------------------------------
+// Get the IDispatch pointer for the wrapper,
+// making sure it is on the right thread
+IDispatch *RCW::GetIDispatch()
+{
+#ifdef FEATURE_CORECLR
+ if (AppX::IsAppXProcess())
+ {
+ COMPlusThrow(kPlatformNotSupportedException, IDS_EE_ERROR_IDISPATCH);
+ }
+#endif // FEATURE_CORECLR
+
+ WRAPPER_NO_CONTRACT;
+ return (IDispatch *)GetWellKnownInterface(IID_IDispatch);
+}
+
+//-----------------------------------------------------------------
+// Get the IInspectable pointer for the wrapper
+IInspectable *RCW::GetIInspectable()
+{
+ WRAPPER_NO_CONTRACT;
+ return (IInspectable *)GetWellKnownInterface(IID_IInspectable);
+}
+
+//-----------------------------------------------
+// Free GC handle and remove SyncBlock entry
+void RCW::DecoupleFromObject()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (m_SyncBlockIndex != 0)
+ {
+ if (IsJupiterObject() && !IsDetached())
+ RCWWalker::BeforeJupiterRCWDestroyed(this);
+
+ // remove reference to wrapper from sync block
+ SyncBlock* pSB = GetSyncBlock();
+ _ASSERTE(pSB);
+
+ InteropSyncBlockInfo* pInteropInfo = pSB->GetInteropInfoNoCreate();
+ _ASSERTE(pInteropInfo);
+
+ pInteropInfo->SetRawRCW(NULL);
+
+ m_SyncBlockIndex = 0;
+ }
+}
+
+HRESULT RCW::SafeQueryInterfaceRemoteAware(REFIID iid, IUnknown** ppResUnk)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SafeComHolder<IUnknown> pUnk(GetIUnknown_NoAddRef(), /*takeOwnership =*/ FALSE);
+ if (pUnk == NULL)
+ {
+ // if we are not on the right thread we get a proxy which we need to keep AddRef'ed
+ pUnk = GetIUnknown();
+ }
+
+ RCW_VTABLEPTR(this);
+
+ HRESULT hr = SafeQueryInterface(pUnk, iid, ppResUnk);
+ LogInteropQI(pUnk, iid, hr, "QI for interface in SafeQueryInterfaceRemoteAware");
+
+ if (hr == CO_E_OBJNOTCONNECTED || hr == RPC_E_INVALID_OBJECT || hr == RPC_E_INVALID_OBJREF || hr == CO_E_OBJNOTREG)
+ {
+ // set apartment state
+ GetThread()->SetApartment(Thread::AS_InMTA, FALSE);
+
+ // Release the stream of the IUnkEntry to force UnmarshalIUnknownForCurrContext
+ // to remarshal to the stream.
+ m_UnkEntry.ReleaseStream();
+
+ // Unmarshal again to the current context to get a valid proxy.
+ IUnknown *pTmpUnk = m_UnkEntry.UnmarshalIUnknownForCurrContext();
+
+ // Try to QI for the interface again.
+ hr = SafeQueryInterface(pTmpUnk, iid, ppResUnk);
+ LogInteropQI(pTmpUnk, iid, hr, "SafeQIRemoteAware - QI for Interface after lost");
+
+ // release our ref-count on pTmpUnk
+ int cbRef = SafeRelease(pTmpUnk);
+ LogInteropRelease(pTmpUnk, cbRef, "SafeQIRemoteAware - Release for Interface after lost");
+ }
+
+ return hr;
+}
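+
+// In short: the CO_E_OBJNOTCONNECTED / RPC_E_INVALID_OBJECT / RPC_E_INVALID_OBJREF /
+// CO_E_OBJNOTREG failures above indicate a stale proxy, so the method releases the
+// cached marshaling stream, re-unmarshals the IUnknown into the current context, and
+// retries the QI once against the fresh proxy.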
+
+#endif //#ifndef CROSSGEN_COMPILE
+
+//-----------------------------------------------------------------
+// Returns a redirected collection interface corresponding to a given ICollection/ICollection<T> or NULL
+// if the given interface is not ICollection/ICollection<T>. This also works for IReadOnlyCollection<T>.
+// The BOOL parameters help resolve the ambiguity around ICollection<KeyValuePair<K, V>>.
+// static
+MethodTable *RCW::ResolveICollectionInterface(MethodTable *pItfMT, BOOL fPreferIDictionary, BOOL *pfChosenIDictionary)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pItfMT));
+ PRECONDITION(CheckPointer(pfChosenIDictionary, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (pfChosenIDictionary != NULL)
+ *pfChosenIDictionary = FALSE;
+
+ // Casting/calling via ICollection<T> means QI/calling through IVector<T>, casting/calling via ICollection<KeyValuePair<K, V>> means
+ // QI/calling via IMap<K, V> OR IVector<IKeyValuePair<K, V>>. See which case it is.
+ if (pItfMT->HasSameTypeDefAs(MscorlibBinder::GetExistingClass(CLASS__ICOLLECTIONGENERIC)))
+ {
+ Instantiation inst = pItfMT->GetInstantiation();
+ TypeHandle arg = inst[0];
+
+ if (fPreferIDictionary)
+ {
+ if (!arg.IsTypeDesc() && arg.GetMethodTable()->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__KEYVALUEPAIRGENERIC)))
+ {
+ // ICollection<KeyValuePair<K, V>> -> IDictionary<K, V>
+ if (pfChosenIDictionary != NULL)
+ *pfChosenIDictionary = TRUE;
+
+ pItfMT = GetAppDomain()->GetRedirectedType(WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IDictionary);
+ return TypeHandle(pItfMT).Instantiate(arg.GetInstantiation()).GetMethodTable();
+ }
+ }
+
+ // ICollection<T> -> IList<T>
+ pItfMT = GetAppDomain()->GetRedirectedType(WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IList);
+ return TypeHandle(pItfMT).Instantiate(inst).GetMethodTable();
+ }
+
+ // Casting/calling via IReadOnlyCollection<T> means QI/calling through IVectorView<T>, casting/calling via IReadOnlyCollection<KeyValuePair<K, V>> means
+ // QI/calling via IMapView<K, V> OR IVectorView<IKeyValuePair<K, V>>. See which case it is.
+ if (pItfMT->HasSameTypeDefAs(MscorlibBinder::GetExistingClass(CLASS__IREADONLYCOLLECTIONGENERIC)))
+ {
+ Instantiation inst = pItfMT->GetInstantiation();
+ TypeHandle arg = inst[0];
+
+ if (fPreferIDictionary)
+ {
+ if (!arg.IsTypeDesc() && arg.GetMethodTable()->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__KEYVALUEPAIRGENERIC)))
+ {
+ // IReadOnlyCollection<KeyValuePair<K, V>> -> IReadOnlyDictionary<K, V>
+ if (pfChosenIDictionary != NULL)
+ *pfChosenIDictionary = TRUE;
+
+ pItfMT = GetAppDomain()->GetRedirectedType(WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IReadOnlyDictionary);
+ return TypeHandle(pItfMT).Instantiate(arg.GetInstantiation()).GetMethodTable();
+ }
+ }
+
+ // IReadOnlyCollection<T> -> IReadOnlyList<T>
+ pItfMT = GetAppDomain()->GetRedirectedType(WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IReadOnlyList);
+ return TypeHandle(pItfMT).Instantiate(inst).GetMethodTable();
+ }
+
+ // Casting/calling via ICollection means QI/calling through IBindableVector (projected to IList).
+ if (pItfMT == MscorlibBinder::GetExistingClass(CLASS__ICOLLECTION))
+ {
+ return MscorlibBinder::GetExistingClass(CLASS__ILIST);
+ }
+
+ // none of the above
+ return NULL;
+}
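+
+// Summary of the mappings implemented above (derived directly from the code paths):
+//
+//   ICollection<KeyValuePair<K,V>>          -> IDictionary<K,V>          (if fPreferIDictionary)
+//   ICollection<T>                          -> IList<T>
+//   IReadOnlyCollection<KeyValuePair<K,V>>  -> IReadOnlyDictionary<K,V>  (if fPreferIDictionary)
+//   IReadOnlyCollection<T>                  -> IReadOnlyList<T>
+//   ICollection (non-generic)               -> IList (non-generic)
+//   anything else                           -> NULL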
+
+// Helper method to allow us to compare a MethodTable against a known method table
+// from mscorlib. If the mscorlib type isn't loaded, we don't load it because we
+// know that it can't be the MethodTable we're curious about.
+static bool MethodTableHasSameTypeDefAsMscorlibClass(MethodTable* pMT, BinderClassID classId)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodTable* pMT_MscorlibClass = MscorlibBinder::GetClassIfExist(classId);
+ if (pMT_MscorlibClass == NULL)
+ return false;
+
+ return (pMT->HasSameTypeDefAs(pMT_MscorlibClass) != FALSE);
+}
+
+// Returns an interface with variance corresponding to pMT or NULL if pMT does not support variance.
+// The reason we don't just call HasVariance() is that we also deal with WinRT interfaces
+// like IIterable<T>, which do not (and cannot) have variance from the .NET type system's point of view.
+// static
+MethodTable *RCW::GetVariantMethodTable(MethodTable *pMT)
+{
+ CONTRACT(MethodTable *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ POSTCONDITION(RETVAL == NULL || RETVAL->HasVariance());
+ }
+ CONTRACT_END;
+
+ RCWPerTypeData *pData = pMT->GetRCWPerTypeData();
+ if (pData == NULL)
+ {
+ // if this type has no RCW data allocated, we know for sure that pMT has no
+ // corresponding MethodTable with variance
+ _ASSERTE(ComputeVariantMethodTable(pMT) == NULL);
+ RETURN NULL;
+ }
+
+ if ((pData->m_dwFlags & RCWPerTypeData::VariantTypeInited) == 0)
+ {
+ pData->m_pVariantMT = ComputeVariantMethodTable(pMT);
+ FastInterlockOr(&pData->m_dwFlags, RCWPerTypeData::VariantTypeInited);
+ }
+ RETURN pData->m_pVariantMT;
+}
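+
+// A note on the lazy-init idiom above (explanatory only, no new behavior): the computed
+// value is stored to m_pVariantMT first, and only then is the VariantTypeInited bit set
+// via interlocked OR. Readers test the flag before consuming the pointer, so they can
+// never observe the flag without the value. Racing initializers are harmless because
+// ComputeVariantMethodTable is deterministic - both writers store the same MethodTable.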
+
+// static
+MethodTable *RCW::ComputeVariantMethodTable(MethodTable *pMT)
+{
+ CONTRACT(MethodTable *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ POSTCONDITION(RETVAL == NULL || RETVAL->HasVariance());
+ }
+ CONTRACT_END;
+
+ if (!pMT->IsProjectedFromWinRT() && !WinRTTypeNameConverter::ResolveRedirectedType(pMT, NULL))
+ {
+ RETURN NULL;
+ }
+
+ if (pMT->HasVariance())
+ {
+ RETURN pMT;
+ }
+
+ // IIterable and IVectorView are not marked as covariant. Check them explicitly and
+ // return the corresponding IEnumerable / IReadOnlyList instantiation.
+ if (MethodTableHasSameTypeDefAsMscorlibClass(pMT, CLASS__IITERABLE))
+ {
+ RETURN TypeHandle(MscorlibBinder::GetExistingClass(CLASS__IENUMERABLEGENERIC)).
+ Instantiate(pMT->GetInstantiation()).AsMethodTable();
+ }
+ if (MethodTableHasSameTypeDefAsMscorlibClass(pMT, CLASS__IVECTORVIEW))
+ {
+ RETURN TypeHandle(MscorlibBinder::GetExistingClass(CLASS__IREADONLYLISTGENERIC)).
+ Instantiate(pMT->GetInstantiation()).AsMethodTable();
+ }
+
+ // IIterator is not marked as covariant either. Return the covariant IEnumerator.
+ DefineFullyQualifiedNameForClassW();
+ if (MethodTableHasSameTypeDefAsMscorlibClass(pMT, CLASS__IITERATOR) ||
+ wcscmp(GetFullyQualifiedNameForClassW_WinRT(pMT), g_WinRTIIteratorClassNameW) == 0)
+ {
+ RETURN TypeHandle(MscorlibBinder::GetClass(CLASS__IENUMERATORGENERIC)).
+ Instantiate(pMT->GetInstantiation()).AsMethodTable();
+ }
+
+ RETURN NULL;
+}
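+
+// Example of what this enables (illustrative): a WinRT object implementing
+// IIterable<Derived> is mapped to IEnumerable<Derived>, and since IEnumerable<T> is
+// covariant in .NET, callers can then use it as IEnumerable<Base> - even though
+// IIterable<T> itself has no variance in the .NET type system.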
+
+#ifndef CROSSGEN_COMPILE
+//-----------------------------------------------------------------
+// Determines the interface that should be QI'ed for when the RCW is cast to pItfMT.
+RCW::InterfaceRedirectionKind RCW::GetInterfaceForQI(MethodTable *pItfMT, MethodTable **pNewItfMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pItfMT));
+ PRECONDITION(CheckPointer(pNewItfMT));
+ }
+ CONTRACTL_END;
+
+ // We don't want to be redirecting interfaces if the underlying COM object is not a WinRT type
+ if (SupportsIInspectable() || pItfMT->IsWinRTRedirectedDelegate())
+ {
+ MethodTable *pNewItfMT1;
+ MethodTable *pNewItfMT2;
+ InterfaceRedirectionKind redirectionKind = GetInterfacesForQI(pItfMT, &pNewItfMT1, &pNewItfMT2);
+
+ //
+ // IEnumerable may need three QI attempts:
+ // 1. IEnumerable/IDispatch+DISPID_NEWENUM
+ // 2. IBindableIterable
+ // 3. IIterable<T> for a T
+ //
+        // Is this the 3rd attempt on the non-generic IEnumerable?
+ if (redirectionKind == InterfaceRedirection_Other_RetryOnFailure &&
+ pItfMT != *pNewItfMT && *pNewItfMT != NULL &&
+ pItfMT == MscorlibBinder::GetExistingClass(CLASS__IENUMERABLE))
+ {
+            // Yes - we are at the 3rd attempt;
+            // QI for IEnumerable/IDispatch+DISPID_NEWENUM and for IBindableIterable failed,
+            // and we are about to see if we know of an IIterable<T> to use.
+
+ MethodDesc *pMD = GetGetEnumeratorMethod();
+ if (pMD != NULL)
+ {
+ // we have already determined what casting to IEnumerable means for this RCW
+ TypeHandle th = TypeHandle(MscorlibBinder::GetClass(CLASS__IITERABLE));
+ *pNewItfMT = th.Instantiate(pMD->GetClassInstantiation()).GetMethodTable();
+ return InterfaceRedirection_IEnumerable;
+ }
+
+ // The last attempt failed, this is an error.
+ return InterfaceRedirection_UnresolvedIEnumerable;
+ }
+
+ if ((redirectionKind != InterfaceRedirection_IEnumerable_RetryOnFailure &&
+ redirectionKind != InterfaceRedirection_Other_RetryOnFailure) || *pNewItfMT == NULL)
+ {
+ // First attempt - use pNewItfMT1
+ *pNewItfMT = pNewItfMT1;
+ return redirectionKind;
+ }
+ else
+ {
+ // Second attempt - use pNewItfMT2
+ *pNewItfMT = pNewItfMT2;
+
+ if (redirectionKind == InterfaceRedirection_IEnumerable_RetryOnFailure)
+ return InterfaceRedirection_IEnumerable;
+
+            // Get ready for the 3rd attempt if the 2nd attempt fails
+            // This only happens for non-generic IEnumerable
+ // This only happens for non-generic IEnumerable
+ if (pItfMT == MscorlibBinder::GetExistingClass(CLASS__IENUMERABLE))
+ return InterfaceRedirection_IEnumerable_RetryOnFailure;
+
+ return InterfaceRedirection_IEnumerable;
+ }
+ }
+
+ *pNewItfMT = pItfMT;
+ return InterfaceRedirection_None;
+}
+#endif // !CROSSGEN_COMPILE
+
+// static
+RCW::InterfaceRedirectionKind RCW::GetInterfacesForQI(MethodTable *pItfMT, MethodTable **ppNewItfMT1, MethodTable **ppNewItfMT2)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pItfMT));
+ PRECONDITION(CheckPointer(ppNewItfMT1));
+ PRECONDITION(CheckPointer(ppNewItfMT2));
+ }
+ CONTRACTL_END;
+
+ RCWPerTypeData *pData = pItfMT->GetRCWPerTypeData();
+ if (pData == NULL)
+ {
+#ifdef _DEBUG
+ // verify that if the per-type data is NULL, the type has indeed no redirection
+ MethodTable *pNewItfMT1;
+ MethodTable *pNewItfMT2;
+ _ASSERTE(ComputeInterfacesForQI(pItfMT, &pNewItfMT1, &pNewItfMT2) == InterfaceRedirection_None);
+#endif // _DEBUG
+
+ *ppNewItfMT1 = pItfMT;
+ *ppNewItfMT2 = NULL;
+ return InterfaceRedirection_None;
+ }
+ else
+ {
+ if ((pData->m_dwFlags & RCWPerTypeData::RedirectionInfoInited) == 0)
+ {
+ pData->m_RedirectionKind = ComputeInterfacesForQI(pItfMT, &pData->m_pMTForQI1, &pData->m_pMTForQI2);
+ FastInterlockOr(&pData->m_dwFlags, RCWPerTypeData::RedirectionInfoInited);
+ }
+
+ *ppNewItfMT1 = pData->m_pMTForQI1;
+ *ppNewItfMT2 = pData->m_pMTForQI2;
+ return pData->m_RedirectionKind;
+ }
+}
+
+// static
+RCW::InterfaceRedirectionKind RCW::ComputeInterfacesForQI(MethodTable *pItfMT, MethodTable **ppNewItfMT1, MethodTable **ppNewItfMT2)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pItfMT));
+ PRECONDITION(CheckPointer(ppNewItfMT1));
+ PRECONDITION(CheckPointer(ppNewItfMT2));
+ }
+ CONTRACTL_END;
+
+ if (pItfMT->IsProjectedFromWinRT())
+ {
+ // If we're casting to IIterable<T> directly, then while we do want to QI IIterable<T>, also
+ // make a note that it is redirected from IEnumerable<T>
+ if (pItfMT->HasInstantiation() && pItfMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__IITERABLE)))
+ {
+ *ppNewItfMT1 = pItfMT;
+ return InterfaceRedirection_IEnumerable;
+ }
+ }
+ else
+ {
+ WinMDAdapter::RedirectedTypeIndex redirectedInterfaceIndex;
+ RCW::InterfaceRedirectionKind redirectionKind = InterfaceRedirection_None;
+
+ BOOL fChosenIDictionary;
+ MethodTable *pResolvedItfMT = ResolveICollectionInterface(pItfMT, TRUE, &fChosenIDictionary);
+ if (pResolvedItfMT == NULL)
+ {
+ pResolvedItfMT = pItfMT;
+ // Let ResolveRedirectedType convert IDictionary/IList to the corresponding WinRT type as usual
+ }
+
+ if (WinRTInterfaceRedirector::ResolveRedirectedInterface(pResolvedItfMT, &redirectedInterfaceIndex))
+ {
+ TypeHandle th = WinRTInterfaceRedirector::GetWinRTTypeForRedirectedInterfaceIndex(redirectedInterfaceIndex);
+
+ if (th.HasInstantiation())
+ {
+ *ppNewItfMT1 = th.Instantiate(pResolvedItfMT->GetInstantiation()).GetMethodTable();
+ if (pItfMT->CanCastToInterface(MscorlibBinder::GetClass(CLASS__IENUMERABLE)))
+ {
+ redirectionKind = InterfaceRedirection_IEnumerable;
+ }
+ else
+ {
+ _ASSERTE(!fChosenIDictionary);
+ redirectionKind = InterfaceRedirection_Other;
+ }
+ }
+ else
+ {
+ // pItfMT is a non-generic redirected interface - for compat reasons do QI for the interface first,
+ // and if it fails, use redirection
+ *ppNewItfMT1 = pItfMT;
+ *ppNewItfMT2 = th.GetMethodTable();
+ redirectionKind = InterfaceRedirection_Other_RetryOnFailure;
+ }
+ }
+
+ if (fChosenIDictionary)
+ {
+ // pItfMT is the ambiguous ICollection<KeyValuePair<K, V>> and *ppNewItfMT1 at this point is the
+ // corresponding IMap<K, V>, now we are going to assign IVector<IKeyValuePair<K, V>> to *ppNewItfMT2
+ pResolvedItfMT = ResolveICollectionInterface(pItfMT, FALSE, NULL);
+
+ VERIFY(WinRTInterfaceRedirector::ResolveRedirectedInterface(pResolvedItfMT, &redirectedInterfaceIndex));
+ TypeHandle th = WinRTInterfaceRedirector::GetWinRTTypeForRedirectedInterfaceIndex(redirectedInterfaceIndex);
+
+ *ppNewItfMT2 = th.Instantiate(pItfMT->GetInstantiation()).GetMethodTable();
+ redirectionKind = InterfaceRedirection_IEnumerable_RetryOnFailure;
+ }
+
+ if (redirectionKind != InterfaceRedirection_None)
+ {
+ return redirectionKind;
+ }
+
+ if (WinRTDelegateRedirector::ResolveRedirectedDelegate(pItfMT, &redirectedInterfaceIndex))
+ {
+ TypeHandle th = TypeHandle(WinRTDelegateRedirector::GetWinRTTypeForRedirectedDelegateIndex(redirectedInterfaceIndex));
+
+ if (pItfMT->HasInstantiation())
+ {
+ th = th.Instantiate(pItfMT->GetInstantiation());
+ }
+
+ *ppNewItfMT1 = th.GetMethodTable();
+ return InterfaceRedirection_Other;
+ }
+ }
+
+ *ppNewItfMT1 = pItfMT;
+ return InterfaceRedirection_None;
+}
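+
+ // A few representative outcomes of ComputeInterfacesForQI (editorial sketch; the exact
+ // WinRT-side types come from the redirection table and are listed here for illustration
+ // only):
+ //
+ //   managed cast target                    *ppNewItfMT1          *ppNewItfMT2                  result
+ //   -------------------------------------  --------------------  ----------------------------  -----------------------------------------------
+ //   IIterable<T> (projected)               IIterable<T>          -                             InterfaceRedirection_IEnumerable
+ //   IEnumerable<T>                         IIterable<T>          -                             InterfaceRedirection_IEnumerable
+ //   IDisposable (non-generic, redirected)  IDisposable           IClosable                     InterfaceRedirection_Other_RetryOnFailure
+ //   ICollection<KeyValuePair<K, V>>        IMap<K, V>            IVector<IKeyValuePair<K, V>>  InterfaceRedirection_IEnumerable_RetryOnFailure
+ //   unredirected interface                 the interface itself  -                             InterfaceRedirection_None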
+
+#ifndef CROSSGEN_COMPILE
+//-----------------------------------------------------------------
+// Returns a known working IEnumerable<T>::GetEnumerator to be used in lieu of the non-generic
+// IEnumerable::GetEnumerator.
+MethodDesc *RCW::GetGetEnumeratorMethod()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pAuxiliaryData == NULL || m_pAuxiliaryData->m_pGetEnumeratorMethod == NULL)
+ {
+ MethodTable *pClsMT;
+ {
+ GCX_COOP();
+ pClsMT = GetExposedObject()->GetTrueMethodTable();
+ }
+
+ SetGetEnumeratorMethod(pClsMT);
+ }
+
+ return (m_pAuxiliaryData == NULL ? NULL : m_pAuxiliaryData->m_pGetEnumeratorMethod);
+}
+
+//-----------------------------------------------------------------
+// Sets the first "known" GetEnumerator method on the RCW if not set already.
+void RCW::SetGetEnumeratorMethod(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pAuxiliaryData != NULL && m_pAuxiliaryData->m_pGetEnumeratorMethod != NULL)
+ return;
+
+ // Retrieve cached GetEnumerator method or compute the right one for this pMT
+ MethodDesc *pMD = GetOrComputeGetEnumeratorMethodForType(pMT);
+
+ if (pMD != NULL)
+ {
+ // We successfully got a GetEnumerator method - cache it in the RCW
+ // We can have multiple casts going on concurrently; make sure that
+ // the result of this method is stable.
+ InterlockedCompareExchangeT(&GetOrCreateAuxiliaryData()->m_pGetEnumeratorMethod, pMD, NULL);
+ }
+}
+
+// Retrieve cached GetEnumerator method or compute the right one for a specific type
+MethodDesc *RCW::GetOrComputeGetEnumeratorMethodForType(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodDesc *pMD = NULL;
+
+ RCWPerTypeData *pData = pMT->GetRCWPerTypeData();
+ if (pData != NULL)
+ {
+ if ((pData->m_dwFlags & RCWPerTypeData::GetEnumeratorInited) == 0)
+ {
+ pData->m_pGetEnumeratorMethod = ComputeGetEnumeratorMethodForType(pMT);
+ FastInterlockOr(&pData->m_dwFlags, RCWPerTypeData::GetEnumeratorInited);
+ }
+
+ pMD = pData->m_pGetEnumeratorMethod;
+ }
+ else
+ {
+ pMD = ComputeGetEnumeratorMethodForType(pMT);
+ }
+
+ return pMD;
+}
+
+// Compute the first GetEnumerator for a specific type
+MethodDesc *RCW::ComputeGetEnumeratorMethodForType(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodDesc *pMD = ComputeGetEnumeratorMethodForTypeInternal(pMT);
+
+ // Walk the interface map and try to compute the GetEnumerator method from each implemented interface
+ MethodTable::InterfaceMapIterator it = pMT->IterateInterfaceMap();
+ while (pMD == NULL && it.Next())
+ {
+ pMT = it.GetInterface();
+ pMD = GetOrComputeGetEnumeratorMethodForType(pMT);
+ }
+
+ return pMD;
+}
+
+// Get the GetEnumerator method for IEnumerable<T> or IIterable<T>
+MethodDesc *RCW::ComputeGetEnumeratorMethodForTypeInternal(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!pMT->HasSameTypeDefAs(MscorlibBinder::GetExistingClass(CLASS__IENUMERABLEGENERIC)))
+ {
+ // If we have an IIterable<T>, we want to get the enumerator for the equivalent
+ // instantiation of IEnumerable<T>
+ if (pMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__IITERABLE)))
+ {
+ TypeHandle thEnumerable = TypeHandle(MscorlibBinder::GetExistingClass(CLASS__IENUMERABLEGENERIC));
+ pMT = thEnumerable.Instantiate(pMT->GetInstantiation()).GetMethodTable();
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+
+ MethodDesc *pMD = pMT->GetMethodDescForSlot(0);
+ _ASSERTE(strcmp(pMD->GetName(), "GetEnumerator") == 0);
+
+ if (pMD->IsSharedByGenericInstantiations())
+ {
+ pMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pMD,
+ pMT,
+ FALSE, // forceBoxedEntryPoint
+ Instantiation(), // methodInst
+ FALSE, // allowInstParam
+ TRUE); // forceRemotableMethod
+ }
+
+ return pMD;
+}
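+
+ // Putting the helpers above together (editorial sketch of the managed-visible behavior;
+ // 'winrtCollection' is a hypothetical RCW):
+ //
+ //   IEnumerable e = (IEnumerable)winrtCollection;  // QI resolved via interface redirection
+ //   IEnumerator it = e.GetEnumerator();            // dispatched to the cached
+ //                                                  // IEnumerable<T>.GetEnumerator computed
+ //                                                  // by the code above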
+
+
+//-----------------------------------------------------------------
+// Notifies the RCW of an interface that is known to be supported by the COM object.
+ // pItfMT is the type that the object directly supports; originalInst is the instantiation
+ // that we asked for, i.e. we know that the object supports pItfMT<originalInst> via
+ // variance because the QI for IID(pItfMT) succeeded.
+void RCW::SetSupportedInterface(MethodTable *pItfMT, Instantiation originalInst)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BOOL fIsEnumerable = (pItfMT->HasSameTypeDefAs(MscorlibBinder::GetExistingClass(CLASS__IENUMERABLEGENERIC)) ||
+ pItfMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__IITERABLE)));
+
+ if (fIsEnumerable || pItfMT->HasSameTypeDefAs(MscorlibBinder::GetExistingClass(CLASS__IREADONLYLISTGENERIC)) ||
+ pItfMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__IVECTORVIEW)))
+ {
+ WinRTInterfaceRedirector::WinRTLegalStructureBaseType baseType;
+ if (!originalInst.IsEmpty())
+ {
+ // use the original instantiation if available
+ baseType = WinRTInterfaceRedirector::GetStructureBaseType(originalInst);
+ }
+ else
+ {
+ baseType = WinRTInterfaceRedirector::GetStructureBaseType(pItfMT->GetInstantiation());
+ }
+
+ switch (baseType)
+ {
+ case WinRTInterfaceRedirector::BaseType_Object:
+ {
+ OBJECTHANDLE *pohHandleField = fIsEnumerable ?
+ &GetOrCreateAuxiliaryData()->m_ohObjectVariantCallTarget_IEnumerable :
+ &GetOrCreateAuxiliaryData()->m_ohObjectVariantCallTarget_IReadOnlyList;
+
+ if (*pohHandleField != NULL)
+ {
+ // we've already established the behavior so we can skip the code below
+ break;
+ }
+
+ if (!originalInst.IsEmpty())
+ {
+ MethodTable *pInstArgMT = pItfMT->GetInstantiation()[0].GetMethodTable();
+
+ if (pInstArgMT == g_pStringClass)
+ {
+ // We are casting the RCW to IEnumerable<string> or IReadOnlyList<string> - we special-case this common case
+ // so we don't have to create the delegate.
+ FastInterlockCompareExchangePointer<OBJECTHANDLE>(pohHandleField, VARIANCE_STUB_TARGET_USE_STRING, NULL);
+ }
+ else if (pInstArgMT == g_pExceptionClass ||
+ pInstArgMT == MscorlibBinder::GetClass(CLASS__TYPE) ||
+ pInstArgMT->IsArray() ||
+ pInstArgMT->IsDelegate())
+ {
+ // We are casting the RCW to IEnumerable<T> or IReadOnlyList<T> where T is Type/Exception/an array/a delegate
+ // i.e. an unbounded set of types. We'll create a delegate pointing to the right stub and cache it on the RCW
+ // so we can handle the calls via GetEnumerator/Indexer_Get as fast as possible.
+
+ MethodDesc *pTargetMD = MscorlibBinder::GetMethod(fIsEnumerable ?
+ METHOD__ITERABLE_TO_ENUMERABLE_ADAPTER__GET_ENUMERATOR_STUB :
+ METHOD__IVECTORVIEW_TO_IREADONLYLIST_ADAPTER__INDEXER_GET);
+
+ pTargetMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pTargetMD,
+ pTargetMD->GetMethodTable(),
+ FALSE, // forceBoxedEntryPoint
+ pItfMT->GetInstantiation(), // methodInst
+ FALSE, // allowInstParam
+ TRUE); // forceRemotableMethod
+
+ MethodTable *pMT = MscorlibBinder::GetClass(fIsEnumerable ?
+ CLASS__GET_ENUMERATOR_DELEGATE :
+ CLASS__INDEXER_GET_DELEGATE);
+
+ pMT = TypeHandle(pMT).Instantiate(pItfMT->GetInstantiation()).AsMethodTable();
+
+ GCX_COOP();
+
+ DELEGATEREF pDelObj = NULL;
+ GCPROTECT_BEGIN(pDelObj);
+
+ pDelObj = (DELEGATEREF)AllocateObject(pMT);
+ pDelObj->SetTarget(GetExposedObject());
+ pDelObj->SetMethodPtr(pTargetMD->GetMultiCallableAddrOfCode());
+
+ OBJECTHANDLEHolder oh = GetAppDomain()->CreateHandle(pDelObj);
+ if (FastInterlockCompareExchangePointer<OBJECTHANDLE>(pohHandleField, oh, NULL) == NULL)
+ {
+ oh.SuppressRelease();
+ }
+
+ GCPROTECT_END();
+ }
+ }
+
+ // the default is "use T", i.e. handle the call as normal
+ if (*pohHandleField == NULL)
+ {
+ FastInterlockCompareExchangePointer<OBJECTHANDLE>(pohHandleField, VARIANCE_STUB_TARGET_USE_T, NULL);
+ }
+ break;
+ }
+
+ case WinRTInterfaceRedirector::BaseType_IEnumerable:
+ case WinRTInterfaceRedirector::BaseType_IEnumerableOfChar:
+ {
+ // The only WinRT-legal type that implements IEnumerable<IEnumerable> or IEnumerable<IEnumerable<char>> or
+ // IReadOnlyList<IEnumerable> or IReadOnlyList<IEnumerable<char>> AND is not an IInspectable on the WinRT
+ // side is string. We'll use a couple of flags here since the number of options is small.
+
+ InterfaceVarianceBehavior varianceBehavior = (fIsEnumerable ? IEnumerableSupported : IReadOnlyListSupported);
+
+ if (!originalInst.IsEmpty())
+ {
+ MethodTable *pInstArgMT = pItfMT->GetInstantiation()[0].GetMethodTable();
+ if (pInstArgMT == g_pStringClass)
+ {
+ varianceBehavior = (InterfaceVarianceBehavior)
+ (varianceBehavior | (fIsEnumerable ? IEnumerableSupportedViaStringInstantiation : IReadOnlyListSupportedViaStringInstantiation));
+ }
+
+ RCWAuxiliaryData::RCWAuxFlags newAuxFlags = { 0 };
+
+ if (baseType == WinRTInterfaceRedirector::BaseType_IEnumerable)
+ {
+ newAuxFlags.m_InterfaceVarianceBehavior_OfIEnumerable = varianceBehavior;
+ }
+ else
+ {
+ _ASSERTE(baseType == WinRTInterfaceRedirector::BaseType_IEnumerableOfChar);
+ newAuxFlags.m_InterfaceVarianceBehavior_OfIEnumerableOfChar = varianceBehavior;
+ }
+
+ RCWAuxiliaryData *pAuxData = GetOrCreateAuxiliaryData();
+ FastInterlockOr(&pAuxData->m_AuxFlags.m_dwFlags, newAuxFlags.m_dwFlags);
+ }
+ }
+ }
+ }
+}
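+
+ // Editorial example of why this bookkeeping matters (variable names illustrative): if a
+ // QI for IEnumerable<string> succeeded on this RCW, a later cast to IEnumerable<object>
+ // can be satisfied via covariance, and calls such as GetEnumerator() are routed through
+ // the delegate/flags recorded above:
+ //
+ //   var strings = (IEnumerable<string>)rcw;  // QI succeeds, behavior recorded
+ //   var objects = (IEnumerable<object>)rcw;  // satisfied via the recorded variance info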
+
+// Performs QI for the given interface, optionally instantiating it with the given generic args.
+HRESULT RCW::CallQueryInterface(MethodTable *pMT, Instantiation inst, IID *piid, IUnknown **ppUnk)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ MethodTable *pCastToMT = pMT;
+ MethodTable *pCOMItfMT = NULL;
+ InterfaceRedirectionKind redirection = InterfaceRedirection_None;
+
+ if (!inst.IsEmpty())
+ {
+ pMT = TypeHandle(pMT).Instantiate(inst).GetMethodTable();
+ }
+
+ do
+ {
+ redirection = GetInterfaceForQI(pMT, &pCOMItfMT);
+
+ if (redirection == InterfaceRedirection_UnresolvedIEnumerable)
+ {
+ // We just say no in this case. If we threw an exception, we would make the "as"
+ // operator throw, which would be an ECMA violation.
+ return E_NOINTERFACE;
+ }
+
+ // To avoid throwing BadImageFormatException later in ComputeGuidForGenericTypes we must fail early if this is a generic type and not a legal WinRT type.
+ if (pCOMItfMT->SupportsGenericInterop(TypeHandle::Interop_NativeToManaged, MethodTable::modeProjected) && !pCOMItfMT->IsLegalNonArrayWinRTType())
+ {
+ return E_NOINTERFACE;
+ }
+ else
+ {
+ // Retrieve the IID of the interface.
+ pCOMItfMT->GetGuid(piid, TRUE);
+ }
+
+ // QI for the interface.
+ hr = SafeQueryInterfaceRemoteAware(*piid, ppUnk);
+ }
+ while (hr == E_NOINTERFACE && // Terminate the loop if the QI failed for some other reasons (for example, context transition failure)
+ (redirection == InterfaceRedirection_IEnumerable_RetryOnFailure || redirection == InterfaceRedirection_Other_RetryOnFailure));
+
+ if (SUCCEEDED(hr))
+ {
+ if (redirection == InterfaceRedirection_IEnumerable)
+ {
+ // remember the first IEnumerable<T> interface we successfully QI'ed for
+ SetGetEnumeratorMethod(pMT);
+ }
+
+ // remember successful QI's for interesting interfaces passing the original instantiation so we know that variance was involved
+ SetSupportedInterface(pCOMItfMT, pCastToMT->GetInstantiation());
+ }
+
+ return hr;
+}
+
+// Performs QI for interfaces that are castable to pMT using co-/contra-variance.
+HRESULT RCW::CallQueryInterfaceUsingVariance(MethodTable *pMT, IUnknown **ppUnk)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_NOINTERFACE;
+
+ // see if pMT is an interface with variance, if not we return NULL
+ MethodTable *pVariantMT = GetVariantMethodTable(pMT);
+
+ if (pVariantMT != NULL)
+ {
+ MethodTable *pItfMT = NULL;
+ IID variantIid;
+
+ MethodTable *pClassMT;
+
+ {
+ GCX_COOP();
+ pClassMT = GetExposedObject()->GetTrueMethodTable();
+ }
+
+ // Try interfaces that we know about from metadata
+ if (pClassMT != NULL && pClassMT != g_pBaseCOMObject)
+ {
+ MethodTable::InterfaceMapIterator it = pClassMT->IterateInterfaceMap();
+ while (FAILED(hr) && it.Next())
+ {
+ pItfMT = GetVariantMethodTable(it.GetInterface());
+ if (pItfMT != NULL && pItfMT->CanCastByVarianceToInterfaceOrDelegate(pVariantMT, NULL))
+ {
+ hr = CallQueryInterface(pMT, pItfMT->GetInstantiation(), &variantIid, ppUnk);
+ }
+ }
+ }
+
+ // Then try the interface pointer cache
+ CachedInterfaceEntryIterator it = IterateCachedInterfacePointers();
+ while (FAILED(hr) && it.Next())
+ {
+ MethodTable *pCachedItfMT = (MethodTable *)it.GetEntry()->m_pMT;
+ if (pCachedItfMT != NULL)
+ {
+ pItfMT = GetVariantMethodTable(pCachedItfMT);
+ if (pItfMT != NULL && pItfMT->CanCastByVarianceToInterfaceOrDelegate(pVariantMT, NULL))
+ {
+ hr = CallQueryInterface(pMT, pItfMT->GetInstantiation(), &variantIid, ppUnk);
+ }
+
+ // The cached interface may not support variance, but one of its base interfaces can
+ if (FAILED(hr))
+ {
+ MethodTable::InterfaceMapIterator it = pCachedItfMT->IterateInterfaceMap();
+ while (FAILED(hr) && it.Next())
+ {
+ pItfMT = GetVariantMethodTable(it.GetInterface());
+ if (pItfMT != NULL && pItfMT->CanCastByVarianceToInterfaceOrDelegate(pVariantMT, NULL))
+ {
+ hr = CallQueryInterface(pMT, pItfMT->GetInstantiation(), &variantIid, ppUnk);
+ }
+ }
+ }
+ }
+ }
+
+ // If we still haven't succeeded, enumerate the variant interface cache
+ if (FAILED(hr) && m_pAuxiliaryData != NULL && m_pAuxiliaryData->m_prVariantInterfaces != NULL)
+ {
+ // make a copy of the cache under the lock
+ ArrayList rVariantInterfacesCopy;
+ {
+ CrstHolder ch(&m_pAuxiliaryData->m_VarianceCacheCrst);
+
+ ArrayList::Iterator it = m_pAuxiliaryData->m_prVariantInterfaces->Iterate();
+ while (it.Next())
+ {
+ rVariantInterfacesCopy.Append(it.GetElement());
+ }
+ }
+
+ ArrayList::Iterator it = rVariantInterfacesCopy.Iterate();
+ while (FAILED(hr) && it.Next())
+ {
+ pItfMT = (MethodTable *)it.GetElement();
+ if (pItfMT->CanCastByVarianceToInterfaceOrDelegate(pVariantMT, NULL))
+ {
+ hr = CallQueryInterface(pMT, pItfMT->GetInstantiation(), &variantIid, ppUnk);
+ }
+ }
+ }
+ }
+
+ return hr;
+}
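+
+ // The probing order above, summarized (editorial sketch): given a failed QI for, say,
+ // IEnumerable<object>, we look for a variant-compatible instantiation in
+ //
+ //   1. the class's static interface map (metadata),
+ //   2. the RCW's cached interface pointers (and their base interfaces),
+ //   3. the auxiliary variant-interface cache (copied under m_VarianceCacheCrst),
+ //
+ // and QI for e.g. IEnumerable<string> instead, relying on the covariance of T.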
+
+//-----------------------------------------------------------------
+// Retrieve correct COM IP for the method table
+// for the current apartment, use the cache and update the cache on miss
+IUnknown* RCW::GetComIPForMethodTableFromCache(MethodTable* pMT)
+{
+ CONTRACT(IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ ULONG cbRef;
+ IUnknown* pUnk = 0;
+ IID iid;
+ HRESULT hr;
+ int i;
+
+ LPVOID pCtxCookie = GetCurrentCtxCookie();
+ _ASSERTE(pCtxCookie != NULL);
+
+ RCW_VTABLEPTR(this);
+
+ // Check whether we can satisfy this request from our cache.
+ if (pCtxCookie == GetWrapperCtxCookie() || IsFreeThreaded())
+ {
+ for (i = 0; i < INTERFACE_ENTRY_CACHE_SIZE; i++)
+ {
+ if (m_aInterfaceEntries[i].m_pMT == (IE_METHODTABLE_PTR)pMT)
+ {
+ _ASSERTE(!m_aInterfaceEntries[i].IsFree());
+
+ pUnk = m_aInterfaceEntries[i].m_pUnknown;
+ _ASSERTE(pUnk != NULL);
+
+ cbRef = SafeAddRef(pUnk);
+ LogInteropAddRef(pUnk, cbRef, "RCW::GetComIPForMethodTableFromCache: Addref because returning pUnk fetched from InterfaceEntry cache");
+ RETURN pUnk;
+ }
+ }
+ }
+
+ if (m_pAuxiliaryData != NULL)
+ {
+ pUnk = m_pAuxiliaryData->FindInterfacePointer(pMT, (IsFreeThreaded() ? NULL : pCtxCookie));
+ if (pUnk != NULL)
+ {
+ cbRef = SafeAddRef(pUnk);
+ LogInteropAddRef(pUnk, cbRef, "RCW::GetComIPForMethodTableFromCache: Addref because returning pUnk fetched from auxiliary interface pointer cache");
+ RETURN pUnk;
+ }
+ }
+
+ // We're going to be making some COM calls, better initialize COM.
+ EnsureComStarted();
+
+ // First, try to QI for the interface that we were asked for
+ hr = CallQueryInterface(pMT, Instantiation(), &iid, &pUnk);
+
+ // If that failed and the interface has variance, we'll try to find another instantiation
+ bool fVarianceUsed = false;
+ if (FAILED(hr))
+ {
+ hr = CallQueryInterfaceUsingVariance(pMT, &pUnk);
+ if (pUnk != NULL)
+ {
+ fVarianceUsed = true;
+ }
+ }
+
+#ifdef MDA_SUPPORTED
+ if (FAILED(hr))
+ {
+ MDA_TRIGGER_ASSISTANT(FailedQI, ReportAdditionalInfo(hr, this, iid, pMT));
+ }
+#endif
+
+ if (pUnk == NULL)
+ RETURN NULL;
+
+ // See if we should cache the result in the fast inline cache. This cache can only store interface pointers
+ // returned from QI's in the same context where we created the RCW.
+ bool fAllowCache = true;
+ bool fAllowOutOfContextCache = true;
+
+ if (!pMT->IsProjectedFromWinRT() && !pMT->IsWinRTRedirectedInterface(TypeHandle::Interop_ManagedToNative) && !pMT->IsWinRTRedirectedDelegate())
+ {
+ AppDomain *pAppDomain = GetAppDomain();
+ if (pAppDomain && pAppDomain->GetDisableInterfaceCache())
+ {
+ // Caching is disabled in this AD
+ fAllowCache = false;
+ }
+ else
+ {
+ // This is not a WinRT interface and we could in theory use the out-of-context auxiliary cache;
+ // at worst we would just do
+ // fAllowOutOfContextCache = !IsURTAggregated()
+ // however such a change has some breaking potential (COM proxies would live much longer) and is
+ // considered too risky for an in-place release.
+
+ fAllowOutOfContextCache = false;
+ }
+ }
+
+ // try to cache the interface pointer in the inline cache
+ bool fInterfaceCached = false;
+ if (fAllowCache)
+ {
+ if (GetWrapperCtxCookie() == pCtxCookie || IsFreeThreaded())
+ {
+ for (i = 0; i < INTERFACE_ENTRY_CACHE_SIZE; i++)
+ {
+ if (m_aInterfaceEntries[i].IsFree() && m_aInterfaceEntries[i].Init(pMT, pUnk))
+ {
+ // If the component is not aggregated then we need to ref-count
+ if (!IsURTAggregated())
+ {
+ // Get an extra addref to hold this reference alive in our cache
+ cbRef = SafeAddRef(pUnk);
+ LogInteropAddRef(pUnk, cbRef, "RCW::GetComIPForMethodTableFromCache: Addref because storing pUnk in InterfaceEntry cache");
+
+ // Notify Jupiter that we have done an AddRef.
+ // We should do this *after* we make the AddRef because we should never
+ // be in a state where reported refs > actual refs
+ RCWWalker::AfterInterfaceAddRef(this);
+ }
+
+ fInterfaceCached = true;
+ break;
+ }
+ }
+ }
+
+ if (!fInterfaceCached && fAllowOutOfContextCache)
+ {
+ // We couldn't insert into the inline cache, either because it didn't fit, or because
+ // we are in the wrong COM context. We'll use the RCWAuxiliaryData structure instead.
+ GetOrCreateAuxiliaryData()->CacheInterfacePointer(pMT, pUnk, (IsFreeThreaded() ? NULL : pCtxCookie));
+
+ // If the component is not aggregated then we need to ref-count
+ if (!IsURTAggregated())
+ {
+ // Get an extra addref to hold this reference alive in our cache
+ cbRef = SafeAddRef(pUnk);
+ LogInteropAddRef(pUnk, cbRef, "RCW::GetComIPForMethodTableFromCache: Addref because storing pUnk in the auxiliary interface pointer cache");
+
+ // Notify Jupiter that we have done an AddRef.
+ // We should do this *after* we make the AddRef because we should never
+ // be in a state where reported refs > actual refs
+ RCWWalker::AfterInterfaceAddRef(this);
+ }
+
+ fInterfaceCached = true;
+ }
+ }
+
+ // Make sure we cache successful QI's for variant interfaces. This is so we can cast an RCW for
+ // example to IEnumerable<object> if we previously successfully QI'ed for IEnumerable<IFoo>. We
+ // only need to do this if we actually didn't use variance for this QI.
+ if (!fVarianceUsed)
+ {
+ MethodTable *pVariantMT = GetVariantMethodTable(pMT);
+
+ // We can also skip the potentially expensive CacheVariantInterface call if we already inserted
+ // the variant interface into our interface pointer cache.
+ if (pVariantMT != NULL && (!fInterfaceCached || pVariantMT != pMT))
+ {
+ GetOrCreateAuxiliaryData()->CacheVariantInterface(pVariantMT);
+ }
+ }
+
+ RETURN pUnk;
+}
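+
+ // Overall lookup order in GetComIPForMethodTableFromCache (editorial summary):
+ //
+ //   1. inline InterfaceEntry cache     - same context (or free-threaded) only
+ //   2. RCWAuxiliaryData pointer cache  - per-context entries
+ //   3. QI on the COM object            - direct first, then variance-assisted
+ //   4. cache the result                - inline if it fits and the context matches,
+ //                                        otherwise the auxiliary (out-of-context) cache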
+
+//----------------------------------------------------------
+// Determine if the COM object supports IProvideClassInfo.
+BOOL RCW::SupportsIProvideClassInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BOOL bSupportsIProvideClassInfo = FALSE;
+ SafeComHolder<IUnknown> pProvClassInfo = NULL;
+
+ // QI for IProvideClassInfo on the COM object.
+ HRESULT hr = SafeQueryInterfaceRemoteAware(IID_IProvideClassInfo, &pProvClassInfo);
+
+ // Check to see if the QI for IProvideClassInfo succeeded.
+ if (SUCCEEDED(hr))
+ {
+ _ASSERTE(pProvClassInfo);
+ bSupportsIProvideClassInfo = TRUE;
+ }
+
+ return bSupportsIProvideClassInfo;
+}
+
+BOOL RCW::AllowEagerSTACleanup()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // We only consider STA threads. MTA threads should have been dealt
+ // with before calling this.
+ _ASSERTE(GetSTAThread() != NULL);
+
+ // If the client has called CoEEShutdownCOM, then we should always try to
+ // clean up RCWs, even if they have previously opted out by calling
+ // DisableComObjectEagerCleanup. There's no way for clients to re-enable
+ // eager cleanup, so if we don't clean up now, they will be leaked. After
+ // shutting down COM, clients would not expect any RCWs to be left over.
+ if( g_fShutDownCOM )
+ {
+ return TRUE;
+ }
+
+ return m_Flags.m_fAllowEagerSTACleanup;
+}
+
+HRESULT RCW::EnterContext(PFNCTXCALLBACK pCallbackFunc, LPVOID pData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(!IsFreeThreaded());
+ PRECONDITION(GetWrapperCtxEntryNoRef() != NULL);
+ }
+ CONTRACTL_END;
+
+ CtxEntryHolder pCtxEntry = GetWrapperCtxEntry();
+ return pCtxEntry->EnterContext(pCallbackFunc, pData);
+}
+
+//---------------------------------------------------------------------
+// Callback called to release the interfaces in the auxiliary cache.
+HRESULT __stdcall RCW::ReleaseAuxInterfacesCallBack(LPVOID pData)
+{
+ CONTRACT(HRESULT)
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pData));
+ POSTCONDITION(SUCCEEDED(RETVAL));
+ }
+ CONTRACT_END;
+
+ RCW* pWrap = (RCW*)pData;
+
+ LPVOID pCurrentCtxCookie = GetCurrentCtxCookie();
+ _ASSERTE(pCurrentCtxCookie != NULL);
+
+ RCW_VTABLEPTR(pWrap);
+
+ // we don't come here for free-threaded RCWs
+ _ASSERTE(!pWrap->IsFreeThreaded());
+
+ // we don't come here if there are no interfaces in the aux cache
+ _ASSERTE(pWrap->m_pAuxiliaryData != NULL);
+
+ RCWAuxiliaryData::InterfaceEntryIterator it = pWrap->m_pAuxiliaryData->IterateInterfacePointers();
+ while (it.Next())
+ {
+ InterfaceEntry *pEntry = it.GetEntry();
+ if (!pEntry->IsFree())
+ {
+ if (pCurrentCtxCookie == it.GetCtxCookie())
+ {
+ IUnknown *pUnk = it.GetEntry()->m_pUnknown;
+
+ // make sure we never try to clean this up again
+ pEntry->Free();
+ SafeReleasePreemp(pUnk, pWrap);
+ }
+ }
+ }
+
+ RETURN S_OK;
+}
+
+//---------------------------------------------------------------------
+// Callback called to release the IUnkEntry and the Interface entries.
+HRESULT __stdcall RCW::ReleaseAllInterfacesCallBack(LPVOID pData)
+{
+ CONTRACT(HRESULT)
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pData));
+ POSTCONDITION(SUCCEEDED(RETVAL));
+ }
+ CONTRACT_END;
+
+ RCW* pWrap = (RCW*)pData;
+
+ RCW_VTABLEPTR(pWrap);
+
+ LPVOID pCurrentCtxCookie = GetCurrentCtxCookie();
+ if (pCurrentCtxCookie == NULL || pCurrentCtxCookie == pWrap->GetWrapperCtxCookie() || pWrap->IsFreeThreaded())
+ {
+ pWrap->ReleaseAllInterfaces();
+ }
+ else
+ {
+ // Transition into the context to release the interfaces.
+ HRESULT hr = pWrap->EnterContext(ReleaseAllInterfacesCallBack, pWrap);
+ if (FAILED(hr))
+ {
+ // The context is disconnected so we cannot transition into it to clean up.
+ // The only option we have left is to try and release the interfaces from
+ // the current context. This will work for context-agile objects since we have
+ // a pointer to them directly. It will, however, fail for others since we only
+ // have a pointer to a proxy which is no longer attached to the object.
+
+#ifdef MDA_SUPPORTED
+ MDA_TRIGGER_ASSISTANT(DisconnectedContext, ReportViolationCleanup(pWrap->GetWrapperCtxCookie(), pCurrentCtxCookie, hr));
+#endif
+
+ pWrap->ReleaseAllInterfaces();
+ }
+ }
+
+ // Free auxiliary interface entries if this is not an extensible RCW
+ if (!pWrap->IsURTAggregated() && pWrap->m_pAuxiliaryData != NULL)
+ {
+ RCWAuxiliaryData::InterfaceEntryIterator it = pWrap->m_pAuxiliaryData->IterateInterfacePointers();
+ while (it.Next())
+ {
+ InterfaceEntry *pEntry = it.GetEntry();
+ if (!pEntry->IsFree())
+ {
+ IUnknown *pUnk = it.GetEntry()->m_pUnknown;
+
+ if (pCurrentCtxCookie == NULL || pCurrentCtxCookie == it.GetCtxCookie() || pWrap->IsFreeThreaded())
+ {
+ // Notify Jupiter that we are about to do a Release() for every cached interface pointer.
+ // This needs to happen before calling Release because we should never be in a
+ // state where we report more refs than we actually hold
+ RCWWalker::BeforeInterfaceRelease(pWrap);
+
+ // make sure we never try to clean this up again
+ pEntry->Free();
+ SafeReleasePreemp(pUnk, pWrap);
+ }
+ else
+ {
+ _ASSERTE(!pWrap->IsJupiterObject());
+
+ // Retrieve the addref'ed context entry that the wrapper lives in.
+ CtxEntryHolder pCtxEntry = it.GetCtxEntry();
+
+ // Transition into the context to release the interfaces.
+ HRESULT hr = pCtxEntry->EnterContext(ReleaseAuxInterfacesCallBack, pWrap);
+ if (FAILED(hr))
+ {
+ // The context is disconnected so we cannot transition into it to clean up.
+ // The only option we have left is to try and release the interfaces from
+ // the current context. This will work for context-agile objects since we have
+ // a pointer to them directly. It will, however, fail for others since we only
+ // have a pointer to a proxy which is no longer attached to the object.
+
+#ifdef MDA_SUPPORTED
+ MDA_TRIGGER_ASSISTANT(DisconnectedContext, ReportViolationCleanup(it.GetCtxCookie(), pCurrentCtxCookie, hr));
+#endif
+
+ // make sure we never try to clean this up again
+ pEntry->Free();
+ SafeReleasePreemp(pUnk, pWrap);
+ }
+ }
+ }
+ }
+ }
+
+ RETURN S_OK;
+}
+
+//---------------------------------------------------------------------
+// Helper function called from ReleaseAllInterfacesCallBack to do the
+// actual releases.
+void RCW::ReleaseAllInterfaces()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ RCW_VTABLEPTR(this);
+
+ // Notify Jupiter that we are about to do a Release() for IUnknown.
+ // This needs to happen before calling Release because we should never be in a
+ // state where we report more refs than we actually hold
+ RCWWalker::BeforeInterfaceRelease(this);
+
+ // Release the pUnk held by IUnkEntry
+ m_UnkEntry.ReleaseInterface(this);
+
+ // If this wrapper is not an Extensible RCW, free all the interface entries that have been allocated.
+ if (!IsURTAggregated())
+ {
+ for (int i = m_Flags.m_iEntryToRelease; i < INTERFACE_ENTRY_CACHE_SIZE; i++)
+ {
+ // Make sure we never try to clean this up again (so if we bail, we'll leak it).
+ m_Flags.m_iEntryToRelease++;
+
+ if (!m_aInterfaceEntries[i].IsFree())
+ {
+ // Notify Jupiter that we are about to do a Release() for every cached interface pointer.
+ // This needs to happen before calling Release because we should never be in a
+ // state where we report more refs than we actually hold
+ RCWWalker::BeforeInterfaceRelease(this);
+
+ DWORD cbRef = SafeReleasePreemp(m_aInterfaceEntries[i].m_pUnknown, this);
+ LogInteropRelease(m_aInterfaceEntries[i].m_pUnknown, cbRef, "RCW::ReleaseAllInterfaces: Releasing ref from InterfaceEntry table");
+ }
+ }
+ }
+}
+
+//---------------------------------------------------------------------
+// Returns RCWAuxiliaryData associated with this RCW.
+PTR_RCWAuxiliaryData RCW::GetOrCreateAuxiliaryData()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pAuxiliaryData == NULL)
+ {
+ NewHolder<RCWAuxiliaryData> pData = new RCWAuxiliaryData();
+ if (InterlockedCompareExchangeT(&m_pAuxiliaryData, pData.GetValue(), NULL) == NULL)
+ {
+ pData.SuppressRelease();
+ }
+ }
+ return m_pAuxiliaryData;
+}
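+
+ // The allocation above uses the standard lock-free create-and-publish idiom. A minimal
+ // free-standing sketch of the same pattern (editorial; assumes std::atomic, whereas the
+ // runtime uses InterlockedCompareExchangeT and NewHolder instead):
+ //
+ //   std::atomic<RCWAuxiliaryData*> slot{nullptr};
+ //
+ //   RCWAuxiliaryData *GetOrCreate()
+ //   {
+ //       RCWAuxiliaryData *pData = slot.load(std::memory_order_acquire);
+ //       if (pData == nullptr)
+ //       {
+ //           RCWAuxiliaryData *pNew = new RCWAuxiliaryData();
+ //           if (slot.compare_exchange_strong(pData, pNew))
+ //               pData = pNew;   // we won the race and published pNew
+ //           else
+ //               delete pNew;    // another thread won; pData now holds the winner
+ //       }
+ //       return pData;
+ //   }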
+
+//---------------------------------------------------------------------
+// Returns true if the RCW supports given "standard managed" interface.
+bool RCW::SupportsMngStdInterface(MethodTable *pItfMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pItfMT));
+ }
+ CONTRACTL_END;
+
+ //
+ // Handle casts to normal managed standard interfaces.
+ //
+
+ // Check to see if the interface is a managed standard interface.
+ IID *pNativeIID = MngStdInterfaceMap::GetNativeIIDForType(pItfMT);
+ if (pNativeIID != NULL)
+ {
+ // It is a managed standard interface so we need to check to see if the COM component
+ // implements the native interface associated with it.
+ SafeComHolder<IUnknown> pNativeItf = NULL;
+
+ // QI for the native interface.
+ SafeQueryInterfaceRemoteAware(*pNativeIID, &pNativeItf);
+
+ // If the component supports the native interface then we can say it implements the
+ // standard interface.
+ if (pNativeItf)
+ return true;
+ }
+ else
+ {
+ //
+ // Handle casts to IEnumerable.
+ //
+
+ // If the requested interface is IEnumerable then we need to check to see if the
+ // COM object implements IDispatch and has a member with DISPID_NEWENUM.
+ if (pItfMT == MscorlibBinder::GetExistingClass(CLASS__IENUMERABLE))
+ {
+ SafeComHolder<IDispatch> pDisp = NULL;
+#ifdef FEATURE_CORECLR
+ if (!AppX::IsAppXProcess())
+#endif // FEATURE_CORECLR
+ {
+ // Get the IDispatch on the current thread.
+ pDisp = GetIDispatch();
+ }
+ if (pDisp)
+ {
+ DISPPARAMS DispParams = {0, 0, NULL, NULL};
+ VariantHolder VarResult;
+
+ // Initialize the return variant.
+ SafeVariantInit(&VarResult);
+
+ HRESULT hr = E_FAIL;
+ {
+ // We are about to make a call to COM so switch to preemptive GC.
+ GCX_PREEMP();
+
+ // Cannot get the IP for pDisp->Invoke directly; use the first IP in the vtable instead.
+ LeaveRuntimeHolder holder (**(size_t**)((IDispatch*)pDisp));
+
+ // Call invoke with DISPID_NEWENUM to see if such a member exists.
+ hr = pDisp->Invoke(
+ DISPID_NEWENUM,
+ IID_NULL,
+ LOCALE_USER_DEFAULT,
+ DISPATCH_METHOD | DISPATCH_PROPERTYGET,
+ &DispParams,
+ &VarResult,
+ NULL,
+ NULL
+ );
+ }
+
+ // If the invoke succeeded then the component has a member with DISPID_NEWENUM
+ // so we can expose it as an IEnumerable.
+ if (SUCCEEDED(hr))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
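+
+ // Editorial note: a classic COM collection typically exposes its enumerator through
+ // IDispatch as a property with the well-known DISPID_NEWENUM (-4) - conventionally a
+ // "_NewEnum" propget returning an IUnknown* that implements IEnumVARIANT. The Invoke
+ // probe above succeeds for such objects, so they can be exposed to managed code as
+ // IEnumerable even though they never implement it directly.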
+
+//---------------------------------------------------------------------
+// Determines whether a call through the given interface should use new
+// WinRT interop (as opposed to classic COM).
+TypeHandle::CastResult RCW::SupportsWinRTInteropInterfaceNoGC(MethodTable *pItfMT)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ WinMDAdapter::RedirectedTypeIndex index;
+
+ // @TODO: Make this nicer?
+ RedirectionBehavior redirectionBehavior;
+ if (pItfMT == MscorlibBinder::GetExistingClass(CLASS__IENUMERABLE))
+ redirectionBehavior = (RedirectionBehavior)m_Flags.m_RedirectionBehavior_IEnumerable;
+ else if (pItfMT == MscorlibBinder::GetExistingClass(CLASS__ICOLLECTION))
+ redirectionBehavior = (RedirectionBehavior)m_Flags.m_RedirectionBehavior_ICollection;
+ else if (pItfMT == MscorlibBinder::GetExistingClass(CLASS__ILIST))
+ redirectionBehavior = (RedirectionBehavior)m_Flags.m_RedirectionBehavior_IList;
+ else if (WinRTInterfaceRedirector::ResolveRedirectedInterface(pItfMT, &index) && index == WinMDAdapter::RedirectedTypeIndex_System_Collections_Specialized_INotifyCollectionChanged)
+ redirectionBehavior = (RedirectionBehavior)m_Flags.m_RedirectionBehavior_INotifyCollectionChanged;
+ else if (WinRTInterfaceRedirector::ResolveRedirectedInterface(pItfMT, &index) && index == WinMDAdapter::RedirectedTypeIndex_System_ComponentModel_INotifyPropertyChanged)
+ redirectionBehavior = (RedirectionBehavior)m_Flags.m_RedirectionBehavior_INotifyPropertyChanged;
+ else if (WinRTInterfaceRedirector::ResolveRedirectedInterface(pItfMT, &index) && index == WinMDAdapter::RedirectedTypeIndex_System_Windows_Input_ICommand)
+ redirectionBehavior = (RedirectionBehavior)m_Flags.m_RedirectionBehavior_ICommand;
+ else if (WinRTInterfaceRedirector::ResolveRedirectedInterface(pItfMT, &index) && index == WinMDAdapter::RedirectedTypeIndex_System_IDisposable)
+ redirectionBehavior = (RedirectionBehavior)m_Flags.m_RedirectionBehavior_IDisposable;
+ else
+ {
+ UNREACHABLE_MSG("Unknown redirected interface");
+ }
+
+ if ((redirectionBehavior & RedirectionBehaviorComputed) == 0)
+ {
+ // we don't know yet what the behavior should be
+ return TypeHandle::MaybeCast;
+ }
+
+ return ((redirectionBehavior & RedirectionBehaviorEnabled) == 0 ?
+ TypeHandle::CannotCast :
+ TypeHandle::CanCast);
+}
+
+//---------------------------------------------------------------------
+// This is a GC-triggering variant of code:SupportsWinRTInteropInterfaceNoGC.
+bool RCW::SupportsWinRTInteropInterface(MethodTable *pItfMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ TypeHandle::CastResult result = SupportsWinRTInteropInterfaceNoGC(pItfMT);
+ switch (result)
+ {
+ case TypeHandle::CanCast: return true;
+ case TypeHandle::CannotCast: return false;
+ }
+
+ WinMDAdapter::RedirectedTypeIndex index;
+ bool fLegacySupported;
+
+ // @TODO: Make this nicer?
+ RedirectionBehavior redirectionBehavior;
+ RCWFlags newFlags = { 0 };
+
+ if (pItfMT == MscorlibBinder::GetExistingClass(CLASS__IENUMERABLE))
+ {
+ redirectionBehavior = ComputeRedirectionBehavior(pItfMT, &fLegacySupported);
+ newFlags.m_RedirectionBehavior_IEnumerable = redirectionBehavior;
+ newFlags.m_RedirectionBehavior_IEnumerable_LegacySupported = fLegacySupported;
+ }
+ else if (pItfMT == MscorlibBinder::GetExistingClass(CLASS__ICOLLECTION))
+ {
+ redirectionBehavior = ComputeRedirectionBehavior(pItfMT, &fLegacySupported);
+ newFlags.m_RedirectionBehavior_ICollection = redirectionBehavior;
+ }
+ else if (pItfMT == MscorlibBinder::GetExistingClass(CLASS__ILIST))
+ {
+ redirectionBehavior = ComputeRedirectionBehavior(pItfMT, &fLegacySupported);
+ newFlags.m_RedirectionBehavior_IList = redirectionBehavior;
+ }
+ else if (WinRTInterfaceRedirector::ResolveRedirectedInterface(pItfMT, &index) && index == WinMDAdapter::RedirectedTypeIndex_System_Collections_Specialized_INotifyCollectionChanged)
+ {
+ redirectionBehavior = ComputeRedirectionBehavior(pItfMT, &fLegacySupported);
+ newFlags.m_RedirectionBehavior_INotifyCollectionChanged = redirectionBehavior;
+ }
+ else if (WinRTInterfaceRedirector::ResolveRedirectedInterface(pItfMT, &index) && index == WinMDAdapter::RedirectedTypeIndex_System_ComponentModel_INotifyPropertyChanged)
+ {
+ redirectionBehavior = ComputeRedirectionBehavior(pItfMT, &fLegacySupported);
+ newFlags.m_RedirectionBehavior_INotifyPropertyChanged = redirectionBehavior;
+ }
+ else if (WinRTInterfaceRedirector::ResolveRedirectedInterface(pItfMT, &index) && index == WinMDAdapter::RedirectedTypeIndex_System_Windows_Input_ICommand)
+ {
+ redirectionBehavior = ComputeRedirectionBehavior(pItfMT, &fLegacySupported);
+ newFlags.m_RedirectionBehavior_ICommand = redirectionBehavior;
+ }
+ else if (WinRTInterfaceRedirector::ResolveRedirectedInterface(pItfMT, &index) && index == WinMDAdapter::RedirectedTypeIndex_System_IDisposable)
+ {
+ redirectionBehavior = ComputeRedirectionBehavior(pItfMT, &fLegacySupported);
+ newFlags.m_RedirectionBehavior_IDisposable = redirectionBehavior;
+ }
+ else
+ {
+ UNREACHABLE_MSG("Unknown redirected interface");
+ }
+
+ // Use interlocked operation so we don't race with other threads trying to set some other flags on the RCW.
+ // Note that since we are in cooperative mode, we don't race with RCWCache::DetachWrappersWorker here.
+ FastInterlockOr(&m_Flags.m_dwFlags, newFlags.m_dwFlags);
+
+ _ASSERTE((redirectionBehavior & RedirectionBehaviorComputed) != 0);
+ return ((redirectionBehavior & RedirectionBehaviorEnabled) != 0);
+}
+
+//---------------------------------------------------------------------
+// Computes the result of code:SupportsWinRTInteropInterface.
+RCW::RedirectionBehavior RCW::ComputeRedirectionBehavior(MethodTable *pItfMT, bool *pfLegacySupported)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ *pfLegacySupported = false;
+
+ // @TODO: It may be possible to take advantage of metadata (e.g. non-WinRT ComImport class says it implements ICollection -> use classic COM)
+ // and/or the interface cache but for now we'll just QI.
+
+ IID iid;
+ pItfMT->GetGuid(&iid, TRUE, TRUE);
+
+ SafeComHolder<IUnknown> pUnk;
+ if (SUCCEEDED(SafeQueryInterfaceRemoteAware(iid, &pUnk)))
+ {
+ // if the object supports the legacy COM interface we don't use redirection
+ *pfLegacySupported = true;
+ return RedirectionBehaviorComputed;
+ }
+
+ if (SupportsMngStdInterface(pItfMT))
+ {
+ // if the object supports the corresponding "managed std" interface we don't use redirection
+ *pfLegacySupported = true;
+ return RedirectionBehaviorComputed;
+ }
+
+ COMOBJECTREF oref = GetExposedObject();
+ if (ComObject::SupportsInterface(oref, pItfMT))
+ {
+ // the cast succeeded but we know that the legacy COM interface is not implemented
+ // -> we know for sure that the object supports the WinRT redirected interface
+ return (RedirectionBehavior)(RedirectionBehaviorComputed | RedirectionBehaviorEnabled);
+ }
+
+ // The object does not support anything which means that we are in a failure case and an
+ // exception will be thrown. For back compat we want the exception message to include the
+ // classic COM IID so we'll return the "no redirection" result.
+ return RedirectionBehaviorComputed;
+}
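+
+ // Decision summary for ComputeRedirectionBehavior (editorial):
+ //
+ //   QI for the classic COM IID succeeds        -> computed, redirection disabled (legacy)
+ //   object supports the managed std interface  -> computed, redirection disabled (legacy)
+ //   managed-level cast succeeds otherwise      -> computed, redirection enabled (WinRT)
+ //   nothing succeeds                           -> computed, redirection disabled
+ //                                                 (failure path; keeps the classic IID
+ //                                                 in the exception message for compat)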
+
+//--------------------------------------------------------------------------------
+// OBJECTREF ComObject::CreateComObjectRef(MethodTable* pMT)
+// returns NULL for out of memory scenarios
+OBJECTREF ComObject::CreateComObjectRef(MethodTable* pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(pMT->IsComObjectType());
+ }
+ CONTRACTL_END;
+
+ if (pMT != g_pBaseCOMObject)
+ {
+ pMT->CheckRestore();
+ pMT->EnsureInstanceActive();
+
+ //
+ // Collectible types do not support com interop
+ //
+ if (pMT->Collectible())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleCOM"));
+ }
+
+ pMT->CheckRunClassInitThrowing();
+ }
+
+ return AllocateObject(pMT, false);
+}
+
+
+//--------------------------------------------------------------------------------
+// SupportsInterface
+BOOL ComObject::SupportsInterface(OBJECTREF oref, MethodTable* pIntfTable)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(oref != NULL);
+ PRECONDITION(CheckPointer(pIntfTable));
+ }
+ CONTRACTL_END
+
+ SafeComHolder<IUnknown> pUnk = NULL;
+ HRESULT hr;
+ BOOL bSupportsItf = FALSE;
+
+ GCPROTECT_BEGIN(oref);
+
+ // Make sure the interface method table has been restored.
+ pIntfTable->CheckRestore();
+
+ // Check to see if the static class definition indicates we implement the interface.
+ MethodTable *pMT = oref->GetMethodTable();
+ if (pMT->CanCastToInterface(pIntfTable))
+ {
+ bSupportsItf = TRUE;
+ }
+ else
+ {
+ RCWHolder pRCW(GetThread());
+ RCWPROTECT_BEGIN(pRCW, oref);
+
+ // This should not be called for interfaces that are in the normal portion of the
+ // interface map for this class. The only interfaces that are in the interface map
+ // but are not in the normal portion are the dynamic interfaces on extensible RCWs.
+ _ASSERTE(!oref->GetMethodTable()->ImplementsInterface(pIntfTable));
+
+
+ //
+ // First QI the object to see if it implements the specified interface.
+ //
+
+ pUnk = pRCW->GetComIPFromRCW(pIntfTable);
+ if (pUnk)
+ {
+ bSupportsItf = true;
+ }
+ else if (pIntfTable->IsComEventItfType())
+ {
+ MethodTable *pSrcItfClass = NULL;
+ MethodTable *pEvProvClass = NULL;
+ GUID SrcItfIID;
+ SafeComHolder<IConnectionPointContainer> pCPC = NULL;
+ SafeComHolder<IConnectionPoint> pCP = NULL;
+
+ // Retrieve the IID of the source interface associated with this
+ // event interface.
+ pIntfTable->GetEventInterfaceInfo(&pSrcItfClass, &pEvProvClass);
+ pSrcItfClass->GetGuid(&SrcItfIID, TRUE);
+
+ // QI for IConnectionPointContainer.
+ hr = pRCW->SafeQueryInterfaceRemoteAware(IID_IConnectionPointContainer, (IUnknown**)&pCPC);
+
+ // If the component implements IConnectionPointContainer, then check
+ // to see if it handles the source interface.
+ if (SUCCEEDED(hr))
+ {
+ GCX_PREEMP(); // make sure we switch to preemptive mode before calling the external COM object
+ LeaveRuntimeHolder lrh(*((*(size_t**)(IConnectionPointContainer*)pCPC)+4));
+ hr = pCPC->FindConnectionPoint(SrcItfIID, &pCP);
+ if (SUCCEEDED(hr))
+ {
+ // The component handles the source interface so we can succeed the QI call.
+ bSupportsItf = true;
+ }
+ }
+ }
+ else if (pRCW->SupportsMngStdInterface(pIntfTable))
+ {
+ bSupportsItf = true;
+ }
+
+ if (bSupportsItf)
+ {
+ // If the object has a dynamic interface map then we have extra work to do.
+ MethodTable *pMT = oref->GetMethodTable();
+ if (pMT->HasDynamicInterfaceMap())
+ {
+ // First, make sure we haven't already added this.
+ if (!pMT->FindDynamicallyAddedInterface(pIntfTable))
+ {
+ // It's not there.
+ if (!pMT->IsWinRTObjectType())
+ {
+ // Check that the object supports all of these base interfaces only if this is a classic
+ // COM interop scenario. This is a perf optimization (no need to QI for base interfaces
+ // if we don't really need them just yet) and it also has a usability aspect: if this
+ // SupportsInterface call failed because one of the base interfaces is not supported,
+ // the exception we'd throw would contain only the name of the "top level" interface,
+ // which would confuse the developer.
+ MethodTable::InterfaceMapIterator it = pIntfTable->IterateInterfaceMap();
+ while (it.Next())
+ {
+ bSupportsItf = Object::SupportsInterface(oref, it.GetInterface());
+ if (!bSupportsItf)
+ break;
+ }
+ }
+
+ // If the object supports all these interfaces, attempt to add the interface table
+ // to the cache.
+ if (bSupportsItf)
+ {
+ {
+ // Take the wrapper cache lock before we start playing with the interface map.
+ RCWCache::LockHolder lh(RCWCache::GetRCWCache());
+
+ // Check again with the lock.
+ if (!pMT->FindDynamicallyAddedInterface(pIntfTable))
+ {
+ // Add it to the dynamic interface table.
+ pMT->AddDynamicInterface(pIntfTable);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ RCWPROTECT_END(pRCW);
+ }
+
+ GCPROTECT_END();
+
+ return bSupportsItf;
+
+}
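+
+ // Editorial example of the dynamic interface map in action ('IBanana' is a hypothetical
+ // interface not present in the RCW's static interface map):
+ //
+ //   IBanana b = comObj as IBanana;  // triggers SupportsInterface: the QI succeeds and
+ //   if (b != null)                  // IBanana is added to the dynamic interface map,
+ //       b.Peel();                   // so subsequent casts/dispatch find it there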
+
+//--------------------------------------------------------------------
+// ThrowInvalidCastException
+void ComObject::ThrowInvalidCastException(OBJECTREF *pObj, MethodTable *pCastToMT)
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pObj != NULL);
+ PRECONDITION(*pObj != NULL);
+ PRECONDITION(IsProtectedByGCFrame (pObj));
+ POSTCONDITION(!"This function should never return!");
+ }
+ CONTRACT_END;
+
+ SafeComHolder<IUnknown> pItf = NULL;
+ HRESULT hr = S_OK;
+ IID *pNativeIID = NULL;
+ GUID iid;
+
+ // Use an InlineSString with a size of MAX_CLASSNAME_LENGTH + 1 to prevent
+ // TypeHandle::GetName from having to allocate a new block of memory. This
+ // significantly improves the performance of throwing an InvalidCastException.
+ InlineSString<MAX_CLASSNAME_LENGTH + 1> strComObjClassName;
+ InlineSString<MAX_CLASSNAME_LENGTH + 1> strCastToName;
+
+ TypeHandle thClass = (*pObj)->GetTypeHandle();
+ TypeHandle thCastTo = TypeHandle(pCastToMT);
+
+ thClass.GetName(strComObjClassName);
+ thCastTo.GetName(strCastToName);
+
+ if (thCastTo.IsInterface())
+ {
+ RCWHolder pRCW(GetThread());
+ pRCW.Init(*pObj);
+
+ // Retrieve the IID of the interface.
+ MethodTable *pCOMItfMT = NULL;
+ if (pRCW->GetInterfaceForQI(thCastTo.GetMethodTable(), &pCOMItfMT) == RCW::InterfaceRedirection_UnresolvedIEnumerable)
+ {
+ // A special exception message for the case where we are unable to figure out the
+ // redirected interface because we haven't seen a cast to a generic IEnumerable yet.
+ COMPlusThrow(kInvalidCastException, IDS_EE_WINRT_IENUMERABLE_BAD_CAST);
+ }
+
+ if (pCOMItfMT->IsProjectedFromWinRT())
+ {
+ // pCOMItfMT could be a generic WinRT-illegal interface in which case GetGuid would throw a confusing BadImageFormatException
+ // so we swallow the exception and throw the generic InvalidCastException instead
+ if (FAILED(pCOMItfMT->GetGuidNoThrow(&iid, FALSE)))
+ {
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOTCAST, strComObjClassName.GetUnicode(), strCastToName.GetUnicode());
+ }
+ }
+ else
+ {
+ // keep calling the throwing GetGuid for non-WinRT interfaces (back compat)
+ pCOMItfMT->GetGuid(&iid, TRUE);
+ }
+
+ // Query for the interface to determine the failure HRESULT.
+ hr = pRCW->SafeQueryInterfaceRemoteAware(iid, (IUnknown**)&pItf);
+
+ // If this function was called, it means the QI call failed in the past. If it
+ // no longer fails now, we still need to throw, so throw a generic invalid cast exception.
+ if (SUCCEEDED(hr) ||
+ // Also throw the generic exception if the QI failed with E_NOINTERFACE and this is
+ // a WinRT scenario - the user is very likely not interested in details like IID and
+ // HRESULT, they just want to get the "managed" experience.
+ (hr == E_NOINTERFACE && (thClass.GetMethodTable()->IsWinRTObjectType() || pCOMItfMT->IsProjectedFromWinRT())))
+ {
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOTCAST, strComObjClassName.GetUnicode(), strCastToName.GetUnicode());
+ }
+
+ // Convert the IID to a string.
+ WCHAR strIID[39];
+ StringFromGUID2(iid, strIID, sizeof(strIID) / sizeof(WCHAR));
+
+ // Obtain the textual description of the HRESULT.
+ SString strHRDescription;
+ GetHRMsg(hr, strHRDescription);
+
+ if (thCastTo.IsComEventItfType())
+ {
+ GUID SrcItfIID;
+ MethodTable *pSrcItfClass = NULL;
+ MethodTable *pEvProvClass = NULL;
+
+ // Retrieve the IID of the source interface associated with this event interface.
+ thCastTo.GetMethodTable()->GetEventInterfaceInfo(&pSrcItfClass, &pEvProvClass);
+ pSrcItfClass->GetGuid(&SrcItfIID, TRUE);
+
+ // Convert the source interface IID to a string.
+ WCHAR strSrcItfIID[39];
+ StringFromGUID2(SrcItfIID, strSrcItfIID, sizeof(strSrcItfIID) / sizeof(WCHAR));
+
+ COMPlusThrow(kInvalidCastException, IDS_EE_RCW_INVALIDCAST_EVENTITF, strHRDescription.GetUnicode(), strComObjClassName.GetUnicode(),
+ strCastToName.GetUnicode(), strIID, strSrcItfIID);
+ }
+ else if (thCastTo == TypeHandle(MscorlibBinder::GetClass(CLASS__IENUMERABLE)))
+ {
+ COMPlusThrow(kInvalidCastException, IDS_EE_RCW_INVALIDCAST_IENUMERABLE,
+ strHRDescription.GetUnicode(), strComObjClassName.GetUnicode(), strCastToName.GetUnicode(), strIID);
+ }
+ else if ((pNativeIID = MngStdInterfaceMap::GetNativeIIDForType(thCastTo)) != NULL)
+ {
+ // Convert the source interface IID to a string.
+ WCHAR strNativeItfIID[39];
+ StringFromGUID2(*pNativeIID, strNativeItfIID, sizeof(strNativeItfIID) / sizeof(WCHAR));
+
+ // Query for the interface to determine the failure HRESULT.
+ HRESULT hr2 = pRCW->SafeQueryInterfaceRemoteAware(iid, (IUnknown**)&pItf);
+
+ // If this function was called, it means the QI call failed in the past. If it
+ // no longer fails now, we still need to throw, so throw a generic invalid cast exception.
+ if (SUCCEEDED(hr2))
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOTCAST, strComObjClassName.GetUnicode(), strCastToName.GetUnicode());
+
+ // Obtain the textual description of the 2nd HRESULT.
+ SString strHR2Description;
+ GetHRMsg(hr2, strHR2Description);
+
+ COMPlusThrow(kInvalidCastException, IDS_EE_RCW_INVALIDCAST_MNGSTDITF, strHRDescription.GetUnicode(), strComObjClassName.GetUnicode(),
+ strCastToName.GetUnicode(), strIID, strNativeItfIID, strHR2Description.GetUnicode());
+ }
+ else
+ {
+ COMPlusThrow(kInvalidCastException, IDS_EE_RCW_INVALIDCAST_ITF,
+ strHRDescription.GetUnicode(), strComObjClassName.GetUnicode(), strCastToName.GetUnicode(), strIID);
+ }
+ }
+ else
+ {
+ // Validate that this function wasn't erroneously called.
+ _ASSERTE(!thClass.CanCastTo(thCastTo));
+
+ if (thClass.GetMethodTable()->IsWinRTObjectType() || thCastTo.IsProjectedFromWinRT() || thCastTo.GetMethodTable()->IsWinRTObjectType())
+ {
+ // don't mention any "COM components" in the exception if we failed to cast a WinRT object or
+ // to a WinRT object, throw the simple generic InvalidCastException instead
+ COMPlusThrow(kInvalidCastException, IDS_EE_CANNOTCAST, strComObjClassName.GetUnicode(), strCastToName.GetUnicode());
+ }
+
+ if (thCastTo.IsComObjectType())
+ {
+ if (IsComObjectClass(thClass))
+ {
+ // An attempt was made to cast a __ComObject to a ComImport metadata-defined type.
+ COMPlusThrow(kInvalidCastException, IDS_EE_RCW_INVALIDCAST_COMOBJ_TO_MD,
+ strComObjClassName.GetUnicode(), strCastToName.GetUnicode());
+ }
+ else
+ {
+ // An attempt was made to cast an instance of a ComImport metadata-defined type to
+ // a different, non-ComImport metadata-defined type.
+ COMPlusThrow(kInvalidCastException, IDS_EE_RCW_INVALIDCAST_MD_TO_MD,
+ strComObjClassName.GetUnicode(), strCastToName.GetUnicode());
+ }
+ }
+ else
+ {
+ // An attempt was made to cast this RCW to a non-ComObjectType class.
+ COMPlusThrow(kInvalidCastException, IDS_EE_RCW_INVALIDCAST_TO_NON_COMOBJTYPE,
+ strComObjClassName.GetUnicode(), strCastToName.GetUnicode());
+ }
+ }
+
+ RETURN;
+}
+
+//--------------------------------------------------------------------------------
+// Release all the data associated with the __ComObject.
+void ComObject::ReleaseAllData(OBJECTREF oref)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(oref != NULL);
+ PRECONDITION(oref->GetMethodTable()->IsComObjectType());
+ }
+ CONTRACTL_END;
+
+ GCPROTECT_BEGIN(oref)
+ {
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__COM_OBJECT__RELEASE_ALL_DATA);
+
+ DECLARE_ARGHOLDER_ARRAY(ReleaseAllDataArgs, 1);
+ ReleaseAllDataArgs[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(oref);
+
+ CALL_MANAGED_METHOD_NORET(ReleaseAllDataArgs);
+ }
+ GCPROTECT_END();
+}
+
+#ifndef DACCESS_COMPILE
+//--------------------------------------------------------------------------
+// Wrapper around code:RCW.GetComIPFromRCW
+// static
+IUnknown *ComObject::GetComIPFromRCW(OBJECTREF *pObj, MethodTable* pIntfTable)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsProtectedByGCFrame(pObj));
+ PRECONDITION(CheckPointer(pIntfTable, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); // NULL if we couldn't find match
+ }
+ CONTRACT_END;
+
+ SafeComHolder<IUnknown> pIUnk;
+
+ RCWHolder pRCW(GetThread());
+ RCWPROTECT_BEGIN(pRCW, *pObj);
+
+ pIUnk = pRCW->GetComIPFromRCW(pIntfTable);
+
+ RCWPROTECT_END(pRCW);
+ RETURN pIUnk.Extract();
+}
+
+//--------------------------------------------------------------------------
+// Wrapper around code:ComObject.GetComIPFromRCW that throws InvalidCastException
+// static
+IUnknown *ComObject::GetComIPFromRCWThrowing(OBJECTREF *pObj, MethodTable* pIntfTable)
+{
+ CONTRACT (IUnknown*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsProtectedByGCFrame(pObj));
+ PRECONDITION(CheckPointer(pIntfTable, NULL_OK));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ IUnknown* pIUnk = GetComIPFromRCW(pObj, pIntfTable);
+
+ if (pIUnk == NULL)
+ ThrowInvalidCastException(pObj, pIntfTable);
+
+ RETURN pIUnk;
+}
+
+//
+// Create override information based on interface lookup
+//
+WinRTOverrideInfo::WinRTOverrideInfo(EEClass *pClass)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pClass));
+ }
+ CONTRACTL_END;
+
+ ::ZeroMemory(this, sizeof(WinRTOverrideInfo));
+
+ MethodTable *pMT = pClass->GetMethodTable();
+
+ _ASSERTE(IsTdClass(pClass->GetAttrClass()));
+ //
+ // Iterate through each implemented interface.
+ // Note that the interface map is laid out from parent to child,
+ // so we start from the most derived class and climb our way up to the parent,
+ // instead of inspecting the interface map directly.
+ //
+ while (pMT != g_pBaseCOMObject)
+ {
+ MethodTable *pParentMT = pMT->GetParentMethodTable();
+ unsigned dwParentInterfaces = 0;
+ if (pParentMT)
+ dwParentInterfaces = pParentMT->GetNumInterfaces();
+
+ DWORD dwFound = 0;
+
+ //
+ // Scan the current class only if it has more interfaces than its parent
+ //
+ if (pMT->GetNumInterfaces() > dwParentInterfaces)
+ {
+ MethodTable::InterfaceMapIterator it = pMT->IterateInterfaceMapFrom(dwParentInterfaces);
+ while (!it.Finished())
+ {
+ MethodTable *pImplementedIntfMT = it.GetInterface();
+
+ // Only check private interfaces as they are exclusive
+ if (IsTdNotPublic(pImplementedIntfMT->GetAttrClass()) && pImplementedIntfMT->IsProjectedFromWinRT())
+ {
+ if (m_pToStringMD == NULL)
+ {
+ m_pToStringMD = MemberLoader::FindMethod(
+ pImplementedIntfMT,
+ "ToString",
+ &gsig_IM_RetStr);
+ if (m_pToStringMD != NULL)
+ dwFound++;
+ }
+
+ if (m_pGetHashCodeMD == NULL)
+ {
+ m_pGetHashCodeMD = MemberLoader::FindMethod(
+ pImplementedIntfMT,
+ "GetHashCode",
+ &gsig_IM_RetInt);
+ if (m_pGetHashCodeMD != NULL)
+ dwFound++;
+ }
+
+ if (m_pEqualsMD == NULL)
+ {
+ m_pEqualsMD = MemberLoader::FindMethod(
+ pImplementedIntfMT,
+ "Equals",
+ &gsig_IM_Obj_RetBool);
+ if (m_pEqualsMD != NULL)
+ dwFound++;
+ }
+
+ if (dwFound == 3)
+ return;
+ }
+
+ it.Next();
+ }
+ }
+
+ //
+ // The parent has no more interfaces (including its own parents); we are done.
+ //
+ if (dwParentInterfaces == 0)
+ break;
+
+ pMT = pParentMT;
+ }
+}
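+
+ // Editorial example of what this scan finds: a projected WinRT class may implement a
+ // non-public projected interface that carries the object-method overrides, e.g. (the
+ // interface name is illustrative):
+ //
+ //   internal interface IStringableInternal  // private, projected from WinRT
+ //   {
+ //       string ToString();
+ //   }
+ //
+ // The loop above records such ToString/GetHashCode/Equals MethodDescs so that calls made
+ // through System.Object can be redirected to the WinRT implementation.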
+
+//
+// If WinRTOverrideInfo is not created, create one. Otherwise return existing one
+//
+WinRTOverrideInfo *WinRTOverrideInfo::GetOrCreateWinRTOverrideInfo(MethodTable *pMT)
+{
+ CONTRACT (WinRTOverrideInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ PRECONDITION(CheckPointer(pMT));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ _ASSERTE(pMT != NULL);
+
+ EEClass *pClass = pMT->GetClass();
+
+ //
+ // Retrieve the WinRTOverrideInfo from the WinRT class factory.
+ // It is somewhat sub-optimal but saves an EEClass field.
+ //
+ WinRTClassFactory *pClassFactory = GetComClassFactory(pMT)->AsWinRTClassFactory();
+
+ WinRTOverrideInfo *pOverrideInfo = pClassFactory->GetWinRTOverrideInfo();
+ if (pOverrideInfo == NULL)
+ {
+ //
+ // Create the override information
+ //
+ NewHolder<WinRTOverrideInfo> pNewOverrideInfo = new WinRTOverrideInfo(pClass);
+
+ if (pNewOverrideInfo->m_pEqualsMD == NULL &&
+ pNewOverrideInfo->m_pGetHashCodeMD == NULL &&
+ pNewOverrideInfo->m_pToStringMD == NULL)
+ {
+ // Special optimization for the case where no override is found
+ pMT->SetSkipWinRTOverride();
+
+ RETURN NULL;
+ }
+ else
+ {
+ if (pClassFactory->SetWinRTOverrideInfo(pNewOverrideInfo))
+ {
+ // We win the race
+ pNewOverrideInfo.SuppressRelease();
+ RETURN pNewOverrideInfo;
+ }
+ else
+ {
+ // Lost the race - retrieve again
+ RETURN pClassFactory->GetWinRTOverrideInfo();
+ }
+ }
+ }
+
+ RETURN pOverrideInfo;
+}
+
+//
+// Redirection for ToString
+//
+NOINLINE static MethodDesc *GetRedirectedToStringMDHelper(Object *pThisUNSAFE, MethodTable *pMT)
+{
+ FC_INNER_PROLOG(ComObject::GetRedirectedToStringMD);
+
+ MethodDesc *pRetMD = NULL;
+
+ // Creates helper frame for GetOrCreateWinRTOverrideInfo (which throws)
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+
+ WinRTOverrideInfo *pOverrideInfo = WinRTOverrideInfo::GetOrCreateWinRTOverrideInfo(pMT);
+ if (pOverrideInfo && pOverrideInfo->m_pToStringMD != NULL)
+ {
+ pRetMD = pOverrideInfo->m_pToStringMD;
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+
+ return pRetMD;
+}
+
+FCIMPL1(MethodDesc *, ComObject::GetRedirectedToStringMD, Object *pThisUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = pThisUNSAFE->GetMethodTable();
+ if (pMT->IsSkipWinRTOverride())
+ return NULL;
+
+ FC_INNER_RETURN(MethodDesc*, ::GetRedirectedToStringMDHelper(pThisUNSAFE, pMT));
+}
+FCIMPLEND
+
+FCIMPL2(StringObject *, ComObject::RedirectToString, Object *pThisUNSAFE, MethodDesc *pToStringMD)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF refThis = ObjectToOBJECTREF(pThisUNSAFE);
+ STRINGREF refString = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refThis, refString);
+
+ // Note that this call has to be virtual. Consider this case:
+ //
+ // interface INativeA
+ // {
+ // string ToString();
+ // }
+ //
+ // class NativeA : INativeA
+ // {
+ // protected override ToString()
+ // {
+ // .override INativeA.ToString()
+ // }
+ // }
+ //
+ // class Managed : NativeA
+ // {
+ // override ToString();
+ // }
+ //
+ // If we called INativeA.ToString() non-virtually, we'd land on NativeA.ToString(), which
+ // is not correct once Managed overrides it. Calling it virtually solves this problem.
+ PREPARE_VIRTUAL_CALLSITE_USING_METHODDESC(pToStringMD, refThis);
+
+ DECLARE_ARGHOLDER_ARRAY(ToStringArgs, 1);
+ ToStringArgs[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(refThis);
+
+ CALL_MANAGED_METHOD_RETREF(refString, STRINGREF, ToStringArgs);
+
+ HELPER_METHOD_FRAME_END();
+
+ return STRINGREFToObject(refString);
+}
+FCIMPLEND
+
+//
+// Redirection for GetHashCode
+//
+NOINLINE static MethodDesc *GetRedirectedGetHashCodeMDHelper(Object *pThisUNSAFE, MethodTable *pMT)
+{
+ FC_INNER_PROLOG(ComObject::GetRedirectedGetHashCodeMD);
+
+ MethodDesc *pRetMD = NULL;
+
+ // Creates helper frame for GetOrCreateWinRTOverrideInfo (which throws)
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+
+ WinRTOverrideInfo *pOverrideInfo = WinRTOverrideInfo::GetOrCreateWinRTOverrideInfo(pMT);
+ if (pOverrideInfo && pOverrideInfo->m_pGetHashCodeMD != NULL)
+ {
+ pRetMD = pOverrideInfo->m_pGetHashCodeMD;
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+
+ return pRetMD;
+}
+
+FCIMPL1(MethodDesc *, ComObject::GetRedirectedGetHashCodeMD, Object *pThisUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = pThisUNSAFE->GetMethodTable();
+ if (pMT->IsSkipWinRTOverride())
+ return NULL;
+
+ FC_INNER_RETURN(MethodDesc*, ::GetRedirectedGetHashCodeMDHelper(pThisUNSAFE, pMT));
+}
+FCIMPLEND
+
+FCIMPL2(int, ComObject::RedirectGetHashCode, Object *pThisUNSAFE, MethodDesc *pGetHashCodeMD)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF refThis = ObjectToOBJECTREF(pThisUNSAFE);
+ int hash = 0;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
+
+ // Note that this has to be virtual. See RedirectToString for more details
+ PREPARE_VIRTUAL_CALLSITE_USING_METHODDESC(pGetHashCodeMD, refThis);
+
+ DECLARE_ARGHOLDER_ARRAY(GetHashCodeArgs, 1);
+ GetHashCodeArgs[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(refThis);
+
+ CALL_MANAGED_METHOD(hash, int, GetHashCodeArgs);
+
+ HELPER_METHOD_FRAME_END();
+
+ return hash;
+}
+FCIMPLEND
+
+NOINLINE static MethodDesc *GetRedirectedEqualsMDHelper(Object *pThisUNSAFE, MethodTable *pMT)
+{
+ FC_INNER_PROLOG(ComObject::GetRedirectedEqualsMD);
+
+ MethodDesc *pRetMD = NULL;
+
+ // Creates helper frame for GetOrCreateWinRTOverrideInfo (which throws)
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+
+ WinRTOverrideInfo *pOverrideInfo = WinRTOverrideInfo::GetOrCreateWinRTOverrideInfo(pMT);
+ if (pOverrideInfo && pOverrideInfo->m_pEqualsMD != NULL)
+ {
+ pRetMD = pOverrideInfo->m_pEqualsMD;
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+
+ return pRetMD;
+}
+
+FCIMPL1(MethodDesc *, ComObject::GetRedirectedEqualsMD, Object *pThisUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = pThisUNSAFE->GetMethodTable();
+ if (pMT->IsSkipWinRTOverride())
+ return NULL;
+
+ FC_INNER_RETURN(MethodDesc*, ::GetRedirectedEqualsMDHelper(pThisUNSAFE, pMT));
+}
+FCIMPLEND
+
+FCIMPL3(FC_BOOL_RET, ComObject::RedirectEquals, Object *pThisUNSAFE, Object *pOtherUNSAFE, MethodDesc *pEqualsMD)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF refThis = ObjectToOBJECTREF(pThisUNSAFE);
+ OBJECTREF refOther = ObjectToOBJECTREF(pOtherUNSAFE);
+
+ CLR_BOOL ret = FALSE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refThis, refOther);
+
+ // Note that this has to be virtual. See RedirectToString for more details
+ PREPARE_VIRTUAL_CALLSITE_USING_METHODDESC(pEqualsMD, refThis);
+
+ DECLARE_ARGHOLDER_ARRAY(EqualArgs, 2);
+ EqualArgs[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(refThis);
+ EqualArgs[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(refOther);
+
+ CALL_MANAGED_METHOD(ret, CLR_BOOL, EqualArgs);
+
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(ret);
+}
+FCIMPLEND
+
+#endif // #ifndef DACCESS_COMPILE
+
+#endif //#ifndef CROSSGEN_COMPILE
+
diff --git a/src/vm/runtimecallablewrapper.h b/src/vm/runtimecallablewrapper.h
new file mode 100644
index 0000000000..d13e43a64a
--- /dev/null
+++ b/src/vm/runtimecallablewrapper.h
@@ -0,0 +1,2234 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: RuntimeCallableWrapper.h
+**
+**
+** Purpose: Contains types and method signatures for the RCW class
+**
+**
+
+===========================================================*/
+//---------------------------------------------------------------------------------
+// Runtime Callable WRAPPERS on COM objects
+// Purpose: wrap COM objects to behave as CLR objects
+// Reqmts: the wrapper has to have the same layout as CLR objects
+//
+// Data members of the wrapper are basically COM2 interface pointers on the COM2 object.
+// Interfaces that belong to the same object are stored in the same wrapper; the IUnknown
+// pointer determines the identity of the object.
+// As new COM2 interfaces are seen on the same object, they need to be added to the
+// wrapper; the wrapper is allocated as a fixed-size object with an overflow chain.
+//
+// struct IPMap
+// {
+// MethodTable *pMT; // identifies the managed interface class
+// IUnknown* m_ip; // COM IP
+// }
+//
+// Issues : Performance/identity trade-off: create new wrappers, or find and reuse them?
+// We use a hash table to track the wrappers and reuse them, which maintains identity.
+// The RCWCache class maintains the lookup table and handles the cleanup.
+// Cast operations: require a QI, unless a QI for that interface was done previously.
+//
+// Threading : apartment-model COM objects have thread affinity.
+// Choices: COM+ can guarantee thread affinity by making sure
+// the calls are always made on the right thread.
+// Advantages: avoids extra marshalling.
+// Disadvantages: need to make sure legacy apartment semantics are preserved;
+// this includes any weird behavior currently built into DCOM.
+//
+// RCWs: the interface map (IMap) won't have any entries; the method tables of RCWs
+// carry a special flag to indicate that these managed objects
+// require special treatment for interface casts and call-interface operations.
+//
+// Stubs : need to find the COM2 interface pointer, and the slot within the interface, to
+// redirect the call.
+// Marshaling params and results (the common case should be fast).
+//-----------------------------------------------------------------------------------
+
+
+#ifndef _RUNTIMECALLABLEWRAPPER_H
+#define _RUNTIMECALLABLEWRAPPER_H
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+#include "utilcode.h"
+#include "vars.hpp"
+#include "objecthandle.h"
+#include "spinlock.h"
+#include "interoputil.h"
+#include "mngstdinterfaces.h"
+#include "excep.h"
+#include "comcache.h"
+#include "threads.h"
+#include "mdaassistants.h"
+#include "comcache.h"
+#include "jupiterobject.h"
+
+class Object;
+class ComCallWrapper;
+class Thread;
+
+#define GC_PRESSURE_PROCESS_LOCAL 3456
+#define GC_PRESSURE_MACHINE_LOCAL 4004
+#define GC_PRESSURE_REMOTE 4824
+
+#ifdef _WIN64
+#define GC_PRESSURE_WINRT_BASE 1000
+#define GC_PRESSURE_WINRT_LOW 12000
+#define GC_PRESSURE_WINRT_MEDIUM 120000
+#define GC_PRESSURE_WINRT_HIGH 1200000
+#else // _WIN64
+#define GC_PRESSURE_WINRT_BASE 750
+#define GC_PRESSURE_WINRT_LOW 8000
+#define GC_PRESSURE_WINRT_MEDIUM 80000
+#define GC_PRESSURE_WINRT_HIGH 800000
+#endif // _WIN64
+
+extern bool g_fShutDownCOM;
+
+enum {INTERFACE_ENTRY_CACHE_SIZE = 8};
+
+struct RCWAuxiliaryData;
+typedef DPTR(RCWAuxiliaryData) PTR_RCWAuxiliaryData;
+
+#define VARIANCE_STUB_TARGET_USE_STRING ((OBJECTHANDLE)(INT_PTR)0x1)
+#define VARIANCE_STUB_TARGET_USE_T ((OBJECTHANDLE)(INT_PTR)0x2)
+#define VARIANCE_STUB_TARGET_IS_HANDLE(handle) (((INT_PTR)(handle) & ~0x3) != 0)
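+
+// A worked sketch of the encoding: the two sentinels use only the low two bits,
+// so they can never collide with a real, pointer-aligned OBJECTHANDLE.
+//
+// VARIANCE_STUB_TARGET_IS_HANDLE(VARIANCE_STUB_TARGET_USE_STRING) // (0x1 & ~0x3) != 0 -> false
+// VARIANCE_STUB_TARGET_IS_HANDLE(VARIANCE_STUB_TARGET_USE_T) // (0x2 & ~0x3) != 0 -> false
+// VARIANCE_STUB_TARGET_IS_HANDLE(NULL) // (0x0 & ~0x3) != 0 -> false
+// VARIANCE_STUB_TARGET_IS_HANDLE(ohRealHandle) // high bits set -> true (ohRealHandle is hypothetical)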
+
+// Additional RCW data used for generic interop and auxiliary interface pointer cache.
+// This structure is lazily allocated and associated with the RCW via the m_pAuxiliaryData
+// field. It's needed only if the RCW supports IEnumerable<T> or another interface with
+// variance, or if a QI result could not be saved in the inline interface pointer cache
+// (code:RCW.m_aInterfaceEntries).
+struct RCWAuxiliaryData
+{
+ RCWAuxiliaryData()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ m_pGetEnumeratorMethod = NULL;
+ m_prVariantInterfaces = NULL;
+ m_VarianceCacheCrst.Init(CrstLeafLock);
+ m_pInterfaceCache = NULL;
+ m_ohObjectVariantCallTarget_IEnumerable = NULL;
+ m_ohObjectVariantCallTarget_IReadOnlyList = NULL;
+ m_AuxFlags.m_dwFlags = 0;
+ }
+
+ ~RCWAuxiliaryData();
+
+ struct InterfaceEntryEx;
+ typedef DPTR(InterfaceEntryEx) PTR_InterfaceEntryEx;
+
+ // Augments code:InterfaceEntry with a next pointer and context entry field.
+ struct InterfaceEntryEx
+ {
+ PTR_InterfaceEntryEx m_pNext;
+
+ InterfaceEntry m_BaseEntry;
+ PTR_CtxEntry m_pCtxEntry;
+
+ ~InterfaceEntryEx()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (m_pCtxEntry != NULL)
+ {
+ m_pCtxEntry->Release();
+ }
+ }
+ };
+
+ // Iterator for cached interface entries.
+ class InterfaceEntryIterator
+ {
+ PTR_InterfaceEntryEx m_pCurrent;
+ bool m_fFirst;
+
+ public:
+ inline InterfaceEntryIterator(PTR_RCWAuxiliaryData pAuxiliaryData)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pCurrent = (pAuxiliaryData == NULL ? NULL : pAuxiliaryData->m_pInterfaceCache);
+ m_fFirst = true;
+ }
+
+ // Move to the next item returning TRUE if an item exists or FALSE if we've run off the end
+ inline bool Next()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (m_fFirst)
+ {
+ m_fFirst = false;
+ }
+ else
+ {
+ m_pCurrent = m_pCurrent->m_pNext;
+ }
+ return (m_pCurrent != NULL);
+ }
+
+ inline InterfaceEntry *GetEntry()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_pCurrent->m_BaseEntry;
+ }
+
+ inline LPVOID GetCtxCookie()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_pCurrent->m_pCtxEntry == NULL ? NULL : m_pCurrent->m_pCtxEntry->GetCtxCookie());
+ }
+
+ inline CtxEntry *GetCtxEntry()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pCurrent->m_pCtxEntry->AddRef();
+ return m_pCurrent->m_pCtxEntry;
+ }
+
+ inline CtxEntry *GetCtxEntryNoAddRef()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCurrent->m_pCtxEntry;
+ }
+
+ inline void ResetCtxEntry()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pCurrent->m_pCtxEntry = NULL;
+ }
+
+#ifndef DACCESS_COMPILE
+ inline void SetCtxCookie(LPVOID pCtxCookie)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CtxEntry *pCtxEntry = NULL;
+ if (pCtxCookie != NULL)
+ {
+ pCtxEntry = CtxEntryCache::GetCtxEntryCache()->FindCtxEntry(pCtxCookie, GetThread());
+ }
+ m_pCurrent->m_pCtxEntry = pCtxEntry;
+ }
+#endif // !DACCESS_COMPILE
+ };
+
+ void CacheVariantInterface(MethodTable *pMT);
+
+ void CacheInterfacePointer(MethodTable *pMT, IUnknown *pUnk, LPVOID pCtxCookie);
+ IUnknown *FindInterfacePointer(MethodTable *pMT, LPVOID pCtxCookie);
+
+ inline InterfaceEntryIterator IterateInterfacePointers()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return InterfaceEntryIterator(dac_cast<PTR_RCWAuxiliaryData>(this));
+ }
+
+ // GetEnumerator method of the first IEnumerable<T> interface we successfully QI'ed for
+ PTR_MethodDesc m_pGetEnumeratorMethod;
+
+ // Interfaces with variance that we successfully QI'ed for
+ ArrayList *m_prVariantInterfaces;
+
+ // Lock to protect concurrent access to m_prVariantInterfaces
+ CrstExplicitInit m_VarianceCacheCrst;
+
+ // Linked list of cached interface pointers
+ PTR_InterfaceEntryEx m_pInterfaceCache;
+
+ // Cached object handles wrapping delegate objects that point to the right GetEnumerator/Indexer_Get
+ // stubs that should be used when calling these methods via IEnumerable<object>/IReadOnlyList<object>.
+ // Can also contain the special VARIANCE_STUB_TARGET_USE_STRING and VARIANCE_STUB_TARGET_USE_T values.
+ OBJECTHANDLE m_ohObjectVariantCallTarget_IEnumerable; // GetEnumerator
+ OBJECTHANDLE m_ohObjectVariantCallTarget_IReadOnlyList; // Indexer_Get
+
+ // Rarely used RCW flags (keep the commonly used ones in code:RCW::RCWFlags)
+ union RCWAuxFlags
+ {
+ DWORD m_dwFlags;
+
+ struct
+ {
+ // InterfaceVarianceBehavior for rarely used instantiations that could be supported via string:
+ DWORD m_InterfaceVarianceBehavior_OfIEnumerable:4;
+ DWORD m_InterfaceVarianceBehavior_OfIEnumerableOfChar:4;
+ };
+ }
+ m_AuxFlags;
+};
+
+typedef DPTR(RCW) PTR_RCW;
+
+//----------------------------------------------------------------------------
+// RCW, internal class
+// caches the IPs for a single com object, this wrapper is
+// not in the GC heap, this allows us to grab a pointer to this block
+// and play with-it without worrying about GC
+struct RCW
+{
+ enum CreationFlags
+ {
+ CF_None = 0x00,
+ CF_SupportsIInspectable = 0x01, // the underlying object supports IInspectable
+ CF_QueryForIdentity = 0x02, // Need to QI for the real identity IUnknown during creating RCW
+ CF_IsWeakReference = 0x04, // mark the RCW as "weak"
+ CF_NeedUniqueObject = 0x08, // always create a new RCW/object even if we have one cached already
+ CF_DontResolveClass = 0x10, // don't attempt to create a strongly typed RCW
+ CF_DetectDCOMProxy = 0x20, // attempt to determine if the RCW is for a DCOM proxy
+ };
+
+ static CreationFlags CreationFlagsFromObjForComIPFlags(ObjFromComIP::flags flags);
+
+ // List of RCW instances that have been freed since the last RCW cleanup.
+ static SLIST_HEADER s_RCWStandbyList;
+
+ // Simple read-only iterator for all cached interface pointers.
+ class CachedInterfaceEntryIterator
+ {
+ PTR_RCW m_pRCW;
+ int m_InlineCacheIndex;
+ RCWAuxiliaryData::InterfaceEntryIterator m_AuxIterator;
+
+ public:
+ inline CachedInterfaceEntryIterator(PTR_RCW pRCW)
+ : m_AuxIterator(pRCW->m_pAuxiliaryData)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pRCW = pRCW;
+ m_InlineCacheIndex = -1;
+ }
+
+ // Move to the next item returning TRUE if an item exists or FALSE if we've run off the end
+ inline bool Next()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_InlineCacheIndex < INTERFACE_ENTRY_CACHE_SIZE)
+ {
+ // stop incrementing m_InlineCacheIndex once we reach INTERFACE_ENTRY_CACHE_SIZE
+ if (++m_InlineCacheIndex < INTERFACE_ENTRY_CACHE_SIZE)
+ {
+ return TRUE;
+ }
+ }
+ return m_AuxIterator.Next();
+ }
+
+ inline InterfaceEntry *GetEntry()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE_MSG(m_InlineCacheIndex >= 0, "Iterator starts before the first element, you need to call Next");
+ if (m_InlineCacheIndex < INTERFACE_ENTRY_CACHE_SIZE)
+ {
+ return &m_pRCW->m_aInterfaceEntries[m_InlineCacheIndex];
+ }
+ return m_AuxIterator.GetEntry();
+ }
+
+ inline LPVOID GetCtxCookie()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE_MSG(m_InlineCacheIndex >= 0, "Iterator starts before the first element, you need to call Next");
+ if (m_InlineCacheIndex < INTERFACE_ENTRY_CACHE_SIZE)
+ {
+ return m_pRCW->GetWrapperCtxCookie();
+ }
+ return m_AuxIterator.GetCtxCookie();
+ }
+ };
+
+ // constructor
+ RCW()
+ {
+ WRAPPER_NO_CONTRACT;
+ ZeroMemory(this, sizeof(*this));
+ }
+
+ // Deletes all items in code:s_RCWStandbyList.
+ static void FlushStandbyList();
+
+ // Create a new wrapper for given IUnk, IDispatch
+ static RCW* CreateRCW(IUnknown *pUnk, DWORD dwSyncBlockIndex, DWORD flags, MethodTable *pClassMT);
+
+ //-------------------------------------------------
+ // initialize IUnknown and Identity, and associate with the managed object.
+ void Initialize(IUnknown* pUnk, DWORD dwSyncBlockIndex, MethodTable *pClassMT);
+
+ enum MarshalingType
+ {
+ MarshalingType_Unknown = 0, /* The MarshalingType has not been set */
+ MarshalingType_Inhibit = 1, /* Same as MarshalingType.Inhibit */
+ MarshalingType_FreeThreaded = 2, /* Same as MarshalingType.FreeThreaded */
+ MarshalingType_Standard = 3 /* Same as MarshalingType.Standard */
+ };
+
+ //-------------------------------------------------
+ // Get the MarshalingType of the associated managed object.
+ MarshalingType GetMarshalingType(IUnknown* pUnk, MethodTable *pClassMT);
+
+
+ //-----------------------------------------------
+ // Free GC handle and remove SyncBlock entry
+ void DecoupleFromObject();
+
+ //---------------------------------------------------
+ // Cleanup free all interface pointers
+ void Cleanup();
+
+ //-----------------------------------------------------
+ // called during GC to do minor cleanup and schedule the ips to be
+ // released
+ void MinorCleanup();
+
+ //-----------------------------------------------------
+ // The amount of GC pressure we apply has one of a few possible values.
+ // We save space in the RCW structure by tracking this instead of the
+ // actual value.
+ enum GCPressureSize
+ {
+ GCPressureSize_None = 0,
+ GCPressureSize_ProcessLocal = 1,
+ GCPressureSize_MachineLocal = 2,
+ GCPressureSize_Remote = 3,
+ GCPressureSize_WinRT_Base = 4,
+ GCPressureSize_WinRT_Low = 5,
+ GCPressureSize_WinRT_Medium = 6,
+ GCPressureSize_WinRT_High = 7,
+ GCPressureSize_COUNT = 8
+ };
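+
+ // A minimal usage sketch (assuming s_rGCPressureTable, declared further below,
+ // maps each enum value to the matching GC_PRESSURE_* byte count defined at the
+ // top of this file; pRCW is a hypothetical initialized wrapper):
+ //
+ // pRCW->AddMemoryPressure(RCW::GCPressureSize_Remote); // ~GC_PRESSURE_REMOTE bytes
+ // ...
+ // pRCW->RemoveMemoryPressure(); // balanced later, e.g. during cleanup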
+
+ //---------------------------------------------------
+ // Add memory pressure to the GC representing the native cost
+ void AddMemoryPressure(GCPressureSize pressureSize);
+
+ //---------------------------------------------------
+ // Remove memory pressure from the GC representing the native cost
+ void RemoveMemoryPressure();
+
+ //-----------------------------------------------------
+ // AddRef
+ LONG AddRef(RCWCache* pCache);
+
+ //-----------------------------------------------------
+ // Release
+ static INT32 ExternalRelease(OBJECTREF* objPROTECTED);
+ static void FinalExternalRelease(OBJECTREF* objPROTECTED);
+
+ // Create a new wrapper for a different method table that represents the same
+ // COM object as the original wrapper.
+ void CreateDuplicateWrapper(MethodTable *pNewMT, RCWHolder* pNewRCW);
+
+ AppDomain* GetDomain();
+
+#ifndef DACCESS_COMPILE
+
+ //-------------------------------------------------
+ // return exposed ComObject
+ COMOBJECTREF GetExposedObject()
+ {
+ CONTRACT(COMOBJECTREF)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_SyncBlockIndex != 0);
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+ RETURN (COMOBJECTREF) ObjectToOBJECTREF(g_pSyncTable[m_SyncBlockIndex].m_Object);
+ }
+
+ //-------------------------------------------------
+ // returns the sync block for the RCW
+ SyncBlock *GetSyncBlock()
+ {
+ CONTRACT(SyncBlock*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_SyncBlockIndex != 0);
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN g_pSyncTable[m_SyncBlockIndex].m_SyncBlock;
+ }
+
+ //--------------------------------------------------------------------------
+ // out of line call, takes a lock, does a QI if the interface was not found in local cache
+ IUnknown* GetComIPFromRCW(MethodTable* pMT);
+
+ //-----------------------------------------------------------------
+ // out of line call
+ IUnknown* GetComIPFromRCW(REFIID iid);
+
+#endif // #ifndef DACCESS_COMPILE
+
+ enum InterfaceRedirectionKind
+ {
+ InterfaceRedirection_None,
+ InterfaceRedirection_IEnumerable, // IEnumerable`1-based interface
+ InterfaceRedirection_IEnumerable_RetryOnFailure, // IEnumerable`1-based interface, retry on QI failure
+ InterfaceRedirection_UnresolvedIEnumerable, // unknown IEnumerable`1 instantiation
+ InterfaceRedirection_Other, // other interface
+ InterfaceRedirection_Other_RetryOnFailure, // non-generic redirected interface
+ };
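+
+ // A worked example of the intended mapping (a sketch; the authoritative tables
+ // live in the interop redirection code, not in this header): casting an RCW to
+ // IEnumerable<T> is satisfied by a QI for the redirected WinRT interface
+ // IIterable<T>, which corresponds to InterfaceRedirection_IEnumerable.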
+
+ // Returns the redirected collection interface corresponding to a given ICollection<T> or IReadOnlyCollection<T>, or NULL.
+ static MethodTable *ResolveICollectionInterface(MethodTable *pItfMT, BOOL fPreferIDictionary, BOOL *pfChosenIDictionary);
+
+ // Returns an interface with variance corresponding to pMT or NULL if pMT does not support variance.
+ static MethodTable *GetVariantMethodTable(MethodTable *pMT);
+ static MethodTable *ComputeVariantMethodTable(MethodTable *pMT);
+
+ // Determines the interface that should be QI'ed for when the RCW is cast to pItfMT.
+ // Returns the kind of interface redirection that has been performed.
+ InterfaceRedirectionKind GetInterfaceForQI(MethodTable *pItfMT, MethodTable **pNewItfMT);
+ static InterfaceRedirectionKind GetInterfacesForQI(MethodTable *pItfMT, MethodTable **ppNewItfMT1, MethodTable **ppNewItfMT2);
+ static InterfaceRedirectionKind ComputeInterfacesForQI(MethodTable *pItfMT, MethodTable **ppNewItfMT1, MethodTable **ppNewItfMT2);
+
+ // Performs QI for the given interface, optionally instantiating it with the given generic args.
+ HRESULT CallQueryInterface(MethodTable *pMT, Instantiation inst, IID *piid, IUnknown **ppUnk);
+
+ // Performs QI for interfaces that are castable to pMT using co-/contra-variance.
+ HRESULT CallQueryInterfaceUsingVariance(MethodTable *pMT, IUnknown **ppUnk);
+
+ // Returns the GetEnumerator method of the first IEnumerable<T> this RCW was successfully
+ // cast to, or NULL if no such cast has ever succeeded.
+ MethodDesc *GetGetEnumeratorMethod();
+
+ // Sets the first "known" GetEnumerator method if not set already.
+ void SetGetEnumeratorMethod(MethodTable *pMT);
+
+ // Retrieve cached GetEnumerator method or compute the right one for a specific type
+ static MethodDesc *GetOrComputeGetEnumeratorMethodForType(MethodTable *pMT);
+
+ // Compute the first GetEnumerator for a specific type
+ static MethodDesc *ComputeGetEnumeratorMethodForType(MethodTable *pMT);
+
+ // Get the GetEnumerator method for IEnumerable<T> or IIterable<T>
+ static MethodDesc *ComputeGetEnumeratorMethodForTypeInternal(MethodTable *pMT);
+
+ // Notifies the RCW of an interface that is known to be supported by the COM object.
+ void SetSupportedInterface(MethodTable *pItfMT, Instantiation originalInst);
+
+ //-----------------------------------------------------------------
+ // Retrieve correct COM IP for the current apartment.
+ // use the cache /update the cache
+ IUnknown* GetComIPForMethodTableFromCache(MethodTable * pMT);
+
+ // helpers to get to IUnknown, IDispatch, and IInspectable interfaces
+ // Returns an addref'd pointer - caller must Release
+ IUnknown* GetWellKnownInterface(REFIID riid);
+
+ IUnknown* GetIUnknown();
+ IUnknown* GetIUnknown_NoAddRef();
+ IDispatch* GetIDispatch();
+ IInspectable* GetIInspectable();
+
+ ULONG GetRefCount()
+ {
+ return m_cbRefCount;
+ }
+
+ IJupiterObject *GetJupiterObjectNoCheck()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsJupiterObject());
+
+ // We saved IJupiterObject * on the first slot
+ _ASSERTE((IUnknown *)m_aInterfaceEntries[0].m_pUnknown != NULL);
+ _ASSERTE((MethodTable *)m_aInterfaceEntries[0].m_pMT == NULL);
+
+ return (IJupiterObject *)m_aInterfaceEntries[0].m_pUnknown.Load();
+ }
+
+ IJupiterObject *GetJupiterObject()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (IsJupiterObject())
+ {
+ return GetJupiterObjectNoCheck();
+ }
+
+ return NULL;
+ }
+
+ void GetCachedInterfaceTypes(BOOL bIInspectableOnly,
+ SArray<PTR_MethodTable> * rgItfTables)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ CachedInterfaceEntryIterator it = IterateCachedInterfacePointers();
+ while (it.Next())
+ {
+ PTR_MethodTable pMT = dac_cast<PTR_MethodTable>((TADDR)(it.GetEntry()->m_pMT.Load()));
+ if (pMT != NULL &&
+ (!bIInspectableOnly || pMT->IsProjectedFromWinRT() || pMT->SupportsGenericInterop(TypeHandle::Interop_NativeToManaged)))
+ {
+ // Don't return mscorlib-internal declarations of WinRT types.
+ if (!(pMT->GetModule()->IsSystem() && pMT->IsProjectedFromWinRT()))
+ {
+ rgItfTables->Append(pMT);
+ }
+ }
+ }
+ }
+
+ void GetCachedInterfacePointers(BOOL bIInspectableOnly,
+ SArray<TADDR> * rgItfPtrs)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ CachedInterfaceEntryIterator it = IterateCachedInterfacePointers();
+ while (it.Next())
+ {
+ PTR_MethodTable pMT = dac_cast<PTR_MethodTable>((TADDR)(it.GetEntry()->m_pMT.Load()));
+ if (pMT != NULL &&
+ (!bIInspectableOnly || pMT->IsProjectedFromWinRT() || pMT->SupportsGenericInterop(TypeHandle::Interop_NativeToManaged)))
+ {
+ TADDR taUnk = (TADDR)(it.GetEntry()->m_pUnknown.Load());
+ if (taUnk != NULL)
+ {
+ rgItfPtrs->Append(taUnk);
+ }
+ }
+ }
+ }
+
+ // Save IJupiterObject * on the first slot
+ // Only call this in Initialize code
+ void SetJupiterObject(IJupiterObject *pJupiterObject)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_Flags.m_fIsJupiterObject = 1;
+
+ //
+ // Save pJupiterObject* on the first SLOT
+ // Only AddRef if not aggregated
+ //
+ _ASSERTE(m_aInterfaceEntries[0].IsFree());
+
+ m_aInterfaceEntries[0].Init(NULL, pJupiterObject);
+ }
+
+ LPVOID GetVTablePtr() { LIMITED_METHOD_CONTRACT; return m_vtablePtr; }
+
+ // Remoting aware QI that will attempt to re-unmarshal on object disconnect.
+ HRESULT SafeQueryInterfaceRemoteAware(REFIID iid, IUnknown** pResUnk);
+
+ BOOL IsValid()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_SyncBlockIndex != 0;
+ }
+
+ BOOL SupportsIProvideClassInfo();
+
+ VOID MarkURTAggregated();
+
+ VOID MarkURTContained()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_Flags.m_fURTAggregated == 0);
+ }
+ CONTRACTL_END;
+
+ m_Flags.m_fURTContained = 1;
+ }
+
+
+ BOOL IsURTAggregated()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Flags.m_fURTAggregated == 1;
+ }
+
+ BOOL IsURTContained()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Flags.m_fURTContained == 1;
+ }
+
+ BOOL SupportsIInspectable()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_Flags.m_fSupportsIInspectable == 1;
+ }
+
+ //
+ // Does this COM object aggregate the FTM (free-threaded marshaler)?
+ //
+ bool IsFreeThreaded()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (m_Flags.m_MarshalingType == MarshalingType_FreeThreaded);
+ }
+
+ //
+ // Is this COM object a DCOM Proxy? (For WinRT the RCW must have been created with CF_DetectDCOMProxy)
+ //
+ bool IsDCOMProxy()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_Flags.m_fIsDCOMProxy == 1;
+ }
+
+ //
+ // Does this COM object implement INoMarshal?
+ //
+ bool IsMarshalingInhibited()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (m_Flags.m_MarshalingType == MarshalingType_Inhibit);
+ }
+
+ BOOL IsJupiterObject()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_Flags.m_fIsJupiterObject == 1;
+ }
+
+ // Returns TRUE if this RCW has been detached. Detached RCWs are fully functional but have been found
+ // dead during GC, before finalizable/f-reachable objects were promoted. If we ever find such an RCW
+ // in the RCW cache during marshaling (i.e. an interface pointer with the same identity enters managed
+ // code), we re-insert it as "unique", and create a new RCW. This is to prevent unexpected resurrection
+ // of objects that may already be finalized.
+ BOOL IsDetached()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_Flags.m_Detached == 1;
+ }
+
+ BOOL MatchesCleanupBucket(RCW *pOtherRCW)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (IsFreeThreaded() == pOtherRCW->IsFreeThreaded() &&
+ m_Flags.m_fAllowEagerSTACleanup == pOtherRCW->m_Flags.m_fAllowEagerSTACleanup &&
+ GetSTAThread() == pOtherRCW->GetSTAThread() &&
+ GetWrapperCtxCookie() == pOtherRCW->GetWrapperCtxCookie()
+ );
+ }
+
+ // Note that this is not a simple field getter
+ BOOL AllowEagerSTACleanup();
+
+ // Get the wrapper's context cookie
+ LPVOID GetWrapperCtxCookie()
+ {
+ CONTRACT (LPVOID)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN m_UnkEntry.m_pCtxCookie;
+ }
+
+ inline Thread *GetSTAThread()
+ {
+ CONTRACT (Thread *)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ CtxEntry *pCtxEntry = GetWrapperCtxEntryNoRef();
+ if (pCtxEntry)
+ RETURN pCtxEntry->GetSTAThread();
+ RETURN NULL;
+ }
+
+ // Function to enter the context. The specified callback function will
+ // be called from within the context.
+ HRESULT EnterContext(PFNCTXCALLBACK pCallbackFunc, LPVOID pData);
+
+ inline CachedInterfaceEntryIterator IterateCachedInterfacePointers()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return CachedInterfaceEntryIterator(dac_cast<PTR_RCW>(this));
+ }
+
+ //---------------------------------------------------------------------
+ // Returns RCWAuxiliaryData associated with this RCW. Allocates the
+ // structure if it does not exist already.
+ PTR_RCWAuxiliaryData GetOrCreateAuxiliaryData();
+
+ //---------------------------------------------------------------------
+ // Returns true iff pItfMT is a "standard managed" interface, such as
+ // IEnumerator, and the RCW supports the interface through classic COM
+ // interop mechanisms.
+ bool SupportsMngStdInterface(MethodTable *pItfMT);
+
+ //---------------------------------------------------------------------
+ // Determines whether a call through the given interface should use new
+ // WinRT interop (as opposed to classic COM). pItfMT should be a non-generic
+ // redirected interface such as IEnumerable whose interop behavior is
+ // ambiguous. This is a NoGC variant, if it returns TypeHandle::MaybeCast,
+ // SupportsWinRTInteropInterface should be called.
+ TypeHandle::CastResult SupportsWinRTInteropInterfaceNoGC(MethodTable *pItfMT);
+
+ //---------------------------------------------------------------------
+ // This is a GC-triggering variant of code:SupportsWinRTInteropInterfaceNoGC.
+ bool SupportsWinRTInteropInterface(MethodTable *pItfMT);
+
+ //---------------------------------------------------------------------
+ // True if the object supports legacy (not WinRT) IEnumerable marshaling.
+ bool SupportsLegacyEnumerableInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(SupportsWinRTInteropInterfaceNoGC(MscorlibBinder::GetExistingClass(CLASS__IENUMERABLE)) == TypeHandle::CannotCast);
+ return m_Flags.m_RedirectionBehavior_IEnumerable_LegacySupported;
+ }
+
+ enum RedirectionBehavior
+ {
+ RedirectionBehaviorComputed = 1, // the second bit is valid
+ RedirectionBehaviorEnabled = 2 // if RedirectionBehaviorComputed is set, true means the interface is redirected on this RCW
+ };
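+
+ // Together the two bits form a tri-state; e.g. for m_Flags.m_RedirectionBehavior_IEnumerable:
+ // 00 - not computed yet
+ // 01 - computed, redirection disabled (RedirectionBehaviorComputed)
+ // 11 - computed, redirection enabled (RedirectionBehaviorComputed | RedirectionBehaviorEnabled)
+ // (10, "enabled but not computed", is never stored)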
+
+ enum InterfaceVarianceBehavior
+ {
+ IEnumerableSupported = 1, // IEnumerable<T> is supported on this RCW
+ IEnumerableSupportedViaStringInstantiation = 2, // the object failed QI for IEnumerable<T> but succeeded QI for IEnumerable<string>
+
+ IReadOnlyListSupported = 4, // IReadOnlyList<T> is supported on this RCW
+ IReadOnlyListSupportedViaStringInstantiation = 8, // the object failed QI for IReadOnlyList<T> but succeeded QI for IReadOnlyList<string>
+ };
+
+ // Returns a delegate object that points to the right GetEnumerator/Indexer_Get stub that should be used when calling these methods via
+ // IEnumerable<object>/IReadOnlyList<object>, or NULL, in which case the BOOL arguments are relevant:
+ // *pfUseString == true means that the caller should use IEnumerable<string>/IReadOnlyList<string>
+ // *pfUseT == true means that the caller should handle the call as normal, i.e. invoking the stub instantiated over T.
+ OBJECTREF GetTargetForAmbiguousVariantCall(BOOL fIsEnumerable, WinRTInterfaceRedirector::WinRTLegalStructureBaseType baseType, BOOL *pfUseString, BOOL *pfUseT)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pAuxiliaryData != NULL)
+ {
+ if (baseType == WinRTInterfaceRedirector::BaseType_Object)
+ {
+ if (fIsEnumerable)
+ {
+ if (VARIANCE_STUB_TARGET_IS_HANDLE(m_pAuxiliaryData->m_ohObjectVariantCallTarget_IEnumerable))
+ return ObjectFromHandle(m_pAuxiliaryData->m_ohObjectVariantCallTarget_IEnumerable);
+
+ if (m_pAuxiliaryData->m_ohObjectVariantCallTarget_IEnumerable == VARIANCE_STUB_TARGET_USE_STRING)
+ *pfUseString = TRUE;
+ else if (m_pAuxiliaryData->m_ohObjectVariantCallTarget_IEnumerable == VARIANCE_STUB_TARGET_USE_T)
+ *pfUseT = TRUE;
+ }
+ else
+ {
+ if (VARIANCE_STUB_TARGET_IS_HANDLE(m_pAuxiliaryData->m_ohObjectVariantCallTarget_IReadOnlyList))
+ return ObjectFromHandle(m_pAuxiliaryData->m_ohObjectVariantCallTarget_IReadOnlyList);
+
+ if (m_pAuxiliaryData->m_ohObjectVariantCallTarget_IReadOnlyList == VARIANCE_STUB_TARGET_USE_STRING)
+ *pfUseString = TRUE;
+ else if (m_pAuxiliaryData->m_ohObjectVariantCallTarget_IReadOnlyList == VARIANCE_STUB_TARGET_USE_T)
+ *pfUseT = TRUE;
+ }
+ }
+ else
+ {
+ InterfaceVarianceBehavior varianceBehavior = (baseType == WinRTInterfaceRedirector::BaseType_IEnumerable) ?
+ (InterfaceVarianceBehavior)m_pAuxiliaryData->m_AuxFlags.m_InterfaceVarianceBehavior_OfIEnumerable :
+ (InterfaceVarianceBehavior)m_pAuxiliaryData->m_AuxFlags.m_InterfaceVarianceBehavior_OfIEnumerableOfChar;
+
+ if (fIsEnumerable)
+ {
+ if ((varianceBehavior & IEnumerableSupported) != 0)
+ {
+ if ((varianceBehavior & IEnumerableSupportedViaStringInstantiation) != 0)
+ *pfUseString = TRUE;
+ else
+ *pfUseT = TRUE;
+ }
+ }
+ else
+ {
+ if ((varianceBehavior & IReadOnlyListSupported) != 0)
+ {
+ if ((varianceBehavior & IReadOnlyListSupportedViaStringInstantiation) != 0)
+ *pfUseString = TRUE;
+ else
+ *pfUseT = TRUE;
+ }
+ }
+ }
+ }
+ return NULL;
+ }
+
+#ifdef _DEBUG
+ // Does not throw if m_UnkEntry.m_pUnknown is no longer valid, debug only.
+ IUnknown *GetRawIUnknown_NoAddRef_NoThrow()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_UnkEntry.GetRawIUnknown_NoAddRef_NoThrow();
+ }
+#endif // _DEBUG
+
+ IUnknown *GetRawIUnknown_NoAddRef()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_UnkEntry.GetRawIUnknown_NoAddRef();
+ }
+
+ bool IsDisconnected()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_UnkEntry.IsDisconnected();
+ }
+
+ void IncrementUseCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ InterlockedIncrement(&m_cbUseCount);
+ }
+
+ void DecrementUseCount()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (InterlockedDecrement(&m_cbUseCount) == 0)
+ {
+ // this was the final decrement, go ahead and delete/recycle the RCW
+ {
+ GCX_PREEMP();
+ m_UnkEntry.Free();
+ }
+
+ if (g_fEEShutDown)
+ {
+ delete this;
+ }
+ else
+ {
+ InterlockedPushEntrySList(&RCW::s_RCWStandbyList, (PSLIST_ENTRY)this);
+ }
+ }
+ }
+
+private:
+ //---------------------------------------------------------------------
+ // Computes the result of code:SupportsWinRTInteropInterface.
+ RedirectionBehavior ComputeRedirectionBehavior(MethodTable *pItfMT, bool *pfLegacySupported);
+
+ //---------------------------------------------------------------------
+ // Callback called to release the interfaces in the auxiliary cache.
+ static HRESULT __stdcall ReleaseAuxInterfacesCallBack(LPVOID pData);
+
+ //---------------------------------------------------------------------
+ // Callback called to release the IUnkEntry and the InterfaceEntries.
+ static HRESULT __stdcall ReleaseAllInterfacesCallBack(LPVOID pData);
+
+ //---------------------------------------------------------------------
+ // Helper function called from ReleaseAllInterfacesCallBack to do the
+ // actual releases.
+ void ReleaseAllInterfaces();
+
+public:
+ // Points to the next RCW bucket if this RCW is part of a code:RCWCleanupList
+ PTR_RCW m_pNextCleanupBucket;
+
+ // interface entries
+ InterfaceEntry m_aInterfaceEntries[INTERFACE_ENTRY_CACHE_SIZE];
+
+ // Identity
+ LPVOID m_pIdentity;
+
+ // Sync block index for the exposed managed object
+ DWORD m_SyncBlockIndex;
+
+ // ref-count
+ ULONG m_cbRefCount;
+
+ // Wrapper Cache
+ RCWCache* m_pRCWCache;
+
+ // thread in which the wrapper has been created
+ // if this thread is an STA thread, then when the STA dies
+ // we need to cleanup this wrapper
+ Thread* m_pCreatorThread;
+
+ union RCWFlags
+ {
+ DWORD m_dwFlags;
+
+ struct
+ {
+ static_assert((1 << 4) > INTERFACE_ENTRY_CACHE_SIZE, "m_iEntryToRelease needs a bigger data type");
+ DWORD m_iEntryToRelease:4;
+
+ DWORD m_fURTAggregated:1; // this RCW represents a COM object aggregated by a managed object
+ DWORD m_fURTContained:1; // this RCW represents a COM object contained by a managed object
+ DWORD m_fAllowEagerSTACleanup:1; // this RCW can be cleaned up eagerly (as opposed to via CleanupUnusedObjectsInCurrentContext)
+ DWORD m_fSupportsIInspectable:1; // the underlying COM object is known to support IInspectable
+ DWORD m_fIsJupiterObject:1; // this RCW represents a COM object from Jupiter
+
+ static_assert((1 << 3) >= GCPressureSize_COUNT, "m_GCPressure needs a bigger data type");
+ DWORD m_GCPressure:3; // index into s_rGCPressureTable
+
+ // RedirectionBehavior of non-generic redirected interfaces:
+ DWORD m_RedirectionBehavior_IEnumerable:2;
+ DWORD m_RedirectionBehavior_IEnumerable_LegacySupported:1; // one extra bit for IEnumerable
+
+ DWORD m_RedirectionBehavior_ICollection:2;
+ DWORD m_RedirectionBehavior_IList:2;
+ DWORD m_RedirectionBehavior_INotifyCollectionChanged:2;
+ DWORD m_RedirectionBehavior_INotifyPropertyChanged:2;
+ DWORD m_RedirectionBehavior_ICommand:2;
+ DWORD m_RedirectionBehavior_IDisposable:2;
+
+ // Reserve 2 bits for marshaling behavior
+ DWORD m_MarshalingType:2; // MarshalingBehavior of the COM object.
+
+ DWORD m_Detached:1; // set if the RCW was found dead during GC
+
+ DWORD m_fIsDCOMProxy:1; // Is the object a proxy to a remote process
+ };
+ }
+ m_Flags;
+
+ static_assert(sizeof(RCWFlags) == 4, "Flags don't fit in 4 bytes, there's too many of them");
+
+ // GC pressure sizes in bytes
+ static const int s_rGCPressureTable[GCPressureSize_COUNT];
+
+ // Tracks concurrent access to this RCW to prevent using RCW instances that have already been released
+ LONG m_cbUseCount;
+
+ // additional RCW data used for generic interop and advanced interface pointer caching (NULL unless needed)
+ PTR_RCWAuxiliaryData m_pAuxiliaryData;
+
+ PTR_RCW m_pNextRCW;
+
+ // This field is useful for debugging purposes, please do not remove. The typical scenario is a crash in
+ // SafeRelease because the COM object disappeared. Knowing the vtable usually helps find the culprit.
+ LPVOID m_vtablePtr;
+
+private :
+ // cookies for tracking IUnknown on the correct thread
+ IUnkEntry m_UnkEntry;
+
+ // IUnkEntry needs to access m_UnkEntry field
+ friend IUnkEntry;
+
+private :
+ static RCW* CreateRCWInternal(IUnknown *pUnk, DWORD dwSyncBlockIndex, DWORD flags, MethodTable *pClassMT);
+
+ // Returns an addref'ed context entry
+ CtxEntry* GetWrapperCtxEntry()
+ {
+ CONTRACT (CtxEntry*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(!IsFreeThreaded()); // Must not be free-threaded, otherwise CtxEntry = NULL
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ CtxEntry *pCtxEntry = m_UnkEntry.GetCtxEntry();
+ pCtxEntry->AddRef();
+ RETURN pCtxEntry;
+ }
+
+ // Returns a non-addref'ed context entry
+ CtxEntry *GetWrapperCtxEntryNoRef()
+ {
+ CONTRACT (CtxEntry *)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ CtxEntry *pCtxEntry = m_UnkEntry.GetCtxEntry();
+ RETURN pCtxEntry;
+ }
+};
+
+inline RCW::CreationFlags operator|(RCW::CreationFlags lhs, RCW::CreationFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<RCW::CreationFlags>(static_cast<DWORD>(lhs) | static_cast<DWORD>(rhs));
+}
+inline RCW::CreationFlags operator|=(RCW::CreationFlags & lhs, RCW::CreationFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<RCW::CreationFlags>(static_cast<DWORD>(lhs) | static_cast<DWORD>(rhs));
+ return lhs;
+}
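+
+// A minimal usage sketch: without these overloads, each combination below would
+// need an explicit cast back to RCW::CreationFlags.
+//
+// RCW::CreationFlags flags = RCW::CF_SupportsIInspectable | RCW::CF_NeedUniqueObject;
+// flags |= RCW::CF_DetectDCOMProxy;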
+
+// In order to save vtablePtr in minidumps, we put it on the stack as a volatile local
+// (so it's not optimized away by the compiler). Most places where we call out to COM
+// can absorb the cost of one stack slot and one instruction to improve debuggability.
+#define RCW_VTABLEPTR(pRCW) Volatile<LPVOID> __vtablePtr = (pRCW)->m_vtablePtr
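+
+// A minimal usage sketch (hypothetical call site; pUnk and the SafeRelease call
+// stand in for any call out to the COM object):
+//
+// RCW_VTABLEPTR(pRCW); // keeps the vtable pointer alive in a stack slot
+// SafeRelease(pUnk); // if this crashes, the minidump still captures the vtable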
+
+
+// 01 REQUIRE_IINSPECTABLE 01 ITF_MARSHAL_INSP_ITF 01 CF_SupportsIInspectable
+// 02 SUPPRESS_ADDREF 02 ITF_MARSHAL_SUPPRESS_ADDREF 02 CF_SuppressAddRef
+// 04 CF_IsWeakReference
+// 04 CLASS_IS_HINT 04 ITF_MARSHAL_CLASS_IS_HINT
+// 08 UNIQUE_OBJECT 08 CF_NeedUniqueObject
+// 08 ITF_MARSHAL_DISP_ITF
+// 10 IGNORE_WINRT_AND_SKIP_UNBOXING 10 CF_DontResolveClass
+// 10 ITF_MARSHAL_USE_BASIC_ITF
+// 20 ITF_MARSHAL_WINRT_SCENARIO
+inline RCW::CreationFlags RCW::CreationFlagsFromObjForComIPFlags(ObjFromComIP::flags dwFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ static_assert_no_msg(CF_NeedUniqueObject == ObjFromComIP::UNIQUE_OBJECT);
+ static_assert_no_msg(CF_SupportsIInspectable == ObjFromComIP::REQUIRE_IINSPECTABLE);
+ static_assert_no_msg(CF_DontResolveClass == ObjFromComIP::IGNORE_WINRT_AND_SKIP_UNBOXING);
+
+ RCW::CreationFlags result = (RCW::CreationFlags)(dwFlags &
+ (ObjFromComIP::UNIQUE_OBJECT
+ | ObjFromComIP::IGNORE_WINRT_AND_SKIP_UNBOXING));
+ if ((dwFlags & (ObjFromComIP::REQUIRE_IINSPECTABLE|ObjFromComIP::CLASS_IS_HINT))
+ == (ObjFromComIP::REQUIRE_IINSPECTABLE|ObjFromComIP::CLASS_IS_HINT))
+ {
+ result |= CF_SupportsIInspectable;
+ }
+ return result;
+}
+
+
+// RCW data attached to MethodTables that represent interesting types. Types without RCWPerTypeData
+// (i.e. those with MethodTable::GetRCWPerTypeData() == NULL) are not interesting and are assumed to
+// use NULL/default values for m_pVariantMT/m_pMTForQI1/m_pMTForQI2/m_pGetEnumeratorMethod.
+struct RCWPerTypeData
+{
+ // Corresponding type with variance or NULL if the type does not exhibit variant behavior.
+ MethodTable *m_pVariantMT;
+
+ // Types that should be used for QI. m_pMTForQI1 is tried first; if it fails and m_pMTForQI2
+// is not NULL, QI for m_pMTForQI2 is performed. We need two types to support ambiguous casts
+ // to ICollection<KeyValuePair<K, V>>.
+ MethodTable *m_pMTForQI1;
+ MethodTable *m_pMTForQI2;
+
+// The corresponding IEnumerable<T>::GetEnumerator instantiation, or NULL if the type does not
+ // act like IEnumerable.
+ MethodDesc *m_pGetEnumeratorMethod;
+
+ // The kind of redirection performed by QI'ing for m_pMTForQI1.
+ RCW::InterfaceRedirectionKind m_RedirectionKind;
+
+ enum
+ {
+ VariantTypeInited = 0x01, // m_pVariantMT is set
+ RedirectionInfoInited = 0x02, // m_pMTForQI1, m_pMTForQI2, and m_RedirectionKind are set
+ GetEnumeratorInited = 0x04, // m_pGetEnumeratorMethod is set
+ InterfaceFlagsInited = 0x08, // IsRedirectedInterface and IsICollectionGeneric are set
+
+ IsRedirectedInterface = 0x10, // the type is a redirected interface
+ IsICollectionGeneric = 0x20, // the type is ICollection`1
+ };
+ DWORD m_dwFlags;
+};
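+
+// A minimal consumer sketch (assumes the lazy-init protocol implied by the *Inited
+// flags: a field is only meaningful once its flag has been set):
+//
+// RCWPerTypeData *pData = pMT->GetRCWPerTypeData();
+// MethodTable *pVariantMT = NULL;
+// if (pData != NULL && (pData->m_dwFlags & RCWPerTypeData::VariantTypeInited) != 0)
+// pVariantMT = pData->m_pVariantMT;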
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+class ComClassFactory;
+class WinRTClassFactory;
+class WinRTManagedClassFactory;
+
+class ClassFactoryBase
+{
+public:
+ //-------------------------------------------------------------
+ // Function to clean up
+ virtual void Cleanup() = 0;
+
+ ComClassFactory *AsComClassFactory()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pClassMT == NULL || (!m_pClassMT->IsProjectedFromWinRT() && !m_pClassMT->IsExportedToWinRT()));
+ return (ComClassFactory *)this;
+ }
+
+ WinRTClassFactory *AsWinRTClassFactory()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pClassMT->IsProjectedFromWinRT() || m_pClassMT->IsExportedToWinRT());
+ return (WinRTClassFactory *)this;
+ }
+
+ WinRTManagedClassFactory *AsWinRTManagedClassFactory()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pClassMT->IsExportedToWinRT());
+ return (WinRTManagedClassFactory *)this;
+ }
+
+protected:
+ ClassFactoryBase(MethodTable *pClassMT = NULL)
+ : m_pClassMT(pClassMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ MethodTable *m_pClassMT;
+};
+
+class ComClassFactoryCreator;
+//-------------------------------------------------------------------------
+// Class that wraps an IClassFactory
+// This class allows a Reflection Class to wrap an IClassFactory
+// Class::GetClassFromProgID("ProgID", "Server") can be used to get a Class
+// object that wraps an IClassFactory.
+// Calling class.CreateInstance() will create an instance of the COM object and
+// wrap it with an RCW; the wrapper can be cast to the appropriate interface
+// and used.
+//
+class ComClassFactory : public ClassFactoryBase
+{
+protected:
+ friend ComClassFactoryCreator;
+
+ // We have two types of ComClassFactory:
+ // 1. Built for reflection purposes; we should not clean up.
+ // 2. Built for IClassFactory; we should clean up.
+ //-----------------------------------------------------------
+ // constructor
+ ComClassFactory(REFCLSID rclsid)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ m_pwszProgID = NULL;
+ m_pwszServer = NULL;
+
+ // Default to unmanaged version.
+ m_bManagedVersion = FALSE;
+ m_rclsid = rclsid;
+ }
+
+public :
+ //---------------------------------------------------------
+ // Mark this instance as the managed version, so we will not do cleanup.
+ void SetManagedVersion()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_bManagedVersion = TRUE;
+ }
+
+ //--------------------------------------------------------------
+ // Init the ComClassFactory
+ void Init(__in_opt __in_z WCHAR* pwszProgID, __in_opt __in_z WCHAR* pwszServer, MethodTable* pClassMT);
+
+ //-------------------------------------------------------------
+ // create instance, calls IClassFactory::CreateInstance
+ OBJECTREF CreateInstance(MethodTable* pMTClass, BOOL ForManaged = FALSE);
+
+ //-------------------------------------------------------------
+ // Function to clean up
+ void Cleanup();
+
+protected :
+#ifndef CROSSGEN_COMPILE
+ //-------------------------------------------------------------
+ // Create instance. Overridable from child classes
+ virtual IUnknown *CreateInstanceInternal(IUnknown *pOuter, BOOL *pfDidContainment);
+#endif
+ //-------------------------------------------------------------
+ // Throw exception message
+ void ThrowHRMsg(HRESULT hr, DWORD dwMsgResID);
+
+
+private:
+ //-------------------------------------------------------------
+ // ComClassFactory::CreateAggregatedInstance(MethodTable* pMTClass)
+ // create a COM+ instance that aggregates a COM instance
+ OBJECTREF CreateAggregatedInstance(MethodTable* pMTClass, BOOL ForManaged);
+
+ //--------------------------------------------------------------
+ // Retrieve the IClassFactory.
+ IClassFactory *GetIClassFactory();
+
+ //--------------------------------------------------------------
+ // Create an instance of the component from the class factory.
+ IUnknown *CreateInstanceFromClassFactory(IClassFactory *pClassFact, IUnknown *punkOuter, BOOL *pfDidContainment);
+
+public:
+ WCHAR* m_pwszProgID; // progId
+ CLSID m_rclsid; // CLSID
+ WCHAR* m_pwszServer; // server name
+
+private:
+ BOOL m_bManagedVersion;
+};
+
+//
+// WinRT override information for ToString/GetHashCode/Equals
+//
+struct WinRTOverrideInfo
+{
+ MethodDesc *m_pToStringMD;
+ MethodDesc *m_pGetHashCodeMD;
+ MethodDesc *m_pEqualsMD;
+
+ WinRTOverrideInfo(EEClass *pClass);
+ static WinRTOverrideInfo *GetOrCreateWinRTOverrideInfo(MethodTable *pMT);
+ MethodDesc* GetIStringableToStringMD(MethodTable *pMT);
+};
+
+//--------------------------------------------------------------
+// Special ComClassFactory for AppX scenarios only
+// Call CoCreateInstanceFromApp to ensure compatibility
+class AppXComClassFactory : public ComClassFactory
+{
+protected :
+ friend ComClassFactoryCreator;
+
+ AppXComClassFactory(REFCLSID rclsid)
+ :ComClassFactory(rclsid)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+protected :
+#ifndef CROSSGEN_COMPILE
+ //-------------------------------------------------------------
+ // Create instance using CoCreateInstanceFromApp
+ virtual IUnknown *CreateInstanceInternal(IUnknown *pOuter, BOOL *pfDidContainment);
+#endif
+};
+
+//--------------------------------------------------------------
+// Creates the right ComClassFactory for you
+class ComClassFactoryCreator
+{
+public :
+ static ComClassFactory *Create(REFCLSID rclsid)
+ {
+ CONTRACT(ComClassFactory *)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+#ifdef FEATURE_APPX
+ if (AppX::IsAppXProcess())
+ RETURN new AppXComClassFactory(rclsid);
+ else
+#endif
+ RETURN new ComClassFactory(rclsid);
+ }
+};
+//-------------------------------------------------------------------------
+// Encapsulates data needed to instantiate WinRT runtime classes.
+class WinRTClassFactory : public ClassFactoryBase
+{
+public:
+ WinRTClassFactory(MethodTable *pClassMT)
+ : ClassFactoryBase(pClassMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_hClassName = NULL;
+ m_pDefaultItfMT = NULL;
+ m_pWinRTOverrideInfo = NULL;
+ m_GCPressure = RCW::GCPressureSize_WinRT_Base;
+ }
+
+ //-------------------------------------------------------------
+ // Initialize this instance by parsing factory-related attributes.
+ void Init();
+
+ //-------------------------------------------------------------
+ // Returns a factory method that matches the given signature.
+ MethodDesc *FindFactoryMethod(PCCOR_SIGNATURE pSig, DWORD cSig, Module *pModule);
+
+ //-------------------------------------------------------------
+ // Returns a static interface method that matches the given signature.
+ MethodDesc *FindStaticMethod(LPCUTF8 pszName, PCCOR_SIGNATURE pSig, DWORD cSig, Module *pModule);
+
+ //-------------------------------------------------------------
+ // Function to clean up
+ void Cleanup();
+
+ // If true, the class can be activated only using the composition pattern
+ BOOL IsComposition()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return !m_pClassMT->IsSealed();
+ }
+
+ MethodTable *GetClass()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pClassMT;
+ }
+
+ HSTRING GetClassName()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_hClassName;
+ }
+
+ SArray<MethodTable *> *GetFactoryInterfaces()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_factoryInterfaces;
+ }
+
+ SArray<MethodTable *> *GetStaticInterfaces()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_staticInterfaces;
+ }
+
+ MethodTable *GetDefaultInterface()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pDefaultItfMT;
+ }
+
+ RCW::GCPressureSize GetGCPressure()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_GCPressure;
+ }
+
+ FORCEINLINE WinRTOverrideInfo *GetWinRTOverrideInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pWinRTOverrideInfo;
+ }
+
+ BOOL SetWinRTOverrideInfo(WinRTOverrideInfo *pWinRTOverrideInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (InterlockedCompareExchangeT(&m_pWinRTOverrideInfo, pWinRTOverrideInfo, NULL) == NULL);
+ }
+
+protected:
+ MethodTable *GetTypeFromAttribute(IMDInternalImport *pImport, mdCustomAttribute tkAttribute);
+
+ HSTRING m_hClassName;
+
+ InlineSArray<MethodTable *, 1> m_factoryInterfaces;
+ InlineSArray<MethodTable *, 1> m_staticInterfaces;
+
+ MethodTable *m_pDefaultItfMT; // Default interface of the class
+
+ WinRTOverrideInfo *m_pWinRTOverrideInfo; // ToString/GetHashCode/Equals override information
+
+ RCW::GCPressureSize m_GCPressure; // GC pressure size associated with instances of this class
+};
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+//-------------------------------------------------------------------------
+// Encapsulates data needed to instantiate WinRT runtime classes implemented
+// in managed code.
+class WinRTManagedClassFactory : public WinRTClassFactory
+{
+public:
+ WinRTManagedClassFactory(MethodTable *pClassMT)
+ : WinRTClassFactory(pClassMT)
+ {
+ m_pCCWTemplate = NULL;
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ //-------------------------------------------------------------
+ // Function to clean up
+ void Cleanup();
+
+ ComCallWrapperTemplate *GetComCallWrapperTemplate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCCWTemplate;
+ }
+
+ BOOL SetComCallWrapperTemplate(ComCallWrapperTemplate *pTemplate)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (InterlockedCompareExchangeT(&m_pCCWTemplate, pTemplate, NULL) == NULL);
+ }
+
+ ComCallWrapperTemplate *GetOrCreateComCallWrapperTemplate(MethodTable *pFactoryMT);
+
+protected:
+ ComCallWrapperTemplate *m_pCCWTemplate; // CCW template for the factory object
+};
+
+FORCEINLINE void NewRCWHolderRelease(RCW* p)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (p)
+ {
+ GCX_COOP();
+
+ p->DecoupleFromObject();
+ p->Cleanup();
+ }
+}
+
+class NewRCWHolder : public Wrapper<RCW*, NewRCWHolderDoNothing, NewRCWHolderRelease, NULL>
+{
+public:
+ NewRCWHolder(RCW* p = NULL)
+ : Wrapper<RCW*, NewRCWHolderDoNothing, NewRCWHolderRelease, NULL>(p)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ FORCEINLINE void operator=(RCW* p)
+ {
+ WRAPPER_NO_CONTRACT;
+ Wrapper<RCW*, NewRCWHolderDoNothing, NewRCWHolderRelease, NULL>::operator=(p);
+ }
+};
+
+#ifndef DACCESS_COMPILE
+class RCWHolder
+{
+public:
+ RCWHolder(PTR_Thread pThread)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pThread));
+ }
+ CONTRACTL_END;
+
+ m_pThread = pThread;
+ m_pRCW = NULL;
+ m_pSB = NULL;
+ m_fValid = FALSE;
+ m_fRCWInUse = FALSE;
+#ifdef MDA_SUPPORTED
+ m_pMDA = MDA_GET_ASSISTANT(RaceOnRCWCleanup);
+#endif // MDA_SUPPORTED
+ }
+
+ ~RCWHolder()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ if (m_fRCWInUse) GC_TRIGGERS; else GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+#ifdef MDA_SUPPORTED
+ // Unregister this RCW on the thread
+ if (m_pThread && m_pSB && m_fValid)
+ {
+ if (m_pMDA)
+ m_pThread->UnregisterRCW(INDEBUG(m_pSB));
+ }
+#endif // MDA_SUPPORTED
+
+ if (m_fRCWInUse)
+ {
+ m_pRCW->DecrementUseCount();
+ }
+ }
+
+ void Init(PTR_SyncBlock pSB)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pSB));
+ PRECONDITION(m_pRCW == NULL);
+ PRECONDITION(CheckPointer(m_pThread));
+ }
+ CONTRACTL_END;
+
+ m_pSB = pSB;
+ m_pRCW = m_pSB->GetInteropInfoNoCreate()->GetRCWAndIncrementUseCount();
+
+ if (!m_pRCW)
+ {
+ COMPlusThrow(kInvalidComObjectException, IDS_EE_COM_OBJECT_NO_LONGER_HAS_WRAPPER);
+ }
+ m_fRCWInUse = TRUE;
+
+#ifdef MDA_SUPPORTED
+ if (m_pMDA)
+ {
+ m_pThread->RegisterRCW(m_pRCW);
+ }
+#endif // MDA_SUPPORTED
+
+ m_fValid = TRUE;
+ }
+
+ void Init(OBJECTREF pObject)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_pRCW == NULL);
+ PRECONDITION(CheckPointer(m_pThread));
+ }
+ CONTRACTL_END;
+
+ Init(pObject->GetSyncBlock());
+ }
+
+ // Like Init() but does not increment the use count on the RCW. To be used on perf-critical code paths.
+ void InitFastCheck(PTR_SyncBlock pSB)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pSB));
+ PRECONDITION(m_pRCW == NULL);
+ PRECONDITION(CheckPointer(m_pThread));
+ }
+ CONTRACTL_END;
+
+ m_pSB = pSB;
+ m_pRCW = m_pSB->GetInteropInfoNoCreate()->GetRawRCW();
+
+ if (!m_pRCW)
+ {
+ COMPlusThrow(kInvalidComObjectException, IDS_EE_COM_OBJECT_NO_LONGER_HAS_WRAPPER);
+ }
+
+#ifdef MDA_SUPPORTED
+ if (m_pMDA)
+ {
+ m_pThread->RegisterRCW(m_pRCW);
+ }
+#endif // MDA_SUPPORTED
+
+ m_fValid = TRUE;
+ }
+
+ void InitNoCheck(PTR_SyncBlock pSB)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(pSB));
+ PRECONDITION(m_pRCW == NULL);
+ PRECONDITION(CheckPointer(m_pThread));
+ PRECONDITION(GetThread() == m_pThread);
+ }
+ CONTRACTL_END;
+
+ m_pSB = pSB;
+ m_pRCW = m_pSB->GetInteropInfoNoCreate()->GetRawRCW();
+
+#ifdef MDA_SUPPORTED
+ if (m_pMDA)
+ {
+ m_fValid = m_pThread->RegisterRCWNoThrow(m_pRCW);
+ }
+ else
+#endif // MDA_SUPPORTED
+ {
+ m_fValid = TRUE;
+ }
+ }
+
+ void InitNoCheck(OBJECTREF pObject)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_pRCW == NULL);
+ PRECONDITION(CheckPointer(m_pThread));
+ }
+ CONTRACTL_END;
+
+ InitNoCheck((PTR_SyncBlock)pObject->GetSyncBlock());
+ }
+
+ void InitNoCheck(RCW *pRCW)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_pRCW == NULL);
+ PRECONDITION(CheckPointer(m_pThread));
+ PRECONDITION(CheckPointer(pRCW));
+ }
+ CONTRACTL_END;
+
+ InitNoCheck(pRCW->GetSyncBlock());
+ }
+
+ void UnInit()
+ {
+ CONTRACTL
+ {
+ if (m_fRCWInUse)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ else
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(m_pThread));
+ PRECONDITION(CheckPointer(m_pSB));
+ PRECONDITION(GetThread() == m_pThread);
+ }
+ CONTRACTL_END;
+
+ // Unregister this RCW on the thread
+ if (m_fValid)
+ {
+ m_fValid = FALSE;
+
+#ifdef MDA_SUPPORTED
+ if (m_pMDA)
+ m_pThread->UnregisterRCW(INDEBUG(m_pSB));
+#endif // MDA_SUPPORTED
+ }
+
+ BOOL fThrowException = FALSE;
+ if (m_fRCWInUse)
+ {
+ // Now's the perfect time to check the RCW again. If the SyncBlock doesn't point to
+ // our RCW anymore, we know that we must have raced with an explicit release.
+ if (m_pSB->GetInteropInfoNoCreate()->GetRawRCW() != m_pRCW)
+ {
+ fThrowException = TRUE;
+ }
+
+ m_pRCW->DecrementUseCount();
+ m_fRCWInUse = FALSE;
+ }
+
+ m_pRCW = NULL;
+ m_pSB = NULL;
+
+ if (fThrowException)
+ {
+ // Since the object demonstrably had the RCW when we executed Init, we know for sure that
+ // this must be a race. Use the same exception for compatibility but pass resource ID of
+ // a slightly enhanced error message.
+ COMPlusThrow(kInvalidComObjectException, IDS_EE_COM_OBJECT_RELEASE_RACE);
+ }
+ }
+
+ PTR_RCW GetRawRCWUnsafe()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pRCW;
+ }
+
+ BOOL IsNull()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (m_pRCW == NULL) ? TRUE : FALSE;
+ }
+
+ inline PTR_RCW operator->()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ PRECONDITION(CheckPointer(m_pRCW));
+ }
+ CONTRACTL_END;
+
+ return m_pRCW;
+ }
+
+private:
+ PTR_RCW m_pRCW;
+
+ // Used for per-thread registration.
+ PTR_SyncBlock m_pSB;
+ PTR_Thread m_pThread;
+
+ // Used for de-registration
+ BOOL m_fValid;
+ BOOL m_fRCWInUse;
+
+#ifdef MDA_SUPPORTED
+ // Stores the MDA.
+ MdaRaceOnRCWCleanup* m_pMDA;
+#endif // MDA_SUPPORTED
+};
+#endif // !DACCESS_COMPILE
+
+
+//---------------------------------------------------------------------
+// When the RCW is used for actual calls out to the COM object, we want to check for a cleanup race
+// when we're done with it, ideally at the point where the RCWHolder goes out of scope. But, since
+// throwing exceptions from destructors is generally a bad idea, we use the RCWPROTECT_BEGIN /
+// RCWPROTECT_END brackets instead of the plain RCWHolder.
+//---------------------------------------------------------------------
+#define RCWPROTECT_BEGIN(pRCWHolder, arg) \
+ { \
+ pRCWHolder.Init(arg);
+
+#define RCWPROTECT_END(pRCWHolder) \
+ pRCWHolder.UnInit(); \
+ }
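+
+// Illustrative usage (a sketch only; the call made through the holder is a placeholder):
+//
+//     RCWHolder pRCW(GetThread());
+//     RCWPROTECT_BEGIN(pRCW, pSyncBlock);
+//     ...make calls out to the COM object through pRCW->...
+//     RCWPROTECT_END(pRCW);   // UnInit() runs here and throws if a cleanup race was detected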
+
+//---------------------------------------------------------------------
+// The RCW cache acts as the manager for the RCWs; it
+// uses a hash table to map each IUnknown identity to the corresponding wrapper.
+// There is one such cache per thread affinity domain.
+//
+// <TODO>@TODO context cwb: revisit. One could have a cache per thread affinity
+// domain, or one per context. It depends on how we do the handshake between
+// ole32 and runtime contexts. For now, we only worry about apartments, so
+// thread affinity domains are sufficient.</TODO>
+//---------------------------------------------------------------------
+class RCWCache
+{
+ friend class RCWRefCache;
+
+public:
+ class LockHolder : public CrstHolder
+ {
+ public:
+ LockHolder(RCWCache *pCache)
+ : CrstHolder(&pCache->m_lock)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE; // The RCWCache lock must be taken
+ // in coop mode. It syncs RCW releases
+ // with the GC.
+ // This lock will *not* be taken by the GC
+ // during collection.
+ }
+ CONTRACTL_END;
+ }
+ };
+
+
+ RCWCache(AppDomain *pDomain);
+
+ static RCWCache* GetRCWCache();
+ static RCWCache* GetRCWCacheNoCreate();
+
+#ifndef DACCESS_COMPILE
+ // Insert wrapper into hash table.
+ // Since lock is held, no need to report RCW use to thread.
+ void InsertWrapper(RCWHolder* pRCW)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pRCW));
+ PRECONDITION(CheckPointer(pRCW->GetRawRCWUnsafe()));
+ PRECONDITION(LOCKHELD());
+ PRECONDITION(LookupWrapperUnsafe(pRCW->GetRawRCWUnsafe()->m_pIdentity) == NULL);
+ }
+ CONTRACTL_END;
+
+ m_HashMap.Add(pRCW->GetRawRCWUnsafe());
+ }
+
+ void RemoveWrapper(RCWHolder* pRCW)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ RemoveWrapper(pRCW->GetRawRCWUnsafe());
+ }
+#endif // DACCESS_COMPILE
+
+ // Delete wrapper for a given IUnk from hash table
+ void RemoveWrapper(RCW* pRCW)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pRCW));
+ }
+ CONTRACTL_END;
+
+ // Note that the GC thread doesn't have to take the lock
+ // since all other threads access in cooperative mode
+
+        _ASSERTE_IMPL((LOCKHELD() && GetThread()->PreemptiveGCDisabled())
+                      || Debug_IsLockedViaThreadSuspension());
+
+ LPVOID pIdentity;
+ pIdentity = pRCW->m_pIdentity;
+ _ASSERTE(pIdentity != NULL);
+
+ m_HashMap.Remove(pIdentity);
+ }
+
+    // Look up to see if we already have a wrapper for this identity; if not, insert this wrapper.
+    // Returns a valid wrapper that has been inserted into the cache.
+ BOOL FindOrInsertWrapper_NoLock(IUnknown* pIdentity, RCWHolder* pWrap, BOOL fAllowReinit);
+
+ AppDomain* GetDomain()
+ {
+ CONTRACT (AppDomain*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN m_pDomain;
+ }
+
+    // Worker function called to release wrappers in the pCtxCookie context.
+    // A NULL pCtxCookie indicates that all wrappers should be released.
+ void ReleaseWrappersWorker(LPVOID pCtxCookie);
+
+ // Worker function called to detach GC-unmarked wrappers from their
+ // underlying COM pUnk identities to prevent resurrection.
+ void DetachWrappersWorker();
+
+#ifndef DACCESS_COMPILE
+
+ // Lookup wrapper, lookup hash table for a wrapper for a given IUnk
+ void LookupWrapper(LPVOID pUnk, RCWHolder* pRCW)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(LOCKHELD());
+ //POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // We don't want the GC messing with the hash table underneath us.
+ GCX_FORBID();
+
+ RCW* pRawRCW = LookupWrapperUnsafe(pUnk);
+
+ if (pRawRCW == NULL)
+ return;
+
+ // Assume that we already have a sync block for this object.
+ pRCW->InitNoCheck(pRawRCW);
+ }
+
+ RCW* LookupWrapperUnsafe(LPVOID pUnk)
+ {
+ CONTRACT (RCW*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(LOCKHELD());
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ // We don't want the GC messing with the hash table underneath us.
+ GCX_FORBID();
+
+ RETURN m_HashMap.Lookup(pUnk);
+ }
+
+#endif //DACCESS_COMPILE
+
+#ifdef _DEBUG
+ BOOL LOCKHELD()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_lock.OwnedByCurrentThread();
+ }
+#endif
+
+private :
+ friend class COMInterfaceMarshaler;
+
+    // Look up to see if we already have a valid wrapper in the cache for this IUnk.
+    // DOES NOT take the lock inside the function - locking on the caller side IS REQUIRED.
+ void FindWrapperInCache_NoLock(IUnknown* pIdentity, RCWHolder* pRCW);
+
+private:
+ class RCWCacheTraits : public DefaultSHashTraits<RCW *>
+ {
+ public:
+ typedef LPVOID key_t;
+ static RCW *Null() { LIMITED_METHOD_CONTRACT; return NULL; }
+ static bool IsNull(RCW *e) { LIMITED_METHOD_CONTRACT; return (e == NULL); }
+ static const LPVOID GetKey(RCW *e) { LIMITED_METHOD_CONTRACT; return e->m_pIdentity; }
+ static count_t Hash(LPVOID key_t) { LIMITED_METHOD_CONTRACT; return (count_t)key_t; }
+ static BOOL Equals(LPVOID lhs, LPVOID rhs) { LIMITED_METHOD_CONTRACT; return (lhs == rhs); }
+ static RCW *Deleted() { LIMITED_METHOD_CONTRACT; return (RCW *)-1; }
+ static bool IsDeleted(RCW *e) { LIMITED_METHOD_CONTRACT; return e == (RCW *)-1; }
+ };
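+
+    // Note: with these traits, m_HashMap below maps a COM identity pointer (RCW::m_pIdentity)
+    // to its RCW; e.g. m_HashMap.Lookup(pIdentity) hashes the raw pointer value and compares
+    // identities for equality.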
+
+ SHash<RCWCacheTraits> m_HashMap;
+
+    // Lock for synchronizing access to the cache.
+ Crst m_lock;
+ AppDomain* m_pDomain;
+};
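+
+// Illustrative lookup pattern (a sketch; pUnk stands for the COM identity to search for):
+//
+//     RCWCache *pCache = RCWCache::GetRCWCache();
+//     RCWHolder rcw(GetThread());
+//     {
+//         RCWCache::LockHolder lh(pCache);    // must be taken in cooperative mode
+//         pCache->LookupWrapper(pUnk, &rcw);  // initializes rcw if a wrapper exists
+//     }
+//     if (!rcw.IsNull())
+//         ...use rcw-> to call through the wrapper...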
+
+struct ReleaseRCWList_Args
+{
+ RCW *pHead;
+ BOOL ctxTried;
+ BOOL ctxBusy;
+};
+
+// RCWCleanupList represents a list of RCWs whose corresponding managed objects have been collected.
+// These RCWs must be released, potentially involving transitioning into the right apartment/context.
+// That is why the operation is deferred and done in chunks, rather than on individual RCWs, so that
+// the transition overhead is minimized. This data structure is a two-dimensional linked list with
+// individual RCWs grouped into buckets that share the same COM apartment/context.
+//
+// Adding RCWs into the cleanup list must not allocate memory or perform any similar operation that
+// may fail. The only operation allowed to fail is the release itself (out of our control). Therefore
+// the data structure uses only a single statically allocated instance of RCWCleanupList and the
+// "links" are taken care of by the RCW structures themselves.
+//
+// m_pFirstBucket m_pNextCleanupBucket m_pNextCleanupBucket
+// RCWCleanupList ------> RCW_1a -------------------> RCW_2a -------------------> RCW_3a -->...--> NULL
+// | | |
+// | m_pNextRCW | m_pNextRCW | m_pNextRCW
+// v v v
+// RCW_1b RCW_2b RCW_3b
+// | | |
+// | m_pNextRCW | m_pNextRCW | m_pNextRCW
+// v v v
+// RCW_1c RCW_2c RCW_3c
+// | | |
+// v v v
+// ... ... ...
+// | | |
+// v v v
+// NULL NULL NULL
+//
+// In the picture above, RCW_1a, RCW_1b, RCW_1c, ... are in the same bucket, RCW_2a, RCW_2b, RCW_2c, ...
+// are in another bucket etc. The supported operations are adding an RCW (see code:RCWCleanupList::AddWrapper)
+// and removing entire buckets that meet given criteria (see code:RCWCleanupList::CleanupAllWrappers and
+// code:RCWCleanupList::CleanupWrappersInCurrentCtxThread).
+
+class RCWCleanupList
+{
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+#endif // DACCESS_COMPILE
+
+public:
+ RCWCleanupList()
+ : m_lock(CrstRCWCleanupList, CRST_UNSAFE_ANYMODE),
+ m_pCurCleanupThread(NULL), m_doCleanupInContexts(FALSE),
+ m_pFirstBucket(NULL)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ ~RCWCleanupList()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(IsEmpty());
+ }
+
+ VOID AddWrapper(RCW* pRCW);
+ VOID AddWrapper_NoLock(RCW *pRCW);
+ VOID CleanupAllWrappers();
+ VOID CleanupWrappersInCurrentCtxThread(BOOL fWait = TRUE, BOOL fManualCleanupRequested = FALSE, BOOL bIgnoreComObjectEagerCleanupSetting = FALSE);
+
+ BOOL IsEmpty();
+
+private:
+ // These 2 functions are static so we can call them through the Context Callback mechanism.
+ static HRESULT ReleaseRCWListInCorrectCtx(LPVOID pData);
+ static VOID ReleaseRCWListRaw(RCW* pRCW);
+
+#ifndef DACCESS_COMPILE
+ // Utility class that maintains a list of buckets removed from the cleanup list.
+ struct RemovedBuckets
+ {
+ RemovedBuckets()
+ : m_pFirstBucket(NULL),
+ m_pLastBucket(NULL)
+ { }
+
+ ~RemovedBuckets()
+ {
+ // we must always end up with an empty list, otherwise we leak RCWs
+ _ASSERTE(m_pFirstBucket == NULL);
+ }
+
+ void Append(PTR_RCW pBucket)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pLastBucket == NULL)
+ {
+ // appending the first bucket
+ _ASSERTE(m_pFirstBucket == NULL);
+ m_pFirstBucket = pBucket;
+ }
+ else
+ {
+                // appending a subsequent (non-first) bucket
+ m_pLastBucket->m_pNextCleanupBucket = pBucket;
+ }
+
+ pBucket->m_pNextCleanupBucket = NULL;
+ m_pLastBucket = pBucket;
+ }
+
+ RCW *PopHead()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ RCW *pRetVal = m_pFirstBucket;
+ if (m_pFirstBucket != NULL)
+ m_pFirstBucket = m_pFirstBucket->m_pNextCleanupBucket;
+
+ return pRetVal;
+ }
+
+ RCW *m_pFirstBucket;
+ RCW *m_pLastBucket;
+ };
+#endif // !DACCESS_COMPILE
+
+ RCW *m_pFirstBucket;
+ Crst m_lock;
+ Thread* m_pCurCleanupThread;
+
+ // Fast check for whether threads should help cleanup wrappers in their contexts
+ BOOL m_doCleanupInContexts;
+};
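+
+// Illustrative flow (a sketch; the cleanup-list instance name is hypothetical): an RCW whose
+// managed object has been collected is queued rather than released inline, and the list is
+// drained in bulk so apartment/context transitions are amortized:
+//
+//     pCleanupList->AddWrapper(pRCW);                       // bucketed by apartment/context
+//     ...
+//     pCleanupList->CleanupWrappersInCurrentCtxThread();    // release buckets this thread can service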
+
+FORCEINLINE void CtxEntryHolderRelease(CtxEntry *p)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (p != NULL)
+ {
+ p->Release();
+ }
+}
+
+class CtxEntryHolder : public Wrapper<CtxEntry *, CtxEntryDoNothing, CtxEntryHolderRelease, NULL>
+{
+public:
+ CtxEntryHolder(CtxEntry *p = NULL)
+ : Wrapper<CtxEntry *, CtxEntryDoNothing, CtxEntryHolderRelease, NULL>(p)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ FORCEINLINE void operator=(CtxEntry *p)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ Wrapper<CtxEntry *, CtxEntryDoNothing, CtxEntryHolderRelease, NULL>::operator=(p);
+ }
+
+};
+
+#endif // _RUNTIMECALLABLEWRAPPER_H
diff --git a/src/vm/runtimeexceptionkind.h b/src/vm/runtimeexceptionkind.h
new file mode 100644
index 0000000000..812f36b8a8
--- /dev/null
+++ b/src/vm/runtimeexceptionkind.h
@@ -0,0 +1,32 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// RuntimeExceptionKind.h
+//
+
+//
+
+
+#ifndef __runtimeexceptionkind_h__
+#define __runtimeexceptionkind_h__
+
+//==========================================================================
+// Identifies commonly-used exception classes for COMPlusThrowable().
+//==========================================================================
+enum RuntimeExceptionKind {
+#define DEFINE_EXCEPTION(ns, reKind, bHRformessage, ...) k##reKind,
+#define DEFINE_EXCEPTION_HR_WINRT_ONLY(ns, reKind, ...)
+#define DEFINE_EXCEPTION_IN_OTHER_FX_ASSEMBLY(ns, reKind, assemblySimpleName, publicKeyToken, bHRformessage, ...) DEFINE_EXCEPTION(ns, reKind, bHRformessage, __VA_ARGS__)
+#include "rexcep.h"
+kLastException
+};
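+
+// For illustration: each DEFINE_EXCEPTION entry in rexcep.h contributes one k-prefixed
+// enumerator above (the exact entry shape below is approximate):
+//
+//     DEFINE_EXCEPTION(g_SystemNS, ArgumentException, ...)   ==>   kArgumentException
+//
+// which is what calls such as COMPlusThrow(kArgumentException, ...) throughout the VM refer to.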
+
+
+// I would have preferred to define a unique HRESULT in our own facility, but we
+// weren't supposed to create new HRESULTs so close to ship. And now it's set
+// in stone.
+#define E_PROCESS_SHUTDOWN_REENTRY HRESULT_FROM_WIN32(ERROR_PROCESS_ABORTED)
+
+
+#endif // __runtimeexceptionkind_h__
diff --git a/src/vm/runtimehandles.cpp b/src/vm/runtimehandles.cpp
new file mode 100644
index 0000000000..e5cc503eae
--- /dev/null
+++ b/src/vm/runtimehandles.cpp
@@ -0,0 +1,3618 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#include "corhdr.h"
+#include "runtimehandles.h"
+#include "object.h"
+#include "class.h"
+#include "method.hpp"
+#include "typehandle.h"
+#include "field.h"
+#include "siginfo.hpp"
+#include "clsload.hpp"
+#include "typestring.h"
+#include "typeparse.h"
+#include "holder.h"
+#include "codeman.h"
+#include "corhlpr.h"
+#include "jitinterface.h"
+#include "stackprobe.h"
+#include "eeconfig.h"
+#include "eehash.h"
+#include "objecthandle.h"
+#include "interoputil.h"
+#include "typedesc.h"
+#include "virtualcallstub.h"
+#include "contractimpl.h"
+#include "dynamicmethod.h"
+#include "peimagelayout.inl"
+#include "security.h"
+#include "eventtrace.h"
+#include "invokeutil.h"
+
+
+FCIMPL3(FC_BOOL_RET, Utf8String::EqualsCaseSensitive, LPCUTF8 szLhs, LPCUTF8 szRhs, INT32 stringNumBytes)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(szLhs));
+ PRECONDITION(CheckPointer(szRhs));
+ }
+ CONTRACTL_END;
+
+    // Important: the strings szLhs and szRhs are not null-terminated, so stringNumBytes
+    // must be used when performing operations on them.
+
+ // At this point, both the left and right strings are guaranteed to have the
+ // same length.
+ FC_RETURN_BOOL(strncmp(szLhs, szRhs, stringNumBytes) == 0);
+}
+FCIMPLEND
+
+BOOL QCALLTYPE Utf8String::EqualsCaseInsensitive(LPCUTF8 szLhs, LPCUTF8 szRhs, INT32 stringNumBytes)
+{
+ QCALL_CONTRACT;
+
+    // Important: the strings szLhs and szRhs are not null-terminated, so stringNumBytes
+    // must be used when performing operations on them.
+
+ BOOL fStringsEqual = FALSE;
+
+ BEGIN_QCALL;
+
+ _ASSERTE(CheckPointer(szLhs));
+ _ASSERTE(CheckPointer(szRhs));
+
+ // At this point, both the left and right strings are guaranteed to have the
+ // same length.
+ StackSString lhs(SString::Utf8, szLhs, stringNumBytes);
+ StackSString rhs(SString::Utf8, szRhs, stringNumBytes);
+
+ // We can use SString for simple case insensitive compares
+ fStringsEqual = lhs.EqualsCaseInsensitive(rhs);
+
+ END_QCALL;
+
+ return fStringsEqual;
+}
+
+ULONG QCALLTYPE Utf8String::HashCaseInsensitive(LPCUTF8 sz, INT32 stringNumBytes)
+{
+ QCALL_CONTRACT;
+
+    // Important: the string sz is not null-terminated, so stringNumBytes must be used
+    // when performing operations on it.
+
+ ULONG hashValue = 0;
+
+ BEGIN_QCALL;
+
+ StackSString str(SString::Utf8, sz, stringNumBytes);
+ hashValue = str.HashCaseInsensitive();
+
+ END_QCALL;
+
+ return hashValue;
+}
+
+static BOOL CheckCAVisibilityFromDecoratedType(MethodTable* pCAMT, MethodDesc* pCACtor, MethodTable* pDecoratedMT, Module* pDecoratedModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCAMT));
+ PRECONDITION(CheckPointer(pCACtor, NULL_OK));
+ PRECONDITION(CheckPointer(pDecoratedMT, NULL_OK));
+ PRECONDITION(CheckPointer(pDecoratedModule));
+ }
+ CONTRACTL_END;
+
+ DWORD dwAttr = mdPublic;
+
+ if (pCACtor != NULL)
+ {
+ // Allowing a dangerous method to be called in custom attribute instantiation is, well, dangerous.
+ // E.g. a malicious user can craft a custom attribute record that fools us into creating a DynamicMethod
+ // object attached to typeof(System.Reflection.CustomAttribute) and thus gain access to mscorlib internals.
+ if (InvokeUtil::IsDangerousMethod(pCACtor))
+ return FALSE;
+
+ _ASSERTE(pCACtor->IsCtor());
+
+ dwAttr = pCACtor->GetAttrs();
+ }
+
+ StaticAccessCheckContext accessContext(NULL, pDecoratedMT, pDecoratedModule->GetAssembly());
+
+ // Don't do transparency check here. Custom attributes have different transparency rules.
+ // The checks are done by AllowCriticalCustomAttributes and CheckLinktimeDemands in CustomAttribute.cs.
+ return ClassLoader::CanAccess(
+ &accessContext,
+ pCAMT,
+ pCAMT->GetAssembly(),
+ dwAttr,
+ pCACtor,
+ NULL,
+ *AccessCheckOptions::s_pNormalAccessChecks,
+ FALSE,
+ FALSE);
+}
+
+BOOL QCALLTYPE RuntimeMethodHandle::IsCAVisibleFromDecoratedType(
+ EnregisteredTypeHandle targetTypeHandle,
+ MethodDesc * pTargetCtor,
+ EnregisteredTypeHandle sourceTypeHandle,
+ QCall::ModuleHandle sourceModuleHandle)
+{
+ QCALL_CONTRACT;
+
+ BOOL bResult = TRUE;
+
+ BEGIN_QCALL;
+ TypeHandle sourceHandle = TypeHandle::FromPtr(sourceTypeHandle);
+ TypeHandle targetHandle = TypeHandle::FromPtr(targetTypeHandle);
+
+ _ASSERTE((sourceHandle.IsNull() || !sourceHandle.IsTypeDesc()) &&
+ !targetHandle.IsNull() &&
+ !targetHandle.IsTypeDesc());
+
+ if (sourceHandle.IsTypeDesc() ||
+ targetHandle.IsNull() ||
+ targetHandle.IsTypeDesc())
+ COMPlusThrowArgumentNull(NULL, W("Arg_InvalidHandle"));
+
+ bResult = CheckCAVisibilityFromDecoratedType(targetHandle.AsMethodTable(), pTargetCtor, sourceHandle.AsMethodTable(), sourceModuleHandle);
+ END_QCALL;
+
+ return bResult;
+}
+
+// static
+BOOL QCALLTYPE RuntimeMethodHandle::IsSecurityCritical(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ BOOL fIsCritical = TRUE;
+
+ BEGIN_QCALL;
+
+ if (pMD == NULL)
+ COMPlusThrowArgumentNull(NULL, W("Arg_InvalidHandle"));
+
+ fIsCritical = Security::IsMethodCritical(pMD);
+
+ END_QCALL;
+
+ return fIsCritical;
+}
+
+// static
+BOOL QCALLTYPE RuntimeMethodHandle::IsSecuritySafeCritical(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ BOOL fIsSafeCritical = TRUE;
+
+ BEGIN_QCALL;
+
+ if (pMD == NULL)
+ COMPlusThrowArgumentNull(NULL, W("Arg_InvalidHandle"));
+
+ fIsSafeCritical = Security::IsMethodSafeCritical(pMD);
+
+ END_QCALL;
+
+ return fIsSafeCritical;
+}
+
+// static
+BOOL QCALLTYPE RuntimeMethodHandle::IsSecurityTransparent(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ BOOL fIsTransparent = TRUE;
+
+ BEGIN_QCALL;
+
+ if (pMD == NULL)
+ COMPlusThrowArgumentNull(NULL, W("Arg_InvalidHandle"));
+
+ fIsTransparent = Security::IsMethodTransparent(pMD);
+
+ END_QCALL;
+
+ return fIsTransparent;
+}
+
+FCIMPL2(FC_BOOL_RET, RuntimeMethodHandle::IsTokenSecurityTransparent, ReflectModuleBaseObject *pModuleUNSAFE, INT32 tkToken) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTMODULEBASEREF refModule = (REFLECTMODULEBASEREF)ObjectToOBJECTREF(pModuleUNSAFE);
+
+ if(refModule == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ Module *pModule = refModule->GetModule();
+
+ BOOL bIsSecurityTransparent = TRUE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refModule);
+ {
+ bIsSecurityTransparent = Security::IsTokenTransparent(pModule, tkToken);
+ }
+ HELPER_METHOD_FRAME_END();
+
+    FC_RETURN_BOOL(bIsSecurityTransparent);
+}
+FCIMPLEND
+
+static bool DoAttributeTransparencyChecks(Assembly *pAttributeAssembly, Assembly *pDecoratedAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pAttributeAssembly));
+ PRECONDITION(CheckPointer(pDecoratedAssembly));
+ }
+ CONTRACTL_END;
+
+ // Do transparency checks - if both the decorated assembly and attribute use the v4 security model,
+ // then we can do a direct transparency check. However, if the decorated assembly uses the v2
+ // security model, then we need to convert the security critical attribute to looking as though it
+ // has a LinkDemand for full trust.
+ const SecurityTransparencyBehavior *pTargetTransparency = pDecoratedAssembly->GetSecurityTransparencyBehavior();
+ const SecurityTransparencyBehavior *pAttributeTransparency = pAttributeAssembly->GetSecurityTransparencyBehavior();
+
+ // v2 transparency did not impose checks for using its custom attributes, so if the attribute is
+ // defined in an assembly using the v2 transparency model then we don't need to do any
+ // additional checks.
+ if (pAttributeTransparency->DoAttributesRequireTransparencyChecks())
+ {
+ if (pTargetTransparency->CanTransparentCodeCallLinkDemandMethods() &&
+ pAttributeTransparency->CanCriticalMembersBeConvertedToLinkDemand())
+ {
+ // We have a v4 critical attribute being applied to a v2 transparent target. Since v2
+ // transparency doesn't understand externally visible critical attributes, we convert the
+ // attribute to a LinkDemand for full trust. v2 transparency did not convert
+ // LinkDemands on its attributes into full demands so we do not do that second level of
+ // conversion here either.
+ Security::FullTrustLinkDemand(pDecoratedAssembly);
+ return true;
+ }
+ else
+ {
+ // If we are here either the target of the attribute uses the v4 security model, or the
+ // attribute itself uses the v2 model. In these cases, we cannot perform a conversion of
+ // the critical attribute into a LinkDemand, and we have an error condition.
+ return false;
+ }
+ }
+
+ return true;
+}
+
+FCIMPL3(void, RuntimeMethodHandle::CheckLinktimeDemands, ReflectMethodObject *pMethodUNSAFE, ReflectModuleBaseObject *pModuleUNSAFE, CLR_BOOL isDecoratedTargetSecurityTransparent)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pModuleUNSAFE));
+ PRECONDITION(CheckPointer(pMethodUNSAFE));
+ }
+ CONTRACTL_END;
+
+ REFLECTMETHODREF refMethod = (REFLECTMETHODREF)ObjectToOBJECTREF(pMethodUNSAFE);
+ REFLECTMODULEBASEREF refModule = (REFLECTMODULEBASEREF)ObjectToOBJECTREF(pModuleUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_2(refMethod, refModule);
+ {
+ MethodDesc *pCallee = refMethod->GetMethod(); // pCallee is the CA ctor or CA setter method
+ Module *pDecoratedModule = refModule->GetModule();
+
+ bool isAttributeSecurityCritical = Security::IsMethodCritical(pCallee) &&
+ !Security::IsMethodSafeCritical(pCallee);
+
+ if (isDecoratedTargetSecurityTransparent && isAttributeSecurityCritical)
+ {
+ if (!DoAttributeTransparencyChecks(pCallee->GetAssembly(), pDecoratedModule->GetAssembly()))
+ {
+ SecurityTransparent::ThrowMethodAccessException(pCallee);
+ }
+ }
+
+#ifndef FEATURE_CORECLR
+ if (pCallee->RequiresLinktimeCheck())
+ {
+ Security::LinktimeCheckMethod(pDecoratedModule->GetAssembly(), pCallee);
+ }
+#endif // !FEATURE_CORECLR
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+NOINLINE static ReflectClassBaseObject* GetRuntimeTypeHelper(LPVOID __me, TypeHandle typeHandle, OBJECTREF keepAlive)
+{
+ FC_INNER_PROLOG_NO_ME_SETUP();
+ if (typeHandle.AsPtr() == NULL)
+ return NULL;
+
+    // RuntimeTypeHandle::GetRuntimeType has picked off the most common case, but does not cover array types.
+    // Before we take the really heavyweight option of setting up a helper method frame, check whether we have to.
+ OBJECTREF refType = typeHandle.GetManagedClassObjectFast();
+ if (refType != NULL)
+ return (ReflectClassBaseObject*)OBJECTREFToObject(refType);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, keepAlive);
+ refType = typeHandle.GetManagedClassObject();
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+ return (ReflectClassBaseObject*)OBJECTREFToObject(refType);
+}
+
+#define RETURN_CLASS_OBJECT(typeHandle, keepAlive) FC_INNER_RETURN(ReflectClassBaseObject*, GetRuntimeTypeHelper(__me, typeHandle, keepAlive))
+
+NOINLINE ReflectModuleBaseObject* GetRuntimeModuleHelper(LPVOID __me, Module *pModule, OBJECTREF keepAlive)
+{
+ FC_INNER_PROLOG_NO_ME_SETUP();
+ if (pModule == NULL)
+ return NULL;
+
+ DomainFile * pDomainFile = pModule->FindDomainFile(GetAppDomain());
+
+ OBJECTREF refModule = (pDomainFile != NULL) ? pDomainFile->GetExposedModuleObjectIfExists() : NULL;
+
+ if(refModule != NULL)
+ return (ReflectModuleBaseObject*)OBJECTREFToObject(refModule);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, keepAlive);
+ refModule = pModule->GetExposedObject();
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+ return (ReflectModuleBaseObject*)OBJECTREFToObject(refModule);
+}
+
+NOINLINE AssemblyBaseObject* GetRuntimeAssemblyHelper(LPVOID __me, DomainAssembly *pAssembly, OBJECTREF keepAlive)
+{
+ FC_INNER_PROLOG_NO_ME_SETUP();
+ if (pAssembly == NULL)
+ return NULL;
+
+    OBJECTREF refAssembly = pAssembly->GetExposedAssemblyObjectIfExists();
+
+ if(refAssembly != NULL)
+ return (AssemblyBaseObject*)OBJECTREFToObject(refAssembly);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, keepAlive);
+ refAssembly = pAssembly->GetExposedAssemblyObject();
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+ return (AssemblyBaseObject*)OBJECTREFToObject(refAssembly);
+}
+
+
+// This is the routine that is called by the 'typeof()' operator in C#. It is one of the most commonly used
+// reflection operations. This call should be optimized away in nearly all situations.
+FCIMPL1_V(ReflectClassBaseObject*, RuntimeTypeHandle::GetTypeFromHandle, FCALLRuntimeTypeHandle th)
+{
+ FCALL_CONTRACT;
+
+ FCUnique(0x31);
+ return FCALL_RTH_TO_REFLECTCLASS(th);
+}
+FCIMPLEND
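+
+// For illustration: a C# expression such as 'typeof(string)' compiles to IL of the form
+//
+//     ldtoken [mscorlib]System.String
+//     call    class System.Type System.Type::GetTypeFromHandle(valuetype System.RuntimeTypeHandle)
+//
+// and the JIT typically recognizes this pair and folds it into a direct object fetch, which is
+// why the FCall above should rarely execute.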
+
+FCIMPL1(ReflectClassBaseObject*, RuntimeTypeHandle::GetRuntimeType, EnregisteredTypeHandle th)
+{
+ FCALL_CONTRACT;
+
+ TypeHandle typeHandle = TypeHandle::FromPtr(th);
+ _ASSERTE(CheckPointer(typeHandle.AsPtr(), NULL_OK));
+    if (typeHandle.AsPtr() == NULL)
+        return NULL;
+
+    if (!typeHandle.IsTypeDesc())
+    {
+        OBJECTREF typePtr = typeHandle.AsMethodTable()->GetManagedClassObjectIfExists();
+        if (typePtr != NULL)
+        {
+            return (ReflectClassBaseObject*)OBJECTREFToObject(typePtr);
+        }
+    }
+
+ RETURN_CLASS_OBJECT(typeHandle, NULL);
+}
+FCIMPLEND
+
+FCIMPL1_V(EnregisteredTypeHandle, RuntimeTypeHandle::GetValueInternal, FCALLRuntimeTypeHandle RTH)
+{
+ FCALL_CONTRACT;
+
+ if (FCALL_RTH_TO_REFLECTCLASS(RTH) == NULL)
+ return 0;
+
+    return FCALL_RTH_TO_REFLECTCLASS(RTH)->GetType().AsPtr();
+}
+FCIMPLEND
+
+// TypeEqualsHelper and TypeNotEqualsHelper are almost identical.
+// Unfortunately we cannot combine them because they need to hardcode the caller's name.
+NOINLINE static BOOL TypeEqualSlow(OBJECTREF refL, OBJECTREF refR, LPVOID __me)
+{
+ BOOL ret = FALSE;
+
+ FC_INNER_PROLOG_NO_ME_SETUP();
+
+ _ASSERTE(refL != NULL && refR != NULL);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, refL, refR);
+
+ MethodDescCallSite TypeEqualsMethod(METHOD__OBJECT__EQUALS, &refL);
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(refL),
+ ObjToArgSlot(refR)
+ };
+
+ ret = TypeEqualsMethod.Call_RetBool(args);
+
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+
+ return ret;
+}
+
+
+
+#include <optsmallperfcritical.h>
+
+FCIMPL2(FC_BOOL_RET, RuntimeTypeHandle::TypeEQ, Object* left, Object* right)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refL = (OBJECTREF)left;
+ OBJECTREF refR = (OBJECTREF)right;
+
+ if (refL == refR)
+ {
+ FC_RETURN_BOOL(TRUE);
+ }
+
+ if (!refL || !refR)
+ {
+ FC_RETURN_BOOL(FALSE);
+ }
+
+    if (refL->GetMethodTable() == g_pRuntimeTypeClass || refR->GetMethodTable() == g_pRuntimeTypeClass)
+    {
+        // Quick path for the common negative case: RuntimeType instances are unique per type, so
+        // once the reference comparison above has failed, the types cannot be equal if either
+        // operand is a genuine RuntimeType.
+        FC_RETURN_BOOL(FALSE);
+    }
+
+ // The fast path didn't get us the result
+ // Let's try the slow path: refL.Equals(refR);
+ FC_INNER_RETURN(FC_BOOL_RET, (FC_BOOL_RET)(!!TypeEqualSlow(refL, refR, __me)));
+}
+FCIMPLEND
+
+FCIMPL2(FC_BOOL_RET, RuntimeTypeHandle::TypeNEQ, Object* left, Object* right)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refL = (OBJECTREF)left;
+ OBJECTREF refR = (OBJECTREF)right;
+
+ if (refL == refR)
+ {
+ FC_RETURN_BOOL(FALSE);
+ }
+
+ if (!refL || !refR)
+ {
+ FC_RETURN_BOOL(TRUE);
+ }
+
+    if (refL->GetMethodTable() == g_pRuntimeTypeClass || refR->GetMethodTable() == g_pRuntimeTypeClass)
+    {
+        // Quick path for the common negative case (see TypeEQ above): distinct references cannot
+        // denote the same type when either operand is a genuine RuntimeType.
+        FC_RETURN_BOOL(TRUE);
+    }
+
+ // The fast path didn't get us the result
+ // Let's try the slow path: refL.Equals(refR);
+ FC_INNER_RETURN(FC_BOOL_RET, (FC_BOOL_RET)(!TypeEqualSlow(refL, refR, __me)));
+}
+FCIMPLEND
+
+#include <optdefault.h>
+
+
+
+#ifndef FEATURE_CORECLR
+FCIMPL2(FC_BOOL_RET, RuntimeTypeHandle::IsEquivalentTo, ReflectClassBaseObject *rtType1UNSAFE, ReflectClassBaseObject *rtType2UNSAFE)
+{
+ FCALL_CONTRACT;
+
+ BOOL bResult = FALSE;
+
+ REFLECTCLASSBASEREF rtType1 = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(rtType1UNSAFE);
+ REFLECTCLASSBASEREF rtType2 = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(rtType2UNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(rtType1, rtType2);
+ if (rtType1 == NULL)
+ COMPlusThrowArgumentNull(W("rtType1"));
+ if (rtType2 == NULL)
+ COMPlusThrowArgumentNull(W("rtType2"));
+
+ bResult = rtType1->GetType().IsEquivalentTo(rtType2->GetType());
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(bResult);
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, RuntimeTypeHandle::IsEquivalentType, ReflectClassBaseObject *rtTypeUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ BOOL bResult = FALSE;
+
+ TypeHandle typeHandle = rtTypeUNSAFE->GetType();
+ if (!typeHandle.IsTypeDesc())
+ bResult = typeHandle.AsMethodTable()->GetClass()->IsEquivalentType();
+
+ FC_RETURN_BOOL(bResult);
+}
+FCIMPLEND
+#endif // !FEATURE_CORECLR
+
+#ifdef FEATURE_COMINTEROP
+FCIMPL1(FC_BOOL_RET, RuntimeTypeHandle::IsWindowsRuntimeObjectType, ReflectClassBaseObject *rtTypeUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ BOOL isWindowsRuntimeType = FALSE;
+
+ TypeHandle typeHandle = rtTypeUNSAFE->GetType();
+ MethodTable *pMT = typeHandle.GetMethodTable();
+
+ if (pMT != NULL)
+ {
+ isWindowsRuntimeType = pMT->IsWinRTObjectType();
+ }
+
+ FC_RETURN_BOOL(isWindowsRuntimeType);
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, RuntimeTypeHandle::IsTypeExportedToWindowsRuntime, ReflectClassBaseObject *rtTypeUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ BOOL isExportedToWinRT = FALSE;
+
+ TypeHandle typeHandle = rtTypeUNSAFE->GetType();
+ MethodTable *pMT = typeHandle.GetMethodTable();
+
+ if (pMT != NULL)
+ {
+ isExportedToWinRT = pMT->IsExportedToWinRT();
+ }
+
+ FC_RETURN_BOOL(isExportedToWinRT);
+}
+FCIMPLEND
+#endif // FEATURE_COMINTEROP
+
+NOINLINE static MethodDesc * RestoreMethodHelper(MethodDesc * pMethod, LPVOID __me)
+{
+ FC_INNER_PROLOG_NO_ME_SETUP();
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+ pMethod->CheckRestore();
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+
+ return pMethod;
+}
+
+FCIMPL1(MethodDesc *, RuntimeTypeHandle::GetFirstIntroducedMethod, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pTypeUNSAFE));
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+ TypeHandle typeHandle = refType->GetType();
+
+ if (typeHandle.IsGenericVariable())
+ FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
+
+ if (typeHandle.IsTypeDesc()) {
+ if (!typeHandle.IsArray())
+ return NULL;
+ }
+
+ MethodTable* pMT = typeHandle.GetMethodTable();
+ if (pMT == NULL)
+ return NULL;
+
+ MethodDesc* pMethod = MethodTable::IntroducedMethodIterator::GetFirst(pMT);
+
+    // The only methods that can show up here unrestored are instantiated methods. Check for that before performing the expensive IsRestored() check.
+ if (pMethod != NULL && pMethod->GetClassification() == mcInstantiated && !pMethod->IsRestored()) {
+ FC_INNER_RETURN(MethodDesc *, RestoreMethodHelper(pMethod, __me));
+ }
+
+ _ASSERTE(pMethod == NULL || pMethod->IsRestored());
+ return pMethod;
+}
+FCIMPLEND
+
+#include <optsmallperfcritical.h>
+FCIMPL1(void, RuntimeTypeHandle::GetNextIntroducedMethod, MethodDesc ** ppMethod) {
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(ppMethod));
+ PRECONDITION(CheckPointer(*ppMethod));
+ }
+ CONTRACTL_END;
+
+ MethodDesc *pMethod = MethodTable::IntroducedMethodIterator::GetNext(*ppMethod);
+
+ *ppMethod = pMethod;
+
+ if (pMethod != NULL && pMethod->GetClassification() == mcInstantiated && !pMethod->IsRestored()) {
+ FC_INNER_RETURN_VOID(RestoreMethodHelper(pMethod, __me));
+ }
+
+ _ASSERTE(pMethod == NULL || pMethod->IsRestored());
+}
+FCIMPLEND
+#include <optdefault.h>
+
+FCIMPL1(INT32, RuntimeTypeHandle::GetCorElementType, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ return refType->GetType().GetSignatureCorElementType();
+}
+FCIMPLEND
+
+FCIMPL1(AssemblyBaseObject*, RuntimeTypeHandle::GetAssembly, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ DomainFile *pDomainFile = NULL;
+
+ Module *pModule = refType->GetType().GetAssembly()->GetManifestModule();
+
+ pDomainFile = pModule->FindDomainFile(GetAppDomain());
+#ifdef FEATURE_LOADER_OPTIMIZATION
+ if (pDomainFile == NULL)
+ {
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refType);
+
+ pDomainFile = GetAppDomain()->LoadDomainNeutralModuleDependency(pModule, FILE_LOADED);
+
+ HELPER_METHOD_FRAME_END();
+ }
+#endif // FEATURE_LOADER_OPTIMIZATION
+
+
+ FC_RETURN_ASSEMBLY_OBJECT((DomainAssembly *)pDomainFile, refType);
+}
+FCIMPLEND
+
+
+FCIMPL1(FC_BOOL_RET, RuntimeFieldHandle::AcquiresContextFromThis, FieldDesc *pField)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pField));
+ }
+ CONTRACTL_END;
+
+ FC_RETURN_BOOL(pField->IsSharedByGenericInstantiations());
+
+}
+FCIMPLEND
+
+// static
+BOOL QCALLTYPE RuntimeFieldHandle::IsSecurityCritical(FieldDesc *pFD)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pFD));
+ }
+ CONTRACTL_END;
+
+ BOOL fIsCritical = FALSE;
+
+ BEGIN_QCALL;
+
+ fIsCritical = Security::IsFieldCritical(pFD);
+
+ END_QCALL;
+
+ return fIsCritical;
+}
+
+// static
+BOOL QCALLTYPE RuntimeFieldHandle::IsSecuritySafeCritical(FieldDesc *pFD)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pFD));
+ }
+ CONTRACTL_END;
+
+ BOOL fIsSafeCritical = FALSE;
+
+ BEGIN_QCALL;
+
+ fIsSafeCritical = Security::IsFieldSafeCritical(pFD);
+
+ END_QCALL;
+
+ return fIsSafeCritical;
+}
+
+// static
+BOOL QCALLTYPE RuntimeFieldHandle::IsSecurityTransparent(FieldDesc *pFD)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pFD));
+ }
+ CONTRACTL_END;
+
+ BOOL fIsTransparent = FALSE;
+
+ BEGIN_QCALL;
+
+ fIsTransparent = Security::IsFieldTransparent(pFD);
+
+ END_QCALL;
+
+ return fIsTransparent;
+}
+
+// static
+void QCALLTYPE RuntimeFieldHandle::CheckAttributeAccess(FieldDesc *pFD, QCall::ModuleHandle pModule)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pFD));
+ PRECONDITION(CheckPointer(pModule.m_pModule));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ if (Security::IsFieldCritical(pFD) && !Security::IsFieldSafeCritical(pFD))
+ {
+ GCX_COOP();
+
+ if (!DoAttributeTransparencyChecks(pFD->GetModule()->GetAssembly(), pModule->GetAssembly()))
+ {
+ ThrowFieldAccessException(NULL, pFD, TRUE, IDS_E_CRITICAL_FIELD_ACCESS_DENIED);
+ }
+ }
+
+ END_QCALL;
+}
+
+FCIMPL1(ReflectModuleBaseObject*, RuntimeTypeHandle::GetModule, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ Module *result;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
+
+ result = refType->GetType().GetModule();
+
+ END_SO_INTOLERANT_CODE;
+
+ FC_RETURN_MODULE_OBJECT(result, refType);
+}
+FCIMPLEND
+
+FCIMPL1(ReflectClassBaseObject *, RuntimeTypeHandle::GetBaseType, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle typeHandle = refType->GetType();
+
+ if (typeHandle.IsGenericVariable())
+ FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
+
+ if (typeHandle.IsTypeDesc()) {
+ if (!typeHandle.IsArray())
+ return NULL;
+ }
+
+ RETURN_CLASS_OBJECT(typeHandle.GetParent(), refType);
+}
+FCIMPLEND
+
+FCIMPL1(ReflectClassBaseObject *, RuntimeTypeHandle::GetElementType, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle typeHandle = refType->GetType();
+
+ if (!typeHandle.IsTypeDesc())
+ return 0;
+
+ if (typeHandle.IsGenericVariable())
+ return 0;
+
+ TypeHandle typeReturn;
+
+ if (typeHandle.IsArray())
+ typeReturn = typeHandle.AsArray()->GetArrayElementTypeHandle();
+ else
+ typeReturn = typeHandle.AsTypeDesc()->GetTypeParam();
+
+ RETURN_CLASS_OBJECT(typeReturn, refType);
+}
+FCIMPLEND
+
+FCIMPL1(INT32, RuntimeTypeHandle::GetArrayRank, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pTypeUNSAFE));
+ PRECONDITION(pTypeUNSAFE->GetType().IsArray());
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ return (INT32)refType->GetType().AsArray()->GetRank();
+}
+FCIMPLEND
+
+FCIMPL1(INT32, RuntimeTypeHandle::GetNumVirtuals, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle typeHandle = refType->GetType();
+
+ if (typeHandle.IsGenericVariable())
+ FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
+
+ MethodTable *pMT = typeHandle.GetMethodTable();
+
+ if (pMT)
+ return (INT32)pMT->GetNumVirtuals();
+ else
+ return 0; //REVIEW: should this return the number of methods in Object?
+}
+FCIMPLEND
+
+FCIMPL2(MethodDesc *, RuntimeTypeHandle::GetMethodAt, ReflectClassBaseObject *pTypeUNSAFE, INT32 slot) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle typeHandle = refType->GetType();
+
+ MethodDesc* pRetMethod = NULL;
+
+ if (typeHandle.IsGenericVariable())
+ FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
+
+ if (slot < 0 || slot >= (INT32)typeHandle.GetMethodTable()->GetNumVirtuals())
+ FCThrowRes(kArgumentException, W("Arg_ArgumentOutOfRangeException"));
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refType);
+ pRetMethod = typeHandle.GetMethodTable()->GetMethodDescForSlot((DWORD)slot);
+ HELPER_METHOD_FRAME_END();
+
+ return pRetMethod;
+}
+
+FCIMPLEND
+
+FCIMPL3(FC_BOOL_RET, RuntimeTypeHandle::GetFields, ReflectClassBaseObject *pTypeUNSAFE, INT32 **result, INT32 *pCount) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle typeHandle = refType->GetType();
+
+ if (!pCount || !result)
+ FCThrow(kArgumentNullException);
+
+ if (typeHandle.IsGenericVariable())
+ FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
+
+ if (typeHandle.IsTypeDesc()) {
+ *pCount = 0;
+ FC_RETURN_BOOL(TRUE);
+ }
+
+    MethodTable *pMT = typeHandle.GetMethodTable();
+ if (!pMT)
+ FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
+
+ BOOL retVal = FALSE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refType);
+ // <TODO>Check this approximation - we may be losing exact type information </TODO>
+ ApproxFieldDescIterator fdIterator(pMT, ApproxFieldDescIterator::ALL_FIELDS);
+ INT32 count = (INT32)fdIterator.Count();
+
+ if (count > *pCount)
+ {
+ *pCount = count;
+ }
+ else
+ {
+ for(INT32 i = 0; i < count; i ++)
+ result[i] = (INT32*)fdIterator.Next();
+
+ *pCount = count;
+ retVal = TRUE;
+ }
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(retVal);
+}
+FCIMPLEND
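+
+// Illustrative caller protocol (a sketch of the managed-side contract implied above): GetFields
+// follows a two-call pattern -- when the supplied buffer is too small, it only reports the
+// required count and returns FALSE:
+//
+//     INT32 count = 0;
+//     if (!GetFields(type, smallBuffer, &count))   // buffer too small: count is set, FALSE returned
+//     {
+//         ...allocate 'count' entries, then call again to fill them (returns TRUE)...
+//     }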
+
+void QCALLTYPE RuntimeMethodHandle::ConstructInstantiation(MethodDesc * pMethod, DWORD format, QCall::StringHandleOnStack retString)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ StackSString ss;
+ TypeString::AppendInst(ss, pMethod->LoadMethodInstantiation(), format);
+ retString.Set(ss);
+
+ END_QCALL;
+}
+
+void QCALLTYPE RuntimeTypeHandle::ConstructName(EnregisteredTypeHandle pTypeHandle, DWORD format, QCall::StringHandleOnStack retString)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ StackSString ss;
+ TypeString::AppendType(ss, TypeHandle::FromPtr(pTypeHandle), format);
+ retString.Set(ss);
+
+ END_QCALL;
+}
+
+PTRARRAYREF CopyRuntimeTypeHandles(TypeHandle * prgTH, FixupPointer<TypeHandle> * prgTH2, INT32 numTypeHandles, BinderClassID arrayElemType)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ PTRARRAYREF refReturn = NULL;
+ PTRARRAYREF refArray = NULL;
+
+ if (numTypeHandles == 0)
+ return NULL;
+
+ _ASSERTE((prgTH != NULL) || (prgTH2 != NULL));
+ if (prgTH != NULL)
+ {
+ _ASSERTE(prgTH2 == NULL);
+ }
+
+ GCPROTECT_BEGIN(refArray);
+ TypeHandle thRuntimeType = TypeHandle(MscorlibBinder::GetClass(arrayElemType));
+ TypeHandle arrayHandle = ClassLoader::LoadArrayTypeThrowing(thRuntimeType, ELEMENT_TYPE_SZARRAY);
+ refArray = (PTRARRAYREF)AllocateArrayEx(arrayHandle, &numTypeHandles, 1);
+
+ for (INT32 i = 0; i < numTypeHandles; i++)
+ {
+ TypeHandle th;
+
+ if (prgTH != NULL)
+ th = prgTH[i];
+ else
+ th = prgTH2[i].GetValue();
+
+ OBJECTREF refType = th.GetManagedClassObject();
+ refArray->SetAt(i, refType);
+ }
+
+ refReturn = refArray;
+ GCPROTECT_END();
+
+ return refReturn;
+}
+
+void QCALLTYPE RuntimeTypeHandle::GetConstraints(EnregisteredTypeHandle pTypeHandle, QCall::ObjectHandleOnStack retTypeArray)
+{
+ QCALL_CONTRACT;
+
+ TypeHandle* constraints = NULL;
+
+ BEGIN_QCALL;
+
+ TypeHandle typeHandle = TypeHandle::FromPtr(pTypeHandle);
+
+ if (!typeHandle.IsGenericVariable())
+ COMPlusThrow(kArgumentException, W("Arg_InvalidHandle"));
+
+ TypeVarTypeDesc* pGenericVariable = typeHandle.AsGenericVariable();
+
+ DWORD dwCount;
+ constraints = pGenericVariable->GetConstraints(&dwCount);
+
+ GCX_COOP();
+ retTypeArray.Set(CopyRuntimeTypeHandles(constraints, NULL, dwCount, CLASS__TYPE));
+
+ END_QCALL;
+
+ return;
+}
+
+FCIMPL1(PtrArray*, RuntimeTypeHandle::GetInterfaces, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle typeHandle = refType->GetType();
+
+ if (typeHandle.IsGenericVariable())
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ INT32 ifaceCount = 0;
+
+ PTRARRAYREF refRetVal = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refRetVal, refType);
+ {
+ if (typeHandle.IsTypeDesc())
+ {
+ if (typeHandle.IsArray())
+ {
+ ifaceCount = typeHandle.GetMethodTable()->GetNumInterfaces();
+ }
+ else
+ {
+ ifaceCount = 0;
+ }
+ }
+ else
+ {
+ ifaceCount = typeHandle.GetMethodTable()->GetNumInterfaces();
+ }
+
+ // Allocate the array
+ if (ifaceCount > 0)
+ {
+ TypeHandle arrayHandle = ClassLoader::LoadArrayTypeThrowing(TypeHandle(g_pRuntimeTypeClass), ELEMENT_TYPE_SZARRAY);
+ refRetVal = (PTRARRAYREF)AllocateArrayEx(arrayHandle, &ifaceCount, 1);
+
+ // populate type array
+ UINT i = 0;
+
+ MethodTable::InterfaceMapIterator it = typeHandle.GetMethodTable()->IterateInterfaceMap();
+ while (it.Next())
+ {
+ OBJECTREF refInterface = it.GetInterface()->GetManagedClassObject();
+ refRetVal->SetAt(i, refInterface);
+ _ASSERTE(refRetVal->GetAt(i) != NULL);
+ i++;
+ }
+ }
+ }
+ HELPER_METHOD_FRAME_END();
+
+ return (PtrArray*)OBJECTREFToObject(refRetVal);
+}
+FCIMPLEND
+
+FCIMPL1(INT32, RuntimeTypeHandle::GetAttributes, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle typeHandle = refType->GetType();
+
+ if (typeHandle.IsTypeDesc()) {
+
+ if (typeHandle.IsGenericVariable()) {
+ return tdPublic;
+ }
+
+ if (!typeHandle.IsArray())
+ return 0;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // __ComObject types are always public.
+ if (IsComObjectClass(typeHandle))
+ return (typeHandle.GetMethodTable()->GetAttrClass() & tdVisibilityMask) | tdPublic;
+#endif // FEATURE_COMINTEROP
+
+ INT32 ret = 0;
+
+ ret = (INT32)typeHandle.GetMethodTable()->GetAttrClass();
+ return ret;
+}
+FCIMPLEND
+
+#ifdef FEATURE_REMOTING
+FCIMPL1(FC_BOOL_RET, RuntimeTypeHandle::IsContextful, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle typeHandle = refType->GetType();
+
+ if (typeHandle.IsTypeDesc())
+ FC_RETURN_BOOL(FALSE);
+
+    MethodTable* pMT = typeHandle.GetMethodTable();
+
+ if (!pMT)
+ FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
+
+ FC_RETURN_BOOL(pMT->IsContextful());
+}
+FCIMPLEND
+#endif
+
+FCIMPL1(FC_BOOL_RET, RuntimeTypeHandle::IsValueType, ReflectClassBaseObject *pTypeUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ _ASSERTE(refType != NULL);
+
+ TypeHandle typeHandle = refType->GetType();
+
+ FC_RETURN_BOOL(typeHandle.IsValueType());
+}
+FCIMPLEND;
+
+FCIMPL1(FC_BOOL_RET, RuntimeTypeHandle::IsInterface, ReflectClassBaseObject *pTypeUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ _ASSERTE(refType != NULL);
+
+ TypeHandle typeHandle = refType->GetType();
+
+ FC_RETURN_BOOL(typeHandle.IsInterface());
+}
+FCIMPLEND;
+
+BOOL
+QCALLTYPE
+RuntimeTypeHandle::IsVisible(
+ EnregisteredTypeHandle pTypeHandle)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ BOOL fIsExternallyVisible = FALSE;
+
+ BEGIN_QCALL;
+
+ TypeHandle typeHandle = TypeHandle::FromPtr(pTypeHandle);
+
+ _ASSERTE(!typeHandle.IsNull());
+
+ fIsExternallyVisible = typeHandle.IsExternallyVisible();
+
+ END_QCALL;
+
+ return fIsExternallyVisible;
+} // RuntimeTypeHandle::IsVisible
+
+// static
+BOOL QCALLTYPE RuntimeTypeHandle::IsSecurityCritical(EnregisteredTypeHandle pTypeHandle)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pTypeHandle));
+ }
+ CONTRACTL_END;
+
+ BOOL fIsCritical = FALSE;
+
+ BEGIN_QCALL;
+
+ MethodTable *pMT = TypeHandle::FromPtr(pTypeHandle).GetMethodTable();
+ if (pMT != NULL)
+ {
+ fIsCritical = Security::IsTypeCritical(pMT);
+ }
+
+ END_QCALL;
+
+ return fIsCritical;
+}
+
+// static
+BOOL QCALLTYPE RuntimeTypeHandle::IsSecuritySafeCritical(EnregisteredTypeHandle pTypeHandle)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pTypeHandle));
+ }
+ CONTRACTL_END;
+
+ BOOL fIsSafeCritical = FALSE;
+
+ BEGIN_QCALL;
+
+ MethodTable *pMT = TypeHandle::FromPtr(pTypeHandle).GetMethodTable();
+ if (pMT != NULL)
+ {
+ fIsSafeCritical = Security::IsTypeSafeCritical(pMT);
+ }
+
+ END_QCALL;
+
+ return fIsSafeCritical;
+}
+
+// static
+BOOL QCALLTYPE RuntimeTypeHandle::IsSecurityTransparent(EnregisteredTypeHandle pTypeHandle)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pTypeHandle));
+ }
+ CONTRACTL_END;
+
+ BOOL fIsTransparent = TRUE;
+
+ BEGIN_QCALL;
+
+ MethodTable * pMT = TypeHandle::FromPtr(pTypeHandle).GetMethodTable();
+ if (pMT != NULL)
+ {
+ fIsTransparent = Security::IsTypeTransparent(pMT);
+ }
+
+ END_QCALL;
+
+ return fIsTransparent;
+}
+
+FCIMPL1(FC_BOOL_RET, RuntimeTypeHandle::HasProxyAttribute, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle typeHandle = refType->GetType();
+
+ // TODO: Justify this
+ if (typeHandle.IsGenericVariable())
+ FC_RETURN_BOOL(FALSE);
+
+ if (typeHandle.IsTypeDesc()) {
+ if (!typeHandle.IsArray())
+ FC_RETURN_BOOL(FALSE);
+ }
+
+    MethodTable* pMT = typeHandle.GetMethodTable();
+
+ if (!pMT)
+ FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
+
+ FC_RETURN_BOOL(pMT->GetClass()->HasRemotingProxyAttribute());
+}
+FCIMPLEND
+
+FCIMPL2(FC_BOOL_RET, RuntimeTypeHandle::IsComObject, ReflectClassBaseObject *pTypeUNSAFE, CLR_BOOL isGenericCOM) {
+#ifdef FEATURE_COMINTEROP
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ BOOL ret = FALSE;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle typeHandle = refType->GetType();
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refType);
+ {
+ if (isGenericCOM)
+ ret = IsComObjectClass(typeHandle);
+ else
+ ret = IsComWrapperClass(typeHandle);
+ }
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(ret);
+#else
+ CONTRACTL {
+ DISABLED(NOTHROW);
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pTypeUNSAFE));
+ }
+ CONTRACTL_END;
+ FCUnique(0x37);
+ FC_RETURN_BOOL(FALSE);
+#endif
+}
+FCIMPLEND
+
+FCIMPL1(LPCUTF8, RuntimeTypeHandle::GetUtf8Name, ReflectClassBaseObject* pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle typeHandle = refType->GetType();
+ INT32 tkTypeDef = mdTypeDefNil;
+ LPCUTF8 szName = NULL;
+
+ if (typeHandle.IsGenericVariable())
+ FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
+
+ if (typeHandle.IsTypeDesc())
+ FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
+
+    MethodTable* pMT = typeHandle.GetMethodTable();
+
+ if (pMT == NULL)
+ FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
+
+ tkTypeDef = (INT32)pMT->GetCl();
+
+ if (IsNilToken(tkTypeDef))
+ FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
+
+ if (FAILED(pMT->GetMDImport()->GetNameOfTypeDef(tkTypeDef, &szName, NULL)))
+ {
+ FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
+ }
+
+ _ASSERTE(CheckPointer(szName, NULL_OK));
+
+ return szName;
+}
+FCIMPLEND
+
+FCIMPL1(INT32, RuntimeTypeHandle::GetToken, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle typeHandle = refType->GetType();
+
+ if (typeHandle.IsTypeDesc())
+ {
+ if (typeHandle.IsGenericVariable())
+ {
+ INT32 tkTypeDef = typeHandle.AsGenericVariable()->GetToken();
+
+ _ASSERTE(!IsNilToken(tkTypeDef) && TypeFromToken(tkTypeDef) == mdtGenericParam);
+
+ return tkTypeDef;
+ }
+
+ return mdTypeDefNil;
+ }
+
+ return (INT32)typeHandle.AsMethodTable()->GetCl();
+}
+FCIMPLEND
+
+PVOID QCALLTYPE RuntimeTypeHandle::GetGCHandle(EnregisteredTypeHandle pTypeHandle, INT32 handleType)
+{
+ QCALL_CONTRACT;
+
+ OBJECTHANDLE objHandle = NULL;
+
+ BEGIN_QCALL;
+
+ GCX_COOP();
+
+ TypeHandle th = TypeHandle::FromPtr(pTypeHandle);
+ objHandle = th.GetDomain()->CreateTypedHandle(NULL, handleType);
+ th.GetLoaderAllocator()->RegisterHandleForCleanup(objHandle);
+
+ END_QCALL;
+
+ return objHandle;
+}
+
+void QCALLTYPE RuntimeTypeHandle::VerifyInterfaceIsImplemented(EnregisteredTypeHandle pTypeHandle, EnregisteredTypeHandle pIFaceHandle)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ TypeHandle typeHandle = TypeHandle::FromPtr(pTypeHandle);
+ TypeHandle ifaceHandle = TypeHandle::FromPtr(pIFaceHandle);
+
+ if (typeHandle.IsGenericVariable())
+ COMPlusThrow(kArgumentException, W("Arg_InvalidHandle"));
+
+ if (typeHandle.IsTypeDesc()) {
+ if (!typeHandle.IsArray())
+ COMPlusThrow(kArgumentException, W("Arg_NotFoundIFace"));
+ }
+
+ if (typeHandle.IsInterface())
+ COMPlusThrow(kArgumentException, W("Argument_InterfaceMap"));
+
+ if (!ifaceHandle.IsInterface())
+ COMPlusThrow(kArgumentException, W("Arg_MustBeInterface"));
+
+ // First try the cheap check, which amounts to iterating the interface map looking for
+ // the ifaceHandle MethodTable.
+ if (!typeHandle.GetMethodTable()->ImplementsInterface(ifaceHandle.AsMethodTable()))
+ { // If the cheap check fails, try the more expensive but complete check.
+ if (!typeHandle.CanCastTo(ifaceHandle))
+ { // If the complete check fails, we're certain that this type
+ // does not implement the interface specified.
+ COMPlusThrow(kArgumentException, W("Arg_NotFoundIFace"));
+ }
+ }
+
+ END_QCALL;
+}
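+
+// Note (illustrative): the cheap check above looks for an exact MethodTable hit in the interface
+// map, so matches that hold only through variance -- e.g. asking for IEnumerable<object> of a
+// type that implements IEnumerable<string> -- fall through to the complete CanCastTo() path.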
+
+INT32 QCALLTYPE RuntimeTypeHandle::GetInterfaceMethodImplementationSlot(EnregisteredTypeHandle pTypeHandle, EnregisteredTypeHandle pOwner, MethodDesc * pMD)
+{
+ QCALL_CONTRACT;
+
+ INT32 slotNumber = -1;
+
+ BEGIN_QCALL;
+
+ TypeHandle typeHandle = TypeHandle::FromPtr(pTypeHandle);
+ TypeHandle thOwnerOfMD = TypeHandle::FromPtr(pOwner);
+
+    // It is OK to have INVALID_SLOT in the case where an abstract class does not implement an
+    // interface method. This case cannot be reproduced using C#, which "implements" all interface
+    // methods with at least an abstract method; b19897_GetInterfaceMap_Abstract.exe tests this case.
+ //@TODO:STUBDISPATCH: Don't need to track down the implementation, just the declaration, and this can
+ //@TODO: be done faster - just need to make a function FindDispatchDecl.
+ DispatchSlot slot(typeHandle.GetMethodTable()->FindDispatchSlotForInterfaceMD(thOwnerOfMD, pMD));
+ if (!slot.IsNull())
+ slotNumber = slot.GetMethodDesc()->GetSlot();
+
+ END_QCALL;
+
+ return slotNumber;
+}
+
+void QCALLTYPE RuntimeTypeHandle::GetDefaultConstructor(EnregisteredTypeHandle pTypeHandle, QCall::ObjectHandleOnStack retMethod)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ MethodDesc* pCtor = NULL;
+
+ TypeHandle typeHandle = TypeHandle::FromPtr(pTypeHandle);
+
+ if (!typeHandle.IsTypeDesc())
+ {
+ MethodTable* pMethodTable = typeHandle.AsMethodTable();
+ if (pMethodTable->HasDefaultConstructor())
+ pCtor = pMethodTable->GetDefaultConstructor();
+ }
+
+ if (pCtor != NULL)
+ {
+ GCX_COOP();
+ retMethod.Set(pCtor->GetStubMethodInfo());
+ }
+ END_QCALL;
+
+ return;
+}
+
+FCIMPL1(ReflectMethodObject*, RuntimeTypeHandle::GetDeclaringMethod, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+    TypeHandle typeHandle = refType->GetType();
+
+ if (!typeHandle.IsTypeDesc())
+ return NULL;
+
+ TypeVarTypeDesc* pGenericVariable = typeHandle.AsGenericVariable();
+ mdToken defToken = pGenericVariable->GetTypeOrMethodDef();
+ if (TypeFromToken(defToken) != mdtMethodDef)
+ return NULL;
+
+ REFLECTMETHODREF pRet = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+ MethodDesc * pMD = pGenericVariable->LoadOwnerMethod();
+ pMD->CheckRestore();
+ pRet = pMD->GetStubMethodInfo();
+ HELPER_METHOD_FRAME_END();
+
+ return (ReflectMethodObject*)OBJECTREFToObject(pRet);
+}
+FCIMPLEND
+
+FCIMPL1(ReflectClassBaseObject*, RuntimeTypeHandle::GetDeclaringType, ReflectClassBaseObject *pTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ TypeHandle retTypeHandle;
+
+ BOOL fThrowException = FALSE;
+ LPCWSTR argName = W("Arg_InvalidHandle");
+ RuntimeExceptionKind reKind = kArgumentNullException;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle typeHandle = refType->GetType();
+
+ MethodTable* pMT = NULL;
+ mdTypeDef tkTypeDef = mdTokenNil;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
+ if (typeHandle.IsTypeDesc()) {
+
+ if (typeHandle.IsGenericVariable()) {
+ TypeVarTypeDesc* pGenericVariable = typeHandle.AsGenericVariable();
+ mdToken defToken = pGenericVariable->GetTypeOrMethodDef();
+
+ // Try the fast way first (if the declaring type has been loaded already).
+ if (TypeFromToken(defToken) == mdtMethodDef)
+ {
+ MethodDesc * retMethod = pGenericVariable->GetModule()->LookupMethodDef(defToken);
+ if (retMethod != NULL)
+ retTypeHandle = retMethod->GetMethodTable();
+ }
+ else
+ {
+ retTypeHandle = pGenericVariable->GetModule()->LookupTypeDef(defToken);
+ }
+
+ if (!retTypeHandle.IsNull() && retTypeHandle.IsFullyLoaded())
+ goto Exit;
+
+ // OK, need to go the slow way and load the type first.
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refType);
+ {
+ if (TypeFromToken(defToken) == mdtMethodDef)
+ {
+ retTypeHandle = pGenericVariable->LoadOwnerMethod()->GetMethodTable();
+ }
+ else
+ {
+ retTypeHandle = pGenericVariable->LoadOwnerType();
+ }
+ retTypeHandle.CheckRestore();
+ }
+ HELPER_METHOD_FRAME_END();
+ goto Exit;
+ }
+ if (!typeHandle.IsArray())
+ {
+ retTypeHandle = TypeHandle();
+ goto Exit;
+ }
+ }
+
+ pMT = typeHandle.GetMethodTable();
+
+ if (pMT == NULL)
+ {
+ fThrowException = TRUE;
+ goto Exit;
+ }
+
+ if(!pMT->GetClass()->IsNested())
+ {
+ retTypeHandle = TypeHandle();
+ goto Exit;
+ }
+
+ tkTypeDef = pMT->GetCl();
+
+ if (FAILED(typeHandle.GetModule()->GetMDImport()->GetNestedClassProps(tkTypeDef, &tkTypeDef)))
+ {
+ fThrowException = TRUE;
+ reKind = kBadImageFormatException;
+ argName = NULL;
+ goto Exit;
+ }
+
+ // Try the fast way first (if the declaring type has been loaded already).
+ retTypeHandle = typeHandle.GetModule()->LookupTypeDef(tkTypeDef);
+ if (retTypeHandle.IsNull())
+ {
+ // OK, need to go the slow way and load the type first.
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refType);
+ {
+ retTypeHandle = ClassLoader::LoadTypeDefThrowing(typeHandle.GetModule(), tkTypeDef,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ }
+ HELPER_METHOD_FRAME_END();
+ }
+Exit:
+
+ END_SO_INTOLERANT_CODE;
+
+ if (fThrowException)
+ {
+ FCThrowRes(reKind, argName);
+ }
+
+ RETURN_CLASS_OBJECT(retTypeHandle, refType);
+}
+FCIMPLEND
+
+FCIMPL2(FC_BOOL_RET, RuntimeTypeHandle::CanCastTo, ReflectClassBaseObject *pTypeUNSAFE, ReflectClassBaseObject *pTargetUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+ REFLECTCLASSBASEREF refTarget = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTargetUNSAFE);
+
+ if ((refType == NULL) || (refTarget == NULL))
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle fromHandle = refType->GetType();
+ TypeHandle toHandle = refTarget->GetType();
+
+ BOOL iRetVal = 0;
+
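+ // CanCastToNoGC answers from information already available, without triggering a GC or loading
+ // types; it returns MaybeCast when a definitive answer needs work that can allocate, in which
+ // case we erect a helper method frame and run the full check.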
+ TypeHandle::CastResult r = fromHandle.CanCastToNoGC(toHandle);
+ if (r == TypeHandle::MaybeCast)
+ {
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refType, refTarget);
+ iRetVal = fromHandle.CanCastTo(toHandle);
+ HELPER_METHOD_FRAME_END();
+ }
+ else
+ {
+ iRetVal = (r == TypeHandle::CanCast);
+ }
+
+ // We allow T to be cast to Nullable<T>
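+ // (For example, on the managed side typeof(int?).IsAssignableFrom(typeof(int)) returns true.)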
+ if (!iRetVal && Nullable::IsNullableType(toHandle) && !fromHandle.IsTypeDesc())
+ {
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refType, refTarget);
+ if (Nullable::IsNullableForType(toHandle, fromHandle.AsMethodTable()))
+ {
+ iRetVal = TRUE;
+ }
+ HELPER_METHOD_FRAME_END();
+ }
+
+ FC_RETURN_BOOL(iRetVal);
+}
+FCIMPLEND
+
+void QCALLTYPE RuntimeTypeHandle::GetTypeByNameUsingCARules(LPCWSTR pwzClassName, QCall::ModuleHandle pModule, QCall::ObjectHandleOnStack retType)
+{
+ QCALL_CONTRACT;
+
+ TypeHandle typeHandle;
+
+ BEGIN_QCALL;
+
+ if (!pwzClassName)
+ COMPlusThrowArgumentNull(W("className"),W("ArgumentNull_String"));
+
+ typeHandle = TypeName::GetTypeUsingCASearchRules(pwzClassName, pModule->GetAssembly());
+
+ GCX_COOP();
+ retType.Set(typeHandle.GetManagedClassObject());
+
+ END_QCALL;
+
+ return;
+}
+
+void QCALLTYPE RuntimeTypeHandle::GetTypeByName(LPCWSTR pwzClassName, BOOL bThrowOnError, BOOL bIgnoreCase, BOOL bReflectionOnly,
+ QCall::StackCrawlMarkHandle pStackMark,
+#ifdef FEATURE_HOSTED_BINDER
+ ICLRPrivBinder * pPrivHostBinder,
+#endif
+ BOOL bLoadTypeFromPartialNameHack, QCall::ObjectHandleOnStack retType)
+{
+ QCALL_CONTRACT;
+
+ TypeHandle typeHandle;
+
+ BEGIN_QCALL;
+
+ if (!pwzClassName)
+ COMPlusThrowArgumentNull(W("className"),W("ArgumentNull_String"));
+
+ GCX_COOP();
+ {
+ OBJECTREF keepAlive = NULL;
+
+ // BEGIN_QCALL/END_QCALL define try/catch scopes for potential exceptions thrown when bThrowOnError is enabled.
+ // Originally, in case of an exception, the GCFrame was removed from the Thread's Frame chain in the catch block, in UnwindAndContinueRethrowHelperInsideCatch.
+ // However, the catch block declared some local variables that overlapped the location of the now out-of-scope GCFrame and OBJECTREF, thereby corrupting
+ // those values. Having GCX_COOP/GCX_PREEMP switch GC modes allowed a situation where, in case of an exception, the thread would wait for a GC to complete
+ // while still having the GCFrame in the Thread's Frame chain, but with a corrupt OBJECTREF due to stack location reuse in the catch block.
+ // The solution is to force the removal of the GCFrame (and the Frames above it) from the Thread's Frame chain before entering the catch block, at the time of
+ // FrameWithCookieHolder's destruction.
+ GCPROTECT_HOLDER(keepAlive);
+
+ {
+ GCX_PREEMP();
+ typeHandle = TypeName::GetTypeManaged(pwzClassName, NULL, bThrowOnError, bIgnoreCase, bReflectionOnly, /*bProhibitAsmQualifiedName =*/ FALSE, pStackMark, bLoadTypeFromPartialNameHack, &keepAlive
+#ifdef FEATURE_HOSTED_BINDER
+ , pPrivHostBinder
+#endif
+ );
+ }
+
+ if (!typeHandle.IsNull())
+ {
+ retType.Set(typeHandle.GetManagedClassObject());
+ }
+ }
+
+ END_QCALL;
+
+ return;
+}
+
+FCIMPL6(FC_BOOL_RET, RuntimeTypeHandle::SatisfiesConstraints, PTR_ReflectClassBaseObject pParamTypeUNSAFE, TypeHandle *typeContextArgs, INT32 typeContextCount, TypeHandle *methodContextArgs, INT32 methodContextCount, PTR_ReflectClassBaseObject pArgumentTypeUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(typeContextArgs, NULL_OK));
+ PRECONDITION(CheckPointer(methodContextArgs, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refParamType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pParamTypeUNSAFE);
+ REFLECTCLASSBASEREF refArgumentType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pArgumentTypeUNSAFE);
+
+ TypeHandle thGenericParameter = refParamType->GetType();
+ TypeHandle thGenericArgument = refArgumentType->GetType();
+ BOOL bResult = FALSE;
+ SigTypeContext typeContext;
+
+ Instantiation classInst;
+ Instantiation methodInst;
+
+ if (typeContextArgs != NULL)
+ {
+ classInst = Instantiation(typeContextArgs, typeContextCount);
+ }
+
+ if (methodContextArgs != NULL)
+ {
+ methodInst = Instantiation(methodContextArgs, methodContextCount);
+ }
+
+ SigTypeContext::InitTypeContext(classInst, methodInst, &typeContext);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refParamType, refArgumentType);
+ {
+ bResult = thGenericParameter.AsGenericVariable()->SatisfiesConstraints(&typeContext, thGenericArgument);
+ }
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(bResult);
+}
+FCIMPLEND
+
+void QCALLTYPE RuntimeTypeHandle::GetInstantiation(EnregisteredTypeHandle pType, QCall::ObjectHandleOnStack retTypes, BOOL fAsRuntimeTypeArray)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ TypeHandle typeHandle = TypeHandle::FromPtr(pType);
+ Instantiation inst = typeHandle.GetInstantiation();
+ GCX_COOP();
+ retTypes.Set(CopyRuntimeTypeHandles(NULL, inst.GetRawArgs(), inst.GetNumArgs(), fAsRuntimeTypeArray ? CLASS__CLASS : CLASS__TYPE));
+ END_QCALL;
+
+ return;
+}
+
+void QCALLTYPE RuntimeTypeHandle::MakeArray(EnregisteredTypeHandle pTypeHandle, INT32 rank, QCall::ObjectHandleOnStack retType)
+{
+ QCALL_CONTRACT;
+
+ TypeHandle arrayHandle;
+
+ BEGIN_QCALL;
+ arrayHandle = TypeHandle::FromPtr(pTypeHandle).MakeArray(rank);
+ GCX_COOP();
+ retType.Set(arrayHandle.GetManagedClassObject());
+ END_QCALL;
+
+ return;
+}
+
+void QCALLTYPE RuntimeTypeHandle::MakeSZArray(EnregisteredTypeHandle pTypeHandle, QCall::ObjectHandleOnStack retType)
+{
+ QCALL_CONTRACT;
+
+ TypeHandle arrayHandle;
+
+ BEGIN_QCALL;
+ arrayHandle = TypeHandle::FromPtr(pTypeHandle).MakeSZArray();
+ GCX_COOP();
+ retType.Set(arrayHandle.GetManagedClassObject());
+ END_QCALL;
+
+ return;
+}
+
+void QCALLTYPE RuntimeTypeHandle::MakePointer(EnregisteredTypeHandle pTypeHandle, QCall::ObjectHandleOnStack retType)
+{
+ QCALL_CONTRACT;
+
+ TypeHandle pointerHandle;
+
+ BEGIN_QCALL;
+ pointerHandle = TypeHandle::FromPtr(pTypeHandle).MakePointer();
+ GCX_COOP();
+ retType.Set(pointerHandle.GetManagedClassObject());
+ END_QCALL;
+
+ return;
+}
+
+void QCALLTYPE RuntimeTypeHandle::MakeByRef(EnregisteredTypeHandle pTypeHandle, QCall::ObjectHandleOnStack retType)
+{
+ QCALL_CONTRACT;
+
+ TypeHandle byRefHandle;
+
+ BEGIN_QCALL;
+ byRefHandle = TypeHandle::FromPtr(pTypeHandle).MakeByRef();
+ GCX_COOP();
+ retType.Set(byRefHandle.GetManagedClassObject());
+ END_QCALL;
+
+ return;
+}
+
+BOOL QCALLTYPE RuntimeTypeHandle::IsCollectible(EnregisteredTypeHandle pTypeHandle)
+{
+ QCALL_CONTRACT;
+
+ BOOL retVal = FALSE;
+
+ BEGIN_QCALL;
+ retVal = TypeHandle::FromPtr(pTypeHandle).GetLoaderAllocator()->IsCollectible();
+ END_QCALL;
+
+ return retVal;
+}
+
+void QCALLTYPE RuntimeTypeHandle::Instantiate(EnregisteredTypeHandle pTypeHandle, TypeHandle * pInstArray, INT32 cInstArray, QCall::ObjectHandleOnStack retType)
+{
+ QCALL_CONTRACT;
+
+ TypeHandle type;
+
+ BEGIN_QCALL;
+ type = TypeHandle::FromPtr(pTypeHandle).Instantiate(Instantiation(pInstArray, cInstArray));
+ GCX_COOP();
+ retType.Set(type.GetManagedClassObject());
+ END_QCALL;
+
+ return;
+}
+
+void QCALLTYPE RuntimeTypeHandle::GetGenericTypeDefinition(EnregisteredTypeHandle pTypeHandle, QCall::ObjectHandleOnStack retType)
+{
+ QCALL_CONTRACT;
+
+ TypeHandle typeDef;
+
+ BEGIN_QCALL;
+
+ TypeHandle genericType = TypeHandle::FromPtr(pTypeHandle);
+
+ typeDef = ClassLoader::LoadTypeDefThrowing(genericType.GetModule(),
+ genericType.GetMethodTable()->GetCl(),
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+
+ GCX_COOP();
+ retType.Set(typeDef.GetManagedClassObject());
+
+ END_QCALL;
+
+ return;
+}
+
+FCIMPL2(FC_BOOL_RET, RuntimeTypeHandle::CompareCanonicalHandles, ReflectClassBaseObject *pLeftUNSAFE, ReflectClassBaseObject *pRightUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF refLeft = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pLeftUNSAFE);
+ REFLECTCLASSBASEREF refRight = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pRightUNSAFE);
+
+ if ((refLeft == NULL) || (refRight == NULL))
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ FC_RETURN_BOOL(refLeft->GetType().GetCanonicalMethodTable() == refRight->GetType().GetCanonicalMethodTable());
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, RuntimeTypeHandle::HasInstantiation, PTR_ReflectClassBaseObject pTypeUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ FC_RETURN_BOOL(refType->GetType().HasInstantiation());
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, RuntimeTypeHandle::IsGenericTypeDefinition, PTR_ReflectClassBaseObject pTypeUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ FC_RETURN_BOOL(refType->GetType().IsGenericTypeDefinition());
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, RuntimeTypeHandle::IsGenericVariable, PTR_ReflectClassBaseObject pTypeUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ FC_RETURN_BOOL(refType->GetType().IsGenericVariable());
+}
+FCIMPLEND
+
+FCIMPL1(INT32, RuntimeTypeHandle::GetGenericVariableIndex, PTR_ReflectClassBaseObject pTypeUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ return (INT32)refType->GetType().AsGenericVariable()->GetIndex();
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, RuntimeTypeHandle::ContainsGenericVariables, PTR_ReflectClassBaseObject pTypeUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ FC_RETURN_BOOL(refType->GetType().ContainsGenericVariables());
+}
+FCIMPLEND
+
+FCIMPL1(IMDInternalImport*, RuntimeTypeHandle::GetMetadataImport, ReflectClassBaseObject * pTypeUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ Module *pModule = refType->GetType().GetModule();
+
+ return pModule->GetMDImport();
+}
+FCIMPLEND
+
+
+//***********************************************************************************
+//***********************************************************************************
+//***********************************************************************************
+
+void * QCALLTYPE RuntimeMethodHandle::GetFunctionPointer(MethodDesc * pMethod)
+{
+ QCALL_CONTRACT;
+
+ void* funcPtr = 0;
+
+ BEGIN_QCALL;
+
+ funcPtr = (void*)pMethod->GetMultiCallableAddrOfCode();
+
+ END_QCALL;
+
+ return funcPtr;
+}
+
+FCIMPL1(LPCUTF8, RuntimeMethodHandle::GetUtf8Name, MethodDesc *pMethod) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ LPCUTF8 szName = NULL;
+
+ if (!pMethod)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ szName = pMethod->GetName();
+
+ _ASSERTE(CheckPointer(szName, NULL_OK));
+
+ return szName;
+}
+FCIMPLEND
+
+FCIMPL2(FC_BOOL_RET, RuntimeMethodHandle::MatchesNameHash, MethodDesc * pMethod, ULONG hash)
+{
+ FCALL_CONTRACT;
+
+ FC_RETURN_BOOL(pMethod->MightHaveName(hash));
+}
+FCIMPLEND
+
+FCIMPL1(StringObject*, RuntimeMethodHandle::GetName, MethodDesc *pMethod) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ if (!pMethod)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ STRINGREF refName = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+ refName = StringObject::NewString(pMethod->GetName());
+ HELPER_METHOD_FRAME_END();
+
+ return (StringObject*)OBJECTREFToObject(refName);
+}
+FCIMPLEND
+
+FCIMPL1(INT32, RuntimeMethodHandle::GetAttributes, MethodDesc *pMethod) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ if (!pMethod)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ INT32 retVal = 0;
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
+ retVal = (INT32)pMethod->GetAttrs();
+ END_SO_INTOLERANT_CODE;
+ return retVal;
+}
+FCIMPLEND
+
+FCIMPL1(INT32, RuntimeMethodHandle::GetImplAttributes, ReflectMethodObject *pMethodUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ if (!pMethodUNSAFE)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ MethodDesc* pMethod = pMethodUNSAFE->GetMethod();
+ INT32 attributes = 0;
+
+ if (IsNilToken(pMethod->GetMemberDef()))
+ return attributes;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
+ {
+ attributes = (INT32)pMethod->GetImplAttrs();
+ }
+ END_SO_INTOLERANT_CODE;
+
+ return attributes;
+}
+FCIMPLEND
+
+
+FCIMPL1(ReflectClassBaseObject*, RuntimeMethodHandle::GetDeclaringType, MethodDesc *pMethod) {
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMethod));
+ }
+ CONTRACTL_END;
+
+ if (!pMethod)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ MethodTable *pMT = pMethod->GetMethodTable();
+ TypeHandle declType(pMT);
+ if (pMT->IsArray())
+ {
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ // Load the TypeDesc for the array type. Note the returned type is approximate, i.e.
+ // if shared between reference array types then we will get object[] back.
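+ // (For example, a method on a String[] may be reported as declared on Object[].)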
+ DWORD rank = pMT->GetRank();
+ TypeHandle elemType = pMT->GetApproxArrayElementTypeHandle();
+ declType = ClassLoader::LoadArrayTypeThrowing(elemType, pMT->GetInternalCorElementType(), rank);
+ HELPER_METHOD_FRAME_END();
+ }
+ RETURN_CLASS_OBJECT(declType, NULL);
+}
+FCIMPLEND
+
+FCIMPL1(INT32, RuntimeMethodHandle::GetSlot, MethodDesc *pMethod) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ if (!pMethod)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ return (INT32)pMethod->GetSlot();
+}
+FCIMPLEND
+
+FCIMPL3(Object *, SignatureNative::GetCustomModifiers, SignatureNative* pSignatureUNSAFE,
+ INT32 parameter, CLR_BOOL fRequired)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ struct
+ {
+ SIGNATURENATIVEREF pSig;
+ PTRARRAYREF retVal;
+ } gc;
+
+ gc.pSig = (SIGNATURENATIVEREF)pSignatureUNSAFE;
+ gc.retVal = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+ {
+
+ BYTE callConv = *(BYTE*)gc.pSig->GetCorSig();
+ SigTypeContext typeContext;
+ gc.pSig->GetTypeContext(&typeContext);
+ MetaSig sig(gc.pSig->GetCorSig(),
+ gc.pSig->GetCorSigSize(),
+ gc.pSig->GetModule(),
+ &typeContext,
+ (callConv & IMAGE_CEE_CS_CALLCONV_MASK) == IMAGE_CEE_CS_CALLCONV_FIELD ? MetaSig::sigField : MetaSig::sigMember);
+ _ASSERTE(callConv == sig.GetCallingConventionInfo());
+
+ SigPointer argument(NULL, 0);
+
+ PRECONDITION(sig.GetCallingConvention() != IMAGE_CEE_CS_CALLCONV_FIELD || parameter == 1);
+
+ if (parameter == 0)
+ {
+ argument = sig.GetReturnProps();
+ }
+ else
+ {
+ for(INT32 i = 0; i < parameter; i++)
+ sig.NextArg();
+
+ argument = sig.GetArgProps();
+ }
+
+ //if (parameter < 0 || parameter > (INT32)sig.NumFixedArgs())
+ // FCThrowResVoid(kArgumentNullException, W("Arg_ArgumentOutOfRangeException"));
+
+ SigPointer sp = argument;
+ Module* pModule = sig.GetModule();
+ INT32 cMods = 0;
+ CorElementType cmodType;
+
+ CorElementType cmodTypeExpected = fRequired ? ELEMENT_TYPE_CMOD_REQD : ELEMENT_TYPE_CMOD_OPT;
+
+ // Discover the number of required and optional custom modifiers.
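+ // In the signature blob, each custom modifier is encoded as an ELEMENT_TYPE_CMOD_REQD or
+ // ELEMENT_TYPE_CMOD_OPT byte followed by a compressed type token, and the chain ends at the
+ // first non-modifier element type (see ECMA-335 II.23.2.7); the loop below walks that chain.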
+ while(TRUE)
+ {
+ BYTE data;
+ IfFailThrow(sp.GetByte(&data));
+ cmodType = (CorElementType)data;
+
+ if (cmodType == ELEMENT_TYPE_CMOD_REQD || cmodType == ELEMENT_TYPE_CMOD_OPT)
+ {
+ if (cmodType == cmodTypeExpected)
+ {
+ cMods ++;
+ }
+ }
+ else if (cmodType != ELEMENT_TYPE_SENTINEL)
+ {
+ break;
+ }
+
+ IfFailThrow(sp.GetToken(NULL));
+ }
+
+ // Reset sp and populate the arrays for the required and optional custom
+ // modifiers now that we know how long they should be.
+ sp = argument;
+
+ MethodTable *pMT = MscorlibBinder::GetClass(CLASS__TYPE);
+ TypeHandle arrayHandle = ClassLoader::LoadArrayTypeThrowing(TypeHandle(pMT), ELEMENT_TYPE_SZARRAY);
+
+ gc.retVal = (PTRARRAYREF) AllocateArrayEx(arrayHandle, &cMods, 1);
+
+ while(cMods != 0)
+ {
+ BYTE data;
+ IfFailThrow(sp.GetByte(&data));
+ cmodType = (CorElementType)data;
+
+ mdToken token;
+ IfFailThrow(sp.GetToken(&token));
+
+ if (cmodType == cmodTypeExpected)
+ {
+ TypeHandle th = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, token,
+ &typeContext,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef);
+
+ OBJECTREF refType = th.GetManagedClassObject();
+ gc.retVal->SetAt(--cMods, refType);
+ }
+ }
+ }
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(gc.retVal);
+}
+FCIMPLEND
+
+FCIMPL1(INT32, RuntimeMethodHandle::GetMethodDef, ReflectMethodObject *pMethodUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ if (!pMethodUNSAFE)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ MethodDesc* pMethod = pMethodUNSAFE->GetMethod();
+
+ if (pMethod->HasMethodInstantiation())
+ {
+ HELPER_METHOD_FRAME_BEGIN_RET_1(pMethodUNSAFE);
+ {
+ pMethod = pMethod->StripMethodInstantiation();
+ }
+ HELPER_METHOD_FRAME_END();
+ }
+
+ INT32 tkMethodDef = (INT32)pMethod->GetMemberDef();
+ _ASSERTE(TypeFromToken(tkMethodDef) == mdtMethodDef);
+
+ if (IsNilToken(tkMethodDef) || TypeFromToken(tkMethodDef) != mdtMethodDef)
+ return mdMethodDefNil;
+
+ return tkMethodDef;
+}
+FCIMPLEND
+
+FCIMPL6(void, SignatureNative::GetSignature,
+ SignatureNative* pSignatureNativeUNSAFE,
+ PCCOR_SIGNATURE pCorSig, DWORD cCorSig,
+ FieldDesc *pFieldDesc, ReflectMethodObject *pMethodUNSAFE, ReflectClassBaseObject *pDeclaringTypeUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(pDeclaringTypeUNSAFE || pMethodUNSAFE->GetMethod()->IsDynamicMethod());
+ PRECONDITION(CheckPointer(pCorSig, NULL_OK));
+ PRECONDITION(CheckPointer(pMethodUNSAFE, NULL_OK));
+ PRECONDITION(CheckPointer(pFieldDesc, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ struct
+ {
+ REFLECTCLASSBASEREF refDeclaringType;
+ REFLECTMETHODREF refMethod;
+ SIGNATURENATIVEREF pSig;
+ } gc;
+
+ gc.refDeclaringType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pDeclaringTypeUNSAFE);
+ gc.refMethod = (REFLECTMETHODREF)ObjectToOBJECTREF(pMethodUNSAFE);
+ gc.pSig = (SIGNATURENATIVEREF)pSignatureNativeUNSAFE;
+
+ MethodDesc *pMethod;
+ TypeHandle declType;
+
+ if (gc.refDeclaringType == NULL)
+ {
+ // for dynamic method, see precondition
+ pMethod = gc.refMethod->GetMethod();
+ declType = pMethod->GetMethodTable();
+ }
+ else
+ {
+ pMethod = gc.refMethod != NULL ? gc.refMethod->GetMethod() : NULL;
+ declType = gc.refDeclaringType->GetType();
+ }
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+ {
+ Module* pModule = declType.GetModule();
+
+ if (pMethod)
+ {
+ pMethod->GetSig(&pCorSig, &cCorSig);
+ if (pMethod->GetClassification() == mcInstantiated)
+ {
+ LoaderAllocator *pLoaderAllocator = pMethod->GetLoaderAllocator();
+ if (pLoaderAllocator->IsCollectible())
+ gc.pSig->SetKeepAlive(pLoaderAllocator->GetExposedObject());
+ }
+ }
+ else if (pFieldDesc)
+ pFieldDesc->GetSig(&pCorSig, &cCorSig);
+
+ gc.pSig->m_sig = pCorSig;
+ gc.pSig->m_cSig = cCorSig;
+ gc.pSig->m_pMethod = pMethod;
+
+ REFLECTCLASSBASEREF refDeclType = (REFLECTCLASSBASEREF)declType.GetManagedClassObject();
+ gc.pSig->SetDeclaringType(refDeclType);
+
+ PREFIX_ASSUME(pCorSig!= NULL);
+ BYTE callConv = *(BYTE*)pCorSig;
+ SigTypeContext typeContext;
+ if (pMethod)
+ SigTypeContext::InitTypeContext(
+ pMethod, declType.GetClassOrArrayInstantiation(), pMethod->LoadMethodInstantiation(), &typeContext);
+ else
+ SigTypeContext::InitTypeContext(declType, &typeContext);
+ MetaSig msig(pCorSig, cCorSig, pModule, &typeContext,
+ (callConv & IMAGE_CEE_CS_CALLCONV_MASK) == IMAGE_CEE_CS_CALLCONV_FIELD ? MetaSig::sigField : MetaSig::sigMember);
+
+ if (callConv == IMAGE_CEE_CS_CALLCONV_FIELD)
+ {
+ msig.NextArgNormalized();
+
+ OBJECTREF refRetType = msig.GetLastTypeHandleThrowing().GetManagedClassObject();
+ gc.pSig->SetReturnType(refRetType);
+ }
+ else
+ {
+ gc.pSig->SetCallingConvention(msig.GetCallingConventionInfo());
+
+ OBJECTREF refRetType = msig.GetRetTypeHandleThrowing().GetManagedClassObject();
+ gc.pSig->SetReturnType(refRetType);
+
+ INT32 nArgs = msig.NumFixedArgs();
+ TypeHandle arrayHandle = ClassLoader::LoadArrayTypeThrowing(TypeHandle(g_pRuntimeTypeClass), ELEMENT_TYPE_SZARRAY);
+
+ PTRARRAYREF ptrArrayarguments = (PTRARRAYREF) AllocateArrayEx(arrayHandle, &nArgs, 1);
+ gc.pSig->SetArgumentArray(ptrArrayarguments);
+
+ for (INT32 i = 0; i < nArgs; i++)
+ {
+ msig.NextArg();
+
+ OBJECTREF refArgType = msig.GetLastTypeHandleThrowing().GetManagedClassObject();
+ gc.pSig->SetArgument(i, refArgType);
+ }
+
+ _ASSERTE(gc.pSig->m_returnType != NULL);
+ }
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL2(FC_BOOL_RET, SignatureNative::CompareSig, SignatureNative* pLhsUNSAFE, SignatureNative* pRhsUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ INT32 ret = 0;
+
+ struct
+ {
+ SIGNATURENATIVEREF pLhs;
+ SIGNATURENATIVEREF pRhs;
+ } gc;
+
+ gc.pLhs = (SIGNATURENATIVEREF)pLhsUNSAFE;
+ gc.pRhs = (SIGNATURENATIVEREF)pRhsUNSAFE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+ {
+ ret = MetaSig::CompareMethodSigs(
+ gc.pLhs->GetCorSig(), gc.pLhs->GetCorSigSize(), gc.pLhs->GetModule(), NULL,
+ gc.pRhs->GetCorSig(), gc.pRhs->GetCorSigSize(), gc.pRhs->GetModule(), NULL);
+ }
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(ret);
+}
+FCIMPLEND
+
+#if FEATURE_LEGACYNETCF
+FCIMPL4(FC_BOOL_RET, SignatureNative::CompareSigForAppCompat, SignatureNative* pLhsUNSAFE, ReflectClassBaseObject * pTypeLhsUNSAFE, SignatureNative* pRhsUNSAFE, ReflectClassBaseObject * pTypeRhsUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ INT32 ret = 0;
+
+ struct
+ {
+ SIGNATURENATIVEREF pLhs;
+ REFLECTCLASSBASEREF refTypeLhs;
+ SIGNATURENATIVEREF pRhs;
+ REFLECTCLASSBASEREF refTypeRhs;
+ } gc;
+
+ gc.pLhs = (SIGNATURENATIVEREF)pLhsUNSAFE;
+ gc.refTypeLhs = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeLhsUNSAFE);
+ gc.pRhs = (SIGNATURENATIVEREF)pRhsUNSAFE;
+ gc.refTypeRhs = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeRhsUNSAFE);
+
+ if ((gc.refTypeLhs == NULL) || (gc.refTypeRhs == NULL))
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle typeHandle1 = gc.refTypeLhs->GetType();
+ TypeHandle typeHandle2 = gc.refTypeRhs->GetType();
+
+ // The type contexts will be used in substituting formal type arguments in generic types.
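+ // (For example, with typeHandle1 = List<int>, a T appearing in the signature is substituted
+ // with int during the comparison.)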
+ SigTypeContext typeContext1(typeHandle1);
+ SigTypeContext typeContext2(typeHandle2);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+ {
+ MetaSig metaSig1(gc.pLhs->GetCorSig(), gc.pLhs->GetCorSigSize(), gc.pLhs->GetModule(), &typeContext1);
+ MetaSig metaSig2(gc.pRhs->GetCorSig(), gc.pRhs->GetCorSigSize(), gc.pRhs->GetModule(), &typeContext2);
+
+ ret = MetaSig::CompareMethodSigs(metaSig1, metaSig2, FALSE);
+ }
+ HELPER_METHOD_FRAME_END();
+ FC_RETURN_BOOL(ret);
+}
+FCIMPLEND
+#endif
+
+void QCALLTYPE RuntimeMethodHandle::GetMethodInstantiation(MethodDesc * pMethod, QCall::ObjectHandleOnStack retTypes, BOOL fAsRuntimeTypeArray)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ Instantiation inst = pMethod->LoadMethodInstantiation();
+
+ GCX_COOP();
+ retTypes.Set(CopyRuntimeTypeHandles(NULL, inst.GetRawArgs(), inst.GetNumArgs(), fAsRuntimeTypeArray ? CLASS__CLASS : CLASS__TYPE));
+ END_QCALL;
+
+ return;
+}
+
+FCIMPL1(FC_BOOL_RET, RuntimeMethodHandle::HasMethodInstantiation, MethodDesc * pMethod)
+{
+ FCALL_CONTRACT;
+
+ FC_RETURN_BOOL(pMethod->HasMethodInstantiation());
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, RuntimeMethodHandle::IsGenericMethodDefinition, MethodDesc * pMethod)
+{
+ FCALL_CONTRACT;
+
+ FC_RETURN_BOOL(pMethod->IsGenericMethodDefinition());
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, RuntimeMethodHandle::IsDynamicMethod, MethodDesc * pMethod)
+{
+ FCALL_CONTRACT;
+
+ FC_RETURN_BOOL(pMethod->IsNoMetadata());
+}
+FCIMPLEND
+
+FCIMPL1(Object*, RuntimeMethodHandle::GetResolver, MethodDesc * pMethod)
+{
+ FCALL_CONTRACT;
+
+ if (!pMethod)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ OBJECTREF resolver = NULL;
+ if (pMethod->IsLCGMethod())
+ {
+ resolver = pMethod->AsDynamicMethodDesc()->GetLCGMethodResolver()->GetManagedResolver();
+ }
+ return OBJECTREFToObject(resolver);
+}
+FCIMPLEND
+
+void QCALLTYPE RuntimeMethodHandle::Destroy(MethodDesc * pMethod)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ if (pMethod == NULL)
+ COMPlusThrowArgumentNull(NULL, W("Arg_InvalidHandle"));
+
+ DynamicMethodDesc* pDynamicMethodDesc = pMethod->AsDynamicMethodDesc();
+
+ GCX_COOP();
+
+ // Destroy should be called only if the managed part is gone.
+ _ASSERTE(OBJECTREFToObject(pDynamicMethodDesc->GetLCGMethodResolver()->GetManagedResolver()) == NULL);
+
+ // Fire Unload Dynamic Method Event here
+ ETW::MethodLog::DynamicMethodDestroyed(pMethod);
+
+ pDynamicMethodDesc->Destroy();
+
+ END_QCALL;
+}
+
+FCIMPL1(FC_BOOL_RET, RuntimeMethodHandle::IsTypicalMethodDefinition, ReflectMethodObject *pMethodUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ if (!pMethodUNSAFE)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ MethodDesc* pMethod = pMethodUNSAFE->GetMethod();
+
+ FC_RETURN_BOOL(pMethod->IsTypicalMethodDefinition());
+}
+FCIMPLEND
+
+void QCALLTYPE RuntimeMethodHandle::GetTypicalMethodDefinition(MethodDesc * pMethod, QCall::ObjectHandleOnStack refMethod)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+#ifdef _DEBUG
+ {
+ GCX_COOP();
+ _ASSERTE(((ReflectMethodObject *)(*refMethod.m_ppObject))->GetMethod() == pMethod);
+ }
+#endif
+ MethodDesc *pMethodTypical = pMethod->LoadTypicalMethodDefinition();
+ if (pMethodTypical != pMethod)
+ {
+ GCX_COOP();
+ refMethod.Set(pMethodTypical->GetStubMethodInfo());
+ }
+ END_QCALL;
+
+ return;
+}
+
+void QCALLTYPE RuntimeMethodHandle::StripMethodInstantiation(MethodDesc * pMethod, QCall::ObjectHandleOnStack refMethod)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ if (!pMethod)
+ COMPlusThrowArgumentNull(NULL, W("Arg_InvalidHandle"));
+
+#ifdef _DEBUG
+ {
+ GCX_COOP();
+ _ASSERTE(((ReflectMethodObject *)(*refMethod.m_ppObject))->GetMethod() == pMethod);
+ }
+#endif
+ MethodDesc *pMethodStripped = pMethod->StripMethodInstantiation();
+ if (pMethodStripped != pMethod)
+ {
+ GCX_COOP();
+ refMethod.Set(pMethodStripped->GetStubMethodInfo());
+ }
+ END_QCALL;
+
+ return;
+}
+
+// In the VM there might be more than one MethodDesc for a "method".
+// Examples are methods on generic types, which may have additional instantiating stubs,
+// and methods on value types, which may have additional unboxing stubs.
+//
+// For generic methods we always hand out an instantiating stub, except for a generic method definition.
+// For non-generic methods on generic types we need an instantiating stub if it's one of the following:
+// - a static method on a generic class
+// - a static or instance method on a generic interface
+// - a static or instance method on a generic value type
+// The Reflection policy is to always hand out instantiating stubs in these cases.
+//
+// For methods on non-generic value types we can use either the canonical method or the unboxing stub.
+// The Reflection policy is to always hand out unboxing stubs if the methods are virtual methods.
+// The reason for this is that in the current implementation of the class loader, the v-table slots for
+// those methods already point to unboxing stubs. Note that this is just an implementation choice
+// that might change in the future, but we should always keep this Reflection policy an invariant.
+//
+// For virtual methods on generic value types (the intersection of the two cases), reflection hands
+// out an unboxing instantiating stub.
+//
+// GetInstantiatingStub is called to:
+// 1. create an InstantiatedMethodDesc for a generic method when calling BindGenericArguments() on a generic
+// method. In this case instArray will not be null.
+// 2. create an InstantiatedMethodDesc for a method in a generic class. In this case instArray will be null.
+// 3. create an UnboxingStub for a method in a value type. In this case instArray will be null.
+// For cases 2 and 3, an instantiating stub or unboxing stub might not be needed, in which case the original
+// MethodDesc is returned.
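+//
+// Illustrative managed-side examples (not exhaustive): MethodInfo.MakeGenericMethod ends up in
+// case 1 and receives an instantiating stub; fetching a virtual method of a value type through
+// reflection ends up in case 3 and receives its unboxing stub.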
+FCIMPL3(MethodDesc*, RuntimeMethodHandle::GetStubIfNeeded,
+ MethodDesc *pMethod,
+ ReflectClassBaseObject *pTypeUNSAFE,
+ PtrArray* instArrayUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+ PTRARRAYREF instArray = (PTRARRAYREF)ObjectToOBJECTREF(instArrayUNSAFE);
+
+ if (refType == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle instType = refType->GetType();
+ MethodDesc *pNewMethod = pMethod;
+
+ // error conditions
+ if (!pMethod)
+ FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
+
+ if (instType.IsNull())
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ // Perf optimization: this logic is actually duplicated in FindOrCreateAssociatedMethodDescForReflection, but since it
+ // is the more common case it's worth the duplicate check here to avoid the helper method frame
+ if ( instArray == NULL &&
+ ( pMethod->HasMethodInstantiation() ||
+ ( !instType.IsValueType() &&
+ ( !instType.HasInstantiation() || instType.IsGenericTypeDefinition() ) ) ) )
+ {
+ return pNewMethod;
+ }
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refType, instArray);
+ {
+ TypeHandle *inst = NULL;
+ DWORD ntypars = 0;
+
+ if (instArray != NULL)
+ {
+ ntypars = instArray->GetNumComponents();
+
+ size_t size = ntypars * sizeof(TypeHandle);
+ if ((size / sizeof(TypeHandle)) != ntypars) // uint over/underflow
+ COMPlusThrow(kArgumentException);
+ inst = (TypeHandle*) _alloca(size);
+
+ for (DWORD i = 0; i < ntypars; i++)
+ {
+ REFLECTCLASSBASEREF instRef = (REFLECTCLASSBASEREF)instArray->GetAt(i);
+
+ if (instRef == NULL)
+ COMPlusThrowArgumentNull(W("inst"), W("ArgumentNull_ArrayElement"));
+
+ inst[i] = instRef->GetType();
+ }
+ }
+
+ pNewMethod = MethodDesc::FindOrCreateAssociatedMethodDescForReflection(pMethod, instType, Instantiation(inst, ntypars));
+ }
+ HELPER_METHOD_FRAME_END();
+
+ return pNewMethod;
+}
+FCIMPLEND
+
+
+FCIMPL2(MethodDesc*, RuntimeMethodHandle::GetMethodFromCanonical, MethodDesc *pMethod, ReflectClassBaseObject *pTypeUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMethod));
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ TypeHandle instType = refType->GetType();
+ MethodDesc* pMDescInCanonMT = instType.GetMethodTable()->GetParallelMethodDesc(pMethod);
+
+ return pMDescInCanonMT;
+}
+FCIMPLEND
+
+
+FCIMPL2(MethodBody *, RuntimeMethodHandle::GetMethodBody, ReflectMethodObject *pMethodUNSAFE, ReflectClassBaseObject *pDeclaringTypeUNSAFE)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ struct _gc
+ {
+ METHODBODYREF MethodBodyObj;
+ EXCEPTIONHANDLINGCLAUSEREF EHClauseObj;
+ LOCALVARIABLEINFOREF LocalVariableInfoObj;
+ U1ARRAYREF U1Array;
+ BASEARRAYREF TempArray;
+ REFLECTCLASSBASEREF declaringType;
+ REFLECTMETHODREF refMethod;
+ } gc;
+
+ gc.MethodBodyObj = NULL;
+ gc.EHClauseObj = NULL;
+ gc.LocalVariableInfoObj = NULL;
+ gc.U1Array = NULL;
+ gc.TempArray = NULL;
+ gc.declaringType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pDeclaringTypeUNSAFE);
+ gc.refMethod = (REFLECTMETHODREF)ObjectToOBJECTREF(pMethodUNSAFE);
+
+
+ if (!gc.refMethod)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ MethodDesc* pMethod = gc.refMethod->GetMethod();
+
+ TypeHandle declaringType = gc.declaringType == NULL ? TypeHandle() : gc.declaringType->GetType();
+
+ if (!pMethod->IsIL())
+ return NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+ {
+ MethodDesc *pMethodIL = pMethod;
+ if (pMethod->IsWrapperStub())
+ pMethodIL = pMethod->GetWrappedMethodDesc();
+
+ COR_ILMETHOD* pILHeader = pMethodIL->GetILHeader();
+
+ if (pILHeader)
+ {
+ MethodTable * pExceptionHandlingClauseMT = MscorlibBinder::GetClass(CLASS__EH_CLAUSE);
+ TypeHandle thEHClauseArray = ClassLoader::LoadArrayTypeThrowing(TypeHandle(pExceptionHandlingClauseMT), ELEMENT_TYPE_SZARRAY);
+
+ MethodTable * pLocalVariableMT = MscorlibBinder::GetClass(CLASS__LOCAL_VARIABLE_INFO);
+ TypeHandle thLocalVariableArray = ClassLoader::LoadArrayTypeThrowing(TypeHandle(pLocalVariableMT), ELEMENT_TYPE_SZARRAY);
+
+ Module* pModule = pMethod->GetModule();
+ COR_ILMETHOD_DECODER::DecoderStatus status;
+ COR_ILMETHOD_DECODER header(pILHeader, pModule->GetMDImport(), &status);
+
+ if (status != COR_ILMETHOD_DECODER::SUCCESS)
+ {
+ if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR)
+ {
+ // Throw a verification HR
+ COMPlusThrowHR(COR_E_VERIFICATION);
+ }
+ else
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ }
+
+ gc.MethodBodyObj = (METHODBODYREF)AllocateObject(MscorlibBinder::GetClass(CLASS__METHOD_BODY));
+
+ gc.MethodBodyObj->m_maxStackSize = header.GetMaxStack();
+ gc.MethodBodyObj->m_initLocals = !!(header.GetFlags() & CorILMethod_InitLocals);
+
+ if (header.IsFat())
+ gc.MethodBodyObj->m_localVarSigToken = header.GetLocalVarSigTok();
+ else
+ gc.MethodBodyObj->m_localVarSigToken = 0;
+
+ // Allocate the array of IL and fill it in from the method header.
+ BYTE* pIL = const_cast<BYTE*>(header.Code);
+ COUNT_T cIL = header.GetCodeSize();
+ gc.U1Array = (U1ARRAYREF) AllocatePrimitiveArray(ELEMENT_TYPE_U1, cIL);
+
+ SetObjectReference((OBJECTREF*)&gc.MethodBodyObj->m_IL, gc.U1Array, GetAppDomain());
+ memcpyNoGCRefs(gc.MethodBodyObj->m_IL->GetDataPtr(), pIL, cIL);
+
+ // Allocate the array of exception clauses.
+ INT32 cEh = (INT32)header.EHCount();
+ const COR_ILMETHOD_SECT_EH* ehInfo = header.EH;
+ gc.TempArray = (BASEARRAYREF) AllocateArrayEx(thEHClauseArray, &cEh, 1);
+
+ SetObjectReference((OBJECTREF*)&gc.MethodBodyObj->m_exceptionClauses, gc.TempArray, GetAppDomain());
+
+ for (INT32 i = 0; i < cEh; i++)
+ {
+ COR_ILMETHOD_SECT_EH_CLAUSE_FAT ehBuff;
+ const COR_ILMETHOD_SECT_EH_CLAUSE_FAT* ehClause =
+ (const COR_ILMETHOD_SECT_EH_CLAUSE_FAT*)ehInfo->EHClause(i, &ehBuff);
+
+ gc.EHClauseObj = (EXCEPTIONHANDLINGCLAUSEREF) AllocateObject(pExceptionHandlingClauseMT);
+
+ gc.EHClauseObj->m_flags = ehClause->GetFlags();
+ gc.EHClauseObj->m_tryOffset = ehClause->GetTryOffset();
+ gc.EHClauseObj->m_tryLength = ehClause->GetTryLength();
+ gc.EHClauseObj->m_handlerOffset = ehClause->GetHandlerOffset();
+ gc.EHClauseObj->m_handlerLength = ehClause->GetHandlerLength();
+
+ if ((ehClause->GetFlags() & COR_ILEXCEPTION_CLAUSE_FILTER) == 0)
+ gc.EHClauseObj->m_catchToken = ehClause->GetClassToken();
+ else
+ gc.EHClauseObj->m_filterOffset = ehClause->GetFilterOffset();
+
+ gc.MethodBodyObj->m_exceptionClauses->SetAt(i, (OBJECTREF) gc.EHClauseObj);
+ SetObjectReference((OBJECTREF*)&(gc.EHClauseObj->m_methodBody), (OBJECTREF)gc.MethodBodyObj, GetAppDomain());
+ }
+
+ if (header.LocalVarSig != NULL)
+ {
+ SigTypeContext sigTypeContext(pMethod, declaringType, pMethod->LoadMethodInstantiation());
+ MetaSig metaSig(header.LocalVarSig,
+ header.cbLocalVarSig,
+ pModule,
+ &sigTypeContext,
+ MetaSig::sigLocalVars);
+ INT32 cLocals = metaSig.NumFixedArgs();
+ gc.TempArray = (BASEARRAYREF) AllocateArrayEx(thLocalVariableArray, &cLocals, 1);
+ SetObjectReference((OBJECTREF*)&gc.MethodBodyObj->m_localVariables, gc.TempArray, GetAppDomain());
+
+ for (INT32 i = 0; i < cLocals; i ++)
+ {
+ gc.LocalVariableInfoObj = (LOCALVARIABLEINFOREF)AllocateObject(pLocalVariableMT);
+
+ gc.LocalVariableInfoObj->m_localIndex = i;
+
+ metaSig.NextArg();
+
+ CorElementType eType;
+ IfFailThrow(metaSig.GetArgProps().PeekElemType(&eType));
+ if (ELEMENT_TYPE_PINNED == eType)
+ gc.LocalVariableInfoObj->m_bIsPinned = TRUE;
+
+ TypeHandle tempType= metaSig.GetArgProps().GetTypeHandleThrowing(pModule, &sigTypeContext);
+ OBJECTREF refLocalType = tempType.GetManagedClassObject();
+ gc.LocalVariableInfoObj->SetType(refLocalType);
+ gc.MethodBodyObj->m_localVariables->SetAt(i, (OBJECTREF) gc.LocalVariableInfoObj);
+ }
+ }
+ else
+ {
+ INT32 cLocals = 0;
+ gc.TempArray = (BASEARRAYREF) AllocateArrayEx(thLocalVariableArray, &cLocals, 1);
+ SetObjectReference((OBJECTREF*)&gc.MethodBodyObj->m_localVariables, gc.TempArray, GetAppDomain());
+ }
+ }
+ }
+ HELPER_METHOD_FRAME_END();
+
+ return (MethodBody*)OBJECTREFToObject(gc.MethodBodyObj);
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, RuntimeMethodHandle::IsConstructor, MethodDesc *pMethod)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMethod));
+ }
+ CONTRACTL_END;
+
+ BOOL ret = FALSE;
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
+ ret = (BOOL)pMethod->IsClassConstructorOrCtor();
+ END_SO_INTOLERANT_CODE;
+ FC_RETURN_BOOL(ret);
+}
+FCIMPLEND
+
+FCIMPL1(Object*, RuntimeMethodHandle::GetLoaderAllocator, MethodDesc *pMethod)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF loaderAllocator = NULL;
+
+ if (!pMethod)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(loaderAllocator);
+
+ LoaderAllocator *pLoaderAllocator = pMethod->GetLoaderAllocator();
+ loaderAllocator = pLoaderAllocator->GetExposedObject();
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(loaderAllocator);
+}
+FCIMPLEND
+
+//*********************************************************************************************
+//*********************************************************************************************
+//*********************************************************************************************
+
+FCIMPL1(StringObject*, RuntimeFieldHandle::GetName, ReflectFieldObject *pFieldUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTFIELDREF refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE);
+ if (!refField)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ FieldDesc *pField = refField->GetField();
+
+ STRINGREF refString = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refField);
+ {
+ refString = StringObject::NewString(pField->GetName());
+ }
+ HELPER_METHOD_FRAME_END();
+ return (StringObject*)OBJECTREFToObject(refString);
+}
+FCIMPLEND
+
+FCIMPL1(LPCUTF8, RuntimeFieldHandle::GetUtf8Name, FieldDesc *pField) {
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pField));
+ }
+ CONTRACTL_END;
+
+ LPCUTF8 szFieldName;
+
+ if (FAILED(pField->GetName_NoThrow(&szFieldName)))
+ {
+ FCThrow(kBadImageFormatException);
+ }
+ return szFieldName;
+}
+FCIMPLEND
+
+FCIMPL2(FC_BOOL_RET, RuntimeFieldHandle::MatchesNameHash, FieldDesc * pField, ULONG hash)
+{
+ FCALL_CONTRACT;
+
+ FC_RETURN_BOOL(pField->MightHaveName(hash));
+}
+FCIMPLEND
+
+FCIMPL1(INT32, RuntimeFieldHandle::GetAttributes, FieldDesc *pField) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ if (!pField)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ INT32 ret = 0;
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
+ ret = (INT32)pField->GetAttributes();
+ END_SO_INTOLERANT_CODE;
+ return ret;
+}
+FCIMPLEND
+
+FCIMPL1(ReflectClassBaseObject*, RuntimeFieldHandle::GetApproxDeclaringType, FieldDesc *pField) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ if (!pField)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle th = TypeHandle(pField->GetApproxEnclosingMethodTable()); // <REVISIT_TODO> this needs to be checked - see bug 184355 </REVISIT_TODO>
+ RETURN_CLASS_OBJECT(th, NULL);
+}
+FCIMPLEND
+
+FCIMPL1(INT32, RuntimeFieldHandle::GetToken, ReflectFieldObject *pFieldUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTFIELDREF refField = (REFLECTFIELDREF)ObjectToOBJECTREF(pFieldUNSAFE);
+ if (!refField)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ FieldDesc *pField = refField->GetField();
+
+ INT32 tkFieldDef = (INT32)pField->GetMemberDef();
+ _ASSERTE(!IsNilToken(tkFieldDef) || tkFieldDef == mdFieldDefNil);
+ return tkFieldDef;
+}
+FCIMPLEND
+
+FCIMPL2(FieldDesc*, RuntimeFieldHandle::GetStaticFieldForGenericType, FieldDesc *pField, ReflectClassBaseObject *pDeclaringTypeUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refDeclaringType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pDeclaringTypeUNSAFE);
+
+ if ((refDeclaringType == NULL) || (pField == NULL))
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ TypeHandle declaringType = refDeclaringType->GetType();
+
+ if (!pField)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+ if (declaringType.IsTypeDesc())
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+ MethodTable *pMT = declaringType.AsMethodTable();
+
+ _ASSERTE(pField->IsStatic());
+ if (pMT->HasGenericsStaticsInfo())
+ pField = pMT->GetFieldDescByIndex(pField->GetApproxEnclosingMethodTable()->GetIndexForFieldDesc(pField));
+ _ASSERTE(!pField->IsSharedByGenericInstantiations());
+ _ASSERTE(pField->GetEnclosingMethodTable() == pMT);
+
+ return pField;
+}
+FCIMPLEND
+
+FCIMPL1(ReflectModuleBaseObject*, AssemblyHandle::GetManifestModule, AssemblyBaseObject* pAssemblyUNSAFE) {
+ FCALL_CONTRACT;
+
+ ASSEMBLYREF refAssembly = (ASSEMBLYREF)ObjectToOBJECTREF(pAssemblyUNSAFE);
+
+ if (refAssembly == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ DomainAssembly *pAssembly = refAssembly->GetDomainAssembly();
+ Assembly* currentAssembly = pAssembly->GetCurrentAssembly();
+
+ if (currentAssembly == NULL)
+ return NULL;
+
+ Module *pModule = currentAssembly->GetManifestModule();
+ DomainFile * pDomainFile = pModule->FindDomainFile(GetAppDomain());
+
+#ifdef _DEBUG
+ OBJECTREF orModule;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refAssembly);
+ orModule = (pDomainFile != NULL) ? pDomainFile->GetExposedModuleObjectIfExists() : NULL;
+ if (orModule == NULL)
+ orModule = pModule->GetExposedObject();
+#else
+ OBJECTREF orModule = (pDomainFile != NULL) ? pDomainFile->GetExposedModuleObjectIfExists() : NULL;
+ if (orModule != NULL)
+ return (ReflectModuleBaseObject*)OBJECTREFToObject(orModule);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refAssembly);
+ orModule = pModule->GetExposedObject();
+#endif
+
+ HELPER_METHOD_FRAME_END();
+ return (ReflectModuleBaseObject*)OBJECTREFToObject(orModule);
+
+}
+FCIMPLEND
+
+FCIMPL1(INT32, AssemblyHandle::GetToken, AssemblyBaseObject* pAssemblyUNSAFE) {
+ FCALL_CONTRACT;
+
+ ASSEMBLYREF refAssembly = (ASSEMBLYREF)ObjectToOBJECTREF(pAssemblyUNSAFE);
+
+ if (refAssembly == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ DomainAssembly *pAssembly = refAssembly->GetDomainAssembly();
+ mdAssembly token = mdAssemblyNil;
+
+ IMDInternalImport *mdImport = pAssembly->GetCurrentAssembly()->GetManifestImport();
+
+ if (mdImport != 0)
+ {
+ if (FAILED(mdImport->GetAssemblyFromScope(&token)))
+ {
+ FCThrow(kBadImageFormatException);
+ }
+ }
+
+ return token;
+}
+FCIMPLEND
+
+#ifdef FEATURE_APTCA
+FCIMPL2(FC_BOOL_RET, AssemblyHandle::AptcaCheck, AssemblyBaseObject* pTargetAssemblyUNSAFE, AssemblyBaseObject* pSourceAssemblyUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ ASSEMBLYREF refTargetAssembly = (ASSEMBLYREF)ObjectToOBJECTREF(pTargetAssemblyUNSAFE);
+ ASSEMBLYREF refSourceAssembly = (ASSEMBLYREF)ObjectToOBJECTREF(pSourceAssemblyUNSAFE);
+
+ if ((refTargetAssembly == NULL) || (refSourceAssembly == NULL))
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ DomainAssembly *pTargetAssembly = refTargetAssembly->GetDomainAssembly();
+ DomainAssembly *pSourceAssembly = refSourceAssembly->GetDomainAssembly();
+
+ if (pTargetAssembly == pSourceAssembly)
+ FC_RETURN_BOOL(TRUE);
+
+ BOOL bResult = TRUE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refSourceAssembly, refTargetAssembly);
+ {
+ bResult = ( pTargetAssembly->GetAssembly()->AllowUntrustedCaller() || // target assembly allows untrusted callers unconditionally
+ pSourceAssembly->GetSecurityDescriptor()->IsFullyTrusted());
+ }
+ HELPER_METHOD_FRAME_END();
+
+ FC_RETURN_BOOL(bResult);
+}
+FCIMPLEND
+#endif // FEATURE_APTCA
+
+void QCALLTYPE ModuleHandle::GetPEKind(QCall::ModuleHandle pModule, DWORD* pdwPEKind, DWORD* pdwMachine)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ pModule->GetFile()->GetPEKindAndMachine(pdwPEKind, pdwMachine);
+ END_QCALL;
+}
+
+FCIMPL1(INT32, ModuleHandle::GetMDStreamVersion, ReflectModuleBaseObject * pModuleUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTMODULEBASEREF refModule = (REFLECTMODULEBASEREF)ObjectToOBJECTREF(pModuleUNSAFE);
+
+ if (refModule == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ Module *pModule = refModule->GetModule();
+
+ if (pModule->IsResource())
+ return 0;
+
+ return pModule->GetMDImport()->GetMetadataStreamVersion();
+}
+FCIMPLEND
+
+void QCALLTYPE ModuleHandle::GetModuleType(QCall::ModuleHandle pModule, QCall::ObjectHandleOnStack retType)
+{
+ QCALL_CONTRACT;
+
+ TypeHandle globalTypeHandle = TypeHandle();
+
+ BEGIN_QCALL;
+
+ EX_TRY
+ {
+ globalTypeHandle = TypeHandle(pModule->GetGlobalMethodTable());
+ }
+ EX_SWALLOW_NONTRANSIENT;
+
+ if (!globalTypeHandle.IsNull())
+ {
+ GCX_COOP();
+ retType.Set(globalTypeHandle.GetManagedClassObject());
+ }
+
+ END_QCALL;
+
+ return;
+}
+
+FCIMPL1(INT32, ModuleHandle::GetToken, ReflectModuleBaseObject * pModuleUNSAFE) {
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTMODULEBASEREF refModule = (REFLECTMODULEBASEREF)ObjectToOBJECTREF(pModuleUNSAFE);
+
+ if (refModule == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ Module *pModule = refModule->GetModule();
+
+ if (pModule->IsResource())
+ return mdModuleNil;
+
+ return pModule->GetMDImport()->GetModuleFromScope();
+}
+FCIMPLEND
+
+FCIMPL1(IMDInternalImport*, ModuleHandle::GetMetadataImport, ReflectModuleBaseObject * pModuleUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTMODULEBASEREF refModule = (REFLECTMODULEBASEREF)ObjectToOBJECTREF(pModuleUNSAFE);
+
+ if (refModule == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ Module *pModule = refModule->GetModule();
+
+ if (pModule->IsResource())
+ return NULL;
+
+ return pModule->GetMDImport();
+}
+FCIMPLEND
+
+BOOL QCALLTYPE ModuleHandle::ContainsPropertyMatchingHash(QCall::ModuleHandle pModule, INT32 tkProperty, ULONG hash)
+{
+ QCALL_CONTRACT;
+
+ BOOL fContains = TRUE;
+
+ BEGIN_QCALL;
+
+ fContains = pModule->MightContainMatchingProperty(tkProperty, hash);
+
+ END_QCALL;
+
+ return fContains;
+}
+
+void QCALLTYPE ModuleHandle::ResolveType(QCall::ModuleHandle pModule, INT32 tkType, TypeHandle *typeArgs, INT32 typeArgsCount, TypeHandle *methodArgs, INT32 methodArgsCount, QCall::ObjectHandleOnStack retType)
+{
+ QCALL_CONTRACT;
+
+ TypeHandle typeHandle;
+
+ BEGIN_QCALL;
+
+ _ASSERTE(!IsNilToken(tkType));
+
+ SigTypeContext typeContext(Instantiation(typeArgs, typeArgsCount), Instantiation(methodArgs, methodArgsCount));
+ typeHandle = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, tkType, &typeContext,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+
+ GCX_COOP();
+ retType.Set(typeHandle.GetManagedClassObject());
+
+ END_QCALL;
+
+ return;
+}
+
+MethodDesc *QCALLTYPE ModuleHandle::ResolveMethod(QCall::ModuleHandle pModule, INT32 tkMemberRef, TypeHandle *typeArgs, INT32 typeArgsCount, TypeHandle *methodArgs, INT32 methodArgsCount)
+{
+ QCALL_CONTRACT;
+
+ MethodDesc* pMD = NULL;
+
+ BEGIN_QCALL;
+
+ _ASSERTE(!IsNilToken(tkMemberRef));
+
+ BOOL strictMetadataChecks = (TypeFromToken(tkMemberRef) == mdtMethodSpec);
+
+ SigTypeContext typeContext(Instantiation(typeArgs, typeArgsCount), Instantiation(methodArgs, methodArgsCount));
+ pMD = MemberLoader::GetMethodDescFromMemberDefOrRefOrSpec(pModule, tkMemberRef, &typeContext, strictMetadataChecks, FALSE);
+
+ // This will get us the instantiating or unboxing stub if needed
+ pMD = MethodDesc::FindOrCreateAssociatedMethodDescForReflection(pMD, pMD->GetMethodTable(), pMD->GetMethodInstantiation());
+
+ END_QCALL;
+
+ return pMD;
+}
+
+void QCALLTYPE ModuleHandle::ResolveField(QCall::ModuleHandle pModule, INT32 tkMemberRef, TypeHandle *typeArgs, INT32 typeArgsCount, TypeHandle *methodArgs, INT32 methodArgsCount, QCall::ObjectHandleOnStack retField)
+{
+ QCALL_CONTRACT;
+
+ FieldDesc* pField = NULL;
+
+ BEGIN_QCALL;
+
+ _ASSERTE(!IsNilToken(tkMemberRef));
+
+ SigTypeContext typeContext(Instantiation(typeArgs, typeArgsCount), Instantiation(methodArgs, methodArgsCount));
+ pField = MemberLoader::GetFieldDescFromMemberDefOrRef(pModule, tkMemberRef, &typeContext, FALSE);
+ GCX_COOP();
+ retField.Set(pField->GetStubFieldInfo());
+
+ END_QCALL;
+
+ return;
+}
+
+void QCALLTYPE ModuleHandle::GetAssembly(QCall::ModuleHandle pModule, QCall::ObjectHandleOnStack retAssembly)
+{
+ QCALL_CONTRACT;
+
+ DomainAssembly *pAssembly = NULL;
+
+ BEGIN_QCALL;
+ pAssembly = pModule->GetDomainAssembly();
+
+ GCX_COOP();
+ retAssembly.Set(pAssembly->GetExposedAssemblyObject());
+ END_QCALL;
+
+ return;
+}
+
+FCIMPL5(ReflectMethodObject*, ModuleHandle::GetDynamicMethod, ReflectMethodObject *pMethodUNSAFE, ReflectModuleBaseObject *pModuleUNSAFE, StringObject *name, U1Array *sig, Object *resolver) {
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(name));
+ PRECONDITION(CheckPointer(sig));
+ }
+ CONTRACTL_END;
+
+ DynamicMethodDesc *pNewMD = NULL;
+
+ struct
+ {
+ STRINGREF nameRef;
+ OBJECTREF resolverRef;
+ OBJECTREF methodRef;
+ REFLECTMETHODREF retMethod;
+ REFLECTMODULEBASEREF refModule;
+ } gc;
+ gc.nameRef = (STRINGREF)name;
+ gc.resolverRef = (OBJECTREF)resolver;
+ gc.methodRef = ObjectToOBJECTREF(pMethodUNSAFE);
+ gc.retMethod = NULL;
+ gc.refModule = (REFLECTMODULEBASEREF)ObjectToOBJECTREF(pModuleUNSAFE);
+
+ if (gc.refModule == NULL)
+ FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
+
+ Module *pModule = gc.refModule->GetModule();
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ DomainFile *pDomainModule = pModule->GetDomainFile();
+
+ U1ARRAYREF dataArray = (U1ARRAYREF)sig;
+ DWORD sigSize = dataArray->GetNumComponents();
+ NewHolder<BYTE> pSig(new BYTE[sigSize]);
+ memcpy(pSig, dataArray->GetDataPtr(), sigSize);
+
+ DWORD length = gc.nameRef->GetStringLength();
+ NewArrayHolder<char> pName(new char[(length + 1) * 2]);
+ pName[0] = '\0';
+ length = WszWideCharToMultiByte(CP_UTF8, 0, gc.nameRef->GetBuffer(), length, pName, (length + 1) * 2 - sizeof(char), NULL, NULL);
+ if (length)
+ pName[length / sizeof(char)] = '\0';
+
+ DynamicMethodTable *pMTForDynamicMethods = pDomainModule->GetDynamicMethodTable();
+ pNewMD = pMTForDynamicMethods->GetDynamicMethod(pSig, sigSize, pName);
+ _ASSERTE(pNewMD != NULL);
+ // pNewMD now owns pSig and pName.
+ pSig.SuppressRelease();
+ pName.SuppressRelease();
+
+ // create a handle to hold the resolver objectref
+ OBJECTHANDLE resolverHandle = pDomainModule->GetAppDomain()->CreateLongWeakHandle(gc.resolverRef);
+ pNewMD->GetLCGMethodResolver()->SetManagedResolver(resolverHandle);
+ gc.retMethod = pNewMD->GetStubMethodInfo();
+ gc.retMethod->SetKeepAlive(gc.resolverRef);
+
+ LoaderAllocator *pLoaderAllocator = pModule->GetLoaderAllocator();
+
+ if (pLoaderAllocator->IsCollectible())
+ pLoaderAllocator->AddReference();
+
+ HELPER_METHOD_FRAME_END();
+
+ return (ReflectMethodObject*)OBJECTREFToObject(gc.retMethod);
+}
+FCIMPLEND
+
+void QCALLTYPE RuntimeMethodHandle::GetCallerType(QCall::StackCrawlMarkHandle pStackMark, QCall::ObjectHandleOnStack retType)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ GCX_COOP();
+ MethodTable *pMT = NULL;
+
+ pMT = SystemDomain::GetCallersType(pStackMark);
+
+ if (pMT != NULL)
+ retType.Set(pMT->GetManagedClassObject());
+
+ END_QCALL;
+
+ return;
+}
diff --git a/src/vm/runtimehandles.h b/src/vm/runtimehandles.h
new file mode 100644
index 0000000000..8432b2a90f
--- /dev/null
+++ b/src/vm/runtimehandles.h
@@ -0,0 +1,677 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef _RUNTIMEHANDLES_H_
+#define _RUNTIMEHANDLES_H_
+
+#include "object.h"
+#include "typehandle.h"
+#include "fcall.h"
+#include "field.h"
+#include "typectxt.h"
+#include "constrainedexecutionregion.h"
+
+typedef void* EnregisteredTypeHandle;
+class SignatureNative;
+
+// NOTE: These are defined in CallingConventions.cs.
+typedef enum ReflectionCallConv {
+ CALLCONV_Standard = 0x0001,
+ CALLCONV_VarArgs = 0x0002,
+ CALLCONV_Any = CALLCONV_Standard | CALLCONV_VarArgs,
+ CALLCONV_HasThis = 0x0020,
+ CALLCONV_ExplicitThis = 0x0040,
+ CALLCONV_ArgIteratorFlags = 0xFFFFFF00, // PRIVATE member -- cached ArgIterator flags -- Not exposed in CallingConventions.cs
+ CALLCONV_ArgIteratorFlags_Shift = 8,
+} ReflectionCallConv;
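+
+// For example, an ordinary instance method reports (CALLCONV_Standard | CALLCONV_HasThis),
+// while a varargs instance method reports (CALLCONV_VarArgs | CALLCONV_HasThis).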
+
+
+// Types used to expose method bodies via reflection.
+
+class ExceptionHandlingClause;
+class MethodBody;
+class LocalVariableInfo;
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<ExceptionHandlingClause> EXCEPTIONHANDLINGCLAUSEREF;
+typedef REF<MethodBody> METHODBODYREF;
+typedef REF<LocalVariableInfo> LOCALVARIABLEINFOREF;
+#else
+typedef DPTR(ExceptionHandlingClause) EXCEPTIONHANDLINGCLAUSEREF;
+typedef DPTR(MethodBody) METHODBODYREF;
+typedef DPTR(LocalVariableInfo) LOCALVARIABLEINFOREF;
+#endif
+
+class ExceptionHandlingClause : Object
+{
+private:
+ // Disallow creation and copy construction of these.
+ ExceptionHandlingClause() { }
+ ExceptionHandlingClause(ExceptionHandlingClause &r) { }
+
+public:
+ METHODBODYREF m_methodBody;
+ CorExceptionFlag m_flags;
+ INT32 m_tryOffset;
+ INT32 m_tryLength;
+ INT32 m_handlerOffset;
+ INT32 m_handlerLength;
+ mdTypeDef m_catchToken;
+ INT32 m_filterOffset;
+};
+
+class MethodBody : Object
+{
+private:
+ // Disallow creation and copy construction of these.
+ MethodBody() { }
+ MethodBody(MethodBody &r) { }
+
+public:
+ U1ARRAYREF m_IL;
+ PTRARRAYREF m_exceptionClauses;
+ PTRARRAYREF m_localVariables;
+ OBJECTREF m_methodBase;
+
+ INT32 m_localVarSigToken;
+ INT32 m_maxStackSize;
+ CLR_BOOL m_initLocals;
+};
+
+class LocalVariableInfo : Object
+{
+private:
+ // Disallow creation and copy construction of these.
+ LocalVariableInfo() { }
+ LocalVariableInfo(LocalVariableInfo &r) { }
+
+public:
+
+ REFLECTCLASSBASEREF GetType()
+ {
+ return (REFLECTCLASSBASEREF)m_type;
+ }
+
+ void SetType(OBJECTREF type)
+ {
+ SetObjectReference(&m_type, type, GetAppDomain());
+ }
+
+ OBJECTREF m_type;
+ INT32 m_bIsPinned;
+ INT32 m_localIndex;
+};
+
+class Utf8String {
+public:
+ static FCDECL3(FC_BOOL_RET, EqualsCaseSensitive, LPCUTF8 szLhs, LPCUTF8 szRhs, INT32 stringNumBytes);
+
+ static
+ BOOL QCALLTYPE EqualsCaseInsensitive(LPCUTF8 szLhs, LPCUTF8 szRhs, INT32 stringNumBytes);
+
+ static
+ ULONG QCALLTYPE HashCaseInsensitive(LPCUTF8 sz, INT32 stringNumBytes);
+};
+
+class RuntimeTypeHandle;
+
+typedef RuntimeTypeHandle FCALLRuntimeTypeHandle;
+#define FCALL_RTH_TO_REFLECTCLASS(x) (x).pRuntimeTypeDONOTUSEDIRECTLY
+
+class RuntimeTypeHandle {
+ ReflectClassBaseObject *pRuntimeTypeDONOTUSEDIRECTLY;
+public:
+
+ // Static method on RuntimeTypeHandle
+ static FCDECL1(Object*, Allocate, ReflectClassBaseObject *refType) ; //A.CI work
+ static FCDECL6(Object*, CreateInstance, ReflectClassBaseObject* refThisUNSAFE,
+ CLR_BOOL publicOnly,
+ CLR_BOOL securityOff,
+ CLR_BOOL *pbCanBeCached,
+ MethodDesc** pConstructor,
+ CLR_BOOL *pbNeedSecurityCheck);
+
+ static FCDECL2(Object*, CreateCaInstance, ReflectClassBaseObject* refCaType, ReflectMethodObject* pCtorUNSAFE);
+
+ static
+ void QCALLTYPE MakeByRef(EnregisteredTypeHandle pTypeHandle, QCall::ObjectHandleOnStack retType);
+
+ static
+ void QCALLTYPE MakePointer(EnregisteredTypeHandle pTypeHandle, QCall::ObjectHandleOnStack retType);
+
+ static
+ void QCALLTYPE MakeSZArray(EnregisteredTypeHandle pTypeHandle, QCall::ObjectHandleOnStack retType);
+
+ static
+ void QCALLTYPE MakeArray(EnregisteredTypeHandle pTypeHandle, INT32 rank, QCall::ObjectHandleOnStack retType);
+
+ static BOOL QCALLTYPE IsCollectible(EnregisteredTypeHandle pTypeHandle);
+
+ static FCDECL1(ReflectClassBaseObject*, GetRuntimeType, void *th);
+
+ static FCDECL1_V(ReflectClassBaseObject*, GetTypeFromHandle, FCALLRuntimeTypeHandle th);
+ static FCDECL1_V(EnregisteredTypeHandle, GetValueInternal, FCALLRuntimeTypeHandle RTH);
+
+ static FCDECL2(FC_BOOL_RET, TypeEQ, Object* left, Object* right);
+ static FCDECL2(FC_BOOL_RET, TypeNEQ, Object* left, Object* right);
+
+#ifndef FEATURE_CORECLR
+ static FCDECL2(FC_BOOL_RET, IsEquivalentTo, ReflectClassBaseObject *rtType1UNSAFE, ReflectClassBaseObject *rtType2UNSAFE);
+ static FCDECL1(FC_BOOL_RET, IsEquivalentType, ReflectClassBaseObject *rtTypeUNSAFE);
+#endif // !FEATURE_CORECLR
+
+#ifdef FEATURE_COMINTEROP
+ static FCDECL1(FC_BOOL_RET, IsWindowsRuntimeObjectType, ReflectClassBaseObject *rtTypeUNSAFE);
+ static FCDECL1(FC_BOOL_RET, IsTypeExportedToWindowsRuntime, ReflectClassBaseObject *rtTypeUNSAFE);
+#endif // FEATURE_COMINTEROP
+
+ static
+ void QCALLTYPE PrepareMemberInfoCache(EnregisteredTypeHandle pMemberInfoCache);
+
+ static
+ void QCALLTYPE ConstructName(EnregisteredTypeHandle pTypeHandle, DWORD format, QCall::StringHandleOnStack retString);
+
+ static
+ void QCALLTYPE GetTypeByNameUsingCARules(LPCWSTR pwzClassName, QCall::ModuleHandle pModule, QCall::ObjectHandleOnStack retType);
+
+ static
+ void QCALLTYPE GetTypeByName(LPCWSTR pwzClassName, BOOL bThrowOnError, BOOL bIgnoreCase, BOOL bReflectionOnly,
+ QCall::StackCrawlMarkHandle pStackMark,
+#ifdef FEATURE_HOSTED_BINDER
+ ICLRPrivBinder * pPrivHostBinder,
+#endif
+ BOOL bLoadTypeFromPartialNameHack, QCall::ObjectHandleOnStack retType);
+
+ static FCDECL1(AssemblyBaseObject*, GetAssembly, ReflectClassBaseObject *pType);
+ static FCDECL1(ReflectClassBaseObject*, GetBaseType, ReflectClassBaseObject* pType);
+ static FCDECL1(ReflectModuleBaseObject*, GetModule, ReflectClassBaseObject* pType);
+ static FCDECL1(INT32, GetAttributes, ReflectClassBaseObject* pType);
+ static FCDECL1(INT32, GetToken, ReflectClassBaseObject* pType);
+ static FCDECL1(LPCUTF8, GetUtf8Name, ReflectClassBaseObject* pType);
+ static FCDECL1(INT32, GetArrayRank, ReflectClassBaseObject* pType);
+
+ static FCDECL1(ReflectMethodObject*, GetDeclaringMethod, ReflectClassBaseObject *pType);
+
+ static
+ void QCALLTYPE GetDefaultConstructor(EnregisteredTypeHandle pTypeHandle, QCall::ObjectHandleOnStack retMethod);
+
+ static FCDECL1(ReflectClassBaseObject*, GetDeclaringType, ReflectClassBaseObject* pType);
+#ifdef FEATURE_REMOTING
+ static FCDECL1(FC_BOOL_RET, IsContextful, ReflectClassBaseObject* pType);
+#endif
+ static FCDECL1(FC_BOOL_RET, IsValueType, ReflectClassBaseObject* pType);
+ static FCDECL1(FC_BOOL_RET, IsInterface, ReflectClassBaseObject* pType);
+
+ static
+ BOOL QCALLTYPE IsVisible(EnregisteredTypeHandle pTypeHandle);
+
+ static
+ BOOL QCALLTYPE IsSecurityCritical(EnregisteredTypeHandle pTypeHandle);
+
+ static
+ BOOL QCALLTYPE IsSecuritySafeCritical(EnregisteredTypeHandle pTypeHandle);
+
+ static
+ BOOL QCALLTYPE IsSecurityTransparent(EnregisteredTypeHandle pTypeHandle);
+
+ static FCDECL1(FC_BOOL_RET, HasProxyAttribute, ReflectClassBaseObject *pType);
+ static FCDECL2(FC_BOOL_RET, IsComObject, ReflectClassBaseObject *pType, CLR_BOOL isGenericCOM);
+ static FCDECL2(FC_BOOL_RET, CanCastTo, ReflectClassBaseObject *pType, ReflectClassBaseObject *pTarget);
+ static FCDECL2(FC_BOOL_RET, IsInstanceOfType, ReflectClassBaseObject *pType, Object *object);
+
+ static FCDECL6(FC_BOOL_RET, SatisfiesConstraints, PTR_ReflectClassBaseObject pGenericParameter, TypeHandle *typeContextArgs, INT32 typeContextCount, TypeHandle *methodContextArgs, INT32 methodContextCount, PTR_ReflectClassBaseObject pGenericArgument);
+ static
+ FCDECL1(FC_BOOL_RET, HasInstantiation, PTR_ReflectClassBaseObject pType);
+
+ static
+ FCDECL1(FC_BOOL_RET, IsGenericTypeDefinition, PTR_ReflectClassBaseObject pType);
+
+ static
+ FCDECL1(FC_BOOL_RET, IsGenericVariable, PTR_ReflectClassBaseObject pType);
+
+ static
+ FCDECL1(INT32, GetGenericVariableIndex, PTR_ReflectClassBaseObject pType);
+
+ static
+ FCDECL1(FC_BOOL_RET, ContainsGenericVariables, PTR_ReflectClassBaseObject pType);
+
+ static
+ void QCALLTYPE GetInstantiation(EnregisteredTypeHandle pTypeHandle, QCall::ObjectHandleOnStack retType, BOOL fAsRuntimeTypeArray);
+
+ static
+ void QCALLTYPE Instantiate(EnregisteredTypeHandle pTypeHandle, TypeHandle * pInstArray, INT32 cInstArray, QCall::ObjectHandleOnStack retType);
+
+ static
+ void QCALLTYPE GetGenericTypeDefinition(EnregisteredTypeHandle pTypeHandle, QCall::ObjectHandleOnStack retType);
+
+ static FCDECL2(FC_BOOL_RET, CompareCanonicalHandles, PTR_ReflectClassBaseObject pLeft, PTR_ReflectClassBaseObject pRight);
+
+ static FCDECL1(PtrArray*, GetInterfaces, ReflectClassBaseObject *pType);
+
+ static
+ void QCALLTYPE GetConstraints(EnregisteredTypeHandle pTypeHandle, QCall::ObjectHandleOnStack retTypes);
+
+ static
+ PVOID QCALLTYPE GetGCHandle(EnregisteredTypeHandle pTypeHandle, INT32 handleType);
+
+ static FCDECL1(INT32, GetCorElementType, PTR_ReflectClassBaseObject pType);
+ static FCDECL1(ReflectClassBaseObject*, GetElementType, ReflectClassBaseObject* pType);
+
+ static FCDECL2(MethodDesc*, GetMethodAt, PTR_ReflectClassBaseObject pType, INT32 slot);
+ static FCDECL1(INT32, GetNumVirtuals, ReflectClassBaseObject *pType);
+
+ static
+ void QCALLTYPE VerifyInterfaceIsImplemented(EnregisteredTypeHandle pTypeHandle, EnregisteredTypeHandle pIFaceHandle);
+
+ static
+ INT32 QCALLTYPE GetInterfaceMethodImplementationSlot(EnregisteredTypeHandle pTypeHandle, EnregisteredTypeHandle pOwner, MethodDesc * pMD);
+
+ static FCDECL3(FC_BOOL_RET, GetFields, ReflectClassBaseObject *pType, INT32 **result, INT32 *pCount);
+
+ static FCDECL1(MethodDesc *, GetFirstIntroducedMethod, ReflectClassBaseObject* pType);
+ static FCDECL1(void, GetNextIntroducedMethod, MethodDesc **ppMethod);
+
+ static FCDECL2(Object*, CreateInstanceForGenericType, ReflectClassBaseObject* pType
+ , ReflectClassBaseObject* parameterType );
+
+ static
+ FCDECL1(IMDInternalImport*, GetMetadataImport, ReflectClassBaseObject * pModuleUNSAFE);
+};
+
+class RuntimeMethodHandle {
+
+public:
+ static FCDECL1(ReflectMethodObject*, GetCurrentMethod, StackCrawlMark* stackMark);
+
+ static FCDECL4(Object*, InvokeMethod, Object *target, PTRArray *objs, SignatureNative* pSig, CLR_BOOL fConstructor);
+
+ struct StreamingContextData {
+        Object * additionalContext; // additionalContext was changed from OBJECTREF to Object to avoid having a
+        INT32 contextStates; // constructor in this struct. GCC doesn't allow structs with constructors to be
+    }; // passed by value.
+
+ // *******************************************************************************************
+ // Keep these in sync with the version in bcl\system\runtime\serialization\streamingcontext.cs
+ // *******************************************************************************************
+ enum StreamingContextStates
+ {
+ CONTEXTSTATE_CrossProcess = 0x01,
+ CONTEXTSTATE_CrossMachine = 0x02,
+ CONTEXTSTATE_File = 0x04,
+ CONTEXTSTATE_Persistence = 0x08,
+ CONTEXTSTATE_Remoting = 0x10,
+ CONTEXTSTATE_Other = 0x20,
+ CONTEXTSTATE_Clone = 0x40,
+ CONTEXTSTATE_CrossAppDomain = 0x80,
+ CONTEXTSTATE_All = 0xFF
+ };
+
+    // STATIC IMPLEMENTATION
+ static OBJECTREF InvokeMethod_Internal(
+ MethodDesc *pMethod, OBJECTREF targetUNSAFE, INT32 attrs, OBJECTREF binderUNSAFE, PTRARRAYREF objsUNSAFE, OBJECTREF localeUNSAFE,
+ BOOL isBinderDefault, Assembly *caller, Assembly *reflectedClassAssembly, TypeHandle declaringType, SignatureNative* pSig, BOOL verifyAccess);
+
+ static
+ BOOL QCALLTYPE IsSecurityCritical(MethodDesc *pMD);
+
+ static
+ BOOL QCALLTYPE IsSecuritySafeCritical(MethodDesc *pMD);
+
+ static
+ BOOL QCALLTYPE IsSecurityTransparent(MethodDesc *pMD);
+
+ static FCDECL2(FC_BOOL_RET, IsTokenSecurityTransparent, ReflectModuleBaseObject *pModuleUNSAFE, INT32 tkToken);
+
+ static
+ BOOL QCALLTYPE IsCAVisibleFromDecoratedType(
+ EnregisteredTypeHandle targetTypeHandle,
+ MethodDesc * pTargetCtor,
+ EnregisteredTypeHandle sourceTypeHandle,
+ QCall::ModuleHandle sourceModuleHandle);
+
+ static FCDECL3(void, CheckLinktimeDemands, ReflectMethodObject *pMethodUNSAFE, ReflectModuleBaseObject *pModuleUNSAFE, CLR_BOOL isDecoratedTargetSecurityTransparent);
+ static FCDECL4(void, SerializationInvoke, ReflectMethodObject *pMethodUNSAFE, Object* targetUNSAFE,
+ Object* serializationInfoUNSAFE, struct StreamingContextData * pContext);
+
+ static
+ void QCALLTYPE ConstructInstantiation(MethodDesc * pMethod, DWORD format, QCall::StringHandleOnStack retString);
+
+ static
+ void * QCALLTYPE GetFunctionPointer(MethodDesc * pMethod);
+
+ static FCDECL1(INT32, GetAttributes, MethodDesc *pMethod);
+ static FCDECL1(INT32, GetImplAttributes, ReflectMethodObject *pMethodUNSAFE);
+ static FCDECL1(ReflectClassBaseObject*, GetDeclaringType, MethodDesc *pMethod);
+ static FCDECL1(INT32, GetSlot, MethodDesc *pMethod);
+ static FCDECL1(INT32, GetMethodDef, ReflectMethodObject *pMethodUNSAFE);
+ static FCDECL1(StringObject*, GetName, MethodDesc *pMethod);
+ static FCDECL1(LPCUTF8, GetUtf8Name, MethodDesc *pMethod);
+ static FCDECL2(FC_BOOL_RET, MatchesNameHash, MethodDesc * pMethod, ULONG hash);
+
+ static
+ void QCALLTYPE GetMethodInstantiation(MethodDesc * pMethod, QCall::ObjectHandleOnStack retTypes, BOOL fAsRuntimeTypeArray);
+
+ static
+ FCDECL1(FC_BOOL_RET, HasMethodInstantiation, MethodDesc *pMethod);
+
+ static
+ FCDECL1(FC_BOOL_RET, IsGenericMethodDefinition, MethodDesc *pMethod);
+
+ static
+ FCDECL1(FC_BOOL_RET, IsTypicalMethodDefinition, ReflectMethodObject *pMethodUNSAFE);
+
+ static
+ void QCALLTYPE GetTypicalMethodDefinition(MethodDesc * pMethod, QCall::ObjectHandleOnStack refMethod);
+
+ static
+ void QCALLTYPE StripMethodInstantiation(MethodDesc * pMethod, QCall::ObjectHandleOnStack refMethod);
+
+ // see comment in the cpp file
+ static FCDECL3(MethodDesc*, GetStubIfNeeded, MethodDesc *pMethod, ReflectClassBaseObject *pType, PtrArray* instArray);
+ static FCDECL2(MethodDesc*, GetMethodFromCanonical, MethodDesc *pMethod, PTR_ReflectClassBaseObject pType);
+
+ static
+ FCDECL1(FC_BOOL_RET, IsDynamicMethod, MethodDesc * pMethod);
+
+ static
+ FCDECL1(Object*, GetResolver, MethodDesc * pMethod);
+
+ static
+ void QCALLTYPE Destroy(MethodDesc * pMethod);
+
+ static
+ void QCALLTYPE GetCallerType(QCall::StackCrawlMarkHandle pStackMark, QCall::ObjectHandleOnStack retType);
+
+ static FCDECL2(MethodBody*, GetMethodBody, ReflectMethodObject *pMethodUNSAFE, PTR_ReflectClassBaseObject pDeclaringType);
+
+ static FCDECL1(FC_BOOL_RET, IsConstructor, MethodDesc *pMethod);
+
+ static FCDECL1(Object*, GetLoaderAllocator, MethodDesc *pMethod);
+};
+
+class RuntimeFieldHandle {
+
+public:
+ static FCDECL5(Object*, GetValue, ReflectFieldObject *pFieldUNSAFE, Object *instanceUNSAFE, ReflectClassBaseObject *pFieldType, ReflectClassBaseObject *pDeclaringType, CLR_BOOL *pDomainInitialized);
+ static FCDECL7(void, SetValue, ReflectFieldObject *pFieldUNSAFE, Object *targetUNSAFE, Object *valueUNSAFE, ReflectClassBaseObject *pFieldType, DWORD attr, ReflectClassBaseObject *pDeclaringType, CLR_BOOL *pDomainInitialized);
+ static FCDECL4(Object*, GetValueDirect, ReflectFieldObject *pFieldUNSAFE, ReflectClassBaseObject *pFieldType, TypedByRef *pTarget, ReflectClassBaseObject *pDeclaringType);
+ static FCDECL5(void, SetValueDirect, ReflectFieldObject *pFieldUNSAFE, ReflectClassBaseObject *pFieldType, TypedByRef *pTarget, Object *valueUNSAFE, ReflectClassBaseObject *pContextType);
+ static FCDECL1(StringObject*, GetName, ReflectFieldObject *pFieldUNSAFE);
+ static FCDECL1(LPCUTF8, GetUtf8Name, FieldDesc *pField);
+ static FCDECL2(FC_BOOL_RET, MatchesNameHash, FieldDesc * pField, ULONG hash);
+
+ static FCDECL1(INT32, GetAttributes, FieldDesc *pField);
+ static FCDECL1(ReflectClassBaseObject*, GetApproxDeclaringType, FieldDesc *pField);
+ static FCDECL1(INT32, GetToken, ReflectFieldObject *pFieldUNSAFE);
+ static FCDECL2(FieldDesc*, GetStaticFieldForGenericType, FieldDesc *pField, ReflectClassBaseObject *pDeclaringType);
+ static FCDECL1(FC_BOOL_RET, AcquiresContextFromThis, FieldDesc *pField);
+
+ static
+ BOOL QCALLTYPE IsSecurityCritical(FieldDesc *pFD);
+
+ static
+ BOOL QCALLTYPE IsSecuritySafeCritical(FieldDesc *pFD);
+
+ static
+ BOOL QCALLTYPE IsSecurityTransparent(FieldDesc *pFD);
+
+ static
+ void QCALLTYPE CheckAttributeAccess(FieldDesc *pFD, QCall::ModuleHandle pModule);
+};
+
+class ModuleHandle {
+
+public:
+ static FCDECL5(ReflectMethodObject*, GetDynamicMethod, ReflectMethodObject *pMethodUNSAFE, ReflectModuleBaseObject *pModuleUNSAFE, StringObject *name, U1Array *sig, Object *resolver);
+ static FCDECL1(INT32, GetToken, ReflectModuleBaseObject *pModuleUNSAFE);
+
+ static
+ void QCALLTYPE GetModuleType(QCall::ModuleHandle pModule, QCall::ObjectHandleOnStack retType);
+
+ static
+ FCDECL1(IMDInternalImport*, GetMetadataImport, ReflectModuleBaseObject * pModuleUNSAFE);
+
+ static
+ BOOL QCALLTYPE ContainsPropertyMatchingHash(QCall::ModuleHandle pModule, INT32 tkProperty, ULONG hash);
+
+ static
+ void QCALLTYPE ResolveType(QCall::ModuleHandle pModule, INT32 tkType, TypeHandle *typeArgs, INT32 typeArgsCount, TypeHandle *methodArgs, INT32 methodArgsCount, QCall::ObjectHandleOnStack retType);
+
+ static
+ MethodDesc * QCALLTYPE ResolveMethod(QCall::ModuleHandle pModule, INT32 tkMemberRef, TypeHandle *typeArgs, INT32 typeArgsCount, TypeHandle *methodArgs, INT32 methodArgsCount);
+
+ static
+ void QCALLTYPE ResolveField(QCall::ModuleHandle pModule, INT32 tkMemberRef, TypeHandle *typeArgs, INT32 typeArgsCount, TypeHandle *methodArgs, INT32 methodArgsCount, QCall::ObjectHandleOnStack retField);
+
+ static
+ void QCALLTYPE GetAssembly(QCall::ModuleHandle pModule, QCall::ObjectHandleOnStack retAssembly);
+
+ static
+ void QCALLTYPE GetPEKind(QCall::ModuleHandle pModule, DWORD* pdwPEKind, DWORD* pdwMachine);
+
+ static
+ FCDECL1(INT32, GetMDStreamVersion, ReflectModuleBaseObject * pModuleUNSAFE);
+};
+
+class AssemblyHandle {
+
+public:
+ static FCDECL1(ReflectModuleBaseObject*, GetManifestModule, AssemblyBaseObject *pAssemblyUNSAFE);
+
+ static FCDECL1(INT32, GetToken, AssemblyBaseObject *pAssemblyUNSAFE);
+#ifdef FEATURE_APTCA
+ static FCDECL2(FC_BOOL_RET, AptcaCheck, AssemblyBaseObject *pTargetAssemblyUNSAFE, AssemblyBaseObject *pSourceAssemblyUNSAFE);
+#endif // FEATURE_APTCA
+};
+
+class SignatureNative;
+
+typedef DPTR(SignatureNative) PTR_SignatureNative;
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<SignatureNative> SIGNATURENATIVEREF;
+#else
+typedef PTR_SignatureNative SIGNATURENATIVEREF;
+#endif
+
+class SignatureNative : public Object
+{
+ friend class RuntimeMethodHandle;
+ friend class ArgIteratorForMethodInvoke;
+
+public:
+ static FCDECL6(void, GetSignature,
+ SignatureNative* pSignatureNative,
+ PCCOR_SIGNATURE pCorSig, DWORD cCorSig,
+ FieldDesc *pFieldDesc, ReflectMethodObject *pMethodUNSAFE,
+ ReflectClassBaseObject *pDeclaringType);
+ static FCDECL3(Object *, GetCustomModifiers, SignatureNative* pSig, INT32 parameter, CLR_BOOL fRequired);
+ static FCDECL2(FC_BOOL_RET, CompareSig, SignatureNative* pLhs, SignatureNative* pRhs);
+ static FCDECL4(FC_BOOL_RET, CompareSigForAppCompat, SignatureNative* pLhs, ReflectClassBaseObject * pTypeLhs, SignatureNative* pRhs, ReflectClassBaseObject * pTypeRhs);
+
+
+ BOOL HasThis() { LIMITED_METHOD_CONTRACT; return (m_managedCallingConvention & CALLCONV_HasThis); }
+ INT32 NumFixedArgs() { WRAPPER_NO_CONTRACT; return m_PtrArrayarguments->GetNumComponents(); }
+ TypeHandle GetReturnTypeHandle()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ return ((REFLECTCLASSBASEREF)m_returnType)->GetType();
+ }
+
+ PCCOR_SIGNATURE GetCorSig() { LIMITED_METHOD_CONTRACT; return m_sig; }
+ DWORD GetCorSigSize() { LIMITED_METHOD_CONTRACT; return m_cSig; }
+ Module* GetModule() { WRAPPER_NO_CONTRACT; return GetDeclaringType().GetModule(); }
+
+ TypeHandle GetArgumentAt(INT32 position)
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refArgument = (REFLECTCLASSBASEREF)m_PtrArrayarguments->GetAt(position);
+ return refArgument->GetType();
+ }
+
+ DWORD GetArgIteratorFlags()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return VolatileLoad(&m_managedCallingConvention) >> CALLCONV_ArgIteratorFlags_Shift;
+ }
+
+ INT32 GetSizeOfArgStack()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_nSizeOfArgStack;
+ }
+
+ TypeHandle GetDeclaringType()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_declaringType->GetType();
+ }
+ MethodDesc* GetMethod()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pMethod;
+ }
+
+ const SigTypeContext * GetTypeContext(SigTypeContext *pTypeContext)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pMethod || !GetDeclaringType().IsNull());
+ if (m_pMethod)
+ return SigTypeContext::GetOptionalTypeContext(m_pMethod, GetDeclaringType(), pTypeContext);
+ else
+ return SigTypeContext::GetOptionalTypeContext(GetDeclaringType(), pTypeContext);
+ }
+
+private:
+ void SetReturnType(OBJECTREF returnType)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ SetObjectReference(&m_returnType, returnType, GetAppDomain());
+ }
+
+ void SetKeepAlive(OBJECTREF keepAlive)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ SetObjectReference(&m_keepalive, keepAlive, GetAppDomain());
+ }
+
+ void SetDeclaringType(REFLECTCLASSBASEREF declaringType)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ SetObjectReference((OBJECTREF*)&m_declaringType, (OBJECTREF)declaringType, GetAppDomain());
+ }
+
+ void SetArgumentArray(PTRARRAYREF ptrArrayarguments)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ SetObjectReference((OBJECTREF*)&m_PtrArrayarguments, (OBJECTREF)ptrArrayarguments, GetAppDomain());
+ }
+
+ void SetArgument(INT32 argument, OBJECTREF argumentType)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ m_PtrArrayarguments->SetAt(argument, argumentType);
+ }
+
+ void SetArgIteratorFlags(DWORD flags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return VolatileStore(&m_managedCallingConvention, (INT32)(m_managedCallingConvention | (flags << CALLCONV_ArgIteratorFlags_Shift)));
+ }
+
+ void SetSizeOfArgStack(INT32 nSizeOfArgStack)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_nSizeOfArgStack = nSizeOfArgStack;
+ }
+
+ void SetCallingConvention(INT32 mdCallingConvention)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if ((mdCallingConvention & IMAGE_CEE_CS_CALLCONV_MASK) == IMAGE_CEE_CS_CALLCONV_VARARG)
+ m_managedCallingConvention = CALLCONV_VarArgs;
+ else
+ m_managedCallingConvention = CALLCONV_Standard;
+
+ if ((mdCallingConvention & IMAGE_CEE_CS_CALLCONV_HASTHIS) != 0)
+ m_managedCallingConvention |= CALLCONV_HasThis;
+
+ if ((mdCallingConvention & IMAGE_CEE_CS_CALLCONV_EXPLICITTHIS) != 0)
+ m_managedCallingConvention |= CALLCONV_ExplicitThis;
+ }
+
+ // Mirrored in the managed world (System.Signature)
+ //
+ // this is the layout the classloader chooses by default for the managed struct.
+ //
+ PTRARRAYREF m_PtrArrayarguments;
+ REFLECTCLASSBASEREF m_declaringType;
+ OBJECTREF m_returnType;
+ OBJECTREF m_keepalive;
+ PCCOR_SIGNATURE m_sig;
+ INT32 m_managedCallingConvention;
+ INT32 m_nSizeOfArgStack;
+ DWORD m_cSig;
+ MethodDesc* m_pMethod;
+};
+
+class ReflectionPointer : public Object
+{
+public:
+ OBJECTREF _ptrType;
+ void * _ptr;
+};
+
+#endif
+
diff --git a/src/vm/rwlock.cpp b/src/vm/rwlock.cpp
new file mode 100644
index 0000000000..3ea81fb915
--- /dev/null
+++ b/src/vm/rwlock.cpp
@@ -0,0 +1,2947 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+//+-------------------------------------------------------------------
+//
+// File: RWLock.cpp
+//
+// Contents: Reader writer lock implementation that supports the
+// following features
+//                 such as for per-object synchronization.
+// such as per object synchronization.
+// 2. Supports timeout. This is a valuable feature
+// to detect deadlocks
+// 3. Supports caching of events. This allows
+// the events to be moved from least contentious
+// regions to the most contentious regions.
+// In other words, the number of events needed by
+//                 Reader-Writer locks is bounded by the number
+// of threads in the process.
+// 4. Supports nested locks by readers and writers
+// 5. Supports spin counts for avoiding context switches
+// on multi processor machines.
+// 6. Supports functionality for upgrading to a writer
+// lock with a return argument that indicates
+// intermediate writes. Downgrading from a writer
+// lock restores the state of the lock.
+// 7. Supports functionality to Release Lock for calling
+// app code. RestoreLock restores the lock state and
+// indicates intermediate writes.
+//              8. Recovers from most common failures, such as failed
+//                 creation of events. In other words, the lock maintains
+//                 consistent internal state and remains usable
+//
+//
+// Classes: CRWLock
+//
+//--------------------------------------------------------------------
+
+
+#include "common.h"
+#include "rwlock.h"
+#include "corhost.h"
+
+#ifdef FEATURE_RWLOCK
+
+// Reader increment
+#define READER 0x00000001
+// Max number of readers
+#define READERS_MASK 0x000003FF
+// Reader being signaled
+#define READER_SIGNALED 0x00000400
+// Writer being signaled
+#define WRITER_SIGNALED 0x00000800
+#define WRITER 0x00001000
+// Waiting reader increment
+#define WAITING_READER 0x00002000
+// Note: the width of the waiting-readers field must be less
+// than or equal to the width of the readers field
+#define WAITING_READERS_MASK 0x007FE000
+#define WAITING_READERS_SHIFT 13
+// Waiting writer increment
+#define WAITING_WRITER 0x00800000
+// Max number of waiting writers
+#define WAITING_WRITERS_MASK 0xFF800000
+// Events are being cached
+#define CACHING_EVENTS (READER_SIGNALED | WRITER_SIGNALED)
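+
+// Layout of the _dwState word implied by the masks above (a summary
+// derived from the defines; it adds no state of its own):
+//   bits  0- 9: count of readers holding the lock (READERS_MASK)
+//   bit     10: reader event has been signaled    (READER_SIGNALED)
+//   bit     11: writer event has been signaled    (WRITER_SIGNALED)
+//   bit     12: a writer holds the lock           (WRITER)
+//   bits 13-22: count of waiting readers          (WAITING_READERS_MASK)
+//   bits 23-31: count of waiting writers          (WAITING_WRITERS_MASK)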
+
+// Cookie flags
+#define UPGRADE_COOKIE 0x02000
+#define RELEASE_COOKIE 0x04000
+#define COOKIE_NONE 0x10000
+#define COOKIE_WRITER 0x20000
+#define COOKIE_READER 0x40000
+#define INVALID_COOKIE (~(UPGRADE_COOKIE | RELEASE_COOKIE | \
+ COOKIE_NONE | COOKIE_WRITER | COOKIE_READER))
+#define RWLOCK_MAX_ACQUIRE_COUNT 0xFFFF
+
+// globals
+Volatile<LONG> CRWLock::s_mostRecentLLockID = 0;
+Volatile<LONG> CRWLock::s_mostRecentULockID = -1;
+CrstStatic CRWLock::s_RWLockCrst;
+
+// Default values
+#ifdef _DEBUG
+DWORD gdwDefaultTimeout = 120000;
+#else //!_DEBUG
+DWORD gdwDefaultTimeout = INFINITE;
+#endif //_DEBUG
+const DWORD gdwReasonableTimeout = 120000;
+DWORD gdwDefaultSpinCount = 0;
+BOOL fBreakOnErrors = FALSE; // Temporarily break on errors
+
+// <REVISIT_TODO> REVISIT_TODO: Bad practice</REVISIT_TODO>
+#define HEAP_SERIALIZE 0
+#define RWLOCK_RECOVERY_FAILURE (0xC0000227L)
+
+// Catch GC holes
+#if _DEBUG
+#define VALIDATE_LOCK(pRWLock) ((Object *) (pRWLock))->Validate();
+#else // !_DEBUG
+#define VALIDATE_LOCK(pRWLock)
+#endif // _DEBUG
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::ProcessInit public
+//
+//  Synopsis:   Reads default values from registry and initializes
+//              process-wide data structures
+//
+//+-------------------------------------------------------------------
+void CRWLock::ProcessInit()
+{
+ CONTRACTL
+ {
+ THROWS; // From Crst.Init()
+ GC_NOTRIGGER;
+ PRECONDITION((g_SystemInfo.dwNumberOfProcessors != 0));
+ }
+ CONTRACTL_END;
+
+ gdwDefaultSpinCount = (g_SystemInfo.dwNumberOfProcessors != 1) ? 500 : 0;
+
+ PPEB peb = (PPEB) ClrTeb::GetProcessEnvironmentBlock();
+ DWORD dwTimeout = (DWORD)(peb->CriticalSectionTimeout.QuadPart/-10000000);
+ if (dwTimeout)
+ {
+ gdwDefaultTimeout = dwTimeout;
+ }
+
+ // Initialize the critical section used by the lock
+ // Can throw out of memory here.
+ s_RWLockCrst.Init(CrstRWLock, CRST_UNSAFE_ANYMODE);
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::CRWLock public
+//
+// Synopsis: Constructor
+//
+//+-------------------------------------------------------------------
+CRWLock::CRWLock()
+: _hWriterEvent(NULL),
+ _hReaderEvent(NULL),
+ _dwState(0),
+ _dwWriterID(0),
+ _dwWriterSeqNum(1),
+ _wWriterLevel(0)
+#ifdef RWLOCK_STATISTICS
+ ,
+ _dwReaderEntryCount(0),
+ _dwReaderContentionCount(0),
+ _dwWriterEntryCount(0),
+ _dwWriterContentionCount(0),
+ _dwEventsReleasedCount(0)
+#endif
+{
+
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ POSTCONDITION((_dwLLockID > 0));
+ }
+ CONTRACT_END;
+
+ LONG dwKnownLLockID;
+ LONG dwULockID = s_mostRecentULockID;
+ LONG dwLLockID = s_mostRecentLLockID;
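+    // Allocate a process-unique (upper, lower) lock id pair: the common
+    // case bumps the lower id with a lock-free compare-exchange; if the
+    // lower id is zero (initial state or wrap-around), take the Crst and
+    // advance the upper id instead.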
+ do
+ {
+ dwKnownLLockID = dwLLockID;
+ if(dwKnownLLockID != 0)
+ {
+ dwLLockID = RWInterlockedCompareExchange(&s_mostRecentLLockID, dwKnownLLockID+1, dwKnownLLockID);
+ }
+ else
+ {
+ CrstHolder ch(&s_RWLockCrst);
+
+ if(s_mostRecentLLockID == 0)
+ {
+ dwULockID = ++s_mostRecentULockID;
+ dwLLockID = s_mostRecentLLockID++;
+ dwKnownLLockID = dwLLockID;
+ }
+ else
+ {
+ dwULockID = s_mostRecentULockID;
+ dwLLockID = s_mostRecentLLockID;
+ }
+ }
+ } while(dwKnownLLockID != dwLLockID);
+
+ _dwLLockID = ++dwLLockID;
+ _dwULockID = dwULockID;
+
+ RETURN;
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::Cleanup public
+//
+//  Synopsis:   Cleans up state
+//
+//+-------------------------------------------------------------------
+void CRWLock::Cleanup()
+{
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION((_dwState == 0)); // sanity checks
+ PRECONDITION((_dwWriterID == 0));
+ PRECONDITION((_wWriterLevel == 0));
+ }
+ CONTRACTL_END;
+
+ if(_hWriterEvent) {
+ delete _hWriterEvent;
+ _hWriterEvent = NULL;
+ }
+ if(_hReaderEvent) {
+ delete _hReaderEvent;
+ _hReaderEvent = NULL;
+ }
+
+ return;
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::ChainEntry private
+//
+// Synopsis: Chains the given lock entry into the chain
+//
+//+-------------------------------------------------------------------
+inline void CRWLock::ChainEntry(Thread *pThread, LockEntry *pLockEntry)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // This is to synchronize with finalizer thread and deadlock detection.
+ CrstHolder rwl(&s_RWLockCrst);
+ LockEntry *pHeadEntry = pThread->m_pHead;
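+    // Splice the entry in just before the head of the thread's circular
+    // doubly-linked list, i.e. at its tail.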
+ pLockEntry->pNext = pHeadEntry;
+ pLockEntry->pPrev = pHeadEntry->pPrev;
+ pLockEntry->pPrev->pNext = pLockEntry;
+ pHeadEntry->pPrev = pLockEntry;
+
+ return;
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::GetLockEntry private
+//
+// Synopsis: Gets lock entry from TLS
+//
+//+-------------------------------------------------------------------
+inline LockEntry *CRWLock::GetLockEntry(Thread* pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (pThread == NULL) {
+ pThread = GetThread();
+ }
+ LockEntry *pHeadEntry = pThread->m_pHead;
+ LockEntry *pLockEntry = pHeadEntry;
+ do
+ {
+ if((pLockEntry->dwLLockID == _dwLLockID) && (pLockEntry->dwULockID == _dwULockID))
+ return(pLockEntry);
+ pLockEntry = pLockEntry->pNext;
+ } while(pLockEntry != pHeadEntry);
+
+ return(NULL);
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::FastGetOrCreateLockEntry private
+//
+// Synopsis: The fast path for getting a lock entry from TLS
+//
+//+-------------------------------------------------------------------
+inline LockEntry *CRWLock::FastGetOrCreateLockEntry()
+{
+
+ CONTRACTL
+ {
+ THROWS; // SlowGetOrCreateLockEntry can throw out of memory exception
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+ LockEntry *pLockEntry = pThread->m_pHead;
+ if(pLockEntry->dwLLockID == 0)
+ {
+ _ASSERTE(pLockEntry->wReaderLevel == 0);
+ pLockEntry->dwLLockID = _dwLLockID;
+ pLockEntry->dwULockID = _dwULockID;
+ return(pLockEntry);
+ }
+ else if((pLockEntry->dwLLockID == _dwLLockID) && (pLockEntry->dwULockID == _dwULockID))
+ {
+ // Note, StaticAcquireReaderLock can have reentry via pumping while it's blocking
+ // so no assertions about pLockEntry->wReaderLevel's state
+ return(pLockEntry);
+ }
+
+ return(SlowGetOrCreateLockEntry(pThread));
+}
+
+
+//+-------------------------------------------------------------------
+//
+//  Method:     CRWLock::SlowGetOrCreateLockEntry    private
+//
+// Synopsis: The slow path for getting a lock entry from TLS
+//
+//+-------------------------------------------------------------------
+LockEntry *CRWLock::SlowGetOrCreateLockEntry(Thread *pThread)
+{
+
+ CONTRACTL
+ {
+ THROWS; // memory allocation can throw out of memory exception
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LockEntry *pFreeEntry = NULL;
+ LockEntry *pHeadEntry = pThread->m_pHead;
+
+ // Search for an empty entry or an entry belonging to this lock
+ LockEntry *pLockEntry = pHeadEntry->pNext;
+ while(pLockEntry != pHeadEntry)
+ {
+ if(pLockEntry->dwLLockID &&
+ ((pLockEntry->dwLLockID != _dwLLockID) || (pLockEntry->dwULockID != _dwULockID)))
+ {
+ // Move to the next entry
+ pLockEntry = pLockEntry->pNext;
+ }
+ else
+ {
+ // Prepare to move it to the head
+ pFreeEntry = pLockEntry;
+ pLockEntry->pPrev->pNext = pLockEntry->pNext;
+ pLockEntry->pNext->pPrev = pLockEntry->pPrev;
+
+ break;
+ }
+ }
+
+ if(pFreeEntry == NULL)
+ {
+ pFreeEntry = new LockEntry;
+ pFreeEntry->wReaderLevel = 0;
+ }
+
+ if(pFreeEntry)
+ {
+ _ASSERTE((pFreeEntry->dwLLockID != 0) || (pFreeEntry->wReaderLevel == 0));
+ _ASSERTE((pFreeEntry->wReaderLevel == 0) ||
+ ((pFreeEntry->dwLLockID == _dwLLockID) && (pFreeEntry->dwULockID == _dwULockID)));
+
+ // Chain back the entry
+ ChainEntry(pThread, pFreeEntry);
+
+ // Move this entry to the head
+ pThread->m_pHead = pFreeEntry;
+
+ // Mark the entry as belonging to this lock
+ pFreeEntry->dwLLockID = _dwLLockID;
+ pFreeEntry->dwULockID = _dwULockID;
+ }
+
+ return pFreeEntry;
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::FastRecycleLockEntry private
+//
+//  Synopsis:   Fast path for recycling the lock entry; used when the
+//              thread is going to call FastGetOrCreateLockEntry again
+//              within the next few instructions
+//
+//+-------------------------------------------------------------------
+inline void CRWLock::FastRecycleLockEntry(LockEntry *pLockEntry)
+{
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ // Sanity checks
+ PRECONDITION(pLockEntry->wReaderLevel == 0);
+ PRECONDITION((pLockEntry->dwLLockID == _dwLLockID) && (pLockEntry->dwULockID == _dwULockID));
+ PRECONDITION(pLockEntry == GetThread()->m_pHead);
+ }
+ CONTRACTL_END;
+
+
+ pLockEntry->dwLLockID = 0;
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::RecycleLockEntry private
+//
+//  Synopsis:   Recycles the lock entry, moving it toward the tail of
+//              the per-thread chain
+//
+//+-------------------------------------------------------------------
+inline void CRWLock::RecycleLockEntry(LockEntry *pLockEntry)
+{
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ // Sanity check
+ PRECONDITION(pLockEntry->wReaderLevel == 0);
+ }
+ CONTRACTL_END;
+
+ // Move the entry to tail
+ Thread *pThread = GetThread();
+ LockEntry *pHeadEntry = pThread->m_pHead;
+ if(pLockEntry == pHeadEntry)
+ {
+ pThread->m_pHead = pHeadEntry->pNext;
+ }
+ else if(pLockEntry->pNext->dwLLockID)
+ {
+ // Prepare to move the entry to tail
+ pLockEntry->pPrev->pNext = pLockEntry->pNext;
+ pLockEntry->pNext->pPrev = pLockEntry->pPrev;
+
+ // Chain back the entry
+ ChainEntry(pThread, pLockEntry);
+ }
+
+ // The entry does not belong to this lock anymore
+ pLockEntry->dwLLockID = 0;
+ return;
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticIsWriterLockHeld public
+//
+// Synopsis: Return TRUE if writer lock is held
+//
+//+-------------------------------------------------------------------
+FCIMPL1(FC_BOOL_RET, CRWLock::StaticIsWriterLockHeld, CRWLock *pRWLock)
+{
+ FCALL_CONTRACT;
+
+ if (pRWLock == NULL)
+ {
+ FCThrow(kNullReferenceException);
+ }
+
+ if(pRWLock->_dwWriterID == GetThread()->GetThreadId())
+ FC_RETURN_BOOL(TRUE);
+
+ FC_RETURN_BOOL(FALSE);
+}
+FCIMPLEND
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticIsReaderLockHeld public
+//
+// Synopsis: Return TRUE if reader lock is held
+//
+//+-------------------------------------------------------------------
+FCIMPL1(FC_BOOL_RET, CRWLock::StaticIsReaderLockHeld, CRWLock *pRWLock)
+{
+ FCALL_CONTRACT;
+
+ if (pRWLock == NULL)
+ {
+ FCThrow(kNullReferenceException);
+ }
+
+ LockEntry *pLockEntry = pRWLock->GetLockEntry();
+ if(pLockEntry)
+ {
+ FC_RETURN_BOOL(pLockEntry->wReaderLevel > 0);
+ }
+
+ FC_RETURN_BOOL(FALSE);
+}
+FCIMPLEND
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::AssertWriterLockHeld public
+//
+// Synopsis: Asserts that writer lock is held
+//
+//+-------------------------------------------------------------------
+#ifdef _DEBUG
+BOOL CRWLock::AssertWriterLockHeld()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if(_dwWriterID == GetThread()->GetThreadId())
+ return(TRUE);
+
+ _ASSERTE(!"Writer lock not held by the current thread");
+ return(FALSE);
+}
+#endif
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::AssertWriterLockNotHeld public
+//
+// Synopsis: Asserts that writer lock is not held
+//
+//+-------------------------------------------------------------------
+#ifdef _DEBUG
+BOOL CRWLock::AssertWriterLockNotHeld()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if(_dwWriterID != GetThread()->GetThreadId())
+ return(TRUE);
+
+ _ASSERTE(!"Writer lock held by the current thread");
+ return(FALSE);
+}
+#endif
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::AssertReaderLockHeld public
+//
+// Synopsis: Asserts that reader lock is held
+//
+//+-------------------------------------------------------------------
+#ifdef _DEBUG
+BOOL CRWLock::AssertReaderLockHeld()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ LockEntry *pLockEntry = GetLockEntry();
+ if(pLockEntry)
+ {
+ _ASSERTE(pLockEntry->wReaderLevel);
+ return(TRUE);
+ }
+
+ _ASSERTE(!"Reader lock not held by the current thread");
+ return(FALSE);
+}
+#endif
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::AssertReaderLockNotHeld public
+//
+//  Synopsis:   Asserts that reader lock is not held
+//
+//+-------------------------------------------------------------------
+#ifdef _DEBUG
+BOOL CRWLock::AssertReaderLockNotHeld()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ LockEntry *pLockEntry = GetLockEntry();
+ if(pLockEntry == NULL)
+ return(TRUE);
+
+ _ASSERTE(pLockEntry->wReaderLevel);
+ _ASSERTE(!"Reader lock held by the current thread");
+
+ return(FALSE);
+}
+#endif
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::AssertReaderOrWriterLockHeld public
+//
+//  Synopsis:   Asserts that either the reader or the writer lock is held
+//
+//+-------------------------------------------------------------------
+#ifdef _DEBUG
+BOOL CRWLock::AssertReaderOrWriterLockHeld()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if(_dwWriterID == GetThread()->GetThreadId())
+ {
+ return(TRUE);
+ }
+ else
+ {
+ LockEntry *pLockEntry = GetLockEntry();
+ if(pLockEntry)
+ {
+ _ASSERTE(pLockEntry->wReaderLevel);
+ return(TRUE);
+ }
+ }
+
+ _ASSERTE(!"Neither Reader nor Writer lock held");
+ return(FALSE);
+}
+#endif
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::RWSetEvent private
+//
+// Synopsis: Helper function for setting an event
+//
+//+-------------------------------------------------------------------
+inline void CRWLock::RWSetEvent(CLREvent* event)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if(!event->Set())
+ {
+ _ASSERTE(!"SetEvent failed");
+ if(fBreakOnErrors) // fBreakOnErrors == FALSE so will be optimized out.
+ DebugBreak();
+ COMPlusThrowWin32(E_UNEXPECTED);
+ }
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::RWResetEvent private
+//
+// Synopsis: Helper function for resetting an event
+//
+//+-------------------------------------------------------------------
+inline void CRWLock::RWResetEvent(CLREvent* event)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if(!event->Reset())
+ {
+ _ASSERTE(!"ResetEvent failed");
+ if(fBreakOnErrors) // fBreakOnErrors == FALSE so will be optimized out.
+ DebugBreak();
+ COMPlusThrowWin32(E_UNEXPECTED);
+ }
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::RWWaitForSingleObject public
+//
+// Synopsis: Helper function for waiting on an event
+//
+//+-------------------------------------------------------------------
+inline DWORD CRWLock::RWWaitForSingleObject(CLREvent* event, DWORD dwTimeout)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ DWORD status = WAIT_FAILED;
+ EX_TRY
+ {
+ status = event->Wait(dwTimeout,TRUE);
+ }
+ EX_CATCH
+ {
+ status = GET_EXCEPTION()->GetHR();
+ if (status == S_OK)
+ {
+ status = WAIT_FAILED;
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions); // The caller will rethrow the exception
+
+ return status;
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::RWSleep public
+//
+// Synopsis: Helper function for calling Sleep
+//
+//+-------------------------------------------------------------------
+inline void CRWLock::RWSleep(DWORD dwTime)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ ClrSleepEx(dwTime, TRUE);
+}
+
+
+#undef volatile
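+// The interlocked helpers below take raw "volatile" pointers, so the
+// keyword is temporarily restored here and poisoned again after them.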
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::RWInterlockedCompareExchange public
+//
+//  Synopsis:   Helper function for calling InterlockedCompareExchange
+//
+//+-------------------------------------------------------------------
+inline LONG CRWLock::RWInterlockedCompareExchange(LONG volatile *pvDestination,
+ LONG dwExchange,
+ LONG dwComparand)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return FastInterlockCompareExchange(pvDestination,
+ dwExchange,
+ dwComparand);
+}
+
+inline void* CRWLock::RWInterlockedCompareExchangePointer(PVOID volatile *pvDestination,
+ void* pExchange,
+ void* pComparand)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return FastInterlockCompareExchangePointer(pvDestination,
+ pExchange,
+ pComparand);
+}
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::RWInterlockedExchangeAdd public
+//
+//  Synopsis:   Helper function for atomically adding to the lock state
+//
+//+-------------------------------------------------------------------
+inline LONG CRWLock::RWInterlockedExchangeAdd(LONG volatile *pvDestination,
+ LONG dwAddToState)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return FastInterlockExchangeAdd(pvDestination, dwAddToState);
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::RWInterlockedIncrement public
+//
+//  Synopsis:   Helper function for atomically incrementing a counter
+//
+//+-------------------------------------------------------------------
+inline LONG CRWLock::RWInterlockedIncrement(LONG volatile *pdwState)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return FastInterlockIncrement(pdwState);
+}
+
+#define volatile DoNotUserVolatileKeyword
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::ReleaseEvents public
+//
+// Synopsis: Helper function for caching events
+//
+//+-------------------------------------------------------------------
+void CRWLock::ReleaseEvents()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ PRECONDITION(((_dwState & CACHING_EVENTS) == CACHING_EVENTS)); // Ensure that reader and writers have been stalled
+
+ }
+ CONTRACTL_END;
+
+ // Save writer event
+ CLREvent *hWriterEvent = _hWriterEvent;
+ _hWriterEvent = NULL;
+
+ // Save reader event
+ CLREvent *hReaderEvent = _hReaderEvent;
+ _hReaderEvent = NULL;
+
+ // Allow readers and writers to continue
+ RWInterlockedExchangeAdd(&_dwState, -(CACHING_EVENTS));
+
+ // Cache events
+ // <REVISIT_TODO>:
+ // I am closing events for now. What is needed
+ // is an event cache to which the events are
+ // released using InterlockedCompareExchange64</REVISIT_TODO>
+ if(hWriterEvent)
+ {
+ LOG((LF_SYNC, LL_INFO10, "Releasing writer event\n"));
+ delete hWriterEvent;
+ }
+ if(hReaderEvent)
+ {
+ LOG((LF_SYNC, LL_INFO10, "Releasing reader event\n"));
+ delete hReaderEvent;
+ }
+#ifdef RWLOCK_STATISTICS
+ RWInterlockedIncrement(&_dwEventsReleasedCount);
+#endif
+
+ return;
+}
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::GetWriterEvent public
+//
+//  Synopsis:   Helper function for obtaining an auto-reset event used
+//              for serializing writers. It utilizes the event cache
+//
+//+-------------------------------------------------------------------
+CLREvent* CRWLock::GetWriterEvent(HRESULT *pHR)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ *pHR = S_OK;
+ //GC could happen in ~CLREvent or EH. "this" is a GC object so it could be moved
+ //during GC. So we need to cache the field before GC could happen
+ CLREvent * result = _hWriterEvent;
+
+ if(_hWriterEvent == NULL)
+ {
+ EX_TRY
+ {
+ CLREvent *pEvent = new CLREvent();
+ NewHolder<CLREvent> hWriterEvent (pEvent);
+ hWriterEvent->CreateRWLockWriterEvent(FALSE,this);
+ if(hWriterEvent)
+ {
+ if(RWInterlockedCompareExchangePointer((PVOID*) &_hWriterEvent,
+ hWriterEvent.GetValue(),
+ NULL) == NULL)
+ {
+ hWriterEvent.SuppressRelease();
+ }
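+                // If the compare-exchange lost the race to another
+                // thread, the holder still owns the new event and
+                // deletes it when it goes out of scope.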
+ //GC could happen in ~CLREvent or EH. "this" is a GC object so it could be moved
+ //during GC. So we need to cache the field before GC could happen.
+ result = _hWriterEvent;
+ }
+ }
+ EX_CATCH
+ {
+ *pHR = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ return(result);
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::GetReaderEvent public
+//
+//  Synopsis:   Helper function for obtaining a manual-reset event used
+//              by readers to wait while a writer holds the lock.
+//              It utilizes the event cache
+//
+//+-------------------------------------------------------------------
+CLREvent* CRWLock::GetReaderEvent(HRESULT *pHR)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ *pHR = S_OK;
+ //GC could happen in ~CLREvent or EH. "this" is a GC object so it could be moved
+ //during GC. So we need to cache the field before GC could happen
+ CLREvent * result = _hReaderEvent;
+
+ if(_hReaderEvent == NULL)
+ {
+ EX_TRY
+ {
+ CLREvent *pEvent = new CLREvent();
+ NewHolder<CLREvent> hReaderEvent (pEvent);
+ hReaderEvent->CreateRWLockReaderEvent(FALSE, this);
+ if(hReaderEvent)
+ {
+ if(RWInterlockedCompareExchangePointer((PVOID*) &_hReaderEvent,
+ hReaderEvent.GetValue(),
+ NULL) == NULL)
+ {
+ hReaderEvent.SuppressRelease();
+ }
+ //GC could happen in ~CLREvent or EH. "this" is a GC object so it could be moved
+ //during GC. So we need to cache the field before GC could happen
+ result = _hReaderEvent;
+ }
+ }
+ EX_CATCH
+ {
+ *pHR = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ return(result);
+}
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticRecoverLock public
+//
+// Synopsis: Helper function to restore the lock to
+// the original state
+//
+//+-------------------------------------------------------------------
+void CRWLock::StaticRecoverLock(
+ CRWLock **ppRWLock,
+ LockCookie *pLockCookie,
+ DWORD dwFlags)
+{
+ CONTRACTL
+ {
+ THROWS; // StaticAcquireWriterLock can throw exception
+ GC_TRIGGERS;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ DWORD dwTimeout = (gdwDefaultTimeout > gdwReasonableTimeout)
+ ? gdwDefaultTimeout
+ : gdwReasonableTimeout;
+
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+
+ EX_TRY
+ {
+ // Check if the thread was a writer
+ if(dwFlags & COOKIE_WRITER)
+ {
+ // Acquire writer lock
+ StaticAcquireWriterLock(ppRWLock, dwTimeout);
+ _ASSERTE (pThread->m_dwLockCount >= (*ppRWLock)->_wWriterLevel);
+ ASSERT_UNLESS_NO_DEBUG_STATE(__pClrDebugState->GetLockCount(kDbgStateLockType_User) >= (*ppRWLock)->_wWriterLevel);
+ pThread->m_dwLockCount -= (*ppRWLock)->_wWriterLevel;
+ USER_LOCK_RELEASED_MULTIPLE((*ppRWLock)->_wWriterLevel, GetPtrForLockContract(ppRWLock));
+ (*ppRWLock)->_wWriterLevel = pLockCookie->wWriterLevel;
+ pThread->m_dwLockCount += (*ppRWLock)->_wWriterLevel;
+ USER_LOCK_TAKEN_MULTIPLE((*ppRWLock)->_wWriterLevel, GetPtrForLockContract(ppRWLock));
+ }
+ // Check if the thread was a reader
+ else if(dwFlags & COOKIE_READER)
+ {
+ StaticAcquireReaderLock(ppRWLock, dwTimeout);
+ LockEntry *pLockEntry = (*ppRWLock)->GetLockEntry();
+ _ASSERTE(pLockEntry);
+ _ASSERTE (pThread->m_dwLockCount >= pLockEntry->wReaderLevel);
+ ASSERT_UNLESS_NO_DEBUG_STATE(__pClrDebugState->GetLockCount(kDbgStateLockType_User) >= pLockEntry->wReaderLevel);
+ pThread->m_dwLockCount -= pLockEntry->wReaderLevel;
+ USER_LOCK_RELEASED_MULTIPLE(pLockEntry->wReaderLevel, GetPtrForLockContract(ppRWLock));
+ pLockEntry->wReaderLevel = pLockCookie->wReaderLevel;
+ pThread->m_dwLockCount += pLockEntry->wReaderLevel;
+ USER_LOCK_TAKEN_MULTIPLE(pLockEntry->wReaderLevel, GetPtrForLockContract(ppRWLock));
+ }
+ }
+ EX_CATCH
+ {
+ // Removed an assert here. This error is expected in case of
+ // ThreadAbort.
+ COMPlusThrowWin32(RWLOCK_RECOVERY_FAILURE);
+ }
+ EX_END_CATCH_UNREACHABLE
+}
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticAcquireReaderLockPublic public
+//
+// Synopsis: Public access to StaticAcquireReaderLock
+//
+//+-------------------------------------------------------------------
+FCIMPL2(void, CRWLock::StaticAcquireReaderLockPublic, CRWLock *pRWLockUNSAFE, DWORD dwDesiredTimeout)
+{
+ FCALL_CONTRACT;
+
+ if (pRWLockUNSAFE == NULL)
+ {
+ FCThrowVoid(kNullReferenceException);
+ }
+
+ OBJECTREF pRWLock = ObjectToOBJECTREF((Object*)pRWLockUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_1(pRWLock);
+
+ StaticAcquireReaderLock((CRWLock**)&pRWLock, dwDesiredTimeout);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticAcquireReaderLock private
+//
+// Synopsis: Makes the thread a reader. Supports nested reader locks.
+//
+//+-------------------------------------------------------------------
+
+void CRWLock::StaticAcquireReaderLock(
+ CRWLock **ppRWLock,
+ DWORD dwDesiredTimeout)
+{
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS; // CLREvent::Wait is GC_TRIGGERS
+ CAN_TAKE_LOCK;
+ PRECONDITION(CheckPointer(ppRWLock));
+ PRECONDITION(CheckPointer(*ppRWLock));
+ }
+ CONTRACTL_END;
+
+ TESTHOOKCALL(AppDomainCanBeUnloaded(GetThread()->GetDomain()->GetId().m_dwId,FALSE));
+
+ if (GetThread()->IsAbortRequested()) {
+ GetThread()->HandleThreadAbort();
+ }
+
+ LockEntry *pLockEntry = (*ppRWLock)->FastGetOrCreateLockEntry();
+ if (pLockEntry == NULL)
+ {
+ COMPlusThrowWin32(STATUS_NO_MEMORY);
+ }
+
+ DWORD dwStatus = WAIT_OBJECT_0;
+ // Check for the fast path
+ if(RWInterlockedCompareExchange(&(*ppRWLock)->_dwState, READER, 0) == 0)
+ {
+ _ASSERTE(pLockEntry->wReaderLevel == 0);
+ }
+ // Check for nested reader
+ else if(pLockEntry->wReaderLevel != 0)
+ {
+ _ASSERTE((*ppRWLock)->_dwState & READERS_MASK);
+
+ if (pLockEntry->wReaderLevel == RWLOCK_MAX_ACQUIRE_COUNT) {
+ COMPlusThrow(kOverflowException, W("Overflow_UInt16"));
+ }
+ ++pLockEntry->wReaderLevel;
+ INCTHREADLOCKCOUNT();
+ USER_LOCK_TAKEN(GetPtrForLockContract(ppRWLock));
+ return;
+ }
+ // Check if the thread already has writer lock
+ else if((*ppRWLock)->_dwWriterID == GetThread()->GetThreadId())
+ {
+ StaticAcquireWriterLock(ppRWLock, dwDesiredTimeout);
+ (*ppRWLock)->FastRecycleLockEntry(pLockEntry);
+ return;
+ }
+ else
+ {
+ DWORD dwSpinCount;
+ DWORD dwCurrentState, dwKnownState;
+
+ // Initialize
+ dwSpinCount = 0;
+ dwCurrentState = (*ppRWLock)->_dwState;
+ do
+ {
+ dwKnownState = dwCurrentState;
+
+ // Reader need not wait if there are only readers and no writer
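+            // (dwKnownState < READERS_MASK means only the reader-count
+            // bits are set and there is room for one more reader; the
+            // second arm admits readers while readers are being signaled
+            // and no writer holds the lock, provided current plus waiting
+            // readers still fit in the readers mask.)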
+ if((dwKnownState < READERS_MASK) ||
+ (((dwKnownState & READER_SIGNALED) && ((dwKnownState & WRITER) == 0)) &&
+ (((dwKnownState & READERS_MASK) +
+ ((dwKnownState & WAITING_READERS_MASK) >> WAITING_READERS_SHIFT)) <=
+ (READERS_MASK - 2))))
+ {
+ // Add to readers
+ dwCurrentState = RWInterlockedCompareExchange(&(*ppRWLock)->_dwState,
+ (dwKnownState + READER),
+ dwKnownState);
+ if(dwCurrentState == dwKnownState)
+ {
+ // One more reader
+ break;
+ }
+ }
+ // Check for too many Readers or waiting readers or signaling in progress
+ else if(((dwKnownState & READERS_MASK) == READERS_MASK) ||
+ ((dwKnownState & WAITING_READERS_MASK) == WAITING_READERS_MASK) ||
+ ((dwKnownState & CACHING_EVENTS) == READER_SIGNALED))
+ {
+ // Sleep
+ GetThread()->UserSleep(1000);
+
+ // Update to latest state
+ dwSpinCount = 0;
+ dwCurrentState = (*ppRWLock)->_dwState;
+ }
+ // Check if events are being cached
+ else if((dwKnownState & CACHING_EVENTS) == CACHING_EVENTS)
+ {
+ if(++dwSpinCount > gdwDefaultSpinCount)
+ {
+ RWSleep(1);
+ dwSpinCount = 0;
+ }
+ dwCurrentState = (*ppRWLock)->_dwState;
+ }
+ // Check spin count
+ else if(++dwSpinCount <= gdwDefaultSpinCount)
+ {
+ dwCurrentState = (*ppRWLock)->_dwState;
+ }
+ else
+ {
+ // Add to waiting readers
+ dwCurrentState = RWInterlockedCompareExchange(&(*ppRWLock)->_dwState,
+ (dwKnownState + WAITING_READER),
+ dwKnownState);
+ if(dwCurrentState == dwKnownState)
+ {
+ CLREvent* hReaderEvent;
+ DWORD dwModifyState;
+
+ // One more waiting reader
+#ifdef RWLOCK_STATISTICS
+ RWInterlockedIncrement(&(*ppRWLock)->_dwReaderContentionCount);
+#endif
+ HRESULT hr;
+ hReaderEvent = (*ppRWLock)->GetReaderEvent(&hr);
+ if(hReaderEvent)
+ {
+ dwStatus = RWWaitForSingleObject(hReaderEvent, dwDesiredTimeout);
+ VALIDATE_LOCK(*ppRWLock);
+
+ // StaticAcquireReaderLock can have reentry via pumping while waiting for
+ // hReaderEvent, which may change pLockEntry's state from underneath us.
+ if ((pLockEntry->dwLLockID != (*ppRWLock)->_dwLLockID) ||
+ (pLockEntry->dwULockID != (*ppRWLock)->_dwULockID))
+ {
+ pLockEntry = (*ppRWLock)->FastGetOrCreateLockEntry();
+ if (pLockEntry == NULL)
+ {
+ COMPlusThrowWin32(STATUS_NO_MEMORY);
+ }
+ }
+ }
+ else
+ {
+ LOG((LF_SYNC, LL_WARNING,
+ "AcquireReaderLock failed to create reader "
+ "event for RWLock 0x%x\n", *ppRWLock));
+ dwStatus = E_FAIL;
+ }
+
+ if(dwStatus == WAIT_OBJECT_0)
+ {
+ _ASSERTE((*ppRWLock)->_dwState & READER_SIGNALED);
+ _ASSERTE(((*ppRWLock)->_dwState & READERS_MASK) < READERS_MASK);
+ dwModifyState = READER - WAITING_READER;
+ }
+ else
+ {
+ dwModifyState = (DWORD) -WAITING_READER;
+ if(dwStatus == WAIT_TIMEOUT)
+ {
+ LOG((LF_SYNC, LL_WARNING,
+ "Timed out trying to acquire reader lock "
+ "for RWLock 0x%x\n", *ppRWLock));
+ hr = HRESULT_FROM_WIN32(ERROR_TIMEOUT);
+ }
+ else if(dwStatus == WAIT_IO_COMPLETION)
+ {
+ LOG((LF_SYNC, LL_WARNING,
+ "Thread interrupted while trying to acquire reader lock "
+ "for RWLock 0x%x\n", *ppRWLock));
+ hr = COR_E_THREADINTERRUPTED;
+ }
+ else if (dwStatus == WAIT_FAILED)
+ {
+ if (SUCCEEDED(hr))
+ {
+ dwStatus = GetLastError();
+ if (dwStatus == WAIT_OBJECT_0)
+ {
+ dwStatus = WAIT_FAILED;
+ }
+ hr = HRESULT_FROM_WIN32(dwStatus);
+ LOG((LF_SYNC, LL_WARNING,
+ "WaitForSingleObject on Event 0x%x failed for "
+ "RWLock 0x%x with status code 0x%x\n",
+ hReaderEvent, *ppRWLock, dwStatus));
+ }
+ }
+ }
+
+                    // One less waiting reader, which may have become a reader
+ dwKnownState = RWInterlockedExchangeAdd(&(*ppRWLock)->_dwState, dwModifyState);
+
+ // Check for last signaled waiting reader
+ if(dwStatus == WAIT_OBJECT_0)
+ {
+ _ASSERTE(dwKnownState & READER_SIGNALED);
+ _ASSERTE((dwKnownState & READERS_MASK) < READERS_MASK);
+ if((dwKnownState & WAITING_READERS_MASK) == WAITING_READER)
+ {
+ // Reset the event and lower reader signaled flag
+ RWResetEvent(hReaderEvent);
+ RWInterlockedExchangeAdd(&(*ppRWLock)->_dwState, -READER_SIGNALED);
+ }
+ }
+ else
+ {
+ if(((dwKnownState & WAITING_READERS_MASK) == WAITING_READER) &&
+ (dwKnownState & READER_SIGNALED))
+ {
+ HRESULT hr1;
+ if(hReaderEvent == NULL)
+ hReaderEvent = (*ppRWLock)->GetReaderEvent(&hr1);
+ _ASSERTE(hReaderEvent);
+
+ // Ensure the event is signalled before resetting it.
+ DWORD dwTemp;
+ dwTemp = hReaderEvent->Wait(INFINITE, FALSE);
+ _ASSERTE(dwTemp == WAIT_OBJECT_0);
+ _ASSERTE(((*ppRWLock)->_dwState & READERS_MASK) < READERS_MASK);
+
+ // Reset the event and lower reader signaled flag
+ RWResetEvent(hReaderEvent);
+ RWInterlockedExchangeAdd(&(*ppRWLock)->_dwState, (READER - READER_SIGNALED));
+
+                        // Honor the original status
+ ++pLockEntry->wReaderLevel;
+ INCTHREADLOCKCOUNT();
+ USER_LOCK_TAKEN(GetPtrForLockContract(ppRWLock));
+ StaticReleaseReaderLock(ppRWLock);
+ }
+ else
+ {
+ (*ppRWLock)->FastRecycleLockEntry(pLockEntry);
+ }
+
+ _ASSERTE((pLockEntry == NULL) ||
+ ((pLockEntry->dwLLockID == 0) &&
+ (pLockEntry->wReaderLevel == 0)));
+ if(fBreakOnErrors) // fBreakOnErrors == FALSE so will be optimized out.
+ {
+ _ASSERTE(!"Failed to acquire reader lock");
+ DebugBreak();
+ }
+
+ // Prepare the frame for throwing an exception
+ if ((DWORD)HOST_E_DEADLOCK == dwStatus)
+ {
+ // So that the error message is in the exception.
+ RaiseDeadLockException();
+ } else if ((DWORD)COR_E_THREADINTERRUPTED == dwStatus) {
+ COMPlusThrow(kThreadInterruptedException);
+ }
+ else
+ {
+ COMPlusThrowWin32 (hr);
+ }
+ }
+
+ // Sanity check
+ _ASSERTE(dwStatus == WAIT_OBJECT_0);
+ break;
+ }
+ }
+ YieldProcessor(); // Indicate to the processor that we are spinning
+ } while(TRUE);
+ }
+
+ // Success
+ _ASSERTE(dwStatus == WAIT_OBJECT_0);
+ _ASSERTE(((*ppRWLock)->_dwState & WRITER) == 0);
+ _ASSERTE((*ppRWLock)->_dwState & READERS_MASK);
+ ++pLockEntry->wReaderLevel;
+ INCTHREADLOCKCOUNT();
+ USER_LOCK_TAKEN(GetPtrForLockContract(ppRWLock));
+#ifdef RWLOCK_STATISTICS
+ RWInterlockedIncrement(&(*ppRWLock)->_dwReaderEntryCount);
+#endif
+ return;
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticAcquireWriterLockPublic public
+//
+// Synopsis: Public access to StaticAcquireWriterLock
+//
+//+-------------------------------------------------------------------
+FCIMPL2(void, CRWLock::StaticAcquireWriterLockPublic, CRWLock *pRWLockUNSAFE, DWORD dwDesiredTimeout)
+{
+ FCALL_CONTRACT;
+
+ if (pRWLockUNSAFE == NULL)
+ {
+ FCThrowVoid(kNullReferenceException);
+ }
+
+ OBJECTREF pRWLock = ObjectToOBJECTREF((Object*)pRWLockUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_1(pRWLock);
+
+ StaticAcquireWriterLock((CRWLock**)&pRWLock, dwDesiredTimeout);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
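+
+// (This pattern repeats for every FCall below: FCalls run without a Frame,
+// so HELPER_METHOD_FRAME_BEGIN_1 erects one and keeps the OBJECTREF
+// reported to the GC while the static worker, which may trigger a GC,
+// does the real work.)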
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticAcquireWriterLock private
+//
+// Synopsis: Makes the thread a writer. Supports nested writer
+// locks
+//
+//+-------------------------------------------------------------------
+
+void CRWLock::StaticAcquireWriterLock(
+ CRWLock **ppRWLock,
+ DWORD dwDesiredTimeout)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS; // CLREvent::Wait can trigger GC
+ CAN_TAKE_LOCK;
+ PRECONDITION((CheckPointer(ppRWLock)));
+ PRECONDITION((CheckPointer(*ppRWLock)));
+ }
+ CONTRACTL_END;
+
+ TESTHOOKCALL(AppDomainCanBeUnloaded(GetThread()->GetDomain()->GetId().m_dwId,FALSE));
+ if (GetThread()->IsAbortRequested()) {
+ GetThread()->HandleThreadAbort();
+ }
+
+ // Declare locals needed for setting up frame
+ DWORD dwThreadID = GetThread()->GetThreadId();
+ DWORD dwStatus;
+
+ // Check for the fast path
+ if(RWInterlockedCompareExchange(&(*ppRWLock)->_dwState, WRITER, 0) == 0)
+ {
+ _ASSERTE(((*ppRWLock)->_dwState & READERS_MASK) == 0);
+ }
+ // Check if the thread already has writer lock
+ else if((*ppRWLock)->_dwWriterID == dwThreadID)
+ {
+ if ((*ppRWLock)->_wWriterLevel == RWLOCK_MAX_ACQUIRE_COUNT) {
+ COMPlusThrow(kOverflowException, W("Overflow_UInt16"));
+ }
+ ++(*ppRWLock)->_wWriterLevel;
+ INCTHREADLOCKCOUNT();
+ USER_LOCK_TAKEN(GetPtrForLockContract(ppRWLock));
+ return;
+ }
+ else
+ {
+ DWORD dwCurrentState, dwKnownState;
+ DWORD dwSpinCount;
+
+ // Initialize
+ dwSpinCount = 0;
+ dwCurrentState = (*ppRWLock)->_dwState;
+ do
+ {
+ dwKnownState = dwCurrentState;
+
+ // Writer need not wait if there are no readers and no writer
+ if((dwKnownState == 0) || (dwKnownState == CACHING_EVENTS))
+ {
+ // Can be a writer
+ dwCurrentState = RWInterlockedCompareExchange(&(*ppRWLock)->_dwState,
+ (dwKnownState + WRITER),
+ dwKnownState);
+ if(dwCurrentState == dwKnownState)
+ {
+ // Only writer
+ break;
+ }
+ }
+ // Check for too many waiting writers
+ else if(((dwKnownState & WAITING_WRITERS_MASK) == WAITING_WRITERS_MASK))
+ {
+ // Sleep
+ GetThread()->UserSleep(1000);
+
+ // Update to latest state
+ dwSpinCount = 0;
+ dwCurrentState = (*ppRWLock)->_dwState;
+ }
+ // Check if events are being cached
+ else if((dwKnownState & CACHING_EVENTS) == CACHING_EVENTS)
+ {
+ if(++dwSpinCount > gdwDefaultSpinCount)
+ {
+ RWSleep(1);
+ dwSpinCount = 0;
+ }
+ dwCurrentState = (*ppRWLock)->_dwState;
+ }
+ // Check spin count
+ else if(++dwSpinCount <= gdwDefaultSpinCount)
+ {
+ dwCurrentState = (*ppRWLock)->_dwState;
+ }
+ else
+ {
+ // Add to waiting writers
+ dwCurrentState = RWInterlockedCompareExchange(&(*ppRWLock)->_dwState,
+ (dwKnownState + WAITING_WRITER),
+ dwKnownState);
+ if(dwCurrentState == dwKnownState)
+ {
+ CLREvent* hWriterEvent;
+ DWORD dwModifyState;
+
+ // One more waiting writer
+#ifdef RWLOCK_STATISTICS
+ RWInterlockedIncrement(&(*ppRWLock)->_dwWriterContentionCount);
+#endif
+ HRESULT hr;
+ hWriterEvent = (*ppRWLock)->GetWriterEvent(&hr);
+ if(hWriterEvent)
+ {
+ dwStatus = RWWaitForSingleObject(hWriterEvent, dwDesiredTimeout);
+ VALIDATE_LOCK(*ppRWLock);
+ }
+ else
+ {
+ LOG((LF_SYNC, LL_WARNING,
+ "AcquireWriterLock failed to create writer "
+ "event for RWLock 0x%x\n", *ppRWLock));
+ dwStatus = WAIT_FAILED;
+ }
+
+ if(dwStatus == WAIT_OBJECT_0)
+ {
+ _ASSERTE((*ppRWLock)->_dwState & WRITER_SIGNALED);
+ dwModifyState = WRITER - WAITING_WRITER - WRITER_SIGNALED;
+ }
+ else
+ {
+ dwModifyState = (DWORD) -WAITING_WRITER;
+ if(dwStatus == WAIT_TIMEOUT)
+ {
+ LOG((LF_SYNC, LL_WARNING,
+ "Timed out trying to acquire writer "
+ "lock for RWLock 0x%x\n", *ppRWLock));
+ hr = HRESULT_FROM_WIN32 (ERROR_TIMEOUT);
+ }
+ else if(dwStatus == WAIT_IO_COMPLETION)
+ {
+ LOG((LF_SYNC, LL_WARNING,
+ "Thread interrupted while trying to acquire writer lock "
+ "for RWLock 0x%x\n", *ppRWLock));
+ hr = COR_E_THREADINTERRUPTED;
+ }
+ else if (dwStatus == WAIT_FAILED)
+ {
+ if (SUCCEEDED(hr))
+ {
+ dwStatus = GetLastError();
+ if (dwStatus == WAIT_OBJECT_0)
+ {
+ dwStatus = WAIT_FAILED;
+ }
+ hr = HRESULT_FROM_WIN32(dwStatus);
+ LOG((LF_SYNC, LL_WARNING,
+ "WaitForSingleObject on Event 0x%x failed for "
+ "RWLock 0x%x with status code 0x%x",
+ hWriterEvent, *ppRWLock, dwStatus));
+ }
+ }
+ }
+
+ // One less waiting writer, and it may have become a writer
+ dwKnownState = RWInterlockedExchangeAdd(&(*ppRWLock)->_dwState, dwModifyState);
+
+ // Check for the last timing-out signaled waiting writer: if a writer
+ // signal is pending with nobody left waiting, this thread must consume
+ // the signal itself so the wakeup is not lost
+ if(dwStatus == WAIT_OBJECT_0)
+ {
+ // Common case
+ }
+ else
+ {
+ if((dwKnownState & WRITER_SIGNALED) &&
+ ((dwKnownState & WAITING_WRITERS_MASK) == WAITING_WRITER))
+ {
+ HRESULT hr1;
+ if(hWriterEvent == NULL)
+ hWriterEvent = (*ppRWLock)->GetWriterEvent(&hr1);
+ _ASSERTE(hWriterEvent);
+ do
+ {
+ dwKnownState = (*ppRWLock)->_dwState;
+ if((dwKnownState & WRITER_SIGNALED) &&
+ ((dwKnownState & WAITING_WRITERS_MASK) == 0))
+ {
+ DWORD dwTemp = hWriterEvent->Wait(10, FALSE);
+ if(dwTemp == WAIT_OBJECT_0)
+ {
+ dwKnownState = RWInterlockedExchangeAdd(&(*ppRWLock)->_dwState, (WRITER - WRITER_SIGNALED));
+ _ASSERTE(dwKnownState & WRITER_SIGNALED);
+ _ASSERTE((dwKnownState & WRITER) == 0);
+
+ // Honor the original (failed) status: the writer lock was acquired
+ // only to consume the stale signal, so release it right away
+ (*ppRWLock)->_dwWriterID = dwThreadID;
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+ _ASSERTE ((*ppRWLock)->_wWriterLevel == 0);
+ pThread->m_dwLockCount ++;
+ USER_LOCK_TAKEN(GetPtrForLockContract(ppRWLock));
+ (*ppRWLock)->_wWriterLevel = 1;
+ StaticReleaseWriterLock(ppRWLock);
+ break;
+ }
+ // else continue;
+ }
+ else
+ break;
+ }while(TRUE);
+ }
+
+ if(fBreakOnErrors) // fBreakOnErrors == FALSE so will be optimized out.
+ {
+ _ASSERTE(!"Failed to acquire writer lock");
+ DebugBreak();
+ }
+
+ // Prepare the frame for throwing an exception
+ if ((DWORD)HOST_E_DEADLOCK == dwStatus)
+ {
+ // So that the error message is in the exception.
+ RaiseDeadLockException();
+ } else if ((DWORD)COR_E_THREADINTERRUPTED == dwStatus) {
+ COMPlusThrow(kThreadInterruptedException);
+ }
+ else
+ {
+ COMPlusThrowWin32(hr);
+ }
+ }
+
+ // Sanity check
+ _ASSERTE(dwStatus == WAIT_OBJECT_0);
+ break;
+ }
+ }
+ YieldProcessor(); // indicate to the processor that we are spinning
+ } while(TRUE);
+ }
+
+ // Success
+ _ASSERTE((*ppRWLock)->_dwState & WRITER);
+ _ASSERTE(((*ppRWLock)->_dwState & READERS_MASK) == 0);
+ _ASSERTE((*ppRWLock)->_dwWriterID == 0);
+
+ // Save threadid of the writer
+ (*ppRWLock)->_dwWriterID = dwThreadID;
+ (*ppRWLock)->_wWriterLevel = 1;
+ INCTHREADLOCKCOUNT();
+ USER_LOCK_TAKEN(GetPtrForLockContract(ppRWLock));
+ ++(*ppRWLock)->_dwWriterSeqNum;
+#ifdef RWLOCK_STATISTICS
+ ++(*ppRWLock)->_dwWriterEntryCount;
+#endif
+ return;
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticReleaseWriterLockPublic public
+//
+// Synopsis: Public access to StaticReleaseWriterLock
+//
+//+-------------------------------------------------------------------
+FCIMPL1(void, CRWLock::StaticReleaseWriterLockPublic, CRWLock *pRWLockUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ if (pRWLockUNSAFE == NULL)
+ {
+ FCThrowVoid(kNullReferenceException);
+ }
+
+ OBJECTREF pRWLock = ObjectToOBJECTREF((Object*)pRWLockUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT, pRWLock);
+
+ // We don't want to block thread abort when we need to construct exception in
+ // unwind-continue handler.
+ // note that we cannot use this holder in FCALLs outside our HMF since it breaks the epilog walker on x86!
+ ThreadPreventAbortHolder preventAbortIn;
+
+ StaticReleaseWriterLock((CRWLock**)&pRWLock);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticReleaseWriterLock private
+//
+// Synopsis: Removes the thread as a writer if not a nested
+// call to release the lock
+//
+//+-------------------------------------------------------------------
+void CRWLock::StaticReleaseWriterLock(
+ CRWLock **ppRWLock)
+{
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION((CheckPointer(ppRWLock)));
+ PRECONDITION((CheckPointer(*ppRWLock)));
+ }
+ CONTRACTL_END;
+
+ DWORD dwThreadID = GetThread()->GetThreadId();
+
+ // Check validity of caller
+ if((*ppRWLock)->_dwWriterID == dwThreadID)
+ {
+ DECTHREADLOCKCOUNT();
+ USER_LOCK_RELEASED(GetPtrForLockContract(ppRWLock));
+ // Check for nested release
+ if(--(*ppRWLock)->_wWriterLevel == 0)
+ {
+ DWORD dwCurrentState, dwKnownState, dwModifyState;
+ BOOL fCacheEvents;
+ CLREvent* hReaderEvent = NULL, *hWriterEvent = NULL;
+
+ // Not a writer any more
+ (*ppRWLock)->_dwWriterID = 0;
+ dwCurrentState = (*ppRWLock)->_dwState;
+ do
+ {
+ dwKnownState = dwCurrentState;
+ dwModifyState = (DWORD) -WRITER;
+ fCacheEvents = FALSE;
+ if(dwKnownState & WAITING_READERS_MASK)
+ {
+ HRESULT hr;
+ hReaderEvent = (*ppRWLock)->GetReaderEvent(&hr);
+ if(hReaderEvent == NULL)
+ {
+ LOG((LF_SYNC, LL_WARNING,
+ "ReleaseWriterLock failed to create "
+ "reader event for RWLock 0x%x\n", *ppRWLock));
+ RWSleep(100);
+ dwCurrentState = (*ppRWLock)->_dwState;
+ dwKnownState = 0;
+ _ASSERTE(dwCurrentState != dwKnownState);
+ continue;
+ }
+ dwModifyState += READER_SIGNALED;
+ }
+ else if(dwKnownState & WAITING_WRITERS_MASK)
+ {
+ HRESULT hr;
+ hWriterEvent = (*ppRWLock)->GetWriterEvent(&hr);
+ if(hWriterEvent == NULL)
+ {
+ LOG((LF_SYNC, LL_WARNING,
+ "ReleaseWriterLock failed to create "
+ "writer event for RWLock 0x%x\n", *ppRWLock));
+ RWSleep(100);
+ dwCurrentState = (*ppRWLock)->_dwState;
+ dwKnownState = 0;
+ _ASSERTE(dwCurrentState != dwKnownState);
+ continue;
+ }
+ dwModifyState += WRITER_SIGNALED;
+ }
+ else if(((*ppRWLock)->_hReaderEvent || (*ppRWLock)->_hWriterEvent) &&
+ (dwKnownState == WRITER))
+ {
+ fCacheEvents = TRUE;
+ dwModifyState += CACHING_EVENTS;
+ }
+
+ // Sanity checks
+ _ASSERTE((dwKnownState & READERS_MASK) == 0);
+
+ dwCurrentState = RWInterlockedCompareExchange(&(*ppRWLock)->_dwState,
+ (dwKnownState + dwModifyState),
+ dwKnownState);
+ } while(dwCurrentState != dwKnownState);
+
+ // Check for waiting readers
+ if(dwKnownState & WAITING_READERS_MASK)
+ {
+ _ASSERTE((*ppRWLock)->_dwState & READER_SIGNALED);
+ _ASSERTE(hReaderEvent);
+ RWSetEvent(hReaderEvent);
+ }
+ // Check for waiting writers
+ else if(dwKnownState & WAITING_WRITERS_MASK)
+ {
+ _ASSERTE((*ppRWLock)->_dwState & WRITER_SIGNALED);
+ _ASSERTE(hWriterEvent);
+ RWSetEvent(hWriterEvent);
+ }
+ // Check for the need to release events
+ else if(fCacheEvents)
+ {
+ (*ppRWLock)->ReleaseEvents();
+ }
+
+ Thread *pThread = GetThread();
+ TESTHOOKCALL(AppDomainCanBeUnloaded(pThread->GetDomain()->GetId().m_dwId,FALSE));
+ if (pThread->IsAbortRequested()) {
+ pThread->HandleThreadAbort();
+ }
+
+ }
+ }
+ else
+ {
+ if(fBreakOnErrors) // fBreakOnErrors == FALSE so will be optimized out.
+ {
+ _ASSERTE(!"Attempt to release writer lock on a wrong thread");
+ DebugBreak();
+ }
+ COMPlusThrowWin32(ERROR_NOT_OWNER);
+ }
+
+ return;
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticReleaseReaderLockPublic public
+//
+// Synopsis: Public access to StaticReleaseReaderLock
+//
+//+-------------------------------------------------------------------
+FCIMPL1(void, CRWLock::StaticReleaseReaderLockPublic, CRWLock *pRWLockUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ if (pRWLockUNSAFE == NULL)
+ {
+ FCThrowVoid(kNullReferenceException);
+ }
+
+ OBJECTREF pRWLock = ObjectToOBJECTREF((Object*)pRWLockUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT, pRWLock);
+
+ // note that we cannot use this holder in FCALLs outside our HMF since it breaks the epilog walker on x86!
+ ThreadPreventAbortHolder preventAbortIn;
+
+ StaticReleaseReaderLock((CRWLock**)&pRWLock);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticReleaseReaderLock private
+//
+// Synopsis: Removes the thread as a reader
+//
+//+-------------------------------------------------------------------
+
+void CRWLock::StaticReleaseReaderLock(
+ CRWLock **ppRWLock)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION((CheckPointer(ppRWLock)));
+ PRECONDITION((CheckPointer(*ppRWLock)));
+ }
+ CONTRACTL_END;
+
+ // Check if the thread has writer lock
+ if((*ppRWLock)->_dwWriterID == GetThread()->GetThreadId())
+ {
+ StaticReleaseWriterLock(ppRWLock);
+ }
+ else
+ {
+ LockEntry *pLockEntry = (*ppRWLock)->GetLockEntry();
+ if(pLockEntry)
+ {
+ --pLockEntry->wReaderLevel;
+ DECTHREADLOCKCOUNT();
+ USER_LOCK_RELEASED(GetPtrForLockContract(ppRWLock));
+ if(pLockEntry->wReaderLevel == 0)
+ {
+ DWORD dwCurrentState, dwKnownState, dwModifyState;
+ BOOL fLastReader, fCacheEvents = FALSE;
+ CLREvent* hReaderEvent = NULL, *hWriterEvent = NULL;
+
+ // Sanity checks
+ _ASSERTE(((*ppRWLock)->_dwState & WRITER) == 0);
+ _ASSERTE((*ppRWLock)->_dwState & READERS_MASK);
+
+ // Not a reader any more
+ dwCurrentState = (*ppRWLock)->_dwState;
+ do
+ {
+ dwKnownState = dwCurrentState;
+ dwModifyState = (DWORD) -READER;
+ if((dwKnownState & (READERS_MASK | READER_SIGNALED)) == READER)
+ {
+ fLastReader = TRUE;
+ fCacheEvents = FALSE;
+ if(dwKnownState & WAITING_WRITERS_MASK)
+ {
+ HRESULT hr;
+ hWriterEvent = (*ppRWLock)->GetWriterEvent(&hr);
+ if(hWriterEvent == NULL)
+ {
+ LOG((LF_SYNC, LL_WARNING,
+ "ReleaseReaderLock failed to create "
+ "writer event for RWLock 0x%x\n", *ppRWLock));
+ RWSleep(100);
+ dwCurrentState = (*ppRWLock)->_dwState;
+ dwKnownState = 0;
+ _ASSERTE(dwCurrentState != dwKnownState);
+ continue;
+ }
+ dwModifyState += WRITER_SIGNALED;
+ }
+ else if(dwKnownState & WAITING_READERS_MASK)
+ {
+ HRESULT hr;
+ hReaderEvent = (*ppRWLock)->GetReaderEvent(&hr);
+ if(hReaderEvent == NULL)
+ {
+ LOG((LF_SYNC, LL_WARNING,
+ "ReleaseReaderLock failed to create "
+ "reader event for RWLock 0x%x\n", *ppRWLock));
+ RWSleep(100);
+ dwCurrentState = (*ppRWLock)->_dwState;
+ dwKnownState = 0;
+ _ASSERTE(dwCurrentState != dwKnownState);
+ continue;
+ }
+ dwModifyState += READER_SIGNALED;
+ }
+ else if(((*ppRWLock)->_hReaderEvent || (*ppRWLock)->_hWriterEvent) &&
+ (dwKnownState == READER))
+ {
+ fCacheEvents = TRUE;
+ dwModifyState += CACHING_EVENTS;
+ }
+ }
+ else
+ {
+ fLastReader = FALSE;
+ }
+
+ // Sanity checks
+ _ASSERTE((dwKnownState & WRITER) == 0);
+ _ASSERTE(dwKnownState & READERS_MASK);
+
+ dwCurrentState = RWInterlockedCompareExchange(&(*ppRWLock)->_dwState,
+ (dwKnownState + dwModifyState),
+ dwKnownState);
+ } while(dwCurrentState != dwKnownState);
+
+ // Check for last reader
+ if(fLastReader)
+ {
+ // Check for waiting writers
+ if(dwKnownState & WAITING_WRITERS_MASK)
+ {
+ _ASSERTE((*ppRWLock)->_dwState & WRITER_SIGNALED);
+ _ASSERTE(hWriterEvent);
+ RWSetEvent(hWriterEvent);
+ }
+ // Check for waiting readers
+ else if(dwKnownState & WAITING_READERS_MASK)
+ {
+ _ASSERTE((*ppRWLock)->_dwState & READER_SIGNALED);
+ _ASSERTE(hReaderEvent);
+ RWSetEvent(hReaderEvent);
+ }
+ // Check for the need to release events
+ else if(fCacheEvents)
+ {
+ (*ppRWLock)->ReleaseEvents();
+ }
+ }
+
+ // Recycle lock entry
+ RecycleLockEntry(pLockEntry);
+
+ Thread *pThread = GetThread();
+ TESTHOOKCALL(AppDomainCanBeUnloaded(pThread->GetDomain()->GetId().m_dwId,FALSE));
+
+ if (pThread->IsAbortRequested()) {
+ pThread->HandleThreadAbort();
+ }
+ }
+ }
+ else
+ {
+ if(fBreakOnErrors) // fBreakOnErrors == FALSE so will be optimized out.
+ {
+ _ASSERTE(!"Attempt to release reader lock on a wrong thread");
+ DebugBreak();
+ }
+ COMPlusThrowWin32(ERROR_NOT_OWNER);
+ }
+ }
+
+ return;
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticDoUpgradeToWriterLockPublic public
+//
+// Synopsis: Public access to StaticUpgradeToWriterLock
+//
+//
+//+-------------------------------------------------------------------
+FCIMPL3(void, CRWLock::StaticDoUpgradeToWriterLockPublic, CRWLock *pRWLockUNSAFE, LockCookie * pLockCookie, DWORD dwDesiredTimeout)
+{
+ FCALL_CONTRACT;
+
+ if (pRWLockUNSAFE == NULL)
+ {
+ FCThrowVoid(kNullReferenceException);
+ }
+
+ OBJECTREF pRWLock = ObjectToOBJECTREF((Object*)pRWLockUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_1(pRWLock);
+ GCPROTECT_BEGININTERIOR (pLockCookie)
+
+ StaticUpgradeToWriterLock((CRWLock**)&pRWLock, pLockCookie, dwDesiredTimeout);
+
+ GCPROTECT_END ();
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticUpgradeToWriterLock private
+//
+// Synopsis: Upgrades to a writer lock. The cookie it fills in records
+// the writer sequence number, so intervening writes can later be
+// detected via StaticAnyWritersSince
+//
+//+-------------------------------------------------------------------
+
+void CRWLock::StaticUpgradeToWriterLock(
+ CRWLock **ppRWLock,
+ LockCookie *pLockCookie,
+ DWORD dwDesiredTimeout)
+
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ DWORD dwThreadID = GetThread()->GetThreadId();
+
+ // Check if the thread is already a writer
+ if((*ppRWLock)->_dwWriterID == dwThreadID)
+ {
+ // Update cookie state
+ pLockCookie->dwFlags = UPGRADE_COOKIE | COOKIE_WRITER;
+ pLockCookie->wWriterLevel = (*ppRWLock)->_wWriterLevel;
+
+ // Acquire the writer lock again
+ StaticAcquireWriterLock(ppRWLock, dwDesiredTimeout);
+ }
+ else
+ {
+ BOOL fAcquireWriterLock;
+ LockEntry *pLockEntry = (*ppRWLock)->GetLockEntry();
+ if(pLockEntry == NULL)
+ {
+ fAcquireWriterLock = TRUE;
+ pLockCookie->dwFlags = UPGRADE_COOKIE | COOKIE_NONE;
+ }
+ else
+ {
+ // Sanity check
+ _ASSERTE((*ppRWLock)->_dwState & READERS_MASK);
+ _ASSERTE(pLockEntry->wReaderLevel);
+
+ // Save lock state in the cookie
+ pLockCookie->dwFlags = UPGRADE_COOKIE | COOKIE_READER;
+ pLockCookie->wReaderLevel = pLockEntry->wReaderLevel;
+ pLockCookie->dwWriterSeqNum = (*ppRWLock)->_dwWriterSeqNum;
+
+ // If there is only one reader, try to convert reader to a writer
+ DWORD dwKnownState = RWInterlockedCompareExchange(&(*ppRWLock)->_dwState,
+ WRITER,
+ READER);
+ if(dwKnownState == READER)
+ {
+ // Thread is no longer a reader
+ Thread* pThread = GetThread();
+ _ASSERTE (pThread);
+ _ASSERTE (pThread->m_dwLockCount >= pLockEntry->wReaderLevel);
+ ASSERT_UNLESS_NO_DEBUG_STATE(__pClrDebugState->GetLockCount(kDbgStateLockType_User) >= pLockEntry->wReaderLevel);
+ pThread->m_dwLockCount -= pLockEntry->wReaderLevel;
+ USER_LOCK_RELEASED_MULTIPLE(pLockEntry->wReaderLevel, GetPtrForLockContract(ppRWLock));
+ pLockEntry->wReaderLevel = 0;
+ RecycleLockEntry(pLockEntry);
+
+ // Thread is a writer
+ (*ppRWLock)->_dwWriterID = dwThreadID;
+ (*ppRWLock)->_wWriterLevel = 1;
+ INCTHREADLOCKCOUNT();
+ USER_LOCK_TAKEN(GetPtrForLockContract(ppRWLock));
+ ++(*ppRWLock)->_dwWriterSeqNum;
+ fAcquireWriterLock = FALSE;
+
+ // No intervening writes
+#if RWLOCK_STATISTICS
+ ++(*ppRWLock)->_dwWriterEntryCount;
+#endif
+ }
+ else
+ {
+ // Release the reader lock
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+ _ASSERTE (pThread->m_dwLockCount >= (DWORD)(pLockEntry->wReaderLevel - 1));
+ ASSERT_UNLESS_NO_DEBUG_STATE(__pClrDebugState->GetLockCount(kDbgStateLockType_User) >=
+ (DWORD)(pLockEntry->wReaderLevel - 1));
+ pThread->m_dwLockCount -= (pLockEntry->wReaderLevel - 1);
+ USER_LOCK_RELEASED_MULTIPLE(pLockEntry->wReaderLevel - 1, GetPtrForLockContract(ppRWLock));
+ pLockEntry->wReaderLevel = 1;
+ StaticReleaseReaderLock(ppRWLock);
+ fAcquireWriterLock = TRUE;
+ }
+ }
+
+ // Check for the need to acquire the writer lock
+ if(fAcquireWriterLock)
+ {
+
+ // Contention on the lock is known at this point, so the thread
+ // will most probably block to acquire the writer lock
+
+ EX_TRY
+ {
+ StaticAcquireWriterLock(ppRWLock, dwDesiredTimeout);
+ }
+ EX_CATCH
+ {
+ // Invalidate cookie
+ DWORD dwFlags = pLockCookie->dwFlags;
+ pLockCookie->dwFlags = INVALID_COOKIE;
+
+ StaticRecoverLock(ppRWLock, pLockCookie, dwFlags & COOKIE_READER);
+
+ EX_RETHROW;
+ }
+ EX_END_CATCH_UNREACHABLE
+ }
+ }
+
+
+ // Update the validation fields of the cookie
+ pLockCookie->dwThreadID = dwThreadID;
+
+ return;
+}
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticDowngradeFromWriterLock public
+//
+// Synopsis: Downgrades from a writer lock.
+//
+//+-------------------------------------------------------------------
+
+inline CRWLock* GetLock(OBJECTREF orLock)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return (CRWLock*)OBJECTREFToObject(orLock);
+}
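+
+// Note: callers re-derive the CRWLock* via GetLock(pRWLock) on each use
+// instead of caching the raw pointer: CRWLock is a GC object and can move,
+// so the pointer must be recomputed from the protected OBJECTREF after any
+// operation that may trigger a GC.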
+
+FCIMPL2(void, CRWLock::StaticDowngradeFromWriterLock, CRWLock *pRWLockUNSAFE, LockCookie* pLockCookie)
+{
+ FCALL_CONTRACT;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+ DWORD dwThreadID = GetThread()->GetThreadId();
+
+ if (pRWLockUNSAFE == NULL)
+ {
+ FCThrowVoid(kNullReferenceException);
+ }
+
+ if( NULL == pLockCookie) {
+ FCThrowVoid(kNullReferenceException);
+ }
+
+ OBJECTREF pRWLock = ObjectToOBJECTREF((Object*)pRWLockUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_1(pRWLock);
+
+ if (GetLock(pRWLock)->_dwWriterID != dwThreadID)
+ {
+ COMPlusThrowWin32(ERROR_NOT_OWNER);
+ }
+
+ // Validate cookie
+ DWORD dwStatus;
+ if(((pLockCookie->dwFlags & INVALID_COOKIE) == 0) &&
+ (pLockCookie->dwThreadID == dwThreadID))
+ {
+ DWORD dwFlags = pLockCookie->dwFlags;
+ pLockCookie->dwFlags = INVALID_COOKIE;
+
+ // Check if the thread was a reader
+ if(dwFlags & COOKIE_READER)
+ {
+ // Sanity checks
+ _ASSERTE(GetLock(pRWLock)->_wWriterLevel == 1);
+
+ LockEntry *pLockEntry = GetLock(pRWLock)->FastGetOrCreateLockEntry();
+ if(pLockEntry)
+ {
+ DWORD dwCurrentState, dwKnownState, dwModifyState;
+ CLREvent* hReaderEvent = NULL;
+
+ // Downgrade to a reader
+ GetLock(pRWLock)->_dwWriterID = 0;
+ GetLock(pRWLock)->_wWriterLevel = 0;
+ DECTHREADLOCKCOUNT ();
+ USER_LOCK_RELEASED(GetPtrForLockContract((CRWLock**)&pRWLock));
+ dwCurrentState = GetLock(pRWLock)->_dwState;
+ do
+ {
+ dwKnownState = dwCurrentState;
+ dwModifyState = READER - WRITER;
+ if(dwKnownState & WAITING_READERS_MASK)
+ {
+ HRESULT hr;
+ hReaderEvent = GetLock(pRWLock)->GetReaderEvent(&hr);
+ if(hReaderEvent == NULL)
+ {
+ LOG((LF_SYNC, LL_WARNING,
+ "DowngradeFromWriterLock failed to create "
+ "reader event for RWLock 0x%x\n", GetLock(pRWLock)));
+ RWSleep(100);
+ dwCurrentState = GetLock(pRWLock)->_dwState;
+ dwKnownState = 0;
+ _ASSERTE(dwCurrentState != dwKnownState);
+ continue;
+ }
+ dwModifyState += READER_SIGNALED;
+ }
+
+ // Sanity checks
+ _ASSERTE((dwKnownState & READERS_MASK) == 0);
+
+ dwCurrentState = RWInterlockedCompareExchange(&GetLock(pRWLock)->_dwState,
+ (dwKnownState + dwModifyState),
+ dwKnownState);
+ } while(dwCurrentState != dwKnownState);
+
+ // Check for waiting readers
+ if(dwKnownState & WAITING_READERS_MASK)
+ {
+ _ASSERTE(GetLock(pRWLock)->_dwState & READER_SIGNALED);
+ _ASSERTE(hReaderEvent);
+ RWSetEvent(hReaderEvent);
+ }
+
+ // Restore reader nesting level
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+ _ASSERTE (pThread->m_dwLockCount >= pLockEntry->wReaderLevel);
+ ASSERT_UNLESS_NO_DEBUG_STATE(__pClrDebugState->GetLockCount(kDbgStateLockType_User) >=
+ pLockEntry->wReaderLevel);
+ pThread->m_dwLockCount -= pLockEntry->wReaderLevel;
+ USER_LOCK_RELEASED_MULTIPLE(pLockEntry->wReaderLevel, GetPtrForLockContract((CRWLock**)&pRWLock));
+ pLockEntry->wReaderLevel = pLockCookie->wReaderLevel;
+ pThread->m_dwLockCount += pLockEntry->wReaderLevel;
+ USER_LOCK_TAKEN_MULTIPLE(pLockEntry->wReaderLevel, GetPtrForLockContract((CRWLock**)&pRWLock));
+ #ifdef RWLOCK_STATISTICS
+ RWInterlockedIncrement(&GetLock(pRWLock)->_dwReaderEntryCount);
+ #endif
+ }
+ else
+ {
+ // Removed assert, as thread abort can occur normally
+ dwStatus = RWLOCK_RECOVERY_FAILURE;
+ goto ThrowException;
+ }
+ }
+ else if(dwFlags & (COOKIE_WRITER | COOKIE_NONE))
+ {
+ // Release the writer lock
+ StaticReleaseWriterLock((CRWLock**)&pRWLock);
+ _ASSERTE((GetLock(pRWLock)->_dwWriterID != GetThread()->GetThreadId()) ||
+ (dwFlags & COOKIE_WRITER));
+ }
+ }
+ else
+ {
+ dwStatus = E_INVALIDARG;
+ThrowException:
+ COMPlusThrowWin32(dwStatus);
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ // Update the validation fields of the cookie
+ pLockCookie->dwThreadID = dwThreadID;
+
+}
+FCIMPLEND
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticDoReleaseLock private
+//
+// Synopsis: Releases the lock held by the current thread
+//
+//+-------------------------------------------------------------------
+
+FCIMPL2(void, CRWLock::StaticDoReleaseLock, CRWLock *pRWLockUNSAFE, LockCookie * pLockCookie)
+{
+ FCALL_CONTRACT;
+
+ if (pRWLockUNSAFE == NULL)
+ {
+ FCThrowVoid(kNullReferenceException);
+ }
+
+ DWORD dwThreadID = GetThread()->GetThreadId();
+
+ OBJECTREF pRWLock = ObjectToOBJECTREF((Object*)pRWLockUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT, pRWLock);
+
+ // note that we cannot use this holder in FCALLs outside our HMF since it breaks the epilog walker on x86!
+ ThreadPreventAbortHolder preventAbortIn;
+
+ GCPROTECT_BEGININTERIOR (pLockCookie)
+
+ // Check if the thread is a writer
+ if(GetLock(pRWLock)->_dwWriterID == dwThreadID)
+ {
+ // Save lock state in the cookie
+ pLockCookie->dwFlags = RELEASE_COOKIE | COOKIE_WRITER;
+ pLockCookie->dwWriterSeqNum = GetLock(pRWLock)->_dwWriterSeqNum;
+ pLockCookie->wWriterLevel = GetLock(pRWLock)->_wWriterLevel;
+
+ // Release the writer lock
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+ _ASSERTE (pThread->m_dwLockCount >= (DWORD)(GetLock(pRWLock)->_wWriterLevel - 1));
+ ASSERT_UNLESS_NO_DEBUG_STATE(__pClrDebugState->GetLockCount(kDbgStateLockType_User) >=
+ (DWORD)(GetLock(pRWLock)->_wWriterLevel - 1));
+ pThread->m_dwLockCount -= (GetLock(pRWLock)->_wWriterLevel - 1);
+ USER_LOCK_RELEASED_MULTIPLE(GetLock(pRWLock)->_wWriterLevel - 1, GetPtrForLockContract((CRWLock**)&pRWLock));
+ GetLock(pRWLock)->_wWriterLevel = 1;
+ StaticReleaseWriterLock((CRWLock**)&pRWLock);
+ }
+ else
+ {
+ LockEntry *pLockEntry = GetLock(pRWLock)->GetLockEntry();
+ if(pLockEntry)
+ {
+ // Sanity check
+ _ASSERTE(GetLock(pRWLock)->_dwState & READERS_MASK);
+ _ASSERTE(pLockEntry->wReaderLevel);
+
+ // Save lock state in the cookie
+ pLockCookie->dwFlags = RELEASE_COOKIE | COOKIE_READER;
+ pLockCookie->wReaderLevel = pLockEntry->wReaderLevel;
+ pLockCookie->dwWriterSeqNum = GetLock(pRWLock)->_dwWriterSeqNum;
+
+ // Release the reader lock
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+ _ASSERTE (pThread->m_dwLockCount >= (DWORD)(pLockEntry->wReaderLevel - 1));
+ ASSERT_UNLESS_NO_DEBUG_STATE(__pClrDebugState->GetLockCount(kDbgStateLockType_User) >=
+ (DWORD)(pLockEntry->wReaderLevel - 1));
+ pThread->m_dwLockCount -= (pLockEntry->wReaderLevel - 1);
+ USER_LOCK_RELEASED_MULTIPLE(pLockEntry->wReaderLevel - 1, GetPtrForLockContract((CRWLock**)&pRWLock));
+ pLockEntry->wReaderLevel = 1;
+ StaticReleaseReaderLock((CRWLock**)&pRWLock);
+ }
+ else
+ {
+ pLockCookie->dwFlags = RELEASE_COOKIE | COOKIE_NONE;
+ }
+ }
+
+ GCPROTECT_END ();
+
+ HELPER_METHOD_FRAME_END();
+
+ // Update the validation fields of the cookie
+ pLockCookie->dwThreadID = dwThreadID;
+}
+FCIMPLEND
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticRestoreLockPublic public
+//
+// Synopsis: Public access to StaticRestoreLock
+//
+//
+//+-------------------------------------------------------------------
+
+FCIMPL2(void, CRWLock::StaticRestoreLockPublic, CRWLock *pRWLockUNSAFE, LockCookie* pLockCookie)
+{
+ FCALL_CONTRACT;
+
+ if (pRWLockUNSAFE == NULL) {
+ FCThrowVoid(kNullReferenceException);
+ }
+
+ if( NULL == pLockCookie) {
+ FCThrowVoid(kNullReferenceException);
+ }
+
+ OBJECTREF pRWLock = ObjectToOBJECTREF((Object*)pRWLockUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_1(pRWLock);
+
+ StaticRestoreLock((CRWLock**)&pRWLock, pLockCookie);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticRestoreLock private
+//
+// Synopsis: Restores the lock held by the current thread
+//
+//+-------------------------------------------------------------------
+
+void CRWLock::StaticRestoreLock(
+ CRWLock **ppRWLock,
+ LockCookie *pLockCookie)
+{
+ CONTRACTL
+ {
+ THROWS;
+ CAN_TAKE_LOCK;
+ GC_TRIGGERS; // CRWLock::StaticAcquireWriterLock can trigger GC
+ }
+ CONTRACTL_END;
+
+ // Validate cookie
+ DWORD dwThreadID = GetThread()->GetThreadId();
+ DWORD dwFlags = pLockCookie->dwFlags;
+ if(pLockCookie->dwThreadID == dwThreadID)
+ {
+ if (((*ppRWLock)->_dwWriterID == dwThreadID) || ((*ppRWLock)->GetLockEntry() != NULL))
+ {
+ COMPlusThrow(kSynchronizationLockException, W("Arg_RWLockRestoreException"));
+ }
+
+ // Check for the no contention case
+ pLockCookie->dwFlags = INVALID_COOKIE;
+ if(dwFlags & COOKIE_WRITER)
+ {
+ if(RWInterlockedCompareExchange(&(*ppRWLock)->_dwState, WRITER, 0) == 0)
+ {
+ // Restore writer nesting level
+ (*ppRWLock)->_dwWriterID = dwThreadID;
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+ _ASSERTE (pThread->m_dwLockCount >= (*ppRWLock)->_wWriterLevel);
+ ASSERT_UNLESS_NO_DEBUG_STATE(__pClrDebugState->GetLockCount(kDbgStateLockType_User) >=
+ (*ppRWLock)->_wWriterLevel);
+ pThread->m_dwLockCount -= (*ppRWLock)->_wWriterLevel;
+ USER_LOCK_RELEASED_MULTIPLE((*ppRWLock)->_wWriterLevel, GetPtrForLockContract(ppRWLock));
+ (*ppRWLock)->_wWriterLevel = pLockCookie->wWriterLevel;
+ pThread->m_dwLockCount += (*ppRWLock)->_wWriterLevel;
+ USER_LOCK_TAKEN_MULTIPLE((*ppRWLock)->_wWriterLevel, GetPtrForLockContract(ppRWLock));
+ ++(*ppRWLock)->_dwWriterSeqNum;
+#ifdef RWLOCK_STATISTICS
+ ++(*ppRWLock)->_dwWriterEntryCount;
+#endif
+ goto LNormalReturn;
+ }
+ }
+ else if(dwFlags & COOKIE_READER)
+ {
+ LockEntry *pLockEntry = (*ppRWLock)->FastGetOrCreateLockEntry();
+ if(pLockEntry)
+ {
+ // This thread should not already be a reader
+ // else bad things can happen
+ _ASSERTE(pLockEntry->wReaderLevel == 0);
+ DWORD dwKnownState = (*ppRWLock)->_dwState;
+ if(dwKnownState < READERS_MASK)
+ {
+ DWORD dwCurrentState = RWInterlockedCompareExchange(&(*ppRWLock)->_dwState,
+ (dwKnownState + READER),
+ dwKnownState);
+ if(dwCurrentState == dwKnownState)
+ {
+ // Restore reader nesting level
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+ pLockEntry->wReaderLevel = pLockCookie->wReaderLevel;
+ pThread->m_dwLockCount += pLockEntry->wReaderLevel;
+ USER_LOCK_TAKEN_MULTIPLE(pLockEntry->wReaderLevel, GetPtrForLockContract(ppRWLock));
+#ifdef RWLOCK_STATISTICS
+ RWInterlockedIncrement(&(*ppRWLock)->_dwReaderEntryCount);
+#endif
+ goto LNormalReturn;
+ }
+ }
+
+ // Recycle the lock entry for the slow case
+ (*ppRWLock)->FastRecycleLockEntry(pLockEntry);
+ }
+ else
+ {
+ // Ignore the error and try again below. Maybe the thread will luck
+ // out the second time
+ }
+ }
+ else if(dwFlags & COOKIE_NONE)
+ {
+ goto LNormalReturn;
+ }
+
+ // Contention on the lock is known at this point, so the thread
+ // will most probably block to acquire the lock below
+ThrowException:
+ if((dwFlags & INVALID_COOKIE) == 0)
+ {
+ StaticRecoverLock(ppRWLock, pLockCookie, dwFlags);
+ }
+ else
+ {
+ COMPlusThrowWin32(E_INVALIDARG);
+ }
+
+ goto LNormalReturn;
+ }
+ else
+ {
+ dwFlags = INVALID_COOKIE;
+ goto ThrowException;
+ }
+
+LNormalReturn:
+ return;
+}
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticPrivateInitialize
+//
+// Synopsis: Initializes the lock
+//
+//+-------------------------------------------------------------------
+FCIMPL1(void, CRWLock::StaticPrivateInitialize, CRWLock *pRWLock)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_1(pRWLock);
+
+ // Run the constructor on the GC-allocated space.
+ // CRWLock's constructor can throw an exception.
+#ifndef _PREFAST_
+ // Prefast falsely complains of memory leak.
+ CRWLock *pTemp;
+ pTemp = new (pRWLock) CRWLock();
+ _ASSERTE(pTemp == pRWLock);
+#endif
+
+ // Catch GC holes
+ VALIDATE_LOCK(pRWLock);
+
+ HELPER_METHOD_FRAME_END();
+ return;
+}
+FCIMPLEND
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticPrivateDestruct
+//
+// Synopsis: Destroys the lock's events
+//+-------------------------------------------------------------------
+FCIMPL1(void, CRWLock::StaticPrivateDestruct, CRWLock *pRWLock)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT, pRWLock);
+
+ // Fix a handle-recycling security hole by ensuring we don't delete
+ // the events more than once. After deletion (for now, assuming ONE
+ // FINALIZER THREAD), make the object essentially unusable by setting
+ // each handle to INVALID_HANDLE_VALUE (unusable) rather than NULL
+ // (uninitialized).
+
+ if ((pRWLock->_hWriterEvent != INVALID_HANDLE_VALUE) && (pRWLock->_hReaderEvent != INVALID_HANDLE_VALUE))
+ {
+ // Note, this still allows concurrent event consumers (such as StaticAcquireReaderLock)
+ // to Set and/or Wait on non-events. There still exists a security hole here.
+ if(pRWLock->_hWriterEvent)
+ {
+ CLREvent *h = (CLREvent *) FastInterlockExchangePointer((PVOID *)&(pRWLock->_hWriterEvent), INVALID_HANDLE_VALUE);
+ delete h;
+ }
+ if(pRWLock->_hReaderEvent)
+ {
+ CLREvent *h = (CLREvent *) FastInterlockExchangePointer((PVOID *)&(pRWLock->_hReaderEvent), INVALID_HANDLE_VALUE);
+ delete h;
+ }
+
+ // There is no LockEntry for this lock.
+ if (pRWLock->_dwState != 0)
+ {
+ // Recycle LockEntry on threads
+ ThreadStoreLockHolder tsl;
+
+ // Take ThreadStore lock and walk over every thread in the process
+ Thread *thread = NULL;
+ while ((thread = ThreadStore::s_pThreadStore->GetAllThreadList(thread,
+ Thread::TS_Unstarted|Thread::TS_Dead|Thread::TS_Detached, 0))
+ != NULL)
+ {
+ LockEntry *pLockEntry;
+ {
+ CrstHolder rwl(&s_RWLockCrst);
+ pLockEntry = pRWLock->GetLockEntry(thread);
+ }
+ if (pLockEntry)
+ {
+ // The entry does not belong to this lock anymore
+ pLockEntry->dwLLockID = 0;
+ pLockEntry->wReaderLevel = 0;
+ }
+ }
+ }
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticGetWriterSeqNum
+//
+// Synopsis: Returns the current sequence number
+//
+//+-------------------------------------------------------------------
+FCIMPL1(INT32, CRWLock::StaticGetWriterSeqNum, CRWLock *pRWLock)
+{
+ FCALL_CONTRACT;
+
+ if (pRWLock == NULL)
+ {
+ FCThrow(kNullReferenceException);
+ }
+
+ return(pRWLock->_dwWriterSeqNum);
+}
+FCIMPLEND
+
+
+//+-------------------------------------------------------------------
+//
+// Method: CRWLock::StaticAnyWritersSince
+//
+// Synopsis: Returns TRUE if there were writers since the given
+// sequence number
+//
+//+-------------------------------------------------------------------
+FCIMPL2(FC_BOOL_RET, CRWLock::StaticAnyWritersSince, CRWLock *pRWLock, DWORD dwSeqNum)
+{
+ FCALL_CONTRACT;
+
+ if (pRWLock == NULL)
+ {
+ FCThrow(kNullReferenceException);
+ }
+
+
+ if(pRWLock->_dwWriterID == GetThread()->GetThreadId())
+ ++dwSeqNum;
+
+ FC_RETURN_BOOL(pRWLock->_dwWriterSeqNum > dwSeqNum);
+}
+FCIMPLEND
+
+struct RWLockIterator
+{
+ IHostTask **m_Owner;
+ DWORD m_Capacity;
+ DWORD m_index;
+};
+
+OBJECTHANDLE CRWLock::GetObjectHandle()
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (_hObjectHandle == NULL)
+ {
+ OBJECTREF obj = ObjectToOBJECTREF((Object*)this);
+ OBJECTHANDLE handle = GetAppDomain()->CreateLongWeakHandle(obj);
+ if (RWInterlockedCompareExchangePointer((PVOID*)&_hObjectHandle, handle, NULL) != NULL)
+ {
+ DestroyLongWeakHandle(handle);
+ }
+ }
+ return _hObjectHandle;
+}
+
+// CRWLock::CreateOwnerIterator can return E_OUTOFMEMORY
+//
+HRESULT CRWLock::CreateOwnerIterator(SIZE_T *pIterator)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ *pIterator = 0;
+ if (_dwState == 0) {
+ return S_OK;
+ }
+ NewHolder<RWLockIterator> IteratorHolder(new (nothrow) RWLockIterator);
+ RWLockIterator *pRWLockIterator = IteratorHolder;
+ if (pRWLockIterator == NULL) {
+ return E_OUTOFMEMORY;
+ }
+ // Writer can be handled fast
+ if (_dwState & WRITER) {
+ DWORD writerID = _dwWriterID;
+ if (writerID != 0)
+ {
+ pRWLockIterator->m_Capacity = 1;
+ pRWLockIterator->m_index = 0;
+ pRWLockIterator->m_Owner = new (nothrow) IHostTask*[1];
+ if (pRWLockIterator->m_Owner == NULL) {
+ return E_OUTOFMEMORY;
+ }
+ Thread *pThread = g_pThinLockThreadIdDispenser->IdToThreadWithValidation(writerID);
+ if (pThread == NULL)
+ {
+ // The writer has gone away; free the owner array, since the
+ // iterator holder above does not own it
+ delete[] pRWLockIterator->m_Owner;
+ return S_OK;
+ }
+ IteratorHolder.SuppressRelease();
+ pRWLockIterator->m_Owner[0] = pThread->GetHostTaskWithAddRef();
+ *pIterator = (SIZE_T)pRWLockIterator;
+ return S_OK;
+ }
+ }
+ if (_dwState == 0) {
+ return S_OK;
+ }
+ pRWLockIterator->m_Capacity = 4;
+ pRWLockIterator->m_index = 0;
+ pRWLockIterator->m_Owner = new (nothrow) IHostTask*[pRWLockIterator->m_Capacity];
+ if (pRWLockIterator->m_Owner == NULL) {
+ return E_OUTOFMEMORY;
+ }
+
+ HRESULT hr = S_OK;
+
+ NewArrayHolder<IHostTask*> OwnerHolder(pRWLockIterator->m_Owner);
+
+ // Take ThreadStore lock and walk over every thread in the process
+ Thread *thread = NULL;
+ while ((thread = ThreadStore::s_pThreadStore->GetAllThreadList(thread,
+ Thread::TS_Unstarted|Thread::TS_Dead|Thread::TS_Detached, 0))
+ != NULL)
+ {
+ LockEntry *pLockEntry;
+ {
+ CrstHolder rwl(&s_RWLockCrst);
+ pLockEntry = GetLockEntry(thread);
+ }
+ if (pLockEntry && pLockEntry->wReaderLevel >= 1) {
+ if (pRWLockIterator->m_index == pRWLockIterator->m_Capacity) {
+ IHostTask** newArray = new (nothrow) IHostTask*[2*pRWLockIterator->m_Capacity];
+ if (newArray == NULL) {
+ hr = E_OUTOFMEMORY;
+ break;
+ }
+ memcpy (newArray,pRWLockIterator->m_Owner,pRWLockIterator->m_Capacity*sizeof(IHostTask*));
+ pRWLockIterator->m_Owner = newArray;
+ pRWLockIterator->m_Capacity *= 2;
+ OwnerHolder = pRWLockIterator->m_Owner;
+ }
+ IHostTask *pHostTask = thread->GetHostTaskWithAddRef();
+ if (pHostTask)
+ {
+ pRWLockIterator->m_Owner[pRWLockIterator->m_index++] = pHostTask;
+ }
+ }
+ }
+ if (FAILED(hr))
+ {
+ for (DWORD i = 0; i < pRWLockIterator->m_index; i ++)
+ {
+ if (pRWLockIterator->m_Owner[i])
+ {
+ pRWLockIterator->m_Owner[i]->Release();
+ }
+ }
+ }
+ if (SUCCEEDED(hr)) {
+ IteratorHolder.SuppressRelease();
+ OwnerHolder.SuppressRelease();
+ pRWLockIterator->m_Capacity = pRWLockIterator->m_index;
+ pRWLockIterator->m_index = 0;
+ *pIterator = (SIZE_T)pRWLockIterator;
+ }
+
+ return hr;
+}
+
+void CRWLock::GetNextOwner(SIZE_T Iterator, IHostTask **ppOwnerHostTask)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ *ppOwnerHostTask = NULL;
+ if (Iterator) {
+ RWLockIterator* tmp = (RWLockIterator*)Iterator;
+ if (tmp->m_index < tmp->m_Capacity) {
+ *ppOwnerHostTask = tmp->m_Owner[tmp->m_index];
+ tmp->m_index ++;
+ }
+ }
+}
+
+void CRWLock::DeleteOwnerIterator(SIZE_T Iterator)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+
+ if (Iterator) {
+ RWLockIterator* pIterator = (RWLockIterator*)Iterator;
+ while (pIterator->m_index < pIterator->m_Capacity) {
+ IHostTask *pHostTask = pIterator->m_Owner[pIterator->m_index];
+ if (pHostTask)
+ {
+ pHostTask->Release();
+ }
+ pIterator->m_index ++;
+ }
+ delete[] pIterator->m_Owner;
+ delete pIterator;
+ }
+}
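+
+// A minimal sketch of the intended host-side protocol for the iterator trio
+// above (illustrative only; everything except the CRWLock methods is
+// hypothetical):
+//
+//     SIZE_T iter = 0;
+//     if (SUCCEEDED(pLock->CreateOwnerIterator(&iter)) && iter != 0)
+//     {
+//         IHostTask *pTask = NULL;
+//         for (CRWLock::GetNextOwner(iter, &pTask);
+//              pTask != NULL;
+//              CRWLock::GetNextOwner(iter, &pTask))
+//         {
+//             // ... report the owning task to the host ...
+//             pTask->Release();   // balance GetHostTaskWithAddRef
+//         }
+//         CRWLock::DeleteOwnerIterator(iter); // releases any owner refs
+//     }                                       // never handed out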
+#endif // FEATURE_RWLOCK
diff --git a/src/vm/rwlock.h b/src/vm/rwlock.h
new file mode 100644
index 0000000000..14d8a7da9b
--- /dev/null
+++ b/src/vm/rwlock.h
@@ -0,0 +1,286 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+//+-------------------------------------------------------------------
+//
+// File: RWLock.h
+//
+// Contents: Reader writer lock implementation that supports the
+// following features
+// 1. Cheap enough to be used in large numbers
+// such as per object synchronization.
+// 2. Supports timeout. This is a valuable feature
+// to detect deadlocks
+// 3. Supports caching of events. This allows
+// the events to be moved from the least contentious
+// regions to the most contentious ones.
+// In other words, the number of events needed by
+// reader-writer locks is bounded by the number
+// of threads in the process.
+// 4. Supports nested locks by readers and writers
+// 5. Supports spin counts for avoiding context switches
+// on multiprocessor machines.
+// 6. Supports functionality for upgrading to a writer
+// lock with a return argument that indicates
+// intermediate writes. Downgrading from a writer
+// lock restores the state of the lock.
+// 7. Supports functionality to Release Lock for calling
+// app code. RestoreLock restores the lock state and
+// indicates intermediate writes.
+// 8. Recovers from most common failures, such as failed
+// event creation. In other words, the lock maintains
+// consistent internal state and remains usable
+//
+// Classes: CRWLock,
+// CStaticRWLock
+//
+//--------------------------------------------------------------------
+
+#ifdef FEATURE_RWLOCK
+#ifndef _RWLOCK_H_
+#define _RWLOCK_H_
+#include "common.h"
+#include "threads.h"
+
+// If you do define this, make sure you define this in managed as well.
+//#define RWLOCK_STATISTICS 0
+
+extern DWORD gdwDefaultTimeout;
+extern DWORD gdwDefaultSpinCount;
+
+
+//+-------------------------------------------------------------------
+//
+// Struct: LockCookie
+//
+// Synopsis: Lock cookies returned to the client
+//
+//+-------------------------------------------------------------------
+typedef struct {
+ DWORD dwFlags;
+ DWORD dwWriterSeqNum;
+ WORD wReaderLevel;
+ WORD wWriterLevel;
+ DWORD dwThreadID;
+} LockCookie;
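+
+// A cookie is filled in by the release/upgrade paths and handed back to the
+// restore/downgrade paths, which use it to re-establish the saved nesting
+// levels. Illustrative round trip as driven from the managed
+// ReaderWriterLock class (sketch only):
+//
+//     LockCookie cookie = rwl.ReleaseLock(); // saves reader/writer levels
+//     // ... run app code with the lock fully released ...
+//     rwl.RestoreLock(ref cookie);           // reacquires the saved state
+//
+// dwWriterSeqNum additionally lets AnyWritersSince detect writers that ran
+// in between.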
+
+//+-------------------------------------------------------------------
+//
+// Class: CRWLock
+//
+// Synopsis: Class that implements the reader-writer lock.
+//
+//+-------------------------------------------------------------------
+class CRWLock : public Object
+{
+ friend class MscorlibBinder;
+
+public:
+ // Constructor
+ CRWLock();
+
+ // Cleanup
+ void Cleanup();
+
+ OBJECTHANDLE GetObjectHandle();
+ HRESULT CreateOwnerIterator(SIZE_T *pIterator);
+ static void GetNextOwner(SIZE_T Iterator, IHostTask **ppOwnerHostTask);
+ static void DeleteOwnerIterator(SIZE_T Iterator);
+
+ // Statics that do the core work
+ static FCDECL1 (void, StaticPrivateInitialize, CRWLock *pRWLock);
+ static FCDECL1 (void, StaticPrivateDestruct, CRWLock *pRWLock);
+ static FCDECL2 (void, StaticAcquireReaderLockPublic, CRWLock *pRWLock, DWORD dwDesiredTimeout);
+ static FCDECL2 (void, StaticAcquireWriterLockPublic, CRWLock *pRWLock, DWORD dwDesiredTimeout);
+ static FCDECL1 (void, StaticReleaseReaderLockPublic, CRWLock *pRWLock);
+ static FCDECL1 (void, StaticReleaseWriterLockPublic, CRWLock *pRWLock);
+ static FCDECL3 (void, StaticDoUpgradeToWriterLockPublic, CRWLock *pRWLock, LockCookie * pLockCookie, DWORD dwDesiredTimeout);
+ static FCDECL2 (void, StaticDowngradeFromWriterLock, CRWLock *pRWLock, LockCookie* pLockCookie);
+ static FCDECL2 (void, StaticDoReleaseLock, CRWLock *pRWLock, LockCookie * pLockCookie);
+ static FCDECL2 (void, StaticRestoreLockPublic, CRWLock *pRWLock, LockCookie* pLockCookie);
+ static FCDECL1 (FC_BOOL_RET, StaticIsReaderLockHeld, CRWLock *pRWLock);
+ static FCDECL1 (FC_BOOL_RET, StaticIsWriterLockHeld, CRWLock *pRWLock);
+ static FCDECL1 (INT32, StaticGetWriterSeqNum, CRWLock *pRWLock);
+ static FCDECL2 (FC_BOOL_RET, StaticAnyWritersSince, CRWLock *pRWLock, DWORD dwSeqNum);
+private:
+ static void StaticAcquireReaderLock(CRWLock **ppRWLock, DWORD dwDesiredTimeout);
+ static void StaticAcquireWriterLock(CRWLock **ppRWLock, DWORD dwDesiredTimeout);
+ static void StaticReleaseReaderLock(CRWLock **ppRWLock);
+ static void StaticReleaseWriterLock(CRWLock **ppRWLock);
+ static void StaticRecoverLock(CRWLock **ppRWLock, LockCookie *pLockCookie, DWORD dwFlags);
+ static void StaticRestoreLock(CRWLock **ppRWLock, LockCookie *pLockCookie);
+ static void StaticUpgradeToWriterLock(CRWLock **ppRWLock, LockCookie *pLockCookie, DWORD dwDesiredTimeout);
+public:
+ // Assert functions
+#ifdef _DEBUG
+ BOOL AssertWriterLockHeld();
+ BOOL AssertWriterLockNotHeld();
+ BOOL AssertReaderLockHeld();
+ BOOL AssertReaderLockNotHeld();
+ BOOL AssertReaderOrWriterLockHeld();
+ void AssertHeld()
+ {
+ WRAPPER_NO_CONTRACT;
+ AssertWriterLockHeld();
+ }
+ void AssertNotHeld()
+ {
+ WRAPPER_NO_CONTRACT;
+ AssertWriterLockNotHeld();
+ AssertReaderLockNotHeld();
+ }
+#else
+ void AssertWriterLockHeld() { LIMITED_METHOD_CONTRACT; }
+ void AssertWriterLockNotHeld() { LIMITED_METHOD_CONTRACT; }
+ void AssertReaderLockHeld() { LIMITED_METHOD_CONTRACT; }
+ void AssertReaderLockNotHeld() { LIMITED_METHOD_CONTRACT; }
+ void AssertReaderOrWriterLockHeld() { LIMITED_METHOD_CONTRACT; }
+ void AssertHeld() { LIMITED_METHOD_CONTRACT; }
+ void AssertNotHeld() { LIMITED_METHOD_CONTRACT; }
+#endif
+
+ // Helper functions
+#ifdef RWLOCK_STATISTICS
+ DWORD GetReaderEntryCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return(_dwReaderEntryCount);
+ }
+ DWORD GetReaderContentionCount() { LIMITED_METHOD_CONTRACT; return(_dwReaderContentionCount); }
+ DWORD GetWriterEntryCount() { LIMITED_METHOD_CONTRACT; return(_dwWriterEntryCount); }
+ DWORD GetWriterContentionCount() { LIMITED_METHOD_CONTRACT; return(_dwWriterContentionCount); }
+#endif
+ // Static functions
+ static void *operator new(size_t size)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return ::operator new(size);
+ }
+ static void ProcessInit();
+
+ static void SetTimeout(DWORD dwTimeout)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ gdwDefaultTimeout = dwTimeout;
+ }
+ static DWORD GetTimeout()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return(gdwDefaultTimeout);
+ }
+ static void SetSpinCount(DWORD dwSpinCount)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ gdwDefaultSpinCount = g_SystemInfo.dwNumberOfProcessors > 1
+ ? dwSpinCount
+ : 0;
+ }
+ static DWORD GetSpinCount() { LIMITED_METHOD_CONTRACT; return(gdwDefaultSpinCount); }
+
+private:
+ // Private helpers
+ static void ChainEntry(Thread *pThread, LockEntry *pLockEntry);
+ LockEntry *GetLockEntry(Thread *pThread = NULL);
+ LockEntry *FastGetOrCreateLockEntry();
+ LockEntry *SlowGetOrCreateLockEntry(Thread *pThread);
+ void FastRecycleLockEntry(LockEntry *pLockEntry);
+ static void RecycleLockEntry(LockEntry *pLockEntry);
+
+ CLREvent* GetReaderEvent(HRESULT *pHR);
+ CLREvent* GetWriterEvent(HRESULT *pHR);
+ void ReleaseEvents();
+
+ static LONG RWInterlockedCompareExchange(LONG RAW_KEYWORD(volatile) *pvDestination,
+ LONG dwExchange,
+ LONG dwComperand);
+ static void* RWInterlockedCompareExchangePointer(PVOID RAW_KEYWORD(volatile) *pvDestination,
+ PVOID pExchange,
+ PVOID pComparand);
+ static LONG RWInterlockedExchangeAdd(LONG RAW_KEYWORD(volatile) *pvDestination, LONG dwAddState);
+ static LONG RWInterlockedIncrement(LONG RAW_KEYWORD(volatile) *pdwState);
+
+ static DWORD RWWaitForSingleObject(CLREvent* event, DWORD dwTimeout);
+ static void RWSetEvent(CLREvent* event);
+ static void RWResetEvent(CLREvent* event);
+ static void RWSleep(DWORD dwTime);
+
+#if defined(ENABLE_CONTRACTS_IMPL)
+ // The LOCK_TAKEN/RELEASED macros need a "pointer" to the lock object to do
+ // comparisons between takes & releases (and to provide debugging info to the
+ // developer). We can't use "this" (*ppRWLock), because CRWLock is an Object and thus
+ // can move. So we use _dwLLockID instead. It's not exactly unique, but it's
+ // good enough--worst that can happen is if a thread takes RWLock A and erroneously
+ // releases RWLock B (instead of A), we'll fail to catch that if their _dwLLockID's
+ // are the same. On 64 bits, we can use both _dwULockID & _dwLLockID and be unique
+ static void * GetPtrForLockContract(CRWLock ** ppRWLock)
+ {
+#if defined(_WIN64)
+ return (void *)
+ (
+ (
+ ((__int64) ((*ppRWLock)->_dwULockID)) << 32
+ )
+ |
+ (
+ (__int64) ((*ppRWLock)->_dwLLockID)
+ )
+ );
+#else //defined(_WIN64)
+ return LongToPtr((*ppRWLock)->_dwLLockID);
+#endif //defined(_WIN64)
+ }
+#endif //defined(ENABLE_CONTRACTS_IMPL)
+
+ // private new
+ static void *operator new(size_t size, void *pv) { LIMITED_METHOD_CONTRACT; return(pv); }
+
+ // Private data
+ CLREvent *_hWriterEvent;
+ CLREvent *_hReaderEvent;
+ OBJECTHANDLE _hObjectHandle;
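+ // _dwState packs the entire lock state into one interlocked word: the
+ // active reader count, the waiting-reader and waiting-writer counts, the
+ // WRITER flag, the READER_SIGNALED/WRITER_SIGNALED flags and
+ // CACHING_EVENTS. The masks (READERS_MASK and friends) are defined with
+ // the implementation in rwlock.cpp.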
+ Volatile<LONG> _dwState;
+ LONG _dwULockID;
+ LONG _dwLLockID;
+ DWORD _dwWriterID;
+ DWORD _dwWriterSeqNum;
+ WORD _wWriterLevel;
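+ // Note: CRWLock overlays the managed System.Threading.ReaderWriterLock
+ // object (it derives from Object and is accessed through OBJECTREFs), so
+ // the instance fields above must match the managed class layout -- see
+ // also the RWLOCK_STATISTICS warning below.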
+#ifdef RWLOCK_STATISTICS
+ // WARNING: You must explicitly #define RWLOCK_STATISTICS when you build
+ // in both the VM and BCL directories, as the managed class must also
+ // contain these fields!
+ Volatile<LONG> _dwReaderEntryCount;
+ Volatile<LONG> _dwReaderContentionCount;
+ Volatile<LONG> _dwWriterEntryCount;
+ Volatile<LONG> _dwWriterContentionCount;
+ Volatile<LONG> _dwEventsReleasedCount;
+#endif
+
+ // Static data
+ static Volatile<LONG> s_mostRecentULockID;
+ static Volatile<LONG> s_mostRecentLLockID;
+ static CrstStatic s_RWLockCrst;
+};
+
+#ifdef USE_CHECKED_OBJECTREFS
+typedef REF<CRWLock> RWLOCKREF;
+
+#else
+typedef CRWLock* RWLOCKREF;
+#endif
+
+#endif // _RWLOCK_H_
+
+#endif // FEATURE_RWLOCK
+
diff --git a/src/vm/safehandle.cpp b/src/vm/safehandle.cpp
new file mode 100644
index 0000000000..316c2a5002
--- /dev/null
+++ b/src/vm/safehandle.cpp
@@ -0,0 +1,507 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+
+/*============================================================
+**
+** Class: SafeHandle
+**
+**
+** Purpose: The unmanaged implementation of the SafeHandle
+** class
+**
+===========================================================*/
+
+#include "common.h"
+#include "vars.hpp"
+#include "object.h"
+#include "excep.h"
+#include "frames.h"
+#include "eecontract.h"
+#include "mdaassistants.h"
+#include "typestring.h"
+
+WORD SafeHandle::s_IsInvalidHandleMethodSlot = MethodTable::NO_SLOT;
+WORD SafeHandle::s_ReleaseHandleMethodSlot = MethodTable::NO_SLOT;
+
+void SafeHandle::Init()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ // For reliability purposes, we need to eliminate all possible failure
+ // points before making a call to a CER method. The IsInvalidHandle and
+ // ReleaseHandle methods are critical calls that are already prepared (code:
+ // PrepareCriticalFinalizerObject). As a performance optimization, we
+ // call these methods through a fast macro that assumes the method slot
+ // has already been cached. Since figuring out the method slot for these
+ // two methods involves calling .GetMethod, which can fail, we do it
+ // eagerly here rather than at the time of the call, where a .GetMethod
+ // failure could not be tolerated.
+ MethodDesc* pMD = MscorlibBinder::GetMethod(METHOD__SAFE_HANDLE__GET_IS_INVALID);
+ s_IsInvalidHandleMethodSlot = pMD->GetSlot();
+
+ pMD = MscorlibBinder::GetMethod(METHOD__SAFE_HANDLE__RELEASE_HANDLE);
+ s_ReleaseHandleMethodSlot = pMD->GetSlot();
+}
+
+void SafeHandle::AddRef()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INSTANCE_CHECK;
+ } CONTRACTL_END;
+
+ // Cannot use "this" after Release, which toggles the GC mode.
+ SAFEHANDLEREF sh(this);
+
+#ifdef _DEBUG
+ VALIDATEOBJECTREF(sh->m_debugStackTrace);
+#endif
+ _ASSERTE(sh->IsFullyInitialized());
+
+ // To prevent handle recycling security attacks we must enforce the
+ // following invariant: we cannot successfully AddRef a handle on which
+ // we've committed to the process of releasing.
+
+ // We ensure this by never AddRef'ing a handle that is marked closed and
+ // never marking a handle as closed while the ref count is non-zero. For
+ // this to be thread safe we must perform inspection/updates of the two
+ // values as a single atomic operation. We achieve this by storing them both
+ // in a single aligned DWORD and modifying the entire state via interlocked
+ // compare exchange operations.
+
+ // Additionally we have to deal with the problem of the Dispose operation.
+ // We must assume that this operation is directly exposed to untrusted
+ // callers and that malicious callers will try and use what is basically a
+ // Release call to decrement the ref count to zero and free the handle while
+ // it's still in use (the other way a handle recycling attack can be
+ // mounted). We combat this by allowing only one Dispose to operate against
+ // a given safe handle (which balances the creation operation given that
+ // Dispose suppresses finalization). We record the fact that a Dispose has
+ // been requested in the same state field as the ref count and closed state.
+
+ // So the state field ends up looking like this:
+ //
+ //  31                                                       2   1   0
+ // +-----------------------------------------------------------+---+---+
+ // |                         Ref count                         | D | C |
+ // +-----------------------------------------------------------+---+---+
+ //
+ // Where D = 1 means a Dispose has been performed and C = 1 means the
+ // underlying handle has (or will be shortly) released.
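+ // Under this layout, SH_State_Closed and SH_State_Disposed are the two
+ // low bits and SH_RefCountOne (the increment used below) is 1 << 2, so
+ // the count itself occupies bits 2..31.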
+
+ // Might have to perform the following steps multiple times due to
+ // interference from other AddRef's and Release's.
+ INT32 oldState, newState;
+ do {
+
+ // First step is to read the current handle state. We use this as a
+ // basis to decide whether an AddRef is legal and, if so, to propose an
+ // update predicated on the initial state (a conditional write).
+ oldState = sh->m_state;
+
+ // Check for closed state.
+ if (oldState & SH_State_Closed)
+ COMPlusThrow(kObjectDisposedException, IDS_EE_SAFEHANDLECLOSED);
+
+ // Not closed, let's propose an update (to the ref count, just add
+ // SH_RefCountOne to the state to effectively add 1 to the ref count).
+ // Continue doing this until the update succeeds (because nobody
+ // modifies the state field between the read and write operations) or
+ // the state moves to closed.
+ newState = oldState + SH_RefCountOne;
+
+ } while (InterlockedCompareExchange((LONG*)&sh->m_state, newState, oldState) != oldState);
+
+ // If we got here we managed to update the ref count while the state
+ // remained non closed. So we're done.
+}
+
+void SafeHandle::Release(bool fDispose)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INSTANCE_CHECK;
+ } CONTRACTL_END;
+
+ // Cannot use "this" after RunReleaseMethod, which toggles the GC mode.
+ SAFEHANDLEREF sh(this);
+
+#ifdef _DEBUG
+ VALIDATEOBJECTREF(sh->m_debugStackTrace);
+#endif
+ _ASSERTE(sh->IsFullyInitialized());
+
+ // See AddRef above for the design of the synchronization here. Basically we
+ // will try to decrement the current ref count and, if that would take us to
+ // zero refs, set the closed state on the handle as well.
+ bool fPerformRelease = false;
+
+ // Might have to perform the following steps multiple times due to
+ // interference from other AddRef's and Release's.
+ INT32 oldState, newState;
+ do {
+
+ // First step is to read the current handle state. We use this cached
+ // value to predicate any modification we might decide to make to the
+ // state).
+ oldState = sh->m_state;
+
+ // If this is a Dispose operation we have additional requirements (to
+ // ensure that Dispose happens at most once as the comments in AddRef
+ // detail). We must check that the dispose bit is not set in the old
+ // state and, in the case of successful state update, leave the disposed
+ // bit set. Silently do nothing if Dispose has already been called
+ // (because we advertise that as a semantic of Dispose).
+ if (fDispose && (oldState & SH_State_Disposed))
+ return;
+
+ // We should never see a ref count of zero (that would imply we have
+ // unbalanced AddRef and Releases). (We might see a closed state before
+ // hitting zero though -- that can happen if SetHandleAsInvalid is
+ // used).
+ if ((oldState & SH_State_RefCount) == 0)
+ COMPlusThrow(kObjectDisposedException, IDS_EE_SAFEHANDLECLOSED);
+
+ // If we're proposing a decrement to zero and the handle is not closed
+ // and we own the handle then we need to release the handle upon a
+ // successful state update.
+ fPerformRelease = ((oldState & (SH_State_RefCount | SH_State_Closed)) == SH_RefCountOne) && sh->m_ownsHandle;
+
+ // If so we need to check whether the handle is currently invalid by
+ // asking the SafeHandle subclass. We must do this *before*
+ // transitioning the handle to closed, however, since setting the closed
+ // state will cause IsInvalid to always return true.
+ if (fPerformRelease)
+ {
+ GCPROTECT_BEGIN(sh);
+
+ CLR_BOOL fIsInvalid = FALSE;
+
+ DECLARE_ARGHOLDER_ARRAY(args, 1);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(sh);
+
+ PREPARE_SIMPLE_VIRTUAL_CALLSITE_USING_SLOT(s_IsInvalidHandleMethodSlot, sh);
+
+ CRITICAL_CALLSITE;
+ CALL_MANAGED_METHOD(fIsInvalid, CLR_BOOL, args);
+
+ if (fIsInvalid)
+ {
+ fPerformRelease = false;
+ }
+
+ GCPROTECT_END();
+ }
+
+ // Attempt the update to the new state, fail and retry if the initial
+ // state has been modified in the meantime. Decrement the ref count by
+ // subtracting SH_RefCountOne from the state, then OR in the bits for
+ // Dispose (if that's the reason for the Release) and closed (if the
+ // initial ref count was 1).
+ newState = (oldState - SH_RefCountOne) |
+ ((oldState & SH_State_RefCount) == SH_RefCountOne ? SH_State_Closed : 0) |
+ (fDispose ? SH_State_Disposed : 0);
+
+ } while (InterlockedCompareExchange((LONG*)&sh->m_state, newState, oldState) != oldState);
+
+ // If we get here we successfully decremented the ref count. Additionally we
+ // may have decremented it to zero and set the handle state as closed. In
+ // this case (providing we own the handle) we will call the ReleaseHandle
+ // method on the SafeHandle subclass.
+ if (fPerformRelease)
+ RunReleaseMethod((SafeHandle*) OBJECTREFToObject(sh));
+}
+
+void SafeHandle::Dispose()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INSTANCE_CHECK;
+ } CONTRACTL_END;
+
+ // You can't use the "this" pointer after the call to Release because
+ // Release may trigger a GC.
+ SAFEHANDLEREF sh(this);
+
+#ifdef _DEBUG
+ VALIDATEOBJECTREF(sh->m_debugStackTrace);
+#endif
+ _ASSERTE(sh->IsFullyInitialized());
+
+ GCPROTECT_BEGIN(sh);
+ sh->Release(true);
+ // Suppress finalization on this object (we may be racing here but the
+ // operation below is idempotent and a dispose should never race a
+ // finalization).
+ GCHeap::GetGCHeap()->SetFinalizationRun(OBJECTREFToObject(sh));
+ GCPROTECT_END();
+}
+
+void SafeHandle::SetHandle(LPVOID handle)
+{
+ CONTRACTL {
+ THROWS;
+ MODE_COOPERATIVE;
+ INSTANCE_CHECK;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ _ASSERTE(IsFullyInitialized());
+
+ // The SafeHandle's handle field can only be set if the SafeHandle isn't
+ // closed or disposed and its ref count is 1.
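+ // (With the layout described in AddRef, that is exactly
+ // m_state == SH_RefCountOne: a ref count of 1 with the closed and disposed
+ // bits both clear.)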
+ if (m_state != (LONG)SH_RefCountOne)
+ COMPlusThrow(kObjectDisposedException, IDS_EE_SAFEHANDLECANNOTSETHANDLE);
+
+ m_handle = handle;
+}
+
+void AcquireSafeHandle(SAFEHANDLEREF* s)
+{
+ WRAPPER_NO_CONTRACT;
+ GCX_COOP();
+ _ASSERTE(s != NULL && *s != NULL);
+ (*s)->AddRef();
+}
+
+void ReleaseSafeHandle(SAFEHANDLEREF* s)
+{
+ WRAPPER_NO_CONTRACT;
+ GCX_COOP();
+ _ASSERTE(s != NULL && *s != NULL);
+ (*s)->Release(false);
+}
+
+
+// This could theoretically be an instance method, but we'd need to
+// somehow GC protect the this pointer or never dereference any
+// field within the object. It's a lot simpler if we simply make
+// this method static.
+void SafeHandle::RunReleaseMethod(SafeHandle* psh)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ SAFEHANDLEREF sh(psh);
+ _ASSERTE(sh != NULL);
+ _ASSERTE(sh->m_ownsHandle);
+ _ASSERTE(sh->IsFullyInitialized());
+
+ GCPROTECT_BEGIN(sh);
+
+ // Save last error from P/Invoke in case the implementation of ReleaseHandle
+ // trashes it (important because this ReleaseHandle could occur implicitly
+ // as part of unmarshaling another P/Invoke).
+ Thread *pThread = GetThread();
+ DWORD dwSavedError = pThread->m_dwLastError;
+
+ CLR_BOOL fReleaseHandle = FALSE;
+
+ DECLARE_ARGHOLDER_ARRAY(args, 1);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(sh);
+
+ PREPARE_SIMPLE_VIRTUAL_CALLSITE_USING_SLOT(s_ReleaseHandleMethodSlot, sh);
+
+ CRITICAL_CALLSITE;
+ CALL_MANAGED_METHOD(fReleaseHandle, CLR_BOOL, args);
+
+ if (!fReleaseHandle) {
+#ifdef MDA_SUPPORTED
+ MDA_TRIGGER_ASSISTANT(ReleaseHandleFailed, ReportViolation(sh->GetTypeHandle(), sh->m_handle));
+#endif
+ }
+
+ pThread->m_dwLastError = dwSavedError;
+
+ GCPROTECT_END();
+}
+
+FCIMPL1(void, SafeHandle::DisposeNative, SafeHandle* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ SAFEHANDLEREF sh(refThisUNSAFE);
+ if (sh == NULL)
+ FCThrowVoid(kNullReferenceException);
+
+ HELPER_METHOD_FRAME_BEGIN_1(sh);
+ _ASSERTE(sh->IsFullyInitialized());
+ sh->Dispose();
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL1(void, SafeHandle::Finalize, SafeHandle* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ SAFEHANDLEREF sh(refThisUNSAFE);
+ _ASSERTE(sh != NULL);
+
+ HELPER_METHOD_FRAME_BEGIN_1(sh);
+
+ if (sh->IsFullyInitialized())
+ sh->Dispose();
+
+ // By the time we get here we better have gotten rid of any handle resources
+ // we own (unless we were force finalized during shutdown).
+
+ // It's possible to have a critical finalizer reference a
+ // safehandle that ends up calling DangerousRelease *after* this finalizer
+ // is run. In that case we assert since the state is not closed.
+// _ASSERTE(!sh->IsFullyInitialized() || (sh->m_state & SH_State_Closed) || g_fEEShutDown);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL1(void, SafeHandle::SetHandleAsInvalid, SafeHandle* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ SAFEHANDLEREF sh(refThisUNSAFE);
+ _ASSERTE(sh != NULL);
+
+ // Attempt to set closed state (low order bit of the m_state field).
+ // Might have to attempt this repeatedly if the operation suffers
+ // interference from an AddRef or Release.
+ INT32 oldState, newState;
+ do {
+
+ // First step is to read the current handle state so we can predicate a
+ // state update on it.
+ oldState = sh->m_state;
+
+ // New state has the same ref count but is now closed. Attempt to write
+ // this new state but fail if the state was updated in the meantime.
+ newState = oldState | SH_State_Closed;
+
+ } while (InterlockedCompareExchange((LONG*)&sh->m_state, newState, oldState) != oldState);
+
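+ // As in Dispose, suppress finalization now that the handle is marked
+ // closed; there is nothing left for the finalizer to do.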
+ GCHeap::GetGCHeap()->SetFinalizationRun(OBJECTREFToObject(sh));
+}
+FCIMPLEND
+
+FCIMPL2(void, SafeHandle::DangerousAddRef, SafeHandle* refThisUNSAFE, CLR_BOOL *pfSuccess)
+{
+ FCALL_CONTRACT;
+
+ SAFEHANDLEREF sh(refThisUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_1(sh);
+
+ if (pfSuccess == NULL)
+ COMPlusThrow(kNullReferenceException);
+
+ sh->AddRef();
+ *pfSuccess = TRUE;
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL1(void, SafeHandle::DangerousRelease, SafeHandle* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ SAFEHANDLEREF sh(refThisUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_1(sh);
+
+ sh->Release(FALSE);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL1(void, CriticalHandle::FireCustomerDebugProbe, CriticalHandle* refThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ CRITICALHANDLEREF ch(refThisUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_1(ch);
+
+#ifdef MDA_SUPPORTED
+ MDA_TRIGGER_ASSISTANT(ReleaseHandleFailed, ReportViolation(ch->GetTypeHandle(), ch->m_handle));
+#else
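+ // FCUnique gives this otherwise-empty FCall body a unique instruction so
+ // the compiler/linker cannot fold it together with other identical FCalls.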
+ FCUnique(0x53);
+#endif
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+FCIMPL1(UINT, SafeBuffer::SizeOfType, ReflectClassBaseObject* typeUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF type(typeUNSAFE);
+
+ MethodTable* pMT = type->GetType().AsMethodTable();
+
+ if (!pMT->IsValueType() || pMT->ContainsPointers())
+ FCThrowArgument(W("type"), W("Argument_NeedStructWithNoRefs"));
+
+ FC_GC_POLL_RET();
+
+ return pMT->GetNumInstanceFieldBytes();
+}
+FCIMPLEND
+
+FCIMPL1(UINT, SafeBuffer::AlignedSizeOfType, ReflectClassBaseObject* typeUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ REFLECTCLASSBASEREF type(typeUNSAFE);
+
+ MethodTable* pMT = type->GetType().AsMethodTable();
+
+ if (!pMT->IsValueType() || pMT->ContainsPointers())
+ FCThrowArgument(W("type"), W("Argument_NeedStructWithNoRefs"));
+
+ FC_GC_POLL_RET();
+
+ return pMT->GetAlignedNumInstanceFieldBytes();
+}
+FCIMPLEND
+
+FCIMPL3(void, SafeBuffer::PtrToStructure, BYTE* ptr, TypedByRef structure, UINT32 sizeofT)
+{
+ FCALL_CONTRACT;
+
+ LPVOID structData = structure.data;
+ _ASSERTE(ptr != NULL && structData != NULL);
+ memcpyNoGCRefs(structData, ptr, sizeofT);
+ FC_GC_POLL();
+}
+FCIMPLEND
+
+FCIMPL3(void, SafeBuffer::StructureToPtr, TypedByRef structure, BYTE* ptr, UINT32 sizeofT)
+{
+ FCALL_CONTRACT;
+
+ LPVOID structData = structure.data;
+ _ASSERTE(ptr != NULL && structData != NULL);
+ memcpyNoGCRefs(ptr, structData, sizeofT);
+ FC_GC_POLL();
+}
+FCIMPLEND
diff --git a/src/vm/security.cpp b/src/vm/security.cpp
new file mode 100644
index 0000000000..f30ecd799c
--- /dev/null
+++ b/src/vm/security.cpp
@@ -0,0 +1,78 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#include "common.h"
+
+#include "security.h"
+#include "securitydescriptor.h"
+#include "securitydescriptorappdomain.h"
+#include "securitydescriptorassembly.h"
+
+IApplicationSecurityDescriptor * Security::CreateApplicationSecurityDescriptor(AppDomain * pDomain)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return static_cast<IApplicationSecurityDescriptor*>(new ApplicationSecurityDescriptor(pDomain));
+}
+
+IAssemblySecurityDescriptor* Security::CreateAssemblySecurityDescriptor(AppDomain *pDomain, DomainAssembly *pAssembly, LoaderAllocator *pLoaderAllocator)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return static_cast<IAssemblySecurityDescriptor*>(new AssemblySecurityDescriptor(pDomain, pAssembly, pLoaderAllocator));
+}
+
+ISharedSecurityDescriptor* Security::CreateSharedSecurityDescriptor(Assembly* pAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return static_cast<ISharedSecurityDescriptor*>(new SharedSecurityDescriptor(pAssembly));
+}
+
+#ifndef FEATURE_CORECLR
+IPEFileSecurityDescriptor* Security::CreatePEFileSecurityDescriptor(AppDomain* pDomain, PEFile *pPEFile)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return static_cast<IPEFileSecurityDescriptor*>(new PEFileSecurityDescriptor(pDomain, pPEFile));
+}
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+// Determine if security checks should be bypassed for a method because the method is
+// being used by a profiler.
+//
+// Profilers often do things like inject unverifiable IL or P/Invoke which won't be allowed
+// if they're working with a transparent method. This hook allows those checks to be
+// suppressed if we're currently profiling.
+//
+// Arguments:
+// pMD - Method we're checking to see if security checks may be bypassed for
+//
+
+BOOL Security::BypassSecurityChecksForProfiler(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+#if defined(PROFILING_SUPPORTED) && !defined(CROSSGEN_COMPILE)
+ return CORProfilerPresent() &&
+ CORProfilerBypassSecurityChecks() &&
+ pMD->GetAssembly()->GetSecurityDescriptor()->IsFullyTrusted();
+#else
+ return FALSE;
+#endif
+}
diff --git a/src/vm/security.h b/src/vm/security.h
new file mode 100644
index 0000000000..2b9e63c06d
--- /dev/null
+++ b/src/vm/security.h
@@ -0,0 +1,381 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+
+
+#ifndef __security_h__
+#define __security_h__
+
+#include "securitypolicy.h"
+#include "securityattributes.h"
+#include "securitydeclarativecache.h"
+#include "securitydeclarative.h"
+#include "securityimperative.h"
+#include "securitytransparentassembly.h"
+
+#ifdef FEATURE_APTCA
+#include "aptca.h"
+#endif
+
+class IAssemblySecurityDescriptor;
+class IApplicationSecurityDescriptor;
+class IPEFileSecurityDescriptor;
+
+enum SecurityStackWalkType
+{
+ SSWT_DECLARATIVE_DEMAND = 1,
+ SSWT_IMPERATIVE_DEMAND = 2,
+ SSWT_DEMAND_FROM_NATIVE = 3,
+ SSWT_IMPERATIVE_ASSERT = 4,
+ SSWT_DENY_OR_PERMITONLY = 5,
+ SSWT_LATEBOUND_LINKDEMAND = 6,
+ SSWT_COUNT_OVERRIDES = 7,
+ SSWT_GET_ZONE_AND_URL = 8,
+};
+
+// AssemblyLoadSecurity describes to the loader the security information to apply to an assembly at
+// load time. This includes information such as the assembly's evidence, as well as whether we should
+// resolve policy on the assembly or push a grant set to its security descriptor.
+struct AssemblyLoadSecurity
+{
+ OBJECTREF *m_pEvidence;
+ OBJECTREF *m_pAdditionalEvidence;
+ OBJECTREF *m_pGrantSet;
+ OBJECTREF *m_pRefusedSet;
+ DWORD m_dwSpecialFlags;
+ bool m_fCheckLoadFromRemoteSource;
+ bool m_fSuppressSecurityChecks;
+ bool m_fPropagatingAnonymouslyHostedDynamicMethodGrant;
+
+ inline AssemblyLoadSecurity();
+
+ // Should the assembly have policy resolved on it, or should it use a pre-determined grant set
+ inline bool ShouldResolvePolicy();
+};
+
+// Ultimately this will become the only interface through
+// which the VM will access security code.
+
+namespace Security
+{
+ // ----------------------------------------
+ // SecurityPolicy
+ // ----------------------------------------
+
+ // Init
+ inline void Start();
+ inline void Stop();
+ inline void SaveCache();
+
+ // Policy
+#ifdef FEATURE_CAS_POLICY
+ inline bool IsProcessWideLegacyCasPolicyEnabled();
+ inline bool CanLoadFromRemoteSources();
+#endif // FEATURE_CAS_POLICY
+
+ BOOL BypassSecurityChecksForProfiler(MethodDesc *pMD);
+ inline BOOL CanCallUnmanagedCode(Module *pModule);
+ inline BOOL CanAssert(Module *pModule);
+ inline DECLSPEC_NORETURN void ThrowSecurityException(__in_z const char *szDemandClass, DWORD dwFlags);
+
+#ifndef DACCESS_COMPILE
+ inline BOOL CanTailCall(MethodDesc* pMD);
+ inline BOOL CanHaveRVA(Assembly * pAssembly);
+ inline BOOL CanAccessNonVerifiableExplicitField(MethodDesc* pMD);
+ inline BOOL CanSkipVerification(MethodDesc * pMethod);
+#endif
+
+ inline BOOL CanSkipVerification(DomainAssembly * pAssembly);
+ inline CorInfoCanSkipVerificationResult JITCanSkipVerification(DomainAssembly * pAssembly);
+ inline CorInfoCanSkipVerificationResult JITCanSkipVerification(MethodDesc * pMD);
+
+ // ----------------------------------------
+ // SecurityAttributes
+ // ----------------------------------------
+
+ inline OBJECTREF CreatePermissionSet(BOOL fTrusted);
+ inline void CopyByteArrayToEncoding(IN U1ARRAYREF* pArray, OUT PBYTE* pbData, OUT DWORD* cbData);
+ inline void CopyEncodingToByteArray(IN PBYTE pbData, IN DWORD cbData, IN OBJECTREF* pArray);
+
+ // ----------------------------------------
+ // SecurityDeclarative
+ // ----------------------------------------
+ inline HRESULT GetDeclarationFlags(IMDInternalImport *pInternalImport, mdToken token, DWORD* pdwFlags, DWORD* pdwNullFlags, BOOL* fHasSuppressUnmanagedCodeAccessAttr = NULL);
+ inline void RetrieveLinktimeDemands(MethodDesc* pMD, OBJECTREF* pClassCas, OBJECTREF* pClassNonCas, OBJECTREF* pMethodCas, OBJECTREF* pMethodNonCas);
+ inline void CheckLinkDemandAgainstAppDomain(MethodDesc *pMD) ;
+
+ inline LinktimeCheckReason GetLinktimeCheckReason(MethodDesc *pMD,
+ OBJECTREF *pClassCasDemands,
+ OBJECTREF *pClassNonCasDemands,
+ OBJECTREF *pMethodCasDemands,
+ OBJECTREF *pMethodNonCasDemands);
+
+ inline void LinktimeCheckMethod(Assembly *pCaller, MethodDesc *pCallee);
+ inline void ClassInheritanceCheck(MethodTable *pClass, MethodTable *pParent);
+ inline void MethodInheritanceCheck(MethodDesc *pMethod, MethodDesc *pParent);
+ inline void GetPermissionInstance(OBJECTREF *perm, int index);
+ inline void DoDeclarativeActions(MethodDesc *pMD, DeclActionInfo *pActions, LPVOID pSecObj, MethodSecurityDescriptor *pMSD = NULL);
+#ifndef DACCESS_COMPILE
+ inline void CheckNonCasDemand(OBJECTREF *prefDemand);
+#endif // #ifndef DACCESS_COMPILE
+ inline BOOL MethodIsVisibleOutsideItsAssembly(MethodDesc * pMD);
+ inline BOOL MethodIsVisibleOutsideItsAssembly(DWORD dwMethodAttr, DWORD dwClassAttr, BOOL fIsGlobalClass);
+ inline void CheckBeforeAllocConsole(AppDomain* pDomain, Assembly* pAssembly);
+
+ // ----------------------------------------
+ // SecurityStackWalk
+ // ----------------------------------------
+
+ // other CAS Actions
+ inline void Demand(SecurityStackWalkType eType, OBJECTREF demand) ;
+#ifdef FEATURE_CAS_POLICY
+ inline void DemandGrantSet(IAssemblySecurityDescriptor *psdAssembly);
+#endif // FEATURE_CAS_POLICY
+ inline void DemandSet(SecurityStackWalkType eType, OBJECTREF demand) ;
+ inline void DemandSet(SecurityStackWalkType eType, PsetCacheEntry *pPCE, DWORD dwAction) ;
+#ifdef FEATURE_CAS_POLICY
+ inline void ReflectionTargetDemand(DWORD dwPermission, IAssemblySecurityDescriptor *psdTarget);
+ inline void ReflectionTargetDemand(DWORD dwPermission,
+ IAssemblySecurityDescriptor *psdTarget,
+ DynamicResolver * pAccessContext);
+#endif // FEATURE_CAS_POLICY
+ inline void SpecialDemand(SecurityStackWalkType eType, DWORD whatPermission) ;
+
+ inline void InheritanceLinkDemandCheck(Assembly *pTargetAssembly, MethodDesc * pMDLinkDemand);
+
+ inline void FullTrustInheritanceDemand(Assembly *pTargetAssembly);
+ inline void FullTrustLinkDemand(Assembly *pTargetAssembly);
+
+ // Compressed Stack
+#ifdef FEATURE_COMPRESSEDSTACK
+ inline COMPRESSEDSTACKREF GetCSFromContextTransitionFrame(Frame *pFrame) ;
+ inline BOOL IsContextTransitionFrameWithCS(Frame *pFrame);
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+
+ // Misc - todo: put these in better categories
+
+ inline BOOL AllDomainsOnStackFullyTrusted();
+ IApplicationSecurityDescriptor* CreateApplicationSecurityDescriptor(AppDomain * pDomain);
+ IAssemblySecurityDescriptor* CreateAssemblySecurityDescriptor(AppDomain *pDomain, DomainAssembly *pAssembly, LoaderAllocator *pLoaderAllocator);
+ ISharedSecurityDescriptor* CreateSharedSecurityDescriptor(Assembly* pAssembly);
+#ifndef FEATURE_CORECLR
+ IPEFileSecurityDescriptor* CreatePEFileSecurityDescriptor(AppDomain* pDomain, PEFile *pPEFile);
+#endif
+ inline void SetDefaultAppDomainProperty(IApplicationSecurityDescriptor* pASD);
+ inline void SetDefaultAppDomainEvidenceProperty(IApplicationSecurityDescriptor* pASD);
+
+
+ // Checks for one of the special domain-wide flags, such as whether we are
+ // currently in a "fully trusted" environment or whether unmanaged code
+ // access is allowed at this time.
+ // Note: This is an inline method instead of a virtual method on IApplicationSecurityDescriptor
+ // for stackwalk perf.
+ inline BOOL CheckDomainWideSpecialFlag(IApplicationSecurityDescriptor *pASD, DWORD flags);
+
+ inline BOOL IsResolved(Assembly *pAssembly);
+
+ FORCEINLINE VOID IncrementSecurityPerfCounter() ;
+ inline BOOL IsSpecialRunFrame(MethodDesc *pMeth) ;
+ inline BOOL SkipAndFindFunctionInfo(INT32 i, MethodDesc** ppMD, OBJECTREF** ppOR, AppDomain **ppAppDomain = NULL);
+ inline BOOL SkipAndFindFunctionInfo(StackCrawlMark* pSCM, MethodDesc** ppMD, OBJECTREF** ppOR, AppDomain **ppAppDomain = NULL);
+
+ // Transparency checks
+ inline BOOL IsMethodTransparent(MethodDesc * pMD);
+ inline BOOL IsMethodCritical(MethodDesc * pMD);
+ inline BOOL IsMethodSafeCritical(MethodDesc * pMD);
+
+ inline BOOL IsTypeCritical(MethodTable *pMT);
+ inline BOOL IsTypeSafeCritical(MethodTable *pMT);
+ inline BOOL IsTypeTransparent(MethodTable * pMT);
+ inline BOOL IsTypeAllTransparent(MethodTable * pMT);
+
+ inline BOOL IsFieldTransparent(FieldDesc * pFD);
+ inline BOOL IsFieldCritical(FieldDesc * pFD);
+ inline BOOL IsFieldSafeCritical(FieldDesc * pFD);
+
+ inline BOOL IsTokenTransparent(Module* pModule, mdToken token);
+
+ inline void DoSecurityClassAccessChecks(MethodDesc *pCallerMD,
+ const TypeHandle &calleeTH,
+ CorInfoSecurityRuntimeChecks check);
+
+ inline CorInfoIsAccessAllowedResult RequiresTransparentAssemblyChecks(MethodDesc* pCaller,
+ MethodDesc* pCallee,
+ SecurityTransparencyError *pError);
+ inline VOID EnforceTransparentAssemblyChecks(MethodDesc* pCallee, MethodDesc* pCaller);
+ inline VOID EnforceTransparentDelegateChecks(MethodTable* pDelegateMT, MethodDesc* pCaller);
+ inline VOID PerformTransparencyChecksForLoadByteArray(MethodDesc* pCallersMD, IAssemblySecurityDescriptor* pLoadedSecDesc);
+
+ inline bool TypeRequiresTransparencyCheck(TypeHandle type, bool checkForLinkDemands = false);
+
+ inline BOOL CheckCriticalAccess(AccessCheckContext* pContext,
+ MethodDesc* pOptionalTargetMethod = NULL,
+ FieldDesc* pOptionalTargetField = NULL,
+ MethodTable * pOptionalTargetType = NULL);
+
+ // declarative security
+ inline HRESULT GetDeclaredPermissions(IN IMDInternalImport *pInternalImport, IN mdToken token, IN CorDeclSecurity action, OUT OBJECTREF *pDeclaredPermissions, OUT PsetCacheEntry **pPSCacheEntry = NULL) ;
+
+ // security enforcement
+ inline BOOL ContainsBuiltinCASPermsOnly(CORSEC_ATTRSET* pAttrSet);
+
+#ifdef FEATURE_APTCA
+ inline BOOL IsUntrustedCallerCheckNeeded(MethodDesc *pCalleeMD, Assembly *pCallerAssem = NULL) ;
+ inline void DoUntrustedCallerChecks(Assembly *pCaller, MethodDesc *pCalee, BOOL fFullStackWalk) ;
+
+ inline bool NativeImageHasValidAptcaDependencies(PEImage *pNativeImage, DomainAssembly *pDomainAssembly);
+
+ inline SString GetAptcaKillBitAccessExceptionContext(Assembly *pTargetAssembly);
+ inline SString GetConditionalAptcaAccessExceptionContext(Assembly *pTargetAssembly);
+#endif // FEATURE_APTCA
+
+#ifdef FEATURE_CORECLR
+ inline BOOL IsMicrosoftPlatform(IAssemblySecurityDescriptor *pSecDesc);
+#endif // FEATURE_CORECLR
+
+ inline bool SecurityCalloutQuickCheck(MethodDesc *pCallerMD);
+
+ inline bool CanShareAssembly(DomainAssembly *pAssembly);
+};
+
+class ISecurityDescriptor
+{
+public:
+ VPTR_BASE_VTABLE_CLASS(ISecurityDescriptor)
+ virtual ~ISecurityDescriptor() { LIMITED_METHOD_CONTRACT; }
+
+ virtual BOOL IsFullyTrusted() = 0;
+
+ virtual BOOL CanCallUnmanagedCode() const = 0;
+
+#ifndef DACCESS_COMPILE
+ virtual DWORD GetSpecialFlags() const = 0;
+
+ virtual AppDomain* GetDomain() const = 0;
+
+ virtual void Resolve() = 0;
+ virtual BOOL IsResolved() const = 0;
+
+#ifdef FEATURE_CAS_POLICY
+ virtual OBJECTREF GetEvidence() = 0;
+ virtual BOOL IsEvidenceComputed() const = 0;
+ virtual void SetEvidence(OBJECTREF evidence) = 0;
+#endif // FEATURE_CAS_POLICY
+
+ virtual OBJECTREF GetGrantedPermissionSet(OBJECTREF* RefusedPermissions = NULL) = 0;
+#endif // !DACCESS_COMPILE
+};
+
+class IApplicationSecurityDescriptor : public ISecurityDescriptor
+{
+public:
+ VPTR_ABSTRACT_VTABLE_CLASS(IApplicationSecurityDescriptor, ISecurityDescriptor)
+
+#ifndef DACCESS_COMPILE
+public:
+ virtual BOOL IsHomogeneous() const = 0;
+ virtual void SetHomogeneousFlag(BOOL fRuntimeSuppliedHomogenousGrantSet) = 0;
+ virtual BOOL ContainsAnyRefusedPermissions() = 0;
+
+ virtual BOOL IsDefaultAppDomain() const = 0;
+ virtual BOOL IsDefaultAppDomainEvidence() = 0;
+ virtual BOOL DomainMayContainPartialTrustCode() = 0;
+
+ virtual BOOL CallHostSecurityManager() = 0;
+ virtual void SetHostSecurityManagerFlags(DWORD dwFlags) = 0;
+ virtual void SetPolicyLevelFlag() = 0;
+
+ virtual void FinishInitialization() = 0;
+ virtual BOOL IsInitializationInProgress() = 0;
+
+ // Determine the security state that an AppDomain will arrive in if nothing changes during domain
+ // initialization (i.e., get the input security state of the domain).
+ virtual void PreResolve(BOOL *pfIsFullyTrusted, BOOL *pfIsHomogeneous) = 0;
+
+ // Gets special domain-wide flags that specify things such as whether we are
+ // currently in a "fully trusted" environment or whether unmanaged code
+ // access is allowed at this time.
+ virtual DWORD GetDomainWideSpecialFlag() const = 0;
+
+#ifdef FEATURE_CAS_POLICY
+ virtual void SetLegacyCasPolicyEnabled() = 0;
+ virtual BOOL IsLegacyCasPolicyEnabled() = 0;
+ virtual BOOL AllowsLoadsFromRemoteSources() = 0;
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_APTCA
+ virtual ConditionalAptcaCache *GetConditionalAptcaCache() = 0;
+ virtual void SetCanonicalConditionalAptcaList(LPCWSTR wszCanonicalConditionalAptcaList) = 0;
+#endif // FEATURE_APTCA
+#endif // !DACCESS_COMPILE
+};
+
+class IAssemblySecurityDescriptor : public ISecurityDescriptor
+{
+public:
+ VPTR_ABSTRACT_VTABLE_CLASS(IAssemblySecurityDescriptor, ISecurityDescriptor)
+
+#ifndef DACCESS_COMPILE
+ virtual SharedSecurityDescriptor *GetSharedSecDesc() = 0;
+
+ virtual BOOL CanAssert() = 0;
+ virtual BOOL HasUnrestrictedUIPermission() = 0;
+ virtual BOOL IsAllCritical() = 0;
+ virtual BOOL IsAllSafeCritical() = 0;
+ virtual BOOL IsAllPublicAreaSafeCritical() = 0;
+ virtual BOOL IsAllTransparent() = 0;
+ virtual BOOL IsSystem() = 0;
+ virtual BOOL AllowSkipVerificationInFullTrust() = 0;
+
+ virtual void ResolvePolicy(ISharedSecurityDescriptor *pSharedDesc, BOOL fShouldSkipPolicyResolution) = 0;
+
+#ifdef FEATURE_CAS_POLICY
+ virtual HRESULT LoadSignature(COR_TRUST **ppSignature = NULL) = 0;
+
+ virtual void SetRequestedPermissionSet(OBJECTREF RequiredPermissionSet, OBJECTREF OptionalPermissionSet, OBJECTREF DeniedPermissionSet) = 0;
+
+ virtual void SetAdditionalEvidence(OBJECTREF evidence) = 0;
+ virtual BOOL HasAdditionalEvidence() = 0;
+ virtual OBJECTREF GetAdditionalEvidence() = 0;
+ virtual void SetEvidenceFromPEFile(IPEFileSecurityDescriptor *pPEFileSecDesc) = 0;
+#endif // FEATURE_CAS_POLICY
+
+ virtual void PropagatePermissionSet(OBJECTREF GrantedPermissionSet, OBJECTREF DeniedPermissionSet, DWORD dwSpecialFlags) = 0;
+
+#ifndef FEATURE_CORECLR
+ virtual BOOL AllowApplicationSpecifiedAppDomainManager() = 0;
+#endif
+
+ // Check to make sure that security will allow this assembly to load. Throw an exception if the
+ // assembly should be forbidden from loading for security-related reasons.
+ virtual void CheckAllowAssemblyLoad() = 0;
+#endif // #ifndef DACCESS_COMPILE
+};
+
+class ISharedSecurityDescriptor
+{
+public:
+ virtual void Resolve(IAssemblySecurityDescriptor *pSecDesc = NULL) = 0;
+ virtual BOOL IsResolved() const = 0;
+ virtual BOOL IsSystem() = 0;
+ virtual Assembly* GetAssembly() = 0;
+};
+
+#ifndef FEATURE_CORECLR
+class IPEFileSecurityDescriptor : public ISecurityDescriptor
+{
+public:
+ virtual BOOL AllowBindingRedirects() = 0;
+};
+#endif
+
+#include "security.inl"
+#include "securitydeclarative.inl"
+#include "securityattributes.inl"
+
+#endif
diff --git a/src/vm/security.inl b/src/vm/security.inl
new file mode 100644
index 0000000000..ad0ea1c57c
--- /dev/null
+++ b/src/vm/security.inl
@@ -0,0 +1,804 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+
+#ifndef _INL_SECURITY_
+#define _INL_SECURITY_
+
+#include "securitydescriptorassembly.h"
+#include "securitydescriptorappdomain.h"
+#include "securitystackwalk.h"
+
+// Init
+inline void Security::Start()
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityPolicy::Start();
+}
+
+inline void Security::Stop()
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityPolicy::Stop();
+}
+#ifdef FEATURE_CAS_POLICY
+inline void Security::SaveCache()
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityPolicy::SaveCache();
+}
+#endif
+// ----------------------------------------
+// SecurityPolicy
+// ----------------------------------------
+
+#ifdef FEATURE_CAS_POLICY
+
+//---------------------------------------------------------------------------------------
+//
+// Determine if the entire process is running with CAS policy enabled for legacy
+// compatibility. If this value is false, the CLR does not apply any security policy.
+// Instead, it defers to a host if one is present or grants assemblies full trust.
+//
+
+inline bool Security::IsProcessWideLegacyCasPolicyEnabled()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // APPX precludes the use of legacy CAS policy
+ if (AppX::IsAppXProcess())
+ {
+ return false;
+ }
+
+ return CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_Security_LegacyCasPolicy) ||
+ CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_Security_NetFx40LegacySecurityPolicy);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// In pre-v4 versions of the CLR, doing a LoadFrom for a file in a remote location would
+// implicitly sandbox that assembly. If CAS policy is disabled, then these applications
+// will suddenly be granting full trust to assemblies they expected to be sandboxed. In
+// order to prevent this, these LoadFroms are disabled unless the application has explicitly
+// configured itself to allow them.
+//
+// This method returns a value that indicates whether the application has declared that it
+// is safe to LoadFrom remote locations and that the CLR should not block these loads.
+//
+
+inline bool Security::CanLoadFromRemoteSources()
+{
+ WRAPPER_NO_CONTRACT;
+ return !!CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_Security_LoadFromRemoteSources);
+}
+
+#endif // FEATURE_CAS_POLICY
+
+inline BOOL Security::CanCallUnmanagedCode(Module *pModule)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityPolicy::CanCallUnmanagedCode(pModule);
+}
+
+#ifndef DACCESS_COMPILE
+inline BOOL Security::CanAssert(Module *pModule)
+{
+ WRAPPER_NO_CONTRACT;
+ SharedSecurityDescriptor *pSharedSecDesc = static_cast<SharedSecurityDescriptor*>(pModule->GetAssembly()->GetSharedSecurityDescriptor());
+ if (pSharedSecDesc)
+ return pSharedSecDesc->CanAssert();
+
+ AssemblySecurityDescriptor *pSec = static_cast<AssemblySecurityDescriptor*>(pModule->GetSecurityDescriptor());
+ _ASSERTE(pSec);
+ return pSec->CanAssert();
+}
+
+inline DECLSPEC_NORETURN void Security::ThrowSecurityException(__in_z const char *szDemandClass, DWORD dwFlags)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityPolicy::ThrowSecurityException(szDemandClass, dwFlags);
+}
+
+inline BOOL Security::CanTailCall(MethodDesc* pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ return Security::CanSkipVerification(pMD);
+}
+
+inline BOOL Security::CanAccessNonVerifiableExplicitField(MethodDesc* pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ // just check if the method can have unverifiable code
+ return Security::CanSkipVerification(pMD);
+}
+#endif
+
+// ----------------------------------------
+// SecurityAttributes
+// ----------------------------------------
+
+inline OBJECTREF Security::CreatePermissionSet(BOOL fTrusted)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityAttributes::CreatePermissionSet(fTrusted);
+}
+
+inline void Security::CopyByteArrayToEncoding(IN U1ARRAYREF* pArray, OUT PBYTE* pbData, OUT DWORD* cbData)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityAttributes::CopyByteArrayToEncoding(pArray, pbData, cbData);
+}
+
+inline void Security::CopyEncodingToByteArray(IN PBYTE pbData, IN DWORD cbData, IN OBJECTREF* pArray)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityAttributes::CopyEncodingToByteArray(pbData, cbData, pArray);
+}
+
+// ----------------------------------------
+// SecurityDeclarative
+// ----------------------------------------
+
+inline HRESULT Security::GetDeclarationFlags(IMDInternalImport *pInternalImport, mdToken token, DWORD* pdwFlags, DWORD* pdwNullFlags, BOOL* fHasSuppressUnmanagedCodeAccessAttr)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityDeclarative::GetDeclarationFlags(pInternalImport, token, pdwFlags, pdwNullFlags, fHasSuppressUnmanagedCodeAccessAttr);
+}
+
+inline void Security::RetrieveLinktimeDemands(MethodDesc* pMD, OBJECTREF* pClassCas, OBJECTREF* pClassNonCas, OBJECTREF* pMethodCas, OBJECTREF* pMethodNonCas)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityDeclarative::RetrieveLinktimeDemands(pMD, pClassCas, pClassNonCas, pMethodCas, pMethodNonCas);
+}
+
+inline LinktimeCheckReason Security::GetLinktimeCheckReason(MethodDesc *pMD,
+ OBJECTREF *pClassCasDemands,
+ OBJECTREF *pClassNonCasDemands,
+ OBJECTREF *pMethodCasDemands,
+ OBJECTREF *pMethodNonCasDemands)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityDeclarative::GetLinktimeCheckReason(pMD,
+ pClassCasDemands,
+ pClassNonCasDemands,
+ pMethodCasDemands,
+ pMethodNonCasDemands);
+}
+
+inline void Security::CheckLinkDemandAgainstAppDomain(MethodDesc *pMD)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_CAS_POLICY
+ SecurityDeclarative::CheckLinkDemandAgainstAppDomain(pMD);
+#endif
+}
+
+inline void Security::LinktimeCheckMethod(Assembly *pCaller, MethodDesc *pCallee)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityDeclarative::LinktimeCheckMethod(pCaller, pCallee);
+}
+
+inline void Security::ClassInheritanceCheck(MethodTable *pClass, MethodTable *pParent)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityDeclarative::ClassInheritanceCheck(pClass, pParent);
+}
+
+inline void Security::MethodInheritanceCheck(MethodDesc *pMethod, MethodDesc *pParent)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityDeclarative::MethodInheritanceCheck(pMethod, pParent);
+}
+
+inline void Security::GetPermissionInstance(OBJECTREF *perm, int index)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityDeclarative::GetPermissionInstance(perm, index);
+}
+
+inline void Security::DoDeclarativeActions(MethodDesc *pMD, DeclActionInfo *pActions, LPVOID pSecObj, MethodSecurityDescriptor *pMSD)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_CAS_POLICY
+ SecurityDeclarative::DoDeclarativeActions(pMD, pActions, pSecObj, pMSD);
+#endif
+}
+
+#ifndef DACCESS_COMPILE
+inline void Security::CheckNonCasDemand(OBJECTREF *prefDemand)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_CAS_POLICY
+ SecurityDeclarative::CheckNonCasDemand(prefDemand);
+#endif
+}
+#endif // #ifndef DACCESS_COMPILE
+
+inline BOOL Security::MethodIsVisibleOutsideItsAssembly(MethodDesc * pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityDeclarative::MethodIsVisibleOutsideItsAssembly(pMD);
+}
+
+inline BOOL Security::MethodIsVisibleOutsideItsAssembly(DWORD dwMethodAttr, DWORD dwClassAttr, BOOL fIsGlobalClass)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityDeclarative::MethodIsVisibleOutsideItsAssembly(dwMethodAttr, dwClassAttr, fIsGlobalClass);
+}
+
+inline void Security::CheckBeforeAllocConsole(AppDomain* pDomain, Assembly* pAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_CAS_POLICY
+ SecurityRuntime::CheckBeforeAllocConsole(pDomain, pAssembly);
+#endif
+}
+
+// ----------------------------------------
+// SecurityStackWalk
+// ----------------------------------------
+
+// other CAS Actions
+inline void Security::Demand(SecurityStackWalkType eType, OBJECTREF demand)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_CAS_POLICY
+ SecurityStackWalk::Demand(eType, demand);
+#endif
+}
+
+#ifdef FEATURE_CAS_POLICY
+inline void Security::DemandGrantSet(IAssemblySecurityDescriptor *psdAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityStackWalk::DemandGrantSet(static_cast<AssemblySecurityDescriptor*>(psdAssembly));
+}
+#endif // FEATURE_CAS_POLICY
+
+inline void Security::DemandSet(SecurityStackWalkType eType, OBJECTREF demand)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+#ifdef FEATURE_CAS_POLICY
+ SecurityStackWalk::DemandSet(eType, demand);
+#endif
+}
+
+inline void Security::DemandSet(SecurityStackWalkType eType, PsetCacheEntry *pPCE, DWORD dwAction)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_CAS_POLICY
+ SecurityStackWalk::DemandSet(eType, pPCE, dwAction);
+#endif
+}
+
+#ifdef FEATURE_CAS_POLICY
+inline void Security::ReflectionTargetDemand(DWORD dwPermission, IAssemblySecurityDescriptor *psdTarget)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityStackWalk::ReflectionTargetDemand(dwPermission, static_cast<AssemblySecurityDescriptor*>(psdTarget));
+}
+
+inline void Security::ReflectionTargetDemand(DWORD dwPermission,
+ IAssemblySecurityDescriptor *psdTarget,
+ DynamicResolver * pAccessContext)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityStackWalk::ReflectionTargetDemand(dwPermission, static_cast<AssemblySecurityDescriptor*>(psdTarget), pAccessContext);
+}
+#endif // FEATURE_CAS_POLICY
+
+inline void Security::SpecialDemand(SecurityStackWalkType eType, DWORD whatPermission)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_CAS_POLICY
+ SecurityStackWalk::SpecialDemand(eType, whatPermission);
+#endif
+}
+
+inline void Security::InheritanceLinkDemandCheck(Assembly *pTargetAssembly, MethodDesc * pMDLinkDemand)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_CAS_POLICY
+ SecurityDeclarative::InheritanceLinkDemandCheck(pTargetAssembly, pMDLinkDemand);
+#endif
+}
+
+inline void Security::FullTrustInheritanceDemand(Assembly *pTargetAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_CAS_POLICY
+ SecurityDeclarative::FullTrustInheritanceDemand(pTargetAssembly);
+#endif
+}
+
+inline void Security::FullTrustLinkDemand(Assembly *pTargetAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_CAS_POLICY
+ SecurityDeclarative::FullTrustLinkDemand(pTargetAssembly);
+#endif
+}
+
+#ifdef FEATURE_COMPRESSEDSTACK
+// Compressed Stack
+
+inline COMPRESSEDSTACKREF Security::GetCSFromContextTransitionFrame(Frame *pFrame)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityStackWalk::GetCSFromContextTransitionFrame(pFrame);
+}
+
+inline BOOL Security::IsContextTransitionFrameWithCS(Frame *pFrame)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityStackWalk::IsContextTransitionFrameWithCS(pFrame);
+}
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+// Misc - todo: put these in better categories
+
+FORCEINLINE VOID Security::IncrementSecurityPerfCounter()
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityStackWalk::IncrementSecurityPerfCounter();
+}
+
+inline BOOL Security::IsSpecialRunFrame(MethodDesc *pMeth)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityStackWalk::IsSpecialRunFrame(pMeth);
+}
+
+inline BOOL Security::SkipAndFindFunctionInfo(INT32 i, MethodDesc** ppMD, OBJECTREF** ppOR, AppDomain **ppAppDomain )
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityStackWalk::SkipAndFindFunctionInfo(i, ppMD, ppOR, ppAppDomain);
+}
+
+inline BOOL Security::SkipAndFindFunctionInfo(StackCrawlMark* pSCM, MethodDesc** ppMD, OBJECTREF** ppOR, AppDomain **ppAppDomain )
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityStackWalk::SkipAndFindFunctionInfo(pSCM, ppMD, ppOR, ppAppDomain);
+}
+
+#ifndef DACCESS_COMPILE
+inline BOOL Security::AllDomainsOnStackFullyTrusted()
+{
+ WRAPPER_NO_CONTRACT;
+ return (SecurityStackWalk::HasFlagsOrFullyTrusted(0));
+}
+
+inline void Security::SetDefaultAppDomainProperty(IApplicationSecurityDescriptor* pASD)
+{
+ WRAPPER_NO_CONTRACT;
+ static_cast<ApplicationSecurityDescriptor*>(pASD)->SetDefaultAppDomain();
+}
+
+inline void Security::SetDefaultAppDomainEvidenceProperty(IApplicationSecurityDescriptor* pASD)
+{
+ WRAPPER_NO_CONTRACT;
+ static_cast<ApplicationSecurityDescriptor*>(pASD)->SetDefaultAppDomainEvidence();
+}
+
+inline BOOL Security::CheckDomainWideSpecialFlag(IApplicationSecurityDescriptor *pASD, DWORD flags)
+{
+ WRAPPER_NO_CONTRACT;
+ return static_cast<ApplicationSecurityDescriptor*>(pASD)->CheckDomainWideSpecialFlag(flags);
+}
+
+inline BOOL Security::IsResolved(Assembly *pAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ISharedSecurityDescriptor *pSSD = pAssembly->GetSharedSecurityDescriptor();
+ if (pSSD != NULL)
+ {
+ return pSSD->IsResolved();
+ }
+ else
+ {
+ IAssemblySecurityDescriptor *pSD = pAssembly->GetSecurityDescriptor();
+ return pSD->IsResolved();
+ }
+}
+#endif //! DACCESS_COMPILE
+
+inline BOOL Security::IsMethodTransparent(MethodDesc * pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::IsMethodTransparent(pMD);
+}
+
+inline BOOL Security::IsMethodCritical(MethodDesc * pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::IsMethodCritical(pMD);
+}
+
+inline BOOL Security::IsMethodSafeCritical(MethodDesc * pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::IsMethodSafeCritical(pMD);
+}
+
+inline BOOL Security::IsTypeCritical(MethodTable *pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::IsTypeCritical(pMT);
+}
+
+inline BOOL Security::IsTypeSafeCritical(MethodTable *pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::IsTypeSafeCritical(pMT);
+}
+
+inline BOOL Security::IsTypeTransparent(MethodTable * pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::IsTypeTransparent(pMT);
+}
+
+inline BOOL Security::IsTypeAllTransparent(MethodTable * pMT)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::IsTypeAllTransparent(pMT);
+}
+
+inline BOOL Security::IsFieldTransparent(FieldDesc * pFD)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::IsFieldTransparent(pFD);
+}
+
+inline BOOL Security::IsFieldCritical(FieldDesc * pFD)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::IsFieldCritical(pFD);
+}
+
+inline BOOL Security::IsFieldSafeCritical(FieldDesc * pFD)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::IsFieldSafeCritical(pFD);
+}
+
+inline BOOL Security::IsTokenTransparent(Module* pModule, mdToken token)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::IsTokenTransparent(pModule, token);
+}
+
+inline void Security::DoSecurityClassAccessChecks(MethodDesc *pCallerMD,
+ const TypeHandle &calleeTH,
+ CorInfoSecurityRuntimeChecks checks)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityTransparent::DoSecurityClassAccessChecks(pCallerMD, calleeTH, checks);
+}
+
+// Transparency checks
+inline CorInfoIsAccessAllowedResult Security::RequiresTransparentAssemblyChecks(MethodDesc* pCaller,
+ MethodDesc* pCallee,
+ SecurityTransparencyError *pError)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::RequiresTransparentAssemblyChecks(pCaller, pCallee, pError);
+}
+
+inline VOID Security::EnforceTransparentDelegateChecks(MethodTable* pDelegateMT, MethodDesc* pCaller)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityTransparent::EnforceTransparentDelegateChecks(pDelegateMT, pCaller);
+}
+
+inline VOID Security::EnforceTransparentAssemblyChecks(MethodDesc* pCallee, MethodDesc* pCaller)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityTransparent::EnforceTransparentAssemblyChecks(pCallee, pCaller);
+}
+
+inline VOID Security::PerformTransparencyChecksForLoadByteArray(MethodDesc* pCallersMD, IAssemblySecurityDescriptor* pLoadedSecDesc)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityTransparent::PerformTransparencyChecksForLoadByteArray(pCallersMD, static_cast<AssemblySecurityDescriptor*>(pLoadedSecDesc));
+}
+
+inline bool Security::TypeRequiresTransparencyCheck(TypeHandle type, bool checkForLinkDemands /*= false*/)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::TypeRequiresTransparencyCheck(type, checkForLinkDemands);
+}
+
+inline BOOL Security::CheckCriticalAccess(AccessCheckContext* pContext,
+ MethodDesc* pOptionalTargetMethod,
+ FieldDesc* pOptionalTargetField,
+ MethodTable * pOptionalTargetType)
+{
+ return SecurityTransparent::CheckCriticalAccess(pContext,
+ pOptionalTargetMethod,
+ pOptionalTargetField,
+ pOptionalTargetType);
+}
+
+#ifndef DACCESS_COMPILE
+inline BOOL Security::CanHaveRVA(Assembly * pAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ return Security::CanSkipVerification(pAssembly->GetDomainAssembly());
+}
+
+inline BOOL Security::CanSkipVerification(MethodDesc * pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Special case the System.Object..ctor:
+ // System.Object..ctor is not verifiable according to current verifier rules (which require a call to the base
+ // class ctor). But since we want System.Object..ctor() to be marked transparent, it cannot be unverifiable
+ // (v4 security rules prohibit transparent code from being unverifiable).
+
+#ifndef DACCESS_COMPILE
+ if (g_pObjectCtorMD == pMD)
+ return TRUE;
+#endif
+
+ // In AppX, all dynamic code (dynamic assemblies and dynamic methods) should be verified.
+ if (AppX::IsAppXProcess() && !AppX::IsAppXDesignMode())
+ {
+ if (pMD->IsLCGMethod() || pMD->GetAssembly()->IsDynamic())
+ return FALSE;
+ }
+
+ BOOL fCanSkipVerification = Security::CanSkipVerification(pMD->GetAssembly()->GetDomainAssembly());
+ if (fCanSkipVerification)
+ {
+#ifdef FEATURE_CORECLR
+ // For Profile assemblies, do not verify any code. All Transparent methods are guaranteed to be
+ // verifiable (verified by tests). Therefore, skip all verification on platform assemblies.
+ if(pMD->GetAssembly()->GetDomainAssembly()->GetFile()->IsProfileAssembly())
+ return TRUE;
+#endif
+ // check for transparency
+ if (SecurityTransparent::IsMethodTransparent(pMD))
+ {
+#ifndef FEATURE_CORECLR
+ ModuleSecurityDescriptor *pModuleSecDesc = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pMD->GetAssembly());
+ if (!pModuleSecDesc->CanTransparentCodeSkipVerification())
+#endif // !FEATURE_CORECLR
+ {
+ return FALSE;
+ }
+ }
+ }
+#if defined(_DEBUG) && defined(FEATURE_CORECLR)
+ else
+ {
+ // Profile assemblies must be able to skip verification.
+ _ASSERTE(!pMD->GetAssembly()->GetDomainAssembly()->GetFile()->IsProfileAssembly());
+ }
+#endif //_DEBUG && FEATURE_CORECLR
+ return fCanSkipVerification;
+}
+#endif //!DACCESS_COMPILE
+
+
+inline BOOL Security::CanSkipVerification(DomainAssembly * pAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityPolicy::CanSkipVerification(pAssembly);
+}
+
+inline CorInfoCanSkipVerificationResult Security::JITCanSkipVerification(DomainAssembly * pAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::JITCanSkipVerification(pAssembly);
+}
+
+inline CorInfoCanSkipVerificationResult Security::JITCanSkipVerification(MethodDesc * pMD)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::JITCanSkipVerification(pMD);
+}
+
+inline BOOL Security::ContainsBuiltinCASPermsOnly(CORSEC_ATTRSET* pAttrSet)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityAttributes::ContainsBuiltinCASPermsOnly(pAttrSet);
+}
+
+#ifdef FEATURE_APTCA
+inline BOOL Security::IsUntrustedCallerCheckNeeded(MethodDesc *pCalleeMD, Assembly *pCallerAssem)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityDeclarative::IsUntrustedCallerCheckNeeded(pCalleeMD, pCallerAssem);
+}
+
+inline void Security::DoUntrustedCallerChecks(Assembly *pCaller, MethodDesc *pCalee, BOOL fFullStackWalk)
+{
+ WRAPPER_NO_CONTRACT;
+ SecurityDeclarative::DoUntrustedCallerChecks(pCaller, pCalee, fFullStackWalk);
+}
+
+inline bool Security::NativeImageHasValidAptcaDependencies(PEImage *pNativeImage, DomainAssembly *pDomainAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+ return ::NativeImageHasValidAptcaDependencies(pNativeImage, pDomainAssembly);
+}
+
+inline SString Security::GetAptcaKillBitAccessExceptionContext(Assembly *pTargetAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+ return ::GetAptcaKillBitAccessExceptionContext(pTargetAssembly);
+}
+
+inline SString Security::GetConditionalAptcaAccessExceptionContext(Assembly *pTargetAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+ return ::GetConditionalAptcaAccessExceptionContext(pTargetAssembly);
+}
+
+#endif // FEATURE_APTCA
+
+#ifdef FEATURE_CORECLR
+#ifndef DACCESS_COMPILE
+
+inline BOOL Security::IsMicrosoftPlatform(IAssemblySecurityDescriptor *pSecDesc)
+{
+ WRAPPER_NO_CONTRACT;
+ return static_cast<AssemblySecurityDescriptor*>(pSecDesc)->IsMicrosoftPlatform();
+}
+#endif // DACCESS_COMPILE
+#endif // FEATURE_CORECLR
+
+
+inline bool Security::SecurityCalloutQuickCheck(MethodDesc *pCallerMD)
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityTransparent::SecurityCalloutQuickCheck(pCallerMD);
+}
+
+inline bool Security::CanShareAssembly(DomainAssembly *pAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_APTCA
+ if (!DomainCanShareAptcaAssembly(pAssembly))
+ {
+ return false;
+ }
+#endif // FEATURE_APTCA
+
+ return true;
+}
+
+inline HRESULT Security::GetDeclaredPermissions(IN IMDInternalImport *pInternalImport, IN mdToken token, IN CorDeclSecurity action, OUT OBJECTREF *pDeclaredPermissions, OUT PsetCacheEntry **pPSCacheEntry )
+{
+ WRAPPER_NO_CONTRACT;
+ return SecurityAttributes::GetDeclaredPermissions(pInternalImport, token, action, pDeclaredPermissions, pPSCacheEntry);
+}
+
+#ifndef DACCESS_COMPILE
+ // Returns true if everyone is fully trusted or has the indicated flags
+FORCEINLINE BOOL SecurityStackWalk::HasFlagsOrFullyTrustedIgnoreMode (DWORD flags) {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_CAS_POLICY
+ return TRUE;
+#else
+ // either the desired flag (often 0) or fully trusted will do
+ flags |= (1<<SECURITY_FULL_TRUST);
+
+ // In order for us to use the thread-wide state, it has to be the case that there have been no
+ // overrides since the evaluation (e.g. no denies). We keep the state up-to-date by updating
+ // it whenever a new AppDomainStackEntry is pushed on the AppDomainStack attached to the thread.
+ // When we evaluate the demand, we always intersect the current thread state with the AppDomain-
+ // wide flags, which are updated anytime a new Assembly is loaded into that domain.
+ //
+ // Note: if the flag is clear we still might be able to satisfy the demand if we do the full
+ // stackwalk.
+ //
+ // This code is very perf sensitive; do not make changes here without running
+ // a lot of interop and declarative security benchmarks.
+ //
+ // It's important that we be able to do these checks without having to touch objects
+ // other than the thread itself -- that's where a big part of the speed comes from.
+ // L1 cache misses are at a premium on this code path -- never mind L2...
+ // main memory is right out :)
+
+ Thread* pThread = GetThread();
+ return ((pThread->GetOverridesCount() == 0) &&
+ pThread->CheckThreadWideSpecialFlag(flags) &&
+ static_cast<ApplicationSecurityDescriptor*>(pThread->GetDomain()->GetSecurityDescriptor())->CheckDomainWideSpecialFlag(flags));
+#endif
+}
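+
+// Worked example (bit position illustrative): if SECURITY_FULL_TRUST were bit
+// 12, a demand arriving with flags == 0 becomes (1 << 12) after the OR above,
+// so the three checks reduce to "no overrides on this thread, and both the
+// thread-wide and domain-wide state already record full trust" -- in which
+// case the demand is satisfied without walking the stack.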
+
+// Returns true if everyone is fully trusted or has the indicated flags AND we're not in legacy CAS mode
+FORCEINLINE BOOL SecurityStackWalk::HasFlagsOrFullyTrusted (DWORD flags) {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+ return HasFlagsOrFullyTrustedIgnoreMode(flags);
+}
+
+FORCEINLINE BOOL SecurityStackWalk::QuickCheckForAllDemands(DWORD flags)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ return (SecurityStackWalk::HasFlagsOrFullyTrusted(flags));
+}
+
+inline void StoreObjectInLazyHandle(LOADERHANDLE& handle, OBJECTREF ref, LoaderAllocator* la)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (handle == NULL)
+ {
+ // Storing NULL doesn't require us to allocate a handle
+ if (ref != NULL)
+ {
+ GCPROTECT_BEGIN(ref);
+ // Atomically create a handle and store it
+ LOADERHANDLE tmpHandle = la->AllocateHandle(NULL);
+ if (FastInterlockCompareExchangePointer(&handle, tmpHandle, static_cast<LOADERHANDLE>(NULL)) != NULL)
+ {
+ // Another thread snuck in and created the handle first - this should be unusual, and leaking here is acceptable (the leak only lasts until the end of the AppDomain or Assembly lifetime).
+ }
+ else
+ {
+ la->SetHandleValue(handle, ref);
+ }
+ GCPROTECT_END();
+ }
+ }
+ else
+ {
+ la->SetHandleValue(handle, ref);
+ }
+}
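+
+// StoreObjectInLazyHandle above is the standard "allocate once, publish via
+// compare-exchange, loser discards" lazy-initialization pattern (here the
+// loser deliberately leaks, since the handle is reclaimed with its AppDomain
+// or Assembly anyway). A generic sketch of the same shape in portable C++,
+// with hypothetical names:
+//
+//   #include <atomic>
+//
+//   template <typename T>
+//   T* GetOrCreate(std::atomic<T*>& slot)
+//   {
+//       T* cur = slot.load(std::memory_order_acquire);
+//       if (cur != nullptr)
+//           return cur;                  // already published by someone else
+//       T* fresh = new T();
+//       T* expected = nullptr;
+//       if (slot.compare_exchange_strong(expected, fresh))
+//           return fresh;                // we won the race and published ours
+//       delete fresh;                    // another thread won; discard ours
+//       return expected;                 // CAS stored the winner back here
+//   }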
+#endif // #ifndef DACCESS_COMPILE
+
+
+#endif
+
diff --git a/src/vm/securityattributes.cpp b/src/vm/securityattributes.cpp
new file mode 100644
index 0000000000..8ca316c5b5
--- /dev/null
+++ b/src/vm/securityattributes.cpp
@@ -0,0 +1,2765 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#include "common.h"
+
+#include "security.h"
+#include "field.h"
+#include "comcallablewrapper.h"
+#include "typeparse.h"
+#include "appdomain.inl"
+#include "mdaassistants.h"
+#include "fstring.h"
+
+
+HRESULT BlobToAttributeSet(BYTE* pBuffer, ULONG cbBuffer, CORSEC_ATTRSET* pAttrSet, DWORD dwAction);
+
+#ifndef CROSSGEN_COMPILE
+
+OBJECTREF SecurityAttributes::CreatePermissionSet(BOOL fTrusted)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ OBJECTREF pPermSet = NULL;
+ GCPROTECT_BEGIN(pPermSet);
+
+ MethodTable* pMT = MscorlibBinder::GetClass(CLASS__PERMISSION_SET);
+ pPermSet = (OBJECTREF) AllocateObject(pMT);
+
+ ARG_SLOT fStatus = (fTrusted) ? 1 : 0;
+
+ MethodDescCallSite ctor(METHOD__PERMISSION_SET__CTOR);
+
+ ARG_SLOT arg[2] = {
+ ObjToArgSlot(pPermSet),
+ BoolToArgSlot(fStatus)
+ };
+ ctor.Call(arg);
+
+ GCPROTECT_END();
+
+ return pPermSet;
+}
+
+#ifdef FEATURE_CAS_POLICY
+
+// todo: remove the non-cas parameters (because they're bogus now anyway)
+void SecurityAttributes::XmlToPermissionSet(PBYTE pbXmlBlob,
+ DWORD cbXmlBlob,
+ OBJECTREF* pPermSet,
+ OBJECTREF* pEncoding,
+ PBYTE pbNonCasXmlBlob,
+ DWORD cbNonCasXmlBlob,
+ OBJECTREF* pNonCasPermSet,
+ OBJECTREF* pNonCasEncoding)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsProtectedByGCFrame (pPermSet));
+ PRECONDITION(IsProtectedByGCFrame (pEncoding));
+ PRECONDITION(IsProtectedByGCFrame (pNonCasPermSet));
+ PRECONDITION(IsProtectedByGCFrame (pNonCasEncoding));
+ } CONTRACTL_END;
+
+ // Get Host Protection Flags
+ EApiCategories eProtectedCategories = GetHostProtectionManager()->GetProtectedCategories();
+
+ MethodDescCallSite decodeXML(METHOD__PERMISSION_SET__DECODE_XML, pPermSet); // can trigger GC
+
+ // Deserialize the CAS PermissionSet
+ if(pbXmlBlob && cbXmlBlob > 0)
+ {
+ _ASSERTE(*pbXmlBlob != LAZY_DECL_SEC_FLAG);
+
+ // Create a new (empty) permission set.
+ *pPermSet = SecurityAttributes::CreatePermissionSet(FALSE);
+
+ // Buffer in managed space.
+ SecurityAttributes::CopyEncodingToByteArray(pbXmlBlob, cbXmlBlob, pEncoding);
+
+ ARG_SLOT args[] = {
+ ObjToArgSlot(*pPermSet),
+ ObjToArgSlot(*pEncoding),
+ (ARG_SLOT)eProtectedCategories,
+ (ARG_SLOT)0,
+ };
+
+ // Deserialize into a managed object.
+ BOOL success = FALSE;
+ EX_TRY
+ {
+ // Elevate thread's allowed loading level. This can cause load failures if assemblies loaded from this point on require
+ // any assemblies currently being loaded.
+ OVERRIDE_LOAD_LEVEL_LIMIT(FILE_ACTIVE);
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+ success = decodeXML.Call_RetBool(args);
+ }
+ EX_SWALLOW_NONTERMINAL
+
+ if (!success)
+ COMPlusThrow(kSecurityException, IDS_ENCODEDPERMSET_DECODEFAILURE);
+ }
+
+ // Deserialize the non-CAS PermissionSet
+ if(pbNonCasXmlBlob && cbNonCasXmlBlob > 0)
+ {
+ _ASSERTE(*pbNonCasXmlBlob != LAZY_DECL_SEC_FLAG);
+
+ // Create a new (empty) permission set.
+ *pNonCasPermSet = SecurityAttributes::CreatePermissionSet(FALSE);
+
+ // Buffer in managed space.
+ SecurityAttributes::CopyEncodingToByteArray(pbNonCasXmlBlob, cbNonCasXmlBlob, pNonCasEncoding);
+
+ ARG_SLOT args[] = {
+ ObjToArgSlot(*pNonCasPermSet),
+ ObjToArgSlot(*pNonCasEncoding),
+ (ARG_SLOT)eProtectedCategories,
+ (ARG_SLOT)0,
+ };
+
+ // Deserialize into a managed object.
+ BOOL success = FALSE;
+ EX_TRY
+ {
+ // Elevate thread's allowed loading level. This can cause load failures if assemblies loaded from this point on require
+ // any assemblies currently being loaded.
+ OVERRIDE_LOAD_LEVEL_LIMIT(FILE_ACTIVE);
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+ success = decodeXML.Call_RetBool(args);
+ }
+ EX_SWALLOW_NONTERMINAL
+
+ if (!success)
+ COMPlusThrow(kSecurityException, IDS_ENCODEDPERMSET_DECODEFAILURE);
+ }
+}
+
+#endif // FEATURE_CAS_POLICY
+
+//
+// Determine if a security action allows an optimization where an empty permission set can be represented as
+// NULL. Some VM optimizations kick in if an empty permission set can be represented as NULL; however since
+// some security actions have a semantic difference between not being specified at all and having an explicit
+// empty permission set specified, permission sets associated with those actions must be represented as an
+// empty object rather than as NULL.
+//
+// Arguments:
+// action - security action to check
+//
+// Return Value:
+// true if the security action may have an empty permission set optimized to NULL, false otherwise
+//
+// Notes:
+// The security actions which cannot have NULL represent an empty permission set are:
+//
+// * PermitOnly - a PermitOnly set containing no permissions means that all demands should fail, as
+// opposed to not having a PermitOnly set on a method.
+// * RequestOptional - not specifying a RequestOptional set is equivalent to having a RequestOptional set
+// of FullTrust, rather than having an empty RequestOptional set.
+//
+
+// static
+bool SecurityAttributes::ActionAllowsNullPermissionSet(CorDeclSecurity action)
+{
+ LIMITED_METHOD_CONTRACT;
+ return action != dclPermitOnly && action != dclRequestOptional;
+}
+
+#ifdef FEATURE_CAS_POLICY
+
+PsetCacheEntry* SecurityAttributes::MergePermissionSets(IN PsetCacheEntry *pPCE1, IN PsetCacheEntry *pPCE2, IN bool fIntersect, DWORD dwAction)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ SecurityDeclarativeCache *pSDC;
+ PsetCacheEntry* pMergedPCE;
+
+ struct _gc {
+ OBJECTREF orSet1;
+ OBJECTREF orSet2;
+ OBJECTREF orMergedSet;
+ } gc;
+ memset(&gc, '\0', sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+ {
+ // Union or Intersect the two PermissionSets
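+        // (Sketch of the logic below: if either side yields no managed object -
+        // i.e. an empty set - the merge is degenerate. Intersecting with the empty
+        // set returns the empty side; unioning with it returns the other side.)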
+ gc.orSet1 = pPCE1->CreateManagedPsetObject (dwAction);
+
+ if(gc.orSet1 == NULL)
+ pMergedPCE = fIntersect ? pPCE1 : pPCE2;
+ else
+ {
+ gc.orSet2 = pPCE2->CreateManagedPsetObject (dwAction);
+ if(gc.orSet2 == NULL)
+ pMergedPCE = fIntersect ? pPCE2 : pPCE1;
+ else
+ {
+ BinderMethodID methID = (fIntersect ? METHOD__PERMISSION_SET__INTERSECT : METHOD__PERMISSION_SET__UNION);
+ MethodDescCallSite mergeMethod(methID, &gc.orSet1);
+
+ ARG_SLOT args[2] = {
+ ObjToArgSlot(gc.orSet1),
+ ObjToArgSlot(gc.orSet2),
+ };
+ gc.orMergedSet = mergeMethod.Call_RetOBJECTREF(args);
+
+ if(gc.orMergedSet == NULL)
+ gc.orMergedSet = CreatePermissionSet(false);
+
+ // Convert to XML blob
+ PBYTE pbData;
+ DWORD cbData;
+ EncodePermissionSet(&gc.orMergedSet, &pbData, &cbData);
+
+ // Store XML blob and obtain an index to reference it
+ pSDC = &(GetAppDomain()->m_pSecContext->m_pSecurityDeclarativeCache);
+ pMergedPCE = pSDC->CreateAndCachePset (pbData, cbData);
+
+ }
+ }
+ }
+ GCPROTECT_END();
+
+ return pMergedPCE;
+}
+
+#endif // FEATURE_CAS_POLICY
+
+void SecurityAttributes::CopyEncodingToByteArray(IN PBYTE pbData,
+ IN DWORD cbData,
+ OUT OBJECTREF* pArray)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ U1ARRAYREF pObj;
+ _ASSERTE(pArray);
+
+ pObj = (U1ARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_U1,cbData);
+ memcpyNoGCRefs(pObj->m_Array, pbData, cbData);
+ *pArray = (OBJECTREF) pObj;
+}
+
+void SecurityAttributes::CopyByteArrayToEncoding(IN U1ARRAYREF* pArray,
+ OUT PBYTE* ppbData,
+ OUT DWORD* pcbData)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pArray));
+ PRECONDITION(CheckPointer(ppbData));
+ PRECONDITION(CheckPointer(pcbData));
+ PRECONDITION(*pArray != NULL);
+ } CONTRACTL_END;
+
+ DWORD size = (DWORD) (*pArray)->GetNumComponents();
+ *ppbData = new BYTE[size];
+ *pcbData = size;
+
+ CopyMemory(*ppbData, (*pArray)->GetDirectPointerToNonObjectElements(), size);
+}
+
+#ifdef FEATURE_CAS_POLICY
+void SecurityAttributes::EncodePermissionSet(IN OBJECTREF* pRef,
+ OUT PBYTE* ppbData,
+ OUT DWORD* pcbData)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsProtectedByGCFrame (pRef));
+ } CONTRACTL_END;
+
+ MethodDescCallSite encodeXML(METHOD__PERMISSION_SET__ENCODE_XML);
+
+    // Encode the result
+ ARG_SLOT args1[1];
+ args1[0] = ObjToArgSlot(*pRef);
+ OBJECTREF pByteArray = NULL;
+ pByteArray = encodeXML.Call_RetOBJECTREF(args1);
+
+ SecurityAttributes::CopyByteArrayToEncoding((U1ARRAYREF*) &pByteArray,
+ ppbData,
+ pcbData);
+}
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_CAS_POLICY
+static void SetupRestrictSecAttributes()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ MethodDescCallSite setupSecurity(METHOD__PERMISSION_SET__SETUP_SECURITY);
+
+ setupSecurity.Call(NULL);
+ }
+ EX_CATCH
+ {
+ // There is a possibility that we've already set the appdomain policy
+ // level for this process. In that case we'll get a policy exception
+ // that we are free to ignore.
+ OBJECTREF pThrowable = GET_THROWABLE();
+ DefineFullyQualifiedNameForClassOnStack();
+ LPCUTF8 szClass = GetFullyQualifiedNameForClass(pThrowable->GetMethodTable());
+ if (strcmp(g_PolicyExceptionClassName, szClass) != 0)
+ COMPlusThrow(pThrowable);
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+}
+#endif // FEATURE_CAS_POLICY
+
+Assembly* SecurityAttributes::LoadAssemblyFromToken(IMetaDataAssemblyImport *pImport, mdAssemblyRef tkAssemblyRef)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(TypeFromToken(tkAssemblyRef) == mdtAssemblyRef);
+
+ // Find all the details needed to name an assembly for loading.
+ ASSEMBLYMETADATA sContext;
+ BYTE *pbPublicKeyOrToken;
+ DWORD cbPublicKeyOrToken;
+ DWORD dwFlags;
+ LPWSTR wszName;
+ DWORD cchName;
+
+ // Initialize ASSEMBLYMETADATA structure.
+ ZeroMemory(&sContext, sizeof(ASSEMBLYMETADATA));
+
+ // Retrieve size of assembly name.
+ HRESULT hr = pImport->GetAssemblyRefProps(tkAssemblyRef, // [IN] The AssemblyRef for which to get the properties.
+ NULL, // [OUT] Pointer to the public key or token.
+ NULL, // [OUT] Count of bytes in the public key or token.
+ NULL, // [OUT] Buffer to fill with name.
+ NULL, // [IN] Size of buffer in wide chars.
+ &cchName, // [OUT] Actual # of wide chars in name.
+ &sContext, // [OUT] Assembly MetaData.
+ NULL, // [OUT] Hash blob.
+ NULL, // [OUT] Count of bytes in the hash blob.
+ NULL); // [OUT] Flags.
+ _ASSERTE(SUCCEEDED(hr));
+
+ // Allocate the necessary buffers.
+ wszName = (LPWSTR)_alloca(cchName * sizeof(WCHAR));
+ sContext.szLocale = (LPWSTR)_alloca(sContext.cbLocale * sizeof(WCHAR));
+ sContext.rProcessor = (DWORD *)_alloca(sContext.ulProcessor * sizeof(DWORD));
+ sContext.rOS = (OSINFO *)_alloca(sContext.ulOS * sizeof(OSINFO));
+
+    // Get the assembly name and the rest of the naming properties.
+ hr = pImport->GetAssemblyRefProps(tkAssemblyRef,
+ (const void **)&pbPublicKeyOrToken,
+ &cbPublicKeyOrToken,
+ wszName,
+ cchName,
+ &cchName,
+ &sContext,
+ NULL,
+ NULL,
+ &dwFlags);
+ _ASSERTE(SUCCEEDED(hr));
+
+ // We've got the details of the assembly, just need to load it.
+
+ // Convert assembly name to UTF8.
+ MAKE_UTF8PTR_FROMWIDE(uszAssemblyName, wszName);
+
+ // Unfortunately we've got an ASSEMBLYMETADATA structure, but we need
+ // an AssemblyMetaDataInternal
+ AssemblyMetaDataInternal internalContext;
+
+ // Initialize the structure.
+ ZeroMemory(&internalContext, sizeof(AssemblyMetaDataInternal));
+
+ internalContext.usMajorVersion = sContext.usMajorVersion;
+ internalContext.usMinorVersion = sContext.usMinorVersion;
+ internalContext.usBuildNumber = sContext.usBuildNumber;
+ internalContext.usRevisionNumber = sContext.usRevisionNumber;
+ internalContext.rProcessor = sContext.rProcessor;
+ internalContext.ulProcessor = sContext.ulProcessor;
+ internalContext.rOS = sContext.rOS;
+ internalContext.ulOS = sContext.ulOS;
+ if(sContext.cbLocale)
+ {
+ MAKE_UTF8PTR_FROMWIDE(pLocale, sContext.szLocale);
+ internalContext.szLocale = pLocale;
+ }
+ else
+ {
+ internalContext.szLocale = "";
+ }
+
+ Assembly* pAssembly = NULL;
+ {
+        // Elevate the thread's allowed loading level. This can cause load failures if assemblies loaded from this point on require
+ // any assemblies currently being loaded.
+ OVERRIDE_LOAD_LEVEL_LIMIT(FILE_ACTIVE);
+ pAssembly = AssemblySpec::LoadAssembly(uszAssemblyName,
+ &internalContext,
+ pbPublicKeyOrToken,
+ cbPublicKeyOrToken,
+ dwFlags);
+ }
+
+ // @todo: Add CORSECATTR_E_ASSEMBLY_LOAD_FAILED_EX context to this exception path?
+
+ return pAssembly;
+}
+
+TypeHandle FindSecurityAttributeHandle(LPCWSTR wszTypeName)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ TypeHandle hType;
+ MethodDescCallSite findSecurityAttributeTypeHandle(METHOD__SECURITY_ATTRIBUTE__FIND_SECURITY_ATTRIBUTE_TYPE_HANDLE);
+
+ struct _gc {
+ STRINGREF str;
+ } gc;
+
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ gc.str = StringObject::NewString(wszTypeName);
+ ARG_SLOT arg[1] = {
+ ObjToArgSlot(gc.str)
+ };
+
+ TypeHandle th = TypeHandle::FromPtr(findSecurityAttributeTypeHandle.Call_RetLPVOID(arg));
+ hType = th;
+ GCPROTECT_END();
+
+ return hType;
+}
+
+// @TODO: replace this method with a call to the reflection code that decodes CA blobs
+// and instantiates managed attribute objects. Currently the most significant perf
+// cost of this method is due to TypeName::GetTypeWorker, which it calls via
+// GetTypeFromAssemblyQualifiedName and GetTypeUsingCASearchRules.
+HRESULT SecurityAttributes::AttributeSetToManaged(OBJECTREF* /*OUT*/obj, CORSEC_ATTRSET* pAttrSet, OBJECTREF* pThrowable, DWORD* pdwErrorIndex, bool bLazy)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ // Assumption: if the first obj is protected, the whole array is protected
+ if (pAttrSet->dwAttrCount > 0) {PRECONDITION(IsProtectedByGCFrame (obj));}
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DWORD i;
+ TypeHandle hType;
+ MethodTable *pMT = NULL;
+ MethodDesc *pMD = NULL;
+
+    // Elevate the thread's allowed loading level. This can cause load failures if assemblies
+    // loaded from this point on require any assemblies currently being loaded.
+ OVERRIDE_LOAD_LEVEL_LIMIT(FILE_ACTIVE);
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ for (i = 0; i < pAttrSet->dwAttrCount; i++)
+ {
+ CORSEC_ATTRIBUTE *pAttr = &pAttrSet->pAttrs[i];
+
+ if (pdwErrorIndex)
+ *pdwErrorIndex = pAttr->dwIndex;
+
+ // Find the assembly that contains the security attribute class.
+ _ASSERTE(pAttr->pName);
+ Assembly *pAssembly;
+
+ if (bLazy)
+ {
+ // Convert type name to Unicode
+ MAKE_WIDEPTR_FROMUTF8(wszTypeName, pAttr->pName);
+
+ {
+ // Load the type
+ {
+ DWORD error = (DWORD)-1;
+ NewHolder<TypeName> pTypeName = new TypeName(wszTypeName, &error);
+
+ if (error == (DWORD)(-1) && !(pTypeName->GetAssembly()->IsEmpty()))
+ {
+ hType = pTypeName->GetTypeFromAsm(FALSE);
+ }
+ else
+ {
+ hType = TypeName::GetTypeFromAssembly(wszTypeName, SystemDomain::SystemAssembly());
+ }
+ }
+
+ // Special workaround for if the compile-time version of the attribute is no longer available
+ if (hType.IsNull() || hType.GetMethodTable() == NULL)
+ hType = FindSecurityAttributeHandle(wszTypeName);
+ }
+ }
+ else
+ {
+ if (!IsNilToken(pAttr->tkAssemblyRef) && TypeFromToken(pAttr->tkAssemblyRef) == mdtAssemblyRef)
+ {
+ // Load from AssemblyRef token stored in the CORSEC_ATTRSET
+ pAssembly = LoadAssemblyFromToken(pAttrSet->pImport, pAttr->tkAssemblyRef);
+ }
+ else
+ {
+ // Load from MSCORLIB.
+ pAssembly = SystemDomain::SystemAssembly();
+ }
+ _ASSERTE(pAssembly && "Failed to find assembly with declarative attribute");
+
+ EX_TRY
+ {
+ hType = ClassLoader::LoadTypeByNameThrowing(pAssembly, NULL, pAttr->pName);
+ }
+ EX_CATCH_THROWABLE(pThrowable);
+ }
+
+ // Load the security attribute class.
+ if (hType.IsNull() || (pMT = hType.GetMethodTable()) == NULL)
+ {
+ MAKE_WIDEPTR_FROMUTF8(wszTemp, pAttr->pName);
+ SString sMessage;
+ GetExceptionMessage(*pThrowable, sMessage);
+ if (!sMessage.IsEmpty())
+ hr = VMPostError(CORSECATTR_E_TYPE_LOAD_FAILED_EX, wszTemp, sMessage.GetUnicode());
+ else
+ hr = VMPostError(CORSECATTR_E_TYPE_LOAD_FAILED, wszTemp);
+ return hr;
+ }
+
+ // Make sure it's not abstract.
+ if (pMT->IsAbstract())
+ return VMPostError(CORSECATTR_E_ABSTRACT);
+
+#ifdef _DEBUG
+ // Make sure it's really a security attribute class
+ /*{
+ MethodTable *pParentMT = pMT->GetParentMethodTable();
+ CHAR *szClass;
+ DefineFullyQualifiedNameForClassOnStack();
+ while (pParentMT) {
+ szClass = GetFullyQualifiedNameForClass(pParentMT->GetClass());
+ if (stricmpUTF8(szClass, COR_BASE_SECURITY_ATTRIBUTE_CLASS_ANSI) == 0)
+ break;
+ pParentMT = pParentMT->GetParentMethodTable();
+ }
+ _ASSERTE(pParentMT && "Security attribute not derived from COR_BASE_SECURITY_ATTRIBUTE_CLASS");
+ }*/
+#endif
+
+ // Instantiate an instance.
+ obj[i] = pMT->Allocate();
+
+ // Find and call the constructor.
+ pMD = MemberLoader::FindConstructor(pMT, &gsig_IM_SecurityAction_RetVoid);
+ if (pMD == NULL)
+ return VMPostError(CORSECATTR_E_MISSING_CONSTRUCTOR);
+ MethodDescCallSite ctor(pMD);
+ ARG_SLOT args[] = {
+ ObjToArgSlot(obj[i]),
+ (ARG_SLOT)pAttrSet->dwAction
+ };
+ ctor.Call(args);
+
+ // Set the attributes and properties
+ hr = SetAttrFieldsAndProperties(pAttr, pThrowable, pMT, &obj[i]);
+ if (FAILED(hr))
+ return hr;
+ }
+
+ return hr;
+}
+
+
+HRESULT SecurityAttributes::SetAttrFieldsAndProperties(CORSEC_ATTRIBUTE *pAttr, OBJECTREF* pThrowable, MethodTable* pMT, OBJECTREF* pObj)
+{
+ // Setup fields and properties on the object, as specified by the
+ // serialized data passed to us.
+ BYTE *pbBuffer = pAttr->pbValues;
+ SIZE_T cbBuffer = pAttr->cbValues;
+ BYTE *pbBufferEnd = pbBuffer + cbBuffer;
+ DWORD j;
+ HRESULT hr = S_OK;
+
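+    // Rough sketch of each named-value entry consumed by the loop below (inferred
+    // from the parsing code itself; it matches the ECMA-335 named-argument encoding):
+    //
+    //   [1 byte]   SERIALIZATION_TYPE_FIELD or SERIALIZATION_TYPE_PROPERTY
+    //   [1 byte]   value type code (SERIALIZATION_TYPE_*)
+    //   [varies]   for SERIALIZATION_TYPE_ENUM only: packed length + UTF8 enum type name
+    //   [varies]   packed length + UTF8 field/property name
+    //   [varies]   the value itself (width determined by the type code)
+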
+ EX_TRY
+ {
+ for (j = 0; j < pAttr->wValues; j++)
+ {
+ DWORD dwType = 0;
+ BOOL bIsField = FALSE;
+ BYTE *pbName;
+ DWORD cbName;
+ DWORD dwLength;
+ NewArrayHolder<CHAR> szName(NULL);
+ TypeHandle hEnum;
+ CorElementType eEnumType = ELEMENT_TYPE_END;
+
+ // Check we've got at least the field/property specifier and the
+ // type code.
+ if(cbBuffer < (sizeof(BYTE) + sizeof(BYTE)))
+ {
+ hr = VMPostError(CORSECATTR_E_TRUNCATED);
+ goto Error;
+ }
+
+ // Grab the field/property specifier.
+ bIsField = *(BYTE*)pbBuffer == SERIALIZATION_TYPE_FIELD;
+ if(!bIsField && *(BYTE*)pbBuffer != SERIALIZATION_TYPE_PROPERTY)
+ {
+ hr = VMPostError(CORSECATTR_E_TRUNCATED);
+ goto Error;
+ }
+ pbBuffer += sizeof(BYTE);
+ cbBuffer -= sizeof(BYTE);
+
+ // Grab the value type.
+ dwType = *(BYTE*)pbBuffer;
+ pbBuffer += sizeof(BYTE);
+ cbBuffer -= sizeof(BYTE);
+
+ // If it's a type that needs further specification, get that information
+ switch (dwType)
+ {
+ case SERIALIZATION_TYPE_ENUM:
+ // Immediately after the enum type token is the fully
+ // qualified name of the value type used to represent
+ // the enum.
+ if (FAILED(CPackedLen::SafeGetData((BYTE const *)pbBuffer,
+ (BYTE const *)pbBufferEnd,
+ &cbName,
+ (BYTE const **)&pbName)))
+ {
+ hr = VMPostError(CORSECATTR_E_TRUNCATED);
+ goto Error;
+ }
+
+ // SafeGetData ensured that the name is within the buffer
+ _ASSERTE(FitsIn<DWORD>((pbName - pbBuffer) + cbName));
+ dwLength = static_cast<DWORD>((pbName - pbBuffer) + cbName);
+ pbBuffer += dwLength;
+ cbBuffer -= dwLength;
+
+ // Buffer the name and nul terminate it.
+ szName = new (nothrow) CHAR[cbName + 1];
+ if (szName == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ goto Error;
+ }
+ memcpy(szName, pbName, cbName);
+ szName[cbName] = '\0';
+
+ // Lookup the type (possibly loading an assembly containing
+ // the type).
+ hEnum = TypeName::GetTypeUsingCASearchRules(szName, NULL);
+
+ //If we couldn't find the type, post an error
+ if (hEnum.IsNull())
+ {
+ MAKE_WIDEPTR_FROMUTF8(wszTemp, szName);
+ SString sMessage;
+ GetExceptionMessage(*pThrowable, sMessage);
+ if (!sMessage.IsEmpty())
+ hr = VMPostError(CORSECATTR_E_TYPE_LOAD_FAILED_EX, wszTemp, sMessage.GetUnicode());
+ else
+ hr = VMPostError(CORSECATTR_E_TYPE_LOAD_FAILED, wszTemp);
+ goto Error;
+ }
+
+ // Calculate the underlying primitive type of the
+ // enumeration.
+ eEnumType = hEnum.GetInternalCorElementType();
+ break;
+ case SERIALIZATION_TYPE_SZARRAY:
+ case SERIALIZATION_TYPE_TYPE:
+ // Can't deal with these yet.
+ hr = VMPostError(CORSECATTR_E_UNSUPPORTED_TYPE);
+ goto Error;
+ }
+
+ // Grab the field/property name and length.
+ if (FAILED(CPackedLen::SafeGetData((BYTE const *)pbBuffer,
+ (BYTE const *)pbBufferEnd,
+ &cbName,
+ (BYTE const **)&pbName)))
+ {
+ hr = VMPostError(CORSECATTR_E_TRUNCATED);
+ goto Error;
+ }
+
+ // SafeGetData ensured that the name is within the buffer
+ _ASSERTE(FitsIn<DWORD>((pbName - pbBuffer) + cbName));
+ dwLength = static_cast<DWORD>((pbName - pbBuffer) + cbName);
+ pbBuffer += dwLength;
+ cbBuffer -= dwLength;
+
+ // Buffer the name and null terminate it.
+ szName = new (nothrow) CHAR[cbName + 1];
+ if (szName == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ goto Error;
+ }
+ memcpy(szName, pbName, cbName);
+ szName[cbName] = '\0';
+
+ // Set the field or property
+ if (bIsField)
+ hr = SetAttrField(&pbBuffer, &cbBuffer, dwType, hEnum, pMT, szName, pObj, dwLength, pbName, cbName, eEnumType);
+ else
+ hr = SetAttrProperty(&pbBuffer, &cbBuffer, pMT, dwType, szName, pObj, dwLength, pbName, cbName, eEnumType);
+ }
+ }
+Error:;
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ if (pThrowable)
+ {
+ *pThrowable = GET_THROWABLE();
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ return hr;
+}
+
+HRESULT SecurityAttributes::SetAttrField(BYTE** ppbBuffer, SIZE_T* pcbBuffer, DWORD dwType, TypeHandle hEnum, MethodTable* pMT, __in_z LPSTR szName, OBJECTREF* pObj, DWORD dwLength, BYTE* pbName, DWORD cbName, CorElementType eEnumType)
+{
+ DWORD cbSig = 0;
+ NewArrayHolder<BYTE> pbSig(new (nothrow) BYTE[128]);
+ if (pbSig == NULL)
+ return E_OUTOFMEMORY;
+
+ BYTE *pbBufferEnd = *ppbBuffer + *pcbBuffer;
+
+ // Build the field signature.
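+    // For instance (a sketch): a boolean field produces the two-byte signature
+    // { IMAGE_CEE_CS_CALLCONV_FIELD, ELEMENT_TYPE_BOOLEAN }, i.e. { 0x06, 0x02 }.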
+ cbSig += CorSigCompressData((ULONG)IMAGE_CEE_CS_CALLCONV_FIELD, &pbSig[cbSig]);
+ switch (dwType)
+ {
+ case SERIALIZATION_TYPE_BOOLEAN:
+ case SERIALIZATION_TYPE_I1:
+ case SERIALIZATION_TYPE_I2:
+ case SERIALIZATION_TYPE_I4:
+ case SERIALIZATION_TYPE_I8:
+ case SERIALIZATION_TYPE_U1:
+ case SERIALIZATION_TYPE_U2:
+ case SERIALIZATION_TYPE_U4:
+ case SERIALIZATION_TYPE_U8:
+ case SERIALIZATION_TYPE_R4:
+ case SERIALIZATION_TYPE_R8:
+ case SERIALIZATION_TYPE_CHAR:
+ static_assert_no_msg(SERIALIZATION_TYPE_BOOLEAN == (CorSerializationType)ELEMENT_TYPE_BOOLEAN);
+ static_assert_no_msg(SERIALIZATION_TYPE_I1 == (CorSerializationType)ELEMENT_TYPE_I1);
+ static_assert_no_msg(SERIALIZATION_TYPE_I2 == (CorSerializationType)ELEMENT_TYPE_I2);
+ static_assert_no_msg(SERIALIZATION_TYPE_I4 == (CorSerializationType)ELEMENT_TYPE_I4);
+ static_assert_no_msg(SERIALIZATION_TYPE_I8 == (CorSerializationType)ELEMENT_TYPE_I8);
+ static_assert_no_msg(SERIALIZATION_TYPE_U1 == (CorSerializationType)ELEMENT_TYPE_U1);
+ static_assert_no_msg(SERIALIZATION_TYPE_U2 == (CorSerializationType)ELEMENT_TYPE_U2);
+ static_assert_no_msg(SERIALIZATION_TYPE_U4 == (CorSerializationType)ELEMENT_TYPE_U4);
+ static_assert_no_msg(SERIALIZATION_TYPE_U8 == (CorSerializationType)ELEMENT_TYPE_U8);
+ static_assert_no_msg(SERIALIZATION_TYPE_R4 == (CorSerializationType)ELEMENT_TYPE_R4);
+ static_assert_no_msg(SERIALIZATION_TYPE_R8 == (CorSerializationType)ELEMENT_TYPE_R8);
+ static_assert_no_msg(SERIALIZATION_TYPE_CHAR == (CorSerializationType)ELEMENT_TYPE_CHAR);
+ cbSig += CorSigCompressData(dwType, &pbSig[cbSig]);
+ break;
+ case SERIALIZATION_TYPE_STRING:
+ cbSig += CorSigCompressData((ULONG)ELEMENT_TYPE_STRING, &pbSig[cbSig]);
+ break;
+ case SERIALIZATION_TYPE_ENUM:
+ // To avoid problems when the field and enum are defined
+ // in different scopes (we'd have to go hunting for
+ // typerefs), we build a signature with a special type
+ // (ELEMENT_TYPE_INTERNAL, which contains a TypeHandle).
+            // This compares loaded types for identity.
+ cbSig += CorSigCompressData((ULONG)ELEMENT_TYPE_INTERNAL, &pbSig[cbSig]);
+ cbSig += CorSigCompressPointer(hEnum.AsPtr(), &pbSig[cbSig]);
+ break;
+ default:
+ return VMPostError(CORSECATTR_E_UNSUPPORTED_TYPE);
+ }
+
+
+ // Locate a field desc.
+ FieldDesc* pFD = MemberLoader::FindField(pMT, szName, (PCCOR_SIGNATURE)pbSig,
+ cbSig, pMT->GetModule());
+ if (pFD == NULL)
+ {
+ MAKE_WIDEPTR_FROMUTF8(wszTemp, szName);
+ return VMPostError(CORSECATTR_E_NO_FIELD, wszTemp);
+ }
+
+ // Set the field value.
+ LPSTR szString;
+ switch (dwType)
+ {
+ case SERIALIZATION_TYPE_BOOLEAN:
+ case SERIALIZATION_TYPE_I1:
+ case SERIALIZATION_TYPE_U1:
+ if(*pcbBuffer < sizeof(BYTE))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ pFD->SetValue8(*pObj, *(BYTE*)(*ppbBuffer));
+ (*ppbBuffer) += sizeof(BYTE);
+ (*pcbBuffer) -= sizeof(BYTE);
+ break;
+ case SERIALIZATION_TYPE_CHAR:
+ case SERIALIZATION_TYPE_I2:
+ case SERIALIZATION_TYPE_U2:
+ if(*pcbBuffer < sizeof(WORD))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ pFD->SetValue16(*pObj, GET_UNALIGNED_VAL16(*ppbBuffer));
+ (*ppbBuffer) += sizeof(WORD);
+ (*pcbBuffer) -= sizeof(WORD);
+ break;
+ case SERIALIZATION_TYPE_I4:
+ case SERIALIZATION_TYPE_U4:
+ case SERIALIZATION_TYPE_R4:
+ if(*pcbBuffer < sizeof(DWORD))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ pFD->SetValue32(*pObj, GET_UNALIGNED_VAL32(*ppbBuffer));
+ (*ppbBuffer) += sizeof(DWORD);
+ (*pcbBuffer) -= sizeof(DWORD);
+ break;
+ case SERIALIZATION_TYPE_I8:
+ case SERIALIZATION_TYPE_U8:
+ case SERIALIZATION_TYPE_R8:
+ if(*pcbBuffer < sizeof(INT64))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ pFD->SetValue64(*pObj, GET_UNALIGNED_VAL64(*ppbBuffer));
+ (*ppbBuffer) += sizeof(INT64);
+ (*pcbBuffer) -= sizeof(INT64);
+ break;
+ case SERIALIZATION_TYPE_STRING:
+ // Ensures special case 'null' check below does not overrun buffer
+ if(*ppbBuffer >= pbBufferEnd) {
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ }
+ // Special case 'null' (represented as a length byte of '0xFF').
+ if (*(*ppbBuffer) == 0xFF) {
+ szString = NULL;
+ dwLength = sizeof(BYTE);
+ } else {
+ if (FAILED(CPackedLen::SafeGetData((BYTE const *)*ppbBuffer,
+ (BYTE const *)pbBufferEnd,
+ &cbName,
+ (BYTE const **)&pbName)))
+ {
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ }
+
+ // SafeGetData will ensure the name is within the buffer
+ _ASSERTE(FitsIn<DWORD>((pbName - *ppbBuffer) + cbName));
+ dwLength = static_cast<DWORD>((pbName - *ppbBuffer) + cbName);
+
+ DWORD allocLen = cbName + 1;
+ // Buffer and nul terminate it.
+ szString = (LPSTR)_alloca(allocLen);
+ memcpy(szString, pbName, cbName);
+ szString[cbName] = '\0';
+
+ }
+
+ // Allocate and initialize a managed version of the string.
+ {
+ STRINGREF orString;
+ if (szString)
+ {
+ orString = StringObject::NewString(szString, cbName);
+ if (orString == NULL)
+ COMPlusThrowOM();
+ }
+ else
+ orString = NULL;
+
+ pFD->SetRefValue(*pObj, (OBJECTREF)orString);
+ }
+
+ (*ppbBuffer) += dwLength;
+ (*pcbBuffer) -= dwLength;
+ break;
+ case SERIALIZATION_TYPE_ENUM:
+ // Get the underlying primitive type.
+ switch (eEnumType)
+ {
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ if(*pcbBuffer < sizeof(BYTE))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ pFD->SetValue8(*pObj, *(BYTE*)(*ppbBuffer));
+ (*ppbBuffer) += sizeof(BYTE);
+ (*pcbBuffer) -= sizeof(BYTE);
+ break;
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ if(*pcbBuffer < sizeof(WORD))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ pFD->SetValue16(*pObj, GET_UNALIGNED_VAL16(*ppbBuffer));
+ (*ppbBuffer) += sizeof(WORD);
+ (*pcbBuffer) -= sizeof(WORD);
+ break;
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ if(*pcbBuffer < sizeof(DWORD))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ pFD->SetValue32(*pObj, GET_UNALIGNED_VAL32(*ppbBuffer));
+ (*ppbBuffer) += sizeof(DWORD);
+ (*pcbBuffer) -= sizeof(DWORD);
+ break;
+ default:
+ return VMPostError(CORSECATTR_E_UNSUPPORTED_ENUM_TYPE);
+ }
+ break;
+ default:
+ return VMPostError(CORSECATTR_E_UNSUPPORTED_TYPE);
+ }
+ return S_OK;
+}
+
+HRESULT SecurityAttributes::SetAttrProperty(BYTE** ppbBuffer, SIZE_T* pcbBuffer, MethodTable* pMT, DWORD dwType, __in_z LPSTR szName, OBJECTREF* pObj, DWORD dwLength, BYTE* pbName, DWORD cbName, CorElementType eEnumType)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(IsProtectedByGCFrame (pObj));
+ } CONTRACTL_END;
+
+ // Locate the property setter.
+ MethodDesc* pMD = MemberLoader::FindPropertyMethod(pMT, szName, PropertySet);
+ if (pMD == NULL)
+ {
+ MAKE_WIDEPTR_FROMUTF8(wszTemp, szName);
+ return VMPostError(CORSECATTR_E_NO_PROPERTY, wszTemp);
+ }
+
+ MethodDescCallSite propSet(pMD);
+
+ // Build the argument list.
+ ARG_SLOT args[2] = { NULL, NULL };
+ LPSTR szString;
+ NewHolder<BYTE> tmpLargeStringHolder (NULL);
+
+ switch (dwType)
+ {
+ case SERIALIZATION_TYPE_BOOLEAN:
+ case SERIALIZATION_TYPE_I1:
+ case SERIALIZATION_TYPE_U1:
+ if(*pcbBuffer < sizeof(BYTE))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ args[1] = (ARG_SLOT)*(BYTE*)(*ppbBuffer);
+ (*ppbBuffer) += sizeof(BYTE);
+ (*pcbBuffer) -= sizeof(BYTE);
+ break;
+ case SERIALIZATION_TYPE_CHAR:
+ case SERIALIZATION_TYPE_I2:
+ case SERIALIZATION_TYPE_U2:
+ if(*pcbBuffer < sizeof(WORD))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ args[1] = (ARG_SLOT)GET_UNALIGNED_VAL16(*ppbBuffer);
+ (*ppbBuffer) += sizeof(WORD);
+ (*pcbBuffer) -= sizeof(WORD);
+ break;
+ case SERIALIZATION_TYPE_I4:
+ case SERIALIZATION_TYPE_U4:
+ case SERIALIZATION_TYPE_R4:
+ if(*pcbBuffer < sizeof(DWORD))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ args[1] = (ARG_SLOT)GET_UNALIGNED_VAL32(*ppbBuffer);
+ (*ppbBuffer) += sizeof(DWORD);
+ (*pcbBuffer) -= sizeof(DWORD);
+ break;
+ case SERIALIZATION_TYPE_I8:
+ case SERIALIZATION_TYPE_U8:
+ case SERIALIZATION_TYPE_R8:
+ if(*pcbBuffer < sizeof(INT64))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ args[1] = (ARG_SLOT)GET_UNALIGNED_VAL64(*ppbBuffer);
+ (*ppbBuffer) += sizeof(INT64);
+ (*pcbBuffer) -= sizeof(INT64);
+ break;
+ case SERIALIZATION_TYPE_STRING:
+ // Ensures special case 'null' check below does not overrun buffer
+ if(*pcbBuffer < sizeof(BYTE)) {
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ }
+ // Special case 'null' (represented as a length byte of '0xFF').
+ if (*(*ppbBuffer) == 0xFF) {
+ szString = NULL;
+ dwLength = sizeof(BYTE);
+ if(*pcbBuffer < sizeof(BYTE))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ } else {
+
+ if (FAILED(CPackedLen::SafeGetData((BYTE const *)(*ppbBuffer),
+ (BYTE const *)(*ppbBuffer + *pcbBuffer),
+ &cbName,
+ (BYTE const **)&pbName)))
+ {
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ }
+
+ // Used below - SafeGetData ensures that name is within the buffer
+ _ASSERTE(FitsIn<DWORD>((pbName - *ppbBuffer) + cbName));
+ dwLength = static_cast<DWORD>((pbName - *ppbBuffer) + cbName);
+
+ DWORD allocLen = cbName + 1;
+
+                //
+                // Allocate smaller strings on the stack and larger ones on the heap. A packed
+                // length prefix shorter than 4 bytes means the string is under 0x4000 bytes.
+                //
+
+                if ((pbName - *ppbBuffer) < 4) {
+ // Buffer and nul terminate it.
+ szString = (LPSTR)_alloca(allocLen);
+ } else {
+ tmpLargeStringHolder = new BYTE[allocLen];
+ szString = (LPSTR) ((BYTE*)tmpLargeStringHolder);
+ }
+
+ memcpy(szString, pbName, cbName);
+ szString[cbName] = '\0';
+ }
+
+ // Allocate and initialize a managed version of the string.
+ {
+ STRINGREF orString;
+
+ if (szString) {
+ orString = StringObject::NewString(szString, cbName);
+ if (orString == NULL)
+ COMPlusThrowOM();
+ } else
+ orString = NULL;
+
+ args[1] = ObjToArgSlot(orString);
+ }
+
+ (*ppbBuffer) += dwLength;
+ (*pcbBuffer) -= dwLength;
+ break;
+ case SERIALIZATION_TYPE_ENUM:
+ // Get the underlying primitive type.
+ switch (eEnumType)
+ {
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ if(*pcbBuffer < sizeof(BYTE))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ args[1] = (ARG_SLOT)*(BYTE*)(*ppbBuffer);
+ (*ppbBuffer) += sizeof(BYTE);
+ (*pcbBuffer) -= sizeof(BYTE);
+ break;
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ if(*pcbBuffer < sizeof(WORD))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ args[1] = (ARG_SLOT)GET_UNALIGNED_VAL16(*ppbBuffer);
+ (*ppbBuffer) += sizeof(WORD);
+ (*pcbBuffer) -= sizeof(WORD);
+ break;
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ if(*pcbBuffer < sizeof(DWORD))
+ return VMPostError(CORSECATTR_E_TRUNCATED);
+ args[1] = (ARG_SLOT)GET_UNALIGNED_VAL32(*ppbBuffer);
+ (*ppbBuffer) += sizeof(DWORD);
+ (*pcbBuffer) -= sizeof(DWORD);
+ break;
+ default:
+ return VMPostError(CORSECATTR_E_UNSUPPORTED_ENUM_TYPE);
+ }
+ break;
+ default:
+ return VMPostError(CORSECATTR_E_UNSUPPORTED_TYPE);
+ }
+
+
+    // ! Don't move this up: StringObject::NewString inside the switch above
+    // ! can trigger a GC, which could move the object that *pObj refers to.
+ args[0] = ObjToArgSlot(*pObj);
+
+ // Call the setter.
+ propSet.Call(args);
+
+ return S_OK;
+}
+
+
+void SecurityAttributes::AttrSetBlobToPermissionSets(
+ IN BYTE* pbRawPermissions,
+ IN DWORD cbRawPermissions,
+ OUT OBJECTREF* pObj,
+ DWORD dwAction)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ _ASSERTE(pbRawPermissions);
+ _ASSERTE(cbRawPermissions > 0);
+ _ASSERTE(pbRawPermissions[0] == LAZY_DECL_SEC_FLAG);
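+    // (A leading LAZY_DECL_SEC_FLAG byte marks the blob as a serialized
+    // CORSEC_ATTRSET - the "lazy" encoding described in GetDeclaredPermissions -
+    // rather than pre-serialized permission set XML.)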
+
+ HRESULT hr = S_OK;
+ CORSEC_ATTRSET pset;
+
+ // Deserialize the CORSEC_ATTRSET
+ hr = BlobToAttributeSet(pbRawPermissions, cbRawPermissions, &pset, dwAction);
+ if(FAILED(hr))
+ COMPlusThrowHR(hr);
+
+ OBJECTREF throwable = NULL;
+ GCPROTECT_BEGIN(throwable);
+ {
+ // allocate and GC-protect an array of objectrefs to reference the permissions
+ OBJECTREF* attrArray = (OBJECTREF*)_alloca(pset.dwAttrCount * sizeof(OBJECTREF));
+ memset(attrArray, 0, pset.dwAttrCount * sizeof(OBJECTREF));
+ GCPROTECT_ARRAY_BEGIN(*attrArray, pset.dwAttrCount);
+ {
+ // Convert to a managed array of attribute objects
+ DWORD dwErrorIndex;
+ hr = AttributeSetToManaged(/*OUT*/attrArray, &pset, &throwable, &dwErrorIndex, true);
+
+ // Convert the array of attribute objects to a serialized PermissionSet
+ if (SUCCEEDED(hr))
+ {
+ BYTE* pbXmlBlob = NULL;
+ DWORD cbXmlBlob = 0;
+ BYTE* pbNonCasXmlBlob = NULL;
+ DWORD cbNonCasXmlBlob = 0;
+
+ AttrArrayToPermissionSet(attrArray,
+ false,
+ pset.dwAttrCount,
+ &pbXmlBlob,
+ &cbXmlBlob,
+ &pbNonCasXmlBlob,
+ &cbNonCasXmlBlob,
+ ActionAllowsNullPermissionSet(static_cast<CorDeclSecurity>(dwAction)),
+ pObj);
+
+ _ASSERTE(pbXmlBlob == NULL && cbXmlBlob == 0 && pbNonCasXmlBlob == NULL && cbNonCasXmlBlob == 0);
+ }
+ }
+ GCPROTECT_END();
+ }
+ GCPROTECT_END();
+
+ if(FAILED(hr))
+ COMPlusThrowHR(hr);
+}
+
+#ifdef FEATURE_CAS_POLICY
+HRESULT SecurityAttributes::TranslateSecurityAttributesHelper(
+ CORSEC_ATTRSET *pAttrSet,
+ BYTE **ppbOutput,
+ DWORD *pcbOutput,
+ BYTE **ppbNonCasOutput,
+ DWORD *pcbNonCasOutput,
+ DWORD *pdwErrorIndex)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ OBJECTREF *attrArray;
+ DWORD dwGlobalError = 0;
+
+ EX_TRY
+ {
+ if (pdwErrorIndex)
+ dwGlobalError = *pdwErrorIndex;
+
+ // Get into the context of the special compilation appdomain (which has an
+ // AppBase set to the current directory).
+ ComCallWrapper *pWrap = ComCallWrapper::GetWrapperFromIP(pAttrSet->pAppDomain);
+
+ ENTER_DOMAIN_ID(pWrap->GetDomainID())
+ {
+ struct _gc {
+ OBJECTREF throwable;
+ OBJECTREF orPermSet;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+ {
+ // we need to setup special security settings that we use during compilation
+ SetupRestrictSecAttributes();
+
+ // allocate and protect an array of objectrefs to reference the permissions
+ attrArray = (OBJECTREF*)_alloca(pAttrSet->dwAttrCount * sizeof(OBJECTREF));
+ memset(attrArray, 0, pAttrSet->dwAttrCount * sizeof(OBJECTREF));
+ GCPROTECT_ARRAY_BEGIN(*attrArray, pAttrSet->dwAttrCount);
+ {
+ // Convert to an array of attributes, and then serialize to XML
+ hr = AttributeSetToManaged(/*OUT*/attrArray, pAttrSet, &gc.throwable, pdwErrorIndex, false);
+ if (SUCCEEDED(hr))
+ {
+ if (pdwErrorIndex)
+ *pdwErrorIndex = dwGlobalError;
+
+ // Convert the array of attribute objects to a serialized PermissionSet or PermissionSetCollection
+ AttrArrayToPermissionSet(attrArray,
+ true,
+ pAttrSet->dwAttrCount,
+ ppbOutput,
+ pcbOutput,
+ ppbNonCasOutput,
+ pcbNonCasOutput,
+ ActionAllowsNullPermissionSet(static_cast<CorDeclSecurity>(pAttrSet->dwAction)),
+ &gc.orPermSet);
+ }
+ }
+ GCPROTECT_END();
+ }
+ GCPROTECT_END(); // for throwable
+ }
+ END_DOMAIN_TRANSITION;
+ }
+ EX_CATCH_HRESULT(hr);
+ return hr;
+}
+#endif // FEATURE_CAS_POLICY
+
+// Call into managed code to group permissions into a PermissionSet and serialize it to XML
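+// On return (when fSerialize is true), *ppbOutput/*pcbOutput hold the serialized CAS
+// set and *ppbNonCasOutput/*pcbNonCasOutput the serialized non-CAS set; either pair
+// may come back NULL/0 if the corresponding set is empty.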
+void SecurityAttributes::AttrArrayToPermissionSet(OBJECTREF* attrArray,
+ bool fSerialize,
+ DWORD attrCount,
+ BYTE **ppbOutput,
+ DWORD *pcbOutput,
+ BYTE **ppbNonCasOutput,
+ DWORD *pcbNonCasOutput,
+ bool fAllowEmptyPermissionSet,
+ OBJECTREF* pPermSet)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ EApiCategories eProtectedCategories = (EApiCategories)(GetHostProtectionManager()->GetProtectedCategories());
+
+ MethodDescCallSite createSerialized(METHOD__PERMISSION_SET__CREATE_SERIALIZED);
+
+ // Allocate a managed array of security attribute objects for input to the function.
+ PTRARRAYREF orInput = (PTRARRAYREF) AllocateObjectArray(attrCount, g_pObjectClass);
+
+ // Copy over the permission objects references.
+ DWORD i;
+ for (i = 0; i < attrCount; i++)
+ {
+ orInput->SetAt(i, attrArray[i]);
+ }
+
+ // Call the routine.
+ struct _gc {
+ U1ARRAYREF orNonCasOutput;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ ARG_SLOT args[] = {
+ ObjToArgSlot(orInput),
+ BoolToArgSlot(fSerialize),
+ PtrToArgSlot(&gc.orNonCasOutput),
+ PtrToArgSlot(pPermSet),
+ (ARG_SLOT)eProtectedCategories,
+ BoolToArgSlot(fAllowEmptyPermissionSet)
+ };
+ U1ARRAYREF orOutput = NULL;
+
+ {
+        // Elevate the thread's allowed loading level. This can cause load failures if assemblies
+        // loaded from this point on require any assemblies currently being loaded.
+ OVERRIDE_LOAD_LEVEL_LIMIT(FILE_ACTIVE);
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ orOutput = (U1ARRAYREF) createSerialized.Call_RetOBJECTREF(args);
+ }
+
+ // Buffer the managed output in a native binary blob.
+ // Special case the empty blob. We might get a second blob output if
+ // there were any non-CAS permissions present.
+ NewArrayHolder<BYTE> TempOutput(NULL);
+ NewArrayHolder<BYTE> TempNonCasOutput(NULL);
+
+ if (orOutput == NULL)
+ {
+ *pcbOutput = 0;
+ }
+ else
+ {
+ BYTE *pbArray = orOutput->GetDataPtr();
+ DWORD cbArray = orOutput->GetNumComponents();
+ TempOutput = new BYTE[cbArray];
+ memcpy(TempOutput, pbArray, cbArray);
+ *pcbOutput = cbArray;
+ }
+
+ if (gc.orNonCasOutput == NULL)
+ {
+ *pcbNonCasOutput = 0;
+ }
+ else
+ {
+ BYTE *pbArray = gc.orNonCasOutput->GetDataPtr();
+ DWORD cbArray = gc.orNonCasOutput->GetNumComponents();
+ TempNonCasOutput = new BYTE[cbArray];
+ memcpy(TempNonCasOutput, pbArray, cbArray);
+ *pcbNonCasOutput = cbArray;
+ }
+
+ *ppbOutput = TempOutput;
+ *ppbNonCasOutput = TempNonCasOutput;
+
+ TempOutput.SuppressRelease();
+ TempNonCasOutput.SuppressRelease();
+
+ GCPROTECT_END();
+}
+
+
+//
+// This is a public exported method
+//
+
+// Translate a set of security custom attributes into a serialized permission set blob.
+HRESULT STDMETHODCALLTYPE TranslateSecurityAttributes(CORSEC_ATTRSET *pAttrSet,
+ BYTE **ppbOutput,
+ DWORD *pcbOutput,
+ BYTE **ppbNonCasOutput,
+ DWORD *pcbNonCasOutput,
+ DWORD *pdwErrorIndex)
+{
+#ifdef FEATURE_CAS_POLICY
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ MODE_ANY;
+ } CONTRACTL_END;
+ HRESULT hr = S_OK;
+
+ BEGIN_ENTRYPOINT_NOTHROW;
+
+ GCX_COOP(); // because it calls into managed code to instantiate the PermissionSet objects
+ hr = SecurityAttributes::TranslateSecurityAttributesHelper(pAttrSet, ppbOutput, pcbOutput,
+ ppbNonCasOutput, pcbNonCasOutput, pdwErrorIndex);
+
+ END_ENTRYPOINT_NOTHROW;
+
+ return hr;
+#else
+ return E_NOTIMPL;
+#endif
+}
+
+
+//
+// This is a public exported method
+//
+
+// Reads permission requests (if any) from the manifest of an assembly.
+HRESULT STDMETHODCALLTYPE GetPermissionRequests(LPCWSTR pwszFileName,
+ BYTE **ppbMinimal,
+ DWORD *pcbMinimal,
+ BYTE **ppbOptional,
+ DWORD *pcbOptional,
+ BYTE **ppbRefused,
+ DWORD *pcbRefused)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT;
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ IMetaDataDispenser *pMD = NULL;
+ IMetaDataAssemblyImport *pMDAsmImport = NULL;
+ IMetaDataImport *pMDImport = NULL;
+ mdAssembly mdAssembly;
+ BYTE *pbMinimal = NULL;
+ DWORD cbMinimal = 0;
+ BYTE *pbOptional = NULL;
+ DWORD cbOptional = 0;
+ BYTE *pbRefused = NULL;
+ DWORD cbRefused = 0;
+ HCORENUM hEnumDcl = NULL;
+ mdPermission rPSets[dclMaximumValue + 1];
+ DWORD dwSets;
+ DWORD i;
+
+ *ppbMinimal = NULL;
+ *pcbMinimal = 0;
+ *ppbOptional = NULL;
+ *pcbOptional = 0;
+ *ppbRefused = NULL;
+ *pcbRefused = 0;
+
+ // Get the meta data interface dispenser.
+ hr = MetaDataGetDispenser(CLSID_CorMetaDataDispenser,
+ IID_IMetaDataDispenserEx,
+ (void **)&pMD);
+ if (FAILED(hr))
+ goto Error;
+
+ // Open a scope on the assembly file.
+ hr = pMD->OpenScope(pwszFileName,
+ 0,
+ IID_IMetaDataAssemblyImport,
+ (IUnknown**)&pMDAsmImport);
+ if (FAILED(hr))
+ goto Error;
+
+ // Determine the assembly token.
+ hr = pMDAsmImport->GetAssemblyFromScope(&mdAssembly);
+ if (FAILED(hr))
+ goto Error;
+
+ // QI for a normal import interface.
+ hr = pMDAsmImport->QueryInterface(IID_IMetaDataImport, (void**)&pMDImport);
+ if (FAILED(hr))
+ goto Error;
+
+ // Look for permission request sets hung off the assembly token.
+ hr = pMDImport->EnumPermissionSets(&hEnumDcl,
+ mdAssembly,
+ dclActionNil,
+ rPSets,
+ dclMaximumValue + 1,
+ &dwSets);
+ if (FAILED(hr))
+ goto Error;
+
+ for (i = 0; i < dwSets; i++) {
+ BYTE *pbData;
+ DWORD cbData;
+ DWORD dwAction;
+
+ pMDImport->GetPermissionSetProps(rPSets[i],
+ &dwAction,
+ (void const **)&pbData,
+ &cbData);
+
+ switch (dwAction) {
+ case dclRequestMinimum:
+ _ASSERTE(pbMinimal == NULL);
+ pbMinimal = pbData;
+ cbMinimal = cbData;
+ break;
+ case dclRequestOptional:
+ _ASSERTE(pbOptional == NULL);
+ pbOptional = pbData;
+ cbOptional = cbData;
+ break;
+ case dclRequestRefuse:
+ _ASSERTE(pbRefused == NULL);
+ pbRefused = pbData;
+ cbRefused = cbData;
+ break;
+ default:
+ _ASSERTE(FALSE);
+ }
+ }
+
+ pMDImport->CloseEnum(hEnumDcl);
+
+ // Buffer the results (since we're about to close the metadata scope and
+ // lose the original data).
+ if (pbMinimal) {
+ *ppbMinimal = new (nothrow) BYTE[cbMinimal];
+ if (*ppbMinimal == NULL) {
+ hr = E_OUTOFMEMORY;
+ goto Error;
+ }
+ memcpy(*ppbMinimal, pbMinimal, cbMinimal);
+ *pcbMinimal = cbMinimal;
+ }
+
+ if (pbOptional) {
+ *ppbOptional = new (nothrow) BYTE[cbOptional];
+ if (*ppbOptional == NULL) {
+ hr = E_OUTOFMEMORY;
+ goto Error;
+ }
+ memcpy(*ppbOptional, pbOptional, cbOptional);
+ *pcbOptional = cbOptional;
+ }
+
+ if (pbRefused) {
+ *ppbRefused = new (nothrow) BYTE[cbRefused];
+ if (*ppbRefused == NULL) {
+ hr = E_OUTOFMEMORY;
+ goto Error;
+ }
+ memcpy(*ppbRefused, pbRefused, cbRefused);
+ *pcbRefused = cbRefused;
+ }
+
+ Error:
+ if (pMDImport)
+ pMDImport->Release();
+ if (pMDAsmImport)
+ pMDAsmImport->Release();
+ if (pMD)
+ pMD->Release();
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+// Load permission requests in their serialized form from assembly metadata.
+// This consists of a required permissions set and optionally an optional and
+// deny permission set.
+void SecurityAttributes::LoadPermissionRequestsFromAssembly(IN IMDInternalImport* pImport,
+ OUT OBJECTREF* pReqdPermissions,
+ OUT OBJECTREF* pOptPermissions,
+ OUT OBJECTREF* pDenyPermissions)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pReqdPermissions));
+ PRECONDITION(CheckPointer(pOptPermissions));
+ PRECONDITION(CheckPointer(pDenyPermissions));
+ } CONTRACTL_END;
+
+ mdAssembly mdAssembly;
+ HRESULT hr;
+
+ *pReqdPermissions = NULL;
+ *pOptPermissions = NULL;
+ *pDenyPermissions = NULL;
+
+ // It's OK to be called with a NULL assembly. This can happen in the code
+ // path where we're just checking for a signature, nothing else. So just
+ // return without doing anything.
+ if (pImport == NULL)
+ return;
+
+ // Locate assembly metadata token since the various permission sets are
+ // written as custom values against this token.
+ if (pImport->GetAssemblyFromScope(&mdAssembly) != S_OK) {
+ _ASSERT(FALSE);
+ return;
+ }
+
+ struct _gc
+ {
+ OBJECTREF reqdPset;
+ OBJECTREF optPset;
+ OBJECTREF denyPset;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ {
+ GCX_COOP(); // because GetDeclaredPermissions may call into managed code
+ GCPROTECT_BEGIN(gc);
+
+ // Read and translate required permission set.
+ hr = Security::GetDeclaredPermissions(pImport, mdAssembly, dclRequestMinimum, &gc.reqdPset, NULL);
+ _ASSERT(SUCCEEDED(hr) || (hr == CLDB_E_RECORD_NOTFOUND));
+
+ // Now the optional permission set.
+ PsetCacheEntry *pOptPSCacheEntry = NULL;
+ hr = Security::GetDeclaredPermissions(pImport, mdAssembly, dclRequestOptional, &gc.optPset, &pOptPSCacheEntry);
+ _ASSERT(SUCCEEDED(hr) || (hr == CLDB_E_RECORD_NOTFOUND));
+
+ // An empty permission set has semantic meaning if it is an assembly's optional permission set.
+ // If we have an optional set, then we need to make sure it is created.
+ if (SUCCEEDED(hr) && gc.optPset == NULL && pOptPSCacheEntry != NULL)
+ {
+ gc.optPset = pOptPSCacheEntry->CreateManagedPsetObject(dclRequestOptional, /* createEmptySet */ true);
+ }
+
+ // And finally the refused permission set.
+ hr = Security::GetDeclaredPermissions(pImport, mdAssembly, dclRequestRefuse, &gc.denyPset, NULL);
+ _ASSERT(SUCCEEDED(hr) || (hr == CLDB_E_RECORD_NOTFOUND));
+
+ *pReqdPermissions = gc.reqdPset;
+ *pOptPermissions = gc.optPset;
+ *pDenyPermissions = gc.denyPset;
+
+ GCPROTECT_END();
+ }
+}
+
+// Determine whether a RequestOptional or RequestRefuse request is made in the assembly manifest.
+BOOL SecurityAttributes::RestrictiveRequestsInAssembly(IMDInternalImport* pImport)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ mdAssembly mdAssembly;
+ HRESULT hr;
+ HENUMInternal hEnumDcl;
+
+ // Locate assembly metadata token since the various permission sets are
+ // written as custom values against this token.
+ hr = pImport->GetAssemblyFromScope(&mdAssembly);
+ if (FAILED(hr))
+ return TRUE;
+
+ hr = pImport->EnumPermissionSetsInit(mdAssembly,
+ dclRequestRefuse,
+ &hEnumDcl);
+
+ BOOL bFoundRequestRefuse = (hr != CLDB_E_RECORD_NOTFOUND);
+ pImport->EnumClose(&hEnumDcl);
+
+ if (bFoundRequestRefuse)
+ return TRUE;
+
+ hr = pImport->EnumPermissionSetsInit(mdAssembly,
+ dclRequestOptional,
+ &hEnumDcl);
+ BOOL bFoundRequestOptional = (hr != CLDB_E_RECORD_NOTFOUND);
+ pImport->EnumClose(&hEnumDcl);
+
+ return bFoundRequestOptional;
+}
+#endif // CROSSGEN_COMPILE
+
+HRESULT SecurityAttributes::GetPermissionsFromMetaData(IN IMDInternalImport *pInternalImport,
+ IN mdToken token,
+ IN CorDeclSecurity action,
+ OUT PBYTE* ppbPerm,
+ OUT ULONG* pcbPerm)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+ HRESULT hr = S_OK;
+ mdPermission tkPerm;
+ void const ** ppData = const_cast<void const**> (reinterpret_cast<void**> (ppbPerm));
+ DWORD dwActionDummy;
+ // Get the blob for the CAS action from the security action table in metadata
+ HENUMInternalHolder hEnumDcl(pInternalImport);
+ if (hEnumDcl.EnumPermissionSetsInit(token,action))
+ {
+    _ASSERTE(pInternalImport->EnumGetCount(&hEnumDcl) == 1 && "Multiple permission sets for the same declaration aren't currently supported.");
+ if (pInternalImport->EnumNext(&hEnumDcl, &tkPerm))
+ {
+ hr = pInternalImport->GetPermissionSetProps(
+ tkPerm,
+ &dwActionDummy,
+ ppData,
+ pcbPerm);
+
+            if (FAILED(hr))
+ {
+ COMPlusThrowHR(hr);
+ }
+ }
+ else
+ {
+ _ASSERTE(!"At least one enumeration expected");
+ }
+ }
+ else
+ {
+ hr = CLDB_E_RECORD_NOTFOUND;
+ }
+ return hr;
+}
+
+void SecurityAttributes::CreateAndCachePermissions(
+ IN PBYTE pbPerm,
+ IN ULONG cbPerm,
+ IN CorDeclSecurity action,
+ OUT OBJECTREF *pDeclaredPermissions,
+ OUT PsetCacheEntry **pPSCacheEntry)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ SecurityDeclarativeCache *pSDC;
+ PsetCacheEntry* pPCE;
+
+ pSDC = &(GetAppDomain()->m_pSecContext->m_pSecurityDeclarativeCache);
+
+
+ pPCE = pSDC->CreateAndCachePset (pbPerm, cbPerm);
+ if (pDeclaredPermissions) {
+#ifdef CROSSGEN_COMPILE
+ _ASSERTE(!"This codepath should be unreachable during crossgen");
+ *pDeclaredPermissions = NULL;
+#else
+ *pDeclaredPermissions = pPCE->CreateManagedPsetObject (action);
+#endif
+ }
+ if (pPSCacheEntry) {
+ *pPSCacheEntry = pPCE;
+ }
+}
+
+// Returns the declared PermissionSet for the specified action type.
+HRESULT SecurityAttributes::GetDeclaredPermissions(IN IMDInternalImport *pInternalImport,
+ IN mdToken token,
+ IN CorDeclSecurity action,
+ OUT OBJECTREF *pDeclaredPermissions,
+ OUT PsetCacheEntry **pPSCacheEntry)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ HRESULT hr = S_FALSE;
+ PBYTE pbPerm = NULL;
+ ULONG cbPerm = 0;
+
+
+
+ _ASSERTE(action > dclActionNil && action <= dclMaximumValue);
+
+ // Initialize the output parameters.
+ if (pDeclaredPermissions)
+ *pDeclaredPermissions = NULL;
+ if(pPSCacheEntry)
+ *pPSCacheEntry = NULL;
+
+ bool bCas = !(action == dclNonCasDemand || action == dclNonCasLinkDemand || action == dclNonCasInheritance);
+
+ hr = GetPermissionsFromMetaData(pInternalImport, token, action, &pbPerm, &cbPerm);
+ if(pbPerm && cbPerm > 0)
+ {
+ CreateAndCachePermissions(pbPerm, cbPerm, action, pDeclaredPermissions, pPSCacheEntry);
+ }
+ else if(!bCas)
+ {
+        // We're looking for a non-CAS action, which may be encoded under the corresponding CAS action.
+        // Pre-Whidbey, CAS and non-CAS actions were encoded separately because declarative security
+        // processing happened at build time (a permission set object was created for each declarative
+        // action, converted to XML, and the serialized XML stored in the assembly).
+        //
+        // In Whidbey the default is LAZY declarative security (LAZY_DECL_SEC_FLAG below): no declarative
+        // security processing at build time (the declarative annotation is simply stored as a serialized
+        // blob - no permission sets created or converted to XML). The actual processing (creating
+        // permission sets, etc.) happens at runtime.
+        //
+        // This means that in Whidbey (and beyond) we cannot tell at build time whether a declarative
+        // action is a CAS or non-CAS action. So at runtime, when looking for a non-CAS action, we must
+        // also check the permission set stored under the corresponding CAS action - but only when
+        // LAZY_DECL_SEC_FLAG is in effect.
+
+ // Determine the corresponding CAS action
+ CorDeclSecurity casAction = dclDemand;
+ if(action == dclNonCasLinkDemand)
+ casAction = dclLinktimeCheck;
+ else if(action == dclNonCasInheritance)
+ casAction = dclInheritanceCheck;
+
+ // Get the blob for the CAS action from the security action table in metadata
+ hr = GetPermissionsFromMetaData(pInternalImport, token, casAction, &pbPerm, &cbPerm);
+
+ if(pbPerm && cbPerm > 0 && pbPerm[0] == LAZY_DECL_SEC_FLAG) // if it's a serialized CORSEC_ATTRSET
+ {
+ CreateAndCachePermissions(pbPerm, cbPerm, casAction, pDeclaredPermissions, pPSCacheEntry);
+ }
+
+ }
+
+ return hr;
+}
+
+bool SecurityAttributes::IsHostProtectionAttribute(CORSEC_ATTRIBUTE* pAttr)
+{
+ static const char s_HostProtectionAttributeName[] = "System.Security.Permissions.HostProtectionAttribute, mscorlib";
+
+ return (strncmp(pAttr->pName, s_HostProtectionAttributeName, sizeof(s_HostProtectionAttributeName)-1) == 0);
+}
+
+bool SecurityAttributes::IsBuiltInCASPermissionAttribute(CORSEC_ATTRIBUTE* pAttr)
+{
+ WRAPPER_NO_CONTRACT;
+ static const char s_permissionsNamespace[] = "System.Security.Permissions.";
+ if(strncmp(pAttr->pName, s_permissionsNamespace, sizeof(s_permissionsNamespace) - 1) != 0)
+ return false; // not built-in permission
+ static const char s_principalPermissionName[] = "System.Security.Permissions.PrincipalPermissionAttribute, mscorlib";
+
+ // ASSERT: at this point we know we are in builtin namespace...so compare with PrincipalPermissionAttribute
+ if (strncmp(pAttr->pName, s_principalPermissionName, sizeof(s_principalPermissionName)-1) == 0)
+ return false; // found a principal permission => Not a built-in CAS permission
+
+ // special-case the unrestricted permission set attribute.
+ static const char s_PermissionSetName[] = "System.Security.Permissions.PermissionSetAttribute, mscorlib";
+ if (strncmp(pAttr->pName, s_PermissionSetName, sizeof(s_PermissionSetName)-1) == 0)
+ return IsUnrestrictedPermissionSetAttribute(pAttr);
+
+ return true; //built-in perm, but not principal perm => IsBuiltInCASPermissionAttribute
+}
+
+bool SecurityAttributes::IsUnrestrictedPermissionSetAttribute(CORSEC_ATTRIBUTE* pPerm)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
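+    // For illustration (a sketch inferred from the parsing below): the named-argument
+    // blob for [PermissionSet(..., Unrestricted = true)] looks like
+    //   0x54 0x02 0x0C 'U' 'n' 'r' 'e' 's' 't' 'r' 'i' 'c' 't' 'e' 'd' 0x01
+    // i.e. SERIALIZATION_TYPE_PROPERTY, SERIALIZATION_TYPE_BOOLEAN, a packed name
+    // length of 12, the property name, then the boolean value byte.
+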
+ BYTE const * pbBuffer = pPerm->pbValues;
+ SIZE_T cbBuffer = pPerm->cbValues;
+ BYTE const * pbBufferEnd = pbBuffer + cbBuffer;
+
+ if (cbBuffer < 2 * sizeof(BYTE))
+ return false;
+
+ // Get the field/property specifier
+ if (*(BYTE*)pbBuffer == SERIALIZATION_TYPE_FIELD)
+ return false;
+
+ _ASSERTE(*(BYTE*)pbBuffer == SERIALIZATION_TYPE_PROPERTY);
+ pbBuffer += sizeof(BYTE);
+ cbBuffer -= sizeof(BYTE);
+
+ // Get the value type
+ DWORD dwType = *(BYTE*)pbBuffer;
+ pbBuffer += sizeof(BYTE);
+ cbBuffer -= sizeof(BYTE);
+ if (dwType != SERIALIZATION_TYPE_BOOLEAN)
+ return false;
+
+ // Grab the field/property name and length.
+ DWORD cbName;
+ BYTE const * pbName;
+ if (FAILED(CPackedLen::SafeGetData(pbBuffer,
+ pbBufferEnd,
+ &cbName,
+ &pbName)))
+ {
+ return false;
+ }
+
+ PREFIX_ASSUME(pbName != NULL);
+
+ // SafeGetData will ensure the name is within the buffer
+ SIZE_T cbNameOffset = pbName - pbBuffer;
+ _ASSERTE(FitsIn<DWORD>(cbNameOffset));
+ DWORD dwLength = static_cast<DWORD>(cbNameOffset + cbName);
+ pbBuffer += dwLength;
+ cbBuffer -= dwLength;
+
+ // Buffer the name of the property and null terminate it.
+ DWORD allocLen = cbName + 1;
+ if (allocLen < cbName)
+ return false;
+
+ LPSTR szName = (LPSTR)_alloca(allocLen);
+ memcpy(szName, pbName, cbName);
+ szName[cbName] = '\0';
+
+ if (strcmp(szName, "Unrestricted") != 0)
+ return false;
+
+ // Make sure the value isn't "false"
+ return (*pbBuffer != 0);
+}
+
+// This takes a PermissionSetAttribute blob and checks whether it uses the "File" property. If it
+// does, it loads the file now and rewrites the attribute to use the "Hex" property instead
+// (because the file may not be available at runtime).
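+// For example (illustrative only; the file name is hypothetical): a blob carrying the
+// property string File = "perms.xml" is rewritten in place to carry Hex = "3c50..."
+// - the file contents hex-encoded - so the data survives even when perms.xml is
+// absent at runtime.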
+HRESULT SecurityAttributes::FixUpPermissionSetAttribute(CORSEC_ATTRIBUTE* pPerm)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pPerm->wValues == 1 && strcmp(pPerm->pName, "System.Security.Permissions.PermissionSetAttribute") == 0);
+ BYTE const * pbBuffer = pPerm->pbValues;
+ SIZE_T cbBuffer = pPerm->cbValues;
+ BYTE const * pbBufferEnd = pbBuffer + cbBuffer;
+ HRESULT hr;
+
+ // Check we've got at least the field/property specifier and the
+ // type code.
+ _ASSERTE(cbBuffer >= (sizeof(BYTE) + sizeof(BYTE)));
+
+ // Grab the field/property specifier.
+ bool bIsField = *(BYTE*)pbBuffer == SERIALIZATION_TYPE_FIELD;
+ _ASSERTE(bIsField || (*(BYTE*)pbBuffer == SERIALIZATION_TYPE_PROPERTY));
+ pbBuffer += sizeof(BYTE);
+ cbBuffer -= sizeof(BYTE);
+
+ // Grab the value type.
+ DWORD dwType = *(BYTE*)pbBuffer;
+ pbBuffer += sizeof(BYTE);
+ cbBuffer -= sizeof(BYTE);
+
+ if(bIsField)
+ return S_OK;
+ if(dwType != SERIALIZATION_TYPE_STRING)
+ return S_OK;
+
+ // Grab the field/property name and length.
+ ULONG cbName;
+ BYTE const * pbName;
+ IfFailRet(CPackedLen::SafeGetData(pbBuffer, pbBufferEnd, &cbName, &pbName));
+ PREFIX_ASSUME(pbName != NULL);
+
+ // SafeGetData ensures name is within buffer
+ SIZE_T cbNameOffset = pbName - pbBuffer;
+ _ASSERTE(FitsIn<DWORD>(cbNameOffset));
+ DWORD dwLength = static_cast<DWORD>(cbNameOffset + cbName);
+ pbBuffer += dwLength;
+ cbBuffer -= dwLength;
+
+ // Buffer the name of the property and null terminate it.
+ DWORD allocLen = cbName + 1;
+ LPSTR szName = (LPSTR)_alloca(allocLen);
+ memcpy(szName, pbName, cbName);
+ szName[cbName] = '\0';
+
+ if(strcmp(szName, "File") != 0)
+ return S_OK;
+ if(*pbBuffer == 0xFF) // special case that represents NULL string
+ return S_OK;
+
+ IfFailRet(CPackedLen::SafeGetData(pbBuffer, pbBufferEnd, &cbName, &pbName));
+ PREFIX_ASSUME(pbName != NULL);
+
+ // SafeGetData ensures name is within buffer
+ cbNameOffset = pbName - pbBuffer;
+ _ASSERTE(FitsIn<DWORD>(cbNameOffset));
+ dwLength = static_cast<DWORD>(cbNameOffset + cbName);
+ _ASSERTE(cbBuffer >= dwLength);
+
+ // Open the file
+ MAKE_WIDEPTR_FROMUTF8N(wszFileName, (LPCSTR)pbName, cbName);
+ HandleHolder hFile(WszCreateFile (wszFileName,
+ GENERIC_READ,
+ FILE_SHARE_READ,
+ NULL,
+ OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL | FILE_FLAG_SEQUENTIAL_SCAN,
+ NULL));
+ if (hFile == INVALID_HANDLE_VALUE)
+ return HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
+ DWORD dwFileLen = SafeGetFileSize(hFile, 0);
+ if (dwFileLen == 0xFFFFFFFF)
+ return HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
+
+ // Read the file
+ BYTE* pFileBuffer = new (nothrow) BYTE[(dwFileLen + 4) * sizeof(BYTE)];
+ if(!pFileBuffer)
+ return E_OUTOFMEMORY;
+ DWORD dwBytesRead;
+ if ((SetFilePointer(hFile, 0, NULL, FILE_BEGIN) == 0xFFFFFFFF) ||
+ (!ReadFile(hFile, pFileBuffer, dwFileLen, &dwBytesRead, NULL)))
+ {
+ delete [] pFileBuffer;
+ return E_FAIL;
+ }
+ if(dwBytesRead < dwFileLen)
+ {
+ delete [] pFileBuffer;
+ return E_FAIL;
+ }
+
+ // Make the new attribute blob
+ BYTE* pNewAttrBuffer = new (nothrow) BYTE[(dwFileLen + 10) * 2 * sizeof(BYTE)];
+ if(!pNewAttrBuffer)
+ return E_OUTOFMEMORY;
+ BYTE* pCurBuf = pNewAttrBuffer;
+ *pCurBuf = (BYTE)SERIALIZATION_TYPE_PROPERTY;
+ pCurBuf++;
+ *pCurBuf = (BYTE)SERIALIZATION_TYPE_STRING;
+ pCurBuf++;
+ pCurBuf = (BYTE*)CPackedLen::PutLength(pCurBuf, 3);
+ memcpy(pCurBuf, "Hex", 3);
+ pCurBuf += 3;
+ pCurBuf = (BYTE*)CPackedLen::PutLength(pCurBuf, dwFileLen * 2);
+ DWORD n;
+ BYTE b;
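+    // Hex-encode the file contents in lower case (e.g. byte 0x3C becomes '3','c').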
+ for(n = 0; n < dwFileLen; n++)
+ {
+ b = (pFileBuffer[n] >> 4) & 0xf;
+ *pCurBuf = (b < 10 ? '0' + b : 'a' + b - 10);
+ pCurBuf++;
+ b = pFileBuffer[n] & 0xf;
+ *pCurBuf = (b < 10 ? '0' + b : 'a' + b - 10);
+ pCurBuf++;
+ }
+ delete [] pFileBuffer;
+
+ // We shouldn't have a serialized permission set that can be this large, but to be safe we'll ensure
+ // that we fit in the output DWORD size.
+ SIZE_T cbNewAttrSize = pCurBuf - pNewAttrBuffer;
+
+ // Set the new values
+    delete [] pPerm->pbValues;
+ pPerm->pbValues = pNewAttrBuffer;
+ pPerm->cbValues = cbNewAttrSize;
+ return S_OK;
+}
+
+// if tkAssemblyRef is NULL, this assumes the type is in this assembly
+// uszClassName should be a UTF8 string including both namespace and class
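+// The result has the form "Namespace.Type, AssemblyDisplayName", e.g. (hypothetical):
+// "System.Security.Permissions.SecurityPermissionAttribute, mscorlib, Version=2.0.0.0,
+//  Culture=neutral, PublicKeyToken=b77a5c561934e089".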
+HRESULT GetFullyQualifiedTypeName(SString* pString, mdAssemblyRef tkAssemblyRef, __in_z CHAR* uszClassName, IMetaDataAssemblyImport *pImport, mdToken tkCtor)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ // Add class name
+ MAKE_WIDEPTR_FROMUTF8(wszClassName, uszClassName);
+ (*pString) += (LPCWSTR) wszClassName;
+ if(IsNilToken(tkAssemblyRef))
+ tkAssemblyRef = TokenFromRid(1, mdtAssembly);
+
+ // Add a comma separator
+ (*pString) += W(", ");
+
+ DWORD dwDisplayFlags = ASM_DISPLAYF_VERSION | ASM_DISPLAYF_PUBLIC_KEY_TOKEN | ASM_DISPLAYF_CULTURE;
+#ifdef FEATURE_FUSION // why is Security accessing Fusion interfaces bypassing Loader?
+ // Retrieve size of assembly name
+ ASSEMBLYMETADATA sContext;
+ ZeroMemory(&sContext, sizeof(ASSEMBLYMETADATA));
+ HRESULT hr = S_OK;
+ LPWSTR wszAssemblyName = NULL;
+ BYTE *pbPublicKeyOrToken = NULL;
+ DWORD cbPublicKeyOrToken = 0;
+ DWORD dwFlags = 0;
+ if(TypeFromToken(tkAssemblyRef) == mdtAssembly)
+ {
+ DWORD cchName;
+ hr = pImport->GetAssemblyProps(tkAssemblyRef, // [IN] The Assembly for which to get the properties.
+ NULL, // [OUT] Pointer to the public key or token.
+ NULL, // [OUT] Count of bytes in the public key or token.
+ NULL, // [OUT] Hash Algorithm
+ NULL, // [OUT] Buffer to fill with name.
+ NULL, // [IN] Size of buffer in wide chars.
+ &cchName, // [OUT] Actual # of wide chars in name.
+ &sContext, // [OUT] Assembly MetaData.
+ NULL); // [OUT] Flags.
+ if(FAILED(hr))
+ return hr;
+
+        // Get the assembly name and the other naming properties
+ wszAssemblyName = (LPWSTR)_alloca(cchName * sizeof(WCHAR));
+ hr = pImport->GetAssemblyProps(tkAssemblyRef,
+ (const void **)&pbPublicKeyOrToken,
+ &cbPublicKeyOrToken,
+ NULL,
+ wszAssemblyName,
+ cchName,
+ &cchName,
+ &sContext,
+ &dwFlags);
+ if(FAILED(hr))
+ return hr;
+ }
+ else if(TypeFromToken(tkAssemblyRef) == mdtAssemblyRef)
+ {
+ DWORD cchName;
+ hr = pImport->GetAssemblyRefProps(tkAssemblyRef, // [IN] The AssemblyRef for which to get the properties.
+ NULL, // [OUT] Pointer to the public key or token.
+ NULL, // [OUT] Count of bytes in the public key or token.
+ NULL, // [OUT] Buffer to fill with name.
+ NULL, // [IN] Size of buffer in wide chars.
+ &cchName, // [OUT] Actual # of wide chars in name.
+ &sContext, // [OUT] Assembly MetaData.
+ NULL, // [OUT] Hash blob.
+ NULL, // [OUT] Count of bytes in the hash blob.
+ NULL); // [OUT] Flags.
+ if(FAILED(hr))
+ return hr;
+
+        // Get the assembly name and the other naming properties
+ wszAssemblyName = (LPWSTR)_alloca(cchName * sizeof(WCHAR));
+ hr = pImport->GetAssemblyRefProps(tkAssemblyRef,
+ (const void **)&pbPublicKeyOrToken,
+ &cbPublicKeyOrToken,
+ wszAssemblyName,
+ cchName,
+ &cchName,
+ &sContext,
+ NULL,
+ NULL,
+ &dwFlags);
+ if(FAILED(hr))
+ return hr;
+ }
+ else
+ {
+ _ASSERTE(false && "unexpected token");
+ }
+
+ // Convert to an AssemblyNameObject
+ ReleaseHolder<IAssemblyName> pAssemblyNameObj;
+ hr = CreateAssemblyNameObject(&pAssemblyNameObj, wszAssemblyName, CANOF_PARSE_DISPLAY_NAME, NULL);
+ if(FAILED(hr))
+ return hr;
+ _ASSERTE(pAssemblyNameObj && "assembly name object shouldn't be NULL");
+ pAssemblyNameObj->SetProperty(ASM_NAME_MAJOR_VERSION, &sContext.usMajorVersion, sizeof(WORD));
+ pAssemblyNameObj->SetProperty(ASM_NAME_MINOR_VERSION, &sContext.usMinorVersion, sizeof(WORD));
+ pAssemblyNameObj->SetProperty(ASM_NAME_BUILD_NUMBER, &sContext.usBuildNumber, sizeof(WORD));
+ pAssemblyNameObj->SetProperty(ASM_NAME_REVISION_NUMBER, &sContext.usRevisionNumber, sizeof(WORD));
+ pAssemblyNameObj->SetProperty(ASM_NAME_CULTURE, W(""), sizeof(WCHAR));
+ if(pbPublicKeyOrToken && cbPublicKeyOrToken > 0)
+ {
+ if(dwFlags & afPublicKey)
+ pAssemblyNameObj->SetProperty(ASM_NAME_PUBLIC_KEY, pbPublicKeyOrToken, cbPublicKeyOrToken);
+ else
+ pAssemblyNameObj->SetProperty(ASM_NAME_PUBLIC_KEY_TOKEN, pbPublicKeyOrToken, cbPublicKeyOrToken);
+ }
+
+ // Convert assembly name to an ole string
+ StackSString name;
+ FusionBind::GetAssemblyNameDisplayName(pAssemblyNameObj, name, dwDisplayFlags);
+#else // FEATURE_FUSION
+ HRESULT hr;
+ AssemblySpec spec;
+ StackSString name;
+
+ IfFailRet(spec.Init((mdToken)tkAssemblyRef,pImport));
+ spec.GetFileOrDisplayName(dwDisplayFlags,name);
+#endif // FEATURE_FUSION
+ _ASSERTE(!name.IsEmpty() && "the assembly name should not be empty here");
+
+ (*pString) += name;
+ return S_OK;
+}
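+
+// For example, the name appended to *pString might look like:
+// "System.Security.Permissions.SecurityPermissionAttribute, mscorlib, Version=..., Culture=neutral, PublicKeyToken=..."
+// since dwDisplayFlags asks for the version, public key token and culture.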
+
+HRESULT SecurityAttributes::SerializeAttribute(CORSEC_ATTRIBUTE* pAttr, BYTE* pBuffer, SIZE_T* pCount, IMetaDataAssemblyImport *pImport)
+{
+ // pBuffer can be NULL if the caller is only trying to determine the size of the serialized blob. In that case, let's make a little temp buffer to facilitate CPackedLen::PutLength
+ SIZE_T cbPos = *pCount;
+ BYTE* pTempBuf = pBuffer;
+ SIZE_T const* pTempPos = &cbPos;
+ BYTE tempBuf[8];
+ const SIZE_T zero = 0;
+ if(!pTempBuf)
+ {
+ pTempBuf = tempBuf;
+ pTempPos = &zero;
+ }
+ BYTE* pOldPos;
+
+ // Get the fully qualified type name
+ SString sType;
+ HRESULT hr = GetFullyQualifiedTypeName(&sType, pAttr->tkAssemblyRef, pAttr->pName, pImport, pAttr->tkCtor);
+ if(FAILED(hr))
+ return hr;
+
+ // Convert assembly name to UTF8.
+ const WCHAR* wszTypeName = sType.GetUnicode();
+ MAKE_UTF8PTR_FROMWIDE(uszTypeName, wszTypeName);
+ DWORD dwUTF8TypeNameLen = (DWORD)strlen(uszTypeName);
+
+ // Serialize the type name length
+ pOldPos = &pTempBuf[*pTempPos];
+ cbPos += (BYTE*)CPackedLen::PutLength(&pTempBuf[*pTempPos], dwUTF8TypeNameLen) - pOldPos;
+
+ // Serialize the type name
+ if(pBuffer)
+ memcpy(&pBuffer[cbPos], uszTypeName, dwUTF8TypeNameLen);
+ cbPos += dwUTF8TypeNameLen;
+
+ // Serialize the size of the properties blob
+ BYTE temp[32];
+ SIZE_T cbSizeOfCompressedPropertiesCount = (BYTE*)CPackedLen::PutLength(temp, pAttr->wValues) - temp;
+ pOldPos = &pTempBuf[*pTempPos];
+
+ _ASSERTE(FitsIn<ULONG>(pAttr->cbValues + cbSizeOfCompressedPropertiesCount));
+ ULONG propertiesLength = static_cast<ULONG>(pAttr->cbValues + cbSizeOfCompressedPropertiesCount);
+ cbPos += (BYTE*)CPackedLen::PutLength(&pTempBuf[*pTempPos], propertiesLength) - pOldPos;
+
+ // Serialize the count of properties
+ pOldPos = &pTempBuf[*pTempPos];
+ cbPos += (BYTE*)CPackedLen::PutLength(&pTempBuf[*pTempPos], pAttr->wValues) - pOldPos;
+
+ // Serialize the properties blob
+ if(pBuffer)
+ memcpy(&pBuffer[cbPos], pAttr->pbValues, pAttr->cbValues);
+ cbPos += pAttr->cbValues;
+
+ *pCount = cbPos;
+ return hr;
+}
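+
+// A minimal caller sketch for the two-pass pattern described above (the local names are
+// illustrative): call once with a NULL buffer to size the blob, allocate, then call
+// again to fill it.
+//
+// SIZE_T cbBlob = 0;
+// IfFailRet(SecurityAttributes::SerializeAttribute(pAttr, NULL, &cbBlob, pImport)); // sizing pass
+// NewArrayHolder<BYTE> pbBlob(new (nothrow) BYTE[cbBlob]);
+// if (pbBlob == NULL)
+//     return E_OUTOFMEMORY;
+// cbBlob = 0; // reset the running position before the fill pass
+// IfFailRet(SecurityAttributes::SerializeAttribute(pAttr, pbBlob, &cbBlob, pImport));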
+
+HRESULT SecurityAttributes::DeserializeAttribute(CORSEC_ATTRIBUTE *pAttr, BYTE* pBuffer, ULONG cbBuffer, SIZE_T* pPos)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ HRESULT hr;
+
+ // Deserialize the size of the type name
+ BYTE* pClassName;
+ ULONG dwClassNameSize;
+ BYTE* pBufferEnd = pBuffer + cbBuffer;
+ IfFailRet(CPackedLen::SafeGetData((BYTE const *)&pBuffer[*pPos],
+ (BYTE const *)pBufferEnd,
+ &dwClassNameSize,
+ (BYTE const **)&pClassName));
+ (*pPos) += pClassName - &pBuffer[*pPos];
+
+ // Deserialize the type name
+ (*pPos) += dwClassNameSize;
+ pAttr->pName = new (nothrow) CHAR[dwClassNameSize + 1];
+ if(!pAttr->pName)
+ return E_OUTOFMEMORY;
+ memcpy(pAttr->pName, pClassName, dwClassNameSize);
+ pAttr->pName[dwClassNameSize] = '\0';
+
+ // Deserialize the CA blob size
+ BYTE* pCABlob;
+ ULONG cbCABlob;
+ IfFailRet(CPackedLen::SafeGetData((BYTE const *)&pBuffer[*pPos],
+ (BYTE const *)pBufferEnd,
+ &cbCABlob,
+ (BYTE const **)&pCABlob));
+
+ (*pPos) += pCABlob - &pBuffer[*pPos];
+
+ // Deserialize the CA blob value count
+ BYTE* pCABlobValues;
+ ULONG cCABlobValues;
+ IfFailRet(CPackedLen::SafeGetLength((BYTE const *)&pBuffer[*pPos],
+ (BYTE const *)pBufferEnd,
+ &cCABlobValues,
+ (BYTE const **)&pCABlobValues));
+
+ (*pPos) += pCABlobValues - &pBuffer[*pPos];
+ if (!FitsIn<WORD>(cCABlobValues))
+ return COR_E_OVERFLOW;
+ pAttr->wValues = static_cast<WORD>(cCABlobValues);
+
+ // We know that pCABlobValues - pCABlob will be a positive result.
+ if (cbCABlob < (ULONG)(pCABlobValues - pCABlob))
+ return COR_E_OVERFLOW;
+
+ pAttr->cbValues = cbCABlob - (pCABlobValues - pCABlob);
+
+ // Deserialize the CA blob
+ pAttr->pbValues = new (nothrow) BYTE[pAttr->cbValues];
+ if(!pAttr->pbValues)
+ return E_OUTOFMEMORY;
+ memcpy(pAttr->pbValues, pCABlobValues, pAttr->cbValues);
+
+ (*pPos) += pAttr->cbValues;
+
+ return S_OK;
+}
+
+HRESULT AttributeSetToBlob(CORSEC_ATTRSET* pAttrSet, BYTE* pBuffer, SIZE_T* pCount, IMetaDataAssemblyImport *pImport, DWORD dwAction)
+{
+ STANDARD_VM_CONTRACT;
+
+ // pBuffer can be NULL if the caller is only trying to determine the size of the serialized blob. In that case, let's make a little temp buffer to facilitate CPackedLen::PutLength
+ SIZE_T cbPos = 0;
+ BYTE* pTempBuf = pBuffer;
+ SIZE_T const *pTempPos = &cbPos;
+ BYTE tempBuf[8];
+ const SIZE_T zero = 0;
+ if(!pTempBuf)
+ {
+ pTempBuf = tempBuf;
+ pTempPos = &zero;
+ }
+ BYTE* pOldPos;
+ HRESULT hr = S_OK;
+
+ // Serialize a LAZY_DECL_SEC_FLAG to identify the blob format (as opposed to '<' which would indicate the older XML format)
+ if(pBuffer)
+ pBuffer[cbPos] = LAZY_DECL_SEC_FLAG;
+ cbPos++;
+
+ // Serialize the attribute count
+ pOldPos = &pTempBuf[*pTempPos];
+ cbPos += (BYTE*)CPackedLen::PutLength(&pTempBuf[*pTempPos], pAttrSet->dwAttrCount) - pOldPos;
+
+ // Serialize the attributes
+ DWORD i;
+ for(i = 0; i < pAttrSet->dwAttrCount; i++)
+ {
+ // Get the attribute
+ CORSEC_ATTRIBUTE *pAttr = &pAttrSet->pAttrs[i];
+
+ // Perform any necessary fix-ups on it
+ if(pAttr->wValues == 1 && strcmp(pAttr->pName, "System.Security.Permissions.PermissionSetAttribute") == 0)
+ IfFailGo(SecurityAttributes::FixUpPermissionSetAttribute(pAttr));
+ else if((dwAction == dclLinktimeCheck ||
+ dwAction == dclInheritanceCheck) &&
+ strcmp(pAttr->pName, "System.Security.Permissions.PrincipalPermissionAttribute") == 0)
+ {
+ VMPostError(CORSECATTR_E_BAD_NONCAS);
+ return CORSECATTR_E_BAD_NONCAS;
+ }
+
+ // Serialize it
+ SIZE_T dwAttrSize = 0;
+ IfFailGo(SecurityAttributes::SerializeAttribute(pAttr, pBuffer ? pBuffer + cbPos : NULL, &dwAttrSize, pImport));
+ cbPos += dwAttrSize;
+ }
+ if(pCount != NULL)
+ *pCount = cbPos;
+
+ErrExit:
+ if (FAILED(hr))
+ VMPostError(CORSECATTR_E_FAILED_TO_CREATE_PERM); // Allows for the correct message to be printed by the compiler
+
+ return hr;
+}
+
+HRESULT BlobToAttributeSet(BYTE* pBuffer, ULONG cbBuffer, CORSEC_ATTRSET* pAttrSet, DWORD dwAction)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ HRESULT hr = S_OK;
+ SIZE_T cbPos = 0;
+ BYTE* pBufferEnd = pBuffer + cbBuffer;
+ memset(pAttrSet, '\0', sizeof(CORSEC_ATTRSET));
+ if (dwAction >= dclDemand && dwAction <= dclRequestRefuse)
+ pAttrSet->dwAction = dwAction; // Already lies in the publicly visible range (values that the managed SecurityAction enum can take)
+ else
+ {
+ // Map the action to a publicly visible value
+ if (dwAction == dclNonCasDemand)
+ pAttrSet->dwAction = dclDemand;
+ else if (dwAction == dclNonCasInheritance)
+ pAttrSet->dwAction = dclInheritanceCheck;
+ else if (dwAction == dclNonCasLinkDemand)
+ pAttrSet->dwAction = dclLinktimeCheck;
+ else
+ {
+ // We have an unexpected security action here. It would be nice to fail, but for compatibility we need to simply
+ // reset the action to Nil.
+ pAttrSet->dwAction = dclActionNil;
+ }
+ }
+
+ // Deserialize the LAZY_DECL_SEC_FLAG to identify serialization of CORSEC_ATTRSET (as opposed to '<' which would indicate a serialized permission as Xml)
+ BYTE firstChar = pBuffer[cbPos];
+ cbPos++;
+ if(firstChar != LAZY_DECL_SEC_FLAG)
+ return S_FALSE;
+
+ // Deserialize the attribute count
+ BYTE* pBufferNext;
+ IfFailRet(CPackedLen::SafeGetLength((BYTE const *)&pBuffer[cbPos],
+ (BYTE const *)pBufferEnd,
+ &pAttrSet->dwAttrCount,
+ (BYTE const **)&pBufferNext));
+
+ cbPos += pBufferNext - &pBuffer[cbPos];
+ if(pAttrSet->dwAttrCount > 0)
+ {
+ pAttrSet->pAttrs = new (nothrow) CORSEC_ATTRIBUTE[pAttrSet->dwAttrCount];
+ if(!pAttrSet->pAttrs)
+ return E_OUTOFMEMORY;
+ pAttrSet->dwAllocated = pAttrSet->dwAttrCount;
+ }
+
+ // Deserialize the attributes
+ DWORD i;
+ for(i = 0; i < pAttrSet->dwAttrCount; i++)
+ {
+ CORSEC_ATTRIBUTE *pAttr = &pAttrSet->pAttrs[i];
+ hr = SecurityAttributes::DeserializeAttribute(pAttr, pBuffer, cbBuffer, &cbPos);
+ if(FAILED(hr))
+ return hr;
+ }
+
+ return S_OK;
+}
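+
+// For reference, the blob layout that AttributeSetToBlob produces and BlobToAttributeSet
+// consumes (readable from the code above) is:
+//
+//   LAZY_DECL_SEC_FLAG ('.')   -- distinguishes this format from the older XML format ('<')
+//   packed attribute count     -- CPackedLen-compressed
+//   for each attribute:
+//     packed type name length, then the UTF8 type name
+//     packed size of the properties blob (packed value count + raw values)
+//     packed count of property/field values
+//     raw value blob (cbValues bytes)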
+
+// This function takes an array of COR_SECATTR (which wrap custom security attribute blobs) and
+// converts it to an array of CORSEC_ATTRSET (each of which contains partially-parsed custom security
+// attribute blobs grouped by SecurityAction). Note that the caller must delete all the attribute
+// entries that this function allocates for each COR_SECATTR.
+HRESULT STDMETHODCALLTYPE GroupSecurityAttributesByAction(
+ CORSEC_ATTRSET /*OUT*/rPermSets[],
+ COR_SECATTR rSecAttrs[],
+ ULONG cSecAttrs,
+ mdToken tkObj,
+ ULONG *pulErrorAttr,
+ CMiniMdRW* pMiniMd,
+ IMDInternalImport* pInternalImport)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DWORD i, j, k;
+ DWORD dwAction;
+ BYTE* pData = NULL;
+ CORSEC_ATTRIBUTE* pPerm;
+ mdTypeDef tkParent;
+ TypeDefRec* pTypeDefRec;
+ MemberRefRec* pMemberRefRec;
+ TypeRefRec* pTypeRefRec;
+ SIZE_T cbAllocationSize;
+
+ // If you are calling this at compile-time, you should pass in pMiniMd, and pInternalImport should be NULL
+ // If you are calling this at run-time, you should pass in pInternalImport, and pMiniMd should be NULL
+ _ASSERTE((pMiniMd && !pInternalImport) || (!pMiniMd && pInternalImport));
+
+ // Calculate number and sizes of permission sets to produce. This depends on
+ // the security action code encoded as the single parameter to the
+ // constructor for each security custom attribute.
+ for (i = 0; i < cSecAttrs; i++)
+ {
+ if (pulErrorAttr)
+ *pulErrorAttr = i;
+
+ // Perform basic validation of the header of each security custom
+ // attribute constructor call.
+ pData = (BYTE*)rSecAttrs[i].pCustomAttribute;
+
+ // Check minimum length.
+ if (rSecAttrs[i].cbCustomAttribute < (sizeof(WORD) + sizeof(DWORD) + sizeof(WORD)))
+ {
+ VMPostError(CORSECATTR_E_TRUNCATED);
+ IfFailGo(CORSECATTR_E_TRUNCATED);
+ }
+
+ // Check version.
+ if (GET_UNALIGNED_VAL16(pData) != 1)
+ {
+ VMPostError(CORSECATTR_E_BAD_VERSION);
+ IfFailGo(CORSECATTR_E_BAD_VERSION);
+ }
+ pData += sizeof(WORD);
+
+ // Extract and check security action.
+ if(pData[2] == SERIALIZATION_TYPE_PROPERTY) // check to see if it's a HostProtection attribute w/o an action
+ dwAction = dclLinktimeCheck;
+ else
+ dwAction = GET_UNALIGNED_VAL32(pData);
+ if (dwAction == dclActionNil || dwAction > dclMaximumValue)
+ {
+ VMPostError(CORSECATTR_E_BAD_ACTION);
+ IfFailGo(CORSECATTR_E_BAD_ACTION);
+ }
+
+ // All other declarative security only valid on types and methods.
+ if (TypeFromToken(tkObj) == mdtAssembly)
+ {
+ // Assemblies can only take permission requests.
+ if (dwAction != dclRequestMinimum &&
+ dwAction != dclRequestOptional &&
+ dwAction != dclRequestRefuse)
+ {
+ VMPostError(CORSECATTR_E_BAD_ACTION_ASM);
+ IfFailGo(CORSECATTR_E_BAD_ACTION_ASM);
+ }
+ }
+ else if (TypeFromToken(tkObj) == mdtTypeDef || TypeFromToken(tkObj) == mdtMethodDef)
+ {
+ // Types and methods can only take declarative security.
+ if (dwAction != dclRequest &&
+ dwAction != dclDemand &&
+ dwAction != dclAssert &&
+ dwAction != dclDeny &&
+ dwAction != dclPermitOnly &&
+ dwAction != dclLinktimeCheck &&
+ dwAction != dclInheritanceCheck)
+ {
+ VMPostError(CORSECATTR_E_BAD_ACTION_OTHER);
+ IfFailGo(CORSECATTR_E_BAD_ACTION_OTHER);
+ }
+ }
+ else
+ {
+ // Permission sets can't be attached to anything else.
+ VMPostError(CORSECATTR_E_BAD_PARENT);
+ IfFailGo(CORSECATTR_E_BAD_PARENT);
+ }
+
+ rPermSets[dwAction].dwAttrCount++;
+ }
+
+ // Initialize the descriptor for each type of permission set we are going to
+ // produce.
+ for (i = 0; i <= dclMaximumValue; i++)
+ {
+ if (rPermSets[i].dwAttrCount == 0)
+ continue;
+
+ rPermSets[i].tkObj = tkObj;
+ rPermSets[i].dwAction = i;
+ rPermSets[i].pImport = NULL;
+ rPermSets[i].pAppDomain = NULL;
+ rPermSets[i].pAttrs = new (nothrow) CORSEC_ATTRIBUTE[rPermSets[i].dwAttrCount];
+ IfNullGo(rPermSets[i].pAttrs);
+
+ // Initialize a descriptor for each permission within the permission set.
+ for (j = 0, k = 0; j < rPermSets[i].dwAttrCount; j++, k++)
+ {
+ // Locate the next security attribute that contributes to this
+ // permission set.
+ for (; k < cSecAttrs; k++)
+ {
+ pData = (BYTE*)rSecAttrs[k].pCustomAttribute;
+ if(pData[4] == SERIALIZATION_TYPE_PROPERTY) // check to see if it's a HostProtection attribute w/o an action
+ dwAction = dclLinktimeCheck;
+ else
+ dwAction = GET_UNALIGNED_VAL32(pData + sizeof(WORD));
+ if (dwAction == i)
+ break;
+ }
+ _ASSERTE(k < cSecAttrs);
+
+ if (pulErrorAttr)
+ *pulErrorAttr = k;
+
+ // Initialize the permission.
+ pPerm = &rPermSets[i].pAttrs[j];
+ pPerm->tkCtor = rSecAttrs[k].tkCtor;
+ pPerm->dwIndex = k;
+ if(pData[4] == SERIALIZATION_TYPE_PROPERTY) // check to see if it's a HostProtection attribute w/o an action
+ {
+ _ASSERTE(!pPerm->pbValues);
+ //pPerm->pbValues = pData + (sizeof (WORD) + sizeof(WORD));
+ if (!ClrSafeInt<SIZE_T>::subtraction(rSecAttrs[k].cbCustomAttribute, (sizeof (WORD) + sizeof(WORD)), pPerm->cbValues))
+ return COR_E_OVERFLOW;
+ pPerm->wValues = GET_UNALIGNED_VAL16(pData + sizeof (WORD));
+ // Prefast overflow sanity check the addition.
+ if (!ClrSafeInt<SIZE_T>::addition(pPerm->cbValues, sizeof(WORD), cbAllocationSize))
+ return COR_E_OVERFLOW;
+ pPerm->pbValues = new (nothrow) BYTE[cbAllocationSize];
+ if(!pPerm->pbValues)
+ return E_OUTOFMEMORY;
+ memcpy(pPerm->pbValues, pData + (sizeof (WORD) + sizeof(WORD)), pPerm->cbValues);
+ }
+ else
+ {
+ _ASSERTE(!pPerm->pbValues);
+ //pPerm->pbValues = pData + (sizeof (WORD) + sizeof(DWORD) + sizeof(WORD));
+ if (!ClrSafeInt<SIZE_T>::subtraction(rSecAttrs[k].cbCustomAttribute, (sizeof (WORD) + sizeof (DWORD) + sizeof(WORD)), pPerm->cbValues))
+ return COR_E_OVERFLOW;
+ pPerm->wValues = GET_UNALIGNED_VAL16(pData + sizeof (WORD) + sizeof(DWORD));
+ // Prefast overflow sanity check the addition.
+ if (!ClrSafeInt<SIZE_T>::addition(pPerm->cbValues, sizeof(WORD), cbAllocationSize))
+ return COR_E_OVERFLOW;
+ pPerm->pbValues = new (nothrow) BYTE[cbAllocationSize];
+ if(!pPerm->pbValues)
+ return E_OUTOFMEMORY;
+ memcpy(pPerm->pbValues, pData + (sizeof (WORD) + sizeof(DWORD) + sizeof(WORD)), pPerm->cbValues);
+ }
+
+ CQuickBytes qbFullName;
+ CHAR* szFullName = NULL;
+
+ LPCSTR szTypeName;
+ LPCSTR szTypeNamespace;
+
+ // Follow the security custom attribute constructor back up to its
+ // defining assembly (so we know how to load its definition). If the
+ // token resolution scope is not defined, it's assumed to be
+ // mscorlib.
+ if (TypeFromToken(rSecAttrs[k].tkCtor) == mdtMethodDef)
+ {
+ if (pMiniMd != NULL)
+ {
+ // scratch buffer for full type name
+ szFullName = (CHAR*) qbFullName.AllocNoThrow((MAX_CLASSNAME_LENGTH+1) * sizeof(CHAR));
+ if(szFullName == NULL)
+ return E_OUTOFMEMORY;
+
+ // grab the type that contains the security attribute constructor
+ IfFailGo(pMiniMd->FindParentOfMethodHelper(rSecAttrs[k].tkCtor, &tkParent));
+
+ // scratch buffer for nested type names
+ CQuickBytes qbBuffer;
+ CHAR* szBuffer;
+
+ CHAR* szName = NULL;
+ BOOL fFirstLoop = TRUE;
+ pTypeDefRec = NULL;
+ do
+ {
+ // get outer type name
+ IfFailGo(pMiniMd->GetTypeDefRecord(RidFromToken(tkParent), &pTypeDefRec));
+ IfFailGo(pMiniMd->getNameOfTypeDef(pTypeDefRec, (LPCSTR *)&szName));
+
+ // If this is the first time through the loop, just assign values, otherwise build nested type name.
+ if (!fFirstLoop)
+ {
+ szBuffer = (CHAR*) qbBuffer.AllocNoThrow((MAX_CLASSNAME_LENGTH+1) * sizeof(CHAR));
+ if(szBuffer == NULL)
+ return E_OUTOFMEMORY;
+
+ ns::MakeNestedTypeName(szBuffer, (MAX_CLASSNAME_LENGTH+1) * sizeof(CHAR), szName, szFullName);
+ szName = szBuffer;
+ }
+ else
+ {
+ fFirstLoop = FALSE;
+ }
+
+ // copy into buffer
+ size_t localLen = strlen(szName) + 1;
+ strcpy_s(szFullName, localLen, szName);
+
+ // move to next parent
+ DWORD dwFlags = pMiniMd->getFlagsOfTypeDef(pTypeDefRec);
+ if (IsTdNested(dwFlags))
+ {
+ RID ridNestedRec;
+ IfFailGo(pMiniMd->FindNestedClassHelper(tkParent, &ridNestedRec));
+ _ASSERTE(!InvalidRid(ridNestedRec));
+ NestedClassRec *pNestedRec;
+ IfFailGo(pMiniMd->GetNestedClassRecord(ridNestedRec, &pNestedRec));
+ tkParent = pMiniMd->getEnclosingClassOfNestedClass(pNestedRec);
+ }
+ else
+ {
+ tkParent = NULL;
+ }
+ } while (tkParent != NULL);
+
+ IfFailGo(pMiniMd->getNamespaceOfTypeDef(pTypeDefRec, &szTypeNamespace));
+ szTypeName = szFullName;
+ }
+ else
+ {
+ IfFailGo(pInternalImport->GetParentToken(rSecAttrs[k].tkCtor, &tkParent));
+ IfFailGo(pInternalImport->GetNameOfTypeDef(tkParent, &szTypeName, &szTypeNamespace));
+ }
+ pPerm->tkTypeRef = mdTokenNil;
+ pPerm->tkAssemblyRef = mdTokenNil;
+ }
+ else
+ {
+ _ASSERTE(TypeFromToken(rSecAttrs[k].tkCtor) == mdtMemberRef);
+
+ // Get the type ref
+ if (pMiniMd != NULL)
+ {
+ IfFailGo(pMiniMd->GetMemberRefRecord(RidFromToken(rSecAttrs[k].tkCtor), &pMemberRefRec));
+ pPerm->tkTypeRef = pMiniMd->getClassOfMemberRef(pMemberRefRec);
+ }
+ else
+ {
+ IfFailGo(pInternalImport->GetParentOfMemberRef(rSecAttrs[k].tkCtor, &pPerm->tkTypeRef));
+ }
+
+ _ASSERTE(TypeFromToken(pPerm->tkTypeRef) == mdtTypeRef);
+
+ // Get an assembly ref
+ pPerm->tkAssemblyRef = pPerm->tkTypeRef;
+ pTypeRefRec = NULL;
+ do
+ {
+ if (pMiniMd != NULL)
+ {
+ IfFailGo(pMiniMd->GetTypeRefRecord(RidFromToken(pPerm->tkAssemblyRef), &pTypeRefRec));
+ pPerm->tkAssemblyRef = pMiniMd->getResolutionScopeOfTypeRef(pTypeRefRec);
+ }
+ else
+ {
+ IfFailGo(pInternalImport->GetResolutionScopeOfTypeRef(pPerm->tkAssemblyRef, &pPerm->tkAssemblyRef));
+ }
+ // loop because nested types have a resolution scope of the parent type rather than an assembly
+ } while(TypeFromToken(pPerm->tkAssemblyRef) == mdtTypeRef);
+
+ // Figure out the fully qualified type name
+ if (pMiniMd != NULL)
+ {
+ IfFailGo(pMiniMd->getNamespaceOfTypeRef(pTypeRefRec, &szTypeNamespace));
+ IfFailGo(pMiniMd->getNameOfTypeRef(pTypeRefRec, &szTypeName));
+ }
+ else
+ {
+ IfFailGo(pInternalImport->GetNameOfTypeRef(pPerm->tkTypeRef, &szTypeNamespace, &szTypeName));
+ }
+ }
+
+ CQuickBytes qb;
+ CHAR* szTmp = (CHAR*) qb.AllocNoThrow((MAX_CLASSNAME_LENGTH+1) * sizeof(CHAR));
+ if(szTmp == NULL)
+ return E_OUTOFMEMORY;
+
+ ns::MakePath(szTmp, MAX_CLASSNAME_LENGTH, szTypeNamespace, szTypeName);
+
+ size_t len = strlen(szTmp) + 1;
+ pPerm->pName = new (nothrow) CHAR[len];
+ if(!pPerm->pName)
+ return E_OUTOFMEMORY;
+ strcpy_s(pPerm->pName, len, szTmp);
+ }
+ }
+
+ErrExit:
+ return hr;
+}
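+
+// A call sketch (illustrative; the locals are hypothetical): the caller supplies one
+// zero-initialized CORSEC_ATTRSET slot per security action, plus exactly one metadata
+// source -- pMiniMd at compile time or pInternalImport at run time.
+//
+// CORSEC_ATTRSET rPermSets[dclMaximumValue + 1];
+// memset(rPermSets, 0, sizeof(rPermSets));
+// ULONG ulErrorAttr;
+// HRESULT hr = GroupSecurityAttributesByAction(rPermSets, rSecAttrs, cSecAttrs, tkObj,
+//                                              &ulErrorAttr, pMiniMd, NULL /* pInternalImport */);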
diff --git a/src/vm/securityattributes.h b/src/vm/securityattributes.h
new file mode 100644
index 0000000000..afb6a70d43
--- /dev/null
+++ b/src/vm/securityattributes.h
@@ -0,0 +1,147 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef __SECURITYATTRIBUTES_H__
+#define __SECURITYATTRIBUTES_H__
+
+#include "vars.hpp"
+#include "eehash.h"
+#include "corperm.h"
+
+class SecurityDescriptor;
+class AssemblySecurityDescriptor;
+class SecurityStackWalk;
+class COMCustomAttribute;
+class PsetCacheEntry;
+struct TokenDeclActionInfo;
+
+extern HRESULT BlobToAttributeSet(BYTE* pBuffer, ULONG cbBuffer, CORSEC_ATTRSET* pAttrSet, DWORD dwAction);
+
+namespace SecurityAttributes
+{
+ // Retrieves a previously loaded PermissionSet object by its
+ // index (this will work even if the permission set was loaded in
+ // a different appdomain).
+ OBJECTREF GetPermissionSet(DWORD dwIndex, DWORD dwAction);
+
+ // Locate the index of a permission set in the cache (returns false if the
+ // permission set has not yet been seen and decoded).
+ BOOL LookupPermissionSet(IN PBYTE pbPset,
+ IN DWORD cbPset,
+ OUT DWORD *pdwSetIndex);
+
+ // Creates a new permission set
+ OBJECTREF CreatePermissionSet(BOOL fTrusted);
+
+#ifdef FEATURE_CAS_POLICY
+ // Takes two PermissionSets (referenced by index) and merges them (unions or intersects
+ // depending on fIntersect) and returns the index of the merged PermissionSet
+ PsetCacheEntry* MergePermissionSets(IN PsetCacheEntry *pPCE1, IN PsetCacheEntry *pPCE2, IN bool fIntersect, IN DWORD dwAction);
+#endif // FEATURE_CAS_POLICY
+
+ // Uses new to create the byte array that is returned.
+ void CopyByteArrayToEncoding(IN U1ARRAYREF* pArray,
+ OUT PBYTE* pbData,
+ OUT DWORD* cbData);
+
+#ifdef FEATURE_CAS_POLICY
+ void EncodePermissionSet(IN OBJECTREF* pRef,
+ OUT PBYTE* ppbData,
+ OUT DWORD* pcbData);
+#endif // FEATURE_CAS_POLICY
+
+ // Generic routine, use with encoding calls that
+ // use the EncodePermission client data
+ // Uses new to create the byte array that is returned.
+ void CopyEncodingToByteArray(IN PBYTE pbData,
+ IN DWORD cbData,
+ IN OBJECTREF* pArray);
+
+ BOOL RestrictiveRequestsInAssembly(IMDInternalImport* pImport);
+
+ // Returns the declared PermissionSet or PermissionSetCollection for the
+ // specified action type.
+ HRESULT GetDeclaredPermissions(IN IMDInternalImport *pInternalImport,
+ IN mdToken token, // token for method, class, or assembly
+ IN CorDeclSecurity action, // SecurityAction
+ OUT OBJECTREF *pDeclaredPermissions, // The returned PermissionSet for that SecurityAction
+ OUT PsetCacheEntry **pPSCacheEntry = NULL); // The cache entry for the PermissionSet blob.
+
+
+ HRESULT TranslateSecurityAttributesHelper(
+ CORSEC_ATTRSET *pAttrSet,
+ BYTE **ppbOutput,
+ DWORD *pcbOutput,
+ BYTE **ppbNonCasOutput,
+ DWORD *pcbNonCasOutput,
+ DWORD *pdwErrorIndex);
+
+ HRESULT FixUpPermissionSetAttribute(CORSEC_ATTRIBUTE* pPerm);
+ HRESULT SerializeAttribute(CORSEC_ATTRIBUTE* pAttr, BYTE* pBuffer, SIZE_T* pCount, IMetaDataAssemblyImport *pImport);
+ HRESULT DeserializeAttribute(CORSEC_ATTRIBUTE *pAttr, BYTE* pBuffer, ULONG cbBuffer, SIZE_T* pPos);
+
+ inline bool ContainsBuiltinCASPermsOnly(CORSEC_ATTRSET* pAttrSet);
+
+ inline bool ContainsBuiltinCASPermsOnly(CORSEC_ATTRSET* pAttrSet, bool* pHostProtectionOnly);
+
+ void CreateAndCachePermissions(IN PBYTE pbPerm,
+ IN ULONG cbPerm,
+ IN CorDeclSecurity action,
+ OUT OBJECTREF *pDeclaredPermissions,
+ OUT PsetCacheEntry **pPSCacheEntry);
+
+ HRESULT GetPermissionsFromMetaData(IN IMDInternalImport *pInternalImport,
+ IN mdToken token,
+ IN CorDeclSecurity action,
+ OUT PBYTE* ppbPerm,
+ OUT ULONG* pcbPerm);
+
+ bool IsUnrestrictedPermissionSetAttribute(CORSEC_ATTRIBUTE* pAttr);
+ bool IsBuiltInCASPermissionAttribute(CORSEC_ATTRIBUTE* pAttr);
+ bool IsHostProtectionAttribute(CORSEC_ATTRIBUTE* pAttr);
+
+ void LoadPermissionRequestsFromAssembly(IN IMDInternalImport *pImport,
+ OUT OBJECTREF* pReqdPermissions,
+ OUT OBJECTREF* pOptPermissions,
+ OUT OBJECTREF* pDenyPermissions);
+
+ // Insert a decoded permission set into the cache. Duplicates are discarded.
+ void InsertPermissionSet(IN PBYTE pbPset,
+ IN DWORD cbPset,
+ IN OBJECTREF orPset,
+ OUT DWORD *pdwSetIndex);
+
+ Assembly* LoadAssemblyFromToken(IMetaDataAssemblyImport *pImport, mdAssemblyRef tkAssemblyRef);
+ Assembly* LoadAssemblyFromNameString(__in_z WCHAR* pAssemblyName);
+ HRESULT AttributeSetToManaged(OBJECTREF* /*OUT*/obj, CORSEC_ATTRSET* pAttrSet, OBJECTREF* pThrowable, DWORD* pdwErrorIndex, bool bLazy);
+ HRESULT SetAttrFieldsAndProperties(CORSEC_ATTRIBUTE *pAttr, OBJECTREF* pThrowable, MethodTable* pMT, OBJECTREF* pObj);
+ HRESULT SetAttrField(BYTE** ppbBuffer, SIZE_T* pcbBuffer, DWORD dwType, TypeHandle hEnum, MethodTable* pMT, __in_z LPSTR szName, OBJECTREF* pObj, DWORD dwLength, BYTE* pbName, DWORD cbName, CorElementType eEnumType);
+ HRESULT SetAttrProperty(BYTE** ppbBuffer, SIZE_T* pcbBuffer, MethodTable* pMT, DWORD dwType, __in_z LPSTR szName, OBJECTREF* pObj, DWORD dwLength, BYTE* pbName, DWORD cbName, CorElementType eEnumType);
+ void AttrArrayToPermissionSet(OBJECTREF* attrArray, bool fSerialize, DWORD attrCount, BYTE **ppbOutput, DWORD *pcbOutput, BYTE **ppbNonCasOutput, DWORD *pcbNonCasOutput, bool fAllowEmptyPermissionSet, OBJECTREF* pPermSet);
+ void AttrSetBlobToPermissionSets(IN BYTE* pbRawPermissions, IN DWORD cbRawPermissions, OUT OBJECTREF* pObj, IN DWORD dwAction);
+
+#ifdef FEATURE_CAS_POLICY
+ void XmlToPermissionSet(PBYTE pbXmlBlob,
+ DWORD cbXmlBlob,
+ OBJECTREF* pPermSet,
+ OBJECTREF* pEncoding,
+ PBYTE pbNonCasXmlBlob,
+ DWORD cbNonCasXmlBlob,
+ OBJECTREF* pNonCasPermSet,
+ OBJECTREF* pNonCasEncoding);
+#endif // FEATURE_CAS_POLICY
+
+
+ bool ActionAllowsNullPermissionSet(CorDeclSecurity action);
+}
+
+#define LAZY_DECL_SEC_FLAG '.'
+
+#endif // __SECURITYATTRIBUTES_H__
+
diff --git a/src/vm/securityattributes.inl b/src/vm/securityattributes.inl
new file mode 100644
index 0000000000..08b41ef908
--- /dev/null
+++ b/src/vm/securityattributes.inl
@@ -0,0 +1,45 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef __SECURITYATTRIBUTES_INL__
+#define __SECURITYATTRIBUTES_INL__
+
+#include "securityattributes.h"
+
+
+inline bool SecurityAttributes::ContainsBuiltinCASPermsOnly(CORSEC_ATTRSET* pAttrSet)
+{
+ bool hostProtectionOnly;
+ return ContainsBuiltinCASPermsOnly(pAttrSet, &hostProtectionOnly);
+}
+
+
+inline bool SecurityAttributes::ContainsBuiltinCASPermsOnly(CORSEC_ATTRSET* pAttrSet, bool* pHostProtectionOnly)
+{
+ DWORD n;
+ *pHostProtectionOnly = true; // Assume that it's all HostProtection only
+ for(n = 0; n < pAttrSet->dwAttrCount; n++)
+ {
+ CORSEC_ATTRIBUTE* pAttr = &pAttrSet->pAttrs[n];
+ if(!IsBuiltInCASPermissionAttribute(pAttr))
+ {
+ *pHostProtectionOnly = false;
+ return false;
+ }
+ if (*pHostProtectionOnly && !IsHostProtectionAttribute(pAttr))
+ {
+ *pHostProtectionOnly = false;
+ }
+ }
+
+ return true;
+}
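+
+// A usage sketch (illustrative): the two-output overload lets a caller detect the
+// HostProtection-only case in a single walk over the attribute set.
+//
+// bool fHostProtectionOnly;
+// if (SecurityAttributes::ContainsBuiltinCASPermsOnly(pAttrSet, &fHostProtectionOnly)
+//     && fHostProtectionOnly)
+// {
+//     // every attribute is a HostProtectionAttribute -- a fast path could go here
+// }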
+
+#endif // __SECURITYATTRIBUTES_INL__
+
diff --git a/src/vm/securityconfig.cpp b/src/vm/securityconfig.cpp
new file mode 100644
index 0000000000..ba4d47a4e2
--- /dev/null
+++ b/src/vm/securityconfig.cpp
@@ -0,0 +1,2182 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: SecurityConfig.cpp
+//
+
+//
+// Native implementation for security config access and manipulation
+//
+
+
+// #SecurityConfigFormat
+//
+// The security config system resides outside of the rest
+// of the config system since our needs are different. The
+// unmanaged portion of the security config system is only
+// concerned with data file/cache file pairs, not what they
+// are used for. It performs all the duties of reading data
+// from the disk, saving data back to the disk, and maintaining
+// the policy and quick cache data structures.
+//
+// FILE FORMAT
+//
+// The data file is a purely opaque blob for the unmanaged
+// code; however, the cache file is constructed and maintained
+// completely in the unmanaged code. Its format is as follows:
+//
+// CacheHeader
+// |
+// +-- dummyFileTime (FILETIME, 8 bytes) = this exists to make sure we don't read old format cache files. Must be set to {1, 0}.
+// |
+// +-- version (DWORD) = The version of this config file.
+// |
+// +-- configFileTime (FILETIME, 8 bytes) = The file time of the config file associated with this cache file.
+// |
+// +-- isSecurityOn (DWORD, 4 bytes) = This is currently not used.
+// |
+// +-- quickCache (DWORD, 4 bytes) = Used as a bitfield to maintain the information for the QuickCache. See the QuickCache section for more details.
+// |
+// +-- registryExtensionsInfo (struct RegistryExtensionsInfo) = Indicates whether this cache file was generated in the presence of registry extensions.
+// |
+// +-- numEntries (DWORD, 4 bytes) = The number of policy cache entries in the latter portion of this cache file.
+// |
+// +-- sizeConfig (DWORD, 4 bytes) = The size of the config information stored in the latter portion of this cache file.
+//
+// Config Data (if any)
+// The cache file can include an entire copy of this
+// information in the adjoining config file. This is
+// necessary since the cache often allows us to make
+// policy decisions without having parsed the data in
+// the config file. In order to guarantee that the config
+// data used by this process is not altered in the
+// meantime, we need to store the data in a readonly
+// location. Due to the design of the caching system
+// the cache file is locked when it is opened and therefore
+// is the perfect place to store this information. The
+// other alternative is to hold it in memory, but since
+// this can amount to many kilobytes of data we decided
+// on this design.
+//
+// List of CacheEntries
+// |
+// +-- CacheEntry
+// | |
+// | +-- numItemsInKey (DWORD, 4 bytes) = The number of evidence objects serialized in the key blob
+// | |
+// | +-- keySize (DWORD, 4 bytes) = The number of bytes in the key blob.
+// | |
+// | +-- dataSize (DWORD, 4 bytes) = The number of bytes in the data blob.
+// | |
+// | +-- keyBlob (raw) = A raw blob representing the serialized evidence.
+// | |
+// | +-- dataBlob (raw) = A raw blob representing an XML serialized PolicyStatement
+// |
+// +-- ...
+
+#include "common.h"
+
+#ifdef FEATURE_CAS_POLICY
+
+#include "securityconfig.h"
+
+// Header version of the cache file.
+#define CONFIG_VERSION 2
+// This controls the maximum size of the cache file.
+#define MAX_CACHEFILE_SIZE (1 << 20)
+
+#define SIZE_OF_ENTRY( X ) sizeof( CacheEntryHeader ) + X->header.keySize + X->header.dataSize
+#define MAX_NUM_LENGTH 16
+
+WCHAR* SecurityConfig::wcscatDWORD( __out_ecount(cchdst) __out_z WCHAR* dst, size_t cchdst, DWORD num )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ _ASSERTE( SecurityConfig::dataLock_.OwnedByCurrentThread() );
+
+ static WCHAR buffer[MAX_NUM_LENGTH];
+
+ buffer[MAX_NUM_LENGTH-1] = W('\0');
+
+ size_t index = MAX_NUM_LENGTH-2;
+
+ if (num == 0)
+ {
+ buffer[index--] = W('0');
+ }
+ else
+ {
+ while (num != 0)
+ {
+ buffer[index--] = (WCHAR)(W('0') + (num % 10));
+ num = num / 10;
+ }
+ }
+
+ wcscat_s( dst, cchdst, buffer + index + 1 );
+
+ return dst;
+}
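+
+// For example, wcscatDWORD(dst, cchdst, 1234) appends W("1234") to dst. The digits are
+// formatted in a static scratch buffer, which is why the function asserts that
+// dataLock_ is held by the current thread.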
+
+inline WCHAR * Wszdup(const WCHAR * str)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ size_t len = wcslen(str) + 1;
+ WCHAR * ret = new WCHAR[len];
+ wcscpy_s(ret, len, str);
+ return ret;
+}
+
+struct CacheHeader
+{
+ FILETIME dummyFileTime;
+ DWORD version;
+ FILETIME configFileTime;
+ DWORD isSecurityOn, quickCache;
+ SecurityConfig::RegistryExtensionsInfo registryExtensionsInfo;
+ DWORD numEntries, sizeConfig;
+
+ CacheHeader() : isSecurityOn( (DWORD) -1 ), quickCache( 0 ), numEntries( 0 ), sizeConfig( 0 )
+ {
+ WRAPPER_NO_CONTRACT;
+ memset( &this->configFileTime, 0, sizeof( configFileTime ) );
+ dummyFileTime.dwLowDateTime = 1;
+ dummyFileTime.dwHighDateTime = 0;
+ version = CONFIG_VERSION;
+ memset(&registryExtensionsInfo, 0, sizeof(registryExtensionsInfo));
+ _ASSERTE( IsValid() && "CacheHeader constructor should make it valid" );
+ };
+
+ bool IsValid()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return dummyFileTime.dwLowDateTime == 1 &&
+ dummyFileTime.dwHighDateTime == 0 &&
+ version == CONFIG_VERSION;
+ }
+};
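+
+// A reader sketch (illustrative): a consumer validates the header before trusting the
+// rest of the cache file, and otherwise falls back to parsing the config file.
+//
+// CacheHeader header;
+// if (ReadFileData( cache, (BYTE*)&header, sizeof( header ) ) != S_OK || !header.IsValid())
+// {
+//     // unrecognized or old-format cache file -- ignore it and use the config file
+// }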
+
+struct CacheEntryHeader
+{
+ DWORD numItemsInKey;
+ DWORD keySize;
+ DWORD dataSize;
+};
+
+struct CacheEntry
+{
+ CacheEntryHeader header;
+ BYTE* key;
+ BYTE* data;
+ DWORD cachePosition;
+ BOOL used;
+
+ CacheEntry() : key( NULL ), data( NULL ), used( FALSE )
+ {
+ LIMITED_METHOD_CONTRACT;
+ };
+
+ ~CacheEntry( void )
+ {
+ WRAPPER_NO_CONTRACT;
+ delete [] key;
+ delete [] data;
+ }
+};
+
+struct Data
+{
+ enum State
+ {
+ None = 0x0,
+ UsingCacheFile = 0x1,
+ CopyCacheFile = 0x2,
+ CacheUpdated = 0x4,
+ UsingConfigFile = 0x10,
+ CacheExhausted = 0x20,
+ NewConfigFile = 0x40
+ };
+
+ INT32 id;
+ WCHAR* configFileName;
+ WCHAR* cacheFileName;
+ WCHAR* cacheFileNameTemp;
+
+ LPBYTE configData;
+ DWORD configDataSize;
+ FILETIME configFileTime;
+ FILETIME cacheFileTime;
+ CacheHeader header;
+ ArrayList* oldCacheEntries;
+ ArrayList* newCacheEntries;
+ State state;
+ DWORD cacheCurrentPosition;
+ HANDLE cache;
+ PBYTE configBuffer;
+ DWORD sizeConfig;
+ SecurityConfig::ConfigRetval initRetval;
+ DWORD newEntriesSize;
+
+ Data( INT32 id )
+ : id( id ),
+ configFileName( NULL ),
+ cacheFileName( NULL ),
+ configData( NULL ),
+ oldCacheEntries( new ArrayList ),
+ newCacheEntries( new ArrayList ),
+ state( Data::None ),
+ cache( INVALID_HANDLE_VALUE ),
+ configBuffer( NULL ),
+ newEntriesSize( 0 )
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ Data( INT32 id, STRINGREF* configFile )
+ : id( id ),
+ cacheFileName( NULL ),
+ configData( NULL ),
+ oldCacheEntries( new ArrayList ),
+ newCacheEntries( new ArrayList ),
+ state( Data::None ),
+ cache( INVALID_HANDLE_VALUE ),
+ configBuffer( NULL ),
+ newEntriesSize( 0 )
+ {
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(*configFile != NULL);
+ } CONTRACTL_END;
+
+ configFileName = Wszdup( (*configFile)->GetBuffer() );
+ cacheFileName = NULL;
+ cacheFileNameTemp = NULL;
+ }
+
+ Data( INT32 id, STRINGREF* configFile, STRINGREF* cacheFile )
+ : id( id ),
+ configData( NULL ),
+ oldCacheEntries( new ArrayList ),
+ newCacheEntries( new ArrayList ),
+ state( Data::None ),
+ cache( INVALID_HANDLE_VALUE ),
+ configBuffer( NULL ),
+ newEntriesSize( 0 )
+ {
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(*configFile != NULL);
+ } CONTRACTL_END;
+
+ configFileName = Wszdup( (*configFile)->GetBuffer() );
+
+ if (cacheFile != NULL)
+ {
+ // Since temp cache files can stick around even after the process that
+ // created them has exited, we want to make sure they are fairly unique (if they
+ // aren't, we'll just fail to save cache information, which is not good
+ // but it won't cause anyone to crash or anything). The unique name
+ // algorithm used here is to append the process id and tick count to
+ // the name of the cache file.
+
+ cacheFileName = Wszdup( (*cacheFile)->GetBuffer() );
+ size_t len = wcslen( cacheFileName ) + 1 + 2 * MAX_NUM_LENGTH;
+ cacheFileNameTemp = new WCHAR[len];
+ wcscpy_s( cacheFileNameTemp, len, cacheFileName );
+ wcscat_s( cacheFileNameTemp, len, W(".") );
+ SecurityConfig::wcscatDWORD( cacheFileNameTemp, len, GetCurrentProcessId() );
+ wcscat_s( cacheFileNameTemp, len, W(".") );
+ SecurityConfig::wcscatDWORD( cacheFileNameTemp, len, GetTickCount() );
+ }
+ else
+ {
+ cacheFileName = NULL;
+ cacheFileNameTemp = NULL;
+ }
+ }
+
+ Data( INT32 id, const WCHAR* configFile, const WCHAR* cacheFile )
+ : id( id ),
+ configData( NULL ),
+ oldCacheEntries( new ArrayList ),
+ newCacheEntries( new ArrayList ),
+ state( Data::None ),
+ cache( INVALID_HANDLE_VALUE ),
+ configBuffer( NULL ),
+ newEntriesSize( 0 )
+
+ {
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(*configFile != NULL);
+ } CONTRACTL_END;
+
+ configFileName = Wszdup( configFile );
+
+ if (cacheFile != NULL)
+ {
+ cacheFileName = Wszdup( cacheFile );
+ size_t len = wcslen( cacheFileName ) + 1 + 2 * MAX_NUM_LENGTH;
+ cacheFileNameTemp = new WCHAR[len];
+ wcscpy_s( cacheFileNameTemp, len, cacheFileName );
+ wcscat_s( cacheFileNameTemp, len, W(".") );
+ SecurityConfig::wcscatDWORD( cacheFileNameTemp, len, GetCurrentProcessId() );
+ wcscat_s( cacheFileNameTemp, len, W(".") );
+ SecurityConfig::wcscatDWORD( cacheFileNameTemp, len, GetTickCount() );
+ }
+ else
+ {
+ cacheFileName = NULL;
+ cacheFileNameTemp = NULL;
+ }
+ }
+
+ void Reset( void )
+ {
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ delete [] configBuffer;
+ configBuffer = NULL;
+
+ if (cache != INVALID_HANDLE_VALUE)
+ {
+ CloseHandle( cache );
+ cache = INVALID_HANDLE_VALUE;
+ }
+
+ if (cacheFileNameTemp != NULL)
+ {
+ // Note: we don't check a return value here as the worst thing that
+ // happens is we leave a spurious cache file.
+
+ WszDeleteFile( cacheFileNameTemp );
+ }
+
+ if (configData != NULL)
+ delete [] configData;
+ configData = NULL;
+
+ DeleteAllEntries();
+ header = CacheHeader();
+
+ oldCacheEntries = new ArrayList();
+ newCacheEntries = new ArrayList();
+
+ }
+
+ void Cleanup( void )
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (cache != INVALID_HANDLE_VALUE)
+ {
+ CloseHandle( cache );
+ cache = INVALID_HANDLE_VALUE;
+ }
+
+ if (cacheFileNameTemp != NULL)
+ {
+ // Note: we don't check a return value here as the worst thing that
+ // happens is we leave a spurious cache file.
+
+ WszDeleteFile( cacheFileNameTemp );
+ }
+ }
+
+ ~Data( void )
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ Cleanup();
+ delete [] configBuffer;
+
+ delete [] configFileName;
+ delete [] cacheFileName;
+ delete [] cacheFileNameTemp;
+
+ if (configData != NULL)
+ delete [] configData;
+ DeleteAllEntries();
+ }
+
+ void DeleteAllEntries( void );
+};
+
+void Data::DeleteAllEntries( void )
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ ArrayList::Iterator iter;
+
+ if (oldCacheEntries != NULL)
+ {
+ iter = oldCacheEntries->Iterate();
+
+ while (iter.Next())
+ {
+ delete (CacheEntry*) iter.GetElement();
+ }
+
+ delete oldCacheEntries;
+ oldCacheEntries = NULL;
+ }
+
+ if (newCacheEntries != NULL)
+ {
+ iter = newCacheEntries->Iterate();
+
+ while (iter.Next())
+ {
+ delete (CacheEntry*) iter.GetElement();
+ }
+
+ delete newCacheEntries;
+ newCacheEntries = NULL;
+ }
+}
+
+void* SecurityConfig::GetData( INT32 id )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ ArrayList::Iterator iter = entries_.Iterate();
+
+ while (iter.Next())
+ {
+ Data* data = (Data*)iter.GetElement();
+
+ if (data->id == id)
+ {
+ return data;
+ }
+ }
+
+ return NULL;
+}
+
+static BOOL CacheOutOfDate( FILETIME* configFileTime, __in_z WCHAR* configFileName, __in_opt __in_z WCHAR* cacheFileName )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END
+
+ BOOL retval = TRUE;
+ BOOL deleteFile = FALSE;
+
+ HandleHolder config(WszCreateFile( configFileName, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL ));
+
+ if (config.GetValue() == INVALID_HANDLE_VALUE)
+ {
+ goto CLEANUP;
+ }
+
+ // Get the last write time for both files.
+
+ FILETIME newConfigTime;
+
+ if (!GetFileTime( config.GetValue(), NULL, NULL, &newConfigTime ))
+ {
+ goto CLEANUP;
+ }
+
+ if (CompareFileTime( configFileTime, &newConfigTime ) != 0)
+ {
+ // Cache is dated. Delete the cache.
+ deleteFile = TRUE;
+ goto CLEANUP;
+ }
+
+ retval = FALSE;
+
+CLEANUP:
+ // Note: deleting this file is a perf optimization so that
+ // we don't have to do this file time comparison next time.
+ // Therefore, if it fails for some reason we just lose a
+ // little perf.
+
+ if (deleteFile && cacheFileName != NULL)
+ WszDeleteFile( cacheFileName );
+
+ return retval;
+}
+
+static BOOL CacheOutOfDate( FILETIME* cacheFileTime, HANDLE cache, __in_opt __in_z WCHAR* cacheFileName )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END
+
+ BOOL retval = TRUE;
+
+ // Get the last write time for both files.
+
+ FILETIME newCacheTime;
+
+ if (!GetFileTime( cache, NULL, NULL, &newCacheTime ))
+ {
+ goto CLEANUP;
+ }
+
+ if (CompareFileTime( cacheFileTime, &newCacheTime ) != 0)
+ {
+ // Cache is dated. Delete the cache.
+ // Note: deleting this file is a perf optimization so that
+ // we don't have to do this file time comparison next time.
+ // Therefore, if it fails for some reason we just lose a
+ // little perf.
+
+ if (cacheFileName != NULL)
+ {
+ CloseHandle( cache );
+ WszDeleteFile( cacheFileName );
+ }
+ goto CLEANUP;
+ }
+
+ retval = FALSE;
+
+CLEANUP:
+ return retval;
+}
+
+static BOOL CacheOutOfDate( FILETIME* configTime, FILETIME* cachedConfigTime )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END
+
+ DWORD result = CompareFileTime( configTime, cachedConfigTime );
+
+ return result != 0;
+}
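+
+// Note that all three CacheOutOfDate overloads apply the same staleness rule: any
+// difference between the recorded file time and the current one (newer or older)
+// invalidates the cache.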
+
+static DWORD GetShareFlags()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return FILE_SHARE_READ | FILE_SHARE_DELETE;
+}
+
+static HRESULT WriteFileData( HANDLE file, LPCBYTE data, DWORD size )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ DWORD totalBytesWritten = 0;
+ DWORD bytesWritten;
+
+ do
+ {
+ // Write from the current offset so a partial write doesn't resend the start of the buffer.
+ if (WriteFile( file, data + totalBytesWritten, size - totalBytesWritten, &bytesWritten, NULL ) == 0)
+ {
+ return E_FAIL;
+ }
+ if (bytesWritten == 0)
+ {
+ return E_FAIL;
+ }
+ totalBytesWritten += bytesWritten;
+ } while (totalBytesWritten < size);
+
+ return S_OK;
+}
+
+// The data argument to this function can be a pointer into the GC heap.
+// We ensure cooperative mode before calling this function with such a
+// pointer, so we can't change GC mode inside this function.
+// Greg will look into ways to pin the object.
+
+static HRESULT ReadFileData( HANDLE file, PBYTE data, DWORD size )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ DWORD totalBytesRead = 0;
+ DWORD bytesRead;
+ do
+ {
+ // Read into the current offset so a partial read doesn't overwrite data already read.
+ if (ReadFile( file, data + totalBytesRead, size - totalBytesRead, &bytesRead, NULL ) == 0)
+ {
+ return E_FAIL;
+ }
+
+ if (bytesRead == 0)
+ {
+ return E_FAIL;
+ }
+
+ totalBytesRead += bytesRead;
+
+ } while (totalBytesRead < size);
+
+ return S_OK;
+}
+
+SecurityConfig::ConfigRetval SecurityConfig::InitData( INT32 id, const WCHAR* configFileName, const WCHAR* cacheFileName )
+{
+ STANDARD_VM_CONTRACT;
+
+ Data* data = (Data*)GetData( id );
+ if (data != NULL)
+ {
+ return data->initRetval;
+ }
+
+ if (configFileName == NULL || wcslen( configFileName ) == 0)
+ {
+ return NoFile;
+ }
+
+ {
+ CrstHolder ch( &dataLock_ );
+ data = new (nothrow) Data( id, configFileName, cacheFileName );
+ }
+
+ if (data == NULL)
+ {
+ return NoFile;
+ }
+
+ return InitData( data, TRUE );
+}
+
+
+SecurityConfig::ConfigRetval SecurityConfig::InitData( void* configDataParam, BOOL addToList )
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE( configDataParam != NULL );
+
+ Data* data = (Data*) configDataParam;
+ DWORD cacheSize;
+ DWORD configSize;
+ ConfigRetval retval = NoFile;
+ DWORD shareFlags;
+
+ shareFlags = GetShareFlags();
+
+ // Crack open the config file.
+
+ HandleHolder config(WszCreateFile( data->configFileName, GENERIC_READ, shareFlags, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL ));
+ if (config == INVALID_HANDLE_VALUE || !GetFileTime( config, NULL, NULL, &data->configFileTime ))
+ {
+ memset( &data->configFileTime, 0, sizeof( data->configFileTime ) );
+ }
+ else
+ {
+ data->state = (Data::State)(Data::UsingConfigFile | data->state);
+ }
+
+ // If we want a cache file, try to open that up.
+ // Note: we do not use a holder for data->cache because the new holder for data will
+ // delete the entire data structure which includes closing this handle as necessary.
+
+ if (data->cacheFileName != NULL)
+ data->cache = WszCreateFile( data->cacheFileName, GENERIC_READ, shareFlags, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL );
+
+ if (data->cache == INVALID_HANDLE_VALUE)
+ {
+ goto READ_DATA;
+ }
+
+ // Validate that the cache file is in a good form by checking
+ // that it is at least big enough to contain a header.
+
+ cacheSize = SafeGetFileSize( data->cache, NULL );
+
+ if (cacheSize == 0xFFFFFFFF)
+ {
+ goto READ_DATA;
+ }
+
+ if (cacheSize < sizeof( CacheHeader ))
+ {
+ goto READ_DATA;
+ }
+
+ // Finally read the data from the file into the buffer.
+
+ if (ReadFileData( data->cache, (BYTE*)&data->header, sizeof( CacheHeader ) ) != S_OK)
+ {
+ goto READ_DATA;
+ }
+
+ if (!data->header.IsValid())
+ {
+ goto READ_DATA;
+ }
+
+ // Check to make sure the cache file and the config file
+ // match up by comparing the actual file time of the config
+ // file and the config file time stored in the cache file.
+
+ if (CacheOutOfDate( &data->configFileTime, &data->header.configFileTime ))
+ {
+ goto READ_DATA;
+ }
+
+ if (!GetFileTime( data->cache, NULL, NULL, &data->cacheFileTime ))
+ {
+ goto READ_DATA;
+ }
+
+ // Set the file pointer to after both the header and config data (if any) so
+ // that we are ready to read cache entries.
+
+ if (SetFilePointer( data->cache, sizeof( CacheHeader ) + data->header.sizeConfig, NULL, FILE_BEGIN ) == INVALID_SET_FILE_POINTER)
+ {
+ goto READ_DATA;
+ }
+
+ data->cacheCurrentPosition = sizeof( CacheHeader ) + data->header.sizeConfig;
+ data->state = (Data::State)(Data::UsingCacheFile | Data::CopyCacheFile | data->state);
+
+ retval = (ConfigRetval)(retval | CacheFile);
+
+READ_DATA:
+ // If we are not using the cache file but we successfully opened it, we need
+ // to close it now. In addition, we need to reset the cache information
+ // stored in the Data object to make sure there is no spill over.
+
+ if (data->cache != INVALID_HANDLE_VALUE && (data->state & Data::UsingCacheFile) == 0)
+ {
+ CloseHandle( data->cache );
+ data->header = CacheHeader();
+ data->cache = INVALID_HANDLE_VALUE;
+ }
+
+ if (config != INVALID_HANDLE_VALUE)
+ {
+ configSize = SafeGetFileSize( config, NULL );
+
+ if (configSize == 0xFFFFFFFF)
+ {
+ goto ADD_DATA;
+ }
+
+ // Be paranoid and only use the cache file version if we find that it has the correct sized
+ // blob in it.
+
+ if ((data->state & Data::UsingCacheFile) != 0 && configSize == data->header.sizeConfig)
+ {
+ goto ADD_DATA;
+ }
+ else
+ {
+ if (data->cache != INVALID_HANDLE_VALUE)
+ {
+ CloseHandle( data->cache );
+ data->header = CacheHeader();
+ data->cache = INVALID_HANDLE_VALUE;
+ data->state = (Data::State)(data->state & ~(Data::UsingCacheFile));
+ }
+
+ data->configData = new BYTE[configSize];
+ if (ReadFileData( config, data->configData, configSize ) != S_OK)
+ {
+ goto ADD_DATA;
+ }
+ data->configDataSize = configSize;
+ }
+ retval = (ConfigRetval)(retval | ConfigFile);
+ }
+
+ADD_DATA:
+ {
+ CrstHolder ch(&dataLock_);
+
+ if (addToList)
+ {
+ IfFailThrow(entries_.Append(data));
+ }
+ }
+
+ _ASSERTE( data );
+ data->initRetval = retval;
+
+ return retval;
+
+}
+
+static CacheEntry* LoadNextEntry( HANDLE cache, Data* data )
+{
+ STANDARD_VM_CONTRACT;
+
+ if ((data->state & Data::CacheExhausted) != 0)
+ return NULL;
+
+ NewHolder<CacheEntry> entry(new CacheEntry());
+
+ if (SetFilePointer( cache, data->cacheCurrentPosition, NULL, FILE_BEGIN ) == INVALID_SET_FILE_POINTER)
+ {
+ return NULL;
+ }
+
+ if (ReadFileData( cache, (BYTE*)&entry.GetValue()->header, sizeof( CacheEntryHeader ) ) != S_OK)
+ {
+ return NULL;
+ }
+
+ entry.GetValue()->cachePosition = data->cacheCurrentPosition + sizeof( entry.GetValue()->header );
+
+ data->cacheCurrentPosition += sizeof( entry.GetValue()->header ) + entry.GetValue()->header.keySize + entry.GetValue()->header.dataSize;
+
+ if (SetFilePointer( cache, entry.GetValue()->header.keySize + entry->header.dataSize, NULL, FILE_CURRENT ) == INVALID_SET_FILE_POINTER)
+ {
+ return NULL;
+ }
+
+ // We append a partially populated entry. CompareEntry is robust enough to handle this.
+ IfFailThrow(data->oldCacheEntries->Append( entry ));
+
+ return entry.Extract();
+}
+
+static BOOL WriteEntry( HANDLE cache, CacheEntry* entry, HANDLE oldCache = NULL )
+{
+ STANDARD_VM_CONTRACT;
+
+ if (WriteFileData( cache, (BYTE*)&entry->header, sizeof( CacheEntryHeader ) ) != S_OK)
+ {
+ return FALSE;
+ }
+
+ if (entry->key == NULL)
+ {
+ _ASSERTE (oldCache != NULL);
+
+ // We were lazy in reading the entry. Read the key now.
+ entry->key = new BYTE[entry->header.keySize];
+
+ _ASSERTE (cache != INVALID_HANDLE_VALUE);
+
+ if (SetFilePointer( oldCache, entry->cachePosition, NULL, FILE_BEGIN ) == INVALID_SET_FILE_POINTER)
+ return FALSE;
+
+ if (ReadFileData( oldCache, entry->key, entry->header.keySize ) != S_OK)
+ {
+ return FALSE;
+ }
+
+ entry->cachePosition += entry->header.keySize;
+ }
+
+ _ASSERTE( entry->key != NULL );
+
+ if (entry->data == NULL)
+ {
+ _ASSERTE (oldCache != NULL);
+
+ // We were lazy in reading the entry. Read the data also.
+ entry->data = new BYTE[entry->header.dataSize];
+
+ if (SetFilePointer( oldCache, entry->cachePosition, NULL, FILE_BEGIN ) == INVALID_SET_FILE_POINTER)
+ return FALSE;
+
+ if (ReadFileData( oldCache, entry->data, entry->header.dataSize ) != S_OK)
+ return FALSE;
+
+ entry->cachePosition += entry->header.dataSize;
+ }
+
+ _ASSERTE( entry->data != NULL );
+
+ if (WriteFileData( cache, entry->key, entry->header.keySize ) != S_OK)
+ {
+ return FALSE;
+ }
+
+ if (WriteFileData( cache, entry->data, entry->header.dataSize ) != S_OK)
+ {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+BOOL SecurityConfig::SaveCacheData( INT32 id )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ GCX_PREEMP();
+
+ // Note: this function should only be called at EEShutdown time.
+ // This is because we need to close the current cache file in
+ // order to delete it. If it ever became necessary to do
+ // cache saves while a process is still executing managed code,
+ // it should be possible to create a locking scheme for usage
+ // of the cache handle with very little reordering of the below
+ // (as it should always be possible for us to have a live copy of
+ // the file and yet still be making the swap).
+
+ HandleHolder cache;
+ HandleHolder config;
+ CacheHeader header;
+ BOOL retval = FALSE;
+ BOOL fWriteSucceeded = FALSE;
+ DWORD numEntriesWritten = 0;
+ DWORD amountWritten = 0;
+ DWORD sizeConfig = 0;
+ NewHolder<BYTE> configBuffer;
+ BOOL useConfigData = FALSE;
+
+ Data* data = (Data*)GetData( id );
+
+ // If there is no data registered under this id, or there is no
+ // cache file name associated with the data, then fail.
+
+ if (data == NULL || data->cacheFileName == NULL)
+ return FALSE;
+
+ // If we haven't added anything new to the cache
+ // then just return success.
+
+ if ((data->state & Data::CacheUpdated) == 0)
+ return TRUE;
+
+ // If the config file has changed since the process started
+ // then our cache data is no longer valid. We'll just
+ // return success in this case.
+
+ if ((data->state & Data::UsingConfigFile) != 0 && CacheOutOfDate( &data->configFileTime, data->configFileName, NULL ))
+ return TRUE;
+
+ DWORD fileNameLength = (DWORD)wcslen( data->cacheFileName );
+
+ NewArrayHolder<WCHAR> newFileName(new WCHAR[fileNameLength + 5]);
+
+ swprintf_s( newFileName.GetValue(), fileNameLength + 5, W("%s%s"), data->cacheFileName, W(".new") );
+
+ cache.Assign( WszCreateFile( newFileName, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL ) );
+
+ for (DWORD RetryCount = 0; RetryCount < 5; RetryCount++)
+ {
+ if (cache != INVALID_HANDLE_VALUE)
+ {
+ break;
+ }
+ else
+ {
+ DWORD error = GetLastError();
+
+ if (error == ERROR_PATH_NOT_FOUND)
+ {
+ // The directory does not exist, iterate through and try to create it.
+
+ WCHAR* currentChar = newFileName;
+
+ // Skip the first backslash
+
+ while (*currentChar != W('\0'))
+ {
+ if (*currentChar == W('\\') || *currentChar == W('/'))
+ {
+ currentChar++;
+ break;
+ }
+ currentChar++;
+ }
+
+ // Iterate through trying to create each subdirectory.
+
+ while (*currentChar != W('\0'))
+ {
+ if (*currentChar == W('\\') || *currentChar == W('/'))
+ {
+ *currentChar = W('\0');
+
+ if (!WszCreateDirectory( newFileName, NULL ))
+ {
+ error = GetLastError();
+
+ if (error != ERROR_ACCESS_DENIED && error != ERROR_INVALID_NAME && error != ERROR_ALREADY_EXISTS)
+ {
+ goto CLEANUP;
+ }
+ }
+
+ *currentChar = W('\\');
+ }
+ currentChar++;
+ }
+
+ // Try the file creation again
+ continue;
+ }
+ }
+
+ // CreateFile failed. Sleep a little and retry, in case a
+ // virus scanner caused the creation to fail.
+ ClrSleepEx(10, FALSE);
+ }
+
+ if (cache.GetValue() == INVALID_HANDLE_VALUE)
+ goto CLEANUP;
+
+ // This code seems complicated only because of the
+ // number of cases that we are trying to handle. All we
+ // are trying to do is determine the amount of space to
+ // leave for the config information.
+
+ // If we saved out a new config file during this run, use
+ // the config size stored in the Data object itself.
+
+ if (data->configData != NULL)
+ {
+ useConfigData = TRUE;
+ }
+
+ if ((data->state & Data::NewConfigFile) != 0)
+ {
+ sizeConfig = data->sizeConfig;
+ }
+
+ // If we have a cache file, then use the size stored in the
+ // cache header.
+
+ else if ((data->state & Data::UsingCacheFile) != 0)
+ {
+ sizeConfig = data->header.sizeConfig;
+ }
+
+ // If we read in the config data, use the size of the
+ // managed byte array that it is stored in.
+
+ else if (useConfigData)
+ {
+ sizeConfig = data->configDataSize;
+ }
+
+ // Otherwise, check the config file itself to get the size.
+
+ else
+ {
+ DWORD shareFlags;
+
+ shareFlags = GetShareFlags();
+
+ config.Assign( WszCreateFile( data->configFileName, GENERIC_READ, shareFlags, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL ) );
+
+ if (config == INVALID_HANDLE_VALUE)
+ {
+ sizeConfig = 0;
+ }
+ else
+ {
+ sizeConfig = SafeGetFileSize( config, NULL );
+
+ if (sizeConfig == 0xFFFFFFFF)
+ {
+ sizeConfig = 0;
+ }
+ }
+ }
+
+ // First write the entries.
+
+ if (SetFilePointer( cache, sizeof( CacheHeader ) + sizeConfig, NULL, FILE_BEGIN ) == INVALID_SET_FILE_POINTER)
+ {
+ goto CLEANUP;
+ }
+
+ // We're going to write out the cache entries in a modified
+ // least recently used order, throwing out any that end up
+ // taking us past our hardcoded max file size.
+
+ {
+ // First, write the entries from the cache file that were used.
+ // We do this because presumably these are system assemblies
+ // and other assemblies used by a number of applications.
+
+ ArrayList::Iterator iter;
+
+ if ((data->state & Data::UsingCacheFile) != 0)
+ {
+ iter = data->oldCacheEntries->Iterate();
+
+ while (iter.Next() && amountWritten < MAX_CACHEFILE_SIZE)
+ {
+ CacheEntry* currentEntry = (CacheEntry*)iter.GetElement();
+
+ if (currentEntry->used)
+ {
+ if(!WriteEntry( cache, currentEntry, data->cache ))
+ {
+ goto CLEANUP;
+ }
+
+ amountWritten += SIZE_OF_ENTRY( currentEntry );
+ numEntriesWritten++;
+ }
+ }
+ }
+
+ // Second, write any new cache entries to the file. These are
+ // more likely to be assemblies specific to this app.
+
+ iter = data->newCacheEntries->Iterate();
+
+ while (iter.Next() && amountWritten < MAX_CACHEFILE_SIZE)
+ {
+ CacheEntry* currentEntry = (CacheEntry*)iter.GetElement();
+
+ if (!WriteEntry( cache, currentEntry ))
+ {
+ goto CLEANUP;
+ }
+
+ amountWritten += SIZE_OF_ENTRY( currentEntry );
+ numEntriesWritten++;
+ }
+
+ // Third, if we are using the cache file, write the old entries
+ // that were not used this time around.
+
+ if ((data->state & Data::UsingCacheFile) != 0)
+ {
+ // First, write the ones that we already have partially loaded
+
+ iter = data->oldCacheEntries->Iterate();
+
+ while (iter.Next() && amountWritten < MAX_CACHEFILE_SIZE)
+ {
+ CacheEntry* currentEntry = (CacheEntry*)iter.GetElement();
+
+ if (!currentEntry->used)
+ {
+ if(!WriteEntry( cache, currentEntry, data->cache ))
+ {
+ goto CLEANUP;
+ }
+
+ amountWritten += SIZE_OF_ENTRY( currentEntry );
+ numEntriesWritten++;
+ }
+ }
+
+ while (amountWritten < MAX_CACHEFILE_SIZE)
+ {
+ CacheEntry* entry = LoadNextEntry( data->cache, data );
+
+ if (entry == NULL)
+ break;
+
+ if (!WriteEntry( cache, entry, data->cache ))
+ {
+ goto CLEANUP;
+ }
+
+ amountWritten += SIZE_OF_ENTRY( entry );
+ numEntriesWritten++;
+ }
+ }
+
+ fWriteSucceeded = TRUE;
+ }
+
+
+ if (!fWriteSucceeded)
+ {
+ CloseHandle( cache.GetValue() );
+ cache.SuppressRelease();
+ WszDeleteFile( newFileName );
+ goto CLEANUP;
+ }
+
+ // End with writing the header.
+
+ header.configFileTime = data->configFileTime;
+ header.isSecurityOn = 1;
+ header.numEntries = numEntriesWritten;
+ header.quickCache = data->header.quickCache;
+ header.sizeConfig = sizeConfig;
+
+ if (SetFilePointer( cache, 0, NULL, FILE_BEGIN ) == INVALID_SET_FILE_POINTER)
+ {
+ // Couldn't move to the beginning of the file
+ goto CLEANUP;
+ }
+
+ if (WriteFileData( cache, (PBYTE)&header, sizeof( header ) ) != S_OK)
+ {
+ // Couldn't write header info.
+ goto CLEANUP;
+ }
+
+ if (sizeConfig != 0)
+ {
+ if ((data->state & Data::NewConfigFile) != 0)
+ {
+ if (WriteFileData( cache, data->configBuffer, sizeConfig ) != S_OK)
+ {
+ goto CLEANUP;
+ }
+ }
+ else
+ {
+ if (data->configData != NULL)
+ {
+ if (WriteFileData( cache, data->configData, sizeConfig ) != S_OK)
+ {
+ goto CLEANUP;
+ }
+ }
+ else if ((data->state & Data::UsingCacheFile) != 0)
+ {
+ configBuffer.Assign( new BYTE[sizeConfig] );
+
+ if (SetFilePointer( data->cache, sizeof( CacheHeader ), NULL, FILE_BEGIN ) == INVALID_SET_FILE_POINTER)
+ {
+ goto CLEANUP;
+ }
+
+ if (ReadFileData( data->cache, configBuffer.GetValue(), sizeConfig ) != S_OK)
+ {
+ goto CLEANUP;
+ }
+
+ if (WriteFileData( cache, configBuffer.GetValue(), sizeConfig ) != S_OK)
+ {
+ goto CLEANUP;
+ }
+ }
+ else
+ {
+ configBuffer.Assign( new BYTE[sizeConfig] );
+
+ if (SetFilePointer( config, 0, NULL, FILE_BEGIN ) == INVALID_SET_FILE_POINTER)
+ {
+ goto CLEANUP;
+ }
+
+ if (ReadFileData( config, configBuffer.GetValue(), sizeConfig ) != S_OK)
+ {
+ goto CLEANUP;
+ }
+
+ if (WriteFileData( cache, configBuffer.GetValue(), sizeConfig ) != S_OK)
+ {
+ goto CLEANUP;
+ }
+ }
+ }
+ }
+
+ // Flush the file buffers to make sure
+ // we get full write through.
+
+ FlushFileBuffers( cache.GetValue() );
+
+ CloseHandle( cache );
+ cache.SuppressRelease();
+ CloseHandle( data->cache );
+ data->cache = INVALID_HANDLE_VALUE;
+
+ // Move the existing file out of the way
+ // Note: use MoveFile because we know it will never cross
+ // device boundaries.
+
+    // Note: the delete can fail, but we can't really do anything
+    // about it if it does, so just ignore any failures.
+ WszDeleteFile( data->cacheFileNameTemp );
+
+ // Try to move the existing cache file out of the way. However, if we can't
+ // then try to delete it. If it can't be deleted then just bail out.
+ if (!WszMoveFile( data->cacheFileName, data->cacheFileNameTemp ) &&
+ (!Assembly::FileNotFound(HRESULT_FROM_WIN32(GetLastError()))) &&
+ !WszDeleteFile( data->cacheFileName ))
+ {
+ if (!Assembly::FileNotFound(HRESULT_FROM_WIN32(GetLastError())))
+ goto CLEANUP;
+ }
+
+ // Move the new file into position
+
+ if (!WszMoveFile( newFileName, data->cacheFileName ))
+ {
+ goto CLEANUP;
+ }
+
+ retval = TRUE;
+
+CLEANUP:
+ if (retval)
+ cache.SuppressRelease();
+
+ return retval;
+
+}
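+
+// A note on the write order above (a summary of the code, not a spec):
+// used old entries go first since they are most likely shared system
+// assemblies, then new entries (most likely app-specific), then unused old
+// entries, with writing cut off once amountWritten reaches MAX_CACHEFILE_SIZE.
+// The header is written last, once numEntriesWritten is known, so a partially
+// written file never claims more entries than it actually holds.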
+
+void QCALLTYPE SecurityConfig::ResetCacheData(INT32 id)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ Data* data = (Data*)GetData( id );
+
+ if (data != NULL)
+ {
+ CrstHolder ch(&dataLock_);
+
+ data->DeleteAllEntries();
+
+ data->oldCacheEntries = new ArrayList;
+ data->newCacheEntries = new ArrayList;
+
+ data->header = CacheHeader();
+ data->state = (Data::State)(~(Data::CopyCacheFile | Data::UsingCacheFile) & data->state);
+
+ HandleHolder config(WszCreateFile( data->configFileName, GENERIC_READ, GetShareFlags(), NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL ));
+
+ if (config.GetValue() != INVALID_HANDLE_VALUE)
+ {
+ VERIFY(GetFileTime( config, NULL, NULL, &data->configFileTime ));
+ VERIFY(GetFileTime( config, NULL, NULL, &data->header.configFileTime ));
+ }
+    }
+
+ END_QCALL;
+}
+
+HRESULT QCALLTYPE SecurityConfig::SaveDataByte(LPCWSTR wszConfigPath, LPCBYTE pbData, DWORD cbData)
+{
+ QCALL_CONTRACT;
+
+ HRESULT retval = E_FAIL;
+
+ BEGIN_QCALL;
+
+ HandleHolder newFile(INVALID_HANDLE_VALUE);
+
+ int RetryCount;
+ DWORD error = 0;
+ DWORD fileNameLength = (DWORD) wcslen(wszConfigPath);
+
+ NewArrayHolder<WCHAR> newFileName(new WCHAR[fileNameLength + 5]);
+ NewArrayHolder<WCHAR> oldFileName(new WCHAR[fileNameLength + 5]);
+
+ swprintf_s( newFileName.GetValue(), fileNameLength + 5, W("%s%s"), wszConfigPath, W(".new") );
+ swprintf_s( oldFileName.GetValue(), fileNameLength + 5, W("%s%s"), wszConfigPath, W(".old") );
+
+ // Create the new file.
+ for (RetryCount = 0; RetryCount < 5; RetryCount++) {
+ newFile.Assign( WszCreateFile( newFileName, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL ) );
+
+ if (newFile != INVALID_HANDLE_VALUE)
+ break;
+ else
+ {
+ error = GetLastError();
+
+ if (error == ERROR_PATH_NOT_FOUND)
+ {
+                // The directory does not exist; iterate through the path and try to create it.
+
+ WCHAR* currentChar = newFileName;
+
+                // Skip past the first path separator
+
+ while (*currentChar != W('\0'))
+ {
+ if (*currentChar == W('\\') || *currentChar == W('/'))
+ {
+ currentChar++;
+ break;
+ }
+ currentChar++;
+ }
+
+ // Iterate through trying to create each subdirectory.
+
+ while (*currentChar != W('\0'))
+ {
+ if (*currentChar == W('\\') || *currentChar == W('/'))
+ {
+ *currentChar = W('\0');
+
+ if (!WszCreateDirectory( newFileName, NULL ))
+ {
+ error = GetLastError();
+
+ if (error != ERROR_ACCESS_DENIED && error != ERROR_ALREADY_EXISTS)
+ {
+ goto CLEANUP;
+ }
+ }
+
+ *currentChar = W('\\');
+ }
+ currentChar++;
+ }
+
+ // Try the file creation again
+ continue;
+ }
+ }
+
+ // CreateFile failed. Sleep a little and retry, in case a
+ // virus scanner caused the creation to fail.
+ ClrSleepEx(10, FALSE);
+ }
+
+ if (newFile == INVALID_HANDLE_VALUE) {
+ goto CLEANUP;
+ }
+
+ // Write the data into it.
+ if ((retval = WriteFileData(newFile.GetValue(), pbData, cbData)) != S_OK)
+ {
+ // Write failed, destroy the file and bail.
+        // Note: if the delete fails, we always open this file with
+        // CREATE_ALWAYS, so that should take care of it. If not,
+        // we'll fail to write out future cache files.
+ CloseHandle( newFile.GetValue() );
+ newFile.SuppressRelease();
+ WszDeleteFile( newFileName );
+ goto CLEANUP;
+ }
+
+ if (!FlushFileBuffers(newFile.GetValue()))
+ {
+ error = GetLastError();
+ goto CLEANUP;
+ }
+
+ CloseHandle( newFile.GetValue() );
+ newFile.SuppressRelease();
+
+ // Move the existing file out of the way
+ if (!WszMoveFileEx( wszConfigPath, oldFileName, MOVEFILE_REPLACE_EXISTING | MOVEFILE_COPY_ALLOWED ))
+ {
+ // If move fails for a reason other than not being able to find the file, bail out.
+ // Also, if the old file didn't exist, we have no need to delete it.
+ HRESULT hrMove = HRESULT_FROM_WIN32(GetLastError());
+ if (!Assembly::FileNotFound(hrMove))
+ {
+ retval = hrMove;
+ WszDeleteFile(wszConfigPath);
+ goto CLEANUP;
+ }
+ }
+
+ // Move the new file into position
+
+ if (!WszMoveFileEx( newFileName, wszConfigPath, MOVEFILE_REPLACE_EXISTING | MOVEFILE_COPY_ALLOWED ))
+ {
+ error = GetLastError();
+ goto CLEANUP;
+ }
+
+ retval = S_OK;
+
+CLEANUP:
+ if (retval == E_FAIL && error != 0)
+ retval = HRESULT_FROM_WIN32(error);
+
+ END_QCALL;
+
+ return retval;
+}
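+
+// Illustrative sketch of the save protocol above, as a hypothetical caller
+// would see it ("policy.config" is a made-up path; real callers pass the
+// path of a policy-level config file):
+//
+//     HRESULT hr = SecurityConfig::SaveDataByte(W("policy.config"), pbData, cbData);
+//
+// 1. "policy.config.new" is created (retrying a few times, creating missing
+//    directories along the way) and flushed to disk before anything else is
+//    touched, so a crash never leaves a truncated "policy.config" behind.
+// 2. The current "policy.config", if any, is rotated to "policy.config.old",
+//    which RecoverData below can later promote back.
+// 3. "policy.config.new" is moved into place as "policy.config".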
+
+BOOL QCALLTYPE SecurityConfig::RecoverData(INT32 id)
+{
+ QCALL_CONTRACT;
+
+ BOOL retval = FALSE;
+
+ BEGIN_QCALL;
+
+ Data* data = (Data*)GetData( id );
+
+ if (data == NULL)
+ goto CLEANUP;
+
+ {
+ DWORD fileNameLength = (DWORD)wcslen( data->configFileName );
+
+ NewArrayHolder<WCHAR> tempFileName(new WCHAR[fileNameLength + 10]);
+ NewArrayHolder<WCHAR> oldFileName(new WCHAR[fileNameLength + 5]);
+
+ swprintf_s( tempFileName.GetValue(), fileNameLength + 10, W("%s%s"), data->configFileName, W(".old.temp") );
+ swprintf_s( oldFileName.GetValue(), fileNameLength + 5, W("%s%s"), data->configFileName, W(".old") );
+
+ HandleHolder oldFile(WszCreateFile( oldFileName, 0, 0, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL ));
+
+ if (oldFile.GetValue() == INVALID_HANDLE_VALUE)
+ {
+ goto CLEANUP;
+ }
+
+ CloseHandle( oldFile );
+ oldFile.SuppressRelease();
+
+ if (!WszMoveFile( data->configFileName, tempFileName ))
+ {
+ goto CLEANUP;
+ }
+
+ if (!WszMoveFile( oldFileName, data->configFileName ))
+ {
+ goto CLEANUP;
+ }
+
+ if (!WszMoveFile( tempFileName, oldFileName ))
+ {
+ goto CLEANUP;
+ }
+ }
+
+ // We need to do some work to reset the unmanaged data object
+ // so that the managed side of things behaves like you'd expect.
+ // This basically means cleaning up the open resources and
+ // doing the work to init on a different set of files.
+
+ data->Reset();
+ InitData( data, FALSE );
+
+ retval = TRUE;
+
+CLEANUP:
+ END_QCALL;
+
+ return retval;
+}
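+
+// The three moves above amount to swapping <config> and <config>.old: after
+// RecoverData succeeds, the previously saved copy is live and the (presumably
+// bad) current copy is preserved as <config>.old, so a second call would
+// swap them back.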
+
+BOOL SecurityConfig::GetQuickCacheEntry( INT32 id, QuickCacheEntryType type )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ //
+ // If there is no config file for this level, then we'll assume the default
+    // security policy is in effect. This could happen, for example, if there is
+    // no user profile loaded or if the config file is not present.
+ //
+
+ Data* data = (Data*)GetData( id );
+ if (data == NULL || ((data->state & Data::UsingConfigFile) == 0))
+ return (type == FullTrustZoneMyComputer); // MyComputer gets FT by default.
+
+ if ((data->state & Data::UsingCacheFile) == 0)
+ return FALSE;
+
+ return (data->header.quickCache & type);
+}
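+
+// For illustration (hypothetical call): with no user profile loaded,
+// GetQuickCacheEntry(UserPolicyLevel, FullTrustZoneMyComputer) returns TRUE,
+// since MyComputer gets full trust by default, while the same call with
+// FullTrustZoneInternet returns FALSE.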
+
+void QCALLTYPE SecurityConfig::SetQuickCache(INT32 id, QuickCacheEntryType type)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ Data* data = (Data*)GetData( id );
+
+ if (data != NULL && (DWORD) type != data->header.quickCache)
+ {
+ CrstHolder ch(&dataLock_);
+
+ data->state = (Data::State)(Data::CacheUpdated | data->state);
+ data->header.quickCache = type;
+ }
+
+ END_QCALL;
+}
+
+static HANDLE OpenCacheFile( Data* data )
+{
+ STANDARD_VM_CONTRACT;
+
+ CrstHolder ch(&SecurityConfig::dataLock_);
+
+ if (data->cache != INVALID_HANDLE_VALUE)
+ return data->cache;
+
+ _ASSERTE( FALSE && "This case should never happen" );
+
+ data->cache = WszCreateFile( data->cacheFileName, GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL );
+ if (data->cache == INVALID_HANDLE_VALUE)
+ return NULL;
+
+ // Check whether the cache has changed since we first looked at it.
+    // If it has but the config file hasn't, then we reload the header and
+    // start reading entries fresh. However, if the config file has changed
+    // as well, then the cache is stale and we have to ignore it.
+
+ if (CacheOutOfDate( &data->cacheFileTime, data->cache, NULL ))
+ {
+ if (CacheOutOfDate( &data->configFileTime, data->configFileName, NULL ))
+ return NULL;
+
+ if (ReadFileData( data->cache, (BYTE*)&data->header, sizeof( CacheHeader ) ) != S_OK)
+ return NULL;
+
+ data->cacheCurrentPosition = sizeof( CacheHeader );
+
+ if (data->oldCacheEntries != NULL)
+ {
+ ArrayList::Iterator iter = data->oldCacheEntries->Iterate();
+ while (iter.Next())
+ {
+ delete (CacheEntry*)iter.GetElement();
+ }
+ delete data->oldCacheEntries;
+ data->oldCacheEntries = new ArrayList();
+ }
+ }
+
+ return data->cache;
+}
+
+static BYTE* CompareEntry( CacheEntry* entry, DWORD numEvidence, DWORD evidenceSize, LPCBYTE evidenceBlock, HANDLE cache, DWORD* size)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE (entry);
+
+ if (entry->header.numItemsInKey == numEvidence &&
+ entry->header.keySize == evidenceSize)
+ {
+ if (entry->key == NULL)
+ {
+ // We were lazy in reading the entry. Read the key now.
+ entry->key = new BYTE[entry->header.keySize];
+
+ _ASSERTE (cache != INVALID_HANDLE_VALUE);
+
+ if (SetFilePointer( cache, entry->cachePosition, NULL, FILE_BEGIN ) == INVALID_SET_FILE_POINTER)
+ return NULL;
+
+ if (ReadFileData( cache, entry->key, entry->header.keySize ) != S_OK)
+ return NULL;
+
+ entry->cachePosition += entry->header.keySize;
+ }
+
+ _ASSERTE (entry->key);
+
+ if (memcmp( entry->key, evidenceBlock, entry->header.keySize ) == 0)
+ {
+ if (entry->data == NULL)
+ {
+ // We were lazy in reading the entry. Read the data also.
+ entry->data = new BYTE[entry->header.dataSize];
+
+ if (SetFilePointer( cache, entry->cachePosition, NULL, FILE_BEGIN ) == INVALID_SET_FILE_POINTER)
+ return NULL;
+
+ if (ReadFileData( cache, entry->data, entry->header.dataSize ) != S_OK)
+ return NULL;
+
+ entry->cachePosition += entry->header.dataSize;
+ }
+
+ entry->used = TRUE;
+ *size = entry->header.dataSize;
+
+ return entry->data;
+ }
+ }
+ return NULL;
+}
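+
+// Note that CompareEntry reads lazily in two phases: an entry's key is only
+// paged in from the cache file once the sizes match, and its data only once
+// the key matches, so a cache miss costs at most one key read per candidate.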
+
+BOOL QCALLTYPE SecurityConfig::GetCacheEntry(INT32 id, DWORD numEvidence, LPCBYTE pEvidence, DWORD cbEvidence, QCall::ObjectHandleOnStack retPolicy)
+{
+ QCALL_CONTRACT;
+
+ BOOL success = FALSE;
+
+ BEGIN_QCALL;
+
+ HANDLE cache = INVALID_HANDLE_VALUE;
+
+ BYTE* retval = NULL;
+ DWORD size = (DWORD) -1;
+
+ Data* data = (Data*)GetData( id );
+
+ if (data == NULL)
+ {
+ goto CLEANUP;
+ }
+
+ {
+
+ ArrayList::Iterator iter;
+
+ if ((data->state & Data::UsingCacheFile) == 0)
+ {
+        // We know we don't have anything in the cache file, so
+        // let's just look through the new entries to make sure we
+        // aren't getting any repeats.
+
+ iter = data->newCacheEntries->Iterate();
+
+ while (iter.Next())
+ {
+ // newCacheEntries do not need the cache file so pass in NULL.
+ retval = CompareEntry( (CacheEntry*)iter.GetElement(), numEvidence, cbEvidence, pEvidence, NULL, &size );
+
+ if (retval != NULL)
+ {
+ success = TRUE;
+ goto CLEANUP;
+ }
+ }
+
+ goto CLEANUP;
+ }
+
+    // It's possible that the old entries were not read in completely,
+    // so we make sure the cache file is open before iterating through
+    // the old entries.
+
+ cache = OpenCacheFile( data );
+
+ if ( cache == NULL )
+ {
+ goto CLEANUP;
+ }
+
+    // First, iterate over the old entries
+
+ {
+ CrstHolder ch(&dataLock_);
+
+ iter = data->oldCacheEntries->Iterate();
+ while (iter.Next())
+ {
+ retval = CompareEntry( (CacheEntry*)iter.GetElement(), numEvidence, cbEvidence, pEvidence, cache, &size );
+ if (retval != NULL)
+ {
+ success = TRUE;
+ goto CLEANUP;
+ }
+ }
+
+        // CrstHolder goes out of scope here
+ }
+
+ // Then try the existing new entries
+ iter = data->newCacheEntries->Iterate();
+ while (iter.Next())
+ {
+ // newCacheEntries do not need the cache file so pass in NULL.
+ retval = CompareEntry( (CacheEntry*)iter.GetElement(), numEvidence, cbEvidence, pEvidence, NULL, &size );
+ if (retval != NULL)
+ {
+ success = TRUE;
+ goto CLEANUP;
+ }
+ }
+
+ // Finally, try loading existing entries from the file
+
+ {
+ CrstHolder ch(&dataLock_);
+
+ if (SetFilePointer( cache, data->cacheCurrentPosition, NULL, FILE_BEGIN ) == INVALID_SET_FILE_POINTER)
+ {
+ goto CLEANUP;
+ }
+
+ do
+ {
+ CacheEntry* entry = LoadNextEntry( cache, data );
+ if (entry == NULL)
+ {
+ data->state = (Data::State)(Data::CacheExhausted | data->state);
+ break;
+ }
+
+ retval = CompareEntry( entry, numEvidence, cbEvidence, pEvidence, cache, &size );
+ if (retval != NULL)
+ {
+ success = TRUE;
+ break;
+ }
+ } while (TRUE);
+
+        // CrstHolder goes out of scope here
+ }
+ }
+
+CLEANUP:
+ if (success && retval != NULL)
+ {
+ _ASSERTE( size != (DWORD) -1 );
+ retPolicy.SetByteArray(retval, size);
+ }
+
+ END_QCALL;
+
+ return success;
+}
+
+void QCALLTYPE SecurityConfig::AddCacheEntry(INT32 id, DWORD numEvidence, LPCBYTE pEvidence, DWORD cbEvidence, LPCBYTE pPolicy, DWORD cbPolicy)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ Data* data = (Data*)GetData( id );
+
+ DWORD sizeOfEntry = 0;
+ NewHolder<CacheEntry> entry;
+
+ if (data == NULL)
+ {
+ goto lExit;
+ }
+
+    // In order to limit how much memory a long-running app can consume,
+    // we cap the total memory held by the new cache entries list.
+    // For now this limit matches the maximum cache file size.
+
+ sizeOfEntry = cbEvidence + cbPolicy + sizeof( CacheEntryHeader );
+
+ if (data->newEntriesSize + sizeOfEntry >= MAX_CACHEFILE_SIZE)
+ {
+ goto lExit;
+ }
+
+ entry = new CacheEntry();
+
+ entry->header.numItemsInKey = numEvidence;
+ entry->header.keySize = cbEvidence;
+ entry->header.dataSize = cbPolicy;
+
+ entry->key = new BYTE[entry->header.keySize];
+ entry->data = new BYTE[entry->header.dataSize];
+
+ memcpyNoGCRefs(entry->key, pEvidence, cbEvidence);
+ memcpyNoGCRefs(entry->data, pPolicy, cbPolicy);
+
+ {
+ CrstHolder ch(&dataLock_);
+
+ // Check the size again to handle the race.
+ if (data->newEntriesSize + sizeOfEntry < MAX_CACHEFILE_SIZE)
+ {
+ data->state = (Data::State)(Data::CacheUpdated | data->state);
+ IfFailThrow(data->newCacheEntries->Append( entry.GetValue() ));
+ entry.SuppressRelease();
+ data->newEntriesSize += sizeOfEntry;
+ }
+ }
+
+lExit: ;
+
+ END_QCALL;
+}
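+
+// A worked example of the size accounting in AddCacheEntry (illustrative
+// numbers): caching a 64-byte policy blob against 200 bytes of evidence
+// charges 200 + 64 + sizeof( CacheEntryHeader ) bytes to newEntriesSize, and
+// the entry is silently dropped (not an error) once that running total would
+// reach MAX_CACHEFILE_SIZE.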
+
+ArrayListStatic SecurityConfig::entries_;
+CrstStatic SecurityConfig::dataLock_;
+
+void SecurityConfig::Init( void )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ dataLock_.Init(CrstSecurityPolicyCache);
+ entries_.Init();
+}
+
+void SecurityConfig::Cleanup( void )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ ArrayList::Iterator iter = entries_.Iterate();
+
+ GCX_PREEMP();
+
+ CrstHolder ch(&dataLock_);
+
+ while (iter.Next())
+ {
+ ((Data*) iter.GetElement())->Cleanup();
+ }
+}
+
+void SecurityConfig::Delete( void )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ ArrayList::Iterator iter = entries_.Iterate();
+
+ while (iter.Next())
+ {
+ delete (Data*) iter.GetElement();
+ }
+
+ entries_.Destroy();
+ dataLock_.Destroy();
+}
+
+void QCALLTYPE SecurityConfig::_GetMachineDirectory(QCall::StringHandleOnStack retDirectory)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ WCHAR machine[MAX_PATH];
+
+ HRESULT hr = GetMachineDirectory(machine, MAX_PATH);
+ if (FAILED(hr))
+ ThrowHR(hr);
+
+ retDirectory.Set(machine);
+
+ END_QCALL;
+}
+
+void QCALLTYPE SecurityConfig::_GetUserDirectory(QCall::StringHandleOnStack retDirectory)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ WCHAR user[MAX_PATH];
+
+ BOOL result = GetUserDirectory(user, MAX_PATH);
+ if (result)
+ retDirectory.Set(user);
+
+ END_QCALL;
+}
+
+HRESULT SecurityConfig::GetMachineDirectory(__out_ecount(bufferCount) __out_z WCHAR* buffer, size_t bufferCount)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr;
+
+ DWORD length = (DWORD)bufferCount;
+ hr = GetInternalSystemDirectory(buffer, &length);
+ if (FAILED(hr))
+ return hr;
+
+    // Make sure we have enough buffer to concatenate the string.
+    // Note: the length includes the terminating zero.
+ if((bufferCount - wcslen(buffer) - 1) < wcslen(W("config\\")))
+ return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+
+ wcscat_s(buffer, bufferCount, W("config\\"));
+
+ return S_OK;
+}
+
+BOOL SecurityConfig::GetVIUserDirectory(__out_ecount(bufferCount) __out_z WCHAR* buffer, size_t bufferCount)
+{
+ STANDARD_VM_CONTRACT;
+
+ WCHAR scratchBuffer[MAX_PATH];
+ BOOL retval = FALSE;
+
+ if (!GetUserDir(buffer, bufferCount, TRUE))
+ goto CLEANUP;
+
+ wcscpy_s( scratchBuffer, COUNTOF(scratchBuffer), W("\\Microsoft\\CLR Security Config\\") );
+
+ if (bufferCount < wcslen( buffer ) + wcslen( scratchBuffer ) + 1)
+ {
+ goto CLEANUP;
+ }
+
+ wcscat_s( buffer, bufferCount, scratchBuffer );
+
+ retval = TRUE;
+
+CLEANUP:
+ return retval;
+}
+
+BOOL SecurityConfig::GetUserDirectory(__out_ecount(bufferCount) __out_z WCHAR* buffer, size_t bufferCount)
+{
+ STANDARD_VM_CONTRACT;
+
+ StackSString ssScratchBuffer;
+ BOOL retval = FALSE;
+
+ WCHAR* wszScratchBuffer = ssScratchBuffer.OpenUnicodeBuffer( (COUNT_T)bufferCount );
+ retval = GetVIUserDirectory(wszScratchBuffer, bufferCount);
+ ssScratchBuffer.CloseBuffer( (COUNT_T)wcslen( wszScratchBuffer ) );
+
+ if (!retval)
+ return retval;
+
+ ssScratchBuffer.Append( W("v") );
+ ssScratchBuffer.Append( VER_PRODUCTVERSION_NO_QFE_STR_L );
+ ssScratchBuffer.Append( W("\\") );
+
+#ifdef _WIN64
+ ssScratchBuffer.Append( W("64bit\\") );
+#endif // _WIN64
+
+ if (ssScratchBuffer.GetCount() + 1 > bufferCount)
+ return FALSE;
+
+ wcscpy_s( buffer, bufferCount, ssScratchBuffer.GetUnicode() );
+
+ return TRUE;
+}
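+
+// On success the buffer produced by GetUserDirectory has the shape
+// <profile>\Microsoft\CLR Security Config\v<version>\ with a trailing
+// "64bit\" segment appended on 64-bit runtimes.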
+
+BOOL QCALLTYPE SecurityConfig::WriteToEventLog(LPCWSTR wszMessage)
+{
+ QCALL_CONTRACT;
+
+ BOOL retVal = FALSE;
+
+ BEGIN_QCALL;
+
+ retVal = ReportEventCLR(
+ EVENTLOG_WARNING_TYPE, // event type
+ 0, // category
+ (DWORD)1000, // event identifier
+ NULL, // no user security identifier
+ &StackSString(wszMessage)); // message to log
+
+    END_QCALL;
+
+ return retVal;
+}
+
+#ifdef _DEBUG
+HRESULT QCALLTYPE SecurityConfig::DebugOut(LPCWSTR wszFileName, LPCWSTR wszMessage)
+{
+ HRESULT retVal = E_FAIL;
+
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ HandleHolder file(WszCreateFile( wszFileName, GENERIC_WRITE, 0, NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL ));
+
+ if (file == INVALID_HANDLE_VALUE)
+ {
+ goto lExit;
+ }
+
+ SetFilePointer( file, 0, NULL, FILE_END );
+
+ DWORD cbMessage;
+ DWORD cbWritten;
+
+ cbMessage = (DWORD)wcslen(wszMessage) * sizeof(WCHAR);
+ if (!WriteFile( file, wszMessage, cbMessage, &cbWritten, NULL ))
+ {
+ goto lExit;
+ }
+
+ if (cbMessage != cbWritten)
+ {
+ goto lExit;
+ }
+
+ retVal = S_OK;
+
+lExit: ;
+ END_QCALL;
+
+ return retVal;
+}
+#endif
+
+#endif // FEATURE_CAS_POLICY
diff --git a/src/vm/securityconfig.h b/src/vm/securityconfig.h
new file mode 100644
index 0000000000..dcd826d44d
--- /dev/null
+++ b/src/vm/securityconfig.h
@@ -0,0 +1,123 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: SecurityConfig.h
+//
+
+//
+// Native implementation for security config access and manipulation
+//
+
+
+#ifndef _COMSecurityConfig_H_
+#define _COMSecurityConfig_H_
+#ifdef FEATURE_CAS_POLICY
+
+#include "qcall.h"
+
+class SecurityConfig
+{
+friend struct CacheHeader;
+
+private:
+ // These structures can be removed in the next SxS runtime version when we won't have to potentially read
+ // config files generated by an in-place runtime that used to include them in the header.
+ enum RegistryExtensionsAccessStatus {
+ Unknown = 0,
+ NoExtensions = 1,
+ AccessFailure = 2,
+ AccessSuccess = 3
+ };
+
+ struct RegistryExtensionsInfo {
+ RegistryExtensionsAccessStatus eStatus;
+ FILETIME ftLastWriteTime;
+ };
+
+public:
+ // Duplicated in System.Security.Util.Config.cs
+ enum ConfigId
+ {
+ None = 0,
+ MachinePolicyLevel = 1,
+ UserPolicyLevel = 2,
+ EnterprisePolicyLevel = 3,
+ };
+
+ // Duplicated in System.Security.Util.Config.cs
+ enum QuickCacheEntryType
+ {
+ FullTrustZoneMyComputer = 0x1000000,
+ FullTrustZoneIntranet = 0x2000000,
+ FullTrustZoneInternet = 0x4000000,
+ FullTrustZoneTrusted = 0x8000000,
+ FullTrustZoneUntrusted = 0x10000000,
+ FullTrustAll = 0x20000000,
+ };
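+
+    // The QuickCacheEntryType values are distinct bits so that several of
+    // them can be OR'd together into the cache header's quickCache field;
+    // GetQuickCacheEntry tests membership with a simple mask, e.g.
+    // (header.quickCache & FullTrustZoneIntranet).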
+
+ // Duplicated in System.Security.Util.Config.cs
+ enum ConfigRetval
+ {
+ NoFile = 0,
+ ConfigFile = 1,
+ CacheFile = 2
+ };
+
+ static ConfigRetval InitData( INT32 id, const WCHAR* configFileName, const WCHAR* cacheFileName );
+ static ConfigRetval InitData( void* configData, BOOL addToList );
+
+ static BOOL SaveCacheData( INT32 id );
+
+ static
+ void QCALLTYPE ResetCacheData(INT32 id);
+
+ static
+ HRESULT QCALLTYPE SaveDataByte(LPCWSTR wszConfigPath, LPCBYTE pbData, DWORD cbData);
+
+ static
+ BOOL QCALLTYPE RecoverData(INT32 id);
+
+ static
+ void QCALLTYPE SetQuickCache(INT32 id, QuickCacheEntryType type);
+
+ static
+ BOOL QCALLTYPE GetCacheEntry(INT32 id, DWORD numEvidence, LPCBYTE pEvidence, DWORD cbEvidence, QCall::ObjectHandleOnStack retPolicy);
+
+ static
+ void QCALLTYPE AddCacheEntry(INT32 id, DWORD numEvidence, LPCBYTE pEvidence, DWORD cbEvidence, LPCBYTE pPolicy, DWORD cbPolicy);
+
+ static
+ void QCALLTYPE _GetMachineDirectory(QCall::StringHandleOnStack retDirectory);
+
+ static
+ void QCALLTYPE _GetUserDirectory(QCall::StringHandleOnStack retDirectory);
+
+ static HRESULT GetMachineDirectory (__out_ecount(bufferCount) __out_z WCHAR* buffer, size_t bufferCount);
+ static BOOL GetUserDirectory(__out_ecount(bufferCount) __out_z WCHAR* buffer, size_t bufferCount);
+ static BOOL GetVIUserDirectory(__out_ecount(bufferCount) __out_z WCHAR* buffer, size_t bufferCount);
+
+ static
+ BOOL QCALLTYPE WriteToEventLog(LPCWSTR wszMessage);
+
+#ifdef _DEBUG
+ static
+ HRESULT QCALLTYPE DebugOut(LPCWSTR wszFileName, LPCWSTR wszMessage);
+#endif
+
+ static void Init( void );
+ static void Cleanup( void );
+ static void Delete( void );
+
+ static BOOL GetQuickCacheEntry( INT32 id, QuickCacheEntryType type );
+
+ static void* GetData( INT32 id );
+
+ static ArrayListStatic entries_;
+ static CrstStatic dataLock_;
+
+ static WCHAR* wcscatDWORD( __out_ecount(cchdst) __out_z WCHAR* dst, size_t cchdst, DWORD num );
+};
+#endif // FEATURE_CAS_POLICY
+#endif // _COMSecurityConfig_H_
diff --git a/src/vm/securitydeclarative.cpp b/src/vm/securitydeclarative.cpp
new file mode 100644
index 0000000000..d302a6c208
--- /dev/null
+++ b/src/vm/securitydeclarative.cpp
@@ -0,0 +1,1793 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#include "common.h"
+
+#include "security.h"
+#include "securitydeclarative.inl"
+#include "eventtrace.h"
+
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif // FEATURE_REMOTING
+
+
+//-----------------------------------------------------------------------------
+//
+//
+// CODE FOR MAKING THE SECURITY STUB AT JIT-TIME
+//
+//
+//-----------------------------------------------------------------------------
+
+
+enum DeclSecMergeMethod
+{
+ DS_METHOD_OVERRIDE,
+ DS_CLASS_OVERRIDE,
+ DS_UNION,
+ DS_INTERSECT,
+ DS_APPLY_METHOD_THEN_CLASS, // not supported with stack modifier actions
+ DS_APPLY_CLASS_THEN_METHOD, // not supported with stack modifier actions
+ DS_NOT_APPLICABLE, // action not supported on both method and class
+};
+
+// (Note: The values that are DS_NOT_APPLICABLE are not hooked up to
+// this table, so changing one of those values will have no effect)
+const DeclSecMergeMethod g_DeclSecClassAndMethodMergeTable[] =
+{
+ DS_NOT_APPLICABLE, // dclActionNil = 0
+ DS_NOT_APPLICABLE, // dclRequest = 1
+ DS_UNION, // dclDemand = 2
+ DS_METHOD_OVERRIDE, // dclAssert = 3
+ DS_UNION, // dclDeny = 4
+ DS_INTERSECT, // dclPermitOnly = 5
+ DS_NOT_APPLICABLE, // dclLinktimeCheck = 6
+ DS_NOT_APPLICABLE, // dclInheritanceCheck = 7
+ DS_NOT_APPLICABLE, // dclRequestMinimum = 8
+ DS_NOT_APPLICABLE, // dclRequestOptional = 9
+ DS_NOT_APPLICABLE, // dclRequestRefuse = 10
+ DS_NOT_APPLICABLE, // dclPrejitGrant = 11
+ DS_NOT_APPLICABLE, // dclPrejitDenied = 12
+ DS_UNION, // dclNonCasDemand = 13
+ DS_NOT_APPLICABLE, // dclNonCasLinkDemand = 14
+ DS_NOT_APPLICABLE, // dclNonCasInheritance = 15
+};
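+
+// Reading the table above: a Demand (dclDemand) declared on both a method and
+// its enclosing class maps to DS_UNION, so the union of both permission sets
+// is demanded, while an Assert on both maps to DS_METHOD_OVERRIDE, so only
+// the method's assert takes effect.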
+
+// This table specifies the order in which runtime declarative actions will be performed
+// (Note that for stack-modifying actions, this means the order in which they are applied to the
+// frame descriptor, not the order in which they are evaluated when a demand is performed.
+// That order is determined by the code in System.Security.FrameSecurityDescriptor.)
+const CorDeclSecurity g_RuntimeDeclSecOrderTable[] =
+{
+ dclPermitOnly, // 5
+ dclDeny, // 4
+ dclAssert, // 3
+ dclDemand, // 2
+ dclNonCasDemand, // 13
+};
+
+#define DECLSEC_RUNTIME_ACTION_COUNT (sizeof(g_RuntimeDeclSecOrderTable) / sizeof(CorDeclSecurity))
+
+
+TokenDeclActionInfo* TokenDeclActionInfo::Init(DWORD dwAction, PsetCacheEntry *pPCE)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ AppDomain *pDomain = GetAppDomain();
+
+ TokenDeclActionInfo *pTemp =
+ static_cast<TokenDeclActionInfo*>((void*)pDomain->GetLowFrequencyHeap()
+ ->AllocMem(S_SIZE_T(sizeof(TokenDeclActionInfo))));
+
+ pTemp->dwDeclAction = dwAction;
+ pTemp->pPCE = pPCE;
+ pTemp->pNext = NULL;
+
+ return pTemp;
+}
+
+void TokenDeclActionInfo::LinkNewDeclAction(TokenDeclActionInfo** ppActionList, CorDeclSecurity action, PsetCacheEntry *pPCE)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ TokenDeclActionInfo *temp = Init(DclToFlag(action), pPCE);
+ if (!(*ppActionList))
+ *ppActionList = temp;
+ else
+ {
+ temp->pNext = *ppActionList;
+ *ppActionList = temp;
+ }
+}
+
+DeclActionInfo *DeclActionInfo::Init(MethodDesc *pMD, DWORD dwAction, PsetCacheEntry *pPCE)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ DeclActionInfo *pTemp = (DeclActionInfo *)(void*)pMD->GetDomainSpecificLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(DeclActionInfo)));
+
+ pTemp->dwDeclAction = dwAction;
+ pTemp->pPCE = pPCE;
+ pTemp->pNext = NULL;
+
+ return pTemp;
+}
+
+void LinkNewDeclAction(DeclActionInfo** ppActionList, CorDeclSecurity action, PsetCacheEntry *pPCE, MethodDesc *pMeth)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ DeclActionInfo *temp = DeclActionInfo::Init(pMeth, DclToFlag(action), pPCE);
+ if (!(*ppActionList))
+ *ppActionList = temp;
+ else
+ {
+ // Add overrides to the end of the list, all others to the front
+ if (IsDclActionAnyStackModifier(action))
+ {
+ DeclActionInfo *w = *ppActionList;
+ while (w->pNext != NULL)
+ w = w->pNext;
+ w->pNext = temp;
+ }
+ else
+ {
+ temp->pNext = *ppActionList;
+ *ppActionList = temp;
+ }
+ }
+}
+
+void SecurityDeclarative::AddDeclAction(CorDeclSecurity action, PsetCacheEntry *pClassPCE, PsetCacheEntry *pMethodPCE, DeclActionInfo** ppActionList, MethodDesc *pMeth)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ if(pClassPCE == NULL)
+ {
+ if(pMethodPCE == NULL)
+ return;
+ LinkNewDeclAction(ppActionList, action, pMethodPCE, pMeth);
+ return;
+ }
+ else if(pMethodPCE == NULL)
+ {
+ LinkNewDeclAction(ppActionList, action, pClassPCE, pMeth);
+ return;
+ }
+
+ // Merge class and method declarations
+ switch(g_DeclSecClassAndMethodMergeTable[action])
+ {
+ case DS_METHOD_OVERRIDE:
+ LinkNewDeclAction(ppActionList, action, pMethodPCE, pMeth);
+ break;
+
+ case DS_CLASS_OVERRIDE:
+ LinkNewDeclAction(ppActionList, action, pClassPCE, pMeth);
+ break;
+
+ case DS_UNION:
+#ifdef FEATURE_CAS_POLICY
+ LinkNewDeclAction(ppActionList, action, SecurityAttributes::MergePermissionSets(pClassPCE, pMethodPCE, false, action), pMeth);
+#else // FEATURE_CAS_POLICY
+ _ASSERTE(!"Declarative permission sets may not be unioned together in CoreCLR. Are you attempting to have a declarative demand or deny on both a method and its enclosing class?");
+#endif // FEATURE_CAS_POLICY
+ break;
+
+ case DS_INTERSECT:
+#ifdef FEATURE_CAS_POLICY
+ LinkNewDeclAction(ppActionList, action, SecurityAttributes::MergePermissionSets(pClassPCE, pMethodPCE, true, action), pMeth);
+#else // FEATURE_CAS_POLICY
+ _ASSERTE(!"Declarative permission sets may not be intersected in CoreCLR. Are you attempting to have a declarative permit only on both a method and its enclosing class?");
+#endif // FEATURE_CAS_POLICY
+ break;
+
+ case DS_APPLY_METHOD_THEN_CLASS:
+ LinkNewDeclAction(ppActionList, action, pClassPCE, pMeth); // note: order reversed because LinkNewDeclAction inserts at beginning of list
+ LinkNewDeclAction(ppActionList, action, pMethodPCE, pMeth);
+ break;
+
+ case DS_APPLY_CLASS_THEN_METHOD:
+ LinkNewDeclAction(ppActionList, action, pMethodPCE, pMeth); // note: order reversed because LinkNewDeclAction inserts at beginning of list
+ LinkNewDeclAction(ppActionList, action, pClassPCE, pMeth);
+ break;
+
+ case DS_NOT_APPLICABLE:
+ _ASSERTE(!"not a runtime action");
+ break;
+
+ default:
+ _ASSERTE(!"unexpected merge type");
+ break;
+ }
+}
+
+
+// Here we determine what declarative actions are needed every time a method is
+// called, and create a list of these actions, which will be emitted as an
+// argument to DoDeclarativeSecurity.
+DeclActionInfo* SecurityDeclarative::DetectDeclActions(MethodDesc *pMeth, DWORD dwDeclFlags)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ GCX_COOP();
+
+ DeclActionInfo *pDeclActions = NULL;
+
+ IMDInternalImport *pInternalImport = pMeth->GetMDImport();
+
+    // Let's check the NDirect/Interop cases first
+ if (dwDeclFlags & DECLSEC_UNMNGD_ACCESS_DEMAND)
+ {
+ HRESULT hr = S_FALSE;
+ if (pMeth->HasSuppressUnmanagedCodeAccessAttr())
+ {
+ dwDeclFlags &= ~DECLSEC_UNMNGD_ACCESS_DEMAND;
+ }
+ else
+ {
+ MethodTable * pMT = pMeth->GetMethodTable();
+ EEClass * pClass = pMT->GetClass();
+
+            // The flag may be set speculatively, so verify by checking the custom attribute
+
+ if (pClass->HasSuppressUnmanagedCodeAccessAttr())
+ {
+#ifdef FEATURE_CORECLR
+ hr = S_OK;
+#else
+ hr = pInternalImport->GetCustomAttributeByName(pMT->GetCl(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL);
+#endif // FEATURE_CORECLR
+ if (hr != S_OK)
+ {
+ g_IBCLogger.LogEEClassCOWTableAccess(pMT);
+ pClass->SetDoesNotHaveSuppressUnmanagedCodeAccessAttr();
+ }
+ }
+ _ASSERTE(SUCCEEDED(hr));
+ if (hr == S_OK)
+ dwDeclFlags &= ~DECLSEC_UNMNGD_ACCESS_DEMAND;
+ }
+        // Check whether there are any actions left
+ if (dwDeclFlags == 0)
+ return NULL;
+
+ if (dwDeclFlags & DECLSEC_UNMNGD_ACCESS_DEMAND)
+ {
+ // A NDirect/Interop demand is required.
+ DeclActionInfo *temp = DeclActionInfo::Init(pMeth, DECLSEC_UNMNGD_ACCESS_DEMAND, NULL);
+ if (!pDeclActions)
+ pDeclActions = temp;
+ else
+ {
+ temp->pNext = pDeclActions;
+ pDeclActions = temp;
+ }
+ }
+ } // if DECLSEC_UNMNGD_ACCESS_DEMAND
+
+ // Find class declarations
+ PsetCacheEntry* classSetPermissions[dclMaximumValue + 1];
+ DetectDeclActionsOnToken(pMeth->GetMethodTable()->GetCl(), dwDeclFlags, classSetPermissions, pInternalImport);
+
+ // Find method declarations
+ PsetCacheEntry* methodSetPermissions[dclMaximumValue + 1];
+ DetectDeclActionsOnToken(pMeth->GetMemberDef(), dwDeclFlags, methodSetPermissions, pInternalImport);
+
+ // Make sure the g_DeclSecClassAndMethodMergeTable is okay
+ _ASSERTE(sizeof(g_DeclSecClassAndMethodMergeTable) == sizeof(DeclSecMergeMethod) * (dclMaximumValue + 1) &&
+ "g_DeclSecClassAndMethodMergeTable wrong size!");
+
+ // Merge class and method runtime declarations into a single linked list of set indexes
+ int i;
+ for(i = DECLSEC_RUNTIME_ACTION_COUNT - 1; i >= 0; i--) // note: the loop uses reverse order because AddDeclAction inserts at beginning of the list
+ {
+ CorDeclSecurity action = g_RuntimeDeclSecOrderTable[i];
+ _ASSERTE(action > dclActionNil && action <= dclMaximumValue && "action out of range");
+ AddDeclAction(action, classSetPermissions[action], methodSetPermissions[action], &pDeclActions, pMeth);
+ }
+
+ return pDeclActions;
+}
+
+void SecurityDeclarative::DetectDeclActionsOnToken(mdToken tk, DWORD dwDeclFlags, PsetCacheEntry** pSets, IMDInternalImport *pInternalImport)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ // Make sure the DCL to Flag table is okay
+ _ASSERTE(DclToFlag(dclDemand) == DECLSEC_DEMANDS &&
+ sizeof(DCL_FLAG_MAP) == sizeof(DWORD) * (dclMaximumValue + 1) &&
+ "DCL_FLAG_MAP out of sync with CorDeclSecurity!");
+
+ // Initialize the array
+ int i;
+ for(i = 0; i < dclMaximumValue + 1; i++)
+ pSets[i] = NULL;
+
+ // Look up declarations on the token for each SecurityAction
+ DWORD dwAction;
+ for (dwAction = 0; dwAction <= dclMaximumValue; dwAction++)
+ {
+ // don't bother with actions that are not in the requested mask
+ CorDeclSecurity action = (CorDeclSecurity)dwAction;
+ DWORD dwActionFlag = DclToFlag(action);
+ if ((dwDeclFlags & dwActionFlag) == 0)
+ continue;
+
+ // Load the PermissionSet or PermissionSetCollection from the security action table in the metadata
+ PsetCacheEntry *pPCE;
+ HRESULT hr = SecurityAttributes::GetDeclaredPermissions(pInternalImport, tk, action, NULL, &pPCE);
+ if (hr != S_OK) // returns S_FALSE if it didn't find anything in the metadata
+ continue;
+
+ pSets[dwAction] = pPCE;
+ }
+}
+
+// Returns TRUE if there is a possibility that a token has declarations of the type specified by 'action'
+// Returns FALSE if it can determine that the token definitely does not.
+BOOL SecurityDeclarative::TokenMightHaveDeclarations(IMDInternalImport *pInternalImport, mdToken token, CorDeclSecurity action)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ HENUMInternal hEnumDcl;
+ DWORD cDcl;
+
+ // Check if the token has declarations for
+ // the action specified.
+ hr = pInternalImport->EnumPermissionSetsInit(
+ token,
+ action,
+ &hEnumDcl);
+
+ if (FAILED(hr) || hr == S_FALSE)
+ {
+ // PermissionSets for non-CAS actions are special cases because they may be mixed with
+ // the set for the corresponding CAS action in a serialized CORSEC_PSET
+ if(action == dclNonCasDemand || action == dclNonCasLinkDemand || action == dclNonCasInheritance)
+ {
+ // See if the corresponding CAS action has permissions
+ BOOL fDoCheck = FALSE;
+ if(action == dclNonCasDemand)
+ fDoCheck = TokenMightHaveDeclarations(pInternalImport, token, dclDemand);
+ else if(action == dclNonCasLinkDemand)
+ fDoCheck = TokenMightHaveDeclarations(pInternalImport, token, dclLinktimeCheck);
+ else if(action == dclNonCasInheritance)
+ fDoCheck = TokenMightHaveDeclarations(pInternalImport, token, dclInheritanceCheck);
+ if(fDoCheck)
+ {
+                // We can't tell for sure if there are declarations unless we deserialize something
+                // (which is too expensive), so we'll just return TRUE.
+ return TRUE;
+ /*
+ OBJECTREF refPermSet = NULL;
+ DWORD dwIndex = ~0;
+ hr = SecurityAttributes::GetDeclaredPermissionsWithCache(pInternalImport, token, action, &refPermSet, &dwIndex);
+ if(refPermSet != NULL)
+ {
+ _ASSERTE(dwIndex != (~0));
+ return TRUE;
+ }
+ */
+ }
+ }
+ pInternalImport->EnumClose(&hEnumDcl);
+ return FALSE;
+ }
+
+ cDcl = pInternalImport->EnumGetCount(&hEnumDcl);
+ pInternalImport->EnumClose(&hEnumDcl);
+
+ return (cDcl > 0);
+}
+
+
+bool SecurityDeclarative::BlobMightContainNonCasPermission(PBYTE pbAttrSet, ULONG cbAttrSet, DWORD dwAction, bool* pHostProtectionOnly)
+{
+ CONTRACTL {
+ THROWS;
+ } CONTRACTL_END;
+
+ // Deserialize the CORSEC_ATTRSET
+ CORSEC_ATTRSET attrSet;
+ HRESULT hr = BlobToAttributeSet(pbAttrSet, cbAttrSet, &attrSet, dwAction);
+ if(FAILED(hr))
+ COMPlusThrowHR(hr);
+
+    // This works because SecurityAttributes::ContainsBuiltinCASPermsOnly only returns
+    // true if the attribute set contains nothing but well-known CAS permissions.
+ return !SecurityAttributes::ContainsBuiltinCASPermsOnly(&attrSet, pHostProtectionOnly);
+}
+
+// Accumulate status of declarative security.
+HRESULT SecurityDeclarative::GetDeclarationFlags(IMDInternalImport *pInternalImport, mdToken token, DWORD* pdwFlags, DWORD* pdwNullFlags, BOOL* pfHasSuppressUnmanagedCodeAccessAttr /*[IN:TRUE if Pinvoke/Cominterop][OUT:FALSE if doesn't have attr]*/)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ HENUMInternal hEnumDcl;
+ HRESULT hr;
+ DWORD dwFlags = 0;
+ DWORD dwNullFlags = 0;
+
+ _ASSERTE(pdwFlags);
+ *pdwFlags = 0;
+
+ if (pdwNullFlags)
+ *pdwNullFlags = 0;
+
+ hr = pInternalImport->EnumPermissionSetsInit(token, dclActionNil, &hEnumDcl);
+ if (FAILED(hr))
+ goto Exit;
+
+ if (hr == S_OK)
+ {
+        // Look through the security action table in the metadata for declared permission sets.
+ mdPermission perms;
+ DWORD dwAction;
+ DWORD dwDclFlags;
+ ULONG cbPerm;
+ PBYTE pbPerm;
+ while (pInternalImport->EnumNext(&hEnumDcl, &perms))
+ {
+ hr = pInternalImport->GetPermissionSetProps(
+ perms,
+ &dwAction,
+ (const void**)&pbPerm,
+ &cbPerm);
+ if (FAILED(hr))
+ {
+ goto Exit;
+ }
+
+ dwDclFlags = DclToFlag(dwAction);
+
+ if ((cbPerm > 0) && (pbPerm[0] == LAZY_DECL_SEC_FLAG)) // indicates a serialized CORSEC_PSET
+ {
+ bool hostProtectionOnly; // gets initialized in call to BlobMightContainNonCasPermission
+ if (BlobMightContainNonCasPermission(pbPerm, cbPerm, dwAction, &hostProtectionOnly))
+ {
+ switch (dwAction)
+ {
+ case dclDemand:
+ dwFlags |= DclToFlag(dclNonCasDemand);
+ break;
+ case dclLinktimeCheck:
+ dwFlags |= DclToFlag(dclNonCasLinkDemand);
+ break;
+ case dclInheritanceCheck:
+ dwFlags |= DclToFlag(dclNonCasInheritance);
+ break;
+ }
+ }
+ else
+ {
+ if (hostProtectionOnly)
+ {
+ // If this is a linkcheck for HostProtection only, let's capture that in the flags.
+ // Subsequently, this will be captured in the bit mask on EEClass/MethodDesc
+ // and used when deciding whether to insert runtime callouts for transparency
+ dwDclFlags |= DECLSEC_LINK_CHECKS_HPONLY;
+ }
+ }
+ }
+
+ dwFlags |= dwDclFlags;
+ }
+ }
+ pInternalImport->EnumClose(&hEnumDcl);
+
+ // Disable any runtime checking of UnmanagedCode permission if the correct
+ // custom attribute is present.
+    // By default we do the check, except when the passed-in BOOL* tells us not to.
+
+ BOOL hasSuppressUnmanagedCodeAccessAttr;
+ if (pfHasSuppressUnmanagedCodeAccessAttr == NULL)
+ {
+#ifdef FEATURE_CORECLR
+ hasSuppressUnmanagedCodeAccessAttr = TRUE;
+#else
+ hasSuppressUnmanagedCodeAccessAttr =
+ (pInternalImport->GetCustomAttributeByName(token,
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_OK);
+#endif
+ }
+ else
+ hasSuppressUnmanagedCodeAccessAttr = *pfHasSuppressUnmanagedCodeAccessAttr;
+
+
+ if (hasSuppressUnmanagedCodeAccessAttr)
+ {
+ dwFlags |= DECLSEC_UNMNGD_ACCESS_DEMAND;
+ dwNullFlags |= DECLSEC_UNMNGD_ACCESS_DEMAND;
+ }
+
+ *pdwFlags = dwFlags;
+ if (pdwNullFlags)
+ *pdwNullFlags = dwNullFlags;
+
+Exit:
+ return hr;
+}
+
+void SecurityDeclarative::ClassInheritanceCheck(MethodTable *pClass, MethodTable *pParent)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pClass));
+ PRECONDITION(CheckPointer(pParent));
+ PRECONDITION(!pClass->IsInterface());
+ }
+ CONTRACTL_END;
+
+ // Regular check since Fast path check didn't succeed
+ TypeSecurityDescriptor typeSecDesc(pParent);
+ typeSecDesc.InvokeInheritanceChecks(pClass);
+}
+
+void SecurityDeclarative::MethodInheritanceCheck(MethodDesc *pMethod, MethodDesc *pParent)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMethod));
+ PRECONDITION(CheckPointer(pParent));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Regular check since Fast path check didn't succeed
+ MethodSecurityDescriptor MDSecDesc(pParent);
+ MDSecDesc.InvokeInheritanceChecks(pMethod);
+}
+
+//---------------------------------------------------------
+// Invoke linktime checks on the caller if demands exist
+// for the callee.
+//---------------------------------------------------------
+/*static*/
+void SecurityDeclarative::LinktimeCheckMethod(Assembly *pCaller, MethodDesc *pCallee)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+#ifdef FEATURE_CAS_POLICY
+ // Do a fulltrust check on the caller if the callee is fully trusted
+ if (FullTrustCheckForLinkOrInheritanceDemand(pCaller))
+ {
+ return;
+ }
+
+#ifdef CROSSGEN_COMPILE
+ CrossGenNotSupported("SecurityDeclarative::LinktimeCheckMethod");
+#else
+ GCX_COOP();
+
+ MethodTable *pTargetMT = pCallee->GetMethodTable();
+
+ // If it's a delegate BeginInvoke, we need to perform a HostProtection check for synchronization
+ if(pTargetMT->IsDelegate())
+ {
+ DelegateEEClass* pDelegateClass = (DelegateEEClass*)pTargetMT->GetClass();
+ if(pCallee == pDelegateClass->m_pBeginInvokeMethod)
+ {
+ EApiCategories eProtectedCategories = GetHostProtectionManager()->GetProtectedCategories();
+ if((eProtectedCategories & eSynchronization) == eSynchronization)
+ {
+ if(!pCaller->GetSecurityDescriptor()->IsFullyTrusted())
+ {
+ ThrowHPException(eProtectedCategories, eSynchronization);
+ }
+ }
+ }
+ }
+
+ // the rest of the LinkDemand checks
+ {
+        // Track perfmon counters. Linktime security checks.
+ COUNTER_ONLY(GetPerfCounters().m_Security.cLinkChecks++);
+
+#ifdef FEATURE_APTCA
+ // APTCA check
+ SecurityDeclarative::DoUntrustedCallerChecks(pCaller, pCallee, FALSE);
+#endif // FEATURE_APTCA
+
+ // If the class has its own linktime checks, do them first...
+ if (pTargetMT->GetClass()->RequiresLinktimeCheck())
+ {
+ TypeSecurityDescriptor::InvokeLinktimeChecks(pTargetMT, pCaller);
+ }
+
+ // If the previous check passed, check the method for
+ // method-specific linktime checks...
+ if (IsMdHasSecurity(pCallee->GetAttrs()) &&
+ (TokenMightHaveDeclarations(pTargetMT->GetMDImport(),
+ pCallee->GetMemberDef(),
+ dclLinktimeCheck) ||
+ TokenMightHaveDeclarations(pTargetMT->GetMDImport(),
+ pCallee->GetMemberDef(),
+ dclNonCasLinkDemand) ))
+ {
+ MethodSecurityDescriptor::InvokeLinktimeChecks(pCallee, pCaller);
+ }
+
+ // We perform automatic linktime checks for UnmanagedCode in three cases:
+ // o P/Invoke calls
+ // o Calls through an interface that have a suppress runtime check
+ // attribute on them (these are almost certainly interop calls).
+ // o Interop calls made through method impls.
+ if (pCallee->IsNDirect() ||
+ (pTargetMT->IsInterface() &&
+ (pTargetMT->GetMDImport()->GetCustomAttributeByName(pTargetMT->GetCl(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_OK ||
+ pTargetMT->GetMDImport()->GetCustomAttributeByName(pCallee->GetMemberDef(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_OK) ) ||
+ (pCallee->IsComPlusCall() && !pCallee->IsInterface()))
+ {
+ if (!pCaller->GetSecurityDescriptor()->CanCallUnmanagedCode())
+ {
+ Security::ThrowSecurityException(g_SecurityPermissionClassName, SPFLAGSUNMANAGEDCODE);
+ }
+ }
+ }
+
+#endif // !CROSSGEN_COMPILE
+
+#endif // FEATURE_CAS_POLICY
+}
+
+#ifndef CROSSGEN_COMPILE
+//-----------------------------------------------------------------------------
+//
+//
+// CODE FOR PERFORMING JIT-TIME CHECKS
+//
+//
+//-----------------------------------------------------------------------------
+
+void SecurityDeclarative::_GetSharedPermissionInstance(OBJECTREF *perm, int index)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ _ASSERTE(index < (int) NUM_PERM_OBJECTS);
+
+ AppDomain *pDomain = GetAppDomain();
+ SharedPermissionObjects *pShared = &pDomain->m_pSecContext->m_rPermObjects[index];
+
+ if (pShared->hPermissionObject == NULL) {
+ pShared->hPermissionObject = pDomain->CreateHandle(NULL);
+ *perm = NULL;
+ }
+ else
+ *perm = ObjectFromHandle(pShared->hPermissionObject);
+
+ if (*perm == NULL)
+ {
+ MethodTable *pMT = NULL;
+ OBJECTREF p = NULL;
+
+ GCPROTECT_BEGIN(p);
+
+ pMT = MscorlibBinder::GetClass(pShared->idClass);
+ MethodDescCallSite ctor(pShared->idConstructor);
+
+ p = AllocateObject(pMT);
+
+ ARG_SLOT argInit[2] =
+ {
+ ObjToArgSlot(p),
+ (ARG_SLOT) pShared->dwPermissionFlag
+ };
+
+ ctor.Call(argInit);
+
+ StoreObjectInHandle(pShared->hPermissionObject, p);
+ *perm = p;
+
+ GCPROTECT_END();
+ }
+}
+
+#ifdef FEATURE_APTCA
+void DECLSPEC_NORETURN SecurityDeclarative::ThrowAPTCAException(Assembly *pCaller, MethodDesc *pCallee)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ MethodDescCallSite throwSecurityException(METHOD__SECURITY_ENGINE__THROW_SECURITY_EXCEPTION);
+
+ OBJECTREF callerObj = NULL;
+ if (pCaller != NULL && pCaller->GetDomain() == GetAppDomain())
+ callerObj = pCaller->GetExposedObject();
+
+ ARG_SLOT args[7];
+ args[0] = ObjToArgSlot(callerObj);
+ args[1] = ObjToArgSlot(NULL);
+ args[2] = ObjToArgSlot(NULL);
+ args[3] = PtrToArgSlot(pCallee);
+ args[4] = (ARG_SLOT)dclLinktimeCheck;
+ args[5] = ObjToArgSlot(NULL);
+ args[6] = ObjToArgSlot(NULL);
+ throwSecurityException.Call(args);
+
+ UNREACHABLE();
+}
+#endif // FEATURE_APTCA
+
+#ifdef FEATURE_CAS_POLICY
+void DECLSPEC_NORETURN SecurityDeclarative::ThrowHPException(EApiCategories protectedCategories, EApiCategories demandedCategories)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ OBJECTREF hpException = NULL;
+ GCPROTECT_BEGIN(hpException);
+
+ MethodTable* pMT = MscorlibBinder::GetClass(CLASS__HOST_PROTECTION_EXCEPTION);
+ hpException = (OBJECTREF) AllocateObject(pMT);
+
+
+ MethodDescCallSite ctor(METHOD__HOST_PROTECTION_EXCEPTION__CTOR);
+
+ ARG_SLOT arg[3] = {
+ ObjToArgSlot(hpException),
+ protectedCategories,
+ demandedCategories
+ };
+ ctor.Call(arg);
+
+ COMPlusThrow(hpException);
+
+ GCPROTECT_END();
+}
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_APTCA
+BOOL SecurityDeclarative::IsUntrustedCallerCheckNeeded(MethodDesc *pCalleeMD, Assembly *pCallerAssem)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Assembly *pCalleeAssembly = pCalleeMD->GetAssembly();
+ _ASSERTE(pCalleeAssembly != NULL);
+
+    // APTCA is only enforced for cross-assembly calls, so if the target is not accessible from outside
+ // the assembly, or if the caller and callee are both within the same assembly, we do not need to
+ // do any APTCA checks
+ if (pCallerAssem == pCalleeAssembly)
+ {
+ return FALSE;
+ }
+
+ if (!MethodIsVisibleOutsideItsAssembly(pCalleeMD))
+ {
+ return FALSE;
+ }
+
+ // If the target assembly allows untrusted callers unconditionally, then the call should be allowed
+ if (pCalleeAssembly->AllowUntrustedCaller())
+ {
+ return FALSE;
+ }
+
+ // Otherwise, we need to ensure the caller is fully trusted
+ return TRUE;
+}
+#endif // FEATURE_APTCA
+
+
+#ifdef FEATURE_APTCA
+// Do a fulltrust check on the caller if the callee is fully trusted and
+// callee did not enable AllowUntrustedCallerChecks
+/*static*/
+void SecurityDeclarative::DoUntrustedCallerChecks(
+ Assembly *pCaller, MethodDesc *pCallee,
+ BOOL fFullStackWalk)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ BOOL fRet = TRUE;
+
+#ifdef _DEBUG
+ if (!g_pConfig->Do_AllowUntrustedCaller_Checks())
+ return;
+#endif
+
+ if (!IsUntrustedCallerCheckNeeded(pCallee, pCaller))
+ return;
+
+    // Expensive calls after this point; this could end up resolving policy.
+
+ if (fFullStackWalk)
+ {
+        // It is possible that wrappers such as VB helper libraries, which are
+        // fully trusted, make calls to public methods that do not have the
+        // safe-for-untrusted-caller custom attribute set.
+        // Like all other link demands that get transformed into a full stack
+        // walk for reflection, calls to public methods also get
+        // converted to a full stack walk.
+
+ OBJECTREF permSet = NULL;
+ GCPROTECT_BEGIN(permSet);
+
+ GetPermissionInstance(&permSet, SECURITY_FULL_TRUST);
+ EX_TRY
+ {
+ SecurityStackWalk::DemandSet(SSWT_LATEBOUND_LINKDEMAND, permSet);
+ }
+ EX_CATCH
+ {
+ fRet = FALSE;
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ GCPROTECT_END();
+ }
+ else
+ {
+ _ASSERTE(pCaller);
+
+ // Link Demand only, no full stack walk here
+ if (!pCaller->GetSecurityDescriptor()->IsFullyTrusted())
+ fRet = FALSE;
+ }
+
+ if (!fRet)
+ {
+ ThrowAPTCAException(pCaller, pCallee);
+ }
+}
+
+#endif // FEATURE_APTCA
+
+// Retrieve all linktime demand sets for a method. This includes both CAS and
+// non-CAS sets for LDs at the class and the method level, so we could get up to
+// four sets.
+void SecurityDeclarative::RetrieveLinktimeDemands(MethodDesc *pMD,
+ OBJECTREF *pClassCas,
+ OBJECTREF *pClassNonCas,
+ OBJECTREF *pMethodCas,
+ OBJECTREF *pMethodNonCas)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+#ifdef FEATURE_CAS_POLICY
+ MethodTable * pMT = pMD->GetMethodTable();
+
+ // Class level first.
+ if (pMT->GetClass()->RequiresLinktimeCheck())
+ *pClassCas = TypeSecurityDescriptor::GetLinktimePermissions(pMT, pClassNonCas);
+
+ // Then the method level.
+ if (IsMdHasSecurity(pMD->GetAttrs()))
+ *pMethodCas = MethodSecurityDescriptor::GetLinktimePermissions(pMD, pMethodNonCas);
+#endif
+}
+
+//
+// Determine the reason why a method has been marked as requiring a link time check
+//
+// Arguments:
+// pMD - the method to figure out what link checks are needed for
+// pClassCasDemands - [out, optional] the CAS link demands found on the class containing the method
+// pClassNonCasDemands - [out, optional] the non-CAS link demands found on the class containing the method
+// pMethodCasDemands - [out, optional] the CAS link demands found on the method itself
+// pMethodNonCasDemands - [out, optional] the non-CAS link demands found on the method itself
+//
+// Return Value:
+// Flags indicating why the method has a link time check requirement
+//
+
+// static
+LinktimeCheckReason SecurityDeclarative::GetLinktimeCheckReason(MethodDesc *pMD,
+ OBJECTREF *pClassCasDemands,
+ OBJECTREF *pClassNonCasDemands,
+ OBJECTREF *pMethodCasDemands,
+ OBJECTREF *pMethodNonCasDemands)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(CheckPointer(pClassCasDemands, NULL_OK));
+ PRECONDITION(CheckPointer(pClassNonCasDemands, NULL_OK));
+ PRECONDITION(CheckPointer(pMethodCasDemands, NULL_OK));
+ PRECONDITION(CheckPointer(pMethodNonCasDemands, NULL_OK));
+ PRECONDITION(pMD->RequiresLinktimeCheck());
+ }
+ CONTRACTL_END;
+
+ LinktimeCheckReason reason = LinktimeCheckReason_None;
+
+#if defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+ ModuleSecurityDescriptor *pMSD = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pMD->GetAssembly());
+
+ // If the method does not allow partially trusted callers, then the check is because we need to ensure all
+ // callers are fully trusted.
+ if (!pMSD->IsAPTCA())
+ {
+ reason |= LinktimeCheckReason_AptcaCheck;
+ }
+#endif // defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+
+ //
+ // If the method has a LinkDemand on it for either CAS or non-CAS permissions, get those and set the
+ // flags for the appropriate type of permission.
+ //
+
+ struct gc
+ {
+ OBJECTREF refClassCasDemands;
+ OBJECTREF refClassNonCasDemands;
+ OBJECTREF refMethodCasDemands;
+ OBJECTREF refMethodNonCasDemands;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ // Fetch link demand sets from all the places in metadata where we might
+ // find them (class and method). These might be split into CAS and non-CAS
+ // sets as well.
+ Security::RetrieveLinktimeDemands(pMD,
+ &gc.refClassCasDemands,
+ &gc.refClassNonCasDemands,
+ &gc.refMethodCasDemands,
+ &gc.refMethodNonCasDemands);
+
+ if (gc.refClassCasDemands != NULL || gc.refMethodCasDemands != NULL)
+ {
+ reason |= LinktimeCheckReason_CasDemand;
+
+ if (pClassCasDemands != NULL)
+ {
+ *pClassCasDemands = gc.refClassCasDemands;
+ }
+ if (pMethodCasDemands != NULL)
+ {
+ *pMethodCasDemands = gc.refMethodCasDemands;
+ }
+ }
+
+ if (gc.refClassNonCasDemands != NULL || gc.refMethodNonCasDemands != NULL)
+ {
+ reason |= LinktimeCheckReason_NonCasDemand;
+
+ if (pClassNonCasDemands != NULL)
+ {
+ *pClassNonCasDemands = gc.refClassNonCasDemands;
+ }
+
+ if (pMethodNonCasDemands != NULL)
+ {
+ *pMethodNonCasDemands = gc.refMethodNonCasDemands;
+ }
+
+ }
+
+ GCPROTECT_END();
+
+ //
+ // Check to see if the target of the method is unmanaged code
+ //
+ // We detect linktime checks for UnmanagedCode in three cases:
+ // o P/Invoke calls.
+ // o Calls through an interface that have a suppress runtime check attribute on them (these are almost
+ // certainly interop calls).
+ // o Interop calls made through method impls.
+ //
+
+ if (pMD->IsNDirect())
+ {
+ reason |= LinktimeCheckReason_NativeCodeCall;
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (pMD->IsComPlusCall() && !pMD->IsInterface())
+ {
+ reason |= LinktimeCheckReason_NativeCodeCall;
+ }
+ else if (pMD->IsInterface())
+ {
+ // We also consider calls to interfaces that contain the SuppressUnmanagedCodeSecurity attribute to
+ // be COM calls, so check for those.
+ bool fSuppressUnmanagedCheck =
+ pMD->GetMDImport()->GetCustomAttributeByName(pMD->GetMethodTable()->GetCl(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_OK ||
+ pMD->GetMDImport()->GetCustomAttributeByName(pMD->GetMemberDef(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_OK;
+ if (fSuppressUnmanagedCheck)
+ {
+ reason |= LinktimeCheckReason_NativeCodeCall;
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ return reason;
+}
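+
+// For example, a P/Invoke in a non-APTCA assembly whose enclosing class also
+// carries a CAS LinkDemand would come back from GetLinktimeCheckReason as
+// LinktimeCheckReason_AptcaCheck | LinktimeCheckReason_CasDemand |
+// LinktimeCheckReason_NativeCodeCall.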
+
+#ifdef FEATURE_CAS_POLICY
+// Issue an inheritance demand against the target assembly
+
+// static
+void SecurityDeclarative::InheritanceDemand(Assembly *pTargetAssembly, OBJECTREF refDemand)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pTargetAssembly));
+ PRECONDITION(refDemand != NULL);
+ }
+ CONTRACTL_END;
+
+ struct
+ {
+ OBJECTREF refDemand;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+ gc.refDemand = refDemand;
+
+ GCPROTECT_BEGIN(gc);
+
+ IAssemblySecurityDescriptor *pTargetASD = pTargetAssembly->GetSecurityDescriptor();
+ SecurityStackWalk::LinkOrInheritanceCheck(pTargetASD,
+ gc.refDemand,
+ pTargetAssembly,
+ dclInheritanceCheck);
+ GCPROTECT_END();
+}
+
+// static
+void SecurityDeclarative::InheritanceLinkDemandCheck(Assembly *pTargetAssembly, MethodDesc * pMDLinkDemand)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pTargetAssembly));
+ PRECONDITION(CheckPointer(pMDLinkDemand));
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+ struct
+ {
+ OBJECTREF refClassCas;
+ OBJECTREF refClassNonCas;
+ OBJECTREF refMethodCas;
+ OBJECTREF refMethodNonCas;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ Security::RetrieveLinktimeDemands(pMDLinkDemand,
+ &gc.refClassCas,
+ &gc.refClassNonCas,
+ &gc.refMethodCas,
+ &gc.refMethodNonCas);
+
+ if (gc.refClassCas != NULL)
+ {
+ InheritanceDemand(pTargetAssembly, gc.refClassCas);
+ }
+
+ if (gc.refMethodCas != NULL)
+ {
+ InheritanceDemand(pTargetAssembly, gc.refMethodCas);
+ }
+
+ GCPROTECT_END();
+}
+
+// Issue a FullTrust inheritance demand against the target assembly
+
+// static
+void SecurityDeclarative::FullTrustInheritanceDemand(Assembly *pTargetAssembly)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pTargetAssembly));
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ struct
+ {
+ OBJECTREF refFullTrust;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.refFullTrust = Security::CreatePermissionSet(TRUE);
+ InheritanceDemand(pTargetAssembly, gc.refFullTrust);
+
+ GCPROTECT_END();
+}
+
+// Issue a FullTrust link demand against the target assembly
+
+// static
+void SecurityDeclarative::FullTrustLinkDemand(Assembly *pTargetAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pTargetAssembly));
+ }
+ CONTRACTL_END;
+
+ struct
+ {
+ OBJECTREF refFullTrust;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.refFullTrust = Security::CreatePermissionSet(TRUE);
+ IAssemblySecurityDescriptor *pTargetASD = pTargetAssembly->GetSecurityDescriptor();
+ SecurityStackWalk::LinkOrInheritanceCheck(pTargetASD,
+ gc.refFullTrust,
+ pTargetAssembly,
+ dclLinktimeCheck);
+ GCPROTECT_END();
+}
+
+// Used by interop to simulate the effect of link demands when the caller is
+// in fact script constrained by an appdomain setup by IE.
+void SecurityDeclarative::CheckLinkDemandAgainstAppDomain(MethodDesc *pMD)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ if (!pMD->RequiresLinktimeCheck())
+ return;
+
+ // Find the outermost (closest to caller) appdomain. This
+ // represents the domain in which the unmanaged caller is
+ // considered to "live" (or, at least, be constrained by).
+ AppDomain *pDomain = GetThread()->GetInitialDomain();
+
+ // The link check is only performed if this app domain has
+ // security permissions associated with it, which will be
+ // the case for all IE scripting callers that have got this
+ // far because we automatically reported our managed classes
+ // as "safe for scripting".
+ //
+ // We also can't do the check if the AppDomain isn't fully
+ // setup yet, since we might not have a domain grant set.
+ // This is acceptable, since the only code that should run
+ // during AppDomain creation is fully trusted.
+ IApplicationSecurityDescriptor *pSecDesc = pDomain->GetSecurityDescriptor();
+ if (pSecDesc == NULL || pSecDesc->IsInitializationInProgress() || pSecDesc->IsDefaultAppDomain())
+ return;
+
+ struct _gc
+ {
+ OBJECTREF refGrant;
+ OBJECTREF refRefused;
+ OBJECTREF refClassNonCasDemands;
+ OBJECTREF refClassCasDemands;
+ OBJECTREF refMethodNonCasDemands;
+ OBJECTREF refMethodCasDemands;
+ OBJECTREF refAssembly;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+#ifdef FEATURE_APTCA
+ // Do a fulltrust check on the caller if the callee did not enable
+ // AllowUntrustedCallerChecks. Pass a NULL caller assembly:
+ // DoUntrustedCallerChecks needs to be able to cope with this.
+ SecurityDeclarative::DoUntrustedCallerChecks(NULL, pMD, TRUE);
+#endif // FEATURE_APTCA
+
+ // Fetch link demand sets from all the places in metadata where we might
+ // find them (class and method). These might be split into CAS and non-CAS
+ // sets as well.
+ SecurityDeclarative::RetrieveLinktimeDemands(pMD,
+ &gc.refClassCasDemands,
+ &gc.refClassNonCasDemands,
+ &gc.refMethodCasDemands,
+ &gc.refMethodNonCasDemands);
+
+ // Check CAS link demands.
+ bool fGotGrantSet = false;
+ if (gc.refClassCasDemands != NULL || gc.refMethodCasDemands != NULL)
+ {
+ // Get grant (and possibly denied) sets from the app
+ // domain.
+ gc.refGrant = pSecDesc->GetGrantedPermissionSet(NULL);
+ fGotGrantSet = true;
+ gc.refAssembly = pMD->GetAssembly()->GetExposedObject();
+
+ if (gc.refClassCasDemands != NULL)
+ SecurityStackWalk::CheckSetHelper(&gc.refClassCasDemands,
+ &gc.refGrant,
+ &gc.refRefused,
+ pDomain,
+ pMD,
+ &gc.refAssembly,
+ dclLinktimeCheck);
+
+ if (gc.refMethodCasDemands != NULL)
+ SecurityStackWalk::CheckSetHelper(&gc.refMethodCasDemands,
+ &gc.refGrant,
+ &gc.refRefused,
+ pDomain,
+ pMD,
+ &gc.refAssembly,
+ dclLinktimeCheck);
+
+ }
+
+ // Non-CAS demands are not applied against a grant
+ // set, they're standalone.
+ if (gc.refClassNonCasDemands != NULL)
+ CheckNonCasDemand(&gc.refClassNonCasDemands);
+
+ if (gc.refMethodNonCasDemands != NULL)
+ CheckNonCasDemand(&gc.refMethodNonCasDemands);
+
+#ifndef FEATURE_CORECLR
+ // On CoreCLR, we do this from the JIT callouts if the caller is transparent; if the caller is critical, no checks are needed
+
+ // We perform automatic linktime checks for UnmanagedCode in three cases:
+ // o P/Invoke calls (shouldn't get these here, but let's be paranoid).
+ // o Calls through an interface that has a suppress-runtime-check
+ // attribute on it (these are almost certainly interop calls).
+ // o Interop calls made through method impls.
+ // Just walk the stack in these cases, they'll be extremely rare and the
+ // perf delta isn't that huge.
+ if (pMD->IsNDirect() ||
+ (pMD->IsInterface() &&
+ (pMD->GetMDImport()->GetCustomAttributeByName(pMD->GetMethodTable()->GetCl(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_OK ||
+ pMD->GetMDImport()->GetCustomAttributeByName(pMD->GetMemberDef(),
+ COR_SUPPRESS_UNMANAGED_CODE_CHECK_ATTRIBUTE_ANSI,
+ NULL,
+ NULL) == S_OK) ) ||
+ (pMD->IsComPlusCall() && !pMD->IsInterface()))
+ SecurityStackWalk::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_UNMANAGED_CODE);
+#endif // FEATURE_CORECLR
+
+ GCPROTECT_END();
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+//-----------------------------------------------------------------------------
+//
+//
+// CODE FOR PERFORMING RUN-TIME CHECKS
+//
+//
+//-----------------------------------------------------------------------------
+
+void SecurityDeclarative::EnsureAssertAllowed(MethodDesc *pMeth, MethodSecurityDescriptor *pMSD)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pMeth));
+ PRECONDITION(pMSD == NULL || pMSD->GetMethod() == pMeth);
+ } CONTRACTL_END;
+
+ // Check if this Assembly has permission to assert
+ if (pMSD == NULL || !pMSD->CanAssert()) // skip if we have an MSD that has already verified this permission
+ {
+ Module* pModule = pMeth->GetModule();
+ PREFIX_ASSUME_MSG(pModule != NULL, "Should be a Module pointer here");
+
+ if (!Security::CanAssert(pModule))
+ SecurityPolicy::ThrowSecurityException(g_SecurityPermissionClassName, SPFLAGSASSERTION);
+ }
+
+ // Check if the Method is allowed to assert based on transparent/critical classification
+ if (!SecurityTransparent::IsAllowedToAssert(pMeth))
+ {
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ SecurityTransparent::LogTransparencyError(pMeth, "Transparent method using a security assert");
+ }
+ if (!g_pConfig->DisableTransparencyEnforcement())
+#endif // _DEBUG
+ {
+ // If the assembly is transparent, fail the assert operation
+ COMPlusThrow(kInvalidOperationException, W("InvalidOperation_AssertTransparentCode"));
+ }
+ }
+
+ return;
+}
+
+void SecurityDeclarative::InvokeDeclarativeActions (MethodDesc *pMeth, DeclActionInfo *pActions, MethodSecurityDescriptor *pMSD)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ OBJECTREF refPermSet = NULL;
+ ARG_SLOT arg = 0;
+
+ // If we get a real PermissionSet, then invoke the action.
+ switch (pActions->dwDeclAction)
+ {
+ case DECLSEC_DEMANDS:
+ SecurityStackWalk::DemandSet(SSWT_DECLARATIVE_DEMAND, pActions->pPCE, dclDemand);
+ break;
+
+ case DECLSEC_ASSERTIONS:
+ EnsureAssertAllowed(pMeth, pMSD);
+ GetThread()->IncrementAssertCount();
+ break;
+
+ case DECLSEC_DENIALS:
+ case DECLSEC_PERMITONLY:
+ GetThread()->IncrementOverridesCount();
+ break;
+
+ case DECLSEC_NONCAS_DEMANDS:
+ refPermSet = pActions->pPCE->CreateManagedPsetObject (dclNonCasDemand);
+ if (refPermSet == NULL)
+ break;
+ if (!((PERMISSIONSETREF)refPermSet)->CheckedForNonCas() || ((PERMISSIONSETREF)refPermSet)->ContainsNonCas())
+ {
+ GCPROTECT_BEGIN(refPermSet);
+ MethodDescCallSite demand(METHOD__PERMISSION_SET__DEMAND_NON_CAS, &refPermSet);
+
+ arg = ObjToArgSlot(refPermSet);
+ demand.Call(&arg);
+ GCPROTECT_END();
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Unknown action requested in InvokeDeclarativeActions");
+ break;
+
+ } // switch
+}
+
+
+//
+// CODE FOR PERFORMING RUN-TIME CHECKS
+//
+extern LPVOID GetSecurityObjectForFrameInternal(StackCrawlMark *stackMark, INT32 create, OBJECTREF *pRefSecDesc);
+
+namespace
+{
+ inline void UpdateFrameSecurityObj(DWORD dwAction, OBJECTREF *refPermSet, OBJECTREF * pSecObj)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ GetSecurityObjectForFrameInternal(NULL, true, pSecObj);
+
+ FRAMESECDESCREF fsdRef = (FRAMESECDESCREF)*pSecObj;
+ switch (dwAction)
+ {
+ // Currently we require declarative security to store its data in both fields of the FSD
+ case dclAssert:
+ fsdRef->SetDeclarativeAssertions(*refPermSet);
+ {
+ PERMISSIONSETREF psRef = (PERMISSIONSETREF)*refPermSet;
+ if (psRef != NULL && psRef->IsUnrestricted())
+ fsdRef->SetAssertFT(TRUE);
+ }
+ break;
+
+ case dclDeny:
+ fsdRef->SetDeclarativeDenials(*refPermSet);
+ break;
+
+ case dclPermitOnly:
+ fsdRef->SetDeclarativeRestrictions(*refPermSet);
+ break;
+
+ default:
+ _ASSERTE(0 && "Unreached, add code to handle if reached here...");
+ break;
+ }
+ }
+}
+
+void SecurityDeclarative::InvokeDeclarativeStackModifiers(MethodDesc * pMeth, DeclActionInfo * pActions, OBJECTREF * pSecObj)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ OBJECTREF refPermSet = NULL;
+
+ // If we get a real PermissionSet, then invoke the action.
+ switch (pActions->dwDeclAction)
+ {
+ case DECLSEC_DEMANDS:
+ case DECLSEC_NONCAS_DEMANDS:
+ // Nothing to do for demands
+ break;
+
+ case DECLSEC_ASSERTIONS:
+ refPermSet = pActions->pPCE->CreateManagedPsetObject (dclAssert);
+ if (refPermSet == NULL)
+ break;
+ GCPROTECT_BEGIN(refPermSet);
+ // Now update the frame security object
+ UpdateFrameSecurityObj(dclAssert, &refPermSet, pSecObj);
+ GCPROTECT_END();
+ break;
+
+ case DECLSEC_DENIALS:
+ // Update the frame security object
+ refPermSet = pActions->pPCE->CreateManagedPsetObject (dclDeny);
+
+ if (refPermSet == NULL)
+ break;
+
+ GCPROTECT_BEGIN(refPermSet);
+
+#ifdef FEATURE_CAS_POLICY
+ // Deny is only valid if we're in legacy CAS mode
+ IApplicationSecurityDescriptor *pSecDesc = GetAppDomain()->GetSecurityDescriptor();
+ if (!pSecDesc->IsLegacyCasPolicyEnabled())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CasDeny"));
+ }
+#endif // FEATURE_CAS_POLICY
+
+ UpdateFrameSecurityObj(dclDeny, &refPermSet, pSecObj);
+
+ GCPROTECT_END();
+ break;
+
+ case DECLSEC_PERMITONLY:
+ // Update the frame security object
+ refPermSet = pActions->pPCE->CreateManagedPsetObject (dclPermitOnly);
+
+ if (refPermSet == NULL)
+ break;
+ GCPROTECT_BEGIN(refPermSet);
+ UpdateFrameSecurityObj(dclPermitOnly, &refPermSet, pSecObj);
+ GCPROTECT_END();
+ break;
+
+
+ default:
+ _ASSERTE(!"Unknown action requested in InvokeDeclarativeStackModifiers");
+ break;
+
+ } // switch
+}
+
+void SecurityDeclarative::DoDeclarativeActions(MethodDesc *pMeth, DeclActionInfo *pActions, LPVOID pSecObj, MethodSecurityDescriptor *pMSD)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+#ifndef FEATURE_CAS_POLICY
+ // In the CoreCLR, we don't support CAS actions outside mscorlib.
+ // However, we do have to expose certain types in mscorlib due to compiler requirements
+ // (c# compiler requires us to expose SecurityPermission/SecurityAction etc so that it can
+ // insert a RequestMinimum for SkipVerification).
+ // This means that code outside mscorlib could construct IL that has declarative security
+ // in it. This is not a security issue - even if they try to create IL that asserts for
+ // permissions they don't have, it's not going to work for the same reasons it didn't in the desktop.
+ // However, we could have bugs like DDB 120109 where they can cause Demands to fail etc.
+ // So, to be safe, we're not going to do any runtime declarative work on assemblies other than mscorlib.
+ if (!pMeth->GetModule()->IsSystem())
+ {
+ // Non-mscorlib code reached... exit
+ return;
+ }
+#endif //!FEATURE_CAS_POLICY
+
+
+ // --------------------------------------------------------------------------- //
+ // D E C L A R A T I V E S E C U R I T Y D E M A N D S //
+ // --------------------------------------------------------------------------- //
+ // The frame is now fully formed, arguments have been copied into place,
+ // and synchronization monitors have been entered if necessary. At this
+ // point, we are prepared for something to throw an exception, so we may
+ // check for declarative security demands and execute them. We need a
+ // well-formed frame and synchronization domain to accept security
+ // exceptions thrown by the SecurityManager. We MAY need argument values
+ // in the frame so that the arguments may be finalized if security throws
+ // an exception across them (unknown).
+ if (pActions != NULL && pActions->dwDeclAction == DECLSEC_UNMNGD_ACCESS_DEMAND &&
+ pActions->pNext == NULL)
+ {
+ /* We special-case the security check on single pinvoke/interop calls
+ so we can avoid setting up the GCFrame */
+
+ SecurityStackWalk::SpecialDemand(SSWT_DECLARATIVE_DEMAND, SECURITY_UNMANAGED_CODE);
+ return;
+ }
+ else
+ {
+#ifdef FEATURE_COMPRESSEDSTACK
+ // If this is an anonymously hosted dynamic method, there aren't any direct modifiers, but if it has a compressed stack that
+ // might have modifiers, mark that there are modifiers so we make sure to do a stack walk
+ if(SecurityStackWalk::MethodIsAnonymouslyHostedDynamicMethodWithCSToEvaluate(pMeth))
+ {
+ // We don't know how many asserts or overrides might be in the compressed stack,
+ // but we just need to increment the counters to ensure optimizations don't skip CS evaluation
+ GetThread()->IncrementAssertCount();
+ GetThread()->IncrementOverridesCount();
+ }
+#endif // FEATURE_COMPRESSEDSTACK
+
+ for (/**/; pActions; pActions = pActions->pNext)
+ {
+ if (pActions->dwDeclAction == DECLSEC_UNMNGD_ACCESS_DEMAND)
+ {
+ SecurityStackWalk::SpecialDemand(SSWT_DECLARATIVE_DEMAND, SECURITY_UNMANAGED_CODE);
+ }
+ else
+ {
+ InvokeDeclarativeActions(pMeth, pActions, pMSD);
+ }
+ }
+
+ }
+}
+void SecurityDeclarative::DoDeclarativeStackModifiers(MethodDesc *pMeth, AppDomain* pAppDomain, LPVOID pSecObj)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+#ifndef FEATURE_CAS_POLICY
+ // In the CoreCLR, we don't support CAS actions outside mscorlib.
+ // However, we do have to expose certain types in mscorlib due to compiler requirements
+ // (c# compiler requires us to expose SecurityPermission/SecurityAction etc so that it can
+ // insert a RequestMinimum for SkipVerification).
+ // This means that code outside mscorlib could construct IL that has declarative security
+ // in it. This is not a security issue - even if they try to create IL that asserts for
+ // permissions they don't have, it's not going to work for the same reasons it didn't in the desktop.
+ // However, we could have bugs like DDB 120109 where they can cause Demands to fail etc.
+ // So, to be safe, we're not going to do any runtime declarative work on assemblies other than mscorlib.
+ if (!pMeth->GetModule()->IsSystem())
+ {
+ // Non-mscorlib code reached... exit
+ return;
+ }
+#endif //!FEATURE_CAS_POLICY
+
+
+ AppDomain* pCurrentDomain = GetAppDomain();
+
+ if (pCurrentDomain != pAppDomain)
+ {
+ ENTER_DOMAIN_PTR(pAppDomain, ADV_RUNNINGIN)
+ {
+ DoDeclarativeStackModifiersInternal(pMeth, pSecObj);
+ }
+ END_DOMAIN_TRANSITION;
+ }
+ else
+ {
+ DoDeclarativeStackModifiersInternal(pMeth, pSecObj);
+ }
+}
+
+void SecurityDeclarative::DoDeclarativeStackModifiersInternal(MethodDesc *pMeth, LPVOID pSecObj)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ Object** ppSecObject = (Object**) pSecObj;
+ _ASSERTE(pMeth->IsInterceptedForDeclSecurity() && !pMeth->IsInterceptedForDeclSecurityCASDemandsOnly());
+
+ MethodSecurityDescriptor MDSecDesc(pMeth);
+ MethodSecurityDescriptor::LookupOrCreateMethodSecurityDescriptor(&MDSecDesc);
+ DeclActionInfo* pActions = MDSecDesc.GetRuntimeDeclActionInfo();
+
+ OBJECTREF fsdRef = ObjectToOBJECTREF(*ppSecObject);
+ GCPROTECT_BEGIN(fsdRef);
+
+ for (/**/; pActions; pActions = pActions->pNext)
+ {
+ InvokeDeclarativeStackModifiers(pMeth, pActions, &fsdRef);
+ }
+ // If we had just NON-CAS demands, we'd come here but not create an FSD.
+ if (fsdRef != NULL)
+ {
+ ((FRAMESECDESCREF)(fsdRef))->SetDeclSecComputed(TRUE);
+
+ if (*ppSecObject == NULL)
+ {
+ // we came in with a NULL FSD and the FSD got created here...so we need to copy it back
+ // If we had come in with a non-NULL FSD, that would have been updated and this (shallow/pointer) copy
+ // would not be necessary
+ *ppSecObject = OBJECTREFToObject(fsdRef);
+ }
+ }
+
+ GCPROTECT_END();
+}
+
+
+void SecurityDeclarative::CheckNonCasDemand(OBJECTREF *prefDemand)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsProtectedByGCFrame (prefDemand));
+ } CONTRACTL_END;
+
+ if (((PERMISSIONSETREF)*prefDemand)->CheckedForNonCas())
+ {
+ if (!((PERMISSIONSETREF)*prefDemand)->ContainsNonCas())
+ return;
+ }
+ MethodDescCallSite demand(METHOD__PERMISSION_SET__DEMAND_NON_CAS, prefDemand);
+ ARG_SLOT arg = ObjToArgSlot(*prefDemand);
+ demand.Call(&arg);
+}
+
+#endif // FEATURE_CAS_POLICY
+
+#endif // CROSSGEN_COMPILE
diff --git a/src/vm/securitydeclarative.h b/src/vm/securitydeclarative.h
new file mode 100644
index 0000000000..eaa055bc82
--- /dev/null
+++ b/src/vm/securitydeclarative.h
@@ -0,0 +1,199 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef __SECURITYDECLARATIVE_H__
+#define __SECURITYDECLARATIVE_H__
+
+class SecurityStackWalk;
+class MethodSecurityDescriptor;
+class TokenSecurityDescriptor;
+struct TokenDeclActionInfo;
+class TypeSecurityDescriptor;
+class PsetCacheEntry;
+
+// Reasons why a method may have been flagged as requiring a LinkDemand
+enum LinktimeCheckReason
+{
+ LinktimeCheckReason_None = 0x00000000, // The method does not require a LinkDemand
+ LinktimeCheckReason_CasDemand = 0x00000001, // The method has CAS LinkDemands
+ LinktimeCheckReason_NonCasDemand = 0x00000002, // The method has non-CAS LinkDemands
+ LinktimeCheckReason_AptcaCheck = 0x00000004, // The method is a member of a non-APTCA assembly that requires its caller to be trusted
+ LinktimeCheckReason_NativeCodeCall = 0x00000008 // The method may represent a call to native code
+};
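+
+// Example (illustrative only): these values are bit flags and combine through the
+// operators declared below, so a P/Invoke that also carries a CAS LinkDemand would be
+// described as:
+//
+// LinktimeCheckReason reason = LinktimeCheckReason_CasDemand | LinktimeCheckReason_NativeCodeCall;
+// if ((reason & LinktimeCheckReason_NativeCodeCall) != LinktimeCheckReason_None)
+// { /* the callee may invoke native code */ }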
+
+struct DeclActionInfo
+{
+ DWORD dwDeclAction; // Tells InvokeDeclarativeSecurity what action is needed
+ PsetCacheEntry *pPCE; // The cached permission set on which to demand/assert/deny/etc.
+ DeclActionInfo *pNext; // Next declarative action needed on this method, if any.
+
+ static DeclActionInfo *Init(MethodDesc *pMD, DWORD dwAction, PsetCacheEntry *pPCE);
+};
+
+inline LinktimeCheckReason operator|(LinktimeCheckReason lhs, LinktimeCheckReason rhs);
+inline LinktimeCheckReason operator|=(LinktimeCheckReason &lhs, LinktimeCheckReason rhs);
+inline LinktimeCheckReason operator&(LinktimeCheckReason lhs, LinktimeCheckReason rhs);
+inline LinktimeCheckReason operator&=(LinktimeCheckReason &lhs, LinktimeCheckReason rhs);
+
+namespace SecurityDeclarative
+{
+ // Returns an instance of a well-known permission. (It caches them, so each permission is created only once.)
+ void _GetSharedPermissionInstance(OBJECTREF *perm, int index);
+
+ // Perform the declarative actions
+ // Callers:
+ // DoDeclarativeSecurity
+ void DoDeclarativeActions(MethodDesc *pMD, DeclActionInfo *pActions, LPVOID pSecObj, MethodSecurityDescriptor *pMSD = NULL);
+ void DoDeclarativeStackModifiers(MethodDesc *pMeth, AppDomain* pAppDomain, LPVOID pSecObj);
+ void DoDeclarativeStackModifiersInternal(MethodDesc *pMeth, LPVOID pSecObj);
+ void EnsureAssertAllowed(MethodDesc *pMeth, MethodSecurityDescriptor* pMSD); // throws exception if assert is not allowed for MethodDesc
+ // Determine which declarative SecurityActions are used on this type and return a
+ // DWORD of flags to represent the results
+ // Callers:
+ // MethodTableBuilder::CreateClass
+ // MethodTableBuilder::EnumerateClassMembers
+ // MethodDesc::GetSecurityFlags
+ HRESULT GetDeclarationFlags(IMDInternalImport *pInternalImport, mdToken token, DWORD* pdwFlags, DWORD* pdwNullFlags, BOOL* fHasSuppressUnmanagedCodeAccessAttr = NULL);
+
+ // Query the metadata to get all LinkDemands on this method (and its class)
+ // Callers:
+ // CanAccess (ReflectionInvocation)
+ // ReflectionInvocation::GetSpecialSecurityFlags
+ // RuntimeMethodHandle::InvokeMethod_Internal
+ // Security::CheckLinkDemandAgainstAppDomain
+ void RetrieveLinktimeDemands(MethodDesc* pMD,
+ OBJECTREF* pClassCas,
+ OBJECTREF* pClassNonCas,
+ OBJECTREF* pMethodCas,
+ OBJECTREF* pMethodNonCas);
+
+ // Determine why the method is marked as requiring a linktime check, optionally returning the declared
+ // CAS link demands on the method itself.
+ LinktimeCheckReason GetLinktimeCheckReason(MethodDesc *pMD,
+ OBJECTREF *pClassCasDemands,
+ OBJECTREF *pClassNonCasDemands,
+ OBJECTREF *pMethodCasDemands,
+ OBJECTREF *pMethodNonCasDemands);
+
+ // Used by interop to simulate the effect of link demands when the caller is
+ // in fact script constrained by an appdomain setup by IE.
+ // Callers:
+ // DispatchInfo::InvokeMember
+ // COMToCLRWorkerBody (COMToCLRCall)
+ void CheckLinkDemandAgainstAppDomain(MethodDesc *pMD);
+
+ // Perform a LinkDemand
+ // Callers:
+ // COMCustomAttribute::CreateCAObject
+ // CheckMethodAccess
+ // InvokeUtil::CheckLinktimeDemand
+ // CEEInfo::findMethod
+ // RuntimeMethodHandle::InvokeMethod_Internal
+ void LinktimeCheckMethod(Assembly *pCaller, MethodDesc *pCallee);
+
+ // Perform inheritance link demand
+ // Called by:
+ // MethodTableBuilder::ConvertLinkDemandToInheritanceDemand
+ void InheritanceLinkDemandCheck(Assembly *pTargetAssembly, MethodDesc * pMDLinkDemand);
+
+ // Perform an InheritanceDemand against the target assembly
+ void InheritanceDemand(Assembly *pTargetAssembly, OBJECTREF refDemand);
+
+ // Perform a FullTrust InheritanceDemand against the target assembly
+ void FullTrustInheritanceDemand(Assembly *pTargetAssembly);
+
+ // Perform a FullTrust LinkDemand against the target assembly
+ void FullTrustLinkDemand(Assembly *pTargetAssembly);
+
+ // Do InheritanceDemands on the type
+ // Called by:
+ // MethodTableBuilder::VerifyInheritanceSecurity
+ void ClassInheritanceCheck(MethodTable *pClass, MethodTable *pParent);
+
+ // Do InheritanceDemands on the Method
+ // Callers:
+ // MethodTableBuilder::VerifyInheritanceSecurity
+ void MethodInheritanceCheck(MethodDesc *pMethod, MethodDesc *pParent);
+
+ // Returns a managed instance of a well-known PermissionSet
+ // Callers:
+ // COMCodeAccessSecurityEngine::SpecialDemand
+ // ReflectionSerialization::GetSafeUninitializedObject
+ inline void GetPermissionInstance(OBJECTREF *perm, int index);
+
+ inline BOOL FullTrustCheckForLinkOrInheritanceDemand(Assembly *pAssembly);
+
+
+#ifdef FEATURE_APTCA
+ // Returns TRUE if an APTCA check is necessary
+ // Callers:
+ // CanAccess
+ BOOL IsUntrustedCallerCheckNeeded(MethodDesc *pCalleeMD, Assembly *pCallerAssem = NULL);
+
+ // Perform the APTCA check
+ // Callers:
+ // CanAccess
+ // Security::CheckLinkDemandAgainstAppDomain
+ void DoUntrustedCallerChecks(
+ Assembly *pCaller, MethodDesc *pCallee,
+ BOOL fFullStackWalk);
+#endif // FEATURE_APTCA
+
+#ifndef DACCESS_COMPILE
+ // Calls PermissionSet.Demand
+ // Callers:
+ // CanAccess (ReflectionInvocation)
+ // Security::CheckLinkDemandAgainstAppDomain
+ void CheckNonCasDemand(OBJECTREF *prefDemand);
+#endif // #ifndef DACCESS_COMPILE
+
+ // Returns TRUE if the method is visible outside its assembly
+ // Callers:
+ // MethodTableBuilder::SetSecurityFlagsOnMethod
+ inline BOOL MethodIsVisibleOutsideItsAssembly(MethodDesc * pMD);
+ inline BOOL MethodIsVisibleOutsideItsAssembly(DWORD dwMethodAttr, DWORD dwClassAttr, BOOL fIsGlobalClass);
+
+ BOOL TokenMightHaveDeclarations(IMDInternalImport *pInternalImport, mdToken token, CorDeclSecurity action);
+ DeclActionInfo *DetectDeclActions(MethodDesc *pMeth, DWORD dwDeclFlags);
+ void DetectDeclActionsOnToken(mdToken tk, DWORD dwDeclFlags, PsetCacheEntry** pSets, IMDInternalImport *pInternalImport);
+ void InvokeLinktimeChecks(Assembly *pCaller,
+ Module *pModule,
+ mdToken token);
+
+ inline BOOL MethodIsVisibleOutsideItsAssembly(DWORD dwMethodAttr);
+
+ inline BOOL ClassIsVisibleOutsideItsAssembly(DWORD dwClassAttr, BOOL fIsGlobalClass);
+
+#ifdef FEATURE_APTCA
+ // Returns an instance of a SecurityException with the message "This method doesn't allow partially trusted callers"
+ // Callers:
+ // DoUntrustedCallerChecks
+ void DECLSPEC_NORETURN ThrowAPTCAException(Assembly *pCaller, MethodDesc *pCallee);
+#endif // FEATURE_APTCA
+#ifdef FEATURE_CAS_POLICY
+ void DECLSPEC_NORETURN ThrowHPException(EApiCategories protectedCategories, EApiCategories demandedCategories);
+#endif // FEATURE_CAS_POLICY
+
+ // Add a declarative action and PermissionSet index to the linked list
+ void AddDeclAction(CorDeclSecurity action, PsetCacheEntry *pClassPCE, PsetCacheEntry *pMethodPCE, DeclActionInfo** ppActionList, MethodDesc *pMeth);
+
+ // Helper for DoDeclarativeActions
+ void InvokeDeclarativeActions(MethodDesc *pMeth, DeclActionInfo *pActions, MethodSecurityDescriptor *pMSD);
+ void InvokeDeclarativeStackModifiers (MethodDesc *pMeth, DeclActionInfo *pActions, OBJECTREF * pSecObj);
+
+ bool BlobMightContainNonCasPermission(PBYTE pbPerm, ULONG cbPerm, DWORD dwAction, bool* pHostProtectionOnly);
+
+// Delayed Declarative Security processing
+#ifndef DACCESS_COMPILE
+ inline void DoDeclarativeSecurityAtStackWalk(MethodDesc* pFunc, AppDomain* pAppDomain, OBJECTREF* pFrameObjectSlot);
+#endif
+}
+
+#endif // __SECURITYDECLARATIVE_H__
+
diff --git a/src/vm/securitydeclarative.inl b/src/vm/securitydeclarative.inl
new file mode 100644
index 0000000000..3a56bb2fa0
--- /dev/null
+++ b/src/vm/securitydeclarative.inl
@@ -0,0 +1,135 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef __SECURITYDECLARATIVE_INL__
+#define __SECURITYDECLARATIVE_INL__
+
+#include "security.h"
+
+inline LinktimeCheckReason operator|(LinktimeCheckReason lhs, LinktimeCheckReason rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<LinktimeCheckReason>(static_cast<DWORD>(lhs) | static_cast<DWORD>(rhs));
+}
+
+inline LinktimeCheckReason operator|=(LinktimeCheckReason &lhs, LinktimeCheckReason rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = lhs | rhs;
+ return lhs;
+}
+
+inline LinktimeCheckReason operator&(LinktimeCheckReason lhs, LinktimeCheckReason rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<LinktimeCheckReason>(static_cast<DWORD>(lhs) & static_cast<DWORD>(rhs));
+}
+
+
+inline LinktimeCheckReason operator&=(LinktimeCheckReason &lhs, LinktimeCheckReason rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = lhs & rhs;
+ return lhs;
+}
+
+inline void SecurityDeclarative::GetPermissionInstance(OBJECTREF *perm, int index)
+{
+ WRAPPER_NO_CONTRACT;
+ _GetSharedPermissionInstance(perm, index);
+}
+
+inline BOOL SecurityDeclarative::FullTrustCheckForLinkOrInheritanceDemand(Assembly *pAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+#ifndef DACCESS_COMPILE
+ IAssemblySecurityDescriptor* pSecDesc = pAssembly->GetSecurityDescriptor();
+ if (pSecDesc->IsSystem())
+ return TRUE;
+
+ if (pSecDesc->IsFullyTrusted())
+ return TRUE;
+#endif
+ return FALSE;
+}
+
+inline BOOL SecurityDeclarative::MethodIsVisibleOutsideItsAssembly(DWORD dwMethodAttr)
+{
+ LIMITED_METHOD_CONTRACT;
+ return ( IsMdPublic(dwMethodAttr) ||
+ IsMdFamORAssem(dwMethodAttr)||
+ IsMdFamily(dwMethodAttr) );
+}
+
+inline BOOL SecurityDeclarative::MethodIsVisibleOutsideItsAssembly(
+ MethodDesc * pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ MethodTable * pMT = pMD->GetMethodTable();
+
+ if (!ClassIsVisibleOutsideItsAssembly(pMT->GetAttrClass(), pMT->IsGlobalClass()))
+ return FALSE;
+
+ return MethodIsVisibleOutsideItsAssembly(pMD->GetAttrs());
+}
+
+inline BOOL SecurityDeclarative::MethodIsVisibleOutsideItsAssembly(DWORD dwMethodAttr, DWORD dwClassAttr, BOOL fIsGlobalClass)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!ClassIsVisibleOutsideItsAssembly(dwClassAttr, fIsGlobalClass))
+ return FALSE;
+
+ return MethodIsVisibleOutsideItsAssembly(dwMethodAttr);
+}
+
+inline BOOL SecurityDeclarative::ClassIsVisibleOutsideItsAssembly(DWORD dwClassAttr, BOOL fIsGlobalClass)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (fIsGlobalClass)
+ {
+ return TRUE;
+ }
+
+ return ( IsTdPublic(dwClassAttr) ||
+ IsTdNestedPublic(dwClassAttr)||
+ IsTdNestedFamily(dwClassAttr)||
+ IsTdNestedFamORAssem(dwClassAttr));
+}
+
+#ifndef DACCESS_COMPILE
+inline void SecurityDeclarative::DoDeclarativeSecurityAtStackWalk(MethodDesc* pFunc, AppDomain* pAppDomain, OBJECTREF* pFrameObjectSlot)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+
+ BOOL hasDeclarativeStackModifier = (pFunc->IsInterceptedForDeclSecurity() && !pFunc->IsInterceptedForDeclSecurityCASDemandsOnly());
+ if (hasDeclarativeStackModifier)
+ {
+
+ _ASSERTE(pFrameObjectSlot != NULL);
+ if (*pFrameObjectSlot == NULL || !( ((FRAMESECDESCREF)(*pFrameObjectSlot))->IsDeclSecComputed()) )
+ {
+ // Populate the FSD with declarative assert/deny/PO
+ SecurityDeclarative::DoDeclarativeStackModifiers(pFunc, pAppDomain, pFrameObjectSlot);
+ }
+ }
+}
+#endif
+
+
+
+#endif // __SECURITYDECLARATIVE_INL__
diff --git a/src/vm/securitydeclarativecache.cpp b/src/vm/securitydeclarativecache.cpp
new file mode 100644
index 0000000000..7f38a02f80
--- /dev/null
+++ b/src/vm/securitydeclarativecache.cpp
@@ -0,0 +1,358 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#include "common.h"
+#include "appdomain.inl"
+#include "security.h"
+#include "field.h"
+#include "comcallablewrapper.h"
+#include "typeparse.h"
+
+
+//
+//----------------------------------------------------
+//
+//Brief design overview:
+//
+//Essentially we moved away from the old scheme of a per-process hash table for blob->index mapping,
+//and a growable per appdomain array containing the managed objects. The new scheme has a per
+//appdomain hash that does memory allocs from the appdomain heap. The hash table maps the metadata
+//blob to a data structure called PsetCacheEntry. PsetCacheEntry has the metadata blob and a handle
+//to the managed pset object. It is the central place where caching and creation of the
+//managed pset objects happen. Whenever we see a new decl security blob, we insert it into the
+//appdomain hash (if it's not already there). The object is lazily created as needed (we let
+//threads race for object creation).
+//
+//----------------------------------------------------
+//
+
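+// A sketch of the typical lookup flow implied by the overview above, using the methods
+// defined later in this file (illustrative only; pSecDeclCache stands in for the
+// per-appdomain instance, and error handling is elided):
+//
+// PsetCacheEntry *pPCE = pSecDeclCache->GetCachedPset(pbBlob, cbBlob); // read lock only
+// if (pPCE == NULL)
+// pPCE = pSecDeclCache->CreateAndCachePset(pbBlob, cbBlob); // write lock, re-probes
+// OBJECTREF pset = pPCE->CreateManagedPsetObject(dwAction); // lazily creates/caches the object
+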
+BOOL PsetCacheKey::IsEquiv(PsetCacheKey *pOther)
+{
+ WRAPPER_NO_CONTRACT;
+ if (m_cbPset != pOther->m_cbPset || !m_pbPset || !pOther->m_pbPset)
+ return FALSE;
+ return memcmp(m_pbPset, pOther->m_pbPset, m_cbPset) == 0;
+}
+
+DWORD PsetCacheKey::Hash()
+{
+ LIMITED_METHOD_CONTRACT;
+ DWORD dwHash = 0;
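+ // Note: the blob is folded one DWORD at a time, so up to three trailing bytes
+ // beyond the last full DWORD never contribute to the hash; collisions are
+ // resolved by IsEquiv, which compares the full blob.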
+ for (DWORD i = 0; i < (m_cbPset / sizeof(DWORD)); i++)
+ dwHash ^= GET_UNALIGNED_VAL32(&((DWORD*)m_pbPset)[i]);
+ return dwHash;
+}
+
+void PsetCacheEntry::Init (PsetCacheKey *pKey, AppDomain *pDomain)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ THROWS; // From CreateHandle()
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ m_pKey = pKey;
+ m_eCanUnrestrictedOverride = CUO_DontKnow;
+ m_fEmptyPermissionSet = false;
+#ifndef CROSSGEN_COMPILE
+ m_handle = pDomain->CreateHandle(NULL);
+#endif // CROSSGEN_COMPILE
+}
+
+#ifndef CROSSGEN_COMPILE
+OBJECTREF PsetCacheEntry::CreateManagedPsetObject(DWORD dwAction, bool createEmptySet /* = false */)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ OBJECTREF orRet;
+
+ orRet = GetManagedPsetObject();
+ if (orRet != NULL) {
+ return orRet;
+ }
+
+ if (!createEmptySet && m_fEmptyPermissionSet) {
+ return NULL;
+ }
+
+ struct _gc {
+ OBJECTREF pset;
+ OBJECTREF encoding;
+ OBJECTREF nonCasPset;
+ OBJECTREF orNonCasPset;
+ OBJECTREF orNonCasEncoding;
+ } gc;
+ memset(&gc, 0, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ if ( (m_pKey->m_cbPset > 0) && (m_pKey->m_pbPset[0] == LAZY_DECL_SEC_FLAG) ) {
+
+ SecurityAttributes::AttrSetBlobToPermissionSets(m_pKey->m_pbPset,
+ m_pKey->m_cbPset,
+ &gc.pset,
+ dwAction);
+
+ } else {
+
+#ifdef FEATURE_CAS_POLICY
+ SecurityAttributes::XmlToPermissionSet(m_pKey->m_pbPset,
+ m_pKey->m_cbPset,
+ &gc.pset,
+ &gc.encoding,
+ NULL,
+ 0,
+ &gc.orNonCasPset,
+ &gc.orNonCasEncoding);
+#else
+ // The v1.x serialized permission set format is not supported on CoreCLR
+ COMPlusThrowHR(CORSECATTR_E_BAD_ATTRIBUTE);
+#endif //FEATURE_CAS_POLICY
+ }
+
+ StoreFirstObjectInHandle(m_handle, gc.pset);
+
+ if (gc.pset == NULL)
+ m_fEmptyPermissionSet = true;
+
+ GCPROTECT_END();
+
+ //
+ // Some other thread may have won the race, and stored away a different
+ // object in the handle.
+ //
+
+ orRet = GetManagedPsetObject();
+ return orRet;
+}
+#endif // CROSSGEN_COMPILE
+
+bool PsetCacheEntry::ContainsBuiltinCASPermsOnly (DWORD dwAction)
+{
+
+ if (m_eCanUnrestrictedOverride == CUO_Yes) {
+ return true;
+ }
+
+ if (m_eCanUnrestrictedOverride == CUO_No) {
+ return false;
+ }
+
+ bool bRet = ContainsBuiltinCASPermsOnlyInternal(dwAction);
+
+ //
+ // Cache the results.
+ //
+
+ if(bRet) {
+ m_eCanUnrestrictedOverride = CUO_Yes;
+ } else {
+ m_eCanUnrestrictedOverride = CUO_No;
+ }
+
+ return bRet;
+}
+
+bool PsetCacheEntry::ContainsBuiltinCASPermsOnlyInternal(DWORD dwAction)
+{
+ //
+ // Deserialize the CORSEC_ATTRSET
+ //
+
+ CORSEC_ATTRSET attrSet;
+ HRESULT hr = BlobToAttributeSet(m_pKey->m_pbPset, m_pKey->m_cbPset, &attrSet, dwAction);
+
+ if(FAILED(hr)) {
+ COMPlusThrowHR(hr);
+ }
+
+ if (hr == S_FALSE) {
+ //
+ // BlobToAttributeSet didn't work as expected - bail out early
+ //
+ return false;
+ }
+
+ // Check the attributes
+ return SecurityAttributes::ContainsBuiltinCASPermsOnly(&attrSet);
+}
+
+void SecurityDeclarativeCache::Init(LoaderHeap *pHeap)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ _ASSERTE (pHeap);
+
+ m_pHeap = pHeap;
+
+ m_pCachedPsetsHash = new EEPsetHashTable;
+
+ m_prCachedPsetsLock = new SimpleRWLock (COOPERATIVE_OR_PREEMPTIVE,
+ LOCK_TYPE_DEFAULT);
+
+ if (!m_pCachedPsetsHash->Init(19, &g_lockTrustMeIAmThreadSafe, m_pHeap)) {
+ ThrowOutOfMemory();
+ }
+}
+
+PsetCacheEntry* SecurityDeclarativeCache::CreateAndCachePset(
+ IN PBYTE pbAttrBlob,
+ IN DWORD cbAttrBlob
+ )
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ PsetCacheEntry *pPCE;
+ LoaderHeap *pHeap;
+ SimpleWriteLockHolder writeLockHolder(m_prCachedPsetsLock);
+
+ //
+ // Check for Duplicates.
+ //
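+ // (The write lock is already held here, so an entry inserted by a racing thread,
+ // e.g. after a caller's GetCachedPset probe came up empty, is found now.)
+ //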
+
+ pPCE = GetCachedPsetWithoutLocks (pbAttrBlob, cbAttrBlob);
+ if (pPCE) {
+ return pPCE;
+ }
+
+ AppDomain *pDomain;
+ PsetCacheKey *pKey;
+ HashDatum datum;
+
+ //
+ // Buffer permission set blob (it might go away if the metadata scope it
+ // came from is closed).
+ //
+
+ pDomain = GetAppDomain ();
+ pHeap = pDomain->GetLowFrequencyHeap ();
+
+ pKey = (PsetCacheKey*) ((void*) pHeap->AllocMem ((S_SIZE_T)sizeof(PsetCacheKey)));
+
+ pKey->Init (pbAttrBlob, cbAttrBlob, TRUE, pHeap);
+
+ pPCE = (PsetCacheEntry*)
+ ((void*) pHeap->AllocMem ((S_SIZE_T)sizeof(PsetCacheEntry)));
+
+ pPCE->Init (pKey, pDomain);
+
+ datum = reinterpret_cast<HashDatum>(pPCE);
+ m_pCachedPsetsHash->InsertValue (pKey, datum);
+
+ return pPCE;
+}
+
+PsetCacheEntry* SecurityDeclarativeCache::GetCachedPset(IN PBYTE pbAttrBlob,
+ IN DWORD cbAttrBlob
+ )
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ PsetCacheEntry *pPCE;
+ SimpleReadLockHolder readLockHolder(m_prCachedPsetsLock);
+
+ pPCE = GetCachedPsetWithoutLocks(pbAttrBlob, cbAttrBlob);
+ return pPCE;
+}
+
+PsetCacheEntry* SecurityDeclarativeCache::GetCachedPsetWithoutLocks(
+ IN PBYTE pbAttrBlob,
+ IN DWORD cbAttrBlob
+ )
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ PsetCacheKey sKey;
+ PsetCacheEntry *pPCE;
+ BOOL found;
+ HashDatum datum;
+
+ sKey.Init (pbAttrBlob, cbAttrBlob, FALSE, NULL);
+
+ found = m_pCachedPsetsHash->GetValue(&sKey, &datum);
+
+ if (found) {
+ pPCE = reinterpret_cast<PsetCacheEntry*>(datum);
+ return pPCE;
+ } else {
+ return NULL;
+ }
+}
+
+SecurityDeclarativeCache::~SecurityDeclarativeCache()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Destroy the hash table even if its entries are allocated from the
+ // appdomain heap: the hash table may have used non-heap memory for its internal data structures
+ if (m_pCachedPsetsHash)
+ {
+ delete m_pCachedPsetsHash;
+ }
+
+ if (m_prCachedPsetsLock)
+ {
+ delete m_prCachedPsetsLock;
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/vm/securitydeclarativecache.h b/src/vm/securitydeclarativecache.h
new file mode 100644
index 0000000000..5ca71c6fd3
--- /dev/null
+++ b/src/vm/securitydeclarativecache.h
@@ -0,0 +1,139 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef __SecurityDeclarativeCache_h__
+#define __SecurityDeclarativeCache_h__
+
+struct PsetCacheKey
+{
+public:
+ PBYTE m_pbPset;
+ DWORD m_cbPset;
+ BOOL m_bCopyArray;
+
+ void Init (PBYTE pbPset, DWORD cbPset, BOOL CopyArray, LoaderHeap *pHeap)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_cbPset = cbPset;
+
+ if (CopyArray) {
+ m_pbPset = (PBYTE) ((void*)pHeap->AllocMem((S_SIZE_T)(cbPset * sizeof(BYTE)))) ;
+ memcpy (m_pbPset, pbPset, cbPset);
+ } else {
+ m_pbPset = pbPset;
+ }
+ }
+
+ BOOL IsEquiv(PsetCacheKey *pOther);
+ DWORD Hash();
+};
+
+//
+// Records a serialized permission set we've seen and decoded.
+//
+
+enum CanUnrestrictedOverride
+{
+ CUO_DontKnow = 0,
+ CUO_Yes = 1,
+ CUO_No = 2,
+};
+
+class PsetCacheEntry
+{
+private:
+ PsetCacheKey* m_pKey;
+ OBJECTHANDLE m_handle;
+ BYTE m_eCanUnrestrictedOverride;
+ bool m_fEmptyPermissionSet;
+
+ bool ContainsBuiltinCASPermsOnlyInternal(DWORD dwAction);
+
+public:
+
+ void Init(PsetCacheKey* pKey, AppDomain* pDomain);
+
+ OBJECTREF CreateManagedPsetObject(DWORD dwAction, bool createEmptySet = false);
+
+ OBJECTREF GetManagedPsetObject()
+ {
+ WRAPPER_NO_CONTRACT;
+ return ObjectFromHandle(m_handle);
+ }
+
+ bool ContainsBuiltinCASPermsOnly (DWORD dwAction);
+ PsetCacheEntry() {m_pKey = NULL;}
+ ~PsetCacheEntry()
+ {
+ if (m_pKey) {
+ delete m_pKey;
+ }
+ }
+};
+
+
+
+class SecurityDeclarativeCache {
+
+private:
+ EEPsetHashTable* m_pCachedPsetsHash;
+ SimpleRWLock* m_prCachedPsetsLock;
+ LoaderHeap* m_pHeap;
+
+ PsetCacheEntry* GetCachedPsetWithoutLocks(IN PBYTE pbAttrBlob,
+ IN DWORD cbAttrBlob
+ );
+
+public:
+ void Init(LoaderHeap *pHeap);
+
+ SecurityDeclarativeCache() :
+ m_pCachedPsetsHash(NULL),
+ m_prCachedPsetsLock(NULL),
+ m_pHeap(NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ ~SecurityDeclarativeCache();
+
+ PsetCacheEntry* CreateAndCachePset(IN PBYTE pbAttrBlob,
+ IN DWORD cbAttrBlob
+ );
+
+ PsetCacheEntry* GetCachedPset(IN PBYTE pbAttrBlob,
+ IN DWORD cbAttrBlob
+ );
+
+
+};
+
+#endif
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/vm/securitydescriptor.cpp b/src/vm/securitydescriptor.cpp
new file mode 100644
index 0000000000..8de070f23e
--- /dev/null
+++ b/src/vm/securitydescriptor.cpp
@@ -0,0 +1,479 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#include "common.h"
+
+#include "security.h"
+#include "eventtrace.h"
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// [SecurityDescriptor]
+// |
+// |
+// +----[PEFileSecurityDescriptor]
+//
+///////////////////////////////////////////////////////////////////////////////
+
+BOOL SecurityDescriptor::CanCallUnmanagedCode () const
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsResolved() || m_pAppDomain->GetSecurityDescriptor()->IsInitializationInProgress());
+ } CONTRACTL_END;
+
+ return CheckSpecialFlag(1 << SECURITY_UNMANAGED_CODE);
+}
+
+#ifndef DACCESS_COMPILE
+
+OBJECTREF SecurityDescriptor::GetGrantedPermissionSet(OBJECTREF* pRefusedPermissions)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsResolved() || m_pAppDomain->GetSecurityDescriptor()->IsInitializationInProgress());
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+#ifndef CROSSGEN_COMPILE
+ if (pRefusedPermissions)
+ *pRefusedPermissions = ObjectFromLazyHandle(m_hGrantDeniedPermissionSet, m_pLoaderAllocator);
+ return ObjectFromLazyHandle(m_hGrantedPermissionSet, m_pLoaderAllocator);
+#else
+ return NULL;
+#endif
+}
+
+//
+// Returns TRUE if the given zone has the given special permission.
+//
+#ifdef FEATURE_CAS_POLICY
+BOOL SecurityDescriptor::CheckQuickCache(SecurityConfig::QuickCacheEntryType all, DWORD dwZone)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_pAppDomain->GetSecurityDescriptor()->IsLegacyCasPolicyEnabled());
+ PRECONDITION(SecurityPolicy::s_fPolicyInitialized);
+ } CONTRACTL_END;
+
+ static const SecurityConfig::QuickCacheEntryType zoneTable[] =
+ {
+ SecurityConfig::FullTrustZoneMyComputer,
+ SecurityConfig::FullTrustZoneIntranet,
+ SecurityConfig::FullTrustZoneTrusted,
+ SecurityConfig::FullTrustZoneInternet,
+ SecurityConfig::FullTrustZoneUntrusted
+ };
+
+ // If additional evidence was provided, then perform the normal
+ // policy resolution. This is true for all AppDomains and also for
+ // assemblies loaded with specific additional evidence. Note that
+ // for the default AppDomain, the policy resolution code path short-
+ // circuits the parsing of the security XML files by granting the
+ // default AppDomain FullTrust.
+
+ if (m_hAdditionalEvidence != NULL)
+ return FALSE;
+
+ BOOL fMachine = SecurityConfig::GetQuickCacheEntry(SecurityConfig::MachinePolicyLevel, all);
+ BOOL fUser = SecurityConfig::GetQuickCacheEntry(SecurityConfig::UserPolicyLevel, all);
+ BOOL fEnterprise = SecurityConfig::GetQuickCacheEntry(SecurityConfig::EnterprisePolicyLevel, all);
+
+ if (fMachine && fUser && fEnterprise)
+ return TRUE;
+
+ // If we can't match for all, try for our zone.
+ if (dwZone == 0xFFFFFFFF)
+ return FALSE;
+
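+ // dwZone is assumed at this point to be a valid SecZone index (0..4), selecting the
+ // matching FullTrustZone* entry from zoneTable above.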
+ fMachine = SecurityConfig::GetQuickCacheEntry(SecurityConfig::MachinePolicyLevel, zoneTable[dwZone]);
+ fUser = SecurityConfig::GetQuickCacheEntry(SecurityConfig::UserPolicyLevel, zoneTable[dwZone]);
+ fEnterprise = SecurityConfig::GetQuickCacheEntry(SecurityConfig::EnterprisePolicyLevel, zoneTable[dwZone]);
+
+ return (fMachine && fUser && fEnterprise);
+}
+#endif // FEATURE_CAS_POLICY
+
+#endif // DACCESS_COMPILE
+
+#ifdef FEATURE_CAS_POLICY
+BOOL SecurityDescriptor::IsEvidenceComputed() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fEvidenceComputed;
+}
+#endif //FEATURE_CAS_POLICY
+
+//
+// This method will return TRUE if this object is fully trusted.
+//
+
+BOOL SecurityDescriptor::IsFullyTrusted ()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ SO_TOLERANT;
+ PRECONDITION(IsResolved() || m_pAppDomain->GetSecurityDescriptor()->IsInitializationInProgress());
+ } CONTRACTL_END;
+
+ return CheckSpecialFlag(1 << SECURITY_FULL_TRUST);
+}
+
+BOOL SecurityDescriptor::IsResolved() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fSDResolved;
+}
+
+DWORD SecurityDescriptor::GetSpecialFlags() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_dwSpecialFlags;
+}
+
+#ifndef DACCESS_COMPILE
+void SecurityDescriptor::SetGrantedPermissionSet(OBJECTREF GrantedPermissionSet,
+ OBJECTREF DeniedPermissionSet,
+ DWORD dwSpecialFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef CROSSGEN_COMPILE
+ GCPROTECT_BEGIN(DeniedPermissionSet);
+ StoreObjectInLazyHandle(m_hGrantedPermissionSet, GrantedPermissionSet, m_pLoaderAllocator);
+ StoreObjectInLazyHandle(m_hGrantDeniedPermissionSet, DeniedPermissionSet, m_pLoaderAllocator);
+ GCPROTECT_END();
+#endif
+
+ if (dwSpecialFlags & (1 << SECURITY_FULL_TRUST))
+ {
+ m_dwSpecialFlags = 0xFFFFFFFF; // Fulltrust means that all possible quick checks should succeed, so we set all flags
+ }
+ else
+ {
+ m_dwSpecialFlags = dwSpecialFlags;
+ }
+
+ m_fSDResolved = TRUE;
+}
+
+
+#ifdef FEATURE_CAS_POLICY
+void SecurityDescriptor::SetEvidence(OBJECTREF evidence)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(evidence != NULL);
+ }
+ CONTRACTL_END;
+
+ if (evidence != NULL)
+ {
+ StoreObjectInLazyHandle(m_hAdditionalEvidence, evidence, m_pLoaderAllocator);
+ SetEvidenceComputed();
+ }
+}
+#endif // FEATURE_CAS_POLICY
+#endif // !DACCESS_COMPILE
+
+AppDomain* SecurityDescriptor::GetDomain() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pAppDomain;
+}
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_CAS_POLICY
+
+//---------------------------------------------------------------------------------------
+//
+// Build an evidence collection which can generate evidence about a PEFile
+//
+// Arguments:
+// pPEFile - PEFile the evidence collection will generate evidence for
+// objHostSuppliedEvidence - additional evidence to merge into the collection supplied by the host
+//
+// Return Value:
+// Evidence collection which targets this PEFile
+//
+// Notes:
+// Calls System.Security.Policy.PEFileEvidenceFactory.CreateSecurityIdentity
+//
+
+// static
+OBJECTREF PEFileSecurityDescriptor::BuildEvidence(PEFile *pPEFile, const OBJECTREF& objHostSuppliedEvidence)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pPEFile));
+ }
+ CONTRACTL_END;
+
+ struct
+ {
+ SAFEHANDLE objPEFile;
+ OBJECTREF objHostSuppliedEvidence;
+ OBJECTREF objEvidence;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+
+ gc.objPEFile = pPEFile->GetSafeHandle();
+ gc.objHostSuppliedEvidence = objHostSuppliedEvidence;
+
+ MethodDescCallSite createSecurityIdentity(METHOD__PEFILE_EVIDENCE_FACTORY__CREATE_SECURITY_IDENTITY);
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(gc.objPEFile),
+ ObjToArgSlot(gc.objHostSuppliedEvidence)
+ };
+
+ gc.objEvidence = createSecurityIdentity.Call_RetOBJECTREF(args);
+
+ END_SO_INTOLERANT_CODE;
+ GCPROTECT_END();
+
+ return gc.objEvidence;
+}
+
+#endif // FEATURE_CAS_POLICY
+
+#ifndef FEATURE_CORECLR
+BOOL PEFileSecurityDescriptor::QuickIsFullyTrusted()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef CROSSGEN_COMPILE
+ return TRUE;
+#else
+ if (!m_pAppDomain->GetSecurityDescriptor()->IsLegacyCasPolicyEnabled())
+ {
+ return TRUE;
+ }
+ else if (m_pAppDomain->IsCompilationDomain())
+ {
+ return TRUE;
+ }
+ else
+ {
+ return CheckQuickCache(SecurityConfig::FullTrustAll, GetZone());
+ }
+#endif
+}
+
+#ifndef CROSSGEN_COMPILE
+//---------------------------------------------------------------------------------------
+//
+// Get the evidence for this PE file
+//
+
+OBJECTREF PEFileSecurityDescriptor::GetEvidence()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_pAppDomain == GetAppDomain());
+ INJECT_FAULT(COMPlusThrowOM());
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // If we already have evidence, then just return that
+ if (IsEvidenceComputed())
+ return ObjectFromLazyHandle(m_hAdditionalEvidence, m_pLoaderAllocator);
+
+ struct
+ {
+ OBJECTREF objHostProvidedEvidence;
+ OBJECTREF objEvidence;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+
+#if CHECK_APP_DOMAIN_LEAKS
+ if (g_pConfig->AppDomainLeaks())
+ _ASSERTE(gc.objHostProvidedEvidence == NULL || GetAppDomain() == gc.objHostProvidedEvidence->GetAppDomain());
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+ gc.objHostProvidedEvidence = ObjectFromLazyHandle(m_hAdditionalEvidence, m_pLoaderAllocator);
+ gc.objEvidence = PEFileSecurityDescriptor::BuildEvidence(m_pPEFile, gc.objHostProvidedEvidence);
+ SetEvidence(gc.objEvidence);
+
+#if CHECK_APP_DOMAIN_LEAKS
+ if (g_pConfig->AppDomainLeaks())
+ _ASSERTE(gc.objEvidence == NULL || GetAppDomain() == gc.objEvidence->GetAppDomain());
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+ END_SO_INTOLERANT_CODE;
+
+ GCPROTECT_END();
+
+ return gc.objEvidence;
+}
+
+DWORD PEFileSecurityDescriptor::GetZone()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(m_pAppDomain->GetSecurityDescriptor()->IsLegacyCasPolicyEnabled());
+ }
+ CONTRACTL_END;
+
+ SecZone dwZone = NoZone;
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+
+ StackSString codebase;
+ BYTE rbUniqueID[MAX_SIZE_SECURITY_ID];
+ DWORD cbUniqueID = sizeof(rbUniqueID);
+
+ m_pPEFile->GetSecurityIdentity(codebase, &dwZone, 0, rbUniqueID, &cbUniqueID);
+ END_SO_INTOLERANT_CODE;
+ return dwZone;
+}
+#endif // !CROSSGEN_COMPILE
+
+void PEFileSecurityDescriptor::Resolve()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ if (IsResolved())
+ return;
+ ResolveWorker();
+}
+
+void PEFileSecurityDescriptor::ResolveWorker()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (NingenEnabled()) {
+ SetGrantedPermissionSet(NULL, NULL, 0xFFFFFFFF);
+ }
+
+#ifndef CROSSGEN_COMPILE
+ struct _gc
+ {
+ OBJECTREF evidence; // Object containing evidence
+ OBJECTREF granted; // Policy based Granted Permission
+ OBJECTREF grantdenied; // Policy based explicitly Denied Permissions
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ DWORD dwSpecialFlags = 0;
+ if (QuickIsFullyTrusted())
+ {
+ Security::GetPermissionInstance(&gc.granted, SECURITY_FULL_TRUST);
+ dwSpecialFlags = 0xFFFFFFFF;
+ }
+ else
+ {
+ if (IsEvidenceComputed())
+ {
+ gc.evidence = ObjectFromLazyHandle(m_hAdditionalEvidence, m_pLoaderAllocator);
+ }
+ else
+ {
+ gc.evidence = GetEvidence();
+ }
+
+ if (!m_pAppDomain->GetSecurityDescriptor()->IsLegacyCasPolicyEnabled())
+ {
+ gc.granted = SecurityPolicy::ResolveGrantSet(gc.evidence, &dwSpecialFlags, FALSE);
+ }
+ else
+ {
+ gc.granted = SecurityPolicy::ResolveCasPolicy(gc.evidence,
+ NULL,
+ NULL,
+ NULL,
+ &gc.grantdenied,
+ &dwSpecialFlags,
+ FALSE);
+ }
+ }
+
+ SetGrantedPermissionSet(gc.granted, NULL, dwSpecialFlags);
+
+ GCPROTECT_END();
+#endif // CROSSGEN_COMPILE
+}
+
+BOOL PEFileSecurityDescriptor::AllowBindingRedirects()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsResolved());
+ } CONTRACTL_END;
+
+ ETWOnStartup (AllowBindingRedirs_V1, AllowBindingRedirsEnd_V1);
+
+ return CheckSpecialFlag(1 << SECURITY_BINDING_REDIRECTS);
+}
+
+#endif // FEATURE_CORECLR
+
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/securitydescriptor.h b/src/vm/securitydescriptor.h
new file mode 100644
index 0000000000..22aecf5dd8
--- /dev/null
+++ b/src/vm/securitydescriptor.h
@@ -0,0 +1,197 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef __SECURITYDESCRIPTOR_H__
+#define __SECURITYDESCRIPTOR_H__
+
+#include "securityconfig.h"
+#include "securityattributes.h"
+#include "securitypolicy.h"
+
+class ISecurityDescriptor;
+class IPEFileSecurityDescriptor;
+
+// Security flags for the objects that store security information
+#define CORSEC_ASSERTED 0x000020 // Asserted permission set present on frame
+#define CORSEC_DENIED 0x000040 // Denied permission set present on frame
+#define CORSEC_REDUCED 0x000080 // Reduced permission set present on frame
+
+// Inline Functions to support lazy handles - read/write to handle that may not have been created yet
+// SecurityDescriptor and ApplicationSecurityDescriptor currently use these
+inline OBJECTREF ObjectFromLazyHandle(LOADERHANDLE handle, LoaderAllocator* la);
+
+#ifndef DACCESS_COMPILE
+
+inline void StoreObjectInLazyHandle(LOADERHANDLE& handle, OBJECTREF ref, LoaderAllocator* la);
+
+
+#endif // #ifndef DACCESS_COMPILE
+
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// [SecurityDescriptor]
+// |
+// +----[PEFileSecurityDescriptor]
+// |
+// +----[ApplicationSecurityDescriptor]
+// |
+// +----[AssemblySecurityDescriptor]
+//
+// [SharedSecurityDescriptor]
+//
+///////////////////////////////////////////////////////////////////////////////
+//
+// A Security Descriptor is placed on AppDomain and Assembly (unmanaged) objects.
+// AppDomain and Assembly could be from different zones.
+// Security Descriptor could also be placed on a native frame.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// SecurityDescriptor is the base class for all security descriptors.
+// Extend this class to implement SecurityDescriptors for Assemblies and
+// AppDomains.
+//
+// WARNING : Do not add virtual methods to this class! Doing so results
+// in derived classes such as AssemblySecurityDescriptor having two v-table
+// pointers, which the DAC doesn't support.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+class SecurityDescriptor
+{
+protected:
+#ifdef FEATURE_CAS_POLICY
+ LOADERHANDLE m_hAdditionalEvidence; // Evidence Object
+#endif // FEATURE_CAS_POLICY
+
+ // The unmanaged DomainAssembly object
+ DomainAssembly *m_pAssem;
+
+ // The PEFile associated with the DomainAssembly
+ PEFile *m_pPEFile;
+
+ // The AppDomain context
+ AppDomain* m_pAppDomain;
+
+ BOOL m_fSDResolved;
+#ifdef FEATURE_CAS_POLICY
+ BOOL m_fEvidenceComputed;
+#endif // FEATURE_CAS_POLICY
+
+ DWORD m_dwSpecialFlags;
+ LoaderAllocator *m_pLoaderAllocator;
+
+private:
+ LOADERHANDLE m_hGrantedPermissionSet; // Granted Permission
+ LOADERHANDLE m_hGrantDeniedPermissionSet;// Specifically Denied Permissions
+
+public:
+ BOOL IsFullyTrusted();
+ DWORD GetSpecialFlags() const;
+
+ AppDomain* GetDomain() const;
+ BOOL CanCallUnmanagedCode() const;
+
+#ifdef FEATURE_CAS_POLICY
+
+#ifndef DACCESS_COMPILE
+ void SetEvidence(OBJECTREF evidence);
+ BOOL CheckQuickCache(SecurityConfig::QuickCacheEntryType all, DWORD dwZone);
+#endif // FEATURE_CAS_POLICY
+ BOOL IsEvidenceComputed() const;
+ inline void SetEvidenceComputed();
+#endif // FEATURE_CAS_POLICY
+
+#ifndef DACCESS_COMPILE
+ void SetGrantedPermissionSet(OBJECTREF GrantedPermissionSet,
+ OBJECTREF DeniedPermissionSet,
+ DWORD dwSpecialFlags);
+ OBJECTREF GetGrantedPermissionSet(OBJECTREF* pRefusedPermissions = NULL);
+#endif // DACCESS_COMPILE
+
+ BOOL IsResolved() const;
+
+ // Checks for one of the special security flags such as FullTrust or UnmanagedCode
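+ // (e.g. CheckSpecialFlag(1 << SECURITY_FULL_TRUST), which is how IsFullyTrusted uses it)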
+ FORCEINLINE BOOL CheckSpecialFlag (DWORD flags) const;
+
+ // Used to locate the assembly
+ inline PEFile *GetPEFile() const;
+
+protected:
+ //--------------------
+ // Constructor
+ //--------------------
+#ifndef DACCESS_COMPILE
+ inline SecurityDescriptor(AppDomain *pAppDomain, DomainAssembly *pAssembly, PEFile* pPEFile, LoaderAllocator *pLoaderAllocator);
+#endif // #ifndef DACCESS_COMPILE
+};
+
+template<typename IT>
+class SecurityDescriptorBase : public IT, public SecurityDescriptor
+{
+public:
+ VPTR_ABSTRACT_VTABLE_CLASS(SecurityDescriptorBase, IT) // needed for the DAC
+
+ inline SecurityDescriptorBase(AppDomain *pAppDomain, DomainAssembly *pAssembly, PEFile* pPEFile, LoaderAllocator *pLoaderAllocator);
+
+public:
+ virtual BOOL IsFullyTrusted() { return SecurityDescriptor::IsFullyTrusted(); }
+ virtual BOOL CanCallUnmanagedCode() const { return SecurityDescriptor::CanCallUnmanagedCode(); }
+ virtual DWORD GetSpecialFlags() const { return SecurityDescriptor::GetSpecialFlags(); }
+
+ virtual AppDomain* GetDomain() const { return SecurityDescriptor::GetDomain(); }
+
+ virtual BOOL IsResolved() const { return SecurityDescriptor::IsResolved(); }
+
+#ifdef FEATURE_CAS_POLICY
+ virtual BOOL IsEvidenceComputed() const { return SecurityDescriptor::IsEvidenceComputed(); }
+#ifndef DACCESS_COMPILE
+ virtual void SetEvidence(OBJECTREF evidence) { SecurityDescriptor::SetEvidence(evidence); }
+#endif // DACCESS_COMPILE
+#endif // FEATURE_CAS_POLICY
+
+#ifndef DACCESS_COMPILE
+ virtual OBJECTREF GetGrantedPermissionSet(OBJECTREF* RefusedPermissions = NULL) { return SecurityDescriptor::GetGrantedPermissionSet(RefusedPermissions); }
+#endif
+};
+
+#ifndef FEATURE_CORECLR
+class PEFileSecurityDescriptor : public SecurityDescriptorBase<IPEFileSecurityDescriptor>
+{
+public:
+ virtual BOOL AllowBindingRedirects();
+ BOOL QuickIsFullyTrusted();
+ virtual VOID Resolve();
+
+#ifndef DACCESS_COMPILE
+ inline PEFileSecurityDescriptor(AppDomain* pDomain, PEFile *pPEFile);
+#endif
+
+#ifdef FEATURE_CAS_POLICY
+ virtual OBJECTREF GetEvidence();
+ DWORD GetZone();
+#endif // FEATURE_CAS_POLICY
+
+
+#ifdef FEATURE_CAS_POLICY
+ static
+ OBJECTREF BuildEvidence(PEFile *pPEFile, const OBJECTREF& objHostSuppliedEvidence);
+#endif // FEATURE_CAS_POLICY
+private:
+ VOID ResolveWorker();
+};
+#endif // !FEATURE_CORECLR
+
+#include "securitydescriptor.inl"
+
+#endif // !__SECURITYDESCRIPTOR_H__
+
diff --git a/src/vm/securitydescriptor.inl b/src/vm/securitydescriptor.inl
new file mode 100644
index 0000000000..a993c0d964
--- /dev/null
+++ b/src/vm/securitydescriptor.inl
@@ -0,0 +1,108 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef __SECURITYDESCRIPTOR_INL__
+#define __SECURITYDESCRIPTOR_INL__
+
+// Inline functions to support lazy handles - reads and writes to a handle that may not have been created yet.
+// SecurityDescriptor and ApplicationSecurityDescriptor currently use these.
+inline OBJECTREF ObjectFromLazyHandle(LOADERHANDLE handle, LoaderAllocator *pLoaderAllocator)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (handle != NULL)
+ {
+ return pLoaderAllocator->GetHandleValue(handle);
+ }
+ else
+ {
+ return NULL;
+ }
+}
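+
+// Illustrative sketch (not part of this change): a typical read through the
+// helper above, from cooperative GC mode as the contract requires:
+//
+//   OBJECTREF refGranted = ObjectFromLazyHandle(m_hGrantedPermissionSet,
+//                                               m_pLoaderAllocator);
+//   if (refGranted != NULL)
+//   {
+//       // the handle exists and holds a non-null granted permission set
+//   }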
+
+#ifndef DACCESS_COMPILE
+
+inline SecurityDescriptor::SecurityDescriptor(AppDomain *pAppDomain,
+ DomainAssembly *pAssembly,
+ PEFile* pPEFile,
+ LoaderAllocator *pLoaderAllocator) :
+#ifdef FEATURE_CAS_POLICY
+ m_hAdditionalEvidence(NULL),
+#endif // FEATURE_CAS_POLICY
+ m_pAssem(pAssembly),
+ m_pPEFile(pPEFile),
+ m_pAppDomain(pAppDomain),
+ m_fSDResolved(FALSE),
+#ifdef FEATURE_CAS_POLICY
+ m_fEvidenceComputed(FALSE),
+#endif // FEATURE_CAS_POLICY
+ m_dwSpecialFlags(0),
+ m_pLoaderAllocator(pLoaderAllocator),
+ m_hGrantedPermissionSet(NULL),
+ m_hGrantDeniedPermissionSet(NULL)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+#endif // !DACCESS_COMPILE
+
+#ifdef FEATURE_CAS_POLICY
+inline void SecurityDescriptor::SetEvidenceComputed()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fEvidenceComputed = TRUE;
+}
+
+#endif // FEATURE_CAS_POLICY
+
+// Checks for one of the special security flags such as FullTrust or UnmanagedCode
+FORCEINLINE BOOL SecurityDescriptor::CheckSpecialFlag (DWORD flags) const
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return (m_dwSpecialFlags & flags);
+}
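+
+// Illustrative sketch (not part of this change): callers test a special-flag
+// bit by shifting the corresponding security constant, e.g.
+//
+//   if (pSecDesc->CheckSpecialFlag(1 << SECURITY_SKIP_VER))
+//   {
+//       // skip-verification was granted to this descriptor
+//   }
+//
+// AssemblySecurityDescriptor::CanSkipVerification is a real caller of this form.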
+
+inline PEFile *SecurityDescriptor::GetPEFile() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pPEFile;
+}
+
+#ifndef DACCESS_COMPILE
+template<typename IT>
+inline SecurityDescriptorBase<IT>::SecurityDescriptorBase(AppDomain *pAppDomain,
+ DomainAssembly *pAssembly,
+ PEFile* pPEFile,
+ LoaderAllocator *pLoaderAllocator) :
+ SecurityDescriptor(pAppDomain, pAssembly, pPEFile, pLoaderAllocator)
+{
+}
+#endif // !DACCESS_COMPILE
+
+#ifndef FEATURE_CORECLR
+
+#ifndef DACCESS_COMPILE
+inline PEFileSecurityDescriptor::PEFileSecurityDescriptor(AppDomain* pDomain, PEFile *pPEFile) :
+ SecurityDescriptorBase<IPEFileSecurityDescriptor>(pDomain, NULL, pPEFile, pDomain->GetLoaderAllocator())
+{
+ LIMITED_METHOD_CONTRACT;
+}
+#endif // !DACCESS_COMPILE
+
+#endif // !FEATURE_CORECLR
+
+#endif // !__SECURITYDESCRIPTOR_INL__
diff --git a/src/vm/securitydescriptorappdomain.cpp b/src/vm/securitydescriptorappdomain.cpp
new file mode 100644
index 0000000000..48e0e7bb92
--- /dev/null
+++ b/src/vm/securitydescriptorappdomain.cpp
@@ -0,0 +1,787 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#include "common.h"
+#include "security.h"
+#ifdef FEATURE_REMOTING
+#include "crossdomaincalls.h"
+#else
+#include "callhelpers.h"
+#endif
+
+#ifndef DACCESS_COMPILE
+
+#ifndef FEATURE_CORECLR
+BOOL ApplicationSecurityDescriptor::QuickIsFullyTrusted()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+#ifdef CROSSGEN_COMPILE
+ return TRUE;
+#else
+ if (IsDefaultAppDomain())
+ {
+ return TRUE;
+ }
+
+ // NGEN is always done in full trust
+ if (m_pAppDomain->IsCompilationDomain())
+ {
+ return TRUE;
+ }
+
+ // Check if we need to call the HostSecurityManager.
+ if (CallHostSecurityManager())
+ {
+ return FALSE;
+ }
+
+ APPDOMAINREF adRef = static_cast<APPDOMAINREF>(m_pAppDomain->GetExposedObject());
+
+ // - If this AppDomain is a standard domain (full trust homogeneous), we are full trust
+ // - If this is a homogeneous case, get the PermissionSet from managed code
+ // - If CAS policy is not enabled, then we are fully trusted
+ // - Otherwise, check the quick cache
+ if (adRef->GetIsFastFullTrustDomain())
+ {
+ return TRUE;
+ }
+ else if (IsHomogeneous())
+ {
+ // A homogenous domain will be fully trusted if its grant set is full trust
+ APPLICATIONTRUSTREF appTrustRef = static_cast<APPLICATIONTRUSTREF>(adRef->GetApplicationTrust());
+ POLICYSTATEMENTREF psRef = static_cast<POLICYSTATEMENTREF>(appTrustRef->GetPolicyStatement());
+ PERMISSIONSETREF grantSetRef = psRef->GetPermissionSet();
+ return grantSetRef->IsUnrestricted();
+ }
+ else if (!IsLegacyCasPolicyEnabled())
+ {
+ return TRUE;
+ }
+ else
+ {
+ return CheckQuickCache(SecurityConfig::FullTrustAll, GetZone());
+ }
+#endif // CROSSGEN_COMPILE
+}
+#endif // FEATURE_CORECLR
+
+#ifdef FEATURE_CAS_POLICY
+OBJECTREF ApplicationSecurityDescriptor::GetEvidence()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_pAppDomain == GetAppDomain());
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ OBJECTREF retval = ObjectFromLazyHandle(m_hAdditionalEvidence, m_pLoaderAllocator);
+ return retval;
+}
+#endif // FEATURE_CAS_POLICY
+
+void ApplicationSecurityDescriptor::Resolve()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ if (IsResolved())
+ return;
+
+#ifndef CROSSGEN_COMPILE
+ ResolveWorker();
+#else
+ SetGrantedPermissionSet(NULL, NULL, 0xFFFFFFFF);
+#endif
+}
+
+#ifndef CROSSGEN_COMPILE
+void ApplicationSecurityDescriptor::ResolveWorker()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ struct _gc
+ {
+ OBJECTREF evidence; // Object containing evidence
+ OBJECTREF granted; // Policy based Granted Permission
+ OBJECTREF grantdenied; // Policy based explicitly Denied Permissions
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ DWORD dwSpecialFlags;
+
+ // On debug builds do a pre-resolution so that we can validate pre-resolve and post-resolve match up
+ // assuming no host interference.
+#ifdef _DEBUG
+ // We shouldn't be causing the first pre-resolution if we needed to cache this state for the host
+ _ASSERTE(m_fIsPreResolved ||
+ (GetAppDomain()->GetAppDomainManagerInitializeNewDomainFlags() & eInitializeNewDomainFlags_NoSecurityChanges) == eInitializeNewDomainFlags_None);
+
+ BOOL fPreResolveFullTrust = FALSE;
+ BOOL fPreResolveHomogeneous = FALSE;
+ PreResolve(&fPreResolveFullTrust, &fPreResolveHomogeneous);
+#endif // _DEBUG
+
+#ifdef FEATURE_CORECLR
+ // CoreCLR has two kinds of AppDomains: sandboxed ones, which have the homogeneous flag set, and full-trust ones.
+ if (!IsHomogeneous())
+ {
+ Security::GetPermissionInstance(&gc.granted, SECURITY_FULL_TRUST);
+ dwSpecialFlags = 0xFFFFFFFF;
+ }
+ else
+ {
+ APPDOMAINREF adRef = (APPDOMAINREF)m_pAppDomain->GetExposedObject();
+ if (adRef->GetIsFastFullTrustDomain())
+ {
+ Security::GetPermissionInstance(&gc.granted, SECURITY_FULL_TRUST);
+ dwSpecialFlags = 0xFFFFFFFF;
+ }
+ else
+ {
+ APPLICATIONTRUSTREF appTrustRef = (APPLICATIONTRUSTREF)adRef->GetApplicationTrust();
+ POLICYSTATEMENTREF psRef = appTrustRef->GetPolicyStatement();
+ gc.granted = (OBJECTREF)psRef->GetPermissionSet();
+
+ // We can trust the grant set special flags, since only mscorlib can access the root
+ // ApplicationTrust reference.
+ dwSpecialFlags = appTrustRef->GetGrantSetSpecialFlags();
+ }
+ }
+
+#else
+ if (QuickIsFullyTrusted())
+ {
+ Security::GetPermissionInstance(&gc.granted, SECURITY_FULL_TRUST);
+ dwSpecialFlags = 0xFFFFFFFF;
+ }
+ // We need to check the homogeneous flag directly rather than going through the accessor method, since
+ // that method also considers the presence of a HostSecurityManager. The HostSecurityManager should not
+ // affect the domain's grant set at this point, however, as it does not have any domain policy resolution
+ // callbacks; if it wanted to customize the homogeneous domain grant set it needed to do that when we called
+ // its InitializeNewDomain. Longer term, IsHomogeneous should not consider the HostSecurityManager at all.
+ else if (m_fHomogeneous)
+ {
+ // Homogeneous AppDomain case
+
+ APPDOMAINREF adRef = (APPDOMAINREF)m_pAppDomain->GetExposedObject();
+ _ASSERTE( adRef != NULL);
+
+ if (adRef->GetIsFastFullTrustDomain())
+ {
+ Security::GetPermissionInstance(&gc.granted, SECURITY_FULL_TRUST);
+ dwSpecialFlags = 0xFFFFFFFF;
+ }
+ else
+ {
+ APPLICATIONTRUSTREF appTrustRef = (APPLICATIONTRUSTREF)adRef->GetApplicationTrust();
+ _ASSERTE(appTrustRef != NULL);
+ POLICYSTATEMENTREF psRef = appTrustRef->GetPolicyStatement();
+ _ASSERTE(psRef != NULL);
+ gc.granted = (OBJECTREF)psRef->GetPermissionSet();
+
+ // We can trust the grant set special flags, since only mscorlib can access the root
+ // ApplicationTrust reference.
+ dwSpecialFlags = appTrustRef->GetGrantSetSpecialFlags();
+ }
+ }
+ else
+ {
+ // Regular AppDomain policy resolution based on AppDomain evidence
+ if (IsEvidenceComputed())
+ {
+ gc.evidence = ObjectFromLazyHandle(m_hAdditionalEvidence, m_pLoaderAllocator);
+ }
+ else
+ {
+ gc.evidence = GetEvidence();
+ }
+
+ if (!IsLegacyCasPolicyEnabled())
+ {
+ // Either we have a host security manager or a homogeneous AppDomain that could make this domain
+ // partially trusted. Call out to managed code to get the grant set.
+ gc.granted = SecurityPolicy::ResolveGrantSet(gc.evidence, &dwSpecialFlags, FALSE);
+ }
+ else
+ {
+ // Legacy CAS policy is enabled, so do a full CAS resolve
+ gc.granted = SecurityPolicy::ResolveCasPolicy(gc.evidence,
+ NULL,
+ NULL,
+ NULL,
+ &gc.grantdenied,
+ &dwSpecialFlags,
+ FALSE);
+ }
+ }
+#endif
+
+ SetGrantedPermissionSet(gc.granted, NULL, dwSpecialFlags);
+
+#ifdef FEATURE_CAS_POLICY
+ // If the host promised not to modify the security of the AppDomain, throw an InvalidOperationException
+ // if it did. We specifically want to check the cached version of this state on the security
+ // descriptor, rather than any version calculated earlier on in this method since the domain manager has
+ // already run by the time ResolveWorker is entered.
+ if (GetAppDomain()->GetAppDomainManagerInitializeNewDomainFlags() & eInitializeNewDomainFlags_NoSecurityChanges)
+ {
+ _ASSERTE(m_fIsPreResolved);
+
+ if (!!m_fPreResolutionFullTrust != !!IsFullyTrusted())
+ {
+ COMPlusThrow(kInvalidOperationException, W("InvalidOperation_HostModifiedSecurityState"));
+ }
+
+ if (!!m_fPreResolutionHomogeneous != !!IsHomogeneous())
+ {
+ COMPlusThrow(kInvalidOperationException, W("InvalidOperation_HostModifiedSecurityState"));
+ }
+ }
+#endif // FEATURE_CAS_POLICY
+
+#if defined(_DEBUG) && !defined(FEATURE_CORECLR)
+ // Make sure that our PreResolve routine is consistent with our actual resolution results. This is
+ // only required to be true in the absence of an AppDomainManager.
+ //
+ // If any assert fires in this block, it means that PreResolve isn't correctly figuring out what the
+ // incoming security state of an AppDomain is going to resolve into.
+ if (!GetAppDomain()->HasAppDomainManagerInfo())
+ {
+#ifdef FEATURE_CLICKONCE
+ if (GetAppDomain()->IsClickOnceAppDomain())
+ {
+ _ASSERTE(!!IsHomogeneous() == !!fPreResolveHomogeneous);
+ // We don't check the grant set since we don't attempt to pre-resolve it - pre-resolution should
+ // always have come back partial trust
+ _ASSERTE(!fPreResolveFullTrust);
+ }
+ else
+#endif // FEATURE_CLICKONCE
+ if (IsHomogeneous())
+ {
+ _ASSERTE(!!IsHomogeneous() == !!fPreResolveHomogeneous);
+ _ASSERTE(!!IsFullyTrusted() == !!fPreResolveFullTrust);
+ }
+ else
+ {
+ _ASSERTE(!!IsHomogeneous() == !!fPreResolveHomogeneous);
+ // We don't check grant sets on heterogeneous domains since we never attempt to pre-resolve them.
+ }
+ }
+#endif // _DEBUG && !FEATURE_CORECLR
+
+ GCPROTECT_END();
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Determine the security state of an AppDomain before the domain is fully configured.
+// This method is used to detect the input configuration of a domain - specifically, whether it
+// is homogeneous and fully trusted before domain setup is completed.
+//
+// Note that this state may not reflect the final state of the AppDomain when it is
+// configured, since components like the AppDomainManager can modify these bits during execution.
+//
+
+void ApplicationSecurityDescriptor::PreResolve(BOOL *pfIsFullyTrusted, BOOL *pfIsHomogeneous)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pfIsFullyTrusted));
+ PRECONDITION(CheckPointer(pfIsHomogeneous));
+ PRECONDITION(IsInitializationInProgress()); // We shouldn't be looking at the pre-resolved state if we've already done real resolution
+ }
+ CONTRACTL_END;
+
+ if (m_fIsPreResolved)
+ {
+ *pfIsFullyTrusted = m_fPreResolutionFullTrust;
+ *pfIsHomogeneous = m_fPreResolutionHomogeneous;
+ return;
+ }
+
+ GCX_COOP();
+
+#ifdef FEATURE_CORECLR
+ // On CoreCLR all domains are partial trust and homogeneous
+ m_fPreResolutionFullTrust = FALSE;
+ m_fPreResolutionHomogeneous = TRUE;
+
+#else // !FEATURE_CORECLR
+ if (GetAppDomain()->IsClickOnceAppDomain())
+ {
+ // In the ClickOnce case we can't pre-resolve the grant set because it's entirely in the control of
+ // the ApplicationSecurityManager. We conservatively assume that it will be partial trust; however,
+ // we always know that the domain will be homogeneous.
+ m_fPreResolutionFullTrust = FALSE;
+ m_fPreResolutionHomogeneous = TRUE;
+ }
+ else if (GetAppDomain()->IsCompilationDomain())
+ {
+ // NGEN is always full trust and homogeneous
+ m_fPreResolutionFullTrust = TRUE;
+ m_fPreResolutionHomogeneous = TRUE;
+ }
+ else if (GetAppDomain()->IsDefaultDomain())
+ {
+ // Barring any shenanigans from the AppDomainManager, we know that the default domain will be fully
+ // trusted and homogeneous in the standard case, but heterogeneous in the legacy CAS policy case.
+ m_fPreResolutionFullTrust = TRUE;
+ m_fPreResolutionHomogeneous = !Security::IsProcessWideLegacyCasPolicyEnabled();
+ }
+ else
+ {
+ // In all other AppDomains we need to consult the incoming AppDomainSetup in order to figure out if
+ // the domain is being setup as full or partial trust.
+ CLR_BOOL fPreResolutionFullTrust = FALSE;
+ CLR_BOOL fPreResolutionHomogeneous = FALSE;
+
+ MethodDescCallSite preResolve(METHOD__SECURITY_ENGINE__PRE_RESOLVE);
+
+ ARG_SLOT args[] =
+ {
+ PtrToArgSlot(&fPreResolutionFullTrust),
+ PtrToArgSlot(&fPreResolutionHomogeneous)
+ };
+
+ preResolve.Call(args);
+
+ m_fPreResolutionFullTrust = !!fPreResolutionFullTrust;
+ m_fPreResolutionHomogeneous = !!fPreResolutionHomogeneous;
+ }
+#endif // FEATURE_CORECLR
+
+ *pfIsFullyTrusted = m_fPreResolutionFullTrust;
+ *pfIsHomogeneous = m_fPreResolutionHomogeneous;
+ m_fIsPreResolved = TRUE;
+}
+#endif // CROSSGEN_COMPILE
+
+#ifdef FEATURE_CAS_POLICY
+//---------------------------------------------------------------------------------------
+//
+// Determine if an AppDomain should allow an assembly to be loaded via LoadFrom from a remote location.
+// Since pre-v4 versions of the CLR would implicitly sandbox this load, we only want to
+// allow this if the application has either acknowledged it to be safe, or if the application
+// has taken control of sandboxing itself.
+//
+// This method returns true if the load should be allowed, false if it should be blocked
+// from a remote location.
+//
+
+BOOL ApplicationSecurityDescriptor::AllowsLoadsFromRemoteSources()
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // If the application has explicitly enabled remote LoadFroms then we should allow the load
+ if (Security::CanLoadFromRemoteSources())
+ {
+ return true;
+ }
+
+ // Otherwise, we only allow the load if the assembly is going to be sandboxed (or explicitly not sandboxed
+ // by a host). That can happen if we've got legacy CAS policy enabled, if we're in a homogeneous AppDomain,
+ // or if there is a HostSecurityManager that cares about assembly policy.
+ //
+ // Note that we don't allow a LoadFrom of a remote source in a domain that had its ApplicationTrust supplied by
+ // the CLR, since that domain would have implicitly sandboxed the LoadFrom in CLR v2. Instead, these
+ // domains require that there be a HostSecurityManager present which sets up the sandbox.
+
+ if (IsHomogeneous() && !m_fRuntimeSuppliedHomogenousGrantSet)
+ {
+ return true;
+ }
+
+ if (IsLegacyCasPolicyEnabled())
+ {
+ return true;
+ }
+
+ return false;
+}
+
+DWORD ApplicationSecurityDescriptor::GetZone()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(IsLegacyCasPolicyEnabled());
+ }
+ CONTRACTL_END;
+
+ SecZone dwZone = NoZone;
+ if (m_pAppDomain->GetRootAssembly() != NULL && m_pAppDomain->IsDefaultDomain())
+ {
+ LPCWSTR wszAsmPath = m_pAppDomain->GetRootAssembly()->GetManifestFile()->GetPath();
+
+ if (wszAsmPath)
+ {
+ StackSString ssPath( W("file://") );
+ ssPath.Append( wszAsmPath );
+
+ dwZone = SecurityPolicy::MapUrlToZone(ssPath.GetUnicode());
+ }
+ }
+
+ return dwZone;
+}
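+
+// Note: GetZone only computes a zone for the default domain's root assembly;
+// its local path is wrapped in a file:// URL before the MapUrlToZone lookup,
+// so a typical local install would be expected to map to the local-machine
+// zone. All other cases return NoZone.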
+#endif // FEATURE_CAS_POLICY
+
+
+//
+// PLS (PermissionListSet) optimization Implementation
+// The idea of the PLS optimization is to maintain the intersection
+// of the grant sets of all assemblies loaded into the AppDomain (plus
+// the grant set of the AppDomain itself) and the union of all denied
+// sets. When a demand is evaluated, we first check the permission
+// that is being demanded against the combined grant and denied set
+// and if that check succeeds, then we know the demand is satisfied
+// in the AppDomain without having to perform an entire stack walk.
+//
+
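+// Illustrative sketch (not part of this change): the intended fast path for a
+// demand, assuming the PLS has been kept up to date:
+//
+//   if (CheckDomainWideSpecialFlag(dwDemandSpecialFlags))
+//       return TRUE;   // satisfied domain-wide, no stack walk needed
+//   // otherwise evaluate the demand against the managed PermissionListSet
+//   // (see CheckPLS below) before falling back to a full stack walk
+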
+// Creates the PermissionListSet which holds the AppDomain level intersection of
+// granted and denied permission sets of all assemblies in the domain and updates
+// the granted and denied set with those of the AppDomain.
+void ApplicationSecurityDescriptor::InitializePLS()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsResolved());
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+#ifdef FEATURE_PLS
+ struct _gc {
+ OBJECTREF refGrantedSet;
+ OBJECTREF refDeniedSet;
+ OBJECTREF refPermListSet;
+ OBJECTREF refRetVal;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ if (!IsFullyTrusted()) {
+ GCPROTECT_BEGIN(gc);
+
+ gc.refPermListSet = ObjectFromLazyHandle(m_hDomainPermissionListSet, m_pLoaderAllocator);
+ gc.refGrantedSet = GetGrantedPermissionSet(&gc.refDeniedSet);
+
+ MethodDescCallSite updateAppDomainPLS(METHOD__SECURITY_ENGINE__UPDATE_APPDOMAIN_PLS);
+ ARG_SLOT args[] = {
+ ObjToArgSlot(gc.refPermListSet),
+ ObjToArgSlot(gc.refGrantedSet),
+ ObjToArgSlot(gc.refDeniedSet),
+ };
+ gc.refRetVal = updateAppDomainPLS.Call_RetOBJECTREF(args);
+
+ GCPROTECT_END();
+ }
+
+ StoreObjectInLazyHandle(m_hDomainPermissionListSet, gc.refRetVal, m_pLoaderAllocator);
+#endif // FEATURE_PLS
+ m_dwDomainWideSpecialFlags = m_dwSpecialFlags;
+}
+
+// Whenever a new assembly is added to the domain, we need to update the PermissionListSet
+void ApplicationSecurityDescriptor::AddNewSecDescToPLS(AssemblySecurityDescriptor *pNewSecDescriptor)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pNewSecDescriptor->IsResolved());
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ //
+ // If the assembly is fully trusted, this should be a no-op as the PLS is unaffected.
+ // Note it's Ok to call this method before the AppDomain is fully initialized (and so
+ // before the PLS is created for the AppDomain) because we enforce that all assemblies
+ // loaded during that phase are fully trusted.
+ //
+
+ if (!pNewSecDescriptor->IsFullyTrusted()) {
+#ifdef FEATURE_PLS
+ struct _gc {
+ OBJECTREF refGrantedSet;
+ OBJECTREF refDeniedSet;
+ OBJECTREF refPermListSet;
+ OBJECTREF refRetVal;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.refGrantedSet = pNewSecDescriptor->GetGrantedPermissionSet(&gc.refDeniedSet);
+ if (gc.refDeniedSet != NULL)
+ m_fContainsAnyRefusedPermissions = TRUE;
+
+ // Ensure that the handle is created so that the compare exchange below will work correctly
+ if (m_hDomainPermissionListSet == NULL)
+ {
+ LOADERHANDLE tmpHandle = m_pLoaderAllocator->AllocateHandle(NULL);
+
+ // Races where this fails should be sufficiently rare that leaking this handle
+ // until LoaderAllocator destruction is acceptable.
+ FastInterlockCompareExchangePointer(&m_hDomainPermissionListSet, tmpHandle, static_cast<LOADERHANDLE>(NULL));
+ }
+
+ // We have to synchronize the update to the PLS across concurrent threads.
+ // We don't care if another thread is checking the existing PLS while this
+ // update is in progress, as the loaded assembly won't be on the stack for such
+ // a demand and so will not affect the result of the existing PLS optimization.
+ do {
+ gc.refPermListSet = ObjectFromLazyHandle(m_hDomainPermissionListSet, m_pLoaderAllocator);
+
+ MethodDescCallSite updateAppDomainPLS(METHOD__SECURITY_ENGINE__UPDATE_APPDOMAIN_PLS);
+ ARG_SLOT args[] = {
+ ObjToArgSlot(gc.refPermListSet),
+ ObjToArgSlot(gc.refGrantedSet),
+ ObjToArgSlot(gc.refDeniedSet),
+ };
+ // This returns a new copy of the PermissionListSet
+ gc.refRetVal = updateAppDomainPLS.Call_RetOBJECTREF(args);
+ }
+ // If some other thread beat us to the PLS object handle, just try updating the PLS again
+ // This race should be rare enough that recomputing the PLS is acceptable.
+ while (m_pLoaderAllocator->CompareExchangeValueInHandle(m_hDomainPermissionListSet, gc.refRetVal, gc.refPermListSet) != gc.refPermListSet);
+
+ GCPROTECT_END();
+#endif // FEATURE_PLS
+
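+ // Lock-free update: intersect the current domain-wide special flags with
+ // the new assembly's flags and publish the result with a compare-exchange,
+ // retrying if another thread raced with us.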
+ LONG dwNewDomainWideSpecialFlags = 0;
+ LONG dwOldDomainWideSpecialFlags = 0;
+ do {
+ dwOldDomainWideSpecialFlags = m_dwDomainWideSpecialFlags;
+ dwNewDomainWideSpecialFlags = (dwOldDomainWideSpecialFlags & pNewSecDescriptor->GetSpecialFlags());
+ }
+ while (InterlockedCompareExchange((LONG*)&m_dwDomainWideSpecialFlags, dwNewDomainWideSpecialFlags, dwOldDomainWideSpecialFlags) != dwOldDomainWideSpecialFlags);
+ }
+}
+
+#ifdef FEATURE_PLS
+BOOL ApplicationSecurityDescriptor::CheckPLS (OBJECTREF* orDemand, DWORD dwDemandSpecialFlags, BOOL fDemandSet)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ // Check if all assemblies so far have full trust.
+ if (CheckDomainWideSpecialFlag(1 << SECURITY_FULL_TRUST))
+ return TRUE;
+
+ // Check if this is one of the well-known permissions tracked in the special flags.
+ if (CheckDomainWideSpecialFlag(dwDemandSpecialFlags))
+ return TRUE;
+
+ CLR_BOOL fResult = FALSE;
+ //
+ // only evaluate the PLS if we don't need to marshal the demand across AppDomains
+ // This means we would perform a stack walk when there are multiple semi-trust
+ // AppDomains on the stack, which is acceptable.
+ // In homogeneous cases, the stack walk could potentially detect the situation
+ // and avoid the expensive walk of the assemblies if the permission demand is a
+ // subset of the homogeneous grant set applied to the AppDomain.
+ //
+ if (m_pAppDomain == GetThread()->GetDomain()) {
+ OBJECTREF refDomainPLS = NULL;
+
+ GCPROTECT_BEGIN(refDomainPLS);
+ refDomainPLS = ObjectFromLazyHandle(m_hDomainPermissionListSet, m_pLoaderAllocator);
+
+ EX_TRY
+ {
+ if (fDemandSet) {
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__PERMISSION_LIST_SET__CHECK_SET_DEMAND_NO_THROW);
+ DECLARE_ARGHOLDER_ARRAY(args, 2);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(refDomainPLS); // arg 0
+ args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(*orDemand); // arg 1
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD(fResult, CLR_BOOL, args);
+ }
+ else {
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__PERMISSION_LIST_SET__CHECK_DEMAND_NO_THROW);
+ DECLARE_ARGHOLDER_ARRAY(args, 2);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(refDomainPLS); // arg 0
+ args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(*orDemand); // arg 1
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD(fResult, CLR_BOOL, args);
+ }
+ }
+ EX_SWALLOW_NONTRANSIENT;
+
+ GCPROTECT_END();
+ }
+
+ return fResult;
+}
+#endif // FEATURE_PLS
+
+DWORD ApplicationSecurityDescriptor::GetDomainWideSpecialFlag() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_dwDomainWideSpecialFlags;
+}
+
+void ApplicationSecurityDescriptor::FinishInitialization()
+{
+ WRAPPER_NO_CONTRACT;
+ // Resolve the AppDomain security descriptor.
+ this->Resolve();
+
+ // Reset the initialization in-progress flag.
+ this->ResetInitializationInProgress();
+
+ // Initialize the PLS with the grant set of the AppDomain
+ this->InitializePLS();
+}
+
+void ApplicationSecurityDescriptor::SetHostSecurityManagerFlags(DWORD dwFlags)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_dwHostSecurityManagerFlags |= dwFlags;
+}
+
+void ApplicationSecurityDescriptor::SetPolicyLevelFlag()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_dwHostSecurityManagerFlags |= HOST_POLICY_LEVEL;
+}
+
+BOOL ApplicationSecurityDescriptor::IsHomogeneous() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fHomogeneous;
+}
+
+// Should the HSM be consulted for security decisions in this AppDomain.
+BOOL ApplicationSecurityDescriptor::CallHostSecurityManager()
+{
+ LIMITED_METHOD_CONTRACT;
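+ // Equivalent to testing against the combined mask (HOST_APP_DOMAIN_EVIDENCE |
+ // HOST_POLICY_LEVEL | HOST_ASM_EVIDENCE | HOST_RESOLVE_POLICY); note that
+ // HOST_DAT by itself does not require consulting the HSM here.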
+ return (m_dwHostSecurityManagerFlags & HOST_APP_DOMAIN_EVIDENCE ||
+ m_dwHostSecurityManagerFlags & HOST_POLICY_LEVEL ||
+ m_dwHostSecurityManagerFlags & HOST_ASM_EVIDENCE ||
+ m_dwHostSecurityManagerFlags & HOST_RESOLVE_POLICY);
+}
+
+// The AppDomain is considered a default one (FT) if the property is set and it's not a homogeneous AppDomain
+BOOL ApplicationSecurityDescriptor::IsDefaultAppDomain() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fIsDefaultAppdomain
+#ifndef FEATURE_CORECLR
+ && !m_fHomogeneous
+#endif // FEATURE_CORECLR
+ ;
+}
+
+BOOL ApplicationSecurityDescriptor::IsDefaultAppDomainEvidence()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fIsDefaultAppdomainEvidence; // This need not be a default AD, but has no evidence. So we'll use the default AD evidence
+}
+
+// Indicates whether the initialization phase is in progress.
+BOOL ApplicationSecurityDescriptor::IsInitializationInProgress()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fIsInitializationInProgress;
+}
+
+BOOL ApplicationSecurityDescriptor::ContainsAnyRefusedPermissions()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fContainsAnyRefusedPermissions;
+}
+
+#ifdef FEATURE_CAS_POLICY
+void ApplicationSecurityDescriptor::SetLegacyCasPolicyEnabled()
+{
+ STANDARD_VM_CONTRACT;
+
+ // APPX precludes the use of legacy CAS policy
+ if (!AppX::IsAppXProcess())
+ {
+ SecurityPolicy::InitPolicyConfig();
+ m_fLegacyCasPolicy = TRUE;
+ }
+}
+
+BOOL ApplicationSecurityDescriptor::IsLegacyCasPolicyEnabled()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fLegacyCasPolicy && !AppX::IsAppXProcess();
+}
+
+#endif // FEATURE_CAS_POLICY
+
+// Is it possible for the AppDomain to contain partial trust code? This method may return true even if the
+// domain does not currently have partial trust code in it - a true value simply means that it is possible
+// for partial trust code to eventually end up in the domain.
+BOOL ApplicationSecurityDescriptor::DomainMayContainPartialTrustCode()
+{
+ WRAPPER_NO_CONTRACT;
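+ // A heterogeneous domain may always come to contain partial trust code; a
+ // homogeneous one may only if the domain's own grant set is not full trust.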
+ return !m_fHomogeneous || !IsFullyTrusted();
+}
+
+#ifdef FEATURE_APTCA
+ConditionalAptcaCache *ApplicationSecurityDescriptor::GetConditionalAptcaCache()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pConditionalAptcaCache;
+}
+
+void ApplicationSecurityDescriptor::SetCanonicalConditionalAptcaList(LPCWSTR wszCanonicalConditionalAptcaList)
+{
+ WRAPPER_NO_CONTRACT;
+ return this->GetConditionalAptcaCache()->SetCanonicalConditionalAptcaList(wszCanonicalConditionalAptcaList);
+}
+#endif // FEATURE_APTCA
+
+#endif // !DACCESS_COMPILE
+
+
diff --git a/src/vm/securitydescriptorappdomain.h b/src/vm/securitydescriptorappdomain.h
new file mode 100644
index 0000000000..d39cf0e3a9
--- /dev/null
+++ b/src/vm/securitydescriptorappdomain.h
@@ -0,0 +1,187 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef __SECURITYDESCRIPTOR_APPDOMAIN_H__
+#define __SECURITYDESCRIPTOR_APPDOMAIN_H__
+#include "security.h"
+#include "securitydescriptor.h"
+#include "securitymeta.h"
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// [SecurityDescriptor]
+// |
+// +----[PEFileSecurityDescriptor]
+// |
+// +----[ApplicationSecurityDescriptor]
+// |
+// +----[AssemblySecurityDescriptor]
+//
+// [SharedSecurityDescriptor]
+//
+///////////////////////////////////////////////////////////////////////////////
+
+//------------------------------------------------------------------
+//
+// APPDOMAIN SECURITY DESCRIPTOR
+//
+//------------------------------------------------------------------
+
+class ApplicationSecurityDescriptor : public SecurityDescriptorBase<IApplicationSecurityDescriptor>
+{
+public:
+ VPTR_VTABLE_CLASS(ApplicationSecurityDescriptor, SecurityDescriptorBase<IApplicationSecurityDescriptor>)
+
+private:
+ // Dependency in managed : System.Security.HostSecurityManager.cs
+ enum HostSecurityManagerFlags
+ {
+ // Flags to control which HostSecurityManager features are provided by the host
+ HOST_NONE = 0x0000,
+ HOST_APP_DOMAIN_EVIDENCE = 0x0001,
+ HOST_POLICY_LEVEL = 0x0002,
+ HOST_ASM_EVIDENCE = 0x0004,
+ HOST_DAT = 0x0008,
+ HOST_RESOLVE_POLICY = 0x0010
+ };
+
+#ifdef FEATURE_PLS
+ // Intersection of granted/denied permissions of all assemblies in domain
+ LOADERHANDLE m_hDomainPermissionListSet;
+#endif // FEATURE_PLS
+
+ // The bits represent the status of security checks on some specific permissions within this domain
+ Volatile<DWORD> m_dwDomainWideSpecialFlags;
+ // m_dwDomainWideSpecialFlags bit map
+ // Bit 0 = Unmanaged Code access permission. Accessed via SECURITY_UNMANAGED_CODE
+ // Bit 1 = Skip verification permission. SECURITY_SKIP_VER
+ // Bit 2 = Permission to Reflect over types. REFLECTION_TYPE_INFO
+ // Bit 3 = Permission to Assert. SECURITY_ASSERT
+ // Bit 4 = Permission to invoke methods. REFLECTION_MEMBER_ACCESS
+ // Bit 7 = PermissionSet, fulltrust SECURITY_FULL_TRUST
+ // Bit 9 = UIPermission (unrestricted)
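+ //
+ // Illustrative sketch (not part of this change): a caller asking whether
+ // every assembly loaded so far may call unmanaged code would test
+ //
+ //   CheckDomainWideSpecialFlag(1 << SECURITY_UNMANAGED_CODE)
+ //
+ // which stays non-zero only while all resolved assemblies were granted
+ // that permission (see AddNewSecDescToPLS).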
+
+ BOOL m_fIsInitializationInProgress; // appdomain is in the initialization stage and is considered FullTrust by the security system.
+ BOOL m_fIsDefaultAppdomain; // appdomain is the default appdomain, or created by the default appdomain without an explicit evidence
+ BOOL m_fIsDefaultAppdomainEvidence; // Evidence for this AD is the same as the Default AD.
+ // m_fIsDefaultAppdomain is TRUE => m_fIsDefaultAppdomainEvidence is TRUE
+ // m_fIsDefaultAppdomainEvidence can be TRUE when m_fIsDefaultAppdomain is FALSE if a homogeneous AD was
+ // created without evidence (non-null PermissionSet though).
+ // m_fIsDefaultAppdomainEvidence and m_fIsDefaultAppdomain are both FALSE when an explicit evidence
+ // exists on the AppDomain. (In the managed world: AppDomain._SecurityIdentity != null)
+ BOOL m_fHomogeneous; // This AppDomain has an ApplicationTrust
+ BOOL m_fRuntimeSuppliedHomogenousGrantSet; // This AppDomain is homogeneous only because the v4 CLR defaults to creating homogeneous domains, and would not have been homogeneous in v2
+#ifdef FEATURE_CAS_POLICY
+ BOOL m_fLegacyCasPolicy; // This AppDomain is using legacy CAS policy
+#endif // FEATURE_CAS_POLICY
+ DWORD m_dwHostSecurityManagerFlags; // Flags indicating what decisions the host wants to participate in.
+ BOOL m_fContainsAnyRefusedPermissions;
+
+ BOOL m_fIsPreResolved; // Have we done a pre-resolve on this domain yet
+ BOOL m_fPreResolutionFullTrust; // Was the domain pre-resolved to be full trust
+ BOOL m_fPreResolutionHomogeneous; // Was the domain pre-resolved to be homogeneous
+
+#ifdef FEATURE_APTCA
+ ConditionalAptcaCache* m_pConditionalAptcaCache; // Cache of known conditional APTCA assemblies in this domain
+#endif // FEATURE_APTCA
+
+#ifndef DACCESS_COMPILE
+public:
+ //--------------------
+ // Constructor
+ //--------------------
+ inline ApplicationSecurityDescriptor(AppDomain *pAppDomain);
+
+ //--------------------
+ // Destructor
+ //--------------------
+#ifdef FEATURE_APTCA // The destructor only deletes the ConditionalAptcaCache
+ inline ~ApplicationSecurityDescriptor();
+#endif // FEATURE_APTCA
+
+public:
+ // Indicates whether the initialization phase is in progress.
+ virtual BOOL IsInitializationInProgress();
+ inline void ResetInitializationInProgress();
+
+ // The AppDomain is considered a default one (FT) if the property is
+ // set and it's not a homogeneous AppDomain (ClickOnce case for example).
+ virtual BOOL IsDefaultAppDomain() const;
+ inline void SetDefaultAppDomain();
+
+ virtual BOOL IsDefaultAppDomainEvidence();
+ inline void SetDefaultAppDomainEvidence();
+
+ virtual VOID Resolve();
+
+ void ResolveWorker();
+
+ virtual void FinishInitialization();
+
+ virtual void PreResolve(BOOL *pfIsFullyTrusted, BOOL *pfIsHomogeneous);
+
+ virtual void SetHostSecurityManagerFlags(DWORD dwFlags);
+ virtual void SetPolicyLevelFlag();
+
+ inline void SetHomogeneousFlag(BOOL fRuntimeSuppliedHomogenousGrantSet);
+ virtual BOOL IsHomogeneous() const;
+
+#ifdef FEATURE_CAS_POLICY
+ virtual BOOL IsLegacyCasPolicyEnabled();
+ virtual void SetLegacyCasPolicyEnabled();
+#endif // FEATURE_CAS_POLICY
+
+ virtual BOOL ContainsAnyRefusedPermissions();
+
+ // Should the HSM be consulted for security decisions in this AppDomain.
+ virtual BOOL CallHostSecurityManager();
+
+#ifdef FEATURE_CAS_POLICY
+ // Does the domain's HSM need to be consulted for assemblies loaded into the domain
+ inline BOOL CallHostSecurityManagerForAssemblies();
+#endif // FEATURE_CAS_POLICY
+
+ // Initialize the PLS on the AppDomain.
+ void InitializePLS();
+
+ // Called every time an AssemblySecurityDescriptor is resolved.
+ void AddNewSecDescToPLS(AssemblySecurityDescriptor *pNewSecDescriptor);
+
+#ifdef FEATURE_PLS
+ // Check the demand against the PLS in this AppDomain
+ BOOL CheckPLS (OBJECTREF* orDemand, DWORD dwDemandSpecialFlags, BOOL fDemandSet);
+#endif // FEATURE_PLS
+
+ // Checks for one of the special domain wide flags
+ // such as if we are currently in a "fully trusted" environment
+ // or if unmanaged code access is allowed at this time
+ inline BOOL CheckDomainWideSpecialFlag(DWORD flags) const;
+ virtual DWORD GetDomainWideSpecialFlag() const;
+
+#ifdef FEATURE_CAS_POLICY
+ virtual OBJECTREF GetEvidence();
+ DWORD GetZone();
+
+ virtual BOOL AllowsLoadsFromRemoteSources();
+#endif // FEATURE_CAS_POLICY
+
+ virtual BOOL DomainMayContainPartialTrustCode();
+
+ BOOL QuickIsFullyTrusted();
+
+#ifdef FEATURE_APTCA
+ virtual ConditionalAptcaCache *GetConditionalAptcaCache();
+ virtual void SetCanonicalConditionalAptcaList(LPCWSTR wszCanonicalConditionalAptcaList);
+#endif // FEATURE_APTCA
+#endif // #ifndef DACCESS_COMPILE
+};
+
+#include "securitydescriptorappdomain.inl"
+
+#endif // !__SECURITYDESCRIPTOR_APPDOMAIN_H__
diff --git a/src/vm/securitydescriptorappdomain.inl b/src/vm/securitydescriptorappdomain.inl
new file mode 100644
index 0000000000..e801ccfa23
--- /dev/null
+++ b/src/vm/securitydescriptorappdomain.inl
@@ -0,0 +1,128 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+#ifndef __SECURITYDESCRIPTORAPPDOMAIN_INL__
+#define __SECURITYDESCRIPTORAPPDOMAIN_INL__
+
+#ifndef DACCESS_COMPILE
+
+inline ApplicationSecurityDescriptor::ApplicationSecurityDescriptor(AppDomain *pAppDomain) :
+ SecurityDescriptorBase<IApplicationSecurityDescriptor>(pAppDomain, NULL, NULL, pAppDomain->GetLoaderAllocator()),
+#ifdef FEATURE_PLS
+ m_hDomainPermissionListSet(NULL),
+#endif // FEATURE_PLS
+ m_dwDomainWideSpecialFlags(0xFFFFFFFF),
+ m_fIsInitializationInProgress(TRUE),
+ m_fIsDefaultAppdomain(FALSE),
+ m_fIsDefaultAppdomainEvidence(FALSE),
+ m_fHomogeneous(FALSE),
+ m_fRuntimeSuppliedHomogenousGrantSet(FALSE),
+#ifdef FEATURE_CAS_POLICY
+ m_fLegacyCasPolicy(Security::IsProcessWideLegacyCasPolicyEnabled()),
+#endif // FEATURE_CAS_POLICY
+ m_dwHostSecurityManagerFlags(HOST_NONE),
+ m_fContainsAnyRefusedPermissions(FALSE),
+ m_fIsPreResolved(FALSE),
+ m_fPreResolutionFullTrust(FALSE),
+ m_fPreResolutionHomogeneous(FALSE)
+#ifdef FEATURE_APTCA
+ ,m_pConditionalAptcaCache(new ConditionalAptcaCache(pAppDomain))
+#endif // FEATURE_APTCA
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ return;
+}
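+
+// Note: m_dwDomainWideSpecialFlags starts out as 0xFFFFFFFF (everything
+// granted) because only fully trusted assemblies may load while the domain is
+// still initializing; AddNewSecDescToPLS narrows the set afterwards.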
+
+#ifdef FEATURE_APTCA
+inline ApplicationSecurityDescriptor::~ApplicationSecurityDescriptor()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ delete m_pConditionalAptcaCache;
+}
+#endif // FEATURE_APTCA
+
+inline void ApplicationSecurityDescriptor::ResetInitializationInProgress()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fIsInitializationInProgress = FALSE;
+}
+
+// Checks for one of the special domain wide flags such as if we are currently in a "fully trusted"
+// environment or if unmanaged code access is allowed at this time
+inline BOOL ApplicationSecurityDescriptor::CheckDomainWideSpecialFlag(DWORD flags) const
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_dwDomainWideSpecialFlags & flags);
+}
+
+inline void ApplicationSecurityDescriptor::SetDefaultAppDomain()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fIsDefaultAppdomain = TRUE;
+ m_fIsDefaultAppdomainEvidence = TRUE; // Follows from the fact that this is a default AppDomain
+}
+
+inline void ApplicationSecurityDescriptor::SetDefaultAppDomainEvidence()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fIsDefaultAppdomainEvidence = TRUE; // This need not be a default AD, but has no evidence. So we'll use the default AD evidence
+}
+
+inline void ApplicationSecurityDescriptor::SetHomogeneousFlag(BOOL fRuntimeSuppliedHomogenousGrantSet)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fHomogeneous = TRUE;
+ m_fRuntimeSuppliedHomogenousGrantSet = fRuntimeSuppliedHomogenousGrantSet;
+}
+
+#ifdef FEATURE_CAS_POLICY
+
+// Does the domain's HSM need to be consulted for assemblies loaded into the domain
+inline BOOL ApplicationSecurityDescriptor::CallHostSecurityManagerForAssemblies()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // We always need to call the HSM if it wants to specify the assembly's grant set
+ if (m_dwHostSecurityManagerFlags & HOST_RESOLVE_POLICY)
+ {
+ return TRUE;
+ }
+
+ // In legacy CAS mode, we also need to call the HSM if it wants to supply evidence or if we have an
+ // AppDomain policy level
+ if (IsLegacyCasPolicyEnabled())
+ {
+ if ((m_dwHostSecurityManagerFlags & HOST_ASM_EVIDENCE) ||
+ (m_dwHostSecurityManagerFlags & HOST_POLICY_LEVEL))
+ {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+#endif // FEATURE_CAS_POLICY
+
+#endif // #ifndef DACCESS_COMPILE
+
+#endif // !__SECURITYDESCRIPTORAPPDOMAIN_INL__
diff --git a/src/vm/securitydescriptorassembly.cpp b/src/vm/securitydescriptorassembly.cpp
new file mode 100644
index 0000000000..6723913af0
--- /dev/null
+++ b/src/vm/securitydescriptorassembly.cpp
@@ -0,0 +1,1094 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#include "common.h"
+#include "security.h"
+
+#ifndef DACCESS_COMPILE
+AssemblySecurityDescriptor::AssemblySecurityDescriptor(AppDomain *pDomain, DomainAssembly *pAssembly, LoaderAllocator *pLoaderAllocator) :
+ SecurityDescriptorBase<IAssemblySecurityDescriptor>(pDomain, pAssembly, pAssembly->GetFile(), pLoaderAllocator),
+ m_dwNumPassedDemands(0),
+ m_pSignature(NULL),
+ m_pSharedSecDesc(NULL),
+#ifdef FEATURE_CAS_POLICY
+ m_hRequiredPermissionSet(NULL),
+ m_hOptionalPermissionSet(NULL),
+ m_hDeniedPermissionSet(NULL),
+ m_fAdditionalEvidence(FALSE),
+ m_fIsSignatureLoaded(FALSE),
+ m_fAssemblyRequestsComputed(FALSE),
+#endif
+ m_fMicrosoftPlatform(FALSE),
+ m_fAllowSkipVerificationInFullTrust(TRUE)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ } CONTRACTL_END;
+}
+
+#ifdef FEATURE_CAS_POLICY
+OBJECTREF AssemblySecurityDescriptor::GetRequestedPermissionSet(OBJECTREF *pOptionalPermissionSet,
+ OBJECTREF *pDeniedPermissionSet)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_pAppDomain == GetAppDomain());
+ PRECONDITION(CheckPointer(pOptionalPermissionSet));
+ PRECONDITION(CheckPointer(pDeniedPermissionSet));
+ PRECONDITION(m_pAppDomain->GetSecurityDescriptor()->IsLegacyCasPolicyEnabled());
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ OBJECTREF req = NULL;
+ GCPROTECT_BEGIN(req);
+
+ if (!IsAssemblyRequestsComputed())
+ {
+ ReleaseHolder<IMDInternalImport> pImport (m_pAssem->GetFile()->GetMDImportWithRef());
+
+ // Try to load permission requests from assembly first.
+ SecurityAttributes::LoadPermissionRequestsFromAssembly(pImport,
+ &req,
+ pOptionalPermissionSet,
+ pDeniedPermissionSet);
+
+ SetRequestedPermissionSet(req, *pOptionalPermissionSet, *pDeniedPermissionSet);
+ }
+ else
+ {
+ *pOptionalPermissionSet = ObjectFromLazyHandle(m_hOptionalPermissionSet, m_pLoaderAllocator);
+ *pDeniedPermissionSet = ObjectFromLazyHandle(m_hDeniedPermissionSet, m_pLoaderAllocator);
+ req = ObjectFromLazyHandle(m_hRequiredPermissionSet, m_pLoaderAllocator);
+ }
+
+ GCPROTECT_END();
+ return req;
+}
+
+void AssemblySecurityDescriptor::SetRequestedPermissionSet(OBJECTREF RequiredPermissionSet,
+ OBJECTREF OptionalPermissionSet,
+ OBJECTREF DeniedPermissionSet)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ struct _gc
+ {
+ OBJECTREF RequiredPermissionSet;
+ OBJECTREF OptionalPermissionSet;
+ OBJECTREF DeniedPermissionSet;
+ } gc;
+
+ gc.RequiredPermissionSet = RequiredPermissionSet;
+ gc.OptionalPermissionSet = OptionalPermissionSet;
+ gc.DeniedPermissionSet = DeniedPermissionSet;
+
+ GCPROTECT_BEGIN(gc);
+ StoreObjectInLazyHandle(m_hRequiredPermissionSet, gc.RequiredPermissionSet, m_pLoaderAllocator);
+ StoreObjectInLazyHandle(m_hOptionalPermissionSet, gc.OptionalPermissionSet, m_pLoaderAllocator);
+ StoreObjectInLazyHandle(m_hDeniedPermissionSet, gc.DeniedPermissionSet, m_pLoaderAllocator);
+ GCPROTECT_END();
+
+ m_fAssemblyRequestsComputed = TRUE;
+}
+#endif // FEATURE_CAS_POLICY
+
+//
+// This method will return TRUE if this assembly is allowed to skip verification.
+//
+
+BOOL AssemblySecurityDescriptor::CanSkipVerification()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(IsResolved());
+ }
+ CONTRACTL_END;
+
+
+ // Assemblies loaded into the verification domain never get to skip verification
+ // unless they are coming from the GAC.
+ if (m_pAppDomain->IsVerificationDomain())
+ {
+ if (!m_pAssem->GetFile()->IsSourceGAC() && m_pAssem->IsIntrospectionOnly())
+ {
+ return FALSE;
+ }
+ }
+
+ return CheckSpecialFlag(1 << SECURITY_SKIP_VER);
+}
+
+BOOL AssemblySecurityDescriptor::AllowSkipVerificationInFullTrust()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fAllowSkipVerificationInFullTrust;
+}
+
+//
+// This method will return TRUE if this assembly has assertion permission.
+//
+
+BOOL AssemblySecurityDescriptor::CanAssert()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsResolved());
+ } CONTRACTL_END;
+
+ return CheckSpecialFlag(1 << SECURITY_ASSERT);
+}
+
+//
+// This method will return TRUE if this assembly has unrestricted UI permissions.
+//
+
+BOOL AssemblySecurityDescriptor::HasUnrestrictedUIPermission()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsResolved());
+ } CONTRACTL_END;
+
+ return CheckSpecialFlag(1 << UI_PERMISSION);
+}
+
+//
+// Assembly transparency access methods. These methods indicate what the default transparency level is for
+// methods and types introduced by the assembly.
+//
+
+BOOL AssemblySecurityDescriptor::IsAllCritical()
+{
+ STANDARD_VM_CONTRACT;
+
+ ModuleSecurityDescriptor *pMsd = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(GetAssembly());
+ return pMsd->IsAllCritical();
+}
+
+BOOL AssemblySecurityDescriptor::IsAllSafeCritical()
+{
+ STANDARD_VM_CONTRACT;
+
+ ModuleSecurityDescriptor *pMsd = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(GetAssembly());
+ return pMsd->IsAllCritical() && pMsd->IsTreatAsSafe();
+}
+
+BOOL AssemblySecurityDescriptor::IsAllPublicAreaSafeCritical()
+{
+ STANDARD_VM_CONTRACT;
+
+ ModuleSecurityDescriptor *pMsd = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(GetAssembly());
+
+ bool fIsPublicAreaSafeCritical = SecurityTransparencyBehavior::GetTransparencyBehavior(pMsd->GetSecurityRuleSet())->DoesPublicImplyTreatAsSafe();
+
+ return pMsd->IsAllCritical() && (pMsd->IsTreatAsSafe() || fIsPublicAreaSafeCritical);
+}
+
+BOOL AssemblySecurityDescriptor::IsAllTransparent()
+{
+ STANDARD_VM_CONTRACT;
+
+ ModuleSecurityDescriptor *pMsd = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(GetAssembly());
+ return pMsd->IsAllTransparent();
+}
+
+BOOL AssemblySecurityDescriptor::QuickIsFullyTrusted()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (IsSystem())
+ return TRUE;
+#ifdef FEATURE_CAS_POLICY
+
+ // NGEN is always done in full trust
+ if (m_pAppDomain->IsCompilationDomain())
+ {
+ return TRUE;
+ }
+
+ // If the assembly is in the GAC then it gets FullTrust.
+ if (m_pAssem->GetFile()->IsSourceGAC())
+ return TRUE;
+
+ // quickly detect if we've got a request refused or a request optional.
+ if (m_pAppDomain->GetSecurityDescriptor()->IsLegacyCasPolicyEnabled())
+ {
+ ReleaseHolder<IMDInternalImport> pImport(m_pAssem->GetFile()->GetMDImportWithRef());
+ if (SecurityAttributes::RestrictiveRequestsInAssembly(pImport))
+ return FALSE;
+ }
+
+ // Check if we need to call the HostSecurityManager.
+ ApplicationSecurityDescriptor* pAppSecDesc = static_cast<ApplicationSecurityDescriptor*>(m_pAppDomain->GetSecurityDescriptor());
+ if (pAppSecDesc->CallHostSecurityManagerForAssemblies())
+ return FALSE;
+
+ // - If the AppDomain is homogeneous, we currently simply detect the FT case
+ // - Not having CAS on implies full trust. We can get here if we're still in the process of setting up
+ // the AppDomain and the CLR hasn't yet set up the homogeneous flag.
+ // - Otherwise, check the quick cache
+ if (pAppSecDesc->IsHomogeneous())
+ {
+ return m_pAppDomain->GetSecurityDescriptor()->IsFullyTrusted();
+ }
+ else if (!m_pAppDomain->GetSecurityDescriptor()->IsLegacyCasPolicyEnabled())
+ {
+ return TRUE;
+ }
+ else if (CheckQuickCache(SecurityConfig::FullTrustAll, GetZone()))
+ {
+ return TRUE;
+ }
+#endif
+
+ // See if we've already determined that the assembly is FT
+ // in another AppDomain, in case this is a shared assembly.
+ SharedSecurityDescriptor* pSharedSecDesc = GetSharedSecDesc();
+ if (pSharedSecDesc && pSharedSecDesc->IsResolved() && pSharedSecDesc->IsFullyTrusted())
+ return TRUE;
+
+ return FALSE;
+}
+
+#ifndef DACCESS_COMPILE
+
+void AssemblySecurityDescriptor::PropagatePermissionSet(OBJECTREF GrantedPermissionSet, OBJECTREF DeniedPermissionSet, DWORD dwSpecialFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // If we're propagating a permission set, then we don't want to allow an assembly to skip verification in
+ // full trust. This prevents people leapfrogging from the fully trusted anonymously hosted dynamic methods
+ // assembly into running unverifiable code. (Note that we already enforce that transparent code must only load
+ // other transparent code - so this restriction simply enforces that it is truly transparent.) It would
+ // be nicer to throw an exception in this case, however that would be a breaking change. Instead, since the
+ // SkipVerificationInFullTrust feature has always been described as a performance optimization and nothing more,
+ // we can simply turn off the optimization in these cases.
+ m_fAllowSkipVerificationInFullTrust = FALSE;
+
+ SetGrantedPermissionSet(GrantedPermissionSet, DeniedPermissionSet, dwSpecialFlags);
+
+ // make sure the shared security descriptor is updated in case this
+ // is a security descriptor for a shared assembly.
+ Resolve();
+}
+
+#ifdef FEATURE_CAS_POLICY
+//-----------------------------------------------------------------------------------------------------------
+//
+// Use the evidence already generated for this assembly's PEFile as the evidence for the assembly
+//
+// Arguments:
+// pPEFileSecDesc - PEFile security descriptor containing the already generated evidence
+//
+void AssemblySecurityDescriptor::SetEvidenceFromPEFile(IPEFileSecurityDescriptor *pPEFileSecDesc)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pPEFileSecDesc));
+ PRECONDITION(GetPEFile()->Equals(static_cast<PEFileSecurityDescriptor*>(pPEFileSecDesc)->GetPEFile()));
+ }
+ CONTRACTL_END;
+
+ // If we couldn't determine the assembly was fully trusted without first generating evidence for it,
+ // then we cannot reuse the PEFile's evidence. In that case we'll just use what we've generated for the
+ // assembly, and discard the PEFile's version.
+ if (!IsEvidenceComputed())
+ {
+ struct
+ {
+ OBJECTREF objPEFileEvidence;
+ OBJECTREF objEvidence;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.objPEFileEvidence = pPEFileSecDesc->GetEvidence();
+ gc.objEvidence = UpgradePEFileEvidenceToAssemblyEvidence(gc.objPEFileEvidence);
+ SetEvidence(gc.objEvidence);
+
+ GCPROTECT_END();
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Get the evidence collection for this Assembly
+//
+//
+OBJECTREF AssemblySecurityDescriptor::GetEvidence()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_pAppDomain == GetAppDomain());
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // If we already have evidence, then just return that
+ if (IsEvidenceComputed())
+ return ObjectFromLazyHandle(m_hAdditionalEvidence, m_pLoaderAllocator);
+
+ struct
+ {
+ OBJECTREF objHostProvidedEvidence;
+ OBJECTREF objPEFileEvidence;
+ OBJECTREF objEvidence;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+
+ gc.objHostProvidedEvidence = ObjectFromLazyHandle(m_hAdditionalEvidence, m_pLoaderAllocator);
+
+#if CHECK_APP_DOMAIN_LEAKS
+ if (g_pConfig->AppDomainLeaks())
+ {
+ _ASSERTE(gc.objPEFileEvidence == NULL || GetAppDomain() == gc.objPEFileEvidence->GetAppDomain());
+ _ASSERTE(gc.objHostProvidedEvidence == NULL || GetAppDomain() == gc.objHostProvidedEvidence->GetAppDomain());
+ }
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+ //
+ // First get an evidence collection which targets our PEFile, then upgrade it to use this assembly as a
+ // target. We create a new Evidence for the PEFile here, which means that any evidence that PEFile may
+ // have already had is not used in this upgrade. If an existing PEFileSecurityDescriptor exists for the
+ // PEFile, then that should be upgraded directly, rather than going through this code path.
+ //
+
+ gc.objPEFileEvidence = PEFileSecurityDescriptor::BuildEvidence(m_pPEFile, gc.objHostProvidedEvidence);
+ gc.objEvidence = UpgradePEFileEvidenceToAssemblyEvidence(gc.objPEFileEvidence);
+ SetEvidence(gc.objEvidence);
+
+#if CHECK_APP_DOMAIN_LEAKS
+ if (g_pConfig->AppDomainLeaks())
+ _ASSERTE(gc.objEvidence == NULL || GetAppDomain() == gc.objEvidence->GetAppDomain());
+#endif // CHECK_APP_DOMAIN_LEAKS
+
+ END_SO_INTOLERANT_CODE;
+
+ GCPROTECT_END();
+
+ return gc.objEvidence;
+}
+#endif // FEATURE_CAS_POLICY
+#endif // !DACCESS_COMPILE
+
+BOOL AssemblySecurityDescriptor::IsSystem()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pAssem->GetFile()->IsSystem();
+}
+
+void AssemblySecurityDescriptor::Resolve()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(m_pAssem != NULL);
+ INJECT_FAULT(COMPlusThrowOM(););
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ // Always resolve the assembly security descriptor in the new AppDomain
+ if (!IsResolved())
+ ResolveWorker();
+
+ // Update the info in the shared security descriptor
+ SharedSecurityDescriptor* pSharedSecDesc = GetSharedSecDesc();
+ if (pSharedSecDesc)
+ pSharedSecDesc->Resolve(this);
+}
+
+#ifdef FEATURE_CAS_POLICY
+// This routine is called when we have determined that there is no SECURITY reason
+// to verify an image, but we may want to do so anyway to ensure that 3rd parties don't
+// accidentally ship delay-signed DLLs because the application happens to be full trust.
+//
+static bool DontNeedToFlagAccidentalDelaySigning(PEAssembly* assem)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // If the file has a native image, then either it is strongly named and can be considered
+ // fully signed (see additional comments in code:PEAssembly::IsFullySigned), or it is not
+ // strong-named and thus can't be delay-signed. Either way, no check is needed.
+ // If the file is fully signed, then people did not accidentally forget to sign it, so no check is needed
+ if (assem->HasNativeImage() || assem->IsFullySigned())
+ return true;
+
+ // If mscorlib itself is not signed, this is not an official CLR, and we don't need
+ // to do the checking in this case either because 3rd parties should not be running this way.
+ // This is useful because otherwise perf runs on normal CLR lab builds would not
+ // measure the performance that we get from an official runtime (since official runtimes will
+ // be signed).
+ PEAssembly* mscorlib = SystemDomain::SystemFile();
+ if (!mscorlib->HasNativeImage())
+ return false;
+ if ((mscorlib->GetLoadedNative()->GetNativeHeader()->COR20Flags & COMIMAGE_FLAGS_STRONGNAMESIGNED) == 0)
+ return true;
+
+ return false;
+}
+#endif // FEATURE_CAS_POLICY
+
+void AssemblySecurityDescriptor::ResolveWorker()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+#ifdef FEATURE_CORECLR
+
+ if (NingenEnabled())
+ {
+ int dwSpecialFlags;
+ BOOL platformAssembly = FALSE;
+
+ if (IsSystem())
+ {
+ dwSpecialFlags = 0xFFFFFFFF;
+ platformAssembly = TRUE;
+ }
+ else
+ {
+ // Decide if this is a platform assembly
+ if (m_pAssem->GetFile()->IsProfileAssembly())
+ platformAssembly = TRUE;
+
+ // Decide trust level
+ if (platformAssembly)
+ {
+ dwSpecialFlags = 0xFFFFFFFF;
+ }
+ else
+ {
+ dwSpecialFlags = m_pAppDomain->GetSecurityDescriptor()->GetSpecialFlags();
+ }
+ }
+
+ SetGrantedPermissionSet(NULL, NULL, dwSpecialFlags);
+ if (platformAssembly)
+ SetMicrosoftPlatform();
+
+ return;
+ }
+
+#ifndef CROSSGEN_COMPILE
+ int dwSpecialFlags;
+ BOOL platformAssembly = FALSE;
+ BOOL trustedAssembly = FALSE;
+
+ struct _gc {
+ OBJECTREF granted; // Policy based Granted Permission
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ if (IsSystem())
+ {
+ // mscorlib is always fully trusted, but we can't create permission sets yet, so the grant set for mscorlib will be NULL; we should never look at it, though.
+ dwSpecialFlags = 0xFFFFFFFF;
+ platformAssembly = TRUE;
+ }
+ else
+ {
+ // Decide if this is a platform assembly
+ if (m_pAssem->GetFile()->IsProfileAssembly())
+ platformAssembly = TRUE;
+
+ // Decide trust level
+ if (platformAssembly || trustedAssembly)
+ {
+ Security::GetPermissionInstance(&gc.granted, SECURITY_FULL_TRUST);
+ dwSpecialFlags = 0xFFFFFFFF;
+ }
+ else
+ {
+ // get grant from AppDomain grant set.
+ gc.granted = m_pAppDomain->GetSecurityDescriptor()->GetGrantedPermissionSet(NULL);
+ dwSpecialFlags = m_pAppDomain->GetSecurityDescriptor()->GetSpecialFlags();
+ }
+
+ }
+ SetGrantedPermissionSet(gc.granted, NULL, dwSpecialFlags);
+ if (platformAssembly)
+ SetMicrosoftPlatform();
+
+ // Only fully trusted assemblies are allowed to be loaded when
+ // the AppDomain is in the initialization phase.
+ if (m_pAppDomain->GetSecurityDescriptor()->IsInitializationInProgress() && !IsFullyTrusted())
+ COMPlusThrow(kApplicationException, W("Policy_CannotLoadSemiTrustAssembliesDuringInit"));
+
+ GCPROTECT_END();
+#endif // CROSSGEN_COMPILE
+
+#else
+ if (CanSkipPolicyResolution() || NingenEnabled()) {
+ SetGrantedPermissionSet(NULL, NULL, 0xFFFFFFFF);
+ m_pAssem->GetFile()->SetStrongNameBypassed();
+ return;
+ }
+
+#ifndef CROSSGEN_COMPILE
+ struct _gc {
+ OBJECTREF reqdPset; // Required Requested Permissions
+ OBJECTREF optPset; // Optional Requested Permissions
+ OBJECTREF denyPset; // Denied Permissions
+ OBJECTREF evidence; // Object containing evidence
+ OBJECTREF granted; // Policy based Granted Permission
+ OBJECTREF grantdenied; // Policy based explicitly Denied Permissions
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ // Policy resolution can cause string comparisons that trigger .nlp module loads. (Specifically,
+ // FileIOPermission can trigger this). At this point mscorlib is already loaded, so we can
+ // override the load levels here to allow the .nlp module loads.
+ OVERRIDE_LOAD_LEVEL_LIMIT(FILE_ACTIVE);
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ // Resolve is one of the few SecurityDescriptor routines that may be called
+ // from the wrong appdomain context. If that's the case we will transition
+ // into the correct appdomain for the duration of the call.
+
+ ENTER_DOMAIN_PTR_PREDICATED(m_pAppDomain,ADV_RUNNINGIN,!IsSystem())
+ {
+ GCPROTECT_BEGIN(gc);
+
+ //
+ // GAC assemblies with no RequestRefuse get FullTrust
+ // Also AppDomains with an AppTrust that are fully trusted are
+ // homogeneous and so every assembly that does not have a RequestRefuse
+ // will also get FullTrust.
+ //
+
+ DWORD dwSpecialFlags;
+
+ if (QuickIsFullyTrusted())
+ {
+ Security::GetPermissionInstance(&gc.granted, SECURITY_FULL_TRUST);
+ dwSpecialFlags = 0xFFFFFFFF;
+ }
+ else
+ {
+ // We couldn't quickly figure out that the assembly was fully trusted, so gather its evidence and
+ // call managed code to get the final grant set.
+ if (IsEvidenceComputed())
+ {
+ gc.evidence = ObjectFromLazyHandle(m_hAdditionalEvidence, m_pLoaderAllocator);
+ }
+ else
+ {
+ gc.evidence = GetEvidence();
+ }
+
+ if (!m_pAppDomain->GetSecurityDescriptor()->IsLegacyCasPolicyEnabled())
+ {
+ // Either we have a host security manager or a homogenous AppDomain that could make this
+ // assembly be partially trusted. Call out to managed to get the grant set.
+ gc.granted = SecurityPolicy::ResolveGrantSet(gc.evidence, &dwSpecialFlags, TRUE);
+ }
+ else
+ {
+ // Legacy CAS policy is enabled, so do a full CAS resolve
+ gc.reqdPset = GetRequestedPermissionSet(&gc.optPset, &gc.denyPset);
+ gc.granted = SecurityPolicy::ResolveCasPolicy(gc.evidence,
+ gc.reqdPset,
+ gc.optPset,
+ gc.denyPset,
+ &gc.grantdenied,
+ &dwSpecialFlags,
+ TRUE);
+ }
+ }
+
+ SetGrantedPermissionSet(gc.granted, gc.denyPset, dwSpecialFlags);
+
+#ifdef FEATURE_CAS_POLICY
+ // If we're delaying verification of the assembly's strong name, check to see if we need to force a
+ // verification at this point.
+ if (!m_pAssem->IsSystem() &&
+ !m_pAssem->GetFile()->IsSourceGAC() &&
+ !m_pAssem->GetFile()->IsStrongNameVerified())
+ {
+ //
+ // #StrongNameBypass
+ //
+ // If the application has opted into the strong name bypass feature, then we will attempt to
+ // load its assemblies without verifying their strong names. We can get away with avoiding the
+ // strong name verification in the case where all of the following apply.
+ //
+ // 1. The application has asked for strong name bypass
+ // 2. The machine administrator has not disabled strong name bypass for the machine
+            // 3. The assembly being loaded is fully trusted, and this trust is not solely based upon its
+            //    strong name.
+ // 4. The AppDomain the assembly is being loaded into is fully trusted
+ // 5. The assembly is fully signed
+ // 6. The appdomain is not attempting to run ngen.
+ //
+ // Condition #3 requires that the grant set of the assembly not be obtained via the strong name
+ // evidence of the assembly. Note that this requirement does not forbid matching a code group
+ // with a strong name membership condition, as long as that code group and any children code
+ // groups produce a grant set which was less than or equal to the grant set produced by other
+ // code groups. For instance, in standard security policy:
+ //
+ // 1.1 MyComputer -> FullTrust
+ // 1.1.1 Microsoft Strong Name -> FullTrust
+ //
+            // In this case, an assembly loaded from the local machine with the Microsoft strong name is
+            // still eligible for strong name bypass via condition #3, since the MyComputer FullTrust grant
+ // set unioned with the StrongName FullTrust grant set will produce the same results as if we
+ // didn't evaluate the StrongName code group.
+ //
+ // Note that strong name bypass is not the same thing as strong name skip verification. Skip
+ // verification is a development time feature which enables developers to test privately built
+ // assemblies that do not have a full signature yet. It is not intended for use at runtime on
+ // non-developer machines, nor is it intended for use on assemblies with valid strong names.
+ //
+            // In contrast, strong name bypass is intended to be used on assemblies with valid strong names
+            // that are deployed to end users' machines. It's a performance feature which enables assemblies
+            // that were not gaining any benefit from having their strong name validated to avoid having to
+            // pay the expense of a full signature verification. This is why ngen does not participate in
+            // bypass: there are potential ways to use the ngen cache to skip strong name verification if
+            // ngen participated in strong name bypass, and the application's startup performance is not a
+            // concern when running at ngen time.
+ //
+
+ if (IsFullyTrusted() &&
+ GetDomain()->GetSecurityDescriptor()->IsFullyTrusted() &&
+ !SecurityPolicy::WasStrongNameEvidenceUsed(gc.evidence) &&
+ DontNeedToFlagAccidentalDelaySigning(m_pAssem->GetFile()) &&
+ !IsCompilationProcess())
+ {
+ m_pAssem->GetFile()->SetStrongNameBypassed();
+ }
+ else
+ {
+ m_pAssem->GetFile()->VerifyStrongName();
+ }
+ }
+#endif // FEATURE_CAS_POLICY
+
+ GCPROTECT_END();
+ }
+ END_DOMAIN_TRANSITION;
+#endif // CROSSGEN_COMPILE
+
+#endif // FEATURE_CORECLR
+}
+
+void AssemblySecurityDescriptor::ResolvePolicy(ISharedSecurityDescriptor *pSharedSecDesc, BOOL fShouldSkipPolicyResolution)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pSharedSecDesc));
+ } CONTRACTL_END;
+
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ m_pSharedSecDesc = static_cast<SharedSecurityDescriptor*>(pSharedSecDesc);
+
+ ETWOnStartup (SecurityCatchCall_V1, SecurityCatchCallEnd_V1);
+ //
+ // In V1.x, we used to check whether execution checking is enabled in caspol.exe
+ // or whether the assembly has assembly requests before resolving the assembly.
+ // This leads to several unnecessary complications in the code and the way assembly
+ // resolution is tracked throughout the lifetime of the AssemblySecurityDescriptor.
+ //
+ // In Whidbey, we will always resolve the policy eagerly while the assembly is being
+ // loaded. The perf concern is less of an issue in Whidbey as GAC assemblies are now
+ // automatically granted FullTrust.
+ //
+
+ // Push this frame around resolving the assembly for security to ensure the
+ // debugger can properly recognize any managed code that gets run
+ // as "class initializaion" code.
+ FrameWithCookie<DebuggerClassInitMarkFrame> __dcimf;
+
+ Resolve();
+
+ if (!fShouldSkipPolicyResolution)
+ {
+ // update the PLS with the grant/denied sets of the loaded assembly
+ ApplicationSecurityDescriptor* pAppDomainSecDesc = static_cast<ApplicationSecurityDescriptor*>(GetDomain()->GetSecurityDescriptor());
+ pAppDomainSecDesc->AddNewSecDescToPLS(this);
+
+ // Make sure that module transparency information is calculated so that we can verify that if the assembly
+ // is being loaded in partial trust it is transparent. This check is done in the ModuleSecurityDescriptor,
+ // so we just need to force it to calculate here.
+ ModuleSecurityDescriptor *pMSD = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(GetAssembly());
+ pMSD->VerifyDataComputed();
+ _ASSERTE(IsFullyTrusted() || pMSD->IsAllTransparent());
+ }
+
+ __dcimf.Pop();
+}
+
+#ifdef FEATURE_CAS_POLICY
+DWORD AssemblySecurityDescriptor::GetZone()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(m_pAppDomain->GetSecurityDescriptor()->IsLegacyCasPolicyEnabled());
+ } CONTRACTL_END;
+
+ StackSString codebase;
+ SecZone dwZone = NoZone;
+ BYTE rbUniqueID[MAX_SIZE_SECURITY_ID];
+ DWORD cbUniqueID = sizeof(rbUniqueID);
+
+ m_pAssem->GetSecurityIdentity(codebase, &dwZone, 0, rbUniqueID, &cbUniqueID);
+ return dwZone;
+}
+#endif // FEATURE_CAS_POLICY
+
+Assembly* AssemblySecurityDescriptor::GetAssembly()
+{
+ return m_pAssem->GetAssembly();
+}
+
+BOOL AssemblySecurityDescriptor::CanSkipPolicyResolution()
+{
+ WRAPPER_NO_CONTRACT;
+ Assembly* pAssembly = GetAssembly();
+ return pAssembly && pAssembly->CanSkipPolicyResolution();
+}
+
+
+#ifdef FEATURE_CAS_POLICY
+//-----------------------------------------------------------------------------------------------------------
+//
+// Upgrade the evidence used for resolving a PEFile to be targeted at the Assembly the PEFile represents
+//
+// Arguments:
+//    objPEFileEvidence - Evidence that was resolved against the PEFile
+//
+// Notes:
+// During CLR startup we may need to resolve policy against a PEFile before we have the associated
+//    Assembly. Once we have the Assembly we don't want to recompute potentially expensive evidence, so this
+//    method can be used to upgrade the evidence whose target was the PEFile to target the assembly instead.
+//
+// Will call into System.Reflection.Assembly.UpgradeSecurityIdentity
+//
+
+OBJECTREF AssemblySecurityDescriptor::UpgradePEFileEvidenceToAssemblyEvidence(const OBJECTREF& objPEFileEvidence)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(objPEFileEvidence != NULL);
+ }
+ CONTRACTL_END;
+
+ struct
+ {
+ OBJECTREF objAssembly;
+ OBJECTREF objEvidence;
+ OBJECTREF objUpgradedEvidence;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.objAssembly = m_pAssem->GetExposedAssemblyObject();
+ gc.objEvidence = objPEFileEvidence;
+
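+    // Call the managed UpgradeSecurityIdentity helper (see the header comment
+    // above) to retarget the evidence at the assembly.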
+ MethodDescCallSite upgradeSecurityIdentity(METHOD__ASSEMBLY_EVIDENCE_FACTORY__UPGRADE_SECURITY_IDENTITY);
+
+ ARG_SLOT args[] =
+ {
+ ObjToArgSlot(gc.objEvidence),
+ ObjToArgSlot(gc.objAssembly)
+ };
+
+ gc.objUpgradedEvidence = upgradeSecurityIdentity.Call_RetOBJECTREF(args);
+
+ GCPROTECT_END();
+
+ return gc.objUpgradedEvidence;
+}
+
+HRESULT AssemblySecurityDescriptor::LoadSignature(COR_TRUST **ppSignature)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (IsSignatureLoaded())
+ {
+ if (ppSignature)
+ {
+ *ppSignature = m_pSignature;
+ }
+
+ return S_OK;
+ }
+
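+    // Switch to preemptive GC mode while retrieving the Authenticode signature
+    // from the file; no managed objects are touched here.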
+ GCX_PREEMP();
+ m_pSignature = m_pAssem->GetFile()->GetAuthenticodeSignature();
+
+ SetSignatureLoaded();
+
+ if (ppSignature)
+ {
+ *ppSignature = m_pSignature;
+ }
+
+ return S_OK;
+}
+
+void AssemblySecurityDescriptor::SetAdditionalEvidence(OBJECTREF evidence)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ StoreObjectInLazyHandle(m_hAdditionalEvidence, evidence, m_pLoaderAllocator);
+ m_fAdditionalEvidence = TRUE;
+}
+
+BOOL AssemblySecurityDescriptor::HasAdditionalEvidence()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fAdditionalEvidence;
+}
+
+OBJECTREF AssemblySecurityDescriptor::GetAdditionalEvidence()
+{
+ WRAPPER_NO_CONTRACT;
+ return ObjectFromLazyHandle(m_hAdditionalEvidence, m_pLoaderAllocator);
+}
+#endif // FEATURE_CAS_POLICY
+
+#ifndef FEATURE_CORECLR
+BOOL AssemblySecurityDescriptor::AllowApplicationSpecifiedAppDomainManager()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Only fully trusted assemblies are allowed to specify their AppDomainManager in a config file
+ return this->IsFullyTrusted();
+}
+#endif // FEATURE_CORECLR
+
+// Check to make sure that security will allow this assembly to load. Throw an exception if the assembly
+// should be forbidden from loading for security-related reasons.
+void AssemblySecurityDescriptor::CheckAllowAssemblyLoad()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (m_pAssem->IsSystem())
+ {
+ return;
+ }
+
+ // If we're running PEVerify, then we need to allow the assembly to load in to be verified
+ if (m_pAppDomain->IsVerificationDomain())
+ {
+ return;
+ }
+
+ // Similarly, in the NGEN domain we don't want to force policy resolution, and we want
+ // to allow all assemblies to load
+ if (m_pAppDomain->IsCompilationDomain())
+ {
+ return;
+ }
+
+    // Reflection-only loads are also always allowed
+ if (m_pAssem->IsIntrospectionOnly())
+ {
+ return;
+ }
+
+ if (!IsResolved())
+ {
+ GCX_COOP();
+ Resolve();
+ }
+
+ if (!IsFullyTrusted() && (!m_pAppDomain->IsCompilationDomain() || !NingenEnabled()))
+ {
+ // Only fully trusted assemblies are allowed to be loaded when
+ // the AppDomain is in the initialization phase.
+ if (m_pAppDomain->GetSecurityDescriptor()->IsInitializationInProgress())
+ {
+ COMPlusThrow(kApplicationException, W("Policy_CannotLoadSemiTrustAssembliesDuringInit"));
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // WinRT is not supported in partial trust, so block it by throwing if a partially trusted winmd is loaded
+ if (IsAfContentType_WindowsRuntime(m_pAssem->GetFile()->GetFlags()))
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_WinRT_PartialTrust"));
+ }
+#endif // FEATURE_COMINTEROP
+ }
+}
+
+SharedSecurityDescriptor::SharedSecurityDescriptor(Assembly *pAssembly) :
+ m_pAssembly(pAssembly),
+ m_fResolved(FALSE),
+ m_fFullyTrusted(FALSE),
+ m_fCanCallUnmanagedCode(FALSE),
+ m_fCanAssert(FALSE),
+ m_fMicrosoftPlatform(FALSE)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+void SharedSecurityDescriptor::Resolve(IAssemblySecurityDescriptor *pSecDesc)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pSecDesc->IsResolved());
+ }
+ CONTRACTL_END;
+
+ if (!m_fResolved)
+ {
+ m_fFullyTrusted = pSecDesc->IsFullyTrusted();
+ m_fCanCallUnmanagedCode = pSecDesc->CanCallUnmanagedCode();
+ m_fCanAssert = pSecDesc->CanAssert();
+#ifdef FEATURE_CORECLR
+ m_fMicrosoftPlatform = static_cast<AssemblySecurityDescriptor*>(pSecDesc)->IsMicrosoftPlatform();
+#endif // FEATURE_CORECLR
+
+ m_fResolved = TRUE;
+ }
+
+ _ASSERTE(!!m_fFullyTrusted == !!pSecDesc->IsFullyTrusted());
+ _ASSERTE(!!m_fCanCallUnmanagedCode == !!pSecDesc->CanCallUnmanagedCode());
+ _ASSERTE(!!m_fCanAssert == !!pSecDesc->CanAssert());
+}
+
+BOOL SharedSecurityDescriptor::IsFullyTrusted()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsResolved());
+ } CONTRACTL_END;
+
+ return m_fFullyTrusted;
+}
+
+BOOL SharedSecurityDescriptor::CanCallUnmanagedCode() const
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsResolved());
+ } CONTRACTL_END;
+
+ return m_fCanCallUnmanagedCode;
+}
+
+BOOL SharedSecurityDescriptor::IsResolved() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fResolved;
+}
+
+BOOL SharedSecurityDescriptor::CanAssert()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsResolved());
+ } CONTRACTL_END;
+
+ return m_fCanAssert;
+}
+
+BOOL SharedSecurityDescriptor::IsSystem()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pAssembly->IsSystem();
+}
+
+Assembly* SharedSecurityDescriptor::GetAssembly()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pAssembly;
+}
+
+SharedSecurityDescriptor *AssemblySecurityDescriptor::GetSharedSecDesc()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pSharedSecDesc;
+}
+#endif // #ifndef DACCESS_COMPILE
+
+
diff --git a/src/vm/securitydescriptorassembly.h b/src/vm/securitydescriptorassembly.h
new file mode 100644
index 0000000000..306ac24517
--- /dev/null
+++ b/src/vm/securitydescriptorassembly.h
@@ -0,0 +1,204 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef __SECURITYDESCRIPTOR_ASSEMBLY_H__
+#define __SECURITYDESCRIPTOR_ASSEMBLY_H__
+
+#include "security.h"
+#include "securitydescriptor.h"
+struct AssemblyLoadSecurity;
+
+class Assembly;
+class DomainAssembly;
+
+// Security flags for the objects that store security information
+#define CORSEC_ASSERTED             0x000020    // Asserted permission set present on frame
+#define CORSEC_DENIED               0x000040    // Denied permission set present on frame
+#define CORSEC_REDUCED              0x000080    // Reduced permission set present on frame
+
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// [SecurityDescriptor]
+// |
+// +----[PEFileSecurityDescriptor]
+// |
+// +----[ApplicationSecurityDescriptor]
+// |
+// +----[AssemblySecurityDescriptor]
+//
+// [SharedSecurityDescriptor]
+//
+///////////////////////////////////////////////////////////////////////////////
+//
+// A Security Descriptor is placed on AppDomain and Assembly (unmanaged) objects.
+// AppDomain and Assembly could be from different zones.
+// Security Descriptor could also be placed on a native frame.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#define MAX_PASSED_DEMANDS 10
+
+//------------------------------------------------------------------
+//
+// ASSEMBLY SECURITY DESCRIPTOR
+//
+//------------------------------------------------------------------
+
+#ifndef DACCESS_COMPILE
+void StoreObjectInLazyHandle(LOADERHANDLE& handle, OBJECTREF ref, LoaderAllocator* la);
+#endif
+class AssemblySecurityDescriptor : public SecurityDescriptorBase<IAssemblySecurityDescriptor>
+{
+public:
+ VPTR_VTABLE_CLASS(AssemblySecurityDescriptor, SecurityDescriptorBase<IAssemblySecurityDescriptor>)
+
+private:
+ PsetCacheEntry* m_arrPassedLinktimeDemands[MAX_PASSED_DEMANDS];
+ DWORD m_dwNumPassedDemands;
+
+ COR_TRUST *m_pSignature; // Contains the publisher, requested permission
+ SharedSecurityDescriptor *m_pSharedSecDesc; // Shared state for assemblies loaded into multiple appdomains
+
+#ifdef FEATURE_CAS_POLICY
+ LOADERHANDLE m_hRequiredPermissionSet; // Required Requested Permissions
+ LOADERHANDLE m_hOptionalPermissionSet; // Optional Requested Permissions
+ LOADERHANDLE m_hDeniedPermissionSet; // Denied Permissions
+
+ BOOL m_fAdditionalEvidence;
+ BOOL m_fIsSignatureLoaded;
+ BOOL m_fAssemblyRequestsComputed;
+#endif // FEATURE_CAS_POLICY
+
+ BOOL m_fMicrosoftPlatform;
+ BOOL m_fAllowSkipVerificationInFullTrust;
+
+#ifndef DACCESS_COMPILE
+public:
+ virtual SharedSecurityDescriptor *GetSharedSecDesc();
+
+ virtual BOOL CanAssert();
+ virtual BOOL HasUnrestrictedUIPermission();
+ virtual BOOL IsAllCritical();
+ virtual BOOL IsAllSafeCritical();
+ virtual BOOL IsAllPublicAreaSafeCritical();
+ virtual BOOL IsAllTransparent();
+ virtual BOOL IsSystem();
+ BOOL QuickIsFullyTrusted();
+
+ BOOL CanSkipVerification();
+ virtual BOOL AllowSkipVerificationInFullTrust();
+
+ virtual VOID Resolve();
+
+ virtual void ResolvePolicy(ISharedSecurityDescriptor *pSharedDesc, BOOL fShouldSkipPolicyResolution);
+
+ AssemblySecurityDescriptor(AppDomain *pDomain, DomainAssembly *pAssembly, LoaderAllocator *pLoaderAllocator);
+
+ inline BOOL AlreadyPassedDemand(PsetCacheEntry *pCasDemands);
+ inline void TryCachePassedDemand(PsetCacheEntry *pCasDemands);
+ Assembly* GetAssembly();
+
+#ifndef DACCESS_COMPILE
+ virtual void PropagatePermissionSet(OBJECTREF GrantedPermissionSet, OBJECTREF DeniedPermissionSet, DWORD dwSpecialFlags);
+#endif // !DACCESS_COMPILE
+
+#ifdef FEATURE_CAS_POLICY
+ virtual HRESULT LoadSignature(COR_TRUST **ppSignature = NULL);
+ virtual OBJECTREF GetEvidence();
+ DWORD GetZone();
+
+ OBJECTREF GetRequestedPermissionSet(OBJECTREF *pOptionalPermissionSet, OBJECTREF *pDeniedPermissionSet);
+
+ virtual void SetRequestedPermissionSet(OBJECTREF RequiredPermissionSet,
+ OBJECTREF OptionalPermissionSet,
+ OBJECTREF DeniedPermissionSet);
+
+#ifndef DACCESS_COMPILE
+ virtual void SetAdditionalEvidence(OBJECTREF evidence);
+ virtual BOOL HasAdditionalEvidence();
+ virtual OBJECTREF GetAdditionalEvidence();
+ virtual void SetEvidenceFromPEFile(IPEFileSecurityDescriptor *pPEFileSecDesc);
+#endif // !DACCESS_COMPILE
+#endif // FEATURE_CAS_POLICY
+
+#ifndef FEATURE_CORECLR
+ virtual BOOL AllowApplicationSpecifiedAppDomainManager();
+#endif // !FEATURE_CORECLR
+
+ virtual void CheckAllowAssemblyLoad();
+
+#ifdef FEATURE_CORECLR
+ inline BOOL IsMicrosoftPlatform();
+#endif // FEATURE_CORECLR
+
+private:
+ BOOL CanSkipPolicyResolution();
+ OBJECTREF UpgradePEFileEvidenceToAssemblyEvidence(const OBJECTREF& objPEFileEvidence);
+
+ void ResolveWorker();
+
+#ifdef FEATURE_CAS_POLICY
+ inline BOOL IsAssemblyRequestsComputed();
+ inline BOOL IsSignatureLoaded();
+ inline void SetSignatureLoaded();
+#endif
+
+#ifdef FEATURE_APTCA
+ // If you think you need to call this method, you're probably wrong. We shouldn't be making any
+ // security enforcement decisions based upon this result -- it's strictly for ensuring that we load
+ // conditional APTCA assemblies correctly.
+ inline BOOL IsConditionalAptca();
+#endif // FEATURE_APTCA
+
+#ifdef FEATURE_CORECLR
+ inline void SetMicrosoftPlatform();
+#endif // FEATURE_CORECLR
+#endif // #ifndef DACCESS_COMPILE
+};
+
+
+// This really isn't in the SecurityDescriptor hierarchy, per se. It's attached
+// to the unmanaged assembly object and used to store common information when
+// the assembly is shared across multiple appdomains.
+class SharedSecurityDescriptor : public ISharedSecurityDescriptor
+{
+private:
+ // Unmanaged assembly this descriptor is attached to.
+ Assembly *m_pAssembly;
+
+ // All policy resolution is funnelled through the shared descriptor so we
+ // can guarantee everyone's using the same grant/denied sets.
+ BOOL m_fResolved;
+ BOOL m_fFullyTrusted;
+ BOOL m_fCanCallUnmanagedCode;
+ BOOL m_fCanAssert;
+ BOOL m_fMicrosoftPlatform;
+
+public:
+ SharedSecurityDescriptor(Assembly *pAssembly);
+
+ // All policy resolution is funnelled through the shared descriptor so we
+ // can guarantee everyone's using the same grant/denied sets.
+ virtual void Resolve(IAssemblySecurityDescriptor *pSecDesc = NULL);
+ virtual BOOL IsResolved() const;
+
+ // Is this assembly a system assembly?
+ virtual BOOL IsSystem();
+ virtual Assembly* GetAssembly();
+
+ inline BOOL IsMicrosoftPlatform();
+ BOOL IsFullyTrusted();
+ BOOL CanCallUnmanagedCode() const;
+ BOOL CanAssert();
+};
+
+#include "securitydescriptorassembly.inl"
+
+#endif // #define __SECURITYDESCRIPTOR_ASSEMBLY_H__
diff --git a/src/vm/securitydescriptorassembly.inl b/src/vm/securitydescriptorassembly.inl
new file mode 100644
index 0000000000..bb6162d6f3
--- /dev/null
+++ b/src/vm/securitydescriptorassembly.inl
@@ -0,0 +1,117 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+#ifndef __SECURITYDESCRIPTOR_ASSEMBLY_INL__
+#define __SECURITYDESCRIPTOR_ASSEMBLY_INL__
+
+#ifndef DACCESS_COMPILE
+
+inline BOOL AssemblySecurityDescriptor::AlreadyPassedDemand(PsetCacheEntry *pCasDemands)
+{
+ LIMITED_METHOD_CONTRACT;
+
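+    // Linear scan of the small fixed-size cache of link-time demands that have
+    // already been satisfied.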
+ BOOL result = false;
+ for (UINT index = 0; index < m_dwNumPassedDemands; index++)
+ {
+ if (m_arrPassedLinktimeDemands[index] == pCasDemands)
+ {
+ result = true;
+ break;
+ }
+ }
+
+ return result;
+}
+
+inline void AssemblySecurityDescriptor::TryCachePassedDemand(PsetCacheEntry *pCasDemands)
+{
+ LIMITED_METHOD_CONTRACT;
+
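+    // The cache is a fixed-size array; once it is full, further demands are
+    // simply not cached and will be re-evaluated on later calls.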
+    if (m_dwNumPassedDemands < MAX_PASSED_DEMANDS)
+ m_arrPassedLinktimeDemands[m_dwNumPassedDemands++] = pCasDemands;
+}
+
+#ifdef FEATURE_CAS_POLICY
+
+inline BOOL AssemblySecurityDescriptor::IsAssemblyRequestsComputed()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fAssemblyRequestsComputed;
+}
+
+inline BOOL AssemblySecurityDescriptor::IsSignatureLoaded()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fIsSignatureLoaded;
+}
+
+inline void AssemblySecurityDescriptor::SetSignatureLoaded()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fIsSignatureLoaded = TRUE;
+}
+
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_CORECLR
+
+inline BOOL AssemblySecurityDescriptor::IsMicrosoftPlatform()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fMicrosoftPlatform;
+}
+
+inline void AssemblySecurityDescriptor::SetMicrosoftPlatform()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fMicrosoftPlatform = TRUE;
+}
+
+#endif // FEATURE_CORECLR
+
+#ifdef FEATURE_APTCA
+
+inline BOOL AssemblySecurityDescriptor::IsConditionalAptca()
+{
+ WRAPPER_NO_CONTRACT;
+ ModuleSecurityDescriptor *pMSD = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(m_pAssem->GetAssembly());
+ return (pMSD->GetTokenFlags() & TokenSecurityDescriptorFlags_ConditionalAPTCA) == TokenSecurityDescriptorFlags_ConditionalAPTCA;
+}
+
+#endif // FEATURE_APTCA
+
+#endif // !DACCESS_COMPILE
+
+inline BOOL SharedSecurityDescriptor::IsMicrosoftPlatform()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fMicrosoftPlatform;
+}
+
+inline AssemblyLoadSecurity::AssemblyLoadSecurity() :
+ m_pEvidence(NULL),
+ m_pAdditionalEvidence(NULL),
+ m_pGrantSet(NULL),
+ m_pRefusedSet(NULL),
+ m_dwSpecialFlags(0),
+ m_fCheckLoadFromRemoteSource(false),
+ m_fSuppressSecurityChecks(false),
+ m_fPropagatingAnonymouslyHostedDynamicMethodGrant(false)
+{
+ LIMITED_METHOD_CONTRACT;
+ return;
+}
+
+// Should the assembly have policy resolved on it, or should it use a pre-determined grant set
+inline bool AssemblyLoadSecurity::ShouldResolvePolicy()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pGrantSet == NULL;
+}
+
+#endif // #define __SECURITYDESCRIPTOR_ASSEMBLY_INL__
diff --git a/src/vm/securityhostprotection.cpp b/src/vm/securityhostprotection.cpp
new file mode 100644
index 0000000000..fc3f53147b
--- /dev/null
+++ b/src/vm/securityhostprotection.cpp
@@ -0,0 +1,103 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#include "common.h"
+#include "securityattributes.h"
+#include "security.h"
+#include "eeconfig.h"
+#include "corhost.h"
+
+CorHostProtectionManager::CorHostProtectionManager()
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }CONTRACTL_END;
+
+ m_eProtectedCategories = eNoChecks;
+ m_fEagerSerializeGrantSet = false;
+ m_fFrozen = false;
+}
+
+HRESULT CorHostProtectionManager::QueryInterface(REFIID id, void **pInterface)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (id == IID_ICLRHostProtectionManager)
+ {
+ *pInterface = GetHostProtectionManager();
+ return S_OK;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ return E_NOINTERFACE;
+}
+
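+// The manager is not reference counted; AddRef and Release are no-ops that
+// satisfy the COM contract.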
+ULONG CorHostProtectionManager::AddRef()
+{
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+}
+
+ULONG CorHostProtectionManager::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+}
+
+void CorHostProtectionManager::Freeze()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fFrozen = true;
+}
+
+HRESULT CorHostProtectionManager::SetProtectedCategories(EApiCategories eProtectedCategories)
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ if(m_fFrozen)
+ return E_FAIL;
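+    // Reject any category bits that fall outside the known EApiCategories mask.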
+ if((eProtectedCategories | eAll) != eAll)
+ return E_FAIL;
+ m_eProtectedCategories = eProtectedCategories;
+ return S_OK;
+}
+
+EApiCategories CorHostProtectionManager::GetProtectedCategories()
+{
+ WRAPPER_NO_CONTRACT;
+
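+    // Reading the categories freezes them, so later SetProtectedCategories
+    // calls will fail once the runtime has consumed the values.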
+ Freeze();
+ return m_eProtectedCategories;
+}
+
+bool CorHostProtectionManager::GetEagerSerializeGrantSets() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+    // To provide more context about this flag in the hosting API: this covers the case where,
+    // during the unload of an appdomain, we need to serialize a grant set for a shared assembly
+    // that has resolved policy, in order to maintain the invariant that the same assembly loaded
+    // into another appdomain created in the future will be granted the same permissions
+    // (since the current policy is potentially burned into the jitted code of the shared assembly already).
+
+ return m_fEagerSerializeGrantSet;
+}
+
+HRESULT CorHostProtectionManager::SetEagerSerializeGrantSets()
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ m_fEagerSerializeGrantSet = true;
+ return S_OK;
+}
diff --git a/src/vm/securityhostprotection.h b/src/vm/securityhostprotection.h
new file mode 100644
index 0000000000..72b65c3ae1
--- /dev/null
+++ b/src/vm/securityhostprotection.h
@@ -0,0 +1,15 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef __SECURITYHOSTPROTECTION_H__
+#define __SECURITYHOSTPROTECTION_H__
+
+class CorHostProtectionManager;
+
+#endif // __SECURITYHOSTPROTECTION_H__
diff --git a/src/vm/securityimperative.cpp b/src/vm/securityimperative.cpp
new file mode 100644
index 0000000000..a59e44a1c4
--- /dev/null
+++ b/src/vm/securityimperative.cpp
@@ -0,0 +1,120 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+
+#include "common.h"
+#include "security.h"
+
+//-----------------------------------------------------------+
+// P R I V A T E H E L P E R S
+//-----------------------------------------------------------+
+
+LPVOID GetSecurityObjectForFrameInternal(StackCrawlMark *stackMark, INT32 create, OBJECTREF *pRefSecDesc)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ // This is a package protected method. Assumes correct usage.
+
+ Thread *pThread = GetThread();
+ AppDomain * pAppDomain = pThread->GetDomain();
+
+ if (pRefSecDesc == NULL)
+ {
+ if (!Security::SkipAndFindFunctionInfo(stackMark, NULL, &pRefSecDesc, &pAppDomain))
+ return NULL;
+ }
+
+ if (pRefSecDesc == NULL)
+ return NULL;
+
+    // Is the security object frame in a different context?
+    bool fSwitchContext = (pAppDomain != pThread->GetDomain());
+
+ if (create && *pRefSecDesc == NULL)
+ {
+ // If necessary, shift to correct context to allocate security object.
+ _ASSERTE(pAppDomain == GetAppDomain());
+ MethodTable* pMethFrameSecDesc = MscorlibBinder::GetClass(CLASS__FRAME_SECURITY_DESCRIPTOR);
+ *pRefSecDesc = AllocateObject(pMethFrameSecDesc);
+ }
+
+ // If we found or created a security object in a different context, make a
+ // copy in the current context.
+ LPVOID rv;
+ if (fSwitchContext && *pRefSecDesc != NULL)
+ *((OBJECTREF*)&rv) = AppDomainHelper::CrossContextCopyFrom(pAppDomain, pRefSecDesc);
+ else
+ *((OBJECTREF*)&rv) = *pRefSecDesc;
+
+ return rv;
+}
+
+FCIMPL2(Object*, SecurityRuntime::GetSecurityObjectForFrame, StackCrawlMark* stackMark, CLR_BOOL create)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refRetVal = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ refRetVal = ObjectToOBJECTREF((Object*)GetSecurityObjectForFrameInternal(stackMark, create, NULL));
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(refRetVal);
+}
+FCIMPLEND
+
+void SecurityRuntime::CheckBeforeAllocConsole(AppDomain* pDomain, Assembly* pAssembly)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ // Tell the debugger not to start on any managed code that we call in this method
+ FrameWithCookie<DebuggerSecurityCodeMarkFrame> __dbgSecFrame;
+
+ // Check that the assembly is granted unrestricted UIPermission
+ IAssemblySecurityDescriptor* pSecDesc = pAssembly->GetSecurityDescriptor(pDomain);
+ _ASSERTE(pSecDesc != NULL);
+ if (!pSecDesc->HasUnrestrictedUIPermission())
+ {
+ struct _gc {
+ OBJECTREF orDemand;
+ OBJECTREF orRefused;
+ OBJECTREF orGranted;
+ } gc;
+ ZeroMemory(&gc, sizeof(_gc));
+ GCPROTECT_BEGIN(gc);
+ {
+ // Get the necessary managed objects
+ gc.orGranted = pSecDesc->GetGrantedPermissionSet(&gc.orRefused);
+ SecurityDeclarative::_GetSharedPermissionInstance(&gc.orDemand, UI_PERMISSION);
+
+ // Check that the assembly is granted the necessary permission
+ SecurityStackWalk sw(SSWT_DEMAND_FROM_NATIVE, NULL);
+ sw.m_objects.SetObjects(gc.orDemand, NULL);
+ sw.CheckPermissionAgainstGrants(NULL, gc.orGranted, gc.orRefused, pDomain, NULL, pAssembly);
+ }
+ GCPROTECT_END();
+ }
+
+ // Now do a demand against everything on the stack for unrestricted UIPermission
+ Security::SpecialDemand(SSWT_DEMAND_FROM_NATIVE, UI_PERMISSION);
+
+ // Pop the debugger frame
+ __dbgSecFrame.Pop();
+}
+
+
diff --git a/src/vm/securityimperative.h b/src/vm/securityimperative.h
new file mode 100644
index 0000000000..4ff2a45378
--- /dev/null
+++ b/src/vm/securityimperative.h
@@ -0,0 +1,37 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+
+
+#ifndef __ComSecurityRuntime_h__
+#define __ComSecurityRuntime_h__
+
+#include "common.h"
+
+#include "object.h"
+#include "util.hpp"
+
+// Forward declarations to avoid pulling in too many headers.
+class Frame;
+enum StackWalkAction;
+
+//-----------------------------------------------------------
+// The SecurityRuntime implements all the native methods
+// for the managed class System.Security.SecurityRuntime
+//-----------------------------------------------------------
+namespace SecurityRuntime
+{
+//public:
+ // private helper for getting a security object
+ FCDECL2(Object*, GetSecurityObjectForFrame, StackCrawlMark* stackMark, CLR_BOOL create);
+//protected:
+ void CheckBeforeAllocConsole(AppDomain* pDomain, Assembly* pAssembly);
+};
+
+#endif /* __ComSecurityRuntime_h__ */
+
diff --git a/src/vm/securitymeta.cpp b/src/vm/securitymeta.cpp
new file mode 100644
index 0000000000..1d50c76a76
--- /dev/null
+++ b/src/vm/securitymeta.cpp
@@ -0,0 +1,2356 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//--------------------------------------------------------------------------
+// securitymeta.cpp
+//
+// Pre-computes security meta information from declarative and run-time information.
+//
+
+
+//
+//--------------------------------------------------------------------------
+
+
+
+#include "common.h"
+
+#include "object.h"
+#include "excep.h"
+#include "vars.hpp"
+#include "security.h"
+
+#include "perfcounters.h"
+#include "nlstable.h"
+#include "frames.h"
+#include "dllimport.h"
+#include "strongname.h"
+#include "eeconfig.h"
+#include "field.h"
+#include "threads.h"
+#include "eventtrace.h"
+#ifdef FEATURE_REMOTING
+#include "appdomainhelper.h"
+#include "objectclone.h"
+#endif //FEATURE_REMOTING
+#include "typestring.h"
+#include "stackcompressor.h"
+#include "securitydeclarative.h"
+#include "customattribute.h"
+#include "../md/compiler/custattr.h"
+
+#include "securitymeta.h"
+#include "caparser.h"
+
+void FieldSecurityDescriptor::VerifyDataComputed()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (m_flags & FieldSecurityDescriptorFlags_IsComputed)
+ {
+ return;
+ }
+
+#ifndef FEATURE_CORECLR
+ FieldSecurityDescriptorTransparencyEtwEvents etw(this);
+#endif // !FEATURE_CORECLR
+
+#ifdef _DEBUG
+    // If we've set up a breakpoint for when we compute the transparency of this field, then stop in the debugger
+ // now.
+ static ConfigMethodSet fieldTransparencyBreak;
+ fieldTransparencyBreak.ensureInit(CLRConfig::INTERNAL_Security_TransparencyFieldBreak);
+ if (fieldTransparencyBreak.contains(m_pFD->GetName(), m_pFD->GetApproxEnclosingMethodTable()->GetDebugClassName(), NULL))
+ {
+ DebugBreak();
+ }
+#endif // _DEBUG
+
+ FieldSecurityDescriptorFlags fieldFlags = FieldSecurityDescriptorFlags_None;
+
+ // check to see if the class has the critical attribute
+ MethodTable* pMT = m_pFD->GetApproxEnclosingMethodTable();
+ TypeSecurityDescriptor typeSecDesc(pMT);
+
+ const SecurityTransparencyBehavior *pTransparencyBehavior = m_pFD->GetModule()->GetAssembly()->GetSecurityTransparencyBehavior();
+ _ASSERTE(pTransparencyBehavior);
+
+ TokenSecurityDescriptor tokenSecDesc(m_pFD->GetModule(), m_pFD->GetMemberDef());
+
+ // If the containing type is all transparent or all critical / safe critical, then the field must also be
+ // transparent or critical / safe critical. If the type is mixed, then we need to look at the field's
+ // token first to see what its transparency level is
+ if (typeSecDesc.IsAllTransparent())
+ {
+ fieldFlags = FieldSecurityDescriptorFlags_None;
+ }
+ else if (typeSecDesc.IsOpportunisticallyCritical())
+ {
+ // Field opportunistically critical rules:
+ // Level 1 -> safe critical
+ // Level 2 -> critical
+ // If the containing type is participating in type equivalence -> transparent
+
+ if (!typeSecDesc.IsTypeEquivalent())
+ {
+ fieldFlags |= FieldSecurityDescriptorFlags_IsCritical;
+
+ if (typeSecDesc.IsTreatAsSafe() || pTransparencyBehavior->DoesOpportunisticRequireOnlySafeCriticalMethods())
+ {
+ fieldFlags |= FieldSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ }
+ }
+ else if (typeSecDesc.IsAllCritical())
+ {
+ fieldFlags |= FieldSecurityDescriptorFlags_IsCritical;
+
+ if (typeSecDesc.IsTreatAsSafe())
+ {
+ fieldFlags |= FieldSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ else if (pTransparencyBehavior->CanIntroducedCriticalMembersAddTreatAsSafe() &&
+ (tokenSecDesc.GetMetadataFlags() & (TokenSecurityDescriptorFlags_TreatAsSafe | TokenSecurityDescriptorFlags_SafeCritical)))
+ {
+ // If the transparency model allows members introduced into a critical scope to add their own
+ // TreatAsSafe attributes, then we need to look for a token level TreatAsSafe as well.
+ fieldFlags |= FieldSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ }
+ else
+ {
+ fieldFlags |= pTransparencyBehavior->MapFieldAttributes(tokenSecDesc.GetMetadataFlags());
+ }
+
+    // TreatAsSafe from the type we're contained in always propagates to its fields
+ if ((fieldFlags & FieldSecurityDescriptorFlags_IsCritical) &&
+ typeSecDesc.IsTreatAsSafe())
+ {
+ fieldFlags |= FieldSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+
+ // If the field is public and critical, it may additionally need to be marked treat as safe
+ if (pTransparencyBehavior->DoesPublicImplyTreatAsSafe() &&
+ typeSecDesc.IsTypeExternallyVisibleForTransparency() &&
+ (m_pFD->IsPublic() || m_pFD->IsProtected() || IsFdFamORAssem(m_pFD->GetFieldProtection())) &&
+ (fieldFlags & FieldSecurityDescriptorFlags_IsCritical) &&
+ !(fieldFlags & FieldSecurityDescriptorFlags_IsTreatAsSafe))
+ {
+ fieldFlags |= FieldSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+
+ // mark computed
+ FastInterlockOr(reinterpret_cast<DWORD *>(&m_flags), fieldFlags | FieldSecurityDescriptorFlags_IsComputed);
+}
+
+
+// All callers to this method will pass in a valid memory location for pMethodSecurityDesc, which they are responsible for
+// freeing when done using it. Typically this will be a stack location for perf reasons.
+//
+// Some details about when we cache MethodSecurityDescriptors and how the linkdemand process works:
+// - When we perform the LinkTimeCheck, we follow this order of checks
+// : APTCA check
+// : Class-level declarative security using TypeSecurityDescriptor
+// : Method-level declarative security using MethodSecurityDescriptor
+// : Unmanaged-code check (if required)
+//
+// For APTCA and Unmanaged code checks, we don't have a permissionset entry in the hashtable that we use when performing the demand. Since
+// these are well-known demands, we special-case them. What this means is that we may have a MethodSecurityDescriptor that requires a linktime check
+// but does not have DeclActionInfo or TokenDeclActionInfo fields inside.
+//
+// For cases where the Type causes the Link/Inheritance demand, the MethodDesc has the flag set, but the MethodSecurityDescriptor will not have any
+// DeclActionInfo or TokenDeclActionInfo.
+//
+// And the relevance all this has to this method is the following: Don't automatically insert a MethodSecurityDescriptor into the hash table if it has
+// linktime or inheritance time check. Only do so if either of the DeclActionInfo or TokenDeclActionInfo fields are non-NULL.
+void MethodSecurityDescriptor::LookupOrCreateMethodSecurityDescriptor(MethodSecurityDescriptor* ret_methSecDesc)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ret_methSecDesc));
+ } CONTRACTL_END;
+
+ _ASSERTE(CanMethodSecurityDescriptorBeCached(ret_methSecDesc->m_pMD));
+
+ MethodSecurityDescriptor* pMethodSecurityDesc = (MethodSecurityDescriptor*)TokenSecurityDescriptor::LookupSecurityDescriptor(ret_methSecDesc->m_pMD);
+ if (pMethodSecurityDesc == NULL)
+ {
+        ret_methSecDesc->VerifyDataComputedInternal(); // compute all the data that is needed
+
+ // cache method security desc using some simple heuristics
+ // we have some token actions computed, let us cache this method security desc
+
+ if (ret_methSecDesc->GetRuntimeDeclActionInfo() != NULL ||
+ ret_methSecDesc->GetTokenDeclActionInfo() != NULL ||
+ // NGEN accesses MethodSecurityDescriptors frequently to check for security callouts
+ IsCompilationProcess())
+ {
+
+ // Need to insert this methodSecDesc
+ LPVOID pMem = GetAppDomain()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(MethodSecurityDescriptor)));
+
+ // allocate a method security descriptor, using the appdomain heap memory
+ pMethodSecurityDesc = new (pMem) MethodSecurityDescriptor(ret_methSecDesc->m_pMD);
+
+ *pMethodSecurityDesc = *ret_methSecDesc; // copy over the fields
+
+ MethodSecurityDescriptor* pExistingMethodSecurityDesc = NULL;
+ // insert pMethodSecurityDesc into our hash table
+ pExistingMethodSecurityDesc = reinterpret_cast<MethodSecurityDescriptor*>(TokenSecurityDescriptor::InsertSecurityDescriptor(ret_methSecDesc->m_pMD, (HashDatum) pMethodSecurityDesc));
+ if (pExistingMethodSecurityDesc != NULL)
+ {
+ // if we found an existing method security desc, use it
+ // no need to delete the one we had created, as we allocated it in the Appdomain heap
+ pMethodSecurityDesc = pExistingMethodSecurityDesc;
+ }
+ }
+ }
+ else
+ {
+ *ret_methSecDesc = *pMethodSecurityDesc;
+ }
+
+ return;
+}
+
+BOOL MethodSecurityDescriptor::CanMethodSecurityDescriptorBeCached(MethodDesc* pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return pMD->IsInterceptedForDeclSecurity() ||
+ pMD->RequiresLinktimeCheck() ||
+ pMD->RequiresInheritanceCheck()||
+ pMD->IsVirtual()||
+ pMD->IsMethodImpl()||
+ pMD->IsLCGMethod();
+}
+
+void MethodSecurityDescriptor::VerifyDataComputedInternal()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (m_flags & MethodSecurityDescriptorFlags_IsComputed)
+ {
+ return;
+ }
+
+    // If the method hasn't already cached its transparency information, then we need to calculate it here.
+ // It can be cached if we're loading the method from a native image, but are creating the security
+ // descriptor in order to figure out declarative security.
+ if (!m_pMD->HasCriticalTransparentInfo())
+ {
+ ComputeCriticalTransparentInfo();
+ }
+
+ // compute RUN-TIME DECLARATIVE SECURITY STUFF
+ // (merges both class and method level run-time declarative security info).
+ if (HasRuntimeDeclarativeSecurity())
+ {
+ ComputeRuntimeDeclarativeSecurityInfo();
+ }
+
+ // compute method specific DECLARATIVE STUFF
+ if (HasRuntimeDeclarativeSecurity() || HasLinkOrInheritanceDeclarativeSecurity())
+ {
+ ComputeMethodDeclarativeSecurityInfo();
+ }
+
+ // mark computed
+ FastInterlockOr(reinterpret_cast<DWORD *>(&m_flags), MethodSecurityDescriptorFlags_IsComputed);
+}
+
+void MethodSecurityDescriptor::ComputeCriticalTransparentInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_CORECLR
+ MethodSecurityDescriptorTransparencyEtwEvents etw(this);
+#endif // !FEATURE_CORECLR
+
+ MethodTable* pMT = m_pMD->GetMethodTable();
+
+#ifdef _DEBUG
+    // If we've set up a breakpoint for when we compute the transparency of this method, then stop in the debugger
+ // now.
+ static ConfigMethodSet methodTransparencyBreak;
+ methodTransparencyBreak.ensureInit(CLRConfig::INTERNAL_Security_TransparencyMethodBreak);
+ if (methodTransparencyBreak.contains(m_pMD->GetName(), pMT->GetDebugClassName(), NULL))
+ {
+ DebugBreak();
+ }
+#endif // _DEBUG
+
+ MethodSecurityDescriptorFlags methodFlags = MethodSecurityDescriptorFlags_None;
+ TypeSecurityDescriptor typeSecDesc(pMT);
+
+ const SecurityTransparencyBehavior *pTransparencyBehavior = m_pMD->GetAssembly()->GetSecurityTransparencyBehavior();
+ _ASSERTE(pTransparencyBehavior);
+
+ // If the transparency model used by this method cares about the location of the introduced method,
+ // then we need to figure out where the method was introduced. This is only important when the type is
+ // all critical or opportunistically critical, since otherwise we'll look at the method directly anyway.
+ MethodDesc *pIntroducingMD = NULL;
+ bool fWasIntroducedLocally = true;
+ if (pTransparencyBehavior->DoesScopeApplyOnlyToIntroducedMethods() &&
+ (typeSecDesc.IsOpportunisticallyCritical() || typeSecDesc.IsAllCritical()))
+ {
+ if (m_pMD->IsVirtual() &&
+ !m_pMD->IsInterface() &&
+ m_pMD->GetSlot() < m_pMD->GetMethodTable()->GetNumVirtuals())
+ {
+ pIntroducingMD = m_pMD->GetMethodTable()->GetIntroducingMethodDesc(m_pMD->GetSlot());
+ }
+
+ fWasIntroducedLocally = pIntroducingMD == NULL || pIntroducingMD == m_pMD;
+
+ //
+ // #OpportunisticallyCriticalMultipleImplement
+ //
+ // One method can be the target of multiple interfaces and also an override of a base class. Further,
+ // there could be conflicting inheritance requirements; for instance overriding a critical method and
+ // implementing a transparent interface with the same method desc.
+ //
+    // For APTCA assemblies, we require that they separate these out into explicit interface implementations
+    // to solve this problem; however, we cannot push this requirement to opportunistically critical
+ // assemblies. Therefore, in those assemblies we create the following non-introduced method rule:
+ //
+ // 1. If both the base override and all of the interfaces that a method desc is implementing have the
+ // same accessibility, then the method must agree with that accessibility.
+ //
+ // 2. If there is a mix of transparent accessibilities, then the method desc will be safe critical.
+ // This leads to a situation where a safe critical method can implement a critical interface,
+ // which is not a security hole, but does create some strangeness around the fact that transparent
+ // code can call the method directly but not via the interface (or base type).
+ //
+ // Since there is no way for all inheritance requirements to be satisfied here, we choose to
+ // violate the overriding critical one because looking directly at the method will indicate that
+ // it is callable from transparent, whereas allowing a critical implementation of a transparent
+ // interface would create a worse situation of the method desc saying that it is not callable from
+ // transparent, while it would be via the interface.
+ //
+ // A variation of this problem can also occur with MethodImpls. For example, a virtual method could
+ // implement both a transparent and a critical virtual. This case follows the same rules laid out
+ // above for interface implementations.
+
+ // We need to check the interfaces and MethodImpls if we were introduced locally, or if we're
+ // opportunistically critical and the introducing method was not safe critical.
+ bool fCheckInterfacesAndMethodImpls = fWasIntroducedLocally;
+ if (!fCheckInterfacesAndMethodImpls && typeSecDesc.IsOpportunisticallyCritical())
+ {
+ _ASSERTE(pIntroducingMD != NULL);
+ // Make sure the introducing method has its transparency calculated
+ if (!pIntroducingMD->HasCriticalTransparentInfo())
+ {
+ MethodSecurityDescriptor introducingMSD(pIntroducingMD);
+ introducingMSD.ComputeCriticalTransparentInfo();
+ }
+
+ // We need to keep looking at the interfaces and MethodImpls if we override a critical method. If
+ // we're overriding a safe critical or transparent method, then we'll end up being safe critical
+ // anyway.
+ fCheckInterfacesAndMethodImpls = pIntroducingMD->IsCritical() && !pIntroducingMD->IsTreatAsSafe();
+ }
+
+ if (fCheckInterfacesAndMethodImpls &&
+ !m_pMD->IsCtor() &&
+ !m_pMD->IsStatic())
+ {
+ // Interface implementation or MethodImpl that we choose to use to calculate transparency - for
+ // opportunistically critical methods, this is the first safe critical / transparent method if one
+ // is found, otherwise the first critical method. For all other methods, it is the first
+ // interface / MethodImpl method found.
+ MethodDesc *pSelectedMD = NULL;
+
+ // Iterate over the implemented methods to see if we're implementing any interfaces or virtuals
+ MethodImplementationIterator implementationIterator(m_pMD);
+ bool fFoundTargetMethod = false;
+ for (; implementationIterator.IsValid() && !fFoundTargetMethod; implementationIterator.Next())
+ {
+ MethodDesc *pImplementedMD = implementationIterator.Current();
+
+ // If we're opportunistically critical, then we need to figure out if the implemented
+ // method is critical or not, and continue looking if we only found critical methods
+ // to this point.
+ if (typeSecDesc.IsOpportunisticallyCritical())
+ {
+ // We should either have not found a candidate yet, or that candidate should be critical
+ _ASSERTE(pSelectedMD == NULL ||
+ (pSelectedMD->IsCritical() && !pSelectedMD->IsTreatAsSafe()));
+
+ if (!pImplementedMD->HasCriticalTransparentInfo())
+ {
+ MethodSecurityDescriptor implementedMSD(pImplementedMD);
+ implementedMSD.ComputeCriticalTransparentInfo();
+ }
+
+ // If this is the first interface method or MethodImpl we've seen, save it away. Otherwise,
+ // we've so far implemented only critical interfaces and methods, so if we see a
+ // transparent or safe critical interface method, we should note that and stop looking
+ // further.
+ if (!pImplementedMD->IsCritical() || pImplementedMD->IsTreatAsSafe())
+ {
+ pSelectedMD = pImplementedMD;
+ fFoundTargetMethod = true;
+ }
+ else if (pSelectedMD == NULL)
+ {
+ pSelectedMD = pImplementedMD;
+ }
+ }
+ else
+ {
+ // If we're not opportunistically critical, then we only care about the first interface
+ // implementation or MethodImpl that we see.
+ _ASSERTE(pSelectedMD == NULL);
+ pSelectedMD = pImplementedMD;
+ fFoundTargetMethod = true;
+ }
+ }
+
+ // If we found an interface method or MethodImpl, then use that as the introducing method
+ if (pSelectedMD != NULL)
+ {
+ pIntroducingMD = pSelectedMD;
+ fWasIntroducedLocally = false;
+ }
+ }
+
+ // If we're not working with a method that we introduced, make sure it has its transparency calculated
+ // before we need to use it.
+ if (!fWasIntroducedLocally && !pIntroducingMD->HasCriticalTransparentInfo())
+ {
+ MethodSecurityDescriptor introducingMSD(pIntroducingMD);
+ introducingMSD.ComputeCriticalTransparentInfo();
+ _ASSERTE(pIntroducingMD->HasCriticalTransparentInfo());
+ }
+ }
+
+ // In a couple of cases we know the transparency of the method directly:
+ // 1. If our parent type is all transparent, we must also be transparent
+    // 2. If we're opportunistically critical, then we can figure out the annotation based upon the override
+ // 3. If our parent type is all critical, and we were introduced by that type, we must also be critical
+ // (we could also be safe critical as well).
+ //
+ // Otherwise, we need to ask the current transparency implementation what this method is, because it
+ // will vary depending upon if we're in legacy mode or not.
+ TokenSecurityDescriptor methodTokenSecDesc(m_pMD->GetModule(), GetToken());
+ if (typeSecDesc.IsAllTransparent())
+ {
+ methodFlags = MethodSecurityDescriptorFlags_None;
+ }
+ else if (typeSecDesc.IsOpportunisticallyCritical())
+ {
+ // Opportunistically critical methods will always be critical
+ methodFlags |= MethodSecurityDescriptorFlags_IsCritical;
+
+ // If we're overriding a safe critical or transparent method, we also need to be treat as safe
+ //
+ // Virtuals on value types have multiple entries in the method table, so we may not have mapped
+ // it back to the override that it was implementing. In order to compensate for this, we simply
+ // allow all virtuals in opportunistically critical value types to be safe critical. This doesn't
+ // introduce any extra risk, because unless we're overriding one of the Object overloads, there is
+ // nothing that transparent code can cast the ValueType to in order to access the virtual since the
+ // value type itself will be critical.
+ //
+ // If we're in a transparency model where all opportunistically critical methods are safe critical, we
+ // need to add the treat as safe bit.
+ //
+ // Finally, if we're in a type participating in type equivalence, then we need to add the treat as
+ // safe bit. This keeps the transparency of methods in type equivalent interfaces consistent across
+ // security rule sets in opportunistically critical assemblies, which allows types from v2 PIAs to
+ // be embedded successfully into v4 assemblies for instance.
+ if (!fWasIntroducedLocally &&
+ (!pIntroducingMD->IsCritical() || pIntroducingMD->IsTreatAsSafe()))
+ {
+ methodFlags |= MethodSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ else if (pMT->IsValueType() && m_pMD->IsVirtual())
+ {
+ methodFlags |= MethodSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ else if (pTransparencyBehavior->DoesOpportunisticRequireOnlySafeCriticalMethods())
+ {
+ methodFlags |= MethodSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ else if (typeSecDesc.IsTypeEquivalent())
+ {
+ methodFlags |= MethodSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ }
+ else if (typeSecDesc.IsAllCritical() && fWasIntroducedLocally)
+ {
+ methodFlags |= MethodSecurityDescriptorFlags_IsCritical;
+
+ if (typeSecDesc.IsTreatAsSafe())
+ {
+ methodFlags |= MethodSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ else if (pTransparencyBehavior->CanIntroducedCriticalMembersAddTreatAsSafe() &&
+ (methodTokenSecDesc.GetMetadataFlags() & (TokenSecurityDescriptorFlags_TreatAsSafe | TokenSecurityDescriptorFlags_SafeCritical)))
+ {
+ // If the transparency model allows members introduced into a critical scope to add their own
+ // TreatAsSafe attributes, then we need to look for a token level TreatAsSafe as well.
+ methodFlags |= MethodSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ }
+ else
+ {
+ // We don't have a larger scope that tells us what to do with the method, so ask the transparency
+ // implementation to map our attributes to a set of flags
+ methodFlags |= pTransparencyBehavior->MapMethodAttributes(methodTokenSecDesc.GetMetadataFlags());
+ }
+
+    // TreatAsSafe from the type we're contained in always propagates to its methods
+ if (fWasIntroducedLocally &&
+ (methodFlags & MethodSecurityDescriptorFlags_IsCritical) &&
+ typeSecDesc.IsTreatAsSafe())
+ {
+ methodFlags |= MethodSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+
+ // The compiler can introduce default constructors implicitly, and for an explicitly critical type they
+ // will always be transparent - resulting in a type load exception. If we are a transparent default .ctor
+ // of an explicitly critical type, then we'll switch to being safe critical to allow the type to load and
+ // allow us access to our this pointer
+ if (!typeSecDesc.IsAllCritical() &&
+ typeSecDesc.IsCritical() &&
+ !(methodFlags & MethodSecurityDescriptorFlags_IsCritical) &&
+ m_pMD->IsCtor())
+ {
+ if (pMT->HasDefaultConstructor() &&
+ pMT->GetDefaultConstructor() == m_pMD)
+ {
+ methodFlags |= MethodSecurityDescriptorFlags_IsCritical |
+ MethodSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ }
+
+ // See if we're a public critical method, then we may need to additionally make ourselves treat as safe
+ if (pTransparencyBehavior->DoesPublicImplyTreatAsSafe() &&
+ typeSecDesc.IsTypeExternallyVisibleForTransparency() &&
+ (m_pMD->IsPublic() || m_pMD->IsProtected() || IsMdFamORAssem(m_pMD->GetAttrs())) &&
+ (methodFlags & MethodSecurityDescriptorFlags_IsCritical) &&
+ !(methodFlags & MethodSecurityDescriptorFlags_IsTreatAsSafe))
+ {
+ methodFlags |= MethodSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+
+ // Cache our state on the MethodDesc
+ m_pMD->SetCriticalTransparentInfo(methodFlags & MethodSecurityDescriptorFlags_IsCritical,
+ methodFlags & MethodSecurityDescriptorFlags_IsTreatAsSafe);
+}
+
+void MethodSecurityDescriptor::ComputeRuntimeDeclarativeSecurityInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Load declarative security attributes
+ _ASSERTE(HasRuntimeDeclarativeSecurity());
+ m_declFlagsDuringPreStub = m_pMD->GetSecurityFlagsDuringPreStub();
+ _ASSERTE(m_declFlagsDuringPreStub && " Expected some runtime security action");
+ m_pRuntimeDeclActionInfo = SecurityDeclarative::DetectDeclActions(m_pMD, m_declFlagsDuringPreStub);
+}
+
+void MethodSecurityDescriptor::ComputeMethodDeclarativeSecurityInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ DWORD flags = 0;
+
+ _ASSERTE(HasRuntimeDeclarativeSecurity()|| HasLinkOrInheritanceDeclarativeSecurity());
+ DWORD dwDeclFlags;
+ HRESULT hr = SecurityDeclarative::GetDeclarationFlags(GetIMDInternalImport(), GetToken(), &dwDeclFlags, NULL, NULL);
+
+ if (SUCCEEDED(hr))
+ {
+ GCX_COOP();
+ PsetCacheEntry *tokenSetIndexes[dclMaximumValue + 1];
+ SecurityDeclarative::DetectDeclActionsOnToken(GetToken(), dwDeclFlags, tokenSetIndexes, GetIMDInternalImport());
+
+ // Create single linked list of set indexes
+ DWORD dwLocalAction;
+ bool builtInCASPermsOnly = true;
+ for (dwLocalAction = 0; dwLocalAction <= dclMaximumValue; dwLocalAction++)
+ {
+ if (tokenSetIndexes[dwLocalAction] != NULL)
+ {
+ TokenDeclActionInfo::LinkNewDeclAction(&m_pTokenDeclActionInfo, (CorDeclSecurity)dwLocalAction, tokenSetIndexes[dwLocalAction]);
+ builtInCASPermsOnly = builtInCASPermsOnly && (tokenSetIndexes[dwLocalAction]->ContainsBuiltinCASPermsOnly(dwLocalAction));
+ }
+ }
+
+ if (builtInCASPermsOnly)
+ flags |= MethodSecurityDescriptorFlags_IsBuiltInCASPermsOnly;
+ SecurityProperties sp(dwDeclFlags);
+ if (sp.FDemandsOnly())
+ flags |= MethodSecurityDescriptorFlags_IsDemandsOnly;
+ if (sp.FAssertionsExist())
+ {
+ // Check whether the assembly has been granted permission to assert, and cache that value in the MethodSecurityDescriptor
+ Module* pModule = m_pMD->GetModule();
+ PREFIX_ASSUME_MSG(pModule != NULL, "Should be a Module pointer here");
+
+ if (Security::CanAssert(pModule))
+ {
+ flags |= MethodSecurityDescriptorFlags_AssertAllowed;
+ }
+ }
+ }
+
+ FastInterlockOr(reinterpret_cast<DWORD *>(&m_flags), flags);
+}
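+
+// Illustrative example (assumed C# shape, not from this file): a method annotated with
+//   [SecurityPermission(SecurityAction.Demand, UnmanagedCode = true)]
+//   [SecurityPermission(SecurityAction.Assert, UnmanagedCode = true)]
+// gets two linked TokenDeclActionInfo nodes above (dclDemand and dclAssert), and the
+// assert additionally sets MethodSecurityDescriptorFlags_AssertAllowed only when
+// Security::CanAssert grants the method's module the right to assert.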
+
+void MethodSecurityDescriptor::InvokeInheritanceChecks(MethodDesc *pChildMD)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pChildMD));
+ }
+ CONTRACTL_END;
+
+ const SecurityTransparencyBehavior *pTransparencyBehavior = pChildMD->GetAssembly()->GetSecurityTransparencyBehavior();
+ if (pTransparencyBehavior->AreInheritanceRulesEnforced())
+ {
+ // The profiler may want to suppress these checks if it's currently running on the child type
+ if (Security::BypassSecurityChecksForProfiler(pChildMD))
+ {
+ return;
+ }
+
+ /*
+ Allowed Inheritance Patterns (cannot change accessibility)
+ ----------------------------
+
+ Base Class/Method Derived Class/Method
+ ----------------- ---------------------
+ Transparent Transparent
+ Transparent SafeCritical
+ SafeCritical SafeCritical
+ SafeCritical Transparent
+ Critical Critical
+
+
+ Disallowed Inheritance patterns
+ -------------------------------
+
+ Base Class/Method Derived Class/Method
+ ----------------- ---------------------
+ Transparent Critical
+ SafeCritical Critical
+ Critical Transparent
+ Critical SafeCritical
+ */
+
+ MethodSecurityDescriptor methSecurityDescriptor(pChildMD, FALSE);
+ TokenSecurityDescriptor methTokenSecurityDescriptor(pChildMD->GetModule(), pChildMD->GetMemberDef());
+ if (IsCritical())
+ {
+ if (IsTreatAsSafe())
+ {
+ // Base: SafeCritical. Check if Child is Critical
+ if (methSecurityDescriptor.IsCritical() && !methSecurityDescriptor.IsTreatAsSafe())
+ {
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ SecurityTransparent::LogTransparencyError(pChildMD, "Critical method overriding a SafeCritical base method", m_pMD);
+ }
+ if (!g_pConfig->DisableTransparencyEnforcement())
+#endif // _DEBUG
+ {
+ SecurityTransparent::ThrowTypeLoadException(pChildMD);
+ }
+ }
+ }
+ else
+ {
+ // Base: Critical.
+ if (!methSecurityDescriptor.IsCritical())
+ {
+ // Child is transparent
+ // throw
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ SecurityTransparent::LogTransparencyError(pChildMD, "Transparent method overriding a critical base method", m_pMD);
+ }
+ if (!g_pConfig->DisableTransparencyEnforcement())
+#endif // _DEBUG
+ {
+ SecurityTransparent::ThrowTypeLoadException(pChildMD);
+ }
+ }
+ else if (methSecurityDescriptor.IsTreatAsSafe() && !methSecurityDescriptor.IsOpportunisticallyCritical())
+ {
+ // The child is safe critical and not opportunistically critical (see code:#OpportunisticallyCriticalMultipleImplement)
+ // throw.
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ SecurityTransparent::LogTransparencyError(pChildMD, "Safe critical method overriding a SafeCritical base method", m_pMD);
+ }
+ if (!g_pConfig->DisableTransparencyEnforcement())
+#endif // _DEBUG
+ {
+ SecurityTransparent::ThrowTypeLoadException(pChildMD);
+ }
+ }
+ }
+ }
+ else
+ {
+ // Base: Transparent. Throw if derived is Critical and not SafeCritical
+ if (methSecurityDescriptor.IsCritical() && !methSecurityDescriptor.IsTreatAsSafe())
+ {
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ SecurityTransparent::LogTransparencyError(pChildMD, "Critical method overriding a transparent base method", m_pMD);
+ }
+ if (!g_pConfig->DisableTransparencyEnforcement())
+#endif // _DEBUG
+ {
+ SecurityTransparent::ThrowTypeLoadException(pChildMD);
+ }
+ }
+ }
+ }
+#ifndef FEATURE_CORECLR
+ // Check CAS Inheritance
+
+ // Early out if we're fully trusted
+ if (SecurityDeclarative::FullTrustCheckForLinkOrInheritanceDemand(pChildMD->GetAssembly()))
+ {
+ return;
+ }
+
+ if (HasInheritanceDeclarativeSecurity())
+ {
+#ifdef CROSSGEN_COMPILE
+ // NGen is always full trust. This path should be unreachable.
+ CrossGenNotSupported("HasInheritanceDeclarativeSecurity()");
+#else // CROSSGEN_COMPILE
+ GCX_COOP();
+
+ OBJECTREF refCasDemands = NULL;
+ PsetCacheEntry* pCasDemands = NULL;
+
+ HRESULT hr = GetDeclaredPermissionsWithCache(dclInheritanceCheck, &refCasDemands, &pCasDemands);
+ if (refCasDemands != NULL)
+ {
+ _ASSERTE(pCasDemands != NULL);
+
+ // See if inheritor's assembly has passed this demand before
+ AssemblySecurityDescriptor *pInheritorAssem = static_cast<AssemblySecurityDescriptor*>(pChildMD->GetAssembly()->GetSecurityDescriptor());
+ BOOL fSkipCheck = pInheritorAssem->AlreadyPassedDemand(pCasDemands);
+
+ if (!fSkipCheck)
+ {
+ GCPROTECT_BEGIN(refCasDemands);
+
+ // Perform the check (it's really just a LinkDemand)
+ SecurityStackWalk::LinkOrInheritanceCheck(pChildMD->GetAssembly()->GetSecurityDescriptor(), refCasDemands, pChildMD->GetAssembly(), dclInheritanceCheck);
+
+ // Demand passed. Add it to the Inheritor's assembly's list of passed demands
+ pInheritorAssem->TryCachePassedDemand(pCasDemands);
+
+ GCPROTECT_END();
+ }
+ }
+
+ // @todo -- non cas shouldn't be used for inheritance demands...
+
+ // Check non-CAS Inheritance
+ OBJECTREF refNonCasDemands = NULL;
+ hr = GetDeclaredPermissionsWithCache( dclNonCasInheritance, &refNonCasDemands, NULL);
+ if (refNonCasDemands != NULL)
+ {
+ _ASSERTE(((PERMISSIONSETREF)refNonCasDemands)->CheckedForNonCas() && "Declarative permissions should have been checked for nonCAS in PermissionSet.CreateSerialized");
+ if (((PERMISSIONSETREF)refNonCasDemands)->ContainsNonCas())
+ {
+ GCPROTECT_BEGIN(refNonCasDemands);
+
+ // Perform the check
+ MethodDescCallSite demand(METHOD__PERMISSION_SET__DEMAND_NON_CAS, &refNonCasDemands);
+ ARG_SLOT arg = ObjToArgSlot(refNonCasDemands);
+ demand.Call(&arg);
+
+ GCPROTECT_END();
+ }
+ }
+#endif // CROSSGEN_COMPILE
+ }
+#endif // FEATURE_CORECLR
+}
+
+MethodSecurityDescriptor::MethodImplementationIterator::MethodImplementationIterator(MethodDesc *pMD)
+ : m_interfaceIterator(pMD->GetMethodTable()),
+ m_pMD(pMD),
+ m_iMethodImplIndex(0),
+ m_fInterfaceIterationBegun(false),
+ m_fMethodImplIterationBegun(false)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(pMD != NULL);
+ }
+ CONTRACTL_END;
+
+ Next();
+}
+
+MethodDesc *MethodSecurityDescriptor::MethodImplementationIterator::Current()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(IsValid());
+ }
+ CONTRACTL_END;
+
+ if (m_pMD->GetMethodTable()->HasDispatchMap() && m_interfaceIterator.IsValid())
+ {
+ _ASSERTE(m_fInterfaceIterationBegun);
+ MethodTable *pInterface = m_pMD->GetMethodTable()->LookupDispatchMapType(m_interfaceIterator.Entry()->GetTypeID());
+ return pInterface->GetMethodDescForSlot(m_interfaceIterator.Entry()->GetSlotNumber());
+ }
+ else
+ {
+ _ASSERTE(m_fMethodImplIterationBegun);
+ _ASSERTE(m_pMD->IsMethodImpl());
+ _ASSERTE(m_iMethodImplIndex < m_pMD->GetMethodImpl()->GetSize());
+ return m_pMD->GetMethodImpl()->GetImplementedMDs()[m_iMethodImplIndex];
+ }
+}
+
+bool MethodSecurityDescriptor::MethodImplementationIterator::IsValid()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ // We're valid as long as we still have interface maps or method impls to process
+ if (m_pMD->GetMethodTable()->HasDispatchMap() && m_interfaceIterator.IsValid())
+ {
+ return true;
+ }
+ else if (m_pMD->IsMethodImpl())
+ {
+ return m_iMethodImplIndex < m_pMD->GetMethodImpl()->GetSize();
+ }
+ else
+ {
+ return false;
+ }
+}
+
+void MethodSecurityDescriptor::MethodImplementationIterator::Next()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ bool fFoundImpl = false;
+
+ // First iterate over the interface implementations
+ if (m_pMD->GetMethodTable()->HasDispatchMap() && m_interfaceIterator.IsValid())
+ {
+ while (m_interfaceIterator.IsValid() && !fFoundImpl)
+ {
+ // If we haven't yet begun iterating interfaces then don't call Next right away - otherwise
+ // we'll potentially skip over the first interface method.
+ if (m_fInterfaceIterationBegun)
+ {
+ m_interfaceIterator.Next();
+ }
+ else
+ {
+ m_fInterfaceIterationBegun = true;
+ }
+
+ if (m_interfaceIterator.IsValid())
+ {
+ _ASSERTE(!m_interfaceIterator.Entry()->GetTypeID().IsThisClass());
+ fFoundImpl = (m_interfaceIterator.Entry()->GetTargetSlotNumber() == m_pMD->GetSlot());
+ }
+ }
+ }
+
+ // Once we're done with the interface implementations, check for a MethodImpl
+ if (!fFoundImpl && m_pMD->IsMethodImpl())
+ {
+ MethodImpl * pMethodImpl = m_pMD->GetMethodImpl();
+ while ((m_iMethodImplIndex < pMethodImpl->GetSize()) && !fFoundImpl)
+ {
+ // If we haven't yet begun iterating method impls then don't move to the next element right away
+ // - otherwise we'll potentially skip over the first MethodImpl
+ if (m_fMethodImplIterationBegun)
+ {
+ ++m_iMethodImplIndex;
+ }
+ else
+ {
+ m_fMethodImplIterationBegun = true;
+ }
+
+ if (m_iMethodImplIndex < pMethodImpl->GetSize())
+ {
+ // Skip over the interface MethodImpls since we already processed those
+ fFoundImpl = !pMethodImpl->GetImplementedMDs()[m_iMethodImplIndex]->IsInterface();
+ }
+ }
+ }
+} // MethodSecurityDescriptor::MethodImplementationIterator::Next
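+
+// Illustrative usage (hypothetical caller, not from this file):
+//   for (MethodSecurityDescriptor::MethodImplementationIterator it(pMD); it.IsValid(); it.Next())
+//   {
+//       MethodDesc *pParentMD = it.Current(); // interface slots first, then MethodImpls
+//   }
+// Note that the constructor calls Next() itself, so Current() is positioned on the
+// first implemented method as soon as IsValid() returns true.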
+
+TypeSecurityDescriptor* TypeSecurityDescriptor::GetTypeSecurityDescriptor(MethodTable* pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ TypeSecurityDescriptor* pTypeSecurityDesc = NULL;
+
+ pTypeSecurityDesc = (TypeSecurityDescriptor*)TokenSecurityDescriptor::LookupSecurityDescriptor(pMT);
+ if (pTypeSecurityDesc == NULL)
+ {
+ // didn't find a security descriptor, create one and insert it
+ LPVOID pMem = GetAppDomain()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(TypeSecurityDescriptor)));
+
+ // allocate the security descriptor from the appdomain's low-frequency heap
+ pTypeSecurityDesc = new (pMem) TypeSecurityDescriptor(pMT);
+ pTypeSecurityDesc->VerifyDataComputedInternal(); // compute all the data that is needed.
+
+ TypeSecurityDescriptor* pExistingTypeSecurityDesc = NULL;
+ // insert securitydesc into our hash table
+ pExistingTypeSecurityDesc = (TypeSecurityDescriptor*)TokenSecurityDescriptor::InsertSecurityDescriptor(pMT, (HashDatum) pTypeSecurityDesc);
+ if (pExistingTypeSecurityDesc != NULL)
+ {
+ // if we found an existing security desc, use it
+ // no need to delete the one we had created, as we allocated it on the appdomain heap
+ pTypeSecurityDesc = pExistingTypeSecurityDesc;
+ }
+ }
+
+ return pTypeSecurityDesc;
+}
+
+#if !defined(CROSSGEN_COMPILE) && defined(FEATURE_CAS_POLICY)
+HRESULT TokenDeclActionInfo::GetDeclaredPermissionsWithCache(
+ IN CorDeclSecurity action,
+ OUT OBJECTREF *pDeclaredPermissions,
+ OUT PsetCacheEntry **pPCE)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ HRESULT hr = S_OK;
+ DWORD dwActionFlag = DclToFlag((CorDeclSecurity)action);
+
+ PsetCacheEntry *ptempPCE=NULL;
+ TokenDeclActionInfo* pCurrentAction = this;
+ for (;
+ pCurrentAction;
+ pCurrentAction = pCurrentAction->pNext)
+ {
+ if (pCurrentAction->dwDeclAction == dwActionFlag)
+ {
+ ptempPCE = pCurrentAction->pPCE;
+ break;
+ }
+ }
+ if (pDeclaredPermissions && pCurrentAction)
+ {
+ *pDeclaredPermissions = ptempPCE->CreateManagedPsetObject (action);
+ }
+ if (pPCE && pCurrentAction)
+ {
+ *pPCE = ptempPCE;
+ }
+
+ return hr;
+}
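+
+// Illustrative note: if no node in the action list matches the requested action, the
+// out-parameters are left untouched and S_OK is still returned, so callers test the
+// returned OBJECTREF for NULL (as the inheritance and link-time checks above do).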
+
+OBJECTREF TokenDeclActionInfo::GetLinktimePermissions(OBJECTREF *prefNonCasDemands)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF refCasDemands = NULL;
+ GCPROTECT_BEGIN(refCasDemands);
+
+ GetDeclaredPermissionsWithCache(
+ dclLinktimeCheck,
+ &refCasDemands, NULL);
+
+ TokenDeclActionInfo::GetDeclaredPermissionsWithCache(
+ dclNonCasLinkDemand,
+ prefNonCasDemands, NULL);
+
+ GCPROTECT_END();
+ return refCasDemands;
+}
+
+void TokenDeclActionInfo::InvokeLinktimeChecks(Assembly* pCaller)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pCaller));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_MULTICOREJIT
+
+ // Reset the flag to allow managed code to be called in multicore JIT background thread from this routine
+ ThreadStateNCStackHolder holder(-1, Thread::TSNC_CallingManagedCodeDisabled);
+
+#endif
+
+ struct gc
+ {
+ OBJECTREF refNonCasDemands;
+ OBJECTREF refCasDemands;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ // CAS LinkDemands
+ GetDeclaredPermissionsWithCache(dclLinktimeCheck,
+ &gc.refCasDemands,
+ NULL);
+
+ if (gc.refCasDemands != NULL)
+ {
+ SecurityStackWalk::LinkOrInheritanceCheck(pCaller->GetSecurityDescriptor(), gc.refCasDemands, pCaller, dclLinktimeCheck);
+ }
+
+ // Non-CAS link demands (we shouldn't support these).
+ GetDeclaredPermissionsWithCache(dclNonCasLinkDemand,
+ &gc.refNonCasDemands,
+ NULL);
+
+ GCPROTECT_END();
+}
+#endif // !CROSSGEN_COMPILE && FEATURE_CAS_POLICY
+
+void TypeSecurityDescriptor::ComputeCriticalTransparentInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_CORECLR
+ TypeSecurityDescriptorTransparencyEtwEvents etw(this);
+#endif // !FEATURE_CORECLR
+
+#ifdef _DEBUG
+ // If we've setup a breakpoint when we compute the transparency of this type, then stop in the debugger now
+ SString strTypeTransparencyBreak(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Security_TransparencyTypeBreak));
+ SString strClassName(SString::Utf8, m_pMT->GetDebugClassName());
+ if (strTypeTransparencyBreak.EqualsCaseInsensitive(strClassName))
+ {
+ // Do not break in fuzzed assemblies where class name can be empty
+ if (!strClassName.IsEmpty())
+ {
+ DebugBreak();
+ }
+ }
+#endif // _DEBUG
+
+ // check to see if the assembly has the critical attribute
+ Assembly* pAssembly = m_pMT->GetAssembly();
+ _ASSERTE(pAssembly);
+ ModuleSecurityDescriptor* pModuleSecDesc = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pAssembly);
+ pModuleSecDesc->VerifyDataComputed();
+
+ EEClass *pClass = m_pMT->GetClass();
+ TypeSecurityDescriptorFlags typeFlags = TypeSecurityDescriptorFlags_None;
+
+ // If we're contained within another type, then we inherit the transparency of that type. Otherwise we
+ // check the module to see what type of transparency we have.
+ if (pClass->IsNested())
+ {
+ // If the type is nested, see if the outer class tells us what our transparency is. Note that we cannot
+ // use a TypeSecurityDescriptor here since we may still be in the process of loading our outer type.
+ TokenSecurityDescriptor enclosingTokenSecurityDescriptor(m_pMT->GetModule(), m_pMT->GetEnclosingCl());
+ if (enclosingTokenSecurityDescriptor.IsSemanticCritical())
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsAllCritical;
+ }
+
+ // We want to propagate the TreatAsSafe bit even if the outer class is not critical because in the legacy
+ // transparency model you could have a TAS-but-not-critical type, and the TAS propagated to all nested
+ // types.
+ if (enclosingTokenSecurityDescriptor.IsSemanticTreatAsSafe())
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ }
+
+ const SecurityTransparencyBehavior *pTransparencyBehavior = m_pMT->GetAssembly()->GetSecurityTransparencyBehavior();
+ _ASSERTE(pTransparencyBehavior);
+
+ // If we're not nested, or if the outer type didn't give us enough information to determine what we were,
+ // then we need to look at the module to see what we are.
+ if (typeFlags == TypeSecurityDescriptorFlags_None)
+ {
+ if (pModuleSecDesc->IsAllTransparent())
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsAllTransparent;
+ }
+ else if (pModuleSecDesc->IsOpportunisticallyCritical())
+ {
+ // In level 1 transparency, opportunistically critical types are transparent, in level 2 they
+ // are critical. However, this causes problems when doing type equivalence between levels (for
+ // instance a type from a v2 PIA which was embedded into a v4 assembly). In order to allow type
+ // equivalence to work across security rule sets, we consider all types participating in
+ // equivalence to be transparent under the opportunistically critical rules:
+ // Participating in equivalence -> Transparent
+ // Level 1 -> Transparent
+ // Level 2 -> All critical
+ if (!pTransparencyBehavior->DoesOpportunisticRequireOnlySafeCriticalMethods() &&
+ !IsTypeEquivalent())
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsAllCritical;
+ }
+ }
+ else if (pModuleSecDesc->IsAllCritical())
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsAllCritical;
+ if (pModuleSecDesc->IsTreatAsSafe())
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ }
+ }
+
+ // We need to look at the type token for more information if we still don't know whether we're transparent
+ // or critical. This can also happen if the type is in an opportunistically critical module; however, the
+ // transparency model requires opportunistically critical types to be transparent, so in that case we need
+ // to make sure that we do not look at the metadata token.
+ TokenSecurityDescriptor classTokenSecurityDescriptor(m_pMT->GetModule(),
+ m_pMT->GetCl());
+
+ const TypeSecurityDescriptorFlags transparencyMask = TypeSecurityDescriptorFlags_IsCritical |
+ TypeSecurityDescriptorFlags_IsAllCritical |
+ TypeSecurityDescriptorFlags_IsAllTransparent;
+
+ if (!(typeFlags & transparencyMask) &&
+ !pModuleSecDesc->IsOpportunisticallyCritical())
+ {
+ // First, ask the transparency behavior implementation to map from the metadata attributes to the real
+ // behavior that we should be seeing.
+ typeFlags |= pTransparencyBehavior->MapTypeAttributes(classTokenSecurityDescriptor.GetMetadataFlags());
+
+ // If we still don't know what the transparency of the type is, then we're transparent, but not all
+ // transparent. That implies that we're in a mixed assembly.
+ _ASSERTE((typeFlags & transparencyMask) || pModuleSecDesc->IsMixedTransparency());
+ }
+
+ // If the transparency behavior dictates that publics must be safe critical, then also set the treat as safe bit.
+ if (pTransparencyBehavior->DoesPublicImplyTreatAsSafe() &&
+ ((typeFlags & TypeSecurityDescriptorFlags_IsCritical) || (typeFlags & TypeSecurityDescriptorFlags_IsAllCritical)) &&
+ !(typeFlags & TypeSecurityDescriptorFlags_IsTreatAsSafe))
+ {
+ if (IsTypeExternallyVisibleForTransparency())
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ }
+
+ // It is common for a v2 assembly to mark a delegate type as explicitly critical rather than all critical,
+ // since in C# the syntax for creating a delegate type does not make it obvious that a new type is being
+ // defined. That leads to situations where we commonly have critical types with transparent members -
+ // a nonsense scenario that we reject because the members would not have access to their own this pointer.
+ //
+ // For compatibility, we implicitly convert all explicitly critical delegate types into all critical
+ // types, which is likely what the code intended in the first place, and allows delegate types which
+ // loaded on v2.0 to continue to load on future runtimes.
+ //
+ // Note: While loading BCL classes, we may be running this codepath before it is safe to call MethodTable::IsDelegate.
+ // That call can only happen after CLASS__MULTICASTDELEGATE has been loaded. However, we should not have any
+ // explicit critical Delegate types in mscorlib (that can only happen if you're loading v2.0 assembly or have SecurityScope.Explicit).
+ if ((typeFlags & TypeSecurityDescriptorFlags_IsCritical) &&
+ !(typeFlags & TypeSecurityDescriptorFlags_IsAllCritical) &&
+ m_pMT->IsDelegate())
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsAllCritical;
+ }
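+
+ // Illustrative example (assumed C# shape, not from this file): in a v2.0 assembly,
+ //   [SecurityCritical] public delegate void Callback();  // explicit, not SecurityCriticalScope.All
+ // is promoted to all critical here, so its compiler-generated Invoke/BeginInvoke
+ // members are critical as well and the type continues to load on later runtimes.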
+
+ // Update the cached values in the EE Class.
+ g_IBCLogger.LogEEClassCOWTableAccess(m_pMT);
+ pClass->SetCriticalTransparentInfo(
+#ifndef FEATURE_CORECLR
+ typeFlags & (TypeSecurityDescriptorFlags_IsCritical | TypeSecurityDescriptorFlags_IsAllCritical),
+#endif // FEATURE_CORECLR
+ typeFlags & TypeSecurityDescriptorFlags_IsTreatAsSafe,
+ typeFlags & TypeSecurityDescriptorFlags_IsAllTransparent,
+ typeFlags & TypeSecurityDescriptorFlags_IsAllCritical);
+}
+
+void TypeSecurityDescriptor::ComputeTypeDeclarativeSecurityInfo()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // if the type doesn't have any declarative security, return
+ if (!IsTdHasSecurity(m_pMT->GetAttrClass()))
+ {
+ return;
+ }
+
+ DWORD dwDeclFlags;
+ HRESULT hr = SecurityDeclarative::GetDeclarationFlags(GetIMDInternalImport(), GetToken(), &dwDeclFlags, NULL, NULL);
+
+ if (SUCCEEDED(hr))
+ {
+ GCX_COOP();
+ PsetCacheEntry *tokenSetIndexes[dclMaximumValue + 1];
+ SecurityDeclarative::DetectDeclActionsOnToken(GetToken(), dwDeclFlags, tokenSetIndexes, GetIMDInternalImport());
+
+ // Create single linked list of set indexes
+ DWORD dwLocalAction;
+ for (dwLocalAction = 0; dwLocalAction <= dclMaximumValue; dwLocalAction++)
+ {
+ if (tokenSetIndexes[dwLocalAction] != NULL)
+ {
+ TokenDeclActionInfo::LinkNewDeclAction(&m_pTokenDeclActionInfo,
+ (CorDeclSecurity)dwLocalAction,
+ tokenSetIndexes[dwLocalAction]);
+ }
+ }
+ }
+}
+
+BOOL TypeSecurityDescriptor::CanTypeSecurityDescriptorBeCached(MethodTable* pMT)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ EEClass *pClass = pMT->GetClass();
+ return pClass->RequiresLinktimeCheck() ||
+ pClass->RequiresInheritanceCheck() ||
+ // NGEN accesses security descriptors frequently to check for security callouts
+ IsCompilationProcess();
+}
+
+BOOL TypeSecurityDescriptor::IsTypeExternallyVisibleForTransparency()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_pMT->GetAssembly()->GetSecurityTransparencyBehavior()->DoesPublicImplyTreatAsSafe());
+ }
+ CONTRACTL_END;
+
+ if (m_pMT->IsExternallyVisible())
+ {
+ // If the type is genuinely externally visible, then it is also visible for transparency
+ return TRUE;
+ }
+ else if (m_pMT->IsGlobalClass())
+ {
+ // Global methods are externally visible
+ return TRUE;
+ }
+ else if (m_pMT->IsSharedByGenericInstantiations())
+ {
+ TokenSecurityDescriptor tokenSecDesc(m_pMT->GetModule(), m_pMT->GetCl());
+
+ // Canonical method tables for shared generic instantiations will appear to us as
+ // GenericClass<__Canon>, rather than the actual generic type parameter, and since __Canon is not
+ // public, these method tables will not appear to be public either.
+ //
+ // For these types, we'll look at the metadata directly, and ignore generic parameters to see
+ // if the type is public. Note that this will under-enforce; for instance G<CriticalRefType> will
+ // have its G<__Canon> calls referred to as safe critical (which is necessary, since G<__Canon>
+ // is also the canonical representation for G<TransparentRefType>). We rely on the checks done by
+ // CheckTransparentAccessToCriticalCode in the CanAccess code path to reject any attempts to use
+ // the generic type over a critical parameter.
+ if (tokenSecDesc.IsSemanticExternallyVisible())
+ {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+void TypeSecurityDescriptor::VerifyDataComputedInternal()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_fIsComputed)
+ {
+ return;
+ }
+
+ // If the type hasn't already cached its transparency information, then we need to calculate it here. It
+ // can be cached if we're loading the type from a native image, but are creating the security descriptor
+ // in order to figure out declarative security.
+ if (!m_pMT->GetClass()->HasCriticalTransparentInfo())
+ {
+ ComputeCriticalTransparentInfo();
+ }
+
+ // Compute type declarative security info
+ ComputeTypeDeclarativeSecurityInfo();
+
+ // mark computed
+ InterlockedCompareExchange(reinterpret_cast<LONG *>(&m_fIsComputed), TRUE, FALSE);
+}
+
+void TypeSecurityDescriptor::InvokeInheritanceChecks(MethodTable* pChildMT)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pChildMT));
+ }
+ CONTRACTL_END;
+
+ const SecurityTransparencyBehavior *pChildTransparencyBehavior = pChildMT->GetAssembly()->GetSecurityTransparencyBehavior();
+ if (pChildTransparencyBehavior->AreInheritanceRulesEnforced())
+ {
+ // We compare the child class with the most critical base class in the type hierarchy.
+ //
+ // We can stop walking the inheritance chain if we find a type that also enforces inheritance rules,
+ // since we know that it must be at least as critical as the most critical of all its base types.
+ // Similarly, we can stop walking when we find a critical parent, because we know that this is the
+ // most critical we can get.
+ bool fFoundCriticalParent = false;
+ bool fFoundSafeCriticalParent = false;
+ bool fFoundParentWithEnforcedInheritance = false;
+
+ for (MethodTable *pParentMT = m_pMT;
+ pParentMT != NULL && !fFoundParentWithEnforcedInheritance && !fFoundCriticalParent;
+ pParentMT = pParentMT->GetParentMethodTable())
+ {
+ EEClass *pParentClass = pParentMT->GetClass();
+
+ // Make sure this parent class has its transparency information computed
+ if (!pParentClass->HasCriticalTransparentInfo())
+ {
+ TypeSecurityDescriptor parentSecurityDescriptor(pParentMT);
+ parentSecurityDescriptor.ComputeCriticalTransparentInfo();
+ }
+
+ // See if it is critical or safe critical
+ if (pParentClass->IsCritical() && pParentClass->IsTreatAsSafe())
+ {
+ fFoundSafeCriticalParent = true;
+ }
+ else if (pParentClass->IsCritical() && !pParentClass->IsTreatAsSafe())
+ {
+ fFoundCriticalParent = true;
+ }
+
+ // If this parent class enforced transparency, we can stop looking at further parents
+ const SecurityTransparencyBehavior *pParentTransparencyBehavior = pParentMT->GetAssembly()->GetSecurityTransparencyBehavior();
+ fFoundParentWithEnforcedInheritance = pParentTransparencyBehavior->AreInheritanceRulesEnforced();
+ }
+
+ /*
+ Allowed Inheritance Patterns
+ ----------------------------
+
+ Base Class/Method Derived Class/ Method
+ ----------------- ---------------------
+ Transparent Transparent
+ Transparent SafeCritical
+ Transparent Critical
+ SafeCritical SafeCritical
+ SafeCritical Critical
+ Critical Critical
+
+
+ Disallowed Inheritance patterns
+ -------------------------------
+
+ Base Class/Method Derived Class /Method
+ ----------------- ---------------------
+ SafeCritical Transparent
+ Critical Transparent
+ Critical SafeCritical
+ */
+
+ // Make sure the child class has its transparency calculated
+ EEClass *pChildClass = pChildMT->GetClass();
+ if (!pChildClass->HasCriticalTransparentInfo())
+ {
+ TypeSecurityDescriptor childSecurityDescriptor(pChildMT);
+ childSecurityDescriptor.ComputeCriticalTransparentInfo();
+ }
+
+ if (fFoundCriticalParent)
+ {
+ if (!pChildClass->IsCritical() || pChildClass->IsTreatAsSafe())
+ {
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ SecurityTransparent::LogTransparencyError(pChildMT, "Transparent or safe critical type deriving from a critical base type");
+ }
+ if (!g_pConfig->DisableTransparencyEnforcement())
+#endif // _DEBUG
+ {
+ // The parent class is critical, but the child class is not
+ SecurityTransparent::ThrowTypeLoadException(pChildMT);
+ }
+ }
+ }
+ else if (fFoundSafeCriticalParent)
+ {
+ if (!pChildClass->IsCritical())
+ {
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ SecurityTransparent::LogTransparencyError(pChildMT, "Transparent type deriving from a safe critical base type");
+ }
+ if (!g_pConfig->DisableTransparencyEnforcement())
+#endif // _DEBUG
+ {
+ // The parent class is safe critical, but the child class is transparent
+ SecurityTransparent::ThrowTypeLoadException(pChildMT);
+ }
+ }
+ }
+ }
+#ifndef FEATURE_CORECLR
+
+ // Fast path check
+ if (SecurityDeclarative::FullTrustCheckForLinkOrInheritanceDemand(pChildMT->GetAssembly()))
+ {
+ return;
+ }
+
+ if (HasInheritanceDeclarativeSecurity())
+ {
+#ifdef CROSSGEN_COMPILE
+ // NGen is always full trust. This path should be unreachable.
+ CrossGenNotSupported("HasInheritanceDeclarativeSecurity()");
+#else // CROSSGEN_COMPILE
+ GCX_COOP();
+
+ // If we have a class that requires inheritance checks,
+ // then we require a thread to perform the checks.
+ // We won't have a thread when some of the system classes
+ // are preloaded, so make sure that none of them have
+ // inheritance checks.
+ _ASSERTE(GetThread() != NULL);
+
+ struct
+ {
+ OBJECTREF refCasDemands;
+ OBJECTREF refNonCasDemands;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ EEClass *pClass = m_pMT->GetClass();
+ if (pClass->RequiresCasInheritanceCheck())
+ {
+ GetDeclaredPermissionsWithCache(dclInheritanceCheck, &gc.refCasDemands, NULL);
+ }
+
+ if (pClass->RequiresNonCasInheritanceCheck())
+ {
+ GetDeclaredPermissionsWithCache(dclNonCasInheritance, &gc.refNonCasDemands, NULL);
+ }
+
+ if (gc.refCasDemands != NULL)
+ {
+ SecurityStackWalk::LinkOrInheritanceCheck(pChildMT->GetAssembly()->GetSecurityDescriptor(),
+ gc.refCasDemands,
+ pChildMT->GetAssembly(),
+ dclInheritanceCheck);
+ }
+
+ if (gc.refNonCasDemands != NULL)
+ {
+ _ASSERTE(((PERMISSIONSETREF)gc.refNonCasDemands)->CheckedForNonCas() && "Declarative permissions should have been checked for nonCAS in PermissionSet.CreateSerialized");
+ if(((PERMISSIONSETREF)gc.refNonCasDemands)->ContainsNonCas())
+ {
+ MethodDescCallSite demand(METHOD__PERMISSION_SET__DEMAND_NON_CAS, &gc.refNonCasDemands);
+
+ ARG_SLOT arg = ObjToArgSlot(gc.refNonCasDemands);
+ demand.Call(&arg);
+ }
+ }
+
+ GCPROTECT_END();
+#endif // CROSSGEN_COMPILE
+ }
+#endif // FEATURE_CORECLR
+}
+
+// The module security descriptor contains static security information about the module;
+// this information may be persisted in the NGen image
+void ModuleSecurityDescriptor::VerifyDataComputed()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (m_flags & ModuleSecurityDescriptorFlags_IsComputed)
+ {
+ return;
+ }
+
+#ifndef FEATURE_CORECLR
+ ModuleSecurityDescriptorTransparencyEtwEvents etw(this);
+#endif // !FEATURE_CORECLR
+
+ // Read the security attributes from the assembly
+ Assembly *pAssembly = m_pModule->GetAssembly();
+
+ // Get the metadata flags on the assembly. Note that we cannot use a TokenSecurityDescriptor directly
+ // here because Reflection.Emit may have overridden the metadata flags with different ones of its own
+ // choosing.
+ TokenSecurityDescriptorFlags tokenFlags = GetTokenFlags();
+
+#ifdef FEATURE_APTCA
+ // We need to post-process the APTCA bits on the token security descriptor to handle:
+ // 1. Conditional APTCA assemblies, which should appear as either APTCA-enabled or APTCA-disabled
+ // 2. APTCA killbitted assemblies, which should appear as APTCA-disabled
+ tokenFlags = ProcessAssemblyAptcaFlags(pAssembly->GetDomainAssembly(), tokenFlags);
+#endif // FEATURE_APTCA
+
+#ifndef FEATURE_CORECLR
+ // Make sure we understand the security rule set being asked for
+ if (GetSecurityRuleSet() < SecurityRuleSet_Min || GetSecurityRuleSet() > SecurityRuleSet_Max)
+ {
+ // Unknown rule set - fail to load this module
+ SString strAssemblyName;
+ pAssembly->GetDisplayName(strAssemblyName);
+ COMPlusThrow(kFileLoadException, IDS_E_UNKNOWN_SECURITY_RULESET, strAssemblyName.GetUnicode());
+ }
+
+#endif // !FEATURE_CORECLR
+
+ // Get a transparency behavior object for the assembly.
+ const SecurityTransparencyBehavior *pTransparencyBehavior =
+ SecurityTransparencyBehavior::GetTransparencyBehavior(GetSecurityRuleSet());
+ pAssembly->SetSecurityTransparencyBehavior(pTransparencyBehavior);
+
+ ModuleSecurityDescriptorFlags moduleFlags = pTransparencyBehavior->MapModuleAttributes(tokenFlags);
+
+ AssemblySecurityDescriptor *pAssemSecDesc = static_cast<AssemblySecurityDescriptor*>(pAssembly->GetSecurityDescriptor());
+
+#ifdef FEATURE_LEGACYNETCF
+ // Legacy Mango apps have incorrect transparency attributes, so we apply a compatibility quirk to
+ // ignore them and force opportunistic criticality
+ if (GetAppDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8 && !pAssemSecDesc->IsMicrosoftPlatform())
+ {
+ moduleFlags = ModuleSecurityDescriptorFlags_IsOpportunisticallyCritical | ModuleSecurityDescriptorFlags_IsAPTCA;
+ }
+#endif // FEATURE_LEGACYNETCF
+
+ // We shouldn't be both all transparent and all critical
+ const ModuleSecurityDescriptorFlags invalidMask = ModuleSecurityDescriptorFlags_IsAllCritical |
+ ModuleSecurityDescriptorFlags_IsAllTransparent;
+ if ((moduleFlags & invalidMask) == invalidMask)
+ {
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ SecurityTransparent::LogTransparencyError(pAssembly, "Found both critical and transparent assembly level annotations");
+ }
+ if (!g_pConfig->DisableTransparencyEnforcement())
+#endif // _DEBUG
+ {
+ COMPlusThrow(kInvalidOperationException, W("InvalidOperation_CriticalTransparentAreMutuallyExclusive"));
+ }
+ }
+
+ const ModuleSecurityDescriptorFlags transparencyMask = ModuleSecurityDescriptorFlags_IsAllCritical |
+ ModuleSecurityDescriptorFlags_IsAllTransparent |
+ ModuleSecurityDescriptorFlags_IsTreatAsSafe |
+ ModuleSecurityDescriptorFlags_IsOpportunisticallyCritical;
+
+ // See if the assembly becomes implicitly transparent if loaded in partial trust
+ if (pTransparencyBehavior->DoesPartialTrustImplyAllTransparent())
+ {
+ if (!pAssemSecDesc->IsFullyTrusted())
+ {
+ moduleFlags &= ~transparencyMask;
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsAllTransparent;
+
+ moduleFlags |= ModuleSecurityDescriptorFlags_TransparentDueToPartialTrust;
+
+ SString strAssemblyName;
+ pAssembly->GetDisplayName(strAssemblyName);
+ LOG((LF_SECURITY,
+ LL_INFO10,
+ "Assembly '%S' was loaded in partial trust and was made implicitly all transparent.\n",
+ strAssemblyName.GetUnicode()));
+ }
+ }
+
+ // If the assembly is not allowed to use the SkipVerificationInFullTrust optimization, then disable that bit
+ if (!pAssembly->GetSecurityDescriptor()->AllowSkipVerificationInFullTrust())
+ {
+ moduleFlags &= ~ModuleSecurityDescriptorFlags_SkipFullTrustVerification;
+ }
+
+ // Make sure that if the assembly is being loaded in partial trust that it is all transparent. This is a
+ // change from v2.0 rules, and for compatibility we use the DoesPartialTrustImplyAllTransparent check to
+ // ensure that v2 assemblies can load in partial trust unmodified. This change does allow us to follow
+ // the CoreCLR model of using transparency for security enforcement, rather than the v2.0 model of using
+ // transparency only for audit.
+ if (!pAssembly->GetSecurityDescriptor()->IsFullyTrusted() &&
+ !(moduleFlags & ModuleSecurityDescriptorFlags_IsAllTransparent))
+ {
+ SString strAssemblyName;
+ pAssembly->GetDisplayName(strAssemblyName);
+
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ SecurityTransparent::LogTransparencyError(pAssembly, "Attempt to load an assembly which is not fully transparent in partial trust");
+ }
+ if (g_pConfig->DisableTransparencyEnforcement())
+ {
+ SecurityTransparent::LogTransparencyError(pAssembly, "Forcing partial trust assembly to be fully transparent");
+ if (!pAssembly->GetSecurityDescriptor()->IsFullyTrusted())
+ {
+ moduleFlags &= ~transparencyMask;
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsAllTransparent;
+
+ }
+ }
+ else
+#endif // _DEBUG
+ {
+ COMPlusThrow(kFileLoadException, IDS_E_LOAD_CRITICAL_IN_PARTIAL_TRUST, strAssemblyName.GetUnicode());
+ }
+ }
+
+#ifdef FEATURE_APTCA
+ // If the security model implies that unsigned assemblies are APTCA, then check to see if we're unsigned
+ // and set the APTCA bit.
+ if (pTransparencyBehavior->DoesUnsignedImplyAPTCA() && !pAssembly->IsStrongNamed())
+ {
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsAPTCA;
+ }
+#endif // FEATURE_APTCA
+
+#ifdef _DEBUG
+ // If we're being forced to generate native code for this assembly which can be used in a partial trust
+ // context, then we need to ensure that the assembly is entirely transparent -- otherwise the code may
+ // perform a critical operation preventing the ngen image from being loaded into partial trust.
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Security_NGenForPartialTrust) != 0)
+ {
+ moduleFlags &= ~transparencyMask;
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsAllTransparent;
+ }
+#endif // _DEBUG
+
+#ifdef FEATURE_CORECLR
+ if (pAssembly->IsSystem() || (pAssembly->GetManifestFile()->HasOpenedILimage() && GetAppDomain()->IsImageFullyTrusted(pAssembly->GetManifestFile()->GetOpenedILimage())))
+ {
+ // Set the flag if the assembly is a Microsoft platform assembly. This gets saved in the NGen image
+ // to determine whether the NI was generated as full-trust. If the NI is generated as full-trust,
+ // the generated code differs from that of a non-trusted assembly.
+ _ASSERTE(!(moduleFlags & ModuleSecurityDescriptorFlags_IsMicrosoftPlatform));
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsMicrosoftPlatform;
+ }
+#endif
+
+ // Mark the module as having its security state computed
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsComputed;
+ InterlockedCompareExchange(reinterpret_cast<LONG *>(&m_flags),
+ moduleFlags,
+ ModuleSecurityDescriptorFlags_None);
+
+ // If this assert fires, we ended up racing to different outcomes
+ _ASSERTE(m_flags == moduleFlags);
+}
+
+#ifndef FEATURE_CORECLR
+
+// Determine if this assembly was built against a version of the runtime that only supported legacy transparency
+BOOL ModuleSecurityDescriptor::AssemblyVersionRequiresLegacyTransparency()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ BOOL fIsLegacyAssembly = FALSE;
+
+ // Check the manifest version number to see if we're a v1 or v2 assembly. We specifically check for the
+ // manifest version to come back as a string that starts with either v1 or v2; if we get anything
+ // unexpected, we'll just use the default transparency implementation
+ LPCSTR szVersion = NULL;
+ IMDInternalImport *pmdImport = m_pModule->GetAssembly()->GetManifestImport();
+ if (SUCCEEDED(pmdImport->GetVersionString(&szVersion)))
+ {
+ if (szVersion != NULL && strlen(szVersion) > 2)
+ {
+ fIsLegacyAssembly = szVersion[0] == 'v' &&
+ (szVersion[1] == '1' || szVersion[1] == '2');
+ }
+ }
+
+ return fIsLegacyAssembly;
+}
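+
+// Illustrative example: a metadata version string of "v2.0.50727" begins with 'v' and '2',
+// so the assembly requires legacy transparency; "v4.0.30319" does not.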
+
+#endif // !FEATURE_CORECLR
+
+ModuleSecurityDescriptor* ModuleSecurityDescriptor::GetModuleSecurityDescriptor(Assembly *pAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+
+ Module* pModule = pAssembly->GetManifestModule();
+ _ASSERTE(pModule);
+
+ ModuleSecurityDescriptor* pModuleSecurityDesc = pModule->m_pModuleSecurityDescriptor;
+ _ASSERTE(pModuleSecurityDesc);
+
+ return pModuleSecurityDesc;
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+VOID ModuleSecurityDescriptor::Save(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+ VerifyDataComputed();
+ image->StoreStructure(this,
+ sizeof(ModuleSecurityDescriptor),
+ DataImage::ITEM_MODULE_SECDESC);
+}
+
+VOID ModuleSecurityDescriptor::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+ image->FixupPointerField(this, offsetof(ModuleSecurityDescriptor, m_pModule));
+}
+#endif
+
+#if defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+
+//---------------------------------------------------------------------------------------
+//
+// Parse an APTCA blob into its corresponding token security descriptor flags.
+//
+
+TokenSecurityDescriptorFlags ParseAptcaAttribute(const BYTE *pbAptcaBlob, DWORD cbAptcaBlob)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pbAptcaBlob));
+ }
+ CONTRACTL_END;
+
+ TokenSecurityDescriptorFlags aptcaFlags = TokenSecurityDescriptorFlags_None;
+
+ CustomAttributeParser cap(pbAptcaBlob, cbAptcaBlob);
+ if (SUCCEEDED(cap.SkipProlog()))
+ {
+ aptcaFlags |= TokenSecurityDescriptorFlags_APTCA;
+
+ // Look for the PartialTrustVisibilityLevel named argument
+ CaNamedArg namedArgs[1];
+ namedArgs[0].InitI4FieldEnum(g_PartialTrustVisibilityLevel, g_SecurityPartialTrustVisibilityLevel);
+
+ if (SUCCEEDED(ParseKnownCaNamedArgs(cap, namedArgs, _countof(namedArgs))))
+ {
+ // If we have a partial trust visibility level, then we may additionally be conditionally APTCA.
+ PartialTrustVisibilityLevel visibilityLevel = static_cast<PartialTrustVisibilityLevel>(namedArgs[0].val.u4);
+ if (visibilityLevel == PartialTrustVisibilityLevel_NotVisibleByDefault)
+ {
+ aptcaFlags |= TokenSecurityDescriptorFlags_ConditionalAPTCA;
+ }
+ }
+ }
+
+ return aptcaFlags;
+}
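+
+// Illustrative example (assumed C# shape, not from this file): the blob parsed above
+// typically comes from an assembly-level attribute such as
+//   [assembly: AllowPartiallyTrustedCallers(PartialTrustVisibilityLevel = PartialTrustVisibilityLevel.NotVisibleByDefault)]
+// which decodes to TokenSecurityDescriptorFlags_APTCA | TokenSecurityDescriptorFlags_ConditionalAPTCA.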
+
+#endif // defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+
+//---------------------------------------------------------------------------------------
+//
+// Parse a security rules attribute blob into its corresponding token security descriptor
+// flags.
+//
+
+TokenSecurityDescriptorFlags ParseSecurityRulesAttribute(const BYTE *pbSecurityRulesBlob,
+ DWORD cbSecurityRulesBlob)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pbSecurityRulesBlob));
+ }
+ CONTRACTL_END;
+
+ TokenSecurityDescriptorFlags rulesFlags = TokenSecurityDescriptorFlags_None;
+
+ CustomAttributeParser cap(pbSecurityRulesBlob, cbSecurityRulesBlob);
+ if (SUCCEEDED(cap.SkipProlog()))
+ {
+ rulesFlags |= TokenSecurityDescriptorFlags_SecurityRules;
+
+ // Read out the version number
+ UINT8 bRulesLevel = 0;
+ if (SUCCEEDED(cap.GetU1(&bRulesLevel)))
+ {
+ rulesFlags |= EncodeSecurityRuleSet(static_cast<SecurityRuleSet>(bRulesLevel));
+ }
+
+ // See if the attribute specified that full trust transparent code should not be verified
+ CaNamedArg skipVerificationArg;
+ skipVerificationArg.InitBoolField("SkipVerificationInFullTrust", FALSE);
+ if (SUCCEEDED(ParseKnownCaNamedArgs(cap, &skipVerificationArg, 1)))
+ {
+ if (skipVerificationArg.val.boolean)
+ {
+ rulesFlags |= TokenSecurityDescriptorFlags_SkipFullTrustVerification;
+ }
+ }
+ }
+
+ return rulesFlags;
+}
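+
+// Illustrative example (assumed C# shape, not from this file): a blob produced by
+//   [assembly: SecurityRules(SecurityRuleSet.Level2, SkipVerificationInFullTrust = true)]
+// decodes above to the encoded Level2 rule set plus TokenSecurityDescriptorFlags_SkipFullTrustVerification.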
+
+// grok the metadata and compute the necessary attributes
+void TokenSecurityDescriptor::VerifyDataComputed()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(m_pModule));
+ }
+ CONTRACTL_END;
+
+ if (m_flags & TokenSecurityDescriptorFlags_IsComputed)
+ {
+ return;
+ }
+
+ // Loop over the attributes on the token, reading off bits that are interesting for security
+ TokenSecurityDescriptorFlags flags = ReadSecurityAttributes(m_pModule->GetMDImport(), m_token);
+ flags |= TokenSecurityDescriptorFlags_IsComputed;
+ FastInterlockOr(reinterpret_cast<DWORD *>(&m_flags), flags);
+}
+
+// static
+TokenSecurityDescriptorFlags TokenSecurityDescriptor::ReadSecurityAttributes(IMDInternalImport *pmdImport, mdToken token)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pmdImport));
+ }
+ CONTRACTL_END;
+
+ TokenSecurityDescriptorFlags flags = TokenSecurityDescriptorFlags_None;
+
+ HENUMInternalHolder hEnum(pmdImport);
+ hEnum.EnumInit(mdtCustomAttribute, token);
+
+ mdCustomAttribute currentAttribute;
+ while (hEnum.EnumNext(&currentAttribute))
+ {
+ LPCSTR szAttributeName;
+ LPCSTR szAttributeNamespace;
+
+ if (FAILED(pmdImport->GetNameOfCustomAttribute(currentAttribute, &szAttributeNamespace, &szAttributeName)))
+ {
+ continue;
+ }
+
+ // The only attributes we care about are in System.Security, so move on if we found something in a
+ // different namespace
+ if (szAttributeName != NULL &&
+ szAttributeNamespace != NULL &&
+ strcmp(g_SecurityNS, szAttributeNamespace) == 0)
+ {
+#if defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+ if (strcmp(g_SecurityAPTCA + sizeof(g_SecurityNS), szAttributeName) == 0)
+ {
+ // Check the visibility parameter
+ const BYTE *pbAttributeBlob;
+ ULONG cbAttributeBlob;
+
+ if (FAILED(pmdImport->GetCustomAttributeAsBlob(currentAttribute, reinterpret_cast<const void **>(&pbAttributeBlob), &cbAttributeBlob)))
+ {
+ continue;
+ }
+
+ TokenSecurityDescriptorFlags aptcaFlags = ParseAptcaAttribute(pbAttributeBlob, cbAttributeBlob);
+ flags |= aptcaFlags;
+ }
+ else
+#endif // defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+ if (strcmp(g_SecurityCriticalAttribute + sizeof(g_SecurityNS), szAttributeName) == 0)
+ {
+ flags |= TokenSecurityDescriptorFlags_Critical;
+
+#ifndef FEATURE_CORECLR
+ // Check the SecurityCriticalScope parameter
+ const BYTE *pbAttributeBlob;
+ ULONG cbAttributeBlob;
+
+ if (FAILED(pmdImport->GetCustomAttributeAsBlob(
+ currentAttribute,
+ reinterpret_cast<const void **>(&pbAttributeBlob),
+ &cbAttributeBlob)))
+ {
+ continue;
+ }
+ CustomAttributeParser cap(pbAttributeBlob, cbAttributeBlob);
+ if (SUCCEEDED(cap.SkipProlog()))
+ {
+ UINT32 dwCriticalFlags;
+ if (SUCCEEDED(cap.GetU4(&dwCriticalFlags)))
+ {
+ if (dwCriticalFlags == SecurityCriticalFlags_All)
+ {
+ flags |= TokenSecurityDescriptorFlags_AllCritical;
+ }
+ }
+ }
+#endif // !FEATURE_CORECLR
+ }
+ else if (strcmp(g_SecuritySafeCriticalAttribute + sizeof(g_SecurityNS), szAttributeName) == 0)
+ {
+ flags |= TokenSecurityDescriptorFlags_SafeCritical;
+ }
+ else if (strcmp(g_SecurityTransparentAttribute + sizeof(g_SecurityNS), szAttributeName) == 0)
+ {
+ flags |= TokenSecurityDescriptorFlags_Transparent;
+ }
+#ifndef FEATURE_CORECLR
+ else if (strcmp(g_SecurityRulesAttribute + sizeof(g_SecurityNS), szAttributeName) == 0)
+ {
+ const BYTE *pbAttributeBlob;
+ ULONG cbAttributeBlob;
+
+ if (FAILED(pmdImport->GetCustomAttributeAsBlob(
+ currentAttribute,
+ reinterpret_cast<const void **>(&pbAttributeBlob),
+ &cbAttributeBlob)))
+ {
+ continue;
+ }
+
+ TokenSecurityDescriptorFlags securityRulesFlags =
+ ParseSecurityRulesAttribute(pbAttributeBlob, cbAttributeBlob);
+
+ flags |= securityRulesFlags;
+ }
+ else if (strcmp(g_SecurityTreatAsSafeAttribute + sizeof(g_SecurityNS), szAttributeName) == 0)
+ {
+ flags |= TokenSecurityDescriptorFlags_TreatAsSafe;
+ }
+#endif // !FEATURE_CORECLR
+ }
+ }
+
+ return flags;
+}
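+
+// Illustrative example: a token carrying [SecuritySafeCritical] in metadata yields
+// TokenSecurityDescriptorFlags_SafeCritical here, while attributes outside the
+// System.Security namespace are skipped entirely by the namespace check above.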
+
+//---------------------------------------------------------------------------------------
+//
+// Calculate the semantic critical / transparent state for this metadata token.
+// See code:TokenSecurityDescriptor#TokenSecurityDescriptorSemanticLookup
+//
+
+void TokenSecurityDescriptor::VerifySemanticDataComputed()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_flags & TokenSecurityDescriptorFlags_IsSemanticComputed)
+ {
+ return;
+ }
+
+#ifndef FEATURE_CORECLR
+ TokenSecurityDescriptorTransparencyEtwEvents etw(this);
+#endif // !FEATURE_CORECLR
+
+ bool fIsSemanticallyCritical = false;
+ bool fIsSemanticallyTreatAsSafe = false;
+ bool fIsSemanticallyExternallyVisible = false;
+
+ // Check the module to see if every type in the module is the same
+ Assembly *pAssembly = m_pModule->GetAssembly();
+ ModuleSecurityDescriptor* pModuleSecDesc = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pAssembly);
+ if (pModuleSecDesc->IsAllTransparent())
+ {
+ // If the module is explicitly Transparent, then everything in it is Transparent
+ fIsSemanticallyCritical = false;
+ fIsSemanticallyTreatAsSafe = false;
+ }
+ else if (pModuleSecDesc->IsAllCritical())
+ {
+ // If the module is critical or safe critical, then everything in it matches
+ fIsSemanticallyCritical = true;
+
+ if (pModuleSecDesc->IsTreatAsSafe())
+ {
+ fIsSemanticallyTreatAsSafe = true;
+ }
+ }
+ else if (pModuleSecDesc->IsOpportunisticallyCritical())
+ {
+ // There are three cases for an opportunistically critical type:
+ // 1. Level 2 transparency - all types are critical
+ // 2. Level 1 transparency - all types are transparent
+ // 3. Types participating in type equivalence (regardless of level) - types are transparent
+ //
+ // Therefore, we consider the type critical only if it is level 2; otherwise we keep it transparent.
+
+ const SecurityTransparencyBehavior *pTransparencyBehavior = pAssembly->GetSecurityTransparencyBehavior();
+ if (!pTransparencyBehavior->DoesOpportunisticRequireOnlySafeCriticalMethods() &&
+ !IsTypeEquivalent())
+ {
+ // If the module is opportunistically critical, then every type in it is critical
+ fIsSemanticallyCritical = true;
+ }
+ }
+ // Mixed transparency
+ else
+ {
+ const TypeSecurityDescriptorFlags criticalMask = TypeSecurityDescriptorFlags_IsAllCritical |
+ TypeSecurityDescriptorFlags_IsCritical;
+ const TypeSecurityDescriptorFlags treatAsSafeMask = TypeSecurityDescriptorFlags_IsTreatAsSafe;
+
+ const SecurityTransparencyBehavior *pTransparencyBehavior = pAssembly->GetSecurityTransparencyBehavior();
+ _ASSERTE(pTransparencyBehavior != NULL);
+
+ // We don't have full module-level state, so we need to loop over the tokens to figure it out.
+ IMDInternalImport* pMdImport = m_pModule->GetMDImport();
+ mdToken tkCurrent = m_token;
+ mdToken tkPrev = mdTokenNil;
+
+ // First, we need to walk the chain inside out, building up a stack so that we can pop the stack from
+ // the outside in, looking for the largest scope with a statement about the transparency of the types.
+ CStackArray<mdToken> typeTokenStack;
+ while (tkPrev != tkCurrent)
+ {
+ typeTokenStack.Push(tkCurrent);
+ tkPrev = tkCurrent;
+ IfFailThrow(pMdImport->GetParentToken(tkPrev, &tkCurrent));
+ }
+
+ //
+ // Walk up the chain of containing types, starting with the current metadata token. At each step on the
+ // chain, keep track of if we've been marked critical / treat as safe yet.
+ //
+ // It's important that we use only metadata tokens here, rather than using EEClass and
+ // TypeSecurityDescriptors, since this method can be called while loading nested types and using
+ // TypeSecurityDescriptor can lead to recursion during type load.
+ //
+ // We also need to walk the chain from the outside in, since we listen to the outermost marking. We
+ // can stop looking at tokens once we found one that has a transparency marking (we've become either
+ // critical or safe critical), and we've determined that the inner types are not publicly visible.
+ //
+
+ // We'll start out by saying all tokens are not public if public doesn't imply treat as safe - that
+ // way we don't flip over to safe critical even if they are all public
+ bool fAllTokensPublic = pTransparencyBehavior->DoesPublicImplyTreatAsSafe();
+
+ while (typeTokenStack.Count() > 0 && !fIsSemanticallyCritical)
+ {
+ mdToken *ptkCurrentType = typeTokenStack.Pop();
+ TokenSecurityDescriptor currentTokenSD(m_pModule, *ptkCurrentType);
+
+ // Check to see if the current type is critical / treat as safe. We only want to check this if we
+ // haven't already found an outer type that had a transparency attribute; otherwise we would let
+ // an inner scope have more priority than its containing scope
+ TypeSecurityDescriptorFlags currentTypeFlags = pTransparencyBehavior->MapTypeAttributes(currentTokenSD.GetMetadataFlags());
+ if (!fIsSemanticallyCritical)
+ {
+ fIsSemanticallyCritical = !!(currentTypeFlags & criticalMask);
+ fIsSemanticallyTreatAsSafe |= !!(currentTypeFlags & treatAsSafeMask);
+ }
+
+ // If the assembly uses a transparency model where publicly visible items are treat as safe, then
+ // we need to check to see if all the types in the containment chain are visible
+ if (fAllTokensPublic)
+ {
+ DWORD dwTypeAttrs;
+ IfFailThrow(pMdImport->GetTypeDefProps(*ptkCurrentType, &dwTypeAttrs, NULL));
+
+ fAllTokensPublic = IsTdPublic(dwTypeAttrs) ||
+ IsTdNestedPublic(dwTypeAttrs) ||
+ IsTdNestedFamily(dwTypeAttrs) ||
+ IsTdNestedFamORAssem(dwTypeAttrs);
+ }
+ }
+
+ // If public implies treat as safe, all the types were visible, and we are semantically critical
+ // then we're actually semantically safe critical
+ if (fAllTokensPublic)
+ {
+ _ASSERTE(pTransparencyBehavior->DoesPublicImplyTreatAsSafe());
+
+ fIsSemanticallyExternallyVisible = true;
+
+ if (fIsSemanticallyCritical)
+ {
+ fIsSemanticallyTreatAsSafe = true;
+ }
+ }
+ }
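+
+ // Illustrative walk (not from this file): for nested types Outer and Outer+Inner,
+ // the stack above is built as [Inner, Outer] and popped outermost-first, so an Outer
+ // marked critical makes Inner semantically critical regardless of Inner's own
+ // annotations, matching the outermost-marking-wins rule described above.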
+
+ // Further, if we're critical due to the assembly, and public implies treat as safe,
+ // and the outermost nested type is public, then we are safe critical
+ if (pModuleSecDesc->IsAllCritical() ||
+ pModuleSecDesc->IsOpportunisticallyCritical())
+ {
+ // We shouldn't have determined if we're externally visible or not yet
+ _ASSERTE(!fIsSemanticallyExternallyVisible);
+
+ const SecurityTransparencyBehavior *pTransparencyBehavior = pAssembly->GetSecurityTransparencyBehavior();
+
+ if (pTransparencyBehavior->DoesPublicImplyTreatAsSafe() &&
+ fIsSemanticallyCritical &&
+ !fIsSemanticallyTreatAsSafe)
+ {
+ IMDInternalImport* pMdImport = m_pModule->GetMDImport();
+ mdToken tkCurrent = m_token;
+ mdToken tkPrev = mdTokenNil;
+ HRESULT hrIter = S_OK;
+
+ while (SUCCEEDED(hrIter) && tkCurrent != tkPrev)
+ {
+ tkPrev = tkCurrent;
+ hrIter = pMdImport->GetNestedClassProps(tkPrev, &tkCurrent);
+
+ if (!SUCCEEDED(hrIter))
+ {
+ if (hrIter == CLDB_E_RECORD_NOTFOUND)
+ {
+ // We don't have a parent class, so use the previous as our outermost
+ tkCurrent = tkPrev;
+ }
+ else
+ {
+ ThrowHR(hrIter);
+ }
+ }
+
+ DWORD dwOuterTypeAttrs;
+ IfFailThrow(pMdImport->GetTypeDefProps(tkCurrent, &dwOuterTypeAttrs, NULL));
+ if (IsTdPublic(dwOuterTypeAttrs))
+ {
+ fIsSemanticallyExternallyVisible = true;
+ fIsSemanticallyTreatAsSafe = true;
+ }
+ }
+ }
+ }
+
+ // Save away the semantic state that we just computed
+ TokenSecurityDescriptorFlags semanticFlags = TokenSecurityDescriptorFlags_IsSemanticComputed;
+ if (fIsSemanticallyCritical)
+ semanticFlags |= TokenSecurityDescriptorFlags_IsSemanticCritical;
+ if (fIsSemanticallyTreatAsSafe)
+ semanticFlags |= TokenSecurityDescriptorFlags_IsSemanticTreatAsSafe;
+ if (fIsSemanticallyExternallyVisible)
+ semanticFlags |= TokenSecurityDescriptorFlags_IsSemanticExternallyVisible;
+
+ FastInterlockOr(reinterpret_cast<DWORD *>(&m_flags), static_cast<DWORD>(semanticFlags));
+}
+
+HashDatum TokenSecurityDescriptor::LookupSecurityDescriptor(void* pKey)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HashDatum datum;
+ AppDomain* pDomain = GetAppDomain();
+
+ EEPtrHashTable &rCachedMethodPermissionsHash = pDomain->m_pSecContext->m_pCachedMethodPermissionsHash;
+
+ // We need to switch to cooperative GC here. But using GCX_COOP here
+ // causes a 20% perf degradation in some declarative security assert scenarios.
+ // We should fix this.
+ CONTRACT_VIOLATION(ModeViolation);
+ // Fast attempt, that may fail (and return FALSE):
+ if (!rCachedMethodPermissionsHash.GetValueSpeculative(pKey, &datum))
+ {
+ // Slow call
+ datum = LookupSecurityDescriptor_Slow(pDomain, pKey, rCachedMethodPermissionsHash);
+ }
+ return datum;
+}
+
+HashDatum TokenSecurityDescriptor::LookupSecurityDescriptor_Slow(AppDomain* pDomain,
+ void* pKey,
+ EEPtrHashTable &rCachedMethodPermissionsHash )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HashDatum datum;
+ SimpleRWLock* prGlobalLock = pDomain->m_pSecContext->m_prCachedMethodPermissionsLock;
+ // look up the cache in the slow mode
+ // in the false failure case, we'll recheck the cache anyway
+ SimpleReadLockHolder readLockHolder(prGlobalLock);
+ if (rCachedMethodPermissionsHash.GetValue(pKey, &datum))
+ {
+ return datum;
+ }
+ return NULL;
+}
+
+HashDatum TokenSecurityDescriptor::InsertSecurityDescriptor(void* pKey, HashDatum pHashDatum)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ AppDomain* pDomain = GetAppDomain();
+ SimpleRWLock* prGlobalLock = pDomain->m_pSecContext->m_prCachedMethodPermissionsLock;
+ EEPtrHashTable &rCachedMethodPermissionsHash = pDomain->m_pSecContext->m_pCachedMethodPermissionsHash;
+
+ HashDatum pFoundHashDatum = NULL;
+ // insert the computed details in our hash table
+ {
+ SimpleWriteLockHolder writeLockHolder(prGlobalLock);
+ // since the hash table doesn't support duplicates by
+ // default, we need to recheck in case another thread
+ // added the value during a context switch
+ if (!rCachedMethodPermissionsHash.GetValue(pKey, &pFoundHashDatum))
+ {
+ // no entry was found
+ _ASSERTE(pFoundHashDatum == NULL);
+ // Place the new entry into the hash.
+ rCachedMethodPermissionsHash.InsertValue(pKey, pHashDatum);
+ }
+ }
+ // return the value found in the lookup, in case there was a duplicate
+ return pFoundHashDatum;
+}
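+
+// A usage sketch (illustrative only, not part of the original sources): callers
+// typically pair the two helpers into a lookup-or-insert sequence. Since the
+// hash rejects duplicates, InsertSecurityDescriptor returns the entry another
+// thread may have published first, and that value wins. pKey and pNewDatum are
+// assumed to be supplied by the caller.
+//
+//     HashDatum datum = TokenSecurityDescriptor::LookupSecurityDescriptor(pKey);
+//     if (datum == NULL)
+//     {
+//         HashDatum pExisting = TokenSecurityDescriptor::InsertSecurityDescriptor(pKey, pNewDatum);
+//         datum = (pExisting != NULL) ? pExisting : pNewDatum;
+//     }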
diff --git a/src/vm/securitymeta.h b/src/vm/securitymeta.h
new file mode 100644
index 0000000000..e64b97b0db
--- /dev/null
+++ b/src/vm/securitymeta.h
@@ -0,0 +1,674 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//--------------------------------------------------------------------------
+// securitymeta.h
+//
+// pre-computes various security information, declarative and runtime meta-info
+//
+
+
+//
+//--------------------------------------------------------------------------
+
+
+#ifndef __SECURITYMETA_H__
+#define __SECURITYMETA_H__
+
+class SecurityStackWalk;
+class AssertStackWalk;
+class PsetCacheEntry;
+class SecurityTransparencyBehavior;
+struct DeclActionInfo;
+
+#define INVALID_SET_INDEX ((DWORD)~0)
+
+// The enum that describes the value of the SecurityCriticalFlags in SecurityCritical attribute.
+enum SecurityCriticalFlags
+{
+ SecurityCriticalFlags_None = 0,
+ SecurityCriticalFlags_All = 0x1
+};
+
+// Security rule sets that can be used - this enum should match the BCL SecurityRuleSet enum
+enum SecurityRuleSet
+{
+ SecurityRuleSet_Level1 = 1, // v2.0 rules
+ SecurityRuleSet_Level2 = 2, // v4.0 rules
+
+ SecurityRuleSet_Min = SecurityRuleSet_Level1, // Smallest rule set we understand
+ SecurityRuleSet_Max = SecurityRuleSet_Level2, // Largest rule set we understand
+ SecurityRuleSet_Default = SecurityRuleSet_Level2 // Rule set to use if unspecified
+};
+
+// Partial trust visibility level for APTCA assemblies - this enum should match the BCL
+// PartialTrustVisibilityLevel enum
+enum PartialTrustVisibilityLevel
+{
+ PartialTrustVisibilityLevel_VisibleToAllHosts = 0,
+ PartialTrustVisibilityLevel_NotVisibleByDefault = 1
+};
+
+SELECTANY const DWORD DCL_FLAG_MAP[] =
+{
+ 0, // dclActionNil = 0
+ DECLSEC_REQUESTS, // dclRequest = 1
+ DECLSEC_DEMANDS, // dclDemand = 2
+ DECLSEC_ASSERTIONS, // dclAssert = 3
+ DECLSEC_DENIALS, // dclDeny = 4
+ DECLSEC_PERMITONLY, // dclPermitOnly = 5
+ DECLSEC_LINK_CHECKS, // dclLinktimeCheck = 6
+ DECLSEC_INHERIT_CHECKS, // dclInheritanceCheck = 7
+ DECLSEC_REQUESTS, // dclRequestMinimum = 8
+ DECLSEC_REQUESTS, // dclRequestOptional = 9
+ DECLSEC_REQUESTS, // dclRequestRefuse = 10
+ 0, // dclPrejitGrant = 11
+ 0, // dclPrejitDenied = 12
+ DECLSEC_NONCAS_DEMANDS, // dclNonCasDemand = 13
+ DECLSEC_NONCAS_LINK_DEMANDS, // dclNonCasLinkDemand = 14
+ DECLSEC_NONCAS_INHERITANCE, // dclNonCasInheritance = 15
+};
+#define DCL_FLAG_MAP_SIZE (sizeof(DCL_FLAG_MAP)/sizeof(DWORD))
+#define DclToFlag(dcl) (((size_t)dcl < DCL_FLAG_MAP_SIZE) ? DCL_FLAG_MAP[dcl] : 0)
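+
+// Worked example (illustrative): DclToFlag(dclDemand) yields DECLSEC_DEMANDS,
+// and any out-of-range action (dcl >= DCL_FLAG_MAP_SIZE) safely maps to 0.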
+
+
+struct TokenDeclActionInfo
+{
+    DWORD dwDeclAction;     // Tells InvokeDeclarativeSecurity what action is needed
+ PsetCacheEntry *pPCE; // The cached permissionset on which to demand/assert/deny/etc
+ TokenDeclActionInfo* pNext; // pointer to next action link in chain
+
+ static TokenDeclActionInfo *Init(DWORD dwAction, PsetCacheEntry *pPCE);
+ static void LinkNewDeclAction(TokenDeclActionInfo** ppActionList, CorDeclSecurity action, PsetCacheEntry *pPCE);
+
+
+ HRESULT GetDeclaredPermissionsWithCache(IN CorDeclSecurity action,
+ OUT OBJECTREF *pDeclaredPermissions,
+ OUT PsetCacheEntry **pPCE);
+
+ OBJECTREF GetLinktimePermissions(OBJECTREF *prefNonCasDemands);
+ void InvokeLinktimeChecks(Assembly* pCaller);
+};
+
+// Flags about the raw security attributes found on a metadata token, as well as semantic interpretations of
+// them in some cases (see code:TokenSecurityDescriptor#TokenSecurityDescriptorSemanticLookup). These flags
+// are split into several sections:
+//
+// 32 28 16 12 4 0
+// | Rules version | Rules Bits | Semantic data | Raw attributes | Metabits |
+//
+// Rules version - the SecurityRuleSet selected by a SecurityRules attribute
+// Rules bits - extra flags set on a SecurityRules attribute
+// Semantic data - Flags indicating the security state of the item represented by the token taking into
+// account parent types and modules - giving the true semantic security state
+// (see code:TokenSecurityDescriptor#TokenSecurityDescriptorSemanticLookup)
+// Raw attributes - Flags for data we read directly out of metadata; these only indicate that the attributes
+// are set, and do not indicate the actual security state of the token until they have been
+// interpreted by the assembly they are applied within.
+// Metabits - Flags about the state of the token security descriptor itself
+enum TokenSecurityDescriptorFlags
+{
+ // Metabits
+ TokenSecurityDescriptorFlags_None = 0x00000000,
+ TokenSecurityDescriptorFlags_IsComputed = 0x00000001,
+
+ // Raw attributes
+ TokenSecurityDescriptorFlags_RawAttributeMask = 0x00000FF0,
+ TokenSecurityDescriptorFlags_AllCritical = 0x00000010, // [SecurityCritical(SecurityCriticalScope.All)]
+ TokenSecurityDescriptorFlags_APTCA = 0x00000020, // [AllowPartiallyTrustedCallers] (VisibleByDefault)
+ TokenSecurityDescriptorFlags_ConditionalAPTCA = 0x00000040, // [AllowPartiallyTrustedCallers] (NotVisibleByDefault)
+ TokenSecurityDescriptorFlags_Critical = 0x00000080, // [SecurityCritical] (regardless of scope)
+ TokenSecurityDescriptorFlags_SecurityRules = 0x00000100, // [SecurityRules]
+ TokenSecurityDescriptorFlags_SafeCritical = 0x00000200, // [SecuritySafeCritical]
+ TokenSecurityDescriptorFlags_Transparent = 0x00000400, // [SecurityTransparent]
+ TokenSecurityDescriptorFlags_TreatAsSafe = 0x00000800, // [SecurityTreatAsSafe]
+
+ // Semantic data
+    TokenSecurityDescriptorFlags_SemanticMask               = 0x0000F000,
+ TokenSecurityDescriptorFlags_IsSemanticComputed = 0x00001000,
+ TokenSecurityDescriptorFlags_IsSemanticCritical = 0x00002000,
+ TokenSecurityDescriptorFlags_IsSemanticTreatAsSafe = 0x00004000,
+ TokenSecurityDescriptorFlags_IsSemanticExternallyVisible= 0x00008000,
+
+ // Rules bits
+ TokenSecurityDescriptorFlags_RulesMask = 0x0FFF0000,
+    TokenSecurityDescriptorFlags_SkipFullTrustVerification  = 0x00010000,  // In full trust do not do IL verification for transparent code
+
+ // Rules version
+ TokenSecurityDescriptorFlags_RulesVersionMask = 0xF0000000
+};
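+
+// Worked example (illustrative, not part of the original sources): the value
+// 0x20010011 decomposes under the layout above as rules version 2 (Level2),
+// rules bit SkipFullTrustVerification, raw attribute AllCritical, and the
+// IsComputed metabit, with no semantic bits set.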
+
+inline TokenSecurityDescriptorFlags operator|(TokenSecurityDescriptorFlags lhs,
+ TokenSecurityDescriptorFlags rhs);
+
+inline TokenSecurityDescriptorFlags operator|=(TokenSecurityDescriptorFlags& lhs,
+ TokenSecurityDescriptorFlags rhs);
+
+inline TokenSecurityDescriptorFlags operator&(TokenSecurityDescriptorFlags lhs,
+ TokenSecurityDescriptorFlags rhs);
+
+inline TokenSecurityDescriptorFlags operator&=(TokenSecurityDescriptorFlags& lhs,
+ TokenSecurityDescriptorFlags rhs);
+
+inline TokenSecurityDescriptorFlags operator~(TokenSecurityDescriptorFlags flags);
+
+// Get the version of the security rules that token security descriptor flags are requesting
+inline SecurityRuleSet GetSecurityRuleSet(TokenSecurityDescriptorFlags flags);
+
+// Encode a security rule set into token flags - this reverses GetSecurityRuleSet
+inline TokenSecurityDescriptorFlags EncodeSecurityRuleSet(SecurityRuleSet ruleSet);
+
+#ifdef FEATURE_APTCA
+TokenSecurityDescriptorFlags ParseAptcaAttribute(const BYTE *pbAptcaBlob,
+ DWORD cbAptcaBlob);
+#endif // FEATURE_APTCA
+
+TokenSecurityDescriptorFlags ParseSecurityRulesAttribute(const BYTE *pbSecurityRulesBlob,
+ DWORD cbSecurityRulesBlob);
+
+//
+// #TokenSecurityDescriptorSemanticLookup
+//
+// Token security descriptors are used to get information on the security state of a specific metadata
+// token. They have two types of lookup - standard and semantic. Standard lookup is cheaper and only looks at
+// the specific metadata token. Semantic lookup will follow the token to its parents, figuring out if the
+// token is semantically critical or transparent due to a containing item. For instance:
+//
+// [SecurityCritical]
+// class A
+// {
+// class B { }
+// }
+//
+// A TokenSecurityDescriptor's standard lookup for B will say that it is transparent because B does not
+// directly have a critical attribute. However, a semantic lookup will notice that A is critical and
+// contains B, therefore B is also critical.
+//
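+// A hedged usage sketch (illustrative; pModule and tkB - the typedef token of
+// the nested class B above - are assumed to be available):
+//
+//     TokenSecurityDescriptor tsd(pModule, tkB);
+//     tsd.GetMetadataFlags();    // standard lookup: no Critical bit on B itself
+//     tsd.IsSemanticCritical();  // semantic lookup: TRUE, inherited from A
+//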
+
+class TokenSecurityDescriptor
+{
+private:
+ PTR_Module m_pModule;
+ mdToken m_token;
+ TokenSecurityDescriptorFlags m_flags;
+
+public:
+ inline TokenSecurityDescriptor(PTR_Module pModule, mdToken token);
+
+ void VerifyDataComputed();
+ void VerifySemanticDataComputed();
+
+ // Get the raw flags for the token
+ inline TokenSecurityDescriptorFlags GetFlags();
+
+ //
+ // Critical / transparent checks for the specific metadata token only - these methods do not take into
+ // account the containment of the token and therefore only include information about the token itself
+ // and cannot be used to determine if the item represented by the token is semantically critical.
+ //
+ // See code:TokenSecurityDescriptor#TokenSecurityDescriptorSemanticLookup
+ //
+
+ // Get the attributes that were set on the token
+ inline TokenSecurityDescriptorFlags GetMetadataFlags();
+
+ //
+ // Semantic critical / transparent checks for the metadata token - these methods take into account
+ // containers of the token to get a true semantic security status for the token.
+ //
+ // See code:TokenSecurityDescriptor#TokenSecurityDescriptorSemanticLookup
+ //
+
+ inline BOOL IsSemanticCritical();
+
+ inline BOOL IsSemanticTreatAsSafe();
+
+ inline BOOL IsSemanticExternallyVisible();
+
+ // static helper to find cached security descriptors based on token
+ static HashDatum LookupSecurityDescriptor(void* pKey);
+
+ static HashDatum LookupSecurityDescriptor_Slow(AppDomain* pDomain,
+ void* pKey,
+ EEPtrHashTable &rCachedMethodPermissionsHash );
+
+ // static helper to insert a security descriptor for a token, dupes not allowed, returns previous entry in hash table
+ static HashDatum InsertSecurityDescriptor(void* pKey, HashDatum pHashDatum);
+
+ // static helper to parse the security attributes for a token from a given metadata importer
+ static TokenSecurityDescriptorFlags ReadSecurityAttributes(IMDInternalImport *pmdImport, mdToken token);
+
+private:
+    // Does the type represented by this TokenSecurityDescriptor participate in type equivalence?
+ inline BOOL IsTypeEquivalent();
+
+private:
+ // Helper class which fires transparency calculation begin/end ETW events
+ class TokenSecurityDescriptorTransparencyEtwEvents
+ {
+ private:
+ const TokenSecurityDescriptor *m_pTSD;
+
+ public:
+ inline TokenSecurityDescriptorTransparencyEtwEvents(const TokenSecurityDescriptor *pTSD);
+ inline ~TokenSecurityDescriptorTransparencyEtwEvents();
+ };
+};
+
+enum MethodSecurityDescriptorFlags
+{
+ MethodSecurityDescriptorFlags_None = 0x0000,
+ MethodSecurityDescriptorFlags_IsComputed = 0x0001,
+
+ // Method transparency info is cached directly on MethodDesc for performance reasons
+ // These flags are used only during calculation of transparency information; runtime data
+ // should be read from the method desc
+ MethodSecurityDescriptorFlags_IsCritical = 0x0002,
+ MethodSecurityDescriptorFlags_IsTreatAsSafe = 0x0004,
+
+ MethodSecurityDescriptorFlags_IsBuiltInCASPermsOnly = 0x0008,
+ MethodSecurityDescriptorFlags_IsDemandsOnly = 0x0010,
+ MethodSecurityDescriptorFlags_AssertAllowed = 0x0020,
+ MethodSecurityDescriptorFlags_CanCache = 0x0040,
+};
+
+inline MethodSecurityDescriptorFlags operator|(MethodSecurityDescriptorFlags lhs,
+ MethodSecurityDescriptorFlags rhs);
+
+inline MethodSecurityDescriptorFlags operator|=(MethodSecurityDescriptorFlags& lhs,
+ MethodSecurityDescriptorFlags rhs);
+
+inline MethodSecurityDescriptorFlags operator&(MethodSecurityDescriptorFlags lhs,
+ MethodSecurityDescriptorFlags rhs);
+
+inline MethodSecurityDescriptorFlags operator&=(MethodSecurityDescriptorFlags& lhs,
+ MethodSecurityDescriptorFlags rhs);
+
+class MethodSecurityDescriptor
+{
+private:
+ MethodDesc *m_pMD;
+ DeclActionInfo *m_pRuntimeDeclActionInfo; // run-time declarative actions list
+ TokenDeclActionInfo *m_pTokenDeclActionInfo; // link-time declarative actions list
+ MethodSecurityDescriptorFlags m_flags;
+    DWORD m_declFlagsDuringPreStub;             // declarative run-time security flags
+
+public:
+ explicit inline MethodSecurityDescriptor(MethodDesc* pMD, BOOL fCanCache = TRUE);
+
+ inline BOOL CanAssert();
+ inline void SetCanAssert();
+
+ inline BOOL CanCache();
+ inline void SetCanCache();
+
+ inline BOOL HasRuntimeDeclarativeSecurity();
+ inline BOOL HasLinkOrInheritanceDeclarativeSecurity();
+ inline BOOL HasLinktimeDeclarativeSecurity();
+ inline BOOL HasInheritanceDeclarativeSecurity();
+
+ inline mdToken GetToken();
+ inline MethodDesc *GetMethod();
+ inline IMDInternalImport *GetIMDInternalImport();
+
+ inline BOOL ContainsBuiltInCASDemandsOnly();
+ inline DeclActionInfo* GetRuntimeDeclActionInfo();
+ inline DWORD GetDeclFlagsDuringPreStub();
+ inline TokenDeclActionInfo* GetTokenDeclActionInfo();
+
+ inline BOOL IsCritical();
+ inline BOOL IsTreatAsSafe();
+
+ inline BOOL IsOpportunisticallyCritical();
+
+ inline HRESULT GetDeclaredPermissionsWithCache(IN CorDeclSecurity action,
+ OUT OBJECTREF *pDeclaredPermissions,
+ OUT PsetCacheEntry **pPCE);
+
+ static HRESULT GetDeclaredPermissionsWithCache(MethodDesc* pMD,
+ IN CorDeclSecurity action,
+ OUT OBJECTREF *pDeclaredPermissions,
+ OUT PsetCacheEntry **pPCE);
+
+ static OBJECTREF GetLinktimePermissions(MethodDesc* pMD, OBJECTREF *prefNonCasDemands);
+
+ inline void InvokeLinktimeChecks(Assembly* pCaller);
+ static inline void InvokeLinktimeChecks(MethodDesc* pMD, Assembly* pCaller);
+
+ void InvokeInheritanceChecks(MethodDesc *pMethod);
+
+    // This method will look for the cached copy of the MethodSecurityDescriptor corresponding to ret_methSecDesc->m_pMD.
+    // If the cache lookup succeeds, we get back the cached copy in ret_methSecDesc.
+    // If the cache lookup fails, then the data is computed in ret_methSecDesc. If we find that this is a cache-able MSD,
+    // a copy is made in the AppDomain heap and inserted into the hash table for future lookups.
+ static void LookupOrCreateMethodSecurityDescriptor(MethodSecurityDescriptor* ret_methSecDesc);
+ static BOOL IsDeclSecurityCASDemandsOnly(DWORD dwMethDeclFlags,
+ mdToken _mdToken,
+ IMDInternalImport *pInternalImport);
+
+private:
+ void ComputeRuntimeDeclarativeSecurityInfo();
+ void ComputeMethodDeclarativeSecurityInfo();
+
+ inline void VerifyDataComputed();
+ void VerifyDataComputedInternal();
+
+    // Force the method to figure out if it is transparent or critical.
+    // NOTE: Generally this is not needed, as the data is cached on the MethodDesc for you. This method should
+    // only be called if the MethodDesc is returning FALSE from HasCriticalTransparentInfo
+ void ComputeCriticalTransparentInfo();
+
+ static BOOL CanMethodSecurityDescriptorBeCached(MethodDesc* pMD);
+
+private:
+ // Helper class which fires transparency calculation begin/end ETW events
+ class MethodSecurityDescriptorTransparencyEtwEvents
+ {
+ private:
+ const MethodSecurityDescriptor *m_pMSD;
+
+ public:
+ inline MethodSecurityDescriptorTransparencyEtwEvents(const MethodSecurityDescriptor *pMSD);
+ inline ~MethodSecurityDescriptorTransparencyEtwEvents();
+ };
+
+    // Helper class to iterate over the methods that the MethodSecurityDescriptor's MethodDesc may be
+    // implementing. This type iterates over interface implementations followed by MethodImpls for virtuals
+    // that the input MethodDesc implements.
+ class MethodImplementationIterator
+ {
+ private:
+ DispatchMap::Iterator m_interfaceIterator;
+ MethodDesc *m_pMD;
+ DWORD m_iMethodImplIndex;
+ bool m_fInterfaceIterationBegun;
+ bool m_fMethodImplIterationBegun;
+
+ public:
+ MethodImplementationIterator(MethodDesc *pMD);
+
+ MethodDesc *Current();
+ bool IsValid();
+ void Next();
+ };
+};
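+
+// A usage sketch for the lookup-or-create pattern (illustrative only; pMD and
+// pCallerAssembly are assumed to be supplied by the caller):
+//
+//     MethodSecurityDescriptor msd(pMD);
+//     MethodSecurityDescriptor::LookupOrCreateMethodSecurityDescriptor(&msd);
+//     if (msd.HasLinktimeDeclarativeSecurity())
+//         msd.InvokeLinktimeChecks(pCallerAssembly);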
+
+enum FieldSecurityDescriptorFlags
+{
+ FieldSecurityDescriptorFlags_None = 0x0000,
+ FieldSecurityDescriptorFlags_IsComputed = 0x0001,
+ FieldSecurityDescriptorFlags_IsCritical = 0x0002,
+ FieldSecurityDescriptorFlags_IsTreatAsSafe = 0x0004,
+};
+
+inline FieldSecurityDescriptorFlags operator|(FieldSecurityDescriptorFlags lhs,
+ FieldSecurityDescriptorFlags rhs);
+
+inline FieldSecurityDescriptorFlags operator|=(FieldSecurityDescriptorFlags& lhs,
+ FieldSecurityDescriptorFlags rhs);
+
+inline FieldSecurityDescriptorFlags operator&(FieldSecurityDescriptorFlags lhs,
+ FieldSecurityDescriptorFlags rhs);
+
+inline FieldSecurityDescriptorFlags operator&=(FieldSecurityDescriptorFlags& lhs,
+ FieldSecurityDescriptorFlags rhs);
+
+class FieldSecurityDescriptor
+{
+private:
+ FieldDesc *m_pFD;
+ FieldSecurityDescriptorFlags m_flags;
+
+public:
+ explicit inline FieldSecurityDescriptor(FieldDesc* pFD);
+
+ void VerifyDataComputed();
+
+ inline BOOL IsCritical();
+ inline BOOL IsTreatAsSafe();
+
+private:
+ // Helper class which fires transparency calculation begin/end ETW events
+ class FieldSecurityDescriptorTransparencyEtwEvents
+ {
+ private:
+ const FieldSecurityDescriptor *m_pFSD;
+
+ public:
+ inline FieldSecurityDescriptorTransparencyEtwEvents(const FieldSecurityDescriptor *pFSD);
+ inline ~FieldSecurityDescriptorTransparencyEtwEvents();
+ };
+};
+
+enum TypeSecurityDescriptorFlags
+{
+ TypeSecurityDescriptorFlags_None = 0x0000,
+
+ // Type transparency info is cached directly on EEClass for performance reasons; these bits are used only
+ // as intermediate state while calculating the final set of bits to cache on the EEClass
+ TypeSecurityDescriptorFlags_IsAllCritical = 0x0001, // Everything introduced by this type is critical
+ TypeSecurityDescriptorFlags_IsAllTransparent = 0x0002, // All code in the type is transparent
+ TypeSecurityDescriptorFlags_IsCritical = 0x0004, // The type is critical, but its introduced methods may not be
+ TypeSecurityDescriptorFlags_IsTreatAsSafe = 0x0008, // Combined with IsAllCritical or IsCritical makes the type SafeCritical
+};
+
+inline TypeSecurityDescriptorFlags operator|(TypeSecurityDescriptorFlags lhs,
+ TypeSecurityDescriptorFlags rhs);
+
+inline TypeSecurityDescriptorFlags operator|=(TypeSecurityDescriptorFlags& lhs,
+ TypeSecurityDescriptorFlags rhs);
+
+inline TypeSecurityDescriptorFlags operator&(TypeSecurityDescriptorFlags lhs,
+ TypeSecurityDescriptorFlags rhs);
+
+inline TypeSecurityDescriptorFlags operator&=(TypeSecurityDescriptorFlags& lhs,
+ TypeSecurityDescriptorFlags rhs);
+
+class TypeSecurityDescriptor
+{
+private:
+ MethodTable *m_pMT;
+ TokenDeclActionInfo *m_pTokenDeclActionInfo;
+ BOOL m_fIsComputed;
+
+public:
+ explicit inline TypeSecurityDescriptor(MethodTable *pMT);
+
+ inline BOOL HasLinkOrInheritanceDeclarativeSecurity();
+ inline BOOL HasLinktimeDeclarativeSecurity();
+ inline BOOL HasInheritanceDeclarativeSecurity();
+
+ // Is everything introduced by the type critical
+ inline BOOL IsAllCritical();
+
+ // Does the type contain only transparent code
+ inline BOOL IsAllTransparent();
+
+ // Combined with IsCritical/IsAllCritical is the type safe critical
+ inline BOOL IsTreatAsSafe();
+
+    // Is the type critical, but not necessarily its contained methods
+ inline BOOL IsCritical();
+
+ // Is the type in an assembly that doesn't care about transparency, and therefore wants the CLR to make
+ // sure that all annotations are correct for it.
+ inline BOOL IsOpportunisticallyCritical();
+
+    // Should this type be considered externally visible when calculating the transparency of the type
+    // and its members (for instance, when seeing if public implies treat as safe)?
+ BOOL IsTypeExternallyVisibleForTransparency();
+
+ inline mdToken GetToken();
+ inline IMDInternalImport *GetIMDInternalImport();
+
+ inline TokenDeclActionInfo* GetTokenDeclActionInfo();
+
+ inline HRESULT GetDeclaredPermissionsWithCache(IN CorDeclSecurity action,
+ OUT OBJECTREF *pDeclaredPermissions,
+ OUT PsetCacheEntry **pPCE);
+
+ static HRESULT GetDeclaredPermissionsWithCache(MethodTable* pTargetMT,
+ IN CorDeclSecurity action,
+ OUT OBJECTREF *pDeclaredPermissions,
+ OUT PsetCacheEntry **pPCE);
+
+ static OBJECTREF GetLinktimePermissions(MethodTable* pMT, OBJECTREF *prefNonCasDemands);
+
+    // Is the type represented by this TypeSecurityDescriptor participating in type equivalence
+ inline BOOL IsTypeEquivalent();
+
+ void InvokeInheritanceChecks(MethodTable* pMT);
+ inline void InvokeLinktimeChecks(Assembly* pCaller);
+ static inline void InvokeLinktimeChecks(MethodTable* pMT, Assembly* pCaller);
+
+private:
+ inline TypeSecurityDescriptor& operator=(const TypeSecurityDescriptor &tsd);
+ void ComputeTypeDeclarativeSecurityInfo();
+ static TypeSecurityDescriptor* GetTypeSecurityDescriptor(MethodTable* pMT);
+ void VerifyDataComputedInternal();
+ inline void VerifyDataComputed();
+    // Force the type to figure out if it is transparent or critical.
+    // NOTE: Generally this is not needed, as the data is cached on the EEClass for you. This method should
+    // only be called if the EEClass is returning FALSE from HasCriticalTransparentInfo
+ void ComputeCriticalTransparentInfo();
+ static BOOL CanTypeSecurityDescriptorBeCached(MethodTable* pMT);
+
+private:
+ // Helper class which fires transparency calculation begin/end ETW events
+ class TypeSecurityDescriptorTransparencyEtwEvents
+ {
+ private:
+ const TypeSecurityDescriptor *m_pTSD;
+
+ public:
+ inline TypeSecurityDescriptorTransparencyEtwEvents(const TypeSecurityDescriptor *pTSD);
+ inline ~TypeSecurityDescriptorTransparencyEtwEvents();
+ };
+};
+
+
+enum ModuleSecurityDescriptorFlags
+{
+ ModuleSecurityDescriptorFlags_None = 0x0000,
+ ModuleSecurityDescriptorFlags_IsComputed = 0x0001,
+
+ ModuleSecurityDescriptorFlags_IsAPTCA = 0x0002, // The assembly allows partially trusted callers
+ ModuleSecurityDescriptorFlags_IsAllCritical = 0x0004, // Every type and method introduced by the assembly is critical
+ ModuleSecurityDescriptorFlags_IsAllTransparent = 0x0008, // Every type and method in the assembly is transparent
+ ModuleSecurityDescriptorFlags_IsTreatAsSafe = 0x0010, // Combined with IsAllCritical - every type and method introduced by the assembly is safe critical
+ ModuleSecurityDescriptorFlags_IsOpportunisticallyCritical = 0x0020, // Ensure that the assembly follows all transparency rules by making all methods critical or safe critical as needed
+ ModuleSecurityDescriptorFlags_SkipFullTrustVerification = 0x0040, // Fully trusted transparent code does not require verification
+ ModuleSecurityDescriptorFlags_TransparentDueToPartialTrust = 0x0080, // Whether we made the assembly all transparent because it was partially-trusted
+ ModuleSecurityDescriptorFlags_IsMicrosoftPlatform = 0x0100, // Whether we made the assembly microsoft platform. Stored in ngen image to determine if the ngen
+ // was generated as microsoft platform assembly (full trust) or not.
+};
+
+inline ModuleSecurityDescriptorFlags operator|(ModuleSecurityDescriptorFlags lhs,
+ ModuleSecurityDescriptorFlags rhs);
+
+inline ModuleSecurityDescriptorFlags operator|=(ModuleSecurityDescriptorFlags& lhs,
+ ModuleSecurityDescriptorFlags rhs);
+
+inline ModuleSecurityDescriptorFlags operator&(ModuleSecurityDescriptorFlags lhs,
+ ModuleSecurityDescriptorFlags rhs);
+
+inline ModuleSecurityDescriptorFlags operator&=(ModuleSecurityDescriptorFlags& lhs,
+ ModuleSecurityDescriptorFlags rhs);
+
+inline ModuleSecurityDescriptorFlags operator~(ModuleSecurityDescriptorFlags flags);
+
+#ifdef FEATURE_APTCA
+BOOL CheckAssemblyHasBeenKillBitted(LPASSEMBLYNAME pAssemblyName, ULARGE_INTEGER uliFileVersion);
+#endif
+
+// Module security descriptor, this class contains static security information about the module
+// this information will get persisted in the NGen image
+class ModuleSecurityDescriptor
+{
+ friend class Module;
+
+private:
+ PTR_Module m_pModule;
+ ModuleSecurityDescriptorFlags m_flags;
+ TokenSecurityDescriptorFlags m_tokenFlags;
+
+private:
+ explicit inline ModuleSecurityDescriptor(PTR_Module pModule);
+
+public:
+ static inline BOOL IsMarkedTransparent(Assembly* pAssembly);
+
+ static ModuleSecurityDescriptor* GetModuleSecurityDescriptor(Assembly* pAssembly);
+
+ void Save(DataImage *image);
+ void Fixup(DataImage *image);
+
+ void VerifyDataComputed();
+
+ inline void OverrideTokenFlags(TokenSecurityDescriptorFlags tokenFlags);
+ inline TokenSecurityDescriptorFlags GetTokenFlags();
+
+ inline Module *GetModule();
+
+#ifdef DACCESS_COMPILE
+ // Get the value of the module security descriptor flags without forcing them to be computed
+ inline ModuleSecurityDescriptorFlags GetRawFlags();
+#endif // DACCESS_COMPILE
+
+ // Is Microsoft Platform
+ inline BOOL IsMicrosoftPlatform();
+
+ // Is every method and type in the assembly transparent
+ inline BOOL IsAllTransparent();
+
+ // Is every method and type introduced by the assembly critical
+ inline BOOL IsAllCritical();
+
+ // Combined with IsAllCritical - is every method and type introduced by the assembly safe critical
+ inline BOOL IsTreatAsSafe();
+
+ // Does the assembly not care about transparency, and wants the CLR to take care of making sure everything
+ // is annotated properly in the assembly.
+ inline BOOL IsOpportunisticallyCritical();
+
+ // Does the assembly contain a mix of critical and transparent code
+ inline BOOL IsMixedTransparency();
+
+ // Partial trust assemblies are forced all-transparent under some conditions. This
+ // tells us whether that is true for this particular assembly.
+ inline BOOL IsAllTransparentDueToPartialTrust();
+
+ // Get the rule set the assembly uses
+ inline SecurityRuleSet GetSecurityRuleSet();
+
+#ifndef FEATURE_CORECLR
+ // Can fully trusted transparent code bypass verification
+ inline BOOL CanTransparentCodeSkipVerification();
+#endif // !FEATURE_CORECLR
+
+#if defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+ // Does the assembly allow partially trusted callers
+ inline BOOL IsAPTCA();
+#endif // defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+
+#ifndef FEATURE_CORECLR
+ BOOL AssemblyVersionRequiresLegacyTransparency();
+#endif // !FEATURE_CORECLR
+
+private:
+ // Helper class which fires transparency calculation begin/end ETW events
+ class ModuleSecurityDescriptorTransparencyEtwEvents
+ {
+ private:
+ ModuleSecurityDescriptor *m_pMSD;
+
+ public:
+ inline ModuleSecurityDescriptorTransparencyEtwEvents(ModuleSecurityDescriptor *pMSD);
+ inline ~ModuleSecurityDescriptorTransparencyEtwEvents();
+ };
+};
+
+#include "securitymeta.inl"
+
+#endif // __SECURITYMETA_H__
diff --git a/src/vm/securitymeta.inl b/src/vm/securitymeta.inl
new file mode 100644
index 0000000000..aaff5096f8
--- /dev/null
+++ b/src/vm/securitymeta.inl
@@ -0,0 +1,1280 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//--------------------------------------------------------------------------
+// securitymeta.inl
+//
+// pre-computes various security information, declarative and runtime meta-info
+//
+
+
+//
+//--------------------------------------------------------------------------
+
+
+#include "typestring.h"
+
+#include "securitypolicy.h"
+#include "securitydeclarative.h"
+
+#ifndef __SECURITYMETA_INL__
+#define __SECURITYMETA_INL__
+
+inline TokenSecurityDescriptorFlags operator|(TokenSecurityDescriptorFlags lhs,
+ TokenSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<TokenSecurityDescriptorFlags>(static_cast<DWORD>(lhs) |
+ static_cast<DWORD>(rhs));
+}
+
+inline TokenSecurityDescriptorFlags operator|=(TokenSecurityDescriptorFlags& lhs,
+ TokenSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<TokenSecurityDescriptorFlags>(static_cast<DWORD>(lhs) |
+ static_cast<DWORD>(rhs));
+ return lhs;
+}
+
+inline TokenSecurityDescriptorFlags operator&(TokenSecurityDescriptorFlags lhs,
+ TokenSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<TokenSecurityDescriptorFlags>(static_cast<DWORD>(lhs) &
+ static_cast<DWORD>(rhs));
+}
+
+inline TokenSecurityDescriptorFlags operator&=(TokenSecurityDescriptorFlags& lhs,
+ TokenSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<TokenSecurityDescriptorFlags>(static_cast<DWORD>(lhs) &
+ static_cast<DWORD>(rhs));
+ return lhs;
+}
+
+inline TokenSecurityDescriptorFlags operator~(TokenSecurityDescriptorFlags flags)
+{
+ LIMITED_METHOD_CONTRACT;
+
+    // Invert all the bits which aren't part of the rules version number,
+    // carrying the version through unchanged
+    DWORD flagBits = flags & ~static_cast<DWORD>(TokenSecurityDescriptorFlags_RulesVersionMask);
+    return static_cast<TokenSecurityDescriptorFlags>(
+        EncodeSecurityRuleSet(GetSecurityRuleSet(flags)) |
+        (~flagBits & ~static_cast<DWORD>(TokenSecurityDescriptorFlags_RulesVersionMask)));
+}
+
+// Get the version of the security rules that token security descriptor flags are requesting
+inline SecurityRuleSet GetSecurityRuleSet(TokenSecurityDescriptorFlags flags)
+{
+ LIMITED_METHOD_CONTRACT;
+    return static_cast<SecurityRuleSet>((flags & TokenSecurityDescriptorFlags_RulesVersionMask) >> 28);
+}
+
+// Encode a security rule set into token flags - this reverses GetSecurityRuleSet
+inline TokenSecurityDescriptorFlags EncodeSecurityRuleSet(SecurityRuleSet ruleSet)
+{
+ LIMITED_METHOD_CONTRACT;
+    return static_cast<TokenSecurityDescriptorFlags>(static_cast<DWORD>(ruleSet) << 28);
+}
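+
+// Round-trip example (illustrative): EncodeSecurityRuleSet(SecurityRuleSet_Level2)
+// yields 0x20000000 in the top nibble, and GetSecurityRuleSet recovers
+// SecurityRuleSet_Level2 from it without disturbing the lower 28 bits.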
+
+inline TokenSecurityDescriptor::TokenSecurityDescriptor(PTR_Module pModule, mdToken token)
+ : m_pModule(pModule),
+ m_token(token),
+ m_flags(TokenSecurityDescriptorFlags_None)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pModule);
+}
+
+inline TokenSecurityDescriptorFlags TokenSecurityDescriptor::GetFlags()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return m_flags;
+}
+
+// Get the attributes that were set on the token
+inline TokenSecurityDescriptorFlags TokenSecurityDescriptor::GetMetadataFlags()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return m_flags & TokenSecurityDescriptorFlags_RawAttributeMask;
+}
+
+inline BOOL TokenSecurityDescriptor::IsSemanticCritical()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifySemanticDataComputed();
+ return !!(m_flags & TokenSecurityDescriptorFlags_IsSemanticCritical);
+}
+
+inline BOOL TokenSecurityDescriptor::IsSemanticTreatAsSafe()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifySemanticDataComputed();
+ return !!(m_flags & TokenSecurityDescriptorFlags_IsSemanticTreatAsSafe);
+}
+
+inline BOOL TokenSecurityDescriptor::IsSemanticExternallyVisible()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifySemanticDataComputed();
+ return !!(m_flags & TokenSecurityDescriptorFlags_IsSemanticExternallyVisible);
+}
+
+// Determine if the type represented by the token in this TokenSecurityDescriptor is participating in type
+// equivalence.
+inline BOOL TokenSecurityDescriptor::IsTypeEquivalent()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(TypeFromToken(m_token) == mdtTypeDef);
+ return IsTypeDefEquivalent(m_token, m_pModule);
+}
+
+#ifndef DACCESS_COMPILE
+
+inline TokenSecurityDescriptor::TokenSecurityDescriptorTransparencyEtwEvents::TokenSecurityDescriptorTransparencyEtwEvents(const TokenSecurityDescriptor *pTSD)
+ : m_pTSD(pTSD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TokenTransparencyComputationStart))
+ {
+ LPCWSTR module = m_pTSD->m_pModule->GetPathForErrorMessages();
+
+ ETW::SecurityLog::FireTokenTransparencyComputationStart(m_pTSD->m_token,
+ module,
+ ::GetAppDomain()->GetId().m_dwId);
+ }
+}
+
+inline TokenSecurityDescriptor::TokenSecurityDescriptorTransparencyEtwEvents::~TokenSecurityDescriptorTransparencyEtwEvents()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TokenTransparencyComputationEnd))
+ {
+ LPCWSTR module = m_pTSD->m_pModule->GetPathForErrorMessages();
+
+ ETW::SecurityLog::FireTokenTransparencyComputationEnd(m_pTSD->m_token,
+ module,
+ !!(m_pTSD->m_flags & TokenSecurityDescriptorFlags_IsSemanticCritical),
+ !!(m_pTSD->m_flags & TokenSecurityDescriptorFlags_IsSemanticTreatAsSafe),
+ ::GetAppDomain()->GetId().m_dwId);
+ }
+}
+
+#endif //!DACCESS_COMPILE
+
+inline MethodSecurityDescriptor::MethodSecurityDescriptor(MethodDesc* pMD, BOOL fCanCache /* = TRUE */) :
+ m_pMD(pMD),
+ m_pRuntimeDeclActionInfo(NULL),
+ m_pTokenDeclActionInfo(NULL),
+ m_flags(MethodSecurityDescriptorFlags_None),
+ m_declFlagsDuringPreStub(0)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (fCanCache)
+ {
+ SetCanCache();
+ }
+}
+
+inline BOOL MethodSecurityDescriptor::CanAssert()
+{
+    // No need to call VerifyDataComputed here; this value is set by SecurityDeclarative::EnsureAssertAllowed as an optimization
+ LIMITED_METHOD_CONTRACT;
+ return !!(m_flags & MethodSecurityDescriptorFlags_AssertAllowed);
+}
+
+inline void MethodSecurityDescriptor::SetCanAssert()
+{
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockOr(reinterpret_cast<DWORD *>(&m_flags), MethodSecurityDescriptorFlags_AssertAllowed);
+}
+
+inline BOOL MethodSecurityDescriptor::CanCache()
+{
+ LIMITED_METHOD_CONTRACT;
+ return !!(m_flags & MethodSecurityDescriptorFlags_CanCache);
+}
+
+inline void MethodSecurityDescriptor::SetCanCache()
+{
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockOr(reinterpret_cast<DWORD *>(&m_flags), MethodSecurityDescriptorFlags_CanCache);
+}
+
+inline BOOL MethodSecurityDescriptor::HasRuntimeDeclarativeSecurity()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pMD->IsInterceptedForDeclSecurity();
+}
+
+inline BOOL MethodSecurityDescriptor::HasLinkOrInheritanceDeclarativeSecurity()
+{
+ WRAPPER_NO_CONTRACT;
+ return HasLinktimeDeclarativeSecurity() || HasInheritanceDeclarativeSecurity();
+}
+
+inline BOOL MethodSecurityDescriptor::HasLinktimeDeclarativeSecurity()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pMD->RequiresLinktimeCheck();
+}
+
+inline BOOL MethodSecurityDescriptor::HasInheritanceDeclarativeSecurity()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pMD->RequiresInheritanceCheck();
+}
+
+inline mdToken MethodSecurityDescriptor::GetToken()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pMD->GetMemberDef();
+}
+
+inline MethodDesc *MethodSecurityDescriptor::GetMethod()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pMD;
+}
+
+inline IMDInternalImport *MethodSecurityDescriptor::GetIMDInternalImport()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pMD->GetMDImport();
+}
+
+
+inline BOOL MethodSecurityDescriptor::ContainsBuiltInCASDemandsOnly()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return ((m_flags & MethodSecurityDescriptorFlags_IsBuiltInCASPermsOnly) &&
+ (m_flags & MethodSecurityDescriptorFlags_IsDemandsOnly));
+}
+
+inline DeclActionInfo* MethodSecurityDescriptor::GetRuntimeDeclActionInfo()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return m_pRuntimeDeclActionInfo;
+}
+
+inline DWORD MethodSecurityDescriptor::GetDeclFlagsDuringPreStub()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return m_declFlagsDuringPreStub;
+}
+
+inline TokenDeclActionInfo* MethodSecurityDescriptor::GetTokenDeclActionInfo()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return m_pTokenDeclActionInfo;
+}
+
+inline BOOL MethodSecurityDescriptor::IsCritical()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!m_pMD->HasCriticalTransparentInfo())
+ ComputeCriticalTransparentInfo();
+ return m_pMD->IsCritical();
+}
+
+inline BOOL MethodSecurityDescriptor::IsTreatAsSafe()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!m_pMD->HasCriticalTransparentInfo())
+ ComputeCriticalTransparentInfo();
+ return m_pMD->IsTreatAsSafe();
+}
+
+inline BOOL MethodSecurityDescriptor::IsOpportunisticallyCritical()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ TypeSecurityDescriptor typeSecDesc(m_pMD->GetMethodTable());
+ return typeSecDesc.IsOpportunisticallyCritical();
+}
+
+inline HRESULT MethodSecurityDescriptor::GetDeclaredPermissionsWithCache(IN CorDeclSecurity action,
+ OUT OBJECTREF *pDeclaredPermissions,
+ OUT PsetCacheEntry **pPCE)
+{
+ WRAPPER_NO_CONTRACT;
+ return GetTokenDeclActionInfo()->GetDeclaredPermissionsWithCache(action, pDeclaredPermissions, pPCE);
+}
+
+// static
+inline HRESULT MethodSecurityDescriptor::GetDeclaredPermissionsWithCache(MethodDesc* pMD,
+ IN CorDeclSecurity action,
+ OUT OBJECTREF *pDeclaredPermissions,
+ OUT PsetCacheEntry **pPCE)
+{
+ WRAPPER_NO_CONTRACT;
+ MethodSecurityDescriptor methodSecurityDesc(pMD);
+ LookupOrCreateMethodSecurityDescriptor(&methodSecurityDesc);
+ return methodSecurityDesc.GetDeclaredPermissionsWithCache(action, pDeclaredPermissions, pPCE);
+}
+
+// static
+inline OBJECTREF MethodSecurityDescriptor::GetLinktimePermissions(MethodDesc* pMD,
+ OBJECTREF *prefNonCasDemands)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (!pMD->RequiresLinktimeCheck())
+ return NULL;
+
+ MethodSecurityDescriptor methodSecurityDesc(pMD);
+ LookupOrCreateMethodSecurityDescriptor(&methodSecurityDesc);
+ return methodSecurityDesc.GetTokenDeclActionInfo()->GetLinktimePermissions(prefNonCasDemands);
+}
+
+inline void MethodSecurityDescriptor::InvokeLinktimeChecks(Assembly* pCaller)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (!HasLinktimeDeclarativeSecurity())
+ return;
+
+ GetTokenDeclActionInfo()->InvokeLinktimeChecks(pCaller);
+}
+
+// static
+inline void MethodSecurityDescriptor::InvokeLinktimeChecks(MethodDesc* pMD, Assembly* pCaller)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (!pMD->RequiresLinktimeCheck())
+ return;
+
+ MethodSecurityDescriptor methodSecurityDesc(pMD);
+ LookupOrCreateMethodSecurityDescriptor(&methodSecurityDesc);
+ methodSecurityDesc.InvokeLinktimeChecks(pCaller);
+}
+
+// static
+inline BOOL MethodSecurityDescriptor::IsDeclSecurityCASDemandsOnly(DWORD dwMethDeclFlags,
+ mdToken _mdToken,
+ IMDInternalImport *pInternalImport)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_CORECLR
+ // Non-CAS demands are not supported in CoreCLR
+ return TRUE;
+#else
+ GCX_COOP();
+
+ PsetCacheEntry *tokenSetIndexes[dclMaximumValue + 1];
+ SecurityDeclarative::DetectDeclActionsOnToken(_mdToken, dwMethDeclFlags, tokenSetIndexes, pInternalImport);
+ SecurityProperties sp(dwMethDeclFlags);
+ if (!sp.FDemandsOnly())
+ return FALSE;
+
+ DWORD dwLocalAction;
+ bool builtInCASPermsOnly = true;
+ for (dwLocalAction = 0; dwLocalAction <= dclMaximumValue && builtInCASPermsOnly; dwLocalAction++)
+ {
+ if (tokenSetIndexes[dwLocalAction] != NULL)
+ {
+ builtInCASPermsOnly = builtInCASPermsOnly && (tokenSetIndexes[dwLocalAction]->ContainsBuiltinCASPermsOnly(dwLocalAction));
+ }
+ }
+
+ return (builtInCASPermsOnly); // we only get here if there are only demands...so it suffices to return this value directly
+#endif
+}
+
+#ifndef DACCESS_COMPILE
+
+inline MethodSecurityDescriptor::MethodSecurityDescriptorTransparencyEtwEvents::MethodSecurityDescriptorTransparencyEtwEvents(const MethodSecurityDescriptor *pMSD)
+ : m_pMSD(pMSD)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, MethodTransparencyComputationStart))
+ {
+ LPCWSTR module = m_pMSD->m_pMD->GetModule()->GetPathForErrorMessages();
+
+ SString method;
+ m_pMSD->m_pMD->GetFullMethodInfo(method);
+
+ ETW::SecurityLog::FireMethodTransparencyComputationStart(method.GetUnicode(),
+ module,
+ ::GetAppDomain()->GetId().m_dwId);
+ }
+}
+
+inline MethodSecurityDescriptor::MethodSecurityDescriptorTransparencyEtwEvents::~MethodSecurityDescriptorTransparencyEtwEvents()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, MethodTransparencyComputationEnd))
+ {
+ LPCWSTR module = m_pMSD->m_pMD->GetModule()->GetPathForErrorMessages();
+
+ SString method;
+ m_pMSD->m_pMD->GetFullMethodInfo(method);
+
+ BOOL fIsCritical = FALSE;
+ BOOL fIsTreatAsSafe = FALSE;
+
+ if (m_pMSD->m_pMD->HasCriticalTransparentInfo())
+ {
+ fIsCritical = m_pMSD->m_pMD->IsCritical();
+ fIsTreatAsSafe = m_pMSD->m_pMD->IsTreatAsSafe();
+ }
+
+ ETW::SecurityLog::FireMethodTransparencyComputationEnd(method.GetUnicode(),
+ module,
+ ::GetAppDomain()->GetId().m_dwId,
+ fIsCritical,
+ fIsTreatAsSafe);
+ }
+}
+
+#endif //!DACCESS_COMPILE
+
+inline FieldSecurityDescriptorFlags operator|(FieldSecurityDescriptorFlags lhs,
+ FieldSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<FieldSecurityDescriptorFlags>(static_cast<DWORD>(lhs) |
+ static_cast<DWORD>(rhs));
+}
+
+inline FieldSecurityDescriptorFlags operator|=(FieldSecurityDescriptorFlags& lhs,
+ FieldSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<FieldSecurityDescriptorFlags>(static_cast<DWORD>(lhs) |
+ static_cast<DWORD>(rhs));
+ return lhs;
+}
+
+inline FieldSecurityDescriptorFlags operator&(FieldSecurityDescriptorFlags lhs,
+ FieldSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<FieldSecurityDescriptorFlags>(static_cast<DWORD>(lhs) &
+ static_cast<DWORD>(rhs));
+}
+
+inline FieldSecurityDescriptorFlags operator&=(FieldSecurityDescriptorFlags& lhs,
+ FieldSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<FieldSecurityDescriptorFlags>(static_cast<DWORD>(lhs) &
+ static_cast<DWORD>(rhs));
+ return lhs;
+}
+
+inline FieldSecurityDescriptor::FieldSecurityDescriptor(FieldDesc* pFD) :
+ m_pFD(pFD),
+ m_flags(FieldSecurityDescriptorFlags_None)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pFD);
+}
+
+inline BOOL FieldSecurityDescriptor::IsCritical()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return !!(m_flags & FieldSecurityDescriptorFlags_IsCritical);
+}
+
+inline BOOL FieldSecurityDescriptor::IsTreatAsSafe()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return !!(m_flags & FieldSecurityDescriptorFlags_IsTreatAsSafe);
+}
+
+#ifndef DACCESS_COMPILE
+
+inline FieldSecurityDescriptor::FieldSecurityDescriptorTransparencyEtwEvents::FieldSecurityDescriptorTransparencyEtwEvents(const FieldSecurityDescriptor *pFSD)
+ : m_pFSD(pFSD)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, FieldTransparencyComputationStart))
+ {
+ LPCWSTR module = m_pFSD->m_pFD->GetModule()->GetPathForErrorMessages();
+
+ SString field;
+ TypeString::AppendType(field, TypeHandle(m_pFSD->m_pFD->GetApproxEnclosingMethodTable()));
+ field.AppendUTF8("::");
+ field.AppendUTF8(m_pFSD->m_pFD->GetName());
+
+ ETW::SecurityLog::FireFieldTransparencyComputationStart(field.GetUnicode(),
+ module,
+ ::GetAppDomain()->GetId().m_dwId);
+ }
+}
+
+inline FieldSecurityDescriptor::FieldSecurityDescriptorTransparencyEtwEvents::~FieldSecurityDescriptorTransparencyEtwEvents()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, FieldTransparencyComputationEnd))
+ {
+ LPCWSTR module = m_pFSD->m_pFD->GetModule()->GetPathForErrorMessages();
+
+ SString field;
+ TypeString::AppendType(field, TypeHandle(m_pFSD->m_pFD->GetApproxEnclosingMethodTable()));
+ field.AppendUTF8("::");
+ field.AppendUTF8(m_pFSD->m_pFD->GetName());
+
+ ETW::SecurityLog::FireFieldTransparencyComputationEnd(field.GetUnicode(),
+ module,
+ ::GetAppDomain()->GetId().m_dwId,
+ !!(m_pFSD->m_flags & FieldSecurityDescriptorFlags_IsCritical),
+ !!(m_pFSD->m_flags & FieldSecurityDescriptorFlags_IsTreatAsSafe));
+ }
+}
+
+#endif //!DACCESS_COMPILE
+
+inline TypeSecurityDescriptor::TypeSecurityDescriptor(MethodTable *pMT) :
+ m_pMT(pMT->GetCanonicalMethodTable()),
+ m_pTokenDeclActionInfo(NULL),
+ m_fIsComputed(FALSE)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pMT);
+}
+
+inline BOOL TypeSecurityDescriptor::HasLinkOrInheritanceDeclarativeSecurity()
+{
+ WRAPPER_NO_CONTRACT;
+ return HasLinktimeDeclarativeSecurity() || HasInheritanceDeclarativeSecurity();
+}
+
+inline BOOL TypeSecurityDescriptor::HasLinktimeDeclarativeSecurity()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pMT->GetClass()->RequiresLinktimeCheck();
+}
+
+inline BOOL TypeSecurityDescriptor::HasInheritanceDeclarativeSecurity()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pMT->GetClass()->RequiresInheritanceCheck();
+}
+
+inline BOOL TypeSecurityDescriptor::IsCritical()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EEClass *pClass = m_pMT->GetClass();
+ if (!pClass->HasCriticalTransparentInfo())
+ {
+ ComputeCriticalTransparentInfo();
+ }
+
+ return pClass->IsAllCritical()
+#ifndef FEATURE_CORECLR
+ || pClass->IsCritical()
+#endif // !FEATURE_CORECLR
+ ;
+}
+
+inline BOOL TypeSecurityDescriptor::IsOpportunisticallyCritical()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ ModuleSecurityDescriptor *pModuleSecDesc = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(m_pMT->GetAssembly());
+ return pModuleSecDesc->IsOpportunisticallyCritical();
+}
+
+inline BOOL TypeSecurityDescriptor::IsAllCritical()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EEClass *pClass = m_pMT->GetClass();
+ if (!pClass->HasCriticalTransparentInfo())
+ ComputeCriticalTransparentInfo();
+ return pClass->IsAllCritical();
+}
+
+inline BOOL TypeSecurityDescriptor::IsAllTransparent()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EEClass *pClass = m_pMT->GetClass();
+ if (!pClass->HasCriticalTransparentInfo())
+ ComputeCriticalTransparentInfo();
+ return pClass->IsAllTransparent();
+}
+
+inline BOOL TypeSecurityDescriptor::IsTreatAsSafe()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EEClass *pClass = m_pMT->GetClass();
+ if (!pClass->HasCriticalTransparentInfo())
+ ComputeCriticalTransparentInfo();
+ return pClass->IsTreatAsSafe();
+}
+
+inline mdToken TypeSecurityDescriptor::GetToken()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pMT->GetCl();
+}
+
+inline IMDInternalImport *TypeSecurityDescriptor::GetIMDInternalImport()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pMT->GetMDImport();
+}
+
+inline TokenDeclActionInfo* TypeSecurityDescriptor::GetTokenDeclActionInfo()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return m_pTokenDeclActionInfo;
+}
+
+inline TypeSecurityDescriptorFlags operator|(TypeSecurityDescriptorFlags lhs,
+ TypeSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<TypeSecurityDescriptorFlags>(static_cast<DWORD>(lhs) |
+ static_cast<DWORD>(rhs));
+}
+
+inline TypeSecurityDescriptorFlags operator|=(TypeSecurityDescriptorFlags& lhs,
+ TypeSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<TypeSecurityDescriptorFlags>(static_cast<DWORD>(lhs) |
+ static_cast<DWORD>(rhs));
+ return lhs;
+}
+
+inline TypeSecurityDescriptorFlags operator&(TypeSecurityDescriptorFlags lhs,
+ TypeSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<TypeSecurityDescriptorFlags>(static_cast<DWORD>(lhs) &
+ static_cast<DWORD>(rhs));
+}
+
+inline TypeSecurityDescriptorFlags operator&=(TypeSecurityDescriptorFlags& lhs,
+ TypeSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<TypeSecurityDescriptorFlags>(static_cast<DWORD>(lhs) &
+ static_cast<DWORD>(rhs));
+ return lhs;
+}
+
+inline HRESULT TypeSecurityDescriptor::GetDeclaredPermissionsWithCache(IN CorDeclSecurity action,
+ OUT OBJECTREF *pDeclaredPermissions,
+ OUT PsetCacheEntry **pPCE)
+{
+ WRAPPER_NO_CONTRACT;
+ return GetTokenDeclActionInfo()->GetDeclaredPermissionsWithCache(action, pDeclaredPermissions, pPCE);
+}
+
+// static
+inline HRESULT TypeSecurityDescriptor::GetDeclaredPermissionsWithCache(MethodTable *pTargetMT,
+ IN CorDeclSecurity action,
+ OUT OBJECTREF *pDeclaredPermissions,
+ OUT PsetCacheEntry **pPCE)
+{
+ WRAPPER_NO_CONTRACT;
+ TypeSecurityDescriptor* pTypeSecurityDesc = GetTypeSecurityDescriptor(pTargetMT);
+ _ASSERTE(pTypeSecurityDesc != NULL);
+ return pTypeSecurityDesc->GetDeclaredPermissionsWithCache(action, pDeclaredPermissions, pPCE);
+}
+
+// static
+inline OBJECTREF TypeSecurityDescriptor::GetLinktimePermissions(MethodTable *pMT,
+ OBJECTREF *prefNonCasDemands)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (!pMT->GetClass()->RequiresLinktimeCheck())
+ return NULL;
+
+ TypeSecurityDescriptor* pTypeSecurityDesc = GetTypeSecurityDescriptor(pMT);
+ _ASSERTE(pTypeSecurityDesc != NULL);
+ return pTypeSecurityDesc->GetTokenDeclActionInfo()->GetLinktimePermissions(prefNonCasDemands);
+}
+
+inline void TypeSecurityDescriptor::InvokeLinktimeChecks(Assembly* pCaller)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ if (!HasLinktimeDeclarativeSecurity())
+ return;
+ GetTokenDeclActionInfo()->InvokeLinktimeChecks(pCaller);
+}
+
+// Determine if the type represented by this TypeSecurityDescriptor is participating in type equivalence. Note that this
+// is only checking to see if the type would like to participate in equivalence, and not if it is actually
+// equivalent to anything - which allows its transparency to be the same regardless of what other types have
+// been loaded.
+inline BOOL TypeSecurityDescriptor::IsTypeEquivalent()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return m_pMT->GetClass()->IsEquivalentType();
+}
+
+// static
+inline void TypeSecurityDescriptor::InvokeLinktimeChecks(MethodTable *pMT, Assembly* pCaller)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ if (!pMT->GetClass()->RequiresLinktimeCheck())
+ return;
+ GetTypeSecurityDescriptor(pMT)->InvokeLinktimeChecks(pCaller);
+}
+
+inline void TypeSecurityDescriptor::VerifyDataComputed()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_fIsComputed)
+ {
+ return;
+ }
+
+ BOOL canTypeSecDescCached = CanTypeSecurityDescriptorBeCached(m_pMT);
+ if (!canTypeSecDescCached)
+ {
+ VerifyDataComputedInternal();
+ }
+ else
+ {
+ TypeSecurityDescriptor* pCachedTypeSecurityDesc = GetTypeSecurityDescriptor(m_pMT);
+ *this = *pCachedTypeSecurityDesc; // copy the struct
+ _ASSERTE(m_fIsComputed);
+ }
+
+ return;
+}
+
+inline TypeSecurityDescriptor& TypeSecurityDescriptor::operator=(const TypeSecurityDescriptor &tsd)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pMT = tsd.m_pMT;
+ m_pTokenDeclActionInfo = tsd.m_pTokenDeclActionInfo;
+ m_fIsComputed = tsd.m_fIsComputed;
+
+ return *this;
+}
+
+#ifndef DACCESS_COMPILE
+
+inline TypeSecurityDescriptor::TypeSecurityDescriptorTransparencyEtwEvents::TypeSecurityDescriptorTransparencyEtwEvents(const TypeSecurityDescriptor *pTSD)
+ : m_pTSD(pTSD)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TypeTransparencyComputationStart))
+ {
+ LPCWSTR module = m_pTSD->m_pMT->GetModule()->GetPathForErrorMessages();
+
+ SString type;
+ if (!IsNilToken(m_pTSD->m_pMT->GetCl()))
+ {
+ TypeString::AppendType(type, TypeHandle(m_pTSD->m_pMT));
+ }
+
+ ETW::SecurityLog::FireTypeTransparencyComputationStart(type.GetUnicode(),
+ module,
+ ::GetAppDomain()->GetId().m_dwId);
+ }
+
+}
+
+inline TypeSecurityDescriptor::TypeSecurityDescriptorTransparencyEtwEvents::~TypeSecurityDescriptorTransparencyEtwEvents()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, TypeTransparencyComputationEnd))
+ {
+ LPCWSTR module = m_pTSD->m_pMT->GetModule()->GetPathForErrorMessages();
+
+ SString type;
+ if (!IsNilToken(m_pTSD->m_pMT->GetCl()))
+ {
+ TypeString::AppendType(type, TypeHandle(m_pTSD->m_pMT));
+ }
+
+ BOOL fIsAllCritical = FALSE;
+ BOOL fIsAllTransparent = FALSE;
+ BOOL fIsCritical = FALSE;
+ BOOL fIsTreatAsSafe = FALSE;
+
+ EEClass *pClass = m_pTSD->m_pMT->GetClass();
+ if (pClass->HasCriticalTransparentInfo())
+ {
+ fIsAllCritical = pClass->IsAllCritical();
+ fIsAllTransparent = pClass->IsAllTransparent();
+ fIsCritical = pClass->IsCritical();
+ fIsTreatAsSafe = pClass->IsTreatAsSafe();
+ }
+
+ ETW::SecurityLog::FireTypeTransparencyComputationEnd(type.GetUnicode(),
+ module,
+ ::GetAppDomain()->GetId().m_dwId,
+ fIsAllCritical,
+ fIsAllTransparent,
+ fIsCritical,
+ fIsTreatAsSafe);
+ }
+
+}
+
+#endif //!DACCESS_COMPILE
+
+inline ModuleSecurityDescriptorFlags operator|(ModuleSecurityDescriptorFlags lhs,
+ ModuleSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<ModuleSecurityDescriptorFlags>(static_cast<DWORD>(lhs) |
+ static_cast<DWORD>(rhs));
+}
+
+inline ModuleSecurityDescriptorFlags operator|=(ModuleSecurityDescriptorFlags& lhs,
+ ModuleSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<ModuleSecurityDescriptorFlags>(static_cast<DWORD>(lhs) |
+ static_cast<DWORD>(rhs));
+ return lhs;
+}
+
+inline ModuleSecurityDescriptorFlags operator&(ModuleSecurityDescriptorFlags lhs,
+ ModuleSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<ModuleSecurityDescriptorFlags>(static_cast<DWORD>(lhs) &
+ static_cast<DWORD>(rhs));
+}
+
+inline ModuleSecurityDescriptorFlags operator&=(ModuleSecurityDescriptorFlags& lhs,
+ ModuleSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<ModuleSecurityDescriptorFlags>(static_cast<DWORD>(lhs) &
+ static_cast<DWORD>(rhs));
+ return lhs;
+}
+
+inline ModuleSecurityDescriptorFlags operator~(ModuleSecurityDescriptorFlags flags)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<ModuleSecurityDescriptorFlags>(~static_cast<DWORD>(flags));
+}
+
+inline ModuleSecurityDescriptor::ModuleSecurityDescriptor(PTR_Module pModule) :
+ m_pModule(pModule),
+ m_flags(ModuleSecurityDescriptorFlags_None),
+ m_tokenFlags(TokenSecurityDescriptorFlags_None)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pModule);
+}
+
+// static
+inline BOOL ModuleSecurityDescriptor::IsMarkedTransparent(Assembly* pAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ return GetModuleSecurityDescriptor(pAssembly)->IsAllTransparent();
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Override the token flags that would be read from the metadata directly with a
+// precomputed set of flags. This is used by reflection emit to create a dynamic assembly
+// with security attributes given at creation time.
+//
+
+inline void ModuleSecurityDescriptor::OverrideTokenFlags(TokenSecurityDescriptorFlags tokenFlags)
+{
+ CONTRACTL
+ {
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(!(m_flags & ModuleSecurityDescriptorFlags_IsComputed));
+ PRECONDITION(m_tokenFlags == TokenSecurityDescriptorFlags_None);
+ PRECONDITION(CheckPointer(m_pModule));
+ PRECONDITION(m_pModule->GetAssembly()->IsDynamic()); // Token overrides should only be used by reflection
+ }
+ CONTRACTL_END;
+
+ m_tokenFlags = tokenFlags;
+}
+
+inline TokenSecurityDescriptorFlags ModuleSecurityDescriptor::GetTokenFlags()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (m_tokenFlags == TokenSecurityDescriptorFlags_None)
+ {
+ Assembly *pAssembly = m_pModule->GetAssembly();
+ TokenSecurityDescriptor tsd(pAssembly->GetManifestModule(), pAssembly->GetManifestToken());
+ EnsureWritablePages(&m_tokenFlags);
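+        // Only one thread can transition the field away from None; threads that
+        // lose the race fall through and read the winner's (identical) value.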
+ InterlockedCompareExchange(reinterpret_cast<LONG *>(&m_tokenFlags),
+ tsd.GetFlags(),
+ TokenSecurityDescriptorFlags_None);
+ }
+
+ return m_tokenFlags;
+}
+
+inline Module *ModuleSecurityDescriptor::GetModule()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pModule;
+}
+
+#ifdef DACCESS_COMPILE
+inline ModuleSecurityDescriptorFlags ModuleSecurityDescriptor::GetRawFlags()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_flags;
+}
+#endif // DACCESS_COMPILE
+
+inline BOOL ModuleSecurityDescriptor::IsMicrosoftPlatform()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return !!(m_flags & ModuleSecurityDescriptorFlags_IsMicrosoftPlatform);
+}
+
+inline BOOL ModuleSecurityDescriptor::IsAllTransparent()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return !!(m_flags & ModuleSecurityDescriptorFlags_IsAllTransparent);
+}
+
+inline BOOL ModuleSecurityDescriptor::IsAllCritical()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return !!(m_flags & ModuleSecurityDescriptorFlags_IsAllCritical);
+}
+
+inline BOOL ModuleSecurityDescriptor::IsTreatAsSafe()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return !!(m_flags & ModuleSecurityDescriptorFlags_IsTreatAsSafe);
+}
+
+inline BOOL ModuleSecurityDescriptor::IsOpportunisticallyCritical()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return !!(m_flags & ModuleSecurityDescriptorFlags_IsOpportunisticallyCritical);
+}
+
+inline BOOL ModuleSecurityDescriptor::IsAllTransparentDueToPartialTrust()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return !!(m_flags & ModuleSecurityDescriptorFlags_TransparentDueToPartialTrust);
+}
+
+inline BOOL ModuleSecurityDescriptor::IsMixedTransparency()
+{
+ WRAPPER_NO_CONTRACT;
+ return !IsAllCritical() && !IsAllTransparent();
+}
+
+#ifndef FEATURE_CORECLR
+
+inline BOOL ModuleSecurityDescriptor::CanTransparentCodeSkipVerification()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return !!(m_flags & ModuleSecurityDescriptorFlags_SkipFullTrustVerification);
+}
+
+#endif // !FEATURE_CORECLR
+
+#if defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+inline BOOL ModuleSecurityDescriptor::IsAPTCA()
+{
+ WRAPPER_NO_CONTRACT;
+ VerifyDataComputed();
+ return !!(m_flags & ModuleSecurityDescriptorFlags_IsAPTCA);
+}
+#endif // defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+
+// Get the set of security rules that the assembly is using
+inline SecurityRuleSet ModuleSecurityDescriptor::GetSecurityRuleSet()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // If the assembly specified a rule set, then use that. If it's a v2 assembly, then use the v2 rules.
+ // Otherwise, use the default rule set.
+ TokenSecurityDescriptorFlags tokenFlags = GetTokenFlags();
+ if (tokenFlags & TokenSecurityDescriptorFlags_SecurityRules)
+ {
+ return ::GetSecurityRuleSet(tokenFlags);
+ }
+#ifndef FEATURE_CORECLR
+ else if (AssemblyVersionRequiresLegacyTransparency())
+ {
+ return SecurityRuleSet_Level1;
+ }
+#endif // !FEATURE_CORECLR
+ else
+ {
+ // The assembly hasn't specified the rule set that it needs to use, so use the default rule
+ // set unless the environment overrides that with another value.
+ DWORD dwDefaultRuleSet = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Security_DefaultSecurityRuleSet);
+
+ if (dwDefaultRuleSet == 0)
+ {
+ return SecurityRuleSet_Default;
+ }
+ else
+ {
+ return static_cast<SecurityRuleSet>(dwDefaultRuleSet);
+ }
+ }
+}
+
+#ifndef DACCESS_COMPILE
+
+inline ModuleSecurityDescriptor::ModuleSecurityDescriptorTransparencyEtwEvents::ModuleSecurityDescriptorTransparencyEtwEvents(ModuleSecurityDescriptor *pMSD)
+ : m_pMSD(pMSD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, ModuleTransparencyComputationStart))
+ {
+ LPCWSTR module = m_pMSD->m_pModule->GetPathForErrorMessages();
+
+ ETW::SecurityLog::FireModuleTransparencyComputationStart(module,
+ ::GetAppDomain()->GetId().m_dwId);
+ }
+}
+
+inline ModuleSecurityDescriptor::ModuleSecurityDescriptorTransparencyEtwEvents::~ModuleSecurityDescriptorTransparencyEtwEvents()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, ModuleTransparencyComputationEnd))
+ {
+ LPCWSTR module = m_pMSD->m_pModule->GetPathForErrorMessages();
+
+ ETW::SecurityLog::FireModuleTransparencyComputationEnd(module,
+ ::GetAppDomain()->GetId().m_dwId,
+ !!(m_pMSD->m_flags & ModuleSecurityDescriptorFlags_IsAllCritical),
+ !!(m_pMSD->m_flags & ModuleSecurityDescriptorFlags_IsAllTransparent),
+ !!(m_pMSD->m_flags & ModuleSecurityDescriptorFlags_IsTreatAsSafe),
+ !!(m_pMSD->m_flags & ModuleSecurityDescriptorFlags_IsOpportunisticallyCritical),
+ m_pMSD->GetSecurityRuleSet());
+ }
+}
+
+#endif //!DACCESS_COMPILE
+
+inline MethodSecurityDescriptorFlags operator|(MethodSecurityDescriptorFlags lhs,
+ MethodSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<MethodSecurityDescriptorFlags>(static_cast<DWORD>(lhs) |
+ static_cast<DWORD>(rhs));
+}
+
+inline MethodSecurityDescriptorFlags operator|=(MethodSecurityDescriptorFlags& lhs,
+ MethodSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<MethodSecurityDescriptorFlags>(static_cast<DWORD>(lhs) |
+ static_cast<DWORD>(rhs));
+ return lhs;
+}
+
+inline MethodSecurityDescriptorFlags operator&(MethodSecurityDescriptorFlags lhs,
+ MethodSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<MethodSecurityDescriptorFlags>(static_cast<DWORD>(lhs) &
+ static_cast<DWORD>(rhs));
+}
+
+inline MethodSecurityDescriptorFlags operator&=(MethodSecurityDescriptorFlags& lhs,
+ MethodSecurityDescriptorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<MethodSecurityDescriptorFlags>(static_cast<DWORD>(lhs) &
+ static_cast<DWORD>(rhs));
+ return lhs;
+}
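+
+// The overloads above exist because C-style enums don't compose with bitwise
+// operators without explicit casts; providing |, |=, & and &= keeps flag
+// manipulation type-safe and readable. A hypothetical call site (assuming the
+// usual _None sentinel defined alongside these flags):
+//
+// MethodSecurityDescriptorFlags flags = MethodSecurityDescriptorFlags_None;
+// flags |= MethodSecurityDescriptorFlags_IsComputed; // no casts needed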
+
+inline void MethodSecurityDescriptor::VerifyDataComputed()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_flags & MethodSecurityDescriptorFlags_IsComputed)
+ return;
+
+ BOOL canMethSecDescCached = (CanCache() && CanMethodSecurityDescriptorBeCached(m_pMD));
+ if (!canMethSecDescCached)
+ {
+ VerifyDataComputedInternal();
+ }
+ else
+ {
+ LookupOrCreateMethodSecurityDescriptor(this);
+ _ASSERTE(m_flags & MethodSecurityDescriptorFlags_IsComputed);
+ }
+
+ return;
+}
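+
+// VerifyDataComputed follows the same compute-once pattern used elsewhere in
+// this file: if the IsComputed bit is set the work is already done; otherwise
+// either compute in place or go through the shared lookup/create cache when
+// the MethodDesc permits caching. Callers may therefore invoke it repeatedly
+// at negligible cost after the first call.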
+
+#endif // __SECURITYMETA_INL__
diff --git a/src/vm/securitypolicy.cpp b/src/vm/securitypolicy.cpp
new file mode 100644
index 0000000000..3d287b6179
--- /dev/null
+++ b/src/vm/securitypolicy.cpp
@@ -0,0 +1,1267 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#include "common.h"
+
+#include "security.h"
+#include "perfcounters.h"
+#include "eventtrace.h"
+#include "appdomainstack.inl"
+
+#ifndef FEATURE_PAL
+#include <shlobj.h>
+#include <Accctrl.h>
+#include <Aclapi.h>
+#include "urlmon.h"
+#endif // !FEATURE_PAL
+
+#ifndef CROSSGEN_COMPILE
+void *SecurityProperties::operator new(size_t size, LoaderHeap *pHeap)
+{
+ WRAPPER_NO_CONTRACT;
+ return pHeap->AllocMem(S_SIZE_T(size));
+}
+
+void SecurityProperties::operator delete(void *pMem)
+{
+ LIMITED_METHOD_CONTRACT;
+ // No action required
+}
+
+#ifdef FEATURE_CAS_POLICY
+
+// static
+CrstStatic SecurityPolicy::s_crstPolicyInit;
+
+// static
+bool SecurityPolicy::s_fPolicyInitialized = false;
+
+void SecurityPolicy::InitPolicyConfig()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ CrstHolder initializePolicy(&s_crstPolicyInit);
+
+ if (!s_fPolicyInitialized)
+ {
+ // Note: These buffers should be at least as big as the longest possible
+ // string that will be placed into them by the code below.
+ const size_t cchcache = MAX_PATH + sizeof( W("defaultusersecurity.config.cch") ) / sizeof( WCHAR ) + 1;
+ const size_t cchconfig = MAX_PATH + sizeof( W("defaultusersecurity.config.cch") ) / sizeof( WCHAR ) + 1;
+ NewArrayHolder<WCHAR> cache(new WCHAR[cchcache]);
+ NewArrayHolder<WCHAR> config(new WCHAR[cchconfig]);
+
+ HRESULT hr = SecurityConfig::GetMachineDirectory(config, MAX_PATH);
+ if (FAILED(hr))
+ ThrowHR(hr);
+
+ wcscat_s( config, cchconfig, W("security.config") );
+ wcscpy_s( cache, cchcache, config );
+ wcscat_s( cache, cchcache, W(".cch") );
+ SecurityConfig::InitData( SecurityConfig::MachinePolicyLevel, config, cache );
+
+ hr = SecurityConfig::GetMachineDirectory(config, MAX_PATH);
+ if (FAILED(hr))
+ ThrowHR(hr);
+
+ wcscat_s( config, cchconfig, W("enterprisesec.config") );
+ wcscpy_s( cache, cchcache, config );
+ wcscat_s( cache, cchcache, W(".cch") );
+ SecurityConfig::InitData( SecurityConfig::EnterprisePolicyLevel, config, cache );
+
+ BOOL result = SecurityConfig::GetUserDirectory(config, MAX_PATH);
+ if (result) {
+ wcscat_s( config, cchconfig, W("security.config") );
+ wcscpy_s( cache, cchcache, config );
+ wcscat_s( cache, cchcache, W(".cch") );
+ SecurityConfig::InitData( SecurityConfig::UserPolicyLevel, config, cache );
+ }
+
+ s_fPolicyInitialized = true;
+ }
+}
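+
+// For reference, the code above derives each cache file name by appending
+// ".cch" to the corresponding policy file name, so the machine level ends up
+// with a pair like the following (directory shown as a placeholder):
+//
+// <machine config dir>\security.config -- policy file
+// <machine config dir>\security.config.cch -- cache file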
+#endif // FEATURE_CAS_POLICY
+
+void SecurityPolicy::Start()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+ // Make sure we stay in sync with URLMon's zone numbering
+ _ASSERTE(URLZONE_LOCAL_MACHINE == LocalMachine);
+ _ASSERTE(URLZONE_INTRANET == Intranet);
+ _ASSERTE(URLZONE_TRUSTED == Trusted);
+ _ASSERTE(URLZONE_INTERNET == Internet);
+ _ASSERTE(URLZONE_UNTRUSTED == Untrusted);
+#endif // !FEATURE_PAL
+
+#ifdef FEATURE_CAS_POLICY
+ s_crstPolicyInit.Init(CrstSecurityPolicyInit);
+
+ SecurityConfig::Init();
+
+ if (Security::IsProcessWideLegacyCasPolicyEnabled())
+ {
+ SecurityPolicy::InitPolicyConfig();
+ }
+
+ g_pCertificateCache = new CertificateCache();
+#endif // FEATURE_CAS_POLICY
+}
+
+void SecurityPolicy::Stop()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+}
+
+#ifdef FEATURE_CAS_POLICY
+void SecurityPolicy::SaveCache()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ Thread *pThread = GetThread();
+ if (pThread == NULL)
+ {
+ BOOL fRet = FALSE;
+ EX_TRY
+ {
+ // If the CLR is hosted, the host can deny the thread during the SetupThread call.
+ if (IsShutdownSpecialThread())
+ {
+ SetupInternalThread();
+ }
+ else
+ {
+ SetupThread();
+ }
+ }
+ EX_CATCH
+ {
+ fRet = TRUE;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ if (fRet)
+ {
+ return;
+ }
+ }
+
+ SecurityConfig::SaveCacheData( SecurityConfig::MachinePolicyLevel );
+ SecurityConfig::SaveCacheData( SecurityConfig::UserPolicyLevel );
+ SecurityConfig::SaveCacheData( SecurityConfig::EnterprisePolicyLevel );
+
+ SecurityConfig::Cleanup();
+}
+#endif
+
+void QCALLTYPE SecurityPolicy::GetGrantedPermissions(QCall::ObjectHandleOnStack retGranted, QCall::ObjectHandleOnStack retDenied, QCall::StackCrawlMarkHandle stackmark)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ AppDomain* pDomain = NULL;
+
+ Assembly* callerAssembly = SystemDomain::GetCallersAssembly( stackmark, &pDomain );
+ _ASSERTE( callerAssembly != NULL);
+
+ IAssemblySecurityDescriptor* pSecDesc = callerAssembly->GetSecurityDescriptor(pDomain);
+ _ASSERTE( pSecDesc != NULL );
+
+ {
+ GCX_COOP();
+
+ OBJECTREF orDenied;
+ OBJECTREF orGranted = pSecDesc->GetGrantedPermissionSet(&orDenied);
+
+ retGranted.Set(orGranted);
+ retDenied.Set(orDenied);
+ }
+
+ END_QCALL;
+}
+
+#ifdef FEATURE_IMPERSONATION
+FCIMPL0(DWORD, SecurityPolicy::GetImpersonationFlowMode)
+{
+ FCALL_CONTRACT;
+ return (g_pConfig->ImpersonationMode());
+}
+FCIMPLEND
+#endif
+
+void SecurityPolicy::CreateSecurityException(__in_z const char *szDemandClass, DWORD dwFlags, OBJECTREF *pThrowable)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ MAKE_WIDEPTR_FROMUTF8(wszDemandClass, szDemandClass);
+
+ MethodTable * pMT = MscorlibBinder::GetClass(CLASS__SECURITY_EXCEPTION);
+
+#ifdef FEATURE_CAS_POLICY
+ MethodTable * pMTSecPerm = MscorlibBinder::GetClass(CLASS__SECURITY_PERMISSION);
+
+ struct _gc {
+ STRINGREF strDemandClass;
+ OBJECTREF secPerm;
+ STRINGREF strPermState;
+ OBJECTREF secPermType;
+ OBJECTREF secElement;
+ } gc;
+ memset(&gc, 0, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.strDemandClass = StringObject::NewString(wszDemandClass);
+ if (gc.strDemandClass == NULL) COMPlusThrowOM();
+ // Get the type seen by reflection
+ gc.secPermType = pMTSecPerm->GetManagedClassObject();
+ // Allocate the security exception object
+ *pThrowable = AllocateObject(pMT);
+ // Allocate the security permission object
+ gc.secPerm = AllocateObject(pMTSecPerm);
+
+ // Call the constructor with the correct flag
+ MethodDescCallSite ctor(METHOD__SECURITY_PERMISSION__CTOR);
+ ARG_SLOT arg3[2] = {
+ ObjToArgSlot(gc.secPerm),
+ (ARG_SLOT)dwFlags
+ };
+ ctor.Call(arg3);
+
+ // Now, get the ToXml method
+ MethodDescCallSite toXML(METHOD__SECURITY_PERMISSION__TOXML, &gc.secPerm);
+ ARG_SLOT arg4 = ObjToArgSlot(gc.secPerm);
+ gc.secElement = toXML.Call_RetOBJECTREF(&arg4);
+
+ MethodDescCallSite toString(METHOD__SECURITY_ELEMENT__TO_STRING, &gc.secElement);
+ ARG_SLOT arg5 = ObjToArgSlot(gc.secElement);
+ gc.strPermState = toString.Call_RetSTRINGREF(&arg5);
+
+ MethodDescCallSite exceptionCtor(METHOD__SECURITY_EXCEPTION__CTOR);
+
+ ARG_SLOT arg6[4] = {
+ ObjToArgSlot(*pThrowable),
+ ObjToArgSlot(gc.strDemandClass),
+ ObjToArgSlot(gc.secPermType),
+ ObjToArgSlot(gc.strPermState),
+ };
+ exceptionCtor.Call(arg6);
+
+ GCPROTECT_END();
+#else // FEATURE_CAS_POLICY
+
+ UNREFERENCED_PARAMETER(szDemandClass);
+ UNREFERENCED_PARAMETER(dwFlags);
+
+ // Allocate the security exception object
+ *pThrowable = AllocateObject(pMT);
+ CallDefaultConstructor(*pThrowable);
+
+#endif // FEATURE_CAS_POLICY
+}
+
+DECLSPEC_NORETURN void SecurityPolicy::ThrowSecurityException(__in_z const char *szDemandClass, DWORD dwFlags)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ GCX_COOP();
+
+ struct _gc {
+ OBJECTREF throwable;
+ } gc;
+ memset(&gc, 0, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ CreateSecurityException(szDemandClass, dwFlags, &gc.throwable);
+ COMPlusThrow(gc.throwable);
+
+ GCPROTECT_END();
+}
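+
+// A hypothetical call site for the helper above, e.g. when a P/Invoke is
+// attempted without unmanaged-code permission (the class name and flag shown
+// are illustrative; SPFLAGSUNMANAGEDCODE is defined in securitypolicy.h):
+//
+// SecurityPolicy::ThrowSecurityException(
+// "System.Security.Permissions.SecurityPermission",
+// SPFLAGSUNMANAGEDCODE);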
+
+#ifdef FEATURE_CAS_POLICY
+//-----------------------------------------------------------------------------------------
+//
+// Fire an ETW event to indicate that an evidence object has been generated for an assembly
+//
+// Arguments:
+// type - Type of evidence that was generated
+// pPEFile - PEFile for the assembly the evidence was for
+//
+
+// static
+void SecurityPolicy::TraceEvidenceGeneration(EvidenceType type, PEFile *pPEFile)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pPEFile));
+ PRECONDITION(type >= kAssemblySupplied && type <= kZone);
+ }
+ CONTRACTL_END;
+
+ const SString& strPath = pPEFile->GetILimage()->GetPath();
+ FireEtwEvidenceGenerated(type,
+ GetThread()->GetDomain()->GetId().m_dwId,
+ strPath.IsEmpty() ? W("") : strPath.GetUnicode(),
+ GetClrInstanceId());
+}
+
+// Called when CAS policy is not enabled but either a host or a simple sandbox domain will determine
+// the grant set of the given evidence.
+OBJECTREF SecurityPolicy::ResolveGrantSet(OBJECTREF evidence, DWORD *pdwSpecialFlags, BOOL fCheckExecutionPermission)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(!GetAppDomain()->GetSecurityDescriptor()->IsLegacyCasPolicyEnabled());
+ PRECONDITION(CheckPointer(pdwSpecialFlags));
+ }
+ CONTRACTL_END;
+
+ struct
+ {
+ OBJECTREF evidence;
+ OBJECTREF grantSet;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ gc.evidence = evidence;
+
+ GCPROTECT_BEGIN(gc);
+
+ MethodDescCallSite resolve(METHOD__SECURITY_ENGINE__RESOLVE_GRANT_SET);
+
+ ARG_SLOT args[3];
+ args[0] = ObjToArgSlot(gc.evidence);
+ args[1] = PtrToArgSlot(pdwSpecialFlags);
+ args[2] = BoolToArgSlot(fCheckExecutionPermission);
+
+ gc.grantSet = resolve.Call_RetOBJECTREF(args);
+
+ GCPROTECT_END();
+
+ return gc.grantSet;
+}
+
+// Resolve legacy CAS policy
+OBJECTREF SecurityPolicy::ResolveCasPolicy(OBJECTREF evidence,
+ OBJECTREF reqdPset,
+ OBJECTREF optPset,
+ OBJECTREF denyPset,
+ OBJECTREF* grantdenied,
+ DWORD* dwSpecialFlags,
+ BOOL checkExecutionPermission)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(GetAppDomain()->GetSecurityDescriptor()->IsLegacyCasPolicyEnabled());
+ PRECONDITION(SecurityPolicy::s_fPolicyInitialized);
+ PRECONDITION(CheckPointer(dwSpecialFlags));
+ } CONTRACTL_END;
+
+
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ // If we got here, then we are going to do at least one security
+ // check. Make sure security is initialized.
+
+ struct _gc {
+ OBJECTREF reqdPset; // Required Requested Permissions
+ OBJECTREF optPset; // Optional Requested Permissions
+ OBJECTREF denyPset; // Denied Permissions
+ OBJECTREF evidence; // Object containing evidence
+ OBJECTREF refRetVal;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ gc.evidence = evidence;
+ gc.reqdPset = reqdPset;
+ gc.denyPset = denyPset;
+ gc.optPset = optPset;
+
+ GCPROTECT_BEGIN(gc);
+
+ MethodDescCallSite resolvePolicy(METHOD__SECURITY_MANAGER__RESOLVE_CAS_POLICY);
+
+ ARG_SLOT args[7];
+ args[0] = ObjToArgSlot(gc.evidence);
+ args[1] = ObjToArgSlot(gc.reqdPset);
+ args[2] = ObjToArgSlot(gc.optPset);
+ args[3] = ObjToArgSlot(gc.denyPset);
+ args[4] = PtrToArgSlot(grantdenied);
+ args[5] = PtrToArgSlot(dwSpecialFlags);
+ args[6] = BoolToArgSlot(checkExecutionPermission);
+
+ {
+ // Elevate thread's allowed loading level. This can cause load failures if assemblies loaded from this point on require
+ // any assemblies currently being loaded.
+ OVERRIDE_LOAD_LEVEL_LIMIT(FILE_ACTIVE);
+ // call policy resolution routine in managed code
+ gc.refRetVal = resolvePolicy.Call_RetOBJECTREF(args);
+ }
+
+ GCPROTECT_END();
+ return gc.refRetVal;
+}
+#endif // FEATURE_CAS_POLICY
+
+#endif // CROSSGEN_COMPILE
+
+BOOL SecurityPolicy::CanSkipVerification(DomainAssembly * pAssembly)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pAssembly));
+ } CONTRACTL_END;
+
+ BOOL canSkipVerification = TRUE;
+ if (!pAssembly->IsSystem())
+ {
+ AssemblySecurityDescriptor *pSec;
+ {
+ GCX_COOP();
+ pSec = static_cast<AssemblySecurityDescriptor*>(pAssembly->GetSecurityDescriptor());
+ }
+ _ASSERTE(pSec);
+ if (pSec)
+ {
+ canSkipVerification = pSec->CanSkipVerification();
+ }
+ else
+ {
+ canSkipVerification = FALSE;
+ }
+ }
+
+ return canSkipVerification;
+}
+
+BOOL SecurityPolicy::CanCallUnmanagedCode(Module *pModule)
+{
+ CONTRACTL {
+ THROWS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pModule));
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ SharedSecurityDescriptor *pSharedSecDesc = static_cast<SharedSecurityDescriptor*>(pModule->GetAssembly()->GetSharedSecurityDescriptor());
+ if (pSharedSecDesc)
+ return pSharedSecDesc->CanCallUnmanagedCode();
+
+ AssemblySecurityDescriptor *pSec = static_cast<AssemblySecurityDescriptor*>(pModule->GetSecurityDescriptor());
+ _ASSERTE(pSec);
+ return pSec->CanCallUnmanagedCode();
+}
+
+#ifndef CROSSGEN_COMPILE
+
+#ifdef FEATURE_CAS_POLICY
+SecZone QCALLTYPE SecurityPolicy::CreateFromUrl(LPCWSTR wszUrl)
+{
+ QCALL_CONTRACT;
+
+ SecZone dwZone = NoZone;
+
+ BEGIN_QCALL;
+
+ if (wszUrl != NULL)
+ {
+ dwZone = SecurityPolicy::MapUrlToZone(wszUrl);
+ }
+
+ END_QCALL;
+
+ return dwZone;
+}
+
+HRESULT
+GetSecurityPolicyRegKey(
+ __out WCHAR **ppszSecurityPolicy)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return E_OUTOFMEMORY);
+ }
+ CONTRACTL_END;
+
+ DWORD dwLen = 0;
+
+ HRESULT hr = g_pCLRRuntime->GetVersionString(NULL, &dwLen);
+ if (hr != HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ return hr;
+
+ size_t bufSize = _countof(FRAMEWORK_REGISTRY_KEY_W) + 1 + dwLen + _countof(KEY_COM_SECURITY_POLICY);
+ NewArrayHolder<WCHAR> key(new(nothrow) WCHAR[bufSize]);
+ if (key == NULL)
+ return E_OUTOFMEMORY;
+ wcscpy_s(key, bufSize, FRAMEWORK_REGISTRY_KEY_W W("\\"));
+
+ hr = g_pCLRRuntime->GetVersionString(key + NumItems(FRAMEWORK_REGISTRY_KEY_W), &dwLen);
+ if (FAILED(hr))
+ return hr;
+
+ size_t offset = _countof(FRAMEWORK_REGISTRY_KEY_W)+dwLen-1;
+ wcscpy_s(key + offset, bufSize - offset, KEY_COM_SECURITY_POLICY);
+ key.SuppressRelease();
+ *ppszSecurityPolicy = key;
+ return S_OK;
+} // GetSecurityPolicyRegKey
+
+HRESULT SecurityPolicy::ApplyCustomZoneOverride(SecZone *pdwZone)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(*pdwZone >= NumZones);
+ INJECT_FAULT(return E_OUTOFMEMORY);
+ }
+ CONTRACTL_END;
+
+ NewArrayHolder<WCHAR> key(NULL);
+ HRESULT hr = GetSecurityPolicyRegKey(&key);
+ if (FAILED(hr))
+ return hr;
+ if (REGUTIL::GetLong(KEY_COM_SECURITY_ZONEOVERRIDE, 0, key, HKEY_POLICY_ROOT) == 1)
+ *pdwZone=Internet;
+ return S_OK;
+} // ApplyCustomZoneOverride
+
+//---------------------------------------------------------------------------------------
+//
+// Determine which security zone a URL belongs to
+//
+// Arguments:
+// wszUrl - URL to get zone information about
+//
+// Return Value:
+// Security zone the URL belongs to
+//
+// Notes:
+// If the runtime cannot map the URL, we'll return NoZone. A mapping to a zone that the VM doesn't
+// know about will cause us to check the TreatCustomZonesAsInternetZone registry key and potentially
+// map it back to the Internet zone.
+//
+
+// static
+SecZone SecurityPolicy::MapUrlToZone(__in_z LPCWSTR wszUrl)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(wszUrl != NULL);
+ }
+ CONTRACTL_END;
+
+ SecZone dwZone = NoZone;
+
+ ReleaseHolder<IInternetSecurityManager> securityManager = NULL;
+ HRESULT hr = CoInternetCreateSecurityManager(NULL, &securityManager, 0);
+
+ if (SUCCEEDED(hr))
+ {
+ _ASSERTE(sizeof(SecZone) == sizeof(DWORD));
+ hr = securityManager->MapUrlToZone(wszUrl, reinterpret_cast<DWORD *>(&dwZone), 0);
+
+ if (SUCCEEDED(hr))
+ {
+ // if this is a custom zone, see if the user wants us to map it back to the Internet zone
+ if (dwZone >= NumZones)
+ {
+ SecZone dwMappedZone = dwZone;
+ hr = ApplyCustomZoneOverride(&dwMappedZone);
+ if (SUCCEEDED(hr))
+ {
+ dwZone = dwMappedZone;
+ }
+ }
+ }
+ else
+ {
+ dwZone = NoZone;
+ }
+ }
+
+ return dwZone;
+}
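+
+// Illustrative use of MapUrlToZone (hypothetical caller; error handling elided):
+//
+// SecZone zone = SecurityPolicy::MapUrlToZone(W("http://example.com/app.exe"));
+// if (zone == NoZone)
+// { /* the URL could not be mapped */ }
+// else if (zone >= NumZones)
+// { /* custom zone the user chose not to fold back to the Internet zone */ }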
+#endif //FEATURE_CAS_POLICY
+
+BOOL QCALLTYPE SecurityPolicy::IsLocalDrive(LPCWSTR wszPath)
+{
+ QCALL_CONTRACT;
+
+ BOOL retVal = FALSE;
+
+#ifndef FEATURE_PAL
+ BEGIN_QCALL;
+
+ WCHAR rootPath[4];
+ ZeroMemory( rootPath, sizeof( rootPath ) );
+
+ rootPath[0] = wszPath[0];
+ wcscat_s( rootPath, COUNTOF(rootPath), W(":\\") );
+
+ UINT driveType = WszGetDriveType( rootPath );
+ retVal =
+ (driveType == DRIVE_REMOVABLE ||
+ driveType == DRIVE_FIXED ||
+ driveType == DRIVE_CDROM ||
+ driveType == DRIVE_RAMDISK);
+
+ END_QCALL;
+
+#else // !FEATURE_PAL
+ retVal = TRUE;
+#endif // !FEATURE_PAL
+
+ return retVal;
+}
+
+void QCALLTYPE SecurityPolicy::_GetLongPathName(LPCWSTR wszPath, QCall::StringHandleOnStack retLongPath)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+#if !defined(PLATFORM_UNIX)
+ WCHAR wszBuffer[MAX_PATH + 1];
+ ZeroMemory(wszBuffer, sizeof(wszBuffer));
+
+ if (SecurityPolicy::GetLongPathNameHelper( wszPath, wszBuffer, MAX_PATH ) != 0)
+ {
+ retLongPath.Set( wszBuffer );
+ }
+#endif // !PLATFORM_UNIX
+
+ END_QCALL;
+}
+
+#if !defined(PLATFORM_UNIX)
+size_t SecurityPolicy::GetLongPathNameHelper( const WCHAR* wszShortPath, __inout_ecount(cchBuffer) __inout_z WCHAR* wszBuffer, DWORD cchBuffer )
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ DWORD size = WszGetLongPathName(wszShortPath, wszBuffer, cchBuffer);
+
+ if (size == 0)
+ {
+ // We have to deal with files that do not exist, so the fact that
+ // GetLongPathName gives us nothing for the full path doesn't mean
+ // we can give up. We iterate through the input, trying
+ // GetLongPathName on successively shorter parent-directory
+ // prefixes until it succeeds or we run out of string.
+
+ WCHAR wszIntermediateBuffer[MAX_PATH];
+
+ if (wcslen( wszShortPath ) >= MAX_PATH)
+ return 0;
+
+ wcscpy_s( wszIntermediateBuffer, COUNTOF(wszIntermediateBuffer), wszShortPath );
+
+ size_t index = wcslen( wszIntermediateBuffer );
+
+ do
+ {
+ while (index > 0 && (wszIntermediateBuffer[index-1] != W('\\') && wszIntermediateBuffer[index-1] != W('/')))
+ --index;
+
+ if (index == 0)
+ break;
+
+ #ifdef _PREFAST_
+ #pragma prefast(push)
+ #pragma prefast(disable:26001, "suppress prefast warning about underflow by doing index-1 which is checked above.")
+ #endif // _PREFAST_
+
+ wszIntermediateBuffer[index-1] = W('\0');
+
+ #ifdef _PREFAST_
+ #pragma prefast(pop)
+ #endif
+
+ size = WszGetLongPathName(wszIntermediateBuffer, wszBuffer, MAX_PATH);
+
+ if (size != 0)
+ {
+ size_t sizeBuffer = wcslen( wszBuffer );
+
+ if (sizeBuffer + wcslen( &wszIntermediateBuffer[index] ) > MAX_PATH - 2)
+ {
+ return 0;
+ }
+ else
+ {
+ if (wszBuffer[sizeBuffer-1] != W('\\') && wszBuffer[sizeBuffer-1] != W('/'))
+ wcscat_s( wszBuffer, cchBuffer, W("\\") );
+ wcscat_s( wszBuffer, cchBuffer, &wszIntermediateBuffer[index] );
+ return (DWORD)wcslen( wszBuffer );
+ }
+ }
+ }
+ while( true );
+
+ return 0;
+ }
+ else if (size > MAX_PATH)
+ {
+ return 0;
+ }
+ else
+ {
+ return wcslen( wszBuffer );
+ }
+}
+#endif // !PLATFORM_UNIX
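+
+// Worked example of the fallback path above (hypothetical input): for a
+// nonexistent file such as W("C:\\PROGRA~1\\missing.txt"), WszGetLongPathName
+// fails on the full path, so the helper retries on the shorter prefix
+// W("C:\\PROGRA~1"), which succeeds with W("C:\\Program Files"), and then
+// reattaches the trailing component to return W("C:\\Program Files\\missing.txt").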
+
+void QCALLTYPE SecurityPolicy::GetDeviceName(LPCWSTR wszDriveLetter, QCall::StringHandleOnStack retDeviceName)
+{
+ QCALL_CONTRACT;
+
+#if !defined(FEATURE_CORECLR)
+ BEGIN_QCALL;
+
+ WCHAR networkName[MAX_PATH];
+ DWORD networkNameSize = MAX_PATH;
+ ZeroMemory( networkName, sizeof( networkName ) );
+
+ UINT driveType = WszGetDriveType( wszDriveLetter );
+ if (driveType == DRIVE_REMOVABLE ||
+ driveType == DRIVE_FIXED ||
+ driveType == DRIVE_CDROM ||
+ driveType == DRIVE_RAMDISK)
+ {
+ retDeviceName.Set( wszDriveLetter );
+ goto lExit;
+ }
+
+ if (WszWNetGetConnection(wszDriveLetter, networkName, &networkNameSize) != NO_ERROR)
+ {
+ goto lExit;
+ }
+
+ retDeviceName.Set( networkName );
+
+lExit: ;
+
+ END_QCALL;
+#endif // !FEATURE_CORECLR
+}
+
+#ifdef FEATURE_CAS_POLICY
+
+//
+// Fire the ETW event that signals that a specific type of evidence has been created
+//
+// Arguments:
+// pPEFile - PEFile the evidence was generated for
+// type - type of evidence generated
+//
+
+// static
+void QCALLTYPE SecurityPolicy::FireEvidenceGeneratedEvent(PEFile *pPEFile,
+ EvidenceType type)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pPEFile));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ TraceEvidenceGeneration(type, pPEFile);
+
+ END_QCALL;
+}
+
+// static
+void QCALLTYPE SecurityPolicy::GetEvidence(QCall::AssemblyHandle pAssembly, QCall::ObjectHandleOnStack retEvidence)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ IAssemblySecurityDescriptor *pSecDesc = pAssembly->GetSecurityDescriptor();
+
+ _ASSERTE(pSecDesc->GetDomain() == GetAppDomain());
+
+ GCX_COOP();
+ if (pSecDesc->IsEvidenceComputed())
+ retEvidence.Set(pSecDesc->GetAdditionalEvidence());
+ else
+ retEvidence.Set(pSecDesc->GetEvidence());
+
+ END_QCALL;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Determine if an evidence collection has a delay generated strong name evidence object
+// which was used during the process of demand evaluation.
+//
+// Arguments:
+// orEvidence - evidence collection to examine
+//
+// Return Value:
+// true if orEvidence contains unverified strong name evidence which has been used to generate a grant,
+// false if orEvidence does not contain strong name evidence or that evidence was verified / not used
+//
+
+// static
+BOOL SecurityPolicy::WasStrongNameEvidenceUsed(OBJECTREF orEvidence)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // If we don't have any evidence, then there isn't any strong name evidence, and therefore it couldn't
+ // have been used.
+ if (orEvidence == NULL)
+ {
+ return FALSE;
+ }
+
+ BOOL fStrongNameEvidenceWasUsed = FALSE;
+
+ GCPROTECT_BEGIN(orEvidence);
+
+ MethodDescCallSite wasSnEvidenceUsed(METHOD__EVIDENCE__WAS_STRONGNAME_EVIDENCE_USED);
+
+ ARG_SLOT args[] = { ObjToArgSlot(orEvidence) };
+ fStrongNameEvidenceWasUsed = !!wasSnEvidenceUsed.Call_RetBool(args);
+
+ GCPROTECT_END();
+
+ return fStrongNameEvidenceWasUsed;
+}
+#endif // FEATURE_CAS_POLICY
+
+FCIMPL0(void, SecurityPolicy::IncrementOverridesCount)
+{
+ FCALL_CONTRACT;
+
+ Thread *pThread = GetThread();
+ pThread->IncrementOverridesCount();
+}
+FCIMPLEND
+
+FCIMPL0(void, SecurityPolicy::DecrementOverridesCount)
+{
+ FCALL_CONTRACT;
+
+ Thread *pThread = GetThread();
+ pThread->DecrementOverridesCount();
+}
+FCIMPLEND
+
+FCIMPL0(void, SecurityPolicy::IncrementAssertCount)
+{
+ FCALL_CONTRACT;
+
+ Thread *pThread = GetThread();
+ pThread->IncrementAssertCount();
+}
+FCIMPLEND
+
+FCIMPL0(void, SecurityPolicy::DecrementAssertCount)
+{
+ FCALL_CONTRACT;
+
+ Thread *pThread = GetThread();
+ pThread->DecrementAssertCount();
+}
+FCIMPLEND
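+
+// The four FCalls above maintain per-thread counters that the demand stack walk
+// consults: a nonzero override or assert count means the walk cannot take its
+// fully-trusted fast path and must examine individual frames. The managed side
+// is expected to pair the increments and decrements around the corresponding
+// security operation (e.g. an Assert), though that pairing lives in mscorlib,
+// not here.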
+
+#ifdef FEATURE_CAS_POLICY
+//
+// Evidence QCalls
+//
+
+//---------------------------------------------------------------------------------------
+//
+// Get the assembly level permission requests
+//
+// Arguments:
+// pAssembly - Assembly to get the declarative security of
+// retMinimumPermissions - [out] RequestMinimum set of the assembly
+// retOptionalPermissions - [out] RequestOptional set of the assembly
+// retRefusedPermissions - [out] RequestRefuse set of the assembly
+//
+
+// static
+void QCALLTYPE SecurityPolicy::GetAssemblyPermissionRequests(QCall::AssemblyHandle pAssembly,
+ QCall::ObjectHandleOnStack retMinimumPermissions,
+ QCall::ObjectHandleOnStack retOptionalPermissions,
+ QCall::ObjectHandleOnStack retRefusedPermissions)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+
+ TraceEvidenceGeneration(kPermissionRequest, pAssembly->GetFile());
+ AssemblySecurityDescriptor *pSecurityDescriptor = static_cast<AssemblySecurityDescriptor*>(pAssembly->GetSecurityDescriptor());
+
+ _ASSERTE(pSecurityDescriptor->GetDomain()->GetSecurityDescriptor()->IsLegacyCasPolicyEnabled());
+
+ struct
+ {
+ OBJECTREF objMinimumPermissions;
+ OBJECTREF objOptionalPermissions;
+ OBJECTREF objRefusedPermissions;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCX_COOP();
+ GCPROTECT_BEGIN(gc);
+
+ gc.objMinimumPermissions = pSecurityDescriptor->GetRequestedPermissionSet(&gc.objOptionalPermissions,
+ &gc.objRefusedPermissions);
+
+ retMinimumPermissions.Set(gc.objMinimumPermissions);
+ retOptionalPermissions.Set(gc.objOptionalPermissions);
+ retRefusedPermissions.Set(gc.objRefusedPermissions);
+
+ GCPROTECT_END();
+
+ END_QCALL;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Get the serialized evidence stream from an assembly
+//
+// Arguments:
+// pPEFile - PEFile to load the evidence stream from
+// retSerializedEvidence - [out] contents of the serialized evidence
+//
+
+// static
+void QCALLTYPE SecurityPolicy::GetAssemblySuppliedEvidence(PEFile *pPEFile,
+ QCall::ObjectHandleOnStack retSerializedEvidence)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pPEFile));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ DWORD cbResource;
+ BYTE *pbResource;
+
+ // Load the resource from the PE file. We do not need to free this memory, since we're getting a direct
+ // pointer into the PE contents rather than a buffer.
+ TraceEvidenceGeneration(kAssemblySupplied, pPEFile);
+ BOOL fFoundSerializedEvidence = pPEFile->GetResource("Security.Evidence",
+ &cbResource,
+ &pbResource,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ FALSE,
+ TRUE,
+ NULL,
+ NULL);
+
+ if (fFoundSerializedEvidence)
+ {
+ retSerializedEvidence.SetByteArray(pbResource, cbResource);
+ }
+
+ END_QCALL;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Get the zone and URL that the PEFile was loaded from
+//
+// Arguments:
+// pPEFile - PEFile to load the evidence stream from
+// pZone - [out] SecurityZone the file was loaded from
+// retUrl - [out] URL the file was loaded from
+
+// static
+void QCALLTYPE SecurityPolicy::GetLocationEvidence(PEFile *pPEFile,
+ SecZone *pZone,
+ QCall::StringHandleOnStack retUrl)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pPEFile));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ StackSString ssCodeBase;
+ BYTE pbUniqueID[MAX_SIZE_SECURITY_ID];
+ DWORD cbUniqueID = COUNTOF(pbUniqueID);
+
+ // The location information is used to create Site, Url, and Zone evidence so fire all three events
+ TraceEvidenceGeneration(kSite, pPEFile);
+ TraceEvidenceGeneration(kUrl, pPEFile);
+ TraceEvidenceGeneration(kZone, pPEFile);
+
+ pPEFile->GetSecurityIdentity(ssCodeBase, pZone, 0, pbUniqueID, &cbUniqueID);
+
+ retUrl.Set(ssCodeBase);
+
+ END_QCALL;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Get the X.509 certificate that the PE file's Authenticode signature was created with
+//
+// Arguments:
+// pPEFile - PEFile to load the evidence stream from
+// retCertificate - [out] certificate that signed the file
+
+// static
+void QCALLTYPE SecurityPolicy::GetPublisherCertificate(PEFile *pPEFile,
+ QCall::ObjectHandleOnStack retCertificate)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pPEFile));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ TraceEvidenceGeneration(kPublisher, pPEFile);
+ COR_TRUST *pAuthenticodeSignature = pPEFile->GetAuthenticodeSignature();
+ if (pAuthenticodeSignature != NULL && pAuthenticodeSignature->pbSigner != NULL)
+ {
+ retCertificate.SetByteArray(pAuthenticodeSignature->pbSigner, pAuthenticodeSignature->cbSigner);
+ }
+
+ END_QCALL;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Get the components of an assembly's strong name to generate strong name evidence with
+//
+// Arguments:
+// pAssembly - assembly to get the strong name of
+// retPublicKeyBlob - [out] public component of the key the assembly is signed with
+// retSimpleName - [out] simple name of the file
+// piMajorVersion - [out] major version
+// piMinorVersion - [out] minor version
+// piBuild - [out] build
+// piRevision - [out] revision
+//
+// Notes:
+// retPublicKeyBlob will be null for a simply named assembly
+//
+
+// static
+void QCALLTYPE SecurityPolicy::GetStrongNameInformation(QCall::AssemblyHandle pAssembly,
+ QCall::ObjectHandleOnStack retPublicKeyBlob,
+ QCall::StringHandleOnStack retSimpleName,
+ USHORT *piMajorVersion,
+ USHORT *piMinorVersion,
+ USHORT *piBuild,
+ USHORT *piRevision)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(piMajorVersion));
+ PRECONDITION(CheckPointer(piMinorVersion));
+ PRECONDITION(CheckPointer(piBuild));
+ PRECONDITION(CheckPointer(piRevision));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ PEAssembly *pPEAssembly = pAssembly->GetFile();
+ TraceEvidenceGeneration(kStrongName, pPEAssembly);
+
+ DWORD cbPublicKey;
+ const BYTE *pbPublicKey = reinterpret_cast<const BYTE*>(pPEAssembly->GetPublicKey(&cbPublicKey));
+
+ if (pbPublicKey != NULL && cbPublicKey > 0)
+ {
+ pPEAssembly->GetVersion(piMajorVersion, piMinorVersion, piBuild, piRevision);
+ retPublicKeyBlob.SetByteArray(pbPublicKey, cbPublicKey);
+ retSimpleName.Set(pPEAssembly->GetSimpleName());
+ }
+ else
+ {
+ GCX_COOP();
+ retPublicKeyBlob.Set(NULL);
+ }
+
+ END_QCALL;
+}
+
+#endif // FEATURE_CAS_POLICY
+
+#ifdef FEATURE_FUSION
+static void GetFusionNameFromAssemblyQualifiedTypeName(LPCWSTR pAssemblyQualifedTypeName, IAssemblyName ** ppFusionName)
+{
+ STANDARD_VM_CONTRACT;
+
+ StackSString ssAssemblyQualifedTypeName(pAssemblyQualifedTypeName);
+ StackSString ssAssemblyName;
+
+ SString::Iterator iter = ssAssemblyQualifedTypeName.Begin();
+
+ if (ssAssemblyQualifedTypeName.Find( iter, ',' ))
+ {
+ iter++;
+ while (*iter == ' ' )
+ iter++;
+
+ ssAssemblyName.Set( ssAssemblyQualifedTypeName,
+ iter,
+ ssAssemblyQualifedTypeName.End() );
+ }
+
+ StackScratchBuffer sBuffer;
+ AssemblySpec spec;
+ spec.Init(ssAssemblyName.GetANSI(sBuffer));
+
+ IfFailThrow(spec.CreateFusionName(ppFusionName));
+}
+#endif // FEATURE_FUSION
+
+BOOL QCALLTYPE SecurityPolicy::IsSameType(LPCWSTR pLeft, LPCWSTR pRight)
+{
+ QCALL_CONTRACT;
+
+ BOOL bEqual = FALSE;
+
+ BEGIN_QCALL;
+
+// @telesto: Is this #ifdef-#else-#endif required anymore? It used to be needed when security bypassed the
+// loader and accessed Fusion interfaces directly. That seems to have been fixed to use GetFusionNameFrom...
+#ifdef FEATURE_FUSION
+
+ AppDomain* pDomain = GetAppDomain();
+ IApplicationContext* pAppCtx = pDomain->GetFusionContext();
+
+ _ASSERTE( pAppCtx != NULL && "Fusion context not setup yet" );
+
+ SafeComHolderPreemp<IAssemblyName> pAssemblyNameLeft;
+ SafeComHolderPreemp<IAssemblyName> pAssemblyNameRight;
+
+ GetFusionNameFromAssemblyQualifiedTypeName(pLeft, &pAssemblyNameLeft);
+ GetFusionNameFromAssemblyQualifiedTypeName(pRight, &pAssemblyNameRight);
+
+ SafeComHolderPreemp<IAssemblyName> pAssemblyNamePostPolicyLeft;
+ SafeComHolderPreemp<IAssemblyName> pAssemblyNamePostPolicyRight;
+
+ if (FAILED(PreBindAssembly(pAppCtx, pAssemblyNameLeft, NULL, &pAssemblyNamePostPolicyLeft, NULL)) ||
+ FAILED(PreBindAssembly(pAppCtx, pAssemblyNameRight, NULL, &pAssemblyNamePostPolicyRight, NULL)))
+ {
+ // version-agnostic comparison.
+ bEqual = pAssemblyNameLeft->IsEqual(pAssemblyNameRight, ASM_CMPF_NAME | ASM_CMPF_PUBLIC_KEY_TOKEN | ASM_CMPF_CULTURE) == S_OK;
+ }
+ else
+ {
+ // version-agnostic comparison.
+ bEqual = pAssemblyNamePostPolicyLeft->IsEqual(pAssemblyNamePostPolicyRight, ASM_CMPF_NAME | ASM_CMPF_PUBLIC_KEY_TOKEN | ASM_CMPF_CULTURE) == S_OK;
+ }
+#else // FEATURE_FUSION
+ bEqual=TRUE;
+#endif // FEATURE_FUSION
+
+ END_QCALL;
+
+ return bEqual;
+}
+
+FCIMPL1(FC_BOOL_RET, SecurityPolicy::SetThreadSecurity, CLR_BOOL fThreadSecurity)
+{
+ FCALL_CONTRACT;
+
+ Thread* pThread = GetThread();
+ BOOL inProgress = pThread->IsSecurityStackwalkInProgess();
+ pThread->SetSecurityStackwalkInProgress(fThreadSecurity);
+ FC_RETURN_BOOL(inProgress);
+}
+FCIMPLEND
+
+FCIMPL0(FC_BOOL_RET, SecurityPolicy::IsDefaultThreadSecurityInfo)
+{
+ FCALL_CONTRACT;
+
+ FC_RETURN_BOOL(SecurityStackWalk::HasFlagsOrFullyTrusted(0));
+}
+FCIMPLEND
+
+#endif // CROSSGEN_COMPILE
diff --git a/src/vm/securitypolicy.h b/src/vm/securitypolicy.h
new file mode 100644
index 0000000000..42f9410ae6
--- /dev/null
+++ b/src/vm/securitypolicy.h
@@ -0,0 +1,350 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef __SECURITYPOLICY_H__
+#define __SECURITYPOLICY_H__
+
+#include "crst.h"
+#include "objecthandle.h"
+#include "securityattributes.h"
+#include "securitydeclarativecache.h"
+#include "declsec.h"
+#include "fcall.h"
+#include "qcall.h"
+#include "cgensys.h"
+#include "rwlock.h"
+
+#define SPFLAGSASSERTION 0x01
+#define SPFLAGSUNMANAGEDCODE 0x02
+#define SPFLAGSSKIPVERIFICATION 0x04
+
+#define CORSEC_STACKWALK_HALTED 0x00000001 // Stack walk was halted
+#define CORSEC_FT_ASSERT 0x00000004 // Hit a FT-assert during the stackwalk
+
+// Forward declarations to avoid pulling in too many headers.
+class Frame;
+class FramedMethodFrame;
+class ClassLoader;
+class Thread;
+class CrawlFrame;
+class SystemNative;
+class NDirect;
+class SystemDomain;
+class AssemblySecurityDescriptor;
+#ifndef FEATURE_CORECLR
+class PEFileSecurityDescriptor;
+#endif
+class SharedSecurityDescriptor;
+class SecurityStackWalkData;
+class DemandStackWalk;
+class SecurityDescriptor;
+class COMPrincipal;
+
+#define CLR_CASOFF_MUTEX W("Global\\CLR_CASOFF_MUTEX")
+
+// This enumeration must be kept in sync with the managed System.Security.Policy.EvidenceTypeGenerated enum
+typedef enum
+{
+ kAssemblySupplied, // Evidence supplied by the assembly itself
+ kGac, // System.Security.Policy.GacInstalled
+ kHash, // System.Security.Policy.Hash
+ kPermissionRequest, // System.Security.Policy.PermissionRequestEvidence
+ kPublisher, // System.Security.Policy.Publisher
+ kSite, // System.Security.Policy.Site
+ kStrongName, // System.Security.Policy.StrongName
+ kUrl, // System.Security.Policy.Url
+ kZone // System.Security.Policy.Zone
+}
+EvidenceType;
+
+namespace SecurityPolicy
+{
+ // -----------------------------------------------------------
+ // FCalls and QCalls
+ // -----------------------------------------------------------
+
+ BOOL QCALLTYPE IsSameType(LPCWSTR pLeft, LPCWSTR pRight);
+
+ FCDECL1(FC_BOOL_RET, SetThreadSecurity, CLR_BOOL fThreadSecurity);
+
+ void QCALLTYPE GetGrantedPermissions(QCall::ObjectHandleOnStack retGranted, QCall::ObjectHandleOnStack retDenied, QCall::StackCrawlMarkHandle stackmark);
+
+
+#ifdef FEATURE_IMPERSONATION
+ FCDECL0(DWORD, GetImpersonationFlowMode);
+#endif // #ifdef FEATURE_IMPERSONATION
+
+ FCDECL0(FC_BOOL_RET, IsDefaultThreadSecurityInfo);
+#ifdef FEATURE_CAS_POLICY
+ SecZone QCALLTYPE CreateFromUrl(LPCWSTR wszUrl);
+#endif // FEATURE_CAS_POLICY
+ void QCALLTYPE _GetLongPathName(LPCWSTR wszPath, QCall::StringHandleOnStack retLongPath);
+
+ BOOL QCALLTYPE IsLocalDrive(LPCWSTR wszPath);
+
+ void QCALLTYPE GetDeviceName(LPCWSTR wszDriveLetter, QCall::StringHandleOnStack retDeviceName);
+
+ FCDECL0(VOID, IncrementOverridesCount);
+
+ FCDECL0(VOID, DecrementOverridesCount);
+
+ FCDECL0(VOID, IncrementAssertCount);
+
+ FCDECL0(VOID, DecrementAssertCount);
+
+#ifdef FEATURE_CAS_POLICY
+ //
+ // Evidence QCalls
+ //
+
+//public:
+ void QCALLTYPE FireEvidenceGeneratedEvent(PEFile *pPEFile, EvidenceType type);
+
+ void QCALLTYPE GetEvidence(QCall::AssemblyHandle pAssembly, QCall::ObjectHandleOnStack retEvidence);
+
+ void QCALLTYPE GetAssemblyPermissionRequests(QCall::AssemblyHandle pAssembly,
+ QCall::ObjectHandleOnStack retMinimumPermissions,
+ QCall::ObjectHandleOnStack retOptionalPermissions,
+ QCall::ObjectHandleOnStack retRefusedPermissions);
+
+ void QCALLTYPE GetAssemblySuppliedEvidence(PEFile *pPEFile, QCall::ObjectHandleOnStack retSerializedEvidence);
+
+ void QCALLTYPE GetLocationEvidence(PEFile *pPEFile, SecZone *pZone, QCall::StringHandleOnStack retUrl);
+
+ void QCALLTYPE GetPublisherCertificate(PEFile *pPEFile, QCall::ObjectHandleOnStack retCertificate);
+
+ void QCALLTYPE GetStrongNameInformation(QCall::AssemblyHandle pAssembly,
+ QCall::ObjectHandleOnStack retPublicKeyBlob,
+ QCall::StringHandleOnStack retSimpleName,
+ USHORT *piMajorVersion,
+ USHORT *piMinorVersion,
+ USHORT *piBuild,
+ USHORT *piRevision);
+#endif // FEATURE_CAS_POLICY
+
+//private:
+ // -----------------------------------------------------------
+ // Init methods
+ // -----------------------------------------------------------
+
+ // Calls all the security-related init methods
+ // Callers:
+ // EEStartupHelper
+ void Start();
+
+ // Calls all the security-related shutdown methods
+ // Callers:
+ // <currently unused> @TODO: shouldn't EEShutDownHelper call this?
+ void Stop();
+
+#ifdef FEATURE_CAS_POLICY
+ // Saves security cache data
+ // Callers:
+ // EEShutDownHelper
+ void SaveCache();
+#endif
+
+
+ // -----------------------------------------------------------
+ // Policy
+ // -----------------------------------------------------------
+
+ // Returns TRUE if the assembly has permission to call unmanaged code
+ // Callers:
+ // CEEInfo::getNewHelper
+ // MakeStubWorker
+ // MethodDesc::DoPrestub
+ BOOL CanCallUnmanagedCode(Module *pModule);
+
+ // Throws a security exception
+ // Callers:
+ // JIT_SecurityUnmanagedCodeException
+ void CreateSecurityException(__in_z const char *szDemandClass, DWORD dwFlags, OBJECTREF* pThrowable);
+ DECLSPEC_NORETURN void ThrowSecurityException(__in_z const char *szDemandClass, DWORD dwFlags);
+
+ BOOL CanSkipVerification(DomainAssembly * pAssembly);
+
+#ifdef FEATURE_CAS_POLICY
+ void TraceEvidenceGeneration(EvidenceType type, PEFile *pPEFile);
+
+ // Map a URL to a zone, applying any user supplied policy
+ SecZone MapUrlToZone(__in_z LPCWSTR wszUrl);
+
+ // Apply user supplied policy to a zone
+ HRESULT ApplyCustomZoneOverride(SecZone *pdwZone);
+
+ // Determine what the grant set of an assembly is
+ OBJECTREF ResolveGrantSet(OBJECTREF evidence, OUT DWORD *pdwSpecialFlags, BOOL fCheckExecutionPermission);
+
+ // Resolve legacy CAS policy on the assembly
+ // Callers:
+ // SecurityDescriptor::ResolveWorker
+ OBJECTREF ResolveCasPolicy(OBJECTREF evidence,
+ OBJECTREF reqdPset,
+ OBJECTREF optPset,
+ OBJECTREF denyPset,
+ OBJECTREF* grantdenied,
+ DWORD* pdwSpecialFlags,
+ BOOL checkExecutionPermission);
+
+ // Load the policy config/cache files at EE startup
+ void InitPolicyConfig();
+
+ BOOL WasStrongNameEvidenceUsed(OBJECTREF evidence);
+#endif
+ // Like WszGetLongPathName, but it works with nonexistent files too
+ size_t GetLongPathNameHelper( const WCHAR* wszShortPath, __inout_ecount(cchBuffer) __inout_z WCHAR* wszBuffer, DWORD cchBuffer );
+
+#ifdef FEATURE_CAS_POLICY
+ extern CrstStatic s_crstPolicyInit;
+ extern bool s_fPolicyInitialized;
+#endif // FEATURE_CAS_POLICY
+}
+
+struct SharedPermissionObjects
+{
+ OBJECTHANDLE hPermissionObject; // Commonly used Permission Object
+ BinderClassID idClass; // ID of class
+ BinderMethodID idConstructor; // ID of constructor to call
+ DWORD dwPermissionFlag; // Flag needed by the constructors (Only a single argument is assumed)
+};
+
+/******** Shared Permission Objects related constants *******/
+#define NUM_PERM_OBJECTS (sizeof(g_rPermObjectsTemplate) / sizeof(SharedPermissionObjects))
+
+// Constants to use with SecurityPermission
+#define SECURITY_PERMISSION_ASSERTION 1 // SecurityPermission.cs
+#define SECURITY_PERMISSION_UNMANAGEDCODE 2 // SecurityPermission.cs
+#define SECURITY_PERMISSION_SKIPVERIFICATION 4 // SecurityPermission.cs
+#define SECURITY_PERMISSION_CONTROLEVIDENCE 0x20 // SecurityPermission.cs
+#define SECURITY_PERMISSION_SERIALIZATIONFORMATTER 0X80 // SecurityPermission.cs
+#define SECURITY_PERMISSION_CONTROLPRINCIPAL 0x200 // SecurityPermission.cs
+#define SECURITY_PERMISSION_BINDINGREDIRECTS 0X2000 // SecurityPermission.cs
+
+// Constants to use with ReflectionPermission
+#define REFLECTION_PERMISSION_TYPEINFO 1 // ReflectionPermission.cs
+#define REFLECTION_PERMISSION_MEMBERACCESS 2 // ReflectionPermission.cs
+#define REFLECTION_PERMISSION_RESTRICTEDMEMBERACCESS 8 // ReflectionPermission.cs
+
+// PermissionState.Unrestricted
+#define PERMISSION_STATE_UNRESTRICTED 1 // PermissionState.cs
+
+SELECTANY const SharedPermissionObjects g_rPermObjectsTemplate[] =
+{
+ {NULL, CLASS__SECURITY_PERMISSION, METHOD__SECURITY_PERMISSION__CTOR, SECURITY_PERMISSION_UNMANAGEDCODE },
+ {NULL, CLASS__SECURITY_PERMISSION, METHOD__SECURITY_PERMISSION__CTOR, SECURITY_PERMISSION_SKIPVERIFICATION },
+ {NULL, CLASS__REFLECTION_PERMISSION, METHOD__REFLECTION_PERMISSION__CTOR, REFLECTION_PERMISSION_TYPEINFO },
+ {NULL, CLASS__SECURITY_PERMISSION, METHOD__SECURITY_PERMISSION__CTOR, SECURITY_PERMISSION_ASSERTION },
+ {NULL, CLASS__REFLECTION_PERMISSION, METHOD__REFLECTION_PERMISSION__CTOR, REFLECTION_PERMISSION_MEMBERACCESS },
+ {NULL, CLASS__SECURITY_PERMISSION, METHOD__SECURITY_PERMISSION__CTOR, SECURITY_PERMISSION_SERIALIZATIONFORMATTER},
+ {NULL, CLASS__REFLECTION_PERMISSION, METHOD__REFLECTION_PERMISSION__CTOR, REFLECTION_PERMISSION_RESTRICTEDMEMBERACCESS},
+ {NULL, CLASS__PERMISSION_SET, METHOD__PERMISSION_SET__CTOR, PERMISSION_STATE_UNRESTRICTED},
+ {NULL, CLASS__SECURITY_PERMISSION, METHOD__SECURITY_PERMISSION__CTOR, SECURITY_PERMISSION_BINDINGREDIRECTS },
+ {NULL, CLASS__UI_PERMISSION, METHOD__UI_PERMISSION__CTOR, PERMISSION_STATE_UNRESTRICTED },
+};
+
+// Array index in SharedPermissionObjects array
+// Note: these should all be permissions that implement IUnrestrictedPermission.
+// Any changes to these must be reflected in bcl\system\security\codeaccesssecurityengine.cs and the above table
+
+// special flags
+#define SECURITY_UNMANAGED_CODE 0
+#define SECURITY_SKIP_VER 1
+#define REFLECTION_TYPE_INFO 2
+#define SECURITY_ASSERT 3
+#define REFLECTION_MEMBER_ACCESS 4
+#define SECURITY_SERIALIZATION 5
+#define REFLECTION_RESTRICTED_MEMBER_ACCESS 6
+#define SECURITY_FULL_TRUST 7
+#define SECURITY_BINDING_REDIRECTS 8
+
+// special permissions
+#define UI_PERMISSION 9
+#define ENVIRONMENT_PERMISSION 10
+#define FILEDIALOG_PERMISSION 11
+#define FILEIO_PERMISSION 12
+#define REFLECTION_PERMISSION 13
+#define SECURITY_PERMISSION 14
+
+// additional special flags
+#define SECURITY_CONTROL_EVIDENCE 16
+#define SECURITY_CONTROL_PRINCIPAL 17
+
+// Objects corresponding to the above index could be Permission or PermissionSet objects.
+// Helper macro to identify which kind it is. If you're adding to the index above, please update this also.
+#define IS_SPECIAL_FLAG_PERMISSION_SET(x) ((x) == SECURITY_FULL_TRUST)
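+
+// Example (illustrative): IS_SPECIAL_FLAG_PERMISSION_SET(SECURITY_FULL_TRUST)
+// yields true because index 7 in the table caches a full PermissionSet, while
+// IS_SPECIAL_FLAG_PERMISSION_SET(SECURITY_ASSERT) yields false because index 3
+// caches a single SecurityPermission object.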
+
+// Class holding a grab bag of security stuff we need on a per-appdomain basis.
+struct SecurityContext
+{
+ SharedPermissionObjects m_rPermObjects[NUM_PERM_OBJECTS];
+
+ // Cached declarative permissions per method
+ EEPtrHashTable m_pCachedMethodPermissionsHash;
+ SimpleRWLock * m_prCachedMethodPermissionsLock;
+ SecurityDeclarativeCache m_pSecurityDeclarativeCache;
+ size_t m_nCachedPsetsSize;
+
+ SecurityContext(LoaderHeap* pHeap) :
+ m_prCachedMethodPermissionsLock(NULL),
+ m_nCachedPsetsSize(0)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+ memcpy(m_rPermObjects, g_rPermObjectsTemplate, sizeof(m_rPermObjects));
+
+ // initialize cache of method-level declarative security permissions
+ // Note that the method-level permissions are stored elsewhere
+ m_prCachedMethodPermissionsLock = new SimpleRWLock(PREEMPTIVE, LOCK_TYPE_DEFAULT);
+ if (!m_pCachedMethodPermissionsHash.Init(100, &g_lockTrustMeIAmThreadSafe))
+ ThrowOutOfMemory();
+
+ m_pSecurityDeclarativeCache.Init (pHeap);
+ }
+
+ ~SecurityContext()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ // no need to explicitly delete the cache contents, since they will be deallocated with the AppDomain's heap
+ if (m_prCachedMethodPermissionsLock) delete m_prCachedMethodPermissionsLock;
+ }
+};
+
+#ifdef _DEBUG
+
+#define DBG_TRACE_METHOD(cf) \
+ do { \
+ MethodDesc * __pFunc = cf -> GetFunction(); \
+ if (__pFunc) { \
+ LOG((LF_SECURITY, LL_INFO1000, \
+ " Method: %s.%s\n", \
+ (__pFunc->m_pszDebugClassName == NULL) ? \
+ "<null>" : __pFunc->m_pszDebugClassName, \
+ __pFunc->GetName())); \
+ } \
+ } while (false)
+
+#define DBG_TRACE_STACKWALK(msg, verbose) LOG((LF_SECURITY, (verbose) ? LL_INFO10000 : LL_INFO1000, msg))
+#else //_DEBUG
+
+#define DBG_TRACE_METHOD(cf)
+#define DBG_TRACE_STACKWALK(msg, verbose)
+
+#endif //_DEBUG
+
+
+#endif // __SECURITYPOLICY_H__
diff --git a/src/vm/securityprincipal.cpp b/src/vm/securityprincipal.cpp
new file mode 100644
index 0000000000..9ddb64984c
--- /dev/null
+++ b/src/vm/securityprincipal.cpp
@@ -0,0 +1,238 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//+--------------------------------------------------------------------------
+//
+// Microsoft Confidential.
+//
+//---------------------------------------------------------------------------
+//
+
+//
+
+
+#include "common.h"
+
+#include "securityprincipal.h"
+#include "corhost.h"
+#include "security.h"
+
+#ifndef FEATURE_CORECLR
+INT32 QCALLTYPE COMPrincipal::ImpersonateLoggedOnUser(HANDLE hToken)
+{
+ QCALL_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_QCALL;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSecurityManager *pSM = CorHost2::GetHostSecurityManager();
+ if (pSM) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pSM->ImpersonateLoggedOnUser(hToken);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ if (!::ImpersonateLoggedOnUser(hToken))
+ hr = HRESULT_FROM_GetLastError();
+ }
+
+ STRESS_LOG2(LF_SECURITY, LL_INFO100, "COMPrincipal::ImpersonateLoggedOnUser called with hTokenSAFE = %d. Returning 0x%x\n",hToken,hr);
+
+ END_QCALL;
+
+ return hr;
+}
+
+FCIMPL3(INT32, COMPrincipal::OpenThreadToken, DWORD dwDesiredAccess, DWORD dwOpenAs, SafeHandle** phThreadTokenUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(phThreadTokenUNSAFE));
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ SafeHandle** phThreadTokenSAFE = phThreadTokenUNSAFE;
+ GCPROTECT_BEGININTERIOR(phThreadTokenSAFE);
+
+ *phThreadTokenUNSAFE = NULL;
+ HandleHolder hThreadToken;
+ {
+ GCX_PREEMP();
+ BOOL bOpenAsSelf = TRUE;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSecurityManager *pSM = CorHost2::GetHostSecurityManager();
+ if (pSM) {
+ if (dwOpenAs == WINSECURITYCONTEXT_THREAD)
+ bOpenAsSelf = FALSE;
+
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pSM->OpenThreadToken(dwDesiredAccess, bOpenAsSelf, &hThreadToken);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr) && dwOpenAs == WINSECURITYCONTEXT_BOTH) {
+ bOpenAsSelf = FALSE;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pSM->OpenThreadToken(dwDesiredAccess, bOpenAsSelf, &hThreadToken);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ if (dwOpenAs == WINSECURITYCONTEXT_THREAD)
+ bOpenAsSelf = FALSE;
+
+ if (!::OpenThreadToken(::GetCurrentThread(), dwDesiredAccess, bOpenAsSelf, &hThreadToken)) {
+ if (dwOpenAs == WINSECURITYCONTEXT_BOTH) {
+ bOpenAsSelf = FALSE;
+ hr = S_OK;
+ if (!::OpenThreadToken(::GetCurrentThread(), dwDesiredAccess, bOpenAsSelf, &hThreadToken))
+ hr = HRESULT_FROM_GetLastError();
+ }
+ else
+ hr = HRESULT_FROM_GetLastError();
+ }
+ }
+ }
+
+ if (SUCCEEDED(hr)) {
+ struct _gc {
+ SAFEHANDLE pSafeTokenHandle;
+ } gc;
+ gc.pSafeTokenHandle = NULL;
+
+ GCPROTECT_BEGIN(gc);
+ // Allocate a SafeHandle here
+ MethodTable *pMT = MscorlibBinder::GetClass(CLASS__SAFE_TOKENHANDLE);
+ gc.pSafeTokenHandle = (SAFEHANDLE) AllocateObject(pMT);
+ CallDefaultConstructor(gc.pSafeTokenHandle);
+ gc.pSafeTokenHandle->SetHandle((void*) hThreadToken);
+ hThreadToken.SuppressRelease();
+
+ SetObjectReference((OBJECTREF*) phThreadTokenSAFE, (OBJECTREF) gc.pSafeTokenHandle, gc.pSafeTokenHandle->GetAppDomain());
+ GCPROTECT_END();
+ }
+
+ GCPROTECT_END();
+
+ HELPER_METHOD_FRAME_END();
+ return hr;
+}
+FCIMPLEND
+
+INT32 QCALLTYPE COMPrincipal::RevertToSelf()
+{
+ QCALL_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_QCALL;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSecurityManager *pSM = CorHost2::GetHostSecurityManager();
+ if (pSM) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pSM->RevertToSelf();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ if (!::RevertToSelf())
+ hr = HRESULT_FROM_GetLastError();
+ }
+
+ STRESS_LOG1(LF_SECURITY, LL_INFO100, "COMPrincipal::RevertToSelf returning 0x%x\n",hr);
+
+ END_QCALL;
+
+ return hr;
+}
+
+INT32 QCALLTYPE COMPrincipal::SetThreadToken(HANDLE hToken)
+{
+ QCALL_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_QCALL;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSecurityManager *pSM = CorHost2::GetHostSecurityManager();
+ if (pSM)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pSM->SetThreadToken(hToken);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ if (!::SetThreadToken(NULL, hToken))
+ hr = HRESULT_FROM_GetLastError();
+ }
+
+ END_QCALL;
+
+ return hr;
+}
+#endif // !FEATURE_CORECLR
+
+void COMPrincipal::CLR_ImpersonateLoggedOnUser(HANDLE hToken)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ {
+ GCX_PREEMP();
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSecurityManager *pSM = CorHost2::GetHostSecurityManager();
+ if (pSM) {
+ hr = pSM->RevertToSelf();
+ if (hr != S_OK)
+ {
+ // FailFast
+ STRESS_LOG2(LF_EH, LL_INFO100, "CLR_ImpersonateLoggedOnUser failed for hImpersonateToken = %d with error:0x%x\n",hToken, hr);
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY);
+ }
+ if (hToken != NULL)
+ hr = pSM->ImpersonateLoggedOnUser(hToken);
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ if (!::RevertToSelf())
+ hr = HRESULT_FROM_GetLastError();
+ if (hr != S_OK)
+ {
+ // FailFast
+ STRESS_LOG2(LF_EH, LL_INFO100, "CLR_ImpersonateLoggedOnUser failed for hImpersonateToken = %d with error:0x%x\n",hToken, hr);
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY);
+ }
+ if (hToken != NULL && !::ImpersonateLoggedOnUser(hToken))
+ hr = HRESULT_FROM_GetLastError();
+ }
+
+ if (hr != S_OK)
+ {
+ // FailFast
+ STRESS_LOG2(LF_EH, LL_INFO100, "CLR_ImpersonateLoggedOnUser failed for hImpersonateToken = %d with error:0x%x\n",hToken, hr);
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY);
+ }
+ }
+
+ return;
+}
+
diff --git a/src/vm/securityprincipal.h b/src/vm/securityprincipal.h
new file mode 100644
index 0000000000..223b0ca739
--- /dev/null
+++ b/src/vm/securityprincipal.h
@@ -0,0 +1,41 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//+--------------------------------------------------------------------------
+//
+// Microsoft Confidential.
+//
+//---------------------------------------------------------------------------
+//
+
+//
+
+
+
+#pragma once
+
+// keep in sync with windowsidentity.cs
+#define WINSECURITYCONTEXT_THREAD 1
+#define WINSECURITYCONTEXT_PROCESS 2
+#define WINSECURITYCONTEXT_BOTH 3
+
+
+
+class COMPrincipal
+{
+public:
+ static
+ INT32 QCALLTYPE ImpersonateLoggedOnUser(HANDLE hToken);
+
+ static FCDECL3(INT32, OpenThreadToken, DWORD dwDesiredAccess, DWORD dwOpenAs, SafeHandle** phThreadTokenUNSAFE);
+
+ static
+ INT32 QCALLTYPE RevertToSelf();
+
+ static
+ INT32 QCALLTYPE SetThreadToken(HANDLE hToken);
+
+ static void CLR_ImpersonateLoggedOnUser(HANDLE hToken);
+};
diff --git a/src/vm/securitystackwalk.cpp b/src/vm/securitystackwalk.cpp
new file mode 100644
index 0000000000..1fbfb5b9c7
--- /dev/null
+++ b/src/vm/securitystackwalk.cpp
@@ -0,0 +1,2440 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+
+
+#include "common.h"
+
+#include "security.h"
+#include "perfcounters.h"
+#include "stackcompressor.h"
+#ifdef FEATURE_REMOTING
+#include "crossdomaincalls.h"
+#else
+#include "callhelpers.h"
+#endif
+#include "appdomain.inl"
+#include "appdomainstack.inl"
+
+COUNTER_ONLY(PERF_COUNTER_TIMER_PRECISION g_TotalTimeInSecurityRuntimeChecks = 0);
+COUNTER_ONLY(PERF_COUNTER_TIMER_PRECISION g_LastTimeInSecurityRuntimeChecks = 0);
+COUNTER_ONLY(UINT32 g_SecurityChecksIterations=0);
+
+bool SecurityStackWalk::IsSpecialRunFrame(MethodDesc* pMeth)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+#ifndef FEATURE_CORECLR
+ if (pMeth == MscorlibBinder::GetMethod(METHOD__EXECUTIONCONTEXT__RUN))
+ return true;
+
+#if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+ if (pMeth == MscorlibBinder::GetMethod(METHOD__SECURITYCONTEXT__RUN))
+ return true;
+#endif // #if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+
+#ifdef FEATURE_COMPRESSEDSTACK
+ if (pMeth == MscorlibBinder::GetMethod(METHOD__COMPRESSED_STACK__RUN))
+ return true;
+#endif // FEATURE_COMPRESSEDSTACK
+
+#endif // !FEATURE_CORECLR
+
+ return false;
+}
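+
+// NOTE (illustrative, not part of the original sources): stack walkers use
+// this predicate to stop at *.Run frames, where the captured CompressedStack
+// (rather than the physical stack below the frame) must be evaluated:
+//
+//   if (pFunc != NULL && SecurityStackWalk::IsSpecialRunFrame(pFunc))
+//       return SWA_ABORT;   // see DemandStackWalk::WalkFrame below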
+
+void SecurityStackWalk::CheckPermissionAgainstGrants(OBJECTREF refCS, OBJECTREF refGrants, OBJECTREF refRefused, AppDomain *pDomain, MethodDesc* pMethod, Assembly* pAssembly)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ struct _gc {
+ OBJECTREF orCS;
+ OBJECTREF orGranted;
+ OBJECTREF orRefused;
+ OBJECTREF orDemand;
+ OBJECTREF orToken;
+ OBJECTREF orAssembly;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ gc.orCS = refCS;
+ gc.orGranted = refGrants;
+ gc.orRefused = refRefused;
+
+ GCPROTECT_BEGIN(gc);
+
+ // Switch into the destination context if necessary.
+ ENTER_DOMAIN_PTR(pDomain,ADV_RUNNINGIN) //have it on the stack
+ {
+ // Fetch input objects that might originate from a different appdomain,
+ // marshalling if necessary.
+ gc.orDemand = m_objects.GetObjects(pDomain, &gc.orToken);
+ if(pAssembly)
+ gc.orAssembly = pAssembly->GetExposedObject();
+
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__SECURITY_ENGINE__CHECK_HELPER);
+
+ DECLARE_ARGHOLDER_ARRAY(helperArgs, 8);
+ helperArgs[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.orCS);
+ helperArgs[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(gc.orGranted);
+ helperArgs[ARGNUM_2] = OBJECTREF_TO_ARGHOLDER(gc.orRefused);
+ helperArgs[ARGNUM_3] = OBJECTREF_TO_ARGHOLDER(gc.orDemand);
+ helperArgs[ARGNUM_4] = OBJECTREF_TO_ARGHOLDER(gc.orToken);
+ helperArgs[ARGNUM_5] = PTR_TO_ARGHOLDER(pMethod);
+ helperArgs[ARGNUM_6] = OBJECTREF_TO_ARGHOLDER(gc.orAssembly);
+ helperArgs[ARGNUM_7] = DWORD_TO_ARGHOLDER(dclDemand);
+
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD_NORET(helperArgs);
+
+ }
+ END_DOMAIN_TRANSITION;
+
+ GCPROTECT_END();
+}
+
+
+void SecurityStackWalk::CheckSetAgainstGrants(OBJECTREF refCS, OBJECTREF refGrants, OBJECTREF refRefused, AppDomain *pDomain, MethodDesc* pMethod, Assembly* pAssembly)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ struct _gc {
+ OBJECTREF orCS;
+ OBJECTREF orGranted;
+ OBJECTREF orRefused;
+ OBJECTREF orDemand;
+ OBJECTREF orAssembly;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ gc.orCS = refCS;
+ gc.orGranted = refGrants;
+ gc.orRefused = refRefused;
+
+ GCPROTECT_BEGIN(gc);
+
+ // Switch into the destination context if necessary.
+ ENTER_DOMAIN_PTR(pDomain,ADV_RUNNINGIN) //have it on the stack
+ {
+ // Fetch input objects that might originate from a different appdomain,
+ // marshalling if necessary.
+ gc.orDemand = m_objects.GetObject(pDomain);
+ if(pAssembly)
+ gc.orAssembly = pAssembly->GetExposedObject();
+
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__SECURITY_ENGINE__CHECK_SET_HELPER);
+
+ DECLARE_ARGHOLDER_ARRAY(helperArgs, 7);
+ helperArgs[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.orCS);
+ helperArgs[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(gc.orGranted);
+ helperArgs[ARGNUM_2] = OBJECTREF_TO_ARGHOLDER(gc.orRefused);
+ helperArgs[ARGNUM_3] = OBJECTREF_TO_ARGHOLDER(gc.orDemand);
+ helperArgs[ARGNUM_4] = PTR_TO_ARGHOLDER(pMethod);
+ helperArgs[ARGNUM_5] = OBJECTREF_TO_ARGHOLDER(gc.orAssembly);
+ helperArgs[ARGNUM_6] = DWORD_TO_ARGHOLDER(dclDemand);
+
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD_NORET(helperArgs);
+ }
+ END_DOMAIN_TRANSITION;
+
+ GCPROTECT_END();
+}
+
+void SecurityStackWalk::GetZoneAndOriginGrants(OBJECTREF refCS, OBJECTREF refGrants, OBJECTREF refRefused, AppDomain *pDomain, MethodDesc* pMethod, Assembly* pAssembly)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ Thread *pThread = GetThread();
+
+ struct _gc {
+ OBJECTREF orCS;
+ OBJECTREF orGranted;
+ OBJECTREF orRefused;
+ OBJECTREF orZoneList;
+ OBJECTREF orOriginList;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ gc.orCS = refCS;
+ gc.orGranted = refGrants;
+ gc.orRefused = refRefused;
+
+ GCPROTECT_BEGIN(gc);
+
+ // Fetch input objects that might originate from a different appdomain,
+ // marshalling if necessary.
+ gc.orZoneList = m_objects.GetObjects(pDomain, &gc.orOriginList);
+
+ // Switch into the destination context if necessary.
+ ENTER_DOMAIN_PTR(pDomain,ADV_RUNNINGIN) //have it on the stack
+ {
+
+ BOOL inProgress = pThread->IsSecurityStackwalkInProgess();
+
+ // We turn the security-stackwalk-in-progress flag off, which turns security
+ // back on for the thread. This means that if the managed call throws an
+ // exception we are already in the proper state, so we don't need to do anything.
+
+ if (inProgress)
+ pThread->SetSecurityStackwalkInProgress(FALSE);
+
+ MethodDescCallSite getZoneAndOriginHelper(METHOD__SECURITY_ENGINE__GET_ZONE_AND_ORIGIN_HELPER);
+
+ ARG_SLOT helperArgs[5];
+
+ helperArgs[0] = ObjToArgSlot(gc.orCS);
+ helperArgs[1] = ObjToArgSlot(gc.orGranted);
+ helperArgs[2] = ObjToArgSlot(gc.orRefused);
+ helperArgs[3] = ObjToArgSlot(gc.orZoneList);
+ helperArgs[4] = ObjToArgSlot(gc.orOriginList);
+
+ getZoneAndOriginHelper.Call(&(helperArgs[0]));
+
+ if (inProgress)
+ pThread->SetSecurityStackwalkInProgress(TRUE);
+ }
+ END_DOMAIN_TRANSITION;
+
+ GCPROTECT_END();
+}
+
+BOOL SecurityStackWalk::CheckPermissionAgainstFrameData(OBJECTREF refFrameData, AppDomain* pDomain, MethodDesc* pMethod)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ CLR_BOOL ret = FALSE;
+
+ struct _gc {
+ OBJECTREF orFrameData;
+ OBJECTREF orDemand;
+ OBJECTREF orToken;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ gc.orFrameData = refFrameData;
+
+ GCPROTECT_BEGIN(gc);
+
+ // Fetch input objects that might originate from a different appdomain,
+ // marshalling if necessary.
+ gc.orDemand = m_objects.GetObjects(pDomain, &gc.orToken);
+
+ // Switch into the destination context if necessary.
+ ENTER_DOMAIN_PTR(pDomain,ADV_RUNNINGIN) //have it on the stack
+ {
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__SECURITY_RUNTIME__FRAME_DESC_HELPER);
+
+ DECLARE_ARGHOLDER_ARRAY(args, 4);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.orFrameData); // arg 0
+ args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(gc.orDemand); // arg 1
+ args[ARGNUM_2] = OBJECTREF_TO_ARGHOLDER(gc.orToken); // arg 2
+ args[ARGNUM_3] = PTR_TO_ARGHOLDER(pMethod); // arg 3
+
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD(ret, CLR_BOOL, args);
+
+ }
+ END_DOMAIN_TRANSITION;
+
+ GCPROTECT_END();
+
+ return ret;
+}
+
+BOOL SecurityStackWalk::CheckSetAgainstFrameData(OBJECTREF refFrameData, AppDomain* pDomain, MethodDesc* pMethod)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ CLR_BOOL ret = FALSE;
+
+ struct _gc {
+ OBJECTREF orFrameData;
+ OBJECTREF orDemand;
+ OBJECTREF orPermSetOut;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ gc.orFrameData = refFrameData;
+
+ GCPROTECT_BEGIN(gc);
+
+ // Fetch input objects that might originate from a different appdomain,
+ // marshalling if necessary.
+ gc.orDemand = m_objects.GetObject(pDomain);
+
+ // Switch into the destination context if necessary.
+ ENTER_DOMAIN_PTR(pDomain,ADV_RUNNINGIN) //have it on the stack
+ {
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__SECURITY_RUNTIME__FRAME_DESC_SET_HELPER);
+
+ DECLARE_ARGHOLDER_ARRAY(args, 4);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.orFrameData);
+ args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(gc.orDemand);
+ args[ARGNUM_2] = PTR_TO_ARGHOLDER(&gc.orPermSetOut);
+ args[ARGNUM_3] = PTR_TO_ARGHOLDER(pMethod);
+
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD(ret, CLR_BOOL, args);
+
+ if (gc.orPermSetOut != NULL) {
+ // Update the cached object.
+ m_objects.UpdateObject(pDomain, gc.orPermSetOut);
+ }
+ }
+ END_DOMAIN_TRANSITION;
+
+ GCPROTECT_END();
+
+ return ret;
+}
+
+
+// -------------------------------------------------------------------------------
+//
+// DemandStackWalk
+//
+// -------------------------------------------------------------------------------
+
+class DemandStackWalk : public SecurityStackWalk
+{
+public:
+ enum DemandType
+ {
+ DT_PERMISSION = 1,
+ DT_SET = 2,
+ DT_ZONE_AND_URL = 3,
+ };
+
+protected:
+ Frame* m_pCtxTxFrame;
+ AppDomain * m_pPrevAppDomain;
+ AppDomain* m_pSkipAppDomain;
+ Assembly * m_pPrevAssembly;
+ StackCrawlMark * m_pStackMark;
+ DemandType m_eDemandType;
+ bool m_bHaveFoundStartingFrameYet;
+ BOOL m_bFoundStackMark;
+ DWORD m_dwdemandFlags;
+ DWORD m_adStackIndex;
+ AppDomainStack* m_pThreadADStack;
+
+public:
+ DemandStackWalk(SecurityStackWalkType eType, DWORD flags, StackCrawlMark* stackMark, DemandType eDemandType, DWORD demandFlags)
+ : SecurityStackWalk(eType, flags)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_pCtxTxFrame = NULL;
+ m_pPrevAppDomain = NULL;
+ m_pSkipAppDomain = NULL;
+ m_pPrevAssembly = NULL;
+ m_eDemandType = eDemandType;
+ m_bHaveFoundStartingFrameYet = false;
+ m_pStackMark = stackMark;
+ m_bFoundStackMark = FALSE;
+ m_dwdemandFlags = demandFlags;
+ m_pThreadADStack = GetThread()->GetAppDomainStackPointer();
+ m_pThreadADStack->InitDomainIteration(&m_adStackIndex);
+ }
+
+ void DoStackWalk();
+ StackWalkAction WalkFrame(CrawlFrame* pCf);
+
+protected:
+ bool IsStartingFrame(CrawlFrame* pCf);
+ bool IsSpecialRunFrame(MethodDesc* pMeth)
+ {
+ return SecurityStackWalk::IsSpecialRunFrame(pMeth);
+ }
+ void CheckGrant(OBJECTREF refCS, OBJECTREF refGrants, OBJECTREF refRefused, AppDomain *pDomain, MethodDesc* pMethod, Assembly* pAssembly);
+ BOOL CheckFrame(OBJECTREF refFrameData, AppDomain* pDomain, MethodDesc* pMethod);
+
+private:
+ FORCEINLINE BOOL QuickCheck(OBJECTREF refCS, OBJECTREF refGrants, OBJECTREF refRefused)
+ {
+ if (refCS == NULL && refRefused == NULL && refGrants != NULL)
+ {
+ // if we have a FT grant and nothing else, and our demand is for something that FT can satisfy, we're done
+ PERMISSIONSETREF permSetRef = (PERMISSIONSETREF)refGrants;
+ return permSetRef->IsUnrestricted();
+ }
+ return FALSE;
+ }
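+
+    // NOTE (illustrative, not part of the original sources): the fast path
+    // above fires exactly when the grant is a single unrestricted ("FullTrust")
+    // PermissionSet with no compressed stack and no refused set:
+    //
+    //   QuickCheck(NULL,   ftGrant, NULL)    -> TRUE  (demand satisfied)
+    //   QuickCheck(someCS, ftGrant, NULL)    -> FALSE (the CS must be evaluated)
+    //   QuickCheck(NULL,   ftGrant, refused) -> FALSE (refusals must be checked)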
+ void ProcessAppDomainTransition(AppDomain * pAppDomain, bool bCheckPrevAppDomain);
+#ifdef _DEBUG
+ BOOL IsValidReturnFromWalkFrame(StackWalkAction retVal, CrawlFrame* pCF)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ // This function checks that when we hit a Special frame, we are indeed returning the action to stop the stackwalk
+ MethodDesc *pFunc = pCF->GetFunction();
+ if (pFunc != NULL && IsSpecialRunFrame(pFunc))
+ {
+ return (retVal == SWA_ABORT);
+ }
+ return TRUE;
+ }
+#endif // _DEBUG
+
+#ifdef FEATURE_COMPRESSEDSTACK
+ BOOL CheckAnonymouslyHostedDynamicMethodCompressedStack(OBJECTREF refDynamicResolver, AppDomain* pDomain, MethodDesc* pMethod);
+ BOOL CheckAnonymouslyHostedDynamicMethodCompressedStackPermission(OBJECTREF refDynamicResolver, AppDomain* pDomain, MethodDesc* pMethod);
+ BOOL CheckAnonymouslyHostedDynamicMethodCompressedStackPermissionSet(OBJECTREF refDynamicResolver, AppDomain* pDomain, MethodDesc* pMethod);
+#endif // FEATURE_COMPRESSEDSTACK
+};
+
+void DemandStackWalk::CheckGrant(OBJECTREF refCS, OBJECTREF refGrants, OBJECTREF refRefused, AppDomain *pDomain, MethodDesc* pMethod, Assembly* pAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ switch(m_eDemandType)
+ {
+ case DT_PERMISSION:
+ // Test early out scenario (quickcheck) before calling into managed code
+ if (!QuickCheck(refCS, refGrants, refRefused))
+ CheckPermissionAgainstGrants(refCS, refGrants, refRefused, pDomain, pMethod, pAssembly);
+ break;
+
+ case DT_SET:
+ // Test early out scenario (quickcheck) before calling into managed code
+ if (!QuickCheck(refCS, refGrants, refRefused))
+ CheckSetAgainstGrants(refCS, refGrants, refRefused, pDomain, pMethod, pAssembly);
+ break;
+ case DT_ZONE_AND_URL:
+ GetZoneAndOriginGrants(refCS, refGrants, refRefused, pDomain, pMethod, pAssembly);
+ break;
+ default:
+ _ASSERTE(!"unexpected demand type");
+ break;
+ }
+}
+
+BOOL DemandStackWalk::CheckFrame(OBJECTREF refFrameData, AppDomain* pDomain, MethodDesc* pMethod)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ switch(m_eDemandType)
+ {
+ case DT_PERMISSION:
+ return CheckPermissionAgainstFrameData(refFrameData, pDomain, pMethod);
+
+ case DT_SET:
+ return CheckSetAgainstFrameData(refFrameData, pDomain, pMethod);
+ case DT_ZONE_AND_URL:
+ return TRUE; //Nothing to do here since CS cannot live on a Frame anymore.
+ default:
+ _ASSERTE(!"unexpected demand type");
+ }
+ return TRUE;
+}
+
+bool DemandStackWalk::IsStartingFrame(CrawlFrame* pCf)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ switch(m_eStackWalkType)
+ {
+ case SSWT_DECLARATIVE_DEMAND: // Begin after the security stub(s)
+ _ASSERTE(m_pStackMark == NULL);
+ // skip the current method that has decl sec
+ if (m_bFoundStackMark)
+ return true;
+ else
+ {
+ m_bFoundStackMark = true;
+ return false;
+ }
+
+ case SSWT_IMPERATIVE_DEMAND: // Begin where the StackMark says to
+ case SSWT_GET_ZONE_AND_URL: // Begin where the StackMark says to
+ _ASSERTE(*m_pStackMark == LookForMyCaller || *m_pStackMark == LookForMyCallersCaller);
+
+ // See if we've passed the stack mark yet
+ if (!pCf->IsInCalleesFrames(m_pStackMark))
+ return false;
+
+ // Skip the frame after the stack mark as well.
+ if(*m_pStackMark == LookForMyCallersCaller && !m_bFoundStackMark)
+ {
+ m_bFoundStackMark = TRUE;
+ return false;
+ }
+
+ return true;
+
+ case SSWT_LATEBOUND_LINKDEMAND: // Begin immediately
+ case SSWT_DEMAND_FROM_NATIVE:
+ _ASSERTE(m_pStackMark == NULL);
+ return true;
+
+ default:
+ _ASSERTE(FALSE); // Unexpected stack walk type
+ break;
+ }
+ return true;
+}
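+
+// NOTE (illustrative, not part of the original sources): a StackCrawlMark is a
+// local variable in the managed frame that created it, so comparing its address
+// against frame boundaries tells the walk where to begin:
+//
+//   ... [frame holding the mark] [caller A] [caller B] ...
+//   LookForMyCaller        -> begin checking at caller A
+//   LookForMyCallersCaller -> skip one more frame; begin at caller B
+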
+void DemandStackWalk::ProcessAppDomainTransition(AppDomain* pAppDomain, bool bCheckPrevAppDomain)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+ _ASSERTE(pAppDomain != m_pPrevAppDomain);
+
+ if (m_pPrevAppDomain != NULL && bCheckPrevAppDomain)
+ {
+ // We have not checked the previous AppDomain. Check it now.
+ if (m_pSkipAppDomain != m_pPrevAppDomain)
+ {
+ ApplicationSecurityDescriptor *pSecDesc =
+ static_cast<ApplicationSecurityDescriptor*>(m_pPrevAppDomain->GetSecurityDescriptor());
+
+ // Only process AppDomains which have completed security initialization. If the domain is not
+ // yet fully initialized then only fully trusted code can be running in the domain, so we're
+ // safe to ignore the transition. The domain may also not yet have a sane grant set setup on it
+ // yet if the demand is coming out of AppDomainManager code.
+ if (pSecDesc && !pSecDesc->IsInitializationInProgress())
+ {
+ DBG_TRACE_STACKWALK(" Checking appdomain...\n", true);
+
+ if (!pSecDesc->IsDefaultAppDomain() &&
+ !pSecDesc->IsFullyTrusted() &&
+ !pSecDesc->CheckSpecialFlag(m_dwdemandFlags))
+ {
+ OBJECTREF orRefused;
+ OBJECTREF orGranted = pSecDesc->GetGrantedPermissionSet(&orRefused);
+ CheckGrant(NULL, orGranted, orRefused, m_pPrevAppDomain, NULL, m_pPrevAssembly);
+ }
+ }
+ else
+ {
+ DBG_TRACE_STACKWALK(" Skipping appdomain...\n", true);
+ }
+ }
+ }
+ // Move the domain index forward
+ m_pThreadADStack->GetNextDomainEntryOnStack(&m_adStackIndex);
+
+ // Remember the new domain; at the end of the stack walk, the caller checks
+ // the grants of the final m_pPrevAppDomain if needed.
+ m_pPrevAppDomain = pAppDomain;
+
+ // Check if we can skip the entire pAppDomain. If so, assign m_pSkipAppDomain
+ // TODO: Could also check the AppDomain PLS here.
+ if ((m_pThreadADStack->GetCurrentDomainEntryOnStack(m_adStackIndex))->HasFlagsOrFullyTrustedWithNoStackModifiers(m_dwdemandFlags))
+ m_pSkipAppDomain = pAppDomain;
+ else
+ m_pSkipAppDomain = NULL;
+
+
+
+}
+StackWalkAction DemandStackWalk::WalkFrame(CrawlFrame* pCf)
+{
+ CONTRACT (StackWalkAction) {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ POSTCONDITION(IsValidReturnFromWalkFrame(RETVAL, pCf));
+ } CONTRACT_END;
+
+ StackWalkAction ret = SWA_CONTINUE;
+
+#ifdef FEATURE_REMOTING
+#ifdef FEATURE_COMPRESSEDSTACK
+
+ // Save the CtxTxFrame if this is one
+ if (m_pCtxTxFrame == NULL)
+ {
+ Frame *pFrame = pCf->GetFrame();
+ if (SecurityStackWalk::IsContextTransitionFrameWithCS(pFrame))
+ {
+
+ m_pCtxTxFrame = pFrame;
+ }
+ }
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+#endif // FEATURE_REMOTING
+
+ MethodDesc * pFunc = pCf->GetFunction();
+ Assembly * pAssem = pCf->GetAssembly();
+ // Get the current app domain.
+ AppDomain *pAppDomain = pCf->GetAppDomain();
+ if (pAppDomain != m_pPrevAppDomain)
+ {
+#ifndef FEATURE_REMOTING
+ BOOL bRealAppDomainTransition = (m_pPrevAppDomain != NULL);
+#endif
+ ProcessAppDomainTransition(pAppDomain, m_bHaveFoundStartingFrameYet);
+
+#ifndef FEATURE_REMOTING
+ // The first AppDomain transition is the transition from NULL to current domain. We should not stop on that.
+ // We should stop on the first "real" appdomain transition - which is a transition out of the current domain.
+ if (bRealAppDomainTransition)
+ {
+ // Without remoting, other appdomains do not matter (the transition can only come from a CreateDomain call anyway), so stop the stack walk.
+ m_dwFlags |= CORSEC_STACKWALK_HALTED;
+ RETURN SWA_ABORT;
+ }
+#endif
+ }
+
+ if ((pFunc == NULL && pAssem == NULL) || (pFunc && pFunc->IsILStub()))
+ RETURN ret; // Not a function
+
+ // Skip until the frame where the stackwalk should begin
+ if (!m_bHaveFoundStartingFrameYet)
+ {
+ if (IsStartingFrame(pCf))
+ m_bHaveFoundStartingFrameYet = true;
+ else
+ RETURN ret;
+ }
+
+ //
+ // Now check the current frame!
+ //
+ // If this is a *.Run method, then we need to terminate the stackwalk after considering this frame
+ if (pFunc && IsSpecialRunFrame(pFunc))
+ {
+ DBG_TRACE_STACKWALK(" Halting stackwalk for .Run.\n", false);
+ // Don't set CORSEC_STACKWALK_HALTED in m_dwFlags because we still need to look at the CS.
+ ret = SWA_ABORT;
+ }
+
+ DBG_TRACE_STACKWALK(" Checking granted permissions for current method...\n", true);
+
+ // Reaching here implies we walked at least a single frame.
+ COUNTER_ONLY(GetPerfCounters().m_Security.stackWalkDepth++);
+
+
+ // Get the previous assembly
+ Assembly *pPrevAssem = m_pPrevAssembly;
+
+
+ // Check if we can skip the entire appdomain
+ if (m_pSkipAppDomain == pAppDomain)
+ {
+ RETURN ret;
+ }
+
+ // Keep track of the last module checked. If we have just checked the
+ // permissions on the module, we don't need to do it again.
+ if (pAssem != pPrevAssem)
+ {
+ DBG_TRACE_STACKWALK(" Checking grants for current assembly.\n", true);
+
+ // Get the security descriptor for the current assembly and pass it to
+ // the interpreted helper.
+ AssemblySecurityDescriptor * pSecDesc = static_cast<AssemblySecurityDescriptor*>(pAssem->GetSecurityDescriptor(pAppDomain));
+ _ASSERTE(pSecDesc != NULL);
+
+ // We have to check the permissions unless the assembly is fully trusted or
+ // the demand is covered by its special flags. We always skip checks on
+ // system classes.
+ if (!pSecDesc->IsSystem() &&
+ !pSecDesc->IsFullyTrusted() &&
+ !pSecDesc->CheckSpecialFlag(m_dwdemandFlags))
+ {
+ OBJECTREF orRefused;
+ OBJECTREF orGranted = pSecDesc->GetGrantedPermissionSet(&orRefused);
+ CheckGrant(NULL, orGranted, orRefused, pAppDomain, pFunc, pAssem);
+ }
+
+ m_pPrevAssembly = pAssem;
+ }
+ else
+ {
+ DBG_TRACE_STACKWALK(" Current assembly same as previous. Skipping check.\n", true);
+ }
+
+
+ // Passed initial check. See if there is security info on this frame.
+ OBJECTREF *pFrameObjectSlot = pCf->GetAddrOfSecurityObject();
+ if (pFrameObjectSlot != NULL)
+ {
+ SecurityDeclarative::DoDeclarativeSecurityAtStackWalk(pFunc, pAppDomain, pFrameObjectSlot);
+ if (*pFrameObjectSlot != NULL)
+ {
+ DBG_TRACE_STACKWALK(" + Frame-specific security info found. Checking...\n", false);
+
+ if(!CheckFrame(*pFrameObjectSlot, pAppDomain, pFunc))
+ {
+ DBG_TRACE_STACKWALK(" Halting stackwalk for assert.\n", false);
+ m_dwFlags |= CORSEC_STACKWALK_HALTED;
+ ret = SWA_ABORT;
+ }
+ }
+ }
+
+#ifdef FEATURE_COMPRESSEDSTACK
+ // If this frame is an anonymously hosted dynamic assembly, we need to run the demand against its compressed stack
+ // to ensure the creator had the permissions for this demand
+ if(pAssem != NULL && pAppDomain != NULL && pAssem->GetDomainAssembly(pAppDomain) == pAppDomain->GetAnonymouslyHostedDynamicMethodsAssembly() &&
+ !CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_Security_DisableAnonymouslyHostedDynamicMethodCreatorSecurityCheck))
+ {
+ _ASSERTE(pFunc->IsLCGMethod());
+ OBJECTREF dynamicResolver = pFunc->AsDynamicMethodDesc()->GetLCGMethodResolver()->GetManagedResolver();
+ if(!CheckAnonymouslyHostedDynamicMethodCompressedStack(dynamicResolver, pAppDomain, pFunc))
+ {
+ m_dwFlags |= CORSEC_STACKWALK_HALTED;
+ ret = SWA_ABORT;
+ }
+ }
+#endif // FEATURE_COMPRESSEDSTACK
+
+ DBG_TRACE_STACKWALK(" Check passes for this method.\n", true);
+
+
+ // Passed all the checks; return the current value of ret (SWA_ABORT or SWA_CONTINUE, based on the checks above).
+ RETURN ret;
+}
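+
+// NOTE (illustrative summary, not part of the original sources): per-frame
+// processing in WalkFrame above, in brief:
+//   1. remember a context transition frame carrying a CompressedStack (remoting)
+//   2. on an appdomain transition, demand against the previous domain's grant set
+//   3. skip frames until the starting frame (stack mark / declarative stub)
+//   4. demand against the assembly grant set, once per consecutive run of
+//      frames from the same assembly
+//   5. demand against any frame security descriptor (stack modifiers such as
+//      asserts and denies)
+//   6. stop at *.Run frames or when an assert halts the walk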
+
+static
+StackWalkAction CodeAccessCheckStackWalkCB(CrawlFrame* pCf, VOID* pData)
+{
+ WRAPPER_NO_CONTRACT;
+ DemandStackWalk *pCBdata = (DemandStackWalk*)pData;
+ return pCBdata->WalkFrame(pCf);
+}
+
+void DemandStackWalk::DoStackWalk()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ // Get the current thread.
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread != NULL);
+
+ // Don't allow recursive security stackwalks. Note that this implies that
+ // untrusted code must *never* be called during a security stackwalk.
+ if (pThread->IsSecurityStackwalkInProgess())
+ return;
+
+ // NOTE: Initialize the stack depth. If more than one thread tries to
+ // perform a stackwalk, these counters get stomped on.
+ COUNTER_ONLY(GetPerfCounters().m_Security.stackWalkDepth = 0);
+
+ // Walk the thread.
+ EX_TRY
+ {
+ pThread->SetSecurityStackwalkInProgress( TRUE );
+
+ DBG_TRACE_STACKWALK("Code-access security check invoked.\n", false);
+ // LIGHTUNWIND flag: allow using stackwalk cache for security stackwalks
+ pThread->StackWalkFrames(CodeAccessCheckStackWalkCB, this, SKIPFUNCLETS | LIGHTUNWIND);
+ DBG_TRACE_STACKWALK("\tCode-access stackwalk completed.\n", false);
+
+ // check the last app domain or CompressedStack at the thread base
+ if (((m_dwFlags & CORSEC_STACKWALK_HALTED) == 0) /*&& m_cCheck != 0*/)
+ {
+ AppDomain *pAppDomain = m_pPrevAppDomain;
+#ifdef FEATURE_COMPRESSEDSTACK
+ OBJECTREF orCS = pThread->GetCompressedStack();
+
+ if (orCS == NULL)
+ {
+ // There may have been an AD transition; look at the callback data to see if this is the case.
+ if (m_pCtxTxFrame != NULL)
+ {
+ orCS = (OBJECTREF)SecurityStackWalk::GetCSFromContextTransitionFrame(m_pCtxTxFrame);
+ pAppDomain = m_pCtxTxFrame->GetReturnDomain();
+ }
+ }
+
+
+ if (orCS != NULL)
+ {
+ // We have a CS at the thread base - just look at that. Don't look at the last AD.
+ DBG_TRACE_STACKWALK("\tChoosing CompressedStack check.\n", true);
+ DBG_TRACE_STACKWALK("\tChecking CompressedStack...\n", true);
+
+ CheckGrant(orCS, NULL, NULL, pAppDomain, NULL, NULL);
+ DBG_TRACE_STACKWALK("\tCompressedStack check passed.\n", true);
+ }
+ else
+#endif // FEATURE_COMPRESSEDSTACK
+ {
+ // No CS at thread base - must look at the last AD
+ DBG_TRACE_STACKWALK("\tChoosing appdomain check.\n", true);
+
+ ApplicationSecurityDescriptor *pSecDesc = static_cast<ApplicationSecurityDescriptor*>(pAppDomain->GetSecurityDescriptor());
+
+ if (pSecDesc != NULL)
+ {
+ // Note: the order of these calls is important since you have to have done a
+ // GetEvidence() on the security descriptor before you check for the
+ // CORSEC_DEFAULT_APPDOMAIN property. IsFullyTrusted calls Resolve so
+ // we're all good.
+ if (!pSecDesc->IsDefaultAppDomain() &&
+ !pSecDesc->IsFullyTrusted() &&
+ !pSecDesc->CheckSpecialFlag(m_dwdemandFlags))
+ {
+ DBG_TRACE_STACKWALK("\tChecking appdomain...\n", true);
+ OBJECTREF orRefused;
+ OBJECTREF orGranted = pSecDesc->GetGrantedPermissionSet(&orRefused);
+ CheckGrant(NULL, orGranted, orRefused, pAppDomain, NULL, NULL);
+ DBG_TRACE_STACKWALK("\tappdomain check passed.\n", true);
+ }
+ }
+ else
+ {
+ DBG_TRACE_STACKWALK("\tSkipping appdomain check.\n", true);
+ }
+ }
+ }
+ else
+ {
+ DBG_TRACE_STACKWALK("\tSkipping CS/appdomain check.\n", true);
+ }
+
+ pThread->SetSecurityStackwalkInProgress( FALSE );
+ }
+ EX_CATCH
+ {
+ // We catch exceptions and rethrow like this to ensure that we've
+ // established an exception handler on the fs:[0] chain (managed
+ // exception handlers won't do this). This in turn guarantees that
+ // managed exception filters in any of our callers won't be found,
+ // otherwise they could get to execute untrusted code with security
+ // turned off.
+ pThread->SetSecurityStackwalkInProgress( FALSE );
+
+ EX_RETHROW;
+ }
+ EX_END_CATCH_UNREACHABLE
+
+
+ DBG_TRACE_STACKWALK("Code-access check passed.\n", false);
+}
+
+#ifdef FEATURE_COMPRESSEDSTACK
+BOOL DemandStackWalk::CheckAnonymouslyHostedDynamicMethodCompressedStack(OBJECTREF refDynamicResolver, AppDomain* pDomain, MethodDesc* pMethod)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ BOOL ret = TRUE;
+
+ switch(m_eDemandType)
+ {
+ case DT_PERMISSION:
+ ret = CheckAnonymouslyHostedDynamicMethodCompressedStackPermission(refDynamicResolver, pDomain, pMethod);
+ break;
+
+ case DT_SET:
+ ret = CheckAnonymouslyHostedDynamicMethodCompressedStackPermissionSet(refDynamicResolver, pDomain, pMethod);
+ break;
+
+ case DT_ZONE_AND_URL:
+ // Not needed for compressed stack
+ break;
+
+ default:
+ _ASSERTE(!"unexpected demand type");
+ break;
+ }
+
+ return ret;
+}
+
+BOOL DemandStackWalk::CheckAnonymouslyHostedDynamicMethodCompressedStackPermissionSet(OBJECTREF refDynamicResolver, AppDomain* pDomain, MethodDesc* pMethod)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ CLR_BOOL ret = FALSE;
+
+
+ struct _gc {
+ OBJECTREF orDynamicResolver;
+ OBJECTREF orDemandSet;
+ OBJECTREF orPermSetOut;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ gc.orDynamicResolver = refDynamicResolver;
+
+ GCPROTECT_BEGIN(gc);
+
+ // Fetch input objects that might originate from a different appdomain,
+ // marshalling if necessary.
+ gc.orDemandSet = m_objects.GetObject(pDomain);
+
+ // Switch into the destination context if necessary.
+ ENTER_DOMAIN_PTR(pDomain,ADV_RUNNINGIN) //have it on the stack
+ {
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__SECURITY_RUNTIME__CHECK_DYNAMIC_METHOD_SET_HELPER);
+
+ DECLARE_ARGHOLDER_ARRAY(args, 4);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.orDynamicResolver);
+ args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(gc.orDemandSet);
+ args[ARGNUM_2] = PTR_TO_ARGHOLDER(&gc.orPermSetOut);
+ args[ARGNUM_3] = PTR_TO_ARGHOLDER(pMethod);
+
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD(ret, CLR_BOOL, args);
+
+ if (gc.orPermSetOut != NULL) {
+ // Update the cached object.
+ m_objects.UpdateObject(pDomain, gc.orPermSetOut);
+ }
+ }
+ END_DOMAIN_TRANSITION;
+
+ GCPROTECT_END();
+
+ return ret;
+}
+
+
+BOOL DemandStackWalk::CheckAnonymouslyHostedDynamicMethodCompressedStackPermission(OBJECTREF refDynamicResolver, AppDomain* pDomain, MethodDesc* pMethod)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ CLR_BOOL ret = FALSE;
+
+
+ struct _gc {
+ OBJECTREF orDynamicResolver;
+ OBJECTREF orDemand;
+ OBJECTREF orToken;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ gc.orDynamicResolver = refDynamicResolver;
+
+ GCPROTECT_BEGIN(gc);
+
+ // Fetch input objects that might originate from a different appdomain,
+ // marshalling if necessary.
+ gc.orDemand = m_objects.GetObjects(pDomain, &gc.orToken);
+
+ // Switch into the destination context if necessary.
+ ENTER_DOMAIN_PTR(pDomain,ADV_RUNNINGIN) //have it on the stack
+ {
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__SECURITY_RUNTIME__CHECK_DYNAMIC_METHOD_HELPER);
+
+ DECLARE_ARGHOLDER_ARRAY(args, 4);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.orDynamicResolver);
+ args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(gc.orDemand);
+ args[ARGNUM_2] = OBJECTREF_TO_ARGHOLDER(gc.orToken);
+ args[ARGNUM_3] = PTR_TO_ARGHOLDER(pMethod);
+
+ CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
+ CALL_MANAGED_METHOD(ret, CLR_BOOL, args);
+ }
+ END_DOMAIN_TRANSITION;
+
+ GCPROTECT_END();
+
+ return ret;
+}
+#endif // FEATURE_COMPRESSEDSTACK
+
+
+
+
+// -------------------------------------------------------------------------------
+//
+// AssertStackWalk
+//
+// -------------------------------------------------------------------------------
+
+class AssertStackWalk : public SecurityStackWalk
+{
+protected:
+ StackCrawlMark * m_pStackMark;
+ bool m_bHaveFoundStartingFrameYet;
+ INT_PTR m_cCheck;
+
+public:
+ OBJECTREF* m_pSecurityObject;
+ AppDomain* m_pSecurityObjectDomain;
+
+ AssertStackWalk(SecurityStackWalkType eType, DWORD dwFlags, StackCrawlMark* stackMark)
+ : SecurityStackWalk(eType, dwFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pStackMark = stackMark;
+ m_bHaveFoundStartingFrameYet = false;
+ m_cCheck = 1;
+ m_pSecurityObject = NULL;
+ m_pSecurityObjectDomain = NULL;
+ }
+
+ void DoStackWalk();
+ StackWalkAction WalkFrame(CrawlFrame* pCf);
+};
+
+StackWalkAction AssertStackWalk::WalkFrame(CrawlFrame* pCf)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ DBG_TRACE_METHOD(pCf);
+
+ MethodDesc * pFunc = pCf->GetFunction();
+ _ASSERTE(pFunc != NULL); // we requested functions only!
+ _ASSERTE(m_eStackWalkType == SSWT_IMPERATIVE_ASSERT);
+ _ASSERTE(*m_pStackMark == LookForMyCaller);
+
+ // Skip until we pass the StackMark
+ if (!m_bHaveFoundStartingFrameYet)
+ {
+ if (pCf->IsInCalleesFrames(m_pStackMark))
+ m_bHaveFoundStartingFrameYet = true;
+ else
+ return SWA_CONTINUE;
+ }
+
+ // Check if we've visited the maximum number of frames
+ if (m_cCheck >= 0)
+ {
+ if (m_cCheck == 0)
+ {
+ m_dwFlags |= CORSEC_STACKWALK_HALTED;
+ return SWA_ABORT;
+ }
+ else
+ --m_cCheck;
+ }
+
+ // Reaching here implies we walked at least a single frame.
+ COUNTER_ONLY(GetPerfCounters().m_Security.stackWalkDepth++);
+
+ DBG_TRACE_STACKWALK(" Checking grants for current assembly.\n", true);
+
+ // Get the security descriptor for the current assembly and pass it to
+ // the interpreted helper.
+ // Get the current assembly
+ Assembly *pAssem = pFunc->GetModule()->GetAssembly();
+ AppDomain *pAppDomain = pCf->GetAppDomain();
+ IAssemblySecurityDescriptor * pSecDesc = pAssem->GetSecurityDescriptor(pAppDomain);
+ _ASSERTE(pSecDesc != NULL);
+
+
+ if (!SecurityTransparent::IsAllowedToAssert(pFunc))
+ {
+ // Transparent method can't have the permission to Assert
+ COMPlusThrow(kInvalidOperationException,W("InvalidOperation_AssertTransparentCode"));
+ }
+
+ if (!pSecDesc->IsSystem() && !pSecDesc->IsFullyTrusted())
+ {
+ OBJECTREF orRefused;
+ OBJECTREF orGranted = pSecDesc->GetGrantedPermissionSet(&orRefused);
+ CheckPermissionAgainstGrants(NULL, orGranted, orRefused, pAppDomain, pFunc, pAssem);
+ }
+
+ // Passed initial check. See if there is security info on this frame.
+ m_pSecurityObject = pCf->GetAddrOfSecurityObject();
+ m_pSecurityObjectDomain = pAppDomain;
+
+ DBG_TRACE_STACKWALK(" Check Immediate passes for this method.\n", true);
+
+ // Passed all the checks. Only the immediate caller needed to be examined, so stop the walk here.
+ return SWA_ABORT;
+}
+
+static
+StackWalkAction CheckNReturnSOStackWalkCB(CrawlFrame* pCf, VOID* pData)
+{
+ WRAPPER_NO_CONTRACT;
+ AssertStackWalk *pCBdata = (AssertStackWalk*)pData;
+ return pCBdata->WalkFrame(pCf);
+}
+
+FCIMPL4(Object*, SecurityStackWalk::CheckNReturnSO, Object* permTokenUNSAFE, Object* permUNSAFE, StackCrawlMark* stackMark, INT32 create)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refRetVal = NULL;
+ OBJECTREF permToken = (OBJECTREF) permTokenUNSAFE;
+ OBJECTREF perm = (OBJECTREF) permUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_2(permToken, perm);
+
+ _ASSERTE((permToken != NULL) && (perm != NULL));
+
+ // Track perfmon counters: runtime security checks.
+ IncrementSecurityPerfCounter();
+
+#if defined(ENABLE_PERF_COUNTERS)
+ // Perf Counter "%Time in Runtime check" support
+ PERF_COUNTER_TIMER_PRECISION _startPerfCounterTimer = GET_CYCLE_COUNT();
+#endif
+
+ // Initialize callback data.
+ DWORD dwFlags = 0;
+ AssertStackWalk walkData(SSWT_IMPERATIVE_ASSERT, dwFlags, stackMark);
+ walkData.m_objects.SetObjects(perm, permToken);
+
+ // Protect the object references in the callback data.
+ GCPROTECT_BEGIN(walkData.m_objects.m_sGC);
+
+ walkData.DoStackWalk();
+
+ GCPROTECT_END();
+
+#if defined(ENABLE_PERF_COUNTERS)
+ // Accumulate the counter
+ PERF_COUNTER_TIMER_PRECISION _stopPerfCounterTimer = GET_CYCLE_COUNT();
+ g_TotalTimeInSecurityRuntimeChecks += _stopPerfCounterTimer - _startPerfCounterTimer;
+
+ // Report the accumulated counter only after PERF_COUNTER_NUM_OF_ITERATIONS iterations.
+ if (g_SecurityChecksIterations++ > PERF_COUNTER_NUM_OF_ITERATIONS)
+ {
+ GetPerfCounters().m_Security.timeRTchecks = static_cast<DWORD>(g_TotalTimeInSecurityRuntimeChecks);
+ GetPerfCounters().m_Security.timeRTchecksBase = static_cast<DWORD>(_stopPerfCounterTimer - g_LastTimeInSecurityRuntimeChecks);
+
+ g_TotalTimeInSecurityRuntimeChecks = 0;
+ g_LastTimeInSecurityRuntimeChecks = _stopPerfCounterTimer;
+ g_SecurityChecksIterations = 0;
+ }
+#endif // #if defined(ENABLE_PERF_COUNTERS)
+
+ if (walkData.m_pSecurityObject == NULL)
+ {
+ goto lExit;
+ }
+
+ // Is security object frame in a different context?
+ Thread *pThread;
+ pThread = GetThread();
+ bool fSwitchContext;
+
+ fSwitchContext = walkData.m_pSecurityObjectDomain != pThread->GetDomain();
+ if (create && *walkData.m_pSecurityObject == NULL)
+ {
+ // If necessary, shift to correct context to allocate security object.
+ ENTER_DOMAIN_PTR(walkData.m_pSecurityObjectDomain,ADV_RUNNINGIN) //on the stack
+ {
+ MethodTable* pMethFrameSecDesc = MscorlibBinder::GetClass(CLASS__FRAME_SECURITY_DESCRIPTOR);
+
+ *walkData.m_pSecurityObject = AllocateObject(pMethFrameSecDesc);
+ }
+ END_DOMAIN_TRANSITION;
+ }
+
+ // If we found or created a security object in a different context, make a
+ // copy in the current context.
+#ifndef FEATURE_CORECLR // should not happen in core clr
+ if (fSwitchContext && *walkData.m_pSecurityObject != NULL)
+ refRetVal = AppDomainHelper::CrossContextCopyFrom(walkData.m_pSecurityObjectDomain,
+ walkData.m_pSecurityObject);
+ else
+#else
+ _ASSERTE(!fSwitchContext);
+#endif
+
+ refRetVal = *walkData.m_pSecurityObject;
+
+lExit: ;
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(refRetVal);
+}
+FCIMPLEND
+
+
+void AssertStackWalk::DoStackWalk()
+{
+ // Get the current thread.
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread != NULL);
+
+ // NOTE: Initialize the stack depth. If more than one thread tries to
+ // perform a stackwalk, these counters get stomped on.
+ COUNTER_ONLY(GetPerfCounters().m_Security.stackWalkDepth = 0);
+
+ // Walk the thread.
+ DBG_TRACE_STACKWALK("Code-access security check immediate invoked.\n", false);
+ // LIGHTUNWIND flag: allow using stackwalk cache for security stackwalks
+ pThread->StackWalkFrames(CheckNReturnSOStackWalkCB, this, FUNCTIONSONLY | SKIPFUNCLETS | LIGHTUNWIND);
+
+ DBG_TRACE_STACKWALK("\tCode-access stackwalk completed.\n", false);
+}
+
+
+// -------------------------------------------------------------------------------
+//
+// CountOverridesStackWalk
+//
+// -------------------------------------------------------------------------------
+
+typedef struct _SkipFunctionsData
+{
+ INT32 cSkipFunctions;
+ StackCrawlMark* pStackMark;
+ BOOL bUseStackMark;
+ BOOL bFoundCaller;
+ MethodDesc* pFunction;
+ OBJECTREF* pSecurityObject;
+ AppDomain* pSecurityObjectAppDomain;
+} SkipFunctionsData;
+
+static StackWalkAction SkipFunctionsCB(CrawlFrame* pCf, VOID* pData)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+ SkipFunctionsData *skipData = (SkipFunctionsData*)pData;
+ _ASSERTE(skipData != NULL);
+
+ MethodDesc *pFunc = pCf->GetFunction();
+
+#ifdef _DEBUG
+ // Get the interesting info now, so we can get a trace
+ // while debugging...
+ OBJECTREF *pSecObj;
+ pSecObj = pCf->GetAddrOfSecurityObject();
+#endif
+
+ _ASSERTE(skipData->bUseStackMark && "you must specify a stackmark");
+
+ // First check if the walk has skipped the required frames. The check
+ // here is between the address of a local variable (the stack mark) and a
+ // pointer to the EIP for a frame (which is actually the pointer to the
+ // return address to the function from the previous frame). So we'll
+ // actually notice which frame the stack mark was in one frame later. This
+ // is fine for our purposes since we're always looking for the frame of the
+ // caller of the method that actually created the stack mark.
+ if ((skipData->pStackMark != NULL) &&
+ !pCf->IsInCalleesFrames(skipData->pStackMark))
+ return SWA_CONTINUE;
+
+ skipData->pFunction = pFunc;
+ skipData->pSecurityObject = pCf->GetAddrOfSecurityObject();
+ skipData->pSecurityObjectAppDomain = pCf->GetAppDomain();
+ return SWA_ABORT; // This actually indicates success.
+}
+
+// Version of the above method that looks for a stack mark (the address of a
+// local variable in a frame called by the target frame).
+BOOL SecurityStackWalk::SkipAndFindFunctionInfo(StackCrawlMark* stackMark, MethodDesc ** ppFunc, OBJECTREF ** ppObj, AppDomain ** ppAppDomain)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+ _ASSERTE(ppFunc != NULL || ppObj != NULL || !"Why was this function called?!");
+
+ SkipFunctionsData walkData;
+ walkData.pStackMark = stackMark;
+ walkData.bUseStackMark = TRUE;
+ walkData.bFoundCaller = FALSE;
+ walkData.pFunction = NULL;
+ walkData.pSecurityObject = NULL;
+ // LIGHTUNWIND flag: allow using stackwalk cache for security stackwalks
+ StackWalkAction action = GetThread()->StackWalkFrames(SkipFunctionsCB, &walkData, FUNCTIONSONLY | SKIPFUNCLETS | LIGHTUNWIND);
+ if (action == SWA_ABORT)
+ {
+ if (ppFunc != NULL)
+ *ppFunc = walkData.pFunction;
+ if (ppObj != NULL)
+ {
+ *ppObj = walkData.pSecurityObject;
+ if (ppAppDomain != NULL)
+ *ppAppDomain = walkData.pSecurityObjectAppDomain;
+ }
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+}
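+
+// NOTE (illustrative sketch, not part of the original sources): a hypothetical
+// caller using the stack-mark variant to identify its managed caller:
+//
+//   MethodDesc* pCaller = NULL;
+//   OBJECTREF*  pSecObj = NULL;
+//   AppDomain*  pDomain = NULL;
+//   if (SecurityStackWalk::SkipAndFindFunctionInfo(stackMark, &pCaller, &pSecObj, &pDomain))
+//   {
+//       // pCaller is the first frame past the stack mark
+//   }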
+
+
+// -------------------------------------------------------------------------------
+//
+// CountOverridesStackWalk
+//
+// -------------------------------------------------------------------------------
+
+class CountOverridesStackWalk
+{
+public:
+ DWORD numOverrides; // Can be removed
+ DWORD numAsserts; // Can be removed
+ DWORD numDomainOverrides;
+ DWORD numDomainAsserts;
+ AppDomain* prev_AppDomain;
+ Frame* pCtxTxFrame;
+ DWORD adStackIndex;
+
+ CountOverridesStackWalk()
+ {
+ LIMITED_METHOD_CONTRACT;
+ numOverrides = 0;
+ numAsserts = 0;
+ numDomainAsserts = 0;
+ numDomainOverrides = 0;
+ prev_AppDomain = NULL;
+ pCtxTxFrame = NULL;
+ GetThread()->InitDomainIteration(&adStackIndex);
+ }
+ bool IsSpecialRunFrame(MethodDesc* pMeth)
+ {
+ return SecurityStackWalk::IsSpecialRunFrame(pMeth);
+ }
+};
+
+static
+StackWalkAction UpdateOverridesCountCB(CrawlFrame* pCf, void *pData)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ DBG_TRACE_METHOD(pCf);
+
+ CountOverridesStackWalk *pCBdata = static_cast<CountOverridesStackWalk *>(pData);
+
+
+ // First check if the walk has skipped the required frames. The check
+ // here is between the address of a local variable (the stack mark) and a
+ // pointer to the EIP for a frame (which is actually the pointer to the
+ // return address to the function from the previous frame). So we'll
+ // actually notice which frame the stack mark was in one frame later. This
+ // is fine for our purposes since we're always looking for the frame of the
+ // caller (or the caller's caller) of the method that actually created the
+ // stack mark.
+
+#ifdef FEATURE_REMOTING
+#ifdef FEATURE_COMPRESSEDSTACK
+
+ // Save the CtxTxFrame if this is one
+ if (pCBdata->pCtxTxFrame == NULL)
+ {
+ Frame *pFrame = pCf->GetFrame();
+ if (SecurityStackWalk::IsContextTransitionFrameWithCS(pFrame))
+ {
+ pCBdata->pCtxTxFrame = pFrame;
+ }
+ }
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+#endif // FEATURE_REMOTING
+ MethodDesc* pMeth = pCf->GetFunction();
+ if (pMeth == NULL || pMeth->IsILStub())
+ return SWA_CONTINUE; // Not a function frame and not a security stub; we were only
+ // looking for CtxTransitionFrames above, so resume the stackwalk.
+
+
+
+
+ AppDomain* pAppDomain = pCf->GetAppDomain();
+ if (pCBdata->prev_AppDomain == NULL)
+ {
+ pCBdata->prev_AppDomain = pAppDomain; //innermost AD
+ }
+ else if (pCBdata->prev_AppDomain != pAppDomain)
+ {
+ // AppDomain Transition
+ // Update the values in the ADStack for the current AD
+ Thread *t = GetThread();
+ t->GetNextDomainOnStack(&pCBdata->adStackIndex, NULL, NULL);
+ t->UpdateDomainOnStack(pCBdata->adStackIndex, pCBdata->numDomainAsserts, pCBdata->numDomainOverrides);
+
+ // Update CBdata values
+ pCBdata->numAsserts+= pCBdata->numDomainAsserts;
+ pCBdata->numOverrides += pCBdata->numDomainOverrides;
+ pCBdata->numDomainAsserts = 0;
+ pCBdata->numDomainOverrides = 0;
+ pCBdata->prev_AppDomain = pAppDomain;
+
+ }
+ // Get the security object for this function...
+ OBJECTREF* pRefSecDesc = pCf->GetAddrOfSecurityObject();
+ if (pRefSecDesc != NULL)
+ {
+ SecurityDeclarative::DoDeclarativeSecurityAtStackWalk(pMeth, pAppDomain, pRefSecDesc);
+ FRAMESECDESCREF refFSD = *((FRAMESECDESCREF*)pRefSecDesc);
+ if (refFSD != NULL)
+ {
+
+ INT32 ret = refFSD->GetOverridesCount();
+ pCBdata->numDomainAsserts+= refFSD->GetAssertCount();
+
+ if (ret > 0)
+ {
+ DBG_TRACE_STACKWALK(" SecurityDescriptor with overrides FOUND.\n", false);
+ pCBdata->numDomainOverrides += ret;
+ }
+ else
+ {
+ DBG_TRACE_STACKWALK(" SecurityDescriptor with no override found.\n", false);
+ }
+ }
+
+ }
+
+#ifdef FEATURE_COMPRESSEDSTACK
+ if(SecurityStackWalk::MethodIsAnonymouslyHostedDynamicMethodWithCSToEvaluate(pMeth))
+ {
+ pCBdata->numDomainAsserts++;
+ pCBdata->numDomainOverrides++;
+ }
+#endif // FEATURE_COMPRESSEDSTACK
+
+ // If this is a *.Run method, or if it has a CompressedStack, then we need to
+ // terminate the stackwalk.
+ if (pCBdata->IsSpecialRunFrame(pMeth))
+ {
+ DBG_TRACE_STACKWALK(" Halting stackwalk for .Run.\n", false);
+ return SWA_ABORT;
+ }
+
+ return SWA_CONTINUE;
+}
+
+VOID SecurityStackWalk::UpdateOverridesCount()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ //
+ // Initialize the callback data on the stack.
+ //
+
+ CountOverridesStackWalk walkData;
+
+ // Get the current thread that we're to walk.
+ Thread * t = GetThread();
+
+
+ // Don't allow recursive security stackwalks. Note that this implies that
+ // untrusted code must *never* be called during a security stackwalk.
+ if (t->IsSecurityStackwalkInProgess())
+ return;
+
+ EX_TRY
+ {
+ t->SetSecurityStackwalkInProgress( TRUE );
+
+ //
+ // Begin the stack walk
+ //
+ DBG_TRACE_STACKWALK(" Update Overrides Count invoked .\n", false);
+ // LIGHTUNWIND flag: allow using stackwalk cache for security stackwalks
+ t->StackWalkFrames(UpdateOverridesCountCB, &walkData, SKIPFUNCLETS | LIGHTUNWIND);
+#ifdef FEATURE_COMPRESSEDSTACK
+ COMPRESSEDSTACKREF csRef = (COMPRESSEDSTACKREF)t->GetCompressedStack();
+
+ // There may have been an AD transition; look at the callback data to see if this is the case.
+ if (csRef == NULL && walkData.pCtxTxFrame != NULL)
+ {
+ csRef = SecurityStackWalk::GetCSFromContextTransitionFrame(walkData.pCtxTxFrame);
+ }
+
+ // Use CS if found
+ if (csRef != NULL)
+ {
+
+ walkData.numDomainOverrides += StackCompressor::GetCSInnerAppDomainOverridesCount(csRef);
+ walkData.numDomainAsserts += StackCompressor::GetCSInnerAppDomainAssertCount(csRef);
+ }
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+ t->GetNextDomainOnStack(&walkData.adStackIndex, NULL, NULL);
+ t->UpdateDomainOnStack(walkData.adStackIndex, walkData.numDomainAsserts, walkData.numDomainOverrides);
+ walkData.numAsserts += walkData.numDomainAsserts;
+ walkData.numOverrides += walkData.numDomainOverrides;
+
+ t->SetSecurityStackwalkInProgress( FALSE );
+ }
+ EX_CATCH
+ {
+ // We catch exceptions and rethrow like this to ensure that we've
+ // established an exception handler on the fs:[0] chain (managed
+ // exception handlers won't do this). This in turn guarantees that
+ // managed exception filters in any of our callers won't be found,
+ // otherwise they could get to execute untrusted code with security
+ // turned off.
+ t->SetSecurityStackwalkInProgress( FALSE );
+
+ EX_RETHROW;
+ }
+ EX_END_CATCH_UNREACHABLE
+
+
+
+
+}
+
+
+// -------------------------------------------------------------------------------
+//
+// COMCodeAccessSecurityEngine
+//
+// -------------------------------------------------------------------------------
+#ifdef FEATURE_COMPRESSEDSTACK
+COMPRESSEDSTACKREF SecurityStackWalk::GetCSFromContextTransitionFrame(Frame *pFrame)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ EXECUTIONCONTEXTREF ecRef = NULL;
+
+ if (pFrame != NULL)
+ ecRef = (EXECUTIONCONTEXTREF)pFrame->GetReturnExecutionContext();
+ if (ecRef != NULL)
+ return (ecRef->GetCompressedStack());
+
+ return NULL;
+}
+
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+
+//-----------------------------------------------------------+
+// Helper used to check a demand set against a provided grant
+// and possibly denied set. Grant and denied set might be from
+// another domain.
+//-----------------------------------------------------------+
+void SecurityStackWalk::CheckSetHelper(OBJECTREF *prefDemand,
+ OBJECTREF *prefGrant,
+ OBJECTREF *prefRefused,
+ AppDomain *pGrantDomain,
+ MethodDesc *pMethod,
+ OBJECTREF *pAssembly,
+ CorDeclSecurity action)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(IsProtectedByGCFrame (prefDemand));
+ PRECONDITION(IsProtectedByGCFrame (prefGrant));
+ PRECONDITION(IsProtectedByGCFrame (prefRefused));
+ PRECONDITION(IsProtectedByGCFrame (pAssembly));
+ } CONTRACTL_END;
+
+ // We might need to marshal the grant and denied sets into the current
+ // domain.
+#ifndef FEATURE_CORECLR // should not happen in core clr
+ if (pGrantDomain != GetAppDomain())
+ {
+ *prefGrant = AppDomainHelper::CrossContextCopyFrom(pGrantDomain, prefGrant);
+ if (*prefRefused != NULL)
+ *prefRefused = AppDomainHelper::CrossContextCopyFrom(pGrantDomain, prefRefused);
+ }
+#else
+ _ASSERTE(pGrantDomain == GetAppDomain());
+#endif
+ MethodDescCallSite checkSetHelper(METHOD__SECURITY_ENGINE__CHECK_SET_HELPER);
+
+ ARG_SLOT args[] = {
+ ObjToArgSlot(NULL),
+ ObjToArgSlot(*prefGrant),
+ ObjToArgSlot(*prefRefused),
+ ObjToArgSlot(*prefDemand),
+ PtrToArgSlot(pMethod),
+ ObjToArgSlot(*pAssembly),
+ (ARG_SLOT)action
+ };
+
+ checkSetHelper.Call(args);
+}
+
+
+
+
+
+FCIMPL0(FC_BOOL_RET, SecurityStackWalk::FCallQuickCheckForAllDemands)
+{
+ FCALL_CONTRACT;
+ // This function's body is identical to SecurityPolicy::IsDefaultThreadSecurityInfo;
+ // FCUnique emits a distinct marker so the two FCALLs are not folded together.
+ FCUnique(0x17);
+ FC_RETURN_BOOL(QuickCheckForAllDemands(0));
+
+}
+FCIMPLEND
+
+FCIMPL0(FC_BOOL_RET, SecurityStackWalk::FCallAllDomainsHomogeneousWithNoStackModifiers)
+{
+ FCALL_CONTRACT;
+
+ Thread* t = GetThread();
+ FC_RETURN_BOOL(t->AllDomainsHomogeneousWithNoStackModifiers());
+
+}
+FCIMPLEND
+
+//-----------------------------------------------------------
+// Native implementation for code-access security check.
+// Checks that callers on the stack have the permission
+// specified in the arguments or checks for unrestricted
+// access if the permission is null.
+//-----------------------------------------------------------
+FCIMPL3(void, SecurityStackWalk::Check, Object* permOrPermSetUNSAFE, StackCrawlMark* stackMark, CLR_BOOL isPermSet)
+{
+ FCALL_CONTRACT;
+
+ if (QuickCheckForAllDemands(0))
+ return;
+
+ FC_INNER_RETURN_VOID(CheckFramed(permOrPermSetUNSAFE, stackMark, isPermSet));
+}
+FCIMPLEND
+
+NOINLINE void SecurityStackWalk::CheckFramed(Object* permOrPermSetUNSAFE,
+ StackCrawlMark* stackMark,
+ CLR_BOOL isPermSet)
+{
+ CONTRACTL {
+ THROWS;
+ DISABLED(GC_TRIGGERS); // FCALLS with HELPER frames have issues with GC_TRIGGERS
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ FC_INNER_PROLOG(SecurityStackWalk::Check);
+
+ OBJECTREF permOrPermSet = (OBJECTREF) permOrPermSetUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_CAPTURE_DEPTH_2, permOrPermSet);
+
+ Check_PLS_SW(isPermSet, SSWT_IMPERATIVE_DEMAND, &permOrPermSet, stackMark);
+
+ HELPER_METHOD_FRAME_END();
+ FC_INNER_EPILOG();
+}
+
+
+void SecurityStackWalk::Check_PLS_SW(BOOL isPermSet,
+ SecurityStackWalkType eType,
+ OBJECTREF* permOrPermSet,
+ StackCrawlMark* stackMark)
+{
+
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ if (!PreCheck(permOrPermSet, isPermSet))
+ {
+ Check_StackWalk(eType, permOrPermSet, stackMark, isPermSet);
+ }
+}
+void SecurityStackWalk::Check_PLS_SW_GC( BOOL isPermSet,
+ SecurityStackWalkType eType,
+ OBJECTREF permOrPermSet,
+ StackCrawlMark* stackMark)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ GCPROTECT_BEGIN(permOrPermSet);
+ Check_PLS_SW(isPermSet, eType, &permOrPermSet, stackMark);
+ GCPROTECT_END();
+}
+
+void SecurityStackWalk::Check_StackWalk(SecurityStackWalkType eType,
+ OBJECTREF* pPermOrPermSet,
+ StackCrawlMark* stackMark,
+ BOOL isPermSet)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(*pPermOrPermSet != NULL);
+ } CONTRACTL_END;
+
+#if defined(ENABLE_PERF_COUNTERS)
+ // Perf Counter "%Time in Runtime check" support
+ PERF_COUNTER_TIMER_PRECISION _startPerfCounterTimer = GET_CYCLE_COUNT();
+#endif
+
+ if (GetThread()->GetOverridesCount() != 0)
+ {
+ // First let's make sure the overrides count is OK.
+ UpdateOverridesCount();
+ // Once the overrides count has been fixed, see if we really need to stackwalk.
+ // This is an additional cost when we do end up walking, but it can avoid an
+ // unnecessary stackwalk otherwise. Pick your poison.
+ if (QuickCheckForAllDemands(0))
+ return;
+ }
+ // Initialize callback data.
+ DWORD dwFlags = 0;
+ DWORD demandFlags = GetPermissionSpecialFlags(pPermOrPermSet);
+
+ DemandStackWalk walkData(eType, dwFlags, stackMark, (isPermSet?DemandStackWalk::DT_SET:DemandStackWalk::DT_PERMISSION), demandFlags);
+ walkData.m_objects.SetObject(*pPermOrPermSet);
+
+ // Protect the object references in the callback data.
+ GCPROTECT_BEGIN(walkData.m_objects.m_sGC);
+
+ walkData.DoStackWalk();
+
+ GCPROTECT_END();
+
+#if defined(ENABLE_PERF_COUNTERS)
+ // Accumulate the counter
+ PERF_COUNTER_TIMER_PRECISION _stopPerfCounterTimer = GET_CYCLE_COUNT();
+ g_TotalTimeInSecurityRuntimeChecks += _stopPerfCounterTimer - _startPerfCounterTimer;
+
+ // Report the accumulated counter only after PERF_COUNTER_NUM_OF_ITERATIONS iterations.
+ if (g_SecurityChecksIterations++ > PERF_COUNTER_NUM_OF_ITERATIONS)
+ {
+ GetPerfCounters().m_Security.timeRTchecks = static_cast<DWORD>(g_TotalTimeInSecurityRuntimeChecks);
+ GetPerfCounters().m_Security.timeRTchecksBase = static_cast<DWORD>(_stopPerfCounterTimer - g_LastTimeInSecurityRuntimeChecks);
+
+ g_TotalTimeInSecurityRuntimeChecks = 0;
+ g_LastTimeInSecurityRuntimeChecks = _stopPerfCounterTimer;
+ g_SecurityChecksIterations = 0;
+ }
+#endif // #if defined(ENABLE_PERF_COUNTERS)
+}
+
+FCIMPL3(void, SecurityStackWalk::GetZoneAndOrigin, Object* pZoneListUNSAFE, Object* pOriginListUNSAFE, StackCrawlMark* stackMark)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF zoneList = (OBJECTREF) pZoneListUNSAFE;
+ OBJECTREF originList = (OBJECTREF) pOriginListUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_2(zoneList, originList);
+
+ // Initialize callback data.
+ DWORD dwFlags = 0;
+ DemandStackWalk walkData(SSWT_GET_ZONE_AND_URL, dwFlags, stackMark, DemandStackWalk::DT_ZONE_AND_URL, 0);
+ walkData.m_objects.SetObjects(zoneList, originList);
+
+ GCPROTECT_BEGIN(walkData.m_objects.m_sGC);
+
+ walkData.DoStackWalk();
+
+ GCPROTECT_END();
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+
+#ifdef FEATURE_COMPRESSEDSTACK
+FCIMPL1(VOID, SecurityStackWalk::FcallDestroyDelayedCompressedStack, void *compressedStack)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ StackCompressor::Destroy(compressedStack);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+#endif // FEATURE_COMPRESSEDSTACK
+//
+// This method checks a few special demands in case we can
+// avoid looking at the real PLS object.
+//
+
+DWORD SecurityStackWalk::GetPermissionSpecialFlags (OBJECTREF* orDemand)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ MethodTable* pMethPermissionSet = MscorlibBinder::GetClass(CLASS__PERMISSION_SET);
+ MethodTable* pMethNamedPermissionSet = MscorlibBinder::GetClass(CLASS__NAMEDPERMISSION_SET);
+ MethodTable* pMethReflectionPermission = MscorlibBinder::GetClass(CLASS__REFLECTION_PERMISSION);
+ MethodTable* pMethSecurityPermission = MscorlibBinder::GetClass(CLASS__SECURITY_PERMISSION);
+
+ DWORD dwSecurityPermissionFlags = 0, dwReflectionPermissionFlags = 0;
+ MethodTable* pMeth = (*orDemand)->GetMethodTable();
+ if (pMeth == pMethPermissionSet || pMeth == pMethNamedPermissionSet) {
+ // NamedPermissionSet derives from PermissionSet and we're interested only
+ // in the fields in PermissionSet: so it's OK to cast to the unmanaged
+ // equivalent of PermissionSet even for a NamedPermissionSet object
+ PERMISSIONSETREF permSet = (PERMISSIONSETREF) *orDemand;
+
+ if (permSet->IsUnrestricted()) {
+ return (1 << SECURITY_FULL_TRUST);
+ }
+ TOKENBASEDSETREF tokenBasedSet = (TOKENBASEDSETREF) permSet->GetTokenBasedSet();
+ if (tokenBasedSet != NULL && tokenBasedSet->GetNumElements() == 1 && tokenBasedSet->GetPermSet() != NULL) {
+ pMeth = (tokenBasedSet->GetPermSet())->GetMethodTable();
+
+ if (pMeth == pMethReflectionPermission) {
+ dwReflectionPermissionFlags = ((REFLECTIONPERMISSIONREF) tokenBasedSet->GetPermSet())->GetFlags();
+ }
+ else if (pMeth == pMethSecurityPermission) {
+ dwSecurityPermissionFlags = ((SECURITYPERMISSIONREF) tokenBasedSet->GetPermSet())->GetFlags();
+ }
+ }
+ }
+ else {
+ if (pMeth == pMethReflectionPermission)
+ dwReflectionPermissionFlags = ((REFLECTIONPERMISSIONREF) (*orDemand))->GetFlags();
+ else if (pMeth == pMethSecurityPermission)
+ dwSecurityPermissionFlags = ((SECURITYPERMISSIONREF) (*orDemand))->GetFlags();
+ }
+
+ if (pMeth == pMethReflectionPermission) {
+ switch (dwReflectionPermissionFlags) {
+ case REFLECTION_PERMISSION_TYPEINFO:
+ return (1 << REFLECTION_TYPE_INFO);
+ case REFLECTION_PERMISSION_MEMBERACCESS:
+ return (1 << REFLECTION_MEMBER_ACCESS);
+ case REFLECTION_PERMISSION_RESTRICTEDMEMBERACCESS:
+ return (1 << REFLECTION_RESTRICTED_MEMBER_ACCESS);
+ default:
+ return 0; // There is no mapping for this reflection permission flag
+ }
+ } else if (pMeth == pMethSecurityPermission) {
+ switch (dwSecurityPermissionFlags) {
+ case SECURITY_PERMISSION_ASSERTION:
+ return (1 << SECURITY_ASSERT);
+ case SECURITY_PERMISSION_UNMANAGEDCODE:
+ return (1 << SECURITY_UNMANAGED_CODE);
+ case SECURITY_PERMISSION_SKIPVERIFICATION:
+ return (1 << SECURITY_SKIP_VER);
+ case SECURITY_PERMISSION_SERIALIZATIONFORMATTER:
+ return (1 << SECURITY_SERIALIZATION);
+ case SECURITY_PERMISSION_BINDINGREDIRECTS:
+ return (1 << SECURITY_BINDING_REDIRECTS);
+ case SECURITY_PERMISSION_CONTROLEVIDENCE:
+ return (1 << SECURITY_CONTROL_EVIDENCE);
+ case SECURITY_PERMISSION_CONTROLPRINCIPAL:
+ return (1 << SECURITY_CONTROL_PRINCIPAL);
+ default:
+ return 0; // There is no mapping for this security permission flag
+ }
+ }
+
+ // We couldn't find an exact match for the permission, so we'll just return no flags.
+ return 0;
+}
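+
+// NOTE (illustrative, not part of the original sources): the special-flag bit
+// computed above feeds the fast path in PreCheck below. For example, for a
+// demand of ReflectionPermission(MemberAccess):
+//
+//   DWORD dwFlags = GetPermissionSpecialFlags(&orDemand); // 1 << REFLECTION_MEMBER_ACCESS
+//   if (dwFlags != 0 && SecurityStackWalk::HasFlagsOrFullyTrustedIgnoreMode(dwFlags))
+//       return TRUE;   // every frame is known to hold it; no stack walk needed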
+
+// Check whether a stackwalk is needed to evaluate the demand.
+BOOL SecurityStackWalk::PreCheck (OBJECTREF* orDemand, BOOL fDemandSet)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ // Track perfmon counters. Runtime security checks.
+ IncrementSecurityPerfCounter();
+
+ Thread* pThread = GetThread();
+ // The PLS optimization does not support overrides.
+ if (pThread->GetOverridesCount() > 0)
+ return FALSE;
+
+ DWORD dwDemandSpecialFlags = GetPermissionSpecialFlags(orDemand);
+
+ // If we were able to map the demand to an exact permission special flag, and we know that all code on
+ // this stack has been granted that permission, then we can take the fast path and allow the demand to
+ // succeed.
+ if (dwDemandSpecialFlags != 0)
+ {
+ return SecurityStackWalk::HasFlagsOrFullyTrustedIgnoreMode(dwDemandSpecialFlags);
+ }
+
+#ifdef FEATURE_PLS
+ // If we know there is only one AppDomain, then there is
+ // no need to walk the AppDomainStack structure.
+ if (pThread->GetNumAppDomainsOnThread() == 1)
+ {
+ ApplicationSecurityDescriptor* pASD = static_cast<ApplicationSecurityDescriptor*>(GetAppDomain()->GetSecurityDescriptor());
+ return pASD->CheckPLS(orDemand, dwDemandSpecialFlags, fDemandSet);
+ }
+
+ // Walk all AppDomains in the stack and check the PLS on each one of them
+ DWORD dwAppDomainIndex = 0;
+ pThread->InitDomainIteration(&dwAppDomainIndex);
+ _ASSERT(SystemDomain::System() && "SystemDomain not yet created!");
+ while (dwAppDomainIndex != 0) {
+ AppDomainFromIDHolder appDomain(pThread->GetNextDomainOnStack(&dwAppDomainIndex, NULL, NULL), FALSE);
+ if (appDomain.IsUnloaded())
+ // this appdomain has been unloaded, so just continue the loop
+ continue;
+
+ ApplicationSecurityDescriptor* pAppSecDesc = static_cast<ApplicationSecurityDescriptor*>(appDomain->GetSecurityDescriptor());
+ appDomain.Release();
+
+ if (!pAppSecDesc->CheckPLS(orDemand, dwDemandSpecialFlags, fDemandSet))
+ return FALSE;
+ }
+ return TRUE;
+#else
+ return FALSE;
+#endif // FEATURE_PLS
+}
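+
+// Sketch of the intended calling pattern (per the description of Check_PLS_SW in
+// securitystackwalk.h): try the cheap checks first and fall back to a full stack walk
+// only when they fail:
+//
+//   if (!SecurityStackWalk::PreCheck(&demand, fDemandSet))
+//       SecurityStackWalk::Check_StackWalk(eType, &demand, stackMark, fDemandSet);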
+
+//-----------------------------------------------------------+
+// Unmanaged version of CodeAccessSecurityEngine.Demand() in BCL
+// Any change there may have to be propagated here
+// This call has to be virtual, unlike DemandSet
+//-----------------------------------------------------------+
+void
+SecurityStackWalk::Demand(SecurityStackWalkType eType, OBJECTREF demand)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ if (QuickCheckForAllDemands(0))
+ return;
+
+ Check_PLS_SW_GC(FALSE, eType, demand, NULL);
+}
+
+//
+// Demand which succeeds if either a demand for a single well-known permission succeeds, or if restricted
+// member access is granted and a demand for the permission set of the target of the reflection operation
+// would have succeeded.
+//
+// Arguments:
+// dwPermission - Permission input to the demand (See SecurityPolicy.h)
+// psdTarget - Security descriptor for the target assembly
+//
+// Return Value:
+// None, a SecurityException is thrown if the demands fail.
+//
+// Notes:
+// This is used by Reflection to implement partial trust reflection, where demands should succeed if either
+// a single permission demand, such as MemberAccess, would succeed for compatibility reasons, or if a
+// demand for the permission set of the target assembly would succeed.
+//
+// The intent is to allow reflection in partial trust when reflecting within the same permission set. Note
+// that this is inexact, since in the face of RequestRefuse, the target assembly may fail the demand even
+// within the same permission set.
+
+// static
+void SecurityStackWalk::ReflectionTargetDemand(DWORD dwPermission,
+ AssemblySecurityDescriptor *psdTarget)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(dwPermission != 0);
+ PRECONDITION(CheckPointer(psdTarget));
+ }
+ CONTRACTL_END;
+
+ // If everybody on the stack has the special permission, the disjunctive demand will succeed.
+ if (QuickCheckForAllDemands(1 << dwPermission))
+ return;
+
+ // In the simple sandbox case, we know the disjunctive demand will succeed if:
+ // * we are granted restricted member access
+ // * we are not reflecting on a FullTrust assembly
+ // * every other AppDomain in the call stack is fully trusted
+ Thread *pCurrentThread = GetThread();
+ AppDomainStack appDomains = pCurrentThread->GetAppDomainStack();
+
+ if (QuickCheckForAllDemands(1 << REFLECTION_RESTRICTED_MEMBER_ACCESS) &&
+ !psdTarget->IsFullyTrusted() &&
+ pCurrentThread->GetDomain()->GetSecurityDescriptor()->IsHomogeneous() &&
+ !pCurrentThread->GetDomain()->GetSecurityDescriptor()->ContainsAnyRefusedPermissions() &&
+ appDomains.GetOverridesCount() == 0)
+ {
+ DWORD dwCurrentDomain;
+ appDomains.InitDomainIteration(&dwCurrentDomain);
+
+ bool fFullTrustStack = true;
+ while (dwCurrentDomain != 0 && fFullTrustStack)
+ {
+ AppDomainStackEntry *pCurrentDomain = appDomains.GetNextDomainEntryOnStack(&dwCurrentDomain);
+ fFullTrustStack = pCurrentDomain->m_domainID == pCurrentThread->GetDomain()->GetId() ||
+ pCurrentDomain->IsFullyTrustedWithNoStackModifiers();
+ }
+
+ if (fFullTrustStack)
+ return;
+ }
+
+ OBJECTREF objTargetRefusedSet;
+ OBJECTREF objTargetGrantSet = psdTarget->GetGrantedPermissionSet(&objTargetRefusedSet);
+
+ GCPROTECT_BEGIN(objTargetGrantSet);
+
+ MethodDescCallSite reflectionTargetDemandHelper(METHOD__SECURITY_ENGINE__REFLECTION_TARGET_DEMAND_HELPER);
+ ARG_SLOT ilargs[] =
+ {
+ static_cast<ARG_SLOT>(dwPermission),
+ ObjToArgSlot(objTargetGrantSet)
+ };
+
+ reflectionTargetDemandHelper.Call(ilargs);
+
+ GCPROTECT_END();
+}
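+
+// In effect, the managed helper evaluates the disjunction
+// "Demand(1 << dwPermission) OR Demand(grant set of the target)"; the access succeeds as
+// soon as either branch does, which is why the quick checks above may return early.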
+
+//
+// Similar to a standard reflection target demand, except that the demand is evaluated against a captured
+// compressed stack instead of the current call stack
+//
+// Arguments:
+// dwPermission - Permission input to the demand (See SecurityPolicy.h)
+// psdTarget - Security descriptor for the target assembly
+// securityContext - Compressed stack to perform the demand against
+//
+// Return Value:
+// None, a SecurityException is thrown if the demands fail.
+//
+
+// static
+void SecurityStackWalk::ReflectionTargetDemand(DWORD dwPermission,
+ AssemblySecurityDescriptor *psdTarget,
+ DynamicResolver * pAccessContext)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(dwPermission >= 0 && dwPermission < 32);
+ PRECONDITION(CheckPointer(psdTarget));
+ }
+ CONTRACTL_END;
+
+ struct
+ {
+ OBJECTREF objTargetRefusedSet;
+ OBJECTREF objTargetGrantSet;
+ OBJECTREF objAccessContextObject;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.objTargetGrantSet = psdTarget->GetGrantedPermissionSet(&(gc.objTargetRefusedSet));
+
+ _ASSERTE(pAccessContext->GetDynamicMethod()->IsLCGMethod());
+ gc.objAccessContextObject = ((LCGMethodResolver *)pAccessContext)->GetManagedResolver();
+
+ MethodDescCallSite reflectionTargetDemandHelper(METHOD__SECURITY_ENGINE__REFLECTION_TARGET_DEMAND_HELPER_WITH_CONTEXT);
+ ARG_SLOT ilargs[] =
+ {
+ static_cast<ARG_SLOT>(dwPermission),
+ ObjToArgSlot(gc.objTargetGrantSet),
+ ObjToArgSlot(gc.objAccessContextObject)
+ };
+
+ reflectionTargetDemandHelper.Call(ilargs);
+
+ GCPROTECT_END();
+}
+
+//-----------------------------------------------------------+
+// Special case of Demand(). This remembers the result of the
+// previous demand, and reuses it if new assemblies have not
+// been added since then
+//-----------------------------------------------------------+
+void SecurityStackWalk::SpecialDemand(SecurityStackWalkType eType, DWORD whatPermission, StackCrawlMark* stackMark)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ if (QuickCheckForAllDemands(1<<whatPermission))
+ {
+ // Track perfmon counters. Runtime security checks.
+ IncrementSecurityPerfCounter();
+ return;
+ }
+ OBJECTREF demand = NULL;
+ GCPROTECT_BEGIN(demand);
+
+ SecurityDeclarative::GetPermissionInstance(&demand, whatPermission);
+ Check_PLS_SW(IS_SPECIAL_FLAG_PERMISSION_SET(whatPermission), eType, &demand, stackMark);
+
+ GCPROTECT_END();
+
+}
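+
+// Example call pattern (as used by DemandGrantSet below when an assembly has a null,
+// i.e. fully trusted, grant set):
+//
+//   StackCrawlMark scm = LookForMyCaller;
+//   SpecialDemand(SSWT_IMPERATIVE_DEMAND, SECURITY_FULL_TRUST, &scm);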
+
+// Do a demand for a special permission type
+FCIMPL2(void, SecurityStackWalk::FcallSpecialDemand, DWORD whatPermission, StackCrawlMark* stackMark)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ SpecialDemand(SSWT_IMPERATIVE_DEMAND, whatPermission, stackMark);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+//-----------------------------------------------------------+
+// Unmanaged version of PermissionSet.Demand()
+//-----------------------------------------------------------+
+void SecurityStackWalk::DemandSet(SecurityStackWalkType eType, OBJECTREF demand)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+
+ // Though the PermissionSet may contain non-CAS permissions, we treat it as a CAS-only permission
+ // set at this point, so it's safe to do the full-trust quick check and return early if it succeeds
+ if (QuickCheckForAllDemands(0))
+ return;
+
+ // Do further checks (PLS/SW) only if this set contains CAS perms
+ if(((PERMISSIONSETREF)demand)->CheckedForNonCas() && !((PERMISSIONSETREF)demand)->ContainsCas())
+ return;
+
+ Check_PLS_SW_GC(TRUE, eType, demand, NULL);
+}
+
+void SecurityStackWalk::DemandSet(SecurityStackWalkType eType, PsetCacheEntry *pPCE, DWORD dwAction)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+
+ // Though the PermissionSet may contain non-CAS permissions, we treat it as a CAS-only permission
+ // set at this point, so it's safe to do the full-trust quick check and return early if it succeeds
+ if (QuickCheckForAllDemands(0))
+ return;
+
+ OBJECTREF refPermSet = pPCE->CreateManagedPsetObject (dwAction);
+
+ if(refPermSet != NULL)
+ {
+ // Do further checks (PLS/SW) only if this set contains CAS perms
+ if(((PERMISSIONSETREF)refPermSet)->CheckedForNonCas() && !((PERMISSIONSETREF)refPermSet)->ContainsCas())
+ return;
+
+ Check_PLS_SW_GC(TRUE, eType, refPermSet, NULL);
+
+ }
+}
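+
+// Note on this overload: the managed PermissionSet is only materialized via
+// CreateManagedPsetObject after the full-trust quick check has already failed, so fully
+// trusted stacks never pay for the object creation.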
+
+//
+// Demand for the grant set of an assembly, without any identity permissions
+//
+// Arguments:
+// psdAssembly - assembly security descriptor to demand the grant set of
+//
+// Return Value:
+// None, a SecurityException is thrown if the demands fail.
+//
+
+// static
+void SecurityStackWalk::DemandGrantSet(AssemblySecurityDescriptor *psdAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(psdAssembly));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF objRefusedSet;
+ OBJECTREF objGrantSet = psdAssembly->GetGrantedPermissionSet(&objRefusedSet);
+
+ GCPROTECT_BEGIN(objGrantSet);
+
+ if (OBJECTREFToObject(objGrantSet) != NULL)
+ {
+ MethodDescCallSite checkWithoutIdentityPermissions(METHOD__SECURITY_ENGINE__CHECK_GRANT_SET_HELPER);
+ ARG_SLOT ilargs[] =
+ {
+ ObjToArgSlot(objGrantSet)
+ };
+
+ checkWithoutIdentityPermissions.Call(ilargs);
+ }
+ else
+ {
+ // null grant set means full trust (mscorlib or anything created by it)
+ StackCrawlMark scm = LookForMyCaller;
+ SpecialDemand(SSWT_IMPERATIVE_DEMAND, SECURITY_FULL_TRUST, &scm);
+ }
+
+ GCPROTECT_END();
+}
+
+//-----------------------------------------------------------+
+// L I N K /I N H E R I T A N C E T I M E C H E C K
+//-----------------------------------------------------------+
+void SecurityStackWalk::LinkOrInheritanceCheck(IAssemblySecurityDescriptor *pSecDesc, OBJECTREF refDemands, Assembly* pAssembly, CorDeclSecurity action)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ // MSCORLIB is not subject to inheritance checks
+ if (pAssembly->IsSystem())
+ return;
+
+ struct _gc {
+ OBJECTREF refDemands;
+ OBJECTREF refExposedAssemblyObject;
+ OBJECTREF refRefused;
+ OBJECTREF refGranted;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ gc.refDemands = refDemands;
+ gc.refExposedAssemblyObject = NULL;
+
+ GCPROTECT_BEGIN(gc);
+
+
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
+
+ // We only do LinkDemands if the assembly is not fully trusted or if the demand contains permissions that don't implement IUnrestricted
+ if (!pAssembly->GetSecurityDescriptor()->IsFullyTrusted())
+ {
+ if (pAssembly)
+ gc.refExposedAssemblyObject = pAssembly->GetExposedObject();
+
+ if (!pSecDesc->IsFullyTrusted())
+ {
+ MethodDescCallSite checkSetHelper(METHOD__SECURITY_ENGINE__CHECK_SET_HELPER);
+ gc.refGranted = pSecDesc->GetGrantedPermissionSet(&(gc.refRefused));
+ ARG_SLOT ilargs[7];
+ ilargs[0] = ObjToArgSlot(NULL);
+ ilargs[1] = ObjToArgSlot(gc.refGranted);
+ ilargs[2] = ObjToArgSlot(gc.refRefused);
+ ilargs[3] = ObjToArgSlot(gc.refDemands);
+ ilargs[4] = PtrToArgSlot(NULL);
+ ilargs[5] = ObjToArgSlot(gc.refExposedAssemblyObject);
+ ilargs[6] = (ARG_SLOT)action;
+ checkSetHelper.Call(ilargs);
+ }
+ }
+ GCPROTECT_END();
+}
+
+
+//-----------------------------------------------------------+
+// S T A C K C O M P R E S S I O N FCALLS
+//-----------------------------------------------------------+
+
+
+
+#ifdef FEATURE_COMPRESSEDSTACK
+FCIMPL2(Object*, SecurityStackWalk::EcallGetDelayedCompressedStack, StackCrawlMark* stackMark, CLR_BOOL fWalkStack)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF rv = NULL;
+
+ // No need to GC-protect stackMark as it is a byref on the stack
+ _ASSERTE(PVOID(stackMark) < GetThread()->GetCachedStackBase() &&
+ PVOID(stackMark) > PVOID(&rv));
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ rv = StackCompressor::GetCompressedStack(stackMark, fWalkStack);
+
+ HELPER_METHOD_FRAME_END();
+ return OBJECTREFToObject(rv);
+
+}
+FCIMPLEND
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+
+#ifdef FEATURE_COMPRESSEDSTACK
+BOOL SecurityStackWalk::MethodIsAnonymouslyHostedDynamicMethodWithCSToEvaluate(MethodDesc* pMeth)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (!pMeth->IsLCGMethod())
+ {
+ return FALSE;
+ }
+ Assembly* pAssembly = pMeth->GetAssembly();
+ AppDomain* pAppDomain = GetAppDomain();
+ if(pAssembly != NULL && pAppDomain != NULL && pAssembly->GetDomainAssembly(pAppDomain) == pAppDomain->GetAnonymouslyHostedDynamicMethodsAssembly())
+ {
+ GCX_COOP();
+ DynamicResolver::SecurityControlFlags dwSecurityFlags = DynamicResolver::Default;
+ TypeHandle dynamicOwner; // not used
+ pMeth->AsDynamicMethodDesc()->GetLCGMethodResolver()->GetJitContextCoop(&dwSecurityFlags, &dynamicOwner);
+ if((dwSecurityFlags & DynamicResolver::CanSkipCSEvaluation) == 0)
+ {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
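+
+// Note: this predicate relies on the flag the resolver recorded when the LCG method was
+// created; only when DynamicResolver::CanSkipCSEvaluation was not set does an anonymously
+// hosted dynamic method force a compressed-stack evaluation.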
+#endif // FEATURE_COMPRESSEDSTACK
diff --git a/src/vm/securitystackwalk.h b/src/vm/securitystackwalk.h
new file mode 100644
index 0000000000..5471da2958
--- /dev/null
+++ b/src/vm/securitystackwalk.h
@@ -0,0 +1,295 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+
+
+#ifndef __SECURITYSTACKWALK_H__
+#define __SECURITYSTACKWALK_H__
+
+#include "common.h"
+
+#include "object.h"
+#include "util.hpp"
+#include "fcall.h"
+#include "perfcounters.h"
+#include "security.h"
+#include "holder.h"
+#ifdef FEATURE_REMOTING
+#include "appdomainhelper.h"
+#endif
+
+#ifdef FEATURE_COMPRESSEDSTACK
+class NewCompressedStack;
+class DomainCompressedStack;
+#endif // FEATURE_COMPRESSEDSTACK
+class ApplicationSecurityDescriptor;
+class DemandStackWalk;
+class CountOverridesStackWalk;
+class AssertStackWalk;
+struct TokenDeclActionInfo;
+
+//-----------------------------------------------------------
+// SecurityStackWalk implements all the native methods
+// for the managed class System.Security.CodeAccessSecurityEngine.
+//-----------------------------------------------------------
+class SecurityStackWalk
+{
+protected:
+
+ SecurityStackWalkType m_eStackWalkType;
+ DWORD m_dwFlags;
+
+public:
+#ifdef FEATURE_REMOTING
+ MarshalCache m_objects;
+#else //!FEATURE_REMOTING
+ struct ObjectCache
+ {
+ struct gc
+ {
+ OBJECTREF object1;
+ OBJECTREF object2;
+ }
+ m_sGC;
+ AppDomain* m_pOriginalDomain;
+
+#ifndef DACCESS_COMPILE
+ OBJECTREF GetObjects(AppDomain *pDomain, OBJECTREF *porObject2)
+ {
+ _ASSERTE(pDomain == ::GetAppDomain());
+ _ASSERTE(m_pOriginalDomain == ::GetAppDomain());
+ *porObject2 = m_sGC.object2;
+ return m_sGC.object1;
+ };
+ OBJECTREF GetObject(AppDomain *pDomain)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pDomain == ::GetAppDomain());
+ _ASSERTE(m_pOriginalDomain == ::GetAppDomain());
+ return m_sGC.object1;
+ };
+ void SetObject(OBJECTREF orObject)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pOriginalDomain = ::GetAppDomain();
+ m_sGC.object1 = orObject;
+ }
+
+ // Set the original values of both cached objects.
+ void SetObjects(OBJECTREF orObject1, OBJECTREF orObject2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pOriginalDomain = ::GetAppDomain();
+ m_sGC.object1 = orObject1;
+ m_sGC.object2 = orObject2;
+ }
+
+ void UpdateObject(AppDomain *pDomain, OBJECTREF orObject)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pDomain == ::GetAppDomain());
+ _ASSERTE(m_pOriginalDomain == ::GetAppDomain());
+ m_sGC.object1 = orObject;
+ }
+#endif //!DACCESS_COMPILE
+ ObjectCache()
+ {
+ m_pOriginalDomain = NULL;
+ ZeroMemory(&m_sGC,sizeof(m_sGC));
+ }
+
+ } m_objects;
+#endif //!FEATURE_REMOTING
+
+ SecurityStackWalk(SecurityStackWalkType eType, DWORD flags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_eStackWalkType = eType;
+ m_dwFlags = flags;
+ }
+
+ // ----------------------------------------------------
+ // FCalls
+ // ----------------------------------------------------
+
+ // FCall wrapper for CheckInternal
+ static FCDECL3(void, Check, Object* permOrPermSetUNSAFE, StackCrawlMark* stackMark, CLR_BOOL isPermSet);
+ static void CheckFramed(Object* permOrPermSetUNSAFE, StackCrawlMark* stackMark, CLR_BOOL isPermSet);
+
+ // FCALL wrapper for quickcheckforalldemands
+ static FCDECL0(FC_BOOL_RET, FCallQuickCheckForAllDemands);
+ static FCDECL0(FC_BOOL_RET, FCallAllDomainsHomogeneousWithNoStackModifiers);
+
+
+ static FCDECL3(void, GetZoneAndOrigin, Object* pZoneListUNSAFE, Object* pOriginListUNSAFE, StackCrawlMark* stackMark);
+
+ // Do an imperative assert. (Check for the permission and return the SecurityObject for the first frame)
+ static FCDECL4(Object*, CheckNReturnSO, Object* permTokenUNSAFE, Object* permUNSAFE, StackCrawlMark* stackMark, INT32 create);
+
+
+ // Do a demand for a special permission type
+ static FCDECL2(void, FcallSpecialDemand, DWORD whatPermission, StackCrawlMark* stackMark);
+
+ // ----------------------------------------------------
+ // Checks
+ // ----------------------------------------------------
+
+ // Methods for checking grant and refused sets
+
+public:
+ void CheckPermissionAgainstGrants(OBJECTREF refCS, OBJECTREF refGrants, OBJECTREF refRefused, AppDomain *pDomain, MethodDesc* pMethod, Assembly* pAssembly);
+
+protected:
+ void CheckSetAgainstGrants(OBJECTREF refCS, OBJECTREF refGrants, OBJECTREF refRefused, AppDomain *pDomain, MethodDesc* pMethod, Assembly* pAssembly);
+
+ void GetZoneAndOriginGrants(OBJECTREF refCS, OBJECTREF refGrants, OBJECTREF refRefused, AppDomain *pDomain, MethodDesc* pMethod, Assembly* pAssembly);
+
+ // Methods for checking stack modifiers
+ BOOL CheckPermissionAgainstFrameData(OBJECTREF refFrameData, AppDomain* pDomain, MethodDesc* pMethod);
+ BOOL CheckSetAgainstFrameData(OBJECTREF refFrameData, AppDomain* pDomain, MethodDesc* pMethod);
+
+public:
+ // ----------------------------------------------------
+ // CAS Actions
+ // ----------------------------------------------------
+
+ // Native version of CodeAccessPermission.Demand()
+ // Callers:
+ // <Currently unused>
+ static void Demand(SecurityStackWalkType eType, OBJECTREF demand);
+
+ // Demand all of the permissions granted to an assembly, with the exception of any identity permissions
+ static void DemandGrantSet(AssemblySecurityDescriptor *psdAssembly);
+
+ // Native version of PermissionSet.Demand()
+ // Callers:
+ // CanAccess (ReflectionInvocation)
+ // ReflectionSerialization::GetSafeUninitializedObject
+#ifdef FEATURE_APTCA
+ // SecurityDeclarative::DoUntrustedCallerChecks
+#endif // FEATURE_APTCA
+ static void DemandSet(SecurityStackWalkType eType, OBJECTREF demand);
+
+ // Native version of PermissionSet.Demand() that delays instantiating the PermissionSet object
+ // Callers:
+ // InvokeDeclarativeActions
+ static void DemandSet(SecurityStackWalkType eType, PsetCacheEntry *pPCE, DWORD dwAction);
+
+
+ static void ReflectionTargetDemand(DWORD dwPermission, AssemblySecurityDescriptor *psdTarget);
+
+ static void ReflectionTargetDemand(DWORD dwPermission,
+ AssemblySecurityDescriptor *psdTarget,
+ DynamicResolver * pAccessContext);
+
+ // Optimized demand for a well-known permission
+ // Callers:
+ // SecurityDeclarative::DoDeclarativeActions
+ // Security::CheckLinkDemandAgainstAppDomain
+ // TryDemand (ReflectionInvocation)
+ // CanAccess (ReflectionInvocation)
+ // ReflectionInvocation::CanValueSpecialCast
+ // RuntimeTypeHandle::CreateInstance
+ // RuntimeMethodHandle::InvokeMethod_Internal
+ // InvokeArrayConstructor (ReflectionInvocation)
+ // ReflectionInvocation::InvokeDispMethod
+ // COMArrayInfo::CreateInstance
+ // COMArrayInfo::CreateInstanceEx
+ // COMDelegate::BindToMethodName
+ // InvokeUtil::CheckArg
+ // InvokeUtil::ValidField
+ // RefSecContext::CallerHasPerm
+ // MngStdItfBase::ForwardCallToManagedView
+ // ObjectClone::Clone
+ static void SpecialDemand(SecurityStackWalkType eType, DWORD whatPermission, StackCrawlMark* stackMark = NULL);
+
+ // ----------------------------------------------------
+ // Compressed Stack
+ // ----------------------------------------------------
+public:
+#ifdef FEATURE_COMPRESSEDSTACK
+ static FCDECL2(Object*, EcallGetDelayedCompressedStack, StackCrawlMark* stackMark, CLR_BOOL fWalkStack);
+ static FCDECL1(VOID, FcallDestroyDelayedCompressedStack, void *compressedStack);
+ static COMPRESSEDSTACKREF GetCSFromContextTransitionFrame(Frame *pFrame);
+ static BOOL IsContextTransitionFrameWithCS(Frame *pFrame)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ return (GetCSFromContextTransitionFrame(pFrame) != NULL);
+ }
+ static BOOL MethodIsAnonymouslyHostedDynamicMethodWithCSToEvaluate(MethodDesc* pMeth);
+
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+
+#ifndef DACCESS_COMPILE
+ FORCEINLINE static BOOL HasFlagsOrFullyTrustedIgnoreMode (DWORD flags);
+ FORCEINLINE static BOOL HasFlagsOrFullyTrusted (DWORD flags);
+#endif // #ifndef DACCESS_COMPILE
+
+public:
+ // Perf Counters
+ FORCEINLINE static VOID IncrementSecurityPerfCounter()
+ {
+ CONTRACTL {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+ COUNTER_ONLY(GetPerfCounters().m_Security.cTotalRTChecks++);
+ }
+
+ // ----------------------------------------------------
+ // Misc
+ // ----------------------------------------------------
+ static bool IsSpecialRunFrame(MethodDesc *pMeth);
+
+ static BOOL SkipAndFindFunctionInfo(INT32, MethodDesc**, OBJECTREF**, AppDomain **ppAppDomain = NULL);
+ static BOOL SkipAndFindFunctionInfo(StackCrawlMark*, MethodDesc**, OBJECTREF**, AppDomain **ppAppDomain = NULL);
+
+ // Check the provided demand set against the provided grant/refused set
+ static void CheckSetHelper(OBJECTREF *prefDemand,
+ OBJECTREF *prefGrant,
+ OBJECTREF *prefDenied,
+ AppDomain *pGrantDomain,
+ MethodDesc *pMethod,
+ OBJECTREF *pAssembly,
+ CorDeclSecurity action);
+
+ // Check for Link/Inheritance CAS permissions
+ static void LinkOrInheritanceCheck(IAssemblySecurityDescriptor *pSecDesc, OBJECTREF refDemands, Assembly* pAssembly, CorDeclSecurity action);
+
+private:
+ FORCEINLINE static BOOL QuickCheckForAllDemands(DWORD flags);
+
+ // Tries to avoid unnecessary demands
+ static BOOL PreCheck(OBJECTREF* orDemand, BOOL fDemandSet = FALSE);
+ static DWORD GetPermissionSpecialFlags (OBJECTREF* orDemand);
+
+ // Does a demand for a CodeAccessPermission: first does PreCheck; if PreCheck fails, calls Check_StackWalk
+ static void Check_PLS_SW(BOOL isPermSet, SecurityStackWalkType eType, OBJECTREF* permOrPermSet, StackCrawlMark* stackMark);
+
+ // Calls into Check_PLS_SW after GC-protecting "permOrPermSet"
+ static void Check_PLS_SW_GC(BOOL isPermSet, SecurityStackWalkType eType, OBJECTREF permOrPermSet, StackCrawlMark* stackMark);
+
+ // Walks the stack for a CodeAccessPermission demand (assumes PreCheck was already called)
+ static void Check_StackWalk(SecurityStackWalkType eType, OBJECTREF* pPerm, StackCrawlMark* stackMark, BOOL isPermSet);
+
+ // Walk the stack and count all the frame descriptors with an Assert, Deny, or PermitOnly
+ static VOID UpdateOverridesCount();
+};
+
+
+#endif /* __SECURITYSTACKWALK_H__ */
+
diff --git a/src/vm/securitytransparentassembly.cpp b/src/vm/securitytransparentassembly.cpp
new file mode 100644
index 0000000000..24a56398ad
--- /dev/null
+++ b/src/vm/securitytransparentassembly.cpp
@@ -0,0 +1,1849 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//--------------------------------------------------------------------------
+// securityTransparentAssembly.cpp
+//
+// Implementation for transparent code feature
+//
+
+
+//--------------------------------------------------------------------------
+
+
+#include "common.h"
+#include "field.h"
+#include "securitydeclarative.h"
+#include "security.h"
+#include "customattribute.h"
+#include "securitytransparentassembly.h"
+#include "securitymeta.h"
+#include "typestring.h"
+#include "comdelegate.h"
+
+#if defined(FEATURE_PREJIT)
+#include "compile.h"
+#endif
+
+#ifdef _DEBUG
+//
+// In debug builds of the CLR, we support a mode where transparency errors are not enforced with exceptions; instead
+// they are written to the CLR debug log. This allows us to migrate tests from the v2 to the v4 transparency model by
+// allowing test runs to continue to the end of the run, and keeping a log file of which assemblies need migration.
+//
+
+// static
+void SecurityTransparent::LogTransparencyError(Assembly *pAssembly, const LPCSTR szError)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(CheckPointer(szError));
+ PRECONDITION(g_pConfig->LogTransparencyErrors());
+ }
+ CONTRACTL_END;
+
+ const SString &strAssemblyName = pAssembly->GetManifestModule()->GetPath();
+
+ LOG((LF_SECURITY,
+ LL_INFO1000,
+ "Security Transparency Violation: Assembly '%S': %s\n",
+ strAssemblyName.GetUnicode(),
+ szError));
+}
+
+// static
+void SecurityTransparent::LogTransparencyError(MethodTable *pMT, const LPCSTR szError)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(szError));
+ PRECONDITION(g_pConfig->LogTransparencyErrors());
+ }
+ CONTRACTL_END;
+
+ Assembly *pAssembly = pMT->GetAssembly();
+ const SString &strAssemblyName = pAssembly->GetManifestModule()->GetPath();
+
+ LOG((LF_SECURITY,
+ LL_INFO1000,
+ "Security Transparency Violation: Assembly '%S' - Type '%s': %s\n",
+ strAssemblyName.GetUnicode(),
+ pMT->GetDebugClassName(),
+ szError));
+}
+
+// static
+void SecurityTransparent::LogTransparencyError(MethodDesc *pMD, const LPCSTR szError, MethodDesc *pTargetMD /* = NULL */)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(CheckPointer(szError));
+ PRECONDITION(g_pConfig->LogTransparencyErrors());
+ }
+ CONTRACTL_END;
+
+ Assembly *pAssembly = pMD->GetAssembly();
+ const SString &strAssemblyName = pAssembly->GetManifestModule()->GetPath();
+
+ if (pTargetMD == NULL)
+ {
+ LOG((LF_SECURITY,
+ LL_INFO1000,
+ "Security Transparency Violation: Assembly '%S' - Method '%s::%s': %s\n",
+ strAssemblyName.GetUnicode(),
+ pMD->m_pszDebugClassName,
+ pMD->m_pszDebugMethodName,
+ szError));
+ }
+ else
+ {
+ Assembly *pTargetAssembly = pTargetMD->GetAssembly();
+ const SString &strTargetAssemblyName = pTargetAssembly->GetManifestModule()->GetPath();
+
+ LOG((LF_SECURITY,
+ LL_INFO1000,
+ "Security Transparency Violation: Assembly '%S' - Method '%s::%s' - Target Assembly '%S': %s\n",
+ strAssemblyName.GetUnicode(),
+ pMD->m_pszDebugClassName,
+ pMD->m_pszDebugMethodName,
+ strTargetAssemblyName.GetUnicode(),
+ szError));
+ }
+}
+
+#endif // _DEBUG
+
+// There are a few places we throw transparency method access exceptions that aren't "real"
+// method access exceptions - such as unverifiable code in a transparent assembly, and having a critical
+// attribute on a transparent method. Those continue to use the one-MethodDesc form of throwing -
+// everything else should use the standard ::ThrowMethodAccessException call
+
+// static
+void DECLSPEC_NORETURN SecurityTransparent::ThrowMethodAccessException(MethodDesc* pMD,
+ DWORD dwMessageId /* = IDS_CRITICAL_METHOD_ACCESS_DENIED */)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ // throw method access exception
+ StackSString strMethod;
+ TypeString::AppendMethod(strMethod, pMD, pMD->GetClassInstantiation(), TypeString::FormatNamespace | TypeString::FormatAngleBrackets| TypeString::FormatSignature);
+ COMPlusThrowHR(COR_E_METHODACCESS, dwMessageId, strMethod.GetUnicode());
+}
+
+// static
+void DECLSPEC_NORETURN SecurityTransparent::ThrowTypeLoadException(MethodDesc* pMethod, DWORD dwMessageID /* = IDS_METHOD_INHERITANCE_RULES_VIOLATED */)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pMethod));
+ }
+ CONTRACTL_END;
+
+ // Throw an exception here
+ StackSString strMethod;
+ StackScratchBuffer buffer;
+ TypeString::AppendMethod(strMethod, pMethod, pMethod->GetClassInstantiation(), TypeString::FormatNamespace | TypeString::FormatAngleBrackets | TypeString::FormatSignature);
+ pMethod->GetAssembly()->ThrowTypeLoadException(strMethod.GetUTF8(buffer), dwMessageID);
+}
+
+// static
+void DECLSPEC_NORETURN SecurityTransparent::ThrowTypeLoadException(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ // Throw an exception here
+ StackScratchBuffer buffer;
+ SString strType;
+ TypeString::AppendType(strType, TypeHandle(pMT), TypeString::FormatNamespace | TypeString::FormatAngleBrackets );
+ pMT->GetAssembly()->ThrowTypeLoadException(strType.GetUTF8(buffer), IDS_TYPE_INHERITANCE_RULES_VIOLATED);
+}
+
+static BOOL IsTransparentCallerAllowed(MethodDesc *pCallerMD, MethodDesc *pCalleeMD, SecurityTransparencyError *pError)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pCallerMD));
+ PRECONDITION(CheckPointer(pCalleeMD));
+ PRECONDITION(CheckPointer(pError, NULL_OK));
+ PRECONDITION(pCallerMD->IsTransparent());
+ }
+ CONTRACTL_END;
+
+ // If the target is critical, and not treat as safe, then we cannot allow the call
+ if (Security::IsMethodCritical(pCalleeMD) && !Security::IsMethodSafeCritical(pCalleeMD))
+ {
+ if (pError != NULL)
+ {
+ *pError = SecurityTransparencyError_CallCriticalMethod;
+ }
+
+ return FALSE;
+ }
+
+ return TRUE;
+}
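+
+// For example, a [SecurityTransparent] method calling a [SecurityCritical] (and not
+// [SecuritySafeCritical]) method fails this check with
+// SecurityTransparencyError_CallCriticalMethod; a safe-critical callee is the sanctioned
+// bridge and is allowed.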
+
+//---------------------------------------------------------------------------------------
+//
+// Convert the critical member to a LinkDemand for FullTrust, and convert that LinkDemand to a
+// full demand. If the resulting demand succeeds against the current call stack, this method returns;
+// otherwise a security exception is thrown.
+//
+// Arguments:
+// pCallerMD - The method calling the critical method
+//
+
+static void ConvertCriticalMethodToLinkDemand(MethodDesc *pCallerMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pCallerMD));
+ PRECONDITION(pCallerMD->IsTransparent());
+ PRECONDITION(pCallerMD->GetAssembly()->GetSecurityTransparencyBehavior()->CanTransparentCodeCallLinkDemandMethods());
+ }
+ CONTRACTL_END;
+
+#if !defined(CROSSGEN_COMPILE) && defined(FEATURE_CAS_POLICY)
+ if (NingenEnabled())
+ return;
+
+ GCX_COOP();
+
+ OBJECTREF permSet = NULL;
+ GCPROTECT_BEGIN(permSet);
+
+ Security::GetPermissionInstance(&permSet, SECURITY_FULL_TRUST);
+ Security::DemandSet(SSWT_LATEBOUND_LINKDEMAND, permSet);
+
+ GCPROTECT_END();
+#endif // !CROSSGEN_COMPILE && FEATURE_CAS_POLICY
+}
+
+// static
+BOOL SecurityTransparent::CheckCriticalAccess(AccessCheckContext* pContext,
+ MethodDesc* pOptionalTargetMethod,
+ FieldDesc* pOptionalTargetField,
+ MethodTable * pOptionalTargetType)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pContext));
+ }
+ CONTRACTL_END;
+
+ // At most one of these should be non-NULL
+ _ASSERTE(1 >= ((pOptionalTargetMethod ? 1 : 0) +
+ (pOptionalTargetField ? 1 : 0) +
+ (pOptionalTargetType ? 1 : 0)));
+
+ // okay caller is transparent, additional checks needed
+ BOOL fIsTargetCritical = FALSE; // check if target is critical
+ BOOL fIsTargetSafe = FALSE; // check if target is marked safe
+ Assembly *pTargetAssembly = NULL;
+
+ if (pOptionalTargetMethod != NULL)
+ {
+ fIsTargetCritical = IsMethodCritical(pOptionalTargetMethod);
+ fIsTargetSafe = IsMethodSafeCritical(pOptionalTargetMethod);
+ pTargetAssembly = pOptionalTargetMethod->GetAssembly();
+ }
+ else if (pOptionalTargetField != NULL)
+ {
+ FieldSecurityDescriptor fieldSecurityDescriptor(pOptionalTargetField);
+ fIsTargetCritical = fieldSecurityDescriptor.IsCritical();
+ fIsTargetSafe = fieldSecurityDescriptor.IsTreatAsSafe();
+ pTargetAssembly = pOptionalTargetField->GetModule()->GetAssembly();
+ }
+ else if (pOptionalTargetType != NULL)
+ {
+ fIsTargetCritical = IsTypeAllCritical(pOptionalTargetType); // check for only all critical classes
+ fIsTargetSafe = IsTypeSafeCritical(pOptionalTargetType);
+ pTargetAssembly = pOptionalTargetType->GetAssembly();
+ }
+
+ // If the target is transparent or safe critical, then no further checks are needed. Otherwise, if a
+ // legacy caller is targeting a new critical method, we may be able to allow the call by converting
+ // the critical method to a LinkDemand for FullTrust and converting the LinkDemand to a full demand.
+ //
+ // This allows for the case where a v2 transparent assembly called a method that was protected by a
+ // LinkDemand in v2 and followed our suggested path of converting to being critical in v4. By treating
+ // the v4 critical method as if it were protected with a LinkDemand instead, we're simply reversing this
+ // conversion to provide compatible behavior with legacy binaries
+ if (!fIsTargetCritical || fIsTargetSafe)
+ {
+ return TRUE;
+ }
+
+ if (pContext->IsCalledFromInterop())
+ return TRUE;
+
+ MethodDesc* pCurrentMD = pContext->GetCallerMethod();
+ MethodTable* pCurrentMT = pContext->GetCallerMT();
+
+ // The call is not from interop, but the caller is NULL; this can only happen
+ // when we are checking from a Type/Assembly.
+ if (pCurrentMD != NULL)
+ {
+ // TODO: need to probably CheckCastToClass as well..
+ if (!IsMethodTransparent(pCurrentMD))
+ {
+ // The caller is critical (not transparent), so access is allowed.
+ return TRUE;
+ }
+
+#ifdef FEATURE_CORECLR
+ // On the coreCLR, a method can be transparent even if the containing type is marked Critical.
+ // This will happen when that method is an override of a base transparent method, and the type that
+ // contains the override is marked Critical - and that is the only case where it can happen.
+ // This particular case is not a failure. To state this another way, from a security transparency perspective,
+ // a method will always have access to the type that it is a member of.
+ if (pOptionalTargetType == pCurrentMD->GetMethodTable())
+ {
+ return TRUE;
+ }
+#endif // FEATURE_CORECLR
+
+ // an attached profiler may wish to have these checks suppressed
+ if (Security::BypassSecurityChecksForProfiler(pCurrentMD))
+ {
+ return TRUE;
+ }
+
+ if (pTargetAssembly != NULL &&
+ pTargetAssembly->GetSecurityTransparencyBehavior()->CanCriticalMembersBeConvertedToLinkDemand() &&
+ pCurrentMD->GetAssembly()->GetSecurityTransparencyBehavior()->CanTransparentCodeCallLinkDemandMethods())
+ {
+ // Convert the critical member to a LinkDemand for FullTrust, and convert that LinkDemand to a
+ // full demand. If the resulting full demand for FullTrust is successful, then we'll allow the access
+ // to the critical method to succeed
+ ConvertCriticalMethodToLinkDemand(pCurrentMD);
+ return TRUE;
+ }
+ }
+ else if (pCurrentMT != NULL)
+ {
+ if (!IsTypeTransparent(pCurrentMT))
+ {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+// Determine if a method is allowed to perform a CAS assert within the transparency rules. Generally, only
+// critical code may assert. However, for compatibility with v2.0 we allow asserts from transparent code if
+// the following criteria are met:
+// 1. The assembly is a true v2.0 binary, and is not just using v2.0 transparency rules via the
+// SecurityRuleSet.Level1 annotation.
+// 2. The assembly is agnostic to transparency (that is, if it were fully trusted it would be
+// opportunistically critical).
+// 3. We are currently in a heterogeneous AppDomain.
+//
+// This compensates for the fact that while partial trust code could have asserted in v2.0, it can no longer
+// assert in v4.0 as we force it to be transparent. While the v2.0 transparency rules still don't allow
+// asserting, assemblies that would have been critical in v2.0 are allowed to continue asserting in v4.0.
+
+// static
+BOOL SecurityTransparent::IsAllowedToAssert(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ // Critical code is always allowed to assert
+ if (IsMethodCritical(pMD))
+ {
+ return TRUE;
+ }
+
+#ifdef FEATURE_CORECLR
+ // On CoreCLR only critical code may ever assert - there are no compatibility reasons to allow
+ // transparent asserts.
+ return FALSE;
+#else // !FEATURE_CORECLR
+ // We must be in a heterogeneous AppDomain for transparent asserts to work
+ if (GetAppDomain()->GetSecurityDescriptor()->IsHomogeneous())
+ {
+ return FALSE;
+ }
+
+ ModuleSecurityDescriptor *pMSD = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pMD->GetAssembly());
+
+ // Only assemblies whose version requires them to use legacy transparency (rather than assemblies which
+ // get legacy transparency via RuleSet.Level1) can assert from transparent code
+ if (!pMSD->AssemblyVersionRequiresLegacyTransparency())
+ {
+ return FALSE;
+ }
+
+ // Finally, the assembly must not have had any of the transparency attributes on it
+ const TokenSecurityDescriptorFlags transparencyAwareFlags =
+ TokenSecurityDescriptorFlags_AllCritical | // [SecurityCritical(SecurityCriticalScope.All)]
+ TokenSecurityDescriptorFlags_Critical | // [SecurityCritical]
+ TokenSecurityDescriptorFlags_SafeCritical | // [SecuritySafeCritical]
+ TokenSecurityDescriptorFlags_Transparent | // [SecurityTransparent]
+ TokenSecurityDescriptorFlags_TreatAsSafe; // [SecurityTreatAsSafe]
+ TokenSecurityDescriptorFlags moduleAttributes = pMSD->GetTokenFlags();
+ if ((moduleAttributes & transparencyAwareFlags) != TokenSecurityDescriptorFlags_None)
+ {
+ return FALSE;
+ }
+
+ return TRUE;
+#endif // FEATURE_CORECLR
+}
+
+// Functor class to aid in determining if a type requires a transparency check
+class TypeRequiresTransparencyCheckFunctor
+{
+private:
+ bool m_requiresTransparencyCheck;
+ bool m_checkForLinkDemands;
+
+public:
+ TypeRequiresTransparencyCheckFunctor(bool checkForLinkDemands) :
+ m_requiresTransparencyCheck(false),
+ m_checkForLinkDemands(checkForLinkDemands)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ TypeRequiresTransparencyCheckFunctor(const TypeRequiresTransparencyCheckFunctor &other); // not implemented
+
+ bool RequiresTransparencyCheck() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_requiresTransparencyCheck;
+ }
+
+ void operator()(MethodTable *pMT)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // We only need to do a check if so far none of the other component types required a transparency
+ // check. Critical, but not safe critical, types require transparency checks of their callers.
+ if (!m_requiresTransparencyCheck)
+ {
+ m_requiresTransparencyCheck = Security::IsTypeCritical(pMT) && !Security::IsTypeSafeCritical(pMT) &&
+ (!m_checkForLinkDemands || pMT->GetAssembly()->GetSecurityTransparencyBehavior()->CanCriticalMembersBeConvertedToLinkDemand());
+ }
+ }
+};
+
+// Determine if accessing a type requires doing a transparency check - this checks to see if the type
+// itself, or any of its generic variables are security critical.
+
+// static
+bool SecurityTransparent::TypeRequiresTransparencyCheck(TypeHandle type, bool checkForLinkDemands)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ TypeRequiresTransparencyCheckFunctor typeChecker(checkForLinkDemands);
+ type.ForEachComponentMethodTable(typeChecker);
+ return typeChecker.RequiresTransparencyCheck();
+}
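+
+// For example, an instantiation such as SomeType<T> over a critical (and not safe-critical)
+// T requires a transparency check even when SomeType<> itself is transparent, because the
+// functor visits every component MethodTable of the type.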
+
+CorInfoCanSkipVerificationResult SecurityTransparent::JITCanSkipVerification(MethodDesc * pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ /* XXX Fri 1/12/2007
+ * This code is cloned from security.inl!Security::CanSkipVerification(MethodDesc, BOOL).
+ */
+ // Special case the System.Object..ctor:
+ // System.Object..ctor is not verifiable according to current verifier rules (that require to call the
+ // base class ctor). But since we want System.Object..ctor() to be marked transparent, it cannot be
+ // unverifiable (telesto security rules prohibit transparent code from being unverifiable)
+
+#ifndef DACCESS_COMPILE
+ if (g_pObjectCtorMD == pMD)
+ return CORINFO_VERIFICATION_CAN_SKIP;
+#endif //!DACCESS_COMPILE
+
+ // If a profiler is attached, we may want to bypass verification as well
+ if (Security::BypassSecurityChecksForProfiler(pMD))
+ {
+ return CORINFO_VERIFICATION_CAN_SKIP;
+ }
+
+ BOOL hasSkipVerificationPermisson = false;
+ DomainAssembly * pDomainAssembly = pMD->GetAssembly()->GetDomainAssembly();
+ hasSkipVerificationPermisson = Security::CanSkipVerification(pDomainAssembly);
+
+ CorInfoCanSkipVerificationResult canSkipVerif = hasSkipVerificationPermisson ? CORINFO_VERIFICATION_CAN_SKIP : CORINFO_VERIFICATION_CANNOT_SKIP;
+
+#ifdef FEATURE_CORECLR
+ //For Profile assemblies, do not verify any code. All Transparent methods are guaranteed to be
+ //verifiable (verified by tests). Therefore, skip all verification on platform assemblies.
+
+ //All profile assemblies have skip verification.
+ _ASSERTE(!(pDomainAssembly->GetFile()->IsProfileAssembly() && !hasSkipVerificationPermisson));
+
+#ifdef FEATURE_CORESYSTEM
+ //
+ // On Phone, at runtime, enable verification for user code that will run as transparent:
+ // - All Mango applications
+ // - Apollo applications with transparent code
+ //
+ if (hasSkipVerificationPermisson && !pDomainAssembly->GetFile()->IsProfileAssembly())
+ {
+ if (SecurityTransparent::IsMethodTransparent(pMD))
+ {
+ canSkipVerif = CORINFO_VERIFICATION_CANNOT_SKIP;
+ }
+ }
+#endif // FEATURE_CORESYSTEM
+
+#else //FEATURE_CORECLR
+ // also check to see if the method is marked transparent
+ if (hasSkipVerificationPermisson)
+ {
+ if (pDomainAssembly == GetAppDomain()->GetAnonymouslyHostedDynamicMethodsAssembly())
+ {
+ // This assembly is FullTrust. However, it cannot contain unverifiable code.
+ // The JIT compiler is not hardened to deal with invalid code. Hence, we cannot
+ // return CORINFO_VERIFICATION_RUNTIME_CHECK for IL that could have been generated
+ // by a low-trust assembly.
+ canSkipVerif = CORINFO_VERIFICATION_CANNOT_SKIP;
+ }
+ // also check to see if the method is marked transparent
+ else if (SecurityTransparent::IsMethodTransparent(pMD))
+ {
+ // If the assembly requested that even its transparent members not be verified, then we can skip
+ // verification. Otherwise, we need to either inject a runtime demand in the v2 model, or fail
+ // verification in the v4 model.
+ ModuleSecurityDescriptor *pModuleSecDesc = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pMD->GetAssembly());
+ if (pModuleSecDesc->CanTransparentCodeSkipVerification())
+ {
+ canSkipVerif = CORINFO_VERIFICATION_CAN_SKIP;
+ }
+ else if (pMD->GetAssembly()->GetSecurityTransparencyBehavior()->CanTransparentCodeSkipVerification())
+ {
+ canSkipVerif = CORINFO_VERIFICATION_RUNTIME_CHECK;
+ }
+ else
+ {
+ canSkipVerif = CORINFO_VERIFICATION_CANNOT_SKIP;
+ }
+ }
+ }
+#endif //FEATURE_CORECLR
+
+ return canSkipVerif;
+}
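+
+// Summary of the possible results: CORINFO_VERIFICATION_CAN_SKIP (no verification needed),
+// CORINFO_VERIFICATION_CANNOT_SKIP (the IL must verify), and
+// CORINFO_VERIFICATION_RUNTIME_CHECK (skip at JIT time, but inject a runtime demand as in
+// the v2 transparency model).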
+
+CorInfoCanSkipVerificationResult SecurityTransparent::JITCanSkipVerification(DomainAssembly * pAssembly)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ BOOL hasSkipVerificationPermisson = Security::CanSkipVerification(pAssembly);
+
+ CorInfoCanSkipVerificationResult canSkipVerif = hasSkipVerificationPermisson ? CORINFO_VERIFICATION_CAN_SKIP : CORINFO_VERIFICATION_CANNOT_SKIP;
+
+ // If the assembly has permission to skip verification, but its transparency model requires that
+ // transparency can only be skipped with a runtime demand, then we need to make sure that there is a
+ // runtime check done.
+ if (hasSkipVerificationPermisson)
+ {
+ // In CoreCLR, do not enable transparency checks here. We depend on this method being "honest" in
+ // JITCanSkipVerification to skip transparency checks on profile assemblies.
+#ifndef FEATURE_CORECLR
+ ModuleSecurityDescriptor *pMsd = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pAssembly->GetAssembly());
+ if (pMsd->IsAllTransparent() &&
+ pAssembly->GetAssembly()->GetSecurityTransparencyBehavior()->CanTransparentCodeSkipVerification())
+ {
+ canSkipVerif = CORINFO_VERIFICATION_RUNTIME_CHECK;
+ }
+#endif // !FEATURE_CORECLR
+ }
+
+ return canSkipVerif;
+}
+
+// Determine if a method can quickly exit a runtime callout from the JIT - a true return value indicates
+// that the callout is not needed, false means that we cannot quickly exit
+
+// static
+bool SecurityTransparent::SecurityCalloutQuickCheck(MethodDesc *pCallerMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pCallerMD));
+ PRECONDITION(pCallerMD->HasCriticalTransparentInfo());
+ }
+ CONTRACTL_END;
+
+ // In CoreCLR, we modified the logic in the callout to also do some transparency method access checks.
+ // These checks need to happen regardless of trust level, and we shouldn't be bailing out early
+ // just because we happen to be in Full Trust
+#ifndef FEATURE_CORECLR
+ // See if we need to process this callout for real, or if we can bail out early before setting up a HMF,
+ // and spending a lot of time processing the transparency evaluation. The simplest case where we can do
+ // this is if the caller is critical. In that case, we know that the caller is allowed to do whatever
+ // it wants, so we quit out.
+ //
+ // Additionally, if the caller is using SecurityRuleSet.Level1, which turns transparency violations into
+ // security demands, we can bail out early if we know for sure all demands will succeed on the current
+ // call stack. (Note: this remains true as long as we don't start generating callouts for transparent
+ // level 1 calling critical level 1, or transparent level 1 doing an assert, which are the only two
+ // violations which do not succeed in the face of a successful demand).
+ if (pCallerMD->IsCritical())
+ {
+ return true;
+ }
+ else
+ {
+ // The caller is transparent, so let's see if demands can cause transparency violations to succeed,
+ // and also if all demands issued from this context will succeed.
+ const SecurityTransparencyBehavior *pCallerTransparency = pCallerMD->GetAssembly()->TryGetSecurityTransparencyBehavior();
+ if (pCallerTransparency != NULL &&
+ pCallerTransparency->CanTransparentCodeCallLinkDemandMethods() &&
+ SecurityStackWalk::HasFlagsOrFullyTrustedIgnoreMode(0))
+ {
+ return true;
+ }
+ }
+#endif // !FEATURE_CORECLR
+
+ return false;
+}
+
+CorInfoIsAccessAllowedResult SecurityTransparent::RequiresTransparentAssemblyChecks(MethodDesc* pCallerMD,
+ MethodDesc* pCalleeMD,
+ SecurityTransparencyError *pError)
+{
+ LIMITED_METHOD_CONTRACT;
+ return RequiresTransparentCodeChecks(pCallerMD, pCalleeMD, pError);
+}
+
+CorInfoIsAccessAllowedResult SecurityTransparent::RequiresTransparentCodeChecks(MethodDesc* pCallerMD,
+ MethodDesc* pCalleeMD,
+ SecurityTransparencyError *pError)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pCallerMD));
+ PRECONDITION(CheckPointer(pCalleeMD));
+ PRECONDITION(CheckPointer(pError, NULL_OK));
+ PRECONDITION(!pCalleeMD->IsILStub());
+ }
+ CONTRACTL_END;
+
+ // check if the caller method is transparent and NOT an interception stub (e.g. marshalling)
+ bool doChecks = !pCallerMD->IsILStub() && IsMethodTransparent(pCallerMD);
+
+ if (doChecks)
+ {
+ if (!IsTransparentCallerAllowed(pCallerMD, pCalleeMD, pError))
+ {
+ // intercept the call to throw a MAE at runtime (more debuggable than throwing MAE at JIT-time)
+ // IsTransparentCallerAllowed will have set pError if necessary
+ return CORINFO_ACCESS_RUNTIME_CHECK;
+ }
+
+ // Check to see if the callee has a LinkDemand, if so we may need to intercept the call.
+ if (pCalleeMD->RequiresLinktimeCheck())
+ {
+ if (pCalleeMD->RequiresLinkTimeCheckHostProtectionOnly()
+#ifndef CROSSGEN_COMPILE
+ && GetHostProtectionManager()->GetProtectedCategories() == eNoChecks
+#endif // CROSSGEN_COMPILE
+ )
+ {
+ // exclude HPA which are marked as LinkDemand and there is no HostProtection enabled currently
+ return CORINFO_ACCESS_ALLOWED;
+ }
+
+ // There was a reason other than simply conditional APTCA that the method required a linktime
+ // check - intercept the call later.
+ if (pError != NULL)
+ {
+ *pError = SecurityTransparencyError_CallLinkDemand;
+ }
+
+ return CORINFO_ACCESS_RUNTIME_CHECK;
+ }
+ }
+
+ return CORINFO_ACCESS_ALLOWED;
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+// Perform appropriate transparency checks when the caller of Load(byte[]), invoked without input Evidence, is transparent
+VOID SecurityTransparent::PerformTransparencyChecksForLoadByteArray(MethodDesc* pCallerMD, AssemblySecurityDescriptor* pLoadedSecDesc)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+#ifdef FEATURE_CAS_POLICY
+ GCX_COOP();
+ // check to see if the method that does the Load(byte[]) is transparent
+ if (IsMethodTransparent(pCallerMD))
+ {
+ Assembly* pLoadedAssembly = pLoadedSecDesc->GetAssembly();
+ // check to see if the byte[] being loaded is critical, i.e. not Transparent
+ if (!ModuleSecurityDescriptor::IsMarkedTransparent(pLoadedAssembly))
+ {
+ // if transparent code loads a byte[] that is critical, need to inject appropriate demands
+ if (pLoadedSecDesc->IsFullyTrusted()) // if the loaded code is full-trust
+ {
+ // do a full-demand for Full-Trust
+ OBJECTREF permSet = NULL;
+ GCPROTECT_BEGIN(permSet);
+ Security::GetPermissionInstance(&permSet, SECURITY_FULL_TRUST);
+ Security::DemandSet(SSWT_LATEBOUND_LINKDEMAND, permSet);
+ GCPROTECT_END();// do a full-demand for Full-Trust
+ }
+ else
+ {
+ // otherwise inject a demand for the permissions granted to the loaded assembly
+ struct _localGC {
+ OBJECTREF granted;
+ OBJECTREF denied;
+ } localGC;
+ ZeroMemory(&localGC, sizeof(localGC));
+
+ GCPROTECT_BEGIN(localGC);
+ {
+ localGC.granted = pLoadedSecDesc->GetGrantedPermissionSet(&(localGC.denied));
+ Security::DemandSet(SSWT_LATEBOUND_LINKDEMAND, localGC.granted);
+ }
+ GCPROTECT_END();
+ }
+ }
+ }
+#endif // FEATURE_CAS_POLICY
+}
+
+static void ConvertLinkDemandToFullDemand(MethodDesc* pCallerMD, MethodDesc* pCalleeMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pCallerMD));
+ PRECONDITION(CheckPointer(pCalleeMD));
+ PRECONDITION(pCallerMD->IsTransparent());
+ }
+ CONTRACTL_END;
+
+ if (!pCalleeMD->RequiresLinktimeCheck() ||
+ pCalleeMD->RequiresLinkTimeCheckHostProtectionOnly())
+ {
+ return;
+ }
+
+ // Profilers may wish to suppress linktime checks for methods they're profiling
+ if (Security::BypassSecurityChecksForProfiler(pCallerMD))
+ {
+ return;
+ }
+
+ struct
+ {
+ OBJECTREF refClassNonCasDemands;
+ OBJECTREF refClassCasDemands;
+ OBJECTREF refMethodNonCasDemands;
+ OBJECTREF refMethodCasDemands;
+ OBJECTREF refThrowable;
+ }
+ gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ LinktimeCheckReason linktimeCheckReason = Security::GetLinktimeCheckReason(pCalleeMD,
+ &gc.refClassCasDemands,
+ &gc.refClassNonCasDemands,
+ &gc.refMethodCasDemands,
+ &gc.refMethodNonCasDemands);
+
+#ifdef FEATURE_APTCA
+ BOOL fCallerIsAPTCA = pCallerMD->GetAssembly()->AllowUntrustedCaller();
+
+ if ((linktimeCheckReason & LinktimeCheckReason_AptcaCheck))
+ {
+ if (fCallerIsAPTCA &&
+ Security::IsUntrustedCallerCheckNeeded(pCalleeMD, pCallerMD->GetAssembly()))
+ {
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ SecurityTransparent::LogTransparencyError(pCallerMD, "Transparent method calling an APTCA protected assembly", pCalleeMD);
+ }
+ if (!g_pConfig->DisableTransparencyEnforcement())
+#endif // _DEBUG
+ {
+ // Depending on the transparency model, we need to either fail the attempt to call a method
+ // protected with the APTCA link demand, or convert it to a full demand. Note that we need to
+ // upgrade to a full demand if either the caller or the callee is in v2 mode: the APTCA check is
+ // conceptually a link demand, and for link demands we do the conversion if either assembly is
+ // using the v2 rules.
+ if (pCallerMD->GetAssembly()->GetSecurityTransparencyBehavior()->CanTransparentCodeCallLinkDemandMethods() ||
+ pCalleeMD->GetAssembly()->GetSecurityTransparencyBehavior()->CanTransparentCodeCallLinkDemandMethods())
+ {
+ OBJECTREF permSet = NULL;
+ GCPROTECT_BEGIN(permSet);
+ Security::GetPermissionInstance(&permSet, SECURITY_FULL_TRUST);
+ Security::DemandSet(SSWT_LATEBOUND_LINKDEMAND, permSet);
+ GCPROTECT_END();
+ }
+ else
+ {
+ ::ThrowMethodAccessException(pCallerMD, pCalleeMD, FALSE, IDS_E_TRANSPARENT_CALL_LINKDEMAND);
+ }
+ }
+ }
+ }
+#endif // FEATURE_APTCA
+
+
+ // The following logic turns link demands on the target method into full stack walks
+
+ if ((linktimeCheckReason & LinktimeCheckReason_CasDemand) ||
+ (linktimeCheckReason & LinktimeCheckReason_NonCasDemand))
+ {
+ // If we found a link demand, then we need to make sure that both the caller's and the callee's
+ // transparency models allow transparent code to satisfy a link demand. We check both since a
+ // v4 caller calling a v2 assembly may
+ // be attempting to satisfy a LinkDemand which the v2 assembly has not yet had a chance to remove.
+ if (!pCallerMD->GetAssembly()->GetSecurityTransparencyBehavior()->CanTransparentCodeCallLinkDemandMethods() &&
+ !pCalleeMD->GetAssembly()->GetSecurityTransparencyBehavior()->CanTransparentCodeCallLinkDemandMethods() &&
+ (gc.refClassCasDemands != NULL || gc.refMethodCasDemands != NULL))
+ {
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ SecurityTransparent::LogTransparencyError(pCallerMD, "Transparent method calling a LinkDemand protected method", pCalleeMD);
+ }
+ if (!g_pConfig->DisableTransparencyEnforcement())
+#endif // _DEBUG
+ {
+ ::ThrowMethodAccessException(pCallerMD, pCalleeMD, FALSE, IDS_E_TRANSPARENT_CALL_LINKDEMAND);
+ }
+ }
+
+ // CAS Link Demands
+ if (gc.refClassCasDemands != NULL)
+ Security::DemandSet(SSWT_LATEBOUND_LINKDEMAND, gc.refClassCasDemands);
+ if (gc.refMethodCasDemands != NULL)
+ Security::DemandSet(SSWT_LATEBOUND_LINKDEMAND, gc.refMethodCasDemands);
+
+ // Non-CAS demands are not applied against a grant set, they're standalone.
+ if (gc.refClassNonCasDemands != NULL)
+ Security::CheckNonCasDemand(&gc.refClassNonCasDemands);
+ if (gc.refMethodNonCasDemands != NULL)
+ Security::CheckNonCasDemand(&gc.refMethodNonCasDemands);
+ }
+
+
+ //
+ // Make sure that the callee is allowed to call unmanaged code if the target is native.
+ //
+
+ if (linktimeCheckReason & LinktimeCheckReason_NativeCodeCall)
+ {
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ SecurityTransparent::LogTransparencyError(pCallerMD, "Transparent method calling unmanaged code");
+ }
+ if (!g_pConfig->DisableTransparencyEnforcement())
+#endif // _DEBUG
+ {
+ if (pCallerMD->GetAssembly()->GetSecurityTransparencyBehavior()->CanTransparentCodeCallUnmanagedCode())
+ {
+#ifdef FEATURE_APTCA
+ if (fCallerIsAPTCA)
+ {
+ // if the caller assembly is APTCA, then only inject this demand; for non-APTCA callers we will
+ // allow calls to native code
+ // NOTE: the JIT would have already performed the LinkDemand for this anyways
+ Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_UNMANAGED_CODE);
+ }
+#endif // FEATURE_APTCA
+ }
+ else
+ {
+#if defined(FEATURE_CORECLR_COVERAGE_BUILD) && defined(FEATURE_STRONGNAME_DELAY_SIGNING_ALLOWED)
+ // For code coverage builds we have an issue where the inserted types/methods are not annotated.
+ // In particular, there may be p/invokes from transparent code. Allow that on cov builds for platform assemblies.
+ // Paranoia: allow this only on non-SHP builds - all builds except the SHP type will have
+ // FEATURE_STRONGNAME_DELAY_SIGNING_ALLOWED defined. So we can use that to figure out if this is a SHP build
+ // type that someone is trying to relax that constraint on and not allow that.
+ if (!pCalleeMD->GetModule()->GetFile()->GetAssembly()->IsProfileAssembly())
+#endif // defined(FEATURE_CORECLR_COVERAGE_BUILD) && defined(FEATURE_STRONGNAME_DELAY_SIGNING_ALLOWED)
+ {
+ ::ThrowMethodAccessException(pCallerMD, pCalleeMD, FALSE, IDS_E_TRANSPARENT_CALL_NATIVE);
+ }
+ }
+ }
+ }
+
+ GCPROTECT_END();
+}
+
+
+VOID SecurityTransparent::EnforceTransparentAssemblyChecks(MethodDesc* pCallerMD, MethodDesc* pCalleeMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pCallerMD));
+ PRECONDITION(Security::IsMethodTransparent(pCallerMD));
+ PRECONDITION(CheckPointer(pCalleeMD));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Profilers may wish to suppress transparency checks for methods they're profiling
+ if (Security::BypassSecurityChecksForProfiler(pCallerMD))
+ {
+ return;
+ }
+
+ // if target is critical, and not marked as TreatAsSafe, Access ERROR.
+ if (Security::IsMethodCritical(pCalleeMD) && !Security::IsMethodSafeCritical(pCalleeMD))
+ {
+
+ const SecurityTransparencyBehavior *pCalleeTransparency =
+ pCalleeMD->GetAssembly()->GetSecurityTransparencyBehavior();
+ const SecurityTransparencyBehavior *pCallerTransparency =
+ pCallerMD->GetAssembly()->GetSecurityTransparencyBehavior();
+
+ // If critical methods in the target can be converted to a link demand for legacy callers, then we
+ // need to do that conversion. Otherwise, this access is disallowed.
+ if (pCalleeTransparency->CanCriticalMembersBeConvertedToLinkDemand() &&
+ pCallerTransparency->CanTransparentCodeCallLinkDemandMethods())
+ {
+ ConvertCriticalMethodToLinkDemand(pCallerMD);
+ }
+ else
+ {
+            // Conversion to a LinkDemand was not allowed, so we need to reject the access.
+#ifdef _DEBUG
+ if (g_pConfig->LogTransparencyErrors())
+ {
+ LogTransparencyError(pCallerMD, "Transparent method accessing a critical method", pCalleeMD);
+ }
+ if (!g_pConfig->DisableTransparencyEnforcement())
+#endif // _DEBUG
+ {
+ ::ThrowMethodAccessException(pCallerMD, pCalleeMD, TRUE, IDS_E_CRITICAL_METHOD_ACCESS_DENIED);
+ }
+ }
+ }
+
+ ConvertLinkDemandToFullDemand(pCallerMD, pCalleeMD);
+}
+
+
+VOID SecurityTransparent::EnforceTransparentDelegateChecks(MethodTable* pDelegateMT, MethodDesc* pCalleeMD)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pDelegateMT));
+ PRECONDITION(CheckPointer(pCalleeMD));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_CORECLR
+ // We only enforce delegate binding rules in partial trust
+ if (GetAppDomain()->GetSecurityDescriptor()->IsFullyTrusted())
+ return;
+
+ StackSString strMethod;
+ TypeString::AppendMethod(strMethod, pCalleeMD, pCalleeMD->GetClassInstantiation(), TypeString::FormatNamespace | TypeString::FormatAngleBrackets| TypeString::FormatSignature);
+ StackSString strDelegateType;
+ TypeString::AppendType(strDelegateType, pDelegateMT, TypeString::FormatNamespace | TypeString::FormatAngleBrackets| TypeString::FormatSignature);
+
+ COMPlusThrowHR(COR_E_METHODACCESS, IDS_E_DELEGATE_BINDING_TRANSPARENCY, strDelegateType.GetUnicode(), strMethod.GetUnicode());
+#endif // FEATURE_CORECLR
+}
+
+#endif // CROSSGEN_COMPILE
+
+
+BOOL SecurityTransparent::IsMethodTransparent(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ // Is transparency info cached?
+ if (pMD->HasCriticalTransparentInfo())
+ {
+ return !pMD->IsCritical();
+ }
+
+ MethodSecurityDescriptor methSecurityDescriptor(pMD);
+ return !methSecurityDescriptor.IsCritical();
+}
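+
+// Note: the predicate above and all of the Is* predicates below share the same pattern:
+// consult the transparency bits cached on the MethodDesc / EEClass first, and only build a
+// security descriptor (which computes the bits from metadata) on a cache miss.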
+
+BOOL SecurityTransparent::IsMethodCritical(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ // Is transparency info cached?
+ if (pMD->HasCriticalTransparentInfo())
+ {
+ return pMD->IsCritical();
+ }
+
+ MethodSecurityDescriptor methSecurityDescriptor(pMD);
+ return methSecurityDescriptor.IsCritical();
+}
+
+// Returns TRUE if a method is SafeCritical (that is, critical and treat-as-safe)
+BOOL SecurityTransparent::IsMethodSafeCritical(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ // Is transparency info cached?
+ if (pMD->HasCriticalTransparentInfo())
+ {
+ return (pMD->IsCritical() && pMD->IsTreatAsSafe());
+ }
+
+ MethodSecurityDescriptor methSecurityDescriptor(pMD);
+ return (methSecurityDescriptor.IsCritical() && methSecurityDescriptor.IsTreatAsSafe());
+}
+
+BOOL SecurityTransparent::IsTypeCritical(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ EEClass *pClass = pMT->GetClass();
+ if (pClass->HasCriticalTransparentInfo())
+ {
+ return pClass->IsCritical();
+ }
+
+ TypeSecurityDescriptor typeSecurityDescriptor(pMT);
+ return typeSecurityDescriptor.IsCritical();
+}
+
+BOOL SecurityTransparent::IsTypeSafeCritical(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ EEClass *pClass = pMT->GetClass();
+ if (pClass->HasCriticalTransparentInfo())
+ {
+ return pClass->IsCritical() && pClass->IsTreatAsSafe();
+ }
+
+ TypeSecurityDescriptor typeSecurityDescriptor(pMT);
+ return typeSecurityDescriptor.IsCritical() &&
+ typeSecurityDescriptor.IsTreatAsSafe();
+}
+
+BOOL SecurityTransparent::IsTypeTransparent(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ EEClass *pClass = pMT->GetClass();
+ if (pClass->HasCriticalTransparentInfo())
+ {
+ return !pClass->IsCritical();
+ }
+
+ TypeSecurityDescriptor typeSecurityDescriptor(pMT);
+ return !typeSecurityDescriptor.IsCritical();
+}
+
+// Returns TRUE if a type is transparent and contains only transparent members
+// static
+BOOL SecurityTransparent::IsTypeAllTransparent(MethodTable * pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ EEClass *pClass = pMT->GetClass();
+ if (pClass->HasCriticalTransparentInfo())
+ {
+ return pClass->IsAllTransparent();
+ }
+
+ TypeSecurityDescriptor typeSecurityDescriptor(pMT);
+ return typeSecurityDescriptor.IsAllTransparent();
+}
+
+BOOL SecurityTransparent::IsTypeAllCritical(MethodTable * pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ EEClass *pClass = pMT->GetClass();
+ if (pClass->HasCriticalTransparentInfo())
+ {
+ return pClass->IsAllCritical();
+ }
+
+ TypeSecurityDescriptor typeSecurityDescriptor(pMT);
+ return typeSecurityDescriptor.IsAllCritical();
+}
+
+BOOL SecurityTransparent::IsFieldTransparent(FieldDesc* pFD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pFD));
+ }
+ CONTRACTL_END;
+
+ FieldSecurityDescriptor fsd(pFD);
+ return !fsd.IsCritical();
+}
+
+BOOL SecurityTransparent::IsFieldCritical(FieldDesc* pFD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pFD));
+ }
+ CONTRACTL_END;
+
+ FieldSecurityDescriptor fsd(pFD);
+ return fsd.IsCritical();
+}
+
+// Returns TRUE if a field is SafeCritical (that is, critical and treat-as-safe)
+BOOL SecurityTransparent::IsFieldSafeCritical(FieldDesc* pFD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ PRECONDITION(CheckPointer(pFD));
+ }
+ CONTRACTL_END;
+
+ FieldSecurityDescriptor fsd(pFD);
+ return fsd.IsCritical() && fsd.IsTreatAsSafe();
+}
+
+// Returns True if the token is transparent
+BOOL SecurityTransparent::IsTokenTransparent(Module *pModule, mdToken tkToken)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ ModuleSecurityDescriptor *pMsd = ModuleSecurityDescriptor::GetModuleSecurityDescriptor(pModule->GetAssembly());
+ if (pMsd->IsAllCritical())
+ {
+ return FALSE;
+ }
+
+ const TokenSecurityDescriptorFlags criticalMask = TokenSecurityDescriptorFlags_AllCritical |
+ TokenSecurityDescriptorFlags_Critical |
+ TokenSecurityDescriptorFlags_SafeCritical;
+ TokenSecurityDescriptor tokenSecurityDescriptor(pModule, tkToken);
+ return !(tokenSecurityDescriptor.GetMetadataFlags() & criticalMask);
+}
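+
+// For example, a token carrying TokenSecurityDescriptorFlags_SafeCritical intersects the
+// mask above, so IsTokenTransparent returns FALSE for it; a token with no critical
+// annotations (in a module that is not all-critical) reports TRUE.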
+
+// Functor type to perform class access checks on any disallowed transparent -> critical accesses.
+class DoSecurityClassAccessChecksFunctor
+{
+private:
+ MethodDesc *m_pCallerMD;
+ CorInfoSecurityRuntimeChecks m_check;
+
+public:
+ DoSecurityClassAccessChecksFunctor(MethodDesc *pCallerMD, CorInfoSecurityRuntimeChecks check)
+ : m_pCallerMD(pCallerMD),
+ m_check(check)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ DoSecurityClassAccessChecksFunctor(const DoSecurityClassAccessChecksFunctor &other); // not implemented
+
+ void operator()(MethodTable *pMT)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // We can get caller checks of 0 if we're in AlwaysInsertCallout mode, so make sure to do all of our
+ // work under checks for specific flags
+ if (m_check & CORINFO_ACCESS_SECURITY_TRANSPARENCY)
+ {
+ StaticAccessCheckContext accessContext(m_pCallerMD);
+
+ if (!Security::CheckCriticalAccess(&accessContext, NULL, NULL, pMT))
+ {
+ ThrowTypeAccessException(m_pCallerMD, pMT, TRUE, IDS_E_CRITICAL_TYPE_ACCESS_DENIED);
+ }
+ }
+ }
+};
+
+// Check that a calling method is allowed to access a type handle for security reasons. This checks:
+// 1. That transparency allows the caller to use the type
+//
+// The method returns if the checks succeed and throws on error.
+//
+// static
+void SecurityTransparent::DoSecurityClassAccessChecks(MethodDesc *pCallerMD,
+ const TypeHandle &calleeTH,
+ CorInfoSecurityRuntimeChecks check)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ DoSecurityClassAccessChecksFunctor classAccessChecks(pCallerMD, check);
+ calleeTH.ForEachComponentMethodTable(classAccessChecks);
+}
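+
+// Note: ForEachComponentMethodTable visits every MethodTable making up the type handle --
+// for a composite type such as MyStruct[] or List<MyStruct>, that includes the component
+// and argument types as well -- so a transparent caller is rejected if any piece of the
+// composite type is inaccessible to it.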
+
+//
+// Transparency behavior implementations
+//
+
+//---------------------------------------------------------------------------------------
+//
+// Transparency behavior implementation for v4 and CoreCLR assemblies
+//
+
+class TransparencyBehaviorImpl : public ISecurityTransparencyImpl
+{
+public:
+
+ // Get bits that indicate how transparency should behave in different situations
+ virtual SecurityTransparencyBehaviorFlags GetBehaviorFlags() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return SecurityTransparencyBehaviorFlags_AttributesRequireTransparencyCheck |
+ SecurityTransparencyBehaviorFlags_CriticalMembersConvertToLinkDemand |
+ SecurityTransparencyBehaviorFlags_InheritanceRulesEnforced |
+ SecurityTransparencyBehaviorFlags_PartialTrustImpliesAllTransparent |
+ SecurityTransparencyBehaviorFlags_ScopeAppliesOnlyToIntroducedMethods;
+ }
+
+ // Transparency field behavior mappings:
+ // Attribute Behavior
+ // -----------------------------------------------------
+ // Critical (any) Critical
+ // SafeCritical Safe critical
+ // TAS (no critical) No effect
+ // TAS (with any critical) Safe critical
+ virtual FieldSecurityDescriptorFlags MapFieldAttributes(TokenSecurityDescriptorFlags tokenFlags) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ FieldSecurityDescriptorFlags fieldFlags = FieldSecurityDescriptorFlags_None;
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_Critical)
+ {
+ fieldFlags |= FieldSecurityDescriptorFlags_IsCritical;
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_TreatAsSafe)
+ {
+ fieldFlags |= FieldSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ }
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_SafeCritical)
+ {
+ fieldFlags |= FieldSecurityDescriptorFlags_IsCritical | FieldSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+
+ return fieldFlags;
+ }
+
+ // Transparency module behavior mappings for an introduced method:
+ // Attribute Behavior
+ // -----------------------------------------------------
+ // Critical (any) Critical
+ // SafeCritical Safe critical
+ // TAS (no critical) No effect
+ // TAS (with any critical) Safe critical
+ virtual MethodSecurityDescriptorFlags MapMethodAttributes(TokenSecurityDescriptorFlags tokenFlags) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MethodSecurityDescriptorFlags methodFlags = MethodSecurityDescriptorFlags_None;
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_Critical)
+ {
+ methodFlags |= MethodSecurityDescriptorFlags_IsCritical;
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_TreatAsSafe)
+ {
+ methodFlags |= MethodSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ }
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_SafeCritical)
+ {
+ methodFlags |= MethodSecurityDescriptorFlags_IsCritical |
+ MethodSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+
+ return methodFlags;
+ }
+
+ // Transparency module behavior mappings:
+ // Attribute Behavior
+ // -----------------------------------------------------
+ // APTCA Mixed transparency + APTCA
+ // Critical (scoped) All critical + APTCA
+ // Critical (all) All critical + APTCA
+ // SafeCritical No effect
+ // TAS (no critical) No effect
+ // TAS (with scoped critical) All safe critical + APTCA
+ // TAS (with all critical) All safe critical + APTCA
+ // Transparent All transparent + APTCA
+ //
+ // If the assembly has no attributes, then it will be opportunistically critical.
+ //
+ // APTCA is granted to all assemblies because we rely upon transparent code being unable to call critical
+ // code to enforce the APTCA check. Since all partial trust code must be transparent, this provides the
+ // same effect.
+ virtual ModuleSecurityDescriptorFlags MapModuleAttributes(TokenSecurityDescriptorFlags tokenFlags) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ ModuleSecurityDescriptorFlags moduleFlags = ModuleSecurityDescriptorFlags_None;
+
+#if defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+ if (tokenFlags & TokenSecurityDescriptorFlags_APTCA)
+ {
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsAPTCA;
+ }
+#endif // defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_Critical)
+ {
+ // We don't pay attention to the critical scope if we're not a legacy assembly
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsAllCritical;
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_TreatAsSafe)
+ {
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ }
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_Transparent)
+ {
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsAllTransparent;
+ }
+
+        // If we didn't see APTCA (or ConditionalAPTCA), Transparent, or any form of Critical, then the assembly is opportunistically
+ // critical.
+ const ModuleSecurityDescriptorFlags transparencyMask = ModuleSecurityDescriptorFlags_IsAPTCA |
+ ModuleSecurityDescriptorFlags_IsAllTransparent |
+ ModuleSecurityDescriptorFlags_IsAllCritical;
+ if (!(moduleFlags & transparencyMask))
+ {
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsOpportunisticallyCritical;
+ }
+
+        // If the token asks to not have IL verification done in full trust, propagate that to the module
+ if (tokenFlags & TokenSecurityDescriptorFlags_SkipFullTrustVerification)
+ {
+ moduleFlags |= ModuleSecurityDescriptorFlags_SkipFullTrustVerification;
+ }
+
+ // We rely on transparent / critical checks to provide APTCA enforcement in the v4 model, so all assemblies
+ // get APTCA.
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsAPTCA;
+
+ return moduleFlags;
+ }
+
+ // Transparency type behavior mappings:
+ // Attribute Behavior
+ // -----------------------------------------------------
+ // Critical (any) All critical
+ // SafeCritical All safe critical
+ // TAS (no critical) No effect on the type, but save TAS bit since members of the type may be critical
+ // TAS (with any critical) All SafeCritical
+ virtual TypeSecurityDescriptorFlags MapTypeAttributes(TokenSecurityDescriptorFlags tokenFlags) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ TypeSecurityDescriptorFlags typeFlags = TypeSecurityDescriptorFlags_None;
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_Critical)
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsCritical |
+ TypeSecurityDescriptorFlags_IsAllCritical;
+ }
+
+ // SafeCritical always means all critical + TAS
+ if (tokenFlags & TokenSecurityDescriptorFlags_SafeCritical)
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsCritical |
+ TypeSecurityDescriptorFlags_IsAllCritical |
+ TypeSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_TreatAsSafe)
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+
+ return typeFlags;
+ }
+};
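+
+// Illustrative sketch of the v4 mapping rules above (not part of the original change):
+// marking a method token Critical + TreatAsSafe produces the same descriptor flags as a
+// single SafeCritical marking.
+//
+//   TransparencyBehaviorImpl impl;
+//   MethodSecurityDescriptorFlags critTas = impl.MapMethodAttributes(
+//       static_cast<TokenSecurityDescriptorFlags>(TokenSecurityDescriptorFlags_Critical |
+//                                                 TokenSecurityDescriptorFlags_TreatAsSafe));
+//   MethodSecurityDescriptorFlags safeCrit =
+//       impl.MapMethodAttributes(TokenSecurityDescriptorFlags_SafeCritical);
+//   _ASSERTE(critTas == safeCrit);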
+
+#ifndef FEATURE_CORECLR
+
+//---------------------------------------------------------------------------------------
+//
+// Transparency behavior implementation for v2 assemblies
+//
+
+class LegacyTransparencyBehaviorImpl : public ISecurityTransparencyImpl
+{
+public:
+ // Get bits that indicate how transparency should behave in different situations
+ virtual SecurityTransparencyBehaviorFlags GetBehaviorFlags() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return SecurityTransparencyBehaviorFlags_IntroducedCriticalsMayAddTreatAsSafe |
+ SecurityTransparencyBehaviorFlags_OpportunisticIsSafeCriticalMethods |
+ SecurityTransparencyBehaviorFlags_PartialTrustImpliesAllTransparent |
+ SecurityTransparencyBehaviorFlags_PublicImpliesTreatAsSafe |
+ SecurityTransparencyBehaviorFlags_TransparentCodeCanCallLinkDemand |
+               SecurityTransparencyBehaviorFlags_TransparentCodeCanCallUnmanagedCode |
+ SecurityTransparencyBehaviorFlags_TransparentCodeCanSkipVerification |
+ SecurityTransparencyBehaviorFlags_UnsignedImpliesAPTCA;
+ }
+
+ // Legacy transparency field behavior mappings:
+ // Attribute Behavior
+ // -----------------------------------------------------
+ // Critical (any) Critical
+ // SafeCritical Safe critical
+ // TAS (no critical) No effect
+ // TAS (with any critical) Safe critical
+ virtual FieldSecurityDescriptorFlags MapFieldAttributes(TokenSecurityDescriptorFlags tokenFlags) const
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // Legacy transparency behaves the same for fields as the current transparency model, so we just forward
+ // this call to that implementation.
+ TransparencyBehaviorImpl forwardImpl;
+ return forwardImpl.MapFieldAttributes(tokenFlags);
+ }
+
+
+ // Legacy transparency method behavior mappings:
+ // Attribute Behavior
+ // -----------------------------------------------------
+ // Critical (any) Critical
+ // SafeCritical Safe critical
+ // TAS (no critical) No effect
+ // TAS (with any critical) Safe critical
+ virtual MethodSecurityDescriptorFlags MapMethodAttributes(TokenSecurityDescriptorFlags tokenFlags) const
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // Legacy transparency behaves the same for methods as the current transparency model, so we just forward
+ // this call to that implementation.
+ TransparencyBehaviorImpl forwardImpl;
+ return forwardImpl.MapMethodAttributes(tokenFlags);
+ }
+
+ // Legacy transparency module behavior mappings:
+ // Attribute Behavior
+ // -----------------------------------------------------
+ // APTCA APTCA
+    //  ConditionalAPTCA            Exception
+ // Critical (scoped) Mixed transparency
+ // Critical (all) All critical
+ // SafeCritical All safe critical
+ // TAS (no critical) No effect
+ // TAS (with scoped critical) No effect
+ // TAS (with all critical) All safe critical
+ // Transparent All transparent
+ //
+ // Having no transparent, critical, or safe critical attributes means that the assembly should have all
+ // transparent types and all safe critical methods.
+ virtual ModuleSecurityDescriptorFlags MapModuleAttributes(TokenSecurityDescriptorFlags tokenFlags) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ ModuleSecurityDescriptorFlags moduleFlags = ModuleSecurityDescriptorFlags_None;
+ bool fShouldBeOpportunisticallyCritical = true;
+
+#if defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+ if (tokenFlags & TokenSecurityDescriptorFlags_APTCA)
+ {
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsAPTCA;
+ }
+#endif // defined(FEATURE_APTCA) || defined(FEATURE_CORESYSTEM)
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_Transparent)
+ {
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsAllTransparent;
+ fShouldBeOpportunisticallyCritical = false;
+ }
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_Critical)
+ {
+ fShouldBeOpportunisticallyCritical = false;
+
+            // If we're critical but not all-critical, that means we're mixed.
+ if (tokenFlags & TokenSecurityDescriptorFlags_AllCritical)
+ {
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsAllCritical;
+
+ // If we're all critical and treat as safe, that means we're safe critical
+ if (tokenFlags & TokenSecurityDescriptorFlags_TreatAsSafe)
+ {
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+ }
+ }
+
+ // SafeCritical always means Critical + TreatAsSafe; we can get in this case for legacy assemblies if the
+ // assembly is actually a v4 assembly which is using the Legacy attribute.
+ if (tokenFlags & TokenSecurityDescriptorFlags_SafeCritical)
+ {
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsAllCritical |
+ ModuleSecurityDescriptorFlags_IsTreatAsSafe;
+ fShouldBeOpportunisticallyCritical = false;
+ }
+
+ // If we didn't find an attribute that indicates the assembly cares about transparency, then it is
+ // opportunistically critical.
+ if (fShouldBeOpportunisticallyCritical)
+ {
+ _ASSERTE(!(moduleFlags & ModuleSecurityDescriptorFlags_IsAllTransparent));
+ _ASSERTE(!(moduleFlags & ModuleSecurityDescriptorFlags_IsAllCritical));
+
+ moduleFlags |= ModuleSecurityDescriptorFlags_IsOpportunisticallyCritical;
+ }
+
+        // If the token asks to not have IL verification done in full trust, propagate that to the module
+ if (tokenFlags & TokenSecurityDescriptorFlags_SkipFullTrustVerification)
+ {
+ moduleFlags |= ModuleSecurityDescriptorFlags_SkipFullTrustVerification;
+ }
+
+ return moduleFlags;
+ }
+
+ // Legacy transparency type behavior mappings:
+ // Attribute Behavior
+ // -----------------------------------------------------
+ // Critical (scoped) Critical, but not all critical
+ // Critical (all) All critical
+ // SafeCritical All safe critical
+ // TAS (no critical) No effect on the type, but save TAS bit for members of the type
+ // TAS (with scoped critical) SafeCritical, but not all critical
+ // TAS (with all critical) All SafeCritical
+ virtual TypeSecurityDescriptorFlags MapTypeAttributes(TokenSecurityDescriptorFlags tokenFlags) const
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ TypeSecurityDescriptorFlags typeFlags = TypeSecurityDescriptorFlags_None;
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_Critical)
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsCritical;
+
+ // We only consider all critical if the critical attribute was present
+ if (tokenFlags & TokenSecurityDescriptorFlags_AllCritical)
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsAllCritical;
+ }
+ }
+
+ // SafeCritical always means all critical + TAS
+ if (tokenFlags & TokenSecurityDescriptorFlags_SafeCritical)
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsCritical |
+ TypeSecurityDescriptorFlags_IsAllCritical |
+ TypeSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+
+ if (tokenFlags & TokenSecurityDescriptorFlags_TreatAsSafe)
+ {
+ typeFlags |= TypeSecurityDescriptorFlags_IsTreatAsSafe;
+ }
+
+ return typeFlags;
+ }
+};
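+
+// Contrast with the v4 rules above: under the legacy (level 1) mapping an assembly with no
+// transparency attributes is still opportunistically critical, but the
+// OpportunisticIsSafeCriticalMethods behavior flag means it surfaces as all-transparent
+// types whose methods are safe critical, rather than as an all-critical assembly.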
+
+#endif // !FEATURE_CORECLR
+
+//
+// Shared transparency behavior objects
+//
+
+//---------------------------------------------------------------------------------------
+//
+// Access a shared security transparency behavior object, creating it if the object has
+// not yet been used.
+//
+
+template <class T>
+const SecurityTransparencyBehavior *GetOrCreateTransparencyBehavior(SecurityTransparencyBehavior **ppBehavior)
+{
+ CONTRACT(const SecurityTransparencyBehavior *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(ppBehavior));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ if (*ppBehavior == NULL)
+ {
+ NewHolder<ISecurityTransparencyImpl> pImpl(new T);
+ NewHolder<SecurityTransparencyBehavior> pBehavior(new SecurityTransparencyBehavior(pImpl));
+
+ SecurityTransparencyBehavior *pPrevBehavior =
+ InterlockedCompareExchangeT(ppBehavior, pBehavior.GetValue(), NULL);
+
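+        // Publication is lock-free: only the thread whose compare-exchange succeeded keeps
+        // its objects alive; a losing thread lets its holders destroy the duplicates and
+        // returns the instance that won the race via *ppBehavior below.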
+ if (pPrevBehavior == NULL)
+ {
+ pBehavior.SuppressRelease();
+ pImpl.SuppressRelease();
+ }
+ }
+
+ RETURN(*ppBehavior);
+}
+
+// Transparency behavior object for v4 transparent assemblies
+// static
+SecurityTransparencyBehavior *SecurityTransparencyBehavior::s_pStandardTransparencyBehavior = NULL;
+
+#ifndef FEATURE_CORECLR
+
+// Transparency behavior object for v2 transparent assemblies
+// static
+SecurityTransparencyBehavior *SecurityTransparencyBehavior::s_pLegacyTransparencyBehavior = NULL;
+
+#endif // !FEATURE_CORECLR
+
+//---------------------------------------------------------------------------------------
+//
+// Get a security transparency object for an assembly with the specified attributes on
+// its manifest
+//
+// Arguments:
+// moduleTokenFlags - flags from reading the security attributes of the assembly's
+// manifest module
+//
+
+const SecurityTransparencyBehavior *SecurityTransparencyBehavior::GetTransparencyBehavior(SecurityRuleSet ruleSet)
+{
+ CONTRACT(const SecurityTransparencyBehavior *)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(ruleSet == SecurityRuleSet_Level1 || ruleSet == SecurityRuleSet_Level2);
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+#ifndef FEATURE_CORECLR
+ if (ruleSet == SecurityRuleSet_Level1)
+ {
+ // Level 1 rules - v2.0 behavior
+ RETURN(GetOrCreateTransparencyBehavior<LegacyTransparencyBehaviorImpl>(&s_pLegacyTransparencyBehavior));
+ }
+ else
+#endif // !FEATURE_CORECLR
+ {
+ // Level 2 rules - v4.0 behavior
+ RETURN(GetOrCreateTransparencyBehavior<TransparencyBehaviorImpl>(&s_pStandardTransparencyBehavior));
+ }
+}
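+
+// Illustrative usage (a sketch, not part of the original change):
+//
+//   const SecurityTransparencyBehavior *pBehavior =
+//       SecurityTransparencyBehavior::GetTransparencyBehavior(SecurityRuleSet_Level2);
+//   _ASSERTE(pBehavior->AreInheritanceRulesEnforced());   // true under the v4 rules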
diff --git a/src/vm/securitytransparentassembly.h b/src/vm/securitytransparentassembly.h
new file mode 100644
index 0000000000..74102d6911
--- /dev/null
+++ b/src/vm/securitytransparentassembly.h
@@ -0,0 +1,250 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//--------------------------------------------------------------------------
+// securityTransparentAssembly.h
+//
+// Implementation for transparent code feature
+//
+
+
+//--------------------------------------------------------------------------
+
+
+#ifndef __SECURITYTRANSPARENT_H__
+#define __SECURITYTRANSPARENT_H__
+
+#include "securitymeta.h"
+
+// Reason that a transparency error was flagged
+enum SecurityTransparencyError
+{
+ SecurityTransparencyError_None,
+ SecurityTransparencyError_CallCriticalMethod, // A transparent method tried to call a critical method
+ SecurityTransparencyError_CallLinkDemand // A transparent method tried to call a method with a LinkDemand
+};
+
+namespace SecurityTransparent
+{
+//private:
+ BOOL IsMethodTransparent(MethodDesc *pMD);
+ BOOL IsMethodCritical(MethodDesc *pMD);
+ BOOL IsMethodSafeCritical(MethodDesc *pMD);
+ BOOL IsTypeCritical(MethodTable *pMT);
+ BOOL IsTypeSafeCritical(MethodTable *pMT);
+ BOOL IsTypeTransparent(MethodTable *pMT);
+ BOOL IsTypeAllTransparent(MethodTable *pMT);
+ BOOL IsTypeAllCritical(MethodTable *pMT);
+ BOOL IsFieldTransparent(FieldDesc *pFD);
+ BOOL IsFieldCritical(FieldDesc *pFD);
+ BOOL IsFieldSafeCritical(FieldDesc *pFD);
+ BOOL IsTokenTransparent(Module *pModule, mdToken tkToken);
+
+//public:
+ bool SecurityCalloutQuickCheck(MethodDesc *pCallerMD);
+
+ CorInfoIsAccessAllowedResult RequiresTransparentCodeChecks(MethodDesc* pCaller,
+ MethodDesc* pCallee,
+ SecurityTransparencyError *pError);
+ CorInfoIsAccessAllowedResult RequiresTransparentAssemblyChecks(MethodDesc* pCaller,
+ MethodDesc* pCallee,
+ SecurityTransparencyError *pError);
+ void EnforceTransparentAssemblyChecks(MethodDesc* pCaller, MethodDesc* pCallee);
+ void EnforceTransparentDelegateChecks(MethodTable* pDelegateMT, MethodDesc* pCallee);
+ CorInfoCanSkipVerificationResult JITCanSkipVerification(DomainAssembly * pAssembly);
+ CorInfoCanSkipVerificationResult JITCanSkipVerification(MethodDesc * pMD);
+ VOID PerformTransparencyChecksForLoadByteArray(MethodDesc* pCallersMD, AssemblySecurityDescriptor* pLoadedSecDesc);
+ BOOL CheckCriticalAccess(AccessCheckContext* pContext,
+ MethodDesc* pOptionalTargetMethod,
+ FieldDesc* pOptionalTargetField,
+ MethodTable * pOptionalTargetType);
+ BOOL IsAllowedToAssert(MethodDesc *pMD);
+
+ bool TypeRequiresTransparencyCheck(TypeHandle type, bool checkForLinkDemands);
+
+ void DECLSPEC_NORETURN ThrowMethodAccessException(MethodDesc* pMD, DWORD dwMessageId = IDS_CRITICAL_METHOD_ACCESS_DENIED);
+
+ void DECLSPEC_NORETURN ThrowTypeLoadException(MethodDesc* pMD, DWORD dwMessageId = IDS_METHOD_INHERITANCE_RULES_VIOLATED);
+ void DECLSPEC_NORETURN ThrowTypeLoadException(MethodTable* pMT);
+
+ void DoSecurityClassAccessChecks(MethodDesc *pCallerMD,
+ const TypeHandle &calleeTH,
+ CorInfoSecurityRuntimeChecks checks);
+#ifdef _DEBUG
+ void LogTransparencyError(Assembly *pAssembly, const LPCSTR szError);
+ void LogTransparencyError(MethodTable *pMT, const LPCSTR szError);
+ void LogTransparencyError(MethodDesc *pMD, const LPCSTR szError, MethodDesc *pTargetMD = NULL);
+#endif // _DEBUG
+};
+
+//
+// Transparency is implemented slightly differently between v2 desktop, v4 desktop, and CoreCLR. In order to
+// support running v2 desktop assemblies on the v4 CLR without modifying their expected transparency behavior,
+// we indirect all questions about what transparency means through a SecurityTransparencyBehavior object.
+//
+// The SecurityTransparencyBehavior object uses implementations of ISecurityTransparencyImpl to query about
+// specific behavior differences.
+//
+
+enum SecurityTransparencyBehaviorFlags
+{
+ SecurityTransparencyBehaviorFlags_None = 0x0000,
+
+ // Custom attributes require transparency checks in order to be used by transparent code
+ SecurityTransparencyBehaviorFlags_AttributesRequireTransparencyCheck = 0x0001,
+
+ // Public critical members of an assembly can behave as if they were safe critical with a LinkDemand
+ // for FullTrust
+ SecurityTransparencyBehaviorFlags_CriticalMembersConvertToLinkDemand = 0x0002,
+
+ // Types and methods must obey the transparency inheritance rules
+ SecurityTransparencyBehaviorFlags_InheritanceRulesEnforced = 0x0004,
+
+ // Members contained within a scope that introduces members as critical may add their own treat as safe
+ SecurityTransparencyBehaviorFlags_IntroducedCriticalsMayAddTreatAsSafe = 0x0008,
+
+ // Opportunistically critical assemblies consist of entirely transparent types with entirely safe
+ // critical methods.
+ SecurityTransparencyBehaviorFlags_OpportunisticIsSafeCriticalMethods = 0x0010,
+
+ // Assemblies loaded in partial trust are implicitly all transparent
+ SecurityTransparencyBehaviorFlags_PartialTrustImpliesAllTransparent = 0x0020,
+
+ // All public critical types and methods get an implicit treat as safe marking
+ SecurityTransparencyBehaviorFlags_PublicImpliesTreatAsSafe = 0x0040,
+
+ // Security critical or safe critical at a larger than method scope applies only to methods introduced
+ // within that scope, rather than all methods contained in the scope
+ SecurityTransparencyBehaviorFlags_ScopeAppliesOnlyToIntroducedMethods = 0x0080,
+
+ // Security transparent code can call methods protected with a LinkDemand
+ SecurityTransparencyBehaviorFlags_TransparentCodeCanCallLinkDemand = 0x0100,
+
+ // Security transparent code can call native code via P/Invoke or COM Interop
+    SecurityTransparencyBehaviorFlags_TransparentCodeCanCallUnmanagedCode = 0x0200,
+
+ // Security transparent code can skip verification with a runtime check
+ SecurityTransparencyBehaviorFlags_TransparentCodeCanSkipVerification = 0x0400,
+
+ // Unsigned assemblies implicitly are APTCA
+ SecurityTransparencyBehaviorFlags_UnsignedImpliesAPTCA = 0x0800,
+};
+
+inline SecurityTransparencyBehaviorFlags operator|(SecurityTransparencyBehaviorFlags lhs,
+ SecurityTransparencyBehaviorFlags rhs);
+
+inline SecurityTransparencyBehaviorFlags operator|=(SecurityTransparencyBehaviorFlags& lhs,
+ SecurityTransparencyBehaviorFlags rhs);
+
+inline SecurityTransparencyBehaviorFlags operator&(SecurityTransparencyBehaviorFlags lhs,
+ SecurityTransparencyBehaviorFlags rhs);
+
+inline SecurityTransparencyBehaviorFlags operator&=(SecurityTransparencyBehaviorFlags &lhs,
+ SecurityTransparencyBehaviorFlags rhs);
+
+// Base interface for transparency behavior implementations
+class ISecurityTransparencyImpl
+{
+public:
+ virtual ~ISecurityTransparencyImpl()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ // Get flags that indicate specific on/off behaviors of transparency
+ virtual SecurityTransparencyBehaviorFlags GetBehaviorFlags() const = 0;
+
+ // Map security attributes that a field contains to the set of behaviors it supports
+ virtual FieldSecurityDescriptorFlags MapFieldAttributes(TokenSecurityDescriptorFlags tokenFlags) const = 0;
+
+ // Map security attributes that a method contains to the set of behaviors it supports
+ virtual MethodSecurityDescriptorFlags MapMethodAttributes(TokenSecurityDescriptorFlags tokenFlags) const = 0;
+
+ // Map security attributes that a module contains to the set of behaviors it supports
+ virtual ModuleSecurityDescriptorFlags MapModuleAttributes(TokenSecurityDescriptorFlags tokenFlags) const = 0;
+
+ // Map security attributes that a type contains to the set of behaviors it supports
+ virtual TypeSecurityDescriptorFlags MapTypeAttributes(TokenSecurityDescriptorFlags tokenFlags) const = 0;
+};
+
+class SecurityTransparencyBehavior
+{
+public:
+ // Get a transparency behavior for a module with the given attributes applied to it
+ static
+ const SecurityTransparencyBehavior *GetTransparencyBehavior(SecurityRuleSet ruleSet);
+
+public:
+ // Are types and methods required to obey the transparency inheritance rules
+ inline bool AreInheritanceRulesEnforced() const;
+
+ // Can public critical members of an assembly behave as if they were safe critical with a LinkDemand
+ // for FullTrust
+ inline bool CanCriticalMembersBeConvertedToLinkDemand() const;
+
+ // Can members contained within a scope that introduces members as critical add their own TreatAsSafe
+ // attribute
+ inline bool CanIntroducedCriticalMembersAddTreatAsSafe() const;
+
+ // Can transparent methods call methods protected with a LinkDemand
+ inline bool CanTransparentCodeCallLinkDemandMethods() const;
+
+ // Can transparent methods call native code
+ inline bool CanTransparentCodeCallUnmanagedCode() const;
+
+ // Can transparent members skip verification if the callstack passes a runtime check
+ inline bool CanTransparentCodeSkipVerification() const;
+
+ // Custom attributes require transparency checks in order to be used by transparent code
+ inline bool DoAttributesRequireTransparencyChecks() const;
+
+ // Opportunistically critical assemblies consist of entirely transparent types with entirely safe
+ // critical methods.
+ inline bool DoesOpportunisticRequireOnlySafeCriticalMethods() const;
+
+ // Does being loaded in partial trust imply that the assembly is implicitly all transparent
+ inline bool DoesPartialTrustImplyAllTransparent() const;
+
+ // Do all public members of the assembly get an implicit treat as safe marking
+ inline bool DoesPublicImplyTreatAsSafe() const;
+
+    // Does security critical or safe critical at larger-than-method scope apply only to methods introduced
+    // within that scope, or to all methods contained within the scope?
+ inline bool DoesScopeApplyOnlyToIntroducedMethods() const;
+
+ // Do unsigned assemblies implicitly become APTCA
+ inline bool DoesUnsignedImplyAPTCA() const;
+
+ // Get flags that indicate specific on/off behaviors of transparency
+ inline FieldSecurityDescriptorFlags MapFieldAttributes(TokenSecurityDescriptorFlags tokenFlags) const;
+
+ // Map security attributes that a method contains to the set of behaviors it supports
+ inline MethodSecurityDescriptorFlags MapMethodAttributes(TokenSecurityDescriptorFlags tokenFlags) const;
+
+ // Map security attributes that a module contains to the set of behaviors it supports
+ inline ModuleSecurityDescriptorFlags MapModuleAttributes(TokenSecurityDescriptorFlags tokenFlags) const;
+
+ // Map security attributes that a type contains to the set of behaviors it supports
+ inline TypeSecurityDescriptorFlags MapTypeAttributes(TokenSecurityDescriptorFlags tokenFlags) const;
+
+private:
+ explicit inline SecurityTransparencyBehavior(ISecurityTransparencyImpl *pTransparencyImpl);
+ SecurityTransparencyBehavior(const SecurityTransparencyBehavior &); // not implemented
+ SecurityTransparencyBehavior &operator=(const SecurityTransparencyBehavior &); // not implemented
+
+private:
+ template <class T>
+ friend const SecurityTransparencyBehavior *GetOrCreateTransparencyBehavior(SecurityTransparencyBehavior **ppBehavior);
+
+private:
+ static SecurityTransparencyBehavior *s_pStandardTransparencyBehavior;
+ static SecurityTransparencyBehavior *s_pLegacyTransparencyBehavior;
+
+ ISecurityTransparencyImpl *m_pTransparencyImpl;
+ SecurityTransparencyBehaviorFlags m_flags;
+};
+
+#include "securitytransparentassembly.inl"
+
+#endif // __SECURITYTRANSPARENT_H__
diff --git a/src/vm/securitytransparentassembly.inl b/src/vm/securitytransparentassembly.inl
new file mode 100644
index 0000000000..c610db1794
--- /dev/null
+++ b/src/vm/securitytransparentassembly.inl
@@ -0,0 +1,260 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//--------------------------------------------------------------------------
+// securitytransparentassembly.inl
+//
+// Implementation for transparent code feature
+//
+
+
+//--------------------------------------------------------------------------
+
+
+#ifndef __SECURITYTRANSPARENT_INL__
+#define __SECURITYTRANSPARENT_INL__
+
+//---------------------------------------------------------------------------------------
+//
+// Create a transparency behavior object
+//
+// Arguments:
+// pTransparencyImpl - transparency implementation to base behavior decisions on
+//
+// Notes:
+//    The transparency implementation object must have a lifetime at least as long as the
+// created transparency behavior object.
+//
+
+inline SecurityTransparencyBehavior::SecurityTransparencyBehavior(ISecurityTransparencyImpl *pTransparencyImpl) :
+ m_pTransparencyImpl(pTransparencyImpl),
+ m_flags(pTransparencyImpl->GetBehaviorFlags())
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pTransparencyImpl);
+}
+
+//
+// Typed logical operators for transparency flags
+//
+
+inline SecurityTransparencyBehaviorFlags operator|(SecurityTransparencyBehaviorFlags lhs,
+ SecurityTransparencyBehaviorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<SecurityTransparencyBehaviorFlags>(static_cast<DWORD>(lhs) |
+ static_cast<DWORD>(rhs));
+}
+
+inline SecurityTransparencyBehaviorFlags operator|=(SecurityTransparencyBehaviorFlags& lhs,
+ SecurityTransparencyBehaviorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<SecurityTransparencyBehaviorFlags>(static_cast<DWORD>(lhs) |
+ static_cast<DWORD>(rhs));
+ return lhs;
+}
+
+inline SecurityTransparencyBehaviorFlags operator&(SecurityTransparencyBehaviorFlags lhs,
+ SecurityTransparencyBehaviorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<SecurityTransparencyBehaviorFlags>(static_cast<DWORD>(lhs) &
+ static_cast<DWORD>(rhs));
+}
+
+inline SecurityTransparencyBehaviorFlags operator&=(SecurityTransparencyBehaviorFlags& lhs,
+ SecurityTransparencyBehaviorFlags rhs)
+{
+ LIMITED_METHOD_CONTRACT;
+ lhs = static_cast<SecurityTransparencyBehaviorFlags>(static_cast<DWORD>(lhs) &
+ static_cast<DWORD>(rhs));
+ return lhs;
+}
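+
+// These typed operators are needed because the built-in bitwise operators on enums yield a
+// plain int; without them an expression such as
+//     flags |= SecurityTransparencyBehaviorFlags_InheritanceRulesEnforced;
+// would not convert back to SecurityTransparencyBehaviorFlags without an explicit cast.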
+
+//---------------------------------------------------------------------------------------
+//
+// Are types and methods required to obey the transparency inheritance rules
+//
+
+inline bool SecurityTransparencyBehavior::AreInheritanceRulesEnforced() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return !!(m_flags & SecurityTransparencyBehaviorFlags_InheritanceRulesEnforced);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Can public critical members of an assembly behave as if they were safe critical with a
+// LinkDemand for FullTrust
+//
+
+inline bool SecurityTransparencyBehavior::CanCriticalMembersBeConvertedToLinkDemand() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return !!(m_flags & SecurityTransparencyBehaviorFlags_CriticalMembersConvertToLinkDemand);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Can members contained within a scope that introduces members as critical add their own
+// TreatAsSafe attribute
+//
+
+inline bool SecurityTransparencyBehavior::CanIntroducedCriticalMembersAddTreatAsSafe() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return !!(m_flags & SecurityTransparencyBehaviorFlags_IntroducedCriticalsMayAddTreatAsSafe);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Can transparent methods call methods protected with a LinkDemand
+//
+
+inline bool SecurityTransparencyBehavior::CanTransparentCodeCallLinkDemandMethods() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return !!(m_flags & SecurityTransparencyBehaviorFlags_TransparentCodeCanCallLinkDemand);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Can transparent members call native code directly
+//
+
+inline bool SecurityTransparencyBehavior::CanTransparentCodeCallUnmanagedCode() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return !!(m_flags & SecurityTransaprencyBehaviorFlags_TransparentCodeCanCallUnmanagedCode);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Can transparent members skip verification if the callstack passes a runtime check
+//
+
+inline bool SecurityTransparencyBehavior::CanTransparentCodeSkipVerification() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return !!(m_flags & SecurityTransparencyBehaviorFlags_TransparentCodeCanSkipVerification);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Custom attributes require transparency checks in order to be used by transparent code
+//
+
+inline bool SecurityTransparencyBehavior::DoAttributesRequireTransparencyChecks() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return !!(m_flags & SecurityTransparencyBehaviorFlags_AttributesRequireTransparencyCheck);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Opportunistically critical assemblies consist of entirely transparent types with entirely safe
+// critical methods.
+inline bool SecurityTransparencyBehavior::DoesOpportunisticRequireOnlySafeCriticalMethods() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return !!(m_flags & SecurityTransparencyBehaviorFlags_OpportunisticIsSafeCriticalMethods);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Does being loaded in partial trust imply that the assembly is implicitly all transparent
+//
+
+inline bool SecurityTransparencyBehavior::DoesPartialTrustImplyAllTransparent() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return !!(m_flags & SecurityTransparencyBehaviorFlags_PartialTrustImpliesAllTransparent);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Do all public types and methods automatically become treat as safe
+//
+
+inline bool SecurityTransparencyBehavior::DoesPublicImplyTreatAsSafe() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return !!(m_flags & SecurityTransparencyBehaviorFlags_PublicImpliesTreatAsSafe);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Does security critical or safe critical at larger-than-method scope apply only to methods introduced
+// within that scope, or to all methods contained within the scope?
+//
+// For instance, if this method returns true, a critical type does not make a method it overrides critical
+// because that method was introduced in a base type.
+//
+
+inline bool SecurityTransparencyBehavior::DoesScopeApplyOnlyToIntroducedMethods() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return !!(m_flags & SecurityTransparencyBehaviorFlags_ScopeAppliesOnlyToIntroducedMethods);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Do unsigned assemblies implicitly become APTCA
+//
+
+inline bool SecurityTransparencyBehavior::DoesUnsignedImplyAPTCA() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return !!(m_flags & SecurityTransparencyBehaviorFlags_UnsignedImpliesAPTCA);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Map the attributes found on a field into bits that represent what those attributes
+// mean to this field.
+//
+
+inline FieldSecurityDescriptorFlags SecurityTransparencyBehavior::MapFieldAttributes(TokenSecurityDescriptorFlags tokenFlags) const
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pTransparencyImpl->MapFieldAttributes(tokenFlags);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Map the attributes found on a method to the security transparency of that method
+//
+
+inline MethodSecurityDescriptorFlags SecurityTransparencyBehavior::MapMethodAttributes(TokenSecurityDescriptorFlags tokenFlags) const
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pTransparencyImpl->MapMethodAttributes(tokenFlags);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Map the attributes found on an assembly into bits that represent what those
+// attributes mean to this assembly.
+//
+
+inline ModuleSecurityDescriptorFlags SecurityTransparencyBehavior::MapModuleAttributes(TokenSecurityDescriptorFlags tokenFlags) const
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pTransparencyImpl->MapModuleAttributes(tokenFlags);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Map the attributes found on a type into bits that represent what those
+// attributes mean to this type.
+//
+
+inline TypeSecurityDescriptorFlags SecurityTransparencyBehavior::MapTypeAttributes(TokenSecurityDescriptorFlags tokenFlags) const
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pTransparencyImpl->MapTypeAttributes(tokenFlags);
+}
+
+#endif // __SECURITYTRANSPARENT_INL__
diff --git a/src/vm/sha1.cpp b/src/vm/sha1.cpp
new file mode 100644
index 0000000000..750b5b9363
--- /dev/null
+++ b/src/vm/sha1.cpp
@@ -0,0 +1,409 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+//
+//
+// ===========================================================================
+// File: sha1.cpp
+//
+// ===========================================================================
+/*++
+
+Abstract:
+
+ SHA-1 implementation
+
+Revision History:
+
+--*/
+
+/*
+ File sha1.cpp <STRIP>Version 03 August 2000.</STRIP>
+
+
+ This implements the SHA-1 hash function.
+ For algorithmic background see (for example)
+
+
+ Alfred J. Menezes et al
+ Handbook of Applied Cryptography
+ The CRC Press Series on Discrete Mathematics
+ and its Applications
+ CRC Press LLC, 1997
+                 ISBN 0-8493-8523-7
+ QA76.9A25M643
+
+ Also see FIPS 180-1 - Secure Hash Standard,
+ 1993 May 11 and 1995 April 17, by the U.S.
+ National Institute of Standards and Technology (NIST).
+
+*/
+
+#include "common.h"
+#include "sha1.h"
+#include "contract.h"
+
+typedef const DWORD DWORDC;
+#define ROTATE32L(x,n) _rotl(x,n)
+#define SHAVE32(x) (DWORD)(x)
+
+static void SHA1_block(SHA1_CTX *ctx)
+/*
+ Update the SHA-1 hash from a fresh 64 bytes of data.
+*/
+{
+ static DWORDC sha1_round1 = 0x5A827999u;
+ static DWORDC sha1_round2 = 0x6ED9EBA1u;
+ static DWORDC sha1_round3 = 0x8F1BBCDCu;
+ static DWORDC sha1_round4 = 0xCA62C1D6u;
+
+ DWORD a = ctx->partial_hash[0], b = ctx->partial_hash[1];
+ DWORD c = ctx->partial_hash[2], d = ctx->partial_hash[3];
+ DWORD e = ctx->partial_hash[4];
+ DWORD msg80[80];
+ int i;
+ BOOL OK = TRUE;
+
+ for (i = 0; i != 16; i++) { // Copy to local array, zero original
+ // Extend length to 80
+ DWORDC datval = ctx->awaiting_data[i];
+ ctx->awaiting_data[i] = 0;
+ msg80[i] = datval;
+ }
+
+ for (i = 16; i != 80; i += 2) {
+ DWORDC temp1 = msg80[i-3] ^ msg80[i-8]
+ ^ msg80[i-14] ^ msg80[i-16];
+ DWORDC temp2 = msg80[i-2] ^ msg80[i-7]
+ ^ msg80[i-13] ^ msg80[i-15];
+ msg80[i ] = ROTATE32L(temp1, 1);
+ msg80[i+1] = ROTATE32L(temp2, 1);
+ }
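+
+    // The loop above computes the standard SHA-1 message schedule
+    //     W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]),  16 <= t < 80,
+    // two words per iteration; since the nearest dependency is three words back, computing
+    // msg80[i] and msg80[i+1] in the same pass never reads a word written in that pass.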
+
+#define ROUND1(B, C, D) ((D ^ (B & (C ^ D))) + sha1_round1)
+ // Equivalent to (B & C) | (~B & D).
+ // (check cases B = 0 and B = 1)
+#define ROUND2(B, C, D) ((B ^ C ^ D) + sha1_round2)
+
+#define ROUND3(B, C, D) ((C & (B | D) | (B & D)) + sha1_round3)
+
+#define ROUND4(B, C, D) ((B ^ C ^ D) + sha1_round4)
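+
+// ROUND3 is the majority function Maj(B, C, D) = (B & C) | (B & D) | (C & D), rewritten as
+// (C & (B | D)) | (B & D) to save one logical operation.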
+
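+// Each round loop below is unrolled five steps at a time: rather than shifting the five
+// working variables (a, b, c, d, e) after every step, the statements rotate the roles of
+// the variables, so one trip through the loop body performs five SHA-1 steps with no
+// register shuffling.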
+// Round 1
+ for (i = 0; i != 20; i += 5) {
+ e += ROTATE32L(a, 5) + ROUND1(b, c, d) + msg80[i];
+ b = ROTATE32L(b, 30);
+
+ d += ROTATE32L(e, 5) + ROUND1(a, b, c) + msg80[i+1];
+ a = ROTATE32L(a, 30);
+
+ c += ROTATE32L(d, 5) + ROUND1(e, a, b) + msg80[i+2];
+ e = ROTATE32L(e, 30);
+
+ b += ROTATE32L(c, 5) + ROUND1(d, e, a) + msg80[i+3];
+ d = ROTATE32L(d, 30);
+
+ a += ROTATE32L(b, 5) + ROUND1(c, d, e) + msg80[i+4];
+ c = ROTATE32L(c, 30);
+#if 0
+ printf("i = %ld %08lx %08lx %08lx %08lx %08lx\n",
+ i, a, b, c, d, e);
+#endif
+ } // for i
+
+// Round 2
+ for (i = 20; i != 40; i += 5) {
+ e += ROTATE32L(a, 5) + ROUND2(b, c, d) + msg80[i];
+ b = ROTATE32L(b, 30);
+
+ d += ROTATE32L(e, 5) + ROUND2(a, b, c) + msg80[i+1];
+ a = ROTATE32L(a, 30);
+
+ c += ROTATE32L(d, 5) + ROUND2(e, a, b) + msg80[i+2];
+ e = ROTATE32L(e, 30);
+
+ b += ROTATE32L(c, 5) + ROUND2(d, e, a) + msg80[i+3];
+ d = ROTATE32L(d, 30);
+
+ a += ROTATE32L(b, 5) + ROUND2(c, d, e) + msg80[i+4];
+ c = ROTATE32L(c, 30);
+ } // for i
+
+// Round 3
+ for (i = 40; i != 60; i += 5) {
+ e += ROTATE32L(a, 5) + ROUND3(b, c, d) + msg80[i];
+ b = ROTATE32L(b, 30);
+
+ d += ROTATE32L(e, 5) + ROUND3(a, b, c) + msg80[i+1];
+ a = ROTATE32L(a, 30);
+
+ c += ROTATE32L(d, 5) + ROUND3(e, a, b) + msg80[i+2];
+ e = ROTATE32L(e, 30);
+
+ b += ROTATE32L(c, 5) + ROUND3(d, e, a) + msg80[i+3];
+ d = ROTATE32L(d, 30);
+
+ a += ROTATE32L(b, 5) + ROUND3(c, d, e) + msg80[i+4];
+ c = ROTATE32L(c, 30);
+ } // for i
+
+// Round 4
+ for (i = 60; i != 80; i += 5) {
+ e += ROTATE32L(a, 5) + ROUND4(b, c, d) + msg80[i];
+ b = ROTATE32L(b, 30);
+
+ d += ROTATE32L(e, 5) + ROUND4(a, b, c) + msg80[i+1];
+ a = ROTATE32L(a, 30);
+
+ c += ROTATE32L(d, 5) + ROUND4(e, a, b) + msg80[i+2];
+ e = ROTATE32L(e, 30);
+
+ b += ROTATE32L(c, 5) + ROUND4(d, e, a) + msg80[i+3];
+ d = ROTATE32L(d, 30);
+
+ a += ROTATE32L(b, 5) + ROUND4(c, d, e) + msg80[i+4];
+ c = ROTATE32L(c, 30);
+ } // for i
+
+#undef ROUND1
+#undef ROUND2
+#undef ROUND3
+#undef ROUND4
+
+ ctx->partial_hash[0] += a;
+ ctx->partial_hash[1] += b;
+ ctx->partial_hash[2] += c;
+ ctx->partial_hash[3] += d;
+ ctx->partial_hash[4] += e;
+#if 0
+ for (i = 0; i != 16; i++) {
+ printf("%8lx ", msg16[i]);
+ if ((i & 7) == 7) printf("\n");
+ }
+ printf("a, b, c, d, e = %08lx %08lx %08lx %08lx %08lx\n",
+ a, b, c, d, e);
+ printf("Partial hash = %08lx %08lx %08lx %08lx %08lx\n",
+ (long)ctx->partial_hash[0], (long)ctx->partial_hash[1],
+ (long)ctx->partial_hash[2], (long)ctx->partial_hash[3],
+ (long)ctx->partial_hash[4]);
+#endif
+} // end SHA1_block
+
+
+void SHA1Hash::SHA1Init(SHA1_CTX *ctx)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ ctx->nbit_total[0] = ctx->nbit_total[1] = 0;
+
+ for (DWORD i = 0; i != 16; i++) {
+ ctx->awaiting_data[i] = 0;
+ }
+
+ /*
+ Initialize hash variables.
+
+ */
+
+ ctx->partial_hash[0] = 0x67452301u;
+ ctx->partial_hash[1] = 0xefcdab89u;
+ ctx->partial_hash[2] = ~ctx->partial_hash[0];
+ ctx->partial_hash[3] = ~ctx->partial_hash[1];
+ ctx->partial_hash[4] = 0xc3d2e1f0u;
+
+}
+
+void SHA1Hash::SHA1Update(
+ SHA1_CTX * ctx, // IN/OUT
+ const BYTE * msg, // IN
+ DWORD nbyte) // IN
+/*
+ Append data to a partially hashed SHA-1 message.
+*/
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ const BYTE *fresh_data = msg;
+ DWORD nbyte_left = nbyte;
+ DWORD nbit_occupied = ctx->nbit_total[0] & 511;
+ DWORD *awaiting_data;
+ DWORDC nbitnew_low = SHAVE32(8*nbyte);
+
+
+ _ASSERTE((nbit_occupied & 7) == 0); // Partial bytes not implemented
+
+ ctx->nbit_total[0] += nbitnew_low;
+ ctx->nbit_total[1] += (nbyte >> 29)
+ + (SHAVE32(ctx->nbit_total[0]) < nbitnew_low);
+
+ /* Advance to word boundary in waiting_data */
+
+ if ((nbit_occupied & 31) != 0) {
+ awaiting_data = ctx->awaiting_data + nbit_occupied/32;
+
+ while ((nbit_occupied & 31) != 0 && nbyte_left != 0) {
+ nbit_occupied += 8;
+ *awaiting_data |= (DWORD)*fresh_data++
+ << ((-(int)nbit_occupied) & 31);
+ nbyte_left--; // Start at most significant byte
+ }
+ } // if nbit_occupied
+
+ /* Transfer 4 bytes at a time */
+
+ do {
+ DWORDC nword_occupied = nbit_occupied/32;
+ DWORD nwcopy = min(nbyte_left/4, 16 - nword_occupied);
+ _ASSERTE (nbit_occupied <= 512);
+ _ASSERTE ((nbit_occupied & 31) == 0 || nbyte_left == 0);
+ awaiting_data = ctx->awaiting_data + nword_occupied;
+ nbyte_left -= 4*nwcopy;
+ nbit_occupied += 32*nwcopy;
+
+ while (nwcopy != 0) {
+ DWORDC byte0 = (DWORD)fresh_data[0];
+ DWORDC byte1 = (DWORD)fresh_data[1];
+ DWORDC byte2 = (DWORD)fresh_data[2];
+ DWORDC byte3 = (DWORD)fresh_data[3];
+ *awaiting_data++ = byte3 | (byte2 << 8)
+ | (byte1 << 16) | (byte0 << 24);
+ /* Big endian */
+ fresh_data += 4;
+ nwcopy--;
+ }
+
+ if (nbit_occupied == 512) {
+ SHA1_block(ctx);
+ nbit_occupied = 0;
+ awaiting_data -= 16;
+ _ASSERTE(awaiting_data == ctx->awaiting_data);
+ }
+ } while (nbyte_left >= 4);
+
+ _ASSERTE (ctx->awaiting_data + nbit_occupied/32
+ == awaiting_data);
+
+ while (nbyte_left != 0) {
+ DWORDC new_byte = (DWORD)*fresh_data++;
+
+ _ASSERTE((nbit_occupied & 31) <= 16);
+ nbit_occupied += 8;
+ *awaiting_data |= new_byte << ((-(int)nbit_occupied) & 31);
+ nbyte_left--;
+ }
+
+ _ASSERTE (nbit_occupied == (ctx->nbit_total[0] & 511));
+} // end SHA1Update
+
+
+
+void SHA1Hash::SHA1Final(
+ SHA1_CTX * ctx, // IN/OUT
+ BYTE * digest) // OUT
+/*
+ Finish a SHA-1 hash.
+*/
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ DWORDC nbit0 = ctx->nbit_total[0];
+ DWORDC nbit1 = ctx->nbit_total[1];
+ DWORD nbit_occupied = nbit0 & 511;
+ DWORD i;
+
+ _ASSERTE((nbit_occupied & 7) == 0);
+
+ ctx->awaiting_data[nbit_occupied/32]
+ |= (DWORD)0x80 << ((-8-nbit_occupied) & 31);
+ // Append a 1 bit
+ nbit_occupied += 8;
+
+
+ // Append zero bits until length (in bits) is 448 mod 512.
+ // Then append the length, in bits.
+ // Here we assume the buffer was zeroed earlier.
+
+ if (nbit_occupied > 448) { // If fewer than 64 bits left
+ SHA1_block(ctx);
+ nbit_occupied = 0;
+ }
+ ctx->awaiting_data[14] = nbit1;
+ ctx->awaiting_data[15] = nbit0;
+ SHA1_block(ctx);
+
+ /* Copy final digest to user-supplied byte array */
+
+ for (i = 0; i != 5; i++) {
+ DWORDC dwi = ctx->partial_hash[i];
+ digest[4*i + 0] = (BYTE)((dwi >> 24) & 255);
+ digest[4*i + 1] = (BYTE)((dwi >> 16) & 255);
+ digest[4*i + 2] = (BYTE)((dwi >> 8) & 255);
+ digest[4*i + 3] = (BYTE)(dwi & 255); // Big-endian
+ }
+} // end SHA1Final
+
+SHA1Hash::SHA1Hash()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ m_fFinalized = FALSE;
+ SHA1Init(&m_Context);
+}
+
+void SHA1Hash::AddData(BYTE *pbData, DWORD cbData)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ if (m_fFinalized)
+ return;
+
+ SHA1Update(&m_Context, pbData, cbData);
+}
+
+// Retrieve a pointer to the final hash.
+BYTE *SHA1Hash::GetHash()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ if (m_fFinalized)
+ return m_Value;
+
+ SHA1Final(&m_Context, m_Value);
+
+ m_fFinalized = TRUE;
+
+ return m_Value;
+}
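+
+// Illustrative usage of SHA1Hash (a sketch, not part of the original change):
+//
+//   SHA1Hash hasher;
+//   hasher.AddData((BYTE *)"abc", 3);
+//   BYTE *pbDigest = hasher.GetHash();   // 20 bytes, valid until hasher is destroyed
+//   // For "abc" the digest is the well-known test vector
+//   // a9993e36 4706816a ba3e2571 7850c26c 9cd0d89d.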
+
diff --git a/src/vm/sha1.h b/src/vm/sha1.h
new file mode 100644
index 0000000000..ed814c0fa9
--- /dev/null
+++ b/src/vm/sha1.h
@@ -0,0 +1,55 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+#if defined(FEATURE_CORECLR)
+#ifndef SHA1_H_
+#define SHA1_H_
+
+// Hasher class, performs no allocation and therefore does not throw or return
+// errors. Usage is as follows:
+// Create an instance (this initializes the hash).
+// Add one or more blocks of input data using AddData().
+// Retrieve the hash using GetHash(). This can be done as many times as desired
+// until the object is destructed. Once a hash is asked for, further AddData
+// calls will be ignored. There is no way to reset object state (simply
+// destroy the object and create another instead).
+
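+// Minimal usage sketch (pbInput/cbInput are hypothetical caller-supplied values):
+//
+//   SHA1Hash hasher;                    // construction initializes the hash
+//   hasher.AddData(pbInput, cbInput);   // may be called any number of times
+//   BYTE *pbDigest = hasher.GetHash();  // 20-byte (SHA1_HASH_SIZE) result
+//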
+#define SHA1_HASH_SIZE 20 // Number of bytes output by SHA-1
+
+typedef struct {
+ DWORD magic_sha1; // Magic value for A_SHA_CTX
+ DWORD awaiting_data[16];
+ // Data awaiting full 512-bit block.
+ // Length (nbit_total[0] % 512) bits.
+ // Unused part of buffer (at end) is zero
+ DWORD partial_hash[5];
+ // Hash through last full block
+ DWORD nbit_total[2];
+ // Total length of message so far
+ // (bits, mod 2^64)
+} SHA1_CTX;
+
+class SHA1Hash
+{
+private:
+ SHA1_CTX m_Context;
+ BYTE m_Value[SHA1_HASH_SIZE];
+ BOOL m_fFinalized;
+
+ void SHA1Init(SHA1_CTX*);
+ void SHA1Update(SHA1_CTX*, const BYTE*, const DWORD);
+ void SHA1Final(SHA1_CTX*, BYTE* digest);
+
+public:
+ SHA1Hash();
+ void AddData(BYTE *pbData, DWORD cbData);
+ BYTE *GetHash();
+};
+
+#endif // SHA1_H_
+#else // !defined(FEATURE_CORECLR)
+#include "crypto/sha1.h"
+#endif // defined(FEATURE_CORECLR)
\ No newline at end of file
diff --git a/src/vm/sigformat.cpp b/src/vm/sigformat.cpp
new file mode 100644
index 0000000000..0f569973cc
--- /dev/null
+++ b/src/vm/sigformat.cpp
@@ -0,0 +1,648 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#include "common.h"
+#include "sigformat.h"
+#include "typedesc.h"
+
+SigFormat::SigFormat()
+{
+ WRAPPER_NO_CONTRACT; // THROWS;GC_TRIGGERS;INJECT_FAULT(ThrowOM)
+ _size = SIG_INC;
+ _pos = 0;
+ _fmtSig = new char[_size];
+}
+
+SigFormat::SigFormat(MetaSig &metaSig, LPCUTF8 szMemberName, LPCUTF8 szClassName, LPCUTF8 szNameSpace)
+{
+ WRAPPER_NO_CONTRACT;
+ FormatSig(metaSig, szMemberName, szClassName, szNameSpace);
+}
+
+
+// SigFormat::SigFormat()
+// This constructor will create the string representation of a
+// method.
+SigFormat::SigFormat(MethodDesc* pMeth, TypeHandle owner, BOOL fIgnoreMethodName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+    // Explicitly use MethodDesc::LoadMethodInstantiation so that we can successfully format
+    // non-typical generic method definitions.
+ MetaSig sig(pMeth, pMeth->GetExactClassInstantiation(owner), pMeth->LoadMethodInstantiation());
+
+ if (fIgnoreMethodName)
+ {
+ FormatSig(sig, NULL);
+ }
+ else
+ {
+ FormatSig(sig, pMeth->GetName());
+ }
+}
+
+
+SigFormat::~SigFormat()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (_fmtSig)
+ delete [] _fmtSig;
+}
+
+const char * SigFormat::GetCString()
+{
+ LIMITED_METHOD_CONTRACT;
+ return _fmtSig;
+}
+
+const char * SigFormat::GetCStringParmsOnly()
+{
+ LIMITED_METHOD_CONTRACT;
+ // _fmtSig looks like: "void Put (byte[], int, int)".
+ // Skip to the '('.
+ int skip;
+ for(skip=0; _fmtSig[skip]!='('; skip++)
+ ;
+ return _fmtSig + skip;
+}
+
+void SigFormat::AddString(LPCUTF8 s)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ size_t len = strlen(s);
+    // Grow the buffer if the appended string would overflow it
+ size_t requiredSize = _pos + len + 1;
+
+ if (requiredSize <= _pos) { // check for integer overflow in previous calc
+ #ifndef DACCESS_COMPILE
+ COMPlusThrowOM();
+ #else
+ DacError(E_OUTOFMEMORY);
+ #endif
+ }
+
+ if (requiredSize > _size) {
+ size_t newSize = (_size+SIG_INC > requiredSize) ? _size+SIG_INC : requiredSize+SIG_INC;
+ char* temp = new char[newSize];
+ memcpy(temp,_fmtSig,_size);
+ delete [] _fmtSig;
+ _fmtSig = temp;
+ _size=newSize;
+ }
+ strcpy_s(&_fmtSig[_pos],_size - (&_fmtSig[_pos] - _fmtSig), s);
+ _pos += len;
+}
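+
+// Growth policy above (illustrative): an append that would overflow reallocates
+// to _size + SIG_INC when that is large enough, otherwise to requiredSize + SIG_INC,
+// so with SIG_INC == 256 each reallocation adds at least 256 bytes of headroom.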
+
+
+//------------------------------------------------------------------------
+// Replacement for SigFormat::AddType that avoids class loading
+// and copes with formal type parameters
+//------------------------------------------------------------------------
+void SigFormat::AddTypeString(Module* pModule, SigPointer sig, const SigTypeContext *pTypeContext)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ LPCUTF8 szcName;
+ LPCUTF8 szcNameSpace;
+ /*
+ ULONG cArgs;
+ VOID *pEnum;
+ ULONG i;
+ */
+
+ CorElementType type;
+ IfFailThrow(sig.GetElemType(&type));
+
+ // Format the output
+ switch (type)
+ {
+// @Todo: Should these be ilasm-style types?
+ case ELEMENT_TYPE_VOID: AddString("Void"); break;
+ case ELEMENT_TYPE_BOOLEAN: AddString("Boolean"); break;
+ case ELEMENT_TYPE_I1: AddString("SByte"); break;
+ case ELEMENT_TYPE_U1: AddString("Byte"); break;
+ case ELEMENT_TYPE_I2: AddString("Int16"); break;
+ case ELEMENT_TYPE_U2: AddString("UInt16"); break;
+ case ELEMENT_TYPE_CHAR: AddString("Char"); break;
+ case ELEMENT_TYPE_I: AddString("IntPtr"); break;
+ case ELEMENT_TYPE_U: AddString("UIntPtr"); break;
+ case ELEMENT_TYPE_I4: AddString("Int32"); break;
+ case ELEMENT_TYPE_U4: AddString("UInt32"); break;
+ case ELEMENT_TYPE_I8: AddString("Int64"); break;
+ case ELEMENT_TYPE_U8: AddString("UInt64"); break;
+ case ELEMENT_TYPE_R4: AddString("Single"); break;
+ case ELEMENT_TYPE_R8: AddString("Double"); break;
+ case ELEMENT_TYPE_OBJECT: AddString(g_ObjectClassName); break;
+ case ELEMENT_TYPE_STRING: AddString(g_StringClassName); break;
+
+    // For value classes we fall through unless the pVMC is an array class;
+    // if it's an array class we need to get the name of the underlying type
+    // from it.
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_CLASS:
+ {
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+ mdToken token;
+ IfFailThrow(sig.GetToken(&token));
+
+ if (TypeFromToken(token) == mdtTypeDef)
+ {
+ IfFailThrow(pInternalImport->GetNameOfTypeDef(token, &szcName, &szcNameSpace));
+ }
+ else if (TypeFromToken(token) == mdtTypeRef)
+ {
+ IfFailThrow(pInternalImport->GetNameOfTypeRef(token, &szcNameSpace, &szcName));
+ }
+ else
+ break;
+
+ if (*szcNameSpace)
+ {
+ AddString(szcNameSpace);
+ AddString(".");
+ }
+ AddString(szcName);
+ break;
+ }
+ case ELEMENT_TYPE_INTERNAL:
+ {
+ TypeHandle hType;
+
+ // this check is not functional in DAC and provides no security against a malicious dump
+ // the DAC is prepared to receive an invalid type handle
+#ifndef DACCESS_COMPILE
+ if (pModule->IsSigInIL(sig.GetPtr()))
+ THROW_BAD_FORMAT(BFA_BAD_COMPLUS_SIG, pModule);
+#endif
+
+ CorSigUncompressPointer(sig.GetPtr(), (void**)&hType);
+ _ASSERTE(!hType.IsNull());
+ MethodTable *pMT = hType.GetMethodTable();
+ _ASSERTE(pMT);
+ mdToken token = pMT->GetCl();
+ IfFailThrow(pMT->GetMDImport()->GetNameOfTypeDef(token, &szcName, &szcNameSpace));
+ if (*szcNameSpace)
+ {
+ AddString(szcNameSpace);
+ AddString(".");
+ }
+ AddString(szcName);
+ break;
+ }
+ case ELEMENT_TYPE_TYPEDBYREF:
+ {
+ AddString("TypedReference");
+ break;
+ }
+
+ case ELEMENT_TYPE_BYREF:
+ {
+ AddTypeString(pModule, sig, pTypeContext);
+ AddString(" ByRef");
+ }
+ break;
+
+ case ELEMENT_TYPE_MVAR :
+ {
+ DWORD ix;
+ IfFailThrow(sig.GetData(&ix));
+ if (pTypeContext && !pTypeContext->m_methodInst.IsEmpty() && ix >= 0 && ix < pTypeContext->m_methodInst.GetNumArgs())
+ {
+ AddType(pTypeContext->m_methodInst[ix]);
+ }
+ else
+ {
+ char smallbuf[20];
+ sprintf_s(smallbuf, COUNTOF(smallbuf), "!!%d", ix);
+ AddString(smallbuf);
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_VAR :
+ {
+ DWORD ix;
+ IfFailThrow(sig.GetData(&ix));
+
+ if (pTypeContext && !pTypeContext->m_classInst.IsEmpty() && ix >= 0 && ix < pTypeContext->m_classInst.GetNumArgs())
+ {
+ AddType(pTypeContext->m_classInst[ix]);
+ }
+ else
+ {
+ char smallbuf[20];
+ sprintf_s(smallbuf, COUNTOF(smallbuf), "!%d", ix);
+ AddString(smallbuf);
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_GENERICINST :
+ {
+ AddTypeString(pModule, sig, pTypeContext);
+
+ IfFailThrow(sig.SkipExactlyOne());
+ DWORD n;
+ IfFailThrow(sig.GetData(&n));
+
+ AddString("<");
+ for (DWORD i = 0; i < n; i++)
+ {
+ if (i > 0)
+ AddString(",");
+ AddTypeString(pModule,sig, pTypeContext);
+ IfFailThrow(sig.SkipExactlyOne());
+ }
+ AddString(">");
+
+ break;
+ }
+
+ case ELEMENT_TYPE_SZARRAY: // Single Dim, Zero
+ case ELEMENT_TYPE_ARRAY: // General Array
+ {
+ AddTypeString(pModule, sig, pTypeContext);
+ IfFailThrow(sig.SkipExactlyOne());
+ if (type == ELEMENT_TYPE_ARRAY)
+ {
+ AddString("[");
+ ULONG len;
+ IfFailThrow(sig.GetData(&len));
+
+ for (ULONG i=1;i<len;i++)
+ AddString(",");
+
+ AddString("]");
+ }
+ else
+ {
+ AddString("[]");
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_PTR:
+ {
+ // This will pop up on methods that take a pointer to a block of unmanaged memory.
+ AddTypeString(pModule, sig, pTypeContext);
+ AddString("*");
+ break;
+ }
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+ DWORD callConv;
+ IfFailThrow(sig.GetData(&callConv));
+
+ ULONG cArgs;
+ IfFailThrow(sig.GetData(&cArgs));
+
+ AddTypeString(pModule, sig, pTypeContext);
+ IfFailThrow(sig.SkipExactlyOne());
+ AddString(" (");
+ ULONG i;
+ for (i = 0; i < cArgs; i++) {
+ AddTypeString(pModule, sig, pTypeContext);
+ IfFailThrow(sig.SkipExactlyOne());
+ if (i != (cArgs - 1))
+ AddString(", ");
+ }
+ if ((callConv & IMAGE_CEE_CS_CALLCONV_MASK) == IMAGE_CEE_CS_CALLCONV_VARARG)
+ {
+ if (cArgs)
+ AddString(", ");
+ AddString("...");
+ }
+ AddString(")");
+ break;
+ }
+
+ default:
+ AddString("**UNKNOWN TYPE**");
+
+ }
+}
+
+void SigFormat::FormatSig(MetaSig &sig, LPCUTF8 szMemberName, LPCUTF8 szClassName, LPCUTF8 szNameSpace)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ UINT cArgs;
+
+ _size = SIG_INC;
+ _pos = 0;
+ _fmtSig = new char[_size];
+
+ AddTypeString(sig.GetModule(), sig.GetReturnProps(), sig.GetSigTypeContext());
+
+ AddString(" ");
+ if (szNameSpace != NULL)
+ {
+ AddString(szNameSpace);
+ AddString(".");
+ }
+ if (szClassName != NULL)
+ {
+ AddString(szClassName);
+ AddString(".");
+ }
+ if (szMemberName != NULL)
+ {
+ AddString(szMemberName);
+ }
+
+ cArgs = sig.NumFixedArgs();
+ sig.Reset();
+
+ AddString("(");
+
+ // Loop through all of the args
+ for (UINT i=0;i<cArgs;i++) {
+ sig.NextArg();
+ AddTypeString(sig.GetModule(), sig.GetArgProps(), sig.GetSigTypeContext());
+ if (i != cArgs-1)
+ AddString(", ");
+ }
+
+ // Display vararg signature at end
+ if (sig.IsVarArg())
+ {
+ if (cArgs)
+ AddString(", ");
+ AddString("...");
+ }
+
+ AddString(")");
+}
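+
+// Example output (illustrative, hypothetical method): for a method declared as
+// void Put(byte[] b, int i, int j) on class Example.Buffer, FormatSig produces
+// "Void Example.Buffer.Put(Byte[], Int32, Int32)".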
+
+void SigFormat::AddType(TypeHandle th)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ LPCUTF8 szcName;
+ LPCUTF8 szcNameSpace;
+ ULONG cArgs;
+ ULONG i;
+
+ if (th.IsNull())
+ {
+ AddString("**UNKNOWN TYPE**");
+ return;
+ }
+
+ CorElementType type = th.GetSignatureCorElementType();
+
+ // Format the output
+ switch (type)
+ {
+// <TODO>@Todo: Should these be ilasm-style types?</TODO>
+ case ELEMENT_TYPE_VOID: AddString("Void"); break;
+ case ELEMENT_TYPE_BOOLEAN: AddString("Boolean"); break;
+ case ELEMENT_TYPE_I1: AddString("SByte"); break;
+ case ELEMENT_TYPE_U1: AddString("Byte"); break;
+ case ELEMENT_TYPE_I2: AddString("Int16"); break;
+ case ELEMENT_TYPE_U2: AddString("UInt16"); break;
+ case ELEMENT_TYPE_CHAR: AddString("Char"); break;
+ case ELEMENT_TYPE_I: AddString("IntPtr"); break;
+ case ELEMENT_TYPE_U: AddString("UIntPtr"); break;
+ case ELEMENT_TYPE_I4: AddString("Int32"); break;
+ case ELEMENT_TYPE_U4: AddString("UInt32"); break;
+ case ELEMENT_TYPE_I8: AddString("Int64"); break;
+ case ELEMENT_TYPE_U8: AddString("UInt64"); break;
+ case ELEMENT_TYPE_R4: AddString("Single"); break;
+ case ELEMENT_TYPE_R8: AddString("Double"); break;
+ case ELEMENT_TYPE_OBJECT: AddString(g_ObjectClassName); break;
+ case ELEMENT_TYPE_STRING: AddString(g_StringClassName); break;
+
+    // For value classes we fall through unless the pVMC is an array class;
+    // if it's an array class we need to get the name of the underlying type
+    // from it.
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_CLASS:
+ {
+ if (FAILED(th.GetMethodTable()->GetMDImport()->GetNameOfTypeDef(th.GetCl(), &szcName, &szcNameSpace)))
+ {
+ szcName = szcNameSpace = "Invalid TypeDef record";
+ }
+
+ if (*szcNameSpace != 0)
+ {
+ AddString(szcNameSpace);
+ AddString(".");
+ }
+ AddString(szcName);
+
+ if (th.HasInstantiation())
+ {
+ Instantiation inst = th.GetInstantiation();
+
+ if(!inst.IsEmpty())
+ {
+ AddString("<");
+ for (DWORD j = 0; j < th.GetNumGenericArgs(); j++)
+ {
+ if (j > 0)
+ AddString(",");
+
+ AddType(inst[j]);
+ }
+ AddString(">");
+ }
+ }
+
+ break;
+ }
+ case ELEMENT_TYPE_TYPEDBYREF:
+ {
+ AddString("TypedReference");
+ break;
+ }
+
+ case ELEMENT_TYPE_BYREF:
+ {
+ TypeHandle h = th.AsTypeDesc()->GetTypeParam();
+ AddType(h);
+ AddString(" ByRef");
+ }
+ break;
+
+ case ELEMENT_TYPE_SZARRAY: // Single Dim, Zero
+ case ELEMENT_TYPE_ARRAY: // General Array
+ {
+ ArrayTypeDesc* aTD = th.AsArray();
+ AddType(aTD->GetArrayElementTypeHandle());
+
+ if (type == ELEMENT_TYPE_ARRAY)
+ {
+ AddString("[");
+ int len = aTD->GetRank();
+
+            for (int j=0;j<len-1;j++)
+                AddString(",");
+            AddString("]");
+ }
+ else
+ {
+ AddString("[]");
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_PTR:
+ {
+ // This will pop up on methods that take a pointer to a block of unmanaged memory.
+ TypeHandle h = th.AsTypeDesc()->GetTypeParam();
+ AddType(h);
+ AddString("*");
+ break;
+ }
+ case ELEMENT_TYPE_FNPTR:
+ {
+ FnPtrTypeDesc* pTD = th.AsFnPtrType();
+
+ TypeHandle *pRetAndArgTypes = pTD->GetRetAndArgTypes();
+ AddType(pRetAndArgTypes[0]);
+ AddString(" (");
+
+ cArgs = pTD->GetNumArgs();
+
+ for (i = 0; i < cArgs; i++)
+ {
+ AddType(pRetAndArgTypes[i+1]);
+
+ if (i != (cArgs - 1))
+ AddString(", ");
+ }
+        if ((pTD->GetCallConv() & IMAGE_CEE_CS_CALLCONV_MASK) == IMAGE_CEE_CS_CALLCONV_VARARG)
+        {
+            // Mirror AddTypeString: only emit the separator when there are fixed args
+            if (cArgs)
+                AddString(", ");
+            AddString("...");
+        }
+ AddString(")");
+
+ break;
+ }
+
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ {
+ StackScratchBuffer scratch;
+ StackSString name;
+ th.GetName(name);
+
+ AddString(name.GetANSI(scratch));
+
+ break;
+ }
+
+ default:
+ AddString("**UNKNOWN TYPE**");
+ }
+}
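+
+// Example (illustrative): AddType on a List<int> handle emits the metadata name
+// followed by the instantiation, e.g. "System.Collections.Generic.List`1<Int32>".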
+
+//
+// Legacy debug-only string formatting pretty printer
+//
+
+#ifdef _DEBUG
+#ifndef DACCESS_COMPILE
+
+#include <formattype.h>
+
+/*******************************************************************/
+const char* FormatSigHelper(MethodDesc* pMD, CQuickBytes *out, LoaderHeap *pHeap, AllocMemTracker *pamTracker)
+{
+ PCCOR_SIGNATURE pSig;
+ ULONG cSig;
+ const char *ret = NULL;
+
+ pMD->GetSig(&pSig, &cSig);
+
+ if (pSig == NULL)
+ {
+ return "<null>";
+ }
+
+ EX_TRY
+ {
+ const char* sigStr = PrettyPrintSig(pSig, cSig, "*", out, pMD->GetMDImport(), 0);
+
+ S_SIZE_T len = S_SIZE_T(strlen(sigStr))+S_SIZE_T(1);
+ if(len.IsOverflow()) COMPlusThrowHR(COR_E_OVERFLOW);
+
+ char* mem = (char*) pamTracker->Track(pHeap->AllocMem(len));
+
+ if (mem != NULL)
+ {
+ strcpy_s(mem, len.Value(), sigStr);
+ ret = mem;
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ return ret;
+}
+
+/*******************************************************************/
+const char* FormatSig(MethodDesc * pMD, LoaderHeap * pHeap, AllocMemTracker * pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ CQuickBytes out;
+ return FormatSigHelper(pMD, &out, pHeap, pamTracker);
+}
+
+/*******************************************************************/
+const char* FormatSig(MethodDesc* pMD, AppDomain *pDomain, AllocMemTracker *pamTracker)
+{
+ WRAPPER_NO_CONTRACT;
+ return FormatSig(pMD,pDomain->GetLowFrequencyHeap(),pamTracker);
+}
+#endif
+#endif
diff --git a/src/vm/sigformat.h b/src/vm/sigformat.h
new file mode 100644
index 0000000000..387fabaa68
--- /dev/null
+++ b/src/vm/sigformat.h
@@ -0,0 +1,44 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef _SIGFORMAT_H
+#define _SIGFORMAT_H
+
+#include "invokeutil.h"
+#include "field.h"
+
+#define SIG_INC 256
+
+class SigFormat
+{
+public:
+ SigFormat();
+
+ //@GENERICS: the owning type handle is required because pMeth may be shared between instantiations
+ SigFormat(MethodDesc* pMeth, TypeHandle owner, BOOL fIgnoreMethodName = false);
+ SigFormat(MetaSig &metaSig, LPCUTF8 memberName, LPCUTF8 className = NULL, LPCUTF8 ns = NULL);
+
+ ~SigFormat();
+
+ const char * GetCString();
+ const char * GetCStringParmsOnly();
+
+ void AddType(TypeHandle th);
+
+protected:
+ void FormatSig(MetaSig &metaSig, LPCUTF8 memberName, LPCUTF8 className = NULL, LPCUTF8 ns = NULL);
+
+ char* _fmtSig;
+ size_t _size;
+ size_t _pos;
+
+ void AddString(LPCUTF8 s);
+ void AddTypeString(Module* pModule, SigPointer sig, const SigTypeContext *pTypeContext);
+
+};
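+
+// Minimal usage sketch (pMD and owner are hypothetical values obtained elsewhere):
+//
+//   SigFormat fmt(pMD, owner);
+//   const char *psz = fmt.GetCString(); // e.g. "Void Example.Buffer.Put(Byte[], Int32, Int32)"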
+
+#endif // _SIGFORMAT_H
diff --git a/src/vm/siginfo.cpp b/src/vm/siginfo.cpp
new file mode 100644
index 0000000000..3cc9cf1a23
--- /dev/null
+++ b/src/vm/siginfo.cpp
@@ -0,0 +1,5603 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// siginfo.cpp
+//
+// Signature parsing code
+//
+
+
+#include "common.h"
+
+#include "siginfo.hpp"
+#include "clsload.hpp"
+#include "vars.hpp"
+#include "excep.h"
+#include "gc.h"
+#include "field.h"
+#include "eeconfig.h"
+#include "runtimehandles.h" // for SignatureNative
+#include "security.h" // for CanSkipVerification
+#include "winwrap.h"
+#include <formattype.h>
+#include "sigbuilder.h"
+#include "../md/compiler/custattr.h"
+#include <corhlprpriv.h>
+
+/*******************************************************************/
+const CorTypeInfo::CorTypeInfoEntry CorTypeInfo::info[ELEMENT_TYPE_MAX] =
+{
+#define TYPEINFO(enumName,nameSpace,className,size,gcType,isArray,isPrim,isFloat,isModifier,isGenVar) \
+ { nameSpace, className, enumName, size, gcType, isArray, isPrim, isFloat, isModifier, isGenVar },
+#include "cortypeinfo.h"
+# undef TYPEINFO
+};
+
+/*******************************************************************/
+/* static */
+CorElementType
+CorTypeInfo::FindPrimitiveType(LPCUTF8 name)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(name != NULL);
+
+ for (unsigned int i = 1; i < _countof(CorTypeInfo::info); i++)
+ { // can skip ELEMENT_TYPE_END (index 0)
+ if ((info[i].className != NULL) && (strcmp(name, info[i].className) == 0))
+ return (CorElementType)i;
+ }
+
+ return ELEMENT_TYPE_END;
+}
+
+const ElementTypeInfo gElementTypeInfo[] = {
+
+#ifdef _DEBUG
+#define DEFINEELEMENTTYPEINFO(etname, cbsize, gcness, inreg) {(int)(etname),cbsize,gcness,inreg},
+#else
+#define DEFINEELEMENTTYPEINFO(etname, cbsize, gcness, inreg) {cbsize,gcness,inreg},
+#endif
+
+// Meaning of columns:
+//
+// name - The checked build uses this to verify that the table is sorted
+// correctly. This is a lookup table that uses ELEMENT_TYPE_*
+// as an array index.
+//
+// cbsize - The byte size of this value as returned by SizeOf(). SPECIAL VALUE: -1
+// requires type-specific treatment.
+//
+// gc - 0 no embedded objectrefs
+// 1 value is an objectref
+// 2 value is an interior pointer - promote it but don't scan it
+// 3 requires type-specific treatment
+//
+// reg - put in a register?
+//
+// Note: This table is very similar to the one in file:corTypeInfo.h with these exceptions:
+// reg column is missing in corTypeInfo.h
+// ELEMENT_TYPE_VAR, ELEMENT_TYPE_GENERICINST, ELEMENT_TYPE_MVAR ... size -1 vs. sizeof(void*) in corTypeInfo.h
+// ELEMENT_TYPE_CMOD_REQD, ELEMENT_TYPE_CMOD_OPT, ELEMENT_TYPE_INTERNAL ... size -1 vs. 0 in corTypeInfo.h
+// ELEMENT_TYPE_INTERNAL ... GC type is TYPE_GC_NONE vs. TYPE_GC_OTHER in corTypeInfo.h
+//
+// name cbsize gc reg
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_END, -1, TYPE_GC_NONE, 0)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_VOID, 0, TYPE_GC_NONE, 0)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_BOOLEAN, 1, TYPE_GC_NONE, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_CHAR, 2, TYPE_GC_NONE, 1)
+
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_I1, 1, TYPE_GC_NONE, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_U1, 1, TYPE_GC_NONE, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_I2, 2, TYPE_GC_NONE, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_U2, 2, TYPE_GC_NONE, 1)
+
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_I4, 4, TYPE_GC_NONE, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_U4, 4, TYPE_GC_NONE, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_I8, 8, TYPE_GC_NONE, 0)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_U8, 8, TYPE_GC_NONE, 0)
+
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_R4, 4, TYPE_GC_NONE, 0)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_R8, 8, TYPE_GC_NONE, 0)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_STRING, sizeof(LPVOID), TYPE_GC_REF, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_PTR, sizeof(LPVOID), TYPE_GC_NONE, 1)
+
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_BYREF, sizeof(LPVOID), TYPE_GC_BYREF, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_VALUETYPE, -1, TYPE_GC_OTHER, 0)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_CLASS, sizeof(LPVOID), TYPE_GC_REF, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_VAR, -1, TYPE_GC_OTHER, 1)
+
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_ARRAY, sizeof(LPVOID), TYPE_GC_REF, 1)
+
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_GENERICINST, -1, TYPE_GC_OTHER, 0)
+
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_TYPEDBYREF, sizeof(LPVOID)*2,TYPE_GC_BYREF, 0)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_VALUEARRAY_UNSUPPORTED, -1, TYPE_GC_NONE, 0)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_I, sizeof(LPVOID), TYPE_GC_NONE, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_U, sizeof(LPVOID), TYPE_GC_NONE, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_R_UNSUPPORTED, -1, TYPE_GC_NONE, 0)
+
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_FNPTR, sizeof(LPVOID), TYPE_GC_NONE, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_OBJECT, sizeof(LPVOID), TYPE_GC_REF, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_SZARRAY, sizeof(LPVOID), TYPE_GC_REF, 1)
+
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_MVAR, -1, TYPE_GC_OTHER, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_CMOD_REQD, -1, TYPE_GC_NONE, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_CMOD_OPT, -1, TYPE_GC_NONE, 1)
+DEFINEELEMENTTYPEINFO(ELEMENT_TYPE_INTERNAL, -1, TYPE_GC_NONE, 0)
+};
+
+unsigned GetSizeForCorElementType(CorElementType etyp)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(gElementTypeInfo[etyp].m_elementType == etyp);
+ return gElementTypeInfo[etyp].m_cbSize;
+}
+
+const ElementTypeInfo* GetElementTypeInfo(CorElementType etyp)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(gElementTypeInfo[etyp].m_elementType == etyp);
+ return &gElementTypeInfo[etyp];
+}
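+
+// Example (illustrative): GetSizeForCorElementType(ELEMENT_TYPE_I4) returns 4,
+// while entries marked -1 in the table above (e.g. ELEMENT_TYPE_VALUETYPE)
+// require type-specific treatment by the caller.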
+
+#ifndef DACCESS_COMPILE
+
+void SigPointer::ConvertToInternalExactlyOne(Module* pSigModule, SigTypeContext *pTypeContext, SigBuilder * pSigBuilder, BOOL bSkipCustomModifier)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pSigModule));
+ }
+ CONTRACTL_END
+
+ SigPointer sigStart = *this;
+
+ CorElementType typ = ELEMENT_TYPE_END;
+
+    // Check whether we need to skip custom modifiers.
+    // Custom modifiers are only preserved when calculating the IL stub hash blob.
+ if (bSkipCustomModifier)
+ {
+ // GetElemType eats sentinel and custom modifiers
+ IfFailThrowBF(GetElemType(&typ), BFA_BAD_COMPLUS_SIG, pSigModule);
+ }
+ else
+ {
+ BYTE byElemType;
+
+ IfFailThrowBF(SkipAnyVASentinel(), BFA_BAD_COMPLUS_SIG, pSigModule);
+
+ // Call GetByte and make sure we don't lose custom modifiers
+ IfFailThrowBF(GetByte(&byElemType), BFA_BAD_COMPLUS_SIG, pSigModule);
+ typ = (CorElementType) byElemType;
+ }
+
+ if (typ == ELEMENT_TYPE_CLASS || typ == ELEMENT_TYPE_VALUETYPE)
+ {
+ IfFailThrowBF(GetToken(NULL), BFA_BAD_COMPLUS_SIG, pSigModule);
+ TypeHandle th = sigStart.GetTypeHandleThrowing(pSigModule, pTypeContext);
+
+ pSigBuilder->AppendElementType(ELEMENT_TYPE_INTERNAL);
+ pSigBuilder->AppendPointer(th.AsPtr());
+ return;
+ }
+
+ if (pTypeContext != NULL)
+ {
+ ULONG varNum;
+ if (typ == ELEMENT_TYPE_VAR)
+ {
+ IfFailThrowBF(GetData(&varNum), BFA_BAD_COMPLUS_SIG, pSigModule);
+ THROW_BAD_FORMAT_MAYBE(varNum < pTypeContext->m_classInst.GetNumArgs(), BFA_BAD_COMPLUS_SIG, pSigModule);
+
+ pSigBuilder->AppendElementType(ELEMENT_TYPE_INTERNAL);
+ pSigBuilder->AppendPointer(pTypeContext->m_classInst[varNum].AsPtr());
+ return;
+ }
+ if (typ == ELEMENT_TYPE_MVAR)
+ {
+ IfFailThrowBF(GetData(&varNum), BFA_BAD_COMPLUS_SIG, pSigModule);
+ THROW_BAD_FORMAT_MAYBE(varNum < pTypeContext->m_methodInst.GetNumArgs(), BFA_BAD_COMPLUS_SIG, pSigModule);
+
+ pSigBuilder->AppendElementType(ELEMENT_TYPE_INTERNAL);
+ pSigBuilder->AppendPointer(pTypeContext->m_methodInst[varNum].AsPtr());
+ return;
+ }
+ }
+
+ pSigBuilder->AppendElementType(typ);
+
+ if (!CorIsPrimitiveType(typ))
+ {
+ switch (typ)
+ {
+ default:
+ THROW_BAD_FORMAT(BFA_BAD_COMPLUS_SIG, pSigModule);
+ break;
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ {
+ ULONG varNum;
+ // Skip variable number
+ IfFailThrowBF(GetData(&varNum), BFA_BAD_COMPLUS_SIG, pSigModule);
+ pSigBuilder->AppendData(varNum);
+ }
+ break;
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_TYPEDBYREF:
+ break;
+
+ case ELEMENT_TYPE_BYREF: //fallthru
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_PINNED:
+ case ELEMENT_TYPE_SZARRAY:
+ ConvertToInternalExactlyOne(pSigModule, pTypeContext, pSigBuilder, bSkipCustomModifier);
+ break;
+
+ case ELEMENT_TYPE_FNPTR:
+ ConvertToInternalSignature(pSigModule, pTypeContext, pSigBuilder, bSkipCustomModifier);
+ break;
+
+ case ELEMENT_TYPE_ARRAY:
+ {
+ ConvertToInternalExactlyOne(pSigModule, pTypeContext, pSigBuilder, bSkipCustomModifier);
+
+ ULONG rank = 0; // Get rank
+ IfFailThrowBF(GetData(&rank), BFA_BAD_COMPLUS_SIG, pSigModule);
+ pSigBuilder->AppendData(rank);
+
+ if (rank)
+ {
+ ULONG nsizes = 0;
+ IfFailThrowBF(GetData(&nsizes), BFA_BAD_COMPLUS_SIG, pSigModule);
+ pSigBuilder->AppendData(nsizes);
+
+ while (nsizes--)
+ {
+ ULONG data = 0;
+ IfFailThrowBF(GetData(&data), BFA_BAD_COMPLUS_SIG, pSigModule);
+ pSigBuilder->AppendData(data);
+ }
+
+ ULONG nlbounds = 0;
+ IfFailThrowBF(GetData(&nlbounds), BFA_BAD_COMPLUS_SIG, pSigModule);
+ pSigBuilder->AppendData(nlbounds);
+
+ while (nlbounds--)
+ {
+ ULONG data = 0;
+ IfFailThrowBF(GetData(&data), BFA_BAD_COMPLUS_SIG, pSigModule);
+ pSigBuilder->AppendData(data);
+ }
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_INTERNAL:
+ {
+ // this check is not functional in DAC and provides no security against a malicious dump
+ // the DAC is prepared to receive an invalid type handle
+#ifndef DACCESS_COMPILE
+ if (pSigModule->IsSigInIL(m_ptr))
+ THROW_BAD_FORMAT(BFA_BAD_COMPLUS_SIG, pSigModule);
+#endif
+
+ TypeHandle hType;
+
+ IfFailThrowBF(GetPointer((void**)&hType), BFA_BAD_COMPLUS_SIG, pSigModule);
+
+ pSigBuilder->AppendPointer(hType.AsPtr());
+ }
+ break;
+
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ TypeHandle genericType = GetGenericInstType(pSigModule);
+
+ pSigBuilder->AppendElementType(ELEMENT_TYPE_INTERNAL);
+ pSigBuilder->AppendPointer(genericType.AsPtr());
+
+ ULONG argCnt = 0; // Get number of parameters
+ IfFailThrowBF(GetData(&argCnt), BFA_BAD_COMPLUS_SIG, pSigModule);
+ pSigBuilder->AppendData(argCnt);
+
+ while (argCnt--)
+ {
+ ConvertToInternalExactlyOne(pSigModule, pTypeContext, pSigBuilder, bSkipCustomModifier);
+ }
+ }
+ break;
+
+    // Note: the following cases exist only so that the IL stub hash is computed correctly for custom modifiers, in order to support C++ scenarios
+ case ELEMENT_TYPE_CMOD_OPT:
+ case ELEMENT_TYPE_CMOD_REQD:
+ {
+ mdToken tk;
+ IfFailThrowBF(GetToken(&tk), BFA_BAD_COMPLUS_SIG, pSigModule);
+ TypeHandle th = ClassLoader::LoadTypeDefOrRefThrowing(pSigModule, tk);
+ pSigBuilder->AppendPointer(th.AsPtr());
+
+ ConvertToInternalExactlyOne(pSigModule, pTypeContext, pSigBuilder, bSkipCustomModifier);
+ }
+ break;
+ }
+ }
+}
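+
+// Example (illustrative): the metadata form
+//   ELEMENT_TYPE_CLASS <typeDef/typeRef token>
+// is rewritten by ConvertToInternalExactlyOne as
+//   ELEMENT_TYPE_INTERNAL <TypeHandle pointer>
+// so consumers of the internal signature can compare types without metadata lookups.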
+
+void SigPointer::ConvertToInternalSignature(Module* pSigModule, SigTypeContext *pTypeContext, SigBuilder * pSigBuilder, BOOL bSkipCustomModifier)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pSigModule));
+ }
+ CONTRACTL_END
+
+ BYTE uCallConv = 0;
+ IfFailThrowBF(GetByte(&uCallConv), BFA_BAD_COMPLUS_SIG, pSigModule);
+
+ if ((uCallConv & IMAGE_CEE_CS_CALLCONV_MASK) == IMAGE_CEE_CS_CALLCONV_FIELD)
+ THROW_BAD_FORMAT(BFA_UNEXPECTED_FIELD_SIGNATURE, pSigModule);
+
+ pSigBuilder->AppendByte(uCallConv);
+
+ // Skip type parameter count
+ if (uCallConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ {
+ ULONG nParams = 0;
+ IfFailThrowBF(GetData(&nParams), BFA_BAD_COMPLUS_SIG, pSigModule);
+ pSigBuilder->AppendData(nParams);
+ }
+
+ // Get arg count;
+ ULONG cArgs = 0;
+ IfFailThrowBF(GetData(&cArgs), BFA_BAD_COMPLUS_SIG, pSigModule);
+ pSigBuilder->AppendData(cArgs);
+
+ cArgs++; // +1 for return type
+
+ // Skip args.
+ while (cArgs)
+ {
+ ConvertToInternalExactlyOne(pSigModule, pTypeContext, pSigBuilder, bSkipCustomModifier);
+ cArgs--;
+ }
+}
+#endif // DACCESS_COMPILE
+
+
+//---------------------------------------------------------------------------------------
+//
+// Default constructor for creating an empty Signature, i.e. with a NULL raw PCCOR_SIGNATURE pointer.
+//
+
+Signature::Signature()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pSig = NULL;
+ m_cbSig = 0;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Primary constructor for creating a Signature.
+//
+// Arguments:
+// pSig - raw PCCOR_SIGNATURE pointer
+// cbSig - length of the signature
+//
+
+Signature::Signature(PCCOR_SIGNATURE pSig,
+ DWORD cbSig)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC; // host-only data structure - not marshalled
+
+ m_pSig = pSig;
+ m_cbSig = cbSig;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Check if the Signature is empty, i.e. has a NULL raw PCCOR_SIGNATURE
+//
+// Return Value:
+// TRUE if the raw PCCOR_SIGNATURE is NULL
+//
+
+BOOL Signature::IsEmpty() const
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return (m_pSig == NULL);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Create a SigParser over the Signature. In DAC builds, grab the signature bytes from out of process first.
+//
+// Return Value:
+// a SigParser for this particular Signature
+//
+
+SigParser Signature::CreateSigParser() const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#if defined(DACCESS_COMPILE)
+ // Copy the signature bytes from the target process.
+ PCCOR_SIGNATURE pTargetSig = (PCCOR_SIGNATURE)DacInstantiateTypeByAddress((TADDR)m_pSig, m_cbSig, true);
+ return SigParser(pTargetSig, m_cbSig);
+#else // !DACCESS_COMPILE
+ return SigParser(m_pSig, m_cbSig);
+#endif // !DACCESS_COMPILE
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Create a SigPointer over the Signature. In DAC builds, grab the signature bytes from out of process first.
+//
+// Return Value:
+// a SigPointer for this particular Signature
+//
+
+SigPointer Signature::CreateSigPointer() const
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#if defined(DACCESS_COMPILE)
+ // Copy the signature bytes from the target process.
+ PCCOR_SIGNATURE pTargetSig = (PCCOR_SIGNATURE)DacInstantiateTypeByAddress((TADDR)m_pSig, m_cbSig, true);
+ return SigPointer(pTargetSig, m_cbSig);
+#else // !DACCESS_COMPILE
+ return SigPointer(m_pSig, m_cbSig);
+#endif // !DACCESS_COMPILE
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Pretty-print the Signature. This is just a wrapper over code:PrettyPrintSig().
+//
+// Arguments:
+// pszMethodName - the name of the method in question
+// pqbOut - a CQuickBytes array for allocating memory
+// pIMDI - a IMDInternalImport interface for resolving tokens
+//
+// Return Value:
+// whatever PrettyPrintSig() returns
+//
+
+void Signature::PrettyPrint(const CHAR * pszMethodName,
+ CQuickBytes * pqbOut,
+ IMDInternalImport * pIMDI) const
+{
+ WRAPPER_NO_CONTRACT;
+ PrettyPrintSig(this->GetRawSig(), this->GetRawSigLen(), pszMethodName, pqbOut, pIMDI, NULL);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Get the raw signature pointer contained in this Signature.
+//
+// Return Value:
+// the raw signature pointer
+//
+// Notes:
+// Use this ONLY IF there is no other way to do what you want to do!
+// In most cases you just want a SigParser/SigPointer from the Signature.
+//
+
+PCCOR_SIGNATURE Signature::GetRawSig() const
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_pSig;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Get the length of the raw signature contained in this Signature.
+//
+// Return Value:
+// the length of the raw signature
+//
+// Notes:
+// Use this ONLY IF there is no other way to do what you want to do!
+// In most cases you just want a SigParser/SigPointer from the Signature.
+//
+
+DWORD Signature::GetRawSigLen() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_cbSig;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Constructor.
+//
+void MetaSig::Init(
+ PCCOR_SIGNATURE szMetaSig,
+ DWORD cbMetaSig,
+ Module * pModule,
+ const SigTypeContext * pTypeContext,
+ MetaSigKind kind)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(szMetaSig));
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(pTypeContext, NULL_OK));
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+
+#ifdef _DEBUG
+ FillMemory(this, sizeof(*this), 0xcc);
+#endif
+
+ // Copy the type context
+ SigTypeContext::InitTypeContext(pTypeContext,&m_typeContext);
+ m_pModule = pModule;
+
+ SigPointer psig(szMetaSig, cbMetaSig);
+
+ HRESULT hr;
+
+ switch (kind)
+ {
+ case sigLocalVars:
+ {
+ ULONG data = 0;
+ IfFailGo(psig.GetCallingConvInfo(&data)); // Store calling convention
+ m_CallConv = (BYTE)data;
+
+ IfFailGo(psig.GetData(&data)); // Store number of arguments.
+ m_nArgs = data;
+
+ m_pRetType = SigPointer(NULL, 0);
+ break;
+ }
+ case sigMember:
+ {
+ ULONG data = 0;
+ IfFailGo(psig.GetCallingConvInfo(&data)); // Store calling convention
+ m_CallConv = (BYTE)data;
+
+ // Store type parameter count
+ if (m_CallConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ {
+ IfFailGo(psig.GetData(NULL));
+ }
+
+ IfFailGo(psig.GetData(&data)); // Store number of arguments.
+ m_nArgs = data;
+ m_pRetType = psig;
+ IfFailGo(psig.SkipExactlyOne());
+ break;
+ }
+ case sigField:
+ {
+ ULONG data = 0;
+ IfFailGo(psig.GetCallingConvInfo(&data)); // Store calling convention
+ m_CallConv = (BYTE)data;
+
+ m_nArgs = 1; //There's only 1 'arg' - the type.
+ m_pRetType = SigPointer(NULL, 0);
+ break;
+ }
+ default:
+ {
+ UNREACHABLE();
+ goto ErrExit;
+ }
+ }
+
+
+ m_pStart = psig;
+
+ m_flags = 0;
+
+ // Reset the iterator fields
+ Reset();
+
+ return;
+
+ErrExit:
+ // Invalid signature or parameter
+ m_CallConv = 0;
+ INDEBUG(m_CallConv = 0xff;)
+
+ m_nArgs = 0;
+ m_pRetType = SigPointer(NULL, 0);
+} // MetaSig::MetaSig
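+
+// Signature layout consumed by Init (illustrative, per the cases above):
+//   sigMember:    [callconv] [genArgCount if GENERIC] [argCount] [retType] [arg1]...[argN]
+//   sigLocalVars: [callconv] [count] [local1]...[localN]   (no return type)
+//   sigField:     [callconv] [fieldType]                   (exactly one type)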
+
+
+// Helper constructor that constructs a method signature MetaSig from a MethodDesc
+// IMPORTANT: if classInst/methodInst is omitted and the MethodDesc is shared between generic
+// instantiations then the instantiation info for the method will be representative. This
+// is OK for GC, field layout etc. but not OK where exact types matter.
+//
+// Also, if used on a shared instantiated method descriptor or instance method in a shared generic struct
+// then the calling convention is fixed up to include the extra dictionary argument
+//
+// For method descs from array types the "instantiation" is set to the element type of the array
+// This lets us use VAR in the signatures for Get, Set and Address
+MetaSig::MetaSig(MethodDesc *pMD, Instantiation classInst, Instantiation methodInst)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SigTypeContext typeContext(pMD, classInst, methodInst);
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cbSigSize;
+ pMD->GetSig(&pSig, &cbSigSize);
+
+ Init(pSig, cbSigSize, pMD->GetModule(),&typeContext);
+
+ if (pMD->RequiresInstArg())
+ SetHasParamTypeArg();
+}
+
+MetaSig::MetaSig(MethodDesc *pMD, TypeHandle declaringType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SigTypeContext typeContext(pMD, declaringType);
+ PCCOR_SIGNATURE pSig;
+ DWORD cbSigSize;
+ pMD->GetSig(&pSig, &cbSigSize);
+
+ Init(pSig, cbSigSize, pMD->GetModule(),&typeContext);
+
+ if (pMD->RequiresInstArg())
+ SetHasParamTypeArg();
+}
+
+#ifdef _DEBUG
+//*******************************************************************************
+static BOOL MethodDescMatchesSig(MethodDesc* pMD, PCCOR_SIGNATURE pSig, DWORD cSig, Module * pModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ PCCOR_SIGNATURE pSigOfMD;
+ DWORD cSigOfMD;
+ pMD->GetSig(&pSigOfMD, &cSigOfMD);
+
+ return MetaSig::CompareMethodSigs(pSig, cSig, pModule, NULL,
+ pSigOfMD, cSigOfMD, pMD->GetModule(), NULL);
+}
+#endif // _DEBUG
+
+MetaSig::MetaSig(BinderMethodID id)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ Signature sig = MscorlibBinder::GetMethodSignature(id);
+
+ _ASSERTE(MethodDescMatchesSig(MscorlibBinder::GetMethod(id),
+ sig.GetRawSig(), sig.GetRawSigLen(), MscorlibBinder::GetModule()));
+
+ Init(sig.GetRawSig(), sig.GetRawSigLen(), MscorlibBinder::GetModule(), NULL);
+}
+
+MetaSig::MetaSig(LPHARDCODEDMETASIG pwzMetaSig)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ Signature sig = MscorlibBinder::GetSignature(pwzMetaSig);
+
+ Init(sig.GetRawSig(), sig.GetRawSigLen(), MscorlibBinder::GetModule(), NULL);
+}
+
+// Helper constructor that constructs a field signature MetaSig from a FieldDesc
+// IMPORTANT: if the classInst is omitted then the instantiation info for the field
+// will be representative only, as FieldDescs can be shared
+//
+MetaSig::MetaSig(FieldDesc *pFD, TypeHandle declaringType)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pFD));
+ }
+ CONTRACTL_END
+
+ PCCOR_SIGNATURE pSig;
+ DWORD cSig;
+
+ pFD->GetSig(&pSig, &cSig);
+
+ SigTypeContext typeContext(pFD, declaringType);
+
+ Init(pSig, cSig, pFD->GetModule(),&typeContext, sigField);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Returns the type at the current argument index. Returns ELEMENT_TYPE_END
+// if already past the end of the arguments.
+//
+CorElementType
+MetaSig::PeekArg() const
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_iCurArg == m_nArgs)
+ {
+ return ELEMENT_TYPE_END;
+ }
+ return m_pWalk.PeekElemTypeClosed(GetModule(), &m_typeContext);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Returns the type at the current argument index. Returns ELEMENT_TYPE_END
+// if already past the end of the arguments.
+//
+CorElementType
+MetaSig::PeekArgNormalized(TypeHandle * pthValueType) const
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_iCurArg == m_nArgs)
+ {
+ return ELEMENT_TYPE_END;
+ }
+ return m_pWalk.PeekElemTypeNormalized(m_pModule, &m_typeContext, pthValueType);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Returns type of current argument, then advances the argument
+// index. Returns ELEMENT_TYPE_END if already past end of arguments.
+//
+CorElementType
+MetaSig::NextArg()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ m_pLastType = m_pWalk;
+
+ if (m_iCurArg == m_nArgs)
+ {
+ return ELEMENT_TYPE_END;
+ }
+ m_iCurArg++;
+ CorElementType mt = m_pWalk.PeekElemTypeClosed(GetModule(), &m_typeContext);
+ if (FAILED(m_pWalk.SkipExactlyOne()))
+ {
+ m_pWalk = m_pLastType;
+ return ELEMENT_TYPE_END;
+ }
+ return mt;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Advance the argument index. Can be used with GetArgProps() to
+// iterate when you do not have a valid type context.
+//
+void
+MetaSig::SkipArg()
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_pLastType = m_pWalk;
+
+ if (m_iCurArg < m_nArgs)
+ {
+ m_iCurArg++;
+ if (FAILED(m_pWalk.SkipExactlyOne()))
+ {
+ m_pWalk = m_pLastType;
+ m_iCurArg = m_nArgs;
+ }
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Reset the iterator back to the start position.
+//
+VOID
+MetaSig::Reset()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_pWalk = m_pStart;
+ m_iCurArg = 0;
+ return;
+}
+
+#ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+BOOL
+IsTypeRefOrDef(
+ LPCSTR szClassName,
+ Module * pModule,
+ mdToken token)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ LPCUTF8 pclsname;
+ LPCUTF8 pszNamespace;
+
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+
+ if (TypeFromToken(token) == mdtTypeDef)
+ {
+ if (FAILED(pInternalImport->GetNameOfTypeDef(token, &pclsname, &pszNamespace)))
+ {
+ return false;
+ }
+ }
+ else if (TypeFromToken(token) == mdtTypeRef)
+ {
+ if (FAILED(pInternalImport->GetNameOfTypeRef(token, &pszNamespace, &pclsname)))
+ {
+ return false;
+ }
+ }
+ else
+ {
+ return false;
+ }
+
+ // If the namespace is not the same.
+ int iLen = (int)strlen(pszNamespace);
+ if (iLen)
+ {
+ if (strncmp(szClassName, pszNamespace, iLen) != 0)
+ return(false);
+
+ if (szClassName[iLen] != NAMESPACE_SEPARATOR_CHAR)
+ return(false);
+ ++iLen;
+ }
+
+ if (strcmp(&szClassName[iLen], pclsname) != 0)
+ return(false);
+ return(true);
+} // IsTypeRefOrDef
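+
+// Example (illustrative): IsTypeRefOrDef("System.String", pModule, tk) returns true
+// only when tk resolves to namespace "System" and name "String" in pModule's metadata.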
+
+TypeHandle SigPointer::GetTypeHandleNT(Module* pModule,
+ const SigTypeContext *pTypeContext) const
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ TypeHandle th;
+ EX_TRY
+ {
+ th = GetTypeHandleThrowing(pModule, pTypeContext);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ return(th);
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+
+// Method: TypeHandle SigPointer::GetTypeHandleThrowing()
+// pZapSigContext is only set when decoding zapsigs
+//
+TypeHandle SigPointer::GetTypeHandleThrowing(
+ Module * pModule,
+ const SigTypeContext * pTypeContext,
+ ClassLoader::LoadTypesFlag fLoadTypes/*=LoadTypes*/,
+ ClassLoadLevel level/*=CLASS_LOADED*/,
+ BOOL dropGenericArgumentLevel/*=FALSE*/,
+ const Substitution * pSubst/*=NULL*/,
+ // ZapSigContext is only set when decoding zapsigs
+ const ZapSig::Context * pZapSigContext) const
+{
+ CONTRACT(TypeHandle)
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ MODE_ANY;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ if (FORBIDGC_LOADER_USE_ENABLED() || fLoadTypes != ClassLoader::LoadTypes) { LOADS_TYPE(CLASS_LOAD_BEGIN); } else { LOADS_TYPE(level); }
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(level > CLASS_LOAD_BEGIN && level <= CLASS_LOADED);
+ POSTCONDITION(CheckPointer(RETVAL, ((fLoadTypes == ClassLoader::LoadTypes) ? NULL_NOT_OK : NULL_OK)));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END
+
+ // We have an invariant that before we call a method, we must have loaded all of the valuetype parameters of that
+ // method visible from the signature of the method. Normally we do this via type loading before the method is called
+ // by walking the signature of the callee method at jit time, and loading all of the valuetype arguments at that time.
+ // For NGEN, we record which valuetypes need to be loaded, and force load those types when the caller method is first executed.
+ // However, in certain circumstances involving generics the jit does not have the opportunity to observe the complete method
+    // signature that may be used at signature-walk time. See the example below.
+ //
+ //
+//using System;
+//
+//struct A<T> { }
+//struct B<T> { }
+//
+//interface Interface<T>
+//{ A<T> InterfaceFunc(); }
+//
+//class Base<T>
+//{ public virtual B<T> Func() { return default(B<T>); } }
+//
+//class C<U,T> where U:Base<T>, Interface<T>
+//{
+// public static void CallFunc(U u) { u.Func(); }
+// public static void CallInterfaceFunc(U u) { u.InterfaceFunc(); }
+//}
+//
+//class Problem : Base<object>, Interface<object>
+//{
+// public A<object> InterfaceFunc() { return new A<object>(); }
+// public override B<object> Func() { return new B<object>(); }
+//}
+//
+//class Test
+//{
+// static void Main()
+// {
+// C<Problem, object>.CallFunc(new Problem());
+// C<Problem, object>.CallInterfaceFunc(new Problem());
+// }
+//}
+//
+    // In this example, when CallFunc and CallInterfaceFunc are jitted, the types that will
+    // be loaded during JIT time are A<__Canon> and B<__Canon>. Thus we need to be able to
+    // search only for canonical type arguments during these restricted time periods.
+    // IsGCThread() || IsStackWalkerThread() is the current predicate for determining this.
+
+#ifdef _DEBUG
+ if ((IsGCThread() || IsStackWalkerThread()) && (fLoadTypes == ClassLoader::LoadTypes))
+ {
+ // The callers are expected to pass the right arguments in
+ _ASSERTE(level == CLASS_LOAD_APPROXPARENTS);
+ _ASSERTE(dropGenericArgumentLevel == TRUE);
+ }
+#endif
+
+ TypeHandle thRet;
+ SigPointer psig = *this;
+ CorElementType typ = ELEMENT_TYPE_END;
+ IfFailThrowBF(psig.GetElemType(&typ), BFA_BAD_SIGNATURE, pModule);
+
+ if ((typ < ELEMENT_TYPE_MAX) &&
+ (CorTypeInfo::IsPrimitiveType_NoThrow(typ) || (typ == ELEMENT_TYPE_STRING) || (typ == ELEMENT_TYPE_OBJECT)))
+ {
+ // case ELEMENT_TYPE_VOID = 0x01,
+ // case ELEMENT_TYPE_BOOLEAN = 0x02,
+ // case ELEMENT_TYPE_CHAR = 0x03,
+ // case ELEMENT_TYPE_I1 = 0x04,
+ // case ELEMENT_TYPE_U1 = 0x05,
+ // case ELEMENT_TYPE_I2 = 0x06,
+ // case ELEMENT_TYPE_U2 = 0x07,
+ // case ELEMENT_TYPE_I4 = 0x08,
+ // case ELEMENT_TYPE_U4 = 0x09,
+ // case ELEMENT_TYPE_I8 = 0x0a,
+ // case ELEMENT_TYPE_U8 = 0x0b,
+ // case ELEMENT_TYPE_R4 = 0x0c,
+ // case ELEMENT_TYPE_R8 = 0x0d,
+ // case ELEMENT_TYPE_I = 0x18,
+ // case ELEMENT_TYPE_U = 0x19,
+ //
+ // case ELEMENT_TYPE_STRING = 0x0e,
+ // case ELEMENT_TYPE_OBJECT = 0x1c,
+ //
+ thRet = TypeHandle(MscorlibBinder::GetElementType(typ));
+ }
+ else
+ {
+ // This function is recursive, so it must have an interior probe
+ INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(10, NO_FORBIDGC_LOADER_USE_ThrowSO(););
+
+#ifdef _DEBUG_IMPL
+ // This verifies that we won't try and load a type
+ // if FORBIDGC_LOADER_USE_ENABLED is true.
+ //
+ // The FORBIDGC_LOADER_USE is limited to very specific scenarios that need to retrieve
+ // GC_OTHER typehandles for size and gcroot information. This assert attempts to prevent
+ // this abuse from proliferating.
+ //
+ if (FORBIDGC_LOADER_USE_ENABLED() && (fLoadTypes == ClassLoader::LoadTypes))
+ {
+ TypeHandle th = GetTypeHandleThrowing(pModule,
+ pTypeContext,
+ ClassLoader::DontLoadTypes,
+ level,
+ dropGenericArgumentLevel,
+ pSubst,
+ pZapSigContext);
+ _ASSERTE(!th.IsNull());
+ }
+#endif
+ //
+ // pOrigModule is the original module that contained this ZapSig
+ //
+ Module * pOrigModule = (pZapSigContext != NULL) ? pZapSigContext->pInfoModule : pModule;
+ ClassLoader::NotFoundAction notFoundAction;
+ CorInternalStates tdTypes;
+
+ switch(typ) {
+ case ELEMENT_TYPE_TYPEDBYREF:
+ {
+ thRet = TypeHandle(g_TypedReferenceMT);
+ break;
+ }
+
+#ifdef FEATURE_PREJIT
+ case ELEMENT_TYPE_NATIVE_ARRAY_TEMPLATE_ZAPSIG:
+ {
+#ifndef DACCESS_COMPILE
+ TypeHandle baseType = psig.GetTypeHandleThrowing(pModule,
+ pTypeContext,
+ fLoadTypes,
+ level,
+ dropGenericArgumentLevel,
+ pSubst,
+ pZapSigContext);
+ if (baseType.IsNull())
+ {
+ thRet = baseType;
+ }
+ else
+ {
+ thRet = baseType.GetMethodTable();
+ }
+#else
+ DacNotImpl();
+ thRet = TypeHandle();
+#endif
+ break;
+ }
+
+ case ELEMENT_TYPE_NATIVE_VALUETYPE_ZAPSIG:
+ {
+#ifndef DACCESS_COMPILE
+ TypeHandle baseType = psig.GetTypeHandleThrowing(pModule,
+ pTypeContext,
+ fLoadTypes,
+ level,
+ dropGenericArgumentLevel,
+ pSubst,
+ pZapSigContext);
+ if (baseType.IsNull())
+ {
+ thRet = baseType;
+ }
+ else
+ {
+ thRet = ClassLoader::LoadNativeValueTypeThrowing(baseType, fLoadTypes, level);
+ }
+#else
+ DacNotImpl();
+ thRet = TypeHandle();
+#endif
+ break;
+ }
+
+ case ELEMENT_TYPE_CANON_ZAPSIG:
+ {
+#ifndef DACCESS_COMPILE
+ assert(g_pCanonMethodTableClass != NULL);
+ thRet = TypeHandle(g_pCanonMethodTableClass);
+#else
+ DacNotImpl();
+ thRet = TypeHandle();
+#endif
+ break;
+ }
+
+ case ELEMENT_TYPE_MODULE_ZAPSIG:
+ {
+#ifndef DACCESS_COMPILE
+ DWORD ix;
+ IfFailThrowBF(psig.GetData(&ix), BFA_BAD_SIGNATURE, pModule);
+
+ PREFIX_ASSUME(pZapSigContext != NULL);
+ pModule = pZapSigContext->GetZapSigModule()->GetModuleFromIndex(ix);
+ if (pModule != NULL)
+ {
+ thRet = psig.GetTypeHandleThrowing(pModule,
+ pTypeContext,
+ fLoadTypes,
+ level,
+ dropGenericArgumentLevel,
+ pSubst,
+ pZapSigContext);
+ }
+#else
+ DacNotImpl();
+ thRet = TypeHandle();
+#endif
+ break;
+ }
+
+ case ELEMENT_TYPE_VAR_ZAPSIG:
+ {
+#ifndef DACCESS_COMPILE
+ DWORD rid;
+ IfFailThrowBF(psig.GetData(&rid), BFA_BAD_SIGNATURE, pModule);
+
+ mdGenericParam tkTyPar = TokenFromRid(rid, mdtGenericParam);
+
+ TypeVarTypeDesc *pTypeVarTypeDesc = pModule->LookupGenericParam(tkTyPar);
+ if (pTypeVarTypeDesc == NULL && (fLoadTypes == ClassLoader::LoadTypes))
+ {
+ mdToken tkOwner;
+ IfFailThrow(pModule->GetMDImport()->GetGenericParamProps(tkTyPar, NULL, NULL, &tkOwner, NULL, NULL));
+
+ if (TypeFromToken(tkOwner) == mdtMethodDef)
+ {
+ MemberLoader::GetMethodDescFromMethodDef(pModule, tkOwner, FALSE);
+ }
+ else
+ {
+ ClassLoader::LoadTypeDefThrowing(pModule, tkOwner,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ }
+
+ pTypeVarTypeDesc = pModule->LookupGenericParam(tkTyPar);
+ if (pTypeVarTypeDesc == NULL)
+ {
+ THROW_BAD_FORMAT(BFA_BAD_COMPLUS_SIG, pOrigModule);
+ }
+ }
+ thRet = TypeHandle(pTypeVarTypeDesc);
+#else
+ DacNotImpl();
+ thRet = TypeHandle();
+#endif
+ break;
+ }
+#endif // FEATURE_PREJIT
+
+ case ELEMENT_TYPE_VAR:
+ {
+ if ((pSubst != NULL) && !pSubst->GetInst().IsNull())
+ {
+#ifdef _DEBUG_IMPL
+ _ASSERTE(!FORBIDGC_LOADER_USE_ENABLED());
+#endif
+ DWORD index;
+ IfFailThrow(psig.GetData(&index));
+
+ SigPointer inst = pSubst->GetInst();
+ for (DWORD i = 0; i < index; i++)
+ {
+ IfFailThrowBF(inst.SkipExactlyOne(), BFA_BAD_SIGNATURE, pOrigModule);
+ }
+
+ thRet = inst.GetTypeHandleThrowing(
+ pSubst->GetModule(),
+ pTypeContext,
+ fLoadTypes,
+ level,
+ dropGenericArgumentLevel,
+ pSubst->GetNext(),
+ pZapSigContext);
+ }
+ else
+ {
+ thRet = (psig.GetTypeVariableThrowing(pModule, typ, fLoadTypes, pTypeContext));
+ if (fLoadTypes == ClassLoader::LoadTypes)
+ ClassLoader::EnsureLoaded(thRet, level);
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_MVAR:
+ {
+ thRet = (psig.GetTypeVariableThrowing(pModule, typ, fLoadTypes, pTypeContext));
+ if (fLoadTypes == ClassLoader::LoadTypes)
+ ClassLoader::EnsureLoaded(thRet, level);
+ break;
+ }
+
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ mdTypeDef tkGenericType = mdTypeDefNil;
+ Module *pGenericTypeModule = NULL;
+
+ // Before parsing the generic instantiation, determine if the signature tells us its module and token.
+ // This is the common case, and when true we can avoid dereferencing the resulting TypeHandle to ask for them.
+ bool typeAndModuleKnown = false;
+ if (pZapSigContext && pZapSigContext->externalTokens == ZapSig::NormalTokens && psig.IsTypeDef(&tkGenericType))
+ {
+ typeAndModuleKnown = true;
+ pGenericTypeModule = pModule;
+ }
+
+ TypeHandle genericType = psig.GetGenericInstType(pModule, fLoadTypes, level, pZapSigContext);
+
+ if (genericType.IsNull())
+ {
+ thRet = genericType;
+ break;
+ }
+
+ if (!typeAndModuleKnown)
+ {
+ tkGenericType = genericType.GetCl();
+ pGenericTypeModule = genericType.GetModule();
+ }
+ else
+ {
+ _ASSERTE(tkGenericType == genericType.GetCl());
+ _ASSERTE(pGenericTypeModule == genericType.GetModule());
+ }
+
+ if (level == CLASS_LOAD_APPROXPARENTS && dropGenericArgumentLevel && genericType.IsInterface())
+ {
+ thRet = genericType;
+ break;
+ }
+
+ // The number of type parameters follows
+ DWORD ntypars = 0;
+ IfFailThrowBF(psig.GetData(&ntypars), BFA_BAD_SIGNATURE, pOrigModule);
+
+ DWORD dwAllocaSize = 0;
+ if (!ClrSafeInt<DWORD>::multiply(ntypars, sizeof(TypeHandle), dwAllocaSize))
+ ThrowHR(COR_E_OVERFLOW);
+
+ if ((dwAllocaSize/PAGE_SIZE+1) >= 2)
+ {
+ DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD((10+dwAllocaSize/PAGE_SIZE+1), NO_FORBIDGC_LOADER_USE_ThrowSO(););
+ }
+ TypeHandle *thisinst = (TypeHandle*) _alloca(dwAllocaSize);
+
+ // Finally we gather up the type arguments themselves, loading at the level specified for generic arguments
+ for (unsigned i = 0; i < ntypars; i++)
+ {
+ ClassLoadLevel argLevel = level;
+ TypeHandle typeHnd = TypeHandle();
+ BOOL argDrop = FALSE;
+
+ if (dropGenericArgumentLevel)
+ {
+ if (level == CLASS_LOAD_APPROXPARENTS)
+ {
+ SigPointer tempsig = psig;
+
+ CorElementType elemType = ELEMENT_TYPE_END;
+ IfFailThrowBF(tempsig.GetElemType(&elemType), BFA_BAD_SIGNATURE, pOrigModule);
+
+ if (elemType == (CorElementType) ELEMENT_TYPE_MODULE_ZAPSIG)
+ {
+ // Skip over the module index
+ IfFailThrowBF(tempsig.GetData(NULL), BFA_BAD_SIGNATURE, pModule);
+ // Read the next elemType
+ IfFailThrowBF(tempsig.GetElemType(&elemType), BFA_BAD_SIGNATURE, pModule);
+ }
+
+ if (elemType == ELEMENT_TYPE_GENERICINST)
+ {
+ CorElementType tmpEType = ELEMENT_TYPE_END;
+ IfFailThrowBF(tempsig.PeekElemType(&tmpEType), BFA_BAD_SIGNATURE, pOrigModule);
+
+ if (tmpEType == ELEMENT_TYPE_CLASS)
+ typeHnd = TypeHandle(g_pCanonMethodTableClass);
+ }
+ else if ((elemType == (CorElementType) ELEMENT_TYPE_CANON_ZAPSIG) ||
+ (CorTypeInfo::GetGCType_NoThrow(elemType) == TYPE_GC_REF))
+ {
+ typeHnd = TypeHandle(g_pCanonMethodTableClass);
+ }
+
+ argDrop = TRUE;
+ }
+ else
+ // We need to make sure that typekey is always restored. Otherwise, we may run into unrestored typehandles while using
+ // the typekey for lookups. It is safe to not drop the levels for initial NGen-specific loading levels since there cannot
+ // be cycles in typekeys.
+ if (level > CLASS_LOAD_APPROXPARENTS)
+ {
+ argLevel = (ClassLoadLevel) (level-1);
+ }
+ }
+
+ if (typeHnd.IsNull())
+ {
+ typeHnd = psig.GetTypeHandleThrowing(pOrigModule,
+ pTypeContext,
+ fLoadTypes,
+ argLevel,
+ argDrop,
+ pSubst,
+ pZapSigContext);
+ if (typeHnd.IsNull())
+ {
+ // Indicate failure by setting thisinst to NULL
+ thisinst = NULL;
+ break;
+ }
+ }
+ thisinst[i] = typeHnd;
+ IfFailThrowBF(psig.SkipExactlyOne(), BFA_BAD_SIGNATURE, pOrigModule);
+ }
+
+ // If we failed to get all of the instantiation type arguments then we return the null type handle
+ if (thisinst == NULL)
+ {
+ thRet = TypeHandle();
+ break;
+ }
+
+ // Group together the current signature type context and substitution chain, which
+ // we may later use to instantiate constraints of type arguments that turn out to be
+ // typespecs, i.e. generic types.
+ InstantiationContext instContext(pTypeContext, pSubst);
+
+ // Now make the instantiated type
+ // The class loader will check the arity
+ // When we know it was correctly computed at NGen time, we ask the class loader to skip that check.
+ thRet = (ClassLoader::LoadGenericInstantiationThrowing(pGenericTypeModule,
+ tkGenericType,
+ Instantiation(thisinst, ntypars),
+ fLoadTypes, level,
+ &instContext,
+ pZapSigContext && pZapSigContext->externalTokens == ZapSig::NormalTokens));
+ break;
+ }
+
+ case ELEMENT_TYPE_CLASS:
+ // intentional fallthru to ELEMENT_TYPE_VALUETYPE
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ mdTypeRef typeToken = 0;
+
+ IfFailThrowBF(psig.GetToken(&typeToken), BFA_BAD_SIGNATURE, pOrigModule);
+
+#if defined(FEATURE_NATIVE_IMAGE_GENERATION) && !defined(DACCESS_COMPILE)
+ if ((pOrigModule != pModule) && (pZapSigContext->externalTokens == ZapSig::IbcTokens))
+ {
+ // ibcExternalType tokens are actually encoded as mdtTypeDef tokens in the signature
+ RID typeRid = RidFromToken(typeToken);
+ idExternalType ibcToken = RidToToken(typeRid, ibcExternalType);
+ typeToken = pOrigModule->LookupIbcTypeToken(pModule, ibcToken);
+
+ if (IsNilToken(typeToken))
+ {
+ SString * fullTypeName = pOrigModule->IBCErrorNameString();
+ fullTypeName->Clear();
+ pOrigModule->LookupIbcTypeToken(pModule, ibcToken, fullTypeName);
+
+ THROW_BAD_FORMAT(BFA_MISSING_IBC_EXTERNAL_TYPE, pOrigModule);
+ }
+ }
+#endif
+
+ if ((TypeFromToken(typeToken) != mdtTypeRef) && (TypeFromToken(typeToken) != mdtTypeDef))
+ THROW_BAD_FORMAT(BFA_UNEXPECTED_TOKEN_AFTER_CLASSVALTYPE, pOrigModule);
+
+ if (IsNilToken(typeToken))
+ THROW_BAD_FORMAT(BFA_UNEXPECTED_TOKEN_AFTER_CLASSVALTYPE, pOrigModule);
+
+ if (fLoadTypes == ClassLoader::LoadTypes)
+ {
+ notFoundAction = ClassLoader::ThrowButNullV11McppWorkaround;
+ tdTypes = tdNoTypes;
+ }
+ else
+ {
+ notFoundAction = ClassLoader::ReturnNullIfNotFound;
+ tdTypes = tdAllTypes;
+ }
+
+ TypeHandle loadedType =
+ ClassLoader::LoadTypeDefOrRefThrowing(pModule,
+ typeToken,
+ notFoundAction,
+ // pZapSigContext is only set when decoding zapsigs
+ // ZapSigs use uninstantiated tokens to represent the GenericTypeDefinition
+ (pZapSigContext ? ClassLoader::PermitUninstDefOrRef : ClassLoader::FailIfUninstDefOrRef),
+ tdTypes,
+ level);
+
+ // The Everett C++ compiler can generate a TypeRef with RS=0 and no corresponding TypeDef for unmanaged
+ // valuetypes that are referenced only through pointers to them. In this case we treat the type as
+ // ELEMENT_TYPE_VOID and perform the same operations as the corresponding case block above.
+ if (loadedType.IsNull())
+ {
+ if (TypeFromToken(typeToken) == mdtTypeRef)
+ {
+ loadedType = TypeHandle(MscorlibBinder::GetElementType(ELEMENT_TYPE_VOID));
+ thRet = loadedType;
+ break;
+ }
+ }
+
+#ifndef DACCESS_COMPILE
+ //
+ // Check that the type that we loaded matches the signature
+ // with regards to ET_CLASS and ET_VALUETYPE
+ //
+ if (fLoadTypes == ClassLoader::LoadTypes)
+ {
+ // Skip this check when using zap sigs; it should have been correctly computed at NGen time
+ // and a change from one to the other would have invalidated the image.
+ if (pZapSigContext == NULL || pZapSigContext->externalTokens != ZapSig::NormalTokens)
+ {
+ bool typFromSigIsClass = (typ == ELEMENT_TYPE_CLASS);
+ bool typLoadedIsClass = (loadedType.GetSignatureCorElementType() == ELEMENT_TYPE_CLASS);
+
+ if (typFromSigIsClass != typLoadedIsClass)
+ {
+ if((pModule->GetMDImport()->GetMetadataStreamVersion() != MD_STREAM_VER_1X)
+ || !Security::CanSkipVerification(pModule->GetDomainAssembly()))
+ {
+ pOrigModule->GetAssembly()->ThrowTypeLoadException(pModule->GetMDImport(),
+ typeToken,
+ BFA_CLASSLOAD_VALUETYPEMISMATCH);
+ }
+ }
+ }
+
+ // Assert that our reasoning above was valid (that there is never a zapsig that gets this wrong)
+ _ASSERTE(((typ == ELEMENT_TYPE_CLASS) == (loadedType.GetSignatureCorElementType() == ELEMENT_TYPE_CLASS)) ||
+ pZapSigContext == NULL || pZapSigContext->externalTokens != ZapSig::NormalTokens);
+
+ }
+#endif // #ifndef DACCESS_COMPILE
+
+ thRet = loadedType;
+ break;
+ }
+
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ {
+ TypeHandle elemType = psig.GetTypeHandleThrowing(pModule,
+ pTypeContext,
+ fLoadTypes,
+ level,
+ dropGenericArgumentLevel,
+ pSubst,
+ pZapSigContext);
+ if (elemType.IsNull())
+ {
+ thRet = elemType;
+ break;
+ }
+
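+ // Per ECMA-335 II.23.2.13, a general array signature is encoded as
+ //   ARRAY <elemType> <rank> <numSizes> <size>* <numLoBounds> <loBound>*
+ // so, e.g., "int32[,]" is ARRAY I4 2 0 0. Only the rank is needed here;
+ // the optional sizes and lower bounds are not read.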
+ ULONG rank = 0;
+ if (typ == ELEMENT_TYPE_ARRAY) {
+ IfFailThrowBF(psig.SkipExactlyOne(), BFA_BAD_SIGNATURE, pOrigModule);
+ IfFailThrowBF(psig.GetData(&rank), BFA_BAD_SIGNATURE, pOrigModule);
+
+ _ASSERTE(0 < rank);
+ }
+ thRet = ClassLoader::LoadArrayTypeThrowing(elemType, typ, rank, fLoadTypes, level);
+ break;
+ }
+
+ case ELEMENT_TYPE_PINNED:
+ // Return what follows
+ thRet = psig.GetTypeHandleThrowing(pModule,
+ pTypeContext,
+ fLoadTypes,
+ level,
+ dropGenericArgumentLevel,
+ pSubst,
+ pZapSigContext);
+ break;
+
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_PTR:
+ {
+ TypeHandle baseType = psig.GetTypeHandleThrowing(pModule,
+ pTypeContext,
+ fLoadTypes,
+ level,
+ dropGenericArgumentLevel,
+ pSubst,
+ pZapSigContext);
+ if (baseType.IsNull())
+ {
+ thRet = baseType;
+ }
+ else
+ {
+ thRet = ClassLoader::LoadPointerOrByrefTypeThrowing(typ, baseType, fLoadTypes, level);
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+#ifndef DACCESS_COMPILE
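+ // Per ECMA-335, ELEMENT_TYPE_FNPTR is followed by a full method signature:
+ //   <callConv> <paramCount> <retType> <param>*
+ // e.g. a pointer to "static int32 f(int64)" is encoded as FNPTR DEFAULT 1 I4 I8.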
+ ULONG uCallConv = 0;
+ IfFailThrowBF(psig.GetData(&uCallConv), BFA_BAD_SIGNATURE, pOrigModule);
+
+ if ((uCallConv & IMAGE_CEE_CS_CALLCONV_MASK) == IMAGE_CEE_CS_CALLCONV_FIELD)
+ THROW_BAD_FORMAT(BFA_FNPTR_CANNOT_BE_A_FIELD, pOrigModule);
+
+ if ((uCallConv & IMAGE_CEE_CS_CALLCONV_GENERIC) > 0)
+ THROW_BAD_FORMAT(BFA_FNPTR_CANNOT_BE_GENERIC, pOrigModule);
+
+ // Get arg count;
+ ULONG cArgs = 0;
+ IfFailThrowBF(psig.GetData(&cArgs), BFA_BAD_SIGNATURE, pOrigModule);
+
+ ULONG cAllocaSize;
+ if (!ClrSafeInt<ULONG>::addition(cArgs, 1, cAllocaSize) ||
+ !ClrSafeInt<ULONG>::multiply(cAllocaSize, sizeof(TypeHandle), cAllocaSize))
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+
+ if ((cAllocaSize/PAGE_SIZE+1) >= 2)
+ {
+ DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD((10+cAllocaSize/PAGE_SIZE+1), NO_FORBIDGC_LOADER_USE_ThrowSO(););
+ }
+
+ TypeHandle *retAndArgTypes = (TypeHandle*) _alloca(cAllocaSize);
+ bool fReturnTypeOrParameterNotLoaded = false;
+
+ for (unsigned i = 0; i <= cArgs; i++)
+ {
+ retAndArgTypes[i] = psig.GetTypeHandleThrowing(pOrigModule,
+ pTypeContext,
+ fLoadTypes,
+ level,
+ dropGenericArgumentLevel,
+ pSubst,
+ pZapSigContext);
+ if (retAndArgTypes[i].IsNull())
+ {
+ thRet = TypeHandle();
+ fReturnTypeOrParameterNotLoaded = true;
+ break;
+ }
+
+ IfFailThrowBF(psig.SkipExactlyOne(), BFA_BAD_SIGNATURE, pOrigModule);
+ }
+
+ if (fReturnTypeOrParameterNotLoaded)
+ {
+ break;
+ }
+
+ // Now make the function pointer type
+ thRet = ClassLoader::LoadFnptrTypeThrowing((BYTE) uCallConv, cArgs, retAndArgTypes, fLoadTypes, level);
+#else
+ DacNotImpl();
+ thRet = TypeHandle();
+#endif
+ break;
+ }
+
+ case ELEMENT_TYPE_INTERNAL :
+ {
+ TypeHandle hType;
+ // this check is not functional in DAC and provides no security against a malicious dump
+ // the DAC is prepared to receive an invalid type handle
+#ifndef DACCESS_COMPILE
+ if (pModule->IsSigInIL(m_ptr))
+ THROW_BAD_FORMAT(BFA_BAD_SIGNATURE, (Module*)pModule);
+#endif
+ CorSigUncompressPointer(psig.GetPtr(), (void**)&hType);
+ thRet = hType;
+ break;
+ }
+
+ case ELEMENT_TYPE_SENTINEL:
+ {
+#ifndef DACCESS_COMPILE
+
+ mdToken token = 0;
+
+ IfFailThrowBF(psig.GetToken(&token), BFA_BAD_SIGNATURE, pOrigModule);
+
+ pOrigModule->GetAssembly()->ThrowTypeLoadException(pModule->GetMDImport(),
+ token,
+ IDS_CLASSLOAD_GENERAL);
+#else
+ DacNotImpl();
+ break;
+#endif // #ifndef DACCESS_COMPILE
+ }
+
+ default:
+#ifdef _DEBUG_IMPL
+ _ASSERTE(!FORBIDGC_LOADER_USE_ENABLED());
+#endif
+ THROW_BAD_FORMAT(BFA_BAD_COMPLUS_SIG, pOrigModule);
+ }
+
+ END_INTERIOR_STACK_PROBE;
+ }
+
+ RETURN thRet;
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
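+// Reads the generic type definition portion of a GENERICINST signature.
+// On entry the SigPointer should be positioned at the ELEMENT_TYPE_CLASS,
+// ELEMENT_TYPE_VALUETYPE, or ELEMENT_TYPE_INTERNAL that immediately follows
+// ELEMENT_TYPE_GENERICINST; the type arguments themselves are not read here.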
+TypeHandle SigPointer::GetGenericInstType(Module * pModule,
+ ClassLoader::LoadTypesFlag fLoadTypes/*=LoadTypes*/,
+ ClassLoadLevel level/*=CLASS_LOADED*/,
+ const ZapSig::Context * pZapSigContext)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ MODE_ANY;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(return TypeHandle();); }
+ if (FORBIDGC_LOADER_USE_ENABLED() || fLoadTypes != ClassLoader::LoadTypes) { LOADS_TYPE(CLASS_LOAD_BEGIN); } else { LOADS_TYPE(level); }
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ Module * pOrigModule = (pZapSigContext != NULL) ? pZapSigContext->pInfoModule : pModule;
+
+ CorElementType typ = ELEMENT_TYPE_END;
+ IfFailThrowBF(GetElemType(&typ), BFA_BAD_SIGNATURE, pOrigModule);
+
+ TypeHandle genericType;
+
+ if (typ == ELEMENT_TYPE_INTERNAL)
+ {
+ // this check is not functional in DAC and provides no security against a malicious dump
+ // the DAC is prepared to receive an invalid type handle
+#ifndef DACCESS_COMPILE
+ if (pModule->IsSigInIL(m_ptr))
+ THROW_BAD_FORMAT(BFA_BAD_SIGNATURE, (Module*)pModule);
+#endif
+
+ IfFailThrow(GetPointer((void**)&genericType));
+ }
+ else
+ {
+ mdToken typeToken = mdTypeRefNil;
+ IfFailThrowBF(GetToken(&typeToken), BFA_BAD_SIGNATURE, pOrigModule);
+
+#if defined(FEATURE_NATIVE_IMAGE_GENERATION) && !defined(DACCESS_COMPILE)
+ if ((pOrigModule != pModule) && (pZapSigContext->externalTokens == ZapSig::IbcTokens))
+ {
+ // ibcExternalType tokens are actually encoded as mdtTypeDef tokens in the signature
+ RID typeRid = RidFromToken(typeToken);
+ idExternalType ibcToken = RidToToken(typeRid, ibcExternalType);
+ typeToken = pOrigModule->LookupIbcTypeToken(pModule, ibcToken);
+
+ if (IsNilToken(typeToken))
+ {
+ SString * fullTypeName = pOrigModule->IBCErrorNameString();
+ fullTypeName->Clear();
+ pOrigModule->LookupIbcTypeToken(pModule, ibcToken, fullTypeName);
+
+ THROW_BAD_FORMAT(BFA_MISSING_IBC_EXTERNAL_TYPE, pOrigModule);
+ }
+ }
+#endif
+
+ if ((TypeFromToken(typeToken) != mdtTypeRef) && (TypeFromToken(typeToken) != mdtTypeDef))
+ THROW_BAD_FORMAT(BFA_UNEXPECTED_TOKEN_AFTER_GENINST, pOrigModule);
+
+ if (IsNilToken(typeToken))
+ THROW_BAD_FORMAT(BFA_UNEXPECTED_TOKEN_AFTER_GENINST, pOrigModule);
+
+ ClassLoader::NotFoundAction notFoundAction;
+ CorInternalStates tdTypes;
+
+ if (fLoadTypes == ClassLoader::LoadTypes)
+ {
+ notFoundAction = ClassLoader::ThrowIfNotFound;
+ tdTypes = tdNoTypes;
+ }
+ else
+ {
+ notFoundAction = ClassLoader::ReturnNullIfNotFound;
+ tdTypes = tdAllTypes;
+ }
+
+ genericType = ClassLoader::LoadTypeDefOrRefThrowing(pModule,
+ typeToken,
+ notFoundAction,
+ ClassLoader::PermitUninstDefOrRef,
+ tdTypes,
+ level);
+
+ if (genericType.IsNull())
+ {
+ return genericType;
+ }
+
+#ifndef DACCESS_COMPILE
+ if (fLoadTypes == ClassLoader::LoadTypes)
+ {
+ // Skip this check when using zap sigs; it should have been correctly computed at NGen time
+ // and a change from one to the other would have invalidated the image. Leave in the code for debug so we can assert below.
+ if (pZapSigContext == NULL || pZapSigContext->externalTokens != ZapSig::NormalTokens)
+ {
+ bool typFromSigIsClass = (typ == ELEMENT_TYPE_CLASS);
+ bool typLoadedIsClass = (genericType.GetSignatureCorElementType() == ELEMENT_TYPE_CLASS);
+
+ if (typFromSigIsClass != typLoadedIsClass)
+ {
+ pOrigModule->GetAssembly()->ThrowTypeLoadException(pModule->GetMDImport(),
+ typeToken,
+ BFA_CLASSLOAD_VALUETYPEMISMATCH);
+ }
+ }
+
+ // Assert that our reasoning above was valid (that there is never a zapsig that gets this wrong)
+ _ASSERTE(((typ == ELEMENT_TYPE_CLASS) == (genericType.GetSignatureCorElementType() == ELEMENT_TYPE_CLASS)) ||
+ pZapSigContext == NULL || pZapSigContext->externalTokens != ZapSig::NormalTokens);
+ }
+#endif // #ifndef DACCESS_COMPILE
+ }
+
+ return genericType;
+}
+
+// SigPointer should be just after E_T_VAR or E_T_MVAR
+TypeHandle SigPointer::GetTypeVariableThrowing(Module *pModule, // unused - may be used later for better error reporting
+ CorElementType et,
+ ClassLoader::LoadTypesFlag fLoadTypes/*=LoadTypes*/,
+ const SigTypeContext *pTypeContext)
+{
+ CONTRACT(TypeHandle)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CorTypeInfo::IsGenericVariable_NoThrow(et));
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ MODE_ANY;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ POSTCONDITION(CheckPointer(RETVAL, ((fLoadTypes == ClassLoader::LoadTypes) ? NULL_NOT_OK : NULL_OK)));
+ SUPPORTS_DAC;
+ }
+ CONTRACT_END
+
+ TypeHandle res = GetTypeVariable(et, pTypeContext);
+#ifndef DACCESS_COMPILE
+ if (res.IsNull() && (fLoadTypes == ClassLoader::LoadTypes))
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+#endif
+ RETURN(res);
+}
+
+// SigPointer should be just after E_T_VAR or E_T_MVAR
+TypeHandle SigPointer::GetTypeVariable(CorElementType et,
+ const SigTypeContext *pTypeContext)
+{
+
+ CONTRACT(TypeHandle)
+ {
+ INSTANCE_CHECK;
+ PRECONDITION(CorTypeInfo::IsGenericVariable_NoThrow(et));
+ NOTHROW;
+ GC_NOTRIGGER;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); // will return TypeHandle() if index is out of range
+ SUPPORTS_DAC;
+#ifndef DACCESS_COMPILE
+ // POSTCONDITION(RETVAL.IsNull() || RETVAL.IsRestored() || RETVAL.GetMethodTable()->IsRestoring());
+#endif
+ MODE_ANY;
+ }
+ CONTRACT_END
+
+ DWORD index;
+ if (FAILED(GetData(&index)))
+ {
+ TypeHandle thNull;
+ RETURN(thNull);
+ }
+
+ if (!pTypeContext
+ ||
+ (et == ELEMENT_TYPE_VAR &&
+ (index >= pTypeContext->m_classInst.GetNumArgs()))
+ ||
+ (et == ELEMENT_TYPE_MVAR &&
+ (index >= pTypeContext->m_methodInst.GetNumArgs())))
+ {
+ LOG((LF_ALWAYS, LL_INFO1000, "GENERICS: Error: GetTypeVariable on out-of-range type variable\n"));
+ BAD_FORMAT_NOTHROW_ASSERT(!"Invalid type context: either this is an ill-formed signature (e.g. an invalid type variable number) or you have not provided a non-empty SigTypeContext where one is required. Check back on the callstack for where the value of pTypeContext is first provided, and see if it is acquired from the correct place. For calls originating from a JIT it should be acquired from the context parameter, which indicates the method being compiled. For calls from other locations it should be acquired from the MethodTable, EEClass, TypeHandle, FieldDesc or MethodDesc being analyzed.");
+ TypeHandle thNull;
+ RETURN(thNull);
+ }
+ if (et == ELEMENT_TYPE_VAR)
+ {
+ RETURN(pTypeContext->m_classInst[index]);
+ }
+ else
+ {
+ RETURN(pTypeContext->m_methodInst[index]);
+ }
+}
+
+
+#ifndef DACCESS_COMPILE
+
+// Does this type contain class or method type parameters whose instantiation cannot
+// be determined at JIT-compile time from the instantiations in the method context?
+// Return a combination of hasClassVar and hasMethodVar flags.
+// See header file for more info.
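+// For example, a signature for "List<!0>" reports hasClassVar and one for
+// "!!0[]" reports hasMethodVar, while "int32" reports hasNoVars. The
+// hasSharable* flags are additionally set when the variable's instantiation
+// in pTypeContext is a canonical subtype.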
+VarKind SigPointer::IsPolyType(const SigTypeContext *pTypeContext) const
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ SigPointer psig = *this;
+ CorElementType typ;
+
+ if (FAILED(psig.GetElemType(&typ)))
+ return hasNoVars;
+
+ switch(typ) {
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ {
+ VarKind res = (typ == ELEMENT_TYPE_VAR ? hasClassVar : hasMethodVar);
+ if (pTypeContext != NULL)
+ {
+ TypeHandle ty = psig.GetTypeVariable(typ, pTypeContext);
+ if (ty.IsCanonicalSubtype())
+ res = (VarKind) (res | (typ == ELEMENT_TYPE_VAR ? hasSharableClassVar : hasSharableMethodVar));
+ }
+ return (res);
+ }
+
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ case ELEMENT_TYPE_VOID:
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_TYPEDBYREF:
+ return(hasNoVars);
+
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ VarKind k = psig.IsPolyType(pTypeContext);
+ if (FAILED(psig.SkipExactlyOne()))
+ return hasNoVars;
+
+ ULONG ntypars;
+ if(FAILED(psig.GetData(&ntypars)))
+ return hasNoVars;
+
+ for (ULONG i = 0; i < ntypars; i++)
+ {
+ k = (VarKind) (psig.IsPolyType(pTypeContext) | k);
+ if (FAILED(psig.SkipExactlyOne()))
+ return hasNoVars;
+ }
+ return(k);
+ }
+
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_PINNED:
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_PTR:
+ {
+ return(psig.IsPolyType(pTypeContext));
+ }
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+ if (FAILED(psig.GetData(NULL)))
+ return hasNoVars;
+
+ // Get arg count;
+ ULONG cArgs;
+ if (FAILED(psig.GetData(&cArgs)))
+ return hasNoVars;
+
+ VarKind k = psig.IsPolyType(pTypeContext);
+ if (FAILED(psig.SkipExactlyOne()))
+ return hasNoVars;
+
+ for (unsigned i = 0; i < cArgs; i++)
+ {
+ k = (VarKind) (psig.IsPolyType(pTypeContext) | k);
+ if (FAILED(psig.SkipExactlyOne()))
+ return hasNoVars;
+ }
+
+ return(k);
+ }
+
+ default:
+ BAD_FORMAT_NOTHROW_ASSERT(!"Bad type");
+ }
+ return(hasNoVars);
+}
+
+BOOL SigPointer::IsStringType(Module* pModule, const SigTypeContext *pTypeContext) const
+{
+ WRAPPER_NO_CONTRACT;
+
+ return IsStringTypeHelper(pModule, pTypeContext, FALSE);
+}
+
+
+BOOL SigPointer::IsStringTypeThrowing(Module* pModule, const SigTypeContext *pTypeContext) const
+{
+ WRAPPER_NO_CONTRACT;
+
+ return IsStringTypeHelper(pModule, pTypeContext, TRUE);
+}
+
+BOOL SigPointer::IsStringTypeHelper(Module* pModule, const SigTypeContext* pTypeContext, BOOL fThrow) const
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ if (fThrow)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ else
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END
+
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+ SigPointer psig = *this;
+ CorElementType typ;
+ if (FAILED(psig.GetElemType(&typ)))
+ {
+ if (fThrow)
+ {
+ ThrowHR(META_E_BAD_SIGNATURE);
+ }
+ else
+ {
+ return FALSE;
+ }
+ }
+
+ switch (typ)
+ {
+ case ELEMENT_TYPE_STRING :
+ return TRUE;
+
+ case ELEMENT_TYPE_CLASS :
+ {
+ LPCUTF8 pclsname;
+ LPCUTF8 pszNamespace;
+ mdToken token;
+
+ if (FAILED( psig.GetToken(&token)))
+ {
+ if (fThrow)
+ {
+ ThrowHR(META_E_BAD_SIGNATURE);
+ }
+ else
+ {
+ return FALSE;
+ }
+ }
+
+ if (TypeFromToken(token) == mdtTypeDef)
+ {
+ if (FAILED(pInternalImport->GetNameOfTypeDef(token, &pclsname, &pszNamespace)))
+ {
+ if (fThrow)
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ else
+ {
+ return FALSE;
+ }
+ }
+ }
+ else
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(TypeFromToken(token) == mdtTypeRef);
+ if (FAILED(pInternalImport->GetNameOfTypeRef(token, &pszNamespace, &pclsname)))
+ {
+ if (fThrow)
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ else
+ {
+ return FALSE;
+ }
+ }
+ }
+
+ if (strcmp(pclsname, g_StringName) != 0)
+ return FALSE;
+
+ if (pszNamespace == NULL)
+ return FALSE;
+
+ return (strcmp(pszNamespace, g_SystemNS) == 0);
+ }
+
+ case ELEMENT_TYPE_VAR :
+ case ELEMENT_TYPE_MVAR :
+ {
+ TypeHandle ty;
+
+ if (fThrow)
+ {
+ ty = psig.GetTypeVariableThrowing(pModule, typ, ClassLoader::LoadTypes, pTypeContext);
+ }
+ else
+ {
+ ty = psig.GetTypeVariable(typ, pTypeContext);
+ }
+
+ TypeHandle th(g_pStringClass);
+ return (ty == th);
+ }
+
+ default:
+ break;
+ }
+ return FALSE;
+}
+
+
+//------------------------------------------------------------------------
+// Tests if the element class name is szClassName.
+//------------------------------------------------------------------------
+BOOL SigPointer::IsClass(Module* pModule, LPCUTF8 szClassName, const SigTypeContext *pTypeContext) const
+{
+ WRAPPER_NO_CONTRACT;
+
+ return IsClassHelper(pModule, szClassName, pTypeContext, FALSE);
+}
+
+
+//------------------------------------------------------------------------
+// Tests if the element class name is szClassName.
+//------------------------------------------------------------------------
+BOOL SigPointer::IsClassThrowing(Module* pModule, LPCUTF8 szClassName, const SigTypeContext *pTypeContext) const
+{
+ WRAPPER_NO_CONTRACT;
+
+ return IsClassHelper(pModule, szClassName, pTypeContext, TRUE);
+}
+
+BOOL SigPointer::IsClassHelper(Module* pModule, LPCUTF8 szClassName, const SigTypeContext* pTypeContext, BOOL fThrow) const
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+
+ if (fThrow)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ else
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(szClassName));
+ }
+ CONTRACTL_END
+
+ SigPointer psig = *this;
+ CorElementType typ;
+ if (FAILED(psig.GetElemType(&typ)))
+ {
+ if (fThrow)
+ ThrowHR(META_E_BAD_SIGNATURE);
+ else
+ return FALSE;
+ }
+
+ BAD_FORMAT_NOTHROW_ASSERT((typ == ELEMENT_TYPE_VAR) || (typ == ELEMENT_TYPE_MVAR) ||
+ (typ == ELEMENT_TYPE_CLASS) || (typ == ELEMENT_TYPE_VALUETYPE) ||
+ (typ == ELEMENT_TYPE_OBJECT) || (typ == ELEMENT_TYPE_STRING) ||
+ (typ == ELEMENT_TYPE_INTERNAL) || (typ == ELEMENT_TYPE_GENERICINST));
+
+
+ if (typ == ELEMENT_TYPE_VAR || typ == ELEMENT_TYPE_MVAR)
+ {
+ TypeHandle ty;
+
+ if (fThrow)
+ ty = psig.GetTypeVariableThrowing(pModule, typ, ClassLoader::LoadTypes, pTypeContext);
+ else
+ ty = psig.GetTypeVariable(typ, pTypeContext);
+
+ return(!ty.IsNull() && IsTypeRefOrDef(szClassName, ty.GetModule(), ty.GetCl()));
+ }
+ else if ((typ == ELEMENT_TYPE_CLASS) || (typ == ELEMENT_TYPE_VALUETYPE))
+ {
+ mdTypeRef typeref;
+ if (FAILED(psig.GetToken(&typeref)))
+ {
+ if (fThrow)
+ ThrowHR(META_E_BAD_SIGNATURE);
+ else
+ return FALSE;
+ }
+
+ return( IsTypeRefOrDef(szClassName, pModule, typeref) );
+ }
+ else if (typ == ELEMENT_TYPE_OBJECT)
+ {
+ return( !strcmp(szClassName, g_ObjectClassName) );
+ }
+ else if (typ == ELEMENT_TYPE_STRING)
+ {
+ return( !strcmp(szClassName, g_StringClassName) );
+ }
+ else if (typ == ELEMENT_TYPE_INTERNAL)
+ {
+ TypeHandle th;
+
+ // this check is not functional in DAC and provides no security against a malicious dump
+ // the DAC is prepared to receive an invalid type handle
+#ifndef DACCESS_COMPILE
+ if (pModule->IsSigInIL(m_ptr))
+ {
+ if (fThrow)
+ ThrowHR(META_E_BAD_SIGNATURE);
+ else
+ return FALSE;
+ }
+#endif
+
+ CorSigUncompressPointer(psig.GetPtr(), (void**)&th);
+ _ASSERTE(!th.IsNull());
+ return(IsTypeRefOrDef(szClassName, th.GetModule(), th.GetCl()));
+ }
+
+ return( false );
+}
+
+//------------------------------------------------------------------------
+// Tests for the existence of a custom modifier
+//------------------------------------------------------------------------
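+// For example, C#'s volatile fields are encoded with
+// modreq(System.Runtime.CompilerServices.IsVolatile), so a caller positioned at a
+// field's type can detect them by passing ELEMENT_TYPE_CMOD_REQD for cmodtype and
+// "System.Runtime.CompilerServices.IsVolatile" for szModName.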
+BOOL SigPointer::HasCustomModifier(Module *pModule, LPCSTR szModName, CorElementType cmodtype) const
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ BAD_FORMAT_NOTHROW_ASSERT(cmodtype == ELEMENT_TYPE_CMOD_OPT || cmodtype == ELEMENT_TYPE_CMOD_REQD);
+
+ SigPointer sp = *this;
+ CorElementType etyp;
+ if (sp.AtSentinel())
+ sp.m_ptr++;
+
+ BYTE data;
+
+ if (FAILED(sp.GetByte(&data)))
+ return FALSE;
+
+ etyp = (CorElementType)data;
+
+ while (etyp == ELEMENT_TYPE_CMOD_OPT || etyp == ELEMENT_TYPE_CMOD_REQD) {
+
+ mdToken tk;
+ if (FAILED(sp.GetToken(&tk)))
+ return FALSE;
+
+ if (etyp == cmodtype && IsTypeRefOrDef(szModName, pModule, tk))
+ {
+ return(TRUE);
+ }
+
+ if (FAILED(sp.GetByte(&data)))
+ return FALSE;
+
+ etyp = (CorElementType)data;
+ }
+ return(FALSE);
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+//------------------------------------------------------------------------
+// Tests for ELEMENT_TYPE_CLASS or ELEMENT_TYPE_VALUETYPE followed by a TypeDef,
+// and returns the TypeDef
+//------------------------------------------------------------------------
+BOOL SigPointer::IsTypeDef(mdTypeDef* pTypeDef) const
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SigPointer sigTemp(*this);
+
+ CorElementType etype = ELEMENT_TYPE_END;
+ HRESULT hr = sigTemp.GetElemType(&etype);
+ if (FAILED(hr))
+ return FALSE;
+
+ if (etype != ELEMENT_TYPE_CLASS && etype != ELEMENT_TYPE_VALUETYPE)
+ return FALSE;
+
+ mdToken token = mdTypeRefNil;
+ hr = sigTemp.GetToken(&token);
+ if (FAILED(hr))
+ return FALSE;
+
+ if (TypeFromToken(token) != mdtTypeDef)
+ return FALSE;
+
+ if (pTypeDef)
+ *pTypeDef = (mdTypeDef)token;
+
+ return TRUE;
+}
+
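+//---------------------------------------------------------------------------------------
+//
+// Returns the normalized element type of the current signature element. For example,
+// an enum normalizes to its underlying primitive (e.g. ELEMENT_TYPE_I4), while a
+// non-enum struct remains ELEMENT_TYPE_VALUETYPE and its type handle, once loaded,
+// is returned through pthValueType.
+//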
+CorElementType SigPointer::PeekElemTypeNormalized(Module* pModule, const SigTypeContext *pTypeContext, TypeHandle * pthValueType) const
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ CorElementType type = PeekElemTypeClosed(pModule, pTypeContext);
+ _ASSERTE(type != ELEMENT_TYPE_INTERNAL);
+
+ if (type == ELEMENT_TYPE_VALUETYPE)
+ {
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ {
+ // The Everett C++ compiler can generate a TypeRef with RS=0
+ // and no corresponding TypeDef for unmanaged valuetypes
+ // that are referenced only through pointers to them.
+ // In that case GetTypeHandleThrowing returns a null handle,
+ // and we return E_T_VOID.
+ TypeHandle th = GetTypeHandleThrowing(pModule, pTypeContext, ClassLoader::LoadTypes, CLASS_LOAD_APPROXPARENTS, TRUE);
+ if(th.IsNull())
+ {
+ th = TypeHandle(MscorlibBinder::GetElementType(ELEMENT_TYPE_VOID));
+ }
+
+ type = th.GetInternalCorElementType();
+ if (pthValueType != NULL)
+ *pthValueType = th;
+ }
+ END_SO_INTOLERANT_CODE;
+ }
+
+ return(type);
+}
+
+//---------------------------------------------------------------------------------------
+//
+CorElementType
+SigPointer::PeekElemTypeClosed(
+ Module * pModule,
+ const SigTypeContext * pTypeContext) const
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+
+ CorElementType type;
+
+ if (FAILED(PeekElemType(&type)))
+ return ELEMENT_TYPE_END;
+
+ if ((type == ELEMENT_TYPE_GENERICINST) ||
+ (type == ELEMENT_TYPE_VAR) ||
+ (type == ELEMENT_TYPE_MVAR) ||
+ (type == ELEMENT_TYPE_INTERNAL))
+ {
+ SigPointer sp(*this);
+ if (FAILED(sp.GetElemType(NULL))) // skip over E_T_XXX
+ return ELEMENT_TYPE_END;
+
+ switch (type)
+ {
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ if (FAILED(sp.GetElemType(&type)))
+ return ELEMENT_TYPE_END;
+
+ if (type != ELEMENT_TYPE_INTERNAL)
+ return type;
+ }
+
+ // intentionally fall through
+ case ELEMENT_TYPE_INTERNAL:
+ {
+ TypeHandle th;
+
+ // this check is not functional in DAC and provides no security against a malicious dump
+ // the DAC is prepared to receive an invalid type handle
+#ifndef DACCESS_COMPILE
+ if ((pModule != NULL) && pModule->IsSigInIL(m_ptr))
+ {
+ return ELEMENT_TYPE_END;
+ }
+#endif
+
+ if (FAILED(sp.GetPointer((void **)&th)))
+ {
+ return ELEMENT_TYPE_END;
+ }
+ _ASSERTE(!th.IsNull());
+
+ return th.GetSignatureCorElementType();
+ }
+ case ELEMENT_TYPE_VAR :
+ case ELEMENT_TYPE_MVAR :
+ {
+ TypeHandle th = sp.GetTypeVariable(type, pTypeContext);
+ if (th.IsNull())
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"You either have bad signature or caller forget to pass valid type context");
+ return ELEMENT_TYPE_END;
+ }
+
+ return th.GetSignatureCorElementType();
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ return type;
+} // SigPointer::PeekElemTypeClosed
+
+
+//---------------------------------------------------------------------------------------
+//
+mdTypeRef SigPointer::PeekValueTypeTokenClosed(Module *pModule, const SigTypeContext *pTypeContext, Module **ppModuleOfToken) const
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(PeekElemTypeClosed(NULL, pTypeContext) == ELEMENT_TYPE_VALUETYPE);
+ FORBID_FAULT;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+
+ mdToken token;
+ CorElementType type;
+
+ *ppModuleOfToken = pModule;
+
+ if (FAILED(PeekElemType(&type)))
+ return mdTokenNil;
+
+ switch (type)
+ {
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ SigPointer sp(*this);
+ if (FAILED(sp.GetElemType(NULL)))
+ return mdTokenNil;
+
+ if (FAILED(sp.GetElemType(NULL)))
+ return mdTokenNil;
+
+ if (FAILED(sp.GetToken(&token)))
+ return mdTokenNil;
+
+ return token;
+ }
+ case ELEMENT_TYPE_VAR :
+ case ELEMENT_TYPE_MVAR :
+ {
+ SigPointer sp(*this);
+
+ if (FAILED(sp.GetElemType(NULL)))
+ return mdTokenNil;
+
+ TypeHandle th = sp.GetTypeVariable(type, pTypeContext);
+ *ppModuleOfToken = th.GetModule();
+ _ASSERTE(!th.IsNull());
+ return(th.GetCl());
+ }
+ case ELEMENT_TYPE_INTERNAL:
+ // we have no way to give back a token for the E_T_INTERNAL so we return a null one
+ // and make the caller deal with it
+ return mdTokenNil;
+
+ default:
+ {
+ _ASSERTE(type == ELEMENT_TYPE_VALUETYPE);
+ SigPointer sp(*this);
+
+ if (FAILED(sp.GetElemType(NULL)))
+ return mdTokenNil;
+
+ if (FAILED(sp.GetToken(&token)))
+ return mdTokenNil;
+
+ return token;
+ }
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
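+// For example, GetElemSize(ELEMENT_TYPE_I4, TypeHandle()) is 4, and reference
+// types are pointer-sized; only value types consult thValueType, falling back
+// to pointer size for an uninstantiated VAR/MVAR.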
+UINT MetaSig::GetElemSize(CorElementType etype, TypeHandle thValueType)
+{
+ CONTRACTL
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ if ((UINT)etype >= COUNTOF(gElementTypeInfo))
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_COMPLUS_SIG);
+
+ int cbsize = gElementTypeInfo[(UINT)etype].m_cbSize;
+ if (cbsize != -1)
+ return(cbsize);
+
+ if (!thValueType.IsNull())
+ return thValueType.GetSize();
+
+ if (etype == ELEMENT_TYPE_VAR || etype == ELEMENT_TYPE_MVAR)
+ {
+ LOG((LF_ALWAYS, LL_INFO1000, "GENERICS: Warning: SizeOf on VAR without instantiation\n"));
+ return(sizeof(LPVOID));
+ }
+
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_ELEM_IN_SIZEOF);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Assumes that the SigPointer points to the start of an element type.
+// Returns size of that element in bytes. This is the minimum size that a
+// field of this type would occupy inside an object.
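+// For example, an int32 field occupies 4 bytes, a reference-typed field is
+// pointer-sized, and a value-type field occupies the value type's full size.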
+//
+UINT SigPointer::SizeOf(Module* pModule, const SigTypeContext *pTypeContext) const
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ UNCHECKED(PRECONDITION(CheckPointer(pModule)));
+ UNCHECKED(PRECONDITION(CheckPointer(pTypeContext, NULL_OK)));
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ TypeHandle thValueType;
+ CorElementType etype = PeekElemTypeNormalized(pModule, pTypeContext, &thValueType);
+ return MetaSig::GetElemSize(etype, thValueType);
+}
+
+#ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// Determines if the current argument is System.String.
+// Caller must determine first that the argument type is ELEMENT_TYPE_CLASS.
+//
+BOOL MetaSig::IsStringType() const
+{
+ WRAPPER_NO_CONTRACT
+
+ return m_pLastType.IsStringType(m_pModule, &m_typeContext);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Determines if the current argument is a particular class.
+// Caller must determine first that the argument type is ELEMENT_TYPE_CLASS.
+//
+BOOL MetaSig::IsClass(LPCUTF8 szClassName) const
+{
+ WRAPPER_NO_CONTRACT
+
+ return m_pLastType.IsClass(m_pModule, szClassName, &m_typeContext);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Returns the type referenced by a byref argument.
+// The arg type must be an ELEMENT_TYPE_BYREF.
+// For non-primitive targets (e.g. a byref to an array), the full type is returned through the additional *pTy argument.
+//
+CorElementType MetaSig::GetByRefType(TypeHandle *pTy) const
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ SigPointer sigptr(m_pLastType);
+
+ CorElementType typ = ELEMENT_TYPE_END;
+ IfFailThrowBF(sigptr.GetElemType(&typ), BFA_BAD_SIGNATURE, GetModule());
+
+ _ASSERTE(typ == ELEMENT_TYPE_BYREF);
+ typ = (CorElementType)sigptr.PeekElemTypeClosed(GetModule(), &m_typeContext);
+
+ if (!CorIsPrimitiveType(typ))
+ {
+ if (typ == ELEMENT_TYPE_TYPEDBYREF)
+ THROW_BAD_FORMAT(BFA_TYPEDBYREFCANNOTHAVEBYREF, GetModule());
+ TypeHandle th = sigptr.GetTypeHandleThrowing(m_pModule, &m_typeContext);
+ *pTy = th;
+ return(th.GetSignatureCorElementType());
+ }
+ return(typ);
+}
+
+//---------------------------------------------------------------------------------------
+//
+HRESULT CompareTypeTokensNT(mdToken tk1, mdToken tk2, Module *pModule1, Module *pModule2)
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ if (CompareTypeTokens(tk1, tk2, pModule1, pModule2))
+ hr = S_OK;
+ else
+ hr = S_FALSE;
+ }
+ EX_CATCH_HRESULT_NO_ERRORINFO(hr);
+ return hr;
+}
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+
+//---------------------------------------------------------------------------------------
+//
+// Returns S_FALSE if the type is not decorated with TypeIdentifierAttribute.
+//
+HRESULT TypeIdentifierData::Init(Module *pModule, mdToken tk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(TypeFromToken(tk) == mdtTypeDef);
+ }
+ CONTRACTL_END
+
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+ HRESULT hr = S_OK;
+
+ DWORD dwAttrType;
+ IfFailRet(pInternalImport->GetTypeDefProps(tk, &dwAttrType, NULL));
+
+ if (IsTdWindowsRuntime(dwAttrType))
+ {
+ // no type equivalence support for WinRT types
+ return S_FALSE;
+ }
+
+ ULONG cbData;
+ const BYTE *pData;
+
+ IfFailRet(pInternalImport->GetCustomAttributeByName(
+ tk,
+ g_TypeIdentifierAttributeClassName,
+ (const void **)&pData,
+ &cbData));
+
+ if (hr == S_OK)
+ {
+ CustomAttributeParser caType(pData, cbData);
+
+ if (cbData > 4)
+ {
+ // A parameterless blob is just the prolog 01 00 00 00; since this one is longer, the two string arguments must follow.
+ CaArg args[2];
+
+ args[0].Init(SERIALIZATION_TYPE_STRING, 0);
+ args[1].Init(SERIALIZATION_TYPE_STRING, 0);
+ IfFailRet(ParseKnownCaArgs(caType, args, lengthof(args)));
+
+ m_cbScope = args[0].val.str.cbStr;
+ m_pchScope = args[0].val.str.pStr;
+ m_cbIdentifierName = args[1].val.str.cbStr;
+ m_pchIdentifierName = args[1].val.str.pStr;
+ }
+ else
+ {
+ // no arguments follow but we should still verify the blob
+ IfFailRet(caType.ValidateProlog());
+ }
+ }
+ else
+ {
+ // no TypeIdentifierAttribute -> the assembly must be a PIA or imported from a type library
+ bool has_eq = !pModule->GetAssembly()->IsDynamic() && pModule->GetAssembly()->IsPIAOrImportedFromTypeLib();
+
+ if (!has_eq)
+ {
+ // this type is not opted into type equivalence
+ return S_FALSE;
+ }
+ }
+
+ if (m_pchIdentifierName == NULL)
+ {
+ // We got no data from the TypeIdentifierAttribute, so we have to get the scope and identifier elsewhere.
+ if (IsTdInterface(dwAttrType) && IsTdImport(dwAttrType))
+ {
+ // ComImport interfaces get scope from their GUID
+ hr = pInternalImport->GetCustomAttributeByName(tk, INTEROP_GUID_TYPE, (const void **)&pData, &cbData);
+ }
+ else
+ {
+ // other equivalent types get it from the declaring assembly
+ IMDInternalImport *pAssemblyImport = pModule->GetAssembly()->GetManifestImport();
+ hr = pAssemblyImport->GetCustomAttributeByName(TokenFromRid(1, mdtAssembly), INTEROP_GUID_TYPE, (const void **)&pData, &cbData);
+ }
+
+ if (hr != S_OK)
+ {
+ // no GUID is available
+ return hr;
+ }
+
+ CustomAttributeParser caType(pData, cbData);
+ CaArg guidarg;
+
+ guidarg.Init(SERIALIZATION_TYPE_STRING, 0);
+ IfFailRet(ParseKnownCaArgs(caType, &guidarg, 1));
+
+ m_cbScope = guidarg.val.str.cbStr;
+ m_pchScope = guidarg.val.str.pStr;
+
+ // all types get their identifier from their namespace and name
+ LPCUTF8 pszName;
+ LPCUTF8 pszNamespace;
+ IfFailRet(pInternalImport->GetNameOfTypeDef(tk, &pszName, &pszNamespace));
+
+ m_cbIdentifierNamespace = (pszNamespace != NULL ? strlen(pszNamespace) : 0);
+ m_pchIdentifierNamespace = pszNamespace;
+
+ m_cbIdentifierName = strlen(pszName);
+ m_pchIdentifierName = pszName;
+
+ hr = S_OK;
+ }
+
+ return hr;
+}
+
+//---------------------------------------------------------------------------------------
+//
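+// Scopes are compared case-insensitively and identifiers case-sensitively.
+// The split and unsplit identifier forms are treated as equal, so an identifier
+// name of "System.Guid" matches a namespace of "System" plus a name of "Guid".
+//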
+BOOL TypeIdentifierData::IsEqual(const TypeIdentifierData & data) const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // scope needs to be the same
+ if (m_cbScope != data.m_cbScope || _strnicmp(m_pchScope, data.m_pchScope, m_cbScope) != 0)
+ return FALSE;
+
+ // identifier needs to be the same
+ if (m_cbIdentifierNamespace == 0 && data.m_cbIdentifierNamespace == 0)
+ {
+ // we are comparing only m_pchIdentifierName
+ return (m_cbIdentifierName == data.m_cbIdentifierName) &&
+ (memcmp(m_pchIdentifierName, data.m_pchIdentifierName, m_cbIdentifierName) == 0);
+ }
+
+ if (m_cbIdentifierNamespace != 0 && data.m_cbIdentifierNamespace != 0)
+ {
+ // we are comparing both m_pchIdentifierNamespace and m_pchIdentifierName
+ return (m_cbIdentifierName == data.m_cbIdentifierName) &&
+ (m_cbIdentifierNamespace == data.m_cbIdentifierNamespace) &&
+ (memcmp(m_pchIdentifierName, data.m_pchIdentifierName, m_cbIdentifierName) == 0) &&
+ (memcmp(m_pchIdentifierNamespace, data.m_pchIdentifierNamespace, m_cbIdentifierNamespace) == 0);
+ }
+
+ if (m_cbIdentifierNamespace == 0 && data.m_cbIdentifierNamespace != 0)
+ {
+ // we are comparing m_cbIdentifierName with (data.m_pchIdentifierNamespace + '.' + data.m_pchIdentifierName)
+ if (m_cbIdentifierName != data.m_cbIdentifierNamespace + 1 + data.m_cbIdentifierName)
+ return FALSE;
+
+ return (memcmp(m_pchIdentifierName, data.m_pchIdentifierNamespace, data.m_cbIdentifierNamespace) == 0) &&
+ (m_pchIdentifierName[data.m_cbIdentifierNamespace] == NAMESPACE_SEPARATOR_CHAR) &&
+ (memcmp(m_pchIdentifierName + data.m_cbIdentifierNamespace + 1, data.m_pchIdentifierName, data.m_cbIdentifierName) == 0);
+ }
+
+ _ASSERTE(m_cbIdentifierNamespace != 0 && data.m_cbIdentifierNamespace == 0);
+
+ // we are comparing (m_pchIdentifierNamespace + '.' + m_pchIdentifierName) with data.m_cbIdentifierName
+ if (m_cbIdentifierNamespace + 1 + m_cbIdentifierName != data.m_cbIdentifierName)
+ return FALSE;
+
+ return (memcmp(m_pchIdentifierNamespace, data.m_pchIdentifierName, m_cbIdentifierNamespace) == 0) &&
+ (data.m_pchIdentifierName[m_cbIdentifierNamespace] == NAMESPACE_SEPARATOR_CHAR) &&
+ (memcmp(m_pchIdentifierName, data.m_pchIdentifierName + m_cbIdentifierNamespace + 1, m_cbIdentifierName) == 0);
+}
+
+#endif //FEATURE_TYPEEQUIVALENCE
+#ifdef FEATURE_COMINTEROP
+
+//---------------------------------------------------------------------------------------
+//
+static CorElementType GetFieldSigElementType(PCCOR_SIGNATURE pSig, DWORD cbSig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ SigPointer sigptr(pSig, cbSig);
+
+ ULONG data;
+ IfFailThrow(sigptr.GetCallingConv(&data));
+ _ASSERTE(data == IMAGE_CEE_CS_CALLCONV_FIELD);
+
+ CorElementType etype;
+ IfFailThrow(sigptr.GetElemType(&etype));
+
+ return etype;
+}
+
+//---------------------------------------------------------------------------------------
+//
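+// Two structures are equivalent only if neither declares any methods and their
+// public instance fields match pairwise by signature. In enum mode literal
+// fields are ignored; otherwise any non-public-instance field disqualifies the
+// type, and the type layouts (kind, charset, packing, size, offsets,
+// marshaling) must also match.
+//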
+static BOOL CompareStructuresForEquivalence(mdToken tk1, mdToken tk2, Module *pModule1, Module *pModule2, BOOL fEnumMode, TokenPairList *pVisited)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ // make sure the types don't declare any methods
+ IMDInternalImport *pInternalImport1 = pModule1->GetMDImport();
+ IMDInternalImport *pInternalImport2 = pModule2->GetMDImport();
+
+ HENUMInternalHolder hMethodEnum1(pInternalImport1);
+ HENUMInternalHolder hMethodEnum2(pInternalImport2);
+
+ hMethodEnum1.EnumInit(mdtMethodDef, tk1);
+ hMethodEnum2.EnumInit(mdtMethodDef, tk2);
+
+ if (hMethodEnum1.EnumGetCount() != 0 || hMethodEnum2.EnumGetCount() != 0)
+ return FALSE;
+
+ // compare field types for equivalence
+ HENUMInternalHolder hFieldEnum1(pInternalImport1);
+ HENUMInternalHolder hFieldEnum2(pInternalImport2);
+
+ hFieldEnum1.EnumInit(mdtFieldDef, tk1);
+ hFieldEnum2.EnumInit(mdtFieldDef, tk2);
+
+ while (true)
+ {
+ mdToken tkField1, tkField2;
+
+ DWORD dwAttrField1, dwAttrField2;
+ bool res1, res2;
+
+ while ((res1 = hFieldEnum1.EnumNext(&tkField1)) == true)
+ {
+ IfFailThrow(pInternalImport1->GetFieldDefProps(tkField1, &dwAttrField1));
+
+ if (IsFdPublic(dwAttrField1) && !IsFdStatic(dwAttrField1))
+ break;
+
+ if (!fEnumMode || !IsFdLiteral(dwAttrField1)) // ignore literals in enums
+ return FALSE;
+ }
+
+ while ((res2 = hFieldEnum2.EnumNext(&tkField2)) == true)
+ {
+ IfFailThrow(pInternalImport2->GetFieldDefProps(tkField2, &dwAttrField2));
+
+ if (IsFdPublic(dwAttrField2) && !IsFdStatic(dwAttrField2))
+ break;
+
+ if (!fEnumMode || !IsFdLiteral(dwAttrField2)) // ignore literals in enums
+ return FALSE;
+ }
+
+ if (!res1 && !res2)
+ {
+ // we ran out of fields in both types
+ break;
+ }
+
+ if (res1 != res2)
+ {
+ // we ran out of fields in one type
+ return FALSE;
+ }
+
+ // now we have tokens of two instance fields that need to be compared for equivalence
+ PCCOR_SIGNATURE pSig1, pSig2;
+ DWORD cbSig1, cbSig2;
+
+ IfFailThrow(pInternalImport1->GetSigOfFieldDef(tkField1, &cbSig1, &pSig1));
+ IfFailThrow(pInternalImport2->GetSigOfFieldDef(tkField2, &cbSig2, &pSig2));
+
+ if (!MetaSig::CompareFieldSigs(pSig1, cbSig1, pModule1, pSig2, cbSig2, pModule2, pVisited))
+ return FALSE;
+ }
+
+ if (!fEnumMode)
+ {
+ // compare layout (layout kind, charset, packing, size, offsets, marshaling)
+ if (!CompareTypeLayout(tk1, tk2, pModule1, pModule2))
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+//---------------------------------------------------------------------------------------
+//
+static void GetDelegateInvokeMethodSignature(mdToken tkDelegate, Module *pModule, DWORD *pcbSig, PCCOR_SIGNATURE *ppSig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+
+ HENUMInternalHolder hEnum(pInternalImport);
+ hEnum.EnumInit(mdtMethodDef, tkDelegate);
+
+ mdToken tkMethod;
+ while (hEnum.EnumNext(&tkMethod))
+ {
+ LPCUTF8 pszName;
+ IfFailThrow(pInternalImport->GetNameAndSigOfMethodDef(tkMethod, ppSig, pcbSig, &pszName));
+
+ if (strcmp(pszName, "Invoke") == 0)
+ return;
+ }
+
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+}
+
+static BOOL CompareDelegatesForEquivalence(mdToken tk1, mdToken tk2, Module *pModule1, Module *pModule2, TokenPairList *pVisited)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ PCCOR_SIGNATURE pSig1;
+ PCCOR_SIGNATURE pSig2;
+ DWORD cbSig1;
+ DWORD cbSig2;
+
+ // find the Invoke methods
+ GetDelegateInvokeMethodSignature(tk1, pModule1, &cbSig1, &pSig1);
+ GetDelegateInvokeMethodSignature(tk2, pModule2, &cbSig2, &pSig2);
+
+ return MetaSig::CompareMethodSigs(pSig1, cbSig1, pModule1, NULL, pSig2, cbSig2, pModule2, NULL, pVisited);
+}
+
+#endif // FEATURE_COMINTEROP
+#endif // #ifndef DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+//---------------------------------------------------------------------------------------
+//
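+// A typedef is externally visible only if it is public and, when nested, every
+// enclosing type in its nesting chain is public as well; e.g. a NestedPublic
+// type declared inside an internal type is not externally visible.
+//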
+BOOL IsTypeDefExternallyVisible(mdToken tk, Module *pModule, DWORD dwAttrClass)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ BOOL bIsVisible = TRUE;
+
+ if (!IsTdPublic(dwAttrClass))
+ {
+ if (!IsTdNestedPublic(dwAttrClass))
+ return FALSE;
+
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+
+ DWORD dwAttrEnclosing;
+
+ mdTypeDef tdCurrent = tk;
+ do
+ {
+ mdTypeDef tdEnclosing = mdTypeDefNil;
+
+ if (FAILED(pInternalImport->GetNestedClassProps(tdCurrent, &tdEnclosing)))
+ return FALSE;
+
+ tdCurrent = tdEnclosing;
+
+ // We stop searching as soon as we hit the first non-NestedPublic type,
+ // so logically we can't possibly fall off the top of the hierarchy.
+ _ASSERTE(tdEnclosing != mdTypeDefNil);
+
+ mdToken tkJunk = mdTokenNil;
+
+ if (FAILED(pInternalImport->GetTypeDefProps(tdEnclosing, &dwAttrEnclosing, &tkJunk)))
+ {
+ return FALSE;
+ }
+ }
+ while (IsTdNestedPublic(dwAttrEnclosing));
+
+ bIsVisible = IsTdPublic(dwAttrEnclosing);
+ }
+
+ return bIsVisible;
+}
+#endif
+
+#ifndef FEATURE_TYPEEQUIVALENCE
+#ifndef DACCESS_COMPILE
+BOOL IsTypeDefEquivalent(mdToken tk, Module *pModule)
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+#endif
+#endif
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+#ifndef DACCESS_COMPILE
+BOOL IsTypeDefEquivalent(mdToken tk, Module *pModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ IMDInternalImport *pInternalImport = pModule->GetMDImport();
+
+ if (tk == mdTypeDefNil)
+ return FALSE;
+
+ DWORD dwAttrType;
+ mdToken tkExtends;
+
+ IfFailThrow(pInternalImport->GetTypeDefProps(tk, &dwAttrType, &tkExtends));
+
+ if (IsTdWindowsRuntime(dwAttrType))
+ {
+ // no type equivalence support for WinRT types
+ return FALSE;
+ }
+
+ // Check for the TypeIdentifierAttribute and auto opt-in
+ HRESULT hr = pInternalImport->GetCustomAttributeByName(tk, g_TypeIdentifierAttributeClassName, NULL, NULL);
+ IfFailThrow(hr);
+
+ // 1. Type is within assembly marked with ImportedFromTypeLibAttribute or PrimaryInteropAssemblyAttribute
+ if (hr != S_OK)
+ {
+ bool has_eq = !pModule->GetAssembly()->IsDynamic() && pModule->GetAssembly()->IsPIAOrImportedFromTypeLib();
+
+ if (!has_eq)
+ return FALSE;
+ }
+ else if (hr == S_OK)
+ {
+ // Type has TypeIdentifierAttribute. It is marked as type equivalent.
+ return TRUE;
+ }
+
+ mdToken tdEnum = g_pEnumClass->GetCl();
+ Module *pSystemModule = g_pEnumClass->GetModule();
+ mdToken tdValueType = g_pValueTypeClass->GetCl();
+ _ASSERTE(pSystemModule == g_pValueTypeClass->GetModule());
+ mdToken tdMCDelegate = g_pMulticastDelegateClass->GetCl();
+ _ASSERTE(pSystemModule == g_pMulticastDelegateClass->GetModule());
+
+ // 2. Type is a COMImport/COMEvent interface, enum, struct, or delegate
+ BOOL fIsCOMInterface = FALSE;
+ if (IsTdInterface(dwAttrType))
+ {
+ if (IsTdImport(dwAttrType))
+ {
+ // COMImport
+ fIsCOMInterface = TRUE;
+ }
+ else
+ {
+ // COMEvent
+ hr = pInternalImport->GetCustomAttributeByName(tk, INTEROP_COMEVENTINTERFACE_TYPE, NULL, NULL);
+ IfFailThrow(hr);
+
+ if (hr == S_OK)
+ fIsCOMInterface = TRUE;
+ }
+ }
+
+ if (fIsCOMInterface ||
+ (!IsTdInterface(dwAttrType) && (tkExtends != mdTypeDefNil) &&
+ ((CompareTypeTokens(tkExtends, tdEnum, pModule, pSystemModule)) ||
+ (CompareTypeTokens(tkExtends, tdValueType, pModule, pSystemModule) && (tk != tdEnum || pModule != pSystemModule)) ||
+ (CompareTypeTokens(tkExtends, tdMCDelegate, pModule, pSystemModule)))))
+ {
+ HENUMInternal hEnumGenericPars;
+ IfFailThrow(pInternalImport->EnumInit(mdtGenericParam, tk, &hEnumGenericPars));
+ DWORD numGenericArgs = pInternalImport->EnumGetCount(&hEnumGenericPars);
+
+ // 3. Type is not generic
+ if (numGenericArgs > 0)
+ return FALSE;
+
+ // 4. Type is externally visible (i.e. public)
+ if (!IsTypeDefExternallyVisible(tk, pModule, dwAttrType))
+ return FALSE;
+
+ // since the token has not been loaded yet,
+ // its module might be not fully initialized in this domain
+ // take care of that possibility
+ pModule->EnsureAllocated();
+
+ // 5. Type is in a fully trusted assembly
+ if (!pModule->GetSecurityDescriptor()->IsFullyTrusted())
+ return FALSE;
+
+ // 6. If type is nested, nesting type must be equivalent.
+ if (IsTdNested(dwAttrType))
+ {
+ mdTypeDef tdEnclosing = mdTypeDefNil;
+
+ IfFailThrow(pInternalImport->GetNestedClassProps(tk, &tdEnclosing));
+
+ if (!IsTypeDefEquivalent(tdEnclosing, pModule))
+ return FALSE;
+ }
+
+ // Type meets all of the requirements laid down above. Type is considered to be marked as equivalent.
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+}
+#endif
+#endif // FEATURE_TYPEEQUIVALENCE
+
+BOOL CompareTypeDefsForEquivalence(mdToken tk1, mdToken tk2, Module *pModule1, Module *pModule2, TokenPairList *pVisited)
+{
+#if !defined(DACCESS_COMPILE) && defined(FEATURE_TYPEEQUIVALENCE)
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (TokenPairList::InTypeEquivalenceForbiddenScope(pVisited))
+ {
+ // we limit variance on generics only to interfaces
+ return FALSE;
+ }
+ if (TokenPairList::Exists(pVisited, tk1, pModule1, tk2, pModule2))
+ {
+ // we are in the process of comparing these tokens already
+ return TRUE;
+ }
+ TokenPairList newVisited(tk1, pModule1, tk2, pModule2, pVisited);
+
+ DWORD dwAttrType1;
+ DWORD dwAttrType2;
+ mdToken tkExtends1;
+ mdToken tkExtends2;
+ IMDInternalImport *pInternalImport1 = pModule1->GetMDImport();
+ IMDInternalImport *pInternalImport2 = pModule2->GetMDImport();
+
+ // *************************************************************************
+ // 1. both types must opt into type equivalence and be able to acquire their equivalence set
+ // *************************************************************************
+ TypeIdentifierData data1;
+ TypeIdentifierData data2;
+ HRESULT hr;
+
+ IfFailThrow(hr = data1.Init(pModule1, tk1));
+ BOOL has_eq1 = (hr == S_OK);
+
+ IfFailThrow(hr = data2.Init(pModule2, tk2));
+ BOOL has_eq2 = (hr == S_OK);
+
+ if (!has_eq1 || !has_eq2)
+ return FALSE;
+
+ // Check to ensure that the types are actually opted into equivalence.
+ if (!IsTypeDefEquivalent(tk1, pModule1) || !IsTypeDefEquivalent(tk2, pModule2))
+ return FALSE;
+
+ // *************************************************************************
+ // 2. the two types have the same type identity
+ // *************************************************************************
+ if (!data1.IsEqual(data2))
+ return FALSE;
+
+ IfFailThrow(pInternalImport1->GetTypeDefProps(tk1, &dwAttrType1, &tkExtends1));
+ IfFailThrow(pInternalImport2->GetTypeDefProps(tk2, &dwAttrType2, &tkExtends2));
+
+ // *************************************************************************
+ // 2a. the two types have the same name and namespace
+ // *************************************************************************
+ {
+ LPCUTF8 pszName1;
+ LPCUTF8 pszNamespace1;
+ LPCUTF8 pszName2;
+ LPCUTF8 pszNamespace2;
+
+ IfFailThrow(pInternalImport1->GetNameOfTypeDef(tk1, &pszName1, &pszNamespace1));
+ IfFailThrow(pInternalImport2->GetNameOfTypeDef(tk2, &pszName2, &pszNamespace2));
+
+ if (strcmp(pszName1, pszName2) != 0 || strcmp(pszNamespace1, pszNamespace2) != 0)
+ {
+ return FALSE;
+ }
+ }
+
+ // *************************************************************************
+ // 2b. the two types must not be nested... or they must have an equivalent enclosing type
+ // *************************************************************************
+ {
+ if (!!IsTdNested(dwAttrType1) != !!IsTdNested(dwAttrType2))
+ {
+ return FALSE;
+ }
+
+ if (IsTdNested(dwAttrType1))
+ {
+ mdToken tkEnclosing1;
+ mdToken tkEnclosing2;
+
+ IfFailThrow(pInternalImport1->GetNestedClassProps(tk1, &tkEnclosing1));
+ IfFailThrow(pInternalImport2->GetNestedClassProps(tk2, &tkEnclosing2));
+
+ if (!CompareTypeDefsForEquivalence(tkEnclosing1, tkEnclosing2, pModule1, pModule2, pVisited))
+ {
+ return FALSE;
+ }
+ }
+ }
+
+ // *************************************************************************
+ // 2c. the two types cannot be equivalent across IntrospectionOnly/Non-introspection boundaries
+ // *************************************************************************
+ if (!!pModule1->GetAssembly()->IsIntrospectionOnly() != !!pModule2->GetAssembly()->IsIntrospectionOnly())
+ {
+ return FALSE;
+ }
+
+ // *************************************************************************
+ // 3. type is an interface, struct, enum, or delegate
+ // *************************************************************************
+ if (IsTdInterface(dwAttrType1))
+ {
+ // interface
+ if (!IsTdInterface(dwAttrType2))
+ return FALSE;
+ }
+ else
+ {
+ mdToken tdEnum = g_pEnumClass->GetCl();
+ Module *pSystemModule = g_pEnumClass->GetModule();
+
+ if (CompareTypeTokens(tkExtends1, tdEnum, pModule1, pSystemModule, &newVisited))
+ {
+ // enum (extends System.Enum)
+ if (!CompareTypeTokens(tkExtends2, tdEnum, pModule2, pSystemModule, &newVisited))
+ return FALSE;
+
+ if (!CompareStructuresForEquivalence(tk1, tk2, pModule1, pModule2, TRUE, &newVisited))
+ return FALSE;
+ }
+ else
+ {
+ mdToken tdValueType = g_pValueTypeClass->GetCl();
+ _ASSERTE(pSystemModule == g_pValueTypeClass->GetModule());
+
+ if (CompareTypeTokens(tkExtends1, tdValueType, pModule1, pSystemModule, &newVisited) &&
+ (tk1 != tdEnum || pModule1 != pSystemModule))
+ {
+ // struct (extends System.ValueType but is not System.Enum)
+ if (!CompareTypeTokens(tkExtends2, tdValueType, pModule2, pSystemModule, &newVisited) ||
+ (tk2 == tdEnum && pModule2 == pSystemModule))
+ return FALSE;
+
+ if (!CompareStructuresForEquivalence(tk1, tk2, pModule1, pModule2, FALSE, &newVisited))
+ return FALSE;
+ }
+ else
+ {
+ mdToken tdMCDelegate = g_pMulticastDelegateClass->GetCl();
+ _ASSERTE(pSystemModule == g_pMulticastDelegateClass->GetModule());
+
+ if (CompareTypeTokens(tkExtends1, tdMCDelegate, pModule1, pSystemModule, &newVisited))
+ {
+ // delegate (extends System.MulticastDelegate)
+ if (!CompareTypeTokens(tkExtends2, tdMCDelegate, pModule2, pSystemModule, &newVisited))
+ return FALSE;
+
+ if (!CompareDelegatesForEquivalence(tk1, tk2, pModule1, pModule2, &newVisited))
+ return FALSE;
+ }
+ else
+ {
+ // the type is neither interface, struct, enum, nor delegate
+ return FALSE;
+ }
+ }
+ }
+ }
+ return TRUE;
+
+#else //!defined(DACCESS_COMPILE) && defined(FEATURE_TYPEEQUIVALENCE)
+
+#ifdef DACCESS_COMPILE
+ // We shouldn't execute this code in dac builds.
+ _ASSERTE(FALSE);
+#endif
+ return (tk1 == tk2) && (pModule1 == pModule2);
+#endif //!defined(DACCESS_COMPILE) && defined(FEATURE_TYPEEQUIVALENCE)
+}
+
+
+BOOL CompareTypeTokens(mdToken tk1, mdToken tk2, Module *pModule1, Module *pModule2, TokenPairList *pVisited /*= NULL*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ HRESULT hr;
+ IMDInternalImport *pInternalImport1;
+ IMDInternalImport *pInternalImport2;
+ LPCUTF8 pszName1;
+ LPCUTF8 pszNamespace1;
+ LPCUTF8 pszName2;
+ LPCUTF8 pszNamespace2;
+ mdToken enclosingTypeTk1;
+ mdToken enclosingTypeTk2;
+
+ if (dac_cast<TADDR>(pModule1) == dac_cast<TADDR>(pModule2) &&
+ tk1 == tk2)
+ {
+ return TRUE;
+ }
+
+ pInternalImport1 = pModule1->GetMDImport();
+ if (!pInternalImport1->IsValidToken(tk1))
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"Invalid token");
+ IfFailGo(COR_E_BADIMAGEFORMAT);
+ }
+
+ pInternalImport2 = pModule2->GetMDImport();
+ if (!pInternalImport2->IsValidToken(tk2))
+ {
+ BAD_FORMAT_NOTHROW_ASSERT(!"Invalid token");
+ IfFailGo(COR_E_BADIMAGEFORMAT);
+ }
+
+ pszName1 = NULL;
+ pszNamespace1 = NULL;
+ if (TypeFromToken(tk1) == mdtTypeRef)
+ {
+ IfFailGo(pInternalImport1->GetNameOfTypeRef(tk1, &pszNamespace1, &pszName1));
+ }
+ else if (TypeFromToken(tk1) == mdtTypeDef)
+ {
+ if (TypeFromToken(tk2) == mdtTypeDef)
+ {
+#ifdef FEATURE_TYPEEQUIVALENCE
+ // two type defs can't be the same unless they are identical or resolve to
+ // equivalent types (equivalence based on GUID and TypeIdentifierAttribute)
+ return CompareTypeDefsForEquivalence(tk1, tk2, pModule1, pModule2, pVisited);
+#else // FEATURE_TYPEEQUIVALENCE
+ // two type defs can't be the same unless they are identical
+ return FALSE;
+#endif // FEATURE_TYPEEQUIVALENCE
+ }
+ IfFailGo(pInternalImport1->GetNameOfTypeDef(tk1, &pszName1, &pszNamespace1));
+ }
+ else
+ {
+ return FALSE; // comparing a type against a module or assemblyref, no match
+ }
+
+ pszName2 = NULL;
+ pszNamespace2 = NULL;
+ if (TypeFromToken(tk2) == mdtTypeRef)
+ {
+ IfFailGo(pInternalImport2->GetNameOfTypeRef(tk2, &pszNamespace2, &pszName2));
+ }
+ else if (TypeFromToken(tk2) == mdtTypeDef)
+ {
+ IfFailGo(pInternalImport2->GetNameOfTypeDef(tk2, &pszName2, &pszNamespace2));
+ }
+ else
+ {
+ return FALSE; // comparing a type against a module or assemblyref, no match
+ }
+
+ _ASSERTE((pszNamespace1 != NULL) && (pszNamespace2 != NULL));
+ if (strcmp(pszName1, pszName2) != 0 || strcmp(pszNamespace1, pszNamespace2) != 0)
+ {
+ return FALSE;
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ // OK, the names match; now check whether the types are nested and, if so, that the enclosing types are the same
+
+ enclosingTypeTk1 = mdTokenNil;
+ if (TypeFromToken(tk1) == mdtTypeRef)
+ {
+ IfFailGo(pInternalImport1->GetResolutionScopeOfTypeRef(tk1, &enclosingTypeTk1));
+ if (enclosingTypeTk1 == mdTypeRefNil)
+ {
+ enclosingTypeTk1 = mdTokenNil;
+ }
+ }
+ else
+ {
+ if (FAILED(hr = pInternalImport1->GetNestedClassProps(tk1, &enclosingTypeTk1)))
+ {
+ if (hr != CLDB_E_RECORD_NOTFOUND)
+ {
+ IfFailGo(hr);
+ }
+ enclosingTypeTk1 = mdTokenNil;
+ }
+ }
+
+ enclosingTypeTk2 = mdTokenNil;
+ if (TypeFromToken(tk2) == mdtTypeRef)
+ {
+ IfFailGo(pInternalImport2->GetResolutionScopeOfTypeRef(tk2, &enclosingTypeTk2));
+ if (enclosingTypeTk2 == mdTypeRefNil)
+ {
+ enclosingTypeTk2 = mdTokenNil;
+ }
+ }
+ else
+ {
+ if (FAILED(hr = pInternalImport2->GetNestedClassProps(tk2, &enclosingTypeTk2)))
+ {
+ if (hr != CLDB_E_RECORD_NOTFOUND)
+ {
+ IfFailGo(hr);
+ }
+ enclosingTypeTk2 = mdTokenNil;
+ }
+ }
+
+ if (TypeFromToken(enclosingTypeTk1) == mdtTypeRef || TypeFromToken(enclosingTypeTk1) == mdtTypeDef)
+ {
+ if (!CompareTypeTokens(enclosingTypeTk1, enclosingTypeTk2, pModule1, pModule2, pVisited))
+ return FALSE;
+
+ // TODO: We could return TRUE if we knew that type equivalence was not exercised during the previous call.
+ }
+ else
+ {
+ // Check if tk1 is non-nested, but tk2 is nested
+ if (TypeFromToken(enclosingTypeTk2) == mdtTypeRef || TypeFromToken(enclosingTypeTk2) == mdtTypeDef)
+ return FALSE;
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ // OK, we have non-nested types or the enclosing types are equivalent
+
+
+ // Do not load the type! (Or else you may run into circular dependency loading problems.)
+ Module* pFoundModule1;
+ mdToken foundTypeDefToken1;
+ if (!ClassLoader::ResolveTokenToTypeDefThrowing(pModule1,
+ tk1,
+ &pFoundModule1,
+ &foundTypeDefToken1))
+ {
+ return FALSE;
+ }
+ _ASSERTE(TypeFromToken(foundTypeDefToken1) == mdtTypeDef);
+
+ Module* pFoundModule2;
+ mdToken foundTypeDefToken2;
+ if (!ClassLoader::ResolveTokenToTypeDefThrowing(pModule2,
+ tk2,
+ &pFoundModule2,
+ &foundTypeDefToken2))
+ {
+ return FALSE;
+ }
+ _ASSERTE(TypeFromToken(foundTypeDefToken2) == mdtTypeDef);
+
+ _ASSERTE(TypeFromToken(foundTypeDefToken1) == mdtTypeDef && TypeFromToken(foundTypeDefToken2) == mdtTypeDef);
+ return CompareTypeTokens(foundTypeDefToken1, foundTypeDefToken2, pFoundModule1, pFoundModule2, pVisited);
+
+ErrExit:
+#ifdef DACCESS_COMPILE
+ ThrowHR(hr);
+#else
+ EEFileLoadException::Throw(pModule2->GetFile(), hr);
+#endif // DACCESS_COMPILE
+} // CompareTypeTokens
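+
+// Illustrative sketch (hypothetical names, not part of the runtime logic): a
+// caller asking whether two tokens from different modules name the same type.
+// Both may be mdtTypeRef entries, e.g. a ref to System.IDisposable in module A
+// and a ref to System.IDisposable in module B; the routine above first matches
+// name, namespace and nesting, and only then resolves both tokens to their
+// defining TypeDefs and compares those:
+//
+//   mdToken tkRef1 = ...; // a typeref in pModuleA's metadata
+//   mdToken tkRef2 = ...; // a typeref in pModuleB's metadata
+//   BOOL fSame = CompareTypeTokens(tkRef1, tkRef2, pModuleA, pModuleB, NULL);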
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+//---------------------------------------------------------------------------------------
+//
+// Compare the next elements in two sigs.
+//
+// static
+BOOL
+MetaSig::CompareElementType(
+ PCCOR_SIGNATURE & pSig1,
+ PCCOR_SIGNATURE & pSig2,
+ PCCOR_SIGNATURE pEndSig1,
+ PCCOR_SIGNATURE pEndSig2,
+ Module * pModule1,
+ Module * pModule2,
+ const Substitution * pSubst1,
+ const Substitution * pSubst2,
+ TokenPairList * pVisited) // = NULL
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ redo:
+    // We jump here if the element type was an ELEMENT_TYPE_CMOD_* prefix.
+    // The caller expects us to handle CMODs but not to present them as types on their own.
+
+ if ((pSig1 >= pEndSig1) || (pSig2 >= pEndSig2))
+ { // End of sig encountered prematurely
+ return FALSE;
+ }
+
+ if ((*pSig2 == ELEMENT_TYPE_VAR) && (pSubst2 != NULL) && !pSubst2->GetInst().IsNull())
+ {
+ SigPointer inst = pSubst2->GetInst();
+ pSig2++;
+ DWORD index;
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig2, pEndSig2, &index));
+
+ for (DWORD i = 0; i < index; i++)
+ {
+ IfFailThrow(inst.SkipExactlyOne());
+ }
+ PCCOR_SIGNATURE pSig3 = inst.GetPtr();
+ IfFailThrow(inst.SkipExactlyOne());
+ PCCOR_SIGNATURE pEndSig3 = inst.GetPtr();
+
+ return CompareElementType(
+ pSig1,
+ pSig3,
+ pEndSig1,
+ pEndSig3,
+ pModule1,
+ pSubst2->GetModule(),
+ pSubst1,
+ pSubst2->GetNext(),
+ pVisited);
+ }
+
+ if ((*pSig1 == ELEMENT_TYPE_VAR) && (pSubst1 != NULL) && !pSubst1->GetInst().IsNull())
+ {
+ SigPointer inst = pSubst1->GetInst();
+ pSig1++;
+ DWORD index;
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig1, pEndSig1, &index));
+
+ for (DWORD i = 0; i < index; i++)
+ {
+ IfFailThrow(inst.SkipExactlyOne());
+ }
+ PCCOR_SIGNATURE pSig3 = inst.GetPtr();
+ IfFailThrow(inst.SkipExactlyOne());
+ PCCOR_SIGNATURE pEndSig3 = inst.GetPtr();
+
+ return CompareElementType(
+ pSig3,
+ pSig2,
+ pEndSig3,
+ pEndSig2,
+ pSubst1->GetModule(),
+ pModule2,
+ pSubst1->GetNext(),
+ pSubst2,
+ pVisited);
+ }
+
+ CorElementType Type1 = ELEMENT_TYPE_MAX; // initialize to illegal
+ CorElementType Type2 = ELEMENT_TYPE_MAX; // initialize to illegal
+
+ IfFailThrow(CorSigUncompressElementType_EndPtr(pSig1, pEndSig1, &Type1));
+ IfFailThrow(CorSigUncompressElementType_EndPtr(pSig2, pEndSig2, &Type2));
+
+ if (Type1 == ELEMENT_TYPE_INTERNAL)
+ {
+ // this check is not functional in DAC and provides no security against a malicious dump
+ // the DAC is prepared to receive an invalid type handle
+#ifndef DACCESS_COMPILE
+ if (pModule1->IsSigInIL(pSig1))
+ {
+ THROW_BAD_FORMAT(BFA_BAD_SIGNATURE, (Module *)pModule1);
+ }
+#endif
+
+ }
+
+ if (Type2 == ELEMENT_TYPE_INTERNAL)
+ {
+ // this check is not functional in DAC and provides no security against a malicious dump
+ // the DAC is prepared to receive an invalid type handle
+#ifndef DACCESS_COMPILE
+ if (pModule2->IsSigInIL(pSig2))
+ {
+ THROW_BAD_FORMAT(BFA_BAD_SIGNATURE, (Module *)pModule2);
+ }
+#endif
+ }
+
+ if (Type1 != Type2)
+ {
+ if ((Type1 == ELEMENT_TYPE_INTERNAL) || (Type2 == ELEMENT_TYPE_INTERNAL))
+ {
+ TypeHandle hInternal;
+ CorElementType eOtherType;
+ Module * pOtherModule;
+
+ // One type is already loaded, collect all the necessary information to identify the other type.
+ if (Type1 == ELEMENT_TYPE_INTERNAL)
+ {
+ IfFailThrow(CorSigUncompressPointer_EndPtr(pSig1, pEndSig1, (void **)&hInternal));
+
+ eOtherType = Type2;
+ pOtherModule = pModule2;
+ }
+ else
+ {
+ IfFailThrow(CorSigUncompressPointer_EndPtr(pSig2, pEndSig2, (void **)&hInternal));
+
+ eOtherType = Type1;
+ pOtherModule = pModule1;
+ }
+
+ // Internal types can only correspond to types or value types.
+ switch (eOtherType)
+ {
+ case ELEMENT_TYPE_OBJECT:
+ {
+ return (hInternal.AsMethodTable() == g_pObjectClass);
+ }
+ case ELEMENT_TYPE_STRING:
+ {
+ return (hInternal.AsMethodTable() == g_pStringClass);
+ }
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_CLASS:
+ {
+ mdToken tkOther;
+ if (Type1 == ELEMENT_TYPE_INTERNAL)
+ {
+ IfFailThrow(CorSigUncompressToken_EndPtr(pSig2, pEndSig2, &tkOther));
+ }
+ else
+ {
+ IfFailThrow(CorSigUncompressToken_EndPtr(pSig1, pEndSig1, &tkOther));
+ }
+ TypeHandle hOtherType;
+
+ hOtherType = ClassLoader::LoadTypeDefOrRefThrowing(
+ pOtherModule,
+ tkOther,
+ ClassLoader::ReturnNullIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef);
+
+ return (hInternal == hOtherType);
+ }
+ default:
+ {
+ return FALSE;
+ }
+ }
+
+#ifdef _DEBUG
+ // Shouldn't get here.
+ _ASSERTE(FALSE);
+ return FALSE;
+#endif
+ }
+ else
+ {
+ return FALSE; // types must be the same
+ }
+ }
+
+ switch (Type1)
+ {
+ default:
+ {
+ // Unknown type!
+ THROW_BAD_FORMAT(BFA_BAD_COMPLUS_SIG, pModule1);
+ }
+
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_VOID:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_TYPEDBYREF:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_OBJECT:
+ {
+ return TRUE;
+ }
+
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ {
+ DWORD varNum1;
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig1, pEndSig1, &varNum1));
+ DWORD varNum2;
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig2, pEndSig2, &varNum2));
+
+ return (varNum1 == varNum2);
+ }
+
+ case ELEMENT_TYPE_CMOD_REQD:
+ case ELEMENT_TYPE_CMOD_OPT:
+ {
+ mdToken tk1, tk2;
+
+ IfFailThrow(CorSigUncompressToken_EndPtr(pSig1, pEndSig1, &tk1));
+ IfFailThrow(CorSigUncompressToken_EndPtr(pSig2, pEndSig2, &tk2));
+
+#ifndef DACCESS_COMPILE
+ if (!CompareTypeDefOrRefOrSpec(
+ pModule1,
+ tk1,
+ pSubst1,
+ pModule2,
+ tk2,
+ pSubst2,
+ pVisited))
+ {
+ return FALSE;
+ }
+#endif //!DACCESS_COMPILE
+
+ goto redo;
+ }
+
+ // These take an additional argument, which is the element type
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_BYREF:
+ {
+ if (!CompareElementType(
+ pSig1,
+ pSig2,
+ pEndSig1,
+ pEndSig2,
+ pModule1,
+ pModule2,
+ pSubst1,
+ pSubst2,
+ pVisited))
+ {
+ return FALSE;
+ }
+ return TRUE;
+ }
+
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_CLASS:
+ {
+ mdToken tk1, tk2;
+
+ IfFailThrow(CorSigUncompressToken_EndPtr(pSig1, pEndSig1, &tk1));
+ IfFailThrow(CorSigUncompressToken_EndPtr(pSig2, pEndSig2, &tk2));
+
+ return CompareTypeTokens(tk1, tk2, pModule1, pModule2, pVisited);
+ }
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+ // Compare calling conventions
+ // Note: We used to read them as compressed integers, which is wrong, but works for correct
+ // signatures as the highest bit is always 0 for calling conventions
+ CorElementType callingConvention1 = ELEMENT_TYPE_MAX; // initialize to illegal
+ IfFailThrow(CorSigUncompressElementType_EndPtr(pSig1, pEndSig1, &callingConvention1));
+ CorElementType callingConvention2 = ELEMENT_TYPE_MAX; // initialize to illegal
+ IfFailThrow(CorSigUncompressElementType_EndPtr(pSig2, pEndSig2, &callingConvention2));
+ if (callingConvention1 != callingConvention2)
+ {
+ return FALSE;
+ }
+
+ DWORD argCnt1;
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig1, pEndSig1, &argCnt1));
+ DWORD argCnt2;
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig2, pEndSig2, &argCnt2));
+ if (argCnt1 != argCnt2)
+ {
+ return FALSE;
+ }
+
+ // Compressed integer values can be only 0-0x1FFFFFFF
+ _ASSERTE(argCnt1 < MAXDWORD);
+ // Add return parameter into the parameter count (it cannot overflow)
+ argCnt1++;
+
+ TokenPairList newVisited = TokenPairList::AdjustForTypeEquivalenceForbiddenScope(pVisited);
+ // Compare all parameters, incl. return parameter
+ while (argCnt1 > 0)
+ {
+ if (!CompareElementType(
+ pSig1,
+ pSig2,
+ pEndSig1,
+ pEndSig2,
+ pModule1,
+ pModule2,
+ pSubst1,
+ pSubst2,
+ &newVisited))
+ {
+ return FALSE;
+ }
+ --argCnt1;
+ }
+ return TRUE;
+ }
+
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ TokenPairList newVisited = TokenPairList::AdjustForTypeSpec(
+ pVisited,
+ pModule1,
+ pSig1 - 1,
+ (DWORD)(pEndSig1 - pSig1) + 1);
+ TokenPairList newVisitedAlwaysForbidden = TokenPairList::AdjustForTypeEquivalenceForbiddenScope(pVisited);
+
+ // Type constructors - The actual type is never permitted to participate in type equivalence.
+ if (!CompareElementType(
+ pSig1,
+ pSig2,
+ pEndSig1,
+ pEndSig2,
+ pModule1,
+ pModule2,
+ pSubst1,
+ pSubst2,
+ &newVisitedAlwaysForbidden))
+ {
+ return FALSE;
+ }
+
+ DWORD argCnt1;
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig1, pEndSig1, &argCnt1));
+ DWORD argCnt2;
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig2, pEndSig2, &argCnt2));
+ if (argCnt1 != argCnt2)
+ {
+ return FALSE;
+ }
+
+ while (argCnt1 > 0)
+ {
+ if (!CompareElementType(
+ pSig1,
+ pSig2,
+ pEndSig1,
+ pEndSig2,
+ pModule1,
+ pModule2,
+ pSubst1,
+ pSubst2,
+ &newVisited))
+ {
+ return FALSE;
+ }
+ --argCnt1;
+ }
+ return TRUE;
+ }
+
+ case ELEMENT_TYPE_ARRAY:
+ {
+ // syntax: ARRAY <base type> rank <count n> <size 1> .... <size n> <lower bound m>
+ // <lb 1> .... <lb m>
+ DWORD rank1, rank2;
+ DWORD dimension_sizes1, dimension_sizes2;
+ DWORD dimension_lowerb1, dimension_lowerb2;
+ DWORD i;
+
+ // element type
+ if (!CompareElementType(
+ pSig1,
+ pSig2,
+ pEndSig1,
+ pEndSig2,
+ pModule1,
+ pModule2,
+ pSubst1,
+ pSubst2,
+ pVisited))
+ {
+ return FALSE;
+ }
+
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig1, pEndSig1, &rank1));
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig2, pEndSig2, &rank2));
+ if (rank1 != rank2)
+ {
+ return FALSE;
+ }
+ // A zero ends the array spec
+ if (rank1 == 0)
+ {
+ return TRUE;
+ }
+
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig1, pEndSig1, &dimension_sizes1));
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig2, pEndSig2, &dimension_sizes2));
+ if (dimension_sizes1 != dimension_sizes2)
+ {
+ return FALSE;
+ }
+
+ for (i = 0; i < dimension_sizes1; i++)
+ {
+ DWORD size1, size2;
+
+ if (pSig1 == pEndSig1)
+ { // premature end ok
+ return TRUE;
+ }
+
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig1, pEndSig1, &size1));
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig2, pEndSig2, &size2));
+ if (size1 != size2)
+ {
+ return FALSE;
+ }
+ }
+
+ if (pSig1 == pEndSig1)
+ { // premature end ok
+ return TRUE;
+ }
+
+ // # dimensions for lower bounds
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig1, pEndSig1, &dimension_lowerb1));
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig2, pEndSig2, &dimension_lowerb2));
+ if (dimension_lowerb1 != dimension_lowerb2)
+ {
+ return FALSE;
+ }
+
+ for (i = 0; i < dimension_lowerb1; i++)
+ {
+ DWORD size1, size2;
+
+ if (pSig1 == pEndSig1)
+ { // premature end ok
+ return TRUE;
+ }
+
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig1, pEndSig1, &size1));
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig2, pEndSig2, &size2));
+ if (size1 != size2)
+ {
+ return FALSE;
+ }
+ }
+ return TRUE;
+ }
+
+ case ELEMENT_TYPE_INTERNAL:
+ {
+ TypeHandle hType1, hType2;
+
+ IfFailThrow(CorSigUncompressPointer_EndPtr(pSig1, pEndSig1, (void **)&hType1));
+ IfFailThrow(CorSigUncompressPointer_EndPtr(pSig2, pEndSig2, (void **)&hType2));
+
+ return (hType1 == hType2);
+ }
+ } // switch
+ // Unreachable
+} // MetaSig::CompareElementType
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
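+
+// Worked example (for the reader; hypothetical blobs): comparing the encodings
+// of "int32[]" from two modules. Both sig blobs are
+//   { ELEMENT_TYPE_SZARRAY, ELEMENT_TYPE_I4 }
+// so CompareElementType matches ELEMENT_TYPE_SZARRAY, recurses, matches
+// ELEMENT_TYPE_I4 as a primitive and returns TRUE. An instantiation such as
+// "List<!0>" is encoded as
+//   { ELEMENT_TYPE_GENERICINST, ELEMENT_TYPE_CLASS, <token>, 1, ELEMENT_TYPE_VAR, 0 }
+// and recurses once for the generic type and once per type argument, comparing
+// VAR numbers literally unless a Substitution supplies an instantiation.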
+
+
+//---------------------------------------------------------------------------------------
+//
+BOOL
+MetaSig::CompareTypeDefsUnderSubstitutions(
+ MethodTable * pTypeDef1,
+ MethodTable * pTypeDef2,
+ const Substitution * pSubst1,
+ const Substitution * pSubst2,
+ TokenPairList * pVisited)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ bool fSameTypeDef = (pTypeDef1->GetTypeDefRid() == pTypeDef2->GetTypeDefRid()) && (pTypeDef1->GetModule() == pTypeDef2->GetModule());
+
+ if (!fSameTypeDef)
+ {
+ if (!pTypeDef1->GetClass()->IsEquivalentType() || !pTypeDef2->GetClass()->IsEquivalentType() || TokenPairList::InTypeEquivalenceForbiddenScope(pVisited))
+ {
+ return FALSE;
+ }
+ else
+ {
+ if (!CompareTypeDefsForEquivalence(pTypeDef1->GetCl(), pTypeDef2->GetCl(), pTypeDef1->GetModule(), pTypeDef2->GetModule(), pVisited))
+ {
+ return FALSE;
+ }
+ }
+ }
+
+ if (pTypeDef1->GetNumGenericArgs() != pTypeDef2->GetNumGenericArgs())
+ return FALSE;
+
+ if (pTypeDef1->GetNumGenericArgs() == 0)
+ return TRUE;
+
+ if ((pSubst1 == NULL) || (pSubst2 == NULL) || pSubst1->GetInst().IsNull() || pSubst2->GetInst().IsNull())
+ return FALSE;
+
+ SigPointer inst1 = pSubst1->GetInst();
+ SigPointer inst2 = pSubst2->GetInst();
+ for (DWORD i = 0; i < pTypeDef1->GetNumGenericArgs(); i++)
+ {
+ PCCOR_SIGNATURE startInst1 = inst1.GetPtr();
+ IfFailThrow(inst1.SkipExactlyOne());
+ PCCOR_SIGNATURE endInst1ptr = inst1.GetPtr();
+ PCCOR_SIGNATURE startInst2 = inst2.GetPtr();
+ IfFailThrow(inst2.SkipExactlyOne());
+ PCCOR_SIGNATURE endInst2ptr = inst2.GetPtr();
+ if (!CompareElementType(
+ startInst1,
+ startInst2,
+ endInst1ptr,
+ endInst2ptr,
+ pSubst1->GetModule(),
+ pSubst2->GetModule(),
+ pSubst1->GetNext(),
+ pSubst2->GetNext(),
+ pVisited))
+ {
+ return FALSE;
+ }
+ }
+ return TRUE;
+
+} // MetaSig::CompareTypeDefsUnderSubstitutions
+
+//---------------------------------------------------------------------------------------
+//
+BOOL
+TypeHandleCompareHelper(
+ TypeHandle th1,
+ TypeHandle th2)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+#ifndef DACCESS_COMPILE
+ return th1.IsEquivalentTo(th2);
+#else
+ return TRUE;
+#endif // #ifndef DACCESS_COMPILE
+}
+
+//---------------------------------------------------------------------------------------
+//
+//static
+BOOL
+MetaSig::CompareMethodSigs(
+ MetaSig & msig1,
+ MetaSig & msig2,
+ BOOL ignoreCallconv)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ if (!ignoreCallconv &&
+ ((msig1.GetCallingConventionInfo() & IMAGE_CEE_CS_CALLCONV_MASK)
+ != (msig2.GetCallingConventionInfo() & IMAGE_CEE_CS_CALLCONV_MASK)))
+ {
+ return FALSE; // calling convention mismatch
+ }
+
+ if (msig1.NumFixedArgs() != msig2.NumFixedArgs())
+ return FALSE; // number of arguments don't match
+
+ // check that the argument types are equal
+    for (DWORD i = 0; i < msig1.NumFixedArgs(); i++) //@GENERICSVER: does this really do the return type too?
+ {
+ CorElementType et1 = msig1.NextArg();
+ CorElementType et2 = msig2.NextArg();
+ if (et1 != et2)
+ return FALSE;
+ if (!CorTypeInfo::IsPrimitiveType(et1))
+ {
+ if (!TypeHandleCompareHelper(msig1.GetLastTypeHandleThrowing(), msig2.GetLastTypeHandleThrowing()))
+ return FALSE;
+ }
+ }
+
+ CorElementType ret1 = msig1.GetReturnType();
+ CorElementType ret2 = msig2.GetReturnType();
+ if (ret1 != ret2)
+ return FALSE;
+
+ if (!CorTypeInfo::IsPrimitiveType(ret1))
+ {
+ return TypeHandleCompareHelper(msig1.GetRetTypeHandleThrowing(), msig2.GetRetTypeHandleThrowing());
+ }
+
+ return TRUE;
+}
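+
+// Hypothetical usage sketch (pMD1/pMD2 are assumed MethodDescs): this overload
+// compares already-loaded TypeHandles via IsEquivalentTo rather than walking
+// raw sig bytes like the PCCOR_SIGNATURE overload below.
+//
+//   MetaSig msig1(pMD1);
+//   MetaSig msig2(pMD2);
+//   BOOL fMatch = MetaSig::CompareMethodSigs(msig1, msig2, FALSE); // FALSE: also check callconv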
+
+//---------------------------------------------------------------------------------------
+//
+//static
+HRESULT
+MetaSig::CompareMethodSigsNT(
+ PCCOR_SIGNATURE pSignature1,
+ DWORD cSig1,
+ Module * pModule1,
+ const Substitution * pSubst1,
+ PCCOR_SIGNATURE pSignature2,
+ DWORD cSig2,
+ Module * pModule2,
+ const Substitution * pSubst2,
+ TokenPairList * pVisited) //= NULL
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ if (CompareMethodSigs(pSignature1, cSig1, pModule1, pSubst1, pSignature2, cSig2, pModule2, pSubst2, pVisited))
+ hr = S_OK;
+ else
+ hr = S_FALSE;
+ }
+ EX_CATCH_HRESULT_NO_ERRORINFO(hr);
+ return hr;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Compare two method sigs and return whether they are the same.
+// @GENERICS: instantiation of the type variables in the second signature
+//
+//static
+BOOL
+MetaSig::CompareMethodSigs(
+ PCCOR_SIGNATURE pSignature1,
+ DWORD cSig1,
+ Module * pModule1,
+ const Substitution * pSubst1,
+ PCCOR_SIGNATURE pSignature2,
+ DWORD cSig2,
+ Module * pModule2,
+ const Substitution * pSubst2,
+ TokenPairList * pVisited) //= NULL
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ PCCOR_SIGNATURE pSig1 = pSignature1;
+ PCCOR_SIGNATURE pSig2 = pSignature2;
+ PCCOR_SIGNATURE pEndSig1 = pSignature1 + cSig1;
+ PCCOR_SIGNATURE pEndSig2 = pSignature2 + cSig2;
+ DWORD ArgCount1;
+ DWORD ArgCount2;
+ DWORD i;
+
+    // If the scopes are the same and the sig blobs are identical, we can return early.
+    // If the sigs differ but the scope is the same, we can't return yet, in
+    // case there are two AssemblyRefs pointing to the same assembly or such.
+ if ((pModule1 == pModule2) &&
+ (cSig1 == cSig2) &&
+ (pSubst1 == NULL) &&
+ (pSubst2 == NULL) &&
+ (memcmp(pSig1, pSig2, cSig1) == 0))
+ {
+ return TRUE;
+ }
+
+ if ((*pSig1 & ~CORINFO_CALLCONV_PARAMTYPE) != (*pSig2 & ~CORINFO_CALLCONV_PARAMTYPE))
+ { // Calling convention or hasThis mismatch
+ return FALSE;
+ }
+
+ __int8 callConv = *pSig1;
+
+ pSig1++;
+ pSig2++;
+
+ if (callConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ {
+ DWORD TyArgCount1;
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig1, pEndSig1, &TyArgCount1));
+ DWORD TyArgCount2;
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig2, pEndSig2, &TyArgCount2));
+
+ if (TyArgCount1 != TyArgCount2)
+ return FALSE;
+ }
+
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig1, pEndSig1, &ArgCount1));
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig2, pEndSig2, &ArgCount2));
+
+ if (ArgCount1 != ArgCount2)
+ {
+ if ((callConv & IMAGE_CEE_CS_CALLCONV_MASK) != IMAGE_CEE_CS_CALLCONV_VARARG)
+ return FALSE;
+
+ // Signature #1 is the caller. We proceed until we hit the sentinel, or we hit
+ // the end of the signature (which is an implied sentinel). We never worry about
+ // what follows the sentinel, because that is the ... part, which is not
+ // involved in matching.
+ //
+ // Theoretically, it's illegal for a sentinel to be the last element in the
+ // caller's signature, because it's redundant. We don't waste our time checking
+ // that case, but the metadata validator should. Also, it is always illegal
+ // for a sentinel to appear in a callee's signature. We assert against this,
+ // but in the shipping product the comparison would simply fail.
+ //
+ // Signature #2 is the callee. We must hit the exact end of the callee, because
+ // we are trying to match on everything up to the variable part. This allows us
+ // to correctly handle overloads, where there are a number of varargs methods
+ // to pick from, like m1(int,...) and m2(int,int,...), etc.
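+        //
+        // For illustration (hypothetical sigs, S = ELEMENT_TYPE_SENTINEL):
+        //   caller: void(int32, string, S, float64)   ArgCount1 == 3
+        //   callee: void(int32, string)               ArgCount2 == 2
+        // The loop below compares void/void, int32/int32 and string/string,
+        // then reads S on the caller exactly when the callee is exhausted
+        // (i == 3 > ArgCount2), so this candidate matches. A callee
+        // void(int32) would instead fail at the string slot.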
+
+ // <= because we want to include a check of the return value!
+ for (i = 0; i <= ArgCount1; i++)
+ {
+ // We may be just going out of bounds on the callee, but no further than that.
+ _ASSERTE(i <= ArgCount2 + 1);
+
+ // If we matched all the way on the caller, is the callee now complete?
+ if (*pSig1 == ELEMENT_TYPE_SENTINEL)
+ return (i > ArgCount2);
+
+ // if we have more to compare on the caller side, but the callee side is
+ // exhausted, this isn't our match
+ if (i > ArgCount2)
+ return FALSE;
+
+ // This would be a breaking change to make this throw... see comment above
+            _ASSERTE(*pSig2 != ELEMENT_TYPE_SENTINEL);
+
+ // We are in bounds on both sides. Compare the element.
+ if (!CompareElementType(
+ pSig1,
+ pSig2,
+ pEndSig1,
+ pEndSig2,
+ pModule1,
+ pModule2,
+ pSubst1,
+ pSubst2,
+ pVisited))
+ {
+ return FALSE;
+ }
+ }
+
+ // If we didn't consume all of the callee signature, then we failed.
+ if (i <= ArgCount2)
+ return FALSE;
+
+ return TRUE;
+ }
+
+ // do return type as well
+ for (i = 0; i <= ArgCount1; i++)
+ {
+ if (!CompareElementType(
+ pSig1,
+ pSig2,
+ pEndSig1,
+ pEndSig2,
+ pModule1,
+ pModule2,
+ pSubst1,
+ pSubst2,
+ pVisited))
+ {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+} // MetaSig::CompareMethodSigs
+
+//---------------------------------------------------------------------------------------
+//
+//static
+BOOL MetaSig::CompareFieldSigs(
+ PCCOR_SIGNATURE pSignature1,
+ DWORD cSig1,
+ Module * pModule1,
+ PCCOR_SIGNATURE pSignature2,
+ DWORD cSig2,
+ Module * pModule2,
+ TokenPairList * pVisited) //= NULL
+{
+ WRAPPER_NO_CONTRACT;
+
+ PCCOR_SIGNATURE pSig1 = pSignature1;
+ PCCOR_SIGNATURE pSig2 = pSignature2;
+ PCCOR_SIGNATURE pEndSig1;
+ PCCOR_SIGNATURE pEndSig2;
+
+#if 0
+ // <TODO>@TODO: If scopes are the same, use identity rule - for now, don't, so that we test the code paths</TODO>
+ if (cSig1 != cSig2)
+ return(FALSE); // sigs must be same size if they are in the same scope
+#endif
+
+ if (*pSig1 != *pSig2)
+ return(FALSE); // calling convention, must be IMAGE_CEE_CS_CALLCONV_FIELD
+
+ pEndSig1 = pSig1 + cSig1;
+ pEndSig2 = pSig2 + cSig2;
+
+ return(CompareElementType(++pSig1, ++pSig2, pEndSig1, pEndSig2, pModule1, pModule2, NULL, NULL, pVisited));
+}
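+
+// Field signature layout reminder (illustrative): a field sig is the single
+// calling-convention byte IMAGE_CEE_CS_CALLCONV_FIELD (0x06) followed by
+// exactly one element type, so a field of type int32 is the blob
+//   { 0x06, ELEMENT_TYPE_I4 }
+// and the comparison above is one convention-byte check plus one
+// CompareElementType call.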
+
+#ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+//static
+BOOL
+MetaSig::CompareElementTypeToToken(
+ PCCOR_SIGNATURE & pSig1,
+ PCCOR_SIGNATURE pEndSig1, // end of sig1
+ mdToken tk2,
+ Module * pModule1,
+ Module * pModule2,
+ const Substitution * pSubst1,
+ TokenPairList * pVisited)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ _ASSERTE((TypeFromToken(tk2) == mdtTypeDef) ||
+ (TypeFromToken(tk2) == mdtTypeRef));
+
+ if (pSig1 >= pEndSig1)
+ { // End of sig encountered prematurely
+ return FALSE;
+ }
+
+ if ((*pSig1 == ELEMENT_TYPE_VAR) && (pSubst1 != NULL) && !pSubst1->GetInst().IsNull())
+ {
+ SigPointer inst = pSubst1->GetInst();
+ pSig1++;
+ DWORD index;
+ IfFailThrow(CorSigUncompressData_EndPtr(pSig1, pEndSig1, &index));
+
+ for (DWORD i = 0; i < index; i++)
+ {
+ IfFailThrow(inst.SkipExactlyOne());
+ }
+ PCCOR_SIGNATURE pSig3 = inst.GetPtr();
+ IfFailThrow(inst.SkipExactlyOne());
+ PCCOR_SIGNATURE pEndSig3 = inst.GetPtr();
+
+ return CompareElementTypeToToken(
+ pSig3,
+ pEndSig3,
+ tk2,
+ pSubst1->GetModule(),
+ pModule2,
+ pSubst1->GetNext(),
+ pVisited);
+ }
+
+ CorElementType Type1 = ELEMENT_TYPE_MAX; // initialize to illegal
+
+    IfFailThrow(CorSigUncompressElementType_EndPtr(pSig1, pEndSig1, &Type1));
+
+    // E_T_INTERNAL is not expected here; assert in debug builds, but keep the
+    // retail check below as defense in depth against malformed signatures.
+    _ASSERTE(Type1 != ELEMENT_TYPE_INTERNAL);
+
+ if (Type1 == ELEMENT_TYPE_INTERNAL)
+ {
+ // this check is not functional in DAC and provides no security against a malicious dump
+ // the DAC is prepared to receive an invalid type handle
+#ifndef DACCESS_COMPILE
+ if (pModule1->IsSigInIL(pSig1))
+ {
+ THROW_BAD_FORMAT(BFA_BAD_SIGNATURE, (Module*)pModule1);
+ }
+#endif
+ }
+
+ switch (Type1)
+ {
+ default:
+ { // Unknown type!
+ THROW_BAD_FORMAT(BFA_BAD_COMPLUS_SIG, pModule1);
+ }
+
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_VOID:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_TYPEDBYREF:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_OBJECT:
+ {
+ break;
+ }
+
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ {
+ return FALSE;
+ }
+ case ELEMENT_TYPE_CMOD_REQD:
+ case ELEMENT_TYPE_CMOD_OPT:
+ {
+ return FALSE;
+ }
+ // These take an additional argument, which is the element type
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_BYREF:
+ {
+ return FALSE;
+ }
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_CLASS:
+ {
+ mdToken tk1;
+
+ IfFailThrow(CorSigUncompressToken_EndPtr(pSig1, pEndSig1, &tk1));
+
+ return CompareTypeTokens(
+ tk1,
+ tk2,
+ pModule1,
+ pModule2,
+ pVisited);
+ }
+ case ELEMENT_TYPE_FNPTR:
+ {
+ return FALSE;
+ }
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ return FALSE;
+ }
+ case ELEMENT_TYPE_ARRAY:
+ {
+ return FALSE;
+ }
+ case ELEMENT_TYPE_INTERNAL:
+ {
+ return FALSE;
+ }
+ }
+
+ return CompareTypeTokens(
+ MscorlibBinder::GetElementType(Type1)->GetCl(),
+ tk2,
+ MscorlibBinder::GetModule(),
+ pModule2,
+ pVisited);
+} // MetaSig::CompareElementTypeToToken
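+
+// Note on the fall-through above (informal): a primitive element type on the
+// signature side is compared to tk2 by rewriting it as its mscorlib TypeDef,
+// e.g. ELEMENT_TYPE_I4 becomes the System.Int32 TypeDef token via
+// MscorlibBinder::GetElementType, which is then handed to CompareTypeTokens
+// against tk2 in pModule2.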
+
+/* static */
+BOOL MetaSig::CompareTypeSpecToToken(mdTypeSpec tk1,
+ mdToken tk2,
+ Module *pModule1,
+ Module *pModule2,
+ const Substitution *pSubst1,
+ TokenPairList *pVisited)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(TypeFromToken(tk1) == mdtTypeSpec);
+ _ASSERTE(TypeFromToken(tk2) == mdtTypeDef ||
+ TypeFromToken(tk2) == mdtTypeRef);
+
+ IMDInternalImport *pInternalImport = pModule1->GetMDImport();
+
+ PCCOR_SIGNATURE pSig1;
+ ULONG cSig1;
+ IfFailThrow(pInternalImport->GetTypeSpecFromToken(tk1, &pSig1, &cSig1));
+
+ TokenPairList newVisited = TokenPairList::AdjustForTypeSpec(pVisited, pModule1, pSig1, cSig1);
+
+    return CompareElementTypeToToken(pSig1, pSig1 + cSig1, tk2, pModule1, pModule2, pSubst1, &newVisited);
+} // MetaSig::CompareTypeSpecToToken
+
+
+/* static */
+BOOL MetaSig::CompareTypeDefOrRefOrSpec(Module *pModule1, mdToken tok1,
+ const Substitution *pSubst1,
+ Module *pModule2, mdToken tok2,
+ const Substitution *pSubst2,
+ TokenPairList *pVisited)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ if (TypeFromToken(tok1) != mdtTypeSpec && TypeFromToken(tok2) != mdtTypeSpec)
+ {
+ _ASSERTE(TypeFromToken(tok1) == mdtTypeDef || TypeFromToken(tok1) == mdtTypeRef);
+ _ASSERTE(TypeFromToken(tok2) == mdtTypeDef || TypeFromToken(tok2) == mdtTypeRef);
+ return CompareTypeTokens(tok1,tok2,pModule1,pModule2,pVisited);
+ }
+
+ if (TypeFromToken(tok1) != TypeFromToken(tok2))
+ {
+ if (TypeFromToken(tok1) == mdtTypeSpec)
+ {
+ return CompareTypeSpecToToken(tok1,tok2,pModule1,pModule2,pSubst1,pVisited);
+ }
+ else
+ {
+ _ASSERTE(TypeFromToken(tok2) == mdtTypeSpec);
+ return CompareTypeSpecToToken(tok2,tok1,pModule2,pModule1,pSubst2,pVisited);
+ }
+ }
+
+ _ASSERTE(TypeFromToken(tok1) == mdtTypeSpec &&
+ TypeFromToken(tok2) == mdtTypeSpec);
+
+ IMDInternalImport *pInternalImport1 = pModule1->GetMDImport();
+ IMDInternalImport *pInternalImport2 = pModule2->GetMDImport();
+
+ PCCOR_SIGNATURE pSig1,pSig2;
+ ULONG cSig1,cSig2;
+ IfFailThrow(pInternalImport1->GetTypeSpecFromToken(tok1, &pSig1, &cSig1));
+ IfFailThrow(pInternalImport2->GetTypeSpecFromToken(tok2, &pSig2, &cSig2));
+    return MetaSig::CompareElementType(pSig1, pSig2, pSig1 + cSig1, pSig2 + cSig2, pModule1, pModule2, pSubst1, pSubst2, pVisited);
+} // MetaSig::CompareTypeDefOrRefOrSpec
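+
+// Dispatch summary (informal): def/ref vs. def/ref goes to CompareTypeTokens,
+// spec vs. def/ref parses the spec and compares it against the token, and
+// spec vs. spec compares the two signature blobs element by element. For
+// example (hypothetical tokens), comparing the TypeSpec for "IList<int32>" in
+// module A against the TypeSpec for "IList<int32>" in module B ends up in
+// CompareElementType over the two GENERICINST blobs.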
+
+/* static */
+BOOL MetaSig::CompareVariableConstraints(const Substitution *pSubst1,
+ Module *pModule1, mdGenericParam tok1, //overriding
+ const Substitution *pSubst2,
+ Module *pModule2, mdGenericParam tok2) //overridden
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ IMDInternalImport *pInternalImport1 = pModule1->GetMDImport();
+ IMDInternalImport *pInternalImport2 = pModule2->GetMDImport();
+
+ DWORD specialConstraints1,specialConstraints2;
+
+ // check special constraints
+ {
+ IfFailThrow(pInternalImport1->GetGenericParamProps(tok1, NULL, &specialConstraints1, NULL, NULL, NULL));
+ IfFailThrow(pInternalImport2->GetGenericParamProps(tok2, NULL, &specialConstraints2, NULL, NULL, NULL));
+ specialConstraints1 = specialConstraints1 & gpSpecialConstraintMask;
+ specialConstraints2 = specialConstraints2 & gpSpecialConstraintMask;
+
+ if ((specialConstraints1 & gpNotNullableValueTypeConstraint) != 0)
+ {
+ if ((specialConstraints2 & gpNotNullableValueTypeConstraint) == 0)
+ return FALSE;
+ }
+ if ((specialConstraints1 & gpReferenceTypeConstraint) != 0)
+ {
+ if ((specialConstraints2 & gpReferenceTypeConstraint) == 0)
+ return FALSE;
+ }
+ if ((specialConstraints1 & gpDefaultConstructorConstraint) != 0)
+ {
+ if ((specialConstraints2 & (gpDefaultConstructorConstraint | gpNotNullableValueTypeConstraint)) == 0)
+ return FALSE;
+ }
+ }
+
+
+ HENUMInternalHolder hEnum1(pInternalImport1);
+ mdGenericParamConstraint tkConstraint1;
+ hEnum1.EnumInit(mdtGenericParamConstraint, tok1);
+
+ while (pInternalImport1->EnumNext(&hEnum1, &tkConstraint1))
+ {
+ mdToken tkConstraintType1, tkParam1;
+ IfFailThrow(pInternalImport1->GetGenericParamConstraintProps(tkConstraint1, &tkParam1, &tkConstraintType1));
+ _ASSERTE(tkParam1 == tok1);
+
+        // For each non-object constraint
+        // (and, in the case of a notNullableValueType, each non-ValueType constraint),
+        // find an equivalent constraint on tok2.
+        // NB: we do not attempt to match constraints equivalent to object (and ValueType when tok1 is notNullable)
+        // because they
+        // a) are vacuous, and
+        // b) may be implicit (i.e. absent) in the overridden variable's declaration
+        if (!(CompareTypeDefOrRefOrSpec(pModule1, tkConstraintType1, NULL,
+                                        MscorlibBinder::GetModule(), g_pObjectClass->GetCl(), NULL, NULL) ||
+              (((specialConstraints1 & gpNotNullableValueTypeConstraint) != 0) &&
+               CompareTypeDefOrRefOrSpec(pModule1, tkConstraintType1, NULL,
+                                         MscorlibBinder::GetModule(), g_pValueTypeClass->GetCl(), NULL, NULL))))
+ {
+ HENUMInternalHolder hEnum2(pInternalImport2);
+ mdGenericParamConstraint tkConstraint2;
+ hEnum2.EnumInit(mdtGenericParamConstraint, tok2);
+
+ BOOL found = FALSE;
+ while (!found && pInternalImport2->EnumNext(&hEnum2, &tkConstraint2) )
+ {
+ mdToken tkConstraintType2, tkParam2;
+ IfFailThrow(pInternalImport2->GetGenericParamConstraintProps(tkConstraint2, &tkParam2, &tkConstraintType2));
+ _ASSERTE(tkParam2 == tok2);
+
+ found = CompareTypeDefOrRefOrSpec(pModule1, tkConstraintType1, pSubst1, pModule2, tkConstraintType2, pSubst2, NULL);
+ }
+ if (!found)
+ {
+                // none of the constraints on tok2 matched this constraint of tok1; exit early
+ return FALSE;
+ }
+ }
+ //check next constraint of tok1
+ }
+
+ return TRUE;
+}
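+
+// Example of the containment check above (hypothetical generic methods): an
+// override may not strengthen constraints. If the overriding "void M<T>()"
+// requires "where T : IComparable" but the overridden declaration has no such
+// constraint, the IComparable constraint on tok1 finds no equivalent on tok2
+// and the comparison fails; constraints equivalent to object are skipped as
+// vacuous.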
+
+/* static */
+BOOL MetaSig::CompareMethodConstraints(const Substitution *pSubst1,
+ Module *pModule1,
+ mdMethodDef tok1, //implementation
+ const Substitution *pSubst2,
+ Module *pModule2,
+                                       mdMethodDef tok2) //declaration w.r.t. substitution
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ IMDInternalImport *pInternalImport1 = pModule1->GetMDImport();
+ IMDInternalImport *pInternalImport2 = pModule2->GetMDImport();
+
+ HENUMInternalHolder hEnumTyPars1(pInternalImport1);
+ HENUMInternalHolder hEnumTyPars2(pInternalImport2);
+
+ hEnumTyPars1.EnumInit(mdtGenericParam, tok1);
+ hEnumTyPars2.EnumInit(mdtGenericParam, tok2);
+
+ mdGenericParam tkTyPar1,tkTyPar2;
+
+ // enumerate the variables
+ DWORD numTyPars1 = pInternalImport1->EnumGetCount(&hEnumTyPars1);
+ DWORD numTyPars2 = pInternalImport2->EnumGetCount(&hEnumTyPars2);
+
+ _ASSERTE(numTyPars1 == numTyPars2);
+ if (numTyPars1 != numTyPars2) //play it safe
+ return FALSE; //throw bad format exception?
+
+ for(unsigned int i = 0; i < numTyPars1; i++)
+ {
+ pInternalImport1->EnumNext(&hEnumTyPars1, &tkTyPar1);
+ pInternalImport2->EnumNext(&hEnumTyPars2, &tkTyPar2);
+ if (!CompareVariableConstraints(pSubst1, pModule1, tkTyPar1, pSubst2, pModule2, tkTyPar2))
+ {
+ return FALSE;
+ }
+ }
+ return TRUE;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+// PromoteCarefully
+//
+// Clients who know they MAY have an interior pointer should come through here. We
+// can efficiently check whether our object lives on the current stack. If so, our
+// reference to it is not an interior pointer. This is more efficient than asking
+// the heap to verify whether our reference is interior, since it would have to
+// check all the heap segments, including those containing large objects.
+//
+// Note that we only have to check against the thread we are currently crawling. It
+// would be illegal for us to have a ByRef from someone else's stack. And this will
+// be asserted if we pass this reference to the heap as a potentially interior pointer.
+//
+// But the thread we are currently crawling is not the currently executing thread (in
+// the general case). We rely on fragile caching of the interesting thread, in our
+// call to UpdateCachedStackInfo() where we initiate the crawl in GcScanRoots() above.
+//
+// The flags must include GC_CALL_INTERIOR to indicate that we have an interior pointer;
+// additionally, the flags may indicate that we also have a pinned local byref.
+//
+void PromoteCarefully(promote_func fn,
+ PTR_PTR_Object ppObj,
+ ScanContext* sc,
+ DWORD flags /* = GC_CALL_INTERIOR*/ )
+{
+ LIMITED_METHOD_CONTRACT;
+
+ //
+ // Sanity check that the flags contain only these three values
+ //
+ assert((flags & ~(GC_CALL_INTERIOR|GC_CALL_PINNED|GC_CALL_CHECK_APP_DOMAIN)) == 0);
+
+ //
+ // Sanity check that GC_CALL_INTERIOR FLAG is set
+ //
+ assert(flags & GC_CALL_INTERIOR);
+
+#if !defined(DACCESS_COMPILE)
+ // Note that the base is at a higher address than the limit, since the stack
+ // grows downwards.
+ if (sc->thread_under_crawl->IsAddressInStack(*ppObj))
+ {
+ return;
+ }
+#endif // !defined(DACCESS_COMPILE)
+
+ (*fn) (ppObj, sc, flags);
+}
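+
+// Usage sketch (hypothetical pByRefSlot): a stack walker reporting a byref
+// argument whose target may be another stack slot rather than a heap object:
+//
+//   PromoteCarefully(fn, (PTR_PTR_Object)pByRefSlot, sc,
+//                    GC_CALL_INTERIOR | GC_CALL_CHECK_APP_DOMAIN);
+//
+// If *pByRefSlot points into the stack of the thread being crawled, the call
+// returns without reporting; otherwise it forwards to the promote callback.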
+
+void ReportPointersFromValueType(promote_func *fn, ScanContext *sc, PTR_MethodTable pMT, PTR_VOID pSrc)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!pMT->ContainsPointers())
+ return;
+
+ CGCDesc* map = CGCDesc::GetCGCDescFromMT(pMT);
+ CGCDescSeries* cur = map->GetHighestSeries();
+ CGCDescSeries* last = map->GetLowestSeries();
+ DWORD size = pMT->GetBaseSize();
+ _ASSERTE(cur >= last);
+
+ do
+ {
+ // offset to embedded references in this series must be
+ // adjusted by the VTable pointer, when in the unboxed state.
+ size_t offset = cur->GetSeriesOffset() - sizeof(void*);
+ PTR_OBJECTREF srcPtr = dac_cast<PTR_OBJECTREF>(PTR_BYTE(pSrc) + offset);
+ PTR_OBJECTREF srcPtrStop = dac_cast<PTR_OBJECTREF>(PTR_BYTE(srcPtr) + cur->GetSeriesSize() + size);
+ while (srcPtr < srcPtrStop)
+ {
+ (*fn)(dac_cast<PTR_PTR_Object>(srcPtr), sc, 0);
+ srcPtr++;
+ }
+ cur--;
+ } while (cur >= last);
+}
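+
+// Layout note (illustrative): for an unboxed struct such as
+//   struct Pair { Object* a; Object* b; }   // conceptually
+// the GCDesc hangs off the boxed MethodTable and describes series offsets that
+// include the object header/method table slot. The sizeof(void*) adjustment
+// above rebases those offsets onto the unboxed pSrc buffer before each
+// embedded reference is reported to the promote callback.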
+
+//------------------------------------------------------------------
+// Perform type-specific GC promotion on the value (based upon the
+// last type retrieved by NextArg()).
+//------------------------------------------------------------------
+VOID MetaSig::GcScanRoots(PTR_VOID pValue,
+ promote_func *fn,
+ ScanContext* sc,
+ promote_carefully_func *fnc)
+{
+
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+
+ PTR_PTR_Object pArgPtr = (PTR_PTR_Object)pValue;
+ if (fnc == NULL)
+ fnc = &PromoteCarefully;
+
+ TypeHandle thValueType;
+ CorElementType etype = m_pLastType.PeekElemTypeNormalized(m_pModule, &m_typeContext, &thValueType);
+
+ _ASSERTE(etype >= 0 && etype < ELEMENT_TYPE_MAX);
+
+#ifdef _DEBUG
+ PTR_Object pOldLocation;
+#endif
+
+ switch (gElementTypeInfo[etype].m_gc)
+ {
+ case TYPE_GC_NONE:
+ // do nothing
+ break;
+
+ case TYPE_GC_REF:
+ LOG((LF_GC, INFO3,
+ " Argument at" FMT_ADDR "causes promotion of " FMT_OBJECT "\n",
+ DBG_ADDR(pArgPtr), DBG_ADDR(*pArgPtr) ));
+#ifdef _DEBUG
+ pOldLocation = *pArgPtr;
+#endif
+ (*fn)(pArgPtr, sc, GC_CALL_CHECK_APP_DOMAIN );
+
+ // !!! Do not cast to (OBJECTREF*)
+ // !!! If we are in the relocate phase, we may have updated root,
+ // !!! but we have not moved the GC heap yet.
+ // !!! The root then points to bad locations until GC is done.
+#ifdef LOGGING
+ if (pOldLocation != *pArgPtr)
+ LOG((LF_GC, INFO3,
+ " Relocating from" FMT_ADDR "to " FMT_ADDR "\n",
+ DBG_ADDR(pOldLocation), DBG_ADDR(*pArgPtr)));
+#endif
+ break;
+
+ case TYPE_GC_BYREF:
+#ifdef ENREGISTERED_PARAMTYPE_MAXSIZE
+ case_TYPE_GC_BYREF:
+#endif // ENREGISTERED_PARAMTYPE_MAXSIZE
+
+ // value is an interior pointer
+ LOG((LF_GC, INFO3,
+ " Argument at" FMT_ADDR "causes promotion of interior pointer" FMT_ADDR "\n",
+ DBG_ADDR(pArgPtr), DBG_ADDR(*pArgPtr) ));
+
+#ifdef _DEBUG
+ pOldLocation = *pArgPtr;
+#endif
+
+ (*fnc)(fn, pArgPtr, sc, GC_CALL_INTERIOR|GC_CALL_CHECK_APP_DOMAIN);
+
+ // !!! Do not cast to (OBJECTREF*)
+ // !!! If we are in the relocate phase, we may have updated root,
+ // !!! but we have not moved the GC heap yet.
+ // !!! The root then points to bad locations until GC is done.
+#ifdef LOGGING
+ if (pOldLocation != *pArgPtr)
+ LOG((LF_GC, INFO3,
+ " Relocating from" FMT_ADDR "to " FMT_ADDR "\n",
+ DBG_ADDR(pOldLocation), DBG_ADDR(*pArgPtr)));
+#endif
+ break;
+
+ case TYPE_GC_OTHER:
+ // value is a ValueClass, generic type parameter
+ // See one of the go_through_object() macros in
+ // gc.cpp for the code we are emulating here. But note that the GCDesc
+ // for value classes describes the state of the instance in its boxed
+ // state. Here we are dealing with an unboxed instance, so we must adjust
+ // the object size and series offsets appropriately.
+ _ASSERTE(etype == ELEMENT_TYPE_VALUETYPE);
+ {
+ PTR_MethodTable pMT = thValueType.AsMethodTable();
+
+#ifdef ENREGISTERED_PARAMTYPE_MAXSIZE
+ if (ArgIterator::IsArgPassedByRef(thValueType))
+ {
+ goto case_TYPE_GC_BYREF;
+ }
+#endif // ENREGISTERED_PARAMTYPE_MAXSIZE
+
+ ReportPointersFromValueType(fn, sc, pMT, pArgPtr);
+ }
+ break;
+
+ default:
+ _ASSERTE(0); // can't get here.
+ }
+}
+
+
+#ifndef DACCESS_COMPILE
+
+void MetaSig::EnsureSigValueTypesLoaded(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ SigTypeContext typeContext(pMD);
+
+ Module * pModule = pMD->GetModule();
+
+ // The signature format is approximately:
+ // CallingConvention NumberOfArguments ReturnType Arg1 ...
+ // There is also a blob length at pSig-1.
+ SigPointer ptr(pMD->GetSig());
+
+ // Skip over calling convention.
+ IfFailThrowBF(ptr.GetCallingConv(NULL), BFA_BAD_SIGNATURE, pModule);
+
+ ULONG numArgs = 0;
+ IfFailThrowBF(ptr.GetData(&numArgs), BFA_BAD_SIGNATURE, pModule);
+
+    // Force a load of value type arguments (<= so that the return type is loaded too).
+    for (ULONG i = 0; i <= numArgs; i++)
+ {
+ ptr.PeekElemTypeNormalized(pModule,&typeContext);
+ // Move to next argument token.
+ IfFailThrowBF(ptr.SkipExactlyOne(), BFA_BAD_SIGNATURE, pModule);
+ }
+}
+
+// This walks the sig and checks to see if all types in the sig can be loaded.
+// It is used by ComCallableWrapper to give good error reporting.
+/*static*/
+void MetaSig::CheckSigTypesCanBeLoaded(MethodDesc * pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ SigTypeContext typeContext(pMD);
+
+ Module * pModule = pMD->GetModule();
+
+ // The signature format is approximately:
+ // CallingConvention NumberOfArguments ReturnType Arg1 ...
+ // There is also a blob length at pSig-1.
+ SigPointer ptr(pMD->GetSig());
+
+ // Skip over calling convention.
+ IfFailThrowBF(ptr.GetCallingConv(NULL), BFA_BAD_SIGNATURE, pModule);
+
+ ULONG numArgs = 0;
+ IfFailThrowBF(ptr.GetData(&numArgs), BFA_BAD_SIGNATURE, pModule);
+
+ // must do a skip so we skip any class tokens associated with the return type
+ IfFailThrowBF(ptr.SkipExactlyOne(), BFA_BAD_SIGNATURE, pModule);
+
+ // Force a load of value type arguments.
+    for (ULONG i = 0; i < numArgs; i++)
+ {
+ unsigned type = ptr.PeekElemTypeNormalized(pModule,&typeContext);
+ if (type == ELEMENT_TYPE_VALUETYPE || type == ELEMENT_TYPE_CLASS)
+ {
+ ptr.GetTypeHandleThrowing(pModule, &typeContext);
+ }
+ // Move to next argument token.
+ IfFailThrowBF(ptr.SkipExactlyOne(), BFA_BAD_SIGNATURE, pModule);
+ }
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+CorElementType MetaSig::GetReturnTypeNormalized(TypeHandle * pthValueType) const
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ if ((m_flags & SIG_RET_TYPE_INITTED) &&
+ ((pthValueType == NULL) || (m_corNormalizedRetType != ELEMENT_TYPE_VALUETYPE)))
+ {
+ return( m_corNormalizedRetType );
+ }
+
+ MetaSig * pSig = const_cast<MetaSig *>(this);
+ pSig->m_corNormalizedRetType = m_pRetType.PeekElemTypeNormalized(m_pModule, &m_typeContext, pthValueType);
+ pSig->m_flags |= SIG_RET_TYPE_INITTED;
+
+ return( m_corNormalizedRetType );
+}
+
+BOOL MetaSig::IsObjectRefReturnType()
+{
+ WRAPPER_NO_CONTRACT;
+
+ switch (GetReturnTypeNormalized())
+ {
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_VAR:
+ return( TRUE );
+ default:
+ break;
+ }
+ return( FALSE );
+}
+
+CorElementType MetaSig::GetReturnType() const
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pRetType.PeekElemTypeClosed(GetModule(), &m_typeContext);
+}
+
+BOOL MetaSig::IsReturnTypeVoid() const
+{
+ WRAPPER_NO_CONTRACT;
+ return (GetReturnType() == ELEMENT_TYPE_VOID);
+}
+
+#ifndef DACCESS_COMPILE
+
+//----------------------------------------------------------
+// Returns the unmanaged calling convention.
+//----------------------------------------------------------
+/*static*/
+BOOL
+MetaSig::GetUnmanagedCallingConvention(
+ Module * pModule,
+ PCCOR_SIGNATURE pSig,
+ ULONG cSig,
+ CorPinvokeMap * pPinvokeMapOut)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+
+ // Instantiations aren't relevant here
+ MetaSig msig(pSig, cSig, pModule, NULL);
+ PCCOR_SIGNATURE pWalk = msig.m_pRetType.GetPtr();
+ _ASSERTE(pWalk <= pSig + cSig);
+ while ((pWalk < (pSig + cSig)) && ((*pWalk == ELEMENT_TYPE_CMOD_OPT) || (*pWalk == ELEMENT_TYPE_CMOD_REQD)))
+ {
+ BOOL fIsOptional = (*pWalk == ELEMENT_TYPE_CMOD_OPT);
+
+ pWalk++;
+ if (pWalk + CorSigUncompressedDataSize(pWalk) > pSig + cSig)
+ {
+ return FALSE; // Bad formatting
+ }
+ mdToken tk;
+ pWalk += CorSigUncompressToken(pWalk, &tk);
+
+ if (fIsOptional)
+ {
+ if (IsTypeRefOrDef("System.Runtime.CompilerServices.CallConvCdecl", pModule, tk))
+ {
+ *pPinvokeMapOut = pmCallConvCdecl;
+ return TRUE;
+ }
+ else if (IsTypeRefOrDef("System.Runtime.CompilerServices.CallConvStdcall", pModule, tk))
+ {
+ *pPinvokeMapOut = pmCallConvStdcall;
+ return TRUE;
+ }
+ else if (IsTypeRefOrDef("System.Runtime.CompilerServices.CallConvThiscall", pModule, tk))
+ {
+ *pPinvokeMapOut = pmCallConvThiscall;
+ return TRUE;
+ }
+ else if (IsTypeRefOrDef("System.Runtime.CompilerServices.CallConvFastcall", pModule, tk))
+ {
+ *pPinvokeMapOut = pmCallConvFastcall;
+ return TRUE;
+ }
+ }
+ }
+
+ *pPinvokeMapOut = (CorPinvokeMap)0;
+ return TRUE;
+}
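+
+// Example (informal, approximate ILAsm): a signature can carry the unmanaged
+// convention as an optional modifier on the return type, e.g.
+//   int32 modopt([mscorlib]System.Runtime.CompilerServices.CallConvCdecl)
+// The loop above walks those CMOD_OPT entries and maps the first recognized
+// CallConv* type to the corresponding CorPinvokeMap value (pmCallConvCdecl
+// for this one).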
+
+//---------------------------------------------------------------------------------------
+//
+// Substitution from a token (TypeDef and TypeRef have empty instantiation, TypeSpec gets it from MetaData).
+//
+Substitution::Substitution(
+ mdToken parentTypeDefOrRefOrSpec,
+ Module * pModule,
+ const Substitution * pNext)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pModule = pModule;
+ m_pNext = pNext;
+
+ if (IsNilToken(parentTypeDefOrRefOrSpec) ||
+ (TypeFromToken(parentTypeDefOrRefOrSpec) != mdtTypeSpec))
+ {
+ return;
+ }
+
+ ULONG cbSig;
+ PCCOR_SIGNATURE pSig = NULL;
+ if (FAILED(pModule->GetMDImport()->GetTypeSpecFromToken(
+ parentTypeDefOrRefOrSpec,
+ &pSig,
+ &cbSig)))
+ {
+ return;
+ }
+ SigPointer sigptr = SigPointer(pSig, cbSig);
+ CorElementType type;
+
+ if (FAILED(sigptr.GetElemType(&type)))
+ return;
+
+    // The only kind of type specs that we recognize are instantiated types
+ if (type != ELEMENT_TYPE_GENERICINST)
+ return;
+
+ if (FAILED(sigptr.GetElemType(&type)))
+ return;
+
+ if (type != ELEMENT_TYPE_CLASS)
+ return;
+
+ /* mdToken genericTok = */
+ if (FAILED(sigptr.GetToken(NULL)))
+ return;
+ /* DWORD ntypars = */
+ if (FAILED(sigptr.GetData(NULL)))
+ return;
+
+ m_sigInst = sigptr;
+} // Substitution::Substitution
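+
+// TypeSpec blob shape consumed above (informal): an instantiated class is
+// encoded as
+//   GENERICINST CLASS <typeDefOrRef token> <argCount> <arg1> ... <argN>
+// and the constructor leaves m_sigInst positioned at <arg1>; e.g. for a
+// "List<int32>" spec it points at the ELEMENT_TYPE_I4 of the single argument.
+// Note that the early returns above leave the instantiation empty for
+// anything else, including GENERICINST over a VALUETYPE.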
+
+//---------------------------------------------------------------------------------------
+//
+void
+Substitution::CopyToArray(
+ Substitution * pTarget) const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ const Substitution * pChain = this;
+ DWORD i = 0;
+ for (; pChain != NULL; pChain = pChain->GetNext())
+ {
+ CONSISTENCY_CHECK(CheckPointer(pChain->GetModule()));
+
+ Substitution * pNext = (pChain->GetNext() != NULL) ? &pTarget[i + 1] : NULL;
+ pTarget[i++] = Substitution(pChain->GetModule(), pChain->GetInst(), pNext);
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+DWORD Substitution::GetLength() const
+{
+ LIMITED_METHOD_CONTRACT;
+ DWORD res = 0;
+ for (const Substitution * pChain = this; pChain != NULL; pChain = pChain->m_pNext)
+ {
+ res++;
+ }
+ return res;
+}
+
+//---------------------------------------------------------------------------------------
+//
+void Substitution::DeleteChain()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_pNext != NULL)
+ {
+ ((Substitution *)m_pNext)->DeleteChain();
+ }
+ delete this;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// static
+TokenPairList TokenPairList::AdjustForTypeSpec(TokenPairList *pTemplate, Module *pTypeSpecModule, PCCOR_SIGNATURE pTypeSpecSig, DWORD cbTypeSpecSig)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ TokenPairList result(pTemplate);
+
+ if (InTypeEquivalenceForbiddenScope(&result))
+ {
+ // it cannot get any worse
+ return result;
+ }
+
+ SigParser sig(pTypeSpecSig, cbTypeSpecSig);
+ CorElementType elemType;
+
+ IfFailThrow(sig.GetElemType(&elemType));
+ if (elemType != ELEMENT_TYPE_GENERICINST)
+ {
+ // we don't care about anything else than generic instantiations
+ return result;
+ }
+
+ IfFailThrow(sig.GetElemType(&elemType));
+
+ if (elemType == ELEMENT_TYPE_CLASS)
+ {
+ mdToken tkType;
+ IfFailThrow(sig.GetToken(&tkType));
+
+ Module *pModule;
+ if (!ClassLoader::ResolveTokenToTypeDefThrowing(pTypeSpecModule,
+ tkType,
+ &pModule,
+ &tkType))
+ {
+ // we couldn't prove otherwise so assume that this is not an interface
+ result.m_bInTypeEquivalenceForbiddenScope = TRUE;
+ }
+ else
+ {
+ DWORD dwAttrType;
+ IfFailThrow(pModule->GetMDImport()->GetTypeDefProps(tkType, &dwAttrType, NULL));
+
+ result.m_bInTypeEquivalenceForbiddenScope = !IsTdInterface(dwAttrType);
+ }
+ }
+ else
+ {
+ _ASSERTE(elemType == ELEMENT_TYPE_VALUETYPE);
+ result.m_bInTypeEquivalenceForbiddenScope = TRUE;
+ }
+
+ return result;
+}
+
+// static
+TokenPairList TokenPairList::AdjustForTypeEquivalenceForbiddenScope(TokenPairList *pTemplate)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ TokenPairList result(pTemplate);
+ result.m_bInTypeEquivalenceForbiddenScope = TRUE;
+ return result;
+}
+
+// TRUE if the two TypeDefs have the same layout and field marshal information.
+BOOL CompareTypeLayout(mdToken tk1, mdToken tk2, Module *pModule1, Module *pModule2)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ PRECONDITION(TypeFromToken(tk1) == mdtTypeDef);
+ PRECONDITION(TypeFromToken(tk2) == mdtTypeDef);
+ }
+ CONTRACTL_END
+
+ DWORD dwAttr1, dwAttr2;
+ IMDInternalImport *pInternalImport1 = pModule1->GetMDImport();
+ IMDInternalImport *pInternalImport2 = pModule2->GetMDImport();
+
+ IfFailThrow(pInternalImport1->GetTypeDefProps(tk1, &dwAttr1, NULL));
+ IfFailThrow(pInternalImport2->GetTypeDefProps(tk2, &dwAttr2, NULL));
+
+ // we need both to have sequential or explicit layout
+ BOOL fExplicitLayout = FALSE;
+ if (IsTdSequentialLayout(dwAttr1))
+ {
+ if (!IsTdSequentialLayout(dwAttr2))
+ return FALSE;
+ }
+ else if (IsTdExplicitLayout(dwAttr1))
+ {
+ if (!IsTdExplicitLayout(dwAttr2))
+ return FALSE;
+
+ fExplicitLayout = TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+
+ // they must have the same charset
+ if ((dwAttr1 & tdStringFormatMask) != (dwAttr2 & tdStringFormatMask))
+ return FALSE;
+
+ // they must have the same packing
+ DWORD dwPackSize1, dwPackSize2;
+ HRESULT hr1 = pInternalImport1->GetClassPackSize(tk1, &dwPackSize1);
+ HRESULT hr2 = pInternalImport2->GetClassPackSize(tk2, &dwPackSize2);
+
+ if (hr1 == CLDB_E_RECORD_NOTFOUND)
+ dwPackSize1 = 0;
+ else
+ IfFailThrow(hr1);
+
+ if (hr2 == CLDB_E_RECORD_NOTFOUND)
+ dwPackSize2 = 0;
+ else
+ IfFailThrow(hr2);
+
+ if (dwPackSize1 != dwPackSize2)
+ return FALSE;
+
+ // they must have the same explicit size
+ DWORD dwTotalSize1, dwTotalSize2;
+ hr1 = pInternalImport1->GetClassTotalSize(tk1, &dwTotalSize1);
+ hr2 = pInternalImport2->GetClassTotalSize(tk2, &dwTotalSize2);
+
+ if (hr1 == CLDB_E_RECORD_NOTFOUND)
+ dwTotalSize1 = 0;
+ else
+ IfFailThrow(hr1);
+
+ if (hr2 == CLDB_E_RECORD_NOTFOUND)
+ dwTotalSize2 = 0;
+ else
+ IfFailThrow(hr2);
+
+ if (dwTotalSize1 != dwTotalSize2)
+ return FALSE;
+
+ // same offsets, same field marshal
+ HENUMInternalHolder hFieldEnum1(pInternalImport1);
+ HENUMInternalHolder hFieldEnum2(pInternalImport2);
+
+ hFieldEnum1.EnumInit(mdtFieldDef, tk1);
+ hFieldEnum2.EnumInit(mdtFieldDef, tk2);
+
+ mdToken tkField1, tkField2;
+
+ while (hFieldEnum1.EnumNext(&tkField1))
+ {
+ if (!hFieldEnum2.EnumNext(&tkField2))
+ return FALSE;
+
+ // check for same offsets
+ if (fExplicitLayout)
+ {
+ ULONG uOffset1, uOffset2;
+ IfFailThrow(pInternalImport1->GetFieldOffset(tkField1, &uOffset1));
+ IfFailThrow(pInternalImport2->GetFieldOffset(tkField2, &uOffset2));
+
+ if (uOffset1 != uOffset2)
+ return FALSE;
+ }
+
+ // check for same field marshal
+ DWORD dwAttrField1, dwAttrField2;
+ IfFailThrow(pInternalImport1->GetFieldDefProps(tkField1, &dwAttrField1));
+ IfFailThrow(pInternalImport2->GetFieldDefProps(tkField2, &dwAttrField2));
+
+ if (IsFdHasFieldMarshal(dwAttrField1) != IsFdHasFieldMarshal(dwAttrField2))
+ return FALSE;
+
+ if (IsFdHasFieldMarshal(dwAttrField1))
+ {
+ // both fields have field marshal info - make sure it's same
+ PCCOR_SIGNATURE pNativeSig1, pNativeSig2;
+ ULONG cbNativeSig1, cbNativeSig2;
+
+ IfFailThrow(pInternalImport1->GetFieldMarshal(tkField1, &pNativeSig1, &cbNativeSig1));
+ IfFailThrow(pInternalImport2->GetFieldMarshal(tkField2, &pNativeSig2, &cbNativeSig2));
+
+ // just check if the blobs are identical
+ if (cbNativeSig1 != cbNativeSig2 || memcmp(pNativeSig1, pNativeSig2, cbNativeSig1) != 0)
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
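+
+// Equivalence example (hypothetical): two TypeDefs pass this check when, say,
+// both are sequential-layout, CharSet-identical, pack-4 structs
+//   POINT { int32 x; int32 y; }
+// declared in two different interop assemblies with the same field order and
+// identical MarshalAs blobs. Any mismatch in packing, total size, explicit
+// offsets or field marshal info makes the comparison fail.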
diff --git a/src/vm/siginfo.hpp b/src/vm/siginfo.hpp
new file mode 100644
index 0000000000..06d3b66a24
--- /dev/null
+++ b/src/vm/siginfo.hpp
@@ -0,0 +1,1194 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// siginfo.hpp
+//
+
+
+#ifndef _H_SIGINFO
+#define _H_SIGINFO
+
+
+#include "util.hpp"
+#include "vars.hpp"
+#include "clsload.hpp"
+#include "zapsig.h"
+#include "threads.h"
+
+#include "eecontract.h"
+#include "typectxt.h"
+#include "sigparser.h"
+
+//---------------------------------------------------------------------------------------
+// These macros define how arguments are mapped to the stack in the managed calling convention.
+// We assume we are walking a method's signature left-to-right, in the virtual calling convention.
+// See MethodDesc::Call for details on this virtual calling convention.
+// These macros tell us whether the arguments we see as we proceed with the signature walk are mapped
+// to increasing or decreasing stack addresses. This is valid only for arguments that go on the stack.
+//---------------------------------------------------------------------------------------
+#if defined(_TARGET_X86_)
+#define STACK_GROWS_DOWN_ON_ARGS_WALK
+#else
+#define STACK_GROWS_UP_ON_ARGS_WALK
+#endif
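+
+// Concretely (illustrative): when walking "void M(int a, int b)" left to
+// right, the stack slot for b sits at a lower address than the slot for a
+// under STACK_GROWS_DOWN_ON_ARGS_WALK (x86), and at a higher address under
+// STACK_GROWS_UP_ON_ARGS_WALK (the other targets).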
+
+BOOL IsTypeRefOrDef(LPCSTR szClassName, Module *pModule, mdToken token);
+
+struct ElementTypeInfo {
+#ifdef _DEBUG
+ int m_elementType;
+#endif
+ int m_cbSize;
+ CorInfoGCType m_gc : 3;
+ int m_enregister : 1;
+};
+extern const ElementTypeInfo gElementTypeInfo[];
+
+unsigned GetSizeForCorElementType(CorElementType etyp);
+const ElementTypeInfo* GetElementTypeInfo(CorElementType etyp);
+
+class SigBuilder;
+
+typedef const struct HardCodedMetaSig *LPHARDCODEDMETASIG;
+
+//@GENERICS: flags returned from IsPolyType indicating the presence or absence of class and
+// method type parameters in a type whose instantiation cannot be determined at JIT-compile time
+enum VarKind
+{
+ hasNoVars = 0x0000,
+ hasClassVar = 0x0001,
+ hasMethodVar = 0x0002,
+ hasSharableClassVar = 0x0004,
+ hasSharableMethodVar = 0x0008,
+ hasAnyVarsMask = 0x0003,
+ hasSharableVarsMask = 0x000c
+};
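+
+// For instance (illustrative): inside a shared generic class, a field of type
+// "Dictionary<!0, int32>" would report hasClassVar (plus hasSharableClassVar
+// when the instantiation is shareable) from IsPolyType, while "int32[]"
+// reports hasNoVars.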
+
+//---------------------------------------------------------------------------------------
+
+struct ScanContext;
+typedef void promote_func(PTR_PTR_Object, ScanContext*, DWORD);
+typedef void promote_carefully_func(promote_func*, PTR_PTR_Object, ScanContext*, DWORD);
+
+void PromoteCarefully(promote_func fn,
+ PTR_PTR_Object obj,
+ ScanContext* sc,
+ DWORD flags = GC_CALL_INTERIOR);
+
+class LoaderAllocator;
+void GcReportLoaderAllocator(promote_func* fn, ScanContext* sc, LoaderAllocator *pLoaderAllocator);
+
+//---------------------------------------------------------------------------------------
+//
+// Encapsulates how compressed integers and typeref tokens are encoded into
+// a bytestream.
+//
+// As you use this class, please understand the implicit normalizations
+// applied to the CorElementTypes returned by the various methods, especially
+// for variable types (e.g. !0 in generic signatures), string types
+// (i.e. E_T_STRING), object types (E_T_OBJECT), constructed types
+// (e.g. List<int>) and enums.
+//
+class SigPointer : public SigParser
+{
+ friend class MetaSig;
+
+public:
+ // Constructor.
+ SigPointer() { LIMITED_METHOD_DAC_CONTRACT; }
+
+ // Copy constructor.
+ SigPointer(const SigPointer & sig) : SigParser(sig)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ SigPointer(const SigParser & sig) : SigParser(sig)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+    // Signature from a pointer. INSECURE!!!
+    // WARNING: Should not be used, as it is insecure: we do not have the size of the
+    // signature, and therefore we can read past the end of the buffer/file.
+ FORCEINLINE
+ SigPointer(PCCOR_SIGNATURE ptr) : SigParser(ptr)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ // Signature from a pointer and size.
+ FORCEINLINE
+ SigPointer(PCCOR_SIGNATURE ptr, DWORD len) : SigParser(ptr, len)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+
+ //=========================================================================
+ // The RAW interface for reading signatures. You see exactly the signature,
+ // apart from custom modifiers which for historical reasons tend to get eaten.
+ //
+ // DO NOT USE THESE METHODS UNLESS YOU'RE TOTALLY SURE YOU WANT
+ // THE RAW signature. You nearly always want GetElemTypeClosed() or
+ // PeekElemTypeClosed() or one of the MetaSig functions. See the notes above.
+    // These functions will return E_T_INTERNAL, E_T_VAR, E_T_MVAR and such,
+    // so the caller must be able to deal with those.
+ //=========================================================================
+
+
+ void ConvertToInternalExactlyOne(Module* pSigModule, SigTypeContext *pTypeContext, SigBuilder * pSigBuilder, BOOL bSkipCustomModifier = TRUE);
+ void ConvertToInternalSignature(Module* pSigModule, SigTypeContext *pTypeContext, SigBuilder * pSigBuilder, BOOL bSkipCustomModifier = TRUE);
+
+
+ //=========================================================================
+ // The CLOSED interface for reading signatures. With the following
+ // methods you see the signature "as if" all type variables are
+ // replaced by the given instantiations. However, no type loads happen.
+ //
+ // In general this is what you want to use if the signature may include
+ // generic type variables. Even if you know it doesn't you can always
+ // pass in NULL for the instantiations and put a comment to that effect.
+ //
+    // The CLOSED api also hides E_T_INTERNAL by returning E_T_CLASS or E_T_VALUETYPE
+    // as appropriate (as directed by the TypeHandle following E_T_INTERNAL)
+ //=========================================================================
+
+ // The CorElementTypes returned correspond
+ // to those returned by TypeHandle::GetSignatureCorElementType.
+ CorElementType PeekElemTypeClosed(Module *pModule, const SigTypeContext *pTypeContext) const;
+
+ //------------------------------------------------------------------------
+    // Fetch the token for a CLASS, VALUETYPE or GENERICINST, or a type
+    // variable instantiated to be one of these, taking into account
+ // the given instantiations.
+ //
+ // SigPointer should be in a position that satisfies
+ // ptr.PeekElemTypeClosed(pTypeContext) = ELEMENT_TYPE_VALUETYPE
+ //
+ // A type ref or def is returned. For an instantiated generic struct
+ // this will return the token for the generic class, e.g. for a signature
+ // for "struct Pair<int,int>" this will return a token for "Pair".
+ //
+ // The token will only make sense in the context of the module where
+ // the signature occurs.
+ //
+ // WARNING: This api will return mdTokenNil for an E_T_VALUETYPE obtained
+ // from an E_T_INTERNAL, as the token is meaningless in that case.
+ // Users of this api must be prepared to deal with a nil token.
+ //------------------------------------------------------------------------
+ mdTypeRef PeekValueTypeTokenClosed(Module *pModule, const SigTypeContext *pTypeContext, Module **ppModuleOfToken) const;
+
+
+ //=========================================================================
+ // The INTERNAL-NORMALIZED interface for reading signatures. You see
+ // information about the signature, taking into account normalizations
+ // performed for data layout, e.g. enums and single-field value classes.
+ //=========================================================================
+
+ // The CorElementTypes returned correspond
+ // to those returned by TypeHandle::GetInternalCorElementType.
+ CorElementType PeekElemTypeNormalized(Module* pModule, const SigTypeContext *pTypeContext, TypeHandle * pthValueType = NULL) const;
+
+ //------------------------------------------------------------------------
+ // Assumes that the SigPointer points to the start of an element type.
+ // Returns size of that element in bytes. This is the minimum size that a
+ // field of this type would occupy inside an object.
+ //------------------------------------------------------------------------
+ UINT SizeOf(Module* pModule, const SigTypeContext *pTypeContext) const;
+
+private:
+
+ // SigPointer should be just after E_T_VAR or E_T_MVAR
+ TypeHandle GetTypeVariable(CorElementType et,const SigTypeContext *pTypeContext);
+ TypeHandle GetTypeVariableThrowing(Module *pModule,
+ CorElementType et,
+ ClassLoader::LoadTypesFlag fLoadTypes,
+ const SigTypeContext *pTypeContext);
+
+ // Parse type following E_T_GENERICINST
+ TypeHandle GetGenericInstType(Module * pModule,
+ ClassLoader::LoadTypesFlag = ClassLoader::LoadTypes,
+ ClassLoadLevel level = CLASS_LOADED,
+ const ZapSig::Context *pZapSigContext = NULL);
+
+public:
+
+ //------------------------------------------------------------------------
+ // Assumes that the SigPointer points to the start of an element type.
+ // Uses the SigTypeContext to fill in any type parameters.
+ //
+ // Also advances the pointer past the element type.
+ //------------------------------------------------------------------------
+
+ // OBSOLETE - Use GetTypeHandleThrowing()
+ TypeHandle GetTypeHandleNT(Module* pModule,
+ const SigTypeContext *pTypeContext) const;
+
+ // pTypeContext indicates how to instantiate any generic type parameters we encounter.
+ // However, we first implicitly apply the substitution pSubst to the metadata if pSubst is supplied.
+ // That is, if the metadata contains a type variable "!0" then we first look up
+ // !0 in pSubst to produce another item of metadata and continue processing.
+ // If pSubst is empty then we look up !0 in the pTypeContext to produce the final
+ // type handle. If any of these lookups are out of range we throw an exception.
+ //
+ // The level is the level to which the result type will be loaded (see classloadlevel.h)
+ // If dropGenericArgumentLevel is TRUE, and the metadata represents an instantiated generic type,
+ // then generic arguments to the generic type will be loaded one level lower. (This is used by the
+ // class loader to avoid looping on definitions such as class C : D<C>)
+ //
+ // If dropGenericArgumentLevel is TRUE and
+ // level=CLASS_LOAD_APPROXPARENTS, then the instantiated
+ // generic type is "approximated" in the following way:
+ // - for generic interfaces, the generic type (uninstantiated) is returned
+ // - for other generic instantiations, System.Object is used in place of any reference types
+ // occurring in the type arguments
+ // These semantics are used by the class loader to load tricky recursive definitions in phases
+ // (e.g. class C : D<C>, or struct S : I<S>)
+ TypeHandle GetTypeHandleThrowing(Module* pModule,
+ const SigTypeContext *pTypeContext,
+ ClassLoader::LoadTypesFlag fLoadTypes = ClassLoader::LoadTypes,
+ ClassLoadLevel level = CLASS_LOADED,
+ BOOL dropGenericArgumentLevel = FALSE,
+ const Substitution *pSubst = NULL,
+ const ZapSig::Context *pZapSigContext = NULL) const;
+
+public:
+ //------------------------------------------------------------------------
+ // Does this type contain class or method type parameters whose instantiation cannot
+ // be determined at JIT-compile time from the instantiations in the method context?
+ // Return a combination of hasClassVar and hasMethodVar flags.
+ //
+ // Example: class C<A,B> containing instance method m<T,U>
+ // Suppose that the method context is C<float,string>::m<double,object>
+ // Then the type Dict<!0,!!0> is considered to have *no* "polymorphic" type parameters because
+ // !0 is known to be float and !!0 is known to be double
+ // But Dict<!1,!!1> has polymorphic class *and* method type parameters because both
+ // !1=string and !!1=object are reference types and so code using these can be shared with
+ // other reference instantiations.
+ //------------------------------------------------------------------------
+ VarKind IsPolyType(const SigTypeContext *pTypeContext) const;
+
+ //------------------------------------------------------------------------
+ // Tests if the element type is a System.String. Accepts
+ // either ELEMENT_TYPE_STRING or ELEMENT_TYPE_CLASS encoding.
+ //------------------------------------------------------------------------
+ BOOL IsStringType(Module* pModule, const SigTypeContext *pTypeContext) const;
+ BOOL IsStringTypeThrowing(Module* pModule, const SigTypeContext *pTypeContext) const;
+
+private:
+ BOOL IsStringTypeHelper(Module* pModule, const SigTypeContext* pTypeContext, BOOL fThrow) const;
+
+public:
+
+
+ //------------------------------------------------------------------------
+ // Tests if the element class name is szClassName.
+ //------------------------------------------------------------------------
+ BOOL IsClass(Module* pModule, LPCUTF8 szClassName, const SigTypeContext *pTypeContext = NULL) const;
+ BOOL IsClassThrowing(Module* pModule, LPCUTF8 szClassName, const SigTypeContext *pTypeContext = NULL) const;
+
+private:
+ BOOL IsClassHelper(Module* pModule, LPCUTF8 szClassName, const SigTypeContext* pTypeContext, BOOL fThrow) const;
+
+public:
+ //------------------------------------------------------------------------
+ // Tests for the existence of a custom modifier
+ //------------------------------------------------------------------------
+ BOOL HasCustomModifier(Module *pModule, LPCSTR szModName, CorElementType cmodtype) const;
+
+ //------------------------------------------------------------------------
+ // Tests for ELEMENT_TYPE_CLASS or ELEMENT_TYPE_VALUETYPE followed by a TypeDef,
+ // and returns the TypeDef
+ //------------------------------------------------------------------------
+ BOOL IsTypeDef(mdTypeDef* pTypeDef) const;
+
+}; // class SigPointer
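+
+// A minimal usage sketch (illustrative only; pSig, cbSig, pModule and
+// pTypeContext are assumed to be in hand from the metadata APIs):
+//
+//     SigPointer ptr(pSig, cbSig);
+//     CorElementType et = ptr.PeekElemTypeClosed(pModule, pTypeContext);
+//     if (et == ELEMENT_TYPE_VALUETYPE)
+//     {
+//         // Load the handle; note this method is const and does not advance ptr.
+//         TypeHandle th = ptr.GetTypeHandleThrowing(pModule, pTypeContext);
+//     }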
+
+// forward declarations needed for the friends declared in Signature
+struct FrameInfo;
+struct VASigCookie;
+#if defined(DACCESS_COMPILE)
+class DacDbiInterfaceImpl;
+#endif // DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// Currently, PCCOR_SIGNATURE is used all over the runtime to represent a signature, which is just
+// an array of bytes. The problem with PCCOR_SIGNATURE is that it doesn't tell you the length of
+// the signature (i.e. the number of bytes in the array). This is particularly troublesome for DAC,
+// which needs to know how much memory to grab from out of process. This class is an encapsulation
+// over PCCOR_SIGNATURE AND the length of the signature it points to.
+//
+// Notes:
+// This class is meant to be read-only. Moreover, we should preferably never read the raw
+// PCCOR_SIGNATURE pointer directly, though there are likely some cases where that is inevitable.
+// We should keep those to a minimum.
+//
+// We should move over to Signature instead of PCCOR_SIGNATURE.
+//
+// To get a Signature, you can create one yourself by using a constructor. However, it's recommended
+// that you check whether the Signature should be constructed at a lower level. For example, instead of
+// creating a Signature in FramedMethodFrame::PromoteCallerStackWalker(), we should add a member function
+// to MethodDesc to return a Signature.
+//
+
+class Signature
+{
+public:
+ // create an empty Signature
+ Signature();
+
+ // this is the primary constructor
+ Signature(PCCOR_SIGNATURE pSig,
+ DWORD cbSig);
+
+ // check whether the signature is empty, i.e. has a NULL PCCOR_SIGNATURE
+ BOOL IsEmpty() const;
+
+ // create a SigParser from the signature
+ SigParser CreateSigParser() const;
+
+ // create a SigPointer from the signature
+ SigPointer CreateSigPointer() const;
+
+ // pretty print the signature
+ void PrettyPrint(const CHAR * pszMethodName,
+ CQuickBytes * pqbOut,
+ IMDInternalImport * pIMDI) const;
+
+ // retrieve the raw PCCOR_SIGNATURE pointer
+ PCCOR_SIGNATURE GetRawSig() const;
+
+ // retrieve the length of the signature
+ DWORD GetRawSigLen() const;
+
+private:
+ PCCOR_SIGNATURE m_pSig;
+ DWORD m_cbSig;
+}; // class Signature
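+
+// A minimal sketch (illustrative; pSig and cbSig are assumed to come from a
+// metadata import): wrapping the raw pointer and length in a Signature gives
+// downstream code a length-aware SigPointer instead of a bare PCCOR_SIGNATURE.
+//
+//     Signature sig(pSig, cbSig);
+//     if (!sig.IsEmpty())
+//     {
+//         SigPointer sp = sig.CreateSigPointer(); // carries the length along
+//         // ... walk sp ...
+//     }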
+
+
+#ifdef _DEBUG
+#define MAX_CACHED_SIG_SIZE 3 // To exercise the non-cached code path
+#else
+#define MAX_CACHED_SIG_SIZE 15
+#endif
+
+
+//---------------------------------------------------------------------------------------
+//
+// A substitution represents the composition of several formal type instantiations
+// It is used when matching formal signatures across the inheritance hierarchy.
+//
+// It has the form of a linked list:
+// [mod_1, <inst_1>] ->
+// [mod_2, <inst_2>] ->
+// ...
+// [mod_n, <inst_n>]
+//
+// Here the types in <inst_1> must be resolved in the scope of module mod_1 but
+// may contain type variables instantiated by <inst_2>
+// ...
+// and the types in <inst_(n-1)> must be resolved in the scope of module mod_(n-1) but
+// may contain type variables instantiated by <inst_n>
+//
+// Any type variables in <inst_n> are treated as "free".
+//
+class Substitution
+{
+private:
+ Module * m_pModule; // Module in which instantiation lives (needed to resolve typerefs)
+ SigPointer m_sigInst;
+ const Substitution * m_pNext;
+
+public:
+ Substitution()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pModule = NULL;
+ m_pNext = NULL;
+ }
+
+ Substitution(
+ Module * pModuleArg,
+ const SigPointer & sigInst,
+ const Substitution * pNextSubstitution)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pModule = pModuleArg;
+ m_sigInst = sigInst;
+ m_pNext = pNextSubstitution;
+ }
+
+ Substitution(
+ mdToken parentTypeDefOrRefOrSpec,
+ Module * pModuleArg,
+ const Substitution * nextArg);
+
+ Substitution(const Substitution & subst)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pModule = subst.m_pModule;
+ m_sigInst = subst.m_sigInst;
+ m_pNext = subst.m_pNext;
+ }
+ void DeleteChain();
+
+ Module * GetModule() const { LIMITED_METHOD_DAC_CONTRACT; return m_pModule; }
+ const Substitution * GetNext() const { LIMITED_METHOD_DAC_CONTRACT; return m_pNext; }
+ const SigPointer & GetInst() const { LIMITED_METHOD_DAC_CONTRACT; return m_sigInst; }
+ DWORD GetLength() const;
+
+ void CopyToArray(Substitution * pTarget /* must have type Substitution[GetLength()] */ ) const;
+
+}; // class Substitution
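+
+// An illustrative sketch of how substitutions compose (all token and module
+// names here are hypothetical). Given class B<T> : A<List<T>> in module modB
+// and class C : B<int> in module modC, matching a signature against A's
+// formals chains one node per inheritance step:
+//
+//     Substitution substB(tkBOfInt /* B<int> typespec */, pModC, NULL);
+//     Substitution substA(tkAOfListT /* A<List<T>> typespec */, pModB, &substB);
+//     // Resolving A's !0 under substA yields List<int>:
+//     // !0 -> List<T> (via substA), then T = !0 -> int (via substB).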
+
+//---------------------------------------------------------------------------------------
+//
+// Linked list that records what tokens are currently being compared for equivalence. This prevents
+// infinite recursion when types refer to each other in a cycle, e.g. a delegate that takes itself as
+// a parameter or a struct that declares a field of itself (illegal but we don't know at this point).
+//
+class TokenPairList
+{
+public:
+ // Chain using this constructor when comparing two typedefs for equivalence.
+ TokenPairList(mdToken token1, Module *pModule1, mdToken token2, Module *pModule2, TokenPairList *pNext)
+ : m_token1(token1), m_token2(token2),
+ m_pModule1(pModule1), m_pModule2(pModule2),
+ m_bInTypeEquivalenceForbiddenScope(pNext == NULL ? FALSE : pNext->m_bInTypeEquivalenceForbiddenScope),
+ m_pNext(pNext)
+ { LIMITED_METHOD_CONTRACT; }
+
+ static BOOL Exists(TokenPairList *pList, mdToken token1, Module *pModule1, mdToken token2, Module *pModule2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ while (pList != NULL)
+ {
+ if (pList->m_token1 == token1 && pList->m_pModule1 == pModule1 &&
+ pList->m_token2 == token2 && pList->m_pModule2 == pModule2)
+ return TRUE;
+
+ if (pList->m_token1 == token2 && pList->m_pModule1 == pModule2 &&
+ pList->m_token2 == token1 && pList->m_pModule2 == pModule1)
+ return TRUE;
+
+ pList = pList->m_pNext;
+ }
+ return FALSE;
+ }
+
+ static BOOL InTypeEquivalenceForbiddenScope(TokenPairList *pList)
+ {
+ return (pList == NULL ? FALSE : pList->m_bInTypeEquivalenceForbiddenScope);
+ }
+
+ // Chain using this method when comparing type specs.
+ static TokenPairList AdjustForTypeSpec(TokenPairList *pTemplate, Module *pTypeSpecModule, PCCOR_SIGNATURE pTypeSpecSig, DWORD cbTypeSpecSig);
+ static TokenPairList AdjustForTypeEquivalenceForbiddenScope(TokenPairList *pTemplate);
+
+private:
+ TokenPairList(TokenPairList *pTemplate)
+ : m_token1(pTemplate ? pTemplate->m_token1 : mdTokenNil),
+ m_token2(pTemplate ? pTemplate->m_token2 : mdTokenNil),
+ m_pModule1(pTemplate ? pTemplate->m_pModule1 : NULL),
+ m_pModule2(pTemplate ? pTemplate->m_pModule2 : NULL),
+ m_bInTypeEquivalenceForbiddenScope(pTemplate ? pTemplate->m_bInTypeEquivalenceForbiddenScope : FALSE),
+ m_pNext(pTemplate ? pTemplate->m_pNext : NULL)
+ { LIMITED_METHOD_CONTRACT; }
+
+ mdToken m_token1, m_token2;
+ Module *m_pModule1, *m_pModule2;
+ BOOL m_bInTypeEquivalenceForbiddenScope;
+ TokenPairList *m_pNext;
+}; // class TokenPairList
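+
+// An illustrative sketch (CompareTypesRec is hypothetical): each recursion
+// level chains a stack-allocated node and passes it down, so a token pair
+// that is already being compared is caught by Exists() and the recursion
+// terminates instead of looping on cyclic types.
+//
+//     BOOL CompareTypesRec(mdToken tk1, Module *pModule1,
+//                          mdToken tk2, Module *pModule2,
+//                          TokenPairList *pVisited)
+//     {
+//         if (TokenPairList::Exists(pVisited, tk1, pModule1, tk2, pModule2))
+//             return TRUE; // already comparing this pair higher in the stack
+//         TokenPairList newVisited(tk1, pModule1, tk2, pModule2, pVisited);
+//         // ... recurse on the constituent tokens with &newVisited ...
+//     }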
+
+//---------------------------------------------------------------------------------------
+//
+class MetaSig
+{
+ public:
+ enum MetaSigKind {
+ sigMember,
+ sigLocalVars,
+ sigField,
+ };
+
+ //------------------------------------------------------------------
+ // Common init used by other constructors
+ //------------------------------------------------------------------
+ void Init(PCCOR_SIGNATURE szMetaSig,
+ DWORD cbMetaSig,
+ Module* pModule,
+ const SigTypeContext *pTypeContext,
+ MetaSigKind kind = sigMember);
+
+ //------------------------------------------------------------------
+ // Constructor. Warning: Does NOT make a copy of szMetaSig.
+ //
+ // The instantiations are used to fill in type variables on calls
+ // to PeekArg, GetReturnType, GetNextArg, GetTypeHandle, GetRetTypeHandle and
+ // so on.
+ //
+ // Please make sure you know what you're doing before leaving classInst and methodInst as the default NULL.
+ // Are you sure the signature cannot contain type parameters (E_T_VAR, E_T_MVAR)?
+ //------------------------------------------------------------------
+ MetaSig(PCCOR_SIGNATURE szMetaSig,
+ DWORD cbMetaSig,
+ Module* pModule,
+ const SigTypeContext *pTypeContext,
+ MetaSigKind kind = sigMember)
+ {
+ WRAPPER_NO_CONTRACT;
+ Init(szMetaSig, cbMetaSig, pModule, pTypeContext, kind);
+ }
+
+ // this is just a variation of the previous constructor to ease the transition to Signature
+ MetaSig(const Signature & signature,
+ Module * pModule,
+ const SigTypeContext * pTypeContext,
+ MetaSigKind kind = sigMember)
+ {
+ WRAPPER_NO_CONTRACT;
+ Init(signature.GetRawSig(), signature.GetRawSigLen(), pModule, pTypeContext, kind);
+ }
+
+ // The following create MetaSigs for parsing the signature of the given method.
+ // They are identical except that they give slightly different
+ // type contexts. (Note the type context will only be relevant if we
+ // are parsing a method on an array type or on a generic type.)
+ // See TypeCtxt.h for more details.
+ // If declaringType is omitted then a *representative* instantiation may be obtained from pMD or pFD
+ MetaSig(MethodDesc *pMD, TypeHandle declaringType = TypeHandle());
+ MetaSig(MethodDesc *pMD, Instantiation classInst, Instantiation methodInst);
+
+ MetaSig(FieldDesc *pFD, TypeHandle declaringType = TypeHandle());
+
+ // Used to avoid touching metadata for mscorlib methods. Nb. only use for non-generic methods.
+ MetaSig(BinderMethodID id);
+
+ MetaSig(LPHARDCODEDMETASIG pwzMetaSig);
+
+ //------------------------------------------------------------------
+ // Returns type of current argument index. Returns ELEMENT_TYPE_END
+ // if already past end of arguments.
+ //------------------------------------------------------------------
+ CorElementType PeekArg() const;
+
+ //------------------------------------------------------------------
+ // Returns type of current argument index. Returns ELEMENT_TYPE_END
+ // if already past end of arguments.
+ //------------------------------------------------------------------
+ CorElementType PeekArgNormalized(TypeHandle * pthValueType = NULL) const;
+
+ //------------------------------------------------------------------
+ // Returns type of current argument, then advances the argument
+ // index. Returns ELEMENT_TYPE_END if already past end of arguments.
+ // This method updates m_pLastType
+ //------------------------------------------------------------------
+ CorElementType NextArg();
+
+ //------------------------------------------------------------------
+ // Advance the argument index. Can be used with GetArgProps()
+ // to iterate when you do not have a valid type context.
+ // This method updates m_pLastType
+ //------------------------------------------------------------------
+ void SkipArg();
+
+ //------------------------------------------------------------------
+ // Returns a read-only SigPointer for the m_pLastType set by one
+ // of NextArg() or SkipArg()
+ // This allows extracting more information for complex types.
+ //------------------------------------------------------------------
+ const SigPointer & GetArgProps() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pLastType;
+ }
+
+ //------------------------------------------------------------------
+ // Returns a read-only SigPointer for the return type.
+ // This allows extracting more information for complex types.
+ //------------------------------------------------------------------
+ const SigPointer & GetReturnProps() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pRetType;
+ }
+
+
+ //------------------------------------------------------------------------
+ // Returns # of arguments. Does not count the return value.
+ // Does not count the "this" argument (which is not reflected in the
+ // sig). 64-bit arguments are counted as one argument.
+ //------------------------------------------------------------------------
+ UINT NumFixedArgs()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_nArgs;
+ }
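+
+ //------------------------------------------------------------------------
+ // A typical iteration sketch (illustrative; pMD is an assumed MethodDesc*):
+ //
+ //     MetaSig msig(pMD);
+ //     CorElementType et;
+ //     while ((et = msig.NextArg()) != ELEMENT_TYPE_END)
+ //     {
+ //         TypeHandle th = msig.GetLastTypeHandleThrowing();
+ //         // ... one iteration per NumFixedArgs() argument ...
+ //     }
+ //------------------------------------------------------------------------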
+
+ //----------------------------------------------------------
+ // Returns the calling convention (see IMAGE_CEE_CS_CALLCONV_*
+ // defines in cor.h) - throws.
+ //----------------------------------------------------------
+ static BYTE GetCallingConvention(
+ Module *pModule,
+ const Signature &signature)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ PCCOR_SIGNATURE pSig = signature.GetRawSig();
+
+ if (signature.GetRawSigLen() < 1)
+ {
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ return (BYTE)(IMAGE_CEE_CS_CALLCONV_MASK & CorSigUncompressCallingConv(/*modifies*/pSig));
+ }
+
+ //----------------------------------------------------------
+ // Returns the calling convention (see IMAGE_CEE_CS_CALLCONV_*
+ // defines in cor.h) - doesn't throw.
+ //----------------------------------------------------------
+ __checkReturn
+ static HRESULT GetCallingConvention_NoThrow(
+ Module *pModule,
+ const Signature &signature,
+ BYTE *pbCallingConvention)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ PCCOR_SIGNATURE pSig = signature.GetRawSig();
+
+ if (signature.GetRawSigLen() < 1)
+ {
+ *pbCallingConvention = 0;
+ return COR_E_BADIMAGEFORMAT;
+ }
+ *pbCallingConvention = (BYTE)(IMAGE_CEE_CS_CALLCONV_MASK & CorSigUncompressCallingConv(/*modifies*/pSig));
+ return S_OK;
+ }
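+
+ //----------------------------------------------------------
+ // Sketch of the non-throwing variant in use (illustrative;
+ // pModule and sig are assumed to be in hand):
+ //
+ //     BYTE callConv;
+ //     if (SUCCEEDED(GetCallingConvention_NoThrow(pModule, sig, &callConv))
+ //         && callConv == IMAGE_CEE_CS_CALLCONV_VARARG)
+ //     {
+ //         // ... vararg-specific handling ...
+ //     }
+ //----------------------------------------------------------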
+
+ //----------------------------------------------------------
+ // Returns the calling convention (see IMAGE_CEE_CS_CALLCONV_*
+ // defines in cor.h)
+ //----------------------------------------------------------
+ BYTE GetCallingConvention()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_CallConv & IMAGE_CEE_CS_CALLCONV_MASK;
+ }
+
+ //----------------------------------------------------------
+ // Returns the calling convention & flags (see IMAGE_CEE_CS_CALLCONV_*
+ // defines in cor.h)
+ //----------------------------------------------------------
+ BYTE GetCallingConventionInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_CallConv;
+ }
+
+ //----------------------------------------------------------
+ // Has a 'this' pointer?
+ //----------------------------------------------------------
+ BOOL HasThis()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_CallConv & IMAGE_CEE_CS_CALLCONV_HASTHIS;
+ }
+
+ //----------------------------------------------------------
+ // Has an explicit 'this' pointer?
+ //----------------------------------------------------------
+ BOOL HasExplicitThis()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_CallConv & IMAGE_CEE_CS_CALLCONV_EXPLICITTHIS;
+ }
+
+ //----------------------------------------------------------
+ // Is a generic method with explicit arity?
+ //----------------------------------------------------------
+ BOOL IsGenericMethod()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_CallConv & IMAGE_CEE_CS_CALLCONV_GENERIC;
+ }
+
+ //----------------------------------------------------------
+ // Is vararg?
+ //----------------------------------------------------------
+ BOOL IsVarArg()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return GetCallingConvention() == IMAGE_CEE_CS_CALLCONV_VARARG;
+ }
+
+ //----------------------------------------------------------
+ // Is vararg?
+ //----------------------------------------------------------
+ static BOOL IsVarArg(Module *pModule, const Signature &signature)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ HRESULT hr;
+ BYTE nCallingConvention;
+
+ hr = GetCallingConvention_NoThrow(pModule, signature, &nCallingConvention);
+ if (FAILED(hr))
+ { // Invalid signatures are not VarArg
+ return FALSE;
+ }
+ return nCallingConvention == IMAGE_CEE_CS_CALLCONV_VARARG;
+ }
+
+ Module* GetModule() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_pModule;
+ }
+
+ //----------------------------------------------------------
+ // Returns the unmanaged calling convention.
+ //----------------------------------------------------------
+ static BOOL GetUnmanagedCallingConvention(Module *pModule, PCCOR_SIGNATURE pSig, ULONG cSig, CorPinvokeMap *pPinvokeMapOut);
+
+ //------------------------------------------------------------------
+ // Like NextArg, but returns only the normalized type (enums flattened to
+ // their underlying type, etc.).
+ //------------------------------------------------------------------
+ CorElementType
+ NextArgNormalized(TypeHandle * pthValueType = NULL)
+ {
+ CONTRACTL
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ m_pLastType = m_pWalk;
+ if (m_iCurArg == m_nArgs)
+ {
+ return ELEMENT_TYPE_END;
+ }
+ else
+ {
+ m_iCurArg++;
+ CorElementType mt = m_pWalk.PeekElemTypeNormalized(m_pModule, &m_typeContext, pthValueType);
+ // We should not hit ELEMENT_TYPE_END in the middle of the signature
+ if (mt == ELEMENT_TYPE_END)
+ {
+ THROW_BAD_FORMAT(BFA_BAD_SIGNATURE, (Module *)NULL);
+ }
+ IfFailThrowBF(m_pWalk.SkipExactlyOne(), BFA_BAD_SIGNATURE, (Module *)NULL);
+ return mt;
+ }
+ } // NextArgNormalized
+
+ // Tests if the return type is an object ref. Loads types
+ // if needed (though it shouldn't really need to)
+ BOOL IsObjectRefReturnType();
+
+ //------------------------------------------------------------------------
+ // Compute element size from CorElementType and optional valuetype.
+ //------------------------------------------------------------------------
+ static UINT GetElemSize(CorElementType etype, TypeHandle thValueType);
+
+ UINT GetReturnTypeSize()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return m_pRetType.SizeOf(m_pModule, &m_typeContext);
+ }
+
+ //------------------------------------------------------------------
+ // Perform type-specific GC promotion on the value (based upon the
+ // last type retrieved by NextArg()).
+ //------------------------------------------------------------------
+ VOID GcScanRoots(PTR_VOID pValue, promote_func *fn,
+ ScanContext* sc, promote_carefully_func *fnc = NULL);
+
+ //------------------------------------------------------------------
+ // Is the return type 64 bit?
+ //------------------------------------------------------------------
+ BOOL Is64BitReturn() const
+ {
+ WRAPPER_NO_CONTRACT;
+ CorElementType rt = GetReturnTypeNormalized();
+ return (rt == ELEMENT_TYPE_I8 || rt == ELEMENT_TYPE_U8 || rt == ELEMENT_TYPE_R8);
+ }
+
+ //------------------------------------------------------------------
+ // Is the return type floating point?
+ //------------------------------------------------------------------
+ BOOL HasFPReturn()
+ {
+ WRAPPER_NO_CONTRACT;
+ CorElementType rt = GetReturnTypeNormalized();
+ return (rt == ELEMENT_TYPE_R4 || rt == ELEMENT_TYPE_R8);
+ }
+
+ //------------------------------------------------------------------
+ // reset: goto start pos
+ //------------------------------------------------------------------
+ VOID Reset();
+
+ //------------------------------------------------------------------
+ // current position of the arg iterator
+ //------------------------------------------------------------------
+ UINT GetArgNum()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_iCurArg;
+ }
+
+ //------------------------------------------------------------------
+ // Returns CorElementType of return value, taking into account
+ // any instantiations due to generics. Does not load types.
+ // Does not return normalized type.
+ //------------------------------------------------------------------
+ CorElementType GetReturnType() const;
+
+ BOOL IsReturnTypeVoid() const;
+
+
+ enum RETURNTYPE {RETOBJ, RETBYREF, RETNONOBJ};
+
+ CorElementType GetReturnTypeNormalized(TypeHandle * pthValueType = NULL) const;
+
+ //------------------------------------------------------------------
+ // used to treat some sigs as special case vararg
+ // used by calli to unmanaged target
+ //------------------------------------------------------------------
+ BOOL IsTreatAsVarArg()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (m_flags & TREAT_AS_VARARG);
+ }
+
+ //------------------------------------------------------------------
+ // Determines if the current argument is System/String.
+ // Caller must determine first that the argument type is
+ // ELEMENT_TYPE_CLASS or ELEMENT_TYPE_STRING. This may be used during
+ // GC.
+ //------------------------------------------------------------------
+ BOOL IsStringType() const;
+
+ //------------------------------------------------------------------
+ // Determines if the current argument is a particular class.
+ // Caller must determine first that the argument type
+ // is ELEMENT_TYPE_CLASS.
+ //------------------------------------------------------------------
+ BOOL IsClass(LPCUTF8 szClassName) const;
+
+
+ //------------------------------------------------------------------
+ // This method will return a TypeHandle for the last argument
+ // examined.
+ // If NextArg() returns ELEMENT_TYPE_BYREF, you can also call GetByRefType()
+ // to get to the underlying type of the byref
+ //------------------------------------------------------------------
+ TypeHandle GetLastTypeHandleNT() const
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_pLastType.GetTypeHandleNT(m_pModule, &m_typeContext);
+ }
+
+ //------------------------------------------------------------------
+ // This method will return a TypeHandle for the last argument
+ // examined.
+ // If NextArg() returns ELEMENT_TYPE_BYREF, you can also call GetByRefType()
+ // to get to the underlying type of the byref
+ //------------------------------------------------------------------
+ TypeHandle GetLastTypeHandleThrowing(ClassLoader::LoadTypesFlag fLoadTypes = ClassLoader::LoadTypes,
+ ClassLoadLevel level = CLASS_LOADED,
+ BOOL dropGenericArgumentLevel = FALSE) const
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_pLastType.GetTypeHandleThrowing(m_pModule, &m_typeContext, fLoadTypes,
+ level, dropGenericArgumentLevel);
+ }
+
+ //------------------------------------------------------------------
+ // Returns the TypeHandle for the return type of the signature
+ //------------------------------------------------------------------
+ TypeHandle GetRetTypeHandleNT() const
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_pRetType.GetTypeHandleNT(m_pModule, &m_typeContext);
+ }
+
+ TypeHandle GetRetTypeHandleThrowing(ClassLoader::LoadTypesFlag fLoadTypes = ClassLoader::LoadTypes,
+ ClassLoadLevel level = CLASS_LOADED) const
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_pRetType.GetTypeHandleThrowing(m_pModule, &m_typeContext, fLoadTypes, level);
+ }
+
+ //------------------------------------------------------------------
+ // Returns the base type of the byref type of the last argument examined
+ // which needs to have been ELEMENT_TYPE_BYREF.
+ // For object references, the class being accessed byref is also returned in *pTy.
+ // eg. for "int32 &", return value = ELEMENT_TYPE_I4, *pTy= ???
+ // for "System.Exception &", return value = ELEMENT_TYPE_CLASS, *pTy=System.Exception
+ // Note that byref to byref is not allowed, and so the return value
+ // can never be ELEMENT_TYPE_BYREF.
+ //------------------------------------------------------------------
+ CorElementType GetByRefType(TypeHandle* pTy) const;
+
+ // Compare types in two signatures, first applying
+ // - optional substitutions pSubst1 and pSubst2
+ // to class type parameters (E_T_VAR) in the respective signatures
+ static
+ BOOL
+ CompareElementType(
+ PCCOR_SIGNATURE & pSig1,
+ PCCOR_SIGNATURE & pSig2,
+ PCCOR_SIGNATURE pEndSig1,
+ PCCOR_SIGNATURE pEndSig2,
+ Module * pModule1,
+ Module * pModule2,
+ const Substitution * pSubst1,
+ const Substitution * pSubst2,
+ TokenPairList * pVisited = NULL);
+
+
+
+ // If pTypeDef1 is C<...> and pTypeDef2 is C<...> (for possibly different instantiations)
+ // then check C<!0, ... !n> @ pSubst1 == C<!0, ..., !n> @ pSubst2, i.e.
+ // that the head type (C) is the same and that when the head type is treated
+ // as an uninstantiated type definition and we apply each of the substitutions
+ // then the same type results. This effectively checks that the two substitutions
+ // are equivalent.
+ static BOOL CompareTypeDefsUnderSubstitutions(MethodTable *pTypeDef1, MethodTable *pTypeDef2,
+ const Substitution* pSubst1, const Substitution* pSubst2,
+ TokenPairList *pVisited = NULL);
+
+
+ // Compare two complete method signatures, first applying optional substitutions pSubst1 and pSubst2
+ // to class type parameters (E_T_VAR) in the respective signatures
+ static BOOL CompareMethodSigs(
+ PCCOR_SIGNATURE pSig1,
+ DWORD cSig1,
+ Module* pModule1,
+ const Substitution* pSubst1,
+ PCCOR_SIGNATURE pSig2,
+ DWORD cSig2,
+ Module* pModule2,
+ const Substitution* pSubst2,
+ TokenPairList *pVisited = NULL
+ );
+
+ // Nonthrowing version of CompareMethodSigs
+ //
+ // Return S_OK if they match
+ // S_FALSE if they don't match
+ // FAILED if OOM or some other blocking error
+ //
+ static HRESULT CompareMethodSigsNT(
+ PCCOR_SIGNATURE pSig1,
+ DWORD cSig1,
+ Module* pModule1,
+ const Substitution* pSubst1,
+ PCCOR_SIGNATURE pSig2,
+ DWORD cSig2,
+ Module* pModule2,
+ const Substitution* pSubst2,
+ TokenPairList *pVisited = NULL
+ );
+
+ static BOOL CompareFieldSigs(
+ PCCOR_SIGNATURE pSig1,
+ DWORD cSig1,
+ Module* pModule1,
+ PCCOR_SIGNATURE pSig2,
+ DWORD cSig2,
+ Module* pModule2,
+ TokenPairList *pVisited = NULL
+ );
+
+ static BOOL CompareMethodSigs(MetaSig &msig1,
+ MetaSig &msig2,
+ BOOL ignoreCallconv);
+
+ // Is each set of constraints on the implementing method's type parameters a subset
+ // of the corresponding set of constraints on the declared method's type parameters,
+ // given a substitution for the latter's (class) type parameters.
+ // This is used by the class loader to verify type safety of method overriding and interface implementation.
+ static BOOL CompareMethodConstraints(const Substitution *pSubst1,
+ Module *pModule1,
+ mdMethodDef tok1, //implementing method
+ const Substitution *pSubst2,
+ Module *pModule2,
+ mdMethodDef tok2); //declared method
+
+private:
+ static BOOL CompareVariableConstraints(const Substitution *pSubst1,
+ Module *pModule1, mdGenericParam tok1, //overriding
+ const Substitution *pSubst2,
+ Module *pModule2, mdGenericParam tok2); //overridden
+
+ static BOOL CompareTypeDefOrRefOrSpec(Module *pModule1, mdToken tok1,
+ const Substitution *pSubst1,
+ Module *pModule2, mdToken tok2,
+ const Substitution *pSubst2,
+ TokenPairList *pVisited);
+ static BOOL CompareTypeSpecToToken(mdTypeSpec tk1,
+ mdToken tk2,
+ Module *pModule1,
+ Module *pModule2,
+ const Substitution *pSubst1,
+ TokenPairList *pVisited);
+
+ static BOOL CompareElementTypeToToken(PCCOR_SIGNATURE &pSig1,
+ PCCOR_SIGNATURE pEndSig1, // end of sig1
+ mdToken tk2,
+ Module* pModule1,
+ Module* pModule2,
+ const Substitution* pSubst1,
+ TokenPairList *pVisited);
+
+public:
+
+ //------------------------------------------------------------------
+ // Ensures that all the value types in the sig are loaded. This
+ // should be called on sig's that have value types before they
+ // are passed to Call(). This ensures that value classes will not
+ // be loaded during the operation to determine the size of the
+ // stack. Thus preventing the resulting GC hole.
+ //------------------------------------------------------------------
+ static void EnsureSigValueTypesLoaded(MethodDesc *pMD);
+
+ // this walks the sig and checks to see if all types in the sig can be loaded
+ static void CheckSigTypesCanBeLoaded(MethodDesc *pMD);
+
+ const SigTypeContext *GetSigTypeContext() const { LIMITED_METHOD_CONTRACT; return &m_typeContext; }
+
+ // Disallow copy constructor.
+ MetaSig(MetaSig *pSig);
+
+ void SetHasParamTypeArg()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_CallConv |= CORINFO_CALLCONV_PARAMTYPE;
+ }
+
+ void SetTreatAsVarArg()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_flags |= TREAT_AS_VARARG;
+ }
+
+
+ // These are protected because Reflection subclasses Metasig
+ protected:
+
+ enum MetaSigFlags
+ {
+ SIG_RET_TYPE_INITTED = 0x01,
+ TREAT_AS_VARARG = 0x02, // used to treat some sigs as special case vararg
+ // used by calli to unmanaged target
+ };
+
+ Module* m_pModule;
+ SigTypeContext m_typeContext; // Instantiation for type parameters
+
+ SigPointer m_pStart;
+ SigPointer m_pWalk;
+ SigPointer m_pLastType;
+ SigPointer m_pRetType;
+ UINT32 m_nArgs;
+ UINT32 m_iCurArg;
+
+ // The following are cached so we don't walk the signature
+ // multiple times.
+
+ CorElementType m_corNormalizedRetType;
+ BYTE m_flags;
+ BYTE m_CallConv;
+}; // class MetaSig
+
+
+BOOL IsTypeRefOrDef(LPCSTR szClassName, Module *pModule, mdToken token);
+
+#if defined(FEATURE_TYPEEQUIVALENCE) && !defined(DACCESS_COMPILE)
+
+// A helper struct representing data stored in the TypeIdentifierAttribute.
+struct TypeIdentifierData
+{
+ TypeIdentifierData() : m_cbScope(0), m_pchScope(NULL), m_cbIdentifierNamespace(0), m_pchIdentifierNamespace(NULL),
+ m_cbIdentifierName(0), m_pchIdentifierName(NULL) {}
+
+ HRESULT Init(Module *pModule, mdToken tk);
+ BOOL IsEqual(const TypeIdentifierData & data) const;
+
+private:
+ SIZE_T m_cbScope;
+ LPCUTF8 m_pchScope;
+ SIZE_T m_cbIdentifierNamespace;
+ LPCUTF8 m_pchIdentifierNamespace;
+ SIZE_T m_cbIdentifierName;
+ LPCUTF8 m_pchIdentifierName;
+};
+
+#endif // FEATURE_TYPEEQUIVALENCE && !DACCESS_COMPILE
+
+// fResolved is TRUE when one of the tokens is a resolved TypeRef. This is used to restrict
+// type equivalence checks for value types.
+BOOL CompareTypeTokens(mdToken tk1, mdToken tk2, Module *pModule1, Module *pModule2, TokenPairList *pVisited = NULL);
+
+// Nonthrowing version of CompareTypeTokens.
+//
+// Return S_OK if they match
+// S_FALSE if they don't match
+// FAILED if OOM or some other blocking error
+//
+HRESULT CompareTypeTokensNT(mdToken tk1, mdToken tk2, Module *pModule1, Module *pModule2);
+
+// TRUE if the two TypeDefs have the same layout and field marshal information.
+BOOL CompareTypeLayout(mdToken tk1, mdToken tk2, Module *pModule1, Module *pModule2);
+BOOL CompareTypeDefsForEquivalence(mdToken tk1, mdToken tk2, Module *pModule1, Module *pModule2, TokenPairList *pVisited);
+BOOL IsTypeDefEquivalent(mdToken tk, Module *pModule);
+BOOL IsTypeDefExternallyVisible(mdToken tk, Module *pModule, DWORD dwAttrs);
+
+void ReportPointersFromValueType(promote_func *fn, ScanContext *sc, PTR_MethodTable pMT, PTR_VOID pSrc);
+
+#endif /* _H_SIGINFO */
+
diff --git a/src/vm/simplerwlock.cpp b/src/vm/simplerwlock.cpp
new file mode 100644
index 0000000000..5988609565
--- /dev/null
+++ b/src/vm/simplerwlock.cpp
@@ -0,0 +1,308 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+#include "common.h"
+#include "simplerwlock.hpp"
+
+BOOL SimpleRWLock::TryEnterRead()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+#ifdef _DEBUG
+ PreEnter();
+#endif //_DEBUG
+
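+ // m_RWLock encodes the lock state: 0 = free, -1 = held by a writer,
+ // n > 0 = held by n readers. A read acquire CAS-increments the count,
+ // backing off only if a writer (-1) currently holds the lock.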
+ LONG RWLock;
+
+ do {
+ RWLock = m_RWLock;
+ if( RWLock == -1 ) return FALSE;
+ _ASSERTE (RWLock >= 0);
+ } while( RWLock != InterlockedCompareExchange( &m_RWLock, RWLock+1, RWLock ));
+
+ INCTHREADLOCKCOUNT();
+ EE_LOCK_TAKEN(this);
+
+#ifdef _DEBUG
+ PostEnter();
+#endif //_DEBUG
+
+ return TRUE;
+}
+
+//=====================================================================
+void SimpleRWLock::EnterRead()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+ // Custom contract is needed for PostEnter()'s unscoped GC_NoTrigger counter change
+#ifdef ENABLE_CONTRACTS_IMPL
+ CheckGCNoTrigger();
+#endif //ENABLE_CONTRACTS_IMPL
+
+ GCX_MAYBE_PREEMP(m_gcMode == PREEMPTIVE);
+
+#ifdef _DEBUG
+ PreEnter();
+#endif //_DEBUG
+
+ DWORD dwSwitchCount = 0;
+
+ while (TRUE)
+ {
+ // Prevent writers from being starved. This assumes that writers are rare and
+ // don't hold the lock for a long time.
+ while (IsWriterWaiting())
+ {
+ int spinCount = m_spinCount;
+ while (spinCount > 0) {
+ spinCount--;
+ YieldProcessor();
+ }
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+
+ if (TryEnterRead())
+ {
+ return;
+ }
+
+ DWORD i = g_SpinConstants.dwInitialDuration;
+ do
+ {
+ if (TryEnterRead())
+ {
+ return;
+ }
+
+ if (g_SystemInfo.dwNumberOfProcessors <= 1)
+ {
+ break;
+ }
+ // Delay by approximately 2*i clock cycles (Pentium III).
+ // This is brittle code - future processors may of course execute this
+ // faster or slower, and future code generators may eliminate the loop altogether.
+ // The precise value of the delay is not critical, however, and I can't think
+ // of a better way that isn't machine-dependent.
+ for (int delayCount = i; --delayCount; )
+ {
+ YieldProcessor(); // indicate to the processor that we are spinning
+ }
+
+ // exponential backoff: wait a factor longer in the next iteration
+ i *= g_SpinConstants.dwBackoffFactor;
+ }
+ while (i < g_SpinConstants.dwMaximumDuration);
+
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+}
+
+//=====================================================================
+BOOL SimpleRWLock::TryEnterWrite()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+#ifdef _DEBUG
+ PreEnter();
+#endif //_DEBUG
+
+ LONG RWLock = InterlockedCompareExchange( &m_RWLock, -1, 0 );
+
+ _ASSERTE (RWLock >= 0 || RWLock == -1);
+
+ if( RWLock ) {
+ return FALSE;
+ }
+
+ INCTHREADLOCKCOUNT();
+ EE_LOCK_TAKEN(this);
+
+#ifdef _DEBUG
+ PostEnter();
+#endif //_DEBUG
+
+ ResetWriterWaiting();
+
+ return TRUE;
+}
+
+//=====================================================================
+void SimpleRWLock::EnterWrite()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+
+ // Custom contract is needed for PostEnter()'s unscoped GC_NoTrigger counter change
+#ifdef ENABLE_CONTRACTS_IMPL
+ CheckGCNoTrigger();
+#endif //ENABLE_CONTRACTS_IMPL
+
+ GCX_MAYBE_PREEMP(m_gcMode == PREEMPTIVE);
+
+#ifdef _DEBUG
+ PreEnter();
+#endif //_DEBUG
+
+ BOOL set = FALSE;
+
+ DWORD dwSwitchCount = 0;
+
+ while (TRUE)
+ {
+ if (TryEnterWrite())
+ {
+ return;
+ }
+
+ // Set the writer-waiting word, if not already set, to notify potential
+ // readers to wait. Remember whether we set the word so it can be reset later.
+ if (!IsWriterWaiting())
+ {
+ SetWriterWaiting();
+ set = TRUE;
+ }
+
+ DWORD i = g_SpinConstants.dwInitialDuration;
+ do
+ {
+ if (TryEnterWrite())
+ {
+ return;
+ }
+
+ if (g_SystemInfo.dwNumberOfProcessors <= 1)
+ {
+ break;
+ }
+ // Delay by approximately 2*i clock cycles (Pentium III).
+ // This is brittle code - future processors may of course execute this
+ // faster or slower, and future code generators may eliminate the loop altogether.
+ // The precise value of the delay is not critical, however, and I can't think
+ // of a better way that isn't machine-dependent.
+ for (int delayCount = i; --delayCount; )
+ {
+ YieldProcessor(); // indicate to the processor that we are spinning
+ }
+
+ // exponential backoff: wait a factor longer in the next iteration
+ i *= g_SpinConstants.dwBackoffFactor;
+ }
+ while (i < g_SpinConstants.dwMaximumDuration);
+
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+}
+
+#ifdef ENABLE_CONTRACTS_IMPL
+//=========================================================================
+// Asserts if the lock mode is PREEMPTIVE and the thread is in a GC_NOTRIGGER region
+//=========================================================================
+void SimpleRWLock::CheckGCNoTrigger()
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ // On PREEMPTIVE locks we'll toggle the GC mode, so we better not be in a GC_NOTRIGGERS region
+ if (m_gcMode == PREEMPTIVE)
+ {
+ ClrDebugState *pClrDebugState = CheckClrDebugState();
+ if (pClrDebugState)
+ {
+ if (pClrDebugState->GetGCNoTriggerCount())
+ {
+ // If we have no thread object, we won't be toggling the GC. This is the case,
+ // for example, on the debugger helper thread which is always GC_NOTRIGGERS.
+ if (GetThreadNULLOk() != NULL)
+ {
+ if (!( (GCViolation|BadDebugState) & pClrDebugState->ViolationMask()))
+ {
+ CONTRACT_ASSERT("You cannot enter a lock in a GC_NOTRIGGER region.",
+ Contract::GC_NoTrigger,
+ Contract::GC_Mask,
+ __FUNCTION__,
+ __FILE__,
+ __LINE__);
+ }
+ }
+ }
+
+ // The mode checks and enforcement of GC_NOTRIGGER during the lock are done in SimpleRWLock::PostEnter().
+ }
+ }
+}
+#endif //ENABLE_CONTRACTS_IMPL
+
+#ifdef _DEBUG
+//=====================================================================
+// GC mode assertions before acquiring a lock based on its mode.
+//=====================================================================
+void SimpleRWLock::PreEnter()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ if (m_gcMode == PREEMPTIVE)
+ _ASSERTE(!GetThread() || !GetThread()->PreemptiveGCDisabled());
+ else if (m_gcMode == COOPERATIVE)
+ _ASSERTE(!GetThread() || GetThread()->PreemptiveGCDisabled());
+}
+
+//=====================================================================
+// GC checks after lock acquisition for avoiding deadlock scenarios.
+//=====================================================================
+void SimpleRWLock::PostEnter()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if ((m_gcMode == COOPERATIVE) || (m_gcMode == COOPERATIVE_OR_PREEMPTIVE))
+ {
+ Thread * pThread = GetThreadNULLOk();
+ if (pThread == NULL)
+ {
+ // Cannot set NoTrigger. This could conceivably turn into
+ // a GC hole if the thread is created and then a GC rendezvous happens
+ // while the lock is still held.
+ }
+ else
+ {
+ // Keep a count, since the thread may change from NULL to non-NULL and
+ // we don't want to have unbalanced NoTrigger calls
+ InterlockedIncrement(&m_countNoTriggerGC);
+ INCONTRACT(pThread->BeginNoTriggerGC(__FILE__, __LINE__));
+ }
+ }
+}
+
+//=====================================================================
+// GC checks before lock release for avoiding deadlock scenarios.
+//=====================================================================
+void SimpleRWLock::PreLeave()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_countNoTriggerGC > 0)
+ {
+ LONG countNoTriggerGC = InterlockedDecrement(&m_countNoTriggerGC);
+ _ASSERTE(countNoTriggerGC >= 0);
+
+ Thread * pThread = GetThreadNULLOk();
+ if (pThread != NULL)
+ {
+ INCONTRACT(pThread->EndNoTriggerGC());
+ }
+ }
+}
+#endif //_DEBUG
diff --git a/src/vm/simplerwlock.hpp b/src/vm/simplerwlock.hpp
new file mode 100644
index 0000000000..2b9fcbeff7
--- /dev/null
+++ b/src/vm/simplerwlock.hpp
@@ -0,0 +1,266 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+#ifndef _SimpleRWLock_hpp_
+#define _SimpleRWLock_hpp_
+
+#ifndef BINDER
+#include "threads.h"
+#endif
+
+class SimpleRWLock;
+
+//-------------------------------------------------------------------------------------------
+// GC_MODE defines custom CONTRACTs for TryEnterRead and TryEnterWrite.
+//
+// Contract differs when acquiring the lock depending on its lock mode.
+//
+// GC/MODE
+// A SimpleRWLock can be one of the following modes. We only want to see the "PREEMPTIVE"
+// type used in new code. Other types, kept for legacy reasons, are listed in
+// order from least objectionable to most objectionable.
+//
+// PREEMPTIVE (equivalent to CRST's "normal")
+// This is the preferred type of crst. Enter() will force-switch your thread
+// into preemptive mode if it isn't already. Thus, the effective contract is:
+//
+// MODE_ANY
+// GC_TRIGGERS
+//
+//
+//
+// COOPERATIVE (equivalent to CRST_UNSAFE_COOPGC)
+// You can only attempt to acquire this crst if you're already in coop mode. It is
+// guaranteed no GC will occur while waiting to acquire the lock. While you hold
+// the lock, your thread is in a GCFORBID state.
+//
+// MODE_COOP
+// GC_NOTRIGGER
+//
+//
+//
+// COOPERATIVE_OR_PREEMPTIVE (equivalent to CRST_UNSAFE_ANYMODE)
+// You can attempt to acquire this in either mode. Entering the crst will not change
+// your thread mode but it will increment the GCNoTrigger count.
+//
+// MODE_ANY
+// GC_NOTRIGGER
+//------------------------------------------------------------------------------------------------
+enum GC_MODE {
+ COOPERATIVE,
+ PREEMPTIVE,
+ COOPERATIVE_OR_PREEMPTIVE
+};
+
+class SimpleRWLock
+{
+ // Allow Module access so we can use Offsetof on this class's private members during native image creation (determinism)
+ friend class Module;
+private:
+ BOOL IsWriterWaiting()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_WriterWaiting != 0;
+ }
+
+ void SetWriterWaiting()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_WriterWaiting = 1;
+ }
+
+ void ResetWriterWaiting()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_WriterWaiting = 0;
+ }
+
+ BOOL TryEnterRead();
+
+ BOOL TryEnterWrite();
+
+#ifdef ENABLE_CONTRACTS_IMPL
+ void CheckGCNoTrigger();
+#endif //ENABLE_CONTRACTS_IMPL
+
+ // lock used for R/W synchronization
+ Volatile<LONG> m_RWLock;
+
+ // Does this lock need to be taken in preemptive GC mode?
+ const GC_MODE m_gcMode;
+
+ // spin count for a reader waiting for a writer to release the lock
+ LONG m_spinCount;
+
+ // Used to prevent writers from being starved by readers.
+ // We currently do not prevent writers from starving readers, since writers
+ // are supposed to be rare.
+ BOOL m_WriterWaiting;
+
+#ifdef _DEBUG
+ // Used to check for deadlock situations.
+ Volatile<LONG> m_countNoTriggerGC;
+
+#ifdef _WIN64
+ // padding to ensure the struct size is a multiple of 8 bytes
+ UINT32 pad;
+#endif
+
+ void PostEnter ();
+ void PreEnter ();
+ void PreLeave ();
+#endif //_DEBUG
+
+#ifndef DACCESS_COMPILE
+ static void AcquireReadLock(SimpleRWLock *s) { LIMITED_METHOD_CONTRACT; s->EnterRead(); }
+ static void ReleaseReadLock(SimpleRWLock *s) { LIMITED_METHOD_CONTRACT; s->LeaveRead(); }
+
+ static void AcquireWriteLock(SimpleRWLock *s) { LIMITED_METHOD_CONTRACT; s->EnterWrite(); }
+ static void ReleaseWriteLock(SimpleRWLock *s) { LIMITED_METHOD_CONTRACT; s->LeaveWrite(); }
+#else // DACCESS_COMPILE
+ // in DAC builds, we don't actually acquire the lock, we just determine whether the LS
+ // already holds it. If so, we assume the data is inconsistent and throw an exception.
+ // Argument:
+ // input: s - the lock to be checked.
+ // Note: Throws
+ static void AcquireReadLock(SimpleRWLock *s)
+ {
+ SUPPORTS_DAC;
+ if (s->IsWriterLock())
+ {
+ ThrowHR(CORDBG_E_PROCESS_NOT_SYNCHRONIZED);
+ }
+ };
+ static void ReleaseReadLock(SimpleRWLock *s);
+
+ static void AcquireWriteLock(SimpleRWLock *s) { SUPPORTS_DAC; ThrowHR(CORDBG_E_TARGET_READONLY); };
+ static void ReleaseWriteLock(SimpleRWLock *s);
+#endif // DACCESS_COMPILE
+
+public:
+ SimpleRWLock (GC_MODE gcMode, LOCK_TYPE locktype)
+ : m_gcMode (gcMode)
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ m_RWLock = 0;
+#ifdef CLR_STANDALONE_BINDER
+ m_spinCount = 0;
+#else
+ m_spinCount = (GetCurrentProcessCpuCount() == 1) ? 0 : 4000;
+#endif
+ m_WriterWaiting = FALSE;
+
+#ifdef _DEBUG
+ m_countNoTriggerGC = 0;
+#endif
+ }
+
+#ifdef DACCESS_COMPILE
+ // Special empty CTOR for DAC. We still need to assign to const fields, but they won't actually be used.
+ SimpleRWLock()
+ : m_gcMode(COOPERATIVE_OR_PREEMPTIVE)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _DEBUG
+ m_countNoTriggerGC = 0;
+#endif //_DEBUG
+ }
+#endif
+
+#ifndef DACCESS_COMPILE
+ // Acquire the reader lock.
+ void EnterRead();
+
+ // Acquire the writer lock.
+ void EnterWrite();
+
+#ifdef BINDER
+ // Leave the reader lock.
+ void LeaveRead();
+ // Leave the writer lock.
+ void LeaveWrite();
+#else // !BINDER
+ // Leave the reader lock.
+ void LeaveRead()
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef _DEBUG
+ PreLeave ();
+#endif //_DEBUG
+ LONG RWLock;
+ RWLock = InterlockedDecrement(&m_RWLock);
+ _ASSERTE (RWLock >= 0);
+ DECTHREADLOCKCOUNT();
+ EE_LOCK_RELEASED(this);
+ }
+
+ // Leave the writer lock.
+ void LeaveWrite()
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef _DEBUG
+ PreLeave ();
+#endif //_DEBUG
+ LONG RWLock;
+ RWLock = InterlockedExchange (&m_RWLock, 0);
+ _ASSERTE(RWLock == -1);
+ DECTHREADLOCKCOUNT();
+ EE_LOCK_RELEASED(this);
+ }
+#endif // !BINDER
+
+#endif // DACCESS_COMPILE
+
+ typedef DacHolder<SimpleRWLock *, SimpleRWLock::AcquireReadLock, SimpleRWLock::ReleaseReadLock> SimpleReadLockHolder;
+ typedef DacHolder<SimpleRWLock *, SimpleRWLock::AcquireWriteLock, SimpleRWLock::ReleaseWriteLock> SimpleWriteLockHolder;
+
+#ifdef _DEBUG
+ BOOL LockTaken ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_RWLock != 0;
+ }
+
+ BOOL IsReaderLock ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_RWLock > 0;
+ }
+
+#endif
+
+ BOOL IsWriterLock ()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_RWLock < 0;
+ }
+
+};
+
+typedef SimpleRWLock::SimpleReadLockHolder SimpleReadLockHolder;
+typedef SimpleRWLock::SimpleWriteLockHolder SimpleWriteLockHolder;
+typedef DPTR(SimpleRWLock) PTR_SimpleRWLock;
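+
+// Usage sketch (illustrative; LOCK_TYPE_DEFAULT stands in for whatever
+// LOCK_TYPE value applies): the holders release on scope exit, keeping
+// Enter/Leave calls paired even on early returns.
+//
+//     SimpleRWLock lock(PREEMPTIVE, LOCK_TYPE_DEFAULT);
+//     {
+//         SimpleReadLockHolder read(&lock);   // EnterRead()
+//         // ... read shared state ...
+//     }                                       // LeaveRead() on scope exit
+//     {
+//         SimpleWriteLockHolder write(&lock); // EnterWrite()
+//         // ... mutate shared state ...
+//     }                                       // LeaveWrite()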
+
+#ifdef TEST_DATA_CONSISTENCY
+// used for test purposes. Determines if a crst is held.
+// Arguments:
+// input: pLock - the lock to test
+// Note: Throws if the lock is held
+
+FORCEINLINE void DebugTryRWLock(SimpleRWLock * pLock)
+{
+ SUPPORTS_DAC;
+
+ SimpleReadLockHolder rwLock(pLock);
+}
+#endif // TEST_DATA_CONSISTENCY
+#endif
diff --git a/src/vm/sourceline.cpp b/src/vm/sourceline.cpp
new file mode 100644
index 0000000000..c4a370b5bb
--- /dev/null
+++ b/src/vm/sourceline.cpp
@@ -0,0 +1,335 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#include "common.h"
+
+#include "sourceline.h"
+
+//////////////////////////////////////////////////////////////////
+
+#ifdef ENABLE_DIAGNOSTIC_SYMBOL_READING
+
+class CCallback : public IDiaLoadCallback
+{
+ int m_nRefCount;
+public:
+ CCallback() {
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }CONTRACTL_END;
+
+ m_nRefCount = 0;
+ }
+
+ //IUnknown
+ ULONG STDMETHODCALLTYPE AddRef() {
+ LIMITED_METHOD_CONTRACT;
+ m_nRefCount++;
+ return m_nRefCount;
+ }
+
+ ULONG STDMETHODCALLTYPE Release() {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ int nRefCount = --m_nRefCount;
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ if ( nRefCount == 0 )
+ delete this;
+ END_SO_INTOLERANT_CODE;
+
+ // return the cached count; reading m_nRefCount after "delete this" would be invalid
+ return nRefCount;
+ }
+
+ HRESULT STDMETHODCALLTYPE QueryInterface( REFIID rid, void **ppUnk ) {
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ if ( ppUnk == NULL ) {
+ return E_INVALIDARG;
+ }
+ if (rid == __uuidof( IDiaLoadCallback ) )
+ *ppUnk = (IDiaLoadCallback *)this;
+ else if (rid == __uuidof( IUnknown ) )
+ *ppUnk = (IUnknown *)this;
+ else
+ *ppUnk = NULL;
+
+ if ( *ppUnk != NULL ) {
+ AddRef();
+ return S_OK;
+ }
+
+ return E_NOINTERFACE;
+ }
+
+ HRESULT STDMETHODCALLTYPE NotifyDebugDir(
+ BOOL fExecutable,
+ DWORD cbData,
+ BYTE data[]) // really a const struct _IMAGE_DEBUG_DIRECTORY *
+ {
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return S_OK;
+ }
+
+ HRESULT STDMETHODCALLTYPE NotifyOpenDBG(
+ LPCOLESTR dbgPath,
+ HRESULT resultCode)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return S_OK;
+ }
+
+ HRESULT STDMETHODCALLTYPE NotifyOpenPDB(
+ LPCOLESTR pdbPath,
+ HRESULT resultCode)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return S_OK;
+ }
+
+ HRESULT STDMETHODCALLTYPE RestrictRegistryAccess() // return hr != S_OK to prevent querying the registry for symbol search paths
+ {
+ LIMITED_METHOD_CONTRACT;
+ return S_OK;
+ }
+
+ HRESULT STDMETHODCALLTYPE RestrictSymbolServerAccess() // return hr != S_OK to prevent accessing a symbol server
+ {
+ LIMITED_METHOD_CONTRACT;
+ return S_OK;
+ }
+
+ HRESULT STDMETHODCALLTYPE RestrictOriginalPathAccess() // return hr != S_OK to prevent querying the registry for symbol search paths
+ {
+ LIMITED_METHOD_CONTRACT;
+ return S_OK;
+ }
+
+ HRESULT STDMETHODCALLTYPE RestrictReferencePathAccess() // return hr != S_OK to prevent accessing a symbol server
+ {
+ LIMITED_METHOD_CONTRACT;
+ return S_OK;
+ }
+
+ HRESULT STDMETHODCALLTYPE RestrictDBGAccess()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return S_OK;
+ }
+};
+
+//////////////////////////////////////////////////////////////////
+
+bool SourceLine::LoadDataFromPdb( __in_z LPWSTR wszFilename )
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ HRESULT hResult;
+
+// CComPtr(IDiaDataSource) pDataSource;
+
+ hResult = CoInitialize(NULL);
+
+ if (FAILED(hResult)){
+ return FALSE;
+ }
+
+ // Obtain Access To The Provider
+ hResult = CoCreateInstance(CLSID_DiaSource,
+ NULL,
+ CLSCTX_INPROC_SERVER,
+ IID_IDiaDataSource,
+ (void **) &pSource_);
+
+ if (FAILED(hResult)){
+ return FALSE;
+ }
+
+ CCallback callback;
+ callback.AddRef();
+
+ if ( FAILED( pSource_->loadDataFromPdb( wszFilename ) )
+ && FAILED( pSource_->loadDataForExe( wszFilename, W("symsrv*symsrv.dll*\\\\symbols\\\\symbols"), &callback ) ) )
+ return FALSE;
+ if ( FAILED( pSource_->openSession(&pSession_) ) )
+ return FALSE;
+ if ( FAILED( pSession_->get_globalScope(&pGlobal_) ) )
+ return FALSE;
+
+ return TRUE;
+}
+
+//////////////////////////////////////////////////////////////////
+
+SourceLine::SourceLine( __in_z LPWSTR pszFileName )
+{
+ WRAPPER_NO_CONTRACT;
+ if (LoadDataFromPdb(pszFileName)) {
+ initialized_ = true;
+ }
+ else{
+ initialized_ = false;
+ }
+}
+
+//////////////////////////////////////////////////////////////////
+
+HRESULT SourceLine::GetSourceLine( DWORD dwFunctionToken, DWORD dwOffset, __out_ecount(dwFileNameMaxLen) __out_z LPWSTR pszFileName, DWORD dwFileNameMaxLen, PDWORD pdwLineNumber )
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ _ASSERTE(initialized_);
+
+ CComPtr(IDiaSymbol) pSymbol;
+ HRESULT hResult = pSession_->findSymbolByToken(dwFunctionToken, SymTagFunction, &pSymbol);
+
+ if( SUCCEEDED(hResult) && pSymbol != NULL) {
+
+ ULONGLONG length;
+ pSymbol->get_length(&length);
+
+ DWORD rva;
+ CComPtr(IDiaEnumLineNumbers) pLines;
+
+ if(SUCCEEDED(pSymbol->get_relativeVirtualAddress(&rva))) {
+
+ DWORD initialOffset;
+ pSymbol->get_addressOffset(&initialOffset);
+
+ DWORD isect;
+ pSymbol->get_addressSection(&isect);
+
+ hResult = pSession_->findLinesByAddr(isect, initialOffset+dwOffset, 1, &pLines);
+ if( SUCCEEDED(hResult) ){
+
+ CComPtr(IDiaLineNumber) pLine;
+
+ hResult = pLines->Item( 0, &pLine );
+
+ if(SUCCEEDED(hResult)){
+
+ pLine->get_lineNumber(pdwLineNumber);
+
+ CComPtr(IDiaSourceFile) pSourceFile;
+ pLine->get_sourceFile( &pSourceFile );
+
+                    BSTR sourceName;
+                    pSourceFile->get_fileName( &sourceName );
+
+                    wcsncpy_s( pszFileName, dwFileNameMaxLen, sourceName, _TRUNCATE );
+                    SysFreeString( sourceName );    // get_fileName allocates the BSTR; free it to avoid a leak
+ }
+ }
+ }
+ }
+
+ return hResult;
+}
+
+//////////////////////////////////////////////////////////////////
+
+HRESULT SourceLine::GetLocalName( DWORD dwFunctionToken, DWORD dwSlot, __out_ecount(dwNameMaxLen) __out_z LPWSTR pszName, DWORD dwNameMaxLen )
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ CComPtr(IDiaSymbol) pSymbol;
+ HRESULT hResult = pSession_->findSymbolByToken(dwFunctionToken, SymTagFunction, &pSymbol);
+
+ if( SUCCEEDED(hResult) && pSymbol != NULL ) {
+
+ ULONGLONG length;
+ pSymbol->get_length(&length);
+
+ DWORD rva;
+// CComPtr(IDiaEnumLineNumbers) pLines;
+
+ hResult = pSymbol->get_relativeVirtualAddress(&rva);
+
+ if(SUCCEEDED(hResult)) {
+
+ CComPtr( IDiaSymbol ) pBlock;
+ hResult = pSession_->findSymbolByRVA( rva, SymTagBlock, &pBlock );
+
+ if( SUCCEEDED(hResult) && pBlock != NULL ) {
+
+ ULONG celt = 0;
+
+ CComPtr(IDiaSymbol) pLocalSymbol = NULL;
+ CComPtr( IDiaEnumSymbols ) pEnum;
+ hResult = pBlock->findChildren( SymTagData, NULL, nsNone, &pEnum );
+
+ if( SUCCEEDED(hResult) ) {
+
+ //
+ // Find function local by slot
+ //
+ while (SUCCEEDED(hResult = pEnum->Next(1, &pLocalSymbol, &celt)) && celt == 1) {
+
+ DWORD dwThisSlot;
+ pLocalSymbol->get_slot( &dwThisSlot );
+
+ if( dwThisSlot == dwSlot ) {
+
+ BSTR name = NULL;
+ hResult = pLocalSymbol->get_name(&name);
+
+                        wcsncpy_s( pszName, dwNameMaxLen, name, _TRUNCATE );
+                        SysFreeString( name );      // get_name allocates the BSTR; free it to avoid a leak
+
+                        return S_OK;
+ }
+
+ pLocalSymbol = 0;
+ }
+ }
+ }
+ }
+ }
+
+ return hResult;
+}
+
+#else // !ENABLE_DIAGNOSTIC_SYMBOL_READING
+SourceLine::SourceLine( __in_z LPWSTR pszFileName )
+{
+ LIMITED_METHOD_CONTRACT;
+ initialized_ = false;
+}
+
+HRESULT SourceLine::GetSourceLine( DWORD dwFunctionToken, DWORD dwOffset, __out_ecount(dwFileNameMaxLen) __out_z LPWSTR pszFileName, DWORD dwFileNameMaxLen, PDWORD pdwLineNumber )
+{
+ LIMITED_METHOD_CONTRACT;
+ return E_NOTIMPL;
+}
+
+HRESULT SourceLine::GetLocalName( DWORD dwFunctionToken, DWORD dwSlot, __out_ecount(dwNameMaxLen) __out_z LPWSTR pszName, DWORD dwNameMaxLen )
+{
+ LIMITED_METHOD_CONTRACT;
+ return E_NOTIMPL;
+}
+#endif // ENABLE_DIAGNOSTIC_SYMBOL_READING
+
diff --git a/src/vm/sourceline.h b/src/vm/sourceline.h
new file mode 100644
index 0000000000..c3f4665fde
--- /dev/null
+++ b/src/vm/sourceline.h
@@ -0,0 +1,45 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef __SOURCELINE_H__
+#define __SOURCELINE_H__
+
+#ifdef ENABLE_DIAGNOSTIC_SYMBOL_READING
+#include "dia2.h"
+#endif // ENABLE_DIAGNOSTIC_SYMBOL_READING
+
+#define CComPtr(x) x*
+
+class SourceLine
+{
+ bool initialized_;
+
+#ifdef ENABLE_DIAGNOSTIC_SYMBOL_READING
+ CComPtr(IDiaDataSource) pSource_;
+ CComPtr(IDiaSymbol) pGlobal_;
+ CComPtr(IDiaSession) pSession_;
+
+ bool LoadDataFromPdb( __in_z LPWSTR wszFilename );
+#endif // ENABLE_DIAGNOSTIC_SYMBOL_READING
+
+public:
+ SourceLine( __in_z LPWSTR pszFileName );
+
+ bool IsInitialized() { return initialized_; }
+
+ //
+ // Given function token (methoddef) and offset, return filename and line number
+ //
+ HRESULT GetSourceLine( DWORD dwFunctionToken, DWORD dwOffset, __out_ecount(dwFileNameMaxLen) __out_z LPWSTR wszFileName, DWORD dwFileNameMaxLen, PDWORD pdwLineNumber );
+
+ //
+ // Given function token (methoddef) and slot, return name of the local variable
+ //
+ HRESULT GetLocalName( DWORD dwFunctionToken, DWORD dwSlot, __out_ecount(dwNameMaxLen) __out_z LPWSTR wszName, DWORD dwNameMaxLen );
+};
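+
+// Usage sketch (illustrative only; the PDB path, method token, and offset
+// below are hypothetical values, not part of this header):
+//
+//   WCHAR wszPdb[] = W("d:\\symbols\\foo.pdb");
+//   SourceLine sl(wszPdb);
+//   if (sl.IsInitialized())
+//   {
+//       WCHAR wszFile[MAX_PATH];
+//       DWORD dwLine;
+//       if (SUCCEEDED(sl.GetSourceLine(0x06000001 /* methoddef */, 0 /* offset */,
+//                                      wszFile, MAX_PATH, &dwLine)))
+//       {
+//           // wszFile / dwLine now identify the source location
+//       }
+//   }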
+
+#endif // __SOURCELINE_H__
diff --git a/src/vm/specialstatics.h b/src/vm/specialstatics.h
new file mode 100644
index 0000000000..ccdd996d09
--- /dev/null
+++ b/src/vm/specialstatics.h
@@ -0,0 +1,41 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*===========================================================================
+**
+** File: SpecialStatics.h
+**
+**
+** Purpose: Defines the data structures for context relative statics.
+**
+**
+**
+=============================================================================*/
+#ifndef _H_SPECIALSTATICS_
+#define _H_SPECIALSTATICS_
+
+// Data structure for storing special context relative static data.
+typedef struct _STATIC_DATA
+{
+ DWORD cElem;
+ PTR_VOID dataPtr[0];
+
+#ifdef DACCESS_COMPILE
+ static ULONG32 DacSize(TADDR addr)
+ {
+ DWORD cElem = *PTR_DWORD(addr);
+ return offsetof(struct _STATIC_DATA, dataPtr) +
+ cElem * sizeof(TADDR);
+ }
+
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+} STATIC_DATA;
+typedef SPTR(STATIC_DATA) PTR_STATIC_DATA;
+
+typedef SimpleList<OBJECTHANDLE> ObjectHandleList;
+
+#endif
diff --git a/src/vm/spinlock.cpp b/src/vm/spinlock.cpp
new file mode 100644
index 0000000000..2360d5cf0c
--- /dev/null
+++ b/src/vm/spinlock.cpp
@@ -0,0 +1,513 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// spinlock.cpp
+//
+
+//
+
+
+#include "common.h"
+
+#include "slist.h"
+#include "spinlock.h"
+#include "threads.h"
+#include "corhost.h"
+
+enum
+{
+ BACKOFF_LIMIT = 1000 // used in spin to acquire
+};
+
+#ifdef _DEBUG
+
+ // profile information
+ULONG SpinLockProfiler::s_ulBackOffs = 0;
+ULONG SpinLockProfiler::s_ulCollisons [LOCK_TYPE_DEFAULT + 1] = { 0 };
+ULONG SpinLockProfiler::s_ulSpins [LOCK_TYPE_DEFAULT + 1] = { 0 };
+
+#endif
+
+SpinLock::SpinLock()
+{
+ // Global SpinLock variables will cause the constructor to be
+ // called during DllInit, which means we cannot use full contracts
+ // because we have not called InitUtilCode yet.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ m_hostLock = NULL;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ m_Initialized = UnInitialized;
+}
+
+
+SpinLock::~SpinLock()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (CLRSyncHosted() && m_hostLock) {
+ m_hostLock->Release();
+ m_hostLock = NULL;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+
+void SpinLock::Init(LOCK_TYPE type, bool RequireCoopGC)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Disallow creation of locks before EE starts. But only complain if we end up
+ // being hosted, since such locks have escaped the hosting net and will cause
+ // AVs on next use.
+#ifdef _DEBUG
+ static bool fEarlyInit; // = false
+
+ if (!(g_fEEStarted || g_fEEInit))
+ {
+ if (!CLRSyncHosted())
+ fEarlyInit = true;
+ }
+
+ // If we are now hosted, we better not have *ever* created some locks that are
+ // not known to our host.
+ _ASSERTE(!fEarlyInit || !CLRSyncHosted());
+
+#endif
+
+ if (m_Initialized == Initialized)
+ {
+ _ASSERTE (type == m_LockType);
+ _ASSERTE (RequireCoopGC == m_requireCoopGCMode);
+
+ // We have initialized this spinlock.
+ return;
+ }
+
+ while (TRUE)
+ {
+ LONG curValue = FastInterlockCompareExchange((LONG*)&m_Initialized, BeingInitialized, UnInitialized);
+ if (curValue == Initialized)
+ {
+ return;
+ }
+ else if (curValue == UnInitialized)
+ {
+ // We are the first to initialize the lock
+ break;
+ }
+ else
+ {
+ __SwitchToThread(10, CALLER_LIMITS_SPINNING);
+ }
+ }
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSyncManager *pManager = CorHost2::GetHostSyncManager();
+ _ASSERTE((pManager == NULL && m_lock == 0) ||
+ (pManager && m_hostLock == NULL));
+
+ if (pManager)
+ {
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pManager->CreateCrst(&m_hostLock);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (hr != S_OK) {
+ _ASSERTE (hr == E_OUTOFMEMORY);
+ _ASSERTE (m_Initialized == BeingInitialized);
+ m_Initialized = UnInitialized;
+ ThrowOutOfMemory();
+ }
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ m_lock = 0;
+ }
+
+#ifdef _DEBUG
+ m_LockType = type;
+ m_requireCoopGCMode = RequireCoopGC;
+#endif
+
+ _ASSERTE (m_Initialized == BeingInitialized);
+ m_Initialized = Initialized;
+}
+
+#ifdef _DEBUG
+BOOL SpinLock::OwnedByCurrentThread()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ return m_holdingThreadId.IsSameThread();
+}
+#endif
+
+DEBUG_NOINLINE void SpinLock::AcquireLock(SpinLock *s, Thread * pThread)
+{
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ s->GetLock(pThread);
+}
+
+DEBUG_NOINLINE void SpinLock::ReleaseLock(SpinLock *s, Thread * pThread)
+{
+ SCAN_SCOPE_END;
+
+ s->FreeLock(pThread);
+}
+
+
+void SpinLock::GetLock(Thread* pThread)
+{
+ CONTRACTL
+ {
+ DISABLED(THROWS); // need to rewrite spin locks to no-throw.
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_Initialized == Initialized);
+
+#ifdef _DEBUG
+ dbg_PreEnterLock();
+#endif
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (CLRSyncHosted())
+ {
+ DWORD option = WAIT_NOTINDEADLOCK;
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(pThread);
+ hr = m_hostLock->Enter(option);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ _ASSERTE(hr == S_OK);
+ EE_LOCK_TAKEN(this);
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ // Not CLR Sync hosted, so we use interlocked operations on
+ // m_lock to acquire the lock. This will automatically cause
+ // us to call EE_LOCK_TAKEN(this);
+ if (!GetLockNoWait())
+ {
+ SpinToAcquire();
+ }
+ }
+
+ INCTHREADLOCKCOUNTTHREAD(pThread);
+#ifdef _DEBUG
+ m_holdingThreadId.SetThreadId();
+ dbg_EnterLock();
+#endif
+}
+
+//----------------------------------------------------------------------------
+// SpinLock::GetLockNoWait
+// Uses an interlocked exchange for a fast, non-blocking lock acquire.
+
+BOOL SpinLock::GetLockNoWait()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (CLRSyncHosted())
+ {
+ BOOL result;
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = m_hostLock->TryEnter(WAIT_NOTINDEADLOCK, &result);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ _ASSERTE(hr == S_OK);
+
+ if (result)
+ {
+ EE_LOCK_TAKEN(this);
+ }
+
+ return result;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+        if (VolatileLoad(&m_lock) == 0 && FastInterlockExchange (&m_lock, 1) == 0)
+        {
+            EE_LOCK_TAKEN(this);
+            return TRUE;
+        }
+        return FALSE;
+ }
+}
+
+//----------------------------------------------------------------------------
+// SpinLock::FreeLock
+// Release the spinlock
+//
+void SpinLock::FreeLock(Thread* pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_Initialized == Initialized);
+
+#ifdef _DEBUG
+ _ASSERTE(OwnedByCurrentThread());
+ m_holdingThreadId.ResetThreadId();
+ dbg_LeaveLock();
+#endif
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (CLRSyncHosted())
+ {
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(pThread);
+ hr = m_hostLock->Leave();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ _ASSERTE (hr == S_OK);
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ VolatileStore(&m_lock, (LONG)0);
+ }
+
+ DECTHREADLOCKCOUNTTHREAD(pThread);
+ EE_LOCK_RELEASED(this);
+
+} // SpinLock::FreeLock ()
+
+
+//----------------------------------------------------------------------------
+// SpinLock::SpinToAcquire, non-inline function, called from inline Acquire
+//
+// Spin waiting for a spinlock to become free.
+//
+//
+void
+SpinLock::SpinToAcquire()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (!CLRSyncHosted());
+
+ DWORD backoffs = 0;
+ ULONG ulSpins = 0;
+
+ while (true)
+ {
+ for (unsigned i = ulSpins+10000;
+ ulSpins < i;
+ ulSpins++)
+ {
+ // Note: Must use Volatile to ensure the lock is
+ // refetched from memory.
+ //
+ if (VolatileLoad(&m_lock) == 0)
+ {
+ break;
+ }
+            YieldProcessor();       // indicate to the processor that we are spinning
+ }
+
+ // Try the inline atomic test again.
+ //
+ if (GetLockNoWait())
+ {
+ // EE_LOCK_TAKEN(this) has already been called by GetLockNoWait
+ break;
+ }
+
+ //backoff
+ __SwitchToThread(0, backoffs++);
+ }
+
+#ifdef _DEBUG
+ //profile info
+ SpinLockProfiler::IncrementCollisions (m_LockType);
+ SpinLockProfiler::IncrementSpins (m_LockType, ulSpins);
+ SpinLockProfiler::IncrementBackoffs (backoffs);
+#endif
+
+} // SpinLock::SpinToAcquire ()
+
+#ifdef _DEBUG
+// If a GC is not allowed when we enter the lock, we'd better not do anything inside
+// the lock that could provoke a GC. Otherwise other threads attempting to block
+// (which are presumably in the same GC mode as this one) will block. This will cause
+// a deadlock if we do attempt a GC because we can't suspend blocking threads and we
+// can't release the spin lock.
+void SpinLock::dbg_PreEnterLock()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ Thread* pThread = GetThread();
+ if (pThread)
+ {
+        // SpinLocks cannot be nested.
+ _ASSERTE ((pThread->m_StateNC & Thread::TSNC_OwnsSpinLock) == 0);
+
+ pThread->SetThreadStateNC(Thread::TSNC_OwnsSpinLock);
+
+ if (!pThread->PreemptiveGCDisabled())
+ _ASSERTE(!m_requireCoopGCMode);
+ }
+}
+
+void SpinLock::dbg_EnterLock()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ Thread* pThread = GetThread();
+ if (pThread)
+ {
+ INCONTRACT(pThread->BeginNoTriggerGC(__FILE__, __LINE__));
+ }
+}
+
+void SpinLock::dbg_LeaveLock()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ Thread* pThread = GetThread();
+ if (pThread)
+ {
+ _ASSERTE ((pThread->m_StateNC & Thread::TSNC_OwnsSpinLock) != 0);
+ pThread->ResetThreadStateNC(Thread::TSNC_OwnsSpinLock);
+ INCONTRACT(pThread->EndNoTriggerGC());
+ }
+}
+
+
+void SpinLockProfiler::InitStatics ()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ s_ulBackOffs = 0;
+ memset (s_ulCollisons, 0, sizeof (s_ulCollisons));
+ memset (s_ulSpins, 0, sizeof (s_ulSpins));
+}
+
+void SpinLockProfiler::IncrementSpins (LOCK_TYPE type, ULONG value)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(type <= LOCK_TYPE_DEFAULT);
+ s_ulSpins [type] += value;
+}
+
+void SpinLockProfiler::IncrementCollisions (LOCK_TYPE type)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ ++s_ulCollisons [type];
+}
+
+void SpinLockProfiler::IncrementBackoffs (ULONG value)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ s_ulBackOffs += value;
+}
+
+void SpinLockProfiler::DumpStatics()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ //<TODO>todo </TODO>
+}
+
+#endif // _DEBUG
+
+// End of file: spinlock.cpp
diff --git a/src/vm/spinlock.h b/src/vm/spinlock.h
new file mode 100644
index 0000000000..b3aced393a
--- /dev/null
+++ b/src/vm/spinlock.h
@@ -0,0 +1,307 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//----------------------------------------------------------------------------
+// spinlock.h: defines the spin lock class and a profiler class
+//
+
+//
+//----------------------------------------------------------------------------
+
+
+//#ifndef _H_UTIL
+//#error I am a part of util.hpp Please don't include me alone !
+//#endif
+
+
+
+#ifndef _H_SPINLOCK_
+#define _H_SPINLOCK_
+
+#include <stddef.h>
+
+
+// #SwitchToThreadSpinning
+//
+// If you call __SwitchToThread in a loop waiting for a condition to be met,
+// it is critical that you insert periodic sleeps. This is because the thread
+// you are waiting for to set that condition may need your CPU, and simply
+// calling __SwitchToThread(0) will NOT guarantee that it gets a chance to run.
+// If there are other runnable threads of higher priority, or even if there
+// aren't and it is in another processor's queue, you will be spinning a very
+// long time.
+//
+// To force all callers to consider this issue and to avoid each having to
+// duplicate the same backoff code, __SwitchToThread takes a required second
+// parameter. If you want it to handle backoff for you, this parameter should
+// be the number of successive calls you have made to __SwitchToThread (a loop
+// count). If you want to take care of backing off yourself, you can pass
+// CALLER_LIMITS_SPINNING. There are three valid cases for doing this:
+//
+// - You count iterations and induce a sleep periodically
+// - The number of consecutive __SwitchToThreads is limited
+// - Your call to __SwitchToThread includes a non-zero sleep duration
+//
+// Lastly, to simplify this requirement for the following common coding pattern:
+//
+// while (!condition)
+// SwitchToThread
+//
+// you can use the YIELD_WHILE macro.
+
+#define CALLER_LIMITS_SPINNING 0
+
+#define YIELD_WHILE(condition) \
+ { \
+ DWORD __dwSwitchCount = 0; \
+ while (condition) \
+ { \
+ __SwitchToThread(0, ++__dwSwitchCount); \
+ } \
+ }
+
+// non-zero return value if this function causes the OS to switch to another thread
+BOOL __SwitchToThread (DWORD dwSleepMSec, DWORD dwSwitchCount);
+BOOL __DangerousSwitchToThread (DWORD dwSleepMSec, DWORD dwSwitchCount, BOOL goThroughOS);
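+
+// Usage sketch (illustrative only; 'g_fSomeFlag' is a hypothetical flag, not
+// part of this header). Both forms satisfy the backoff contract above:
+//
+//   // 1) pass a running loop count so __SwitchToThread can back off for you
+//   DWORD dwSwitchCount = 0;
+//   while (!g_fSomeFlag)
+//       __SwitchToThread(0, ++dwSwitchCount);
+//
+//   // 2) the equivalent shorthand using the macro
+//   YIELD_WHILE(!g_fSomeFlag);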
+
+
+//----------------------------------------------------------------------------
+// class: DangerousNonHostedSpinLock
+//
+// PURPOSE:
+// A simple wrapper around the spinloop without host interactions. To be
+// used for short-time locking in the VM, in particular when the runtime
+// has not been started yet.
+//
+//----------------------------------------------------------------------------
+class DangerousNonHostedSpinLock
+{
+public:
+ FORCEINLINE DangerousNonHostedSpinLock() { LIMITED_METHOD_CONTRACT; m_value = 0; }
+
+private:
+ // Intentionally unimplemented - prevents the compiler from generating default copy ctor.
+ DangerousNonHostedSpinLock(DangerousNonHostedSpinLock const & other);
+
+ FORCEINLINE void Acquire()
+ {
+ WRAPPER_NO_CONTRACT;
+ YIELD_WHILE(FastInterlockExchange(&m_value, 1) == 1);
+ }
+
+ FORCEINLINE BOOL TryAcquire()
+ {
+ WRAPPER_NO_CONTRACT;
+ return (FastInterlockExchange(&m_value, 1) == 0);
+ }
+
+ FORCEINLINE void Release()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_value = 0;
+ }
+
+ inline static void AcquireLock(DangerousNonHostedSpinLock *pLock) { WRAPPER_NO_CONTRACT; pLock->Acquire(); }
+ inline static BOOL TryAcquireLock(DangerousNonHostedSpinLock *pLock) { WRAPPER_NO_CONTRACT; return pLock->TryAcquire(); }
+ inline static void ReleaseLock(DangerousNonHostedSpinLock *pLock) { WRAPPER_NO_CONTRACT; pLock->Release(); }
+
+ Volatile<LONG> m_value;
+
+public:
+ BOOL IsHeld()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (BOOL)m_value;
+ }
+
+ typedef Holder<DangerousNonHostedSpinLock *, DangerousNonHostedSpinLock::AcquireLock, DangerousNonHostedSpinLock::ReleaseLock> Holder;
+ typedef ConditionalStateHolder<DangerousNonHostedSpinLock *, DangerousNonHostedSpinLock::TryAcquireLock, DangerousNonHostedSpinLock::ReleaseLock> TryHolder;
+};
+
+typedef DangerousNonHostedSpinLock::Holder DangerousNonHostedSpinLockHolder;
+typedef DangerousNonHostedSpinLock::TryHolder DangerousNonHostedSpinLockTryHolder;
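+
+// Usage sketch (illustrative only; 's_lock' is a hypothetical lock, not part
+// of this header). The holder acquires in its constructor and releases in its
+// destructor, so the lock is freed even if the guarded code throws:
+//
+//   static DangerousNonHostedSpinLock s_lock;
+//   {
+//       DangerousNonHostedSpinLockHolder holder(&s_lock);
+//       // ... short critical section ...
+//   }   // released here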
+
+
+class SpinLock;
+
+// Lock Types, used in profiling
+//
+enum LOCK_TYPE
+{
+ LOCK_PLUSWRAPPER_CACHE = 1, // change
+ LOCK_FCALL = 2, // leave, but rank to tip
+ LOCK_COMCTXENTRYCACHE = 3, // creates events, allocs memory, SEH, etc.
+#ifdef FEATURE_COMINTEROP
+ LOCK_COMCALL = 4,
+#endif
+ LOCK_REFLECTCACHE = 5,
+ LOCK_CORMAP = 7,
+ LOCK_TYPE_DEFAULT = 8
+};
+
+//----------------------------------------------------------------------------
+// class: SpinLock
+//
+// PURPOSE:
+// spinlock class that contains constructor and out of line spinloop.
+//
+//----------------------------------------------------------------------------
+class SpinLock
+{
+
+private:
+ union {
+        // m_lock has to be the first data member in the class
+ LONG m_lock; // LONG used in interlocked exchange
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostCrst *m_hostLock;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ };
+
+ enum SpinLockState
+ {
+ UnInitialized,
+ BeingInitialized,
+ Initialized
+ };
+
+ Volatile<SpinLockState> m_Initialized; // To verify initialized
+ // And initialize once
+
+#ifdef _DEBUG
+ LOCK_TYPE m_LockType; // lock type to track statistics
+
+ // Check for dead lock situation.
+ bool m_requireCoopGCMode;
+ EEThreadId m_holdingThreadId;
+#endif
+
+public:
+ SpinLock ();
+ ~SpinLock ();
+
+    // Init method: initialize lock and _DEBUG flags
+    void Init(LOCK_TYPE type, bool RequireCoopGC = false);
+
+ //-----------------------------------------------------------------
+ // Is the current thread the owner?
+ //-----------------------------------------------------------------
+#ifdef _DEBUG
+ BOOL OwnedByCurrentThread();
+#endif
+
+private:
+ void SpinToAcquire (); // out of line call spins
+
+#ifdef _DEBUG
+ void dbg_PreEnterLock();
+ void dbg_EnterLock();
+ void dbg_LeaveLock();
+#endif
+
+ // The following 5 APIs must remain private. We want all entry/exit code to
+ // occur via holders, so that exceptions will be sure to release the lock.
+private:
+ void GetLock(Thread * pThread); // Acquire lock, blocks if unsuccessful
+ BOOL GetLockNoWait(); // Acquire lock, fail-fast
+ void FreeLock(Thread * pThread); // Release lock
+
+public:
+ static void AcquireLock(SpinLock *s, Thread * pThread);
+ static void ReleaseLock(SpinLock *s, Thread * pThread);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+#define SPINLOCK_THREAD_PARAM_ONLY_IN_SOME_BUILDS m_pThread
+#else
+#define SPINLOCK_THREAD_PARAM_ONLY_IN_SOME_BUILDS NULL
+#endif
+
+ class Holder
+ {
+ SpinLock * m_pSpinLock;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ Thread * m_pThread;
+#endif
+ public:
+ Holder(SpinLock * s) :
+ m_pSpinLock(s)
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ , m_pThread(GetThread())
+#endif
+ {
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ m_pSpinLock->GetLock(SPINLOCK_THREAD_PARAM_ONLY_IN_SOME_BUILDS);
+ }
+
+ ~Holder()
+ {
+ SCAN_SCOPE_END;
+
+ m_pSpinLock->FreeLock(SPINLOCK_THREAD_PARAM_ONLY_IN_SOME_BUILDS);
+ }
+ };
+};
+
+
+typedef SpinLock::Holder SpinLockHolder;
+#define TAKE_SPINLOCK_AND_DONOT_TRIGGER_GC(lock) \
+ SpinLockHolder __spinLockHolder(lock);\
+ GCX_NOTRIGGER ();
+
+#define ACQUIRE_SPINLOCK_NO_HOLDER(lock, thread)\
+{ \
+ SpinLock::AcquireLock(lock, thread); \
+ GCX_NOTRIGGER(); \
+ CANNOTTHROWCOMPLUSEXCEPTION(); \
+ STATIC_CONTRACT_NOTHROW; \
+
+
+#define RELEASE_SPINLOCK_NO_HOLDER(lock, thread)\
+ SpinLock::ReleaseLock(lock, thread); \
+} \
+
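+// Usage sketch (illustrative only; 's_lock' is a hypothetical lock, not part
+// of this header). A SpinLock must be Init'ed once before first use and is
+// normally entered through the holder so it is released on all exit paths:
+//
+//   static SpinLock s_lock;
+//   s_lock.Init(LOCK_TYPE_DEFAULT);        // once, e.g. during startup
+//   {
+//       SpinLockHolder holder(&s_lock);
+//       // ... short critical section; must not trigger a GC ...
+//   }   // FreeLock runs here
+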
+__inline BOOL IsOwnerOfSpinLock (LPVOID lock)
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef _DEBUG
+ return ((SpinLock*)lock)->OwnedByCurrentThread();
+#else
+    // This function should not be called in a free build.
+ DebugBreak();
+ return TRUE;
+#endif
+}
+
+#ifdef _DEBUG
+//----------------------------------------------------------------------------
+// class SpinLockProfiler
+// to track contention, useful for profiling
+//
+//----------------------------------------------------------------------------
+class SpinLockProfiler
+{
+    // Contention statistics, indexed by LOCK_TYPE.
+    //
+ static ULONG s_ulBackOffs;
+ static ULONG s_ulCollisons [LOCK_TYPE_DEFAULT + 1];
+ static ULONG s_ulSpins [LOCK_TYPE_DEFAULT + 1];
+
+public:
+
+ static void InitStatics ();
+
+ static void IncrementSpins (LOCK_TYPE type, ULONG value);
+
+ static void IncrementCollisions (LOCK_TYPE type);
+
+ static void IncrementBackoffs (ULONG value);
+
+ static void DumpStatics();
+
+};
+
+#endif // ifdef _DEBUG
+#endif // ifndef _H_SPINLOCK_
diff --git a/src/vm/stackbuildersink.cpp b/src/vm/stackbuildersink.cpp
new file mode 100644
index 0000000000..bcd8d62f50
--- /dev/null
+++ b/src/vm/stackbuildersink.cpp
@@ -0,0 +1,700 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: StackBuilderSink.cpp
+//
+
+//
+// Purpose: Native implementation for System.Runtime.Remoting.Messaging.StackBuilderSink
+//
+
+
+#include "common.h"
+
+#ifdef FEATURE_REMOTING
+
+#include "excep.h"
+#include "message.h"
+#include "stackbuildersink.h"
+#include "dbginterface.h"
+#include "remoting.h"
+#include "profilepriv.h"
+#include "class.h"
+
+struct ArgInfo
+{
+ PBYTE dataLocation;
+ INT32 dataSize;
+ TypeHandle dataTypeHandle;
+ BYTE dataType;
+ BYTE byref;
+};
+
+
+//+----------------------------------------------------------------------------
+//
+// Method: CStackBuilderSink::PrivateProcessMessage, public
+//
+// Synopsis: Builds the stack and calls an object
+//
+//
+//+----------------------------------------------------------------------------
+FCIMPL5(Object*, CStackBuilderSink::PrivateProcessMessage,
+ Object* pSBSinkUNSAFE,
+ MethodDesc* pMD,
+ PTRArray* pArgsUNSAFE,
+ Object* pServerUNSAFE,
+ PTRARRAYREF* ppVarOutParams)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(!pMD->IsGenericMethodDefinition());
+ PRECONDITION(pMD->IsRuntimeMethodHandle());
+ }
+ CONTRACTL_END;
+
+ struct _gc
+ {
+ PTRARRAYREF pArgs;
+ OBJECTREF pServer;
+ OBJECTREF pSBSink;
+ OBJECTREF ret;
+ } gc;
+ gc.pArgs = (PTRARRAYREF) pArgsUNSAFE;
+ gc.pServer = (OBJECTREF) pServerUNSAFE;
+ gc.pSBSink = (OBJECTREF) pSBSinkUNSAFE;
+ gc.ret = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
+
+ // pMD->IsStatic() is SO_INTOLERANT.
+ // Either pServer is non-null or the method is static (but not both)
+ _ASSERTE( (pServerUNSAFE!=NULL) == !(pMD->IsStatic()) );
+
+ LOG((LF_REMOTING, LL_INFO10, "CStackBuilderSink::PrivateProcessMessage\n"));
+
+ MethodDesc *pResolvedMD = pMD;
+ // Check if this is an interface invoke, if yes, then we have to find the
+ // real method descriptor on the class of the server object.
+ if(pMD->GetMethodTable()->IsInterface())
+ {
+ _ASSERTE(gc.pServer != NULL);
+
+ // NOTE: This method can trigger GC
+ // The last parameter (true) causes the method to take into account that
+ // the object passed in is a TP and try to resolve the interface MD into
+ // a server MD anyway (normally the call short circuits thunking objects
+ // and just returns the interface MD unchanged).
+ MethodTable *pRealMT = gc.pServer->GetTrueMethodTable();
+
+#ifdef FEATURE_COMINTEROP
+ if (pRealMT->IsComObjectType())
+ pResolvedMD = pRealMT->GetMethodDescForComInterfaceMethod(pMD, true);
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ if (pRealMT->ImplementsInterface(pMD->GetMethodTable()))
+ {
+ pResolvedMD = pRealMT->GetMethodDescForInterfaceMethod(TypeHandle(pMD->GetMethodTable()), pMD);
+
+ // If the method is generic then we have more work to do --
+ // we'll get back the generic method descriptor and we'll have
+ // to load the version with the right instantiation (get the
+ // instantiation from the interface method).
+ if (pResolvedMD->HasMethodInstantiation())
+ {
+ _ASSERTE(pResolvedMD->IsGenericMethodDefinition());
+ _ASSERTE(pMD->GetNumGenericMethodArgs() == pResolvedMD->GetNumGenericMethodArgs());
+
+ pResolvedMD = MethodDesc::FindOrCreateAssociatedMethodDesc(pResolvedMD,
+ pRealMT,
+ FALSE,
+ pMD->GetMethodInstantiation(),
+ FALSE);
+
+ _ASSERTE(!pResolvedMD->ContainsGenericVariables());
+ }
+ }
+ else
+ pResolvedMD = NULL;
+ }
+
+ if(!pResolvedMD)
+ {
+ MAKE_WIDEPTR_FROMUTF8(wName, pMD->GetName());
+ COMPlusThrow(kMissingMethodException, IDS_EE_MISSING_METHOD, wName);
+ }
+ }
+
+ // <TODO>This looks a little dodgy for generics: pResolvedMD has been interface-resolved but not
+ // virtual-resolved. So we seem to be taking the signature of a
+ // half-resolved-virtual-call. But the MetaSig
+ // is only used for GC purposes, and thus is probably OK: although the
+ // metadata for the signature of a call may be different
+ // as we move to base classes, the instantiated version
+ // of the signature will still be the same
+ // at both the callsite and the target). </TODO>
+ MetaSig mSig(pResolvedMD);
+
+ // get the target depending on whether the method is virtual or non-virtual
+ // like a constructor, private or final method
+ PCODE pTarget = pResolvedMD->GetCallTarget(&(gc.pServer));
+
+ VASigCookie *pCookie = NULL;
+ _ASSERTE(NULL != pTarget);
+
+ // this function does the work
+ ::CallDescrWithObjectArray(
+ gc.pServer,
+ pResolvedMD,
+ //pRM,
+ pTarget,
+ &mSig,
+ pCookie,
+ gc.pArgs,
+ &gc.ret,
+ ppVarOutParams);
+
+ LOG((LF_REMOTING, LL_INFO10, "CStackBuilderSink::PrivateProcessMessage OUT\n"));
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(gc.ret);
+}
+FCIMPLEND
+
+class ProfilerServerCallbackHolder
+{
+public:
+ ProfilerServerCallbackHolder(Thread* pThread) : m_pThread(pThread)
+ {
+#ifdef PROFILING_SUPPORTED
+ // If we're profiling, notify the profiler that we're about to invoke the remoting target
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackRemoting());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RemotingServerInvocationStarted();
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+ }
+
+ ~ProfilerServerCallbackHolder()
+ {
+#ifdef PROFILING_SUPPORTED
+ // If profiling is active, tell profiler we've made the call, received the
+ // return value, done any processing necessary, and now remoting is done.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackRemoting());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RemotingServerInvocationReturned();
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+ }
+
+private:
+ Thread* m_pThread;
+};
+
+//+----------------------------------------------------------------------------
+//
+// Function: CallDescrWithObjectArray, private
+//
+// Synopsis: Builds the stack from a object array and call the object
+//
+//
+// Note this function triggers GC and assumes that pServer, pArguments, pVarRet, and ppVarOutParams are
+// all already protected!!
+//+----------------------------------------------------------------------------
+void CallDescrWithObjectArray(OBJECTREF& pServer,
+ //ReflectMethod *pRM,
+ MethodDesc *pMeth,
+ PCODE pTarget,
+ MetaSig* sig,
+ VASigCookie *pCookie,
+ PTRARRAYREF& pArgArray,
+ OBJECTREF *pVarRet,
+ PTRARRAYREF *ppVarOutParams)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ LOG((LF_REMOTING, LL_INFO10, "CallDescrWithObjectArray IN\n"));
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:6263) // "Suppress PREFast warning about _alloca in a loop"
+ // _alloca is called within a loop in a number of places within this method
+ // (as an ultra fast means of acquiring temporary storage). This can be a
+ // problem in some scenarios (swiftly drive us to stack overflow). But in
+ // this case the allocations are tightly bounded (by the number of arguments
+ // in the target method) and the allocations themselves are small (no worse
+ // really than calling the method an extra time).
+#endif
+
+ ByRefInfo *pByRefs = NULL;
+ FrameWithCookie<ProtectValueClassFrame> *pProtectValueClassFrame = NULL;
+ FrameWithCookie<ProtectByRefsFrame> *pProtectionFrame = NULL;
+ UINT nStackBytes = 0;
+ LPBYTE pAlloc = 0;
+ LPBYTE pTransitionBlock = 0;
+ UINT32 numByRef = 0;
+ //DWORD attr = pRM->dwFlags;
+#ifdef _DEBUG
+ //MethodDesc *pMD = pRM->pMethod;
+#endif
+ ValueClassInfo *pValueClasses = NULL;
+
+ // check the calling convention
+
+ BYTE callingconvention = sig->GetCallingConvention();
+ if (!isCallConv(callingconvention, IMAGE_CEE_CS_CALLCONV_DEFAULT))
+ {
+ _ASSERTE(!"This calling convention is not supported.");
+ COMPlusThrow(kInvalidProgramException);
+ }
+
+ // Make sure we are properly loaded
+ CONSISTENCY_CHECK(GetAppDomain()->CheckCanExecuteManagedCode(pMeth));
+
+ // Note this is redundant with the above but we do it anyway for safety
+ pMeth->EnsureActive();
+
+#ifdef DEBUGGING_SUPPORTED
+    // Notify a tracing debugger that we are about to call the target, so it can arrange to step into the callee.
+ if (CORDebuggerTraceCall())
+ {
+ g_pDebugInterface->TraceCall((const BYTE *)pTarget);
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ Thread * pThread = GetThread();
+
+ {
+ ProfilerServerCallbackHolder profilerHolder(pThread);
+
+ ArgIterator argit(sig);
+
+ // Create a fake FramedMethodFrame on the stack.
+ nStackBytes = argit.SizeOfFrameArgumentArray();
+
+ UINT32 cbAlloc = 0;
+ if (!ClrSafeInt<UINT32>::addition(TransitionBlock::GetNegSpaceSize(), sizeof(TransitionBlock), cbAlloc))
+ COMPlusThrow(kArgumentException);
+ if (!ClrSafeInt<UINT32>::addition(cbAlloc, nStackBytes, cbAlloc))
+ COMPlusThrow(kArgumentException);
+
+ pAlloc = (LPBYTE)_alloca(cbAlloc);
+ pTransitionBlock = pAlloc + TransitionBlock::GetNegSpaceSize();
+
+ // cycle through the parameters and see if there are byrefs
+ BOOL fHasByRefs = FALSE;
+
+ //if (attr & RM_ATTR_BYREF_FLAG_SET)
+ // fHasByRefs = attr & RM_ATTR_HAS_BYREF_ARG;
+ //else
+ {
+ sig->Reset();
+ CorElementType typ;
+ while ((typ = sig->NextArg()) != ELEMENT_TYPE_END)
+ {
+ if (typ == ELEMENT_TYPE_BYREF)
+ {
+ fHasByRefs = TRUE;
+ //attr |= RM_ATTR_HAS_BYREF_ARG;
+ break;
+ }
+ }
+ //attr |= RM_ATTR_BYREF_FLAG_SET;
+ //pRM->dwFlags = attr;
+ sig->Reset();
+ }
+
+ UINT32 nFixedArgs = sig->NumFixedArgs();
+ // To avoid any security problems with integer overflow/underflow we're
+ // going to validate the number of args here (we're about to _alloca an
+ // array of ArgInfo structures and maybe a managed object array as well
+ // based on this count).
+ _ASSERTE(sizeof(Object*) <= sizeof(ArgInfo));
+ UINT32 nMaxArgs = UINT32_MAX / sizeof(ArgInfo);
+ if (nFixedArgs > nMaxArgs)
+ COMPlusThrow(kArgumentException);
+
+        // if there are byrefs, allocate an array for the out parameters
+ if (fHasByRefs)
+ {
+ *ppVarOutParams = PTRARRAYREF(AllocateObjectArray(sig->NumFixedArgs(), g_pObjectClass));
+
+ // Null out the array
+ memset(&(*ppVarOutParams)->m_Array, 0, sizeof(OBJECTREF) * sig->NumFixedArgs());
+ }
+
+ OBJECTREF *ppThis = NULL;
+
+ if (sig->HasThis())
+ {
+ ppThis = (OBJECTREF*)(pTransitionBlock + argit.GetThisOffset());
+
+ // *ppThis is not GC protected. It will be set to the right value
+ // after all object allocations are made.
+ *ppThis = NULL;
+ }
+
+ // if we have the Value Class return, we need to allocate that class and place a pointer to it on the stack.
+
+ *pVarRet = NULL;
+ TypeHandle retType = sig->GetRetTypeHandleThrowing();
+ // Note that we want the unnormalized (signature) type because GetStackObject
+        // boxes as the element type, which if we normalized it would lose information.
+ CorElementType retElemType = sig->GetReturnType();
+
+ // The MethodTable pointer of the return type, if that's a struct
+ MethodTable* pStructRetTypeMT = NULL;
+
+ // Allocate a boxed struct instance to hold the return value in any case.
+ if (retElemType == ELEMENT_TYPE_VALUETYPE)
+ {
+ pStructRetTypeMT = retType.GetMethodTable();
+ *pVarRet = pStructRetTypeMT->Allocate();
+ }
+ else {
+ _ASSERTE(!argit.HasRetBuffArg());
+ }
+
+#ifdef CALLDESCR_REGTYPEMAP
+ UINT64 dwRegTypeMap = 0;
+ BYTE* pMap = (BYTE*)&dwRegTypeMap;
+#endif // CALLDESCR_REGTYPEMAP
+
+#ifdef CALLDESCR_FPARGREGS
+ FloatArgumentRegisters *pFloatArgumentRegisters = NULL;
+#endif // CALLDESCR_FPARGREGS
+
+ // gather data about the parameters by iterating over the sig:
+ UINT32 arg = 0;
+ int ofs = 0;
+
+ // REVIEW: need to use actual arg count if VarArgs are supported
+ ArgInfo* pArgInfoStart = (ArgInfo*) _alloca(nFixedArgs*sizeof(ArgInfo));
+
+#ifdef _DEBUG
+ // We expect to write useful data over every part of this so need
+ // not do this in retail!
+ memset((void *)pArgInfoStart, 0, sizeof(ArgInfo)*nFixedArgs);
+#endif
+
+ for (; TransitionBlock::InvalidOffset != (ofs = argit.GetNextOffset()); arg++)
+ {
+ CONSISTENCY_CHECK(arg < nFixedArgs);
+ ArgInfo* pArgInfo = pArgInfoStart + arg;
+
+#ifdef CALLDESCR_REGTYPEMAP
+ FillInRegTypeMap(ofs, argit.GetArgType(), pMap);
+#endif
+
+#ifdef CALLDESCR_FPARGREGS
+ // Under CALLDESCR_FPARGREGS -ve offsets indicate arguments in floating point registers. If we have at
+ // least one such argument we point the call worker at the floating point area of the frame (we leave
+ // it null otherwise since the worker can perform a useful optimization if it knows no floating point
+ // registers need to be set up).
+ if (TransitionBlock::IsFloatArgumentRegisterOffset(ofs) && (pFloatArgumentRegisters == NULL))
+ pFloatArgumentRegisters = (FloatArgumentRegisters*)(pTransitionBlock +
+ TransitionBlock::GetOffsetOfFloatArgumentRegisters());
+#endif
+
+ if (argit.GetArgType() == ELEMENT_TYPE_BYREF)
+ {
+ TypeHandle ty = TypeHandle();
+ CorElementType brType = sig->GetByRefType(&ty);
+ if (CorIsPrimitiveType(brType))
+ {
+ pArgInfo->dataSize = gElementTypeInfo[brType].m_cbSize;
+ }
+ else if (ty.IsValueType())
+ {
+ pArgInfo->dataSize = ty.GetMethodTable()->GetNumInstanceFieldBytes();
+ numByRef ++;
+ }
+ else
+ {
+ pArgInfo->dataSize = sizeof(Object *);
+ numByRef ++;
+ }
+
+ ByRefInfo *brInfo = (ByRefInfo *) _alloca(offsetof(ByRefInfo,data) + pArgInfo->dataSize);
+ brInfo->argIndex = arg;
+ brInfo->typ = brType;
+ brInfo->typeHandle = ty;
+ brInfo->pNext = pByRefs;
+ pByRefs = brInfo;
+ pArgInfo->dataLocation = (BYTE*)brInfo->data;
+ *((void**)(pTransitionBlock + ofs)) = (void*)pArgInfo->dataLocation;
+ pArgInfo->dataTypeHandle = ty;
+ pArgInfo->dataType = static_cast<BYTE>(brType);
+ pArgInfo->byref = TRUE;
+ }
+ else
+ {
+ pArgInfo->dataLocation = pTransitionBlock + ofs;
+ pArgInfo->dataSize = argit.GetArgSize();
+ pArgInfo->dataTypeHandle = sig->GetLastTypeHandleThrowing(); // this may cause GC!
+ pArgInfo->dataType = (BYTE)argit.GetArgType();
+ pArgInfo->byref = FALSE;
+ }
+ }
+
+
+ if (ppThis)
+ {
+ // If this isn't a value class, verify the objectref
+#ifdef _DEBUG
+ //if (pMD->GetMethodTable()->IsValueType() == FALSE)
+ //{
+ // VALIDATEOBJECTREF(pServer);
+ //}
+#endif //_DEBUG
+ *ppThis = pServer;
+ }
+
+ PVOID pRetBufStackData = NULL;
+
+ if (argit.HasRetBuffArg())
+ {
+ // If the return buffer *must* be a stack-allocated object, allocate it.
+ PVOID pRetBufData = NULL;
+ if (pStructRetTypeMT->IsStructRequiringStackAllocRetBuf())
+ {
+ SIZE_T sz = pStructRetTypeMT->GetNumInstanceFieldBytes();
+ pRetBufData = pRetBufStackData = _alloca(sz);
+ memset(pRetBufData, 0, sz);
+ pValueClasses = new (_alloca(sizeof(ValueClassInfo))) ValueClassInfo(pRetBufStackData, pStructRetTypeMT, pValueClasses);
+ }
+ else
+ {
+ pRetBufData = (*pVarRet)->GetData();
+ }
+ *((LPVOID*) (pTransitionBlock + argit.GetRetBuffArgOffset())) = pRetBufData;
+ }
+
+ // There should be no GC when we fill up the stack with parameters, as we don't protect them
+ // Assignment of "*ppThis" above triggers the point where we become unprotected.
+ {
+ GCX_FORBID();
+
+ PBYTE dataLocation;
+ INT32 dataSize;
+ TypeHandle dataTypeHandle;
+ BYTE dataType;
+
+ OBJECTREF* pArguments = pArgArray->m_Array;
+ UINT32 i;
+ for (i=0; i<nFixedArgs; i++)
+ {
+ ArgInfo* pArgInfo = pArgInfoStart + i;
+
+ dataSize = pArgInfo->dataSize;
+ dataLocation = pArgInfo->dataLocation;
+ dataTypeHandle = pArgInfo->dataTypeHandle;
+ dataType = pArgInfo->dataType;
+
+ // Nullable<T> needs special treatment, even if it is 1, 2, 4, or 8 bytes
+ if (dataType == ELEMENT_TYPE_VALUETYPE)
+ goto DEFAULT_CASE;
+
+ switch (dataSize)
+ {
+ case 1:
+                // This "if" statement is necessary to make the assignment big-endian aware
+ if (pArgInfo->byref)
+ *((INT8*)dataLocation) = *((INT8*)pArguments[i]->GetData());
+ else
+ *(StackElemType*)dataLocation = (StackElemType)*((INT8*)pArguments[i]->GetData());
+ break;
+ case 2:
+                // This "if" statement is necessary to make the assignment big-endian aware
+ if (pArgInfo->byref)
+ *((INT16*)dataLocation) = *((INT16*)pArguments[i]->GetData());
+ else
+ *(StackElemType*)dataLocation = (StackElemType)*((INT16*)pArguments[i]->GetData());
+ break;
+ case 4:
+#ifndef _WIN64
+ if ((dataType == ELEMENT_TYPE_STRING) ||
+ (dataType == ELEMENT_TYPE_OBJECT) ||
+ (dataType == ELEMENT_TYPE_CLASS) ||
+ (dataType == ELEMENT_TYPE_SZARRAY) ||
+ (dataType == ELEMENT_TYPE_ARRAY))
+ {
+ *(OBJECTREF *)dataLocation = pArguments[i];
+ }
+ else
+ {
+ *(StackElemType*)dataLocation = (StackElemType)*((INT32*)pArguments[i]->GetData());
+ }
+#else // !_WIN64
+                // This "if" statement is necessary to make the assignment big-endian aware
+ if (pArgInfo->byref)
+ *(INT32*)dataLocation = *((INT32*)pArguments[i]->GetData());
+ else
+ *(StackElemType*)dataLocation = (StackElemType)*((INT32*)pArguments[i]->GetData());
+#endif // !_WIN64
+ break;
+
+ case 8:
+#ifdef _WIN64
+ if ((dataType == ELEMENT_TYPE_STRING) ||
+ (dataType == ELEMENT_TYPE_OBJECT) ||
+ (dataType == ELEMENT_TYPE_CLASS) ||
+ (dataType == ELEMENT_TYPE_SZARRAY) ||
+ (dataType == ELEMENT_TYPE_ARRAY))
+ {
+ *(OBJECTREF *)dataLocation = pArguments[i];
+ }
+ else
+ {
+ *((INT64*)dataLocation) = *((INT64*)pArguments[i]->GetData());
+ }
+#else // _WIN64
+ *((INT64*)dataLocation) = *((INT64*)pArguments[i]->GetData());
+#endif // _WIN64
+ break;
+
+ default:
+ {
+ DEFAULT_CASE:
+ MethodTable * pMT = dataTypeHandle.GetMethodTable();
+
+#ifdef ENREGISTERED_PARAMTYPE_MAXSIZE
+ // We do not need to allocate a buffer if the argument is already passed by reference.
+ if (!pArgInfo->byref && ArgIterator::IsArgPassedByRef(dataTypeHandle))
+ {
+ PVOID pvArg = _alloca(dataSize);
+ pMT->UnBoxIntoUnchecked(pvArg, pArguments[i]);
+ *(PVOID*)dataLocation = pvArg;
+
+ pValueClasses = new (_alloca(sizeof(ValueClassInfo))) ValueClassInfo(pvArg, pMT, pValueClasses);
+ }
+ else
+#endif
+ {
+ pMT->UnBoxIntoUnchecked(dataLocation, pArguments[i]);
+ }
+ }
+ }
+ }
+
+#ifdef _DEBUG
+ // Should not be using this any more
+ memset((void *)pArgInfoStart, 0, sizeof(ArgInfo)*nFixedArgs);
+#endif
+
+ // if there were byrefs, push a protection frame
+ if (pByRefs && numByRef > 0)
+ {
+ char *pBuffer = (char*)_alloca (sizeof (FrameWithCookie<ProtectByRefsFrame>));
+ pProtectionFrame = new (pBuffer) FrameWithCookie<ProtectByRefsFrame>(pThread, pByRefs);
+ }
+
+ // If there were any value classes that must be protected by the
+ // caller, push a ProtectValueClassFrame.
+ if (pValueClasses)
+ {
+ char *pBuffer = (char*)_alloca (sizeof (FrameWithCookie<ProtectValueClassFrame>));
+ pProtectValueClassFrame = new (pBuffer) FrameWithCookie<ProtectValueClassFrame>(pThread, pValueClasses);
+ }
+
+ } // GCX_FORBID
+
+ UINT fpReturnSize = argit.GetFPReturnSize();
+
+ CallDescrData callDescrData;
+
+ callDescrData.pSrc = pTransitionBlock + sizeof(TransitionBlock);
+ callDescrData.numStackSlots = nStackBytes / STACK_ELEM_SIZE;
+#ifdef CALLDESCR_ARGREGS
+ callDescrData.pArgumentRegisters = (ArgumentRegisters*)(pTransitionBlock + TransitionBlock::GetOffsetOfArgumentRegisters());
+#endif
+#ifdef CALLDESCR_FPARGREGS
+ callDescrData.pFloatArgumentRegisters = pFloatArgumentRegisters;
+#endif
+#ifdef CALLDESCR_REGTYPEMAP
+ callDescrData.dwRegTypeMap = dwRegTypeMap;
+#endif
+ callDescrData.fpReturnSize = fpReturnSize;
+ callDescrData.pTarget = pTarget;
+
+ CallDescrWorkerWithHandler(&callDescrData);
+
+ // It is still illegal to do a GC here. The return type might have/contain GC pointers.
+ if (retElemType == ELEMENT_TYPE_VALUETYPE)
+ {
+ _ASSERTE(*pVarRet != NULL); // we have already allocated a return object
+ PVOID pVarRetData = (*pVarRet)->GetData();
+
+ // If the return result was passed back in registers, then copy it into the return object
+ if (!argit.HasRetBuffArg())
+ {
+ CopyValueClass(pVarRetData, &callDescrData.returnValue, (*pVarRet)->GetMethodTable(), (*pVarRet)->GetAppDomain());
+ }
+ else if (pRetBufStackData != NULL)
+ {
+ // Copy the stack-allocated ret buff to the heap-allocated one.
+ CopyValueClass(pVarRetData, pRetBufStackData, (*pVarRet)->GetMethodTable(), (*pVarRet)->GetAppDomain());
+ }
+
+ // If the return is a Nullable<T>, box it using Nullable<T> conventions.
+        // TODO: this double allocates on construction, which is wasteful
+ if (!retType.IsNull())
+ *pVarRet = Nullable::NormalizeBox(*pVarRet);
+ }
+ else
+ CMessage::GetObjectFromStack(pVarRet, &callDescrData.returnValue, retElemType, retType);
+
+ // You can now do GCs if you want to
+
+ if (pProtectValueClassFrame)
+ pProtectValueClassFrame->Pop(pThread);
+
+ // extract the out args from the byrefs
+ if (pByRefs)
+ {
+ do
+ {
+ // Always extract the data ptr every time we enter this loop because
+ // calls to GetObjectFromStack below can cause a GC.
+            // Even this is not enough, because we are passing a pointer into the GC heap
+            // to GetObjectFromStack. If a GC happens, nobody is protecting the passed-in pointer.
+
+ OBJECTREF pTmp = NULL;
+ void* dataLocation = pByRefs->data;
+ CMessage::GetObjectFromStack(&pTmp, &dataLocation, pByRefs->typ, pByRefs->typeHandle, TRUE);
+ (*ppVarOutParams)->SetAt(pByRefs->argIndex, pTmp);
+ pByRefs = pByRefs->pNext;
+ }
+ while (pByRefs);
+
+ if (pProtectionFrame)
+ pProtectionFrame->Pop(pThread);
+ }
+
+ } // ProfilerServerCallbackHolder
+
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+ LOG((LF_REMOTING, LL_INFO10, "CallDescrWithObjectArray OUT\n"));
+}
+
+#endif // FEATURE_REMOTING
diff --git a/src/vm/stackbuildersink.h b/src/vm/stackbuildersink.h
new file mode 100644
index 0000000000..f16a8557ff
--- /dev/null
+++ b/src/vm/stackbuildersink.h
@@ -0,0 +1,50 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: StackBuilderSink.h
+//
+
+//
+// Purpose: Native implementation for System.Runtime.Remoting.Messaging.StackBuilderSink
+//
+
+
+#ifndef __STACKBUILDERSINK_H__
+#define __STACKBUILDERSINK_H__
+
+#ifndef FEATURE_REMOTING
+#error FEATURE_REMOTING is not set, please do not include stackbuildersink.h
+#endif
+
+void CallDescrWithObjectArray(OBJECTREF& pServer, MethodDesc *pMD, //ReflectMethod *pMD,
+ PCODE pTarget, MetaSig* sig, VASigCookie *pCookie,
+ PTRARRAYREF& pArguments,
+ OBJECTREF* pVarRet, PTRARRAYREF* ppVarOutParams);
+
+//+----------------------------------------------------------
+//
+// Class: CStackBuilderSink
+//
+// Synopsis: EE counterpart to
+//             System.Runtime.Remoting.Messaging.StackBuilderSink
+// Code helper to build a stack of parameter
+// arguments and make a function call on an
+// object.
+//
+//
+//------------------------------------------------------------
+class CStackBuilderSink
+{
+public:
+
+ static FCDECL5(Object*, PrivateProcessMessage,
+ Object* pSBSinkUNSAFE,
+ MethodDesc* pMD,
+ PTRArray* pArgsUNSAFE,
+ Object* pServerUNSAFE,
+ PTRARRAYREF* ppVarOutParams);
+};
+
+#endif // __STACKBUILDERSINK_H__
diff --git a/src/vm/stackcompressor.cpp b/src/vm/stackcompressor.cpp
new file mode 100644
index 0000000000..97181bb43c
--- /dev/null
+++ b/src/vm/stackcompressor.cpp
@@ -0,0 +1,379 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+
+
+#include "common.h"
+
+#ifdef FEATURE_COMPRESSEDSTACK
+
+#include "stackcompressor.h"
+#include "securitystackwalk.h"
+#include "appdomainstack.inl"
+#include "comdelegate.h"
+
+//-----------------------------------------------------------
+// Stack walk callback data structure for stack compress.
+//-----------------------------------------------------------
+typedef struct _StackCompressData
+{
+ void* compressedStack;
+ StackCrawlMark * stackMark;
+ DWORD dwFlags;
+ Assembly * prevAssembly; // Previously checked assembly.
+ AppDomain * prevAppDomain;
+ Frame* pCtxTxFrame;
+} StackCompressData;
+
+
+void TurnSecurityStackWalkProgressOn( Thread* pThread )
+{
+ WRAPPER_NO_CONTRACT;
+ pThread->SetSecurityStackwalkInProgress( TRUE );
+}
+void TurnSecurityStackWalkProgressOff( Thread* pThread )
+{
+ WRAPPER_NO_CONTRACT;
+ pThread->SetSecurityStackwalkInProgress( FALSE );
+}
+typedef Holder< Thread*, TurnSecurityStackWalkProgressOn, TurnSecurityStackWalkProgressOff > StackWalkProgressEnableHolder;
+
+
+
+DWORD StackCompressor::GetCSInnerAppDomainOverridesCount(COMPRESSEDSTACKREF csRef)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+    // csRef can be NULL - that implies we set the CS and then crossed an AD, so the
+    // overrides were already counted when we hit the ctxTxFrame. Nothing to do here.
+ if (csRef != NULL)
+ {
+ NewCompressedStack* cs = (NewCompressedStack*)csRef->GetUnmanagedCompressedStack();
+ if (cs != NULL)
+ return cs->GetInnerAppDomainOverridesCount();
+ }
+ return 0;
+}
+DWORD StackCompressor::GetCSInnerAppDomainAssertCount(COMPRESSEDSTACKREF csRef)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+    // csRef can be NULL - that implies we set the CS and then crossed an AD, so the
+    // asserts were already counted when we hit the ctxTxFrame. Nothing to do here.
+ if (csRef != NULL)
+ {
+ NewCompressedStack* cs = (NewCompressedStack*)csRef->GetUnmanagedCompressedStack();
+ if (cs != NULL)
+ return cs->GetInnerAppDomainAssertCount();
+ }
+ return 0;
+}
+
+void* StackCompressor::SetAppDomainStack(Thread* pThread, void* curr)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ THROWS;
+ } CONTRACTL_END;
+
+ NewCompressedStack* unmanagedCompressedStack = (NewCompressedStack *)curr;
+
+ AppDomainStack* pRetADStack = NULL;
+
+ if (unmanagedCompressedStack != NULL)
+ {
+ pRetADStack = new AppDomainStack(pThread->GetAppDomainStack());
+ pThread->SetAppDomainStack(unmanagedCompressedStack->GetAppDomainStack() );
+ }
+ else
+ {
+ if (!pThread->IsDefaultSecurityInfo()) /* Do nothing for the single domain/FT/no overrides case */
+ {
+ pRetADStack = new AppDomainStack(pThread->GetAppDomainStack());
+ pThread->ResetSecurityInfo();
+ }
+ }
+ return (void*)pRetADStack;
+}
+
+void StackCompressor::RestoreAppDomainStack(Thread* pThread, void* appDomainStack)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(appDomainStack != NULL);
+ AppDomainStack* pADStack = (AppDomainStack*)appDomainStack;
+ pThread->SetAppDomainStack(*pADStack);
+ delete pADStack;
+}
+
+void StackCompressor::Destroy(void *stack)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(stack != NULL && "Don't pass NULL");
+ NewCompressedStack* ncs = (NewCompressedStack*)stack;
+ ncs->Destroy();
+}
+
+
+/* Forward declarations of the new CS stackwalking implementation */
+static void NCS_GetCompressedStackWorker(Thread *t, void *pData);
+static StackWalkAction NCS_CompressStackCB(CrawlFrame* pCf, void *pData);
+
+OBJECTREF StackCompressor::GetCompressedStack( StackCrawlMark* stackMark, BOOL fWalkStack )
+{
+
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;// Not an entry point
+ } CONTRACTL_END;
+
+ // Get the current thread that we're to walk.
+ Thread * t = GetThread();
+
+ NewCompressedStackHolder csHolder(new NewCompressedStack());
+
+
+ if (fWalkStack)
+ {
+ //
+ // Initialize the callback data on the stack...
+ //
+
+ StackCompressData walkData;
+
+ walkData.dwFlags = 0;
+ walkData.prevAssembly = NULL;
+ walkData.prevAppDomain = NULL;
+ walkData.stackMark = stackMark;
+ walkData.pCtxTxFrame = NULL;
+
+
+ walkData.compressedStack = (void*)csHolder.GetValue();
+ NCS_GetCompressedStackWorker(t, &walkData);
+ }
+
+ struct _gc {
+ SAFEHANDLE pSafeCSHandle;
+ } gc;
+ gc.pSafeCSHandle = NULL;
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.pSafeCSHandle = (SAFEHANDLE) AllocateObject(MscorlibBinder::GetClass(CLASS__SAFE_CSHANDLE));
+ CallDefaultConstructor(gc.pSafeCSHandle);
+ gc.pSafeCSHandle->SetHandle((void*) csHolder.GetValue());
+ csHolder.SuppressRelease();
+
+ GCPROTECT_END();
+ return (OBJECTREF) gc.pSafeCSHandle;
+}
+
+
+void NCS_GetCompressedStackWorker(Thread *t, void *pData)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ StackCompressData *pWalkData = (StackCompressData*)pData;
+ NewCompressedStack* compressedStack = (NewCompressedStack*) pWalkData->compressedStack;
+
+ _ASSERTE( t != NULL );
+
+ {
+ StackWalkProgressEnableHolder holder( t );
+
+ //
+ // Begin the stack walk...
+ //
+ // LIGHTUNWIND flag: allow using stackwalk cache for security stackwalks
+ // SKIPFUNCLETS flag: stop processing the stack after completing the current funclet (for instance
+ // only process the catch block on x64, not the throw block)
+ t->StackWalkFrames(NCS_CompressStackCB, pWalkData, SKIPFUNCLETS | LIGHTUNWIND);
+
+
+ // Ignore CS (if present) when we hit a FT assert
+ if (pWalkData->dwFlags & CORSEC_FT_ASSERT)
+ return;
+
+ // Check if there is a CS at the top of the thread
+ COMPRESSEDSTACKREF csRef = (COMPRESSEDSTACKREF)t->GetCompressedStack();
+ AppDomain *pAppDomain = t->GetDomain();
+ Frame *pFrame = NULL;
+#ifdef FEATURE_REMOTING
+ if (csRef == NULL)
+ {
+        // There may have been an AD transition and we should look at the CB data to see if this is the case
+ if (pWalkData->pCtxTxFrame != NULL)
+ {
+ pFrame = pWalkData->pCtxTxFrame;
+ csRef = Security::GetCSFromContextTransitionFrame(pFrame);
+ _ASSERTE(csRef != NULL); //otherwise we would not have saved the frame in the CB data
+ pAppDomain = pWalkData->pCtxTxFrame->GetReturnDomain();
+ }
+ }
+#endif // FEATURE_REMOTING
+
+ if (csRef != NULL)
+ {
+
+
+ compressedStack->ProcessCS(pAppDomain, csRef, pFrame);
+ }
+ else
+ {
+ compressedStack->ProcessAppDomainTransition(); // just to update domain overrides/assert count at the end of stackwalk
+ }
+
+
+ }
+
+ return;
+}
+
+StackWalkAction NCS_CompressStackCB(CrawlFrame* pCf, void *pData)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ StackCompressData *pCBdata = (StackCompressData*)pData;
+ NewCompressedStack* compressedStack = (NewCompressedStack*) pCBdata->compressedStack;
+
+ // First check if the walk has skipped the required frames. The check
+ // here is between the address of a local variable (the stack mark) and a
+ // pointer to the EIP for a frame (which is actually the pointer to the
+ // return address to the function from the previous frame). So we'll
+ // actually notice which frame the stack mark was in one frame later. This
+ // is fine for our purposes since we're always looking for the frame of the
+ // caller of the method that actually created the stack mark.
+ _ASSERTE((pCBdata->stackMark == NULL) || (*pCBdata->stackMark == LookForMyCaller));
+ if ((pCBdata->stackMark != NULL) &&
+ !pCf->IsInCalleesFrames(pCBdata->stackMark))
+ return SWA_CONTINUE;
+
+ Frame *pFrame = pCf->GetFrame();
+
+#ifdef FEATURE_REMOTING
+ // Save the CtxTxFrame if this is one
+ if (pCBdata->pCtxTxFrame == NULL)
+ {
+ if (Security::IsContextTransitionFrameWithCS(pFrame))
+ {
+ pCBdata->pCtxTxFrame = pFrame;
+ }
+ }
+#endif // FEATURE_REMOTING
+
+ // Handle AppDomain transitions:
+ AppDomain *pAppDomain = pCf->GetAppDomain();
+ if (pCBdata->prevAppDomain != pAppDomain)
+ {
+#ifndef FEATURE_REMOTING
+ BOOL bRealAppDomainTransition = (pCBdata->prevAppDomain != NULL);
+
+ // For a "real" appdomain transition, we can stop the stackwalk since there's no managed AD transitions
+ // without remoting. The "real" here denotes that this is not the first appdomain transition (from NULL to current)
+ // that happens on the first crawlframe we see on a stackwalk. Also don't do the final ad transition here (that'll happen
+ // outside the callback)
+ if (bRealAppDomainTransition)
+ {
+ return SWA_ABORT;
+ }
+ else
+#endif // !FEATURE_REMOTING
+ {
+ compressedStack->ProcessAppDomainTransition();
+ pCBdata->prevAppDomain = pAppDomain;
+ }
+
+ }
+
+
+ if (pCf->GetFunction() == NULL)
+ return SWA_CONTINUE; // not a function frame, so we were just looking for CtxTransitionFrames. Resume the stackwalk...
+
+ // Get the security object for this function...
+ OBJECTREF* pRefSecDesc = pCf->GetAddrOfSecurityObject();
+
+ MethodDesc * pFunc = pCf->GetFunction();
+
+ _ASSERTE(pFunc != NULL); // we requested methods!
+
+ Assembly * pAssem = pCf->GetAssembly();
+ _ASSERTE(pAssem != NULL);
+ PREFIX_ASSUME(pAssem != NULL);
+
+
+
+
+ if (pRefSecDesc != NULL)
+ SecurityDeclarative::DoDeclarativeSecurityAtStackWalk(pFunc, pAppDomain, pRefSecDesc);
+
+ if (pFunc->GetMethodTable()->IsDelegate())
+ {
+ DelegateEEClass* delegateCls = (DelegateEEClass*) pFunc->GetMethodTable()->GetClass();
+ if (pFunc == delegateCls->m_pBeginInvokeMethod)
+ {
+ // Async delegate case: we may need to insert the creator frame into the CS
+ DELEGATEREF dRef = (DELEGATEREF) ((FramedMethodFrame *)pFrame)->GetThis();
+ _ASSERTE(dRef);
+ if (COMDelegate::IsSecureDelegate(dRef))
+ {
+ if (!dRef->IsWrapperDelegate())
+ {
+ MethodDesc* pCreatorMethod = (MethodDesc*) dRef->GetMethodPtrAux();
+ Assembly* pCreatorAssembly = pCreatorMethod->GetAssembly();
+ compressedStack->ProcessFrame(pAppDomain,
+ NULL,
+ NULL,
+ pCreatorAssembly->GetSharedSecurityDescriptor(),
+ NULL); // ignore return value - no FSD being passed in.
+ }
+ }
+
+ }
+ }
+
+ DWORD retFlags = compressedStack->ProcessFrame(pAppDomain,
+ pAssem,
+ pFunc,
+ pAssem->GetSharedSecurityDescriptor(),
+ (FRAMESECDESCREF *) pRefSecDesc) ;
+
+ pCBdata->dwFlags |= (retFlags & CORSEC_FT_ASSERT);
+ // ProcessFrame returns TRUE if we should stop stackwalking
+ if (retFlags != 0 || Security::IsSpecialRunFrame(pFunc))
+ return SWA_ABORT;
+
+ return SWA_CONTINUE;
+
+}
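+
+// Illustrative driver (a hedged sketch -- the real walk is initiated elsewhere
+// in the security stack-walk code, and the exact flags passed to
+// Thread::StackWalkFrames are an assumption here, not taken from this change):
+//
+//     StackCompressData walkData;
+//     // ... initialize walkData (compressedStack, stackMark, etc.) ...
+//     pThread->StackWalkFrames(NCS_CompressStackCB, &walkData, 0 /* flags: assumption */, NULL);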
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
diff --git a/src/vm/stackcompressor.h b/src/vm/stackcompressor.h
new file mode 100644
index 0000000000..01a1517cb9
--- /dev/null
+++ b/src/vm/stackcompressor.h
@@ -0,0 +1,40 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+
+
+#ifndef __stackcompressor_h__
+#define __stackcompressor_h__
+#ifdef FEATURE_COMPRESSEDSTACK
+
+#include "common.h"
+
+
+#include "newcompressedstack.h"
+
+#ifndef DACCESS_COMPILE
+
+class StackCompressor
+{
+
+public:
+ static DWORD GetCSInnerAppDomainAssertCount(COMPRESSEDSTACKREF csRef);
+ static DWORD GetCSInnerAppDomainOverridesCount(COMPRESSEDSTACKREF csRef);
+ static void* SetAppDomainStack(Thread* pThread, void* curr);
+ static void RestoreAppDomainStack(Thread* pThread, void* appDomainStack);
+
+ static void Destroy(void *stack);
+ static OBJECTREF GetCompressedStack( StackCrawlMark* stackMark = NULL, BOOL fWalkStack = TRUE );
+
+
+};
+#endif // DACCESS_COMPILE
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+
+#endif // __stackcompressor_h__
+
diff --git a/src/vm/stackingallocator.cpp b/src/vm/stackingallocator.cpp
new file mode 100644
index 0000000000..b82c49d35b
--- /dev/null
+++ b/src/vm/stackingallocator.cpp
@@ -0,0 +1,376 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// StackingAllocator.cpp -
+//
+
+//
+// Non-thread safe allocator designed for allocations with the following
+// pattern:
+// allocate, allocate, allocate ... deallocate all
+// There may also be recursive uses of this allocator (by the same thread), so
+// the usage becomes:
+// mark checkpoint, allocate, allocate, ..., deallocate back to checkpoint
+//
+// Allocations come from a singly linked list of blocks with dynamically
+// determined size (the goal is to have fewer block allocations than allocation
+// requests).
+//
+// Allocations are very fast (in the case where a new block isn't allocated)
+// since blocks are carved up into packets by simply moving a cursor through
+// the block.
+//
+// Allocations are guaranteed to be quadword aligned.
+
+
+
+#include "common.h"
+#include "excep.h"
+
+
+#if 0
+#define INC_COUNTER(_name, _amount) do { \
+ unsigned _count = REGUTIL::GetLong(W("AllocCounter_") _name, 0, NULL, HKEY_CURRENT_USER); \
+ REGUTIL::SetLong(W("AllocCounter_") _name, _count+(_amount), NULL, HKEY_CURRENT_USER); \
+ } while (0)
+#define MAX_COUNTER(_name, _amount) do { \
+ unsigned _count = REGUTIL::GetLong(W("AllocCounter_") _name, 0, NULL, HKEY_CURRENT_USER); \
+ REGUTIL::SetLong(W("AllocCounter_") _name, max(_count, (_amount)), NULL, HKEY_CURRENT_USER); \
+ } while (0)
+#else
+#define INC_COUNTER(_name, _amount)
+#define MAX_COUNTER(_name, _amount)
+#endif
+
+
+StackingAllocator::StackingAllocator()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE((sizeof(StackBlock) & 7) == 0);
+ _ASSERTE((sizeof(Checkpoint) & 7) == 0);
+
+ m_FirstBlock = NULL;
+ m_FirstFree = NULL;
+ m_InitialBlock = NULL;
+ m_DeferredFreeBlock = NULL;
+
+#ifdef _DEBUG
+ m_CheckpointDepth = 0;
+ m_Allocs = 0;
+ m_Checkpoints = 0;
+ m_Collapses = 0;
+ m_BlockAllocs = 0;
+ m_MaxAlloc = 0;
+#endif
+
+ Init(true);
+}
+
+
+StackingAllocator::~StackingAllocator()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Clear(NULL);
+
+ if (m_DeferredFreeBlock)
+ {
+ delete [] (char*)m_DeferredFreeBlock;
+ m_DeferredFreeBlock = NULL;
+ }
+
+#ifdef _DEBUG
+ INC_COUNTER(W("Allocs"), m_Allocs);
+ INC_COUNTER(W("Checkpoints"), m_Checkpoints);
+ INC_COUNTER(W("Collapses"), m_Collapses);
+ INC_COUNTER(W("BlockAllocs"), m_BlockAllocs);
+ MAX_COUNTER(W("MaxAlloc"), m_MaxAlloc);
+#endif
+}
+
+// Lightweight initial checkpoint
+Checkpoint StackingAllocator::s_initialCheckpoint;
+
+void *StackingAllocator::GetCheckpoint()
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+#ifdef _DEBUG
+ m_CheckpointDepth++;
+ m_Checkpoints++;
+#endif
+
+ // As an optimization, initial checkpoints are lightweight (they just return
+ // a special marker, s_initialCheckpoint). This is because we know how to restore the
+ // allocator state on a Collapse without having to store any additional
+ // context info.
+ if ((m_InitialBlock == NULL) || (m_FirstFree == m_InitialBlock->m_Data))
+ return &s_initialCheckpoint;
+
+ // Remember the current allocator state.
+ StackBlock *pOldBlock = m_FirstBlock;
+ unsigned iOldBytesLeft = m_BytesLeft;
+
+ // Allocate a checkpoint block (just like a normal user request).
+ Checkpoint *c = new (this) Checkpoint();
+
+ // Record previous allocator state in it.
+ c->m_OldBlock = pOldBlock;
+ c->m_OldBytesLeft = iOldBytesLeft;
+
+ // Return the checkpoint marker.
+ return c;
+}
+
+
+bool StackingAllocator::AllocNewBlockForBytes(unsigned n)
+{
+ CONTRACT (bool)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_CheckpointDepth > 0);
+ }
+ CONTRACT_END;
+
+ // already aligned and in the hard case
+
+ _ASSERTE(n % 8 == 0);
+ _ASSERTE(n > m_BytesLeft);
+
+ StackBlock* b = NULL;
+
+ // We need a block, but before we allocate a new one we check whether
+ // there is a block whose free we deferred rather than returning it to the
+ // OS. If there is such a block and it's big enough, we reuse it now
+ // instead of going to the OS again -- this helps if we happen to be
+ // checkpointing across a block seam very often, as in VSWhidbey #100462.
+
+ if (m_DeferredFreeBlock != NULL && m_DeferredFreeBlock->m_Length >= n)
+ {
+ b = m_DeferredFreeBlock;
+ m_DeferredFreeBlock = NULL;
+
+ // b->m_Length doesn't need init because its value is still valid
+ // from the original allocation
+ }
+ else
+ {
+ // Allocate a block four times as large as the request but with a lower
+ // limit of MinBlockSize and an upper limit of MaxBlockSize. If the
+ // request is larger than MaxBlockSize then allocate exactly that
+ // amount.
+ // Additionally, if we don't have an initial block yet, use an increased
+ // lower bound for the size, since we intend to cache this block.
+ unsigned lower = m_InitialBlock ? MinBlockSize : InitBlockSize;
+ size_t allocSize = sizeof(StackBlock) + max(n, min(max(n * 4, lower), MaxBlockSize));
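+ // Worked example (illustrative): for an already-aligned request of n == 104
+ // bytes with the initial block in place, the payload is
+ // max(104, min(max(416, 128), 4096)) == 416 bytes plus the header; a 16KB
+ // request gets exactly sizeof(StackBlock) + 16384.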
+
+ // Allocate the block.
+ // <TODO>@todo: Is it worth implementing a non-thread safe standard heap for
+ // this allocator, to get even more MP scalability?</TODO>
+ b = (StackBlock *)new (nothrow) char[allocSize];
+ if (b == NULL)
+ RETURN false;
+
+ // reserve space for the Block structure and then link it in
+ b->m_Length = (unsigned) (allocSize - sizeof(StackBlock));
+
+#ifdef _DEBUG
+ m_BlockAllocs++;
+#endif
+ }
+
+ // If this is the first block allocated, we record that fact since we
+ // intend to cache it.
+ if (m_InitialBlock == NULL)
+ {
+ _ASSERTE((m_FirstBlock == NULL) && (m_FirstFree == NULL) && (m_BytesLeft == 0));
+ m_InitialBlock = b;
+ }
+
+ // Link new block to head of block chain and update internal state to
+ // start allocating from this new block.
+ b->m_Next = m_FirstBlock;
+ m_FirstBlock = b;
+ m_FirstFree = b->m_Data;
+ // the cast below is safe because b->m_Length is less than MaxBlockSize (4096)
+ m_BytesLeft = static_cast<unsigned>(b->m_Length);
+
+ INDEBUG(b->m_Sentinal = 0);
+
+ RETURN true;
+}
+
+
+void* StackingAllocator::UnsafeAllocSafeThrow(UINT32 Size)
+{
+ CONTRACT (void*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ INJECT_FAULT(ThrowOutOfMemory());
+ PRECONDITION(m_CheckpointDepth > 0);
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // OOM fault injection in AllocNoThrow
+
+ void* retval = UnsafeAllocNoThrow(Size);
+ if (retval == NULL)
+ ENCLOSE_IN_EXCEPTION_HANDLER ( ThrowOutOfMemory );
+
+ RETURN retval;
+}
+
+void *StackingAllocator::UnsafeAlloc(UINT32 Size)
+{
+ CONTRACT (void*)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ INJECT_FAULT(ThrowOutOfMemory());
+ PRECONDITION(m_CheckpointDepth > 0);
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // OOM fault injection in AllocNoThrow
+
+ void* retval = UnsafeAllocNoThrow(Size);
+ if (retval == NULL)
+ ThrowOutOfMemory();
+
+ RETURN retval;
+}
+
+
+void StackingAllocator::Collapse(void *CheckpointMarker)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(m_CheckpointDepth > 0);
+
+#ifdef _DEBUG
+ m_CheckpointDepth--;
+ m_Collapses++;
+#endif
+
+ Checkpoint *c = (Checkpoint *)CheckpointMarker;
+
+ // Special case collapsing back to the initial checkpoint.
+ if (c == &s_initialCheckpoint || c->m_OldBlock == NULL) {
+ Clear(m_InitialBlock);
+ Init(false);
+
+ // confirm no buffer overruns
+ INDEBUG(Validate(m_FirstBlock, m_FirstFree));
+
+ return;
+ }
+
+ // Cache the contents of the checkpoint, since we can potentially deallocate it in the
+ // next step (if a new block had to be allocated to accommodate the
+ // checkpoint).
+ StackBlock *pOldBlock = c->m_OldBlock;
+ unsigned iOldBytesLeft = c->m_OldBytesLeft;
+
+ // Deallocate blocks until the block that was at the head of the
+ // chain when the checkpoint was taken is at the head again.
+ Clear(pOldBlock);
+
+ // Restore former allocator state.
+ m_FirstBlock = pOldBlock;
+ m_FirstFree = &pOldBlock->m_Data[pOldBlock->m_Length - iOldBytesLeft];
+ m_BytesLeft = iOldBytesLeft;
+
+ // confirm no buffer overruns
+ INDEBUG(Validate(m_FirstBlock, m_FirstFree));
+}
+
+
+void * __cdecl operator new(size_t n, StackingAllocator * alloc)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#ifdef _WIN64
+ // size_t is wider than the allocator's 32-bit interface on 64-bit platforms,
+ // so check for overflow ((size_t)1<<31 rather than (size_t)(1<<31), which
+ // would shift into the sign bit of an int and then sign-extend)
+ if(n > ((size_t)1<<31)) ThrowOutOfMemory();
+#endif
+ void *retval = alloc->UnsafeAllocNoThrow((unsigned)n);
+ if(retval == NULL) ThrowOutOfMemory();
+
+ return retval;
+}
+
+void * __cdecl operator new[](size_t n, StackingAllocator * alloc)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#ifdef _WIN64
+ // size_t is wider than the allocator's 32-bit interface on 64-bit platforms,
+ // so check for overflow ((size_t)1<<31 avoids shifting into the sign bit of an int)
+ if(n > ((size_t)1<<31)) ThrowOutOfMemory();
+#else
+ if(n == (size_t)-1) ThrowOutOfMemory(); // overflow occurred
+#endif
+
+ void *retval = alloc->UnsafeAllocNoThrow((unsigned)n);
+ if (retval == NULL)
+ ThrowOutOfMemory();
+
+ return retval;
+}
+
+void * __cdecl operator new(size_t n, StackingAllocator * alloc, const NoThrow&)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#ifdef _WIN64
+ // size_t is wider than the allocator's 32-bit interface on 64-bit platforms,
+ // so check for overflow ((size_t)1<<31 avoids shifting into the sign bit of an int)
+ if(n > ((size_t)1<<31)) return NULL;
+#endif
+
+ return alloc->UnsafeAllocNoThrow((unsigned)n);
+}
+
+void * __cdecl operator new[](size_t n, StackingAllocator * alloc, const NoThrow&)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#ifdef _WIN64
+ // size_t is wider than the allocator's 32-bit interface on 64-bit platforms,
+ // so check for overflow ((size_t)1<<31 avoids shifting into the sign bit of an int)
+ if(n > ((size_t)1<<31)) return NULL;
+#else
+ if(n == (size_t)-1) return NULL; // overflow occurred
+#endif
+
+ return alloc->UnsafeAllocNoThrow((unsigned)n);
+}
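+
+// Illustrative usage of the placement forms above (a sketch; 'alloc' is assumed
+// to be a valid StackingAllocator*, and 'nothrow' the CLR's NoThrow constant):
+//
+//     int *a = new (alloc) int[16]; // throws on OOM
+//     int *b = new (alloc, nothrow) int[16]; // returns NULL on OOM
+//     // No matching delete: memory is reclaimed via Collapse on the allocator.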
+
diff --git a/src/vm/stackingallocator.h b/src/vm/stackingallocator.h
new file mode 100644
index 0000000000..10745b71a0
--- /dev/null
+++ b/src/vm/stackingallocator.h
@@ -0,0 +1,316 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// StackingAllocator.h -
+//
+
+//
+
+
+#ifndef __stacking_allocator_h__
+#define __stacking_allocator_h__
+
+#include "util.hpp"
+#include "eecontract.h"
+
+
+// We use zero sized arrays, disable the non-standard extension warning.
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4200)
+#endif
+
+#ifdef _DEBUG
+ struct Sentinal
+ {
+ enum { marker1Val = 0xBAD00BAD };
+ Sentinal(Sentinal* next) : m_Marker1(marker1Val), m_Next(next) { LIMITED_METHOD_CONTRACT; }
+
+ unsigned m_Marker1; // just some data bytes
+ Sentinal* m_Next; // linked list of these
+ };
+#endif
+
+ // Blocks from which allocations are carved. Size is determined dynamically,
+ // with upper and lower bounds of MinBlockSize and MaxBlockSize respectively
+ // (though large allocation requests will cause a block of exactly the right
+ // size to be allocated).
+ struct StackBlock
+ {
+ StackBlock *m_Next; // Next oldest block in list
+ DWORD_PTR m_Length; // Length of block excluding header (needs to be pointer-sized for alignment on IA64)
+ INDEBUG(Sentinal* m_Sentinal;) // ensure that we don't fall off the end of the buffer
+ INDEBUG(void** m_Pad;) // keep the size a multiple of 8
+ char m_Data[]; // Start of user allocation space
+ };
+
+ // Whenever a checkpoint is requested, a checkpoint structure is allocated
+ // (as a normal allocation) and is filled with information about the state
+ // of the allocator prior to the checkpoint. When a Collapse request comes
+ // in we can therefore restore the state of the allocator.
+ // It is the address of the checkpoint structure that we hand out to the
+ // caller of GetCheckpoint as an opaque checkpoint marker.
+ struct Checkpoint
+ {
+ StackBlock *m_OldBlock; // Head of block list before checkpoint
+ unsigned m_OldBytesLeft; // Number of free bytes before checkpoint
+ };
+
+
+
+// Non-thread safe allocator designed for allocations with the following
+// pattern:
+// allocate, allocate, allocate ... deallocate all
+// There may also be recursive uses of this allocator (by the same thread), so
+// the usage becomes:
+// mark checkpoint, allocate, allocate, ..., deallocate back to checkpoint
+//
+// Allocations come from a singly linked list of blocks with dynamically
+// determined size (the goal is to have fewer block allocations than allocation
+// requests).
+//
+// Allocations are very fast (in the case where a new block isn't allocated)
+// since blocks are carved up into packets by simply moving a cursor through
+// the block.
+//
+// Allocations are guaranteed to be quadword aligned.
+class StackingAllocator
+{
+public:
+
+ enum
+ {
+ MinBlockSize = 128,
+ MaxBlockSize = 4096,
+ InitBlockSize = 512
+ };
+
+#ifndef DACCESS_COMPILE
+ StackingAllocator();
+ ~StackingAllocator();
+#else
+ StackingAllocator() { LIMITED_METHOD_CONTRACT; }
+#endif
+
+ void StoreCheckpoint(Checkpoint *c)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _DEBUG
+ m_CheckpointDepth++;
+ m_Checkpoints++;
+#endif
+
+ // Record previous allocator state in it.
+ c->m_OldBlock = m_FirstBlock;
+ c->m_OldBytesLeft = m_BytesLeft;
+ }
+
+ void* GetCheckpoint();
+
+ // @todo move this into a .inl file as many class users of this class don't need to include this body
+ FORCEINLINE void * UnsafeAllocNoThrow(unsigned Size)
+ {
+ CONTRACT (void*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ INJECT_FAULT(CONTRACT_RETURN NULL;);
+ PRECONDITION(m_CheckpointDepth > 0);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+#ifdef _DEBUG
+ m_Allocs++;
+ m_MaxAlloc = max(Size, m_MaxAlloc);
+#endif
+
+ // Special case: 0-size alloc; return a non-null but invalid pointer.
+ if (Size == 0)
+ {
+ RETURN (void*)-1;
+ }
+
+ // Round size up to ensure alignment.
+ unsigned n = (Size + 7) & ~7;
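+ // (e.g. Size == 13 rounds up to n == 16; a Size within 7 of UINT_MAX wraps,
+ // which the check below catches.)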
+ if(n < Size)
+ {
+ return NULL;
+ }
+
+ // leave room for the sentinel
+ INDEBUG(n += sizeof(Sentinal));
+
+ // Is the request too large for the current block?
+ if (n > m_BytesLeft)
+ {
+ bool allocatedNewBlock = false;
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), RETURN NULL);
+ allocatedNewBlock = AllocNewBlockForBytes(n);
+ END_SO_INTOLERANT_CODE;
+
+ if (!allocatedNewBlock)
+ {
+ RETURN NULL;
+ }
+ }
+
+ // Once we get here we know we have enough bytes left in the block at the
+ // head of the chain.
+ _ASSERTE(n <= m_BytesLeft);
+
+ void *ret = m_FirstFree;
+ m_FirstFree += n;
+ m_BytesLeft -= n;
+
+#ifdef _DEBUG
+ // Add a sentinel to the end
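+ // (Each allocation appends a Sentinal just past the user bytes; Validate()
+ // later walks this per-block chain to detect overruns.)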
+ m_FirstBlock->m_Sentinal = new(m_FirstFree - sizeof(Sentinal)) Sentinal(m_FirstBlock->m_Sentinal);
+#endif
+
+ RETURN ret;
+ }
+
+ FORCEINLINE void * AllocNoThrow(S_UINT32 size)
+ {
+ // Safely round size up to ensure alignment.
+ if(size.IsOverflow()) return NULL;
+
+ return UnsafeAllocNoThrow(size.Value());
+ }
+
+ FORCEINLINE void * AllocSafeThrow(S_UINT32 size){
+ WRAPPER_NO_CONTRACT;
+
+ if(size.IsOverflow()) ThrowOutOfMemory();
+
+ return UnsafeAllocSafeThrow(size.Value());
+ }
+
+ FORCEINLINE void * Alloc(S_UINT32 size){
+ WRAPPER_NO_CONTRACT;
+
+ if(size.IsOverflow()) ThrowOutOfMemory();
+
+ return UnsafeAlloc(size.Value());
+ }
+
+ void Collapse(void* CheckpointMarker);
+
+ void* UnsafeAllocSafeThrow(UINT32 size);
+ void* UnsafeAlloc(UINT32 size);
+private:
+
+ bool AllocNewBlockForBytes(unsigned n);
+
+ StackBlock *m_FirstBlock; // Pointer to head of allocation block list
+ char *m_FirstFree; // Pointer to first free byte in head block
+ unsigned m_BytesLeft; // Number of free bytes left in head block
+ StackBlock *m_InitialBlock; // The first block is special, we never free it
+ StackBlock *m_DeferredFreeBlock; // Avoid going to the OS too often by deferring one free
+
+#ifdef _DEBUG
+ unsigned m_CheckpointDepth;
+ unsigned m_Allocs;
+ unsigned m_Checkpoints;
+ unsigned m_Collapses;
+ unsigned m_BlockAllocs;
+ unsigned m_MaxAlloc;
+#endif
+
+ void Init(bool bResetInitBlock)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (bResetInitBlock || (m_InitialBlock == NULL))
+ {
+ Clear(NULL);
+ m_FirstBlock = NULL;
+ m_FirstFree = NULL;
+ m_BytesLeft = 0;
+ m_InitialBlock = NULL;
+ }
+ else
+ {
+ m_FirstBlock = m_InitialBlock;
+ m_FirstFree = m_InitialBlock->m_Data;
+ _ASSERTE(FitsIn<unsigned>(m_InitialBlock->m_Length));
+ m_BytesLeft = static_cast<unsigned>(m_InitialBlock->m_Length);
+ }
+ }
+
+#ifdef _DEBUG
+ void Validate(StackBlock *block, void* spot)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (!block)
+ return;
+ Sentinal* ptr = block->m_Sentinal;
+ _ASSERTE(spot);
+ while(ptr >= spot)
+ {
+ // If this assert goes off then someone overwrote their buffer!
+ // A common candidate is a PINVOKE buffer overrun. To confirm, look
+ // up the stack for NDirect.* and find the MethodDesc
+ // associated with it. Be very suspicious if it is one that
+ // has a return string buffer! That usually means the application
+ // programmer did not allocate a big enough buffer before passing
+ // it to the PINVOKE method.
+ if (ptr->m_Marker1 != Sentinal::marker1Val)
+ _ASSERTE(!"Memory overrun!! May be bad buffer passed to PINVOKE. turn on logging LF_STUBS level 6 to find method");
+ ptr = ptr->m_Next;
+ }
+ block->m_Sentinal = ptr;
+ }
+#endif
+
+ void Clear(StackBlock *ToBlock)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ StackBlock *p = m_FirstBlock;
+ StackBlock *q;
+
+ while (p != ToBlock)
+ {
+ PREFAST_ASSUME(p != NULL);
+
+ q = p;
+ p = p->m_Next;
+
+ INDEBUG(Validate(q, q));
+
+ // we don't give the tail block back to the OS
+ // because we can get into situations where we're growing
+ // back and forth over a single seam for a tiny alloc
+ // and the perf is a disaster -- VSWhidbey #100462
+ if (m_DeferredFreeBlock != NULL)
+ {
+ delete [] (char *)m_DeferredFreeBlock;
+ }
+
+ m_DeferredFreeBlock = q;
+ m_DeferredFreeBlock->m_Next = NULL;
+ }
+ }
+
+private :
+ static Checkpoint s_initialCheckpoint;
+};
+
+void * __cdecl operator new(size_t n, StackingAllocator *alloc);
+void * __cdecl operator new[](size_t n, StackingAllocator *alloc);
+void * __cdecl operator new(size_t n, StackingAllocator *alloc, const NoThrow&);
+void * __cdecl operator new[](size_t n, StackingAllocator *alloc, const NoThrow&);
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+#endif
diff --git a/src/vm/stackprobe.cpp b/src/vm/stackprobe.cpp
new file mode 100644
index 0000000000..a515534f5c
--- /dev/null
+++ b/src/vm/stackprobe.cpp
@@ -0,0 +1,1793 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+//-----------------------------------------------------------------------------
+// StackProbe.cpp
+//-----------------------------------------------------------------------------
+
+
+#include "common.h"
+#include "stackprobe.h"
+
+
+#ifdef FEATURE_STACK_PROBE
+
+
+// SOTolerantBoundaryFilter is called when an exception in SO-tolerant code arrives
+// at the boundary back into SO-intolerant code.
+//
+// If we are running in an environment where we must be hardened to SO, then we must
+// catch the exception if there is not enough space to run our backout code (the stuff in the
+// EX_CATCH clauses). We also cannot let a hard SO propagate into SO-intolerant code, because
+// we rip the process if that happens (we have no way to tell that the SO is ok.)
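+//
+// Typical shape of a call site (illustrative only; the real call sites live in
+// the SO-tolerance transition macros elsewhere in the VM):
+//
+//     __try { /* SO-tolerant code */ }
+//     __except (SOTolerantBoundaryFilter(GetExceptionInformation(), &dwFlags))
+//     { /* backout and recovery */ }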
+int SOTolerantBoundaryFilter(EXCEPTION_POINTERS *pExceptionInfo, DWORD * pdwSOTolerantFlags)
+{
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+ _ASSERTE(pdwSOTolerantFlags != NULL);
+ _ASSERTE(!((*pdwSOTolerantFlags) & BSTC_TRIGGERING_UNWIND_FOR_SO));
+
+ SaveCurrentExceptionInfo(pExceptionInfo->ExceptionRecord, pExceptionInfo->ContextRecord);
+
+ NTSTATUS exceptionCode = pExceptionInfo->ExceptionRecord->ExceptionCode;
+
+ // We must always handle a hard SO
+ if (IsSOExceptionCode(exceptionCode))
+ {
+ if (exceptionCode == EXCEPTION_SOFTSO)
+ {
+ *pdwSOTolerantFlags |= BSTC_IS_SOFT_SO;
+ }
+ *pdwSOTolerantFlags |= BSTC_IS_SO;
+
+ if (!CLRHosted() || pThread == NULL || GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) != eRudeUnloadAppDomain)
+ {
+ // For security reason, it is not safe to continue execution if stack overflow happens
+ // unless a host tells us to do something different.
+ EEPolicy::HandleFatalStackOverflow(pExceptionInfo);
+ }
+
+ /* If there is a SO_INTOLERANT region above this */
+ /* we should have processed it already in SOIntolerantTransitionHandler */
+ EEPolicy::HandleStackOverflow(SOD_SOTolerantTransitor, FRAME_TOP);
+
+ *pdwSOTolerantFlags |= BSTC_TRIGGERING_UNWIND_FOR_SO;
+
+ return EXCEPTION_EXECUTE_HANDLER;
+ }
+
+ // Make sure we have enough stack to run our backout code. If not,
+ // catch the exception.
+ if (! pThread->IsStackSpaceAvailable(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT)))
+ {
+ *pdwSOTolerantFlags |= BSTC_TRIGGERING_UNWIND_FOR_SO;
+ return EXCEPTION_EXECUTE_HANDLER;
+ }
+
+
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+void SOTolerantCode_RecoverStack(DWORD dwFlags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Thread * pThread = GetThread();
+ if (!(dwFlags & BSTC_IS_SOFT_SO))
+ {
+ pThread->RestoreGuardPage();
+ }
+ if (dwFlags & BSTC_IS_SO)
+ {
+ if (!pThread->PreemptiveGCDisabled())
+ {
+ pThread->DisablePreemptiveGC();
+ }
+ // PerformADUnloadAction is SO_INTOLERANT, but we might be
+ // calling BEGIN_SO_TOLERANT_CODE from an entry point method
+ BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
+ BEGIN_GCX_ASSERT_COOP;
+ // We have enough stack now. Start unload
+ EEPolicy::PerformADUnloadAction(eRudeUnloadAppDomain, TRUE, TRUE);
+ END_GCX_ASSERT_COOP;
+ END_CONTRACT_VIOLATION;
+ }
+ COMPlusThrowSO();
+}
+
+void SOTolerantCode_ExceptBody(DWORD * pdwFlags, Frame * pSafeForSOFrame)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // do nothing here. Get our stack back post-catch and then throw a new exception
+ *pdwFlags |= BSTC_RECOVER_STACK;
+ if (*pdwFlags & BSTC_IS_SO)
+ {
+ // If this assertion fires, then it means that we have not unwound the frame chain
+ Thread * pThread = GetThread();
+ _ASSERTE(pSafeForSOFrame == pThread->GetFrame());
+ pThread->ClearExceptionStateAfterSO(pSafeForSOFrame);
+ }
+}
+
+//
+// ReportStackOverflow is called when our probe infrastructure detects that there
+// is insufficient stack to perform the operation.
+//
+
+void ReportStackOverflow()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(IsStackProbingEnabled());
+
+ Thread *pThread = GetThread();
+
+ if (pThread != NULL)
+ {
+ // We don't want an SO to happen while we are trying to throw this one. So check if there
+ // is enough space left to handle an exception (this translates to check that we have stack
+ // space left equivalent to the soft guard region). If not, then remove the guard page by
+ // forcing a hard SO. This effectively turns the SO into a boundary SO.
+
+ // We should only ever get in this situation on a probe from managed code. From within the EE,
+ // we will never let our probe point get this close. Either way, we'd rip the process if a hard
+ // SO occurred.
+
+ UINT_PTR stackGuarantee = pThread->GetStackGuarantee();
+
+ // We expect the stackGuarantee to be a multiple of the page size for
+ // the call to IsStackSpaceAvailable.
+ _ASSERTE(stackGuarantee%OS_PAGE_SIZE == 0);
+ if (pThread->IsStackSpaceAvailable(static_cast<float>(stackGuarantee)/OS_PAGE_SIZE))
+ {
+ COMPlusThrowSO();
+ }
+
+ // If there isn't much stack left to attempt to report a soft stack overflow, trigger a hard
+ // SO so that we clear the guard page and get at least another page of stack to work with.
+
+ if (!pThread->IsStackSpaceAvailable(ADJUST_PROBE(1)))
+ {
+ DontCallDirectlyForceStackOverflow();
+ }
+ }
+
+ RaiseException(EXCEPTION_SOFTSO, 0, 0, NULL);
+}
+
+void CheckForSOInSOIntolerantCode()
+{
+ Thread *pThread = GetThreadNULLOk();
+ if (pThread == NULL)
+ {
+ return;
+ }
+ // We use the location of frames to decide SO mode. But during exception handling,
+ // we may not unwind some frames (for example, TPMethodFrame), so
+ // it is not safe to apply this check.
+ //_ASSERTE(!pThread->IsSOTolerant(FRAME_TOP));
+ if (! pThread->IsSPBeyondLimit())
+ {
+ return;
+ }
+ EEPolicy::HandleStackOverflow(SOD_SOIntolerantTransitor, FRAME_TOP);
+ _ASSERTE (!"Can not reach here");
+}
+
+//---------------------------------------------------------------------------------------
+//
+// SetSOIntolerantTransitionMarker: Use the current frame as our marker for intolerant transition.
+//
+// Arguments:
+// None.
+//
+// Return Value:
+// None.
+//
+// Note:
+// SO mode is determined by what is on the stack. If we see our intolerant transition first, we are in SO.
+// Because the compiler lays out a function's objects at arbitrary stack locations, the address of our intolerant
+// transition object SOIntolerantTransitionHandler may be before the HelperMethodFrame. Therefore, we
+// cannot use the address of the handlers. Instead we use the current top frame.
+//
+void SetSOIntolerantTransitionMarker()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ Thread *pThread = GetThreadNULLOk();
+ if (pThread == NULL)
+ {
+ return;
+ }
+ Frame *pFrame = pThread->GetFrame();
+
+ //
+ // Check to see if the Frame chain is corrupt
+ // This can happen when unmanaged code calls back to managed code
+ //
+ if (pFrame != FRAME_TOP)
+ {
+ // HasValidVTablePtr examines the value of the vtable pointer
+ // and makes sure that it is a legal Frame subtype.
+ // It returns FALSE when we have an illegal (i.e. corrupt) vtable value.
+ //
+ if (!Frame::HasValidVTablePtr(pFrame))
+ DoJITFailFast();
+ }
+
+ // We use pFrame - 1 as our marker so that IntolerantTransitionHandler is seen before
+ // a transition frame.
+ ClrFlsSetValue(TlsIdx_SOIntolerantTransitionHandler, (void*)(((size_t)pFrame)-1));
+
+ _ASSERTE(!pThread->IsSOTolerant(FRAME_TOP));
+}
+
+BOOL RetailStackProbeNoThrowNoThread(unsigned int n)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_MODE_ANY;
+
+ BEGIN_GETTHREAD_ALLOWED;
+ Thread *pThread = GetThread();
+
+ if (!pThread)
+ {
+ // we only probe on managed threads
+ return TRUE;
+ }
+ return RetailStackProbeNoThrow(n, pThread);
+ END_GETTHREAD_ALLOWED;
+}
+
+// These functions are used by the stack probe infrastructure that is outside the VM
+// tree. It needs to call into the VM code in order to probe properly.
+void InitStackProbesRetail()
+{
+ LIMITED_METHOD_CONTRACT;
+ g_fpCheckForSOInSOIntolerantCode = CheckForSOInSOIntolerantCode;
+ g_fpSetSOIntolerantTransitionMarker = SetSOIntolerantTransitionMarker;
+ g_fpDoProbe = RetailStackProbeNoThrowNoThread;
+ g_fpHandleSoftStackOverflow = EEPolicy::HandleSoftStackOverflow;
+
+ g_StackProbingEnabled = g_pConfig->ProbeForStackOverflow() != 0;
+}
+
+// Shared by both the nothrow and throwing version. FORCEINLINE into both to avoid the call overhead.
+FORCEINLINE BOOL RetailStackProbeHelper(unsigned int n, Thread *pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ UINT_PTR probeLimit;
+
+ // @TODO - Need to devise a probe that doesn't require the thread object
+ if (pThread == NULL)
+ {
+ UINT_PTR stackLimit = (UINT_PTR)Thread::GetStackLowerBound();
+ probeLimit = Thread::GetLastNormalStackAddress(stackLimit);
+ }
+ else
+ {
+ probeLimit = pThread->GetProbeLimit();
+ }
+ UINT_PTR probeAddress = (UINT_PTR)(&pThread) - (n * OS_PAGE_SIZE);
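+ // (e.g. with 4KB pages and n == 12, this tests an address roughly 48KB below
+ // the current stack pointer, approximated here by &pThread.)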
+
+ // If the address we want to probe to is beyond the precalculated limit we fail
+ // Note that we don't check for stack probing being disabled. This is encoded in
+ // the value returned from GetProbeLimit, which will be 0 if probing is disabled.
+ if (probeAddress < probeLimit)
+ {
+#if 0
+ // @todo : remove this when iexplore, W3WP.EXE and friends allocate 512K instead
+ // of 256K for their stack.
+ if (((char *)(pThread->GetCachedStackBase()) - (char *)(pThread->GetCachedStackLimit())) < 0x41000)
+ {
+ return true;
+ }
+#endif
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+BOOL RetailStackProbeNoThrowWorker(unsigned int n, Thread *pThread)
+{
+ WRAPPER_NO_CONTRACT;
+ return RetailStackProbeHelper(n, pThread);
+}
+
+void RetailStackProbeWorker(unsigned int n, Thread *pThread)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ if (RetailStackProbeHelper(n, pThread))
+ {
+ return;
+ }
+ ReportStackOverflow();
+}
+
+void DefaultRetailStackProbeWorker(Thread *pThread)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ if (RetailStackProbeHelper(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread))
+ {
+ return;
+ }
+ ReportStackOverflow();
+}
+
+#endif // FEATURE_STACK_PROBE
+
+#ifdef STACK_GUARDS_DEBUG
+
+DWORD g_InteriorProbeAmount = DEFAULT_INTERIOR_PROBE_AMOUNT;
+
+DWORD g_CurrentStackGuardTlsIdx = (DWORD) -1;
+DWORD g_UniqueId = 0;
+
+// If this has a non-zero value, we'll mark only those pages whose probe line number matches. This allows us
+// to turn protection on only for a specific probe, so that we can find multiple overruns without having to rebuild. Otherwise
+// we can never get past that first AV in the debugger.
+unsigned int g_ProtectStackPagesInDebuggerForProbeAtLine = 0;
+
+// These are used to update the amount probed for at a particular line number
+unsigned int g_UpdateProbeAtLine = 0;
+SString* g_pUpdateProbeAtLineInFile = NULL;
+unsigned int g_UpdateProbeAtLineAmount = 0;
+
+// If this is TRUE, we'll break in the debugger if we try to probe during the handling of a
+// probe-induced stack overflow.
+BOOL g_BreakOnProbeDuringSO = FALSE;
+
+// If this is TRUE, probe cookie validation via assertion is enabled.
+// Disabled by default on debug builds: the stack consumption is different enough
+// that we'd always be getting spurious failures.
+BOOL g_probeAssertOnOverrun = FALSE;
+
+// SO logging pollutes the EH logging space and vice-versa. The SOLogger class
+// allows us to turn SO logging on separately and only produce SO logging, or
+// to allow both.
+#undef LOG
+#define LOG(x) s_SOLogger.LogSpew x
+
+class SOLogger {
+
+ enum SOLogStyle {
+ SO_LOGGING_NONE, // No SO logging
+ SO_LOGGING_SEPARATE_LOG, // Log SO to separate file
+ SO_LOGGING_STANDARD_LOG // Log SO to standard log
+ };
+
+ SOLogStyle m_SOLogStyle;
+ FILE *m_SOLoggerFile;
+
+public:
+ SOLogger();
+ ~SOLogger();
+
+ void Initialize();
+
+ void LogSpew(DWORD facility, DWORD level, const char *fmt, ... );
+};
+
+static SOLogger s_SOLogger;
+
+SOLogger::SOLogger()
+ : m_SOLogStyle(SO_LOGGING_NONE), m_SOLoggerFile(NULL)
+{
+}
+
+void SOLogger::Initialize()
+{
+ WRAPPER_NO_CONTRACT;
+
+ DWORD SOLogger = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SOLogger, SO_LOGGING_NONE);
+ if (SOLogger == SO_LOGGING_SEPARATE_LOG)
+ {
+ m_SOLogStyle = SO_LOGGING_SEPARATE_LOG;
+ int ec = fopen_s(&m_SOLoggerFile, "SOLogSpewFile.log", "w");
+ _ASSERTE(ec == 0); // fopen_s returns an errno_t, not an HRESULT
+ }
+ else if (SOLogger == SO_LOGGING_STANDARD_LOG)
+ {
+ m_SOLogStyle = SO_LOGGING_STANDARD_LOG;
+ }
+ else if (SOLogger == SO_LOGGING_NONE)
+ {
+ m_SOLogStyle = SO_LOGGING_NONE;
+ }
+ else
+ {
+ _ASSERTE(!"Invalid SOLogger value");
+ }
+}
+
+SOLogger::~SOLogger()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_SOLoggerFile != NULL)
+ {
+ fclose(m_SOLoggerFile);
+ }
+}
+
+void SOLogger::LogSpew(DWORD facility, DWORD level, const char *fmt, ... )
+{
+ STATIC_CONTRACT_WRAPPER;
+
+ if (m_SOLogStyle == SO_LOGGING_NONE)
+ {
+ return;
+ }
+
+ va_list args;
+ va_start( args, fmt );
+ if (m_SOLogStyle == SO_LOGGING_SEPARATE_LOG)
+ {
+ vfprintf(m_SOLoggerFile, fmt, args);
+ }
+ else if (LoggingEnabled())
+ {
+ LogSpewValist (facility, level, fmt, args);
+ }
+}
+
+#define MORE_INFO_STRING \
+ "\nPlease open a bug against the feature owner.\n" \
+ "\nFor details about this feature, see, in a CLR enlistment, src\\ndp\\clr\\doc\\OtherDevDocs\\untriaged\\clrdev_web\\SO Guide for CLR Developers.doc\n"
+
+
+// The following are used to support the SO-injection framework
+HMODULE BaseStackGuard::m_hProbeCallBack = 0;
+BaseStackGuard::ProbeCallbackType BaseStackGuard::m_pfnProbeCallback = NULL;
+
+//
+// ShouldValidateSOToleranceOnThisThread determines if we should check for SO_Tolerance on this
+// thread.
+//
+// If it is a thread we care about, then we will assert if it calls an SO-intolerant function
+// outside of a probe
+//
+BOOL ShouldValidateSOToleranceOnThisThread()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (g_StackProbingEnabled == false || g_fEEShutDown == TRUE)
+ {
+ return FALSE;
+ }
+
+ BEGIN_GETTHREAD_ALLOWED;
+ Thread *pThread = GetThread();
+ if (pThread == NULL || ShouldProbeOnThisThread() == FALSE)
+ {
+ return FALSE;
+ }
+
+ // We only want to probe on managed threads that have IL on the stack behind them. But
+ // there's not an easy way to check for that, so we use whether or not we own the thread and
+ // whether or not a stack guard is in place.
+ //
+ // If we don't own the thread, then just make sure that we didn't get here by leaving the EE and coming
+ // back in. (In which case we would have installed a probe and the GetCurrentStackGuard is non-NULL).
+ // We are only probing on managed threads, but we want to avoid asserting for cases where an unmanaged
+ // app starts the EE (thereby creating a managed thread), and runs completely unmanaged, but uses some of the CLR's
+ // infrastructure, such as Crsts.
+ if (pThread->DoWeOwn() == FALSE && pThread->GetCurrentStackGuard() == NULL)
+ {
+ return FALSE;
+ }
+
+ if (! IsHandleNullUnchecked(pThread->GetThrowableAsHandle()))
+ {
+ return FALSE;
+ }
+
+ return TRUE;
+ END_GETTHREAD_ALLOWED;
+}
+
+
+BOOL BaseStackGuard_RequiresNStackPages(BaseStackGuardGeneric *pGuard, unsigned int n, BOOL fThrowOnSO)
+{
+ return ((BaseStackGuard*)pGuard)->RequiresNStackPages(n, fThrowOnSO);
+}
+
+void BaseStackGuard_CheckStack(BaseStackGuardGeneric *pGuard)
+{
+ WRAPPER_NO_CONTRACT;
+ ((BaseStackGuard*)pGuard)->CheckStack();
+}
+
+BOOL CheckNStackPagesAvailable(unsigned int n)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_MODE_ANY;
+
+ BEGIN_GETTHREAD_ALLOWED;
+ Thread *pThread = GetThread();
+
+ // If we don't have a managed thread object, we assume that we have the requested
+ // number of pages available.
+ if (!pThread)
+ return TRUE;
+
+ _ASSERTE(FitsIn<float>(n));
+ return pThread->IsStackSpaceAvailable(static_cast<float>(n));
+ END_GETTHREAD_ALLOWED;
+}
+
+void InitStackProbes()
+{
+ WRAPPER_NO_CONTRACT;
+
+ g_CurrentStackGuardTlsIdx = TlsIdx_StackProbe;
+
+ s_SOLogger.Initialize();
+
+ // If we're in a debugger, and if the config word below is set, then we'll go ahead and protect stack pages
+ // when we're run under a debugger.
+ //if (IsDebuggerPresent())
+ //{
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SOEnableStackProtectionInDebugger) == 1)
+ {
+ g_ProtectStackPagesInDebugger = TRUE;
+ }
+ g_ProtectStackPagesInDebuggerForProbeAtLine =
+ CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SOEnableStackProtectionInDebuggerForProbeAtLine);
+
+ g_UpdateProbeAtLine = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SOUpdateProbeAtLine);
+ g_UpdateProbeAtLineAmount = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SOUpdateProbeAtLineAmount);
+ LPWSTR wszUpdateProbeAtLineInFile = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SOUpdateProbeAtLineInFile);
+ g_pUpdateProbeAtLineInFile = new SString(wszUpdateProbeAtLineInFile);
+ g_pUpdateProbeAtLineInFile->Normalize();
+
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SOBreakOnProbeDuringSO) == 1)
+ {
+ g_BreakOnProbeDuringSO = TRUE;
+ }
+ //}
+
+ // Never let g_EntryPointProbeAmount get set to an invalid value of <= 0 to avoid races in places that might be
+ // about to probe as we set it.
+ DWORD entryPointProbeAmount = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SOEntryPointProbe, g_EntryPointProbeAmount);
+ if (entryPointProbeAmount > 0)
+ {
+ g_EntryPointProbeAmount = entryPointProbeAmount;
+ }
+
+ DWORD interiorProbeAmount = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SOInteriorProbe, g_InteriorProbeAmount);
+ if (interiorProbeAmount > 0)
+ {
+ g_InteriorProbeAmount = interiorProbeAmount;
+ }
+
+ BOOL enableBackoutStackValidation = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SOEnableBackoutStackValidation, FALSE);
+
+ g_EnableDefaultRWValidation = 1;
+
+ BOOL enableDefaultRWValidation = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SOEnableDefaultRWValidation, g_EnableDefaultRWValidation);
+
+
+
+ // put this first because it will cause probe validation via contract otherwise
+ g_probeAssertOnOverrun = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SOProbeAssertOnOverrun, g_probeAssertOnOverrun);
+
+ BaseStackGuard::InitProbeReportingToFaultInjectionFramework();
+
+ g_EnableBackoutStackValidation = enableBackoutStackValidation;
+
+ g_EnableDefaultRWValidation = enableDefaultRWValidation;
+
+ g_fpShouldValidateSOToleranceOnThisThread = ShouldValidateSOToleranceOnThisThread;
+
+ g_fpRestoreCurrentStackGuard = BaseStackGuard::RestoreCurrentGuard;
+ g_fpHandleStackOverflowAfterCatch = EEPolicy::HandleStackOverflowAfterCatch;
+
+
+ g_fp_BaseStackGuard_RequiresNStackPages = BaseStackGuard_RequiresNStackPages;
+ g_fp_BaseStackGuard_CheckStack = BaseStackGuard_CheckStack;
+
+ g_fpCheckNStackPagesAvailable = CheckNStackPagesAvailable;
+
+ InitStackProbesRetail();
+
+}
+
+void CloseSOTolerantViolationFile();
+
+//
+// This function is called when the EE is shutting down and we want to stop
+// doing stack probing. Don't clear the g_CurrentStackGuardTlsIdx field though,
+// because there may still be other threads in the process of probing and
+// they'll AV if we pull the g_CurrentStackGuardTlsIdx out from under them.
+void TerminateStackProbes()
+{
+ WRAPPER_NO_CONTRACT;
+
+
+ CloseSOTolerantViolationFile();
+
+ // Don't actually shut down the SO infrastructure. We've got multiple threads
+ // racing around in the runtime, and they can be left in an inconsistent state
+ // if we flip this off.
+
+ return;
+#if 0
+ // Yank the stack guard on this thread
+ StackGuardDisabler __guardDisable;
+ __guardDisable.NeverRestoreGuard();
+
+ // Clear out the current guard in case we terminate and its cleanup code
+ // does not get to run.
+ BaseStackGuard::SetCurrentGuard(NULL);
+
+ g_StackProbingEnabled = false;
+ g_EnableBackoutStackValidation = FALSE;
+ g_fpShouldValidateSOToleranceOnThisThread = NULL;
+#endif
+}
+
+//-----------------------------------------------------------------------------
+// Error handling when we go past a stack guard.
+// We have different messages to more aggressively diagnose the problem.
+//-----------------------------------------------------------------------------
+
+// Called by Check_Stack when we overwrite the cookie
+void BaseStackGuard::HandleOverwrittenThisStackGuard(__in_z char *stackID)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (! g_probeAssertOnOverrun)
+ {
+ return;
+ }
+
+ ClrDebugState *pState = GetClrDebugState();
+ _ASSERTE(pState);
+ if (pState->IsSONotMainline())
+ {
+ return;
+ }
+
+ // This prevents infinite loops in this function if we call something that probes.
+ // Must do it after the check for pState->IsSONotMainline() to give the first invocation
+ // a chance to run.
+ SO_NOT_MAINLINE_FUNCTION;
+
+ // This fires at a closing Check_Stack.
+ // The cookie set by Requires_?K_stack was overwritten. We detected that at
+ // the closing call to check_stack.
+
+ // To fix, increase the guard size at the specified ip.
+ //
+ // A debugging trick: If you can set a breakpoint at the opening Requires_?K_Stack
+ // macro for this instance, you can step in and see where the cookie is actually
+ // placed. Then, place a breakpoint that triggers when (DWORD*) 0xYYYYYYYY changes.
+ // Continue execution. The breakpoint will fire exactly when the cookie is over-written.
+ char buff[1024];
+ buff[0] = '\0';
+
+ sprintf_s(buff, COUNTOF(buff),
+ "STACK GUARD VIOLATION\n"
+ "The%s stack guard installed in %s at \"%s\" @ %d requested %d pages of stack.\n"
+ "\nIf this is easily reproduced, please rerun the test under the debugger with the\n"
+ "DWORD environment variable COMPLUS_SOEnableStackProtectionInDebugger\n"
+ "set to 1. This will cause an AV at the point of overrun.\n"
+ "Attach the stack trace at that point to the bug in addition to this assert."
+ MORE_INFO_STRING, stackID ? stackID : "",
+ m_szFunction, m_szFile, m_lineNum, m_numPages);
+
+ LOG((LF_EH, LL_INFO100000, "%s", buff));
+
+ DbgAssertDialog((char *)m_szFile, m_lineNum, buff);
+
+}
+
+void BaseStackGuard::HandleOverwrittenPreviousStackGuard(int probeShortFall, __in_z char *stackID)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (! g_probeAssertOnOverrun)
+ {
+ return;
+ }
+
+ ClrDebugState *pState = GetClrDebugState();
+ _ASSERTE(pState);
+ if (pState->IsSONotMainline())
+ {
+ return;
+ }
+
+ // This prevents infinite loops in this function if we call something that probes.
+ // Must do it after the check for pState->IsSONotMainline() to give the first invocation
+ // a chance to run.
+ SO_NOT_MAINLINE_FUNCTION;
+
+ // This fires at an opening Requires_?K_Stack
+ // We detected that we were already passed our parent's stack guard. So this guard is
+ // ok, but our parent's guard is too small. Note that if this test was removed,
+ // the failure would be detected by our parent's closing Check_Stack. But if we detect it
+ // here, we have more information.
+ //
+ // We can see how many bytes short our parent is and adjust it properly.
+ char buff[2048];
+ buff[0] = '\0';
+
+ // We don't come in here unless we have a previous guard.
+ _ASSERTE(m_pPrevGuard != NULL);
+
+ sprintf_s(buff, COUNTOF(buff),
+ "STACK GUARD VIOLATION\n"
+ " The%s stack guard being installed in %s at \"%s\" @ %d is already in violation of the previous stack guard.\n"
+ " The previous guard was installed in %s at \"%s\" @ %d and requested %d pages of stack.\n"
+ "The stack requested by the previous guard is at least %d pages (%d bytes) short.\n"
+ MORE_INFO_STRING, stackID ? stackID : "", m_szFunction, m_szFile, m_lineNum,
+ m_pPrevGuard->m_szFunction, m_pPrevGuard->m_szFile, m_pPrevGuard->m_lineNum, m_pPrevGuard->m_numPages,
+ probeShortFall/OS_PAGE_SIZE + (probeShortFall%OS_PAGE_SIZE ? 1 : 0), probeShortFall);
+
+ LOG((LF_EH, LL_INFO100000, "%s", buff));
+
+ DbgAssertDialog((char *)m_szFile, m_lineNum, buff);
+}
+
+void BaseStackGuard::HandleOverwrittenCurrentStackGuard(void *pGuard, int shortFall, __in_z char *stackID)
+{
+ ( (BaseStackGuard *)pGuard)->HandleOverwrittenCurrentStackGuard(shortFall, stackID);
+}
+
+void BaseStackGuard::HandleOverwrittenCurrentStackGuard(int probeShortFall, __in_z char *stackID)
+{
+ DEBUG_ONLY_FUNCTION;
+
+ if (! g_probeAssertOnOverrun)
+ {
+ return;
+ }
+
+ // This fires during probe invariant validation.
+ // We detected that our current stack was already past the current probe depth. Note that if this
+ // test were removed, the failure should be detected the current guard's closing Check_Stack.
+ // But if we detect it here, we have more information.
+ //
+ // We can see how many bytes short the guard is and adjust it properly.
+ char buff[2048];
+ buff[0] = '\0';
+
+ sprintf_s(buff, COUNTOF(buff),
+ "STACK GUARD VIOLATION\n\n"
+ "The%s stack guard installed in %s at \"%s\" @ %d has been violated\n\n"
+ "The guard requested %d pages of stack and is at least %d pages (%d bytes) short.\n"
+ MORE_INFO_STRING, stackID ? stackID : "", m_szFunction, m_szFile, m_lineNum, m_numPages,
+ probeShortFall/OS_PAGE_SIZE + (probeShortFall%OS_PAGE_SIZE ? 1 : 0), probeShortFall);
+
+ LOG((LF_EH, LL_INFO100000, "%s", buff));
+
+ DbgAssertDialog((char *)m_szFile, m_lineNum, buff);
+}
+
+//-----------------------------------------------------------------------------
+// Function to do the actual touching of memory during probing, so we can have
+// a good approximation of the address we should be overflowing at.
+//-----------------------------------------------------------------------------
+static __declspec(noinline) void PlaceMarker(UINT_PTR *pMarker)
+{
+ LIMITED_METHOD_CONTRACT;
+ *pMarker = STACK_COOKIE_VALUE;
+}
+
+
+StackGuardDisabler::StackGuardDisabler()
+{
+ LIMITED_METHOD_CONTRACT;
+ BaseStackGuard *pGuard = BaseStackGuard::GetCurrentGuard();
+
+ if (pGuard == NULL || !BaseStackGuard::IsProbeGuard(pGuard) || !pGuard->Enabled())
+ {
+ // If there's no guard, it's a boundary guard, or it's disabled, there's nothing to do
+ m_fDisabledGuard = FALSE;
+ return;
+ }
+
+ // If the guard is currently enabled, then we'll need to change the page protection
+ pGuard->UndoPageProtectionInDebugger();
+ pGuard->DisableGuard();
+ m_fDisabledGuard = TRUE;
+}// StackGuardDisabler
+
+void StackGuardDisabler::NeverRestoreGuard()
+{
+ m_fDisabledGuard = FALSE;
+}
+
+StackGuardDisabler::~StackGuardDisabler()
+{
+ WRAPPER_NO_CONTRACT;
+ if (m_fDisabledGuard)
+ {
+ BaseStackGuard::RestoreCurrentGuard(TRUE);
+ }
+}// ~StackProbeDisabler
+
+//-----------------------------------------------------------------------------
+// BaseStackGuard::RestoreCurrentGuard
+//
+// Function to restore the current marker's cookie after an EH.
+//
+// During an exception, we cannot restore stack guard cookies as we unwind our stack guards
+// because the stack has not been unwound and we might corrupt it. So we just pop off our
+// guards as we go and deal with restoring the cookie after the exception.
+// There are two cases:
+//
+// 1) the exception is caught outside the EE
+// 2) the exception is caught in the EE
+//
+// Case 1: If we catch the exception outside the EE, then the boundary guard that we installed before
+// leaving the EE will still be intact, so we have no work to do.
+//
+// Case 2: If we caught the exception in the EE, then on EX_END_CATCH, after we have unwound the stack, we need to
+// restore the cookie for the topmost stack guard. That is what RestoreCurrentGuard does.
+//
+//-----------------------------------------------------------------------------
+void BaseStackGuard::RestoreCurrentGuard(BOOL fWasDisabled)
+{
+ if (!IsStackProbingEnabled())
+ {
+ // nothing to do
+ return;
+ }
+
+ LPVOID pSP = (LPVOID)GetCurrentSP();
+ BaseStackGuard *pGuard = GetCurrentGuard();
+
+ if (pGuard == NULL || !IsProbeGuard(pGuard))
+ {
+ // If there's no guard or it's a boundary guard, there's nothing to do.
+ // Just set state to SO-tolerant and quit.
+ GetClrDebugState()->SetSOTolerance();
+ return;
+ }
+
+ if (reinterpret_cast<LPVOID>(pGuard->m_pMarker) > pSP)
+ {
+ // We have caught an exception while processing an exception. So we can't restore the marker and must
+ // wait until the catcher of the original exception handles it.
+ if (!IsBackoutCalledForEH((BYTE *)(pGuard), static_cast<BYTE *>(pSP)))
+ {
+ // verify that we really are processing an exception. We could have some false positives here, but in
+ // general this is a good check.
+ _ASSERTE(!"After an exception was caught, we couldn't restore the marker because it is greater than the SP\n"
+ "This should only happen if we caught a nested exception when already processing an exception, but"
+ " the distance between the SP and the probe does not indicate an exception is in flight.");
+ }
+ return;
+ }
+
+ // Reset the SO-tolerance state
+
+ // We should never get here with a guard beyond the current SP
+ _ASSERTE(reinterpret_cast<LPVOID>(pGuard) > pSP);
+
+ LOG((LF_EH, LL_INFO100000, "BSG::RSG: G: %p D: %d \n", pGuard, pGuard->m_depth));
+
+ // If we have EX_TRY {EX_TRY {...}EX_CATCH{...}EX_END_CATCH}EX_CATCH{...}EX_END_CATCH,
+ // the inner EX_END_CATCH will mark the current guard protected. When we reach the
+ // outer EX_END_CATCH, we will AV when placing marker.
+ pGuard->UndoPageProtectionInDebugger();
+ if (fWasDisabled)
+ pGuard->EnableGuard();
+ // Replace the marker for the current guard
+ PlaceMarker(pGuard->m_pMarker);
+
+ // Protect marker page in debugger if we need it
+ pGuard->ProtectMarkerPageInDebugger();
+ GetClrDebugState()->ResetSOTolerance();
+ pGuard->m_fEHInProgress = FALSE;
+}
+
+//-----------------------------------------------------------------------------
+// This places a marker outside the bounds of a probe. We don't want to use
+// PlaceMarker because that is how we detect if a proper SO was triggered (via
+// StackProbeContainsIP
+//-----------------------------------------------------------------------------
+static __declspec(noinline) void PlaceMarkerBeyondProbe(UINT_PTR *pMarker)
+{
+ *pMarker = STACK_COOKIE_VALUE;
+}
+
+//---------------------------------------------------------------------------------------------
+// Determine if we should check integrity of previous cookie. Only check if the previous was a probe guard.
+//---------------------------------------------------------------------------------------------
+inline BOOL BaseStackGuard::ShouldCheckPreviousCookieIntegrity()
+{
+ WRAPPER_NO_CONTRACT;
+ if (m_pPrevGuard == NULL ||
+ IsBoundaryGuard(m_pPrevGuard) ||
+ m_pPrevGuard->m_pMarker==NULL ||
+ m_pPrevGuard->m_fEHInProgress ||
+ !m_pPrevGuard->Enabled())
+ {
+ return FALSE;
+ }
+ return TRUE;
+}
+
+//---------------------------------------------------------------------------------------------
+// Determine if we should check integrity of this cookie.
+//---------------------------------------------------------------------------------------------
+inline BOOL BaseStackGuard::ShouldCheckThisCookieIntegrity()
+{
+ WRAPPER_NO_CONTRACT;
+ // We only need to check if this is a probe guard and it has a non-null marker.
+ // Anything else, we don't care about.
+ return IsProbeGuard(this) && m_pMarker != NULL && Enabled();
+}
+
+BOOL BaseStackGuard::RequiresNStackPages(unsigned int n, BOOL fThrowsOnSO)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return RequiresNStackPagesInternal(n, fThrowsOnSO);
+}
+
+BOOL BaseStackGuard::RequiresNStackPagesThrowing(unsigned int n)
+{
+// STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ return RequiresNStackPagesInternal(n, TRUE);
+}
+
+BOOL BaseStackGuard::RequiresNStackPagesNoThrow(unsigned int n)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ return RequiresNStackPagesInternal(n, FALSE);
+}
+
+//-----------------------------------------------------------------------------
+// Place guard in stack.
+//-----------------------------------------------------------------------------
+BOOL BaseStackGuard::RequiresNStackPagesInternal(unsigned int n, BOOL fThrowOnSO)
+{
+ CONTRACTL
+ {
+ DISABLED(THROWS);
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ BOOL fRet;
+
+ // Temporarily initialize the exception occurred flag
+ m_exceptionOccured = FALSE;
+
+ // Code below checks if there's a Thread, and exits immediately if not.
+ // So the rest of the function rightly assumes there is a Thread
+ BEGIN_GETTHREAD_ALLOWED;
+
+ // only probe on managed threads. No thread, no probe.
+ if (! IsStackProbingEnabled() || GetThread() == NULL)
+ {
+ return TRUE;
+ }
+
+ // Don't try to probe if we are checking backout and there are active backout markers on
+ // the stack to avoid collision
+ if (g_EnableBackoutStackValidation)
+ {
+ if ((!(GetClrDebugState()->GetStackMarkerStack().IsEmpty()))
+ && (!(GetClrDebugState()->GetStackMarkerStack().IsDisabled())))
+ {
+ return TRUE;
+ }
+ }
+
+ if (n <= 1)
+ {
+ // Our calculation below doesn't handle 1-page probes.
+ _ASSERTE(!"RequiresNStackPages called with a probe amount less than 2");
+ }
+
+ // Retrieve the current stack pointer which will be used to calculate the marker.
+ LPVOID pStack = (LPVOID)GetCurrentSP();
+
+ // Setup some helpful debugging information. Get our caller's ip. This is useful for debugging (so we can see
+ // when the previous guard was set).
+ m_UniqueId = g_UniqueId++;
+ m_numPages = n;
+
+ // Get the address of the last few bytes on the penultimate page we probed for. This is slightly earlier than the probe point,
+ // but gives us more conservatism in our overrun checking. ("Last" here means the bytes with the smallest address.)
+ m_pMarker = ((UINT_PTR*)pStack) - (OS_PAGE_SIZE / sizeof(UINT_PTR) * (n-1));
+ m_pMarker = (UINT_PTR*)((UINT_PTR)m_pMarker & ~(OS_PAGE_SIZE - 1));
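+ // (e.g. with OS_PAGE_SIZE == 4096 and n == 12, the marker lands on the page
+ // boundary 11 pages, i.e. 44KB, below the current SP.)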
+
+ // Grab the previous guard, if any, and update our depth.
+ m_pPrevGuard = GetCurrentGuard();
+
+ if (m_pPrevGuard == NULL)
+ {
+ m_depth = 0;
+ }
+ else
+ {
+ // If we've already got a probe in place that exceeds the reach of this one, then
+ // don't install this one. This avoids problems where we've installed an entry point
+ // probe and then called into a function that happens to do an interior probe. If we
+ // install the interior probe, then we effectively lose our deep entry point probe
+ // and end up with probe overrun violations. Check for it being a probe guard
+ // because boundary guards will always have 0 markers and we'd never probe
+ // after a boundary guard otherwise.
+ if (IsProbeGuard(m_pPrevGuard) && m_pPrevGuard->m_pMarker < m_pMarker)
+ {
+ return TRUE;
+ }
+ m_depth = m_pPrevGuard->m_depth + 1;
+
+ // We need to undo the page protection that we setup when we put the previous guard in place so we don't
+ // trip over it with this guard. Also, track that we came next.
+ if (IsProbeGuard(m_pPrevGuard) && m_pPrevGuard->m_pMarker != NULL)
+ {
+ m_pPrevGuard->UndoPageProtectionInDebugger();
+ m_pPrevGuard->m_szNextFunction = m_szFunction;
+ m_pPrevGuard->m_szNextFile = m_szFile;
+ m_pPrevGuard->m_nextLineNum = m_lineNum;
+ }
+ }
+
+ if (ShouldCheckPreviousCookieIntegrity())
+ {
+ UINT_PTR *approxSP = (UINT_PTR*)GetCurrentSP();
+ if (approxSP <= m_pPrevGuard->m_pMarker)
+ {
+ UINT_PTR uProbeShortFall = (char*)m_pPrevGuard->m_pMarker - (char*)approxSP;
+ _ASSERTE(FitsIn<int>(uProbeShortFall));
+ HandleOverwrittenPreviousStackGuard(static_cast<int>(uProbeShortFall), NULL);
+ }
+ }
+
+ m_eInitialized = cPartialInit;
+
+ fRet = DoProbe(m_numPages, fThrowOnSO);
+ END_GETTHREAD_ALLOWED;
+ return fRet;
+}
+
+BOOL BaseStackGuard::DoProbe(unsigned int n, BOOL fThrowOnSO)
+{
+ CONTRACTL
+ {
+ DISABLED(THROWS);
+ MODE_ANY;
+ WRAPPER(GC_TRIGGERS);
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (! IsStackProbingEnabled() || m_eInitialized != cPartialInit)
+ {
+ return TRUE;
+ }
+
+ LOG((LF_EH, LL_INFO100000, "BSG::DP: %d pages, depth %d, probe 0x%p, fcn %s, prev 0x%p\n",
+ m_numPages, m_depth, this, this->m_szFunction, m_pPrevGuard));
+
+    // For cases where we have a separate call to DoProbe, make sure the probe amounts match.
+ _ASSERTE(n == m_numPages);
+
+    // We'll probe for the requested n pages plus an allowance for backout (cleanup) code; the marker goes at the n-page point.
+ unsigned int nPagesToProbe = n + static_cast<unsigned int>(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT));
+
+ Thread *pThread = GetThread();
+
+    // We already checked in RequiresNStackPages that we've got a thread. But ASSERT just to
+ // be sure.
+ _ASSERTE(pThread);
+
+ // Check if we have enough space left in the stack
+ if (fThrowOnSO)
+ {
+ RetailStackProbe(nPagesToProbe, pThread);
+ }
+ else if (! RetailStackProbeNoThrow(nPagesToProbe, pThread))
+ {
+ return FALSE;
+ }
+
+    // The fault injection framework will tell us when it wants to inject
+    // an SO. If it returns TRUE, inject an SO, honoring the fThrowOnSO flag.
+ if (ReportProbeToFaultInjectionFramework() == TRUE)
+ {
+ if (fThrowOnSO)
+ {
+ COMPlusThrowSO();
+ }
+ // return probe failure (ie SO) if not in a throwing probe
+ return FALSE;
+ }
+
+ LOG((LF_EH, LL_INFO100000, "BSG::DP: pushing to 0x%p\n", m_pMarker));
+
+ // See if we're able to get a TLS slot to mark our guard page
+ HRESULT hr = PrepGuard();
+
+ // Since we can be here only with a valid managed thread object,
+ // it will already have its TLS setup. Thus, accessing TLS in PrepGuard
+ // call above shouldn't fail.
+ _ASSERTE(SUCCEEDED(hr));
+
+    // Make sure the guard page is beyond the marker page; otherwise we could AV, or, when the guard
+    // page moves up, it could wipe out our debugger page protection.
+ UINT_PTR *sp = (UINT_PTR*)GetCurrentSP();
+ while (sp >= m_pMarker)
+ {
+ sp -= (OS_PAGE_SIZE / sizeof(UINT_PTR));
+ *sp = NULL;
+ }
+
+ // Write the cookie onto the stack.
+ PlaceMarker(m_pMarker);
+
+ // We'll protect the page where we put the marker if a debugger is attached. That way, you get an AV right away
+ // when you go past the guard when running under a debugger.
+ ProtectMarkerPageInDebugger();
+
+    // Mark that we're initialized (and didn't get interrupted by an exception)
+ m_eInitialized = cInit;
+
+    // Initialize the exception-occurred flag. It stays TRUE until SetNoException is
+    // called on the normal (non-exception) path.
+ m_exceptionOccured = TRUE;
+
+    // Set up the flag that tells whether we're unwinding due to an exception
+ m_fEHInProgress = FALSE;
+
+ // By this point, everything is working, so go ahead and hook up.
+ SetCurrentGuard(this);
+
+ return TRUE;
+}
+
+
+//-----------------------------------------------------------------------------
+// PopGuardForEH
+//
+// If we are being popped during an EH unwind, our cookie is likely corrupt so we can't check it.
+// So just pop ourselves off the stack and return. We will restore the markers
+// after we've caught the exception.
+//
+// We also set the EHInProgress bit on the previous guard to indicate that the
+// current guard was unwound during EH and couldn't restore the previous guard's
+// cookie.
+//
+// We also need to clear the protection bit as we go down, because the page will no
+// longer be protected.
+//-----------------------------------------------------------------------------
+void BaseStackGuard::PopGuardForEH()
+{
+ LIMITED_METHOD_CONTRACT;
+ // If we've protected this page, undo the protection
+ UndoPageProtectionInDebugger();
+
+ if (m_pPrevGuard)
+ {
+ m_pPrevGuard->m_fEHInProgress = TRUE;
+
+ // Indicate that we haven't reprotected the previous guard
+ m_pPrevGuard->m_fProtectedStackPage = FALSE;
+ }
+ // Mark it as unwound for EH. This is for debugging purposes only so we
+ // know how it was popped.
+ m_eInitialized = cEHUnwound;
+ SetCurrentGuard(m_pPrevGuard);
+}
+
+//-----------------------------------------------------------------------------
+// Check guard in stack
+// This must be called 1:1 with RequiresNStackPages, else:
+// - the function's stack cookie isn't restored
+// - the stack chain in TLS gets out of whack.
+//-----------------------------------------------------------------------------
+void BaseStackGuard::CheckStack()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (! IsStackProbingEnabled() || m_eInitialized != cInit)
+ {
+ return;
+ }
+
+ // If we are being popped during an EH unwind, our cookie is likely corrupt so we can't check it.
+ // So just pop ourselves off the stack and return. We will restore the markers
+ // after we've caught the exception.
+ if (DidExceptionOccur())
+ {
+ // We may not be the topmost in the stack, but we'd better not be called when we've already
+ // unwound the stack past this guy.
+ _ASSERTE(GetCurrentGuard() <= this);
+
+        // Make sure that, if we didn't get to END_SO_INTOLERANT_CODE, the stack usage
+        // indicates an exception. This is only a rough check - we might miss some cases where the
+        // stack grew a lot between construction and destruction of the guard. However, it will
+        // catch most short-circuits.
+        if (!IsBackoutCalledForEH((BYTE *)(this), static_cast<BYTE *>((LPVOID)GetCurrentSP())))
+        {
+            _ASSERTE(!"Short-circuit of END_SO_INTOLERANT_CODE detected. You cannot short-circuit return from an SO-intolerant region");
+        }
+
+ LOG((LF_EH, LL_INFO100000, "BSG::CS on EH path sp 0x %p popping probe 0x%p depth %d \n", GetCurrentSP(), this, m_depth));
+ PopGuardForEH();
+ return;
+ }
+
+ LOG((LF_EH, LL_INFO100000, "BSG::CS checking probe 0x%p depth %d \n", this, m_depth));
+
+    // If we aren't being unwound during EH, then we shouldn't have our EHInProgress bit set. That
+    // means we caught the exception in the EE and didn't call RestoreGuard, or we missed an SO-tolerant
+    // transition out of the EE and the exception occurred above us.
+ _ASSERTE(m_fEHInProgress == FALSE);
+
+ // we should only ever be popping ourselves if we are not on the EH unwind path
+ _ASSERTE(GetCurrentGuard() == this);
+
+    // We can have 0-sized probes for entry points that are small enough not to need a probe, but we still
+    // need to put something in place for the boundary probe assertions to work properly. So just remove it and
+    // don't do any cookie checking.
+ if (m_numPages == 0)
+ {
+ // Just unhook our guard from the chain. We're done. 0-page probes don't have anything preceding them.
+ ResetCurrentGuard(m_pPrevGuard);
+ return;
+ }
+
+ // We need to undo the page protection that we setup when we put the guard in place.
+ UndoPageProtectionInDebugger();
+
+ CheckMarkerIntegrity();
+
+ RestorePreviousGuard();
+}
+
+void BaseStackGuard::CheckMarkerIntegrity()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pMarker == 0)
+ {
+ return;
+ }
+
+ // Make sure our cookie is still on the stack where it belongs.
+ if (ShouldCheckThisCookieIntegrity() && IsMarkerOverrun(m_pMarker))
+ {
+ HandleOverwrittenThisStackGuard(NULL);
+ }
+}
+
+
+void BaseStackGuard::RestorePreviousGuard()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (! IsProbeGuard(m_pPrevGuard) || !m_pPrevGuard->Enabled())
+ {
+ LOG((LF_EH, LL_INFO100000, "BSG::RPG depth %d, probe 0x%p, prev 0x%p not probe\n",
+ m_depth, this, m_pPrevGuard));
+ // Unhook our guard from the chain.
+ ResetCurrentGuard(m_pPrevGuard);
+ return;
+ }
+
+ if (m_pPrevGuard->m_fEHInProgress)
+ {
+ // If the marker was lost during exception processing, we cannot restore it and it will be restored on the catch.
+ // This can happen if we were partway through an EH unwind and then called something that probed. We'll have unwound our
+ // probe guards but won't have been able to put the cookie back, and we're still in that same position.
+ LOG((LF_EH, LL_INFO100000, "BSG::RPG depth %d, probe 0x%p, EH in progress, not resetting prev 0x%p\n",
+ m_depth, this, m_pPrevGuard));
+ // Unhook our guard from the chain.
+ ResetCurrentGuard(m_pPrevGuard);
+ return;
+ }
+
+    if (m_pPrevGuard->m_pMarker == NULL)
+    {
+        // Previous guard had no marker.
+        // We're done, so just unhook ourselves from the chain and leave.
+        ResetCurrentGuard(m_pPrevGuard);
+        return;
+    }
+
+    // Restore the last cookie, so that our previous guard will be able to properly check whether it gets overwritten. Note:
+    // we don't restore the previous cookie if we overwrote it with this guard. Doing so, by definition, corrupts the
+    // stack. It's better to have the previous guard report the overwrite.
+ PlaceMarker(m_pPrevGuard->m_pMarker);
+ LOG((LF_EH, LL_INFO100000, "BSG::RPG depth %d, probe 0x%p "
+ "for prev 0x%p at 0x%p in %s\n",
+ m_depth, this, m_pPrevGuard, m_pPrevGuard->m_pMarker, m_pPrevGuard->m_szFunction));
+ // And, of course, restore the previous guard's page protection (if it had done any.)
+ if (m_pPrevGuard->m_fProtectedStackPage)
+ {
+ m_pPrevGuard->ProtectMarkerPageInDebugger();
+ }
+
+ // Mark it as unwound on normal path. This is for debugging purposes only so we
+ // know how it was popped.
+ m_eInitialized = cUnwound;
+
+ // Unhook our guard from the chain.
+ ResetCurrentGuard(m_pPrevGuard);
+}
+
+void BaseStackGuard::ProtectMarkerPageInDebugger(void *pGuard)
+{
+ ((BaseStackGuard *)pGuard)->ProtectMarkerPageInDebugger();
+}
+
+//-----------------------------------------------------------------------------
+// Protect the page where we put the marker if a debugger is attached. That way, you get an AV right away
+// when you go past the guard when running under a debugger.
+//-----------------------------------------------------------------------------
+void BaseStackGuard::ProtectMarkerPageInDebugger()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DEBUG_ONLY_FUNCTION;
+
+ if (! (g_ProtectStackPagesInDebugger || g_ProtectStackPagesInDebuggerForProbeAtLine))
+ {
+ return;
+ }
+
+#ifdef _DEBUG
+ BEGIN_GETTHREAD_ALLOWED;
+ Thread* pThread = GetThread();
+ if (pThread)
+ {
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_Escalation);
+ }
+ END_GETTHREAD_ALLOWED;
+#endif
+
+ DWORD flOldProtect;
+
+ LOG((LF_EH, LL_INFO100000, "BSG::PMP: m_pMarker 0x%p, value 0x%p\n", m_pMarker, *m_pMarker));
+
+ // We cannot call into host for VirtualProtect. EEVirtualProtect will try to restore previous
+ // guard, but the location has been marked with PAGE_NOACCESS.
+#undef VirtualProtect
+ BOOL fSuccess = ::VirtualProtect(m_pMarker, 1, PAGE_NOACCESS, &flOldProtect);
+ _ASSERTE(fSuccess);
+
+#define VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect) \
+ Dont_Use_VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect)
+
+ m_fProtectedStackPage = fSuccess;
+}
+
+
+void BaseStackGuard::UndoPageProtectionInDebugger(void *pGuard)
+{
+ ((BaseStackGuard *)pGuard)->UndoPageProtectionInDebugger();
+}
+
+//-----------------------------------------------------------------------------
+// Remove page protection installed for this probe
+//-----------------------------------------------------------------------------
+void BaseStackGuard::UndoPageProtectionInDebugger()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DEBUG_ONLY_FUNCTION;
+
+ if (!m_fProtectedStackPage)
+ {
+ return;
+ }
+
+ _ASSERTE(IsProbeGuard());
+
+#ifdef _DEBUG
+ BEGIN_GETTHREAD_ALLOWED;
+ Thread* pThread = GetThread();
+ if (pThread)
+ {
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_Escalation);
+ }
+ END_GETTHREAD_ALLOWED;
+#endif
+
+ DWORD flOldProtect;
+ // EEVirtualProtect installs a BoundaryStackGuard. To avoid recursion, we call
+ // into OS for VirtualProtect instead.
+#undef VirtualProtect
+ BOOL fSuccess = ::VirtualProtect(m_pMarker, 1, PAGE_READWRITE, &flOldProtect);
+ _ASSERTE(fSuccess);
+
+ LOG((LF_EH, LL_INFO100000, "BSG::UMP m_pMarker 0x%p\n", m_pMarker));
+ // Frankly, if we had protected the stack page, then we shouldn't have gone past the guard, right? :)
+ _ASSERTE(!Enabled() || !IsMarkerOverrun(m_pMarker));
+
+#define VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect) \
+ Dont_Use_VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect)
+}
+
+void BaseStackGuard::InitProbeReportingToFaultInjectionFramework()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (! g_pConfig->ShouldInjectFault(INJECTFAULT_SO))
+ {
+ return;
+ }
+
+ m_hProbeCallBack = CLRLoadLibrary(MAKEDLLNAME_W(W("FaultHostingLayer")));
+ if (!m_hProbeCallBack) {
+ fprintf(stderr, "StackProbing: Failed to load " MAKEDLLNAME_A("FaultHostingLayer") ". LastErr=%d\n",
+ GetLastError());
+ return;
+ }
+
+ m_pfnProbeCallback = (ProbeCallbackType)GetProcAddress(m_hProbeCallBack, "StackProbeCallback");
+ if (!m_pfnProbeCallback) {
+ fprintf(stderr, "StackProbing: Couldn't find StackProbeCallback() in FaultHostingLayer\n");
+ return;
+ }
+}
+
+// The fault injection framework will return TRUE if we should
+// inject an SO at the point of the current probe.
+BOOL BaseStackGuard::ReportProbeToFaultInjectionFramework()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_MODE_ANY;
+
+ if (! g_pConfig->ShouldInjectFault(INJECTFAULT_SO) || ! m_pfnProbeCallback)
+ {
+ return FALSE;
+ }
+
+ // FORBIDGC_LOADER_USE_ENABLED says we are only doing a minimal amount of work and will not
+ // update global state (just read it.) Code running in this state cannot tolerate a fault injection.
+ if (FORBIDGC_LOADER_USE_ENABLED())
+ {
+ return FALSE;
+ }
+
+    // For codepaths that are not mainline or are debug only, we don't care about fault injection because
+    // taking an SO here won't matter (or can't happen). However, we'd like to still probe on those paths
+    // just to give us more conservative probe coverage, so we still do the probe, just not the fault injection.
+    ClrDebugState *pDebugState = GetClrDebugState();
+    if (pDebugState && (pDebugState->IsSONotMainline() || pDebugState->IsDebugOnly()))
+ {
+ return FALSE;
+ }
+
+
+ // Faults injected into the default domain are process fatal. Probing is still going to occur
+ // but we never trigger fault injection.
+ {
+        // Attempting to figure out if we are in the default domain will trigger SO probes, so
+        // we temporarily mark ourselves SONotMainline during the check to prevent recursive probes.
+ SO_NOT_MAINLINE_REGION();
+ Thread *pThread = GetThreadNULLOk();
+ if (pThread && pThread->GetDomain(TRUE)->IsDefaultDomain())
+ {
+ return FALSE;
+ }
+ }
+
+ return m_pfnProbeCallback(m_lineNum, m_szFile);
+}
+
+void BaseStackGuard::SetCurrentGuard(BaseStackGuard* pGuard)
+{
+ WRAPPER_NO_CONTRACT;
+
+ ClrFlsSetValue(g_CurrentStackGuardTlsIdx, pGuard);
+
+ Thread * pThread = GetThreadNULLOk();
+ if (pThread)
+ {
+ // For faster access, store the guard in the thread object, if available
+ pThread->SetCurrentStackGuard(pGuard);
+ }
+}
+
+// Reset the current guard state back to this one's
+void BaseStackGuard::ResetCurrentGuard(BaseStackGuard* pGuard)
+{
+ WRAPPER_NO_CONTRACT;
+
+ SetCurrentGuard(pGuard);
+}
+
+// This puts a boundary probe in the list when we leave the EE
+DEBUG_NOINLINE void BoundaryStackGuard::Push()
+{
+ SCAN_SCOPE_BEGIN;
+ ANNOTATION_FN_SO_TOLERANT;
+
+ if (! IsStackProbingEnabled())
+ {
+ return;
+ }
+
+
+ m_isBoundaryGuard = TRUE;
+ m_pPrevGuard = GetCurrentGuard();
+
+ if (m_pPrevGuard)
+ {
+        // @todo can remove the check for IsProbeGuard when we have all the probes in place
+ if (IsProbeGuard(m_pPrevGuard))
+ {
+ // ensure that the previous probe was sufficiently large
+ if (ShouldCheckPreviousCookieIntegrity())
+ {
+ // Grab an approximation of our current stack pointer.
+ void *approxStackPointer = (LPVOID)GetCurrentSP();
+
+ if (((UINT_PTR*) approxStackPointer <= m_pPrevGuard->Marker()))
+ {
+ UINT_PTR uProbeShortFall = (char*)m_pPrevGuard->Marker() - (char*)this;
+ _ASSERTE(FitsIn<int>(uProbeShortFall));
+ HandleOverwrittenPreviousStackGuard(static_cast<int>(uProbeShortFall), NULL);
+ }
+ }
+            m_pPrevGuard->UndoPageProtectionInDebugger(); // undo previous guard's page protection
+            m_pPrevGuard->m_szNextFunction = m_szFunction; // track that we came next
+            m_pPrevGuard->m_szNextFile = m_szFile;
+            m_pPrevGuard->m_nextLineNum = m_lineNum;
+ }
+        m_depth = m_pPrevGuard->Depth(); // don't increment, but record it so we can transfer it to the next probe
+ }
+ LOG((LF_EH, LL_INFO100000, "BNSG::PS probe 0x%p, depth %d, prev 0x%p in %s\n",
+ this, m_depth, m_pPrevGuard, m_pPrevGuard ? m_pPrevGuard->FunctionName() : NULL));
+
+    // See if we're able to get a TLS slot to mark our guard page. If not, this will just be an uninitialized
+ // guard. This generally happens in callbacks to the host before the EE infrastructure is set up on
+ // the thread, so there won't be interesting probes to protect anyway.
+ if (FAILED(PrepGuard()))
+ {
+ return;
+ }
+
+    // Mark that we're initialized (and didn't get interrupted by an exception)
+ m_eInitialized = cInit;
+
+    // Initialize the exception-occurred flag; it is cleared by SetNoException on the normal path
+ m_exceptionOccured = TRUE;
+
+ SetCurrentGuard(this);
+}
+
+
+
+// Pop the boundary probe and reset the original probe's cookie when we
+// return into the EE
+DEBUG_NOINLINE void BoundaryStackGuard::Pop()
+{
+ SCAN_SCOPE_END;
+
+ if (! IsStackProbingEnabled() || m_eInitialized != cInit)
+ {
+ return;
+ }
+
+ // If we are being popped during an EH unwind, we cannot restore the probe cookie because it will
+ // corrupt the stack. So just pop ourselves off the stack and return. We will restore the markers
+ // after we've caught the exception.
+ if (DidExceptionOccur())
+ {
+ // We may not be the topmost in the stack, but we'd better not be called when we've already
+ // unwound the stack past this guy.
+ _ASSERTE(GetCurrentGuard() <= this);
+
+        // Make sure that, if we didn't get to END_SO_TOLERANT_CODE, the stack usage
+        // indicates an exception. This is only a rough check - we might miss some cases where the
+        // stack grew a lot between construction and destruction of the guard. However, it will
+        // catch most short-circuits.
+        if (!IsBackoutCalledForEH((BYTE *)(this), static_cast<BYTE *>((LPVOID)GetCurrentSP())))
+        {
+            _ASSERTE(!"Short-circuit of END_SO_TOLERANT_CODE detected. You cannot short-circuit return from an SO-tolerant region");
+        }
+
+ LOG((LF_EH, LL_INFO100000, "BNSG::PP popping on EH path 0x%p depth %d \n", this, m_depth));
+ PopGuardForEH();
+ return;
+ }
+
+ LOG((LF_EH, LL_INFO100000, "BNSG::PP 0x%p depth %d restoring CK at 0x%p "
+ " probe 0x%p in %s\n",
+ this, m_depth, (!IsProbeGuard(m_pPrevGuard) ? 0 : m_pPrevGuard->Marker()),
+ m_pPrevGuard, m_pPrevGuard ? m_pPrevGuard->FunctionName() : NULL));
+
+ // we should only ever be popping ourselves
+ _ASSERTE(GetCurrentGuard() == this);
+
+ RestorePreviousGuard();
+}
+
+
+//
+// IsBackoutCalledForEH
+//
+// Uses a heuristic to determine whether the backout code is being called on an EH path
+// or not, based on the original SP and the SP when the backout code is called.
+//
+// origSP: The SP when the mainline code was called. For example, the SP of a ctor or code in a try block
+//
+// backoutSP: The SP when the backout code is called.
+//
+// Returns: boolean indicating whether or not the backout code is being called on an EH path.
+//
+BOOL IsBackoutCalledForEH(BYTE *origSP,
+ BYTE *backoutSP)
+{
+ // We need to determine if we are being called in the normal or exception path. (Sure would be
+ // nice if the CRT would tell us.) We use the stack pointer to determine this. On the normal path
+    // the stack pointer should be not far from the original SP, whereas on the exception path it
+ // will typically be a lot higher up the stack. We will make the following assumptions:
+ //
+ // 1) on EH path the OS has to push a context onto the stack. So the SP will be increased by
+ // at least the size of a context when calling a destructor through EH path.
+ //
+ // 2) the CRT will use minimal stack space to call a destructor. This is assumed to be less
+ // than the size of a context.
+ //
+ // Caveats:
+ //
+ // 1) If there is less than a context on the stack on the EH path, we will miss the fact that
+    // an exception occurred
+ //
+ // 2) If the CRT uses near the size of a context before calling the destructor in the normal case,
+ // we will assume we've got an exception and ASSERT.
+ //
+ // So if we arrive at our backout code and the SP is more than the size of a context beyond the original SP,
+ // we assume we are on an EH path.
+ //
+ return (origSP - sizeof(CONTEXT)) > backoutSP;
+
+}
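+
+// Illustrative numbers for the heuristic above (hypothetical; assumes
+// sizeof(CONTEXT) == 0x4D0, as on AMD64):
+//   normal path: origSP = 0x20000, backoutSP = 0x1FF80 (~128 bytes used)
+//                0x20000 - 0x4D0 = 0x1FB30, not > 0x1FF80  => returns FALSE
+//   EH path:     origSP = 0x20000, backoutSP = 0x1F000 (a CONTEXT and more pushed)
+//                0x1FB30 > 0x1F000                         => returns TRUE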
+
+
+DebugSOIntolerantTransitionHandlerBeginOnly::DebugSOIntolerantTransitionHandlerBeginOnly(EEThreadHandle thread)
+{
+ SCAN_SCOPE_BEGIN;
+ ANNOTATION_FN_SO_INTOLERANT;
+
+ // save the SP so that we can check if the dtor is being called with a much bigger one
+ m_ctorSP = (char *)GetCurrentSP();
+ m_clrDebugState = GetClrDebugState();
+ m_prevSOTolerantState = m_clrDebugState->BeginSOIntolerant();
+}
+
+DebugSOIntolerantTransitionHandlerBeginOnly::~DebugSOIntolerantTransitionHandlerBeginOnly()
+{
+ SCAN_SCOPE_END;
+
+ // A DebugSOIntolerantTransitionHandlerBeginOnly is instantiated only for cases where we will not see
+    // an exception. So the destructor should never be called on an exception path. This will check if
+ // we are handling an exception and raise an assert if so.
+
+ //
+ // We need to determine if we are being called in the normal or exception path. (Sure would be
+ // nice if the CRT would tell us.) We use the stack pointer to determine this. On the normal path
+ // the stack pointer should be not far from the this pointer, whereas on the exception path it
+ // will typically be a lot higher up the stack. We will make the following assumptions:
+ //
+ // 1) on EH path the OS has to push a context onto the stack. So the SP will be increased by
+ // at least the size of a context when calling a destructor through EH path.
+ //
+ // 2) the CRT will use minimal stack space to call a destructor. This is assumed to be less
+ // than the size of a context.
+ //
+ // Caveats:
+ //
+ // 1) If there is less than a context on the stack on the EH path, we will miss the fact that
+    // an exception occurred
+ //
+ // 2) If the CRT uses near the size of a context before calling the destructor in the normal case,
+ // we will assume we've got an exception and ASSERT.
+ //
+ // So if we arrive at our destructor and the SP is within the size of a context beyond the SP when
+ // we called the ctor, we assume we are on normal path.
+ if ((m_ctorSP - sizeof(CONTEXT)) > (LPVOID)GetCurrentSP())
+ {
+ _ASSERTE(!"An exception cannot leak through a SO_INTOLERANT_CODE_NOTHROW boundary");
+ }
+
+ m_clrDebugState->SetSOTolerance(m_prevSOTolerantState);
+}
+#endif // STACK_GUARDS_DEBUG
+
+#if defined(FEATURE_STACK_PROBE) && defined(_DEBUG)
+
+#undef __STACKPROBE_inl__
+
+#define INCLUDE_RETAIL_STACK_PROBE
+
+#include "stackprobe.inl"
+
+#endif // defined(FEATURE_STACK_PROBE) && defined(_DEBUG)
+
+#if 0 //FEATURE_FUSION_FAST_CLOSURE - was too buggy at the end of Dev10, not used since then. Delete it after Dev12 if it is still not fixed and used.
+
+#ifdef FEATURE_STACK_PROBE
+// This is a helper that fusion (CFastAssemblyBindingClosure) uses to
+// do an interior stack probe.
+HRESULT InteriorStackProbeNothrowCheckThread()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+
+ HRESULT hr = S_OK;
+ INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(hr = E_OUTOFMEMORY;);
+ END_INTERIOR_STACK_PROBE;
+
+ return hr;
+}
+#endif
+
+#endif //0 - FEATURE_FUSION_FAST_CLOSURE
diff --git a/src/vm/stackprobe.h b/src/vm/stackprobe.h
new file mode 100644
index 0000000000..7d69574ad1
--- /dev/null
+++ b/src/vm/stackprobe.h
@@ -0,0 +1,1008 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+//-----------------------------------------------------------------------------
+// Stack Probe Header
+// Used to setup stack guards
+//-----------------------------------------------------------------------------
+
+#ifndef __STACKPROBE_h__
+#define __STACKPROBE_h__
+
+//-----------------------------------------------------------------------------
+// Stack Guards.
+//
+// The idea is to force stack overflows to occur at convenient spots.
+// * Fire at RequiresNStackPages (beginning of func) if this function's locals
+// cause overflow. Note that in a debug build, initializing the locals to garbage
+// will cause the overflow before this macro is executed.
+//
+// * Fire at CheckStack (end of func) if either our nested function calls or
+// use of _alloca causes the stack overflow. Note that this macro
+// is debug only, so release builds won't catch this.
+//
+// Some comments:
+// - Stack grows *down*,
+// - Ideally, all funcs would have EBP frame and we'd use EBP instead of ESP,
+// however, we use the 'this' ptr to get the stack ptr, since the guard
+// is declared on the stack.
+//
+// Comments about inlining assembly w/ Macros:
+// - Must use cstyle comments /* ... */
+// - No semi colons, need __asm keyword at the start of each line
+//-----------------------------------------------------------------------------
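+
+// A minimal sketch of the guard lifecycle (illustrative only; the function is
+// hypothetical, and real callers normally go through the BEGIN_SO_INTOLERANT_CODE*
+// macros defined below):
+//
+// void SomeVMFunction() // hypothetical
+// {
+//     AutoCleanupStackGuard guard(__FUNCTION__, __FILE__, __LINE__);
+//     guard.RequiresNStackPagesThrowing(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT));
+//     /* ... body: locals, nested calls, _alloca ... */
+// } // dtor runs CheckStack() here, verifying the cookie and popping the guard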
+
+//-----------------------------------------------------------------------------
+// *How* to use stack guards.
+//
+// See, in a CLR enlistment, src\ndp\clr\doc\OtherDevDocs\untriaged\clrdev_web\
+//
+//-----------------------------------------------------------------------------
+
+//-----------------------------------------------------------------------------
+// Stack guards have 3 compiler states:
+//#define FEATURE_STACK_PROBE
+// (All) All stack guard code is completely removed by the preprocessor if
+// not defined. This is used for CoreCLR.
+//
+//#define STACK_GUARDS_DEBUG
+// (DEBUG) Full stack guard debugging including cookies, tracking ips, and
+// chaining. More heavy weight, recommended for a debug build only
+//
+//#define STACK_GUARDS_RELEASE
+// (RELEASE) Light stack guard code. For golden builds. Forces Stack Overflow
+// to happen at "convenient" times. No debugging help.
+//-----------------------------------------------------------------------------
+
+#include "genericstackprobe.h"
+#include "utilcode.h"
+
+/* defining VM_NO_SO_INFRASTRUCTURE_CODE for VM code
+ * This macro can be used to have code which will be present
+ * only for code inside VM directory when SO infrastructure code is not built.
+ * Eg. Currently it is used in macro EX_END_HOOK.
+ * For VM code EX_HOOK calls CLRException::HandleState::SetupCatch().
+ * When stack guards are disabled we will tear down the process in
+ * CLRException::HandleState::SetupCatch() if there is a StackOverflow.
+ * So we should not reach EX_END_HOOK when there is a StackOverflow.
+ * This change cannot be done for all other code because
+ * CLRException::HandleState::SetupCatch() is not called; rather,
+ * Exception::HandleState::SetupCatch() is called, which is a no-op.
+ */
+
+#ifndef FEATURE_STACK_PROBE
+#undef VM_NO_SO_INFRASTRUCTURE_CODE
+#define VM_NO_SO_INFRASTRUCTURE_CODE(x) x
+#endif
+
+
+#ifdef FEATURE_STACK_PROBE
+
+#define DEFAULT_INTERIOR_PROBE_AMOUNT 4
+
+#define MINIMUM_STACK_REQUIREMENT (0.25)
+
+BOOL IsBackoutCalledForEH(BYTE *origSP, BYTE *backoutSP);
+
+//=============================================================================
+// Common code
+//=============================================================================
+// Release version of the probe function
+BOOL RetailStackProbeNoThrow(unsigned int n, Thread *pThread);
+BOOL RetailStackProbeNoThrowWorker(unsigned int n, Thread *pThread);
+void RetailStackProbe(unsigned int n, Thread *pThread);
+void RetailStackProbeWorker(unsigned int n, Thread *pThread);
+void ReportStackOverflow();
+
+// Retail stack probe with default amount is the most common stack probe. Create
+// a dedicated method for it to reduce code size.
+void DefaultRetailStackProbeWorker(Thread * pThread);
+
+void RetailStackProbe(unsigned int n);
+
+BOOL ShouldProbeOnThisThread();
+
+int SOTolerantBoundaryFilter(EXCEPTION_POINTERS *pExceptionInfo, DWORD * pdwSOTolerantFlags);
+void SOTolerantCode_RecoverStack(DWORD dwFlags);
+void SOTolerantCode_ExceptBody(DWORD * pdwFlags, Frame * pSafeForSOFrame);
+
+#endif
+
+#if defined(FEATURE_STACK_PROBE) && !defined(DACCESS_COMPILE)
+
+inline bool IsStackProbingEnabled()
+{
+ LIMITED_METHOD_CONTRACT;
+ return g_StackProbingEnabled;
+}
+
+//=============================================================================
+// DEBUG
+//=============================================================================
+#if defined(STACK_GUARDS_DEBUG)
+
+#include "common.h"
+
+class BaseStackGuard;
+
+//-----------------------------------------------------------------------------
+// Need to chain together stack guard address for nested functions
+// Use a TLS slot to store the head of the chain
+//-----------------------------------------------------------------------------
+extern DWORD g_CurrentStackGuardTlsIdx;
+
+//-----------------------------------------------------------------------------
+// Class
+//-----------------------------------------------------------------------------
+
+// Base version - has no ctor/dtor, so we can use it with SEH
+//
+// *** Don't declare any members here. Put them in BaseStackGuardGeneric.
+// We downcast directly from the base to the derived, using the knowledge
+// that the base class and the derived class are identical for members.
+//
+class BaseStackGuard : public BaseStackGuardGeneric
+{
+protected:
+ BaseStackGuard()
+ {
+ _ASSERTE(!"No default construction allowed");
+ }
+
+public:
+ BaseStackGuard(const char *szFunction, const char *szFile, unsigned int lineNum) :
+ BaseStackGuardGeneric(szFunction, szFile, lineNum)
+ {
+ STATIC_CONTRACT_LEAF;
+ }
+
+ UINT_PTR *Marker() { return m_pMarker; }
+
+ unsigned int Depth() { return m_depth; }
+
+ const char *FunctionName() { return m_szFunction; }
+
+ BOOL IsProbeGuard()
+ {
+ return (m_isBoundaryGuard == FALSE);
+ }
+
+ BOOL IsBoundaryGuard()
+ {
+ return (m_isBoundaryGuard == TRUE);
+ }
+
+ inline BOOL ShouldCheckPreviousCookieIntegrity();
+ inline BOOL ShouldCheckThisCookieIntegrity();
+
+ BOOL RequiresNStackPages(unsigned int n, BOOL fThrowOnSO = TRUE);
+ BOOL RequiresNStackPagesThrowing(unsigned int n);
+ BOOL RequiresNStackPagesNoThrow(unsigned int n);
+private:
+ BOOL RequiresNStackPagesInternal(unsigned int n, BOOL fThrowOnSO = TRUE);
+public:
+ BOOL DoProbe(unsigned int n, BOOL fThrowOnSO);
+ void CheckStack();
+
+ static void RestoreCurrentGuard(BOOL fWasDisabled = FALSE);
+ void PopGuardForEH();
+
+    // Different error messages for the different times we determine there's a problem.
+ void HandleOverwrittenThisStackGuard(__in_z char *stackID);
+ void HandleOverwrittenPreviousStackGuard(int shortFall, __in_z char *stackID);
+ void HandleOverwrittenCurrentStackGuard(int shortFall, __in_z char *stackID);
+ static void HandleOverwrittenCurrentStackGuard(void *pGuard, int shortFall, __in_z char *stackID);
+
+ void CheckMarkerIntegrity();
+ void RestorePreviousGuard();
+ void ProtectMarkerPageInDebugger();
+ void UndoPageProtectionInDebugger();
+ static void ProtectMarkerPageInDebugger(void *pGuard);
+ static void UndoPageProtectionInDebugger(void *pGuard);
+
+ inline HRESULT PrepGuard()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // See if it has already been prepped...
+ if (ClrFlsGetValue(g_CurrentStackGuardTlsIdx) != NULL)
+ return S_OK;
+
+ // Let's see if we'll be able to put in a guard page
+        ClrFlsSetValue(g_CurrentStackGuardTlsIdx, (void*)-1);
+
+ if (ClrFlsGetValue(g_CurrentStackGuardTlsIdx) != (void*)-1)
+ return E_OUTOFMEMORY;
+
+ return S_OK;
+
+ }
+
+ inline static BaseStackGuard* GetCurrentGuard()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (g_CurrentStackGuardTlsIdx != -1)
+ return (BaseStackGuard*) ClrFlsGetValue(g_CurrentStackGuardTlsIdx);
+ else
+ return NULL;
+ }
+
+ inline static BOOL IsGuard(BaseStackGuard *probe)
+ {
+ return (probe != NULL);
+ }
+ static void SetCurrentGuard(BaseStackGuard* pGuard);
+ static void ResetCurrentGuard(BaseStackGuard* pGuard);
+
+ inline static BOOL IsProbeGuard(BaseStackGuard *probe)
+ {
+ LIMITED_METHOD_CONTRACT;
+        return (IsGuard(probe) && probe->IsProbeGuard());
+ }
+
+ inline static BOOL IsBoundaryGuard(BaseStackGuard *probe)
+ {
+ LIMITED_METHOD_CONTRACT;
+        return (IsGuard(probe) && probe->IsBoundaryGuard());
+ }
+
+ static void InitProbeReportingToFaultInjectionFramework();
+ BOOL ReportProbeToFaultInjectionFramework();
+
+ static void Terminate();
+
+
+ static HMODULE m_hProbeCallBack;
+ typedef BOOL (*ProbeCallbackType)(unsigned, const char *);
+ static ProbeCallbackType m_pfnProbeCallback;
+
+};
+
+
+// Derived version; adds a dtor that automatically calls CheckStack. More convenient, but can't be used with SEH.
+class AutoCleanupStackGuard : public BaseStackGuard
+{
+protected:
+ AutoCleanupStackGuard()
+ {
+ _ASSERTE(!"No default construction allowed");
+ }
+
+public:
+ DEBUG_NOINLINE AutoCleanupStackGuard(const char *szFunction, const char *szFile, unsigned int lineNum) :
+ BaseStackGuard(szFunction, szFile, lineNum)
+ {
+ SCAN_SCOPE_BEGIN;
+ // This CANNOT be a STATIC_CONTRACT_SO_INTOLERANT b/c that isn't
+        // really just a static contract: it actually calls EnsureSOIntolerantOK
+ // as well. Instead we just use the annotation.
+ ANNOTATION_FN_SO_INTOLERANT;
+ }
+
+ DEBUG_NOINLINE ~AutoCleanupStackGuard()
+ {
+ SCAN_SCOPE_END;
+ CheckStack();
+ }
+};
+
+class DebugSOIntolerantTransitionHandlerBeginOnly
+{
+ BOOL m_prevSOTolerantState;
+ ClrDebugState* m_clrDebugState;
+ char *m_ctorSP;
+
+ public:
+ DEBUG_NOINLINE DebugSOIntolerantTransitionHandlerBeginOnly(EEThreadHandle thread);
+ DEBUG_NOINLINE ~DebugSOIntolerantTransitionHandlerBeginOnly();
+};
+
+
+
+extern DWORD g_InteriorProbeAmount;
+
+//=============================================================================
+// Macros for transition into SO_INTOLERANT code
+//=============================================================================
+
+FORCEINLINE DWORD DefaultEntryProbeAmount() { return g_EntryPointProbeAmount; }
+
+#define BEGIN_SO_INTOLERANT_CODE(pThread) \
+ BEGIN_SO_INTOLERANT_CODE_FOR(pThread, g_EntryPointProbeAmount) \
+
+#define BEGIN_SO_INTOLERANT_CODE_FOR(pThread, n) \
+ { \
+ /*_ASSERTE(pThread); */ \
+ AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
+ stack_guard_XXX.RequiresNStackPagesThrowing(ADJUST_PROBE(n)); \
+ /* work around unreachable code warning */ \
+ if (true) \
+ { \
+ DebugSOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
+ ANNOTATION_SO_PROBE_BEGIN(DEFAULT_ENTRY_PROBE_AMOUNT); \
+ /* work around unreachable code warning */ \
+ if (true) \
+ { \
+ DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)
+
+#define BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO) \
+ { \
+ /*_ASSERTE(pThread || IsGCSpecialThread());*/ \
+ AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
+ if (! stack_guard_XXX.RequiresNStackPagesNoThrow(ADJUST_PROBE(g_EntryPointProbeAmount)))\
+ { \
+ stack_guard_XXX.SetNoException(); \
+ ActionOnSO; \
+ } \
+ /* work around unreachable code warning */ \
+ else \
+ { \
+ DebugSOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
+ ANNOTATION_SO_PROBE_BEGIN(DEFAULT_ENTRY_PROBE_AMOUNT); \
+ /* work around unreachable code warning */ \
+ if (true) \
+ { \
+ DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)
+
+
+// This is defined just for using in the InternalSetupForComCall macro which
+// doesn't have a corresponding end macro because no exception will pass through it
+// It should not be used in any situation where an exception could pass through
+// the transition.
+#define SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO) \
+ AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
+ if (! stack_guard_XXX.RequiresNStackPagesNoThrow(ADJUST_PROBE(g_EntryPointProbeAmount)))\
+ { \
+ ActionOnSO; \
+ } \
+ stack_guard_XXX.SetNoException(); \
+ DebugSOIntolerantTransitionHandlerBeginOnly __soIntolerantTransitionHandler(pThread); \
+ ANNOTATION_SO_PROBE_BEGIN(DEFAULT_ENTRY_PROBE_AMOUNT);
+
+
+// For some codepaths used during the handling of an SO, we need to guarantee a
+// minimal stack consumption to avoid an SO on that codepath. These are typically host
+// APIs such as allocation. The host is going to use < 1/4 page, so make sure
+// we have that amount before calling. Then use the BACKOUT_VALIDATION to ensure
+// that we don't overrun it. We call ReportStackOverflow, which will generate a hard
+// SO if we have less than a page left.
+
+#define MINIMAL_STACK_PROBE_CHECK_THREAD(pThread) \
+ if (IsStackProbingEnabled()) \
+ { \
+ Thread *__pThread = pThread; \
+ if (__pThread && ! __pThread->IsStackSpaceAvailable(MINIMUM_STACK_REQUIREMENT)) \
+ { \
+ ReportStackOverflow(); \
+ } \
+ } \
+ CONTRACT_VIOLATION(SOToleranceViolation);
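+
+// Illustrative use of MINIMAL_STACK_PROBE_CHECK_THREAD (the function below is
+// hypothetical; real call sites are host-facing paths such as allocation):
+//
+// void* AllocViaHost(SIZE_T cb) // hypothetical
+// {
+//     MINIMAL_STACK_PROBE_CHECK_THREAD(GetThreadNULLOk());
+//     /* ... call into the host; uses < 1/4 page as described above ... */
+// }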
+
+// We don't use the DebugSOIntolerantTransitionHandler here because we don't need to transition into
+// SO-intolerant code. We're already there. We also don't need to annotate as having probed,
+// because this only matters for entry point functions.
+// We have a way to separate the declaration from the actual probing for cases where we need
+// to do a test, such as IsGCThread(), to decide if we should probe.
+#define DECLARE_INTERIOR_STACK_PROBE \
+ { \
+ AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__);\
+ DEBUG_ASSURE_NO_RETURN_BEGIN(STACK_PROBE)
+
+
+// A function containing an interior probe is implicitly SO-intolerant because we
+// assume that it is not behind a probe. So confirm that we are in the correct state.
+#define DO_INTERIOR_STACK_PROBE_FOR(pThread, n) \
+ _ASSERTE(pThread != NULL); \
+ stack_guard_XXX.RequiresNStackPagesThrowing(ADJUST_PROBE(n)); \
+ EnsureSOIntolerantOK(__FUNCTION__, __FILE__, __LINE__);
+
+#define DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) \
+ if (ShouldProbeOnThisThread()) \
+ { \
+        DO_INTERIOR_STACK_PROBE_FOR(GetThread(), n); \
+ }
+
+// A function containing an interior probe is implicitly SO-intolerant because we
+// assume that it is not behind a probe. So confirm that we are in the correct state.
+#define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, actionOnSO) \
+ _ASSERTE(pThread != NULL); \
+ if (! stack_guard_XXX.RequiresNStackPagesNoThrow(ADJUST_PROBE(n))) \
+ { \
+ stack_guard_XXX.SetNoException(); \
+ actionOnSO; \
+ } \
+ EnsureSOIntolerantOK(__FUNCTION__, __FILE__, __LINE__);
+
+#define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, actionOnSO) \
+ if (ShouldProbeOnThisThread()) \
+ { \
+ DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(GetThread(), n, actionOnSO); \
+ }
+
+
+#define INTERIOR_STACK_PROBE_FOR(pThread, n) \
+ DECLARE_INTERIOR_STACK_PROBE; \
+ DO_INTERIOR_STACK_PROBE_FOR(pThread, n)
+
+#define INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) \
+ DECLARE_INTERIOR_STACK_PROBE; \
+ DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n)
+
+#define INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO) \
+ DECLARE_INTERIOR_STACK_PROBE; \
+ DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO)
+
+#define INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO) \
+ DECLARE_INTERIOR_STACK_PROBE; \
+ DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO)
+
+
+#define INTERIOR_STACK_PROBE(pThread) \
+ INTERIOR_STACK_PROBE_FOR(pThread, g_InteriorProbeAmount)
+
+#define INTERIOR_STACK_PROBE_CHECK_THREAD \
+ INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(g_InteriorProbeAmount)
+
+#define INTERIOR_STACK_PROBE_NOTHROW(pThread, ActionOnSO) \
+ INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, g_InteriorProbeAmount, ActionOnSO)
+
+#define INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(ActionOnSO) \
+ INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(g_InteriorProbeAmount, ActionOnSO)
+
+
+#define END_INTERIOR_STACK_PROBE \
+ DEBUG_ASSURE_NO_RETURN_END(STACK_PROBE) \
+ stack_guard_XXX.SetNoException(); \
+ }
+
+#define RETURN_FROM_INTERIOR_PROBE(x) \
+ DEBUG_OK_TO_RETURN_BEGIN(STACK_PROBE) \
+ stack_guard_XXX.SetNoException(); \
+ RETURN(x); \
+ DEBUG_OK_TO_RETURN_END(STACK_PROBE)
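+
+// A sketch of how these interior-probe macros compose (the function is
+// hypothetical; compare the InteriorStackProbeNothrowCheckThread helper in
+// stackprobe.cpp, currently compiled out):
+//
+// HRESULT DoStackHungryWork() // hypothetical
+// {
+//     HRESULT hr = S_OK;
+//     INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(hr = E_OUTOFMEMORY;);
+//     /* ... work that may consume significant stack ... */
+//     END_INTERIOR_STACK_PROBE;
+//     return hr;
+// }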
+
+
+// This is used for EH code where we are about to throw.
+// To avoid taking an SO during EH processing, we want to include it in our probe limits.
+// So we will just do a big probe and then throw.
+#define STACK_PROBE_FOR_THROW(pThread) \
+ AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
+ if (pThread != NULL) \
+ { \
+ DO_INTERIOR_STACK_PROBE_FOR(pThread, ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT)); \
+ }
+
+// This is used for throws where we cannot use a dtor-based probe.
+#define PUSH_STACK_PROBE_FOR_THROW(pThread) \
+ BaseStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
+ stack_guard_XXX.RequiresNStackPagesThrowing(ADJUST_PROBE(g_EntryPointProbeAmount));
+
+#define SAVE_ADDRESS_OF_STACK_PROBE_FOR_THROW(pGuard) \
+ pGuard = &stack_guard_XXX;
+
+#define RESET_EXCEPTION_FROM_STACK_PROBE_FOR_THROW(pGuard) \
+ pGuard->SetNoException ();
+
+#define POP_STACK_PROBE_FOR_THROW(pGuard) \
+ pGuard->CheckStack();
+
+//=============================================================================
+// Macros for transition into SO_TOLERANT code
+//=============================================================================
+// @todo : put this assert in when all probes are in place.
+// _ASSERTE(! pThread->IsSOTolerant());
+
+//*********************************************************************************
+
+// A boundary stack guard is pushed onto the probe stack when we leave the EE and
+// popped when we return. It is used for 1) restoring the original probe's cookie
+// when we return, as managed code could trash it, and 2) marking a boundary so that
+// we know not to check for overwritten probes before it when we install a real probe.
+//
+class BoundaryStackGuard : public BaseStackGuard
+{
+protected:
+ BoundaryStackGuard()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"No default construction allowed");
+ }
+
+public:
+ DEBUG_NOINLINE BoundaryStackGuard(const char *szFunction, const char *szFile, unsigned int lineNum)
+ : BaseStackGuard(szFunction, szFile, lineNum)
+ {
+ SCAN_SCOPE_BEGIN;
+ ANNOTATION_FN_SO_TOLERANT;
+
+ m_isBoundaryGuard = TRUE;
+ }
+
+ DEBUG_NOINLINE void Push();
+ DEBUG_NOINLINE void Pop();
+
+ DEBUG_NOINLINE void SetNoExceptionNoPop()
+ {
+ SCAN_SCOPE_END;
+ SetNoException();
+ }
+
+};
+
+// Derived version, add a dtor that automatically calls Pop, more convenient, but can't use with SEH.
+class AutoCleanupBoundaryStackGuard : public BoundaryStackGuard
+{
+protected:
+ AutoCleanupBoundaryStackGuard()
+ {
+ _ASSERTE(!"No default construction allowed");
+ }
+
+public:
+ DEBUG_NOINLINE AutoCleanupBoundaryStackGuard(const char *szFunction, const char *szFile, unsigned int lineNum) :
+ BoundaryStackGuard(szFunction, szFile, lineNum)
+ {
+ SCAN_SCOPE_BEGIN;
+ ANNOTATION_FN_SO_TOLERANT;
+ }
+
+ DEBUG_NOINLINE ~AutoCleanupBoundaryStackGuard()
+ {
+ SCAN_SCOPE_END;
+ Pop();
+ }
+};
+
+
+class DebugSOTolerantTransitionHandler
+{
+ BOOL m_prevSOTolerantState;
+ ClrDebugState* m_clrDebugState;
+
+ public:
+ void EnterSOTolerantCode(Thread *pThread);
+ void ReturnFromSOTolerantCode();
+};
+
+class AutoCleanupDebugSOTolerantTransitionHandler : DebugSOTolerantTransitionHandler
+{
+ BOOL m_prevSOTolerantState;
+ ClrDebugState* m_clrDebugState;
+
+ public:
+ DEBUG_NOINLINE AutoCleanupDebugSOTolerantTransitionHandler(Thread *pThread)
+ {
+ SCAN_SCOPE_BEGIN;
+ ANNOTATION_FN_SO_INTOLERANT;
+
+ EnterSOTolerantCode(pThread);
+ }
+ DEBUG_NOINLINE ~AutoCleanupDebugSOTolerantTransitionHandler()
+ {
+ SCAN_SCOPE_END;
+
+ ReturnFromSOTolerantCode();
+ }
+};
+
+
+// When we enter SO-tolerant code, we
+// 1) probe to make sure that we will have enough stack to run our backout code. We don't
+// need to check that the cookie was overrun because we only care that we had enough stack.
+// But we do anyway, to pop off the guard.
+// The backout code infrastructure ensures that we stay below the BACKOUT_CODE_STACK_LIMIT.
+// 2) Install a boundary guard, which will preserve our cookie and prevent spurious checks if
+// we call back into the EE.
+// 3) Formally transition into SO-tolerant code so that we can make sure we are probing if we call
+// back into the EE.
+//
+
+#undef OPTIONAL_SO_CLEANUP_UNWIND
+#define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame)
+
+#define BSTC_RECOVER_STACK 0x1
+#define BSTC_IS_SO 0x2
+#define BSTC_IS_SOFT_SO 0x4
+#define BSTC_TRIGGERING_UNWIND_FOR_SO 0x8
+
+#define BEGIN_SO_TOLERANT_CODE(pThread) \
+ { /* add an outer scope so that we'll restore our state as soon as we return */ \
+ Thread * const __pThread = pThread; \
+ DWORD __dwFlags = 0; \
+ Frame * __pSafeForSOFrame = __pThread ? __pThread->GetFrame() : NULL; \
+ SCAN_BLOCKMARKER(); \
+ SCAN_BLOCKMARKER_MARK(); \
+ BoundaryStackGuard boundary_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
+ boundary_guard_XXX.Push(); \
+ DebugSOTolerantTransitionHandler __soTolerantTransitionHandler; \
+ __soTolerantTransitionHandler.EnterSOTolerantCode(__pThread); \
+ __try \
+ { \
+ SCAN_EHMARKER(); \
+ __try \
+ { \
+ SCAN_EHMARKER_TRY(); \
+ DEBUG_ASSURE_NO_RETURN_BEGIN(STACK_PROBE) \
+ __try \
+ {
+
+
+// We need to catch any hard SO that comes through in order to get our stack back and make sure that we can run our backout code.
+// Also, we can't allow a hard SO to propagate into SO-intolerant code, as we can't tell where it came from and would have to rip the process.
+// So install a filter, catch the hard SO, and rethrow a C++ SO. Note that we don't check the host policy here; it only applies to exceptions
+// that will leak back into managed code.
+#define END_SO_TOLERANT_CODE \
+ } \
+ __finally \
+ { \
+ STATIC_CONTRACT_SO_TOLERANT; \
+ if (__dwFlags & BSTC_TRIGGERING_UNWIND_FOR_SO) \
+ { \
+ OPTIONAL_SO_CLEANUP_UNWIND(__pThread, __pSafeForSOFrame) \
+ } \
+ } \
+ DEBUG_ASSURE_NO_RETURN_END(STACK_PROBE) \
+ boundary_guard_XXX.SetNoException(); \
+ SCAN_EHMARKER_END_TRY(); \
+ } \
+ __except(SOTolerantBoundaryFilter(GetExceptionInformation(), &__dwFlags)) \
+ { \
+ SCAN_EHMARKER_CATCH(); \
+ __soTolerantTransitionHandler.ReturnFromSOTolerantCode(); \
+ SOTolerantCode_ExceptBody(&__dwFlags, __pSafeForSOFrame); \
+ SCAN_EHMARKER_END_CATCH(); \
+ } \
+ /* This will correctly set the annotation back to SOIntolerant if needed */ \
+ SCAN_BLOCKMARKER_USE(); \
+ if (__dwFlags & BSTC_RECOVER_STACK) \
+ { \
+ SOTolerantCode_RecoverStack(__dwFlags); \
+ } \
+ } \
+ __finally \
+ { \
+ __soTolerantTransitionHandler.ReturnFromSOTolerantCode(); \
+ boundary_guard_XXX.Pop(); \
+ } \
+ /* This is actually attached to the SCAN_BLOCKMARKER_USE() in the try scope */ \
+ /* but should hopefully chain the right annotations for a call to a __finally */ \
+ SCAN_BLOCKMARKER_END_USE(); \
+ }
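+
+// Typical shape of a transition out of the EE (illustrative; the function and
+// callback are hypothetical):
+//
+// void CallOutOfEE(Thread* pThread) // hypothetical
+// {
+//     BEGIN_SO_TOLERANT_CODE(pThread);
+//     /* ... leave the EE: invoke the managed/host callback ... */
+//     END_SO_TOLERANT_CODE;
+// }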
+
+extern unsigned __int64 getTimeStamp();
+
+INDEBUG(void AddHostCallsStaticMarker();)
+
+// This is used for calling into host
+// We only need to install the boundary guard, and transition into SO-tolerant code.
+#define BEGIN_SO_TOLERANT_CODE_CALLING_HOST(pThread) \
+ { \
+ ULONGLONG __entryTime = 0; \
+ __int64 __entryTimeStamp = 0; \
+ if (CLRTaskHosted()) \
+ { \
+ __entryTimeStamp = getTimeStamp(); \
+ __entryTime = CLRGetTickCount64(); \
+ } \
+ _ASSERTE(CanThisThreadCallIntoHost()); \
+ _ASSERTE((pThread == NULL) || \
+ (pThread->GetClrDebugState() == NULL) || \
+ ((pThread->GetClrDebugState()->ViolationMask() & \
+ (HostViolation|BadDebugState)) != 0) || \
+ (pThread->GetClrDebugState()->IsHostCaller())); \
+ INDEBUG(AddHostCallsStaticMarker();) \
+ _ASSERTE(pThread == NULL || !pThread->IsInForbidSuspendRegion()); \
+ { \
+ AutoCleanupBoundaryStackGuard boundary_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
+ boundary_guard_XXX.Push(); \
+ AutoCleanupDebugSOTolerantTransitionHandler __soTolerantTransitionHandler(pThread); \
+ DEBUG_ASSURE_NO_RETURN_BEGIN(STACK_PROBE); \
+
+#define END_SO_TOLERANT_CODE_CALLING_HOST \
+ DEBUG_ASSURE_NO_RETURN_END(STACK_PROBE) \
+ boundary_guard_XXX.SetNoExceptionNoPop(); \
+ } \
+ if (CLRTaskHosted()) \
+ { \
+ ULONGLONG __endTime = CLRGetTickCount64(); \
+ ULONGLONG __elapse = __endTime - __entryTime; \
+ if (__elapse > 20000 && __entryTimeStamp) \
+ { \
+ STRESS_LOG4(LF_EH, LL_INFO10, \
+ "CALLING HOST takes %d ms: line %d in %s(%s)\n", \
+ (int)__elapse, __LINE__, __FUNCTION__, __FILE__); \
+ } \
+ } \
+ }
+
+//-----------------------------------------------------------------------------
+// Startup & Shutdown stack guard subsystem
+//-----------------------------------------------------------------------------
+void InitStackProbes();
+void TerminateStackProbes();
+
+#elif defined(STACK_GUARDS_RELEASE)
+//=============================================================================
+// Release - really streamlined.
+//=============================================================================
+
+void InitStackProbesRetail();
+inline void InitStackProbes()
+{
+ InitStackProbesRetail();
+}
+
+inline void TerminateStackProbes()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+
+//=============================================================================
+// Macros for transition into SO_INTOLERANT code
+//=============================================================================
+
+FORCEINLINE DWORD DefaultEntryProbeAmount() { return DEFAULT_ENTRY_PROBE_AMOUNT; }
+
+#define BEGIN_SO_INTOLERANT_CODE(pThread) \
+{ \
+ if (IsStackProbingEnabled()) DefaultRetailStackProbeWorker(pThread); \
+ /* match with the else used in other macros */ \
+ if (true) { \
+ SOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
+ /* work around unreachable code warning */ \
+ if (true) { \
+ DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)
+
+#define BEGIN_SO_INTOLERANT_CODE_FOR(pThread, n) \
+{ \
+ if (IsStackProbingEnabled()) RetailStackProbeWorker(ADJUST_PROBE(n), pThread); \
+ /* match with the else used in other macros */ \
+ if (true) { \
+ SOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
+ /* work around unreachable code warning */ \
+ if (true) { \
+ DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)
+
+#define BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO) \
+{ \
+ if (IsStackProbingEnabled() && !RetailStackProbeNoThrowWorker(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread)) \
+ { \
+ ActionOnSO; \
+ } else { \
+ SOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
+ /* work around unreachable code warning */ \
+ if (true) { \
+ DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)
+
+
+// This is defined just for using in the InternalSetupForComCall macro which
+// doesn't have a corresponding end macro because no exception will pass through it
+// It should not be used in any situation where an exception could pass through
+// the transition.
+#define SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO) \
+ if (IsStackProbingEnabled() && !RetailStackProbeNoThrowWorker(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread)) \
+ { \
+ ActionOnSO; \
+ } \
+
+#define MINIMAL_STACK_PROBE_CHECK_THREAD(pThread) \
+ if (IsStackProbingEnabled()) \
+ { \
+ Thread *__pThread = pThread; \
+ if (__pThread && ! __pThread->IsStackSpaceAvailable(MINIMUM_STACK_REQUIREMENT)) \
+ { \
+ ReportStackOverflow(); \
+ } \
+ }
+
+#define DECLARE_INTERIOR_STACK_PROBE
+
+
+#define DO_INTERIOR_STACK_PROBE_FOR(pThread, n) \
+ if (IsStackProbingEnabled()) \
+ { \
+ RetailStackProbeWorker(ADJUST_PROBE(n), pThread); \
+ }
+
+#define DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) \
+ if (IsStackProbingEnabled() && ShouldProbeOnThisThread()) \
+ { \
+ RetailStackProbeWorker(ADJUST_PROBE(n), GetThread()); \
+ }
+
+#define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO) \
+ if (IsStackProbingEnabled()) \
+ { \
+ if (!RetailStackProbeNoThrowWorker(ADJUST_PROBE(n), pThread)) \
+ { \
+ ActionOnSO; \
+ } \
+ }
+
+#define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO) \
+ if (IsStackProbingEnabled() && ShouldProbeOnThisThread()) \
+ { \
+ if (!RetailStackProbeNoThrowWorker(ADJUST_PROBE(n), GetThread())) \
+ { \
+ ActionOnSO; \
+ } \
+ }
+
+
+#define INTERIOR_STACK_PROBE_FOR(pThread, n) \
+ DECLARE_INTERIOR_STACK_PROBE; \
+ DO_INTERIOR_STACK_PROBE_FOR(pThread, n)
+
+#define INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) \
+ DECLARE_INTERIOR_STACK_PROBE; \
+ DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n)
+
+#define INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO) \
+ DECLARE_INTERIOR_STACK_PROBE; \
+ DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO)
+
+#define INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO) \
+ DECLARE_INTERIOR_STACK_PROBE; \
+ DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO)
+
+
+#define INTERIOR_STACK_PROBE(pThread) \
+ INTERIOR_STACK_PROBE_FOR(pThread, DEFAULT_INTERIOR_PROBE_AMOUNT)
+
+#define INTERIOR_STACK_PROBE_CHECK_THREAD \
+ INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(DEFAULT_INTERIOR_PROBE_AMOUNT)
+
+#define INTERIOR_STACK_PROBE_NOTHROW(pThread, ActionOnSO) \
+ INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, DEFAULT_INTERIOR_PROBE_AMOUNT, ActionOnSO)
+
+#define INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(ActionOnSO) \
+ INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(DEFAULT_INTERIOR_PROBE_AMOUNT, ActionOnSO)
+
+
+#define END_INTERIOR_STACK_PROBE
+
+#define RETURN_FROM_INTERIOR_PROBE(x) RETURN(x)
+
+
+// This is used for EH code where we are about to throw.
+// To avoid taking an SO during EH processing, we want to include it in our probe limits.
+// So we will just do a big probe and then throw.
+#define STACK_PROBE_FOR_THROW(pThread) \
+ if (pThread != NULL) \
+ { \
+ RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread); \
+ } \
+
+#define PUSH_STACK_PROBE_FOR_THROW(pThread) \
+ RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread);
+
+#define SAVE_ADDRESS_OF_STACK_PROBE_FOR_THROW(pGuard)
+
+#define POP_STACK_PROBE_FOR_THROW(pGuard)
+
+
+//=============================================================================
+// Macros for transition into SO_TOLERANT code
+//=============================================================================
+
+#undef OPTIONAL_SO_CLEANUP_UNWIND
+#define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame)
+
+#define BSTC_RECOVER_STACK 0x1
+#define BSTC_IS_SO 0x2
+#define BSTC_IS_SOFT_SO 0x4
+#define BSTC_TRIGGERING_UNWIND_FOR_SO 0x8
+
+
+#define BEGIN_SO_TOLERANT_CODE(pThread) \
+{ \
+ Thread * __pThread = pThread; \
+ DWORD __dwFlags = 0; \
+ Frame * __pSafeForSOFrame = __pThread ? __pThread->GetFrame() : NULL; \
+ SCAN_BLOCKMARKER(); \
+ SCAN_BLOCKMARKER_MARK(); \
+ SCAN_EHMARKER(); \
+ __try \
+ { \
+ SCAN_EHMARKER_TRY() \
+ __try \
+ {
+
+// We need to catch any hard SO that comes through in order to get our stack back and make sure that we can run our backout code.
+// Also, we can't allow a hard SO to propagate into SO-intolerant code, as we can't tell where it came from and would have to rip the process.
+// So install a filter, catch the hard SO, and rethrow a C++ SO.
+#define END_SO_TOLERANT_CODE \
+ } \
+ __finally \
+ { \
+ STATIC_CONTRACT_SO_TOLERANT; \
+ if (__dwFlags & BSTC_TRIGGERING_UNWIND_FOR_SO) \
+ { \
+ OPTIONAL_SO_CLEANUP_UNWIND(__pThread, __pSafeForSOFrame) \
+ } \
+ } \
+ SCAN_EHMARKER_END_TRY(); \
+ } \
+ __except(SOTolerantBoundaryFilter(GetExceptionInformation(), &__dwFlags)) \
+ { \
+ SCAN_EHMARKER_CATCH(); \
+ SOTolerantCode_ExceptBody(&__dwFlags, __pSafeForSOFrame); \
+ SCAN_EHMARKER_END_CATCH(); \
+ } \
+ SCAN_BLOCKMARKER_USE(); \
+ if (__dwFlags & BSTC_RECOVER_STACK) \
+ { \
+ SOTolerantCode_RecoverStack(__dwFlags); \
+ } \
+ SCAN_BLOCKMARKER_END_USE(); \
+}
+
+#define BEGIN_SO_TOLERANT_CODE_CALLING_HOST(pThread) \
+ { \
+
+#define END_SO_TOLERANT_CODE_CALLING_HOST \
+ }
+
+#endif
+
+#else // FEATURE_STACK_PROBE && !DACCESS_COMPILE
+
+inline void InitStackProbes()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+inline void TerminateStackProbes()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+#define BEGIN_SO_INTOLERANT_CODE(pThread)
+#define BEGIN_SO_INTOLERANT_CODE_FOR(pThread, n)
+#define BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO)
+#define SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO)
+#define MINIMAL_STACK_PROBE_CHECK_THREAD(pThread)
+
+#define DECLARE_INTERIOR_STACK_PROBE
+
+#define DO_INTERIOR_STACK_PROBE_FOR(pThread, n)
+#define DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n)
+#define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO)
+#define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO)
+
+#define INTERIOR_STACK_PROBE_FOR(pThread, n)
+#define INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n)
+#define INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO)
+#define INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO)
+
+#define INTERIOR_STACK_PROBE(pThread)
+#define INTERIOR_STACK_PROBE_CHECK_THREAD
+#define INTERIOR_STACK_PROBE_NOTHROW(pThread, ActionOnSO)
+#define INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(ActionOnSO)
+
+#define END_INTERIOR_STACK_PROBE
+#define RETURN_FROM_INTERIOR_PROBE(x) RETURN(x)
+
+#define STACK_PROBE_FOR_THROW(pThread)
+#define PUSH_STACK_PROBE_FOR_THROW(pThread)
+#define SAVE_ADDRESS_OF_STACK_PROBE_FOR_THROW(pGuard)
+#define POP_STACK_PROBE_FOR_THROW(pGuard)
+
+#define BEGIN_SO_TOLERANT_CODE(pThread)
+#define END_SO_TOLERANT_CODE
+#define RETURN_FROM_SO_TOLERANT_CODE_HAS_CATCH
+#define BEGIN_SO_TOLERANT_CODE_CALLING_HOST(pThread) \
+ _ASSERTE(CanThisThreadCallIntoHost());
+
+#define END_SO_TOLERANT_CODE_CALLING_HOST
+
+#endif // FEATURE_STACK_PROBE && !DACCESS_COMPILE
+
+#endif // __STACKPROBE_h__
diff --git a/src/vm/stackprobe.inl b/src/vm/stackprobe.inl
new file mode 100644
index 0000000000..5a2214433f
--- /dev/null
+++ b/src/vm/stackprobe.inl
@@ -0,0 +1,136 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+
+//
+// ==--==
+//
+
+//
+//-----------------------------------------------------------------------------
+// Stack Probe Header for inline functions
+// Used to setup stack guards
+//-----------------------------------------------------------------------------
+#ifndef __STACKPROBE_inl__
+#define __STACKPROBE_inl__
+
+#include "stackprobe.h"
+#include "common.h"
+
+#if defined(FEATURE_STACK_PROBE) && !defined(DACCESS_COMPILE)
+
+// We want these inlined in retail builds, but out-of-line in stackprobe.cpp in debug builds.
+#if !defined(_DEBUG) || defined(INCLUDE_RETAIL_STACK_PROBE)
+
+#ifndef _DEBUG
+#define INLINE_NONDEBUG_ONLY FORCEINLINE
+#else
+#define INLINE_NONDEBUG_ONLY
+#endif
+
+INLINE_NONDEBUG_ONLY BOOL ShouldProbeOnThisThread()
+{
+ // we only want to probe on user threads, not any of our special threads
+ return GetCurrentTaskType() == TT_USER;
+}
+
+#if defined(_DEBUG) && defined(STACK_GUARDS_DEBUG)
+
+DEBUG_NOINLINE void DebugSOTolerantTransitionHandler::EnterSOTolerantCode(Thread *pThread)
+{
+ SCAN_SCOPE_BEGIN;
+ ANNOTATION_FN_SO_TOLERANT;
+
+ if (pThread)
+ {
+ m_clrDebugState = pThread->GetClrDebugState();
+ }
+ else
+ {
+ m_clrDebugState = GetClrDebugState();
+ }
+ if (m_clrDebugState)
+ m_prevSOTolerantState = m_clrDebugState->BeginSOTolerant();
+}
+
+DEBUG_NOINLINE void DebugSOTolerantTransitionHandler::ReturnFromSOTolerantCode()
+{
+ SCAN_SCOPE_END;
+
+ if (m_clrDebugState)
+ m_clrDebugState->SetSOTolerance(m_prevSOTolerantState);
+}
+
+#endif
+
+// Keep the main body out of line to keep code size down.
+NOINLINE BOOL RetailStackProbeNoThrowWorker(unsigned int n, Thread *pThread);
+NOINLINE void RetailStackProbeWorker(unsigned int n, Thread *pThread);
+
+INLINE_NONDEBUG_ONLY
+BOOL RetailStackProbeNoThrow(unsigned int n, Thread *pThread)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#ifdef STACK_GUARDS_RELEASE
+ if(!IsStackProbingEnabled())
+ {
+ return TRUE;
+ }
+#endif
+
+ return RetailStackProbeNoThrowWorker(n, pThread);
+}
+
+INLINE_NONDEBUG_ONLY
+void RetailStackProbe(unsigned int n, Thread *pThread)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#ifdef STACK_GUARDS_RELEASE
+ if(!IsStackProbingEnabled())
+ {
+ return;
+ }
+#endif
+
+ if (RetailStackProbeNoThrowWorker(n, pThread))
+ {
+ return;
+ }
+ ReportStackOverflow();
+}
+
+INLINE_NONDEBUG_ONLY
+void RetailStackProbe(unsigned int n)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#ifdef STACK_GUARDS_RELEASE
+ if(!IsStackProbingEnabled())
+ {
+ return;
+ }
+#endif
+
+ if (RetailStackProbeNoThrowWorker(n, GetThread()))
+ {
+ return;
+ }
+ ReportStackOverflow();
+}
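+
+// Illustrative usage sketch (an addition, not original code; the function name
+// is hypothetical): probing up front so that the work below the probe cannot
+// take a hard stack overflow part-way through.
+//
+// void DoSensitiveWorkSketch()
+// {
+//     RetailStackProbe(12, GetThread()); // throws via ReportStackOverflow() on failure
+//     // ... stack-overflow-sensitive work ...
+// }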
+
+#endif
+#endif
+
+
+#endif // __STACKPROBE_inl__
diff --git a/src/vm/stacksampler.cpp b/src/vm/stacksampler.cpp
new file mode 100644
index 0000000000..1246466b07
--- /dev/null
+++ b/src/vm/stacksampler.cpp
@@ -0,0 +1,468 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+// Summary:
+// --------
+//
+// StackSampler is intended to identify methods where the process is spending most of its time
+// and to re-JIT such methods in the background. Call these methods hot.
+//
+// Identifying hot methods:
+// ========================
+//
+// There is no easy way to tell at a given point in execution whether in the future an unseen
+// or un-hot method will become hot, so we track an evolving list of hot methods.
+//
+// We identify hot methods by suspending the runtime every "m" milliseconds. This operation
+// freezes all the threads. We now get a list of threads that are executing and walk their
+// stacks to get the managed method at the top of their stacks. The sampled list of methods
+// for each thread constitutes a single sample. Once we obtain a sample, the threads are thawed.
+//
+// The more often a method appears in samples, the clearer it is that the process is spending
+// its time in that method.
+//
+// We track this information on a per-method basis, using a hash map to count each method's
+// occurrences across samples.
+//
+// Note:
+// =====
+// o Using the above technique we have only identified top methods at a given point in the execution.
+// The list of hot methods keeps evolving as we get more samples. Only at the process end can we
+// say that the evolving list of hot methods is THE list of hot methods for the whole process.
+// o Because we get the top managed method in the thread, this includes time spent by that method
+// in helper calls.
+// o If a GC is in progress, it has already suspended the threads, and we would not be able to suspend them ourselves.
+//
+// Future Consideration:
+// =====================
+// We could track "trending" methods, as methods decay out so we can keep only "trending" variants
+// in the code manager and kick out "past" hot methods.
+//
+// Jitting in the background:
+// ==========================
+// Once we have the hot methods at a given point in time, we JIT them. The decision to JIT is
+// configurable via the number of times a method must be seen in samples before we JIT it.
+// For example, if we are sampling every 10 msec and we expect methods that spend at least
+// 1 second on the stack to be hot, then that threshold is roughly 100; it is best to be
+// conservative with this number.
+//
+// Note that we JIT the evolving list of methods, without knowing ahead of time whether the methods
+// we JIT will be the final top "n" hot methods, since we cannot know when the process will end.
+// This means we may over-JIT, but the extra JITting only adds methods; it does not miss genuinely hot ones.
+//
+// Currently we JIT in the background only once (with the current goal of getting a trace.)
+//
+// Note:
+// =====
+// o To run the JIT in the background, we try our best to JIT in the same app domain in which the original
+// JITting happened. But if we fail to acquire (ngen'ed method) or enter (unloaded domain) the original domain,
+// we then JIT it under the app domain in which the method was last seen executing.
+//
+// o The JIT to use is configurable with COMPLUS_AltJitName when COMPLUS_StackSampling is enabled.
+//
+// o One use case is to collect traces as an .mc file from SuperPMI Shim JIT.
+//
+// Jitting parameters:
+// ==========================
+// At JITting time, the prestub calls "RecordJittingInfo" so that we can record the parameters
+// originally used to JIT the method. We reuse these parameters when we later JIT the method in the background.
+//
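+//
+// Illustrative arithmetic (restating the example above; an addition, not
+// original text): with a 10 msec sampling interval, a method on top of some
+// stack for a cumulative 1 second is expected to appear in about
+// 1000 / 10 = 100 samples, so a threshold near 100 (or higher, to be
+// conservative) is a reasonable default.
+//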
+
+
+#include "common.h"
+#include "corjit.h"
+#include "stacksampler.h"
+#include "threadsuspend.h"
+
+#ifdef FEATURE_STACK_SAMPLING
+
+// Global instance of the sampler
+StackSampler* g_pStackSampler = nullptr;
+
+// Create an instance of the stack sampler if sampling is enabled.
+void StackSampler::Init()
+{
+ STANDARD_VM_CONTRACT;
+
+ bool samplingEnabled = (CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_StackSamplingEnabled) != 0);
+ if (samplingEnabled)
+ {
+ g_pStackSampler = new (nothrow) StackSampler();
+ }
+}
+
+// ThreadProc for performing sampling and JITting.
+/* static */
+DWORD __stdcall StackSampler::SamplingThreadProc(void* arg)
+{
+ WRAPPER_NO_CONTRACT;
+
+ StackSampler* pThis = (StackSampler*) arg;
+ pThis->ThreadProc();
+ return 0;
+}
+
+// Constructor
+StackSampler::StackSampler()
+ : m_nSampleAfter(0)
+ , m_nSampleEvery(s_knDefaultSamplingIntervalMsec)
+ , m_nNumMethods(s_knDefaultNumMethods)
+ , m_crstJitInfo(CrstStackSampler, (CrstFlags) (CRST_UNSAFE_ANYMODE))
+{
+ // When to start sampling after the thread launch.
+ int nSampleAfter = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_StackSamplingAfter);
+ if (nSampleAfter != INT_MAX && nSampleAfter >= 0)
+ {
+ m_nSampleAfter = nSampleAfter;
+ }
+
+ // How frequently to sample.
+ int nSampleEvery = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_StackSamplingEvery);
+ if (nSampleEvery != INT_MAX && nSampleEvery > 0)
+ {
+ m_nSampleEvery = nSampleEvery;
+ }
+
+ // Max number of methods to track.
+ int nNumMethods = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_StackSamplingNumMethods);
+ if (nNumMethods != INT_MAX && nNumMethods > 0)
+ {
+ m_nNumMethods = nNumMethods;
+ }
+
+ // Launch the thread.
+ m_pThread = SetupUnstartedThread();
+ m_pThread->SetBackground(TRUE);
+
+ // Passing "this" to the thread in the constructor.
+ if (m_pThread->CreateNewThread(1*1024*1024, SamplingThreadProc, this))
+ {
+ m_pThread->StartThread();
+ }
+}
+
+// Is "pMD" a good method desc, i.e., one suitable for tracking as hot and
+// JITting in the background?
+bool IsGoodMethodDesc(MethodDesc* pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+ return !(pMD == nullptr || !pMD->IsIL() || pMD->IsUnboxingStub() || pMD->GetMethodTable()->Collectible());
+}
+
+//
+// An opportunity to record the parameters passed to the JIT at the time of JITting this method.
+/* static */
+void StackSampler::RecordJittingInfo(MethodDesc* pMD, DWORD dwFlags, DWORD dwFlags2)
+{
+ WRAPPER_NO_CONTRACT;
+ if (g_pStackSampler == nullptr)
+ {
+ return;
+ }
+ // Skip if this is not a good method desc.
+ if (!IsGoodMethodDesc(pMD))
+ {
+ return;
+ }
+ // Record in the hash map.
+ g_pStackSampler->RecordJittingInfoInternal(pMD, dwFlags);
+}
+
+void StackSampler::RecordJittingInfoInternal(MethodDesc* pMD, DWORD dwFlags)
+{
+ ADID dwDomainId = GetThread()->GetDomain()->GetId();
+ JitInfoHashEntry entry(pMD, dwDomainId);
+
+ // Record the domain in the hash map.
+ {
+ CrstHolder ch(&m_crstJitInfo);
+ m_jitInfo.AddOrReplace(entry);
+ }
+}
+
+// Obtain the domain ID in which the method was originally JITted; if
+// it was never JITted (NGen'ed) or the original app domain was unloaded,
+// use the supplied "defaultId".
+ADID StackSampler::GetDomainId(MethodDesc* pMD, const ADID& defaultId)
+{
+ ADID adId;
+ BOOL bPresent = FALSE;
+ {
+ CrstHolder ch(&m_crstJitInfo);
+ bPresent = m_jitInfo.Lookup(pMD, &adId);
+ }
+ if (bPresent != FALSE)
+ {
+ AppDomainFromIDHolder pDomain(adId, FALSE);
+ if (!pDomain.IsUnloaded())
+ {
+ return adId;
+ }
+ }
+ return defaultId;
+}
+
+// Stack walk callback data.
+struct WalkInfo
+{
+ StackSampler* pThis;
+
+ // The thread in which the walk is happening and the method is executing.
+ // Used to obtain the app domain.
+ Thread* pMdThread;
+};
+
+// Visitor for stack walk callback.
+StackWalkAction StackSampler::StackWalkCallback(CrawlFrame* pCf, VOID* data)
+{
+ WRAPPER_NO_CONTRACT;
+
+ WalkInfo* info = (WalkInfo*) data;
+ return ((StackSampler*) info->pThis)->CrawlFrameVisitor(pCf, info->pMdThread);
+}
+
+// Stack walk visitor helper to maintain the hash map of method desc, their count
+// and the thread's domain in which the method is executing.
+StackWalkAction StackSampler::CrawlFrameVisitor(CrawlFrame* pCf, Thread* pMdThread)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodDesc* pMD = pCf->GetFunction();
+
+ // Filter out methods we don't care about
+ if (!IsGoodMethodDesc(pMD))
+ {
+ return SWA_CONTINUE;
+ }
+
+ // Lookup the method desc and obtain info.
+ ADID adId = pMdThread->GetDomain()->GetId();
+ CountInfo info(adId);
+ m_countInfo.Lookup(pMD, &info);
+
+ // Record the current domain ID of the method's thread, i.e., the
+ // domain in which the method was last known to be executing.
+ info.adDomainId = adId;
+ info.uCount++;
+
+ // Put the info back.
+ m_countInfo.AddOrReplace(CountInfoHashEntry(pMD, info));
+
+ // We got the topmost good method; abort the rest of the walk.
+ return SWA_ABORT;
+}
+
+// Thread routine that suspends the runtime, walks the other threads' stacks to get the
+// top managed method. Restarts the runtime after samples are collected. Identifies top
+// methods from the samples and re-JITs them in the background.
+void StackSampler::ThreadProc()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ // Complete the thread init.
+ if (!m_pThread->HasStarted())
+ {
+ return;
+ }
+
+ BEGIN_SO_INTOLERANT_CODE(m_pThread);
+
+ // The user asked us to start sampling only after a certain delay.
+ m_pThread->UserSleep(m_nSampleAfter);
+
+ WalkInfo info = { this, nullptr };
+
+ while (true)
+ {
+ EX_TRY
+ {
+ // Suspend the runtime.
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER);
+
+ // Walk all other threads.
+ Thread* pThread = nullptr;
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != nullptr)
+ {
+ if (pThread == m_pThread)
+ {
+ continue;
+ }
+ // TODO: Detect if thread is suspended by user before we suspended and skip.
+
+ info.pMdThread = pThread;
+
+ // Walk the frames.
+ pThread->StackWalkFrames(StackWalkCallback, &info, FUNCTIONSONLY | ALLOW_ASYNC_STACK_WALK);
+ }
+
+ // Restart the runtime.
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+
+ // JIT the methods that frequently occur in samples.
+ JitFrequentMethodsInSamples();
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ // The user asked us to sample at a fixed interval.
+ // TODO: Measure time to JIT using CycleTimer and subtract it from the time we sleep each iteration.
+ m_pThread->UserSleep(m_nSampleEvery);
+ }
+
+ END_SO_INTOLERANT_CODE;
+}
+
+// Find the most frequent methods in the samples and JIT them.
+void StackSampler::JitFrequentMethodsInSamples()
+{
+ struct Count
+ {
+ MethodDesc* pMD;
+ CountInfo info;
+
+ static int __cdecl Decreasing(const void* e1, const void* e2)
+ {
+ return ((Count*) e2)->info.uCount - ((Count*) e1)->info.uCount;
+ }
+ };
+
+ // We want to keep a list of the most frequent methods in the samples (maintained
+ // below as a flat array with min-replacement rather than an actual max-heap).
+ NewHolder<Count> freq(new (nothrow) Count[m_nNumMethods]);
+
+ //
+ // For each element in the samples, call it incoming, add to the "frequent" list
+ // if the list has space to hold the incoming element.
+ //
+ // If the list doesn't have space, replace the min frequent element in the list
+ // with the incoming element, if the latter is more frequent.
+ //
+ unsigned uLength = 0;
+ for (CountInfoHash::Iterator iter = m_countInfo.Begin(), end = m_countInfo.End(); iter != end; iter++)
+ {
+ Count c = { (*iter).Key(), (*iter).Value() };
+
+ // Is the list full? Drop the min element if incoming is more frequent.
+ if (uLength == m_nNumMethods)
+ {
+ // Find the min element and the min index.
+ unsigned uMinIndex = 0;
+ unsigned uMin = freq[0].info.uCount;
+ for (unsigned i = 1; i < uLength; ++i)
+ {
+ if (uMin > freq[i].info.uCount)
+ {
+ uMin = freq[i].info.uCount;
+ uMinIndex = i;
+ }
+ }
+ if (uMin < c.info.uCount)
+ {
+ freq[uMinIndex] = c;
+ }
+ }
+ // List is not full, just add the incoming element.
+ else
+ {
+ freq[uLength] = c;
+ uLength++;
+ }
+ }
+
+ // Sort by most frequent element first.
+ qsort(freq, uLength, sizeof(Count), Count::Decreasing);
+
+#ifdef _DEBUG
+ LOG((LF_JIT, LL_INFO100000, "-----------HOT METHODS-------\n"));
+ for (unsigned i = 0; i < uLength; ++i)
+ {
+ // printf("%s:%s, %u\n", freq[i].pMD->GetMethodTable()->GetClass()->GetDebugClassName(), freq[i].pMD->GetName(), freq[i].info.uCount);
+ LOG((LF_JIT, LL_INFO100000, "%s:%s, %u\n", freq[i].pMD->GetMethodTable()->GetClass()->GetDebugClassName(), freq[i].pMD->GetName(), freq[i].info.uCount));
+ }
+ LOG((LF_JIT, LL_INFO100000, "-----------------------------\n"));
+#endif
+
+ // Do the JITting.
+ for (unsigned i = 0; i < uLength; ++i)
+ {
+ // If not already JITted and the method is frequent enough to be important.
+ if (!freq[i].info.fJitted && freq[i].info.uCount > s_knDefaultCountForImportance)
+ {
+ // Try to get the original app domain ID in which the method was JITted; if unavailable,
+ // use the app domain ID in which the method was last seen executing.
+ ADID adId = GetDomainId(freq[i].pMD, freq[i].info.adDomainId);
+ JitAndCollectTrace(freq[i].pMD, adId);
+ }
+ }
+}
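+
+// Worked example of the selection loop above (illustrative only): with
+// m_nNumMethods == 2 and sampled counts A:5, B:3, C:7, the list evolves as
+// [A], then [A, B]; when C arrives the list is full, so the minimum entry
+// (B, count 3) is located and replaced because 3 < 7, leaving [A, C] to be
+// sorted and JITted.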
+
+// Invoke the JIT for the method desc. Switch to the appropriate domain.
+void StackSampler::JitAndCollectTrace(MethodDesc* pMD, const ADID& adId)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Indicate to the JIT or the JIT interface that we are JITting
+ // in the background for stack sampling.
+ DWORD dwFlags2 = CORJIT_FLG2_SAMPLING_JIT_BACKGROUND;
+
+ _ASSERTE(pMD->IsIL());
+
+ EX_TRY
+ {
+ ENTER_DOMAIN_ID(adId)
+ {
+ GCX_PREEMP();
+
+ COR_ILMETHOD_DECODER::DecoderStatus status;
+ NewHolder<COR_ILMETHOD_DECODER> pDecoder(
+ new COR_ILMETHOD_DECODER(pMD->GetILHeader(),
+ pMD->GetMDImport(),
+ &status));
+
+#ifdef _DEBUG
+ LOG((LF_JIT, LL_INFO100000, "Jitting the hot method desc using SuperPMI in the background thread -> "));
+ LOG((LF_JIT, LL_INFO100000, "%s:%s\n", pMD->GetMethodTable()->GetClass()->GetDebugClassName(), pMD->GetName()));
+#endif
+
+ PCODE pCode = UnsafeJitFunction(pMD, pDecoder, 0, dwFlags2);
+ }
+ END_DOMAIN_TRANSITION;
+
+ // Update that this method has been already JITted.
+ CountInfo info((ADID) DefaultADID);
+ m_countInfo.Lookup(pMD, &info);
+ info.fJitted = true;
+ m_countInfo.AddOrReplace(CountInfoHashEntry(pMD, info));
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+}
+
+#endif // FEATURE_STACK_SAMPLING
diff --git a/src/vm/stacksampler.h b/src/vm/stacksampler.h
new file mode 100644
index 0000000000..bfb0173161
--- /dev/null
+++ b/src/vm/stacksampler.h
@@ -0,0 +1,82 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*++
+
+Module Name:
+
+ StackSampler.h
+
+--*/
+
+#ifndef __STACK_SAMPLER_H
+#define __STACK_SAMPLER_H
+
+#ifdef FEATURE_STACK_SAMPLING
+
+class StackSampler
+{
+public:
+ // Interface
+ static void Init();
+ static void RecordJittingInfo(MethodDesc* pMD, DWORD dwFlags, DWORD dwFlags2);
+
+private:
+
+ // Methods
+ StackSampler();
+ ~StackSampler();
+
+ static DWORD __stdcall SamplingThreadProc(void* arg);
+
+ static StackWalkAction StackWalkCallback(CrawlFrame* pCf, VOID* data);
+
+ StackWalkAction CrawlFrameVisitor(CrawlFrame* pCf, Thread* pMdThread);
+
+ void ThreadProc();
+
+ void JitFrequentMethodsInSamples();
+
+ void JitAndCollectTrace(MethodDesc* pMD, const ADID& adId);
+
+ void RecordJittingInfoInternal(MethodDesc* pMD, DWORD flags);
+ ADID GetDomainId(MethodDesc* pMD, const ADID& defaultId);
+
+
+ // Constants
+ static const int s_knDefaultSamplingIntervalMsec = 100;
+ static const int s_knDefaultNumMethods = 32;
+ static const int s_knDefaultCountForImportance = 0; // TODO: Set to some reasonable value.
+
+ // Typedefs
+ struct CountInfo;
+ typedef MapSHash<MethodDesc*, CountInfo> CountInfoHash;
+ typedef CountInfoHash::element_t CountInfoHashEntry;
+
+ typedef MapSHash<MethodDesc*, ADID> JitInfoHash;
+ typedef JitInfoHash::element_t JitInfoHashEntry;
+
+ // Nested types
+ struct CountInfo
+ {
+ unsigned uCount;
+ bool fJitted;
+ ADID adDomainId;
+ CountInfo(const ADID& adId) : uCount(0), fJitted(false), adDomainId(adId) {}
+ CountInfo() {} // SHash requires a default constructor
+ };
+
+ // Fields
+ Crst m_crstJitInfo;
+ CountInfoHash m_countInfo;
+ JitInfoHash m_jitInfo;
+ Thread* m_pThread;
+ unsigned m_nSampleEvery;
+ unsigned m_nSampleAfter;
+ unsigned m_nNumMethods;
+};
+#endif // FEATURE_STACK_SAMPLING
+
+#endif // __STACK_SAMPLER_H
+
diff --git a/src/vm/stackwalk.cpp b/src/vm/stackwalk.cpp
new file mode 100644
index 0000000000..72b9e3ccac
--- /dev/null
+++ b/src/vm/stackwalk.cpp
@@ -0,0 +1,3393 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// STACKWALK.CPP
+
+
+
+#include "common.h"
+#include "frames.h"
+#include "threads.h"
+#include "stackwalk.h"
+#include "excep.h"
+#include "eetwain.h"
+#include "codeman.h"
+#include "eeconfig.h"
+#include "stackprobe.h"
+#include "dbginterface.h"
+#include "generics.h"
+#ifdef FEATURE_INTERPRETER
+#include "interpreter.h"
+#endif // FEATURE_INTERPRETER
+
+#ifdef _DEBUG
+void* forceFrame; // Variable used to force a local variable onto the stack frame
+#endif
+
+CrawlFrame::CrawlFrame()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ pCurGSCookie = NULL;
+ pFirstGSCookie = NULL;
+ isCachedMethod = FALSE;
+}
+
+Assembly* CrawlFrame::GetAssembly()
+{
+ WRAPPER_NO_CONTRACT;
+
+ Assembly *pAssembly = NULL;
+ Frame *pF = GetFrame();
+
+ if (pF != NULL)
+ pAssembly = pF->GetAssembly();
+
+ if (pAssembly == NULL && pFunc != NULL)
+ pAssembly = pFunc->GetModule()->GetAssembly();
+
+ return pAssembly;
+}
+
+OBJECTREF* CrawlFrame::GetAddrOfSecurityObject()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ if (isFrameless)
+ {
+ _ASSERTE(pFunc);
+
+#if defined(_TARGET_X86_)
+ if (isCachedMethod)
+ {
+ return pSecurityObject;
+ }
+ else
+#endif // _TARGET_X86_
+ {
+ return (static_cast <OBJECTREF*>(GetCodeManager()->GetAddrOfSecurityObject(this)));
+ }
+ }
+ else
+ {
+#ifdef FEATURE_INTERPRETER
+ // Check for an InterpreterFrame.
+ Frame* pFrm = GetFrame();
+ if (pFrm != NULL && pFrm->GetVTablePtr() == InterpreterFrame::GetMethodFrameVPtr())
+ {
+#ifdef DACCESS_COMPILE
+ // TBD: DACize the interpreter.
+ return NULL;
+#else
+ return dac_cast<PTR_InterpreterFrame>(pFrm)->GetInterpreter()->GetAddressOfSecurityObject();
+#endif
+ }
+ // Otherwise...
+#endif // FEATURE_INTERPRETER
+
+ /*ISSUE: Are there any other functions holding a security desc? */
+ if (pFunc && (pFunc->IsIL() || pFunc->IsNoMetadata()))
+ return dac_cast<PTR_FramedMethodFrame>
+ (pFrame)->GetAddrOfSecurityDesc();
+ }
+ return NULL;
+}
+
+BOOL CrawlFrame::IsInCalleesFrames(LPVOID stackPointer)
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_INTERPRETER
+ Frame* pFrm = GetFrame();
+ if (pFrm != NULL && pFrm->GetVTablePtr() == InterpreterFrame::GetMethodFrameVPtr())
+ {
+#ifdef DACCESS_COMPILE
+ // TBD: DACize the interpreter.
+ return FALSE;
+#else
+ return dac_cast<PTR_InterpreterFrame>(pFrm)->GetInterpreter()->IsInCalleesFrames(stackPointer);
+#endif
+ }
+ else if (pFunc != NULL)
+ {
+ return ::IsInCalleesFrames(GetRegisterSet(), stackPointer);
+ }
+ else
+ {
+ return FALSE;
+ }
+#else
+ return ::IsInCalleesFrames(GetRegisterSet(), stackPointer);
+#endif
+}
+
+#ifdef FEATURE_INTERPRETER
+MethodDesc* CrawlFrame::GetFunction()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ if (pFunc != NULL)
+ {
+ return pFunc;
+ }
+ else
+ {
+ Frame* pFrm = GetFrame();
+ if (pFrm != NULL && pFrm->GetVTablePtr() == InterpreterFrame::GetMethodFrameVPtr())
+ {
+#ifdef DACCESS_COMPILE
+ // TBD: DACize the interpreter.
+ return NULL;
+#else
+ return dac_cast<PTR_InterpreterFrame>(pFrm)->GetInterpreter()->GetMethodDesc();
+#endif
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+}
+#endif // FEATURE_INTERPRETER
+
+OBJECTREF CrawlFrame::GetThisPointer()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ if (!pFunc || pFunc->IsStatic() || pFunc->GetMethodTable()->IsValueType())
+ return NULL;
+
+ // As discussed in the specification comment at the declaration, the precondition, unfortunately,
+ // differs by architecture. @TODO: fix this.
+#if defined(_TARGET_X86_)
+ _ASSERTE_MSG((pFunc->IsSharedByGenericInstantiations() && pFunc->AcquiresInstMethodTableFromThis())
+ || pFunc->IsSynchronized(),
+ "Precondition");
+#else
+ _ASSERTE_MSG(pFunc->IsSharedByGenericInstantiations() && pFunc->AcquiresInstMethodTableFromThis(), "Precondition");
+#endif
+
+ if (isFrameless)
+ {
+ return GetCodeManager()->GetInstance(pRD,
+ &codeInfo);
+ }
+ else
+ {
+ _ASSERTE(pFrame);
+ _ASSERTE(pFunc);
+ /*ISSUE: we already know that we have (at least) a method */
+ /* might need adjustment as soon as we solved the
+ jit-helper frame question
+ */
+ //<TODO>@TODO: What about other calling conventions?
+// _ASSERT(pFunc()->GetCallSig()->CALLING CONVENTION);</TODO>
+
+#ifdef _TARGET_AMD64_
+ // @TODO: PORT: we need to find the this pointer without triggering a GC
+ // or find a way to make this method GC_TRIGGERS
+ return NULL;
+#else
+ return (dac_cast<PTR_FramedMethodFrame>(pFrame))->GetThis();
+#endif // _TARGET_AMD64_
+ }
+}
+
+
+//-----------------------------------------------------------------------------
+// Get the "Ambient SP" from a CrawlFrame.
+// This will be null if there is no Ambient SP (e.g., in the prolog / epilog,
+// or on certain platforms).
+//-----------------------------------------------------------------------------
+TADDR CrawlFrame::GetAmbientSPFromCrawlFrame()
+{
+ SUPPORTS_DAC;
+#if defined(_TARGET_X86_)
+ // We set the nesting level to zero because it won't be used for esp-framed methods,
+ // and zero is at least valid for ebp-based methods (where we won't use the ambient esp anyway).
+ DWORD nestingLevel = 0;
+ return GetCodeManager()->GetAmbientSP(
+ GetRegisterSet(),
+ GetCodeInfo(),
+ GetRelOffset(),
+ nestingLevel,
+ GetCodeManState()
+ );
+
+#elif defined(_TARGET_ARM_)
+ return GetRegisterSet()->pCurrentContext->Sp;
+#else
+ return NULL;
+#endif
+}
+
+
+PTR_VOID CrawlFrame::GetParamTypeArg()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ if (isFrameless)
+ {
+ return GetCodeManager()->GetParamTypeArg(pRD,
+ &codeInfo);
+ }
+ else
+ {
+#ifdef FEATURE_INTERPRETER
+ if (pFrame != NULL && pFrame->GetVTablePtr() == InterpreterFrame::GetMethodFrameVPtr())
+ {
+#ifdef DACCESS_COMPILE
+ // TBD: DACize the interpreter.
+ return NULL;
+#else
+ return dac_cast<PTR_InterpreterFrame>(pFrame)->GetInterpreter()->GetParamTypeArg();
+#endif
+ }
+ // Otherwise...
+#endif // FEATURE_INTERPRETER
+
+ if (!pFunc || !pFunc->RequiresInstArg())
+ {
+ return NULL;
+ }
+
+#ifdef _WIN64
+ if (!pFunc->IsSharedByGenericInstantiations() ||
+ !(pFunc->RequiresInstMethodTableArg() || pFunc->RequiresInstMethodDescArg()))
+ {
+ // win64 can only return the param type arg if the method is shared code
+ // and actually has a param type arg
+ return NULL;
+ }
+#endif // _WIN64
+
+ _ASSERTE(pFrame);
+ _ASSERTE(pFunc);
+ return (dac_cast<PTR_FramedMethodFrame>(pFrame))->GetParamTypeArg();
+ }
+}
+
+
+
+// [pClassInstantiation] : Always filled in, though may be set to NULL if no inst.
+// [pMethodInst] : Always filled in, though may be set to NULL if no inst.
+void CrawlFrame::GetExactGenericInstantiations(Instantiation *pClassInst,
+ Instantiation *pMethodInst)
+{
+
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pClassInst));
+ PRECONDITION(CheckPointer(pMethodInst));
+ } CONTRACTL_END;
+
+ TypeHandle specificClass;
+ MethodDesc* specificMethod;
+
+ BOOL ret = Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation(
+ GetFunction(),
+ GetExactGenericArgsToken(),
+ &specificClass,
+ &specificMethod);
+
+ if (!ret)
+ {
+ _ASSERTE(!"Cannot return exact class instantiation when we are requested to.");
+ }
+
+ *pClassInst = specificMethod->GetExactClassInstantiation(specificClass);
+ *pMethodInst = specificMethod->GetMethodInstantiation();
+}
+
+PTR_VOID CrawlFrame::GetExactGenericArgsToken()
+{
+
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ MethodDesc* pFunc = GetFunction();
+
+ if (!pFunc || !pFunc->IsSharedByGenericInstantiations())
+ return NULL;
+
+ if (pFunc->AcquiresInstMethodTableFromThis())
+ {
+ OBJECTREF obj = GetThisPointer();
+ if (obj == NULL)
+ return NULL;
+ return obj->GetMethodTable();
+ }
+ else
+ {
+ _ASSERTE(pFunc->RequiresInstArg());
+ return GetParamTypeArg();
+ }
+}
+
+// Is this frame at a safe spot for GC?
+bool CrawlFrame::IsGcSafe()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ return GetCodeManager()->IsGcSafe(&codeInfo, GetRelOffset());
+}
+
+inline void CrawlFrame::GotoNextFrame()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ //
+ // Update app domain if this frame caused a transition
+ //
+
+ AppDomain *pRetDomain = pFrame->GetReturnDomain();
+ if (pRetDomain != NULL)
+ pAppDomain = pRetDomain;
+ pFrame = pFrame->Next();
+
+ if (pFrame != FRAME_TOP)
+ {
+ SetCurGSCookie(Frame::SafeGetGSCookiePtr(pFrame));
+ }
+}
+
+//******************************************************************************
+
+// For asynchronous stackwalks, the thread being walked may not be suspended,
+// so a buffer overrun could corrupt the stack while the stack-walk is in progress.
+// To detect this, we can only use data that is guarded by a GSCookie
+// that has been recently checked.
+// This function should be called after doing any time-consuming activity
+// during stack-walking to reduce the window in which a buffer overrun
+// could cause problems.
+//
+// To keep things simple, we do this checking even for synchronous stack-walks.
+void CrawlFrame::CheckGSCookies()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+#if !defined(DACCESS_COMPILE)
+ if (pFirstGSCookie == NULL)
+ return;
+
+ if (*pFirstGSCookie != GetProcessGSCookie())
+ DoJITFailFast();
+
+ if(*pCurGSCookie != GetProcessGSCookie())
+ DoJITFailFast();
+#endif // !DACCESS_COMPILE
+}
+
+void CrawlFrame::SetCurGSCookie(GSCookie * pGSCookie)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+#if !defined(DACCESS_COMPILE)
+ if (pGSCookie == NULL)
+ DoJITFailFast();
+
+ pCurGSCookie = pGSCookie;
+ if (pFirstGSCookie == NULL)
+ pFirstGSCookie = pGSCookie;
+
+ CheckGSCookies();
+#endif // !DACCESS_COMPILE
+}
+
+#if defined(WIN64EXCEPTIONS)
+bool CrawlFrame::IsFilterFunclet()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!IsFrameless())
+ {
+ return false;
+ }
+
+ if (!isFilterFuncletCached)
+ {
+ isFilterFunclet = GetJitManager()->IsFilterFunclet(&codeInfo) != 0;
+ isFilterFuncletCached = true;
+ }
+
+ return isFilterFunclet;
+}
+
+#endif // WIN64EXCEPTIONS
+
+//******************************************************************************
+#if defined(ELIMINATE_FEF)
+//******************************************************************************
+// Advance to the next ExInfo. Typically done when an ExInfo has been used and
+// should not be used again.
+//******************************************************************************
+void ExInfoWalker::WalkOne()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ if (m_pExInfo)
+ {
+ LOG((LF_EH, LL_INFO10000, "ExInfoWalker::WalkOne: advancing ExInfo chain: pExInfo:%p, pContext:%p; prev:%p, pContext:%p\n",
+ m_pExInfo, m_pExInfo->m_pContext, m_pExInfo->m_pPrevNestedInfo, m_pExInfo->m_pPrevNestedInfo?m_pExInfo->m_pPrevNestedInfo->m_pContext:0));
+ m_pExInfo = m_pExInfo->m_pPrevNestedInfo;
+ }
+} // void ExInfoWalker::WalkOne()
+
+//******************************************************************************
+// Attempt to find an ExInfo with a pContext that is higher (older) than
+// a given minimum location. (It is the pContext's SP that is relevant.)
+//******************************************************************************
+void ExInfoWalker::WalkToPosition(
+ TADDR taMinimum, // Starting point of stack walk.
+ BOOL bPopFrames) // If true, ResetUseExInfoForStackwalk on each exinfo.
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ while (m_pExInfo &&
+ ((GetSPFromContext() < taMinimum) ||
+ (GetSPFromContext() == NULL)) )
+ {
+ // Try the next ExInfo, if there is one.
+ LOG((LF_EH, LL_INFO10000,
+ "ExInfoWalker::WalkToPosition: searching ExInfo chain: m_pExInfo:%p, pContext:%p; \
+ prev:%p, pContext:%p; pStartFrame:%p\n",
+ m_pExInfo,
+ m_pExInfo->m_pContext,
+ m_pExInfo->m_pPrevNestedInfo,
+ (m_pExInfo->m_pPrevNestedInfo ? m_pExInfo->m_pPrevNestedInfo->m_pContext : 0),
+ taMinimum));
+
+ if (bPopFrames)
+ { // If caller asked for it, reset the bit which indicates that this ExInfo marks a fault from managed code.
+ // This is done so that the fault can be effectively "unwound" from the stack, similarly to how Frames
+ // are unlinked from the Frame chain.
+ m_pExInfo->m_ExceptionFlags.ResetUseExInfoForStackwalk();
+ }
+ m_pExInfo = m_pExInfo->m_pPrevNestedInfo;
+ }
+ // At this point, m_pExInfo is NULL, or points to a pContext that is greater than taMinimum.
+} // void ExInfoWalker::WalkToPosition()
+
+//******************************************************************************
+// Attempt to find an ExInfo with a pContext that has an IP in managed code.
+//******************************************************************************
+void ExInfoWalker::WalkToManaged()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+
+ while (m_pExInfo)
+ {
+ // See if the current ExInfo has a CONTEXT that "returns" to managed code, and, if so, exit the loop.
+ if (m_pExInfo->m_ExceptionFlags.UseExInfoForStackwalk() &&
+ GetContext() &&
+ ExecutionManager::IsManagedCode(GetIP(GetContext())))
+ {
+ break;
+ }
+ // No, so skip to next, if any.
+ LOG((LF_EH, LL_INFO1000, "ExInfoWalker::WalkToManaged: searching for ExInfo->managed: m_pExInfo:%p, pContext:%p, sp:%p; prev:%p, pContext:%p\n",
+ m_pExInfo,
+ GetContext(),
+ GetSPFromContext(),
+ m_pExInfo->m_pPrevNestedInfo,
+ m_pExInfo->m_pPrevNestedInfo?m_pExInfo->m_pPrevNestedInfo->m_pContext:0));
+ m_pExInfo = m_pExInfo->m_pPrevNestedInfo;
+ }
+ // At this point, m_pExInfo is NULL, or points to a pContext that has an IP in managed code.
+} // void ExInfoWalker::WalkToManaged()
+#endif // defined(ELIMINATE_FEF)
+
+#ifdef WIN64EXCEPTIONS
+// static
+UINT_PTR Thread::VirtualUnwindCallFrame(PREGDISPLAY pRD, EECodeInfo* pCodeInfo /*= NULL*/)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ PRECONDITION(GetControlPC(pRD) == GetIP(pRD->pCurrentContext));
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (pRD->IsCallerContextValid)
+ {
+ // We already have the caller's frame context
+ // We just switch the pointers
+ PT_CONTEXT temp = pRD->pCurrentContext;
+ pRD->pCurrentContext = pRD->pCallerContext;
+ pRD->pCallerContext = temp;
+
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ PT_KNONVOLATILE_CONTEXT_POINTERS tempPtrs = pRD->pCurrentContextPointers;
+ pRD->pCurrentContextPointers = pRD->pCallerContextPointers;
+ pRD->pCallerContextPointers = tempPtrs;
+#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+ }
+ else
+ {
+ PT_KNONVOLATILE_CONTEXT_POINTERS pCurrentContextPointers = NULL;
+ NOT_X86(pCurrentContextPointers = pRD->pCurrentContextPointers);
+ VirtualUnwindCallFrame(pRD->pCurrentContext, pCurrentContextPointers, pCodeInfo);
+ }
+
+ SyncRegDisplayToCurrentContext(pRD);
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
+
+ return pRD->ControlPC;
+}
+
+// static
+PCODE Thread::VirtualUnwindCallFrame(T_CONTEXT* pContext,
+ T_KNONVOLATILE_CONTEXT_POINTERS* pContextPointers /*= NULL*/,
+ EECodeInfo * pCodeInfo /*= NULL*/)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pContext, NULL_NOT_OK));
+ PRECONDITION(CheckPointer(pContextPointers, NULL_OK));
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ PCODE uControlPc = GetIP(pContext);
+
+#ifndef FEATURE_PAL
+
+#if !defined(DACCESS_COMPILE)
+ UINT_PTR uImageBase;
+ PRUNTIME_FUNCTION pFunctionEntry;
+
+ if (pCodeInfo == NULL)
+ {
+ pFunctionEntry = RtlLookupFunctionEntry(uControlPc,
+ ARM_ONLY((DWORD*))(&uImageBase),
+ NULL);
+ }
+ else
+ {
+ pFunctionEntry = pCodeInfo->GetFunctionEntry();
+ uImageBase = (UINT_PTR)pCodeInfo->GetModuleBase();
+
+ // RUNTIME_FUNCTION of cold code just points to the RUNTIME_FUNCTION of hot code. The unwinder
+ // expects this indirection to be resolved, so we use RUNTIME_FUNCTION of the hot code even
+ // if we are in cold code.
+
+#if defined(_DEBUG)
+ UINT_PTR uImageBaseFromOS;
+ PRUNTIME_FUNCTION pFunctionEntryFromOS;
+
+ pFunctionEntryFromOS = RtlLookupFunctionEntry(uControlPc,
+ ARM_ONLY((DWORD*))(&uImageBaseFromOS),
+ NULL);
+ _ASSERTE( (uImageBase == uImageBaseFromOS) && (pFunctionEntry == pFunctionEntryFromOS) );
+#endif // _DEBUG
+ }
+
+ if (pFunctionEntry)
+ {
+ uControlPc = VirtualUnwindNonLeafCallFrame(pContext, pContextPointers, pFunctionEntry, uImageBase);
+ }
+ else
+ {
+ uControlPc = VirtualUnwindLeafCallFrame(pContext);
+ }
+#else // DACCESS_COMPILE
+ // We can't use RtlVirtualUnwind() from out-of-process. Instead, we call code:DacUnwindStackFrame,
+ // which is similar to StackWalk64().
+ if (DacUnwindStackFrame(pContext, pContextPointers) == TRUE)
+ {
+ uControlPc = GetIP(pContext);
+ }
+ else
+ {
+ ThrowHR(CORDBG_E_TARGET_INCONSISTENT);
+ }
+#endif // !DACCESS_COMPILE
+
+#endif // !FEATURE_PAL
+
+ return uControlPc;
+}
+
+#ifdef DACCESS_COMPILE
+
+PCODE Thread::VirtualUnwindLeafCallFrame(T_CONTEXT* pContext)
+{
+ DacNotImpl();
+ return 0;
+}
+UINT_PTR Thread::VirtualUnwindToFirstManagedCallFrame(T_CONTEXT* pContext)
+{
+ DacNotImpl();
+ return 0;
+}
+
+#else // !DACCESS_COMPILE
+
+// static
+PCODE Thread::VirtualUnwindLeafCallFrame(T_CONTEXT* pContext)
+{
+#ifndef FEATURE_PAL
+
+ PCODE uControlPc;
+#ifdef _DEBUG
+ UINT_PTR uImageBase;
+
+ PRUNTIME_FUNCTION pFunctionEntry = RtlLookupFunctionEntry((UINT_PTR)GetIP(pContext),
+ ARM_ONLY((DWORD*))(&uImageBase),
+ NULL);
+
+ CONSISTENCY_CHECK(NULL == pFunctionEntry);
+#endif // _DEBUG
+
+#if defined(_TARGET_AMD64_)
+
+ uControlPc = *(ULONGLONG*)pContext->Rsp;
+ pContext->Rip = uControlPc;
+ pContext->Rsp += sizeof(ULONGLONG);
+
+#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+
+ uControlPc = TADDR(pContext->Lr);
+
+#else
+ PORTABILITY_ASSERT("Thread::VirtualUnwindLeafCallFrame");
+ uControlPc = NULL;
+#endif
+
+ SetIP(pContext, uControlPc);
+
+ return uControlPc;
+#else // !FEATURE_PAL
+ PORTABILITY_ASSERT("UNIXTODO: Implement for PAL");
+#endif
+}
+
+// static
+PCODE Thread::VirtualUnwindNonLeafCallFrame(T_CONTEXT* pContext, KNONVOLATILE_CONTEXT_POINTERS* pContextPointers,
+ PRUNTIME_FUNCTION pFunctionEntry, UINT_PTR uImageBase)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pContext, NULL_NOT_OK));
+ PRECONDITION(CheckPointer(pContextPointers, NULL_OK));
+ PRECONDITION(CheckPointer(pFunctionEntry, NULL_OK));
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+
+ PCODE uControlPc = GetIP(pContext);
+#if defined(_WIN64)
+ UINT64 EstablisherFrame;
+ PVOID HandlerData;
+#elif defined(_TARGET_ARM_)
+ DWORD EstablisherFrame;
+ PVOID HandlerData;
+#else
+ _ASSERTE(!"nyi platform stackwalking");
+#endif
+
+ if (NULL == pFunctionEntry)
+ {
+ pFunctionEntry = RtlLookupFunctionEntry(uControlPc,
+ ARM_ONLY((DWORD*))(&uImageBase),
+ NULL);
+ if (NULL == pFunctionEntry)
+ {
+ return NULL;
+ }
+ }
+
+ RtlVirtualUnwind(NULL,
+ uImageBase,
+ uControlPc,
+ pFunctionEntry,
+ pContext,
+ &HandlerData,
+ &EstablisherFrame,
+ pContextPointers);
+
+ uControlPc = GetIP(pContext);
+ return uControlPc;
+#else // !FEATURE_PAL
+ PORTABILITY_ASSERT("UNIXTODO: Implement for PAL");
+#endif
+}
+
+// static
+UINT_PTR Thread::VirtualUnwindToFirstManagedCallFrame(T_CONTEXT* pContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+
+ PCODE uControlPc = GetIP(pContext);
+
+ // unwind out of this function and out of our caller to
+ // get our caller's PSP, or our caller's caller's SP.
+ while (!ExecutionManager::IsManagedCode(uControlPc))
+ {
+ uControlPc = VirtualUnwindCallFrame(pContext);
+ }
+
+ return uControlPc;
+#else // !FEATURE_PAL
+ PORTABILITY_ASSERT("TODO: Implement for PAL");
+#endif
+}
+
+#endif // DACCESS_COMPILE
+#endif // WIN64EXCEPTIONS
+
+#ifdef _DEBUG
+void Thread::DebugLogStackWalkInfo(CrawlFrame* pCF, __in_z LPCSTR pszTag, UINT32 uFramesProcessed)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ if (pCF->isFrameless)
+ {
+ LPCSTR pszType = "";
+
+#ifdef WIN64EXCEPTIONS
+ if (pCF->IsFunclet())
+ {
+ pszType = "[funclet]";
+ }
+ else
+#endif // WIN64EXCEPTIONS
+ if (pCF->pFunc->IsNoMetadata())
+ {
+ pszType = "[no metadata]";
+ }
+
+ LOG((LF_GCROOTS, LL_INFO10000, "STACKWALK: [%03x] %s: FRAMELESS: PC=" FMT_ADDR " SP=" FMT_ADDR " method=%s %s\n",
+ uFramesProcessed,
+ pszTag,
+ DBG_ADDR(GetControlPC(pCF->pRD)),
+ DBG_ADDR(GetRegdisplaySP(pCF->pRD)),
+ pCF->pFunc->m_pszDebugMethodName,
+ pszType));
+ }
+ else if (pCF->isNativeMarker)
+ {
+ LOG((LF_GCROOTS, LL_INFO10000, "STACKWALK: [%03x] %s: NATIVE : PC=" FMT_ADDR " SP=" FMT_ADDR "\n",
+ uFramesProcessed,
+ pszTag,
+ DBG_ADDR(GetControlPC(pCF->pRD)),
+ DBG_ADDR(GetRegdisplaySP(pCF->pRD))));
+ }
+ else if (pCF->isNoFrameTransition)
+ {
+ LOG((LF_GCROOTS, LL_INFO10000, "STACKWALK: [%03x] %s: NO_FRAME : PC=" FMT_ADDR " SP=" FMT_ADDR "\n",
+ uFramesProcessed,
+ pszTag,
+ DBG_ADDR(GetControlPC(pCF->pRD)),
+ DBG_ADDR(GetRegdisplaySP(pCF->pRD))));
+ }
+ else
+ {
+ LOG((LF_GCROOTS, LL_INFO10000, "STACKWALK: [%03x] %s: EXPLICIT : PC=" FMT_ADDR " SP=" FMT_ADDR " Frame=" FMT_ADDR" vtbl=" FMT_ADDR "\n",
+ uFramesProcessed,
+ pszTag,
+ DBG_ADDR(GetControlPC(pCF->pRD)),
+ DBG_ADDR(GetRegdisplaySP(pCF->pRD)),
+ DBG_ADDR(pCF->pFrame),
+ DBG_ADDR((pCF->pFrame != FRAME_TOP) ? pCF->pFrame->GetVTablePtr() : NULL)));
+ }
+}
+#endif // _DEBUG
+
+StackWalkAction Thread::MakeStackwalkerCallback(
+ CrawlFrame* pCF,
+ PSTACKWALKFRAMESCALLBACK pCallback,
+ VOID* pData
+ DEBUG_ARG(UINT32 uFramesProcessed))
+{
+ INDEBUG(DebugLogStackWalkInfo(pCF, "CALLBACK", uFramesProcessed));
+
+ // Since we may be asynchronously walking another thread's stack,
+ // check (frequently) for stack-buffer-overrun corruptions
+ pCF->CheckGSCookies();
+
+ // Since the stackwalker callback may execute arbitrary managed code and possibly
+ // not even return (in the case of exception unwinding), explicitly clear the
+ // stackwalker thread state indicator around the callback.
+
+ CLEAR_THREAD_TYPE_STACKWALKER();
+
+ StackWalkAction swa = pCallback(pCF, (VOID*)pData);
+
+ SET_THREAD_TYPE_STACKWALKER(this);
+
+ pCF->CheckGSCookies();
+
+#ifdef _DEBUG
+ if (swa == SWA_ABORT)
+ {
+ LOG((LF_GCROOTS, LL_INFO10000, "STACKWALK: SWA_ABORT: callback aborted the stackwalk\n"));
+ }
+#endif // _DEBUG
+
+ return swa;
+}
+
+
+#if !defined(DACCESS_COMPILE) && defined(_TARGET_X86_)
+#define STACKWALKER_MAY_POP_FRAMES
+#endif
+
+
+StackWalkAction Thread::StackWalkFramesEx(
+ PREGDISPLAY pRD, // virtual register set at crawl start
+ PSTACKWALKFRAMESCALLBACK pCallback,
+ VOID *pData,
+ unsigned flags,
+ PTR_Frame pStartFrame
+ )
+{
+ // Note: there are cases (i.e., exception handling) where we may never return from this function. This means
+ // that any C++ destructors pushed in this function will never execute, and it means that this function can
+ // never have a dynamic contract.
+ STATIC_CONTRACT_WRAPPER;
+ STATIC_CONTRACT_SO_INTOLERANT;
+ SCAN_IGNORE_THROW; // see contract above
+ SCAN_IGNORE_TRIGGER; // see contract above
+
+ _ASSERTE(pRD);
+ _ASSERTE(pCallback);
+
+ // when POPFRAMES we don't want to allow GC trigger.
+ // The only method that guarantees this now is COMPlusUnwindCallback
+#ifdef STACKWALKER_MAY_POP_FRAMES
+ ASSERT(!(flags & POPFRAMES) || pCallback == (PSTACKWALKFRAMESCALLBACK) COMPlusUnwindCallback);
+ ASSERT(!(flags & POPFRAMES) || pRD->pContextForUnwind != NULL);
+ ASSERT(!(flags & POPFRAMES) || (this == GetThread() && PreemptiveGCDisabled()));
+#else // STACKWALKER_MAY_POP_FRAMES
+ ASSERT(!(flags & POPFRAMES));
+#endif // STACKWALKER_MAY_POP_FRAMES
+
+ // We haven't set the stackwalker thread type flag yet, so it shouldn't be set. The only
+ // exception is if the current call is made by a hijacking profiler which
+ // redirected this thread while it was previously in the middle of another stack walk.
+#ifdef PROFILING_SUPPORTED
+ _ASSERTE(CORProfilerStackSnapshotEnabled() || !IsStackWalkerThread());
+#else
+ _ASSERTE(!IsStackWalkerThread());
+#endif
+
+ StackWalkAction retVal = SWA_FAILED;
+
+ {
+ // SCOPE: Remember that we're walking the stack.
+ //
+ // Normally, we'd use a holder (ClrFlsThreadTypeSwitch) to temporarily set this
+ // flag in the thread state, but we can't in this function, since C++ destructors
+ // are forbidden when this is called for exception handling (which causes
+ // MakeStackwalkerCallback() not to return). Note that in exception handling
+ // cases, we will have already cleared the stack walker thread state indicator inside
+ // MakeStackwalkerCallback(), so we will be properly cleaned up.
+#if !defined(DACCESS_COMPILE)
+ PVOID pStackWalkThreadOrig = ClrFlsGetValue(TlsIdx_StackWalkerWalkingThread);
+#endif
+ SET_THREAD_TYPE_STACKWALKER(this);
+
+ StackFrameIterator iter;
+ if (iter.Init(this, pStartFrame, pRD, flags) == TRUE)
+ {
+ while (iter.IsValid())
+ {
+ retVal = MakeStackwalkerCallback(&iter.m_crawl, pCallback, pData DEBUG_ARG(iter.m_uFramesProcessed));
+ if (retVal == SWA_ABORT)
+ {
+ break;
+ }
+
+ retVal = iter.Next();
+ if (retVal == SWA_FAILED)
+ {
+ break;
+ }
+ }
+ }
+
+ SET_THREAD_TYPE_STACKWALKER(pStackWalkThreadOrig);
+ }
+
+ return retVal;
+} // StackWalkAction Thread::StackWalkFramesEx()
+
+StackWalkAction Thread::StackWalkFrames(PSTACKWALKFRAMESCALLBACK pCallback,
+ VOID *pData,
+ unsigned flags,
+ PTR_Frame pStartFrame)
+{
+ // Note: there are cases (i.e., exception handling) where we may never return from this function. This means
+ // that any C++ destructors pushed in this function will never execute, and it means that this function can
+ // never have a dynamic contract.
+ STATIC_CONTRACT_WRAPPER;
+ _ASSERTE((flags & THREAD_IS_SUSPENDED) == 0 || (flags & ALLOW_ASYNC_STACK_WALK));
+
+ T_CONTEXT ctx;
+ REGDISPLAY rd;
+ bool fUseInitRegDisplay;
+
+#ifndef DACCESS_COMPILE
+ _ASSERTE(GetThread() == this || (flags & ALLOW_ASYNC_STACK_WALK));
+ BOOL fDebuggerHasInitialContext = (GetFilterContext() != NULL);
+ BOOL fProfilerHasInitialContext = (GetProfilerFilterContext() != NULL);
+
+ // If this walk is seeded by a profiler, then the walk better be done by the profiler
+ _ASSERTE(!fProfilerHasInitialContext || (flags & PROFILER_DO_STACK_SNAPSHOT));
+
+ fUseInitRegDisplay = fDebuggerHasInitialContext || fProfilerHasInitialContext;
+#else
+ fUseInitRegDisplay = true;
+#endif
+
+ if(fUseInitRegDisplay)
+ {
+ if (GetProfilerFilterContext() != NULL)
+ {
+ if (!InitRegDisplay(&rd, GetProfilerFilterContext(), TRUE))
+ {
+ LOG((LF_CORPROF, LL_INFO100, "**PROF: InitRegDisplay(&rd, GetProfilerFilterContext() failure leads to SWA_FAILED.\n"));
+ return SWA_FAILED;
+ }
+ }
+ else
+ {
+ if (!InitRegDisplay(&rd, &ctx, FALSE))
+ {
+ LOG((LF_CORPROF, LL_INFO100, "**PROF: InitRegDisplay(&rd, &ctx, FALSE) failure leads to SWA_FAILED.\n"));
+ return SWA_FAILED;
+ }
+ }
+ }
+ else
+ {
+ // Initialize the context
+ memset(&ctx, 0x00, sizeof(T_CONTEXT));
+ SetIP(&ctx, 0);
+ SetSP(&ctx, 0);
+ SetFP(&ctx, 0);
+ LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK starting with partial context\n"));
+ FillRegDisplay(&rd, &ctx);
+ }
+
+#ifdef STACKWALKER_MAY_POP_FRAMES
+ if (flags & POPFRAMES)
+ rd.pContextForUnwind = &ctx;
+#endif
+
+ return StackWalkFramesEx(&rd, pCallback, pData, flags, pStartFrame);
+}
+
+StackWalkAction StackWalkFunctions(Thread * thread,
+ PSTACKWALKFRAMESCALLBACK pCallback,
+ VOID * pData)
+{
+ // Note: there are cases (i.e., exception handling) where we may never return from this function. This means
+ // that any C++ destructors pushed in this function will never execute, and it means that this function can
+ // never have a dynamic contract.
+ STATIC_CONTRACT_WRAPPER;
+
+ return thread->StackWalkFrames(pCallback, pData, FUNCTIONSONLY);
+}
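+
+// Illustrative usage sketch (an addition, not original code; the callback name
+// is hypothetical): a minimal stackwalk callback that visits each managed
+// method and continues the walk.
+//
+// static StackWalkAction VisitMethodSketch(CrawlFrame* pCf, VOID* pData)
+// {
+//     MethodDesc* pMD = pCf->GetFunction();
+//     if (pMD != NULL)
+//     {
+//         // inspect pMD here; return SWA_ABORT to stop the walk early
+//     }
+//     return SWA_CONTINUE;
+// }
+//
+// Invoked as: pThread->StackWalkFrames(VisitMethodSketch, NULL, FUNCTIONSONLY);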
+
+// ----------------------------------------------------------------------------
+// StackFrameIterator::StackFrameIterator
+//
+// Description:
+// This constructor is for the usage pattern of creating an uninitialized StackFrameIterator and then
+// calling Init() on it.
+//
+// Assumptions:
+// * The caller needs to call Init() with the correct arguments before using the StackFrameIterator.
+//
+
+StackFrameIterator::StackFrameIterator()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ CommonCtor(NULL, NULL, 0xbaadf00d);
+} // StackFrameIterator::StackFrameIterator()
+
+// ----------------------------------------------------------------------------
+// StackFrameIterator::StackFrameIterator
+//
+// Description:
+// This constructor is for the usage pattern of creating an initialized StackFrameIterator and then
+// calling ResetRegDisp() on it.
+//
+// Arguments:
+// * pThread - the thread to walk
+// * pFrame - the starting explicit frame; NULL means use the top explicit frame from the frame chain
+// * flags - the stackwalk flags
+//
+// Assumptions:
+// * The caller can call ResetRegDisp() to use the StackFrameIterator without calling Init() first.
+//
+
+StackFrameIterator::StackFrameIterator(Thread * pThread, PTR_Frame pFrame, ULONG32 flags)
+{
+ SUPPORTS_DAC;
+ CommonCtor(pThread, pFrame, flags);
+} // StackFrameIterator::StackFrameIterator()
+
+// ----------------------------------------------------------------------------
+// StackFrameIterator::CommonCtor
+//
+// Description:
+// This is a helper for the two constructors.
+//
+// Arguments:
+// * pThread - the thread to walk
+// * pFrame - the starting explicit frame; NULL means use the top explicit frame from the frame chain
+// * flags - the stackwalk flags
+//
+
+void StackFrameIterator::CommonCtor(Thread * pThread, PTR_Frame pFrame, ULONG32 flags)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ INDEBUG(m_uFramesProcessed = 0);
+
+ m_frameState = SFITER_UNINITIALIZED;
+ m_pThread = pThread;
+
+ m_pStartFrame = pFrame;
+#if defined(_DEBUG)
+ if (m_pStartFrame != NULL)
+ {
+ m_pRealStartFrame = m_pStartFrame;
+ }
+ else if (m_pThread != NULL)
+ {
+ m_pRealStartFrame = m_pThread->GetFrame();
+ }
+ else
+ {
+ m_pRealStartFrame = NULL;
+ }
+#endif // _DEBUG
+
+ m_flags = flags;
+ m_codeManFlags = (ICodeManagerFlags)0;
+
+ m_pCachedGSCookie = NULL;
+
+#if defined(WIN64EXCEPTIONS)
+ m_sfParent = StackFrame();
+ ResetGCRefReportingState();
+ m_fDidFuncletReportGCReferences = true;
+#endif // WIN64EXCEPTIONS
+
+#if defined(_WIN64) || defined(_TARGET_ARM_)
+ m_pvResumableFrameTargetSP = NULL;
+#endif // defined(_WIN64) || defined(_TARGET_ARM_)
+} // StackFrameIterator::CommonCtor()
+
+//---------------------------------------------------------------------------------------
+//
+// Initialize the iterator. Note that the iterator has thread-affinity,
+// and the stackwalk flags cannot be changed once the iterator is created.
+// Depending on the flags, initialization may involve unwinding to a frame of interest.
+// The unwinding could fail.
+//
+// Arguments:
+// pThread - the thread to walk
+// pFrame - the starting explicit frame; NULL means use the top explicit frame from
+// pThread->GetFrame()
+// pRegDisp - the initial REGDISPLAY
+// flags - the stackwalk flags
+//
+// Return Value:
+// Returns true if the initialization is successful. The initialization could fail because
+// we fail to unwind.
+//
+// Notes:
+// Do not do anything funky between initializing a StackFrameIterator and actually using it.
+// In particular, do not resume the thread. We only unhijack the thread once in Init().
+// Refer to StackWalkFramesEx() for the typical usage pattern.
+//
+
+BOOL StackFrameIterator::Init(Thread * pThread,
+ PTR_Frame pFrame,
+ PREGDISPLAY pRegDisp,
+ ULONG32 flags)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(pThread != NULL);
+ _ASSERTE(pRegDisp != NULL);
+
+#if !defined(DACCESS_COMPILE)
+ // When the LIGHTUNWIND flag is set, we use the stack walk cache.
+ // On x64, accesses to the stack walk cache are synchronized by
+ // a CrstStatic, which may need to call back into the host.
+ _ASSERTE(CanThisThreadCallIntoHost() || (flags & LIGHTUNWIND) == 0);
+#endif // DACCESS_COMPILE
+
+#if defined(_WIN64) || defined(_TARGET_ARM_)
+ _ASSERTE(!(flags & POPFRAMES));
+ _ASSERTE(pRegDisp->pCurrentContext);
+#endif // _WIN64 || _TARGET_ARM_
+
+ BEGIN_FORBID_TYPELOAD();
+
+#ifdef FEATURE_HIJACK
+ // We can't crawl the stack of a thread that currently has a hijack pending
+ // (since the hijack routine won't be recognized by any code manager). So we
+ // undo any hijack, the EE will re-attempt it later.
+
+#if !defined(DACCESS_COMPILE)
+ // OOP stackwalks need to deal with hijacked threads in a special way.
+ pThread->UnhijackThread();
+#endif // !DACCESS_COMPILE
+
+#endif // FEATURE_HIJACK
+
+ // FRAME_TOP and NULL must be distinct values. This assert
+ // will fire if someone changes this.
+ static_assert_no_msg(FRAME_TOP_VALUE != NULL);
+
+ m_frameState = SFITER_UNINITIALIZED;
+
+ m_pThread = pThread;
+ m_flags = flags;
+
+ ResetCrawlFrame();
+
+ m_pStartFrame = pFrame;
+ if (m_pStartFrame)
+ {
+ m_crawl.pFrame = m_pStartFrame;
+ }
+ else
+ {
+ m_crawl.pFrame = m_pThread->GetFrame();
+ _ASSERTE(m_crawl.pFrame != NULL);
+ }
+ INDEBUG(m_pRealStartFrame = m_crawl.pFrame);
+
+ if (m_crawl.pFrame != FRAME_TOP)
+ {
+ m_crawl.SetCurGSCookie(Frame::SafeGetGSCookiePtr(m_crawl.pFrame));
+ }
+
+ m_crawl.pRD = pRegDisp;
+ m_crawl.pAppDomain = pThread->GetDomain(INDEBUG(flags & PROFILER_DO_STACK_SNAPSHOT));
+
+ m_codeManFlags = (ICodeManagerFlags)((flags & QUICKUNWIND) ? 0 : UpdateAllRegs);
+ m_scanFlag = ExecutionManager::GetScanFlags();
+
+#if defined(ELIMINATE_FEF)
+ // Walk the ExInfo chain, past any specified starting frame.
+ m_exInfoWalk.Init(&(pThread->GetExceptionState()->m_currentExInfo));
+ // false means don't reset UseExInfoForStackwalk
+ m_exInfoWalk.WalkToPosition(dac_cast<TADDR>(m_pStartFrame), false);
+#endif // ELIMINATE_FEF
+
+ //
+ // These fields are used in the iteration and will be updated on a per-frame basis:
+ //
+ // EECodeInfo m_cachedCodeInfo;
+ //
+ // GSCookie * m_pCachedGSCookie;
+ //
+ // StackFrame m_sfParent;
+ //
+ // LPVOID m_pvResumableFrameTargetSP;
+ //
+
+ // process the REGDISPLAY and stop at the first frame
+ ProcessIp(GetControlPC(m_crawl.pRD));
+ ProcessCurrentFrame();
+
+ // advance to the next frame which matches the stackwalk flags
+ StackWalkAction retVal = Filter();
+
+ END_FORBID_TYPELOAD();
+
+ return (retVal == SWA_CONTINUE);
+} // StackFrameIterator::Init()
+
+//---------------------------------------------------------------------------------------
+//
+// Reset the stackwalk iterator with the specified REGDISPLAY.
+// The caller is responsible for making sure the REGDISPLAY is valid.
+// This function is very similar to Init(), except that this function takes a REGDISPLAY
+// to seed the stackwalk. This function may also unwind depending on the flags, and the
+// unwinding may fail.
+//
+// Arguments:
+// pRegDisp - new REGDISPLAY
+// bool - whether the REGDISPLAY is for the leaf frame
+//
+// Return Value:
+// Returns true if the reset is successful. The reset could fail because
+// we fail to unwind.
+//
+// Assumptions:
+// The REGDISPLAY is valid for the thread which the iterator has affinity to.
+//
+
+BOOL StackFrameIterator::ResetRegDisp(PREGDISPLAY pRegDisp,
+ bool fIsFirst)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ // It is invalid to reset a stackwalk if we are popping frames along the way.
+ ASSERT(!(m_flags & POPFRAMES));
+
+ BEGIN_FORBID_TYPELOAD();
+
+ m_frameState = SFITER_UNINITIALIZED;
+
+ // Make sure the StackFrameIterator has been initialized properly.
+ _ASSERTE(m_pThread != NULL);
+ _ASSERTE(m_flags != 0xbaadf00d);
+
+ ResetCrawlFrame();
+
+ m_crawl.isFirst = fIsFirst;
+
+ if (m_pStartFrame)
+ {
+ m_crawl.pFrame = m_pStartFrame;
+ }
+ else
+ {
+ m_crawl.pFrame = m_pThread->GetFrame();
+ _ASSERTE(m_crawl.pFrame != NULL);
+ }
+
+ if (m_crawl.pFrame != FRAME_TOP)
+ {
+ m_crawl.SetCurGSCookie(Frame::SafeGetGSCookiePtr(m_crawl.pFrame));
+ }
+
+ m_crawl.pRD = pRegDisp;
+
+ // We initialize the appdomain to be the current domain; this needs to be updated below.
+ m_crawl.pAppDomain = m_crawl.pThread->GetDomain(INDEBUG(m_flags & PROFILER_DO_STACK_SNAPSHOT));
+
+ m_codeManFlags = (ICodeManagerFlags)((m_flags & QUICKUNWIND) ? 0 : UpdateAllRegs);
+
+ // make sure the REGDISPLAY is synchronized with the CONTEXT
+ UpdateRegDisp();
+
+ PCODE curPc = GetControlPC(pRegDisp);
+ ProcessIp(curPc);
+
+ // Loop the frame chain to find the closest explicit frame which is lower than the specified REGDISPLAY
+ // (the stack grows towards lower addresses).
+ if (m_crawl.pFrame != FRAME_TOP)
+ {
+ TADDR curSP = GetRegdisplaySP(m_crawl.pRD);
+
+#if defined(_WIN64) || defined(_TARGET_ARM_)
+ if (m_crawl.IsFrameless())
+ {
+ // On 64-bit and ARM, we stop at the explicit frames contained in a managed stack frame
+ // before the managed stack frame itself.
+ EECodeManager::EnsureCallerContextIsValid(m_crawl.pRD, NULL);
+ curSP = GetSP(m_crawl.pRD->pCallerContext);
+ }
+#endif // defined(_WIN64) || defined(_TARGET_ARM_)
+
+#if defined(_TARGET_X86_)
+ // special processing on x86; see below for more information
+ TADDR curEBP = GetRegdisplayFP(m_crawl.pRD);
+
+ CONTEXT tmpCtx;
+ REGDISPLAY tmpRD;
+ CopyRegDisplay(m_crawl.pRD, &tmpRD, &tmpCtx);
+#endif // _TARGET_X86_
+
+ //
+ // The basic idea is to loop the frame chain until we find an explicit frame whose address is below
+ // (close to the root) the SP in the specified REGDISPLAY. This works well on WIN64 platforms.
+ // However, on x86, in M2U transitions, the Windows debuggers will pass us an incorrect REGDISPLAY
+ // for the managed stack frame at the M2U boundary. The REGDISPLAY is obtained by unwinding the
+ // marshaling stub, and it contains an SP which is actually higher (closer to the leaf) than the
+ // address of the transition frame. It is as if the explicit frame is not contained in the stack
+ // frame of any method. Here's an example:
+ //
+ // ChildEBP
+ // 0012e884 ntdll32!DbgBreakPoint
+ // 0012e89c CLRStub[StubLinkStub]@1f0ac1e
+ // 0012e8a4 invalid ESP of Foo() according to the REGDISPLAY specified by the debuggers
+ // 0012e8b4 address of transition frame (NDirectMethodFrameStandalone)
+ // 0012e8c8 real ESP of Foo() according to the transition frame
+ // 0012e8d8 managed!Dummy.Foo()+0x20
+ //
+ // The original implementation of ResetRegDisp() compares the return address of the transition frame
+ // and the IP in the specified REGDISPLAY to work around this problem. However, even this comparison
+ // is not enough because we may have recursive pinvoke calls on the stack (albeit an unlikely
+ // scenario). So in addition to the IP comparison, we also check EBP. Note that this does not
+ // require managed stack frames to be EBP-framed.
+ //
+
+ while (m_crawl.pFrame != FRAME_TOP)
+ {
+ // this check is sufficient on WIN64
+ if (dac_cast<TADDR>(m_crawl.pFrame) >= curSP)
+ {
+#if defined(_TARGET_X86_)
+ // check the IP
+ if (m_crawl.pFrame->GetReturnAddress() != curPc)
+ {
+ break;
+ }
+ else
+ {
+ // unwind the REGDISPLAY using the transition frame and check the EBP
+ m_crawl.pFrame->UpdateRegDisplay(&tmpRD);
+ if (GetRegdisplayFP(&tmpRD) != curEBP)
+ {
+ break;
+ }
+ }
+#else // !_TARGET_X86_
+ break;
+#endif // !_TARGET_X86_
+ }
+
+ // if the REGDISPLAY represents the managed stack frame at a M2U transition boundary,
+ // update the flags on the CrawlFrame and the REGDISPLAY
+ PCODE frameRetAddr = m_crawl.pFrame->GetReturnAddress();
+ if (frameRetAddr == curPc)
+ {
+ unsigned uFrameAttribs = m_crawl.pFrame->GetFrameAttribs();
+
+ m_crawl.isFirst = ((uFrameAttribs & Frame::FRAME_ATTR_RESUMABLE) != 0);
+ m_crawl.isInterrupted = ((uFrameAttribs & Frame::FRAME_ATTR_EXCEPTION) != 0);
+
+ if (m_crawl.isInterrupted)
+ {
+ m_crawl.hasFaulted = ((uFrameAttribs & Frame::FRAME_ATTR_FAULTED) != 0);
+ m_crawl.isIPadjusted = ((uFrameAttribs & Frame::FRAME_ATTR_OUT_OF_LINE) != 0);
+ }
+
+ m_crawl.pFrame->UpdateRegDisplay(m_crawl.pRD);
+
+ _ASSERTE(curPc == GetControlPC(m_crawl.pRD));
+ }
+
+ // this call also updates the appdomain if the explicit frame is a ContextTransitionFrame
+ m_crawl.GotoNextFrame();
+ }
+ }
+
+#if defined(ELIMINATE_FEF)
+ // Similarly, we need to walk the ExInfos.
+ m_exInfoWalk.Init(&(m_crawl.pThread->GetExceptionState()->m_currentExInfo));
+ // false means don't reset UseExInfoForStackwalk
+ m_exInfoWalk.WalkToPosition(GetRegdisplaySP(m_crawl.pRD), false);
+#endif // ELIMINATE_FEF
+
+ // now that everything is at where it should be, update the CrawlFrame
+ ProcessCurrentFrame();
+
+ // advance to the next frame which matches the stackwalk flags
+ StackWalkAction retVal = Filter();
+
+ END_FORBID_TYPELOAD();
+
+ return (retVal == SWA_CONTINUE);
+} // StackFrameIterator::ResetRegDisp()
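+
+// A minimal usage sketch (illustrative comments only, not part of this file's logic;
+// the locals 'iter', 'rd' and 'fIsLeaf' are hypothetical). A caller that obtains a new
+// register state for the same thread can reseed an existing iterator instead of
+// constructing a new one:
+//
+//   if (!iter.ResetRegDisp(&rd, fIsLeaf))
+//   {
+//       // the reset failed because an unwind failed; abandon this walk
+//   }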
+
+
+//---------------------------------------------------------------------------------------
+//
+// Reset the CrawlFrame owned by the iterator. Used by both Init() and ResetRegDisp().
+//
+// Assumptions:
+// this->m_pThread and this->m_flags have been initialized.
+//
+// Notes:
+// In addition, the following fields are not reset. The caller must update them:
+// pFrame, pFunc, pAppDomain, pRD
+//
+// Fields updated by ProcessIp():
+// isFrameless, and codeInfo
+//
+// Fields updated by ProcessCurrentFrame():
+// codeManState
+//
+
+void StackFrameIterator::ResetCrawlFrame()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ INDEBUG(memset(&(m_crawl.pFunc), 0xCC, sizeof(m_crawl.pFunc)));
+
+ m_crawl.isFirst = true;
+ m_crawl.isInterrupted = false;
+ m_crawl.hasFaulted = false;
+ m_crawl.isIPadjusted = false; // can be removed
+
+ m_crawl.isNativeMarker = false;
+ m_crawl.isProfilerDoStackSnapshot = !!(this->m_flags & PROFILER_DO_STACK_SNAPSHOT);
+ m_crawl.isNoFrameTransition = false;
+
+ m_crawl.taNoFrameTransitionMarker = NULL;
+
+#if defined(WIN64EXCEPTIONS)
+ m_crawl.isFilterFunclet = false;
+ m_crawl.isFilterFuncletCached = false;
+ m_crawl.fShouldParentToFuncletSkipReportingGCReferences = false;
+#endif // WIN64EXCEPTIONS
+
+ m_crawl.pThread = this->m_pThread;
+
+ m_crawl.pSecurityObject = NULL;
+ m_crawl.isCachedMethod = false;
+ m_crawl.stackWalkCache.ClearEntry();
+
+ m_crawl.pCurGSCookie = NULL;
+ m_crawl.pFirstGSCookie = NULL;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// This function indicates whether the iterator has reached the root of the stack or not.
+// It can be used as the loop-terminating condition for the iterator.
+//
+// Return Value:
+// Returns true if there are more frames on the stack to walk.
+//
+
+BOOL StackFrameIterator::IsValid(void)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ // There is more to iterate if the stackwalk is currently in managed code,
+ // or if there are frames left.
+ // If there is an ExInfo with a pContext, it may substitute for a Frame,
+ // if the ExInfo is due to an exception in managed code.
+ if (!m_crawl.isFrameless && m_crawl.pFrame == FRAME_TOP)
+ {
+ // if we are stopped at a native marker frame, we can still advance at least once more
+ if (m_frameState == SFITER_NATIVE_MARKER_FRAME)
+ {
+ _ASSERTE(m_crawl.isNativeMarker);
+ return TRUE;
+ }
+
+#if defined(ELIMINATE_FEF)
+ // Not in managed code, and no frames left -- check for an ExInfo.
+ // @todo: check for exception?
+ m_exInfoWalk.WalkToManaged();
+ if (m_exInfoWalk.GetContext())
+ return TRUE;
+#endif // ELIMINATE_FEF
+
+#ifdef _DEBUG
+ // Try to ensure that the frame chain did not change underneath us.
+ // In particular, is thread's starting frame the same as it was when
+ // we started?
+ //DevDiv 168789: In GCStress >= 4, two threads could race on triggering a GC;
+ // if the one that just made the p/invoke call is second and hits the trap instruction
+ // before the call to synchronize with the GC, it will push a RedirectedThreadFrame concurrently
+ // with the GC stackwalk.
+ // In the normal case (no GCStress), after the p/invoke, the IL_STUB will check if a GC is in progress and synchronize.
+ BOOL bRedirectedPinvoke = FALSE;
+
+#ifdef FEATURE_HIJACK
+ bRedirectedPinvoke = ((GCStress<cfg_instr>::IsEnabled()) &&
+ (m_pRealStartFrame != NULL) &&
+ (m_pRealStartFrame != FRAME_TOP) &&
+ (m_pRealStartFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr()) &&
+ (m_pThread->GetFrame() != NULL) &&
+ (m_pThread->GetFrame() != FRAME_TOP) &&
+ (m_pThread->GetFrame()->GetVTablePtr() == RedirectedThreadFrame::GetMethodFrameVPtr()));
+#endif // FEATURE_HIJACK
+
+ _ASSERTE( (m_pStartFrame != NULL) ||
+ (m_flags & POPFRAMES) ||
+ (m_pRealStartFrame == m_pThread->GetFrame()) ||
+ (bRedirectedPinvoke));
+#endif //_DEBUG
+
+ return FALSE;
+ }
+
+ return TRUE;
+} // StackFrameIterator::IsValid()
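+
+// A minimal walk-loop sketch (illustrative comments only, not part of this file's logic;
+// 'iter' is assumed to be an initialized StackFrameIterator). IsValid() is the
+// loop-terminating condition, as described above:
+//
+//   while (iter.IsValid())
+//   {
+//       // examine the current frame here
+//       if (iter.Next() == SWA_FAILED)
+//       {
+//           break; // the iterator could not advance
+//       }
+//   }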
+
+//---------------------------------------------------------------------------------------
+//
+// Advance to the next frame according to the stackwalk flags. If the iterator is stopped
+// at some place not specified by the stackwalk flags, this function will automatically advance
+// to the next frame.
+//
+// Return Value:
+// SWA_CONTINUE (== SWA_DONE) if the iterator is successful in advancing to the next frame
+// SWA_FAILED if an operation performed by the iterator fails
+//
+// Notes:
+// This function returns SWA_DONE when advancing from the last frame to becoming invalid.
+// It returns SWA_FAILED if the iterator is invalid.
+//
+
+StackWalkAction StackFrameIterator::Next(void)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ if (!IsValid())
+ {
+ return SWA_FAILED;
+ }
+
+ BEGIN_FORBID_TYPELOAD();
+
+ StackWalkAction retVal = NextRaw();
+ if (retVal == SWA_CONTINUE)
+ {
+ retVal = Filter();
+ }
+
+ END_FORBID_TYPELOAD();
+ return retVal;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Check whether we should stop at the current frame given the stackwalk flags.
+// If not, continue advancing to the next frame.
+//
+// Return Value:
+// Returns SWA_CONTINUE (== SWA_DONE) if the iterator is invalid or if no automatic advancing is done.
+// Otherwise returns whatever the last call to NextRaw() returns.
+//
+
+StackWalkAction StackFrameIterator::Filter(void)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ bool fStop = false;
+ bool fSkippingFunclet = false;
+
+#if defined(WIN64EXCEPTIONS)
+ bool fRecheckCurrentFrame = false;
+ bool fSkipFuncletCallback = true;
+#endif // defined(WIN64EXCEPTIONS)
+
+ StackWalkAction retVal = SWA_CONTINUE;
+
+ while (IsValid())
+ {
+ fStop = false;
+ fSkippingFunclet = false;
+
+#if defined(WIN64EXCEPTIONS)
+ fRecheckCurrentFrame = false;
+ fSkipFuncletCallback = true;
+
+ // by default, there is no funclet for the current frame
+ // that reported GC references
+ m_crawl.fShouldParentToFuncletSkipReportingGCReferences = false;
+
+ // By default, assume that we are going to report GC references for this
+ // CrawlFrame
+ m_crawl.fShouldCrawlframeReportGCReferences = true;
+
+ if (!m_sfParent.IsNull())
+ {
+ // we are now skipping frames to get to the funclet's parent
+ fSkippingFunclet = true;
+ }
+#endif // WIN64EXCEPTIONS
+
+ switch (m_frameState)
+ {
+ case SFITER_FRAMELESS_METHOD:
+#if defined(WIN64EXCEPTIONS)
+ProcessFuncletsForGCReporting:
+ do
+ {
+ fRecheckCurrentFrame = false;
+
+ // When enumerating GC references for "liveness" reporting, depending upon the architecture,
+ // the responsibility of who reports what varies:
+ //
+ // 1) On x86, Evanesco (ARM) and X64 (via RyuJIT) the funclet reports all references belonging to itself and its parent method.
+ //
+ // 2) X64 (via JIT64) has the reporting distributed between the funclets and the parent method.
+ // If some reference(s) get double reported, JIT64 can handle that by playing conservative.
+ //
+ // 3) On Evanesco, the reporting is done by funclets (if present). Otherwise, the primary method
+ // does it.
+ //
+ // On x64 and Evanesco, the GCStackCrawlCallback is invoked with a new flag indicating that
+ // the stackwalk is being done for GC reporting purposes - this flag is GC_FUNCLET_REFERENCE_REPORTING.
+ // The presence of this flag influences how stackwalker will enumerate frames, which frames will
+ // result in the callback being invoked, etc. The idea is that we want to report, via the
+ // callback, only the relevant frames that are active on the callstack. This removes the need to
+ // double report (even though JIT64 does it today), reporting of dead frames, and makes the
+ // design of reference reporting more consistent (and easier to understand) across architectures.
+ //
+ // NOTE: This flag is applicable only to X64 and Evanesco (ARM).
+ //
+ // The algorithm is as follows (at a conceptual level):
+ //
+ // 1) For each enumerated managed (frameless) frame, check if it is a funclet or not.
+ // 1.1) If it's not a funclet, pass the frame to the callback and goto (2).
+ // 1.2) If it's a funclet, we preserve the callerSP of the parent frame where the funclet was invoked from.
+ // Pass the funclet to the callback.
+ // 1.3) For filter funclets, we enumerate all frames until we reach the parent. Once the parent is reached,
+ // pass it to the callback with a flag indicating that its corresponding funclet has already performed
+ // the reporting.
+ // 1.4) For non-filter funclets, we skip all the frames until we reach the parent. Once the parent is reached,
+ // pass it to the callback with a flag indicating that its corresponding funclet has already performed
+ // the reporting.
+ // 1.5) If we see non-filter funclets while processing a filter funclet, then goto (1.4). Once we have reached the
+ // parent of the non-filter funclet, resume filter funclet processing as described in (1.3).
+ // 2) If another frame enumerated, goto (1). Otherwise, stackwalk is complete.
+ //
+ // Note: When a flag is passed to the callback indicating that the funclet for a parent frame has already
+ // reported the references, RyuJIT (Evanesco) will simply do nothing and return from the callback.
+ // JIT64, on the other hand, will ignore the flag and perform reporting (again), like it does today.
+ //
+ // Note: For non-filter funclets there is a small window during unwind where we have conceptually unwound past a
+ // funclet but have not yet reached the parent/handling frame. In this case we might need the parent to
+ // report its GC roots. See comments around use of m_fDidFuncletReportGCReferences for more details.
+ //
+ // Needless to say, all applicable (read: active) explicit frames are also processed.
+ //
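+ // As a rough map from the algorithm above to the state used below (roles inferred
+ // from this function; see the code for the authoritative details):
+ //
+ // m_sfFuncletParent - parent frame found in step (1.2)
+ // m_fProcessNonFilterFunclet - set in step (1.4) while skipping to a non-filter funclet's parent
+ // m_sfIntermediaryFuncletParent / m_fProcessIntermediaryNonFilterFunclet - set in step (1.5)
+ // m_sfParent - the parent frame we are currently skipping towards
+ // fSkipFuncletCallback - cleared when the funclet itself must still get a callback
+ //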
+ // Check if we are in the mode of enumerating GC references (or not)
+ if (m_flags & GC_FUNCLET_REFERENCE_REPORTING)
+ {
+ // Do we already have a reference to a funclet parent?
+ if (!m_sfFuncletParent.IsNull())
+ {
+ // Have we been processing a filter funclet without encountering any non-filter funclets?
+ if ((m_fProcessNonFilterFunclet == false) && (m_fProcessIntermediaryNonFilterFunclet == false))
+ {
+ // Yes, we have. Check the current frame
+ // and if it is the parent we are looking for,
+ // clear the flag indicating that its funclet
+ // has already reported the GC references (see
+ // below comment for Dev11 376329 explaining
+ // why we do this).
+ if (ExceptionTracker::IsUnwoundToTargetParentFrame(&m_crawl, m_sfFuncletParent))
+ {
+ STRESS_LOG2(LF_GCROOTS, LL_INFO100,
+ "STACKWALK: Reached parent of filter funclet @ CallerSP: %p, m_crawl.pFunc = %p\n",
+ m_sfFuncletParent.SP, m_crawl.pFunc);
+
+ // Dev11 376329 - ARM: GC hole during filter funclet dispatch
+ // filters are invoked during the first pass so we cannot skip
+ // reporting the parent frame since it's still live. normally
+ // this would cause double reporting however for filters the JIT
+ // will report all GC roots as pinned to alleviate this problem.
+ // note that JIT64 does not have this problem since it always
+ // reports the parent frame (this flag is essentially ignored)
+ // so it's safe to make this change for both architectures (but
+ // once AMD64 moves to RyuJIT the fix will become relevant there).
+ m_crawl.fShouldParentToFuncletSkipReportingGCReferences = false;
+ ResetGCRefReportingState();
+
+ // We have reached the parent of the filter funclet.
+ // It is possible this is another funclet (e.g. a catch/fault/finally),
+ // so reexamine this frame and see if it needs any skipping.
+ fRecheckCurrentFrame = true;
+ }
+ else
+ {
+ // When processing filter funclets, until we reach the parent frame
+ // we should be seeing only non-filter-funclet frames. This is because
+ // exceptions cannot escape filter funclets. Thus, there can be no frameless frames
+ // between the filter funclet and its parent.
+ _ASSERTE(!m_crawl.IsFilterFunclet());
+ if (m_crawl.IsFunclet())
+ {
+ // This is a non-filter funclet encountered when processing a filter funclet.
+ // In such a case, we will deliver a callback for it and skip frames until we reach
+ // its parent. Once there, we will resume frame enumeration for finding
+ // parent of the filter funclet we were originally processing.
+ m_sfIntermediaryFuncletParent = ExceptionTracker::FindParentStackFrameForStackWalk(&m_crawl, true);
+ _ASSERTE(!m_sfIntermediaryFuncletParent.IsNull());
+ m_fProcessIntermediaryNonFilterFunclet = true;
+
+ // Set the parent frame so that the funclet skipping logic (further below)
+ // can use it.
+ m_sfParent = m_sfIntermediaryFuncletParent;
+ fSkipFuncletCallback = false;
+ }
+ }
+ }
+ }
+ else
+ {
+ // We don't have any funclet parent reference. Check if the current
+ // frame represents a funclet.
+ if (m_crawl.IsFunclet())
+ {
+ // Get a reference to the funclet's parent frame.
+ m_sfFuncletParent = ExceptionTracker::FindParentStackFrameForStackWalk(&m_crawl, true);
+
+ if (m_sfFuncletParent.IsNull())
+ {
+ // This can only happen if the funclet (and its parent) have been unwound.
+ _ASSERTE(ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException(&m_crawl));
+ }
+ else
+ {
+ // We should have found the funclet's parent stackframe
+ _ASSERTE(!m_sfFuncletParent.IsNull());
+
+ bool fIsFilterFunclet = m_crawl.IsFilterFunclet();
+
+ STRESS_LOG4(LF_GCROOTS, LL_INFO100,
+ "STACKWALK: Found %sFilter funclet @ SP: %p, m_crawl.pFunc = %p; FuncletParentCallerSP: %p\n",
+ (fIsFilterFunclet)?"":"Non-", m_crawl.GetRegisterSet()->SP, m_crawl.pFunc, m_sfFuncletParent.SP);
+
+ if (!fIsFilterFunclet)
+ {
+ m_fProcessNonFilterFunclet = true;
+
+ // Set the parent frame so that the funclet skipping logic (further below)
+ // can use it.
+ m_sfParent = m_sfFuncletParent;
+
+ // For non-filter funclets, we will make the callback for the funclet
+ // but skip all the frames until we reach the parent method. When we do,
+ // we will make a callback for it as well and then continue to make callbacks
+ // for all upstack frames, until we reach another funclet or the top of the stack
+ // is reached.
+ fSkipFuncletCallback = false;
+ }
+ else
+ {
+ m_fProcessNonFilterFunclet = false;
+
+ // Nothing more to do as we have come across a filter funclet. In this case, we will:
+ //
+ // 1) Get a reference to the parent frame
+ // 2) Report the funclet
+ // 3) Continue to report the parent frame, along with a flag that the funclet has been reported (see above)
+ // 4) Continue to report all upstack frames
+ }
+ }
+ }
+ }
+ }
+ }
+ while(fRecheckCurrentFrame == true);
+
+ if ((m_fProcessNonFilterFunclet == true) || (m_fProcessIntermediaryNonFilterFunclet == true) || (m_flags & (FUNCTIONSONLY | SKIPFUNCLETS)))
+ {
+ bool fSkipFrameDueToUnwind = false;
+
+ if (m_flags & GC_FUNCLET_REFERENCE_REPORTING)
+ {
+ // When a nested exception escapes, it will unwind past a funclet. In addition, it'll
+ // unwind the frame chain up to the funclet. When that happens, we'll basically lose
+ // all the stack frames higher than or equal to the funclet. We can't skip funclets in
+ // the usual way because the first frame we see won't be a funclet. It'll be something
+ // which has conceptually been unwound. We need to use the information on the
+ // ExceptionTracker to determine if a stack frame is in the unwound stack region.
+ //
+ // If we are enumerating frames for GC reporting and we determined that
+ // the current frame needs to be reported, ensure that it has not already
+ // been unwound by the active exception. If it has been, then we will set a flag
+ // indicating that its references need not be reported. The CrawlFrame, however,
+ // will still be passed to the GC stackwalk callback in case it represents a dynamic
+ // method, to allow the GC to keep them alive.
+ if (ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException(&m_crawl))
+ {
+ // Invoke the GC callback for this crawlframe (to keep any dynamic methods alive) but do not report its references.
+ m_crawl.fShouldCrawlframeReportGCReferences = false;
+ fSkipFrameDueToUnwind = true;
+
+ if (m_crawl.IsFunclet() && !fSkippingFunclet)
+ {
+ // we have come across a funclet that has been unwound and we haven't yet started to
+ // look for its parent. in such a case, the funclet will not have anything to report
+ // so set the corresponding flag to indicate so.
+
+ _ASSERTE(m_fDidFuncletReportGCReferences);
+ m_fDidFuncletReportGCReferences = false;
+
+ STRESS_LOG0(LF_GCROOTS, LL_INFO100, "Unwound funclet will skip reporting references\n");
+ }
+ }
+ }
+ else if (m_flags & (FUNCTIONSONLY | SKIPFUNCLETS))
+ {
+ if (ExceptionTracker::IsInStackRegionUnwoundByCurrentException(&m_crawl))
+ {
+ // don't stop here
+ fSkipFrameDueToUnwind = true;
+ }
+ }
+
+ if (fSkipFrameDueToUnwind)
+ {
+ if (m_flags & GC_FUNCLET_REFERENCE_REPORTING)
+ {
+ // Check if we are skipping frames.
+ if (!m_sfParent.IsNull())
+ {
+ // Check if we have reached our target method frame.
+ // IsMaxVal() is a special value to indicate that we should skip one frame.
+ if (m_sfParent.IsMaxVal() ||
+ ExceptionTracker::IsUnwoundToTargetParentFrame(&m_crawl, m_sfParent))
+ {
+ // We've finished skipping as told. Now check again.
+
+ if ((m_fProcessIntermediaryNonFilterFunclet == true) || (m_fProcessNonFilterFunclet == true))
+ {
+ STRESS_LOG2(LF_GCROOTS, LL_INFO100,
+ "STACKWALK: Reached parent of non-filter funclet @ CallerSP: %p, m_crawl.pFunc = %p\n",
+ m_sfParent.SP, m_crawl.pFunc);
+
+ // If we are here, we should be in GC reference reporting mode.
+ _ASSERTE(m_flags & GC_FUNCLET_REFERENCE_REPORTING);
+
+ // landing here indicates that the funclet's parent has been unwound so
+ // this will always be true, no need to predicate on the state of the funclet
+ m_crawl.fShouldParentToFuncletSkipReportingGCReferences = true;
+
+ // we've reached the parent so reset our state
+ m_fDidFuncletReportGCReferences = true;
+
+ ResetGCRefReportingState(m_fProcessIntermediaryNonFilterFunclet);
+
+ }
+
+ m_sfParent.Clear();
+
+ if (m_crawl.IsFunclet())
+ {
+ // We've hit a funclet.
+ // Since we are in GC reference reporting mode,
+ // avoid code duplication and go to
+ // funclet processing.
+ goto ProcessFuncletsForGCReporting;
+ }
+ }
+ }
+ }
+
+ if (m_crawl.fShouldCrawlframeReportGCReferences)
+ {
+ // Skip the callback for this frame - we don't do this for unwound frames encountered
+ // in GC stackwalk since they may represent dynamic methods whose resolver objects
+ // the GC may need to keep alive.
+ break;
+ }
+ }
+ else
+ {
+ // Check if we are skipping frames.
+ if (!m_sfParent.IsNull())
+ {
+ // Check if we have reached our target method frame.
+ // IsMaxVal() is a special value to indicate that we should skip one frame.
+ if (m_sfParent.IsMaxVal() ||
+ ExceptionTracker::IsUnwoundToTargetParentFrame(&m_crawl, m_sfParent))
+ {
+ // We've finished skipping as told. Now check again.
+ if ((m_fProcessIntermediaryNonFilterFunclet == true) || (m_fProcessNonFilterFunclet == true))
+ {
+ // If we are here, we should be in GC reference reporting mode.
+ _ASSERTE(m_flags & GC_FUNCLET_REFERENCE_REPORTING);
+
+ STRESS_LOG2(LF_GCROOTS, LL_INFO100,
+ "STACKWALK: Reached parent of non-filter funclet @ CallerSP: %p, m_crawl.pFunc = %p\n",
+ m_sfParent.SP, m_crawl.pFunc);
+
+ // by default a funclet's parent won't report its GC roots since they would have already
+ // been reported by the funclet. however there is a small window during unwind before
+ // control returns to the OS where we might require the parent to report. more below.
+ bool shouldSkipReporting = true;
+
+ if (!m_fDidFuncletReportGCReferences)
+ {
+ // we have reached the parent frame of the funclet which didn't report roots since it was already unwound.
+ // check if the parent frame of the funclet is also handling an exception. if it is, then we will need to
+ // report roots for it since the catch handler may use references inside it.
+
+ ExceptionTracker* pTracker = m_crawl.pThread->GetExceptionState()->GetCurrentExceptionTracker();
+ if (pTracker->GetCallerOfActualHandlingFrame() == m_sfFuncletParent)
+ {
+ // we should not skip reporting for this parent frame
+ shouldSkipReporting = false;
+
+ // now that we've found the parent that will report roots reset our state.
+ m_fDidFuncletReportGCReferences = true;
+ }
+ else if (!m_crawl.IsFunclet())
+ {
+ // we've reached the parent and it's not handling an exception, it's also not
+ // a funclet so reset our state. note that we cannot reset the state when the
+ // parent is a funclet since the leaf funclet didn't report any references and
+ // we might have a catch handler below us that might contain GC roots.
+ m_fDidFuncletReportGCReferences = true;
+ }
+
+ STRESS_LOG4(LF_GCROOTS, LL_INFO100,
+ "Funclet didn't report references: handling frame: %p, m_sfFuncletParent = %p, is funclet: %d, skip reporting %d\n",
+ pTracker->GetEstablisherOfActualHandlingFrame().SP, m_sfFuncletParent.SP, m_crawl.IsFunclet(), shouldSkipReporting);
+ }
+ m_crawl.fShouldParentToFuncletSkipReportingGCReferences = shouldSkipReporting;
+
+ ResetGCRefReportingState(m_fProcessIntermediaryNonFilterFunclet);
+ }
+
+ m_sfParent.Clear();
+ }
+ }
+
+ if (m_sfParent.IsNull() && m_crawl.IsFunclet())
+ {
+ // We've hit a funclet.
+ if (m_flags & GC_FUNCLET_REFERENCE_REPORTING)
+ {
+ // If we are in GC reference reporting mode,
+ // then avoid code duplication and go to
+ // funclet processing.
+ goto ProcessFuncletsForGCReporting;
+ }
+ else
+ {
+ // Start skipping frames.
+ m_sfParent = ExceptionTracker::FindParentStackFrameForStackWalk(&m_crawl);
+ }
+
+ // m_sfParent can be NULL if the current funclet is a filter,
+ // in which case we shouldn't skip the frames.
+ }
+
+ // If we're skipping frames due to a funclet on the stack
+ // or this is an IL stub (which don't get reported when
+ // FUNCTIONSONLY is set) we skip the callback.
+ //
+ // The only exception is the GC reference reporting mode -
+ // for it, we will callback for the funclet so that references
+ // are reported and then continue to skip all frames between the funclet
+ // and its parent, eventually making a callback for the parent as well.
+ if (m_flags & (FUNCTIONSONLY | SKIPFUNCLETS))
+ {
+ if (!m_sfParent.IsNull() || m_crawl.pFunc->IsILStub())
+ {
+ STRESS_LOG4(LF_GCROOTS, LL_INFO100,
+ "STACKWALK: %s: not making callback for this frame, SPOfParent = %p, \
+ isILStub = %d, m_crawl.pFunc = %pM\n",
+ (!m_sfParent.IsNull() ? "SKIPPING_TO_FUNCLET_PARENT" : "IS_IL_STUB"),
+ m_sfParent.SP,
+ (m_crawl.pFunc->IsILStub() ? 1 : 0),
+ m_crawl.pFunc);
+
+ // don't stop here
+ break;
+ }
+ }
+ else if (fSkipFuncletCallback && (m_flags & GC_FUNCLET_REFERENCE_REPORTING))
+ {
+ if (!m_sfParent.IsNull())
+ {
+ STRESS_LOG4(LF_GCROOTS, LL_INFO100,
+ "STACKWALK: %s: not making callback for this frame, SPOfParent = %p, \
+ isILStub = %d, m_crawl.pFunc = %pM\n",
+ (!m_sfParent.IsNull() ? "SKIPPING_TO_FUNCLET_PARENT" : "IS_IL_STUB"),
+ m_sfParent.SP,
+ (m_crawl.pFunc->IsILStub() ? 1 : 0),
+ m_crawl.pFunc);
+
+ // don't stop here
+ break;
+ }
+ }
+ }
+ }
+ else if (m_flags & GC_FUNCLET_REFERENCE_REPORTING)
+ {
+ // If we are enumerating frames for GC reporting and we determined that
+ // the current frame needs to be reported, ensure that it has not already
+ // been unwound by the active exception. If it has been, then we will
+ // simply skip it and not deliver a callback for it.
+ if (ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException(&m_crawl))
+ {
+ // Invoke the GC callback for this crawlframe (to keep any dynamic methods alive) but do not report its references.
+ m_crawl.fShouldCrawlframeReportGCReferences = false;
+ }
+ }
+
+#else // WIN64EXCEPTIONS
+ // Skip IL stubs
+ if (m_flags & FUNCTIONSONLY)
+ {
+ if (m_crawl.pFunc->IsILStub())
+ {
+ LOG((LF_GCROOTS, LL_INFO100000,
+ "STACKWALK: IS_IL_STUB: not making callback for this frame, m_crawl.pFunc = %s\n",
+ m_crawl.pFunc->m_pszDebugMethodName));
+
+ // don't stop here
+ break;
+ }
+ }
+#endif // WIN64EXCEPTIONS
+
+ fStop = true;
+ break;
+
+ case SFITER_FRAME_FUNCTION:
+ //
+ // fall through
+ //
+
+ case SFITER_SKIPPED_FRAME_FUNCTION:
+ if (!fSkippingFunclet)
+ {
+#if defined(WIN64EXCEPTIONS)
+ if (m_flags & GC_FUNCLET_REFERENCE_REPORTING)
+ {
+ // If we are enumerating frames for GC reporting and we determined that
+ // the current frame needs to be reported, ensure that it has not already
+ // been unwound by the active exception. If it has been, then we will
+ // simply skip it and not deliver a callback for it.
+ if (ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException(&m_crawl))
+ {
+ // Invoke the GC callback for this crawlframe (to keep any dynamic methods alive) but do not report its references.
+ m_crawl.fShouldCrawlframeReportGCReferences = false;
+ }
+ }
+ else if (m_flags & (FUNCTIONSONLY | SKIPFUNCLETS))
+ {
+ // See the comment above for IsInStackRegionUnwoundByCurrentException().
+ if (ExceptionTracker::IsInStackRegionUnwoundByCurrentException(&m_crawl))
+ {
+ // don't stop here
+ break;
+ }
+ }
+#endif // WIN64EXCEPTIONS
+ if ( (m_crawl.pFunc != NULL) || !(m_flags & FUNCTIONSONLY) )
+ {
+ fStop = true;
+ }
+ }
+ break;
+
+ case SFITER_NO_FRAME_TRANSITION:
+ if (!fSkippingFunclet)
+ {
+ if (m_flags & NOTIFY_ON_NO_FRAME_TRANSITIONS)
+ {
+ _ASSERTE(m_crawl.isNoFrameTransition == true);
+ fStop = true;
+ }
+ }
+ break;
+
+ case SFITER_NATIVE_MARKER_FRAME:
+ if (!fSkippingFunclet)
+ {
+ if (m_flags & NOTIFY_ON_U2M_TRANSITIONS)
+ {
+ _ASSERTE(m_crawl.isNativeMarker == true);
+ fStop = true;
+ }
+ }
+ break;
+
+ case SFITER_INITIAL_NATIVE_CONTEXT:
+ if (!fSkippingFunclet)
+ {
+ if (m_flags & NOTIFY_ON_INITIAL_NATIVE_CONTEXT)
+ {
+ fStop = true;
+ }
+ }
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ if (fStop)
+ {
+ break;
+ }
+ else
+ {
+ INDEBUG(m_crawl.pThread->DebugLogStackWalkInfo(&m_crawl, "FILTER ", m_uFramesProcessed));
+ retVal = NextRaw();
+ if (retVal != SWA_CONTINUE)
+ {
+ break;
+ }
+ }
+ }
+
+ return retVal;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Advance to the next frame and stop, regardless of the stackwalk flags.
+//
+// Return Value:
+// SWA_CONTINUE (== SWA_DONE) if the iterator is successful in advancing to the next frame
+// SWA_FAILED if an operation performed by the iterator fails
+//
+// Assumptions:
+// The caller has checked that the iterator is valid.
+//
+// Notes:
+// This function returns SWA_DONE when advancing from the last frame to becoming invalid.
+//
+
+StackWalkAction StackFrameIterator::NextRaw(void)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(IsValid());
+
+ INDEBUG(m_uFramesProcessed++);
+
+ StackWalkAction retVal = SWA_CONTINUE;
+
+ if (m_frameState == SFITER_SKIPPED_FRAME_FUNCTION)
+ {
+#if (defined(_WIN64) || defined(_TARGET_ARM_)) && defined(_DEBUG)
+ // make sure we're not skipping a different transition
+ if (m_crawl.pFrame->NeedsUpdateRegDisplay())
+ {
+ CONSISTENCY_CHECK(m_crawl.pFrame->IsTransitionToNativeFrame());
+ if (m_crawl.pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())
+ {
+ // ControlPC may be different as the InlinedCallFrame stays active throughout
+ // the STOP_FOR_GC callout but we can use the stack/frame pointer for the assert.
+ PTR_InlinedCallFrame pICF = dac_cast<PTR_InlinedCallFrame>(m_crawl.pFrame);
+ CONSISTENCY_CHECK((GetRegdisplaySP(m_crawl.pRD) == (TADDR)pICF->GetCallSiteSP())
+ || (GetFP(m_crawl.pRD->pCurrentContext) == pICF->GetCalleeSavedFP()));
+ }
+ else
+ {
+ CONSISTENCY_CHECK(GetControlPC(m_crawl.pRD) == m_crawl.pFrame->GetReturnAddress());
+ }
+ }
+#endif // (defined(_WIN64) || defined(_TARGET_ARM_)) && defined(_DEBUG)
+
+#if defined(STACKWALKER_MAY_POP_FRAMES)
+ if (m_flags & POPFRAMES)
+ {
+ _ASSERTE(m_crawl.pFrame == m_crawl.pThread->GetFrame());
+
+ // If we got here, the current frame chose not to handle the
+ // exception. Give it a chance to do any termination work
+ // before we pop it off.
+
+ CLEAR_THREAD_TYPE_STACKWALKER();
+ END_FORBID_TYPELOAD();
+
+ m_crawl.pFrame->ExceptionUnwind();
+
+ BEGIN_FORBID_TYPELOAD();
+ SET_THREAD_TYPE_STACKWALKER(m_pThread);
+
+ // Pop off this frame and go on to the next one.
+ m_crawl.GotoNextFrame();
+
+ // When StackWalkFramesEx is originally called, we ensure
+ // that if POPFRAMES is set that the thread is in COOP mode
+ // and that the running thread is walking itself. Thus, this
+ // COOP assertion is safe.
+ BEGIN_GCX_ASSERT_COOP;
+ m_crawl.pThread->SetFrame(m_crawl.pFrame);
+ END_GCX_ASSERT_COOP;
+ }
+ else
+#endif // STACKWALKER_MAY_POP_FRAMES
+ {
+ // go to the next frame
+ m_crawl.GotoNextFrame();
+ }
+
+ // check for skipped frames again
+ if (CheckForSkippedFrames())
+ {
+ // there are more skipped explicit frames
+ _ASSERTE(m_frameState == SFITER_SKIPPED_FRAME_FUNCTION);
+ goto Cleanup;
+ }
+ else
+ {
+#if defined(_TARGET_X86_)
+ // On x86, we process a managed stack frame before processing any explicit frames contained in it.
+ // So when we are done with the skipped explicit frame, we have already processed the managed
+ // stack frame, and it is time to move onto the next stack frame.
+ PostProcessingForManagedFrames();
+ if (m_frameState == SFITER_NATIVE_MARKER_FRAME)
+ {
+ goto Cleanup;
+ }
+#elif (defined(_WIN64) || defined(_TARGET_ARM_))
+ // On WIN64 and ARM, we are done handling the skipped explicit frame at this point. So move on to the
+ // managed stack frame.
+ m_crawl.isFrameless = true;
+ m_crawl.codeInfo = m_cachedCodeInfo;
+ m_crawl.pFunc = m_crawl.codeInfo.GetMethodDesc();
+
+
+ PreProcessingForManagedFrames();
+ goto Cleanup;
+#endif // (defined(_WIN64) || defined(_TARGET_ARM_))
+ }
+ }
+ else if (m_frameState == SFITER_FRAMELESS_METHOD)
+ {
+ // Now find out if we need to leave monitors
+
+#if !defined(_WIN64) && !defined(_TARGET_ARM_)
+ //
+ // WIN64 and ARM have the JIT generate try/finallys to leave monitors
+ //
+#if defined(STACKWALKER_MAY_POP_FRAMES)
+ if (m_flags & POPFRAMES)
+ {
+ BEGIN_GCX_ASSERT_COOP;
+
+ if (m_crawl.pFunc->IsSynchronized())
+ {
+ MethodDesc * pMD = m_crawl.pFunc;
+ OBJECTREF orUnwind = NULL;
+
+ if (m_crawl.GetCodeManager()->IsInSynchronizedRegion(m_crawl.GetRelOffset(),
+ m_crawl.GetGCInfo(),
+ m_crawl.GetCodeManagerFlags()))
+ {
+ if (pMD->IsStatic())
+ {
+ MethodTable * pMT = pMD->GetMethodTable();
+ orUnwind = pMT->GetManagedClassObjectIfExists();
+
+ _ASSERTE(orUnwind != NULL);
+ }
+ else
+ {
+ orUnwind = m_crawl.GetCodeManager()->GetInstance(
+ m_crawl.pRD,
+ m_crawl.GetCodeInfo());
+ }
+
+ _ASSERTE(orUnwind != NULL);
+ VALIDATEOBJECTREF(orUnwind);
+
+ _ASSERTE(!orUnwind->IsTransparentProxy());
+
+ if (orUnwind != NULL)
+ {
+ orUnwind->LeaveObjMonitorAtException();
+ }
+ }
+ }
+
+ END_GCX_ASSERT_COOP;
+ }
+#endif // STACKWALKER_MAY_POP_FRAMES
+#endif // !defined(_WIN64) && !defined(_TARGET_ARM_)
+
+#if !defined(ELIMINATE_FEF)
+ // FaultingExceptionFrame is special case where it gets
+ // pushed on the stack after the frame is running
+ _ASSERTE((m_crawl.pFrame == FRAME_TOP) ||
+ ((TADDR)GetRegdisplaySP(m_crawl.pRD) < dac_cast<TADDR>(m_crawl.pFrame)) ||
+ (m_crawl.pFrame->GetVTablePtr() == FaultingExceptionFrame::GetMethodFrameVPtr()) ||
+ (m_crawl.pFrame->GetVTablePtr() == ContextTransitionFrame::GetMethodFrameVPtr()));
+#endif // !defined(ELIMINATE_FEF)
+
+ // Get rid of the frame (actually, it isn't really popped)
+
+ LOG((LF_GCROOTS, LL_EVERYTHING, "STACKWALK: [%03x] about to unwind for '%s', SP:" FMT_ADDR ", IP:" FMT_ADDR "\n",
+ m_uFramesProcessed,
+ m_crawl.pFunc->m_pszDebugMethodName,
+ DBG_ADDR(GetRegdisplaySP(m_crawl.pRD)),
+ DBG_ADDR(GetControlPC(m_crawl.pRD))));
+
+#if !defined(DACCESS_COMPILE)
+ StackwalkCacheEntry *pCacheEntry = m_crawl.GetStackwalkCacheEntry();
+ if (pCacheEntry != NULL)
+ {
+ _ASSERTE(m_crawl.stackWalkCache.Enabled() && (m_flags & LIGHTUNWIND));
+
+ // lightened schema: take stack unwind info from stackwalk cache
+ EECodeManager::QuickUnwindStackFrame(m_crawl.pRD, pCacheEntry, EECodeManager::UnwindCurrentStackFrame);
+ }
+ else
+#endif // !DACCESS_COMPILE
+ {
+#if !defined(DACCESS_COMPILE)
+ // non-optimized stack unwind schema, doesn't use StackwalkCache
+ UINT_PTR curSP = (UINT_PTR)GetRegdisplaySP(m_crawl.pRD);
+ UINT_PTR curIP = (UINT_PTR)GetControlPC(m_crawl.pRD);
+#endif // !DACCESS_COMPILE
+
+ bool fInsertCacheEntry = m_crawl.stackWalkCache.Enabled() &&
+ (m_flags & LIGHTUNWIND) &&
+ (m_pCachedGSCookie == NULL);
+
+ StackwalkCacheUnwindInfo unwindInfo;
+
+ if (!m_crawl.GetCodeManager()->UnwindStackFrame(
+ m_crawl.pRD,
+ &m_cachedCodeInfo,
+ m_codeManFlags
+ | m_crawl.GetCodeManagerFlags()
+ | ((m_flags & PROFILER_DO_STACK_SNAPSHOT) ? SpeculativeStackwalk : 0),
+ &m_crawl.codeManState,
+ (fInsertCacheEntry ? &unwindInfo : NULL)))
+ {
+ LOG((LF_CORPROF, LL_INFO100, "**PROF: m_crawl.GetCodeManager()->UnwindStackFrame failure leads to SWA_FAILED.\n"));
+ retVal = SWA_FAILED;
+ goto Cleanup;
+ }
+
+#if !defined(DACCESS_COMPILE)
+ // store into hashtable if fits, otherwise just use old schema
+ if (fInsertCacheEntry)
+ {
+ //
+ // the information we add to the cache consists of two parts:
+ // 1. SPOffset - locals, etc. of the current method; adding it to the current ESP gets us to the retAddr slot
+ // 2. argSize - size of the pushed function arguments, which we then add to get the new ESP
+ // we have to store the ESP delta in two parts, since we also need to update pPC, which requires the retAddr slot
+ //
+ // newSP = oldSP + SPOffset + sizeof(PTR) + argSize
+ //
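+ // As a purely hypothetical example (x86, sizeof(void*) == 4): with oldSP = 0x0012e800,
+ // SPOffset = 0x20 and argSize = 0x8, the retAddr slot is at 0x0012e820 and
+ // newSP = 0x0012e800 + 0x20 + 0x4 + 0x8 = 0x0012e82c.
+ //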
+ UINT_PTR SPOffset = (UINT_PTR)GetRegdisplayStackMark(m_crawl.pRD) - curSP;
+ UINT_PTR argSize = (UINT_PTR)GetRegdisplaySP(m_crawl.pRD) - curSP - SPOffset - sizeof(void*);
+
+ StackwalkCacheEntry cacheEntry = {0};
+ if (cacheEntry.Init(
+ curIP,
+ SPOffset,
+ &unwindInfo,
+ argSize))
+ {
+ m_crawl.stackWalkCache.Insert(&cacheEntry);
+ }
+ }
+#endif // !DACCESS_COMPILE
+ }
+
+#define FAIL_IF_SPECULATIVE_WALK(condition) \
+ if (m_flags & PROFILER_DO_STACK_SNAPSHOT) \
+ { \
+ if (!(condition)) \
+ { \
+ LOG((LF_CORPROF, LL_INFO100, "**PROF: " #condition " failure leads to SWA_FAILED.\n")); \
+ retVal = SWA_FAILED; \
+ goto Cleanup; \
+ } \
+ } \
+ else \
+ { \
+ _ASSERTE(condition); \
+ }
+
+ // When the stackwalk is seeded with a profiler context, the context
+ // might be bogus. Check the stack pointer and the program counter for validity here.
+ // (Note that these checks are not strictly necessary since we are able
+ // to recover from AVs during profiler stackwalk.)
+
+ PTR_VOID newSP = PTR_VOID((TADDR)GetRegdisplaySP(m_crawl.pRD));
+ FAIL_IF_SPECULATIVE_WALK(newSP >= m_crawl.pThread->GetCachedStackLimit());
+ FAIL_IF_SPECULATIVE_WALK(newSP < m_crawl.pThread->GetCachedStackBase());
+
+#undef FAIL_IF_SPECULATIVE_WALK
+
+ LOG((LF_GCROOTS, LL_EVERYTHING, "STACKWALK: [%03x] finished unwind for '%s', SP:" FMT_ADDR \
+ ", IP:" FMT_ADDR "\n",
+ m_uFramesProcessed,
+ m_crawl.pFunc->m_pszDebugMethodName,
+ DBG_ADDR(GetRegdisplaySP(m_crawl.pRD)),
+ DBG_ADDR(GetControlPC(m_crawl.pRD))));
+
+ m_crawl.isFirst = FALSE;
+ m_crawl.isInterrupted = FALSE;
+ m_crawl.hasFaulted = FALSE;
+ m_crawl.isIPadjusted = FALSE;
+
+#if defined(_TARGET_X86_)
+ // remember, x86 handles the managed stack frame before the explicit frames contained in it
+ if (CheckForSkippedFrames())
+ {
+ _ASSERTE(m_frameState == SFITER_SKIPPED_FRAME_FUNCTION);
+ goto Cleanup;
+ }
+#endif // _TARGET_X86_
+
+ PostProcessingForManagedFrames();
+ if (m_frameState == SFITER_NATIVE_MARKER_FRAME)
+ {
+ goto Cleanup;
+ }
+ }
+ else if (m_frameState == SFITER_FRAME_FUNCTION)
+ {
+ Frame* pInlinedFrame = NULL;
+
+ if (InlinedCallFrame::FrameHasActiveCall(m_crawl.pFrame))
+ {
+ pInlinedFrame = m_crawl.pFrame;
+ }
+
+ unsigned uFrameAttribs = m_crawl.pFrame->GetFrameAttribs();
+
+ // Special resumable frames make believe they are on top of the stack.
+ m_crawl.isFirst = (uFrameAttribs & Frame::FRAME_ATTR_RESUMABLE) != 0;
+
+ // If the frame is a subclass of ExceptionFrame,
+ // then we know this is interrupted.
+ m_crawl.isInterrupted = (uFrameAttribs & Frame::FRAME_ATTR_EXCEPTION) != 0;
+
+ if (m_crawl.isInterrupted)
+ {
+ m_crawl.hasFaulted = (uFrameAttribs & Frame::FRAME_ATTR_FAULTED) != 0;
+ m_crawl.isIPadjusted = (uFrameAttribs & Frame::FRAME_ATTR_OUT_OF_LINE) != 0;
+ _ASSERTE(!m_crawl.hasFaulted || !m_crawl.isIPadjusted); // both can't be set together
+ }
+
+ //
+ // Update app domain if this frame caused a transition.
+ //
+
+ AppDomain *retDomain = m_crawl.pFrame->GetReturnDomain();
+ if (retDomain != NULL)
+ {
+ m_crawl.pAppDomain = retDomain;
+ }
+
+ PCODE adr = m_crawl.pFrame->GetReturnAddress();
+ _ASSERTE(adr != (PCODE)POISONC);
+
+ _ASSERTE(!pInlinedFrame || adr);
+
+ if (adr)
+ {
+ ProcessIp(adr);
+
+ _ASSERTE(m_crawl.GetCodeInfo()->IsValid() || !pInlinedFrame);
+
+ if (m_crawl.isFrameless)
+ {
+ m_crawl.pFrame->UpdateRegDisplay(m_crawl.pRD);
+
+#if defined(_WIN64) || defined(_TARGET_ARM_)
+ CONSISTENCY_CHECK(NULL == m_pvResumableFrameTargetSP);
+
+ if (m_crawl.isFirst)
+ {
+ if (m_flags & THREAD_IS_SUSPENDED)
+ {
+ _ASSERTE(m_crawl.isProfilerDoStackSnapshot);
+
+ // abort the stackwalk, we can't proceed without risking deadlock
+ retVal = SWA_FAILED;
+ goto Cleanup;
+ }
+
+ // we are about to unwind, which may take a lock, so the thread
+ // better not be suspended.
+ CONSISTENCY_CHECK(!(m_flags & THREAD_IS_SUSPENDED));
+
+#if !defined(DACCESS_COMPILE)
+ if (m_crawl.stackWalkCache.Enabled() && (m_flags & LIGHTUNWIND))
+ {
+ m_crawl.isCachedMethod = m_crawl.stackWalkCache.Lookup((UINT_PTR)adr);
+ }
+#endif // DACCESS_COMPILE
+
+ EECodeManager::EnsureCallerContextIsValid(m_crawl.pRD, m_crawl.GetStackwalkCacheEntry());
+ m_pvResumableFrameTargetSP = (LPVOID)GetSP(m_crawl.pRD->pCallerContext);
+ }
+#endif // defined(_WIN64) || defined(_TARGET_ARM_)
+
+
+ // We are transitioning from unmanaged code to managed code... let's do some validation of our
+ // EH mechanism on platforms that we can.
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE) && defined(_TARGET_X86_)
+ VerifyValidTransitionFromManagedCode(m_crawl.pThread, &m_crawl);
+#endif // _DEBUG && !DACCESS_COMPILE && _TARGET_X86_
+ }
+ }
+
+ if (!pInlinedFrame)
+ {
+#if defined(STACKWALKER_MAY_POP_FRAMES)
+ if (m_flags & POPFRAMES)
+ {
+ // If we got here, the current frame chose not to handle the
+ // exception. Give it a chance to do any termination work
+ // before we pop it off.
+
+ CLEAR_THREAD_TYPE_STACKWALKER();
+ END_FORBID_TYPELOAD();
+
+ m_crawl.pFrame->ExceptionUnwind();
+
+ BEGIN_FORBID_TYPELOAD();
+ SET_THREAD_TYPE_STACKWALKER(m_pThread);
+
+ // Pop off this frame and go on to the next one.
+ m_crawl.GotoNextFrame();
+
+ // When StackWalkFramesEx is originally called, we ensure
+ // that if POPFRAMES is set that the thread is in COOP mode
+ // and that the running thread is walking itself. Thus, this
+ // COOP assertion is safe.
+ BEGIN_GCX_ASSERT_COOP;
+ m_crawl.pThread->SetFrame(m_crawl.pFrame);
+ END_GCX_ASSERT_COOP;
+ }
+ else
+#endif // STACKWALKER_MAY_POP_FRAMES
+ {
+ // Go to the next frame.
+ m_crawl.GotoNextFrame();
+ }
+ }
+ }
+#if defined(ELIMINATE_FEF)
+ else if (m_frameState == SFITER_NO_FRAME_TRANSITION)
+ {
+ PostProcessingForNoFrameTransition();
+ }
+#endif // ELIMINATE_FEF
+ else if (m_frameState == SFITER_NATIVE_MARKER_FRAME)
+ {
+ m_crawl.isNativeMarker = false;
+ }
+ else if (m_frameState == SFITER_INITIAL_NATIVE_CONTEXT)
+ {
+ // nothing to do here
+ }
+ else
+ {
+ _ASSERTE(m_frameState == SFITER_UNINITIALIZED);
+ _ASSERTE(!"StackFrameIterator::NextRaw() called when the iterator is uninitialized. \
+ Should never get here.");
+ retVal = SWA_FAILED;
+ goto Cleanup;
+ }
+
+ ProcessCurrentFrame();
+
+Cleanup:
+#if defined(_DEBUG)
+ if (retVal == SWA_FAILED)
+ {
+ LOG((LF_GCROOTS, LL_INFO10000, "STACKWALK: SWA_FAILED: couldn't start stackwalk\n"));
+ }
+#endif // _DEBUG
+
+ return retVal;
+} // StackFrameIterator::NextRaw()
+
+//---------------------------------------------------------------------------------------
+//
+// Synchronize the REGDISPLAY with the current CONTEXT stored in the REGDISPLAY.
+// This is a nop on non-WIN64 platforms.
+//
+
+void StackFrameIterator::UpdateRegDisp(void)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ WIN64_ONLY(SyncRegDisplayToCurrentContext(m_crawl.pRD));
+} // StackFrameIterator::UpdateRegDisp()
+
+//---------------------------------------------------------------------------------------
+//
+// Check whether the specified Ip is in managed code and update the CrawlFrame accordingly.
+// This function updates isFrameless and codeInfo.
+//
+// Arguments:
+// Ip - IP to be processed
+//
+
+void StackFrameIterator::ProcessIp(PCODE Ip)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ // Re-initialize codeInfo with new IP
+ m_crawl.codeInfo.Init(Ip, m_scanFlag);
+
+ m_crawl.isFrameless = !!m_crawl.codeInfo.IsValid();
+} // StackFrameIterator::ProcessIp()
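+
+// A minimal usage sketch (illustrative comments only, not part of this file's logic),
+// showing the calling pattern used by Init() and ResetRegDisp() above:
+//
+//   ProcessIp(GetControlPC(m_crawl.pRD)); // classify the current IP
+//   if (m_crawl.isFrameless)
+//   {
+//       // the IP is in managed (JITed) code and m_crawl.codeInfo is valid
+//   }
+//   else
+//   {
+//       // the IP is in native code; the walk must rely on explicit frames / ExInfos
+//   }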
+
+//---------------------------------------------------------------------------------------
+//
+// Update the CrawlFrame to represent where we have stopped. This is called after advancing
+// to a new frame.
+//
+// Notes:
+// This function and everything it calls must not rely on m_frameState, which could have become invalid
+// when we advance the iterator before calling this function.
+//
+
+void StackFrameIterator::ProcessCurrentFrame(void)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ bool fDone = false;
+
+ m_crawl.CheckGSCookies();
+
+ // Since we have advanced the iterator, the frame state represents the previous frame state,
+ // not the current one. This is important to keep in mind. Ideally we should just assert that
+ // the frame state has been set to invalid upon entry to this function, but we need the previous frame
+ // state to decide if we should stop at a native stack frame.
+
+ // If we just do a simple check for native code here, we will loop forever.
+ if (m_frameState == SFITER_UNINITIALIZED)
+ {
+ // "!IsFrameless()" normally implies that the CrawlFrame is at an explicit frame. Here we are using it
+ // to detect whether the CONTEXT is in managed code or not. Ideally we should have an enum on the
+ // CrawlFrame to indicate the various types of "frames" the CrawlFrame can stop at.
+ //
+ // If the CONTEXT is in native code and the StackFrameIterator is uninitialized, then it must be
+ // an initial native CONTEXT passed to the StackFrameIterator when it is created or
+ // when ResetRegDisp() is called.
+ if (!m_crawl.IsFrameless())
+ {
+ m_frameState = SFITER_INITIAL_NATIVE_CONTEXT;
+ fDone = true;
+ }
+ }
+ else
+ {
+ // Clear the frame state. It will be set before we return from this function.
+ m_frameState = SFITER_UNINITIALIZED;
+ }
+
+ // Check for the case of an exception in managed code, and resync the stack walk
+ // from the exception context.
+#if defined(ELIMINATE_FEF)
+ if (!fDone && !m_crawl.IsFrameless() && m_exInfoWalk.GetExInfo())
+ {
+ // We are currently walking ("lost") in unmanaged code. We can recover
+ // from a) the next Frame record, or b) an exception context.
+ // Recover from the exception context if all of these are true:
+ // - it "returns" to managed code
+ // - it is lower (newer) than the next Frame record
+ // - the stack walk has not already passed by it
+ //
+ // The ExInfo walker is initialized to be higher than the pStartFrame, and
+ // as we unwind managed (frameless) functions, we keep eliminating any
+ // ExInfos that the stackwalk has passed by.
+ //
+ // So, here we need to find the next ExInfo that "returns" to managed code,
+ // and then choose the lower of that ExInfo and the next Frame.
+ m_exInfoWalk.WalkToManaged();
+ TADDR pContextSP = m_exInfoWalk.GetSPFromContext();
+
+ //@todo: check the exception code for a fault?
+
+ // If there was a pContext that is higher than the SP and starting frame...
+ if (pContextSP)
+ {
+ PTR_CONTEXT pContext = m_exInfoWalk.GetContext();
+
+ LOG((LF_EH, LL_INFO10000, "STACKWALK: considering resync from pContext(%p), fault(%08X), sp(%p); \
+ pStartFrame(%p); cf.pFrame(%p), cf.SP(%p)\n",
+ pContext, m_exInfoWalk.GetFault(), pContextSP,
+ m_pStartFrame, dac_cast<TADDR>(m_crawl.pFrame), GetRegdisplaySP(m_crawl.pRD)));
+
+ // If the pContext is lower (newer) than the CrawlFrame's Frame*, try to use
+ // the pContext.
+ // There are still a few cases in which a FaultingExceptionFrame is linked in. If
+ // the next frame is one of them, we don't want to override it. THIS IS PROBABLY BAD!!!
+ if ( (pContextSP < dac_cast<TADDR>(m_crawl.pFrame)) &&
+ ((m_crawl.GetFrame() == FRAME_TOP) ||
+ (m_crawl.GetFrame()->GetVTablePtr() != FaultingExceptionFrame::GetMethodFrameVPtr() ) ) )
+ {
+ //
+ // If the REGDISPLAY represents an unmanaged stack frame above (closer to the leaf than) an
+ // ExInfo without any intervening managed stack frame, then we will stop at the no-frame
+ // transition protected by the ExInfo. However, if the unmanaged stack frame is the one
+ // immediately above the faulting managed stack frame, we want to continue the stackwalk
+ // with the faulting managed stack frame. So we do not stop in this case.
+ //
+ // However, just comparing EBP is not enough. The OS exception handler
+ // (KiUserExceptionDispatcher()) does not use an EBP frame. So if we just compare the EBP
+ // we will think that the OS exception handler is the one we want to claim. Instead,
+ // we should also check the current IP, which because of the way unwinding works and
+ // how the OS exception handler behaves is actually going to be the stack limit of the
+ // current thread. This is of course a workaround and is dependent on the OS behaviour.
+ //
+
+ PCODE curPC = GetControlPC(m_crawl.pRD);
+ if ((m_crawl.pRD->pEbp != NULL ) &&
+ (m_exInfoWalk.GetEBPFromContext() == GetRegdisplayFP(m_crawl.pRD)) &&
+ ((m_crawl.pThread->GetCachedStackLimit() <= PTR_VOID(curPC)) &&
+ (PTR_VOID(curPC) < m_crawl.pThread->GetCachedStackBase())))
+ {
+ // restore the CONTEXT saved by the ExInfo and continue on to the faulting
+ // managed stack frame
+ PostProcessingForNoFrameTransition();
+ }
+ else
+ {
+ // we stop at the no-frame transition
+ m_frameState = SFITER_NO_FRAME_TRANSITION;
+ m_crawl.isNoFrameTransition = true;
+ m_crawl.taNoFrameTransitionMarker = pContextSP;
+ fDone = true;
+ }
+ }
+ }
+ }
+#endif // defined(ELIMINATE_FEF)
+
+ if (!fDone)
+ {
+ // we are done if there are no more frames to walk
+ if (!IsValid())
+ {
+ LOG((LF_GCROOTS, LL_INFO10000, "STACKWALK: SWA_DONE: reached the end of the stack\n"));
+ m_frameState = SFITER_DONE;
+ return;
+ }
+
+ m_crawl.codeManState.dwIsSet = 0;
+#if defined(_DEBUG)
+ memset((void *)m_crawl.codeManState.stateBuf, 0xCD,
+ sizeof(m_crawl.codeManState.stateBuf));
+#endif // _DEBUG
+
+ if (m_crawl.isFrameless)
+ {
+ //------------------------------------------------------------------------
+ // This must be a JITed/managed native method. There is no explicit frame.
+ //------------------------------------------------------------------------
+
+#if !defined(DACCESS_COMPILE)
+ m_crawl.isCachedMethod = FALSE;
+ if (m_crawl.stackWalkCache.Enabled() && (m_flags & LIGHTUNWIND))
+ {
+ m_crawl.isCachedMethod = m_crawl.stackWalkCache.Lookup((UINT_PTR)GetControlPC(m_crawl.pRD));
+ _ASSERTE (m_crawl.isCachedMethod != m_crawl.stackWalkCache.IsEmpty());
+
+ m_crawl.pSecurityObject = NULL;
+#if defined(_TARGET_X86_)
+ if (m_crawl.isCachedMethod && m_crawl.stackWalkCache.m_CacheEntry.HasSecurityObject())
+ {
+ // pCallback will use this to save time on GetAddrOfSecurityObject
+ StackwalkCacheUnwindInfo stackwalkCacheUnwindInfo(&m_crawl.stackWalkCache.m_CacheEntry);
+ m_crawl.pSecurityObject = EECodeManager::GetAddrOfSecurityObjectFromCachedInfo(
+ m_crawl.pRD,
+ &stackwalkCacheUnwindInfo);
+ }
+#endif // _TARGET_X86_
+ }
+#endif // DACCESS_COMPILE
+
+
+#if defined(WIN64EXCEPTIONS)
+ m_crawl.isFilterFuncletCached = false;
+#endif // WIN64EXCEPTIONS
+
+ m_crawl.pFunc = m_crawl.codeInfo.GetMethodDesc();
+
+ // Cache values which may be updated by CheckForSkippedFrames()
+ m_cachedCodeInfo = m_crawl.codeInfo;
+
+#if defined(_WIN64) || defined(_TARGET_ARM_)
+ // On WIN64 and ARM, we want to process the skipped explicit frames before the managed stack frame
+ // containing them.
+ if (CheckForSkippedFrames())
+ {
+ _ASSERTE(m_frameState == SFITER_SKIPPED_FRAME_FUNCTION);
+ }
+ else
+#endif // _WIN64 || _TARGET_ARM_
+ {
+ PreProcessingForManagedFrames();
+ _ASSERTE(m_frameState == SFITER_FRAMELESS_METHOD);
+ }
+ }
+ else
+ {
+ INDEBUG(m_crawl.pThread->DebugLogStackWalkInfo(&m_crawl, "CONSIDER", m_uFramesProcessed));
+
+ _ASSERTE(m_crawl.pFrame != FRAME_TOP);
+
+ m_crawl.pFunc = m_crawl.pFrame->GetFunction();
+
+ m_frameState = SFITER_FRAME_FUNCTION;
+ }
+ }
+
+ _ASSERTE(m_frameState != SFITER_UNINITIALIZED);
+} // StackFrameIterator::ProcessCurrentFrame()
+
+//---------------------------------------------------------------------------------------
+//
+// If an explicit frame is allocated in a managed stack frame (e.g. an inlined pinvoke call),
+// we may have skipped an explicit frame. This function checks for them.
+//
+// Return Value:
+// Returns true if there are skipped frames.
+//
+// Notes:
+// x86 wants to stop at the skipped stack frames after the containing managed stack frame, but
+// WIN64 wants to stop before. I don't think x86 actually has any good reason for this, except
+// because it doesn't unwind one frame ahead of time like WIN64 does. This means that we don't
+// have the caller SP on x86.
+//
+
+BOOL StackFrameIterator::CheckForSkippedFrames(void)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ BOOL fHandleSkippedFrames = FALSE;
+ TADDR pvReferenceSP;
+
+ // Can the caller handle skipped frames?
+ fHandleSkippedFrames = (m_flags & HANDLESKIPPEDFRAMES);
+
+#if !(defined(_WIN64) || defined(_TARGET_ARM_))
+ pvReferenceSP = GetRegdisplaySP(m_crawl.pRD);
+#else // (defined(_WIN64) || defined(_TARGET_ARM_))
+ // Order the Frames relative to the caller SP of the methods;
+ // this makes it so that any Frame that is in a managed call
+ // frame will be reported before its containing method.
+
+ // This should always succeed! If it doesn't, it's a bug somewhere else!
+ EECodeManager::EnsureCallerContextIsValid(m_crawl.pRD, m_crawl.GetStackwalkCacheEntry(), &m_cachedCodeInfo);
+ pvReferenceSP = GetSP(m_crawl.pRD->pCallerContext);
+#endif // !(defined(_WIN64) || defined(_TARGET_ARM_))
+
+ if ( !( (m_crawl.pFrame != FRAME_TOP) &&
+ (dac_cast<TADDR>(m_crawl.pFrame) < pvReferenceSP) )
+ )
+ {
+ return FALSE;
+ }
+
+ LOG((LF_GCROOTS, LL_EVERYTHING, "STACKWALK: CheckForSkippedFrames\n"));
+
+ // We might have skipped past some Frames.
+ // This happens with InlinedCallFrames and if we unwound
+ // out of a finally in managed code or for ContextTransitionFrames
+ // that are inserted into the managed call stack.
+ while ( (m_crawl.pFrame != FRAME_TOP) &&
+ (dac_cast<TADDR>(m_crawl.pFrame) < pvReferenceSP)
+ )
+ {
+ BOOL fReportInteropMD =
+ // If we see InlinedCallFrame in certain IL stubs, we should report the MD that
+ // was passed to the stub as its secret argument. This is the true interop MD.
+ // Note that code:InlinedCallFrame.GetFunction may return NULL in this case because
+ // the call is made using the CALLI instruction.
+ m_crawl.pFrame != FRAME_TOP &&
+ m_crawl.pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr() &&
+ m_crawl.pFunc != NULL &&
+ m_crawl.pFunc->IsILStub() &&
+ m_crawl.pFunc->AsDynamicMethodDesc()->HasMDContextArg();
+
+ if (fHandleSkippedFrames
+#ifdef _TARGET_X86_
+ || // On x86 we have already reported the InlinedCallFrame, don't report it again.
+ (InlinedCallFrame::FrameHasActiveCall(m_crawl.pFrame) && !fReportInteropMD)
+#endif // _TARGET_X86_
+ )
+ {
+ m_crawl.GotoNextFrame();
+#ifdef STACKWALKER_MAY_POP_FRAMES
+ if (m_flags & POPFRAMES)
+ {
+ // When StackWalkFramesEx is originally called, we ensure
+ // that if POPFRAMES is set that the thread is in COOP mode
+ // and that the running thread is walking itself. Thus, this
+ // COOP assertion is safe.
+ BEGIN_GCX_ASSERT_COOP;
+ m_crawl.pThread->SetFrame(m_crawl.pFrame);
+ END_GCX_ASSERT_COOP;
+ }
+#endif // STACKWALKER_MAY_POP_FRAMES
+ }
+ else
+ {
+ m_crawl.isFrameless = false;
+
+ if (fReportInteropMD)
+ {
+ m_crawl.pFunc = ((PTR_InlinedCallFrame)m_crawl.pFrame)->GetActualInteropMethodDesc();
+ _ASSERTE(m_crawl.pFunc != NULL);
+ _ASSERTE(m_crawl.pFunc->SanityCheck());
+ }
+ else
+ {
+ m_crawl.pFunc = m_crawl.pFrame->GetFunction();
+ }
+
+ INDEBUG(m_crawl.pThread->DebugLogStackWalkInfo(&m_crawl, "CONSIDER", m_uFramesProcessed));
+
+ m_frameState = SFITER_SKIPPED_FRAME_FUNCTION;
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+} // StackFrameIterator::CheckForSkippedFrames()
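+
+// An illustrative ordering example (assumption based on the notes above, not part of this
+// file's logic): for a managed method M containing an inlined pinvoke whose explicit frame
+// is F, the iterator stops in this order:
+//
+//   x86:           M (SFITER_FRAMELESS_METHOD), then F (SFITER_SKIPPED_FRAME_FUNCTION)
+//   WIN64 and ARM: F (SFITER_SKIPPED_FRAME_FUNCTION), then M (SFITER_FRAMELESS_METHOD)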
+
+//---------------------------------------------------------------------------------------
+//
+// Perform the necessary tasks before stopping at a managed stack frame. This is mostly validation work.
+//
+
+void StackFrameIterator::PreProcessingForManagedFrames(void)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+#if defined(_WIN64) || defined(_TARGET_ARM_)
+ if (m_pvResumableFrameTargetSP)
+ {
+ // We expect that if we saw a resumable frame, the next managed
+ // IP that we see will be the one the resumable frame took us to.
+
+ // However, because we might visit intervening explicit Frames
+ // that will clear the .isFirst flag, we need to set it back here.
+
+ CONSISTENCY_CHECK(m_crawl.pRD->IsCallerContextValid);
+ CONSISTENCY_CHECK((LPVOID)GetSP(m_crawl.pRD->pCallerContext) == m_pvResumableFrameTargetSP);
+ m_pvResumableFrameTargetSP = NULL;
+ m_crawl.isFirst = true;
+ }
+#endif // defined(_WIN64) || defined(_TARGET_ARM_)
+
+#if !defined(DACCESS_COMPILE)
+ m_pCachedGSCookie = (GSCookie*)m_crawl.GetCodeManager()->GetGSCookieAddr(
+ m_crawl.pRD,
+ &m_crawl.codeInfo,
+ &m_crawl.codeManState);
+#endif // !DACCESS_COMPILE
+
+ if (m_pCachedGSCookie)
+ {
+ m_crawl.SetCurGSCookie(m_pCachedGSCookie);
+ }
+
+ INDEBUG(m_crawl.pThread->DebugLogStackWalkInfo(&m_crawl, "CONSIDER", m_uFramesProcessed));
+
+#if defined(_DEBUG) && defined(_TARGET_X86_) && !defined(DACCESS_COMPILE)
+    // m_crawl.GetThisPointer() requires a full unwind.
+    // In the GC's relocate phase, objects are not verifiable.
+ if ( !(m_flags & (LIGHTUNWIND | QUICKUNWIND | ALLOW_INVALID_OBJECTS)) &&
+ m_crawl.pFunc->IsSynchronized() &&
+ !m_crawl.pFunc->IsStatic() &&
+ m_crawl.GetCodeManager()->IsInSynchronizedRegion(m_crawl.GetRelOffset(),
+ m_crawl.GetGCInfo(),
+ m_crawl.GetCodeManagerFlags()))
+ {
+ BEGIN_GCX_ASSERT_COOP;
+
+ OBJECTREF obj = m_crawl.GetThisPointer();
+
+ _ASSERTE(obj != NULL);
+ VALIDATEOBJECTREF(obj);
+
+ DWORD threadId = 0;
+ DWORD acquisitionCount = 0;
+ _ASSERTE(obj->GetThreadOwningMonitorLock(&threadId, &acquisitionCount) &&
+ (threadId == m_crawl.pThread->GetThreadId()));
+
+ END_GCX_ASSERT_COOP;
+ }
+#endif // _DEBUG && _TARGET_X86_ && !DACCESS_COMPILE
+
+ m_frameState = SFITER_FRAMELESS_METHOD;
+} // StackFrameIterator::PreProcessingForManagedFrames()
+
+//---------------------------------------------------------------------------------------
+//
+// Perform the necessary tasks after stopping at a managed stack frame and unwinding to its caller.
+// This includes advancing the ExInfo and checking whether the new IP is managed.
+//
+
+void StackFrameIterator::PostProcessingForManagedFrames(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+
+#if defined(ELIMINATE_FEF)
+ // As with frames, we may have unwound past a ExInfo.pContext. This
+ // can happen when unwinding from a handler that rethrew the exception.
+ // Skip any ExInfo.pContext records that may no longer be valid.
+ // If Frames would be unlinked from the Frame chain, also reset the UseExInfoForStackwalk bit
+ // on the ExInfo.
+ m_exInfoWalk.WalkToPosition(GetRegdisplaySP(m_crawl.pRD), (m_flags & POPFRAMES));
+#endif // ELIMINATE_FEF
+
+ ProcessIp(GetControlPC(m_crawl.pRD));
+
+ // if we have unwound to a native stack frame, stop and set the frame state accordingly
+ if (!m_crawl.isFrameless)
+ {
+ m_frameState = SFITER_NATIVE_MARKER_FRAME;
+ m_crawl.isNativeMarker = true;
+ }
+} // StackFrameIterator::PostProcessingForManagedFrames()
+
+//---------------------------------------------------------------------------------------
+//
+// Perform the necessary tasks after stopping at a no-frame transition. This includes loading
+// the CONTEXT stored in the ExInfo and updating the REGDISPLAY to the faulting managed stack frame.
+//
+
+void StackFrameIterator::PostProcessingForNoFrameTransition()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+#if defined(ELIMINATE_FEF)
+ PTR_CONTEXT pContext = m_exInfoWalk.GetContext();
+
+ // Get the JitManager for the managed address.
+ m_crawl.codeInfo.Init(GetIP(pContext), m_scanFlag);
+ _ASSERTE(m_crawl.codeInfo.IsValid());
+
+ STRESS_LOG4(LF_EH, LL_INFO100, "STACKWALK: resync from pContext(%p); pStartFrame(%p), \
+ cf.pFrame(%p), cf.SP(%p)\n",
+ dac_cast<TADDR>(pContext), dac_cast<TADDR>(m_pStartFrame), dac_cast<TADDR>(m_crawl.pFrame),
+ GetRegdisplaySP(m_crawl.pRD));
+
+ // Update the RegDisplay from the context info.
+ FillRegDisplay(m_crawl.pRD, pContext);
+
+ // Now we know where we are, and it's "frameless", aka managed.
+ m_crawl.isFrameless = true;
+
+ // Flags the same as from a FaultingExceptionFrame.
+ m_crawl.isInterrupted = 1;
+ m_crawl.hasFaulted = 1;
+ m_crawl.isIPadjusted = 0;
+
+#if defined(STACKWALKER_MAY_POP_FRAMES)
+ // If Frames would be unlinked from the Frame chain, also reset the UseExInfoForStackwalk bit
+ // on the ExInfo.
+ if (m_flags & POPFRAMES)
+ {
+ m_exInfoWalk.GetExInfo()->m_ExceptionFlags.ResetUseExInfoForStackwalk();
+ }
+#endif // STACKWALKER_MAY_POP_FRAMES
+
+ // Done with this ExInfo.
+ m_exInfoWalk.WalkOne();
+
+ m_crawl.isNoFrameTransition = false;
+ m_crawl.taNoFrameTransitionMarker = NULL;
+#endif // ELIMINATE_FEF
+} // StackFrameIterator::PostProcessingForNoFrameTransition()
+
+
+#if defined(_TARGET_AMD64_) && !defined(DACCESS_COMPILE)
+static CrstStatic g_StackwalkCacheLock; // Global StackwalkCache lock; only used on AMD64
+EXTERN_C void moveOWord(LPVOID src, LPVOID target);
+#endif // _TARGET_AMD64_
+
+/*
+    Copies the 64-bit entry at *src to *target, accessing the data atomically.
+    Requires 64-bit alignment for the atomic load/store.
+    (On AMD64 a cache entry is 16 bytes and is copied under a lock instead.)
+*/
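+// Note: the x86 path relies on the FILD/FISTP pair below performing the copy
+// as a single 8-byte x87 load and store; aligned 8-byte accesses are atomic
+// on x86, which is what makes this lock-free copy safe.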
+inline static void atomicMoveCacheEntry(UINT64* src, UINT64* target)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _TARGET_X86_
+    // the most negative value is used as a sort of integer-infinity
+    // value, so it has to be avoided
+ _ASSERTE(*src != 0x8000000000000000);
+ __asm
+ {
+ mov eax, src
+ fild qword ptr [eax]
+ mov eax, target
+ fistp qword ptr [eax]
+ }
+#elif defined(_TARGET_AMD64_) && !defined(DACCESS_COMPILE)
+ // On AMD64 there's no way to move 16 bytes atomically, so we need to take a lock before calling moveOWord().
+ CrstHolder ch(&g_StackwalkCacheLock);
+ moveOWord(src, target);
+#endif
+}
+
+/*
+============================================================
+Here is the implementation of the StackwalkCache class, used to optimize the
+performance of stack walking. Currently each CrawlFrame has a StackwalkCache
+member, which implements functionality for caching already-walked methods
+(see Thread::StackWalkFramesEx). The class and the corresponding types are
+declared in stackwalktypes.h.
+We use the global cache g_StackwalkCache[], accessed with atomic moves
+(see atomicMoveCacheEntry), fitting each cache entry into 8 bytes on x86
+(16 bytes on AMD64).
+============================================================
+*/
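+/*
+Illustrative flow (a sketch only -- the real driver is Thread::StackWalkFramesEx,
+and the local names below are made up):
+
+    StackwalkCache &cache = crawlFrame.stackWalkCache;
+    if (cache.Enabled() && cache.Lookup(currentIP))
+    {
+        // hit: cache.m_CacheEntry now holds the frame size/flags for this IP,
+        // so the unwinder can skip decoding the method's unwind info
+    }
+    else
+    {
+        // miss: unwind the slow way, then publish the result
+        StackwalkCacheEntry entry;
+        if (entry.Init(currentIP, spOffset, &unwindInfo, argSize))
+            cache.Insert(&entry);
+    }
+*/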
+
+#ifndef DACCESS_COMPILE
+#define LOG_NUM_OF_CACHE_ENTRIES 10
+#else
+// Stack walk cache is disabled in DAC - save space
+#define LOG_NUM_OF_CACHE_ENTRIES 0
+#endif
+#define NUM_OF_CACHE_ENTRIES (1 << LOG_NUM_OF_CACHE_ENTRIES)
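+// With LOG_NUM_OF_CACHE_ENTRIES == 10 this is 1024 entries, i.e. an 8 KB table
+// on x86 and a 16 KB table on AMD64 (each StackwalkCacheEntry is 2 * sizeof(UINT_PTR)).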
+
+static StackwalkCacheEntry g_StackwalkCache[NUM_OF_CACHE_ENTRIES] = {}; // Global StackwalkCache
+
+#ifdef DACCESS_COMPILE
+const BOOL StackwalkCache::s_Enabled = FALSE;
+#else
+BOOL StackwalkCache::s_Enabled = FALSE;
+
+/*
+ StackwalkCache class constructor.
+ Set "enable/disable optimization" flag according to registry key.
+*/
+StackwalkCache::StackwalkCache()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ ClearEntry();
+
+ static BOOL stackwalkCacheEnableChecked = FALSE;
+ if (!stackwalkCacheEnableChecked)
+ {
+ // We can enter this block on multiple threads because of racing.
+        // However, that is OK since this operation is idempotent.
+
+ s_Enabled = ((g_pConfig->DisableStackwalkCache() == 0) &&
+ // disable cache if for some reason it is not aligned
+ IS_ALIGNED((void*)&g_StackwalkCache[0], STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY));
+ stackwalkCacheEnableChecked = TRUE;
+ }
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+// static
+void StackwalkCache::Init()
+{
+#if defined(_TARGET_AMD64_) && !defined(DACCESS_COMPILE)
+ g_StackwalkCacheLock.Init(CrstSecurityStackwalkCache, CRST_UNSAFE_ANYMODE);
+#endif // _TARGET_AMD64_
+}
+
+/*
+    Returns an efficient hash-table key based on the provided IP.
+    CPU-architecture dependent.
+*/
+inline unsigned StackwalkCache::GetKey(UINT_PTR IP)
+{
+ LIMITED_METHOD_CONTRACT;
+ return (unsigned)(((IP >> LOG_NUM_OF_CACHE_ENTRIES) ^ IP) & (NUM_OF_CACHE_ENTRIES-1));
+}
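+// Example: with LOG_NUM_OF_CACHE_ENTRIES == 10, IP 0x12345678 hashes to
+// ((0x12345678 >> 10) ^ 0x12345678) & 0x3FF == 0x1230DB6D & 0x3FF == 0x36D.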
+
+/*
+    Looks in the cache and copies the matching entry into m_CacheEntry,
+    returning TRUE if the current IP is cached.
+    The JIT team guarantees the same ESP offset for the same IP across different call chains.
+*/
+BOOL StackwalkCache::Lookup(UINT_PTR IP)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ _ASSERTE(Enabled());
+ _ASSERTE(IP);
+
+ unsigned hkey = GetKey(IP);
+ _ASSERTE(IS_ALIGNED((void*)&g_StackwalkCache[hkey], STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY));
+ // Don't care about m_CacheEntry access atomicity, since it's private to this
+ // stackwalk/thread
+ atomicMoveCacheEntry((UINT64*)&g_StackwalkCache[hkey], (UINT64*)&m_CacheEntry);
+
+#ifdef _DEBUG
+ if (IP != m_CacheEntry.IP)
+ {
+ ClearEntry();
+ }
+#endif
+
+ return (IP == m_CacheEntry.IP);
+#else // !_TARGET_X86_ && !_TARGET_AMD64_
+    return FALSE;
+#endif // !_TARGET_X86_ && !_TARGET_AMD64_
+}
+
+/*
+ Caches data provided for current IP.
+*/
+void StackwalkCache::Insert(StackwalkCacheEntry *pCacheEntry)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ _ASSERTE(Enabled());
+ _ASSERTE(pCacheEntry);
+
+ unsigned hkey = GetKey(pCacheEntry->IP);
+ _ASSERTE(IS_ALIGNED((void*)&g_StackwalkCache[hkey], STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY));
+ atomicMoveCacheEntry((UINT64*)pCacheEntry, (UINT64*)&g_StackwalkCache[hkey]);
+}
+
+// static
+void StackwalkCache::Invalidate(LoaderAllocator * pLoaderAllocator)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ if (!s_Enabled)
+ return;
+
+ /* Note that we could just flush the entries corresponding to
+    pLoaderAllocator if we wanted to get fancy. To keep things simple for now,
+    we just invalidate everything.
+ */
+
+ ZeroMemory(PVOID(&g_StackwalkCache), sizeof(g_StackwalkCache));
+}
+
+//----------------------------------------------------------------------------
+//
+// SetUpRegdisplayForStackWalk - set up Regdisplay for a stack walk
+//
+// Arguments:
+// pThread - pointer to the managed thread to be crawled
+// pContext - pointer to the context
+// pRegdisplay - pointer to the REGDISPLAY to be filled
+//
+// Return Value:
+// None
+//
+//----------------------------------------------------------------------------
+void SetUpRegdisplayForStackWalk(Thread * pThread, T_CONTEXT * pContext, REGDISPLAY * pRegdisplay)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+    // @dbgtodo  filter CONTEXT - The filter CONTEXT will be removed in V3.0.
+ T_CONTEXT * pFilterContext = pThread->GetFilterContext();
+ _ASSERTE(!(pFilterContext && ISREDIRECTEDTHREAD(pThread)));
+
+ if (pFilterContext != NULL)
+ {
+ FillRegDisplay(pRegdisplay, pFilterContext);
+ }
+ else
+ {
+ ZeroMemory(pContext, sizeof(*pContext));
+ FillRegDisplay(pRegdisplay, pContext);
+
+ if (ISREDIRECTEDTHREAD(pThread))
+ {
+ pThread->GetFrame()->UpdateRegDisplay(pRegdisplay);
+ }
+ }
+}
diff --git a/src/vm/stackwalk.h b/src/vm/stackwalk.h
new file mode 100644
index 0000000000..fb45082596
--- /dev/null
+++ b/src/vm/stackwalk.h
@@ -0,0 +1,697 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/* This is a poor man's implementation of virtual methods. */
+/* The purpose of pCrawlFrame is to abstract (at least for the most common cases)
+   from the fact that not all methods are "framed" (basically all methods in
+   "native" code are "unframed"). That way the job for the enumerator callbacks
+   becomes much simpler (i.e. more transparent and hopefully less error prone).
+   Two call-backs still need to distinguish between the two types: GC and exception.
+   Both of these call-backs need to do really different things; for frameless methods
+   they need to go through the code manager and use the respective APIs.
+
+   The reason for not implementing virtual methods on CrawlFrame is solely the way
+   exception handling is implemented (it does a "long jump" and bypasses
+   the enumerator (stackwalker) when it finds a matching frame). By doing so it
+   couldn't properly destruct the dynamically created instance of CrawlFrame.
+*/
+
+#ifndef __stackwalk_h__
+#define __stackwalk_h__
+
+#include "eetwain.h"
+#include "stackwalktypes.h"
+
+class Frame;
+class CrawlFrame;
+class ICodeManager;
+class IJitManager;
+struct EE_ILEXCEPTION;
+class AppDomain;
+
+// This define controls handling of faults in managed code. If it is defined,
+// the exception is handled (retried, actually), with a FaultingExceptionFrame
+// on the stack. The FEF is used for unwinding. If not defined, the unwinding
+// uses the exception context.
+#define USE_FEF // to mark where code needs to be changed to eliminate the FEF
+#if defined(_TARGET_X86_)
+ #undef USE_FEF // Turn off the FEF use on x86.
+ #define ELIMINATE_FEF
+#else
+ #if defined(ELIMINATE_FEF)
+ #undef ELIMINATE_FEF
+ #endif
+#endif // _TARGET_X86_
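+// Net effect: on x86, ELIMINATE_FEF is defined and USE_FEF is not, so faults in
+// managed code are resynced from the CONTEXT saved on the ExInfo (see
+// ExInfoWalker below); on all other targets USE_FEF stays defined and a
+// FaultingExceptionFrame is pushed instead.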
+
+//************************************************************************
+// Enumerate all functions.
+//************************************************************************
+
+/* This enumerator is meant to be used for the most common cases, i.e. to
+ enumerate just all the functions of the requested thread. It is just a
+ cover for the "real" enumerator.
+ */
+
+StackWalkAction StackWalkFunctions(Thread * thread, PSTACKWALKFRAMESCALLBACK pCallback, VOID * pData);
+
+/*<TODO>@ISSUE: Maybe use a define instead?</TODO>
+#define StackWalkFunctions(thread, callBack, userdata) thread->StackWalkFrames(METHODSONLY, (callBack),(userData))
+*/
+
+
+class CrawlFrame
+{
+public:
+
+#ifdef _TARGET_X86_
+ friend StackWalkAction TAStackCrawlCallBack(CrawlFrame* pCf, void* data);
+#endif // _TARGET_X86_
+
+ //************************************************************************
+ // Functions available for the callbacks (using the current pCrawlFrame)
+ //************************************************************************
+
+ /* Widely used/benign functions */
+
+ /* Is this a function? */
+ /* Returns either a MethodDesc* or NULL for "non-function" frames */
+ //<TODO>@TODO: what will it return for transition frames?</TODO>
+
+#ifdef FEATURE_INTERPRETER
+ MethodDesc *GetFunction();
+#else // FEATURE_INTERPRETER
+ inline MethodDesc *GetFunction()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return pFunc;
+ }
+#endif
+
+
+ Assembly *GetAssembly();
+
+ /* Returns either a Frame * (for "framed items) or
+ Returns NULL for frameless functions
+ */
+ inline Frame* GetFrame() // will return NULL for "frameless methods"
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (isFrameless)
+ return NULL;
+ else
+ return pFrame;
+ }
+
+ BOOL IsInCalleesFrames(LPVOID stackPointer);
+
+    /* Returns the address of the security object stored in the current function (method?).
+       Returns NULL if
+            - not a function OR
+            - the function (method?) hasn't reserved any room for it
+ (which is an error)
+ */
+ OBJECTREF * GetAddrOfSecurityObject();
+
+ // Fetch the extra type argument passed in some cases
+ PTR_VOID GetParamTypeArg();
+
+ /* Returns the "this" pointer of the method of the current frame -- at least in some cases.
+ Returns NULL if the current frame does not have a method, or that method is not an instance method of a class type.
+ Otherwise, the semantics currently depend, unfortunately, on the architecture. On non-x86 architectures,
+ should only be called for methods where the generic instantiation context is found via the this pointer (so that
+ this information will be encoded in the GC Info). On x86, can be called for this case, or if the method
+ is synchronized.
+ */
+ OBJECTREF GetThisPointer();
+
+ /*
+      Returns the ambient stack pointer for this crawlframe.
+      Must be a frameless method.
+      Returns NULL if not available (includes prolog + epilog).
+      This is safe to call on all methods, but it may return
+      garbage if the method does not have an ambient SP (e.g., ebp-based methods).
+      x86 is the only platform using the ambient SP.
+ */
+ TADDR GetAmbientSPFromCrawlFrame();
+
+ void GetExactGenericInstantiations(Instantiation *pClassInst,
+ Instantiation *pMethodInst);
+
+ /* Returns extra information required to reconstruct exact generic parameters,
+ if any.
+ Returns NULL if
+ - no extra information is required (i.e. the code is non-shared, which
+ you can tell from the MethodDesc)
+ - the extra information is not available (i.e. optimized away or codegen problem)
+ Returns a MethodTable if the pMD returned by GetFunction satisfies RequiresInstMethodTableArg,
+ and returns a MethodDesc if the pMD returned by GetFunction satisfies RequiresInstMethodDescArg.
+ These together carry the exact instantiation information.
+ */
+ PTR_VOID GetExactGenericArgsToken();
+
+ inline CodeManState * GetCodeManState() { LIMITED_METHOD_DAC_CONTRACT; return & codeManState; }
+ /*
+      IF YOU USE ANY OF THE SUBSEQUENT FUNCTIONS, YOU NEED TO REALLY UNDERSTAND THE
+      STACK-WALKER (INCLUDING UNWINDING OF METHODS IN MANAGED NATIVE CODE)!
+      YOU ALSO NEED TO UNDERSTAND THAT THESE FUNCTIONS MIGHT CHANGE ON AN AS-NEEDED BASIS.
+ */
+
+ /* The rest are meant to be used only by the exception catcher and the GC call-back */
+
+ /* Is currently a frame available? */
+ /* conceptually returns (GetFrame(pCrawlFrame) == NULL)
+ */
+ inline bool IsFrameless()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return isFrameless;
+ }
+
+
+ /* Is it the current active (top-most) frame
+ */
+ inline bool IsActiveFrame()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return isFirst;
+ }
+
+ /* Is it the current active function (top-most frame)
+ asserts for non-functions, should be used for managed native code only
+ */
+ inline bool IsActiveFunc()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (pFunc && isFirst);
+ }
+
+ /* Is it the current active function (top-most frame)
+ which faulted or threw an exception ?
+ asserts for non-functions, should be used for managed native code only
+ */
+ bool IsInterrupted()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (pFunc && isInterrupted /* && isFrameless?? */);
+ }
+
+ /* Is it the current active function (top-most frame) which faulted ?
+ asserts for non-functions, should be used for managed native code only
+ */
+ bool HasFaulted()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (pFunc && hasFaulted /* && isFrameless?? */);
+ }
+
+ /* Is this CrawlFrame just marking that we're in native code?
+ Such frames are only provided when the stackwalk is inited w/ NOTIFY_ON_U2M_TRANSITIONS.
+ The only use of these crawlframes is to get the Regdisplay.
+ */
+ bool IsNativeMarker()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return isNativeMarker;
+ }
+
+ /* x86 does not always push a FaultingExceptionFrame on the stack when there is a native exception
+ (e.g. a breakpoint). In this case, it relies on the CONTEXT stored on the ExInfo to resume
+ the stackwalk at the managed stack frame which has faulted.
+
+ This flag is set when the stackwalker is stopped at such a no-explicit-frame transition. Conceptually
+ this is just like stopping at a transition frame. Note that the stackwalker only stops at no-frame
+ transition if NOTIFY_ON_NO_FRAME_TRANSITIONS is set.
+ */
+ bool IsNoFrameTransition()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return isNoFrameTransition;
+ }
+
+ // A no-frame transition is one protected by an ExInfo. It's an optimization on x86 to avoid pushing a
+ // FaultingExceptionFrame (FEF). Thus, for all intents and purposes, we should treat a no-frame
+ // transition as a FEF. This function returns a stack address for the no-frame transition to substitute
+ // as the frame address of a FEF. It's currently only used by the debugger stackwalker.
+ TADDR GetNoFrameTransitionMarker()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (isNoFrameTransition ? taNoFrameTransitionMarker : NULL);
+ }
+
+ /* Has the IP been adjusted to a point where it is safe to do GC ?
+ (for OutOfLineThrownExceptionFrame)
+ asserts for non-functions, should be used for managed native code only
+ */
+ bool IsIPadjusted()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (pFunc && isIPadjusted /* && isFrameless?? */);
+ }
+
+    /* Gets the ICodeManagerFlags for the current frame */
+
+ unsigned GetCodeManagerFlags()
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ unsigned flags = 0;
+
+ if (IsActiveFunc())
+ flags |= ActiveStackFrame;
+
+ if (IsInterrupted())
+ {
+ flags |= ExecutionAborted;
+
+ if (!HasFaulted() && !IsIPadjusted())
+ {
+ _ASSERTE(!(flags & ActiveStackFrame));
+ flags |= AbortingCall;
+ }
+ }
+
+#if defined(WIN64EXCEPTIONS)
+ if (ShouldParentToFuncletSkipReportingGCReferences())
+ {
+ flags |= ParentOfFuncletStackFrame;
+ }
+#endif // defined(WIN64EXCEPTIONS)
+
+ return flags;
+ }
+
+ AppDomain *GetAppDomain()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return pAppDomain;
+ }
+
+ /* Is this frame at a safe spot for GC?
+ */
+ bool IsGcSafe();
+
+
+ PREGDISPLAY GetRegisterSet()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // We would like to make the following assertion, but it is legitimately
+ // violated when we perform a crawl to find the return address for a hijack.
+ // _ASSERTE(isFrameless);
+ return pRD;
+ }
+
+ EECodeInfo * GetCodeInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+        // This assumes that CrawlFrame is a host-only structure under DACCESS_COMPILE
+        // and thus this always returns the host address.
+ return &codeInfo;
+ }
+
+ PTR_VOID GetGCInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(isFrameless);
+ return codeInfo.GetGCInfo();
+ }
+
+ const METHODTOKEN& GetMethodToken()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(isFrameless);
+ return codeInfo.GetMethodToken();
+ }
+
+ unsigned GetRelOffset()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(isFrameless);
+ return codeInfo.GetRelOffset();
+ }
+
+ IJitManager* GetJitManager()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(isFrameless);
+ return codeInfo.GetJitManager();
+ }
+
+ ICodeManager* GetCodeManager()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(isFrameless);
+ return codeInfo.GetCodeManager();
+ }
+
+ inline StackwalkCacheEntry* GetStackwalkCacheEntry()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE (isCachedMethod != stackWalkCache.IsEmpty());
+ if (isCachedMethod && stackWalkCache.m_CacheEntry.IsSafeToUseCache())
+ {
+ return &(stackWalkCache.m_CacheEntry);
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+
+ void CheckGSCookies();
+
+#if defined(WIN64EXCEPTIONS)
+ bool IsFunclet()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (!IsFrameless())
+ return false;
+
+ return !!codeInfo.IsFunclet();
+ }
+
+ bool IsFilterFunclet();
+
+ // Indicates if the funclet has already reported GC
+ // references (or not). This will return true if
+ // we come across the parent frame of a funclet
+ // that is active on the stack.
+ bool ShouldParentToFuncletSkipReportingGCReferences()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return fShouldParentToFuncletSkipReportingGCReferences;
+ }
+
+ bool ShouldCrawlframeReportGCReferences()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return fShouldCrawlframeReportGCReferences;
+ }
+
+#endif // WIN64EXCEPTIONS
+
+protected:
+ // CrawlFrames are temporarily created by the enumerator.
+ // Do not create one from C++. This protected constructor polices this rule.
+ CrawlFrame();
+
+ void SetCurGSCookie(GSCookie * pGSCookie);
+
+private:
+
+ friend class Thread;
+ friend class EECodeManager;
+ friend class StackFrameIterator;
+#ifdef WIN64EXCEPTIONS
+ friend class ExceptionTracker;
+#endif // WIN64EXCEPTIONS
+
+ CodeManState codeManState;
+
+ bool isFrameless;
+ bool isFirst;
+
+ // The next three fields are only valid for managed stack frames. They are set using attributes
+ // on explicit frames, and they are reset after processing each managed stack frame.
+ bool isInterrupted;
+ bool hasFaulted;
+ bool isIPadjusted;
+
+ bool isNativeMarker;
+ bool isProfilerDoStackSnapshot;
+ bool isNoFrameTransition;
+ TADDR taNoFrameTransitionMarker; // see code:CrawlFrame.GetNoFrameTransitionMarker
+ PTR_Frame pFrame;
+ MethodDesc *pFunc;
+
+ // the rest is only used for "frameless methods"
+ AppDomain *pAppDomain;
+ PREGDISPLAY pRD; // "thread context"/"virtual register set"
+
+ EECodeInfo codeInfo;
+#if defined(WIN64EXCEPTIONS)
+ bool isFilterFunclet;
+ bool isFilterFuncletCached;
+ bool fShouldParentToFuncletSkipReportingGCReferences;
+ bool fShouldCrawlframeReportGCReferences;
+#endif //WIN64EXCEPTIONS
+ Thread* pThread;
+
+ // fields used for stackwalk cache
+ OBJECTREF *pSecurityObject;
+ BOOL isCachedMethod;
+ StackwalkCache stackWalkCache;
+
+ GSCookie *pCurGSCookie;
+ GSCookie *pFirstGSCookie;
+
+ friend class Frame; // added to allow 'friend void CrawlFrame::GotoNextFrame();' declaration in class Frame, frames.h
+ void GotoNextFrame();
+};
+
+void GcEnumObject(LPVOID pData, OBJECTREF *pObj);
+StackWalkAction GcStackCrawlCallBack(CrawlFrame* pCF, VOID* pData);
+
+#if defined(ELIMINATE_FEF)
+//******************************************************************************
+// This class is used to help use exception context records to resync a
+// stackwalk, when managed code has generated an exception (e.g., AV, zero-div, etc.).
+// Such an exception causes a transition from the managed code into unmanaged
+// OS and runtime code, but without the benefit of any Frame. This code helps
+// the stackwalker simulate the effect that such a frame would have.
+// In particular, this class has methods to walk the chain of ExInfos, looking
+// for records with pContext pointers with certain characteristics. The
+// characteristics that are important are the location in the stack (ie, is a
+// given pContext relevant at a particular point in the stack walk), and
+// whether the pContext was generated in managed code.
+//******************************************************************************
+class ExInfoWalker
+{
+public:
+ ExInfoWalker() : m_pExInfo(0) { SUPPORTS_DAC; }
+ void Init (ExInfo *pExInfo) { SUPPORTS_DAC; m_pExInfo = pExInfo; }
+ // Skip one ExInfo.
+ void WalkOne();
+ // Attempt to find an ExInfo with a pContext that is higher (older) than
+ // a given minimum location.
+ void WalkToPosition(TADDR taMinimum, BOOL bPopFrames);
+ // Attempt to find an ExInfo with a pContext that has an IP in managed code.
+ void WalkToManaged();
+ // Return current ExInfo's m_pContext, or NULL if no m_pExInfo.
+ PTR_CONTEXT GetContext() { SUPPORTS_DAC; return m_pExInfo ? m_pExInfo->m_pContext : NULL; }
+ // Useful to see if there is more on the ExInfo chain.
+ ExInfo* GetExInfo() { SUPPORTS_DAC; return m_pExInfo; }
+
+ // helper functions for retrieving information from the exception CONTEXT
+ TADDR GetSPFromContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return dac_cast<TADDR>((m_pExInfo && m_pExInfo->m_pContext) ? GetSP(m_pExInfo->m_pContext) : PTR_NULL);
+ }
+
+ TADDR GetEBPFromContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return dac_cast<TADDR>((m_pExInfo && m_pExInfo->m_pContext) ? GetFP(m_pExInfo->m_pContext) : PTR_NULL);
+ }
+
+ DWORD GetFault() { SUPPORTS_DAC; return m_pExInfo ? m_pExInfo->m_pExceptionRecord->ExceptionCode : 0; }
+
+private:
+ ExInfo *m_pExInfo;
+}; // class ExInfoWalker
+#endif // ELIMINATE_FEF
+
+
+//---------------------------------------------------------------------------------------
+//
+// This iterator class walks the stack of a managed thread. Where the iterator stops depends on the
+// stackwalk flags.
+//
+// Notes:
+// This class works both in-process and out-of-process (e.g. DAC).
+//
+
+class StackFrameIterator
+{
+public:
+ // This constructor is for the usage pattern of creating an uninitialized StackFrameIterator and then
+ // calling Init() on it.
+ StackFrameIterator(void);
+
+ // This constructor is for the usage pattern of creating an initialized StackFrameIterator and then
+ // calling ResetRegDisp() on it.
+ StackFrameIterator(Thread * pThread, PTR_Frame pFrame, ULONG32 flags);
+
+ //
+ // We should consider merging Init() and ResetRegDisp().
+ //
+
+ // Initialize the iterator. Note that the iterator has thread-affinity,
+ // and the stackwalk flags cannot be changed once the iterator is created.
+ BOOL Init(Thread * pThread,
+ PTR_Frame pFrame,
+ PREGDISPLAY pRegDisp,
+ ULONG32 flags);
+
+ // Reset the iterator to the specified REGDISPLAY. The caller must ensure that the REGDISPLAY is valid.
+ BOOL ResetRegDisp(PREGDISPLAY pRegDisp,
+ bool fIsFirst);
+
+ // @dbgtodo inspection - This function should be removed once the Windows debuggers stop using the old DAC API.
+ void SetIsFirstFrame(bool isFirst)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_crawl.isFirst = isFirst;
+ }
+
+ // whether the iterator has reached the root of the stack or not
+ BOOL IsValid(void);
+
+ // advance to the next frame according to the stackwalk flags
+ StackWalkAction Next(void);
+
+ enum FrameState
+ {
+ SFITER_UNINITIALIZED, // uninitialized
+ SFITER_FRAMELESS_METHOD, // managed stack frame
+ SFITER_FRAME_FUNCTION, // explicit frame
+ SFITER_SKIPPED_FRAME_FUNCTION, // skipped explicit frame
+ SFITER_NO_FRAME_TRANSITION, // no-frame transition (currently used for ExInfo only)
+ SFITER_NATIVE_MARKER_FRAME, // the native stack frame immediately below (stack grows up)
+ // a managed stack region
+ SFITER_INITIAL_NATIVE_CONTEXT, // initial native seed CONTEXT
+ SFITER_DONE, // the iterator has reached the end of the stack
+ };
+ FrameState GetFrameState() {LIMITED_METHOD_DAC_CONTRACT; return m_frameState;}
+
+ CrawlFrame m_crawl;
+
+#if defined(_DEBUG)
+ // used in logging
+ UINT32 m_uFramesProcessed;
+#endif // _DEBUG
+
+private:
+ // This is a helper for the two constructors.
+ void CommonCtor(Thread * pThread, PTR_Frame pFrame, ULONG32 flags);
+
+ // Reset the CrawlFrame owned by the iterator. Used by both Init() and ResetRegDisp().
+ void ResetCrawlFrame(void);
+
+ // Check whether we should stop at the current frame given the stackwalk flags.
+ // If not, continue advancing to the next frame.
+ StackWalkAction Filter(void);
+
+ // Advance to the next frame regardless of the stackwalk flags. This is used by Next() and Filter().
+ StackWalkAction NextRaw(void);
+
+ // sync the REGDISPLAY to the current CONTEXT
+ void UpdateRegDisp(void);
+
+ // Check whether the IP is managed code. This function updates the following fields on CrawlFrame:
+ // JitManagerInstance and isFrameless.
+ void ProcessIp(PCODE Ip);
+
+ // Update the CrawlFrame to represent where we have stopped.
+ // This is called after advancing to a new frame.
+ void ProcessCurrentFrame(void);
+
+ // If an explicit frame is allocated in a managed stack frame (e.g. an inlined pinvoke call),
+ // we may have skipped an explicit frame. This function checks for them.
+ BOOL CheckForSkippedFrames(void);
+
+ // Perform the necessary tasks before stopping at a managed stack frame. This is mostly validation work.
+ void PreProcessingForManagedFrames(void);
+
+ // Perform the necessary tasks after stopping at a managed stack frame and unwinding to its caller.
+ // This includes advancing the ExInfo and checking whether the new IP is managed.
+ void PostProcessingForManagedFrames(void);
+
+ // Perform the necessary tasks after stopping at a no-frame transition. This includes loading
+ // the CONTEXT stored in the ExInfo and updating the REGDISPLAY to the faulting managed stack frame.
+ void PostProcessingForNoFrameTransition(void);
+
+#if defined(WIN64EXCEPTIONS)
+ void ResetGCRefReportingState(bool ResetOnlyIntermediaryState = false)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (!ResetOnlyIntermediaryState)
+ {
+ m_sfFuncletParent = StackFrame();
+ m_fProcessNonFilterFunclet = false;
+ }
+
+ m_sfIntermediaryFuncletParent = StackFrame();
+ m_fProcessIntermediaryNonFilterFunclet = false;
+ }
+#endif // defined(WIN64EXCEPTIONS)
+
+ // Iteration state.
+ FrameState m_frameState;
+
+ // Initial state. Must be preserved for restarting.
+ Thread * m_pThread; // Thread on which to walk.
+
+ PTR_Frame m_pStartFrame; // Frame* passed to Init
+
+ // This is the real starting explicit frame. If m_pStartFrame is NULL,
+ // then this is equal to m_pThread->GetFrame(). Otherwise this is equal to m_pStartFrame.
+ INDEBUG(PTR_Frame m_pRealStartFrame);
+
+ ULONG32 m_flags; // StackWalkFrames flags.
+ ICodeManagerFlags m_codeManFlags;
+ ExecutionManager::ScanFlag m_scanFlag;
+
+ // the following fields are used to cache information about a managed stack frame
+ // when we need to stop for skipped explicit frames
+ EECodeInfo m_cachedCodeInfo;
+ PTR_VOID m_pCachedGCInfo;
+
+ GSCookie * m_pCachedGSCookie;
+
+#if defined(ELIMINATE_FEF)
+ ExInfoWalker m_exInfoWalk;
+#endif // ELIMINATE_FEF
+
+#if defined(WIN64EXCEPTIONS)
+ // used in funclet-skipping
+ StackFrame m_sfParent;
+
+ // Used in GC reference enumeration mode
+ StackFrame m_sfFuncletParent;
+ bool m_fProcessNonFilterFunclet;
+ StackFrame m_sfIntermediaryFuncletParent;
+ bool m_fProcessIntermediaryNonFilterFunclet;
+ bool m_fDidFuncletReportGCReferences;
+#endif // WIN64EXCEPTIONS
+
+#if defined(_WIN64) || defined(_TARGET_ARM_)
+ LPVOID m_pvResumableFrameTargetSP;
+#endif // defined(_WIN64) || defined(_TARGET_ARM_)
+};
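+// Illustrative usage sketch (an assumption about the calling pattern, given a
+// valid Thread* and REGDISPLAY; it is not the only way the iterator is driven):
+//
+//     StackFrameIterator iter;
+//     if (iter.Init(pThread, /* pStartFrame */ NULL, pRD, flags) && iter.IsValid())
+//     {
+//         while (iter.IsValid())
+//         {
+//             // inspect iter.m_crawl and iter.GetFrameState() here
+//             if (iter.Next() == SWA_FAILED)
+//                 break;
+//         }
+//     }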
+
+void SetUpRegdisplayForStackWalk(Thread * pThread, T_CONTEXT * pContext, REGDISPLAY * pRegdisplay);
+
+#endif // __stackwalk_h__
diff --git a/src/vm/stackwalktypes.h b/src/vm/stackwalktypes.h
new file mode 100644
index 0000000000..f2be145c81
--- /dev/null
+++ b/src/vm/stackwalktypes.h
@@ -0,0 +1,244 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ============================================================================
+// File: stackwalktypes.h
+//
+
+// ============================================================================
+// Contains types used by stackwalk.h.
+
+
+#ifndef __STACKWALKTYPES_H__
+#define __STACKWALKTYPES_H__
+
+class CrawlFrame;
+struct RangeSection;
+struct StackwalkCacheEntry;
+
+//
+// This type should be used internally inside the code manager only. EECodeInfo should
+// be used in general code instead. Ideally, we would replace all uses of METHODTOKEN
+// with EECodeInfo.
+//
+struct METHODTOKEN
+{
+ METHODTOKEN(RangeSection * pRangeSection, TADDR pCodeHeader)
+ : m_pRangeSection(pRangeSection), m_pCodeHeader(pCodeHeader)
+ {
+ }
+
+ METHODTOKEN()
+ {
+ }
+
+ // Cache of RangeSection containing the code to avoid redundant lookups.
+ RangeSection * m_pRangeSection;
+
+ // CodeHeader* for EEJitManager
+ // PTR_RUNTIME_FUNCTION for managed native code
+ TADDR m_pCodeHeader;
+
+ BOOL IsNull() const
+ {
+ return m_pCodeHeader == NULL;
+ }
+};
+
+//************************************************************************
+// Stack walking
+//************************************************************************
+enum StackCrawlMark
+{
+ LookForMe = 0,
+ LookForMyCaller = 1,
+ LookForMyCallersCaller = 2,
+ LookForThread = 3
+};
+
+enum StackWalkAction
+{
+ SWA_CONTINUE = 0, // continue walking
+ SWA_ABORT = 1, // stop walking, early out in "failure case"
+ SWA_FAILED = 2 // couldn't walk stack
+};
+
+#define SWA_DONE SWA_CONTINUE
+
+
+// Pointer to the StackWalk callback function.
+typedef StackWalkAction (*PSTACKWALKFRAMESCALLBACK)(
+    CrawlFrame       *pCF,      // current CrawlFrame
+    VOID*             pData     // Caller's private data
+);
+
+/******************************************************************************
+   StackwalkCache: class implementing stackwalk perf optimization features.
+   StackwalkCacheEntry array: a very simple per-thread hash table keeping cached data.
+ StackwalkCacheUnwindInfo: used by EECodeManager::UnwindStackFrame to return
+ stackwalk cache flags.
+ Cf. Ilyakoz for any questions.
+*/
+
+struct StackwalkCacheUnwindInfo
+{
+#if defined(_TARGET_AMD64_)
+ ULONG RBPOffset;
+ ULONG RSPOffsetFromUnwindInfo;
+#else // !_TARGET_AMD64_
+ size_t securityObjectOffset; // offset of SecurityObject. 0 if there is no security object
+ BOOL fUseEbp; // Is EBP modified by the method - either for a frame-pointer or for a scratch-register?
+ BOOL fUseEbpAsFrameReg; // use EBP as the frame pointer?
+#endif // !_TARGET_AMD64_
+
+ inline StackwalkCacheUnwindInfo() { SUPPORTS_DAC; ZeroMemory(this, sizeof(StackwalkCacheUnwindInfo)); }
+ StackwalkCacheUnwindInfo(StackwalkCacheEntry * pCacheEntry);
+};
+
+//************************************************************************
+
+#if defined(_WIN64)
+ #define STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY 0x10
+#else // !_WIN64
+ #define STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY 0x8
+#endif // !_WIN64
+
+DECLSPEC_ALIGN(STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY)
+struct StackwalkCacheEntry
+{
+ //
+    //  Don't rearrange the fields: the invalid value 0x8000000000000000 must never
+    //  appear as a StackwalkCacheEntry. This is required for the atomic 8-byte move
+    //  (see atomicMoveCacheEntry) using the FILD/FISTP instructions.
+ //
+ UINT_PTR IP;
+#if !defined(_TARGET_AMD64_)
+ WORD ESPOffset:15; // stack offset (frame size + pending arguments + etc)
+ WORD securityObjectOffset:3;// offset of SecurityObject. 0 if there is no security object
+ WORD fUseEbp:1; // For ESP methods, is EBP touched at all?
+ WORD fUseEbpAsFrameReg:1; // use EBP as the frame register?
+ WORD argSize:11; // size of args pushed on stack
+#else // _TARGET_AMD64_
+ DWORD RSPOffset;
+ DWORD RBPOffset;
+#endif // _TARGET_AMD64_
+
+ inline BOOL Init(UINT_PTR IP,
+ UINT_PTR SPOffset,
+ StackwalkCacheUnwindInfo *pUnwindInfo,
+ UINT_PTR argSize)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ this->IP = IP;
+
+#if defined(_TARGET_X86_)
+ this->ESPOffset = SPOffset;
+ this->argSize = argSize;
+
+ this->securityObjectOffset = (WORD)pUnwindInfo->securityObjectOffset;
+ _ASSERTE(this->securityObjectOffset == pUnwindInfo->securityObjectOffset);
+
+ this->fUseEbp = pUnwindInfo->fUseEbp;
+ this->fUseEbpAsFrameReg = pUnwindInfo->fUseEbpAsFrameReg;
+ _ASSERTE(!fUseEbpAsFrameReg || fUseEbp);
+
+        // return success only if SPOffset and argSize fit into the bit-fields
+ return ((this->ESPOffset == SPOffset) &&
+ (this->argSize == argSize));
+#elif defined(_TARGET_AMD64_)
+ // The size of a stack frame is guaranteed to fit in 4 bytes, so we don't need to check RSPOffset and RBPOffset.
+
+ // The actual SP offset may be bigger than the offset we get from the unwind info because of stack allocations.
+ _ASSERTE(SPOffset >= pUnwindInfo->RSPOffsetFromUnwindInfo);
+
+ _ASSERTE(FitsIn<DWORD>(SPOffset));
+ this->RSPOffset = static_cast<DWORD>(SPOffset);
+ _ASSERTE(FitsIn<DWORD>(pUnwindInfo->RBPOffset + (SPOffset - pUnwindInfo->RSPOffsetFromUnwindInfo)));
+ this->RBPOffset = static_cast<DWORD>(pUnwindInfo->RBPOffset + (SPOffset - pUnwindInfo->RSPOffsetFromUnwindInfo));
+ return TRUE;
+#else // !_TARGET_X86_ && !_TARGET_AMD64_
+ return FALSE;
+#endif // !_TARGET_X86_ && !_TARGET_AMD64_
+ }
+
+ inline BOOL HasSecurityObject()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#if defined(_TARGET_X86_)
+ return securityObjectOffset != 0;
+#else // !_TARGET_X86_
+ // On AMD64 we don't save anything by grabbing the security object before it is needed. This is because
+ // we need to crack the GC info in order to find the security object, and to unwind we only need to
+ // crack the unwind info.
+ return FALSE;
+#endif // !_TARGET_X86_
+ }
+
+ inline BOOL IsSafeToUseCache()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#if defined(_TARGET_X86_)
+ return (!fUseEbp || fUseEbpAsFrameReg);
+#elif defined(_TARGET_AMD64_)
+ return TRUE;
+#else // !_TARGET_X86_ && !_TARGET_AMD64_
+ return FALSE;
+#endif // !_TARGET_X86_ && !_TARGET_AMD64_
+ }
+};
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+static_assert_no_msg(sizeof(StackwalkCacheEntry) == 2 * sizeof(UINT_PTR));
+#endif // _TARGET_X86_ || _TARGET_AMD64_
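+// Layout math behind the assert above: on x86 the bit-fields pack into two WORDs
+// (15 bits, then 3+1+1+11 = 16 bits), so IP (4 bytes) + flags (4 bytes) = 8 bytes;
+// on AMD64, IP (8 bytes) + two DWORD offsets = 16 bytes. Both are 2 * sizeof(UINT_PTR).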
+
+//************************************************************************
+
+class StackwalkCache
+{
+ public:
+ BOOL Lookup(UINT_PTR IP);
+ void Insert(StackwalkCacheEntry *pCacheEntry);
+ inline void ClearEntry () { LIMITED_METHOD_DAC_CONTRACT; m_CacheEntry.IP = 0; }
+ inline BOOL Enabled() { LIMITED_METHOD_DAC_CONTRACT; return s_Enabled; };
+ inline BOOL IsEmpty () { LIMITED_METHOD_CONTRACT; return m_CacheEntry.IP == 0; }
+
+#ifndef DACCESS_COMPILE
+ StackwalkCache();
+#endif
+ static void Init();
+
+ StackwalkCacheEntry m_CacheEntry; // local copy of Global Cache entry for current IP
+
+ static void Invalidate(LoaderAllocator * pLoaderAllocator);
+
+ private:
+ unsigned GetKey(UINT_PTR IP);
+
+#ifdef DACCESS_COMPILE
+ // DAC can't rely on the cache here
+ const static BOOL s_Enabled;
+#else
+ static BOOL s_Enabled;
+#endif
+};
+
+//************************************************************************
+
+inline StackwalkCacheUnwindInfo::StackwalkCacheUnwindInfo(StackwalkCacheEntry * pCacheEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#if defined(_TARGET_AMD64_)
+ RBPOffset = pCacheEntry->RBPOffset;
+#else // !_TARGET_AMD64_
+ securityObjectOffset = pCacheEntry->securityObjectOffset;
+ fUseEbp = pCacheEntry->fUseEbp;
+ fUseEbpAsFrameReg = pCacheEntry->fUseEbpAsFrameReg;
+#endif // !_TARGET_AMD64_
+}
+
+#endif // __STACKWALKTYPES_H__
diff --git a/src/vm/staticallocationhelpers.inl b/src/vm/staticallocationhelpers.inl
new file mode 100644
index 0000000000..73b389e2a2
--- /dev/null
+++ b/src/vm/staticallocationhelpers.inl
@@ -0,0 +1,240 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// StaticAllocationHelpers.inl -
+//
+
+//
+// Helpers used to determine static offset allocation. Placed into an inl file so as to be shareable between
+// mdilbind and the vm codebases.
+//
+//
+#ifndef StaticAllocationHelpers_INL
+#define StaticAllocationHelpers_INL
+
+// Will return underlying type if it's an enum
+// ELEMENT_TYPE_VALUETYPE if it is a non enum
+// ELEMENT_TYPE_END if it doesn't know (we may not want to load other assemblies)
+#ifdef CLR_STANDALONE_BINDER
+static CorElementType ParseMetadataForStaticsIsValueTypeEnum(MdilModule * pModule, IMetaDataImport2 *pImport, mdToken tk)
+#else
+static CorElementType ParseMetadataForStaticsIsValueTypeEnum(Module * pModule, IMDInternalImport *pImport, mdToken tk)
+#endif
+{
+ STANDARD_VM_CONTRACT;
+
+ if (TypeFromToken(tk) != mdtTypeDef)
+ {
+ // At this point, we would have to load other assemblies. The only one we have guaranteed
+ // to be there is mscorlib.
+ return ELEMENT_TYPE_END;
+ }
+
+    // The only condition we will be checking is that the parent of the type is System.Enum.
+    // The rest of the checks will be handled by the class loader, which will fail to load
+    // the type if it's malformed; hence, no need to do all the checks here.
+ mdToken tkParent = 0;
+ DWORD dwParentAttr = 0;
+
+#ifdef CLR_STANDALONE_BINDER
+ if (FAILED(pImport->GetTypeDefProps(tk, NULL, 0, NULL, &dwParentAttr, &tkParent)))
+#else
+ if (FAILED(pImport->GetTypeDefProps(tk, &dwParentAttr, &tkParent)))
+#endif
+ {
+ return ELEMENT_TYPE_END;
+ }
+
+ if (RidFromToken(tkParent) == 0)
+ {
+ return ELEMENT_TYPE_END;
+ }
+
+#ifdef CLR_STANDALONE_BINDER
+ WCHAR wszTypeName[MAX_CLASS_NAME];
+ ULONG cchTypeName;
+#else
+ LPCSTR szName = NULL;
+ LPCSTR szNamespace = NULL;
+#endif
+
+ switch (TypeFromToken(tkParent))
+ {
+ case mdtTypeDef:
+#ifdef CLR_STANDALONE_BINDER
+ if (FAILED(pImport->GetTypeDefProps(tkParent, wszTypeName, _countof(wszTypeName), &cchTypeName, NULL, NULL)))
+#else
+ if (FAILED(pImport->GetNameOfTypeDef(tkParent, &szName, &szNamespace)))
+#endif
+ {
+ return ELEMENT_TYPE_END;
+ }
+ break;
+ case mdtTypeRef:
+#ifdef CLR_STANDALONE_BINDER
+ if (FAILED(pImport->GetTypeRefProps(tkParent, NULL, wszTypeName, _countof(wszTypeName), &cchTypeName)))
+#else
+ if (FAILED(pImport->GetNameOfTypeRef(tkParent, &szNamespace, &szName)))
+#endif
+ {
+ return ELEMENT_TYPE_END;
+ }
+ break;
+ default:
+ return ELEMENT_TYPE_END;
+ }
+
+#ifndef CLR_STANDALONE_BINDER
+ if (szName == NULL || szNamespace == NULL)
+ {
+ return ELEMENT_TYPE_END;
+ }
+#endif
+
+    // If it doesn't inherit from System.Enum, then it must be a value type.
+    // Note that the loader will not load malformed types, so this check is enough.
+#ifdef CLR_STANDALONE_BINDER
+ if (wcscmp(wszTypeName, L"System.Enum") != 0)
+#else
+ if (strcmp(szName,"Enum") != 0 || strcmp(szNamespace,"System") != 0)
+#endif
+ {
+ return ELEMENT_TYPE_VALUETYPE;
+ }
+
+ // OK, it's an enum; find its instance field and get its type
+#ifdef CLR_STANDALONE_BINDER
+ HCORENUM hEnumFields = NULL;
+ CloseHCORENUMOnDestruct hEnumFieldsDestruct(pImport, &hEnumFields);
+ ULONG cFields;
+ HRESULT hr;
+#else
+ HENUMInternalHolder hEnum(pImport);
+#endif
+ mdToken tkField;
+#ifdef CLR_STANDALONE_BINDER
+ while (S_OK == (hr = pImport->EnumFields(&hEnumFields, tk, &tkField, 1, &cFields)))
+#else
+ hEnum.EnumInit(mdtFieldDef,tk);
+ while (pImport->EnumNext(&hEnum,&tkField))
+#endif
+ {
+#ifdef CLR_STANDALONE_BINDER
+ _ASSERTE(cFields == 1);
+#endif
+ PCCOR_SIGNATURE pMemberSignature;
+ DWORD cMemberSignature;
+
+ // Get the type of the static field.
+ DWORD dwMemberAttribs;
+
+#ifdef CLR_STANDALONE_BINDER
+ IfFailThrow(pImport->GetFieldProps(tkField, NULL, NULL, 0, NULL, &dwMemberAttribs, &pMemberSignature, &cMemberSignature, NULL, NULL, NULL));
+#else
+ IfFailThrow(pImport->GetFieldDefProps(tkField, &dwMemberAttribs));
+#endif
+
+ if (!IsFdStatic(dwMemberAttribs))
+ {
+#ifndef CLR_STANDALONE_BINDER
+ IfFailThrow(pImport->GetSigOfFieldDef(tkField, &cMemberSignature, &pMemberSignature));
+
+ IfFailThrow(validateTokenSig(tkField,pMemberSignature,cMemberSignature,dwMemberAttribs,pImport));
+#endif
+
+ SigTypeContext typeContext;
+ MetaSig fsig(pMemberSignature, cMemberSignature, pModule, &typeContext, MetaSig::sigField);
+ CorElementType ElementType = fsig.NextArg();
+ return ElementType;
+ }
+ }
+
+#ifdef CLR_STANDALONE_BINDER
+ IfFailThrow(hr);
+#endif
+
+ // no instance field found -- error!
+ return ELEMENT_TYPE_END;
+}
+
+#ifdef CLR_STANDALONE_BINDER
+#define g_ThreadStaticAttributeClassName L"System.ThreadStaticAttribute"
+static BOOL GetStaticFieldElementTypeForFieldDef(MdilModule * pModule, IMetaDataImport2 *pImport, mdToken field, CorElementType *pElementType, mdToken *ptkValueTypeToken, int *pkk)
+#else
+static BOOL GetStaticFieldElementTypeForFieldDef(Module * pModule, IMDInternalImport *pImport, mdToken field, CorElementType *pElementType, mdToken *ptkValueTypeToken, int *pkk)
+#endif
+{
+ STANDARD_VM_CONTRACT;
+
+ PCCOR_SIGNATURE pMemberSignature;
+ DWORD cMemberSignature;
+ DWORD dwMemberAttribs;
+#ifdef CLR_STANDALONE_BINDER
+ IfFailThrow(pImport->GetFieldProps(field, NULL, NULL, 0, NULL, &dwMemberAttribs, &pMemberSignature, &cMemberSignature, NULL, NULL, NULL));
+#else
+ IfFailThrow(pImport->GetFieldDefProps(field, &dwMemberAttribs));
+#endif
+
+ // Skip non-static and literal fields
+ if (!IsFdStatic(dwMemberAttribs) || IsFdLiteral(dwMemberAttribs))
+ return TRUE;
+
+ // We need to do an extra check to see if this field is ThreadStatic
+ HRESULT hr = pImport->GetCustomAttributeByName((mdToken)field,
+ g_ThreadStaticAttributeClassName,
+ NULL, NULL);
+
+#if defined(FEATURE_LEGACYNETCF) && defined(CLR_STANDALONE_BINDER)
+ // Replicate quirk from code:CMiniMd::CommonGetCustomAttributeByNameEx
+ if (FAILED(hr) && RuntimeIsLegacyNetCF(0))
+ hr = S_FALSE;
+#endif
+
+ IfFailThrow(hr);
+
+ // Use one set of variables for regular statics, and the other set for thread statics
+ *pkk = (hr == S_OK) ? 1 : 0;
+
+
+ // Get the type of the static field.
+#ifndef CLR_STANDALONE_BINDER
+ IfFailThrow(pImport->GetSigOfFieldDef(field, &cMemberSignature, &pMemberSignature));
+ IfFailThrow(validateTokenSig(field,pMemberSignature,cMemberSignature,dwMemberAttribs,pImport));
+#endif
+
+ SigTypeContext typeContext; // <TODO> this is an empty type context: is this right? Should we be explicitly excluding all generic types from this iteration? </TODO>
+ MetaSig fsig(pMemberSignature, cMemberSignature, pModule, &typeContext, MetaSig::sigField);
+ CorElementType ElementType = fsig.NextArg();
+
+ if (ElementType == ELEMENT_TYPE_VALUETYPE)
+ {
+ // See if we can figure out what the value type is
+#ifdef CLR_STANDALONE_BINDER
+ MdilModule *pTokenModule;
+ mdToken tk = PeekValueTypeTokenClosed(&fsig.GetArgProps(), pModule, &typeContext, &pTokenModule);
+#else
+ Module *pTokenModule;
+ mdToken tk = fsig.GetArgProps().PeekValueTypeTokenClosed(pModule, &typeContext, &pTokenModule);
+#endif
+
+ *ptkValueTypeToken = tk;
+
+ // As the current class is not generic, this should never happen, but if it did happen, we
+ // would have a problem.
+ if (pTokenModule != pModule)
+ {
+#ifdef CLR_STANDALONE_BINDER
+ IfFailThrow(COR_E_BADIMAGEFORMAT);
+#else
+ ThrowHR(COR_E_BADIMAGEFORMAT, BFA_METADATA_CORRUPT);
+#endif
+ }
+
+ ElementType = ParseMetadataForStaticsIsValueTypeEnum(pModule, pImport, tk);
+ }
+
+ *pElementType = ElementType;
+ return FALSE;
+}
+#endif // StaticAllocationHelpers_INL
diff --git a/src/vm/stdinterfaces.cpp b/src/vm/stdinterfaces.cpp
new file mode 100644
index 0000000000..a716606053
--- /dev/null
+++ b/src/vm/stdinterfaces.cpp
@@ -0,0 +1,3717 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//---------------------------------------------------------------------------------
+// stdinterfaces.cpp
+//
+// Defines various standard COM interfaces
+
+//---------------------------------------------------------------------------------
+
+
+#include "common.h"
+
+#include <ole2.h>
+#include <guidfromname.h>
+#include <olectl.h>
+#include <objsafe.h> // IID_IObjectSafety
+#include "vars.hpp"
+#include "object.h"
+#include "excep.h"
+#include "frames.h"
+#include "vars.hpp"
+#include "runtimecallablewrapper.h"
+#include "stdinterfaces.h"
+#include "comcallablewrapper.h"
+#include "field.h"
+#include "threads.h"
+#include "interoputil.h"
+#include "tlbexport.h"
+#ifdef FEATURE_COMINTEROP_TLB_SUPPORT
+#include "comtypelibconverter.h"
+#endif
+#include "comdelegate.h"
+#include "olevariant.h"
+#include "eeconfig.h"
+#include "typehandle.h"
+#include "posterror.h"
+#include <corerror.h>
+#include <mscoree.h>
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "mtx.h"
+#include "cgencpu.h"
+#include "interopconverter.h"
+#include "cominterfacemarshaler.h"
+#include "eecontract.h"
+#include "stdinterfaces_internal.h"
+#include <restrictederrorinfo.h> // IRestrictedErrorInfo
+#include "winrttypenameconverter.h"
+#include "interoputil.inl"
+
+
+//------------------------------------------------------------------------------------------
+// Definitions used by the IDispatchEx implementation
+
+// The names of the properties that are accessed on the managed member infos
+#define MEMBER_INFO_NAME_PROP "Name"
+#define MEMBER_INFO_TYPE_PROP "MemberType"
+#define PROPERTY_INFO_CAN_READ_PROP "CanRead"
+#define PROPERTY_INFO_CAN_WRITE_PROP "CanWrite"
+
+
+// {00020430-0000-0000-C000-000000000046}
+static const GUID LIBID_STDOLE2 = { 0x00020430, 0x0000, 0x0000, { 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46 } };
+
+// uuid.lib doesn't include IID_IRestrictedErrorInfo. Remove this once it is added to the lib.
+static const GUID IID_IRestrictedErrorInfo = { 0x82BA7092, 0x4C88, 0x427D, { 0xA7, 0xBC, 0x16, 0xDD, 0x93, 0xFE, 0xB6, 0x7E } };
+EXTERN_C SELECTANY const IID IID_ILanguageExceptionErrorInfo = { 0x04a2dbf3, 0xdf83, 0x116c, { 0x09, 0x46, 0x08, 0x12, 0xab, 0xf6, 0xe0, 0x7d } };
+
+// Until the Windows SDK is updated, just hard-code the IAgileObject IID
+#ifndef __IAgileObject_INTERFACE_DEFINED__
+EXTERN_C SELECTANY const GUID IID_IAgileObject = { 0x94ea2b94, 0xe9cc, 0x49e0, { 0xc0, 0xff, 0xee, 0x64, 0xca, 0x8f, 0x5b, 0x90 } };
+#endif // !__IAgileObject_INTERFACE_DEFINED__
+
+// Until the Windows SDK is updated, just hard-code the INoMarshal IID
+#ifndef __INoMarshal_INTERFACE_DEFINED__
+static const GUID IID_INoMarshal = {0xecc8691b, 0xc1db, 0x4dc0, { 0x85, 0x5e, 0x65, 0xf6, 0xc5, 0x51, 0xaf, 0x49 } };
+#endif // !__INoMarshal_INTERFACE_DEFINED__
+
+// NOTE: In the following vtables, QI points to the same function.
+// This is because, during marshaling between COM & COM+, we want a fast way to
+// check whether a COM IP is a tear-off that we created.
+
+// array of vtable pointers for std. interfaces such as IProvideClassInfo etc.
+const SLOT * const g_rgStdVtables[] =
+{
+ (SLOT*)&g_InnerUnknown.m_vtable,
+ (SLOT*)&g_IProvideClassInfo.m_vtable,
+ (SLOT*)&g_IMarshal.m_vtable,
+ (SLOT*)&g_ISupportsErrorInfo.m_vtable,
+ (SLOT*)&g_IErrorInfo.m_vtable,
+ (SLOT*)&g_IManagedObject.m_vtable,
+ (SLOT*)&g_IConnectionPointContainer.m_vtable,
+ (SLOT*)&g_IObjectSafety.m_vtable,
+ (SLOT*)&g_IDispatchEx.m_vtable,
+ (SLOT*)&g_IWeakReferenceSource.m_vtable,
+ (SLOT*)&g_ICustomPropertyProvider.m_vtable,
+ (SLOT*)&g_ICCW.m_vtable,
+ (SLOT*)&g_IAgileObject.m_vtable,
+ (SLOT*)&g_IStringable.m_vtable
+};
+
+
+const IID IID_IWeakReferenceSource = __uuidof(IWeakReferenceSource);
+const IID IID_IWeakReference = __uuidof(IWeakReference);
+
+// {7C925755-3E48-42B4-8677-76372267033F}
+const IID IID_ICustomPropertyProvider = {0x7C925755,0x3E48,0x42B4,{0x86, 0x77, 0x76, 0x37, 0x22, 0x67, 0x03, 0x3F}};
+
+const IID IID_IStringable = {0x96369f54,0x8eb6,0x48f0, {0xab,0xce,0xc1,0xb2,0x11,0xe6,0x27,0xc3}};
+
+// For free-threaded marshaling, we must not be spoofed by out-of-process or cross-runtime marshal data.
+// Only unmarshal data that comes from our own runtime.
+BYTE g_UnmarshalSecret[sizeof(GUID)];
+bool g_fInitedUnmarshalSecret = false;
+
+
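+// The secret is created lazily with double-checked locking: the flag is tested
+// once without the lock, then re-tested under ComCall's lock so that
+// CoCreateGuid runs on only one thread.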
+static HRESULT InitUnmarshalSecret()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (!g_fInitedUnmarshalSecret)
+ {
+ ComCall::LockHolder lh;
+
+ if (!g_fInitedUnmarshalSecret)
+ {
+ hr = ::CoCreateGuid((GUID *) g_UnmarshalSecret);
+ if (SUCCEEDED(hr))
+ g_fInitedUnmarshalSecret = true;
+ }
+ }
+ return hr;
+}
+
+
+HRESULT TryGetGuid(MethodTable* pClass, GUID* pGUID, BOOL b)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pClass));
+ PRECONDITION(CheckPointer(pGUID));
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ HRESULT hr = S_OK;
+ OBJECTREF pThrowable = NULL;
+ GCPROTECT_BEGIN(pThrowable);
+ {
+ EX_TRY
+ {
+ pClass->GetGuid(pGUID, b);
+ }
+ EX_CATCH
+ {
+ pThrowable = GET_THROWABLE();
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ if (pThrowable != NULL)
+ hr = SetupErrorInfo(pThrowable);
+ }
+ GCPROTECT_END();
+
+ return hr;
+}
+
+
+//------------------------------------------------------------------------------------------
+// IUnknown methods for CLR objects
+
+
+HRESULT
+Unknown_QueryInterface_Internal(ComCallWrapper* pWrap, IUnknown* pUnk, REFIID riid, void** ppv)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(IsInProcCCWTearOff(pUnk));
+ PRECONDITION(CheckPointer(ppv, NULL_OK));
+ PRECONDITION(CheckPointer(pWrap));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ SafeComHolderPreemp<IUnknown> pDestItf = NULL;
+
+ // Validate the arguments.
+ if (!ppv)
+ return E_POINTER;
+
+ // Initialize the returned interface pointer to NULL before we start.
+ *ppv = NULL;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ // Initialize the HRESULT to E_NOINTERFACE. This must be done after the
+ // BEGIN_EXTERNAL_ENTRYPOINT since otherwise it will be reset to S_OK by
+ // BEGIN_EXTERNAL_ENTRYPOINT.
+ hr = E_NOINTERFACE;
+
+ // Check for QIs on inner unknown
+ if (!IsInnerUnknown(pUnk))
+ {
+ // Aggregation support, delegate to the outer unknown if non null.
+ IUnknown *pOuter = pWrap->GetSimpleWrapper()->GetOuter();
+ if (pOuter != NULL)
+ {
+ hr = SafeQueryInterfacePreemp(pOuter, riid, &pDestItf);
+ LogInteropQI(pOuter, riid, hr, "QI to outer Unknown");
+ IfFailGo(hr);
+ }
+ }
+ else
+ {
+ // Assert the component has been aggregated
+ _ASSERTE(pWrap->GetSimpleWrapper()->GetOuter() != NULL);
+
+ // Okay special case IUnknown
+ if (IsEqualIID(riid, IID_IUnknown))
+ {
+ SafeAddRefPreemp(pUnk);
+ pDestItf = pUnk;
+ }
+ }
+
+ // If we haven't found the IP or if we haven't looked yet (because we aren't
+ // being aggregated), now look on the managed object to see if it supports the interface.
+ if (pDestItf == NULL)
+ {
+ pDestItf = ComCallWrapper::GetComIPFromCCW(pWrap, riid, NULL, GetComIPFromCCW::CheckVisibility);
+ if (pDestItf == NULL)
+ {
+#ifdef FEATURE_REMOTING
+            // Check if the wrapper is a transparent proxy; if so, delegate the QI to the real proxy
+ if (pWrap->IsObjectTP())
+ {
+ ARG_SLOT ret = 0;
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+ OBJECTREF oref = pWrap->GetObjectRef();
+ OBJECTREF realProxy = ObjectToOBJECTREF(CRemotingServices::GetRealProxy(OBJECTREFToObject(oref)));
+ _ASSERTE(realProxy != NULL);
+
+ if (!CRemotingServices::CallSupportsInterface(realProxy, riid, &ret))
+ goto ErrExit;
+ } // end GCX_COOP scope, pDestItf must be assigned to in preemptive mode
+
+ pDestItf = (IUnknown*)ret;
+ }
+#endif // FEATURE_REMOTING
+ }
+ }
+
+ErrExit:
+ // If we succeeded in obtaining the requested IP then return S_OK.
+ if (pDestItf != NULL)
+ hr = S_OK;
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ if (SUCCEEDED(hr))
+ {
+ // If we succeeded in obtaining the requested IP, set ppv to the interface.
+ _ASSERTE(pDestItf != NULL);
+ *ppv = pDestItf;
+ pDestItf.SuppressRelease();
+ }
+
+ return hr;
+} // Unknown_QueryInterface_Internal
+
+
+ULONG __stdcall
+Unknown_AddRefInner_Internal(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ SimpleComCallWrapper* pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+ ComCallWrapper* pWrap = pSimpleWrap->GetMainWrapper();
+
+ // Assert the component has been aggregated
+ _ASSERTE(pSimpleWrap->GetOuter() != NULL);
+
+ // We are guaranteed to be in the right domain here, so can always get the oref
+ // w/o fear of the handle having been deleted.
+ return pWrap->AddRef();
+} // Unknown_AddRefInner_Internal
+
+
+ULONG __stdcall
+Unknown_AddRef_Internal(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ ComCallWrapper* pWrap = ComCallWrapper::GetWrapperFromIP(pUnk);
+
+ // check for aggregation
+ IUnknown *pOuter;
+ SimpleComCallWrapper* pSimpleWrap = pWrap->GetSimpleWrapper();
+ if (pSimpleWrap && (pOuter = pSimpleWrap->GetOuter()) != NULL)
+ {
+ // If we are in process detach, we cannot safely call release on our outer.
+ if (g_fProcessDetach)
+ return 1;
+
+ ULONG cbRef = pOuter->AddRef();
+ LogInteropAddRef(pOuter, cbRef, "Delegate to outer");
+ return cbRef;
+ }
+    // We are guaranteed to be in the right domain here, so we can always get the oref
+ // w/o fear of the handle having been deleted.
+ return pWrap->AddRef();
+} // Unknown_AddRef_Internal
+
+
+ULONG __stdcall
+Unknown_ReleaseInner_Internal(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+    ULONG cbRef = -1;
+
+ SimpleComCallWrapper* pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+ ComCallWrapper* pWrap = pSimpleWrap->GetMainWrapper();
+
+ // Assert the component has been aggregated
+ _ASSERTE(pSimpleWrap->GetOuter() != NULL);
+
+    // We know for sure this wrapper is a start wrapper, so pass that information in.
+ cbRef = pWrap->Release();
+
+ return cbRef;
+} // Unknown_ReleaseInner_Internal
+
+ULONG __stdcall
+Unknown_Release_Internal(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+    ULONG cbRef = -1;
+
+ // check for aggregation
+ ComCallWrapper* pWrap = ComCallWrapper::GetWrapperFromIP(pUnk);
+ SimpleComCallWrapper* pSimpleWrap = pWrap->GetSimpleWrapper();
+ IUnknown *pOuter;
+ if (pSimpleWrap && (pOuter = pSimpleWrap->GetOuter()) != NULL)
+ {
+        // If we are in process detach, we cannot safely call release on our outer.
+        if (g_fProcessDetach)
+            return 1;
+
+ cbRef = SafeReleasePreemp(pOuter);
+ LogInteropRelease(pOuter, cbRef, "Delegate Release to outer");
+ }
+ else
+ {
+ cbRef = pWrap->Release();
+ }
+
+ return cbRef;
+} // Unknown_Release_Internal
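+
+// Summary of the two flavors above (descriptive note, no new behavior): for an aggregated
+// CCW the plain versions (Unknown_AddRef_Internal / Unknown_Release_Internal) delegate
+// ref-counting to the controlling outer unknown, while the "inner" versions act on the
+// wrapper itself. This is the standard COM aggregation contract: only the inner unknown
+// keeps its own count; every other tear-off forwards to the outer.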
+
+
+// ---------------------------------------------------------------------------
+// for simple tearoffs
+// ---------------------------------------------------------------------------
+ULONG __stdcall
+Unknown_AddRefSpecial_Internal(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(IsSimpleTearOff(pUnk));
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+ return pSimpleWrap->AddRefWithAggregationCheck();
+} // Unknown_AddRefSpecial_Internal
+
+// ---------------------------------------------------------------------------
+// for simplecomcall wrappers, stdinterfaces such as IProvideClassInfo etc.
+// ---------------------------------------------------------------------------
+ULONG __stdcall
+Unknown_ReleaseSpecial_Internal(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(IsSimpleTearOff(pUnk));
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+    ULONG cbRef = -1;
+
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+
+ // aggregation check
+ IUnknown *pOuter = pSimpleWrap->GetOuter();
+ if (pOuter != NULL)
+ {
+ cbRef = SafeReleasePreemp(pOuter);
+ }
+ else
+ {
+ cbRef = pSimpleWrap->Release();
+ }
+
+ return cbRef;
+} // Unknown_ReleaseSpecial_Internal
+
+
+HRESULT __stdcall
+Unknown_QueryInterface_IErrorInfo_Simple(IUnknown* pUnk, REFIID riid, void** ppv)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(IsInProcCCWTearOff(pUnk));
+ PRECONDITION(CheckPointer(ppv, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (!ppv)
+ return E_POINTER;
+ *ppv = NULL;
+
+ EX_TRY
+ {
+ hr = E_NOINTERFACE;
+
+ _ASSERTE(!IsInnerUnknown(pUnk) && IsSimpleTearOff(pUnk));
+
+ SimpleComCallWrapper* pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+
+ // we must not switch to cooperative GC mode here, so respond only to the
+ // two interfaces we always support
+ if (riid == IID_IUnknown || riid == IID_IErrorInfo)
+ {
+ *ppv = pUnk;
+ pSimpleWrap->AddRef();
+ hr = S_OK;
+ }
+ }
+ EX_CATCH_HRESULT_NO_ERRORINFO(hr);
+
+ return hr;
+} // Unknown_QueryInterface_IErrorInfo_Simple
+
+// ---------------------------------------------------------------------------
+ULONG __stdcall
+Unknown_ReleaseSpecial_IErrorInfo_Internal(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(IsSimpleTearOff(pUnk));
+ }
+ CONTRACTL_END;
+
+ ULONG cbRef = -1;
+
+    EX_TRY
+    {
+        SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+        cbRef = pSimpleWrap->Release();
+    }
+    EX_CATCH
+    {
+    }
+    EX_END_CATCH(SwallowAllExceptions)
+
+ return cbRef;
+}
+
+
+// ---------------------------------------------------------------------------
+// Interface IProvideClassInfo
+// ---------------------------------------------------------------------------
+HRESULT __stdcall
+ClassInfo_GetClassInfo(IUnknown* pUnk, ITypeInfo** ppTI)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(ppTI));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ _ASSERTE(IsSimpleTearOff(pUnk));
+
+ SimpleComCallWrapper *pWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+
+ // If this is an extensible RCW then we need to check to see if the CLR part of the
+        // hierarchy is visible to COM.
+ if (pWrap->IsExtendsCOMObject())
+ {
+ // Retrieve the wrapper template for the class.
+ ComCallWrapperTemplate *pTemplate = ComCallWrapperTemplate::GetTemplate(pWrap->GetMethodTable());
+
+ // Find the first COM visible IClassX starting at ComMethodTable passed in and
+ // walking up the hierarchy.
+ ComMethodTable *pComMT = NULL;
+ if (pTemplate->SupportsIClassX())
+ {
+ for (pComMT = pTemplate->GetClassComMT(); pComMT && !pComMT->IsComVisible(); pComMT = pComMT->GetParentClassComMT());
+ }
+
+ // If the CLR part of the object is not visible then delegate the call to the
+ // base COM object if it implements IProvideClassInfo.
+ if (!pComMT || pComMT->GetMethodTable()->ParentEquals(g_pObjectClass))
+ {
+ IProvideClassInfo *pProvClassInfo = NULL;
+
+ SyncBlock* pBlock = pWrap->GetSyncBlock();
+ _ASSERTE(pBlock);
+
+ RCWHolder pRCW(GetThread());
+ RCWPROTECT_BEGIN(pRCW, pBlock);
+
+ hr = pRCW->SafeQueryInterfaceRemoteAware(IID_IProvideClassInfo, (IUnknown**)&pProvClassInfo);
+ if (SUCCEEDED(hr))
+ {
+ hr = pProvClassInfo->GetClassInfo(ppTI);
+ ULONG cbRef = SafeRelease(pProvClassInfo);
+ LogInteropRelease(pProvClassInfo, cbRef, "ClassInfo_GetClassInfo");
+ IfFailThrow(hr);
+ }
+
+ RCWPROTECT_END(pRCW);
+ }
+ }
+
+ MethodTable* pClass = pWrap->GetMethodTable();
+ IfFailThrow(GetITypeInfoForEEClass(pClass, ppTI, true/*bClassInfo*/));
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+//-------------------------------------------------------------------------------------
+// Helper to get the ITypeLib* for an Assembly.
+HRESULT GetITypeLibForAssembly(Assembly *pAssembly, ITypeLib **ppTLB, int bAutoCreate, int flags)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(CheckPointer(ppTLB));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_CORECLR
+ //@CORESYSTODO: what to do?
+ return E_FAIL;
+#else
+
+ HRESULT hr = S_OK; // A result.
+ CQuickWSTRBase rName; // Library (scope) or file name.
+ int bResize=false; // If true, had to resize the buffer to hold the name.
+ LPCWSTR szModule=0; // The module name.
+ GUID guid; // A GUID.
+ ITypeLib *pITLB=0; // The TypeLib.
+ Module *pModule; // The assembly's module.
+ WCHAR rcDrive[_MAX_DRIVE]; // Module's drive letter.
+ WCHAR rcDir[_MAX_DIR]; // Module's directory.
+ WCHAR rcFname[_MAX_FNAME]; // Module's file name.
+ USHORT wMajor; // Major version number.
+ USHORT wMinor; // Minor version number.
+
+ rName.Init();
+
+ // Check to see if we have a cached copy.
+ pITLB = pAssembly->GetTypeLib();
+ if (pITLB)
+ {
+        // Check to see if the cached value is -1. This indicates that we tried
+        // to export the typelib but the export failed.
+ if (pITLB == (ITypeLib*)-1)
+ {
+ hr = E_FAIL;
+ goto ReturnHR;
+ }
+
+ // We have a cached copy so return it.
+ *ppTLB = pITLB;
+ hr = S_OK;
+ goto ReturnHR;
+ }
+
+ // Retrieve the name of the module.
+ pModule = pAssembly->GetManifestModule();
+
+ EX_TRY
+ {
+ // SString::ConvertToUnicode is THROW_UNLESS_NORMALIZED
+ szModule = pModule->GetPath();
+ }
+ EX_CATCH_HRESULT(hr);
+ IfFailGo(hr);
+
+ // Retrieve the guid for typelib that would be generated from the assembly.
+ IfFailGo(GetTypeLibGuidForAssembly(pAssembly, &guid));
+
+ // If the typelib is for the runtime library, we'd better know where it is.
+ if (guid == LIBID_ComPlusRuntime)
+ {
+ ULONG dwSize = (ULONG)rName.MaxSize();
+ while (FAILED(GetInternalSystemDirectory(rName.Ptr(), &dwSize)))
+ {
+ IfFailGo(rName.ReSizeNoThrow(dwSize=(ULONG)rName.MaxSize()*2));
+ }
+
+ IfFailGo(rName.ReSizeNoThrow(dwSize + wcslen(g_pwBaseLibraryTLB) + 3));
+ wcscat_s(rName.Ptr(), rName.Size(), g_pwBaseLibraryTLB);
+ hr = LoadTypeLibExWithFlags(rName.Ptr(), flags, &pITLB);
+ goto ErrExit;
+ }
+
+    // Retrieve the major and minor version number.
+ IfFailGo(GetTypeLibVersionForAssembly(pAssembly, &wMajor, &wMinor));
+
+ // Maybe the module was imported from COM, and we can get the libid of the existing typelib.
+ if (pAssembly->IsImportedFromTypeLib())
+ {
+ hr = LoadRegTypeLibWithFlags(guid, wMajor, wMinor, flags, &pITLB);
+ if (SUCCEEDED(hr))
+ goto ErrExit;
+
+ // Try just the Assembly version
+ pAssembly->GetVersion(&wMajor, &wMinor, NULL, NULL);
+ hr = LoadRegTypeLibWithFlags(guid, wMajor, wMinor, flags, &pITLB);
+ if (SUCCEEDED(hr))
+ goto ErrExit;
+
+ // Try loading the highest registered version.
+ hr = LoadRegTypeLibWithFlags(guid, -1, -1, flags, &pITLB);
+ if (SUCCEEDED(hr))
+ goto ErrExit;
+
+ // The module is known to be imported, so no need to try conversion.
+
+ // Set the error info for most callers.
+ VMPostError(TLBX_E_CIRCULAR_EXPORT2, szModule);
+
+ // Set the hr for the case where we're trying to load a type library to
+ // resolve a type reference from another library. The error message will
+ // be posted where more information is available.
+ if (hr == TYPE_E_LIBNOTREGISTERED)
+ hr = TLBX_W_LIBNOTREGISTERED;
+ else
+ hr = TLBX_E_CANTLOADLIBRARY;
+
+ IfFailGo(hr);
+ }
+
+ // Try to load the registered typelib.
+ hr = LoadRegTypeLibWithFlags(guid, wMajor, wMinor, flags, &pITLB);
+ if(hr == S_OK)
+ goto ErrExit;
+
+ // Try just the Assembly version
+ pAssembly->GetVersion(&wMajor, &wMinor, NULL, NULL);
+ hr = LoadRegTypeLibWithFlags(guid, wMajor, wMinor, flags, &pITLB);
+ if (SUCCEEDED(hr))
+ goto ErrExit;
+
+ // If that fails, try loading the highest registered version.
+ hr = LoadRegTypeLibWithFlags(guid, -1, -1, flags, &pITLB);
+ if(hr == S_OK)
+ goto ErrExit;
+
+ // If caller only wants registered typelibs, exit now, with error from prior call.
+ if (flags & TlbExporter_OnlyReferenceRegistered)
+ goto ErrExit;
+
+ // If we haven't managed to find the typelib so far try and load the typelib by name.
+ hr = LoadTypeLibExWithFlags(szModule, flags, &pITLB);
+ if(hr == S_OK)
+ {
+ // Check libid.
+ TLIBATTR *pTlibAttr;
+ int bMatch;
+
+ IfFailGo(pITLB->GetLibAttr(&pTlibAttr));
+ bMatch = pTlibAttr->guid == guid;
+ pITLB->ReleaseTLibAttr(pTlibAttr);
+
+ if (bMatch)
+ {
+ goto ErrExit;
+ }
+ else
+ {
+ SafeReleasePreemp(pITLB);
+ pITLB = NULL;
+ hr = TLBX_E_CANTLOADLIBRARY;
+ }
+ }
+
+ // Add a ".tlb" extension and try again.
+ IfFailGo(rName.ReSizeNoThrow((int)(wcslen(szModule) + 5)));
+ SplitPath(szModule, rcDrive, _MAX_DRIVE, rcDir, _MAX_DIR, rcFname, _MAX_FNAME, 0, 0);
+ MakePath(rName.Ptr(), rcDrive, rcDir, rcFname, W(".tlb"));
+
+ hr = LoadTypeLibExWithFlags(rName.Ptr(), flags, &pITLB);
+ if(hr == S_OK)
+ {
+ // Check libid.
+ TLIBATTR *pTlibAttr;
+ int bMatch;
+ IfFailGo(pITLB->GetLibAttr(&pTlibAttr));
+ bMatch = pTlibAttr->guid == guid;
+ pITLB->ReleaseTLibAttr(pTlibAttr);
+ if (bMatch)
+ {
+ goto ErrExit;
+ }
+ else
+ {
+ SafeReleasePreemp(pITLB);
+ pITLB = NULL;
+ hr = TLBX_E_CANTLOADLIBRARY;
+ }
+ }
+
+ // If the auto create flag is set then try and export the typelib from the module.
+ if (bAutoCreate)
+ {
+ // Try to export the typelib right now.
+ // This is FTL export (Fractionally Too Late).
+ hr = ExportTypeLibFromLoadedAssemblyNoThrow(pAssembly, 0, &pITLB, 0, flags);
+ if (FAILED(hr))
+ {
+ // If the export failed then remember it failed by setting the typelib
+ // to -1 on the assembly.
+ pAssembly->SetTypeLib((ITypeLib *)-1);
+ IfFailGo(hr);
+ }
+ }
+
+ErrExit:
+ // If we successfully opened (or created) the typelib, cache a pointer, and return it to caller.
+ if (pITLB)
+ {
+ pAssembly->SetTypeLib(pITLB);
+ *ppTLB = pITLB;
+ }
+ReturnHR:
+ rName.Destroy();
+ return hr;
+#endif //FEATURE_CORECLR
+} // HRESULT GetITypeLibForAssembly()
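+
+// Lookup order implemented above (summary): 1) the typelib cached on the Assembly; 2) the
+// registered typelib, trying the exported version, then the assembly version, then the
+// highest registered version; 3) the module itself (and "<module>.tlb" next to it), each
+// verified against the expected LIBID; and finally 4) a fresh export when bAutoCreate is
+// set. A failed export is remembered by caching (ITypeLib*)-1 so it is not retried.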
+
+
+//------------------------------------------------------------------------------------------
+// Helper to get the ITypeLib* for a type.
+HRESULT GetITypeLibForEEClass(MethodTable *pClass, ITypeLib **ppTLB, int bAutoCreate, int flags)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ return GetITypeLibForAssembly(pClass->GetAssembly(), ppTLB, bAutoCreate, flags);
+} // HRESULT GetITypeLibForEEClass()
+
+
+HRESULT GetITypeInfoForEEClass(MethodTable *pClass, ITypeInfo **ppTI, int bClassInfo/*=false*/, int bAutoCreate/*=true*/, int flags)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(return E_OUTOFMEMORY);
+ }
+ CONTRACTL_END;
+
+ GUID clsid;
+ GUID ciid;
+ ComMethodTable *pComMT = NULL;
+ HRESULT hr = S_OK;
+ SafeComHolder<ITypeLib> pITLB = NULL;
+ SafeComHolder<ITypeInfo> pTI = NULL;
+ SafeComHolder<ITypeInfo> pTIDef = NULL; // Default typeinfo of a coclass.
+ ComCallWrapperTemplate *pTemplate = NULL;
+
+ if (pClass->IsProjectedFromWinRT() || pClass->IsExportedToWinRT())
+ {
+ // ITypeInfo is not used in the WinRT world
+ return E_NOTIMPL;
+ }
+
+ GCX_PREEMP();
+
+ // Get the typeinfo.
+ if (bClassInfo || pClass->IsInterface() || pClass->IsValueType() || pClass->IsEnum())
+ {
+ // If the class is not an interface then find the first COM visible IClassX in the hierarchy.
+ if (!pClass->IsInterface() && !pClass->IsComImport())
+ {
+ {
+ // Retrieve the ComCallWrapperTemplate from the type.
+ GCX_COOP();
+ OBJECTREF pThrowable = NULL;
+ GCPROTECT_BEGIN(pThrowable);
+ {
+ EX_TRY
+ {
+ pTemplate = ComCallWrapperTemplate::GetTemplate(pClass);
+ if (pTemplate->SupportsIClassX())
+ {
+ // Find the first COM visible IClassX starting at ComMethodTable passed in and
+ // walking up the hierarchy.
+ for (pComMT = pTemplate->GetClassComMT(); pComMT && !pComMT->IsComVisible(); pComMT = pComMT->GetParentClassComMT());
+ }
+ }
+ EX_CATCH
+ {
+ pThrowable = GET_THROWABLE();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (pThrowable != NULL)
+ hr = SetupErrorInfo(pThrowable);
+ }
+ GCPROTECT_END();
+ }
+
+ if (hr != S_OK)
+ goto ReturnHR;
+
+ if (!pTemplate)
+ {
+ hr = E_OUTOFMEMORY;
+ goto ReturnHR;
+ }
+
+ // If we haven't managed to find any visible IClassX's then return TYPE_E_ELEMENTNOTFOUND.
+ if (!pComMT)
+ {
+ hr = TYPE_E_ELEMENTNOTFOUND;
+ goto ReturnHR;
+ }
+
+ // Use the type of the first visible IClassX.
+ pClass = pComMT->GetMethodTable();
+ }
+
+ // Retrieve the ITypeLib for the assembly containing the type.
+ IfFailGo(GetITypeLibForEEClass(pClass, &pITLB, bAutoCreate, flags));
+
+ // Get the GUID of the desired TypeRef.
+ IfFailGo(TryGetGuid(pClass, &clsid, TRUE));
+
+ // Retrieve the ITypeInfo from the ITypeLib.
+ IfFailGo(pITLB->GetTypeInfoOfGuid(clsid, ppTI));
+ }
+ else if (pClass->IsComImport())
+ {
+ // This is a COM imported class, with no IClassX. Get default interface.
+ IfFailGo(GetITypeLibForEEClass(pClass, &pITLB, bAutoCreate, flags));
+ IfFailGo(TryGetGuid(pClass, &clsid, TRUE));
+ IfFailGo(pITLB->GetTypeInfoOfGuid(clsid, &pTI));
+ IfFailGo(GetDefaultInterfaceForCoclass(pTI, &pTIDef));
+
+ if (pTIDef)
+ {
+ *ppTI = pTIDef;
+ pTIDef.SuppressRelease();
+ }
+ else
+ hr = TYPE_E_ELEMENTNOTFOUND;
+ }
+ else
+ {
+ // We are attempting to retrieve an ITypeInfo for the default interface on a class.
+ TypeHandle hndDefItfClass;
+ DefaultInterfaceType DefItfType;
+ IfFailGo(TryGetDefaultInterfaceForClass(TypeHandle(pClass), &hndDefItfClass, &DefItfType));
+ switch (DefItfType)
+ {
+ case DefaultInterfaceType_Explicit:
+ {
+ _ASSERTE(!hndDefItfClass.IsNull());
+ _ASSERTE(hndDefItfClass.IsInterface());
+ hr = GetITypeInfoForEEClass(hndDefItfClass.GetMethodTable(), ppTI, FALSE, bAutoCreate, flags);
+ break;
+ }
+
+ case DefaultInterfaceType_AutoDispatch:
+ case DefaultInterfaceType_AutoDual:
+ {
+ _ASSERTE(!hndDefItfClass.IsNull());
+ _ASSERTE(!hndDefItfClass.IsInterface());
+
+ // Retrieve the ITypeLib for the assembly containing the type.
+ IfFailGo(GetITypeLibForEEClass(hndDefItfClass.GetMethodTable(), &pITLB, bAutoCreate, flags));
+
+ // Get the GUID of the desired TypeRef.
+ IfFailGo(TryGetGuid(hndDefItfClass.GetMethodTable(), &clsid, TRUE));
+
+ // Generate the IClassX IID from the class.
+ TryGenerateClassItfGuid(hndDefItfClass, &ciid);
+
+ hr = pITLB->GetTypeInfoOfGuid(ciid, ppTI);
+ break;
+ }
+
+ case DefaultInterfaceType_IUnknown:
+ case DefaultInterfaceType_BaseComClass:
+ {
+ // @PERF: Optimize this.
+ IfFailGo(LoadRegTypeLib(LIBID_STDOLE2, -1, -1, 0, &pITLB));
+ IfFailGo(pITLB->GetTypeInfoOfGuid(IID_IUnknown, ppTI));
+ hr = S_USEIUNKNOWN;
+ break;
+ }
+
+ default:
+ {
+ _ASSERTE(!"Invalid default interface type!");
+ hr = E_FAIL;
+ break;
+ }
+ }
+ }
+
+ if (bAutoCreate && SUCCEEDED(hr))
+ {
+ EX_TRY
+ {
+ // Make sure that marshaling recognizes CLSIDs of types with autogenerated ITypeInfo.
+ GetAppDomain()->InsertClassForCLSID(pClass, TRUE);
+ }
+        EX_CATCH_HRESULT(hr);
+ }
+
+ErrExit:
+ if (*ppTI == NULL)
+ {
+        if (SUCCEEDED(hr))
+ hr = E_FAIL;
+ }
+
+ReturnHR:
+ return hr;
+} // HRESULT GetITypeInfoForEEClass()
+
+//------------------------------------------------------------------------------------------
+HRESULT GetDefaultInterfaceForCoclass(ITypeInfo *pTI, ITypeInfo **ppTIDef)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pTI));
+ PRECONDITION(CheckPointer(ppTIDef));
+ }
+ CONTRACTL_END;
+
+ int flags;
+ HRESULT hr;
+ HREFTYPE href; // href for the default typeinfo.
+ TYPEATTRHolder pAttr(pTI); // Attributes on the first TypeInfo.
+
+ IfFailGo(pTI->GetTypeAttr(&pAttr));
+ if (pAttr->typekind == TKIND_COCLASS)
+ {
+ int i;
+ for (i=0; i<pAttr->cImplTypes; ++i)
+ {
+ IfFailGo(pTI->GetImplTypeFlags(i, &flags));
+ if (flags & IMPLTYPEFLAG_FDEFAULT)
+ break;
+ }
+ // If no impltype had the default flag, use 0.
+ if (i == pAttr->cImplTypes)
+ i = 0;
+
+ IfFailGo(pTI->GetRefTypeOfImplType(i, &href));
+ IfFailGo(pTI->GetRefTypeInfo(href, ppTIDef));
+ }
+ else
+ {
+ *ppTIDef = 0;
+ hr = S_FALSE;
+ }
+
+ErrExit:
+ return hr;
+} // HRESULT GetDefaultInterfaceForCoclass()
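+
+// Usage sketch (illustrative): since this returns S_FALSE with *ppTIDef == NULL when the
+// typeinfo is not a coclass, callers must test the out pointer rather than just SUCCEEDED():
+//
+//   SafeComHolder<ITypeInfo> pTIDef = NULL;
+//   IfFailGo(GetDefaultInterfaceForCoclass(pTI, &pTIDef));
+//   if (pTIDef != NULL)
+//   {
+//       // use the coclass's default interface
+//   }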
+
+
+// Returns a NON-ADDREF'd ITypeInfo.
+HRESULT GetITypeInfoForMT(ComMethodTable *pMT, ITypeInfo **ppTI)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(ppTI));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ ITypeInfo *pTI; // The ITypeInfo.
+
+ pTI = pMT->GetITypeInfo();
+
+ if (pTI == 0)
+ {
+ MethodTable *pClass = pMT->GetMethodTable();
+
+ hr = GetITypeInfoForEEClass(pClass, &pTI);
+
+ if (SUCCEEDED(hr))
+ {
+ pMT->SetITypeInfo(pTI);
+ SafeReleasePreemp(pTI);
+ }
+ }
+
+ *ppTI = pTI;
+ return hr;
+}
+
+//------------------------------------------------------------------------------------------
+// helper function to locate error info (if any) after a call, and make sure
+// that the error info comes from that call
+
+IErrorInfo *GetSupportedErrorInfo(IUnknown *iface, REFIID riid, BOOL checkForIRestrictedErrInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(iface));
+ }
+ CONTRACTL_END;
+
+ IErrorInfo *pRetErrorInfo = NULL;
+ BOOL bUseThisErrorInfo = FALSE;
+
+ // This function must run in preemptive GC mode.
+ {
+ GCX_PREEMP();
+ HRESULT hr = S_OK;
+ SafeComHolderPreemp<IErrorInfo> pErrorInfo;
+
+ // See if we have any error info. (Also this clears out the error info,
+ // we want to do this whether it is a recent error or not.)
+ hr = SafeGetErrorInfo(&pErrorInfo);
+ IfFailThrow(hr);
+
+        // If we successfully retrieved an IErrorInfo, we need to verify that
+        // it is for the specified interface.
+ if (hr == S_OK)
+ {
+ // Make sure that the object we called follows the error info protocol,
+ // otherwise the error may be stale, so we just throw it away.
+ SafeComHolderPreemp<ISupportErrorInfo> pSupport;
+ hr = SafeQueryInterfacePreemp(iface, IID_ISupportErrorInfo, (IUnknown **) &pSupport);
+ LogInteropQI(iface, IID_ISupportErrorInfo, hr, "ISupportErrorInfo");
+ if (SUCCEEDED(hr))
+ {
+ hr = pSupport->InterfaceSupportsErrorInfo(riid);
+ if (hr == S_OK)
+ {
+ // The IErrorInfo is indeed for the specified interface so return it.
+ bUseThisErrorInfo = TRUE;
+ }
+ }
+ }
+
+ if (!bUseThisErrorInfo && pErrorInfo != NULL && checkForIRestrictedErrInfo)
+ {
+
+ // Do we support IRestrictedErrorInfo?
+ SafeComHolderPreemp<IRestrictedErrorInfo> pRestrictedErrorInfo;
+ hr = SafeQueryInterfacePreemp(pErrorInfo, IID_IRestrictedErrorInfo, (IUnknown **) &pRestrictedErrorInfo);
+ LogInteropQI(pErrorInfo, IID_IRestrictedErrorInfo, hr, "IRestrictedErrorInfo");
+ if (SUCCEEDED(hr))
+ {
+
+ // This is a WinRT IRestrictedErrorInfo scenario
+ bUseThisErrorInfo = TRUE;
+ }
+ }
+
+ if (bUseThisErrorInfo)
+ {
+ pRetErrorInfo = pErrorInfo;
+ pErrorInfo.SuppressRelease();
+ pErrorInfo = NULL;
+ }
+ }
+
+ return pRetErrorInfo;
+}
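+
+// Illustrative sketch of the protocol enforced above (standard OLE error-info contract):
+//
+//   // after a failing call on pItf (an interface of IID riid)...
+//   IErrorInfo *pErrInfo = GetSupportedErrorInfo(pItf, riid, TRUE);
+//   if (pErrInfo != NULL)
+//   {
+//       // the error info is verified to belong to this call; use it, then Release() it
+//   }
+//
+// Note that GetErrorInfo() clears the per-thread error object, so the helper is invoked
+// even when the result is ultimately discarded as stale.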
+
+// ---------------------------------------------------------------------------
+// Interface ISupportErrorInfo
+// ---------------------------------------------------------------------------
+HRESULT __stdcall
+SupportsErroInfo_IntfSupportsErrorInfo(IUnknown* pUnk, REFIID riid)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(IsSimpleTearOff(pUnk));
+ }
+ CONTRACTL_END;
+
+ // All interfaces support ErrorInfo
+ return S_OK;
+}
+
+
+// ---------------------------------------------------------------------------
+// Interface IErrorInfo
+// %%Function: ErrorInfo_GetDescription
+// ---------------------------------------------------------------------------
+HRESULT __stdcall
+ErrorInfo_GetDescription(IUnknown* pUnk, BSTR* pbstrDescription)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(IsSimpleTearOff(pUnk));
+ PRECONDITION(CheckPointer(pbstrDescription, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ SimpleComCallWrapper *pWrap = NULL;
+
+ if (pbstrDescription == NULL)
+ IfFailGo(E_POINTER);
+
+ pWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ *pbstrDescription = pWrap->IErrorInfo_bstrDescription();
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ErrExit:
+ return hr;
+}
+
+// ---------------------------------------------------------------------------
+// Interface IErrorInfo
+// %%Function: ErrorInfo_GetGUID
+// ---------------------------------------------------------------------------
+HRESULT __stdcall ErrorInfo_GetGUID(IUnknown* pUnk, GUID* pguid)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(IsSimpleTearOff(pUnk));
+ PRECONDITION(CheckPointer(pguid, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ SimpleComCallWrapper *pWrap = NULL;
+
+ if (pguid == NULL)
+ return E_POINTER;
+
+ pWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+
+ *pguid = pWrap->IErrorInfo_guid();
+
+ return hr;
+}
+
+// ---------------------------------------------------------------------------
+// Interface IErrorInfo
+// %%Function: ErrorInfo_GetHelpContext
+// ---------------------------------------------------------------------------
+HRESULT __stdcall ErrorInfo_GetHelpContext(IUnknown* pUnk, DWORD* pdwHelpCtxt)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(IsSimpleTearOff(pUnk));
+ PRECONDITION(CheckPointer(pdwHelpCtxt, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ SimpleComCallWrapper *pWrap = NULL;
+
+ if (pdwHelpCtxt == NULL)
+ return E_POINTER;
+
+ pWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ *pdwHelpCtxt = pWrap->IErrorInfo_dwHelpContext();
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+// ---------------------------------------------------------------------------
+// Interface IErrorInfo
+// %%Function: ErrorInfo_GetHelpFile
+// ---------------------------------------------------------------------------
+HRESULT __stdcall ErrorInfo_GetHelpFile(IUnknown* pUnk, BSTR* pbstrHelpFile)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(IsSimpleTearOff(pUnk));
+ PRECONDITION(CheckPointer(pbstrHelpFile, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ SimpleComCallWrapper *pWrap = NULL;
+
+ if (pbstrHelpFile == NULL)
+ return E_POINTER;
+
+ pWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ *pbstrHelpFile = pWrap->IErrorInfo_bstrHelpFile();
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+// ---------------------------------------------------------------------------
+// Interface IErrorInfo
+// %%Function: ErrorInfo_GetSource
+// ---------------------------------------------------------------------------
+HRESULT __stdcall ErrorInfo_GetSource(IUnknown* pUnk, BSTR* pbstrSource)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(IsSimpleTearOff(pUnk));
+ PRECONDITION(CheckPointer(pbstrSource, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ SimpleComCallWrapper *pWrap = NULL;
+
+ if (pbstrSource == NULL)
+ return E_POINTER;
+
+ pWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ *pbstrSource = pWrap->IErrorInfo_bstrSource();
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+
+//------------------------------------------------------------------------------------------
+// IDispatch methods that forward to the right implementation based on the flags set
+// on the IClassX COM method table.
+
+HRESULT __stdcall
+Dispatch_GetTypeInfoCount(IDispatch* pDisp, unsigned int *pctinfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsInProcCCWTearOff(pDisp));
+ PRECONDITION(CheckPointer(pctinfo, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (!pctinfo)
+ return E_POINTER;
+
+ *pctinfo = 0;
+
+ ComMethodTable *pCMT = ComMethodTable::ComMethodTableFromIP(pDisp);
+ if (pCMT->IsIClassXOrBasicItf() && pCMT->GetClassInterfaceType() != clsIfNone)
+ if (pCMT->HasInvisibleParent())
+ return E_NOTIMPL;
+
+ ITypeInfo *pTI;
+ HRESULT hr = GetITypeInfoForMT(pCMT, &pTI);
+
+ if (SUCCEEDED(hr))
+ {
+ hr = S_OK;
+ *pctinfo = 1;
+ }
+
+ return hr;
+}
+
+HRESULT __stdcall
+Dispatch_GetTypeInfo(IDispatch* pDisp, unsigned int itinfo, LCID lcid, ITypeInfo **pptinfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsInProcCCWTearOff(pDisp));
+ PRECONDITION(CheckPointer(pptinfo, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (!pptinfo)
+ return E_POINTER;
+
+ *pptinfo = NULL;
+
+ ComMethodTable *pCMT = ComMethodTable::ComMethodTableFromIP(pDisp);
+ if (pCMT->IsIClassXOrBasicItf() && pCMT->GetClassInterfaceType() != clsIfNone)
+ if (pCMT->HasInvisibleParent())
+ return E_NOTIMPL;
+
+    if (itinfo != 0)
+ {
+ return DISP_E_BADINDEX;
+ }
+
+ HRESULT hr = GetITypeInfoForMT(pCMT, pptinfo);
+ if (SUCCEEDED(hr))
+ {
+ // GetITypeInfoForMT() can return other success codes besides S_OK so
+ // we need to convert them to S_OK.
+ hr = S_OK;
+ SafeAddRefPreemp(*pptinfo);
+ }
+
+ return hr;
+}
+
+HRESULT __stdcall
+Dispatch_GetIDsOfNames(IDispatch* pDisp, REFIID riid, __in_ecount(cNames) OLECHAR **rgszNames, unsigned int cNames, LCID lcid, DISPID *rgdispid)
+{
+ CONTRACTL
+    {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(return E_OUTOFMEMORY);
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsInProcCCWTearOff(pDisp));
+ PRECONDITION(CheckPointer(rgszNames, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Get the CMT that matches the interface passed in.
+ ComMethodTable *pCMT = ComMethodTable::ComMethodTableFromIP(pDisp);
+ if (pCMT->IsIClassXOrBasicItf() && pCMT->GetClassInterfaceType() != clsIfNone)
+ if (pCMT->HasInvisibleParent())
+ return E_NOTIMPL;
+
+ // Use the right implementation based on the flags in the ComMethodTable and ComCallWrapperTemplate
+ if (!pCMT->IsDefinedInUntrustedCode())
+ {
+ ComCallWrapperTemplate *pTemplate = MapIUnknownToWrapper(pDisp)->GetComCallWrapperTemplate();
+ if (pTemplate->IsUseOleAutDispatchImpl())
+ {
+ return OleAutDispatchImpl_GetIDsOfNames(pDisp, riid, rgszNames, cNames, lcid, rgdispid);
+ }
+ }
+ return InternalDispatchImpl_GetIDsOfNames(pDisp, riid, rgszNames, cNames, lcid, rgdispid);
+}
+
+HRESULT __stdcall
+Dispatch_Invoke
+ (
+ IDispatch* pDisp,
+ DISPID dispidMember,
+ REFIID riid,
+ LCID lcid,
+ unsigned short wFlags,
+ DISPPARAMS *pdispparams,
+ VARIANT *pvarResult,
+ EXCEPINFO *pexcepinfo,
+ unsigned int *puArgErr
+ )
+{
+ CONTRACTL
+ {
+ THROWS; // InternalDispatchImpl_Invoke can throw if it encounters CE
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(return E_OUTOFMEMORY);
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsInProcCCWTearOff(pDisp));
+ }
+ CONTRACTL_END;
+
+ // Get the CMT that matches the interface passed in.
+ ComMethodTable *pCMT = ComMethodTable::ComMethodTableFromIP(pDisp);
+ if (pCMT->IsIClassXOrBasicItf() && pCMT->GetClassInterfaceType() != clsIfNone)
+ if (pCMT->HasInvisibleParent())
+ return E_NOTIMPL;
+
+ // Use the right implementation based on the flags in the ComMethodTable.
+ if (!pCMT->IsDefinedInUntrustedCode())
+ {
+ ComCallWrapperTemplate *pTemplate = MapIUnknownToWrapper(pDisp)->GetComCallWrapperTemplate();
+ if (pTemplate->IsUseOleAutDispatchImpl())
+ {
+ return OleAutDispatchImpl_Invoke(pDisp, dispidMember, riid, lcid, wFlags, pdispparams, pvarResult, pexcepinfo, puArgErr);
+ }
+ }
+
+ return InternalDispatchImpl_Invoke(pDisp, dispidMember, riid, lcid, wFlags, pdispparams, pvarResult, pexcepinfo, puArgErr);
+}
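+
+// Routing summary for the two forwarders above: types not defined in untrusted code whose
+// wrapper template has the "use OleAut" flag go through the ITypeInfo-driven
+// OleAutDispatchImpl_* functions; everything else falls back to the reflection-based
+// InternalDispatchImpl_* functions below.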
+
+
+//------------------------------------------------------------------------------------------
+// IDispatch method implementations for COM+ objects: OleAut's ITypeInfo-driven versions
+// and the internal reflection-based versions.
+
+
+HRESULT __stdcall
+OleAutDispatchImpl_GetIDsOfNames
+(
+ IDispatch* pDisp,
+ REFIID riid,
+ __in_ecount(cNames) OLECHAR **rgszNames,
+ unsigned int cNames,
+ LCID lcid,
+ DISPID *rgdispid
+)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(return E_OUTOFMEMORY);
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsInProcCCWTearOff(pDisp));
+ PRECONDITION(CheckPointer(rgszNames));
+ }
+ CONTRACTL_END;
+
+ // Make sure that riid is IID_NULL.
+ if (riid != IID_NULL)
+ return DISP_E_UNKNOWNINTERFACE;
+
+ // Retrieve the COM method table from the IP.
+ ComMethodTable *pCMT = ComMethodTable::ComMethodTableFromIP(pDisp);
+ if (pCMT->IsIClassXOrBasicItf() && pCMT->GetClassInterfaceType() != clsIfNone)
+ if (pCMT->HasInvisibleParent())
+ return E_NOTIMPL;
+
+ ITypeInfo *pTI;
+ HRESULT hr = GetITypeInfoForMT(pCMT, &pTI);
+ if (FAILED(hr))
+ return (hr);
+
+ hr = pTI->GetIDsOfNames(rgszNames, cNames, rgdispid);
+ return hr;
+}
+
+HRESULT __stdcall
+OleAutDispatchImpl_Invoke
+ (
+ IDispatch* pDisp,
+ DISPID dispidMember,
+ REFIID riid,
+ LCID lcid,
+ unsigned short wFlags,
+ DISPPARAMS *pdispparams,
+ VARIANT *pvarResult,
+ EXCEPINFO *pexcepinfo,
+ unsigned int *puArgErr
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsInProcCCWTearOff(pDisp));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Make sure that riid is IID_NULL.
+ if (riid != IID_NULL)
+ return DISP_E_UNKNOWNINTERFACE;
+
+ // Retrieve the COM method table from the IP.
+ ComMethodTable *pCMT = ComMethodTable::ComMethodTableFromIP(pDisp);
+ if (pCMT->IsIClassXOrBasicItf() && pCMT->GetClassInterfaceType() != clsIfNone)
+ if (pCMT->HasInvisibleParent())
+ return E_NOTIMPL;
+
+ ITypeInfo *pTI;
+ hr = GetITypeInfoForMT(pCMT, &pTI);
+ if (FAILED(hr))
+ return hr;
+
+ EX_TRY
+ {
+ // If we have a basic or IClassX interface then we're going to invoke through
+ // the class interface.
+ if (pCMT->IsIClassXOrBasicItf())
+ {
+ CCWHolder pCCW = ComCallWrapper::GetWrapperFromIP(pDisp);
+ pDisp = (IDispatch*)pCCW->GetIClassXIP();
+ }
+
+ LeaveRuntimeHolder holder(**(size_t**)pTI);
+
+ hr = pTI->Invoke(pDisp, dispidMember, wFlags, pdispparams, pvarResult, pexcepinfo, puArgErr);
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return hr;
+}
+
+HRESULT __stdcall
+InternalDispatchImpl_GetIDsOfNames (
+ IDispatch* pDisp,
+ REFIID riid,
+ __in_ecount(cNames) OLECHAR **rgszNames,
+ unsigned int cNames,
+ LCID lcid,
+ DISPID *rgdispid)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsInProcCCWTearOff(pDisp));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DispatchInfo *pDispInfo;
+ SimpleComCallWrapper *pSimpleWrap;
+
+ // Validate the arguments.
+ if (!rgdispid)
+ return E_POINTER;
+
+ if (riid != IID_NULL)
+ return DISP_E_UNKNOWNINTERFACE;
+
+ if (cNames < 1)
+ return S_OK;
+ else if (!rgszNames)
+ return E_POINTER;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+        // This call is coming through an interface that inherits from IDispatch.
+ ComCallWrapper* pCCW = ComCallWrapper::GetStartWrapperFromIP(pDisp);
+
+ ComMethodTable* pCMT = ComMethodTable::ComMethodTableFromIP(pDisp);
+ if (pCMT->IsIClassXOrBasicItf() && pCMT->GetClassInterfaceType() != clsIfNone)
+ pCMT->CheckParentComVisibility(FALSE);
+
+ pSimpleWrap = pCCW->GetSimpleWrapper();
+        pDispInfo = pCMT->GetDispatchInfo();
+
+ // Attempt to find the member in the DispatchEx information.
+ SString sName(rgszNames[0]);
+ DispatchMemberInfo *pDispMemberInfo = pDispInfo->FindMember(sName, FALSE);
+
+ // Check to see if the member has been found.
+ if (pDispMemberInfo)
+ {
+ // Get the DISPID of the member.
+ rgdispid[0] = pDispMemberInfo->m_DispID;
+
+ // Get the ID's of the named arguments.
+ if (cNames > 1)
+ hr = pDispMemberInfo->GetIDsOfParameters(rgszNames + 1, cNames - 1, rgdispid + 1, FALSE);
+ }
+ else
+ {
+ rgdispid[0] = DISPID_UNKNOWN;
+ hr = DISP_E_UNKNOWNNAME;
+ }
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+
+HRESULT __stdcall
+InternalDispatchImpl_Invoke
+ (
+ IDispatch* pDisp,
+ DISPID dispidMember,
+ REFIID riid,
+ LCID lcid,
+ unsigned short wFlags,
+ DISPPARAMS *pdispparams,
+ VARIANT *pvarResult,
+ EXCEPINFO *pexcepinfo,
+ unsigned int *puArgErr
+ )
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsInProcCCWTearOff(pDisp));
+ }
+ CONTRACTL_END;
+
+ DispatchInfo *pDispInfo;
+ SimpleComCallWrapper *pSimpleWrap;
+ HRESULT hr = S_OK;
+
+ // Check for valid input args that are not covered by DispatchInfo::InvokeMember.
+ if (riid != IID_NULL)
+ return DISP_E_UNKNOWNINTERFACE;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+        // This call is coming through an interface that inherits from IDispatch.
+ ComCallWrapper* pCCW = ComCallWrapper::GetStartWrapperFromIP(pDisp);
+
+ ComMethodTable* pCMT = ComMethodTable::ComMethodTableFromIP(pDisp);
+ if (pCMT->IsIClassXOrBasicItf() && pCMT->GetClassInterfaceType() != clsIfNone)
+ pCMT->CheckParentComVisibility(FALSE);
+
+ pSimpleWrap = pCCW->GetSimpleWrapper();
+
+ // Invoke the member.
+        pDispInfo = pCMT->GetDispatchInfo();
+ hr = pDispInfo->InvokeMember(pSimpleWrap, dispidMember, lcid, wFlags, pdispparams, pvarResult, pexcepinfo, NULL, puArgErr);
+
+ }
+ END_EXTERNAL_ENTRYPOINT_RETHROW_CORRUPTING_EXCEPTIONS; // This will ensure that entry points wont swallow CE and continue to let them propagate out.
+
+ return hr;
+}
+
+
+//------------------------------------------------------------------------------------------
+// IDispatchEx methods for COM+ objects
+
+// IDispatchEx::GetTypeInfoCount
+HRESULT __stdcall DispatchEx_GetTypeInfoCount(IDispatch* pDisp, unsigned int *pctinfo)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsSimpleTearOff(pDisp));
+ PRECONDITION(CheckPointer(pctinfo, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ ITypeInfo *pTI = NULL;
+
+ // Validate the arguments.
+ if (!pctinfo)
+ return E_POINTER;
+
+ // Initialize the count of type info's to 0.
+ *pctinfo = 0;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pDisp);
+
+ // Retrieve the class ComMethodTable.
+ ComMethodTable *pComMT = ComCallWrapperTemplate::SetupComMethodTableForClass(pSimpleWrap->GetMethodTable(), FALSE);
+
+ // Retrieve the ITypeInfo for the ComMethodTable.
+ IfFailThrow(GetITypeInfoForMT(pComMT, &pTI));
+
+ // GetITypeInfoForMT() can return other success codes besides S_OK so
+ // we need to convert them to S_OK.
+ hr = S_OK;
+ *pctinfo = 1;
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+// IDispatchEx::GetTypeInfo
+HRESULT __stdcall DispatchEx_GetTypeInfo (
+ IDispatch* pDisp,
+ unsigned int itinfo,
+ LCID lcid,
+ ITypeInfo **pptinfo
+ )
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsSimpleTearOff(pDisp));
+ PRECONDITION(CheckPointer(pptinfo, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Validate the arguments.
+ if (!pptinfo)
+ return E_POINTER;
+
+ // Initialize the ITypeInfo pointer to NULL.
+ *pptinfo = NULL;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pDisp);
+
+ // Retrieve the class ComMethodTable.
+ ComMethodTable *pComMT = ComCallWrapperTemplate::SetupComMethodTableForClass(pSimpleWrap->GetMethodTable(), FALSE);
+
+ // Retrieve the ITypeInfo for the ComMethodTable.
+ IfFailThrow(GetITypeInfoForMT(pComMT, pptinfo));
+
+ // GetITypeInfoForMT() can return other success codes besides S_OK so
+ // we need to convert them to S_OK.
+ hr = S_OK;
+ SafeAddRefPreemp(*pptinfo);
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+// IDispatchEx::GetIDsofNames
+HRESULT __stdcall DispatchEx_GetIDsOfNames (
+ IDispatchEx* pDisp,
+ REFIID riid,
+ __in_ecount(cNames) OLECHAR **rgszNames,
+ unsigned int cNames,
+ LCID lcid,
+ DISPID *rgdispid
+ )
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsSimpleTearOff(pDisp));
+ PRECONDITION(CheckPointer(rgdispid, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Validate the arguments.
+ if (!rgdispid)
+ return E_POINTER;
+
+ if (riid != IID_NULL)
+ return DISP_E_UNKNOWNINTERFACE;
+
+ if (cNames < 1)
+ return S_OK;
+ else if (!rgszNames)
+ return E_POINTER;
+
+    // Retrieve the dispatch info and the simple wrapper for this IDispatchEx.
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pDisp);
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ // Attempt to find the member in the DispatchEx information.
+ DispatchExInfo *pDispExInfo = pSimpleWrap->GetDispatchExInfo();
+
+ SString sName(rgszNames[0]);
+ DispatchMemberInfo *pDispMemberInfo = pDispExInfo->SynchFindMember(sName, FALSE);
+
+ // Check to see if the member has been found.
+ if (pDispMemberInfo)
+ {
+ // Get the DISPID of the member.
+ rgdispid[0] = pDispMemberInfo->m_DispID;
+
+ // Get the ID's of the named arguments.
+ if (cNames > 1)
+ hr = pDispMemberInfo->GetIDsOfParameters(rgszNames + 1, cNames - 1, rgdispid + 1, FALSE);
+ }
+ else
+ {
+ rgdispid[0] = DISPID_UNKNOWN;
+ hr = DISP_E_UNKNOWNNAME;
+ }
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+// IDispatchEx::Invoke
+HRESULT __stdcall DispatchEx_Invoke (
+ IDispatchEx* pDisp,
+ DISPID dispidMember,
+ REFIID riid,
+ LCID lcid,
+ unsigned short wFlags,
+ DISPPARAMS *pdispparams,
+ VARIANT *pvarResult,
+ EXCEPINFO *pexcepinfo,
+ unsigned int *puArgErr
+ )
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsSimpleTearOff(pDisp));
+ PRECONDITION(CheckPointer(pdispparams, NULL_OK));
+ PRECONDITION(CheckPointer(pvarResult, NULL_OK));
+ PRECONDITION(CheckPointer(pexcepinfo, NULL_OK));
+ PRECONDITION(CheckPointer(puArgErr, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Check for valid input args that are not covered by DispatchInfo::InvokeMember.
+ if (riid != IID_NULL)
+ return DISP_E_UNKNOWNINTERFACE;
+
+    // Retrieve the dispatch info and the simple wrapper for this IDispatchEx.
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pDisp);
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ // Invoke the member.
+ DispatchExInfo *pDispExInfo = pSimpleWrap->GetDispatchExInfo();
+ hr = pDispExInfo->SynchInvokeMember(pSimpleWrap, dispidMember, lcid, wFlags, pdispparams, pvarResult, pexcepinfo, NULL, puArgErr);
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+// IDispatchEx::DeleteMemberByDispID
+HRESULT __stdcall DispatchEx_DeleteMemberByDispID (
+ IDispatchEx* pDisp,
+ DISPID id
+ )
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsSimpleTearOff(pDisp));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+    // Retrieve the dispatch info and the simple wrapper for this IDispatchEx.
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pDisp);
+
+ DispatchExInfo *pDispExInfo = pSimpleWrap->GetDispatchExInfo();
+
+ // If the member does not support expando operations then we cannot remove the member.
+ if (!pDispExInfo->SupportsExpando())
+ return E_NOTIMPL;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ // Delete the member from the IExpando. This method takes care of synchronizing with
+ // the managed view to make sure the member gets deleted.
+ pDispExInfo->DeleteMember(id);
+ hr = S_OK;
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+// IDispatchEx::DeleteMemberByName
+HRESULT __stdcall DispatchEx_DeleteMemberByName (
+ IDispatchEx* pDisp,
+ BSTR bstrName,
+ DWORD grfdex
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsSimpleTearOff(pDisp));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DISPID DispID;
+
+ if (!bstrName)
+ return E_POINTER;
+
+ // The only two supported flags are fdexNameCaseSensitive and fdexNameCaseInsensitive.
+ if (grfdex & ~(fdexNameCaseSensitive | fdexNameCaseInsensitive))
+ return E_INVALIDARG;
+
+    // Ensure fdexNameCaseSensitive and fdexNameCaseInsensitive aren't both set.
+ if ((grfdex & (fdexNameCaseSensitive | fdexNameCaseInsensitive)) == (fdexNameCaseSensitive | fdexNameCaseInsensitive))
+ return E_INVALIDARG;
+
+    // Retrieve the dispatch info and the simple wrapper for this IDispatchEx.
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pDisp);
+ DispatchExInfo *pDispExInfo = pSimpleWrap->GetDispatchExInfo();
+
+ // If the member does not support expando operations then we cannot remove the member.
+ if (!pDispExInfo->SupportsExpando())
+ return E_NOTIMPL;
+
+ // Simply find the associated DISPID and delegate the call to DeleteMemberByDispID.
+ hr = DispatchEx_GetDispID(pDisp, bstrName, grfdex, &DispID);
+ if (SUCCEEDED(hr))
+ hr = DispatchEx_DeleteMemberByDispID(pDisp, DispID);
+
+ return hr;
+}
+
+// IDispatchEx::GetDispID
+HRESULT __stdcall DispatchEx_GetDispID (
+ IDispatchEx* pDisp,
+ BSTR bstrName,
+ DWORD grfdex,
+ DISPID *pid
+ )
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(bstrName, NULL_OK));
+ PRECONDITION(IsSimpleTearOff(pDisp));
+ PRECONDITION(CheckPointer(pid, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ SimpleComCallWrapper *pSimpleWrap;
+ DispatchExInfo *pDispExInfo;
+
+ // Validate the arguments.
+ if (!pid || !bstrName)
+ return E_POINTER;
+
+ // We don't support fdexNameImplicit, but let the search continue anyway
+
+    // Ensure fdexNameCaseSensitive and fdexNameCaseInsensitive aren't both set.
+ if ((grfdex & (fdexNameCaseSensitive | fdexNameCaseInsensitive)) == (fdexNameCaseSensitive | fdexNameCaseInsensitive))
+ return E_INVALIDARG;
+
+ // Initialize the pid to DISPID_UNKNOWN before we start.
+ *pid = DISPID_UNKNOWN;
+
+    // Retrieve the dispatch info and the simple wrapper for this IDispatchEx.
+ pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pDisp);
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ // Attempt to find the member in the DispatchEx information.
+ pDispExInfo = pSimpleWrap->GetDispatchExInfo();
+
+ SString sName(bstrName);
+ DispatchMemberInfo *pDispMemberInfo = pDispExInfo->SynchFindMember(sName, grfdex & fdexNameCaseSensitive);
+
+ // If we still have not found a match and the fdexNameEnsure flag is set then we
+ // need to add the member to the expando object.
+ if (!pDispMemberInfo)
+ {
+ if (grfdex & fdexNameEnsure)
+ {
+ if (pDispExInfo->SupportsExpando())
+ {
+ pDispMemberInfo = pDispExInfo->AddMember(sName, grfdex);
+ if (!pDispMemberInfo)
+ hr = E_UNEXPECTED;
+ }
+ else
+ {
+ hr = E_NOTIMPL;
+ }
+ }
+ else
+ {
+ hr = DISP_E_UNKNOWNNAME;
+ }
+ }
+
+ // Set the return DISPID if the member has been found.
+ if (pDispMemberInfo)
+ *pid = pDispMemberInfo->m_DispID;
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
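+
+// Usage sketch (illustrative, typical script-engine behavior): fdexNameEnsure is what lets
+// late-bound languages create members on an expando object on first assignment:
+//
+//   DISPID id;
+//   // "obj.newField = 5" in script becomes roughly:
+//   hr = pDispEx->GetDispID(bstrNewField, fdexNameEnsure | fdexNameCaseSensitive, &id);
+//   // ...followed by InvokeEx(id, ..., DISPATCH_PROPERTYPUT, ...)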
+
+// IDispatchEx::GetMemberName
+HRESULT __stdcall DispatchEx_GetMemberName (
+ IDispatchEx* pDisp,
+ DISPID id,
+ BSTR *pbstrName
+ )
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsSimpleTearOff(pDisp));
+ PRECONDITION(CheckPointer(pbstrName, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Validate the arguments.
+ if (!pbstrName)
+ return E_POINTER;
+
+ // Initialize the pbstrName to NULL before we start.
+ *pbstrName = NULL;
+
+    // Retrieve the dispatch info and the simple wrapper for this IDispatchEx.
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pDisp);
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ // Do a lookup in the hashtable to find the DispatchMemberInfo for the DISPID.
+ DispatchExInfo *pDispExInfo = pSimpleWrap->GetDispatchExInfo();
+ DispatchMemberInfo *pDispMemberInfo = pDispExInfo->SynchFindMember(id);
+
+ // If the member does not exist then we return DISP_E_MEMBERNOTFOUND.
+ if (!pDispMemberInfo || !ObjectFromHandle(pDispMemberInfo->m_hndMemberInfo))
+ {
+ hr = DISP_E_MEMBERNOTFOUND;
+ }
+ else
+ {
+ // Copy the name into the output string.
+ *pbstrName = SysAllocString(pDispMemberInfo->m_strName);
+ }
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+// IDispatchEx::GetMemberProperties
+HRESULT __stdcall DispatchEx_GetMemberProperties (
+ IDispatchEx* pDisp,
+ DISPID id,
+ DWORD grfdexFetch,
+ DWORD *pgrfdex
+ )
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsSimpleTearOff(pDisp));
+ PRECONDITION(CheckPointer(pgrfdex, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Validate the arguments.
+ if (!pgrfdex)
+ return E_POINTER;
+
+ // Initialize the return properties to 0.
+ *pgrfdex = 0;
+
+ EnumMemberTypes MemberType;
+
+    // Retrieve the dispatch info and the simple wrapper for this IDispatchEx.
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pDisp);
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ DispatchExInfo *pDispExInfo = pSimpleWrap->GetDispatchExInfo();
+ OBJECTREF MemberInfoObj = NULL;
+ GCPROTECT_BEGIN(MemberInfoObj)
+ {
+ // Do a lookup in the hashtable to find the DispatchMemberInfo for the DISPID.
+ DispatchMemberInfo *pDispMemberInfo = pDispExInfo->SynchFindMember(id);
+
+ // If the member does not exist then we return DISP_E_MEMBERNOTFOUND.
+ if (!pDispMemberInfo || (MemberInfoObj = ObjectFromHandle(pDispMemberInfo->m_hndMemberInfo)) == NULL)
+ {
+ hr = DISP_E_MEMBERNOTFOUND;
+ }
+ else
+ {
+ // Retrieve the type of the member.
+ MemberType = pDispMemberInfo->GetMemberType();
+
+ // Retrieve the member properties based on the type of the member.
+ switch (MemberType)
+ {
+ case Field:
+ {
+ *pgrfdex = fdexPropCanGet |
+ fdexPropCanPut |
+ fdexPropCannotPutRef |
+ fdexPropCannotCall |
+ fdexPropCannotConstruct |
+ fdexPropCannotSourceEvents;
+ break;
+ }
+
+ case Property:
+ {
+ BOOL bCanRead = FALSE;
+ BOOL bCanWrite = FALSE;
+
+ // Find the MethodDesc's for the CanRead property.
+ MethodDesc *pCanReadMD = MemberLoader::FindPropertyMethod(MemberInfoObj->GetMethodTable(), PROPERTY_INFO_CAN_READ_PROP, PropertyGet);
+ PREFIX_ASSUME_MSG((pCanReadMD != NULL), "Unable to find getter method for property PropertyInfo::CanRead");
+ MethodDescCallSite canRead(pCanReadMD, &MemberInfoObj);
+
+ // Find the MethodDesc's for the CanWrite property.
+ MethodDesc *pCanWriteMD = MemberLoader::FindPropertyMethod(MemberInfoObj->GetMethodTable(), PROPERTY_INFO_CAN_WRITE_PROP, PropertyGet);
+ PREFIX_ASSUME_MSG((pCanWriteMD != NULL), "Unable to find setter method for property PropertyInfo::CanWrite");
+ MethodDescCallSite canWrite(pCanWriteMD, &MemberInfoObj);
+
+ // Check to see if the property can be read.
+ ARG_SLOT CanReadArgs[] =
+ {
+ ObjToArgSlot(MemberInfoObj)
+ };
+
+ bCanRead = canRead.Call_RetBool(CanReadArgs);
+
+ // Check to see if the property can be written to.
+ ARG_SLOT CanWriteArgs[] =
+ {
+ ObjToArgSlot(MemberInfoObj)
+ };
+
+ bCanWrite = canWrite.Call_RetBool(CanWriteArgs);
+
+ *pgrfdex = (bCanRead ? fdexPropCanGet : fdexPropCannotGet) |
+ (bCanWrite ? fdexPropCanPut : fdexPropCannotPut) |
+ fdexPropCannotPutRef |
+ fdexPropCannotCall |
+ fdexPropCannotConstruct |
+ fdexPropCannotSourceEvents;
+ break;
+ }
+
+ case Method:
+ {
+ *pgrfdex = fdexPropCannotGet |
+ fdexPropCannotPut |
+ fdexPropCannotPutRef |
+ fdexPropCanCall |
+ fdexPropCannotConstruct |
+ fdexPropCannotSourceEvents;
+ break;
+ }
+
+ default:
+ {
+ hr = E_UNEXPECTED;
+ break;
+ }
+ }
+
+ // Mask out the unwanted properties.
+ *pgrfdex &= grfdexFetch;
+ }
+ }
+ GCPROTECT_END();
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+// IDispatchEx::GetNameSpaceParent
+HRESULT __stdcall DispatchEx_GetNameSpaceParent (
+ IDispatchEx* pDisp,
+ IUnknown **ppunk
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsSimpleTearOff(pDisp));
+ PRECONDITION(CheckPointer(ppunk, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Validate the arguments.
+ if (!ppunk)
+ return E_POINTER;
+
+ // @TODO (DM): Implement this.
+ *ppunk = NULL;
+ return E_NOTIMPL;
+}
+
+
+// IDispatchEx::GetNextDispID
+HRESULT __stdcall DispatchEx_GetNextDispID (
+ IDispatchEx* pDisp,
+ DWORD grfdex,
+ DISPID id,
+ DISPID *pid
+ )
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsSimpleTearOff(pDisp));
+ PRECONDITION(CheckPointer(pid, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ DispatchMemberInfo *pNextMember = NULL;
+ HRESULT hr = S_OK;
+
+ // Validate the arguments.
+ if (!pid)
+ return E_POINTER;
+
+ // The only two supported flags are fdexEnumDefault and fdexEnumAll.
+ if (grfdex & ~(fdexEnumDefault | fdexEnumAll))
+ return E_INVALIDARG;
+
+ // Initialize the pid to DISPID_UNKNOWN.
+ *pid = DISPID_UNKNOWN;
+
+    // Retrieve the dispatch info and the simple wrapper for this IDispatchEx.
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pDisp);
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ DispatchExInfo *pDispExInfo = pSimpleWrap->GetDispatchExInfo();
+ // Retrieve either the first member or the next based on the DISPID.
+ if (id == DISPID_STARTENUM)
+ pNextMember = pDispExInfo->GetFirstMember();
+ else
+ pNextMember = pDispExInfo->GetNextMember(id);
+
+ // If we have found a member that has not been deleted then return its DISPID.
+ if (pNextMember)
+ {
+ *pid = pNextMember->m_DispID;
+ hr = S_OK;
+ }
+ else
+ {
+ hr = S_FALSE;
+ }
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
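+
+// Usage sketch (illustrative): the standard IDispatchEx enumeration idiom driven by the
+// S_OK/S_FALSE contract above:
+//
+//   DISPID id = DISPID_STARTENUM;
+//   while (pDispEx->GetNextDispID(fdexEnumAll, id, &id) == S_OK)
+//   {
+//       // visit the member with DISPID 'id'
+//   }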
+
+
+// IDispatchEx::InvokeEx
+HRESULT __stdcall DispatchEx_InvokeEx (
+ IDispatchEx* pDisp,
+ DISPID id,
+ LCID lcid,
+ WORD wFlags,
+ DISPPARAMS *pdp,
+ VARIANT *pVarRes,
+ EXCEPINFO *pei,
+ IServiceProvider *pspCaller
+ )
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(IsSimpleTearOff(pDisp));
+ PRECONDITION(CheckPointer(pdp, NULL_OK));
+ PRECONDITION(CheckPointer(pVarRes, NULL_OK));
+ PRECONDITION(CheckPointer(pei, NULL_OK));
+ PRECONDITION(CheckPointer(pspCaller, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+    // Retrieve the dispatch info and the simple wrapper for this IDispatchEx.
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pDisp);
+ DispatchExInfo *pDispExInfo = pSimpleWrap->GetDispatchExInfo();
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ // Invoke the member.
+ hr = pDispExInfo->SynchInvokeMember(pSimpleWrap, id, lcid, wFlags, pdp, pVarRes, pei, pspCaller, NULL);
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+HRESULT __stdcall Inspectable_GetIIDs (
+ IInspectable *pInsp,
+ ULONG *iidCount,
+ IID **iids)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pInsp));
+ PRECONDITION(IsInProcCCWTearOff(pInsp));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (iidCount == NULL || iids == NULL)
+ return E_POINTER;
+
+ *iidCount = 0;
+ *iids = NULL;
+
+ // Either it is ICustomPropertyProvider or IWeakReferenceSource
+ ComCallWrapper *pWrap = MapIUnknownToWrapper(pInsp);
+
+ ComCallWrapperTemplate *pTemplate = pWrap->GetComCallWrapperTemplate();
+
+ ULONG numInterfaces = pTemplate->GetNumInterfaces();
+
+    // Determine the number of IIDs to return. IID_ICustomPropertyProvider is always
+    // included exactly once, so skip any managed WinRT interface that has the same IID
+    // to avoid reporting it twice.
+ ULONG numInspectables = 1;
+ for (ULONG i = 0; i < numInterfaces; i++)
+ {
+ ComMethodTable *pComMT = pTemplate->GetComMTForIndex(i);
+
+ // Skip any managed WinRT interface that is IID_ICustomPropertyProvider
+ if (pComMT != NULL && pComMT->GetInterfaceType() == ifInspectable &&
+ !IsEqualGUID(pComMT->GetIID(), IID_ICustomPropertyProvider))
+ {
+ numInspectables++;
+ }
+ }
+
+
+ // we shouldn't ever come here if the CCW does not support IInspectable
+ _ASSERTE(numInspectables > 0);
+
+ // as per the spec, make sure *iidCount is set even if the allocation fails
+ *iidCount = numInspectables;
+
+ S_UINT32 cbAlloc = S_UINT32(numInspectables) * S_UINT32(sizeof(IID));
+ if (cbAlloc.IsOverflow())
+ return E_OUTOFMEMORY;
+
+ NewArrayHolder<IID> result = (IID *)CoTaskMemAlloc(cbAlloc.Value());
+ if (result == NULL)
+ return E_OUTOFMEMORY;
+
+ // now fill out the output array with IIDs
+ result[0] = IID_ICustomPropertyProvider;
+
+ ULONG index = 1;
+ for (ULONG i = 0; i < numInterfaces; i++)
+ {
+ ComMethodTable *pComMT = pTemplate->GetComMTForIndex(i);
+
+ // Skip any managed WinRT interface that is IID_ICustomPropertyProvider
+ if (pComMT != NULL && pComMT->GetInterfaceType() == ifInspectable &&
+ !IsEqualGUID(pComMT->GetIID(), IID_ICustomPropertyProvider))
+ {
+ result[index] = pComMT->GetIID();
+ index++;
+ }
+ }
+ _ASSERTE(index == numInspectables);
+
+ *iids = result.Extract();
+ return hr;
+}
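+
+// Illustration of the contract above (IFoo is a hypothetical WinRT interface
+// implemented by the managed object; any real inspectable interface other than
+// ICustomPropertyProvider itself would appear the same way):
+//
+//   *iidCount == 2
+//   (*iids)[0] == IID_ICustomPropertyProvider   // always first
+//   (*iids)[1] == IID_IFoo                      // remaining inspectable interfaces
+//
+// Per the usual COM conventions for GetIids, the caller frees the returned array
+// with CoTaskMemFree.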
+
+
+HRESULT __stdcall Inspectable_GetRuntimeClassName(IInspectable *pInsp, HSTRING *className)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pInsp));
+ PRECONDITION(IsInProcCCWTearOff(pInsp));
+ }
+ CONTRACTL_END;
+
+ if (className == NULL)
+ return E_POINTER;
+
+ HRESULT hr = S_OK;
+ ComCallWrapper *pWrap = MapIUnknownToWrapper(pInsp);
+
+ *className = NULL;
+
+ MethodTable *pMT = pWrap->GetSimpleWrapper()->GetMethodTable();
+ _ASSERTE(pMT != NULL);
+
+ EX_TRY
+ {
+ StackSString strClassName;
+ TypeHandle thManagedType;
+
+ {
+ GCX_COOP();
+ OBJECTREF objref = NULL;
+
+ GCPROTECT_BEGIN(objref);
+ {
+ objref = pWrap->GetObjectRef();
+ thManagedType = objref->GetTypeHandle();
+ }
+ GCPROTECT_END();
+ }
+
+ bool bIsPrimitive = false;
+ if (WinRTTypeNameConverter::AppendWinRTTypeNameForManagedType(
+ thManagedType, // thManagedType
+ strClassName, // strWinRTTypeName
+ TRUE, // bForGetRuntimeClassName,
+ &bIsPrimitive // pbIsPrimitiveType
+ )
+ && !bIsPrimitive)
+ {
+ hr = WindowsCreateString(strClassName.GetUnicode(), strClassName.GetCount(), className);
+ }
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return hr;
+}
+
+HRESULT __stdcall WeakReferenceSource_GetWeakReference (
+ IWeakReferenceSource *pRefSrc,
+ IWeakReference **weakReference)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pRefSrc));
+ PRECONDITION(IsInProcCCWTearOff(pRefSrc));
+ }
+ CONTRACTL_END;
+
+ if (weakReference == NULL)
+ return E_INVALIDARG;
+
+ *weakReference = NULL;
+
+ HRESULT hr = S_OK;
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ SimpleComCallWrapper *pWrap = SimpleComCallWrapper::GetWrapperFromIP(pRefSrc);
+
+ // Creates a new WeakReferenceImpl that tracks the object lifetime in the current domain
+ // Note that this WeakReferenceImpl is not bound to a specific CCW
+ *weakReference = pWrap->CreateWeakReference(GET_THREAD());
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+#ifdef FEATURE_REMOTING
+// HELPER to call RealProxy::GetIUnknown to get the iunknown to give out
+// for this transparent proxy for calls to IMarshal
+IUnknown* GetIUnknownForTransparentProxyHelper(SimpleComCallWrapper *pSimpleWrap)
+{
+ CONTRACT (IUnknown*)
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pSimpleWrap));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ IUnknown* pMarshalerObj = NULL;
+
+ GCX_COOP();
+
+ EX_TRY
+ {
+ OBJECTREF oref = pSimpleWrap->GetObjectRef();
+ GCPROTECT_BEGIN(oref)
+ {
+ pMarshalerObj = GetIUnknownForTransparentProxy(&oref, TRUE);
+ oref = NULL;
+ }
+ GCPROTECT_END();
+ }
+ EX_CATCH
+ {
+ // ignore
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ RETURN pMarshalerObj;
+}
+#endif // FEATURE_REMOTING
+
+// Helper to setup IMarshal
+HRESULT GetSpecialMarshaler(IMarshal* pMarsh, SimpleComCallWrapper* pSimpleWrap, ULONG dwDestContext, IMarshal **ppMarshalRet)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pMarsh, NULL_OK));
+ PRECONDITION(CheckPointer(pSimpleWrap));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+#ifdef FEATURE_REMOTING
+ // transparent proxies are special
+ if (pSimpleWrap->IsObjectTP())
+ {
+ SafeComHolderPreemp<IUnknown> pMarshalerObj = NULL;
+
+ pMarshalerObj = GetIUnknownForTransparentProxyHelper(pSimpleWrap);
+        // QI for the IMarshal interface and verify that we don't get back
+        // a pointer to ourselves (GetIUnknownForTransparentProxyHelper could return
+        // a pointer back to the same object if RealProxy::GetCOMIUnknown
+        // is not overridden).
+ if (pMarshalerObj != NULL)
+ {
+ SafeComHolderPreemp<IMarshal> pMsh = NULL;
+ hr = SafeQueryInterfacePreemp(pMarshalerObj, IID_IMarshal, (IUnknown**)&pMsh);
+
+ // make sure we don't recurse
+ if(SUCCEEDED(hr) && pMsh != pMarsh)
+ {
+ *ppMarshalRet = pMsh.Extract();
+ return S_OK;
+ }
+ }
+ }
+#endif // FEATURE_REMOTING
+
+    // In an APPX process we always use the standard marshaller.
+    // In a non-APPX process we use standard marshalling for everything except in-proc servers.
+    // On CoreCLR we always use the standard marshaller as well.
+#if !defined(FEATURE_CORECLR)
+ if (!AppX::IsAppXProcess() && (dwDestContext == MSHCTX_INPROC))
+ {
+ *ppMarshalRet = NULL;
+ return S_OK;
+ }
+#endif // !FEATURE_CORECLR
+
+ SafeComHolderPreemp<IUnknown> pMarshalerObj = NULL;
+ IfFailRet(CoCreateFreeThreadedMarshaler(NULL, &pMarshalerObj));
+ return SafeQueryInterfacePreemp(pMarshalerObj, IID_IMarshal, (IUnknown**)ppMarshalRet);
+}
+
+
+//------------------------------------------------------------------------------------------
+// IMarshal methods for COM+ objects
+
+//------------------------------------------------------------------------------------------
+
+HRESULT __stdcall Marshal_GetUnmarshalClass (
+ IMarshal* pMarsh,
+ REFIID riid, void * pv, ULONG dwDestContext,
+ void * pvDestContext, ULONG mshlflags,
+ LPCLSID pclsid)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pMarsh));
+ PRECONDITION(IsSimpleTearOff(pMarsh));
+ PRECONDITION(CheckPointer(pv, NULL_OK));
+ PRECONDITION(CheckPointer(pvDestContext, NULL_OK));
+ PRECONDITION(CheckPointer(pclsid, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pMarsh);
+
+ // Prevent access to reflection over DCOM
+ if(dwDestContext != MSHCTX_INPROC)
+ {
+ if(!pSimpleWrap->GetComCallWrapperTemplate()->IsSafeTypeForMarshalling())
+ {
+ LogInterop(W("Unmarshal class blocked for reflection types."));
+ hr = E_NOINTERFACE;
+ return hr;
+ }
+ }
+
+ SafeComHolderPreemp<IMarshal> pMsh = NULL;
+ hr = GetSpecialMarshaler(pMarsh, pSimpleWrap, dwDestContext, (IMarshal **)&pMsh);
+ if (FAILED(hr))
+ return hr;
+
+ if (pMsh != NULL)
+ {
+ hr = pMsh->GetUnmarshalClass (riid, pv, dwDestContext, pvDestContext, mshlflags, pclsid);
+ return hr;
+ }
+
+ // Use a statically allocated singleton class to do all unmarshalling.
+ *pclsid = CLSID_ComCallUnmarshalV4;
+
+ return S_OK;
+}
+
+HRESULT __stdcall Marshal_GetMarshalSizeMax (
+ IMarshal* pMarsh,
+ REFIID riid, void * pv, ULONG dwDestContext,
+ void * pvDestContext, ULONG mshlflags,
+ ULONG * pSize)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pMarsh));
+ PRECONDITION(IsSimpleTearOff(pMarsh));
+ PRECONDITION(CheckPointer(pv, NULL_OK));
+ PRECONDITION(CheckPointer(pvDestContext, NULL_OK));
+ PRECONDITION(CheckPointer(pSize, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pMarsh);
+
+ SafeComHolderPreemp<IMarshal> pMsh = NULL;
+ HRESULT hr = GetSpecialMarshaler(pMarsh, pSimpleWrap, dwDestContext, (IMarshal **)&pMsh);
+ if (FAILED(hr))
+ return hr;
+
+ if (pMsh != NULL)
+ {
+ HRESULT hr = pMsh->GetMarshalSizeMax (riid, pv, dwDestContext, pvDestContext, mshlflags, pSize);
+ return hr;
+ }
+
+ *pSize = sizeof (IUnknown *) + sizeof (ULONG) + sizeof(GUID);
+
+ return S_OK;
+}
+
+HRESULT __stdcall Marshal_MarshalInterface (
+ IMarshal* pMarsh,
+ LPSTREAM pStm, REFIID riid, void * pv,
+ ULONG dwDestContext, LPVOID pvDestContext,
+ ULONG mshlflags)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pMarsh));
+ PRECONDITION(IsSimpleTearOff(pMarsh));
+ PRECONDITION(CheckPointer(pv));
+ PRECONDITION(CheckPointer(pvDestContext, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ ULONG cbRef;
+ HRESULT hr = S_OK;
+
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pMarsh);
+
+ // Prevent access to reflection over DCOM
+ if(dwDestContext != MSHCTX_INPROC)
+ {
+ if(!pSimpleWrap->GetComCallWrapperTemplate()->IsSafeTypeForMarshalling())
+ {
+ LogInterop(W("Marshal interface blocked for reflection types."));
+ hr = E_NOINTERFACE;
+ return hr;
+ }
+ }
+
+ SafeComHolderPreemp<IMarshal> pMsh = NULL;
+ hr = GetSpecialMarshaler(pMarsh, pSimpleWrap, dwDestContext, (IMarshal **)&pMsh);
+ if (FAILED(hr))
+ return hr;
+
+ if (pMsh != NULL)
+ {
+ hr = pMsh->MarshalInterface (pStm, riid, pv, dwDestContext, pvDestContext, mshlflags);
+ return hr;
+ }
+
+ // Write the raw IP into the marshalling stream.
+ hr = pStm->Write (&pv, sizeof (pv), 0);
+ if (FAILED (hr))
+ return hr;
+
+    // Followed by the marshalling flags (we need these on the remote end to
+    // manage the refcounting of the IP).
+ hr = pStm->Write (&mshlflags, sizeof (mshlflags), 0);
+ if (FAILED (hr))
+ return hr;
+
+ // Followed by the secret, which confirms that the pointer above can be trusted
+ // because it originated from our runtime.
+ hr = InitUnmarshalSecret();
+ if (FAILED(hr))
+ return hr;
+
+ hr = pStm->Write(g_UnmarshalSecret, sizeof(g_UnmarshalSecret), 0);
+ if (FAILED(hr))
+ return hr;
+
+ // We have now created an additional reference to the object.
+ cbRef = SafeAddRefPreemp((IUnknown *)pv);
+ LogInteropAddRef((IUnknown *)pv, cbRef, "MarshalInterface");
+
+ return S_OK;
+}
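+
+// For reference, the stream written above consists of three fixed-size fields;
+// this is exactly the size reported by Marshal_GetMarshalSizeMax:
+//
+//   [ void*  pv        ]  raw interface pointer (valid in-proc only)
+//   [ ULONG  mshlflags ]  marshalling flags, used on the remote end for refcounting
+//   [ GUID   secret    ]  g_UnmarshalSecret, proving the data came from this runtime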
+
+HRESULT __stdcall Marshal_UnmarshalInterface (
+ IMarshal* pMarsh,
+ LPSTREAM pStm, REFIID riid,
+ void ** ppvObj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pMarsh));
+ PRECONDITION(IsSimpleTearOff(pMarsh));
+ PRECONDITION(CheckPointer(pStm, NULL_OK));
+ PRECONDITION(CheckPointer(ppvObj, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Unmarshal side only.
+ return E_NOTIMPL;
+}
+
+HRESULT __stdcall Marshal_ReleaseMarshalData (IMarshal* pMarsh, LPSTREAM pStm)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pMarsh));
+ PRECONDITION(IsSimpleTearOff(pMarsh));
+ PRECONDITION(CheckPointer(pStm, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // Unmarshal side only.
+ return E_NOTIMPL;
+}
+
+HRESULT __stdcall Marshal_DisconnectObject (IMarshal* pMarsh, ULONG dwReserved)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pMarsh));
+ PRECONDITION(IsSimpleTearOff(pMarsh));
+ }
+ CONTRACTL_END;
+
+ // Nothing we can (or need to) do here. The client is using a raw IP to
+ // access this server, so the server shouldn't go away until the client
+ // Release()'s it.
+ return S_OK;
+}
+
+//------------------------------------------------------------------------------------------
+// IManagedObject methods for COM+ objects
+//------------------------------------------------------------------------------------------
+HRESULT __stdcall ManagedObject_GetObjectIdentity(IManagedObject *pManaged,
+ BSTR* pBSTRGUID, DWORD* pAppDomainID,
+ void** pCCW)
+{
+ // NOTE: THIS METHOD CAN BE CALLED FROM ANY APP DOMAIN
+
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(ThrowOutOfMemory());
+ PRECONDITION(CheckPointer(pManaged));
+ PRECONDITION(IsSimpleTearOff(pManaged));
+ PRECONDITION(CheckPointer(pBSTRGUID, NULL_OK));
+ PRECONDITION(CheckPointer(pAppDomainID, NULL_OK));
+ PRECONDITION(CheckPointer(pCCW, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (pBSTRGUID == NULL || pAppDomainID == NULL || pCCW == NULL)
+ return E_POINTER;
+
+ HRESULT hr = S_OK;
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ *pCCW = 0;
+ *pAppDomainID = 0;
+
+ BSTR bstrProcGUID = GetProcessGUID();
+ BSTR bstrRetGUID = ::SysAllocString((WCHAR *)bstrProcGUID);
+
+ if (bstrRetGUID == NULL)
+ ThrowOutOfMemory();
+
+ *pBSTRGUID = bstrRetGUID;
+
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pManaged);
+ _ASSERTE(GET_THREAD()->GetDomain()->GetId() == pSimpleWrap->GetDomainID());
+
+ ComCallWrapper* pComCallWrap = pSimpleWrap->GetMainWrapper();
+ _ASSERTE(pComCallWrap);
+
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+ {
+ OBJECTREF oref = pComCallWrap->GetObjectRef();
+
+ // The parameter is typed as void** but due to the potential cross process (cross bitness)
+ // nature of this call, only the lower 32-bits of the returned value are guaranteed to be
+ // received by the caller. Instead of a CCW pointer which was the original intended use of
+ // this parameter, we'll pass a syncblock index which is always DWORD sized. The parameter
+ // is protocol-documented to be "implementation-specific, opaque value that helps identify
+ // the managed object" so we can pass whatever we want without legal consequences.
+ *pCCW = (void *)oref->GetSyncBlockIndex();
+ }
+
+ AppDomain* pDomain = GET_THREAD()->GetDomain();
+ _ASSERTE(pDomain != NULL);
+
+ *pAppDomainID = pDomain->GetId().m_dwId;
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+
+HRESULT __stdcall ManagedObject_GetSerializedBuffer(IManagedObject *pManaged,
+ BSTR* pBStr)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pManaged));
+ PRECONDITION(IsSimpleTearOff(pManaged));
+ PRECONDITION(CheckPointer(pBStr, NULL_OK));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_CORECLR
+ _ASSERTE(!"NYI");
+ return E_NOTIMPL;
+#else // FEATURE_CORECLR
+
+ HRESULT hr = S_OK;
+ if (pBStr == NULL)
+ return E_POINTER;
+
+ *pBStr = NULL;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP( pManaged );
+ ComCallWrapper *pComCallWrap = pSimpleWrap->GetMainWrapper();
+
+ _ASSERTE(pComCallWrap != NULL);
+
+ //@todo don't allow serialization of Configured objects through DCOM
+ _ASSERTE(GetThread()->GetDomain()->GetId() == pSimpleWrap->GetDomainID());
+
+ BOOL fLegacyMode = (GetAppDomain()->GetComOrRemotingFlag() == COMorRemoting_LegacyMode);
+
+ OBJECTREF oref = pComCallWrap->GetObjectRef();
+ GCPROTECT_BEGIN(oref)
+ {
+ // GetSerializedBuffer is only called in cross-runtime/cross-process scenarios so we pass
+ // fCrossRuntime=TRUE unless we are in legacy mode
+ if (!ConvertObjectToBSTR(&oref, !fLegacyMode, pBStr))
+ {
+ // ConvertObjectToBSTR returning FALSE is equivalent to throwing SerializationException
+ hr = COR_E_SERIALIZATION;
+ }
+ }
+ GCPROTECT_END();
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+
+#endif // FEATURE_CORECLR
+}
+
+//------------------------------------------------------------------------------------------
+// IConnectionPointContainer methods for COM+ objects
+//------------------------------------------------------------------------------------------
+
+// Enumerate all the connection points supported by the component.
+HRESULT __stdcall ConnectionPointContainer_EnumConnectionPoints(IUnknown* pUnk,
+ IEnumConnectionPoints **ppEnum)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(ppEnum, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (!ppEnum)
+ return E_POINTER;
+
+ *ppEnum = NULL;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ _ASSERTE(IsSimpleTearOff(pUnk));
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+ pSimpleWrap->EnumConnectionPoints(ppEnum);
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+// Find a specific connection point based on the IID of the event interface.
+HRESULT __stdcall ConnectionPointContainer_FindConnectionPoint(IUnknown* pUnk,
+ REFIID riid,
+ IConnectionPoint **ppCP)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(ppCP, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (!ppCP)
+ return E_POINTER;
+
+ *ppCP = NULL;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ _ASSERTE(IsSimpleTearOff(pUnk));
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+ if (!pSimpleWrap->FindConnectionPoint(riid, ppCP))
+ hr = CONNECT_E_NOCONNECTION;
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+
+//------------------------------------------------------------------------------------------
+// IObjectSafety methods for COM+ objects
+//------------------------------------------------------------------------------------------
+
+HRESULT __stdcall ObjectSafety_GetInterfaceSafetyOptions(IUnknown* pUnk,
+ REFIID riid,
+ DWORD *pdwSupportedOptions,
+ DWORD *pdwEnabledOptions)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(IsSimpleTearOff(pUnk));
+ PRECONDITION(CheckPointer(pdwSupportedOptions, NULL_OK));
+ PRECONDITION(CheckPointer(pdwEnabledOptions, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (pdwSupportedOptions == NULL || pdwEnabledOptions == NULL)
+ return E_POINTER;
+
+ // Make sure the COM+ object implements the requested interface.
+ SafeComHolderPreemp<IUnknown> pItf;
+ HRESULT hr = SafeQueryInterfacePreemp(pUnk, riid, (IUnknown**)&pItf);
+    LogInteropQI(pUnk, riid, hr, "QI for riid in GetInterfaceSafetyOptions");
+ if (SUCCEEDED(hr))
+ {
+ // We support this interface so set the safety options accordingly
+ *pdwSupportedOptions = (INTERFACESAFE_FOR_UNTRUSTED_DATA | INTERFACESAFE_FOR_UNTRUSTED_CALLER);
+ *pdwEnabledOptions = (INTERFACESAFE_FOR_UNTRUSTED_DATA | INTERFACESAFE_FOR_UNTRUSTED_CALLER);
+ return S_OK;
+ }
+ else
+ {
+ // We don't support this interface
+ *pdwSupportedOptions = 0;
+ *pdwEnabledOptions = 0;
+ return E_NOINTERFACE;
+ }
+}
+
+HRESULT __stdcall ObjectSafety_SetInterfaceSafetyOptions(IUnknown* pUnk,
+ REFIID riid,
+ DWORD dwOptionSetMask,
+ DWORD dwEnabledOptions)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(IsSimpleTearOff(pUnk));
+ }
+ CONTRACTL_END;
+
+ // Make sure the COM+ object implements the requested interface.
+ SafeComHolderPreemp<IUnknown> pItf;
+ HRESULT hr = SafeQueryInterfacePreemp(pUnk, riid, (IUnknown**)&pItf);
+    LogInteropQI(pUnk, riid, hr, "QI for riid in SetInterfaceSafetyOptions");
+ if (FAILED(hr))
+ return hr;
+
+ if ((dwEnabledOptions & ~(INTERFACESAFE_FOR_UNTRUSTED_DATA | INTERFACESAFE_FOR_UNTRUSTED_CALLER)) != 0)
+ return E_FAIL;
+
+ return S_OK;
+}
+
+HRESULT __stdcall ICustomPropertyProvider_GetProperty(IUnknown *pPropertyProvider, HSTRING hstrName, IUnknown **ppProperty)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pPropertyProvider));
+ PRECONDITION(IsSimpleTearOff(pPropertyProvider));
+ PRECONDITION(CheckPointer(ppProperty, NULL_OK));
+ PRECONDITION(!MapIUnknownToWrapper(pPropertyProvider)->NeedToSwitchDomains(GetThread()));
+ }
+ CONTRACTL_END;
+
+ if (ppProperty == NULL)
+ return E_POINTER;
+
+ // Initialize [out] parameters
+ *ppProperty = NULL;
+
+ HRESULT hr;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ _ASSERTE(IsSimpleTearOff(pPropertyProvider));
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pPropertyProvider);
+
+ GCX_COOP();
+
+ struct _gc {
+ OBJECTREF TargetObj;
+ STRINGREF StringRef;
+ OBJECTREF RetVal;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.TargetObj = pSimpleWrap->GetObjectRef();
+
+ //
+ // Marshal HSTRING to String object
+        // NULL HSTRINGs are equivalent to empty strings
+ //
+ UINT32 cchString = 0;
+ LPCWSTR pwszString = W("");
+ if (hstrName != NULL)
+ {
+ pwszString = WindowsGetStringRawBuffer(hstrName, &cchString);
+ }
+
+ gc.StringRef = StringObject::NewString(pwszString, cchString);
+
+ //
+ // Call ICustomPropertyProviderImpl.CreateProperty
+ //
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__ICUSTOMPROPERTYPROVIDERIMPL__CREATE_PROPERTY);
+ DECLARE_ARGHOLDER_ARRAY(args, 2);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.TargetObj);
+ args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(gc.StringRef);
+
+ CALL_MANAGED_METHOD_RETREF(gc.RetVal, OBJECTREF, args);
+
+ if (gc.RetVal != NULL)
+ {
+ // The object is a CustomPropertyImpl. Get the ICustomProperty implementation from CCW and return that
+            // The object is a CustomPropertyImpl. Get the ICustomProperty implementation from its CCW and return that.
+ }
+
+ GCPROTECT_END();
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ // Don't fail if property can't be found - just return S_OK and NULL property
+ return S_OK;
+}
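+
+// A note on the call pattern above: PREPARE_NONVIRTUAL_CALLSITE /
+// DECLARE_ARGHOLDER_ARRAY / CALL_MANAGED_METHOD_RETREF is the runtime's standard
+// sequence for invoking a managed method from native code. Arguments are positional
+// (ARGNUM_0 is the first managed parameter), and all OBJECTREF arguments must stay
+// GC-protected for the duration of the call, which is why they live in the
+// GCPROTECT_BEGIN'd _gc struct rather than in raw locals.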
+
+HRESULT __stdcall ICustomPropertyProvider_GetIndexedProperty(IUnknown *pPropertyProvider,
+ HSTRING hstrName,
+ TypeNameNative indexedParamType,
+ /* [out, retval] */ IUnknown **ppProperty)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pPropertyProvider));
+ PRECONDITION(IsSimpleTearOff(pPropertyProvider));
+ PRECONDITION(CheckPointer(ppProperty, NULL_OK));
+ PRECONDITION(!MapIUnknownToWrapper(pPropertyProvider)->NeedToSwitchDomains(GetThread()));
+ }
+ CONTRACTL_END;
+
+ if (ppProperty == NULL)
+ return E_POINTER;
+
+ // Initialize [out] parameters
+ *ppProperty = NULL;
+
+ HRESULT hr;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ _ASSERTE(IsSimpleTearOff(pPropertyProvider));
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pPropertyProvider);
+
+ GCX_COOP();
+
+ struct _gc {
+ OBJECTREF TargetObj;
+ STRINGREF StringRef;
+ OBJECTREF RetVal;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.TargetObj = pSimpleWrap->GetObjectRef();
+
+ //
+ // Marshal HSTRING to String object
+        // NULL HSTRINGs are equivalent to empty strings
+ //
+ UINT32 cchString = 0;
+ LPCWSTR pwszString = W("");
+ if (hstrName != NULL)
+ {
+ pwszString = WindowsGetStringRawBuffer(hstrName, &cchString);
+ }
+
+ gc.StringRef = StringObject::NewString(pwszString, cchString);
+
+ //
+ // Call ICustomPropertyProviderImpl.CreateIndexedProperty
+ //
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__ICUSTOMPROPERTYPROVIDERIMPL__CREATE_INDEXED_PROPERTY);
+ DECLARE_ARGHOLDER_ARRAY(args, 3);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.TargetObj);
+ args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(gc.StringRef);
+ args[ARGNUM_2] = PTR_TO_ARGHOLDER(&indexedParamType);
+
+ CALL_MANAGED_METHOD_RETREF(gc.RetVal, OBJECTREF, args);
+
+ if (gc.RetVal != NULL)
+ {
+ // The object is a CustomPropertyImpl. Get the ICustomProperty implementation from CCW and return that
+            // The object is a CustomPropertyImpl. Get the ICustomProperty implementation from its CCW and return that.
+ }
+
+ GCPROTECT_END();
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ // Don't fail if property can't be found - just return S_OK and NULL property
+ return S_OK;
+}
+
+HRESULT __stdcall ICustomPropertyProvider_GetStringRepresentation(IUnknown *pPropertyProvider,
+ /* [out, retval] */ HSTRING *phstrStringRepresentation)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pPropertyProvider));
+ PRECONDITION(IsSimpleTearOff(pPropertyProvider));
+ PRECONDITION(CheckPointer(phstrStringRepresentation, NULL_OK));
+ PRECONDITION(!MapIUnknownToWrapper(pPropertyProvider)->NeedToSwitchDomains(GetThread()));
+ }
+ CONTRACTL_END;
+
+ if (phstrStringRepresentation == NULL)
+ return E_POINTER;
+
+ // Initialize [out] parameters
+ *phstrStringRepresentation = NULL;
+
+ HRESULT hr;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ _ASSERTE(IsSimpleTearOff(pPropertyProvider));
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pPropertyProvider);
+
+ GCX_COOP();
+
+ struct _gc {
+ OBJECTREF TargetObj;
+ STRINGREF RetVal;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.TargetObj = pSimpleWrap->GetObjectRef();
+
+ //
+        // Call IStringableHelper.ToString() to get the string representation, either from IStringable.ToString() or Object.ToString()
+ //
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__ISTRINGABLEHELPER__TO_STRING);
+ DECLARE_ARGHOLDER_ARRAY(args, 1);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.TargetObj);
+ CALL_MANAGED_METHOD_RETREF(gc.RetVal, STRINGREF, args);
+
+ //
+ // Convert managed string to HSTRING
+ //
+ if (gc.RetVal == NULL)
+ *phstrStringRepresentation = NULL;
+ else
+ hr = ::WindowsCreateString(gc.RetVal->GetBuffer(), gc.RetVal->GetStringLength(), phstrStringRepresentation);
+
+ GCPROTECT_END();
+
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+}
+
+HRESULT __stdcall ICustomPropertyProvider_GetType(IUnknown *pPropertyProvider,
+ /* [out, retval] */ TypeNameNative *pTypeIdentifier)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pPropertyProvider));
+ PRECONDITION(IsSimpleTearOff(pPropertyProvider));
+ PRECONDITION(CheckPointer(pTypeIdentifier));
+ PRECONDITION(!MapIUnknownToWrapper(pPropertyProvider)->NeedToSwitchDomains(GetThread()));
+ }
+ CONTRACTL_END;
+
+ if (pTypeIdentifier == NULL)
+ return E_POINTER;
+
+ // Initialize [out] parameters
+ ::ZeroMemory(pTypeIdentifier, sizeof(TypeNameNative));
+
+ HRESULT hr;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ _ASSERTE(IsSimpleTearOff(pPropertyProvider));
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pPropertyProvider);
+
+ GCX_COOP();
+
+ OBJECTREF refTargetObj = NULL;
+ GCPROTECT_BEGIN(refTargetObj);
+
+ refTargetObj = pSimpleWrap->GetObjectRef();
+
+ //
+ // Call ICustomPropertyProviderImpl.GetType()
+ //
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__ICUSTOMPROPERTYPROVIDERIMPL__GET_TYPE);
+ DECLARE_ARGHOLDER_ARRAY(args, 2);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(refTargetObj);
+ args[ARGNUM_1] = PTR_TO_ARGHOLDER(pTypeIdentifier);
+
+ CALL_MANAGED_METHOD_NORET(args);
+
+ GCPROTECT_END();
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return S_OK;
+}
+
+HRESULT __stdcall IStringable_ToString(IUnknown* pStringable,
+ /* [out, retval] */ HSTRING* pResult)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pStringable));
+ PRECONDITION(IsSimpleTearOff(pStringable));
+ PRECONDITION(CheckPointer(pResult, NULL_OK));
+ PRECONDITION(!MapIUnknownToWrapper(pStringable)->NeedToSwitchDomains(GetThread()));
+ }
+ CONTRACTL_END;
+
+ if (pResult == NULL)
+ return E_POINTER;
+
+ // Initialize [out] parameters
+ *pResult = NULL;
+
+ HRESULT hr;
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ _ASSERTE(IsSimpleTearOff(pStringable));
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pStringable);
+
+ GCX_COOP();
+
+ struct _gc {
+ OBJECTREF TargetObj;
+ STRINGREF RetVal;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.TargetObj = pSimpleWrap->GetObjectRef();
+ MethodDesc* pToStringMD = NULL;
+
+ MethodTable* pMT = gc.TargetObj->GetMethodTable();
+
+
+ // Get the MethodTable for Windows.Foundation.IStringable.
+        StackSString strIStringable(SString::Literal, W("Windows.Foundation.IStringable"));
+ MethodTable *pMTIStringable = GetWinRTType(&strIStringable, /* bThrowIfNotFound = */ FALSE).GetMethodTable();
+
+ if (pMT != NULL && pMTIStringable != NULL && pMT->ImplementsInterface(pMTIStringable))
+ {
+ // Find the ToString() method of the interface.
+ pToStringMD = MemberLoader::FindMethod(
+ pMTIStringable,
+ "ToString",
+ &gsig_IM_RetStr);
+
+ _ASSERTE(pToStringMD != NULL);
+ }
+ else
+ {
+            // The object does not implement the IStringable interface, so we call the default implementation, Object.ToString().
+ pToStringMD = MscorlibBinder::GetMethod(METHOD__OBJECT__TO_STRING);
+ _ASSERTE(pToStringMD != NULL);
+ }
+
+ PREPARE_VIRTUAL_CALLSITE_USING_METHODDESC(pToStringMD, gc.TargetObj);
+ DECLARE_ARGHOLDER_ARRAY(args, 1);
+ args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(gc.TargetObj);
+
+ CALL_MANAGED_METHOD_RETREF(gc.RetVal, STRINGREF, args);
+
+ //
+ // Convert managed string to HSTRING
+ //
+ if (gc.RetVal == NULL)
+ *pResult = NULL;
+ else
+ hr = ::WindowsCreateString(gc.RetVal->GetBuffer(), gc.RetVal->GetStringLength(), pResult);
+
+ GCPROTECT_END();
+
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return hr;
+
+}
+
+
+ULONG __stdcall
+ICCW_AddRefFromJupiter(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(IsSimpleTearOff(pUnk));
+ }
+ CONTRACTL_END;
+
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+
+ return pSimpleWrap->AddJupiterRef();
+}
+
+ULONG __stdcall
+ICCW_ReleaseFromJupiter(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(pUnk));
+ }
+ CONTRACTL_END;
+
+ ULONG cbRef = -1;
+
+ HRESULT hr;
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+
+ cbRef = pSimpleWrap->ReleaseJupiterRef();
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ return cbRef;
+}
+
+HRESULT __stdcall
+ICCW_Peg(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pUnk));
+ }
+ CONTRACTL_END;
+
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+
+ pSimpleWrap->MarkPegged();
+
+ STRESS_LOG1(LF_INTEROP, LL_INFO1000, "CCW 0x%p pegged\n", (ComCallWrapper *)pSimpleWrap->GetMainWrapper());
+
+ return S_OK;
+}
+
+HRESULT __stdcall
+ICCW_Unpeg(IUnknown* pUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pUnk));
+ }
+ CONTRACTL_END;
+
+ SimpleComCallWrapper *pSimpleWrap = SimpleComCallWrapper::GetWrapperFromIP(pUnk);
+
+ pSimpleWrap->UnMarkPegged();
+
+ STRESS_LOG1(LF_INTEROP, LL_INFO1000, "CCW 0x%p unpegged\n", (ComCallWrapper *)pSimpleWrap->GetMainWrapper());
+
+ return S_OK;
+}
diff --git a/src/vm/stdinterfaces.h b/src/vm/stdinterfaces.h
new file mode 100644
index 0000000000..a1d2d5873b
--- /dev/null
+++ b/src/vm/stdinterfaces.h
@@ -0,0 +1,562 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//---------------------------------------------------------------------------------
+// stdinterfaces.h
+//
+// Defines various standard COM interfaces; refer to stdinterfaces.cpp for more documentation.
+
+//---------------------------------------------------------------------------------
+
+#ifndef _H_STDINTERFACES_
+#define _H_STDINTERFACES_
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+#include "dispex.h"
+#include "weakreference.h"
+#include "common.h"
+
+extern const IID IID_IWeakReferenceSource;
+extern const IID IID_IWeakReference;
+extern const IID IID_ICustomPropertyProvider;
+extern const IID IID_ICCW;
+
+// Until the Windows SDK is updated, just hard-code the IAgileObject IID
+#ifndef __IAgileObject_INTERFACE_DEFINED__
+DEFINE_GUID(IID_IAgileObject,0x94ea2b94,0xe9cc,0x49e0,0xc0,0xff,0xee,0x64,0xca,0x8f,0x5b,0x90);
+MIDL_INTERFACE("94ea2b94-e9cc-49e0-c0ff-ee64ca8f5b90")
+IAgileObject : public IUnknown
+{
+public:
+};
+#endif // !__IAgileObject_INTERFACE_DEFINED__
+
+// Until the Windows SDK is updated, just hard-code the INoMarshal IID
+#ifndef __INoMarshal_INTERFACE_DEFINED__
+DEFINE_GUID(IID_INoMarshal,0xecc8691b,0xc1db,0x4dc0,0x85,0x5e,0x65,0xf6,0xc5,0x51,0xaf,0x49);
+MIDL_INTERFACE("ecc8691b-c1db-4dc0-855e-65f6c551af49")
+INoMarshal : public IUnknown
+{
+public:
+};
+#endif // !__INoMarshal_INTERFACE_DEFINED__
+
+
+class Assembly;
+class Module;
+class MethodTable;
+struct ITypeLibExporterNotifySink;
+
+typedef HRESULT (__stdcall* PCOMFN)(void);
+
+//------------------------------------------------------------------------------------------
+// HRESULT's returned by GetITypeInfoForEEClass.
+#define S_USEIUNKNOWN (HRESULT)2
+#define S_USEIDISPATCH (HRESULT)3
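+// (Both are success codes: they tell the caller to fall back to the standard
+// IUnknown or IDispatch type info rather than a class-specific ITypeInfo.)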
+
+// For free-threaded marshaling, we must not be spoofed by out-of-process or cross-runtime marshal data.
+// Only unmarshal data that comes from our own runtime.
+extern BYTE g_UnmarshalSecret[sizeof(GUID)];
+extern bool g_fInitedUnmarshalSecret;
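+//
+// Conceptual sketch of the unmarshal-side check (the real implementation lives in
+// the unmarshal class identified by CLSID_ComCallUnmarshalV4 and is not part of
+// this header; the names below are illustrative only): the unmarshaler reads back
+// the pointer, flags and secret written by Marshal_MarshalInterface and rejects
+// the data unless the secret matches this runtime's g_UnmarshalSecret.
+//
+//   GUID incoming;
+//   IfFailRet(pStm->Read(&incoming, sizeof(incoming), NULL));
+//   if (!g_fInitedUnmarshalSecret ||
+//       memcmp(&incoming, g_UnmarshalSecret, sizeof(g_UnmarshalSecret)) != 0)
+//       return E_UNEXPECTED;   // data did not originate from this runtime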
+
+struct ExportTypeLibFromLoadedAssembly_Args
+{
+ Assembly* pAssembly;
+ LPCWSTR szTlb;
+ ITypeLib** ppTlb;
+ ITypeLibExporterNotifySink* pINotify;
+ int flags;
+ HRESULT hr;
+};
+
+// Make sure to keep the following enum and the g_rgStdVtables array in sync.
+enum Enum_StdInterfaces
+{
+ enum_InnerUnknown = 0,
+ enum_IProvideClassInfo,
+ enum_IMarshal,
+ enum_ISupportsErrorInfo,
+ enum_IErrorInfo,
+ enum_IManagedObject,
+ enum_IConnectionPointContainer,
+ enum_IObjectSafety,
+ enum_IDispatchEx,
+ enum_IWeakReferenceSource,
+ enum_ICustomPropertyProvider,
+ enum_ICCW,
+ enum_IAgileObject,
+ enum_IStringable,
+ // add your favorite std interface here
+ enum_LastStdVtable,
+
+ enum_IUnknown = 0xff, // special enum for std unknown
+};
+
+// array of vtable pointers for std. interfaces such as IProvideClassInfo etc.
+extern const SLOT * const g_rgStdVtables[];
+
+template <size_t nVtableEntries>
+struct StdInterfaceDesc
+{
+ // This is a self-describing vtable pointer.
+ Enum_StdInterfaces m_StdInterfaceKind;
+ UINT_PTR const * const m_vtable[nVtableEntries];
+};
+
+typedef DPTR(StdInterfaceDesc<1>) PTR_StdInterfaceDesc;
+typedef VPTR(IUnknown) PTR_IUnknown;
+
+inline static Enum_StdInterfaces GetStdInterfaceKind(PTR_IUnknown pUnk)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ PTR_SLOT pVtable = dac_cast<PTR_SLOT>(*(dac_cast<PTR_TADDR>(pUnk)));
+ PTR_StdInterfaceDesc pDesc = dac_cast<PTR_StdInterfaceDesc>(dac_cast<PTR_BYTE>(pVtable) - offsetof(StdInterfaceDesc<1>, m_vtable));
+
+#ifndef DACCESS_COMPILE
+    // Make sure the interface kind is the right one.
+    // Only do this in non-DAC builds, since we don't want to pull in the g_rgStdVtables global variable.
+ _ASSERTE(g_rgStdVtables[pDesc->m_StdInterfaceKind] == pVtable);
+#endif // !DACCESS_COMPILE
+
+ return pDesc->m_StdInterfaceKind;
+}
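+
+// Memory layout behind the pointer arithmetic above: each standard vtable is
+// embedded in a StdInterfaceDesc, so the interface kind tag sits immediately
+// before the vtable slots that the COM interface pointer refers to.
+//
+//   StdInterfaceDesc<N>
+//   +----------------------+
+//   | m_StdInterfaceKind   |   <-- recovered by subtracting offsetof(..., m_vtable)
+//   +----------------------+
+//   | m_vtable[0]          |   <-- the interface pointer's vtable points here
+//   | ...                  |
+//   | m_vtable[N-1]        |
+//   +----------------------+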
+
+
+// IUnknown is part of IDispatch
+// Common vtables for well-known COM interfaces
+// shared by all COM+ callable wrappers.
+extern const StdInterfaceDesc<3> g_InnerUnknown;
+extern const StdInterfaceDesc<4> g_IProvideClassInfo;
+extern const StdInterfaceDesc<9> g_IMarshal;
+extern const StdInterfaceDesc<4> g_ISupportsErrorInfo;
+extern const StdInterfaceDesc<8> g_IErrorInfo;
+extern const StdInterfaceDesc<5> g_IManagedObject;
+extern const StdInterfaceDesc<5> g_IConnectionPointContainer;
+extern const StdInterfaceDesc<5> g_IObjectSafety;
+extern const StdInterfaceDesc<15> g_IDispatchEx;
+extern const StdInterfaceDesc<4> g_IWeakReferenceSource;
+extern const StdInterfaceDesc<10> g_ICustomPropertyProvider;
+extern const StdInterfaceDesc<7> g_ICCW;
+extern const StdInterfaceDesc<3> g_IAgileObject;
+extern const StdInterfaceDesc<7> g_IStringable;
+
+// Enum of COM class types
+enum ComClassType
+{
+ enum_UserDefined = 0,
+ enum_Collection,
+ enum_Exception,
+ enum_Event,
+ enum_Delegate,
+ enum_Control,
+ enum_Last,
+};
+
+
+//-------------------------------------------------------------------------
+// IProvideClassInfo methods
+HRESULT __stdcall ClassInfo_GetClassInfo_Wrapper(IUnknown* pUnk,
+ ITypeInfo** ppTI); //Address of output variable that receives the type info.
+
+// ---------------------------------------------------------------------------
+// Interface ISupportsErrorInfo
+
+// %%Function: SupportsErroInfo_IntfSupportsErrorInfo,
+// ---------------------------------------------------------------------------
+HRESULT __stdcall
+SupportsErroInfo_IntfSupportsErrorInfo_Wrapper(IUnknown* pUnk, REFIID riid);
+
+// ---------------------------------------------------------------------------
+// Interface IErrorInfo
+
+// %%Function: ErrorInfo_GetDescription,
+HRESULT __stdcall ErrorInfo_GetDescription_Wrapper(IUnknown* pUnk, BSTR* pbstrDescription);
+
+// %%Function: ErrorInfo_GetGUID,
+HRESULT __stdcall ErrorInfo_GetGUID_Wrapper(IUnknown* pUnk, GUID* pguid);
+
+// %%Function: ErrorInfo_GetHelpContext,
+HRESULT __stdcall ErrorInfo_GetHelpContext_Wrapper(IUnknown* pUnk, DWORD* pdwHelpCtxt);
+
+// %%Function: ErrorInfo_GetHelpFile,
+HRESULT __stdcall ErrorInfo_GetHelpFile_Wrapper(IUnknown* pUnk, BSTR* pbstrHelpFile);
+
+// %%Function: ErrorInfo_GetSource,
+HRESULT __stdcall ErrorInfo_GetSource_Wrapper(IUnknown* pUnk, BSTR* pbstrSource);
+
+//------------------------------------------------------------------------------------------
+// IDispatch methods for COM+ objects. These methods dispatch to the appropriate
+// implementation based on the flags of the class that implements them.
+
+
+// %%Function: IDispatch::GetTypeInfoCount
+HRESULT __stdcall Dispatch_GetTypeInfoCount_Wrapper (
+ IDispatch* pDisp,
+ unsigned int *pctinfo);
+
+
+// %%Function: IDispatch::GetTypeInfo
+HRESULT __stdcall Dispatch_GetTypeInfo_Wrapper (
+ IDispatch* pDisp,
+ unsigned int itinfo,
+ LCID lcid,
+ ITypeInfo **pptinfo);
+
+// %%Function: IDispatch::GetIDsOfNames
+HRESULT __stdcall Dispatch_GetIDsOfNames_Wrapper (
+ IDispatch* pDisp,
+ REFIID riid,
+ __in_ecount(cNames) OLECHAR **rgszNames,
+ unsigned int cNames,
+ LCID lcid,
+ DISPID *rgdispid);
+
+// %%Function: IDispatch::Invoke
+HRESULT __stdcall Dispatch_Invoke_Wrapper (
+ IDispatch* pDisp,
+ DISPID dispidMember,
+ REFIID riid,
+ LCID lcid,
+ unsigned short wFlags,
+ DISPPARAMS *pdispparams,
+ VARIANT *pvarResult,
+ EXCEPINFO *pexcepinfo,
+ unsigned int *puArgErr
+ );
+
+// %%Function: IDispatch::GetIDsOfNames
+HRESULT __stdcall InternalDispatchImpl_GetIDsOfNames_Wrapper (
+ IDispatch* pDisp,
+ REFIID riid,
+ __in_ecount(cNames) OLECHAR **rgszNames,
+ unsigned int cNames,
+ LCID lcid,
+ DISPID *rgdispid);
+
+// %%Function: IDispatch::Invoke
+HRESULT __stdcall InternalDispatchImpl_Invoke_Wrapper (
+ IDispatch* pDisp,
+ DISPID dispidMember,
+ REFIID riid,
+ LCID lcid,
+ unsigned short wFlags,
+ DISPPARAMS *pdispparams,
+ VARIANT *pvarResult,
+ EXCEPINFO *pexcepinfo,
+ unsigned int *puArgErr
+ );
+
+//------------------------------------------------------------------------------------------
+// IDispatchEx methods for COM+ objects
+
+
+// %%Function: IDispatchEx::GetTypeInfoCount
+HRESULT __stdcall DispatchEx_GetTypeInfoCount_Wrapper (
+ IDispatchEx* pDisp,
+ unsigned int *pctinfo);
+
+
+// %%Function: IDispatchEx::GetTypeInfo
+HRESULT __stdcall DispatchEx_GetTypeInfo_Wrapper (
+ IDispatchEx* pDisp,
+ unsigned int itinfo,
+ LCID lcid,
+ ITypeInfo **pptinfo);
+
+// IDispatchEx::GetIDsOfNames
+HRESULT __stdcall DispatchEx_GetIDsOfNames_Wrapper (
+ IDispatchEx* pDisp,
+ REFIID riid,
+ __in_ecount(cNames) OLECHAR **rgszNames,
+ unsigned int cNames,
+ LCID lcid,
+ DISPID *rgdispid);
+
+// IDispatchEx::Invoke
+HRESULT __stdcall DispatchEx_Invoke_Wrapper (
+ IDispatchEx* pDisp,
+ DISPID dispidMember,
+ REFIID riid,
+ LCID lcid,
+ unsigned short wFlags,
+ DISPPARAMS *pdispparams,
+ VARIANT *pvarResult,
+ EXCEPINFO *pexcepinfo,
+ unsigned int *puArgErr);
+
+// IDispatchEx::DeleteMemberByDispID
+HRESULT __stdcall DispatchEx_DeleteMemberByDispID_Wrapper (
+ IDispatchEx* pDisp,
+ DISPID id);
+
+// IDispatchEx::DeleteMemberByName
+HRESULT __stdcall DispatchEx_DeleteMemberByName_Wrapper (
+ IDispatchEx* pDisp,
+ BSTR bstrName,
+ DWORD grfdex);
+
+
+// IDispatchEx::GetDispID
+HRESULT __stdcall DispatchEx_GetDispID_Wrapper (
+ IDispatchEx* pDisp,
+ BSTR bstrName,
+ DWORD grfdex,
+ DISPID *pid);
+
+
+// IDispatchEx::GetMemberName
+HRESULT __stdcall DispatchEx_GetMemberName_Wrapper (
+ IDispatchEx* pDisp,
+ DISPID id,
+ BSTR *pbstrName);
+
+// IDispatchEx::GetMemberProperties
+HRESULT __stdcall DispatchEx_GetMemberProperties_Wrapper (
+ IDispatchEx* pDisp,
+ DISPID id,
+ DWORD grfdexFetch,
+ DWORD *pgrfdex);
+
+// IDispatchEx::GetNameSpaceParent
+HRESULT __stdcall DispatchEx_GetNameSpaceParent_Wrapper (
+ IDispatchEx* pDisp,
+ IUnknown **ppunk);
+
+// IDispatchEx::GetNextDispID
+HRESULT __stdcall DispatchEx_GetNextDispID_Wrapper (
+ IDispatchEx* pDisp,
+ DWORD grfdex,
+ DISPID id,
+ DISPID *pid);
+
+// IDispatchEx::InvokeEx
+HRESULT __stdcall DispatchEx_InvokeEx_Wrapper (
+ IDispatchEx* pDisp,
+ DISPID id,
+ LCID lcid,
+ WORD wFlags,
+ DISPPARAMS *pdp,
+ VARIANT *pVarRes,
+ EXCEPINFO *pei,
+ IServiceProvider *pspCaller);
+
+//------------------------------------------------------------------------------------------
+// IInspectable methods for managed objects
+
+// IInspectable::GetIIDs
+HRESULT __stdcall Inspectable_GetIIDs_Wrapper (
+ IInspectable *pInsp,
+ ULONG *iidCount,
+ IID **iids);
+
+// IInspectable::GetRuntimeClassName
+HRESULT __stdcall Inspectable_GetRuntimeClassName_Wrapper (
+ IInspectable *pInsp,
+ HSTRING *className);
+
+// IInspectable::GetTrustLevel
+HRESULT __stdcall Inspectable_GetTrustLevel_Wrapper (
+ IInspectable *pInsp,
+ TrustLevel *trustLevel);
+
+//------------------------------------------------------------------------------------------
+// IWeakReferenceSource methods for managed objects
+
+HRESULT __stdcall WeakReferenceSource_GetWeakReference_Wrapper (
+ IWeakReferenceSource *pRefSrc,
+ IWeakReference **weakReference);
+
+//------------------------------------------------------------------------------------------
+// IMarshal methods for COM+ objects
+
+HRESULT __stdcall Marshal_GetUnmarshalClass_Wrapper (
+ IMarshal* pMarsh,
+ REFIID riid, void * pv, ULONG dwDestContext,
+ void * pvDestContext, ULONG mshlflags,
+ LPCLSID pclsid);
+
+HRESULT __stdcall Marshal_GetMarshalSizeMax_Wrapper (
+ IMarshal* pMarsh,
+ REFIID riid, void * pv, ULONG dwDestContext,
+ void * pvDestContext, ULONG mshlflags,
+ ULONG * pSize);
+
+HRESULT __stdcall Marshal_MarshalInterface_Wrapper (
+ IMarshal* pMarsh,
+ LPSTREAM pStm, REFIID riid, void * pv,
+ ULONG dwDestContext, LPVOID pvDestContext,
+ ULONG mshlflags);
+
+HRESULT __stdcall Marshal_UnmarshalInterface_Wrapper (
+ IMarshal* pMarsh,
+ LPSTREAM pStm, REFIID riid,
+ void ** ppvObj);
+
+HRESULT __stdcall Marshal_ReleaseMarshalData_Wrapper (IMarshal* pMarsh, LPSTREAM pStm);
+
+HRESULT __stdcall Marshal_DisconnectObject_Wrapper (IMarshal* pMarsh, ULONG dwReserved);
+
+
+//------------------------------------------------------------------------------------------
+// IManagedObject methods for COM+ objects
+
+interface IManagedObject;
+
+HRESULT __stdcall ManagedObject_GetObjectIdentity_Wrapper(IManagedObject *pManaged,
+ BSTR* pBSTRGUID, DWORD* pAppDomainID,
+ void** pCCW);
+
+
+HRESULT __stdcall ManagedObject_GetSerializedBuffer_Wrapper(IManagedObject *pManaged,
+ BSTR* pBStr);
+
+
+//------------------------------------------------------------------------------------------
+// IConnectionPointContainer methods for COM+ objects
+
+interface IEnumConnectionPoints;
+
+HRESULT __stdcall ConnectionPointContainer_EnumConnectionPoints_Wrapper(IUnknown* pUnk,
+ IEnumConnectionPoints **ppEnum);
+
+HRESULT __stdcall ConnectionPointContainer_FindConnectionPoint_Wrapper(IUnknown* pUnk,
+ REFIID riid,
+ IConnectionPoint **ppCP);
+
+
+//------------------------------------------------------------------------------------------
+// IObjectSafety methods for COM+ objects
+
+interface IObjectSafety;
+
+HRESULT __stdcall ObjectSafety_GetInterfaceSafetyOptions_Wrapper(IUnknown* pUnk,
+ REFIID riid,
+ DWORD *pdwSupportedOptions,
+ DWORD *pdwEnabledOptions);
+
+HRESULT __stdcall ObjectSafety_SetInterfaceSafetyOptions_Wrapper(IUnknown* pUnk,
+ REFIID riid,
+ DWORD dwOptionSetMask,
+ DWORD dwEnabledOptions);
+
+
+//------------------------------------------------------------------------------------------
+// ICustomPropertyProvider methods for Jupiter
+HRESULT __stdcall ICustomPropertyProvider_GetProperty_Wrapper(IUnknown *pPropertyProvider,
+ HSTRING hstrName,
+ /* [out] */ IUnknown **ppProperty);
+
+// Windows.UI.DirectUI.Xaml.TypeNameNative
+struct TypeNameNative
+{
+ HSTRING typeName;
+ int typeKind;
+};
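+
+// (A sketch of the mapping: typeName/typeKind presumably mirror the Name and Kind
+// fields of the projected Xaml TypeName structure, with typeKind holding a TypeKind
+// value such as Primitive, Metadata, or Custom.)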
+
+HRESULT __stdcall ICustomPropertyProvider_GetIndexedProperty_Wrapper(IUnknown *pPropertyProvider,
+ HSTRING hstrName,
+ TypeNameNative indexedParamType,
+ /* [out, retval] */ IUnknown **ppProperty);
+
+HRESULT __stdcall ICustomPropertyProvider_GetStringRepresentation_Wrapper(IUnknown *pPropertyProvider,
+ /* [out, retval] */ HSTRING *phstrStringRepresentation);
+
+HRESULT __stdcall ICustomPropertyProvider_GetType_Wrapper(IUnknown *pPropertyProvider,
+ /* [out, retval] */ TypeNameNative *pTypeIdentifier);
+
+HRESULT __stdcall IStringable_ToString_Wrapper(IUnknown* pStringable,
+ /* [out, retval] */ HSTRING* result);
+
+//------------------------------------------------------------------------------------------
+// ICCW methods for Jupiter
+ULONG __stdcall ICCW_AddRefFromJupiter_Wrapper(IUnknown *pUnk);
+
+ULONG __stdcall ICCW_ReleaseFromJupiter_Wrapper(IUnknown *pUnk);
+
+HRESULT __stdcall ICCW_Peg_Wrapper(IUnknown *pUnk);
+
+HRESULT __stdcall ICCW_Unpeg_Wrapper(IUnknown *pUnk);
+
+
+
+#ifdef MDA_SUPPORTED
+VOID __stdcall DirtyCast_Assert(IUnknown* pUnk);
+#endif
+
+// IUNKNOWN wrappers
+
+// prototypes IUnknown methods
+HRESULT __stdcall Unknown_QueryInterface(IUnknown* pUnk, REFIID riid, void** ppv);
+HRESULT __stdcall Unknown_QueryInterface_ICCW(IUnknown *pUnk, REFIID riid, void **ppv);
+
+ULONG __stdcall Unknown_AddRef(IUnknown* pUnk);
+ULONG __stdcall Unknown_Release(IUnknown* pUnk);
+ULONG __stdcall Unknown_AddRefInner(IUnknown* pUnk);
+ULONG __stdcall Unknown_ReleaseInner(IUnknown* pUnk);
+
+// for std interfaces such as IProvideClassInfo
+HRESULT __stdcall Unknown_QueryInterface_IErrorInfo(IUnknown* pUnk, REFIID riid, void** ppv);
+ULONG __stdcall Unknown_AddRefSpecial(IUnknown* pUnk);
+ULONG __stdcall Unknown_ReleaseSpecial(IUnknown* pUnk);
+ULONG __stdcall Unknown_ReleaseSpecial_IErrorInfo(IUnknown* pUnk);
+
+
+// special idispatch methods
+
+HRESULT __stdcall
+InternalDispatchImpl_GetIDsOfNames (
+ IDispatch* pDisp,
+ REFIID riid,
+ __in_ecount(cNames) OLECHAR **rgszNames,
+ unsigned int cNames,
+ LCID lcid,
+ DISPID *rgdispid);
+
+
+HRESULT __stdcall
+InternalDispatchImpl_Invoke (
+ IDispatch* pDisp,
+ DISPID dispidMember,
+ REFIID riid,
+ LCID lcid,
+ unsigned short wFlags,
+ DISPPARAMS *pdispparams,
+ VARIANT *pvarResult,
+ EXCEPINFO *pexcepinfo,
+ unsigned int *puArgErr);
+
+//------------------------------------------------------------------------------------------
+// Helper to get the current IErrorInfo if the specified interface supports it.
+IErrorInfo *GetSupportedErrorInfo(IUnknown *iface, REFIID riid, BOOL checkForIRestrictedErrInfo = TRUE);
+
+//------------------------------------------------------------------------------------------
+// Helper functions that return HRESULT's instead of throwing exceptions.
+HRESULT TryGetGuid(MethodTable* pClass, GUID* pGUID, BOOL b);
+
+//------------------------------------------------------------------------------------------
+// Helpers to get the ITypeInfo* for a type.
+HRESULT ExportTypeLibFromLoadedAssemblyNoThrow(Assembly *pAssembly, LPCWSTR szTlb, ITypeLib **ppTlb, ITypeLibExporterNotifySink *pINotify, int flags);
+void ExportTypeLibFromLoadedAssembly(Assembly *pAssembly, LPCWSTR szTlb, ITypeLib **ppTlb, ITypeLibExporterNotifySink *pINotify, int flags);
+HRESULT GetITypeLibForEEClass(MethodTable *pMT, ITypeLib **ppTLB, int bAutoCreate, int flags);
+HRESULT GetITypeInfoForEEClass(MethodTable *pMT, ITypeInfo **ppTI, int bClassInfo=false, int bAutoCreate=true, int flags=0);
+HRESULT GetTypeLibIdForRegisteredEEClass(MethodTable *pMT, GUID *pGuid);
+HRESULT GetDefaultInterfaceForCoclass(ITypeInfo *pTI, ITypeInfo **ppTIDef);
+
+//-------------------------------------------------------------------------------------
+// Helper to get the ITypeLib* for an Assembly.
+HRESULT GetITypeLibForAssembly(Assembly *pAssembly, ITypeLib **ppTLB, int bAutoCreate, int flags);
+
+//-------------------------------------------------------------------------------------
+// Helper to get the GUID of the typelib that is created from an assembly.
+HRESULT GetTypeLibGuidForAssembly(Assembly *pAssembly, GUID *pGuid);
+
+//-------------------------------------------------------------------------------------
+// Helper for IInspectable's GetRuntimeClassName on an IReference<T> or IReferenceArray<T>.
+void GetRuntimeClassNameForIReferenceOrIReferenceArray(MethodTable* pInstantiatedType, BOOL fIsIReferenceArray, SString& className);
+
+#endif
diff --git a/src/vm/stdinterfaces_internal.h b/src/vm/stdinterfaces_internal.h
new file mode 100644
index 0000000000..d2b6af2110
--- /dev/null
+++ b/src/vm/stdinterfaces_internal.h
@@ -0,0 +1,377 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _H_INTERNAL_STDINTERFACES
+#define _H_INTERNAL_STDINTERFACES
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+// ---------------------------------------------------------------------------
+// prototypes IUnknown methods
+HRESULT Unknown_QueryInterface_Internal (
+ ComCallWrapper* pWrap, IUnknown* pUnk, REFIID riid, void** ppv);
+HRESULT __stdcall Unknown_QueryInterface_IErrorInfo_Simple (
+ IUnknown* pUnk, REFIID riid, void** ppv);
+
+ULONG __stdcall Unknown_AddRef_Internal(IUnknown* pUnk);
+ULONG __stdcall Unknown_Release_Internal(IUnknown* pUnk);
+ULONG __stdcall Unknown_AddRefInner_Internal(IUnknown* pUnk);
+ULONG __stdcall Unknown_ReleaseInner_Internal(IUnknown* pUnk);
+
+// for std interfaces such as IProvideClassInfo
+ULONG __stdcall Unknown_AddRefSpecial_Internal(IUnknown* pUnk);
+ULONG __stdcall Unknown_ReleaseSpecial_Internal(IUnknown* pUnk);
+ULONG __stdcall Unknown_ReleaseSpecial_IErrorInfo_Internal(IUnknown* pUnk);
+
+// ---------------------------------------------------------------------------
+// Interface ISupportsErrorInfo
+
+// %%Function: SupportsErroInfo_IntfSupportsErrorInfo,
+// ---------------------------------------------------------------------------
+HRESULT __stdcall
+SupportsErroInfo_IntfSupportsErrorInfo(IUnknown* pUnk, REFIID riid);
+
+// ---------------------------------------------------------------------------
+// Interface IErrorInfo
+
+// %%Function: ErrorInfo_GetDescription,
+HRESULT __stdcall
+ErrorInfo_GetDescription(IUnknown* pUnk, BSTR* pbstrDescription);
+
+// %%Function: ErrorInfo_GetGUID,
+HRESULT __stdcall ErrorInfo_GetGUID(IUnknown* pUnk, GUID* pguid);
+
+// %%Function: ErrorInfo_GetHelpContext,
+HRESULT __stdcall ErrorInfo_GetHelpContext(IUnknown* pUnk, DWORD* pdwHelpCtxt);
+
+// %%Function: ErrorInfo_GetHelpFile,
+HRESULT __stdcall ErrorInfo_GetHelpFile(IUnknown* pUnk, BSTR* pbstrHelpFile);
+
+// %%Function: ErrorInfo_GetSource,
+HRESULT __stdcall ErrorInfo_GetSource(IUnknown* pUnk, BSTR* pbstrSource);
+
+
+//------------------------------------------------------------------------------------------
+// IDispatch methods for COM+ objects. These methods dispatch to the appropriate
+// implementation based on the flags of the class that implements them.
+
+
+// IDispatch::GetTypeInfoCount
+HRESULT __stdcall Dispatch_GetTypeInfoCount (
+ IDispatch* pDisp,
+ unsigned int *pctinfo);
+
+
+// IDispatch::GetTypeInfo
+HRESULT __stdcall Dispatch_GetTypeInfo (
+ IDispatch* pDisp,
+ unsigned int itinfo,
+ LCID lcid,
+ ITypeInfo **pptinfo);
+
+// IDispatch::GetIDsOfNames
+HRESULT __stdcall Dispatch_GetIDsOfNames (
+ IDispatch* pDisp,
+ REFIID riid,
+ __in_ecount(cNames) OLECHAR **rgszNames,
+ unsigned int cNames,
+ LCID lcid,
+ DISPID *rgdispid);
+
+// IDispatch::Invoke
+HRESULT __stdcall Dispatch_Invoke (
+ IDispatch* pDisp,
+ DISPID dispidMember,
+ REFIID riid,
+ LCID lcid,
+ unsigned short wFlags,
+ DISPPARAMS *pdispparams,
+ VARIANT *pvarResult,
+ EXCEPINFO *pexcepinfo,
+ unsigned int *puArgErr);
+
+
+//------------------------------------------------------------------------------------------
+// IDispatch methods for COM+ objects that use our OleAut's implementation.
+
+
+// IDispatch::GetIDsOfNames
+HRESULT __stdcall OleAutDispatchImpl_GetIDsOfNames (
+ IDispatch* pDisp,
+ REFIID riid,
+ __in_ecount(cNames) OLECHAR **rgszNames,
+ unsigned int cNames,
+ LCID lcid,
+ DISPID *rgdispid);
+
+// IDispatch::Invoke
+HRESULT __stdcall OleAutDispatchImpl_Invoke (
+ IDispatch* pDisp,
+ DISPID dispidMember,
+ REFIID riid,
+ LCID lcid,
+ unsigned short wFlags,
+ DISPPARAMS *pdispparams,
+ VARIANT *pvarResult,
+ EXCEPINFO *pexcepinfo,
+ unsigned int *puArgErr);
+
+
+
+//------------------------------------------------------------------------------------------
+// IDispatch methods for COM+ objects that use our internal implementation.
+
+
+// IDispatch::GetIDsOfNames
+HRESULT __stdcall InternalDispatchImpl_GetIDsOfNames (
+ IDispatch* pDisp,
+ REFIID riid,
+ __in_ecount(cNames) OLECHAR **rgszNames,
+ unsigned int cNames,
+ LCID lcid,
+ DISPID *rgdispid);
+
+// IDispatch::Invoke
+HRESULT __stdcall InternalDispatchImpl_Invoke (
+ IDispatch* pDisp,
+ DISPID dispidMember,
+ REFIID riid,
+ LCID lcid,
+ unsigned short wFlags,
+ DISPPARAMS *pdispparams,
+ VARIANT *pvarResult,
+ EXCEPINFO *pexcepinfo,
+ unsigned int *puArgErr);
+
+
+//------------------------------------------------------------------------------------------
+// IDispatchEx methods for COM+ objects
+
+
+// IDispatchEx::GetTypeInfoCount
+HRESULT __stdcall DispatchEx_GetTypeInfoCount (
+ IDispatch* pDisp,
+ unsigned int *pctinfo);
+
+
+// IDispatchEx::GetTypeInfo
+HRESULT __stdcall DispatchEx_GetTypeInfo (
+ IDispatch* pDisp,
+ unsigned int itinfo,
+ LCID lcid,
+ ITypeInfo **pptinfo);
+
+// IDispatchEx::GetIDsOfNames
+HRESULT __stdcall DispatchEx_GetIDsOfNames (
+ IDispatchEx* pDisp,
+ REFIID riid,
+ __in_ecount(cNames) OLECHAR **rgszNames,
+ unsigned int cNames,
+ LCID lcid,
+ DISPID *rgdispid);
+
+// IDispatchEx::Invoke
+HRESULT __stdcall DispatchEx_Invoke (
+ IDispatchEx* pDisp,
+ DISPID dispidMember,
+ REFIID riid,
+ LCID lcid,
+ unsigned short wFlags,
+ DISPPARAMS *pdispparams,
+ VARIANT *pvarResult,
+ EXCEPINFO *pexcepinfo,
+ unsigned int *puArgErr);
+
+// IDispatchEx::DeleteMemberByDispID
+HRESULT __stdcall DispatchEx_DeleteMemberByDispID (
+ IDispatchEx* pDisp,
+ DISPID id);
+
+// IDispatchEx::DeleteMemberByName
+HRESULT __stdcall DispatchEx_DeleteMemberByName (
+ IDispatchEx* pDisp,
+ BSTR bstrName,
+ DWORD grfdex);
+
+// IDispatchEx::GetDispID
+HRESULT __stdcall DispatchEx_GetDispID (
+ IDispatchEx* pDisp,
+ BSTR bstrName,
+ DWORD grfdex,
+ DISPID *pid);
+
+// IDispatchEx::GetMemberName
+HRESULT __stdcall DispatchEx_GetMemberName (
+ IDispatchEx* pDisp,
+ DISPID id,
+ BSTR *pbstrName);
+
+// IDispatchEx::GetMemberProperties
+HRESULT __stdcall DispatchEx_GetMemberProperties (
+ IDispatchEx* pDisp,
+ DISPID id,
+ DWORD grfdexFetch,
+ DWORD *pgrfdex);
+
+// IDispatchEx::GetNameSpaceParent
+HRESULT __stdcall DispatchEx_GetNameSpaceParent (
+ IDispatchEx* pDisp,
+ IUnknown **ppunk);
+
+// IDispatchEx::GetNextDispID
+HRESULT __stdcall DispatchEx_GetNextDispID (
+ IDispatchEx* pDisp,
+ DWORD grfdex,
+ DISPID id,
+ DISPID *pid);
+
+// IDispatchEx::InvokeEx
+HRESULT __stdcall DispatchEx_InvokeEx (
+ IDispatchEx* pDisp,
+ DISPID id,
+ LCID lcid,
+ WORD wFlags,
+ DISPPARAMS *pdp,
+ VARIANT *pVarRes,
+ EXCEPINFO *pei,
+ IServiceProvider *pspCaller);
+
+//------------------------------------------------------------------------------------------
+// IInspectable methods for managed objects
+
+// IInspectable::GetIIDs
+HRESULT __stdcall Inspectable_GetIIDs (
+ IInspectable *pInsp,
+ ULONG *iidCount,
+ IID **iids);
+
+HRESULT __stdcall Inspectable_GetRuntimeClassName (
+ IInspectable *pInsp,
+ HSTRING *className);
+
+//------------------------------------------------------------------------------------------
+// IWeakReferenceSource methods for managed objects
+
+// IWeakReferenceSource::GetWeakReference
+HRESULT __stdcall WeakReferenceSource_GetWeakReference (
+ IWeakReferenceSource *pRefSrc,
+ IWeakReference **weakReference);
+
+//------------------------------------------------------------------------------------------
+// ICustomPropertyProvider methods for Jupiter data binding
+HRESULT __stdcall ICustomPropertyProvider_GetProperty(IUnknown *pPropertyProvider,
+ HSTRING hstrName,
+ /* [out, retval] */ IUnknown **ppProperty);
+
+HRESULT __stdcall ICustomPropertyProvider_GetIndexedProperty(IUnknown *pPropertyProvider,
+ HSTRING hstrName,
+ TypeNameNative indexedParamType,
+ /* [out, retval] */ IUnknown **ppProperty);
+
+HRESULT __stdcall ICustomPropertyProvider_GetStringRepresentation(IUnknown *pPropertyProvider,
+ /* [out, retval] */ HSTRING *phstrStringRepresentation);
+HRESULT __stdcall ICustomPropertyProvider_GetType(IUnknown *pPropertyProvider,
+ /* [out, retval] */ TypeNameNative *pTypeIdentifier);
+
+HRESULT __stdcall IStringable_ToString(IUnknown* pStringable,
+ /* [out, retval] */ HSTRING* pResult);
+
+//------------------------------------------------------------------------------------------
+// IMarshal methods for COM+ objects
+
+HRESULT __stdcall Marshal_GetUnmarshalClass (
+ IMarshal* pMarsh,
+ REFIID riid, void * pv, ULONG dwDestContext,
+ void * pvDestContext, ULONG mshlflags,
+ LPCLSID pclsid);
+
+HRESULT __stdcall Marshal_GetMarshalSizeMax (
+ IMarshal* pMarsh,
+ REFIID riid, void * pv, ULONG dwDestContext,
+ void * pvDestContext, ULONG mshlflags,
+ ULONG * pSize);
+
+HRESULT __stdcall Marshal_MarshalInterface (
+ IMarshal* pMarsh,
+ LPSTREAM pStm, REFIID riid, void * pv,
+ ULONG dwDestContext, LPVOID pvDestContext,
+ ULONG mshlflags);
+
+HRESULT __stdcall Marshal_UnmarshalInterface (
+ IMarshal* pMarsh,
+ LPSTREAM pStm, REFIID riid,
+ void ** ppvObj);
+
+HRESULT __stdcall Marshal_ReleaseMarshalData (IMarshal* pMarsh, LPSTREAM pStm);
+
+HRESULT __stdcall Marshal_DisconnectObject (IMarshal* pMarsh, ULONG dwReserved);
+
+
+//------------------------------------------------------------------------------------------
+// IManagedObject methods for COM+ objects
+
+interface IManagedObject;
+
+
+HRESULT __stdcall ManagedObject_RemoteDispatchAutoDone(IManagedObject *pManaged, BSTR bstr,
+ BSTR* pBStrRet);
+
+HRESULT __stdcall ManagedObject_RemoteDispatchNotAutoDone(IManagedObject *pManaged, BSTR bstr,
+ BSTR* pBStrRet);
+
+HRESULT __stdcall ManagedObject_GetObjectIdentity(IManagedObject *pManaged,
+ BSTR* pBSTRGUID, DWORD* pAppDomainID,
+ void** pCCW);
+
+
+HRESULT __stdcall ManagedObject_GetSerializedBuffer(IManagedObject *pManaged,
+ BSTR* pBStr);
+
+//------------------------------------------------------------------------------------------
+// IConnectionPointContainer methods for COM+ objects
+
+interface IEnumConnectionPoints;
+
+HRESULT __stdcall ConnectionPointContainer_EnumConnectionPoints(IUnknown* pUnk,
+ IEnumConnectionPoints **ppEnum);
+
+HRESULT __stdcall ConnectionPointContainer_FindConnectionPoint(IUnknown* pUnk,
+ REFIID riid,
+ IConnectionPoint **ppCP);
+
+//------------------------------------------------------------------------------------------
+// IObjectSafety methods for COM+ objects
+
+interface IObjectSafety;
+
+HRESULT __stdcall ObjectSafety_GetInterfaceSafetyOptions(IUnknown* pUnk,
+ REFIID riid,
+ DWORD *pdwSupportedOptions,
+ DWORD *pdwEnabledOptions);
+
+HRESULT __stdcall ObjectSafety_SetInterfaceSafetyOptions(IUnknown* pUnk,
+ REFIID riid,
+ DWORD dwOptionSetMask,
+ DWORD dwEnabledOptions);
+//-------------------------------------------------------------------------
+// IProvideClassInfo methods
+HRESULT __stdcall ClassInfo_GetClassInfo(IUnknown* pUnk,
+ ITypeInfo** ppTI //Address of output variable that receives the type info.
+ );
+//-------------------------------------------------------------------------
+// ICCW methods
+ULONG __stdcall ICCW_AddRefFromJupiter(IUnknown* pUnk);
+
+ULONG __stdcall ICCW_ReleaseFromJupiter(IUnknown* pUnk);
+
+HRESULT __stdcall ICCW_Peg(IUnknown* pUnk);
+
+HRESULT __stdcall ICCW_Unpeg(IUnknown* pUnk);
+
+
+#endif
diff --git a/src/vm/stdinterfaces_wrapper.cpp b/src/vm/stdinterfaces_wrapper.cpp
new file mode 100644
index 0000000000..697b168856
--- /dev/null
+++ b/src/vm/stdinterfaces_wrapper.cpp
@@ -0,0 +1,3270 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//---------------------------------------------------------------------------------
+// stdinterfaces_wrapper.cpp
+//
+// Defines various standard COM interfaces.
+
+//---------------------------------------------------------------------------------
+
+
+#include "common.h"
+
+#include <ole2.h>
+#include <guidfromname.h>
+#include <olectl.h>
+#include <objsafe.h> // IID_IObjectSafe
+#include "vars.hpp"
+#include "object.h"
+#include "excep.h"
+#include "frames.h"
+#include "vars.hpp"
+#include "runtimecallablewrapper.h"
+#include "comcallablewrapper.h"
+#include "field.h"
+#include "threads.h"
+#include "interoputil.h"
+#include "tlbexport.h"
+#include "comdelegate.h"
+#include "olevariant.h"
+#include "eeconfig.h"
+#include "typehandle.h"
+#include "posterror.h"
+#include <corerror.h>
+#include <mscoree.h>
+#include "mtx.h"
+#include "cgencpu.h"
+#include "interopconverter.h"
+#include "cominterfacemarshaler.h"
+#include "stdinterfaces.h"
+#include "stdinterfaces_internal.h"
+#include "interoputil.inl"
+
+
+interface IEnumConnectionPoints;
+interface IManagedObject;
+
+// IUnknown is part of IDispatch
+// Common vtables for well-known COM interfaces
+// shared by all COM+ callable wrappers.
+
+// All COM+ created vtables have well-known IUnknown methods, which are used to identify
+// the type of the interface.
+// For example, all COM+ created tear-offs have the same QI method in their IUnknown portion:
+// Unknown_QueryInterface is the QI method for all the tear-offs created from COM+.
+//
+// Tear-off interfaces created for standard interfaces such as IProvideClassInfo, IErrorInfo etc.
+// have their AddRef & Release slots point to Unknown_AddRefSpecial & Unknown_ReleaseSpecial.
+//
+// The inner unknown, or the original unknown for a wrapper, has its
+// AddRef & Release slots point to Unknown_AddRefInner & Unknown_ReleaseInner.
+
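+// Illustrative sketch (not part of the original source; the helper name below is hypothetical):
+// because every COM+ tear-off shares the same QI slot, a vtable can be classified by comparing
+// that slot against the well-known entry points.
+#if 0
+inline BOOL IsComPlusTearOff(IUnknown* pUnk)
+{
+    UINT_PTR** pVtbl = *(UINT_PTR***)pUnk;                // a COM object's first field is its vtable
+    return pVtbl[0] == (UINT_PTR*)Unknown_QueryInterface; // the shared QI slot marks a COM+ tear-off
+}
+#endif
+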
+// global inner Unknown vtable
+const StdInterfaceDesc<3> g_InnerUnknown =
+{
+ enum_InnerUnknown,
+ {
+ (UINT_PTR*)Unknown_QueryInterface,
+ (UINT_PTR*)Unknown_AddRefInner, // special addref to distinguish inner unk
+ (UINT_PTR*)Unknown_ReleaseInner, // special release to distinguish inner unknown
+ }
+};
+
+// global IProvideClassInfo vtable
+const StdInterfaceDesc<4> g_IProvideClassInfo =
+{
+ enum_IProvideClassInfo,
+ {
+ (UINT_PTR*)Unknown_QueryInterface, // don't change this
+ (UINT_PTR*)Unknown_AddRefSpecial, // special addref for std. interface
+ (UINT_PTR*)Unknown_ReleaseSpecial, // special release for std. interface
+ (UINT_PTR*)ClassInfo_GetClassInfo_Wrapper // GetClassInfo
+ }
+};
+
+// global IMarshal vtable
+const StdInterfaceDesc<9> g_IMarshal =
+{
+ enum_IMarshal,
+ {
+ (UINT_PTR*)Unknown_QueryInterface,
+ (UINT_PTR*)Unknown_AddRefSpecial,
+ (UINT_PTR*)Unknown_ReleaseSpecial,
+ (UINT_PTR*)Marshal_GetUnmarshalClass_Wrapper,
+ (UINT_PTR*)Marshal_GetMarshalSizeMax_Wrapper,
+ (UINT_PTR*)Marshal_MarshalInterface_Wrapper,
+ (UINT_PTR*)Marshal_UnmarshalInterface_Wrapper,
+ (UINT_PTR*)Marshal_ReleaseMarshalData_Wrapper,
+ (UINT_PTR*)Marshal_DisconnectObject_Wrapper
+ }
+};
+
+// global ISupportsErrorInfo vtable
+const StdInterfaceDesc<4> g_ISupportsErrorInfo =
+{
+ enum_ISupportsErrorInfo,
+ {
+ (UINT_PTR*)Unknown_QueryInterface,
+ (UINT_PTR*)Unknown_AddRefSpecial,
+ (UINT_PTR*)Unknown_ReleaseSpecial,
+ (UINT_PTR*)SupportsErroInfo_IntfSupportsErrorInfo_Wrapper
+ }
+};
+
+// global IErrorInfo vtable
+const StdInterfaceDesc<8> g_IErrorInfo =
+{
+ enum_IErrorInfo,
+ {
+ (UINT_PTR*)Unknown_QueryInterface_IErrorInfo,
+ (UINT_PTR*)Unknown_AddRefSpecial,
+ (UINT_PTR*)Unknown_ReleaseSpecial_IErrorInfo,
+ (UINT_PTR*)ErrorInfo_GetGUID_Wrapper,
+ (UINT_PTR*)ErrorInfo_GetSource_Wrapper,
+ (UINT_PTR*)ErrorInfo_GetDescription_Wrapper,
+ (UINT_PTR*)ErrorInfo_GetHelpFile_Wrapper,
+ (UINT_PTR*)ErrorInfo_GetHelpContext_Wrapper
+ }
+};
+
+// global IManagedObject vtable
+const StdInterfaceDesc<5> g_IManagedObject =
+{
+ enum_IManagedObject,
+ {
+ (UINT_PTR*)Unknown_QueryInterface,
+ (UINT_PTR*)Unknown_AddRefSpecial,
+ (UINT_PTR*)Unknown_ReleaseSpecial,
+ (UINT_PTR*)ManagedObject_GetSerializedBuffer_Wrapper,
+ (UINT_PTR*)ManagedObject_GetObjectIdentity_Wrapper
+ }
+};
+
+// global IConnectionPointContainer vtable
+const StdInterfaceDesc<5> g_IConnectionPointContainer =
+{
+ enum_IConnectionPointContainer,
+ {
+ (UINT_PTR*)Unknown_QueryInterface,
+ (UINT_PTR*)Unknown_AddRefSpecial,
+ (UINT_PTR*)Unknown_ReleaseSpecial,
+ (UINT_PTR*)ConnectionPointContainer_EnumConnectionPoints_Wrapper,
+ (UINT_PTR*)ConnectionPointContainer_FindConnectionPoint_Wrapper
+ }
+};
+
+// global IObjectSafety vtable
+const StdInterfaceDesc<5> g_IObjectSafety =
+{
+ enum_IObjectSafety,
+ {
+ (UINT_PTR*)Unknown_QueryInterface,
+ (UINT_PTR*)Unknown_AddRefSpecial,
+ (UINT_PTR*)Unknown_ReleaseSpecial,
+ (UINT_PTR*)ObjectSafety_GetInterfaceSafetyOptions_Wrapper,
+ (UINT_PTR*)ObjectSafety_SetInterfaceSafetyOptions_Wrapper
+ }
+};
+
+// global IDispatchEx vtable
+const StdInterfaceDesc<15> g_IDispatchEx =
+{
+ enum_IDispatchEx,
+ {
+ (UINT_PTR*)Unknown_QueryInterface,
+ (UINT_PTR*)Unknown_AddRefSpecial,
+ (UINT_PTR*)Unknown_ReleaseSpecial,
+ (UINT_PTR*)DispatchEx_GetTypeInfoCount_Wrapper,
+ (UINT_PTR*)DispatchEx_GetTypeInfo_Wrapper,
+ (UINT_PTR*)DispatchEx_GetIDsOfNames_Wrapper,
+ (UINT_PTR*)DispatchEx_Invoke_Wrapper,
+ (UINT_PTR*)DispatchEx_GetDispID_Wrapper,
+ (UINT_PTR*)DispatchEx_InvokeEx_Wrapper,
+ (UINT_PTR*)DispatchEx_DeleteMemberByName_Wrapper,
+ (UINT_PTR*)DispatchEx_DeleteMemberByDispID_Wrapper,
+ (UINT_PTR*)DispatchEx_GetMemberProperties_Wrapper,
+ (UINT_PTR*)DispatchEx_GetMemberName_Wrapper,
+ (UINT_PTR*)DispatchEx_GetNextDispID_Wrapper,
+ (UINT_PTR*)DispatchEx_GetNameSpaceParent_Wrapper
+ }
+};
+
+// global IWeakReferenceSource vtable
+const StdInterfaceDesc<4> g_IWeakReferenceSource =
+{
+ enum_IWeakReferenceSource,
+ {
+ (UINT_PTR*)Unknown_QueryInterface,
+ (UINT_PTR*)Unknown_AddRefSpecial,
+ (UINT_PTR*)Unknown_ReleaseSpecial,
+ (UINT_PTR*)WeakReferenceSource_GetWeakReference_Wrapper
+ }
+};
+
+// global ICustomPropertyProvider vtable
+const StdInterfaceDesc<10> g_ICustomPropertyProvider =
+{
+ enum_ICustomPropertyProvider,
+ {
+ (UINT_PTR*)Unknown_QueryInterface,
+ (UINT_PTR*)Unknown_AddRefSpecial,
+ (UINT_PTR*)Unknown_ReleaseSpecial,
+ (UINT_PTR*)Inspectable_GetIIDs_Wrapper,
+ (UINT_PTR*)Inspectable_GetRuntimeClassName_Wrapper,
+ (UINT_PTR*)Inspectable_GetTrustLevel_Wrapper,
+ (UINT_PTR*)ICustomPropertyProvider_GetProperty_Wrapper,
+ (UINT_PTR*)ICustomPropertyProvider_GetIndexedProperty_Wrapper,
+ (UINT_PTR*)ICustomPropertyProvider_GetStringRepresentation_Wrapper,
+ (UINT_PTR*)ICustomPropertyProvider_GetType_Wrapper
+ }
+};
+
+// global ICCW vtable
+const StdInterfaceDesc<7> g_ICCW =
+{
+ enum_ICCW,
+ {
+ (UINT_PTR*)Unknown_QueryInterface_ICCW,
+ (UINT_PTR*)Unknown_AddRefSpecial,
+ (UINT_PTR*)Unknown_ReleaseSpecial,
+ (UINT_PTR*)ICCW_AddRefFromJupiter_Wrapper,
+ (UINT_PTR*)ICCW_ReleaseFromJupiter_Wrapper,
+ (UINT_PTR*)ICCW_Peg_Wrapper,
+ (UINT_PTR*)ICCW_Unpeg_Wrapper
+ }
+};
+
+// global IAgileObject vtable
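+// (IAgileObject is a marker interface with no methods of its own, so only the three
+// IUnknown slots are populated.)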
+const StdInterfaceDesc<3> g_IAgileObject =
+{
+ enum_IAgileObject,
+ {
+ (UINT_PTR*)Unknown_QueryInterface,
+ (UINT_PTR*)Unknown_AddRefSpecial,
+ (UINT_PTR*)Unknown_ReleaseSpecial
+ }
+};
+
+// global IStringable vtable
+const StdInterfaceDesc<7> g_IStringable =
+{
+ enum_IStringable,
+ {
+ (UINT_PTR*)Unknown_QueryInterface,
+ (UINT_PTR*)Unknown_AddRefSpecial,
+ (UINT_PTR*)Unknown_ReleaseSpecial,
+ (UINT_PTR*)Inspectable_GetIIDs_Wrapper,
+ (UINT_PTR*)Inspectable_GetRuntimeClassName_Wrapper,
+ (UINT_PTR*)Inspectable_GetTrustLevel_Wrapper,
+ (UINT_PTR*)IStringable_ToString_Wrapper
+ }
+};
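+
+// Note that IStringable and ICustomPropertyProvider are WinRT-style vtables: they carry the
+// three IInspectable slots (GetIIDs, GetRuntimeClassName, GetTrustLevel) ahead of their own
+// methods.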
+
+
+// Generic helper to check whether the wrapper's AppDomain matches the current one; callers
+// perform an AppDomainDoCallBack otherwise
+inline BOOL IsCurrentDomainValid(ComCallWrapper* pWrap, Thread* pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pWrap));
+ PRECONDITION(CheckPointer(pThread));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pWrap != NULL);
+ PREFIX_ASSUME(pWrap != NULL);
+
+    // If we are finalizing all live objects, or are past that stage, we do not allow
+    // a thread to enter the EE.
+ if ((g_fEEShutDown & ShutDown_Finalize2) || g_fForbidEnterEE)
+ return FALSE;
+
+ return (!pWrap->NeedToSwitchDomains(pThread));
+}
+
+BOOL IsCurrentDomainValid(ComCallWrapper* pWrap)
+{
+ CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; SO_TOLERANT; } CONTRACTL_END;
+
+ return IsCurrentDomainValid(pWrap, GetThread());
+}
+
+struct AppDomainSwitchToPreemptiveHelperArgs
+{
+ Context::ADCallBackFcnType pRealCallback;
+ void* pRealArgs;
+};
+
+VOID __stdcall AppDomainSwitchToPreemptiveHelper(LPVOID pv)
+{
+ AppDomainSwitchToPreemptiveHelperArgs* pArgs = (AppDomainSwitchToPreemptiveHelperArgs*)pv;
+
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pv));
+
+ VOID __stdcall Dispatch_Invoke_CallBack(LPVOID ptr);
+ if (pArgs->pRealCallback == Dispatch_Invoke_CallBack) THROWS; else NOTHROW;
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+ pArgs->pRealCallback(pArgs->pRealArgs);
+}
+
+VOID AppDomainDoCallBack(ComCallWrapper* pWrap, Context::ADCallBackFcnType pTarget, LPVOID pArgs, HRESULT* phr)
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pWrap));
+ PRECONDITION(CheckPointer(pTarget));
+ PRECONDITION(CheckPointer(pArgs));
+ PRECONDITION(CheckPointer(phr));
+ }
+ CONTRACTL_END;
+
+    // If we are finalizing all live objects, or are past that stage, we do not allow
+    // a thread to enter the EE.
+ if ((g_fEEShutDown & ShutDown_Finalize2) || g_fForbidEnterEE)
+ {
+ *phr = E_FAIL;
+ return;
+ }
+
+ BEGIN_EXTERNAL_ENTRYPOINT(phr)
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+ Thread *pThread = GET_THREAD();
+
+ ADID targetADID;
+ Context *pTargetContext;
+ if (pWrap->NeedToSwitchDomains(pThread, &targetADID, &pTargetContext))
+ {
+ // call ourselves again through DoCallBack with a domain transition.
+ // We need to switch back to preemptive GC mode before we call the
+ // real target method.
+ AppDomainSwitchToPreemptiveHelperArgs args = {(Context::ADCallBackFcnType)pTarget, pArgs};
+ pThread->DoContextCallBack(targetADID, pTargetContext, AppDomainSwitchToPreemptiveHelper, &args);
+ }
+ else
+ {
+            // make the call directly, not forgetting to switch to preemptive GC mode
+ GCX_PREEMP();
+ ((Context::ADCallBackFcnType)pTarget)(pArgs);
+ }
+ }
+ END_EXTERNAL_ENTRYPOINT;
+}
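+
+// Every per-method wrapper below follows the same three-piece shape: an argument struct, a
+// callback that either calls straight through or re-enters itself in the target domain, and
+// the COM-visible wrapper. A condensed sketch of the pattern (the 'Foo*' names are
+// placeholders, not real symbols):
+#if 0
+struct FooArgs
+{
+    IUnknown* pUnk;
+    HRESULT*  hr;
+};
+
+VOID __stdcall Foo_CallBack(LPVOID ptr)
+{
+    FooArgs* pArgs = (FooArgs*)ptr;
+    ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+    if (IsCurrentDomainValid(pWrap))
+        *(pArgs->hr) = Foo_Internal(pArgs->pUnk);                   // right domain: call through
+    else
+        AppDomainDoCallBack(pWrap, Foo_CallBack, pArgs, pArgs->hr); // re-enter in the target domain
+}
+
+HRESULT __stdcall Foo_Wrapper(IUnknown* pUnk)
+{
+    SetupForComCallHR();
+    HRESULT hr = S_OK;
+    FooArgs args = {pUnk, &hr};
+    Foo_CallBack(&args);
+    return hr;
+}
+#endif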
+
+//-------------------------------------------------------------------------
+// IUnknown methods
+
+struct QIArgs
+{
+ ComCallWrapper* pWrap;
+ IUnknown* pUnk;
+ const IID* riid;
+ void** ppv;
+ HRESULT* hr;
+};
+
+VOID __stdcall Unknown_QueryInterface_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ QIArgs* pArgs = (QIArgs*)ptr;
+
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = Unknown_QueryInterface_Internal(pArgs->pWrap, pArgs->pUnk, *pArgs->riid, pArgs->ppv);
+ }
+ else
+ {
+        AppDomainDoCallBack(pWrap, Unknown_QueryInterface_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall Unknown_QueryInterface(IUnknown* pUnk, REFIID riid, void** ppv)
+{
+ SetupThreadForComCall(E_OUTOFMEMORY);
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(ppv, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pUnk);
+ if (IsCurrentDomainValid(pWrap, GET_THREAD()))
+ {
+ return Unknown_QueryInterface_Internal(pWrap, pUnk, riid, ppv);
+ }
+ else
+ {
+ HRESULT hr = S_OK;
+ QIArgs args = {pWrap, pUnk, &riid, ppv, &hr};
+ Unknown_QueryInterface_CallBack(&args);
+ return hr;
+ }
+}
+
+//
+// Non-crashing version of Unknown_QueryInterface for ICCW.
+// This will either succeed and return a valid AddRef-ed interface pointer, or
+// fail with COR_E_ACCESSING_CCW.
+//
+HRESULT __stdcall Unknown_QueryInterface_ICCW(IUnknown *pUnk, REFIID riid, void **ppv)
+{
+ SetupThreadForComCall(E_OUTOFMEMORY);
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(ppv, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pUnk);
+
+ //
+    // AddRef to keep the object alive.
+    // AddRef is "safe" at this point because if this is a CCW with an outstanding Jupiter ref,
+    // we know for sure the CCW is not claimed yet (though the object itself could be).
+ //
+ pWrap->AddRef();
+
+ CCWHolder pCCW = pWrap;
+
+ //
+    // Check whether the object is still alive; if it is, it'll be kept alive until we release
+    // the extra ref in CCWHolder.
+    // We need to do this in COOP mode to make sure the next GC observes the AddRef change on the
+    // RefCountHandle, and to avoid racing with a GC that could be in the middle of NULL-ing out
+    // the RefCountHandle.
+ //
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+
+ SimpleComCallWrapper *pSimpleWrap = pWrap->GetSimpleWrapper();
+
+ AppDomainFromIDHolder ad((ADID)pSimpleWrap->GetRawDomainID(), TRUE);
+
+ if (ad.IsUnloaded() || ad->IsUnloading())
+ return COR_E_APPDOMAINUNLOADED;
+
+ //
+ // For CCWs that have outstanding Jupiter-reference, they could be either:
+ // 1. Neutered - in this case it is unsafe to touch m_ppThis
+ // 2. RefCounted handle NULLed out by GC
+ //
+ if (pWrap->GetSimpleWrapper()->IsNeutered() ||
+ pWrap->GetObjectRef() == NULL)
+ {
+ // In those cases, it is unsafe to proceed with a QueryInterface call
+ return COR_E_ACCESSING_CCW;
+ }
+ }
+
+ //
+ // OK. Now the CCW is going to be alive until the end of this scope. We can go ahead and do the
+ // QI now
+ //
+ if (IsCurrentDomainValid(pWrap, GET_THREAD()))
+ {
+ return Unknown_QueryInterface_Internal(pWrap, pUnk, riid, ppv);
+ }
+ else
+ {
+ HRESULT hr = S_OK;
+ QIArgs args = {pWrap, pUnk, &riid, ppv, &hr};
+ Unknown_QueryInterface_CallBack(&args);
+ return hr;
+ }
+}
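+
+// Hypothetical caller-side sketch (illustration only; the function name below is made up):
+// a Jupiter-style caller QIs through the ICCW vtable and tolerates a torn-down CCW.
+#if 0
+void QueryThroughICCWSketch(IUnknown* pCCWUnknown, REFIID riid)
+{
+    void* pv = NULL;
+    HRESULT hr = Unknown_QueryInterface_ICCW(pCCWUnknown, riid, &pv);
+    if (hr == COR_E_ACCESSING_CCW)
+    {
+        // The CCW was neutered or its ref-counted handle was cleared by GC;
+        // treat the managed object as gone instead of crashing.
+        return;
+    }
+    if (SUCCEEDED(hr) && pv != NULL)
+        ((IUnknown*)pv)->Release();   // balance the AddRef performed by a successful QI
+}
+#endif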
+
+struct AddRefReleaseArgs
+{
+ IUnknown* pUnk;
+ ULONG* pLong;
+ HRESULT* hr;
+};
+
+#ifdef MDA_SUPPORTED
+VOID __stdcall DirtyCast_Assert(IUnknown* pUnk)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(!"The native code calling into the CLR has performed an illegal dirty cast on this IUnknown or IDispatch pointer. "
+ "The caller neglected to QI for the correct interface before making this call. This is not a CLR bug. "
+ "A bug should be filed against the native caller.");
+
+ MDA_TRIGGER_ASSISTANT(DirtyCastAndCallOnInterface, ReportViolation(pUnk));
+}
+#endif
+
+ULONG __stdcall Unknown_AddRef(IUnknown* pUnk)
+{
+ // Ensure the Thread is available for contracts and other users of the Thread, but don't do any of
+ // the other "entering managed code" work like going to SO_INTOLERANT or checking for reentrancy.
+ // We don't really need to "enter" the runtime to do an interlocked increment on a refcount, so
+ // all of that stuff should be isolated to rare paths here.
+ SetupThreadForComCall(-1);
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT; // implies SO_TOLERANT
+ }
+ CONTRACTL_END;
+
+    // Allow AddRefs to go through, because we are allowing
+    // all Releases to go through; otherwise we would
+    // have a mismatch of ref-counts.
+ return Unknown_AddRef_Internal(pUnk);
+}
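+
+// Minimal sketch of the fast path the comments above describe (illustration only; the real
+// work happens inside Unknown_AddRef_Internal, and this helper name is hypothetical):
+#if 0
+ULONG FastAddRefSketch(LONG volatile* pRefCount)
+{
+    // An interlocked increment needs no managed-code transition, which is why the
+    // AddRef/Release entry points skip the full "entering managed code" setup.
+    return (ULONG)InterlockedIncrement(pRefCount);
+}
+#endif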
+
+ULONG __stdcall Unknown_Release(IUnknown* pUnk)
+{
+ // Ensure the Thread is available for contracts and other users of the Thread, but don't do any of
+ // the other "entering managed code" work like going to SO_INTOLERANT or checking for reentrancy.
+ // We don't really need to "enter" the runtime to do an interlocked decrement on a refcount, so
+ // all of that stuff should be isolated to rare paths here.
+ SetupThreadForComCall(-1);
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT; // implies SO_TOLERANT
+ }
+ CONTRACTL_END;
+
+ // Don't switch domains since we need to allow release calls to go through
+    // even after the AD has been unloaded. Furthermore, release doesn't require
+ // us to transition into the domain to work properly.
+ return Unknown_Release_Internal(pUnk);
+}
+
+ULONG __stdcall Unknown_AddRefInner(IUnknown* pUnk)
+{
+ // Ensure the Thread is available for contracts and other users of the Thread, but don't do any of
+ // the other "entering managed code" work like going to SO_INTOLERANT or checking for reentrancy.
+ // We don't really need to "enter" the runtime to do an interlocked increment on a refcount, so
+ // all of that stuff should be isolated to rare paths here.
+ SetupThreadForComCall(-1);
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT; // implies SO_TOLERANT
+ }
+ CONTRACTL_END;
+
+    // Allow AddRefs to go through, because we are allowing
+    // all Releases to go through; otherwise we would
+    // have a mismatch of ref-counts.
+ return Unknown_AddRefInner_Internal(pUnk);
+}
+
+ULONG __stdcall Unknown_ReleaseInner(IUnknown* pUnk)
+{
+ // Ensure the Thread is available for contracts and other users of the Thread, but don't do any of
+ // the other "entering managed code" work like going to SO_INTOLERANT or checking for reentrancy.
+ // We don't really need to "enter" the runtime to do an interlocked decrement on a refcount, so
+ // all of that stuff should be isolated to rare paths here.
+ SetupThreadForComCall(-1);
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT; // implies SO_TOLERANT
+ }
+ CONTRACTL_END;
+
+ // Don't switch domains since we need to allow release calls to go through
+    // even after the AD has been unloaded. Furthermore, release doesn't require
+ // us to transition into the domain to work properly.
+ return Unknown_ReleaseInner_Internal(pUnk);
+}
+
+ULONG __stdcall Unknown_AddRefSpecial(IUnknown* pUnk)
+{
+ // Ensure the Thread is available for contracts and other users of the Thread, but don't do any of
+ // the other "entering managed code" work like going to SO_INTOLERANT or checking for reentrancy.
+ // We don't really need to "enter" the runtime to do an interlocked increment on a refcount, so
+ // all of that stuff should be isolated to rare paths here.
+ SetupThreadForComCall(-1);
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT; // implies SO_TOLERANT
+ }
+ CONTRACTL_END;
+
+    // Allow AddRefs to go through, because we are allowing
+    // all Releases to go through; otherwise we would
+    // have a mismatch of ref-counts.
+ return Unknown_AddRefSpecial_Internal(pUnk);
+}
+
+ULONG __stdcall Unknown_ReleaseSpecial(IUnknown* pUnk)
+{
+ // Ensure the Thread is available for contracts and other users of the Thread, but don't do any of
+ // the other "entering managed code" work like going to SO_INTOLERANT or checking for reentrancy.
+ // We don't really need to "enter" the runtime to do an interlocked decrement on a refcount, so
+ // all of that stuff should be isolated to rare paths here.
+ SetupThreadForComCall(-1);
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ ENTRY_POINT; // implies SO_TOLERANT
+ }
+ CONTRACTL_END;
+
+ // Don't switch domains since we need to allow release calls to go through
+    // even after the AD has been unloaded. Furthermore, release doesn't require
+ // us to transition into the domain to work properly.
+ return Unknown_ReleaseSpecial_Internal(pUnk);
+}
+
+HRESULT __stdcall Unknown_QueryInterface_IErrorInfo(IUnknown* pUnk, REFIID riid, void** ppv)
+{
+ // Special version of SetupForComCallHR that doesn't call
+ // CanRunManagedCode() to avoid firing LoaderLock MDA
+ SetupForComCallHRNoCheckCanRunManagedCode();
+
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ HRESULT hr = S_OK;
+ if (!CanRunManagedCode(LoaderLockCheck::ForCorrectness))
+ {
+ // if we cannot run managed code, do a very simple QI which responds only to IUnknown and IErrorInfo
+ hr = Unknown_QueryInterface_IErrorInfo_Simple(pUnk, riid, ppv);
+
+ if (hr == E_NOINTERFACE)
+ {
+ // make sure that the MDA fires
+ VERIFY(!CanRunManagedCode(LoaderLockCheck::ForMDA));
+ hr = HOST_E_CLRNOTAVAILABLE;
+ }
+
+        return hr;
+    }
+
+ // otherwise do a regular QI
+ return Unknown_QueryInterface(pUnk, riid, ppv);
+}
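+
+// For illustration, the "very simple QI" fallback above conceptually reduces to the sketch
+// below (the real logic lives in Unknown_QueryInterface_IErrorInfo_Simple, which hands out
+// the proper tear-off rather than the raw pointer; this helper name is hypothetical):
+#if 0
+HRESULT SimpleErrorInfoQISketch(IUnknown* pUnk, REFIID riid, void** ppv)
+{
+    if (ppv == NULL)
+        return E_POINTER;
+
+    if (IsEqualIID(riid, IID_IUnknown) || IsEqualIID(riid, IID_IErrorInfo))
+    {
+        // answer only the two interfaces that are safe to hand out under the loader lock
+        *ppv = pUnk;
+        pUnk->AddRef();
+        return S_OK;
+    }
+
+    *ppv = NULL;
+    return E_NOINTERFACE;
+}
+#endif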
+
+// ---------------------------------------------------------------------------
+// Release for IErrorInfo that takes into account that this can be called
+// while holding the loader lock
+// ---------------------------------------------------------------------------
+ULONG __stdcall Unknown_ReleaseSpecial_IErrorInfo(IUnknown* pUnk)
+{
+ // Special version of SetupForComCallDWORD that doesn't call
+ // CanRunManagedCode() to avoid firing LoaderLock MDA
+ // No managed code will be executed in this function
+ SetupForComCallDWORDNoCheckCanRunManagedCode();
+
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ // <TODO>Address this violation in context of bug 27409</TODO>
+ CONTRACT_VIOLATION(GCViolation);
+
+ if (!CanRunManagedCode(LoaderLockCheck::None))
+ {
+        // CCW cleanup doesn't run managed code, but it may trigger operations such as
+        // switching the thread to cooperative mode, which is not safe during shutdown.
+ return 0;
+ }
+ else
+ {
+ // Don't switch domains since we need to allow release calls to go through
+        // even after the AD has been unloaded. Furthermore, release doesn't require
+ // us to transition into the domain to work properly.
+ return Unknown_ReleaseSpecial_IErrorInfo_Internal(pUnk);
+ }
+}
+
+
+//-------------------------------------------------------------------------
+// IProvideClassInfo methods
+
+struct GetClassInfoArgs
+{
+ IUnknown* pUnk;
+ ITypeInfo** ppTI; //Address of output variable that receives the type info.
+ HRESULT* hr;
+};
+
+VOID __stdcall ClassInfo_GetClassInfo_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetClassInfoArgs* pArgs = (GetClassInfoArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = ClassInfo_GetClassInfo(pArgs->pUnk, pArgs->ppTI);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, ClassInfo_GetClassInfo_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall ClassInfo_GetClassInfo_Wrapper(IUnknown* pUnk, ITypeInfo** ppTI)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(ppTI, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetClassInfoArgs args = {pUnk, ppTI, &hr};
+ ClassInfo_GetClassInfo_CallBack(&args);
+ return hr;
+}
+
+
+// ---------------------------------------------------------------------------
+// Interface ISupportsErrorInfo
+
+struct IntfSupportsErrorInfoArgs
+{
+ IUnknown* pUnk;
+ const IID* riid;
+ HRESULT* hr;
+};
+
+VOID __stdcall SupportsErroInfo_IntfSupportsErrorInfo_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ IntfSupportsErrorInfoArgs* pArgs = (IntfSupportsErrorInfoArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = SupportsErroInfo_IntfSupportsErrorInfo(pArgs->pUnk, *pArgs->riid);
+ }
+ else
+ {
+        AppDomainDoCallBack(pWrap, SupportsErroInfo_IntfSupportsErrorInfo_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall
+SupportsErroInfo_IntfSupportsErrorInfo_Wrapper(IUnknown* pUnk, REFIID riid)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ IntfSupportsErrorInfoArgs args = {pUnk, &riid, &hr};
+ SupportsErroInfo_IntfSupportsErrorInfo_CallBack(&args);
+ return hr;
+}
+
+// ---------------------------------------------------------------------------
+// Interface IErrorInfo
+
+struct GetDescriptionArgs
+{
+ IUnknown* pUnk;
+    BSTR* pbstrDescription;
+ HRESULT* hr;
+};
+
+VOID __stdcall ErrorInfo_GetDescription_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetDescriptionArgs* pArgs = (GetDescriptionArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+        *(pArgs->hr) = ErrorInfo_GetDescription(pArgs->pUnk, pArgs->pbstrDescription);
+ }
+ else
+ {
+        AppDomainDoCallBack(pWrap, ErrorInfo_GetDescription_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall ErrorInfo_GetDescription_Wrapper(IUnknown* pUnk, BSTR* pbstrDescription)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(pbstrDescription, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetDescriptionArgs args = {pUnk, pbstrDescription, &hr};
+ ErrorInfo_GetDescription_CallBack(&args);
+ return hr;
+}
+
+struct GetGUIDArgs
+{
+ IUnknown* pUnk;
+ GUID* pguid;
+ HRESULT* hr;
+};
+
+VOID __stdcall ErrorInfo_GetGUID_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetGUIDArgs* pArgs = (GetGUIDArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = ErrorInfo_GetGUID(pArgs->pUnk, pArgs->pguid);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, ErrorInfo_GetGUID_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall ErrorInfo_GetGUID_Wrapper(IUnknown* pUnk, GUID* pguid)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(pguid, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetGUIDArgs args = {pUnk, pguid, &hr};
+ ErrorInfo_GetGUID_CallBack(&args);
+ return hr;
+}
+
+struct GetHelpContextArgs
+{
+ IUnknown* pUnk;
+ DWORD* pdwHelpCtxt;
+ HRESULT* hr;
+};
+
+VOID __stdcall ErrorInfo_GetHelpContext_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetHelpContextArgs* pArgs = (GetHelpContextArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = ErrorInfo_GetHelpContext(pArgs->pUnk, pArgs->pdwHelpCtxt);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, ErrorInfo_GetHelpContext_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall ErrorInfo_GetHelpContext_Wrapper(IUnknown* pUnk, DWORD* pdwHelpCtxt)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(pdwHelpCtxt, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetHelpContextArgs args = {pUnk, pdwHelpCtxt, &hr};
+ ErrorInfo_GetHelpContext_CallBack(&args);
+ return hr;
+}
+
+struct GetHelpFileArgs
+{
+ IUnknown* pUnk;
+ BSTR* pbstrHelpFile;
+ HRESULT* hr;
+};
+
+VOID __stdcall ErrorInfo_GetHelpFile_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetHelpFileArgs* pArgs = (GetHelpFileArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = ErrorInfo_GetHelpFile(pArgs->pUnk, pArgs->pbstrHelpFile);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, ErrorInfo_GetHelpFile_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall ErrorInfo_GetHelpFile_Wrapper(IUnknown* pUnk, BSTR* pbstrHelpFile)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(pbstrHelpFile, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetHelpFileArgs args = {pUnk, pbstrHelpFile, &hr};
+ ErrorInfo_GetHelpFile_CallBack(&args);
+ return hr;
+}
+
+struct GetSourceArgs
+{
+ IUnknown* pUnk;
+ BSTR* pbstrSource;
+ HRESULT* hr;
+};
+
+VOID __stdcall ErrorInfo_GetSource_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetSourceArgs* pArgs = (GetSourceArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = ErrorInfo_GetSource(pArgs->pUnk, pArgs->pbstrSource);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, ErrorInfo_GetSource_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall ErrorInfo_GetSource_Wrapper(IUnknown* pUnk, BSTR* pbstrSource)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(pbstrSource, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetSourceArgs args = {pUnk, pbstrSource, &hr};
+ ErrorInfo_GetSource_CallBack(&args);
+ return hr;
+}
+
+
+// ---------------------------------------------------------------------------
+// Interface IDispatch
+//
+// IDispatch methods for COM+ objects. These methods dispatch to the
+// appropriate implementation based on the flags of the class that
+// implements them.
+
+struct GetTypeInfoCountArgs
+{
+ IDispatch* pUnk;
+ unsigned int *pctinfo;
+ HRESULT* hr;
+};
+
+VOID __stdcall Dispatch_GetTypeInfoCount_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetTypeInfoCountArgs* pArgs = (GetTypeInfoCountArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = Dispatch_GetTypeInfoCount(pArgs->pUnk, pArgs->pctinfo);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, Dispatch_GetTypeInfoCount_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall Dispatch_GetTypeInfoCount_Wrapper(IDispatch* pDisp, unsigned int *pctinfo)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(pctinfo, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetTypeInfoCountArgs args = {pDisp, pctinfo, &hr};
+ Dispatch_GetTypeInfoCount_CallBack(&args);
+ return hr;
+}
+
+struct GetTypeInfoArgs
+{
+ IDispatch* pUnk;
+ unsigned int itinfo;
+ LCID lcid;
+ ITypeInfo **pptinfo;
+ HRESULT* hr;
+};
+
+VOID __stdcall Dispatch_GetTypeInfo_CallBack (LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetTypeInfoArgs* pArgs = (GetTypeInfoArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = Dispatch_GetTypeInfo(pArgs->pUnk, pArgs->itinfo, pArgs->lcid, pArgs->pptinfo);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, Dispatch_GetTypeInfo_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall Dispatch_GetTypeInfo_Wrapper(IDispatch* pDisp, unsigned int itinfo, LCID lcid, ITypeInfo **pptinfo)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(pptinfo, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetTypeInfoArgs args = {pDisp, itinfo, lcid, pptinfo, &hr};
+ Dispatch_GetTypeInfo_CallBack(&args);
+ return hr;
+}
+
+struct GetIDsOfNamesArgs
+{
+ IDispatch* pUnk;
+ const IID* riid;
+ OLECHAR **rgszNames;
+ unsigned int cNames;
+ LCID lcid;
+ DISPID *rgdispid;
+ HRESULT* hr;
+};
+
+VOID __stdcall Dispatch_GetIDsOfNames_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetIDsOfNamesArgs* pArgs = (GetIDsOfNamesArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = Dispatch_GetIDsOfNames(pArgs->pUnk, *pArgs->riid, pArgs->rgszNames,
+ pArgs->cNames, pArgs->lcid, pArgs->rgdispid);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, Dispatch_GetIDsOfNames_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall Dispatch_GetIDsOfNames_Wrapper(IDispatch* pDisp, REFIID riid, __in_ecount(cNames) OLECHAR **rgszNames,
+ unsigned int cNames, LCID lcid, DISPID *rgdispid)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(rgszNames, NULL_OK));
+ PRECONDITION(CheckPointer(rgdispid, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetIDsOfNamesArgs args = {pDisp, &riid, rgszNames, cNames, lcid, rgdispid, &hr};
+ Dispatch_GetIDsOfNames_CallBack(&args);
+ return hr;
+}
+
+VOID __stdcall InternalDispatchImpl_GetIDsOfNames_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetIDsOfNamesArgs* pArgs = (GetIDsOfNamesArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = InternalDispatchImpl_GetIDsOfNames(pArgs->pUnk, *pArgs->riid, pArgs->rgszNames,
+ pArgs->cNames, pArgs->lcid, pArgs->rgdispid);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, InternalDispatchImpl_GetIDsOfNames_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall InternalDispatchImpl_GetIDsOfNames_Wrapper(IDispatch* pDisp, REFIID riid, __in_ecount(cNames) OLECHAR **rgszNames,
+ unsigned int cNames, LCID lcid, DISPID *rgdispid)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(rgszNames, NULL_OK));
+ PRECONDITION(CheckPointer(rgdispid, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetIDsOfNamesArgs args = {pDisp, &riid, rgszNames, cNames, lcid, rgdispid, &hr};
+ InternalDispatchImpl_GetIDsOfNames_CallBack(&args);
+ return hr;
+}
+
+struct InvokeArgs
+{
+ IDispatch* pUnk;
+ DISPID dispidMember;
+ const IID* riid;
+ LCID lcid;
+ unsigned short wFlags;
+ DISPPARAMS *pdispparams;
+ VARIANT *pvarResult;
+ EXCEPINFO *pexcepinfo;
+ unsigned int *puArgErr;
+ HRESULT* hr;
+};
+
+VOID __stdcall Dispatch_Invoke_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ THROWS; // Dispatch_Invoke can throw
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ InvokeArgs* pArgs = (InvokeArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = Dispatch_Invoke(pArgs->pUnk, pArgs->dispidMember, *pArgs->riid,
+ pArgs->lcid, pArgs->wFlags, pArgs->pdispparams, pArgs->pvarResult,
+ pArgs->pexcepinfo, pArgs->puArgErr);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, Dispatch_Invoke_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall Dispatch_Invoke_Wrapper(IDispatch* pDisp, DISPID dispidMember, REFIID riid, LCID lcid, unsigned short wFlags,
+ DISPPARAMS *pdispparams, VARIANT *pvarResult, EXCEPINFO *pexcepinfo, unsigned int *puArgErr)
+{
+ HRESULT hrRetVal = S_OK;
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+    // SetupForComCallHR uses "SO_INTOLERANT_CODE_NOTHROW" to set up the SO-intolerant transition
+    // for COM interop. However, "SO_INTOLERANT_CODE_NOTHROW" expects that no exception can escape
+    // through this boundary; all it actually does (in addition to checking that no exception has
+    // escaped it) is stack probing.
+    //
+    // However, Corrupting Exceptions [CE] can escape the COM interop boundary. Thus, to address
+    // that scenario, we use the macro below, which uses BEGIN_SO_INTOLERANT_CODE_NOTHROW to do the
+    // equivalent of SO_INTOLERANT_CODE_NOTHROW and yet allow CEs to escape. Since there will be a
+    // corresponding END_SO_INTOLERANT_CODE, the call is split into two parts: the Begin and the End
+    // (see below).
+ BeginSetupForComCallHRWithEscapingCorruptingExceptions();
+#else // !FEATURE_CORRUPTING_EXCEPTIONS
+ SetupForComCallHR();
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+
+ CONTRACTL
+ {
+ THROWS; // Dispatch_Invoke_CallBack can throw
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(pdispparams, NULL_OK));
+ PRECONDITION(CheckPointer(pvarResult, NULL_OK));
+ PRECONDITION(CheckPointer(pexcepinfo, NULL_OK));
+ PRECONDITION(CheckPointer(puArgErr, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ InvokeArgs args = {pDisp, dispidMember, &riid, lcid, wFlags, pdispparams,
+ pvarResult, pexcepinfo, puArgErr, &hrRetVal};
+ Dispatch_Invoke_CallBack(&args);
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ EndSetupForComCallHRWithEscapingCorruptingExceptions();
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+ return hrRetVal;
+}
+
+VOID __stdcall InternalDispatchImpl_Invoke_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ InvokeArgs* pArgs = (InvokeArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = InternalDispatchImpl_Invoke(pArgs->pUnk, pArgs->dispidMember, *pArgs->riid,
+ pArgs->lcid, pArgs->wFlags, pArgs->pdispparams, pArgs->pvarResult,
+ pArgs->pexcepinfo, pArgs->puArgErr);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, InternalDispatchImpl_Invoke_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall InternalDispatchImpl_Invoke_Wrapper(IDispatch* pDisp, DISPID dispidMember, REFIID riid, LCID lcid,
+ unsigned short wFlags, DISPPARAMS *pdispparams, VARIANT *pvarResult,
+ EXCEPINFO *pexcepinfo, unsigned int *puArgErr)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(pdispparams, NULL_OK));
+ PRECONDITION(CheckPointer(pvarResult, NULL_OK));
+ PRECONDITION(CheckPointer(pexcepinfo, NULL_OK));
+ PRECONDITION(CheckPointer(puArgErr, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ InvokeArgs args = {pDisp, dispidMember, &riid, lcid, wFlags, pdispparams,
+ pvarResult, pexcepinfo, puArgErr, &hr};
+ InternalDispatchImpl_Invoke_CallBack(&args);
+ return hr;
+}
+
+// ---------------------------------------------------------------------------
+// Interface IDispatchEx
+
+struct GetTypeInfoCountExArgs
+{
+ IDispatchEx* pUnk;
+ unsigned int *pctinfo;
+ HRESULT* hr;
+};
+
+VOID __stdcall DispatchEx_GetTypeInfoCount_CallBack (LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetTypeInfoCountExArgs* pArgs = (GetTypeInfoCountExArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = DispatchEx_GetTypeInfoCount(pArgs->pUnk, pArgs->pctinfo);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, DispatchEx_GetTypeInfoCount_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall DispatchEx_GetTypeInfoCount_Wrapper(IDispatchEx* pDisp, unsigned int *pctinfo)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(pctinfo, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetTypeInfoCountExArgs args = {pDisp, pctinfo, &hr};
+ DispatchEx_GetTypeInfoCount_CallBack(&args);
+ return hr;
+}
+
+struct GetTypeInfoExArgs
+{
+ IDispatch* pUnk;
+ unsigned int itinfo;
+ LCID lcid;
+ ITypeInfo **pptinfo;
+ HRESULT* hr;
+};
+
+VOID __stdcall DispatchEx_GetTypeInfo_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetTypeInfoExArgs* pArgs = (GetTypeInfoExArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = DispatchEx_GetTypeInfo(pArgs->pUnk, pArgs->itinfo, pArgs->lcid, pArgs->pptinfo);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, DispatchEx_GetTypeInfo_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall DispatchEx_GetTypeInfo_Wrapper(IDispatchEx* pDisp, unsigned int itinfo, LCID lcid, ITypeInfo **pptinfo)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(pptinfo, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetTypeInfoExArgs args = {pDisp, itinfo, lcid, pptinfo, &hr};
+ DispatchEx_GetTypeInfo_CallBack(&args);
+ return hr;
+}
+
+struct GetIDsOfNamesExArgs
+{
+ IDispatchEx* pUnk;
+ const IID* riid;
+ OLECHAR **rgszNames;
+ unsigned int cNames;
+ LCID lcid;
+ DISPID *rgdispid;
+ HRESULT* hr;
+};
+
+VOID __stdcall DispatchEx_GetIDsOfNames_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetIDsOfNamesExArgs* pArgs = (GetIDsOfNamesExArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = DispatchEx_GetIDsOfNames(pArgs->pUnk, *pArgs->riid, pArgs->rgszNames,
+ pArgs->cNames, pArgs->lcid, pArgs->rgdispid);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, DispatchEx_GetIDsOfNames_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall DispatchEx_GetIDsOfNames_Wrapper(IDispatchEx* pDisp, REFIID riid, __in_ecount(cNames) OLECHAR **rgszNames,
+ unsigned int cNames, LCID lcid, DISPID *rgdispid)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(rgszNames, NULL_OK));
+ PRECONDITION(CheckPointer(rgdispid, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetIDsOfNamesExArgs args = {pDisp, &riid, rgszNames, cNames, lcid, rgdispid, &hr};
+ DispatchEx_GetIDsOfNames_CallBack(&args);
+ return hr;
+}
+
+struct DispExInvokeArgs
+{
+ IDispatchEx* pUnk;
+ DISPID dispidMember;
+ const IID* riid;
+ LCID lcid;
+ unsigned short wFlags;
+ DISPPARAMS *pdispparams;
+ VARIANT *pvarResult;
+ EXCEPINFO *pexcepinfo;
+ unsigned int *puArgErr;
+ HRESULT* hr;
+};
+
+VOID __stdcall DispatchEx_Invoke_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ DispExInvokeArgs* pArgs = (DispExInvokeArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = DispatchEx_Invoke(pArgs->pUnk, pArgs->dispidMember, *pArgs->riid,
+ pArgs->lcid, pArgs->wFlags, pArgs->pdispparams, pArgs->pvarResult,
+ pArgs->pexcepinfo, pArgs->puArgErr);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, DispatchEx_Invoke_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall DispatchEx_Invoke_Wrapper(IDispatchEx* pDisp, DISPID dispidMember, REFIID riid, LCID lcid,
+ unsigned short wFlags, DISPPARAMS *pdispparams, VARIANT *pvarResult,
+ EXCEPINFO *pexcepinfo, unsigned int *puArgErr)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(pdispparams, NULL_OK));
+ PRECONDITION(CheckPointer(pvarResult, NULL_OK));
+ PRECONDITION(CheckPointer(pexcepinfo, NULL_OK));
+ PRECONDITION(CheckPointer(puArgErr, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DispExInvokeArgs args = {pDisp, dispidMember, &riid, lcid, wFlags, pdispparams,
+ pvarResult, pexcepinfo, puArgErr, &hr};
+ DispatchEx_Invoke_CallBack(&args);
+ return hr;
+}
+
+struct DeleteMemberByDispIDArgs
+{
+ IDispatchEx* pDisp;
+ DISPID id;
+ HRESULT* hr;
+};
+
+VOID __stdcall DispatchEx_DeleteMemberByDispID_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ DeleteMemberByDispIDArgs* pArgs = (DeleteMemberByDispIDArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = DispatchEx_DeleteMemberByDispID(pArgs->pDisp, pArgs->id);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, DispatchEx_DeleteMemberByDispID_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall DispatchEx_DeleteMemberByDispID_Wrapper(IDispatchEx* pDisp, DISPID id)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DeleteMemberByDispIDArgs args = {pDisp, id, &hr};
+ DispatchEx_DeleteMemberByDispID_CallBack(&args);
+ return hr;
+}
+
+struct DeleteMemberByNameArgs
+{
+ IDispatchEx* pDisp;
+ BSTR bstrName;
+ DWORD grfdex;
+ HRESULT* hr;
+};
+
+VOID __stdcall DispatchEx_DeleteMemberByName_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ DeleteMemberByNameArgs* pArgs = (DeleteMemberByNameArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = DispatchEx_DeleteMemberByName(pArgs->pDisp, pArgs->bstrName, pArgs->grfdex);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, DispatchEx_DeleteMemberByName_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall DispatchEx_DeleteMemberByName_Wrapper(IDispatchEx* pDisp, BSTR bstrName, DWORD grfdex)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DeleteMemberByNameArgs args = {pDisp, bstrName, grfdex, &hr};
+ DispatchEx_DeleteMemberByName_CallBack(&args);
+ return hr;
+}
+
+struct GetMemberNameArgs
+{
+ IDispatchEx* pDisp;
+ DISPID id;
+ BSTR *pbstrName;
+ HRESULT* hr;
+};
+
+VOID __stdcall DispatchEx_GetMemberName_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetMemberNameArgs* pArgs = (GetMemberNameArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = DispatchEx_GetMemberName(pArgs->pDisp, pArgs->id, pArgs->pbstrName);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, DispatchEx_GetMemberName_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall DispatchEx_GetMemberName_Wrapper(IDispatchEx* pDisp, DISPID id, BSTR *pbstrName)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(pbstrName, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetMemberNameArgs args = {pDisp, id, pbstrName, &hr};
+ DispatchEx_GetMemberName_CallBack(&args);
+ return hr;
+}
+
+struct GetDispIDArgs
+{
+ IDispatchEx* pDisp;
+ BSTR bstrName;
+ DWORD grfdex;
+ DISPID *pid;
+ HRESULT* hr;
+};
+
+VOID __stdcall DispatchEx_GetDispID_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetDispIDArgs* pArgs = (GetDispIDArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = DispatchEx_GetDispID(pArgs->pDisp, pArgs->bstrName, pArgs->grfdex, pArgs->pid);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, DispatchEx_GetDispID_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall DispatchEx_GetDispID_Wrapper(IDispatchEx* pDisp, BSTR bstrName, DWORD grfdex, DISPID *pid)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(pid, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetDispIDArgs args = {pDisp, bstrName, grfdex, pid, &hr};
+ DispatchEx_GetDispID_CallBack(&args);
+ return hr;
+}
+
+struct GetMemberPropertiesArgs
+{
+ IDispatchEx* pDisp;
+ DISPID id;
+ DWORD grfdexFetch;
+ DWORD *pgrfdex;
+ HRESULT* hr;
+};
+
+VOID __stdcall DispatchEx_GetMemberProperties_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetMemberPropertiesArgs* pArgs = (GetMemberPropertiesArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = DispatchEx_GetMemberProperties(pArgs->pDisp, pArgs->id, pArgs->grfdexFetch,
+ pArgs->pgrfdex);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, DispatchEx_GetMemberProperties_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall DispatchEx_GetMemberProperties_Wrapper(IDispatchEx* pDisp, DISPID id, DWORD grfdexFetch, DWORD *pgrfdex)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(pgrfdex, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetMemberPropertiesArgs args = {pDisp, id, grfdexFetch, pgrfdex, &hr};
+ DispatchEx_GetMemberProperties_CallBack(&args);
+ return hr;
+}
+
+struct GetNameSpaceParentArgs
+{
+ IDispatchEx* pDisp;
+ IUnknown **ppunk;
+ HRESULT* hr;
+};
+
+VOID __stdcall DispatchEx_GetNameSpaceParent_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetNameSpaceParentArgs* pArgs = (GetNameSpaceParentArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = DispatchEx_GetNameSpaceParent(pArgs->pDisp, pArgs->ppunk);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, DispatchEx_GetNameSpaceParent_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall DispatchEx_GetNameSpaceParent_Wrapper(IDispatchEx* pDisp, IUnknown **ppunk)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(ppunk, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetNameSpaceParentArgs args = {pDisp, ppunk, &hr};
+ DispatchEx_GetNameSpaceParent_CallBack(&args);
+ return hr;
+}
+
+struct GetNextDispIDArgs
+{
+ IDispatchEx* pDisp;
+ DWORD grfdex;
+ DISPID id;
+ DISPID *pid;
+ HRESULT* hr;
+};
+
+VOID __stdcall DispatchEx_GetNextDispID_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetNextDispIDArgs* pArgs = (GetNextDispIDArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = DispatchEx_GetNextDispID(pArgs->pDisp, pArgs->grfdex, pArgs->id, pArgs->pid);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, DispatchEx_GetNextDispID_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall DispatchEx_GetNextDispID_Wrapper(IDispatchEx* pDisp, DWORD grfdex, DISPID id, DISPID *pid)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(pid, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetNextDispIDArgs args = {pDisp, grfdex, id, pid, &hr};
+ DispatchEx_GetNextDispID_CallBack(&args);
+ return hr;
+}
+
+struct DispExInvokeExArgs
+{
+ IDispatchEx* pDisp;
+ DISPID id;
+ LCID lcid;
+ WORD wFlags;
+ DISPPARAMS *pdp;
+ VARIANT *pVarRes;
+ EXCEPINFO *pei;
+ IServiceProvider *pspCaller;
+ HRESULT* hr;
+};
+
+VOID __stdcall DispatchEx_InvokeEx_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ DispExInvokeExArgs* pArgs = (DispExInvokeExArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = DispatchEx_InvokeEx(pArgs->pDisp, pArgs->id,
+ pArgs->lcid, pArgs->wFlags, pArgs->pdp, pArgs->pVarRes,
+ pArgs->pei, pArgs->pspCaller);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, DispatchEx_InvokeEx_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall DispatchEx_InvokeEx_Wrapper(IDispatchEx* pDisp, DISPID id, LCID lcid, WORD wFlags, DISPPARAMS *pdp,
+ VARIANT *pVarRes, EXCEPINFO *pei, IServiceProvider *pspCaller)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pDisp));
+ PRECONDITION(CheckPointer(pdp, NULL_OK));
+ PRECONDITION(CheckPointer(pVarRes, NULL_OK));
+ PRECONDITION(CheckPointer(pei, NULL_OK));
+ PRECONDITION(CheckPointer(pspCaller, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DispExInvokeExArgs args = {pDisp, id, lcid, wFlags, pdp, pVarRes, pei, pspCaller, &hr};
+ DispatchEx_InvokeEx_CallBack(&args);
+ return hr;
+}
+
+// ---------------------------------------------------------------------------
+// Interface IInspectable
+
+struct InspectableGetIIDsArgs
+{
+ IInspectable *pInsp;
+ ULONG *iidCount;
+ IID **iids;
+ HRESULT* hr;
+};
+
+VOID __stdcall Inspectable_GetIIDs_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ InspectableGetIIDsArgs* pArgs = (InspectableGetIIDsArgs *)ptr;
+ ComCallWrapper *pWrap = MapIUnknownToWrapper(pArgs->pInsp);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = Inspectable_GetIIDs(pArgs->pInsp, pArgs->iidCount, pArgs->iids);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, Inspectable_GetIIDs_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall Inspectable_GetIIDs_Wrapper(IInspectable *pInsp, ULONG *iidCount, IID **iids)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pInsp));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ InspectableGetIIDsArgs args = {pInsp, iidCount, iids, &hr};
+ Inspectable_GetIIDs_CallBack(&args);
+ return hr;
+}
+
+struct InspectableGetRuntimeClassNameArgs
+{
+ IInspectable *pInsp;
+ HSTRING *className;
+ HRESULT* hr;
+};
+
+VOID Inspectable_GetRuntimeClassName_Callback(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ InspectableGetRuntimeClassNameArgs* pArgs = (InspectableGetRuntimeClassNameArgs *)ptr;
+ ComCallWrapper *pWrap = MapIUnknownToWrapper(pArgs->pInsp);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = Inspectable_GetRuntimeClassName(pArgs->pInsp, pArgs->className);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, Inspectable_GetRuntimeClassName_Callback, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall Inspectable_GetRuntimeClassName_Wrapper(IInspectable *pInsp, HSTRING *className)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pInsp));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ InspectableGetRuntimeClassNameArgs args = { pInsp, className, &hr };
+ Inspectable_GetRuntimeClassName_Callback(&args);
+
+ return hr;
+}
+
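+// Note: GetTrustLevel touches no managed state, so unlike the wrappers above it
+// needs no appdomain-transition callback and can answer directly.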
+HRESULT __stdcall Inspectable_GetTrustLevel_Wrapper(IInspectable *pInsp, TrustLevel *trustLevel)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pInsp));
+ }
+ CONTRACTL_END;
+
+ if (trustLevel == NULL)
+ return E_POINTER;
+
+ // all managed WinRT objects are BaseTrust for now
+ *trustLevel = BaseTrust;
+ return S_OK;
+}
+
+// ---------------------------------------------------------------------------
+// Interface IWeakReferenceSource
+
+struct GetWeakReferenceArgs
+{
+ IWeakReferenceSource *pRefSrc;
+ IWeakReference **weakReference;
+ HRESULT* hr;
+};
+
+VOID __stdcall WeakReferenceSource_GetWeakReference_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetWeakReferenceArgs* pArgs = (GetWeakReferenceArgs *)ptr;
+ ComCallWrapper *pWrap = MapIUnknownToWrapper(pArgs->pRefSrc);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = WeakReferenceSource_GetWeakReference(pArgs->pRefSrc, pArgs->weakReference);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, WeakReferenceSource_GetWeakReference_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall WeakReferenceSource_GetWeakReference_Wrapper(IWeakReferenceSource *pRefSrc, IWeakReference **weakReference)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pRefSrc));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetWeakReferenceArgs args = {pRefSrc, weakReference, &hr};
+ WeakReferenceSource_GetWeakReference_CallBack(&args);
+ return hr;
+}
+
+// ---------------------------------------------------------------------------
+// Interface IMarshal
+
+struct GetUnmarshalClassArgs
+{
+ IMarshal* pUnk;
+ const IID* riid;
+ void * pv;
+ ULONG dwDestContext;
+ void * pvDestContext;
+ ULONG mshlflags;
+ LPCLSID pclsid;
+ HRESULT* hr;
+};
+
+VOID __stdcall Marshal_GetUnmarshalClass_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetUnmarshalClassArgs* pArgs = (GetUnmarshalClassArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = Marshal_GetUnmarshalClass(pArgs->pUnk, *(pArgs->riid), pArgs->pv,
+ pArgs->dwDestContext, pArgs->pvDestContext, pArgs->mshlflags,
+ pArgs->pclsid);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, Marshal_GetUnmarshalClass_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall Marshal_GetUnmarshalClass_Wrapper(IMarshal* pMarsh, REFIID riid, void * pv, ULONG dwDestContext,
+ void * pvDestContext, ULONG mshlflags, LPCLSID pclsid)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pMarsh));
+ PRECONDITION(CheckPointer(pv, NULL_OK));
+ PRECONDITION(CheckPointer(pvDestContext, NULL_OK));
+ PRECONDITION(CheckPointer(pclsid, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetUnmarshalClassArgs args = {pMarsh, &riid, pv, dwDestContext, pvDestContext,
+ mshlflags, pclsid, &hr};
+ Marshal_GetUnmarshalClass_CallBack(&args);
+ return hr;
+}
+
+struct GetMarshalSizeMaxArgs
+{
+ IMarshal* pUnk;
+ const IID* riid;
+ void * pv;
+ ULONG dwDestContext;
+ void * pvDestContext;
+ ULONG mshlflags;
+ ULONG * pSize;
+ HRESULT* hr;
+};
+
+VOID __stdcall Marshal_GetMarshalSizeMax_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetMarshalSizeMaxArgs* pArgs = (GetMarshalSizeMaxArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = Marshal_GetMarshalSizeMax(pArgs->pUnk, *(pArgs->riid), pArgs->pv,
+ pArgs->dwDestContext, pArgs->pvDestContext, pArgs->mshlflags,
+ pArgs->pSize);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, Marshal_GetMarshalSizeMax_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+
+HRESULT __stdcall Marshal_GetMarshalSizeMax_Wrapper(IMarshal* pMarsh, REFIID riid, void * pv, ULONG dwDestContext,
+ void * pvDestContext, ULONG mshlflags, ULONG * pSize)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pMarsh));
+ PRECONDITION(CheckPointer(pv, NULL_OK));
+ PRECONDITION(CheckPointer(pvDestContext, NULL_OK));
+ PRECONDITION(CheckPointer(pSize, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetMarshalSizeMaxArgs args = {pMarsh, &riid, pv, dwDestContext, pvDestContext,
+ mshlflags, pSize, &hr};
+ Marshal_GetMarshalSizeMax_CallBack(&args);
+ return hr;
+}
+
+struct MarshalInterfaceArgs
+{
+ IMarshal* pUnk;
+ LPSTREAM pStm;
+ const IID* riid;
+ void * pv;
+ ULONG dwDestContext;
+ void * pvDestContext;
+ ULONG mshlflags;
+ HRESULT* hr;
+};
+
+VOID __stdcall Marshal_MarshalInterface_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ MarshalInterfaceArgs* pArgs = (MarshalInterfaceArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = Marshal_MarshalInterface(pArgs->pUnk, pArgs->pStm, *(pArgs->riid), pArgs->pv,
+ pArgs->dwDestContext, pArgs->pvDestContext, pArgs->mshlflags);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, Marshal_MarshalInterface_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall Marshal_MarshalInterface_Wrapper(IMarshal* pMarsh, LPSTREAM pStm, REFIID riid, void * pv,
+ ULONG dwDestContext, LPVOID pvDestContext, ULONG mshlflags)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pMarsh));
+ PRECONDITION(CheckPointer(pv, NULL_OK));
+ PRECONDITION(CheckPointer(pvDestContext, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ MarshalInterfaceArgs args = {pMarsh, pStm, &riid, pv, dwDestContext, pvDestContext,
+ mshlflags, &hr};
+ Marshal_MarshalInterface_CallBack(&args);
+ return hr;
+}
+
+struct UnmarshalInterfaceArgs
+{
+ IMarshal* pUnk;
+ LPSTREAM pStm;
+ const IID* riid;
+ void ** ppvObj;
+ HRESULT* hr;
+};
+
+VOID __stdcall Marshal_UnmarshalInterface_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ UnmarshalInterfaceArgs* pArgs = (UnmarshalInterfaceArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = Marshal_UnmarshalInterface(pArgs->pUnk, pArgs->pStm, *(pArgs->riid), pArgs->ppvObj);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, Marshal_UnmarshalInterface_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall Marshal_UnmarshalInterface_Wrapper(IMarshal* pMarsh, LPSTREAM pStm, REFIID riid, void ** ppvObj)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pMarsh));
+ PRECONDITION(CheckPointer(pStm, NULL_OK));
+ PRECONDITION(CheckPointer(ppvObj, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ UnmarshalInterfaceArgs args = {pMarsh, pStm, &riid, ppvObj, &hr};
+ Marshal_UnmarshalInterface_CallBack(&args);
+ return hr;
+}
+
+struct ReleaseMarshalDataArgs
+{
+ IMarshal* pUnk;
+ LPSTREAM pStm;
+ HRESULT* hr;
+};
+
+VOID __stdcall Marshal_ReleaseMarshalData_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ ReleaseMarshalDataArgs* pArgs = (ReleaseMarshalDataArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = Marshal_ReleaseMarshalData(pArgs->pUnk, pArgs->pStm);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, Marshal_ReleaseMarshalData_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall Marshal_ReleaseMarshalData_Wrapper(IMarshal* pMarsh, LPSTREAM pStm)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pMarsh));
+ PRECONDITION(CheckPointer(pStm, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ ReleaseMarshalDataArgs args = {pMarsh, pStm, &hr};
+ Marshal_ReleaseMarshalData_CallBack(&args);
+ return hr;
+}
+
+struct DisconnectObjectArgs
+{
+ IMarshal* pUnk;
+ ULONG dwReserved;
+ HRESULT* hr;
+};
+
+VOID __stdcall Marshal_DisconnectObject_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ DisconnectObjectArgs* pArgs = (DisconnectObjectArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = Marshal_DisconnectObject(pArgs->pUnk, pArgs->dwReserved);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, Marshal_DisconnectObject_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall Marshal_DisconnectObject_Wrapper(IMarshal* pMarsh, ULONG dwReserved)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pMarsh));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ DisconnectObjectArgs args = {pMarsh, dwReserved, &hr};
+ Marshal_DisconnectObject_CallBack(&args);
+ return hr;
+}
+
+
+// ---------------------------------------------------------------------------
+// Interface IManagedObject
+
+struct GetObjectIdentityArgs
+{
+ IManagedObject *pUnk;
+ BSTR* pBSTRGUID;
+ DWORD* pAppDomainID;
+ void** pCCW;
+ HRESULT* hr;
+};
+
+VOID __stdcall ManagedObject_GetObjectIdentity_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetObjectIdentityArgs* pArgs = (GetObjectIdentityArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = ManagedObject_GetObjectIdentity(pArgs->pUnk, pArgs->pBSTRGUID, pArgs->pAppDomainID,
+ pArgs->pCCW);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, ManagedObject_GetObjectIdentity_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall ManagedObject_GetObjectIdentity_Wrapper(IManagedObject *pUnk, BSTR* pBSTRGUID, DWORD* pAppDomainID, void** pCCW)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(pBSTRGUID, NULL_OK));
+ PRECONDITION(CheckPointer(pAppDomainID, NULL_OK));
+ PRECONDITION(CheckPointer(pCCW, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetObjectIdentityArgs args = {pUnk, pBSTRGUID, pAppDomainID, pCCW, &hr};
+ ManagedObject_GetObjectIdentity_CallBack(&args);
+ return hr;
+}
+
+struct GetSerializedBufferArgs
+{
+ IManagedObject *pUnk;
+ BSTR* pBStr;
+ HRESULT* hr;
+};
+
+VOID __stdcall ManagedObject_GetSerializedBuffer_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetSerializedBufferArgs* pArgs = (GetSerializedBufferArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = ManagedObject_GetSerializedBuffer(pArgs->pUnk, pArgs->pBStr);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, ManagedObject_GetSerializedBuffer_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall ManagedObject_GetSerializedBuffer_Wrapper(IManagedObject *pUnk, BSTR* pBStr)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(pBStr, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetSerializedBufferArgs args = {pUnk, pBStr, &hr};
+ ManagedObject_GetSerializedBuffer_CallBack(&args);
+ return hr;
+}
+
+
+// ---------------------------------------------------------------------------
+// Interface IConnectionPointContainer
+
+struct EnumConnectionPointsArgs
+{
+ IUnknown* pUnk;
+ IEnumConnectionPoints **ppEnum;
+ HRESULT* hr;
+};
+
+VOID __stdcall ConnectionPointContainer_EnumConnectionPoints_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ EnumConnectionPointsArgs* pArgs = (EnumConnectionPointsArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = ConnectionPointContainer_EnumConnectionPoints(pArgs->pUnk, pArgs->ppEnum);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, ConnectionPointContainer_EnumConnectionPoints_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall ConnectionPointContainer_EnumConnectionPoints_Wrapper(IUnknown* pUnk, IEnumConnectionPoints **ppEnum)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(ppEnum, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ EnumConnectionPointsArgs args = {pUnk, ppEnum, &hr};
+ ConnectionPointContainer_EnumConnectionPoints_CallBack(&args);
+ return hr;
+}
+
+struct FindConnectionPointArgs
+{
+ IUnknown* pUnk;
+ const IID* riid;
+ IConnectionPoint **ppCP;
+ HRESULT* hr;
+};
+
+VOID __stdcall ConnectionPointContainer_FindConnectionPoint_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ FindConnectionPointArgs* pArgs = (FindConnectionPointArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = ConnectionPointContainer_FindConnectionPoint(pArgs->pUnk, *(pArgs->riid),
+ pArgs->ppCP);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, ConnectionPointContainer_FindConnectionPoint_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+HRESULT __stdcall ConnectionPointContainer_FindConnectionPoint_Wrapper(IUnknown* pUnk, REFIID riid, IConnectionPoint **ppCP)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(ppCP, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ FindConnectionPointArgs args = {pUnk, &riid, ppCP, &hr};
+ ConnectionPointContainer_FindConnectionPoint_CallBack(&args);
+ return hr;
+}
+
+
+//------------------------------------------------------------------------------------------
+// IObjectSafety methods for COM+ objects
+
+struct GetInterfaceSafetyArgs
+{
+ IUnknown* pUnk;
+ const IID* riid;
+ DWORD *pdwSupportedOptions;
+ DWORD *pdwEnabledOptions;
+ HRESULT* hr;
+};
+
+VOID __stdcall ObjectSafety_GetInterfaceSafetyOptions_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ GetInterfaceSafetyArgs* pArgs = (GetInterfaceSafetyArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = ObjectSafety_GetInterfaceSafetyOptions(pArgs->pUnk, *(pArgs->riid),
+ pArgs->pdwSupportedOptions,
+ pArgs->pdwEnabledOptions);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, ObjectSafety_GetInterfaceSafetyOptions_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+
+HRESULT __stdcall ObjectSafety_GetInterfaceSafetyOptions_Wrapper(IUnknown* pUnk, REFIID riid,
+ DWORD *pdwSupportedOptions, DWORD *pdwEnabledOptions)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ PRECONDITION(CheckPointer(pdwSupportedOptions, NULL_OK));
+ PRECONDITION(CheckPointer(pdwEnabledOptions, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ GetInterfaceSafetyArgs args = {pUnk, &riid, pdwSupportedOptions, pdwEnabledOptions, &hr};
+ ObjectSafety_GetInterfaceSafetyOptions_CallBack(&args);
+ return hr;
+}
+
+struct SetInterfaceSafetyArgs
+{
+ IUnknown* pUnk;
+ const IID* riid;
+ DWORD dwOptionSetMask;
+ DWORD dwEnabledOptions;
+ HRESULT* hr;
+};
+
+VOID __stdcall ObjectSafety_SetInterfaceSafetyOptions_CallBack(LPVOID ptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(CheckPointer(ptr));
+ }
+ CONTRACTL_END;
+
+ SetInterfaceSafetyArgs* pArgs = (SetInterfaceSafetyArgs*)ptr;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ *(pArgs->hr) = ObjectSafety_SetInterfaceSafetyOptions(pArgs->pUnk, *(pArgs->riid),
+ pArgs->dwOptionSetMask,
+ pArgs->dwEnabledOptions);
+ }
+ else
+ {
+ AppDomainDoCallBack(pWrap, ObjectSafety_SetInterfaceSafetyOptions_CallBack, pArgs, pArgs->hr);
+ }
+}
+
+
+HRESULT __stdcall ObjectSafety_SetInterfaceSafetyOptions_Wrapper(IUnknown* pUnk, REFIID riid,
+ DWORD dwOptionSetMask, DWORD dwEnabledOptions)
+{
+ SetupForComCallHR();
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pUnk));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ SetInterfaceSafetyArgs args = {pUnk, &riid, dwOptionSetMask, dwEnabledOptions, &hr};
+ ObjectSafety_SetInterfaceSafetyOptions_CallBack(&args);
+ return hr;
+}
+
+//------------------------------------------------------------------------------------------
+// ICustomPropertyProvider methods for Jupiter
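+//
+// Note: unlike the sections above, these wrappers check IsCurrentDomainValid
+// inline and only route through AppDomainDoCallBack on the cross-domain path,
+// so each callback body below is a straight-line invoke.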
+
+#define APPDOMAIN_DOCALLBACK_CONTRACT \
+ CONTRACTL \
+ { \
+ NOTHROW; \
+ GC_TRIGGERS; \
+ MODE_PREEMPTIVE; \
+ PRECONDITION(CheckPointer(ptr)); \
+ } \
+ CONTRACTL_END; \
+
+
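+// The macro above packages the contract shared by every callback in this
+// section, so each callback body reduces to a cast plus the forwarded call.
+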
+struct ICustomPropertyProvider_GetProperty_Args
+{
+ IUnknown *pPropertyProvider;
+ HSTRING hstrName;
+ IUnknown **ppProperty;
+ HRESULT *pHR;
+};
+
+VOID __stdcall ICustomPropertyProvider_GetProperty_Callback(LPVOID ptr)
+{
+ APPDOMAIN_DOCALLBACK_CONTRACT;
+
+ ICustomPropertyProvider_GetProperty_Args* pArgs = (ICustomPropertyProvider_GetProperty_Args*)ptr;
+ *(pArgs->pHR) = ICustomPropertyProvider_GetProperty(pArgs->pPropertyProvider, pArgs->hstrName, pArgs->ppProperty);
+}
+
+HRESULT __stdcall ICustomPropertyProvider_GetProperty_Wrapper(IUnknown *pPropertyProvider, HSTRING hstrName, /* [out] */ IUnknown **ppProperty)
+{
+ SetupThreadForComCall(E_OUTOFMEMORY);
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pPropertyProvider));
+ }
+ CONTRACTL_END;
+
+ if (ppProperty == NULL)
+ return E_POINTER;
+
+
+ HRESULT hr = S_OK;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pPropertyProvider);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ hr = ICustomPropertyProvider_GetProperty(pPropertyProvider, hstrName, ppProperty);
+ }
+ else
+ {
+ ICustomPropertyProvider_GetProperty_Args args = { pPropertyProvider, hstrName, ppProperty, &hr };
+ AppDomainDoCallBack(pWrap, ICustomPropertyProvider_GetProperty_Callback, &args, &hr);
+ }
+ return hr;
+}
+
+struct ICustomPropertyProvider_GetIndexedProperty_Args
+{
+ IUnknown *pPropertyProvider;
+ HSTRING hstrName;
+ TypeNameNative indexedParamType;
+ IUnknown **ppProperty;
+ HRESULT *pHR;
+};
+
+VOID __stdcall ICustomPropertyProvider_GetIndexedProperty_Callback(LPVOID ptr)
+{
+ APPDOMAIN_DOCALLBACK_CONTRACT;
+
+ ICustomPropertyProvider_GetIndexedProperty_Args* pArgs = (ICustomPropertyProvider_GetIndexedProperty_Args*)ptr;
+ *(pArgs->pHR) = ICustomPropertyProvider_GetIndexedProperty(pArgs->pPropertyProvider, pArgs->hstrName, pArgs->indexedParamType, pArgs->ppProperty);
+}
+
+HRESULT __stdcall ICustomPropertyProvider_GetIndexedProperty_Wrapper(IUnknown *pPropertyProvider,
+ HSTRING hstrName,
+ TypeNameNative indexedParamType,
+ /* [out, retval] */ IUnknown **ppProperty)
+{
+ SetupThreadForComCall(E_OUTOFMEMORY);
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pPropertyProvider));
+ }
+ CONTRACTL_END;
+
+
+ HRESULT hr = S_OK;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pPropertyProvider);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ hr = ICustomPropertyProvider_GetIndexedProperty(pPropertyProvider, hstrName, indexedParamType, ppProperty);
+ }
+ else
+ {
+ ICustomPropertyProvider_GetIndexedProperty_Args args = { pPropertyProvider, hstrName, indexedParamType, ppProperty, &hr };
+ AppDomainDoCallBack(pWrap, ICustomPropertyProvider_GetIndexedProperty_Callback, &args, &hr);
+ }
+
+ return hr;
+}
+
+struct ICustomPropertyProvider_GetStringRepresentation_Args
+{
+ IUnknown *pPropertyProvider;
+ HSTRING *phstrStringRepresentation;
+ HRESULT *pHR;
+};
+
+VOID __stdcall ICustomPropertyProvider_GetStringRepresentation_Callback(LPVOID ptr)
+{
+ APPDOMAIN_DOCALLBACK_CONTRACT;
+
+ ICustomPropertyProvider_GetStringRepresentation_Args* pArgs = (ICustomPropertyProvider_GetStringRepresentation_Args*)ptr;
+ *(pArgs->pHR) = ICustomPropertyProvider_GetStringRepresentation(pArgs->pPropertyProvider, pArgs->phstrStringRepresentation);
+}
+
+HRESULT __stdcall ICustomPropertyProvider_GetStringRepresentation_Wrapper(IUnknown *pPropertyProvider,
+ /* [out, retval] */ HSTRING *phstrStringRepresentation)
+{
+ SetupThreadForComCall(E_OUTOFMEMORY);
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pPropertyProvider));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pPropertyProvider);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ hr = ICustomPropertyProvider_GetStringRepresentation(pPropertyProvider, phstrStringRepresentation);
+ }
+ else
+ {
+ ICustomPropertyProvider_GetStringRepresentation_Args args = { pPropertyProvider, phstrStringRepresentation, &hr };
+ AppDomainDoCallBack(pWrap, ICustomPropertyProvider_GetStringRepresentation_Callback, &args, &hr);
+ }
+
+ return hr;
+}
+
+struct ICustomPropertyProvider_GetType_Args
+{
+ IUnknown *pPropertyProvider;
+ TypeNameNative *pTypeIdentifier;
+ HRESULT *pHR;
+};
+
+VOID __stdcall ICustomPropertyProvider_GetType_Callback(LPVOID ptr)
+{
+ APPDOMAIN_DOCALLBACK_CONTRACT;
+
+ ICustomPropertyProvider_GetType_Args* pArgs = (ICustomPropertyProvider_GetType_Args*)ptr;
+ *(pArgs->pHR) = ICustomPropertyProvider_GetType(pArgs->pPropertyProvider, pArgs->pTypeIdentifier);
+}
+
+HRESULT __stdcall ICustomPropertyProvider_GetType_Wrapper(IUnknown *pPropertyProvider,
+ /* [out, retval] */ TypeNameNative *pTypeIdentifier)
+{
+ SetupThreadForComCall(E_OUTOFMEMORY);
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pPropertyProvider));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pPropertyProvider);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ hr = ICustomPropertyProvider_GetType(pPropertyProvider, pTypeIdentifier);
+ }
+ else
+ {
+ ICustomPropertyProvider_GetType_Args args = { pPropertyProvider, pTypeIdentifier, &hr };
+ AppDomainDoCallBack(pWrap, ICustomPropertyProvider_GetType_Callback, &args, &hr);
+ }
+ return hr;
+}
+
+
+struct IStringable_ToString_Args
+{
+ IUnknown *pStringable;
+ HSTRING *pResult;
+ HRESULT *pHR;
+};
+
+VOID __stdcall IStringable_ToString_Callback(LPVOID ptr)
+{
+ APPDOMAIN_DOCALLBACK_CONTRACT;
+
+ IStringable_ToString_Args* pArgs = (IStringable_ToString_Args*)ptr;
+ *(pArgs->pHR) = IStringable_ToString(pArgs->pStringable, pArgs->pResult);
+}
+
+HRESULT __stdcall IStringable_ToString_Wrapper(IUnknown *pStringable,
+ /* [out, retval] */ HSTRING *pResult)
+{
+ SetupThreadForComCall(E_OUTOFMEMORY);
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pStringable));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ ComCallWrapper* pWrap = MapIUnknownToWrapper(pStringable);
+ if (IsCurrentDomainValid(pWrap))
+ {
+ hr = IStringable_ToString(pStringable, pResult);
+ }
+ else
+ {
+ IStringable_ToString_Args args = { pStringable, pResult, &hr };
+ AppDomainDoCallBack(pWrap, IStringable_ToString_Callback, &args, &hr);
+ }
+
+ return hr;
+}
+
+
+ULONG __stdcall ICCW_AddRefFromJupiter_Wrapper(IUnknown *pUnk)
+{
+ // We do not need to hook with the host here
+ SetupForComCallDWORDNoHostNotif();
+
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ return ICCW_AddRefFromJupiter(pUnk);
+}
+
+ULONG __stdcall ICCW_ReleaseFromJupiter_Wrapper(IUnknown *pUnk)
+{
+ // We do not need to hook with the host here
+ SetupForComCallDWORDNoHostNotif();
+
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ return ICCW_ReleaseFromJupiter(pUnk);
+}
+
+HRESULT __stdcall ICCW_Peg_Wrapper(IUnknown *pUnk)
+{
+ // We do not need to hook with the host here, and we cannot do the CanRunManagedCode
+ // check as we are most likely in the middle of a GC
+ SetupForComCallHRNoHostNotifNoCheckCanRunManagedCode();
+
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ return ICCW_Peg(pUnk);
+}
+
+HRESULT __stdcall ICCW_Unpeg_Wrapper(IUnknown *pUnk)
+{
+ // We do not need to hook with the host here, and we cannot do the CanRunManagedCode
+ // check as we are most likely in the middle of a GC
+ SetupForComCallHRNoHostNotifNoCheckCanRunManagedCode();
+
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ return ICCW_Unpeg(pUnk);
+}
diff --git a/src/vm/stringliteralmap.cpp b/src/vm/stringliteralmap.cpp
new file mode 100644
index 0000000000..faa5a6db1b
--- /dev/null
+++ b/src/vm/stringliteralmap.cpp
@@ -0,0 +1,677 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*============================================================
+**
+** Header: Map used for interning of string literals.
+**
+===========================================================*/
+
+#include "common.h"
+#include "eeconfig.h"
+#include "stringliteralmap.h"
+
+/*
+ Thread safety in GlobalStringLiteralMap / StringLiteralMap
+
+ A single lock protects the N StringLiteralMap objects and the single
+ GlobalStringLiteralMap rooted in the SystemDomain at any time. It is
+
+ SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal
+
+ At one time each StringLiteralMap had its own lock to protect
+ the entry hash table as well, and Interlocked operations were done on the
+ ref count of the contained StringLiteralEntries. But anything of import
+ needed to be done under the global lock mentioned above or races would
+ result. (For example, an app domain shuts down, doing final release on
+ a StringLiteralEntry, but at that moment the entry is being handed out
+ in another appdomain and addref'd only after the count went to 0.)
+
+ The rule is:
+
+ Any AddRef()/Release() calls on StringLiteralEntry need to be under the lock.
+ Any insert/deletes from the StringLiteralMap or GlobalStringLiteralMap
+ need to be done under the lock.
+
+ The only thing you can do without the lock is look up an existing StringLiteralEntry
+ in a StringLiteralMap hash table. This is true because these lookup calls
+ will all come before destruction of the map, the hash table is safe for multiple readers,
+ and we know the StringLiteralEntry so found 1) can't be destroyed because that table keeps
+ an AddRef on it and 2) isn't internally modified once created.
+*/
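+
+/*
+ For illustration only (not part of the runtime): the rule above means any
+ ref count change happens under the global Crst, e.g.
+
+ {
+ CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal));
+ pEntry->AddRef(); // cannot race with a final Release()
+ }
+
+ while a bare m_StringToEntryHashTable->GetValue(...) lookup needs no lock.
+*/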
+
+#define GLOBAL_STRING_TABLE_BUCKET_SIZE 128
+#define INIT_NUM_APP_DOMAIN_STRING_BUCKETS 59
+#define INIT_NUM_GLOBAL_STRING_BUCKETS 131
+
+// Assumes that the memory pool's per-block data size is the same as sizeof(StringLiteralEntry)
+#define EEHASH_MEMORY_POOL_GROW_COUNT 128
+
+StringLiteralEntryArray *StringLiteralEntry::s_EntryList = NULL;
+DWORD StringLiteralEntry::s_UsedEntries = 0;
+StringLiteralEntry *StringLiteralEntry::s_FreeEntryList = NULL;
+
+StringLiteralMap::StringLiteralMap()
+: m_StringToEntryHashTable(NULL)
+, m_MemoryPool(NULL)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+}
+
+void StringLiteralMap::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(this));
+ INJECT_FAULT(ThrowOutOfMemory());
+ }
+ CONTRACTL_END;
+
+ // Allocate the memory pool, with the initial count set to a quarter of the grow count
+ m_MemoryPool = new MemoryPool (SIZEOF_EEHASH_ENTRY, EEHASH_MEMORY_POOL_GROW_COUNT, EEHASH_MEMORY_POOL_GROW_COUNT/4);
+
+ m_StringToEntryHashTable = new EEUnicodeStringLiteralHashTable ();
+
+ LockOwner lock = {&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal), IsOwnerOfCrst};
+ if (!m_StringToEntryHashTable->Init(INIT_NUM_APP_DOMAIN_STRING_BUCKETS, &lock, m_MemoryPool))
+ ThrowOutOfMemory();
+}
+
+StringLiteralMap::~StringLiteralMap()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ // We do need to take the GlobalStringLiteralMap lock because we are manipulating
+ // StringLiteralEntry objects that belong to it.
+ // Note that we remember the current entry and release it only when the
+ // enumerator has advanced to the next entry, so that we don't end up deleting the
+ // current entry itself and killing the enumerator.
+
+ if (m_StringToEntryHashTable != NULL)
+ {
+ // We need the global lock anytime we release StringLiteralEntry objects
+ CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMapNoCreate()->m_HashTableCrstGlobal));
+
+ StringLiteralEntry *pEntry = NULL;
+ EEHashTableIteration Iter;
+
+#ifdef _DEBUG
+ m_StringToEntryHashTable->SuppressSyncCheck();
+#endif
+
+ m_StringToEntryHashTable->IterateStart(&Iter);
+ if (m_StringToEntryHashTable->IterateNext(&Iter))
+ {
+ pEntry = (StringLiteralEntry*)m_StringToEntryHashTable->IterateGetValue(&Iter);
+
+ while (m_StringToEntryHashTable->IterateNext(&Iter))
+ {
+ // Release the previous entry
+ _ASSERTE(pEntry);
+ pEntry->Release();
+
+ // Set the current entry to the next one
+ pEntry = (StringLiteralEntry*)m_StringToEntryHashTable->IterateGetValue(&Iter);
+ }
+ // Release the last entry
+ _ASSERTE(pEntry);
+ pEntry->Release();
+ }
+ // else there were no entries.
+
+ // Delete the hash table first. The dtor of the hash table would clean up all the entries.
+ delete m_StringToEntryHashTable;
+ }
+
+ // Delete the pool later, since the dtor above would need it.
+ if (m_MemoryPool != NULL)
+ delete m_MemoryPool;
+}
+
+
+
+STRINGREF *StringLiteralMap::GetStringLiteral(EEStringData *pStringData, BOOL bAddIfNotFound, BOOL bAppDomainWontUnload)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(pStringData));
+ }
+ CONTRACTL_END;
+
+ HashDatum Data;
+
+ DWORD dwHash = m_StringToEntryHashTable->GetHash(pStringData);
+ if (m_StringToEntryHashTable->GetValue(pStringData, &Data, dwHash))
+ {
+ STRINGREF *pStrObj = NULL;
+ pStrObj = ((StringLiteralEntry*)Data)->GetStringObject();
+ _ASSERTE(!bAddIfNotFound || pStrObj);
+ return pStrObj;
+ }
+ else
+ {
+ // Retrieve the string literal from the global string literal map.
+ CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal));
+
+ // TODO: We can be more efficient by checking our local hash table now to see if
+ // someone beat us to inserting it. (m_StringToEntryHashTable->GetValue(pStringData, &Data))
+ // (Rather than waiting until after we look the string up in the global map)
+
+ StringLiteralEntryHolder pEntry(SystemDomain::GetGlobalStringLiteralMap()->GetStringLiteral(pStringData, dwHash, bAddIfNotFound));
+
+ _ASSERTE(pEntry || !bAddIfNotFound);
+
+ // If pEntry is non-null then the entry exists in the Global map. (either we retrieved it or added it just now)
+ if (pEntry)
+ {
+ // If the entry exists in the Global map and the appdomain won't ever unload then we really don't need to add a
+ // hash entry in the appdomain-specific map.
+ // TODO: except that by not inserting into our local table we always take the global map lock
+ // and come into this path, when we could succeed at a lock free lookup above.
+
+ if (!bAppDomainWontUnload)
+ {
+ // Make sure some other thread has not already added it.
+ if (!m_StringToEntryHashTable->GetValue(pStringData, &Data))
+ {
+ // Insert the handle to the string into the hash table.
+ m_StringToEntryHashTable->InsertValue(pStringData, (LPVOID)pEntry, FALSE);
+ }
+ else
+ {
+ pEntry.Release(); //while we're still under lock
+ }
+ }
+#ifdef _DEBUG
+ else
+ {
+ LOG((LF_APPDOMAIN, LL_INFO10000, "Avoided adding String literal to appdomain map: size: %d bytes\n", pStringData->GetCharCount()));
+ }
+#endif
+ pEntry.SuppressRelease();
+ STRINGREF *pStrObj = NULL;
+ // Retrieve the string objectref from the string literal entry.
+ pStrObj = pEntry->GetStringObject();
+ _ASSERTE(!bAddIfNotFound || pStrObj);
+ return pStrObj;
+ }
+ }
+ // If the bAddIfNotFound flag is set then we had better have a string
+ // object at this point.
+ _ASSERTE(!bAddIfNotFound);
+ return NULL;
+}
+
+STRINGREF *StringLiteralMap::GetInternedString(STRINGREF *pString, BOOL bAddIfNotFound, BOOL bAppDomainWontUnload)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(pString));
+ }
+ CONTRACTL_END;
+
+ HashDatum Data;
+ EEStringData StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());
+
+ DWORD dwHash = m_StringToEntryHashTable->GetHash(&StringData);
+ if (m_StringToEntryHashTable->GetValue(&StringData, &Data, dwHash))
+ {
+ STRINGREF *pStrObj = NULL;
+ pStrObj = ((StringLiteralEntry*)Data)->GetStringObject();
+ _ASSERTE(!bAddIfNotFound || pStrObj);
+ return pStrObj;
+ }
+ else
+ {
+ CrstHolder gch(&(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal));
+
+ // TODO: We can be more efficient by checking our local hash table now to see if
+ // someone beat us to inserting it. (m_StringToEntryHashTable->GetValue(pStringData, &Data))
+ // (Rather than waiting until after we look the string up in the global map)
+
+ // Retrieve the string literal from the global string literal map.
+ StringLiteralEntryHolder pEntry(SystemDomain::GetGlobalStringLiteralMap()->GetInternedString(pString, dwHash, bAddIfNotFound));
+
+ _ASSERTE(pEntry || !bAddIfNotFound);
+
+ // If pEntry is non-null then the entry exists in the Global map. (either we retrieved it or added it just now)
+ if (pEntry)
+ {
+ // If the entry exists in the Global map and the appdomain won't ever unload then we really don't need to add a
+ // hash entry in the appdomain-specific map.
+ // TODO: except that by not inserting into our local table we always take the global map lock
+ // and come into this path, when we could succeed at a lock free lookup above.
+
+ if (!bAppDomainWontUnload)
+ {
+ // Since GlobalStringLiteralMap::GetInternedString() could have caused a GC,
+ // we need to recreate the string data.
+ StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());
+
+ // Make sure some other thread has not already added it.
+ if (!m_StringToEntryHashTable->GetValue(&StringData, &Data))
+ {
+ // Insert the handle to the string into the hash table.
+ m_StringToEntryHashTable->InsertValue(&StringData, (LPVOID)pEntry, FALSE);
+ }
+ else
+ {
+ pEntry.Release(); // while we're under lock
+ }
+ }
+ pEntry.SuppressRelease();
+ // Retrieve the string objectref from the string literal entry.
+ STRINGREF *pStrObj = NULL;
+ pStrObj = pEntry->GetStringObject();
+ return pStrObj;
+ }
+ }
+ // If the bAddIfNotFound flag is set then we had better have a string
+ // object at this point.
+ _ASSERTE(!bAddIfNotFound);
+
+ return NULL;
+}
+
+GlobalStringLiteralMap::GlobalStringLiteralMap()
+: m_StringToEntryHashTable(NULL)
+, m_MemoryPool(NULL)
+, m_HashTableCrstGlobal(CrstGlobalStrLiteralMap)
+, m_LargeHeapHandleTable(SystemDomain::System(), GLOBAL_STRING_TABLE_BUCKET_SIZE)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ m_LargeHeapHandleTable.RegisterCrstDebug(&m_HashTableCrstGlobal);
+#endif
+}
+
+GlobalStringLiteralMap::~GlobalStringLiteralMap()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ // if we are deleting the map then either it is shutdown time or else there was a race trying to create
+ // the initial map and this one was the loser
+ // (i.e. two threads made a map and the InterlockedCompareExchange failed for one of them and
+ // now it is deleting the map)
+ //
+ // if it's not the main map, then the map we are deleting better be empty!
+
+ // there must be *some* global table
+ _ASSERTE(SystemDomain::GetGlobalStringLiteralMapNoCreate() != NULL);
+
+ if (SystemDomain::GetGlobalStringLiteralMapNoCreate() != this)
+ {
+ // if this isn't the real global table then it must be empty
+ _ASSERTE(m_StringToEntryHashTable->IsEmpty());
+
+ // Delete the hash table first. The dtor of the hash table would clean up all the entries.
+ delete m_StringToEntryHashTable;
+ // Delete the pool later, since the dtor above would need it.
+ delete m_MemoryPool;
+ }
+ else
+ {
+ // We are shutting down, the OS will reclaim the memory from the StringLiteralEntries,
+ // m_MemoryPool and m_StringToEntryHashTable.
+ _ASSERTE(g_fProcessDetach);
+ }
+}
+
+void GlobalStringLiteralMap::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+ INJECT_FAULT(ThrowOutOfMemory());
+ }
+ CONTRACTL_END;
+
+ // Allocate the memory pool, with the initial count set to a quarter of the grow count
+ m_MemoryPool = new MemoryPool (SIZEOF_EEHASH_ENTRY, EEHASH_MEMORY_POOL_GROW_COUNT, EEHASH_MEMORY_POOL_GROW_COUNT/4);
+
+ m_StringToEntryHashTable = new EEUnicodeStringLiteralHashTable ();
+
+ LockOwner lock = {&m_HashTableCrstGlobal, IsOwnerOfCrst};
+ if (!m_StringToEntryHashTable->Init(INIT_NUM_GLOBAL_STRING_BUCKETS, &lock, m_MemoryPool))
+ ThrowOutOfMemory();
+}
+
+StringLiteralEntry *GlobalStringLiteralMap::GetStringLiteral(EEStringData *pStringData, DWORD dwHash, BOOL bAddIfNotFound)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(pStringData));
+ PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ HashDatum Data;
+ StringLiteralEntry *pEntry = NULL;
+
+ if (m_StringToEntryHashTable->GetValue(pStringData, &Data, dwHash))
+ {
+ pEntry = (StringLiteralEntry*)Data;
+ // If the entry is already in the table then addref it before we return it.
+ if (pEntry)
+ pEntry->AddRef();
+ }
+ else
+ {
+ if (bAddIfNotFound)
+ pEntry = AddStringLiteral(pStringData);
+ }
+
+ return pEntry;
+}
+
+StringLiteralEntry *GlobalStringLiteralMap::GetInternedString(STRINGREF *pString, DWORD dwHash, BOOL bAddIfNotFound)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(pString));
+ PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ EEStringData StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());
+
+ HashDatum Data;
+ StringLiteralEntry *pEntry = NULL;
+
+ if (m_StringToEntryHashTable->GetValue(&StringData, &Data, dwHash))
+ {
+ pEntry = (StringLiteralEntry*)Data;
+ // If the entry is already in the table then addref it before we return it.
+ if (pEntry)
+ pEntry->AddRef();
+ }
+ else
+ {
+ if (bAddIfNotFound)
+ pEntry = AddInternedString(pString);
+ }
+
+ return pEntry;
+}
+
+#ifdef LOGGING
+static void LogStringLiteral(__in_z const char* action, EEStringData *pStringData)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ int length = pStringData->GetCharCount();
+ length = min(length, 100);
+ WCHAR *szString = (WCHAR *)_alloca((length + 1) * sizeof(WCHAR));
+ memcpyNoGCRefs((void*)szString, (void*)pStringData->GetStringBuffer(), length * sizeof(WCHAR));
+ szString[length] = '\0';
+ LOG((LF_APPDOMAIN, LL_INFO10000, "String literal \"%S\" %s to Global map, size %d bytes\n", szString, action, pStringData->GetCharCount()));
+}
+#endif
+
+STRINGREF AllocateStringObject(EEStringData *pStringData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Create the COM+ string object.
+ DWORD cCount = pStringData->GetCharCount();
+
+ STRINGREF strObj = AllocateString(cCount);
+
+ GCPROTECT_BEGIN(strObj)
+ {
+ // Copy the string constant into the COM+ string object. The code
+ // will add an extra null at the end for safety purposes, but since
+ // we support embedded nulls, one should never treat the string as
+ // null terminated.
+ LPWSTR strDest = strObj->GetBuffer();
+ memcpyNoGCRefs(strDest, pStringData->GetStringBuffer(), cCount*sizeof(WCHAR));
+ strDest[cCount] = 0;
+
+ // IsOnlyLowChars actually indicates whether we can sort the string in a fast way.
+ // Take a look at RegMeta::DefineUserString to see how we set the flag.
+ // The flag is persisted to the assembly containing the string literals.
+ // We restore the flag when we load strings from assembly (MDInternalRO::GetUserString.)
+ //
+ if (pStringData->GetIsOnlyLowChars())
+ {
+ strObj->SetHighCharState(STRING_STATE_FAST_OPS);
+ }
+ }
+ GCPROTECT_END();
+
+ return strObj;
+}
+
+StringLiteralEntry *GlobalStringLiteralMap::AddStringLiteral(EEStringData *pStringData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ StringLiteralEntry *pRet;
+
+ {
+ LargeHeapHandleBlockHolder pStrObj(&m_LargeHeapHandleTable,1);
+ // Create the COM+ string object.
+ STRINGREF strObj = AllocateStringObject(pStringData);
+
+ // Allocate a handle for the string.
+ SetObjectReference(pStrObj[0], (OBJECTREF) strObj, NULL);
+
+
+ // Allocate the StringLiteralEntry.
+ StringLiteralEntryHolder pEntry(StringLiteralEntry::AllocateEntry(pStringData, (STRINGREF*)pStrObj[0]));
+ pStrObj.SuppressRelease();
+ // Insert the handle to the string into the hash table.
+ m_StringToEntryHashTable->InsertValue(pStringData, (LPVOID)pEntry, FALSE);
+ pEntry.SuppressRelease();
+ pRet = pEntry;
+
+#ifdef LOGGING
+ LogStringLiteral("added", pStringData);
+#endif
+ }
+
+ return pRet;
+}
+
+StringLiteralEntry *GlobalStringLiteralMap::AddInternedString(STRINGREF *pString)
+{
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ EEStringData StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());
+ StringLiteralEntry *pRet;
+
+ {
+ LargeHeapHandleBlockHolder pStrObj(&m_LargeHeapHandleTable,1);
+ SetObjectReference(pStrObj[0], (OBJECTREF) *pString, NULL);
+
+ // Since the allocation might have caused a GC we need to re-get the
+ // string data.
+ StringData = EEStringData((*pString)->GetStringLength(), (*pString)->GetBuffer());
+
+ StringLiteralEntryHolder pEntry(StringLiteralEntry::AllocateEntry(&StringData, (STRINGREF*)pStrObj[0]));
+ pStrObj.SuppressRelease();
+
+ // Insert the handle to the string into the hash table.
+ m_StringToEntryHashTable->InsertValue(&StringData, (LPVOID)pEntry, FALSE);
+ pEntry.SuppressRelease();
+ pRet = pEntry;
+ }
+
+ return pRet;
+}
+
+void GlobalStringLiteralMap::RemoveStringLiteralEntry(StringLiteralEntry *pEntry)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pEntry));
+ PRECONDITION(m_HashTableCrstGlobal.OwnedByCurrentThread());
+ PRECONDITION(CheckPointer(this));
+ }
+ CONTRACTL_END;
+
+ // Remove the entry from the hash table.
+ {
+ GCX_COOP();
+
+ EEStringData StringData;
+ pEntry->GetStringData(&StringData);
+
+ BOOL bSuccess;
+ bSuccess = m_StringToEntryHashTable->DeleteValue(&StringData);
+ // This assert is commented out to accommodate the case when StringLiteralEntryHolder
+ // releases this object after a failed insertion into the hash table.
+ //_ASSERTE(bSuccess);
+
+#ifdef LOGGING
+ // We need to do this logging within the GCX_COOP(), as a gc will render
+ // our StringData pointers stale.
+ if (bSuccess)
+ {
+ LogStringLiteral("removed", &StringData);
+ }
+#endif
+
+ // Release the object handle that the entry was using.
+ STRINGREF *pObjRef = pEntry->GetStringObject();
+ m_LargeHeapHandleTable.ReleaseHandles((OBJECTREF*)pObjRef, 1);
+ }
+
+ // We do not delete the StringLiteralEntry itself that will be done in the
+ // release method of the StringLiteralEntry.
+}
+
+StringLiteralEntry *StringLiteralEntry::AllocateEntry(EEStringData *pStringData, STRINGREF *pStringObj)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS; // GC_TRIGGERS because in the precondition below GetGlobalStringLiteralMap() might need to create the map
+ MODE_COOPERATIVE;
+ PRECONDITION(SystemDomain::GetGlobalStringLiteralMap()->m_HashTableCrstGlobal.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ // Note: we don't synchronize here because AllocateEntry is called while the hash Crst is held.
+ void *pMem = NULL;
+ if (s_FreeEntryList != NULL)
+ {
+ pMem = s_FreeEntryList;
+ s_FreeEntryList = s_FreeEntryList->m_pNext;
+ _ASSERTE (((StringLiteralEntry*)pMem)->m_bDeleted);
+ }
+ else
+ {
+ if (s_EntryList == NULL || (s_UsedEntries >= MAX_ENTRIES_PER_CHUNK))
+ {
+ StringLiteralEntryArray *pNew = new StringLiteralEntryArray();
+ pNew->m_pNext = s_EntryList;
+ s_EntryList = pNew;
+ s_UsedEntries = 0;
+ }
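+ // m_Entries is a raw BYTE array, so index it in sizeof(StringLiteralEntry)
+ // steps; the placement new below constructs the entry in that slot.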
+ pMem = &(s_EntryList->m_Entries[s_UsedEntries++*sizeof(StringLiteralEntry)]);
+ }
+ _ASSERTE (pMem && "Unable to allocate String literal Entry");
+
+ return new (pMem) StringLiteralEntry (pStringData, pStringObj);
+}
+
+void StringLiteralEntry::DeleteEntry (StringLiteralEntry *pEntry)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(SystemDomain::GetGlobalStringLiteralMapNoCreate()->m_HashTableCrstGlobal.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (VolatileLoad(&pEntry->m_dwRefCount) == 0);
+
+#ifdef _DEBUG
+ memset (pEntry, 0xc, sizeof(StringLiteralEntry));
+#endif
+
+#ifdef _DEBUG
+ pEntry->m_bDeleted = TRUE;
+#endif
+
+ // The free list is protected by m_HashTableCrstGlobal
+ pEntry->m_pNext = s_FreeEntryList;
+ s_FreeEntryList = pEntry;
+}
+
+
+
diff --git a/src/vm/stringliteralmap.h b/src/vm/stringliteralmap.h
new file mode 100644
index 0000000000..6a989fb92e
--- /dev/null
+++ b/src/vm/stringliteralmap.h
@@ -0,0 +1,295 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*============================================================
+**
+** Header: Map used for interning of string literals.
+**
+===========================================================*/
+
+#ifndef _STRINGLITERALMAP_H
+#define _STRINGLITERALMAP_H
+
+#include "vars.hpp"
+#include "appdomain.hpp"
+#include "eehash.h"
+#include "eeconfig.h" // For OS pages size
+#include "memorypool.h"
+
+
+class StringLiteralEntry;
+// Allocate 16 entries (approx size sizeof(StringLiteralEntry)*16)
+#define MAX_ENTRIES_PER_CHUNK 16
+
+STRINGREF AllocateStringObject(EEStringData *pStringData);
+
+// Loader allocator specific string literal map.
+class StringLiteralMap
+{
+public:
+ // Constructor and destructor.
+ StringLiteralMap();
+ ~StringLiteralMap();
+
+ // Initialization method.
+ void Init();
+
+ size_t GetSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_MemoryPool?m_MemoryPool->GetSize():0;
+ }
+
+ // Method to retrieve a string from the map.
+ STRINGREF *GetStringLiteral(EEStringData *pStringData, BOOL bAddIfNotFound, BOOL bAppDomainWontUnload);
+
+ // Method to explicitly intern a string object.
+ STRINGREF *GetInternedString(STRINGREF *pString, BOOL bAddIfNotFound, BOOL bAppDomainWontUnload);
+
+private:
+ // Hash table that maps a Unicode string to a COM+ string handle.
+ EEUnicodeStringLiteralHashTable *m_StringToEntryHashTable;
+
+ // The memorypool for hash entries for this hash table.
+ MemoryPool *m_MemoryPool;
+};
+
+// Global string literal map.
+class GlobalStringLiteralMap
+{
+ // StringLiteralMap and StringLiteralEntry need to acquire the crst of the global string literal map.
+ friend class StringLiteralMap;
+ friend class StringLiteralEntry;
+
+public:
+ // Constructor and destructor.
+ GlobalStringLiteralMap();
+ ~GlobalStringLiteralMap();
+
+ // Initialization method.
+ void Init();
+
+ // Method to retrieve a string from the map. Takes a precomputed hash (for perf).
+ StringLiteralEntry *GetStringLiteral(EEStringData *pStringData, DWORD dwHash, BOOL bAddIfNotFound);
+
+ // Method to explicitly intern a string object. Takes a precomputed hash (for perf).
+ StringLiteralEntry *GetInternedString(STRINGREF *pString, DWORD dwHash, BOOL bAddIfNotFound);
+
+ // Method to calculate the hash
+ DWORD GetHash(EEStringData* pData)
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_StringToEntryHashTable->GetHash(pData);
+ }
+
+ // public method to retrieve m_HashTableCrstGlobal
+ Crst* GetHashTableCrstGlobal()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_HashTableCrstGlobal;
+ }
+
+private:
+ // Helper method to add a string to the global string literal map.
+ StringLiteralEntry *AddStringLiteral(EEStringData *pStringData);
+
+ // Helper method to add an interned string.
+ StringLiteralEntry *AddInternedString(STRINGREF *pString);
+
+ // Called by StringLiteralEntry when its RefCount falls to 0.
+ void RemoveStringLiteralEntry(StringLiteralEntry *pEntry);
+
+ // Hash table that maps a Unicode string to a StringLiteralEntry.
+ EEUnicodeStringLiteralHashTable *m_StringToEntryHashTable;
+
+ // The memorypool for hash entries for this hash table.
+ MemoryPool *m_MemoryPool;
+
+ // The hash table critical section.
+ // (The Global suffix makes it clear in context whether the global table or the
+ // per-appdomain table is being locked. There was sometimes confusion in the code;
+ // naming the global one distinctly avoids this problem and prevents copy/paste errors.)
+
+ Crst m_HashTableCrstGlobal;
+
+ // The large heap handle table.
+ LargeHeapHandleTable m_LargeHeapHandleTable;
+
+};
+
+class StringLiteralEntryArray;
+
+// Ref counted entry representing a string literal.
+class StringLiteralEntry
+{
+private:
+ StringLiteralEntry(EEStringData *pStringData, STRINGREF *pStringObj)
+ : m_pStringObj(pStringObj), m_dwRefCount(1)
+#ifdef _DEBUG
+ , m_bDeleted(FALSE)
+#endif
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+protected:
+ ~StringLiteralEntry()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer<void>(this));
+ }
+ CONTRACTL_END;
+ }
+
+public:
+ void AddRef()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer<void>(this));
+ PRECONDITION((LONG)VolatileLoad(&m_dwRefCount) > 0);
+ PRECONDITION(SystemDomain::GetGlobalStringLiteralMapNoCreate()->m_HashTableCrstGlobal.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (!m_bDeleted);
+
+ // We will keep the item alive forever if the refcount overflowed
+ if ((LONG)VolatileLoad(&m_dwRefCount) < 0)
+ return;
+
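+ // A plain load/store pair (rather than an interlocked increment) suffices
+ // here because the contract requires the global Crst to be held.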
+ VolatileStore(&m_dwRefCount, VolatileLoad(&m_dwRefCount) + 1);
+ }
+#ifndef DACCESS_COMPILE
+ FORCEINLINE static void StaticRelease(StringLiteralEntry* pEntry)
+ {
+ CONTRACTL
+ {
+ PRECONDITION(SystemDomain::GetGlobalStringLiteralMapNoCreate()->m_HashTableCrstGlobal.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ pEntry->Release();
+ }
+#else
+ FORCEINLINE static void StaticRelease(StringLiteralEntry* /* pEntry */)
+ {
+ WRAPPER_NO_CONTRACT;
+ DacNotImpl();
+ }
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+ void Release()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer<void>(this));
+ PRECONDITION(VolatileLoad(&m_dwRefCount) > 0);
+ PRECONDITION(SystemDomain::GetGlobalStringLiteralMapNoCreate()->m_HashTableCrstGlobal.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ // We will keep the item alive forever if the refcount overflowed
+ if ((LONG)VolatileLoad(&m_dwRefCount) < 0)
+ return;
+
+ VolatileStore(&m_dwRefCount, VolatileLoad(&m_dwRefCount) - 1);
+ if (VolatileLoad(&m_dwRefCount) == 0)
+ {
+ _ASSERTE(SystemDomain::GetGlobalStringLiteralMapNoCreate());
+ SystemDomain::GetGlobalStringLiteralMapNoCreate()->RemoveStringLiteralEntry(this);
+ // Puts this entry in the free list
+ DeleteEntry (this);
+ }
+ }
+#endif // DACCESS_COMPILE
+
+ LONG GetRefCount()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ if(GetThread()){GC_NOTRIGGER;}else{DISABLED(GC_TRIGGERS);};
+ PRECONDITION(CheckPointer(this));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (!m_bDeleted);
+
+ return (VolatileLoad(&m_dwRefCount));
+ }
+
+ STRINGREF* GetStringObject()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ if(GetThread()){GC_NOTRIGGER;}else{DISABLED(GC_TRIGGERS);};
+ PRECONDITION(CheckPointer(this));
+ }
+ CONTRACTL_END;
+ return m_pStringObj;
+ }
+
+ void GetStringData(EEStringData *pStringData)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ if(GetThread()){GC_NOTRIGGER;}else{DISABLED(GC_TRIGGERS);};
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(this));
+ PRECONDITION(CheckPointer(pStringData));
+ }
+ CONTRACTL_END;
+
+ WCHAR *thisChars;
+ int thisLength;
+
+ ObjectToSTRINGREF(*(StringObject**)m_pStringObj)->RefInterpretGetStringValuesDangerousForGC(&thisChars, &thisLength);
+ pStringData->SetCharCount (thisLength); // thisLength is in WCHARs and that's what EEStringData's char count wants
+ pStringData->SetStringBuffer (thisChars);
+ }
+
+ static StringLiteralEntry *AllocateEntry(EEStringData *pStringData, STRINGREF *pStringObj);
+ static void DeleteEntry (StringLiteralEntry *pEntry);
+
+private:
+ STRINGREF* m_pStringObj;
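+ // While the entry is live m_dwRefCount is in use; once it is on the free
+ // list the same storage is reinterpreted as the m_pNext link.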
+ union
+ {
+ DWORD m_dwRefCount;
+ StringLiteralEntry *m_pNext;
+ };
+
+#ifdef _DEBUG
+ BOOL m_bDeleted;
+#endif
+
+ // The static lists below are protected by GetGlobalStringLiteralMap()->m_HashTableCrstGlobal
+ static StringLiteralEntryArray *s_EntryList; // always the first entry array in the chain.
+ static DWORD s_UsedEntries; // number of entries used up in the first array
+ static StringLiteralEntry *s_FreeEntryList; // free list chained thru the arrays.
+};
+
+typedef Wrapper<StringLiteralEntry*,DoNothing,StringLiteralEntry::StaticRelease> StringLiteralEntryHolder;
+
+class StringLiteralEntryArray
+{
+public:
+ StringLiteralEntryArray *m_pNext;
+ BYTE m_Entries[MAX_ENTRIES_PER_CHUNK*sizeof(StringLiteralEntry)];
+};
+
+#endif // _STRINGLITERALMAP_H
+
diff --git a/src/vm/stubcache.cpp b/src/vm/stubcache.cpp
new file mode 100644
index 0000000000..1b066cba16
--- /dev/null
+++ b/src/vm/stubcache.cpp
@@ -0,0 +1,303 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: stubcache.cpp
+//
+
+//
+// Base class for caching stubs.
+//
+
+
+#include "common.h"
+#include "stubcache.h"
+#include "stublink.h"
+#include "cgensys.h"
+#include "excep.h"
+
+//---------------------------------------------------------
+// Constructor
+//---------------------------------------------------------
+StubCacheBase::StubCacheBase(LoaderHeap *pHeap) :
+ CClosedHashBase(
+#ifdef _DEBUG
+ 3,
+#else
+ 17, // CClosedHashTable will grow as necessary
+#endif
+
+ sizeof(STUBHASHENTRY),
+ FALSE
+ ),
+ m_crst(CrstStubCache),
+ m_heap(pHeap)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_PAL
+ if (m_heap == NULL)
+ m_heap = SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap();
+#endif
+
+}
+
+
+//---------------------------------------------------------
+// Destructor
+//---------------------------------------------------------
+StubCacheBase::~StubCacheBase()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ STUBHASHENTRY *phe = (STUBHASHENTRY*)GetFirst();
+ while (phe)
+ {
+ _ASSERTE(NULL != phe->m_pStub);
+ phe->m_pStub->DecRef();
+ phe = (STUBHASHENTRY*)GetNext((BYTE*)phe);
+ }
+}
+
+
+
+//---------------------------------------------------------
+// Returns the equivalent hashed Stub, creating a new hash
+// entry if necessary. If the latter, will call out to CompileStub.
+//
+// Refcounting:
+// The caller is responsible for DecRef'ing the returned stub in
+// order to avoid leaks.
+//---------------------------------------------------------
+Stub *StubCacheBase::Canonicalize(const BYTE * pRawStub)
+{
+ CONTRACT (Stub*)
+ {
+ STANDARD_VM_CHECK;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ STUBHASHENTRY *phe = NULL;
+
+ {
+ CrstHolder ch(&m_crst);
+
+ // Try to find the stub
+ phe = (STUBHASHENTRY*)Find((LPVOID)pRawStub);
+ if (phe)
+ {
+ StubHolder<Stub> pstub;
+ pstub = phe->m_pStub;
+
+ // IncRef as we're returning a reference to our caller.
+ pstub->IncRef();
+
+ pstub.SuppressRelease();
+ RETURN pstub;
+ }
+ }
+
+ // Couldn't find it, let's try to compile it.
+ CPUSTUBLINKER sl;
+ CPUSTUBLINKER *psl = &sl;
+ CompileStub(pRawStub, psl);
+
+ // Append the raw stub to the native stub
+ // and link up the stub.
+ CodeLabel *plabel = psl->EmitNewCodeLabel();
+ psl->EmitBytes(pRawStub, Length(pRawStub));
+ StubHolder<Stub> pstub;
+ pstub = psl->Link(m_heap);
+ UINT32 offset = psl->GetLabelOffset(plabel);
+
+ if (offset > 0xffff)
+ COMPlusThrowOM();
+
+ {
+ CrstHolder ch(&m_crst);
+
+ bool bNew;
+ phe = (STUBHASHENTRY*)FindOrAdd((LPVOID)pRawStub, /*modifies*/bNew);
+ if (phe)
+ {
+ if (bNew)
+ {
+ phe->m_pStub = pstub;
+ phe->m_offsetOfRawStub = (UINT16)offset;
+
+ AddStub(pRawStub, pstub);
+ }
+ else
+ {
+                // If we got here, some other thread got in
+                // and registered an identical stub during
+                // the window in which we were outside the m_crst.
+
+                // Under DEBUG, two identical ML streams can actually compile
+                // to different compiled stubs because the checked build
+                // toggles between the inlined and API versions of TLSGetValue.
+                //_ASSERTE(phe->m_offsetOfRawStub == (UINT16)offset);
+
+                // Use the previously created stub.
+                // This will DecRef the new stub for us.
+ pstub = phe->m_pStub;
+ }
+ // IncRef so that caller has firm ownership of stub.
+ pstub->IncRef();
+ }
+ }
+
+ if (!phe)
+ {
+ // Couldn't grow hash table due to lack of memory.
+ COMPlusThrowOM();
+ }
+
+ pstub.SuppressRelease();
+ RETURN pstub;
+}
+
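+// Usage sketch (illustrative, not part of the original change; pMyCache is a
+// hypothetical StubCacheBase-derived cache): every successful Canonicalize
+// returns a stub with an extra reference taken on the caller's behalf, so the
+// call is paired with DecRef:
+//
+//     Stub *pStub = pMyCache->Canonicalize(pRawStub);  // ref taken for caller
+//     // ... use pStub ...
+//     pStub->DecRef();                                 // release caller's ref
+//
+// The hash table holds its own reference, released in ~StubCacheBase.
+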
+
+void StubCacheBase::AddStub(const BYTE* pRawStub, Stub* pNewStub)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // By default, don't do anything.
+ return;
+}
+
+
+//*****************************************************************************
+// Hash is called with a pointer to an element in the table. You must override
+// this method and provide a hash algorithm for your element type.
+//*****************************************************************************
+unsigned int StubCacheBase::Hash( // The key value.
+ void const *pData) // Raw data to hash.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ const BYTE *pRawStub = (const BYTE *)pData;
+
+ UINT cb = Length(pRawStub);
+ int hash = 0;
+ while (cb--)
+ hash = _rotl(hash,1) + *(pRawStub++);
+
+ return hash;
+}
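+
+// Worked example (illustrative): for a 3-byte key { 0x01, 0x02, 0x03 } the
+// rotate-and-add loop above computes
+//
+//     hash = _rotl(0x00, 1) + 0x01 = 0x01
+//     hash = _rotl(0x01, 1) + 0x02 = 0x04
+//     hash = _rotl(0x04, 1) + 0x03 = 0x0B
+//
+// so every byte of the raw stub contributes to the final hash value.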
+
+//*****************************************************************************
+// Compare is used in the typical memcmp way: 0 is equality, and -1/1 indicate
+// the direction of miscompare. In this system, elements are either equal or not.
+//*****************************************************************************
+unsigned int StubCacheBase::Compare( // 0, -1, or 1.
+ void const *pData, // Raw key data on lookup.
+ BYTE *pElement) // The element to compare data against.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ const BYTE *pRawStub1 = (const BYTE *)pData;
+ const BYTE *pRawStub2 = (const BYTE *)GetKey(pElement);
+ UINT cb1 = Length(pRawStub1);
+ UINT cb2 = Length(pRawStub2);
+
+ if (cb1 != cb2)
+ return 1; // not equal
+ else
+ {
+ while (cb1--)
+ {
+ if (*(pRawStub1++) != *(pRawStub2++))
+ return 1; // not equal
+ }
+ return 0;
+ }
+}
+
+//*****************************************************************************
+// Returns the status (FREE, DELETED, or USED) of the given element.
+//*****************************************************************************
+CClosedHashBase::ELEMENTSTATUS StubCacheBase::Status( // The status of the entry.
+ BYTE *pElement) // The element to check.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Stub *pStub = ((STUBHASHENTRY*)pElement)->m_pStub;
+
+ if (pStub == NULL)
+ return FREE;
+ else if (pStub == (Stub*)(-1))
+ return DELETED;
+ else
+ return USED;
+}
+
+//*****************************************************************************
+// Sets the status of the given element.
+//*****************************************************************************
+void StubCacheBase::SetStatus(
+ BYTE *pElement, // The element to set status for.
+ ELEMENTSTATUS eStatus) // New status.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ STUBHASHENTRY *phe = (STUBHASHENTRY*)pElement;
+
+ switch (eStatus)
+ {
+ case FREE: phe->m_pStub = NULL; break;
+ case DELETED: phe->m_pStub = (Stub*)(-1); break;
+ default:
+ _ASSERTE(!"MLCacheEntry::SetStatus(): Bad argument.");
+ }
+}
+
+//*****************************************************************************
+// Returns the internal key value for an element.
+//*****************************************************************************
+void *StubCacheBase::GetKey( // The data to hash on.
+ BYTE *pElement) // The element to return data ptr for.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ STUBHASHENTRY *phe = (STUBHASHENTRY*)pElement;
+ return (void *)(phe->m_pStub->GetBlob() + phe->m_offsetOfRawStub);
+}
diff --git a/src/vm/stubcache.h b/src/vm/stubcache.h
new file mode 100644
index 0000000000..1754cf5418
--- /dev/null
+++ b/src/vm/stubcache.h
@@ -0,0 +1,140 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: stubcache.h
+//
+
+//
+// Base class for caching stubs.
+//
+
+
+#ifndef __mlcache_h__
+#define __mlcache_h__
+
+
+#include "vars.hpp"
+#include "util.hpp"
+#include "crst.h"
+
+class Stub;
+class StubLinker;
+
+class StubCacheBase : private CClosedHashBase
+{
+private:
+ //---------------------------------------------------------
+ // Hash entry for CClosedHashBase.
+ //---------------------------------------------------------
+ struct STUBHASHENTRY
+ {
+ // Values:
+ // NULL = free
+ // -1 = deleted
+ // other = used
+ Stub *m_pStub;
+
+ // Offset where the RawStub begins (the RawStub can be
+ // preceded by native stub code.)
+ UINT16 m_offsetOfRawStub;
+ };
+
+
+public:
+ //---------------------------------------------------------
+ // Constructor
+ //---------------------------------------------------------
+ StubCacheBase(LoaderHeap *heap = 0);
+
+ //---------------------------------------------------------
+ // Destructor
+ //---------------------------------------------------------
+ ~StubCacheBase();
+
+ //---------------------------------------------------------
+ // Returns the equivalent hashed Stub, creating a new hash
+ // entry if necessary. If the latter, will call out to CompileStub.
+ //
+ // Throws on out of memory or other fatal error.
+ //---------------------------------------------------------
+ Stub *Canonicalize(const BYTE *pRawStub);
+
+protected:
+ //---------------------------------------------------------
+ // OVERRIDE.
+ // Compile a native (ASM) version of the stub.
+ //
+ // This method should compile into the provided stublinker (but
+ // not call the Link method.)
+ //---------------------------------------------------------
+ virtual void CompileStub(const BYTE *pRawStub,
+ StubLinker *psl) = 0;
+
+ //---------------------------------------------------------
+ // OVERRIDE
+ // Tells the StubCacheBase the length of a stub.
+ //---------------------------------------------------------
+ virtual UINT Length(const BYTE *pRawStub) = 0;
+
+ //---------------------------------------------------------
+ // OVERRIDE (OPTIONAL)
+ // Notifies the various derived classes that a new stub has been created
+ //---------------------------------------------------------
+ virtual void AddStub(const BYTE* pRawStub, Stub* pNewStub);
+
+
+private:
+ // *** OVERRIDES FOR CClosedHashBase ***/
+
+ //*****************************************************************************
+ // Hash is called with a pointer to an element in the table. You must override
+ // this method and provide a hash algorithm for your element type.
+ //*****************************************************************************
+ virtual unsigned int Hash( // The key value.
+ void const *pData); // Raw data to hash.
+
+ //*****************************************************************************
+    // Compare is used in the typical memcmp way: 0 is equality, and -1/1 indicate
+    // the direction of miscompare. In this system, elements are either equal or not.
+ //*****************************************************************************
+ virtual unsigned int Compare( // 0, -1, or 1.
+ void const *pData, // Raw key data on lookup.
+ BYTE *pElement); // The element to compare data against.
+
+ //*****************************************************************************
+    // Returns the status (FREE, DELETED, or USED) of the given element.
+ //*****************************************************************************
+ virtual ELEMENTSTATUS Status( // The status of the entry.
+ BYTE *pElement); // The element to check.
+
+ //*****************************************************************************
+ // Sets the status of the given element.
+ //*****************************************************************************
+ virtual void SetStatus(
+ BYTE *pElement, // The element to set status for.
+ ELEMENTSTATUS eStatus); // New status.
+
+ //*****************************************************************************
+ // Returns the internal key value for an element.
+ //*****************************************************************************
+ virtual void *GetKey( // The data to hash on.
+ BYTE *pElement); // The element to return data ptr for.
+
+
+
+
+private:
+ Crst m_crst;
+ LoaderHeap* m_heap;
+};
+
+
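+// Illustrative sketch (not part of the original header): a minimal derived
+// cache only has to supply the two pure-virtual pieces. MyStubCache and the
+// length-prefixed blob layout are hypothetical:
+//
+//     class MyStubCache : public StubCacheBase
+//     {
+//     protected:
+//         virtual void CompileStub(const BYTE *pRawStub, StubLinker *psl)
+//         {
+//             // emit native code for pRawStub into psl; do not call Link here
+//         }
+//
+//         virtual UINT Length(const BYTE *pRawStub)
+//         {
+//             return *(const UINT *)pRawStub;  // e.g. a length-prefixed blob
+//         }
+//     };
+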
+#endif // __mlcache_h__
diff --git a/src/vm/stubgen.cpp b/src/vm/stubgen.cpp
new file mode 100644
index 0000000000..445d482a66
--- /dev/null
+++ b/src/vm/stubgen.cpp
@@ -0,0 +1,2908 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: StubGen.cpp
+//
+
+//
+
+
+#include "common.h"
+
+#include "stubgen.h"
+#include "jitinterface.h"
+#include "ilstubcache.h"
+#include "sigbuilder.h"
+
+#include "formattype.h"
+#include "typestring.h"
+
+
+#include "field.h"
+
+//
+// ....[.....\xxxxx]..0... -> ....[xxxxx]..0...
+// ^ ^ ^
+
+void DumpIL_RemoveFullPath(SString &strTokenFormatting)
+{
+ STANDARD_VM_CONTRACT;
+ if (strTokenFormatting.IsEmpty())
+ return;
+
+ SString::Iterator begin = strTokenFormatting.Begin();
+ SString::Iterator end = strTokenFormatting.End();
+ SString::Iterator leftBracket = strTokenFormatting.Begin();
+
+ // Find the first '[' in the string.
+ while ((leftBracket != end) && (*leftBracket != W('[')))
+ {
+ ++leftBracket;
+ }
+
+ if (leftBracket != end)
+ {
+ SString::Iterator lastSlash = strTokenFormatting.End() - 1;
+
+ // Find the last '\\' in the string.
+ while ((lastSlash != leftBracket) && (*lastSlash != W('\\')))
+ {
+ --lastSlash;
+ }
+
+ if (leftBracket != lastSlash)
+ {
+ strTokenFormatting.Delete(leftBracket + 1, lastSlash - leftBracket);
+ }
+ }
+}
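+
+// Worked example (illustrative): the directory portion between '[' and the
+// last '\' is removed, keeping only the file name:
+//
+//     [C:\windows\mscorlib.dll]System.String::Concat
+//         ->  [mscorlib.dll]System.String::Concat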
+
+void DumpIL_FormatToken(TokenLookupMap* pTokenMap, mdToken token, SString &strTokenFormatting, const SString &strStubTargetSig)
+{
+ void* pvLookupRetVal = (void*)POISONC;
+ _ASSERTE(strTokenFormatting.IsEmpty());
+
+ EX_TRY
+ {
+ if (TypeFromToken(token) == mdtMethodDef)
+ {
+ MethodDesc* pMD = pTokenMap->LookupMethodDef(token);
+ pvLookupRetVal = pMD;
+ CONSISTENCY_CHECK(CheckPointer(pMD));
+
+ pMD->GetFullMethodInfo(strTokenFormatting);
+ }
+ else if (TypeFromToken(token) == mdtTypeDef)
+ {
+ TypeHandle typeHnd = pTokenMap->LookupTypeDef(token);
+ pvLookupRetVal = typeHnd.AsPtr();
+ CONSISTENCY_CHECK(!typeHnd.IsNull());
+
+ SString typeName;
+ MethodTable *pMT = NULL;
+ if (typeHnd.IsTypeDesc())
+ {
+ TypeDesc *pTypeDesc = typeHnd.AsTypeDesc();
+ pMT = pTypeDesc->GetMethodTable();
+ }
+ else
+ {
+ pMT = typeHnd.AsMethodTable();
+ }
+
+ // AppendType handles NULL correctly
+ TypeString::AppendType(typeName, TypeHandle(pMT));
+
+ if (pMT && typeHnd.IsNativeValueType())
+ typeName.Append(W("_NativeValueType"));
+ strTokenFormatting.Set(typeName);
+ }
+ else if (TypeFromToken(token) == mdtFieldDef)
+ {
+ FieldDesc* pFD = pTokenMap->LookupFieldDef(token);
+ pvLookupRetVal = pFD;
+ CONSISTENCY_CHECK(CheckPointer(pFD));
+
+ SString typeName;
+ TypeString::AppendType(typeName, TypeHandle(pFD->GetApproxEnclosingMethodTable()));
+
+ SString strFieldName(SString::Utf8, pFD->GetName());
+ strTokenFormatting.Printf(W("%s::%s"), typeName.GetUnicode(), strFieldName.GetUnicode());
+ }
+ else if (TypeFromToken(token) == mdtModule)
+ {
+ // Do nothing, because strTokenFormatting is already empty.
+ }
+ else if (TypeFromToken(token) == mdtSignature)
+ {
+ CONSISTENCY_CHECK(token == TOKEN_ILSTUB_TARGET_SIG);
+ strTokenFormatting.Set(strStubTargetSig);
+ }
+ else
+ {
+ strTokenFormatting.Printf(W("%d"), token);
+ }
+ DumpIL_RemoveFullPath(strTokenFormatting);
+ }
+ EX_CATCH
+ {
+ strTokenFormatting.Printf(W("%d"), token);
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+void ILCodeStream::Emit(ILInstrEnum instr, INT16 iStackDelta, UINT_PTR uArg)
+{
+ STANDARD_VM_CONTRACT;
+
+ UINT idxCurInstr = 0;
+ ILStubLinker::ILInstruction* pInstrBuffer = NULL;
+
+ if (NULL == m_pqbILInstructions)
+ {
+ m_pqbILInstructions = new ILCodeStreamBuffer();
+ }
+
+ idxCurInstr = m_uCurInstrIdx;
+
+ m_uCurInstrIdx++;
+ m_pqbILInstructions->ReSizeThrows(m_uCurInstrIdx * sizeof(ILStubLinker::ILInstruction));
+
+ pInstrBuffer = (ILStubLinker::ILInstruction*)m_pqbILInstructions->Ptr();
+
+ pInstrBuffer[idxCurInstr].uInstruction = static_cast<UINT16>(instr);
+ pInstrBuffer[idxCurInstr].iStackDelta = iStackDelta;
+ pInstrBuffer[idxCurInstr].uArg = uArg;
+}
+
+ILCodeLabel* ILStubLinker::NewCodeLabel()
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeLabel* pCodeLabel = new ILCodeLabel();
+
+ pCodeLabel->m_pNext = m_pLabelList;
+ pCodeLabel->m_pOwningStubLinker = this;
+ pCodeLabel->m_pCodeStreamOfLabel = NULL;
+ pCodeLabel->m_idxLabeledInstruction = -1;
+
+ m_pLabelList = pCodeLabel;
+
+ return pCodeLabel;
+}
+
+void ILCodeStream::EmitLabel(ILCodeLabel* pCodeLabel)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION_MSG(m_pOwner == pCodeLabel->m_pOwningStubLinker, "you can only use a code label in the ILStubLinker that created it!");
+ }
+ CONTRACTL_END;
+
+ pCodeLabel->m_pCodeStreamOfLabel = this;
+ pCodeLabel->m_idxLabeledInstruction = m_uCurInstrIdx;
+
+ Emit(CEE_CODE_LABEL, 0, (UINT_PTR)pCodeLabel);
+}
+
+static const BYTE s_rgbOpcodeSizes[] =
+{
+
+#define OPDEF(name,string,pop,push,oprType,opcType,l,s1,s2,ctrl) \
+ ((l) + (oprType)),
+
+#define InlineNone 0
+#define ShortInlineVar 1
+#define ShortInlineI 1
+#define InlineI 4
+#define InlineI8 8
+#define ShortInlineR 4
+#define InlineR 8
+#define InlineMethod 4
+#define InlineSig 4
+#define ShortInlineBrTarget 1
+#define InlineBrTarget 4
+#define InlineSwitch -1
+#define InlineType 4
+#define InlineString 4
+#define InlineField 4
+#define InlineTok 4
+#define InlineVar 2
+
+#include "opcode.def"
+
+#undef OPDEF
+#undef InlineNone
+#undef ShortInlineVar
+#undef ShortInlineI
+#undef InlineI
+#undef InlineI8
+#undef ShortInlineR
+#undef InlineR
+#undef InlineMethod
+#undef InlineSig
+#undef ShortInlineBrTarget
+#undef InlineBrTarget
+#undef InlineSwitch
+#undef InlineType
+#undef InlineString
+#undef InlineField
+#undef InlineTok
+#undef InlineVar
+
+};
+
+struct ILOpcode
+{
+ BYTE byte1;
+ BYTE byte2;
+};
+
+static const ILOpcode s_rgOpcodes[] =
+{
+
+#define OPDEF(name,string,pop,push,oprType,opcType,l,s1,s2,ctrl) \
+ { (s1), (s2) },
+#include "opcode.def"
+#undef OPDEF
+
+};
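+
+// How the OPDEF X-macro above expands (illustrative): for an opcode.def row
+// such as
+//
+//     OPDEF(CEE_LDARG_0, "ldarg.0", Pop0, Push1, InlineNone, IPrimitive, 1, 0xFF, 0x02, NEXT)
+//
+// s_rgbOpcodeSizes records (1 + InlineNone) == 1 byte and s_rgOpcodes records
+// { 0xFF, 0x02 }, where byte1 == 0xFF marks a single-byte encoding.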
+
+ILCodeStream::ILInstrEnum ILCodeStream::LowerOpcode(ILInstrEnum instr, ILStubLinker::ILInstruction* pInstr)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(instr == (ILInstrEnum)pInstr->uInstruction);
+ }
+ CONTRACTL_END;
+
+ //
+ // NOTE: we do not lower branches to their smallest form because that
+ // would introduce extra passes at link time, which isn't really
+ // worth the savings in IL code size.
+ //
+
+ UINT_PTR uConst = pInstr->uArg;
+
+ switch (instr)
+ {
+ case CEE_LDC_I8:
+ {
+ if (uConst == (UINT_PTR)-1)
+ {
+ instr = CEE_LDC_I4_M1;
+ }
+ else
+ if (uConst < 9)
+ {
+ instr = (ILInstrEnum)((UINT_PTR)CEE_LDC_I4_0 + uConst);
+ }
+ else
+ if (FitsInI1(uConst))
+ {
+ instr = CEE_LDC_I4_S;
+ }
+ else
+ if (FitsInI4(uConst))
+ {
+ instr = CEE_LDC_I4;
+ }
+ break;
+ }
+
+ case CEE_LDARG:
+ {
+ if (uConst <= 3)
+ {
+ instr = (ILInstrEnum)((UINT_PTR)CEE_LDARG_0 + uConst);
+ break;
+ }
+ goto lShortForm;
+ }
+ case CEE_LDLOC:
+ {
+ if (uConst <= 3)
+ {
+ instr = (ILInstrEnum)((UINT_PTR)CEE_LDLOC_0 + uConst);
+ break;
+ }
+ goto lShortForm;
+ }
+ case CEE_STLOC:
+ {
+ if (uConst <= 3)
+ {
+ instr = (ILInstrEnum)((UINT_PTR)CEE_STLOC_0 + uConst);
+ break;
+ }
+
+lShortForm:
+ if (FitsInI1(uConst))
+ {
+ static const UINT_PTR c_uMakeShortDelta = ((UINT_PTR)CEE_LDARG - (UINT_PTR)CEE_LDARG_S);
+ static_assert_no_msg(((UINT_PTR)CEE_LDARG - c_uMakeShortDelta) == (UINT_PTR)CEE_LDARG_S);
+ static_assert_no_msg(((UINT_PTR)CEE_LDLOC - c_uMakeShortDelta) == (UINT_PTR)CEE_LDLOC_S);
+ static_assert_no_msg(((UINT_PTR)CEE_STLOC - c_uMakeShortDelta) == (UINT_PTR)CEE_STLOC_S);
+
+ instr = (ILInstrEnum)((UINT_PTR)instr - c_uMakeShortDelta);
+ }
+ break;
+ }
+
+ case CEE_LDARGA:
+ case CEE_STARG:
+ case CEE_LDLOCA:
+ {
+ if (FitsInI1(uConst))
+ {
+ static const UINT_PTR c_uMakeShortDelta = ((UINT_PTR)CEE_LDARGA - (UINT_PTR)CEE_LDARGA_S);
+ static_assert_no_msg(((UINT_PTR)CEE_LDARGA - c_uMakeShortDelta) == (UINT_PTR)CEE_LDARGA_S);
+ static_assert_no_msg(((UINT_PTR)CEE_STARG - c_uMakeShortDelta) == (UINT_PTR)CEE_STARG_S);
+ static_assert_no_msg(((UINT_PTR)CEE_LDLOCA - c_uMakeShortDelta) == (UINT_PTR)CEE_LDLOCA_S);
+
+ instr = (ILInstrEnum)((UINT_PTR)instr - c_uMakeShortDelta);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ pInstr->uInstruction = static_cast<UINT16>(instr);
+ return instr;
+}
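+
+// Lowering examples (illustrative):
+//
+//     ldc.i8 3     ->  ldc.i4.3      (constant < 9, macro form)
+//     ldarg 2      ->  ldarg.2       (macro form)
+//     ldloc 100    ->  ldloc.s 100   (index fits in one byte)
+//     ldloca 5000  ->  ldloca        (index requires the long form)
+//
+// Branch opcodes are deliberately kept in long form, as noted above.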
+
+void ILStubLinker::PatchInstructionArgument(ILCodeLabel* pLabel, UINT_PTR uNewArg
+ DEBUG_ARG(UINT16 uExpectedInstruction))
+{
+ LIMITED_METHOD_CONTRACT;
+
+ UINT idx = pLabel->m_idxLabeledInstruction;
+ ILCodeStream* pLabelCodeStream = pLabel->m_pCodeStreamOfLabel;
+ ILInstruction* pLabelInstrBuffer = (ILInstruction*)pLabelCodeStream->m_pqbILInstructions->Ptr();
+
+ CONSISTENCY_CHECK(pLabelInstrBuffer[idx].uInstruction == ILCodeStream::CEE_CODE_LABEL);
+ CONSISTENCY_CHECK(pLabelInstrBuffer[idx].iStackDelta == 0);
+
+ idx++;
+
+ CONSISTENCY_CHECK(idx < pLabelCodeStream->m_uCurInstrIdx);
+ CONSISTENCY_CHECK(pLabelInstrBuffer[idx].uInstruction == uExpectedInstruction);
+
+ pLabelInstrBuffer[idx].uArg = uNewArg;
+}
+
+ILCodeLabel::ILCodeLabel()
+{
+ m_pNext = NULL;
+ m_pOwningStubLinker = NULL;
+ m_pCodeStreamOfLabel = NULL;
+ m_codeOffset = -1;
+ m_idxLabeledInstruction = -1;
+}
+
+ILCodeLabel::~ILCodeLabel()
+{
+}
+
+size_t ILCodeLabel::GetCodeOffset()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ CONSISTENCY_CHECK(m_codeOffset != (size_t)-1);
+ return m_codeOffset;
+}
+
+
+void ILCodeLabel::SetCodeOffset(size_t codeOffset)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ CONSISTENCY_CHECK((m_codeOffset == (size_t)-1) && (codeOffset != (size_t)-1));
+ m_codeOffset = codeOffset;
+}
+
+static const LPCSTR s_rgOpcodeNames[] =
+{
+#define OPDEF(name,string,pop,push,oprType,opcType,l,s1,s2,ctrl) \
+ string,
+#include "opcode.def"
+#undef OPDEF
+
+};
+
+#include "openum.h"
+
+static const BYTE s_rgbOpcodeArgType[] =
+{
+
+#define OPDEF(name,string,pop,push,oprType,opcType,l,s1,s2,ctrl) \
+ oprType,
+#include "opcode.def"
+#undef OPDEF
+
+};
+
+
+//---------------------------------------------------------------------------------------
+//
+void
+ILStubLinker::LogILInstruction(
+ size_t curOffset,
+ bool isLabeled,
+ INT iCurStack,
+ ILInstruction * pInstruction,
+ SString * pDumpILStubCode)
+{
+ STANDARD_VM_CONTRACT;
+ //
+ // format label
+ //
+ SString strLabel;
+
+ if (isLabeled)
+ {
+ strLabel.Printf(W("IL_%04x:"), curOffset);
+ }
+ else
+ {
+ strLabel.Set(W(" "));
+ }
+
+ //
+ // format opcode
+ //
+ SString strOpcode;
+
+ ILCodeStream::ILInstrEnum instr = (ILCodeStream::ILInstrEnum)pInstruction->uInstruction;
+ size_t cbOpcodeName = strlen(s_rgOpcodeNames[instr]);
+ SString strOpcodeName;
+ strOpcodeName.SetUTF8(s_rgOpcodeNames[instr]);
+ // Set the width of the opcode to 15.
+ strOpcode.Set(W(" "));
+ strOpcode.Replace(strOpcode.Begin(), (COUNT_T)cbOpcodeName, strOpcodeName);
+
+ //
+ // format argument
+ //
+
+ static const size_t c_cchPreallocateArgument = 512;
+ SString strArgument;
+ strArgument.Preallocate(c_cchPreallocateArgument);
+
+ static const size_t c_cchPreallocateTokenName = 1024;
+ SString strTokenName;
+ strTokenName.Preallocate(c_cchPreallocateTokenName);
+
+ if (ILCodeStream::IsBranchInstruction(instr))
+ {
+ size_t branchDistance = (size_t)pInstruction->uArg;
+ size_t targetOffset = curOffset + s_rgbOpcodeSizes[instr] + branchDistance;
+ strArgument.Printf(W("IL_%04x"), targetOffset);
+ }
+ else if ((ILCodeStream::ILInstrEnum)CEE_NOP == instr)
+ {
+ SString strInstruction;
+ strInstruction.Printf("%s", (char *)pInstruction->uArg);
+ strInstruction.ConvertToUnicode(strArgument);
+ }
+ else
+ {
+ switch (s_rgbOpcodeArgType[instr])
+ {
+ case InlineNone:
+ break;
+
+ case ShortInlineVar:
+ case ShortInlineI:
+ case InlineI:
+ strArgument.Printf(W("0x%x"), pInstruction->uArg);
+ break;
+
+ case InlineI8:
+ strArgument.Printf(W("0x%p"), (void *)pInstruction->uArg);
+ break;
+
+ case InlineMethod:
+ case InlineField:
+ case InlineType:
+ case InlineString:
+ case InlineSig:
+ case InlineRVA:
+ case InlineTok:
+ // No token value when we dump IL for ETW
+ if (pDumpILStubCode == NULL)
+ strArgument.Printf(W("0x%08x"), pInstruction->uArg);
+
+ LPUTF8 pszFormattedStubTargetSig = NULL;
+ CQuickBytes qbTargetSig;
+
+ if (TOKEN_ILSTUB_TARGET_SIG == pInstruction->uArg)
+ {
+ PCCOR_SIGNATURE pTargetSig;
+ ULONG cTargetSig;
+ CQuickBytes qbTempTargetSig;
+
+ IMDInternalImport * pIMDI = MscorlibBinder::GetModule()->GetMDImport();
+
+ cTargetSig = GetStubTargetMethodSigSize();
+ pTargetSig = (PCCOR_SIGNATURE)qbTempTargetSig.AllocThrows(cTargetSig);
+
+ GetStubTargetMethodSig((BYTE*)pTargetSig, cTargetSig);
+ PrettyPrintSig(pTargetSig, cTargetSig, "", &qbTargetSig, pIMDI, NULL);
+
+ pszFormattedStubTargetSig = (LPUTF8)qbTargetSig.Ptr();
+ }
+
+            // Dump to strTokenName if logging; otherwise dump to strArgument, avoiding an extra space because we are omitting the token
+ _ASSERTE(FitsIn<mdToken>(pInstruction->uArg));
+ SString strFormattedStubTargetSig;
+ strFormattedStubTargetSig.SetUTF8(pszFormattedStubTargetSig);
+ if (pDumpILStubCode == NULL)
+ DumpIL_FormatToken(&m_tokenMap, static_cast<mdToken>(pInstruction->uArg), strTokenName, strFormattedStubTargetSig);
+ else
+ DumpIL_FormatToken(&m_tokenMap, static_cast<mdToken>(pInstruction->uArg), strArgument, strFormattedStubTargetSig);
+
+ break;
+ }
+ }
+
+ //
+ // log it!
+ //
+ if (pDumpILStubCode)
+ {
+ pDumpILStubCode->AppendPrintf(W("%s /*(%2d)*/ %s %s %s\n"), strLabel.GetUnicode(), iCurStack, strOpcode.GetUnicode(),
+ strArgument.GetUnicode(), strTokenName.GetUnicode());
+ }
+ else
+ {
+ StackScratchBuffer strLabelBuffer;
+ StackScratchBuffer strOpcodeBuffer;
+ StackScratchBuffer strArgumentBuffer;
+ StackScratchBuffer strTokenNameBuffer;
+ LOG((LF_STUBS, LL_INFO1000, "%s (%2d) %s %s %s\n", strLabel.GetUTF8(strLabelBuffer), iCurStack, \
+ strOpcode.GetUTF8(strOpcodeBuffer), strArgument.GetUTF8(strArgumentBuffer), strTokenName.GetUTF8(strTokenNameBuffer)));
+ }
+} // ILStubLinker::LogILInstruction
+
+//---------------------------------------------------------------------------------------
+//
+void
+ILStubLinker::LogILStubWorker(
+ ILInstruction * pInstrBuffer,
+ UINT numInstr,
+ size_t * pcbCode,
+ INT * piCurStack,
+ SString * pDumpILStubCode)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pcbCode));
+ PRECONDITION(CheckPointer(piCurStack));
+ PRECONDITION(CheckPointer(pDumpILStubCode, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ bool isLabeled = false;
+
+ for (UINT i = 0; i < numInstr; i++)
+ {
+ ILCodeStream::ILInstrEnum instr = (ILCodeStream::ILInstrEnum)pInstrBuffer[i].uInstruction;
+ CONSISTENCY_CHECK(ILCodeStream::IsSupportedInstruction(instr));
+
+ if (instr == ILCodeStream::CEE_CODE_LABEL)
+ {
+ isLabeled = true;
+ continue;
+ }
+
+ LogILInstruction(*pcbCode, isLabeled, *piCurStack, &pInstrBuffer[i], pDumpILStubCode);
+ isLabeled = false;
+
+ //
+ // calculate the code size
+ //
+ PREFIX_ASSUME((size_t)instr < sizeof(s_rgbOpcodeSizes));
+ *pcbCode += s_rgbOpcodeSizes[instr];
+
+ //
+ // calculate curstack
+ //
+ *piCurStack += pInstrBuffer[i].iStackDelta;
+ }
+
+ // Be sure to log any trailing label that has no associated instruction.
+ if (isLabeled)
+ {
+ if (pDumpILStubCode)
+ {
+ pDumpILStubCode->AppendPrintf(W("IL_%04x:\n"), *pcbCode);
+ }
+ else
+ {
+ LOG((LF_STUBS, LL_INFO1000, "IL_%04x:\n", *pcbCode));
+ }
+ }
+}
+
+static inline void LogOneFlag(DWORD flags, DWORD flag, LPCSTR str, DWORD facility, DWORD level)
+{
+ if (flags & flag)
+ {
+ LOG((facility, level, str));
+ }
+}
+
+static void LogJitFlags(DWORD facility, DWORD level, DWORD dwJitFlags)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ LOG((facility, level, "dwJitFlags: 0x%08x\n", dwJitFlags));
+
+#define LOG_FLAG(name) LogOneFlag(dwJitFlags, name, " " #name "\n", facility, level);
+
+ // these are all we care about at the moment
+ LOG_FLAG(CORJIT_FLG_IL_STUB);
+ LOG_FLAG(CORJIT_FLG_PUBLISH_SECRET_PARAM);
+
+#undef LOG_FLAG
+
+ DWORD dwKnownMask =
+ CORJIT_FLG_IL_STUB |
+ CORJIT_FLG_PUBLISH_SECRET_PARAM |
+        0;
+
+ DWORD dwUnknownFlags = dwJitFlags & ~dwKnownMask;
+ if (0 != dwUnknownFlags)
+ {
+ LOG((facility, level, "UNKNOWN FLAGS: 0x%08x\n", dwUnknownFlags));
+ }
+}
+
+void ILStubLinker::LogILStub(DWORD dwJitFlags, SString *pDumpILStubCode)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pDumpILStubCode, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ ILCodeStream* pCurrentStream = m_pCodeStreamList;
+ size_t cbCode = 0;
+ INT iCurStack = 0;
+
+ if (pDumpILStubCode == NULL)
+ LogJitFlags(LF_STUBS, LL_INFO1000, dwJitFlags);
+
+ while (pCurrentStream)
+ {
+ if (pCurrentStream->m_pqbILInstructions)
+ {
+ if (pDumpILStubCode)
+ pDumpILStubCode->AppendPrintf("// %s {\n", pCurrentStream->GetStreamDescription(pCurrentStream->GetStreamType()));
+ else
+ LOG((LF_STUBS, LL_INFO1000, "%s {\n", pCurrentStream->GetStreamDescription(pCurrentStream->GetStreamType())));
+
+ ILInstruction* pInstrBuffer = (ILInstruction*)pCurrentStream->m_pqbILInstructions->Ptr();
+ LogILStubWorker(pInstrBuffer, pCurrentStream->m_uCurInstrIdx, &cbCode, &iCurStack, pDumpILStubCode);
+
+ if (pDumpILStubCode)
+ pDumpILStubCode->AppendPrintf("// } %s \n", pCurrentStream->GetStreamDescription(pCurrentStream->GetStreamType()));
+ else
+ LOG((LF_STUBS, LL_INFO1000, "}\n"));
+ }
+
+ pCurrentStream = pCurrentStream->m_pNextStream;
+ }
+}
+
+bool ILStubLinker::FirstPassLink(ILInstruction* pInstrBuffer, UINT numInstr, size_t* pcbCode, INT* piCurStack, UINT* puMaxStack)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(puMaxStack));
+ }
+ CONTRACTL_END;
+
+ bool fStackUnderflow = false;
+
+ for (UINT i = 0; i < numInstr; i++)
+ {
+ ILCodeStream::ILInstrEnum instr = (ILCodeStream::ILInstrEnum)pInstrBuffer[i].uInstruction;
+ CONSISTENCY_CHECK(ILCodeStream::IsSupportedInstruction(instr));
+
+ //
+ // down-size instructions
+ //
+ instr = ILCodeStream::LowerOpcode(instr, &pInstrBuffer[i]);
+
+ //
+ // fill in code label offsets
+ //
+ if (instr == ILCodeStream::CEE_CODE_LABEL)
+ {
+ ILCodeLabel* pLabel = (ILCodeLabel*)(pInstrBuffer[i].uArg);
+ pLabel->SetCodeOffset(*pcbCode);
+ }
+
+ //
+ // calculate the code size
+ //
+ PREFIX_ASSUME((size_t)instr < sizeof(s_rgbOpcodeSizes));
+ *pcbCode += s_rgbOpcodeSizes[instr];
+
+ //
+ // calculate maxstack
+ //
+ *piCurStack += pInstrBuffer[i].iStackDelta;
+ if (*piCurStack > (INT)*puMaxStack)
+ {
+ *puMaxStack = *piCurStack;
+ }
+#ifdef _DEBUG
+ if (*piCurStack < 0)
+ {
+ fStackUnderflow = true;
+ }
+#endif // _DEBUG
+ }
+
+ return fStackUnderflow;
+}
+
+void ILStubLinker::SecondPassLink(ILInstruction* pInstrBuffer, UINT numInstr, size_t* pCurCodeOffset)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pCurCodeOffset));
+ }
+ CONTRACTL_END;
+
+ for (UINT i = 0; i < numInstr; i++)
+ {
+ ILCodeStream::ILInstrEnum instr = (ILCodeStream::ILInstrEnum)pInstrBuffer[i].uInstruction;
+ CONSISTENCY_CHECK(ILCodeStream::IsSupportedInstruction(instr));
+ *pCurCodeOffset += s_rgbOpcodeSizes[instr];
+
+ if (ILCodeStream::IsBranchInstruction(instr))
+ {
+ ILCodeLabel* pLabel = (ILCodeLabel*) pInstrBuffer[i].uArg;
+
+ CONSISTENCY_CHECK(this == pLabel->m_pOwningStubLinker);
+ CONSISTENCY_CHECK(IsInCodeStreamList(pLabel->m_pCodeStreamOfLabel));
+
+ pInstrBuffer[i].uArg = pLabel->GetCodeOffset() - *pCurCodeOffset;
+ }
+ }
+}
+
+size_t ILStubLinker::Link(UINT* puMaxStack)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(puMaxStack));
+ }
+ CONTRACTL_END;
+
+ //
+ // Pass1: calculate code size, lower instructions to smallest form,
+ // fill in branch target offsets, and calculate maxstack
+ //
+
+ ILCodeStream* pCurrentStream = m_pCodeStreamList;
+ size_t cbCode = 0;
+ INT iCurStack = 0;
+ UINT uMaxStack = 0;
+ DEBUG_STMT(bool fStackUnderflow = false);
+
+ while (pCurrentStream)
+ {
+ if (pCurrentStream->m_pqbILInstructions)
+ {
+ ILInstruction* pInstrBuffer = (ILInstruction*)pCurrentStream->m_pqbILInstructions->Ptr();
+ INDEBUG( fStackUnderflow = ) FirstPassLink(pInstrBuffer, pCurrentStream->m_uCurInstrIdx, &cbCode, &iCurStack, &uMaxStack);
+ }
+
+ pCurrentStream = pCurrentStream->m_pNextStream;
+ }
+
+ //
+ // Pass2: go back and patch the branch instructions
+ //
+
+ pCurrentStream = m_pCodeStreamList;
+ size_t curCodeOffset = 0;
+
+ while (pCurrentStream)
+ {
+ if (pCurrentStream->m_pqbILInstructions)
+ {
+ ILInstruction* pInstrBuffer = (ILInstruction*)pCurrentStream->m_pqbILInstructions->Ptr();
+ SecondPassLink(pInstrBuffer, pCurrentStream->m_uCurInstrIdx, &curCodeOffset);
+ }
+
+ pCurrentStream = pCurrentStream->m_pNextStream;
+ }
+
+#ifdef _DEBUG
+ if (fStackUnderflow)
+ {
+ LogILStub(NULL);
+ CONSISTENCY_CHECK_MSG(false, "IL stack underflow! -- see logging output");
+ }
+#endif // _DEBUG
+
+ *puMaxStack = uMaxStack;
+ return cbCode;
+}
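+
+// Usage sketch (illustrative): a caller sizes the IL buffer with Link and
+// then materializes the bytes with GenerateCode. NewArrayHolder is the usual
+// CLR holder for arrays; the buffer name is hypothetical:
+//
+//     UINT maxStack;
+//     size_t cbCode = pStubLinker->Link(&maxStack);   // size, lower, patch
+//     NewArrayHolder<BYTE> pbBuffer(new BYTE[cbCode]);
+//     pStubLinker->GenerateCode(pbBuffer, cbCode);    // emit encoded opcodes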
+
+#ifdef _DEBUG
+
+static const PCSTR s_rgOpNames[] =
+{
+
+#define OPDEF(name,string,pop,push,oprType,opcType,l,s1,s2,ctrl) \
+ #name,
+#include "opcode.def"
+#undef OPDEF
+
+};
+
+
+#endif // _DEBUG
+
+BYTE* ILStubLinker::GenerateCodeWorker(BYTE* pbBuffer, ILInstruction* pInstrBuffer, UINT numInstr, size_t* pcbCode)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pcbCode));
+ }
+ CONTRACTL_END;
+
+ for (UINT i = 0; i < numInstr; i++)
+ {
+ ILCodeStream::ILInstrEnum instr = (ILCodeStream::ILInstrEnum)pInstrBuffer[i].uInstruction;
+ CONSISTENCY_CHECK(ILCodeStream::IsSupportedInstruction(instr));
+
+ //
+        // copy the IL instructions from the various code streams into the given buffer
+ //
+ if (instr != ILCodeStream::CEE_CODE_LABEL)
+ {
+ const ILOpcode* pOpcode = &s_rgOpcodes[instr];
+
+ PREFIX_ASSUME((size_t)instr < sizeof(s_rgbOpcodeSizes));
+ int opSize = s_rgbOpcodeSizes[instr];
+ bool twoByteOp = (pOpcode->byte1 != 0xFF);
+ int argSize = opSize - (twoByteOp ? 2 : 1);
+
+ if (twoByteOp)
+ {
+ *pbBuffer++ = pOpcode->byte1;
+ }
+
+ *pbBuffer++ = pOpcode->byte2;
+
+ switch (argSize)
+ {
+ case 0:
+ break;
+
+ case 1:
+ *pbBuffer = (BYTE)pInstrBuffer[i].uArg;
+ break;
+
+ case 2:
+ SET_UNALIGNED_VAL16(pbBuffer, pInstrBuffer[i].uArg);
+ break;
+
+ case 4:
+ SET_UNALIGNED_VAL32(pbBuffer, pInstrBuffer[i].uArg);
+ break;
+
+ case 8:
+ {
+ UINT64 uVal = pInstrBuffer[i].uArg;
+#ifndef _WIN64 // We don't have room on 32-bit platforms to store the CLR_NAN_64 value, so
+ // we use a special value to represent CLR_NAN_64 and then recreate it here.
+ if ((instr == ILCodeStream::CEE_LDC_R8) && (((UINT32)uVal) == ILCodeStream::SPECIAL_VALUE_NAN_64_ON_32))
+ uVal = CLR_NAN_64;
+#endif // _WIN64
+ SET_UNALIGNED_VAL64(pbBuffer, uVal);
+ }
+ break;
+
+ default:
+ UNREACHABLE_MSG("unexpected il opcode argument size");
+ }
+
+ pbBuffer += argSize;
+ *pcbCode += opSize;
+ }
+ }
+
+ return pbBuffer;
+}
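+
+// Encoding examples (illustrative): a one-byte opcode stores the 0xFF
+// sentinel in byte1, so only byte2 is emitted; two-byte opcodes emit both,
+// and any argument bytes follow via the SET_UNALIGNED_VAL* writers:
+//
+//     ldarg.0     ->  02
+//     ldarg.s 10  ->  0E 0A
+//     ceq         ->  FE 01
+//     br <label>  ->  38 xx xx xx xx   (4-byte relative offset)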
+
+void ILStubLinker::GenerateCode(BYTE* pbBuffer, size_t cbBufferSize)
+{
+ STANDARD_VM_CONTRACT;
+
+ ILCodeStream* pCurrentStream = m_pCodeStreamList;
+ size_t cbCode = 0;
+
+ while (pCurrentStream)
+ {
+ if (pCurrentStream->m_pqbILInstructions)
+ {
+ ILInstruction* pInstrBuffer = (ILInstruction*)pCurrentStream->m_pqbILInstructions->Ptr();
+ pbBuffer = GenerateCodeWorker(pbBuffer, pInstrBuffer, pCurrentStream->m_uCurInstrIdx, &cbCode);
+ }
+
+ pCurrentStream = pCurrentStream->m_pNextStream;
+ }
+
+ CONSISTENCY_CHECK(cbCode <= cbBufferSize);
+}
+
+
+#ifdef _DEBUG
+bool ILStubLinker::IsInCodeStreamList(ILCodeStream* pcs)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ILCodeStream* pCurrentStream = m_pCodeStreamList;
+ while (pCurrentStream)
+ {
+ if (pcs == pCurrentStream)
+ {
+ return true;
+ }
+
+ pCurrentStream = pCurrentStream->m_pNextStream;
+ }
+
+ return false;
+}
+
+// static
+bool ILCodeStream::IsSupportedInstruction(ILInstrEnum instr)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ CONSISTENCY_CHECK_MSG(instr != CEE_SWITCH, "CEE_SWITCH is not supported currently due to InlineSwitch in s_rgbOpcodeSizes");
+    CONSISTENCY_CHECK_MSG(!((instr >= CEE_BR_S) && (instr <= CEE_BLT_UN_S)) && (instr != CEE_LEAVE_S), "we only use long-form branch opcodes");
+ return true;
+}
+#endif // _DEBUG
+
+LPCSTR ILCodeStream::GetStreamDescription(ILStubLinker::CodeStreamType streamType)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ static LPCSTR lpszDescriptions[] = {
+ "Initialize",
+ "Marshal",
+ "CallMethod",
+ "UnmarshalReturn",
+ "Unmarshal",
+ "ExceptionCleanup",
+ "Cleanup",
+ "ExceptionHandler",
+ };
+
+#ifdef _DEBUG
+ size_t len = sizeof(lpszDescriptions)/sizeof(LPCSTR);
+ _ASSERT(streamType >= 0 && (size_t)streamType < len);
+#endif // _DEBUG
+
+ return lpszDescriptions[streamType];
+}
+
+void ILCodeStream::EmitADD()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_ADD, -1, 0);
+}
+void ILCodeStream::EmitADD_OVF()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_ADD_OVF, -1, 0);
+}
+void ILCodeStream::EmitAND()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_AND, -1, 0);
+}
+void ILCodeStream::EmitARGLIST()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_ARGLIST, 1, 0);
+}
+
+void ILCodeStream::EmitBEQ(ILCodeLabel* pCodeLabel)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_BEQ, -2, (UINT_PTR)pCodeLabel);
+}
+
+void ILCodeStream::EmitBGE(ILCodeLabel* pCodeLabel)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_BGE, -2, (UINT_PTR)pCodeLabel);
+}
+
+void ILCodeStream::EmitBGE_UN(ILCodeLabel* pCodeLabel)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_BGE_UN, -2, (UINT_PTR)pCodeLabel);
+}
+
+void ILCodeStream::EmitBGT(ILCodeLabel* pCodeLabel)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_BGT, -2, (UINT_PTR)pCodeLabel);
+}
+void ILCodeStream::EmitBLE(ILCodeLabel* pCodeLabel)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_BLE, -2, (UINT_PTR)pCodeLabel);
+}
+void ILCodeStream::EmitBLE_UN(ILCodeLabel* pCodeLabel)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_BLE_UN, -2, (UINT_PTR)pCodeLabel);
+}
+void ILCodeStream::EmitBLT(ILCodeLabel* pCodeLabel)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_BLT, -2, (UINT_PTR)pCodeLabel);
+}
+void ILCodeStream::EmitBR(ILCodeLabel* pCodeLabel)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_BR, 0, (UINT_PTR)pCodeLabel);
+}
+void ILCodeStream::EmitBREAK()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_BREAK, 0, 0);
+}
+void ILCodeStream::EmitBRFALSE(ILCodeLabel* pCodeLabel)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_BRFALSE, -1, (UINT_PTR)pCodeLabel);
+}
+void ILCodeStream::EmitBRTRUE(ILCodeLabel* pCodeLabel)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_BRTRUE, -1, (UINT_PTR)pCodeLabel);
+}
+void ILCodeStream::EmitCALL(int token, int numInArgs, int numRetArgs)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CALL, (INT16)(numRetArgs - numInArgs), token);
+}
+void ILCodeStream::EmitCALLI(int token, int numInArgs, int numRetArgs)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CALLI, (INT16)(numRetArgs - numInArgs - 1), token);
+}
+void ILCodeStream::EmitCEQ ()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CEQ, -1, 0);
+}
+void ILCodeStream::EmitCGT()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CGT, -1, 0);
+}
+void ILCodeStream::EmitCGT_UN()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CGT_UN, -1, 0);
+}
+void ILCodeStream::EmitCLT()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CLT, -1, 0);
+}
+void ILCodeStream::EmitCLT_UN()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CLT_UN, -1, 0);
+}
+void ILCodeStream::EmitCONV_I()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CONV_I, 0, 0);
+}
+void ILCodeStream::EmitCONV_I1()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CONV_I1, 0, 0);
+}
+void ILCodeStream::EmitCONV_I2()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CONV_I2, 0, 0);
+}
+void ILCodeStream::EmitCONV_I4()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CONV_I4, 0, 0);
+}
+void ILCodeStream::EmitCONV_I8()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CONV_I8, 0, 0);
+}
+void ILCodeStream::EmitCONV_U()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CONV_U, 0, 0);
+}
+void ILCodeStream::EmitCONV_U1()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CONV_U1, 0, 0);
+}
+void ILCodeStream::EmitCONV_U2()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CONV_U2, 0, 0);
+}
+void ILCodeStream::EmitCONV_U4()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CONV_U4, 0, 0);
+}
+void ILCodeStream::EmitCONV_U8()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CONV_U8, 0, 0);
+}
+void ILCodeStream::EmitCONV_R4()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CONV_R4, 0, 0);
+}
+void ILCodeStream::EmitCONV_R8()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CONV_R8, 0, 0);
+}
+void ILCodeStream::EmitCONV_OVF_I4()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CONV_OVF_I4, 0, 0);
+}
+void ILCodeStream::EmitCONV_T(CorElementType t)
+{
+ STANDARD_VM_CONTRACT;
+
+ switch (t)
+ {
+ case ELEMENT_TYPE_U1:
+ EmitCONV_U1();
+ break;
+ case ELEMENT_TYPE_I1:
+ EmitCONV_I1();
+ break;
+ case ELEMENT_TYPE_U2:
+ EmitCONV_U2();
+ break;
+ case ELEMENT_TYPE_I2:
+ EmitCONV_I2();
+ break;
+ case ELEMENT_TYPE_U4:
+ EmitCONV_U4();
+ break;
+ case ELEMENT_TYPE_I4:
+ EmitCONV_I4();
+ break;
+ case ELEMENT_TYPE_U8:
+ EmitCONV_U8();
+ break;
+ case ELEMENT_TYPE_I8:
+ EmitCONV_I8();
+ break;
+ case ELEMENT_TYPE_R4:
+ EmitCONV_R4();
+ break;
+ case ELEMENT_TYPE_R8:
+ EmitCONV_R8();
+ break;
+ case ELEMENT_TYPE_I:
+ EmitCONV_I();
+ break;
+ case ELEMENT_TYPE_U:
+ EmitCONV_U();
+ break;
+ default:
+ _ASSERTE(!"Invalid type for conversion");
+ break;
+ }
+}
+void ILCodeStream::EmitCPBLK()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CPBLK, -3, 0);
+}
+void ILCodeStream::EmitCPOBJ(int token)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_CPOBJ, -2, token);
+}
+void ILCodeStream::EmitDUP ()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_DUP, 1, 0);
+}
+void ILCodeStream::EmitENDFINALLY()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_ENDFINALLY, 0, 0);
+}
+void ILCodeStream::EmitINITBLK()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_INITBLK, -3, 0);
+}
+void ILCodeStream::EmitINITOBJ(int token)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_INITOBJ, -1, token);
+}
+void ILCodeStream::EmitJMP(int token)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_JMP, 0, token);
+}
+void ILCodeStream::EmitLDARG (unsigned uArgIdx)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pOwner->m_fHasThis)
+ {
+ uArgIdx++;
+ }
+ Emit(CEE_LDARG, 1, uArgIdx);
+}
+void ILCodeStream::EmitLDARGA (unsigned uArgIdx)
+{
+ WRAPPER_NO_CONTRACT;
+ if (m_pOwner->m_fHasThis)
+ {
+ uArgIdx++;
+ }
+ Emit(CEE_LDARGA, 1, uArgIdx);
+}
+void ILCodeStream::EmitLDC(DWORD_PTR uConst)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(
+#ifdef _WIN64
+ CEE_LDC_I8
+#else
+ CEE_LDC_I4
+#endif
+ , 1, uConst);
+}
+void ILCodeStream::EmitLDC_R4(UINT32 uConst)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDC_R4, 1, uConst);
+}
+void ILCodeStream::EmitLDC_R8(UINT64 uConst)
+{
+ STANDARD_VM_CONTRACT;
+#ifndef _WIN64 // We don't have room on 32-bit platforms to store the CLR_NAN_64 value, so
+ // we use a special value to represent CLR_NAN_64 and then recreate it later.
+ CONSISTENCY_CHECK(((UINT32)uConst) != SPECIAL_VALUE_NAN_64_ON_32);
+ if (uConst == CLR_NAN_64)
+ uConst = SPECIAL_VALUE_NAN_64_ON_32;
+ else
+ CONSISTENCY_CHECK(FitsInU4(uConst));
+#endif // _WIN64
+ Emit(CEE_LDC_R8, 1, (UINT_PTR)uConst);
+}
+void ILCodeStream::EmitLDELEM_REF()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDELEM_REF, -1, 0);
+}
+void ILCodeStream::EmitLDFLD(int token)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDFLD, 0, token);
+}
+void ILCodeStream::EmitLDFLDA(int token)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDFLDA, 0, token);
+}
+void ILCodeStream::EmitLDFTN(int token)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDFTN, 1, token);
+}
+void ILCodeStream::EmitLDIND_I()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDIND_I, 0, 0);
+}
+void ILCodeStream::EmitLDIND_I1()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDIND_I1, 0, 0);
+}
+void ILCodeStream::EmitLDIND_I2()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDIND_I2, 0, 0);
+}
+void ILCodeStream::EmitLDIND_I4()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDIND_I4, 0, 0);
+}
+void ILCodeStream::EmitLDIND_I8()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDIND_I8, 0, 0);
+}
+void ILCodeStream::EmitLDIND_R4()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDIND_R4, 0, 0);
+}
+void ILCodeStream::EmitLDIND_R8()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDIND_R8, 0, 0);
+}
+void ILCodeStream::EmitLDIND_REF()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDIND_REF, 0, 0);
+}
+void ILCodeStream::EmitLDIND_T(LocalDesc* pType)
+{
+ CONTRACTL
+ {
+ PRECONDITION(pType->cbType == 1);
+ }
+ CONTRACTL_END;
+
+ switch (pType->ElementType[0])
+ {
+ case ELEMENT_TYPE_I1: EmitLDIND_I1(); break;
+ case ELEMENT_TYPE_BOOLEAN: // fall through
+ case ELEMENT_TYPE_U1: EmitLDIND_U1(); break;
+ case ELEMENT_TYPE_I2: EmitLDIND_I2(); break;
+ case ELEMENT_TYPE_CHAR: // fall through
+ case ELEMENT_TYPE_U2: EmitLDIND_U2(); break;
+ case ELEMENT_TYPE_I4: EmitLDIND_I4(); break;
+ case ELEMENT_TYPE_U4: EmitLDIND_U4(); break;
+ case ELEMENT_TYPE_I8: EmitLDIND_I8(); break;
+ case ELEMENT_TYPE_U8: EmitLDIND_I8(); break;
+ case ELEMENT_TYPE_R4: EmitLDIND_R4(); break;
+ case ELEMENT_TYPE_R8: EmitLDIND_R8(); break;
+ case ELEMENT_TYPE_FNPTR: // same as ELEMENT_TYPE_I
+ case ELEMENT_TYPE_I: EmitLDIND_I(); break;
+ case ELEMENT_TYPE_U: EmitLDIND_I(); break;
+ case ELEMENT_TYPE_STRING: // fall through
+ case ELEMENT_TYPE_CLASS: // fall through
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_OBJECT: EmitLDIND_REF(); break;
+
+ case ELEMENT_TYPE_INTERNAL:
+ {
+ CONSISTENCY_CHECK_MSG(!(pType->InternalToken.GetMethodTable()->IsValueType()), "don't know how to handle value types here");
+ EmitLDIND_REF();
+ break;
+ }
+
+ default:
+ UNREACHABLE_MSG("unexpected type passed to EmitLDIND_T");
+ break;
+ }
+}
+void ILCodeStream::EmitLDIND_U1()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDIND_U1, 0, 0);
+}
+void ILCodeStream::EmitLDIND_U2()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDIND_U2, 0, 0);
+}
+void ILCodeStream::EmitLDIND_U4()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDIND_U4, 0, 0);
+}
+void ILCodeStream::EmitLDLEN()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDLEN, 0, 0);
+}
+void ILCodeStream::EmitLDLOC (DWORD dwLocalNum)
+{
+ STANDARD_VM_CONTRACT;
+ CONSISTENCY_CHECK(dwLocalNum != (DWORD)-1);
+ CONSISTENCY_CHECK(dwLocalNum != (WORD)-1);
+ Emit(CEE_LDLOC, 1, dwLocalNum);
+}
+void ILCodeStream::EmitLDLOCA (DWORD dwLocalNum)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDLOCA, 1, dwLocalNum);
+}
+void ILCodeStream::EmitLDNULL()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDNULL, 1, 0);
+}
+void ILCodeStream::EmitLDOBJ (int token)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDOBJ, 0, token);
+}
+void ILCodeStream::EmitLDSFLD(int token)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDSFLD, 1, token);
+}
+void ILCodeStream::EmitLDSFLDA(int token)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDSFLDA, 1, token);
+}
+void ILCodeStream::EmitLDTOKEN(int token)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LDTOKEN, 1, token);
+}
+void ILCodeStream::EmitLEAVE(ILCodeLabel* pCodeLabel)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LEAVE, 0, (UINT_PTR)pCodeLabel);
+}
+void ILCodeStream::EmitLOCALLOC()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_LOCALLOC, 0, 0);
+}
+void ILCodeStream::EmitMUL()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_MUL, -1, 0);
+}
+void ILCodeStream::EmitMUL_OVF()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_MUL_OVF, -1, 0);
+}
+void ILCodeStream::EmitNEWOBJ(int token, int numInArgs)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_NEWOBJ, (INT16)(1 - numInArgs), token);
+}
+
+void ILCodeStream::EmitNOP(LPCSTR pszNopComment)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_NOP, 0, (UINT_PTR)pszNopComment);
+}
+
+void ILCodeStream::EmitPOP()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_POP, -1, 0);
+}
+void ILCodeStream::EmitRET()
+{
+ WRAPPER_NO_CONTRACT;
+ INT16 iStackDelta = m_pOwner->m_StubHasVoidReturnType ? 0 : -1;
+ Emit(CEE_RET, iStackDelta, 0);
+}
+void ILCodeStream::EmitSHR_UN()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_SHR_UN, -1, 0);
+}
+void ILCodeStream::EmitSTARG(unsigned uArgIdx)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_STARG, -1, uArgIdx);
+}
+void ILCodeStream::EmitSTELEM_REF()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_STELEM_REF, -3, 0);
+}
+void ILCodeStream::EmitSTIND_I()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_STIND_I, -2, 0);
+}
+void ILCodeStream::EmitSTIND_I1()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_STIND_I1, -2, 0);
+}
+void ILCodeStream::EmitSTIND_I2()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_STIND_I2, -2, 0);
+}
+void ILCodeStream::EmitSTIND_I4()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_STIND_I4, -2, 0);
+}
+void ILCodeStream::EmitSTIND_I8()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_STIND_I8, -2, 0);
+}
+void ILCodeStream::EmitSTIND_R4()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_STIND_R4, -2, 0);
+}
+void ILCodeStream::EmitSTIND_R8()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_STIND_R8, -2, 0);
+}
+void ILCodeStream::EmitSTIND_REF()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_STIND_REF, -2, 0);
+}
+void ILCodeStream::EmitSTIND_T(LocalDesc* pType)
+{
+ CONTRACTL
+ {
+ PRECONDITION(pType->cbType == 1);
+ }
+ CONTRACTL_END;
+
+ switch (pType->ElementType[0])
+ {
+ case ELEMENT_TYPE_I1: EmitSTIND_I1(); break;
+ case ELEMENT_TYPE_BOOLEAN: // fall through
+ case ELEMENT_TYPE_U1: EmitSTIND_I1(); break;
+ case ELEMENT_TYPE_I2: EmitSTIND_I2(); break;
+ case ELEMENT_TYPE_CHAR: // fall through
+ case ELEMENT_TYPE_U2: EmitSTIND_I2(); break;
+ case ELEMENT_TYPE_I4: EmitSTIND_I4(); break;
+ case ELEMENT_TYPE_U4: EmitSTIND_I4(); break;
+ case ELEMENT_TYPE_I8: EmitSTIND_I8(); break;
+ case ELEMENT_TYPE_U8: EmitSTIND_I8(); break;
+ case ELEMENT_TYPE_R4: EmitSTIND_R4(); break;
+ case ELEMENT_TYPE_R8: EmitSTIND_R8(); break;
+ case ELEMENT_TYPE_FNPTR: // same as ELEMENT_TYPE_I
+ case ELEMENT_TYPE_I: EmitSTIND_I(); break;
+ case ELEMENT_TYPE_U: EmitSTIND_I(); break;
+ case ELEMENT_TYPE_STRING: // fall through
+ case ELEMENT_TYPE_CLASS: // fall through
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_OBJECT: EmitSTIND_REF(); break;
+
+ case ELEMENT_TYPE_INTERNAL:
+ {
+ CONSISTENCY_CHECK_MSG(!(pType->InternalToken.GetMethodTable()->IsValueType()), "don't know how to handle value types here");
+ EmitSTIND_REF();
+ break;
+ }
+
+ default:
+ UNREACHABLE_MSG("unexpected type passed to EmitSTIND_T");
+ break;
+ }
+}
+void ILCodeStream::EmitSTFLD(int token)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_STFLD, -2, token);
+}
+void ILCodeStream::EmitSTLOC(DWORD dwLocalNum)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_STLOC, -1, dwLocalNum);
+}
+void ILCodeStream::EmitSTOBJ(int token)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_STOBJ, -2, token);
+}
+void ILCodeStream::EmitSTSFLD(int token)
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_STSFLD, -1, token);
+}
+void ILCodeStream::EmitSUB()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_SUB, -1, 0);
+}
+void ILCodeStream::EmitTHROW()
+{
+ WRAPPER_NO_CONTRACT;
+ Emit(CEE_THROW, -1, 0);
+}
+
+
+void ILCodeStream::EmitNEWOBJ(BinderMethodID id, int numInArgs)
+{
+ STANDARD_VM_CONTRACT;
+ EmitNEWOBJ(GetToken(MscorlibBinder::GetMethod(id)), numInArgs);
+}
+
+void ILCodeStream::EmitCALL(BinderMethodID id, int numInArgs, int numRetArgs)
+{
+ STANDARD_VM_CONTRACT;
+ EmitCALL(GetToken(MscorlibBinder::GetMethod(id)), numInArgs, numRetArgs);
+}
+
+
+
+
+
+
+void ILStubLinker::SetHasThis (bool fHasThis)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fHasThis = fHasThis;
+}
+
+void ILCodeStream::EmitLoadThis ()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(m_pOwner->m_fHasThis);
+ // OK, this is ugly, but we add 1 to all LDARGs when
+ // m_fHasThis is true, so we compensate for that here
+ // so that we don't have to have a special method to
+ // load arguments.
+ EmitLDARG((unsigned)-1);
+}
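+
+// Illustrative note: with m_fHasThis set, the (unsigned)-1 above wraps back
+// to argument index 0 inside EmitLDARG, so this emits a plain `ldarg.0`.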
+
+void ILCodeStream::EmitLoadNullPtr()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // This is the correct way to load unmanaged zero pointer. EmitLDC(0) alone works
+ // fine in most cases but may lead to wrong code being generated on 64-bit if the
+ // flow graph is complex.
+ EmitLDC(0);
+ EmitCONV_I();
+}
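+
+// The emitted IL sequence is simply (illustrative):
+//
+//     ldc.i4.0
+//     conv.i      // widen the zero to a pointer-sized native int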
+
+void ILCodeStream::EmitArgIteratorCreateAndLoad()
+{
+ STANDARD_VM_CONTRACT;
+
+ //
+ // we insert the ArgIterator in the same spot that the VASigCookie will go for sanity
+ //
+ LocalDesc aiLoc(MscorlibBinder::GetClass(CLASS__ARG_ITERATOR));
+ int aiLocNum;
+
+ aiLocNum = NewLocal(aiLoc);
+
+ EmitLDLOCA(aiLocNum);
+ EmitDUP();
+ EmitARGLIST();
+ EmitLoadNullPtr();
+ EmitCALL(METHOD__ARG_ITERATOR__CTOR2, 2, 0);
+
+ aiLoc.ElementType[0] = ELEMENT_TYPE_BYREF;
+ aiLoc.ElementType[1] = ELEMENT_TYPE_INTERNAL;
+ aiLoc.cbType = 2;
+ aiLoc.InternalToken = MscorlibBinder::GetClass(CLASS__ARG_ITERATOR);
+
+ SetStubTargetArgType(&aiLoc, false);
+}
+
+DWORD ILStubLinker::NewLocal(CorElementType typ)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ LocalDesc locDesc(typ);
+ return NewLocal(locDesc);
+}
+
+StubSigBuilder::StubSigBuilder() :
+ m_nItems(0),
+ m_cbSig(0)
+{
+ STANDARD_VM_CONTRACT;
+
+ m_pbSigCursor = (BYTE*) m_qbSigBuffer.AllocThrows(INITIAL_BUFFER_SIZE);
+}
+
+void StubSigBuilder::EnsureEnoughQuickBytes(size_t cbToAppend)
+{
+ STANDARD_VM_CONTRACT;
+
+ SIZE_T cbBuffer = m_qbSigBuffer.Size();
+ if ((m_cbSig + cbToAppend) >= cbBuffer)
+ {
+ m_qbSigBuffer.ReSizeThrows(2 * cbBuffer);
+ m_pbSigCursor = ((BYTE*)m_qbSigBuffer.Ptr()) + m_cbSig;
+ }
+}
+
+DWORD StubSigBuilder::Append(LocalDesc* pLoc)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(pLoc));
+ }
+ CONTRACTL_END;
+
+ EnsureEnoughQuickBytes(pLoc->cbType + sizeof(TypeHandle));
+
+ memcpyNoGCRefs(m_pbSigCursor, pLoc->ElementType, pLoc->cbType);
+ m_pbSigCursor += pLoc->cbType;
+ m_cbSig += pLoc->cbType;
+
+ size_t i = 0;
+
+ while (i < pLoc->cbType)
+ {
+ CONSISTENCY_CHECK( ELEMENT_TYPE_CLASS != pLoc->ElementType[i]
+ && ELEMENT_TYPE_VALUETYPE != pLoc->ElementType[i]);
+
+ switch (pLoc->ElementType[i])
+ {
+ case ELEMENT_TYPE_INTERNAL:
+ SET_UNALIGNED_PTR(m_pbSigCursor, (UINT_PTR)pLoc->InternalToken.AsPtr());
+ m_pbSigCursor += sizeof(TypeHandle);
+ m_cbSig += sizeof(TypeHandle);
+ break;
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+ SigPointer ptr(pLoc->pSig);
+
+ SigBuilder sigBuilder;
+ ptr.ConvertToInternalSignature(pLoc->pSigModule, NULL, &sigBuilder);
+
+ DWORD cbFnPtrSig;
+ PVOID pFnPtrSig = sigBuilder.GetSignature(&cbFnPtrSig);
+
+ EnsureEnoughQuickBytes(cbFnPtrSig);
+
+ memcpyNoGCRefs(m_pbSigCursor, pFnPtrSig, cbFnPtrSig);
+
+ m_pbSigCursor += cbFnPtrSig;
+ m_cbSig += cbFnPtrSig;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ i++;
+ }
+
+ if (pLoc->ElementType[0] == ELEMENT_TYPE_ARRAY)
+ {
+ EnsureEnoughQuickBytes(pLoc->cbArrayBoundsInfo);
+
+ memcpyNoGCRefs(m_pbSigCursor, pLoc->pSig, pLoc->cbArrayBoundsInfo);
+ m_pbSigCursor += pLoc->cbArrayBoundsInfo;
+ m_cbSig += pLoc->cbArrayBoundsInfo;
+ }
+
+ _ASSERTE(m_cbSig <= m_qbSigBuffer.Size()); // we corrupted our buffer resizing if this assert fires
+
+ return m_nItems++;
+}
+
+//---------------------------------------------------------------------------------------
+//
+DWORD
+LocalSigBuilder::GetSigSize()
+{
+ STANDARD_VM_CONTRACT;
+
+ BYTE temp[4];
+ UINT32 cbEncoded = CorSigCompressData(m_nItems, temp);
+
+ S_UINT32 cbSigSize =
+ S_UINT32(1) + // IMAGE_CEE_CS_CALLCONV_LOCAL_SIG
+ S_UINT32(cbEncoded) + // encoded number of locals
+ S_UINT32(m_cbSig) + // types
+ S_UINT32(1); // ELEMENT_TYPE_END
+ if (cbSigSize.IsOverflow())
+ {
+ IfFailThrow(COR_E_OVERFLOW);
+ }
+ return cbSigSize.Value();
+}
+
+//---------------------------------------------------------------------------------------
+//
+DWORD
+LocalSigBuilder::GetSig(
+ BYTE * pbLocalSig,
+ DWORD cbBuffer)
+{
+ STANDARD_VM_CONTRACT;
+ BYTE temp[4];
+ size_t cb = CorSigCompressData(m_nItems, temp);
+
+ _ASSERTE((1 + cb + m_cbSig + 1) == GetSigSize());
+
+ if ((1 + cb + m_cbSig + 1) <= cbBuffer)
+ {
+ pbLocalSig[0] = IMAGE_CEE_CS_CALLCONV_LOCAL_SIG;
+ memcpyNoGCRefs(&pbLocalSig[1], temp, cb);
+ memcpyNoGCRefs(&pbLocalSig[1 + cb], m_qbSigBuffer.Ptr(), m_cbSig);
+ pbLocalSig[1 + cb + m_cbSig] = ELEMENT_TYPE_END;
+ return (DWORD)(1 + cb + m_cbSig + 1);
+ }
+ else
+ {
+        return 0;
+ }
+}
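+
+// Layout example (illustrative): a local signature holding an int32 and a
+// string local is emitted by this builder as
+//
+//     07      IMAGE_CEE_CS_CALLCONV_LOCAL_SIG
+//     02      compressed count of locals
+//     08 0E   ELEMENT_TYPE_I4, ELEMENT_TYPE_STRING
+//     00      ELEMENT_TYPE_END terminator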
+
+FunctionSigBuilder::FunctionSigBuilder() :
+ m_callingConv(IMAGE_CEE_CS_CALLCONV_DEFAULT)
+{
+ STANDARD_VM_CONTRACT;
+ m_qbReturnSig.ReSizeThrows(1);
+ *(CorElementType *)m_qbReturnSig.Ptr() = ELEMENT_TYPE_VOID;
+}
+
+
+void FunctionSigBuilder::SetReturnType(LocalDesc* pLoc)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(pLoc->cbType > 0);
+ }
+ CONTRACTL_END;
+
+ m_qbReturnSig.ReSizeThrows(pLoc->cbType);
+ memcpyNoGCRefs(m_qbReturnSig.Ptr(), pLoc->ElementType, pLoc->cbType);
+
+ size_t i = 0;
+
+ while (i < pLoc->cbType)
+ {
+ CONSISTENCY_CHECK( ELEMENT_TYPE_CLASS != pLoc->ElementType[i]
+ && ELEMENT_TYPE_VALUETYPE != pLoc->ElementType[i]);
+
+ switch (pLoc->ElementType[i])
+ {
+ case ELEMENT_TYPE_INTERNAL:
+ m_qbReturnSig.ReSizeThrows(m_qbReturnSig.Size() + sizeof(TypeHandle));
+            SET_UNALIGNED_PTR((BYTE *)m_qbReturnSig.Ptr() + m_qbReturnSig.Size() - sizeof(TypeHandle), (UINT_PTR)pLoc->InternalToken.AsPtr());
+ break;
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+ SigPointer ptr(pLoc->pSig);
+
+ SigBuilder sigBuilder;
+ ptr.ConvertToInternalSignature(pLoc->pSigModule, NULL, &sigBuilder);
+
+ DWORD cbFnPtrSig;
+ PVOID pFnPtrSig = sigBuilder.GetSignature(&cbFnPtrSig);
+
+ m_qbReturnSig.ReSizeThrows(m_qbReturnSig.Size() + cbFnPtrSig);
+
+ memcpyNoGCRefs((BYTE *)m_qbReturnSig.Ptr() + m_qbReturnSig.Size() - cbFnPtrSig, pFnPtrSig, cbFnPtrSig);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ i++;
+ }
+
+ if (pLoc->ElementType[0] == ELEMENT_TYPE_ARRAY)
+ {
+ SIZE_T size = m_qbReturnSig.Size();
+ m_qbReturnSig.ReSizeThrows(size + pLoc->cbArrayBoundsInfo);
+ memcpyNoGCRefs((BYTE *)m_qbReturnSig.Ptr() + size, pLoc->pSig, pLoc->cbArrayBoundsInfo);
+ }
+}
+
+void FunctionSigBuilder::SetSig(PCCOR_SIGNATURE pSig, DWORD cSig)
+{
+ STANDARD_VM_CONTRACT;
+
+ // parse the incoming signature
+ SigPointer sigPtr(pSig, cSig);
+
+ // 1) calling convention
+ ULONG callConv;
+ IfFailThrow(sigPtr.GetCallingConvInfo(&callConv));
+ SetCallingConv((CorCallingConvention)callConv);
+
+ // 2) number of parameters
+ IfFailThrow(sigPtr.GetData(&m_nItems));
+
+ // 3) return type
+ PCCOR_SIGNATURE ptr = sigPtr.GetPtr();
+ IfFailThrow(sigPtr.SkipExactlyOne());
+
+ size_t retSigLength = sigPtr.GetPtr() - ptr;
+
+ m_qbReturnSig.ReSizeThrows(retSigLength);
+ memcpyNoGCRefs(m_qbReturnSig.Ptr(), ptr, retSigLength);
+
+ // 4) parameters
+ m_cbSig = 0;
+
+ size_t cbSigLen = (cSig - (sigPtr.GetPtr() - pSig));
+
+ m_pbSigCursor = (BYTE *)m_qbSigBuffer.Ptr();
+ EnsureEnoughQuickBytes(cbSigLen);
+
+ memcpyNoGCRefs(m_pbSigCursor, sigPtr.GetPtr(), cbSigLen);
+
+ m_cbSig = cbSigLen;
+ m_pbSigCursor += cbSigLen;
+}
+
+//---------------------------------------------------------------------------------------
+//
+DWORD
+FunctionSigBuilder::GetSigSize()
+{
+ STANDARD_VM_CONTRACT;
+
+ BYTE temp[4];
+ DWORD cbEncodedLen = CorSigCompressData(m_nItems, temp);
+ SIZE_T cbEncodedRetType = m_qbReturnSig.Size();
+
+ CONSISTENCY_CHECK(cbEncodedRetType > 0);
+
+ S_UINT32 cbSigSize =
+ S_UINT32(1) + // calling convention
+ S_UINT32(cbEncodedLen) + // encoded number of args
+ S_UINT32(cbEncodedRetType) + // encoded return type
+ S_UINT32(m_cbSig) + // types
+ S_UINT32(1); // ELEMENT_TYPE_END
+ if (cbSigSize.IsOverflow())
+ {
+ IfFailThrow(COR_E_OVERFLOW);
+ }
+ return cbSigSize.Value();
+}
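+
+// Note: CorSigCompressData uses the ECMA-335 compressed-integer encoding, so
+// cbEncodedLen above is 1 byte for counts up to 0x7F, 2 bytes up to 0x3FFF, and
+// 4 bytes up to 0x1FFFFFFF -- hence the size must be computed, not assumed.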
+
+//---------------------------------------------------------------------------------------
+//
+DWORD
+FunctionSigBuilder::GetSig(
+ BYTE * pbLocalSig,
+ DWORD cbBuffer)
+{
+ STANDARD_VM_CONTRACT;
+ BYTE tempLen[4];
+ size_t cbEncodedLen = CorSigCompressData(m_nItems, tempLen);
+ size_t cbEncodedRetType = m_qbReturnSig.Size();
+
+ CONSISTENCY_CHECK(cbEncodedRetType > 0);
+
+ _ASSERTE((1 + cbEncodedLen + cbEncodedRetType + m_cbSig + 1) == GetSigSize());
+
+ if ((1 + cbEncodedLen + cbEncodedRetType + m_cbSig + 1) <= cbBuffer)
+ {
+ BYTE* pbCursor = pbLocalSig;
+ *pbCursor = static_cast<BYTE>(m_callingConv);
+ pbCursor++;
+
+ memcpyNoGCRefs(pbCursor, tempLen, cbEncodedLen);
+ pbCursor += cbEncodedLen;
+
+ memcpyNoGCRefs(pbCursor, m_qbReturnSig.Ptr(), m_qbReturnSig.Size());
+ pbCursor += m_qbReturnSig.Size();
+
+ memcpyNoGCRefs(pbCursor, m_qbSigBuffer.Ptr(), m_cbSig);
+ pbCursor += m_cbSig;
+ pbCursor[0] = ELEMENT_TYPE_END;
+ return (DWORD)(1 + cbEncodedLen + cbEncodedRetType + m_cbSig + 1);
+ }
+ else
+ {
+        return 0;
+ }
+}
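+
+// Illustrative layout (sketch): for a default-callconv target taking
+// (int64, object) and returning int32, the blob emitted above would be:
+//
+//   00      IMAGE_CEE_CS_CALLCONV_DEFAULT
+//   02      compressed argument count
+//   08      ELEMENT_TYPE_I4 return type
+//   0A 1C   ELEMENT_TYPE_I8, ELEMENT_TYPE_OBJECT
+//   00      ELEMENT_TYPE_END (stub target sigs are explicitly terminated here)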
+
+DWORD ILStubLinker::NewLocal(LocalDesc loc)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return m_localSigBuilder.NewLocal(&loc);
+}
+
+//---------------------------------------------------------------------------------------
+//
+DWORD
+ILStubLinker::GetLocalSigSize()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_localSigBuilder.GetSigSize();
+}
+
+//---------------------------------------------------------------------------------------
+//
+DWORD
+ILStubLinker::GetLocalSig(
+ BYTE * pbLocalSig,
+ DWORD cbBuffer)
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD dwRet = m_localSigBuilder.GetSig(pbLocalSig, cbBuffer);
+ return dwRet;
+}
+
+//---------------------------------------------------------------------------------------
+//
+DWORD
+ILStubLinker::GetStubTargetMethodSigSize()
+{
+ STANDARD_VM_CONTRACT;
+
+ return m_nativeFnSigBuilder.GetSigSize();
+}
+
+//---------------------------------------------------------------------------------------
+//
+DWORD
+ILStubLinker::GetStubTargetMethodSig(
+ BYTE * pbSig,
+ DWORD cbSig)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD dwRet = m_nativeFnSigBuilder.GetSig(pbSig, cbSig);
+ return dwRet;
+}
+
+void ILStubLinker::SetStubTargetMethodSig(PCCOR_SIGNATURE pSig, DWORD cSig)
+{
+ STANDARD_VM_CONTRACT;
+
+ m_nativeFnSigBuilder.SetSig(pSig, cSig);
+}
+
+static BOOL SigHasVoidReturnType(const Signature &signature)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END
+
+ SigPointer ptr = signature.CreateSigPointer();
+
+ ULONG data;
+ IfFailThrow(ptr.GetCallingConvInfo(&data));
+ // Skip number of type arguments
+ if (data & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ {
+ IfFailThrow(ptr.GetData(NULL));
+ }
+
+ // skip number of args
+ IfFailThrow(ptr.GetData(NULL));
+
+ CorElementType retType;
+ IfFailThrow(ptr.PeekElemType(&retType));
+
+ return (ELEMENT_TYPE_VOID == retType);
+}
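+
+// Worked example (sketch): for a static generic method `string M<T>(int)`, the
+// sig bytes are 10 01 01 0E 08 -- GENERIC calling convention, one type argument,
+// one parameter, ELEMENT_TYPE_STRING return, ELEMENT_TYPE_I4 parameter. The
+// PeekElemType above lands on 0x0E (not ELEMENT_TYPE_VOID), so this returns FALSE.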
+
+
+ILStubLinker::ILStubLinker(Module* pStubSigModule, const Signature &signature, SigTypeContext *pTypeContext, MethodDesc *pMD,
+ BOOL fTargetHasThis, BOOL fStubHasThis, BOOL fIsNDirectStub) :
+ m_pCodeStreamList(NULL),
+ m_stubSig(signature),
+ m_pTypeContext(pTypeContext),
+ m_pCode(NULL),
+ m_pStubSigModule(pStubSigModule),
+ m_pLabelList(NULL),
+ m_StubHasVoidReturnType(0),
+ m_iTargetStackDelta(0),
+ m_cbCurrentCompressedSigLen(1),
+ m_nLocals(0),
+ m_fHasThis(false),
+ m_pMD(pMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ m_managedSigPtr = signature.CreateSigPointer();
+ if (!signature.IsEmpty())
+ {
+ m_StubHasVoidReturnType = SigHasVoidReturnType(signature);
+
+ //
+ // Get the stub's calling convention. Set m_fHasThis to match
+ // IMAGE_CEE_CS_CALLCONV_HASTHIS.
+ //
+
+ ULONG uStubCallingConvInfo;
+ IfFailThrow(m_managedSigPtr.GetCallingConvInfo(&uStubCallingConvInfo));
+
+ if (fStubHasThis)
+ {
+ m_fHasThis = true;
+ }
+
+ //
+ // If target calling convention was specified, use it instead.
+ // Otherwise, derive one based on the stub's signature.
+ //
+
+ ULONG uCallingConvInfo = uStubCallingConvInfo;
+
+ ULONG uCallingConv = (uCallingConvInfo & IMAGE_CEE_CS_CALLCONV_MASK);
+ ULONG uNativeCallingConv;
+
+ if (IMAGE_CEE_CS_CALLCONV_VARARG == uCallingConv)
+ {
+ //
+ // If we have a PInvoke stub that has a VARARG calling convention
+ // we will transition to a NATIVEVARARG calling convention for the
+ // target call. The JIT64 knows about this calling convention,
+ // basically it is the same as the managed vararg calling convention
+ // except without a VASigCookie.
+ //
+ // If our stub is not a PInvoke stub and has a vararg calling convention,
+ // we are most likely going to have to forward those variable arguments
+ // on to our call target. Unfortunately, callsites to varargs methods
+ // in IL always have full signatures (that's where the VASigCookie comes
+ // from). But we don't have that in this case, so we play some tricks and
+ // pass an ArgIterator down to an assembly routine that pulls out the
+ // variable arguments and puts them in the right spot before forwarding
+ // to the stub target.
+ //
+ // The net result is that we don't want to set the native calling
+ // convention to be vararg for non-PInvoke stubs, so we just use
+ // the default callconv.
+ //
+ if (!fIsNDirectStub)
+ uNativeCallingConv = IMAGE_CEE_CS_CALLCONV_DEFAULT;
+ else
+ uNativeCallingConv = IMAGE_CEE_CS_CALLCONV_NATIVEVARARG;
+ }
+ else
+ {
+ uNativeCallingConv = IMAGE_CEE_CS_CALLCONV_DEFAULT;
+ }
+
+ if (fTargetHasThis && !fIsNDirectStub)
+ {
+ // ndirect native sig never has a 'this' pointer
+ uNativeCallingConv |= IMAGE_CEE_CS_CALLCONV_HASTHIS;
+ }
+
+ if (fTargetHasThis)
+ {
+ m_iTargetStackDelta--;
+ }
+
+ m_nativeFnSigBuilder.SetCallingConv((CorCallingConvention)uNativeCallingConv);
+
+ if (uStubCallingConvInfo & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ IfFailThrow(m_managedSigPtr.GetData(NULL)); // skip number of type parameters
+
+ IfFailThrow(m_managedSigPtr.GetData(NULL)); // skip number of parameters
+ IfFailThrow(m_managedSigPtr.SkipExactlyOne()); // skip return type
+ }
+}
+
+ILStubLinker::~ILStubLinker()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ DeleteCodeLabels();
+ DeleteCodeStreams();
+}
+
+void ILStubLinker::DeleteCodeLabels()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ //
+ // walk the list of labels and free each one
+ //
+ ILCodeLabel* pCurrent = m_pLabelList;
+ while (pCurrent)
+ {
+ ILCodeLabel* pDeleteMe = pCurrent;
+ pCurrent = pCurrent->m_pNext;
+ delete pDeleteMe;
+ }
+ m_pLabelList = NULL;
+}
+
+void ILStubLinker::DeleteCodeStreams()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ ILCodeStream* pCurrent = m_pCodeStreamList;
+ while (pCurrent)
+ {
+ ILCodeStream* pDeleteMe = pCurrent;
+ pCurrent = pCurrent->m_pNextStream;
+ delete pDeleteMe;
+ }
+ m_pCodeStreamList = NULL;
+}
+
+void ILStubLinker::ClearCodeStreams()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ ILCodeStream* pCurrent = m_pCodeStreamList;
+ while (pCurrent)
+ {
+ pCurrent->ClearCode();
+ pCurrent = pCurrent->m_pNextStream;
+ }
+}
+
+void ILStubLinker::GetStubReturnType(LocalDesc* pLoc)
+{
+ WRAPPER_NO_CONTRACT;
+
+ GetStubReturnType(pLoc, m_pStubSigModule);
+}
+
+void ILStubLinker::GetStubReturnType(LocalDesc* pLoc, Module* pModule)
+{
+ STANDARD_VM_CONTRACT;
+ SigPointer ptr = m_stubSig.CreateSigPointer();
+ ULONG uCallingConv;
+ int nTypeArgs = 0;
+ int nArgs;
+
+ IfFailThrow(ptr.GetCallingConvInfo(&uCallingConv));
+
+ if (uCallingConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
+ IfFailThrow(ptr.GetData((ULONG*)&nTypeArgs));
+
+ IfFailThrow(ptr.GetData((ULONG*)&nArgs));
+
+ GetManagedTypeHelper(pLoc, pModule, ptr.GetPtr(), m_pTypeContext, m_pMD);
+}
+
+CorCallingConvention ILStubLinker::GetStubTargetCallingConv()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_nativeFnSigBuilder.GetCallingConv();
+}
+
+void ILStubLinker::TransformArgForJIT(LocalDesc *pLoc)
+{
+ STANDARD_VM_CONTRACT;
+    // Turn everything into blittable primitives. This method is needed chiefly because
+    // of byrefs, which are OK only when they reference stack data or are pinned. That
+    // condition cannot be verified by code:NDirect.MarshalingRequired, so we explicitly
+    // get rid of them here.
+ switch (pLoc->ElementType[0])
+ {
+ // primitives
+ case ELEMENT_TYPE_VOID:
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ {
+ // no transformation needed
+ break;
+ }
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ _ASSERTE(!"Should have been replaced by a native value type!");
+ break;
+ }
+
+ case ELEMENT_TYPE_PTR:
+ {
+#ifdef _TARGET_X86_
+ if (pLoc->bIsCopyConstructed)
+ {
+ // The only pointers that we don't transform to ELEMENT_TYPE_I are those that are
+ // ET_TYPE_CMOD_REQD<IsCopyConstructed>/ET_TYPE_CMOD_REQD<NeedsCopyConstructorModifier>
+ // in the original signature. This convention is understood by the UM thunk compiler
+ // (code:UMThunkMarshInfo.CompileNExportThunk) which will generate different thunk code.
+ // Such parameters come from unmanaged by value but must enter the IL stub by reference
+ // because we are not supposed to make a copy.
+ }
+ else
+#endif // _TARGET_X86_
+ {
+ pLoc->ElementType[0] = ELEMENT_TYPE_I;
+ pLoc->cbType = 1;
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_INTERNAL:
+ {
+ // JIT will handle structures
+ if (pLoc->InternalToken.IsValueType())
+ {
+ _ASSERTE(pLoc->InternalToken.IsBlittable());
+ break;
+ }
+ // intentional fall-thru
+ }
+
+ // pointers, byrefs, strings, arrays, other ref types -> ELEMENT_TYPE_I
+ default:
+ {
+ pLoc->ElementType[0] = ELEMENT_TYPE_I;
+ pLoc->cbType = 1;
+ break;
+ }
+ }
+}
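+
+// Illustrative effect of the transformation above (sketch):
+//   ELEMENT_TYPE_I4 (int)                        -> unchanged
+//   ELEMENT_TYPE_BYREF ELEMENT_TYPE_I4 (int&)    -> ELEMENT_TYPE_I
+//   ELEMENT_TYPE_STRING                          -> ELEMENT_TYPE_I
+//   ELEMENT_TYPE_INTERNAL, blittable value type  -> unchanged (JIT handles it)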
+
+Module *ILStubLinker::GetStubSigModule()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pStubSigModule;
+}
+
+SigTypeContext *ILStubLinker::GetStubSigTypeContext()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pTypeContext;
+}
+
+void ILStubLinker::SetStubTargetReturnType(CorElementType typ)
+{
+ WRAPPER_NO_CONTRACT;
+
+ LocalDesc locDesc(typ);
+ SetStubTargetReturnType(&locDesc);
+}
+
+void ILStubLinker::SetStubTargetReturnType(LocalDesc* pLoc)
+{
+ CONTRACTL
+ {
+ WRAPPER(NOTHROW);
+ WRAPPER(GC_NOTRIGGER);
+ WRAPPER(MODE_ANY);
+ PRECONDITION(CheckPointer(pLoc, NULL_NOT_OK));
+ }
+ CONTRACTL_END;
+
+ TransformArgForJIT(pLoc);
+
+ m_nativeFnSigBuilder.SetReturnType(pLoc);
+
+ if ((1 != pLoc->cbType) || (ELEMENT_TYPE_VOID != pLoc->ElementType[0]))
+ {
+ m_iTargetStackDelta++;
+ }
+}
+
+DWORD ILStubLinker::SetStubTargetArgType(CorElementType typ, bool fConsumeStubArg /*= true*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ LocalDesc locDesc(typ);
+ return SetStubTargetArgType(&locDesc, fConsumeStubArg);
+}
+
+void ILStubLinker::SetStubTargetCallingConv(CorCallingConvention uNativeCallingConv)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_nativeFnSigBuilder.SetCallingConv(uNativeCallingConv);
+}
+
+static size_t GetManagedTypeForMDArray(LocalDesc* pLoc, Module* pModule, PCCOR_SIGNATURE psigManagedArg, SigTypeContext *pTypeContext)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pLoc));
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(psigManagedArg));
+ PRECONDITION(*psigManagedArg == ELEMENT_TYPE_ARRAY);
+ }
+ CONTRACTL_END;
+
+ SigPointer ptr;
+ size_t cbDest = 0;
+
+ //
+ // copy ELEMENT_TYPE_ARRAY
+ //
+ pLoc->ElementType[cbDest] = *psigManagedArg;
+ psigManagedArg++;
+ cbDest++;
+
+ ptr.SetSig(psigManagedArg);
+
+ IfFailThrow(ptr.SkipCustomModifiers());
+
+ psigManagedArg = ptr.GetPtr();
+
+ //
+ // get array type
+ //
+ pLoc->InternalToken = ptr.GetTypeHandleThrowing(pModule, pTypeContext);
+
+ pLoc->ElementType[cbDest] = ELEMENT_TYPE_INTERNAL;
+ cbDest++;
+
+ //
+ // get array bounds
+ //
+
+ size_t cbType;
+ PCCOR_SIGNATURE psigNextManagedArg;
+
+ // find the start of the next argument
+ ptr.SetSig(psigManagedArg - 1); // -1 to back up to E_T_ARRAY;
+ IfFailThrow(ptr.SkipExactlyOne());
+
+ psigNextManagedArg = ptr.GetPtr();
+
+ // find the start of the array bounds information
+ ptr.SetSig(psigManagedArg);
+ IfFailThrow(ptr.SkipExactlyOne());
+
+ psigManagedArg = ptr.GetPtr(); // point to the array bounds info
+ cbType = psigNextManagedArg - psigManagedArg;
+
+ pLoc->pSig = psigManagedArg; // point to the array bounds info
+ pLoc->cbArrayBoundsInfo = cbType; // size of array bounds info
+ pLoc->cbType = cbDest;
+
+ return cbDest;
+}
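+
+// For reference (ECMA-335 ArrayShape): the MD array signature sliced above is
+//   ELEMENT_TYPE_ARRAY <elemType> <rank> <numSizes> <size>* <numLoBounds> <loBound>*
+// so everything following the element type is the "array bounds info" that ends
+// up in pLoc->pSig / pLoc->cbArrayBoundsInfo.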
+
+
+// static
+void ILStubLinker::GetManagedTypeHelper(LocalDesc* pLoc, Module* pModule, PCCOR_SIGNATURE psigManagedArg, SigTypeContext *pTypeContext, MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+
+ PRECONDITION(CheckPointer(pLoc));
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(psigManagedArg));
+ }
+ CONTRACTL_END;
+
+ SigPointer ptr(psigManagedArg);
+ CorElementType eType;
+ LOG((LF_STUBS, LL_INFO10000, "GetManagedTypeHelper on type at %p\n", psigManagedArg));
+
+ IfFailThrow(ptr.PeekElemType(&eType));
+
+ size_t cbDest = 0;
+
+ while (eType == ELEMENT_TYPE_PTR ||
+ eType == ELEMENT_TYPE_BYREF ||
+ eType == ELEMENT_TYPE_SZARRAY)
+ {
+ pLoc->ElementType[cbDest] = static_cast<BYTE>(eType);
+ cbDest++;
+
+ if (cbDest >= LocalDesc::MAX_LOCALDESC_ELEMENTS)
+ {
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIGTOOCOMPLEX);
+ }
+
+ IfFailThrow(ptr.GetElemType(NULL));
+ IfFailThrow(ptr.PeekElemType(&eType));
+ }
+
+ SigPointer ptr2(ptr);
+ IfFailThrow(ptr2.SkipCustomModifiers());
+ psigManagedArg = ptr2.GetPtr();
+
+ switch (eType)
+ {
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ {
+ IfFailThrow(ptr.GetElemType(NULL)); // skip ET
+ ULONG varNum;
+ IfFailThrowBF(ptr.GetData(&varNum), BFA_BAD_COMPLUS_SIG, pModule);
+
+ DWORD varCount = (eType == ELEMENT_TYPE_VAR ? pTypeContext->m_classInst.GetNumArgs() :
+ pTypeContext->m_methodInst.GetNumArgs());
+ THROW_BAD_FORMAT_MAYBE(varNum < varCount, BFA_BAD_COMPLUS_SIG, pModule);
+
+ pLoc->InternalToken = (eType == ELEMENT_TYPE_VAR ? pTypeContext->m_classInst[varNum] :
+ pTypeContext->m_methodInst[varNum]);
+
+ pLoc->ElementType[cbDest] = ELEMENT_TYPE_INTERNAL;
+ cbDest++;
+ break;
+ }
+
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_INTERNAL:
+ {
+ pLoc->InternalToken = ptr.GetTypeHandleThrowing(pModule, pTypeContext);
+
+ pLoc->ElementType[cbDest] = ELEMENT_TYPE_INTERNAL;
+ cbDest++;
+ break;
+ }
+
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ pLoc->InternalToken = ptr.GetTypeHandleThrowing(pModule, pTypeContext);
+
+ pLoc->ElementType[cbDest] = ELEMENT_TYPE_INTERNAL;
+ cbDest++;
+ break;
+ }
+
+ case ELEMENT_TYPE_FNPTR:
+ // save off a pointer to the managed sig
+ // we'll convert it in bulk when we store it
+ // in the generated sig
+ pLoc->pSigModule = pModule;
+ pLoc->pSig = psigManagedArg+1;
+
+ pLoc->ElementType[cbDest] = ELEMENT_TYPE_FNPTR;
+ cbDest++;
+ break;
+
+ case ELEMENT_TYPE_ARRAY:
+ cbDest = GetManagedTypeForMDArray(pLoc, pModule, psigManagedArg, pTypeContext);
+ break;
+
+ default:
+ {
+ size_t cbType;
+ PCCOR_SIGNATURE psigNextManagedArg;
+
+ IfFailThrow(ptr.SkipExactlyOne());
+
+ psigNextManagedArg = ptr.GetPtr();
+ cbType = psigNextManagedArg - psigManagedArg;
+
+ size_t cbNewDest;
+ if (!ClrSafeInt<size_t>::addition(cbDest, cbType, cbNewDest) ||
+ cbNewDest > LocalDesc::MAX_LOCALDESC_ELEMENTS)
+ {
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIGTOOCOMPLEX);
+ }
+
+ memcpyNoGCRefs(&pLoc->ElementType[cbDest], psigManagedArg, cbType);
+ cbDest = cbNewDest;
+ break;
+ }
+ }
+
+ if (cbDest > LocalDesc::MAX_LOCALDESC_ELEMENTS)
+ {
+ COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIGTOOCOMPLEX);
+ }
+
+ pLoc->cbType = cbDest;
+}
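+
+// Worked example (sketch, MyClass is hypothetical): for a `MyClass[]&` argument
+// (BYREF SZARRAY CLASS <token>), the prefix loop above copies BYREF and SZARRAY
+// verbatim, then the CLASS case resolves <token> to a TypeHandle, leaving
+// pLoc->ElementType = { BYREF, SZARRAY, INTERNAL } with InternalToken set.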
+
+void ILStubLinker::GetStubTargetReturnType(LocalDesc* pLoc)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pLoc));
+ }
+ CONTRACTL_END;
+
+ GetStubTargetReturnType(pLoc, m_pStubSigModule);
+}
+
+void ILStubLinker::GetStubTargetReturnType(LocalDesc* pLoc, Module* pModule)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pLoc));
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ GetManagedTypeHelper(pLoc, pModule, m_nativeFnSigBuilder.GetReturnSig(), m_pTypeContext, NULL);
+}
+
+void ILStubLinker::GetStubArgType(LocalDesc* pLoc)
+{
+
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pLoc));
+ }
+ CONTRACTL_END;
+
+ GetStubArgType(pLoc, m_pStubSigModule);
+}
+
+void ILStubLinker::GetStubArgType(LocalDesc* pLoc, Module* pModule)
+{
+
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pLoc));
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ GetManagedTypeHelper(pLoc, pModule, m_managedSigPtr.GetPtr(), m_pTypeContext, m_pMD);
+}
+
+//---------------------------------------------------------------------------------------
+//
+DWORD
+ILStubLinker::SetStubTargetArgType(
+ LocalDesc * pLoc, // = NULL
+ bool fConsumeStubArg) // = true
+{
+ STANDARD_VM_CONTRACT;
+
+ LocalDesc locDesc;
+
+ if (fConsumeStubArg)
+ {
+ _ASSERTE(m_pStubSigModule);
+
+ if (pLoc == NULL)
+ {
+ pLoc = &locDesc;
+ GetStubArgType(pLoc, m_pStubSigModule);
+ }
+
+ IfFailThrow(m_managedSigPtr.SkipExactlyOne());
+ }
+
+ TransformArgForJIT(pLoc);
+
+ DWORD dwArgNum = m_nativeFnSigBuilder.NewArg(pLoc);
+ m_iTargetStackDelta--;
+
+ return dwArgNum;
+} // ILStubLinker::SetStubTargetArgType
+
+//---------------------------------------------------------------------------------------
+//
+int ILStubLinker::GetToken(MethodDesc* pMD)
+{
+ STANDARD_VM_CONTRACT;
+ return m_tokenMap.GetToken(pMD);
+}
+
+int ILStubLinker::GetToken(MethodTable* pMT)
+{
+ STANDARD_VM_CONTRACT;
+ return m_tokenMap.GetToken(TypeHandle(pMT));
+}
+
+int ILStubLinker::GetToken(TypeHandle th)
+{
+ STANDARD_VM_CONTRACT;
+ return m_tokenMap.GetToken(th);
+}
+
+int ILStubLinker::GetToken(FieldDesc* pFD)
+{
+ STANDARD_VM_CONTRACT;
+ return m_tokenMap.GetToken(pFD);
+}
+
+
+BOOL ILStubLinker::StubHasVoidReturnType()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_StubHasVoidReturnType;
+}
+
+void ILStubLinker::ClearCode()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ DeleteCodeLabels();
+ ClearCodeStreams();
+}
+
+// static
+ILCodeStream* ILStubLinker::FindLastCodeStream(ILCodeStream* pList)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (NULL == pList)
+ {
+ return NULL;
+ }
+
+ while (NULL != pList->m_pNextStream)
+ {
+ pList = pList->m_pNextStream;
+ }
+
+ return pList;
+}
+
+ILCodeStream* ILStubLinker::NewCodeStream(CodeStreamType codeStreamType)
+{
+ STANDARD_VM_CONTRACT;
+
+ NewHolder<ILCodeStream> pNewCodeStream = new ILCodeStream(this, codeStreamType);
+
+ if (NULL == m_pCodeStreamList)
+ {
+ m_pCodeStreamList = pNewCodeStream;
+ }
+ else
+ {
+ ILCodeStream* pTail = FindLastCodeStream(m_pCodeStreamList);
+ CONSISTENCY_CHECK(NULL == pTail->m_pNextStream);
+ pTail->m_pNextStream = pNewCodeStream;
+ }
+
+ pNewCodeStream.SuppressRelease();
+ return pNewCodeStream;
+}
+
+int ILCodeStream::GetToken(MethodDesc* pMD)
+{
+ STANDARD_VM_CONTRACT;
+ return m_pOwner->GetToken(pMD);
+}
+int ILCodeStream::GetToken(MethodTable* pMT)
+{
+ STANDARD_VM_CONTRACT;
+ return m_pOwner->GetToken(pMT);
+}
+int ILCodeStream::GetToken(TypeHandle th)
+{
+ STANDARD_VM_CONTRACT;
+ return m_pOwner->GetToken(th);
+}
+int ILCodeStream::GetToken(FieldDesc* pFD)
+{
+ STANDARD_VM_CONTRACT;
+ return m_pOwner->GetToken(pFD);
+}
+
+DWORD ILCodeStream::NewLocal(CorElementType typ)
+{
+ STANDARD_VM_CONTRACT;
+ return m_pOwner->NewLocal(typ);
+}
+DWORD ILCodeStream::NewLocal(LocalDesc loc)
+{
+ WRAPPER_NO_CONTRACT;
+ return m_pOwner->NewLocal(loc);
+}
+DWORD ILCodeStream::SetStubTargetArgType(CorElementType typ, bool fConsumeStubArg)
+{
+ STANDARD_VM_CONTRACT;
+ return m_pOwner->SetStubTargetArgType(typ, fConsumeStubArg);
+}
+DWORD ILCodeStream::SetStubTargetArgType(LocalDesc* pLoc, bool fConsumeStubArg)
+{
+ STANDARD_VM_CONTRACT;
+ return m_pOwner->SetStubTargetArgType(pLoc, fConsumeStubArg);
+}
+void ILCodeStream::SetStubTargetReturnType(CorElementType typ)
+{
+ STANDARD_VM_CONTRACT;
+ m_pOwner->SetStubTargetReturnType(typ);
+}
+void ILCodeStream::SetStubTargetReturnType(LocalDesc* pLoc)
+{
+ STANDARD_VM_CONTRACT;
+ m_pOwner->SetStubTargetReturnType(pLoc);
+}
+ILCodeLabel* ILCodeStream::NewCodeLabel()
+{
+ STANDARD_VM_CONTRACT;
+ return m_pOwner->NewCodeLabel();
+}
+void ILCodeStream::ClearCode()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_uCurInstrIdx = 0;
+}
diff --git a/src/vm/stubgen.h b/src/vm/stubgen.h
new file mode 100644
index 0000000000..02e0f32320
--- /dev/null
+++ b/src/vm/stubgen.h
@@ -0,0 +1,739 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: StubGen.h
+//
+
+//
+
+
+#ifndef __STUBGEN_H__
+#define __STUBGEN_H__
+
+#include "stublink.h"
+
+struct LocalDesc
+{
+ const static size_t MAX_LOCALDESC_ELEMENTS = 8;
+
+ BYTE ElementType[MAX_LOCALDESC_ELEMENTS];
+ size_t cbType;
+ TypeHandle InternalToken; // only valid with ELEMENT_TYPE_INTERNAL
+
+ // used only for E_T_FNPTR and E_T_ARRAY
+ PCCOR_SIGNATURE pSig;
+ union
+ {
+ Module* pSigModule;
+ size_t cbArrayBoundsInfo;
+ BOOL bIsCopyConstructed; // used for E_T_PTR
+ };
+
+ LocalDesc()
+ {
+ }
+
+ inline LocalDesc(CorElementType elemType)
+ {
+ ElementType[0] = static_cast<BYTE>(elemType);
+ cbType = 1;
+ bIsCopyConstructed = FALSE;
+ }
+
+ inline LocalDesc(TypeHandle thType)
+ {
+ ElementType[0] = ELEMENT_TYPE_INTERNAL;
+ cbType = 1;
+ InternalToken = thType;
+ bIsCopyConstructed = FALSE;
+ }
+
+ inline LocalDesc(MethodTable *pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ ElementType[0] = ELEMENT_TYPE_INTERNAL;
+ cbType = 1;
+ InternalToken = TypeHandle(pMT);
+ bIsCopyConstructed = FALSE;
+ }
+
+ void MakeByRef()
+ {
+ ChangeType(ELEMENT_TYPE_BYREF);
+ }
+
+ void MakePinned()
+ {
+ ChangeType(ELEMENT_TYPE_PINNED);
+ }
+
+ // makes the LocalDesc semantically equivalent to ET_TYPE_CMOD_REQD<IsCopyConstructed>/ET_TYPE_CMOD_REQD<NeedsCopyConstructorModifier>
+ void MakeCopyConstructedPointer()
+ {
+ ChangeType(ELEMENT_TYPE_PTR);
+ bIsCopyConstructed = TRUE;
+ }
+
+ void ChangeType(CorElementType elemType)
+ {
+ LIMITED_METHOD_CONTRACT;
+ PREFIX_ASSUME((MAX_LOCALDESC_ELEMENTS-1) >= cbType);
+
+ for (size_t i = cbType; i >= 1; i--)
+ {
+ ElementType[i] = ElementType[i-1];
+ }
+
+ ElementType[0] = static_cast<BYTE>(elemType);
+ cbType += 1;
+ }
+
+ bool IsValueClass()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(cbType == 1); // this only works on 1-element types for now
+ }
+ CONTRACTL_END;
+
+ if (ElementType[0] == ELEMENT_TYPE_VALUETYPE)
+ {
+ return true;
+ }
+ else if ((ElementType[0] == ELEMENT_TYPE_INTERNAL) &&
+ (InternalToken.IsNativeValueType() ||
+ InternalToken.GetMethodTable()->IsValueType()))
+ {
+ return true;
+ }
+
+ return false;
+ }
+};
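+
+// Usage sketch: the modifier helpers prepend to ElementType, so
+//
+//   LocalDesc ld(ELEMENT_TYPE_I4);  // { I4 }
+//   ld.MakeByRef();                 // { BYREF, I4 }
+//   ld.MakePinned();                // { PINNED, BYREF, I4 }
+//
+// yields a pinned byref-to-int32 local.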
+
+class StubSigBuilder
+{
+public:
+ StubSigBuilder();
+
+ DWORD Append(LocalDesc* pLoc);
+
+protected:
+ CQuickBytes m_qbSigBuffer;
+ DWORD m_nItems;
+ BYTE* m_pbSigCursor;
+ size_t m_cbSig;
+
+ enum Constants { INITIAL_BUFFER_SIZE = 256 };
+
+ void EnsureEnoughQuickBytes(size_t cbToAppend);
+};
+
+//---------------------------------------------------------------------------------------
+//
+class LocalSigBuilder : protected StubSigBuilder
+{
+public:
+ DWORD NewLocal(LocalDesc * pLoc)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pLoc));
+ }
+ CONTRACTL_END;
+
+ return Append(pLoc);
+ }
+
+ DWORD GetSigSize();
+ DWORD GetSig(BYTE * pbSig, DWORD cbBuffer);
+
+}; // class LocalSigBuilder
+
+//---------------------------------------------------------------------------------------
+//
+class FunctionSigBuilder : protected StubSigBuilder
+{
+public:
+ FunctionSigBuilder();
+
+ DWORD NewArg(LocalDesc * pArg)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return Append(pArg);
+ }
+
+ DWORD GetNumArgs()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_nItems;
+ }
+
+ void SetCallingConv(CorCallingConvention callingConv)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_callingConv = callingConv;
+ }
+
+ CorCallingConvention GetCallingConv()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_callingConv;
+ }
+
+ void SetSig(PCCOR_SIGNATURE pSig, DWORD cSig);
+
+ DWORD GetSigSize();
+ DWORD GetSig(BYTE * pbSig, DWORD cbBuffer);
+
+ void SetReturnType(LocalDesc* pLoc);
+
+ CorElementType GetReturnElementType()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ CONSISTENCY_CHECK(m_qbReturnSig.Size() > 0);
+ return *(CorElementType *)m_qbReturnSig.Ptr();
+ }
+
+ PCCOR_SIGNATURE GetReturnSig()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ CONSISTENCY_CHECK(m_qbReturnSig.Size() > 0);
+ return (PCCOR_SIGNATURE)m_qbReturnSig.Ptr();
+ }
+
+protected:
+ CorCallingConvention m_callingConv;
+ CQuickBytes m_qbReturnSig;
+}; // class FunctionSigBuilder
+
+#ifdef _DEBUG
+// exercise the resize code
+#define TOKEN_LOOKUP_MAP_SIZE (8*sizeof(void*))
+#else // _DEBUG
+#define TOKEN_LOOKUP_MAP_SIZE (64*sizeof(void*))
+#endif // _DEBUG
+
+//---------------------------------------------------------------------------------------
+//
+class TokenLookupMap
+{
+public:
+ TokenLookupMap()
+ {
+ STANDARD_VM_CONTRACT;
+
+ m_qbEntries.AllocThrows(TOKEN_LOOKUP_MAP_SIZE);
+ m_nextAvailableRid = 0;
+ }
+
+ // copy ctor
+ TokenLookupMap(TokenLookupMap* pSrc)
+ {
+ STANDARD_VM_CONTRACT;
+
+ m_nextAvailableRid = pSrc->m_nextAvailableRid;
+ size_t size = pSrc->m_qbEntries.Size();
+ m_qbEntries.AllocThrows(size);
+ memcpy(m_qbEntries.Ptr(), pSrc->m_qbEntries.Ptr(), size);
+ }
+
+ TypeHandle LookupTypeDef(mdToken token)
+ {
+ WRAPPER_NO_CONTRACT;
+ return LookupTokenWorker<mdtTypeDef, MethodTable*>(token);
+ }
+ MethodDesc* LookupMethodDef(mdToken token)
+ {
+ WRAPPER_NO_CONTRACT;
+ return LookupTokenWorker<mdtMethodDef, MethodDesc*>(token);
+ }
+ FieldDesc* LookupFieldDef(mdToken token)
+ {
+ WRAPPER_NO_CONTRACT;
+ return LookupTokenWorker<mdtFieldDef, FieldDesc*>(token);
+ }
+
+ mdToken GetToken(TypeHandle pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetTokenWorker<mdtTypeDef, TypeHandle>(pMT);
+ }
+ mdToken GetToken(MethodDesc* pMD)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetTokenWorker<mdtMethodDef, MethodDesc*>(pMD);
+ }
+ mdToken GetToken(FieldDesc* pFieldDesc)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetTokenWorker<mdtFieldDef, FieldDesc*>(pFieldDesc);
+ }
+
+protected:
+ template<mdToken TokenType, typename HandleType>
+ HandleType LookupTokenWorker(mdToken token)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ PRECONDITION(RidFromToken(token)-1 < m_nextAvailableRid);
+ PRECONDITION(RidFromToken(token) != 0);
+ PRECONDITION(TypeFromToken(token) == TokenType);
+ }
+ CONTRACTL_END;
+
+ return ((HandleType*)m_qbEntries.Ptr())[RidFromToken(token)-1];
+ }
+
+ template<mdToken TokenType, typename HandleType>
+ mdToken GetTokenWorker(HandleType handle)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ PRECONDITION(handle != NULL);
+ }
+ CONTRACTL_END;
+
+ if (m_qbEntries.Size() <= (sizeof(handle) * m_nextAvailableRid))
+ {
+ m_qbEntries.ReSizeThrows(2 * m_qbEntries.Size());
+ }
+
+ mdToken token = TokenFromRid(m_nextAvailableRid++, TokenType)+1;
+
+ ((HandleType*)m_qbEntries.Ptr())[RidFromToken(token)-1] = handle;
+
+ return token;
+ }
+
+ unsigned int m_nextAvailableRid;
+ CQuickBytesSpecifySize<TOKEN_LOOKUP_MAP_SIZE> m_qbEntries;
+};
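+
+// Round-trip sketch (pSomeMD is hypothetical): RIDs are handed out sequentially
+// starting at 1, so the first method registered gets 0x06000001:
+//
+//   mdToken tk = map.GetToken(pSomeMD);        // mdtMethodDef | RID 1
+//   MethodDesc *pMD = map.LookupMethodDef(tk); // returns pSomeMD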
+
+struct ILStubEHClause
+{
+ enum Kind { kNone, kTypedCatch, kFinally };
+
+ DWORD kind;
+ DWORD dwTryBeginOffset;
+ DWORD cbTryLength;
+ DWORD dwHandlerBeginOffset;
+ DWORD cbHandlerLength;
+ DWORD dwTypeToken;
+};
+
+
+class ILCodeLabel;
+class ILCodeStream;
+//---------------------------------------------------------------------------------------
+//
+class ILStubLinker
+{
+ friend class ILCodeLabel;
+ friend class ILCodeStream;
+
+public:
+
+ ILStubLinker(Module* pModule, const Signature &signature, SigTypeContext *pTypeContext, MethodDesc *pMD,
+ BOOL fTargetHasThis, BOOL fStubHasThis, BOOL fIsNDirectStub = FALSE);
+ ~ILStubLinker();
+
+ void GenerateCode(BYTE* pbBuffer, size_t cbBufferSize);
+ void ClearCode();
+
+protected:
+
+ void DeleteCodeLabels();
+ void DeleteCodeStreams();
+
+ struct ILInstruction
+ {
+ UINT16 uInstruction;
+ INT16 iStackDelta;
+ UINT_PTR uArg;
+ };
+
+ static void PatchInstructionArgument(ILCodeLabel* pLabel, UINT_PTR uNewArg
+ DEBUG_ARG(UINT16 uExpectedInstruction));
+
+#ifdef _DEBUG
+ bool IsInCodeStreamList(ILCodeStream* pcs);
+#endif // _DEBUG
+
+public:
+
+ void SetHasThis (bool fHasThis);
+ bool HasThis () { LIMITED_METHOD_CONTRACT; return m_fHasThis; }
+
+ DWORD GetLocalSigSize();
+ DWORD GetLocalSig(BYTE * pbLocalSig, DWORD cbBuffer);
+
+ DWORD GetStubTargetMethodSigSize();
+ DWORD GetStubTargetMethodSig(BYTE * pbLocalSig, DWORD cbBuffer);
+
+ void SetStubTargetMethodSig(PCCOR_SIGNATURE pSig, DWORD cSig);
+
+ void GetStubTargetReturnType(LocalDesc * pLoc);
+ void GetStubTargetReturnType(LocalDesc * pLoc, Module * pModule);
+
+ void GetStubArgType(LocalDesc * pLoc);
+ void GetStubArgType(LocalDesc * pLoc, Module * pModule);
+ void GetStubReturnType(LocalDesc * pLoc);
+ void GetStubReturnType(LocalDesc * pLoc, Module * pModule);
+ CorCallingConvention GetStubTargetCallingConv();
+
+
+ CorElementType GetStubTargetReturnElementType() { WRAPPER_NO_CONTRACT; return m_nativeFnSigBuilder.GetReturnElementType(); }
+
+ static void GetManagedTypeHelper(LocalDesc* pLoc, Module* pModule, PCCOR_SIGNATURE pSig, SigTypeContext *pTypeContext, MethodDesc *pMD);
+
+ BOOL StubHasVoidReturnType();
+
+ Stub *Link(LoaderHeap *pHeap, UINT *pcbSize /* = NULL*/, BOOL fMC);
+
+ size_t Link(UINT* puMaxStack);
+
+
+ TokenLookupMap* GetTokenLookupMap() { LIMITED_METHOD_CONTRACT; return &m_tokenMap; }
+
+ enum CodeStreamType
+ {
+ kSetup,
+ kMarshal,
+ kDispatch,
+ kReturnUnmarshal,
+ kUnmarshal,
+ kExceptionCleanup,
+ kCleanup,
+ kExceptionHandler,
+ };
+
+ ILCodeStream* NewCodeStream(CodeStreamType codeStreamType);
+
+ MethodDesc *GetTargetMD() { LIMITED_METHOD_CONTRACT; return m_pMD; }
+ Signature GetStubSignature() { LIMITED_METHOD_CONTRACT; return m_stubSig; }
+
+ void ClearCodeStreams();
+
+ void LogILStub(DWORD dwJitFlags, SString *pDumpILStubCode = NULL);
+protected:
+ void LogILStubWorker(ILInstruction* pInstrBuffer, UINT numInstr, size_t* pcbCode, INT* piCurStack, SString *pDumpILStubCode = NULL);
+ void LogILInstruction(size_t curOffset, bool isLabeled, INT iCurStack, ILInstruction* pInstruction, SString *pDumpILStubCode = NULL);
+
+private:
+ ILCodeStream* m_pCodeStreamList;
+
+ TokenLookupMap m_tokenMap;
+ LocalSigBuilder m_localSigBuilder;
+ FunctionSigBuilder m_nativeFnSigBuilder;
+ BYTE m_rgbBuffer[sizeof(COR_ILMETHOD_DECODER)];
+
+ Signature m_stubSig; // managed sig of stub
+ SigTypeContext* m_pTypeContext; // type context for m_stubSig
+
+ SigPointer m_managedSigPtr;
+ void* m_pCode;
+ Module* m_pStubSigModule;
+ ILCodeLabel* m_pLabelList;
+
+ bool FirstPassLink(ILInstruction* pInstrBuffer, UINT numInstr, size_t* pcbCode, INT* piCurStack, UINT* puMaxStack);
+ void SecondPassLink(ILInstruction* pInstrBuffer, UINT numInstr, size_t* pCurCodeOffset);
+
+ BYTE* GenerateCodeWorker(BYTE* pbBuffer, ILInstruction* pInstrBuffer, UINT numInstr, size_t* pcbCode);
+
+ static ILCodeStream* FindLastCodeStream(ILCodeStream* pList);
+
+protected:
+ //
+ // the public entrypoints for these methods are in ILCodeStream
+ //
+ ILCodeLabel* NewCodeLabel();
+ int GetToken(MethodDesc* pMD);
+ int GetToken(MethodTable* pMT);
+ int GetToken(TypeHandle th);
+ int GetToken(FieldDesc* pFD);
+ DWORD NewLocal(CorElementType typ = ELEMENT_TYPE_I);
+ DWORD NewLocal(LocalDesc loc);
+
+ DWORD SetStubTargetArgType(CorElementType typ, bool fConsumeStubArg = true);
+ DWORD SetStubTargetArgType(LocalDesc* pLoc = NULL, bool fConsumeStubArg = true); // passing pLoc = NULL means "use stub arg type"
+ void SetStubTargetReturnType(CorElementType typ);
+ void SetStubTargetReturnType(LocalDesc* pLoc);
+ void SetStubTargetCallingConv(CorCallingConvention uNativeCallingConv);
+
+ void TransformArgForJIT(LocalDesc *pLoc);
+
+ Module * GetStubSigModule();
+ SigTypeContext *GetStubSigTypeContext();
+
+ BOOL m_StubHasVoidReturnType;
+ INT m_iTargetStackDelta;
+ DWORD m_cbCurrentCompressedSigLen;
+ DWORD m_nLocals;
+
+ bool m_fHasThis;
+
+ // We need this MethodDesc so we can reconstruct the generics
+ // SigTypeContext info, if needed.
+ MethodDesc * m_pMD;
+}; // class ILStubLinker
+
+
+//---------------------------------------------------------------------------------------
+//
+class ILCodeLabel
+{
+ friend class ILStubLinker;
+ friend class ILCodeStream;
+
+public:
+ ILCodeLabel();
+ ~ILCodeLabel();
+
+ size_t GetCodeOffset();
+
+private:
+ void SetCodeOffset(size_t codeOffset);
+
+ ILCodeLabel* m_pNext;
+ ILStubLinker* m_pOwningStubLinker;
+ ILCodeStream* m_pCodeStreamOfLabel; // this is the ILCodeStream that the index is relative to
+ size_t m_codeOffset; // this is the absolute resolved IL offset after linking
+ UINT m_idxLabeledInstruction; // this is the index within the instruction buffer of the owning ILCodeStream
+};
+
+class ILCodeStream
+{
+ friend class ILStubLinker;
+
+public:
+ enum ILInstrEnum
+ {
+#define OPDEF(name,string,pop,push,oprType,opcType,l,s1,s2,ctrl) \
+ name,
+
+#include "opcode.def"
+#undef OPDEF
+ };
+
+private:
+ static ILInstrEnum LowerOpcode(ILInstrEnum instr, ILStubLinker::ILInstruction* pInstr);
+
+#ifdef _DEBUG
+ static bool IsSupportedInstruction(ILInstrEnum instr);
+#endif // _DEBUG
+
+ static bool IsBranchInstruction(ILInstrEnum instr)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((instr >= CEE_BR) && (instr <= CEE_BLT_UN)) || (instr == CEE_LEAVE);
+ }
+
+
+public:
+ void EmitADD ();
+ void EmitADD_OVF ();
+ void EmitAND ();
+ void EmitARGLIST ();
+ void EmitBEQ (ILCodeLabel* pCodeLabel);
+ void EmitBGE (ILCodeLabel* pCodeLabel);
+ void EmitBGE_UN(ILCodeLabel* pCodeLabel);
+ void EmitBGT (ILCodeLabel* pCodeLabel);
+ void EmitBLE (ILCodeLabel* pCodeLabel);
+ void EmitBLE_UN (ILCodeLabel* pCodeLabel);
+ void EmitBLT (ILCodeLabel* pCodeLabel);
+ void EmitBR (ILCodeLabel* pCodeLabel);
+ void EmitBREAK ();
+ void EmitBRFALSE (ILCodeLabel* pCodeLabel);
+ void EmitBRTRUE (ILCodeLabel* pCodeLabel);
+ void EmitCALL (int token, int numInArgs, int numRetArgs);
+ void EmitCALLI (int token, int numInArgs, int numRetArgs);
+ void EmitCEQ ();
+ void EmitCGT ();
+ void EmitCGT_UN ();
+ void EmitCLT ();
+ void EmitCLT_UN ();
+ void EmitCONV_I ();
+ void EmitCONV_I1 ();
+ void EmitCONV_I2 ();
+ void EmitCONV_I4 ();
+ void EmitCONV_I8 ();
+ void EmitCONV_U ();
+ void EmitCONV_U1 ();
+ void EmitCONV_U2 ();
+ void EmitCONV_U4 ();
+ void EmitCONV_U8 ();
+ void EmitCONV_R4 ();
+ void EmitCONV_R8 ();
+ void EmitCONV_OVF_I4();
+ void EmitCONV_T (CorElementType type);
+ void EmitCPBLK ();
+ void EmitCPOBJ (int token);
+ void EmitDUP ();
+ void EmitENDFINALLY ();
+ void EmitINITBLK ();
+ void EmitINITOBJ (int token);
+ void EmitJMP (int token);
+ void EmitLDARG (unsigned uArgIdx);
+ void EmitLDARGA (unsigned uArgIdx);
+ void EmitLDC (DWORD_PTR uConst);
+ void EmitLDC_R4 (UINT32 uConst);
+ void EmitLDC_R8 (UINT64 uConst);
+ void EmitLDELEM_REF ();
+ void EmitLDFLD (int token);
+ void EmitLDFLDA (int token);
+ void EmitLDFTN (int token);
+ void EmitLDIND_I ();
+ void EmitLDIND_I1 ();
+ void EmitLDIND_I2 ();
+ void EmitLDIND_I4 ();
+ void EmitLDIND_I8 ();
+ void EmitLDIND_R4 ();
+ void EmitLDIND_R8 ();
+ void EmitLDIND_REF ();
+ void EmitLDIND_T (LocalDesc* pType);
+ void EmitLDIND_U1 ();
+ void EmitLDIND_U2 ();
+ void EmitLDIND_U4 ();
+ void EmitLDLEN ();
+ void EmitLDLOC (DWORD dwLocalNum);
+ void EmitLDLOCA (DWORD dwLocalNum);
+ void EmitLDNULL ();
+ void EmitLDOBJ (int token);
+ void EmitLDSFLD (int token);
+ void EmitLDSFLDA (int token);
+ void EmitLDTOKEN (int token);
+ void EmitLEAVE (ILCodeLabel* pCodeLabel);
+ void EmitLOCALLOC ();
+ void EmitMUL ();
+ void EmitMUL_OVF ();
+ void EmitNEWOBJ (int token, int numInArgs);
+ void EmitNOP (LPCSTR pszNopComment);
+ void EmitPOP ();
+ void EmitRET ();
+ void EmitSHR_UN ();
+ void EmitSTARG (unsigned uArgIdx);
+ void EmitSTELEM_REF ();
+ void EmitSTIND_I ();
+ void EmitSTIND_I1 ();
+ void EmitSTIND_I2 ();
+ void EmitSTIND_I4 ();
+ void EmitSTIND_I8 ();
+ void EmitSTIND_R4 ();
+ void EmitSTIND_R8 ();
+ void EmitSTIND_REF ();
+ void EmitSTIND_T (LocalDesc* pType);
+ void EmitSTFLD (int token);
+ void EmitSTLOC (DWORD dwLocalNum);
+ void EmitSTOBJ (int token);
+ void EmitSTSFLD (int token);
+ void EmitSUB ();
+ void EmitTHROW ();
+
+ // Overloads to simplify common usage patterns
+ void EmitNEWOBJ (BinderMethodID id, int numInArgs);
+ void EmitCALL (BinderMethodID id, int numInArgs, int numRetArgs);
+
+ void EmitLabel(ILCodeLabel* pLabel);
+ void EmitLoadThis ();
+ void EmitLoadNullPtr();
+ void EmitArgIteratorCreateAndLoad();
+
+ ILCodeLabel* NewCodeLabel();
+
+ void ClearCode();
+
+ //
+ // these functions just forward to the owning ILStubLinker
+ //
+
+ int GetToken(MethodDesc* pMD);
+ int GetToken(MethodTable* pMT);
+ int GetToken(TypeHandle th);
+ int GetToken(FieldDesc* pFD);
+
+ DWORD NewLocal(CorElementType typ = ELEMENT_TYPE_I);
+ DWORD NewLocal(LocalDesc loc);
+ DWORD SetStubTargetArgType(CorElementType typ, bool fConsumeStubArg = true);
+ DWORD SetStubTargetArgType(LocalDesc* pLoc = NULL, bool fConsumeStubArg = true); // passing pLoc = NULL means "use stub arg type"
+ void SetStubTargetReturnType(CorElementType typ);
+ void SetStubTargetReturnType(LocalDesc* pLoc);
+
+
+ //
+ // ctors/dtor
+ //
+
+ ILCodeStream(ILStubLinker* pOwner, ILStubLinker::CodeStreamType codeStreamType) :
+ m_pNextStream(NULL),
+ m_pOwner(pOwner),
+ m_pqbILInstructions(NULL),
+ m_uCurInstrIdx(0),
+ m_codeStreamType(codeStreamType)
+ {
+ }
+
+ ~ILCodeStream()
+ {
+ CONTRACTL
+ {
+ MODE_ANY;
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if (NULL != m_pqbILInstructions)
+ {
+ delete m_pqbILInstructions;
+ m_pqbILInstructions = NULL;
+ }
+ }
+
+ ILStubLinker::CodeStreamType GetStreamType() { return m_codeStreamType; }
+
+ LPCSTR GetStreamDescription(ILStubLinker::CodeStreamType streamType);
+
+protected:
+
+ void Emit(ILInstrEnum instr, INT16 iStackDelta, UINT_PTR uArg);
+
+ enum Constants
+ {
+ INITIAL_NUM_IL_INSTRUCTIONS = 64,
+ INITIAL_IL_INSTRUCTION_BUFFER_SIZE = INITIAL_NUM_IL_INSTRUCTIONS * sizeof(ILStubLinker::ILInstruction),
+ };
+
+ typedef CQuickBytesSpecifySize<INITIAL_IL_INSTRUCTION_BUFFER_SIZE> ILCodeStreamBuffer;
+
+ ILCodeStream* m_pNextStream;
+ ILStubLinker* m_pOwner;
+ ILCodeStreamBuffer* m_pqbILInstructions;
+ UINT m_uCurInstrIdx;
+ ILStubLinker::CodeStreamType m_codeStreamType; // Type of the ILCodeStream
+
+#ifndef _WIN64
+ const static UINT32 SPECIAL_VALUE_NAN_64_ON_32 = 0xFFFFFFFF;
+#endif // _WIN64
+};
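+
+// Usage sketch (pLinker is a hypothetical ILStubLinker*): code is emitted into
+// a stream obtained from the owning linker and resolved at link time:
+//
+//   ILCodeStream *pcs = pLinker->NewCodeStream(ILStubLinker::kDispatch);
+//   DWORD dwLocal = pcs->NewLocal(ELEMENT_TYPE_I4);
+//   ILCodeLabel *pDone = pcs->NewCodeLabel();
+//   pcs->EmitLDC(42);
+//   pcs->EmitSTLOC(dwLocal);
+//   pcs->EmitBR(pDone);
+//   pcs->EmitLabel(pDone);
+//   pcs->EmitLDLOC(dwLocal);
+//   pcs->EmitRET();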
+
+#define TOKEN_ILSTUB_TARGET_SIG (TokenFromRid(0xFFFFFF, mdtSignature))
+
+#endif // __STUBGEN_H__
diff --git a/src/vm/stubhelpers.cpp b/src/vm/stubhelpers.cpp
new file mode 100644
index 0000000000..ab7bf0ccba
--- /dev/null
+++ b/src/vm/stubhelpers.cpp
@@ -0,0 +1,2149 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: stubhelpers.cpp
+//
+
+//
+
+
+#include "common.h"
+
+#include "mlinfo.h"
+#include "stubhelpers.h"
+#include "jitinterface.h"
+#include "dllimport.h"
+#include "fieldmarshaler.h"
+#include "comdelegate.h"
+#include "security.h"
+#include "eventtrace.h"
+#include "comdatetime.h"
+#include "gc.h"
+#include "interoputil.h"
+#include "gcscan.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+
+#ifdef FEATURE_COMINTEROP
+#include <oletls.h>
+#include "olecontexthelpers.h"
+#include "runtimecallablewrapper.h"
+#include "comcallablewrapper.h"
+#include "clrtocomcall.h"
+#include "cominterfacemarshaler.h"
+#include "winrttypenameconverter.h"
+#endif
+
+#ifdef VERIFY_HEAP
+
+CQuickArray<StubHelpers::ByrefValidationEntry> StubHelpers::s_ByrefValidationEntries;
+SIZE_T StubHelpers::s_ByrefValidationIndex = 0;
+CrstStatic StubHelpers::s_ByrefValidationLock;
+
+// static
+void StubHelpers::Init()
+{
+ WRAPPER_NO_CONTRACT;
+ s_ByrefValidationLock.Init(CrstPinnedByrefValidation);
+}
+
+// static
+void StubHelpers::ValidateObjectInternal(Object *pObjUNSAFE, BOOL fValidateNextObj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+    }
+ CONTRACTL_END;
+
+ _ASSERTE(CNameSpace::GetGcRuntimeStructuresValid());
+
+ // validate the object - there's no need to validate next object's
+ // header since we validate the next object explicitly below
+ pObjUNSAFE->Validate(/*bDeep=*/ TRUE, /*bVerifyNextHeader=*/ FALSE, /*bVerifySyncBlock=*/ TRUE);
+
+ // and the next object as required
+ if (fValidateNextObj)
+ {
+ Object *nextObj = GCHeap::GetGCHeap()->NextObj(pObjUNSAFE);
+ if (nextObj != NULL)
+ {
+ // Note that the MethodTable of the object (i.e. the pointer at offset 0) can change from
+ // g_pFreeObjectMethodTable to NULL, from NULL to <legal-value>, or possibly also from
+ // g_pFreeObjectMethodTable to <legal-value> concurrently while executing this function.
+ // Once <legal-value> is seen, we believe that the object should pass the Validate check.
+ // We have to be careful and read the pointer only once to avoid "phantom reads".
+ MethodTable *pMT = VolatileLoad(nextObj->GetMethodTablePtr());
+ if (pMT != NULL && pMT != g_pFreeObjectMethodTable)
+ {
+ // do *not* verify the next object's syncblock - the next object is not guaranteed to
+ // be "alive" so the finalizer thread may have already released its syncblock
+ nextObj->Validate(/*bDeep=*/ TRUE, /*bVerifyNextHeader=*/ FALSE, /*bVerifySyncBlock=*/ FALSE);
+ }
+ }
+ }
+}
+
+// static
+MethodDesc *StubHelpers::ResolveInteropMethod(Object *pThisUNSAFE, MethodDesc *pMD)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (pMD == NULL && pThisUNSAFE != NULL)
+ {
+ // if this is a call via delegate, get its Invoke method
+ MethodTable *pMT = pThisUNSAFE->GetMethodTable();
+
+ _ASSERTE(pMT->IsDelegate());
+ return ((DelegateEEClass *)pMT->GetClass())->m_pInvokeMethod;
+ }
+ return pMD;
+}
+
+// static
+void StubHelpers::FormatValidationMessage(MethodDesc *pMD, SString &ssErrorString)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ssErrorString.Append(W("Detected managed heap corruption, likely culprit is interop call through "));
+
+ if (pMD == NULL)
+ {
+ // the only case where we don't have interop MD is CALLI
+ ssErrorString.Append(W("CALLI."));
+ }
+ else
+ {
+ ssErrorString.Append(W("method '"));
+
+ StackSString ssClassName;
+ pMD->GetMethodTable()->_GetFullyQualifiedNameForClass(ssClassName);
+
+ ssErrorString.Append(ssClassName);
+ ssErrorString.Append(NAMESPACE_SEPARATOR_CHAR);
+ ssErrorString.AppendUTF8(pMD->GetName());
+
+ ssErrorString.Append(W("'."));
+ }
+}
+
+// static
+void StubHelpers::ProcessByrefValidationList()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StackSString errorString;
+ ByrefValidationEntry entry = { NULL, NULL };
+
+ EX_TRY
+ {
+ AVInRuntimeImplOkayHolder AVOkay;
+
+ // Process all byref validation entries we have saved since the last GC. Note that EE is suspended at
+ // this point so we don't have to take locks and we can safely call code:GCHeap.GetContainingObject.
+ for (SIZE_T i = 0; i < s_ByrefValidationIndex; i++)
+ {
+ entry = s_ByrefValidationEntries[i];
+
+            Object *pObjUNSAFE = GCHeap::GetGCHeap()->GetContainingObject(entry.pByref);
+ ValidateObjectInternal(pObjUNSAFE, TRUE);
+ }
+ }
+ EX_CATCH
+ {
+ FormatValidationMessage(entry.pMD, errorString);
+ EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, errorString.GetUnicode());
+ }
+ EX_END_CATCH_UNREACHABLE;
+
+ s_ByrefValidationIndex = 0;
+}
+
+#endif // VERIFY_HEAP
+
+FCIMPL1(double, StubHelpers::DateMarshaler__ConvertToNative, INT64 managedDate)
+{
+ FCALL_CONTRACT;
+
+ double retval = 0.0;
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+ retval = COMDateTime::TicksToDoubleDate(managedDate);
+ HELPER_METHOD_FRAME_END();
+ return retval;
+}
+FCIMPLEND
+
+FCIMPL1_V(INT64, StubHelpers::DateMarshaler__ConvertToManaged, double nativeDate)
+{
+ FCALL_CONTRACT;
+
+ INT64 retval = 0;
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+ retval = COMDateTime::DoubleDateToTicks(nativeDate);
+ HELPER_METHOD_FRAME_END();
+ return retval;
+}
+FCIMPLEND
+
+FCIMPL4(void, StubHelpers::ValueClassMarshaler__ConvertToNative, LPVOID pDest, LPVOID pSrc, MethodTable* pMT, OBJECTREF *ppCleanupWorkListOnStack)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+ FmtValueTypeUpdateNative(&pSrc, pMT, (BYTE*)pDest, ppCleanupWorkListOnStack);
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, StubHelpers::ValueClassMarshaler__ConvertToManaged, LPVOID pDest, LPVOID pSrc, MethodTable* pMT)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+ FmtValueTypeUpdateCLR(&pDest, pMT, (BYTE*)pSrc);
+ HELPER_METHOD_FRAME_END_POLL();
+}
+FCIMPLEND
+
+FCIMPL2(void, StubHelpers::ValueClassMarshaler__ClearNative, LPVOID pDest, MethodTable* pMT)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+ FmtClassDestroyNative(pDest, pMT);
+ HELPER_METHOD_FRAME_END_POLL();
+}
+FCIMPLEND
+
+#ifdef FEATURE_COMINTEROP
+
+FORCEINLINE static void GetCOMIPFromRCW_ClearFP()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _TARGET_X86_
+    // As per ASURT 146699 we need to clear the FP state before calling into COM.
+    // The following sequence was previously generated into compiled ML stubs and
+    // is faster than _clearfp().
+ __asm
+ {
+ fnstsw ax
+ and eax, 0x3F
+ jz NoNeedToClear
+ fnclex
+NoNeedToClear:
+ }
+#endif // _TARGET_X86_
+}
+
+FORCEINLINE static SOleTlsData *GetOrCreateOleTlsData()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ SOleTlsData *pOleTlsData;
+#ifdef _TARGET_X86_
+    // This saves 1 memory instruction over NtCurrentTeb()->ReservedForOle because
+ // NtCurrentTeb() reads _TEB.NtTib.Self which is the same as what FS:0 already
+ // points to.
+ pOleTlsData = (SOleTlsData *)(ULONG_PTR)__readfsdword(offsetof(TEB, ReservedForOle));
+#else // _TARGET_X86_
+ pOleTlsData = (SOleTlsData *)NtCurrentTeb()->ReservedForOle;
+#endif // _TARGET_X86_
+ if (pOleTlsData == NULL)
+ {
+ pOleTlsData = (SOleTlsData *)SetupOleContext();
+ }
+ return pOleTlsData;
+}
+
+FORCEINLINE static void *GetCOMIPFromRCW_GetTargetNoInterception(IUnknown *pUnk, ComPlusCallInfo *pComInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _TARGET_X86_
+ _ASSERTE(pComInfo->m_pInterceptStub == NULL || pComInfo->m_pInterceptStub == (LPVOID)-1);
+ _ASSERTE(!pComInfo->HasCopyCtorArgs());
+#endif // _TARGET_X86_
+ _ASSERTE(!NDirect::IsHostHookEnabled());
+
+ LPVOID *lpVtbl = *(LPVOID **)pUnk;
+ return lpVtbl[pComInfo->m_cachedComSlot];
+}
+
+FORCEINLINE static IUnknown *GetCOMIPFromRCW_GetIUnknownFromRCWCache(RCW *pRCW, MethodTable * pItfMT)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // The code in this helper is the "fast path" that used to be generated directly
+ // to compiled ML stubs. The idea is to aim for an efficient RCW cache hit.
+ SOleTlsData * pOleTlsData = GetOrCreateOleTlsData();
+
+ // test for free-threaded after testing for context match to optimize for apartment-bound objects
+ if (pOleTlsData->pCurrentCtx == pRCW->GetWrapperCtxCookie() || pRCW->IsFreeThreaded())
+ {
+ for (int i = 0; i < INTERFACE_ENTRY_CACHE_SIZE; i++)
+ {
+ if (pRCW->m_aInterfaceEntries[i].m_pMT == pItfMT)
+ {
+ return pRCW->m_aInterfaceEntries[i].m_pUnknown;
+ }
+ }
+ }
+
+ // also search the auxiliary cache if it's available
+ RCWAuxiliaryData *pAuxData = pRCW->m_pAuxiliaryData;
+ if (pAuxData != NULL)
+ {
+ LPVOID pCtxCookie = (pRCW->IsFreeThreaded() ? NULL : pOleTlsData->pCurrentCtx);
+ return pAuxData->FindInterfacePointer(pItfMT, pCtxCookie);
+ }
+
+ return NULL;
+}
+
+// Like GetCOMIPFromRCW_GetIUnknownFromRCWCache but also computes the target. This is a couple of instructions
+// faster than GetCOMIPFromRCW_GetIUnknownFromRCWCache + GetCOMIPFromRCW_GetTargetNoInterception.
+FORCEINLINE static IUnknown *GetCOMIPFromRCW_GetIUnknownFromRCWCache_NoInterception(RCW *pRCW, ComPlusCallInfo *pComInfo, void **ppTarget)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // The code in this helper is the "fast path" that used to be generated directly
+ // to compiled ML stubs. The idea is to aim for an efficient RCW cache hit.
+ SOleTlsData *pOleTlsData = GetOrCreateOleTlsData();
+ MethodTable *pItfMT = pComInfo->m_pInterfaceMT;
+
+ // test for free-threaded after testing for context match to optimize for apartment-bound objects
+ if (pOleTlsData->pCurrentCtx == pRCW->GetWrapperCtxCookie() || pRCW->IsFreeThreaded())
+ {
+ for (int i = 0; i < INTERFACE_ENTRY_CACHE_SIZE; i++)
+ {
+ if (pRCW->m_aInterfaceEntries[i].m_pMT == pItfMT)
+ {
+ IUnknown *pUnk = pRCW->m_aInterfaceEntries[i].m_pUnknown;
+ _ASSERTE(pUnk != NULL);
+ *ppTarget = GetCOMIPFromRCW_GetTargetNoInterception(pUnk, pComInfo);
+ return pUnk;
+ }
+ }
+ }
+
+ // also search the auxiliary cache if it's available
+ RCWAuxiliaryData *pAuxData = pRCW->m_pAuxiliaryData;
+ if (pAuxData != NULL)
+ {
+ LPVOID pCtxCookie = (pRCW->IsFreeThreaded() ? NULL : pOleTlsData->pCurrentCtx);
+
+ IUnknown *pUnk = pAuxData->FindInterfacePointer(pItfMT, pCtxCookie);
+ if (pUnk != NULL)
+ {
+ *ppTarget = GetCOMIPFromRCW_GetTargetNoInterception(pUnk, pComInfo);
+ return pUnk;
+ }
+ }
+
+ return NULL;
+}
+
+FORCEINLINE static void *GetCOMIPFromRCW_GetTarget(IUnknown *pUnk, ComPlusCallInfo *pComInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifndef FEATURE_CORECLR
+#ifdef _TARGET_X86_
+ // m_pInterceptStub is either NULL if we never called on this method, -1 if we're not
+ // hosted, or the host hook stub if we are hosted. The stub will extract the real target
+ // from the 'this' argument.
+ PVOID pInterceptStub = VolatileLoadWithoutBarrier(&pComInfo->m_pInterceptStub);
+
+ if (pInterceptStub != (LPVOID)-1)
+ {
+ if (pInterceptStub != NULL)
+ {
+ return pInterceptStub;
+ }
+
+ if (NDirect::IsHostHookEnabled() || pComInfo->HasCopyCtorArgs())
+ {
+ return NULL;
+ }
+
+ if (!EnsureWritablePagesNoThrow(&pComInfo->m_pInterceptStub, sizeof(pComInfo->m_pInterceptStub)))
+ {
+ return NULL;
+ }
+
+ pComInfo->m_pInterceptStub = (LPVOID)-1;
+ }
+#else // _TARGET_X86_
+ if (NDirect::IsHostHookEnabled())
+ {
+ // There's one static stub on !_TARGET_X86_.
+ return (LPVOID)GetEEFuncEntryPoint(PInvokeStubForHost);
+ }
+#endif // _TARGET_X86_
+#endif // FEATURE_CORECLR
+
+ LPVOID *lpVtbl = *(LPVOID **)pUnk;
+ return lpVtbl[pComInfo->m_cachedComSlot];
+}
+
+NOINLINE static IUnknown* GetCOMIPFromRCWHelper(LPVOID pFCall, OBJECTREF pSrc, MethodDesc* pMD, void **ppTarget)
+{
+ FC_INNER_PROLOG(pFCall);
+
+ IUnknown *pIntf = NULL;
+
+ // This is only called in IL stubs which are in CER, so we don't need to worry about ThreadAbort
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT|Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, pSrc);
+
+ SafeComHolder<IUnknown> pRetUnk;
+
+ ComPlusCallInfo *pComInfo = ComPlusCallInfo::FromMethodDesc(pMD);
+ pRetUnk = ComObject::GetComIPFromRCWThrowing(&pSrc, pComInfo->m_pInterfaceMT);
+
+ if (pFCall == StubHelpers::GetCOMIPFromRCW_WinRT ||
+ pFCall == StubHelpers::GetCOMIPFromRCW_WinRTSharedGeneric ||
+ pFCall == StubHelpers::GetCOMIPFromRCW_WinRTDelegate)
+ {
+ pRetUnk.Release();
+ }
+
+#ifndef FEATURE_CORECLR
+#ifdef _TARGET_X86_
+ GCX_PREEMP();
+ Stub *pInterceptStub = NULL;
+
+ if (pComInfo->m_pInterceptStub == NULL)
+ {
+ if (pComInfo->HasCopyCtorArgs())
+ {
+ // static stub that gets its arguments in a thread-static field
+ pInterceptStub = NDirect::GetStubForCopyCtor();
+ }
+
+ if (NDirect::IsHostHookEnabled())
+ {
+ pInterceptStub = pComInfo->GenerateStubForHost(
+ pMD->GetDomain()->GetLoaderAllocator()->GetStubHeap(),
+ pInterceptStub);
+ }
+
+ EnsureWritablePages(&pComInfo->m_pInterceptStub);
+
+ if (pInterceptStub != NULL)
+ {
+ if (InterlockedCompareExchangeT(&pComInfo->m_pInterceptStub,
+ (LPVOID)pInterceptStub->GetEntryPoint(),
+ NULL) != NULL)
+ {
+ pInterceptStub->DecRef();
+ }
+ }
+ else
+ {
+ pComInfo->m_pInterceptStub = (LPVOID)-1;
+ }
+ }
+#endif // _TARGET_X86_
+#endif // !FEATURE_CORECLR
+
+ *ppTarget = GetCOMIPFromRCW_GetTarget(pRetUnk, pComInfo);
+ _ASSERTE(*ppTarget != NULL);
+
+ GetCOMIPFromRCW_ClearFP();
+
+ pIntf = pRetUnk.Extract();
+
+ // No exception will be thrown here (including thread abort as it is delayed in IL stubs)
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+ return pIntf;
+}
+
+//==================================================================================================================
+// The GetCOMIPFromRCW helper exists in four specialized versions to optimize CLR->COM perf. Please be careful when
+// changing this code as one of these methods is executed as part of every CLR->COM call so every instruction counts.
+//==================================================================================================================
+
+
+#include <optsmallperfcritical.h>
+
+// This helper can handle any CLR->COM call (classic COM, WinRT, WinRT delegate, WinRT generic), it supports hosting,
+// and clears FP state on x86 for compatibility with VB6.
+FCIMPL4(IUnknown*, StubHelpers::GetCOMIPFromRCW, Object* pSrcUNSAFE, MethodDesc* pMD, void **ppTarget, CLR_BOOL* pfNeedsRelease)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pMD->IsComPlusCall() || pMD->IsGenericComPlusCall() || pMD->IsEEImpl());
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pSrc = ObjectToOBJECTREF(pSrcUNSAFE);
+ *pfNeedsRelease = false;
+
+ ComPlusCallInfo *pComInfo = ComPlusCallInfo::FromMethodDesc(pMD);
+ RCW *pRCW = pSrc->PassiveGetSyncBlock()->GetInteropInfoNoCreate()->GetRawRCW();
+ if (pRCW != NULL)
+ {
+
+ IUnknown * pUnk = GetCOMIPFromRCW_GetIUnknownFromRCWCache(pRCW, pComInfo->m_pInterfaceMT);
+ if (pUnk != NULL)
+ {
+ *ppTarget = GetCOMIPFromRCW_GetTarget(pUnk, pComInfo);
+ if (*ppTarget != NULL)
+ {
+ GetCOMIPFromRCW_ClearFP();
+ return pUnk;
+ }
+ }
+ }
+
+ /* if we didn't find the COM interface pointer in the cache we will have to erect an HMF */
+ *pfNeedsRelease = true;
+ FC_INNER_RETURN(IUnknown*, GetCOMIPFromRCWHelper(StubHelpers::GetCOMIPFromRCW, pSrc, pMD, ppTarget));
+}
+FCIMPLEND
+
+// This helper can handle only non-generic WinRT calls, does not support hosting/interception, and does not clear FP state.
+FCIMPL3(IUnknown*, StubHelpers::GetCOMIPFromRCW_WinRT, Object* pSrcUNSAFE, MethodDesc* pMD, void** ppTarget)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pMD->IsComPlusCall());
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pSrc = ObjectToOBJECTREF(pSrcUNSAFE);
+
+ ComPlusCallInfo *pComInfo = ((ComPlusCallMethodDesc *)pMD)->m_pComPlusCallInfo;
+ RCW *pRCW = pSrc->PassiveGetSyncBlock()->GetInteropInfoNoCreate()->GetRawRCW();
+ if (pRCW != NULL)
+ {
+ IUnknown *pUnk = GetCOMIPFromRCW_GetIUnknownFromRCWCache_NoInterception(pRCW, pComInfo, ppTarget);
+ if (pUnk != NULL)
+ {
+ return pUnk;
+ }
+ }
+
+ /* if we didn't find the COM interface pointer in the cache we will have to erect an HMF */
+ FC_INNER_RETURN(IUnknown*, GetCOMIPFromRCWHelper(StubHelpers::GetCOMIPFromRCW_WinRT, pSrc, pMD, ppTarget));
+}
+FCIMPLEND
+
+// This helper can handle only generic WinRT calls, does not support hosting, and does not clear FP state.
+FCIMPL3(IUnknown*, StubHelpers::GetCOMIPFromRCW_WinRTSharedGeneric, Object* pSrcUNSAFE, MethodDesc* pMD, void** ppTarget)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pMD->IsGenericComPlusCall());
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pSrc = ObjectToOBJECTREF(pSrcUNSAFE);
+
+ ComPlusCallInfo *pComInfo = pMD->AsInstantiatedMethodDesc()->IMD_GetComPlusCallInfo();
+ RCW *pRCW = pSrc->PassiveGetSyncBlock()->GetInteropInfoNoCreate()->GetRawRCW();
+ if (pRCW != NULL)
+ {
+ IUnknown *pUnk = GetCOMIPFromRCW_GetIUnknownFromRCWCache_NoInterception(pRCW, pComInfo, ppTarget);
+ if (pUnk != NULL)
+ {
+ return pUnk;
+ }
+ }
+
+ /* if we didn't find the COM interface pointer in the cache we will have to erect an HMF */
+ FC_INNER_RETURN(IUnknown*, GetCOMIPFromRCWHelper(StubHelpers::GetCOMIPFromRCW_WinRTSharedGeneric, pSrc, pMD, ppTarget));
+}
+FCIMPLEND
+
+// This helper can handle only delegate WinRT calls, does not support hosting, and does not clear FP state.
+FCIMPL3(IUnknown*, StubHelpers::GetCOMIPFromRCW_WinRTDelegate, Object* pSrcUNSAFE, MethodDesc* pMD, void** ppTarget)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pMD->IsEEImpl());
+ }
+ CONTRACTL_END;
+
+ OBJECTREF pSrc = ObjectToOBJECTREF(pSrcUNSAFE);
+
+ ComPlusCallInfo *pComInfo = ((DelegateEEClass *)pMD->GetClass())->m_pComPlusCallInfo;
+ RCW *pRCW = pSrc->PassiveGetSyncBlock()->GetInteropInfoNoCreate()->GetRawRCW();
+ if (pRCW != NULL)
+ {
+ IUnknown *pUnk = GetCOMIPFromRCW_GetIUnknownFromRCWCache_NoInterception(pRCW, pComInfo, ppTarget);
+ if (pUnk != NULL)
+ {
+ return pUnk;
+ }
+ }
+
+ /* if we didn't find the COM interface pointer in the cache we will have to erect an HMF */
+ FC_INNER_RETURN(IUnknown*, GetCOMIPFromRCWHelper(StubHelpers::GetCOMIPFromRCW_WinRTDelegate, pSrc, pMD, ppTarget));
+}
+FCIMPLEND
+
+#include <optdefault.h>
+
+
+NOINLINE static FC_BOOL_RET ShouldCallWinRTInterfaceHelper(RCW *pRCW, MethodTable *pItfMT)
+{
+ FC_INNER_PROLOG(StubHelpers::ShouldCallWinRTInterface);
+
+ bool result = false;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+
+ // call the GC-triggering version
+ result = pRCW->SupportsWinRTInteropInterface(pItfMT);
+
+ HELPER_METHOD_FRAME_END();
+ FC_INNER_EPILOG();
+
+ FC_RETURN_BOOL(result);
+}
+
+FCIMPL2(FC_BOOL_RET, StubHelpers::ShouldCallWinRTInterface, Object *pSrcUNSAFE, MethodDesc *pMD)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF pSrc = ObjectToOBJECTREF(pSrcUNSAFE);
+
+ ComPlusCallInfo *pComInfo = ComPlusCallInfo::FromMethodDesc(pMD);
+ RCW *pRCW = pSrc->PassiveGetSyncBlock()->GetInteropInfoNoCreate()->GetRawRCW();
+ if (pRCW == NULL)
+ {
+ // Pretend that this is not a redirected WinRT type.
+ // We'll throw InvalidComObjectException later in GetComIPFromRCW.
+ return false;
+ }
+
+ TypeHandle::CastResult result = pRCW->SupportsWinRTInteropInterfaceNoGC(pComInfo->m_pInterfaceMT);
+ switch (result)
+ {
+ case TypeHandle::CanCast: FC_RETURN_BOOL(true);
+ case TypeHandle::CannotCast: FC_RETURN_BOOL(false);
+ }
+
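+ // The no-GC check was inconclusive (e.g. TypeHandle::MaybeCast) - fall back to the
+ // GC-triggering helper defined above.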
+ FC_INNER_RETURN(FC_BOOL_RET, ShouldCallWinRTInterfaceHelper(pRCW, pComInfo->m_pInterfaceMT));
+}
+FCIMPLEND
+
+NOINLINE static DelegateObject *GetTargetForAmbiguousVariantCallHelper(RCW *pRCW, MethodTable *pMT, BOOL fIsEnumerable, CLR_BOOL *pfUseString)
+{
+ FC_INNER_PROLOG(StubHelpers::GetTargetForAmbiguousVariantCall);
+
+ DelegateObject *pRetVal = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+
+ // Note that if the call succeeds, it will set the right OBJECTHANDLE/flags on the RCW so we won't have to do this
+ // next time. If the call fails, we don't care because it is an error and an exception will be thrown later.
+ SafeComHolder<IUnknown> pUnk = pRCW->GetComIPFromRCW(pMT);
+
+ WinRTInterfaceRedirector::WinRTLegalStructureBaseType baseType = WinRTInterfaceRedirector::GetStructureBaseType(pMT->GetInstantiation());
+
+ BOOL fUseString = FALSE;
+ BOOL fUseT = FALSE;
+ pRetVal = (DelegateObject *)OBJECTREFToObject(pRCW->GetTargetForAmbiguousVariantCall(fIsEnumerable, baseType, &fUseString, &fUseT));
+
+ *pfUseString = !!fUseString;
+
+ HELPER_METHOD_FRAME_END();
+ FC_INNER_EPILOG();
+
+ return pRetVal;
+}
+
+// Performs a run-time check to see how an ambiguous variant call on an RCW should be handled. Returns a delegate which should
+// be called, or sets *pfUseString to true which means that the caller should use the <string> instantiation. If NULL is returned
+// and *pfUseString is false, the caller should attempt to handle the call as usual.
+FCIMPL3(DelegateObject*, StubHelpers::GetTargetForAmbiguousVariantCall, Object *pSrcUNSAFE, MethodTable *pMT, CLR_BOOL *pfUseString)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF pSrc = ObjectToOBJECTREF(pSrcUNSAFE);
+
+ RCW *pRCW = pSrc->PassiveGetSyncBlock()->GetInteropInfoNoCreate()->GetRawRCW();
+ if (pRCW == NULL)
+ {
+ // ignore this - the call we'll attempt to make later will throw the right exception
+ *pfUseString = false;
+ return NULL;
+ }
+
+ BOOL fIsEnumerable = pMT->HasSameTypeDefAs(MscorlibBinder::GetExistingClass(CLASS__IENUMERABLEGENERIC));
+ _ASSERTE(fIsEnumerable || pMT->HasSameTypeDefAs(MscorlibBinder::GetExistingClass(CLASS__IREADONLYLISTGENERIC)));
+
+ WinRTInterfaceRedirector::WinRTLegalStructureBaseType baseType = WinRTInterfaceRedirector::GetStructureBaseType(pMT->GetInstantiation());
+
+ BOOL fUseString = FALSE;
+ BOOL fUseT = FALSE;
+ DelegateObject *pRetVal = (DelegateObject *)OBJECTREFToObject(pRCW->GetTargetForAmbiguousVariantCall(fIsEnumerable, baseType, &fUseString, &fUseT));
+
+ if (pRetVal != NULL || fUseT || fUseString)
+ {
+ *pfUseString = !!fUseString;
+ return pRetVal;
+ }
+
+ // we haven't seen QI for the interface yet, trigger it now
+ FC_INNER_RETURN(DelegateObject*, GetTargetForAmbiguousVariantCallHelper(pRCW, pMT, fIsEnumerable, pfUseString));
+}
+FCIMPLEND
+
+FCIMPL2(void, StubHelpers::ObjectMarshaler__ConvertToNative, Object* pSrcUNSAFE, VARIANT* pDest)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF pSrc = ObjectToOBJECTREF(pSrcUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_1(pSrc);
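+ // A VT_BYREF destination is updated in place through its existing byref data;
+ // otherwise pDest is reinitialized as a new VARIANT holding the object.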
+ if (pDest->vt & VT_BYREF)
+ {
+ OleVariant::MarshalOleRefVariantForObject(&pSrc, pDest);
+ }
+ else
+ {
+ OleVariant::MarshalOleVariantForObject(&pSrc, pDest);
+ }
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL1(Object*, StubHelpers::ObjectMarshaler__ConvertToManaged, VARIANT* pSrc)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF retVal = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(retVal);
+ // The IL stub is going to call ObjectMarshaler__ClearNative() afterwards.
+ // If it doesn't, that's a bug in ILObjectMarshaler.
+ OleVariant::MarshalObjectForOleVariant(pSrc, &retVal);
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(retVal);
+}
+FCIMPLEND
+
+FCIMPL1(void, StubHelpers::ObjectMarshaler__ClearNative, VARIANT* pSrc)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+ SafeVariantClear(pSrc);
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+#include <optsmallperfcritical.h>
+FCIMPL4(IUnknown*, StubHelpers::InterfaceMarshaler__ConvertToNative, Object* pObjUNSAFE, MethodTable* pItfMT, MethodTable* pClsMT, DWORD dwFlags)
+{
+ FCALL_CONTRACT;
+
+ if (NULL == pObjUNSAFE)
+ {
+ return NULL;
+ }
+
+ IUnknown *pIntf = NULL;
+ OBJECTREF pObj = ObjectToOBJECTREF(pObjUNSAFE);
+
+ // This is only called in IL stubs which are in CER, so we don't need to worry about ThreadAbort
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT, pObj);
+
+ pIntf = MarshalObjectToInterface(&pObj, pItfMT, pClsMT, dwFlags);
+
+ // No exception will be thrown here (including thread abort as it is delayed in IL stubs)
+ HELPER_METHOD_FRAME_END();
+
+ return pIntf;
+}
+FCIMPLEND
+
+FCIMPL4(Object*, StubHelpers::InterfaceMarshaler__ConvertToManaged, IUnknown **ppUnk, MethodTable *pItfMT, MethodTable *pClsMT, DWORD dwFlags)
+{
+ FCALL_CONTRACT;
+
+ if (NULL == *ppUnk)
+ {
+ return NULL;
+ }
+
+ OBJECTREF pObj = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(pObj);
+
+ UnmarshalObjectFromInterface(&pObj, ppUnk, pItfMT, pClsMT, dwFlags);
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(pObj);
+}
+FCIMPLEND
+
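+// Note: this is a QCall rather than an FCall because releasing the interface pointer
+// can block; SafeReleasePreemp performs the Release in preemptive GC mode.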
+void QCALLTYPE StubHelpers::InterfaceMarshaler__ClearNative(IUnknown * pUnk)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL_SO_TOLERANT;
+
+ ULONG cbRef = SafeReleasePreemp(pUnk);
+ LogInteropRelease(pUnk, cbRef, "InterfaceMarshalerBase::ClearNative: In/Out release");
+
+ END_QCALL_SO_TOLERANT;
+}
+#include <optdefault.h>
+
+
+
+
+FCIMPL1(StringObject*, StubHelpers::UriMarshaler__GetRawUriFromNative, ABI::Windows::Foundation::IUriRuntimeClass* pIUriRC)
+{
+ FCALL_CONTRACT;
+
+ if (NULL == pIUriRC)
+ {
+ return NULL;
+ }
+
+ STRINGREF strRef = NULL;
+ UINT32 cchRawUri = 0;
+ LPCWSTR pwszRawUri = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(strRef);
+
+ WinRtString hsRawUriName;
+
+ {
+ GCX_PREEMP();
+
+ // Get the RawUri string from the WinRT URI object
+ {
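+ // LeaveRuntimeHolder notifies a CLR host that we are about to leave the runtime;
+ // double-dereferencing the interface pointer yields its first vtable slot, which
+ // is the target address reported to the host.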
+ LeaveRuntimeHolder lrh(**(size_t**)(IUnknown*)pIUriRC);
+ IfFailThrow(pIUriRC->get_RawUri(hsRawUriName.Address()));
+ }
+
+ pwszRawUri = hsRawUriName.GetRawBuffer(&cchRawUri);
+ }
+
+ strRef = StringObject::NewString(pwszRawUri, cchRawUri);
+
+ HELPER_METHOD_FRAME_END();
+
+ return STRINGREFToObject(strRef);
+}
+FCIMPLEND
+
+FCIMPL2(IUnknown*, StubHelpers::UriMarshaler__CreateNativeUriInstance, WCHAR* pRawUri, UINT strLen)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pRawUri));
+ }
+ CONTRACTL_END;
+
+ ABI::Windows::Foundation::IUriRuntimeClass* pIUriRC = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ GCX_PREEMP();
+ pIUriRC = CreateWinRTUri(pRawUri, strLen);
+
+ HELPER_METHOD_FRAME_END();
+
+ return pIUriRC;
+}
+FCIMPLEND
+
+ABI::Windows::UI::Xaml::Interop::INotifyCollectionChangedEventArgs* QCALLTYPE
+StubHelpers::EventArgsMarshaler__CreateNativeNCCEventArgsInstance
+(int action, ABI::Windows::UI::Xaml::Interop::IBindableVector *newItem, ABI::Windows::UI::Xaml::Interop::IBindableVector *oldItem, int newIndex, int oldIndex)
+{
+ QCALL_CONTRACT;
+
+ ABI::Windows::UI::Xaml::Interop::INotifyCollectionChangedEventArgs *pArgsRC = NULL;
+
+ BEGIN_QCALL;
+
+ EventArgsMarshalingInfo *marshalingInfo = GetAppDomain()->GetMarshalingData()->GetEventArgsMarshalingInfo();
+ ABI::Windows::UI::Xaml::Interop::INotifyCollectionChangedEventArgsFactory *pFactory = marshalingInfo->GetNCCEventArgsFactory();
+
+ SafeComHolderPreemp<IInspectable> pInner;
+ HRESULT hr;
+ {
+ LeaveRuntimeHolder lrh(**(size_t **)(IUnknown *)pFactory);
+ hr = pFactory->CreateInstanceWithAllParameters(
+ (ABI::Windows::UI::Xaml::Interop::NotifyCollectionChangedAction)action,
+ (ABI::Windows::UI::Xaml::Interop::IBindableVector *)newItem,
+ (ABI::Windows::UI::Xaml::Interop::IBindableVector *)oldItem,
+ newIndex,
+ oldIndex,
+ NULL,
+ &pInner,
+ &pArgsRC);
+ }
+ IfFailThrow(hr);
+
+ END_QCALL;
+
+ return pArgsRC;
+}
+
+ABI::Windows::UI::Xaml::Data::IPropertyChangedEventArgs* QCALLTYPE
+ StubHelpers::EventArgsMarshaler__CreateNativePCEventArgsInstance(HSTRING name)
+{
+ QCALL_CONTRACT;
+
+ ABI::Windows::UI::Xaml::Data::IPropertyChangedEventArgs *pArgsRC = NULL;
+
+ BEGIN_QCALL;
+
+ EventArgsMarshalingInfo *marshalingInfo = GetAppDomain()->GetMarshalingData()->GetEventArgsMarshalingInfo();
+ ABI::Windows::UI::Xaml::Data::IPropertyChangedEventArgsFactory *pFactory = marshalingInfo->GetPCEventArgsFactory();
+
+ SafeComHolderPreemp<IInspectable> pInner;
+ HRESULT hr;
+ {
+ LeaveRuntimeHolder lrh(**(size_t **)(IUnknown *)pFactory);
+ hr = pFactory->CreateInstance(
+ name,
+ NULL,
+ &pInner,
+ &pArgsRC);
+ }
+ IfFailThrow(hr);
+
+ END_QCALL;
+
+ return pArgsRC;
+}
+
+// A helper to convert an IP to object using special flags.
+FCIMPL1(Object *, StubHelpers::InterfaceMarshaler__ConvertToManagedWithoutUnboxing, IUnknown *pNative)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF oref = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(oref);
+
+ //
+ // Get a wrapper for pNative
+ // Note that we need to skip WinRT unboxing at this point because:
+ // 1. We never know whether GetObjectRefFromComIP went through the unboxing path.
+ // For example, the user could just pass an IUnknown * as T and we'll happily convert it to T.
+ // 2. If for some reason we end up getting something that does not implement IReference<T>,
+ // we'll get a nice message later when we do the cast in CLRIReferenceImpl.UnboxHelper.
+ //
+ GetObjectRefFromComIP(
+ &oref,
+ pNative, // pUnk
+ g_pBaseCOMObject, // Use __ComObject
+ NULL, // pItfMT
+ ObjFromComIP::CLASS_IS_HINT | // No cast check - we'll do cast later
+ ObjFromComIP::UNIQUE_OBJECT | // Do not cache the object - this ensures the unboxing code is always called on it,
+ // and that it is not retrieved from the cache as a plain __ComObject.
+ // Don't call GetRuntimeClassName - we just want an RCW of __ComObject.
+ ObjFromComIP::IGNORE_WINRT_AND_SKIP_UNBOXING // Skip unboxing
+ );
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(oref);
+}
+FCIMPLEND
+
+FCIMPL2(StringObject *, StubHelpers::WinRTTypeNameConverter__ConvertToWinRTTypeName,
+ ReflectClassBaseObject *pTypeUNSAFE, CLR_BOOL *pbIsWinRTPrimitive)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pTypeUNSAFE));
+ PRECONDITION(CheckPointer(pbIsWinRTPrimitive));
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refClass = (REFLECTCLASSBASEREF) pTypeUNSAFE;
+ STRINGREF refString= NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refClass, refString);
+
+ SString strWinRTTypeName;
+ bool bIsPrimitive;
+ if (WinRTTypeNameConverter::AppendWinRTTypeNameForManagedType(
+ refClass->GetType(), // thManagedType
+ strWinRTTypeName, // strWinRTTypeName to append
+ FALSE, // for type conversion, not for GetRuntimeClassName
+ &bIsPrimitive
+ ))
+ {
+ *pbIsWinRTPrimitive = bIsPrimitive;
+ refString = AllocateString(strWinRTTypeName);
+ }
+ else
+ {
+ *pbIsWinRTPrimitive = FALSE;
+ refString = NULL;
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ return STRINGREFToObject(refString);
+}
+FCIMPLEND
+
+FCIMPL2(ReflectClassBaseObject *, StubHelpers::WinRTTypeNameConverter__GetTypeFromWinRTTypeName, StringObject *pWinRTTypeNameUNSAFE, CLR_BOOL *pbIsPrimitive)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pWinRTTypeNameUNSAFE));
+ }
+ CONTRACTL_END;
+
+ OBJECTREF refClass = NULL;
+ STRINGREF refString = ObjectToSTRINGREF(pWinRTTypeNameUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_RET_2(refClass, refString);
+
+ bool isPrimitive;
+ TypeHandle th = WinRTTypeNameConverter::GetManagedTypeFromWinRTTypeName(refString->GetBuffer(), &isPrimitive);
+ *pbIsPrimitive = isPrimitive;
+
+ refClass = th.GetManagedClassObject();
+
+ HELPER_METHOD_FRAME_END();
+
+ return (ReflectClassBaseObject *)OBJECTREFToObject(refClass);
+}
+FCIMPLEND
+
+FCIMPL1(MethodDesc*, StubHelpers::GetDelegateInvokeMethod, DelegateObject *pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ MethodDesc *pMD = NULL;
+
+ OBJECTREF pThis = ObjectToOBJECTREF(pThisUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_RET_1(pThis);
+
+ MethodTable *pDelMT = pThis->GetMethodTable();
+
+ pMD = COMDelegate::FindDelegateInvokeMethod(pDelMT);
+ if (pMD->IsSharedByGenericInstantiations())
+ {
+ // we need the exact MethodDesc
+ pMD = InstantiatedMethodDesc::FindOrCreateExactClassMethod(pDelMT, pMD);
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ _ASSERTE(pMD);
+ return pMD;
+}
+FCIMPLEND
+
+// Called from COM-to-CLR factory method stubs to get the return value (the delegating interface pointer
+// corresponding to the default WinRT interface of the class which we are constructing).
+FCIMPL2(IInspectable *, StubHelpers::GetWinRTFactoryReturnValue, Object *pThisUNSAFE, PCODE pCtorEntry)
+{
+ FCALL_CONTRACT;
+
+ IInspectable *pInsp = NULL;
+
+ OBJECTREF pThis = ObjectToOBJECTREF(pThisUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_RET_1(pThis);
+
+ // COM-to-CLR stubs use the target method entry point as their stub context
+ MethodDesc *pCtorMD = Entry2MethodDesc(pCtorEntry, NULL);
+ MethodTable *pClassMT = pCtorMD->GetMethodTable();
+
+ // make sure that we talk to the right CCW
+ ComCallWrapperTemplate *pTemplate = ComCallWrapperTemplate::GetTemplate(TypeHandle(pClassMT));
+ CCWHolder pWrap = ComCallWrapper::InlineGetWrapper(&pThis, pTemplate);
+
+ MethodTable *pDefaultItf = pClassMT->GetDefaultWinRTInterface();
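+ // If the class has no default WinRT interface, ask the CCW for plain IInspectable;
+ // otherwise pass IID_NULL so GetComIPFromCCW resolves the IP from pDefaultItf.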
+ const IID &riid = (pDefaultItf == NULL ? IID_IInspectable : IID_NULL);
+
+ pInsp = static_cast<IInspectable *>(ComCallWrapper::GetComIPFromCCW(pWrap, riid, pDefaultItf,
+ GetComIPFromCCW::CheckVisibility));
+
+ HELPER_METHOD_FRAME_END();
+
+ return pInsp;
+}
+FCIMPLEND
+
+// Called from CLR-to-COM factory method stubs to get the outer IInspectable to pass
+// to the underlying factory object.
+FCIMPL2(IInspectable *, StubHelpers::GetOuterInspectable, Object *pThisUNSAFE, MethodDesc *pCtorMD)
+{
+ FCALL_CONTRACT;
+
+ IInspectable *pInsp = NULL;
+
+ OBJECTREF pThis = ObjectToOBJECTREF(pThisUNSAFE);
+
+ if (pThis->GetTrueMethodTable() != pCtorMD->GetMethodTable())
+ {
+ // this is a composition scenario
+ HELPER_METHOD_FRAME_BEGIN_RET_1(pThis);
+
+ // we don't have the "outer" yet, marshal the object
+ pInsp = static_cast<IInspectable *>
+ (MarshalObjectToInterface(&pThis, NULL, NULL, ItfMarshalInfo::ITF_MARSHAL_INSP_ITF | ItfMarshalInfo::ITF_MARSHAL_USE_BASIC_ITF));
+
+ HELPER_METHOD_FRAME_END();
+ }
+
+ return pInsp;
+}
+FCIMPLEND
+
+#ifdef MDA_SUPPORTED
+FCIMPL2(ExceptionObject*, StubHelpers::TriggerExceptionSwallowedMDA, ExceptionObject* pExceptionUNSAFE, PCODE pManagedTarget)
+{
+ FCALL_CONTRACT;
+ OBJECTREF pException = ObjectToOBJECTREF(pExceptionUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_RET_1(pException);
+
+ // COM-to-CLR stubs use the target method entry point as their stub context
+ MethodDesc * pMD = Entry2MethodDesc(pManagedTarget, NULL);
+
+ MDA_TRIGGER_ASSISTANT(ExceptionSwallowedOnCallFromCom, ReportViolation(pMD, &pException));
+
+ HELPER_METHOD_FRAME_END();
+ return (ExceptionObject*)OBJECTREFToObject(pException);
+}
+FCIMPLEND
+#endif // MDA_SUPPORTED
+
+#endif // FEATURE_COMINTEROP
+
+FCIMPL0(void, StubHelpers::SetLastError)
+{
+ // Make sure this is the first thing we do after returning from the target, as almost everything can cause the last error to get trashed
+ DWORD lastError = ::GetLastError();
+
+ FCALL_CONTRACT;
+
+ GetThread()->m_dwLastError = lastError;
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, StubHelpers::IsQCall, NDirectMethodDesc* pNMD)
+{
+ FCALL_CONTRACT;
+ FC_RETURN_BOOL(pNMD->IsQCall());
+}
+FCIMPLEND
+
+NOINLINE static void InitDeclaringTypeHelper(MethodTable *pMT)
+{
+ FC_INNER_PROLOG(StubHelpers::InitDeclaringType);
+
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+ pMT->CheckRunClassInitThrowing();
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+}
+
+// Triggers cctor of pNMD's declarer, similar to code:JIT_InitClass.
+#include <optsmallperfcritical.h>
+FCIMPL1(void, StubHelpers::InitDeclaringType, NDirectMethodDesc* pNMD)
+{
+ FCALL_CONTRACT;
+
+ MethodTable *pMT = pNMD->GetMethodTable();
+ _ASSERTE(!pMT->IsClassPreInited());
+
+ if (pMT->GetDomainLocalModule()->IsClassInitialized(pMT))
+ return;
+
+ FC_INNER_RETURN_VOID(InitDeclaringTypeHelper(pMT));
+}
+FCIMPLEND
+#include <optdefault.h>
+
+FCIMPL1(void*, StubHelpers::GetNDirectTarget, NDirectMethodDesc* pNMD)
+{
+ FCALL_CONTRACT;
+
+ FCUnique(0xa2);
+ return pNMD->GetNDirectTarget();
+}
+FCIMPLEND
+
+FCIMPL2(void*, StubHelpers::GetDelegateTarget, DelegateObject *pThisUNSAFE, UINT_PTR *ppStubArg)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(CheckPointer(pThisUNSAFE));
+ }
+ CONTRACTL_END;
+
+ DELEGATEREF orefThis = (DELEGATEREF)ObjectToOBJECTREF(pThisUNSAFE);
+
+ // On x86 we wrap the call with a thunk that handles host notifications.
+ PCODE pInterceptStubEntryPoint = NULL;
+
+#if defined(_TARGET_X86_)
+ SyncBlock *pSyncBlock = orefThis->PassiveGetSyncBlock();
+ if (pSyncBlock != NULL)
+ {
+ InteropSyncBlockInfo *pInteropInfo = pSyncBlock->GetInteropInfoNoCreate();
+ if (pInteropInfo != NULL)
+ {
+ // we return the entry point of a stub that wraps the real target
+ Stub *pInterceptStub = pInteropInfo->GetInterceptStub();
+ if (pInterceptStub != NULL)
+ {
+ pInterceptStubEntryPoint = pInterceptStub->GetEntryPoint();
+ }
+ }
+ }
+#endif // _TARGET_X86_
+
+#if defined(_WIN64)
+ UINT_PTR target = (UINT_PTR)orefThis->GetMethodPtrAux();
+
+ // The lowest bit is used to distinguish between MD and target on 64-bit.
+#ifdef _TARGET_AMD64_
+ target = (target << 1) | 1;
+#endif // _TARGET_AMD64_
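+ // MethodDescs and code addresses are at least 2-byte aligned, so the low bit is free
+ // to carry the tag. Illustrative decode (not the actual runtime code):
+ // if (arg & 1) { pTarget = (LPVOID)(arg >> 1); } else { pMD = (MethodDesc*)arg; }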
+
+ // On 64-bit we pass the real target to the stub-for-host through this out argument,
+ // see IL code gen in NDirectStubLinker::DoNDirect for details.
+ *ppStubArg = target;
+
+ if (NDirect::IsHostHookEnabled())
+ {
+ // There's one static stub on !_TARGET_X86_.
+ return (LPVOID)GetEEFuncEntryPoint(PInvokeStubForHost);
+ }
+#elif defined(_TARGET_ARM_)
+ // @ARMTODO: Nothing to do for ARM yet since we don't support the hosted path.
+#endif // _WIN64, _TARGET_ARM_
+
+ if (pInterceptStubEntryPoint != NULL)
+ {
+ return (LPVOID)pInterceptStubEntryPoint;
+ }
+ return (LPVOID)orefThis->GetMethodPtrAux();
+}
+FCIMPLEND
+
+#ifndef FEATURE_CORECLR // CAS
+static void DoDeclarativeActionsForPInvoke(MethodDesc* pCurrent)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ MethodSecurityDescriptor MDSecDesc(pCurrent);
+ MethodSecurityDescriptor::LookupOrCreateMethodSecurityDescriptor(&MDSecDesc);
+
+ DeclActionInfo* pRuntimeDeclActionInfo = MDSecDesc.GetRuntimeDeclActionInfo();
+ if (pRuntimeDeclActionInfo != NULL)
+ {
+ // Tell the debugger not to start on any managed code that we call in this method
+ FrameWithCookie<DebuggerSecurityCodeMarkFrame> __dbgSecFrame;
+
+ Security::DoDeclarativeActions(pCurrent, pRuntimeDeclActionInfo, NULL, &MDSecDesc);
+
+ // Pop the debugger frame
+ __dbgSecFrame.Pop();
+ }
+}
+#endif // FEATURE_CORECLR
+
+#ifndef FEATURE_CORECLR
+#ifndef _WIN64
+FCIMPL3(void*, StubHelpers::GetFinalStubTarget, LPVOID pStubArg, LPVOID pUnmngThis, DWORD dwFlags)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(SF_IsForwardStub(dwFlags));
+ }
+ CONTRACTL_END;
+
+ if (SF_IsCALLIStub(dwFlags))
+ {
+ // stub argument is the target
+ return pStubArg;
+ }
+ else if (SF_IsDelegateStub(dwFlags))
+ {
+ // stub argument is not used but we pass _methodPtrAux which is the target
+ return pStubArg;
+ }
+ else if (SF_IsCOMStub(dwFlags))
+ {
+ // stub argument is a ComPlusCallMethodDesc
+ ComPlusCallMethodDesc *pCMD = (ComPlusCallMethodDesc *)pStubArg;
+ LPVOID *lpVtbl = *(LPVOID **)pUnmngThis;
+ return lpVtbl[pCMD->m_pComPlusCallInfo->m_cachedComSlot];
+ }
+ else // P/Invoke
+ {
+ // secret stub argument is an NDirectMethodDesc
+ NDirectMethodDesc *pNMD = (NDirectMethodDesc *)pStubArg;
+ return pNMD->GetNativeNDirectTarget();
+ }
+}
+FCIMPLEND
+#endif // !_WIN64
+
+FCIMPL1(void, StubHelpers::DemandPermission, NDirectMethodDesc *pNMD)
+{
+ FCALL_CONTRACT;
+
+ // ETWOnStartup (SecurityCatchCall, SecurityCatchCallEnd); // this is messing up HMF below
+
+ if (pNMD != NULL)
+ {
+ g_IBCLogger.LogMethodDescAccess(pNMD);
+
+ if (pNMD->IsInterceptedForDeclSecurity())
+ {
+ if (pNMD->IsInterceptedForDeclSecurityCASDemandsOnly() &&
+ SecurityStackWalk::HasFlagsOrFullyTrusted(1 << SECURITY_UNMANAGED_CODE))
+ {
+ // Track perfmon counters. Runtime security checks.
+ Security::IncrementSecurityPerfCounter();
+ }
+ else
+ {
+ HELPER_METHOD_FRAME_BEGIN_0();
+ DoDeclarativeActionsForPInvoke(pNMD);
+ HELPER_METHOD_FRAME_END();
+ }
+ }
+ }
+ else
+ {
+ // This is either CLR->COM or delegate P/Invoke (we don't call this helper for CALLI).
+ HELPER_METHOD_FRAME_BEGIN_0();
+ SecurityStackWalk::SpecialDemand(SSWT_DECLARATIVE_DEMAND, SECURITY_UNMANAGED_CODE);
+ HELPER_METHOD_FRAME_END();
+ }
+}
+FCIMPLEND
+#endif // !FEATURE_CORECLR
+
+FCIMPL2(void, StubHelpers::ThrowInteropParamException, UINT resID, UINT paramIdx)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+ ::ThrowInteropParamException(resID, paramIdx);
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+#ifdef FEATURE_COMINTEROP
+FCIMPL1(void, StubHelpers::StubRegisterRCW, Object *unsafe_pThis)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF oref = ObjectToOBJECTREF(unsafe_pThis);
+ HELPER_METHOD_FRAME_BEGIN_1(oref);
+
+#if defined(_DEBUG) && defined(FEATURE_MDA)
+ // Make sure that we only get here because the MDA is turned on.
+ MdaRaceOnRCWCleanup* mda = MDA_GET_ASSISTANT(RaceOnRCWCleanup);
+ _ASSERTE(mda != NULL);
+#endif // _DEBUG
+
+ // RegisterRCW may throw OOM in which case we need to decrement the refcount on the RCW
+ class RCWDecrementUseCountHolder
+ {
+ public:
+ RCW *m_pRCW;
+
+ RCWDecrementUseCountHolder(RCW *pRCW)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pRCW = pRCW;
+ }
+
+ ~RCWDecrementUseCountHolder()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (m_pRCW != NULL)
+ {
+ m_pRCW->DecrementUseCount();
+ }
+ }
+ };
+
+ RCWDecrementUseCountHolder holder(oref->GetSyncBlock()->GetInteropInfoNoCreate()->GetRCWAndIncrementUseCount());
+ if (holder.m_pRCW == NULL)
+ {
+ COMPlusThrow(kInvalidComObjectException, IDS_EE_COM_OBJECT_NO_LONGER_HAS_WRAPPER);
+ }
+
+ GET_THREAD()->RegisterRCW(holder.m_pRCW);
+
+ // if we made it here, suppress the DecrementUseCount call
+ holder.m_pRCW = NULL;
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL1(void, StubHelpers::StubUnregisterRCW, Object *unsafe_pThis)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF oref = ObjectToOBJECTREF(unsafe_pThis);
+ HELPER_METHOD_FRAME_BEGIN_1(oref);
+
+#if defined(_DEBUG) && defined(FEATURE_MDA)
+ // Make sure that we only get here because the MDA is turned on.
+ MdaRaceOnRCWCleanup* mda = MDA_GET_ASSISTANT(RaceOnRCWCleanup);
+ _ASSERTE(mda != NULL);
+#endif // _DEBUG
+
+ RCW *pRCW = GET_THREAD()->UnregisterRCW(INDEBUG(oref->GetSyncBlock()));
+
+ if (pRCW != NULL)
+ {
+ // Thread::RegisterRCW incremented the use count, decrement it now
+ pRCW->DecrementUseCount();
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+class COMInterfaceMarshalerCallback : public ICOMInterfaceMarshalerCallback
+{
+public :
+ COMInterfaceMarshalerCallback(Thread *pThread, LPVOID pCtxCookie)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pThread != NULL);
+ _ASSERTE(pCtxCookie != NULL);
+
+ m_bIsFreeThreaded = false;
+ m_pThread = pThread;
+ m_pCtxCookie = pCtxCookie;
+
+ m_bIsDCOMProxy = false;
+ }
+
+ virtual void OnRCWCreated(RCW *pRCW)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pRCW != NULL);
+
+ if (pRCW->IsFreeThreaded())
+ m_bIsFreeThreaded = true;
+
+ if (pRCW->IsDCOMProxy())
+ m_bIsDCOMProxy = true;
+ }
+
+ // Return true if ComInterfaceMarshaler should use this RCW
+ // Return false if ComInterfaceMarshaler should just skip this RCW and proceed
+ // to create a duplicate one instead
+ virtual bool ShouldUseThisRCW(RCW *pRCW)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pRCW->SupportsIInspectable());
+
+ // Is this a free-threaded RCW, or a context-bound RCW created in the same context?
+ if (pRCW->IsFreeThreaded() ||
+ pRCW->GetWrapperCtxCookie() == m_pCtxCookie)
+ {
+ return true;
+ }
+ else
+ {
+ //
+ // Now we get back a WinRT factory RCW created in a different context. This means the
+ // factory is a singleton, and the returned IActivationFactory could be one of
+ // the following:
+ // 1) A raw pointer that acts like a free-threaded object
+ // 2) A proxy that is used across different contexts. It might maintain a list of
+ // contexts that it has been marshaled to, and calls on it will fail if it has not
+ // been marshaled to this context yet.
+ //
+ // In this case, it is unsafe to use this RCW in this context, so we should proceed
+ // to create a duplicate one instead. It might make sense to have a context-sensitive
+ // RCW cache, but this case is unlikely to be common enough to justify it.
+ //
+ return false;
+ }
+ }
+
+ virtual void OnRCWCacheHit(RCW *pRCW)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (pRCW->IsFreeThreaded())
+ m_bIsFreeThreaded = true;
+
+ if (pRCW->IsDCOMProxy())
+ m_bIsDCOMProxy = true;
+ }
+
+ bool IsFreeThreaded()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_bIsFreeThreaded;
+ }
+
+ bool IsDCOMProxy()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_bIsDCOMProxy;
+ }
+
+private :
+ Thread *m_pThread; // Current thread
+ LPVOID m_pCtxCookie; // Current context cookie
+ bool m_bIsFreeThreaded; // Whether the RCW we got back is free-threaded
+ bool m_bIsDCOMProxy; // Is this a proxy to an object in a different process
+};
+
+//
+// Retrieve cached WinRT factory RCW or create a new one, according to the MethodDesc of the .ctor
+//
+FCIMPL1(Object*, StubHelpers::GetWinRTFactoryObject, MethodDesc *pCMD)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF refFactory = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refFactory);
+
+ MethodTable *pMTOfTypeToCreate = pCMD->GetMethodTable();
+ AppDomain *pDomain = GetAppDomain();
+
+ //
+ // Look up cached WinRT factory according to type to create + current context cookie
+ // For each type in the AppDomain, we cache only the last WinRT factory object.
+ // We don't cache the factory per context, to avoid an explosion of objects when
+ // there are multiple STA apartments.
+ //
+ // Note that if the cached WinRT factory is free-threaded (FTM), we'll get it back regardless of the supplied cookie
+ //
+ LPVOID lpCtxCookie = GetCurrentCtxCookie();
+ refFactory = pDomain->LookupWinRTFactoryObject(pMTOfTypeToCreate, lpCtxCookie);
+ if (refFactory == NULL)
+ {
+ //
+ // Didn't find a cached factory that matches the context
+ // Time to create a new factory and wrap it in a RCW
+ //
+
+ //
+ // Create a callback that checks for a singleton WinRT factory during RCW creation
+ //
+ // If we get back an existing RCW from a different context, this callback
+ // will make the RCW a context-agile (but not free-threaded) RCW. Being context-agile
+ // in this case means the RCW will not make any context transition. As long as we only
+ // call this RCW from where we got it back (using IInspectable* as identity), we should
+ // be fine (as we are supposed to call that pointer directly anyway).
+ //
+ // See code:COMInterfaceMarshalerCallback for more details
+ //
+ COMInterfaceMarshalerCallback callback(GET_THREAD(), lpCtxCookie);
+
+ //
+ // Get the activation factory instance for this WinRT type and create a RCW for it
+ //
+ GetNativeWinRTFactoryObject(
+ pMTOfTypeToCreate,
+ GET_THREAD(),
+ ComPlusCallInfo::FromMethodDesc(pCMD)->m_pInterfaceMT, // Factory interface
+ FALSE, // Don't need a unique RCW
+ // it is only needed in WindowsRuntimeMarshal.GetActivationFactory API
+ &callback,
+ &refFactory);
+
+ //
+ // If this is a free-threaded factory RCW, set lpCtxCookie = NULL, which means
+ // this RCW can be used anywhere.
+ // Otherwise, we can only use this RCW from the current thread.
+ //
+ if (callback.IsFreeThreaded())
+ lpCtxCookie = NULL;
+
+ // Cache the result in the AD-wide cache, unless this is a proxy to a DCOM object on Apollo. In the
+ // Apollo app model, out of process WinRT servers can have lifetimes independent of the application,
+ // and the cache may wind up with stale pointers if we save proxies to OOP factories. In addition,
+ // their app model is such that OOP WinRT objects cannot rely on having static state as they can be
+ // forcibly terminated at any point. Therefore, not caching an OOP WinRT factory object in Apollo
+ // does not lead to correctness issues.
+ //
+ // This is not the same on the desktop, where OOP objects may contain static state, and therefore
+ // we need to keep them alive.
+#ifdef FEATURE_WINDOWSPHONE
+ if (!callback.IsDCOMProxy())
+#endif // FEATURE_WINDOWSPHONE
+ {
+ pDomain->CacheWinRTFactoryObject(pMTOfTypeToCreate, &refFactory, lpCtxCookie);
+ }
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(refFactory);
+}
+FCIMPLEND
+
+
+#endif
+
+#ifdef MDA_SUPPORTED
+NOINLINE static void CheckCollectedDelegateMDAHelper(UMEntryThunk *pEntryThunk)
+{
+ FC_INNER_PROLOG(StubHelpers::CheckCollectedDelegateMDA);
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+
+ CallbackOnCollectedDelegateHelper(pEntryThunk);
+
+ HELPER_METHOD_FRAME_END();
+ FC_INNER_EPILOG();
+}
+
+FCIMPL1(void, StubHelpers::CheckCollectedDelegateMDA, LPVOID pEntryThunk)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(pEntryThunk != NULL);
+ }
+ CONTRACTL_END;
+
+ if (MDA_GET_ASSISTANT(CallbackOnCollectedDelegate) == NULL)
+ return;
+
+ // keep this FCall as fast as possible for the "MDA is off" case
+ FC_INNER_RETURN_VOID(CheckCollectedDelegateMDAHelper((UMEntryThunk *)pEntryThunk));
+}
+FCIMPLEND
+#endif // MDA_SUPPORTED
+
+#ifdef PROFILING_SUPPORTED
+FCIMPL3(SIZE_T, StubHelpers::ProfilerBeginTransitionCallback, SIZE_T pSecretParam, Thread* pThread, Object* unsafe_pThis)
+{
+ FCALL_CONTRACT;
+
+ // We can get here with an ngen image generated with "/prof",
+ // even if the profiler doesn't want to track transitions.
+ if (!CORProfilerTrackTransitions())
+ {
+ return NULL;
+ }
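+ // The MethodDesc resolved below is returned to the IL stub, which passes it back
+ // to ProfilerEndTransitionCallback when the native call returns.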
+
+ MethodDesc* pRealMD = NULL;
+
+ BEGIN_PRESERVE_LAST_ERROR;
+
+ // We must transition to preemptive GC mode before calling out to the profiler,
+ // and the transition requires us to set up a HMF.
+ DELEGATEREF dref = (DELEGATEREF)ObjectToOBJECTREF(unsafe_pThis);
+ HELPER_METHOD_FRAME_BEGIN_RET_1(dref);
+
+ bool fReverseInterop = false;
+
+ if (NULL == pThread)
+ {
+ // This is our signal for the reverse interop cases.
+ fReverseInterop = true;
+ pThread = GET_THREAD();
+ // The secret param in this case is the UMEntryThunk.
+ pRealMD = ((UMEntryThunk*)pSecretParam)->GetMethod();
+ }
+ else if (pSecretParam == 0)
+ {
+ // Secret param is null. This is the calli pinvoke case or the unmanaged delegate case.
+ // We have an unmanaged target address but no MD. For the unmanaged delegate case, we can
+ // still retrieve the MD by looking at the "this" object.
+
+ if (dref == NULL)
+ {
+ // calli pinvoke case
+ pRealMD = NULL;
+ }
+ else
+ {
+ // unmanaged delegate case
+ MethodTable* pMT = dref->GetMethodTable();
+ _ASSERTE(pMT->IsDelegate());
+
+ EEClass * pClass = pMT->GetClass();
+ pRealMD = ((DelegateEEClass*)pClass)->m_pInvokeMethod;
+ _ASSERTE(pRealMD);
+ }
+ }
+ else
+ {
+ // This is either the COM interop or the pinvoke case.
+ pRealMD = (MethodDesc*)pSecretParam;
+ }
+
+ {
+ GCX_PREEMP_THREAD_EXISTS(pThread);
+
+ if (fReverseInterop)
+ {
+ ProfilerUnmanagedToManagedTransitionMD(pRealMD, COR_PRF_TRANSITION_CALL);
+ }
+ else
+ {
+ ProfilerManagedToUnmanagedTransitionMD(pRealMD, COR_PRF_TRANSITION_CALL);
+ }
+ }
+
+ HELPER_METHOD_FRAME_END();
+
+ END_PRESERVE_LAST_ERROR;
+
+ return (SIZE_T)pRealMD;
+}
+FCIMPLEND
+
+FCIMPL2(void, StubHelpers::ProfilerEndTransitionCallback, MethodDesc* pRealMD, Thread* pThread)
+{
+ FCALL_CONTRACT;
+
+ // We can get here with an ngen image generated with "/prof",
+ // even if the profiler doesn't want to track transitions.
+ if (!CORProfilerTrackTransitions())
+ {
+ return;
+ }
+
+ BEGIN_PRESERVE_LAST_ERROR;
+
+ // We must transition to preemptive GC mode before calling out to the profiler,
+ // and the transition requires us to set up a HMF.
+ HELPER_METHOD_FRAME_BEGIN_0();
+ {
+ bool fReverseInterop = false;
+
+ if (NULL == pThread)
+ {
+ // if pThread is null, we are doing reverse interop
+ pThread = GET_THREAD();
+ fReverseInterop = true;
+ }
+
+ GCX_PREEMP_THREAD_EXISTS(pThread);
+
+ if (fReverseInterop)
+ {
+ ProfilerManagedToUnmanagedTransitionMD(pRealMD, COR_PRF_TRANSITION_RETURN);
+ }
+ else
+ {
+ ProfilerUnmanagedToManagedTransitionMD(pRealMD, COR_PRF_TRANSITION_RETURN);
+ }
+ }
+ HELPER_METHOD_FRAME_END();
+
+ END_PRESERVE_LAST_ERROR;
+}
+FCIMPLEND
+#endif // PROFILING_SUPPORTED
+
+FCIMPL1(Object*, StubHelpers::GetHRExceptionObject, HRESULT hr)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF oThrowable = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_1(oThrowable);
+ {
+ // GetExceptionForHR uses logic equivalent to COMPlusThrowHR
+ GetExceptionForHR(hr, &oThrowable);
+ }
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(oThrowable);
+}
+FCIMPLEND
+
+#ifdef FEATURE_COMINTEROP
+FCIMPL4(Object*, StubHelpers::GetCOMHRExceptionObject, HRESULT hr, MethodDesc *pMD, Object *unsafe_pThis, CLR_BOOL fForWinRT)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF oThrowable = NULL;
+
+ // get 'this'
+ OBJECTREF oref = ObjectToOBJECTREF(unsafe_pThis);
+
+ HELPER_METHOD_FRAME_BEGIN_RET_2(oref, oThrowable);
+ {
+ IErrorInfo *pErrInfo = NULL;
+
+ IRestrictedErrorInfo *pResErrorInfo = NULL;
+ BOOL bHasNonCLRLanguageErrorObject = FALSE;
+
+ if (fForWinRT)
+ {
+ SafeGetRestrictedErrorInfo(&pResErrorInfo);
+ if (pResErrorInfo != NULL)
+ {
+ // If we have a restricted error info, try to find the corresponding IErrorInfo.
+ // bHasNonCLRLanguageErrorObject is set depending on the associated language exception object:
+ // if it is a CLR exception object => bHasNonCLRLanguageErrorObject = FALSE;
+ // if it is a non-CLR exception object => bHasNonCLRLanguageErrorObject = TRUE.
+ pErrInfo = GetCorrepondingErrorInfo_WinRT(hr, pResErrorInfo, &bHasNonCLRLanguageErrorObject);
+ }
+ }
+ if (pErrInfo == NULL && pMD != NULL)
+ {
+ // Retrieve the interface method table.
+ MethodTable *pItfMT = ComPlusCallInfo::FromMethodDesc(pMD)->m_pInterfaceMT;
+
+ // Get IUnknown pointer for this interface on this object
+ IUnknown* pUnk = ComObject::GetComIPFromRCW(&oref, pItfMT);
+ if (pUnk != NULL)
+ {
+ // Check to see if the component supports error information for this interface.
+ IID ItfIID;
+ pItfMT->GetGuid(&ItfIID, TRUE);
+ pErrInfo = GetSupportedErrorInfo(pUnk, ItfIID, !fForWinRT);
+
+ DWORD cbRef = SafeRelease(pUnk);
+ LogInteropRelease(pUnk, cbRef, "IUnk to QI for ISupportsErrorInfo");
+ }
+ }
+
+ GetExceptionForHR(hr, pErrInfo, !fForWinRT, &oThrowable, pResErrorInfo, bHasNonCLRLanguageErrorObject);
+ }
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(oThrowable);
+}
+FCIMPLEND
+#endif // FEATURE_COMINTEROP
+
+FCIMPL3(void, StubHelpers::FmtClassUpdateNativeInternal, Object* pObjUNSAFE, BYTE* pbNative, OBJECTREF *ppCleanupWorkListOnStack)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF pObj = ObjectToOBJECTREF(pObjUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_1(pObj);
+
+ FmtClassUpdateNative(&pObj, pbNative, ppCleanupWorkListOnStack);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL2(void, StubHelpers::FmtClassUpdateCLRInternal, Object* pObjUNSAFE, BYTE* pbNative)
+{
+ FCALL_CONTRACT;
+
+ OBJECTREF pObj = ObjectToOBJECTREF(pObjUNSAFE);
+ HELPER_METHOD_FRAME_BEGIN_1(pObj);
+
+ FmtClassUpdateCLR(&pObj, pbNative);
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL2(void, StubHelpers::LayoutDestroyNativeInternal, BYTE* pbNative, MethodTable* pMT)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+ LayoutDestroyNative(pbNative, pMT);
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL1(Object*, StubHelpers::AllocateInternal, EnregisteredTypeHandle pRegisteredTypeHnd)
+{
+ FCALL_CONTRACT;
+
+ TypeHandle typeHnd = TypeHandle::FromPtr(pRegisteredTypeHnd);
+ OBJECTREF objRet = NULL;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(objRet);
+
+ MethodTable* pMT = typeHnd.GetMethodTable();
+ objRet = pMT->Allocate();
+
+ HELPER_METHOD_FRAME_END();
+
+ return OBJECTREFToObject(objRet);
+}
+FCIMPLEND
+
+FCIMPL1(void, StubHelpers::DecimalCanonicalizeInternal, DECIMAL *pDec)
+{
+ FCALL_CONTRACT;
+
+ if (FAILED(DecimalCanonicalize(pDec)))
+ {
+ FCThrowResVoid(kOverflowException, W("Overflow_Currency"));
+ }
+}
+FCIMPLEND
+
+FCIMPL1(int, StubHelpers::AnsiStrlen, __in_z char* pszStr)
+{
+ FCALL_CONTRACT;
+
+ size_t len = strlen(pszStr);
+
+ // the length should have been checked earlier (see StubHelpers.CheckStringLength)
+ _ASSERTE(FitsInI4(len));
+
+ return (int)len;
+}
+FCIMPLEND
+
+FCIMPL3(void, StubHelpers::MarshalToUnmanagedVaListInternal, va_list va, DWORD cbVaListSize, const VARARGS* pArgIterator)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+ VARARGS::MarshalToUnmanagedVaList(va, cbVaListSize, pArgIterator);
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL2(void, StubHelpers::MarshalToManagedVaListInternal, va_list va, VARARGS* pArgIterator)
+{
+ FCALL_CONTRACT;
+
+ VARARGS::MarshalToManagedVaList(va, pArgIterator);
+}
+FCIMPLEND
+
+FCIMPL3(void, StubHelpers::ValidateObject, Object *pObjUNSAFE, MethodDesc *pMD, Object *pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+#ifdef VERIFY_HEAP
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ StackSString errorString;
+ EX_TRY
+ {
+ AVInRuntimeImplOkayHolder AVOkay;
+ // Don't validate the next object if a BGC is in progress: we can race with the background
+ // sweep, which could turn the next object into a Free object underneath us if it's dead.
+ ValidateObjectInternal(pObjUNSAFE, !(GCHeap::GetGCHeap()->IsConcurrentGCInProgress()));
+ }
+ EX_CATCH
+ {
+ FormatValidationMessage(ResolveInteropMethod(pThisUNSAFE, pMD), errorString);
+ EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, errorString.GetUnicode());
+ }
+ EX_END_CATCH_UNREACHABLE;
+
+ HELPER_METHOD_FRAME_END();
+#else // VERIFY_HEAP
+ FCUnique(0xa3);
+ UNREACHABLE_MSG("No validation support without VERIFY_HEAP");
+#endif // VERIFY_HEAP
+}
+FCIMPLEND
+
+FCIMPL3(void, StubHelpers::ValidateByref, void *pByref, MethodDesc *pMD, Object *pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+#ifdef VERIFY_HEAP
+ // We cannot validate byrefs at this point as code:GCHeap.GetContainingObject could potentially race
+ // with allocations on other threads. We'll just remember this byref along with the interop MD and
+ // perform the validation on the next GC (see code:StubHelpers.ProcessByrefValidationList).
+
+ // Skip the byref if it is not pointing into the managed heap
+ if (!GCHeap::GetGCHeap()->IsHeapPointer(pByref))
+ {
+ return;
+ }
+ ByrefValidationEntry entry;
+ entry.pByref = pByref;
+ entry.pMD = ResolveInteropMethod(pThisUNSAFE, pMD);
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+
+ SIZE_T NumOfEntries = 0;
+ {
+ CrstHolder ch(&s_ByrefValidationLock);
+
+ if (s_ByrefValidationIndex >= s_ByrefValidationEntries.Size())
+ {
+ // The validation list grows as necessary (to 2*n+1, with overflow checks); for simplicity we never shrink it.
+ SIZE_T newSize;
+ if (!ClrSafeInt<SIZE_T>::multiply(s_ByrefValidationIndex, 2, newSize) ||
+ !ClrSafeInt<SIZE_T>::addition(newSize, 1, newSize))
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+
+ s_ByrefValidationEntries.ReSizeThrows(newSize);
+ _ASSERTE(s_ByrefValidationIndex < s_ByrefValidationEntries.Size());
+ }
+
+ s_ByrefValidationEntries[s_ByrefValidationIndex] = entry;
+ NumOfEntries = ++s_ByrefValidationIndex;
+ }
+
+ if (NumOfEntries > BYREF_VALIDATION_LIST_MAX_SIZE)
+ {
+ // if the list is too big, trigger GC now
+ GCHeap::GetGCHeap()->GarbageCollect(0);
+ }
+
+ HELPER_METHOD_FRAME_END();
+#else // VERIFY_HEAP
+ FCUnique(0xa4);
+ UNREACHABLE_MSG("No validation support without VERIFY_HEAP");
+#endif // VERIFY_HEAP
+}
+FCIMPLEND
+
+FCIMPL0(void*, StubHelpers::GetStubContext)
+{
+ FCALL_CONTRACT;
+
+ FCUnique(0xa0);
+ UNREACHABLE_MSG_RET("This is a JIT intrinsic!");
+}
+FCIMPLEND
+
+FCIMPL2(void, StubHelpers::LogPinnedArgument, MethodDesc *target, Object *pinnedArg)
+{
+ FCALL_CONTRACT;
+
+ SIZE_T managedSize = 0;
+
+ if (pinnedArg != NULL)
+ {
+ // Null objects can be passed to interop; only check the size if the object is valid.
+ managedSize = pinnedArg->GetSize();
+ }
+
+ if (target != NULL)
+ {
+ STRESS_LOG3(LF_STUBS, LL_INFO100, "Managed object %#X with size '%#X' pinned for interop to Method [%pM]\n", pinnedArg, managedSize, target);
+ }
+ else
+ {
+ STRESS_LOG2(LF_STUBS, LL_INFO100, "Managed object %#X pinned for interop with size '%#X'", pinnedArg, managedSize);
+ }
+}
+FCIMPLEND
+
+#ifdef _WIN64
+FCIMPL0(void*, StubHelpers::GetStubContextAddr)
+{
+ FCALL_CONTRACT;
+
+ FCUnique(0xa1);
+ UNREACHABLE_MSG("This is a JIT intrinsic!");
+}
+FCIMPLEND
+#endif // _WIN64
+
+#ifdef MDA_SUPPORTED
+FCIMPL0(void, StubHelpers::TriggerGCForMDA)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_0();
+ TriggerGCForMDAInternal();
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+#endif // MDA_SUPPORTED
+
+FCIMPL1(DWORD, StubHelpers::CalcVaListSize, VARARGS *varargs)
+{
+ FCALL_CONTRACT;
+
+ return VARARGS::CalcVaListSize(varargs);
+}
+FCIMPLEND
+
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+NOINLINE static void ArrayTypeCheckSlow(Object* element, PtrArray* arr)
+{
+ FC_INNER_PROLOG(StubHelpers::ArrayTypeCheck);
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
+
+ if (!ObjIsInstanceOf(element, arr->GetArrayElementTypeHandle()))
+ COMPlusThrow(kArrayTypeMismatchException);
+
+ HELPER_METHOD_FRAME_END();
+
+ FC_INNER_EPILOG();
+}
+
+FCIMPL2(void, StubHelpers::ArrayTypeCheck, Object* element, PtrArray* arr)
+{
+ FCALL_CONTRACT;
+
+ if (ObjIsInstanceOfNoGC(element, arr->GetArrayElementTypeHandle()) == TypeHandle::CanCast)
+ return;
+
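+ // Inconclusive result from the no-GC check (e.g. TypeHandle::MaybeCast): take the
+ // GC-triggering slow path, which throws ArrayTypeMismatchException if the element
+ // does not match the array's element type.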
+ FC_INNER_RETURN_VOID(ArrayTypeCheckSlow(element, arr));
+}
+FCIMPLEND
+#endif // FEATURE_ARRAYSTUB_AS_IL
+
+#ifdef FEATURE_STUBS_AS_IL
+FCIMPL2(void, StubHelpers::MulticastDebuggerTraceHelper, Object* element, INT32 count)
+{
+ FCALL_CONTRACT;
+}
+FCIMPLEND
+#endif // FEATURE_STUBS_AS_IL
diff --git a/src/vm/stubhelpers.h b/src/vm/stubhelpers.h
new file mode 100644
index 0000000000..16a82f99d5
--- /dev/null
+++ b/src/vm/stubhelpers.h
@@ -0,0 +1,167 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: stubhelpers.h
+//
+
+//
+
+
+#ifndef __STUBHELPERS_h__
+#define __STUBHELPERS_h__
+
+#include "fcall.h"
+#include "clrvarargs.h"
+
+// Maximum number of deferred byref validation entries - we will trigger gen0 GC if we reach this number.
+#define BYREF_VALIDATION_LIST_MAX_SIZE (512 * 1024)
+
+class StubHelpers
+{
+#ifdef VERIFY_HEAP
+ struct ByrefValidationEntry
+ {
+ void *pByref; // pointer to GC heap
+ MethodDesc *pMD; // interop MD this byref was passed to
+ };
+
+ static CQuickArray<ByrefValidationEntry> s_ByrefValidationEntries;
+ static SIZE_T s_ByrefValidationIndex;
+ static CrstStatic s_ByrefValidationLock;
+
+ static void ValidateObjectInternal(Object *pObjUNSAFE, BOOL fValidateNextObj);
+ static MethodDesc *ResolveInteropMethod(Object *pThisUNSAFE, MethodDesc *pMD);
+ static void FormatValidationMessage(MethodDesc *pMD, SString &ssErrorString);
+
+public:
+ static void Init();
+ static void ProcessByrefValidationList();
+#else // VERIFY_HEAP
+public:
+ static void Init() { LIMITED_METHOD_CONTRACT; }
+#endif // VERIFY_HEAP
+
+ //-------------------------------------------------------
+ // PInvoke stub helpers
+ //-------------------------------------------------------
+
+ static FCDECL1(double, DateMarshaler__ConvertToNative, INT64 managedDate);
+ static FCDECL1_V(INT64, DateMarshaler__ConvertToManaged, double nativeDate);
+
+ static FCDECL4(void, ValueClassMarshaler__ConvertToNative, LPVOID pDest, LPVOID pSrc, MethodTable* pMT, OBJECTREF *ppCleanupWorkListOnStack);
+ static FCDECL3(void, ValueClassMarshaler__ConvertToManaged, LPVOID pDest, LPVOID pSrc, MethodTable* pMT);
+ static FCDECL2(void, ValueClassMarshaler__ClearNative, LPVOID pDest, MethodTable* pMT);
+
+#ifdef FEATURE_COMINTEROP
+ static FCDECL4(IUnknown*, GetCOMIPFromRCW, Object* pSrcUNSAFE, MethodDesc* pMD, void **ppTarget, CLR_BOOL* pfNeedsRelease);
+ static FCDECL3(IUnknown*, GetCOMIPFromRCW_WinRT, Object* pSrcUNSAFE, MethodDesc* pMD, void **ppTarget);
+ static FCDECL3(IUnknown*, GetCOMIPFromRCW_WinRTSharedGeneric, Object* pSrcUNSAFE, MethodDesc* pMD, void **ppTarget);
+ static FCDECL3(IUnknown*, GetCOMIPFromRCW_WinRTDelegate, Object* pSrcUNSAFE, MethodDesc* pMD, void **ppTarget);
+
+ static FCDECL2(FC_BOOL_RET, ShouldCallWinRTInterface, Object* pSrcUNSAFE, MethodDesc* pMD);
+ static FCDECL3(DelegateObject*, GetTargetForAmbiguousVariantCall, Object* pSrcUNSAFE, MethodTable* pMT, CLR_BOOL *pfUseString);
+
+ static FCDECL2(void, ObjectMarshaler__ConvertToNative, Object* pSrcUNSAFE, VARIANT* pDest);
+ static FCDECL1(Object*, ObjectMarshaler__ConvertToManaged, VARIANT* pSrc);
+ static FCDECL1(void, ObjectMarshaler__ClearNative, VARIANT* pSrc);
+
+ static FCDECL4(IUnknown*, InterfaceMarshaler__ConvertToNative, Object* pObjUNSAFE, MethodTable* pItfMT, MethodTable* pClsMT, DWORD dwFlags);
+ static FCDECL4(Object*, InterfaceMarshaler__ConvertToManaged, IUnknown **ppUnk, MethodTable *pItfMT, MethodTable *pClsMT, DWORD dwFlags);
+ static void QCALLTYPE InterfaceMarshaler__ClearNative(IUnknown * pUnk);
+ static FCDECL1(Object *, InterfaceMarshaler__ConvertToManagedWithoutUnboxing, IUnknown *pNative);
+
+ static FCDECL1(StringObject*, UriMarshaler__GetRawUriFromNative, ABI::Windows::Foundation::IUriRuntimeClass* pIUriRC);
+ static FCDECL2(IUnknown*, UriMarshaler__CreateNativeUriInstance, CLR_CHAR* pRawUriObj, UINT strLen);
+
+ static ABI::Windows::UI::Xaml::Interop::INotifyCollectionChangedEventArgs* QCALLTYPE
+ EventArgsMarshaler__CreateNativeNCCEventArgsInstance
+ (int action, ABI::Windows::UI::Xaml::Interop::IBindableVector *newItem, ABI::Windows::UI::Xaml::Interop::IBindableVector *oldItem, int newIndex, int oldIndex);
+
+ static ABI::Windows::UI::Xaml::Data::IPropertyChangedEventArgs* QCALLTYPE EventArgsMarshaler__CreateNativePCEventArgsInstance(HSTRING name);
+
+ static FCDECL1(MethodDesc *, GetDelegateInvokeMethod, DelegateObject *pThisUNSAFE);
+ static FCDECL2(IInspectable *, GetWinRTFactoryReturnValue, Object *pThisUNSAFE, PCODE pCtorEntry);
+ static FCDECL2(IInspectable *, GetOuterInspectable, Object *pThisUNSAFE, MethodDesc *pCtorMD);
+#ifdef MDA_SUPPORTED
+ static FCDECL2(ExceptionObject*,TriggerExceptionSwallowedMDA, ExceptionObject* pExceptionUNSAFE, PCODE pManagedTarget);
+#endif // MDA_SUPPORTED
+#endif // FEATURE_COMINTEROP
+
+ static FCDECL0(void, SetLastError );
+ static FCDECL1(FC_BOOL_RET, IsQCall, NDirectMethodDesc* pNMD);
+ static FCDECL1(void, InitDeclaringType, NDirectMethodDesc* pNMD);
+ static FCDECL1(void*, GetNDirectTarget, NDirectMethodDesc* pNMD);
+ static FCDECL2(void*, GetDelegateTarget, DelegateObject *pThisUNSAFE, UINT_PTR *ppStubArg);
+
+#ifndef FEATURE_CORECLR
+#ifndef _WIN64
+ static FCDECL3(void*, GetFinalStubTarget, LPVOID pStubArg, LPVOID pUnmngThis, DWORD dwFlags);
+#endif // !_WIN64
+ static FCDECL1(void, DemandPermission, NDirectMethodDesc *pNMD);
+#endif // !FEATURE_CORECLR
+
+ static FCDECL2(void, ThrowInteropParamException, UINT resID, UINT paramIdx);
+ static FCDECL1(Object*, GetHRExceptionObject, HRESULT hr);
+
+#ifdef FEATURE_COMINTEROP
+ static FCDECL4(Object*, GetCOMHRExceptionObject, HRESULT hr, MethodDesc *pMD, Object *unsafe_pThis, CLR_BOOL fForWinRT);
+#endif // FEATURE_COMINTEROP
+
+ static FCDECL3(void*, CreateCustomMarshalerHelper, MethodDesc* pMD, mdToken paramToken, TypeHandle hndManagedType);
+
+ static FCDECL1(void, DecimalCanonicalizeInternal, DECIMAL *pDec);
+ static FCDECL3(void, FmtClassUpdateNativeInternal, Object* pObjUNSAFE, BYTE* pbNative, OBJECTREF *ppCleanupWorkListOnStack);
+ static FCDECL2(void, FmtClassUpdateCLRInternal, Object* pObjUNSAFE, BYTE* pbNative);
+ static FCDECL2(void, LayoutDestroyNativeInternal, BYTE* pbNative, MethodTable* pMT);
+ static FCDECL1(Object*, AllocateInternal, EnregisteredTypeHandle typeHnd);
+ static FCDECL1(int, AnsiStrlen, __in_z char* pszStr);
+ static FCDECL3(void, MarshalToUnmanagedVaListInternal, va_list va, DWORD cbVaListSize, const VARARGS* pArgIterator);
+ static FCDECL2(void, MarshalToManagedVaListInternal, va_list va, VARARGS* pArgIterator);
+ static FCDECL0(void*, GetStubContext);
+ static FCDECL2(void, LogPinnedArgument, MethodDesc *localDesc, Object *nativeArg);
+#ifdef _WIN64
+ static FCDECL0(void*, GetStubContextAddr);
+#endif // _WIN64
+ static FCDECL1(DWORD, CalcVaListSize, VARARGS *varargs);
+ static FCDECL3(void, ValidateObject, Object *pObjUNSAFE, MethodDesc *pMD, Object *pThisUNSAFE);
+ static FCDECL3(void, ValidateByref, void *pByref, MethodDesc *pMD, Object *pThisUNSAFE);
+
+#ifdef FEATURE_COMINTEROP
+ //-------------------------------------------------------
+ // Helper for the MDA RaceOnRCWCleanup
+ //-------------------------------------------------------
+ static FCDECL1(void, StubRegisterRCW, Object *unsafe_pThis);
+ static FCDECL1(void, StubUnregisterRCW, Object *unsafe_pThis);
+ static FCDECL1(Object*, GetWinRTFactoryObject, MethodDesc *pCMD);
+#endif // FEATURE_COMINTEROP
+
+#ifdef MDA_SUPPORTED
+ static FCDECL0(void, TriggerGCForMDA);
+ static FCDECL1(void, CheckCollectedDelegateMDA, LPVOID pEntryThunk);
+#endif // MDA_SUPPORTED
+
+#ifdef PROFILING_SUPPORTED
+ //-------------------------------------------------------
+ // Profiler helper
+ //-------------------------------------------------------
+ static FCDECL3(SIZE_T, ProfilerBeginTransitionCallback, SIZE_T pSecretParam, Thread* pThread, Object* unsafe_pThis);
+ static FCDECL2(void, ProfilerEndTransitionCallback, MethodDesc* pRealMD, Thread* pThread);
+#endif
+
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+ static FCDECL2(void, ArrayTypeCheck, Object*, PtrArray*);
+#endif
+
+#ifdef FEATURE_COMINTEROP
+ static FCDECL2(StringObject *, WinRTTypeNameConverter__ConvertToWinRTTypeName, ReflectClassBaseObject *pTypeUNSAFE, CLR_BOOL *pbIsWinRTPrimitive);
+ static FCDECL2(ReflectClassBaseObject *, WinRTTypeNameConverter__GetTypeFromWinRTTypeName, StringObject *pWinRTTypeNameUNSAFE, CLR_BOOL *pbIsPrimitive);
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_STUBS_AS_IL
+ static FCDECL2(void, MulticastDebuggerTraceHelper, Object*, INT32);
+#endif
+};
+
+#endif // __STUBHELPERS_h__
diff --git a/src/vm/stublink.cpp b/src/vm/stublink.cpp
new file mode 100644
index 0000000000..678a57669b
--- /dev/null
+++ b/src/vm/stublink.cpp
@@ -0,0 +1,2598 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// stublink.cpp
+//
+
+
+
+#include "common.h"
+
+#include "threads.h"
+#include "excep.h"
+#include "stublink.h"
+#include "perfcounters.h"
+#include "stubgen.h"
+#include "stublink.inl"
+
+#include "rtlfunctions.h"
+
+#define S_BYTEPTR(x) S_SIZE_T((SIZE_T)(x))
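+// Wraps a raw byte count in an overflow-checked S_SIZE_T for safe size arithmetic.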
+
+#ifndef DACCESS_COMPILE
+
+
+//************************************************************************
+// CodeElement
+//
+// There are two types of CodeElements: CodeRuns (a stream of uninterpreted
+// code bytes) and LabelRefs (an instruction containing
+// a fixup.)
+//************************************************************************
+struct CodeElement
+{
+ enum CodeElementType {
+ kCodeRun = 0,
+ kLabelRef = 1,
+ };
+
+
+ CodeElementType m_type; // kCodeRun or kLabelRef
+ CodeElement *m_next; // ptr to next CodeElement
+
+ // Used as workspace during Link(): holds the offset relative to
+ // the start of the final stub.
+ UINT m_globaloffset;
+ UINT m_dataoffset;
+};
+
+
+//************************************************************************
+// CodeRun: A run of uninterrupted code bytes.
+//************************************************************************
+
+#ifdef _DEBUG
+#define CODERUNSIZE 3
+#else
+#define CODERUNSIZE 32
+#endif
+
+struct CodeRun : public CodeElement
+{
+ UINT m_numcodebytes; // how many bytes are actually used
+ BYTE m_codebytes[CODERUNSIZE];
+};
+
+//************************************************************************
+// LabelRef: An instruction containing an embedded label reference
+//************************************************************************
+struct LabelRef : public CodeElement
+{
+ // provides platform-specific information about the instruction
+ InstructionFormat *m_pInstructionFormat;
+
+ // a variation code (interpretation is specific to the InstructionFormat)
+ // typically used to customize an instruction (e.g. with a condition
+ // code.)
+ UINT m_variationCode;
+
+
+ CodeLabel *m_target;
+
+ // Workspace during the link phase
+ UINT m_refsize;
+
+
+ // Pointer to next LabelRef
+ LabelRef *m_nextLabelRef;
+};
+
+
+//************************************************************************
+// IntermediateUnwindInfo
+//************************************************************************
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+
+
+#ifdef _TARGET_AMD64_
+// List of unwind operations, queued in StubLinker::m_pUnwindInfoList.
+struct IntermediateUnwindInfo
+{
+ IntermediateUnwindInfo *pNext;
+ CodeRun *pCodeRun;
+ UINT LocalOffset;
+ UNWIND_CODE rgUnwindCode[1]; // variable length, depends on first entry's UnwindOp
+};
+#endif // _TARGET_AMD64_
+
+
+StubUnwindInfoHeapSegment *g_StubHeapSegments;
+CrstStatic g_StubUnwindInfoHeapSegmentsCrst;
+#ifdef _DEBUG // for unit test
+void *__DEBUG__g_StubHeapSegments = &g_StubHeapSegments;
+#endif
+
+
+//
+// Callback registered via RtlInstallFunctionTableCallback. Called by
+// RtlpLookupDynamicFunctionEntry to locate RUNTIME_FUNCTION entry for a PC
+// found within a portion of a heap that contains stub code.
+//
+T_RUNTIME_FUNCTION*
+FindStubFunctionEntry (
+ WIN64_ONLY(IN ULONG64 ControlPc)
+ NOT_WIN64(IN ULONG ControlPc),
+ IN PVOID Context
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ CONSISTENCY_CHECK(DYNFNTABLE_STUB == IdentifyDynamicFunctionTableTypeFromContext(Context));
+
+ StubUnwindInfoHeapSegment *pStubHeapSegment = (StubUnwindInfoHeapSegment*)DecodeDynamicFunctionTableContext(Context);
+
+ //
+ // The RUNTIME_FUNCTION entry contains ULONG offsets relative to the
+ // segment base. Stub::EmitUnwindInfo ensures that this cast is valid.
+ //
+ ULONG RelativeAddress = (ULONG)((BYTE*)ControlPc - pStubHeapSegment->pbBaseAddress);
+
+ LOG((LF_STUBS, LL_INFO100000, "ControlPc %p, RelativeAddress 0x%x, pStubHeapSegment %p, pStubHeapSegment->pbBaseAddress %p\n",
+ ControlPc,
+ RelativeAddress,
+ pStubHeapSegment,
+ pStubHeapSegment->pbBaseAddress));
+
+ //
+ // Search this segment's list of stubs for an entry that includes the
+ // segment-relative offset.
+ //
+ for (StubUnwindInfoHeader *pHeader = pStubHeapSegment->pUnwindHeaderList;
+ pHeader;
+ pHeader = pHeader->pNext)
+ {
+ // The entry points are in increasing address order.
+ if (RelativeAddress >= RUNTIME_FUNCTION__BeginAddress(&pHeader->FunctionEntry))
+ {
+ T_RUNTIME_FUNCTION *pCurFunction = &pHeader->FunctionEntry;
+ T_RUNTIME_FUNCTION *pPrevFunction = NULL;
+
+ LOG((LF_STUBS, LL_INFO100000, "pCurFunction %p, pCurFunction->BeginAddress 0x%x, pCurFunction->EndAddress 0x%x\n",
+ pCurFunction,
+ RUNTIME_FUNCTION__BeginAddress(pCurFunction),
+ RUNTIME_FUNCTION__EndAddress(pCurFunction, (TADDR)pStubHeapSegment->pbBaseAddress)));
+
+ CONSISTENCY_CHECK(RUNTIME_FUNCTION__EndAddress(pCurFunction, (TADDR)pStubHeapSegment->pbBaseAddress) > RUNTIME_FUNCTION__BeginAddress(pCurFunction));
+ CONSISTENCY_CHECK(!pPrevFunction || RUNTIME_FUNCTION__EndAddress(pPrevFunction, (TADDR)pStubHeapSegment->pbBaseAddress) <= RUNTIME_FUNCTION__BeginAddress(pCurFunction));
+
+ // The entry points are in increasing address order. They're
+ // also contiguous, so after we're sure it's after the start of
+ // the first function (checked above), we only need to test
+ // the end address.
+ if (RelativeAddress < RUNTIME_FUNCTION__EndAddress(pCurFunction, (TADDR)pStubHeapSegment->pbBaseAddress))
+ {
+ CONSISTENCY_CHECK(RelativeAddress >= RUNTIME_FUNCTION__BeginAddress(pCurFunction));
+
+ return pCurFunction;
+ }
+ }
+ }
+
+ //
+ // Return NULL to indicate that there is no RUNTIME_FUNCTION/unwind
+ // information for this offset.
+ //
+ return NULL;
+}
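+
+// Illustrative walk-through (addresses are hypothetical): if a segment based
+// at B holds stubs whose RUNTIME_FUNCTION entries cover the relative ranges
+// [0x20,0x40) and [0x40,0x80), then for ControlPc == B+0x50 the first header
+// passes the BeginAddress test but fails the EndAddress test, and the second
+// header passes both and is returned.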
+
+
+void UnregisterUnwindInfoInLoaderHeapCallback (PVOID pvAllocationBase, SIZE_T cbReserved)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ //
+ // There may be multiple StubUnwindInfoHeapSegment's associated with a region.
+ //
+
+ LOG((LF_STUBS, LL_INFO1000, "Looking for stub unwind info for LoaderHeap segment %p size %p\n", pvAllocationBase, cbReserved));
+
+ CrstHolder crst(&g_StubUnwindInfoHeapSegmentsCrst);
+
+ StubUnwindInfoHeapSegment *pStubHeapSegment;
+ for (StubUnwindInfoHeapSegment **ppPrevStubHeapSegment = &g_StubHeapSegments;
+ pStubHeapSegment = *ppPrevStubHeapSegment; )
+ {
+ LOG((LF_STUBS, LL_INFO10000, " have unwind info for address %p size %p\n", pStubHeapSegment->pbBaseAddress, pStubHeapSegment->cbSegment));
+
+ // If heap region ends before stub segment
+ if ((BYTE*)pvAllocationBase + cbReserved <= pStubHeapSegment->pbBaseAddress)
+ {
+ // The list is ordered, so address range is between segments
+ break;
+ }
+
+ // The given heap segment base address may fall within a prereserved
+ // region that was given to the heap when the heap was constructed, so
+ // pvAllocationBase may be > pbBaseAddress. Also, there could be
+ // multiple segments for each heap region, so pvAllocationBase may be
+ // < pbBaseAddress. So...there is no meaningful relationship between
+ // pvAllocationBase and pbBaseAddress.
+
+ // If heap region starts before end of stub segment
+ if ((BYTE*)pvAllocationBase < pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment)
+ {
+ _ASSERTE((BYTE*)pvAllocationBase + cbReserved <= pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment);
+
+ DeleteEEFunctionTable(pStubHeapSegment);
+#ifdef _TARGET_AMD64_
+ if (pStubHeapSegment->pUnwindInfoTable != 0)
+ delete pStubHeapSegment->pUnwindInfoTable;
+#endif
+ *ppPrevStubHeapSegment = pStubHeapSegment->pNext;
+
+ delete pStubHeapSegment;
+ }
+ else
+ {
+ ppPrevStubHeapSegment = &pStubHeapSegment->pNext;
+ }
+ }
+}
+
+
+VOID UnregisterUnwindInfoInLoaderHeap (UnlockedLoaderHeap *pHeap)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ PRECONDITION(pHeap->m_fPermitStubsWithUnwindInfo);
+ }
+ CONTRACTL_END;
+
+ pHeap->EnumPageRegions(&UnregisterUnwindInfoInLoaderHeapCallback);
+
+#ifdef _DEBUG
+ pHeap->m_fStubUnwindInfoUnregistered = TRUE;
+#endif // _DEBUG
+}
+
+
+class StubUnwindInfoSegmentBoundaryReservationList
+{
+ struct ReservationList
+ {
+ ReservationList *pNext;
+
+ static ReservationList *FromStub (Stub *pStub)
+ {
+ return (ReservationList*)(pStub+1);
+ }
+
+ Stub *GetStub ()
+ {
+ return (Stub*)this - 1;
+ }
+ };
+
+ ReservationList *m_pList;
+
+public:
+
+ StubUnwindInfoSegmentBoundaryReservationList ()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pList = NULL;
+ }
+
+ ~StubUnwindInfoSegmentBoundaryReservationList ()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ ReservationList *pList = m_pList;
+ while (pList)
+ {
+ ReservationList *pNext = pList->pNext;
+
+ pList->GetStub()->DecRef();
+
+ pList = pNext;
+ }
+ }
+
+ void AddStub (Stub *pStub)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ ReservationList *pList = ReservationList::FromStub(pStub);
+
+ pList->pNext = m_pList;
+ m_pList = pList;
+ }
+};
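+
+// How this list is used (sketch): EmitUnwindInfo can fail when a stub's code
+// straddles a 2gb boundary (see below). Link()/LinkInterceptor() park the
+// failed stub here and retry; the ReservationList node lives in the stub's
+// own memory just past the Stub header (pStub+1), so no extra allocation is
+// needed, and the held reference keeps the bad block occupied until the list
+// is destroyed, preventing the allocator from handing back the same block.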
+
+
+#endif // STUBLINKER_GENERATES_UNWIND_INFO
+
+
+//************************************************************************
+// StubLinker
+//************************************************************************
+
+//---------------------------------------------------------------
+// Construction
+//---------------------------------------------------------------
+StubLinker::StubLinker()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ m_pCodeElements = NULL;
+ m_pFirstCodeLabel = NULL;
+ m_pFirstLabelRef = NULL;
+ m_pPatchLabel = NULL;
+ m_stackSize = 0;
+ m_fDataOnly = FALSE;
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+#ifdef _DEBUG
+ m_pUnwindInfoCheckLabel = NULL;
+#endif
+#ifdef _TARGET_AMD64_
+ m_pUnwindInfoList = NULL;
+ m_nUnwindSlots = 0;
+ m_fHaveFramePointer = FALSE;
+#endif
+#ifdef _TARGET_ARM_
+ m_fProlog = FALSE;
+ m_cCalleeSavedRegs = 0;
+ m_cbStackFrame = 0;
+ m_fPushArgRegs = FALSE;
+#endif
+#ifdef _TARGET_ARM64_
+ m_fProlog = FALSE;
+ m_cIntRegArgs = 0;
+ m_cVecRegArgs = 0;
+ m_cCalleeSavedRegs = 0;
+ m_cbStackSpace = 0;
+#endif
+#endif // STUBLINKER_GENERATES_UNWIND_INFO
+}
+
+
+
+//---------------------------------------------------------------
+// Append code bytes.
+//---------------------------------------------------------------
+VOID StubLinker::EmitBytes(const BYTE *pBytes, UINT numBytes)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ CodeElement *pLastCodeElement = GetLastCodeElement();
+ while (numBytes != 0) {
+
+ if (pLastCodeElement != NULL &&
+ pLastCodeElement->m_type == CodeElement::kCodeRun) {
+ CodeRun *pCodeRun = (CodeRun*)pLastCodeElement;
+ UINT numbytessrc = numBytes;
+ UINT numbytesdst = CODERUNSIZE - pCodeRun->m_numcodebytes;
+ if (numbytesdst <= numbytessrc) {
+ CopyMemory(&(pCodeRun->m_codebytes[pCodeRun->m_numcodebytes]),
+ pBytes,
+ numbytesdst);
+ pCodeRun->m_numcodebytes = CODERUNSIZE;
+ pLastCodeElement = NULL;
+ pBytes += numbytesdst;
+ numBytes -= numbytesdst;
+ } else {
+ CopyMemory(&(pCodeRun->m_codebytes[pCodeRun->m_numcodebytes]),
+ pBytes,
+ numbytessrc);
+ pCodeRun->m_numcodebytes += numbytessrc;
+ pBytes += numbytessrc;
+ numBytes = 0;
+ }
+
+ } else {
+ pLastCodeElement = AppendNewEmptyCodeRun();
+ }
+ }
+}
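+
+// Worked example (the CODERUNSIZE value is illustrative): if CODERUNSIZE were
+// 16 and the last CodeRun already held 10 bytes, EmitBytes(p, 20) would copy
+// 6 bytes to fill the current run, append a fresh empty run, and copy the
+// remaining 14 bytes into it.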
+
+
+//---------------------------------------------------------------
+// Append code bytes.
+//---------------------------------------------------------------
+VOID StubLinker::Emit8 (unsigned __int8 val)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ CodeRun *pCodeRun = GetLastCodeRunIfAny();
+ if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
+ *((unsigned __int8 *)(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes)) = val;
+ pCodeRun->m_numcodebytes += sizeof(val);
+ } else {
+ EmitBytes((BYTE*)&val, sizeof(val));
+ }
+}
+
+//---------------------------------------------------------------
+// Append code bytes.
+//---------------------------------------------------------------
+VOID StubLinker::Emit16(unsigned __int16 val)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ CodeRun *pCodeRun = GetLastCodeRunIfAny();
+ if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
+ SET_UNALIGNED_16(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes, val);
+ pCodeRun->m_numcodebytes += sizeof(val);
+ } else {
+ EmitBytes((BYTE*)&val, sizeof(val));
+ }
+}
+
+//---------------------------------------------------------------
+// Append code bytes.
+//---------------------------------------------------------------
+VOID StubLinker::Emit32(unsigned __int32 val)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ CodeRun *pCodeRun = GetLastCodeRunIfAny();
+ if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
+ SET_UNALIGNED_32(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes, val);
+ pCodeRun->m_numcodebytes += sizeof(val);
+ } else {
+ EmitBytes((BYTE*)&val, sizeof(val));
+ }
+}
+
+//---------------------------------------------------------------
+// Append code bytes.
+//---------------------------------------------------------------
+VOID StubLinker::Emit64(unsigned __int64 val)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ CodeRun *pCodeRun = GetLastCodeRunIfAny();
+ if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
+ SET_UNALIGNED_64(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes, val);
+ pCodeRun->m_numcodebytes += sizeof(val);
+ } else {
+ EmitBytes((BYTE*)&val, sizeof(val));
+ }
+}
+
+//---------------------------------------------------------------
+// Append pointer value.
+//---------------------------------------------------------------
+VOID StubLinker::EmitPtr(const VOID *val)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ CodeRun *pCodeRun = GetLastCodeRunIfAny();
+ if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
+ SET_UNALIGNED_PTR(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes, (UINT_PTR)val);
+ pCodeRun->m_numcodebytes += sizeof(val);
+ } else {
+ EmitBytes((BYTE*)&val, sizeof(val));
+ }
+}
+
+
+//---------------------------------------------------------------
+// Create a new undefined label. Label must be assigned to a code
+// location using EmitLabel() prior to final linking.
+// Throws COM+ exception on failure.
+//---------------------------------------------------------------
+CodeLabel* StubLinker::NewCodeLabel()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ CodeLabel *pCodeLabel = (CodeLabel*)(m_quickHeap.Alloc(sizeof(CodeLabel)));
+ _ASSERTE(pCodeLabel); // QuickHeap throws exceptions rather than returning NULL
+ pCodeLabel->m_next = m_pFirstCodeLabel;
+ pCodeLabel->m_fExternal = FALSE;
+ pCodeLabel->m_fAbsolute = FALSE;
+ pCodeLabel->i.m_pCodeRun = NULL;
+ m_pFirstCodeLabel = pCodeLabel;
+ return pCodeLabel;
+}
+
+CodeLabel* StubLinker::NewAbsoluteCodeLabel()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ CodeLabel *pCodeLabel = NewCodeLabel();
+ pCodeLabel->m_fAbsolute = TRUE;
+ return pCodeLabel;
+}
+
+
+//---------------------------------------------------------------
+// Sets the label to point to the current "instruction pointer".
+// It is invalid to call EmitLabel() twice on
+// the same label.
+//---------------------------------------------------------------
+VOID StubLinker::EmitLabel(CodeLabel* pCodeLabel)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!(pCodeLabel->m_fExternal)); //can't emit an external label
+ _ASSERTE(pCodeLabel->i.m_pCodeRun == NULL); //must only emit label once
+ CodeRun *pLastCodeRun = GetLastCodeRunIfAny();
+ if (!pLastCodeRun) {
+ pLastCodeRun = AppendNewEmptyCodeRun();
+ }
+ pCodeLabel->i.m_pCodeRun = pLastCodeRun;
+ pCodeLabel->i.m_localOffset = pLastCodeRun->m_numcodebytes;
+}
+
+
+//---------------------------------------------------------------
+// Combines NewCodeLabel() and EmitLabel() for convenience.
+// Throws COM+ exception on failure.
+//---------------------------------------------------------------
+CodeLabel* StubLinker::EmitNewCodeLabel()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ CodeLabel* label = NewCodeLabel();
+ EmitLabel(label);
+ return label;
+}
+
+
+//---------------------------------------------------------------
+// Creates & emits the patch offset label for the stub
+//---------------------------------------------------------------
+VOID StubLinker::EmitPatchLabel()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ //
+ // Note that it's OK to re-emit the patch label;
+ // we just use the later one.
+ //
+
+ m_pPatchLabel = EmitNewCodeLabel();
+}
+
+//---------------------------------------------------------------
+// Returns final location of label as an offset from the start
+// of the stub. Can only be called after linkage.
+//---------------------------------------------------------------
+UINT32 StubLinker::GetLabelOffset(CodeLabel *pLabel)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!(pLabel->m_fExternal));
+ return pLabel->i.m_localOffset + pLabel->i.m_pCodeRun->m_globaloffset;
+}
+
+
+//---------------------------------------------------------------
+// Create a new label to an external address.
+// Throws COM+ exception on failure.
+//---------------------------------------------------------------
+CodeLabel* StubLinker::NewExternalCodeLabel(LPVOID pExternalAddress)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+
+ PRECONDITION(CheckPointer(pExternalAddress));
+ }
+ CONTRACTL_END;
+
+ CodeLabel *pCodeLabel = (CodeLabel*)(m_quickHeap.Alloc(sizeof(CodeLabel)));
+ _ASSERTE(pCodeLabel); // QuickHeap throws exceptions rather than returning NULL
+ pCodeLabel->m_next = m_pFirstCodeLabel;
+ pCodeLabel->m_fExternal = TRUE;
+ pCodeLabel->m_fAbsolute = FALSE;
+ pCodeLabel->e.m_pExternalAddress = pExternalAddress;
+ m_pFirstCodeLabel = pCodeLabel;
+ return pCodeLabel;
+}
+
+
+
+
+//---------------------------------------------------------------
+// Append an instruction containing a reference to a label.
+//
+// target - the label being referenced.
+// instructionFormat - a platform-specific InstructionFormat object
+// that gives properties about the reference.
+// variationCode - uninterpreted data passed to the pInstructionFormat methods.
+//---------------------------------------------------------------
+VOID StubLinker::EmitLabelRef(CodeLabel* target, const InstructionFormat & instructionFormat, UINT variationCode)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ LabelRef *pLabelRef = (LabelRef *)(m_quickHeap.Alloc(sizeof(LabelRef)));
+ _ASSERTE(pLabelRef); // m_quickHeap throws an exception rather than returning NULL
+ pLabelRef->m_type = LabelRef::kLabelRef;
+ pLabelRef->m_pInstructionFormat = (InstructionFormat*)&instructionFormat;
+ pLabelRef->m_variationCode = variationCode;
+ pLabelRef->m_target = target;
+
+ pLabelRef->m_nextLabelRef = m_pFirstLabelRef;
+ m_pFirstLabelRef = pLabelRef;
+
+ AppendCodeElement(pLabelRef);
+}
+
+
+
+
+
+//---------------------------------------------------------------
+// Internal helper routine.
+//---------------------------------------------------------------
+CodeRun *StubLinker::GetLastCodeRunIfAny()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ CodeElement *pLastCodeElem = GetLastCodeElement();
+ if (pLastCodeElem == NULL || pLastCodeElem->m_type != CodeElement::kCodeRun) {
+ return NULL;
+ } else {
+ return (CodeRun*)pLastCodeElem;
+ }
+}
+
+
+//---------------------------------------------------------------
+// Internal helper routine.
+//---------------------------------------------------------------
+CodeRun *StubLinker::AppendNewEmptyCodeRun()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ CodeRun *pNewCodeRun = (CodeRun*)(m_quickHeap.Alloc(sizeof(CodeRun)));
+ _ASSERTE(pNewCodeRun); // QuickHeap throws exceptions rather than returning NULL
+ pNewCodeRun->m_type = CodeElement::kCodeRun;
+ pNewCodeRun->m_numcodebytes = 0;
+ AppendCodeElement(pNewCodeRun);
+ return pNewCodeRun;
+
+}
+
+//---------------------------------------------------------------
+// Internal helper routine.
+//---------------------------------------------------------------
+VOID StubLinker::AppendCodeElement(CodeElement *pCodeElement)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ pCodeElement->m_next = m_pCodeElements;
+ m_pCodeElements = pCodeElement;
+}
+
+
+
+//---------------------------------------------------------------
+// Is the current LabelRef's size big enough to reach the target?
+//---------------------------------------------------------------
+static BOOL LabelCanReach(LabelRef *pLabelRef)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ InstructionFormat *pIF = pLabelRef->m_pInstructionFormat;
+
+ if (pLabelRef->m_target->m_fExternal)
+ {
+ return pLabelRef->m_pInstructionFormat->CanReach(
+ pLabelRef->m_refsize, pLabelRef->m_variationCode, TRUE, (INT_PTR)pLabelRef->m_target->e.m_pExternalAddress);
+ }
+ else
+ {
+ UINT targetglobaloffset = pLabelRef->m_target->i.m_pCodeRun->m_globaloffset +
+ pLabelRef->m_target->i.m_localOffset;
+ UINT srcglobaloffset = pLabelRef->m_globaloffset +
+ pIF->GetHotSpotOffset(pLabelRef->m_refsize,
+ pLabelRef->m_variationCode);
+ INT offset = (INT)(targetglobaloffset - srcglobaloffset);
+
+ return pLabelRef->m_pInstructionFormat->CanReach(
+ pLabelRef->m_refsize, pLabelRef->m_variationCode, FALSE, offset);
+ }
+}
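+
+// Worked example (offsets are hypothetical): for an internal target at global
+// offset 0x40 referenced from a LabelRef at global offset 0x10 whose hot spot
+// is 2 bytes into the instruction, the displacement handed to CanReach is
+// 0x40 - (0x10 + 2) = 0x2E; CanReach then decides whether that value fits in
+// the current m_refsize encoding.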
+
+//---------------------------------------------------------------
+// Generate the actual stub. The returned stub has a refcount of 1.
+// No other methods (other than the destructor) should be called
+// after calling Link().
+//
+// Throws COM+ exception on failure.
+//---------------------------------------------------------------
+Stub *StubLinker::LinkInterceptor(LoaderHeap *pHeap, Stub* interceptee, void *pRealAddr)
+{
+ STANDARD_VM_CONTRACT;
+
+ int globalsize = 0;
+ int size = CalculateSize(&globalsize);
+
+ _ASSERTE(!pHeap || pHeap->IsExecutable());
+
+ StubHolder<Stub> pStub;
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ StubUnwindInfoSegmentBoundaryReservationList ReservedStubs;
+
+ for (;;)
+#endif
+ {
+ pStub = InterceptStub::NewInterceptedStub(pHeap, size, interceptee,
+ pRealAddr
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ , UnwindInfoSize(globalsize)
+#endif
+ );
+ bool fSuccess = EmitStub(pStub, globalsize);
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ if (fSuccess)
+ {
+ break;
+ }
+ else
+ {
+ ReservedStubs.AddStub(pStub);
+ pStub.SuppressRelease();
+ }
+#else
+ CONSISTENCY_CHECK_MSG(fSuccess, ("EmitStub should always return true"));
+#endif
+ }
+
+ return pStub.Extract();
+}
+
+//---------------------------------------------------------------
+// Generate the actual stub. The returned stub has a refcount of 1.
+// No other methods (other than the destructor) should be called
+// after calling Link().
+//
+// Throws COM+ exception on failure.
+//---------------------------------------------------------------
+Stub *StubLinker::Link(LoaderHeap *pHeap, DWORD flags)
+{
+ STANDARD_VM_CONTRACT;
+
+ int globalsize = 0;
+ int size = CalculateSize(&globalsize);
+
+ _ASSERTE(!pHeap || pHeap->IsExecutable());
+
+ StubHolder<Stub> pStub;
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ StubUnwindInfoSegmentBoundaryReservationList ReservedStubs;
+
+ for (;;)
+#endif
+ {
+ pStub = Stub::NewStub(
+ pHeap,
+ size,
+ flags
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ , UnwindInfoSize(globalsize)
+#endif
+ );
+ ASSERT(pStub != NULL);
+
+ bool fSuccess = EmitStub(pStub, globalsize);
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ if (fSuccess)
+ {
+ break;
+ }
+ else
+ {
+ ReservedStubs.AddStub(pStub);
+ pStub.SuppressRelease();
+ }
+#else
+ CONSISTENCY_CHECK_MSG(fSuccess, ("EmitStub should always return true"));
+#endif
+ }
+
+ return pStub.Extract();
+}
+
+int StubLinker::CalculateSize(int* pGlobalSize)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pGlobalSize);
+
+#if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO) && !defined(CROSSGEN_COMPILE)
+ if (m_pUnwindInfoCheckLabel)
+ {
+ EmitLabel(m_pUnwindInfoCheckLabel);
+ EmitUnwindInfoCheckSubfunction();
+ m_pUnwindInfoCheckLabel = NULL;
+ }
+#endif
+
+#ifdef _DEBUG
+ // Don't want any undefined labels
+ for (CodeLabel *pCodeLabel = m_pFirstCodeLabel;
+ pCodeLabel != NULL;
+ pCodeLabel = pCodeLabel->m_next) {
+ if ((!(pCodeLabel->m_fExternal)) && pCodeLabel->i.m_pCodeRun == NULL) {
+ _ASSERTE(!"Forgot to define a label before asking StubLinker to link.");
+ }
+ }
+#endif //_DEBUG
+
+ //-------------------------------------------------------------------
+ // Tentatively set all of the labelref sizes to their smallest possible
+ // value.
+ //-------------------------------------------------------------------
+ for (LabelRef *pLabelRef = m_pFirstLabelRef;
+ pLabelRef != NULL;
+ pLabelRef = pLabelRef->m_nextLabelRef) {
+
+ for (UINT bitmask = 1; bitmask <= InstructionFormat::kMax; bitmask = bitmask << 1) {
+ if (pLabelRef->m_pInstructionFormat->m_allowedSizes & bitmask) {
+ pLabelRef->m_refsize = bitmask;
+ break;
+ }
+ }
+
+ }
+
+ UINT globalsize;
+ UINT datasize;
+ BOOL fSomethingChanged;
+ do {
+ fSomethingChanged = FALSE;
+
+
+ // Layout each code element.
+ globalsize = 0;
+ datasize = 0;
+ CodeElement *pCodeElem;
+ for (pCodeElem = m_pCodeElements; pCodeElem; pCodeElem = pCodeElem->m_next) {
+
+ switch (pCodeElem->m_type) {
+ case CodeElement::kCodeRun:
+ globalsize += ((CodeRun*)pCodeElem)->m_numcodebytes;
+ break;
+
+ case CodeElement::kLabelRef: {
+ LabelRef *pLabelRef = (LabelRef*)pCodeElem;
+ globalsize += pLabelRef->m_pInstructionFormat->GetSizeOfInstruction( pLabelRef->m_refsize,
+ pLabelRef->m_variationCode );
+ datasize += pLabelRef->m_pInstructionFormat->GetSizeOfData( pLabelRef->m_refsize,
+ pLabelRef->m_variationCode );
+ }
+ break;
+
+ default:
+ _ASSERTE(0);
+ }
+
+ // Record a temporary global offset; this is actually
+ // wrong by a fixed value. We'll fix up after we know the
+ // size of the entire stub.
+ pCodeElem->m_globaloffset = 0 - globalsize;
+
+ // Also record the data offset. Note that the linked list we walk is in
+ // *reverse* order, so we visit the last instruction first; what we
+ // record now is in fact the offset from the *end* of the data block.
+ // We fix it up later.
+ pCodeElem->m_dataoffset = 0 - datasize;
+ }
+
+ // Now fix up the global offsets.
+ for (pCodeElem = m_pCodeElements; pCodeElem; pCodeElem = pCodeElem->m_next) {
+ pCodeElem->m_globaloffset += globalsize;
+ pCodeElem->m_dataoffset += datasize;
+ }
+
+
+ // Now iterate through the LabelRefs and check whether any of them
+ // have to be resized.
+ for (LabelRef *pLabelRef = m_pFirstLabelRef;
+ pLabelRef != NULL;
+ pLabelRef = pLabelRef->m_nextLabelRef) {
+
+
+ if (!LabelCanReach(pLabelRef)) {
+ fSomethingChanged = TRUE;
+
+ UINT bitmask = pLabelRef->m_refsize << 1;
+ // Find the next largest size.
+ // (we could be smarter about this and eliminate intermediate
+ // sizes based on the tentative offset.)
+ for (; bitmask <= InstructionFormat::kMax; bitmask = bitmask << 1) {
+ if (pLabelRef->m_pInstructionFormat->m_allowedSizes & bitmask) {
+ pLabelRef->m_refsize = bitmask;
+ break;
+ }
+ }
+#ifdef _DEBUG
+ if (bitmask > InstructionFormat::kMax) {
+ // CANNOT REACH target even with kMax
+ _ASSERTE(!"Stub instruction cannot reach target: must choose a different instruction!");
+ }
+#endif
+ }
+ }
+
+
+ } while (fSomethingChanged); // Keep iterating until all LabelRefs can reach their targets
+
+
+ // We now have the correct layout; write out the stub.
+
+ // Compute stub code+data size after aligning data correctly
+ if(globalsize % DATA_ALIGNMENT)
+ globalsize += (DATA_ALIGNMENT - (globalsize % DATA_ALIGNMENT));
+
+ *pGlobalSize = globalsize;
+ return globalsize + datasize;
+}
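+
+// Sizing sketch (the DATA_ALIGNMENT value below is illustrative): every
+// LabelRef starts at its smallest allowed encoding; whenever LabelCanReach
+// fails, the ref grows to the next allowed size and the whole layout is
+// recomputed, so the loop above is a fixed-point iteration. If the final code
+// size were 13 bytes with DATA_ALIGNMENT == 8, globalsize would be padded to
+// 16 and the function would return 16 + datasize.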
+
+bool StubLinker::EmitStub(Stub* pStub, int globalsize)
+{
+ STANDARD_VM_CONTRACT;
+
+ BYTE *pCode = (BYTE*)(pStub->GetBlob());
+ BYTE *pData = pCode+globalsize; // start of data area
+ {
+ int lastCodeOffset = 0;
+
+ // Write out each code element.
+ for (CodeElement* pCodeElem = m_pCodeElements; pCodeElem; pCodeElem = pCodeElem->m_next) {
+ int currOffset = 0;
+
+ switch (pCodeElem->m_type) {
+ case CodeElement::kCodeRun:
+ CopyMemory(pCode + pCodeElem->m_globaloffset,
+ ((CodeRun*)pCodeElem)->m_codebytes,
+ ((CodeRun*)pCodeElem)->m_numcodebytes);
+ currOffset = pCodeElem->m_globaloffset + ((CodeRun *)pCodeElem)->m_numcodebytes;
+ break;
+
+ case CodeElement::kLabelRef: {
+ LabelRef *pLabelRef = (LabelRef*)pCodeElem;
+ InstructionFormat *pIF = pLabelRef->m_pInstructionFormat;
+ __int64 fixupval;
+
+ LPBYTE srcglobaladdr = pCode +
+ pLabelRef->m_globaloffset +
+ pIF->GetHotSpotOffset(pLabelRef->m_refsize,
+ pLabelRef->m_variationCode);
+ LPBYTE targetglobaladdr;
+ if (!(pLabelRef->m_target->m_fExternal)) {
+ targetglobaladdr = pCode +
+ pLabelRef->m_target->i.m_pCodeRun->m_globaloffset +
+ pLabelRef->m_target->i.m_localOffset;
+ } else {
+ targetglobaladdr = (LPBYTE)(pLabelRef->m_target->e.m_pExternalAddress);
+ }
+ if ((pLabelRef->m_target->m_fAbsolute)) {
+ fixupval = (__int64)(size_t)targetglobaladdr;
+ } else
+ fixupval = (__int64)(targetglobaladdr - srcglobaladdr);
+
+ pLabelRef->m_pInstructionFormat->EmitInstruction(
+ pLabelRef->m_refsize,
+ fixupval,
+ pCode + pCodeElem->m_globaloffset,
+ pLabelRef->m_variationCode,
+ pData + pCodeElem->m_dataoffset);
+
+ currOffset =
+ pCodeElem->m_globaloffset +
+ pLabelRef->m_pInstructionFormat->GetSizeOfInstruction( pLabelRef->m_refsize,
+ pLabelRef->m_variationCode );
+ }
+ break;
+
+ default:
+ _ASSERTE(0);
+ }
+ lastCodeOffset = (currOffset > lastCodeOffset) ? currOffset : lastCodeOffset;
+ }
+
+ // Fill in zeros at the end, if necessary
+ if (lastCodeOffset < globalsize)
+ ZeroMemory(pCode + lastCodeOffset, globalsize - lastCodeOffset);
+ }
+
+ // Fill in patch offset, if we have one
+ // Note that these offsets are relative to the start of the stub,
+ // not the code, so you'll have to add sizeof(Stub) to get to the
+ // right spot.
+ if (m_pPatchLabel != NULL)
+ {
+ UINT32 uLabelOffset = GetLabelOffset(m_pPatchLabel);
+ _ASSERTE(FitsIn<USHORT>(uLabelOffset));
+ pStub->SetPatchOffset(static_cast<USHORT>(uLabelOffset));
+
+ LOG((LF_CORDB, LL_INFO100, "SL::ES: patch offset:0x%x\n",
+ pStub->GetPatchOffset()));
+ }
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ if (pStub->HasUnwindInfo())
+ {
+ if (!EmitUnwindInfo(pStub, globalsize))
+ return false;
+ }
+#endif // STUBLINKER_GENERATES_UNWIND_INFO
+
+ if (!m_fDataOnly)
+ {
+ FlushInstructionCache(GetCurrentProcess(), pCode, globalsize);
+ }
+
+ _ASSERTE(m_fDataOnly || DbgIsExecutable(pCode, globalsize));
+
+ return true;
+}
+
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+#if defined(_TARGET_AMD64_)
+
+// See RtlVirtualUnwind in base\ntos\rtl\amd64\exdsptch.c
+
+static_assert_no_msg(kRAX == (FIELD_OFFSET(CONTEXT, Rax) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+static_assert_no_msg(kRCX == (FIELD_OFFSET(CONTEXT, Rcx) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+static_assert_no_msg(kRDX == (FIELD_OFFSET(CONTEXT, Rdx) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+static_assert_no_msg(kRBX == (FIELD_OFFSET(CONTEXT, Rbx) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+static_assert_no_msg(kRBP == (FIELD_OFFSET(CONTEXT, Rbp) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+static_assert_no_msg(kRSI == (FIELD_OFFSET(CONTEXT, Rsi) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+static_assert_no_msg(kRDI == (FIELD_OFFSET(CONTEXT, Rdi) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+static_assert_no_msg(kR8 == (FIELD_OFFSET(CONTEXT, R8 ) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+static_assert_no_msg(kR9 == (FIELD_OFFSET(CONTEXT, R9 ) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+static_assert_no_msg(kR10 == (FIELD_OFFSET(CONTEXT, R10) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+static_assert_no_msg(kR11 == (FIELD_OFFSET(CONTEXT, R11) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+static_assert_no_msg(kR12 == (FIELD_OFFSET(CONTEXT, R12) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+static_assert_no_msg(kR13 == (FIELD_OFFSET(CONTEXT, R13) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+static_assert_no_msg(kR14 == (FIELD_OFFSET(CONTEXT, R14) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+static_assert_no_msg(kR15 == (FIELD_OFFSET(CONTEXT, R15) - FIELD_OFFSET(CONTEXT, Rax)) / sizeof(ULONG64));
+
+VOID StubLinker::UnwindSavedReg (UCHAR reg, ULONG SPRelativeOffset)
+{
+ USHORT FrameOffset = (USHORT)(SPRelativeOffset / 8);
+
+ if ((ULONG)FrameOffset == SPRelativeOffset)
+ {
+ UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_SAVE_NONVOL);
+ pUnwindCode->OpInfo = reg;
+ pUnwindCode[1].FrameOffset = FrameOffset;
+ }
+ else
+ {
+ UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_SAVE_NONVOL_FAR);
+ pUnwindCode->OpInfo = reg;
+ pUnwindCode[1].FrameOffset = (USHORT)SPRelativeOffset;
+ pUnwindCode[2].FrameOffset = (USHORT)(SPRelativeOffset >> 16);
+ }
+}
+
+VOID StubLinker::UnwindPushedReg (UCHAR reg)
+{
+ m_stackSize += sizeof(void*);
+
+ if (m_fHaveFramePointer)
+ return;
+
+ UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_PUSH_NONVOL);
+ pUnwindCode->OpInfo = reg;
+}
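+
+// Note the early-out on m_fHaveFramePointer here and in UnwindAllocStack
+// below: once UWOP_SET_FPREG has been recorded, later pushes and stack
+// allocations are deliberately left unreported, on the assumption that the
+// frame can be recovered from the frame pointer alone.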
+
+VOID StubLinker::UnwindAllocStack (SHORT FrameSizeIncrement)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ if (! ClrSafeInt<SHORT>::addition(m_stackSize, FrameSizeIncrement, m_stackSize))
+ COMPlusThrowArithmetic();
+
+ if (m_fHaveFramePointer)
+ return;
+
+ UCHAR OpInfo = (UCHAR)((FrameSizeIncrement - 8) / 8);
+
+ if (OpInfo*8 + 8 == FrameSizeIncrement)
+ {
+ UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_ALLOC_SMALL);
+ pUnwindCode->OpInfo = OpInfo;
+ }
+ else
+ {
+ USHORT FrameOffset = (USHORT)FrameSizeIncrement;
+ BOOL fNeedExtraSlot = ((ULONG)FrameOffset != (ULONG)FrameSizeIncrement);
+
+ UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_ALLOC_LARGE, fNeedExtraSlot);
+
+ pUnwindCode->OpInfo = fNeedExtraSlot;
+
+ pUnwindCode[1].FrameOffset = FrameOffset;
+
+ if (fNeedExtraSlot)
+ pUnwindCode[2].FrameOffset = (USHORT)(FrameSizeIncrement >> 16);
+ }
+}
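+
+// Worked example: UnwindAllocStack(0x28) computes OpInfo = (40 - 8) / 8 = 4,
+// and since 4*8 + 8 == 40 it records UWOP_ALLOC_SMALL with OpInfo == 4 (the
+// x64 encoding stores allocation sizes 8..128 as OpInfo*8 + 8 in a single
+// slot); sizes that fail that round-trip check fall through to
+// UWOP_ALLOC_LARGE with the size spelled out in one or two extra slots.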
+
+VOID StubLinker::UnwindSetFramePointer (UCHAR reg)
+{
+ _ASSERTE(!m_fHaveFramePointer);
+
+ UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_SET_FPREG);
+ pUnwindCode->OpInfo = reg;
+
+ m_fHaveFramePointer = TRUE;
+}
+
+UNWIND_CODE *StubLinker::AllocUnwindInfo (UCHAR Op, UCHAR nExtraSlots /*= 0*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ _ASSERTE(Op < sizeof(UnwindOpExtraSlotTable));
+
+ UCHAR nSlotsAlloc = UnwindOpExtraSlotTable[Op] + nExtraSlots;
+
+ IntermediateUnwindInfo *pUnwindInfo = (IntermediateUnwindInfo*)m_quickHeap.Alloc( sizeof(IntermediateUnwindInfo)
+ + nSlotsAlloc * sizeof(UNWIND_CODE));
+ m_nUnwindSlots += 1 + nSlotsAlloc;
+
+ pUnwindInfo->pNext = m_pUnwindInfoList;
+ m_pUnwindInfoList = pUnwindInfo;
+
+ UNWIND_CODE *pUnwindCode = &pUnwindInfo->rgUnwindCode[0];
+
+ pUnwindCode->UnwindOp = Op;
+
+ CodeRun *pCodeRun = GetLastCodeRunIfAny();
+ _ASSERTE(pCodeRun != NULL);
+
+ pUnwindInfo->pCodeRun = pCodeRun;
+ pUnwindInfo->LocalOffset = pCodeRun->m_numcodebytes;
+
+ EmitUnwindInfoCheck();
+
+ return pUnwindCode;
+}
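+
+// Allocation pattern: rgUnwindCode is declared with one element but is used
+// as a variable-length array. AllocUnwindInfo over-allocates the node by
+// nSlotsAlloc * sizeof(UNWIND_CODE), so the pUnwindCode[1], pUnwindCode[2]
+// slots written by the callers above index into that extra space.
+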
+#endif // defined(_TARGET_AMD64_)
+
+bool StubLinker::EmitUnwindInfo(Stub* pStub, int globalsize)
+{
+ STANDARD_VM_CONTRACT;
+
+ BYTE *pCode = (BYTE*)(pStub->GetEntryPoint());
+
+ //
+ // Determine the lower bound of the address space containing the stub.
+ // The properties of individual pages may change, but the bounds of a
+ // VirtualAlloc(MEM_RESERVE)'d region will never change.
+ //
+
+ MEMORY_BASIC_INFORMATION mbi;
+
+ if (sizeof(mbi) != ClrVirtualQuery(pCode, &mbi, sizeof(mbi)))
+ {
+ // REVISIT_TODO better exception
+ COMPlusThrowOM();
+ }
+
+ BYTE *pbRegionBaseAddress = (BYTE*)mbi.AllocationBase;
+
+#ifdef _DEBUG
+ static SIZE_T MaxSegmentSize = -1;
+ if (MaxSegmentSize == (SIZE_T)-1)
+ MaxSegmentSize = EEConfig::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_MaxStubUnwindInfoSegmentSize, DYNAMIC_FUNCTION_TABLE_MAX_RANGE);
+#else
+ const SIZE_T MaxSegmentSize = DYNAMIC_FUNCTION_TABLE_MAX_RANGE;
+#endif
+
+ //
+ // The RUNTIME_FUNCTION offsets are ULONGs. If the region size is >
+ // ULONG_MAX, then we'll shift the base address to the next 4gb and
+ // register a separate function table.
+ //
+ // But...RtlInstallFunctionTableCallback has a 2gb restriction...so
+ // make that LONG_MAX.
+ //
+
+ StubUnwindInfoHeader *pHeader = pStub->GetUnwindInfoHeader();
+ _ASSERTE(IS_ALIGNED(pHeader, sizeof(void*)));
+
+ BYTE *pbBaseAddress = pbRegionBaseAddress;
+
+ while ((size_t)((BYTE*)pHeader - pbBaseAddress) > MaxSegmentSize)
+ {
+ pbBaseAddress += MaxSegmentSize;
+ }
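+
+ // For example (addresses are hypothetical): if the reserved region starts
+ // at R and pHeader sits 5gb above R with MaxSegmentSize == 2gb, the loop
+ // above advances pbBaseAddress to R + 4gb, so the ULONG offsets computed
+ // against it stay in range.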
+
+ //
+ // If the unwind info/code straddle a 2gb boundary, then we're stuck.
+ // Rather than add a lot more bit twiddling code to deal with this
+ // exceptionally rare case, we'll signal the caller to keep this allocation
+ // temporarily and allocate another. This repeats until we eventually get
+ // an allocation that doesn't straddle a 2gb boundary. Afterwards the old
+ // allocations are freed.
+ //
+
+ if ((size_t)(pCode + globalsize - pbBaseAddress) > MaxSegmentSize)
+ {
+ return false;
+ }
+
+ // Ensure that the first RUNTIME_FUNCTION struct ends up pointer aligned,
+ // so that the StubUnwindInfoHeader struct is aligned. UNWIND_INFO
+ // includes one UNWIND_CODE.
+ _ASSERTE(IS_ALIGNED(pStub, sizeof(void*)));
+ _ASSERTE(0 == (FIELD_OFFSET(StubUnwindInfoHeader, FunctionEntry) % sizeof(void*)));
+
+ StubUnwindInfoHeader * pUnwindInfoHeader = pStub->GetUnwindInfoHeader();
+
+#ifdef _TARGET_AMD64_
+
+ UNWIND_CODE *pDestUnwindCode = &pUnwindInfoHeader->UnwindInfo.UnwindCode[0];
+#ifdef _DEBUG
+ UNWIND_CODE *pDestUnwindCodeLimit = (UNWIND_CODE*)pStub->GetUnwindInfoHeaderSuffix();
+#endif
+
+ UINT FrameRegister = 0;
+
+ //
+ // Resolve the unwind operation offsets, and fill in the UNWIND_INFO and
+ // RUNTIME_FUNCTION structs preceding the stub. The unwind codes are recorded
+ // in decreasing address order.
+ //
+
+ for (IntermediateUnwindInfo *pUnwindInfoList = m_pUnwindInfoList; pUnwindInfoList != NULL; pUnwindInfoList = pUnwindInfoList->pNext)
+ {
+ UNWIND_CODE *pUnwindCode = &pUnwindInfoList->rgUnwindCode[0];
+ UCHAR op = pUnwindCode[0].UnwindOp;
+
+ if (UWOP_SET_FPREG == op)
+ {
+ FrameRegister = pUnwindCode[0].OpInfo;
+ }
+
+ //
+ // Compute number of slots used by this encoding.
+ //
+
+ UINT nSlots;
+
+ if (UWOP_ALLOC_LARGE == op)
+ {
+ nSlots = 2 + pUnwindCode[0].OpInfo;
+ }
+ else
+ {
+ _ASSERTE(UnwindOpExtraSlotTable[op] != (UCHAR)-1);
+ nSlots = 1 + UnwindOpExtraSlotTable[op];
+ }
+
+ //
+ // Compute offset and ensure that it will fit in the encoding.
+ //
+
+ SIZE_T CodeOffset = pUnwindInfoList->pCodeRun->m_globaloffset
+ + pUnwindInfoList->LocalOffset;
+
+ if (CodeOffset != (SIZE_T)(UCHAR)CodeOffset)
+ {
+ // REVISIT_TODO better exception
+ COMPlusThrowOM();
+ }
+
+ //
+ // Copy the encoding data, overwrite the new offset, and advance
+ // to the next encoding.
+ //
+
+ _ASSERTE(pDestUnwindCode + nSlots <= pDestUnwindCodeLimit);
+
+ CopyMemory(pDestUnwindCode, pUnwindCode, nSlots * sizeof(UNWIND_CODE));
+
+ pDestUnwindCode->CodeOffset = (UCHAR)CodeOffset;
+
+ pDestUnwindCode += nSlots;
+ }
+
+ //
+ // Fill in the UNWIND_INFO struct
+ //
+ UNWIND_INFO *pUnwindInfo = &pUnwindInfoHeader->UnwindInfo;
+ _ASSERTE(IS_ALIGNED(pUnwindInfo, sizeof(ULONG)));
+
+ // PrologueSize may be 0 if all unwind directives are at offset 0.
+ SIZE_T PrologueSize = m_pUnwindInfoList->pCodeRun->m_globaloffset
+ + m_pUnwindInfoList->LocalOffset;
+
+ UINT nEntryPointSlots = m_nUnwindSlots;
+
+ if ( PrologueSize != (SIZE_T)(UCHAR)PrologueSize
+ || nEntryPointSlots > UCHAR_MAX)
+ {
+ // REVISIT_TODO better exception
+ COMPlusThrowOM();
+ }
+
+ _ASSERTE(nEntryPointSlots);
+
+ pUnwindInfo->Version = 1;
+ pUnwindInfo->Flags = 0;
+ pUnwindInfo->SizeOfProlog = (UCHAR)PrologueSize;
+ pUnwindInfo->CountOfUnwindCodes = (UCHAR)nEntryPointSlots;
+ pUnwindInfo->FrameRegister = FrameRegister;
+ pUnwindInfo->FrameOffset = 0;
+
+ //
+ // Fill in the RUNTIME_FUNCTION struct for this prologue.
+ //
+ RUNTIME_FUNCTION *pCurFunction = &pUnwindInfoHeader->FunctionEntry;
+ _ASSERTE(IS_ALIGNED(pCurFunction, sizeof(ULONG)));
+
+ S_UINT32 sBeginAddress = S_BYTEPTR(pCode) - S_BYTEPTR(pbBaseAddress);
+ if (sBeginAddress.IsOverflow())
+ COMPlusThrowArithmetic();
+ pCurFunction->BeginAddress = sBeginAddress.Value();
+
+ S_UINT32 sEndAddress = S_BYTEPTR(pCode) + S_BYTEPTR(globalsize) - S_BYTEPTR(pbBaseAddress);
+ if (sEndAddress.IsOverflow())
+ COMPlusThrowArithmetic();
+ pCurFunction->EndAddress = sEndAddress.Value();
+
+ S_UINT32 sTemp = S_BYTEPTR(pUnwindInfo) - S_BYTEPTR(pbBaseAddress);
+ if (sTemp.IsOverflow())
+ COMPlusThrowArithmetic();
+ RUNTIME_FUNCTION__SetUnwindInfoAddress(pCurFunction, sTemp.Value());
+#elif defined(_TARGET_ARM_)
+ //
+ // Fill in the RUNTIME_FUNCTION struct for this prologue.
+ //
+ UNWIND_INFO *pUnwindInfo = &pUnwindInfoHeader->UnwindInfo;
+
+ RUNTIME_FUNCTION *pCurFunction = &pUnwindInfoHeader->FunctionEntry;
+ _ASSERTE(IS_ALIGNED(pCurFunction, sizeof(ULONG)));
+
+ S_UINT32 sBeginAddress = S_BYTEPTR(pCode) - S_BYTEPTR(pbBaseAddress);
+ if (sBeginAddress.IsOverflow())
+ COMPlusThrowArithmetic();
+ RUNTIME_FUNCTION__SetBeginAddress(pCurFunction, sBeginAddress.Value());
+
+ S_UINT32 sTemp = S_BYTEPTR(pUnwindInfo) - S_BYTEPTR(pbBaseAddress);
+ if (sTemp.IsOverflow())
+ COMPlusThrowArithmetic();
+ RUNTIME_FUNCTION__SetUnwindInfoAddress(pCurFunction, sTemp.Value());
+
+ // Get the exact function length. Cannot use globalsize, as it is explicitly
+ // made to be 4-byte aligned.
+ CodeRun *pLastCodeElem = GetLastCodeRunIfAny();
+ _ASSERTE(pLastCodeElem != NULL);
+
+ int functionLength = pLastCodeElem->m_numcodebytes + pLastCodeElem->m_globaloffset;
+
+ // cannot encode functionLength greater than (2 * 0xFFFFF)
+ if (functionLength > 2 * 0xFFFFF)
+ COMPlusThrowArithmetic();
+
+ _ASSERTE(functionLength <= globalsize);
+
+ BYTE * pUnwindCodes = (BYTE *)pUnwindInfo + sizeof(DWORD);
+
+ // Not emitting compact unwind info as there are very few (4) dynamic stubs with unwind info.
+ // The benefit of the optimization does not outweigh the cost of adding the code for it.
+
+ // Unwind info for the prolog
+ if (m_cbStackFrame != 0)
+ {
+ if(m_cbStackFrame < 512)
+ {
+ *pUnwindCodes++ = (BYTE)0xF8; // 16-bit sub/add sp,#x
+ *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 18);
+ *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 10);
+ *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 2);
+ }
+ else
+ {
+ *pUnwindCodes++ = (BYTE)0xFA; // 32-bit sub/add sp,#x
+ *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 18);
+ *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 10);
+ *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 2);
+ }
+
+ if(m_cbStackFrame >= 4096)
+ {
+ // The r4 register is used as a parameter to the checkStack function and must have been saved in the prolog
+ _ASSERTE(m_cCalleeSavedRegs > 0);
+ *pUnwindCodes++ = (BYTE)0xFB; // nop 16 bit for bl r12
+ *pUnwindCodes++ = (BYTE)0xFC; // nop 32 bit for movt r12, checkStack
+ *pUnwindCodes++ = (BYTE)0xFC; // nop 32 bit for movw r12, checkStack
+
+ // Ensure that mov r4, m_cbStackFrame fits in a 32-bit instruction
+ if(m_cbStackFrame > 65535)
+ COMPlusThrow(kNotSupportedException);
+ *pUnwindCodes++ = (BYTE)0xFC; // nop 32 bit for mov r4, m_cbStackFrame
+ }
+ }
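+
+ // Encoding example (the m_cbStackFrame value is illustrative): for
+ // m_cbStackFrame == 0x100 the first branch above emits F8 00 00 40, i.e.
+ // the 0xF8 opcode followed by the frame size divided by 4
+ // (0x100 >> 2 == 0x40) spread across three bytes, high byte first.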
+
+ // The generated unwind info will be incorrect when m_cCalleeSavedRegs == 0:
+ // the unwind code will say that the push/pop instruction is 16 bits wide,
+ // when the opcode actually generated by ThumbEmitPop & ThumbEmitPush
+ // will be 32 bits. Currently no stub has m_cCalleeSavedRegs == 0,
+ // therefore we just add the assert.
+ _ASSERTE(m_cCalleeSavedRegs > 0);
+
+ if (m_cCalleeSavedRegs <= 4)
+ {
+ *pUnwindCodes++ = (BYTE)(0xD4 + (m_cCalleeSavedRegs - 1)); // push/pop {r4-rX}
+ }
+ else
+ {
+ _ASSERTE(m_cCalleeSavedRegs <= 8);
+ *pUnwindCodes++ = (BYTE)(0xDC + (m_cCalleeSavedRegs - 5)); // push/pop {r4-rX}
+ }
+
+ if (m_fPushArgRegs)
+ {
+ *pUnwindCodes++ = (BYTE)0x04; // push {r0-r3} / add sp,#16
+ *pUnwindCodes++ = (BYTE)0xFD; // bx lr
+ }
+ else
+ {
+ *pUnwindCodes++ = (BYTE)0xFF; // end
+ }
+
+ int epilogUnwindCodeIndex = 0;
+
+ // The epilog differs from the prolog
+ if(m_cbStackFrame >= 4096)
+ {
+ // Index of the first unwind code of the epilog
+ epilogUnwindCodeIndex = pUnwindCodes - (BYTE *)pUnwindInfo - sizeof(DWORD);
+
+ *pUnwindCodes++ = (BYTE)0xF8; // sub/add sp,#x
+ *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 18);
+ *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 10);
+ *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 2);
+
+ if (m_cCalleeSavedRegs <= 4)
+ {
+ *pUnwindCodes++ = (BYTE)(0xD4 + (m_cCalleeSavedRegs - 1)); // push/pop {r4-rX}
+ }
+ else
+ {
+ *pUnwindCodes++ = (BYTE)(0xDC + (m_cCalleeSavedRegs - 5)); // push/pop {r4-rX}
+ }
+
+ if (m_fPushArgRegs)
+ {
+ *pUnwindCodes++ = (BYTE)0x04; // push {r0-r3} / add sp,#16
+ *pUnwindCodes++ = (BYTE)0xFD; // bx lr
+ }
+ else
+ {
+ *pUnwindCodes++ = (BYTE)0xFF; // end
+ }
+
+ }
+
+ // Number of 32-bit unwind codes
+ int codeWordsCount = (ALIGN_UP((size_t)pUnwindCodes, sizeof(void*)) - (size_t)pUnwindInfo - sizeof(DWORD))/4;
+
+ _ASSERTE(epilogUnwindCodeIndex < 32);
+
+ // Check that MAX_UNWIND_CODE_WORDS is sufficient to store all unwind codes
+ _ASSERTE(codeWordsCount <= MAX_UNWIND_CODE_WORDS);
+
+ *(DWORD *)pUnwindInfo =
+ ((functionLength) / 2) |
+ (1 << 21) |
+ (epilogUnwindCodeIndex << 23)|
+ (codeWordsCount << 28);
+
+#elif defined(_TARGET_ARM64_)
+ if (!m_fProlog)
+ {
+ // If EmitProlog wasn't called, this is a leaf function which doesn't need any unwind info.
+ T_RUNTIME_FUNCTION *pCurFunction = NULL;
+ }
+ else
+ {
+
+ //
+ // Fill in the RUNTIME_FUNCTION struct for this prologue.
+ //
+ UNWIND_INFO *pUnwindInfo = &(pUnwindInfoHeader->UnwindInfo);
+
+ T_RUNTIME_FUNCTION *pCurFunction = &(pUnwindInfoHeader->FunctionEntry);
+
+ _ASSERTE(IS_ALIGNED(pCurFunction, sizeof(void*)));
+
+ S_UINT32 sBeginAddress = S_BYTEPTR(pCode) - S_BYTEPTR(pbBaseAddress);
+ if (sBeginAddress.IsOverflow())
+ COMPlusThrowArithmetic();
+
+ S_UINT32 sTemp = S_BYTEPTR(pUnwindInfo) - S_BYTEPTR(pbBaseAddress);
+ if (sTemp.IsOverflow())
+ COMPlusThrowArithmetic();
+
+ RUNTIME_FUNCTION__SetBeginAddress(pCurFunction, sBeginAddress.Value());
+ RUNTIME_FUNCTION__SetUnwindInfoAddress(pCurFunction, sTemp.Value());
+
+ CodeRun *pLastCodeElem = GetLastCodeRunIfAny();
+ _ASSERTE(pLastCodeElem != NULL);
+
+ int functionLength = pLastCodeElem->m_numcodebytes + pLastCodeElem->m_globaloffset;
+
+ // The .xdata function-length field is 18 bits wide and stores the total length of the function in bytes, divided by 4.
+ // If the function is larger than 1MB, then multiple pdata and xdata records must be used, which we don't support right now.
+ if (functionLength > 4 * 0x3FFFF)
+ COMPlusThrowArithmetic();
+
+ _ASSERTE(functionLength <= globalsize);
+
+ // No support for extended code words and/or an extended epilog.
+ // ASSERTION: the first 10 bits of pUnwindInfo, which hold the code-word count and the epilog count, cannot be 0.
+ // Having no space for exception scope data also means no support for exceptions for the stubs
+ // generated with this stublinker.
+ BYTE * pUnwindCodes = (BYTE *)pUnwindInfo + sizeof(DWORD);
+
+
+ // Emitting the unwind codes:
+ // The unwind codes are emitted in epilog order.
+ //
+ // 6. Integer argument registers
+ // Although we might be saving the argument registers in the prolog, we don't
+ // need to report them to the OS (they are not expressible anyway).
+
+ // 5. Floating point argument registers:
+ // Similar to the integer argument registers, no reporting is needed.
+ //
+
+ // 4. Set the frame pointer
+ // ASSUMPTION: none of the stubs generated with this stublinker change the SP value outside of the epilog and prolog;
+ // when that is the case we can skip reporting the setup of the frame pointer.
+
+ // By skipping steps #4, #5 and #6, the prolog and epilog become reversible, so they can share the unwind codes.
+ int epilogUnwindCodeIndex = 0;
+
+ unsigned cStackFrameSizeInQWORDs = GetStackFrameSize()/16;
+ // 3. Store FP/LR
+ // save_fplr
+ *pUnwindCodes++ = (BYTE)(0x40 | (m_cbStackSpace>>3));
+
+ // 2. Callee-saved registers
+ //
+ if (m_cCalleeSavedRegs > 0)
+ {
+ unsigned offset = 2 + m_cbStackSpace/8; // 2 is for fp,lr
+ if ((m_cCalleeSavedRegs %2) ==1)
+ {
+ // save_reg
+ *pUnwindCodes++ = (BYTE) (0xD0 | ((m_cCalleeSavedRegs-1)>>2));
+ *pUnwindCodes++ = (BYTE) ((BYTE)((m_cCalleeSavedRegs-1) << 6) | ((offset + m_cCalleeSavedRegs - 1) & 0x3F));
+ }
+ for (int i=(m_cCalleeSavedRegs/2)*2-2; i>=0; i-=2)
+ {
+ if (i!=0)
+ {
+ // save_next
+ *pUnwindCodes++ = 0xE6;
+ }
+ else
+ {
+ // save_regp
+ *pUnwindCodes++ = 0xC8;
+ *pUnwindCodes++ = (BYTE)(offset & 0x3F);
+ }
+ }
+ }
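+
+ // Encoding example (values are illustrative): with m_cCalleeSavedRegs == 4
+ // and m_cbStackSpace == 0, offset == 2 and the loop above emits E6
+ // (save_next) for i == 2, then C8 02 (save_regp at slot offset 2) for
+ // i == 0.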
+
+ // 1. SP Relocation
+ //
+ // EmitProlog is supposed to reject frames larger than 504 bytes.
+ // Assert that here.
+ _ASSERTE(cStackFrameSizeInQWORDs <= 0x3F);
+ if (cStackFrameSizeInQWORDs <= 0x1F)
+ {
+ // alloc_s
+ *pUnwindCodes++ = (BYTE)(cStackFrameSizeInQWORDs);
+ }
+ else
+ {
+ // alloc_m
+ *pUnwindCodes++ = (BYTE)(0xC0 | (cStackFrameSizeInQWORDs >> 8));
+ *pUnwindCodes++ = (BYTE)(cStackFrameSizeInQWORDs);
+ }
+
+ // End
+ *pUnwindCodes++ = 0xE4;
+
+ // Number of 32-bit unwind codes
+ int codeWordsCount = (int)(ALIGN_UP((size_t)pUnwindCodes, sizeof(DWORD)) - (size_t)pUnwindInfo - sizeof(DWORD))/4;
+
+ // Check that MAX_UNWIND_CODE_WORDS is sufficient to store all unwind codes
+ _ASSERTE(codeWordsCount <= MAX_UNWIND_CODE_WORDS);
+
+ *(DWORD *)pUnwindInfo =
+ ((functionLength) / 4) |
+ (1 << 21) | // E bit
+ (epilogUnwindCodeIndex << 22)|
+ (codeWordsCount << 27);
+ } // end else (!m_fProlog)
+#else
+ PORTABILITY_ASSERT("StubLinker::EmitUnwindInfo");
+ T_RUNTIME_FUNCTION *pCurFunction = NULL;
+#endif
+
+ //
+ // Get a StubUnwindInfoHeapSegment for this base address
+ //
+
+ CrstHolder crst(&g_StubUnwindInfoHeapSegmentsCrst);
+
+ StubUnwindInfoHeapSegment *pStubHeapSegment;
+ StubUnwindInfoHeapSegment **ppPrevStubHeapSegment;
+ for (ppPrevStubHeapSegment = &g_StubHeapSegments;
+ (pStubHeapSegment = *ppPrevStubHeapSegment);
+ (ppPrevStubHeapSegment = &pStubHeapSegment->pNext))
+ {
+ if (pbBaseAddress < pStubHeapSegment->pbBaseAddress)
+ {
+ // The list is ordered, so address is between segments
+ pStubHeapSegment = NULL;
+ break;
+ }
+
+ if (pbBaseAddress == pStubHeapSegment->pbBaseAddress)
+ {
+ // Found an existing segment
+ break;
+ }
+ }
+
+ if (!pStubHeapSegment)
+ {
+ //
+ // Determine the upper bound of the address space containing the stub.
+ // Start with stub region's allocation base, and query for each
+ // successive region's allocation base until it changes or we hit an
+ // unreserved region.
+ //
+
+ PBYTE pbCurrentBase = pbBaseAddress;
+
+ for (;;)
+ {
+ if (sizeof(mbi) != ClrVirtualQuery(pbCurrentBase, &mbi, sizeof(mbi)))
+ {
+ // REVISIT_TODO better exception
+ COMPlusThrowOM();
+ }
+
+ // AllocationBase is undefined if this is set.
+ if (mbi.State & MEM_FREE)
+ break;
+
+ if (pbRegionBaseAddress != mbi.AllocationBase)
+ break;
+
+ pbCurrentBase += mbi.RegionSize;
+ }
+
+ //
+ // RtlInstallFunctionTableCallback will only accept a ULONG for the
+ // region size. We've already checked above that the RUNTIME_FUNCTION
+ // offsets will work relative to pbBaseAddress.
+ //
+
+ SIZE_T cbSegment = pbCurrentBase - pbBaseAddress;
+
+ if (cbSegment > MaxSegmentSize)
+ cbSegment = MaxSegmentSize;
+
+ NewHolder<StubUnwindInfoHeapSegment> pNewStubHeapSegment = new StubUnwindInfoHeapSegment();
+
+
+ pNewStubHeapSegment->pbBaseAddress = pbBaseAddress;
+ pNewStubHeapSegment->cbSegment = cbSegment;
+ pNewStubHeapSegment->pUnwindHeaderList = NULL;
+#ifdef _TARGET_AMD64_
+ pNewStubHeapSegment->pUnwindInfoTable = NULL;
+#endif
+
+ // Insert the new segment into the list
+ pNewStubHeapSegment->pNext = *ppPrevStubHeapSegment;
+ *ppPrevStubHeapSegment = pNewStubHeapSegment;
+ pNewStubHeapSegment.SuppressRelease();
+
+ // Use new segment for the stub
+ pStubHeapSegment = pNewStubHeapSegment;
+
+ InstallEEFunctionTable(
+ pNewStubHeapSegment,
+ pbBaseAddress,
+ (ULONG)cbSegment,
+ &FindStubFunctionEntry,
+ pNewStubHeapSegment,
+ DYNFNTABLE_STUB);
+ }
+
+ //
+ // Link the new stub into the segment.
+ //
+
+ pHeader->pNext = pStubHeapSegment->pUnwindHeaderList;
+ pStubHeapSegment->pUnwindHeaderList = pHeader;
+
+#ifdef _TARGET_AMD64_
+ // Publish Unwind info to ETW stack crawler
+ UnwindInfoTable::AddToUnwindInfoTable(
+ &pStubHeapSegment->pUnwindInfoTable, pCurFunction,
+ (TADDR) pStubHeapSegment->pbBaseAddress,
+ (TADDR) pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment);
+#endif
+
+#ifdef _DEBUG
+ _ASSERTE(pHeader->IsRegistered());
+ _ASSERTE( &pHeader->FunctionEntry
+ == FindStubFunctionEntry((ULONG64)pCode, EncodeDynamicFunctionTableContext(pStubHeapSegment, DYNFNTABLE_STUB)));
+#endif
+
+ return true;
+}
+#endif // STUBLINKER_GENERATES_UNWIND_INFO
+
+#ifdef _TARGET_ARM_
+void StubLinker::DescribeProlog(UINT cCalleeSavedRegs, UINT cbStackFrame, BOOL fPushArgRegs)
+{
+ m_fProlog = TRUE;
+ m_cCalleeSavedRegs = cCalleeSavedRegs;
+ m_cbStackFrame = cbStackFrame;
+ m_fPushArgRegs = fPushArgRegs;
+}
+#elif defined(_TARGET_ARM64_)
+void StubLinker::DescribeProlog(UINT cIntRegArgs, UINT cVecRegArgs, UINT cCalleeSavedRegs, UINT cbStackSpace)
+{
+ m_fProlog = TRUE;
+ m_cIntRegArgs = cIntRegArgs;
+ m_cVecRegArgs = cVecRegArgs;
+ m_cCalleeSavedRegs = cCalleeSavedRegs;
+ m_cbStackSpace = cbStackSpace;
+}
+
+UINT StubLinker::GetSavedRegArgsOffset()
+{
+ _ASSERTE(m_fProlog);
+ // This is the offset from SP
+ // We're assuming that the stublinker will push the arg registers to the bottom of the stack frame
+ return m_cbStackSpace + (2+ m_cCalleeSavedRegs)*sizeof(void*); // 2 is for FP and LR
+}
+
+UINT StubLinker::GetStackFrameSize()
+{
+ _ASSERTE(m_fProlog);
+ return m_cbStackSpace + (2 + m_cCalleeSavedRegs + m_cIntRegArgs + m_cVecRegArgs)*sizeof(void*);
+}
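+
+// Worked example (arguments are illustrative): after DescribeProlog(4, 0, 2, 0),
+// GetStackFrameSize() returns (2 + 2 + 4 + 0) * 8 = 64 bytes (FP/LR, two
+// callee-saved registers, four integer arg registers), and
+// GetSavedRegArgsOffset() returns 0 + (2 + 2) * 8 = 32, i.e. the saved
+// argument registers sit just above the FP/LR and callee-saved area.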
+
+
+#endif // ifdef _TARGET_ARM_, elif defined(_TARGET_ARM64_)
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+
+//-------------------------------------------------------------------
+// Inc the refcount.
+//-------------------------------------------------------------------
+VOID Stub::IncRef()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_signature == kUsedStub);
+ FastInterlockIncrement((LONG*)&m_refcount);
+}
+
+//-------------------------------------------------------------------
+// Dec the refcount.
+//-------------------------------------------------------------------
+BOOL Stub::DecRef()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_signature == kUsedStub);
+ int count = FastInterlockDecrement((LONG*)&m_refcount);
+ if (count <= 0) {
+ if(m_patchOffset & INTERCEPT_BIT)
+ {
+ ((InterceptStub*)this)->ReleaseInterceptedStub();
+ }
+
+ DeleteStub();
+ return TRUE;
+ }
+ return FALSE;
+}
+
+VOID Stub::DeleteStub()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ COUNTER_ONLY(GetPerfCounters().m_Interop.cStubs--);
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ if (HasUnwindInfo())
+ {
+ StubUnwindInfoHeader *pHeader = GetUnwindInfoHeader();
+
+ //
+ // Check if the stub has been linked into a StubUnwindInfoHeapSegment.
+ //
+ if (pHeader->IsRegistered())
+ {
+ CrstHolder crst(&g_StubUnwindInfoHeapSegmentsCrst);
+
+ //
+ // Find the segment containing the stub.
+ //
+ StubUnwindInfoHeapSegment **ppPrevSegment = &g_StubHeapSegments;
+ StubUnwindInfoHeapSegment *pSegment = *ppPrevSegment;
+
+ if (pSegment)
+ {
+ PBYTE pbCode = (PBYTE)GetEntryPointInternal();
+#ifdef _TARGET_AMD64_
+ UnwindInfoTable::RemoveFromUnwindInfoTable(&pSegment->pUnwindInfoTable,
+ (TADDR) pSegment->pbBaseAddress, (TADDR) pbCode);
+#endif
+ for (StubUnwindInfoHeapSegment *pNextSegment = pSegment->pNext;
+ pNextSegment;
+ ppPrevSegment = &pSegment->pNext, pSegment = pNextSegment, pNextSegment = pSegment->pNext)
+ {
+ // The segments are sorted by pbBaseAddress.
+ if (pbCode < pNextSegment->pbBaseAddress)
+ break;
+ }
+ }
+
+ // The stub was marked as registered, so a segment should exist.
+ _ASSERTE(pSegment);
+
+ if (pSegment)
+ {
+
+ //
+ // Find this stub's location in the segment's list.
+ //
+ StubUnwindInfoHeader *pCurHeader;
+ StubUnwindInfoHeader **ppPrevHeaderList;
+ for (ppPrevHeaderList = &pSegment->pUnwindHeaderList;
+ (pCurHeader = *ppPrevHeaderList);
+ (ppPrevHeaderList = &pCurHeader->pNext))
+ {
+ if (pHeader == pCurHeader)
+ break;
+ }
+
+ // The stub was marked as registered, so we should find it in the segment's list.
+ _ASSERTE(pCurHeader);
+
+ if (pCurHeader)
+ {
+ //
+ // Remove the stub from the segment's list.
+ //
+ *ppPrevHeaderList = pHeader->pNext;
+
+ //
+ // If the segment's list is now empty, delete the segment.
+ //
+ if (!pSegment->pUnwindHeaderList)
+ {
+ DeleteEEFunctionTable(pSegment);
+#ifdef _TARGET_AMD64_
+ if (pSegment->pUnwindInfoTable != 0)
+ delete pSegment->pUnwindInfoTable;
+#endif
+ *ppPrevSegment = pSegment->pNext;
+ delete pSegment;
+ }
+ }
+ }
+ }
+ }
+#endif
+
+ // a size of 0 is a signal to Nirvana to flush the entire cache
+ //FlushInstructionCache(GetCurrentProcess(),0,0);
+
+ if ((m_patchOffset & LOADER_HEAP_BIT) == 0)
+ {
+#ifdef _DEBUG
+ m_signature = kFreedStub;
+ FillMemory(this+1, m_numCodeBytes, 0xcc);
+#endif
+
+#ifndef FEATURE_PAL
+ DeleteExecutable((BYTE*)GetAllocationBase());
+#else
+ delete [] (BYTE*)GetAllocationBase();
+#endif
+ }
+}
+
+TADDR Stub::GetAllocationBase()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ TADDR info = dac_cast<TADDR>(this);
+ SIZE_T cbPrefix = 0;
+
+ if (IsIntercept())
+ {
+ cbPrefix += 2 * sizeof(TADDR);
+ }
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ if (HasUnwindInfo())
+ {
+ StubUnwindInfoHeaderSuffix *pSuffix =
+ PTR_StubUnwindInfoHeaderSuffix(info - cbPrefix -
+ sizeof(*pSuffix));
+
+ cbPrefix += StubUnwindInfoHeader::ComputeSize(pSuffix->nUnwindInfoSize);
+ }
+#endif // STUBLINKER_GENERATES_UNWIND_INFO
+
+ if (!HasExternalEntryPoint())
+ {
+ cbPrefix = ALIGN_UP(cbPrefix + sizeof(Stub), CODE_SIZE_ALIGN) - sizeof(Stub);
+ }
+
+ return info - cbPrefix;
+}
+
+Stub* Stub::NewStub(PTR_VOID pCode, DWORD flags)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Stub* pStub = NewStub(NULL, 0, flags | NEWSTUB_FL_EXTERNAL);
+ _ASSERTE(pStub->HasExternalEntryPoint());
+
+ *(PTR_VOID *)(pStub + 1) = pCode;
+
+ return pStub;
+}
+
+//-------------------------------------------------------------------
+// Stub allocation done here.
+//-------------------------------------------------------------------
+/*static*/ Stub* Stub::NewStub(
+ LoaderHeap *pHeap,
+ UINT numCodeBytes,
+ DWORD flags
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ , UINT nUnwindInfoSize
+#endif
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ _ASSERTE(!nUnwindInfoSize || !pHeap || pHeap->m_fPermitStubsWithUnwindInfo);
+#endif // STUBLINKER_GENERATES_UNWIND_INFO
+
+ COUNTER_ONLY(GetPerfCounters().m_Interop.cStubs++);
+
+ S_SIZE_T size = S_SIZE_T(sizeof(Stub));
+
+ if (flags & NEWSTUB_FL_INTERCEPT)
+ {
+ size += sizeof(Stub *) + sizeof(void*);
+ }
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ if (nUnwindInfoSize != 0)
+ {
+ size += StubUnwindInfoHeader::ComputeSize(nUnwindInfoSize);
+ }
+#endif
+
+ if (flags & NEWSTUB_FL_EXTERNAL)
+ {
+ _ASSERTE(numCodeBytes == 0);
+ size += sizeof(PTR_PCODE);
+ }
+ else
+ {
+ size.AlignUp(CODE_SIZE_ALIGN);
+ size += numCodeBytes;
+ }
+
+ if (size.IsOverflow())
+ COMPlusThrowArithmetic();
+
+ size_t totalSize = size.Value();
+
+ BYTE *pBlock;
+ if (pHeap == NULL)
+ {
+#ifndef FEATURE_PAL
+ pBlock = new (executable) BYTE[totalSize];
+#else
+ pBlock = new BYTE[totalSize];
+#endif
+ }
+ else
+ {
+ pBlock = (BYTE*)(void*) pHeap->AllocAlignedMem(totalSize, CODE_SIZE_ALIGN);
+ flags |= NEWSTUB_FL_LOADERHEAP;
+ }
+
+ // Make sure that the payload of the stub is aligned
+ Stub* pStub = (Stub*)((pBlock + totalSize) -
+ (sizeof(Stub) + ((flags & NEWSTUB_FL_EXTERNAL) ? sizeof(PTR_PCODE) : numCodeBytes)));
+
+ pStub->SetupStub(
+ numCodeBytes,
+ flags
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ , nUnwindInfoSize
+#endif
+ );
+
+ _ASSERTE((BYTE *)pStub->GetAllocationBase() == pBlock);
+
+ return pStub;
+}
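+
+// Resulting block layout (sketch): the Stub header is placed so that the
+// payload ends exactly at pBlock + totalSize, giving
+//   [alignment padding | unwind info header (if any) | intercept slots (if any) | Stub | code bytes or external entry pointer]
+// which is why GetAllocationBase() above walks backwards from 'this' over the
+// same prefixes to recover pBlock.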
+
+void Stub::SetupStub(int numCodeBytes, DWORD flags
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ , UINT nUnwindInfoSize
+#endif
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ m_signature = kUsedStub;
+#else
+#ifdef _WIN64
+ m_pad_code_bytes = 0;
+#endif
+#endif
+
+ m_numCodeBytes = numCodeBytes;
+
+ m_refcount = 1;
+ m_patchOffset = 0;
+
+ if((flags & NEWSTUB_FL_INTERCEPT) != 0)
+ m_patchOffset |= INTERCEPT_BIT;
+ if((flags & NEWSTUB_FL_LOADERHEAP) != 0)
+ m_patchOffset |= LOADER_HEAP_BIT;
+ if((flags & NEWSTUB_FL_MULTICAST) != 0)
+ m_patchOffset |= MULTICAST_DELEGATE_BIT;
+ if ((flags & NEWSTUB_FL_EXTERNAL) != 0)
+ m_patchOffset |= EXTERNAL_ENTRY_BIT;
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ if (nUnwindInfoSize)
+ {
+ m_patchOffset |= UNWIND_INFO_BIT;
+
+ StubUnwindInfoHeaderSuffix * pSuffix = GetUnwindInfoHeaderSuffix();
+ pSuffix->nUnwindInfoSize = (BYTE)nUnwindInfoSize;
+
+ StubUnwindInfoHeader * pHeader = GetUnwindInfoHeader();
+ pHeader->Init();
+ }
+#endif
+}
+
+//-------------------------------------------------------------------
+// One-time init
+//-------------------------------------------------------------------
+/*static*/ void Stub::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ g_StubUnwindInfoHeapSegmentsCrst.Init(CrstStubUnwindInfoHeapSegments);
+#endif
+}
+
+/*static*/ Stub* InterceptStub::NewInterceptedStub(void* pCode,
+ Stub* interceptee,
+ void* pRealAddr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ InterceptStub *pStub = (InterceptStub *) NewStub(pCode, NEWSTUB_FL_INTERCEPT);
+
+ *pStub->GetInterceptedStub() = interceptee;
+ *pStub->GetRealAddr() = (TADDR)pRealAddr;
+
+ LOG((LF_CORDB, LL_INFO10000, "For Stub 0x%x, set intercepted stub to 0x%x\n",
+ pStub, interceptee));
+
+ return pStub;
+}
+
+//-------------------------------------------------------------------
+// Stub allocation done here.
+//-------------------------------------------------------------------
+/*static*/ Stub* InterceptStub::NewInterceptedStub(LoaderHeap *pHeap,
+ UINT numCodeBytes,
+ Stub* interceptee,
+ void* pRealAddr
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ , UINT nUnwindInfoSize
+#endif
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ InterceptStub *pStub = (InterceptStub *) NewStub(
+ pHeap,
+ numCodeBytes,
+ NEWSTUB_FL_INTERCEPT
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ , nUnwindInfoSize
+#endif
+ );
+
+ *pStub->GetInterceptedStub() = interceptee;
+ *pStub->GetRealAddr() = (TADDR)pRealAddr;
+
+ LOG((LF_CORDB, LL_INFO10000, "For Stub 0x%x, set intercepted stub to 0x%x\n",
+ pStub, interceptee));
+
+ return pStub;
+}
+
+//-------------------------------------------------------------------
+// Release the stub that is owned by this stub
+//-------------------------------------------------------------------
+void InterceptStub::ReleaseInterceptedStub()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ Stub** intercepted = GetInterceptedStub();
+ // If we own the stub then decrement it. It can be null if the
+ // linked stub is actually a jitted stub.
+ if(*intercepted)
+ (*intercepted)->DecRef();
+}
+
+//-------------------------------------------------------------------
+// Constructor
+//-------------------------------------------------------------------
+ArgBasedStubCache::ArgBasedStubCache(UINT fixedSlots)
+ : m_numFixedSlots(fixedSlots),
+ m_crst(CrstArgBasedStubCache)
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_aStub = new Stub * [m_numFixedSlots];
+ _ASSERTE(m_aStub != NULL);
+
+ for (unsigned __int32 i = 0; i < m_numFixedSlots; i++) {
+ m_aStub[i] = NULL;
+ }
+ m_pSlotEntries = NULL;
+}
+
+
+//-------------------------------------------------------------------
+// Destructor
+//-------------------------------------------------------------------
+ArgBasedStubCache::~ArgBasedStubCache()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ for (unsigned __int32 i = 0; i < m_numFixedSlots; i++) {
+ Stub *pStub = m_aStub[i];
+ if (pStub) {
+ pStub->DecRef();
+ }
+ }
+ // a size of 0 is a signal to Nirvana to flush the entire cache
+ // not sure if this is needed, but should have no CLR perf impact since size is 0.
+ FlushInstructionCache(GetCurrentProcess(),0,0);
+
+ SlotEntry **ppSlotEntry = &m_pSlotEntries;
+ SlotEntry *pCur;
+ while (NULL != (pCur = *ppSlotEntry)) {
+ Stub *pStub = pCur->m_pStub;
+ pStub->DecRef();
+ *ppSlotEntry = pCur->m_pNext;
+ delete pCur;
+ }
+ delete [] m_aStub;
+}
+
+
+
+//-------------------------------------------------------------------
+// Queries/retrieves a previously cached stub.
+//
+// If there is no stub corresponding to the given index,
+// this function returns NULL.
+//
+// Otherwise, this function returns the stub after
+// incrementing its refcount.
+//-------------------------------------------------------------------
+Stub *ArgBasedStubCache::GetStub(UINT_PTR key)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Stub *pStub;
+
+ CrstHolder ch(&m_crst);
+
+ if (key < m_numFixedSlots) {
+ pStub = m_aStub[key];
+ } else {
+ pStub = NULL;
+ for (SlotEntry *pSlotEntry = m_pSlotEntries;
+ pSlotEntry != NULL;
+ pSlotEntry = pSlotEntry->m_pNext) {
+
+ if (pSlotEntry->m_key == key) {
+ pStub = pSlotEntry->m_pStub;
+ break;
+ }
+ }
+ }
+ if (pStub) {
+ pStub->IncRef();
+ }
+ return pStub;
+}
+
+
+//-------------------------------------------------------------------
+// Tries to associate a stub with a given index. This association
+// may fail because some other thread may have beaten you to it
+// just before you make the call.
+//
+// If the association succeeds, "pStub" is installed, and it is
+// returned back to the caller. The stub's refcount is incremented
+// twice (once to reflect the cache's ownership, and once to reflect
+// the caller's ownership.)
+//
+// If the association fails because another stub is already installed,
+// then the incumbent stub is returned to the caller and its refcount
+// is incremented once (to reflect the caller's ownership.)
+//
+// If the association fails due to lack of memory, NULL is returned
+// and no one's refcount changes.
+//
+// This routine is intended to be called like this:
+//
+// Stub *pCandidate = MakeStub(); // after this, pCandidate's rc is 1
+// Stub *pWinner = cache->SetStub(idx, pCandidate);
+// pCandidate->DecRef();
+// pCandidate = 0xcccccccc; // must not use pCandidate again.
+// if (!pWinner) {
+// OutOfMemoryError;
+// }
+// // If the association succeeded, pWinner's refcount is 2 and so
+//      // is pCandidate's (because it *is* pWinner).
+// // If the association failed, pWinner's refcount is still 2
+// // and pCandidate got destroyed by the last DecRef().
+// // Either way, pWinner is now the official index holder. It
+// // has a refcount of 2 (one for the cache's ownership, and
+// // one belonging to this code.)
+//-------------------------------------------------------------------
+Stub* ArgBasedStubCache::AttemptToSetStub(UINT_PTR key, Stub *pStub)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CrstHolder ch(&m_crst);
+
+ if (key < m_numFixedSlots) {
+ if (m_aStub[key]) {
+ pStub = m_aStub[key];
+ } else {
+ m_aStub[key] = pStub;
+ pStub->IncRef(); // IncRef on cache's behalf
+ }
+ } else {
+ SlotEntry *pSlotEntry;
+ for (pSlotEntry = m_pSlotEntries;
+ pSlotEntry != NULL;
+ pSlotEntry = pSlotEntry->m_pNext) {
+
+ if (pSlotEntry->m_key == key) {
+ pStub = pSlotEntry->m_pStub;
+ break;
+ }
+ }
+ if (!pSlotEntry) {
+ pSlotEntry = new SlotEntry;
+ pSlotEntry->m_pStub = pStub;
+ pStub->IncRef(); // IncRef on cache's behalf
+ pSlotEntry->m_key = key;
+ pSlotEntry->m_pNext = m_pSlotEntries;
+ m_pSlotEntries = pSlotEntry;
+ }
+ }
+ if (pStub) {
+ pStub->IncRef(); // IncRef because we're returning it to caller
+ }
+ return pStub;
+}
+
+
+
+#ifdef _DEBUG
+// Diagnostic dump
+VOID ArgBasedStubCache::Dump()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ printf("--------------------------------------------------------------\n");
+ printf("ArgBasedStubCache dump (%lu fixed entries):\n", m_numFixedSlots);
+ for (UINT32 i = 0; i < m_numFixedSlots; i++) {
+
+ printf(" Fixed slot %lu: ", (ULONG)i);
+ Stub *pStub = m_aStub[i];
+ if (!pStub) {
+ printf("empty\n");
+ } else {
+ printf("%lxh - refcount is %lu\n",
+ (size_t)(pStub->GetEntryPoint()),
+ (ULONG)( *( ( ((ULONG*)(pStub->GetEntryPoint())) - 1))));
+ }
+ }
+
+ for (SlotEntry *pSlotEntry = m_pSlotEntries;
+ pSlotEntry != NULL;
+ pSlotEntry = pSlotEntry->m_pNext) {
+
+ printf(" Dyna. slot %lu: ", (ULONG)(pSlotEntry->m_key));
+ Stub *pStub = pSlotEntry->m_pStub;
+ printf("%lxh - refcount is %lu\n",
+ (size_t)(pStub->GetEntryPoint()),
+ (ULONG)( *( ( ((ULONG*)(pStub->GetEntryPoint())) - 1))));
+
+ }
+
+
+ printf("--------------------------------------------------------------\n");
+}
+#endif
+
+#endif // #ifndef DACCESS_COMPILE
+
diff --git a/src/vm/stublink.h b/src/vm/stublink.h
new file mode 100644
index 0000000000..ec3b102a54
--- /dev/null
+++ b/src/vm/stublink.h
@@ -0,0 +1,1231 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// STUBLINK.H
+//
+
+//
+// A StubLinker object provides a way to link several location-independent
+// code sources into one executable stub, resolving references,
+// and choosing the shortest possible instruction size. The StubLinker
+// abstracts out the notion of a "reference" so it is completely CPU
+// independent. This StubLinker is intended not only to create method
+// stubs but to create the PCode-marshaling stubs for Native/Direct.
+//
+// A StubLinker's typical life-cycle is:
+//
+// 1. Create a new StubLinker (it accumulates state for the stub being
+// generated.)
+// 2. Emit code bytes and references (requiring fixups) into the StubLinker.
+// 3. Call the Link() method to produce the final stub.
+// 4. Destroy the StubLinker.
+//
+// StubLinkers are not multithread-aware: they're intended to be
+// used entirely on a single thread. Also, StubLinkers report errors
+// using COMPlusThrow. StubLinkers do have a destructor: to prevent
+// C++ object unwinding from clashing with COMPlusThrow,
+// you must use COMPLUSCATCH to ensure the StubLinker's cleanup in the
+// event of an exception: the following code would do it:
+//
+// StubLinker stublink;
+// Inner();
+//
+//
+// // Have to separate into inner function because VC++ forbids
+// // mixing __try & local objects in the same function.
+// void Inner() {
+// COMPLUSTRY {
+// ... do stuff ...
+// pLinker->Link();
+// } COMPLUSCATCH {
+// }
+// }
+//
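+// A minimal emit/link sketch (illustrative only; real callers use the
+// CPU-specific CPUSTUBLINKER subclass and emit real instructions):
+//
+//      StubLinker sl;
+//      sl.Emit8(0xcc);                     // emit some code bytes
+//      Stub *pStub = sl.Link();            // returned stub has a refcount of 1
+//      PCODE entry = pStub->GetEntryPoint();
+//      ...
+//      pStub->DecRef();                    // stub deletes itself at refcount 0
+//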
+
+
+// This file should only be included via the platform-specific cgencpu.h.
+
+#include "cgensys.h"
+
+#ifndef __stublink_h__
+#define __stublink_h__
+
+#include "crst.h"
+#include "util.hpp"
+#include "eecontract.h"
+
+//-------------------------------------------------------------------------
+// Forward refs
+//-------------------------------------------------------------------------
+class InstructionFormat;
+class Stub;
+class InterceptStub;
+class CheckDuplicatedStructLayouts;
+class CodeBasedStubCache;
+struct CodeLabel;
+
+struct CodeRun;
+struct LabelRef;
+struct CodeElement;
+struct IntermediateUnwindInfo;
+
+#if !defined(_TARGET_X86_) && !defined(FEATURE_PAL)
+#define STUBLINKER_GENERATES_UNWIND_INFO
+#endif // !_TARGET_X86_ && !FEATURE_PAL
+
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+
+typedef DPTR(struct StubUnwindInfoHeaderSuffix) PTR_StubUnwindInfoHeaderSuffix;
+struct StubUnwindInfoHeaderSuffix
+{
+ UCHAR nUnwindInfoSize; // Size of unwind info in bytes
+};
+
+// Variable-sized struct that precedes a Stub when the stub requires unwind
+// information. Followed by a StubUnwindInfoHeaderSuffix.
+typedef DPTR(struct StubUnwindInfoHeader) PTR_StubUnwindInfoHeader;
+struct StubUnwindInfoHeader
+{
+ PTR_StubUnwindInfoHeader pNext;
+ T_RUNTIME_FUNCTION FunctionEntry;
+ UNWIND_INFO UnwindInfo; // variable length
+
+ // Computes the size needed for this variable-sized struct.
+ static SIZE_T ComputeSize(UINT nUnwindInfoSize);
+
+ void Init ();
+
+ bool IsRegistered ();
+};
+
+// List of stub address ranges, in increasing address order.
+struct StubUnwindInfoHeapSegment
+{
+ PBYTE pbBaseAddress;
+ SIZE_T cbSegment;
+ StubUnwindInfoHeader *pUnwindHeaderList;
+ StubUnwindInfoHeapSegment *pNext;
+
+#ifdef _WIN64
+ class UnwindInfoTable* pUnwindInfoTable; // Used to publish unwind info to ETW stack crawler
+#endif
+};
+
+#ifndef BINDER
+VOID UnregisterUnwindInfoInLoaderHeap (UnlockedLoaderHeap *pHeap);
+#endif //!BINDER
+
+#endif // STUBLINKER_GENERATES_UNWIND_INFO
+
+
+//-------------------------------------------------------------------------
+// A non-multithreaded object that fixes up and emits one executable stub.
+//-------------------------------------------------------------------------
+class StubLinker
+{
+ public:
+ //---------------------------------------------------------------
+ // Construction
+ //---------------------------------------------------------------
+ StubLinker();
+
+
+ //---------------------------------------------------------------
+ // Create a new undefined label. Label must be assigned to a code
+ // location using EmitLabel() prior to final linking.
+ // Throws exception on failure.
+ //---------------------------------------------------------------
+ CodeLabel* NewCodeLabel();
+
+ //---------------------------------------------------------------
+ // Create a new undefined label for which we want the absolute
+ // address, not offset. Label must be assigned to a code
+ // location using EmitLabel() prior to final linking.
+ // Throws exception on failure.
+ //---------------------------------------------------------------
+ CodeLabel* NewAbsoluteCodeLabel();
+
+ //---------------------------------------------------------------
+ // Combines NewCodeLabel() and EmitLabel() for convenience.
+ // Throws exception on failure.
+ //---------------------------------------------------------------
+ CodeLabel* EmitNewCodeLabel();
+
+
+ //---------------------------------------------------------------
+ // Returns final location of label as an offset from the start
+ // of the stub. Can only be called after linkage.
+ //---------------------------------------------------------------
+ UINT32 GetLabelOffset(CodeLabel *pLabel);
+
+ //---------------------------------------------------------------
+ // Append code bytes.
+ //---------------------------------------------------------------
+ VOID EmitBytes(const BYTE *pBytes, UINT numBytes);
+ VOID Emit8 (unsigned __int8 u8);
+ VOID Emit16(unsigned __int16 u16);
+ VOID Emit32(unsigned __int32 u32);
+ VOID Emit64(unsigned __int64 u64);
+ VOID EmitPtr(const VOID *pval);
+
+ //---------------------------------------------------------------
+ // Emit a UTF8 string
+ //---------------------------------------------------------------
+ VOID EmitUtf8(LPCUTF8 pUTF8)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ LPCUTF8 p = pUTF8;
+ while (*(p++)) {
+            // nothing to do - just scanning for the null terminator
+ }
+ EmitBytes((const BYTE *)pUTF8, (unsigned int)(p-pUTF8-1));
+ }
+
+ //---------------------------------------------------------------
+ // Append an instruction containing a reference to a label.
+ //
+ // target - the label being referenced.
+ // instructionFormat - a platform-specific InstructionFormat object
+ // that gives properties about the reference.
+ // variationCode - uninterpreted data passed to the pInstructionFormat methods.
+ //---------------------------------------------------------------
+ VOID EmitLabelRef(CodeLabel* target, const InstructionFormat & instructionFormat, UINT variationCode);
+
+
+ //---------------------------------------------------------------
+ // Sets the label to point to the current "instruction pointer"
+ // It is invalid to call EmitLabel() twice on
+ // the same label.
+ //---------------------------------------------------------------
+ VOID EmitLabel(CodeLabel* pCodeLabel);
+
+ //---------------------------------------------------------------
+ // Emits the patch label for the stub.
+ // Throws exception on failure.
+ //---------------------------------------------------------------
+ void EmitPatchLabel();
+
+ //---------------------------------------------------------------
+ // Create a new label to an external address.
+ // Throws exception on failure.
+ //---------------------------------------------------------------
+ CodeLabel* NewExternalCodeLabel(LPVOID pExternalAddress);
+ CodeLabel* NewExternalCodeLabel(PCODE pExternalAddress)
+ {
+ return NewExternalCodeLabel((LPVOID)pExternalAddress);
+ }
+
+ //---------------------------------------------------------------
+ // Push and Pop can be used to keep track of stack growth.
+ // These should be adjusted by opcodes written to the stream.
+ //
+ // Note that popping & pushing stack size as opcodes are emitted
+ // is naive & may not be accurate in many cases,
+ // so complex stubs may have to manually adjust the stack size.
+ // However it should work for the vast majority of cases we care
+ // about.
+ //---------------------------------------------------------------
+ void Push(UINT size);
+ void Pop(UINT size);
+
+ INT GetStackSize() { LIMITED_METHOD_CONTRACT; return m_stackSize; }
+ void SetStackSize(SHORT size) { LIMITED_METHOD_CONTRACT; m_stackSize = size; }
+
+ void SetDataOnly(BOOL fDataOnly = TRUE) { LIMITED_METHOD_CONTRACT; m_fDataOnly = fDataOnly; }
+
+#ifdef _TARGET_ARM_
+ void DescribeProlog(UINT cCalleeSavedRegs, UINT cbStackFrame, BOOL fPushArgRegs);
+#elif defined(_TARGET_ARM64_)
+ void DescribeProlog(UINT cIntRegArgs, UINT cVecRegArgs, UINT cCalleeSavedRegs, UINT cbStackFrame);
+ UINT GetSavedRegArgsOffset();
+ UINT GetStackFrameSize();
+#endif
+
+ //===========================================================================
+ // Unwind information
+
+ // Records location of preserved or parameter register
+ VOID UnwindSavedReg (UCHAR reg, ULONG SPRelativeOffset);
+ VOID UnwindPushedReg (UCHAR reg);
+
+ // Records "sub rsp, xxx"
+ VOID UnwindAllocStack (SHORT FrameSizeIncrement);
+
+ // Records frame pointer register
+ VOID UnwindSetFramePointer (UCHAR reg);
+
+ // In DEBUG, emits a call to m_pUnwindInfoCheckLabel (via
+ // EmitUnwindInfoCheckWorker). Code at that label will call to a
+ // helper that will attempt to RtlVirtualUnwind through the stub. The
+ // helper will preserve ALL registers.
+ VOID EmitUnwindInfoCheck();
+
+#if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO) && !defined(CROSSGEN_COMPILE)
+protected:
+
+ // Injects a call to the given label.
+ virtual VOID EmitUnwindInfoCheckWorker (CodeLabel *pCheckLabel) { _ASSERTE(!"override me"); }
+
+ // Emits a call to a helper that will attempt to RtlVirtualUnwind
+ // through the stub. The helper will preserve ALL registers.
+ virtual VOID EmitUnwindInfoCheckSubfunction() { _ASSERTE(!"override me"); }
+#endif
+
+public:
+
+ //---------------------------------------------------------------
+ // Generate the actual stub. The returned stub has a refcount of 1.
+ // No other methods (other than the destructor) should be called
+ // after calling Link().
+ //
+ // Throws exception on failure.
+ //---------------------------------------------------------------
+ Stub *Link(DWORD flags = 0) { WRAPPER_NO_CONTRACT; return Link(NULL, flags); }
+ Stub *Link(LoaderHeap *heap, DWORD flags = 0);
+
+ //---------------------------------------------------------------
+ // Generate the actual stub. The returned stub has a refcount of 1.
+ // No other methods (other than the destructor) should be called
+    // after calling Link(). The linked stub must have its reference
+    // count increased by one prior to calling this method. This method
+ // does not increment the reference count of the interceptee.
+ //
+ // Throws exception on failure.
+ //---------------------------------------------------------------
+ Stub *LinkInterceptor(Stub* interceptee, void *pRealAddr)
+ { WRAPPER_NO_CONTRACT; return LinkInterceptor(NULL,interceptee, pRealAddr); }
+ Stub *LinkInterceptor(LoaderHeap *heap, Stub* interceptee, void *pRealAddr);
+
+ private:
+ CodeElement *m_pCodeElements; // stored in *reverse* order
+ CodeLabel *m_pFirstCodeLabel; // linked list of CodeLabels
+ LabelRef *m_pFirstLabelRef; // linked list of references
+ CodeLabel *m_pPatchLabel; // label of stub patch offset
+ // currently just for multicast
+ // frames.
+ SHORT m_stackSize; // count of pushes/pops
+ CQuickHeap m_quickHeap; // throwaway heap for
+ // labels, and
+ // internals.
+ BOOL m_fDataOnly; // the stub contains only data - does not need FlushInstructionCache
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+
+#ifdef _DEBUG
+ CodeLabel *m_pUnwindInfoCheckLabel; // subfunction to call to unwind info check helper.
+ // On AMD64, the prologue is restricted to 256
+ // bytes, so this reduces the size of the injected
+ // code from 14 to 5 bytes.
+#endif
+
+#ifdef _TARGET_AMD64_
+ IntermediateUnwindInfo *m_pUnwindInfoList;
+ UINT m_nUnwindSlots; // number of slots to allocate at end, == UNWIND_INFO::CountOfCodes
+ BOOL m_fHaveFramePointer; // indicates stack operations no longer need to be recorded
+
+ //
+ // Returns total UnwindInfoSize, including RUNTIME_FUNCTION entry
+ //
+ UINT UnwindInfoSize(UINT codeSize)
+ {
+ if (m_nUnwindSlots == 0) return 0;
+
+ return sizeof(RUNTIME_FUNCTION) + offsetof(UNWIND_INFO, UnwindCode) + m_nUnwindSlots * sizeof(UNWIND_CODE);
+ }
+#endif // _TARGET_AMD64_
+
+#ifdef _TARGET_ARM_
+#define MAX_UNWIND_CODE_WORDS 5 /* maximum number of 32-bit words to store unwind codes */
+ // Cache information about the stack frame set up in the prolog and use it in the generation of the
+ // epilog.
+protected:
+ BOOL m_fProlog; // True if DescribeProlog has been called
+ UINT m_cCalleeSavedRegs; // Count of callee saved registers (0 == none, 1 == r4, 2 ==
+ // r4-r5 etc. up to 8 == r4-r11)
+ UINT m_cbStackFrame; // Count of bytes in the stack frame (excl of saved regs)
+ BOOL m_fPushArgRegs; // If true, r0-r3 are saved before callee saved regs
+
+private:
+ // Reserve fixed size block that's big enough to fit any unwind info we can have
+ static const int c_nUnwindInfoSize = sizeof(RUNTIME_FUNCTION) + sizeof(DWORD) + MAX_UNWIND_CODE_WORDS *4;
+
+ //
+ // Returns total UnwindInfoSize, including RUNTIME_FUNCTION entry
+ //
+ UINT UnwindInfoSize(UINT codeSize)
+ {
+ if (!m_fProlog) return 0;
+
+ return c_nUnwindInfoSize;
+ }
+#endif // _TARGET_ARM_
+
+#ifdef _TARGET_ARM64_
+#define MAX_UNWIND_CODE_WORDS 5 /* maximum number of 32-bit words to store unwind codes */
+protected:
+ BOOL m_fProlog; // True if DescribeProlog has been called
+ UINT m_cIntRegArgs; // Count of int register arguments (x0 - x7)
+ UINT m_cVecRegArgs; // Count of FP register arguments (v0 - v7)
+ UINT m_cCalleeSavedRegs; // Count of callee saved registers (x19 - x28)
+ UINT m_cbStackSpace; // Additional stack space for return buffer and stack alignment
+
+private:
+ // Reserve fixed size block that's big enough to fit any unwind info we can have
+ static const int c_nUnwindInfoSize = sizeof(RUNTIME_FUNCTION) + sizeof(DWORD) + MAX_UNWIND_CODE_WORDS *4;
+ UINT UnwindInfoSize(UINT codeSize)
+ {
+ if (!m_fProlog) return 0;
+
+ return c_nUnwindInfoSize;
+ }
+
+#endif // _TARGET_ARM64_
+
+#endif // STUBLINKER_GENERATES_UNWIND_INFO
+
+ CodeRun *AppendNewEmptyCodeRun();
+
+
+ // Returns pointer to last CodeElement or NULL.
+ CodeElement *GetLastCodeElement()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCodeElements;
+ }
+
+ // Appends a new CodeElement.
+ VOID AppendCodeElement(CodeElement *pCodeElement);
+
+
+    // Calculates the size of the stub code that is allocated
+    // immediately after the stub object. Returns the
+    // total size. globalsize receives the size without
+    // the data part.
+ virtual int CalculateSize(int* globalsize);
+
+ // Writes out the code element into memory following the
+ // stub object.
+ bool EmitStub(Stub* pStub, int globalsize);
+
+ CodeRun *GetLastCodeRunIfAny();
+
+ bool EmitUnwindInfo(Stub* pStub, int globalsize);
+
+#if defined(_TARGET_AMD64_) && defined(STUBLINKER_GENERATES_UNWIND_INFO)
+ UNWIND_CODE *AllocUnwindInfo (UCHAR Op, UCHAR nExtraSlots = 0);
+#endif // defined(_TARGET_AMD64_) && defined(STUBLINKER_GENERATES_UNWIND_INFO)
+};
+
+//************************************************************************
+// CodeLabel
+//************************************************************************
+struct CodeLabel
+{
+ // Link pointer for StubLink's list of labels
+ CodeLabel *m_next;
+
+ // if FALSE, label refers to some code within the same stub
+ // if TRUE, label refers to some externally supplied address.
+ BOOL m_fExternal;
+
+ // if TRUE, means we want the actual address of the label and
+ // not an offset to it
+ BOOL m_fAbsolute;
+
+ union {
+
+ // Internal
+ struct {
+ // Indicates the position of the label, expressed
+ // as an offset into a CodeRun.
+ CodeRun *m_pCodeRun;
+ UINT m_localOffset;
+
+ } i;
+
+
+ // External
+ struct {
+ LPVOID m_pExternalAddress;
+ } e;
+ };
+};
+
+enum NewStubFlags
+{
+ NEWSTUB_FL_INTERCEPT = 0x00000001,
+ NEWSTUB_FL_MULTICAST = 0x00000002,
+ NEWSTUB_FL_EXTERNAL = 0x00000004,
+ NEWSTUB_FL_LOADERHEAP = 0x00000008
+};
+
+
+//-------------------------------------------------------------------------
+// An executable stub. These can only be created by the StubLinker().
+// Each stub has a reference count (which is maintained in a thread-safe
+// manner.) When the ref-count goes to zero, the stub automatically
+// cleans itself up.
+//-------------------------------------------------------------------------
+typedef DPTR(class Stub) PTR_Stub;
+typedef DPTR(PTR_Stub) PTR_PTR_Stub;
+class Stub
+{
+ friend class CheckDuplicatedStructLayouts;
+ friend class CheckAsmOffsets;
+
+ protected:
+ enum
+ {
+ MULTICAST_DELEGATE_BIT = 0x80000000,
+ EXTERNAL_ENTRY_BIT = 0x40000000,
+ LOADER_HEAP_BIT = 0x20000000,
+ INTERCEPT_BIT = 0x10000000,
+ UNWIND_INFO_BIT = 0x08000000,
+
+ PATCH_OFFSET_MASK = UNWIND_INFO_BIT - 1,
+ MAX_PATCH_OFFSET = PATCH_OFFSET_MASK + 1,
+ };
+
+ static_assert_no_msg(PATCH_OFFSET_MASK < UNWIND_INFO_BIT);
+
+ public:
+ //-------------------------------------------------------------------
+ // Inc the refcount.
+ //-------------------------------------------------------------------
+ VOID IncRef();
+
+
+ //-------------------------------------------------------------------
+ // Dec the refcount.
+ // Returns true if the count went to zero and the stub was deleted
+ //-------------------------------------------------------------------
+ BOOL DecRef();
+
+
+
+ //-------------------------------------------------------------------
+ // Used for throwing out unused stubs from stub caches. This
+ // method cannot be 100% accurate due to race conditions. This
+ // is ok because stub cache management is robust in the face
+ // of missed or premature cleanups.
+ //-------------------------------------------------------------------
+ BOOL HeuristicLooksOrphaned()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_signature == kUsedStub);
+ return (m_refcount == 1);
+ }
+
+ //-------------------------------------------------------------------
+ // Used by the debugger to help step through stubs
+ //-------------------------------------------------------------------
+ BOOL IsIntercept()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_patchOffset & INTERCEPT_BIT) != 0;
+ }
+
+ BOOL IsMulticastDelegate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_patchOffset & MULTICAST_DELEGATE_BIT) != 0;
+ }
+
+ //-------------------------------------------------------------------
+ // For stubs which execute user code, a patch offset needs to be set
+ // to tell the debugger how far into the stub code the debugger has
+ // to step until the frame is set up.
+ //-------------------------------------------------------------------
+ USHORT GetPatchOffset()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (USHORT)(m_patchOffset & PATCH_OFFSET_MASK);
+ }
+
+ void SetPatchOffset(USHORT offset)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(GetPatchOffset() == 0);
+ m_patchOffset |= offset;
+ _ASSERTE(GetPatchOffset() == offset);
+ }
+
+ TADDR GetPatchAddress()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return dac_cast<TADDR>(GetEntryPointInternal()) + GetPatchOffset();
+ }
+
+ //-------------------------------------------------------------------
+ // Unwind information.
+ //-------------------------------------------------------------------
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+
+ BOOL HasUnwindInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_patchOffset & UNWIND_INFO_BIT) != 0;
+ }
+
+ StubUnwindInfoHeaderSuffix *GetUnwindInfoHeaderSuffix()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(HasUnwindInfo());
+
+ TADDR info = dac_cast<TADDR>(this);
+
+ if (IsIntercept())
+ {
+ info -= 2 * sizeof(TADDR);
+ }
+
+ return PTR_StubUnwindInfoHeaderSuffix
+ (info - sizeof(StubUnwindInfoHeaderSuffix));
+ }
+
+ StubUnwindInfoHeader *GetUnwindInfoHeader()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ StubUnwindInfoHeaderSuffix *pSuffix = GetUnwindInfoHeaderSuffix();
+
+ TADDR suffixEnd = dac_cast<TADDR>(pSuffix) + sizeof(*pSuffix);
+
+ return PTR_StubUnwindInfoHeader(suffixEnd -
+ StubUnwindInfoHeader::ComputeSize(pSuffix->nUnwindInfoSize));
+ }
+
+#endif // STUBLINKER_GENERATES_UNWIND_INFO
+
+ //-------------------------------------------------------------------
+ // Returns pointer to the start of the allocation containing this Stub.
+ //-------------------------------------------------------------------
+ TADDR GetAllocationBase();
+
+ //-------------------------------------------------------------------
+ // Return executable entrypoint after checking the ref count.
+ //-------------------------------------------------------------------
+ PCODE GetEntryPoint()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(m_signature == kUsedStub);
+ _ASSERTE(m_refcount > 0);
+
+ TADDR pEntryPoint = dac_cast<TADDR>(GetEntryPointInternal());
+
+#ifdef _TARGET_ARM_
+
+#ifndef THUMB_CODE
+#define THUMB_CODE 1
+#endif
+
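+        // Stub code on ARM is Thumb; the low bit of a code address selects
+        // Thumb mode, so set it in the returned entry point.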
+ pEntryPoint |= THUMB_CODE;
+#endif
+
+ return pEntryPoint;
+ }
+
+ UINT GetNumCodeBytes()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return m_numCodeBytes;
+ }
+
+ //-------------------------------------------------------------------
+ // Return start of the stub blob
+ //-------------------------------------------------------------------
+ PTR_CBYTE GetBlob()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(m_signature == kUsedStub);
+ _ASSERTE(m_refcount > 0);
+
+ return GetEntryPointInternal();
+ }
+
+ //-------------------------------------------------------------------
+    // Return the Stub corresponding to the entry point (as in RecoverStub)
+    // and the size of the stub + code in bytes.
+    // WARNING: Depending on the stub kind this may be just the Stub size, as
+    // not all stubs have info about their code size.
+    // It is the caller's responsibility to determine that.
+ //-------------------------------------------------------------------
+ static Stub* RecoverStubAndSize(PCODE pEntryPoint, DWORD *pSize)
+ {
+ CONTRACT(Stub*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ PRECONDITION(pEntryPoint && pSize);
+ }
+ CONTRACT_END;
+
+ Stub *pStub = Stub::RecoverStub(pEntryPoint);
+ *pSize = sizeof(Stub) + pStub->m_numCodeBytes;
+ RETURN pStub;
+ }
+
+ HRESULT CloneStub(BYTE *pBuffer, DWORD dwBufferSize)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if ((pBuffer == NULL) ||
+ (dwBufferSize < (sizeof(*this) + m_numCodeBytes)))
+ {
+ return E_INVALIDARG;
+ }
+
+ memcpyNoGCRefs(pBuffer, this, sizeof(*this) + m_numCodeBytes);
+ reinterpret_cast<Stub *>(pBuffer)->m_refcount = 1;
+
+ return S_OK;
+ }
+
+ //-------------------------------------------------------------------
+ // Reverse GetEntryPoint.
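+    //
+    // Illustrative round trip (sketch):
+    //
+    //      PCODE pc = pStub->GetEntryPoint();
+    //      _ASSERTE(Stub::RecoverStub(pc) == pStub);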
+ //-------------------------------------------------------------------
+ static Stub* RecoverStub(PCODE pEntryPoint)
+ {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ TADDR pStubData = PCODEToPINSTR(pEntryPoint);
+
+ Stub *pStub = PTR_Stub(pStubData - sizeof(*pStub));
+
+#if !defined(DACCESS_COMPILE)
+ _ASSERTE(pStub->m_signature == kUsedStub);
+ _ASSERTE(pStub->GetEntryPoint() == pEntryPoint);
+#elif defined(_DEBUG)
+ // If this isn't really a stub we don't want
+ // to continue with it.
+ // TODO: This should be removed once IsStub
+        // can advertise whether it's safe to call
+ // further StubManager methods.
+ if (pStub->m_signature != kUsedStub ||
+ pStub->GetEntryPoint() != pEntryPoint)
+ {
+ DacError(E_INVALIDARG);
+ }
+#endif
+ return pStub;
+ }
+
+ //-------------------------------------------------------------------
+ // Returns TRUE if entry point is not inside the Stub allocation.
+ //-------------------------------------------------------------------
+ BOOL HasExternalEntryPoint() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_patchOffset & EXTERNAL_ENTRY_BIT) != 0;
+ }
+
+ //-------------------------------------------------------------------
+ // This is the guy that creates stubs.
+ //-------------------------------------------------------------------
+ static Stub* NewStub(LoaderHeap *pLoaderHeap, UINT numCodeBytes,
+ DWORD flags = 0
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ , UINT nUnwindInfoSize = 0
+#endif
+ );
+
+ static Stub* NewStub(PTR_VOID pCode, DWORD flags = 0);
+ static Stub* NewStub(PCODE pCode, DWORD flags = 0)
+ {
+ return NewStub((PTR_VOID)pCode, flags);
+ }
+
+ //-------------------------------------------------------------------
+ // One-time init
+ //-------------------------------------------------------------------
+ static void Init();
+
+ protected:
+    // flags: a combination of NEWSTUB_FL_* values (NEWSTUB_FL_MULTICAST marks a multicast delegate stub)
+ void SetupStub(int numCodeBytes, DWORD flags
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ , UINT nUnwindInfoSlots
+#endif
+ );
+ void DeleteStub();
+
+ //-------------------------------------------------------------------
+ // Return executable entrypoint without checking the ref count.
+ //-------------------------------------------------------------------
+ inline PTR_CBYTE GetEntryPointInternal()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ _ASSERTE(m_signature == kUsedStub);
+
+
+ if (HasExternalEntryPoint())
+ {
+ return dac_cast<PTR_BYTE>(*dac_cast<PTR_PCODE>(dac_cast<TADDR>(this) + sizeof(*this)));
+ }
+ else
+ {
+ // StubLink always puts the entrypoint first.
+ return dac_cast<PTR_CBYTE>(this) + sizeof(*this);
+ }
+ }
+
+ ULONG m_refcount;
+ ULONG m_patchOffset;
+
+ UINT m_numCodeBytes;
+
+#ifdef _DEBUG
+ enum {
+ kUsedStub = 0x42555453, // 'STUB'
+ kFreedStub = 0x46555453, // 'STUF'
+ };
+
+ UINT32 m_signature;
+#else
+#ifdef _WIN64
+    // README ALIGNMENT: in retail mode UINT m_numCodeBytes alone would not align
+    // the code after the Stub struct to 16 bytes; this field pads it properly.
+ UINT m_pad_code_bytes;
+#endif // _WIN64
+#endif // _DEBUG
+
+#ifdef _DEBUG
+ Stub() // Stubs are created by NewStub(), not "new". Hide the
+ { LIMITED_METHOD_CONTRACT; } // constructor to enforce this.
+#endif
+
+};
+
+
+/*
+ * The InterceptStub hides a reference to the real stub at a negative offset.
+ * When this stub is deleted it decrements the real stub, cleaning it up as
+ * well. The InterceptStub is created by the Stublinker.
+ *
+ * <TODO>@TODO: Intercepted stubs need to have a routine that will find the
+ * last real stub in the chain.</TODO>
+ * The stubs are linked - GetInterceptedStub will return either
+ * a pointer to the next intercept stub (if there is one), or NULL,
+ * indicating end-of-chain. GetRealAddr will return the address of
+ * the "real" code, which may, in fact, be another thunk (for example),
+ * and thus should be traced as well.
+ */
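+
+// Illustrative layout of an InterceptStub allocation (sketch, low to high
+// addresses; offsets match GetNegativeOffset/GetNegativeOffsetRealAddr below):
+//
+//      [ Stub* interceptee ][ TADDR realAddr ][ InterceptStub header ][ code ]
+//                                             ^-- "this"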
+
+typedef DPTR(class InterceptStub) PTR_InterceptStub;
+class InterceptStub : public Stub
+{
+ friend class Stub;
+ public:
+ //-------------------------------------------------------------------
+ // This is the guy that creates stubs.
+ //-------------------------------------------------------------------
+ static Stub* NewInterceptedStub(LoaderHeap *pHeap,
+ UINT numCodeBytes,
+ Stub* interceptee,
+ void* pRealAddr
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+ , UINT nUnwindInfoSize = 0
+#endif
+ );
+
+ //---------------------------------------------------------------
+ // Expose key offsets and values for stub generation.
+ //---------------------------------------------------------------
+ int GetNegativeOffset()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return sizeof(TADDR) + GetNegativeOffsetRealAddr();
+ }
+
+ PTR_PTR_Stub GetInterceptedStub()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return dac_cast<PTR_PTR_Stub>(
+ dac_cast<TADDR>(this) - GetNegativeOffset());
+ }
+
+ int GetNegativeOffsetRealAddr()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return sizeof(TADDR);
+ }
+
+ PTR_TADDR GetRealAddr()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return dac_cast<PTR_TADDR>(
+ dac_cast<TADDR>(this) - GetNegativeOffsetRealAddr());
+ }
+
+ static Stub* NewInterceptedStub(void* pCode,
+ Stub* interceptee,
+ void* pRealAddr);
+
+protected:
+ void ReleaseInterceptedStub();
+
+#ifdef _DEBUG
+ InterceptStub() // Intercept stubs are only created by NewInterceptedStub.
+ { LIMITED_METHOD_CONTRACT; }
+#endif
+};
+
+//-------------------------------------------------------------------------
+// Each platform encodes the "branch" instruction in a different
+// way. We use objects derived from InstructionFormat to abstract this
+// information away. InstructionFormats don't contain any variable data
+// so they should be allocated statically.
+//
+// Note that StubLinker does not create or define any InstructionFormats.
+// The client does.
+//
+// The following example shows how to define an InstructionFormat for the
+// X86 jump near instruction which takes on two forms:
+//
+// EB xx jmp rel8 ;; SHORT JMP (signed 8-bit offset)
+// E9 xxxxxxxx jmp rel32 ;; NEAR JMP (signed 32-bit offset)
+//
+// InstructionFormats provide StubLinker the following information:
+//
+// RRT.m_allowedSizes
+//
+// What are the possible sizes that the reference can
+// take? The X86 jump can take either an 8-bit or 32-bit offset
+// so this value is set to (k8|k32). StubLinker will try to
+// use the smallest size possible.
+//
+//
+// RRT.m_fTreatSizesAsSigned
+//      Sign-extend or zero-extend small-size offsets to the platform
+// code pointer size? For x86, this field is set to TRUE (rel8
+// is considered signed.)
+//
+//
+// UINT RRT.GetSizeOfInstruction(refsize, variationCode)
+// Returns the total size of the instruction in bytes for a given
+// refsize. For this example:
+//
+// if (refsize==k8) return 2;
+// if (refsize==k32) return 5;
+//
+//
+// UINT RRT.GetSizeOfData(refsize, variationCode)
+//              Returns the total size of the separate data area (if any) that the
+// instruction needs in bytes for a given refsize. For this example
+// on the SH3
+// if (refsize==k32) return 4; else return 0;
+//
+//              The default implementation of this returns 0, so CPUs that have
+//              no need for a separate constant area don't have to worry about it.
+//
+//
+// BOOL CanReach(refsize, variationcode, fExternal, offset)
+// Returns whether the instruction with the given variationcode &
+// refsize can reach the given offset. In the case of External
+// calls, fExternal is set and offset is the target address. In this case an
+// implementation should return TRUE only if refsize is big enough to fit a
+// full machine-sized pointer to anywhere in the address space.
+//
+//
+// VOID RRT.EmitInstruction(UINT refsize,
+// __int64 fixedUpReference,
+// BYTE *pOutBuffer,
+// UINT variationCode,
+// BYTE *pDataBuffer)
+//
+// Given a chosen size (refsize) and the final offset value
+// computed by StubLink (fixedUpReference), write out the
+// instruction into the provided buffer (guaranteed to be
+// big enough provided you told the truth with GetSizeOfInstruction()).
+// If needed (e.g. on SH3) a data buffer is also passed in for
+// storage of constants.
+//
+// For x86 jmp near:
+//
+// if (refsize==k8) {
+// pOutBuffer[0] = 0xeb;
+// pOutBuffer[1] = (__int8)fixedUpReference;
+// } else if (refsize == k32) {
+// pOutBuffer[0] = 0xe9;
+// *((__int32*)(1+pOutBuffer)) = (__int32)fixedUpReference;
+// } else {
+// CRASH("Bad input.");
+// }
+//
+// VOID RRT.GetHotSpotOffset(UINT refsize, UINT variationCode)
+//
+// The reference offset is always relative to some IP: this
+// method tells StubLinker where that IP is relative to the
+// start of the instruction. For X86, the offset is always
+// relative to the start of the *following* instruction so
+// the correct implementation is:
+//
+// return GetSizeOfInstruction(refsize, variationCode);
+//
+// Actually, InstructionFormat() provides a default implementation of this
+// method that does exactly this so X86 need not override this at all.
+//
+//
+// The extra "variationCode" argument is an __int32 that StubLinker receives
+// from EmitLabelRef() and passes uninterpreted to each RRT method.
+// This allows one RRT to handle a family of related instructions,
+// for example, the family of conditional jumps on the X86.
+//
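+// Putting the pieces above together, a subclass for the x86 jump near
+// example might look like this (a sketch, not the actual x86 implementation):
+//
+//      class X86NearJump : public InstructionFormat
+//      {
+//        public:
+//          X86NearJump() : InstructionFormat(k8|k32) {}
+//
+//          virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
+//          {
+//              return (refsize == k8) ? 2 : 5;
+//          }
+//
+//          virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference,
+//                                       BYTE *pOutBuffer, UINT variationCode,
+//                                       BYTE *pDataBuffer)
+//          {
+//              if (refsize == k8) {
+//                  pOutBuffer[0] = 0xeb;
+//                  pOutBuffer[1] = (__int8)fixedUpReference;
+//              } else {
+//                  pOutBuffer[0] = 0xe9;
+//                  *((__int32*)(pOutBuffer+1)) = (__int32)fixedUpReference;
+//              }
+//          }
+//      };
+//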
+//-------------------------------------------------------------------------
+class InstructionFormat
+{
+ private:
+ enum
+ {
+        // if you want to add a size, insert it in-order (e.g. an 18-bit size would
+        // go between k16 and k32) and shift all the higher values up. All values
+        // must be a power of 2 since they get ORed together.
+
+ _k8,
+#ifdef INSTRFMT_K9
+ _k9,
+#endif
+#ifdef INSTRFMT_K13
+ _k13,
+#endif
+ _k16,
+#ifdef INSTRFMT_K24
+ _k24,
+#endif
+#ifdef INSTRFMT_K26
+ _k26,
+#endif
+ _k32,
+#ifdef INSTRFMT_K64SMALL
+ _k64Small,
+#endif
+#ifdef INSTRFMT_K64
+ _k64,
+#endif
+ _kAllowAlways,
+ };
+
+ public:
+
+ enum
+ {
+ k8 = (1 << _k8),
+#ifdef INSTRFMT_K9
+ k9 = (1 << _k9),
+#endif
+#ifdef INSTRFMT_K13
+ k13 = (1 << _k13),
+#endif
+ k16 = (1 << _k16),
+#ifdef INSTRFMT_K24
+ k24 = (1 << _k24),
+#endif
+#ifdef INSTRFMT_K26
+ k26 = (1 << _k26),
+#endif
+ k32 = (1 << _k32),
+#ifdef INSTRFMT_K64SMALL
+ k64Small = (1 << _k64Small),
+#endif
+#ifdef INSTRFMT_K64
+ k64 = (1 << _k64),
+#endif
+ kAllowAlways= (1 << _kAllowAlways),
+ kMax = kAllowAlways,
+ };
+
+ const UINT m_allowedSizes; // OR mask using above "k" values
+ InstructionFormat(UINT allowedSizes) : m_allowedSizes(allowedSizes)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode) = 0;
+ virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pCodeBuffer, UINT variationCode, BYTE *pDataBuffer) = 0;
+ virtual UINT GetHotSpotOffset(UINT refsize, UINT variationCode)
+ {
+ WRAPPER_NO_CONTRACT;
+ // Default implementation: the offset is added to the
+ // start of the following instruction.
+ return GetSizeOfInstruction(refsize, variationCode);
+ }
+
+ virtual UINT GetSizeOfData(UINT refsize, UINT variationCode)
+ {
+ LIMITED_METHOD_CONTRACT;
+ // Default implementation: 0 extra bytes needed (most CPUs)
+ return 0;
+ }
+
+ virtual BOOL CanReach(UINT refsize, UINT variationCode, BOOL fExternal, INT_PTR offset)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (fExternal) {
+ // For external, we don't have enough info to predict
+ // the offset yet so we only accept if the offset size
+ // is at least as large as the native pointer size.
+ switch(refsize) {
+ case InstructionFormat::k8: // intentional fallthru
+ case InstructionFormat::k16: // intentional fallthru
+#ifdef INSTRFMT_K24
+ case InstructionFormat::k24: // intentional fallthru
+#endif
+#ifdef INSTRFMT_K26
+ case InstructionFormat::k26: // intentional fallthru
+#endif
+ return FALSE; // no 8 or 16-bit platforms
+
+ case InstructionFormat::k32:
+ return sizeof(LPVOID) <= 4;
+#ifdef INSTRFMT_K64
+ case InstructionFormat::k64:
+ return sizeof(LPVOID) <= 8;
+#endif
+ case InstructionFormat::kAllowAlways:
+ return TRUE;
+
+ default:
+ _ASSERTE(0);
+ return FALSE;
+ }
+ } else {
+ switch(refsize)
+ {
+ case InstructionFormat::k8:
+ return FitsInI1(offset);
+
+ case InstructionFormat::k16:
+ return FitsInI2(offset);
+
+#ifdef INSTRFMT_K24
+ case InstructionFormat::k24:
+ return FitsInI2(offset>>8);
+#endif
+
+#ifdef INSTRFMT_K26
+ case InstructionFormat::k26:
+ return FitsInI2(offset>>10);
+#endif
+ case InstructionFormat::k32:
+ return FitsInI4(offset);
+#ifdef INSTRFMT_K64
+ case InstructionFormat::k64:
+ // intentional fallthru
+#endif
+ case InstructionFormat::kAllowAlways:
+ return TRUE;
+ default:
+ _ASSERTE(0);
+ return FALSE;
+
+ }
+ }
+ }
+};
+
+
+
+
+
+//-------------------------------------------------------------------------
+// This stub cache associates stubs with an integer key. For some clients,
+// this might represent the size of the argument stack in some cpu-specific
+// units (for the x86, the size is expressed in DWORDS.) For other clients,
+// this might take into account the style of stub (e.g. whether it returns
+// an object reference or not).
+//-------------------------------------------------------------------------
+class ArgBasedStubCache
+{
+ public:
+ ArgBasedStubCache(UINT fixedSize = NUMFIXEDSLOTS);
+ ~ArgBasedStubCache();
+
+ //-----------------------------------------------------------------
+ // Retrieves the stub associated with the given key.
+ //-----------------------------------------------------------------
+ Stub *GetStub(UINT_PTR key);
+
+ //-----------------------------------------------------------------
+ // Tries to associate the stub with the given key.
+ // It may fail because another thread might swoop in and
+ // do the association before you do. Thus, you must use the
+    // returned stub rather than pStub.
+ //-----------------------------------------------------------------
+ Stub* AttemptToSetStub(UINT_PTR key, Stub *pStub);
+
+
+ // Suggestions for number of slots
+ enum {
+ #ifdef _DEBUG
+ NUMFIXEDSLOTS = 3,
+ #else
+ NUMFIXEDSLOTS = 16,
+ #endif
+ };
+
+#ifdef _DEBUG
+ VOID Dump(); //Diagnostic dump
+#endif
+
+ private:
+
+ // How many low-numbered keys have direct access?
+ UINT m_numFixedSlots;
+
+ // For 'm_numFixedSlots' low-numbered keys, we store them in an array.
+ Stub **m_aStub;
+
+
+ struct SlotEntry
+ {
+ Stub *m_pStub;
+ UINT_PTR m_key;
+ SlotEntry *m_pNext;
+ };
+
+ // High-numbered keys are stored in a sparse linked list.
+ SlotEntry *m_pSlotEntries;
+
+
+ Crst m_crst;
+};
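+// Typical lookup-or-create pattern against this cache (sketch; CreateStub is
+// a hypothetical factory, and the refcounting follows the AttemptToSetStub
+// comment in stublink.cpp):
+//
+//      Stub *pStub = pCache->GetStub(key);         // addref'ed, or NULL
+//      if (pStub == NULL) {
+//          Stub *pCandidate = CreateStub(key);     // refcount 1
+//          pStub = pCache->AttemptToSetStub(key, pCandidate);
+//          pCandidate->DecRef();
+//          if (pStub == NULL) {
+//              // out of memory
+//          }
+//      }
+//      ...
+//      pStub->DecRef();                            // release caller's reference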
+
+
+#define CPUSTUBLINKER StubLinkerCPU
+
+class NDirectStubLinker;
+class CPUSTUBLINKER;
+
+#endif // __stublink_h__
diff --git a/src/vm/stublink.inl b/src/vm/stublink.inl
new file mode 100644
index 0000000000..49f4fb8832
--- /dev/null
+++ b/src/vm/stublink.inl
@@ -0,0 +1,117 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// StubLink.inl
+//
+// Defines inline functions for StubLinker
+//
+
+
+#ifndef __STUBLINK_INL__
+#define __STUBLINK_INL__
+
+#include "stublink.h"
+#include "eeconfig.h"
+#include "safemath.h"
+
+
+#ifdef STUBLINKER_GENERATES_UNWIND_INFO
+
+inline //static
+SIZE_T StubUnwindInfoHeader::ComputeSize(UINT nUnwindInfoSize)
+{
+ LIMITED_METHOD_CONTRACT;
+
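+    // nUnwindInfoSize already includes the RUNTIME_FUNCTION entry (see
+    // StubLinker::UnwindInfoSize), so the total is the leading pNext field,
+    // the unwind data, and the trailing suffix, padded to pointer alignment.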
+ return ALIGN_UP( FIELD_OFFSET(StubUnwindInfoHeader, FunctionEntry)
+ + nUnwindInfoSize
+ + sizeof(StubUnwindInfoHeaderSuffix)
+ , sizeof(void*));
+}
+
+
+#ifndef DACCESS_COMPILE
+
+inline
+void StubUnwindInfoHeader::Init ()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ pNext = (StubUnwindInfoHeader*)(SIZE_T)1;
+}
+
+
+inline
+bool StubUnwindInfoHeader::IsRegistered ()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return pNext != (StubUnwindInfoHeader*)(SIZE_T)1;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+#endif // STUBLINKER_GENERATES_UNWIND_INFO
+
+
+inline
+void StubLinker::Push(UINT size)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ClrSafeInt<SHORT> stackSize(m_stackSize);
+ _ASSERTE(FitsIn<SHORT>(size));
+ SHORT sSize = static_cast<SHORT>(size);
+ stackSize += sSize;
+ _ASSERTE(!stackSize.IsOverflow());
+ m_stackSize = stackSize.Value();
+ UnwindAllocStack(sSize);
+}
+
+
+inline
+void StubLinker::Pop(UINT size)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ClrSafeInt<SHORT> stackSize(m_stackSize);
+ _ASSERTE(FitsIn<SHORT>(size));
+ stackSize = stackSize - ClrSafeInt<SHORT>(size);
+ _ASSERTE(!stackSize.IsOverflow());
+ m_stackSize = stackSize.Value();
+}
+
+
+inline
+VOID StubLinker::EmitUnwindInfoCheck()
+{
+#if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO) && !defined(CROSSGEN_COMPILE)
+ if (g_pConfig->IsStubLinkerUnwindInfoVerificationOn())
+ {
+ if (!m_pUnwindInfoCheckLabel)
+ m_pUnwindInfoCheckLabel = NewCodeLabel();
+ EmitUnwindInfoCheckWorker(m_pUnwindInfoCheckLabel);
+ }
+#endif
+}
+
+
+#ifndef STUBLINKER_GENERATES_UNWIND_INFO
+
+inline VOID StubLinker::UnwindSavedReg (UCHAR reg, ULONG SPRelativeOffset) {LIMITED_METHOD_CONTRACT;}
+inline VOID StubLinker::UnwindAllocStack (SHORT FrameSizeIncrement) {LIMITED_METHOD_CONTRACT;}
+inline VOID StubLinker::UnwindSetFramePointer (UCHAR reg) {LIMITED_METHOD_CONTRACT;}
+
+inline VOID StubLinker::UnwindPushedReg (UCHAR reg)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_stackSize += sizeof(void*);
+}
+
+#endif // !STUBLINKER_GENERATES_UNWIND_INFO
+
+
+#endif // !__STUBLINK_INL__
+
diff --git a/src/vm/stubmgr.cpp b/src/vm/stubmgr.cpp
new file mode 100644
index 0000000000..95407e5561
--- /dev/null
+++ b/src/vm/stubmgr.cpp
@@ -0,0 +1,2752 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+#include "stubmgr.h"
+#include "virtualcallstub.h"
+#include "dllimportcallback.h"
+#include "stubhelpers.h"
+#include "asmconstants.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#ifdef FEATURE_COMINTEROP
+#include "olecontexthelpers.h"
+#endif
+
+#ifdef LOGGING
+const char *GetTType( TraceType tt)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch( tt )
+ {
+ case TRACE_ENTRY_STUB: return "TRACE_ENTRY_STUB";
+ case TRACE_STUB: return "TRACE_STUB";
+ case TRACE_UNMANAGED: return "TRACE_UNMANAGED";
+ case TRACE_MANAGED: return "TRACE_MANAGED";
+ case TRACE_FRAME_PUSH: return "TRACE_FRAME_PUSH";
+ case TRACE_MGR_PUSH: return "TRACE_MGR_PUSH";
+ case TRACE_OTHER: return "TRACE_OTHER";
+ case TRACE_UNJITTED_METHOD: return "TRACE_UNJITTED_METHOD";
+ }
+ return "TRACE_REALLY_WACKED";
+}
+
+void LogTraceDestination(const char * szHint, PCODE stubAddr, TraceDestination * pTrace)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (pTrace->GetTraceType() == TRACE_UNJITTED_METHOD)
+ {
+ MethodDesc * md = pTrace->GetMethodDesc();
+ LOG((LF_CORDB, LL_INFO10000, "'%s' yields '%s' to method 0x%p for input 0x%p.\n",
+ szHint, GetTType(pTrace->GetTraceType()),
+ md, stubAddr));
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000, "'%s' yields '%s' to address 0x%p for input 0x%p.\n",
+ szHint, GetTType(pTrace->GetTraceType()),
+ pTrace->GetAddress(), stubAddr));
+ }
+}
+#endif
+
+#ifdef _DEBUG
+// Get a string representation of this TraceDestination
+// Uses the supplied buffer to store the string (or may return a string literal).
+const WCHAR * TraceDestination::DbgToString(SString & buffer)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ const WCHAR * pValue = W("unknown");
+
+#ifndef DACCESS_COMPILE
+ if (!StubManager::IsStubLoggingEnabled())
+ {
+ return W("<unavailable while native-debugging>");
+ }
+ // Now that we know we're not interop-debugging, we can safely call new.
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+
+
+ FAULT_NOT_FATAL();
+
+ EX_TRY
+ {
+ switch(this->type)
+ {
+ case TRACE_ENTRY_STUB:
+ buffer.Printf("TRACE_ENTRY_STUB(addr=0x%p)", GetAddress());
+ pValue = buffer.GetUnicode();
+ break;
+
+ case TRACE_STUB:
+ buffer.Printf("TRACE_STUB(addr=0x%p)", GetAddress());
+ pValue = buffer.GetUnicode();
+ break;
+
+ case TRACE_UNMANAGED:
+ buffer.Printf("TRACE_UNMANAGED(addr=0x%p)", GetAddress());
+ pValue = buffer.GetUnicode();
+ break;
+
+ case TRACE_MANAGED:
+ buffer.Printf("TRACE_MANAGED(addr=0x%p)", GetAddress());
+ pValue = buffer.GetUnicode();
+ break;
+
+ case TRACE_UNJITTED_METHOD:
+ {
+ MethodDesc * md = this->GetMethodDesc();
+ buffer.Printf("TRACE_UNJITTED_METHOD(md=0x%p, %s::%s)", md, md->m_pszDebugClassName, md->m_pszDebugMethodName);
+ pValue = buffer.GetUnicode();
+ }
+ break;
+
+ case TRACE_FRAME_PUSH:
+ buffer.Printf("TRACE_FRAME_PUSH(addr=0x%p)", GetAddress());
+ pValue = buffer.GetUnicode();
+ break;
+
+ case TRACE_MGR_PUSH:
+ buffer.Printf("TRACE_MGR_PUSH(addr=0x%p, sm=%s)", GetAddress(), this->GetStubManager()->DbgGetName());
+ pValue = buffer.GetUnicode();
+ break;
+
+ case TRACE_OTHER:
+ pValue = W("TRACE_OTHER");
+ break;
+ }
+ }
+ EX_CATCH
+ {
+ pValue = W("(OOM while printing TD)");
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+#endif
+ return pValue;
+}
+#endif
+
+
+void TraceDestination::InitForUnjittedMethod(MethodDesc * pDesc)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pDesc));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pDesc->SanityCheck());
+
+ {
+ // If this is a wrapper stub, then find the real method that it will go to and patch that.
+        // This is more than just a convenience - converting the wrapper MD to the real MD is required for correct behavior.
+ // Wrapper MDs look like unjitted MethodDescs. So when the debugger patches one,
+ // it won't actually bind + apply the patch (it'll wait for the jit-complete instead).
+ // But if the wrapper MD is for prejitted code, then we'll never get the Jit-complete.
+ // Thus it'll miss the patch completely.
+ if (pDesc->IsWrapperStub())
+ {
+ MethodDesc * pNewDesc = NULL;
+
+ FAULT_NOT_FATAL();
+
+
+#ifndef DACCESS_COMPILE
+ EX_TRY
+ {
+ pNewDesc = pDesc->GetExistingWrappedMethodDesc();
+ }
+ EX_CATCH
+ {
+ // In case of an error, we'll just stick w/ the original method desc.
+ } EX_END_CATCH(SwallowAllExceptions)
+#else
+ // @todo - DAC needs this too, but the method is currently not DACized.
+ // However, we don't throw here b/c the error may not be fatal.
+ // DacNotImpl();
+#endif
+
+ if (pNewDesc != NULL)
+ {
+                // Log before reassigning pDesc so both pointers are meaningful.
+                LOG((LF_CORDB, LL_INFO10000, "TD::UnjittedMethod: wrapper md: %p --> %p", pDesc, pNewDesc));
+
+                pDesc = pNewDesc;
+ }
+ }
+ }
+
+
+ this->type = TRACE_UNJITTED_METHOD;
+ this->pDesc = pDesc;
+ this->stubManager = NULL;
+}
+
+
+// Initialize statics.
+#ifdef _DEBUG
+SString * StubManager::s_pDbgStubManagerLog = NULL;
+CrstStatic StubManager::s_DbgLogCrst;
+
+#endif
+
+SPTR_IMPL(StubManager, StubManager, g_pFirstManager);
+
+CrstStatic StubManager::s_StubManagerListCrst;
+
+//-----------------------------------------------------------
+// For perf reasons, the stub managers are now kept in a two
+// tier system: all stub managers but the VirtualStubManagers
+// are in the first tier. A VirtualStubManagerManager takes
+// care of all VirtualStubManagers, and is iterated last of
+// all. It does a smarter job of looking up the owning
+// manager for virtual stubs, checking the current and shared
+// appdomains before checking the remaining managers.
+//
+// Thus, this iterator will run the regular list until it
+// hits the end, then it will check the VSMM, then it will
+// end.
+//-----------------------------------------------------------
+class StubManagerIterator
+{
+ public:
+ StubManagerIterator();
+ ~StubManagerIterator();
+
+ void Reset();
+ BOOL Next();
+ PTR_StubManager Current();
+
+ protected:
+ enum SMI_State
+ {
+ SMI_START,
+ SMI_NORMAL,
+ SMI_VIRTUALCALLSTUBMANAGER,
+ SMI_END
+ };
+
+ SMI_State m_state;
+ PTR_StubManager m_pCurMgr;
+ SimpleReadLockHolder m_lh;
+};
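+// Typical iteration pattern (sketch), as used by e.g. IsSingleOwner below:
+//
+//      StubManagerIterator it;
+//      while (it.Next()) {
+//          StubManager *pMgr = it.Current();
+//          ...
+//      }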
+
+//-----------------------------------------------------------
+// Ctor
+//-----------------------------------------------------------
+StubManagerIterator::StubManagerIterator()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ Reset();
+}
+
+void StubManagerIterator::Reset()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ m_pCurMgr = NULL;
+ m_state = SMI_START;
+}
+
+//-----------------------------------------------------------
+// Ctor
+//-----------------------------------------------------------
+StubManagerIterator::~StubManagerIterator()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+}
+
+//-----------------------------------------------------------
+// Move to the next element. Iterators are created at
+// start-1, so you must call Next before using Current.
+//-----------------------------------------------------------
+BOOL StubManagerIterator::Next()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+#ifndef DACCESS_COMPILE
+ CAN_TAKE_LOCK; // because of m_lh.Assign()
+#else
+ CANNOT_TAKE_LOCK;
+#endif
+ }
+ CONTRACTL_END;
+
+ SUPPORTS_DAC;
+
+ do {
+ if (m_state == SMI_START) {
+ m_state = SMI_NORMAL;
+ m_pCurMgr = StubManager::g_pFirstManager;
+ }
+ else if (m_state == SMI_NORMAL) {
+ if (m_pCurMgr != NULL) {
+ m_pCurMgr = m_pCurMgr->m_pNextManager;
+ }
+ else {
+ // If we've reached the end of the regular list of stub managers, then we
+ // set the VirtualCallStubManagerManager is the current item (effectively
+ // forcing it to always be the last manager checked).
+ m_state = SMI_VIRTUALCALLSTUBMANAGER;
+ VirtualCallStubManagerManager *pVCSMMgr = VirtualCallStubManagerManager::GlobalManager();
+ m_pCurMgr = PTR_StubManager(pVCSMMgr);
+#ifndef DACCESS_COMPILE
+ m_lh.Assign(&pVCSMMgr->m_RWLock);
+#endif
+ }
+ }
+ else if (m_state == SMI_VIRTUALCALLSTUBMANAGER) {
+ m_state = SMI_END;
+ m_pCurMgr = NULL;
+#ifndef DACCESS_COMPILE
+ m_lh.Clear();
+#endif
+ }
+ } while (m_state != SMI_END && m_pCurMgr == NULL);
+
+ CONSISTENCY_CHECK(m_state == SMI_END || m_pCurMgr != NULL);
+ return (m_state != SMI_END);
+}
+
+//-----------------------------------------------------------
+// Get the current contents of the iterator
+//-----------------------------------------------------------
+PTR_StubManager StubManagerIterator::Current()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ CONSISTENCY_CHECK(m_state != SMI_START);
+ CONSISTENCY_CHECK(m_state != SMI_END);
+ CONSISTENCY_CHECK(CheckPointer(m_pCurMgr));
+
+ return m_pCurMgr;
+}
+
+#ifndef DACCESS_COMPILE
+//-----------------------------------------------------------
+//-----------------------------------------------------------
+StubManager::StubManager()
+ : m_pNextManager(NULL)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+//-----------------------------------------------------------
+//-----------------------------------------------------------
+StubManager::~StubManager()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK; // StubManager::UnlinkStubManager uses a crst
+ PRECONDITION(CheckPointer(this));
+ } CONTRACTL_END;
+
+ UnlinkStubManager(this);
+}
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef _DEBUG_IMPL
+//-----------------------------------------------------------
+// Verify that the stub is owned by the given stub manager
+// and no other stub manager. If a stub is claimed by multiple managers,
+// then the wrong manager may claim ownership and improperly trace the stub.
+//-----------------------------------------------------------
+BOOL StubManager::IsSingleOwner(PCODE stubAddress, StubManager * pOwner)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_CAN_TAKE_LOCK; // courtesy StubManagerIterator
+
+ // ensure this stubmanager owns it.
+ _ASSERTE(pOwner != NULL);
+
+ // ensure nobody else does.
+ bool ownerFound = false;
+ int count = 0;
+ StubManagerIterator it;
+ while (it.Next())
+ {
+        // Skip the managers before pOwner; callers will already have checked them.
+ if (!ownerFound && it.Current() != pOwner)
+ continue;
+
+ if (it.Current() == pOwner)
+ ownerFound = true;
+
+ if (it.Current()->CheckIsStub_Worker(stubAddress))
+ {
+            // If you hit this assert, you can tell which two stub managers are conflicting by inspecting their vtables.
+            CONSISTENCY_CHECK_MSGF((it.Current() == pOwner), ("Stub at 0x%p is owned by multiple managers (0x%p, 0x%p)",
+                (void*) stubAddress, pOwner, it.Current()));
+ count++;
+ }
+ else
+ {
+ _ASSERTE(it.Current() != pOwner);
+ }
+ }
+
+ _ASSERTE(ownerFound);
+
+ // We expect pOwner to be the only one to own this stub.
+ return (count == 1);
+}
+#endif
+
+
+
+//-----------------------------------------------------------
+//-----------------------------------------------------------
+BOOL StubManager::CheckIsStub_Worker(PCODE stubStartAddress)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ CAN_TAKE_LOCK; // CheckIsStub_Internal can enter SimpleRWLock
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ SUPPORTS_DAC;
+
+    // @todo - consider having a single null check right up front.
+    // Though this may mask bugs where stub managers don't handle bad addresses,
+    // and someone could just as easily pass (0x01) as NULL.
+ if (stubStartAddress == NULL)
+ {
+ return FALSE;
+ }
+
+ CONTRACT_VIOLATION(SOToleranceViolation);
+ // @todo : this might not have a thread
+ // BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return FALSE);
+
+ struct Param
+ {
+ BOOL fIsStub;
+ StubManager *pThis;
+ TADDR stubStartAddress;
+ } param;
+ param.fIsStub = FALSE;
+ param.pThis = this;
+ param.stubStartAddress = stubStartAddress;
+
+ // This may be called from DAC, and DAC + non-DAC have very different
+ // exception handling.
+#ifdef DACCESS_COMPILE
+ PAL_TRY(Param *, pParam, &param)
+#else
+ Param *pParam = &param;
+ EX_TRY
+#endif
+ {
+ SUPPORTS_DAC;
+
+#ifndef DACCESS_COMPILE
+        // Calling CheckIsStub_Internal may AV. That's okay.
+ AVInRuntimeImplOkayHolder AVOkay;
+#endif
+
+        // Make a polymorphic call to the derived stub manager.
+ // Try to see if this address is for a stub. If the address is
+ // completely bogus, then this might fault, so we protect it
+ // with SEH.
+ pParam->fIsStub = pParam->pThis->CheckIsStub_Internal(pParam->stubStartAddress);
+ }
+#ifdef DACCESS_COMPILE
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+#else
+ EX_CATCH
+#endif
+ {
+ LOG((LF_CORDB, LL_INFO10000, "D::GASTSI: exception indicated addr is bad.\n"));
+
+ param.fIsStub = FALSE;
+ }
+#ifdef DACCESS_COMPILE
+ PAL_ENDTRY
+#else
+ EX_END_CATCH(SwallowAllExceptions);
+#endif
+
+ //END_SO_INTOLERANT_CODE;
+
+ return param.fIsStub;
+}
+
+//-----------------------------------------------------------
+// stubAddress may be an invalid address.
+//-----------------------------------------------------------
+PTR_StubManager StubManager::FindStubManager(PCODE stubAddress)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK; // courtesy StubManagerIterator
+ }
+ CONTRACTL_END;
+
+ SUPPORTS_DAC;
+
+ StubManagerIterator it;
+ while (it.Next())
+ {
+ if (it.Current()->CheckIsStub_Worker(stubAddress))
+ {
+ _ASSERTE_IMPL(IsSingleOwner(stubAddress, it.Current()));
+ return it.Current();
+ }
+ }
+
+ return NULL;
+}
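+
+// Illustrative sketch (a hypothetical caller, not code from this file):
+// a debugger component can classify an arbitrary address by asking
+// FindStubManager and treating NULL as "not a stub":
+//
+//     PTR_StubManager pMgr = StubManager::FindStubManager(addr);
+//     if (pMgr != NULL)
+//     {
+//         // addr is the start of a stub owned by pMgr
+//     }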
+
+//-----------------------------------------------------------
+// Given an address, figure out a TraceDestination describing where
+// the instructions at that address will eventually transfer execution to.
+//-----------------------------------------------------------
+BOOL StubManager::TraceStub(PCODE stubStartAddress, TraceDestination *trace)
+{
+ WRAPPER_NO_CONTRACT;
+
+ StubManagerIterator it;
+ while (it.Next())
+ {
+ StubManager * pCurrent = it.Current();
+ if (pCurrent->CheckIsStub_Worker(stubStartAddress))
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "StubManager::TraceStub: addr 0x%p claimed by mgr "
+ "0x%p.\n", stubStartAddress, pCurrent));
+
+ _ASSERTE_IMPL(IsSingleOwner(stubStartAddress, pCurrent));
+
+ BOOL fValid = pCurrent->DoTraceStub(stubStartAddress, trace);
+#ifdef _DEBUG
+ if (IsStubLoggingEnabled())
+ {
+ DbgWriteLog("Doing TraceStub for Address 0x%p, claimed by '%s' (0x%p)\n", stubStartAddress, pCurrent->DbgGetName(), pCurrent);
+ if (fValid)
+ {
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ FAULT_NOT_FATAL();
+ SString buffer;
+ DbgWriteLog(" td=%S\n", trace->DbgToString(buffer));
+ }
+ else
+ {
+ DbgWriteLog(" stubmanager returned false. Does not expect to call managed code\n");
+
+ }
+ } // logging
+#endif
+ return fValid;
+ }
+ }
+
+ if (ExecutionManager::IsManagedCode(stubStartAddress))
+ {
+ trace->InitForManaged(stubStartAddress);
+
+#ifdef _DEBUG
+ DbgWriteLog("Doing TraceStub for Address 0x%p is jitted code claimed by codemanager\n", stubStartAddress);
+#endif
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "StubManager::TraceStub: addr 0x%p is managed code\n",
+ stubStartAddress));
+
+ return TRUE;
+ }
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "StubManager::TraceStub: addr 0x%p unknown. TRACE_OTHER...\n",
+ stubStartAddress));
+
+#ifdef _DEBUG
+ DbgWriteLog("Doing TraceStub for Address 0x%p is unknown!!!\n", stubStartAddress);
+#endif
+
+ trace->InitForOther(stubStartAddress);
+ return FALSE;
+}
+
+//-----------------------------------------------------------
+//-----------------------------------------------------------
+BOOL StubManager::FollowTrace(TraceDestination *trace)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ while (trace->GetTraceType() == TRACE_STUB)
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "StubManager::FollowTrace: TRACE_STUB for 0x%p\n",
+ trace->GetAddress()));
+
+ if (!TraceStub(trace->GetAddress(), trace))
+ {
+ //
+ // No stub manager claimed it - it must be an EE helper or something.
+ //
+
+ trace->InitForOther(trace->GetAddress());
+ }
+ }
+
+ LOG_TRACE_DESTINATION(trace, NULL, "StubManager::FollowTrace");
+
+ return trace->GetTraceType() != TRACE_OTHER;
+}
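+
+// Putting the two together - a sketch of how a stepper might resolve
+// where a call will eventually land (hypothetical caller shown for
+// clarity only):
+//
+//     TraceDestination trace;
+//     if (StubManager::TraceStub(ip, &trace) &&
+//         StubManager::FollowTrace(&trace))
+//     {
+//         // trace now describes managed code, an unjitted method, a
+//         // frame push, etc. - anything but TRACE_OTHER.
+//     }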
+
+#ifndef DACCESS_COMPILE
+
+//-----------------------------------------------------------
+//-----------------------------------------------------------
+void StubManager::AddStubManager(StubManager *mgr)
+{
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(CheckPointer(g_pFirstManager, NULL_OK));
+ CONSISTENCY_CHECK(CheckPointer(mgr));
+
+ GCX_COOP_NO_THREAD_BROKEN();
+
+ CrstHolder ch(&s_StubManagerListCrst);
+
+ if (g_pFirstManager == NULL)
+ {
+ g_pFirstManager = mgr;
+ }
+ else
+ {
+ mgr->m_pNextManager = g_pFirstManager;
+ g_pFirstManager = mgr;
+ }
+
+    LOG((LF_CORDB, LL_EVERYTHING, "StubManager::AddStubManager - 0x%p (vptr 0x%p)\n", mgr, (*(PVOID*)mgr)));
+}
+
+//-----------------------------------------------------------
+// NOTE: The runtime MUST be suspended to use this in a
+// truly safe manner.
+//-----------------------------------------------------------
+void StubManager::UnlinkStubManager(StubManager *mgr)
+{
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ CONSISTENCY_CHECK(CheckPointer(g_pFirstManager, NULL_OK));
+ CONSISTENCY_CHECK(CheckPointer(mgr));
+
+ CrstHolder ch(&s_StubManagerListCrst);
+
+ StubManager **m = &g_pFirstManager;
+ while (*m != NULL)
+ {
+ if (*m == mgr)
+ {
+ *m = (*m)->m_pNextManager;
+ return;
+ }
+ m = &(*m)->m_pNextManager;
+ }
+}
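+
+// The registration pattern, sketched for clarity. MyStubManager is a
+// hypothetical subclass; see the various ::Init methods later in this
+// file for the real registrations:
+//
+//     MyStubManager *pMgr = new MyStubManager();
+//     StubManager::AddStubManager(pMgr);  // prepends to g_pFirstManager
+//     ...
+//     delete pMgr;  // ~StubManager unlinks it via UnlinkStubManager(this)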
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+//-----------------------------------------------------------
+//-----------------------------------------------------------
+void
+StubManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ // Report the global list head.
+ DacEnumMemoryRegion(DacGlobalBase() +
+ g_dacGlobals.StubManager__g_pFirstManager,
+ sizeof(TADDR));
+
+ //
+ // Report the list contents.
+ //
+
+ StubManagerIterator it;
+ while (it.Next())
+ {
+ it.Current()->DoEnumMemoryRegions(flags);
+ }
+}
+
+//-----------------------------------------------------------
+//-----------------------------------------------------------
+void
+StubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ DAC_ENUM_VTHIS();
+ EMEM_OUT(("MEM: %p StubManager base\n", dac_cast<TADDR>(this)));
+}
+
+#endif // #ifdef DACCESS_COMPILE
+
+//-----------------------------------------------------------
+// Initialize the global stub manager service.
+//-----------------------------------------------------------
+void StubManager::InitializeStubManagers()
+{
+#if !defined(DACCESS_COMPILE)
+
+#if defined(_DEBUG)
+ s_DbgLogCrst.Init(CrstDebuggerHeapLock, (CrstFlags)(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD | CRST_TAKEN_DURING_SHUTDOWN));
+#endif
+ s_StubManagerListCrst.Init(CrstDebuggerHeapLock, (CrstFlags)(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD | CRST_TAKEN_DURING_SHUTDOWN));
+
+#endif // !DACCESS_COMPILE
+}
+
+//-----------------------------------------------------------
+// Terminate the global stub manager service.
+//-----------------------------------------------------------
+void StubManager::TerminateStubManagers()
+{
+#if !defined(DACCESS_COMPILE)
+
+#if defined(_DEBUG)
+ DbgFinishLog();
+ s_DbgLogCrst.Destroy();
+#endif
+
+ s_StubManagerListCrst.Destroy();
+#endif // !DACCESS_COMPILE
+}
+
+#ifdef _DEBUG
+
+//-----------------------------------------------------------
+// Should stub-manager logging be enabled?
+//-----------------------------------------------------------
+bool StubManager::IsStubLoggingEnabled()
+{
+    // Our current logging implementation uses SString, which uses new(), which can't
+    // be called on the helper thread because it may deadlock.
+    // (See SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE.)
+
+    // We avoid this by simply not logging when native-debugging.
+ if (IsDebuggerPresent())
+ {
+ return false;
+ }
+
+ return true;
+}
+
+
+//-----------------------------------------------------------
+// Call to reset the log. This is used at the start of a new step operation.
+// addrCallInstruction is the IP just after the call instruction being stepped
+// through; addrCallTarget is that call's target.
+//-----------------------------------------------------------
+void StubManager::DbgBeginLog(TADDR addrCallInstruction, TADDR addrCallTarget)
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+    // We can't call new() if another thread holds the heap lock and is then suspended
+    // by the interop-debugger. Since this is debug-only logging code, we'll just skip
+    // it in those cases.
+ if (!IsStubLoggingEnabled())
+ {
+ return;
+ }
+ // Now that we know we're not interop-debugging, we can safely call new.
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ FAULT_NOT_FATAL();
+
+ {
+ CrstHolder ch(&s_DbgLogCrst);
+ EX_TRY
+ {
+ if (s_pDbgStubManagerLog == NULL)
+ {
+ s_pDbgStubManagerLog = new SString();
+ }
+ s_pDbgStubManagerLog->Clear();
+ }
+ EX_CATCH
+ {
+ DbgFinishLog();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ DbgWriteLog("Beginning Step-in. IP after Call instruction is at 0x%p, call target is at 0x%p\n",
+ addrCallInstruction, addrCallTarget);
+#endif
+}
+
+//-----------------------------------------------------------
+// Finish logging: free the global log buffer.
+//-----------------------------------------------------------
+void StubManager::DbgFinishLog()
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CrstHolder ch(&s_DbgLogCrst);
+
+ // Since this is just a tool for debugging, we don't care if we call new.
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ FAULT_NOT_FATAL();
+
+ delete s_pDbgStubManagerLog;
+ s_pDbgStubManagerLog = NULL;
+
+
+#endif
+}
+
+
+//-----------------------------------------------------------
+// Write an arbitrary string to the log.
+//-----------------------------------------------------------
+void StubManager::DbgWriteLog(const CHAR *format, ...)
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ if (!IsStubLoggingEnabled())
+ {
+ return;
+ }
+
+ // Since this is just a tool for debugging, we don't care if we call new.
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ FAULT_NOT_FATAL();
+
+ CrstHolder ch(&s_DbgLogCrst);
+
+ if (s_pDbgStubManagerLog == NULL)
+ {
+ return;
+ }
+
+ // Suppress asserts about lossy encoding conversion in SString::Printf
+ CHECK chk;
+ BOOL fEntered = chk.EnterAssert();
+
+ EX_TRY
+ {
+ va_list args;
+ va_start(args, format);
+ s_pDbgStubManagerLog->AppendVPrintf(format, args);
+ va_end(args);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (fEntered) chk.LeaveAssert();
+#endif
+}
+
+
+
+//-----------------------------------------------------------
+// Get the log as a string.
+//-----------------------------------------------------------
+void StubManager::DbgGetLog(SString * pStringOut)
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pStringOut));
+ }
+ CONTRACTL_END;
+
+ if (!IsStubLoggingEnabled())
+ {
+ return;
+ }
+
+ // Since this is just a tool for debugging, we don't care if we call new.
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ FAULT_NOT_FATAL();
+
+ CrstHolder ch(&s_DbgLogCrst);
+
+ if (s_pDbgStubManagerLog == NULL)
+ {
+ return;
+ }
+
+ EX_TRY
+ {
+ pStringOut->Set(*s_pDbgStubManagerLog);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+#endif
+}
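+
+// A sketch of the intended log lifecycle (debug builds only; the names
+// are real, the call sequence is illustrative):
+//
+//     StubManager::DbgBeginLog(ipAfterCall, callTarget); // reset at step start
+//     StubManager::DbgWriteLog("claimed by '%s'\n", name); // append entries
+//     SString s;
+//     StubManager::DbgGetLog(&s);  // snapshot the accumulated log
+//     StubManager::DbgFinishLog(); // free the buffer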
+
+
+#endif // _DEBUG
+
+extern "C" void STDCALL ThePreStubPatchLabel(void);
+
+//-----------------------------------------------------------
+//-----------------------------------------------------------
+BOOL ThePreStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+
+ PRECONDITION(stubStartAddress != NULL);
+ PRECONDITION(CheckPointer(trace));
+ }
+ CONTRACTL_END;
+
+ //
+ // We cannot tell where the stub will end up
+ // until after the prestub worker has been run.
+ //
+
+ trace->InitForFramePush(GetEEFuncEntryPoint(ThePreStubPatchLabel));
+
+ return TRUE;
+}
+
+//-----------------------------------------------------------
+BOOL ThePreStubManager::CheckIsStub_Internal(PCODE stubStartAddress)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return stubStartAddress == GetPreStubEntryPoint();
+
+}
+
+
+// -------------------------------------------------------
+// Stub manager functions & globals
+// -------------------------------------------------------
+
+SPTR_IMPL(PrecodeStubManager, PrecodeStubManager, g_pManager);
+
+#ifndef DACCESS_COMPILE
+
+/* static */
+void PrecodeStubManager::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ g_pManager = new PrecodeStubManager();
+ StubManager::AddStubManager(g_pManager);
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+
+BOOL PrecodeStubManager::IsPrecodeByAsm(PCODE stubStartAddress)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ //
+ // First, check if it looks like a stub.
+ //
+ PREFIX_ASSUME(stubStartAddress!=NULL);
+
+ // Mask off the ThumbBit before performing any checks
+ TADDR pInstr = PCODEToPINSTR(stubStartAddress);
+
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ if (MethodDescChunk::IsCompactEntryPointAtAddress(pInstr))
+ {
+ if (*PTR_BYTE(pInstr) != X86_INSTR_MOV_AL)
+ {
+ return FALSE;
+ }
+ return TRUE;
+ }
+#endif // HAS_COMPACT_ENTRYPOINTS
+
+ if (!IS_ALIGNED(pInstr, PRECODE_ALIGNMENT))
+ return FALSE;
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ if (
+#if defined(_TARGET_AMD64_)
+ *PTR_USHORT(pInstr) != X86_INSTR_MOV_R10_IMM64
+#else
+ *PTR_BYTE(pInstr) != X86_INSTR_MOV_EAX_IMM32
+#endif
+#ifdef HAS_FIXUP_PRECODE
+ && *PTR_BYTE(pInstr) != X86_INSTR_CALL_REL32 // unpatched fixup precode
+ && *PTR_BYTE(pInstr) != X86_INSTR_JMP_REL32 // patched fixup precode
+#endif
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+ && *PTR_DWORD(pInstr) != IN_WIN64(0xC8894890) IN_WIN32(0xD189C889) // mov eax,ecx; mov ecx,edx
+#endif
+ )
+ {
+ return FALSE;
+ }
+#elif defined(_TARGET_ARM_)
+ // Check if we point to a valid stub start. The values used here
+ // are picked up from various stub "::Init" methods implemented in
+ // arm\stubs.cpp. If those are changed, or a new one is introduced,
+    // then please update them here!
+ if ((*PTR_DWORD(pInstr) != 0xc008f8df) // Is this not StubPrecode?
+ && (*PTR_DWORD(pInstr) != 0xf8df46fc) // Is this not FixupPrecode?
+ && (*PTR_DWORD(pInstr) != 0x46084684) // Is this not ThisPtrRetBufPrecode?
+#if defined(HAS_REMOTING_PRECODE)
+ && (*PTR_DWORD(pInstr) != 0x4904b502) // Is this not RemotingPrecode?
+#endif // HAS_REMOTING_PRECODE
+#if defined(HAS_NDIRECT_IMPORT_PRECODE)
+ && (*PTR_DWORD(pInstr) != 0xc004f8df) // Is this not NDirectImportPrecode?
+#endif
+ )
+ {
+ // If this is not an expected stubStartAddress, then return failure.
+ return FALSE;
+ }
+#elif defined(_TARGET_ARM64_)
+ if ((*PTR_DWORD(pInstr) != 0x10000089) // Is this not StubPrecode?
+#if defined(HAS_NDIRECT_IMPORT_PRECODE)
+ && (*PTR_DWORD(pInstr) != 0x10000088) // Is this not NDirectImportPrecode?
+#endif
+ )
+ {
+ //ARM64TODO: remove this after all valid stubs have been added in if condition above
+ _ASSERTE(!"Unexpected Stub");
+ // If this is not an expected stubStartAddress, then return failure.
+ return FALSE;
+ }
+#else
+ PORTABILITY_ASSERT("PrecodeStubManager::IsPrecodeByAsm");
+#endif
+
+ return TRUE;
+}
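+
+// For reference, a sketch of the first bytes matched on x86/AMD64 above
+// (standard instruction encodings; the named constants are defined in
+// the per-target headers):
+//
+//     AMD64 StubPrecode:  49 BA imm64   mov r10, imm64  (X86_INSTR_MOV_R10_IMM64)
+//     x86 StubPrecode:    B8 imm32      mov eax, imm32  (X86_INSTR_MOV_EAX_IMM32)
+//     Fixup, unpatched:   E8 rel32      call rel32      (X86_INSTR_CALL_REL32)
+//     Fixup, patched:     E9 rel32      jmp rel32       (X86_INSTR_JMP_REL32)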
+
+/* static */
+BOOL PrecodeStubManager::CheckIsStub_Internal(PCODE stubStartAddress)
+{
+ CONTRACTL
+ {
+ THROWS; // address may be bad, so we may AV.
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+    // Forwarded here from RangeSectionStubManager
+ return FALSE;
+}
+
+BOOL PrecodeStubManager::DoTraceStub(PCODE stubStartAddress,
+ TraceDestination *trace)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ LOG((LF_CORDB, LL_EVERYTHING, "PrecodeStubManager::DoTraceStub called\n"));
+
+ MethodDesc* pMD = NULL;
+
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ if (MethodDescChunk::IsCompactEntryPointAtAddress(stubStartAddress))
+ {
+ pMD = MethodDescChunk::GetMethodDescFromCompactEntryPoint(stubStartAddress);
+ }
+ else
+#endif // HAS_COMPACT_ENTRYPOINTS
+ {
+ Precode* pPrecode = Precode::GetPrecodeFromEntryPoint(stubStartAddress);
+ PREFIX_ASSUME(pPrecode != NULL);
+
+ switch (pPrecode->GetType())
+ {
+ case PRECODE_STUB:
+ break;
+
+#ifdef HAS_NDIRECT_IMPORT_PRECODE
+ case PRECODE_NDIRECT_IMPORT:
+#ifndef DACCESS_COMPILE
+ trace->InitForUnmanaged(GetEEFuncEntryPoint(NDirectImportThunk));
+#else
+ trace->InitForOther(NULL);
+#endif
+ LOG_TRACE_DESTINATION(trace, stubStartAddress, "PrecodeStubManager::DoTraceStub - NDirect import");
+ return TRUE;
+#endif // HAS_NDIRECT_IMPORT_PRECODE
+
+#ifdef HAS_REMOTING_PRECODE
+ case PRECODE_REMOTING:
+#ifndef DACCESS_COMPILE
+ trace->InitForManagerPush(GetEEFuncEntryPoint(PrecodeRemotingThunk), this);
+#else
+ trace->InitForOther(NULL);
+#endif
+ LOG_TRACE_DESTINATION(trace, stubStartAddress, "PrecodeStubManager::DoTraceStub - remoting");
+ return TRUE;
+#endif // HAS_REMOTING_PRECODE
+
+#ifdef HAS_FIXUP_PRECODE
+ case PRECODE_FIXUP:
+ break;
+#endif // HAS_FIXUP_PRECODE
+
+#ifdef HAS_THISPTR_RETBUF_PRECODE
+ case PRECODE_THISPTR_RETBUF:
+ break;
+#endif // HAS_THISPTR_RETBUF_PRECODE
+
+ default:
+ _ASSERTE_IMPL(!"DoTraceStub: Unexpected precode type");
+ break;
+ }
+
+ PCODE target = pPrecode->GetTarget();
+
+ // check if the method has been jitted
+ if (!pPrecode->IsPointingToPrestub(target))
+ {
+ trace->InitForStub(target);
+ LOG_TRACE_DESTINATION(trace, stubStartAddress, "PrecodeStubManager::DoTraceStub - code");
+ return TRUE;
+ }
+
+ pMD = pPrecode->GetMethodDesc();
+ }
+
+ PREFIX_ASSUME(pMD != NULL);
+
+ // If the method is not IL, then we patch the prestub because no one will ever change the call here at the
+ // MethodDesc. If, however, this is an IL method, then we are at risk to have another thread backpatch the call
+ // here, so we'd miss if we patched the prestub. Therefore, we go right to the IL method and patch IL offset 0
+ // by using TRACE_UNJITTED_METHOD.
+ if (!pMD->IsIL())
+ {
+ trace->InitForStub(GetPreStubEntryPoint());
+ }
+ else
+ {
+ trace->InitForUnjittedMethod(pMD);
+ }
+
+ LOG_TRACE_DESTINATION(trace, stubStartAddress, "PrecodeStubManager::DoTraceStub - prestub");
+ return TRUE;
+}
+
+#ifndef DACCESS_COMPILE
+BOOL PrecodeStubManager::TraceManager(Thread *thread,
+ TraceDestination *trace,
+ T_CONTEXT *pContext,
+ BYTE **pRetAddr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(thread, NULL_OK));
+ PRECONDITION(CheckPointer(trace));
+ PRECONDITION(CheckPointer(pContext));
+ PRECONDITION(CheckPointer(pRetAddr));
+ }
+ CONTRACTL_END;
+
+ BOOL bRet = FALSE;
+
+#ifdef HAS_REMOTING_PRECODE
+ PCODE ip = GetIP(pContext);
+
+ if (ip == GetEEFuncEntryPoint(PrecodeRemotingThunk))
+ {
+ BYTE** pStack = (BYTE**)GetSP(pContext);
+
+ // Aligning down will handle differences in layout of virtual and nonvirtual remoting precodes
+#ifdef _TARGET_ARM_
+        // The offset here is Lr-7: 6 for the size of the Precode struct, 1 for THUMB_CODE alignment.
+ Precode* pPrecode = (Precode*)(pContext->Lr - 7);
+
+ _ASSERTE(pPrecode->GetType() == PRECODE_REMOTING);
+
+ // We need to tell the debugger where we're returning to just in case
+ // the debugger can't continue on.
+ *pRetAddr = pStack[1];
+
+ Object* pThis = (Object*)(size_t)pContext->R0;
+#else
+ Precode* pPrecode = (Precode*)ALIGN_DOWN(pStack[0] - sizeof(INT32)
+ - offsetof(RemotingPrecode,m_callRel32),
+ PRECODE_ALIGNMENT);
+
+ _ASSERTE(pPrecode->GetType() == PRECODE_REMOTING);
+
+ // We need to tell the debugger where we're returning to just in case
+ // the debugger can't continue on.
+ *pRetAddr = pStack[1];
+
+ Object* pThis = (Object*)(size_t)pContext->Ecx;
+#endif
+
+ if (pThis != NULL && pThis->IsTransparentProxy())
+ {
+            // We have a proxy in the way.
+#ifdef DACCESS_COMPILE
+ DacNotImpl();
+#else
+ trace->InitForFramePush(GetEEFuncEntryPoint(TransparentProxyStubPatchLabel));
+#endif
+ }
+ else
+ {
+ // No proxy in the way. Follow the target.
+ trace->InitForStub(pPrecode->GetTarget());
+ }
+ bRet = TRUE;
+ }
+ else
+#endif // HAS_REMOTING_PRECODE
+ {
+ _ASSERTE(!"Unexpected call to PrecodeStubManager::TraceManager");
+ }
+
+ return bRet;
+}
+#endif
+
+// -------------------------------------------------------
+// StubLinkStubManager
+// -------------------------------------------------------
+
+SPTR_IMPL(StubLinkStubManager, StubLinkStubManager, g_pManager);
+
+#ifndef DACCESS_COMPILE
+
+/* static */
+void StubLinkStubManager::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ g_pManager = new StubLinkStubManager();
+ StubManager::AddStubManager(g_pManager);
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+BOOL StubLinkStubManager::CheckIsStub_Internal(PCODE stubStartAddress)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return GetRangeList()->IsInRange(stubStartAddress);
+}
+
+
+BOOL StubLinkStubManager::DoTraceStub(PCODE stubStartAddress,
+ TraceDestination *trace)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "StubLinkStubManager::DoTraceStub: stubStartAddress=0x%08x\n",
+ stubStartAddress));
+
+ Stub *stub = Stub::RecoverStub(stubStartAddress);
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "StubLinkStubManager::DoTraceStub: stub=0x%08x\n", stub));
+
+ //
+ // If this is an intercept stub, we may be able to step
+ // into the intercepted stub.
+ //
+ // <TODO>!!! Note that this case should not be necessary, it's just
+ // here until I get all of the patch offsets & frame patch
+ // methods in place.</TODO>
+ //
+ TADDR pRealAddr = 0;
+ if (stub->IsIntercept())
+ {
+ InterceptStub *is = dac_cast<PTR_InterceptStub>(stub);
+
+ if (*is->GetInterceptedStub() == NULL)
+ {
+ pRealAddr = *is->GetRealAddr();
+ LOG((LF_CORDB, LL_INFO10000, "StubLinkStubManager::DoTraceStub"
+ " Intercept stub, no following stub, real addr:0x%x\n",
+ pRealAddr));
+ }
+ else
+ {
+ stub = *is->GetInterceptedStub();
+
+ pRealAddr = stub->GetEntryPoint();
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "StubLinkStubManager::DoTraceStub: intercepted "
+ "stub=0x%08x, ep=0x%08x\n",
+ stub, stub->GetEntryPoint()));
+ }
+ _ASSERTE( pRealAddr );
+
+ // !!! will push a frame???
+ return TraceStub(pRealAddr, trace);
+ }
+ else if (stub->IsMulticastDelegate())
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "StubLinkStubManager(MCDel)::DoTraceStub: stubStartAddress=0x%08x\n",
+ stubStartAddress));
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "StubLinkStubManager(MCDel)::DoTraceStub: stub=0x%08x MGR_PUSH to entrypoint:0x%x\n", stub,
+ stub->GetEntryPoint()));
+
+ // If it's a MC delegate, then we want to set a BP & do a context-ful
+ // manager push, so that we can figure out if this call will be to a
+ // single multicast delegate or a multi multicast delegate
+ trace->InitForManagerPush(stubStartAddress, this);
+
+ return TRUE;
+ }
+ else if (stub->GetPatchOffset() == 0)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "StubLinkStubManager::DoTraceStub: patch offset is 0!\n"));
+
+ return FALSE;
+ }
+ else
+ {
+ trace->InitForFramePush((PCODE)stub->GetPatchAddress());
+
+ LOG_TRACE_DESTINATION(trace, stubStartAddress, "StubLinkStubManager::DoTraceStub");
+
+ return TRUE;
+ }
+}
+
+#ifndef DACCESS_COMPILE
+
+BOOL StubLinkStubManager::TraceManager(Thread *thread,
+ TraceDestination *trace,
+ T_CONTEXT *pContext,
+ BYTE **pRetAddr)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(return FALSE;);
+ }
+ CONTRACTL_END
+
+    // NOTE that we're assuming that this will be called if and ONLY if
+    // we're examining a multicast delegate stub. Otherwise, we'll have to figure out
+    // what we're looking at.
+
+ BYTE *pbDel = 0;
+
+ LPVOID pc = (LPVOID)GetIP(pContext);
+
+ *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext);
+
+ pbDel = (BYTE *)StubManagerHelpers::GetThisPtr(pContext);
+
+ LOG((LF_CORDB,LL_INFO10000, "SLSM:TM at 0x%x, retAddr is 0x%x\n", pc, (*pRetAddr)));
+
+ return DelegateInvokeStubManager::TraceDelegateObject(pbDel, trace);
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+// -------------------------------------------------------
+// Stub manager for thunks.
+//
+// Note, the only reason we have this stub manager is so that we can recognize UMEntryThunks for IsTransitionStub. If it
+// turns out that having a full-blown stub manager for these things causes problems elsewhere, then we can just attach
+// a range list to the thunk heap and have IsTransitionStub check that after checking with the main stub manager.
+// -------------------------------------------------------
+
+SPTR_IMPL(ThunkHeapStubManager, ThunkHeapStubManager, g_pManager);
+
+#ifndef DACCESS_COMPILE
+
+/* static */
+void ThunkHeapStubManager::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ g_pManager = new ThunkHeapStubManager();
+ StubManager::AddStubManager(g_pManager);
+}
+
+#endif // !DACCESS_COMPILE
+
+BOOL ThunkHeapStubManager::CheckIsStub_Internal(PCODE stubStartAddress)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+    // It's a stub if it's in our heap's range.
+ return GetRangeList()->IsInRange(stubStartAddress);
+}
+
+BOOL ThunkHeapStubManager::DoTraceStub(PCODE stubStartAddress,
+ TraceDestination *trace)
+{
+ LIMITED_METHOD_CONTRACT;
+    // We never trace through these stubs when stepping through managed code. The only reason we have this stub manager
+    // is so that IsTransitionStub can recognize UMEntryThunks.
+ return FALSE;
+}
+
+// -------------------------------------------------------
+// JumpStub stubs
+//
+// Stub manager for jump stubs created by ExecutionManager::jumpStub()
+// These are currently used only on the 64-bit targets IA64 and AMD64
+//
+// -------------------------------------------------------
+
+SPTR_IMPL(JumpStubStubManager, JumpStubStubManager, g_pManager);
+
+#ifndef DACCESS_COMPILE
+/* static */
+void JumpStubStubManager::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ g_pManager = new JumpStubStubManager();
+ StubManager::AddStubManager(g_pManager);
+}
+#endif // #ifndef DACCESS_COMPILE
+
+BOOL JumpStubStubManager::CheckIsStub_Internal(PCODE stubStartAddress)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+    // Forwarded here from RangeSectionStubManager
+ return FALSE;
+}
+
+BOOL JumpStubStubManager::DoTraceStub(PCODE stubStartAddress,
+ TraceDestination *trace)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PCODE jumpTarget = decodeBackToBackJump(stubStartAddress);
+ trace->InitForStub(jumpTarget);
+
+ LOG_TRACE_DESTINATION(trace, stubStartAddress, "JumpStubStubManager::DoTraceStub");
+
+ return TRUE;
+}
+
+//
+// Stub manager for code sections. It forwards the query to the more appropriate
+// stub manager, or handles the query itself.
+//
+
+SPTR_IMPL(RangeSectionStubManager, RangeSectionStubManager, g_pManager);
+
+#ifndef DACCESS_COMPILE
+/* static */
+void RangeSectionStubManager::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ g_pManager = new RangeSectionStubManager();
+ StubManager::AddStubManager(g_pManager);
+}
+#endif // #ifndef DACCESS_COMPILE
+
+BOOL RangeSectionStubManager::CheckIsStub_Internal(PCODE stubStartAddress)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ switch (GetStubKind(stubStartAddress))
+ {
+ case STUB_CODE_BLOCK_PRECODE:
+ case STUB_CODE_BLOCK_JUMPSTUB:
+ case STUB_CODE_BLOCK_STUBLINK:
+ case STUB_CODE_BLOCK_VIRTUAL_METHOD_THUNK:
+ case STUB_CODE_BLOCK_EXTERNAL_METHOD_THUNK:
+ case STUB_CODE_BLOCK_METHOD_CALL_THUNK:
+ return TRUE;
+ default:
+ break;
+ }
+
+ return FALSE;
+}
+
+BOOL RangeSectionStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ switch (GetStubKind(stubStartAddress))
+ {
+ case STUB_CODE_BLOCK_PRECODE:
+ return PrecodeStubManager::g_pManager->DoTraceStub(stubStartAddress, trace);
+
+ case STUB_CODE_BLOCK_JUMPSTUB:
+ return JumpStubStubManager::g_pManager->DoTraceStub(stubStartAddress, trace);
+
+ case STUB_CODE_BLOCK_STUBLINK:
+ return StubLinkStubManager::g_pManager->DoTraceStub(stubStartAddress, trace);
+
+ case STUB_CODE_BLOCK_VIRTUAL_METHOD_THUNK:
+ {
+ PCODE pTarget = GetMethodThunkTarget(stubStartAddress);
+ if (pTarget == ExecutionManager::FindZapModule(stubStartAddress)->
+ GetNGenLayoutInfo()->m_pVirtualImportFixupJumpStub)
+ {
+#ifdef DACCESS_COMPILE
+ DacNotImpl();
+#else
+ trace->InitForManagerPush(GetEEFuncEntryPoint(VirtualMethodFixupPatchLabel), this);
+#endif
+ }
+ else
+ {
+ trace->InitForStub(pTarget);
+ }
+ return TRUE;
+ }
+
+ case STUB_CODE_BLOCK_EXTERNAL_METHOD_THUNK:
+ {
+ PCODE pTarget = GetMethodThunkTarget(stubStartAddress);
+ if (pTarget != ExecutionManager::FindZapModule(stubStartAddress)->
+ GetNGenLayoutInfo()->m_pExternalMethodFixupJumpStub)
+ {
+ trace->InitForStub(pTarget);
+ return TRUE;
+ }
+ }
+
+ __fallthrough;
+
+ case STUB_CODE_BLOCK_METHOD_CALL_THUNK:
+#ifdef DACCESS_COMPILE
+ DacNotImpl();
+#else
+ trace->InitForManagerPush(GetEEFuncEntryPoint(ExternalMethodFixupPatchLabel), this);
+#endif
+ return TRUE;
+
+ default:
+ break;
+ }
+
+ return FALSE;
+}
+
+#ifndef DACCESS_COMPILE
+BOOL RangeSectionStubManager::TraceManager(Thread *thread,
+ TraceDestination *trace,
+ CONTEXT *pContext,
+ BYTE **pRetAddr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Both virtual and external import thunks have the same structure. We can use
+ // common code to handle them.
+ _ASSERTE(GetIP(pContext) == GetEEFuncEntryPoint(VirtualMethodFixupPatchLabel)
+ || GetIP(pContext) == GetEEFuncEntryPoint(ExternalMethodFixupPatchLabel));
+
+ *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext);
+
+ PCODE target = StubManagerHelpers::GetTailCallTarget(pContext);
+ trace->InitForStub(target);
+ return TRUE;
+}
+#endif
+
+PCODE RangeSectionStubManager::GetMethodThunkTarget(PCODE stubStartAddress)
+{
+ WRAPPER_NO_CONTRACT;
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ return rel32Decode(stubStartAddress+1);
+#elif defined(_TARGET_ARM_)
+ TADDR pInstr = PCODEToPINSTR(stubStartAddress);
+ return *dac_cast<PTR_PCODE>(pInstr + 2 * sizeof(DWORD));
+#else
+ PORTABILITY_ASSERT("RangeSectionStubManager::GetMethodThunkTarget");
+ return NULL;
+#endif
+}
+
+#ifdef DACCESS_COMPILE
+LPCWSTR RangeSectionStubManager::GetStubManagerName(PCODE addr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ switch (GetStubKind(addr))
+ {
+ case STUB_CODE_BLOCK_PRECODE:
+ return W("MethodDescPrestub");
+
+ case STUB_CODE_BLOCK_JUMPSTUB:
+ return W("JumpStub");
+
+ case STUB_CODE_BLOCK_STUBLINK:
+ return W("StubLinkStub");
+
+ case STUB_CODE_BLOCK_VIRTUAL_METHOD_THUNK:
+ return W("VirtualMethodThunk");
+
+ case STUB_CODE_BLOCK_EXTERNAL_METHOD_THUNK:
+ return W("ExternalMethodThunk");
+
+ case STUB_CODE_BLOCK_METHOD_CALL_THUNK:
+ return W("MethodCallThunk");
+
+ default:
+ break;
+ }
+
+ return W("UnknownRangeSectionStub");
+}
+#endif // DACCESS_COMPILE
+
+StubCodeBlockKind
+RangeSectionStubManager::GetStubKind(PCODE stubStartAddress)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ RangeSection * pRS = ExecutionManager::FindCodeRange(stubStartAddress, ExecutionManager::ScanReaderLock);
+ if (pRS == NULL)
+ return STUB_CODE_BLOCK_UNKNOWN;
+
+ return pRS->pjit->GetStubCodeBlockKind(pRS, stubStartAddress);
+}
+
+//
+// This is the stub manager for IL stubs.
+//
+
+#ifndef DACCESS_COMPILE
+
+/* static */
+void ILStubManager::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ StubManager::AddStubManager(new ILStubManager());
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+BOOL ILStubManager::CheckIsStub_Internal(PCODE stubStartAddress)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ MethodDesc *pMD = ExecutionManager::GetCodeMethodDesc(stubStartAddress);
+
+ return (pMD != NULL) && pMD->IsILStub();
+}
+
+BOOL ILStubManager::DoTraceStub(PCODE stubStartAddress,
+ TraceDestination *trace)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "ILStubManager::DoTraceStub called\n"));
+
+#ifndef DACCESS_COMPILE
+
+ PCODE traceDestination = NULL;
+
+#ifdef FEATURE_STUBS_AS_IL
+ MethodDesc* pStubMD = ExecutionManager::GetCodeMethodDesc(stubStartAddress);
+ if (pStubMD != NULL && pStubMD->AsDynamicMethodDesc()->IsMulticastStub())
+ {
+ traceDestination = GetEEFuncEntryPoint(StubHelpers::MulticastDebuggerTraceHelper);
+ }
+ else
+#endif // FEATURE_STUBS_AS_IL
+ {
+ // This call is going out to unmanaged code, either through pinvoke or COM interop.
+ traceDestination = stubStartAddress;
+ }
+
+ trace->InitForManagerPush(traceDestination, this);
+ LOG_TRACE_DESTINATION(trace, traceDestination, "ILStubManager::DoTraceStub");
+
+ return TRUE;
+
+#else // !DACCESS_COMPILE
+ trace->InitForOther(NULL);
+ return FALSE;
+
+#endif // !DACCESS_COMPILE
+}
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_COMINTEROP
+PCODE ILStubManager::GetCOMTarget(Object *pThis, ComPlusCallInfo *pComPlusCallInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // calculate the target interface pointer
+ SafeComHolder<IUnknown> pUnk;
+
+ OBJECTREF oref = ObjectToOBJECTREF(pThis);
+ GCPROTECT_BEGIN(oref);
+ pUnk = ComObject::GetComIPFromRCWThrowing(&oref, pComPlusCallInfo->m_pInterfaceMT);
+ GCPROTECT_END();
+
+ LPVOID *lpVtbl = *(LPVOID **)(IUnknown *)pUnk;
+
+ PCODE target = (PCODE)lpVtbl[pComPlusCallInfo->m_cachedComSlot];
+ return target;
+}
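+
+// The double indirection above follows the standard COM object layout:
+// an interface pointer points at a vtable pointer, which points at the
+// slot array. A minimal sketch of the same fishing expedition:
+//
+//     IUnknown *pItf = ...;               // interface pointer
+//     LPVOID *lpVtbl = *(LPVOID **)pItf;  // first field is the vtable
+//     PCODE slot = (PCODE)lpVtbl[n];      // entry point of the n-th method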
+
+// This function should return the same result as StubHelpers::GetWinRTFactoryObject followed by
+// ILStubManager::GetCOMTarget. The difference is that it does not allocate managed memory, so it
+// does not trigger GC.
+//
+// The reason why GC (and potentially a stack walk for other purposes, such as exception handling)
+// would be problematic is that we are stopped at the first instruction of an IL stub which is
+// not a GC-safe point. Technically, the function still has the GC_TRIGGERS contract but we should
+// not see GC in practice here without allocating.
+//
+// Note that the GC suspension logic detects the debugger-is-attached-at-GC-unsafe-point case and
+// will back off and retry. This means that it suffices to ensure that this thread does not trigger
+// GC; allocations on other threads will wait and not cause major trouble.
+PCODE ILStubManager::GetWinRTFactoryTarget(ComPlusCallMethodDesc *pCMD)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ MethodTable *pMT = pCMD->GetMethodTable();
+
+ // GetComClassFactory could load types and trigger GC, get class name manually
+ InlineSString<DEFAULT_NONSTACK_CLASSNAME_SIZE> ssClassName;
+ pMT->_GetFullyQualifiedNameForClass(ssClassName);
+
+ IID iid;
+ pCMD->m_pComPlusCallInfo->m_pInterfaceMT->GetGuid(&iid, FALSE, FALSE);
+
+ SafeComHolder<IInspectable> pFactory;
+ {
+ GCX_PREEMP();
+ if (SUCCEEDED(RoGetActivationFactory(WinRtStringRef(ssClassName.GetUnicode(), ssClassName.GetCount()), iid, &pFactory)))
+ {
+ LPVOID *lpVtbl = *(LPVOID **)(IUnknown *)pFactory;
+ return (PCODE)lpVtbl[pCMD->m_pComPlusCallInfo->m_cachedComSlot];
+ }
+ }
+
+ return NULL;
+}
+#endif // FEATURE_COMINTEROP
+
+#ifndef CROSSGEN_COMPILE
+BOOL ILStubManager::TraceManager(Thread *thread,
+ TraceDestination *trace,
+ T_CONTEXT *pContext,
+ BYTE **pRetAddr)
+{
+ // See code:ILStubCache.CreateNewMethodDesc for the code that sets flags on stub MDs
+
+ PCODE stubIP = GetIP(pContext);
+ *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext);
+
+#ifdef FEATURE_STUBS_AS_IL
+ if (stubIP == GetEEFuncEntryPoint(StubHelpers::MulticastDebuggerTraceHelper))
+ {
+ stubIP = (PCODE)*pRetAddr;
+ *pRetAddr = (BYTE*)StubManagerHelpers::GetRetAddrFromMulticastILStubFrame(pContext);
+ }
+#endif
+
+ DynamicMethodDesc *pStubMD = Entry2MethodDesc(stubIP, NULL)->AsDynamicMethodDesc();
+
+ TADDR arg = StubManagerHelpers::GetHiddenArg(pContext);
+
+ Object * pThis = StubManagerHelpers::GetThisPtr(pContext);
+
+ // See code:ILStubCache.CreateNewMethodDesc for the code that sets flags on stub MDs
+ PCODE target;
+
+#ifdef FEATURE_STUBS_AS_IL
+ if(pStubMD->IsMulticastStub())
+ {
+ _ASSERTE(GetIP(pContext) == GetEEFuncEntryPoint(StubHelpers::MulticastDebuggerTraceHelper));
+
+ int delegateCount = (int)StubManagerHelpers::GetSecondArg(pContext);
+
+ int totalDelegateCount = (int)*(size_t*)((BYTE*)pThis + DelegateObject::GetOffsetOfInvocationCount());
+
+ if (delegateCount == totalDelegateCount)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "MF::TF: Executed all stubs, should return\n"));
+ // We've executed all the stubs, so we should return
+ return FALSE;
+ }
+ else
+ {
+ // We're going to execute stub delegateCount next, so go and grab it.
+ BYTE *pbDelInvocationList = *(BYTE **)((BYTE*)pThis + DelegateObject::GetOffsetOfInvocationList());
+
+ BYTE* pbDel = *(BYTE**)( ((ArrayBase *)pbDelInvocationList)->GetDataPtr() +
+ ((ArrayBase *)pbDelInvocationList)->GetComponentSize()*delegateCount);
+
+ _ASSERTE(pbDel);
+ return DelegateInvokeStubManager::TraceDelegateObject(pbDel, trace);
+ }
+
+ }
+ else
+#endif // FEATURE_STUBS_AS_IL
+ if (pStubMD->IsReverseStub())
+ {
+ if (pStubMD->IsStatic())
+ {
+            // This is a reverse P/Invoke stub; the argument is the UMEntryThunk
+ UMEntryThunk *pEntryThunk = (UMEntryThunk *)arg;
+ target = pEntryThunk->GetManagedTarget();
+
+ LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Reverse P/Invoke case 0x%x\n", target));
+ }
+ else
+ {
+            // This is a COM-to-CLR stub; the argument is the target
+ target = (PCODE)arg;
+ LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: COM-to-CLR case 0x%x\n", target));
+ }
+ trace->InitForManaged(target);
+ }
+ else if (pStubMD->IsDelegateStub())
+ {
+        // This is a forward delegate P/Invoke stub; the argument is undefined
+ DelegateObject *pDel = (DelegateObject *)pThis;
+ target = pDel->GetMethodPtrAux();
+
+ LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Forward delegate P/Invoke case 0x%x\n", target));
+ trace->InitForUnmanaged(target);
+ }
+ else if (pStubMD->IsCALLIStub())
+ {
+        // This is an unmanaged CALLI stub; the argument is the target
+ target = (PCODE)arg;
+
+ // The value is mangled on 64-bit
+#ifdef _TARGET_AMD64_
+ target = target >> 1; // call target is encoded as (addr << 1) | 1
+#endif // _TARGET_AMD64_
+
+ LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Unmanaged CALLI case 0x%x\n", target));
+ trace->InitForUnmanaged(target);
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (pStubMD->IsDelegateCOMStub())
+ {
+ // This is a delegate, but the target is COM.
+ DelegateObject *pDel = (DelegateObject *)pThis;
+ DelegateEEClass *pClass = (DelegateEEClass *)pDel->GetMethodTable()->GetClass();
+
+ target = GetCOMTarget(pThis, pClass->m_pComPlusCallInfo);
+
+ LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: CLR-to-COM via delegate case 0x%x\n", target));
+ trace->InitForUnmanaged(target);
+ }
+#endif // FEATURE_COMINTEROP
+ else
+ {
+        // This is either a direct forward P/Invoke or a CLR-to-COM call; the argument is the MethodDesc
+ MethodDesc *pMD = (MethodDesc *)arg;
+
+ if (pMD->IsNDirect())
+ {
+ target = (PCODE)((NDirectMethodDesc *)pMD)->GetNativeNDirectTarget();
+ LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Forward P/Invoke case 0x%x\n", target));
+ trace->InitForUnmanaged(target);
+ }
+#ifdef FEATURE_COMINTEROP
+ else
+ {
+ _ASSERTE(pMD->IsComPlusCall());
+ ComPlusCallMethodDesc *pCMD = (ComPlusCallMethodDesc *)pMD;
+
+ if (pCMD->IsStatic() || pCMD->IsCtor())
+ {
+                    // pThis is not the object we'll be calling; we need to get the factory object instead
+ MethodTable *pMTOfTypeToCreate = pCMD->GetMethodTable();
+ pThis = OBJECTREFToObject(GetAppDomain()->LookupWinRTFactoryObject(pMTOfTypeToCreate, GetCurrentCtxCookie()));
+
+ if (pThis == NULL)
+ {
+ // If we don't have an RCW of the factory object yet, don't create it. We would
+ // risk triggering GC which is not safe here because the IL stub is not at a GC
+ // safe point. Instead, query WinRT directly and release the factory immediately.
+ target = GetWinRTFactoryTarget(pCMD);
+
+ if (target != NULL)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: CLR-to-COM WinRT factory RCW-does-not-exist-yet case 0x%x\n", target));
+ trace->InitForUnmanaged(target);
+ }
+ }
+ }
+
+ if (pThis != NULL)
+ {
+ target = GetCOMTarget(pThis, pCMD->m_pComPlusCallInfo);
+
+ LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: CLR-to-COM case 0x%x\n", target));
+ trace->InitForUnmanaged(target);
+ }
+ }
+#endif // FEATURE_COMINTEROP
+ }
+
+ return TRUE;
+}
+#endif // !CROSSGEN_COMPILE
+#endif //!DACCESS_COMPILE
+
+// This is used to recognize GenericComPlusCallStub, VarargPInvokeStub, and GenericPInvokeCalliHelper.
+
+#ifndef DACCESS_COMPILE
+
+/* static */
+void InteropDispatchStubManager::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ StubManager::AddStubManager(new InteropDispatchStubManager());
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+PCODE TheGenericComplusCallStub(); // clrtocom.cpp
+
+#ifndef DACCESS_COMPILE
+static BOOL IsVarargPInvokeStub(PCODE stubStartAddress)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (stubStartAddress == GetEEFuncEntryPoint(VarargPInvokeStub))
+ return TRUE;
+
+#ifndef _TARGET_X86_
+ if (stubStartAddress == GetEEFuncEntryPoint(VarargPInvokeStub_RetBuffArg))
+ return TRUE;
+#endif
+
+ return FALSE;
+}
+#endif // #ifndef DACCESS_COMPILE
+
+BOOL InteropDispatchStubManager::CheckIsStub_Internal(PCODE stubStartAddress)
+{
+ WRAPPER_NO_CONTRACT;
+    //@dbgtodo dharvey implement DAC support
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_COMINTEROP
+ if (stubStartAddress == GetEEFuncEntryPoint(GenericComPlusCallStub))
+ {
+ return true;
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (IsVarargPInvokeStub(stubStartAddress))
+ {
+ return true;
+ }
+
+ if (stubStartAddress == GetEEFuncEntryPoint(GenericPInvokeCalliHelper))
+ {
+ return true;
+ }
+
+#endif // !DACCESS_COMPILE
+ return false;
+}
+
+BOOL InteropDispatchStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "InteropDispatchStubManager::DoTraceStub called\n"));
+
+#ifndef DACCESS_COMPILE
+ _ASSERTE(CheckIsStub_Internal(stubStartAddress));
+
+ trace->InitForManagerPush(stubStartAddress, this);
+
+ LOG_TRACE_DESTINATION(trace, stubStartAddress, "InteropDispatchStubManager::DoTraceStub");
+
+ return TRUE;
+
+#else // !DACCESS_COMPILE
+ trace->InitForOther(NULL);
+ return FALSE;
+
+#endif // !DACCESS_COMPILE
+}
+
+#ifndef DACCESS_COMPILE
+
+BOOL InteropDispatchStubManager::TraceManager(Thread *thread,
+ TraceDestination *trace,
+ T_CONTEXT *pContext,
+ BYTE **pRetAddr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext);
+
+ TADDR arg = StubManagerHelpers::GetHiddenArg(pContext);
+
+    // The IL stub may not exist at this point, so we init directly for the target (TODO?)
+
+ if (IsVarargPInvokeStub(GetIP(pContext)))
+ {
+ NDirectMethodDesc *pNMD = (NDirectMethodDesc *)arg;
+ _ASSERTE(pNMD->IsNDirect());
+ PCODE target = (PCODE)pNMD->GetNDirectTarget();
+
+ LOG((LF_CORDB, LL_INFO10000, "IDSM::TraceManager: Vararg P/Invoke case 0x%x\n", target));
+ trace->InitForUnmanaged(target);
+ }
+ else if (GetIP(pContext) == GetEEFuncEntryPoint(GenericPInvokeCalliHelper))
+ {
+ PCODE target = (PCODE)arg;
+ LOG((LF_CORDB, LL_INFO10000, "IDSM::TraceManager: Unmanaged CALLI case 0x%x\n", target));
+ trace->InitForUnmanaged(target);
+ }
+#ifdef FEATURE_COMINTEROP
+ else
+ {
+ ComPlusCallMethodDesc *pCMD = (ComPlusCallMethodDesc *)arg;
+ _ASSERTE(pCMD->IsComPlusCall());
+
+ Object * pThis = StubManagerHelpers::GetThisPtr(pContext);
+
+#ifdef FEATURE_REMOTING
+ if (pThis != NULL && pThis->IsTransparentProxy())
+ {
+        // We have a remoting proxy in the way
+#ifdef DACCESS_COMPILE
+ DacNotImpl();
+#else
+ trace->InitForFramePush(GetEEFuncEntryPoint(TransparentProxyStubPatchLabel));
+#endif
+ }
+ else
+#endif // FEATURE_REMOTING
+ {
+ if (!pCMD->m_pComPlusCallInfo->m_pInterfaceMT->IsComEventItfType() && (pCMD->m_pComPlusCallInfo->m_pILStub != NULL))
+ {
+ // Early-bound CLR->COM call - continue in the IL stub
+ trace->InitForStub(pCMD->m_pComPlusCallInfo->m_pILStub);
+ }
+ else
+ {
+ // Late-bound CLR->COM call - continue in target's IDispatch::Invoke
+ OBJECTREF oref = ObjectToOBJECTREF(pThis);
+ GCPROTECT_BEGIN(oref);
+
+ MethodTable *pItfMT = pCMD->m_pComPlusCallInfo->m_pInterfaceMT;
+ _ASSERTE(pItfMT->GetComInterfaceType() == ifDispatch);
+
+ SafeComHolder<IUnknown> pUnk = ComObject::GetComIPFromRCWThrowing(&oref, pItfMT);
+ LPVOID *lpVtbl = *(LPVOID **)(IUnknown *)pUnk;
+
+ PCODE target = (PCODE)lpVtbl[6]; // DISPATCH_INVOKE_SLOT;
+ LOG((LF_CORDB, LL_INFO10000, "CPSM::TraceManager: CLR-to-COM late-bound case 0x%x\n", target));
+ trace->InitForUnmanaged(target);
+
+ GCPROTECT_END();
+ }
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ return TRUE;
+}
+#endif //!DACCESS_COMPILE
+
+//
+// Since we don't generate delegate invoke stubs at runtime on IA64, we
+// can't use the StubLinkStubManager for these stubs. Instead, we create
+// an additional DelegateInvokeStubManager.
+//
+SPTR_IMPL(DelegateInvokeStubManager, DelegateInvokeStubManager, g_pManager);
+
+#ifndef DACCESS_COMPILE
+
+// static
+void DelegateInvokeStubManager::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ g_pManager = new DelegateInvokeStubManager();
+ StubManager::AddStubManager(g_pManager);
+}
+
+BOOL DelegateInvokeStubManager::AddStub(Stub* pStub)
+{
+ WRAPPER_NO_CONTRACT;
+ PCODE start = pStub->GetEntryPoint();
+
+ // We don't really care about the size here. We only stop in these stubs at the first instruction,
+ // so we'll never be asked to claim an address in the middle of a stub.
+ return GetRangeList()->AddRange((BYTE *)start, (BYTE *)start + 1, (LPVOID)start);
+}
+
+void DelegateInvokeStubManager::RemoveStub(Stub* pStub)
+{
+ WRAPPER_NO_CONTRACT;
+ PCODE start = pStub->GetEntryPoint();
+
+ // We don't really care about the size here. We only stop in these stubs at the first instruction,
+ // so we'll never be asked to claim an address in the middle of a stub.
+ GetRangeList()->RemoveRanges((LPVOID)start);
+}
+
+#endif
+
+BOOL DelegateInvokeStubManager::CheckIsStub_Internal(PCODE stubStartAddress)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ bool fIsStub = false;
+
+#ifndef DACCESS_COMPILE
+#ifndef _TARGET_X86_
+ fIsStub = fIsStub || (stubStartAddress == GetEEFuncEntryPoint(SinglecastDelegateInvokeStub));
+#endif
+#endif // !DACCESS_COMPILE
+
+ fIsStub = fIsStub || GetRangeList()->IsInRange(stubStartAddress);
+
+ return fIsStub;
+}
+
+BOOL DelegateInvokeStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "DelegateInvokeStubManager::DoTraceStub called\n"));
+
+ _ASSERTE(CheckIsStub_Internal(stubStartAddress));
+
+ // If it's a MC delegate, then we want to set a BP & do a context-ful
+ // manager push, so that we can figure out if this call will be to a
+ // single multicast delegate or a multi multicast delegate
+ trace->InitForManagerPush(stubStartAddress, this);
+
+ LOG_TRACE_DESTINATION(trace, stubStartAddress, "DelegateInvokeStubManager::DoTraceStub");
+
+ return TRUE;
+}
+
+#if !defined(DACCESS_COMPILE)
+
+BOOL DelegateInvokeStubManager::TraceManager(Thread *thread, TraceDestination *trace,
+ T_CONTEXT *pContext, BYTE **pRetAddr)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ PCODE destAddr;
+
+ PCODE pc;
+ pc = ::GetIP(pContext);
+
+ BYTE* pThis;
+ pThis = NULL;
+
+ // Retrieve the this pointer from the context.
+#if defined(_TARGET_X86_)
+ (*pRetAddr) = *(BYTE **)(size_t)(pContext->Esp);
+
+ pThis = (BYTE*)(size_t)(pContext->Ecx);
+
+ destAddr = *(PCODE*)(pThis + DelegateObject::GetOffsetOfMethodPtrAux());
+
+#elif defined(_TARGET_AMD64_)
+
+ // <TODO>
+ // We need to check whether the following is the correct return address.
+ // </TODO>
+ (*pRetAddr) = *(BYTE **)(size_t)(pContext->Rsp);
+
+ LOG((LF_CORDB, LL_INFO10000, "DISM:TM at 0x%p, retAddr is 0x%p\n", pc, (*pRetAddr)));
+
+ DELEGATEREF orDelegate;
+ if (GetEEFuncEntryPoint(SinglecastDelegateInvokeStub) == pc)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DISM::TraceManager: isSingle\n"));
+
+ orDelegate = (DELEGATEREF)ObjectToOBJECTREF((Object*)(size_t)pContext->Rcx); // The "this" pointer is rcx
+
+ // _methodPtr is where we are going to next. However, in ngen cases, we may have a shuffle thunk
+ // burned into the ngen image, in which case the shuffle thunk is not added to the range list of
+ // the DelegateInvokeStubManager. So we use _methodPtrAux as a fallback.
+ destAddr = orDelegate->GetMethodPtr();
+ if (StubManager::TraceStub(destAddr, trace))
+ {
+ LOG((LF_CORDB,LL_INFO10000, "DISM::TM: ppbDest: 0x%p\n", destAddr));
+ LOG((LF_CORDB,LL_INFO10000, "DISM::TM: res: 1, result type: %d\n", trace->GetTraceType()));
+ return TRUE;
+ }
+ }
+ else
+ {
+ // We get here if we are stopped at the beginning of a shuffle thunk.
+ // The next address we are going to is _methodPtrAux.
+ Stub* pStub = Stub::RecoverStub(pc);
+
+ // We use the patch offset field to indicate whether the stub has a hidden return buffer argument.
+ // This field is set in SetupShuffleThunk().
+ if (pStub->GetPatchOffset() != 0)
+ {
+ // This stub has a hidden return buffer argument.
+ orDelegate = (DELEGATEREF)ObjectToOBJECTREF((Object*)(size_t)(pContext->Rdx));
+ }
+ else
+ {
+ orDelegate = (DELEGATEREF)ObjectToOBJECTREF((Object*)(size_t)(pContext->Rcx));
+ }
+ }
+
+ destAddr = orDelegate->GetMethodPtrAux();
+#elif defined(_TARGET_ARM_)
+ (*pRetAddr) = (BYTE *)(size_t)(pContext->Lr);
+ pThis = (BYTE*)(size_t)(pContext->R0);
+
+ // Could be in the singlecast invoke stub (in which case the next destination is in _methodPtr) or a
+ // shuffle thunk (destination in _methodPtrAux).
+ int offsetOfNextDest;
+ if (pc == GetEEFuncEntryPoint(SinglecastDelegateInvokeStub))
+ offsetOfNextDest = DelegateObject::GetOffsetOfMethodPtr();
+ else
+ offsetOfNextDest = DelegateObject::GetOffsetOfMethodPtrAux();
+ destAddr = *(PCODE*)(pThis + offsetOfNextDest);
+#else
+ PORTABILITY_ASSERT("DelegateInvokeStubManager::TraceManager");
+ destAddr = NULL;
+#endif
+
+ LOG((LF_CORDB,LL_INFO10000, "DISM::TM: ppbDest: 0x%p\n", destAddr));
+
+ BOOL res = StubManager::TraceStub(destAddr, trace);
+ LOG((LF_CORDB,LL_INFO10000, "DISM::TM: res: %d, result type: %d\n", res, trace->GetTraceType()));
+
+ return res;
+}
+
+// static
+BOOL DelegateInvokeStubManager::TraceDelegateObject(BYTE* pbDel, TraceDestination *trace)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ BYTE **ppbDest = NULL;
+    // If we got here, it's because we're at the start of a delegate stub;
+    // we need to figure out what kind of delegate we're dealing with.
+
+ BYTE *pbDelInvocationList = *(BYTE **)(pbDel + DelegateObject::GetOffsetOfInvocationList());
+
+ LOG((LF_CORDB,LL_INFO10000, "DISM::TMI: invocationList: 0x%x\n", pbDelInvocationList));
+
+ if (pbDelInvocationList == NULL)
+ {
+ // null invocationList can be one of the following:
+        // Instance closed, Instance open non-virt, Instance open virtual, Static closed, Static open, Unmanaged FtnPtr
+ // Instance open virtual is complex and we need to figure out what to do (TODO).
+ // For the others the logic is the following:
+        // if _methodPtrAux is 0, the target is in _methodPtr; otherwise the target is in _methodPtrAux.
+
+ ppbDest = (BYTE **)(pbDel + DelegateObject::GetOffsetOfMethodPtrAux());
+
+ if (*ppbDest == NULL)
+ {
+ ppbDest = (BYTE **)(pbDel + DelegateObject::GetOffsetOfMethodPtr());
+
+ if (*ppbDest == NULL)
+ {
+ // it's not looking good, bail out
+ LOG((LF_CORDB,LL_INFO10000, "DISM(DelegateStub)::TM: can't trace into it\n"));
+ return FALSE;
+ }
+
+ }
+
+ LOG((LF_CORDB,LL_INFO10000, "DISM(DelegateStub)::TM: ppbDest: 0x%x *ppbDest:0x%x\n", ppbDest, *ppbDest));
+
+ BOOL res = StubManager::TraceStub((PCODE) (*ppbDest), trace);
+
+ LOG((LF_CORDB,LL_INFO10000, "DISM(MCDel)::TM: res: %d, result type: %d\n", res, trace->GetTraceType()));
+
+ return res;
+ }
+
+ // invocationList is not null, so it can be one of the following:
+ // Multicast, Static closed (special sig), Secure
+
+ // rule out the static with special sig
+ BYTE *pbCount = *(BYTE **)(pbDel + DelegateObject::GetOffsetOfInvocationCount());
+
+ if (!pbCount)
+ {
+        // it's a static closed delegate; the target lives in _methodPtrAux
+ ppbDest = (BYTE **)(pbDel + DelegateObject::GetOffsetOfMethodPtrAux());
+
+ if (*ppbDest == NULL)
+ {
+ // it's not looking good, bail out
+ LOG((LF_CORDB,LL_INFO10000, "DISM(DelegateStub)::TM: can't trace into it\n"));
+ return FALSE;
+ }
+
+ LOG((LF_CORDB,LL_INFO10000, "DISM(DelegateStub)::TM: ppbDest: 0x%x *ppbDest:0x%x\n", ppbDest, *ppbDest));
+
+ BOOL res = StubManager::TraceStub((PCODE) (*ppbDest), trace);
+
+ LOG((LF_CORDB,LL_INFO10000, "DISM(MCDel)::TM: res: %d, result type: %d\n", res, trace->GetTraceType()));
+
+ return res;
+ }
+
+ MethodTable *pType = *(MethodTable**)pbDelInvocationList;
+ if (pType->IsDelegate())
+ {
+        // this is a secure delegate. The target is hidden inside this field, so recurse in and pray...
+ return TraceDelegateObject(pbDelInvocationList, trace);
+ }
+
+    // Otherwise, we're going for the first invoke of the multicast case.
+    // In order to go to the correct spot, we just have to fish out
+    // slot 0 of the invocation list, figure out where that's going,
+    // and then put a breakpoint there...
+ pbDel = *(BYTE**)(((ArrayBase *)pbDelInvocationList)->GetDataPtr());
+ return TraceDelegateObject(pbDel, trace);
+}
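+
+// A summary sketch of the DelegateObject cases this routine fishes
+// through (derived from the reads above; field names follow the
+// accessors used):
+//
+//     _invocationList == NULL:
+//         target is _methodPtrAux if set, else _methodPtr
+//     _invocationList != NULL, _invocationCount == 0:
+//         static closed (special sig); target is _methodPtrAux
+//     _invocationList is itself a delegate:
+//         secure delegate; recurse into it
+//     otherwise:
+//         multicast; recurse into slot 0 of the invocation list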
+
+#endif // DACCESS_COMPILE
+
+
+#if !defined(DACCESS_COMPILE)
+
+// static
+void TailCallStubManager::Init()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ StubManager::AddStubManager(new TailCallStubManager());
+}
+
+bool TailCallStubManager::IsTailCallStubHelper(PCODE code)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return code == GetEEFuncEntryPoint(JIT_TailCall);
+}
+
+#endif // !DACCESS_COMPILE
+
+BOOL TailCallStubManager::CheckIsStub_Internal(PCODE stubStartAddress)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ bool fIsStub = false;
+
+#if !defined(DACCESS_COMPILE)
+ fIsStub = IsTailCallStubHelper(stubStartAddress);
+#endif // !DACCESS_COMPILE
+
+ return fIsStub;
+}
+
+#if !defined(DACCESS_COMPILE)
+
+#if defined(_TARGET_X86_)
+EXTERN_C void STDCALL JIT_TailCallLeave();
+EXTERN_C void STDCALL JIT_TailCallVSDLeave();
+#endif // _TARGET_X86_
+
+BOOL TailCallStubManager::TraceManager(Thread * pThread,
+ TraceDestination * pTrace,
+ T_CONTEXT * pContext,
+ BYTE ** ppRetAddr)
+{
+ WRAPPER_NO_CONTRACT;
+#if defined(_TARGET_X86_)
+ TADDR esp = GetSP(pContext);
+ TADDR ebp = GetFP(pContext);
+
+ // Check if we are stopped at the beginning of JIT_TailCall().
+ if (GetIP(pContext) == GetEEFuncEntryPoint(JIT_TailCall))
+ {
+ // There are two cases in JIT_TailCall(). The first one is a normal tail call.
+ // The second one is a tail call to a virtual method.
+ *ppRetAddr = *(reinterpret_cast<BYTE **>(ebp + sizeof(SIZE_T)));
+
+ // Check whether this is a VSD tail call.
+ SIZE_T flags = *(reinterpret_cast<SIZE_T *>(esp + JIT_TailCall_StackOffsetToFlags));
+ if (flags & 0x2)
+ {
+ // This is a VSD tail call.
+ pTrace->InitForManagerPush(GetEEFuncEntryPoint(JIT_TailCallVSDLeave), this);
+ return TRUE;
+ }
+ else
+ {
+ // This is not a VSD tail call.
+ pTrace->InitForManagerPush(GetEEFuncEntryPoint(JIT_TailCallLeave), this);
+ return TRUE;
+ }
+ }
+ else
+ {
+ if (GetIP(pContext) == GetEEFuncEntryPoint(JIT_TailCallLeave))
+ {
+ // This is the simple case. The tail call goes directly to the target. There won't be an
+ // explicit frame on the stack. We should be right at the return instruction which branches to
+ // the call target. The return address is stored in the second leafmost stack slot.
+ *ppRetAddr = *(reinterpret_cast<BYTE **>(esp + sizeof(SIZE_T)));
+ }
+ else
+ {
+ _ASSERTE(GetIP(pContext) == GetEEFuncEntryPoint(JIT_TailCallVSDLeave));
+
+ // This is the VSD case. The tail call goes through an assembly helper function which sets up
+ // and tears down an explicit frame. In this case, the return address is at the same place
+ // as on entry to JIT_TailCall().
+ *ppRetAddr = *(reinterpret_cast<BYTE **>(ebp + sizeof(SIZE_T)));
+ }
+
+ // In both cases, the target address is stored in the leafmost stack slot.
+ pTrace->InitForStub((PCODE)*reinterpret_cast<SIZE_T *>(esp));
+ return TRUE;
+ }
+
+#elif defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
+
+ _ASSERTE(GetIP(pContext) == GetEEFuncEntryPoint(JIT_TailCall));
+
+ // The target address is the second argument
+#ifdef _TARGET_AMD64_
+ PCODE target = (PCODE)pContext->Rdx;
+#else
+ PCODE target = (PCODE)pContext->R1;
+#endif
+ *ppRetAddr = reinterpret_cast<BYTE *>(target);
+ pTrace->InitForStub(target);
+ return TRUE;
+
+#else // !_TARGET_X86_ && !_TARGET_AMD64_ && !_TARGET_ARM_
+
+ _ASSERTE(!"TCSM::TM - TailCallStubManager should not be necessary on this platform");
+ return FALSE;
+
+#endif // _TARGET_X86_ || _TARGET_AMD64_ || _TARGET_ARM_
+}
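+
+// Illustrative summary only (the offsets restate the code above, which in
+// turn relies on the asm helpers' stack layout): at the first instruction
+// of JIT_TailCall() on x86,
+//
+//   [esp]                                    target address (leafmost slot)
+//   [esp + JIT_TailCall_StackOffsetToFlags]  flags; bit 0x2 set => VSD tail call
+//   [ebp + sizeof(SIZE_T)]                   caller's return address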
+
+#endif // !DACCESS_COMPILE
+
+BOOL TailCallStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
+{
+ WRAPPER_NO_CONTRACT;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "TailCallStubManager::DoTraceStub called\n"));
+
+ BOOL fResult = FALSE;
+
+ // Make sure we are stopped at the beginning of JIT_TailCall().
+ _ASSERTE(CheckIsStub_Internal(stubStartAddress));
+ trace->InitForManagerPush(stubStartAddress, this);
+ fResult = TRUE;
+
+ LOG_TRACE_DESTINATION(trace, stubStartAddress, "TailCallStubManager::DoTraceStub");
+ return fResult;
+}
+
+
+#ifdef DACCESS_COMPILE
+
+void
+PrecodeStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+ DAC_ENUM_VTHIS();
+ EMEM_OUT(("MEM: %p PrecodeStubManager\n", dac_cast<TADDR>(this)));
+}
+
+void
+StubLinkStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+ DAC_ENUM_VTHIS();
+ EMEM_OUT(("MEM: %p StubLinkStubManager\n", dac_cast<TADDR>(this)));
+ GetRangeList()->EnumMemoryRegions(flags);
+}
+
+void
+ThunkHeapStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+ DAC_ENUM_VTHIS();
+ EMEM_OUT(("MEM: %p ThunkHeapStubManager\n", dac_cast<TADDR>(this)));
+ GetRangeList()->EnumMemoryRegions(flags);
+}
+
+void
+JumpStubStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+ DAC_ENUM_VTHIS();
+ EMEM_OUT(("MEM: %p JumpStubStubManager\n", dac_cast<TADDR>(this)));
+}
+
+#ifdef FEATURE_PREJIT
+void
+RangeSectionStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+ DAC_ENUM_VTHIS();
+ EMEM_OUT(("MEM: %p RangeSectionStubManager\n", dac_cast<TADDR>(this)));
+}
+#endif
+
+void
+ILStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+ DAC_ENUM_VTHIS();
+ EMEM_OUT(("MEM: %p ILStubManager\n", dac_cast<TADDR>(this)));
+}
+
+void
+InteropDispatchStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+ DAC_ENUM_VTHIS();
+ EMEM_OUT(("MEM: %p InteropDispatchStubManager\n", dac_cast<TADDR>(this)));
+}
+
+void
+DelegateInvokeStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+ DAC_ENUM_VTHIS();
+ EMEM_OUT(("MEM: %p DelegateInvokeStubManager\n", dac_cast<TADDR>(this)));
+ GetRangeList()->EnumMemoryRegions(flags);
+}
+
+void
+VirtualCallStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+ DAC_ENUM_VTHIS();
+ EMEM_OUT(("MEM: %p VirtualCallStubManager\n", dac_cast<TADDR>(this)));
+ GetLookupRangeList()->EnumMemoryRegions(flags);
+ GetResolveRangeList()->EnumMemoryRegions(flags);
+ GetDispatchRangeList()->EnumMemoryRegions(flags);
+ GetCacheEntryRangeList()->EnumMemoryRegions(flags);
+}
+
+void TailCallStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+ DAC_ENUM_VTHIS();
+ EMEM_OUT(("MEM: %p TailCallStubManager\n", dac_cast<TADDR>(this)));
+}
+
+#endif // #ifdef DACCESS_COMPILE
+
diff --git a/src/vm/stubmgr.h b/src/vm/stubmgr.h
new file mode 100644
index 0000000000..70405ddf50
--- /dev/null
+++ b/src/vm/stubmgr.h
@@ -0,0 +1,995 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// StubMgr.h
+//
+
+//
+// The stub manager exists so that the debugger can accurately step through
+// the myriad stubs & wrappers which exist in the EE, without imposing undue
+// overhead on the stubs themselves.
+//
+// Each type of stub (except those which the debugger can treat as atomic operations)
+// needs to have a stub manager to represent it. The stub manager is responsible for
+// (a) identifying the stub as such, and
+// (b) tracing into the stub & reporting what the stub will call. This
+// report can consist of
+// (i) a managed code address
+// (ii) an unmanaged code address
+// (iii) another stub address
+// (iv) a "frame patch" address - that is, an address in the stub,
+// which the debugger can patch. When the patch is hit, the debugger
+// will query the topmost frame to trace itself. (Thus this is
+// a way of deferring the trace logic to the frame which the stub
+// will push.)
+//
+// The set of stub managers is extensible, but should be kept to a reasonable number
+// as they are currently linearly searched & queried for each stub.
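+//
+// Illustrative sketch only (not compiled): the debugger's step-in logic is
+// expected to drive these interfaces roughly as follows; 'ip' is a
+// hypothetical code address the debugger is about to step into.
+//
+//   TraceDestination trace;
+//   if (StubManager::TraceStub(ip, &trace) && StubManager::FollowTrace(&trace))
+//   {
+//       // trace now names managed/unmanaged code, an unjitted method, or a
+//       // patch address (TRACE_FRAME_PUSH / TRACE_MGR_PUSH) to stop at.
+//   }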
+//
+
+
+#ifndef __stubmgr_h__
+#define __stubmgr_h__
+
+#include "simplerwlock.hpp"
+
+// When 'TraceStub' returns, it gives the address of the stub's 'target'.
+// TraceType indicates what this 'target' is.
+enum TraceType
+{
+ TRACE_ENTRY_STUB, // Stub goes to an unmanaged entry stub
+ TRACE_STUB, // Stub goes to another stub
+ TRACE_UNMANAGED, // Stub goes to unmanaged code
+ TRACE_MANAGED, // Stub goes to Jitted code
+ TRACE_UNJITTED_METHOD, // The prestub; since there is no code yet, the address will actually be a MethodDesc*
+
+ TRACE_FRAME_PUSH, // Don't know where stub goes, stop at address, and then ask the frame that is on the stack
+ TRACE_MGR_PUSH, // Don't know where stub goes, stop at address then call TraceManager() below to find out
+
+ TRACE_OTHER // We are going somewhere you can't step into (eg. ee helper function)
+};
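+
+// Illustrative sketch only (not compiled): consumers are expected to
+// dispatch on the trace type roughly like this (FollowTrace below performs
+// the TRACE_STUB loop); the handling shown per case is a paraphrase.
+//
+//   switch (trace->GetTraceType())
+//   {
+//       case TRACE_STUB:            // feed trace->GetAddress() back into TraceStub()
+//       case TRACE_MANAGED:         // place a patch at trace->GetAddress()
+//       case TRACE_UNJITTED_METHOD: // bind an IL patch via trace->GetMethodDesc()
+//       case TRACE_MGR_PUSH:        // patch, then call the manager's TraceManager()
+//       ...
+//   }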
+
+class StubManager;
+class SString;
+
+class DebuggerRCThread;
+
+enum StubCodeBlockKind : int;
+
+// A TraceDestination describes where a stub is going to call. This can be used by the debugger's Step-In
+// functionality to skip through stubs and place a patch directly at a call's target.
+// TraceDestinations are supplied by the stub managers.
+class TraceDestination
+{
+public:
+ friend class DebuggerRCThread;
+
+ TraceDestination() { }
+
+#ifdef _DEBUG
+ // Get a string representation of this TraceDestination
+ // Uses the supplied buffer to store the memory (or may return a string literal).
+ // This will also print the TD's arguments.
+ const WCHAR * DbgToString(SString &buffer);
+#endif
+
+ // Initialize for unmanaged code.
+ // The addr is in unmanaged code. Used for Step-in from managed to native.
+ void InitForUnmanaged(PCODE addr)
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ this->type = TRACE_UNMANAGED;
+ this->address = addr;
+ this->stubManager = NULL;
+ }
+
+ // The addr is inside jitted code (eg, there's a JitManager that will claim it)
+ void InitForManaged(PCODE addr)
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ this->type = TRACE_MANAGED;
+ this->address = addr;
+ this->stubManager = NULL;
+ }
+
+ // Initialize for an unmanaged entry stub.
+ void InitForUnmanagedStub(PCODE addr)
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ this->type = TRACE_ENTRY_STUB;
+ this->address = addr;
+ this->stubManager = NULL;
+ }
+
+ // Initialize for a stub.
+ void InitForStub(PCODE addr)
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ this->type = TRACE_STUB;
+ this->address = addr;
+ this->stubManager = NULL;
+ }
+
+ // Init for a managed unjitted method.
+ // This will place an IL patch that will get bound when the debugger gets a Jit complete
+ // notification for this method.
+ // If pDesc is a wrapper methoddesc, we will unwrap it.
+ void InitForUnjittedMethod(MethodDesc * pDesc);
+
+ // Place a patch at the given addr, and then when it's hit,
+ // call pStubManager->TraceManager() to get the next TraceDestination.
+ void InitForManagerPush(PCODE addr, StubManager * pStubManager)
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ this->type = TRACE_MGR_PUSH;
+ this->address = addr;
+ this->stubManager = pStubManager;
+ }
+
+ // Place a patch at the given addr, and then when it's hit
+ // call GetThread()->GetFrame()->TraceFrame() to get the next TraceDestination.
+ // This address must be safe to run a callstack at.
+ void InitForFramePush(PCODE addr)
+ {
+ this->type = TRACE_FRAME_PUSH;
+ this->address = addr;
+ this->stubManager = NULL;
+ }
+
+ // Nobody recognized the target address. We will not be able to step-in to it.
+ // This is ok if the target just calls into mscorwks (such as an Fcall) because
+ // there's no managed code to step in to, and we don't support debugging the CLR
+ // itself, so there's no native code to step into either.
+ void InitForOther(PCODE addr)
+ {
+ this->type = TRACE_OTHER;
+ this->address = addr;
+ this->stubManager = NULL;
+ }
+
+ // Accessors
+ TraceType GetTraceType() { return type; }
+ PCODE GetAddress()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(type != TRACE_UNJITTED_METHOD);
+ return address;
+ }
+ MethodDesc* GetMethodDesc()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(type == TRACE_UNJITTED_METHOD);
+ return pDesc;
+ }
+
+ StubManager * GetStubManager()
+ {
+ return stubManager;
+ }
+
+ // Expose this b/c DebuggerPatchTable::AddPatchForAddress() needs it.
+ // Ideally we'd get rid of this.
+ void Bad_SetTraceType(TraceType t)
+ {
+ this->type = t;
+ }
+private:
+ TraceType type; // The kind of code the stub is going to
+ PCODE address; // Where the stub is going
+ StubManager *stubManager; // The manager that claims this stub
+ MethodDesc *pDesc;
+};
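+
+// Illustrative sketch only (not compiled): a stub manager's DoTraceStub()
+// typically decodes its stub and fills in the TraceDestination; the
+// MyStubManager and GetStubTarget names are hypothetical.
+//
+//   BOOL MyStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
+//   {
+//       PCODE target = GetStubTarget(stubStartAddress); // decode the stub
+//       trace->InitForStub(target); // or InitForManaged / InitForManagerPush, etc.
+//       return TRUE;
+//   }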
+
+// For logging
+#ifdef LOGGING
+ void LogTraceDestination(const char * szHint, PCODE stubAddr, TraceDestination * pTrace);
+ #define LOG_TRACE_DESTINATION(_tracedestination, stubAddr, _stHint) LogTraceDestination(_stHint, stubAddr, _tracedestination)
+#else
+ #define LOG_TRACE_DESTINATION(_tracedestination, stubAddr, _stHint)
+#endif
+
+typedef VPTR(class StubManager) PTR_StubManager;
+
+class StubManager
+{
+ friend class StubManagerIterator;
+ VPTR_BASE_VTABLE_CLASS(StubManager)
+
+ public:
+ // Startup and shutdown the global stubmanager service.
+ static void InitializeStubManagers();
+ static void TerminateStubManagers();
+
+ // Does any stub manager recognize this EIP?
+ static BOOL IsStub(PCODE stubAddress)
+ {
+ WRAPPER_NO_CONTRACT;
+ return FindStubManager(stubAddress) != NULL;
+ }
+
+ // Find stub manager for given code address
+ static PTR_StubManager FindStubManager(PCODE stubAddress);
+
+ // Look for stubAddress; if found, return TRUE and set 'trace' to the stub's target.
+ static BOOL TraceStub(PCODE stubAddress, TraceDestination *trace);
+
+ // If 'trace' indicates TRACE_STUB, keep calling TraceStub on 'trace' until you get out of all stubs.
+ // Returns true if successful.
+ static BOOL FollowTrace(TraceDestination *trace);
+
+#ifdef DACCESS_COMPILE
+ static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ static void AddStubManager(StubManager *mgr);
+
+ // NOTE: Very important when using this. It is not thread safe, except in this very
+ // limited scenario: the thread must have the runtime suspended.
+ static void UnlinkStubManager(StubManager *mgr);
+
+#ifndef DACCESS_COMPILE
+ StubManager();
+ virtual ~StubManager();
+#endif
+
+
+#ifdef _DEBUG
+ // Debug helper to help identify stub-managers. Make it pure to force stub managers to implement it.
+ virtual const char * DbgGetName() = 0;
+#endif
+
+ // Only stub managers that return 'TRACE_MGR_PUSH' as a trace type need to implement this function.
+ // Fills in 'trace' (the target) and 'pRetAddr' (the return address of the method that called the stub;
+ // this is needed as a 'fall back' so that the debugger can at least stop when the stub returns).
+ virtual BOOL TraceManager(Thread *thread, TraceDestination *trace,
+ T_CONTEXT *pContext, BYTE **pRetAddr)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!"Default impl of TraceManager should never be called!");
+ return FALSE;
+ }
+
+ // The worker for IsStub. This calls CheckIsStub_Internal, but wraps it w/
+ // a try-catch.
+ BOOL CheckIsStub_Worker(PCODE stubStartAddress);
+
+
+
+#ifdef _DEBUG
+public:
+ //-----------------------------------------------------------------------------
+ // Debugging stub-manager bugs is very painful. You need to figure out
+ // how you got to where you are and which stub manager is at fault.
+ // To help with this, we track a rolling log so that we can give very
+ // informative asserts. This log is not thread-safe, but we really only expect
+ // a single stub-manager usage at a time.
+ //
+ // A stub manager for a step-in operation may be used across
+ // both the helper thread and then the managed thread doing the step-in.
+ // These threads will coordinate to have exclusive access (helper will only access
+ // when stopped; and managed thread will only access when running).
+ //
+ // It's also possible (but rare) for a single thread to have multiple step-in operations.
+ // Since that's so rare, there is no present need to expand our logging to support it.
+ //-----------------------------------------------------------------------------
+
+
+ static bool IsStubLoggingEnabled();
+
+ // Call to reset the log. This is used at the start of a new step-operation.
+ static void DbgBeginLog(TADDR addrCallInstruction, TADDR addrCallTarget);
+ static void DbgFinishLog();
+
+ // Log arbitrary string. This is a nop if it's outside the Begin/Finish window.
+ // We could consider making each log entry type-safe (and thus avoid the string operations).
+ static void DbgWriteLog(const CHAR *format, ...);
+
+ // Get the log as a string.
+ static void DbgGetLog(SString * pStringOut);
+
+protected:
+ // Implement log as a SString.
+ static SString * s_pDbgStubManagerLog;
+
+ static CrstStatic s_DbgLogCrst;
+
+#endif
+
+
+protected:
+
+ // Each stub manager implements this.
+ // This may throw, AV, etc., depending on the implementation. This should not
+ // be called directly unless you know exactly what you're doing.
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress) = 0;
+
+ // The worker for TraceStub
+ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace) = 0;
+
+#ifdef _DEBUG_IMPL
+ static BOOL IsSingleOwner(PCODE stubAddress, StubManager * pOwner);
+#endif
+
+#ifdef DACCESS_COMPILE
+ virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+
+public:
+ // This is used by DAC to provide more information on who owns a stub.
+ virtual LPCWSTR GetStubManagerName(PCODE addr) = 0;
+#endif
+
+private:
+ SPTR_DECL(StubManager, g_pFirstManager);
+ PTR_StubManager m_pNextManager;
+
+ static CrstStatic s_StubManagerListCrst;
+};
+
+// -------------------------------------------------------
+// This just wraps the RangeList methods in a read or
+// write lock depending on the operation.
+// -------------------------------------------------------
+
+class LockedRangeList : public RangeList
+{
+ public:
+ VPTR_VTABLE_CLASS(LockedRangeList, RangeList)
+
+ LockedRangeList() : RangeList(), m_RangeListRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ ~LockedRangeList()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ protected:
+
+ virtual BOOL AddRangeWorker(const BYTE *start, const BYTE *end, void *id)
+ {
+ WRAPPER_NO_CONTRACT;
+ SimpleWriteLockHolder lh(&m_RangeListRWLock);
+ return RangeList::AddRangeWorker(start,end,id);
+ }
+
+ virtual void RemoveRangesWorker(void *id, const BYTE *start = NULL, const BYTE *end = NULL)
+ {
+ WRAPPER_NO_CONTRACT;
+ SimpleWriteLockHolder lh(&m_RangeListRWLock);
+ RangeList::RemoveRangesWorker(id,start,end);
+ }
+
+ virtual BOOL IsInRangeWorker(TADDR address, TADDR *pID = NULL)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ SimpleReadLockHolder lh(&m_RangeListRWLock);
+ return RangeList::IsInRangeWorker(address, pID);
+ }
+
+ SimpleRWLock m_RangeListRWLock;
+};
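+
+// Illustrative sketch only (not compiled): managers that allocate stubs at
+// runtime record each stub's extent in a LockedRangeList and answer
+// CheckIsStub_Internal() with a range query; 'start' and 'end' are
+// hypothetical bounds of the stub's code.
+//
+//   m_rangeList.AddRange(start, end, (void*)this);      // when the stub is created
+//   return GetRangeList()->IsInRange(stubStartAddress); // in CheckIsStub_Internal()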
+
+//-----------------------------------------------------------
+// Stub manager for the prestub. Although there is just one, it has
+// unique behavior so it gets its own stub manager.
+//-----------------------------------------------------------
+class ThePreStubManager : public StubManager
+{
+ VPTR_VTABLE_CLASS(ThePreStubManager, StubManager)
+
+ public:
+#ifndef DACCESS_COMPILE
+ ThePreStubManager() { LIMITED_METHOD_CONTRACT; }
+#endif
+
+#ifdef _DEBUG
+ virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "ThePreStubManager"; }
+#endif
+
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
+
+ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace);
+
+#ifndef DACCESS_COMPILE
+ static void Init(void);
+#endif
+
+#ifdef DACCESS_COMPILE
+ protected:
+ virtual LPCWSTR GetStubManagerName(PCODE addr)
+ { LIMITED_METHOD_CONTRACT; return W("ThePreStub"); }
+#endif
+};
+
+// -------------------------------------------------------
+// Stub manager classes for method desc prestubs & normal
+// frame-pushing, StubLinker-created stubs
+// -------------------------------------------------------
+
+typedef VPTR(class PrecodeStubManager) PTR_PrecodeStubManager;
+
+class PrecodeStubManager : public StubManager
+{
+ VPTR_VTABLE_CLASS(PrecodeStubManager, StubManager)
+
+ public:
+
+ SPTR_DECL(PrecodeStubManager, g_pManager);
+
+#ifdef _DEBUG
+ // Debug helper to help identify stub-managers.
+ virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "PrecodeStubManager"; }
+#endif
+
+
+ static void Init();
+
+#ifndef DACCESS_COMPILE
+ PrecodeStubManager() {LIMITED_METHOD_CONTRACT;}
+ ~PrecodeStubManager() {WRAPPER_NO_CONTRACT;}
+#endif
+
+ public:
+ static BOOL IsPrecodeByAsm(PCODE stubStartAddress);
+
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
+
+ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace);
+#ifndef DACCESS_COMPILE
+ virtual BOOL TraceManager(Thread *thread,
+ TraceDestination *trace,
+ CONTEXT *pContext,
+ BYTE **pRetAddr);
+#endif
+
+#ifdef DACCESS_COMPILE
+ virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+
+ protected:
+ virtual LPCWSTR GetStubManagerName(PCODE addr)
+ { LIMITED_METHOD_CONTRACT; return W("MethodDescPrestub"); }
+#endif
+};
+
+// Note that this stub was written by a debugger guy, and thus when he refers to 'multicast'
+// stub, he really means multi or single cast stub. This was done b/c the same stub
+// services both types of stub.
+// Note from the debugger guy: the way to understand what this manager does is to
+// first grok EmitMulticastInvoke for the platform you're working on (right now, just x86).
+// Then return here, and understand that (for x86) the only way we know which method
+// we're going to invoke next is by inspecting EDI when we've got the debuggee stopped
+// in the stub, and so our trace frame will either (FRAME_PUSH) put a breakpoint
+// in the stub, or (if we hit the BP) examine EDI, etc, & figure out where we're going next.
+
+typedef VPTR(class StubLinkStubManager) PTR_StubLinkStubManager;
+
+class StubLinkStubManager : public StubManager
+{
+ VPTR_VTABLE_CLASS(StubLinkStubManager, StubManager)
+
+ public:
+
+#ifdef _DEBUG
+ virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "StubLinkStubManager"; }
+#endif
+
+
+ SPTR_DECL(StubLinkStubManager, g_pManager);
+
+ static void Init();
+
+#ifndef DACCESS_COMPILE
+ StubLinkStubManager() : StubManager(), m_rangeList() {LIMITED_METHOD_CONTRACT;}
+ ~StubLinkStubManager() {WRAPPER_NO_CONTRACT;}
+#endif
+
+ protected:
+ LockedRangeList m_rangeList;
+ public:
+ // Get dac-ized pointer to rangelist.
+ PTR_RangeList GetRangeList()
+ {
+ SUPPORTS_DAC;
+
+ TADDR addr = PTR_HOST_MEMBER_TADDR(StubLinkStubManager, this, m_rangeList);
+ return PTR_RangeList(addr);
+ }
+
+
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
+
+ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace);
+#ifndef DACCESS_COMPILE
+ virtual BOOL TraceManager(Thread *thread,
+ TraceDestination *trace,
+ CONTEXT *pContext,
+ BYTE **pRetAddr);
+#endif
+
+#ifdef DACCESS_COMPILE
+ virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+
+ protected:
+ virtual LPCWSTR GetStubManagerName(PCODE addr)
+ { LIMITED_METHOD_CONTRACT; return W("StubLinkStub"); }
+#endif
+} ;
+
+// Stub manager for thunks.
+
+typedef VPTR(class ThunkHeapStubManager) PTR_ThunkHeapStubManager;
+
+class ThunkHeapStubManager : public StubManager
+{
+ VPTR_VTABLE_CLASS(ThunkHeapStubManager, StubManager)
+
+ public:
+
+ SPTR_DECL(ThunkHeapStubManager, g_pManager);
+
+ static void Init();
+
+#ifndef DACCESS_COMPILE
+ ThunkHeapStubManager() : StubManager(), m_rangeList() { LIMITED_METHOD_CONTRACT; }
+ ~ThunkHeapStubManager() {WRAPPER_NO_CONTRACT;}
+#endif
+
+#ifdef _DEBUG
+ virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "ThunkHeapStubManager"; }
+#endif
+
+ protected:
+ LockedRangeList m_rangeList;
+ public:
+ // Get dac-ized pointer to rangelist.
+ PTR_RangeList GetRangeList()
+ {
+ SUPPORTS_DAC;
+ TADDR addr = PTR_HOST_MEMBER_TADDR(ThunkHeapStubManager, this, m_rangeList);
+ return PTR_RangeList(addr);
+ }
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
+
+ private:
+ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace);
+
+#ifdef DACCESS_COMPILE
+ virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+
+ protected:
+ virtual LPCWSTR GetStubManagerName(PCODE addr)
+ { LIMITED_METHOD_CONTRACT; return W("ThunkHeapStub"); }
+#endif
+};
+
+//
+// Stub manager for jump stubs created by ExecutionManager::jumpStub()
+// These are currently used only on the 64-bit targets IA64 and AMD64
+//
+typedef VPTR(class JumpStubStubManager) PTR_JumpStubStubManager;
+
+class JumpStubStubManager : public StubManager
+{
+ VPTR_VTABLE_CLASS(JumpStubStubManager, StubManager)
+
+ public:
+
+ SPTR_DECL(JumpStubStubManager, g_pManager);
+
+ static void Init();
+
+#ifndef DACCESS_COMPILE
+ JumpStubStubManager() {LIMITED_METHOD_CONTRACT;}
+ ~JumpStubStubManager() {WRAPPER_NO_CONTRACT;}
+
+#endif
+
+#ifdef _DEBUG
+ virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "JumpStubStubManager"; }
+#endif
+
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
+
+ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace);
+
+#ifdef DACCESS_COMPILE
+ virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+
+ protected:
+ virtual LPCWSTR GetStubManagerName(PCODE addr)
+ { LIMITED_METHOD_CONTRACT; return W("JumpStub"); }
+#endif
+};
+
+//
+// Stub manager for code sections. It forwards the query to the more appropriate
+// stub manager, or handles the query itself.
+//
+typedef VPTR(class RangeSectionStubManager) PTR_RangeSectionStubManager;
+
+class RangeSectionStubManager : public StubManager
+{
+ VPTR_VTABLE_CLASS(RangeSectionStubManager, StubManager)
+
+ public:
+ SPTR_DECL(RangeSectionStubManager, g_pManager);
+
+ static void Init();
+
+#ifndef DACCESS_COMPILE
+ RangeSectionStubManager() {LIMITED_METHOD_CONTRACT;}
+ ~RangeSectionStubManager() {WRAPPER_NO_CONTRACT;}
+#endif
+
+ static StubCodeBlockKind GetStubKind(PCODE stubStartAddress);
+
+ static PCODE GetMethodThunkTarget(PCODE stubStartAddress);
+
+ public:
+#ifdef _DEBUG
+ virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "RangeSectionStubManager"; }
+#endif
+
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
+
+ private:
+
+ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace);
+
+#ifndef DACCESS_COMPILE
+ virtual BOOL TraceManager(Thread *thread,
+ TraceDestination *trace,
+ CONTEXT *pContext,
+ BYTE **pRetAddr);
+#endif
+
+#ifdef DACCESS_COMPILE
+ virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+
+ protected:
+ virtual LPCWSTR GetStubManagerName(PCODE addr);
+#endif
+};
+
+//
+// This is the stub manager for IL stubs.
+//
+typedef VPTR(class ILStubManager) PTR_ILStubManager;
+
+#ifdef FEATURE_COMINTEROP
+struct ComPlusCallInfo;
+#endif // FEATURE_COMINTEROP
+
+class ILStubManager : public StubManager
+{
+ VPTR_VTABLE_CLASS(ILStubManager, StubManager)
+
+ public:
+ static void Init();
+
+#ifndef DACCESS_COMPILE
+ ILStubManager() : StubManager() {WRAPPER_NO_CONTRACT;}
+ ~ILStubManager()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK; // StubManager::UnlinkStubManager uses a crst
+ }
+ CONTRACTL_END;
+ }
+#endif
+
+ public:
+
+#ifdef _DEBUG
+ virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "ILStubManager"; }
+#endif
+
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
+
+ private:
+
+ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace);
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_COMINTEROP
+ static PCODE GetCOMTarget(Object *pThis, ComPlusCallInfo *pComPlusCallInfo);
+ static PCODE GetWinRTFactoryTarget(ComPlusCallMethodDesc *pCMD);
+#endif // FEATURE_COMINTEROP
+
+ virtual BOOL TraceManager(Thread *thread,
+ TraceDestination *trace,
+ CONTEXT *pContext,
+ BYTE **pRetAddr);
+#endif
+
+#ifdef DACCESS_COMPILE
+ virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+
+ protected:
+ virtual LPCWSTR GetStubManagerName(PCODE addr)
+ { LIMITED_METHOD_CONTRACT; return W("ILStub"); }
+#endif
+};
+
+// This is used to recognize
+// GenericComPlusCallStub()
+// VarargPInvokeStub()
+// GenericPInvokeCalliHelper()
+typedef VPTR(class InteropDispatchStubManager) PTR_InteropDispatchStubManager;
+
+class InteropDispatchStubManager : public StubManager
+{
+ VPTR_VTABLE_CLASS(InteropDispatchStubManager, StubManager)
+
+ public:
+ static void Init();
+
+#ifndef DACCESS_COMPILE
+ InteropDispatchStubManager() : StubManager() {WRAPPER_NO_CONTRACT;}
+ ~InteropDispatchStubManager() {WRAPPER_NO_CONTRACT;}
+#endif
+
+#ifdef _DEBUG
+ virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "InteropDispatchStubManager"; }
+#endif
+
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
+
+ private:
+
+ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace);
+
+#ifndef DACCESS_COMPILE
+ virtual BOOL TraceManager(Thread *thread,
+ TraceDestination *trace,
+ CONTEXT *pContext,
+ BYTE **pRetAddr);
+#endif
+
+#ifdef DACCESS_COMPILE
+ virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+
+ protected:
+ virtual LPCWSTR GetStubManagerName(PCODE addr)
+ { LIMITED_METHOD_CONTRACT; return W("InteropDispatchStub"); }
+#endif
+};
+
+//
+// Since we don't generate delegate invoke stubs at runtime on WIN64, we
+// can't use the StubLinkStubManager for these stubs. Instead, we create
+// an additional DelegateInvokeStubManager.
+//
+typedef VPTR(class DelegateInvokeStubManager) PTR_DelegateInvokeStubManager;
+
+class DelegateInvokeStubManager : public StubManager
+{
+ VPTR_VTABLE_CLASS(DelegateInvokeStubManager, StubManager)
+
+ public:
+
+ SPTR_DECL(DelegateInvokeStubManager, g_pManager);
+
+ static void Init();
+
+#if !defined(DACCESS_COMPILE)
+ DelegateInvokeStubManager() : StubManager(), m_rangeList() {LIMITED_METHOD_CONTRACT;}
+ ~DelegateInvokeStubManager() {WRAPPER_NO_CONTRACT;}
+#endif // !DACCESS_COMPILE
+
+ BOOL AddStub(Stub* pStub);
+ void RemoveStub(Stub* pStub);
+
+#ifdef _DEBUG
+ virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "DelegateInvokeStubManager"; }
+#endif
+
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
+
+#if !defined(DACCESS_COMPILE)
+ virtual BOOL TraceManager(Thread *thread, TraceDestination *trace, CONTEXT *pContext, BYTE **pRetAddr);
+ static BOOL TraceDelegateObject(BYTE *orDel, TraceDestination *trace);
+#endif // !DACCESS_COMPILE
+
+ private:
+
+ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace);
+
+ protected:
+ LockedRangeList m_rangeList;
+ public:
+ // Get dac-ized pointer to rangelist.
+ PTR_RangeList GetRangeList()
+ {
+ SUPPORTS_DAC;
+
+ TADDR addr = PTR_HOST_MEMBER_TADDR(DelegateInvokeStubManager, this, m_rangeList);
+ return PTR_RangeList(addr);
+ }
+
+
+#ifdef DACCESS_COMPILE
+ virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+
+ protected:
+ virtual LPCWSTR GetStubManagerName(PCODE addr)
+ { LIMITED_METHOD_CONTRACT; return W("DelegateInvokeStub"); }
+#endif
+};
+
+//---------------------------------------------------------------------------------------
+//
+// This is the stub manager to help the managed debugger step into a tail call.
+// It helps the debugger trace through JIT_TailCall().
+//
+
+typedef VPTR(class TailCallStubManager) PTR_TailCallStubManager;
+
+class TailCallStubManager : public StubManager
+{
+ VPTR_VTABLE_CLASS(TailCallStubManager, StubManager)
+
+public:
+ static void Init();
+
+#if !defined(DACCESS_COMPILE)
+ TailCallStubManager() : StubManager() {WRAPPER_NO_CONTRACT;}
+ ~TailCallStubManager() {WRAPPER_NO_CONTRACT;}
+
+ virtual BOOL TraceManager(Thread * pThread, TraceDestination * pTrace, CONTEXT * pContext, BYTE ** ppRetAddr);
+
+ static bool IsTailCallStubHelper(PCODE code);
+#endif // !DACCESS_COMPILE
+
+#if defined(_DEBUG)
+ virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "TailCallStubManager"; }
+#endif // _DEBUG
+
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
+
+private:
+ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination * pTrace);
+
+#if defined(DACCESS_COMPILE)
+ virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+
+protected:
+ virtual LPCWSTR GetStubManagerName(PCODE addr) {LIMITED_METHOD_CONTRACT; return W("TailCallStub");}
+#endif // DACCESS_COMPILE
+};
+
+//
+// Helpers for common value locations in stubs to make stub managers more portable
+//
+class StubManagerHelpers
+{
+public:
+ static PCODE GetReturnAddress(T_CONTEXT * pContext)
+ {
+#if defined(_TARGET_X86_)
+ return *dac_cast<PTR_PCODE>(pContext->Esp);
+#elif defined(_TARGET_AMD64_)
+ return *dac_cast<PTR_PCODE>(pContext->Rsp);
+#elif defined(_TARGET_ARM_)
+ return pContext->Lr;
+#elif defined(_TARGET_ARM64_)
+ return pContext->Lr;
+#else
+ PORTABILITY_ASSERT("StubManagerHelpers::GetReturnAddress");
+ return NULL;
+#endif
+ }
+
+ static PTR_Object GetThisPtr(T_CONTEXT * pContext)
+ {
+#if defined(_TARGET_X86_)
+ return dac_cast<PTR_Object>(pContext->Ecx);
+#elif defined(_TARGET_AMD64_)
+#ifdef UNIX_AMD64_ABI
+ return dac_cast<PTR_Object>(pContext->Rdi);
+#else
+ return dac_cast<PTR_Object>(pContext->Rcx);
+#endif
+#elif defined(_TARGET_ARM_)
+ return dac_cast<PTR_Object>(pContext->R0);
+#elif defined(_TARGET_ARM64_)
+ return dac_cast<PTR_Object>(pContext->X0);
+#else
+ PORTABILITY_ASSERT("StubManagerHelpers::GetThisPtr");
+ return NULL;
+#endif
+ }
+
+ static PCODE GetTailCallTarget(T_CONTEXT * pContext)
+ {
+#if defined(_TARGET_X86_)
+ return pContext->Eax;
+#elif defined(_TARGET_AMD64_)
+ return pContext->Rax;
+#elif defined(_TARGET_ARM_)
+ return pContext->R12;
+#else
+ PORTABILITY_ASSERT("StubManagerHelpers::GetTailCallTarget");
+ return NULL;
+#endif
+ }
+
+ static TADDR GetHiddenArg(T_CONTEXT * pContext)
+ {
+#if defined(_TARGET_X86_)
+ return pContext->Eax;
+#elif defined(_TARGET_AMD64_)
+ return pContext->R10;
+#elif defined(_TARGET_ARM_)
+ return pContext->R12;
+#elif defined(_TARGET_ARM64_)
+ return pContext->X15;
+#else
+ PORTABILITY_ASSERT("StubManagerHelpers::GetHiddenArg");
+ return NULL;
+#endif
+ }
+
+#ifndef CROSSGEN_COMPILE
+ static PCODE GetRetAddrFromMulticastILStubFrame(T_CONTEXT * pContext)
+ {
+ /*
+ Following is the callstack corresponding to context received by ILStubManager::TraceManager.
+ This function returns the return address (user code address) where control should return after all
+ delegates in multicast delegate have been executed.
+
+ StubHelpers::MulticastDebuggerTraceHelper
+ IL_STUB_MulticastDelegate_Invoke
+ UserCode which invokes multicast delegate <---
+ */
+
+#if defined(_TARGET_X86_)
+ return *((PCODE *)pContext->Ebp + 1);
+#elif defined(_TARGET_AMD64_)
+ T_CONTEXT context(*pContext);
+ Thread::VirtualUnwindCallFrame(&context);
+ Thread::VirtualUnwindCallFrame(&context);
+
+ return context.Rip;
+#elif defined(_TARGET_ARM_)
+ return *((PCODE *)pContext->R11 + 1);
+#elif defined(_TARGET_ARM64_)
+ return *((PCODE *)pContext->Fp + 1);
+#else
+ PORTABILITY_ASSERT("StubManagerHelpers::GetRetAddrFromMulticastILStubFrame");
+ return NULL;
+#endif
+ }
+#endif // !CROSSGEN_COMPILE
+
+ static TADDR GetSecondArg(T_CONTEXT * pContext)
+ {
+#if defined(_TARGET_X86_)
+ return pContext->Edx;
+#elif defined(_TARGET_AMD64_)
+#ifdef UNIX_AMD64_ABI
+ return pContext->Rsi;
+#else
+ return pContext->Rdx;
+#endif
+#elif defined(_TARGET_ARM_)
+ return pContext->R1;
+#elif defined(_TARGET_ARM64_)
+ return pContext->X1;
+#else
+ PORTABILITY_ASSERT("StubManagerHelpers::GetSecondArg");
+ return NULL;
+#endif
+ }
+
+};
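+
+// Illustrative sketch only (not compiled): a TraceManager() implementation
+// can stay platform-neutral by going through these helpers, e.g.
+//
+//   *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext);
+//   pTrace->InitForStub(StubManagerHelpers::GetTailCallTarget(pContext));
+//   return TRUE;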
+
+#endif
diff --git a/src/vm/syncblk.cpp b/src/vm/syncblk.cpp
new file mode 100644
index 0000000000..a0e7b0daa2
--- /dev/null
+++ b/src/vm/syncblk.cpp
@@ -0,0 +1,3664 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// SYNCBLK.CPP
+//
+
+//
+// Definition of a SyncBlock and the SyncBlockCache which manages it
+//
+
+
+#include "common.h"
+
+#include "vars.hpp"
+#include "util.hpp"
+#include "class.h"
+#include "object.h"
+#include "threads.h"
+#include "excep.h"
+#include "threads.h"
+#include "syncblk.h"
+#include "interoputil.h"
+#include "encee.h"
+#include "perfcounters.h"
+#include "eventtrace.h"
+#include "dllimportcallback.h"
+#include "comcallablewrapper.h"
+#include "eeconfig.h"
+#include "corhost.h"
+#include "comdelegate.h"
+#include "finalizerthread.h"
+#include "gcscan.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "runtimecallablewrapper.h"
+#endif // FEATURE_COMINTEROP
+
+// Allocate one page's worth. Typically enough.
+#define MAXSYNCBLOCK ((PAGE_SIZE-sizeof(void*))/sizeof(SyncBlock))
+#define SYNC_TABLE_INITIAL_SIZE 250
+
+//#define DUMP_SB
+
+class SyncBlockArray
+{
+ public:
+ SyncBlockArray *m_Next;
+ BYTE m_Blocks[MAXSYNCBLOCK * sizeof (SyncBlock)];
+};
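+
+// Worked example (illustrative only; the sizes are assumptions, not
+// measurements): with a 4096-byte page, an 8-byte m_Next pointer, and a
+// hypothetical 56-byte SyncBlock, MAXSYNCBLOCK = (4096 - 8) / 56 = 73, so
+// m_Next plus m_Blocks fill the page exactly. The sizeof(void*) subtracted
+// in MAXSYNCBLOCK is what reserves room for m_Next.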
+
+// For in-place constructor
+BYTE g_SyncBlockCacheInstance[sizeof(SyncBlockCache)];
+
+SPTR_IMPL (SyncBlockCache, SyncBlockCache, s_pSyncBlockCache);
+
+#ifndef DACCESS_COMPILE
+
+
+
+void SyncBlock::OnADUnload()
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef EnC_SUPPORTED
+ if (m_pEnCInfo)
+ {
+ m_pEnCInfo->Cleanup();
+ m_pEnCInfo = NULL;
+ }
+#endif
+}
+
+#ifndef FEATURE_PAL
+// static
+SLIST_HEADER InteropSyncBlockInfo::s_InteropInfoStandbyList;
+#endif // !FEATURE_PAL
+
+InteropSyncBlockInfo::~InteropSyncBlockInfo()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ DESTRUCTOR_CHECK;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ FreeUMEntryThunkOrInterceptStub();
+}
+
+#ifndef FEATURE_PAL
+// Deletes all items in code:s_InteropInfoStandbyList.
+void InteropSyncBlockInfo::FlushStandbyList()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PSLIST_ENTRY pEntry = InterlockedFlushSList(&InteropSyncBlockInfo::s_InteropInfoStandbyList);
+ while (pEntry)
+ {
+ PSLIST_ENTRY pNextEntry = pEntry->Next;
+
+ // make sure to use the global delete since the destructor has already run
+ ::delete (void *)pEntry;
+ pEntry = pNextEntry;
+ }
+}
+#endif // !FEATURE_PAL
+
+void InteropSyncBlockInfo::FreeUMEntryThunkOrInterceptStub()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ DESTRUCTOR_CHECK;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ if (!g_fEEShutDown)
+ {
+ void *pUMEntryThunk = GetUMEntryThunk();
+ if (pUMEntryThunk != NULL)
+ {
+ COMDelegate::RemoveEntryFromFPtrHash((UPTR)pUMEntryThunk);
+ UMEntryThunk::FreeUMEntryThunk((UMEntryThunk *)pUMEntryThunk);
+ }
+ else
+ {
+#if defined(_TARGET_X86_)
+ Stub *pInterceptStub = GetInterceptStub();
+
+ if (pInterceptStub != NULL)
+ {
+ // There may be multiple chained stubs, i.e. host hook stub calling MDA stack
+ // imbalance stub, and the following DecRef will free all of them.
+ pInterceptStub->DecRef();
+ }
+#else // _TARGET_X86_
+ // Intercept stubs are currently not used on other platforms.
+ _ASSERTE(GetInterceptStub() == NULL);
+#endif // _TARGET_X86_
+ }
+ }
+ m_pUMEntryThunkOrInterceptStub = NULL;
+}
+
+#ifdef FEATURE_COMINTEROP
+// Returns either NULL or an RCW on which AcquireLock has been called.
+RCW* InteropSyncBlockInfo::GetRCWAndIncrementUseCount()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD dwSwitchCount = 0;
+ while (true)
+ {
+ RCW *pRCW = VolatileLoad(&m_pRCW);
+ if ((size_t)pRCW <= 0x1)
+ {
+ // the RCW never existed or has been released
+ return NULL;
+ }
+
+ if (((size_t)pRCW & 0x1) == 0x0)
+ {
+ // it looks like we have a chance, try to acquire the lock
+ RCW *pLockedRCW = (RCW *)((size_t)pRCW | 0x1);
+ if (InterlockedCompareExchangeT(&m_pRCW, pLockedRCW, pRCW) == pRCW)
+ {
+ // we have the lock on the m_pRCW field, now we can safely "use" the RCW
+ pRCW->IncrementUseCount();
+
+ // release the m_pRCW lock
+ VolatileStore(&m_pRCW, pRCW);
+
+ // and return the RCW
+ return pRCW;
+ }
+ }
+
+ // somebody else holds the lock, retry
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+}
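+
+// Illustrative summary only (restating the protocol above): m_pRCW doubles
+// as a tiny spin lock by tagging the low pointer bit. The states are:
+//
+//   NULL      - no RCW has ever been set
+//   0x1       - the RCW existed but has been released
+//   p         - unlocked; p is the live RCW
+//   p | 0x1   - locked while a reader increments the use count
+//
+// Acquire is a compare-exchange from p to (p | 0x1); release is a plain
+// volatile store of p.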
+
+// Sets the m_pRCW field in a thread-safe manner, pRCW can be NULL.
+void InteropSyncBlockInfo::SetRawRCW(RCW* pRCW)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (pRCW != NULL)
+ {
+ // we never set two different RCWs on a single object
+ _ASSERTE(m_pRCW == NULL);
+ m_pRCW = pRCW;
+ }
+ else
+ {
+ DWORD dwSwitchCount = 0;
+ while (true)
+ {
+ RCW *pOldRCW = VolatileLoad(&m_pRCW);
+
+ if ((size_t)pOldRCW <= 0x1)
+ {
+ // the RCW never existed or has been released
+ VolatileStore(&m_pRCW, (RCW *)0x1);
+ return;
+ }
+
+ if (((size_t)pOldRCW & 0x1) == 0x0)
+ {
+ // it looks like we have a chance, set the RCW to 0x1
+ if (InterlockedCompareExchangeT(&m_pRCW, (RCW *)0x1, pOldRCW) == pOldRCW)
+ {
+ // we made it
+ return;
+ }
+ }
+
+ // somebody else holds the lock, retry
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+ }
+}
+#endif // FEATURE_COMINTEROP
+
+void UMEntryThunk::OnADUnload()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pObjectHandle = NULL;
+}
+
+#endif // !DACCESS_COMPILE
+
+PTR_SyncTableEntry SyncTableEntry::GetSyncTableEntry()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return (PTR_SyncTableEntry)g_pSyncTable;
+}
+
+#ifndef DACCESS_COMPILE
+
+SyncTableEntry*& SyncTableEntry::GetSyncTableEntryByRef()
+{
+ LIMITED_METHOD_CONTRACT;
+ return g_pSyncTable;
+}
+
+/* static */
+SyncBlockCache*& SyncBlockCache::GetSyncBlockCache()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return s_pSyncBlockCache;
+}
+
+
+//----------------------------------------------------------------------------
+//
+// ThreadQueue Implementation
+//
+//----------------------------------------------------------------------------
+#endif //!DACCESS_COMPILE
+
+// Given a link in the chain, get the Thread that it represents
+/* static */
+inline PTR_WaitEventLink ThreadQueue::WaitEventLinkForLink(PTR_SLink pLink)
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return (PTR_WaitEventLink) (((PTR_BYTE) pLink) - offsetof(WaitEventLink, m_LinkSB));
+}
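+
+// Illustrative note (restating the arithmetic above): WaitEventLink embeds
+// m_LinkSB, so subtracting offsetof(WaitEventLink, m_LinkSB) from a link
+// pointer recovers the enclosing WaitEventLink - the usual "container-of"
+// pattern for intrusive lists.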
+
+#ifndef DACCESS_COMPILE
+
+// Unlink the head of the Q. We are always in the SyncBlock's critical
+// section.
+/* static */
+inline WaitEventLink *ThreadQueue::DequeueThread(SyncBlock *psb)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
+ // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
+ SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
+
+ WaitEventLink *ret = NULL;
+ SLink *pLink = psb->m_Link.m_pNext;
+
+ if (pLink)
+ {
+ psb->m_Link.m_pNext = pLink->m_pNext;
+#ifdef _DEBUG
+ pLink->m_pNext = (SLink *)POISONC;
+#endif
+ ret = WaitEventLinkForLink(pLink);
+ _ASSERTE(ret->m_WaitSB == psb);
+ COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cQueueLength--);
+ }
+ return ret;
+}
+
+// Enqueue is the slow one. We have to find the end of the Q since we don't
+// want to burn storage for this in the SyncBlock.
+/* static */
+inline void ThreadQueue::EnqueueThread(WaitEventLink *pWaitEventLink, SyncBlock *psb)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (pWaitEventLink->m_LinkSB.m_pNext == NULL);
+
+ // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
+ // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
+ SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
+
+ COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cQueueLength++);
+
+ SLink *pPrior = &psb->m_Link;
+
+ while (pPrior->m_pNext)
+ {
+ // We shouldn't already be in the waiting list!
+ _ASSERTE(pPrior->m_pNext != &pWaitEventLink->m_LinkSB);
+
+ pPrior = pPrior->m_pNext;
+ }
+ pPrior->m_pNext = &pWaitEventLink->m_LinkSB;
+}
+
+
+// Wade through the SyncBlock's list of waiting threads and remove the
+// specified thread.
+/* static */
+BOOL ThreadQueue::RemoveThread (Thread *pThread, SyncBlock *psb)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ BOOL res = FALSE;
+
+ // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
+ // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
+ SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
+
+ SLink *pPrior = &psb->m_Link;
+ SLink *pLink;
+ WaitEventLink *pWaitEventLink;
+
+ while ((pLink = pPrior->m_pNext) != NULL)
+ {
+ pWaitEventLink = WaitEventLinkForLink(pLink);
+ if (pWaitEventLink->m_Thread == pThread)
+ {
+ pPrior->m_pNext = pLink->m_pNext;
+#ifdef _DEBUG
+ pLink->m_pNext = (SLink *)POISONC;
+#endif
+ _ASSERTE(pWaitEventLink->m_WaitSB == psb);
+ COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cQueueLength--);
+ res = TRUE;
+ break;
+ }
+ pPrior = pLink;
+ }
+ return res;
+}
+
+#endif //!DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+// Enumerates the threads in the queue from front to back by calling
+// pCallbackFunction on each one
+/* static */
+void ThreadQueue::EnumerateThreads(SyncBlock *psb, FP_TQ_THREAD_ENUMERATION_CALLBACK pCallbackFunction, void* pUserData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ SUPPORTS_DAC;
+
+ PTR_SLink pLink = psb->m_Link.m_pNext;
+ PTR_WaitEventLink pWaitEventLink;
+
+ while (pLink != NULL)
+ {
+ pWaitEventLink = WaitEventLinkForLink(pLink);
+
+ pCallbackFunction(pWaitEventLink->m_Thread, pUserData);
+ pLink = pLink->m_pNext;
+ }
+}
+#endif //DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+
+// ***************************************************************************
+//
+// Ephemeral Bitmap Helper
+//
+// ***************************************************************************
+
+#define card_size 32
+
+#define card_word_width 32
+
+size_t CardIndex (size_t card)
+{
+ LIMITED_METHOD_CONTRACT;
+ return card_size * card;
+}
+
+size_t CardOf (size_t idx)
+{
+ LIMITED_METHOD_CONTRACT;
+ return idx / card_size;
+}
+
+size_t CardWord (size_t card)
+{
+ LIMITED_METHOD_CONTRACT;
+ return card / card_word_width;
+}
+inline
+unsigned CardBit (size_t card)
+{
+ LIMITED_METHOD_CONTRACT;
+ return (unsigned)(card % card_word_width);
+}
+
+inline
+void SyncBlockCache::SetCard (size_t card)
+{
+ WRAPPER_NO_CONTRACT;
+ m_EphemeralBitmap [CardWord (card)] =
+ (m_EphemeralBitmap [CardWord (card)] | (1 << CardBit (card)));
+}
+
+inline
+void SyncBlockCache::ClearCard (size_t card)
+{
+ WRAPPER_NO_CONTRACT;
+ m_EphemeralBitmap [CardWord (card)] =
+ (m_EphemeralBitmap [CardWord (card)] & ~(1 << CardBit (card)));
+}
+
+inline
+BOOL SyncBlockCache::CardSetP (size_t card)
+{
+ WRAPPER_NO_CONTRACT;
+ return ( m_EphemeralBitmap [ CardWord (card) ] & (1 << CardBit (card)));
+}
+
+inline
+void SyncBlockCache::CardTableSetBit (size_t idx)
+{
+ WRAPPER_NO_CONTRACT;
+ SetCard (CardOf (idx));
+}
+
+
+size_t BitMapSize (size_t cacheSize)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (cacheSize + card_size * card_word_width - 1)/ (card_size * card_word_width);
+}
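+
+// Worked example (illustrative only): with card_size == 32 and
+// card_word_width == 32, sync table index idx maps to
+//
+//   card = idx / 32    (CardOf)
+//   word = card / 32   (CardWord)  => idx / 1024
+//   bit  = card % 32   (CardBit)
+//
+// so one DWORD of the bitmap covers 1024 sync table entries, and
+// BitMapSize(cacheSize) rounds up to that many DWORDs.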
+
+// ***************************************************************************
+//
+// SyncBlockCache class implementation
+//
+// ***************************************************************************
+
+SyncBlockCache::SyncBlockCache()
+ : m_pCleanupBlockList(NULL),
+ m_FreeBlockList(NULL),
+
+ // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
+ // If you remove this flag, we will switch to preemptive mode when entering
+ // g_criticalSection, which means all functions that enter it will become
+ // GC_TRIGGERS. (This includes all uses of LockHolder around SyncBlockCache::GetSyncBlockCache().)
+ // So be sure to update the contracts if you remove this flag.
+ m_CacheLock(CrstSyncBlockCache, (CrstFlags) (CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)),
+
+ m_FreeCount(0),
+ m_ActiveCount(0),
+ m_SyncBlocks(0),
+ m_FreeSyncBlock(0),
+ m_FreeSyncTableIndex(1),
+ m_FreeSyncTableList(0),
+ m_SyncTableSize(SYNC_TABLE_INITIAL_SIZE),
+ m_OldSyncTables(0),
+ m_bSyncBlockCleanupInProgress(FALSE),
+ m_EphemeralBitmap(0)
+{
+ CONTRACTL
+ {
+ CONSTRUCTOR_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+}
+
+
+// This method is no longer called.
+SyncBlockCache::~SyncBlockCache()
+{
+ CONTRACTL
+ {
+ DESTRUCTOR_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Clear the list the fast way.
+ m_FreeBlockList = NULL;
+ //<TODO>@todo we can clear this fast too I guess</TODO>
+ m_pCleanupBlockList = NULL;
+
+ // destruct all arrays
+ while (m_SyncBlocks)
+ {
+ SyncBlockArray *next = m_SyncBlocks->m_Next;
+ delete m_SyncBlocks;
+ m_SyncBlocks = next;
+ }
+
+ // Also, now is a good time to clean up all the old tables which we discarded
+ // when we overflowed them.
+ SyncTableEntry* arr;
+ while ((arr = m_OldSyncTables) != 0)
+ {
+ m_OldSyncTables = (SyncTableEntry*)arr[0].m_Object.Load();
+ delete arr;
+ }
+}
+
+
+// When the GC determines that an object is dead, the low bit of the
+// m_Object field of SyncTableEntry is set; however, it is not
+// cleaned up immediately because we can't do the COM interop cleanup at GC time.
+// Instead it is put on a cleanup list, and at a later time (typically during
+// finalization) this list is cleaned up.
+//
+void SyncBlockCache::CleanupSyncBlocks()
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ _ASSERTE(GetThread() == FinalizerThread::GetFinalizerThread());
+
+ // Set the flag indicating sync block cleanup is in progress.
+ // IMPORTANT: This must be set before the sync block cleanup bit is reset on the thread.
+ m_bSyncBlockCleanupInProgress = TRUE;
+
+ struct Param
+ {
+ SyncBlockCache *pThis;
+ SyncBlock* psb;
+#ifdef FEATURE_COMINTEROP
+ RCW* pRCW;
+#endif
+ } param;
+ param.pThis = this;
+ param.psb = NULL;
+#ifdef FEATURE_COMINTEROP
+ param.pRCW = NULL;
+#endif
+
+ EE_TRY_FOR_FINALLY(Param *, pParam, &param)
+ {
+ // reset the flag
+ FinalizerThread::GetFinalizerThread()->ResetSyncBlockCleanup();
+
+ // walk the cleanup list and cleanup 'em up
+ while ((pParam->psb = pParam->pThis->GetNextCleanupSyncBlock()) != NULL)
+ {
+#ifdef FEATURE_COMINTEROP
+ InteropSyncBlockInfo* pInteropInfo = pParam->psb->GetInteropInfoNoCreate();
+ if (pInteropInfo)
+ {
+ pParam->pRCW = pInteropInfo->GetRawRCW();
+ if (pParam->pRCW)
+ {
+ // We should have initialized the cleanup list with the
+ // first RCW cache we created
+ _ASSERTE(g_pRCWCleanupList != NULL);
+
+ g_pRCWCleanupList->AddWrapper(pParam->pRCW);
+
+ pParam->pRCW = NULL;
+ pInteropInfo->SetRawRCW(NULL);
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ // Delete the sync block.
+ pParam->pThis->DeleteSyncBlock(pParam->psb);
+ pParam->psb = NULL;
+
+ // pulse GC mode to allow GC to perform its work
+ if (FinalizerThread::GetFinalizerThread()->CatchAtSafePointOpportunistic())
+ {
+ FinalizerThread::GetFinalizerThread()->PulseGCMode();
+ }
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // Now clean up the rcw's sorted by context
+ if (g_pRCWCleanupList != NULL)
+ g_pRCWCleanupList->CleanupAllWrappers();
+#endif // FEATURE_COMINTEROP
+ }
+ EE_FINALLY
+ {
+ // We are finished cleaning up the sync blocks.
+ m_bSyncBlockCleanupInProgress = FALSE;
+
+#ifdef FEATURE_COMINTEROP
+ if (param.pRCW)
+ param.pRCW->Cleanup();
+#endif
+
+ if (param.psb)
+ DeleteSyncBlock(param.psb);
+ } EE_END_FINALLY;
+}
+
+// When an appdomain is unloading, we need to ensure that any pointers into
+// it from sync blocks (eg from COM Callable Wrappers) are properly
+// updated so that they fail gracefully if another call is made through
+// them. This is what this routine does.
+//
+VOID SyncBlockCache::CleanupSyncBlocksInAppDomain(AppDomain *pDomain)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ _ASSERTE(IsFinalizerThread());
+
+ ADIndex index = pDomain->GetIndex();
+
+ ADID id = pDomain->GetId();
+
+ // Make sure we don't race with anybody updating the table
+ DWORD maxIndex;
+
+ {
+ // Taking this lock here avoids races where m_FreeSyncTableIndex is being updated.
+ // (A volatile read would have been enough, however.)
+ SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
+ maxIndex = m_FreeSyncTableIndex;
+ }
+ BOOL bModifiedCleanupList=FALSE;
+ STRESS_LOG1(LF_APPDOMAIN, LL_INFO100, "To cleanup - %d sync blocks", maxIndex);
+ DWORD nb;
+ for (nb = 1; nb < maxIndex; nb++)
+ {
+ // This is a check for syncblocks that were already cleaned up.
+ if ((size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_Object.Load() & 1)
+ {
+ continue;
+ }
+
+ // If the syncblock pointer is invalid, nothing more we can do.
+ SyncBlock *pSyncBlock = SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock;
+ if (!pSyncBlock)
+ {
+ continue;
+ }
+
+ // If we happen to have a CCW living in the AppDomain being cleaned, then we need to neuter it.
+ // We do this check early because we have to neuter CCWs for agile objects as well.
+ // Neutering the object simply means we disconnect the object from the CCW so it can no longer
+ // be used. When its ref-count falls to zero, it gets cleaned up.
+ STRESS_LOG1(LF_APPDOMAIN, LL_INFO1000000, "SyncBlock %p.", pSyncBlock);
+ InteropSyncBlockInfo* pInteropInfo = pSyncBlock->GetInteropInfoNoCreate();
+ if (pInteropInfo)
+ {
+#ifdef FEATURE_COMINTEROP
+ ComCallWrapper* pWrap = pInteropInfo->GetCCW();
+ if (pWrap)
+ {
+ SimpleComCallWrapper* pSimpleWrapper = pWrap->GetSimpleWrapper();
+ _ASSERTE(pSimpleWrapper);
+
+ if (pSimpleWrapper->GetDomainID() == id)
+ {
+ pSimpleWrapper->Neuter();
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ UMEntryThunk* umThunk=(UMEntryThunk*)pInteropInfo->GetUMEntryThunk();
+
+ if (umThunk && umThunk->GetDomainId()==id)
+ {
+ umThunk->OnADUnload();
+ STRESS_LOG1(LF_APPDOMAIN, LL_INFO100, "Thunk %x unloaded", umThunk);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ {
+ // we need to take RCWCache lock to avoid the race with another thread which is
+ // removing the RCW from cache, decoupling it from the object, and deleting the RCW.
+ RCWCache* pCache = pDomain->GetRCWCache();
+ _ASSERTE(pCache);
+ RCWCache::LockHolder lh(pCache);
+ RCW* pRCW = pInteropInfo->GetRawRCW();
+ if (pRCW && pRCW->GetDomain()==pDomain)
+ {
+ // We should have initialized the cleanup list with the
+ // first RCW cache we created
+ _ASSERTE(g_pRCWCleanupList != NULL);
+
+ g_pRCWCleanupList->AddWrapper(pRCW);
+
+ pCache->RemoveWrapper(pRCW);
+ pInteropInfo->SetRawRCW(NULL);
+ bModifiedCleanupList=TRUE;
+ }
+ }
+#endif // FEATURE_COMINTEROP
+ }
+
+ // NOTE: this will only notify the sync block if it is non-agile and living in the unloading domain.
+ // Agile objects that are still alive will not get notification!
+ if (pSyncBlock->GetAppDomainIndex() == index)
+ {
+ pSyncBlock->OnADUnload();
+ }
+ }
+ STRESS_LOG1(LF_APPDOMAIN, LL_INFO100, "AD cleanup - %d sync blocks done", nb);
+ // Make sure nobody decreased m_FreeSyncTableIndex behind our back (we would read
+ // off table limits)
+ _ASSERTE(maxIndex <= m_FreeSyncTableIndex);
+
+ if (bModifiedCleanupList)
+ GetThread()->SetSyncBlockCleanup();
+
+ while (GetThread()->RequireSyncBlockCleanup()) //we also might have something in the cleanup list
+ CleanupSyncBlocks();
+
+#ifdef _DEBUG
+ {
+ SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
+ DWORD maxIndex = m_FreeSyncTableIndex;
+ for (DWORD nb = 1; nb < maxIndex; nb++)
+ {
+ if ((size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_Object.Load() & 1)
+ {
+ continue;
+ }
+
+ // If the syncblock pointer is invalid, nothing more we can do.
+ SyncBlock *pSyncBlock = SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock;
+ if (!pSyncBlock)
+ {
+ continue;
+ }
+ InteropSyncBlockInfo* pInteropInfo = pSyncBlock->GetInteropInfoNoCreate();
+ if (pInteropInfo)
+ {
+ UMEntryThunk* umThunk=(UMEntryThunk*)pInteropInfo->GetUMEntryThunk();
+
+ if (umThunk && umThunk->GetDomainId()==id)
+ {
+ _ASSERTE(!umThunk->GetObjectHandle());
+ }
+ }
+
+ }
+ }
+#endif
+
+#endif
+}
+
+
+// create the sync block cache
+/* static */
+void SyncBlockCache::Attach()
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+// destroy the sync block cache
+// This method is no longer called.
+#if 0
+void SyncBlockCache::DoDetach()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Object *pObj;
+ ObjHeader *pHeader;
+
+
+ // Ensure that all the critical sections are released. This is particularly
+ // important in DEBUG, because all critical sections are threaded onto a global
+ // list which would otherwise be corrupted.
+ for (DWORD i=0; i<m_FreeSyncTableIndex; i++)
+ if (((size_t)SyncTableEntry::GetSyncTableEntry()[i].m_Object & 1) == 0)
+ if (SyncTableEntry::GetSyncTableEntry()[i].m_SyncBlock)
+ {
+ // <TODO>@TODO -- If threads are executing during this detach, they will
+ // fail in various ways:
+ //
+ // 1) They will race between us tearing these data structures down
+ // as they navigate through them.
+ //
+ // 2) They will unexpectedly see the syncblock destroyed, even though
+ // they hold the synchronization lock, or have been exposed out
+ // to COM, etc.
+ //
+ // 3) The instance's hash code may change during the shutdown.
+ //
+ // The correct solution involves suspending the threads earlier, but
+ // changing our suspension code so that it allows pumping if we are
+ // in a shutdown case.
+ //
+ // </TODO>
+
+ // Make sure this gets updated because the finalizer thread & others
+ // will continue to run for a short while more during our shutdown.
+ pObj = SyncTableEntry::GetSyncTableEntry()[i].m_Object;
+ pHeader = pObj->GetHeader();
+
+ {
+ ENTER_SPIN_LOCK(pHeader);
+ ADIndex appDomainIndex = pHeader->GetAppDomainIndex();
+ if (! appDomainIndex.m_dwIndex)
+ {
+ SyncBlock* syncBlock = pObj->PassiveGetSyncBlock();
+ if (syncBlock)
+ appDomainIndex = syncBlock->GetAppDomainIndex();
+ }
+
+ pHeader->ResetIndex();
+
+ if (appDomainIndex.m_dwIndex)
+ {
+ pHeader->SetIndex(appDomainIndex.m_dwIndex<<SBLK_APPDOMAIN_SHIFT);
+ }
+ LEAVE_SPIN_LOCK(pHeader);
+ }
+
+ SyncTableEntry::GetSyncTableEntry()[i].m_Object = (Object *)(m_FreeSyncTableList | 1);
+ m_FreeSyncTableList = i << 1;
+
+ DeleteSyncBlock(SyncTableEntry::GetSyncTableEntry()[i].m_SyncBlock);
+ }
+}
+#endif
+
+// destroy the sync block cache
+/* static */
+// This method is no longer called.
+#if 0
+void SyncBlockCache::Detach()
+{
+ SyncBlockCache::GetSyncBlockCache()->DoDetach();
+}
+#endif
+
+
+// create the sync block cache
+/* static */
+void SyncBlockCache::Start()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ DWORD* bm = new DWORD [BitMapSize(SYNC_TABLE_INITIAL_SIZE+1)];
+
+ memset (bm, 0, BitMapSize (SYNC_TABLE_INITIAL_SIZE+1)*sizeof(DWORD));
+
+ SyncTableEntry::GetSyncTableEntryByRef() = new SyncTableEntry[SYNC_TABLE_INITIAL_SIZE+1];
+#ifdef _DEBUG
+ for (int i=0; i<SYNC_TABLE_INITIAL_SIZE+1; i++) {
+ SyncTableEntry::GetSyncTableEntry()[i].m_SyncBlock = NULL;
+ }
+#endif
+
+ SyncTableEntry::GetSyncTableEntry()[0].m_SyncBlock = 0;
+ SyncBlockCache::GetSyncBlockCache() = new (&g_SyncBlockCacheInstance) SyncBlockCache;
+
+ SyncBlockCache::GetSyncBlockCache()->m_EphemeralBitmap = bm;
+
+#ifndef FEATURE_PAL
+ InitializeSListHead(&InteropSyncBlockInfo::s_InteropInfoStandbyList);
+#endif // !FEATURE_PAL
+}
+
+
+// destroy the sync block cache
+/* static */
+void SyncBlockCache::Stop()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // cache must be destroyed first, since it can traverse the table to find all the
+ // sync blocks which are live and thus must have their critical sections destroyed.
+ if (SyncBlockCache::GetSyncBlockCache())
+ {
+ delete SyncBlockCache::GetSyncBlockCache();
+ SyncBlockCache::GetSyncBlockCache() = 0;
+ }
+
+ if (SyncTableEntry::GetSyncTableEntry())
+ {
+        // the table is allocated with new[] in Start/Grow, so use array delete
+        delete [] SyncTableEntry::GetSyncTableEntry();
+ SyncTableEntry::GetSyncTableEntryByRef() = 0;
+ }
+}
+
+
+void SyncBlockCache::InsertCleanupSyncBlock(SyncBlock* psb)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // free up the threads that are waiting before we use the link
+ // for other purposes
+ if (psb->m_Link.m_pNext != NULL)
+ {
+ while (ThreadQueue::DequeueThread(psb) != NULL)
+ continue;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (psb->m_pInteropInfo)
+ {
+        // called during GC,
+        // so only do minor cleanup
+ MinorCleanupSyncBlockComData(psb->m_pInteropInfo);
+ }
+#endif // FEATURE_COMINTEROP
+
+ // This method will be called only by the GC thread
+ //<TODO>@todo add an assert for the above statement</TODO>
+ // we don't need to lock here
+ //EnterCacheLock();
+
+ psb->m_Link.m_pNext = m_pCleanupBlockList;
+ m_pCleanupBlockList = &psb->m_Link;
+
+ // we don't need a lock here
+ //LeaveCacheLock();
+}
+
+SyncBlock* SyncBlockCache::GetNextCleanupSyncBlock()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // we don't need a lock here,
+ // as this is called only on the finalizer thread currently
+
+ SyncBlock *psb = NULL;
+ if (m_pCleanupBlockList)
+ {
+ // get the actual sync block pointer
+ psb = (SyncBlock *) (((BYTE *) m_pCleanupBlockList) - offsetof(SyncBlock, m_Link));
+ m_pCleanupBlockList = m_pCleanupBlockList->m_pNext;
+ }
+ return psb;
+}
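+
+// Illustrative note (not part of the original change): the cast above is the
+// classic "container-of" idiom. Given a pointer to the embedded m_Link field,
+// subtracting offsetof(SyncBlock, m_Link) recovers the enclosing SyncBlock:
+//
+//     SLink     *plink = m_pCleanupBlockList;
+//     SyncBlock *psb   = (SyncBlock *)((BYTE *)plink - offsetof(SyncBlock, m_Link));
+//
+// This works because m_Link is embedded by value in SyncBlock, so the link
+// node's address and the block's address differ by a compile-time constant.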
+
+
+// returns and removes the next free syncblock from the list
+// the cache lock must be entered to call this
+SyncBlock *SyncBlockCache::GetNextFreeSyncBlock()
+{
+ CONTRACTL
+ {
+ INJECT_FAULT(COMPlusThrowOM());
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG // Instrumentation for OOM fault injection testing
+ delete new char;
+#endif
+
+ SyncBlock *psb;
+ SLink *plst = m_FreeBlockList;
+
+ COUNTER_ONLY(GetPerfCounters().m_GC.cSinkBlocks ++);
+ m_ActiveCount++;
+
+ if (plst)
+ {
+ m_FreeBlockList = m_FreeBlockList->m_pNext;
+
+        // m_FreeCount shouldn't be 0 here, since the free list was non-empty
+ m_FreeCount--;
+
+ // get the actual sync block pointer
+ psb = (SyncBlock *) (((BYTE *) plst) - offsetof(SyncBlock, m_Link));
+
+ return psb;
+ }
+ else
+ {
+ if ((m_SyncBlocks == NULL) || (m_FreeSyncBlock >= MAXSYNCBLOCK))
+ {
+#ifdef DUMP_SB
+// LogSpewAlways("Allocating new syncblock array\n");
+// DumpSyncBlockCache();
+#endif
+ SyncBlockArray* newsyncblocks = new(SyncBlockArray);
+ if (!newsyncblocks)
+ COMPlusThrowOM ();
+
+ newsyncblocks->m_Next = m_SyncBlocks;
+ m_SyncBlocks = newsyncblocks;
+ m_FreeSyncBlock = 0;
+ }
+ return &(((SyncBlock*)m_SyncBlocks->m_Blocks)[m_FreeSyncBlock++]);
+ }
+
+}
+
+void SyncBlockCache::Grow()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ STRESS_LOG0(LF_SYNC, LL_INFO10000, "SyncBlockCache::NewSyncBlockSlot growing SyncBlockCache \n");
+
+ NewArrayHolder<SyncTableEntry> newSyncTable (NULL);
+ NewArrayHolder<DWORD> newBitMap (NULL);
+ DWORD * oldBitMap;
+
+ // Compute the size of the new synctable. Normally, we double it - unless
+ // doing so would create slots with indices too high to fit within the
+ // mask. If so, we create a synctable up to the mask limit. If we're
+ // already at the mask limit, then caller is out of luck.
+ DWORD newSyncTableSize;
+ if (m_SyncTableSize <= (MASK_SYNCBLOCKINDEX >> 1))
+ {
+ newSyncTableSize = m_SyncTableSize * 2;
+ }
+ else
+ {
+ newSyncTableSize = MASK_SYNCBLOCKINDEX;
+ }
+
+ if (!(newSyncTableSize > m_SyncTableSize)) // Make sure we actually found room to grow!
+ {
+ COMPlusThrowOM();
+ }
+
+ newSyncTable = new(SyncTableEntry[newSyncTableSize]);
+ newBitMap = new(DWORD[BitMapSize (newSyncTableSize)]);
+
+
+ {
+ //! From here on, we assume that we will succeed and start doing global side-effects.
+ //! Any operation that could fail must occur before this point.
+ CANNOTTHROWCOMPLUSEXCEPTION();
+ FAULT_FORBID();
+
+ newSyncTable.SuppressRelease();
+ newBitMap.SuppressRelease();
+
+
+        // We chain the old tables because we can't delete
+        // them before all the threads are stopped
+        // (at the next GC)
+ SyncTableEntry::GetSyncTableEntry() [0].m_Object = (Object *)m_OldSyncTables;
+ m_OldSyncTables = SyncTableEntry::GetSyncTableEntry();
+
+ memset (newSyncTable, 0, newSyncTableSize*sizeof (SyncTableEntry));
+ memset (newBitMap, 0, BitMapSize (newSyncTableSize)*sizeof (DWORD));
+ CopyMemory (newSyncTable, SyncTableEntry::GetSyncTableEntry(),
+ m_SyncTableSize*sizeof (SyncTableEntry));
+
+ CopyMemory (newBitMap, m_EphemeralBitmap,
+ BitMapSize (m_SyncTableSize)*sizeof (DWORD));
+
+ oldBitMap = m_EphemeralBitmap;
+ m_EphemeralBitmap = newBitMap;
+ delete[] oldBitMap;
+
+ _ASSERTE((m_SyncTableSize & MASK_SYNCBLOCKINDEX) == m_SyncTableSize);
+        // note: we do not care if another thread does not see the new size right away;
+        // however, we really do not want it to see the new size without also seeing the new array
+ //@TODO do we still leak here if two threads come here at the same time ?
+ FastInterlockExchangePointer(&SyncTableEntry::GetSyncTableEntryByRef(), newSyncTable.GetValue());
+
+ m_FreeSyncTableIndex++;
+
+ m_SyncTableSize = newSyncTableSize;
+
+#ifdef _DEBUG
+ static int dumpSBOnResize = -1;
+
+ if (dumpSBOnResize == -1)
+ dumpSBOnResize = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpOnResize);
+
+ if (dumpSBOnResize)
+ {
+ LogSpewAlways("SyncBlockCache resized\n");
+ DumpSyncBlockCache();
+ }
+#endif
+ }
+}
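+
+// Worked example (illustrative, not part of the original change): with a table
+// of size S, Grow() doubles to 2*S while S still fits under half of
+// MASK_SYNCBLOCKINDEX; once S exceeds half the mask it grows to exactly
+// MASK_SYNCBLOCKINDEX, and a further Grow() finds newSyncTableSize equal to
+// m_SyncTableSize and throws OOM. E.g. if MASK_SYNCBLOCKINDEX were 0x03FFFFFF,
+// a table of 0x02000000 entries would grow to 0x03FFFFFF rather than
+// 0x04000000, since every slot index must fit within the mask.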
+
+DWORD SyncBlockCache::NewSyncBlockSlot(Object *obj)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+ _ASSERTE(m_CacheLock.OwnedByCurrentThread()); // GetSyncBlock takes the lock, make sure no one else does.
+
+ DWORD indexNewEntry;
+ if (m_FreeSyncTableList)
+ {
+ indexNewEntry = (DWORD)(m_FreeSyncTableList >> 1);
+ _ASSERTE ((size_t)SyncTableEntry::GetSyncTableEntry()[indexNewEntry].m_Object.Load() & 1);
+ m_FreeSyncTableList = (size_t)SyncTableEntry::GetSyncTableEntry()[indexNewEntry].m_Object.Load() & ~1;
+ }
+ else if ((indexNewEntry = (DWORD)(m_FreeSyncTableIndex)) >= m_SyncTableSize)
+ {
+ // This is kept out of line to keep stuff like the C++ EH prolog (needed for holders) off
+ // of the common path.
+ Grow();
+ }
+ else
+ {
+#ifdef _DEBUG
+ static int dumpSBOnNewIndex = -1;
+
+ if (dumpSBOnNewIndex == -1)
+ dumpSBOnNewIndex = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpOnNewIndex);
+
+ if (dumpSBOnNewIndex)
+ {
+ LogSpewAlways("SyncBlockCache index incremented\n");
+ DumpSyncBlockCache();
+ }
+#endif
+ m_FreeSyncTableIndex ++;
+ }
+
+
+ CardTableSetBit (indexNewEntry);
+
+ // In debug builds the m_SyncBlock at indexNewEntry should already be null, since we should
+ // start out with a null table and always null it out on delete.
+ _ASSERTE(SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_SyncBlock == NULL);
+ SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_SyncBlock = NULL;
+ SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_Object = obj;
+
+ _ASSERTE(indexNewEntry != 0);
+
+ return indexNewEntry;
+}
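+
+// Illustrative note (not part of the original change): free sync table entries
+// reuse m_Object to encode a singly linked free list. The low bit tags an entry
+// as free, and m_FreeSyncTableList stores the head index shifted left by one.
+// E.g. when slot 7 is freed while the list head is slot 3
+// (m_FreeSyncTableList == 3 << 1), slot 7's m_Object becomes
+// (Object *)((3 << 1) | 1) and m_FreeSyncTableList becomes 7 << 1. Allocation
+// above reverses this: index = m_FreeSyncTableList >> 1, then the head advances
+// to the stored value with the tag bit masked off. Slot 0 is never handed out.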
+
+
+// free a used sync block, only called from CleanupSyncBlocks.
+void SyncBlockCache::DeleteSyncBlock(SyncBlock *psb)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ // clean up comdata
+ if (psb->m_pInteropInfo)
+ {
+#ifdef FEATURE_COMINTEROP
+ CleanupSyncBlockComData(psb->m_pInteropInfo);
+#endif // FEATURE_COMINTEROP
+
+#ifndef FEATURE_PAL
+ if (g_fEEShutDown)
+ {
+ delete psb->m_pInteropInfo;
+ }
+ else
+ {
+ psb->m_pInteropInfo->~InteropSyncBlockInfo();
+ InterlockedPushEntrySList(&InteropSyncBlockInfo::s_InteropInfoStandbyList, (PSLIST_ENTRY)psb->m_pInteropInfo);
+ }
+#else // !FEATURE_PAL
+ delete psb->m_pInteropInfo;
+#endif // !FEATURE_PAL
+ }
+
+#ifdef EnC_SUPPORTED
+ // clean up EnC info
+ if (psb->m_pEnCInfo)
+ psb->m_pEnCInfo->Cleanup();
+#endif // EnC_SUPPORTED
+
+ // Destruct the SyncBlock, but don't reclaim its memory. (Overridden
+ // operator delete).
+ delete psb;
+
+    // synchronize with the consumers,
+ // <TODO>@todo we don't really need a lock here, we can come up
+ // with some simple algo to avoid taking a lock </TODO>
+ {
+ SyncBlockCache::LockHolder lh(this);
+
+ DeleteSyncBlockMemory(psb);
+ }
+}
+
+
+// returns the sync block memory to the free pool but does not destruct sync block (must own cache lock already)
+void SyncBlockCache::DeleteSyncBlockMemory(SyncBlock *psb)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ COUNTER_ONLY(GetPerfCounters().m_GC.cSinkBlocks --);
+
+ m_ActiveCount--;
+ m_FreeCount++;
+
+ psb->m_Link.m_pNext = m_FreeBlockList;
+ m_FreeBlockList = &psb->m_Link;
+
+}
+
+// free a used sync block
+void SyncBlockCache::GCDeleteSyncBlock(SyncBlock *psb)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Destruct the SyncBlock, but don't reclaim its memory. (Overridden
+ // operator delete).
+ delete psb;
+
+ COUNTER_ONLY(GetPerfCounters().m_GC.cSinkBlocks --);
+
+
+ m_ActiveCount--;
+ m_FreeCount++;
+
+ psb->m_Link.m_pNext = m_FreeBlockList;
+ m_FreeBlockList = &psb->m_Link;
+}
+
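+// Note (illustrative, not part of the original change): GCDeleteSyncBlock
+// duplicates DeleteSyncBlockMemory's bookkeeping inline without taking the
+// cache lock; it runs during GC weak-pointer scanning (see
+// GCWeakPtrScanElement below), when mutator threads are suspended, so nothing
+// else can touch the free list concurrently.
+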
+void SyncBlockCache::GCWeakPtrScan(HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ // First delete the obsolete arrays since we have exclusive access
+ BOOL fSetSyncBlockCleanup = FALSE;
+
+ SyncTableEntry* arr;
+ while ((arr = m_OldSyncTables) != NULL)
+ {
+ m_OldSyncTables = (SyncTableEntry*)arr[0].m_Object.Load();
+        delete [] arr;
+ }
+
+#ifdef DUMP_SB
+ LogSpewAlways("GCWeakPtrScan starting\n");
+#endif
+
+#ifdef VERIFY_HEAP
+ if (g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK)
+ STRESS_LOG0 (LF_GC | LF_SYNC, LL_INFO100, "GCWeakPtrScan starting\n");
+#endif
+
+ if (GCHeap::GetGCHeap()->GetCondemnedGeneration() < GCHeap::GetGCHeap()->GetMaxGeneration())
+ {
+#ifdef VERIFY_HEAP
+        //for VSW 294550: we saw a stale object reference in SyncBlkCache, so we want to make sure the card
+        //table logic above works correctly so that every ephemeral entry is promoted.
+        //For verification, we make a copy of the sync table in the relocation phase, promote it using the
+        //slow approach, and compare the result with the original one
+        DWORD freeSyncTableIndexCopy = m_FreeSyncTableIndex;
+ SyncTableEntry * syncTableShadow = NULL;
+ if ((g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK) && !((ScanContext*)lp1)->promotion)
+ {
+ syncTableShadow = new(nothrow) SyncTableEntry [m_FreeSyncTableIndex];
+ if (syncTableShadow)
+ {
+ memcpy (syncTableShadow, SyncTableEntry::GetSyncTableEntry(), m_FreeSyncTableIndex * sizeof (SyncTableEntry));
+ }
+ }
+#endif //VERIFY_HEAP
+
+ //scan the bitmap
+ size_t dw = 0;
+ while (1)
+ {
+ while (dw < BitMapSize (m_SyncTableSize) && (m_EphemeralBitmap[dw]==0))
+ {
+ dw++;
+ }
+ if (dw < BitMapSize (m_SyncTableSize))
+ {
+ //found one
+ for (int i = 0; i < card_word_width; i++)
+ {
+ size_t card = i+dw*card_word_width;
+ if (CardSetP (card))
+ {
+ BOOL clear_card = TRUE;
+ for (int idx = 0; idx < card_size; idx++)
+ {
+ size_t nb = CardIndex (card) + idx;
+                            if ((nb < m_FreeSyncTableIndex) && (nb > 0))
+ {
+ Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
+ if (o && !((size_t)o & 1))
+ {
+ if (GCHeap::GetGCHeap()->IsEphemeral (o))
+ {
+ clear_card = FALSE;
+
+ GCWeakPtrScanElement ((int)nb, scanProc,
+ lp1, lp2, fSetSyncBlockCleanup);
+ }
+ }
+ }
+ }
+ if (clear_card)
+ ClearCard (card);
+ }
+ }
+ dw++;
+ }
+ else
+ break;
+ }
+
+#ifdef VERIFY_HEAP
+    //for VSW 294550: we saw a stale object reference in SyncBlkCache, so we want to make sure the card
+    //table logic above works correctly so that every ephemeral entry is promoted. To verify, we make a
+    //copy of the sync table, promote it using the slow approach, and compare the result with the real one
+ if (g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK)
+ {
+ if (syncTableShadow)
+ {
+ for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
+ {
+ Object **keyv = (Object **) &syncTableShadow[nb].m_Object;
+
+ if (((size_t) *keyv & 1) == 0)
+ {
+ (*scanProc) (keyv, NULL, lp1, lp2);
+ SyncBlock *pSB = syncTableShadow[nb].m_SyncBlock;
+ if (*keyv != 0 && (!pSB || !pSB->IsIDisposable()))
+ {
+ if (syncTableShadow[nb].m_Object != SyncTableEntry::GetSyncTableEntry()[nb].m_Object)
+ DebugBreak ();
+ }
+ }
+ }
+ delete []syncTableShadow;
+ syncTableShadow = NULL;
+ }
+        if (freeSyncTableIndexCopy != m_FreeSyncTableIndex)
+ DebugBreak ();
+ }
+#endif //VERIFY_HEAP
+
+ }
+ else
+ {
+ for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
+ {
+ GCWeakPtrScanElement (nb, scanProc, lp1, lp2, fSetSyncBlockCleanup);
+ }
+
+
+ }
+
+ if (fSetSyncBlockCleanup)
+ {
+ // mark the finalizer thread saying requires cleanup
+ FinalizerThread::GetFinalizerThread()->SetSyncBlockCleanup();
+ FinalizerThread::EnableFinalization();
+ }
+
+#if defined(VERIFY_HEAP)
+ if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
+ {
+ if (((ScanContext*)lp1)->promotion)
+ {
+
+ for (int nb = 1; nb < (int)m_FreeSyncTableIndex; nb++)
+ {
+ Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
+ if (((size_t)o & 1) == 0)
+ {
+ o->Validate();
+ }
+ }
+ }
+ }
+#endif // VERIFY_HEAP
+}
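+
+// Illustrative sketch (not part of the original change) of the ephemeral-scan
+// math above: each bitmap DWORD covers card_word_width cards, and each card
+// covers card_size sync table entries, so bit i of bitmap word dw maps to
+// entries [CardIndex(i + dw*card_word_width), CardIndex(...) + card_size).
+// For example, if card_size and card_word_width were both 32, bit 5 of word 2
+// would cover entries 32*(5 + 2*32) = 2208 through 2239, and only those
+// entries are visited when that card is set.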
+
+/* Scan the weak pointers in the SyncBlockEntry and report them to the GC. If the
+ reference is dead, then return TRUE */
+
+BOOL SyncBlockCache::GCWeakPtrScanElement (int nb, HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2,
+ BOOL& cleanup)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Object **keyv = (Object **) &SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
+
+#ifdef DUMP_SB
+ struct Param
+ {
+ Object **keyv;
+ char *name;
+ } param;
+ param.keyv = keyv;
+
+ PAL_TRY(Param *, pParam, &param) {
+ if (! *pParam->keyv)
+ pParam->name = "null";
+ else if ((size_t) *pParam->keyv & 1)
+ pParam->name = "free";
+ else {
+ pParam->name = (*pParam->keyv)->GetClass()->GetDebugClassName();
+ if (strlen(pParam->name) == 0)
+ pParam->name = "<INVALID>";
+ }
+ } PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
+ param.name = "<INVALID>";
+ }
+ PAL_ENDTRY
+ LogSpewAlways("[%4.4d]: %8.8x, %s\n", nb, *keyv, param.name);
+#endif
+
+ if (((size_t) *keyv & 1) == 0)
+ {
+#ifdef VERIFY_HEAP
+ if (g_pConfig->GetHeapVerifyLevel () & EEConfig::HEAPVERIFY_SYNCBLK)
+ {
+ STRESS_LOG3 (LF_GC | LF_SYNC, LL_INFO100000, "scanning syncblk[%d, %p, %p]\n", nb, (size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock, (size_t)*keyv);
+ }
+#endif
+
+ (*scanProc) (keyv, NULL, lp1, lp2);
+ SyncBlock *pSB = SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock;
+ if ((*keyv == 0 ) || (pSB && pSB->IsIDisposable()))
+ {
+#ifdef VERIFY_HEAP
+ if (g_pConfig->GetHeapVerifyLevel () & EEConfig::HEAPVERIFY_SYNCBLK)
+ {
+ STRESS_LOG3 (LF_GC | LF_SYNC, LL_INFO100000, "freeing syncblk[%d, %p, %p]\n", nb, (size_t)pSB, (size_t)*keyv);
+ }
+#endif
+
+ if (*keyv)
+ {
+ _ASSERTE (pSB);
+ GCDeleteSyncBlock(pSB);
+ //clean the object syncblock header
+ ((Object*)(*keyv))->GetHeader()->GCResetIndex();
+ }
+ else if (pSB)
+ {
+
+ cleanup = TRUE;
+ // insert block into cleanup list
+ InsertCleanupSyncBlock (SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock);
+#ifdef DUMP_SB
+ LogSpewAlways(" Cleaning up block at %4.4d\n", nb);
+#endif
+ }
+
+ // delete the entry
+#ifdef DUMP_SB
+ LogSpewAlways(" Deleting block at %4.4d\n", nb);
+#endif
+ SyncTableEntry::GetSyncTableEntry()[nb].m_Object = (Object *)(m_FreeSyncTableList | 1);
+ m_FreeSyncTableList = nb << 1;
+ SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock = NULL;
+ return TRUE;
+ }
+ else
+ {
+#ifdef DUMP_SB
+ LogSpewAlways(" Keeping block at %4.4d with oref %8.8x\n", nb, *keyv);
+#endif
+ }
+ }
+ return FALSE;
+}
+
+void SyncBlockCache::GCDone(BOOL demoting, int max_gen)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (demoting &&
+ (GCHeap::GetGCHeap()->GetCondemnedGeneration() ==
+ GCHeap::GetGCHeap()->GetMaxGeneration()))
+ {
+ //scan the bitmap
+ size_t dw = 0;
+ while (1)
+ {
+ while (dw < BitMapSize (m_SyncTableSize) &&
+ (m_EphemeralBitmap[dw]==(DWORD)~0))
+ {
+ dw++;
+ }
+ if (dw < BitMapSize (m_SyncTableSize))
+ {
+ //found one
+ for (int i = 0; i < card_word_width; i++)
+ {
+ size_t card = i+dw*card_word_width;
+ if (!CardSetP (card))
+ {
+ for (int idx = 0; idx < card_size; idx++)
+ {
+ size_t nb = CardIndex (card) + idx;
+                        if ((nb < m_FreeSyncTableIndex) && (nb > 0))
+ {
+ Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
+ if (o && !((size_t)o & 1))
+ {
+ if (GCHeap::GetGCHeap()->WhichGeneration (o) < (unsigned int)max_gen)
+ {
+ SetCard (card);
+ break;
+
+ }
+ }
+ }
+ }
+ }
+ }
+ dw++;
+ }
+ else
+ break;
+ }
+ }
+}
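+
+// Note (illustrative, not part of the original change): when a full GC
+// (condemned generation == max generation) has demoted objects, GCDone
+// re-dirties the card of any live entry whose object now lives in a generation
+// below max_gen, so the next ephemeral GCWeakPtrScan will visit it. Fully set
+// bitmap words (~0) are skipped, since all of their cards are already dirty.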
+
+
+#if defined (VERIFY_HEAP)
+
+#ifndef _DEBUG
+#ifdef _ASSERTE
+#undef _ASSERTE
+#endif
+// do/while(0) keeps the macro safe inside unbraced if/else statements
+#define _ASSERTE(c) do { if (!(c)) DebugBreak(); } while (0)
+#endif
+
+void SyncBlockCache::VerifySyncTableEntry()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
+ {
+ Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
+ // if the slot was just allocated, the object may still be null
+ if (o && (((size_t)o & 1) == 0))
+ {
+ //there is no need to verify next object's header because this is called
+ //from verify_heap, which will verify every object anyway
+ o->Validate(TRUE, FALSE);
+
+ //
+ // This loop is just a heuristic to try to catch errors, but it is not 100%.
+ // To prevent false positives, we weaken our assert below to exclude the case
+ // where the index is still NULL, but we've reached the end of our loop.
+ //
+ static const DWORD max_iterations = 100;
+ DWORD loop = 0;
+
+ for (; loop < max_iterations; loop++)
+ {
+                    // The syncblock index may be in the process of being updated by another thread.
+ if (o->GetHeader()->GetHeaderSyncBlockIndex() != 0)
+ {
+ break;
+ }
+ __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ }
+
+ DWORD idx = o->GetHeader()->GetHeaderSyncBlockIndex();
+ _ASSERTE(idx == nb || ((0 == idx) && (loop == max_iterations)));
+ _ASSERTE(!GCHeap::GetGCHeap()->IsEphemeral(o) || CardSetP(CardOf(nb)));
+ }
+ }
+}
+
+#ifndef _DEBUG
+#undef _ASSERTE
+#define _ASSERTE(expr) ((void)0)
+#endif // _DEBUG
+
+#endif // VERIFY_HEAP
+
+#if CHECK_APP_DOMAIN_LEAKS
+void SyncBlockCache::CheckForUnloadedInstances(ADIndex unloadingIndex)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+    // This check can only be done in leak mode, because agile objects will be in the domain with
+    // their index set to their creating domain, and the check would fail.
+ if (! g_pConfig->AppDomainLeaks())
+ return;
+
+ for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
+ {
+ SyncTableEntry *pEntry = &SyncTableEntry::GetSyncTableEntry()[nb];
+ Object *oref = (Object *) pEntry->m_Object;
+ if (((size_t) oref & 1) != 0)
+ continue;
+
+ ADIndex idx;
+ if (oref)
+ idx = pEntry->m_Object->GetHeader()->GetRawAppDomainIndex();
+ if (! idx.m_dwIndex && pEntry->m_SyncBlock)
+ idx = pEntry->m_SyncBlock->GetAppDomainIndex();
+        // if the following assert fires, somebody is holding a reference to an object in an unloaded appdomain
+ if (idx == unloadingIndex)
+ {
+            // object must be agile to have survived the unload. If we can't make it agile, that's a bug
+ if (!oref->TrySetAppDomainAgile(TRUE))
+ _ASSERTE(!"Detected instance of unloaded appdomain that survived GC\n");
+ }
+ }
+}
+#endif
+
+#ifdef _DEBUG
+
+void DumpSyncBlockCache()
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ SyncBlockCache *pCache = SyncBlockCache::GetSyncBlockCache();
+
+ LogSpewAlways("Dumping SyncBlockCache size %d\n", pCache->m_FreeSyncTableIndex);
+
+ static int dumpSBStyle = -1;
+ if (dumpSBStyle == -1)
+ dumpSBStyle = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpStyle);
+ if (dumpSBStyle == 0)
+ return;
+
+ BOOL isString = FALSE;
+ DWORD objectCount = 0;
+ DWORD slotCount = 0;
+
+ for (DWORD nb = 1; nb < pCache->m_FreeSyncTableIndex; nb++)
+ {
+ isString = FALSE;
+ char buffer[1024], buffer2[1024];
+ LPCUTF8 descrip = "null";
+ SyncTableEntry *pEntry = &SyncTableEntry::GetSyncTableEntry()[nb];
+ Object *oref = (Object *) pEntry->m_Object;
+ if (((size_t) oref & 1) != 0)
+ {
+ descrip = "free";
+ oref = 0;
+ }
+ else
+ {
+ ++slotCount;
+ if (oref)
+ {
+ ++objectCount;
+
+ struct Param
+ {
+ LPCUTF8 descrip;
+ Object *oref;
+ char *buffer2;
+ UINT cch2;
+ BOOL isString;
+ } param;
+ param.descrip = descrip;
+ param.oref = oref;
+ param.buffer2 = buffer2;
+ param.cch2 = COUNTOF(buffer2);
+ param.isString = isString;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ pParam->descrip = pParam->oref->GetMethodTable()->GetDebugClassName();
+ if (strlen(pParam->descrip) == 0)
+ pParam->descrip = "<INVALID>";
+ else if (pParam->oref->GetMethodTable() == g_pStringClass)
+ {
+ sprintf_s(pParam->buffer2, pParam->cch2, "%s (%S)", pParam->descrip, ObjectToSTRINGREF((StringObject*)pParam->oref)->GetBuffer());
+ pParam->descrip = pParam->buffer2;
+ pParam->isString = TRUE;
+ }
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
+ param.descrip = "<INVALID>";
+ }
+ PAL_ENDTRY
+
+ descrip = param.descrip;
+ isString = param.isString;
+ }
+ ADIndex idx;
+ if (oref)
+ idx = pEntry->m_Object->GetHeader()->GetRawAppDomainIndex();
+ if (! idx.m_dwIndex && pEntry->m_SyncBlock)
+ idx = pEntry->m_SyncBlock->GetAppDomainIndex();
+ if (idx.m_dwIndex && ! SystemDomain::System()->TestGetAppDomainAtIndex(idx))
+ {
+ sprintf_s(buffer, COUNTOF(buffer), "** unloaded (%3.3x) %s", idx.m_dwIndex, descrip);
+ descrip = buffer;
+ }
+ else
+ {
+ sprintf_s(buffer, COUNTOF(buffer), "(AD %3.3x) %s", idx.m_dwIndex, descrip);
+ descrip = buffer;
+ }
+ }
+ if (dumpSBStyle < 2)
+ LogSpewAlways("[%4.4d]: %8.8x %s\n", nb, oref, descrip);
+ else if (dumpSBStyle == 2 && ! isString)
+ LogSpewAlways("[%4.4d]: %s\n", nb, descrip);
+ }
+ LogSpewAlways("Done dumping SyncBlockCache used slots: %d, objects: %d\n", slotCount, objectCount);
+}
+#endif
+
+// ***************************************************************************
+//
+// ObjHeader class implementation
+//
+// ***************************************************************************
+
+#if defined(ENABLE_CONTRACTS_IMPL)
+// The LOCK_TAKEN/RELEASED macros need a "pointer" to the lock object to do
+// comparisons between takes & releases (and to provide debugging info to the
+// developer). Ask the syncblock for its lock contract pointer, if the
+// syncblock exists. Otherwise, use the MethodTable* from the Object. That's not great,
+// as it's not unique, so we might miss unbalanced lock takes/releases from
+// different objects of the same type. However, our hands are tied, and we can't
+// do much better.
+void * ObjHeader::GetPtrForLockContract()
+{
+ if (GetHeaderSyncBlockIndex() == 0)
+ {
+ return (void *) GetBaseObject()->GetMethodTable();
+ }
+
+ return PassiveGetSyncBlock()->GetPtrForLockContract();
+}
+#endif // defined(ENABLE_CONTRACTS_IMPL)
+
+// this enters the monitor of an object
+void ObjHeader::EnterObjMonitor()
+{
+ WRAPPER_NO_CONTRACT;
+ GetSyncBlock()->EnterMonitor();
+}
+
+// Non-blocking version of above
+BOOL ObjHeader::TryEnterObjMonitor(INT32 timeOut)
+{
+ WRAPPER_NO_CONTRACT;
+ return GetSyncBlock()->TryEnterMonitor(timeOut);
+}
+
+BOOL ObjHeader::LeaveObjMonitor()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+    // this function switches to preemptive mode, so we need to protect the object on some paths
+ OBJECTREF thisObj = ObjectToOBJECTREF (GetBaseObject ());
+
+ DWORD dwSwitchCount = 0;
+
+ for (;;)
+ {
+ AwareLock::LeaveHelperAction action = thisObj->GetHeader ()->LeaveObjMonitorHelper(GetThread());
+
+ switch(action)
+ {
+ case AwareLock::LeaveHelperAction_None:
+ // We are done
+ return TRUE;
+ case AwareLock::LeaveHelperAction_Signal:
+ {
+ // Signal the event
+ SyncBlock *psb = thisObj->GetHeader ()->PassiveGetSyncBlock();
+ if (psb != NULL)
+ psb->QuickGetMonitor()->Signal();
+ }
+ return TRUE;
+ case AwareLock::LeaveHelperAction_Yield:
+ YieldProcessor();
+ continue;
+ case AwareLock::LeaveHelperAction_Contention:
+ // Some thread is updating the syncblock value.
+ {
+ //protect the object before switching mode
+ GCPROTECT_BEGIN (thisObj);
+ GCX_PREEMP();
+ __SwitchToThread(0, ++dwSwitchCount);
+ GCPROTECT_END ();
+ }
+ continue;
+ default:
+ // Must be an error otherwise - ignore it
+ _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
+ return FALSE;
+ }
+ }
+}
+
+// The only difference between LeaveObjMonitor and LeaveObjMonitorAtException is switch
+// to preemptive mode around __SwitchToThread
+BOOL ObjHeader::LeaveObjMonitorAtException()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ DWORD dwSwitchCount = 0;
+
+ for (;;)
+ {
+ AwareLock::LeaveHelperAction action = LeaveObjMonitorHelper(GetThread());
+
+ switch(action)
+ {
+ case AwareLock::LeaveHelperAction_None:
+ // We are done
+ return TRUE;
+ case AwareLock::LeaveHelperAction_Signal:
+ {
+ // Signal the event
+ SyncBlock *psb = PassiveGetSyncBlock();
+ if (psb != NULL)
+ psb->QuickGetMonitor()->Signal();
+ }
+ return TRUE;
+ case AwareLock::LeaveHelperAction_Yield:
+ YieldProcessor();
+ continue;
+ case AwareLock::LeaveHelperAction_Contention:
+ // Some thread is updating the syncblock value.
+ //
+ // We never toggle GC mode while holding the spinlock (BeginNoTriggerGC/EndNoTriggerGC
+ // in EnterSpinLock/ReleaseSpinLock ensures it). Thus we do not need to switch to preemptive
+ // while waiting on the spinlock.
+ //
+ {
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+ continue;
+ default:
+ // Must be an error otherwise - ignore it
+ _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
+ return FALSE;
+ }
+ }
+}
+
+#endif //!DACCESS_COMPILE
+
+// Returns TRUE if the lock is owned and FALSE otherwise
+// threadId is set to the ID (Thread::GetThreadId()) of the thread which owns the lock
+// acquisitionCount is set to the number of times the lock needs to be released before
+// it is unowned
+BOOL ObjHeader::GetThreadOwningMonitorLock(DWORD *pThreadId, DWORD *pAcquisitionCount)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+#ifndef DACCESS_COMPILE
+ if (!IsGCSpecialThread ()) {MODE_COOPERATIVE;} else {MODE_ANY;}
+#endif
+ }
+ CONTRACTL_END;
+ SUPPORTS_DAC;
+
+
+ DWORD bits = GetBits();
+
+ if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
+ {
+ if (bits & BIT_SBLK_IS_HASHCODE)
+ {
+ //
+ // This thread does not own the lock.
+ //
+ *pThreadId = 0;
+ *pAcquisitionCount = 0;
+ return FALSE;
+ }
+ else
+ {
+ //
+ // We have a syncblk
+ //
+ DWORD index = bits & MASK_SYNCBLOCKINDEX;
+ SyncBlock* psb = g_pSyncTable[(int)index].m_SyncBlock;
+
+ _ASSERTE(psb->GetMonitor() != NULL);
+ Thread* pThread = psb->GetMonitor()->m_HoldingThread;
+ if(pThread == NULL)
+ {
+ *pThreadId = 0;
+ *pAcquisitionCount = 0;
+ return FALSE;
+ }
+ else
+ {
+ *pThreadId = pThread->GetThreadId();
+ *pAcquisitionCount = psb->GetMonitor()->m_Recursion;
+ return TRUE;
+ }
+ }
+ }
+ else
+ {
+ //
+ // We have a thinlock
+ //
+
+ DWORD lockThreadId, recursionLevel;
+ lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
+ recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
+        //if the thread ID is 0, the recursion level has to be zero,
+        //but the thread ID doesn't have to be valid because the lock could be orphaned
+ _ASSERTE (lockThreadId != 0 || recursionLevel == 0 );
+
+ *pThreadId = lockThreadId;
+ if(lockThreadId != 0)
+ {
+ // in the header, the recursionLevel of 0 means the lock is owned once
+ // (this differs from m_Recursion in the AwareLock)
+ *pAcquisitionCount = recursionLevel + 1;
+ return TRUE;
+ }
+ else
+ {
+ *pAcquisitionCount = 0;
+ return FALSE;
+ }
+ }
+}
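+
+// Worked example (illustrative, not part of the original change): for a thin
+// lock, the header stores the owning thread ID in SBLK_MASK_LOCK_THREADID and
+// the recursion level in SBLK_MASK_LOCK_RECLEVEL. A recursion level of 0 with
+// a non-zero thread ID means "entered once", so the reported acquisition count
+// is recursionLevel + 1; e.g. a thread that entered the monitor three times on
+// the same object shows recursionLevel == 2 and *pAcquisitionCount == 3.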
+
+#ifndef DACCESS_COMPILE
+
+#ifdef MP_LOCKS
+DEBUG_NOINLINE void ObjHeader::EnterSpinLock()
+{
+ // NOTE: This function cannot have a dynamic contract. If it does, the contract's
+ // destructor will reset the CLR debug state to what it was before entering the
+ // function, which will undo the BeginNoTriggerGC() call below.
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+#ifdef _DEBUG
+ int i = 0;
+#endif
+
+ DWORD dwSwitchCount = 0;
+
+ while (TRUE)
+ {
+#ifdef _DEBUG
+#ifdef _WIN64
+ // Give 64bit more time because there isn't a remoting fast path now, and we've hit this assert
+ // needlessly in CLRSTRESS.
+ if (i++ > 30000)
+#else
+ if (i++ > 10000)
+#endif // _WIN64
+ _ASSERTE(!"ObjHeader::EnterLock timed out");
+#endif
+ // get the value so that it doesn't get changed under us.
+ LONG curValue = m_SyncBlockValue.LoadWithoutBarrier();
+
+ // check if lock taken
+ if (! (curValue & BIT_SBLK_SPIN_LOCK))
+ {
+ // try to take the lock
+ LONG newValue = curValue | BIT_SBLK_SPIN_LOCK;
+ LONG result = FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, newValue, curValue);
+ if (result == curValue)
+ break;
+ }
+ if (g_SystemInfo.dwNumberOfProcessors > 1)
+ {
+ for (int spinCount = 0; spinCount < BIT_SBLK_SPIN_COUNT; spinCount++)
+ {
+ if (! (m_SyncBlockValue & BIT_SBLK_SPIN_LOCK))
+ break;
+                YieldProcessor(); // indicate to the processor that we are spinning
+ }
+ if (m_SyncBlockValue & BIT_SBLK_SPIN_LOCK)
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+ else
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+
+ INCONTRACT(Thread* pThread = GetThread());
+ INCONTRACT(if (pThread != NULL) pThread->BeginNoTriggerGC(__FILE__, __LINE__));
+}
+#else
+DEBUG_NOINLINE void ObjHeader::EnterSpinLock()
+{
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+#ifdef _DEBUG
+ int i = 0;
+#endif
+
+ DWORD dwSwitchCount = 0;
+
+ while (TRUE)
+ {
+#ifdef _DEBUG
+ if (i++ > 10000)
+ _ASSERTE(!"ObjHeader::EnterLock timed out");
+#endif
+ // get the value so that it doesn't get changed under us.
+ LONG curValue = m_SyncBlockValue.LoadWithoutBarrier();
+
+ // check if lock taken
+ if (! (curValue & BIT_SBLK_SPIN_LOCK))
+ {
+ // try to take the lock
+ LONG newValue = curValue | BIT_SBLK_SPIN_LOCK;
+ LONG result = FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, newValue, curValue);
+ if (result == curValue)
+ break;
+ }
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+
+ INCONTRACT(Thread* pThread = GetThread());
+ INCONTRACT(if (pThread != NULL) pThread->BeginNoTriggerGC(__FILE__, __LINE__));
+}
+#endif //MP_LOCKS
+
+DEBUG_NOINLINE void ObjHeader::ReleaseSpinLock()
+{
+ SCAN_SCOPE_END;
+ LIMITED_METHOD_CONTRACT;
+
+ INCONTRACT(Thread* pThread = GetThread());
+ INCONTRACT(if (pThread != NULL) pThread->EndNoTriggerGC());
+
+ FastInterlockAnd(&m_SyncBlockValue, ~BIT_SBLK_SPIN_LOCK);
+}
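+
+// Note (illustrative, not part of the original change): the header spin lock
+// protocol above is a single test-and-set of BIT_SBLK_SPIN_LOCK via
+// interlocked compare-exchange. In the MP_LOCKS flavor, multi-processor
+// machines spin up to BIT_SBLK_SPIN_COUNT iterations with YieldProcessor()
+// before yielding the quantum; on a single processor spinning is pointless
+// (the owner cannot run while we hold the CPU), so the thread yields
+// immediately. Release is one interlocked AND that clears the bit, after
+// exiting the no-trigger GC region.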
+
+#endif //!DACCESS_COMPILE
+
+ADIndex ObjHeader::GetRawAppDomainIndex()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ // pull the value out before checking it to avoid race condition
+ DWORD value = m_SyncBlockValue.LoadWithoutBarrier();
+ if ((value & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0)
+ return ADIndex((value >> SBLK_APPDOMAIN_SHIFT) & SBLK_MASK_APPDOMAININDEX);
+ return ADIndex(0);
+}
+
+ADIndex ObjHeader::GetAppDomainIndex()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+ ADIndex indx = GetRawAppDomainIndex();
+ if (indx.m_dwIndex)
+ return indx;
+ SyncBlock* syncBlock = PassiveGetSyncBlock();
+ if (! syncBlock)
+ return ADIndex(0);
+
+ return syncBlock->GetAppDomainIndex();
+}
+
+#ifndef DACCESS_COMPILE
+
+void ObjHeader::SetAppDomainIndex(ADIndex indx)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ //
+ // This should only be called during the header initialization,
+ // so don't worry about races.
+ //
+
+ BOOL done = FALSE;
+
+#ifdef _DEBUG
+ static int forceSB = -1;
+
+ if (forceSB == -1)
+ forceSB = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ADForceSB);
+
+ if (forceSB)
+        // force a syncblock so we get one for every object.
+ GetSyncBlock();
+#endif
+
+ if (GetHeaderSyncBlockIndex() == 0 && indx.m_dwIndex < SBLK_MASK_APPDOMAININDEX)
+ {
+ ENTER_SPIN_LOCK(this);
+ //Try one more time
+ if (GetHeaderSyncBlockIndex() == 0)
+ {
+ _ASSERTE(GetRawAppDomainIndex().m_dwIndex == 0);
+ // can store it in the object header
+ FastInterlockOr(&m_SyncBlockValue, indx.m_dwIndex << SBLK_APPDOMAIN_SHIFT);
+ done = TRUE;
+ }
+ LEAVE_SPIN_LOCK(this);
+ }
+
+ if (!done)
+ {
+ // must create a syncblock entry and store the appdomain indx there
+ SyncBlock *psb = GetSyncBlock();
+ _ASSERTE(psb);
+ psb->SetAppDomainIndex(indx);
+ }
+}
+
+void ObjHeader::ResetAppDomainIndex(ADIndex indx)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //
+ // This should only be called during the header initialization,
+ // so don't worry about races.
+ //
+
+ BOOL done = FALSE;
+
+ if (GetHeaderSyncBlockIndex() == 0 && indx.m_dwIndex < SBLK_MASK_APPDOMAININDEX)
+ {
+ ENTER_SPIN_LOCK(this);
+ //Try one more time
+ if (GetHeaderSyncBlockIndex() == 0)
+ {
+ // can store it in the object header
+ while (TRUE)
+ {
+ DWORD oldValue = m_SyncBlockValue.LoadWithoutBarrier();
+ DWORD newValue = (oldValue & (~(SBLK_MASK_APPDOMAININDEX << SBLK_APPDOMAIN_SHIFT))) |
+ (indx.m_dwIndex << SBLK_APPDOMAIN_SHIFT);
+ if (FastInterlockCompareExchange((LONG*)&m_SyncBlockValue,
+ newValue,
+ oldValue) == (LONG)oldValue)
+ {
+ break;
+ }
+ }
+ done = TRUE;
+ }
+ LEAVE_SPIN_LOCK(this);
+ }
+
+ if (!done)
+ {
+ // must create a syncblock entry and store the appdomain indx there
+ SyncBlock *psb = GetSyncBlock();
+ _ASSERTE(psb);
+ psb->SetAppDomainIndex(indx);
+ }
+}
+
+void ObjHeader::ResetAppDomainIndexNoFailure(ADIndex indx)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(indx.m_dwIndex < SBLK_MASK_APPDOMAININDEX);
+ }
+ CONTRACTL_END;
+
+ ENTER_SPIN_LOCK(this);
+ if (GetHeaderSyncBlockIndex() == 0)
+ {
+ // can store it in the object header
+ while (TRUE)
+ {
+ DWORD oldValue = m_SyncBlockValue.LoadWithoutBarrier();
+ DWORD newValue = (oldValue & (~(SBLK_MASK_APPDOMAININDEX << SBLK_APPDOMAIN_SHIFT))) |
+ (indx.m_dwIndex << SBLK_APPDOMAIN_SHIFT);
+ if (FastInterlockCompareExchange((LONG*)&m_SyncBlockValue,
+ newValue,
+ oldValue) == (LONG)oldValue)
+ {
+ break;
+ }
+ }
+ }
+ else
+ {
+ SyncBlock *psb = PassiveGetSyncBlock();
+ _ASSERTE(psb);
+ psb->SetAppDomainIndex(indx);
+ }
+ LEAVE_SPIN_LOCK(this);
+}
+
+DWORD ObjHeader::GetSyncBlockIndex()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ DWORD indx;
+
+ if ((indx = GetHeaderSyncBlockIndex()) == 0)
+ {
+ BOOL fMustCreateSyncBlock = FALSE;
+
+ if (GetAppDomainIndex().m_dwIndex)
+ {
+ // if have an appdomain set then must create a sync block to store it
+ fMustCreateSyncBlock = TRUE;
+ }
+ else
+ {
+ //Need to get it from the cache
+ SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
+
+ //Try one more time
+ if (GetHeaderSyncBlockIndex() == 0)
+ {
+ ENTER_SPIN_LOCK(this);
+ // Now the header will be stable - check whether hashcode, appdomain index or lock information is stored in it.
+ DWORD bits = GetBits();
+ if (((bits & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) == (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) ||
+ ((bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0 &&
+ (bits & ((SBLK_MASK_APPDOMAININDEX<<SBLK_APPDOMAIN_SHIFT)|SBLK_MASK_LOCK_RECLEVEL|SBLK_MASK_LOCK_THREADID)) != 0))
+ {
+ // Need a sync block to store this info
+ fMustCreateSyncBlock = TRUE;
+ }
+ else
+ {
+ SetIndex(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | SyncBlockCache::GetSyncBlockCache()->NewSyncBlockSlot(GetBaseObject()));
+ }
+ LEAVE_SPIN_LOCK(this);
+ }
+ // SyncBlockCache::LockHolder goes out of scope here
+ }
+
+ if (fMustCreateSyncBlock)
+ GetSyncBlock();
+
+ if ((indx = GetHeaderSyncBlockIndex()) == 0)
+ COMPlusThrowOM();
+ }
+
+ return indx;
+}
+
+#if defined (VERIFY_HEAP)
+
+BOOL ObjHeader::Validate (BOOL bVerifySyncBlkIndex)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ DWORD bits = GetBits ();
+ Object * obj = GetBaseObject ();
+ BOOL bVerifyMore = g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_SYNCBLK;
+    //the highest 2 bits have overloaded meanings
+ //for string objects:
+ // BIT_SBLK_STRING_HAS_NO_HIGH_CHARS 0x80000000
+ // BIT_SBLK_STRING_HIGH_CHARS_KNOWN 0x40000000
+ // BIT_SBLK_STRING_HAS_SPECIAL_SORT 0xC0000000
+ //for other objects:
+ // BIT_SBLK_AGILE_IN_PROGRESS 0x80000000
+ // BIT_SBLK_FINALIZER_RUN 0x40000000
+ if (bits & BIT_SBLK_STRING_HIGH_CHAR_MASK)
+ {
+ if (obj->GetGCSafeMethodTable () == g_pStringClass)
+ {
+ if (bVerifyMore)
+ {
+ ASSERT_AND_CHECK (((StringObject *)obj)->ValidateHighChars());
+ }
+ }
+ else
+ {
+#if CHECK_APP_DOMAIN_LEAKS
+ if (bVerifyMore)
+ {
+ if (bits & BIT_SBLK_AGILE_IN_PROGRESS)
+ {
+ BOOL fResult;
+ ASSERT_AND_CHECK (
+ //BIT_SBLK_AGILE_IN_PROGRESS is set only if the object needs to check appdomain agile
+ obj->ShouldCheckAppDomainAgile(FALSE, &fResult)
+ //before BIT_SBLK_AGILE_IN_PROGRESS is cleared, the object might already be marked as agile
+ ||(obj->PassiveGetSyncBlock () && obj->PassiveGetSyncBlock ()->IsAppDomainAgile ())
+ ||(obj->PassiveGetSyncBlock () && obj->PassiveGetSyncBlock ()->IsCheckedForAppDomainAgile ())
+ );
+ }
+ }
+#else //CHECK_APP_DOMAIN_LEAKS
+ //BIT_SBLK_AGILE_IN_PROGRESS is set only in debug build
+ ASSERT_AND_CHECK (!(bits & BIT_SBLK_AGILE_IN_PROGRESS));
+#endif //CHECK_APP_DOMAIN_LEAKS
+ if (bits & BIT_SBLK_FINALIZER_RUN)
+ {
+ ASSERT_AND_CHECK (obj->GetGCSafeMethodTable ()->HasFinalizer ());
+ }
+ }
+ }
+
+    //BIT_SBLK_GC_RESERVE (0x20000000) is only set during GC. But for frozen objects, we don't clear the bit
+ if (bits & BIT_SBLK_GC_RESERVE)
+ {
+ if (!GCHeap::GetGCHeap()->IsGCInProgress () && !GCHeap::GetGCHeap()->IsConcurrentGCInProgress ())
+ {
+#ifdef FEATURE_BASICFREEZE
+ ASSERT_AND_CHECK (GCHeap::GetGCHeap()->IsInFrozenSegment(obj));
+#else //FEATURE_BASICFREEZE
+ _ASSERTE(!"Reserve bit not cleared");
+ return FALSE;
+#endif //FEATURE_BASICFREEZE
+ }
+ }
+
+ //Don't know how to verify BIT_SBLK_SPIN_LOCK (0x10000000)
+
+ //BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX (0x08000000)
+ if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
+ {
+ //if BIT_SBLK_IS_HASHCODE (0x04000000) is not set,
+ //rest of the DWORD is SyncBlk Index
+ if (!(bits & BIT_SBLK_IS_HASHCODE))
+ {
+ if (bVerifySyncBlkIndex && CNameSpace::GetGcRuntimeStructuresValid ())
+ {
+ DWORD sbIndex = bits & MASK_SYNCBLOCKINDEX;
+ ASSERT_AND_CHECK(SyncTableEntry::GetSyncTableEntry()[sbIndex].m_Object == obj);
+ }
+ }
+ else
+ {
+            // rest of the DWORD is a hash code, and there isn't much we can do to validate it
+ }
+ }
+ else
+ {
+ //if BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX is clear, rest of DWORD is thin lock thread ID,
+ //thin lock recursion level and appdomain index
+ DWORD lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
+ DWORD recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
+        //if the thread ID is 0, the recursion level has to be zero,
+        //but the thread ID doesn't have to be valid because the lock could be orphaned
+ ASSERT_AND_CHECK (lockThreadId != 0 || recursionLevel == 0 );
+
+ DWORD adIndex = (bits >> SBLK_APPDOMAIN_SHIFT) & SBLK_MASK_APPDOMAININDEX;
+        if (adIndex != 0)
+ {
+#ifndef _DEBUG
+ //in non debug build, only objects of domain neutral type have appdomain index in header
+ ASSERT_AND_CHECK (obj->GetGCSafeMethodTable()->IsDomainNeutral());
+#endif //!_DEBUG
+            //todo: validate the AD index.
+            //The trick here is that agile objects could have an invalid AD index. Ideally we should call
+            //Object::GetAppDomain to do all the agile validation, but it has side effects, like marking the
+            //object as agile, and it only does the check if g_pConfig->AppDomainLeaks() is on
+ }
+ }
+
+ return TRUE;
+}
+
+#endif //VERIFY_HEAP
+
+// This holder takes care of the SyncBlock memory cleanup if an OOM occurs inside a call to NewSyncBlockSlot.
+//
+// Warning: Assumes you already own the cache lock.
+// Assumes nothing allocated inside the SyncBlock (only releases the memory, does not destruct.)
+//
+// This holder really just meets GetSyncBlock()'s special needs. It's not a general purpose holder.
+
+
+// Do not inline this call. (fyuan)
+// The SyncBlockMemoryHolder release path is normally just a null-pointer check and a return;
+// inlining VoidDeleteSyncBlockMemory would add expensive exception handling to that common path.
+void VoidDeleteSyncBlockMemory(SyncBlock* psb)
+{
+ LIMITED_METHOD_CONTRACT;
+ SyncBlockCache::GetSyncBlockCache()->DeleteSyncBlockMemory(psb);
+}
+
+typedef Wrapper<SyncBlock*, DoNothing<SyncBlock*>, VoidDeleteSyncBlockMemory, NULL> SyncBlockMemoryHolder;
+
+
+// get the sync block for an existing object
+SyncBlock *ObjHeader::GetSyncBlock()
+{
+ CONTRACT(SyncBlock *)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ PTR_SyncBlock syncBlock = GetBaseObject()->PassiveGetSyncBlock();
+ DWORD indx = 0;
+ BOOL indexHeld = FALSE;
+
+ if (syncBlock)
+ {
+#ifdef _DEBUG
+ // Has our backpointer been correctly updated through every GC?
+ PTR_SyncTableEntry pEntries(SyncTableEntry::GetSyncTableEntry());
+ _ASSERTE(pEntries[GetHeaderSyncBlockIndex()].m_Object == GetBaseObject());
+#endif // _DEBUG
+ RETURN syncBlock;
+ }
+
+ //Need to get it from the cache
+ {
+ SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
+
+ //Try one more time
+ syncBlock = GetBaseObject()->PassiveGetSyncBlock();
+ if (syncBlock)
+ RETURN syncBlock;
+
+
+ SyncBlockMemoryHolder syncBlockMemoryHolder(SyncBlockCache::GetSyncBlockCache()->GetNextFreeSyncBlock());
+ syncBlock = syncBlockMemoryHolder;
+
+ if ((indx = GetHeaderSyncBlockIndex()) == 0)
+ {
+ indx = SyncBlockCache::GetSyncBlockCache()->NewSyncBlockSlot(GetBaseObject());
+ }
+ else
+ {
+ //We already have an index, we need to hold the syncblock
+ indexHeld = TRUE;
+ }
+
+ {
+ //! NewSyncBlockSlot has side-effects that we don't have backout for - thus, that must be the last
+ //! failable operation called.
+ CANNOTTHROWCOMPLUSEXCEPTION();
+ FAULT_FORBID();
+
+
+ syncBlockMemoryHolder.SuppressRelease();
+
+ new (syncBlock) SyncBlock(indx);
+
+ {
+ // after this point, nobody can update the index in the header to give an AD index
+ ENTER_SPIN_LOCK(this);
+
+ {
+ // If there's an appdomain index stored in the header, transfer it to the syncblock
+
+ ADIndex dwAppDomainIndex = GetAppDomainIndex();
+ if (dwAppDomainIndex.m_dwIndex)
+ syncBlock->SetAppDomainIndex(dwAppDomainIndex);
+
+ // If the thin lock in the header is in use, transfer the information to the syncblock
+ DWORD bits = GetBits();
+ if ((bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0)
+ {
+ DWORD lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
+ DWORD recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
+ if (lockThreadId != 0 || recursionLevel != 0)
+ {
+ // recursionLevel can't be non-zero if thread id is 0
+ _ASSERTE(lockThreadId != 0);
+
+ Thread *pThread = g_pThinLockThreadIdDispenser->IdToThreadWithValidation(lockThreadId);
+
+ if (pThread == NULL)
+ {
+ // The lock is orphaned.
+ pThread = (Thread*) -1;
+ }
+ syncBlock->InitState();
+ syncBlock->SetAwareLock(pThread, recursionLevel + 1);
+ }
+ }
+ else if ((bits & BIT_SBLK_IS_HASHCODE) != 0)
+ {
+ DWORD hashCode = bits & MASK_HASHCODE;
+
+ syncBlock->SetHashCode(hashCode);
+ }
+ }
+
+ SyncTableEntry::GetSyncTableEntry() [indx].m_SyncBlock = syncBlock;
+
+ // in order to avoid a race where some thread tries to get the AD index and we've already nuked it,
+ // make sure the syncblock etc is all setup with the AD index prior to replacing the index
+ // in the header
+ if (GetHeaderSyncBlockIndex() == 0)
+ {
+ // We have transferred the AppDomain into the syncblock above.
+ SetIndex(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | indx);
+ }
+
+            //If we already had an index, hold the syncblock
+            //for the lifetime of the object.
+ if (indexHeld)
+ syncBlock->SetPrecious();
+
+ LEAVE_SPIN_LOCK(this);
+ }
+ // SyncBlockCache::LockHolder goes out of scope here
+ }
+ }
+
+ RETURN syncBlock;
+}
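+
+// Summary note (illustrative, not part of the original change): GetSyncBlock
+// inflates a header into a syncblock by (1) allocating a slot and SyncBlock
+// under the cache lock, (2) transferring any appdomain index, thin-lock
+// owner/recursion, or hash code currently packed in the header into the
+// syncblock, and only then (3) publishing
+// BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | indx in the header, so no reader can
+// observe the index before the transferred state is in place.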
+
+BOOL ObjHeader::Wait(INT32 timeOut, BOOL exitContext)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // The following code may cause GC, so we must fetch the sync block from
+ // the object now in case it moves.
+ SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
+
+ // GetSyncBlock throws on failure
+ _ASSERTE(pSB != NULL);
+
+ // make sure we own the crst
+ if (!pSB->DoesCurrentThreadOwnMonitor())
+ COMPlusThrow(kSynchronizationLockException);
+
+#ifdef _DEBUG
+ Thread *pThread = GetThread();
+ DWORD curLockCount = pThread->m_dwLockCount;
+#endif
+
+ BOOL result = pSB->Wait(timeOut,exitContext);
+
+ _ASSERTE (curLockCount == pThread->m_dwLockCount);
+
+ return result;
+}
+
+void ObjHeader::Pulse()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // The following code may cause GC, so we must fetch the sync block from
+ // the object now in case it moves.
+ SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
+
+ // GetSyncBlock throws on failure
+ _ASSERTE(pSB != NULL);
+
+ // make sure we own the crst
+ if (!pSB->DoesCurrentThreadOwnMonitor())
+ COMPlusThrow(kSynchronizationLockException);
+
+ pSB->Pulse();
+}
+
+void ObjHeader::PulseAll()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // The following code may cause GC, so we must fetch the sync block from
+ // the object now in case it moves.
+ SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
+
+ // GetSyncBlock throws on failure
+ _ASSERTE(pSB != NULL);
+
+ // make sure we own the crst
+ if (!pSB->DoesCurrentThreadOwnMonitor())
+ COMPlusThrow(kSynchronizationLockException);
+
+ pSB->PulseAll();
+}
+
+
+// ***************************************************************************
+//
+// AwareLock class implementation (GC-aware locking)
+//
+// ***************************************************************************
+
+void AwareLock::AllocLockSemEvent()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Before we switch from cooperative, ensure that this syncblock won't disappear
+ // under us. For something as expensive as an event, do it permanently rather
+ // than transiently.
+ SetPrecious();
+
+ GCX_PREEMP();
+
+ // No need to take a lock - CLREvent::CreateMonitorEvent is thread safe
+ m_SemEvent.CreateMonitorEvent((SIZE_T)this);
+}
+
+void AwareLock::Enter()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ Thread *pCurThread = GetThread();
+
+ for (;;)
+ {
+ // Read existing lock state.
+ LONG state = m_MonitorHeld.LoadWithoutBarrier();
+
+ if (state == 0)
+ {
+ // Common case: lock not held, no waiters. Attempt to acquire lock by
+ // switching lock bit.
+ if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, 1, 0) == 0)
+ {
+ break;
+ }
+ }
+ else
+ {
+ // It's possible to get here with waiters but no lock held, but in this
+ // case a signal is about to be fired which will wake up a waiter. So
+            // for fairness' sake we should wait too.
+ // Check first for recursive lock attempts on the same thread.
+ if (m_HoldingThread == pCurThread)
+ {
+ goto Recursion;
+ }
+
+ // Attempt to increment this count of waiters then goto contention
+ // handling code.
+ if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, (state + 2), state) == state)
+ {
+ goto MustWait;
+ }
+ }
+ }
+
+ // We get here if we successfully acquired the mutex.
+ m_HoldingThread = pCurThread;
+ m_Recursion = 1;
+ pCurThread->IncLockCount();
+
+#if defined(_DEBUG) && defined(TRACK_SYNC)
+ {
+ // The best place to grab this is from the ECall frame
+ Frame *pFrame = pCurThread->GetFrame();
+ int caller = (pFrame && pFrame != FRAME_TOP
+ ? (int) pFrame->GetReturnAddress()
+ : -1);
+ pCurThread->m_pTrackSync->EnterSync(caller, this);
+ }
+#endif
+
+ return;
+
+MustWait:
+ // Didn't manage to get the mutex, must wait.
+ EnterEpilog(pCurThread);
+ return;
+
+Recursion:
+ // Got the mutex via recursive locking on the same thread.
+ _ASSERTE(m_Recursion >= 1);
+ m_Recursion++;
+#if defined(_DEBUG) && defined(TRACK_SYNC)
+ // The best place to grab this is from the ECall frame
+ Frame *pFrame = pCurThread->GetFrame();
+ int caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
+ pCurThread->m_pTrackSync->EnterSync(caller, this);
+#endif
+}
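+
+// Illustrative note (not part of the original change): m_MonitorHeld packs two
+// fields into one LONG: bit 0 is the "lock held" flag, and the remaining bits
+// count waiters (each waiter adds 2). Example transitions: 0 -> 1 (uncontended
+// enter), 1 -> 3 (a second thread registers as a waiter), 3 -> 2 (the owner
+// leaves and signals), 2 -> 1 (the waiter wakes and claims the lock via
+// (state - 2) | 1 in EnterEpilogHelper below).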
+
+BOOL AwareLock::TryEnter(INT32 timeOut)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ if (timeOut == 0) {MODE_ANY;} else {MODE_COOPERATIVE;}
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (timeOut != 0)
+ {
+ LARGE_INTEGER qpFrequency, qpcStart, qpcEnd;
+ BOOL canUseHighRes = QueryPerformanceCounter(&qpcStart);
+
+ // try some more busy waiting
+ if (Contention(timeOut))
+ return TRUE;
+
+ DWORD elapsed = 0;
+ if (canUseHighRes && QueryPerformanceCounter(&qpcEnd) && QueryPerformanceFrequency(&qpFrequency))
+ elapsed = (DWORD)((qpcEnd.QuadPart-qpcStart.QuadPart)/(qpFrequency.QuadPart/1000));
+
+ if (elapsed >= (DWORD)timeOut)
+ return FALSE;
+
+ if (timeOut != (INT32)INFINITE)
+ timeOut -= elapsed;
+ }
+
+ Thread *pCurThread = GetThread();
+ TESTHOOKCALL(AppDomainCanBeUnloaded(pCurThread->GetDomain()->GetId().m_dwId,FALSE));
+
+ if (pCurThread->IsAbortRequested())
+ {
+ pCurThread->HandleThreadAbort();
+ }
+
+retry:
+
+ for (;;) {
+
+ // Read existing lock state.
+ LONG state = m_MonitorHeld.LoadWithoutBarrier();
+
+ if (state == 0)
+ {
+ // Common case: lock not held, no waiters. Attempt to acquire lock by
+ // switching lock bit.
+ if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, 1, 0) == 0)
+ {
+ break;
+ }
+ }
+ else
+ {
+ // It's possible to get here with waiters but no lock held, but in this
+ // case a signal is about to be fired which will wake up a waiter. So
+            // for fairness' sake we should wait too.
+ // Check first for recursive lock attempts on the same thread.
+ if (m_HoldingThread == pCurThread)
+ {
+ goto Recursion;
+ }
+ else
+ {
+ goto WouldBlock;
+ }
+ }
+ }
+
+ // We get here if we successfully acquired the mutex.
+ m_HoldingThread = pCurThread;
+ m_Recursion = 1;
+ pCurThread->IncLockCount();
+
+#if defined(_DEBUG) && defined(TRACK_SYNC)
+ {
+ // The best place to grab this is from the ECall frame
+ Frame *pFrame = pCurThread->GetFrame();
+ int caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
+ pCurThread->m_pTrackSync->EnterSync(caller, this);
+ }
+#endif
+
+ return TRUE;
+
+WouldBlock:
+ // Didn't manage to get the mutex, return failure if no timeout, else wait
+ // for at most timeout milliseconds for the mutex.
+ if (!timeOut)
+ {
+ return FALSE;
+ }
+
+ // The precondition for EnterEpilog is that the count of waiters be bumped
+ // to account for this thread
+
+ for (;;)
+ {
+ // Read existing lock state.
+ LONG state = m_MonitorHeld.LoadWithoutBarrier();
+
+ if (state == 0)
+ {
+ goto retry;
+ }
+
+ if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, (state + 2), state) == state)
+ {
+ break;
+ }
+ }
+
+ return EnterEpilog(pCurThread, timeOut);
+
+Recursion:
+ // Got the mutex via recursive locking on the same thread.
+ _ASSERTE(m_Recursion >= 1);
+ m_Recursion++;
+#if defined(_DEBUG) && defined(TRACK_SYNC)
+ // The best place to grab this is from the ECall frame
+ Frame *pFrame = pCurThread->GetFrame();
+ int caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
+ pCurThread->m_pTrackSync->EnterSync(caller, this);
+#endif
+
+ return true;
+}
+
+BOOL AwareLock::EnterEpilog(Thread* pCurThread, INT32 timeOut)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ // While we are in this frame the thread is considered blocked on the
+ // critical section of the monitor lock according to the debugger
+ DebugBlockingItem blockingMonitorInfo;
+ blockingMonitorInfo.dwTimeout = timeOut;
+ blockingMonitorInfo.pMonitor = this;
+ blockingMonitorInfo.pAppDomain = SystemDomain::GetCurrentDomain();
+ blockingMonitorInfo.type = DebugBlock_MonitorCriticalSection;
+ DebugBlockingItemHolder holder(pCurThread, &blockingMonitorInfo);
+
+ // We need a separate helper because it uses SEH and the holder has a
+ // destructor
+ return EnterEpilogHelper(pCurThread, timeOut);
+}
+
+BOOL AwareLock::EnterEpilogHelper(Thread* pCurThread, INT32 timeOut)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ DWORD ret = 0;
+ BOOL finished = false;
+
+ // Require all callers to be in cooperative mode. If they have switched to preemptive
+ // mode temporarily before calling here, then they are responsible for protecting
+ // the object associated with this lock.
+ _ASSERTE(pCurThread->PreemptiveGCDisabled());
+
+
+
+ OBJECTREF obj = GetOwningObject();
+
+ // We cannot allow the AwareLock to be cleaned up underneath us by the GC.
+ IncrementTransientPrecious();
+
+ GCPROTECT_BEGIN(obj);
+ {
+ if (!m_SemEvent.IsMonitorEventAllocated())
+ {
+ AllocLockSemEvent();
+ }
+ _ASSERTE(m_SemEvent.IsMonitorEventAllocated());
+
+ pCurThread->EnablePreemptiveGC();
+
+ for (;;)
+ {
+ // We might be interrupted during the wait (Thread.Interrupt), so we need an
+ // exception handler round the call.
+ struct Param
+ {
+ AwareLock *pThis;
+ INT32 timeOut;
+ DWORD ret;
+ } param;
+ param.pThis = this;
+ param.timeOut = timeOut;
+ param.ret = ret;
+
+ EE_TRY_FOR_FINALLY(Param *, pParam, &param)
+ {
+ // Measure the time we wait so that, in the case where we wake up
+ // and fail to acquire the mutex, we can adjust remaining timeout
+ // accordingly.
+ ULONGLONG start = CLRGetTickCount64();
+
+ pParam->ret = pParam->pThis->m_SemEvent.Wait(pParam->timeOut, TRUE);
+ _ASSERTE((pParam->ret == WAIT_OBJECT_0) || (pParam->ret == WAIT_TIMEOUT));
+
+ // When calculating duration we consider a couple of special cases.
+ // If the end tick is the same as the start tick we make the
+ // duration a millisecond, to ensure we make forward progress if
+ // there's a lot of contention on the mutex. Secondly, we have to
+ // cope with the case where the tick counter wrapped while we were
+ // waiting (we can cope with at most one wrap, so don't expect three
+ // month timeouts to be very accurate). Luckily for us, the latter
+ // case is taken care of by 32-bit modulo arithmetic automatically.
+
+ if (pParam->timeOut != (INT32) INFINITE)
+ {
+ ULONGLONG end = CLRGetTickCount64();
+ ULONGLONG duration;
+ if (end == start)
+ {
+ duration = 1;
+ }
+ else
+ {
+ duration = end - start;
+ }
+ duration = min(duration, (DWORD)pParam->timeOut);
+ pParam->timeOut -= (INT32)duration;
+ }
+ }
+ EE_FINALLY
+ {
+ if (GOT_EXCEPTION())
+ {
+ // We must decrement the waiter count.
+ for (;;)
+ {
+ LONG state = m_MonitorHeld.LoadWithoutBarrier();
+ _ASSERTE((state >> 1) != 0);
+ if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, state - 2, state) == state)
+ {
+ break;
+ }
+ }
+
+ // And signal the next waiter, else they'll wait forever.
+ m_SemEvent.Set();
+ }
+ } EE_END_FINALLY;
+
+ ret = param.ret;
+
+ if (ret == WAIT_OBJECT_0)
+ {
+ // Attempt to acquire lock (this also involves decrementing the waiter count).
+ for (;;)
+ {
+ LONG state = m_MonitorHeld.LoadWithoutBarrier();
+ _ASSERTE(((size_t)state >> 1) != 0);
+
+ if ((size_t)state & 1)
+ {
+ break;
+ }
+
+ if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, ((state - 2) | 1), state) == state)
+ {
+ finished = true;
+ break;
+ }
+ }
+ }
+ else
+ {
+ // We timed out, decrement waiter count.
+ for (;;)
+ {
+ LONG state = m_MonitorHeld.LoadWithoutBarrier();
+ _ASSERTE((state >> 1) != 0);
+ if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, state - 2, state) == state)
+ {
+ finished = true;
+ break;
+ }
+ }
+ }
+
+ if (finished)
+ {
+ break;
+ }
+ }
+
+ pCurThread->DisablePreemptiveGC();
+ }
+ GCPROTECT_END();
+ DecrementTransientPrecious();
+
+ if (ret == WAIT_TIMEOUT)
+ {
+ return FALSE;
+ }
+
+ m_HoldingThread = pCurThread;
+ m_Recursion = 1;
+ pCurThread->IncLockCount();
+
+#if defined(_DEBUG) && defined(TRACK_SYNC)
+ // The best place to grab this is from the ECall frame
+ Frame *pFrame = pCurThread->GetFrame();
+ int caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
+ pCurThread->m_pTrackSync->EnterSync(caller, this);
+#endif
+
+ return (ret != WAIT_TIMEOUT);
+}
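+
+// Editor's note - illustrative sketch, not CLR code: the timeout bookkeeping
+// in the wait loop above clamps the measured wait duration to the remaining
+// budget and subtracts it, charging at least one millisecond per wakeup so
+// the loop makes forward progress under contention. ReduceTimeout is our own
+// name for the idiom.
+#if 0 // excluded from the build
+#include <algorithm>
+#include <cstdint>
+
+inline int32_t ReduceTimeout(int32_t timeoutMs, uint64_t startTick, uint64_t endTick)
+{
+    // Unsigned subtraction makes a single tick-counter wrap come out right.
+    uint64_t duration = (endTick == startTick) ? 1 : (endTick - startTick);
+    duration = std::min<uint64_t>(duration, (uint64_t)timeoutMs);
+    return timeoutMs - (int32_t)duration;
+}
+#endif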
+
+
+BOOL AwareLock::Leave()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Thread* pThread = GetThread();
+
+ AwareLock::LeaveHelperAction action = LeaveHelper(pThread);
+
+ switch(action)
+ {
+ case AwareLock::LeaveHelperAction_None:
+ // We are done
+ return TRUE;
+ case AwareLock::LeaveHelperAction_Signal:
+ // Signal the event
+ Signal();
+ return TRUE;
+ default:
+ // Must be an error otherwise
+ _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
+ return FALSE;
+ }
+}
+
+#ifdef _DEBUG
+#define _LOGCONTENTION
+#endif // _DEBUG
+
+#ifdef _LOGCONTENTION
+inline void LogContention()
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef LOGGING
+ if (LoggingOn(LF_SYNC, LL_INFO100))
+ {
+ LogSpewAlways("Contention: Stack Trace Begin\n");
+ void LogStackTrace();
+ LogStackTrace();
+ LogSpewAlways("Contention: Stack Trace End\n");
+ }
+#endif
+}
+#else
+#define LogContention()
+#endif
+
+
+
+bool AwareLock::Contention(INT32 timeOut)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ DWORD startTime = 0;
+ if (timeOut != (INT32)INFINITE)
+ startTime = GetTickCount();
+
+ COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cContention++);
+
+#ifndef FEATURE_CORECLR
+ // Fire a contention start event for a managed contention
+ FireEtwContentionStart_V1(ETW::ContentionLog::ContentionStructs::ManagedContention, GetClrInstanceId());
+#endif // !FEATURE_CORECLR
+
+ LogContention();
+ Thread *pCurThread = GetThread();
+ OBJECTREF obj = GetOwningObject();
+ bool bEntered = false;
+ bool bKeepGoing = true;
+
+ // We cannot allow the AwareLock to be cleaned up underneath us by the GC.
+ IncrementTransientPrecious();
+
+ GCPROTECT_BEGIN(obj);
+ {
+ GCX_PREEMP();
+
+ // Try spinning and yielding before eventually blocking.
+ // The repetition limit (g_SpinConstants.dwRepetitions) is largely arbitrary - feel
+ // free to tune if you have evidence you're making things better.
+ for (DWORD iter = 0; iter < g_SpinConstants.dwRepetitions && bKeepGoing; iter++)
+ {
+ DWORD i = g_SpinConstants.dwInitialDuration;
+
+ do
+ {
+ if (TryEnter())
+ {
+ bEntered = true;
+ goto entered;
+ }
+
+ if (g_SystemInfo.dwNumberOfProcessors <= 1)
+ {
+ bKeepGoing = false;
+ break;
+ }
+
+ if (timeOut != (INT32)INFINITE && GetTickCount() - startTime >= (DWORD)timeOut)
+ {
+ bKeepGoing = false;
+ break;
+ }
+
+ // Spin for i iterations, and make sure to never go more than 20000 iterations between
+ // checking if we should SwitchToThread
+ int remainingDelay = i;
+
+ while (remainingDelay > 0)
+ {
+ int currentDelay = min(remainingDelay, 20000);
+ remainingDelay -= currentDelay;
+
+ // Delay by approximately 2*currentDelay clock cycles (Pentium III).
+
+ // This is brittle code - future processors may of course execute this
+ // faster or slower, and future code generators may eliminate the loop altogether.
+ // The precise value of the delay is not critical, however, and I can't think
+ // of a better way that isn't machine-dependent.
+ for (int delayCount = currentDelay; (--delayCount != 0); )
+ {
+ YieldProcessor(); // indicate to the processor that we are spinning
+ }
+
+ // TryEnter will not take the lock if it has waiters. This means we should not spin
+ // for long periods without giving the waiters a chance to run, since we won't
+ // make progress until they run and they may be waiting for our CPU. So once
+ // we're spinning >20000 iterations, check every 20000 iterations if there are
+ // waiters and if so call SwitchToThread.
+ //
+ // Since this only affects the spinning heuristic, calling HasWaiters now
+ // and getting a dirty read is fine. Note that it is important that TryEnter
+ // not take the lock because we could easily starve waiting threads.
+ // They make only one attempt before going back to sleep, and spinners on
+ // other CPUs would likely get the lock. We could fix this by allowing a
+ // woken thread to become a spinner again, at which point there are no
+ // starvation concerns and TryEnter can take the lock.
+ if (remainingDelay > 0 && HasWaiters())
+ {
+ __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ }
+ }
+
+ // exponential backoff: wait a factor longer in the next iteration
+ i *= g_SpinConstants.dwBackoffFactor;
+ }
+ while (i < g_SpinConstants.dwMaximumDuration);
+
+ {
+ GCX_COOP();
+ pCurThread->HandleThreadAbort();
+ }
+
+ __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ }
+entered: ;
+ }
+ GCPROTECT_END();
+ // we are in co-operative mode so no need to keep this set
+ DecrementTransientPrecious();
+ if (!bEntered && timeOut == (INT32)INFINITE)
+ {
+ // We've tried hard to enter - we need to eventually block to avoid wasting
+ // too much CPU time.
+ Enter();
+ bEntered = true;
+ }
+#ifndef FEATURE_CORECLR
+ FireEtwContentionStop(ETW::ContentionLog::ContentionStructs::ManagedContention, GetClrInstanceId());
+#endif // !FEATURE_CORECLR
+ return bEntered;
+}
+
+
+LONG AwareLock::LeaveCompletely()
+{
+ WRAPPER_NO_CONTRACT;
+
+ LONG count = 0;
+ while (Leave()) {
+ count++;
+ }
+ _ASSERTE(count > 0); // otherwise we were never in the lock
+
+ return count;
+}
+
+
+BOOL AwareLock::OwnedByCurrentThread()
+{
+ WRAPPER_NO_CONTRACT;
+ return (GetThread() == m_HoldingThread);
+}
+
+
+// ***************************************************************************
+//
+// SyncBlock class implementation
+//
+// ***************************************************************************
+
+// We maintain two queues for SyncBlock::Wait.
+// 1. Inside SyncBlock we queue all threads that are waiting on the SyncBlock.
+// When we pulse, we pick the thread from this queue using FIFO.
+// 2. We queue all SyncBlocks that a thread is waiting for in Thread::m_WaitEventLink.
+// When we pulse a thread, we find the event from this queue to set, and we also
+ // OR in a 1 bit in the syncblock value saved in the queue, so that we can return
+// immediately from SyncBlock::Wait if the syncblock has been pulsed.
+BOOL SyncBlock::Wait(INT32 timeOut, BOOL exitContext)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ Thread *pCurThread = GetThread();
+ BOOL isTimedOut = FALSE;
+ BOOL isEnqueued = FALSE;
+ WaitEventLink waitEventLink;
+ WaitEventLink *pWaitEventLink;
+
+ // As soon as we flip the switch, we are in a race with the GC, which could clean
+ // up the SyncBlock underneath us -- unless we report the object.
+ _ASSERTE(pCurThread->PreemptiveGCDisabled());
+
+ // Is this thread already waiting on this SyncBlock?
+ WaitEventLink *walk = pCurThread->WaitEventLinkForSyncBlock(this);
+ if (walk->m_Next) {
+ if (walk->m_Next->m_WaitSB == this) {
+ // Wait on the same lock again.
+ walk->m_Next->m_RefCount ++;
+ pWaitEventLink = walk->m_Next;
+ }
+ else if ((SyncBlock*)(((DWORD_PTR)walk->m_Next->m_WaitSB) & ~1) == this) {
+ // This thread has been pulsed. No need to wait.
+ return TRUE;
+ }
+ }
+ else {
+ // First time this thread is going to wait for this SyncBlock.
+ CLREvent* hEvent;
+ if (pCurThread->m_WaitEventLink.m_Next == NULL) {
+ hEvent = &(pCurThread->m_EventWait);
+ }
+ else {
+ hEvent = GetEventFromEventStore();
+ }
+ waitEventLink.m_WaitSB = this;
+ waitEventLink.m_EventWait = hEvent;
+ waitEventLink.m_Thread = pCurThread;
+ waitEventLink.m_Next = NULL;
+ waitEventLink.m_LinkSB.m_pNext = NULL;
+ waitEventLink.m_RefCount = 1;
+ pWaitEventLink = &waitEventLink;
+ walk->m_Next = pWaitEventLink;
+
+ // Before we enqueue it (and, thus, before it can be dequeued), reset the event
+ // that will awaken us.
+ hEvent->Reset();
+
+ // This thread is now waiting on this sync block
+ ThreadQueue::EnqueueThread(pWaitEventLink, this);
+
+ isEnqueued = TRUE;
+ }
+
+ _ASSERTE((SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1) == this);
+
+ PendingSync syncState(walk);
+
+ OBJECTREF obj = m_Monitor.GetOwningObject();
+
+ m_Monitor.IncrementTransientPrecious();
+
+ // While we are in this frame the thread is considered blocked on the
+ // event of the monitor lock according to the debugger
+ DebugBlockingItem blockingMonitorInfo;
+ blockingMonitorInfo.dwTimeout = timeOut;
+ blockingMonitorInfo.pMonitor = &m_Monitor;
+ blockingMonitorInfo.pAppDomain = SystemDomain::GetCurrentDomain();
+ blockingMonitorInfo.type = DebugBlock_MonitorEvent;
+ DebugBlockingItemHolder holder(pCurThread, &blockingMonitorInfo);
+
+ GCPROTECT_BEGIN(obj);
+ {
+ GCX_PREEMP();
+
+ // remember how many times we synchronized
+ syncState.m_EnterCount = LeaveMonitorCompletely();
+ _ASSERTE(syncState.m_EnterCount > 0);
+
+ Context* targetContext;
+ targetContext = pCurThread->GetContext();
+ _ASSERTE(targetContext);
+ Context* defaultContext;
+ defaultContext = pCurThread->GetDomain()->GetDefaultContext();
+ _ASSERTE(defaultContext);
+#ifdef FEATURE_REMOTING
+ if (exitContext &&
+ targetContext != defaultContext)
+ {
+ Context::MonitorWaitArgs waitArgs = {timeOut, &syncState, &isTimedOut};
+ Context::CallBackInfo callBackInfo = {Context::MonitorWait_callback, (void*) &waitArgs};
+ Context::RequestCallBack(CURRENT_APPDOMAIN_ID, defaultContext, &callBackInfo);
+ }
+ else
+#else
+ _ASSERTE( exitContext==NULL || targetContext == defaultContext);
+#endif
+ {
+ isTimedOut = pCurThread->Block(timeOut, &syncState);
+ }
+ }
+ GCPROTECT_END();
+ m_Monitor.DecrementTransientPrecious();
+
+ return !isTimedOut;
+}
+
+void SyncBlock::Pulse()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ WaitEventLink *pWaitEventLink;
+
+ if ((pWaitEventLink = ThreadQueue::DequeueThread(this)) != NULL)
+ pWaitEventLink->m_EventWait->Set();
+}
+
+void SyncBlock::PulseAll()
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ WaitEventLink *pWaitEventLink;
+
+ while ((pWaitEventLink = ThreadQueue::DequeueThread(this)) != NULL)
+ pWaitEventLink->m_EventWait->Set();
+}
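+
+// Editor's note - illustrative sketch, not CLR code: the Wait/Pulse protocol
+// above reduces to a FIFO of per-thread events. Wait enqueues the caller's
+// event and blocks on it, Pulse wakes exactly the head of the queue, and
+// PulseAll drains it. Event and WaiterQueue are invented names.
+#if 0 // excluded from the build
+#include <deque>
+
+struct Event { bool signaled = false; void Set() { signaled = true; } };
+
+struct WaiterQueue
+{
+    std::deque<Event*> q; // FIFO, like ThreadQueue
+
+    void Enqueue(Event& e) { q.push_back(&e); } // Wait: enqueue, then block on e
+    void Pulse()           { if (!q.empty()) { q.front()->Set(); q.pop_front(); } }
+    void PulseAll()        { while (!q.empty()) Pulse(); }
+};
+#endif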
+
+bool SyncBlock::SetInteropInfo(InteropSyncBlockInfo* pInteropInfo)
+{
+ WRAPPER_NO_CONTRACT;
+ SetPrecious();
+
+ // We could be agile, but not have noticed yet. We can't assert here
+ // that we live in any given domain, nor is this an appropriate place
+ // to re-parent the syncblock.
+/* _ASSERTE (m_dwAppDomainIndex.m_dwIndex == 0 ||
+ m_dwAppDomainIndex == SystemDomain::System()->DefaultDomain()->GetIndex() ||
+ m_dwAppDomainIndex == GetAppDomain()->GetIndex());
+ m_dwAppDomainIndex = GetAppDomain()->GetIndex();
+*/
+ return (FastInterlockCompareExchangePointer(&m_pInteropInfo,
+ pInteropInfo,
+ NULL) == NULL);
+}
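+
+// Editor's note: the compare-exchange against NULL above is the one-shot
+// publication idiom this file uses for lazily created side data (see also
+// SetUMEntryThunk and SetInterceptStub in syncblk.h). The field moves from
+// NULL to its final value exactly once, and the CAS result tells the caller
+// whether it won the race. A generic restatement, with PublishOnce as our own
+// name and the Win32 InterlockedCompareExchangePointer standing in for the
+// CLR wrapper:
+#if 0 // excluded from the build
+template <typename T>
+bool PublishOnce(T* volatile &slot, T* value)
+{
+    return InterlockedCompareExchangePointer((PVOID volatile *)&slot,
+                                             value, NULL) == NULL;
+}
+#endif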
+
+#ifdef EnC_SUPPORTED
+// Store information about fields added to this object by EnC
+// This must be called from a thread in the AppDomain of this object instance
+void SyncBlock::SetEnCInfo(EnCSyncBlockInfo *pEnCInfo)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // We can't recreate the field contents, so this SyncBlock can never go away
+ SetPrecious();
+
+ // Store the field info (should only ever happen once)
+ _ASSERTE( m_pEnCInfo == NULL );
+ m_pEnCInfo = pEnCInfo;
+
+ // Also store the AppDomain that this object lives in.
+ // Also verify that the AD was either not yet set, or set correctly before overwriting it.
+ // I'm not sure why it should ever be set to the default domain and then changed to a different domain,
+ // perhaps that can be removed.
+ _ASSERTE (m_dwAppDomainIndex.m_dwIndex == 0 ||
+ m_dwAppDomainIndex == SystemDomain::System()->DefaultDomain()->GetIndex() ||
+ m_dwAppDomainIndex == GetAppDomain()->GetIndex());
+ m_dwAppDomainIndex = GetAppDomain()->GetIndex();
+}
+#endif // EnC_SUPPORTED
+#endif // !DACCESS_COMPILE
+
+#if defined(_WIN64) && defined(_DEBUG)
+void ObjHeader::IllegalAlignPad()
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef LOGGING
+ void** object = ((void**) this) + 1;
+ LogSpewAlways("\n\n******** Illegal ObjHeader m_alignpad not 0, object" FMT_ADDR "\n\n",
+ DBG_ADDR(object));
+#endif
+ _ASSERTE(m_alignpad == 0);
+}
+#endif // _WIN64 && _DEBUG
+
+
diff --git a/src/vm/syncblk.h b/src/vm/syncblk.h
new file mode 100644
index 0000000000..ad527b9338
--- /dev/null
+++ b/src/vm/syncblk.h
@@ -0,0 +1,1395 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// SYNCBLK.H
+//
+
+//
+// Definition of a SyncBlock and the SyncBlockCache which manages it
+
+// See file:#SyncBlockOverview Sync block overview
+
+#ifndef _SYNCBLK_H_
+#define _SYNCBLK_H_
+
+#include "util.hpp"
+#include "slist.h"
+#include "crst.h"
+#include "handletable.h"
+#include "vars.hpp"
+
+// #SyncBlockOverview
+//
+// Every Object is preceded by an ObjHeader (at a negative offset). The code:ObjHeader has an index to a
+// code:SyncBlock. This index is 0 for the bulk of all instances, which indicates that the object shares a
+// dummy SyncBlock with most other objects.
+//
+// The SyncBlock is primarily responsible for object synchronization. However, it is also a "kitchen sink" of
+// sparsely allocated instance data. For instance, the default implementation of Hash() is based on the
+// existence of a code:SyncTableEntry. And objects exposed to or from COM, or through context boundaries, can
+// store sparse data here.
+//
+// SyncTableEntries and SyncBlocks are allocated in non-GC memory. A weak pointer from the SyncTableEntry to
+// the instance is used to ensure that the SyncBlock and SyncTableEntry are reclaimed (recycled) when the
+// instance dies.
+//
+// The organization of the SyncBlocks isn't intuitive (at least to me). Here's the explanation:
+//
+// Before each Object is an code:ObjHeader. If the object has a code:SyncBlock, the code:ObjHeader contains a
+// non-0 index to it.
+//
+// The index is looked up in the code:g_pSyncTable of SyncTableEntries. This means the table is consecutive
+// for all outstanding indices. Whenever it needs to grow, it doubles in size and copies all the original
+// entries. The old table is kept until GC time, when it can be safely discarded.
+//
+// Each code:SyncTableEntry has a backpointer to the object and a forward pointer to the actual SyncBlock.
+// The SyncBlock is allocated out of a SyncBlockArray which is essentially just a block of SyncBlocks.
+//
+// The code:SyncBlockArray instances are managed by a code:SyncBlockCache that handles the
+// actual allocations and frees of the blocks.
+//
+// So...
+//
+// Each allocation and release has to handle free lists in the table of entries and the table of blocks.
+//
+// We burn an extra 4 bytes for the pointer from the SyncTableEntry to the SyncBlock.
+//
+// The reason for this is that many objects have a SyncTableEntry but no SyncBlock. That's because someone
+// (e.g. HashTable) called Hash() on them.
+//
+// Incidentally, there's a better write-up of all this stuff in the archives.
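+//
+// Editor's note - illustrative sketch, not CLR code: the lookup chain
+// described above, spelled out. GetSyncBlockFor is an invented name; the real
+// accessors are code:ObjHeader::GetHeaderSyncBlockIndex and
+// code:ObjHeader::PassiveGetSyncBlock.
+#if 0 // excluded from the build
+SyncBlock* GetSyncBlockFor(Object* obj)
+{
+    ObjHeader* hdr = ((ObjHeader*)obj) - 1;       // the header precedes the object
+    DWORD index = hdr->GetHeaderSyncBlockIndex(); // 0 means "no sync block"
+    return index != 0 ? g_pSyncTable[index].m_SyncBlock : NULL;
+}
+#endif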
+
+#ifdef _TARGET_X86_
+#include <pshpack4.h>
+#endif // _TARGET_X86_
+
+// forwards:
+class SyncBlock;
+class SyncBlockCache;
+class SyncTableEntry;
+class SyncBlockArray;
+class AwareLock;
+class Thread;
+class AppDomain;
+
+#ifdef EnC_SUPPORTED
+class EnCSyncBlockInfo;
+typedef DPTR(EnCSyncBlockInfo) PTR_EnCSyncBlockInfo;
+
+#endif // EnC_SUPPORTED
+
+#include "eventstore.hpp"
+
+#include "synch.h"
+
+
+// At a negative offset from each Object is an ObjHeader. The 'size' of the
+// object includes these bytes. However, we rely on the previous object allocation
+// to zero out the ObjHeader for the current allocation. And the limits of the
+// GC space are initialized to respect this "off by one" error.
+
+// m_SyncBlockValue is carved up into an index and a set of bits. Steal bits by
+// reducing the mask. We use the very high bit, in _DEBUG, to be sure we never forget
+// to mask the Value to obtain the Index
+
+ // The first three bits below are only used on strings. If the first one is set, we know
+ // whether the string has high-byte characters, and the second bit tells which way it is.
+ // Note that we are reusing the FINALIZER_RUN bit since strings don't have finalizers,
+ // so the value of this bit does not matter for strings.
+#define BIT_SBLK_STRING_HAS_NO_HIGH_CHARS 0x80000000
+
+// Used as workaround for infinite loop case. Will set this bit in the sblk if we have already
+// seen this sblk in our agile checking logic. Problem is seen when object 1 has a ref to object 2
+// and object 2 has a ref to object 1. The agile checker will infinitely loop on these references.
+#define BIT_SBLK_AGILE_IN_PROGRESS 0x80000000
+#define BIT_SBLK_STRING_HIGH_CHARS_KNOWN 0x40000000
+#define BIT_SBLK_STRING_HAS_SPECIAL_SORT 0xC0000000
+#define BIT_SBLK_STRING_HIGH_CHAR_MASK 0xC0000000
+
+#define BIT_SBLK_FINALIZER_RUN 0x40000000
+#define BIT_SBLK_GC_RESERVE 0x20000000
+
+// This lock is only taken when we need to modify the index value in m_SyncBlockValue.
+// It should not be taken if the object already has a real syncblock index.
+#define BIT_SBLK_SPIN_LOCK 0x10000000
+
+#define BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX 0x08000000
+
+// if BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX is clear, the rest of the header dword is laid out as follows:
+// - lower ten bits (bits 0 thru 9) are the thread id used for the thin locks
+//   value is zero if no thread is holding the lock
+// - following six bits (bits 10 thru 15) are the recursion level used for the thin locks
+//   value is zero if lock is not taken or only taken once by the same thread
+// - following 11 bits (bits 16 thru 26) are the app domain index
+//   value is zero if no app domain index is set for the object
+#define SBLK_MASK_LOCK_THREADID 0x000003FF // special value of 0 + 1023 thread ids
+#define SBLK_MASK_LOCK_RECLEVEL 0x0000FC00 // 64 recursion levels
+#define SBLK_LOCK_RECLEVEL_INC 0x00000400 // each level is this much higher than the previous one
+#define SBLK_APPDOMAIN_SHIFT 16 // shift right this much to get appdomain index
+#define SBLK_RECLEVEL_SHIFT 10 // shift right this much to get recursion level
+#define SBLK_MASK_APPDOMAININDEX 0x000007FF // 2048 appdomain indices
+
+// add more bits here... (adjusting the following mask to make room)
+
+// if BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX is set,
+// then if BIT_SBLK_IS_HASHCODE is also set, the rest of the dword is the hash code (bits 0 thru 25),
+// otherwise the rest of the dword is the sync block index (bits 0 thru 25)
+#define BIT_SBLK_IS_HASHCODE 0x04000000
+
+#define HASHCODE_BITS 26
+
+#define MASK_HASHCODE ((1<<HASHCODE_BITS)-1)
+#define SYNCBLOCKINDEX_BITS 26
+#define MASK_SYNCBLOCKINDEX ((1<<SYNCBLOCKINDEX_BITS)-1)
+
+// Spin for about 1000 cycles before waiting longer.
+#define BIT_SBLK_SPIN_COUNT 1000
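+
+// Editor's note - illustrative sketch, not CLR code: decoding a thin-lock
+// header word with the masks above. This layout applies only while
+// BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX is clear; ThinLock and DecodeThinLock are
+// invented names.
+#if 0 // excluded from the build
+struct ThinLock { DWORD threadId; DWORD recursion; DWORD adIndex; };
+
+inline ThinLock DecodeThinLock(DWORD header)
+{
+    ThinLock t;
+    t.threadId  = header & SBLK_MASK_LOCK_THREADID;
+    t.recursion = (header & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
+    t.adIndex   = (header >> SBLK_APPDOMAIN_SHIFT) & SBLK_MASK_APPDOMAININDEX;
+    return t;
+}
+#endif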
+
+// The GC is highly dependent on SIZEOF_OBJHEADER being exactly the sizeof(ObjHeader).
+// We define this macro so that the preprocessor can calculate padding structures.
+#ifdef _WIN64
+#define SIZEOF_OBJHEADER 8
+#else // !_WIN64
+#define SIZEOF_OBJHEADER 4
+#endif // !_WIN64
+
+
+inline void InitializeSpinConstants()
+{
+ WRAPPER_NO_CONTRACT;
+
+#if !defined(DACCESS_COMPILE) && !defined(BINDER)
+ g_SpinConstants.dwInitialDuration = g_pConfig->SpinInitialDuration();
+ g_SpinConstants.dwMaximumDuration = min(g_pConfig->SpinLimitProcCap(), g_SystemInfo.dwNumberOfProcessors) * g_pConfig->SpinLimitProcFactor() + g_pConfig->SpinLimitConstant();
+ g_SpinConstants.dwBackoffFactor = g_pConfig->SpinBackoffFactor();
+ g_SpinConstants.dwRepetitions = g_pConfig->SpinRetryCount();
+#endif
+}
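+
+// Editor's note - a worked example with hypothetical configuration values
+// (not necessarily the shipped defaults): with SpinLimitProcCap=0xFFFFFFFF,
+// SpinLimitProcFactor=4000 and SpinLimitConstant=20000 on a 4-processor
+// machine, dwMaximumDuration = min(0xFFFFFFFF, 4) * 4000 + 20000 = 36000.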
+
+// this is a 'GC-aware' Lock. It is careful to enable preemptive GC before it
+// attempts any operation that can block. Once the operation is finished, it
+// restores the original state of GC.
+
+// AwareLocks can only be created inside SyncBlocks, since they depend on the
+// enclosing SyncBlock for coordination. This is enforced by the private ctor.
+typedef DPTR(class AwareLock) PTR_AwareLock;
+
+class AwareLock
+{
+ friend class CheckAsmOffsets;
+
+ friend class SyncBlockCache;
+ friend class SyncBlock;
+
+public:
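+ // Bit 0 is set while the lock is owned; each waiter adds 2, so the waiter
+ // count is m_MonitorHeld >> 1 (see HasWaiters below).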
+ Volatile<LONG> m_MonitorHeld;
+ ULONG m_Recursion;
+ PTR_Thread m_HoldingThread;
+
+ private:
+ LONG m_TransientPrecious;
+
+
+ // This is a backpointer from the syncblock to the synctable entry. This allows
+ // us to recover the object that holds the syncblock.
+ DWORD m_dwSyncIndex;
+
+ CLREvent m_SemEvent;
+
+ // Only SyncBlocks can create AwareLocks. Hence this private constructor.
+ AwareLock(DWORD indx)
+ : m_MonitorHeld(0),
+ m_Recursion(0),
+#ifndef DACCESS_COMPILE
+// PreFAST has trouble with initializing a NULL PTR_Thread.
+ m_HoldingThread(NULL),
+#endif // DACCESS_COMPILE
+ m_TransientPrecious(0),
+ m_dwSyncIndex(indx)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ ~AwareLock()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // We deliberately allow this to remain incremented if an exception blows
+ // through a lock attempt. This simply prevents the GC from aggressively
+ // reclaiming a particular syncblock until the associated object is garbage.
+ // From a perf perspective, it's not worth using SEH to prevent this from
+ // happening.
+ //
+ // _ASSERTE(m_TransientPrecious == 0);
+ }
+
+#if defined(ENABLE_CONTRACTS_IMPL)
+ // The LOCK_TAKEN/RELEASED macros need a "pointer" to the lock object to do
+ // comparisons between takes & releases (and to provide debugging info to the
+ // developer). Since AwareLocks are always allocated embedded inside SyncBlocks,
+ // and since SyncBlocks don't move (unlike the GC objects that use
+ // the syncblocks), it's safe for us to just use the AwareLock pointer directly
+ void * GetPtrForLockContract()
+ {
+ return (void *) this;
+ }
+#endif // defined(ENABLE_CONTRACTS_IMPL)
+
+public:
+ enum EnterHelperResult {
+ EnterHelperResult_Entered,
+ EnterHelperResult_Contention,
+ EnterHelperResult_UseSlowPath
+ };
+
+ enum LeaveHelperAction {
+ LeaveHelperAction_None,
+ LeaveHelperAction_Signal,
+ LeaveHelperAction_Yield,
+ LeaveHelperAction_Contention,
+ LeaveHelperAction_Error,
+ };
+
+ // Helper encapsulating the fast path of entering the monitor. Returns what kind of result was achieved.
+ AwareLock::EnterHelperResult EnterHelper(Thread* pCurThread);
+ AwareLock::EnterHelperResult EnterHelperSpin(Thread* pCurThread, INT32 timeOut = -1);
+
+ // Helper encapsulating the core logic for leaving monitor. Returns what kind of
+ // follow up action is necessary
+ AwareLock::LeaveHelperAction LeaveHelper(Thread* pCurThread);
+
+ void Enter();
+ BOOL TryEnter(INT32 timeOut = 0);
+ BOOL EnterEpilog(Thread *pCurThread, INT32 timeOut = INFINITE);
+ BOOL EnterEpilogHelper(Thread *pCurThread, INT32 timeOut);
+ BOOL Leave();
+
+ void Signal()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // CLREvent::SetMonitorEvent works even if the event has not been initialized yet
+ m_SemEvent.SetMonitorEvent();
+ }
+
+ bool Contention(INT32 timeOut = INFINITE);
+ void AllocLockSemEvent();
+ LONG LeaveCompletely();
+ BOOL OwnedByCurrentThread();
+
+ void IncrementTransientPrecious()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockIncrement(&m_TransientPrecious);
+ _ASSERTE(m_TransientPrecious > 0);
+ }
+
+ void DecrementTransientPrecious()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_TransientPrecious > 0);
+ FastInterlockDecrement(&m_TransientPrecious);
+ }
+
+ DWORD GetSyncBlockIndex();
+
+ void SetPrecious();
+
+ // Provide access to the object associated with this awarelock, so the client
+ // can protect it.
+ inline OBJECTREF GetOwningObject();
+
+ // Provide access to the Thread object that owns this awarelock. This is used
+ // to allow a host to find out the owner of a lock.
+ inline PTR_Thread GetOwningThread()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_HoldingThread;
+ }
+
+ // Do we have waiters?
+ inline BOOL HasWaiters()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_MonitorHeld >> 1) > 0;
+ }
+};
+
+#ifdef FEATURE_COMINTEROP
+class ComCallWrapper;
+class ComClassFactory;
+struct RCW;
+class RCWHolder;
+typedef DPTR(class ComCallWrapper) PTR_ComCallWrapper;
+#endif // FEATURE_COMINTEROP
+
+class InteropSyncBlockInfo
+{
+ friend class RCWHolder;
+
+public:
+#ifndef FEATURE_PAL
+ // List of InteropSyncBlockInfo instances that have been freed since the last syncblock cleanup.
+ static SLIST_HEADER s_InteropInfoStandbyList;
+#endif // !FEATURE_PAL
+
+ InteropSyncBlockInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ ZeroMemory(this, sizeof(InteropSyncBlockInfo));
+ }
+#ifndef DACCESS_COMPILE
+ ~InteropSyncBlockInfo();
+#endif
+
+#ifndef FEATURE_PAL
+ // Deletes all items in code:s_InteropInfoStandbyList.
+ static void FlushStandbyList();
+#endif // !FEATURE_PAL
+
+#ifdef FEATURE_COMINTEROP
+
+ //
+ // We'll be using the sentinel value of 0x1 to indicate that a particular
+ // field was set at one time, but is now NULL.
+
+#ifndef DACCESS_COMPILE
+ RCW* GetRawRCW()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (RCW *)((size_t)m_pRCW & ~1);
+ }
+
+ // Returns either NULL or an RCW on which AcquireLock has been called.
+ RCW* GetRCWAndIncrementUseCount();
+
+ // Sets the m_pRCW field in a thread-safe manner, pRCW can be NULL.
+ void SetRawRCW(RCW* pRCW);
+
+ bool RCWWasUsed()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_pRCW != NULL);
+ }
+#else // !DACCESS_COMPILE
+ TADDR DacGetRawRCW()
+ {
+ return (TADDR)((size_t)m_pRCW & ~1);
+ }
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+ void SetCCW(ComCallWrapper* pCCW)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (pCCW == NULL)
+ pCCW = (ComCallWrapper*) 0x1;
+
+ m_pCCW = pCCW;
+ }
+#endif // !DACCESS_COMPILE
+
+ PTR_ComCallWrapper GetCCW()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (m_pCCW == (PTR_ComCallWrapper)0x1)
+ return NULL;
+
+ return m_pCCW;
+ }
+
+ bool CCWWasUsed()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pCCW == NULL)
+ return false;
+
+ return true;
+ }
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ void SetComClassFactory(ComClassFactory* pCCF)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (pCCF == NULL)
+ pCCF = (ComClassFactory*)0x1;
+
+ m_pCCF = pCCF;
+ }
+
+ ComClassFactory* GetComClassFactory()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pCCF == (ComClassFactory*)0x1)
+ return NULL;
+
+ return m_pCCF;
+ }
+
+ bool CCFWasUsed()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_pCCF == NULL)
+ return false;
+
+ return true;
+ }
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+#endif // FEATURE_COMINTEROP
+
+#if !defined(DACCESS_COMPILE) && !defined(BINDER)
+ // set m_pUMEntryThunkOrInterceptStub if not already set - returns true if this call set it
+ bool SetUMEntryThunk(void* pUMEntryThunk)
+ {
+ WRAPPER_NO_CONTRACT;
+ return (FastInterlockCompareExchangePointer(&m_pUMEntryThunkOrInterceptStub,
+ pUMEntryThunk,
+ NULL) == NULL);
+ }
+
+ // set m_pUMEntryThunkOrInterceptStub if not already set - returns true if this call set it
+ bool SetInterceptStub(Stub* pInterceptStub)
+ {
+ WRAPPER_NO_CONTRACT;
+ void *pPtr = (void *)((UINT_PTR)pInterceptStub | 1);
+ return (FastInterlockCompareExchangePointer(&m_pUMEntryThunkOrInterceptStub,
+ pPtr,
+ NULL) == NULL);
+ }
+
+ void FreeUMEntryThunkOrInterceptStub();
+
+ void OnADUnload();
+
+#endif // DACCESS_COMPILE
+
+ void* GetUMEntryThunk()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (((UINT_PTR)m_pUMEntryThunkOrInterceptStub & 1) ? NULL : m_pUMEntryThunkOrInterceptStub);
+ }
+
+ Stub* GetInterceptStub()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (((UINT_PTR)m_pUMEntryThunkOrInterceptStub & 1) ? (Stub *)((UINT_PTR)m_pUMEntryThunkOrInterceptStub & ~1) : NULL);
+ }
+
+private:
+ // If this is a delegate marshalled out to unmanaged code, this points
+ // to the thunk generated for unmanaged code to call back on.
+ // If this is a delegate representing an unmanaged function pointer,
+ // this may point to a stub that intercepts calls to the unmanaged target.
+ // It is currently used for pInvokeStackImbalance MDA and host hook.
+ // We differentiate between the two by setting the lowest bit if it's
+ // an intercept stub.
+ void* m_pUMEntryThunkOrInterceptStub;
+
+#ifdef FEATURE_COMINTEROP
+ // If this object is being exposed to COM, it will have an associated CCW object
+ PTR_ComCallWrapper m_pCCW;
+
+#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+ // If this object represents a type object, it will have an associated class factory
+ ComClassFactory* m_pCCF;
+#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
+
+public:
+#ifndef DACCESS_COMPILE
+ // If this is a __ComObject, it will have an associated RCW object
+ RCW* m_pRCW;
+#else
+ // We can't define this as PTR_RCW, as this would create a typedef cycle. Use TADDR
+ // instead.
+ TADDR m_pRCW;
+#endif
+#endif // FEATURE_COMINTEROP
+
+};
+
+typedef DPTR(InteropSyncBlockInfo) PTR_InteropSyncBlockInfo;
+
+// this is a lazily created additional block for an object which contains
+// synchronization information and other "kitchen sink" data
+typedef DPTR(SyncBlock) PTR_SyncBlock;
+// See code:#SyncBlockOverview for more
+class SyncBlock
+{
+ // ObjHeader creates our Mutex and Event
+ friend class ObjHeader;
+ friend class SyncBlockCache;
+ friend struct ThreadQueue;
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+#endif
+ friend class CheckAsmOffsets;
+
+ protected:
+ AwareLock m_Monitor; // the actual monitor
+
+ public:
+ // If this object is exposed to unmanaged code, we keep some extra info here.
+ PTR_InteropSyncBlockInfo m_pInteropInfo;
+
+ protected:
+#ifdef EnC_SUPPORTED
+ // And if the object has new fields added via EnC, this is a list of them
+ PTR_EnCSyncBlockInfo m_pEnCInfo;
+#endif // EnC_SUPPORTED
+
+ // We thread two different lists through this link. When the SyncBlock is
+ // active, we create a list of waiting threads here. When the SyncBlock is
+ // released (we recycle them), the SyncBlockCache maintains a free list of
+ // SyncBlocks here.
+ //
+ // We can't afford to use an SList<> here because we only want to burn
+ // space for the minimum, which is the pointer within an SLink.
+ SLink m_Link;
+
+ // This is the index of the appdomain to which the object belongs. If we
+ // can't set it in the object header, then we set it here. Note that an
+ // object doesn't always have this filled in - only for COM interop,
+ // finalizers, and objects in handles.
+ ADIndex m_dwAppDomainIndex;
+
+ // This is the hash code for the object. It can either have been transferred
+ // from the header dword, in which case it will be limited to 26 bits, or
+ // have been generated right into this member variable here, when it will
+ // be a full 32 bits.
+
+ // A 0 in this variable means no hash code has been set yet - this saves having
+ // another flag to express this state, and it enables us to use a 32-bit interlocked
+ // operation to set the hash code; on the other hand, it means that hash codes
+ // can never be 0. ObjectNative::GetHashCode in COMObject.cpp makes sure to enforce this.
+ DWORD m_dwHashCode;
+
+#if CHECK_APP_DOMAIN_LEAKS
+ DWORD m_dwFlags;
+
+ enum {
+ IsObjectAppDomainAgile = 1,
+ IsObjectCheckedForAppDomainAgile = 2,
+ };
+#endif
+ // In some early versions of VB, when there were no arrays, developers used BSTRs
+ // as arrays by adding a trail byte at the end of the BSTR. To support this
+ // scenario we need to use the sync block for this special case and save the
+ // trail character here when a BSTR is used as an array.
+ WCHAR m_BSTRTrailByte;
+
+ public:
+ SyncBlock(DWORD indx)
+ : m_Monitor(indx)
+#ifdef EnC_SUPPORTED
+ , m_pEnCInfo(PTR_NULL)
+#endif // EnC_SUPPORTED
+ , m_dwHashCode(0)
+#if CHECK_APP_DOMAIN_LEAKS
+ , m_dwFlags(0)
+#endif
+ , m_BSTRTrailByte(0)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pInteropInfo = NULL;
+
+ // The monitor must be 32-bit aligned for atomicity to be guaranteed.
+ _ASSERTE((((size_t) &m_Monitor) & 3) == 0);
+ }
+
+ DWORD GetSyncBlockIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Monitor.GetSyncBlockIndex();
+ }
+
+ // As soon as a syncblock acquires some state that cannot be recreated, we latch
+ // a bit.
+ void SetPrecious()
+ {
+ WRAPPER_NO_CONTRACT;
+ m_Monitor.SetPrecious();
+ }
+
+ BOOL IsPrecious()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_Monitor.m_dwSyncIndex & SyncBlockPrecious) != 0;
+ }
+
+ void OnADUnload();
+
+ // True if the syncblock and its index are disposable.
+ // If new members are added to the syncblock, this
+ // method needs to be modified accordingly.
+ BOOL IsIDisposable()
+ {
+ WRAPPER_NO_CONTRACT;
+ return (!IsPrecious() &&
+ m_Monitor.m_MonitorHeld.RawValue() == (LONG)0 &&
+ m_Monitor.m_TransientPrecious == 0);
+ }
+
+ // Gets the InteropInfo block, creates a new one if none is present.
+ InteropSyncBlockInfo* GetInteropInfo()
+ {
+ CONTRACT (InteropSyncBlockInfo*)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ if (!m_pInteropInfo)
+ {
+ NewHolder<InteropSyncBlockInfo> pInteropInfo;
+#ifndef FEATURE_PAL
+ pInteropInfo = (InteropSyncBlockInfo *)InterlockedPopEntrySList(&InteropSyncBlockInfo::s_InteropInfoStandbyList);
+
+ if (pInteropInfo != NULL)
+ {
+ // cache hit - reinitialize the data structure
+ new (pInteropInfo) InteropSyncBlockInfo();
+ }
+ else
+#endif // !FEATURE_PAL
+ {
+ pInteropInfo = new InteropSyncBlockInfo();
+ }
+
+ if (SetInteropInfo(pInteropInfo))
+ pInteropInfo.SuppressRelease();
+ }
+
+ RETURN m_pInteropInfo;
+ }
+
+ PTR_InteropSyncBlockInfo GetInteropInfoNoCreate()
+ {
+ CONTRACT (PTR_InteropSyncBlockInfo)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN m_pInteropInfo;
+ }
+
+ // Returns false if the InteropInfo block was already set - does not overwrite the previous value.
+ // True if the InteropInfo block was successfully set with the passed in value.
+ bool SetInteropInfo(InteropSyncBlockInfo* pInteropInfo);
+
+#ifdef EnC_SUPPORTED
+ // Get information about fields added to this object by the Debugger's Edit and Continue support
+ PTR_EnCSyncBlockInfo GetEnCInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pEnCInfo;
+ }
+
+ // Store information about fields added to this object by the Debugger's Edit and Continue support
+ void SetEnCInfo(EnCSyncBlockInfo *pEnCInfo);
+#endif // EnC_SUPPORTED
+
+ ADIndex GetAppDomainIndex()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_dwAppDomainIndex;
+ }
+
+ void SetAppDomainIndex(ADIndex dwAppDomainIndex)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetPrecious();
+ m_dwAppDomainIndex = dwAppDomainIndex;
+ }
+
+ void SetAwareLock(Thread *holdingThread, DWORD recursionLevel)
+ {
+ LIMITED_METHOD_CONTRACT;
+ // <NOTE>
+ // DO NOT SET m_MonitorHeld HERE! THIS IS NOT PROTECTED BY ANY LOCK!!
+ // </NOTE>
+ m_Monitor.m_HoldingThread = PTR_Thread(holdingThread);
+ m_Monitor.m_Recursion = recursionLevel;
+ }
+
+ DWORD GetHashCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwHashCode;
+ }
+
+ DWORD SetHashCode(DWORD hashCode)
+ {
+ WRAPPER_NO_CONTRACT;
+ DWORD result = FastInterlockCompareExchange((LONG*)&m_dwHashCode, hashCode, 0);
+ if (result == 0)
+ {
+ // the sync block now holds a hash code, which we can't afford to lose.
+ SetPrecious();
+ return hashCode;
+ }
+ else
+ return result;
+ }
+
+ void *operator new (size_t sz, void* p)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return p ;
+ }
+ void operator delete(void *p)
+ {
+ LIMITED_METHOD_CONTRACT;
+ // We've already destructed. But retain the memory.
+ }
+
+ void EnterMonitor()
+ {
+ WRAPPER_NO_CONTRACT;
+ m_Monitor.Enter();
+ }
+
+ BOOL TryEnterMonitor(INT32 timeOut = 0)
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_Monitor.TryEnter(timeOut);
+ }
+
+ // leave the monitor
+ BOOL LeaveMonitor()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_Monitor.Leave();
+ }
+
+ AwareLock* GetMonitor()
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ //hold the syncblock
+#ifndef DACCESS_COMPILE
+ SetPrecious();
+#endif
+
+ //Note that for DAC we did not return a PTR_ type. This pointer is interior and
+ //the SyncBlock has already been marshaled so that GetMonitor could be called.
+ return &m_Monitor;
+ }
+
+ AwareLock* QuickGetMonitor()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // Note that the syncblock isn't marked precious, so use caution when
+ // calling this method.
+ return &m_Monitor;
+ }
+
+ BOOL DoesCurrentThreadOwnMonitor()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_Monitor.OwnedByCurrentThread();
+ }
+
+ LONG LeaveMonitorCompletely()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_Monitor.LeaveCompletely();
+ }
+
+ BOOL Wait(INT32 timeOut, BOOL exitContext);
+ void Pulse();
+ void PulseAll();
+
+ enum
+ {
+ // This bit indicates that the syncblock is valuable and can neither be discarded
+ // nor re-created.
+ SyncBlockPrecious = 0x80000000,
+ };
+
+#if CHECK_APP_DOMAIN_LEAKS
+ BOOL IsAppDomainAgile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwFlags & IsObjectAppDomainAgile;
+ }
+ void SetIsAppDomainAgile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_dwFlags |= IsObjectAppDomainAgile;
+ }
+ void UnsetIsAppDomainAgile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_dwFlags = m_dwFlags & ~IsObjectAppDomainAgile;
+ }
+ BOOL IsCheckedForAppDomainAgile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwFlags & IsObjectCheckedForAppDomainAgile;
+ }
+ void SetIsCheckedForAppDomainAgile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_dwFlags |= IsObjectCheckedForAppDomainAgile;
+ }
+#endif //CHECK_APP_DOMAIN_LEAKS
+
+ BOOL HasCOMBstrTrailByte()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_BSTRTrailByte!=0);
+ }
+ WCHAR GetCOMBstrTrailByte()
+ {
+ return m_BSTRTrailByte;
+ }
+ void SetCOMBstrTrailByte(WCHAR trailByte)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_BSTRTrailByte = trailByte;
+ SetPrecious();
+ }
+
+ protected:
+ // <NOTE>
+ // This should ONLY be called when initializing a SyncBlock (i.e. ONLY from
+ // ObjHeader::GetSyncBlock()), otherwise we'll have a race condition.
+ // </NOTE>
+ void InitState()
+ {
+ LIMITED_METHOD_CONTRACT;
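+ // A raw value of 1 means "lock bit set, zero waiters" in the
+ // m_MonitorHeld encoding (bit 0 = owned, each waiter adds 2).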
+ m_Monitor.m_MonitorHeld.RawValue() = 1;
+ }
+
+#if defined(ENABLE_CONTRACTS_IMPL)
+ // The LOCK_TAKEN/RELEASED macros need a "pointer" to the lock object to do
+ // comparisons between takes & releases (and to provide debugging info to the
+ // developer). Use the AwareLock (m_Monitor)
+ void * GetPtrForLockContract()
+ {
+ return m_Monitor.GetPtrForLockContract();
+ }
+#endif // defined(ENABLE_CONTRACTS_IMPL)
+};
+
+class SyncTableEntry
+{
+ public:
+ PTR_SyncBlock m_SyncBlock;
+ VolatilePtr<Object, PTR_Object> m_Object;
+ static PTR_SyncTableEntry GetSyncTableEntry();
+#ifndef DACCESS_COMPILE
+ static SyncTableEntry*& GetSyncTableEntryByRef();
+#endif
+};
+
+#ifdef _DEBUG
+extern void DumpSyncBlockCache();
+#endif
+
+// this class stores free sync blocks after they're allocated and
+// unused
+
+typedef DPTR(SyncBlockCache) PTR_SyncBlockCache;
+
+// The SyncBlockCache is the data structure that manages SyncBlocks
+// as well as SyncTableEntries (see the explanation at the top of this file).
+//
+// There is only one process global SyncBlockCache (SyncBlockCache::s_pSyncBlockCache)
+// and SyncTableEntry table (g_pSyncTable).
+//
+// see code:#SyncBlockOverview for more
+class SyncBlockCache
+{
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+#endif
+
+ friend class SyncBlock;
+
+
+ private:
+ PTR_SLink m_pCleanupBlockList; // list of sync blocks that need cleanup
+ SLink* m_FreeBlockList; // list of free sync blocks
+ Crst m_CacheLock; // cache lock
+ DWORD m_FreeCount; // count of active sync blocks
+ DWORD m_ActiveCount; // number active
+ SyncBlockArray *m_SyncBlocks; // Array of new SyncBlocks.
+ DWORD m_FreeSyncBlock; // Next Free Syncblock in the array
+
+ // The next variables deal with SyncTableEntries. Instead of having the object-header
+ // point directly at SyncBlocks, the object points at a SyncTableEntry, which points at
+ // the SyncBlock. This is done because in a common case (needing a hash code for an
+ // object) you just need a SyncTableEntry.
+
+ DWORD m_FreeSyncTableIndex; // We allocate a large array of SyncTableEntry structures.
+ // This index points at the boundary between used and
+ // never-been-used SyncTableEntries.
+ size_t m_FreeSyncTableList; // index of the first free SyncTableEntry in our free list.
+ // The entry at this index has its m_Object field set to the index
+ // of the next element (shifted left by 1; the low bit marks it as not in use)
+ DWORD m_SyncTableSize;
+ SyncTableEntry *m_OldSyncTables; // Next old SyncTable
+
+ BOOL m_bSyncBlockCleanupInProgress; // A flag indicating if sync block cleanup is in progress.
+ DWORD* m_EphemeralBitmap; // card table for ephemeral scanning
+
+ BOOL GCWeakPtrScanElement(int elindex, HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2, BOOL& cleanup);
+
+ void SetCard (size_t card);
+ void ClearCard (size_t card);
+ BOOL CardSetP (size_t card);
+ void CardTableSetBit (size_t idx);
+ void Grow();
+
+
+ public:
+ SPTR_DECL(SyncBlockCache, s_pSyncBlockCache);
+ static SyncBlockCache*& GetSyncBlockCache();
+
+ void *operator new(size_t size, void *pInPlace)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return pInPlace;
+ }
+
+ void operator delete(void *p)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ SyncBlockCache();
+ ~SyncBlockCache();
+
+ static void Attach();
+ static void Detach();
+ void DoDetach();
+
+ static void Start();
+ static void Stop();
+
+ // returns and removes next from free list
+ SyncBlock* GetNextFreeSyncBlock();
+ // returns and removes the next from cleanup list
+ SyncBlock* GetNextCleanupSyncBlock();
+ // inserts a syncblock into the cleanup list
+ void InsertCleanupSyncBlock(SyncBlock* psb);
+
+ // Obtain a new syncblock slot in the SyncBlock table. Used as a hash code
+ DWORD NewSyncBlockSlot(Object *obj);
+
+ // return sync block to cache or delete
+ void DeleteSyncBlock(SyncBlock *sb);
+
+ // returns the sync block memory to the free pool but does not destruct sync block (must own cache lock already)
+ void DeleteSyncBlockMemory(SyncBlock *sb);
+
+ // return sync block to cache or delete, called from GC
+ void GCDeleteSyncBlock(SyncBlock *sb);
+
+ void GCWeakPtrScan(HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2);
+
+ void GCDone(BOOL demoting, int max_gen);
+
+ void CleanupSyncBlocks();
+
+ void CleanupSyncBlocksInAppDomain(AppDomain *pDomain);
+
+ int GetTableEntryCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_FreeSyncTableIndex - 1;
+ }
+
+ // Determines if a sync block cleanup is in progress.
+ BOOL IsSyncBlockCleanupInProgress()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_bSyncBlockCleanupInProgress;
+ }
+
+#if !defined(BINDER)
+ // Encapsulate a CrstHolder, so that clients of our lock don't have to know
+ // the details of our implementation.
+ class LockHolder : public CrstHolder
+ {
+ public:
+ LockHolder(SyncBlockCache *pCache)
+ : CrstHolder(&pCache->m_CacheLock)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+ }
+ };
+ friend class LockHolder;
+#endif
+
+#if CHECK_APP_DOMAIN_LEAKS
+ void CheckForUnloadedInstances(ADIndex unloadingIndex);
+#endif
+#ifdef _DEBUG
+ friend void DumpSyncBlockCache();
+#endif
+
+#ifdef VERIFY_HEAP
+ void VerifySyncTableEntry();
+#endif
+};
+
+// See code:#SyncBlockOverview for more
+class ObjHeader
+{
+ friend class CheckAsmOffsets;
+
+ private:
+ // !!! Notice: m_SyncBlockValue *MUST* be the last field in ObjHeader.
+#ifdef _WIN64
+ DWORD m_alignpad;
+#endif // _WIN64
+
+ Volatile<DWORD> m_SyncBlockValue; // the Index and the Bits
+
+#if defined(_WIN64) && defined(_DEBUG)
+ void IllegalAlignPad();
+#endif // _WIN64 && _DEBUG
+
+ INCONTRACT(void * GetPtrForLockContract());
+
+ public:
+
+ // Access to the Sync Block Index, by masking the Value.
+ FORCEINLINE DWORD GetHeaderSyncBlockIndex()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+#if defined(_WIN64) && defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ // On WIN64 this field is never modified, but was initialized to 0
+ if (m_alignpad != 0)
+ IllegalAlignPad();
+#endif // _WIN64 && _DEBUG && !DACCESS_COMPILE
+
+ // pull the value out before checking it to avoid race condition
+ DWORD value = m_SyncBlockValue.LoadWithoutBarrier();
+ if ((value & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) != BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
+ return 0;
+ return value & MASK_SYNCBLOCKINDEX;
+ }
+ // Ditto for setting the index, which is careful not to disturb the underlying
+ // bit field -- even in the presence of threaded access.
+ //
+ // This service can only be used to transition from a 0 index to a non-0 index.
+#if !defined(BINDER)
+ void SetIndex(DWORD indx)
+ {
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ PRECONDITION(GetHeaderSyncBlockIndex() == 0);
+ PRECONDITION(m_SyncBlockValue & BIT_SBLK_SPIN_LOCK);
+ }
+ CONTRACTL_END
+
+
+#ifdef _DEBUG
+ // if we have an index here, make sure we already transferred it to the syncblock
+ // before we clear it out
+ ADIndex adIndex = GetRawAppDomainIndex();
+ if (adIndex.m_dwIndex)
+ {
+ SyncBlock *pSyncBlock = SyncTableEntry::GetSyncTableEntry() [indx & ~BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX].m_SyncBlock;
+ _ASSERTE(pSyncBlock && pSyncBlock->GetAppDomainIndex() == adIndex);
+ }
+#endif
+
+ LONG newValue;
+ LONG oldValue;
+ while (TRUE) {
+ oldValue = m_SyncBlockValue.LoadWithoutBarrier();
+ _ASSERTE(GetHeaderSyncBlockIndex() == 0);
+ // OR in the old value, except any index that is there -
+ // note that indx could be carrying the BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX bit that we need to preserve
+ newValue = (indx |
+ (oldValue & ~(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE | MASK_SYNCBLOCKINDEX)));
+ if (FastInterlockCompareExchange((LONG*)&m_SyncBlockValue,
+ newValue,
+ oldValue)
+ == oldValue)
+ {
+ return;
+ }
+ }
+ }
+#endif // !BINDER
+
+ // Used only during shutdown
+ void ResetIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(m_SyncBlockValue & BIT_SBLK_SPIN_LOCK);
+ FastInterlockAnd(&m_SyncBlockValue, ~(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE | MASK_SYNCBLOCKINDEX));
+ }
+
+ // Used only by the GC
+ void GCResetIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_SyncBlockValue.RawValue() &=~(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE | MASK_SYNCBLOCKINDEX);
+ }
+
+ void SetAppDomainIndex(ADIndex);
+ void ResetAppDomainIndex(ADIndex);
+ void ResetAppDomainIndexNoFailure(ADIndex);
+ ADIndex GetRawAppDomainIndex();
+ ADIndex GetAppDomainIndex();
+
+ // For now, use interlocked operations to twiddle bits in the bitfield portion.
+ // If we ever have high-performance requirements where we can guarantee that no
+ // other threads are accessing the ObjHeader, this can be reconsidered for those
+ // particular bits.
+ void SetBit(DWORD bit)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE((bit & MASK_SYNCBLOCKINDEX) == 0);
+ FastInterlockOr(&m_SyncBlockValue, bit);
+ }
+ void ClrBit(DWORD bit)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE((bit & MASK_SYNCBLOCKINDEX) == 0);
+ FastInterlockAnd(&m_SyncBlockValue, ~bit);
+ }
+ //GC accesses this bit when all threads are stopped.
+ void SetGCBit()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_SyncBlockValue.RawValue() |= BIT_SBLK_GC_RESERVE;
+ }
+ void ClrGCBit()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_SyncBlockValue.RawValue() &= ~BIT_SBLK_GC_RESERVE;
+ }
+
+ // Don't bother masking out the index since anyone who wants bits will presumably
+ // restrict the bits they consider.
+ DWORD GetBits()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+#if defined(_WIN64) && defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ // On WIN64 this field is never modified, but was initialized to 0
+ if (m_alignpad != 0)
+ IllegalAlignPad();
+#endif // _WIN64 && _DEBUG && !DACCESS_COMPILE
+
+ return m_SyncBlockValue.LoadWithoutBarrier();
+ }
+
+
+ DWORD SetBits(DWORD newBits, DWORD oldBits)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE((oldBits & BIT_SBLK_SPIN_LOCK) == 0);
+ DWORD result = FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, newBits, oldBits);
+ return result;
+ }
+
+#ifdef _DEBUG
+ BOOL HasEmptySyncBlockInfo()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_SyncBlockValue.LoadWithoutBarrier() == 0;
+ }
+#endif
+
+ // TRUE if the header has a real SyncBlockIndex (i.e. it has an entry in the
+ // SyncTable, though it doesn't necessarily have an entry in the SyncBlockCache)
+ BOOL HasSyncBlockIndex()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (GetHeaderSyncBlockIndex() != 0);
+ }
+
+ // retrieve or allocate a sync block for this object
+ SyncBlock *GetSyncBlock();
+
+ // retrieve sync block but don't allocate
+ PTR_SyncBlock PassiveGetSyncBlock()
+ {
+#if !defined(BINDER)
+ LIMITED_METHOD_DAC_CONTRACT;
+ return g_pSyncTable [(int)GetHeaderSyncBlockIndex()].m_SyncBlock;
+#else
+ _ASSERTE(FALSE);
+ return NULL;
+#endif // BINDER
+ }
+
+ DWORD GetSyncBlockIndex();
+
+ // this enters the monitor of an object
+ void EnterObjMonitor();
+
+ // non-blocking version of above
+ BOOL TryEnterObjMonitor(INT32 timeOut = 0);
+
+ // Inlineable fast path of EnterObjMonitor/TryEnterObjMonitor
+ AwareLock::EnterHelperResult EnterObjMonitorHelper(Thread* pCurThread);
+ AwareLock::EnterHelperResult EnterObjMonitorHelperSpin(Thread* pCurThread);
+
+ // leaves the monitor of an object
+ BOOL LeaveObjMonitor();
+
+ // should be called only from unwind code
+ BOOL LeaveObjMonitorAtException();
+
+ // Helper encapsulating the core logic for releasing monitor. Returns what kind of
+ // follow up action is necessary
+ AwareLock::LeaveHelperAction LeaveObjMonitorHelper(Thread* pCurThread);
+
+ // Returns TRUE if the lock is owned and FALSE otherwise
+ // threadId is set to the ID (Thread::GetThreadId()) of the thread which owns the lock
+ // acquisitionCount is set to the number of times the lock needs to be released before
+ // it is unowned
+ BOOL GetThreadOwningMonitorLock(DWORD *pThreadId, DWORD *pAcquisitionCount);
+
+ PTR_Object GetBaseObject()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<PTR_Object>(dac_cast<TADDR>(this + 1));
+ }
+
+ BOOL Wait(INT32 timeOut, BOOL exitContext);
+ void Pulse();
+ void PulseAll();
+
+ void EnterSpinLock();
+ void ReleaseSpinLock();
+
+ BOOL Validate (BOOL bVerifySyncBlkIndex = TRUE);
+};
+
+
+typedef DPTR(class ObjHeader) PTR_ObjHeader;
+
+
+#define ENTER_SPIN_LOCK(pOh) \
+ pOh->EnterSpinLock();
+
+#define LEAVE_SPIN_LOCK(pOh) \
+ pOh->ReleaseSpinLock();
+
+
+#ifdef DACCESS_COMPILE
+// A visitor function used to enumerate threads in the ThreadQueue below
+typedef void (*FP_TQ_THREAD_ENUMERATION_CALLBACK)(PTR_Thread pThread, VOID* pUserData);
+#endif
+
+// A SyncBlock contains an m_Link field that is used for two purposes. One
+// is to manage a FIFO queue of threads that are waiting on this synchronization
+// object. The other is to thread free SyncBlocks into a list for recycling.
+// We don't want to burn anything else on the SyncBlock instance, so we can't
+// use an SList or similar data structure. So here's the encapsulation for the
+// queue of waiting threads.
+//
+// Note that Enqueue is slower than it needs to be, because we don't want to
+// burn extra space in the SyncBlock to remember the head and the tail of the Q.
+// An alternate approach would be to treat the list as a LIFO stack, which is not
+// a fair policy because it permits starvation.
+//
+// Important!!! While there is a lock that is used in process to keep multiple threads
+// from altering the queue simultaneously, the queue must still be consistent at all
+// times, even when the lock is held. The debugger inspects the queue from out of process
+// and just looks at the memory...it must be valid even if the lock is held. Be careful if you
+// change the way the queue is updated.
+struct ThreadQueue
+{
+ // Given a link in the chain, get the Thread that it represents
+ static PTR_WaitEventLink WaitEventLinkForLink(PTR_SLink pLink);
+
+ // Unlink the head of the Q. We are always in the SyncBlock's critical
+ // section.
+ static WaitEventLink *DequeueThread(SyncBlock *psb);
+
+ // Enqueue is the slow one. We have to find the end of the Q since we don't
+ // want to burn storage for this in the SyncBlock.
+ static void EnqueueThread(WaitEventLink *pWaitEventLink, SyncBlock *psb);
+
+ // Wade through the SyncBlock's list of waiting threads and remove the
+ // specified thread.
+ static BOOL RemoveThread (Thread *pThread, SyncBlock *psb);
+
+#ifdef DACCESS_COMPILE
+ // Enumerates the threads in the queue from front to back by calling
+ // pCallbackFunction on each one
+ static void EnumerateThreads(SyncBlock *psb,
+ FP_TQ_THREAD_ENUMERATION_CALLBACK pCallbackFunction,
+ void* pUserData);
+#endif
+};
+
+
+// The true size of an object is whatever C++ thinks, plus the ObjHeader we
+// allocate before it.
+
+#define ObjSizeOf(c) (sizeof(c) + sizeof(ObjHeader))
+
+
+inline void AwareLock::SetPrecious()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_dwSyncIndex |= SyncBlock::SyncBlockPrecious;
+}
+
+inline DWORD AwareLock::GetSyncBlockIndex()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_dwSyncIndex & ~SyncBlock::SyncBlockPrecious);
+}
+
+#ifdef _TARGET_X86_
+#include <poppack.h>
+#endif // _TARGET_X86_
+
+#endif // _SYNCBLK_H_
+
+
diff --git a/src/vm/syncblk.inl b/src/vm/syncblk.inl
new file mode 100644
index 0000000000..67b288bdcc
--- /dev/null
+++ b/src/vm/syncblk.inl
@@ -0,0 +1,292 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _SYNCBLK_INL_
+#define _SYNCBLK_INL_
+
+#ifndef DACCESS_COMPILE
+
+FORCEINLINE AwareLock::EnterHelperResult AwareLock::EnterHelper(Thread* pCurThread)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ for (;;)
+ {
+ LONG state = m_MonitorHeld.LoadWithoutBarrier();
+
+ if (state == 0)
+ {
+ if (InterlockedCompareExchangeAcquire((LONG*)&m_MonitorHeld, 1, 0) == 0)
+ {
+ m_HoldingThread = pCurThread;
+ m_Recursion = 1;
+ pCurThread->IncLockCount();
+ return AwareLock::EnterHelperResult_Entered;
+ }
+ }
+ else
+ {
+ if (GetOwningThread() == pCurThread) /* monitor is held, but it could be a recursive case */
+ {
+ m_Recursion++;
+ return AwareLock::EnterHelperResult_Entered;
+ }
+
+ return AwareLock::EnterHelperResult_Contention;
+ }
+ }
+}
+
+FORCEINLINE AwareLock::EnterHelperResult ObjHeader::EnterObjMonitorHelper(Thread* pCurThread)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ DWORD tid = pCurThread->GetThreadId();
+
+ LONG oldvalue = m_SyncBlockValue.LoadWithoutBarrier();
+
+ if ((oldvalue & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX +
+ BIT_SBLK_SPIN_LOCK +
+ SBLK_MASK_LOCK_THREADID +
+ SBLK_MASK_LOCK_RECLEVEL)) == 0)
+ {
+ if (tid > SBLK_MASK_LOCK_THREADID)
+ {
+ return AwareLock::EnterHelperResult_UseSlowPath;
+ }
+
+ LONG newvalue = oldvalue | tid;
+ if (InterlockedCompareExchangeAcquire((LONG*)&m_SyncBlockValue, newvalue, oldvalue) == oldvalue)
+ {
+ pCurThread->IncLockCount();
+ return AwareLock::EnterHelperResult_Entered;
+ }
+ }
+    else if (oldvalue & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
+ {
+ // If we have a hash code already, we need to create a sync block
+ if (oldvalue & BIT_SBLK_IS_HASHCODE)
+ {
+ return AwareLock::EnterHelperResult_UseSlowPath;
+ }
+
+ SyncBlock *syncBlock = g_pSyncTable [oldvalue & MASK_SYNCBLOCKINDEX].m_SyncBlock;
+ _ASSERTE(syncBlock != NULL);
+
+ return syncBlock->m_Monitor.EnterHelper(pCurThread);
+ }
+ else
+ {
+ // The header is transitioning - treat this as if the lock was taken
+ if (oldvalue & BIT_SBLK_SPIN_LOCK)
+ {
+ return AwareLock::EnterHelperResult_Contention;
+ }
+
+ // Here we know we have the "thin lock" layout, but the lock is not free.
+ // It could still be the recursion case - compare the thread id to check
+ if (tid == (DWORD) (oldvalue & SBLK_MASK_LOCK_THREADID))
+ {
+ // Ok, the thread id matches, it's the recursion case.
+ // Bump up the recursion level and check for overflow
+ LONG newvalue = oldvalue + SBLK_LOCK_RECLEVEL_INC;
+
+ if ((newvalue & SBLK_MASK_LOCK_RECLEVEL) == 0)
+ {
+ return AwareLock::EnterHelperResult_UseSlowPath;
+ }
+
+ if (InterlockedCompareExchangeAcquire((LONG*)&m_SyncBlockValue, newvalue, oldvalue) == oldvalue)
+ {
+ return AwareLock::EnterHelperResult_Entered;
+ }
+ }
+ }
+
+ return AwareLock::EnterHelperResult_Contention;
+}
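+
+// The fast path above relies on the "thin lock" header layout: when the header
+// carries no hash code or sync block index, the owner's thread id and recursion
+// level live inline in the header word (the SBLK_MASK_LOCK_THREADID and
+// SBLK_MASK_LOCK_RECLEVEL fields). A thread id too large for its field, a
+// recursion-counter overflow, or a header already holding a hash code all bail
+// out to the slow path, which can inflate the lock to a full SyncBlock.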
+
+inline AwareLock::EnterHelperResult ObjHeader::EnterObjMonitorHelperSpin(Thread* pCurThread)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ if (1 == g_SystemInfo.dwNumberOfProcessors)
+ {
+ return AwareLock::EnterHelperResult_Contention;
+ }
+
+ DWORD spincount = g_SpinConstants.dwInitialDuration;
+
+ for (;;)
+ {
+ //
+ // exponential backoff
+ //
+ for (DWORD i = 0; i < spincount; i++)
+ {
+ YieldProcessor();
+ }
+
+ AwareLock::EnterHelperResult result = EnterObjMonitorHelper(pCurThread);
+ if (result != AwareLock::EnterHelperResult_Contention)
+ {
+ return result;
+ }
+
+ spincount *= g_SpinConstants.dwBackoffFactor;
+ if (spincount > g_SpinConstants.dwMaximumDuration)
+ {
+ break;
+ }
+ }
+
+ return AwareLock::EnterHelperResult_Contention;
+}
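+
+// The backoff above grows the spin count geometrically. With illustrative
+// values dwInitialDuration = 50, dwBackoffFactor = 3, dwMaximumDuration = 1000
+// (assumed purely for the example), the YieldProcessor loop runs 50, then 150,
+// then 450 iterations; the next value (1350) exceeds the cap, so the helper
+// gives up and reports contention to the caller.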
+
+// Helper encapsulating the core logic for releasing a monitor. Returns what kind of
+// follow-up action is necessary. This is FORCEINLINE so the common path stays very efficient.
+FORCEINLINE AwareLock::LeaveHelperAction AwareLock::LeaveHelper(Thread* pCurThread)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (m_HoldingThread != pCurThread)
+ return AwareLock::LeaveHelperAction_Error;
+
+ _ASSERTE((size_t)m_MonitorHeld & 1);
+ _ASSERTE(m_Recursion >= 1);
+
+#if defined(_DEBUG) && defined(TRACK_SYNC)
+ // The best place to grab this is from the ECall frame
+ Frame *pFrame = pCurThread->GetFrame();
+ int caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
+ pCurThread->m_pTrackSync->LeaveSync(caller, this);
+#endif
+
+ if (--m_Recursion != 0)
+ {
+ return AwareLock::LeaveHelperAction_None;
+ }
+
+ m_HoldingThread->DecLockCount();
+ m_HoldingThread = NULL;
+
+ for (;;)
+ {
+ // Read existing lock state
+ LONG state = m_MonitorHeld.LoadWithoutBarrier();
+
+ // Clear lock bit.
+ if (InterlockedCompareExchangeRelease((LONG*)&m_MonitorHeld, state - 1, state) == state)
+ {
+ // If wait count is non-zero on successful clear, we must signal the event.
+ if (state & ~1)
+ {
+ return AwareLock::LeaveHelperAction_Signal;
+ }
+ break;
+ }
+ }
+
+ return AwareLock::LeaveHelperAction_None;
+}
+
+// Helper encapsulating the core logic for releasing an object's monitor. Returns what
+// kind of follow-up action is necessary. This is FORCEINLINE so the common path stays very efficient.
+FORCEINLINE AwareLock::LeaveHelperAction ObjHeader::LeaveObjMonitorHelper(Thread* pCurThread)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ DWORD syncBlockValue = m_SyncBlockValue.LoadWithoutBarrier();
+
+ if ((syncBlockValue & (BIT_SBLK_SPIN_LOCK + BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)) == 0)
+ {
+ if ((syncBlockValue & SBLK_MASK_LOCK_THREADID) != pCurThread->GetThreadId())
+ {
+ // This thread does not own the lock.
+ return AwareLock::LeaveHelperAction_Error;
+ }
+
+ if (syncBlockValue & SBLK_MASK_LOCK_RECLEVEL)
+ {
+ // recursion and ThinLock
+ DWORD newValue = syncBlockValue - SBLK_LOCK_RECLEVEL_INC;
+ if (InterlockedCompareExchangeRelease((LONG*)&m_SyncBlockValue, newValue, syncBlockValue) != (LONG)syncBlockValue)
+ {
+ return AwareLock::LeaveHelperAction_Yield;
+ }
+ }
+ else
+ {
+ // We are leaving the lock
+ DWORD newValue = (syncBlockValue & (~SBLK_MASK_LOCK_THREADID));
+ if (InterlockedCompareExchangeRelease((LONG*)&m_SyncBlockValue, newValue, syncBlockValue) != (LONG)syncBlockValue)
+ {
+ return AwareLock::LeaveHelperAction_Yield;
+ }
+ pCurThread->DecLockCount();
+ }
+
+ return AwareLock::LeaveHelperAction_None;
+ }
+
+ if ((syncBlockValue & (BIT_SBLK_SPIN_LOCK + BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX + BIT_SBLK_IS_HASHCODE)) == BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
+ {
+ SyncBlock *syncBlock = g_pSyncTable [syncBlockValue & MASK_SYNCBLOCKINDEX].m_SyncBlock;
+ _ASSERTE(syncBlock != NULL);
+
+ return syncBlock->m_Monitor.LeaveHelper(pCurThread);
+ }
+
+ if (syncBlockValue & BIT_SBLK_SPIN_LOCK)
+ {
+ return AwareLock::LeaveHelperAction_Contention;
+ }
+
+ // This thread does not own the lock.
+ return AwareLock::LeaveHelperAction_Error;
+}
+
+#endif // DACCESS_COMPILE
+
+// Provide access to the object associated with this AwareLock, so the client can
+// protect it.
+inline OBJECTREF AwareLock::GetOwningObject()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+    // gcc on Mac needs these intermediate casts to avoid some ambiguous overloading in the DAC case
+ PTR_SyncTableEntry table = SyncTableEntry::GetSyncTableEntry();
+ return (OBJECTREF)(Object*)(PTR_Object)table[(m_dwSyncIndex & ~SyncBlock::SyncBlockPrecious)].m_Object;
+}
+
+#endif // _SYNCBLK_INL_
diff --git a/src/vm/syncclean.cpp b/src/vm/syncclean.cpp
new file mode 100644
index 0000000000..0e1e70f54c
--- /dev/null
+++ b/src/vm/syncclean.cpp
@@ -0,0 +1,104 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "common.h"
+
+#include "syncclean.hpp"
+#include "virtualcallstub.h"
+#include "threadsuspend.h"
+
+VolatilePtr<Bucket> SyncClean::m_HashMap = NULL;
+VolatilePtr<EEHashEntry*> SyncClean::m_EEHashTable;
+
+void SyncClean::Terminate()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ CleanUp();
+}
+
+void SyncClean::AddHashMap (Bucket *bucket)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!g_fEEStarted) {
+ delete [] bucket;
+ return;
+ }
+
+ BEGIN_GETTHREAD_ALLOWED
+ _ASSERTE (GetThread() == NULL || GetThread()->PreemptiveGCDisabled());
+ END_GETTHREAD_ALLOWED
+
+ Bucket * pTempBucket = NULL;
+ do
+ {
+ pTempBucket = (Bucket *)m_HashMap;
+ NextObsolete (bucket) = pTempBucket;
+ }
+ while (FastInterlockCompareExchangePointer(m_HashMap.GetPointer(), bucket, pTempBucket) != pTempBucket);
+}
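+
+// The loop above is the classic lock-free LIFO push (a Treiber stack): read the
+// current head, link the new node in front of it via NextObsolete, and publish
+// it with a compare-exchange; if another thread moved the head in the meantime,
+// retry. AddEEHashTable below uses the same pattern, storing the link in entry[-1].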
+
+void SyncClean::AddEEHashTable (EEHashEntry** entry)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!g_fEEStarted) {
+ delete [] (entry-1);
+ return;
+ }
+
+ BEGIN_GETTHREAD_ALLOWED
+ _ASSERTE (GetThread() == NULL || GetThread()->PreemptiveGCDisabled());
+ END_GETTHREAD_ALLOWED
+
+ EEHashEntry ** pTempHashEntry = NULL;
+ do
+ {
+ pTempHashEntry = (EEHashEntry**)m_EEHashTable;
+ entry[-1] = (EEHashEntry *)pTempHashEntry;
+ }
+ while (FastInterlockCompareExchangePointer(m_EEHashTable.GetPointer(), entry, pTempHashEntry) != pTempHashEntry);
+}
+
+void SyncClean::CleanUp ()
+{
+ LIMITED_METHOD_CONTRACT;
+
+    // Only the GC thread can call this.
+ _ASSERTE (g_fProcessDetach ||
+ IsGCSpecialThread() ||
+ (GCHeap::IsGCInProgress() && GetThread() == ThreadSuspend::GetSuspensionThread()));
+ if (m_HashMap)
+ {
+ Bucket * pTempBucket = FastInterlockExchangePointer(m_HashMap.GetPointer(), NULL);
+
+ while (pTempBucket)
+ {
+ Bucket* pNextBucket = NextObsolete (pTempBucket);
+ delete [] pTempBucket;
+ pTempBucket = pNextBucket;
+ }
+ }
+
+ if (m_EEHashTable)
+ {
+ EEHashEntry ** pTempHashEntry = FastInterlockExchangePointer(m_EEHashTable.GetPointer(), NULL);
+
+ while (pTempHashEntry) {
+ EEHashEntry **pNextHashEntry = (EEHashEntry **)pTempHashEntry[-1];
+ pTempHashEntry --;
+ delete [] pTempHashEntry;
+ pTempHashEntry = pNextHashEntry;
+ }
+ }
+
+    // Give other components that want to reclaim memory during the GC sync point a chance to do so
+ VirtualCallStubManager::ReclaimAll();
+}
diff --git a/src/vm/syncclean.hpp b/src/vm/syncclean.hpp
new file mode 100644
index 0000000000..3f7197c705
--- /dev/null
+++ b/src/vm/syncclean.hpp
@@ -0,0 +1,31 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _SYNCCLEAN_HPP_
+#define _SYNCCLEAN_HPP_
+
+// We keep a list of memory blocks to be freed at the end of a GC, but before we resume
+// the EE. To make this work, we need to make sure that this data is only accessed in
+// cooperative GC mode.
+
+class Bucket;
+struct EEHashEntry;
+class Crst;
+class CrstStatic;
+
+class SyncClean {
+public:
+ static void Terminate ();
+
+ static void AddHashMap (Bucket *bucket);
+ static void AddEEHashTable (EEHashEntry** entry);
+ static void CleanUp ();
+
+private:
+ static VolatilePtr<Bucket> m_HashMap; // Cleanup list for HashMap
+ static VolatilePtr<EEHashEntry *> m_EEHashTable; // Cleanup list for EEHashTable
+};
+#endif
diff --git a/src/vm/synch.cpp b/src/vm/synch.cpp
new file mode 100644
index 0000000000..d6283473aa
--- /dev/null
+++ b/src/vm/synch.cpp
@@ -0,0 +1,1059 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+#include "common.h"
+
+#include "corhost.h"
+#include "synch.h"
+#include "rwlock.h"
+
+void CLREventBase::CreateAutoEvent (BOOL bInitialState // If TRUE, initial state is signalled
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ // disallow creation of Crst before EE starts
+        // Cannot assert here: ASP.NET uses our threadpool before the EE is started.
+ PRECONDITION((m_handle == INVALID_HANDLE_VALUE));
+ PRECONDITION((!IsOSEvent()));
+ }
+ CONTRACTL_END;
+
+ SetAutoEvent();
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSyncManager *pManager = CorHost2::GetHostSyncManager();
+ if (pManager != NULL) {
+ IHostAutoEvent *pEvent;
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pManager->CreateAutoEvent(&pEvent);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (hr != S_OK) {
+ _ASSERTE (hr == E_OUTOFMEMORY);
+ ThrowOutOfMemory();
+ }
+ if (bInitialState)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ pEvent->Set();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ m_handle = (HANDLE)pEvent;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ HANDLE h = UnsafeCreateEvent(NULL,FALSE,bInitialState,NULL);
+ if (h == NULL) {
+ ThrowOutOfMemory();
+ }
+ m_handle = h;
+ }
+
+}
+
+void CLREventBase::CreateManualEvent (BOOL bInitialState // If TRUE, initial state is signalled
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ // disallow creation of Crst before EE starts
+        // Cannot assert here: ASP.NET uses our threadpool before the EE is started.
+ PRECONDITION((m_handle == INVALID_HANDLE_VALUE));
+ PRECONDITION((!IsOSEvent()));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSyncManager *pManager = CorHost2::GetHostSyncManager();
+ if (pManager != NULL){
+ IHostManualEvent *pEvent;
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pManager->CreateManualEvent(bInitialState, &pEvent);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (hr != S_OK) {
+ _ASSERTE (hr == E_OUTOFMEMORY);
+ ThrowOutOfMemory();
+ }
+ m_handle = (HANDLE)pEvent;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ HANDLE h = UnsafeCreateEvent(NULL,TRUE,bInitialState,NULL);
+ if (h == NULL) {
+ ThrowOutOfMemory();
+ }
+ m_handle = h;
+ }
+}
+
+
+void CLREventBase::CreateMonitorEvent(SIZE_T Cookie)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ // disallow creation of Crst before EE starts
+ PRECONDITION((g_fEEStarted));
+ PRECONDITION((GetThread() != NULL));
+ PRECONDITION((!IsOSEvent()));
+ }
+ CONTRACTL_END;
+
+ // thread-safe SetAutoEvent
+ FastInterlockOr(&m_dwFlags, CLREVENT_FLAGS_AUTO_EVENT);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSyncManager *pManager = CorHost2::GetHostSyncManager();
+ if (pManager != NULL){
+ IHostAutoEvent *pEvent;
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pManager->CreateMonitorEvent(Cookie,&pEvent);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (hr != S_OK) {
+ _ASSERTE (hr == E_OUTOFMEMORY);
+ ThrowOutOfMemory();
+ }
+ if (FastInterlockCompareExchangePointer(&m_handle,
+ reinterpret_cast<HANDLE>(pEvent),
+ INVALID_HANDLE_VALUE) != INVALID_HANDLE_VALUE)
+ {
+ // We lost the race
+ pEvent->Release();
+ }
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ HANDLE h = UnsafeCreateEvent(NULL,FALSE,FALSE,NULL);
+ if (h == NULL) {
+ ThrowOutOfMemory();
+ }
+ if (FastInterlockCompareExchangePointer(&m_handle,
+ h,
+ INVALID_HANDLE_VALUE) != INVALID_HANDLE_VALUE)
+ {
+ // We lost the race
+ CloseHandle(h);
+ }
+ }
+
+ // thread-safe SetInDeadlockDetection
+ FastInterlockOr(&m_dwFlags, CLREVENT_FLAGS_IN_DEADLOCK_DETECTION);
+
+ for (;;)
+ {
+ LONG oldFlags = m_dwFlags;
+
+ if (oldFlags & CLREVENT_FLAGS_MONITOREVENT_ALLOCATED)
+ {
+ // Other thread has set the flag already. Nothing left for us to do.
+ break;
+ }
+
+ LONG newFlags = oldFlags | CLREVENT_FLAGS_MONITOREVENT_ALLOCATED;
+ if (FastInterlockCompareExchange((LONG*)&m_dwFlags, newFlags, oldFlags) != oldFlags)
+ {
+ // We lost the race
+ continue;
+ }
+
+ // Because we set the allocated bit, we are the ones to do the signalling
+ if (oldFlags & CLREVENT_FLAGS_MONITOREVENT_SIGNALLED)
+ {
+ // We got the honour to signal the event
+ Set();
+ }
+ break;
+ }
+}
+
+
+void CLREventBase::SetMonitorEvent()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+    // SetMonitorEvent is robust against initialization races. It is possible to
+    // call CLREvent::SetMonitorEvent on an event that has not been initialized yet by
+    // CreateMonitorEvent. If that happens, CreateMonitorEvent will signal the event once it is created.
+
+ for (;;)
+ {
+ LONG oldFlags = m_dwFlags;
+
+ if (oldFlags & CLREVENT_FLAGS_MONITOREVENT_ALLOCATED)
+ {
+ // Event has been allocated already. Use the regular codepath.
+ Set();
+ break;
+ }
+
+ LONG newFlags = oldFlags | CLREVENT_FLAGS_MONITOREVENT_SIGNALLED;
+ if (FastInterlockCompareExchange((LONG*)&m_dwFlags, newFlags, oldFlags) != oldFlags)
+ {
+ // We lost the race
+ continue;
+ }
+ break;
+ }
+}
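+
+// A sketch of the flag protocol shared by CreateMonitorEvent and SetMonitorEvent,
+// as implied by the two loops above:
+//
+//   SetMonitorEvent before allocation  -> CAS the SIGNALLED bit in and return
+//   CreateMonitorEvent, CAS ALLOCATED  -> if SIGNALLED was already set, call Set()
+//   SetMonitorEvent after allocation   -> plain Set()
+//
+// Whichever thread wins the ALLOCATED compare-exchange owns any pending signal,
+// so a signal is never lost regardless of the interleaving.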
+
+#ifdef FEATURE_RWLOCK
+void CLREventBase::CreateRWLockWriterEvent (BOOL bInitialState, // If TRUE, initial state is signalled
+ CRWLock *pRWLock
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ // disallow creation of Crst before EE starts
+ PRECONDITION((g_fEEStarted));
+ PRECONDITION((m_handle == INVALID_HANDLE_VALUE));
+ PRECONDITION((GetThread() != NULL));
+ PRECONDITION((!IsOSEvent()));
+ }
+ CONTRACTL_END;
+
+ SetAutoEvent();
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSyncManager *pManager = CorHost2::GetHostSyncManager();
+ if (pManager != NULL){
+ // Need to have a fixed cookie. Use a weak handle for this purpose.
+ IHostAutoEvent *pEvent;
+ HRESULT hr;
+ SIZE_T cookie = (SIZE_T)pRWLock->GetObjectHandle();
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pManager->CreateRWLockWriterEvent(cookie, &pEvent);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (hr != S_OK) {
+ _ASSERTE (hr == E_OUTOFMEMORY);
+ ThrowOutOfMemory();
+ }
+ if (bInitialState)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ pEvent->Set();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ m_handle = (HANDLE)pEvent;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ HANDLE h = UnsafeCreateEvent(NULL,FALSE,bInitialState,NULL);
+ if (h == NULL) {
+ ThrowOutOfMemory();
+ }
+ m_handle = h;
+ }
+
+ SetInDeadlockDetection();
+}
+
+void CLREventBase::CreateRWLockReaderEvent (BOOL bInitialState, // If TRUE, initial state is signalled
+ CRWLock *pRWLock
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ // disallow creation of Crst before EE starts
+ PRECONDITION((g_fEEStarted));
+ PRECONDITION((m_handle == INVALID_HANDLE_VALUE));
+ PRECONDITION((GetThread() != NULL));
+ PRECONDITION((!IsOSEvent()));
+ }
+ CONTRACTL_END;
+
+ IHostSyncManager *pManager = CorHost2::GetHostSyncManager();
+ if (pManager == NULL) {
+ HANDLE h = UnsafeCreateEvent(NULL,TRUE,bInitialState,NULL);
+ if (h == NULL) {
+ ThrowOutOfMemory();
+ }
+ m_handle = h;
+ }
+ else {
+ IHostManualEvent *pEvent;
+ HRESULT hr;
+ SIZE_T cookie = (SIZE_T)pRWLock->GetObjectHandle();
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pManager->CreateRWLockReaderEvent(bInitialState, cookie, &pEvent);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (hr != S_OK) {
+ _ASSERTE (hr == E_OUTOFMEMORY);
+ ThrowOutOfMemory();
+ }
+ m_handle = (HANDLE)pEvent;
+ }
+
+ SetInDeadlockDetection();
+}
+#endif // FEATURE_RWLOCK
+
+
+void CLREventBase::CreateOSAutoEvent (BOOL bInitialState // If TRUE, initial state is signalled
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ // disallow creation of Crst before EE starts
+ PRECONDITION((m_handle == INVALID_HANDLE_VALUE));
+ }
+ CONTRACTL_END;
+
+    // Cannot assert here: ASP.NET uses our threadpool before the EE is started.
+ //_ASSERTE (g_fEEStarted);
+
+ SetOSEvent();
+ SetAutoEvent();
+
+ HANDLE h = UnsafeCreateEvent(NULL,FALSE,bInitialState,NULL);
+ if (h == NULL) {
+ ThrowOutOfMemory();
+ }
+ m_handle = h;
+}
+
+
+void CLREventBase::CreateOSManualEvent (BOOL bInitialState // If TRUE, initial state is signalled
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ // disallow creation of Crst before EE starts
+ PRECONDITION((m_handle == INVALID_HANDLE_VALUE));
+ }
+ CONTRACTL_END;
+
+    // Cannot assert here: ASP.NET uses our threadpool before the EE is started.
+ //_ASSERTE (g_fEEStarted);
+
+ SetOSEvent();
+
+ HANDLE h = UnsafeCreateEvent(NULL,TRUE,bInitialState,NULL);
+ if (h == NULL) {
+ ThrowOutOfMemory();
+ }
+ m_handle = h;
+}
+
+
+void CLREventBase::CloseEvent()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (IsInDeadlockDetection()) {GC_TRIGGERS;} else {GC_NOTRIGGER;}
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ GCX_MAYBE_PREEMP(IsInDeadlockDetection() && IsValid());
+
+ _ASSERTE(Thread::Debug_AllowCallout());
+
+ if (m_handle != INVALID_HANDLE_VALUE) {
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (!IsOSEvent() && CLRSyncHosted())
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+
+ if (IsAutoEvent()) {
+ ((IHostAutoEvent*)m_handle)->Release();
+ }
+ else {
+ ((IHostManualEvent*)m_handle)->Release();
+ }
+
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ CloseHandle(m_handle);
+ }
+
+ m_handle = INVALID_HANDLE_VALUE;
+ }
+ m_dwFlags = 0;
+}
+
+
+BOOL CLREventBase::Set()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION((m_handle != INVALID_HANDLE_VALUE));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(Thread::Debug_AllowCallout());
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (!IsOSEvent() && CLRSyncHosted())
+ {
+ if (IsAutoEvent()) {
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = ((IHostAutoEvent*)m_handle)->Set();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ return hr == S_OK;
+ }
+ else {
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = ((IHostManualEvent*)m_handle)->Set();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ return hr == S_OK;
+ }
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ return UnsafeSetEvent(m_handle);
+ }
+
+}
+
+
+BOOL CLREventBase::Reset()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION((m_handle != INVALID_HANDLE_VALUE));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(Thread::Debug_AllowCallout());
+
+ // We do not allow Reset on AutoEvent
+ _ASSERTE (!IsAutoEvent() ||
+ !"Can not call Reset on AutoEvent");
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (!IsOSEvent() && CLRSyncHosted())
+ {
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = ((IHostManualEvent*)m_handle)->Reset();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ return hr == S_OK;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ return UnsafeResetEvent(m_handle);
+ }
+}
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+static DWORD HostAutoEventWait (void *args, DWORD timeout, DWORD option)
+{
+ BOOL alertable = (option & WAIT_ALERTABLE);
+ CONTRACTL
+ {
+ if (alertable)
+ {
+ THROWS;
+ }
+ else
+ {
+ NOTHROW;
+ }
+ if (GetThread())
+ {
+ if (alertable)
+ GC_TRIGGERS;
+ else
+ GC_NOTRIGGER;
+ }
+ else
+ {
+ DISABLED(GC_TRIGGERS);
+ }
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(args));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = ((IHostAutoEvent*)args)->Wait(timeout,option);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+#ifdef _DEBUG
+ if (FAILED(hr) && timeout == INFINITE) {
+ _ASSERTE (option & WAIT_ALERTABLE);
+ }
+#endif
+ if (hr == S_OK) {
+ return WAIT_OBJECT_0;
+ }
+ else if (hr == HOST_E_DEADLOCK) {
+ RaiseDeadLockException();
+ }
+ else if (hr == HOST_E_INTERRUPTED) {
+ _ASSERTE (option & WAIT_ALERTABLE);
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ Thread::UserInterruptAPC(APC_Code);
+ }
+ return WAIT_IO_COMPLETION;
+ }
+ else if (hr == HOST_E_TIMEOUT) {
+ return WAIT_TIMEOUT;
+ }
+ else if (hr == HOST_E_ABANDONED) {
+ return WAIT_ABANDONED;
+ }
+ else if (hr == E_FAIL) {
+ _ASSERTE (!"Unknown host wait failure");
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ _ASSERTE (!"Unknown host wait return");
+ }
+ return 0;
+}
+
+static DWORD HostManualEventWait (void *args, DWORD timeout, DWORD option)
+{
+ CONTRACTL
+ {
+ if (option & WAIT_ALERTABLE)
+ {
+ THROWS;
+ }
+ else
+ {
+ NOTHROW;
+ }
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(args));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = ((IHostManualEvent*)args)->Wait(timeout,option);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ if (hr == COR_E_STACKOVERFLOW)
+ {
+ Thread *pThread = GetThread();
+ if (pThread && pThread->HasThreadStateNC(Thread::TSNC_WaitUntilGCFinished))
+ {
+ return hr;
+ }
+ }
+#ifdef _DEBUG
+ if (FAILED(hr) && timeout == INFINITE) {
+ _ASSERTE (option & WAIT_ALERTABLE);
+ }
+#endif
+ if (hr == S_OK) {
+ return WAIT_OBJECT_0;
+ }
+ else if (hr == HOST_E_DEADLOCK) {
+ RaiseDeadLockException();
+ }
+ else if (hr == HOST_E_INTERRUPTED) {
+ _ASSERTE (option & WAIT_ALERTABLE);
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ Thread::UserInterruptAPC(APC_Code);
+ }
+ return WAIT_IO_COMPLETION;
+ }
+ else if (hr == HOST_E_TIMEOUT) {
+ return WAIT_TIMEOUT;
+ }
+ else if (hr == HOST_E_ABANDONED) {
+ return WAIT_ABANDONED;
+ }
+ else if (hr == E_FAIL) {
+ _ASSERTE (!"Unknown host wait failure");
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ _ASSERTE (!"Unknown host wait return");
+ }
+ return 0;
+}
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+static DWORD CLREventWaitHelper2(HANDLE handle, DWORD dwMilliseconds, BOOL alertable)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ LeaveRuntimeHolder holder((size_t)WaitForSingleObjectEx);
+ return WaitForSingleObjectEx(handle,dwMilliseconds,alertable);
+}
+
+static DWORD CLREventWaitHelper(HANDLE handle, DWORD dwMilliseconds, BOOL alertable)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ struct Param
+ {
+ HANDLE handle;
+ DWORD dwMilliseconds;
+ BOOL alertable;
+ DWORD result;
+ } param;
+ param.handle = handle;
+ param.dwMilliseconds = dwMilliseconds;
+ param.alertable = alertable;
+ param.result = WAIT_FAILED;
+
+    // Cannot use EX_TRY/EX_CATCH here. EX_CATCH toggles the GC mode, this function is
+    // called through RareDisablePreemptiveGC, and EX_CATCH would break the profiler callback.
+ PAL_TRY(Param *, pParam, &param)
+ {
+ // Need to move to another helper (cannot have SEH and C++ destructors
+ // on automatic variables in one function)
+ pParam->result = CLREventWaitHelper2(pParam->handle, pParam->dwMilliseconds, pParam->alertable);
+ }
+ PAL_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
+ {
+ param.result = WAIT_FAILED;
+ }
+ PAL_ENDTRY;
+
+ return param.result;
+}
+
+
+DWORD CLREventBase::Wait(DWORD dwMilliseconds, BOOL alertable, PendingSync *syncState)
+{
+ WRAPPER_NO_CONTRACT;
+ return WaitEx(dwMilliseconds, alertable?WaitMode_Alertable:WaitMode_None,syncState);
+}
+
+
+DWORD CLREventBase::WaitEx(DWORD dwMilliseconds, WaitMode mode, PendingSync *syncState)
+{
+ BOOL alertable = (mode & WaitMode_Alertable)!=0;
+ CONTRACTL
+ {
+ if (alertable)
+ {
+ THROWS; // Thread::DoAppropriateWait can throw
+ }
+ else
+ {
+ NOTHROW;
+ }
+ if (GetThread())
+ {
+ if (alertable)
+ GC_TRIGGERS;
+ else
+ GC_NOTRIGGER;
+ }
+ else
+ {
+ DISABLED(GC_TRIGGERS);
+ }
+ SO_TOLERANT;
+ PRECONDITION(m_handle != INVALID_HANDLE_VALUE); // Handle has to be valid
+ }
+ CONTRACTL_END;
+
+
+ _ASSERTE(Thread::Debug_AllowCallout());
+
+ Thread * pThread = GetThread();
+
+#ifdef _DEBUG
+ // If a CLREvent is OS event only, we can not wait for the event on a managed thread
+ if (IsOSEvent())
+ _ASSERTE (pThread == NULL);
+#endif
+ _ASSERTE((pThread != NULL) || !g_fEEStarted || dbgOnly_IsSpecialEEThread());
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (!IsOSEvent() && CLRSyncHosted())
+ {
+ if ((pThread != NULL) && alertable) {
+ DWORD dwRet = WAIT_FAILED;
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW (pThread, return WAIT_FAILED;);
+ dwRet = pThread->DoAppropriateWait(IsAutoEvent()?HostAutoEventWait:HostManualEventWait,
+ m_handle,dwMilliseconds,
+ mode,
+ syncState);
+ END_SO_INTOLERANT_CODE;
+ return dwRet;
+ }
+ else {
+ _ASSERTE (syncState == NULL);
+ DWORD option = 0;
+ if (alertable) {
+ option |= WAIT_ALERTABLE;
+ }
+ if (IsAutoEvent()) {
+ return HostAutoEventWait((IHostAutoEvent*)m_handle,dwMilliseconds, option);
+ }
+ else {
+ return HostManualEventWait((IHostManualEvent*)m_handle,dwMilliseconds, option);
+ }
+ }
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ if (pThread && alertable) {
+ DWORD dwRet = WAIT_FAILED;
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW (pThread, return WAIT_FAILED;);
+ dwRet = pThread->DoAppropriateWait(1, &m_handle, FALSE, dwMilliseconds,
+ mode,
+ syncState);
+ END_SO_INTOLERANT_CODE;
+ return dwRet;
+ }
+ else {
+ _ASSERTE (syncState == NULL);
+ return CLREventWaitHelper(m_handle,dwMilliseconds,alertable);
+ }
+ }
+}
+
+void CLRSemaphore::Create (DWORD dwInitial, DWORD dwMax)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION(m_handle == INVALID_HANDLE_VALUE);
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostSyncManager *pManager = CorHost2::GetHostSyncManager();
+ if (pManager != NULL) {
+ IHostSemaphore *pSemaphore;
+ #undef CreateSemaphore
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pManager->CreateSemaphore(dwInitial,dwMax,&pSemaphore);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ #define CreateSemaphore(lpSemaphoreAttributes, lInitialCount, lMaximumCount, lpName) \
+ Dont_Use_CreateSemaphore(lpSemaphoreAttributes, lInitialCount, lMaximumCount, lpName)
+ if (hr != S_OK) {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ ThrowOutOfMemory();
+ }
+ m_handle = (HANDLE)pSemaphore;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ HANDLE h = UnsafeCreateSemaphore(NULL,dwInitial,dwMax,NULL);
+ if (h == NULL) {
+ ThrowOutOfMemory();
+ }
+ m_handle = h;
+ }
+}
+
+
+void CLRSemaphore::Close()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_handle != INVALID_HANDLE_VALUE) {
+ if (!CLRSyncHosted()) {
+ CloseHandle(m_handle);
+ }
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ else {
+ ((IHostSemaphore*)m_handle)->Release();
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ m_handle = INVALID_HANDLE_VALUE;
+ }
+}
+
+BOOL CLRSemaphore::Release(LONG lReleaseCount, LONG *lpPreviousCount)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION(m_handle != INVALID_HANDLE_VALUE);
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (CLRSyncHosted())
+ {
+ #undef ReleaseSemaphore
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = ((IHostSemaphore*)m_handle)->ReleaseSemaphore(lReleaseCount,lpPreviousCount);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ #define ReleaseSemaphore(hSemaphore, lReleaseCount, lpPreviousCount) \
+ Dont_Use_ReleaseSemaphore(hSemaphore, lReleaseCount, lpPreviousCount)
+ return hr == S_OK;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ return ::UnsafeReleaseSemaphore(m_handle, lReleaseCount, lpPreviousCount);
+ }
+}
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+static DWORD HostSemaphoreWait (void *args, DWORD timeout, DWORD option)
+{
+ CONTRACTL
+ {
+ if ((option & WAIT_ALERTABLE))
+ {
+ THROWS; // Thread::DoAppropriateWait can throw
+ }
+ else
+ {
+ NOTHROW;
+ }
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(args));
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = ((IHostSemaphore*)args)->Wait(timeout,option);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (hr == S_OK) {
+ return WAIT_OBJECT_0;
+ }
+ else if (hr == HOST_E_INTERRUPTED) {
+ _ASSERTE (option & WAIT_ALERTABLE);
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ Thread::UserInterruptAPC(APC_Code);
+ }
+ return WAIT_IO_COMPLETION;
+ }
+ else if (hr == HOST_E_TIMEOUT) {
+ return WAIT_TIMEOUT;
+ }
+ else if (hr == E_FAIL) {
+ _ASSERTE (!"Unknown host wait failure");
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ _ASSERTE (!"Unknown host wait return");
+ }
+ return 0;
+}
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+DWORD CLRSemaphore::Wait(DWORD dwMilliseconds, BOOL alertable)
+{
+ CONTRACTL
+ {
+ if (GetThread() && alertable)
+ {
+ THROWS; // Thread::DoAppropriateWait can throw
+ }
+ else
+ {
+ NOTHROW;
+ }
+ if (GetThread())
+ {
+ if (alertable)
+ GC_TRIGGERS;
+ else
+ GC_NOTRIGGER;
+ }
+ else
+ {
+ DISABLED(GC_TRIGGERS);
+ }
+ SO_TOLERANT;
+ PRECONDITION(m_handle != INVALID_HANDLE_VALUE); // Invalid to have invalid handle
+ }
+ CONTRACTL_END;
+
+
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread || !g_fEEStarted || dbgOnly_IsSpecialEEThread());
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (CLRSyncHosted())
+ {
+ if (pThread && alertable) {
+ return pThread->DoAppropriateWait(HostSemaphoreWait,
+ m_handle,dwMilliseconds,
+ alertable?WaitMode_Alertable:WaitMode_None,
+ NULL);
+ }
+ else {
+ DWORD option = 0;
+ if (alertable) {
+ option |= WAIT_ALERTABLE;
+ }
+ return HostSemaphoreWait((IHostSemaphore*)m_handle,dwMilliseconds,option);
+ }
+ }
+ else
+#endif // !FEATURE_INCLUDE_ALL_INTERFACES
+ {
+        // TODO wwl: if alertable is FALSE, do we support a host breaking a deadlock?
+        // Currently we cannot call through DoAppropriateWait because of CannotThrowComplusException.
+        // We should reconsider this once our code is exception safe.
+ if (pThread && alertable) {
+ return pThread->DoAppropriateWait(1, &m_handle, FALSE, dwMilliseconds,
+ alertable?WaitMode_Alertable:WaitMode_None,
+ NULL);
+ }
+ else {
+ DWORD result = WAIT_FAILED;
+ EX_TRY
+ {
+ LeaveRuntimeHolder holder((size_t)WaitForSingleObjectEx);
+ result = WaitForSingleObjectEx(m_handle,dwMilliseconds,alertable);
+ }
+ EX_CATCH
+ {
+ result = WAIT_FAILED;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ return result;
+ }
+ }
+}
+
+void CLRMutex::Create(LPSECURITY_ATTRIBUTES lpMutexAttributes, BOOL bInitialOwner, LPCTSTR lpName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION(m_handle == INVALID_HANDLE_VALUE && m_handle != NULL);
+ }
+ CONTRACTL_END;
+
+ if (bInitialOwner)
+ {
+ Thread::BeginThreadAffinity();
+ }
+ m_handle = WszCreateMutex(lpMutexAttributes,bInitialOwner,lpName);
+ if (m_handle == NULL)
+ {
+ if (bInitialOwner)
+ {
+ Thread::EndThreadAffinity();
+ }
+ ThrowOutOfMemory();
+ }
+}
+
+void CLRMutex::Close()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_handle != INVALID_HANDLE_VALUE)
+ {
+ CloseHandle(m_handle);
+ m_handle = INVALID_HANDLE_VALUE;
+ }
+}
+
+BOOL CLRMutex::Release()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION(m_handle != INVALID_HANDLE_VALUE && m_handle != NULL);
+ }
+ CONTRACTL_END;
+
+ BOOL fRet = ReleaseMutex(m_handle);
+ if (fRet)
+ {
+ Thread::EndThreadAffinity();
+ EE_LOCK_RELEASED(this);
+ }
+ return fRet;
+}
+
+DWORD CLRMutex::Wait(DWORD dwMilliseconds, BOOL bAlertable)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK;
+ PRECONDITION(m_handle != INVALID_HANDLE_VALUE && m_handle != NULL);
+ }
+ CONTRACTL_END;
+
+ Thread::BeginThreadAffinity();
+ DWORD fRet = WaitForSingleObjectEx(m_handle, dwMilliseconds, bAlertable);
+ if ((fRet != WAIT_OBJECT_0) && (fRet != WAIT_ABANDONED))
+ {
+ Thread::EndThreadAffinity();
+ }
+
+ if (fRet == WAIT_OBJECT_0)
+ {
+ EE_LOCK_TAKEN(this);
+ }
+
+ return fRet;
+}
diff --git a/src/vm/synch.h b/src/vm/synch.h
new file mode 100644
index 0000000000..d6061f6566
--- /dev/null
+++ b/src/vm/synch.h
@@ -0,0 +1,209 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+//
+
+
+#ifndef __Synch_h__
+#define __Synch_h__
+
+enum WaitMode
+{
+    WaitMode_None = 0x0,
+    WaitMode_Alertable = 0x1,      // Can be woken by an APC. May pump messages.
+    WaitMode_IgnoreSyncCtx = 0x2,  // Dispatch to the synchronization context, if one exists.
+    WaitMode_ADUnload = 0x4,       // The wait is for AD unload to start. If it is interrupted by AD unload, we can start aborting.
+    WaitMode_InDeadlock = 0x8,     // The wait can be terminated by the host's deadlock detection.
+};
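+
+// The values are flags and may be combined; for example, the wait in
+// SynchronizationContextNative::WaitHelper passes
+// (WaitMode)(WaitMode_Alertable | WaitMode_IgnoreSyncCtx).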
+
+
+struct PendingSync;
+class CRWLock;
+
+class CLREventBase
+{
+public:
+ CLREventBase()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_handle = INVALID_HANDLE_VALUE;
+ m_dwFlags = 0;
+ }
+
+ // Create an Event that is host aware
+ void CreateAutoEvent(BOOL bInitialState);
+ void CreateManualEvent(BOOL bInitialState);
+
+ void CreateMonitorEvent(SIZE_T Cookie); // robust against initialization races - for exclusive use by AwareLock
+
+#ifdef FEATURE_RWLOCK
+ void CreateRWLockReaderEvent(BOOL bInitialState, CRWLock* pRWLock);
+ void CreateRWLockWriterEvent(BOOL bInitialState, CRWLock* pRWLock);
+#endif
+
+ // Create an Event that is not host aware
+ void CreateOSAutoEvent (BOOL bInitialState);
+ void CreateOSManualEvent (BOOL bInitialState);
+
+ void CloseEvent();
+
+ BOOL IsValid() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_handle != INVALID_HANDLE_VALUE;
+ }
+
+ BOOL IsMonitorEventAllocated()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwFlags & CLREVENT_FLAGS_MONITOREVENT_ALLOCATED;
+ }
+
+#ifndef DACCESS_COMPILE
+ HANDLE GetHandleUNHOSTED() {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE (IsOSEvent() || !CLRSyncHosted());
+ return m_handle;
+ }
+#endif // DACCESS_COMPILE
+
+ BOOL Set();
+ void SetMonitorEvent(); // robust against races - for exclusive use by AwareLock
+ BOOL Reset();
+ DWORD Wait(DWORD dwMilliseconds, BOOL bAlertable, PendingSync *syncState=NULL);
+ DWORD WaitEx(DWORD dwMilliseconds, WaitMode mode, PendingSync *syncState=NULL);
+
+protected:
+ HANDLE m_handle;
+
+private:
+ enum
+ {
+ CLREVENT_FLAGS_AUTO_EVENT = 0x0001,
+ CLREVENT_FLAGS_OS_EVENT = 0x0002,
+ CLREVENT_FLAGS_IN_DEADLOCK_DETECTION = 0x0004,
+
+ CLREVENT_FLAGS_MONITOREVENT_ALLOCATED = 0x0008,
+ CLREVENT_FLAGS_MONITOREVENT_SIGNALLED = 0x0010,
+
+ CLREVENT_FLAGS_STATIC = 0x0020,
+
+ // Several bits unused;
+ };
+
+ Volatile<DWORD> m_dwFlags;
+
+ BOOL IsAutoEvent() { LIMITED_METHOD_CONTRACT; return m_dwFlags & CLREVENT_FLAGS_AUTO_EVENT; }
+ void SetAutoEvent ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // cannot use `|=' operator on `Volatile<DWORD>'
+ m_dwFlags = m_dwFlags | CLREVENT_FLAGS_AUTO_EVENT;
+ }
+ BOOL IsOSEvent() { LIMITED_METHOD_CONTRACT; return m_dwFlags & CLREVENT_FLAGS_OS_EVENT; }
+ void SetOSEvent ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // cannot use `|=' operator on `Volatile<DWORD>'
+ m_dwFlags = m_dwFlags | CLREVENT_FLAGS_OS_EVENT;
+ }
+ BOOL IsInDeadlockDetection() { LIMITED_METHOD_CONTRACT; return m_dwFlags & CLREVENT_FLAGS_IN_DEADLOCK_DETECTION; }
+ void SetInDeadlockDetection ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // cannot use `|=' operator on `Volatile<DWORD>'
+ m_dwFlags = m_dwFlags | CLREVENT_FLAGS_IN_DEADLOCK_DETECTION;
+ }
+};
+
+
+class CLREvent : public CLREventBase
+{
+public:
+
+#ifndef DACCESS_COMPILE
+ ~CLREvent()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ CloseEvent();
+ }
+#endif
+};
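+
+// Illustrative usage (hypothetical call site):
+//
+//   CLREvent ev;
+//   ev.CreateAutoEvent(FALSE);                         // host-aware; may throw on OOM
+//   ev.Set();                                          // release one waiter
+//   DWORD r = ev.WaitEx(INFINITE, WaitMode_Alertable); // WAIT_OBJECT_0 on success
+//
+// The destructor calls CloseEvent(), so the handle is released automatically.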
+
+
+// CLREventStatic
+// Same as CLREvent, but intended to be used for global variables.
+// Instances may leak their handle because of the order in which
+// global destructors are run. Note that you can still explicitly
+// call CloseEvent, in which case the handle is not leaked.
+class CLREventStatic : public CLREventBase
+{
+};
+
+
+class CLRSemaphore {
+public:
+ CLRSemaphore()
+ : m_handle(INVALID_HANDLE_VALUE)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ ~CLRSemaphore()
+ {
+ WRAPPER_NO_CONTRACT;
+ Close ();
+ }
+
+ void Create(DWORD dwInitial, DWORD dwMax);
+ void Close();
+
+ BOOL IsValid() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_handle != INVALID_HANDLE_VALUE;
+ }
+
+ DWORD Wait(DWORD dwMilliseconds, BOOL bAlertable);
+    BOOL Release(LONG lReleaseCount, LONG* lpPreviousCount);
+
+private:
+ HANDLE m_handle;
+};
+
+class CLRMutex {
+public:
+ CLRMutex()
+ : m_handle(INVALID_HANDLE_VALUE)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ ~CLRMutex()
+ {
+ WRAPPER_NO_CONTRACT;
+ Close ();
+ }
+
+ void Create(LPSECURITY_ATTRIBUTES lpMutexAttributes, BOOL bInitialOwner, LPCTSTR lpName);
+ void Close();
+
+ BOOL IsValid() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_handle != INVALID_HANDLE_VALUE;
+ }
+
+ DWORD Wait(DWORD dwMilliseconds, BOOL bAlertable);
+ BOOL Release();
+
+private:
+ HANDLE m_handle;
+};
+
+BOOL CLREventWaitWithTry(CLREventBase *pEvent, DWORD timeout, BOOL fAlertable, DWORD *pStatus);
+#endif
diff --git a/src/vm/synchronizationcontextnative.cpp b/src/vm/synchronizationcontextnative.cpp
new file mode 100644
index 0000000000..7c03389a41
--- /dev/null
+++ b/src/vm/synchronizationcontextnative.cpp
@@ -0,0 +1,161 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Implementation: SynchronizationContextNative.cpp
+**
+**
+** Purpose: Native methods on System.Threading.SynchronizationContext.
+**
+**
+===========================================================*/
+
+#include "common.h"
+
+#ifdef FEATURE_APPX
+#include <roapi.h>
+#include <windows.ui.core.h>
+#endif
+#include "synchronizationcontextnative.h"
+
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+FCIMPL3(DWORD, SynchronizationContextNative::WaitHelper, PTRArray *handleArrayUNSAFE, CLR_BOOL waitAll, DWORD millis)
+{
+ FCALL_CONTRACT;
+
+ DWORD ret = 0;
+
+ PTRARRAYREF handleArrayObj = (PTRARRAYREF) handleArrayUNSAFE;
+ HELPER_METHOD_FRAME_BEGIN_RET_1(handleArrayObj);
+
+ CQuickArray<HANDLE> qbHandles;
+ int cHandles = handleArrayObj->GetNumComponents();
+
+ // Since DoAppropriateWait could cause a GC, we need to copy the handles to an unmanaged block
+ // of memory to ensure they aren't relocated during the call to DoAppropriateWait.
+ qbHandles.AllocThrows(cHandles);
+ memcpy(qbHandles.Ptr(), handleArrayObj->GetDataPtr(), cHandles * sizeof(HANDLE));
+
+ Thread * pThread = GetThread();
+ ret = pThread->DoAppropriateWait(cHandles, qbHandles.Ptr(), waitAll, millis,
+ (WaitMode)(WaitMode_Alertable | WaitMode_IgnoreSyncCtx));
+
+ HELPER_METHOD_FRAME_END();
+ return ret;
+}
+FCIMPLEND
+#endif // #ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+
+#ifdef FEATURE_APPX
+
+Volatile<ABI::Windows::UI::Core::ICoreWindowStatic*> g_pICoreWindowStatic;
+
+void* QCALLTYPE SynchronizationContextNative::GetWinRTDispatcherForCurrentThread()
+{
+ QCALL_CONTRACT;
+ void* result = NULL;
+ BEGIN_QCALL;
+
+ _ASSERTE(WinRTSupported());
+
+ //
+ // Get access to ICoreWindow's statics. We grab just one ICoreWindowStatic for the whole process.
+ //
+ ABI::Windows::UI::Core::ICoreWindowStatic* pICoreWindowStatic = g_pICoreWindowStatic;
+ if (pICoreWindowStatic == NULL)
+ {
+ SafeComHolderPreemp<ABI::Windows::UI::Core::ICoreWindowStatic> pNewICoreWindowStatic;
+ {
+ HRESULT hr = clr::winrt::GetActivationFactory(RuntimeClass_Windows_UI_Core_CoreWindow, (ABI::Windows::UI::Core::ICoreWindowStatic**)pNewICoreWindowStatic.GetAddr());
+
+ //
+ // Older Windows builds don't support ICoreWindowStatic. We should just return a null CoreDispatcher
+ // in that case, rather than throwing.
+ //
+ if (hr != E_NOTIMPL)
+ IfFailThrow(hr);
+ }
+
+ if (pNewICoreWindowStatic != NULL)
+ {
+ ABI::Windows::UI::Core::ICoreWindowStatic* old = InterlockedCompareExchangeT<ABI::Windows::UI::Core::ICoreWindowStatic*>(&g_pICoreWindowStatic, pNewICoreWindowStatic, NULL);
+ if (old == NULL)
+ {
+ pNewICoreWindowStatic.SuppressRelease();
+ pICoreWindowStatic = pNewICoreWindowStatic;
+ }
+ else
+ {
+ pICoreWindowStatic = old;
+ }
+ }
+ }
+
+
+ if (pICoreWindowStatic != NULL)
+ {
+ //
+ // Get the current ICoreWindow
+ //
+ SafeComHolderPreemp<ABI::Windows::UI::Core::ICoreWindow> pCoreWindow;
+
+ //
+        // Workaround: we're currently ignoring the HRESULT from GetForCurrentThread, because Windows returns errors for threads that have no CoreWindow.
+        // A better behavior would be to return S_OK with a NULL CoreWindow. If/when Windows does the right thing here, we can change this
+        // back to checking the HRESULT.
+ //
+ pICoreWindowStatic->GetForCurrentThread(&pCoreWindow);
+
+ if (pCoreWindow != NULL)
+ {
+ //
+ // Get the ICoreDispatcher for this window
+ //
+ SafeComHolderPreemp<ABI::Windows::UI::Core::ICoreDispatcher> pCoreDispatcher;
+ IfFailThrow(pCoreWindow->get_Dispatcher(&pCoreDispatcher));
+
+ if (pCoreDispatcher != NULL)
+ {
+ //
+ // Does the dispatcher belong to the current thread?
+ //
+ boolean hasThreadAccess = FALSE;
+ IfFailThrow(pCoreDispatcher->get_HasThreadAccess(&hasThreadAccess));
+ if (hasThreadAccess)
+ {
+ //
+ // This is the dispatcher for the current thread. Return it.
+ //
+ pCoreDispatcher.SuppressRelease();
+ result = (void*)pCoreDispatcher;
+ }
+ }
+ }
+ }
+
+ END_QCALL;
+ return result;
+}
+
+void SynchronizationContextNative::Cleanup()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (g_pICoreWindowStatic)
+ {
+ SafeRelease(g_pICoreWindowStatic);
+ g_pICoreWindowStatic = NULL;
+ }
+}
+
+
+
+#endif //FEATURE_APPX
diff --git a/src/vm/synchronizationcontextnative.h b/src/vm/synchronizationcontextnative.h
new file mode 100644
index 0000000000..d0902e2330
--- /dev/null
+++ b/src/vm/synchronizationcontextnative.h
@@ -0,0 +1,33 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: SynchronizationContextNative.h
+**
+**
+** Purpose: Native methods on System.Threading.SynchronizationContext.
+**
+**
+===========================================================*/
+
+#ifndef _SYNCHRONIZATIONCONTEXTNATIVE_H
+#define _SYNCHRONIZATIONCONTEXTNATIVE_H
+
+class SynchronizationContextNative
+{
+public:
+
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+ static FCDECL3(DWORD, WaitHelper, PTRArray *handleArrayUNSAFE, CLR_BOOL waitAll, DWORD millis);
+#endif
+
+#ifdef FEATURE_APPX
+ static void* QCALLTYPE GetWinRTDispatcherForCurrentThread();
+ static void Cleanup();
+#endif
+};
+#endif // _SYNCHRONIZATIONCONTEXTNATIVE_H
+
diff --git a/src/vm/testhookmgr.cpp b/src/vm/testhookmgr.cpp
new file mode 100644
index 0000000000..94b46abd97
--- /dev/null
+++ b/src/vm/testhookmgr.cpp
@@ -0,0 +1,779 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+//
+
+#include "common.h"
+#include "testhookmgr.h"
+#include "appdomain.hpp"
+#include "appdomain.inl"
+#include "finalizerthread.h"
+
+#ifdef FEATURE_TESTHOOKS
+CLRTestHookManager* CLRTestHookManager::g_pManager=NULL;
+CLRTestHookManager::~CLRTestHookManager()
+{
+
+}
+
+HRESULT CLRTestHookManager::AddTestHook(ICLRTestHook* hook)
+{
+ WRAPPER_NO_CONTRACT;
+ DWORD newidx=FastInterlockIncrement(&m_nHooks);
+ if (newidx>=NumItems(m_pHooks))
+ {
+ FastInterlockDecrement(&m_nHooks);
+ return DISP_E_OVERFLOW;
+ }
+ m_pHooks[newidx-1].Set(hook);
+ return S_OK;
+}
+
+
+ICLRTestHookManager* CLRTestHookManager::Start()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (g_pManager==NULL)
+ {
+ CLRTestHookManager* newman=new (nothrow)CLRTestHookManager();
+ if (newman!=NULL && FastInterlockCompareExchangePointer(&g_pManager, newman, 0)!=0)
+ delete newman;
+ }
+ if(g_pManager)
+ g_pManager->AddRef();
+ return g_pManager;
+}
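+
+// Start() is a create-then-publish singleton: every caller may allocate a
+// manager, but only the pointer that wins the interlocked compare-exchange is
+// kept; losers delete their copy and take a reference on the published instance.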
+
+CLRTestHookManager::CLRTestHookManager()
+{
+ WRAPPER_NO_CONTRACT;
+ m_nHooks=0;
+ m_cRef=1;
+ ZeroMemory(m_pHooks,sizeof(m_pHooks));
+}
+
+HRESULT CLRTestHookManager::AppDomainStageChanged(DWORD adid,DWORD oldstage,DWORD newstage)
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ struct Param
+ {
+ CLRTestHookManager *pThis;
+ DWORD adid;
+ DWORD oldstage;
+ DWORD newstage;
+ } param;
+ param.pThis = this;
+ param.adid = adid;
+ param.oldstage = oldstage;
+ param.newstage = newstage;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ //ignores the returned codes
+ for (LONG i = 0; i < pParam->pThis->m_nHooks; i++)
+ {
+ ICLRTestHook* hook = pParam->pThis->m_pHooks[i].v1();
+ if(hook)
+ {
+ HRESULT hr=hook->AppDomainStageChanged(pParam->adid, pParam->oldstage, pParam->newstage);
+ _ASSERTE(SUCCEEDED(hr));
+ }
+ }
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ _ASSERTE(!"Test Hook threw an exception.");
+ }
+ PAL_ENDTRY;
+
+ return S_OK;
+};
+
+
+HRESULT CLRTestHookManager::NextFileLoadLevel(DWORD adid, LPVOID domainfile,DWORD newlevel)
+{
+ STATIC_CONTRACT_THROWS;
+ HRESULT hr=S_OK;
+ {
+ for (LONG i=0;i<m_nHooks;i++)
+ {
+ ICLRTestHook* hook=m_pHooks[i].v1();
+ if(hook)
+ {
+ HRESULT hr2=hook->NextFileLoadLevel( adid, domainfile, newlevel);
+ _ASSERTE(SUCCEEDED(hr)||SUCCEEDED(hr2));
+ if (SUCCEEDED(hr))
+ hr=hr2;
+ }
+ }
+ }
+ IfFailThrow(hr);
+ return hr;
+}
+
+HRESULT CLRTestHookManager::CompletingFileLoadLevel(DWORD adid, LPVOID domainfile,DWORD newlevel)
+{
+ STATIC_CONTRACT_THROWS;
+ HRESULT hr=S_OK;
+ {
+ for (LONG i=0;i<m_nHooks;i++)
+ {
+ ICLRTestHook* hook=m_pHooks[i].v1();
+ if(hook)
+ {
+ HRESULT hr2=hook->CompletingFileLoadLevel( adid, domainfile, newlevel);
+ _ASSERTE(SUCCEEDED(hr)||SUCCEEDED(hr2));
+ if (SUCCEEDED(hr))
+ hr=hr2;
+ }
+ }
+ }
+
+
+ IfFailThrow(hr);
+ return hr;
+}
+
+HRESULT CLRTestHookManager::CompletedFileLoadLevel(DWORD adid, LPVOID domainfile,DWORD newlevel)
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ struct Param
+ {
+ CLRTestHookManager *pThis;
+ DWORD adid;
+ LPVOID domainfile;
+ DWORD newlevel;
+ } param;
+ param.pThis = this;
+ param.adid = adid;
+ param.domainfile = domainfile;
+ param.newlevel = newlevel;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ //ignores the returned codes
+ for (LONG i = 0; i < pParam->pThis->m_nHooks; i++)
+ {
+ ICLRTestHook* hook = pParam->pThis->m_pHooks[i].v1();
+ if(hook)
+ {
+ HRESULT hr=hook->CompletedFileLoadLevel(pParam->adid, pParam->domainfile, pParam->newlevel);
+ _ASSERTE(SUCCEEDED(hr));
+ }
+ }
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ _ASSERTE(!"Test Hook threw an exception.");
+ }
+ PAL_ENDTRY
+
+ return S_OK;
+}
+
+HRESULT CLRTestHookManager::EnteringAppDomain(DWORD adid)
+{
+ STATIC_CONTRACT_THROWS;
+ HRESULT hr=S_OK;
+ {
+ for (LONG i=0;i<m_nHooks;i++)
+ {
+ ICLRTestHook* hook=m_pHooks[i].v1();
+ if(hook)
+ {
+ HRESULT hr2=hook->EnteringAppDomain(adid);
+ _ASSERTE(SUCCEEDED(hr)||SUCCEEDED(hr2));
+ if (SUCCEEDED(hr))
+ hr=hr2;
+ }
+ }
+ }
+ IfFailThrow(hr);
+ return hr;
+}
+
+HRESULT CLRTestHookManager::EnteredAppDomain(DWORD adid)
+{
+ STATIC_CONTRACT_THROWS;
+ HRESULT hr=S_OK;
+ {
+ for (LONG i=0;i<m_nHooks;i++)
+ {
+ ICLRTestHook* hook=m_pHooks[i].v1();
+ if(hook)
+ {
+ HRESULT hr2=hook->EnteredAppDomain(adid);
+ _ASSERTE(SUCCEEDED(hr)||SUCCEEDED(hr2));
+ if (SUCCEEDED(hr))
+ hr=hr2;
+ }
+ }
+ }
+ IfFailThrow(hr);
+ return hr;
+}
+
+HRESULT CLRTestHookManager::LeavingAppDomain(DWORD adid)
+{
+ STATIC_CONTRACT_THROWS;
+ HRESULT hr=S_OK;
+ {
+ for (LONG i=0;i<m_nHooks;i++)
+ {
+ ICLRTestHook* hook=m_pHooks[i].v1();
+ if(hook)
+ {
+ HRESULT hr2=hook->LeavingAppDomain(adid);
+ _ASSERTE(SUCCEEDED(hr)||SUCCEEDED(hr2));
+ if (SUCCEEDED(hr))
+ hr=hr2;
+ }
+ }
+ }
+ IfFailThrow(hr);
+ return hr;
+}
+
+HRESULT CLRTestHookManager::LeftAppDomain(DWORD adid)
+{
+ STATIC_CONTRACT_THROWS;
+ HRESULT hr=S_OK;
+ {
+ for (LONG i=0;i<m_nHooks;i++)
+ {
+ ICLRTestHook* hook=m_pHooks[i].v1();
+ if(hook)
+ {
+ HRESULT hr2=hook->LeftAppDomain(adid);
+ _ASSERTE(SUCCEEDED(hr)||SUCCEEDED(hr2));
+ if (SUCCEEDED(hr))
+ hr=hr2;
+ }
+ }
+ }
+ IfFailThrow(hr);
+ return hr;
+}
+
+HRESULT CLRTestHookManager::UnwindingThreads(DWORD adid)
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ struct Param
+ {
+ CLRTestHookManager *pThis;
+ DWORD adid;
+ } param;
+ param.pThis = this;
+ param.adid = adid;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ //ignores the returned codes
+ for (LONG i = 0; i < pParam->pThis->m_nHooks; i++)
+ {
+ ICLRTestHook* hook = pParam->pThis->m_pHooks[i].v1();
+ if(hook)
+ {
+ HRESULT hr=hook->UnwindingThreads(pParam->adid);
+ _ASSERTE(SUCCEEDED(hr));
+ }
+ }
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ _ASSERTE(!"Test Hook threw an exception.");
+ }
+ PAL_ENDTRY
+
+ return S_OK;
+}
+
+HRESULT CLRTestHookManager::RuntimeStarted(DWORD code)
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ struct Param
+ {
+ CLRTestHookManager *pThis;
+ DWORD code;
+ } param;
+ param.pThis = this;
+ param.code = code;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ //ignores the returned codes
+ for (LONG i = 0; i < pParam->pThis->m_nHooks; i++)
+ {
+ ICLRTestHook* hook = pParam->pThis->m_pHooks[i].v1();
+ if(hook)
+ {
+ HRESULT hr=hook->RuntimeStarted(pParam->code);
+ _ASSERTE(SUCCEEDED(hr));
+ }
+ }
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ _ASSERTE(!"Test Hook threw an exception.");
+ }
+ PAL_ENDTRY
+
+ return S_OK;
+}
+
+HRESULT CLRTestHookManager::UnwoundThreads(DWORD adid)
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ struct Param
+ {
+ CLRTestHookManager *pThis;
+ DWORD adid;
+ } param;
+ param.pThis = this;
+ param.adid = adid;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ //ignores the returned codes
+ for (LONG i = 0; i < pParam->pThis->m_nHooks; i++)
+ {
+ ICLRTestHook* hook = pParam->pThis->m_pHooks[i].v1();
+ if(hook)
+ {
+ HRESULT hr=hook->UnwoundThreads(pParam->adid);
+ _ASSERTE(SUCCEEDED(hr));
+ }
+ }
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ _ASSERTE(!"Test Hook threw an exception.");
+ }
+ PAL_ENDTRY
+
+ return S_OK;
+}
+
+HRESULT CLRTestHookManager::AppDomainDestroyed(DWORD adid)
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ struct Param
+ {
+ CLRTestHookManager *pThis;
+ DWORD adid;
+ } param;
+ param.pThis = this;
+ param.adid = adid;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ //ignores the returned codes
+ for (LONG i = 0; i < pParam->pThis->m_nHooks; i++)
+ {
+ ICLRTestHook* hook = pParam->pThis->m_pHooks[i].v1();
+ if(hook)
+ {
+ HRESULT hr=hook->AppDomainDestroyed(pParam->adid);
+ _ASSERTE(SUCCEEDED(hr));
+ }
+ }
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ _ASSERTE(!"Test Hook threw an exception.");
+ }
+ PAL_ENDTRY
+
+ return S_OK;
+}
+
+STDMETHODIMP CLRTestHookManager::ImageMapped(LPCWSTR wszPath, LPCVOID pBaseAddress,DWORD flags)
+{
+ STATIC_CONTRACT_NOTHROW;
+
+ struct Param
+ {
+ CLRTestHookManager *pThis;
+ LPCWSTR wszPath;
+ LPCVOID pBaseAddress;
+ DWORD flags;
+ } param;
+ param.pThis = this;
+ param.wszPath = wszPath;
+ param.pBaseAddress = pBaseAddress;
+ param.flags = flags;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ //ignores the returned codes
+ for (LONG i = 0; i < pParam->pThis->m_nHooks; i++)
+ {
+ ICLRTestHook2* hook = pParam->pThis->m_pHooks[i].v2();
+ if(hook)
+ {
+ HRESULT hr=hook->ImageMapped(pParam->wszPath,pParam->pBaseAddress,pParam->flags);
+ _ASSERTE(SUCCEEDED(hr));
+ }
+ }
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ _ASSERTE(!"Test Hook threw an exception.");
+ }
+ PAL_ENDTRY
+
+ return S_OK;
+
+}
+
+HRESULT CLRTestHookManager::AppDomainCanBeUnloaded(DWORD adid, BOOL bUnsafePoint)
+{
+ STATIC_CONTRACT_THROWS;
+ HRESULT hr=S_OK;
+ {
+ if (!ThreadCanBeAborted())
+ return S_OK;
+ for (LONG i=0;i<m_nHooks;i++)
+ {
+ ICLRTestHook* hook=m_pHooks[i].v1();
+ if(hook)
+ {
+ HRESULT hr2=hook->AppDomainCanBeUnloaded(adid,bUnsafePoint);
+ _ASSERTE(SUCCEEDED(hr)||SUCCEEDED(hr2));
+ if (SUCCEEDED(hr))
+ hr=hr2;
+ }
+ }
+ }
+ IfFailThrow(hr);
+ return hr;
+}
+
+HRESULT CLRTestHookManager::StartingNativeImageBind(LPCWSTR wszAsmName, BOOL bIsCompilationProcess)
+{
+ STATIC_CONTRACT_THROWS;
+ HRESULT hr=S_OK;
+ {
+ for (LONG i=0;i<m_nHooks;i++)
+ {
+ ICLRTestHook3* hook=m_pHooks[i].v3();
+ if(hook)
+ {
+ HRESULT hr2=hook->StartingNativeImageBind(wszAsmName, bIsCompilationProcess);
+ _ASSERTE(SUCCEEDED(hr)||SUCCEEDED(hr2));
+ if (SUCCEEDED(hr))
+ hr=hr2;
+ }
+ }
+ }
+
+ IfFailThrow(hr);
+ return hr;
+}
+
+HRESULT CLRTestHookManager::CompletedNativeImageBind(LPVOID pFile,LPCUTF8 simpleName, BOOL hasNativeImage)
+{
+ STATIC_CONTRACT_THROWS;
+ HRESULT hr=S_OK;
+ {
+ for (LONG i=0;i<m_nHooks;i++)
+ {
+ ICLRTestHook3* hook=m_pHooks[i].v3();
+ if(hook)
+ {
+ HRESULT hr2=hook->CompletedNativeImageBind(pFile, simpleName, hasNativeImage);
+ _ASSERTE(SUCCEEDED(hr)||SUCCEEDED(hr2));
+ if (SUCCEEDED(hr))
+ hr=hr2;
+ }
+ }
+ }
+
+ IfFailThrow(hr);
+ return hr;
+}
+
+HRESULT CLRTestHookManager::AboutToLockImage(LPCWSTR wszPath, BOOL bIsCompilationProcess)
+{
+ STATIC_CONTRACT_THROWS;
+ HRESULT hr=S_OK;
+ {
+ for (LONG i=0;i<m_nHooks;i++)
+ {
+ ICLRTestHook3* hook=m_pHooks[i].v3();
+ if(hook)
+ {
+ HRESULT hr2=hook->AboutToLockImage(wszPath, bIsCompilationProcess);
+ _ASSERTE(SUCCEEDED(hr)||SUCCEEDED(hr2));
+ if (SUCCEEDED(hr))
+ hr=hr2;
+ }
+ }
+ }
+
+ IfFailThrow(hr);
+ return hr;
+}
+
+HRESULT CLRTestHookManager::EnableSlowPath (BOOL bEnable)
+{
+ WRAPPER_NO_CONTRACT;
+ ThreadStore::TrapReturningThreads(bEnable);
+ return S_OK;
+}
+
+ULONG CLRTestHookManager::AddRef()
+{
+ return FastInterlockIncrement(&m_cRef);
+}
+
+ULONG CLRTestHookManager::Release()
+{
+ ULONG nRet= FastInterlockDecrement(&m_cRef);
+ // never goes away
+ return nRet;
+}
+
+HRESULT CLRTestHookManager::QueryInterface(REFIID riid, void **ppv)
+{
+ if (riid!=IID_IUnknown && riid!=IID_ICLRTestHookManager)
+ return E_NOINTERFACE;
+ AddRef();
+ *ppv=(ICLRTestHookManager*)this;
+ return S_OK;
+}
+
+
+HRESULT CLRTestHookManager::CheckConfig()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr=S_OK;
+ if (g_pConfig)
+ {
+ LPWSTR szTestHooks=NULL;
+ hr=CLRConfig::GetConfigValue(CLRConfig::INTERNAL_TestHooks,&szTestHooks);
+ if (SUCCEEDED(hr) && szTestHooks!=NULL && *szTestHooks!=W('\0'))
+ {
+ LPWSTR curr=szTestHooks;
+ do
+ {
+ LPWSTR next=wcschr(curr,W(';'));
+ if (next)
+ *(next++)=0;
+ LPWSTR delim=wcschr(curr,W(','));
+ if (delim)
+ {
+ *(delim++)=W('\0');
+ HMODULE hMod=WszLoadLibrary(curr);
+ _ASSERTE(hMod);
+ if (hMod!=NULL)
+ {
+ MAKE_MULTIBYTE_FROMWIDE(szFName,delim,CP_ACP);
+ CLRTESTHOOKPROC* fn=(CLRTESTHOOKPROC*)GetProcAddress(hMod,szFName);
+ _ASSERTE(fn);
+ if(fn)
+ fn(Start());
+ }
+ }
+ curr=next;
+ }
+ while(curr!=NULL && *curr!=W('\0'));
+
+ delete szTestHooks;
+ }
+ }
+ return hr;
+}
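+
+// Judging from the parsing above, the TestHooks config value is a ';'-separated
+// list of "<dll path>,<export name>" pairs, e.g. (hypothetical value):
+//
+//   myhooks.dll,InstallHooks;otherhooks.dll,Install
+//
+// Each named export is invoked as a CLRTESTHOOKPROC and handed the manager
+// returned by Start(), so it can register its hooks via AddTestHook.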
+
+
+HRESULT CLRTestHookManager::UnloadAppDomain(DWORD adid,DWORD flags)
+{
+ HRESULT hr = S_OK;
+ BEGIN_ENTRYPOINT_NOTHROW;
+    // We do not use BEGIN_EXTERNAL_ENTRYPOINT here because we do not want to set up
+    // a Thread. The process may be OOM, and we want Unload to work regardless.
+ if (flags==ADUF_FORCEFULLGC)
+ {
+ SystemDomain::LockHolder ulh;
+ ADID id(adid);
+        AppDomainFromIDHolder pApp(id, TRUE, AppDomainFromIDHolder::SyncType_ADLock);
+ if(!pApp.IsUnloaded())
+ pApp->SetForceGCOnUnload(TRUE);
+ }
+ hr = AppDomain::UnloadById(ADID(adid), flags!=ADUF_ASYNCHRONOUS,TRUE);
+ END_ENTRYPOINT_NOTHROW;
+ return hr;
+}
+
+VOID CLRTestHookManager::DoApproriateWait( int cObjs, HANDLE *pObjs, INT32 iTimeout, BOOL bWaitAll, int* res)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ Thread* thread=GetThread();
+ DWORD result = WAIT_FAILED;
+ if(thread)
+ result=thread->DoAppropriateWait(cObjs,pObjs,bWaitAll,iTimeout,WaitMode_Alertable,NULL);
+ else
+ {
+ LeaveRuntimeHolder holder((size_t)WaitForMultipleObjectsEx);
+ result = WaitForMultipleObjectsEx(cObjs,pObjs,bWaitAll,iTimeout,TRUE);
+ }
+}
+
+
+HRESULT CLRTestHookManager::GC(int generation)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(GetThread()==NULL || !GetThread()->PreemptiveGCDisabled());
+ GCHeap::GetGCHeap()->GarbageCollect(generation);
+ FinalizerThread::FinalizerThreadWait();
+ return S_OK;
+}
+
+
+HRESULT CLRTestHookManager::GetSimpleName(LPVOID domainfile,LPCUTF8* name)
+{
+ HRESULT hr=S_OK;
+ EX_TRY
+ {
+ *name=((DomainFile*)domainfile)->GetSimpleName();
+ }
+ EX_CATCH_HRESULT(hr);
+ return hr;
+}
+
+
+
+INT_PTR CLRTestHookManager::GetCurrentThreadType()
+{
+ WRAPPER_NO_CONTRACT;
+ return (INT_PTR) ClrFlsGetValue (TlsIdx_ThreadType);
+}
+
+INT_PTR CLRTestHookManager::GetCurrentThreadLockCount (VOID)
+{
+ LIMITED_METHOD_CONTRACT;
+ Thread* thread=GetThread();
+ if(!thread)
+ return 0;
+ return thread->m_dwLockCount;
+
+}
+
+
+BOOL CLRTestHookManager::IsPreemptiveGC (VOID)
+{
+ LIMITED_METHOD_CONTRACT;
+ Thread *thread = GetThread();
+ // Preemptive GC is default
+ if (thread == NULL)
+ return TRUE;
+ else
+ return !thread->PreemptiveGCDisabled();
+}
+
+
+BOOL CLRTestHookManager::ThreadCanBeAborted (VOID)
+{
+ LIMITED_METHOD_CONTRACT;
+ return (GetThread()==NULL || GetThread()->IsAbortPrevented() || GetThread()->IsAsyncPrevented())?FALSE:TRUE;
+}
+
+HRESULT CLRTestHookManager::HasNativeImage(LPVOID domainfile,BOOL* pHasNativeImage)
+{
+ STATIC_CONTRACT_THROWS;
+ HRESULT hr=S_OK;
+ EX_TRY
+ {
+ if (domainfile && ((DomainFile*)domainfile)->GetFile())
+ {
+ *pHasNativeImage=((DomainFile*)domainfile)->GetFile()->HasNativeImage();
+ }
+ else
+ *pHasNativeImage = 0;
+ }
+ EX_CATCH_HRESULT(hr);
+ return hr;
+}
+
+
+void CLRTestHookInfo::Set(ICLRTestHook* hook)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (SUCCEEDED(hook->QueryInterface(IID_ICLRTestHook3,(void**)&m_Hook.v3)))
+ {
+ m_Version=3;
+ return;
+ }
+ else if (SUCCEEDED(hook->QueryInterface(IID_ICLRTestHook2,(void**)&m_Hook.v2)))
+ {
+ m_Version=2;
+ return;
+ }
+ else
+ {
+ m_Version=1;
+ }
+ hook->AddRef();
+ m_Hook.v1=hook;
+}
+
+ICLRTestHook* CLRTestHookInfo::v1()
+{
+ return m_Hook.v1;
+}
+
+ICLRTestHook2* CLRTestHookInfo::v2()
+{
+ LIMITED_METHOD_CONTRACT;
+ if(m_Version==2)
+ return m_Hook.v2;
+ return NULL;
+}
+
+ICLRTestHook3* CLRTestHookInfo::v3()
+{
+ if(m_Version>=3)
+ return m_Hook.v3;
+ return NULL;
+}
+
+
+
+//Static instance to make sure CLRTestHook can be instantiated.
+static CLRTestHook _hook;
+
+#endif
+
+
diff --git a/src/vm/testhookmgr.h b/src/vm/testhookmgr.h
new file mode 100644
index 0000000000..2eaa008edd
--- /dev/null
+++ b/src/vm/testhookmgr.h
@@ -0,0 +1,102 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+//
+
+#ifndef CLR_TESTHOOKMGR_H
+#define CLR_TESTHOOKMGR_H
+
+#include "testhook.h"
+
+#if defined(_DEBUG) && !defined(CROSSGEN_COMPILE)
+#define FEATURE_TESTHOOKS
+#endif
+
+#ifdef FEATURE_TESTHOOKS
+#define TESTHOOKCALL(function) \
+ do { if(CLRTestHookManager::Enabled()) \
+ CLRTestHookManager::TestHook()->function;} while(0)
+
+#define TESTHOOKENABLED CLRTestHookManager::Enabled()
+#define TESTHOOKSLOWPATH CLRTestHookManager::SlowPathEnabled()
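+
+// Usage sketch (illustrative): callers pass the full invocation as the macro
+// argument, so the hook manager is only dereferenced when one is installed, e.g.
+//
+//     TESTHOOKCALL(AppDomainDestroyed(adid));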
+
+#define MAX_TEST_HOOKS 32
+
+//Forward declarations
+template <typename T> class Volatile;
+extern "C" Volatile<LONG> g_TrapReturningThreads;
+
+
+struct CLRTestHookInfo
+{
+protected:
+ int m_Version;
+ union
+ {
+ ICLRTestHook* v1;
+ ICLRTestHook2* v2;
+ ICLRTestHook3* v3;
+ } m_Hook;
+public:
+ void Set(ICLRTestHook*);
+ ICLRTestHook* v1();
+ ICLRTestHook2* v2();
+ ICLRTestHook3* v3();
+};
+
+class CLRTestHookManager: public ICLRTestHookManager2, public ICLRTestHook3
+{
+protected:
+ static CLRTestHookManager* g_pManager;
+ Volatile<LONG> m_nHooks;
+ Volatile<LONG> m_cRef;
+ CLRTestHookInfo m_pHooks[MAX_TEST_HOOKS];
+ ~CLRTestHookManager();
+public:
+ CLRTestHookManager();
+ STDMETHOD(AddTestHook)(ICLRTestHook* hook);
+ static ICLRTestHookManager* Start();
+ static BOOL Enabled() {return g_pManager!=NULL;};
+ static BOOL SlowPathEnabled() {return Enabled() && g_TrapReturningThreads;};
+ static CLRTestHookManager* TestHook() {return g_pManager;};
+ static HRESULT CheckConfig();
+ STDMETHOD_(ULONG,AddRef) ();
+ STDMETHOD_(ULONG, Release)();
+ STDMETHOD (QueryInterface)(REFIID riid, void * *ppvObject);
+ STDMETHOD(AppDomainStageChanged)(DWORD adid,DWORD oldstage,DWORD newstage);
+ STDMETHOD(NextFileLoadLevel)(DWORD adid, LPVOID domainfile,DWORD newlevel);
+ STDMETHOD(CompletingFileLoadLevel)(DWORD adid, LPVOID domainfile,DWORD newlevel);
+ STDMETHOD(CompletedFileLoadLevel)(DWORD adid, LPVOID domainfile,DWORD newlevel);
+ STDMETHOD(EnteringAppDomain)(DWORD adid);
+ STDMETHOD(EnteredAppDomain)(DWORD adid);
+ STDMETHOD(LeavingAppDomain)(DWORD adid);
+ STDMETHOD(LeftAppDomain)(DWORD adid);
+ STDMETHOD(UnwindingThreads)(DWORD adid);
+ STDMETHOD(UnwoundThreads)(DWORD adid);
+ STDMETHOD(AppDomainCanBeUnloaded)(DWORD adid, BOOL bUnsafePoint);
+ STDMETHOD(AppDomainDestroyed)(DWORD adid);
+ STDMETHOD(EnableSlowPath) (BOOL bEnable);
+ STDMETHOD(UnloadAppDomain)(DWORD adid,DWORD flags);
+ STDMETHOD(StartingNativeImageBind)(LPCWSTR wszAsmName, BOOL bIsCompilationProcess);
+ STDMETHOD(CompletedNativeImageBind)(LPVOID pFile,LPCUTF8 simpleName, BOOL hasNativeImage);
+ STDMETHOD(AboutToLockImage)(LPCWSTR wszPath, BOOL bIsCompilationProcess);
+ STDMETHOD_(VOID,DoApproriateWait)( int cObjs, HANDLE *pObjs, INT32 iTimeout, BOOL bWaitAll, int* res);
+ STDMETHOD(GC)(int generation);
+ STDMETHOD(GetSimpleName)(LPVOID domainfile,LPCUTF8* name);
+ STDMETHOD(RuntimeStarted)(DWORD code);
+ STDMETHOD_(INT_PTR,GetCurrentThreadType)(VOID);
+ STDMETHOD_(INT_PTR,GetCurrentThreadLockCount) (VOID);
+ STDMETHOD_(BOOL,IsPreemptiveGC)(VOID);
+ STDMETHOD_(BOOL,ThreadCanBeAborted) (VOID);
+ STDMETHOD(ImageMapped)(LPCWSTR wszPath, LPCVOID pBaseAddress, DWORD flags);
+ STDMETHOD(HasNativeImage)(LPVOID domainfile,BOOL* pHasNativeImage);
+};
+#else
+#define TESTHOOKCALL(function)
+#define TESTHOOKENABLED FALSE
+#define TESTHOOKSLOWPATH FALSE
+#endif
+#endif
diff --git a/src/vm/threaddebugblockinginfo.cpp b/src/vm/threaddebugblockinginfo.cpp
new file mode 100644
index 0000000000..a77c69b457
--- /dev/null
+++ b/src/vm/threaddebugblockinginfo.cpp
@@ -0,0 +1,91 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ThreadDebugBlockingInfo.cpp
+//
+
+//
+//
+#include "common.h"
+#include "threaddebugblockinginfo.h"
+
+//Constructor
+ThreadDebugBlockingInfo::ThreadDebugBlockingInfo()
+{
+ m_firstBlockingItem = NULL;
+}
+
+//Destructor
+ThreadDebugBlockingInfo::~ThreadDebugBlockingInfo()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(m_firstBlockingItem == NULL);
+}
+
+// Adds a new blocking item at the front of the list
+// The caller is responsible for allocating the memory this points to and keeping it alive until
+// after PopBlockingItem is called
+#ifndef DACCESS_COMPILE
+VOID ThreadDebugBlockingInfo::PushBlockingItem(DebugBlockingItem *pItem)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pItem != NULL);
+ pItem->pNext = m_firstBlockingItem;
+ m_firstBlockingItem = pItem;
+}
+#endif //!DACCESS_COMPILE
+
+// Removes the most recently added item (LIFO)
+#ifndef DACCESS_COMPILE
+VOID ThreadDebugBlockingInfo::PopBlockingItem()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(m_firstBlockingItem != NULL);
+ m_firstBlockingItem = m_firstBlockingItem->pNext;
+}
+#endif //!DACCESS_COMPILE
+
+// Calls the visitor function on each item in the stack from front to back
+#ifdef DACCESS_COMPILE
+VOID ThreadDebugBlockingInfo::VisitBlockingItems(DebugBlockingItemVisitor visitorFunc, VOID* pUserData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SUPPORTS_DAC;
+
+ PTR_DebugBlockingItem pItem = m_firstBlockingItem;
+ while(pItem != NULL)
+ {
+ visitorFunc(pItem, pUserData);
+ pItem = pItem->pNext;
+ }
+}
+#endif //DACCESS_COMPILE
+
+// Holder constructor pushes a blocking item on the blocking info stack
+#ifndef DACCESS_COMPILE
+DebugBlockingItemHolder::DebugBlockingItemHolder(Thread *pThread, DebugBlockingItem *pItem) :
+m_pThread(pThread)
+{
+ LIMITED_METHOD_CONTRACT;
+ pThread->DebugBlockingInfo.PushBlockingItem(pItem);
+}
+#endif //DACCESS_COMPILE
+
+// Holder destructor pops a blocking item off the blocking info stack
+#ifndef DACCESS_COMPILE
+DebugBlockingItemHolder::~DebugBlockingItemHolder()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pThread->DebugBlockingInfo.PopBlockingItem();
+}
+#endif //DACCESS_COMPILE
diff --git a/src/vm/threaddebugblockinginfo.h b/src/vm/threaddebugblockinginfo.h
new file mode 100644
index 0000000000..202a1db91e
--- /dev/null
+++ b/src/vm/threaddebugblockinginfo.h
@@ -0,0 +1,81 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ThreadDebugBlockingInfo.h
+//
+
+//
+//
+// Threads.h was getting so bloated that it seemed time to refactor a little. Rather than shove in yet another
+// 50 lines of random definitions of things which happened to be per-thread I separated it out here.
+#ifndef __ThreadBlockingInfo__
+#define __ThreadBlockingInfo__
+#include "mscoree.h"
+
+// Different ways thread can block that the debugger will expose
+enum DebugBlockingItemType
+{
+ DebugBlock_MonitorCriticalSection,
+ DebugBlock_MonitorEvent,
+};
+
+typedef DPTR(struct DebugBlockingItem) PTR_DebugBlockingItem;
+
+// Represents something a thread blocked on that is exposed via the debugger
+struct DebugBlockingItem
+{
+ // right now we only do monitor locks but this could be
+ // expanded to other pieces of data if we want to expose
+ // other things that we block on
+ PTR_AwareLock pMonitor;
+ // The app domain of the object we are blocking on
+ PTR_AppDomain pAppDomain;
+ // Indicates how the thread is blocked on the item
+ DebugBlockingItemType type;
+ // blocking timeout in milliseconds, or INFINITE for no timeout
+ DWORD dwTimeout;
+ // next pointer for a linked list of these items
+ PTR_DebugBlockingItem pNext;
+};
+
+// A visitor function used when enumerating DebugBlockingItems
+typedef VOID (*DebugBlockingItemVisitor)(PTR_DebugBlockingItem item, VOID* pUserData);
+
+// Maintains a stack of DebugBlockingItems that a thread is currently waiting on
+// It is a stack rather than a single item because we wait interruptibly. During the interruptible
+// wait we can run more code for an APC or to handle a windows message that could again block on another lock
+class ThreadDebugBlockingInfo
+{
+private:
+ // head of the linked list which is our stack
+ PTR_DebugBlockingItem m_firstBlockingItem;
+
+public:
+ ThreadDebugBlockingInfo();
+ ~ThreadDebugBlockingInfo();
+
+#ifndef DACCESS_COMPILE
+ // Adds a new blocking item at the front of the list
+ VOID PushBlockingItem(DebugBlockingItem *pItem);
+ // Removes the most recently added item (LIFO)
+ VOID PopBlockingItem();
+#else
+ // Calls the visitor function on each item in the stack from front to back
+ VOID VisitBlockingItems(DebugBlockingItemVisitor visitorFunc, VOID* pUserData);
+#endif //DACCESS_COMPILE
+};
+
+#ifndef DACCESS_COMPILE
+class DebugBlockingItemHolder
+{
+private:
+ Thread *m_pThread;
+
+public:
+ DebugBlockingItemHolder(Thread *pThread, DebugBlockingItem *pItem);
+ ~DebugBlockingItemHolder();
+};
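+
+// Usage sketch (hypothetical caller, for illustration): the item lives on the
+// waiting thread's stack and the holder scopes the push/pop pairing, so nested
+// interruptible waits simply push further items:
+//
+//     DebugBlockingItem item;
+//     item.pMonitor   = pAwareLock;                       // lock being waited on
+//     item.pAppDomain = pAppDomain;                       // domain of the object
+//     item.type       = DebugBlock_MonitorCriticalSection;
+//     item.dwTimeout  = INFINITE;
+//     {
+//         DebugBlockingItemHolder holder(pThread, &item); // pushes the item
+//         // ... perform the interruptible wait ...
+//     }                                                   // destructor pops it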
+#endif //!DACCESS_COMPILE
+
+#endif // __ThreadBlockingInfo__
diff --git a/src/vm/threadpoolrequest.cpp b/src/vm/threadpoolrequest.cpp
new file mode 100644
index 0000000000..69ea5e4458
--- /dev/null
+++ b/src/vm/threadpoolrequest.cpp
@@ -0,0 +1,788 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//=========================================================================
+
+//
+// ThreadPoolRequest.cpp
+//
+
+//
+//
+//=========================================================================
+
+#include "common.h"
+#include "comdelegate.h"
+#include "comthreadpool.h"
+#include "threadpoolrequest.h"
+#include "win32threadpool.h"
+#include "class.h"
+#include "object.h"
+#include "field.h"
+#include "excep.h"
+#include "security.h"
+#include "eeconfig.h"
+#include "corhost.h"
+#include "nativeoverlapped.h"
+#ifdef FEATURE_REMOTING
+#include "crossdomaincalls.h"
+#endif
+#include "appdomain.inl"
+
+UnManagedPerAppDomainTPCount PerAppDomainTPCountList::s_unmanagedTPCount;
+
+//The list of all per-appdomain work-request counts.
+ArrayListStatic PerAppDomainTPCountList::s_appDomainIndexList;
+
+//Make this point to the unmanaged TP in case no appdomains have been initialized yet.
+LONG PerAppDomainTPCountList::s_ADHint=-1;
+
+void PerAppDomainTPCountList::InitAppDomainIndexList()
+{
+ LIMITED_METHOD_CONTRACT;
+ s_appDomainIndexList.Init();
+}
+
+
+//---------------------------------------------------------------------------
+//AddNewTPIndex adds and returns a per-appdomain TP entry whenever a new appdomain
+//is created. Our list count should be equal to the max number of appdomains created
+//in the system.
+//
+//Assumptions:
+//This function needs to be called under the SystemDomain lock.
+//The ArrayListStatic data structure allows traversal of the counts without a
+//lock, but addition to the list requires synchronization.
+//
+TPIndex PerAppDomainTPCountList::AddNewTPIndex()
+{
+ STANDARD_VM_CONTRACT;
+
+ DWORD count = s_appDomainIndexList.GetCount();
+ DWORD i = FindFirstFreeTpEntry();
+
+ if (i == UNUSED_THREADPOOL_INDEX)
+ i = count;
+
+ TPIndex index(i+1);
+ if(count > i)
+ {
+
+ IPerAppDomainTPCount * pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(i));
+ pAdCount->SetTPIndex(index);
+ return index;
+ }
+
+ ManagedPerAppDomainTPCount * pAdCount = new ManagedPerAppDomainTPCount(index);
+ pAdCount->ResetState();
+
+ IfFailThrow(s_appDomainIndexList.Append(pAdCount));
+
+ return index;
+}
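+
+// Index convention: TPIndex values are 1-based, so slot i in s_appDomainIndexList
+// corresponds to TPIndex i+1 (and index.m_dwIndex-1 recovers the slot). For
+// example, the first appdomain created gets TPIndex 1 stored in slot 0; if that
+// domain unloads, FindFirstFreeTpEntry later hands slot 0 (TPIndex 1) to a new
+// appdomain instead of growing the list.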
+
+DWORD PerAppDomainTPCountList::FindFirstFreeTpEntry()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ DWORD DwnumADs = s_appDomainIndexList.GetCount();
+ DWORD Dwi;
+ IPerAppDomainTPCount * pAdCount;
+ DWORD DwfreeIndex = UNUSED_THREADPOOL_INDEX;
+
+ for (Dwi=0;Dwi < DwnumADs;Dwi++)
+ {
+ pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(Dwi));
+ _ASSERTE(pAdCount);
+
+ if(pAdCount->IsTPIndexUnused())
+ {
+ DwfreeIndex = Dwi;
+ STRESS_LOG1(LF_THREADPOOL, LL_INFO1000, "FindFirstFreeTpEntry: reusing index %d\n", DwfreeIndex + 1);
+ break;
+ }
+ }
+
+ return DwfreeIndex;
+}
+
+
+void PerAppDomainTPCountList::SetAppDomainId(TPIndex index, ADID id)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ IPerAppDomainTPCount * pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(index.m_dwIndex-1));
+
+ //SetAppDomainId needs to be called after the PerDomainCount has been
+ //successfully allocated for the appdomain.
+ _ASSERTE(pAdCount);
+
+ STRESS_LOG2(LF_THREADPOOL, LL_INFO1000, "SetAppDomainId: index %d id %d\n", index.m_dwIndex, id.m_dwId);
+ pAdCount->SetAppDomainId(id);
+}
+
+//---------------------------------------------------------------------------
+//ResetAppDomainIndex: Resets the AppDomain ID and the per-appdomain
+// thread pool counts
+//
+//Arguments:
+//index - The index into the s_appDomainIndexList for the AppDomain we're
+// trying to clear (the AD being unloaded)
+//
+//Assumptions:
+//This function needs to be called from the AD unload thread after all domain
+//bound objects have been finalized when it's safe to recycle the TPIndex.
+//ClearAppDomainRequestsActive can be called from this function because no
+// managed code is running (If managed code is running, this function needs
+//to be called under a managed per-appdomain lock).
+//
+void PerAppDomainTPCountList::ResetAppDomainIndex(TPIndex index)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ IPerAppDomainTPCount * pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(index.m_dwIndex-1));
+ _ASSERTE(pAdCount);
+
+ STRESS_LOG2(LF_THREADPOOL, LL_INFO1000, "ResetAppDomainIndex: index %d pAdCount %p\n", index.m_dwIndex, pAdCount);
+
+ pAdCount->ResetState();
+ pAdCount->SetTPIndexUnused();
+}
+
+//---------------------------------------------------------------------------
+//ResetAppDomainTPCounts: Resets the per-appdomain thread pool counts for a
+// given AppDomain. Don't clear the ADID until we can
+// safely recycle the TPIndex
+//
+//Arguments:
+//index - The index into the s_appDomainIndexList for the AppDomain we're
+// trying to clear
+//
+//Assumptions:
+//This function needs to be called from the AD unload thread after we make sure
+//that no more code is running in unmanaged code. ClearAppDomainRequestsActive
+//can be called from this function because no managed code is running (If
+//managed code is running, this function needs to be called under a managed
+//per-appdomain lock).
+//
+void PerAppDomainTPCountList::ResetAppDomainTPCounts(TPIndex index)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ IPerAppDomainTPCount * pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(index.m_dwIndex-1));
+ _ASSERTE(pAdCount);
+
+ STRESS_LOG2(LF_THREADPOOL, LL_INFO1000, "ResetAppDomainTPCounts: index %d pAdCount %p\n", index.m_dwIndex, pAdCount);
+ //Correct the thread pool counts, in case the appdomain was unloaded rudely.
+ if(pAdCount->IsRequestPending())
+ {
+ ThreadpoolMgr::ClearAppDomainRequestsActive(FALSE, TRUE, (LONG)index.m_dwIndex);
+ }
+
+ pAdCount->ClearAppDomainRequestsActive(TRUE);
+}
+
+//---------------------------------------------------------------------------
+//AreRequestsPendingInAnyAppDomains checks to see if there are any requests pending
+//in other appdomains. It also checks for pending unmanaged work requests.
+//This function is called at the end of a thread quantum to see if the thread needs to
+//transition into a different appdomain. This function may also be called by
+//the scheduler to check for any unscheduled work.
+//
+bool PerAppDomainTPCountList::AreRequestsPendingInAnyAppDomains()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ SO_TOLERANT; //It's ok for this function to fail.
+ }
+ CONTRACTL_END;
+
+ DWORD DwnumADs = s_appDomainIndexList.GetCount();
+ DWORD Dwi;
+ IPerAppDomainTPCount * pAdCount;
+ bool fRequestsPending = false;
+
+ for (Dwi=0;Dwi < DwnumADs;Dwi++)
+ {
+
+ pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(Dwi));
+ _ASSERTE(pAdCount);
+
+ if(pAdCount->IsRequestPending())
+ {
+ fRequestsPending = true;
+ break;
+ }
+ }
+
+ if(s_unmanagedTPCount.IsRequestPending())
+ {
+ fRequestsPending = true;
+ }
+
+ return fRequestsPending;
+}
+
+
+//---------------------------------------------------------------------------
+//GetAppDomainIndexForThreadpoolDispatch is essentially the
+//"AppDomain Scheduler". This function makes fairness/policy decisions as to
+//which appdomain the thread needs to enter to. This function needs to guarantee
+//that all appdomain work requests are processed fairly. At this time all
+//appdomain requests and the unmanaged work requests are treated with the same
+//priority.
+//
+//Return Value:
+//The 1-based appdomain index in which to dispatch the worker thread, -1 if
+//unmanaged work items need to be processed, or 0 if there is no work at all.
+//
+LONG PerAppDomainTPCountList::GetAppDomainIndexForThreadpoolDispatch()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ IPerAppDomainTPCount * pAdCount;
+ DWORD DwnumADs = s_appDomainIndexList.GetCount();
+ DWORD Dwi;
+
+ LONG hint = s_ADHint;
+ LONG temphint = hint;
+
+ if (hint == -1)
+ {
+ pAdCount = &s_unmanagedTPCount;
+ }
+ else
+ {
+ pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(hint));
+ }
+
+ _ASSERTE( pAdCount);
+
+ if (pAdCount->TakeActiveRequest())
+ goto HintDone;
+
+ //temphint ensures that the check for appdomains proceeds in a pure round robin fashion.
+ temphint = hint;
+
+ //If there is no work in any appdomain, check the unmanaged queue.
+ hint = -1;
+
+ for (Dwi=0;Dwi<DwnumADs;Dwi++)
+ {
+ if (temphint == -1)
+ {
+ temphint = 0;
+ }
+
+ pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(temphint));
+ if (pAdCount->TakeActiveRequest())
+ {
+ hint = temphint;
+ goto HintDone;
+ }
+
+ temphint++;
+
+ _ASSERTE( temphint <= (LONG) DwnumADs);
+
+ if(temphint == (LONG) DwnumADs)
+ {
+ temphint = 0;
+ }
+ }
+
+ if (hint == -1 && !s_unmanagedTPCount.TakeActiveRequest())
+ {
+ //no work!
+ return 0;
+ }
+
+ HintDone:
+
+ LONG count = (LONG) s_appDomainIndexList.GetCount();
+
+ if((hint+1) < count)
+ {
+ s_ADHint = hint+1;
+ }
+ else
+ {
+ s_ADHint = -1;
+ }
+
+ if (hint == -1)
+ {
+ return hint;
+ }
+ else
+ {
+ return (hint+1);
+ }
+}
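+
+// Worked example of the round-robin above (illustrative): with three appdomain
+// slots (0..2) and s_ADHint == 1, slots are probed starting at the hint (slot 1),
+// then 2, then 0, falling back to the unmanaged queue. The return value encodes
+// the winner: 0 == no work anywhere, -1 == dispatch the unmanaged queue, and
+// k >= 1 == dispatch appdomain slot k-1. s_ADHint is advanced past the chosen
+// slot so the next dispatch starts its scan at a different domain.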
+
+
+void UnManagedPerAppDomainTPCount::SetAppDomainRequestsActive()
+{
+ WRAPPER_NO_CONTRACT;
+#ifndef DACCESS_COMPILE
+ LONG count = m_outstandingThreadRequestCount;
+ while (count < (LONG)ThreadpoolMgr::NumberOfProcessors)
+ {
+ LONG prevCount = FastInterlockCompareExchange(&m_outstandingThreadRequestCount, count+1, count);
+ if (prevCount == count)
+ {
+ if (!CLRThreadpoolHosted())
+ {
+ ThreadpoolMgr::MaybeAddWorkingWorker();
+ ThreadpoolMgr::EnsureGateThreadRunning();
+ }
+ break;
+ }
+ count = prevCount;
+ }
+#endif
+}
+
+bool FORCEINLINE UnManagedPerAppDomainTPCount::TakeActiveRequest()
+{
+ LIMITED_METHOD_CONTRACT;
+ LONG count = m_outstandingThreadRequestCount;
+
+ while (count > 0)
+ {
+ LONG prevCount = FastInterlockCompareExchange(&m_outstandingThreadRequestCount, count-1, count);
+ if (prevCount == count)
+ return true;
+ count = prevCount;
+ }
+
+ return false;
+}
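+
+// The two loops above are the standard lock-free claim/release pattern: read the
+// count, compute the new value (increment capped at NumberOfProcessors, or
+// decrement stopping at zero), and retry the FastInterlockCompareExchange with
+// the freshly observed value whenever another thread raced in between. A sketch
+// of the shape, assuming a Volatile<LONG> counter:
+//
+//     LONG count = m_counter;
+//     while (/* count permits the transition */)
+//     {
+//         LONG prev = FastInterlockCompareExchange(&m_counter, newValue, count);
+//         if (prev == count)
+//             break;        // we won the race; side effects go here
+//         count = prev;     // lost the race; retry with the value we saw
+//     }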
+
+
+FORCEINLINE void ReleaseWorkRequest(WorkRequest *workRequest) { ThreadpoolMgr::RecycleMemory( workRequest, ThreadpoolMgr::MEMTYPE_WorkRequest ); }
+typedef Wrapper< WorkRequest *, DoNothing<WorkRequest *>, ReleaseWorkRequest > WorkRequestHolder;
+
+void UnManagedPerAppDomainTPCount::QueueUnmanagedWorkRequest(LPTHREAD_START_ROUTINE function, PVOID context)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ WorkRequestHolder pWorkRequest;
+
+ //Note, ideally we would want to use our own queues instead of those in
+ //the thread pool class. However, the queues in the thread pool class have
+ //caching support that shares memory with other commonly used structures
+ //in the VM thread pool implementation. So, we decided to leverage those.
+
+ pWorkRequest = ThreadpoolMgr::MakeWorkRequest(function, context);
+
+ //MakeWorkRequest should throw if unable to allocate memory
+ _ASSERTE(pWorkRequest != NULL);
+ PREFIX_ASSUME(pWorkRequest != NULL);
+
+ m_lock.Init(LOCK_TYPE_DEFAULT);
+
+ {
+ SpinLock::Holder slh(&m_lock);
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadPoolEnqueue) &&
+ !ThreadpoolMgr::AreEtwQueueEventsSpeciallyHandled(function))
+ FireEtwThreadPoolEnqueue(pWorkRequest, GetClrInstanceId());
+ ThreadpoolMgr::EnqueueWorkRequest(pWorkRequest);
+ pWorkRequest.SuppressRelease();
+ m_NumRequests++;
+ }
+
+ SetAppDomainRequestsActive();
+#endif //DACCESS_COMPILE
+}
+
+PVOID UnManagedPerAppDomainTPCount::DeQueueUnManagedWorkRequest(bool* lastOne)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ *lastOne = true;
+
+ WorkRequest * pWorkRequest = ThreadpoolMgr::DequeueWorkRequest();
+
+ if (pWorkRequest)
+ {
+ m_NumRequests--;
+
+ if(m_NumRequests > 0)
+ *lastOne = false;
+ }
+
+ return (PVOID) pWorkRequest;
+}
+
+//---------------------------------------------------------------------------
+//DispatchWorkItem manages dispatching of unmanaged work requests. It keeps
+//processing unmanaged requests for one quantum. Essentially this function is
+//a tight loop of dequeueing unmanaged work requests and dispatching them.
+//
+void UnManagedPerAppDomainTPCount::DispatchWorkItem(bool* foundWork, bool* wasNotRecalled)
+{
+#ifndef DACCESS_COMPILE
+ *foundWork = false;
+ *wasNotRecalled = true;
+
+ bool enableWorkerTracking = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_EnableWorkerTracking) ? true : false;
+
+ DWORD startTime;
+ DWORD endTime;
+
+ startTime = GetTickCount();
+
+ //For all practical purposes, the unmanaged part of the thread pool is treated
+ //as a special appdomain for thread pool purposes. The same logic as the
+ //one in managed code for dispatching thread pool requests is repeated here.
+ //Namely, we continue to process requests until either there are none, or
+ //the quantum has expired. See threadpool.cs for the managed counterpart.
+
+ WorkRequest * pWorkRequest=NULL;
+ LPTHREAD_START_ROUTINE wrFunction;
+ LPVOID wrContext;
+
+ bool firstIteration = true;
+ bool lastOne = false;
+
+ while (*wasNotRecalled)
+ {
+ m_lock.Init(LOCK_TYPE_DEFAULT);
+ {
+ SpinLock::Holder slh(&m_lock);
+ pWorkRequest = (WorkRequest*) DeQueueUnManagedWorkRequest(&lastOne);
+ }
+
+ if (NULL == pWorkRequest)
+ break;
+
+ if (firstIteration && !lastOne)
+ SetAppDomainRequestsActive();
+
+ firstIteration = false;
+ *foundWork = true;
+
+ if (GCHeap::IsGCInProgress(TRUE))
+ {
+ // GC is imminent, so wait until GC is complete before executing next request.
+ // this reduces in-flight objects allocated right before GC, easing the GC's work
+ GCHeap::WaitForGCCompletion(TRUE);
+ }
+
+ PREFIX_ASSUME(pWorkRequest != NULL);
+ _ASSERTE(pWorkRequest);
+
+ wrFunction = pWorkRequest->Function;
+ wrContext = pWorkRequest->Context;
+
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadPoolDequeue) &&
+ !ThreadpoolMgr::AreEtwQueueEventsSpeciallyHandled(wrFunction))
+ FireEtwThreadPoolDequeue(pWorkRequest, GetClrInstanceId());
+
+ ThreadpoolMgr::FreeWorkRequest(pWorkRequest);
+
+ if (enableWorkerTracking)
+ {
+ ThreadpoolMgr::ReportThreadStatus(true);
+ (wrFunction) (wrContext);
+ ThreadpoolMgr::ReportThreadStatus(false);
+ }
+ else
+ {
+ (wrFunction) (wrContext);
+ }
+
+ ThreadpoolMgr::NotifyWorkItemCompleted();
+ if (ThreadpoolMgr::ShouldAdjustMaxWorkersActive())
+ {
+ DangerousNonHostedSpinLockTryHolder tal(&ThreadpoolMgr::ThreadAdjustmentLock);
+ if (tal.Acquired())
+ {
+ ThreadpoolMgr::AdjustMaxWorkersActive();
+ }
+ else
+ {
+ // the lock is held by someone else, so they will take care of this for us.
+ }
+ }
+ *wasNotRecalled = ThreadpoolMgr::ShouldWorkerKeepRunning();
+
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ if (pThread->IsAbortRequested())
+ {
+ pThread->EEResetAbort(Thread::TAR_ALL);
+ }
+ pThread->InternalReset(FALSE);
+ }
+
+ endTime = GetTickCount();
+
+ if ((endTime - startTime) >= TP_QUANTUM)
+ {
+ break;
+ }
+ }
+
+ // if we're exiting for any reason other than the queue being empty, then we need to make sure another thread
+ // will visit us later.
+ if (NULL != pWorkRequest)
+ {
+ SetAppDomainRequestsActive();
+ }
+
+#endif //DACCESS_COMPILE
+}
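+
+// In outline, each pass of the loop above: (1) dequeues one request under the
+// spinlock, (2) defers to any in-progress GC, (3) fires the ETW dequeue event
+// and frees the request node before running the callback, (4) lets the thread
+// pool adjust its working-worker count, and (5) resets any abort state the
+// callback left on the thread. The loop exits when the queue is empty, the
+// worker is recalled, or TP_QUANTUM milliseconds have elapsed; in the latter
+// cases SetAppDomainRequestsActive re-arms the "work pending" state so another
+// worker will come back for the remaining items.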
+
+
+void ManagedPerAppDomainTPCount::SetAppDomainRequestsActive()
+{
+ //This function should either be called by managed code or during AD unload, but before
+ //the TpIndex is set to unused.
+ //
+ // Note that there is a separate count in managed code that stays in sync with this one over time.
+ // The managed count is incremented before this one, and this one is decremented before the managed
+ // one.
+ //
+
+ _ASSERTE(m_index.m_dwIndex != UNUSED_THREADPOOL_INDEX);
+ _ASSERTE(m_id.m_dwId != 0);
+
+#ifndef DACCESS_COMPILE
+ LONG count = m_numRequestsPending;
+ while (count != ADUnloading)
+ {
+ LONG prev = FastInterlockCompareExchange(&m_numRequestsPending, count+1, count);
+ if (prev == count)
+ {
+ if (!CLRThreadpoolHosted())
+ {
+ ThreadpoolMgr::MaybeAddWorkingWorker();
+ ThreadpoolMgr::EnsureGateThreadRunning();
+ }
+ break;
+ }
+ count = prev;
+ }
+#endif
+}
+
+void ManagedPerAppDomainTPCount::ClearAppDomainRequestsActive(BOOL bADU)
+{
+ LIMITED_METHOD_CONTRACT;
+ //This function should either be called by managed code or during AD unload, but before
+ //the TpIndex is set to unused.
+
+ _ASSERTE(m_index.m_dwIndex != UNUSED_THREADPOOL_INDEX);
+ _ASSERTE(m_id.m_dwId != 0);
+
+ if (bADU)
+ {
+ m_numRequestsPending = ADUnloading;
+ }
+ else
+ {
+ LONG count = m_numRequestsPending;
+ while (count != ADUnloading && count > 0)
+ {
+ LONG prev = FastInterlockCompareExchange(&m_numRequestsPending, 0, count);
+ if (prev == count)
+ break;
+ count = prev;
+ }
+ }
+}
+
+bool ManagedPerAppDomainTPCount::TakeActiveRequest()
+{
+ LIMITED_METHOD_CONTRACT;
+ LONG count = m_numRequestsPending;
+ while (count != ADUnloading && count > 0)
+ {
+ LONG prev = FastInterlockCompareExchange(&m_numRequestsPending, count-1, count);
+ if (prev == count)
+ return true;
+ count = prev;
+ }
+ return false;
+}
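+
+// m_numRequestsPending doubles as a small state machine: ADUnloading (-1) is a
+// sentinel meaning "the appdomain is going away", 0 means no pending work, and
+// n > 0 means work may be queued. Every CAS loop above checks for the sentinel
+// first, so that once SetAppDomainUnloading stamps -1, no racing producer or
+// consumer can resurrect the count until ClearAppDomainUnloading resets it.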
+
+void ManagedPerAppDomainTPCount::ClearAppDomainUnloading()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ //
+ // While the AD was trying to unload, we may have queued some work. We would not
+ // have added that work to this count, because the AD was unloading. So we assume
+ // here that we have work to do.
+ //
+ // We set this to NumberOfProcessors because that's the maximum count that the AD
+ // might have tried to add. It's OK for this count to be larger than the AD thinks
+ // it should be, but if it's smaller then we will be permanently out of sync with the
+ // AD.
+ //
+ m_numRequestsPending = ThreadpoolMgr::NumberOfProcessors;
+ if (!CLRThreadpoolHosted() && ThreadpoolMgr::IsInitialized())
+ {
+ ThreadpoolMgr::MaybeAddWorkingWorker();
+ ThreadpoolMgr::EnsureGateThreadRunning();
+ }
+#endif
+}
+
+
+#ifndef DACCESS_COMPILE
+
+//---------------------------------------------------------------------------
+//DispatchWorkItem makes sure the right exception handling frames are setup,
+//the thread is transitioned into the correct appdomain, and the right managed
+//callback is called.
+//
+void ManagedPerAppDomainTPCount::DispatchWorkItem(bool* foundWork, bool* wasNotRecalled)
+{
+ *foundWork = false;
+ *wasNotRecalled = true;
+
+ HRESULT hr;
+ Thread * pThread = GetThread();
+ if (pThread == NULL)
+ {
+ ClrFlsSetThreadType(ThreadType_Threadpool_Worker);
+ pThread = SetupThreadNoThrow(&hr);
+ if (pThread == NULL)
+ {
+ return;
+ }
+ }
+
+ //We are in a state where AppDomain Unload has begun, but not all threads have been
+ //forced out of the unloading domain. The check below will prevent us from getting
+ //unmanaged AD unloaded exceptions while trying to enter an unloaded appdomain.
+
+ if(IsAppDomainUnloading())
+ {
+ __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ return;
+ }
+
+ CONTRACTL
+ {
+ MODE_PREEMPTIVE;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+ BEGIN_SO_INTOLERANT_CODE(pThread);
+
+ //
+ // NOTE: there is a potential race between the time we retrieve the app
+ // domain pointer, and the time which this thread enters the domain.
+ //
+ // To solve the race, we rely on the fact that there is a thread sync (via
+ // GC) between releasing an app domain's handle, and destroying the
+ // app domain. Thus it is important that we not go into preemptive gc mode
+ // in that window.
+ //
+
+ {
+ ADID appDomainId(m_id);
+
+ // This TPIndex may have been recycled since we chose it for workitem dispatch.
+ // Thus it's possible for the ADID we just read to refer to an AppDomain that's still
+ // being created. If so, the new AppDomain will necessarily have zero requests
+ // pending (because the destruction of the previous AD that used this TPIndex
+ // will have reset this object). We don't want to call into such an AppDomain.
+// TODO: fix this another way!
+// if (IsRequestPending())
+ {
+ //This holder resets our thread's security state when exiting this scope
+ ThreadSecurityStateHolder secState(pThread);
+
+ ManagedThreadBase::ThreadPool(appDomainId, QueueUserWorkItemManagedCallback, wasNotRecalled);
+ }
+
+ if (pThread->IsAbortRequested())
+ {
+ // thread was aborted, and may not have had a chance to tell us it has work.
+ ENTER_DOMAIN_ID(m_id)
+ {
+ ThreadpoolMgr::SetAppDomainRequestsActive();
+ ThreadpoolMgr::QueueUserWorkItem(NULL,
+ NULL,
+ 0,
+ FALSE);
+
+ }
+ END_DOMAIN_TRANSITION;
+ }
+ }
+
+ // We should have released all locks.
+ _ASSERTE(g_fEEShutDown || pThread->m_dwLockCount == 0 || pThread->m_fRudeAborted);
+
+ END_SO_INTOLERANT_CODE;
+
+ *foundWork = true;
+}
+
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/threadpoolrequest.h b/src/vm/threadpoolrequest.h
new file mode 100644
index 0000000000..432422f03b
--- /dev/null
+++ b/src/vm/threadpoolrequest.h
@@ -0,0 +1,359 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//=========================================================================
+
+//
+// ThreadPoolRequest.h
+//
+
+//
+// This file contains definitions of classes needed to maintain per-appdomain
+// thread pool work requests. This is needed because unmanaged and managed work
+// requests are allocated, managed, and dispatched in drastically different ways.
+// However, the scheduler need not be aware of these differences; it should
+// simply talk to a common interface for managing work request counts.
+//
+//=========================================================================
+
+#ifndef _THREADPOOL_REQUEST_H
+#define _THREADPOOL_REQUEST_H
+
+#define TP_QUANTUM 2
+#define UNUSED_THREADPOOL_INDEX (DWORD)-1
+
+//--------------------------------------------------------------------------
+//IPerAppDomainTPCount is an interface for implementing per-appdomain thread
+//pool state. Its implementation should include logic to maintain work counts and
+//notify the thread pool class when work arrives or no work is left. Finally,
+//there is logic to dispatch work items correctly in the right domain.
+//
+//Notes:
+//This class was designed to support both the managed and unmanaged uses
+//of the thread pool. The unmanaged part may be used directly through COM
+//interfaces. The actual management of counts and dispatching of work differ
+//considerably between the two. This interface hides these differences from
+//the thread scheduler implemented by the thread pool class.
+//
+
+class IPerAppDomainTPCount{
+public:
+ virtual void ResetState() = 0;
+ virtual BOOL IsRequestPending() = 0;
+
+ //This function marks the beginning of requests queued for the domain.
+ //It needs to notify the scheduler of work-arrival among other things.
+ virtual void SetAppDomainRequestsActive() = 0;
+
+ //This function marks the end of requests queued for this domain.
+ virtual void ClearAppDomainRequestsActive(BOOL bADU = FALSE) = 0;
+
+ //Clears the "active" flag if it was set, and returns whether it was set.
+ virtual bool TakeActiveRequest() = 0;
+
+ //Takes care of dispatching requests in the right domain.
+ virtual void DispatchWorkItem(bool* foundWork, bool* wasNotRecalled) = 0;
+ virtual void SetAppDomainId(ADID id) = 0;
+ virtual void SetTPIndexUnused() = 0;
+ virtual BOOL IsTPIndexUnused() = 0;
+ virtual void SetTPIndex(TPIndex index) = 0;
+ virtual void SetAppDomainUnloading() = 0;
+ virtual void ClearAppDomainUnloading() = 0;
+};
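+
+// Typical lifetime of one work item as seen through this interface (a sketch,
+// inferred from the implementations below): a producer queues work and calls
+// SetAppDomainRequestsActive(); the scheduler later calls TakeActiveRequest()
+// to claim a pending request and, on success, DispatchWorkItem() on a worker
+// thread; when the domain drains or unloads, ClearAppDomainRequestsActive()
+// zeroes the count (or stamps the unload sentinel) so the scheduler stops
+// visiting it.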
+
+typedef DPTR(IPerAppDomainTPCount) PTR_IPerAppDomainTPCount;
+
+static const LONG ADUnloading = -1;
+
+//--------------------------------------------------------------------------
+//ManagedPerAppDomainTPCount maintains per-appdomain thread pool state.
+//This class maintains the count of per-appdomain work-items queued by
+//ThreadPool.QueueUserWorkItem. It also dispatches threads in the appdomain
+//correctly by setting up the right exception handling frames etc.
+//
+//Note: The counts are not accurate, nor do they need to be. The
+//actual work queue is in managed code (implemented in threadpool.cs). This class
+//just provides heuristics to the thread pool scheduler, along with
+//synchronization to indicate start/end of requests to the scheduler.
+class ManagedPerAppDomainTPCount : public IPerAppDomainTPCount {
+public:
+
+ ManagedPerAppDomainTPCount(TPIndex index) {ResetState(); m_index = index;}
+
+ inline void ResetState()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_numRequestsPending = 0;
+ m_id.m_dwId = 0;
+ }
+
+ inline BOOL IsRequestPending()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_numRequestsPending != ADUnloading && m_numRequestsPending > 0;
+ }
+
+ void SetAppDomainRequestsActive();
+ void ClearAppDomainRequestsActive(BOOL bADU);
+ bool TakeActiveRequest();
+
+ inline void SetAppDomainId(ADID id)
+ {
+ LIMITED_METHOD_CONTRACT;
+ //This function should be called during appdomain creation when no managed code
+ //has started running yet. That implies no requests should be pending
+ //or dispatched to this structure yet.
+
+ _ASSERTE(m_numRequestsPending != ADUnloading);
+ _ASSERTE(m_id.m_dwId == 0);
+
+ m_id = id;
+ }
+
+ inline void SetTPIndex(TPIndex index)
+ {
+ LIMITED_METHOD_CONTRACT;
+ //This function should be called during appdomain creation when no managed code
+ //has started running yet. That implies no requests should be pending
+ //or dispatched to this structure yet.
+
+ _ASSERTE(m_numRequestsPending != ADUnloading);
+ _ASSERTE(m_id.m_dwId == 0);
+ _ASSERTE(m_index.m_dwIndex == UNUSED_THREADPOOL_INDEX);
+
+ m_index = index;
+ }
+
+ inline BOOL IsTPIndexUnused()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (m_index.m_dwIndex == UNUSED_THREADPOOL_INDEX)
+ {
+ //This function is called during appdomain creation, and no new appdomains can be
+ //added or removed at this time. So, make sure that the per-appdomain structures that
+ //have been cleared(reclaimed) don't have any pending requests to them.
+
+ _ASSERTE(m_numRequestsPending != ADUnloading);
+ _ASSERTE(m_id.m_dwId == 0);
+
+ return TRUE;
+ }
+
+ return FALSE;
+ }
+
+ inline void SetTPIndexUnused()
+ {
+ WRAPPER_NO_CONTRACT;
+ //This function should be called during appdomain unload when all threads have
+ //successfully exited the appdomain. That implies no requests should be pending
+ //or dispatched to this structure.
+
+ _ASSERTE(m_id.m_dwId == 0);
+
+ m_index.m_dwIndex = UNUSED_THREADPOOL_INDEX;
+ }
+
+ inline void SetAppDomainUnloading()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_numRequestsPending = ADUnloading;
+ }
+
+ inline void ClearAppDomainUnloading();
+
+ inline BOOL IsAppDomainUnloading()
+ {
+ return m_numRequestsPending.Load() == ADUnloading;
+ }
+
+ void DispatchWorkItem(bool* foundWork, bool* wasNotRecalled);
+
+private:
+ Volatile<LONG> m_numRequestsPending;
+ ADID m_id;
+ TPIndex m_index;
+};
+
+//--------------------------------------------------------------------------
+//UnManagedPerAppDomainTPCount maintains the thread pool state/counts for
+//unmanaged work requests. From thread pool point of view we treat unmanaged
+//requests as a special "appdomain". This helps in scheduling policies, and
+//follow same fairness policies as requests in other appdomains.
+class UnManagedPerAppDomainTPCount : public IPerAppDomainTPCount {
+public:
+
+ UnManagedPerAppDomainTPCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ ResetState();
+ }
+
+ inline void InitResources()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ }
+
+ inline void CleanupResources()
+ {
+ }
+
+ inline void ResetState()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_NumRequests = 0;
+ m_outstandingThreadRequestCount = 0;
+ }
+
+ inline BOOL IsRequestPending()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_outstandingThreadRequestCount != 0 ? TRUE : FALSE;
+ }
+
+ void SetAppDomainRequestsActive();
+
+ inline void ClearAppDomainRequestsActive(BOOL bADU)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_outstandingThreadRequestCount = 0;
+ }
+
+ bool TakeActiveRequest();
+
+ inline void SetAppDomainId(ADID id)
+ {
+ }
+
+ void QueueUnmanagedWorkRequest(LPTHREAD_START_ROUTINE function, PVOID context);
+ PVOID DeQueueUnManagedWorkRequest(bool* lastOne);
+
+ void DispatchWorkItem(bool* foundWork, bool* wasNotRecalled);
+
+ inline void SetTPIndexUnused()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERT(FALSE);
+ }
+
+ inline BOOL IsTPIndexUnused()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERT(FALSE);
+ return FALSE;
+ }
+
+ inline void SetTPIndex(TPIndex index)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERT(FALSE);
+ }
+
+ inline void SetAppDomainUnloading()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERT(FALSE);
+ }
+
+ inline void ClearAppDomainUnloading()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERT(FALSE);
+ }
+
+private:
+ ULONG m_NumRequests;
+ Volatile<LONG> m_outstandingThreadRequestCount;
+ SpinLock m_lock;
+};
+
+//--------------------------------------------------------------------------
+//PerAppDomainTPCountList maintains the collection of per-appdomain thread
+//pool states. Per-appdomain counts are added to the list during appdomain
+//creation inside the SystemDomain lock. The counts are reset during appdomain
+//unload, after all the threads have exited the appdomain.
+//
+//Note: The counts are not accurate, nor do they need to be. The
+//actual work queue is in managed code (implemented in threadpool.cs). This class
+//just provides heuristics to the thread pool scheduler, along with
+//synchronization to indicate start/end of requests to the scheduler.
+class PerAppDomainTPCountList{
+public:
+ static void InitAppDomainIndexList();
+ static void ResetAppDomainIndex(TPIndex index);
+ static void ResetAppDomainTPCounts(TPIndex index);
+ static bool AreRequestsPendingInAnyAppDomains();
+ static LONG GetAppDomainIndexForThreadpoolDispatch();
+ static void SetAppDomainId(TPIndex index, ADID id);
+ static TPIndex AddNewTPIndex();
+ static void SetAppDomainUnloading(TPIndex index)
+ {
+ WRAPPER_NO_CONTRACT;
+ IPerAppDomainTPCount * pAdCount = dac_cast<PTR_IPerAppDomainTPCount> (s_appDomainIndexList.Get(index.m_dwIndex-1));
+ _ASSERTE(pAdCount);
+ pAdCount->SetAppDomainUnloading();
+ }
+
+ static void ClearAppDomainUnloading(TPIndex index)
+ {
+ WRAPPER_NO_CONTRACT;
+ IPerAppDomainTPCount * pAdCount = dac_cast<PTR_IPerAppDomainTPCount> (s_appDomainIndexList.Get(index.m_dwIndex-1));
+ _ASSERTE(pAdCount);
+ pAdCount->ClearAppDomainUnloading();
+ }
+
+ typedef Holder<TPIndex, SetAppDomainUnloading, ClearAppDomainUnloading> AppDomainUnloadingHolder;
+
+ inline static IPerAppDomainTPCount* GetPerAppdomainCount(TPIndex index)
+ {
+ return dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(index.m_dwIndex-1));
+ }
+
+ inline static UnManagedPerAppDomainTPCount* GetUnmanagedTPCount()
+ {
+ return &s_unmanagedTPCount;
+ }
+
+private:
+ static DWORD FindFirstFreeTpEntry();
+
+ static UnManagedPerAppDomainTPCount s_unmanagedTPCount;
+
+ //The list of all per-appdomain work-request counts.
+ static ArrayListStatic s_appDomainIndexList;
+
+ static LONG s_ADHint;
+};
+
+#endif //_THREADPOOL_REQUEST_H
diff --git a/src/vm/threads.cpp b/src/vm/threads.cpp
new file mode 100644
index 0000000000..4c3c60bb6a
--- /dev/null
+++ b/src/vm/threads.cpp
@@ -0,0 +1,13602 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// THREADS.CPP
+//
+
+//
+//
+
+
+#include "common.h"
+
+#include "tls.h"
+#include "frames.h"
+#include "threads.h"
+#include "stackwalk.h"
+#include "excep.h"
+#include "comsynchronizable.h"
+#include "log.h"
+#include "gc.h"
+#include "mscoree.h"
+#include "dbginterface.h"
+#include "corprof.h" // profiling
+#include "eeprofinterfaces.h"
+#include "eeconfig.h"
+#include "perfcounters.h"
+#include "corhost.h"
+#include "win32threadpool.h"
+#include "jitinterface.h"
+#include "appdomainstack.inl"
+#include "eventtrace.h"
+#ifdef FEATURE_REMOTING
+#include "appdomainhelper.h"
+#endif
+#include "comutilnative.h"
+#include "finalizerthread.h"
+#include "threadsuspend.h"
+
+#ifdef FEATURE_FUSION
+#include "fusion.h"
+#endif
+#include "wrappers.h"
+
+#include "nativeoverlapped.h"
+
+#include "mdaassistants.h"
+#include "appdomain.inl"
+#include "vmholder.h"
+#include "exceptmacros.h"
+#include "win32threadpool.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "runtimecallablewrapper.h"
+#include "interoputil.h"
+#include "interoputil.inl"
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+#include "olecontexthelpers.h"
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+#ifdef FEATURE_UEF_CHAINMANAGER
+// This is required to register our UEF callback with the UEF chain manager
+#include <mscoruefwrapper.h>
+#endif // FEATURE_UEF_CHAINMANAGER
+
+
+SPTR_IMPL(ThreadStore, ThreadStore, s_pThreadStore);
+CONTEXT *ThreadStore::s_pOSContext = NULL;
+CLREvent *ThreadStore::s_pWaitForStackCrawlEvent;
+
+#ifndef DACCESS_COMPILE
+
+#include "constrainedexecutionregion.h"
+
+
+BOOL Thread::s_fCleanFinalizedThread = FALSE;
+
+#ifdef ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+BOOL Thread::s_fEnforceEEThreadNotRequiredContracts = FALSE;
+#endif
+
+Volatile<LONG> Thread::s_threadPoolCompletionCountOverflow = 0;
+
+CrstStatic g_DeadlockAwareCrst;
+
+
+#if defined(_DEBUG)
+BOOL MatchThreadHandleToOsId ( HANDLE h, DWORD osId )
+{
+#ifndef FEATURE_PAL
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD id = GetThreadId(h);
+
+ // The OS call GetThreadId may fail and return 0. In that case we cannot
+ // decide whether the two match, so we skip this check.
+ return id == 0 || id == osId;
+#else // !FEATURE_PAL
+ return TRUE;
+#endif // !FEATURE_PAL
+}
+#endif // _DEBUG
+
+
+#ifdef _DEBUG_IMPL
+template<> AutoCleanupGCAssert<TRUE>::AutoCleanupGCAssert()
+{
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+}
+
+template<> AutoCleanupGCAssert<FALSE>::AutoCleanupGCAssert()
+{
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+}
+
+template<> void GCAssert<TRUE>::BeginGCAssert()
+{
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+}
+
+template<> void GCAssert<FALSE>::BeginGCAssert()
+{
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+}
+#endif
+
+
+// #define NEW_TLS 1
+
+#ifdef _DEBUG
+void Thread::SetFrame(Frame *pFrame)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ MODE_COOPERATIVE;
+ // It only makes sense for a Thread to call SetFrame on itself.
+ PRECONDITION(this == GetThread());
+ PRECONDITION(CheckPointer(pFrame));
+ }
+ CONTRACTL_END;
+
+ if (g_pConfig->fAssertOnFailFast())
+ {
+ Frame *pWalk = m_pFrame;
+ BOOL fExist = FALSE;
+ while (pWalk != (Frame*) -1)
+ {
+ if (pWalk == pFrame)
+ {
+ fExist = TRUE;
+ break;
+ }
+ pWalk = pWalk->m_Next;
+ }
+ pWalk = m_pFrame;
+ while (fExist && pWalk != pFrame && pWalk != (Frame*)-1)
+ {
+ if (pWalk->GetVTablePtr() == ContextTransitionFrame::GetMethodFrameVPtr())
+ {
+ _ASSERTE (((ContextTransitionFrame *)pWalk)->GetReturnDomain() == m_pDomain);
+ }
+ pWalk = pWalk->m_Next;
+ }
+ }
+
+ m_pFrame = pFrame;
+
+ // If stack overrun corruptions are expected, then skip this check
+ // as the Frame chain may have been corrupted.
+ if (g_pConfig->fAssertOnFailFast() == false)
+ return;
+
+ Frame* espVal = (Frame*)GetCurrentSP();
+
+ while (pFrame != (Frame*) -1)
+ {
+ static Frame* stopFrame = 0;
+ if (pFrame == stopFrame)
+ _ASSERTE(!"SetFrame frame == stopFrame");
+
+ _ASSERTE(espVal < pFrame);
+ _ASSERTE(pFrame < m_CacheStackBase);
+ _ASSERTE(pFrame->GetFrameType() < Frame::TYPE_COUNT);
+
+ pFrame = pFrame->m_Next;
+ }
+}
+
+#endif // _DEBUG
+
+//************************************************************************
+// PRIVATE GLOBALS
+//************************************************************************
+
+extern unsigned __int64 getTimeStamp();
+
+extern unsigned __int64 getTickFrequency();
+
+unsigned __int64 tgetFrequency() {
+ static unsigned __int64 cachedFreq = (unsigned __int64) -1;
+
+ if (cachedFreq != (unsigned __int64) -1)
+ return cachedFreq;
+ else {
+ cachedFreq = getTickFrequency();
+ return cachedFreq;
+ }
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+static StackWalkAction DetectHandleILStubsForDebugger_StackWalkCallback(CrawlFrame *pCF, VOID *pData)
+{
+ WRAPPER_NO_CONTRACT;
+ // It suffices to wait for the first CrawlFrame with non-NULL function
+ MethodDesc *pMD = pCF->GetFunction();
+ if (pMD != NULL)
+ {
+ *(bool *)pData = pMD->IsILStub();
+ return SWA_ABORT;
+ }
+
+ return SWA_CONTINUE;
+}
+
+// This is really just a heuristic to detect if we are executing in an M2U IL stub or
+// one of the marshaling methods it calls. It doesn't deal with U2M IL stubs.
+// We loop through the frame chain looking for an uninitialized TransitionFrame.
+// If there is one, then we are executing in an M2U IL stub or one of the methods it calls.
+// On the other hand, if there is an initialized TransitionFrame, then we are not.
+// Also, if there is an HMF on the stack, then we stop. This could be the case where
+// an IL stub calls an FCALL which ends up in a managed method, and the debugger wants to
+// stop in those cases. Some examples are COMException..ctor and custom marshalers.
+//
+// X86 IL stubs use InlinedCallFrame and are indistinguishable from ordinary methods with
+// inlined P/Invoke when judging just from the frame chain. We use stack walk to decide
+// this case.
+bool Thread::DetectHandleILStubsForDebugger()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Frame* pFrame = GetFrame();
+
+ if (pFrame != NULL)
+ {
+ while (pFrame != FRAME_TOP)
+ {
+ // Check for HMF's. See the comment at the beginning of this function.
+ if (pFrame->GetVTablePtr() == HelperMethodFrame::GetMethodFrameVPtr())
+ {
+ break;
+ }
+ // If there is an entry frame (i.e. U2M managed), we should break.
+ else if (pFrame->GetFrameType() == Frame::TYPE_ENTRY)
+ {
+ break;
+ }
+ // Check for M2U transition frames. See the comment at the beginning of this function.
+ else if (pFrame->GetFrameType() == Frame::TYPE_EXIT)
+ {
+ if (pFrame->GetReturnAddress() == NULL)
+ {
+ // If the return address is NULL, then the frame has not been initialized yet.
+ // We may see InlinedCallFrame in ordinary methods as well. Have to do
+ // stack walk to find out if this is really an IL stub.
+ bool fInILStub = false;
+
+ StackWalkFrames(&DetectHandleILStubsForDebugger_StackWalkCallback,
+ &fInILStub,
+ QUICKUNWIND,
+ dac_cast<PTR_Frame>(pFrame));
+
+ if (fInILStub) return true;
+ }
+ else
+ {
+ // The frame is fully initialized.
+ return false;
+ }
+ }
+ pFrame = pFrame->Next();
+ }
+ }
+ return false;
+}
+
+
+#ifdef FEATURE_IMPLICIT_TLS
+
+extern "C" {
+#ifndef __llvm__
+__declspec(thread)
+#else // !__llvm__
+__thread
+#endif // !__llvm__
+ThreadLocalInfo gCurrentThreadInfo =
+ {
+ NULL, // m_pThread
+ NULL, // m_pAppDomain
+ NULL, // m_EETlsData
+#if defined(FEATURE_MERGE_JIT_AND_ENGINE)
+ NULL, // m_pCompiler
+#endif
+ };
+} // extern "C"
+// index into TLS Array. Definition added by compiler
+EXTERN_C UINT32 _tls_index;
+
+#else // FEATURE_IMPLICIT_TLS
+extern "C" {
+GVAL_IMPL_INIT(DWORD, gThreadTLSIndex, TLS_OUT_OF_INDEXES); // index ( (-1) == uninitialized )
+GVAL_IMPL_INIT(DWORD, gAppDomainTLSIndex, TLS_OUT_OF_INDEXES); // index ( (-1) == uninitialized )
+}
+#endif // FEATURE_IMPLICIT_TLS
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_IMPLICIT_TLS
+EXTERN_C Thread* STDCALL GetThread()
+{
+ return gCurrentThreadInfo.m_pThread;
+}
+
+EXTERN_C AppDomain* STDCALL GetAppDomain()
+{
+ return gCurrentThreadInfo.m_pAppDomain;
+}
+
+BOOL SetThread(Thread* t)
+{
+ LIMITED_METHOD_CONTRACT
+
+ gCurrentThreadInfo.m_pThread = t;
+ return TRUE;
+}
+
+BOOL SetAppDomain(AppDomain* ad)
+{
+ LIMITED_METHOD_CONTRACT
+
+ gCurrentThreadInfo.m_pAppDomain = ad;
+ return TRUE;
+}
+
+#if defined(FEATURE_MERGE_JIT_AND_ENGINE)
+Compiler* GetTlsCompiler()
+{
+ LIMITED_METHOD_CONTRACT
+
+ return gCurrentThreadInfo.m_pCompiler;
+}
+void SetTlsCompiler(Compiler* c)
+{
+ LIMITED_METHOD_CONTRACT
+ gCurrentThreadInfo.m_pCompiler = c;
+}
+#endif // defined(FEATURE_MERGE_JIT_AND_ENGINE)
+
+#define ThreadInited() (TRUE)
+
+#else // FEATURE_IMPLICIT_TLS
+BOOL SetThread(Thread* t)
+{
+ WRAPPER_NO_CONTRACT
+ return UnsafeTlsSetValue(GetThreadTLSIndex(), t);
+}
+
+BOOL SetAppDomain(AppDomain* ad)
+{
+ WRAPPER_NO_CONTRACT
+ return UnsafeTlsSetValue(GetAppDomainTLSIndex(), ad);
+}
+
+#define ThreadInited() (gThreadTLSIndex != TLS_OUT_OF_INDEXES)
+
+#endif // FEATURE_IMPLICIT_TLS
+
+
+BOOL Thread::Alert ()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ BOOL fRetVal = FALSE;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HostComHolder<IHostTask> pHostTask(GetHostTaskWithAddRef());
+ if (pHostTask && !HasThreadStateNC(TSNC_OSAlertableWait)) {
+ HRESULT hr;
+
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pHostTask->Alert();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ fRetVal = SUCCEEDED(hr);
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ HANDLE handle = GetThreadHandle();
+ if (handle != INVALID_HANDLE_VALUE && handle != SWITCHOUT_HANDLE_VALUE)
+ {
+ fRetVal = ::QueueUserAPC(UserInterruptAPC, handle, APC_Code);
+ }
+ }
+
+ return fRetVal;
+}
+
+struct HostJoinOnThreadArgs
+{
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTask *pHostTask;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ WaitMode mode;
+};
+
+DWORD HostJoinOnThread (void *args, DWORD timeout, DWORD option)
+{
+ CONTRACTL {
+ THROWS;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HostJoinOnThreadArgs *joinArgs = (HostJoinOnThreadArgs*) args;
+ IHostTask *pHostTask = joinArgs->pHostTask;
+ if ((joinArgs->mode & WaitMode_InDeadlock) == 0)
+ {
+ option |= WAIT_NOTINDEADLOCK;
+ }
+
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pHostTask->Join(timeout, option);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (hr == S_OK) {
+ return WAIT_OBJECT_0;
+ }
+ else if (hr == HOST_E_TIMEOUT) {
+ return WAIT_TIMEOUT;
+ }
+ else if (hr == HOST_E_INTERRUPTED) {
+ _ASSERTE (option & WAIT_ALERTABLE);
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ Thread::UserInterruptAPC(APC_Code);
+ }
+ return WAIT_IO_COMPLETION;
+ }
+ else if (hr == HOST_E_ABANDONED)
+ {
+ // The task died.
+ return WAIT_OBJECT_0;
+ }
+ else if (hr == HOST_E_DEADLOCK)
+ {
+ _ASSERTE ((option & WAIT_NOTINDEADLOCK) == 0);
+ RaiseDeadLockException();
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ _ASSERTE (!"Unknown host join status\n");
+ return E_FAIL;
+}
+
+
+DWORD Thread::Join(DWORD timeout, BOOL alertable)
+{
+ WRAPPER_NO_CONTRACT;
+ return JoinEx(timeout,alertable?WaitMode_Alertable:WaitMode_None);
+}
+DWORD Thread::JoinEx(DWORD timeout, WaitMode mode)
+{
+ CONTRACTL {
+ THROWS;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ BOOL alertable = (mode & WaitMode_Alertable)?TRUE:FALSE;
+
+ Thread *pCurThread = GetThread();
+ _ASSERTE(pCurThread || dbgOnly_IsSpecialEEThread());
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HostComHolder<IHostTask> pHostTask (GetHostTaskWithAddRef());
+ if (pHostTask != NULL) {
+ HostJoinOnThreadArgs args = {pHostTask, mode};
+ if (pCurThread) {
+ return GetThread()->DoAppropriateWait(HostJoinOnThread, &args, timeout, mode);
+ }
+ else {
+ return HostJoinOnThread (&args,timeout,alertable?WAIT_ALERTABLE:0);
+ }
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ // We're not hosted, so WaitMode_InDeadlock is irrelevant. Clear it, so that this wait can be
+ // forwarded to a SynchronizationContext if needed.
+ mode = (WaitMode)(mode & ~WaitMode_InDeadlock);
+
+ HANDLE handle = GetThreadHandle();
+ if (handle == INVALID_HANDLE_VALUE || handle == SWITCHOUT_HANDLE_VALUE) {
+ return WAIT_FAILED;
+ }
+ if (pCurThread) {
+ return pCurThread->DoAppropriateWait(1, &handle, FALSE, timeout, mode);
+ }
+ else {
+ return WaitForSingleObjectEx(handle,timeout,alertable);
+ }
+ }
+}
+
+extern INT32 MapFromNTPriority(INT32 NTPriority);
+
+BOOL Thread::SetThreadPriority(
+ int nPriority // thread priority level
+)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ BOOL fRet;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HostComHolder<IHostTask> pHostTask (GetHostTaskWithAddRef());
+ if (pHostTask != NULL) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ fRet = (pHostTask->SetPriority(nPriority) == S_OK);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ if (GetThreadHandle() == INVALID_HANDLE_VALUE) {
+ // When the thread starts running, we will set the thread priority.
+ fRet = TRUE;
+ }
+ else
+ fRet = ::SetThreadPriority(GetThreadHandle(), nPriority);
+ }
+
+ if (fRet)
+ {
+ GCX_COOP();
+ THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject);
+ if (pObject != NULL)
+ {
+ // TODO: managed ThreadPriority only supports up to 4.
+ pObject->SetPriority (MapFromNTPriority(nPriority));
+ }
+ }
+ return fRet;
+}
+
+int Thread::GetThreadPriority()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ int nRetVal = -1;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HostComHolder<IHostTask> pHostTask(GetHostTaskWithAddRef());
+ if (pHostTask != NULL) {
+ int nPriority;
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pHostTask->GetPriority(&nPriority);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ nRetVal = (hr == S_OK)?nPriority:THREAD_PRIORITY_ERROR_RETURN;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ if (GetThreadHandle() == INVALID_HANDLE_VALUE) {
+ nRetVal = FALSE;
+ }
+ else
+ nRetVal = ::GetThreadPriority(GetThreadHandle());
+
+ return nRetVal;
+}
+
+void Thread::ChooseThreadCPUGroupAffinity()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+#if !defined(FEATURE_CORECLR)
+ if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
+ return;
+
+ // We only handle the non-hosted case here. If CLR is hosted, the hosting
+ // process controls the physical OS Threads. If CLR is not hosted, we can
+ // set thread group affinity on OS threads directly.
+ HostComHolder<IHostTask> pHostTask (GetHostTaskWithAddRef());
+ if (pHostTask != NULL)
+ return;
+
+ // Borrow the ThreadStore lock here: lock the ThreadStore before distributing threads
+ ThreadStoreLockHolder TSLockHolder(TRUE);
+
+ // this thread already has CPU group affinity set
+ if (m_pAffinityMask != 0)
+ return;
+
+ if (GetThreadHandle() == INVALID_HANDLE_VALUE)
+ return;
+
+ GROUP_AFFINITY groupAffinity;
+ CPUGroupInfo::ChooseCPUGroupAffinity(&groupAffinity);
+ CPUGroupInfo::SetThreadGroupAffinity(GetThreadHandle(), &groupAffinity, NULL);
+ m_wCPUGroup = groupAffinity.Group;
+ m_pAffinityMask = groupAffinity.Mask;
+#endif
+}
+
+void Thread::ClearThreadCPUGroupAffinity()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#if !defined(FEATURE_CORECLR)
+ if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
+ return;
+
+ // We only handle the non-hosted case here. If CLR is hosted, the hosting
+ // process controls the physical OS Threads. If CLR is not hosted, we can
+ // set thread group affinity on OS threads directly.
+ HostComHolder<IHostTask> pHostTask (GetHostTaskWithAddRef());
+ if (pHostTask != NULL)
+ return;
+
+ ThreadStoreLockHolder TSLockHolder(TRUE);
+
+ // this thread does not have CPU group affinity set
+ if (m_pAffinityMask == 0)
+ return;
+
+ GROUP_AFFINITY groupAffinity;
+ groupAffinity.Group = m_wCPUGroup;
+ groupAffinity.Mask = m_pAffinityMask;
+ CPUGroupInfo::ClearCPUGroupAffinity(&groupAffinity);
+
+ m_wCPUGroup = 0;
+ m_pAffinityMask = 0;
+#endif
+}
+
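+// Starts a thread that was created suspended: asks the host to start its task,
+// or resumes the OS thread directly. Returns (DWORD)-1 on failure.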
+DWORD Thread::StartThread()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DWORD dwRetVal = (DWORD) -1;
+#ifdef _DEBUG
+ _ASSERTE (m_Creater.IsSameThread());
+ m_Creater.ResetThreadId();
+#endif
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HostComHolder<IHostTask> pHostTask(GetHostTaskWithAddRef());
+ if (pHostTask)
+ {
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pHostTask->Start();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (hr == S_OK) {
+ dwRetVal = 1;
+ }
+ else
+ dwRetVal = (DWORD) -1;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ _ASSERTE (GetThreadHandle() != INVALID_HANDLE_VALUE &&
+ GetThreadHandle() != SWITCHOUT_HANDLE_VALUE);
+ dwRetVal = ::ResumeThread(GetThreadHandle());
+ }
+
+ return dwRetVal;
+}
+
+
+// Class static data:
+LONG Thread::m_DebugWillSyncCount = -1;
+LONG Thread::m_DetachCount = 0;
+LONG Thread::m_ActiveDetachCount = 0;
+int Thread::m_offset_counter = 0;
+Volatile<LONG> Thread::m_threadsAtUnsafePlaces = 0;
+
+//-------------------------------------------------------------------------
+// Public function: SetupThreadNoThrow()
+// Creates a Thread object for the current thread if one was not previously created.
+// Returns NULL on failure (usually due to out-of-memory).
+//-------------------------------------------------------------------------
+Thread* SetupThreadNoThrow(HRESULT *pHR)
+{
+ CONTRACTL {
+ NOTHROW;
+ SO_TOLERANT;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ Thread *pThread = GetThread();
+ if (pThread != NULL)
+ {
+ return pThread;
+ }
+
+ EX_TRY
+ {
+ pThread = SetupThread();
+ }
+ EX_CATCH
+ {
+ // We failed SetupThread. GET_EXCEPTION() may depend on Thread object.
+ if (__pException == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ else
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (pHR)
+ {
+ *pHR = hr;
+ }
+
+ return pThread;
+}
+
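+// Tears down a Thread object on failure paths: threads already in the ThreadStore
+// are detached; otherwise the object is marked dead and deleted directly.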
+void DeleteThread(Thread* pThread)
+{
+ CONTRACTL {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ //_ASSERTE (pThread == GetThread());
+ SetThread(NULL);
+ SetAppDomain(NULL);
+
+ if (pThread->HasThreadStateNC(Thread::TSNC_ExistInThreadStore))
+ {
+ pThread->DetachThread(FALSE);
+ }
+ else
+ {
+#ifdef FEATURE_COMINTEROP
+ pThread->RevokeApartmentSpy();
+#endif // FEATURE_COMINTEROP
+
+ FastInterlockOr((ULONG *)&pThread->m_State, Thread::TS_Dead);
+
+ // ~Thread() calls SafeSetThrowables which has a conditional contract
+ // which says that if you call it with a NULL throwable then it is
+ // MODE_ANY, otherwise MODE_COOPERATIVE. Scan doesn't understand that
+ // and assumes that we're violating the MODE_COOPERATIVE.
+ CONTRACT_VIOLATION(ModeViolation);
+
+ delete pThread;
+ }
+}
+
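+// Backout helper: if an exception escapes thread setup, make sure the thread is
+// left in preemptive GC mode (used via the StateHolder typedef below).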
+void EnsurePreemptive()
+{
+ WRAPPER_NO_CONTRACT;
+ Thread *pThread = GetThread();
+ if (pThread && pThread->PreemptiveGCDisabled())
+ {
+ pThread->EnablePreemptiveGC();
+ }
+}
+
+typedef StateHolder<DoNothing, EnsurePreemptive> EnsurePreemptiveModeIfException;
+
+Thread* SetupThread(BOOL fInternal)
+{
+ CONTRACTL {
+ THROWS;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(ThreadInited());
+ Thread* pThread;
+ if ((pThread = GetThread()) != NULL)
+ return pThread;
+
+#ifdef FEATURE_STACK_PROBE
+ RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), NULL);
+#endif //FEATURE_STACK_PROBE
+
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ // For interop debugging, we must mark that we're in a can't-stop region
+ // because we may take Crsts here that can block the helper thread.
+ // We're especially fragile here because we don't have a Thread object yet.
+ CantStopHolder hCantStop;
+
+ EnsurePreemptiveModeIfException ensurePreemptive;
+
+#ifdef _DEBUG
+ // Verify that for fiber mode, we do not have a thread that matches the current StackBase.
+ if (CLRTaskHosted()) {
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTaskManager *provider = CorHost2::GetHostTaskManager();
+
+ IHostTask *pHostTask = NULL;
+
+ // Starting with SQL11 GetCurrentTask() may actually create a task if one does not
+ // exist yet. To avoid an unbalanced BeginThreadAffinity/EndThreadAffinity assert
+ // we must not call it inside a scope protected by ThreadStoreLockHolder (which calls
+ // BeginThreadAffinity/EndThreadAffinity in its constructor/destructor). Post SQL11,
+ // SQL may create the task in BeginThreadAffinity() but until then we have to be
+ // able to run on CHK bits w/o tripping the ASSERT.
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ provider->GetCurrentTask(&pHostTask);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ if (pHostTask)
+ {
+ ThreadStoreLockHolder TSLockHolder;
+ SafeComHolder<IHostTask> pHostTaskHolder(pHostTask);
+ while ((pThread = ThreadStore::s_pThreadStore->GetAllThreadList(pThread, 0, 0)) != NULL)
+ {
+ _ASSERTE ((pThread->m_State&Thread::TS_Unstarted) || pThread->GetHostTask() != pHostTask);
+ }
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ }
+#endif
+
+#ifdef _DEBUG
+ CHECK chk;
+ if (g_pConfig->SuppressChecks())
+ {
+ // EnterAssert will suppress any checks
+ chk.EnterAssert();
+ }
+#endif
+
+ // Normally, HasStarted is called from the thread's entrypoint to introduce it to
+ // the runtime. But sometimes that thread is used for DLL_THREAD_ATTACH notifications
+ // that call into managed code. In that case, a call to SetupThread here must
+ // find the correct Thread object and install it into TLS.
+
+ if (ThreadStore::s_pThreadStore->m_PendingThreadCount != 0)
+ {
+ DWORD ourOSThreadId = ::GetCurrentThreadId();
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTask *curHostTask = NULL;
+ IHostTaskManager *hostTaskManager = CorHost2::GetHostTaskManager();
+ if (hostTaskManager) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hostTaskManager->GetCurrentTask(&curHostTask);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+
+ SafeComHolder<IHostTask> pHostTaskHolder(curHostTask);
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ ThreadStoreLockHolder TSLockHolder;
+ _ASSERTE(pThread == NULL);
+ while ((pThread = ThreadStore::s_pThreadStore->GetAllThreadList(pThread, Thread::TS_Unstarted | Thread::TS_FailStarted, Thread::TS_Unstarted)) != NULL)
+ {
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (curHostTask)
+ {
+ if (curHostTask == pThread->GetHostTask())
+ {
+ break;
+ }
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ if (pThread->GetOSThreadId() == ourOSThreadId)
+ {
+ break;
+ }
+ }
+
+ if (pThread != NULL)
+ {
+ STRESS_LOG2(LF_SYNC, LL_INFO1000, "T::ST - recycling thread 0x%p (state: 0x%x)\n", pThread, pThread->m_State.Load());
+ }
+ }
+
+ // It's perfectly reasonable to not find this guy. It's just an unrelated
+ // thread spinning up.
+ if (pThread)
+ {
+ if (IsThreadPoolWorkerSpecialThread())
+ {
+ FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
+ pThread->SetBackground(TRUE);
+ }
+ else if (IsThreadPoolIOCompletionSpecialThread())
+ {
+ FastInterlockOr ((ULONG *) &pThread->m_State, Thread::TS_CompletionPortThread);
+ pThread->SetBackground(TRUE);
+ }
+ else if (IsTimerSpecialThread() || IsWaitSpecialThread())
+ {
+ FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
+ pThread->SetBackground(TRUE);
+ }
+
+ BOOL fStatus = pThread->HasStarted();
+ ensurePreemptive.SuppressRelease();
+ return fStatus ? pThread : NULL;
+ }
+ }
+
+ // First time we've seen this thread in the runtime:
+ pThread = new Thread();
+
+// What state are we in here? COOP???
+
+ Holder<Thread*,DoNothing<Thread*>,DeleteThread> threadHolder(pThread);
+
+ CExecutionEngine::SetupTLSForThread(pThread);
+
+ // A host can deny a thread entry to the runtime by returning a NULL IHostTask,
+ // but we do want to admit the threads used by the threadpool.
+ if (IsThreadPoolWorkerSpecialThread() ||
+ IsThreadPoolIOCompletionSpecialThread() ||
+ IsTimerSpecialThread() ||
+ IsWaitSpecialThread())
+ {
+ fInternal = TRUE;
+ }
+
+ if (!pThread->InitThread(fInternal) ||
+ !pThread->PrepareApartmentAndContext())
+ ThrowOutOfMemory();
+
+#ifndef FEATURE_IMPLICIT_TLS
+ // make sure we will not fail when we store in TLS in the future.
+ if (!UnsafeTlsSetValue(gThreadTLSIndex, NULL))
+ {
+ ThrowOutOfMemory();
+ }
+ if (!UnsafeTlsSetValue(GetAppDomainTLSIndex(), NULL))
+ {
+ ThrowOutOfMemory();
+ }
+#endif
+
+ // reset any unstarted bits on the thread object
+ FastInterlockAnd((ULONG *) &pThread->m_State, ~Thread::TS_Unstarted);
+ FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_LegalToJoin);
+
+ ThreadStore::AddThread(pThread);
+
+ BOOL fOK = SetThread(pThread);
+ _ASSERTE (fOK);
+ fOK = SetAppDomain(pThread->GetDomain());
+ _ASSERTE (fOK);
+
+ // We now have a Thread object visible to the RS; unmark the special status.
+ hCantStop.Release();
+
+ pThread->SetupThreadForHost();
+
+ threadHolder.SuppressRelease();
+
+ FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_FullyInitialized);
+
+#ifdef _DEBUG
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_Lifetime);
+#endif
+
+#ifdef DEBUGGING_SUPPORTED
+ //
+ // If we're debugging, let the debugger know that this
+ // thread is up and running now.
+ //
+ if (CORDebuggerAttached())
+ {
+ g_pDebugInterface->ThreadCreated(pThread);
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000, "ThreadCreated() not called due to CORDebuggerAttached() being FALSE for thread 0x%x\n", pThread->GetThreadId()));
+ }
+#endif // DEBUGGING_SUPPORTED
+
+#ifdef PROFILING_SUPPORTED
+ // If a profiler is present, then notify the profiler that a
+ // thread has been created.
+ if (!IsGCSpecialThread())
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->ThreadCreated(
+ (ThreadID)pThread);
+ }
+
+ DWORD osThreadId = ::GetCurrentThreadId();
+ g_profControlBlock.pProfInterface->ThreadAssignedToOSThread(
+ (ThreadID)pThread, osThreadId);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ _ASSERTE(!pThread->IsBackground()); // doesn't matter, but worth checking
+ pThread->SetBackground(TRUE);
+
+ ensurePreemptive.SuppressRelease();
+
+ if (IsThreadPoolWorkerSpecialThread())
+ {
+ FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
+ }
+ else if (IsThreadPoolIOCompletionSpecialThread())
+ {
+ FastInterlockOr ((ULONG *) &pThread->m_State, Thread::TS_CompletionPortThread);
+ }
+ else if (IsTimerSpecialThread() || IsWaitSpecialThread())
+ {
+ FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
+ }
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ if (g_fEnableARM)
+ {
+ pThread->QueryThreadProcessorUsage();
+ }
+#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+#ifdef FEATURE_EVENT_TRACE
+ ETW::ThreadLog::FireThreadCreated(pThread);
+#endif // FEATURE_EVENT_TRACE
+
+ return pThread;
+}
+
+//-------------------------------------------------------------------------
+void STDMETHODCALLTYPE CorMarkThreadInThreadPool()
+{
+ LIMITED_METHOD_CONTRACT;
+ BEGIN_ENTRYPOINT_VOIDRET;
+ END_ENTRYPOINT_VOIDRET;
+
+ // This is no longer needed after our switch to the Win32 threadpool.
+ // We keep it in mscorwks for compat reasons and to keep rotor, sscoree, and
+ // mscoree consistent.
+}
+
+
+//-------------------------------------------------------------------------
+// Public function: SetupUnstartedThread()
+// This sets up a Thread object for an exposed System.Thread that
+// has not been started yet. This allows us to properly enumerate all threads
+// in the ThreadStore, so we can report on even unstarted threads. Clearly
+// there is no physical thread to match, yet.
+//
+// When there is, complete the setup with code:Thread::HasStarted()
+//-------------------------------------------------------------------------
+Thread* SetupUnstartedThread(BOOL bRequiresTSL)
+{
+ CONTRACTL {
+ THROWS;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(ThreadInited());
+ Thread* pThread = new Thread();
+
+ if (pThread)
+ {
+ FastInterlockOr((ULONG *) &pThread->m_State,
+ (Thread::TS_Unstarted | Thread::TS_WeOwn));
+
+ ThreadStore::AddThread(pThread, bRequiresTSL);
+ }
+
+ return pThread;
+}
+
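+// FCall wrapper exposing GetRuntimeId() to managed code.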
+FCIMPL0(INT32, GetRuntimeId_Wrapper)
+{
+ FCALL_CONTRACT;
+
+ return GetRuntimeId();
+}
+FCIMPLEND
+
+//-------------------------------------------------------------------------
+// Public function: DestroyThread()
+// Destroys the specified Thread object, for a thread which is about to die.
+//-------------------------------------------------------------------------
+void DestroyThread(Thread *th)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (th == GetThread());
+
+ _ASSERTE(g_fEEShutDown || th->m_dwLockCount == 0 || th->m_fRudeAborted);
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ if (g_fEnableARM)
+ {
+ AppDomain* pDomain = th->GetDomain();
+ pDomain->UpdateProcessorUsage(th->QueryThreadProcessorUsage());
+ FireEtwThreadTerminated((ULONGLONG)th, (ULONGLONG)pDomain, GetClrInstanceId());
+ }
+#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+ th->FinishSOWork();
+
+ GCX_PREEMP_NO_DTOR();
+
+ if (th->IsAbortRequested()) {
+ // Reset trapping count.
+ th->UnmarkThreadForAbort(Thread::TAR_ALL);
+ }
+
+ // Clear any outstanding stale EH state that may still be active on the thread.
+#ifdef WIN64EXCEPTIONS
+ ExceptionTracker::PopTrackers((void*)-1);
+#else // !WIN64EXCEPTIONS
+#ifdef _TARGET_X86_
+ PTR_ThreadExceptionState pExState = th->GetExceptionState();
+ if (pExState->IsExceptionInProgress())
+ {
+ GCX_COOP();
+ pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1);
+ }
+#else // !_TARGET_X86_
+#error Unsupported platform
+#endif // _TARGET_X86_
+#endif // WIN64EXCEPTIONS
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ // If CLR is hosted, don't call OnThreadTerminate here. Instead the host will call
+ // ExitTask which calls DetachThread.
+ if (th->GetHostTask() == NULL)
+#else // !FEATURE_INCLUDE_ALL_INTERFACES
+ if (g_fEEShutDown == 0)
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ th->SetThreadState(Thread::TS_ReportDead);
+ th->OnThreadTerminate(FALSE);
+ }
+}
+
+//-------------------------------------------------------------------------
+// Public function: DetachThread()
+// Marks the thread as needing to be destroyed, but doesn't destroy it yet.
+//-------------------------------------------------------------------------
+HRESULT Thread::DetachThread(BOOL fDLLThreadDetach)
+{
+ // !!! Can not use contract here.
+ // !!! Contract depends on Thread object for GC_TRIGGERS.
+ // !!! At the end of this function, we call InternalSwitchOut,
+ // !!! and then GetThread()=NULL, and dtor of contract does not work any more.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ // @todo: We need to probe here, but can't introduce destructors etc.
+ BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
+
+ // Clear any outstanding stale EH state that may still be active on the thread.
+#ifdef WIN64EXCEPTIONS
+ ExceptionTracker::PopTrackers((void*)-1);
+#else // !WIN64EXCEPTIONS
+#ifdef _TARGET_X86_
+ PTR_ThreadExceptionState pExState = GetExceptionState();
+ if (pExState->IsExceptionInProgress())
+ {
+ GCX_COOP();
+ pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1);
+ }
+#else // !_TARGET_X86_
+#error Unsupported platform
+#endif // _TARGET_X86_
+#endif // WIN64EXCEPTIONS
+
+#ifdef FEATURE_COMINTEROP
+ IErrorInfo *pErrorInfo;
+ // Avoid calling GetErrorInfo() if ole32 has already executed the DLL_THREAD_DETACH,
+ // otherwise we'll cause ole32 to re-allocate and leak its TLS data (SOleTlsData).
+ if (ClrTeb::GetOleReservedPtr() != NULL && GetErrorInfo(0, &pErrorInfo) == S_OK)
+ {
+ // if this is our IErrorInfo, release it now - we don't want ole32 to do it later as
+ // part of its DLL_THREAD_DETACH as we won't be able to handle the call at that point
+ if (!ComInterfaceSlotIs(pErrorInfo, 2, Unknown_ReleaseSpecial_IErrorInfo))
+ {
+ // if it's not our IErrorInfo, put it back
+ SetErrorInfo(0, pErrorInfo);
+ }
+ pErrorInfo->Release();
+ }
+
+ // Revoke our IInitializeSpy registration only if we are not in DLL_THREAD_DETACH
+ // (COM will do it or may have already done it automatically in that case).
+ if (!fDLLThreadDetach)
+ {
+ RevokeApartmentSpy();
+ }
+#endif // FEATURE_COMINTEROP
+
+ _ASSERTE(!PreemptiveGCDisabled());
+ _ASSERTE(g_fEEShutDown || m_dwLockCount == 0 || m_fRudeAborted);
+
+ _ASSERTE ((m_State & Thread::TS_Detached) == 0);
+
+ _ASSERTE (this == GetThread());
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ if (g_fEnableARM && m_pDomain)
+ {
+ m_pDomain->UpdateProcessorUsage(QueryThreadProcessorUsage());
+ FireEtwThreadTerminated((ULONGLONG)this, (ULONGLONG)m_pDomain, GetClrInstanceId());
+ }
+#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+ FinishSOWork();
+
+ FastInterlockIncrement(&Thread::m_DetachCount);
+
+ if (IsAbortRequested()) {
+ // Reset trapping count.
+ UnmarkThreadForAbort(Thread::TAR_ALL);
+ }
+
+ if (!IsBackground())
+ {
+ FastInterlockIncrement(&Thread::m_ActiveDetachCount);
+ ThreadStore::CheckForEEShutdown();
+ }
+
+ END_CONTRACT_VIOLATION;
+
+ InternalSwitchOut();
+
+#ifdef ENABLE_CONTRACTS_DATA
+ m_pClrDebugState = NULL;
+#endif //ENABLE_CONTRACTS_DATA
+
+ FastInterlockOr((ULONG*)&m_State, (int) (Thread::TS_Detached | Thread::TS_ReportDead));
+ // Do not touch Thread object any more. It may be destroyed.
+
+ // These detached threads will be cleaned up by the finalizer thread. But if the process uses
+ // little managed heap, it will be a while before a GC happens and the finalizer thread starts
+ // working on detached threads. So we wake up the finalizer thread to clean up resources.
+ //
+ // (It's possible that this is the startup thread, and startup failed, and so the finalization
+ // machinery isn't fully initialized. Hence this check.)
+ if (g_fEEStarted)
+ FinalizerThread::EnableFinalization();
+
+ return S_OK;
+}
+
+#ifndef FEATURE_IMPLICIT_TLS
+//---------------------------------------------------------------------------
+// Returns the TLS index for the Thread. This is strictly for the use of
+// our ASM stub generators that generate inline code to access the Thread.
+// Normally, you should use GetThread().
+//---------------------------------------------------------------------------
+DWORD GetThreadTLSIndex()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return gThreadTLSIndex;
+}
+
+//---------------------------------------------------------------------------
+// Returns the TLS index for the AppDomain. This is strictly for the use of
+// our ASM stub generators that generate inline code to access the AppDomain.
+// Normally, you should use GetAppDomain().
+//---------------------------------------------------------------------------
+DWORD GetAppDomainTLSIndex()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return gAppDomainTLSIndex;
+}
+#endif
+
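+// Returns an identifier for this runtime instance in the process, derived from
+// the thread TLS index (or from _tls_index when implicit TLS is in use).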
+DWORD GetRuntimeId()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifndef FEATURE_IMPLICIT_TLS
+ _ASSERTE(GetThreadTLSIndex() != TLS_OUT_OF_INDEXES);
+ return GetThreadTLSIndex() + 3;
+#else
+ return _tls_index;
+#endif
+}
+
+//---------------------------------------------------------------------------
+// Creates new Thread for reverse p-invoke calls.
+//---------------------------------------------------------------------------
+Thread* __stdcall CreateThreadBlockThrow()
+{
+
+ WRAPPER_NO_CONTRACT;
+
+ // This is a workaround to disable our check for throwing an exception in SetupThread.
+ // We want to throw an exception for reverse p-invoke, and our assertion may fire if
+ // an unmanaged caller does not set up an exception handler.
+ CONTRACT_VIOLATION(ThrowsViolation); // WON'T FIX - This enables catastrophic failure exception in reverse P/Invoke - the only way we can communicate an error to legacy code.
+ Thread* pThread = NULL;
+ BEGIN_ENTRYPOINT_THROWS;
+
+ if (!CanRunManagedCode())
+ {
+ // CLR is shutting down - someone's DllMain detach event may be calling back into managed code.
+ // It is misleading to use our COM+ exception code, since this is not a managed exception.
+ ULONG_PTR arg = E_PROCESS_SHUTDOWN_REENTRY;
+ RaiseException(EXCEPTION_EXX, 0, 1, &arg);
+ }
+
+ HRESULT hr = S_OK;
+ pThread = SetupThreadNoThrow(&hr);
+ if (pThread == NULL)
+ {
+ // Creating Thread failed, and we need to throw an exception to report status.
+ // It is misleading to use our COM+ exception code, since this is not a managed exception.
+ ULONG_PTR arg = hr;
+ RaiseException(EXCEPTION_EXX, 0, 1, &arg);
+ }
+ END_ENTRYPOINT_THROWS;
+
+ return pThread;
+}
+
+#ifdef _DEBUG
+DWORD_PTR Thread::OBJREF_HASH = OBJREF_TABSIZE;
+#endif
+
+#ifndef FEATURE_IMPLICIT_TLS
+
+#ifdef ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+
+// ----------------------------------------------------------------------------
+// GetThreadGenericFullCheck
+//
+// Description:
+// The non-PAL, x86 / x64 assembly versions of GetThreadGeneric call into this C
+// function to optionally do some verification before returning the EE Thread object
+// for the current thread. Currently the primary enforcement this function does is
+// around the EE_THREAD_(NOT)_REQUIRED contracts. For a definition of these
+// contracts, how they're used, and how temporary "safe" scopes may be created
+// using BEGIN_GETTHREAD_ALLOWED / END_GETTHREAD_ALLOWED, see the comments at the top
+// of contract.h.
+//
+// The EE_THREAD_(NOT)_REQUIRED contracts are enforced as follows:
+// * code:EEContract::DoChecks enforces the following:
+// * On entry to an EE_THREAD_REQUIRED function, GetThread() != NULL
+// * An EE_THREAD_REQUIRED function may not be called from an
+// EE_THREAD_NOT_REQUIRED function, unless there is an intervening
+// BEGIN/END_GETTHREAD_ALLOWED scope
+// * This function (GetThreadGenericFullCheck) enforces that an
+// EE_THREAD_NOT_REQUIRED function may not call GetThread(), unless there is
+// an intervening BEGIN/END_GETTHREAD_ALLOWED scope. While this enforcement
+// is straightforward below, the tricky part is getting
+// GetThreadGenericFullCheck() to actually be called when GetThread() is
+// called, given the optimizations around GetThread():
+// * code:InitThreadManager ensures that non-PAL, debug, x86/x64 builds that
+// run with COMPLUS_EnforceEEThreadNotRequiredContracts set are forced to
+// use GetThreadGeneric instead of the dynamically generated optimized
+// TLS getter.
+// * The non-PAL, debug, x86/x64 GetThreadGeneric() (implemented in the
+// processor-specific assembly files) knows to call
+// GetThreadGenericFullCheck() to do the enforcement.
+//
+Thread * GetThreadGenericFullCheck()
+{
+ // Can not have a dynamic contract here. Contract depends on GetThreadGeneric.
+ // Contract here causes stack overflow.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ if (!ThreadInited())
+ {
+ // #GTInfiniteRecursion
+ //
+ // Normally, we'd want to assert here, but that could lead to infinite recursion.
+ // Bringing up the assert dialog requires a string lookup, which requires getting
+ // the Thread's UI culture ID, which, of course, requires getting the Thread. So
+ // we'll just break instead.
+ DebugBreak();
+ }
+
+ if (g_fEEStarted &&
+
+ // Using ShouldEnforceEEThreadNotRequiredContracts() instead
+ // of directly checking CLRConfig::GetConfigValue, as the latter contains a dynamic
+ // contract and therefore calls GetThread(), which would cause infinite recursion.
+ Thread::ShouldEnforceEEThreadNotRequiredContracts() &&
+
+ // The following verifies that it's safe to call GetClrDebugState() below without
+ // risk of its callees invoking extra error checking or fiber code that could
+ // recursively call GetThread() and overflow the stack
+ (CExecutionEngine::GetTlsData() != NULL))
+ {
+ // It's safe to peek into the debug state, so let's do so, to see if
+ // our caller is really allowed to be calling GetThread(). This enforces
+ // the EE_THREAD_NOT_REQUIRED contract.
+ ClrDebugState * pDbg = GetClrDebugState(FALSE); // FALSE=don't allocate
+ if ((pDbg != NULL) && (!pDbg->IsGetThreadAllowed()))
+ {
+ // We need to bracket the ASSERTE with BEGIN/END_GETTHREAD_ALLOWED to avoid
+ // infinite recursion (see
+ // code:GetThreadGenericFullCheck#GTInfiniteRecursion). The ASSERTE here will
+ // cause us to reenter this function to get the thread (again). However,
+ // BEGIN/END_GETTHREAD_ALLOWED at least stops the recursion right then and
+ // there, as it prevents us from reentering this block yet again (since
+ // BEGIN/END_GETTHREAD_ALLOWED causes pDbg->IsGetThreadAllowed() to be TRUE).
+ // All such reentries to this function will quickly return the thread without
+ // executing the code below, so the original ASSERTE can proceed.
+ BEGIN_GETTHREAD_ALLOWED;
+ _ASSERTE(!"GetThread() called in a EE_THREAD_NOT_REQUIRED scope. If the GetThread() call site has a clear code path for a return of NULL, then consider using GetThreadNULLOk() or BEGIN/END_GETTHREAD_ALLOWED");
+ END_GETTHREAD_ALLOWED;
+ }
+ }
+
+ Thread * pThread = (Thread *) UnsafeTlsGetValue(gThreadTLSIndex);
+
+ // set bogus last error to help find places that fail to save it across GetThread calls
+ ::SetLastError(LAST_ERROR_TRASH_VALUE);
+
+ return pThread;
+}
+
+#endif // ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+//
+// Some platforms have this implemented in assembly
+//
+EXTERN_C Thread* STDCALL GetThreadGeneric(VOID);
+EXTERN_C AppDomain* STDCALL GetAppDomainGeneric(VOID);
+#else
+Thread* STDCALL GetThreadGeneric()
+{
+ // Can not have contract here. Contract depends on GetThreadGeneric.
+ // Contract here causes stack overflow.
+ //CONTRACTL {
+ // NOTHROW;
+ // GC_NOTRIGGER;
+ //}
+ //CONTRACTL_END;
+
+ // see code:GetThreadGenericFullCheck#GTInfiniteRecursion
+ _ASSERTE(ThreadInited());
+
+ Thread* pThread = (Thread*)UnsafeTlsGetValue(gThreadTLSIndex);
+
+ TRASH_LASTERROR;
+
+ return pThread;
+}
+
+AppDomain* STDCALL GetAppDomainGeneric()
+{
+ // No contract. This function is called during ExitTask.
+ //CONTRACTL {
+ // NOTHROW;
+ // GC_NOTRIGGER;
+ //}
+ //CONTRACTL_END;
+
+ _ASSERTE(ThreadInited());
+
+ AppDomain* pAppDomain = (AppDomain*)UnsafeTlsGetValue(GetAppDomainTLSIndex());
+
+ TRASH_LASTERROR;
+
+ return pAppDomain;
+}
+#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
+//
+// FLS getter to avoid unnecessary indirection via the execution engine. It is used if we get a high TLS slot
+// from the OS, where we cannot use the fast optimized assembly helpers. (This happens fairly often in hosted scenarios.)
+//
+VOID * ClrFlsGetBlockDirect()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return UnsafeTlsGetValue(CExecutionEngine::GetTlsIndex());
+}
+
+extern "C" void * ClrFlsGetBlock();
+
+#endif // FEATURE_IMPLICIT_TLS
+
+
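+// Markers bracketing the patchable JIT helper region; InitThreadManager below makes
+// this range writable so the helpers can be patched in place.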
+extern "C" void STDCALL JIT_PatchedCodeStart();
+extern "C" void STDCALL JIT_PatchedCodeLast();
+
+//---------------------------------------------------------------------------
+// One-time initialization. Called during Dll initialization. So
+// be careful what you do in here!
+//---------------------------------------------------------------------------
+void InitThreadManager()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ // All patched helpers should fit into one page.
+ // If you hit this assert on a retail build, there is most likely a problem with the BBT script.
+ _ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < PAGE_SIZE);
+
+ // We use VirtualProtect to cover the entire range that this code falls in.
+ //
+ // We could reset it to non-writable in between GCs and such, but then we'd have to keep
+ // re-writing it back and forth, so instead we leave it writable from here forward.
+
+ DWORD oldProt;
+ if (!ClrVirtualProtect((void *)JIT_PatchedCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart,
+ PAGE_EXECUTE_READWRITE, &oldProt))
+ {
+ _ASSERTE(!"ClrVirtualProtect of code page failed");
+ COMPlusThrowWin32();
+ }
+
+#ifndef FEATURE_PAL
+
+#ifdef FEATURE_IMPLICIT_TLS
+ _ASSERTE(GetThread() == NULL);
+
+ // Mscordbi calculates the address of the currentThread pointer using OFFSETOF__TLS__tls_CurrentThread. Ensure that
+ // this value is correct.
+
+ PTEB Teb;
+ BYTE* tlsData;
+ BYTE** tlsArray;
+
+ Teb = NtCurrentTeb();
+ tlsArray = (BYTE**)Teb->ThreadLocalStoragePointer;
+ tlsData = (BYTE*)tlsArray[_tls_index];
+
+ Thread **ppThread = (Thread**) (tlsData + OFFSETOF__TLS__tls_CurrentThread);
+ _ASSERTE_ALL_BUILDS("clr/src/VM/Threads.cpp",
+ (&(gCurrentThreadInfo.m_pThread) == ppThread) &&
+ "Offset of m_pThread as specified by OFFSETOF__TLS__tls_CurrentThread is not correct. "
+ "This can change due to addition/removal of declspec(Thread) thread local variables.");
+
+ _ASSERTE_ALL_BUILDS("clr/src/VM/Threads.cpp",
+ ((BYTE*)&(gCurrentThreadInfo.m_EETlsData) == tlsData + OFFSETOF__TLS__tls_EETlsData) &&
+ "Offset of m_EETlsData as specified by OFFSETOF__TLS__tls_EETlsData is not correct. "
+ "This can change due to addition/removal of declspec(Thread) thread local variables.");
+#else
+ _ASSERTE(gThreadTLSIndex == TLS_OUT_OF_INDEXES);
+#endif
+ _ASSERTE(g_TrapReturningThreads == 0);
+#endif // !FEATURE_PAL
+
+ // Consult run-time switches that choose whether to use generic or optimized
+ // versions of GetThread and GetAppDomain
+
+ BOOL fUseGenericTlsGetters = FALSE;
+
+#ifdef ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+ // Debug builds allow user to throw a switch to force use of the generic GetThread
+ // for the sole purpose of enforcing EE_THREAD_NOT_REQUIRED contracts
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EnforceEEThreadNotRequiredContracts) != 0)
+ {
+ // Set this static on Thread so this value can be safely read later on by
+ // code:GetThreadGenericFullCheck
+ Thread::s_fEnforceEEThreadNotRequiredContracts = TRUE;
+
+ fUseGenericTlsGetters = TRUE;
+ }
+#endif
+
+#ifndef FEATURE_IMPLICIT_TLS
+ // Now, we setup GetThread and GetAppDomain to point to their optimized or generic versions. Irrespective
+ // of the version they call into, we write opcode sequence into the dummy GetThread/GetAppDomain
+ // implementations (living in jithelp.s/.asm) via the MakeOptimizedTlsGetter calls below.
+ //
+ // For this to work, we must ensure that the dummy versions lie between the JIT_PatchedCodeStart
+ // and JIT_PatchedCodeLast code range (which lies in the .text section) so that when we change the protection
+ // above, we do so for GetThread and GetAppDomain as well.
+
+ //---------------------------------------------------------------------------
+ // INITIALIZE GetThread
+ //---------------------------------------------------------------------------
+
+ // No backout necessary - part of the one time global initialization
+ gThreadTLSIndex = UnsafeTlsAlloc();
+ if (gThreadTLSIndex == TLS_OUT_OF_INDEXES)
+ COMPlusThrowWin32();
+
+ MakeOptimizedTlsGetter(gThreadTLSIndex, (PVOID)GetThread, TLS_GETTER_MAX_SIZE, (POPTIMIZEDTLSGETTER)GetThreadGeneric, fUseGenericTlsGetters);
+
+ //---------------------------------------------------------------------------
+ // INITIALIZE GetAppDomain
+ //---------------------------------------------------------------------------
+
+ // No backout necessary - part of the one time global initialization
+ gAppDomainTLSIndex = UnsafeTlsAlloc();
+ if (gAppDomainTLSIndex == TLS_OUT_OF_INDEXES)
+ COMPlusThrowWin32();
+
+ MakeOptimizedTlsGetter(gAppDomainTLSIndex, (PVOID)GetAppDomain, TLS_GETTER_MAX_SIZE, (POPTIMIZEDTLSGETTER)GetAppDomainGeneric, fUseGenericTlsGetters);
+
+ //---------------------------------------------------------------------------
+ // Switch general purpose TLS getter to more efficient one if possible
+ //---------------------------------------------------------------------------
+
+ // Make sure that the TLS index is allocated
+ CExecutionEngine::CheckThreadState(0, FALSE);
+
+ DWORD masterSlotIndex = CExecutionEngine::GetTlsIndex();
+ POPTIMIZEDTLSGETTER pGetter = MakeOptimizedTlsGetter(masterSlotIndex, (PVOID)ClrFlsGetBlock, TLS_GETTER_MAX_SIZE);
+ __ClrFlsGetBlock = pGetter ? pGetter : ClrFlsGetBlockDirect;
+#else
+ __ClrFlsGetBlock = (POPTIMIZEDTLSGETTER) CExecutionEngine::GetTlsData;
+#endif // FEATURE_IMPLICIT_TLS
+
+ IfFailThrow(Thread::CLRSetThreadStackGuarantee(Thread::STSGuarantee_Force));
+
+ ThreadStore::InitThreadStore();
+
+ // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
+ // If you remove this flag, we will switch to preemptive mode when entering
+ // g_DeadlockAwareCrst, which means all functions that enter it will become
+ // GC_TRIGGERS. (This includes all uses of CrstHolder.) So be sure
+ // to update the contracts if you remove this flag.
+ g_DeadlockAwareCrst.Init(CrstDeadlockDetection, CRST_UNSAFE_ANYMODE);
+
+#ifdef _DEBUG
+ // Randomize OBJREF_HASH to handle hash collision.
+ Thread::OBJREF_HASH = OBJREF_TABSIZE - (DbgGetEXETimeStamp()%10);
+#endif // _DEBUG
+}
+
+
+//************************************************************************
+// Thread members
+//************************************************************************
+
+
+#if defined(_DEBUG) && defined(TRACK_SYNC)
+
+// One outstanding synchronization held by this thread:
+struct Dbg_TrackSyncEntry
+{
+ UINT_PTR m_caller;
+ AwareLock *m_pAwareLock;
+
+ BOOL Equiv (UINT_PTR caller, void *pAwareLock)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_caller == caller) && (m_pAwareLock == pAwareLock);
+ }
+
+ BOOL Equiv (void *pAwareLock)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_pAwareLock == pAwareLock);
+ }
+};
+
+// Each thread has a stack that tracks all enter and leave requests
+struct Dbg_TrackSyncStack : public Dbg_TrackSync
+{
+ enum
+ {
+ MAX_TRACK_SYNC = 20, // adjust stack depth as necessary
+ };
+
+ void EnterSync (UINT_PTR caller, void *pAwareLock);
+ void LeaveSync (UINT_PTR caller, void *pAwareLock);
+
+ Dbg_TrackSyncEntry m_Stack [MAX_TRACK_SYNC];
+ UINT_PTR m_StackPointer;
+ BOOL m_Active;
+
+ Dbg_TrackSyncStack() : m_StackPointer(0),
+ m_Active(TRUE)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+};
+
+// ensure that registers are preserved across this call
+#ifdef _MSC_VER
+#pragma optimize("", off)
+#endif
+// A pain to do all this from ASM, but watch out for trashed registers
+EXTERN_C void EnterSyncHelper (UINT_PTR caller, void *pAwareLock)
+{
+ BEGIN_ENTRYPOINT_THROWS;
+ WRAPPER_NO_CONTRACT;
+ GetThread()->m_pTrackSync->EnterSync(caller, pAwareLock);
+ END_ENTRYPOINT_THROWS;
+
+}
+EXTERN_C void LeaveSyncHelper (UINT_PTR caller, void *pAwareLock)
+{
+ BEGIN_ENTRYPOINT_THROWS;
+ WRAPPER_NO_CONTRACT;
+ GetThread()->m_pTrackSync->LeaveSync(caller, pAwareLock);
+ END_ENTRYPOINT_THROWS;
+
+}
+#ifdef _MSC_VER
+#pragma optimize("", on)
+#endif
+
+void Dbg_TrackSyncStack::EnterSync(UINT_PTR caller, void *pAwareLock)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::EnterSync, IP=%p, Recursion=%d, MonitorHeld=%d, HoldingThread=%p.\n",
+ caller,
+ ((AwareLock*)pAwareLock)->m_Recursion,
+ ((AwareLock*)pAwareLock)->m_MonitorHeld,
+ ((AwareLock*)pAwareLock)->m_HoldingThread );
+
+ if (m_Active)
+ {
+ if (m_StackPointer >= MAX_TRACK_SYNC)
+ {
+ _ASSERTE(!"Overflowed synchronization stack checking. Disabling");
+ m_Active = FALSE;
+ return;
+ }
+ }
+ m_Stack[m_StackPointer].m_caller = caller;
+ m_Stack[m_StackPointer].m_pAwareLock = (AwareLock *) pAwareLock;
+
+ m_StackPointer++;
+
+}
+
+void Dbg_TrackSyncStack::LeaveSync(UINT_PTR caller, void *pAwareLock)
+{
+ WRAPPER_NO_CONTRACT;
+
+ STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::LeaveSync, IP=%p, Recursion=%d, MonitorHeld=%d, HoldingThread=%p.\n",
+ caller,
+ ((AwareLock*)pAwareLock)->m_Recursion,
+ ((AwareLock*)pAwareLock)->m_MonitorHeld,
+ ((AwareLock*)pAwareLock)->m_HoldingThread );
+
+ if (m_Active)
+ {
+ if (m_StackPointer == 0)
+ _ASSERTE(!"Underflow in leaving synchronization");
+ else
+ if (m_Stack[m_StackPointer - 1].Equiv(pAwareLock))
+ {
+ m_StackPointer--;
+ }
+ else
+ {
+ for (int i=m_StackPointer - 2; i>=0; i--)
+ {
+ if (m_Stack[i].Equiv(pAwareLock))
+ {
+ _ASSERTE(!"Locks are released out of order. This might be okay...");
+ memcpy(&m_Stack[i], &m_Stack[i+1],
+ sizeof(m_Stack[0]) * (m_StackPointer - i - 1));
+
+ return;
+ }
+ }
+ _ASSERTE(!"Trying to release a synchronization lock which isn't held");
+ }
+ }
+}
+
+#endif // TRACK_SYNC
+
+
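+// Process-wide seed used to give each new Thread a distinct starting value for its
+// hash code generator (advanced by the LCG in the Thread constructor below).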
+static DWORD dwHashCodeSeed = 123456789;
+
+#ifdef _DEBUG
+void CheckADValidity(AppDomain* pDomain, DWORD ADValidityKind)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ FORBID_FAULT;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //
+ // Note: this apparently checks whether any one of the supplied conditions is satisfied, rather
+ // than checking that *all* of them are satisfied. One would have expected it to assert all of the
+ // conditions, but it does not.
+ //
+
+ CONTRACT_VIOLATION(FaultViolation);
+ if (::GetAppDomain()==pDomain)
+ return;
+ if ((ADValidityKind & ADV_DEFAULTAD) &&
+ pDomain->IsDefaultDomain())
+ return;
+ if ((ADValidityKind & ADV_ITERATOR) &&
+ pDomain->IsHeldByIterator())
+ return;
+ if ((ADValidityKind & ADV_CREATING) &&
+ pDomain->IsBeingCreated())
+ return;
+ if ((ADValidityKind & ADV_COMPILATION) &&
+ pDomain->IsCompilationDomain())
+ return;
+ if ((ADValidityKind & ADV_FINALIZER) &&
+ IsFinalizerThread())
+ return;
+ if ((ADValidityKind & ADV_ADUTHREAD) &&
+ IsADUnloadHelperThread())
+ return;
+ if ((ADValidityKind & ADV_RUNNINGIN) &&
+ pDomain->IsRunningIn(GetThread()))
+ return;
+ if ((ADValidityKind & ADV_REFTAKER) &&
+ pDomain->IsHeldByRefTaker())
+ return;
+
+ _ASSERTE(!"Appdomain* can be invalid");
+}
+#endif
+
+
+//--------------------------------------------------------------------
+// Thread construction
+//--------------------------------------------------------------------
+Thread::Thread()
+{
+ CONTRACTL {
+ THROWS;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ m_pFrame = FRAME_TOP;
+ m_pUnloadBoundaryFrame = NULL;
+
+ m_fPreemptiveGCDisabled = 0;
+
+#ifdef _DEBUG
+ m_ulForbidTypeLoad = 0;
+ m_GCOnTransitionsOK = TRUE;
+#endif
+
+#ifdef ENABLE_CONTRACTS
+ m_pClrDebugState = NULL;
+ m_ulEnablePreemptiveGCCount = 0;
+#endif
+
+ // Initialize data members related to thread statics
+ m_pTLBTable = NULL;
+ m_TLBTableSize = 0;
+ m_pThreadLocalBlock = NULL;
+
+ m_dwLockCount = 0;
+ m_dwBeginLockCount = 0;
+ m_dwBeginCriticalRegionCount = 0;
+ m_dwCriticalRegionCount = 0;
+ m_dwThreadAffinityCount = 0;
+
+#ifdef _DEBUG
+ dbg_m_cSuspendedThreads = 0;
+ dbg_m_cSuspendedThreadsWithoutOSLock = 0;
+ m_Creater.ResetThreadId();
+ m_dwUnbreakableLockCount = 0;
+#endif
+
+ m_dwForbidSuspendThread = 0;
+
+ // Initialize lock state
+ m_pHead = &m_embeddedEntry;
+ m_embeddedEntry.pNext = m_pHead;
+ m_embeddedEntry.pPrev = m_pHead;
+ m_embeddedEntry.dwLLockID = 0;
+ m_embeddedEntry.dwULockID = 0;
+ m_embeddedEntry.wReaderLevel = 0;
+
+ m_pBlockingLock = NULL;
+
+ m_alloc_context.init();
+ m_thAllocContextObj = 0;
+
+ m_UserInterrupt = 0;
+ m_WaitEventLink.m_Next = NULL;
+ m_WaitEventLink.m_LinkSB.m_pNext = NULL;
+ m_ThreadHandle = INVALID_HANDLE_VALUE;
+ m_ThreadHandleForClose = INVALID_HANDLE_VALUE;
+ m_ThreadHandleForResume = INVALID_HANDLE_VALUE;
+ m_WeOwnThreadHandle = FALSE;
+
+#ifdef _DEBUG
+ m_ThreadId = UNINITIALIZED_THREADID;
+#endif //_DEBUG
+
+ // Initialize this variable to a very different start value for each thread
+ // Using linear congruential generator from Knuth Vol. 2, p. 102, line 24
+ dwHashCodeSeed = dwHashCodeSeed * 1566083941 + 1;
+ m_dwHashCodeSeed = dwHashCodeSeed;
+
+ m_hijackLock = FALSE;
+
+ m_OSThreadId = 0;
+ m_Priority = INVALID_THREAD_PRIORITY;
+ m_ExternalRefCount = 1;
+ m_UnmanagedRefCount = 0;
+ m_State = TS_Unstarted;
+ m_StateNC = TSNC_Unknown;
+
+ // It can't be a LongWeakHandle because we zero stuff out of the exposed
+ // object as it is finalized. At that point, calls to GetCurrentThread()
+ // had better get a new one!
+ m_ExposedObject = CreateGlobalShortWeakHandle(NULL);
+
+ GlobalShortWeakHandleHolder exposedObjectHolder(m_ExposedObject);
+
+ m_StrongHndToExposedObject = CreateGlobalStrongHandle(NULL);
+ GlobalStrongHandleHolder strongHndToExposedObjectHolder(m_StrongHndToExposedObject);
+
+ m_LastThrownObjectHandle = NULL;
+ m_ltoIsUnhandled = FALSE;
+ #if HAS_TRACK_CXX_EXCEPTION_CODE_HACK // Is C++ exception code tracking turned on?
+ m_LastCxxSEHExceptionCode = 0;
+ #endif // HAS_TRACK_CXX_EXCEPTION_CODE_HACK
+
+
+ m_AbortReason = NULL;
+
+ m_debuggerFilterContext = NULL;
+ m_debuggerCantStop = 0;
+ m_debuggerWord = NULL;
+ m_fInteropDebuggingHijacked = FALSE;
+ m_profilerCallbackState = 0;
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+ m_dwProfilerEvacuationCounter = 0;
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+ m_pProfilerFilterContext = NULL;
+
+ m_CacheStackBase = 0;
+ m_CacheStackLimit = 0;
+ m_CacheStackSufficientExecutionLimit = 0;
+
+ m_LastAllowableStackAddress= 0;
+ m_ProbeLimit = 0;
+
+#ifdef _DEBUG
+ m_pCleanedStackBase = NULL;
+#endif
+
+#ifdef STACK_GUARDS_DEBUG
+ m_pCurrentStackGuard = NULL;
+#endif
+
+#ifdef FEATURE_HIJACK
+ m_ppvHJRetAddrPtr = (VOID**) 0xCCCCCCCCCCCCCCCC;
+ m_pvHJRetAddr = (VOID*) 0xCCCCCCCCCCCCCCCC;
+ X86_ONLY(m_LastRedirectIP = 0);
+ X86_ONLY(m_SpinCount = 0);
+#endif // FEATURE_HIJACK
+
+#if defined(_DEBUG) && defined(TRACK_SYNC)
+ m_pTrackSync = new Dbg_TrackSyncStack;
+ NewHolder<Dbg_TrackSyncStack> trackSyncHolder(static_cast<Dbg_TrackSyncStack*>(m_pTrackSync));
+#endif // TRACK_SYNC
+
+ m_RequestedStackSize = 0;
+ m_PreventAsync = 0;
+ m_PreventAbort = 0;
+ m_nNestedMarshalingExceptions = 0;
+ m_pDomain = NULL;
+#ifdef FEATURE_COMINTEROP
+ m_fDisableComObjectEagerCleanup = false;
+#endif //FEATURE_COMINTEROP
+ m_Context = NULL;
+ m_TraceCallCount = 0;
+ m_ThrewControlForThread = 0;
+ m_OSContext = NULL;
+ m_ThreadTasks = (ThreadTasks)0;
+ m_pLoadLimiter= NULL;
+ m_pLoadingFile = NULL;
+
+ // The state and the tasks must be 32-bit aligned for atomicity to be guaranteed.
+ _ASSERTE((((size_t) &m_State) & 3) == 0);
+ _ASSERTE((((size_t) &m_ThreadTasks) & 3) == 0);
+
+ // Track perf counter for the logical thread object.
+ COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsLogical++);
+
+ // On all callbacks, call the trap code, which we now have
+ // wired to cause a GC. Thus we will do a GC on all Transition Frame Transitions (and more).
+ if (GCStress<cfg_transition>::IsEnabled())
+ {
+ m_State = (ThreadState) (m_State | TS_GCOnTransitions);
+ }
+
+ //m_pSharedStaticData = NULL;
+ //m_pUnsharedStaticData = NULL;
+ //m_pStaticDataHash = NULL;
+ //m_pSDHCrst = NULL;
+
+ m_fSecurityStackwalk = FALSE;
+
+ m_AbortType = EEPolicy::TA_None;
+ m_AbortInfo = 0;
+ m_AbortEndTime = MAXULONGLONG;
+ m_RudeAbortEndTime = MAXULONGLONG;
+ m_AbortController = 0;
+ m_AbortRequestLock = 0;
+ m_fRudeAbortInitiated = FALSE;
+
+ m_pIOCompletionContext = NULL;
+
+#ifdef _DEBUG
+ m_fRudeAborted = FALSE;
+ m_dwAbortPoint = 0;
+#endif
+
+#ifdef STRESS_THREAD
+ m_stressThreadCount = -1;
+#endif
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ m_pHostTask = NULL;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ m_pFiberData = NULL;
+
+ m_TaskId = INVALID_TASK_ID;
+ m_dwConnectionId = INVALID_CONNECTION_ID;
+
+#ifdef _DEBUG
+ DWORD_PTR *ttInfo = NULL;
+ size_t nBytes = MaxThreadRecord *
+ (sizeof(FiberSwitchInfo)-sizeof(size_t)+MaxStackDepth*sizeof(size_t));
+ if (CLRTaskHosted() || g_pConfig->SaveThreadInfo()) {
+ ttInfo = new DWORD_PTR[(nBytes/sizeof(DWORD_PTR))*ThreadTrackInfo_Max];
+ memset(ttInfo,0,nBytes*ThreadTrackInfo_Max);
+ }
+ for (DWORD i = 0; i < ThreadTrackInfo_Max; i ++)
+ {
+ m_FiberInfoIndex[i] = 0;
+ m_pFiberInfo[i] = (FiberSwitchInfo*)((DWORD_PTR)ttInfo + i*nBytes);
+ }
+ NewArrayHolder<DWORD_PTR> fiberInfoHolder(ttInfo);
+#endif
+
+ m_OSContext = new CONTEXT();
+ NewHolder<CONTEXT> contextHolder(m_OSContext);
+
+ if (CLRTaskHosted())
+ {
+ m_pSavedRedirectContext = new CONTEXT();
+ }
+ else
+ {
+ m_pSavedRedirectContext = NULL;
+ }
+ NewHolder<CONTEXT> savedRedirectContextHolder(m_pSavedRedirectContext);
+
+#ifdef FEATURE_COMINTEROP
+ m_pRCWStack = new RCWStackHeader();
+#endif
+
+ m_pCerPreparationState = NULL;
+#ifdef _DEBUG
+ m_bGCStressing = FALSE;
+ m_bUniqueStacking = FALSE;
+#endif
+
+ m_pPendingTypeLoad = NULL;
+
+#ifdef FEATURE_PREJIT
+ m_pIBCInfo = NULL;
+#endif
+
+ m_dwAVInRuntimeImplOkayCount = 0;
+
+#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) // GCCOVER
+ m_fPreemptiveGCDisabledForGCStress = false;
+#endif
+
+#ifdef _DEBUG
+ m_pHelperMethodFrameCallerList = (HelperMethodFrameCallerList*)-1;
+#endif
+
+ m_dwHostTaskRefCount = 0;
+
+ m_pExceptionDuringStartup = NULL;
+
+#ifdef HAVE_GCCOVER
+ m_pbDestCode = NULL;
+ m_pbSrcCode = NULL;
+#ifdef _TARGET_X86_
+ m_pLastAVAddress = NULL;
+#endif // _TARGET_X86_
+#endif // HAVE_GCCOVER
+
+ m_fCompletionPortDrained = FALSE;
+
+ m_WorkingOnThreadContext = NULL;
+ m_debuggerActivePatchSkipper = NULL;
+ m_dwThreadHandleBeingUsed = 0;
+ SetProfilerCallbacksAllowed(TRUE);
+
+ m_pCreatingThrowableForException = NULL;
+#ifdef _DEBUG
+ m_dwDisableAbortCheckCount = 0;
+#endif // _DEBUG
+
+#ifdef WIN64EXCEPTIONS
+ m_dwIndexClauseForCatch = 0;
+ m_sfEstablisherOfActualHandlerFrame.Clear();
+#endif // WIN64EXCEPTIONS
+
+ m_threadPoolCompletionCount = 0;
+
+ Thread *pThread = GetThread();
+ _ASSERTE(SystemDomain::System()->DefaultDomain()->GetDefaultContext());
+ InitContext();
+ _ASSERTE(m_Context);
+ if (pThread)
+ {
+ _ASSERTE(pThread->GetDomain() && pThread->GetDomain()->GetDefaultContext());
+ // Start off the new thread in the default context of
+ // the creating thread's appDomain. This could be changed by SetDelegate
+ SetKickOffDomainId(pThread->GetDomain()->GetId());
+ } else
+ SetKickOffDomainId((ADID)DefaultADID);
+
+ // Do not expose thread until it is fully constructed
+ g_pThinLockThreadIdDispenser->NewId(this, this->m_ThreadId);
+
+ //
+ // DO NOT ADD ADDITIONAL CONSTRUCTION AFTER THIS POINT.
+ // NewId() allows this Thread instance to be accessed via a Thread Id. Do not
+ // add additional construction after this point to prevent the race condition
+ // of accessing a partially constructed Thread via Thread Id lookup.
+ //
+
+ exposedObjectHolder.SuppressRelease();
+ strongHndToExposedObjectHolder.SuppressRelease();
+#if defined(_DEBUG) && defined(TRACK_SYNC)
+ trackSyncHolder.SuppressRelease();
+#endif
+#ifdef _DEBUG
+ fiberInfoHolder.SuppressRelease();
+#endif
+ contextHolder.SuppressRelease();
+ savedRedirectContextHolder.SuppressRelease();
+
+#ifndef FEATURE_LEAK_CULTURE_INFO
+ managedThreadCurrentCulture = NULL;
+ managedThreadCurrentUICulture = NULL;
+#endif // FEATURE_LEAK_CULTURE_INFO
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ m_ullProcessorUsageBaseline = 0;
+#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+#ifdef FEATURE_COMINTEROP
+ m_uliInitializeSpyCookie.QuadPart = 0ul;
+ m_fInitializeSpyRegistered = false;
+ m_pLastSTACtxCookie = NULL;
+#endif // FEATURE_COMINTEROP
+
+ m_fGCSpecial = FALSE;
+
+#if !defined(FEATURE_CORECLR)
+ m_wCPUGroup = 0;
+ m_pAffinityMask = 0;
+#endif
+
+ m_pAllLoggedTypes = NULL;
+}
+
+
+//--------------------------------------------------------------------
+// Failable initialization occurs here.
+//--------------------------------------------------------------------
+BOOL Thread::InitThread(BOOL fInternal)
+{
+ CONTRACTL {
+ THROWS;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTaskManager *provider = CorHost2::GetHostTaskManager();
+ if (provider) {
+ if (m_pHostTask == NULL)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ provider->GetCurrentTask(&m_pHostTask);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ // workaround wwl: finalizer thread is not created by SQL
+ if (m_pHostTask == NULL && !fInternal) {
+ ThrowHR(HOST_E_INVALIDOPERATION);
+ }
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ HANDLE hDup = INVALID_HANDLE_VALUE;
+ BOOL ret = TRUE;
+
+ // This message actually serves a purpose (which is why it is always run).
+ // The stress log is used during hijacking, when other threads can be suspended
+ // at arbitrary locations (including while holding a lock that NT uses to serialize
+ // all memory allocations). By sending a message now, we ensure that the stress
+ // log will not allocate memory at these critical times and thus avoid deadlock.
+ STRESS_LOG2(LF_ALWAYS, LL_ALWAYS, "SetupThread managed Thread %p Thread Id = %x\n", this, GetThreadId());
+
+ if ((m_State & TS_WeOwn) == 0)
+ {
+ COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cRecognizedThreads++);
+ }
+ else
+ {
+ COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsPhysical++);
+ }
+
+#ifndef FEATURE_PAL
+ // workaround: Remove this when we flow impersonation token to host.
+ ThreadAffinityHolder affinityHolder(FALSE);
+ BOOL reverted = FALSE;
+ HANDLE threadToken = INVALID_HANDLE_VALUE;
+#endif // !FEATURE_PAL
+
+ if (m_ThreadHandle == INVALID_HANDLE_VALUE)
+ {
+ // For WinCE, all clients have the same handle for a thread. Duplication is
+ // not possible. We make sure we never close this handle unless we created
+ // the thread (TS_WeOwn).
+ //
+ // For Win32, each client has its own handle. This is achieved by duplicating
+ // the pseudo-handle from ::GetCurrentThread(). Unlike WinCE, this service
+ // returns a pseudo-handle which is only useful for duplication. In this case
+ // each client is responsible for closing its own (duplicated) handle.
+ //
+ // We don't bother duplicating if WeOwn, because we created the handle in the
+ // first place.
+ // The Thread object is created when or after the physical thread has started running.
+ HANDLE curProcess = ::GetCurrentProcess();
+
+#ifndef FEATURE_PAL
+
+ // If we're impersonating on NT, then DuplicateHandle(GetCurrentThread()) is going to give us a handle with only
+ // THREAD_TERMINATE, THREAD_QUERY_INFORMATION, and THREAD_SET_INFORMATION. This doesn't include
+ // THREAD_SUSPEND_RESUME nor THREAD_GET_CONTEXT. We need to be able to suspend the thread, and we need to be
+ // able to get its context. Therefore, if we're impersonating, we revert to self, dup the handle, then
+ // re-impersonate before we leave this routine.
+ if (!RevertIfImpersonated(&reverted, &threadToken, &affinityHolder))
+ {
+ COMPlusThrowWin32();
+ }
+
+ class EnsureResetThreadToken
+ {
+ private:
+ BOOL m_NeedReset;
+ HANDLE m_threadToken;
+ public:
+ EnsureResetThreadToken(HANDLE threadToken, BOOL reverted)
+ {
+ m_threadToken = threadToken;
+ m_NeedReset = reverted;
+ }
+ ~EnsureResetThreadToken()
+ {
+ UndoRevert(m_NeedReset, m_threadToken);
+ if (m_threadToken != INVALID_HANDLE_VALUE)
+ {
+ CloseHandle(m_threadToken);
+ }
+ }
+ };
+
+ EnsureResetThreadToken resetToken(threadToken, reverted);
+
+#endif // !FEATURE_PAL
+
+ if (::DuplicateHandle(curProcess, ::GetCurrentThread(), curProcess, &hDup,
+ 0 /*ignored*/, FALSE /*inherit*/, DUPLICATE_SAME_ACCESS))
+ {
+ _ASSERTE(hDup != INVALID_HANDLE_VALUE);
+
+ SetThreadHandle(hDup);
+ m_WeOwnThreadHandle = TRUE;
+ }
+ else
+ {
+ COMPlusThrowWin32();
+ }
+ }
+
+ if ((m_State & TS_WeOwn) == 0)
+ {
+ if (!AllocHandles())
+ {
+ ThrowOutOfMemory();
+ }
+ }
+
+ _ASSERTE(HasValidThreadHandle());
+
+ m_random.Init();
+
+ // Set floating point mode to round to nearest
+#ifndef FEATURE_PAL
+#ifndef _TARGET_ARM64_
+ //ARM64TODO: remove the ifdef
+ (void) _controlfp_s( NULL, _RC_NEAR, _RC_CHOP|_RC_UP|_RC_DOWN|_RC_NEAR );
+#endif
+
+ m_pTEB = (struct _NT_TIB*)NtCurrentTeb();
+
+#endif // !FEATURE_PAL
+
+ if (m_CacheStackBase == 0)
+ {
+ _ASSERTE(m_CacheStackLimit == 0);
+ _ASSERTE(m_LastAllowableStackAddress == 0);
+ _ASSERTE(m_ProbeLimit == 0);
+ ret = SetStackLimits(fAll);
+ if (ret == FALSE)
+ {
+ ThrowOutOfMemory();
+ }
+
+ // We commit the thread's entire stack when it enters the runtime to allow us to be reliable in low memory
+ // situations. See the comments in front of Thread::CommitThreadStack() for more information.
+ ret = Thread::CommitThreadStack(this);
+ if (ret == FALSE)
+ {
+ ThrowOutOfMemory();
+ }
+ }
+
+ ret = Thread::AllocateIOCompletionContext();
+ if (!ret)
+ {
+ ThrowOutOfMemory();
+ }
+
+ _ASSERTE(ret); // every failure case for ret should throw.
+ return ret;
+}
+
+// Allocate all the handles. When we are kicking off a new thread, we can call
+// here before the thread starts running.
+BOOL Thread::AllocHandles()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(!m_SafeEvent.IsValid());
+ _ASSERTE(!m_UserSuspendEvent.IsValid());
+ _ASSERTE(!m_DebugSuspendEvent.IsValid());
+ _ASSERTE(!m_EventWait.IsValid());
+
+ BOOL fOK = TRUE;
+ EX_TRY {
+ // create a manual reset event for getting the thread to a safe point
+ m_SafeEvent.CreateManualEvent(FALSE);
+ m_UserSuspendEvent.CreateManualEvent(FALSE);
+ m_DebugSuspendEvent.CreateManualEvent(FALSE);
+ m_EventWait.CreateManualEvent(TRUE);
+ }
+ EX_CATCH {
+ fOK = FALSE;
+ if (!m_SafeEvent.IsValid()) {
+ m_SafeEvent.CloseEvent();
+ }
+
+ if (!m_UserSuspendEvent.IsValid()) {
+ m_UserSuspendEvent.CloseEvent();
+ }
+
+ if (!m_DebugSuspendEvent.IsValid()) {
+ m_DebugSuspendEvent.CloseEvent();
+ }
+
+ if (!m_EventWait.IsValid()) {
+ m_EventWait.CloseEvent();
+ }
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ return fOK;
+}
+
+
+//--------------------------------------------------------------------
+// This is the alternate path to SetupThread/InitThread. If we created
+// an unstarted thread, we have SetupUnstartedThread/HasStarted.
+//--------------------------------------------------------------------
+BOOL Thread::HasStarted(BOOL bRequiresTSL)
+{
+ CONTRACTL {
+ NOTHROW;
+ DISABLED(GC_NOTRIGGER);
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // @todo need a probe that tolerates not having a thread setup at all
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ _ASSERTE(!m_fPreemptiveGCDisabled); // can't use PreemptiveGCDisabled() here
+
+ // This is cheating a little. There is a pathway here from SetupThread, but only
+ // via IJW SystemDomain::RunDllMain. Normally SetupThread returns a thread in
+ // preemptive mode, ready for a transition. But in the IJW case, it can return a
+ // cooperative mode thread. RunDllMain handles this "surprise" correctly.
+ m_fPreemptiveGCDisabled = TRUE;
+
+ // Normally, HasStarted is called from the thread's entrypoint to introduce it to
+ // the runtime. But sometimes that thread is used for DLL_THREAD_ATTACH notifications
+ // that call into managed code. In that case, the second HasStarted call is
+ // redundant and should be ignored.
+ if (GetThread() == this)
+ return TRUE;
+
+
+ _ASSERTE(GetThread() == 0);
+ _ASSERTE(HasValidThreadHandle());
+
+ BOOL fKeepTLS = FALSE;
+ BOOL fCanCleanupCOMState = FALSE;
+ BOOL res = TRUE;
+
+ res = SetStackLimits(fAll);
+ if (res == FALSE)
+ {
+ m_pExceptionDuringStartup = Exception::GetOOMException();
+ goto FAILURE;
+ }
+
+ // We commit the thread's entire stack when it enters the runtime to allow us to be reliable in low memory
+    // situations. See the comments in front of Thread::CommitThreadStack() for more information.
+ res = Thread::CommitThreadStack(this);
+ if (res == FALSE)
+ {
+ m_pExceptionDuringStartup = Exception::GetOOMException();
+ goto FAILURE;
+ }
+
+ // If any exception happens during HasStarted, we will cache the exception in Thread::m_pExceptionDuringStartup
+ // which will be thrown in Thread.Start as an internal exception
+ EX_TRY
+ {
+ //
+ // Initialization must happen in the following order - hosts like SQL Server depend on this.
+ //
+ CExecutionEngine::SetupTLSForThread(this);
+
+ fCanCleanupCOMState = TRUE;
+ res = PrepareApartmentAndContext();
+ if (!res)
+ {
+ ThrowOutOfMemory();
+ }
+
+ InitThread(FALSE);
+
+ if (SetThread(this) == FALSE)
+ {
+ ThrowOutOfMemory();
+ }
+
+ if (SetAppDomain(m_pDomain) == FALSE)
+ {
+ ThrowOutOfMemory();
+ }
+
+#ifdef _DEBUG
+ AddFiberInfo(Thread::ThreadTrackInfo_Lifetime);
+#endif
+
+ SetupThreadForHost();
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (m_pHostTask)
+ {
+            // If we have notified the host of ICLRTask, the host will call code:ExitTask to release
+            // its reference to ICLRTask. Also, the host may call SwitchOut and SwitchIn.
+ // ExitTask needs Thread in TLS.
+ fKeepTLS = TRUE;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ ThreadStore::TransferStartedThread(this, bRequiresTSL);
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ if (g_fEnableARM)
+ {
+ QueryThreadProcessorUsage();
+ }
+#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+#ifdef FEATURE_EVENT_TRACE
+ ETW::ThreadLog::FireThreadCreated(this);
+#endif // FEATURE_EVENT_TRACE
+ }
+ EX_CATCH
+ {
+ if (__pException != NULL)
+ {
+ __pException.SuppressRelease();
+ m_pExceptionDuringStartup = __pException;
+ }
+ res = FALSE;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+FAILURE:
+ if (res == FALSE)
+ {
+ if (m_fPreemptiveGCDisabled)
+ {
+ m_fPreemptiveGCDisabled = FALSE;
+ }
+ _ASSERTE (HasThreadState(TS_Unstarted));
+
+ SetThreadState(TS_FailStarted);
+
+ if (GetThread() != NULL && IsAbortRequested())
+ UnmarkThreadForAbort(TAR_ALL);
+
+ if (!fKeepTLS)
+ {
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ //
+ // Undo our call to PrepareApartmentAndContext above, so we don't leak a CoInitialize
+ // If we're keeping TLS, then the host's call to ExitTask will clean this up instead.
+ //
+ if (fCanCleanupCOMState)
+ {
+ // The thread pointer in TLS may not be set yet, if we had a failure before we set it.
+ // So we'll set it up here (we'll unset it a few lines down).
+ if (SetThread(this) != FALSE)
+ {
+ CleanupCOMState();
+ }
+ }
+#endif
+ FastInterlockDecrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount);
+ // One of the components of OtherThreadsComplete() has changed, so check whether
+ // we should now exit the EE.
+ ThreadStore::CheckForEEShutdown();
+ DecExternalCount(/*holdingLock*/ !bRequiresTSL);
+ SetThread(NULL);
+ SetAppDomain(NULL);
+ }
+ }
+ else
+ {
+ FastInterlockOr((ULONG *) &m_State, TS_FullyInitialized);
+
+#ifdef DEBUGGING_SUPPORTED
+ //
+ // If we're debugging, let the debugger know that this
+ // thread is up and running now.
+ //
+ if (CORDebuggerAttached())
+ {
+ g_pDebugInterface->ThreadCreated(this);
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000, "ThreadCreated() not called due to CORDebuggerAttached() being FALSE for thread 0x%x\n", GetThreadId()));
+ }
+
+#endif // DEBUGGING_SUPPORTED
+
+#ifdef PROFILING_SUPPORTED
+ // If a profiler is running, let them know about the new thread.
+ //
+ // The call to IsGCSpecial is crucial to avoid a deadlock. See code:Thread::m_fGCSpecial for more
+ // information
+ if (!IsGCSpecial())
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
+ BOOL gcOnTransition = GC_ON_TRANSITIONS(FALSE); // disable GCStress 2 to avoid the profiler receiving a RuntimeThreadSuspended notification even before the ThreadCreated notification
+
+ {
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->ThreadCreated((ThreadID) this);
+ }
+
+ GC_ON_TRANSITIONS(gcOnTransition);
+
+ DWORD osThreadId = ::GetCurrentThreadId();
+ g_profControlBlock.pProfInterface->ThreadAssignedToOSThread(
+ (ThreadID) this, osThreadId);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ // Is there a pending user suspension?
+ if (m_State & TS_SuspendUnstarted)
+ {
+ BOOL doSuspend = FALSE;
+
+ {
+ ThreadStoreLockHolder TSLockHolder;
+
+ // Perhaps we got resumed before it took effect?
+ if (m_State & TS_SuspendUnstarted)
+ {
+ FastInterlockAnd((ULONG *) &m_State, ~TS_SuspendUnstarted);
+ SetupForSuspension(TS_UserSuspendPending);
+ MarkForSuspension(TS_UserSuspendPending);
+ doSuspend = TRUE;
+ }
+ }
+
+ if (doSuspend)
+ {
+ GCX_PREEMP();
+ WaitSuspendEvents();
+ }
+ }
+ }
+
+ return res;
+}
+
+BOOL Thread::AllocateIOCompletionContext()
+{
+ WRAPPER_NO_CONTRACT;
+ PIOCompletionContext pIOC = new (nothrow) IOCompletionContext;
+
+ if(pIOC != NULL)
+ {
+ pIOC->lpOverlapped = NULL;
+ m_pIOCompletionContext = pIOC;
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+}
+
+VOID Thread::FreeIOCompletionContext()
+{
+ WRAPPER_NO_CONTRACT;
+ if (m_pIOCompletionContext != NULL)
+ {
+ PIOCompletionContext pIOC = (PIOCompletionContext) m_pIOCompletionContext;
+ delete pIOC;
+ m_pIOCompletionContext = NULL;
+ }
+}
+
+void Thread::HandleThreadStartupFailure()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(GetThread() != NULL);
+
+ struct ProtectArgs
+ {
+ OBJECTREF pThrowable;
+ OBJECTREF pReason;
+ } args;
+ memset(&args, 0, sizeof(ProtectArgs));
+
+ GCPROTECT_BEGIN(args);
+
+ MethodTable *pMT = MscorlibBinder::GetException(kThreadStartException);
+ args.pThrowable = AllocateObject(pMT);
+
+ MethodDescCallSite exceptionCtor(METHOD__THREAD_START_EXCEPTION__EX_CTOR);
+
+ if (m_pExceptionDuringStartup)
+ {
+ args.pReason = CLRException::GetThrowableFromException(m_pExceptionDuringStartup);
+ Exception::Delete(m_pExceptionDuringStartup);
+ m_pExceptionDuringStartup = NULL;
+ }
+
+ ARG_SLOT args1[] = {
+ ObjToArgSlot(args.pThrowable),
+ ObjToArgSlot(args.pReason),
+ };
+ exceptionCtor.Call(args1);
+
+ GCPROTECT_END(); //Prot
+
+ RaiseTheExceptionInternalOnly(args.pThrowable, FALSE);
+}
+
+#ifndef FEATURE_PAL
+BOOL RevertIfImpersonated(BOOL *bReverted, HANDLE *phToken, ThreadAffinityHolder *pTAHolder)
+{
+ WRAPPER_NO_CONTRACT;
+
+ BOOL bImpersonated = OpenThreadToken(GetCurrentThread(), // we are assuming that if this call fails,
+ TOKEN_IMPERSONATE, // we are not impersonating. There is no win32
+ TRUE, // api to figure this out. The only alternative
+ phToken); // is to use NtCurrentTeb->IsImpersonating().
+ if (bImpersonated)
+ {
+ pTAHolder->Acquire();
+ *bReverted = RevertToSelf();
+ return *bReverted;
+
+ }
+ return TRUE;
+}
+
+void UndoRevert(BOOL bReverted, HANDLE hToken)
+{
+ if (bReverted)
+ {
+ if (!SetThreadToken(NULL, hToken))
+ {
+ _ASSERT("Undo Revert -> SetThreadToken failed");
+ STRESS_LOG1(LF_EH, LL_INFO100, "UndoRevert/SetThreadToken failed for hToken = %d\n",hToken);
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY);
+ }
+ }
+ return;
+}
+#endif // !FEATURE_PAL
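+
+// A minimal sketch of the intended pairing (assumed usage; CreateNewThread
+// below is the real call site). DoPrivilegedWork is a hypothetical stand-in
+// for whatever must run without the caller's impersonation token.
+#if 0 // illustrative only, not compiled
+    ThreadAffinityHolder affinity(FALSE);
+    HandleHolder token;
+    BOOL reverted = FALSE;
+    if (RevertIfImpersonated(&reverted, &token, &affinity))
+    {
+        DoPrivilegedWork();          // runs without the caller's impersonation token
+        UndoRevert(reverted, token); // restore the token before returning
+    }
+#endif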
+
+
+// We don't want ::CreateThread() calls scattered throughout the source. So gather
+// them all here.
+
+BOOL Thread::CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+ BOOL bRet;
+
+    // This assert is here to prevent a bug in the future.
+    // CreateTask currently takes a DWORD, so we will downcast;
+    // if that interface changes to take a SIZE_T, this assert needs to be removed.
+ //
+ _ASSERTE(stackSize <= 0xFFFFFFFF);
+
+#ifndef FEATURE_PAL
+ ThreadAffinityHolder affinityHolder(FALSE);
+ HandleHolder token;
+ BOOL bReverted = FALSE;
+ bRet = RevertIfImpersonated(&bReverted, &token, &affinityHolder);
+ if (bRet != TRUE)
+ return bRet;
+#endif // !FEATURE_PAL
+
+ m_StateNC = (ThreadStateNoConcurrency)((ULONG)m_StateNC | TSNC_CLRCreatedThread);
+ if (!CLRTaskHosted()) {
+ bRet = CreateNewOSThread(stackSize, start, args);
+ }
+ else {
+ bRet = CreateNewHostTask(stackSize, start, args);
+ }
+#ifndef FEATURE_PAL
+ UndoRevert(bReverted, token);
+#endif // !FEATURE_PAL
+
+ return bRet;
+}
+
+
+// This is to avoid the 64KB/1MB aliasing problem present on Pentium 4 processors,
+// which can significantly impact performance with HyperThreading enabled
+DWORD __stdcall Thread::intermediateThreadProc(PVOID arg)
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_offset_counter++;
+ if (m_offset_counter * offset_multiplier > PAGE_SIZE)
+ m_offset_counter = 0;
+
+ (void)_alloca(m_offset_counter * offset_multiplier);
+
+ intermediateThreadParam* param = (intermediateThreadParam*)arg;
+
+ LPTHREAD_START_ROUTINE ThreadFcnPtr = param->lpThreadFunction;
+ PVOID args = param->lpArg;
+ delete param;
+
+ return ThreadFcnPtr(args);
+}
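+
+// Worked example of the staggering above (assuming, for illustration,
+// offset_multiplier == 128 and PAGE_SIZE == 0x1000): successive threads burn
+// 128, 256, 384, ... bytes via _alloca before calling the real entry point,
+// so their initial stack pointers stop colliding modulo 64KB -- the collision
+// that provokes the P4/HyperThreading aliasing stalls.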
+
+HANDLE Thread::CreateUtilityThread(Thread::StackSizeBucket stackSizeBucket, LPTHREAD_START_ROUTINE start, void *args, DWORD flags, DWORD* pThreadId)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // TODO: we should always use small stacks for most of these threads. For CLR 4, we're being conservative
+ // here because this is a last-minute fix.
+
+ SIZE_T stackSize;
+
+ switch (stackSizeBucket)
+ {
+ case StackSize_Small:
+ stackSize = 256 * 1024;
+ break;
+
+ case StackSize_Medium:
+ stackSize = 512 * 1024;
+ break;
+
+ default:
+ _ASSERTE(!"Bad stack size bucket");
+ case StackSize_Large:
+ stackSize = 1024 * 1024;
+ break;
+ }
+
+ flags |= STACK_SIZE_PARAM_IS_A_RESERVATION;
+
+ DWORD threadId;
+ HANDLE hThread = CreateThread(NULL, stackSize, start, args, flags, &threadId);
+
+ if (pThreadId)
+ *pThreadId = threadId;
+
+ return hThread;
+}
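+
+// Hedged usage sketch (the caller and entry point are hypothetical):
+#if 0 // illustrative only, not compiled
+    DWORD tid = 0;
+    HANDLE h = Thread::CreateUtilityThread(Thread::StackSize_Small,
+                                           DebuggerHelperEntry, // hypothetical LPTHREAD_START_ROUTINE
+                                           NULL, 0, &tid);
+    if (h != NULL)
+        ::CloseHandle(h); // the utility thread runs independently; release our handle
+#endif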
+
+#ifndef FEATURE_CORECLR
+/*
+ The following are copied from MSDN:
+ http://msdn.microsoft.com/library/default.asp?url=/library/en-us/dllproc/base/thread_stack_size.asp
+
+ To change the initially committed stack space, use the dwStackSize parameter of the CreateThread,
+ CreateRemoteThread, or CreateFiber function. This value is rounded up to the nearest page.
+ Generally, the reserve size is the default reserve size specified in the executable header.
+ However, if the initially committed size specified by dwStackSize is larger than the default reserve size,
+ the reserve size is this new commit size rounded up to the nearest multiple of 1 MB.
+
+ To change the reserved stack size, set the dwCreationFlags parameter of CreateThread or CreateRemoteThread
+ to STACK_SIZE_PARAM_IS_A_RESERVATION and use the dwStackSize parameter. In this case, the initially
+ committed size is the default size specified in the executable header.
+
+*/
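+
+// A worked example of the rules quoted above (illustrative numbers): with a
+// 1 MB default reserve and a 64KB default commit in the EXE header,
+//   CreateThread(dwStackSize = 0)                  -> reserve 1 MB, commit 64KB
+//   CreateThread(dwStackSize = 2 MB)               -> commit 2 MB, reserve rounded up from 2 MB
+//   CreateThread(dwStackSize = 2 MB,
+//                STACK_SIZE_PARAM_IS_A_RESERVATION) -> reserve 2 MB, commit 64KB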
+BOOL Thread::CheckThreadStackSize(SIZE_T *pSizeToCommitOrReserve,
+ BOOL isSizeToReserve // When TRUE, the previous argument is the stack size to reserve.
+ // Otherwise, it is the size to commit.
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+    // g_SystemInfo is a global SYSTEM_INFO struct
+ SIZE_T dwAllocSize = (SIZE_T)g_SystemInfo.dwAllocationGranularity;
+ SIZE_T dwPageSize = (SIZE_T)g_SystemInfo.dwPageSize;
+
+    // We don't want users creating threads with a stackSize request < 256K.
+    // This value may change up or down as we see fit, so we don't document it to users.
+ //
+ if(isSizeToReserve && 0x40000 > (*pSizeToCommitOrReserve))
+ {
+ *pSizeToCommitOrReserve = 0x40000;
+ }
+
+ *pSizeToCommitOrReserve = ALIGN(*pSizeToCommitOrReserve, dwAllocSize);
+
+ //
+ // Let's get the stack sizes from the PE file that started process.
+ //
+ SIZE_T ExeSizeOfStackReserve = 0;
+ SIZE_T ExeSizeOfStackCommit = 0;
+
+ if (!GetProcessDefaultStackSize(&ExeSizeOfStackReserve, &ExeSizeOfStackCommit))
+ return FALSE;
+
+    // Now let's decide which sizes the OS is going to use.
+ SIZE_T sizeToReserve = 0;
+ SIZE_T sizeToCommit = 0;
+
+ if (isSizeToReserve) {
+ // The passed-in *pSizeToCommitOrReserve is the stack size to reserve.
+ sizeToReserve = *pSizeToCommitOrReserve;
+        // The OS will use ExeSizeOfStackCommit as the committed size.
+ sizeToCommit = ExeSizeOfStackCommit;
+ }
+ else {
+ // The passed-in *pSizeToCommitOrReserve is the stack size to commit.
+ sizeToCommit = *pSizeToCommitOrReserve;
+        // The OS will use ExeSizeOfStackReserve as the reserved size.
+ sizeToReserve = ExeSizeOfStackReserve;
+
+ // However, if the initially committed size specified by dwStackSize is larger than
+ // the default reserve size, the reserve size is this new commit size rounded up to
+ // the nearest multiple of 1 MB.
+ if (sizeToCommit > ExeSizeOfStackReserve) {
+ sizeToReserve = ALIGN(sizeToCommit, 0x1000000);
+ }
+
+ if (!g_pConfig->GetDisableCommitThreadStack())
+ {
+ // We will commit the full stack when a thread starts. But if the PageFile is full, we may hit
+ // stack overflow at random places during startup.
+ // Therefore if we are unlikely to obtain space from PageFile, we will fail creation of a thread.
+
+ *pSizeToCommitOrReserve = sizeToReserve - HARD_GUARD_REGION_SIZE;
+
+            // The OS's behavior is not consistent on whether the guard page is marked when we ask the OS
+            // to commit the stack up to the 2nd-to-last page.
+            // On Win2k3, the 2nd-to-last page is marked with the guard bit.
+            // On WinXP, the 2nd-to-last page is not marked with the guard bit.
+            // To be safe, we will not commit the 2nd-to-last page.
+ *pSizeToCommitOrReserve -= HARD_GUARD_REGION_SIZE;
+            // To make it more interesting, on X64, if we request to commit the stack except the last two pages,
+            // the OS commits the whole stack and marks the last two pages as guard pages.
+ *pSizeToCommitOrReserve -= 2*HARD_GUARD_REGION_SIZE;
+ }
+ }
+
+    // Ok, we now know what sizes the OS will use to create the thread.
+ // Check to see if we have the room for guard pages.
+ return ThreadWillCreateGuardPage(sizeToReserve, sizeToCommit);
+}
+#endif // FEATURE_CORECLR
+
+BOOL Thread::GetProcessDefaultStackSize(SIZE_T* reserveSize, SIZE_T* commitSize)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ //
+ // Let's get the stack sizes from the PE file that started process.
+ //
+ static SIZE_T ExeSizeOfStackReserve = 0;
+ static SIZE_T ExeSizeOfStackCommit = 0;
+
+ static BOOL fSizesGot = FALSE;
+
+#ifndef FEATURE_PAL
+ if (!fSizesGot)
+ {
+ HINSTANCE hInst = WszGetModuleHandle(NULL);
+ _ASSERTE(hInst); // WszGetModuleHandle should never fail on the module that started the process.
+ EX_TRY
+ {
+ PEDecoder pe(hInst);
+ pe.GetEXEStackSizes(&ExeSizeOfStackReserve, &ExeSizeOfStackCommit);
+ fSizesGot = TRUE;
+ }
+ EX_CATCH
+ {
+ fSizesGot = FALSE;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+#endif // !FEATURE_PAL
+
+ if (!fSizesGot) {
+ //return some somewhat-reasonable numbers
+ if (NULL != reserveSize) *reserveSize = 256*1024;
+ if (NULL != commitSize) *commitSize = 256*1024;
+ return FALSE;
+ }
+
+ if (NULL != reserveSize) *reserveSize = ExeSizeOfStackReserve;
+ if (NULL != commitSize) *commitSize = ExeSizeOfStackCommit;
+ return TRUE;
+}
+
+BOOL Thread::CreateNewOSThread(SIZE_T sizeToCommitOrReserve, LPTHREAD_START_ROUTINE start, void *args)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ DWORD ourId = 0;
+ HANDLE h = NULL;
+ DWORD dwCreationFlags = CREATE_SUSPENDED;
+
+#ifdef FEATURE_CORECLR
+ dwCreationFlags |= STACK_SIZE_PARAM_IS_A_RESERVATION;
+#else
+ if(sizeToCommitOrReserve != 0)
+ {
+ dwCreationFlags |= STACK_SIZE_PARAM_IS_A_RESERVATION;
+
+ //
+ // In this case we also force CommitThreadStack to commit the whole stack, even if we're configured not to do so.
+ // The config value is used to reduce the resource usage for default stack allocations; for non-default allocations,
+ // we assume the user has given us the correct size (and they're really going to need it). This way we don't
+ // need to offer a Thread constructor that takes a confusing "stack size param is a commit size" parameter.
+ //
+ SetThreadStateNC(TSNC_ForceStackCommit);
+ }
+
+ // Check that we will have (reserved and never committed) guard pages at the end of the stack.
+ // If this call returns false then it will lead to an OOM exception on return.
+ // This is reasonable since a large stack was requested and we couldn't get it.
+ if(!CheckThreadStackSize(&sizeToCommitOrReserve,
+ (sizeToCommitOrReserve != 0)))
+ {
+ return FALSE;
+ }
+#endif
+
+ intermediateThreadParam* lpThreadArgs = new (nothrow) intermediateThreadParam;
+ if (lpThreadArgs == NULL)
+ {
+ return FALSE;
+ }
+ NewHolder<intermediateThreadParam> argHolder(lpThreadArgs);
+
+ // Make sure we have all our handles, in case someone tries to suspend us
+ // as we are starting up.
+ if (!AllocHandles())
+ {
+ // OS is out of handles/memory?
+ return FALSE;
+ }
+
+ lpThreadArgs->lpThreadFunction = start;
+ lpThreadArgs->lpArg = args;
+
+ h = ::CreateThread(NULL /*=SECURITY_ATTRIBUTES*/,
+ sizeToCommitOrReserve,
+ intermediateThreadProc,
+ lpThreadArgs,
+ dwCreationFlags,
+ &ourId);
+
+ if (h == NULL)
+ return FALSE;
+
+ argHolder.SuppressRelease();
+
+ _ASSERTE(!m_fPreemptiveGCDisabled); // leave in preemptive until HasStarted.
+
+ SetThreadHandle(h);
+ m_WeOwnThreadHandle = TRUE;
+
+ // Before we do the resume, we need to take note of the new ThreadId. This
+    // is necessary because -- before the thread starts executing at KickOffThread --
+    // it may perform some DllMain DLL_THREAD_ATTACH notifications. These could
+    // call into managed code. During the subsequent SetupThread, we need to
+ // perform the Thread::HasStarted call instead of going through the normal
+ // 'new thread' pathway.
+ _ASSERTE(GetOSThreadId() == 0);
+ _ASSERTE(ourId != 0);
+
+ m_OSThreadId = ourId;
+
+ FastInterlockIncrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount);
+
+#ifdef _DEBUG
+ m_Creater.SetThreadId();
+#endif
+
+ return TRUE;
+}
+
+
+
+BOOL Thread::CreateNewHostTask(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ // Make sure we have all our handles, in case someone tries to suspend us
+ // as we are starting up.
+
+ if (!AllocHandles())
+ {
+ return FALSE;
+ }
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTask *pHostTask = NULL;
+
+ if (CorHost2::GetHostTaskManager()) {
+        // If you change this interface to pass a SIZE_T stackSize, you must
+        // remove the _ASSERTE(stackSize <= 0xFFFFFFFF); from
+        // CreateNewThread.
+ //
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = CorHost2::GetHostTaskManager()->CreateTask((DWORD)stackSize, start, args, &pHostTask);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (hr != S_OK)
+ return FALSE;
+ }
+
+ _ASSERTE(!m_fPreemptiveGCDisabled); // leave in preemptive until HasStarted.
+
+ // Before we do the resume, we need to take note of the new ThreadId. This
+    // is necessary because -- before the thread starts executing at KickOffThread --
+    // it may perform some DllMain DLL_THREAD_ATTACH notifications. These could
+    // call into managed code. During the subsequent SetupThread, we need to
+ // perform the Thread::HasStarted call instead of going through the normal
+ // 'new thread' pathway.
+ _ASSERTE(m_pHostTask == NULL);
+ _ASSERTE(pHostTask != NULL);
+
+ m_pHostTask = pHostTask;
+
+ FastInterlockIncrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount);
+
+#ifdef _DEBUG
+ m_Creater.SetThreadId();
+#endif
+
+ return TRUE;
+#else // !FEATURE_INCLUDE_ALL_INTERFACES
+ return FALSE;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+
+//
+// #threadDestruction
+//
+// General comments on thread destruction.
+//
+// The C++ Thread object can survive beyond the time when the Win32 thread has died.
+// This is important if an exposed object has been created for this thread. The
+// exposed object will survive until it is GC'ed.
+//
+// A client like an exposed object can place an external reference count on that
+// object. We also place a reference count on it when we construct it, and we lose
+// that count when the thread finishes doing useful work (OnThreadTerminate).
+//
+// One way OnThreadTerminate() is called is when the thread finishes doing useful
+// work. This case always happens on the correct thread.
+//
+// The other way OnThreadTerminate() is called is during product shutdown. We do
+// a "best effort" to eliminate all threads except the Main thread before shutdown
+// happens. But there may be some background threads or external threads still
+// running.
+//
+// When the final reference count disappears, we destruct. Until then, the thread
+// remains in the ThreadStore, but is marked as "Dead".
+//<TODO>
+// @TODO cwb: for a typical shutdown, only background threads are still around.
+// Should we interrupt them? What about the non-typical shutdown?</TODO>
+
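+// A minimal sketch of the refcount lifecycle described above (illustrative
+// only; DoWorkWith is a hypothetical helper):
+#if 0 // not compiled
+    Thread *pWorker = /* a Thread obtained from the ThreadStore */;
+    pWorker->IncExternalCount();          // pin the C++ Thread object (and its exposed object)
+    DoWorkWith(pWorker);                  // safe: the external count keeps it alive
+    pWorker->DecExternalCount(FALSE);     // not holding the store lock; may destruct the Thread
+    // pWorker must not be touched after the count may have reached zero.
+#endif
+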
+int Thread::IncExternalCount()
+{
+ CONTRACTL {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ Thread *pCurThread = GetThread();
+
+ _ASSERTE(m_ExternalRefCount > 0);
+ int retVal = FastInterlockIncrement((LONG*)&m_ExternalRefCount);
+ // If we have an exposed object and the refcount is greater than one
+ // we must make sure to keep a strong handle to the exposed object
+ // so that we keep it alive even if nobody has a reference to it.
+ if (pCurThread && ((*((void**)m_ExposedObject)) != NULL))
+ {
+ // The exposed object exists and needs a strong handle so check
+ // to see if it has one.
+ // Only a managed thread can setup StrongHnd.
+ if ((*((void**)m_StrongHndToExposedObject)) == NULL)
+ {
+ GCX_COOP();
+ // Store the object in the strong handle.
+ StoreObjectInHandle(m_StrongHndToExposedObject, ObjectFromHandle(m_ExposedObject));
+ }
+ }
+
+ return retVal;
+}
+
+int Thread::DecExternalCount(BOOL holdingLock)
+{
+ CONTRACTL {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ // Note that it's possible to get here with a NULL current thread (during
+ // shutdown of the thread manager).
+ Thread *pCurThread = GetThread();
+ _ASSERTE (pCurThread == NULL || IsAtProcessExit()
+ || (!holdingLock && !ThreadStore::HoldingThreadStore(pCurThread))
+ || (holdingLock && ThreadStore::HoldingThreadStore(pCurThread)));
+
+ BOOL ToggleGC = FALSE;
+ BOOL SelfDelete = FALSE;
+
+ int retVal;
+
+ // Must synchronize count and exposed object handle manipulation. We use the
+ // thread lock for this, which implies that we must be in pre-emptive mode
+ // to begin with and avoid any activity that would invoke a GC (this
+ // acquires the thread store lock).
+ if (pCurThread)
+ {
+ // TODO: we would prefer to use a GC Holder here, however it is hard
+ // to get the case where we're deleting this thread correct given
+        // the current macros. We want to suppress the release of the holder
+ // here which puts us in Preemptive mode, and also the switch to
+ // Cooperative mode below, but since both holders will be named
+ // the same thing (due to the generic nature of the macro) we can
+        // not use GCX_*_SUPPRESS_RELEASE() for 2 holders in the same scope
+ // b/c they will both apply simply to the most narrowly scoped
+ // holder.
+
+ ToggleGC = pCurThread->PreemptiveGCDisabled();
+ if (ToggleGC)
+ {
+ pCurThread->EnablePreemptiveGC();
+ }
+ }
+
+ GCX_ASSERT_PREEMP();
+
+ ThreadStoreLockHolder tsLock(!holdingLock);
+
+ _ASSERTE(m_ExternalRefCount >= 1);
+ _ASSERTE(!holdingLock ||
+ ThreadStore::s_pThreadStore->m_Crst.GetEnterCount() > 0 ||
+ IsAtProcessExit());
+
+ retVal = FastInterlockDecrement((LONG*)&m_ExternalRefCount);
+
+ if (retVal == 0)
+ {
+ HANDLE h = GetThreadHandle();
+ if (h == INVALID_HANDLE_VALUE)
+ {
+ h = m_ThreadHandleForClose;
+ m_ThreadHandleForClose = INVALID_HANDLE_VALUE;
+ }
+        // Cannot assert like this. We have already removed the Unstarted bit.
+ //_ASSERTE (IsUnstarted() || h != INVALID_HANDLE_VALUE);
+ if (h != INVALID_HANDLE_VALUE && m_WeOwnThreadHandle)
+ {
+ ::CloseHandle(h);
+ SetThreadHandle(INVALID_HANDLE_VALUE);
+ }
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (m_pHostTask) {
+ ReleaseHostTask();
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ // Switch back to cooperative mode to manipulate the thread.
+ if (pCurThread)
+ {
+ // TODO: we would prefer to use GCX_COOP here, see comment above.
+ pCurThread->DisablePreemptiveGC();
+ }
+
+ GCX_ASSERT_COOP();
+
+ // during process detach the thread might still be in the thread list
+ // if it hasn't seen its DLL_THREAD_DETACH yet. Use the following
+ // tweak to decide if the thread has terminated yet.
+ if (!HasValidThreadHandle())
+ {
+ SelfDelete = this == pCurThread;
+ m_ExceptionState.FreeAllStackTraces();
+ if (SelfDelete) {
+ SetThread(NULL);
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_Lifetime);
+#endif
+ }
+ delete this;
+ }
+
+ tsLock.Release();
+
+ // It only makes sense to restore the GC mode if we didn't just destroy
+ // our own thread object.
+ if (pCurThread && !SelfDelete && !ToggleGC)
+ {
+ pCurThread->EnablePreemptiveGC();
+ }
+
+ // Cannot use this here b/c it creates a holder named the same as GCX_ASSERT_COOP
+ // in the same scope above...
+ //
+ // GCX_ASSERT_PREEMP()
+
+ return retVal;
+ }
+ else if (pCurThread == NULL)
+ {
+ // We're in shutdown, too late to be worrying about having a strong
+ // handle to the exposed thread object, we've already performed our
+ // final GC.
+ tsLock.Release();
+
+ return retVal;
+ }
+ else
+ {
+ // Check to see if the external ref count reaches exactly one. If this
+ // is the case and we have an exposed object then it is that exposed object
+ // that is holding a reference to us. To make sure that we are not the
+ // ones keeping the exposed object alive we need to remove the strong
+ // reference we have to it.
+ if ((retVal == 1) && ((*((void**)m_StrongHndToExposedObject)) != NULL))
+ {
+ // Switch back to cooperative mode to manipulate the object.
+
+                // We don't want to switch back to COOP until we let go of the lock;
+                // however, we are allowed to call StoreObjectInHandle here in preemptive
+                // mode because we are setting the value to NULL.
+ CONTRACT_VIOLATION(ModeViolation);
+
+ // Clear the handle and leave the lock.
+                // We do not have to call DisablePreemptiveGC here, because
+ // we just want to put NULL into a handle.
+ StoreObjectInHandle(m_StrongHndToExposedObject, NULL);
+
+ tsLock.Release();
+
+ // Switch back to the initial GC mode.
+ if (ToggleGC)
+ {
+ pCurThread->DisablePreemptiveGC();
+ }
+
+ GCX_ASSERT_COOP();
+
+ return retVal;
+ }
+ }
+
+ tsLock.Release();
+
+ // Switch back to the initial GC mode.
+ if (ToggleGC)
+ {
+ pCurThread->DisablePreemptiveGC();
+ }
+
+ return retVal;
+}
+
+
+
+//--------------------------------------------------------------------
+// Destruction. This occurs after the associated native thread
+// has died.
+//--------------------------------------------------------------------
+Thread::~Thread()
+{
+ CONTRACTL {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ // TODO: enable this
+ //_ASSERTE(GetThread() != this);
+ _ASSERTE(m_ThrewControlForThread == 0);
+
+ // AbortRequest is coupled with TrapReturningThread.
+ // We should have unmarked the thread for abort.
+ // !!! Can not assert here. If a thread has no managed code on stack
+ // !!! we leave the g_TrapReturningThread set so that the thread will be
+ // !!! aborted if it enters managed code.
+ //_ASSERTE(!IsAbortRequested());
+
+ // We should not have the Thread marked for abort. But if we have
+ // we need to unmark it so that g_TrapReturningThreads is decremented.
+ if (IsAbortRequested())
+ {
+ UnmarkThreadForAbort(TAR_ALL);
+ }
+
+#if defined(_DEBUG) && defined(TRACK_SYNC)
+ _ASSERTE(IsAtProcessExit() || ((Dbg_TrackSyncStack *) m_pTrackSync)->m_StackPointer == 0);
+ delete m_pTrackSync;
+#endif // TRACK_SYNC
+
+ _ASSERTE(IsDead() || IsUnstarted() || IsAtProcessExit());
+
+ if (m_WaitEventLink.m_Next != NULL && !IsAtProcessExit())
+ {
+ WaitEventLink *walk = &m_WaitEventLink;
+ while (walk->m_Next) {
+ ThreadQueue::RemoveThread(this, (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1));
+ StoreEventToEventStore (walk->m_Next->m_EventWait);
+ }
+ m_WaitEventLink.m_Next = NULL;
+ }
+
+ if (m_StateNC & TSNC_ExistInThreadStore) {
+ BOOL ret;
+ ret = ThreadStore::RemoveThread(this);
+ _ASSERTE(ret);
+ }
+
+#ifdef _DEBUG
+ m_pFrame = (Frame *)POISONC;
+#endif
+
+ // Update Perfmon counters.
+ COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsLogical--);
+
+ // Current recognized threads are non-runtime threads that are alive and ran under the
+ // runtime. Check whether this Thread was one of them.
+ if ((m_State & TS_WeOwn) == 0)
+ {
+ COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cRecognizedThreads--);
+ }
+ else
+ {
+ COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsPhysical--);
+ }
+
+ // Normally we shouldn't get here with a valid thread handle; however if SetupThread
+ // failed (due to an OOM for example) then we need to CloseHandle the thread
+ // handle if we own it.
+ if (m_WeOwnThreadHandle && (GetThreadHandle() != INVALID_HANDLE_VALUE))
+ {
+ CloseHandle(GetThreadHandle());
+ }
+
+ if (m_SafeEvent.IsValid())
+ {
+ m_SafeEvent.CloseEvent();
+ }
+ if (m_UserSuspendEvent.IsValid())
+ {
+ m_UserSuspendEvent.CloseEvent();
+ }
+ if (m_DebugSuspendEvent.IsValid())
+ {
+ m_DebugSuspendEvent.CloseEvent();
+ }
+ if (m_EventWait.IsValid())
+ {
+ m_EventWait.CloseEvent();
+ }
+
+ FreeIOCompletionContext();
+
+ if (m_OSContext)
+ delete m_OSContext;
+
+ if (GetSavedRedirectContext())
+ {
+ delete GetSavedRedirectContext();
+ SetSavedRedirectContext(NULL);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (m_pRCWStack)
+ delete m_pRCWStack;
+#endif
+
+ if (m_pExceptionDuringStartup)
+ {
+ Exception::Delete (m_pExceptionDuringStartup);
+ }
+
+ ClearContext();
+
+ if (!IsAtProcessExit())
+ {
+ // Destroy any handles that we're using to hold onto exception objects
+ SafeSetThrowables(NULL);
+
+ DestroyShortWeakHandle(m_ExposedObject);
+ DestroyStrongHandle(m_StrongHndToExposedObject);
+ }
+
+ g_pThinLockThreadIdDispenser->DisposeId(GetThreadId());
+
+ //Ensure DeleteThreadStaticData was executed
+ _ASSERTE(m_pThreadLocalBlock == NULL);
+ _ASSERTE(m_pTLBTable == NULL);
+ _ASSERTE(m_TLBTableSize == 0);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (m_pHostTask) {
+ ReleaseHostTask();
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#ifdef FEATURE_PREJIT
+ if (m_pIBCInfo) {
+ delete m_pIBCInfo;
+ }
+#endif
+
+#ifdef _DEBUG
+ if (m_pFiberInfo) {
+ delete [] (DWORD_PTR*)m_pFiberInfo[0];
+ }
+#endif
+
+#ifdef FEATURE_EVENT_TRACE
+ // Destruct the thread local type cache for allocation sampling
+ if(m_pAllLoggedTypes) {
+ ETW::TypeSystemLog::DeleteTypeHashNoLock(&m_pAllLoggedTypes);
+ }
+#endif // FEATURE_EVENT_TRACE
+
+ // Wait for another thread to leave its loop in DeadlockAwareLock::TryBeginEnterLock
+ CrstHolder lock(&g_DeadlockAwareCrst);
+}
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+void Thread::BaseCoUninitialize()
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_SO_INTOLERANT;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+
+ _ASSERTE(GetThread() == this);
+
+ BEGIN_SO_TOLERANT_CODE(this);
+ // BEGIN_SO_TOLERANT_CODE wraps a __try/__except around this call, so if the OS were to allow
+ // an exception to leak through to us, we'll catch it.
+ ::CoUninitialize();
+ END_SO_TOLERANT_CODE;
+
+}// BaseCoUninitialize
+
+#ifdef FEATURE_COMINTEROP
+void Thread::BaseWinRTUninitialize()
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_SO_INTOLERANT;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+
+ _ASSERTE(WinRTSupported());
+ _ASSERTE(GetThread() == this);
+ _ASSERTE(IsWinRTInitialized());
+
+ BEGIN_SO_TOLERANT_CODE(this);
+ RoUninitialize();
+ END_SO_TOLERANT_CODE;
+}
+#endif // FEATURE_COMINTEROP
+
+void Thread::CoUninitialize()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ // Running threads might have performed a CoInitialize which must
+ // now be balanced.
+ BOOL needsUninitialize = IsCoInitialized()
+#ifdef FEATURE_COMINTEROP
+ || IsWinRTInitialized()
+#endif // FEATURE_COMINTEROP
+ ;
+
+ if (!IsAtProcessExit() && needsUninitialize)
+ {
+ GCX_PREEMP();
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ if (IsCoInitialized())
+ {
+ BaseCoUninitialize();
+ FastInterlockAnd((ULONG *)&m_State, ~TS_CoInitialized);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (IsWinRTInitialized())
+ {
+ _ASSERTE(WinRTSupported());
+ BaseWinRTUninitialize();
+ ResetWinRTInitialized();
+ }
+#endif // FEATURE_COMINTEROP
+ }
+}
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+void Thread::CleanupDetachedThreads()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!ThreadStore::HoldingThreadStore());
+
+ ThreadStoreLockHolder threadStoreLockHolder;
+
+ Thread *thread = ThreadStore::GetAllThreadList(NULL, 0, 0);
+
+ STRESS_LOG0(LF_SYNC, LL_INFO1000, "T::CDT called\n");
+
+ while (thread != NULL)
+ {
+ Thread *next = ThreadStore::GetAllThreadList(thread, 0, 0);
+
+ if (thread->IsDetached() && thread->m_UnmanagedRefCount == 0)
+ {
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, "T::CDT - detaching thread 0x%p\n", thread);
+
+ // Unmark that the thread is detached while we have the
+ // thread store lock. This will ensure that no other
+ // thread will race in here and try to delete it, too.
+ FastInterlockAnd((ULONG*)&(thread->m_State), ~TS_Detached);
+ FastInterlockDecrement(&m_DetachCount);
+ if (!thread->IsBackground())
+ FastInterlockDecrement(&m_ActiveDetachCount);
+
+ // If the debugger is attached, then we need to unlock the
+ // thread store before calling OnThreadTerminate. That
+ // way, we won't be holding the thread store lock if we
+ // need to block sending a detach thread event.
+ BOOL debuggerAttached =
+#ifdef DEBUGGING_SUPPORTED
+ CORDebuggerAttached();
+#else // !DEBUGGING_SUPPORTED
+ FALSE;
+#endif // !DEBUGGING_SUPPORTED
+
+ if (debuggerAttached)
+ ThreadStore::UnlockThreadStore();
+
+ thread->OnThreadTerminate(debuggerAttached ? FALSE : TRUE);
+
+#ifdef DEBUGGING_SUPPORTED
+ if (debuggerAttached)
+ {
+ ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
+
+ // We remember the next Thread in the thread store
+ // list before deleting the current one. But we can't
+ // use that Thread pointer now that we release the
+ // thread store lock in the middle of the loop. We
+ // have to start from the beginning of the list every
+ // time. If two threads T1 and T2 race into
+ // CleanupDetachedThreads, then T1 will grab the first
+ // Thread on the list marked for deletion and release
+ // the lock. T2 will grab the second one on the
+ // list. T2 may complete destruction of its Thread,
+ // then T1 might re-acquire the thread store lock and
+ // try to use the next Thread in the thread store. But
+ // T2 just deleted that next Thread.
+ thread = ThreadStore::GetAllThreadList(NULL, 0, 0);
+ }
+ else
+#endif // DEBUGGING_SUPPORTED
+ {
+ thread = next;
+ }
+ }
+ else if (thread->HasThreadState(TS_Finalized))
+ {
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, "T::CDT - finalized thread 0x%p\n", thread);
+
+ thread->ResetThreadState(TS_Finalized);
+ // We have finalized the managed Thread object. Now it is time to clean up the unmanaged part
+ thread->DecExternalCount(TRUE);
+ thread = next;
+ }
+ else
+ {
+ thread = next;
+ }
+ }
+
+ s_fCleanFinalizedThread = FALSE;
+}
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+void Thread::CleanupCOMState()
+{
+ CONTRACTL {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_COMINTEROP
+ if (GetFinalApartment() == Thread::AS_InSTA)
+ ReleaseRCWsInCachesNoThrow(GetCurrentCtxCookie());
+#endif // FEATURE_COMINTEROP
+
+ // Running threads might have performed a CoInitialize which must
+    // now be balanced. However, only the thread that called CoInitialize can
+ // call CoUninitialize.
+
+ BOOL needsUninitialize = IsCoInitialized()
+#ifdef FEATURE_COMINTEROP
+ || IsWinRTInitialized()
+#endif // FEATURE_COMINTEROP
+ ;
+
+ if (needsUninitialize)
+ {
+ GCX_PREEMP();
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ if (IsCoInitialized())
+ {
+ BaseCoUninitialize();
+ ResetCoInitialized();
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (IsWinRTInitialized())
+ {
+ _ASSERTE(WinRTSupported());
+ BaseWinRTUninitialize();
+ ResetWinRTInitialized();
+ }
+#endif // FEATURE_COMINTEROP
+ }
+}
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+// See general comments on thread destruction (code:#threadDestruction) above.
+void Thread::OnThreadTerminate(BOOL holdingLock)
+{
+ CONTRACTL {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ // #ReportDeadOnThreadTerminate
+ // Caller should have put the TS_ReportDead bit on by now.
+ // We don't want any windows after the exit event but before the thread is marked dead.
+ // If a debugger attached during such a window (or even took a dump at the exit event),
+ // then it may not realize the thread is dead.
+ // So ensure we mark the thread as dead before we send the tool notifications.
+ // The TS_ReportDead bit will cause the debugger to view this as TS_Dead.
+ _ASSERTE(HasThreadState(TS_ReportDead));
+
+ // Should not use OSThreadId:
+    // OSThreadId may change for the current thread if the thread is blocked and rescheduled
+    // by the host.
+ Thread *pCurrentThread = GetThread();
+ DWORD CurrentThreadID = pCurrentThread?pCurrentThread->GetThreadId():0;
+ DWORD ThisThreadID = GetThreadId();
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ // If the currently running thread is the thread that died and it is an STA thread, then we
+ // need to release all the RCW's in the current context. However, we cannot do this if we
+ // are in the middle of process detach.
+ if (!IsAtProcessExit() && this == GetThread())
+ {
+ CleanupCOMState();
+ }
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+ if (g_fEEShutDown != 0)
+ {
+ // We have started shutdown. Not safe to touch CLR state.
+ return;
+ }
+
+ // We took a count during construction, and we rely on the count being
+ // non-zero as we terminate the thread here.
+ _ASSERTE(m_ExternalRefCount > 0);
+
+ // The thread is no longer running. It's important that we zero any general OBJECTHANDLE's
+ // on this Thread object. That's because we need the managed Thread object to be subject to
+ // GC and yet any HANDLE is opaque to the GC when it comes to collecting cycles. If e.g. the
+ // Thread's AbortReason (which is an arbitrary object) contains transitively a reference back
+ // to the Thread, then we have an uncollectible cycle. When the thread is executing, nothing
+ // can be collected anyway. But now that we stop running the cycle concerns us.
+ //
+ // It's important that we only use OBJECTHANDLE's that are retrievable while the thread is
+ // still running. That's what allows us to zero them here with impunity:
+ {
+ // No handles to clean up in the m_ExceptionState
+ _ASSERTE(!m_ExceptionState.IsExceptionInProgress());
+
+ GCX_COOP();
+
+ // Destroy the LastThrown handle (and anything that violates the above assert).
+ SafeSetThrowables(NULL);
+
+ // Cleaning up the AbortReason is tricky, since the handle is only valid if the ADID is valid
+ // ...and we can only perform this operation if other threads aren't racing to update these
+ // values on our thread asynchronously.
+ ClearAbortReason();
+
+ // Free all structures related to thread statics for this thread
+ DeleteThreadStaticData();
+
+#ifdef FEATURE_LEAK_CULTURE_INFO
+ //Clear the references which could create cycles
+ // This allows the GC to collect them
+ THREADBASEREF thread = (THREADBASEREF) GetExposedObjectRaw();
+ if (thread != NULL)
+ {
+ thread->ResetCulture();
+ }
+#endif
+ }
+
+ if (GCHeap::IsGCHeapInitialized())
+ {
+ // Guaranteed to NOT be a shutdown case, because we tear down the heap before
+ // we tear down any threads during shutdown.
+ if (ThisThreadID == CurrentThreadID)
+ {
+ GCX_COOP();
+ GCHeap::GetGCHeap()->FixAllocContext(&m_alloc_context, FALSE, NULL, NULL);
+ m_alloc_context.init();
+ }
+ }
+
+ // We switch a thread to dead when it has finished doing useful work. But it
+ // remains in the thread store so long as someone keeps it alive. An exposed
+ // object will do this (it releases the refcount in its finalizer). If the
+ // thread is never released, we have another look during product shutdown and
+ // account for the unreleased refcount of the uncollected exposed object:
+ if (IsDead())
+ {
+ GCX_COOP();
+
+ _ASSERTE(IsAtProcessExit());
+ ClearContext();
+ if (m_ExposedObject != NULL)
+ DecExternalCount(holdingLock); // may destruct now
+ }
+ else
+ {
+#ifdef DEBUGGING_SUPPORTED
+ //
+ // If we're debugging, let the debugger know that this thread is
+ // gone.
+ //
+ // There is a race here where the debugger could have attached after
+ // we checked (and thus didn't release the lock). In this case,
+ // we can't call out to the debugger or we risk a deadlock.
+ //
+ if (!holdingLock && CORDebuggerAttached())
+ {
+ g_pDebugInterface->DetachThread(this);
+ }
+#endif // DEBUGGING_SUPPORTED
+
+#ifdef PROFILING_SUPPORTED
+ // If a profiler is present, then notify the profiler of thread destroy
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->ThreadDestroyed((ThreadID) this);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ if (!holdingLock)
+ {
+ LOG((LF_SYNC, INFO3, "OnThreadTerminate obtain lock\n"));
+ ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (ThisThreadID == CurrentThreadID && pCurrentThread)
+ {
+                // Before we call UnlockThreadStore, we remove our Thread from TLS.
+                // Therefore we will not dec the lock count on the thread.
+ DECTHREADLOCKCOUNTTHREAD(pCurrentThread);
+ }
+#endif
+ }
+
+ if (GCHeap::IsGCHeapInitialized() && ThisThreadID != CurrentThreadID)
+ {
+ // We must be holding the ThreadStore lock in order to clean up alloc context.
+ // We should never call FixAllocContext during GC.
+ GCHeap::GetGCHeap()->FixAllocContext(&m_alloc_context, FALSE, NULL, NULL);
+ m_alloc_context.init();
+ }
+
+ FastInterlockOr((ULONG *) &m_State, TS_Dead);
+ ThreadStore::s_pThreadStore->m_DeadThreadCount++;
+
+ if (IsUnstarted())
+ ThreadStore::s_pThreadStore->m_UnstartedThreadCount--;
+ else
+ {
+ if (IsBackground())
+ ThreadStore::s_pThreadStore->m_BackgroundThreadCount--;
+ }
+
+ FastInterlockAnd((ULONG *) &m_State, ~(TS_Unstarted | TS_Background));
+
+ //
+ // If this thread was told to trip for debugging between the
+ // sending of the detach event above and the locking of the
+ // thread store lock, then remove the flag and decrement the
+ // global trap returning threads count.
+ //
+ if (!IsAtProcessExit())
+ {
+ // A thread can't die during a GCPending, because the thread store's
+ // lock is held by the GC thread.
+ if (m_State & TS_DebugSuspendPending)
+ UnmarkForSuspension(~TS_DebugSuspendPending);
+
+ if (m_State & TS_UserSuspendPending)
+ UnmarkForSuspension(~TS_UserSuspendPending);
+
+ if (CurrentThreadID == ThisThreadID && IsAbortRequested())
+ {
+ UnmarkThreadForAbort(Thread::TAR_ALL);
+ }
+ }
+
+ if (GetThreadHandle() != INVALID_HANDLE_VALUE)
+ {
+ if (m_ThreadHandleForClose == INVALID_HANDLE_VALUE)
+ {
+ m_ThreadHandleForClose = GetThreadHandle();
+ }
+ SetThreadHandle (INVALID_HANDLE_VALUE);
+ }
+
+ m_OSThreadId = 0;
+
+ // If nobody else is holding onto the thread, we may destruct it here:
+ ULONG oldCount = DecExternalCount(TRUE);
+ // If we are shutting down the process, we only have one thread active in the
+ // system. So we can disregard all the reasons that hold this thread alive --
+ // TLS is about to be reclaimed anyway.
+ if (IsAtProcessExit())
+ while (oldCount > 0)
+ {
+ oldCount = DecExternalCount(TRUE);
+ }
+
+ // ASSUME THAT THE THREAD IS DELETED, FROM HERE ON
+
+ _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >= 0);
+ _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount >= 0);
+ _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
+ ThreadStore::s_pThreadStore->m_BackgroundThreadCount);
+ _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
+ ThreadStore::s_pThreadStore->m_UnstartedThreadCount);
+ _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
+ ThreadStore::s_pThreadStore->m_DeadThreadCount);
+
+ // One of the components of OtherThreadsComplete() has changed, so check whether
+ // we should now exit the EE.
+ ThreadStore::CheckForEEShutdown();
+
+ if (ThisThreadID == CurrentThreadID)
+ {
+ // NULL out the thread block in the tls. We can't do this if we aren't on the
+ // right thread. But this will only happen during a shutdown. And we've made
+ // a "best effort" to reduce to a single thread before we begin the shutdown.
+ SetThread(NULL);
+ SetAppDomain(NULL);
+ }
+
+ if (!holdingLock)
+ {
+ LOG((LF_SYNC, INFO3, "OnThreadTerminate releasing lock\n"));
+ ThreadSuspend::UnlockThreadStore(ThisThreadID == CurrentThreadID);
+ }
+ }
+}
+
+// Helper functions to check for duplicate handles. We only do this check if
+// a wait-for-multiple fails.
+int __cdecl compareHandles( const void *arg1, const void *arg2 )
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ HANDLE h1 = *(HANDLE*)arg1;
+ HANDLE h2 = *(HANDLE*)arg2;
+ return (h1 == h2) ? 0 : ((h1 < h2) ? -1 : 1);
+}
+
+BOOL CheckForDuplicateHandles(int countHandles, HANDLE *handles)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ qsort(handles,countHandles,sizeof(HANDLE),compareHandles);
+ for (int i=1; i < countHandles; i++)
+ {
+ if (handles[i-1] == handles[i])
+ return TRUE;
+ }
+ return FALSE;
+}
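+
+// Hedged usage sketch: this check only runs after a wait fails, to map
+// ERROR_INVALID_PARAMETER onto a more precise managed exception (see the
+// WAIT_FAILED handling in DoAppropriateWaitWorker below). Note that it
+// qsorts -- i.e. reorders -- the caller's handle array in place.
+#if 0 // illustrative only, not compiled
+    HANDLE h[3] = { hEventA, hEventA, hEventB };    // hypothetical handles
+    if (CheckForDuplicateHandles(3, h))
+        COMPlusThrow(kDuplicateWaitObjectException);
+#endif
+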
+//--------------------------------------------------------------------
+// Based on whether this thread has a message pump, do the appropriate
+// style of Wait.
+//--------------------------------------------------------------------
+DWORD Thread::DoAppropriateWait(int countHandles, HANDLE *handles, BOOL waitAll,
+ DWORD millis, WaitMode mode, PendingSync *syncState)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ INDEBUG(BOOL alertable = (mode & WaitMode_Alertable) != 0;);
+ _ASSERTE(alertable || syncState == 0);
+
+ struct Param
+ {
+ Thread *pThis;
+ int countHandles;
+ HANDLE *handles;
+ BOOL waitAll;
+ DWORD millis;
+ WaitMode mode;
+ DWORD dwRet;
+ } param;
+ param.pThis = this;
+ param.countHandles = countHandles;
+ param.handles = handles;
+ param.waitAll = waitAll;
+ param.millis = millis;
+ param.mode = mode;
+ param.dwRet = (DWORD) -1;
+
+ EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
+ pParam->dwRet = pParam->pThis->DoAppropriateWaitWorker(pParam->countHandles, pParam->handles, pParam->waitAll, pParam->millis, pParam->mode);
+ }
+ EE_FINALLY {
+ if (syncState) {
+ if (!GOT_EXCEPTION() &&
+ param.dwRet >= WAIT_OBJECT_0 && param.dwRet < (DWORD)(WAIT_OBJECT_0 + countHandles)) {
+ // This thread has been removed from syncblk waiting list by the signalling thread
+ syncState->Restore(FALSE);
+ }
+ else
+ syncState->Restore(TRUE);
+ }
+
+ _ASSERTE (param.dwRet != WAIT_IO_COMPLETION);
+ }
+ EE_END_FINALLY;
+
+ return(param.dwRet);
+}
+
+DWORD Thread::DoAppropriateWait(AppropriateWaitFunc func, void *args,
+ DWORD millis, WaitMode mode,
+ PendingSync *syncState)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ INDEBUG(BOOL alertable = (mode & WaitMode_Alertable) != 0;);
+ _ASSERTE(alertable || syncState == 0);
+
+ struct Param
+ {
+ Thread *pThis;
+ AppropriateWaitFunc func;
+ void *args;
+ DWORD millis;
+ WaitMode mode;
+ DWORD dwRet;
+ } param;
+ param.pThis = this;
+ param.func = func;
+ param.args = args;
+ param.millis = millis;
+ param.mode = mode;
+ param.dwRet = (DWORD) -1;
+
+ EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
+ pParam->dwRet = pParam->pThis->DoAppropriateWaitWorker(pParam->func, pParam->args, pParam->millis, pParam->mode);
+ }
+ EE_FINALLY {
+ if (syncState) {
+ if (!GOT_EXCEPTION() && WAIT_OBJECT_0 == param.dwRet) {
+ // This thread has been removed from syncblk waiting list by the signalling thread
+ syncState->Restore(FALSE);
+ }
+ else
+ syncState->Restore(TRUE);
+ }
+
+ _ASSERTE (WAIT_IO_COMPLETION != param.dwRet);
+ }
+ EE_END_FINALLY;
+
+ return(param.dwRet);
+}
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+//--------------------------------------------------------------------
+// helper to do message wait
+//--------------------------------------------------------------------
+DWORD MsgWaitHelper(int numWaiters, HANDLE* phEvent, BOOL bWaitAll, DWORD millis, BOOL bAlertable)
+{
+ STATIC_CONTRACT_THROWS;
+ // The true contract for GC trigger should be the following. But this puts a very strong restriction
+ // on contract for functions that call EnablePreemptiveGC.
+ //if (GetThread() && !ThreadStore::HoldingThreadStore(GetThread())) {GC_TRIGGERS;} else {GC_NOTRIGGER;}
+ STATIC_CONTRACT_SO_INTOLERANT;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ DWORD flags = 0;
+ DWORD dwReturn=WAIT_ABANDONED;
+
+ Thread* pThread = GetThread();
+ // If pThread is NULL, we'd better shut down.
+ if (pThread == NULL)
+ _ASSERTE (g_fEEShutDown);
+
+ DWORD lastError = 0;
+ BEGIN_SO_TOLERANT_CODE(pThread);
+
+ // If we're going to pump, we cannot use WAIT_ALL. That's because the wait would
+ // only be satisfied if a message arrives while the handles are signalled. If we
+ // want true WAIT_ALL, we need to fire up a different thread in the MTA and wait
+    // on its result. This isn't implemented yet.
+ //
+ // A change was added to WaitHandleNative::CorWaitMultipleNative to disable WaitAll
+ // in an STA with more than one handle.
+ if (bWaitAll)
+ {
+ if (numWaiters == 1)
+ bWaitAll = FALSE;
+
+        // The check that's supposed to prevent this condition from occurring, in WaitHandleNative::CorWaitMultipleNative,
+ // is unfortunately behind FEATURE_COMINTEROP instead of FEATURE_COMINTEROP_APARTMENT_SUPPORT.
+ // So on CoreCLR (where FEATURE_COMINTEROP is not currently defined) we can actually reach this point.
+ // We can't fix this, because it's a breaking change, so we just won't assert here.
+        // The result is that WaitAll on an STA thread in CoreCLR will behave strangely, as described above.
+#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORECLR)
+ else
+ _ASSERTE(!"WaitAll in an STA with more than one handle will deadlock");
+#endif
+ }
+
+ if (bWaitAll)
+ flags |= COWAIT_WAITALL;
+
+ if (bAlertable)
+ flags |= COWAIT_ALERTABLE;
+
+ HRESULT hr = S_OK;
+ hr = CoWaitForMultipleHandles(flags, millis, numWaiters, phEvent, &dwReturn);
+
+ if (hr == RPC_S_CALLPENDING)
+ {
+ dwReturn = WAIT_TIMEOUT;
+ }
+ else if (FAILED(hr))
+ {
+ // The service behaves differently on an STA vs. MTA in how much
+ // error information it propagates back, and in which form. We currently
+ // only get here in the STA case, so bias this logic that way.
+ dwReturn = WAIT_FAILED;
+ }
+ else
+    {
+ dwReturn += WAIT_OBJECT_0; // success -- bias back
+ }
+
+ lastError = ::GetLastError();
+
+ END_SO_TOLERANT_CODE;
+
+ // END_SO_TOLERANT_CODE overwrites lasterror. Let's reset it.
+ ::SetLastError(lastError);
+
+ return dwReturn;
+}
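+
+// Summary of the mapping above (as assumed from the CoWaitForMultipleHandles
+// contract): S_OK yields the signalled handle's index, re-biased by
+// WAIT_OBJECT_0; RPC_S_CALLPENDING maps to WAIT_TIMEOUT; any other failure
+// collapses to WAIT_FAILED, with less detail available on an STA.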
+
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+DWORD WaitForMultipleObjectsEx_SO_TOLERANT (DWORD nCount, HANDLE *lpHandles, BOOL bWaitAll,DWORD dwMilliseconds, BOOL bAlertable)
+{
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ DWORD dwRet = WAIT_FAILED;
+ DWORD lastError = 0;
+
+ BEGIN_SO_TOLERANT_CODE (GetThread ());
+ dwRet = ::WaitForMultipleObjectsEx (nCount, lpHandles, bWaitAll, dwMilliseconds, bAlertable);
+ lastError = ::GetLastError();
+ END_SO_TOLERANT_CODE;
+
+ // END_SO_TOLERANT_CODE overwrites lasterror. Let's reset it.
+ ::SetLastError(lastError);
+ return dwRet;
+}
+
+//--------------------------------------------------------------------
+// Do appropriate wait based on apartment state (STA or MTA)
+DWORD Thread::DoAppropriateAptStateWait(int numWaiters, HANDLE* pHandles, BOOL bWaitAll,
+ DWORD timeout, WaitMode mode)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ BOOL alertable = (mode & WaitMode_Alertable) != 0;
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ if (alertable && !GetDomain()->MustForceTrivialWaitOperations())
+ {
+ ApartmentState as = GetFinalApartment();
+ if (AS_InMTA != as)
+ {
+ return MsgWaitHelper(numWaiters, pHandles, bWaitAll, timeout, alertable);
+ }
+ }
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+ return WaitForMultipleObjectsEx_SO_TOLERANT(numWaiters, pHandles, bWaitAll, timeout, alertable);
+}
+
+// A helper called by our two flavors of DoAppropriateWaitWorker
+void Thread::DoAppropriateWaitWorkerAlertableHelper(WaitMode mode)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ // If thread abort is prevented, we do not want this thread to see thread abort and thread interrupt exception.
+ if (IsAbortPrevented())
+ {
+ return;
+ }
+
+ // A word about ordering for Interrupt. If someone tries to interrupt a thread
+ // that's in the interruptible state, we queue an APC. But if they try to interrupt
+ // a thread that's not in the interruptible state, we just record that fact. So
+ // we have to set TS_Interruptible before we test to see whether someone wants to
+ // interrupt us or else we have a race condition that causes us to skip the APC.
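+    //
+    // Timeline sketch of that race (illustrative): if we tested first and set
+    // the bit second, an interrupter could check TS_Interruptible in between,
+    // see it clear, merely record the request, and never queue the APC -- and
+    // we would then sleep without ever seeing the interrupt. Setting the bit
+    // first guarantees one side always observes the other.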
+ FastInterlockOr((ULONG *) &m_State, TS_Interruptible);
+
+ if (HasThreadStateNC(TSNC_InRestoringSyncBlock))
+ {
+ // The thread is restoring SyncBlock for Object.Wait.
+ ResetThreadStateNC(TSNC_InRestoringSyncBlock);
+ }
+ else
+ {
+ HandleThreadInterrupt((mode & WaitMode_ADUnload) != 0);
+
+ // Safe to clear the interrupted state, no APC could have fired since we
+ // reset m_UserInterrupt (which inhibits our APC callback from doing
+ // anything).
+ FastInterlockAnd((ULONG *) &m_State, ~TS_Interrupted);
+ }
+}
+
+void MarkOSAlertableWait()
+{
+ LIMITED_METHOD_CONTRACT;
+ GetThread()->SetThreadStateNC (Thread::TSNC_OSAlertableWait);
+}
+
+void UnMarkOSAlertableWait()
+{
+ LIMITED_METHOD_CONTRACT;
+ GetThread()->ResetThreadStateNC (Thread::TSNC_OSAlertableWait);
+}
+
+//--------------------------------------------------------------------
+// Based on whether this thread has a message pump, do the appropriate
+// style of Wait.
+//--------------------------------------------------------------------
+DWORD Thread::DoAppropriateWaitWorker(int countHandles, HANDLE *handles, BOOL waitAll,
+ DWORD millis, WaitMode mode)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ DWORD ret = 0;
+
+ BOOL alertable = (mode & WaitMode_Alertable) != 0;
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+ // Waits from SynchronizationContext.WaitHelper are always just WaitMode_IgnoreSyncCtx.
+ // So if we defer to a sync ctx, we will lose any extra bits. We must therefore not
+ // defer to a sync ctx if doing any non-default wait.
+ // If you're doing a default wait, but want to ignore sync ctx, specify WaitMode_IgnoreSyncCtx
+ // which will make mode != WaitMode_Alertable.
+ BOOL ignoreSyncCtx = (mode != WaitMode_Alertable);
+
+ if (GetDomain()->MustForceTrivialWaitOperations())
+ ignoreSyncCtx = TRUE;
+
+ // Unless the ignoreSyncCtx flag is set, first check to see if there is a synchronization
+ // context on the current thread and if there is, dispatch to it to do the wait.
+ // If the wait is non alertable we cannot forward the call to the sync context
+ // since fundamental parts of the system (such as the GC) rely on non alertable
+    // waits not running any managed code. Also if we are past the point in shutdown where we
+ // are allowed to run managed code then we can't forward the call to the sync context.
+ if (!ignoreSyncCtx && alertable && CanRunManagedCode(LoaderLockCheck::None)
+ && !HasThreadStateNC(Thread::TSNC_BlockedForShutdown))
+ {
+ GCX_COOP();
+
+ BOOL fSyncCtxPresent = FALSE;
+ OBJECTREF SyncCtxObj = NULL;
+ GCPROTECT_BEGIN(SyncCtxObj)
+ {
+ GetSynchronizationContext(&SyncCtxObj);
+ if (SyncCtxObj != NULL)
+ {
+ SYNCHRONIZATIONCONTEXTREF syncRef = (SYNCHRONIZATIONCONTEXTREF)SyncCtxObj;
+ if (syncRef->IsWaitNotificationRequired())
+ {
+ fSyncCtxPresent = TRUE;
+ ret = DoSyncContextWait(&SyncCtxObj, countHandles, handles, waitAll, millis);
+ }
+ }
+ }
+ GCPROTECT_END();
+
+ if (fSyncCtxPresent)
+ return ret;
+ }
+#endif // #ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+
+ // Before going to pre-emptive mode the thread needs to be flagged as waiting for
+ // the debugger. This used to be accomplished by the TS_Interruptible flag but that
+ // doesn't work reliably, see DevDiv Bugs 699245. Some methods call in here already in
+ // COOP mode so we set the bit before the transition. For the calls that are already
+ // in pre-emptive mode those are still buggy. This is only a partial fix.
+ BOOL isCoop = PreemptiveGCDisabled();
+ ThreadStateNCStackHolder tsNC(isCoop && alertable, TSNC_DebuggerSleepWaitJoin);
+
+ GCX_PREEMP();
+
+ if (alertable)
+ {
+ DoAppropriateWaitWorkerAlertableHelper(mode);
+ }
+
+ LeaveRuntimeHolder holder((size_t)WaitForMultipleObjectsEx);
+ StateHolder<MarkOSAlertableWait,UnMarkOSAlertableWait> OSAlertableWait(alertable);
+
+ ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);
+
+ ULONGLONG dwStart = 0, dwEnd;
+retry:
+ if (millis != INFINITE)
+ {
+ dwStart = CLRGetTickCount64();
+ }
+
+ ret = DoAppropriateAptStateWait(countHandles, handles, waitAll, millis, mode);
+
+ if (ret == WAIT_IO_COMPLETION)
+ {
+ _ASSERTE (alertable);
+
+ if (m_State & TS_Interrupted)
+ {
+ HandleThreadInterrupt(mode & WaitMode_ADUnload);
+ }
+ // We could be woken by some spurious APC or an EE APC queued to
+ // interrupt us. In the latter case the TS_Interrupted bit will be set
+ // in the thread state bits. Otherwise we just go back to sleep again.
+ if (millis != INFINITE)
+ {
+ dwEnd = CLRGetTickCount64();
+ if (dwEnd >= dwStart + millis)
+ {
+ ret = WAIT_TIMEOUT;
+ goto WaitCompleted;
+ }
+ else
+ {
+ millis -= (DWORD)(dwEnd - dwStart);
+ }
+ }
+ goto retry;
+ }
+ _ASSERTE((ret >= WAIT_OBJECT_0 && ret < (WAIT_OBJECT_0 + (DWORD)countHandles)) ||
+ (ret >= WAIT_ABANDONED && ret < (WAIT_ABANDONED + (DWORD)countHandles)) ||
+ (ret == WAIT_TIMEOUT) || (ret == WAIT_FAILED));
+ // countHandles is used as an unsigned -- it should never be negative.
+ _ASSERTE(countHandles >= 0);
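+
+ // A worked example of the index math above, per documented
+ // WaitForMultipleObjectsEx semantics: with countHandles == 3, a signalled
+ // handles[1] yields ret == WAIT_OBJECT_0 + 1, and an abandoned mutex at
+ // handles[2] yields ret == WAIT_ABANDONED + 2.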
+
+ // We support precisely one WAIT_FAILED case: when we attempt to wait on a
+ // thread handle and the thread is in the process of dying, we might get an
+ // invalid handle substatus. Turn this into a successful wait.
+ // There are three cases to consider:
+ // 1) Only waiting on one handle: return success right away.
+ // 2) Waiting for all handles to be signalled: retry the wait without the
+ // affected handle.
+ // 3) Waiting for one of multiple handles to be signalled: return with the
+ // first handle that is either signalled or has become invalid.
+ if (ret == WAIT_FAILED)
+ {
+ DWORD errorCode = ::GetLastError();
+ if (errorCode == ERROR_INVALID_PARAMETER)
+ {
+ if (CheckForDuplicateHandles(countHandles, handles))
+ COMPlusThrow(kDuplicateWaitObjectException);
+ else
+ COMPlusThrowHR(HRESULT_FROM_WIN32(errorCode));
+ }
+ else if (errorCode == ERROR_ACCESS_DENIED)
+ {
+ // A Win32 ACL could prevent us from waiting on the handle.
+ COMPlusThrow(kUnauthorizedAccessException);
+ }
+ else if (errorCode == ERROR_NOT_ENOUGH_MEMORY)
+ {
+ ThrowOutOfMemory();
+ }
+ else if (errorCode != ERROR_INVALID_HANDLE)
+ {
+ ThrowWin32(errorCode);
+ }
+
+ if (countHandles == 1)
+ ret = WAIT_OBJECT_0;
+ else if (waitAll)
+ {
+ // Probe all handles with a timeout of zero. When we find one that's
+ // invalid, move it out of the list and retry the wait.
+#ifdef _DEBUG
+ BOOL fFoundInvalid = FALSE;
+#endif
+ for (int i = 0; i < countHandles; i++)
+ {
+ // WaitForSingleObject won't pump messages; we already probed enough stack
+ // space before calling this function and we don't want to fail here, so we
+ // don't transition to SO-tolerant code here.
+ DWORD subRet = WaitForSingleObject (handles[i], 0);
+ if (subRet != WAIT_FAILED)
+ continue;
+ _ASSERTE(::GetLastError() == ERROR_INVALID_HANDLE);
+ if ((countHandles - i - 1) > 0)
+ memmove(&handles[i], &handles[i+1], (countHandles - i - 1) * sizeof(HANDLE));
+ countHandles--;
+#ifdef _DEBUG
+ fFoundInvalid = TRUE;
+#endif
+ break;
+ }
+ _ASSERTE(fFoundInvalid);
+
+ // Compute the new timeout value, assuming that the timeout
+ // is not large enough for more than one wrap.
+ dwEnd = CLRGetTickCount64();
+ if (millis != INFINITE)
+ {
+ if (dwEnd >= dwStart + millis)
+ {
+ ret = WAIT_TIMEOUT;
+ goto WaitCompleted;
+ }
+ else
+ {
+ millis -= (DWORD)(dwEnd - dwStart);
+ }
+ }
+ goto retry;
+ }
+ else
+ {
+ // Probe all handles with a timeout of zero; succeed with the first
+ // handle that doesn't time out.
+ ret = WAIT_OBJECT_0;
+ int i;
+ for (i = 0; i < countHandles; i++)
+ {
+ TryAgain:
+ // WaitForSingleObject won't pump messages; we already probed enough stack
+ // space before calling this function and we don't want to fail here, so we
+ // don't transition to SO-tolerant code here.
+ DWORD subRet = WaitForSingleObject (handles[i], 0);
+ if ((subRet == WAIT_OBJECT_0) || (subRet == WAIT_FAILED))
+ break;
+ if (subRet == WAIT_ABANDONED)
+ {
+ ret = (ret - WAIT_OBJECT_0) + WAIT_ABANDONED;
+ break;
+ }
+ // If we get alerted it just masks the real state of the current
+ // handle, so retry the wait.
+ if (subRet == WAIT_IO_COMPLETION)
+ goto TryAgain;
+ _ASSERTE(subRet == WAIT_TIMEOUT);
+ ret++;
+ }
+ _ASSERTE(i != countHandles);
+ }
+ }
+
+WaitCompleted:
+
+ _ASSERTE((ret != WAIT_TIMEOUT) || (millis != INFINITE));
+
+ return ret;
+}
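+
+// Illustrative call of the worker above (hypothetical handles and timeout, for
+// exposition only):
+//
+//   HANDLE h[2] = { hEvent, hMutex };
+//   DWORD r = DoAppropriateWaitWorker(2, h, FALSE /*waitAll*/,
+//                                     5000, WaitMode_Alertable);
+//   // r is WAIT_OBJECT_0 + i, WAIT_ABANDONED + i, or WAIT_TIMEOUT.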
+
+
+DWORD Thread::DoAppropriateWaitWorker(AppropriateWaitFunc func, void *args,
+ DWORD millis, WaitMode mode)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ BOOL alertable = (mode & WaitMode_Alertable)!=0;
+
+ // Before going to pre-emptive mode the thread needs to be flagged as waiting for
+ // the debugger. This used to be accomplished by the TS_Interruptible flag but that
+ // doesn't work reliably, see DevDiv Bugs 699245. Some methods call in here already in
+ // COOP mode so we set the bit before the transition. For the calls that are already
+ // in pre-emptive mode those are still buggy. This is only a partial fix.
+ BOOL isCoop = PreemptiveGCDisabled();
+ ThreadStateNCStackHolder tsNC(isCoop && alertable, TSNC_DebuggerSleepWaitJoin);
+ GCX_PREEMP();
+
+ // <TODO>
+ // @TODO cwb: we don't know whether a thread has a message pump or
+ // how to pump its messages, currently.
+ // @TODO cwb: WinCE isn't going to support Thread.Interrupt() correctly until
+ // we get alertable waits on that platform.</TODO>
+ DWORD ret;
+ if(alertable)
+ {
+ DoAppropriateWaitWorkerAlertableHelper(mode);
+ }
+
+ DWORD option;
+ if (alertable)
+ {
+ option = WAIT_ALERTABLE;
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ ApartmentState as = GetFinalApartment();
+ if ((AS_InMTA != as) && !GetDomain()->MustForceTrivialWaitOperations())
+ {
+ option |= WAIT_MSGPUMP;
+ }
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ }
+ else
+ {
+ option = 0;
+ }
+
+ ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);
+
+ ULONGLONG dwStart = 0;
+ ULONGLONG dwEnd;
+
+retry:
+ if (millis != INFINITE)
+ {
+ dwStart = CLRGetTickCount64();
+ }
+ ret = func(args, millis, option);
+
+ if (ret == WAIT_IO_COMPLETION)
+ {
+ _ASSERTE (alertable);
+
+ if ((m_State & TS_Interrupted))
+ {
+ HandleThreadInterrupt(mode & WaitMode_ADUnload);
+ }
+ if (millis != INFINITE)
+ {
+ dwEnd = CLRGetTickCount64();
+ if (dwEnd >= dwStart + millis)
+ {
+ ret = WAIT_TIMEOUT;
+ goto WaitCompleted;
+ }
+ else
+ {
+ millis -= (DWORD)(dwEnd - dwStart);
+ }
+ }
+ goto retry;
+ }
+
+WaitCompleted:
+ _ASSERTE(ret == WAIT_OBJECT_0 ||
+ ret == WAIT_ABANDONED ||
+ ret == WAIT_TIMEOUT ||
+ ret == WAIT_FAILED);
+
+ _ASSERTE((ret != WAIT_TIMEOUT) || (millis != INFINITE));
+
+ return ret;
+}
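+
+// A minimal sketch of the AppropriateWaitFunc shape consumed above (hypothetical
+// helper, not from this file; option carries the WAIT_ALERTABLE / WAIT_MSGPUMP
+// bits seen earlier):
+//
+//   static DWORD SingleHandleWait(void *args, DWORD timeout, DWORD option)
+//   {
+//       return WaitForSingleObjectEx((HANDLE)args, timeout,
+//                                    (option & WAIT_ALERTABLE) != 0);
+//   }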
+
+#ifndef FEATURE_CORECLR
+//--------------------------------------------------------------------
+// Only one style of wait for DoSignalAndWait since we don't support this on STA Threads
+//--------------------------------------------------------------------
+DWORD Thread::DoSignalAndWait(HANDLE *handles, DWORD millis, BOOL alertable, PendingSync *syncState)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+
+ _ASSERTE(alertable || syncState == 0);
+
+ struct Param
+ {
+ Thread *pThis;
+ HANDLE *handles;
+ DWORD millis;
+ BOOL alertable;
+ DWORD dwRet;
+ } param;
+ param.pThis = this;
+ param.handles = handles;
+ param.millis = millis;
+ param.alertable = alertable;
+ param.dwRet = (DWORD) -1;
+
+ EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
+ pParam->dwRet = pParam->pThis->DoSignalAndWaitWorker(pParam->handles, pParam->millis, pParam->alertable);
+ }
+ EE_FINALLY {
+ if (syncState) {
+ if (!GOT_EXCEPTION() && WAIT_OBJECT_0 == param.dwRet) {
+ // This thread has been removed from the syncblk waiting list by the signalling thread.
+ syncState->Restore(FALSE);
+ }
+ else
+ syncState->Restore(TRUE);
+ }
+
+ _ASSERTE (WAIT_IO_COMPLETION != param.dwRet);
+ }
+ EE_END_FINALLY;
+
+ return(param.dwRet);
+}
+
+
+DWORD Thread::DoSignalAndWaitWorker(HANDLE* pHandles, DWORD millis,BOOL alertable)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ DWORD ret = 0;
+
+ GCX_PREEMP();
+
+ if(alertable)
+ {
+ DoAppropriateWaitWorkerAlertableHelper(WaitMode_None);
+ }
+
+ LeaveRuntimeHolder holder((size_t)WaitForMultipleObjectsEx);
+ StateHolder<MarkOSAlertableWait,UnMarkOSAlertableWait> OSAlertableWait(alertable);
+
+ ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);
+
+ ULONGLONG dwStart = 0, dwEnd;
+
+ if (INFINITE != millis)
+ {
+ dwStart = CLRGetTickCount64();
+ }
+
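+ // SignalObjectAndWait signals pHandles[0] and waits on pHandles[1] in one
+ // atomic OS call, closing the lost-wakeup window that a separate Set
+ // followed by a Wait would leave open.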
+ ret = SignalObjectAndWait(pHandles[0], pHandles[1], millis, alertable);
+
+retry:
+
+ if (WAIT_IO_COMPLETION == ret)
+ {
+ _ASSERTE (alertable);
+ // We could be woken by some spurious APC or an EE APC queued to
+ // interrupt us. In the latter case the TS_Interrupted bit will be set
+ // in the thread state bits. Otherwise we just go back to sleep again.
+ if ((m_State & TS_Interrupted))
+ {
+ HandleThreadInterrupt(FALSE);
+ }
+ if (INFINITE != millis)
+ {
+ dwEnd = CLRGetTickCount64();
+ if (dwStart + millis <= dwEnd)
+ {
+ ret = WAIT_TIMEOUT;
+ goto WaitCompleted;
+ }
+ else
+ {
+ millis -= (DWORD)(dwEnd - dwStart);
+ }
+ dwStart = CLRGetTickCount64();
+ }
+ // On retry we don't want to signal again, so only do the wait...
+ ret = WaitForSingleObjectEx(pHandles[1],millis,TRUE);
+ goto retry;
+ }
+
+ if (WAIT_FAILED == ret)
+ {
+ DWORD errorCode = ::GetLastError();
+ // If the handle to signal is a mutex and the calling thread is not
+ // the owner, errorCode is ERROR_NOT_OWNER.
+
+ switch(errorCode)
+ {
+ case ERROR_INVALID_HANDLE:
+ case ERROR_NOT_OWNER:
+ case ERROR_ACCESS_DENIED:
+ COMPlusThrowWin32();
+ break;
+
+ case ERROR_TOO_MANY_POSTS:
+ ret = ERROR_TOO_MANY_POSTS;
+ break;
+
+ default:
+ CONSISTENCY_CHECK_MSGF(0, ("This errorCode is not understood (%d)\n", errorCode));
+ COMPlusThrowWin32();
+ break;
+ }
+ }
+
+WaitCompleted:
+
+ // Check that the return state is valid.
+ _ASSERTE(WAIT_OBJECT_0 == ret ||
+ WAIT_ABANDONED == ret ||
+ WAIT_TIMEOUT == ret ||
+ WAIT_FAILED == ret ||
+ ERROR_TOO_MANY_POSTS == ret);
+
+ // It is wrong to time out if the wait was infinite.
+ _ASSERTE((WAIT_TIMEOUT != ret) || (INFINITE != millis));
+
+ return ret;
+}
+#endif // FEATURE_CORECLR
+
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+DWORD Thread::DoSyncContextWait(OBJECTREF *pSyncCtxObj, int countHandles, HANDLE *handles, BOOL waitAll, DWORD millis)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(handles));
+ PRECONDITION(IsProtectedByGCFrame (pSyncCtxObj));
+ }
+ CONTRACTL_END;
+ MethodDescCallSite invokeWaitMethodHelper(METHOD__SYNCHRONIZATION_CONTEXT__INVOKE_WAIT_METHOD_HELPER);
+
+ BASEARRAYREF handleArrayObj = (BASEARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_I, countHandles);
+ memcpyNoGCRefs(handleArrayObj->GetDataPtr(), handles, countHandles * sizeof(HANDLE));
+
+ ARG_SLOT args[6] =
+ {
+ ObjToArgSlot(*pSyncCtxObj),
+ ObjToArgSlot(handleArrayObj),
+ BoolToArgSlot(waitAll),
+ (ARG_SLOT)millis,
+ };
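+
+ // Note that args is sized 6 but only the first four slots are populated; the
+ // remainder stay zero-initialized. The managed helper is expected to take
+ // (SynchronizationContext, IntPtr[], bool waitAll, int millis) and return the
+ // wait status as an int, hence Call_RetI4 below.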
+
+ // Needed by TriggerGCForMDAInternal to avoid infinite recursion
+ ThreadStateNCStackHolder holder(TRUE, TSNC_InsideSyncContextWait);
+
+ return invokeWaitMethodHelper.Call_RetI4(args);
+}
+#endif // #ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+
+// Called out of SyncBlock::Wait() to block this thread until the Notify occurs.
+BOOL Thread::Block(INT32 timeOut, PendingSync *syncState)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(this == GetThread());
+
+ // Before calling Block, the SyncBlock queued us onto its list of waiting threads.
+ // However, before calling Block the SyncBlock temporarily left the synchronized
+ // region. This allowed threads to enter the region and call Notify, in which
+ // case we may have been signalled before we entered the Wait. So we aren't in the
+ // m_WaitSB list any longer. Not a problem: the following Wait will return
+ // immediately. But it means we cannot enforce the following assertion:
+// _ASSERTE(m_WaitSB != NULL);
+
+ return (Wait(syncState->m_WaitEventLink->m_Next->m_EventWait, timeOut, syncState) != WAIT_OBJECT_0);
+}
+
+
+// Return the wait status: WAIT_OBJECT_0 + n if we were signalled, or WAIT_TIMEOUT.
+DWORD Thread::Wait(HANDLE *objs, int cntObjs, INT32 timeOut, PendingSync *syncInfo)
+{
+ WRAPPER_NO_CONTRACT;
+
+ DWORD dwResult;
+ DWORD dwTimeOut32;
+
+ _ASSERTE(timeOut >= 0 || timeOut == INFINITE_TIMEOUT);
+
+ dwTimeOut32 = (timeOut == INFINITE_TIMEOUT
+ ? INFINITE
+ : (DWORD) timeOut);
+
+ dwResult = DoAppropriateWait(cntObjs, objs, FALSE /*=waitAll*/, dwTimeOut32,
+ WaitMode_Alertable /*alertable*/,
+ syncInfo);
+
+ // Either we succeeded in the wait, or we timed out
+ _ASSERTE((dwResult >= WAIT_OBJECT_0 && dwResult < (DWORD)(WAIT_OBJECT_0 + cntObjs)) ||
+ (dwResult == WAIT_TIMEOUT));
+
+ return dwResult;
+}
+
+// Return the wait status: WAIT_OBJECT_0 if we were signalled, or WAIT_TIMEOUT.
+DWORD Thread::Wait(CLREvent *pEvent, INT32 timeOut, PendingSync *syncInfo)
+{
+ WRAPPER_NO_CONTRACT;
+
+ DWORD dwResult;
+ DWORD dwTimeOut32;
+
+ _ASSERTE(timeOut >= 0 || timeOut == INFINITE_TIMEOUT);
+
+ dwTimeOut32 = (timeOut == INFINITE_TIMEOUT
+ ? INFINITE
+ : (DWORD) timeOut);
+
+ dwResult = pEvent->Wait(dwTimeOut32, TRUE /*alertable*/, syncInfo);
+
+ // Either we succeeded in the wait, or we timed out
+ _ASSERTE((dwResult == WAIT_OBJECT_0) ||
+ (dwResult == WAIT_TIMEOUT));
+
+ return dwResult;
+}
+
+void Thread::Wake(SyncBlock *psb)
+{
+ WRAPPER_NO_CONTRACT;
+
+ CLREvent* hEvent = NULL;
+ WaitEventLink *walk = &m_WaitEventLink;
+ while (walk->m_Next) {
+ if (walk->m_Next->m_WaitSB == psb) {
+ hEvent = walk->m_Next->m_EventWait;
+ // We are guaranteed that only one thread can change walk->m_Next->m_WaitSB
+ // since the thread is holding the syncblock.
+ walk->m_Next->m_WaitSB = (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB | 1);
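+ // The low bit of m_WaitSB serves as a "woken" tag; readers such as
+ // PendingSync::Restore strip it with & ~1 before dereferencing.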
+ break;
+ }
+#ifdef _DEBUG
+ else if ((SyncBlock*)((DWORD_PTR)walk->m_Next & ~1) == psb) {
+ _ASSERTE (!"Can not wake a thread on the same SyncBlock more than once");
+ }
+#endif
+ }
+ PREFIX_ASSUME (hEvent != NULL);
+ hEvent->Set();
+}
+
+#define WAIT_INTERRUPT_THREADABORT 0x1
+#define WAIT_INTERRUPT_INTERRUPT 0x2
+#define WAIT_INTERRUPT_OTHEREXCEPTION 0x4
+
+// When we restore an interrupted wait, we must re-acquire the monitor. This
+// helper enters the monitor and reports whether the attempt itself was
+// interrupted by a thread abort, a thread interrupt, or some other exception.
+DWORD EnterMonitorForRestore(SyncBlock *pSB)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ DWORD state = 0;
+ EX_TRY
+ {
+ pSB->EnterMonitor();
+ }
+ EX_CATCH
+ {
+ // Assume it is a normal exception unless proven.
+ state = WAIT_INTERRUPT_OTHEREXCEPTION;
+ Thread *pThread = GetThread();
+ if (pThread->IsAbortInitiated())
+ {
+ state = WAIT_INTERRUPT_THREADABORT;
+ }
+ else if (__pException != NULL)
+ {
+ if (__pException->GetHR() == COR_E_THREADINTERRUPTED)
+ {
+ state = WAIT_INTERRUPT_INTERRUPT;
+ }
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return state;
+}
+
+// This is the service that backs us out of a wait that we interrupted. We must
+// re-enter the monitor to the same extent the SyncBlock would if we returned
+// through it (instead of throwing through it), and we need to cancel the wait
+// if it didn't get notified away while we were processing the interrupt.
+void PendingSync::Restore(BOOL bRemoveFromSB)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_EnterCount);
+
+ Thread *pCurThread = GetThread();
+
+ _ASSERTE (pCurThread == m_OwnerThread);
+
+ WaitEventLink *pRealWaitEventLink = m_WaitEventLink->m_Next;
+
+ pRealWaitEventLink->m_RefCount --;
+ if (pRealWaitEventLink->m_RefCount == 0)
+ {
+ if (bRemoveFromSB) {
+ ThreadQueue::RemoveThread(pCurThread, pRealWaitEventLink->m_WaitSB);
+ }
+ if (pRealWaitEventLink->m_EventWait != &pCurThread->m_EventWait) {
+ // Put the event back to the pool.
+ StoreEventToEventStore(pRealWaitEventLink->m_EventWait);
+ }
+ // Remove from the link.
+ m_WaitEventLink->m_Next = m_WaitEventLink->m_Next->m_Next;
+ }
+
+ // Someone up the stack is responsible for keeping the syncblock alive by protecting
+ // the object that owns it. But this relies on assertions that EnterMonitor is only
+ // called in cooperative mode. Even though we are safe in preemptive, do the
+ // switch.
+ GCX_COOP_THREAD_EXISTS(pCurThread);
+ // We need to make sure that EnterMonitor succeeds. We may have code like
+ // lock (a)
+ // {
+ // a.Wait
+ // }
+ // We need to make sure that the finally from the lock is executed with the lock owned.
+ DWORD state = 0;
+ SyncBlock *psb = (SyncBlock*)((DWORD_PTR)pRealWaitEventLink->m_WaitSB & ~1);
+ for (LONG i=0; i < m_EnterCount;)
+ {
+ if ((state & (WAIT_INTERRUPT_THREADABORT | WAIT_INTERRUPT_INTERRUPT)) != 0)
+ {
+ // If the thread has been interrupted by Thread.Interrupt or Thread.Abort,
+ // disable the check at the beginning of DoAppropriateWait
+ pCurThread->SetThreadStateNC(Thread::TSNC_InRestoringSyncBlock);
+ }
+ DWORD result = EnterMonitorForRestore(psb);
+ if (result == 0)
+ {
+ i++;
+ }
+ else
+ {
+ // We block the thread until the thread acquires the lock.
+ // This is to make sure that when catch/finally is executed, the thread has the lock.
+ // We do not want thread to run its catch/finally if the lock is not taken.
+ state |= result;
+
+ // If the thread is being rudely aborted, and the thread has
+ // no Cer on stack, we will not run managed code to release the
+ // lock, so we can terminate the loop.
+ if (pCurThread->IsRudeAbortInitiated() &&
+ !pCurThread->IsExecutingWithinCer())
+ {
+ break;
+ }
+ }
+ }
+
+ pCurThread->ResetThreadStateNC(Thread::TSNC_InRestoringSyncBlock);
+
+ if ((state & WAIT_INTERRUPT_THREADABORT) != 0)
+ {
+ pCurThread->HandleThreadAbort();
+ }
+ else if ((state & WAIT_INTERRUPT_INTERRUPT) != 0)
+ {
+ COMPlusThrow(kThreadInterruptedException);
+ }
+}
+
+
+
+// This is the callback from the OS, when we queue an APC to interrupt a waiting thread.
+// The callback occurs on the thread we wish to interrupt. It is a STATIC method.
+void __stdcall Thread::UserInterruptAPC(ULONG_PTR data)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(data == APC_Code);
+
+ Thread *pCurThread = GetThread();
+ if (pCurThread)
+ {
+ // We should only take action if an interrupt is currently being
+ // requested (our synchronization does not guarantee that we won't fire
+ // spuriously). It's safe to check the m_UserInterrupt field and then
+ // set TS_Interrupted in a non-atomic fashion because m_UserInterrupt is
+ // only cleared in this thread's context (though it may be set from any
+ // context).
+ if (pCurThread->IsUserInterrupted())
+ {
+ // Set bit to indicate this routine was called (as opposed to other
+ // generic APCs).
+ FastInterlockOr((ULONG *) &pCurThread->m_State, TS_Interrupted);
+ }
+ }
+}
+
+// This is the workhorse for Thread.Interrupt().
+void Thread::UserInterrupt(ThreadInterruptMode mode)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ FastInterlockOr((DWORD*)&m_UserInterrupt, mode);
+
+ if (HasValidThreadHandle() &&
+ HasThreadState (TS_Interruptible))
+ {
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_Abort);
+#endif
+ Alert();
+ }
+}
+
+// Implementation of Thread.Sleep().
+void Thread::UserSleep(INT32 time)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ INCONTRACT(_ASSERTE(!GetThread()->GCNoTrigger()));
+
+ DWORD res;
+
+ // Before going to pre-emptive mode the thread needs to be flagged as waiting for
+ // the debugger. This used to be accomplished by the TS_Interruptible flag but that
+ // doesn't work reliably, see DevDiv Bugs 699245.
+ ThreadStateNCStackHolder tsNC(TRUE, TSNC_DebuggerSleepWaitJoin);
+ GCX_PREEMP();
+
+ // A word about ordering for Interrupt. If someone tries to interrupt a thread
+ // that's in the interruptible state, we queue an APC. But if they try to interrupt
+ // a thread that's not in the interruptible state, we just record that fact. So
+ // we have to set TS_Interruptible before we test to see whether someone wants to
+ // interrupt us or else we have a race condition that causes us to skip the APC.
+ FastInterlockOr((ULONG *) &m_State, TS_Interruptible);
+
+ // If someone has interrupted us, we should not enter the wait.
+ if (IsUserInterrupted())
+ {
+ HandleThreadInterrupt(FALSE);
+ }
+
+ ThreadStateHolder tsh(TRUE, TS_Interruptible | TS_Interrupted);
+
+ FastInterlockAnd((ULONG *) &m_State, ~TS_Interrupted);
+
+ DWORD dwTime = (DWORD)time;
+retry:
+
+ ULONGLONG start = CLRGetTickCount64();
+
+ res = ClrSleepEx (dwTime, TRUE);
+
+ if (res == WAIT_IO_COMPLETION)
+ {
+ // We could be woken by some spurious APC or an EE APC queued to
+ // interrupt us. In the latter case the TS_Interrupted bit will be set
+ // in the thread state bits. Otherwise we just go back to sleep again.
+ if ((m_State & TS_Interrupted))
+ {
+ HandleThreadInterrupt(FALSE);
+ }
+
+ if (dwTime == INFINITE)
+ {
+ goto retry;
+ }
+ else
+ {
+ ULONGLONG actDuration = CLRGetTickCount64() - start;
+
+ if (dwTime > actDuration)
+ {
+ dwTime -= (DWORD)actDuration;
+ goto retry;
+ }
+ else
+ {
+ res = WAIT_TIMEOUT;
+ }
+ }
+ }
+ _ASSERTE(res == WAIT_TIMEOUT || res == WAIT_OBJECT_0);
+}
+
+
+// Correspondence between an EE Thread and an exposed System.Thread:
+OBJECTREF Thread::GetExposedObject()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ TRIGGERSGC();
+
+ Thread *pCurThread = GetThread();
+ _ASSERTE (!(pCurThread == NULL || IsAtProcessExit()));
+
+ _ASSERTE(pCurThread->PreemptiveGCDisabled());
+
+ if (ObjectFromHandle(m_ExposedObject) == NULL)
+ {
+ // Allocate the exposed thread object.
+ THREADBASEREF attempt = (THREADBASEREF) AllocateObject(g_pThreadClass);
+ GCPROTECT_BEGIN(attempt);
+
+ // The exposed object keeps us alive until it is GC'ed. This
+ // doesn't mean the physical thread continues to run, of course.
+ // We have to set this outside of the ThreadStore lock, because this might trigger a GC.
+ attempt->SetInternal(this);
+
+ BOOL fNeedThreadStore = (! ThreadStore::HoldingThreadStore(pCurThread));
+ // Take a lock to make sure that only one thread creates the object.
+ ThreadStoreLockHolder tsHolder(fNeedThreadStore);
+
+ // Check to see if another thread has not already created the exposed object.
+ if (ObjectFromHandle(m_ExposedObject) == NULL)
+ {
+ // Keep a weak reference to the exposed object.
+ StoreObjectInHandle(m_ExposedObject, (OBJECTREF) attempt);
+
+ ObjectInHandleHolder exposedHolder(m_ExposedObject);
+
+ // Increase the external ref count. We can't call IncExternalCount because we
+ // already hold the thread lock and IncExternalCount won't be able to take it.
+ ULONG retVal = FastInterlockIncrement ((LONG*)&m_ExternalRefCount);
+
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_Lifetime);
+#endif
+ // Check to see if we need to store a strong pointer to the object.
+ if (retVal > 1)
+ StoreObjectInHandle(m_StrongHndToExposedObject, (OBJECTREF) attempt);
+
+ ObjectInHandleHolder strongHolder(m_StrongHndToExposedObject);
+
+
+ attempt->SetManagedThreadId(GetThreadId());
+
+
+ // Note that we are NOT calling the constructor on the Thread. That's
+ // because this is an internal create where we don't want a Start
+ // address. And we don't want to expose such a constructor for our
+ // customers to accidentally call. The following is in lieu of a true
+ // constructor:
+ attempt->InitExisting();
+
+ exposedHolder.SuppressRelease();
+ strongHolder.SuppressRelease();
+ }
+ else
+ {
+ attempt->ClearInternal();
+ }
+
+ GCPROTECT_END();
+ }
+ return ObjectFromHandle(m_ExposedObject);
+}
+
+
+// We only set non-NULL exposed objects for unstarted threads that haven't exited
+// their constructor yet. So there are no race conditions.
+void Thread::SetExposedObject(OBJECTREF exposed)
+{
+ CONTRACTL {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ if (exposed != NULL)
+ {
+ _ASSERTE (GetThread() != this);
+ _ASSERTE(IsUnstarted());
+ _ASSERTE(ObjectFromHandle(m_ExposedObject) == NULL);
+ // The exposed object keeps us alive until it is GC'ed. This doesn't mean the
+ // physical thread continues to run, of course.
+ StoreObjectInHandle(m_ExposedObject, exposed);
+ // This makes sure the contexts on the backing thread
+ // and the managed thread start off in sync with each other.
+#ifdef FEATURE_REMOTING
+ _ASSERTE(m_Context);
+ ((THREADBASEREF)exposed)->SetExposedContext(m_Context->GetExposedObjectRaw());
+#endif
+ // BEWARE: the IncExternalCount call below may cause GC to happen.
+
+ // IncExternalCount will store exposed in m_StrongHndToExposedObject which is in default domain.
+ // If the creating thread is killed before the target thread is killed in Thread.Start, Thread object
+ // will be kept alive forever.
+ // Instead, IncExternalCount should be called after the target thread has been started in Thread.Start.
+ // IncExternalCount();
+ }
+ else
+ {
+ // Simply set both of the handles to NULL. The GC of the old exposed thread
+ // object will take care of decrementing the external ref count.
+ StoreObjectInHandle(m_ExposedObject, NULL);
+ StoreObjectInHandle(m_StrongHndToExposedObject, NULL);
+ }
+}
+
+void Thread::SetLastThrownObject(OBJECTREF throwable, BOOL isUnhandled)
+{
+ CONTRACTL
+ {
+ if ((throwable == NULL) || CLRException::IsPreallocatedExceptionObject(throwable)) NOTHROW; else THROWS; // From CreateHandle
+ GC_NOTRIGGER;
+ if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ STRESS_LOG_COND1(LF_EH, LL_INFO100, OBJECTREFToObject(throwable) != NULL, "in Thread::SetLastThrownObject: obj = %p\n", OBJECTREFToObject(throwable));
+
+ // you can't have a NULL unhandled exception
+ _ASSERTE(!(throwable == NULL && isUnhandled));
+
+ if (m_LastThrownObjectHandle != NULL)
+ {
+ // We'll sometimes use a handle for a preallocated exception object. We should never, ever destroy one of
+ // these handles... they'll be destroyed when the Runtime shuts down.
+ if (!CLRException::IsPreallocatedExceptionHandle(m_LastThrownObjectHandle))
+ {
+ DestroyHandle(m_LastThrownObjectHandle);
+ }
+
+ m_LastThrownObjectHandle = NULL; // Make sure to set this to NULL here just in case we throw trying to make
+ // a new handle below.
+ }
+
+ if (throwable != NULL)
+ {
+ _ASSERTE(this == GetThread());
+
+ // Non-compliant exceptions are always wrapped.
+ // The use of the ExceptionNative:: helper here (rather than the global ::IsException helper)
+ // is hokey, but we need a GC_NOTRIGGER version and it's only for an ASSERT.
+ _ASSERTE(IsException(throwable->GetMethodTable()));
+
+ // If we're tracking one of the preallocated exception objects, then just use the global handle that
+ // matches it rather than creating a new one.
+ if (CLRException::IsPreallocatedExceptionObject(throwable))
+ {
+ m_LastThrownObjectHandle = CLRException::GetPreallocatedHandleForObject(throwable);
+ }
+ else
+ {
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ {
+ m_LastThrownObjectHandle = GetDomain()->CreateHandle(throwable);
+ }
+ END_SO_INTOLERANT_CODE;
+ }
+
+ _ASSERTE(m_LastThrownObjectHandle != NULL);
+ m_ltoIsUnhandled = isUnhandled;
+ }
+ else
+ {
+ m_ltoIsUnhandled = FALSE;
+ }
+}
+
+void Thread::SetSOForLastThrownObject()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+
+ // If we are saving stack overflow exception, we can just null out the current handle.
+ // The current domain is going to be unloaded or the process is going to be killed, so
+ // we will not leak a handle.
+ m_LastThrownObjectHandle = CLRException::GetPreallocatedStackOverflowExceptionHandle();
+}
+
+//
+// This is a nice wrapper for SetLastThrownObject which catches any exceptions caused by not being able to create
+// the handle for the throwable, and setting the last thrown object to the preallocated out of memory exception
+// instead.
+//
+OBJECTREF Thread::SafeSetLastThrownObject(OBJECTREF throwable)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // We return the original throwable if nothing goes wrong.
+ OBJECTREF ret = throwable;
+
+ EX_TRY
+ {
+ // Try to set the throwable.
+ SetLastThrownObject(throwable);
+ }
+ EX_CATCH
+ {
+ // If it didn't work, then set the last thrown object to the preallocated OOM exception, and return that
+ // object instead of the original throwable.
+ ret = CLRException::GetPreallocatedOutOfMemoryException();
+ SetLastThrownObject(ret);
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return ret;
+}
+
+//
+// This is a nice wrapper for SetThrowable and SetLastThrownObject, which catches any exceptions caused by not
+// being able to create the handle for the throwable, and sets the throwable to the preallocated out of memory
+// exception instead. It also updates the last thrown object, which is always updated when the throwable is
+// updated.
+//
+OBJECTREF Thread::SafeSetThrowables(OBJECTREF throwable DEBUG_ARG(ThreadExceptionState::SetThrowableErrorChecking stecFlags),
+ BOOL isUnhandled)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // We return the original throwable if nothing goes wrong.
+ OBJECTREF ret = throwable;
+
+ EX_TRY
+ {
+ // Try to set the throwable.
+ SetThrowable(throwable DEBUG_ARG(stecFlags));
+
+ // Now, if the last thrown object is different, go ahead and update it. This makes sure that we re-throw
+ // the right object when we rethrow.
+ if (LastThrownObject() != throwable)
+ {
+ SetLastThrownObject(throwable);
+ }
+
+ if (isUnhandled)
+ {
+ MarkLastThrownObjectUnhandled();
+ }
+ }
+ EX_CATCH
+ {
+ // If either set didn't work, then set both throwables to the preallocated OOM exception, and return that
+ // object instead of the original throwable.
+ ret = CLRException::GetPreallocatedOutOfMemoryException();
+
+ // Neither of these will throw because we're setting with a preallocated exception.
+ SetThrowable(ret DEBUG_ARG(stecFlags));
+ SetLastThrownObject(ret, isUnhandled);
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+
+ return ret;
+}
+
+void Thread::SetLastThrownObjectHandle(OBJECTHANDLE h)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (m_LastThrownObjectHandle != NULL &&
+ !CLRException::IsPreallocatedExceptionHandle(m_LastThrownObjectHandle))
+ {
+ DestroyHandle(m_LastThrownObjectHandle);
+ }
+
+ m_LastThrownObjectHandle = h;
+}
+
+//
+// Create a duplicate handle of the current throwable and set the last thrown object to that. This ensures that the
+// last thrown object and the current throwable have handles that are in the same app domain.
+//
+void Thread::SafeUpdateLastThrownObject(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ OBJECTHANDLE hThrowable = GetThrowableAsHandle();
+
+ if (hThrowable != NULL)
+ {
+ EX_TRY
+ {
+ // Using CreateDuplicateHandle here ensures that the AD of the last thrown object matches the domain of
+ // the current throwable.
+ SetLastThrownObjectHandle(CreateDuplicateHandle(hThrowable));
+ }
+ EX_CATCH
+ {
+ // If we can't create a duplicate handle, we set both throwables to the preallocated OOM exception.
+ SafeSetThrowables(CLRException::GetPreallocatedOutOfMemoryException());
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+}
+
+// Background threads must be counted, because the EE should shut down when the
+// last non-background thread terminates. But we only count running ones.
+void Thread::SetBackground(BOOL isBack, BOOL bRequiresTSL)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ // booleanize IsBackground() which just returns bits
+ if (isBack == !!IsBackground())
+ return;
+
+ LOG((LF_SYNC, INFO3, "SetBackground obtain lock\n"));
+ ThreadStoreLockHolder TSLockHolder(FALSE);
+ if (bRequiresTSL)
+ {
+ TSLockHolder.Acquire();
+ }
+
+ if (IsDead())
+ {
+ // This can only happen in a race condition, where the correct thing to do
+ // is ignore it. If it happens without the race condition, we throw an
+ // exception.
+ }
+ else
+ if (isBack)
+ {
+ if (!IsBackground())
+ {
+ FastInterlockOr((ULONG *) &m_State, TS_Background);
+
+ // unstarted threads don't contribute to the background count
+ if (!IsUnstarted())
+ ThreadStore::s_pThreadStore->m_BackgroundThreadCount++;
+
+ // If we put the main thread into a wait until only background threads exist,
+ // then we make that main thread a background thread. This cleanly handles
+ // the case where it may or may not be one as it enters the wait.
+
+ // One of the components of OtherThreadsComplete() has changed, so check whether
+ // we should now exit the EE.
+ ThreadStore::CheckForEEShutdown();
+ }
+ }
+ else
+ {
+ if (IsBackground())
+ {
+ FastInterlockAnd((ULONG *) &m_State, ~TS_Background);
+
+ // unstarted threads don't contribute to the background count
+ if (!IsUnstarted())
+ ThreadStore::s_pThreadStore->m_BackgroundThreadCount--;
+
+ _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount >= 0);
+ _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount <=
+ ThreadStore::s_pThreadStore->m_ThreadCount);
+ }
+ }
+
+ if (bRequiresTSL)
+ {
+ TSLockHolder.Release();
+ }
+}
+
+#ifdef FEATURE_COMINTEROP
+class ApartmentSpyImpl : public IUnknownCommon<IInitializeSpy>
+{
+
+public:
+ HRESULT STDMETHODCALLTYPE PreInitialize(DWORD dwCoInit, DWORD dwCurThreadAptRefs)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return S_OK;
+ }
+
+ HRESULT STDMETHODCALLTYPE PostInitialize(HRESULT hrCoInit, DWORD dwCoInit, DWORD dwNewThreadAptRefs)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return hrCoInit; // this HRESULT will be returned from CoInitialize(Ex)
+ }
+
+ HRESULT STDMETHODCALLTYPE PreUninitialize(DWORD dwCurThreadAptRefs)
+ {
+ // Don't assume that Thread exists and do not create it.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+
+ HRESULT hr = S_OK;
+
+ if (dwCurThreadAptRefs == 1 && !g_fEEShutDown)
+ {
+ // This is the last CoUninitialize on this thread and the CLR is still running. If it's an STA
+ // we take the opportunity to perform COM/WinRT cleanup now, when the apartment is still alive.
+
+ Thread *pThread = GetThreadNULLOk();
+ if (pThread != NULL)
+ {
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ if (pThread->GetFinalApartment() == Thread::AS_InSTA)
+ {
+ // This will release RCWs and purge the WinRT factory cache on all AppDomains. It
+ // will also synchronize with the finalizer thread which ensures that the RCWs
+ // that were already in the global RCW cleanup list will be cleaned up as well.
+ //
+ ReleaseRCWsInCachesNoThrow(GetCurrentCtxCookie());
+ }
+ }
+ END_EXTERNAL_ENTRYPOINT;
+ }
+ }
+ return hr;
+ }
+
+ HRESULT STDMETHODCALLTYPE PostUninitialize(DWORD dwNewThreadAptRefs)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return S_OK;
+ }
+};
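+
+// The spy above is registered from PrepareApartmentAndContext below via
+// CoRegisterInitializeSpy, so COM invokes its Pre/Post methods around every
+// CoInitializeEx and CoUninitialize made on this thread.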
+#endif // FEATURE_COMINTEROP
+
+// When the thread starts running, make sure it is running in the correct apartment
+// and context.
+BOOL Thread::PrepareApartmentAndContext()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ m_OSThreadId = ::GetCurrentThreadId();
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ // Be very careful in here because we haven't set up e.g. TLS yet.
+
+ if (m_State & (TS_InSTA | TS_InMTA))
+ {
+ // Make sure TS_InSTA and TS_InMTA aren't both set.
+ _ASSERTE(!((m_State & TS_InSTA) && (m_State & TS_InMTA)));
+
+ // Determine the apartment state to set based on the requested state.
+ ApartmentState aState = m_State & TS_InSTA ? AS_InSTA : AS_InMTA;
+
+ // Clear the requested apartment state from the thread. This is requested since
+ // the thread might actually be a fiber that has already been initialized to
+ // a different apartment state than the requested one. If we didn't clear
+ // the requested apartment state, then we could end up with both TS_InSTA and
+ // TS_InMTA set at the same time.
+ FastInterlockAnd ((ULONG *) &m_State, ~TS_InSTA & ~TS_InMTA);
+
+ // Attempt to set the requested apartment state.
+ SetApartment(aState, FALSE);
+ }
+
+ // In the case where we own the thread and we have switched it to a different
+ // starting context, it is the responsibility of the caller (KickOffThread())
+ // to notice that the context changed, and to adjust the delegate that it will
+ // dispatch on, as appropriate.
+#endif //FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+#ifdef FEATURE_COMINTEROP
+ // Our IInitializeSpy will be registered in AppX always, in classic processes
+ // only if the internal config switch is on.
+ if (AppX::IsAppXProcess() || g_pConfig->EnableRCWCleanupOnSTAShutdown())
+ {
+ NewHolder<ApartmentSpyImpl> pSpyImpl = new ApartmentSpyImpl();
+
+ IfFailThrow(CoRegisterInitializeSpy(pSpyImpl, &m_uliInitializeSpyCookie));
+ pSpyImpl.SuppressRelease();
+
+ m_fInitializeSpyRegistered = true;
+ }
+#endif // FEATURE_COMINTEROP
+
+ return TRUE;
+}
+
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+// TS_InSTA (0x00004000) -> AS_InSTA (0)
+// TS_InMTA (0x00008000) -> AS_InMTA (1)
+#define TS_TO_AS(ts) \
+ (Thread::ApartmentState)((((DWORD)ts) >> 14) - 1) \
+
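+// Worked through with the values above: TS_InSTA == 0x00004000 == 1 << 14, so
+// (0x00004000 >> 14) - 1 == 0 == AS_InSTA; TS_InMTA == 0x00008000, so
+// (0x00008000 >> 14) - 1 == 1 == AS_InMTA. The static_asserts in GetApartment
+// below check exactly this mapping.
+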
+// Retrieve the apartment state of the current thread. There are three possible
+// states: thread hosts an STA, thread is part of the MTA or thread state is
+// undecided. The last state may indicate that the apartment has not been set at
+// all (nobody has called CoInitializeEx) or that the EE does not know the
+// current state (EE has not called CoInitializeEx).
+Thread::ApartmentState Thread::GetApartment()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ApartmentState as = AS_Unknown;
+ ThreadState maskedTs = (ThreadState)(((DWORD)m_State) & (TS_InSTA|TS_InMTA));
+ if (maskedTs)
+ {
+ _ASSERTE((maskedTs == TS_InSTA) || (maskedTs == TS_InMTA));
+ static_assert_no_msg(TS_TO_AS(TS_InSTA) == AS_InSTA);
+ static_assert_no_msg(TS_TO_AS(TS_InMTA) == AS_InMTA);
+
+ as = TS_TO_AS(maskedTs);
+ }
+
+ if (
+#ifdef MDA_SUPPORTED
+ (NULL == MDA_GET_ASSISTANT(InvalidApartmentStateChange)) &&
+#endif
+ (as != AS_Unknown))
+ {
+ return as;
+ }
+
+ return GetApartmentRare(as);
+}
+
+Thread::ApartmentState Thread::GetApartmentRare(Thread::ApartmentState as)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (this == GetThread())
+ {
+ THDTYPE type;
+ HRESULT hr = S_OK;
+
+#ifdef MDA_SUPPORTED
+ MdaInvalidApartmentStateChange* pProbe = MDA_GET_ASSISTANT(InvalidApartmentStateChange);
+ if (pProbe)
+ {
+ // Without notifications from OLE32, we cannot know when the apartment state of a
+ // thread changes. But we have cached this fact and depend on it for all our
+ // blocking and COM Interop behavior to work correctly. Using the CDH, log that it
+ // is not changing underneath us, on those platforms where it is relatively cheap for
+ // us to do so.
+ if (as != AS_Unknown)
+ {
+ hr = GetCurrentThreadTypeNT5(&type);
+ if (hr == S_OK)
+ {
+ if (type == THDTYPE_PROCESSMESSAGES && as == AS_InMTA)
+ {
+ pProbe->ReportViolation(this, as, FALSE);
+ }
+ else if (type == THDTYPE_BLOCKMESSAGES && as == AS_InSTA)
+ {
+ pProbe->ReportViolation(this, as, FALSE);
+ }
+ }
+ }
+ }
+#endif
+
+ if (as == AS_Unknown)
+ {
+ hr = GetCurrentThreadTypeNT5(&type);
+ if (hr == S_OK)
+ {
+ as = (type == THDTYPE_PROCESSMESSAGES) ? AS_InSTA : AS_InMTA;
+
+ // If we get back THDTYPE_PROCESSMESSAGES, we are guaranteed to
+ // be an STA thread. If not, we are an MTA thread, however
+ // we can't know if the thread has been explicitly set to MTA
+ // (via a call to CoInitializeEx) or if it has been implicitly
+ // made MTA (if it hasn't been CoInitializeEx'd but CoInitialize
+ // has already been called on some other thread in the process).
+ if (as == AS_InSTA)
+ FastInterlockOr((ULONG *) &m_State, AS_InSTA);
+ }
+ }
+ }
+
+ return as;
+}
+
+
+// Retrieve the explicit apartment state of the current thread. There are three possible
+// states: thread hosts an STA, thread is part of the MTA or thread state is
+// undecided. The last state may indicate that the apartment has not been set at
+// all (nobody has called CoInitializeEx), the EE does not know the
+// current state (EE has not called CoInitializeEx), or the thread is implicitly in
+// the MTA.
+Thread::ApartmentState Thread::GetExplicitApartment()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!((m_State & TS_InSTA) && (m_State & TS_InMTA)));
+
+ // Initialize m_State by calling GetApartment.
+ GetApartment();
+
+ ApartmentState as = (m_State & TS_InSTA) ? AS_InSTA :
+ (m_State & TS_InMTA) ? AS_InMTA :
+ AS_Unknown;
+
+ return as;
+}
+
+
+Thread::ApartmentState Thread::GetFinalApartment()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(this == GetThread());
+
+ ApartmentState as = AS_Unknown;
+ if (g_fEEShutDown)
+ {
+ // On shutdown, do not use cached value. Someone might have called
+ // CoUninitialize.
+ FastInterlockAnd ((ULONG *) &m_State, ~TS_InSTA & ~TS_InMTA);
+ }
+
+ as = GetApartment();
+ if (as == AS_Unknown)
+ {
+ // On Win2k and above, GetApartment will only return AS_Unknown if CoInitialize
+ // hasn't been called in the process. In that case we can simply assume MTA. However we
+ // cannot cache this value in the Thread because if a CoInitialize does occur, then the
+ // thread state might change.
+ as = AS_InMTA;
+ }
+
+ return as;
+}
+
+// When we get an apartment tear-down notification,
+// we want to reset the apartment state we cache on the thread.
+VOID Thread::ResetApartment()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // reset the TS_InSTA bit and TS_InMTA bit
+ ThreadState t_State = (ThreadState)(~(TS_InSTA | TS_InMTA));
+ FastInterlockAnd((ULONG *) &m_State, t_State);
+}
+
+// Attempt to set current thread's apartment state. The actual apartment state
+// achieved is returned and may differ from the input state if someone managed
+// to call CoInitializeEx on this thread first (note that calls to SetApartment
+// made before the thread has started are guaranteed to succeed).
+// The fFireMDAOnMismatch indicates if we should fire the apartment state probe
+// on an apartment state mismatch.
+Thread::ApartmentState Thread::SetApartment(ApartmentState state, BOOL fFireMDAOnMismatch)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // Reset any bits that request for CoInitialize
+ ResetRequiresCoInitialize();
+
+ // Setting the state to AS_Unknown indicates we should CoUninitialize
+ // the thread.
+ if (state == AS_Unknown)
+ {
+ BOOL needUninitialize = (m_State & TS_CoInitialized)
+#ifdef FEATURE_COMINTEROP
+ || IsWinRTInitialized()
+#endif // FEATURE_COMINTEROP
+ ;
+
+ if (needUninitialize)
+ {
+ GCX_PREEMP();
+
+ // If we haven't CoInitialized the thread, then we don't have anything to do.
+ if (m_State & TS_CoInitialized)
+ {
+ // We should never be attempting to CoUninitialize another thread than
+ // the currently running thread.
+ _ASSERTE(m_OSThreadId == ::GetCurrentThreadId());
+
+ // CoUninitialize the thread and reset the STA/MTA/CoInitialized state bits.
+ ::CoUninitialize();
+
+ ThreadState uninitialized = static_cast<ThreadState>(TS_InSTA | TS_InMTA | TS_CoInitialized);
+ FastInterlockAnd((ULONG *) &m_State, ~uninitialized);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if (IsWinRTInitialized())
+ {
+ _ASSERTE(WinRTSupported());
+ BaseWinRTUninitialize();
+ ResetWinRTInitialized();
+ }
+#endif // FEATURE_COMINTEROP
+ }
+ return GetApartment();
+ }
+
+ // Call GetApartment to initialize the current apartment state.
+ //
+ // Important note: For Win2k and above this can return AS_InMTA even if the current
+ // thread has never been CoInitialized. Because of this we MUST NOT look at the
+ // return value of GetApartment here. We can however look at the m_State flags
+ // since these will only be set to TS_InMTA if we know for a fact that the
+ // current thread has explicitly been made MTA (via a call to CoInitializeEx).
+ GetApartment();
+
+ // If the current thread is STA, then it is impossible to change it to
+ // MTA.
+ if (m_State & TS_InSTA)
+ {
+#ifdef MDA_SUPPORTED
+ if (state == AS_InMTA && fFireMDAOnMismatch)
+ {
+ MDA_TRIGGER_ASSISTANT(InvalidApartmentStateChange, ReportViolation(this, state, TRUE));
+ }
+#endif
+ return AS_InSTA;
+ }
+
+ // If the current thread is EXPLICITLY MTA, then it is impossible to change it to
+ // STA.
+ if (m_State & TS_InMTA)
+ {
+#ifdef MDA_SUPPORTED
+ if (state == AS_InSTA && fFireMDAOnMismatch)
+ {
+ MDA_TRIGGER_ASSISTANT(InvalidApartmentStateChange, ReportViolation(this, state, TRUE));
+ }
+#endif
+ return AS_InMTA;
+ }
+
+ // If the thread isn't even started yet, we mark the state bits without
+ // calling CoInitializeEx (since we're obviously not in the correct thread
+ // context yet). We'll retry this call when the thread is started.
+ // Don't use the TS_Unstarted state bit to check for this, it's cleared far
+ // too late in the day for us. Instead check whether we're in the correct
+ // thread context.
+ if (m_OSThreadId != ::GetCurrentThreadId())
+ {
+ FastInterlockOr((ULONG *) &m_State, (state == AS_InSTA) ? TS_InSTA : TS_InMTA);
+ return state;
+ }
+
+ HRESULT hr;
+ {
+ GCX_PREEMP();
+
+ // Attempt to set apartment by calling CoInitializeEx. This may fail if
+ // another caller (outside EE) beat us to it.
+ //
+ // Important note: When calling CoInitializeEx(COINIT_MULTITHREADED) on a
+ // thread that has never been CoInitialized, the return value will always
+ // be S_OK, even if another thread in the process has already been
+ // CoInitialized to MTA. However if the current thread has already been
+ // CoInitialized to MTA, then S_FALSE will be returned.
+ hr = ::CoInitializeEx(NULL, (state == AS_InSTA) ?
+ COINIT_APARTMENTTHREADED : COINIT_MULTITHREADED);
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ ThreadState t_State = (state == AS_InSTA) ? TS_InSTA : TS_InMTA;
+
+ if (hr == S_OK)
+ {
+ // The thread has never been CoInitialized.
+ t_State = (ThreadState)(t_State | TS_CoInitialized);
+ }
+ else
+ {
+ _ASSERTE(hr == S_FALSE);
+
+ // If the thread has already been CoInitialized to the proper mode, then
+ // we don't want to leave an outstanding CoInit so we CoUninit.
+ {
+ GCX_PREEMP();
+ ::CoUninitialize();
+ }
+ }
+
+ // We succeeded in setting the apartment state to the requested state.
+ FastInterlockOr((ULONG *) &m_State, t_State);
+ }
+ else if (hr == RPC_E_CHANGED_MODE)
+ {
+ // We didn't manage to enforce the requested apartment state, but at least
+ // we can work out what the state is now. No need to actually do the CoInit --
+ // obviously someone else already took care of that.
+ FastInterlockOr((ULONG *) &m_State, ((state == AS_InSTA) ? TS_InMTA : TS_InSTA));
+
+#ifdef MDA_SUPPORTED
+ if (fFireMDAOnMismatch)
+ {
+ // Report via the customer debug helper that we failed to set the apartment type
+ // to the specified type.
+ MDA_TRIGGER_ASSISTANT(InvalidApartmentStateChange, ReportViolation(this, state, TRUE));
+ }
+#endif
+ }
+ else if (hr == E_OUTOFMEMORY)
+ {
+ COMPlusThrowOM();
+ }
+ else
+ {
+ _ASSERTE(!"Unexpected HRESULT returned from CoInitializeEx!");
+ }
+
+#ifdef FEATURE_COMINTEROP
+
+ // If WinRT is supported on this OS, also initialize it at the same time. Since WinRT sits on top of COM
+ // we need to make sure that it is initialized in the same threading mode as we just started COM itself
+ // with (or that we detected COM had already been started with).
+ if (WinRTSupported() && !IsWinRTInitialized())
+ {
+ GCX_PREEMP();
+
+ BOOL isSTA = m_State & TS_InSTA;
+ _ASSERTE(isSTA || (m_State & TS_InMTA));
+
+ HRESULT hrWinRT = RoInitialize(isSTA ? RO_INIT_SINGLETHREADED : RO_INIT_MULTITHREADED);
+
+ if (SUCCEEDED(hrWinRT))
+ {
+ if (hrWinRT == S_OK)
+ {
+ SetThreadStateNC(TSNC_WinRTInitialized);
+ }
+ else
+ {
+ _ASSERTE(hrWinRT == S_FALSE);
+
+ // If the thread has already been initialized, back it out. We may not
+ // always be able to call RoUninitialize on shutdown so if there's
+ // a way to avoid having to, we should take advantage of that.
+ RoUninitialize();
+ }
+ }
+ else if (hrWinRT == E_OUTOFMEMORY)
+ {
+ COMPlusThrowOM();
+ }
+ else
+ {
+ // We don't check for RPC_E_CHANGED_MODE, since we're using the mode that was read in by
+ // initializing COM above. COM and WinRT need to always be in the same mode, so we should never
+ // see that return code at this point.
+ _ASSERTE(!"Unexpected HRESULT From RoInitialize");
+ }
+ }
+
+ // Since we've just called CoInitialize, COM has effectively been started up.
+ // To ensure the CLR is aware of this, we need to call EnsureComStarted.
+ EnsureComStarted(FALSE);
+#endif // FEATURE_COMINTEROP
+
+ return GetApartment();
+}
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+
+//----------------------------------------------------------------------------
+//
+// ThreadStore Implementation
+//
+//----------------------------------------------------------------------------
+
+ThreadStore::ThreadStore()
+ : m_Crst(CrstThreadStore, (CrstFlags) (CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)),
+ m_ThreadCount(0),
+ m_MaxThreadCount(0),
+ m_UnstartedThreadCount(0),
+ m_BackgroundThreadCount(0),
+ m_PendingThreadCount(0),
+ m_DeadThreadCount(0),
+ m_GuidCreated(FALSE),
+ m_HoldingThread(0)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ m_TerminationEvent.CreateManualEvent(FALSE);
+ _ASSERTE(m_TerminationEvent.IsValid());
+}
+
+
+void ThreadStore::InitThreadStore()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ s_pThreadStore = new ThreadStore;
+
+ g_pThinLockThreadIdDispenser = new IdDispenser();
+
+ ThreadSuspend::g_pGCSuspendEvent = new CLREvent();
+ ThreadSuspend::g_pGCSuspendEvent->CreateManualEvent(FALSE);
+
+#ifdef _DEBUG
+ Thread::MaxThreadRecord = EEConfig::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_MaxThreadRecord,Thread::MaxThreadRecord);
+ Thread::MaxStackDepth = EEConfig::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_MaxStackDepth,Thread::MaxStackDepth);
+ if (Thread::MaxStackDepth > 100) {
+ Thread::MaxStackDepth = 100;
+ }
+#endif
+
+ s_pWaitForStackCrawlEvent = new CLREvent();
+ s_pWaitForStackCrawlEvent->CreateManualEvent(FALSE);
+}
+
+// Enter and leave the critical section around the thread store. Clients should
+// use LockThreadStore and UnlockThreadStore because the ThreadStore lock has
+// additional semantics well beyond a normal lock.
+DEBUG_NOINLINE void ThreadStore::Enter()
+{
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ CHECK_ONE_STORE();
+ m_Crst.Enter();
+
+ // Threadstore needs special shutdown handling.
+ if (g_fSuspendOnShutdown)
+ {
+ m_Crst.ReleaseAndBlockForShutdownIfNotSpecialThread();
+ }
+}
+
+DEBUG_NOINLINE void ThreadStore::Leave()
+{
+ WRAPPER_NO_CONTRACT;
+ ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
+ CHECK_ONE_STORE();
+ m_Crst.Leave();
+}
+
+void ThreadStore::LockThreadStore()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // The actual implementation is in ThreadSuspend class since it is coupled
+ // with thread suspension logic
+ ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
+}
+
+void ThreadStore::UnlockThreadStore()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // The actual implementation is in ThreadSuspend class since it is coupled
+ // with thread suspension logic
+ ThreadSuspend::UnlockThreadStore(FALSE, ThreadSuspend::SUSPEND_OTHER);
+}
+
+// AddThread adds 'newThread' to m_ThreadList
+void ThreadStore::AddThread(Thread *newThread, BOOL bRequiresTSL)
+{
+ CONTRACTL {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ LOG((LF_SYNC, INFO3, "AddThread obtain lock\n"));
+
+ ThreadStoreLockHolder TSLockHolder(FALSE);
+ if (bRequiresTSL)
+ {
+ TSLockHolder.Acquire();
+ }
+
+ s_pThreadStore->m_ThreadList.InsertTail(newThread);
+
+ s_pThreadStore->m_ThreadCount++;
+ if (s_pThreadStore->m_MaxThreadCount < s_pThreadStore->m_ThreadCount)
+ s_pThreadStore->m_MaxThreadCount = s_pThreadStore->m_ThreadCount;
+
+ if (newThread->IsUnstarted())
+ s_pThreadStore->m_UnstartedThreadCount++;
+
+ newThread->SetThreadStateNC(Thread::TSNC_ExistInThreadStore);
+
+ _ASSERTE(!newThread->IsBackground());
+ _ASSERTE(!newThread->IsDead());
+
+ if (bRequiresTSL)
+ {
+ TSLockHolder.Release();
+ }
+}
+
+
+// Whenever one of the components of OtherThreadsComplete() has changed in the
+// correct direction, see whether we can now shutdown the EE because only background
+// threads are running.
+void ThreadStore::CheckForEEShutdown()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (g_fWeControlLifetime &&
+ s_pThreadStore->OtherThreadsComplete())
+ {
+ BOOL bRet;
+ bRet = s_pThreadStore->m_TerminationEvent.Set();
+ _ASSERTE(bRet);
+ }
+}
+
+
+BOOL ThreadStore::RemoveThread(Thread *target)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ BOOL found;
+ Thread *ret;
+
+#if 0 // This assert is not valid when failing to create background GC thread.
+ // Main GC thread holds the TS lock.
+ _ASSERTE (ThreadStore::HoldingThreadStore());
+#endif
+
+ _ASSERTE(s_pThreadStore->m_Crst.GetEnterCount() > 0 ||
+ IsAtProcessExit());
+ _ASSERTE(s_pThreadStore->DbgFindThread(target));
+ ret = s_pThreadStore->m_ThreadList.FindAndRemove(target);
+ _ASSERTE(ret && ret == target);
+ found = (ret != NULL);
+
+ if (found)
+ {
+ target->ResetThreadStateNC(Thread::TSNC_ExistInThreadStore);
+
+ s_pThreadStore->m_ThreadCount--;
+
+ if (target->IsDead())
+ s_pThreadStore->m_DeadThreadCount--;
+
+ // Unstarted threads are not in the Background count:
+ if (target->IsUnstarted())
+ s_pThreadStore->m_UnstartedThreadCount--;
+ else
+ if (target->IsBackground())
+ s_pThreadStore->m_BackgroundThreadCount--;
+
+ FastInterlockExchangeAdd(
+ &Thread::s_threadPoolCompletionCountOverflow,
+ target->m_threadPoolCompletionCount);
+
+ _ASSERTE(s_pThreadStore->m_ThreadCount >= 0);
+ _ASSERTE(s_pThreadStore->m_BackgroundThreadCount >= 0);
+ _ASSERTE(s_pThreadStore->m_ThreadCount >=
+ s_pThreadStore->m_BackgroundThreadCount);
+ _ASSERTE(s_pThreadStore->m_ThreadCount >=
+ s_pThreadStore->m_UnstartedThreadCount);
+ _ASSERTE(s_pThreadStore->m_ThreadCount >=
+ s_pThreadStore->m_DeadThreadCount);
+
+ // One of the components of OtherThreadsComplete() has changed, so check whether
+ // we should now exit the EE.
+ CheckForEEShutdown();
+ }
+ return found;
+}
+
+
+// A thread may be created as unstarted. Later it may get started, in which case
+// someone calls Thread::HasStarted() on that physical thread. This completes
+// the Setup and calls here.
+void ThreadStore::TransferStartedThread(Thread *thread, BOOL bRequiresTSL)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(GetThread() == thread);
+
+ LOG((LF_SYNC, INFO3, "TransferUnstartedThread obtain lock\n"));
+ ThreadStoreLockHolder TSLockHolder(FALSE);
+ if (bRequiresTSL)
+ {
+ TSLockHolder.Acquire();
+ }
+
+ _ASSERTE(s_pThreadStore->DbgFindThread(thread));
+ _ASSERTE(thread->HasValidThreadHandle());
+ _ASSERTE(thread->m_State & Thread::TS_WeOwn);
+ _ASSERTE(thread->IsUnstarted());
+ _ASSERTE(!thread->IsDead());
+
+ if (thread->m_State & Thread::TS_AbortRequested)
+ {
+ PAL_CPP_THROW(EEException *, new EEException(COR_E_THREADABORTED));
+ }
+
+ // Of course, m_ThreadCount is already correct since it includes started and
+ // unstarted threads.
+
+ s_pThreadStore->m_UnstartedThreadCount--;
+
+ // We only count background threads that have been started
+ if (thread->IsBackground())
+ s_pThreadStore->m_BackgroundThreadCount++;
+
+ _ASSERTE(s_pThreadStore->m_PendingThreadCount > 0);
+ FastInterlockDecrement(&s_pThreadStore->m_PendingThreadCount);
+
+ // As soon as we erase this bit, the thread becomes eligible for suspension,
+ // stopping, interruption, etc.
+ FastInterlockAnd((ULONG *) &thread->m_State, ~Thread::TS_Unstarted);
+ FastInterlockOr((ULONG *) &thread->m_State, Thread::TS_LegalToJoin);
+
+ // release ThreadStore Crst to avoid Crst Violation when calling HandleThreadAbort later
+ if (bRequiresTSL)
+ {
+ TSLockHolder.Release();
+ }
+
+ // One of the components of OtherThreadsComplete() has changed, so check whether
+ // we should now exit the EE.
+ CheckForEEShutdown();
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+
+// Access the list of threads. You must be inside a critical section, otherwise
+// the "cursor" thread might disappear underneath you. Pass in NULL for the
+// cursor to begin at the start of the list.
+Thread *ThreadStore::GetAllThreadList(Thread *cursor, ULONG mask, ULONG bits)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+ SUPPORTS_DAC;
+
+#ifndef DACCESS_COMPILE
+ _ASSERTE((s_pThreadStore->m_Crst.GetEnterCount() > 0) || IsAtProcessExit());
+#endif
+
+ while (TRUE)
+ {
+ cursor = (cursor
+ ? s_pThreadStore->m_ThreadList.GetNext(cursor)
+ : s_pThreadStore->m_ThreadList.GetHead());
+
+ if (cursor == NULL)
+ break;
+
+ if ((cursor->m_State & mask) == bits)
+ return cursor;
+ }
+ return NULL;
+}
+
+// Iterate over the threads that have been started
+Thread *ThreadStore::GetThreadList(Thread *cursor)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+ SUPPORTS_DAC;
+
+ return GetAllThreadList(cursor, (Thread::TS_Unstarted | Thread::TS_Dead), 0);
+}
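+
+// A minimal usage sketch (illustrative only, not compiled into the runtime):
+// a caller takes the ThreadStore lock so the cursor cannot be removed from the
+// list mid-iteration, then walks the started threads:
+//
+// ThreadStoreLockHolder tsl(TRUE);
+// Thread *pCursor = NULL;
+// while ((pCursor = ThreadStore::GetThreadList(pCursor)) != NULL)
+// {
+// // inspect pCursor; it is started (not TS_Unstarted) and not TS_Dead
+// }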
+
+//---------------------------------------------------------------------------------------
+//
+// Grab a consistent snapshot of the thread's state, for reporting purposes only.
+//
+// Return Value:
+// the current state of the thread
+//
+
+Thread::ThreadState Thread::GetSnapshotState()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ ThreadState res = m_State;
+
+ if (res & TS_ReportDead)
+ {
+ res = (ThreadState) (res | TS_Dead);
+ }
+
+ return res;
+}
+
+#ifndef DACCESS_COMPILE
+
+BOOL CLREventWaitWithTry(CLREventBase *pEvent, DWORD timeout, BOOL fAlertable, DWORD *pStatus)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS);
+ }
+ CONTRACTL_END;
+
+ BOOL fLoop = TRUE;
+ EX_TRY
+ {
+ *pStatus = pEvent->Wait(timeout, fAlertable);
+ fLoop = FALSE;
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return fLoop;
+}
+
+// We shut down the EE only when all the non-background threads have terminated
+// (unless this is an exceptional termination). So the main thread calls here to
+// wait before tearing down the EE.
+void ThreadStore::WaitForOtherThreads()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ CHECK_ONE_STORE();
+
+ Thread *pCurThread = GetThread();
+
+ // Regardless of whether the main thread is a background thread or not, force
+ // it to be one. This simplifies our rules for counting non-background threads.
+ pCurThread->SetBackground(TRUE);
+
+ LOG((LF_SYNC, INFO3, "WaitForOtherThreads obtain lock\n"));
+ ThreadStoreLockHolder TSLockHolder(TRUE);
+ if (!OtherThreadsComplete())
+ {
+ TSLockHolder.Release();
+
+ FastInterlockOr((ULONG *) &pCurThread->m_State, Thread::TS_ReportDead);
+
+ DWORD ret = WAIT_OBJECT_0;
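+ // CLREventWaitWithTry returns TRUE if the wait threw and must be retried,
+ // so this loop simply re-waits until the event is successfully observed.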
+ while (CLREventWaitWithTry(&m_TerminationEvent, INFINITE, TRUE, &ret))
+ {
+ }
+ _ASSERTE(ret == WAIT_OBJECT_0);
+ }
+}
+
+
+// Every EE process can lazily create a GUID that uniquely identifies it (for
+// purposes of remoting).
+const GUID &ThreadStore::GetUniqueEEId()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if (!m_GuidCreated)
+ {
+ ThreadStoreLockHolder TSLockHolder(TRUE);
+ if (!m_GuidCreated)
+ {
+ HRESULT hr = ::CoCreateGuid(&m_EEGuid);
+
+ _ASSERTE(SUCCEEDED(hr));
+ if (SUCCEEDED(hr))
+ m_GuidCreated = TRUE;
+ }
+
+ if (!m_GuidCreated)
+ return IID_NULL;
+ }
+ return m_EEGuid;
+}
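+
+// Note on the pattern above: GetUniqueEEId uses double-checked initialization.
+// m_GuidCreated is tested once without the lock and again under the ThreadStore
+// lock before CoCreateGuid runs, so at most one thread creates the GUID; all
+// others either see the published GUID or fall out with IID_NULL on failure.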
+
+
+#ifdef _DEBUG
+BOOL ThreadStore::DbgFindThread(Thread *target)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ CHECK_ONE_STORE();
+
+ // Cache the current change stamp for g_TrapReturningThreads
+ LONG chgStamp = g_trtChgStamp;
+ STRESS_LOG3(LF_STORE, LL_INFO100, "ThreadStore::DbgFindThread - [thread=%p]. trt=%d. chgStamp=%d\n", GetThread(), g_TrapReturningThreads.Load(), chgStamp);
+
+#if 0 // g_TrapReturningThreads debug code.
+ int iRetry = 0;
+Retry:
+#endif // g_TrapReturningThreads debug code.
+ BOOL found = FALSE;
+ Thread *cur = NULL;
+ LONG cnt = 0;
+ LONG cntBack = 0;
+ LONG cntUnstart = 0;
+ LONG cntDead = 0;
+ LONG cntReturn = 0;
+
+ while ((cur = GetAllThreadList(cur, 0, 0)) != NULL)
+ {
+ cnt++;
+
+ if (cur->IsDead())
+ cntDead++;
+
+ // Unstarted threads do not contribute to the count of background threads
+ if (cur->IsUnstarted())
+ cntUnstart++;
+ else
+ if (cur->IsBackground())
+ cntBack++;
+
+ if (cur == target)
+ found = TRUE;
+
+ // Note that (DebugSuspendPending | SuspendPending) implies a count of 2.
+ // We don't count GCPending because a single trap is held for the entire
+ // GC, instead of counting each interesting thread.
+ if (cur->m_State & Thread::TS_DebugSuspendPending)
+ cntReturn++;
+
+ if (cur->m_State & Thread::TS_UserSuspendPending)
+ cntReturn++;
+
+ if (cur->m_TraceCallCount > 0)
+ cntReturn++;
+
+ if (cur->IsAbortRequested())
+ cntReturn++;
+ }
+
+ _ASSERTE(cnt == m_ThreadCount);
+ _ASSERTE(cntUnstart == m_UnstartedThreadCount);
+ _ASSERTE(cntBack == m_BackgroundThreadCount);
+ _ASSERTE(cntDead == m_DeadThreadCount);
+ _ASSERTE(0 <= m_PendingThreadCount);
+
+#if 0 // g_TrapReturningThreads debug code.
+ if (cntReturn != g_TrapReturningThreads /*&& !g_fEEShutDown*/)
+ { // If count is off, try again, to account for multiple threads.
+ if (iRetry < 4)
+ {
+ // printf("Retry %d. cntReturn:%d, gReturn:%d\n", iRetry, cntReturn, g_TrapReturningThreads);
+ ++iRetry;
+ goto Retry;
+ }
+ printf("cnt:%d, Un:%d, Back:%d, Dead:%d, cntReturn:%d, TrapReturn:%d, eeShutdown:%d, threadShutdown:%d\n",
+ cnt,cntUnstart,cntBack,cntDead,cntReturn,g_TrapReturningThreads, g_fEEShutDown, Thread::IsAtProcessExit());
+ LOG((LF_CORDB, LL_INFO1000,
+ "SUSPEND: cnt:%d, Un:%d, Back:%d, Dead:%d, cntReturn:%d, TrapReturn:%d, eeShutdown:%d, threadShutdown:%d\n",
+ cnt,cntUnstart,cntBack,cntDead,cntReturn,g_TrapReturningThreads, g_fEEShutDown, Thread::IsAtProcessExit()) );
+
+ //_ASSERTE(cntReturn + 2 >= g_TrapReturningThreads);
+ }
+ if (iRetry > 0 && iRetry < 4)
+ {
+ printf("%d retries to re-sync counted TrapReturn with global TrapReturn.\n", iRetry);
+ }
+#endif // g_TrapReturningThreads debug code.
+
+ STRESS_LOG4(LF_STORE, LL_INFO100, "ThreadStore::DbgFindThread - [thread=%p]. trt=%d. chg=%d. cnt=%d\n", GetThread(), g_TrapReturningThreads.Load(), g_trtChgStamp.Load(), cntReturn);
+
+ // Because of race conditions and the fact that the GC places its
+ // own count, I can't assert this precisely. But I do want to be
+ // sure that this count isn't wandering ever higher -- with a
+ // nasty impact on the performance of GC mode changes and method
+ // call chaining!
+ //
+ // We don't bother asserting this during process exit, because
+ // during a shutdown we will quietly terminate threads that are
+ // being waited on. (If we aren't shutting down, we carefully
+ // decrement our counts and alert anyone waiting for us to
+ // return).
+ //
+ // Note: we don't actually assert this if
+ // ThreadStore::TrapReturningThreads() updated g_TrapReturningThreads
+ // between the beginning of this function and the moment of the assert.
+ // *** The order of evaluation in the if condition is important ***
+ _ASSERTE(
+ (g_trtChgInFlight != 0 || (cntReturn + 2 >= g_TrapReturningThreads) || chgStamp != g_trtChgStamp) ||
+ g_fEEShutDown);
+
+ return found;
+}
+
+#endif // _DEBUG
+
+void Thread::HandleThreadInterrupt (BOOL fWaitForADUnload)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ // If we're waiting for shutdown, we don't want to abort/interrupt this thread
+ if (HasThreadStateNC(Thread::TSNC_BlockedForShutdown))
+ return;
+
+ BEGIN_SO_INTOLERANT_CODE(this);
+
+ if ((m_UserInterrupt & TI_Abort) != 0)
+ {
+ // If the thread is waiting for AD unload to finish, and the thread is interrupted,
+ // we can start aborting.
+ HandleThreadAbort(fWaitForADUnload);
+ }
+ if ((m_UserInterrupt & TI_Interrupt) != 0)
+ {
+ if (ReadyForInterrupt())
+ {
+ ResetThreadState ((ThreadState)(TS_Interrupted | TS_Interruptible));
+ FastInterlockAnd ((DWORD*)&m_UserInterrupt, ~TI_Interrupt);
+
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_Abort);
+#endif
+
+ COMPlusThrow(kThreadInterruptedException);
+ }
+ }
+ END_SO_INTOLERANT_CODE;
+}
+
+#ifdef _DEBUG
+#define MAXSTACKBYTES (2 * PAGE_SIZE)
+void CleanStackForFastGCStress ()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ PVOID StackLimit = ClrTeb::GetStackLimit();
+ size_t nBytes = (size_t)&nBytes - (size_t)StackLimit;
+ nBytes &= ~sizeof (size_t);
+ if (nBytes > MAXSTACKBYTES) {
+ nBytes = MAXSTACKBYTES;
+ }
+ size_t* buffer = (size_t*) _alloca (nBytes);
+ memset(buffer, 0, nBytes);
+ GetThread()->m_pCleanedStackBase = &nBytes;
+}
+
+void Thread::ObjectRefFlush(Thread* thread)
+{
+
+ BEGIN_PRESERVE_LAST_ERROR;
+
+ // The constructor and destructor of AutoCleanupSONotMainlineHolder (allocated by SO_NOT_MAINLINE_FUNCTION below)
+ // may trash the last error, so we need to save and restore last error here. Also, we need to add a scope here
+ // because we can't let the destructor run after we call SetLastError().
+ {
+ // this is debug only code, so no need to validate
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ _ASSERTE(thread->PreemptiveGCDisabled()); // Should have been in managed code
+ memset(thread->dangerousObjRefs, 0, sizeof(thread->dangerousObjRefs));
+ thread->m_allObjRefEntriesBad = FALSE;
+ CLEANSTACKFORFASTGCSTRESS ();
+ }
+
+ END_PRESERVE_LAST_ERROR;
+}
+#endif
+
+#if defined(STRESS_HEAP)
+
+PtrHashMap *g_pUniqueStackMap = NULL;
+Crst *g_pUniqueStackCrst = NULL;
+
+#define UniqueStackDepth 8
+
+BOOL StackCompare (UPTR val1, UPTR val2)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
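+ // Note: the map hands us val1 shifted right by one bit (PtrHashMap appears to
+ // reserve the low bit of stored keys), so shift left to recover the pointer.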
+ size_t *p1 = (size_t *)(val1 << 1);
+ size_t *p2 = (size_t *)val2;
+ if (p1[0] != p2[0]) {
+ return FALSE;
+ }
+ size_t nElem = p1[0];
+ if (nElem >= UniqueStackDepth) {
+ nElem = UniqueStackDepth;
+ }
+ p1 ++;
+ p2 ++;
+
+ for (size_t n = 0; n < nElem; n ++) {
+ if (p1[n] != p2[n]) {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+void UniqueStackSetupMap()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (g_pUniqueStackCrst == NULL)
+ {
+ Crst *Attempt = new Crst (
+ CrstUniqueStack,
+ CrstFlags(CRST_REENTRANCY | CRST_UNSAFE_ANYMODE));
+
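+ // Publish the Crst with an interlocked compare-exchange so that concurrent
+ // first callers race safely; the loser deletes its extra instance below.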
+ if (FastInterlockCompareExchangePointer(&g_pUniqueStackCrst,
+ Attempt,
+ NULL) != NULL)
+ {
+ // We lost the race
+ delete Attempt;
+ }
+ }
+
+ // Now we have a Crst we can use to synchronize the remainder of the init.
+ if (g_pUniqueStackMap == NULL)
+ {
+ CrstHolder ch(g_pUniqueStackCrst);
+
+ if (g_pUniqueStackMap == NULL)
+ {
+ PtrHashMap *map = new (SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()) PtrHashMap ();
+ LockOwner lock = {g_pUniqueStackCrst, IsOwnerOfCrst};
+ map->Init (256, StackCompare, TRUE, &lock);
+ g_pUniqueStackMap = map;
+ }
+ }
+}
+
+BOOL StartUniqueStackMapHelper()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ BOOL fOK = TRUE;
+ EX_TRY
+ {
+ if (g_pUniqueStackMap == NULL)
+ {
+ UniqueStackSetupMap();
+ }
+ }
+ EX_CATCH
+ {
+ fOK = FALSE;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return fOK;
+}
+
+BOOL StartUniqueStackMap ()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return StartUniqueStackMapHelper();
+}
+
+#ifndef FEATURE_PAL
+
+size_t UpdateStackHash(size_t hash, size_t retAddr)
+{
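+ // ((hash << 3) + hash) == hash * 9: multiply the running hash by 9 and then
+ // XOR in the return address -- a cheap mixing step for stack fingerprinting.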
+ return ((hash << 3) + hash) ^ retAddr;
+}
+
+/***********************************************************************/
+size_t getStackHash(size_t* stackTrace, size_t* stackTop, size_t* stackStop, size_t stackBase, size_t stackLimit)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // return a hash of every return address found between 'stackTop' (the lowest address)
+ // and 'stackStop' (the highest address)
+
+ size_t hash = 0;
+ int idx = 0;
+
+#ifdef _TARGET_X86_
+
+ static size_t moduleBase = (size_t) -1;
+ static size_t moduleTop = (size_t) -1;
+ if (moduleTop == (size_t) -1)
+ {
+ MEMORY_BASIC_INFORMATION mbi;
+
+ if (ClrVirtualQuery(getStackHash, &mbi, sizeof(mbi)))
+ {
+ moduleBase = (size_t)mbi.AllocationBase;
+ moduleTop = (size_t)mbi.BaseAddress + mbi.RegionSize;
+ }
+ else
+ {
+ // way bad error, probably just assert and exit
+ _ASSERTE (!"ClrVirtualQuery failed");
+ moduleBase = 0;
+ moduleTop = 0;
+ }
+ }
+
+ while (stackTop < stackStop)
+ {
+ // Only consider values that point into this module; anything else (e.g. a
+ // pointer into the stack) can't be a return address we care about
+ if (*stackTop > moduleBase && *stackTop < moduleTop)
+ {
+ TADDR dummy;
+
+ if (isRetAddr((TADDR)*stackTop, &dummy))
+ {
+ hash = UpdateStackHash(hash, *stackTop);
+
+ // If there is no jitted code on the stack, then just use the
+ // first UniqueStackDepth frames as the context.
+ idx++;
+ if (idx <= UniqueStackDepth)
+ {
+ stackTrace [idx] = *stackTop;
+ }
+ }
+ }
+ stackTop++;
+ }
+
+#else // _TARGET_X86_
+
+ CONTEXT ctx;
+ ClrCaptureContext(&ctx);
+
+ UINT_PTR uControlPc = (UINT_PTR)GetIP(&ctx);
+ UINT_PTR uImageBase;
+
+ UINT_PTR uPrevControlPc = uControlPc;
+
+ for (;;)
+ {
+ RtlLookupFunctionEntry(uControlPc,
+ ARM_ONLY((DWORD*))(&uImageBase),
+ NULL
+ );
+
+ if (((UINT_PTR)g_pMSCorEE) != uImageBase)
+ {
+ break;
+ }
+
+ uControlPc = Thread::VirtualUnwindCallFrame(&ctx);
+
+ UINT_PTR uRetAddrForHash = uControlPc;
+
+ if (uPrevControlPc == uControlPc)
+ {
+ // This is a special case when we fail to acquire the loader lock
+ // in RtlLookupFunctionEntry(), which then returns false. The end
+ // result is that we cannot go any further on the stack and
+ // we will loop infinitely (because the owner of the loader lock
+ // is blocked on us).
+ hash = 0;
+ break;
+ }
+ else
+ {
+ uPrevControlPc = uControlPc;
+ }
+
+ hash = UpdateStackHash(hash, uRetAddrForHash);
+
+ // If there is no jitted code on the stack, then just use the
+ // first UniqueStackDepth frames as the context.
+ idx++;
+ if (idx <= UniqueStackDepth)
+ {
+ stackTrace [idx] = uRetAddrForHash;
+ }
+ }
+#endif // _TARGET_X86_
+
+ stackTrace [0] = idx;
+
+ return(hash);
+}
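+
+// Layout note: stackTrace[0] holds the count of captured frames; slots
+// 1..UniqueStackDepth hold the return addresses themselves. StackCompare above
+// reads the count from element 0 and compares at most UniqueStackDepth entries.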
+
+void UniqueStackHelper(size_t stackTraceHash, size_t *stackTrace)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ EX_TRY {
+ size_t nElem = stackTrace[0];
+ if (nElem >= UniqueStackDepth) {
+ nElem = UniqueStackDepth;
+ }
+ AllocMemHolder<size_t> stackTraceInMap = SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(size_t *)) * (S_SIZE_T(nElem) + S_SIZE_T(1)));
+ memcpy (stackTraceInMap, stackTrace, sizeof(size_t *) * (nElem + 1));
+ g_pUniqueStackMap->InsertValue(stackTraceHash, stackTraceInMap);
+ stackTraceInMap.SuppressRelease();
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+/***********************************************************************/
+/* returns true if this stack has not been seen before, useful for
+ running tests only once per stack trace. */
+
+BOOL Thread::UniqueStack(void* stackStart)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // If we were not told where to start, start at the caller of UniqueStack
+ if (stackStart == 0)
+ {
+ stackStart = &stackStart;
+ }
+
+ if (g_pUniqueStackMap == NULL)
+ {
+ if (!StartUniqueStackMap ())
+ {
+ // We fail to initialize unique stack map due to OOM.
+ // Let's say the stack is unique.
+ return TRUE;
+ }
+ }
+
+ size_t stackTrace[UniqueStackDepth+1] = {0};
+
+ // stackTraceHash represents a hash of the entire stack at the time we make the call.
+ // We ensure at least one GC per unique stackTrace. What information is contained in
+ // 'stackTrace' is somewhat arbitrary. We choose it to mean all functions live
+ // on the stack up to the first jitted function.
+
+ size_t stackTraceHash;
+ Thread* pThread = GetThread();
+
+
+ void* stopPoint = pThread->m_CacheStackBase;
+
+#ifdef _TARGET_X86_
+ // Find the stop point (the top-most jitted function)
+ Frame* pFrame = pThread->GetFrame();
+ for(;;)
+ {
+ // skip GC frames
+ if (pFrame == 0 || pFrame == (Frame*) -1)
+ break;
+
+ pFrame->GetFunction(); // This ensures that helper frames are initialized
+
+ if (pFrame->GetReturnAddress() != 0)
+ {
+ stopPoint = pFrame;
+ break;
+ }
+ pFrame = pFrame->Next();
+ }
+#endif // _TARGET_X86_
+
+ // Get a hash of all return addresses between here and the top-most jitted function
+ stackTraceHash = getStackHash (stackTrace, (size_t*) stackStart, (size_t*) stopPoint,
+ size_t(pThread->m_CacheStackBase), size_t(pThread->m_CacheStackLimit));
+
+ if (stackTraceHash == 0 ||
+ g_pUniqueStackMap->LookupValue (stackTraceHash, stackTrace) != (LPVOID)INVALIDENTRY)
+ {
+ return FALSE;
+ }
+ BOOL fUnique = FALSE;
+
+ {
+ CrstHolder ch(g_pUniqueStackCrst);
+#ifdef _DEBUG
+ if (GetThread ())
+ GetThread ()->m_bUniqueStacking = TRUE;
+#endif
+ if (g_pUniqueStackMap->LookupValue (stackTraceHash, stackTrace) != (LPVOID)INVALIDENTRY)
+ {
+ fUnique = FALSE;
+ }
+ else
+ {
+ fUnique = TRUE;
+ FAULT_NOT_FATAL();
+ UniqueStackHelper(stackTraceHash, stackTrace);
+ }
+#ifdef _DEBUG
+ if (GetThread ())
+ GetThread ()->m_bUniqueStacking = FALSE;
+#endif
+ }
+
+#ifdef _DEBUG
+ static int fCheckStack = -1;
+ if (fCheckStack == -1)
+ {
+ fCheckStack = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_FastGCCheckStack);
+ }
+ if (fCheckStack && pThread->m_pCleanedStackBase > stackTrace
+ && pThread->m_pCleanedStackBase - stackTrace > (int) MAXSTACKBYTES)
+ {
+ _ASSERTE (!"Garbage on stack");
+ }
+#endif
+ return fUnique;
+}
+
+#else // !FEATURE_PAL
+
+BOOL Thread::UniqueStack(void* stackStart)
+{
+ return FALSE;
+}
+
+#endif // !FEATURE_PAL
+
+#endif // STRESS_HEAP
+
+
+/*
+ * GetStackLowerBound
+ *
+ * Returns the lower bound of the stack space. Note -- the practical bound is some number of pages greater than
+ * this value -- those pages are reserved for stack overflow exception processing.
+ *
+ * Parameters:
+ * None
+ *
+ * Returns:
+ * address of the lower bound of the thread's stack.
+ */
+void * Thread::GetStackLowerBound()
+{
+ // Called during fiber switch. Cannot have a non-static contract.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#ifndef FEATURE_PAL
+ MEMORY_BASIC_INFORMATION lowerBoundMemInfo;
+ SIZE_T dwRes;
+
+ dwRes = ClrVirtualQuery((const void *)&lowerBoundMemInfo, &lowerBoundMemInfo, sizeof(MEMORY_BASIC_INFORMATION));
+
+ if (sizeof(MEMORY_BASIC_INFORMATION) == dwRes)
+ {
+ return (void *)(lowerBoundMemInfo.AllocationBase);
+ }
+ else
+ {
+ return NULL;
+ }
+#else // !FEATURE_PAL
+ return PAL_GetStackLimit();
+#endif // !FEATURE_PAL
+}
+
+/*
+ * GetStackUpperBound
+ *
+ * Return the upper bound of the thread's stack space.
+ *
+ * Parameters:
+ * None
+ *
+ * Returns:
+ * address of the base of the thread's stack.
+ */
+void *Thread::GetStackUpperBound()
+{
+ // Called during fiber switch. Cannot have a non-static contract.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ return ClrTeb::GetStackBase();
+}
+
+BOOL Thread::SetStackLimits(SetStackLimitScope scope)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (scope == fAll)
+ {
+ m_CacheStackBase = GetStackUpperBound();
+ m_CacheStackLimit = GetStackLowerBound();
+ if (m_CacheStackLimit == NULL)
+ {
+ _ASSERTE(!"Failed to set stack limits");
+ return FALSE;
+ }
+
+ // Compute the limit used by EnsureSufficientExecutionStack and cache it on the thread. The limit
+ // is currently set at 50% of the stack, which should be sufficient to allow the average Framework
+ // function to run, and to allow us to throw and dispatch an exception up a reasonable call chain.
+ m_CacheStackSufficientExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackBase) -
+ (reinterpret_cast<UINT_PTR>(m_CacheStackBase) - reinterpret_cast<UINT_PTR>(m_CacheStackLimit)) / 2;
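+
+ // Worked example (illustrative numbers only): with a 1 MB stack, base - (base - limit) / 2
+ // puts the cached limit half-way down the stack, so EnsureSufficientExecutionStack
+ // succeeds while at least ~512 KB of stack remains.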
+ }
+
+ // Ensure that we've setup the stack guarantee properly before we cache the stack limits
+ // as they depend upon the stack guarantee.
+ if (FAILED(CLRSetThreadStackGuarantee()))
+ return FALSE;
+
+ // Cache the last stack addresses that we are allowed to touch. We throw a stack overflow
+ // if we cross that line. Note that we ignore any subsequent calls to STSG for Whidbey until
+ // we see an exception and recache the values. We use the LastAllowableAddresses to
+ // determine if we've taken a hard SO and the ProbeLimits on the probes themselves.
+
+ m_LastAllowableStackAddress = GetLastNormalStackAddress();
+
+ if (g_pConfig->ProbeForStackOverflow())
+ {
+ m_ProbeLimit = m_LastAllowableStackAddress;
+ }
+ else
+ {
+ // If we have stack probing disabled, set the probeLimit to 0 so that all probes will pass. This
+ // way we don't have to do an extra check in the probe code.
+ m_ProbeLimit = 0;
+ }
+
+ return TRUE;
+}
+
+//---------------------------------------------------------------------------------------------
+// Routines we use to manage a thread's stack, for fiber switching or stack overflow purposes.
+//---------------------------------------------------------------------------------------------
+
+HRESULT Thread::CLRSetThreadStackGuarantee(SetThreadStackGuaranteeScope fScope)
+{
+ CONTRACTL
+ {
+ WRAPPER(NOTHROW);
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+ // TODO: we need to measure what the stack usage needs are at the limits in the hosted scenario for host callbacks
+
+ if (Thread::IsSetThreadStackGuaranteeInUse(fScope))
+ {
+ // <TODO> Tune this as needed </TODO>
+ ULONG uGuardSize = SIZEOF_DEFAULT_STACK_GUARANTEE;
+ int EXTRA_PAGES = 0;
+#if defined(_WIN64)
+#if defined(_TARGET_AMD64_)
+ // AMD64 Free Build EH Stack Stats:
+ // --------------------------------
+ // currently the maximum stack usage we'll face while handling a SO includes:
+ // 4.3k for the OS (kernel32!RaiseException, Rtl EH dispatch code, RtlUnwindEx [second pass])
+ // 1.2k for the CLR EH setup (NakedThrowHelper*)
+ // 4.5k for other heavy CLR stack creations (2x CONTEXT, 1x REGDISPLAY)
+ // ~1.0k for other misc CLR stack allocations
+ // -----
+ // 11.0k --> ~2.75 pages for CLR SO EH dispatch
+ //
+ // -plus we might need some more for debugger EH dispatch, Watson, etc...
+ // -also need to take into account that we can lose up to 1 page of the guard region
+ //  -additionally, we need to provide some region to hosts to allow for lock acquisition in a hosted scenario
+ //
+ EXTRA_PAGES = 3;
+ INDEBUG(EXTRA_PAGES += 1);
+
+#endif // _TARGET_AMD64_
+
+ int ThreadGuardPages = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ThreadGuardPages);
+ if (ThreadGuardPages == 0)
+ {
+ uGuardSize += (EXTRA_PAGES * PAGE_SIZE);
+ }
+ else
+ {
+ uGuardSize += (ThreadGuardPages * PAGE_SIZE);
+ }
+
+#else // _WIN64
+#ifdef _DEBUG
+ uGuardSize += (1 * PAGE_SIZE); // one extra page for debug infrastructure
+#endif // _DEBUG
+#endif // _WIN64
+
+ LOG((LF_EH, LL_INFO10000, "STACKOVERFLOW: setting thread stack guarantee to 0x%x\n", uGuardSize));
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (CorHost2::GetHostTaskManager())
+ {
+ HRESULT hr;
+ ULONG uCurrentGuarantee = 0;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+
+ // First, we'll see what the current guard size is.
+ hr = CorHost2::GetHostTaskManager()->GetStackGuarantee(&uCurrentGuarantee);
+
+ // Call SetStackGuarantee only if the guard isn't big enough for us.
+ if (FAILED(hr) || uCurrentGuarantee < uGuardSize)
+ hr = CorHost2::GetHostTaskManager()->SetStackGuarantee(uGuardSize);
+
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ if (hr != E_NOTIMPL)
+ return hr;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ if (!::SetThreadStackGuarantee(&uGuardSize))
+ {
+ return HRESULT_FROM_GetLastErrorNA();
+ }
+ }
+
+#endif // !FEATURE_PAL
+
+ return S_OK;
+}
+
+
+/*
+ * GetLastNormalStackAddress
+ *
+ * GetLastNormalStackAddress returns the last stack address before the guard
+ * region of a thread. This is the last address that one could write to before
+ * a stack overflow occurs.
+ *
+ * Parameters:
+ * StackLimit - the base of the stack allocation
+ *
+ * Returns:
+ * Address of the first page of the guard region.
+ */
+UINT_PTR Thread::GetLastNormalStackAddress(UINT_PTR StackLimit)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ UINT_PTR cbStackGuarantee = GetStackGuarantee();
+
+ // Here we take the "hard guard region size", the "stack guarantee" and the "fault page" and add them
+ // all together. Note that the "fault page" is the reason for the extra OS_PAGE_SIZE below. The OS
+ // will guarantee us a certain amount of stack remaining after a stack overflow. This is called the
+ // "stack guarantee". But to do this, it has to fault on the page before that region as the app is
+ // allowed to fault at the very end of that page. So, as a result, the last normal stack address is
+ // one page sooner.
+ return StackLimit + (cbStackGuarantee
+#ifndef FEATURE_PAL
+ + OS_PAGE_SIZE
+#endif // !FEATURE_PAL
+ + HARD_GUARD_REGION_SIZE);
+}
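+
+// Illustrative arithmetic (hypothetical sizes, since the actual constants are
+// platform-dependent): with a 4 KB page, a one-page hard guard region, and a
+// 16 KB stack guarantee, the last normal address would be
+// StackLimit + 16 KB + 4 KB + 4 KB = StackLimit + 24 KB.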
+
+#ifdef _DEBUG
+
+static void DebugLogMBIFlags(UINT uState, UINT uProtect)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+
+#define LOG_FLAG(flags, name) \
+ if (flags & name) \
+ { \
+ LOG((LF_EH, LL_INFO1000, "" #name " ")); \
+ } \
+
+ if (uState)
+ {
+ LOG((LF_EH, LL_INFO1000, "State: "));
+
+ LOG_FLAG(uState, MEM_COMMIT);
+ LOG_FLAG(uState, MEM_RESERVE);
+ LOG_FLAG(uState, MEM_DECOMMIT);
+ LOG_FLAG(uState, MEM_RELEASE);
+ LOG_FLAG(uState, MEM_FREE);
+ LOG_FLAG(uState, MEM_PRIVATE);
+ LOG_FLAG(uState, MEM_MAPPED);
+ LOG_FLAG(uState, MEM_RESET);
+ LOG_FLAG(uState, MEM_TOP_DOWN);
+ LOG_FLAG(uState, MEM_WRITE_WATCH);
+ LOG_FLAG(uState, MEM_PHYSICAL);
+ LOG_FLAG(uState, MEM_LARGE_PAGES);
+ LOG_FLAG(uState, MEM_4MB_PAGES);
+ }
+
+ if (uProtect)
+ {
+ LOG((LF_EH, LL_INFO1000, "Protect: "));
+
+ LOG_FLAG(uProtect, PAGE_NOACCESS);
+ LOG_FLAG(uProtect, PAGE_READONLY);
+ LOG_FLAG(uProtect, PAGE_READWRITE);
+ LOG_FLAG(uProtect, PAGE_WRITECOPY);
+ LOG_FLAG(uProtect, PAGE_EXECUTE);
+ LOG_FLAG(uProtect, PAGE_EXECUTE_READ);
+ LOG_FLAG(uProtect, PAGE_EXECUTE_READWRITE);
+ LOG_FLAG(uProtect, PAGE_EXECUTE_WRITECOPY);
+ LOG_FLAG(uProtect, PAGE_GUARD);
+ LOG_FLAG(uProtect, PAGE_NOCACHE);
+ LOG_FLAG(uProtect, PAGE_WRITECOMBINE);
+ }
+
+#undef LOG_FLAG
+#endif // !FEATURE_PAL
+}
+
+
+static void DebugLogStackRegionMBIs(UINT_PTR uLowAddress, UINT_PTR uHighAddress)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ MEMORY_BASIC_INFORMATION meminfo;
+ UINT_PTR uStartOfThisRegion = uLowAddress;
+
+ LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n"));
+
+ while (uStartOfThisRegion < uHighAddress)
+ {
+ SIZE_T res = ClrVirtualQuery((const void *)uStartOfThisRegion, &meminfo, sizeof(meminfo));
+
+ if (sizeof(meminfo) != res)
+ {
+ LOG((LF_EH, LL_INFO1000, "VirtualQuery failed on %p\n", uStartOfThisRegion));
+ break;
+ }
+
+ UINT_PTR uStartOfNextRegion = uStartOfThisRegion + meminfo.RegionSize;
+
+ if (uStartOfNextRegion > uHighAddress)
+ {
+ uStartOfNextRegion = uHighAddress;
+ }
+
+ UINT_PTR uRegionSize = uStartOfNextRegion - uStartOfThisRegion;
+
+ LOG((LF_EH, LL_INFO1000, "0x%p -> 0x%p (%d pg) ", uStartOfThisRegion, uStartOfNextRegion - 1, uRegionSize / OS_PAGE_SIZE));
+ DebugLogMBIFlags(meminfo.State, meminfo.Protect);
+ LOG((LF_EH, LL_INFO1000, "\n"));
+
+ uStartOfThisRegion = uStartOfNextRegion;
+ }
+
+ LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n"));
+}
+
+// static
+void Thread::DebugLogStackMBIs()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ Thread* pThread = GetThread(); // N.B. this can be NULL!
+
+ UINT_PTR uStackLimit;
+ UINT_PTR uStackBase;
+ if (pThread)
+ {
+ uStackLimit = (UINT_PTR)pThread->GetCachedStackLimit();
+ uStackBase = (UINT_PTR)pThread->GetCachedStackBase();
+ }
+ else
+ {
+ uStackLimit = (UINT_PTR)GetStackLowerBound();
+ uStackBase = (UINT_PTR)GetStackUpperBound();
+ }
+ UINT_PTR uStackSize = uStackBase - uStackLimit;
+
+ LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n"));
+ LOG((LF_EH, LL_INFO1000, "Stack Snapshot 0x%p -> 0x%p (%d pg)\n", uStackLimit, uStackBase, uStackSize / OS_PAGE_SIZE));
+ if (pThread)
+ {
+ LOG((LF_EH, LL_INFO1000, "Last normal addr: 0x%p\n", pThread->GetLastNormalStackAddress()));
+ }
+
+ DebugLogStackRegionMBIs(uStackLimit, uStackBase);
+}
+#endif // _DEBUG
+
+//
+// IsSPBeyondLimit
+//
+// Determines if the stack pointer is beyond the stack limit, in which case
+// we can assume we've taken a hard SO.
+//
+// Parameters: none
+//
+// Returns: bool indicating if SP is beyond the limit or not
+//
+BOOL Thread::IsSPBeyondLimit()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Reset the stack limits if necessary.
+ // @todo . Add a vectored handler for X86 so that we reset the stack limits
+ // there, as anything that supports SetThreadStackGuarantee will support vectored handlers.
+ // Then we can always assume during EH processing that our stack limits are good and we
+ // don't have to call ResetStackLimits.
+ ResetStackLimits();
+ char *approxSP = (char *)GetCurrentSP();
+ if (approxSP < (char *)(GetLastAllowableStackAddress()))
+ {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+__declspec(noinline) void AllocateSomeStack(){
+ LIMITED_METHOD_CONTRACT;
+#ifdef _TARGET_X86_
+ const size_t size = 0x200;
+#else //_TARGET_X86_
+ const size_t size = 0x400;
+#endif //_TARGET_X86_
+
+ INT8* mem = (INT8*)_alloca(size);
+ // Actually touch the memory we just allocated so the compiler can't
+ // optimize it away completely.
+ // NOTE: this assumes the stack grows down (towards 0).
+ VolatileStore<INT8>(mem, 0);
+}
+
+
+/*
+ * CommitThreadStack
+ *
+ * Commit the thread's entire stack. A thread's stack is usually only reserved memory, not committed. The OS will
+ * commit more pages as the thread's stack grows. But, if the system is low on memory and disk space, it's possible
+ * that the OS will not have enough memory to grow the stack. That causes a stack overflow exception at unpredictable
+ * times, and the CLR can't handle that.
+ *
+ * Parameters:
+ * The Thread object for this thread, if there is one. NULL otherwise.
+ *
+ * Returns:
+ * TRUE if the function succeeded, FALSE otherwise.
+ */
+/*static*/
+BOOL Thread::CommitThreadStack(Thread* pThreadOptional)
+{
+
+#ifndef FEATURE_CORECLR
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (FAILED(CLRSetThreadStackGuarantee(STSGuarantee_Force)))
+ return FALSE;
+
+ if (g_pConfig->GetDisableCommitThreadStack() && (pThreadOptional == NULL || !pThreadOptional->HasThreadStateNC(TSNC_ForceStackCommit)))
+ return TRUE;
+
+
+ // This is a temporary fix for VSWhidbey 259155. In CommitThreadStack() we determine the bounds of the
+ // region between the guard page and the hard guard region for a thread's stack and then commit that
+ // region. Sometimes we cross a page boundary while calculating the bounds or doing the commit (in
+ // VirtualQuery or VirtualAlloc), such that the guard page is moved after we've already gotten its
+ // location. When that happens we commit too many pages and destroy the guard page. To fix this we
+ // do a small stack allocation that ensures that we have enough stack space for all of the
+ // CommitThreadStack() work.
+
+ AllocateSomeStack();
+
+ // Grab the info about the first region of the stack. First, we grab the region where we are now (&tmpMBI),
+ // then we use the allocation base of that to grab the first region.
+ MEMORY_BASIC_INFORMATION tmpMBI;
+ SIZE_T dwRes;
+
+ dwRes = ClrVirtualQuery((const void *)&tmpMBI, &tmpMBI, sizeof(MEMORY_BASIC_INFORMATION));
+
+ if (sizeof(MEMORY_BASIC_INFORMATION) != dwRes)
+ {
+ return FALSE;
+ }
+
+ dwRes = ClrVirtualQuery((const void *)((BYTE*)tmpMBI.AllocationBase + HARD_GUARD_REGION_SIZE), &tmpMBI, sizeof(MEMORY_BASIC_INFORMATION));
+
+ if (sizeof(MEMORY_BASIC_INFORMATION) != dwRes)
+ {
+ return FALSE;
+ }
+
+ // We commit the reserved part of the stack, if necessary, minus one page for the "hard" guard page.
+ if (tmpMBI.State == MEM_RESERVE)
+ {
+ // Note: we leave the "hard" guard region uncommitted.
+ void *base = (BYTE*)tmpMBI.AllocationBase + HARD_GUARD_REGION_SIZE;
+
+ // We are committing a page on stack. If we call host for this operation,
+ // host needs to avoid adding it to the memory consumption. Therefore
+ // we call into OS directly.
+#undef VirtualAlloc
+ void *p = VirtualAlloc(base,
+ tmpMBI.RegionSize,
+ MEM_COMMIT,
+ PAGE_READWRITE);
+#define VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect) \
+ Dont_Use_VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect)
+
+ if (p != base )
+ {
+ DWORD err = GetLastError();
+ STRESS_LOG2(LF_EH, LL_ALWAYS,
+ "Thread::CommitThreadStack: failed to commit stack for TID 0x%x with error 0x%x\n",
+ ::GetCurrentThreadId(), err);
+
+ return FALSE;
+ }
+ }
+
+ INDEBUG(DebugLogStackMBIs());
+
+#endif
+ return TRUE;
+}
+
+#ifndef FEATURE_PAL
+
+// static // private
+BOOL Thread::DoesRegionContainGuardPage(UINT_PTR uLowAddress, UINT_PTR uHighAddress)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ SIZE_T dwRes;
+ MEMORY_BASIC_INFORMATION meminfo;
+ UINT_PTR uStartOfCurrentRegion = uLowAddress;
+
+ while (uStartOfCurrentRegion < uHighAddress)
+ {
+#undef VirtualQuery
+ // This code can run below YieldTask, which means that it must not call back into the host.
+ // The reason is that YieldTask is invoked by the host, and the host needs not be reentrant.
+ dwRes = VirtualQuery((const void *)uStartOfCurrentRegion, &meminfo, sizeof(meminfo));
+#define VirtualQuery(lpAddress, lpBuffer, dwLength) Dont_Use_VirtualQuery(lpAddress, lpBuffer, dwLength)
+
+ // If the query fails then assume we have no guard page.
+ if (sizeof(meminfo) != dwRes)
+ {
+ return FALSE;
+ }
+
+ if (meminfo.Protect & PAGE_GUARD)
+ {
+ return TRUE;
+ }
+
+ uStartOfCurrentRegion += meminfo.RegionSize;
+ }
+
+ return FALSE;
+}
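+
+// Performance note: each VirtualQuery describes a whole region, so the loop above
+// advances by meminfo.RegionSize rather than page-by-page -- typically only a
+// handful of queries are needed to scan an entire stack.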
+
+#endif // !FEATURE_PAL
+
+/*
+ * DetermineIfGuardPagePresent
+ *
+ * DetermineIfGuardPagePresent returns TRUE if the thread's stack contains a proper guard page. This function makes
+ * a physical check of the stack, rather than relying on whether or not the CLR is currently processing a stack
+ * overflow exception.
+ *
+ * It seems reasonable to want to check just the 3rd page for !MEM_COMMIT or PAGE_GUARD, but that's no good in a
+ * world where a) one can extend the guard region arbitrarily with SetThreadStackGuarantee(), b) a thread's stack
+ * could be pre-committed, and c) another lib might reset the guard page very high up on the stack, much as we
+ * do. In that world, we have to do VirtualQuery from the lower bound up until we find a region with PAGE_GUARD on
+ * it. If we've never SO'd, then that's two calls to VirtualQuery.
+ *
+ * Parameters:
+ * None
+ *
+ * Returns:
+ * TRUE if the thread has a guard page, FALSE otherwise.
+ */
+BOOL Thread::DetermineIfGuardPagePresent()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+ BOOL bStackGuarded = FALSE;
+ UINT_PTR uStackBase = (UINT_PTR)GetCachedStackBase();
+ UINT_PTR uStackLimit = (UINT_PTR)GetCachedStackLimit();
+
+ // Note: we start our queries after the hard guard page (one page up from the base of the stack). We know the
+ // very last region of the stack is never the guard page (it's always the uncommitted "hard" guard page) so there's
+ // no need to waste a query on it.
+ bStackGuarded = DoesRegionContainGuardPage(uStackLimit + HARD_GUARD_REGION_SIZE,
+ uStackBase);
+
+ LOG((LF_EH, LL_INFO10000, "Thread::DetermineIfGuardPagePresent: stack guard page: %s\n", bStackGuarded ? "PRESENT" : "MISSING"));
+
+ return bStackGuarded;
+#else // !FEATURE_PAL
+ return TRUE;
+#endif // !FEATURE_PAL
+}
+
+/*
+ * GetLastNormalStackAddress
+ *
+ * GetLastNormalStackAddress returns the last stack address before the guard
+ * region of this thread. This is the last address that one could write to
+ * before a stack overflow occurs.
+ *
+ * Parameters:
+ * None
+ *
+ * Returns:
+ * Address of the first page of the guard region.
+ */
+UINT_PTR Thread::GetLastNormalStackAddress()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return GetLastNormalStackAddress((UINT_PTR)m_CacheStackLimit);
+}
+
+
+#ifdef FEATURE_STACK_PROBE
+/*
+ * CanResetStackTo
+ *
+ * Given a target stack pointer, this function will tell us whether or not we could restore the guard page if we
+ * unwound the stack that far.
+ *
+ * Parameters:
+ * stackPointer -- stack pointer that we want to try to reset the thread's stack up to.
+ *
+ * Returns:
+ * TRUE if there's enough room to reset the stack, false otherwise.
+ */
+BOOL Thread::CanResetStackTo(LPCVOID stackPointer)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // How much space between the given stack pointer and the first guard page?
+ //
+ // This must be signed since the stack pointer might be in the guard region,
+ // which is at a lower address than GetLastNormalStackAddress will return.
+ INT_PTR iStackSpaceLeft = (INT_PTR)stackPointer - GetLastNormalStackAddress();
+
+ // We need to have enough space to call back into the EE from the handler, so we use twice the entry point amount.
+ // We need enough to do work and enough that partway through that work we won't probe and COMPlusThrowSO.
+
+ const INT_PTR iStackSizeThreshold = (ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT * 2) * OS_PAGE_SIZE);
+
+ if (iStackSpaceLeft > iStackSizeThreshold)
+ {
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+}
+#endif // FEATURE_STACK_PROBE
+
+/*
+ * IsStackSpaceAvailable
+ *
+ * Given a number of stack pages, this function will tell us whether or not we have that much space
+ * before the top of the stack. If we are in the guard region we must already be handling an SO,
+ * so we report how much space is left in the guard region.
+ *
+ * Parameters:
+ * numPages -- the number of pages that we need. This can be a fractional amount.
+ *
+ * Returns:
+ * TRUE if there's that many pages of stack available
+ */
+BOOL Thread::IsStackSpaceAvailable(float numPages)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // How much space between the current stack pointer and the first guard page?
+ //
+ // This must be signed since the stack pointer might be in the guard region,
+ // which is at a lower address than GetLastNormalStackAddress will return.
+ float iStackSpaceLeft = static_cast<float>((INT_PTR)GetCurrentSP() - (INT_PTR)GetLastNormalStackAddress());
+
+ // If we have access to the stack guarantee (either in the guard region or we've tripped the guard page), then
+ // use that.
+ if ((iStackSpaceLeft/OS_PAGE_SIZE) < numPages && !DetermineIfGuardPagePresent())
+ {
+ UINT_PTR stackGuarantee = GetStackGuarantee();
+ // GetLastNormalStackAddress actually returns the 2nd to last stack page on the stack. We'll add that to our available
+ // amount of stack, in addition to any sort of stack guarantee we might have.
+ //
+ // All these values are OS supplied, and will never overflow. (If they do, that means the stack is on the order
+ // of GBs, which isn't possible.)
+ iStackSpaceLeft += stackGuarantee + OS_PAGE_SIZE;
+ }
+ if ((iStackSpaceLeft/OS_PAGE_SIZE) < numPages)
+ {
+ return FALSE;
+ }
+
+ return TRUE;
+}
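+
+// Usage note: numPages may be fractional; e.g. IsStackSpaceAvailable(1.5) asks
+// whether roughly a page and a half of stack remains above the guard region
+// (plus the stack guarantee when the guard page has already been tripped).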
+
+/*
+ * GetStackGuarantee
+ *
+ * Returns the amount of stack guaranteed after an SO but before the OS rips the process.
+ *
+ * Parameters:
+ * none
+ *
+ * Returns:
+ * The size of the stack guarantee, in bytes.
+ */
+UINT_PTR Thread::GetStackGuarantee()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifndef FEATURE_PAL
+ // There is a new API available on newer OSes called SetThreadStackGuarantee. It allows you to change the size of
+ // the guard region on a per-thread basis. If we're running on an OS that supports the API, then we must query
+ // it to see if someone has changed the size of the guard region for this thread.
+ if (!IsSetThreadStackGuaranteeInUse())
+ {
+ return SIZEOF_DEFAULT_STACK_GUARANTEE;
+ }
+
+ ULONG cbNewStackGuarantee = 0;
+ // Passing in a value of 0 means that we're querying, and the value is changed with the new guard region
+ // size.
+ if (::SetThreadStackGuarantee(&cbNewStackGuarantee) &&
+ (cbNewStackGuarantee != 0))
+ {
+ return cbNewStackGuarantee;
+ }
+#endif // FEATURE_PAL
+
+ return SIZEOF_DEFAULT_STACK_GUARANTEE;
+}
+
+#ifndef FEATURE_PAL
+
+//
+// MarkPageAsGuard
+//
+// Given a page base address, try to turn it into a guard page and then requery to determine success.
+//
+// static // private
+BOOL Thread::MarkPageAsGuard(UINT_PTR uGuardPageBase)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ DWORD flOldProtect;
+
+ ClrVirtualProtect((LPVOID)uGuardPageBase, 1,
+ (PAGE_READWRITE | PAGE_GUARD), &flOldProtect);
+
+ // Intentionally ignore return value -- if it failed, we'll find out below
+ // and keep moving up the stack until we either succeed or we hit the guard
+ // region. If we don't succeed before we hit the guard region, we'll end up
+ // with a fatal error.
+
+ // Now, make sure the guard page is really there. If it's not, then VirtualProtect most likely failed
+ // because our stack had grown onto the page we were trying to protect by the time we made it into
+ // VirtualProtect. So try the next page down.
+ MEMORY_BASIC_INFORMATION meminfo;
+ SIZE_T dwRes;
+
+ dwRes = ClrVirtualQuery((const void *)uGuardPageBase, &meminfo, sizeof(meminfo));
+
+ return ((sizeof(meminfo) == dwRes) && (meminfo.Protect & PAGE_GUARD));
+}
+
+
+/*
+ * RestoreGuardPage
+ *
+ * RestoreGuardPage will replace the guard page on this thread's stack. The assumption is that it was removed by
+ * the OS due to a stack overflow exception. This function requires that you know that you have enough stack space
+ * to restore the guard page, so make sure you know what you're doing when you decide to call this.
+ *
+ * Parameters:
+ * None
+ *
+ * Returns:
+ * Nothing
+ */
+VOID Thread::RestoreGuardPage()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // Need a hard SO probe here.
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ BOOL bStackGuarded = DetermineIfGuardPagePresent();
+
+ // If the guard page is still there, then just return.
+ if (bStackGuarded)
+ {
+ LOG((LF_EH, LL_INFO100, "Thread::RestoreGuardPage: no need to restore... guard page is already there.\n"));
+ return;
+ }
+
+ UINT_PTR approxStackPointer;
+ UINT_PTR guardPageBase;
+ UINT_PTR guardRegionThreshold;
+ BOOL pageMissing;
+
+ if (!bStackGuarded)
+ {
+ // The normal guard page is the 3rd page from the base. The first page is the "hard" guard, the second one is
+ // reserve, and the 3rd one is marked as a guard page. However, since there is now an API (on some platforms)
+ // to change the size of the guard region, we'll just go ahead and protect the next page down from where we are
+ // now. The guard page will get pushed forward again, just like normal, until the next stack overflow.
+ approxStackPointer = (UINT_PTR)GetCurrentSP();
+ guardPageBase = (UINT_PTR)ALIGN_DOWN(approxStackPointer, OS_PAGE_SIZE) - OS_PAGE_SIZE;
+
+ // The OS uses the soft guard page to update the stack info in the TEB. If our guard page is not beyond the current stack, the TEB
+ // will not be updated, and then the OS's check of the stack during exception dispatch will fail.
+ if (approxStackPointer >= guardPageBase)
+ {
+ guardPageBase -= OS_PAGE_SIZE;
+ }
+ // If we're currently "too close" to the page we want to mark as a guard then the call to VirtualProtect to set
+ // PAGE_GUARD will fail, but it won't return an error. Therefore, we protect the page, then query it to make
+ // sure it worked. If it didn't, we try the next page down. We'll either find a page to protect, or run into
+ // the guard region and rip the process down with EEPOLICY_HANDLE_FATAL_ERROR below.
+ guardRegionThreshold = GetLastNormalStackAddress();
+ pageMissing = TRUE;
+
+ while (pageMissing)
+ {
+ LOG((LF_EH, LL_INFO10000,
+ "Thread::RestoreGuardPage: restoring guard page @ 0x%p, approxStackPointer=0x%p, "
+ "last normal stack address=0x%p\n",
+ guardPageBase, approxStackPointer, guardRegionThreshold));
+
+ // Make sure we set the guard page above the guard region.
+ if (guardPageBase < guardRegionThreshold)
+ {
+ goto lFatalError;
+ }
+
+ if (MarkPageAsGuard(guardPageBase))
+ {
+ // The current GuardPage should be beyond the current SP.
+ _ASSERTE (guardPageBase < approxStackPointer);
+ pageMissing = FALSE;
+ }
+ else
+ {
+ guardPageBase -= OS_PAGE_SIZE;
+ }
+ }
+ }
+
+ FinishSOWork();
+ //GetAppDomain()->EnableADUnloadWorker(EEPolicy::ADU_Rude);
+
+ INDEBUG(DebugLogStackMBIs());
+
+ return;
+
+lFatalError:
+ STRESS_LOG2(LF_EH, LL_ALWAYS,
+ "Thread::RestoreGuardPage: too close to the guard region (0x%p) to restore guard page @0x%p\n",
+ guardRegionThreshold, guardPageBase);
+ _ASSERTE(!"Too close to the guard page to reset it!");
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_STACKOVERFLOW);
+}
+
+#endif // !FEATURE_PAL
+
+#endif // #ifndef DACCESS_COMPILE
+
+//
+// InitRegDisplay: initializes a REGDISPLAY for a thread. If validContext
+// is false, pRD is filled from the current context of the thread. The
+// thread's current context is also filled in pctx. If validContext is true,
+// pctx should point to a valid context and pRD is filled from that.
+//
+bool Thread::InitRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx, bool validContext)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (!validContext)
+ {
+ if (GetFilterContext()!= NULL)
+ {
+ pctx = GetFilterContext();
+ }
+ else
+ {
+#ifdef DACCESS_COMPILE
+ DacNotImpl();
+#else
+ pctx->ContextFlags = CONTEXT_FULL;
+
+ _ASSERTE(this != GetThread()); // do not call GetThreadContext on the active thread
+
+ BOOL ret = EEGetThreadContext(this, pctx);
+ if (!ret)
+ {
+ SetIP(pctx, 0);
+#ifdef _TARGET_X86_
+ pRD->ControlPC = pctx->Eip;
+ pRD->PCTAddr = (TADDR)&(pctx->Eip);
+#elif defined(_TARGET_AMD64_)
+ // nothing more to do here, on Win64 setting the IP to 0 is enough.
+#elif defined(_TARGET_ARM_)
+ // nothing more to do here, on ARM setting the IP to 0 is enough.
+#else
+ PORTABILITY_ASSERT("NYI for platform Thread::InitRegDisplay");
+#endif
+
+ return false;
+ }
+#endif // DACCESS_COMPILE
+ }
+ }
+
+ FillRegDisplay( pRD, pctx );
+
+ return true;
+}
+
+
+void Thread::FillRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ ::FillRegDisplay(pRD, pctx);
+
+#if defined(DEBUG_REGDISPLAY) && (defined(_WIN64) || defined(_TARGET_ARM_))
+ CONSISTENCY_CHECK(!pRD->_pThread || pRD->_pThread == this);
+ pRD->_pThread = this;
+
+ CheckRegDisplaySP(pRD);
+#endif // defined(DEBUG_REGDISPLAY) && (defined(_WIN64) || defined(_TARGET_ARM_))
+}
+
+
+#if defined(DEBUG_REGDISPLAY) && (defined(_WIN64) || defined(_TARGET_ARM_))
+
+void CheckRegDisplaySP (REGDISPLAY *pRD)
+{
+ if (pRD->SP && pRD->_pThread)
+ {
+ _ASSERTE(PTR_VOID(pRD->SP) >= pRD->_pThread->GetCachedStackLimit());
+ _ASSERTE(PTR_VOID(pRD->SP) < pRD->_pThread->GetCachedStackBase());
+ }
+}
+
+#endif // defined(DEBUG_REGDISPLAY) && (defined(_WIN64) || defined(_TARGET_ARM_))
+
+// Trip Functions
+// ==============
+// When a thread reaches a safe place, it will rendezvous back with us, via one of
+// the following trip functions:
+
+void CommonTripThread()
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ Thread *thread = GetThread();
+
+ thread->HandleThreadAbort ();
+
+ if (thread->IsYieldRequested())
+ {
+ __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ }
+
+ if (thread->CatchAtSafePoint())
+ {
+ _ASSERTE(!ThreadStore::HoldingThreadStore(thread));
+#ifdef FEATURE_HIJACK
+ thread->UnhijackThread();
+#endif // FEATURE_HIJACK
+
+ // Trap
+ thread->PulseGCMode();
+ }
+#else
+ DacNotImpl();
+#endif // #ifndef DACCESS_COMPILE
+}
+
+#ifndef DACCESS_COMPILE
+
+void Thread::SetFilterContext(CONTEXT *pContext)
+{
+ // SetFilterContext is like pushing a Frame onto the Frame chain.
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE; // Absolutely must be in coop to coordinate w/ Runtime suspension.
+ PRECONDITION(GetThread() == this); // must be on current thread.
+ } CONTRACTL_END;
+
+ m_debuggerFilterContext = pContext;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+T_CONTEXT *Thread::GetFilterContext(void)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_debuggerFilterContext;
+}
+
+#ifndef DACCESS_COMPILE
+
+// @todo - eventually completely remove the CantStop count on the thread and use
+// the one in the PreDef block. For now, we increment both our thread counter,
+// and the FLS counter. Eventually we can remove our thread counter and only use
+// the FLS counter.
+void Thread::SetDebugCantStop(bool fCantStop)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (fCantStop)
+ {
+ IncCantStopCount();
+ m_debuggerCantStop++;
+ }
+ else
+ {
+ DecCantStopCount();
+ m_debuggerCantStop--;
+ }
+}
+
+// @todo - remove this, we only read this from oop.
+bool Thread::GetDebugCantStop(void)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_debuggerCantStop != 0;
+}
+
+
+//-----------------------------------------------------------------------------
+// Call w/a wrapper.
+// We've already transitioned AppDomains here. This just places a first-pass filter to sniff
+// for catch-handler-found callbacks for the debugger.
+//-----------------------------------------------------------------------------
+void MakeADCallDebuggerWrapper(
+ FPAPPDOMAINCALLBACK fpCallback,
+ CtxTransitionBaseArgs * args,
+ ContextTransitionFrame* pFrame)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ BYTE * pCatcherStackAddr = (BYTE*) pFrame;
+
+ struct Param : NotifyOfCHFFilterWrapperParam
+ {
+ FPAPPDOMAINCALLBACK fpCallback;
+ CtxTransitionBaseArgs *args;
+ } param;
+ param.pFrame = pCatcherStackAddr;
+ param.fpCallback = fpCallback;
+ param.args = args;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ pParam->fpCallback(pParam->args);
+ }
+ PAL_EXCEPT_FILTER(AppDomainTransitionExceptionFilter)
+ {
+ // Should never reach here b/c handler should always continue search.
+ _ASSERTE(false);
+ }
+ PAL_ENDTRY
+}
+
+
+// Invoke a callback in another appdomain.
+// Caller should have checked that we're actually transitioning domains here.
+void MakeCallWithAppDomainTransition(
+ ADID TargetDomain,
+ FPAPPDOMAINCALLBACK fpCallback,
+ CtxTransitionBaseArgs * args)
+{
+ DEBUG_ASSURE_NO_RETURN_BEGIN(MAKECALL)
+
+ Thread* _ctx_trans_pThread = GetThread();
+ TESTHOOKCALL(EnteringAppDomain((TargetDomain.m_dwId)));
+ AppDomainFromIDHolder pTargetDomain(TargetDomain, TRUE);
+ pTargetDomain.ThrowIfUnloaded();
+ _ASSERTE(_ctx_trans_pThread != NULL);
+ _ASSERTE(_ctx_trans_pThread->GetDomain()->GetId()!= TargetDomain);
+
+ bool _ctx_trans_fRaiseNeeded = false;
+ Exception* _ctx_trans_pTargetDomainException=NULL;
+
+ FrameWithCookie<ContextTransitionFrame> _ctx_trans_Frame;
+ ContextTransitionFrame* _ctx_trans_pFrame = &_ctx_trans_Frame;
+
+ _ctx_trans_pThread->EnterContextRestricted(
+ pTargetDomain->GetDefaultContext(),
+ _ctx_trans_pFrame);
+
+ pTargetDomain.Release();
+ args->pCtxFrame = _ctx_trans_pFrame;
+ TESTHOOKCALL(EnteredAppDomain((TargetDomain.m_dwId)));
+ /* work around unreachable code warning */
+ EX_TRY
+ {
+ // Invoke the callback
+ if (CORDebuggerAttached())
+ {
+ // If a debugger is attached, do it through a wrapper that will sniff for CHF callbacks.
+ MakeADCallDebuggerWrapper(fpCallback, args, GET_CTX_TRANSITION_FRAME());
+ }
+ else
+ {
+ // If no debugger is attached, call directly.
+ fpCallback(args);
+ }
+ }
+ EX_CATCH
+ {
+ LOG((LF_EH|LF_APPDOMAIN, LL_INFO1000, "ENTER_DOMAIN(%s, %s, %d): exception in flight\n",
+ __FUNCTION__, __FILE__, __LINE__));
+
+ _ctx_trans_pTargetDomainException=EXTRACT_EXCEPTION();
+ _ctx_trans_fRaiseNeeded = true;
+ }
+ /* SwallowAllExceptions is fine because we don't get to this point */
+ /* unless fRaiseNeeded = true or no exception was thrown */
+ EX_END_CATCH(SwallowAllExceptions);
+ TESTHOOKCALL(LeavingAppDomain((TargetDomain.m_dwId)));
+ if (_ctx_trans_fRaiseNeeded)
+ {
+ LOG((LF_EH, LL_INFO1000, "RaiseCrossContextException(%s, %s, %d)\n",
+ __FUNCTION__, __FILE__, __LINE__));
+ _ctx_trans_pThread->RaiseCrossContextException(_ctx_trans_pTargetDomainException,_ctx_trans_pFrame);
+ }
+
+ LOG((LF_APPDOMAIN, LL_INFO1000, "LEAVE_DOMAIN(%s, %s, %d)\n",
+ __FUNCTION__, __FILE__, __LINE__));
+
+ _ctx_trans_pThread->ReturnToContext(_ctx_trans_pFrame);
+
+#ifdef FEATURE_TESTHOOKS
+ TESTHOOKCALL(LeftAppDomain(TargetDomain.m_dwId));
+#endif
+
+ DEBUG_ASSURE_NO_RETURN_END(MAKECALL)
+}
+
+
+#ifdef FEATURE_REMOTING
+void Thread::SetExposedContext(Context *c)
+{
+
+ // Set the ExposedContext ...
+
+ // Note that we use GetxxRaw() here to cover our bootstrap case
+ // for AppDomain proxy creation
+ // Leaving the exposed object NULL lets us create the default
+ // managed context just before we marshal a new AppDomain in
+ // RemotingServices::CreateProxyForDomain.
+
+ Thread* pThread = GetThread();
+ if (!pThread)
+ return;
+
+ CONTRACTL {
+ NOTHROW;
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if(m_ExposedObject != NULL) {
+ THREADBASEREF threadObj = (THREADBASEREF) ObjectFromHandle(m_ExposedObject);
+ if(threadObj != NULL)
+ if (!c)
+ threadObj->SetExposedContext(NULL);
+ else
+ threadObj->SetExposedContext(c->GetExposedObjectRaw());
+
+ }
+}
+#endif
+
+void Thread::InitContext()
+{
+ CONTRACTL {
+ THROWS;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ // this should only be called when initializing a thread
+ _ASSERTE(m_Context == NULL);
+ _ASSERTE(m_pDomain == NULL);
+ GCX_COOP_NO_THREAD_BROKEN();
+ m_Context = SystemDomain::System()->DefaultDomain()->GetDefaultContext();
+#ifdef FEATURE_REMOTING
+ SetExposedContext(m_Context);
+#endif
+ m_pDomain = m_Context->GetDomain();
+ _ASSERTE(m_pDomain);
+ m_pDomain->ThreadEnter(this, NULL);
+
+ // Every thread starts in the default domain, so push it here.
+ PushDomain((ADID)DefaultADID);
+}
+
+void Thread::ClearContext()
+{
+ CONTRACTL {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ // if one is null, both must be
+ _ASSERTE((m_pDomain && m_Context) || (!m_pDomain && !m_Context));
+
+ if (!m_pDomain)
+ return;
+
+ m_pDomain->ThreadExit(this, NULL);
+
+ // must set the exposed context to null first, otherwise object verification
+ // checks will AV when m_Context is null
+#ifdef FEATURE_REMOTING
+ SetExposedContext(NULL);
+#endif
+ m_pDomain = NULL;
+#ifdef FEATURE_COMINTEROP
+ m_fDisableComObjectEagerCleanup = false;
+#endif //FEATURE_COMINTEROP
+ m_Context = NULL;
+ m_ADStack.ClearDomainStack();
+}
+
+
+void Thread::DoContextCallBack(ADID appDomain, Context *pContext, Context::ADCallBackFcnType pTarget, LPVOID args)
+{
+ // Do not dereference pContext if it's not from the current appdomain
+
+#ifdef _DEBUG
+ TADDR espVal = (TADDR)GetCurrentSP();
+
+ LOG((LF_APPDOMAIN, LL_INFO100, "Thread::DoADCallBack Calling %p at esp %p in [%d]\n",
+ pTarget, espVal, appDomain.m_dwId));
+#endif
+ _ASSERTE(GetThread()->GetContext() != pContext);
+ Thread* pThread = GetThread();
+
+ // Get the default context for the current domain as well as for the
+ // destination domain.
+ AppDomain* pCurrDomain = pThread->GetContext()->GetDomain();
+ Context* pCurrDefCtx = pCurrDomain->GetDefaultContext();
+ BOOL bDefaultTargetCtx=FALSE;
+
+ {
+ AppDomainFromIDHolder ad(appDomain, TRUE);
+ ad.ThrowIfUnloaded();
+ bDefaultTargetCtx=(ad->GetDefaultContext()==pContext);
+ }
+
+ if (pCurrDefCtx == pThread->GetContext() && bDefaultTargetCtx)
+ {
+ ENTER_DOMAIN_ID(appDomain);
+ (pTarget)(args);
+ END_DOMAIN_TRANSITION;
+ }
+ else
+ {
+#ifdef FEATURE_REMOTING
+ _ASSERTE(pContext->GetDomain()==::GetAppDomain());
+ Context::ADCallBackArgs callTgtArgs = {pTarget, args};
+ Context::CallBackInfo callBackInfo = {Context::ADTransition_callback, (void*) &callTgtArgs};
+ Context::RequestCallBack(appDomain,pContext, (void*) &callBackInfo);
+#else
+ UNREACHABLE();
+#endif
+ }
+ LOG((LF_APPDOMAIN, LL_INFO100, "Thread::DoADCallBack Done at esp %p\n", espVal));
+}
+
+
+void Thread::DoADCallBack(AppDomain* pDomain , Context::ADCallBackFcnType pTarget, LPVOID args, DWORD dwADV,
+ BOOL fSetupEHAtTransition /* = TRUE */)
+{
+
+
+#ifdef _DEBUG
+ TADDR espVal = (TADDR)GetCurrentSP();
+
+ LOG((LF_APPDOMAIN, LL_INFO100, "Thread::DoADCallBack Calling %p at esp %p in [%d]\n",
+ pTarget, espVal, pDomain->GetId().m_dwId));
+#endif
+ Thread* pThread = GetThread();
+
+ // Get the default context for the current domain as well as for the
+ // destination domain.
+ AppDomain* pCurrDomain = pThread->GetContext()->GetDomain();
+
+ if (pCurrDomain!=pDomain)
+ {
+ // use the target domain's default context as the target context
+ // so that the actual call to a transparent proxy would enter the object into the correct context.
+
+ BOOL fThrow = FALSE;
+
+#ifdef FEATURE_PAL
+ // FEATURE_PAL must setup EH at AD transition - the option to omit the setup
+ // is only for regular Windows builds.
+ _ASSERTE(fSetupEHAtTransition);
+#endif // FEATURE_PAL
+
+ LOG((LF_APPDOMAIN, LL_INFO10, "Thread::DoADCallBack - performing AD transition with%s EH at transition boundary.\n",
+ (fSetupEHAtTransition == FALSE)?"out":""));
+
+ if (fSetupEHAtTransition)
+ {
+ ENTER_DOMAIN_PTR(pDomain,dwADV)
+ {
+ (pTarget)(args);
+
+ // unloadBoundary is cleared by ReturnToContext, so get it now.
+ Frame* unloadBoundaryFrame = pThread->GetUnloadBoundaryFrame();
+ fThrow = pThread->ShouldChangeAbortToUnload(GET_CTX_TRANSITION_FRAME(), unloadBoundaryFrame);
+ }
+ END_DOMAIN_TRANSITION;
+ }
+#ifndef FEATURE_PAL
+ else
+ {
+ ENTER_DOMAIN_PTR_NO_EH_AT_TRANSITION(pDomain,dwADV)
+ {
+ (pTarget)(args);
+
+ // unloadBoundary is cleared by ReturnToContext, so get it now.
+ Frame* unloadBoundaryFrame = pThread->GetUnloadBoundaryFrame();
+ fThrow = pThread->ShouldChangeAbortToUnload(GET_CTX_TRANSITION_FRAME(), unloadBoundaryFrame);
+ }
+ END_DOMAIN_TRANSITION_NO_EH_AT_TRANSITION;
+ }
+#endif // !FEATURE_PAL
+
+        // if someone caught the abort before it got back out to the AD transition (like DispatchEx_xxx does)
+        // then we need to turn the abort into an unload, as they're going to keep seeing it anyway
+ if (fThrow)
+ {
+ LOG((LF_APPDOMAIN, LL_INFO10, "Thread::DoADCallBack turning abort into unload\n"));
+ COMPlusThrow(kAppDomainUnloadedException, W("Remoting_AppDomainUnloaded_ThreadUnwound"));
+ }
+ }
+ else
+ {
+#ifdef FEATURE_REMOTING
+ Context::ADCallBackArgs callTgtArgs = {pTarget, args};
+ Context::CallBackInfo callBackInfo = {Context::ADTransition_callback, (void*) &callTgtArgs};
+ Context::RequestCallBack(CURRENT_APPDOMAIN_ID, pCurrDomain->GetDefaultContext(), (void*) &callBackInfo);
+#else
+ UNREACHABLE();
+#endif
+ }
+ LOG((LF_APPDOMAIN, LL_INFO100, "Thread::DoADCallBack Done at esp %p\n", espVal));
+}
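+
+// Illustrative call shape for DoADCallBack (MyCallback and pData are
+// hypothetical; ADV_FINALIZER is one of the domain-validity codes passed by
+// callers such as the finalizer path later in this file):
+//
+//   static void MyCallback(LPVOID pData) { /* runs inside pDomain */ }
+//   ...
+//   pThread->DoADCallBack(pDomain, MyCallback, pData, ADV_FINALIZER);
+//
+// The ADID overload that follows behaves the same, except that it resolves
+// the target domain from its ID, which guards against racing with an
+// AppDomain unload.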
+
+void Thread::DoADCallBack(ADID appDomainID , Context::ADCallBackFcnType pTarget, LPVOID args, BOOL fSetupEHAtTransition /* = TRUE */)
+{
+
+
+#ifdef _DEBUG
+ TADDR espVal = (TADDR)GetCurrentSP();
+
+ LOG((LF_APPDOMAIN, LL_INFO100, "Thread::DoADCallBack Calling %p at esp %p in [%d]\n",
+ pTarget, espVal, appDomainID.m_dwId));
+#endif
+ Thread* pThread = GetThread();
+
+ // Get the default context for the current domain as well as for the
+ // destination domain.
+ AppDomain* pCurrDomain = pThread->GetContext()->GetDomain();
+
+ if (pCurrDomain->GetId()!=appDomainID)
+ {
+ // use the target domain's default context as the target context
+ // so that the actual call to a transparent proxy would enter the object into the correct context.
+
+ BOOL fThrow = FALSE;
+
+#ifdef FEATURE_PAL
+ // FEATURE_PAL must setup EH at AD transition - the option to omit the setup
+ // is only for regular Windows builds.
+ _ASSERTE(fSetupEHAtTransition);
+#endif // FEATURE_PAL
+
+ LOG((LF_APPDOMAIN, LL_INFO10, "Thread::DoADCallBack - performing AD transition with%s EH at transition boundary.\n",
+ (fSetupEHAtTransition == FALSE)?"out":""));
+
+ if (fSetupEHAtTransition)
+ {
+ ENTER_DOMAIN_ID(appDomainID)
+ {
+ (pTarget)(args);
+
+ // unloadBoundary is cleared by ReturnToContext, so get it now.
+ Frame* unloadBoundaryFrame = pThread->GetUnloadBoundaryFrame();
+ fThrow = pThread->ShouldChangeAbortToUnload(GET_CTX_TRANSITION_FRAME(), unloadBoundaryFrame);
+ }
+ END_DOMAIN_TRANSITION;
+ }
+#ifndef FEATURE_PAL
+ else
+ {
+ ENTER_DOMAIN_ID_NO_EH_AT_TRANSITION(appDomainID)
+ {
+ (pTarget)(args);
+
+ // unloadBoundary is cleared by ReturnToContext, so get it now.
+ Frame* unloadBoundaryFrame = pThread->GetUnloadBoundaryFrame();
+ fThrow = pThread->ShouldChangeAbortToUnload(GET_CTX_TRANSITION_FRAME(), unloadBoundaryFrame);
+ }
+ END_DOMAIN_TRANSITION_NO_EH_AT_TRANSITION;
+ }
+#endif // !FEATURE_PAL
+
+        // if someone caught the abort before it got back out to the AD transition (like DispatchEx_xxx does)
+        // then we need to turn the abort into an unload, as they're going to keep seeing it anyway
+ if (fThrow)
+ {
+ LOG((LF_APPDOMAIN, LL_INFO10, "Thread::DoADCallBack turning abort into unload\n"));
+ COMPlusThrow(kAppDomainUnloadedException, W("Remoting_AppDomainUnloaded_ThreadUnwound"));
+ }
+ }
+ else
+ {
+#ifdef FEATURE_REMOTING
+ Context::ADCallBackArgs callTgtArgs = {pTarget, args};
+ Context::CallBackInfo callBackInfo = {Context::ADTransition_callback, (void*) &callTgtArgs};
+ Context::RequestCallBack(CURRENT_APPDOMAIN_ID, pCurrDomain->GetDefaultContext(), (void*) &callBackInfo);
+#else
+ UNREACHABLE();
+#endif
+ }
+ LOG((LF_APPDOMAIN, LL_INFO100, "Thread::DoADCallBack Done at esp %p\n", espVal));
+}
+
+void Thread::EnterContextRestricted(Context *pContext, ContextTransitionFrame *pFrame)
+{
+ CONTRACTL {
+ THROWS;
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(GetThread() == this);
+ _ASSERTE(pContext); // should never enter a null context
+ _ASSERTE(m_Context); // should always have a current context
+
+ AppDomain *pPrevDomain = m_pDomain;
+ AppDomain *pDomain = pContext->GetDomain();
+ // and it should always have an AD set
+ _ASSERTE(pDomain);
+
+ if (m_pDomain != pDomain && !pDomain->CanThreadEnter(this))
+ {
+ pFrame->SetReturnContext(NULL);
+ COMPlusThrow(kAppDomainUnloadedException);
+ }
+
+ pFrame->SetReturnContext(m_Context);
+ pFrame->SetReturnExecutionContext(NULL);
+
+ if (pPrevDomain != pDomain)
+ {
+ pFrame->SetLockCount(m_dwBeginLockCount, m_dwBeginCriticalRegionCount);
+ m_dwBeginLockCount = m_dwLockCount;
+ m_dwBeginCriticalRegionCount = m_dwCriticalRegionCount;
+ }
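+    // Bookkeeping sketch: m_dwBeginLockCount/m_dwBeginCriticalRegionCount now
+    // record the counts on entry to the new domain; the previous values were
+    // parked in the frame and are restored by ReturnToContext, which compares
+    // the begin/current counts to detect locks orphaned in the domain being
+    // left.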
+
+ if (m_Context == pContext) {
+ _ASSERTE(m_Context->GetDomain() == pContext->GetDomain());
+ return;
+ }
+
+ LOG((LF_APPDOMAIN, LL_INFO1000, "%sThread::EnterContext from (%p) [%d] (count %d)\n",
+ FinalizerThread::IsCurrentThreadFinalizer() ? "FT: " : "",
+ m_Context, m_Context->GetDomain()->GetId().m_dwId,
+ m_Context->GetDomain()->GetThreadEnterCount()));
+ LOG((LF_APPDOMAIN, LL_INFO1000, " into (%p) [%d] (count %d)\n", pContext,
+ pContext->GetDomain()->GetId().m_dwId,
+ pContext->GetDomain()->GetThreadEnterCount()));
+
+#ifdef _DEBUG_ADUNLOAD
+ printf("Thread::EnterContext %x from (%8.8x) [%d]\n", GetThreadId(), m_Context,
+ m_Context ? m_Context->GetDomain()->GetId() : -1);
+ printf(" into (%8.8x) [%d] %S\n", pContext,
+ pContext->GetDomain()->GetId());
+#endif
+
+ CantStopHolder hCantStop;
+
+ bool fChangedDomains = m_pDomain != pDomain;
+ if (fChangedDomains)
+ {
+
+#ifdef FEATURE_STACK_PROBE
+ if (pDomain == SystemDomain::System()->DefaultDomain() &&
+ GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
+ {
+ // Make sure default domain does not see SO.
+ // probe for our entry point amount and throw if not enough stack
+ RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT*2), this);
+ }
+#endif
+
+ _ASSERTE(pFrame);
+
+ PushDomain(pDomain->GetId());
+ STRESS_LOG1(LF_APPDOMAIN, LL_INFO100000, "Entering into ADID=%d\n", pDomain->GetId().m_dwId);
+
+#ifndef FEATURE_CORECLR
+ //
+ // Push execution contexts (that could contain call context) into frame to avoid leaks
+ //
+
+ if (IsExposedObjectSet())
+ {
+ THREADBASEREF ref = (THREADBASEREF) ObjectFromHandle(m_ExposedObject);
+ _ASSERTE(ref != NULL);
+ if (ref->GetExecutionContext() != NULL)
+ {
+ pFrame->SetReturnExecutionContext(ref->GetExecutionContext());
+ ref->SetExecutionContext(NULL);
+ }
+ }
+#endif //!FEATURE_CORECLR
+
+ //
+ // Store the last thrown object in the ContextTransitionFrame before we null it out
+        // to prevent it from leaking into the domain we are transitioning into.
+ //
+
+ pFrame->SetLastThrownObjectInParentContext(LastThrownObject());
+ SafeSetLastThrownObject(NULL);
+ }
+
+ m_Context = pContext;
+ pFrame->Push();
+
+#ifdef _DEBUG_ADUNLOAD
+ printf("Thread::EnterContext %x,%8.8x push? %d current frame is %8.8x\n", GetThreadId(), this, 1, GetFrame());
+#endif
+
+ if (fChangedDomains)
+ {
+ pDomain->ThreadEnter(this, pFrame);
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ if (g_fEnableARM)
+ {
+ // Update previous AppDomain's count of processor usage by threads executing within it.
+ pPrevDomain->UpdateProcessorUsage(QueryThreadProcessorUsage());
+ FireEtwThreadDomainEnter((ULONGLONG)this, (ULONGLONG)pDomain, GetClrInstanceId());
+ }
+#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+ // NULL out the Thread's pointer to the current ThreadLocalBlock. On the next
+ // access to thread static data, the Thread's pointer to the current ThreadLocalBlock
+ // will be updated correctly.
+ m_pThreadLocalBlock = NULL;
+
+ m_pDomain = pDomain;
+ SetAppDomain(m_pDomain);
+ }
+#ifdef FEATURE_REMOTING
+ SetExposedContext(pContext);
+#endif
+}
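+
+// Expected usage pattern (a sketch; in practice the ContextTransitionFrame is
+// set up by the ENTER_DOMAIN_*/END_DOMAIN_TRANSITION macros rather than by
+// hand):
+//
+//   FrameWithCookie<ContextTransitionFrame> frame;
+//   pThread->EnterContextRestricted(pNewContext, &frame);
+//   ... run code in the target context/domain ...
+//   pThread->ReturnToContext(&frame);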
+
+// The main difference between EnterContext and ReturnToContext is that we are
+// allowed to return into a domain that is unloading but cannot enter a domain
+// that is unloading
+void Thread::ReturnToContext(ContextTransitionFrame *pFrame)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+ _ASSERTE(GetThread() == this);
+
+ Context *pReturnContext = pFrame->GetReturnContext();
+ _ASSERTE(pReturnContext);
+
+ ADID pADOnStack;
+
+ AppDomain *pReturnDomain = pReturnContext->GetDomain();
+ AppDomain* pCurrentDomain = m_pDomain;
+
+ bool fChangedDomains = m_pDomain != pReturnDomain;
+
+ if (fChangedDomains)
+ {
+ if (HasLockInCurrentDomain())
+ {
+ if (GetAppDomain()->IsDefaultDomain() || // We should never orphan a lock in default domain.
+ !IsRudeAbort()) // If rudeabort, managed backout may not be run.
+ {
+ // One would like to assert that this case never occurs, but
+ // a rude abort can easily leave unreachable locked objects,
+ // which we have to allow.
+ STRESS_LOG2(LF_SYNC, LL_INFO1000, "Locks are orphaned while exiting a domain (enter: %d, exit: %d)\n", m_dwBeginLockCount, m_dwLockCount);
+#ifdef _DEBUG
+ STRESS_LOG0 (LF_APPDOMAIN, LL_INFO10, "Thread::ReturnToContext Lock not released\n");
+#endif
+ }
+
+ AppDomain *pFromDomain = GetAppDomain();
+
+            // There is a race when the EE Thread for a new thread is allocated in place of the old EE Thread.
+            // The lock accounting will get confused if there are orphaned locks. Set the flag that allows us to relax a few asserts.
+ SetThreadStateNC(TSNC_UnbalancedLocks);
+ pFromDomain->SetOrphanedLocks();
+
+ if (!pFromDomain->IsDefaultDomain())
+ {
+ // If a Thread orphaned a lock, we don't want a host to recycle the Thread object,
+ // since the lock count is reset when the thread leaves this domain.
+ SetThreadStateNC(TSNC_CannotRecycle);
+ }
+
+ // It is a disaster if a lock leaks in default domain. We can never unload default domain.
+ // _ASSERTE (!pFromDomain->IsDefaultDomain());
+ EPolicyAction action = GetEEPolicy()->GetActionOnFailure(FAIL_OrphanedLock);
+ switch (action)
+ {
+ case eUnloadAppDomain:
+ if (!pFromDomain->IsDefaultDomain())
+ {
+ pFromDomain->EnableADUnloadWorker(EEPolicy::ADU_Safe);
+ }
+ break;
+ case eRudeUnloadAppDomain:
+ if (!pFromDomain->IsDefaultDomain())
+ {
+ pFromDomain->EnableADUnloadWorker(EEPolicy::ADU_Rude);
+ }
+ break;
+ case eExitProcess:
+ case eFastExitProcess:
+ case eRudeExitProcess:
+ case eDisableRuntime:
+ GetEEPolicy()->HandleExitProcessFromEscalation(action,HOST_E_EXITPROCESS_ADUNLOAD);
+ break;
+ default:
+ break;
+ }
+ }
+
+ m_dwLockCount = m_dwBeginLockCount;
+ m_dwCriticalRegionCount = m_dwBeginCriticalRegionCount;
+
+ pFrame->GetLockCount(&m_dwBeginLockCount, &m_dwBeginCriticalRegionCount);
+ }
+
+ if (m_Context == pReturnContext)
+ {
+ _ASSERTE(m_Context->GetDomain() == pReturnContext->GetDomain());
+ return;
+ }
+
+ GCX_COOP();
+
+ LOG((LF_APPDOMAIN, LL_INFO1000, "%sThread::ReturnToContext from (%p) [%d] (count %d)\n",
+ FinalizerThread::IsCurrentThreadFinalizer() ? "FT: " : "",
+ m_Context, m_Context->GetDomain()->GetId().m_dwId,
+ m_Context->GetDomain()->GetThreadEnterCount()));
+ LOG((LF_APPDOMAIN, LL_INFO1000, " into (%p) [%d] (count %d)\n", pReturnContext,
+ pReturnContext->GetDomain()->GetId().m_dwId,
+ pReturnContext->GetDomain()->GetThreadEnterCount()));
+
+#ifdef _DEBUG_ADUNLOAD
+ printf("Thread::ReturnToContext %x from (%p) [%d]\n", GetThreadId(), m_Context,
+ m_Context->GetDomain()->GetId(),
+ printf(" into (%p) [%d]\n", pReturnContext,
+ pReturnContext->GetDomain()->GetId(),
+ m_Context->GetDomain()->GetThreadEnterCount());
+#endif
+
+ CantStopHolder hCantStop;
+
+ m_Context = pReturnContext;
+#ifdef FEATURE_REMOTING
+ SetExposedContext(pReturnContext);
+#endif
+
+ if (fChangedDomains)
+ {
+ pADOnStack = m_ADStack.PopDomain();
+ STRESS_LOG2(LF_APPDOMAIN, LL_INFO100000, "Returning from %d to %d\n", pADOnStack.m_dwId, pReturnContext->GetDomain()->GetId().m_dwId);
+
+ _ASSERTE(pADOnStack == m_pDomain->GetId());
+
+ _ASSERTE(pFrame);
+ //_ASSERTE(!fLinkFrame || pThread->GetFrame() == pFrame);
+
+ FlushIBCInfo();
+
+ // NULL out the Thread's pointer to the current ThreadLocalBlock. On the next
+ // access to thread static data, the Thread's pointer to the current ThreadLocalBlock
+ // will be updated correctly.
+ m_pThreadLocalBlock = NULL;
+
+ m_pDomain = pReturnDomain;
+ SetAppDomain(pReturnDomain);
+
+ if (pFrame == m_pUnloadBoundaryFrame)
+ {
+ m_pUnloadBoundaryFrame = NULL;
+ if (IsAbortRequested())
+ {
+ EEResetAbort(TAR_ADUnload);
+ }
+ ResetBeginAbortedForADUnload();
+ }
+
+ // Restore the last thrown object to what it was before the AD transition. Note that if
+        // an exception was thrown out of the AD we transitioned into, it will be raised in
+ // RaiseCrossContextException and the EH system will store it as the last thrown
+ // object if it gets handled by an EX_CATCH.
+ SafeSetLastThrownObject(pFrame->GetLastThrownObjectInParentContext());
+ }
+
+ pFrame->Pop();
+
+ if (fChangedDomains)
+ {
+#ifndef FEATURE_CORECLR
+ //
+ // Pop execution contexts (could contain call context) from frame if applicable
+ //
+
+ if (IsExposedObjectSet())
+ {
+ THREADBASEREF ref = (THREADBASEREF) ObjectFromHandle(m_ExposedObject);
+ _ASSERTE(ref != NULL);
+ ref->SetExecutionContext(pFrame->GetReturnExecutionContext());
+ }
+#endif //!FEATURE_CORECLR
+
+ // Do this last so that thread is not labeled as out of the domain until all cleanup is done.
+ ADID adid=pCurrentDomain->GetId();
+ pCurrentDomain->ThreadExit(this, pFrame);
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ if (g_fEnableARM)
+ {
+ // Update the old AppDomain's count of processor usage by threads executing within it.
+ pCurrentDomain->UpdateProcessorUsage(QueryThreadProcessorUsage());
+ FireEtwThreadDomainEnter((ULONGLONG)this, (ULONGLONG)pReturnDomain, GetClrInstanceId());
+ }
+#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ }
+
+ if (fChangedDomains && IsAbortRequested() && HasLockInCurrentDomain())
+ {
+ EPolicyAction action = GetEEPolicy()->GetActionOnFailure(FAIL_CriticalResource);
+ // It is a disaster if a lock leaks in default domain. We can never unload default domain.
+ // _ASSERTE (action == eThrowException || !pReturnDomain->IsDefaultDomain());
+ switch (action)
+ {
+ case eUnloadAppDomain:
+ if (!pReturnDomain->IsDefaultDomain())
+ {
+ pReturnDomain->EnableADUnloadWorker(EEPolicy::ADU_Safe);
+ }
+ break;
+ case eRudeUnloadAppDomain:
+ if (!pReturnDomain->IsDefaultDomain())
+ {
+ pReturnDomain->EnableADUnloadWorker(EEPolicy::ADU_Rude);
+ }
+ break;
+ case eExitProcess:
+ case eFastExitProcess:
+ case eRudeExitProcess:
+ case eDisableRuntime:
+ GetEEPolicy()->HandleExitProcessFromEscalation(action,HOST_E_EXITPROCESS_ADUNLOAD);
+ break;
+ default:
+ break;
+ }
+ }
+
+#ifdef _DEBUG_ADUNLOAD
+ printf("Thread::ReturnToContext %x,%8.8x pop? %d current frame is %8.8x\n", GetThreadId(), this, 1, GetFrame());
+#endif
+
+ return;
+}
+
+
+void Thread::ReturnToContextAndThrow(ContextTransitionFrame* pFrame, EEException* pEx, BOOL* pContextSwitched)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pContextSwitched));
+ }
+ CONTRACTL_END;
+#ifdef FEATURE_TESTHOOKS
+ ADID adid=GetAppDomain()->GetId();
+#endif
+ ReturnToContext(pFrame);
+ *pContextSwitched=TRUE;
+#ifdef FEATURE_TESTHOOKS
+ TESTHOOKCALL(LeftAppDomain(adid.m_dwId));
+#endif
+
+ COMPlusThrow(CLRException::GetThrowableFromException(pEx));
+}
+
+void Thread::ReturnToContextAndOOM(ContextTransitionFrame* pFrame)
+{
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+#ifdef FEATURE_TESTHOOKS
+ ADID adid=GetAppDomain()->GetId();
+#endif
+
+ ReturnToContext(pFrame);
+#ifdef FEATURE_TESTHOOKS
+ TESTHOOKCALL(LeftAppDomain(adid.m_dwId));
+#endif
+
+ COMPlusThrowOM();
+}
+
+
+#ifdef FEATURE_CORECLR
+
+//---------------------------------------------------------------------------------------
+// Allocates an agile CrossAppDomainMarshaledException whose ToString() and ErrorCode
+// match those of the original exception.
+//
+// This is our "remoting" story for exceptions that leak across appdomains in Telesto.
+//---------------------------------------------------------------------------------------
+static OBJECTREF WrapThrowableInCrossAppDomainMarshaledException(OBJECTREF pOriginalThrowable)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(GetThread() != NULL);
+
+
+ struct _gc
+ {
+ OBJECTREF pOriginalThrowable;
+ OBJECTREF pThrowable;
+ STRINGREF pOriginalMessage;
+ }
+ prot;
+
+
+ memset(&prot, 0, sizeof(prot));
+
+ GCPROTECT_BEGIN(prot);
+ prot.pOriginalThrowable = pOriginalThrowable;
+ prot.pOriginalMessage = GetExceptionMessage(prot.pOriginalThrowable);
+ HRESULT originalHResult = GetExceptionHResult(prot.pOriginalThrowable);
+
+ MethodTable *pMT = MscorlibBinder::GetClass(CLASS__CROSSAPPDOMAINMARSHALEDEXCEPTION);
+ prot.pThrowable = AllocateObject(pMT);
+
+ MethodDescCallSite exceptionCtor(METHOD__CROSSAPPDOMAINMARSHALEDEXCEPTION__STR_INT_CTOR);
+
+ ARG_SLOT args1[] = {
+ ObjToArgSlot(prot.pThrowable),
+ ObjToArgSlot(prot.pOriginalMessage),
+ (ARG_SLOT)originalHResult,
+ };
+ exceptionCtor.Call(args1);
+
+#ifndef FEATURE_PAL
+    // Since, on CoreCLR, we don't have serialization of exceptions going across
+    // AD transition boundaries, we will copy over the bucket details to the
+    // CrossAppDomainMarshaledException object from the original exception object
+    // if it isn't a preallocated exception.
+ if (IsWatsonEnabled() && (!CLRException::IsPreallocatedExceptionObject(prot.pOriginalThrowable)))
+ {
+        // If the watson buckets are present, then copy them over.
+        // They may be missing if the original throwable couldn't get them from Watson helper functions
+        // during SetupInitialThrowBucketDetails due to OOM.
+ if (((EXCEPTIONREF)prot.pOriginalThrowable)->AreWatsonBucketsPresent())
+ {
+ _ASSERTE(prot.pThrowable != NULL);
+ // Copy them to CrossADMarshalledException object
+ CopyWatsonBucketsBetweenThrowables(prot.pOriginalThrowable, prot.pThrowable);
+
+ // The exception object should now have the buckets inside it
+ _ASSERTE(((EXCEPTIONREF)prot.pThrowable)->AreWatsonBucketsPresent());
+ }
+ }
+#endif // !FEATURE_PAL
+
+ GCPROTECT_END(); //Prot
+
+
+ return prot.pThrowable;
+}
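+
+// The ctor call above corresponds roughly to the managed expression (sketch):
+//
+//   new CrossAppDomainMarshaledException(originalMessage, originalHResult)
+//
+// i.e. we preserve the ToString() text and the HRESULT without serializing
+// the original, potentially domain-bound, exception object.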
+
+
+
+#endif
+
+
+// For cases when marshaling is not needed: throws if it is able to take a
+// shortcut, otherwise just returns
+void Thread::RaiseCrossContextExceptionHelper(Exception* pEx, ContextTransitionFrame* pFrame)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+ // Ensure that IP for WatsonBucketing has been collected if the exception is preallocated.
+#ifdef _DEBUG
+
+#ifdef FEATURE_CORECLR
+ // On CoreCLR, Watson may not be enabled. Thus, we should
+ // skip this.
+ if (IsWatsonEnabled())
+#endif // FEATURE_CORECLR
+ {
+ if (CLRException::IsPreallocatedExceptionObject(CLRException::GetThrowableFromException(pEx)))
+ {
+ // If a preallocated exception escapes unhandled till the AD Transition boundary, then
+ // AppDomainTransitionExceptionFilter will capture the watson buckets and stick them
+ // in the UE Watson bucket tracker.
+ //
+ // This is done *only* for exceptions escaping AD transition boundaries that are NOT
+ // at the thread base.
+ PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = GetThread()->GetExceptionState()->GetUEWatsonBucketTracker();
+ if(pUEWatsonBucketTracker->RetrieveWatsonBuckets() != NULL)
+ {
+ _ASSERTE(pUEWatsonBucketTracker->CapturedAtADTransition() || pUEWatsonBucketTracker->CapturedForThreadAbort());
+ }
+ }
+ }
+#endif // _DEBUG
+#endif // !FEATURE_PAL
+
+#ifdef FEATURE_TESTHOOKS
+ ADID adid=GetAppDomain()->GetId();
+#endif
+
+#define RETURNANDTHROWNEWEXCEPTION(pOldException, Type, ExArgs) \
+ { \
+ Exception::Delete(pOldException); \
+ SetLastThrownObject(NULL); \
+ ReturnToContext(pFrame); \
+ CONTRACT_VIOLATION(ThrowsViolation); \
+ TESTHOOKCALL(LeftAppDomain(adid.m_dwId)); \
+ Type ex ExArgs; \
+ COMPlusThrow(CLRException::GetThrowableFromException(&ex)); \
+ }
+
+#define RETURNANDRETHROW(ex) \
+ { \
+ SafeSetLastThrownObject (NULL); \
+ ReturnToContext(pFrame); \
+ CONTRACT_VIOLATION(ThrowsViolation); \
+ TESTHOOKCALL(LeftAppDomain(adid.m_dwId)); \
+ PAL_CPP_THROW(Exception*,ex); \
+ }
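+
+// Usage sketch for the macros above: for example,
+//   RETURNANDTHROWNEWEXCEPTION(pEx, EEException, (kThreadAbortException))
+// deletes pEx, returns to the caller's context via ReturnToContext(pFrame),
+// and only then constructs and throws a fresh EEException on the caller's
+// side of the boundary.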
+
+ CANNOTTHROWCOMPLUSEXCEPTION(); //no exceptions until returning to context
+
+ Frame* pUnloadBoundary = GetUnloadBoundaryFrame();
+
+ LOG((LF_EH, LL_INFO100, "Exception crossed into another context. Rethrowing in new context.\n"));
+
+
+ // will throw a kAppDomainUnloadedException if necessary
+ if (ShouldChangeAbortToUnload(pFrame, pUnloadBoundary))
+ RETURNANDTHROWNEWEXCEPTION(pEx,EEResourceException,(kAppDomainUnloadedException, W("Remoting_AppDomainUnloaded_ThreadUnwound")));
+
+ // Can't marshal return value from unloaded appdomain. Haven't
+ // yet hit the boundary. Throw a generic exception instead.
+ // ThreadAbort is more consistent with what goes on elsewhere --
+ // the AppDomainUnloaded is only introduced at the top-most boundary.
+ //
+
+ if (GetDomain() == SystemDomain::AppDomainBeingUnloaded()
+ && GetThread()!=SystemDomain::System()->GetUnloadingThread() &&
+ GetThread()!=FinalizerThread::GetFinalizerThread())
+ {
+ if (pUnloadBoundary)
+ RETURNANDTHROWNEWEXCEPTION(pEx,EEException,(kThreadAbortException))
+ else
+ RETURNANDTHROWNEWEXCEPTION(pEx,EEResourceException,(kAppDomainUnloadedException, W("Remoting_AppDomainUnloaded_ThreadUnwound")));
+ }
+
+ if (IsRudeAbort())
+ RETURNANDTHROWNEWEXCEPTION(pEx,EEException,(kThreadAbortException));
+
+
+ // There are a few classes that have the potential to create
+ // infinite loops if we try to marshal them. For ThreadAbort,
+ // ExecutionEngine, StackOverflow, and
+ // OutOfMemory, throw a new exception of the same type.
+ //
+ // <TODO>@NICE: We lose the inner stack trace. A little better
+ // would be to at least check if the inner exceptions are
+ // all the same type as the outer. They could be
+ // rethrown if this were true.</TODO>
+ //
+
+ if(pEx && !pEx->IsDomainBound())
+ {
+ RETURNANDRETHROW(pEx);
+ }
+#undef RETURNANDTHROWNEWEXCEPTION
+#undef RETURNANDRETHROW
+}
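+
+// Decision ladder above, in short (a sketch of the control flow): an abort
+// crossing the unload boundary becomes an AppDomainUnloadedException; an
+// exception leaving the domain being unloaded becomes ThreadAbort (when an
+// unload boundary is set) or AppDomainUnloadedException; a rude abort becomes
+// ThreadAbort; an exception that is not domain-bound is rethrown as-is; and
+// anything else falls through so the caller marshals it across the boundary.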
+
+Thread::RaiseCrossContextResult
+Thread::TryRaiseCrossContextException(Exception **ppExOrig,
+ Exception *pException,
+ RuntimeExceptionKind *pKind,
+ OBJECTREF *ppThrowable,
+ ORBLOBREF *pOrBlob)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS);
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ BOOL bIsClassInitException = FALSE;
+ RaiseCrossContextResult result = RaiseCrossContextSuccess;
+ int alreadyMarshaling = StartedMarshalingException();
+
+ EX_TRY
+ {
+ bIsClassInitException = (pException->GetHR() == COR_E_TYPEINITIALIZATION);
+
+ //just in case something throws
+ //!!!should be released before any call to ReturnToContext !!!
+ ExceptionHolder exception(*ppExOrig);
+
+ if (IsExceptionOfType(kOutOfMemoryException, pException))
+ *pKind = kOutOfMemoryException;
+ else
+ if (IsExceptionOfType(kThreadAbortException, pException))
+ *pKind = kThreadAbortException;
+ else
+ if (IsExceptionOfType(kStackOverflowException, pException))
+ *pKind = kStackOverflowException;
+ else
+ if (alreadyMarshaling)
+ {
+            // If we started marshaling already, something went wrong.
+            // This should only happen in case of a busted ResourceManager.
+            _ASSERTE(!"Already marshalling the exception for cross AD transition - perhaps ResourceManager issue?");
+
+            // ASK: Instead of throwing ExecutionEngineException from here, is there a better
+            // ResourceManager-related exception that can be thrown instead? If none, can
+            // kContextMarshalException be thrown? It's obsolete but comes close to the usage
+            // context.
+ *pKind = kContextMarshalException;
+ }
+
+ // Serialize the exception
+ if (*pKind == kLastException)
+ {
+ *ppThrowable = CLRException::GetThrowableFromException(exception);
+ _ASSERTE(*ppThrowable != NULL);
+
+#ifdef FEATURE_CORECLR
+ (*pOrBlob) = WrapThrowableInCrossAppDomainMarshaledException(*ppThrowable);
+#if CHECK_APP_DOMAIN_LEAKS
+ (*pOrBlob)->SetAppDomainAgile();
+#endif //CHECK_APP_DOMAIN_LEAKS
+#else
+ AppDomainHelper::MarshalObject(ppThrowable, pOrBlob);
+#endif //FEATURE_CORECLR
+
+ }
+ }
+ EX_CATCH
+ {
+ // We got a new Exception in original domain
+ *ppExOrig = EXTRACT_EXCEPTION();
+        // Got ClassInitException while marshaling ClassInitException. The class is unusable. Do not attempt again.
+ if (bIsClassInitException && *ppExOrig && ((*ppExOrig)->GetHR() == COR_E_TYPEINITIALIZATION))
+ result = RaiseCrossContextClassInit;
+ else
+ result = RaiseCrossContextRetry;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ FinishedMarshalingException();
+
+ return result;
+}
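+
+// Result summary (sketch): RaiseCrossContextSuccess means *ppThrowable and
+// *pOrBlob (or *pKind, for the special-cased kinds) describe the exception to
+// raise on the other side of the boundary; RaiseCrossContextRetry means the
+// marshaling itself threw and the caller should retry with the new *ppExOrig;
+// RaiseCrossContextClassInit means a TypeInitializationException recursed
+// during marshaling and no further attempt should be made.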
+
+// * pEx should be deleted before popping the frame, except for one case
+// * SafeSetLastThrownObject is called after pEx is deleted
+void DECLSPEC_NORETURN Thread::RaiseCrossContextException(Exception* pExOrig, ContextTransitionFrame* pFrame)
+{
+ CONTRACTL
+ {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ }
+ CONTRACTL_END;
+
+ // <TODO>@TODO: Set the IsInUnmanagedHandler bits (aka IgnoreThreadAbort bits) appropriately.</TODO>
+
+ GCX_COOP();
+
+    // These are the only data transferred between the appdomains
+ // Make sure that anything added here is appdomain agile
+ RuntimeExceptionKind kind = kLastException;
+ RaiseCrossContextResult result = RaiseCrossContextSuccess;
+ ORBLOBREF orBlob = NULL;
+
+ // Get the corruption severity for the exception caught at AppDomain transition boundary.
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ CorruptionSeverity severity = GetThread()->GetExceptionState()->GetLastActiveExceptionCorruptionSeverity();
+ if (severity == NotSet)
+ {
+ // No severity set at this point implies the exception was not corrupting
+ severity = NotCorrupting;
+ }
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+#ifdef FEATURE_TESTHOOKS
+ ADID adid=GetAppDomain()->GetId();
+#endif
+
+#define MAX_RAISE_RETRY_COUNT 256
+
+ DWORD dwRaiseRetryCount;
+ for (dwRaiseRetryCount = 0; dwRaiseRetryCount < MAX_RAISE_RETRY_COUNT; dwRaiseRetryCount++)
+ {
+        // pExOrig being NULL means that the exception is CLRLastThrownObjectException
+ CLRLastThrownObjectException lastThrown;
+ Exception* pException = pExOrig?pExOrig:&lastThrown;
+
+ // Set the current frame
+ SetFrame(pFrame);
+ RaiseCrossContextExceptionHelper(pExOrig, pFrame);
+ _ASSERTE(pFrame->GetReturnContext());
+
+ struct _gc {
+ OBJECTREF pThrowable;
+ ORBLOBREF orBlob;
+ } gc;
+ ZeroMemory(&gc, sizeof(_gc));
+
+ GCPROTECT_BEGIN(gc);
+ result = Thread::TryRaiseCrossContextException(&pExOrig, pException, &kind, &gc.pThrowable, &gc.orBlob);
+ GCPROTECT_END();
+
+ if (result != RaiseCrossContextRetry)
+ {
+ orBlob = gc.orBlob;
+ break;
+ }
+
+ // We got a new exception and therefore need to retry marshaling it.
+ GCX_COOP_NO_DTOR();
+ }
+
+    // If we exhausted MAX_RAISE_RETRY_COUNT, something is really wrong; fall back to a generic exception kind.
+    if (dwRaiseRetryCount == MAX_RAISE_RETRY_COUNT)
+    {
+        LOG((LF_EH, LL_INFO100, "Unable to marshal the exception even after the maximum number of retries (%d). Using ContextMarshalException instead.\n", MAX_RAISE_RETRY_COUNT));
+ // This might be a good place to use ContextMarshalException type. However, it is marked obsolete.
+ kind = kContextMarshalException;
+ }
+
+ // Return to caller domain
+ {
+        // ReturnToContext does not work inside GC_PROTECT and has a GC_NOTRIGGER contract.
+ // GCX_FORBID() ensures that the formerly protected values remain intact.
+ GCX_FORBID();
+ ReturnToContext(pFrame);
+ }
+
+ {
+ struct _gc {
+ OBJECTREF pMarshaledInit;
+ OBJECTREF pMarshaledThrowable;
+ ORBLOBREF orBlob;
+ } gc;
+ ZeroMemory(&gc, sizeof(_gc));
+
+ gc.orBlob = orBlob;
+
+ // Create the appropriate exception
+ GCPROTECT_BEGIN(gc);
+#ifdef FEATURE_TESTHOOKS
+ TESTHOOKCALL(LeftAppDomain(adid.m_dwId));
+#endif
+ if (result == RaiseCrossContextClassInit)
+ {
+ HRESULT hr=S_OK;
+ EX_TRY
+ {
+ WCHAR wszTemplate[30];
+ IfFailThrow(UtilLoadStringRC(IDS_EE_NAME_UNKNOWN,
+ wszTemplate,
+ sizeof(wszTemplate)/sizeof(wszTemplate[0]),
+ FALSE));
+
+ CreateTypeInitializationExceptionObject(wszTemplate, NULL, &gc.pMarshaledInit, &gc.pMarshaledThrowable);
+ }
+ EX_CATCH
+ {
+ // Unable to create ClassInitException in caller domain
+ hr=COR_E_TYPEINITIALIZATION;
+ }
+ EX_END_CATCH(RethrowTransientExceptions);
+ IfFailThrow(hr);
+ }
+ else
+ {
+ switch (kind)
+ {
+ case kLastException:
+#ifdef FEATURE_CORECLR
+ gc.pMarshaledThrowable = gc.orBlob;
+#else
+ AppDomainHelper::UnmarshalObject(GetAppDomain(), &gc.orBlob, &gc.pMarshaledThrowable);
+#endif //FEATURE_CORECLR
+
+ break;
+ case kOutOfMemoryException:
+ COMPlusThrowOM();
+ break;
+ case kStackOverflowException:
+ gc.pMarshaledThrowable = CLRException::GetPreallocatedStackOverflowException();
+ break;
+ default:
+ {
+ EEException ex(kind);
+ gc.pMarshaledThrowable = CLRException::GetThrowableFromException(&ex);
+ }
+ }
+ }
+
+ // ... and throw it.
+ VALIDATEOBJECTREF(gc.pMarshaledThrowable);
+ COMPlusThrow(gc.pMarshaledThrowable
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ , severity
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ );
+
+ GCPROTECT_END();
+ }
+}
+
+struct FindADCallbackType {
+ AppDomain *pSearchDomain;
+ AppDomain *pPrevDomain;
+ Frame *pFrame;
+ int count;
+ enum TargetTransition
+ {fFirstTransitionInto, fMostRecentTransitionInto}
+ fTargetTransition;
+
+ FindADCallbackType() : pSearchDomain(NULL), pPrevDomain(NULL), pFrame(NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+};
+
+StackWalkAction StackWalkCallback_FindAD(CrawlFrame* pCF, void* data)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ FindADCallbackType *pData = (FindADCallbackType *)data;
+
+ Frame *pFrame = pCF->GetFrame();
+
+ if (!pFrame)
+ return SWA_CONTINUE;
+
+ AppDomain *pReturnDomain = pFrame->GetReturnDomain();
+ if (!pReturnDomain || pReturnDomain == pData->pPrevDomain)
+ return SWA_CONTINUE;
+
+ LOG((LF_APPDOMAIN, LL_INFO100, "StackWalkCallback_FindAD transition frame %8.8x into AD [%d]\n",
+ pFrame, pReturnDomain->GetId().m_dwId));
+
+ if (pData->pPrevDomain == pData->pSearchDomain) {
+ ++pData->count;
+ // this is a transition into the domain we are unloading, so save it in case it is the first
+ pData->pFrame = pFrame;
+ if (pData->fTargetTransition == FindADCallbackType::fMostRecentTransitionInto)
+ return SWA_ABORT; // only need to find last transition, so bail now
+ }
+
+ pData->pPrevDomain = pReturnDomain;
+ return SWA_CONTINUE;
+}
+
+// This determines if a thread is running in the given domain at any point on the stack
+Frame *Thread::IsRunningIn(AppDomain *pDomain, int *count)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ FindADCallbackType fct;
+ fct.pSearchDomain = pDomain;
+    if (!fct.pSearchDomain)
+        return NULL;
+
+    // set prev to current so that if we are currently running in the target domain,
+    // we will detect the transition
+ fct.pPrevDomain = m_pDomain;
+ fct.fTargetTransition = FindADCallbackType::fMostRecentTransitionInto;
+ fct.count = 0;
+
+    // when this returns, if there is a transition into the AD, it will be in fct.pFrame
+ StackWalkAction res;
+ res = StackWalkFrames(StackWalkCallback_FindAD, (void*) &fct, ALLOW_ASYNC_STACK_WALK);
+ if (count)
+ *count = fct.count;
+ return fct.pFrame;
+}
+
+// This finds the very first frame on the stack where the thread transitioned into the given domain
+Frame *Thread::GetFirstTransitionInto(AppDomain *pDomain, int *count)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ FindADCallbackType fct;
+ fct.pSearchDomain = pDomain;
+    // set prev to current so that if we are currently running in the target domain,
+    // we will detect the transition
+ fct.pPrevDomain = m_pDomain;
+ fct.fTargetTransition = FindADCallbackType::fFirstTransitionInto;
+ fct.count = 0;
+
+    // when this returns, if there is a transition into the AD, it will be in fct.pFrame
+ StackWalkAction res;
+ res = StackWalkFrames(StackWalkCallback_FindAD, (void*) &fct, ALLOW_ASYNC_STACK_WALK);
+ if (count)
+ *count = fct.count;
+ return fct.pFrame;
+}
+
+// Get outermost (oldest) AppDomain for this thread (not counting the default
+// domain everyone starts in).
+AppDomain *Thread::GetInitialDomain()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ AppDomain *pDomain = m_pDomain;
+ AppDomain *pPrevDomain = NULL;
+ Frame *pFrame = GetFrame();
+ while (pFrame != FRAME_TOP)
+ {
+ if (pFrame->GetVTablePtr() == ContextTransitionFrame::GetMethodFrameVPtr())
+ {
+ if (pPrevDomain)
+ pDomain = pPrevDomain;
+ pPrevDomain = pFrame->GetReturnDomain();
+ }
+ pFrame = pFrame->Next();
+ }
+ return pDomain;
+}
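+
+// Example (a sketch): for a thread that ran Default -> AD1 -> AD2, the frame
+// chain holds two ContextTransitionFrames whose return domains are AD1 and
+// Default respectively; the walk above therefore reports AD1 as the initial
+// (oldest non-default) domain.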
+
+#ifndef DACCESS_COMPILE
+void Thread::SetUnloadBoundaryFrame(Frame *pFrame)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE((this == GetThread() && PreemptiveGCDisabled()) ||
+ ThreadStore::HoldingThreadStore());
+ if ((ULONG_PTR)m_pUnloadBoundaryFrame < (ULONG_PTR)pFrame)
+ {
+ m_pUnloadBoundaryFrame = pFrame;
+ }
+ if (pFrame == NULL)
+ {
+ ResetBeginAbortedForADUnload();
+ }
+}
+
+void Thread::ResetUnloadBoundaryFrame()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(this == GetThread() && PreemptiveGCDisabled());
+ m_pUnloadBoundaryFrame=NULL;
+ ResetBeginAbortedForADUnload();
+}
+
+#endif
+
+BOOL Thread::ShouldChangeAbortToUnload(Frame *pFrame, Frame *pUnloadBoundaryFrame)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (! pUnloadBoundaryFrame)
+ pUnloadBoundaryFrame = GetUnloadBoundaryFrame();
+
+    // turn the abort request into an AD unloaded exception when we go past the boundary.
+ if (pFrame != pUnloadBoundaryFrame)
+ return FALSE;
+
+    // The only time we have an unload boundary frame is when we have specifically marked that thread for abort
+    // during unload processing, so this won't trigger an UnloadedException if we have simply thrown a ThreadAbort
+    // past an AD transition frame
+ _ASSERTE (IsAbortRequested());
+
+ EEResetAbort(TAR_ADUnload);
+
+    return (m_AbortType == EEPolicy::TA_None);
+}
+
+BOOL Thread::HaveExtraWorkForFinalizer()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_ThreadTasks
+ || OverlappedDataObject::CleanupNeededFromGC()
+ || ThreadpoolMgr::HaveTimerInfosToFlush()
+ || ExecutionManager::IsCacheCleanupRequired()
+ || Thread::CleanupNeededForFinalizedThread()
+ || (m_DetachCount > 0)
+ || CExecutionEngine::HasDetachedTlsInfo()
+ || AppDomain::HasWorkForFinalizerThread()
+ || SystemDomain::System()->RequireAppDomainCleanup();
+}
+
+void Thread::DoExtraWorkForFinalizer()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(GetThread() == this);
+ _ASSERTE(this == FinalizerThread::GetFinalizerThread());
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ if (RequiresCoInitialize())
+ {
+ SetApartment(AS_InMTA, FALSE);
+ }
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+ if (AppDomain::HasWorkForFinalizerThread())
+ {
+ AppDomain::ProcessUnloadDomainEventOnFinalizeThread();
+ }
+
+ if (RequireSyncBlockCleanup())
+ {
+#ifndef FEATURE_PAL
+ InteropSyncBlockInfo::FlushStandbyList();
+#endif // !FEATURE_PAL
+
+#ifdef FEATURE_COMINTEROP
+ RCW::FlushStandbyList();
+#endif // FEATURE_COMINTEROP
+
+ SyncBlockCache::GetSyncBlockCache()->CleanupSyncBlocks();
+ }
+ if (SystemDomain::System()->RequireAppDomainCleanup())
+ {
+ SystemDomain::System()->ProcessDelayedUnloadDomains();
+ }
+
+ CExecutionEngine::CleanupDetachedTlsInfo();
+
+ if(m_DetachCount > 0 || Thread::CleanupNeededForFinalizedThread())
+ {
+ Thread::CleanupDetachedThreads();
+ }
+
+ if(ExecutionManager::IsCacheCleanupRequired() && GCHeap::GetGCHeap()->GetCondemnedGeneration()>=1)
+ {
+ ExecutionManager::ClearCaches();
+ }
+
+ OverlappedDataObject::RequestCleanupFromGC();
+
+ // If there were any TimerInfos waiting to be released, they'll get flushed now
+ ThreadpoolMgr::FlushQueueOfTimerInfos();
+
+}
+
+
+// HELPERS FOR THE BASE OF A MANAGED THREAD, INCLUDING AD TRANSITION SUPPORT
+
+// We have numerous places where we start up a managed thread. This includes several places in the
+// ThreadPool, the 'new Thread(...).Start()' case, and the Finalizer. Try to factor the code so our
+// base exception handling behavior is consistent across those places. The resulting code is convoluted,
+// but it's better than the prior situation of each thread being on a different plan.
+
+// We need Middle & Outer methods for the usual problem of combining C++ & SEH.
+
+/* The effect of all this is that we get:
+
+ Base of thread -- OS unhandled exception filter that we hook
+
+ SEH handler from DispatchOuter
+ C++ handler from DispatchMiddle
+
+ And if there is an AppDomain transition before we call back to user code, we additionally get:
+
+ AppDomain transition -- contains its own handlers to terminate the first pass
+ and marshal the exception.
+
+ SEH handler from DispatchOuter
+ C++ handler from DispatchMiddle
+
+ Regardless of whether or not there is an AppDomain transition, we then have:
+
+ User code that obviously can throw.
+
+ So if we don't have an AD transition, or we take a fault before we successfully transition the
+ AppDomain, then the base-most DispatchOuter/Middle will deal with the exception. This may
+ involve swallowing exceptions or it may involve Watson & debugger attach. It will always
+ involve notifications to any AppDomain.UnhandledException event listeners.
+
+ But if we did transition the AppDomain, then any Watson, debugger attach and UnhandledException
+ events will occur in that AppDomain in the initial first pass. So we get a good debugging
+ experience and we get notifications to the host that show which AppDomain is allowing exceptions
+ to go unhandled (so perhaps it can be unloaded or otherwise dealt with).
+
+ The trick is that if the exception goes unhandled at the process level, we would normally try
+ to fire AppDomain events and display the faulting exception on the console from two more
+ places. These are the base-most DispatchOuter/Middle pair and the hook of the OS unhandled
+ exception handler at the base of the thread.
+
+ This is redundant and messy. (There's no concern with getting a 2nd Watson because we only
+ do one of these per process anyway). The solution for the base-most DispatchOuter/Middle is
+ to use the ManagedThreadCallState.flags to control whether the exception has already been
+ dealt with or not. These flags cause the ThreadBaseRedirectingFilter to either do normal
+ "base of the thread" exception handling, or to ignore the exception because it has already
+ been reported in the AppDomain we transitioned to.
+
+ But turning off the reporting in the OS unhandled exception filter is harder. We don't want
+ to flip a bit on the Thread to disable this, unless we can be sure we are only disabling
+ something we already reported, and that this thread will never recover from that situation and
+ start executing code again. Here's the normal nightmare scenario with SEH:
+
+ 1) exception of type A is thrown
+ 2) All the filters in the 1st pass say they don't want an A
+ 3) The exception gets all the way out and is considered unhandled. We report this "fact".
+ 4) Imagine we then set a bit that says this thread shouldn't report unhandled exceptions.
+ 5) The 2nd pass starts.
+ 6) Inside a finally, someone throws an exception of type B.
+ 7) A new 1st pass starts from the point of the throw, with a type B.
+ 8) Now a filter says "Yes, I will swallow exception B."
+ 9) We no longer have an unhandled exception, and execution continues merrily.
+
+ This is an unavoidable consequence of the 2-pass model. If you report unhandled exceptions
+ in the 1st pass (for good debugging), you might find that this was premature and you don't
+ have an unhandled exception when you get to the 2nd pass.
+
+ But it would not be optimal if in step 4 we set a bit that says we should suppress normal
+ notifications and reporting on this thread, believing that the process will terminate.
+
+ The solution is to recognize that the base OS unhandled exception filter runs in two modes.
+ In the first mode, it operates as today and serves as our backstop. In the second mode
+ it is fully redundant with the handlers pushed after the AppDomain transition, which are
+ completely containing the exception to the AD that it occurred in (for purposes of reporting).
+ So we just need a flag on the thread that says whether or not that set of handlers are pushed
+ and functioning. That flag enables / disables the base exception reporting and is called
+ TSNC_AppDomainContainUnhandled
+
+*/
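+
+// Handler nesting described above, as a sketch (innermost last):
+//
+//   OS unhandled exception filter (hooked by the CLR)
+//     ManagedThreadBase_DispatchOuter   (SEH: ThreadBaseRedirectingFilter)
+//       ManagedThreadBase_DispatchMiddle (C++ EX_TRY / EX_CATCH)
+//         [optional AD transition via DoADCallBack]
+//           ManagedThreadBase_DispatchOuter   (SEH, again)
+//             ManagedThreadBase_DispatchMiddle (C++, again)
+//               user code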
+
+
+enum ManagedThreadCallStateFlags
+{
+ MTCSF_NormalBase,
+ MTCSF_ContainToAppDomain,
+ MTCSF_SuppressDuplicate,
+};
+
+struct ManagedThreadCallState
+{
+ ADID pAppDomainId;
+ AppDomain* pUnsafeAppDomain;
+ BOOL bDomainIsAsID;
+
+ Context::ADCallBackFcnType pTarget;
+ LPVOID args;
+ UnhandledExceptionLocation filterType;
+ ManagedThreadCallStateFlags flags;
+ BOOL IsAppDomainEqual(AppDomain* pApp)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return bDomainIsAsID?(pApp->GetId()==pAppDomainId):(pUnsafeAppDomain==pApp);
+ }
+ ManagedThreadCallState(ADID AppDomainId,Context::ADCallBackFcnType Target,LPVOID Args,
+ UnhandledExceptionLocation FilterType, ManagedThreadCallStateFlags Flags):
+ pAppDomainId(AppDomainId),
+ pUnsafeAppDomain(NULL),
+ bDomainIsAsID(TRUE),
+ pTarget(Target),
+ args(Args),
+ filterType(FilterType),
+ flags(Flags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ };
+protected:
+ ManagedThreadCallState(AppDomain* AppDomain,Context::ADCallBackFcnType Target,LPVOID Args,
+ UnhandledExceptionLocation FilterType, ManagedThreadCallStateFlags Flags):
+ pAppDomainId(ADID(0)),
+ pUnsafeAppDomain(AppDomain),
+ bDomainIsAsID(FALSE),
+ pTarget(Target),
+ args(Args),
+ filterType(FilterType),
+ flags(Flags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ };
+ void InitForFinalizer(AppDomain* AppDomain,Context::ADCallBackFcnType Target,LPVOID Args)
+ {
+ LIMITED_METHOD_CONTRACT;
+ filterType=FinalizerThread;
+ pUnsafeAppDomain=AppDomain;
+ pTarget=Target;
+ args=Args;
+ };
+
+ friend void ManagedThreadBase_NoADTransition(Context::ADCallBackFcnType pTarget,
+ UnhandledExceptionLocation filterType);
+ friend void ManagedThreadBase::FinalizerAppDomain(AppDomain* pAppDomain,
+ Context::ADCallBackFcnType pTarget,
+ LPVOID args,
+ ManagedThreadCallState *pTurnAround);
+};
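+
+// Construction sketch (pMyStart and pMyArgs are hypothetical; FinalizerThread
+// happens to be the one UnhandledExceptionLocation value visible in this file,
+// and real callers pass the location matching their thread kind):
+//
+//   ManagedThreadCallState callState(adid, pMyStart, pMyArgs,
+//                                    FinalizerThread, MTCSF_NormalBase);
+//   ManagedThreadBase_DispatchOuter(&callState);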
+
+// The following static helpers are outside of the ManagedThreadBase struct because I
+// don't want to change threads.h whenever I change the mechanism for how unhandled
+// exceptions works. The ManagedThreadBase struct is for the public exposure of the
+// API only.
+
+static void ManagedThreadBase_DispatchOuter(ManagedThreadCallState *pCallState);
+
+
+// Here's the tricky part. *IF and only IF* we took an AppDomain transition at the base, then we
+// now want to push another complete set of handlers above us. The reason is that we want the
+// Watson report and the unhandled exception event to occur in the target AppDomain. If we don't
+// do this apparently redundant push of handlers, then we will marshal back the exception to the
+// handlers on the Default AppDomain side. This will erase all the important exception state by
+// unwinding (catch and rethrow) in DoADCallBack. And it will cause all unhandled exceptions to
+// be reported from the Default AppDomain, which is annoying to any AppDomain.UnhandledException
+// event listeners.
+//
+// So why not skip the handlers that are in the Default AppDomain and just push the ones after the
+// transition? Well, transitioning out of the Default AppDomain into the target AppDomain could
+// fail. We need handlers pushed for that case. And in that case it's perfectly reasonable to
+// report the problem as occurring in the Default AppDomain, which is what the base handlers will
+// do.
+
+static void ManagedThreadBase_DispatchInCorrectAD(LPVOID args)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ ManagedThreadCallState *pCallState = (ManagedThreadCallState *) args;
+
+ // Ensure we aren't going to infinitely recurse.
+ _ASSERTE(pCallState->IsAppDomainEqual(GetThread()->GetDomain()));
+
+ // And then go round one more time. But this time we want to ensure that the filter contains
+ // any exceptions that aren't swallowed. These must be treated as unhandled, rather than
+ // propagated through the AppDomain boundary in search of an outer handler. Otherwise we
+ // will not get correct Watson behavior.
+ pCallState->flags = MTCSF_ContainToAppDomain;
+ ManagedThreadBase_DispatchOuter(pCallState);
+ pCallState->flags = MTCSF_NormalBase;
+}
+
+static void ManagedThreadBase_DispatchInner(ManagedThreadCallState *pCallState)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+
+ Thread *pThread = GetThread();
+
+ if (!pCallState->IsAppDomainEqual(pThread->GetDomain()))
+ {
+ // On Win7 and later, AppDomain transitions at the threadbase will *not* have EH setup at transition boundary.
+ // This implies that an unhandled exception from the base domain (i.e. AD in which the thread starts) will
+ // not return to DefDomain but will continue to go up the stack with the thread still being in base domain.
+ // We have a holder in ENTER_DOMAIN_*_NO_EH_AT_TRANSITION macro (ReturnToPreviousAppDomainHolder) that will
+ // revert AD context at threadbase if an unwind is triggered after the exception has gone unhandled.
+ //
+        // This also implies that there will be no exception object marshalling (and it may not be required after all);
+        // once the holder reverts the AD context, the LastThrownObject in Thread will be set to NULL.
+#ifndef FEATURE_PAL
+ BOOL fSetupEHAtTransition = !(RunningOnWin7());
+#else // !FEATURE_PAL
+ BOOL fSetupEHAtTransition = TRUE;
+#endif // !FEATURE_PAL
+
+ if (pCallState->bDomainIsAsID)
+ pThread->DoADCallBack(pCallState->pAppDomainId,
+ ManagedThreadBase_DispatchInCorrectAD,
+ pCallState, fSetupEHAtTransition);
+ else
+ pThread->DoADCallBack(pCallState->pUnsafeAppDomain,
+ ManagedThreadBase_DispatchInCorrectAD,
+ pCallState, ADV_FINALIZER, fSetupEHAtTransition);
+ }
+ else
+ {
+ // Since no AppDomain transition is necessary, we need no additional handlers pushed
+ // *AFTER* the transition. We now have adequate handlers below us. Go ahead and
+ // dispatch the call.
+ (*pCallState->pTarget) (pCallState->args);
+ }
+}
+
+static void ManagedThreadBase_DispatchMiddle(ManagedThreadCallState *pCallState)
+{
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+    // We have the probe outside the EX_TRY below since the corresponding EX_CATCH
+    // also invokes SO_INTOLERANT code.
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+
+ EX_TRY
+ {
+ // During an unwind, we have some cleanup:
+ //
+ // 1) We should no longer suppress any unhandled exception reporting at the base
+ // of the thread, because any handler that contained the exception to the AppDomain
+ // where it occurred is now being removed from the stack.
+ //
+ // 2) We need to unwind the Frame chain. We cannot do it when we get to the __except clause
+ // because at this point we are in the 2nd phase and the stack has been popped. Any
+ // stack crawling from another thread will see a frame chain in a popped region of stack.
+ // Nor can we pop it in a filter, since this would destroy all the stack-walking information
+ // we need to perform the 2nd pass. So doing it in a C++ destructor will ensure it happens
+ // during the 2nd pass but before the stack is actually popped.
+ class Cleanup
+ {
+ Frame *m_pEntryFrame;
+ Thread *m_pThread;
+
+ public:
+ Cleanup(Thread* pThread)
+ {
+ m_pThread = pThread;
+ m_pEntryFrame = pThread->m_pFrame;
+ }
+
+ ~Cleanup()
+ {
+ GCX_COOP();
+ m_pThread->SetFrame(m_pEntryFrame);
+ m_pThread->ResetThreadStateNC(Thread::TSNC_AppDomainContainUnhandled);
+ }
+ };
+
+ Cleanup cleanup(GetThread());
+
+ ManagedThreadBase_DispatchInner(pCallState);
+ }
+ EX_CATCH_CPP_ONLY
+ {
+ GCX_COOP();
+ Exception *pException = GET_EXCEPTION();
+
+ // RudeThreadAbort is a pre-allocated instance of ThreadAbort. So the following is sufficient.
+        // For Whidbey, by default we only swallow certain exceptions. If reverting to Everett's
+        // behavior (swallowing all unhandled exceptions), then swallow all unhandled exceptions.
+ //
+ if (SwallowUnhandledExceptions() ||
+ IsExceptionOfType(kThreadAbortException, pException) ||
+ IsExceptionOfType(kAppDomainUnloadedException, pException))
+ {
+ // Do nothing to swallow the exception
+ }
+ else
+ {
+ // Setting up the unwind_and_continue_handler ensures that C++ exceptions do not leak out.
+ // An example is when Thread1 in Default AppDomain creates AppDomain2, enters it, creates
+ // another thread T2 and T2 throws OOM exception (that goes unhandled). At the transition
+ // boundary, END_DOMAIN_TRANSITION will catch it and invoke RaiseCrossContextException
+ // that will rethrow the OOM as a C++ exception.
+ //
+ // Without unwind_and_continue_handler below, the exception will fly up the stack to
+ // this point, where it will be rethrown and thus leak out.
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ EX_RETHROW;
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_SO_INTOLERANT_CODE;
+}
+
+/*
+typedef struct Param
+{
+ ManagedThreadCallState * m_pCallState;
+ Frame * m_pFrame;
+ Param(ManagedThreadCallState * pCallState, Frame * pFrame): m_pCallState(pCallState), m_pFrame(pFrame) {}
+} TryParam;
+*/
+typedef struct Param: public NotifyOfCHFFilterWrapperParam
+{
+ ManagedThreadCallState * m_pCallState;
+ Param(ManagedThreadCallState * pCallState): m_pCallState(pCallState) {}
+} TryParam;
+
+// Dispatch to the appropriate filter, based on the active CallState.
+static LONG ThreadBaseRedirectingFilter(PEXCEPTION_POINTERS pExceptionInfo, LPVOID pParam)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ LONG (*ptrFilter) (PEXCEPTION_POINTERS, PVOID);
+
+ TryParam * pRealParam = reinterpret_cast<TryParam *>(pParam);
+ ManagedThreadCallState * _pCallState = pRealParam->m_pCallState;
+ ManagedThreadCallStateFlags flags = _pCallState->flags;
+
+ if (flags == MTCSF_SuppressDuplicate)
+ {
+ LOG((LF_EH, LL_INFO100, "ThreadBaseRedirectingFilter: setting TSNC_AppDomainContainUnhandled\n"));
+ GetThread()->SetThreadStateNC(Thread::TSNC_AppDomainContainUnhandled);
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ LONG ret = -1;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return EXCEPTION_CONTINUE_SEARCH;);
+
+ // This will invoke the swallowing filter. If that returns EXCEPTION_CONTINUE_SEARCH,
+ // it will trigger unhandled exception processing.
+ ptrFilter = ThreadBaseExceptionAppDomainFilter;
+
+ // WARNING - ptrFilter may not return
+ // This occurs when the debugger decides to intercept an exception and catch it in a frame closer
+ // to the leaf than the one executing this filter
+ ret = (*ptrFilter) (pExceptionInfo, _pCallState);
+
+ // Although EXCEPTION_EXECUTE_HANDLER can also be returned in cases corresponding to
+ // unhandled exceptions, all of those cases have already notified the debugger of an unhandled
+ // exception which prevents a second notification indicating the exception was caught
+ if (ret == EXCEPTION_EXECUTE_HANDLER)
+ {
+
+ // WARNING - NotifyOfCHFFilterWrapper may not return
+ // This occurs when the debugger decides to intercept an exception and catch it in a frame closer
+ // to the leaf than the one executing this filter
+ NotifyOfCHFFilterWrapper(pExceptionInfo, pRealParam);
+ }
+
+ // If we are containing unhandled exceptions to the AppDomain we transitioned into, and the
+ // exception is coming out, then this exception is going unhandled. We have already done
+ // Watson and managed events, so suppress all filters below us. Otherwise we are swallowing
+ // it and returning out of the AppDomain.
+ if (flags == MTCSF_ContainToAppDomain)
+ {
+ if(ret == EXCEPTION_CONTINUE_SEARCH)
+ {
+ _pCallState->flags = MTCSF_SuppressDuplicate;
+ }
+ else if(ret == EXCEPTION_EXECUTE_HANDLER)
+ {
+ _pCallState->flags = MTCSF_NormalBase;
+ }
+ // else if( EXCEPTION_CONTINUE_EXECUTION ) do nothing
+ }
+
+    // Get a reference to the current thread.
+ Thread *pCurThread = GetThread();
+ _ASSERTE(pCurThread);
+
+ if (flags == MTCSF_ContainToAppDomain)
+ {
+
+        if (_pCallState->flags == MTCSF_SuppressDuplicate)
+ {
+ // Set the flag that we have done unhandled exception processing
+ // for this managed thread that started in a non-default domain
+ LOG((LF_EH, LL_INFO100, "ThreadBaseRedirectingFilter: setting TSNC_AppDomainContainUnhandled\n"));
+ pCurThread->SetThreadStateNC(Thread::TSNC_AppDomainContainUnhandled);
+ }
+ }
+ else
+ {
+ _ASSERTE(flags == MTCSF_NormalBase);
+
+#ifdef FEATURE_CORECLR
+ if(!IsSingleAppDomain())
+ {
+            // This assert shouldn't be hit in CoreCLR since:
+ //
+ // 1) It has no concept of managed entry point that is invoked by the shim. You can
+ // only run managed code via hosting APIs that will run code in non-default domains.
+ //
+ // 2) Managed threads cannot be created in DefaultDomain since no user code executes
+ // in default domain.
+ //
+ // So, if this is hit, something is not right!
+ _ASSERTE(!"How come a managed thread in CoreCLR has suffered unhandled exception in DefaultDomain?");
+ }
+#endif // FEATURE_CORECLR
+
+ LOG((LF_EH, LL_INFO100, "ThreadBaseRedirectingFilter: setting TSNC_ProcessedUnhandledException\n"));
+
+#if defined(FEATURE_CORECLR)
+ //
+ // In the default domain, when an exception goes unhandled on a managed thread whose threadbase is in the VM (e.g. explicitly spawned threads,
+ // ThreadPool threads, finalizer thread, etc), CLR can end up in the unhandled exception processing path twice.
+ //
+ // The first attempt to perform UE processing happens at the managed thread base (via this function). When it completes,
+            // we will set TSNC_ProcessedUnhandledException state against the thread to indicate that we have performed the unhandled exception processing.
+ //
+            // On the desktop CLR, after the first attempt, we will return to the OS with EXCEPTION_CONTINUE_SEARCH as unhandled exceptions cannot be swallowed. When the exception reaches
+            // the native threadbase in the OS kernel, the OS will invoke the UEF registered for the process. This can result in CLR's UEF (COMUnhandledExceptionFilter)
+            // getting invoked, which will attempt to perform UE processing yet again for the same thread. To avoid this duplicate processing, we check for the presence of
+            // TSNC_ProcessedUnhandledException state on the thread and, if present, we simply return to the OS.
+ //
+            // On desktop CoreCLR, we will only do UE processing once (at the managed threadbase) since no thread is created in the default domain - all are created and executed in non-default domains.
+            // As a result, we go via a completely different codepath that prevents duplication of UE processing from happening, especially since desktop CoreCLR is targeted at SL and SL
+ // always passes us a flag to swallow unhandled exceptions.
+ //
+ // On CoreSys CoreCLR, the host can ask CoreCLR to run all code in the default domain. As a result, when we return from the first attempt to perform UE
+            // processing, the call could return with EXCEPTION_EXECUTE_HANDLER since, just as desktop CoreCLR is instructed by the SL host to swallow all unhandled exceptions,
+            // CoreSys CoreCLR can also be instructed by its Phone host to swallow all unhandled exceptions. As a result, the exception dispatch will never continue to go upstack
+            // to the native threadbase in the OS kernel and thus, there will never be a second attempt to perform UE processing. Hence, we don't, and shouldn't, need to set
+ // TSNC_ProcessedUnhandledException state against the thread if we are in SingleAppDomain mode and have been asked to swallow the exception.
+ //
+ // If we continue to set TSNC_ProcessedUnhandledException and a ThreadPool Thread A has an exception go unhandled, we will swallow it correctly for the first time.
+        // The next time Thread A has an exception go unhandled, our UEF will see TSNC_ProcessedUnhandledException set and assume (incorrectly) that UE processing has happened and
+ // will fail to honor the host policy (e.g. swallow unhandled exception). Thus, the 2nd unhandled exception may end up crashing the app when it should not.
+ //
+ if (IsSingleAppDomain() && (ret != EXCEPTION_EXECUTE_HANDLER))
+#endif // defined(FEATURE_CORECLR)
+ {
+            // Since we have already done unhandled exception processing for it, we don't want it
+ // to happen again if our UEF gets invoked upon returning back to the OS.
+ //
+ // Set the flag to indicate so.
+ pCurThread->SetThreadStateNC(Thread::TSNC_ProcessedUnhandledException);
+ }
+ }
+
+#ifdef FEATURE_UEF_CHAINMANAGER
+ if (g_pUEFManager && (ret == EXCEPTION_CONTINUE_SEARCH))
+ {
+        // Since the "UEF" of this runtime instance didn't handle the exception,
+        // invoke the other registered UEF callbacks as well.
+ ret = g_pUEFManager->InvokeUEFCallbacks(pExceptionInfo);
+ }
+#endif // FEATURE_UEF_CHAINMANAGER
+
+ END_SO_INTOLERANT_CODE;
+ return ret;
+}
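+
+// Reader's sketch (not part of the build) of the rough call structure around the
+// filter above; the names are the functions defined in this file:
+//
+//   ManagedThreadBase::KickOff / ThreadPool / FinalizerBase   (exposed entrypoints)
+//     -> ManagedThreadBase_FullTransitionWithAD / ManagedThreadBase_NoADTransition
+//          -> ManagedThreadBase_DispatchOuter
+//               -> PAL_TRY { ManagedThreadBase_DispatchMiddle(...) }
+//                  PAL_EXCEPT_FILTER(ThreadBaseRedirectingFilter)
+//
+// The filter's return value decides whether the exception is swallowed here
+// (EXCEPTION_EXECUTE_HANDLER) or flows on to the OS (EXCEPTION_CONTINUE_SEARCH).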
+
+static void ManagedThreadBase_DispatchOuter(ManagedThreadCallState *pCallState)
+{
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ // HasStarted() must have already been performed by our caller
+ _ASSERTE(GetThread() != NULL);
+
+ Thread *pThread = GetThread();
+#ifdef WIN64EXCEPTIONS
+ Frame *pFrame = pThread->m_pFrame;
+#endif // WIN64EXCEPTIONS
+
+ // The sole purpose of having this frame is to tell the debugger that we have a catch handler here
+ // which may swallow managed exceptions. The debugger needs this in order to send a
+ // CatchHandlerFound (CHF) notification.
+ FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame;
+
+ TryParam param(pCallState);
+ param.pFrame = &catchFrame;
+
+ struct TryArgs
+ {
+ TryParam *pTryParam;
+ Thread *pThread;
+
+#ifdef FEATURE_CORECLR
+ BOOL *pfHadException;
+#endif // FEATURE_CORECLR
+
+#ifdef WIN64EXCEPTIONS
+ Frame *pFrame;
+#endif // WIN64EXCEPTIONS
+    } args;
+
+ args.pTryParam = &param;
+ args.pThread = pThread;
+
+#ifdef FEATURE_CORECLR
+ BOOL fHadException = TRUE;
+ args.pfHadException = &fHadException;
+#endif // FEATURE_CORECLR
+
+#ifdef WIN64EXCEPTIONS
+ args.pFrame = pFrame;
+#endif // WIN64EXCEPTIONS
+
+ PAL_TRY(TryArgs *, pArgs, &args)
+ {
+ PAL_TRY(TryParam *, pParam, pArgs->pTryParam)
+ {
+ ManagedThreadBase_DispatchMiddle(pParam->m_pCallState);
+ }
+ PAL_EXCEPT_FILTER(ThreadBaseRedirectingFilter)
+ {
+            // Note: none of our C++ exceptions will ever reach this filter because they're always caught by
+            // the EX_CATCH in ManagedThreadBase_DispatchMiddle().
+            //
+            // For eCLRDeterminedPolicy, we only swallow TA, RTA, and ADU exceptions.
+            // For eHostDeterminedPolicy, we swallow all managed exceptions.
+ #ifdef WIN64EXCEPTIONS
+            // This must be done after the second pass has run. It does not
+ // reference anything on the stack, so it is safe to run in an
+ // SEH __except clause as well as a C++ catch clause.
+ ExceptionTracker::PopTrackers(pArgs->pFrame);
+ #endif // WIN64EXCEPTIONS
+
+            // Fortunately, ThreadAbortExceptions are always re-raised unless the
+            // abort request is reset, so reset any pending abort here.
+ if (pArgs->pThread->IsAbortRequested())
+ pArgs->pThread->EEResetAbort(Thread::TAR_Thread);
+ }
+ PAL_ENDTRY;
+
+#ifdef FEATURE_CORECLR
+ *(pArgs->pfHadException) = FALSE;
+#endif // FEATURE_CORECLR
+ }
+ PAL_FINALLY
+ {
+#ifdef FEATURE_CORECLR
+        // If we had a breakpoint exception that has gone unhandled,
+        // then switch to the correct AD context. It's fine to do this
+        // here because:
+        //
+        // 1) We are in an unwind (this is a C++ destructor).
+        // 2) SetFrame (below) validates that we are in the correct AD context. Thus,
+        //    this should be done before that.
+ if (fHadException && (GetCurrentExceptionCode() == STATUS_BREAKPOINT))
+ {
+ ReturnToPreviousAppDomain();
+ }
+#endif // FEATURE_CORECLR
+ catchFrame.Pop();
+ }
+ PAL_ENDTRY;
+}
+
+
+// For the implementation, there are three variants of work possible:
+
+// 1. Establish the base of a managed thread, and switch to the correct AppDomain.
+static void ManagedThreadBase_FullTransitionWithAD(ADID pAppDomain,
+ Context::ADCallBackFcnType pTarget,
+ LPVOID args,
+ UnhandledExceptionLocation filterType)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ ManagedThreadCallState CallState(pAppDomain, pTarget, args, filterType, MTCSF_NormalBase);
+ ManagedThreadBase_DispatchOuter(&CallState);
+}
+
+// 2. Establish the base of a managed thread, but the AppDomain transition must be
+// deferred until later.
+void ManagedThreadBase_NoADTransition(Context::ADCallBackFcnType pTarget,
+ UnhandledExceptionLocation filterType)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ AppDomain *pAppDomain = GetAppDomain();
+
+ ManagedThreadCallState CallState(pAppDomain, pTarget, NULL, filterType, MTCSF_NormalBase);
+
+    // Make the call state self-describing: point args back at the CallState itself so that it
+    // can serve as the pTurnAround data for eventual delivery to a subsequent AppDomain transition.
+ CallState.args = &CallState;
+
+ ManagedThreadBase_DispatchOuter(&CallState);
+}
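+
+// Reader's note: the self-reference stored in CallState.args above is presumably what
+// later arrives as the pTurnAround parameter of ManagedThreadBase::FinalizerAppDomain
+// (below), which re-initializes the same call state for the deferred AppDomain
+// transition and re-dispatches it via ManagedThreadBase_DispatchInner.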
+
+
+
+// And here are the various exposed entrypoints for base thread behavior
+
+// The 'new Thread(...).Start()' case from COMSynchronizable kickoff thread worker
+void ManagedThreadBase::KickOff(ADID pAppDomain, Context::ADCallBackFcnType pTarget, LPVOID args)
+{
+ WRAPPER_NO_CONTRACT;
+ ManagedThreadBase_FullTransitionWithAD(pAppDomain, pTarget, args, ManagedThread);
+}
+
+// The IOCompletion, QueueUserWorkItem, AddTimer, RegisterWaitForSingleObject cases in the ThreadPool
+void ManagedThreadBase::ThreadPool(ADID pAppDomain, Context::ADCallBackFcnType pTarget, LPVOID args)
+{
+ WRAPPER_NO_CONTRACT;
+ ManagedThreadBase_FullTransitionWithAD(pAppDomain, pTarget, args, ThreadPoolThread);
+}
+
+// The Finalizer thread establishes exception handling at its base, but defers all the AppDomain
+// transitions.
+void ManagedThreadBase::FinalizerBase(Context::ADCallBackFcnType pTarget)
+{
+ WRAPPER_NO_CONTRACT;
+ ManagedThreadBase_NoADTransition(pTarget, FinalizerThread);
+}
+
+void ManagedThreadBase::FinalizerAppDomain(AppDomain *pAppDomain,
+ Context::ADCallBackFcnType pTarget,
+ LPVOID args,
+ ManagedThreadCallState *pTurnAround)
+{
+ WRAPPER_NO_CONTRACT;
+ pTurnAround->InitForFinalizer(pAppDomain,pTarget,args);
+ _ASSERTE(pTurnAround->flags == MTCSF_NormalBase);
+ ManagedThreadBase_DispatchInner(pTurnAround);
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: Thread::GetStaticFieldAddress private
+//
+// Synopsis: Get the address of the field relative to the current thread.
+// If an address has not been assigned yet then create one.
+//
+//+----------------------------------------------------------------------------
+
+LPVOID Thread::GetStaticFieldAddress(FieldDesc *pFD)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pFD != NULL);
+ _ASSERTE(pFD->IsThreadStatic());
+ _ASSERTE(!pFD->IsRVA());
+
+ // for static field the MethodTable is exact even for generic classes
+ MethodTable *pMT = pFD->GetEnclosingMethodTable();
+
+ // We need to make sure that the class has been allocated, however
+ // we should not call the class constructor
+ ThreadStatics::GetTLM(pMT)->EnsureClassAllocated(pMT);
+
+ PTR_BYTE base = NULL;
+
+ if (pFD->GetFieldType() == ELEMENT_TYPE_CLASS ||
+ pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ {
+ base = pMT->GetGCThreadStaticsBasePointer();
+ }
+ else
+ {
+ base = pMT->GetNonGCThreadStaticsBasePointer();
+ }
+
+ _ASSERTE(base != NULL);
+
+ DWORD offset = pFD->GetOffset();
+ _ASSERTE(offset <= FIELD_OFFSET_LAST_REAL_OFFSET);
+
+ LPVOID result = (LPVOID)((PTR_BYTE)base + (DWORD)offset);
+
+    // For value classes, the handle points at an OBJECTREF
+    // which holds the boxed value class, so dereference and unbox.
+ if (pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ {
+ OBJECTREF obj = ObjectToOBJECTREF(*(Object**) result);
+ result = obj->GetData();
+ }
+
+ return result;
+}
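+
+// Illustrative (hypothetical) use, assuming pFD describes a non-GC thread-static
+// int field; every thread that calls this sees its own storage:
+//
+//   int *pSlot = (int *)GetThread()->GetStaticFieldAddress(pFD);
+//   *pSlot = 42;   // updates only the current thread's copy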
+
+#endif // #ifndef DACCESS_COMPILE
+
+//+----------------------------------------------------------------------------
+//
+// Method: Thread::GetStaticFieldAddrNoCreate private
+//
+// Synopsis: Get the address of the field relative to the thread.
+// If an address has not been assigned, return NULL.
+// No creating is allowed.
+//
+//+----------------------------------------------------------------------------
+
+TADDR Thread::GetStaticFieldAddrNoCreate(FieldDesc *pFD, PTR_AppDomain pDomain)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pFD != NULL);
+ _ASSERTE(pFD->IsThreadStatic());
+
+ // for static field the MethodTable is exact even for generic classes
+ PTR_MethodTable pMT = pFD->GetEnclosingMethodTable();
+
+ PTR_BYTE base = NULL;
+
+ if (pFD->GetFieldType() == ELEMENT_TYPE_CLASS ||
+ pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ {
+ base = pMT->GetGCThreadStaticsBasePointer(dac_cast<PTR_Thread>(this), pDomain);
+ }
+ else
+ {
+ base = pMT->GetNonGCThreadStaticsBasePointer(dac_cast<PTR_Thread>(this), pDomain);
+ }
+
+ if (base == NULL)
+ return NULL;
+
+ DWORD offset = pFD->GetOffset();
+ _ASSERTE(offset <= FIELD_OFFSET_LAST_REAL_OFFSET);
+
+ TADDR result = dac_cast<TADDR>(base) + (DWORD)offset;
+
+    // For value classes, the handle points at an OBJECTREF
+    // which holds the boxed value class, so dereference and unbox.
+ if (pFD->IsByValue())
+ {
+ _ASSERTE(result != NULL);
+ result = dac_cast<TADDR>
+ ((* PTR_UNCHECKED_OBJECTREF(result))->GetData());
+ }
+
+ return result;
+}
+
+#ifndef DACCESS_COMPILE
+
+//
+// NotifyFrameChainOfExceptionUnwind
+// -----------------------------------------------------------
+// This method will walk the Frame chain from pStartFrame to
+// the last frame that is below pvLimitSP and will call each
+// frame's ExceptionUnwind method. It will return the first
+// Frame that is above pvLimitSP.
+//
+Frame * Thread::NotifyFrameChainOfExceptionUnwind(Frame* pStartFrame, LPVOID pvLimitSP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ DISABLED(GC_TRIGGERS); // due to UnwindFrameChain from NOTRIGGER areas
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pStartFrame));
+ PRECONDITION(CheckPointer(pvLimitSP));
+ }
+ CONTRACTL_END;
+
+ Frame * pFrame;
+
+#ifdef _DEBUG
+ //
+ // assert that the specified Thread's Frame chain actually
+ // contains the start Frame.
+ //
+ pFrame = m_pFrame;
+ while ((pFrame != pStartFrame) &&
+ (pFrame != FRAME_TOP))
+ {
+ pFrame = pFrame->Next();
+ }
+ CONSISTENCY_CHECK_MSG(pFrame == pStartFrame, "pStartFrame is not on pThread's Frame chain!");
+#endif // _DEBUG
+
+ pFrame = pStartFrame;
+ while (pFrame < pvLimitSP)
+ {
+ CONSISTENCY_CHECK(pFrame != PTR_NULL);
+ CONSISTENCY_CHECK((pFrame) > static_cast<Frame *>((LPVOID)GetCurrentSP()));
+ pFrame->ExceptionUnwind();
+ pFrame = pFrame->Next();
+ }
+
+ // return the frame after the last one notified of the unwind
+ return pFrame;
+}
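+
+// Illustrative (hypothetical) use from an unwind path, assuming resumeSP is the SP
+// being unwound to: notify every explicit Frame below resumeSP, after which the
+// caller can continue with the first Frame that survived:
+//
+//   Frame *pRemaining = pThread->NotifyFrameChainOfExceptionUnwind(pThread->GetFrame(), (LPVOID)resumeSP);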
+
+//+----------------------------------------------------------------------------
+//
+// Method: Thread::DeleteThreadStaticData private
+//
+// Synopsis: Delete the static data for each appdomain that this thread
+// visited.
+//
+//
+//+----------------------------------------------------------------------------
+
+void Thread::DeleteThreadStaticData()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Deallocate the memory used by the table of ThreadLocalBlocks
+ if (m_pTLBTable != NULL)
+ {
+ for (SIZE_T i = 0; i < m_TLBTableSize; ++i)
+ {
+ ThreadLocalBlock * pTLB = m_pTLBTable[i];
+ if (pTLB != NULL)
+ {
+ m_pTLBTable[i] = NULL;
+ pTLB->FreeTable();
+ delete pTLB;
+ }
+ }
+
+ delete m_pTLBTable;
+ m_pTLBTable = NULL;
+ }
+ m_pThreadLocalBlock = NULL;
+ m_TLBTableSize = 0;
+}
+
+//+----------------------------------------------------------------------------
+//
+// Method: Thread::DeleteThreadStaticData protected
+//
+// Synopsis: Delete the static data for the given appdomain. This is called
+// when the appdomain unloads.
+//
+//
+//+----------------------------------------------------------------------------
+
+void Thread::DeleteThreadStaticData(AppDomain *pDomain)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Look up the AppDomain index
+ SIZE_T index = pDomain->GetIndex().m_dwIndex;
+
+ ThreadLocalBlock * pTLB = NULL;
+
+ // NULL out the pointer to the ThreadLocalBlock
+ if (index < m_TLBTableSize)
+ {
+ pTLB = m_pTLBTable[index];
+ m_pTLBTable[index] = NULL;
+ }
+
+ if (pTLB != NULL)
+ {
+ // Since the AppDomain is being unloaded anyway, all the memory used by
+ // the TLB will be reclaimed, so we don't really have to call FreeTable()
+ pTLB->FreeTable();
+
+ delete pTLB;
+ }
+}
+
+#ifdef FEATURE_LEAK_CULTURE_INFO
+void Thread::ResetCultureForDomain(ADID id)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ THREADBASEREF thread = (THREADBASEREF) GetExposedObjectRaw();
+
+ if (thread == NULL)
+ return;
+
+ CULTUREINFOBASEREF userCulture = thread->GetCurrentUserCulture();
+ if (userCulture != NULL)
+ {
+ if (!userCulture->IsSafeCrossDomain() && userCulture->GetCreatedDomainID() == id)
+ thread->ResetCurrentUserCulture();
+ }
+
+ CULTUREINFOBASEREF UICulture = thread->GetCurrentUICulture();
+ if (UICulture != NULL)
+ {
+ if (!UICulture->IsSafeCrossDomain() && UICulture->GetCreatedDomainID() == id)
+ thread->ResetCurrentUICulture();
+ }
+}
+#endif // FEATURE_LEAK_CULTURE_INFO
+
+#ifndef FEATURE_LEAK_CULTURE_INFO
+void Thread::InitCultureAccessors()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF *pCurrentCulture = NULL;
+ Thread *pThread = GetThread();
+
+ GCX_COOP();
+
+ if (managedThreadCurrentCulture == NULL) {
+ managedThreadCurrentCulture = MscorlibBinder::GetField(FIELD__THREAD__CULTURE);
+ pCurrentCulture = (OBJECTREF*)pThread->GetStaticFieldAddress(managedThreadCurrentCulture);
+ }
+
+ if (managedThreadCurrentUICulture == NULL) {
+ managedThreadCurrentUICulture = MscorlibBinder::GetField(FIELD__THREAD__UI_CULTURE);
+ pCurrentCulture = (OBJECTREF*)pThread->GetStaticFieldAddress(managedThreadCurrentUICulture);
+ }
+}
+#endif // FEATURE_LEAK_CULTURE_INFO
+
+
+ARG_SLOT Thread::CallPropertyGet(BinderMethodID id, OBJECTREF pObject)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (!pObject) {
+ return 0;
+ }
+
+ ARG_SLOT retVal;
+
+ GCPROTECT_BEGIN(pObject);
+ MethodDescCallSite propGet(id, &pObject);
+
+ // Set up the Stack.
+ ARG_SLOT pNewArgs = ObjToArgSlot(pObject);
+
+ // Make the actual call.
+ retVal = propGet.Call_RetArgSlot(&pNewArgs);
+ GCPROTECT_END();
+
+ return retVal;
+}
+
+ARG_SLOT Thread::CallPropertySet(BinderMethodID id, OBJECTREF pObject, OBJECTREF pValue)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (!pObject) {
+ return 0;
+ }
+
+ ARG_SLOT retVal;
+
+ GCPROTECT_BEGIN(pObject);
+ GCPROTECT_BEGIN(pValue);
+ MethodDescCallSite propSet(id, &pObject);
+
+ // Set up the Stack.
+ ARG_SLOT pNewArgs[] = {
+ ObjToArgSlot(pObject),
+ ObjToArgSlot(pValue)
+ };
+
+ // Make the actual call.
+ retVal = propSet.Call_RetArgSlot(pNewArgs);
+ GCPROTECT_END();
+ GCPROTECT_END();
+
+ return retVal;
+}
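+
+// Reader's sketch of how these two helpers are used (see GetCulture/SetCulture below),
+// assuming pThreadBase is a GC-protected THREADBASEREF:
+//
+//   ARG_SLOT val = CallPropertyGet(METHOD__THREAD__GET_UI_CULTURE, (OBJECTREF)pThreadBase);
+//   OBJECTREF culture = ArgSlotToObj(val);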
+
+OBJECTREF Thread::GetCulture(BOOL bUICulture)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ FieldDesc * pFD;
+
+ _ASSERTE(PreemptiveGCDisabled());
+
+ // This is the case when we're building mscorlib and haven't yet created
+ // the system assembly.
+ if (SystemDomain::System()->SystemAssembly()==NULL || g_fForbidEnterEE) {
+ return NULL;
+ }
+
+ // Get the actual thread culture.
+ OBJECTREF pCurThreadObject = GetExposedObject();
+ _ASSERTE(pCurThreadObject!=NULL);
+
+ THREADBASEREF pThreadBase = (THREADBASEREF)(pCurThreadObject);
+ OBJECTREF pCurrentCulture = bUICulture ? pThreadBase->GetCurrentUICulture() : pThreadBase->GetCurrentUserCulture();
+
+ if (pCurrentCulture==NULL) {
+ GCPROTECT_BEGIN(pThreadBase);
+ if (bUICulture) {
+ // Call the Getter for the CurrentUICulture. This will cause it to populate the field.
+ ARG_SLOT retVal = CallPropertyGet(METHOD__THREAD__GET_UI_CULTURE,
+ (OBJECTREF)pThreadBase);
+ pCurrentCulture = ArgSlotToObj(retVal);
+ } else {
+            // This is faster than calling the property, because this is what the call does anyway.
+ pFD = MscorlibBinder::GetField(FIELD__CULTURE_INFO__CURRENT_CULTURE);
+ _ASSERTE(pFD);
+
+ pFD->CheckRunClassInitThrowing();
+
+ pCurrentCulture = pFD->GetStaticOBJECTREF();
+ _ASSERTE(pCurrentCulture!=NULL);
+ }
+ GCPROTECT_END();
+ }
+
+ return pCurrentCulture;
+}
+
+
+
+// copy culture name into szBuffer and return length
+int Thread::GetParentCultureName(__out_ecount(length) LPWSTR szBuffer, int length, BOOL bUICulture)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // This is the case when we're building mscorlib and haven't yet created
+ // the system assembly.
+ if (SystemDomain::System()->SystemAssembly()==NULL) {
+ const WCHAR *tempName = W("en");
+ INT32 tempLength = (INT32)wcslen(tempName);
+ _ASSERTE(length>=tempLength);
+ memcpy(szBuffer, tempName, tempLength*sizeof(WCHAR));
+ return tempLength;
+ }
+
+ ARG_SLOT Result = 0;
+ INT32 retVal=0;
+ WCHAR *buffer=NULL;
+ INT32 bufferLength=0;
+ STRINGREF cultureName = NULL;
+
+ GCX_COOP();
+
+ struct _gc {
+ OBJECTREF pCurrentCulture;
+ OBJECTREF pParentCulture;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ GCPROTECT_BEGIN(gc);
+
+ gc.pCurrentCulture = GetCulture(bUICulture);
+ if (gc.pCurrentCulture != NULL) {
+ Result = CallPropertyGet(METHOD__CULTURE_INFO__GET_PARENT, gc.pCurrentCulture);
+ }
+
+ if (Result) {
+ gc.pParentCulture = (OBJECTREF)(ArgSlotToObj(Result));
+ if (gc.pParentCulture != NULL)
+ {
+ Result = 0;
+ Result = CallPropertyGet(METHOD__CULTURE_INFO__GET_NAME, gc.pParentCulture);
+ }
+ }
+
+ GCPROTECT_END();
+
+ if (Result==0) {
+ return 0;
+ }
+
+
+ // Extract the data out of the String.
+ cultureName = (STRINGREF)(ArgSlotToObj(Result));
+ cultureName->RefInterpretGetStringValuesDangerousForGC((WCHAR**)&buffer, &bufferLength);
+
+ if (bufferLength<length) {
+ memcpy(szBuffer, buffer, bufferLength * sizeof (WCHAR));
+ szBuffer[bufferLength]=0;
+ retVal = bufferLength;
+ }
+
+ return retVal;
+}
+
+
+
+
+// copy culture name into szBuffer and return length
+int Thread::GetCultureName(__out_ecount(length) LPWSTR szBuffer, int length, BOOL bUICulture)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // This is the case when we're building mscorlib and haven't yet created
+ // the system assembly.
+ if (SystemDomain::System()->SystemAssembly()==NULL || g_fForbidEnterEE) {
+ const WCHAR *tempName = W("en-US");
+ INT32 tempLength = (INT32)wcslen(tempName);
+ _ASSERTE(length>=tempLength);
+ memcpy(szBuffer, tempName, tempLength*sizeof(WCHAR));
+ return tempLength;
+ }
+
+ ARG_SLOT Result = 0;
+ INT32 retVal=0;
+ WCHAR *buffer=NULL;
+ INT32 bufferLength=0;
+ STRINGREF cultureName = NULL;
+
+ GCX_COOP ();
+
+ OBJECTREF pCurrentCulture = NULL;
+ GCPROTECT_BEGIN(pCurrentCulture)
+ {
+ pCurrentCulture = GetCulture(bUICulture);
+ if (pCurrentCulture != NULL)
+ Result = CallPropertyGet(METHOD__CULTURE_INFO__GET_NAME, pCurrentCulture);
+ }
+ GCPROTECT_END();
+
+ if (Result==0) {
+ return 0;
+ }
+
+ // Extract the data out of the String.
+ cultureName = (STRINGREF)(ArgSlotToObj(Result));
+ cultureName->RefInterpretGetStringValuesDangerousForGC((WCHAR**)&buffer, &bufferLength);
+
+ if (bufferLength<length) {
+ memcpy(szBuffer, buffer, bufferLength * sizeof (WCHAR));
+ szBuffer[bufferLength]=0;
+ retVal = bufferLength;
+ }
+
+ return retVal;
+}
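+
+// Note on the buffer contract implemented above: if the name does not fit
+// (bufferLength >= length), nothing is copied and 0 is returned; otherwise the name
+// is copied, NUL-terminated, and its length in WCHARs (excluding the NUL) is
+// returned. For example, a 32-WCHAR buffer comfortably holds "en-US" (5 + NUL).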
+
+LCID GetThreadCultureIdNoThrow(Thread *pThread, BOOL bUICulture)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LCID Result = LCID(-1);
+
+ EX_TRY
+ {
+ Result = pThread->GetCultureId(bUICulture);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH (SwallowAllExceptions);
+
+    return Result;
+}
+
+// Return a language identifier.
+LCID Thread::GetCultureId(BOOL bUICulture)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // This is the case when we're building mscorlib and haven't yet created
+ // the system assembly.
+ if (SystemDomain::System()->SystemAssembly()==NULL || g_fForbidEnterEE) {
+ return (LCID) -1;
+ }
+
+ LCID Result = (LCID) -1;
+
+#ifdef FEATURE_USE_LCID
+ GCX_COOP();
+
+ OBJECTREF pCurrentCulture = NULL;
+ GCPROTECT_BEGIN(pCurrentCulture)
+ {
+ pCurrentCulture = GetCulture(bUICulture);
+ if (pCurrentCulture != NULL)
+ Result = (LCID)CallPropertyGet(METHOD__CULTURE_INFO__GET_ID, pCurrentCulture);
+ }
+ GCPROTECT_END();
+#endif
+
+ return Result;
+}
+
+void Thread::SetCultureId(LCID lcid, BOOL bUICulture)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+ OBJECTREF CultureObj = NULL;
+ GCPROTECT_BEGIN(CultureObj)
+ {
+ // Convert the LCID into a CultureInfo.
+ GetCultureInfoForLCID(lcid, &CultureObj);
+
+ // Set the newly created culture as the thread's culture.
+ SetCulture(&CultureObj, bUICulture);
+ }
+ GCPROTECT_END();
+}
+
+void Thread::SetCulture(OBJECTREF *CultureObj, BOOL bUICulture)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Retrieve the exposed thread object.
+ OBJECTREF pCurThreadObject = GetExposedObject();
+ _ASSERTE(pCurThreadObject!=NULL);
+
+ // Set the culture property on the thread.
+ THREADBASEREF pThreadBase = (THREADBASEREF)(pCurThreadObject);
+ CallPropertySet(bUICulture
+ ? METHOD__THREAD__SET_UI_CULTURE
+ : METHOD__THREAD__SET_CULTURE,
+ (OBJECTREF)pThreadBase, *CultureObj);
+}
+
+void Thread::SetHasPromotedBytes ()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ m_fPromoted = TRUE;
+
+ _ASSERTE(GCHeap::IsGCInProgress() && IsGCThread ());
+
+ if (!m_fPreemptiveGCDisabled)
+ {
+ if (FRAME_TOP == GetFrame())
+ m_fPromoted = FALSE;
+ }
+}
+
+BOOL ThreadStore::HoldingThreadStore(Thread *pThread)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (pThread)
+ {
+ return (pThread == s_pThreadStore->m_HoldingThread);
+ }
+ else
+ {
+ return (s_pThreadStore->m_holderthreadid.IsSameThread());
+ }
+}
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+void Thread::SetupFiberData()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (this == GetThread());
+ _ASSERTE (m_pFiberData == NULL);
+
+ m_pFiberData = ClrTeb::GetFiberDataPtr();
+ if (m_pFiberData != NULL && (g_CORDebuggerControlFlags & DBCF_FIBERMODE) == 0)
+ {
+ // We are in fiber mode
+ g_CORDebuggerControlFlags |= DBCF_FIBERMODE;
+ if (g_pDebugInterface)
+ {
+ g_pDebugInterface->SetFiberMode(true);
+ }
+ }
+}
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#ifdef _DEBUG
+
+int Thread::MaxThreadRecord = 20;
+int Thread::MaxStackDepth = 20;
+
+const int Thread::MaxThreadTrackInfo = Thread::ThreadTrackInfo_Max;
+
+void Thread::AddFiberInfo(DWORD type)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+#ifndef FEATURE_PAL
+
+ if (m_pFiberInfo[0] == NULL) {
+ return;
+ }
+
+ DWORD mask = g_pConfig->SaveThreadInfoMask();
+ if ((mask & type) == 0)
+ {
+ return;
+ }
+
+ int slot = -1;
+ while (type != 0)
+ {
+ type >>= 1;
+ slot ++;
+ }
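+
+    // The loop above maps a single-bit type flag to its bit index; e.g. a type of
+    // 0x8 yields slot == 3. (A multi-bit type would land on its highest set bit.)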
+
+ _ASSERTE (slot < ThreadTrackInfo_Max);
+
+    // Use a try block to force an EBP frame.
+ PAL_TRY_NAKED {
+ ULONG index = FastInterlockIncrement((LONG*)&m_FiberInfoIndex[slot])-1;
+ index %= MaxThreadRecord;
+ size_t unitBytes = sizeof(FiberSwitchInfo)-sizeof(size_t)+MaxStackDepth*sizeof(size_t);
+ FiberSwitchInfo *pInfo = (FiberSwitchInfo*)((char*)m_pFiberInfo[slot] + index*unitBytes);
+ pInfo->timeStamp = getTimeStamp();
+ pInfo->threadID = GetCurrentThreadId();
+
+#ifdef FEATURE_HIJACK
+ // We can't crawl the stack of a thread that currently has a hijack pending
+ // (since the hijack routine won't be recognized by any code manager). So we
+ // undo any hijack, the EE will re-attempt it later.
+ // Stack crawl happens on the current thread, which may not be 'this' thread.
+ Thread* pCurrentThread = GetThread();
+ if (pCurrentThread != NULL && (pCurrentThread->m_State & TS_Hijacked))
+ {
+ pCurrentThread->UnhijackThread();
+ }
+#endif
+
+ int count = UtilCaptureStackBackTrace (2,MaxStackDepth,(PVOID*)pInfo->callStack,NULL);
+ while (count < MaxStackDepth) {
+ pInfo->callStack[count++] = 0;
+ }
+ }
+ PAL_EXCEPT_NAKED (EXCEPTION_EXECUTE_HANDLER)
+ {
+ }
+ PAL_ENDTRY_NAKED;
+#endif // !FEATURE_PAL
+}
+
+#endif // _DEBUG
+
+HRESULT Thread::SwitchIn(HANDLE threadHandle)
+{
+ // can't have dynamic contracts because this method is going to mess with TLS
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+
+ //can't do heap allocation in this method
+ CantAllocHolder caHolder;
+
+    // !!! Can not use the following line, since it uses an object whose destructor calls
+    // !!! FLS_SETVALUE, and a new FLS is created after SwitchOut.
+    // CANNOTTHROWCOMPLUSEXCEPTION();
+
+    // Cast the cookie to a Thread object and add it to TLS.
+#ifdef _DEBUG
+ Thread *pThread = GetThread();
+    // If this is hit, we need to understand why.
+    // Sometimes we see the assert fire but the memory does not match the asserted state.
+ if (pThread) {
+ DebugBreak();
+ }
+ //_ASSERT(GetThread() == NULL);
+#endif
+
+ if (GetThread() != NULL) {
+ return HOST_E_INVALIDOPERATION;
+ }
+
+ CExecutionEngine::SwitchIn();
+
+ // !!! no contract for this class.
+ // !!! We have not switched in tls block.
+ class EnsureTlsData
+ {
+ private:
+ Thread *m_pThread;
+ BOOL m_fNeedReset;
+ public:
+ EnsureTlsData(Thread* pThread){m_pThread = pThread; m_fNeedReset = TRUE;}
+ ~EnsureTlsData()
+ {
+ if (m_fNeedReset)
+ {
+ SetThread(NULL);
+ SetAppDomain(NULL);
+ CExecutionEngine::SwitchOut();
+ }
+ }
+ void SuppressRelease()
+ {
+ m_fNeedReset = FALSE;
+ }
+ };
+
+ EnsureTlsData ensure(this);
+
+#ifdef _DEBUG
+ if (CLRTaskHosted()) {
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTask *pTask = NULL;
+ _ASSERTE (CorHost2::GetHostTaskManager()->GetCurrentTask(&pTask) == S_OK &&
+ (pTask == GetHostTask() || GetHostTask() == NULL));
+
+ if (pTask)
+ pTask->Release();
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ }
+#endif
+
+ if (SetThread(this))
+ {
+ Thread *pThread = GetThread();
+ if (!pThread)
+ return E_OUTOFMEMORY;
+
+ // !!! make sure that we switchin TLS so that FLS is available for Contract etc.
+
+ // We redundantly keep the domain in its own TLS slot, for faster access from
+ // stubs
+ if (!SetAppDomain(m_pDomainAtTaskSwitch))
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ CANNOTTHROWCOMPLUSEXCEPTION();
+#if 0
+ // We switch out a fiber only if the fiber is in preemptive gc mode.
+ _ASSERTE (!PreemptiveGCDisabled());
+#endif
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (CLRTaskHosted() && GetHostTask() == NULL)
+ {
+ // Reset has been called on this task.
+
+ if (! SetStackLimits(fAll))
+ {
+ return E_FAIL;
+ }
+
+            // We commit the thread's entire stack when it enters the runtime to allow us to be reliable in low memory
+            // situations. See the comments in front of Thread::CommitThreadStack() for more information.
+ if (!Thread::CommitThreadStack(this))
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ HRESULT hr = CorHost2::GetHostTaskManager()->GetCurrentTask(&m_pHostTask);
+ _ASSERTE (hr == S_OK && m_pHostTask);
+
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_Lifetime);
+#endif
+
+ m_pFiberData = ClrTeb::GetFiberDataPtr();
+
+ m_OSThreadId = ::GetCurrentThreadId();
+
+#ifdef ENABLE_CONTRACTS
+ m_pClrDebugState = ::GetClrDebugState();
+#endif
+ ResetThreadState(TS_TaskReset);
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ // We have to be switched in on the same fiber
+ _ASSERTE (GetCachedStackBase() == GetStackUpperBound());
+
+ if (m_pFiberData)
+ {
+            // In fiber mode the underlying OS thread can change, so refresh m_OSThreadId here.
+ m_OSThreadId = ::GetCurrentThreadId();
+#ifdef PROFILING_SUPPORTED
+            // If a profiler is present, then notify the profiler that the
+            // managed thread has been assigned to this OS thread.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
+ g_profControlBlock.pProfInterface->ThreadAssignedToOSThread(
+ (ThreadID)this, m_OSThreadId);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+ }
+ SetThreadHandle(threadHandle);
+
+#ifndef FEATURE_PAL
+ m_pTEB = (struct _NT_TIB*)NtCurrentTeb();
+#endif // !FEATURE_PAL
+
+#if 0
+ if (g_TrapReturningThreads && m_fPreemptiveGCDisabled && this != ThreadSuspend::GetSuspensionThread()) {
+ WorkingOnThreadContextHolder workingOnThreadContext(this);
+ if (workingOnThreadContext.Acquired())
+ {
+ HandledJITCase(TRUE);
+ }
+ }
+#endif
+
+#ifdef _DEBUG
+        // For debugging purposes, we save a callstack during task switch. On Win64, the task switch
+        // can occur while the OS loader lock is held, and obtaining a managed callstack may cause a fiber switch.
+ SetThreadStateNC(TSNC_InTaskSwitch);
+ AddFiberInfo(ThreadTrackInfo_Schedule);
+ ResetThreadStateNC(TSNC_InTaskSwitch);
+#endif
+
+ ensure.SuppressRelease();
+ return S_OK;
+ }
+ else
+ {
+ return E_FAIL;
+ }
+}
+
+HRESULT Thread::SwitchOut()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return E_NOTIMPL;
+}
+
+void Thread::InternalSwitchOut()
+{
+ INDEBUG( BOOL fNoTLS = (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL));
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ {
+ // Can't do heap allocation in this method.
+ // We need to scope this holder because its destructor accesses FLS.
+ CantAllocHolder caHolder;
+
+ // !!! Can not use the following line, since it uses an object which .dctor calls
+ // !!! FLS_SETVALUE, and a new FLS is created after SwitchOut.
+ // CANNOTTHROWCOMPLUSEXCEPTION();
+
+ _ASSERTE(GetThread() == this);
+
+ _ASSERTE (!fNoTLS ||
+ (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL));
+
+#if 0
+        // workaround wwl: for SQL reschedule
+#ifndef _DEBUG
+        if (PreemptiveGCDisabled())
+ {
+ DebugBreak();
+ }
+#endif
+ _ASSERTE(!PreemptiveGCDisabled());
+#endif
+
+ // Can not assert here. If a mutex is orphaned, the thread will have ThreadAffinity.
+ //_ASSERTE(!HasThreadAffinity());
+
+ _ASSERTE (!fNoTLS ||
+ (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL));
+
+#ifdef _DEBUG
+        // For debugging purposes, we save a callstack during task switch. On Win64, the task switch
+        // can occur while the OS loader lock is held, and obtaining a managed callstack may cause a fiber switch.
+ SetThreadStateNC(TSNC_InTaskSwitch);
+ AddFiberInfo(ThreadTrackInfo_Schedule);
+ ResetThreadStateNC(TSNC_InTaskSwitch);
+#endif
+
+ _ASSERTE (!fNoTLS ||
+ (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL));
+
+ m_pDomainAtTaskSwitch = GetAppDomain();
+
+ if (m_pFiberData)
+ {
+ // only set the m_OSThreadId to bad food in Fiber mode
+ m_OSThreadId = SWITCHED_OUT_FIBER_OSID;
+#ifdef PROFILING_SUPPORTED
+            // If a profiler is present, then notify the profiler that the
+            // managed thread is being switched away from its OS thread.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
+ g_profControlBlock.pProfInterface->ThreadAssignedToOSThread(
+ (ThreadID)this, m_OSThreadId);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+ }
+
+ _ASSERTE (!fNoTLS ||
+ (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL));
+
+ HANDLE hThread = GetThreadHandle();
+
+ SetThreadHandle (SWITCHOUT_HANDLE_VALUE);
+ while (m_dwThreadHandleBeingUsed > 0)
+ {
+ // Another thread is using the handle now.
+#undef Sleep
+ // We can not call __SwitchToThread since we can not go back to host.
+ ::Sleep(10);
+#define Sleep(a) Dont_Use_Sleep(a)
+ }
+
+ if (m_WeOwnThreadHandle && m_ThreadHandleForClose == INVALID_HANDLE_VALUE)
+ {
+ m_ThreadHandleForClose = hThread;
+ }
+
+ // The host is getting control of this thread, so if we were trying
+ // to yield this thread, we can stop those attempts now.
+ ResetThreadState(TS_YieldRequested);
+
+ _ASSERTE (!fNoTLS ||
+ (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL));
+ }
+
+ CExecutionEngine::SwitchOut();
+
+ // We need to make sure that TLS are touched last here.
+ // Contract uses TLS.
+ SetThread(NULL);
+ SetAppDomain(NULL);
+
+ _ASSERTE (!fNoTLS ||
+ (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL));
+}
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+HRESULT Thread::GetMemStats (COR_GC_THREAD_STATS *pStats)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+    // Get the allocation context, which contains these allocation counters.
+ alloc_context *p = &m_alloc_context;
+ pStats->PerThreadAllocation = p->alloc_bytes + p->alloc_bytes_loh;
+ if (GetHasPromotedBytes())
+ pStats->Flags = COR_GC_THREAD_HAS_PROMOTED_BYTES;
+
+ return S_OK;
+}
+#endif //FEATURE_INCLUDE_ALL_INTERFACES
+
+
+LONG Thread::GetTotalThreadPoolCompletionCount()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LONG total;
+ if (g_fEEStarted) //make sure we actually have a thread store
+ {
+ // make sure up-to-date thread-local counts are visible to us
+ ::FlushProcessWriteBuffers();
+
+ // enumerate all threads, summing their local counts.
+ ThreadStoreLockHolder tsl;
+
+ total = s_threadPoolCompletionCountOverflow.Load();
+
+ Thread *pThread = NULL;
+ while ((pThread = ThreadStore::GetAllThreadList(pThread, 0, 0)) != NULL)
+ {
+ total += pThread->m_threadPoolCompletionCount;
+ }
+ }
+ else
+ {
+ total = s_threadPoolCompletionCountOverflow.Load();
+ }
+
+ return total;
+}
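+
+// Design note: completions are counted in per-thread fields (m_threadPoolCompletionCount)
+// to avoid contention on a single shared counter; s_threadPoolCompletionCountOverflow
+// presumably accumulates counts from threads that have exited. The
+// FlushProcessWriteBuffers() call above makes other threads' in-flight increments
+// visible before we sum under the thread store lock.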
+
+
+INT32 Thread::ResetManagedThreadObject(INT32 nPriority)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+ return ResetManagedThreadObjectInCoopMode(nPriority);
+}
+
+INT32 Thread::ResetManagedThreadObjectInCoopMode(INT32 nPriority)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject);
+ if (pObject != NULL)
+ {
+ pObject->ResetCulture();
+ pObject->ResetName();
+ nPriority = pObject->GetPriority();
+ }
+
+ return nPriority;
+}
+
+void Thread::FullResetThread()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ GCX_COOP();
+
+    // We need to put this thread in COOPERATIVE GC mode first to solve a race between AppDomain::Unload
+    // and Thread::Reset. AppDomain::Unload does a full GC to collect all roots in one AppDomain.
+ // ThreadStaticData used to be coupled with a managed array of objects in the managed Thread
+ // object, however this is no longer the case.
+
+ // TODO: Do we still need to put this thread into COOP mode?
+
+ GCX_FORBID();
+ DeleteThreadStaticData();
+ ResetSecurityInfo();
+
+ m_alloc_context.alloc_bytes = 0;
+ m_fPromoted = FALSE;
+}
+
+BOOL Thread::IsRealThreadPoolResetNeeded()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if(!IsBackground())
+ return TRUE;
+
+ THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject);
+
+ if(pObject != NULL)
+ {
+ INT32 nPriority = pObject->GetPriority();
+
+ if(nPriority != ThreadNative::PRIORITY_NORMAL)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+void Thread::InternalReset(BOOL fFull, BOOL fNotFinalizerThread, BOOL fThreadObjectResetNeeded, BOOL fResetAbort)
+{
+ CONTRACTL {
+ NOTHROW;
+ if(!fNotFinalizerThread || fThreadObjectResetNeeded) {GC_TRIGGERS;SO_INTOLERANT;} else {GC_NOTRIGGER;SO_TOLERANT;}
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (this == GetThread());
+
+ FinishSOWork();
+
+ INT32 nPriority = ThreadNative::PRIORITY_NORMAL;
+
+ if (!fNotFinalizerThread && this == FinalizerThread::GetFinalizerThread())
+ {
+ nPriority = ThreadNative::PRIORITY_HIGHEST;
+ }
+
+ if(fThreadObjectResetNeeded)
+ {
+ nPriority = ResetManagedThreadObject(nPriority);
+ }
+
+ if (fFull)
+ {
+ FullResetThread();
+ }
+
+ _ASSERTE (m_dwCriticalRegionCount == 0);
+ m_dwCriticalRegionCount = 0;
+
+ _ASSERTE (m_dwThreadAffinityCount == 0);
+ m_dwThreadAffinityCount = 0;
+
+ //m_MarshalAlloc.Collapse(NULL);
+
+ if (fResetAbort && IsAbortRequested()) {
+ UnmarkThreadForAbort(TAR_ALL);
+ }
+
+ if (fResetAbort && IsAborted())
+ ClearAborted();
+
+ if (IsThreadPoolThread() && fThreadObjectResetNeeded)
+ {
+ SetBackground(TRUE);
+ if (nPriority != ThreadNative::PRIORITY_NORMAL)
+ {
+ SetThreadPriority(THREAD_PRIORITY_NORMAL);
+ }
+ }
+ else if (!fNotFinalizerThread && this == FinalizerThread::GetFinalizerThread())
+ {
+ SetBackground(TRUE);
+ if (nPriority != ThreadNative::PRIORITY_HIGHEST)
+ {
+ SetThreadPriority(THREAD_PRIORITY_HIGHEST);
+ }
+ }
+}
+
+HRESULT Thread::Reset(BOOL fFull)
+{
+ // !!! Can not use non-static contract here.
+ // !!! Contract depends on Thread object for GC_TRIGGERS.
+ // !!! At the end of this function, we call InternalSwitchOut,
+ // !!! and then GetThread()=NULL, and dtor of contract does not work any more.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ if ( !g_fEEStarted)
+ return(E_FAIL);
+
+ HRESULT hr = S_OK;
+
+ BEGIN_SO_INTOLERANT_CODE_NOPROBE;
+
+#ifdef _DEBUG
+ if (CLRTaskHosted()) {
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ // Reset is a heavy operation. We will call into SQL for lock and memory operations.
+        // The host had better keep IHostTask alive.
+ _ASSERTE (GetCurrentHostTask() == GetHostTask());
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ }
+
+ _ASSERTE (GetThread() == this);
+#ifdef _TARGET_X86_
+ _ASSERTE (GetExceptionState()->GetContextRecord() == NULL);
+#endif
+#endif
+
+ if (GetThread() != this)
+ {
+ IfFailGo(E_UNEXPECTED);
+ }
+
+ if (HasThreadState(Thread::TS_YieldRequested))
+ {
+ ResetThreadState(Thread::TS_YieldRequested);
+ }
+
+ _ASSERTE (!PreemptiveGCDisabled());
+ _ASSERTE (m_pFrame == FRAME_TOP);
+ // A host should not recycle a CLRTask if the task is created by us through CreateNewThread.
+ // We need to make Thread.Join work for this case.
+ if ((m_StateNC & (TSNC_CLRCreatedThread | TSNC_CannotRecycle)) != 0)
+ {
+        // TODO (wwl): return a better error code.
+ IfFailGo(E_UNEXPECTED);
+ }
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ if (IsCoInitialized())
+ {
+ // The current thread has done CoInitialize
+ IfFailGo(E_UNEXPECTED);
+ }
+#endif
+
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_Lifetime);
+#endif
+
+ SetThreadState(TS_TaskReset);
+
+ if (IsAbortRequested())
+ {
+ EEResetAbort(Thread::TAR_ALL);
+ }
+
+ InternalReset(fFull);
+
+ if (PreemptiveGCDisabled())
+ {
+ EnablePreemptiveGC();
+ }
+
+ {
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ // We need to scope this assert because of
+ // the jumps to ErrExit from above.
+ GCX_ASSERT_PREEMP();
+
+ _ASSERTE (m_pHostTask);
+
+ ReleaseHostTask();
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#ifdef WIN64EXCEPTIONS
+ ExceptionTracker::PopTrackers((void*)-1);
+#endif // WIN64EXCEPTIONS
+
+ ResetThreadStateNC(TSNC_UnbalancedLocks);
+ m_dwLockCount = 0;
+ m_dwCriticalRegionCount = 0;
+
+ InternalSwitchOut();
+ m_OSThreadId = SWITCHED_OUT_FIBER_OSID;
+ }
+
+ErrExit:
+
+ END_SO_INTOLERANT_CODE_NOPROBE;
+
+#ifdef ENABLE_CONTRACTS_DATA
+    // Decouple our cache from the Task.
+    // Next time, this task may run on a different thread.
+ if (SUCCEEDED(hr))
+ {
+ m_pClrDebugState = NULL;
+ }
+#endif
+
+ return hr;
+}
+
+HRESULT Thread::ExitTask ()
+{
+ // !!! Can not use contract here.
+ // !!! Contract depends on Thread object for GC_TRIGGERS.
+ // !!! At the end of this function, we call InternalSwitchOut,
+ // !!! and then GetThread()=NULL, and dtor of contract does not work any more.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_ENTRY_POINT;
+
+ if ( !g_fEEStarted)
+ return(E_FAIL);
+
+ HRESULT hr = S_OK;
+
+ // <TODO> We need to probe here, but can't introduce destructors etc.</TODO>
+ BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
+
+ //OnThreadTerminate(FALSE);
+ _ASSERTE (this == GetThread());
+ _ASSERTE (!PreemptiveGCDisabled());
+
+    // Can not assert the following: SQL may call ExitTask after AddRef'ing and then aborting a task.
+ //_ASSERTE (m_UnmanagedRefCount == 0);
+ if (this != GetThread())
+ IfFailGo(HOST_E_INVALIDOPERATION);
+
+ if (HasThreadState(Thread::TS_YieldRequested))
+ {
+ ResetThreadState(Thread::TS_YieldRequested);
+ }
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ if (IsCoInitialized())
+ {
+ // This thread has used ole32. We need to balance CoInitialize call on this thread.
+ // We also need to free any COM objects created on this thread.
+
+ // If we don't do this work, ole32 is going to do the same during its DLL_THREAD_DETACH,
+ // and may re-enter CLR.
+ CleanupCOMState();
+ }
+#endif
+ m_OSThreadId = SWITCHED_OUT_FIBER_OSID;
+ hr = DetachThread(FALSE);
+ // !!! Do not touch any field of Thread object. The Thread object is subject to delete
+ // !!! after DetachThread call.
+ErrExit:;
+
+ END_CONTRACT_VIOLATION;
+
+ return hr;
+}
+
+HRESULT Thread::Abort ()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW;);
+ EX_TRY
+ {
+ UserAbort(TAR_Thread, EEPolicy::TA_Safe, INFINITE, Thread::UAC_Host);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ END_SO_INTOLERANT_CODE;
+
+ return S_OK;
+}
+
+HRESULT Thread::RudeAbort()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+
+ EX_TRY
+ {
+ UserAbort(TAR_Thread, EEPolicy::TA_Rude, INFINITE, Thread::UAC_Host);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ END_SO_INTOLERANT_CODE;
+
+ return S_OK;
+}
+
+HRESULT Thread::NeedsPriorityScheduling(BOOL *pbNeedsPriorityScheduling)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ *pbNeedsPriorityScheduling = (m_fPreemptiveGCDisabled ||
+ (g_fEEStarted && this == FinalizerThread::GetFinalizerThread()));
+ return S_OK;
+}
+
+HRESULT Thread::YieldTask()
+{
+#undef Sleep
+ CONTRACTL {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ //can't do heap allocation in this method
+ CantAllocHolder caHolder;
+ _ASSERTE(CLRTaskHosted());
+
+ // The host must guarantee that we have enough stack before they call this API.
+ // We unfortunately do not have a good mechanism to indicate/enforce this and it's too
+ // late in Whidbey to add one now. We should definitely consider adding such a
+ // mechanism in Orcas however. For now we will work around this by marking the
+ // method as SO_TOLERANT and disabling SO tolerance violations for any code it calls.
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ //
+ // YieldTask should not be called from a managed thread, as it can lead to deadlocks.
+ // However, some tests do this, and it would be hard to change that. Let's at least ensure
+ // that they are not shooting themselves in the foot.
+ //
+ Thread* pCurThread = GetThread();
+ if (this == pCurThread)
+ {
+ // We will suspend the target thread. If YieldTask is called on the current thread,
+ // we will suspend the current thread forever.
+ return HOST_E_INVALIDOPERATION;
+ }
+
+ FAULT_FORBID();
+
+    // This function has been called by the host, and the host need not
+ // be reentrant. Therefore, no code running below this function can
+ // cause calls back into the host.
+ ForbidCallsIntoHostOnThisThreadHolder forbidCallsIntoHostOnThisThread(TRUE /*dummy*/);
+ while (!forbidCallsIntoHostOnThisThread.Acquired())
+ {
+ // We can not call __SwitchToThread since we can not go back to host.
+ ::Sleep(10);
+ forbidCallsIntoHostOnThisThread.Acquire();
+ }
+
+ // So that the thread can yield when it tries to switch to coop gc.
+ CounterHolder trtHolder(&g_TrapReturningThreads);
+
+ // One worker on a thread only.
+ while (TRUE)
+ {
+ LONG curValue = m_State;
+ if ((curValue & TS_YieldRequested) != 0)
+ {
+ // The host has previously called YieldTask for this thread,
+ // and the thread has not cleared the flag yet.
+ return S_FALSE;
+ }
+ else if ((curValue & TS_Unstarted) != 0)
+ {
+ // The task is still unstarted, so we can consider the host
+ // to be in control of this thread, which means we have
+ // succeeded in getting the host in control.
+ return S_OK;
+ }
+
+ CONSISTENCY_CHECK(sizeof(m_State) == sizeof(LONG));
+ if (FastInterlockCompareExchange((LONG*)&m_State, curValue | TS_YieldRequested, curValue) == curValue)
+ {
+ break;
+ }
+ }
+
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackSuspends());
+ g_profControlBlock.pProfInterface->RuntimeThreadSuspended((ThreadID)this);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ while (m_State & TS_YieldRequested)
+ {
+ BOOL fDone = FALSE;
+
+ if (m_State & (TS_Dead | TS_Detached))
+ {
+ // The thread is dead, in other words, yielded forever.
+ // Don't bother clearing TS_YieldRequested, as nobody
+ // is going to look at it any more.
+ break;
+ }
+
+ CounterHolder handleHolder(&m_dwThreadHandleBeingUsed);
+ HANDLE hThread = GetThreadHandle();
+ if (hThread == INVALID_HANDLE_VALUE)
+ {
+ // The thread is dead, in other words, yielded forever.
+ // Don't bother clearing TS_YieldRequested, as nobody
+ // is going to look at it any more.
+ break;
+ }
+ else if (hThread == SWITCHOUT_HANDLE_VALUE)
+ {
+ // The thread is currently switched out.
+ // This means that the host has control of the thread,
+ // so we can stop our attempts to yield it. Note that
+ // TS_YieldRequested is cleared in InternalSwitchOut. (If we
+ // were to clear it here, we could race against another
+ // thread that is running YieldTask.)
+ break;
+ }
+
+ DWORD dwSuspendCount = ::SuspendThread(hThread);
+ if ((int)dwSuspendCount >= 0)
+ {
+ if (!EnsureThreadIsSuspended(hThread, this))
+ {
+ goto Retry;
+ }
+
+ if (hThread == GetThreadHandle())
+ {
+ if (m_dwForbidSuspendThread != 0)
+ {
+ goto Retry;
+ }
+ }
+ else
+ {
+                // The thread was switched out and then back in again.
+ // We suspended the wrong thread; resume it and give
+ // up our attempts to yield. Note that TS_YieldRequested
+ // is cleared in InternalSwitchOut.
+ ::ResumeThread(hThread);
+ break;
+ }
+ }
+ else
+ {
+            // We can get here if either SuspendThread failed,
+            // or the fiber thread died after this fiber was switched out.
+
+ if ((int)dwSuspendCount != -1)
+ {
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, "In Thread::YieldTask ::SuspendThread returned %x \n", dwSuspendCount);
+ }
+ if (GetThreadHandle() == SWITCHOUT_HANDLE_VALUE)
+ {
+ // The thread was switched out while we tried to suspend it.
+ // This means that the host has control of the thread,
+ // so we can stop our attempts to yield it. Note that
+ // TS_YieldRequested is cleared in InternalSwitchOut. (If we
+ // were to clear it here, we could race against another
+ // thread that is running YieldTask.)
+ break;
+ }
+ else {
+ continue;
+ }
+ }
+
+ if (!m_fPreemptiveGCDisabled)
+ {
+ ::ResumeThread(hThread);
+ break;
+ }
+
+#ifdef FEATURE_HIJACK
+
+#ifdef _DEBUG
+ if (pCurThread != NULL)
+ {
+ pCurThread->dbg_m_cSuspendedThreads ++;
+ _ASSERTE(pCurThread->dbg_m_cSuspendedThreads > 0);
+ }
+#endif
+
+ // Only check for HandledJITCase if we actually suspended the thread.
+ if ((int)dwSuspendCount >= 0)
+ {
+ WorkingOnThreadContextHolder workingOnThreadContext(this);
+ if (workingOnThreadContext.Acquired() && HandledJITCase())
+ {
+ // Redirect thread so we can capture a good thread context
+ // (GetThreadContext is not sufficient, due to an OS bug).
+ // If we don't succeed (should only happen on Win9X, due to
+ // a different OS bug), we must resume the thread and try
+ // again.
+ fDone = CheckForAndDoRedirectForYieldTask();
+ }
+ }
+
+#ifdef _DEBUG
+ if (pCurThread != NULL)
+ {
+ _ASSERTE(pCurThread->dbg_m_cSuspendedThreads > 0);
+ pCurThread->dbg_m_cSuspendedThreads --;
+ _ASSERTE(pCurThread->dbg_m_cSuspendedThreadsWithoutOSLock <= pCurThread->dbg_m_cSuspendedThreads);
+ }
+#endif //_DEBUG
+
+#endif // FEATURE_HIJACK
+Retry:
+ ::ResumeThread(hThread);
+ if (fDone)
+ {
+ // We managed to redirect the thread, so we know that it will yield.
+ // We can let the actual yielding happen asynchronously.
+ break;
+ }
+ handleHolder.Release();
+ ::Sleep(1);
+ }
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackSuspends());
+ g_profControlBlock.pProfInterface->RuntimeThreadResumed((ThreadID)this);
+ END_PIN_PROFILER();
+ }
+#endif
+ return S_OK;
+#define Sleep(a) Dont_Use_Sleep(a)
+}
+
+HRESULT Thread::LocksHeld(SIZE_T *pLockCount)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ *pLockCount = m_dwLockCount + m_dwCriticalRegionCount;
+ return S_OK;
+}
+
+HRESULT Thread::SetTaskIdentifier(TASKID asked)
+{
+ LIMITED_METHOD_CONTRACT;
+
+    // @todo: Should we check for uniqueness?
+ m_TaskId = asked;
+ return S_OK;
+}
+
+HRESULT Thread::BeginPreventAsyncAbort()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef _DEBUG
+ int count =
+#endif
+ FastInterlockIncrement((LONG*)&m_PreventAbort);
+
+#ifdef _DEBUG
+ ASSERT(count > 0);
+ AddFiberInfo(ThreadTrackInfo_Abort);
+
+ FastInterlockIncrement((LONG*)&m_dwDisableAbortCheckCount);
+#endif
+
+ return S_OK;
+}
+
+HRESULT Thread::EndPreventAsyncAbort()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef _DEBUG
+ int count =
+#endif
+ FastInterlockDecrement((LONG*)&m_PreventAbort);
+
+#ifdef _DEBUG
+ ASSERT(count >= 0);
+ AddFiberInfo(ThreadTrackInfo_Abort);
+
+ FastInterlockDecrement((LONG*)&m_dwDisableAbortCheckCount);
+#endif
+
+ return S_OK;
+}
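+
+// These two are expected to be used as a bracketing pair around regions that must
+// not observe an asynchronous abort, e.g. (hypothetical caller):
+//
+//   pThread->BeginPreventAsyncAbort();
+//   // ... abort-sensitive work ...
+//   pThread->EndPreventAsyncAbort();
+//
+// The interlocked m_PreventAbort counter nests, so such pairs may be stacked.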
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+// We release m_pHostTask during ICLRTask::Reset and ICLRTask::ExitTask calls.
+// This function allows us to synchronize obtaining m_pHostTask with Thread reset or exit.
+IHostTask* Thread::GetHostTaskWithAddRef()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ CounterIncrease(&m_dwHostTaskRefCount);
+ IHostTask *pHostTask = m_pHostTask;
+ if (pHostTask != NULL)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ pHostTask->AddRef();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ CounterDecrease(&m_dwHostTaskRefCount);
+ return pHostTask;
+}
+
+void Thread::ReleaseHostTask()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (m_pHostTask == NULL)
+ {
+ return;
+ }
+
+ IHostTask *pHostTask = m_pHostTask;
+ m_pHostTask = NULL;
+
+ YIELD_WHILE (m_dwHostTaskRefCount > 0);
+
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ pHostTask->Release();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ STRESS_LOG1 (LF_SYNC, LL_INFO100, "Release HostTask %p", pHostTask);
+}
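+
+// Synchronization sketch: GetHostTaskWithAddRef bumps m_dwHostTaskRefCount before
+// reading m_pHostTask, while ReleaseHostTask NULLs the pointer first and then spins
+// (YIELD_WHILE) until the count drains back to zero. Together these ensure no reader
+// is still dereferencing m_pHostTask when the final Release() runs.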
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ULONG Thread::AddRef()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(m_ExternalRefCount > 0);
+
+ _ASSERTE (m_UnmanagedRefCount != (DWORD) -1);
+ ULONG ref = FastInterlockIncrement((LONG*)&m_UnmanagedRefCount);
+
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_Lifetime);
+#endif
+ return ref;
+}
+
+ULONG Thread::Release()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC_HOST_ONLY;
+
+ _ASSERTE (m_ExternalRefCount > 0);
+ _ASSERTE (m_UnmanagedRefCount > 0);
+ ULONG ref = FastInterlockDecrement((LONG*)&m_UnmanagedRefCount);
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_Lifetime);
+#endif
+ return ref;
+}
+
+HRESULT Thread::QueryInterface(REFIID riid, void **ppUnk)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (IID_ICLRTask2 == riid)
+ *ppUnk = (ICLRTask2 *)this;
+ else if (IID_ICLRTask == riid)
+ *ppUnk = (ICLRTask *)this;
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ return E_NOINTERFACE;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ AddRef();
+ return S_OK;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+
+BOOL IsHostedThread()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (!CLRTaskHosted())
+ {
+ return FALSE;
+ }
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ Thread *pThread = GetThread();
+ if (pThread && pThread->GetHostTask() != NULL)
+ {
+ return TRUE;
+ }
+
+ IHostTaskManager *pManager = CorHost2::GetHostTaskManager();
+ IHostTask *pHostTask = NULL;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ pManager->GetCurrentTask(&pHostTask);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ BOOL fRet = (pHostTask != NULL);
+ if (pHostTask)
+ {
+ if (pThread)
+ {
+ _ASSERTE (pThread->GetHostTask() == NULL);
+ pThread->m_pHostTask = pHostTask;
+ }
+ else
+ {
+ pHostTask->Release();
+ }
+ }
+
+ return fRet;
+#else // !FEATURE_INCLUDE_ALL_INTERFACES
+ return FALSE;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+IHostTask *GetCurrentHostTask()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ IHostTaskManager *provider = CorHost2::GetHostTaskManager();
+
+ IHostTask *pHostTask = NULL;
+
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ provider->GetCurrentTask(&pHostTask);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ if (pHostTask)
+ {
+ pHostTask->Release();
+ }
+
+ return pHostTask;
+}
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+void __stdcall Thread::LeaveRuntime(size_t target)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = LeaveRuntimeNoThrow(target);
+ if (FAILED(hr))
+ ThrowHR(hr);
+}
+
+HRESULT Thread::LeaveRuntimeNoThrow(size_t target)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (!CLRTaskHosted())
+ {
+ return S_OK;
+ }
+
+ if (!IsHostedThread())
+ {
+ return S_OK;
+ }
+
+ HRESULT hr = S_OK;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ // A SQL thread can enter the runtime w/o a managed thread.
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(hr = COR_E_STACKOVERFLOW);
+
+ IHostTaskManager *pManager = CorHost2::GetHostTaskManager();
+ if (pManager)
+ {
+#ifdef _DEBUG
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_UM_M);
+ }
+#endif
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pManager->LeaveRuntime(target);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ END_SO_INTOLERANT_CODE;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ return hr;
+}
+
+void __stdcall Thread::LeaveRuntimeThrowComplus(size_t target)
+{
+
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTaskManager *pManager = NULL;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ if (!CLRTaskHosted())
+ {
+ goto Exit;
+ }
+
+ if (!IsHostedThread())
+ {
+ goto Exit;
+ }
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ pManager = CorHost2::GetHostTaskManager();
+ if (pManager)
+ {
+#ifdef _DEBUG
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_UM_M);
+ }
+#endif
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pManager->LeaveRuntime(target);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ if (FAILED(hr))
+ {
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ ThrowHR(hr);
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ }
+
+
+Exit:
+;
+
+}
+
+void __stdcall Thread::EnterRuntime()
+{
+ if (!CLRTaskHosted())
+ {
+ // optimize for the most common case
+ return;
+ }
+
+ DWORD dwLastError = GetLastError();
+
+ CONTRACTL {
+ THROWS;
+ ENTRY_POINT;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ //BEGIN_ENTRYPOINT_THROWS;
+
+ HRESULT hr = EnterRuntimeNoThrowWorker();
+ if (FAILED(hr))
+ ThrowHR(hr);
+
+ SetLastError(dwLastError);
+ //END_ENTRYPOINT_THROWS;
+
+}
+
+HRESULT Thread::EnterRuntimeNoThrow()
+{
+ if (!CLRTaskHosted())
+ {
+ // optimize for the most common case
+ return S_OK;
+ }
+
+ DWORD dwLastError = GetLastError();
+
+    // This function can be called during a hard SO when managed code has called out to native
+    // code which has SO'd, so we can't probe here. We already probe in LeaveRuntime, which will be
+    // called at roughly the same stack level as EnterRuntime, so we assume that the probe for
+    // LeaveRuntime will cover us here.
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = EnterRuntimeNoThrowWorker();
+
+ SetLastError(dwLastError);
+
+ return hr;
+}
+
+HRESULT Thread::EnterRuntimeNoThrowWorker()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (!IsHostedThread())
+ {
+ return S_OK;
+ }
+
+ HRESULT hr = S_OK;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTaskManager *pManager = CorHost2::GetHostTaskManager();
+
+ if (pManager)
+ {
+#ifdef _DEBUG
+ // A SQL thread can enter the runtime w/o a managed thread.
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_UM_M);
+ }
+#endif
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pManager->EnterRuntime();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ return hr;
+}
+
+void Thread::ReverseEnterRuntime()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = ReverseEnterRuntimeNoThrow();
+
+ if (hr != S_OK)
+ ThrowHR(hr);
+}
+
+__declspec(noinline) void Thread::ReverseEnterRuntimeThrowComplusHelper(HRESULT hr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ ThrowHR(hr);
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+}
+
+void Thread::ReverseEnterRuntimeThrowComplus()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = ReverseEnterRuntimeNoThrow();
+
+ if (hr != S_OK)
+ {
+ ReverseEnterRuntimeThrowComplusHelper(hr);
+ }
+}
+
+
+HRESULT Thread::ReverseEnterRuntimeNoThrow()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (!CLRTaskHosted())
+ {
+ return S_OK;
+ }
+
+ if (!IsHostedThread())
+ {
+ return S_OK;
+ }
+
+ HRESULT hr = S_OK;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTaskManager *pManager = CorHost2::GetHostTaskManager();
+ if (pManager)
+ {
+#ifdef _DEBUG
+ // A SQL thread can enter the runtime w/o a managed thread.
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(hr = COR_E_STACKOVERFLOW);
+
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_UM_M);
+ }
+ END_SO_INTOLERANT_CODE;
+
+#endif
+ hr = pManager->ReverseEnterRuntime();
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ return hr;
+}
+
+void Thread::ReverseLeaveRuntime()
+{
+ // This function can be called during a hard SO so we can't probe here. We already probe in
+ // ReverseEnterRuntime, which will be called at roughly the same stack level as ReverseLeaveRuntime,
+ // so we assume that the probe for ReverseEnterRuntime will cover us here.
+
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // SetupForComCallHR calls this inside a CATCH, but it triggers a THROWs violation
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ if (!CLRTaskHosted())
+ {
+ return;
+ }
+
+ if (!IsHostedThread())
+ {
+ return;
+ }
+
+ HRESULT hr = S_OK;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTaskManager *pManager = CorHost2::GetHostTaskManager();
+
+ if (pManager)
+ {
+#ifdef _DEBUG
+ // A SQL thread can enter the runtime w/o a managed thread.
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_UM_M);
+ }
+#endif
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pManager->ReverseLeaveRuntime();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ if (hr != S_OK)
+ ThrowHR(hr);
+
+}
+
+// For OS EnterCriticalSection, call host to enable ThreadAffinity
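+// A typical caller brackets the OS critical section with the affinity calls so the host
+// cannot reschedule the task onto another OS thread while the lock is held. A hypothetical
+// usage sketch (BeginThreadAffinity/EndThreadAffinity are the real members below; the
+// surrounding code is illustrative only):
+//
+//     Thread::BeginThreadAffinity();   // pin the task to the current OS thread
+//     EnterCriticalSection(&cs);
+//     ... touch state guarded by cs ...
+//     LeaveCriticalSection(&cs);
+//     Thread::EndThreadAffinity();     // let the host schedule freely again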
+void Thread::BeginThreadAffinity()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!CLRTaskHosted())
+ {
+ return;
+ }
+
+ if (IsGCSpecialThread() || IsDbgHelperSpecialThread())
+ {
+ return;
+ }
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTaskManager *pManager = CorHost2::GetHostTaskManager();
+
+ HRESULT hr;
+
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pManager->BeginThreadAffinity();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ _ASSERTE (hr == S_OK);
+ Thread *pThread = GetThread();
+
+ if (pThread)
+ {
+ pThread->IncThreadAffinityCount();
+#ifdef _DEBUG
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_Affinity);
+#endif
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+
+
+// For OS EnterCriticalSection, call host to enable ThreadAffinity
+void Thread::EndThreadAffinity()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!CLRTaskHosted())
+ {
+ return;
+ }
+
+ if (IsGCSpecialThread() || IsDbgHelperSpecialThread())
+ {
+ return;
+ }
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTaskManager *pManager = CorHost2::GetHostTaskManager();
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ pThread->DecThreadAffinityCount ();
+#ifdef _DEBUG
+ pThread->AddFiberInfo(Thread::ThreadTrackInfo_Affinity);
+#endif
+ }
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HRESULT hr = S_OK;
+
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pManager->EndThreadAffinity();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ _ASSERTE (hr == S_OK);
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+
+void Thread::SetupThreadForHost()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (GetThread() == this);
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTask *pHostTask = GetHostTask();
+ if (pHostTask) {
+ SetupFiberData();
+
+ // @todo - need to block for Interop debugging before leaving the runtime here.
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = pHostTask->SetCLRTask(this);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ {
+ ThrowHR(hr);
+ }
+ if (m_WeOwnThreadHandle)
+ {
+ // If host provides a thread handle, we do not need to own a handle.
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ CorHost2::GetHostTaskManager()->SwitchToTask(0);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (m_ThreadHandleForClose != INVALID_HANDLE_VALUE)
+ {
+ m_WeOwnThreadHandle = FALSE;
+ CloseHandle(m_ThreadHandleForClose);
+ m_ThreadHandleForClose = INVALID_HANDLE_VALUE;
+ }
+ }
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+
+
+ETaskType GetCurrentTaskType()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ ETaskType TaskType = TT_UNKNOWN;
+ size_t type = (size_t)ClrFlsGetValue (TlsIdx_ThreadType);
+ if (type & ThreadType_DbgHelper)
+ {
+ TaskType = TT_DEBUGGERHELPER;
+ }
+ else if (type & ThreadType_GC)
+ {
+ TaskType = TT_GC;
+ }
+ else if (type & ThreadType_Finalizer)
+ {
+ TaskType = TT_FINALIZER;
+ }
+ else if (type & ThreadType_Timer)
+ {
+ TaskType = TT_THREADPOOL_TIMER;
+ }
+ else if (type & ThreadType_Gate)
+ {
+ TaskType = TT_THREADPOOL_GATE;
+ }
+ else if (type & ThreadType_Wait)
+ {
+ TaskType = TT_THREADPOOL_WAIT;
+ }
+ else if (type & ThreadType_ADUnloadHelper)
+ {
+ TaskType = TT_ADUNLOAD;
+ }
+ else if (type & ThreadType_Threadpool_IOCompletion)
+ {
+ TaskType = TT_THREADPOOL_IOCOMPLETION;
+ }
+ else if (type & ThreadType_Threadpool_Worker)
+ {
+ TaskType = TT_THREADPOOL_WORKER;
+ }
+ else
+ {
+ Thread *pThread = GetThread();
+ if (pThread)
+ {
+ TaskType = TT_USER;
+ }
+ }
+
+ return TaskType;
+}
+
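+// The methods below implement a simple deadlock-detection protocol: a thread first records
+// itself as blocking on a lock, then (once the underlying lock is acquired) transitions to
+// holding it. A hypothetical usage sketch, assuming some underlying lock "cs" paired with
+// this bookkeeping object (illustrative only, not a real call site):
+//
+//     DeadlockAwareLock dal("MyLock");
+//     if (dal.TryBeginEnterLock())     // FALSE means acquiring would deadlock
+//     {
+//         cs.Enter();                  // acquire the real lock
+//         dal.EndEnterLock();          // blocking -> holding transition
+//         ... critical section ...
+//         dal.LeaveLock();             // clear holding state
+//         cs.Leave();
+//     }
+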
+DeadlockAwareLock::DeadlockAwareLock(const char *description)
+ : m_pHoldingThread(NULL)
+#ifdef _DEBUG
+ , m_description(description)
+#endif
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+DeadlockAwareLock::~DeadlockAwareLock()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ // Wait for another thread to leave its loop in DeadlockAwareLock::TryBeginEnterLock
+ CrstHolder lock(&g_DeadlockAwareCrst);
+}
+
+CHECK DeadlockAwareLock::CheckDeadlock(Thread *pThread)
+{
+ CONTRACTL
+ {
+ PRECONDITION(g_DeadlockAwareCrst.OwnedByCurrentThread());
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Note that this check is recursive in order to produce descriptive check failure messages.
+ Thread *pHoldingThread = m_pHoldingThread.Load();
+ if (pThread == pHoldingThread)
+ {
+ CHECK_FAILF(("Lock %p (%s) is held by thread %d", this, m_description, pThread));
+ }
+
+ if (pHoldingThread != NULL)
+ {
+ DeadlockAwareLock *pBlockingLock = pHoldingThread->m_pBlockingLock.Load();
+ if (pBlockingLock != NULL)
+ {
+ CHECK_MSGF(pBlockingLock->CheckDeadlock(pThread),
+ ("Deadlock: Lock %p (%s) is held by thread %p", this, m_description, pHoldingThread));
+ }
+ }
+
+ CHECK_OK;
+}
+
+BOOL DeadlockAwareLock::CanEnterLock()
+{
+ Thread * pThread = GetThread();
+
+ CONSISTENCY_CHECK_MSG(pThread != NULL,
+ "Cannot do deadlock detection on non-EE thread");
+ CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL,
+ "Cannot block on two locks at once");
+
+ {
+ CrstHolder lock(&g_DeadlockAwareCrst);
+
+ // Look for deadlocks
+ DeadlockAwareLock *pLock = this;
+
+ while (TRUE)
+ {
+ Thread * holdingThread = pLock->m_pHoldingThread;
+
+ if (holdingThread == pThread)
+ {
+ // Deadlock!
+ return FALSE;
+ }
+ if (holdingThread == NULL)
+ {
+ // Lock is unheld
+ break;
+ }
+
+ pLock = holdingThread->m_pBlockingLock;
+
+ if (pLock == NULL)
+ {
+ // Thread is running free
+ break;
+ }
+ }
+
+ return TRUE;
+ }
+}
+
+BOOL DeadlockAwareLock::TryBeginEnterLock()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Thread * pThread = GetThread();
+
+ CONSISTENCY_CHECK_MSG(pThread != NULL,
+ "Cannot do deadlock detection on non-EE thread");
+ CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL,
+ "Cannot block on two locks at once");
+
+ {
+ CrstHolder lock(&g_DeadlockAwareCrst);
+
+ // Look for deadlocks
+ DeadlockAwareLock *pLock = this;
+
+ while (TRUE)
+ {
+ Thread * holdingThread = pLock->m_pHoldingThread;
+
+ if (holdingThread == pThread)
+ {
+ // Deadlock!
+ return FALSE;
+ }
+ if (holdingThread == NULL)
+ {
+ // Lock is unheld
+ break;
+ }
+
+ pLock = holdingThread->m_pBlockingLock;
+
+ if (pLock == NULL)
+ {
+ // Thread is running free
+ break;
+ }
+ }
+
+ pThread->m_pBlockingLock = this;
+ }
+
+ return TRUE;
+}
+
+void DeadlockAwareLock::BeginEnterLock()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Thread * pThread = GetThread();
+
+ CONSISTENCY_CHECK_MSG(pThread != NULL,
+ "Cannot do deadlock detection on non-EE thread");
+ CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL,
+ "Cannot block on two locks at once");
+
+ {
+ CrstHolder lock(&g_DeadlockAwareCrst);
+
+ // Look for deadlock loop
+ CONSISTENCY_CHECK_MSG(CheckDeadlock(pThread), "Deadlock detected!");
+
+ pThread->m_pBlockingLock = this;
+ }
+}
+
+void DeadlockAwareLock::EndEnterLock()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Thread * pThread = GetThread();
+
+ CONSISTENCY_CHECK(m_pHoldingThread.Load() == NULL || m_pHoldingThread.Load() == pThread);
+ CONSISTENCY_CHECK(pThread->m_pBlockingLock.Load() == this);
+
+ // No need to take a lock when going from blocking to holding. This
+ // transition implies the lack of a deadlock that other threads can see.
+ // (If they would see a deadlock after the transition, they would see
+ // one before as well.)
+
+ m_pHoldingThread = pThread;
+}
+
+void DeadlockAwareLock::LeaveLock()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ CONSISTENCY_CHECK(m_pHoldingThread == GetThread());
+ CONSISTENCY_CHECK(GetThread()->m_pBlockingLock.Load() == NULL);
+
+ m_pHoldingThread = NULL;
+}
+
+
+#ifdef _DEBUG
+
+// Normally, any thread we operate on has a Thread block in its TLS. But there are
+// a few special threads we don't normally execute managed code on.
+//
+// There is a scenario where we run managed code on such a thread, which is when the
+// DLL_THREAD_ATTACH notification of an (IJW?) module calls into managed code. This
+// is incredibly dangerous. If a GC is provoked, the system may have trouble performing
+// the GC because its threads aren't available yet.
+static DWORD SpecialEEThreads[10];
+static LONG cnt_SpecialEEThreads = 0;
+
+void dbgOnly_IdentifySpecialEEThread()
+{
+ WRAPPER_NO_CONTRACT;
+
+ LONG ourCount = FastInterlockIncrement(&cnt_SpecialEEThreads);
+
+ _ASSERTE(ourCount < (LONG) NumItems(SpecialEEThreads));
+ SpecialEEThreads[ourCount-1] = ::GetCurrentThreadId();
+}
+
+BOOL dbgOnly_IsSpecialEEThread()
+{
+ WRAPPER_NO_CONTRACT;
+
+ DWORD ourId = ::GetCurrentThreadId();
+
+ for (LONG i=0; i<cnt_SpecialEEThreads; i++)
+ if (ourId == SpecialEEThreads[i])
+ return TRUE;
+
+ // If we have an EE thread doing helper thread duty, then it is temporarily
+ // 'special' too.
+ #ifdef DEBUGGING_SUPPORTED
+ if (g_pDebugInterface)
+ {
+ //<TODO>We probably should use Thread::GetThreadId</TODO>
+ DWORD helperID = g_pDebugInterface->GetHelperThreadID();
+ if (helperID == ourId)
+ return TRUE;
+ }
+ #endif
+
+ //<TODO>Clean this up</TODO>
+ if (GetThread() == NULL)
+ return TRUE;
+
+
+ return FALSE;
+}
+
+#endif // _DEBUG
+
+
+// There is an MDA which can detect illegal reentrancy into the CLR. For instance, if you call managed
+// code from a native vectored exception handler, this might cause a reverse PInvoke to occur. But if the
+// exception was triggered from code that was executing in cooperative GC mode, we now have GC holes and
+// general corruption.
+#ifdef MDA_SUPPORTED
+NOINLINE BOOL HasIllegalReentrancyRare()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Thread *pThread = GetThread();
+ if (pThread == NULL || !pThread->PreemptiveGCDisabled())
+ return FALSE;
+
+ BEGIN_ENTRYPOINT_VOIDRET;
+ MDA_TRIGGER_ASSISTANT(Reentrancy, ReportViolation());
+ END_ENTRYPOINT_VOIDRET;
+ return TRUE;
+}
+#endif
+
+// Actually fire the Reentrancy probe, if warranted.
+BOOL HasIllegalReentrancy()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ ENTRY_POINT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef MDA_SUPPORTED
+ if (NULL == MDA_GET_ASSISTANT(Reentrancy))
+ return FALSE;
+ return HasIllegalReentrancyRare();
+#else
+ return FALSE;
+#endif // MDA_SUPPORTED
+}
+
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void
+STATIC_DATA::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ WRAPPER_NO_CONTRACT;
+
+ DAC_ENUM_STHIS(STATIC_DATA);
+}
+
+void
+Thread::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ WRAPPER_NO_CONTRACT;
+
+ DAC_ENUM_VTHIS();
+ if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
+ {
+ if (m_pDomain.IsValid())
+ {
+ m_pDomain->EnumMemoryRegions(flags, true);
+ }
+
+ if (m_Context.IsValid())
+ {
+ m_Context->EnumMemoryRegions(flags);
+ }
+ }
+
+ if (m_debuggerFilterContext.IsValid())
+ {
+ m_debuggerFilterContext.EnumMem();
+ }
+
+ OBJECTHANDLE_EnumMemoryRegions(m_LastThrownObjectHandle);
+
+ m_ExceptionState.EnumChainMemoryRegions(flags);
+
+ // Like the old thread static implementation, we only enumerate
+ // the current TLB. Should we be enumerating all of the TLBs?
+ if (m_pThreadLocalBlock.IsValid())
+ m_pThreadLocalBlock->EnumMemoryRegions(flags);
+
+ if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
+ {
+
+ //
+ // Allow all of the frames on the stack to enumerate
+ // their memory.
+ //
+
+ PTR_Frame frame = m_pFrame;
+ while (frame.IsValid() &&
+ frame.GetAddr() != dac_cast<TADDR>(FRAME_TOP))
+ {
+ frame->EnumMemoryRegions(flags);
+ frame = frame->m_Next;
+ }
+ }
+
+ //
+ // Try to do a stack trace and save information
+ // for each part of the stack. This is very vulnerable
+ // to memory problems, so ignore all exceptions here.
+ //
+
+ CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED
+ (
+ EnumMemoryRegionsWorker(flags);
+ );
+}
+
+void
+Thread::EnumMemoryRegionsWorker(CLRDataEnumMemoryFlags flags)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (IsUnstarted())
+ {
+ return;
+ }
+
+ T_CONTEXT context;
+ BOOL DacGetThreadContext(Thread* thread, T_CONTEXT* context); // local declaration of the DAC helper used below
+ REGDISPLAY regDisp;
+ StackFrameIterator frameIter;
+
+ TADDR previousSP = 0; // start at zero; this allows the first check to always succeed.
+ TADDR currentSP;
+
+ // Init value. The Limit itself is not legal, so move one target pointer size to the smallest-magnitude
+ // legal address.
+ currentSP = dac_cast<TADDR>(m_CacheStackLimit) + sizeof(TADDR);
+
+ if (GetFilterContext())
+ {
+ context = *GetFilterContext();
+ }
+ else
+ {
+ DacGetThreadContext(this, &context);
+ }
+
+ FillRegDisplay(&regDisp, &context);
+ frameIter.Init(this, NULL, &regDisp, 0);
+ while (frameIter.IsValid())
+ {
+ //
+ // There are identical stack pointer checking semantics in code:ClrDataAccess::EnumMemWalkStackHelper
+ // You ***MUST*** maintain identical semantics for both checks!
+ //
+
+ // Before we continue, we should check to be sure we have a valid
+ // stack pointer. This is to prevent stacks that are not walked
+ // properly due to
+ // a) stack corruption bugs
+ // b) bad stack walks
+ // from continuing on indefinitely.
+ //
+ // We will force SP to strictly increase.
+ // this check can only happen for real stack frames (i.e. not for explicit frames that don't update the RegDisplay)
+ // for ia64, SP may be equal, but in this case BSP must strictly decrease.
+ // We will force SP to be properly aligned.
+ // We will force SP to be in the correct range.
+ //
+ if (frameIter.GetFrameState() == StackFrameIterator::SFITER_FRAMELESS_METHOD)
+ {
+ // This check cannot be applied to explicit frames; they may not move the SP at all.
+ // Also, a single function can push several on the stack at a time with no guarantees about
+ // ordering so we can't check that the addresses of the explicit frames are monotonically increasing.
+ // There is the potential that the walk will not terminate if a set of explicit frames reference
+ // each other circularly. While we could choose a limit for the number of explicit frames allowed
+ // in a row like the total stack size/pointer size, we have no known problems with this scenario.
+ // Thus for now we ignore it.
+ currentSP = (TADDR)GetRegdisplaySP(&regDisp);
+
+ if (currentSP <= previousSP)
+ {
+ _ASSERTE(!"Target stack has been corrupted, SP for current frame must be larger than previous frame.");
+ break;
+ }
+ }
+
+ // On Windows desktop, the stack pointer should be pointer-size
+ // aligned in the target address space.
+ if (currentSP % sizeof(TADDR) != 0)
+ {
+ _ASSERTE(!"Target stack has been corrupted, SP must be aligned.");
+ break;
+ }
+
+ if (!IsAddressInStack(currentSP))
+ {
+ _ASSERTE(!"Target stack has been corrupted, SP must in in the stack range.");
+ break;
+ }
+
+ // Enumerate the code around the call site to help debugger stack walking heuristics
+ PCODE callEnd = GetControlPC(&regDisp);
+ DacEnumCodeForStackwalk(callEnd);
+
+ if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
+ {
+ if (frameIter.m_crawl.GetAppDomain())
+ {
+ frameIter.m_crawl.GetAppDomain()->EnumMemoryRegions(flags, true);
+ }
+ }
+
+ // To stackwalk through funceval frames, we need to be sure to preserve the
+ // DebuggerModule's m_pRuntimeDomainFile. This is the only case that doesn't use the current
+ // vmDomainFile in code:DacDbiInterfaceImpl::EnumerateInternalFrames. The following
+ // code mimics that function.
+ // Allow failure, since we want to continue attempting to walk the stack regardless of the outcome.
+ EX_TRY
+ {
+ if ((frameIter.GetFrameState() == StackFrameIterator::SFITER_FRAME_FUNCTION) ||
+ (frameIter.GetFrameState() == StackFrameIterator::SFITER_SKIPPED_FRAME_FUNCTION))
+ {
+ Frame * pFrame = frameIter.m_crawl.GetFrame();
+ g_pDebugInterface->EnumMemoryRegionsIfFuncEvalFrame(flags, pFrame);
+ }
+ }
+ EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED
+
+ MethodDesc* pMD = frameIter.m_crawl.GetFunction();
+ if (pMD != NULL)
+ {
+ pMD->EnumMemoryRegions(flags);
+#if defined(WIN64EXCEPTIONS) && defined(FEATURE_PREJIT)
+ // Enumerate unwind info
+ // Note that we don't do this based on the MethodDesc because in theory there isn't a 1:1 correspondence
+ // between MethodDesc and code (and so unwind info, and even debug info). Eg., EnC creates new versions
+ // of the code, but the MethodDesc always points at the latest version (which isn't necessarily
+ // the one on the stack). In practice this is unlikely to be a problem since wanting a minidump
+ // and making EnC edits are usually mutually exclusive.
+ if (frameIter.m_crawl.IsFrameless())
+ {
+ frameIter.m_crawl.GetJitManager()->EnumMemoryRegionsForMethodUnwindInfo(flags, frameIter.m_crawl.GetCodeInfo());
+ }
+#endif // defined(WIN64EXCEPTIONS) && defined(FEATURE_PREJIT)
+ }
+
+ previousSP = currentSP;
+
+ if (frameIter.Next() != SWA_CONTINUE)
+ {
+ break;
+ }
+ }
+}
+
+void
+ThreadStore::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+
+ // This will write out s_pThreadStore itself, i.e.
+ // just the pointer.
+ //
+ s_pThreadStore.EnumMem();
+ if (s_pThreadStore.IsValid())
+ {
+ // write out the whole ThreadStore structure
+ DacEnumHostDPtrMem(s_pThreadStore);
+
+ // The thread list may be corrupt, so just
+ // ignore exceptions during enumeration.
+ EX_TRY
+ {
+ Thread* thread = s_pThreadStore->m_ThreadList.GetHead();
+ LONG dwNumThreads = s_pThreadStore->m_ThreadCount;
+
+ for (LONG i = 0; (i < dwNumThreads) && (thread != NULL); i++)
+ {
+ // Even if this thread is totally broken and we can't enum it, struggle on.
+ // If we do not, we will leave this loop and not enum stack memory for any further threads.
+ CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED(
+ thread->EnumMemoryRegions(flags);
+ );
+ thread = s_pThreadStore->m_ThreadList.GetNext(thread);
+ }
+ }
+ EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED
+ }
+}
+
+#endif // #ifdef DACCESS_COMPILE
+
+
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+// For the purposes of tracking resource usage we implement a simple cpu resource usage counter on each
+// thread. Every time QueryThreadProcessorUsage() is invoked it returns the amount of cpu time (a combination
+// of user and kernel mode time) used since the last call to QueryThreadProcessorUsage(). The result is in 100
+// nanosecond units.
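+// Since each call returns only the delta since the previous call, a consumer (such as the
+// appdomain resource monitor) is expected to accumulate the results. A hypothetical sketch:
+//
+//     totalCpuTime100ns += pThread->QueryThreadProcessorUsage(); // safe to call repeatedly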
+ULONGLONG Thread::QueryThreadProcessorUsage()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Get current values for the amount of kernel and user time used by this thread over its entire lifetime.
+ FILETIME sCreationTime, sExitTime, sKernelTime, sUserTime;
+ HANDLE hThread = GetThreadHandle();
+ BOOL fResult = GetThreadTimes(hThread,
+ &sCreationTime,
+ &sExitTime,
+ &sKernelTime,
+ &sUserTime);
+ if (!fResult)
+ {
+#ifdef _DEBUG
+ ULONG error = GetLastError();
+ printf("GetThreadTimes failed: %u; handle is %p\n", error, hThread);
+ _ASSERTE(FALSE);
+#endif
+ return 0;
+ }
+
+ // Combine the user and kernel times into a single value (FILETIME is just a structure representing an
+ // unsigned int64 in two 32-bit pieces).
+ _ASSERTE(sizeof(FILETIME) == sizeof(UINT64));
+ ULONGLONG ullCurrentUsage = *(ULONGLONG*)&sKernelTime + *(ULONGLONG*)&sUserTime;
+
+ // Store the current processor usage as the new baseline, and retrieve the previous usage.
+ ULONGLONG ullPreviousUsage = VolatileLoad(&m_ullProcessorUsageBaseline);
+ if (ullPreviousUsage >= ullCurrentUsage ||
+ ullPreviousUsage != (ULONGLONG)InterlockedCompareExchange64(
+ (LONGLONG*)&m_ullProcessorUsageBaseline,
+ (LONGLONG)ullCurrentUsage,
+ (LONGLONG)ullPreviousUsage))
+ {
+ // another thread beat us to it, and already reported this usage.
+ return 0;
+ }
+
+ // The result is the difference between this value and the previous usage value.
+ return ullCurrentUsage - ullPreviousUsage;
+}
+#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
diff --git a/src/vm/threads.h b/src/vm/threads.h
new file mode 100644
index 0000000000..d4a41be367
--- /dev/null
+++ b/src/vm/threads.h
@@ -0,0 +1,7792 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// THREADS.H -
+//
+
+
+//
+//
+// Currently represents a logical and physical COM+ thread. Later, these concepts will be separated.
+//
+
+//
+// #RuntimeThreadLocals.
+//
+// Windows has a feature called Thread Local Storage (TLS), which is data that the OS allocates every time it
+// creates a thread. Programs access this storage by using the Windows TlsAlloc, TlsGetValue, TlsSetValue
+// APIs (see http://msdn2.microsoft.com/en-us/library/ms686812.aspx). The runtime allocates two such slots
+// for its use:
+//
+// * A slot that holds a pointer to the runtime thread object code:Thread (see code:#ThreadClass). The
+// runtime has a special optimized version of this helper code:GetThread (we actually emit assembly
+// code on the fly so it is as fast as possible). These code:Thread objects live in the
+// code:ThreadStore.
+//
+// * The other slot holds the current code:AppDomain (a managed equivalent of a process). The
+// runtime thread object also has a pointer to the thread's AppDomain (see code:Thread.m_pDomain),
+// so in theory this TLS slot is redundant. It is there for speed (one less pointer indirection). The
+// optimized helper for this is code:GetAppDomain (we emit assembly code on the fly for this one
+// too).
+//
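+// For reference, the raw OS pattern these slots are built on looks like the following
+// (illustrative only; the runtime's emitted helpers are optimized equivalents of the
+// TlsGetValue call):
+//
+//     DWORD idx = TlsAlloc();                    // once, at startup
+//     TlsSetValue(idx, pThread);                 // when a thread is set up
+//     Thread* p = (Thread*)TlsGetValue(idx);     // what GetThread() amounts to
+//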
+// Initially these TLS slots are empty (when the OS starts up), however before we run managed code, we must
+// set them properly so that managed code knows what AppDomain it is in and we can suspend threads properly
+// for a GC (see code:#SuspendingTheRuntime).
+//
+// #SuspendingTheRuntime
+//
+// One of the primary differences between runtime code (managed code), and traditional (unmanaged code) is
+// the existence of the GC heap (see file:gc.cpp#Overview). For the GC to do its job, it must be able to
+// traverse all references to the GC heap, including ones on the stack of every thread, as well as any in
+// hardware registers. While it is simple to state this requirement, it has long reaching effects, because
+// properly accounting for all GC heap references ALL the time turns out to be quite hard. When we make a
+// bookkeeping mistake, a GC reference is not reported at GC time, which means it will not be updated when the
+// GC happens. Since memory in the GC heap can move, this can cause the pointer to point at 'random' places
+// in the GC heap, causing data corruption. This is a 'GC Hole', and is very bad. We have special modes (see
+// code:EEConfig.GetGCStressLevel) called GCStress to help find such issues.
+//
+// In order to find all GC references on the stacks, we need to ensure that no thread is manipulating a GC
+// reference at the time of the scan. This is the job of code:Thread.SuspendRuntime. Logically it suspends
+// every thread in the process. Unfortunately it cannot simply call the OS SuspendThread API on
+// all threads. The reason is that the other threads MIGHT hold important locks (for example, there is a lock
+// that is taken when unmanaged heap memory is requested, or when a DLL is loaded). In general, process-
+// global structures in the OS will be protected by locks, and if you suspend a thread it might hold that
+// lock. If you then happen to need that OS service (e.g. you might need to allocate unmanaged memory),
+// deadlock will occur (as you wait on the suspended thread, which never wakes up).
+//
+// Luckily, we don't need to actually suspend the threads; we just need to ensure that all GC references on
+// the stack are stable. This is where the concept of cooperative mode and preemptive mode (a bad name) come
+// from.
+//
+// #CooperativeMode
+//
+// The runtime keeps a table of all threads that have ever run managed code in the code:ThreadStore table.
+// The ThreadStore table holds a list of Thread objects (see code:#ThreadClass). This object holds all
+// information about managed threads. Cooperative mode is defined as the mode the thread is in when the field
+// code:Thread.m_fPreemptiveGCDisabled is non-zero. When this field is zero the thread is said to be in
+// Preemptive mode (named because if you preempt the thread in this mode, it is guaranteed to be in a place
+// where a GC can occur).
+//
+// When a thread is in cooperative mode, it is basically saying that it is potentially modifying GC
+// references, and so the runtime must Cooperate with it to get to a 'GC Safe' location where the GC
+// references can be enumerated. This is the mode that a thread is in MOST times when it is running managed
+// code (in fact if the EIP is in JIT compiled code, there is only one place where you are NOT in cooperative
+// mode (Inlined PINVOKE transition code)). Conversely, any time non-runtime unmanaged code is running, the
+// thread MUST NOT be in cooperative mode (you risk deadlock otherwise). Only code in mscorwks.dll might be
+// running in either cooperative or preemptive mode.
+//
+// It is easier to describe the invariant associated with being in Preemptive mode. When the thread is in
+// preemptive mode (when code:Thread.m_fPreemptiveGCDisabled is zero), the thread guarantees the following:
+//
+// * That it is not currently running code that manipulates GC references.
+// * That it has set the code:Thread.m_pFrame pointer in the code:Thread to be a subclass of the class
+// code:Frame which marks the location on the stack where the last managed method frame is. This
+// allows the GC to start crawling the stack from there (essentially skip over the unmanaged frames).
+// * That the thread will not reenter managed code if the global variable code:g_TrapReturningThreads is
+// set (it will call code:Thread.RareDisablePreemptiveGC first, which will block if a suspension is
+// in progress)
+//
+// The basic idea is that the suspension logic in code:Thread.SuspendRuntime first sets the global variable
+// code:g_TrapReturningThreads and then checks if each thread in the ThreadStore is in Cooperative mode. If a
+// thread is NOT in cooperative mode, the logic simply skips the thread, because it knows that the thread
+// will stop itself before reentering managed code (because code:g_TrapReturningThreads is set). This avoids
+// the deadlock problem mentioned earlier, because threads that are running unmanaged code are allowed to
+// run. Enumeration of GC references starts at the first managed frame (pointed at by code:Thread.m_pFrame).
+//
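+// In rough pseudocode, the suspension logic amounts to the following (a conceptual sketch,
+// not the actual code in code:Thread.SuspendRuntime):
+//
+//     g_TrapReturningThreads = TRUE;
+//     foreach (Thread* t in ThreadStore)
+//         if (t->m_fPreemptiveGCDisabled)      // cooperative mode: must be driven
+//             redirect or hijack t to a GC safe point;
+//         // preemptive threads are skipped; they will block themselves before
+//         // reentering managed code because g_TrapReturningThreads is set
+//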
+// When a thread is in cooperative mode, it means that GC references might be being manipulated. There are
+// two important possibilities
+//
+// * The CPU is running JIT compiled code
+// * The CPU is running code elsewhere (which should only be in mscorwks.dll, because everywhere else a
+// transition to preemptive mode should have happened first)
+//
+// * #PartiallyInteruptibleCode
+// * #FullyInteruptibleCode
+//
+// If the instruction pointer (x86/x64: EIP, ARM: R15/PC) is in JIT compiled code, we can detect this because we have tables that
+// map the ranges of every method back to their code:MethodDesc (this is the code:ICodeManager interface). In
+// addition to knowing the method, these tables also point at 'GCInfo' that tells, for that method, which stack
+// locations and which registers hold GC references at any particular instruction pointer. If the method is
+// what is called FullyInterruptible, then we have information for any possible instruction pointer in the
+// method and we can simply stop the thread (however we have to do this carefully TODO explain).
+//
+// However, for most methods we only keep GC information for particular EIPs; in particular, we keep track of
+// GC reference liveness only at call sites. Thus not every location is 'GC Safe' (that is, one where we can
+// enumerate all references); threads must be 'driven' to a GC safe location.
+//
+// We drive threads to GC safe locations by hijacking. This is a term for updating the return address on the
+// stack so that we gain control when a method returns. If we find that we are in JITTed code but NOT at a GC
+// safe location, then we find the return address for the method and modify it to cause the runtime to stop.
+// We then let the method run. Hopefully the method quickly returns, and hits our hijack, and we are now at a
+// GC-safe location (all call sites are GC-safe). If not, we repeat the procedure (possibly moving the
+// hijack). At some point a method returns, and we get control. For methods that have loops that don't make
+// calls, we are forced to make the method FullyInterruptible, so we can be sure to stop the method.
+//
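+// Conceptually, hijacking a frame looks like this (a simplified sketch, not the actual
+// implementation, which must also handle registers, object/interior/FP returns, etc.):
+//
+//     savedReturnAddress = *returnAddressSlot;       // where the method would have returned
+//     *returnAddressSlot = OnHijackScalarTripThread; // runtime stub gains control on return
+//     // ... resume the thread; when the method returns it lands in the stub, which
+//     // reports GC state and eventually jumps to savedReturnAddress ...
+//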
+// This leaves only the case where we are in cooperative mode, but not in JIT compiled code (we should be in
+// clr.dll). In this case we simply let the thread run. The idea is that code in clr.dll makes the
+// promise that it will not do ANYTHING that will block (which includes taking a lock) while in cooperative
+// mode, or do anything that might take a long time without polling to see if a GC is needed. Thus this code
+// 'cooperates' to ensure that GCs can happen in a timely fashion.
+//
+// If you need to switch the GC mode of the current thread, look for the GCX_COOP() and GCX_PREEMP() macros.
+//
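+// A hypothetical sketch of switching modes with the holders (illustrative only):
+//
+//     void SomeRuntimeHelper(OBJECTREF ref)
+//     {
+//         GCX_COOP();              // enter cooperative mode for this scope
+//         ... manipulate ref; a GC cannot sneak in without cooperating with us ...
+//     }                            // the holder restores the previous mode on scope exit
+//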
+
+#ifndef __threads_h__
+#define __threads_h__
+
+#include "vars.hpp"
+#include "util.hpp"
+#include "eventstore.hpp"
+#include "argslot.h"
+#include "context.h"
+#include "regdisp.h"
+#include "mscoree.h"
+#include "appdomainstack.h"
+#include "gc.h"
+#include <clrhost.h>
+
+class Thread;
+class ThreadStore;
+class MethodDesc;
+struct PendingSync;
+class AppDomain;
+class NDirect;
+class Frame;
+class ThreadBaseObject;
+class AppDomainStack;
+class LoadLevelLimiter;
+class DomainFile;
+class DeadlockAwareLock;
+struct HelperMethodFrameCallerList;
+class ThreadLocalIBCInfo;
+class EECodeInfo;
+class DebuggerPatchSkip;
+class MethodCallGraphPreparer;
+class FaultingExceptionFrame;
+class ContextTransitionFrame;
+enum BinderMethodID : int;
+class CRWLock;
+struct LockEntry;
+class PendingTypeLoadHolder;
+
+struct ThreadLocalBlock;
+typedef DPTR(struct ThreadLocalBlock) PTR_ThreadLocalBlock;
+typedef DPTR(PTR_ThreadLocalBlock) PTR_PTR_ThreadLocalBlock;
+
+#include "stackwalktypes.h"
+#include "log.h"
+#include "stackingallocator.h"
+#include "excep.h"
+#include "synch.h"
+#include "exstate.h"
+#include "threaddebugblockinginfo.h"
+#include "interoputil.h"
+#include "eventtrace.h"
+
+#ifdef CROSSGEN_COMPILE
+
+#include "asmconstants.h"
+
+class Thread
+{
+ friend class ThreadStatics;
+
+ PTR_ThreadLocalBlock m_pThreadLocalBlock;
+ PTR_PTR_ThreadLocalBlock m_pTLBTable;
+ SIZE_T m_TLBTableSize;
+
+public:
+ BOOL IsAddressInStack (PTR_VOID addr) const { return TRUE; }
+
+ Frame *IsRunningIn(AppDomain* pDomain, int *count) { return NULL; }
+
+ StackingAllocator m_MarshalAlloc;
+
+private:
+ MethodCallGraphPreparer * m_pCerPreparationState;
+
+public:
+ MethodCallGraphPreparer * GetCerPreparationState()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCerPreparationState;
+ }
+
+ void SetCerPreparationState(MethodCallGraphPreparer * pCerPreparationState)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pCerPreparationState = pCerPreparationState;
+ }
+
+ private:
+ LoadLevelLimiter *m_pLoadLimiter;
+
+ public:
+ LoadLevelLimiter *GetLoadLevelLimiter()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pLoadLimiter;
+ }
+
+ void SetLoadLevelLimiter(LoadLevelLimiter *limiter)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pLoadLimiter = limiter;
+ }
+
+ PTR_Frame GetFrame() { return NULL; }
+ DWORD CatchAtSafePoint() { return 0; }
+ DWORD CatchAtSafePointOpportunistic() { return 0; }
+
+ static void ObjectRefProtected(const OBJECTREF* ref) { }
+ static void ObjectRefNew(const OBJECTREF* ref) { }
+
+ static void ReverseLeaveRuntime();
+ static void __stdcall EnterRuntime();
+
+ static void BeginThreadAffinity() { }
+ static void EndThreadAffinity() { }
+
+ void EnablePreemptiveGC() { }
+ void DisablePreemptiveGC() { }
+
+ inline void IncLockCount() { }
+ inline void DecLockCount() { }
+
+ void EnterContextRestricted(Context* c, ContextTransitionFrame* pFrame) { }
+
+ static LPVOID GetStaticFieldAddress(FieldDesc *pFD) { return NULL; }
+
+ PTR_AppDomain GetDomain() { return ::GetAppDomain(); }
+
+ DWORD GetThreadId() { return 0; }
+
+ inline DWORD GetOverridesCount() { return 0; }
+ inline BOOL CheckThreadWideSpecialFlag(DWORD flags) { return 0; }
+
+ BOOL PreemptiveGCDisabled() { return false; }
+ void PulseGCMode() { }
+
+ OBJECTREF GetThrowable() { return NULL; }
+
+ OBJECTREF LastThrownObject() { return NULL; }
+
+ static BOOL Debug_AllowCallout() { return TRUE; }
+
+ static void IncForbidSuspendThread() { }
+ static void DecForbidSuspendThread() { }
+
+ // The ForbidSuspendThreadHolder is used during the initialization of the stack marker infrastructure so
+ // it can't do any backout stack validation (which is why we pass in VALIDATION_TYPE=HSV_NoValidation).
+ typedef StateHolder<Thread::IncForbidSuspendThread, Thread::DecForbidSuspendThread, HSV_NoValidation> ForbidSuspendThreadHolder;
+
+ static BYTE GetOffsetOfCurrentFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+ size_t ofs = Thread_m_pFrame;
+ _ASSERTE(FitsInI1(ofs));
+ return (BYTE)ofs;
+ }
+
+ static BYTE GetOffsetOfGCFlag()
+ {
+ LIMITED_METHOD_CONTRACT;
+ size_t ofs = Thread_m_fPreemptiveGCDisabled;
+ _ASSERTE(FitsInI1(ofs));
+ return (BYTE)ofs;
+ }
+
+ void SetLoadingFile(DomainFile *pFile)
+ {
+ }
+
+ typedef Holder<Thread *, DoNothing, DoNothing> LoadingFileHolder;
+
+ enum ThreadState
+ {
+ TS_YieldRequested = 0x00000040, // The task should yield
+ };
+
+ BOOL HasThreadState(ThreadState ts)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((DWORD)m_State & ts);
+ }
+
+ BOOL HasThreadStateOpportunistic(ThreadState ts)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_State.LoadWithoutBarrier() & ts;
+ }
+
+ Volatile<ThreadState> m_State;
+
+ enum ThreadStateNoConcurrency
+ {
+ TSNC_OwnsSpinLock = 0x00000400, // The thread owns a spinlock.
+
+ TSNC_DisableOleaut32Check = 0x00040000, // Disable oleaut32 delay load check. Oleaut32 has
+ // been loaded
+
+ TSNC_LoadsTypeViolation = 0x40000000, // Use by type loader to break deadlocks caused by type load level ordering violations
+ };
+
+ ThreadStateNoConcurrency m_StateNC;
+
+ void SetThreadStateNC(ThreadStateNoConcurrency tsnc)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_StateNC = (ThreadStateNoConcurrency)((DWORD)m_StateNC | tsnc);
+ }
+
+ void ResetThreadStateNC(ThreadStateNoConcurrency tsnc)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_StateNC = (ThreadStateNoConcurrency)((DWORD)m_StateNC & ~tsnc);
+ }
+
+ BOOL HasThreadStateNC(ThreadStateNoConcurrency tsnc)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return ((DWORD)m_StateNC & tsnc);
+ }
+
+ PendingTypeLoadHolder* m_pPendingTypeLoad;
+
+#ifndef DACCESS_COMPILE
+ PendingTypeLoadHolder* GetPendingTypeLoad()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pPendingTypeLoad;
+ }
+
+ void SetPendingTypeLoad(PendingTypeLoadHolder* pPendingTypeLoad)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pPendingTypeLoad = pPendingTypeLoad;
+ }
+#endif
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ enum ApartmentState { AS_Unknown };
+#endif
+
+#if defined(FEATURE_COMINTEROP) && defined(MDA_SUPPORTED)
+ void RegisterRCW(RCW *pRCW)
+ {
+ }
+
+ BOOL RegisterRCWNoThrow(RCW *pRCW)
+ {
+ return FALSE;
+ }
+
+ RCW *UnregisterRCW(INDEBUG(SyncBlock *pSB))
+ {
+ return NULL;
+ }
+#endif
+
+ DWORD m_dwLastError;
+};
+
+inline void DoReleaseCheckpoint(void *checkPointMarker)
+{
+ WRAPPER_NO_CONTRACT;
+ GetThread()->m_MarshalAlloc.Collapse(checkPointMarker);
+}
+
+// CheckPointHolder : Back out to a checkpoint on the thread allocator.
+typedef Holder<void*, DoNothing,DoReleaseCheckpoint> CheckPointHolder;
+
+class AVInRuntimeImplOkayHolder
+{
+public:
+ AVInRuntimeImplOkayHolder()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+ AVInRuntimeImplOkayHolder(Thread * pThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+ ~AVInRuntimeImplOkayHolder()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+};
+
+class LeaveRuntimeHolder
+{
+public:
+ template <typename T>
+ LeaveRuntimeHolder(T target)
+ {
+ STATIC_CONTRACT_LIMITED_METHOD;
+ }
+};
+
+class LeaveRuntimeHolderNoThrow
+{
+public:
+ template <typename T>
+ LeaveRuntimeHolderNoThrow(T target)
+ {
+ STATIC_CONTRACT_LIMITED_METHOD;
+ }
+
+ HRESULT GetHR() const
+ {
+ STATIC_CONTRACT_LIMITED_METHOD;
+ return S_OK;
+ }
+};
+
+inline BOOL dbgOnly_IsSpecialEEThread() { return FALSE; }
+
+#define INCTHREADLOCKCOUNT() { }
+#define DECTHREADLOCKCOUNT() { }
+#define INCTHREADLOCKCOUNTTHREAD(thread) { }
+#define DECTHREADLOCKCOUNTTHREAD(thread) { }
+
+#define FORBIDGC_LOADER_USE_ENABLED() false
+#define ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE() ;
+
+#define BEGIN_FORBID_TYPELOAD()
+#define END_FORBID_TYPELOAD()
+#define TRIGGERS_TYPELOAD()
+
+#define TRIGGERSGC() ANNOTATION_GC_TRIGGERS
+
+inline void CommonTripThread() { }
+
+//current ad, always safe
+#define ADV_CURRENTAD 0
+//default ad, never unloaded
+#define ADV_DEFAULTAD 1
+// held by iterator, iterator holds a ref
+#define ADV_ITERATOR 2
+// the appdomain is on the stack
+#define ADV_RUNNINGIN 4
+// we're in the process of creating the appdomain; refcount guaranteed to be >0
+#define ADV_CREATING 8
+// compilation domain - ngen guarantees it won't be unloaded until everyone left
+#define ADV_COMPILATION 0x10
+// finalizer thread - synchronized with ADU
+#define ADV_FINALIZER 0x40
+// adu thread - cannot race with itself
+#define ADV_ADUTHREAD 0x80
+// held by AppDomainRefTaker
+#define ADV_REFTAKER 0x100
+
+#define CheckADValidity(pDomain,ADValidityKind) { }
+
+#define ENTER_DOMAIN_PTR(_pDestDomain,ADValidityKind) {
+#define END_DOMAIN_TRANSITION }
+
+class DeadlockAwareLock
+{
+public:
+ DeadlockAwareLock(const char *description = NULL) { }
+ ~DeadlockAwareLock() { }
+
+ BOOL CanEnterLock() { return TRUE; }
+
+ BOOL TryBeginEnterLock() { return TRUE; }
+ void BeginEnterLock() { }
+
+ void EndEnterLock() { }
+
+ void LeaveLock() { }
+
+public:
+ typedef StateHolder<DoNothing,DoNothing> BlockingLockHolder;
+};
+
+// Do not include threads.inl
+#define _THREADS_INL
+
+typedef Thread::ForbidSuspendThreadHolder ForbidSuspendThreadHolder;
+
+#else // CROSSGEN_COMPILE
+
+#ifdef _TARGET_ARM_
+#include "armsinglestepper.h"
+#endif
+
+#if !defined(PLATFORM_SUPPORTS_SAFE_THREADSUSPEND)
+// DISABLE_THREADSUSPEND controls whether Thread::SuspendThread will be used at all.
+// This API is dangerous on non-Windows platforms, as it can lead to deadlocks,
+// due to low level OS resources that the PAL is not aware of, or due to the fact that
+// PAL-unaware code in the process may hold onto some OS resources.
+#define DISABLE_THREADSUSPEND
+#endif
+
+// NT thread priorities range from -15 to +15.
+#define INVALID_THREAD_PRIORITY ((DWORD)0x80000000)
+
+// For a fiber which switched out, we set its OSID to a special number
+// Note: there's a copy of this macro in strike.cpp
+#define SWITCHED_OUT_FIBER_OSID 0xbaadf00d
+
+#ifdef _DEBUG
+// A thread doesn't receive its id until it is fully constructed.
+#define UNINITIALIZED_THREADID 0xbaadf00d
+#endif //_DEBUG
+
+// Capture all the synchronization requests, for debugging purposes
+#if defined(_DEBUG) && defined(TRACK_SYNC)
+
+// Each thread has a stack that tracks all enter and leave requests
+struct Dbg_TrackSync
+{
+ virtual void EnterSync (UINT_PTR caller, void *pAwareLock) = 0;
+ virtual void LeaveSync (UINT_PTR caller, void *pAwareLock) = 0;
+};
+
+EXTERN_C void EnterSyncHelper (UINT_PTR caller, void *pAwareLock);
+EXTERN_C void LeaveSyncHelper (UINT_PTR caller, void *pAwareLock);
+
+#endif // TRACK_SYNC
+
+#ifdef FEATURE_HIJACK
+//****************************************************************************************
+// This is the type of the start function of a redirected thread pulled from a
+// HandledJITCase during runtime suspension
+typedef void (__stdcall *PFN_REDIRECTTARGET)();
+
+// Used to capture information about the state of execution of a *SUSPENDED*
+// thread.
+struct ExecutionState;
+
+// Describes the weird argument sets during hijacking
+struct HijackArgs;
+#endif // FEATURE_HIJACK
+
+//***************************************************************************
+
+#ifdef ENABLE_CONTRACTS_IMPL
+inline Thread* GetThreadNULLOk()
+{
+ LIMITED_METHOD_CONTRACT;
+ Thread * pThread;
+ BEGIN_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
+ pThread = GetThread();
+ END_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
+ return pThread;
+}
+#else
+#define GetThreadNULLOk() GetThread()
+#endif
+
+//***************************************************************************
+#if defined(_DEBUG) && defined(_TARGET_X86_) && !defined(FEATURE_CORECLR)
+ #define HAS_TRACK_CXX_EXCEPTION_CODE_HACK 1
+ #define TRACK_CXX_EXCEPTION_CODE_HACK
+#else
+ #define HAS_TRACK_CXX_EXCEPTION_CODE_HACK 0
+#endif
+
+// manifest constant for waiting in the exposed classlibs
+const INT32 INFINITE_TIMEOUT = -1;
+
+/***************************************************************************/
+// Public enum shared between thread and threadpool
+// These are two kinds of threadpool thread that the threadpool mgr needs
+// to keep track of
+enum ThreadpoolThreadType
+{
+ WorkerThread,
+ CompletionPortThread,
+ WaitThread,
+ TimerMgrThread
+};
+//***************************************************************************
+// Public functions
+//
+// Thread* GetThread() - returns current Thread
+// Thread* SetupThread() - creates new Thread.
+// Thread* SetupUnstartedThread() - creates new unstarted Thread which
+// (obviously) isn't in a TLS.
+// void DestroyThread() - the underlying logical thread is going
+// away.
+// void DetachThread() - the underlying logical thread is going
+// away but we don't want to destroy it yet.
+//
+// Public functions for ASM code generators
+//
+// int GetThreadTLSIndex() - returns TLS index used to point to Thread
+// int GetAppDomainTLSIndex() - returns TLS index used to point to AppDomain
+// Thread* __stdcall CreateThreadBlockThrow() - creates new Thread on reverse p-invoke
+//
+// Public functions for one-time init/cleanup
+//
+// void InitThreadManager() - onetime init
+// void TerminateThreadManager() - onetime cleanup
+//
+// Public functions for taking control of a thread at a safe point
+//
+// VOID OnHijackObjectTripThread() - we've hijacked a JIT object-ref return
+// VOID OnHijackScalarTripThread() - we've hijacked a JIT non-object ref return
+//
+//***************************************************************************
+
+
+//***************************************************************************
+// Public functions
+//***************************************************************************
+
+//---------------------------------------------------------------------------
+//
+//---------------------------------------------------------------------------
+Thread* SetupThread(BOOL fInternal);
+inline Thread* SetupThread()
+{
+ WRAPPER_NO_CONTRACT;
+ return SetupThread(FALSE);
+}
+// A host can deny a thread entry into the runtime by returning a NULL IHostTask,
+// but we do still want to allow threads used by the threadpool.
+inline Thread* SetupInternalThread()
+{
+ WRAPPER_NO_CONTRACT;
+ return SetupThread(TRUE);
+}
+Thread* SetupThreadNoThrow(HRESULT *phresult = NULL);
+// WARNING : only GC calls this with bRequiresTSL set to FALSE.
+Thread* SetupUnstartedThread(BOOL bRequiresTSL=TRUE);
+void DestroyThread(Thread *th);
+
+
+FCDECL0(INT32, GetRuntimeId_Wrapper);
+
+//---------------------------------------------------------------------------
+//---------------------------------------------------------------------------
+#ifndef FEATURE_IMPLICIT_TLS
+DWORD GetThreadTLSIndex();
+DWORD GetAppDomainTLSIndex();
+#endif
+
+DWORD GetRuntimeId();
+
+EXTERN_C Thread* __stdcall CreateThreadBlockThrow();
+
+//---------------------------------------------------------------------------
+// One-time initialization. Called during Dll initialization.
+//---------------------------------------------------------------------------
+void InitThreadManager();
+
+
+// When we want to take control of a thread at a safe point, the thread will
+// eventually come back to us in one of the following trip functions:
+
+#ifdef FEATURE_HIJACK
+EXTERN_C void __stdcall OnHijackObjectTripThread(); // hijacked JIT code is returning an objectref
+EXTERN_C void __stdcall OnHijackInteriorPointerTripThread(); // hijacked JIT code is returning a byref
+EXTERN_C void __stdcall OnHijackScalarTripThread(); // hijacked JIT code is returning a non-objectref, non-FP
+#ifdef _TARGET_X86_
+EXTERN_C void __stdcall OnHijackFloatingPointTripThread(); // hijacked JIT code is returning an FP value
+#endif // _TARGET_X86_
+#endif // FEATURE_HIJACK
+
+void CommonTripThread();
+
+
+// When we resume a thread at a new location, to get an exception thrown, we have to
+// pretend the exception originated elsewhere.
+EXTERN_C void ThrowControlForThread(
+#ifdef WIN64EXCEPTIONS
+ FaultingExceptionFrame *pfef
+#endif // WIN64EXCEPTIONS
+ );
+
+
+// RWLock state inside TLS
+struct LockEntry
+{
+ LockEntry *pNext; // next entry
+ LockEntry *pPrev; // prev entry
+ LONG dwULockID;
+ LONG dwLLockID; // owning lock
+ WORD wReaderLevel; // reader nesting level
+};
+
+#if defined(_DEBUG)
+BOOL MatchThreadHandleToOsId ( HANDLE h, DWORD osId );
+#endif
+
+#ifdef FEATURE_COMINTEROP
+
+#define RCW_STACK_SIZE 64
+
+class RCWStack
+{
+public:
+ inline RCWStack()
+ {
+ LIMITED_METHOD_CONTRACT;
+ memset(this, 0, sizeof(RCWStack));
+ }
+
+ inline VOID SetEntry(unsigned int index, RCW* pRCW)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(index < RCW_STACK_SIZE);
+ PRECONDITION(CheckPointer(pRCW, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ m_pList[index] = pRCW;
+ }
+
+ inline RCW* GetEntry(unsigned int index)
+ {
+ CONTRACT (RCW*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(index < RCW_STACK_SIZE);
+ }
+ CONTRACT_END;
+
+ RETURN m_pList[index];
+ }
+
+ inline VOID SetNextStack(RCWStack* pStack)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pStack));
+ PRECONDITION(m_pNext == NULL);
+ }
+ CONTRACTL_END;
+
+ m_pNext = pStack;
+ }
+
+ inline RCWStack* GetNextStack()
+ {
+ CONTRACT (RCWStack*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RETURN m_pNext;
+ }
+
+private:
+ RCWStack* m_pNext;
+ RCW* m_pList[RCW_STACK_SIZE];
+};
+
+
+class RCWStackHeader
+{
+public:
+ RCWStackHeader()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_iIndex = 0;
+ m_iSize = RCW_STACK_SIZE;
+ m_pHead = new RCWStack();
+ }
+
+ ~RCWStackHeader()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ RCWStack* pStack = m_pHead;
+ RCWStack* pNextStack = NULL;
+
+ while (pStack)
+ {
+ pNextStack = pStack->GetNextStack();
+ delete pStack;
+ pStack = pNextStack;
+ }
+ }
+
+ bool Push(RCW* pRCW)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pRCW, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (!GrowListIfNeeded())
+ return false;
+
+ // Fast Path
+ if (m_iIndex < RCW_STACK_SIZE)
+ {
+ m_pHead->SetEntry(m_iIndex, pRCW);
+ m_iIndex++;
+ return true;
+ }
+
+ // Slow Path
+ unsigned int count = m_iIndex;
+ RCWStack* pStack = m_pHead;
+ while (count >= RCW_STACK_SIZE)
+ {
+ pStack = pStack->GetNextStack();
+ _ASSERTE(pStack);
+
+ count -= RCW_STACK_SIZE;
+ }
+
+ pStack->SetEntry(count, pRCW);
+ m_iIndex++;
+ return true;
+ }
+
+ RCW* Pop()
+ {
+ CONTRACT (RCW*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_iIndex > 0);
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ RCW* pRCW = NULL;
+
+ m_iIndex--;
+
+ // Fast Path
+ if (m_iIndex < RCW_STACK_SIZE)
+ {
+ pRCW = m_pHead->GetEntry(m_iIndex);
+ m_pHead->SetEntry(m_iIndex, NULL);
+ RETURN pRCW;
+ }
+
+ // Slow Path
+ unsigned int count = m_iIndex;
+ RCWStack* pStack = m_pHead;
+ while (count >= RCW_STACK_SIZE)
+ {
+ pStack = pStack->GetNextStack();
+ _ASSERTE(pStack);
+ count -= RCW_STACK_SIZE;
+ }
+
+ pRCW = pStack->GetEntry(count);
+ pStack->SetEntry(count, NULL);
+
+ RETURN pRCW;
+ }
+
+ BOOL IsInStack(RCW* pRCW)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pRCW));
+ }
+ CONTRACTL_END;
+
+ if (m_iIndex == 0)
+ return FALSE;
+
+ // Fast Path
+ if (m_iIndex <= RCW_STACK_SIZE)
+ {
+ for (int i = 0; i < (int)m_iIndex; i++)
+ {
+ if (pRCW == m_pHead->GetEntry(i))
+ return TRUE;
+ }
+
+ return FALSE;
+ }
+
+ // Slow Path
+ RCWStack* pStack = m_pHead;
+ int totalcount = 0;
+ while (pStack != NULL)
+ {
+ for (int i = 0; (i < RCW_STACK_SIZE) && (totalcount < m_iIndex); i++, totalcount++)
+ {
+ if (pRCW == pStack->GetEntry(i))
+ return TRUE;
+ }
+
+ pStack = pStack->GetNextStack();
+ }
+
+ return FALSE;
+ }
+
+private:
+ bool GrowListIfNeeded()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ PRECONDITION(CheckPointer(m_pHead));
+ }
+ CONTRACTL_END;
+
+ if (m_iIndex == m_iSize)
+ {
+ RCWStack* pStack = m_pHead;
+ RCWStack* pNextStack = NULL;
+ while ( (pNextStack = pStack->GetNextStack()) != NULL)
+ pStack = pNextStack;
+
+ RCWStack* pNewStack = new (nothrow) RCWStack();
+ if (NULL == pNewStack)
+ return false;
+
+ pStack->SetNextStack(pNewStack);
+
+ m_iSize += RCW_STACK_SIZE;
+ }
+
+ return true;
+ }
+
+ // Zero-based index to the first free element in the list.
+ int m_iIndex;
+
+ // Total size of the list, including all stacks.
+ int m_iSize;
+
+ // Pointer to the first stack.
+ RCWStack* m_pHead;
+};
+
+#endif // FEATURE_COMINTEROP
+
+
+typedef DWORD (*AppropriateWaitFunc) (void *args, DWORD timeout, DWORD option);
+
+// The Thread class represents a managed thread. This thread could be internal
+// or external (i.e. it wandered in from outside the runtime). For internal
+// threads, it could correspond to an exposed System.Thread object or it
+// could correspond to an internal worker thread of the runtime.
+//
+// If there's a physical Win32 thread underneath this object (i.e. it isn't an
+// unstarted System.Thread), then this instance can be found in the TLS
+// of that physical thread.
+
+
+#ifdef FEATURE_HIJACK
+EXTERN_C void STDCALL OnHijackObjectWorker(HijackArgs * pArgs);
+EXTERN_C void STDCALL OnHijackInteriorPointerWorker(HijackArgs * pArgs);
+EXTERN_C void STDCALL OnHijackScalarWorker(HijackArgs * pArgs);
+#endif // FEATURE_HIJACK
+
+
+// This is the code we pass around for Thread.Interrupt, mainly for assertions
+#define APC_Code 0xEECEECEE
+
+#ifdef DACCESS_COMPILE
+class BaseStackGuard;
+#endif
+
+// #ThreadClass
+//
+// A code:Thread contains all the per-thread information needed by the runtime. You can get at this
+// structure through an OS TLS slot; see code:#RuntimeThreadLocals for more information.
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+class Thread: public ICLRTask2
+#else // !FEATURE_INCLUDE_ALL_INTERFACES
+// Implementing IUnknown would prevent the field (e.g. m_Context) layout from being rearranged (which will need to be fixed in
+// "asmconstants.h" for the respective architecture). As it is, ICLRTask derives from IUnknown and would have got IUnknown implemented
+// here - so doing this explicitly and maintaining layout sanity should be just fine.
+class Thread: public IUnknown
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+{
+ friend struct ThreadQueue; // used to enqueue & dequeue threads onto SyncBlocks
+ friend class ThreadStore;
+ friend class ThreadSuspend;
+ friend class SyncBlock;
+ friend class Context;
+ friend struct PendingSync;
+ friend class AppDomain;
+ friend class ThreadNative;
+ friend class DeadlockAwareLock;
+#ifdef _DEBUG
+ friend class EEContract;
+#endif
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+ friend class ClrDataTask;
+#endif
+
+ friend BOOL NTGetThreadContext(Thread *pThread, T_CONTEXT *pContext);
+ friend BOOL NTSetThreadContext(Thread *pThread, const T_CONTEXT *pContext);
+
+ friend void CommonTripThread();
+
+#ifdef FEATURE_HIJACK
+ // MapWin32FaultToCOMPlusException needs access to Thread::IsAddrOfRedirectFunc()
+ friend DWORD MapWin32FaultToCOMPlusException(EXCEPTION_RECORD *pExceptionRecord);
+ friend void STDCALL OnHijackObjectWorker(HijackArgs * pArgs);
+ friend void STDCALL OnHijackInteriorPointerWorker(HijackArgs * pArgs);
+ friend void STDCALL OnHijackScalarWorker(HijackArgs * pArgs);
+#endif
+
+ friend void InitThreadManager();
+ friend void ThreadBaseObject::SetDelegate(OBJECTREF delegate);
+
+ friend void CallFinalizerOnThreadObject(Object *obj);
+
+ friend class ContextTransitionFrame; // To set m_dwBeginLockCount
+
+ // Debug and Profiler caches ThreadHandle.
+ friend class Debugger; // void Debugger::ThreadStarted(Thread* pRuntimeThread, BOOL fAttaching);
+#if defined(DACCESS_COMPILE)
+ friend class DacDbiInterfaceImpl; // DacDbiInterfaceImpl::GetThreadHandle(HANDLE * phThread);
+#endif // DACCESS_COMPILE
+ friend class ProfToEEInterfaceImpl; // HRESULT ProfToEEInterfaceImpl::GetHandleFromThread(ThreadID threadId, HANDLE *phThread);
+ friend class CExecutionEngine;
+ friend class UnC;
+ friend class CheckAsmOffsets;
+
+ friend class ExceptionTracker;
+ friend class ThreadExceptionState;
+
+ friend class StackFrameIterator;
+
+ friend class ThreadStatics;
+
+ VPTR_BASE_CONCRETE_VTABLE_CLASS(Thread)
+
+public:
+ enum SetThreadStackGuaranteeScope { STSGuarantee_Force, STSGuarantee_OnlyIfEnabled };
+ static BOOL IsSetThreadStackGuaranteeInUse(SetThreadStackGuaranteeScope fScope = STSGuarantee_OnlyIfEnabled)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if(STSGuarantee_Force == fScope)
+ return TRUE;
+
+ // The runtime must be hosted to have an escalation policy.
+ // If escalation policy is enabled but StackOverflow is not part of the policy,
+ // then we don't use SetThreadStackGuarantee.
+ if(!CLRHosted() ||
+ GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeExitProcess)
+ {
+ // FAIL_StackOverflow is ProcessExit, so don't use SetThreadStackGuarantee.
+ return FALSE;
+ }
+ return TRUE;
+ }
+
+public:
+
+ // If we are trying to suspend a thread, we set the appropriate pending bit to
+ // indicate why we want to suspend it (TS_GCSuspendPending, TS_UserSuspendPending,
+ // TS_DebugSuspendPending).
+ //
+ // If instead the thread has blocked itself, via WaitSuspendEvent, we indicate
+ // this with TS_SyncSuspended. However, we need to know whether the synchronous
+ // suspension is for a user request, or for an internal one (GC & Debug). That's
+ // because a user request is not allowed to resume a thread suspended for
+    // debugging or GC. -- That's not strictly true. It is allowed to resume such a
+ // thread so long as it was ALSO suspended by the user. In other words, this
+ // ensures that user resumptions aren't unbalanced from user suspensions.
+ //
+ enum ThreadState
+ {
+ TS_Unknown = 0x00000000, // threads are initialized this way
+
+ TS_AbortRequested = 0x00000001, // Abort the thread
+ TS_GCSuspendPending = 0x00000002, // waiting to get to safe spot for GC
+ TS_UserSuspendPending = 0x00000004, // user suspension at next opportunity
+ TS_DebugSuspendPending = 0x00000008, // Is the debugger suspending threads?
+ TS_GCOnTransitions = 0x00000010, // Force a GC on stub transitions (GCStress only)
+
+ TS_LegalToJoin = 0x00000020, // Is it now legal to attempt a Join()
+
+ TS_YieldRequested = 0x00000040, // The task should yield
+
+#ifdef FEATURE_HIJACK
+ TS_Hijacked = 0x00000080, // Return address has been hijacked
+#endif // FEATURE_HIJACK
+        TS_BlockGCForSO           = 0x00000100,    // If a thread does not have enough stack, WaitUntilGCComplete may fail.
+                                                   // Either GC suspension will wait until the thread has cleared this bit,
+                                                   // or the current thread will spin if the GC has suspended all threads.
+ TS_Background = 0x00000200, // Thread is a background thread
+ TS_Unstarted = 0x00000400, // Thread has never been started
+ TS_Dead = 0x00000800, // Thread is dead
+
+ TS_WeOwn = 0x00001000, // Exposed object initiated this thread
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ TS_CoInitialized = 0x00002000, // CoInitialize has been called for this thread
+
+ TS_InSTA = 0x00004000, // Thread hosts an STA
+ TS_InMTA = 0x00008000, // Thread is part of the MTA
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+ // Some bits that only have meaning for reporting the state to clients.
+ TS_ReportDead = 0x00010000, // in WaitForOtherThreads()
+ TS_FullyInitialized = 0x00020000, // Thread is fully initialized and we are ready to broadcast its existence to external clients
+
+ TS_TaskReset = 0x00040000, // The task is reset
+
+ TS_SyncSuspended = 0x00080000, // Suspended via WaitSuspendEvent
+ TS_DebugWillSync = 0x00100000, // Debugger will wait for this thread to sync
+
+ TS_StackCrawlNeeded = 0x00200000, // A stackcrawl is needed on this thread, such as for thread abort
+ // See comment for s_pWaitForStackCrawlEvent for reason.
+
+ TS_SuspendUnstarted = 0x00400000, // latch a user suspension on an unstarted thread
+
+ TS_Aborted = 0x00800000, // is the thread aborted?
+ TS_TPWorkerThread = 0x01000000, // is this a threadpool worker thread?
+
+ TS_Interruptible = 0x02000000, // sitting in a Sleep(), Wait(), Join()
+ TS_Interrupted = 0x04000000, // was awakened by an interrupt APC. !!! This can be moved to TSNC
+
+ TS_CompletionPortThread = 0x08000000, // Completion port thread
+
+ TS_AbortInitiated = 0x10000000, // set when abort is begun
+
+ TS_Finalized = 0x20000000, // The associated managed Thread object has been finalized.
+ // We can clean up the unmanaged part now.
+
+ TS_FailStarted = 0x40000000, // The thread fails during startup.
+ TS_Detached = 0x80000000, // Thread was detached by DllMain
+
+ // <TODO> @TODO: We need to reclaim the bits that have no concurrency issues (i.e. they are only
+ // manipulated by the owning thread) and move them off to a different DWORD. Note if this
+ // enum is changed, we also need to update SOS to reflect this.</TODO>
+
+ // We require (and assert) that the following bits are less than 0x100.
+ TS_CatchAtSafePoint = (TS_UserSuspendPending | TS_AbortRequested |
+ TS_GCSuspendPending | TS_DebugSuspendPending | TS_GCOnTransitions | TS_YieldRequested),
+ };
+
+ // Thread flags that aren't really states in themselves but rather things the thread
+ // has to do.
+ enum ThreadTasks
+ {
+ TT_CleanupSyncBlock = 0x00000001, // The synch block needs to be cleaned up.
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ TT_CallCoInitialize = 0x00000002, // CoInitialize needs to be called.
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ };
+
+ // Thread flags that have no concurrency issues (i.e., they are only manipulated by the owning thread). Use these
+ // state flags when you have a new thread state that doesn't belong in the ThreadState enum above.
+ //
+ // <TODO>@TODO: its possible that the ThreadTasks from above and these flags should be merged.</TODO>
+ enum ThreadStateNoConcurrency
+ {
+ TSNC_Unknown = 0x00000000, // threads are initialized this way
+
+ TSNC_DebuggerUserSuspend = 0x00000001, // marked "suspended" by the debugger
+ TSNC_DebuggerReAbort = 0x00000002, // thread needs to re-abort itself when resumed by the debugger
+ TSNC_DebuggerIsStepping = 0x00000004, // debugger is stepping this thread
+ TSNC_DebuggerIsManagedException = 0x00000008, // EH is re-raising a managed exception.
+ TSNC_WaitUntilGCFinished = 0x00000010, // The current thread is waiting for GC. If host returns
+ // SO during wait, we will either spin or make GC wait.
+ TSNC_BlockedForShutdown = 0x00000020, // Thread is blocked in WaitForEndOfShutdown. We should not hit WaitForEndOfShutdown again.
+ TSNC_SOWorkNeeded = 0x00000040, // The thread needs to wake up AD unload helper thread to finish SO work
+ TSNC_CLRCreatedThread = 0x00000080, // The thread was created through Thread::CreateNewThread
+ TSNC_ExistInThreadStore = 0x00000100, // For dtor to know if it needs to be removed from ThreadStore
+ TSNC_UnsafeSkipEnterCooperative = 0x00000200, // This is a "fix" for deadlocks caused when cleaning up COM
+ TSNC_OwnsSpinLock = 0x00000400, // The thread owns a spinlock.
+ TSNC_PreparingAbort = 0x00000800, // Preparing abort. This avoids recursive HandleThreadAbort call.
+        TSNC_OSAlertableWait            = 0x00001000, // The thread is in an OS-alertable wait.
+ TSNC_ADUnloadHelper = 0x00002000, // This thread is AD Unload helper.
+ TSNC_CreatingTypeInitException = 0x00004000, // Thread is trying to create a TypeInitException
+ TSNC_InTaskSwitch = 0x00008000, // A task is switching
+ TSNC_AppDomainContainUnhandled = 0x00010000, // Used to control how unhandled exception reporting occurs.
+ // See detailed explanation for this bit in threads.cpp
+ TSNC_InRestoringSyncBlock = 0x00020000, // The thread is restoring its SyncBlock for Object.Wait.
+ // After the thread is interrupted once, we turn off interruption
+ // at the beginning of wait.
+ TSNC_DisableOleaut32Check = 0x00040000, // Disable oleaut32 delay load check. Oleaut32 has
+ // been loaded
+ TSNC_CannotRecycle = 0x00080000, // A host can not recycle this Thread object. When a thread
+ // has orphaned lock, we will apply this.
+ TSNC_RaiseUnloadEvent = 0x00100000, // Finalize thread is raising managed unload event which
+ // may call AppDomain.Unload.
+ TSNC_UnbalancedLocks = 0x00200000, // Do not rely on lock accounting for this thread:
+ // we left an app domain with a lock count different from
+ // when we entered it
+ TSNC_DisableSOCheckInHCALL = 0x00400000, // Some HCALL method may be called directly from VM.
+ // We can not assert they are called in SOTolerant
+ // region.
+ TSNC_IgnoreUnhandledExceptions = 0x00800000, // Set for a managed thread born inside an appdomain created with the APPDOMAIN_IGNORE_UNHANDLED_EXCEPTIONS flag.
+        TSNC_ProcessedUnhandledException = 0x01000000,// Set on a thread on which we have done unhandled exception processing so that
+                                                      // we don't perform it again when the OS invokes our UEF. Currently, applicable threads include:
+ // 1) entry point thread of a managed app
+ // 2) new managed thread created in default domain
+ //
+ // For such threads, we will return to the OS after our UE processing is done
+ // and the OS will start invoking the UEFs. If our UEF gets invoked, it will try to
+ // perform the UE processing again. We will use this flag to prevent the duplicated
+ // effort.
+ //
+ // Once we are completely independent of the OS UEF, we could remove this.
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+ TSNC_InsideSyncContextWait = 0x02000000, // Whether we are inside DoSyncContextWait
+#endif // FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+ TSNC_DebuggerSleepWaitJoin = 0x04000000, // Indicates to the debugger that this thread is in a sleep wait or join state
+ // This almost mirrors the TS_Interruptible state however that flag can change
+ // during GC-preemptive mode whereas this one cannot.
+#ifdef FEATURE_COMINTEROP
+ TSNC_WinRTInitialized = 0x08000000, // the thread has initialized WinRT
+#endif // FEATURE_COMINTEROP
+
+ TSNC_ForceStackCommit = 0x10000000, // Commit the whole stack, even if disableCommitThreadStack is set
+
+        TSNC_CallingManagedCodeDisabled = 0x20000000, // Used by the multicore JIT feature to assert on calling managed code / loading a module on the background thread.
+                                                      // Exceptions: system modules are allowed, and security demands are allowed.
+
+        TSNC_LoadsTypeViolation         = 0x40000000, // Used by the type loader to break deadlocks caused by type load level ordering violations
+
+        TSNC_EtwStackWalkInProgress     = 0x80000000, // Set on the thread so that ETW knows a stackwalk is in progress
+                                                      // and does not start another stackwalk on the same thread.
+                                                      // There are cases during managed debugging when we can run into this situation.
+ };
+
+ // Functions called by host
+ STDMETHODIMP QueryInterface(REFIID riid, void** ppv)
+ DAC_EMPTY_RET(E_NOINTERFACE);
+ STDMETHODIMP_(ULONG) AddRef(void)
+ DAC_EMPTY_RET(0);
+ STDMETHODIMP_(ULONG) Release(void)
+ DAC_EMPTY_RET(0);
+ STDMETHODIMP SwitchIn(HANDLE threadHandle)
+ DAC_EMPTY_RET(E_FAIL);
+ STDMETHODIMP SwitchOut()
+ DAC_EMPTY_RET(E_FAIL);
+ STDMETHODIMP Reset (BOOL fFull)
+ DAC_EMPTY_RET(E_FAIL);
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ STDMETHODIMP GetMemStats(COR_GC_THREAD_STATS *memUsage)
+ DAC_EMPTY_RET(E_FAIL);
+#endif //FEATURE_INCLUDE_ALL_INTERFACES
+ STDMETHODIMP ExitTask()
+ DAC_EMPTY_RET(E_FAIL);
+ STDMETHODIMP Abort()
+ DAC_EMPTY_RET(E_FAIL);
+ STDMETHODIMP RudeAbort()
+ DAC_EMPTY_RET(E_FAIL);
+ STDMETHODIMP NeedsPriorityScheduling(BOOL *pbNeedsPriorityScheduling)
+ DAC_EMPTY_RET(E_FAIL);
+
+ STDMETHODIMP YieldTask()
+ DAC_EMPTY_RET(E_FAIL);
+ STDMETHODIMP LocksHeld(SIZE_T *pLockCount)
+ DAC_EMPTY_RET(E_FAIL);
+    STDMETHODIMP SetTaskIdentifier(TASKID taskid)
+ DAC_EMPTY_RET(E_FAIL);
+
+ STDMETHODIMP BeginPreventAsyncAbort()
+ DAC_EMPTY_RET(E_FAIL);
+ STDMETHODIMP EndPreventAsyncAbort()
+ DAC_EMPTY_RET(E_FAIL);
+
+ STDMETHODIMP SetLocale(LCID lcid);
+ STDMETHODIMP SetUILocale(LCID lcid);
+
+ void InternalReset (BOOL fFull, BOOL fNotFinalizerThread=FALSE, BOOL fThreadObjectResetNeeded=TRUE, BOOL fResetAbort=TRUE);
+ INT32 ResetManagedThreadObject(INT32 nPriority);
+ INT32 ResetManagedThreadObjectInCoopMode(INT32 nPriority);
+ BOOL IsRealThreadPoolResetNeeded();
+private:
+ //Helpers for reset...
+ void FullResetThread();
+public:
+ void InternalSwitchOut();
+
+ HRESULT DetachThread(BOOL fDLLThreadDetach);
+
+ void SetThreadState(ThreadState ts)
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockOr((DWORD*)&m_State, ts);
+ }
+
+ void ResetThreadState(ThreadState ts)
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockAnd((DWORD*)&m_State, ~ts);
+ }
+
+ BOOL HasThreadState(ThreadState ts)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ((DWORD)m_State & ts);
+ }
+
+ //
+ // This is meant to be used for quick opportunistic checks for thread abort and similar conditions. This method
+    // does not erect a memory barrier, so it may occasionally return a stale result that the caller has to handle.
+ //
+ BOOL HasThreadStateOpportunistic(ThreadState ts)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_State.LoadWithoutBarrier() & ts;
+ }
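+
+    // A minimal illustrative sketch (not a call site from this file): a polling loop can
+    // use the cheap opportunistic test first and re-check with the barriered accessor
+    // only when it fires, tolerating a stale first read:
+    //
+    //     if (pThread->HasThreadStateOpportunistic(Thread::TS_AbortRequested) &&
+    //         pThread->HasThreadState(Thread::TS_AbortRequested))
+    //     {
+    //         // handle the pending abort request
+    //     }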
+
+ void SetThreadStateNC(ThreadStateNoConcurrency tsnc)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_StateNC = (ThreadStateNoConcurrency)((DWORD)m_StateNC | tsnc);
+ }
+
+ void ResetThreadStateNC(ThreadStateNoConcurrency tsnc)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_StateNC = (ThreadStateNoConcurrency)((DWORD)m_StateNC & ~tsnc);
+ }
+
+ BOOL HasThreadStateNC(ThreadStateNoConcurrency tsnc)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return ((DWORD)m_StateNC & tsnc);
+ }
+
+ void MarkEtwStackWalkInProgress()
+ {
+ WRAPPER_NO_CONTRACT;
+ SetThreadStateNC(Thread::TSNC_EtwStackWalkInProgress);
+ }
+
+ void MarkEtwStackWalkCompleted()
+ {
+ WRAPPER_NO_CONTRACT;
+ ResetThreadStateNC(Thread::TSNC_EtwStackWalkInProgress);
+ }
+
+ BOOL IsEtwStackWalkInProgress()
+ {
+ WRAPPER_NO_CONTRACT;
+ return HasThreadStateNC(Thread::TSNC_EtwStackWalkInProgress);
+ }
+
+ DWORD RequireSyncBlockCleanup()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_ThreadTasks & TT_CleanupSyncBlock);
+ }
+
+ void SetSyncBlockCleanup()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockOr((ULONG *)&m_ThreadTasks, TT_CleanupSyncBlock);
+ }
+
+ void ResetSyncBlockCleanup()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockAnd((ULONG *)&m_ThreadTasks, ~TT_CleanupSyncBlock);
+ }
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ DWORD IsCoInitialized()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_State & TS_CoInitialized);
+ }
+
+ void SetCoInitialized()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockOr((ULONG *)&m_State, TS_CoInitialized);
+ FastInterlockAnd((ULONG*)&m_ThreadTasks, ~TT_CallCoInitialize);
+ }
+
+ void ResetCoInitialized()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockAnd((ULONG *)&m_State,~TS_CoInitialized);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ BOOL IsWinRTInitialized()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return HasThreadStateNC(TSNC_WinRTInitialized);
+ }
+
+ void ResetWinRTInitialized()
+ {
+ LIMITED_METHOD_CONTRACT;
+ ResetThreadStateNC(TSNC_WinRTInitialized);
+ }
+#endif // FEATURE_COMINTEROP
+
+ DWORD RequiresCoInitialize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_ThreadTasks & TT_CallCoInitialize);
+ }
+
+ void SetRequiresCoInitialize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockOr((ULONG *)&m_ThreadTasks, TT_CallCoInitialize);
+ }
+
+ void ResetRequiresCoInitialize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockAnd((ULONG *)&m_ThreadTasks,~TT_CallCoInitialize);
+ }
+
+ void CleanupCOMState();
+
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+#ifdef FEATURE_COMINTEROP
+ bool IsDisableComObjectEagerCleanup()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fDisableComObjectEagerCleanup;
+ }
+ void SetDisableComObjectEagerCleanup()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_fDisableComObjectEagerCleanup = true;
+ }
+#endif //FEATURE_COMINTEROP
+
+    // Returns whether there is extra work for the finalizer thread.
+ BOOL HaveExtraWorkForFinalizer();
+
+ // do the extra finalizer work.
+ void DoExtraWorkForFinalizer();
+
+#ifndef DACCESS_COMPILE
+ DWORD CatchAtSafePoint()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_State & TS_CatchAtSafePoint);
+ }
+
+ DWORD CatchAtSafePointOpportunistic()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return HasThreadStateOpportunistic(TS_CatchAtSafePoint);
+ }
+#endif // DACCESS_COMPILE
+
+ DWORD IsBackground()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_State & TS_Background);
+ }
+
+ DWORD IsUnstarted()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return (m_State & TS_Unstarted);
+ }
+
+ DWORD IsDead()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_State & TS_Dead);
+ }
+
+ DWORD IsAborted()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_State & TS_Aborted);
+ }
+
+ void SetAborted()
+ {
+ FastInterlockOr((ULONG *) &m_State, TS_Aborted);
+ }
+
+ void ClearAborted()
+ {
+ FastInterlockAnd((ULONG *) &m_State, ~TS_Aborted);
+ }
+
+ DWORD DoWeOwn()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_State & TS_WeOwn);
+ }
+
+ // For reporting purposes, grab a consistent snapshot of the thread's state
+ ThreadState GetSnapshotState();
+
+ // For delayed destruction of threads
+ DWORD IsDetached()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_State & TS_Detached);
+ }
+
+#ifdef FEATURE_STACK_PROBE
+//---------------------------------------------------------------------------------------
+//
+// IsSOTolerant - Is the current thread in SO Tolerant region?
+//
+// Arguments:
+// pLimitFrame: the limit of search for frames
+//
+// Return Value:
+// TRUE if in SO tolerant region.
+// FALSE if in SO intolerant region.
+//
+// Note:
+//    We walk our frame chain to decide.  If a HelperMethodFrame is seen first, we are in the
+//    tolerant region.  If an EnterSOIntolerantCodeFrame is seen first, we are in the intolerant region.
+//
+ BOOL IsSOTolerant(void * pLimitFrame);
+#endif
+
+#ifdef _DEBUG
+ class DisableSOCheckInHCALL
+ {
+ private:
+ Thread *m_pThread;
+ public:
+ DisableSOCheckInHCALL()
+ {
+ m_pThread = GetThread();
+ m_pThread->SetThreadStateNC(TSNC_DisableSOCheckInHCALL);
+ }
+ ~DisableSOCheckInHCALL()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pThread->ResetThreadStateNC(TSNC_DisableSOCheckInHCALL);
+ }
+ };
+#endif
+ static LONG m_DetachCount;
+    static LONG m_ActiveDetachCount;  // Count of non-background threads that have been detached
+
+ static Volatile<LONG> m_threadsAtUnsafePlaces;
+
+ // Offsets for the following variables need to fit in 1 byte, so keep near
+ // the top of the object. Also, we want cache line filling to work for us
+ // so the critical stuff is ordered based on frequency of use.
+
+ Volatile<ThreadState> m_State; // Bits for the state of the thread
+
+ // If TRUE, GC is scheduled cooperatively with this thread.
+ // NOTE: This "byte" is actually a boolean - we don't allow
+ // recursive disables.
+ Volatile<ULONG> m_fPreemptiveGCDisabled;
+
+ PTR_Frame m_pFrame; // The Current Frame
+ PTR_Frame m_pUnloadBoundaryFrame;
+
+ //-----------------------------------------------------------
+ // If the thread has wandered in from the outside this is
+ // its Domain.
+ //-----------------------------------------------------------
+ PTR_AppDomain m_pDomain;
+
+ // Track the number of locks (critical section, spin lock, syncblock lock,
+ // EE Crst, GC lock) held by the current thread.
+ DWORD m_dwLockCount;
+
+ // Unique thread id used for thin locks - kept as small as possible, as we have limited space
+ // in the object header to store it.
+ DWORD m_ThreadId;
+
+ #if HAS_TRACK_CXX_EXCEPTION_CODE_HACK // do we have C++ exception code tracking?
+ // It's very hard to deal with SEH properly using C++ catch handlers. The
+ // following field is updated with the correct SEH exception whenever a C++
+ // __CxxFrameHandler3 call is made on this thread. If you grab it at the
+ // top of a C++ catch(...), it's likely to be correct.
+ DWORD m_LastCxxSEHExceptionCode;
+ #endif // HAS_TRACK_CXX_EXCEPTION_CODE_HACK
+
+ // RWLock state
+ LockEntry *m_pHead;
+ LockEntry m_embeddedEntry;
+
+#ifndef DACCESS_COMPILE
+ Frame* NotifyFrameChainOfExceptionUnwind(Frame* pStartFrame, LPVOID pvLimitSP);
+#endif // DACCESS_COMPILE
+
+#if defined(FEATURE_COMINTEROP) && !defined(DACCESS_COMPILE)
+ void RegisterRCW(RCW *pRCW)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pRCW));
+ }
+ CONTRACTL_END;
+
+ if (!m_pRCWStack->Push(pRCW))
+ {
+ ThrowOutOfMemory();
+ }
+ }
+
+ // Returns false on OOM.
+ BOOL RegisterRCWNoThrow(RCW *pRCW)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pRCW, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ return m_pRCWStack->Push(pRCW);
+ }
+
+ RCW *UnregisterRCW(INDEBUG(SyncBlock *pSB))
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pSB));
+ }
+ CONTRACTL_END;
+
+ RCW* pPoppedRCW = m_pRCWStack->Pop();
+
+#ifdef _DEBUG
+ // The RCW we popped must be the one pointed to by pSB if pSB still points to an RCW.
+ RCW* pCurrentRCW = pSB->GetInteropInfoNoCreate()->GetRawRCW();
+ _ASSERTE(pCurrentRCW == NULL || pPoppedRCW == NULL || pCurrentRCW == pPoppedRCW);
+#endif // _DEBUG
+
+ return pPoppedRCW;
+ }
+
+ BOOL RCWIsInUse(RCW* pRCW)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pRCW));
+ }
+ CONTRACTL_END;
+
+ return m_pRCWStack->IsInStack(pRCW);
+ }
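+
+    // Sketch of the intended pairing (hypothetical call site; pRCW/pSB are illustrative):
+    // register the RCW for the duration of a call out through it, so RCWIsInUse can veto
+    // eager cleanup attempted from another thread:
+    //
+    //     pThread->RegisterRCW(pRCW);                      // throws on OOM
+    //     // ... call out through the RCW ...
+    //     RCW *pPopped = pThread->UnregisterRCW(INDEBUG(pSB));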
+#endif // FEATURE_COMINTEROP && !DACCESS_COMPILE
+
+ // The context within which this thread is executing. As the thread crosses
+ // context boundaries, the context mechanism adjusts this so it's always
+ // current.
+ // <TODO>@TODO cwb: When we add COM+ 1.0 Context Interop, this should get moved out
+ // of the Thread object and into its own slot in the TLS.</TODO>
+ // The address of the context object is also used as the ContextID!
+ PTR_Context m_Context;
+
+public:
+
+ // on MP systems, each thread has its own allocation chunk so we can avoid
+ // lock prefixes and expensive MP cache snooping stuff
+ alloc_context m_alloc_context;
+
+ inline alloc_context *GetAllocContext() { LIMITED_METHOD_CONTRACT; return &m_alloc_context; }
+
+ // This is the type handle of the first object in the alloc context at the time
+    // we fire the AllocationTick event. It's only for tooling purposes.
+ TypeHandle m_thAllocContextObj;
+
+#ifndef FEATURE_PAL
+private:
+ _NT_TIB *m_pTEB;
+public:
+ _NT_TIB *GetTEB() {
+ LIMITED_METHOD_CONTRACT;
+ return m_pTEB;
+ }
+ PEXCEPTION_REGISTRATION_RECORD *GetExceptionListPtr() {
+ WRAPPER_NO_CONTRACT;
+ return &GetTEB()->ExceptionList;
+ }
+#endif // !FEATURE_PAL
+
+ inline void SetTHAllocContextObj(TypeHandle th) {LIMITED_METHOD_CONTRACT; m_thAllocContextObj = th; }
+
+ inline TypeHandle GetTHAllocContextObj() {LIMITED_METHOD_CONTRACT; return m_thAllocContextObj; }
+
+#ifdef FEATURE_COMINTEROP
+ // The header for the per-thread in-use RCW stack.
+ RCWStackHeader* m_pRCWStack;
+#endif // FEATURE_COMINTEROP
+
+ // Allocator used during marshaling for temporary buffers, much faster than
+ // heap allocation.
+ //
+ // Uses of this allocator should be effectively statically scoped, i.e. a "region"
+ // is started using a CheckPointHolder and GetCheckpoint, and this region can then be used for allocations
+ // from that point onwards, and then all memory is reclaimed when the static scope for the
+ // checkpoint is exited by the running thread.
+ StackingAllocator m_MarshalAlloc;
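+
+    // A minimal sketch of the checkpoint pattern described above, assuming the
+    // GetCheckpoint/CheckPointHolder API referenced in the comment (cbNeeded is an
+    // illustrative size):
+    //
+    //     CheckPointHolder cph(pThread->m_MarshalAlloc.GetCheckpoint());
+    //     void *pBuf = pThread->m_MarshalAlloc.Alloc(cbNeeded);
+    //     // ... use pBuf for the duration of the marshaling call ...
+    //     // everything allocated since GetCheckpoint is reclaimed when cph unwinds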
+
+ // Flags used to indicate tasks the thread has to do.
+ ThreadTasks m_ThreadTasks;
+
+ // Flags for thread states that have no concurrency issues.
+ ThreadStateNoConcurrency m_StateNC;
+
+ inline void IncLockCount();
+ inline void DecLockCount();
+
+private:
+ DWORD m_dwBeginLockCount; // lock count when the thread enters current domain
+    DWORD m_dwBeginCriticalRegionCount;  // critical region count when the thread enters current domain
+ DWORD m_dwCriticalRegionCount;
+
+ DWORD m_dwThreadAffinityCount;
+
+#ifdef _DEBUG
+ DWORD dbg_m_cSuspendedThreads;
+ // Count of suspended threads that we know are not in native code (and therefore cannot hold OS lock which prevents us calling out to host)
+ DWORD dbg_m_cSuspendedThreadsWithoutOSLock;
+ EEThreadId m_Creater;
+#endif
+
+ // After we suspend a thread, we may need to call EEJitManager::JitCodeToMethodInfo
+    // or StressLog, which may wait on a spinlock.  It is unsafe to suspend a thread while it
+ // is in this state.
+ Volatile<LONG> m_dwForbidSuspendThread;
+public:
+
+ static void IncForbidSuspendThread()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+#ifndef DACCESS_COMPILE
+ Thread * pThread = GetThreadNULLOk();
+ if (pThread)
+ {
+ _ASSERTE (pThread->m_dwForbidSuspendThread != (LONG)MAXLONG);
+#ifdef _DEBUG
+ {
+ //DEBUG_ONLY;
+ STRESS_LOG2(LF_SYNC, LL_INFO100000, "Set forbid suspend [%d] for thread %p.\n", pThread->m_dwForbidSuspendThread.Load(), pThread);
+ }
+#endif
+ FastInterlockIncrement(&pThread->m_dwForbidSuspendThread);
+ }
+#endif //!DACCESS_COMPILE
+ }
+
+ static void DecForbidSuspendThread()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+#ifndef DACCESS_COMPILE
+ Thread * pThread = GetThreadNULLOk();
+ if (pThread)
+ {
+ _ASSERTE (pThread->m_dwForbidSuspendThread != (LONG)0);
+ FastInterlockDecrement(&pThread->m_dwForbidSuspendThread);
+#ifdef _DEBUG
+ {
+ //DEBUG_ONLY;
+ STRESS_LOG2(LF_SYNC, LL_INFO100000, "Reset forbid suspend [%d] for thread %p.\n", pThread->m_dwForbidSuspendThread.Load(), pThread);
+ }
+#endif
+ }
+#endif //!DACCESS_COMPILE
+ }
+
+ bool IsInForbidSuspendRegion()
+ {
+ return m_dwForbidSuspendThread != (LONG)0;
+ }
+
+ // The ForbidSuspendThreadHolder is used during the initialization of the stack marker infrastructure so
+ // it can't do any backout stack validation (which is why we pass in VALIDATION_TYPE=HSV_NoValidation).
+ typedef StateHolder<Thread::IncForbidSuspendThread, Thread::DecForbidSuspendThread, HSV_NoValidation> ForbidSuspendThreadHolder;
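+
+    // Minimal usage sketch (hypothetical call site): the holder brackets a region in which
+    // this thread must not be OS-suspended, e.g. while it might acquire a spinlock that the
+    // suspending thread could then deadlock on:
+    //
+    //     {
+    //         Thread::ForbidSuspendThreadHolder forbid;
+    //         // code that must not be stopped mid-flight by Thread::SuspendThread
+    //     }   // DecForbidSuspendThread runs when the holder goes out of scope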
+
+private:
+ // Per thread counter to dispense hash code - kept in the thread so we don't need a lock
+    // or interlocked operations to get a new hash code.
+ DWORD m_dwHashCodeSeed;
+
+ // Lock thread is trying to acquire
+ VolatilePtr<DeadlockAwareLock> m_pBlockingLock;
+
+public:
+
+ inline BOOL HasLockInCurrentDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(m_dwLockCount >= m_dwBeginLockCount);
+ _ASSERTE(m_dwCriticalRegionCount >= m_dwBeginCriticalRegionCount);
+
+ // Equivalent to (m_dwLockCount != m_dwBeginLockCount ||
+        //                m_dwCriticalRegionCount != m_dwBeginCriticalRegionCount),
+ // but without branching instructions
+ return ((m_dwLockCount ^ m_dwBeginLockCount) |
+ (m_dwCriticalRegionCount ^ m_dwBeginCriticalRegionCount));
+ }
+
+ inline void BeginCriticalRegion()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE (GetThread() == this);
+ if (CLRHosted())
+ {
+ m_dwCriticalRegionCount ++;
+ _ASSERTE (m_dwCriticalRegionCount != 0);
+ }
+ }
+
+ inline void EndCriticalRegion()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE (GetThread() == this);
+ if (CLRHosted())
+ {
+ _ASSERTE (m_dwCriticalRegionCount > 0);
+ m_dwCriticalRegionCount --;
+ }
+ }
+
+ inline void BeginCriticalRegion_NoCheck()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE (GetThread() == this);
+ m_dwCriticalRegionCount ++;
+ _ASSERTE (m_dwCriticalRegionCount != 0);
+ }
+
+ inline void EndCriticalRegion_NoCheck()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE (GetThread() == this);
+ _ASSERTE (m_dwCriticalRegionCount > 0);
+ m_dwCriticalRegionCount --;
+ }
+
+ inline BOOL HasCriticalRegion()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwCriticalRegionCount != 0;
+ }
+
+ inline DWORD GetNewHashCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // Every thread has its own generator for hash codes so that we won't get into a situation
+ // where two threads consistently give out the same hash codes.
+ // Choice of multiplier guarantees period of 2**32 - see Knuth Vol 2 p16 (3.2.1.2 Theorem A).
+ DWORD multiplier = GetThreadId()*4 + 5;
+ m_dwHashCodeSeed = m_dwHashCodeSeed*multiplier + 1;
+ return m_dwHashCodeSeed;
+ }
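+
+    // A hedged note on why the multiplier choice works (not from the original source):
+    // m_dwHashCodeSeed follows the linear congruential recurrence
+    //     seed' = a * seed + 1 (mod 2^32), with a = 4*GetThreadId() + 5.
+    // The increment is odd and a == 1 (mod 4), which is exactly the full-period condition
+    // of the Knuth theorem cited above, so a single thread cycles through all 2^32 seeds
+    // before repeating a hash code.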
+
+#ifdef _DEBUG
+ // If the current thread suspends other threads, we need to make sure that the thread
+ // only allocates memory if the suspended threads do not have OS Heap lock.
+ static BOOL Debug_AllowCallout()
+ {
+ LIMITED_METHOD_CONTRACT;
+ Thread * pThread = GetThreadNULLOk();
+ return ((pThread == NULL) || (pThread->dbg_m_cSuspendedThreads == pThread->dbg_m_cSuspendedThreadsWithoutOSLock));
+ }
+
+ // Returns number of threads that are currently suspended by the current thread and that can potentially hold OS lock
+ BOOL Debug_GetUnsafeSuspendeeCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (dbg_m_cSuspendedThreads - dbg_m_cSuspendedThreadsWithoutOSLock);
+ }
+#endif
+
+public:
+ static void __stdcall LeaveRuntime(size_t target);
+ static HRESULT LeaveRuntimeNoThrow(size_t target);
+ static void __stdcall LeaveRuntimeThrowComplus(size_t target);
+ static void __stdcall EnterRuntime();
+ static HRESULT EnterRuntimeNoThrow();
+ static HRESULT EnterRuntimeNoThrowWorker();
+
+ // Reverse PInvoke hook for host
+ static void ReverseEnterRuntime();
+ static HRESULT ReverseEnterRuntimeNoThrow();
+ static void ReverseEnterRuntimeThrowComplusHelper(HRESULT hr);
+ static void ReverseEnterRuntimeThrowComplus();
+ static void ReverseLeaveRuntime();
+
+ inline void IncThreadAffinityCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE (GetThread() == this);
+ m_dwThreadAffinityCount++;
+ _ASSERTE (m_dwThreadAffinityCount > 0);
+ }
+ inline void DecThreadAffinityCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE (GetThread() == this);
+ _ASSERTE (m_dwThreadAffinityCount > 0);
+ m_dwThreadAffinityCount --;
+ }
+
+ // Hook for OS Critical Section, Mutex, and others that require thread affinity
+ static void BeginThreadAffinity();
+ static void EndThreadAffinity();
+
+ static void BeginThreadAffinityAndCriticalRegion()
+ {
+ LIMITED_METHOD_CONTRACT;
+ BeginThreadAffinity();
+ GetThread()->BeginCriticalRegion();
+ }
+
+ static void EndThreadAffinityAndCriticalRegion()
+ {
+ LIMITED_METHOD_CONTRACT;
+ GetThread()->EndCriticalRegion();
+ EndThreadAffinity();
+ }
+
+ BOOL HasThreadAffinity()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwThreadAffinityCount > 0;
+ }
+
+ private:
+ LoadLevelLimiter *m_pLoadLimiter;
+
+ public:
+ LoadLevelLimiter *GetLoadLevelLimiter()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pLoadLimiter;
+ }
+
+ void SetLoadLevelLimiter(LoadLevelLimiter *limiter)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pLoadLimiter = limiter;
+ }
+
+
+
+public:
+
+ //--------------------------------------------------------------
+ // Constructor.
+ //--------------------------------------------------------------
+#ifndef DACCESS_COMPILE
+ Thread();
+#endif
+
+ //--------------------------------------------------------------
+ // Failable initialization occurs here.
+ //--------------------------------------------------------------
+ BOOL InitThread(BOOL fInternal);
+ BOOL AllocHandles();
+
+ void SetupThreadForHost();
+
+ //--------------------------------------------------------------
+ // If the thread was setup through SetupUnstartedThread, rather
+ // than SetupThread, complete the setup here when the thread is
+ // actually running.
+ // WARNING : only GC calls this with bRequiresTSL set to FALSE.
+ //--------------------------------------------------------------
+ BOOL HasStarted(BOOL bRequiresTSL=TRUE);
+
+ // We don't want ::CreateThread() calls scattered throughout the source.
+ // Create all new threads here. The thread is created as suspended, so
+ // you must ::ResumeThread to kick it off. It is guaranteed to create the
+ // thread, or throw.
+ BOOL CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args);
+
+
+ enum StackSizeBucket
+ {
+ StackSize_Small,
+ StackSize_Medium,
+ StackSize_Large
+ };
+
+ //
+ // Creates a raw OS thread; use this only for CLR-internal threads that never execute user code.
+ // StackSizeBucket determines how large the stack should be.
+ //
+ static HANDLE CreateUtilityThread(StackSizeBucket stackSizeBucket, LPTHREAD_START_ROUTINE start, void *args, DWORD flags = 0, DWORD* pThreadId = NULL);
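+
+    // Hypothetical usage sketch (MyUtilityThreadProc and pArgs are illustrative names):
+    //
+    //     DWORD tid;
+    //     HANDLE hWorker = Thread::CreateUtilityThread(Thread::StackSize_Small,
+    //                                                  MyUtilityThreadProc, pArgs,
+    //                                                  0 /* flags */, &tid);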
+
+ //--------------------------------------------------------------
+ // Destructor
+ //--------------------------------------------------------------
+#ifndef DACCESS_COMPILE
+ ~Thread();
+#endif
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ void CoUninitialize();
+ void BaseCoUninitialize();
+ void BaseWinRTUninitialize();
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+
+ void OnThreadTerminate(BOOL holdingLock);
+
+ static void CleanupDetachedThreads();
+ //--------------------------------------------------------------
+ // Returns innermost active Frame.
+ //--------------------------------------------------------------
+ PTR_Frame GetFrame()
+ {
+ SUPPORTS_DAC;
+
+#ifndef DACCESS_COMPILE
+#ifdef _DEBUG_IMPL
+ WRAPPER_NO_CONTRACT;
+ if (this == GetThreadNULLOk())
+ {
+ void* curSP;
+ curSP = (void *)GetCurrentSP();
+ _ASSERTE((curSP <= m_pFrame && m_pFrame < m_CacheStackBase) || m_pFrame == (Frame*) -1);
+ }
+#else
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"NYI");
+#endif
+#endif // #ifndef DACCESS_COMPILE
+ return m_pFrame;
+ }
+
+ //--------------------------------------------------------------
+ // Replaces innermost active Frames.
+ //--------------------------------------------------------------
+#ifndef DACCESS_COMPILE
+ void SetFrame(Frame *pFrame)
+#ifdef _DEBUG
+ ;
+#else
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pFrame = pFrame;
+ }
+#endif
+ ;
+#endif
+ inline Frame* FindFrame(SIZE_T StackPointer);
+
+ bool DetectHandleILStubsForDebugger();
+
+#ifndef DACCESS_COMPILE
+ void SetUnloadBoundaryFrame(Frame *pFrame);
+ void ResetUnloadBoundaryFrame();
+#endif
+
+ PTR_Frame GetUnloadBoundaryFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pUnloadBoundaryFrame;
+ }
+
+ void SetWin32FaultAddress(DWORD eip)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_Win32FaultAddress = eip;
+ }
+
+ void SetWin32FaultCode(DWORD code)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_Win32FaultCode = code;
+ }
+
+ DWORD GetWin32FaultAddress()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Win32FaultAddress;
+ }
+
+ DWORD GetWin32FaultCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_Win32FaultCode;
+ }
+
+#ifdef ENABLE_CONTRACTS
+ ClrDebugState *GetClrDebugState()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pClrDebugState;
+ }
+#endif
+
+ //**************************************************************
+ // GC interaction
+ //**************************************************************
+
+ //--------------------------------------------------------------
+ // Enter cooperative GC mode. NOT NESTABLE.
+ //--------------------------------------------------------------
+ FORCEINLINE_NONDEBUG void DisablePreemptiveGC()
+ {
+#ifndef DACCESS_COMPILE
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(this == GetThread());
+ _ASSERTE(!m_fPreemptiveGCDisabled);
+        // holding a spin lock in preemptive mode while transitioning to cooperative mode would leave
+        // other threads spinning, waiting for the GC
+ _ASSERTE ((m_StateNC & Thread::TSNC_OwnsSpinLock) == 0);
+
+#ifdef ENABLE_CONTRACTS_IMPL
+ TriggersGC(this);
+#endif
+
+ // Logically, we just want to check whether a GC is in progress and halt
+ // at the boundary if it is -- before we disable preemptive GC. However
+ // this opens up a race condition where the GC starts after we make the
+ // check. SuspendRuntime will ignore such a thread because it saw it as
+ // outside the EE. So the thread would run wild during the GC.
+ //
+ // Instead, enter cooperative mode and then check if a GC is in progress.
+ // If so, go back out and try again. The reason we go back out before we
+ // try again, is that SuspendRuntime might have seen us as being in
+ // cooperative mode if it checks us between the next two statements.
+ // In that case, it will be trying to move us to a safe spot. If
+ // we don't let it see us leave, it will keep waiting on us indefinitely.
+
+ // ------------------------------------------------------------------------
+ // ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING ** |
+ // ------------------------------------------------------------------------
+ //
+ // DO NOT CHANGE THIS METHOD WITHOUT VISITING ALL THE STUB GENERATORS
+ // THAT EFFECTIVELY INLINE IT INTO THEIR STUBS
+ //
+ // ------------------------------------------------------------------------
+ // ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING ** |
+ // ------------------------------------------------------------------------
+
+ m_fPreemptiveGCDisabled.StoreWithoutBarrier(1);
+
+ if (g_TrapReturningThreads.LoadWithoutBarrier())
+ {
+ RareDisablePreemptiveGC();
+ }
+#else
+ LIMITED_METHOD_CONTRACT;
+#endif
+ }
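+
+    // Hedged usage note: most runtime code does not call DisablePreemptiveGC /
+    // EnablePreemptiveGC in hand-matched pairs; holder macros such as GCX_COOP (defined
+    // elsewhere in the runtime) wrap them so the mode is restored on every exit path:
+    //
+    //     {
+    //         GCX_COOP();   // enter cooperative mode; reverted automatically on scope exit
+    //         // OBJECTREFs may be manipulated safely here
+    //     }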
+
+ NOINLINE void RareDisablePreemptiveGC();
+
+ void HandleThreadAbort()
+ {
+ HandleThreadAbort(FALSE);
+ }
+ void HandleThreadAbort(BOOL fForce); // fForce=TRUE only for a thread waiting to start AD unload
+
+ void PreWorkForThreadAbort();
+
+private:
+ void HandleThreadAbortTimeout();
+
+public:
+ //--------------------------------------------------------------
+ // Leave cooperative GC mode. NOT NESTABLE.
+ //--------------------------------------------------------------
+ FORCEINLINE_NONDEBUG void EnablePreemptiveGC()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifndef DACCESS_COMPILE
+ _ASSERTE(this == GetThread());
+ _ASSERTE(m_fPreemptiveGCDisabled);
+        // holding a spin lock in cooperative mode while transitioning to preemptive mode would deadlock the GC
+ _ASSERTE ((m_StateNC & Thread::TSNC_OwnsSpinLock) == 0);
+
+#ifdef ENABLE_CONTRACTS_IMPL
+ _ASSERTE(!GCForbidden());
+ TriggersGC(this);
+#endif
+
+ // ------------------------------------------------------------------------
+ // ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING ** |
+ // ------------------------------------------------------------------------
+ //
+ // DO NOT CHANGE THIS METHOD WITHOUT VISITING ALL THE STUB GENERATORS
+ // THAT EFFECTIVELY INLINE IT INTO THEIR STUBS
+ //
+ // ------------------------------------------------------------------------
+ // ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING ** WARNING ** |
+ // ------------------------------------------------------------------------
+
+ m_fPreemptiveGCDisabled.StoreWithoutBarrier(0);
+#ifdef ENABLE_CONTRACTS
+ m_ulEnablePreemptiveGCCount ++;
+#endif // ENABLE_CONTRACTS
+
+ if (CatchAtSafePoint())
+ RareEnablePreemptiveGC();
+#endif
+ }
+
+#if defined(STRESS_HEAP) && defined(_DEBUG)
+ void PerformPreemptiveGC();
+#endif
+ void RareEnablePreemptiveGC();
+ void PulseGCMode();
+
+ //--------------------------------------------------------------
+ // Query mode
+ //--------------------------------------------------------------
+ BOOL PreemptiveGCDisabled()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(this == GetThread());
+ //
+ // m_fPreemptiveGCDisabled is always modified by the thread itself, and so the thread itself
+ // can read it without memory barrier.
+ //
+ return m_fPreemptiveGCDisabled.LoadWithoutBarrier();
+ }
+
+ BOOL PreemptiveGCDisabledOther()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_fPreemptiveGCDisabled);
+ }
+
+#ifdef ENABLE_CONTRACTS_IMPL
+
+ void BeginNoTriggerGC(const char *szFile, int lineNum)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_pClrDebugState->IncrementGCNoTriggerCount();
+ if (PreemptiveGCDisabled())
+ {
+ m_pClrDebugState->IncrementGCForbidCount();
+ }
+ }
+
+ void EndNoTriggerGC()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(m_pClrDebugState->GetGCNoTriggerCount() != 0 || (m_pClrDebugState->ViolationMask() & BadDebugState));
+ m_pClrDebugState->DecrementGCNoTriggerCount();
+
+ if (m_pClrDebugState->GetGCForbidCount())
+ {
+ m_pClrDebugState->DecrementGCForbidCount();
+ }
+ }
+
+ void BeginForbidGC(const char *szFile, int lineNum)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(this == GetThread());
+#ifdef PROFILING_SUPPORTED
+ _ASSERTE(PreemptiveGCDisabled()
+ || CORProfilerPresent() || // This added to allow profiler to use GetILToNativeMapping
+ // while in preemptive GC mode
+ (g_fEEShutDown & (ShutDown_Finalize2 | ShutDown_Profiler)) == ShutDown_Finalize2);
+#else // PROFILING_SUPPORTED
+ _ASSERTE(PreemptiveGCDisabled());
+#endif // PROFILING_SUPPORTED
+ BeginNoTriggerGC(szFile, lineNum);
+ }
+
+ void EndForbidGC()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(this == GetThread());
+#ifdef PROFILING_SUPPORTED
+ _ASSERTE(PreemptiveGCDisabled() ||
+ CORProfilerPresent() || // This added to allow profiler to use GetILToNativeMapping
+ // while in preemptive GC mode
+ (g_fEEShutDown & (ShutDown_Finalize2 | ShutDown_Profiler)) == ShutDown_Finalize2);
+#else // PROFILING_SUPPORTED
+ _ASSERTE(PreemptiveGCDisabled());
+#endif // PROFILING_SUPPORTED
+ EndNoTriggerGC();
+ }
+
+ BOOL GCNoTrigger()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(this == GetThread());
+ if ( (GCViolation|BadDebugState) & m_pClrDebugState->ViolationMask() )
+ {
+ return FALSE;
+ }
+ return m_pClrDebugState->GetGCNoTriggerCount();
+ }
+
+ BOOL GCForbidden()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(this == GetThread());
+ if ( (GCViolation|BadDebugState) & m_pClrDebugState->ViolationMask())
+ {
+ return FALSE;
+ }
+ return m_pClrDebugState->GetGCForbidCount();
+ }
+
+ BOOL RawGCNoTrigger()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (m_pClrDebugState->ViolationMask() & BadDebugState)
+ {
+ return 0;
+ }
+ return m_pClrDebugState->GetGCNoTriggerCount();
+ }
+
+ BOOL RawGCForbidden()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (m_pClrDebugState->ViolationMask() & BadDebugState)
+ {
+ return 0;
+ }
+ return m_pClrDebugState->GetGCForbidCount();
+ }
+#endif // ENABLE_CONTRACTS_IMPL
+
+ //---------------------------------------------------------------
+ // Expose key offsets and values for stub generation.
+ //---------------------------------------------------------------
+ static BYTE GetOffsetOfCurrentFrame()
+ {
+ LIMITED_METHOD_CONTRACT;
+ size_t ofs = offsetof(class Thread, m_pFrame);
+ _ASSERTE(FitsInI1(ofs));
+ return (BYTE)ofs;
+ }
+
+ static BYTE GetOffsetOfState()
+ {
+ LIMITED_METHOD_CONTRACT;
+ size_t ofs = offsetof(class Thread, m_State);
+ _ASSERTE(FitsInI1(ofs));
+ return (BYTE)ofs;
+ }
+
+ static BYTE GetOffsetOfGCFlag()
+ {
+ LIMITED_METHOD_CONTRACT;
+ size_t ofs = offsetof(class Thread, m_fPreemptiveGCDisabled);
+ _ASSERTE(FitsInI1(ofs));
+ return (BYTE)ofs;
+ }
+
+ static void StaticDisablePreemptiveGC( Thread *pThread)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(pThread != NULL);
+ pThread->DisablePreemptiveGC();
+ }
+
+ static void StaticEnablePreemptiveGC( Thread *pThread)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(pThread != NULL);
+ pThread->EnablePreemptiveGC();
+ }
+
+
+ //---------------------------------------------------------------
+ // Expose offset of the app domain word for the interop and delegate callback
+ //---------------------------------------------------------------
+ static SIZE_T GetOffsetOfAppDomain()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (SIZE_T)(offsetof(class Thread, m_pDomain));
+ }
+
+ //---------------------------------------------------------------
+ // Expose offset of the place for storing the filter context for the debugger.
+ //---------------------------------------------------------------
+ static SIZE_T GetOffsetOfDebuggerFilterContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (SIZE_T)(offsetof(class Thread, m_debuggerFilterContext));
+ }
+
+ //---------------------------------------------------------------
+ // Expose offset of the debugger word for the debugger
+ //---------------------------------------------------------------
+ static SIZE_T GetOffsetOfDebuggerWord()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (SIZE_T)(offsetof(class Thread, m_debuggerWord));
+ }
+
+ //---------------------------------------------------------------
+ // Expose offset of the debugger cant stop count for the debugger
+ //---------------------------------------------------------------
+ static SIZE_T GetOffsetOfCantStop()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (SIZE_T)(offsetof(class Thread, m_debuggerCantStop));
+ }
+
+ //---------------------------------------------------------------
+ // Expose offset of m_StateNC
+ //---------------------------------------------------------------
+ static SIZE_T GetOffsetOfStateNC()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (SIZE_T)(offsetof(class Thread, m_StateNC));
+ }
+
+ //---------------------------------------------------------------
+ // Last exception to be thrown
+ //---------------------------------------------------------------
+ inline void SetThrowable(OBJECTREF pThrowable
+ DEBUG_ARG(ThreadExceptionState::SetThrowableErrorChecking stecFlags = ThreadExceptionState::STEC_All));
+
+ OBJECTREF GetThrowable()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return m_ExceptionState.GetThrowable();
+ }
+
+    // An unmanaged thread can check whether a managed thread is processing an exception
+ BOOL HasException()
+ {
+ LIMITED_METHOD_CONTRACT;
+ OBJECTHANDLE pThrowable = m_ExceptionState.GetThrowableAsHandle();
+ return pThrowable && *PTR_UNCHECKED_OBJECTREF(pThrowable);
+ }
+
+ OBJECTHANDLE GetThrowableAsHandle()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ExceptionState.GetThrowableAsHandle();
+ }
+
+ // special null test (for use when we're in the wrong GC mode)
+ BOOL IsThrowableNull()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsHandleNullUnchecked(m_ExceptionState.GetThrowableAsHandle());
+ }
+
+ BOOL IsExceptionInProgress()
+ {
+ SUPPORTS_DAC;
+ LIMITED_METHOD_CONTRACT;
+ return m_ExceptionState.IsExceptionInProgress();
+ }
+
+
+ //---------------------------------------------------------------
+ // Per-thread information used by handler
+ //---------------------------------------------------------------
+ // exception handling info stored in thread
+ // can't allocate this as needed because can't make exception-handling depend upon memory allocation
+
+ PTR_ThreadExceptionState GetExceptionState()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return PTR_ThreadExceptionState(PTR_HOST_MEMBER_TADDR(Thread, this, m_ExceptionState));
+ }
+
+ // Access to the Context this thread is executing in.
+ Context *GetContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+#ifndef DACCESS_COMPILE
+
+    // If another thread is asking about our thread, we could be in the middle of an AD transition, so
+    // the context and AD may not match if we have set one but not the other. We can live without the
+    // check when another thread is asking, since this method is mostly called on our own thread, which
+    // will mostly get the checking. If we are in the middle of a transition, this could return either
+    // the old or the new AD. But no matter what we do, such as locking on the transition, the answer
+    // could still change right after we asked, so there is really no point.
+ _ASSERTE((this != GetThreadNULLOk()) || (m_Context == NULL && m_pDomain == NULL) || (m_Context->GetDomain() == m_pDomain) || g_fEEShutDown);
+#endif // DACCESS_COMPILE
+ return m_Context;
+ }
+
+#ifdef FEATURE_REMOTING
+ void SetExposedContext(Context *c);
+#endif
+
+ // This callback is used when we are executing in the EE and discover that we need
+ // to switch appdomains.
+ //
+ // Set the last parameter to FALSE if you want to perform the AD transition *without*
+ // EH (this can affect marshalling of exceptions).
+ void DoADCallBack(ADID appDomain , Context::ADCallBackFcnType pTarget, LPVOID args, BOOL fSetupEHAtTransition = TRUE);
+ void DoADCallBack(AppDomain* pDomain , Context::ADCallBackFcnType pTarget, LPVOID args, DWORD dwADV, BOOL fSetupEHAtTransition = TRUE);
+ void DoContextCallBack(ADID appDomain, Context* c , Context::ADCallBackFcnType pTarget, LPVOID args);
+
+ // Except for security and the call in from the remoting code in mscorlib, you should never do an
+ // AppDomain transition directly through these functions. Rather, you should use DoADCallBack above
+ // to call into managed code to perform the transition for you so that the correct policy code etc
+    // is run on the transition.
+ void EnterContextRestricted(Context* c, ContextTransitionFrame* pFrame);
+ void ReturnToContext(ContextTransitionFrame *pFrame);
+
+private:
+ typedef enum {
+ RaiseCrossContextSuccess,
+ RaiseCrossContextRetry,
+ RaiseCrossContextClassInit
+ } RaiseCrossContextResult;
+
+
+ // The "orBlob" stores the serialized image of a managed Exception object as it gets marshaled
+ // across AD boundaries.
+ //
+ // In Telesto, we don't support true appdomain marshaling so the "orBlob" is in fact an
+ // agile wrapper object whose ToString() echoes the original exception's ToString().
+#ifdef FEATURE_CORECLR
+ typedef OBJECTREF ORBLOBREF;
+#else
+ typedef U1ARRAYREF ORBLOBREF;
+#endif
+
+ RaiseCrossContextResult TryRaiseCrossContextException(Exception **ppExOrig,
+ Exception *pException,
+ RuntimeExceptionKind *pKind,
+ OBJECTREF *ppThrowable,
+ ORBLOBREF *pOrBlob);
+public:
+
+ void DECLSPEC_NORETURN RaiseCrossContextException(Exception* pEx, ContextTransitionFrame* pFrame);
+ void RaiseCrossContextExceptionHelper(Exception* pEx,ContextTransitionFrame* pFrame);
+
+    // ClearContext is to be called only during shutdown.
+ void ClearContext();
+
+ // Used by security to prevent recursive stackwalking.
+ BOOL IsSecurityStackwalkInProgess()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fSecurityStackwalk;
+ }
+
+ void SetSecurityStackwalkInProgress(BOOL fSecurityStackwalk)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_fSecurityStackwalk = fSecurityStackwalk;
+ }
+
+private:
+ void ReturnToContextAndThrow(ContextTransitionFrame* pFrame, EEException* pEx, BOOL* pContextSwitched);
+ void ReturnToContextAndOOM(ContextTransitionFrame* pFrame);
+
+private:
+ // don't ever call these except when creating thread!!!!!
+ void InitContext();
+
+ BOOL m_fSecurityStackwalk;
+
+public:
+ PTR_AppDomain GetDomain(INDEBUG(BOOL fMidContextTransitionOK = FALSE))
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+    // If another thread is asking about our thread, we could be in the middle of an AD transition, so
+    // the context and AD may not match if we have set one but not the other. We can live without the
+    // check when another thread is asking, since this method is mostly called on our own thread, which
+    // will mostly get the checking. If we are in the middle of a transition, this could return either
+    // the old or the new AD. But no matter what we do, such as locking on the transition, the answer
+    // could still change right after we asked, so there is really no point.
+#ifdef _DEBUG_IMPL
+ BEGIN_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
+ if (!g_fEEShutDown && this == GetThread())
+ {
+ if (!fMidContextTransitionOK)
+ {
+ // We also want to suppress the "domain on context == domain on thread" check if this might
+ // be called during a context or AD transition (in which case fMidContextTransitionOK is nonzero).
+ // A profiler stackwalk can occur at arbitrary times, including during these transitions, but
+ // the stackwalk is still safe to do at this point, so we don't want to trigger this assert.
+ _ASSERTE((m_Context == NULL && m_pDomain == NULL) || m_Context->GetDomain() == m_pDomain);
+ }
+ AppDomain* valueInTLSSlot = GetAppDomain();
+ _ASSERTE(valueInTLSSlot == 0 || valueInTLSSlot == m_pDomain);
+ }
+ END_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
+#endif
+
+ return m_pDomain;
+ }
+
+ Frame *IsRunningIn(AppDomain* pDomain, int *count);
+ Frame *GetFirstTransitionInto(AppDomain *pDomain, int *count);
+
+ BOOL ShouldChangeAbortToUnload(Frame *pFrame, Frame *pUnloadBoundaryFrame=NULL);
+
+ // Get outermost (oldest) AppDomain for this thread.
+ AppDomain *GetInitialDomain();
+
+ //---------------------------------------------------------------
+ // Track use of the thread block. See the general comments on
+ // thread destruction in threads.cpp, for details.
+ //---------------------------------------------------------------
+ int IncExternalCount();
+ int DecExternalCount(BOOL holdingLock);
+
+
+ //---------------------------------------------------------------
+ // !!!! THESE ARE NOT SAFE FOR GENERAL USE !!!!
+ // IncExternalCountDANGEROUSProfilerOnly()
+ // DecExternalCountDANGEROUSProfilerOnly()
+ // Currently only the profiler API should be using these
+ // functions, because the profiler is responsible for ensuring
+ // that the thread exists, undestroyed, before operating on it.
+ // All other clients should use IncExternalCount/DecExternalCount
+ // instead
+ //---------------------------------------------------------------
+ int IncExternalCountDANGEROUSProfilerOnly()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _DEBUG
+ int cRefs =
+#else // _DEBUG
+ return
+#endif //_DEBUG
+ FastInterlockIncrement((LONG*)&m_ExternalRefCount);
+
+#ifdef _DEBUG
+ // This should never be called on a thread being destroyed
+ _ASSERTE(cRefs != 1);
+ return cRefs;
+#endif //_DEBUG
+ }
+
+ int DecExternalCountDANGEROUSProfilerOnly()
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef _DEBUG
+ int cRefs =
+#else // _DEBUG
+ return
+#endif //_DEBUG
+
+ FastInterlockDecrement((LONG*)&m_ExternalRefCount);
+
+#ifdef _DEBUG
+ // This should never cause the last reference on the thread to be released
+ _ASSERTE(cRefs != 0);
+ return cRefs;
+#endif //_DEBUG
+ }
+
+ // Get and Set the exposed System.Thread object which corresponds to
+ // this thread. Also the thread handle and Id.
+ OBJECTREF GetExposedObject();
+ OBJECTREF GetExposedObjectRaw();
+ void SetExposedObject(OBJECTREF exposed);
+ OBJECTHANDLE GetExposedObjectHandleForDebugger()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ExposedObject;
+ }
+
+ // Query whether the exposed object exists
+ BOOL IsExposedObjectSet()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ return (ObjectFromHandle(m_ExposedObject) != NULL) ;
+ }
+#ifndef FEATURE_CORECLR
+ void GetSynchronizationContext(OBJECTREF *pSyncContextObj)
+ {
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ NOTHROW;
+ PRECONDITION(CheckPointer(pSyncContextObj));
+ }
+ CONTRACTL_END;
+
+ *pSyncContextObj = NULL;
+
+ THREADBASEREF ExposedThreadObj = (THREADBASEREF)GetExposedObjectRaw();
+ if (ExposedThreadObj != NULL)
+ *pSyncContextObj = ExposedThreadObj->GetSynchronizationContext();
+ }
+#endif //!FEATURE_CORECLR
+#ifdef FEATURE_COMPRESSEDSTACK
+ OBJECTREF GetCompressedStack()
+ {
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+ THREADBASEREF ExposedThreadObj = (THREADBASEREF)GetExposedObjectRaw();
+ if (ExposedThreadObj != NULL)
+ return (OBJECTREF)(ExposedThreadObj->GetCompressedStack());
+ return NULL;
+ }
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+
+ // When we create a managed thread, the thread is suspended. We call StartThread to get
+    // the thread started.
+ DWORD StartThread();
+
+ // The result of attempting to OS-suspend an EE thread.
+ enum SuspendThreadResult
+ {
+ // We successfully suspended the thread. This is the only
+ // case where the caller should subsequently call ResumeThread.
+ STR_Success,
+
+ // The underlying call to the operating system's SuspendThread
+ // or GetThreadContext failed. This is usually taken to mean
+        // that the OS thread has exited. (This can possibly also mean
+        // that the suspension count exceeded the allowed maximum, but
+ // Thread::SuspendThread asserts that does not happen.)
+ STR_Failure,
+
+ // The thread handle is invalid. This means that the thread
+ // is dead (or dying), or that the object has been created for
+ // an exposed System.Thread that has not been started yet.
+ STR_UnstartedOrDead,
+
+ // The fOneTryOnly flag was set, and we managed to OS suspend the
+ // thread, but we found that it had its m_dwForbidSuspendThread
+ // flag set. If fOneTryOnly is not set, Thread::Suspend will
+ // retry in this case.
+ STR_Forbidden,
+
+ // Stress logging is turned on, but no stress log had been created
+ // for the thread yet, and we failed to create one. This can mean
+ // that either we are not allowed to call into the host, or we ran
+ // out of memory.
+ STR_NoStressLog,
+
+ // The EE thread is currently switched out. This can only happen
+ // if we are hosted and the host schedules EE threads on fibers.
+ STR_SwitchedOut,
+ };
+
+#ifndef DISABLE_THREADSUSPEND
+ // SuspendThread
+ // Attempts to OS-suspend the thread, whichever GC mode it is in.
+ // Arguments:
+ // fOneTryOnly - If TRUE, report failure if the thread has its
+ // m_dwForbidSuspendThread flag set. If FALSE, retry.
+ // pdwSuspendCount - If non-NULL, will contain the return code
+ // of the underlying OS SuspendThread call on success,
+ // undefined on any kind of failure.
+ // Return value:
+ // A SuspendThreadResult value indicating success or failure.
+ SuspendThreadResult SuspendThread(BOOL fOneTryOnly = FALSE, DWORD *pdwSuspendCount = NULL);
+
+ DWORD ResumeThread();
+#endif // DISABLE_THREADSUSPEND
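+
+    // Illustrative caller sketch (hypothetical; pTarget is an illustrative name): with
+    // fOneTryOnly=TRUE the caller owns the retry when the target sits in a forbid-suspend
+    // region:
+    //
+    //     for (;;)
+    //     {
+    //         Thread::SuspendThreadResult str = pTarget->SuspendThread(TRUE);
+    //         if (str == Thread::STR_Success)
+    //         {
+    //             // ... inspect the suspended target ..., then:
+    //             pTarget->ResumeThread();
+    //             break;
+    //         }
+    //         if (str != Thread::STR_Forbidden)
+    //             break;       // dead, unstarted, or a hard failure: give up
+    //         // otherwise back off briefly and retry
+    //     }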
+
+ int GetThreadPriority();
+ BOOL SetThreadPriority(
+ int nPriority // thread priority level
+ );
+ BOOL Alert ();
+ DWORD Join(DWORD timeout, BOOL alertable);
+ DWORD JoinEx(DWORD timeout, WaitMode mode);
+
+ BOOL GetThreadContext(
+ LPCONTEXT lpContext // context structure
+ )
+ {
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ _ASSERTE (m_pHostTask == NULL || GetThreadHandle() != SWITCHOUT_HANDLE_VALUE);
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ return ::GetThreadContext (GetThreadHandle(), lpContext);
+ }
+
+#ifndef DACCESS_COMPILE
+ BOOL SetThreadContext(
+ CONST CONTEXT *lpContext // context structure
+ )
+ {
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ _ASSERTE (m_pHostTask == NULL || GetThreadHandle() != SWITCHOUT_HANDLE_VALUE);
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ return ::SetThreadContext (GetThreadHandle(), lpContext);
+ }
+#endif
+
+ BOOL HasValidThreadHandle ()
+ {
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ return m_pHostTask != NULL ||
+ GetThreadHandle() != INVALID_HANDLE_VALUE;
+#else // !FEATURE_INCLUDE_ALL_INTERFACES
+ return GetThreadHandle() != INVALID_HANDLE_VALUE;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ }
+
+ DWORD GetThreadId()
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(m_ThreadId != UNINITIALIZED_THREADID);
+ return m_ThreadId;
+ }
+
+ DWORD GetOSThreadId()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+#ifndef DACCESS_COMPILE
+ _ASSERTE (m_OSThreadId != 0xbaadf00d);
+#endif // !DACCESS_COMPILE
+ return m_OSThreadId;
+ }
+
+ // This API is to be used by the Debugger only.
+ // We need to be able to return the true value of m_OSThreadId.
+ //
+ DWORD GetOSThreadIdForDebugger()
+ {
+ SUPPORTS_DAC;
+ LIMITED_METHOD_CONTRACT;
+ return m_OSThreadId;
+ }
+
+ TASKID GetTaskId()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_TaskId;
+ }
+ CONNID GetConnectionId()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwConnectionId;
+ }
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTask* GetHostTask() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pHostTask;
+ }
+
+ IHostTask* GetHostTaskWithAddRef();
+
+ void ReleaseHostTask();
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ void SetConnectionId(CONNID dwConnectionId)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_dwConnectionId = dwConnectionId;
+ }
+
+ BOOL IsThreadPoolThread()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_State & (Thread::TS_TPWorkerThread | Thread::TS_CompletionPortThread);
+ }
+
+ // public suspend functions. System ones are internal, like for GC. User ones
+ // correspond to suspend/resume calls on the exposed System.Thread object.
+ static bool SysStartSuspendForDebug(AppDomain *pAppDomain);
+ static bool SysSweepThreadsForDebug(bool forceSync);
+ static void SysResumeFromDebug(AppDomain *pAppDomain);
+#ifndef FEATURE_CORECLR
+ void UserSuspendThread();
+ BOOL UserResumeThread();
+#endif // FEATURE_CORECLR
+
+ void UserSleep(INT32 time);
+
+ // AD unload uses ThreadAbort support. We need to distinguish between pure ThreadAbort
+ // and AD unload cases.
+ enum ThreadAbortRequester
+ {
+ TAR_Thread = 0x00000001, // Request by Thread
+ TAR_ADUnload = 0x00000002, // Request by AD unload
+ TAR_FuncEval = 0x00000004, // Request by Func-Eval
+ TAR_StackOverflow = 0x00000008, // Request by StackOverflow. TAR_Thread should be set at the same time.
+ TAR_ALL = 0xFFFFFFFF,
+ };
+
+private:
+
+ //
+ // Bit mask for tracking which aborts came in and why.
+ //
+ enum ThreadAbortInfo
+ {
+ TAI_ThreadAbort = 0x00000001,
+ TAI_ThreadV1Abort = 0x00000002,
+ TAI_ThreadRudeAbort = 0x00000004,
+ TAI_ADUnloadAbort = 0x00000008,
+ TAI_ADUnloadV1Abort = 0x00000010,
+ TAI_ADUnloadRudeAbort = 0x00000020,
+ TAI_FuncEvalAbort = 0x00000040,
+ TAI_FuncEvalV1Abort = 0x00000080,
+ TAI_FuncEvalRudeAbort = 0x00000100,
+ TAI_ForADUnloadThread = 0x10000000, // AD unload thread is working on the thread
+ };
+
+ static const DWORD TAI_AnySafeAbort = (TAI_ThreadAbort |
+ TAI_ADUnloadAbort |
+ TAI_FuncEvalAbort
+ );
+
+ static const DWORD TAI_AnyV1Abort = (TAI_ThreadV1Abort |
+ TAI_ADUnloadV1Abort |
+ TAI_FuncEvalV1Abort
+ );
+
+ static const DWORD TAI_AnyRudeAbort = (TAI_ThreadRudeAbort |
+ TAI_ADUnloadRudeAbort |
+ TAI_FuncEvalRudeAbort
+ );
+
+ static const DWORD TAI_AnyFuncEvalAbort = (TAI_FuncEvalAbort |
+ TAI_FuncEvalV1Abort |
+ TAI_FuncEvalRudeAbort
+ );
+
+
+ // Specifies type of thread abort.
+ DWORD m_AbortInfo;
+ DWORD m_AbortType;
+ ULONGLONG m_AbortEndTime;
+ ULONGLONG m_RudeAbortEndTime;
+ BOOL m_fRudeAbortInitiated;
+ LONG m_AbortController;
+
+ static ULONGLONG s_NextSelfAbortEndTime;
+
+ void SetRudeAbortEndTimeFromEEPolicy();
+
+ // This is a spin lock to serialize setting/resetting of AbortType and AbortRequest.
+ LONG m_AbortRequestLock;
+
+ static void LockAbortRequest(Thread *pThread);
+ static void UnlockAbortRequest(Thread *pThread);
+
+ typedef Holder<Thread*, Thread::LockAbortRequest, Thread::UnlockAbortRequest> AbortRequestLockHolder;
+
+ static void AcquireAbortControl(Thread *pThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockIncrement (&pThread->m_AbortController);
+ }
+
+ static void ReleaseAbortControl(Thread *pThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE (pThread->m_AbortController > 0);
+ FastInterlockDecrement (&pThread->m_AbortController);
+ }
+
+ typedef Holder<Thread*, Thread::AcquireAbortControl, Thread::ReleaseAbortControl> AbortControlHolder;
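+
+ // Editorial sketch (not in the original header): both holders follow the
+ // usual RAII pattern, making the lock/count management exception-safe.
+ // Hypothetical usage inside a Thread member function:
+ //
+ //     {
+ //         AbortRequestLockHolder lock(this);  // runs LockAbortRequest(this)
+ //         // ... read or update m_AbortInfo / m_AbortType ...
+ //     }   // destructor runs UnlockAbortRequest(this), even on exceptions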
+
+ BOOL IsBeingAbortedForADUnload()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_AbortInfo & TAI_ForADUnloadThread) != 0;
+ }
+
+ void ResetBeginAbortedForADUnload();
+
+public:
+#ifdef _DEBUG
+ BOOL m_fRudeAborted;
+ DWORD m_dwAbortPoint;
+#endif
+
+
+public:
+ enum UserAbort_Client
+ {
+ UAC_Normal,
+ UAC_Host, // Called by host through IClrTask::Abort
+ UAC_WatchDog, // Called by ADUnload helper thread
+ UAC_FinalizerTimeout,
+ };
+
+ HRESULT UserAbort(ThreadAbortRequester requester,
+ EEPolicy::ThreadAbortTypes abortType,
+ DWORD timeout,
+ UserAbort_Client client
+ );
+
+ BOOL HandleJITCaseForAbort();
+
+ void UserResetAbort(ThreadAbortRequester requester)
+ {
+ InternalResetAbort(requester, FALSE);
+ }
+ void EEResetAbort(ThreadAbortRequester requester)
+ {
+ InternalResetAbort(requester, TRUE);
+ }
+
+private:
+ void InternalResetAbort(ThreadAbortRequester requester, BOOL fResetRudeAbort);
+
+ void SetAbortEndTime(ULONGLONG endTime, BOOL fRudeAbort);
+
+public:
+
+ ULONGLONG GetAbortEndTime()
+ {
+ WRAPPER_NO_CONTRACT;
+ return IsRudeAbort()?m_RudeAbortEndTime:m_AbortEndTime;
+ }
+
+ // We distinguish between interrupting a thread for Thread.Interrupt and for other usage.
+ // For Thread.Interrupt usage, we will interrupt an alertable wait using the same
+ // rule as ReadyForAbort. A wait in an EH clause or CER region is not interrupted.
+ // For other usage, we will try to Abort the thread.
+ // If we cannot do the operation, we will delay it until the next wait.
+ enum ThreadInterruptMode
+ {
+ TI_Interrupt = 0x00000001, // Requested by Thread.Interrupt
+ TI_Abort = 0x00000002, // Requested by Thread.Abort or AppDomain.Unload
+ };
+
+private:
+ BOOL ReadyForInterrupt()
+ {
+ return ReadyForAsyncException(TI_Interrupt);
+ }
+
+ BOOL ReadyForAsyncException(ThreadInterruptMode mode);
+
+public:
+ inline BOOL IsYieldRequested()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_State & TS_YieldRequested);
+ }
+
+ void UserInterrupt(ThreadInterruptMode mode);
+
+ void SetAbortRequest(EEPolicy::ThreadAbortTypes abortType); // Should only be called by ADUnload
+ BOOL ReadyForAbort()
+ {
+ return ReadyForAsyncException(TI_Abort);
+ }
+
+ BOOL IsRudeAbort();
+ BOOL IsRudeAbortOnlyForADUnload();
+ BOOL IsRudeUnload();
+ BOOL IsFuncEvalAbort();
+
+#if defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK)
+ BOOL IsSafeToInjectThreadAbort(PTR_CONTEXT pContextToCheck);
+#endif // defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK)
+
+ inline BOOL IsAbortRequested()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_State & TS_AbortRequested);
+ }
+
+ inline BOOL IsAbortInitiated()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_State & TS_AbortInitiated);
+ }
+
+ inline BOOL IsRudeAbortInitiated()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return IsAbortRequested() && m_fRudeAbortInitiated;
+ }
+
+ inline void SetAbortInitiated()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (IsRudeAbort()) {
+ m_fRudeAbortInitiated = TRUE;
+ }
+ FastInterlockOr((ULONG *)&m_State, TS_AbortInitiated);
+ // The following should be factored better, but I'm looking for a minimal V1 change.
+ ResetUserInterrupted();
+ }
+
+ inline void ResetAbortInitiated()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockAnd((ULONG *)&m_State, ~TS_AbortInitiated);
+ m_fRudeAbortInitiated = FALSE;
+ }
+
+ inline void SetPreparingAbort()
+ {
+ WRAPPER_NO_CONTRACT;
+ SetThreadStateNC(TSNC_PreparingAbort);
+ }
+
+ inline void ResetPreparingAbort()
+ {
+ WRAPPER_NO_CONTRACT;
+ ResetThreadStateNC(TSNC_PreparingAbort);
+ }
+
+private:
+ inline static void SetPreparingAbortForHolder()
+ {
+ GetThread()->SetPreparingAbort();
+ }
+ inline static void ResetPreparingAbortForHolder()
+ {
+ GetThread()->ResetPreparingAbort();
+ }
+ typedef StateHolder<Thread::SetPreparingAbortForHolder, Thread::ResetPreparingAbortForHolder> PreparingAbortHolder;
+
+public:
+
+ inline void SetIsCreatingTypeInitException()
+ {
+ WRAPPER_NO_CONTRACT;
+ SetThreadStateNC(TSNC_CreatingTypeInitException);
+ }
+
+ inline void ResetIsCreatingTypeInitException()
+ {
+ WRAPPER_NO_CONTRACT;
+ ResetThreadStateNC(TSNC_CreatingTypeInitException);
+ }
+
+ inline BOOL IsCreatingTypeInitException()
+ {
+ WRAPPER_NO_CONTRACT;
+ return HasThreadStateNC(TSNC_CreatingTypeInitException);
+ }
+
+private:
+ void SetAbortRequestBit();
+
+ void RemoveAbortRequestBit();
+
+public:
+ void MarkThreadForAbort(ThreadAbortRequester requester, EEPolicy::ThreadAbortTypes abortType, BOOL fTentative = FALSE);
+ void UnmarkThreadForAbort(ThreadAbortRequester requester, BOOL fForce = TRUE);
+
+private:
+ static void ThreadAbortWatchDogAbort(Thread *pThread);
+ static void ThreadAbortWatchDogEscalate(Thread *pThread);
+
+public:
+ static void ThreadAbortWatchDog();
+
+ static ULONGLONG GetNextSelfAbortEndTime()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return s_NextSelfAbortEndTime;
+ }
+
+#ifdef FEATURE_HIJACK
+ // Tricks for resuming threads from fully interruptible code with a ThreadStop.
+ BOOL ResumeUnderControl(T_CONTEXT *pCtx);
+#endif // FEATURE_HIJACK
+
+ enum InducedThrowReason {
+ InducedThreadStop = 1,
+ InducedThreadRedirect = 2,
+ InducedThreadRedirectAtEndOfCatch = 3,
+ };
+
+ DWORD m_ThrewControlForThread; // flag that is set when the thread deliberately raises an exception for stop/abort
+
+ inline DWORD ThrewControlForThread()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ThrewControlForThread;
+ }
+
+ inline void SetThrowControlForThread(InducedThrowReason reason)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_ThrewControlForThread = reason;
+ }
+
+ inline void ResetThrowControlForThread()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_ThrewControlForThread = 0;
+ }
+
+ PTR_CONTEXT m_OSContext; // ptr to a Context structure used to record the OS specific ThreadContext for a thread
+ // this is used for thread stop/abort and is initialized on demand
+
+ PT_CONTEXT GetAbortContext ();
+
+ // These will only ever be called from the debugger's helper
+ // thread.
+ //
+ // When a thread is being created after a debug suspension has
+ // started, we get the event on the debugger helper thread. It
+ // will turn around and call this to set the debug suspend pending
+ // flag on the newly created thread, since it was missed by
+ // SysStartSuspendForGC as it didn't exist when that function was
+ // run.
+ void MarkForDebugSuspend();
+
+ // When the debugger uses the trace flag to single step a thread,
+ // it also calls this function to mark this info in the thread's
+ // state. The out-of-process portion of the debugger will read the
+ // thread's state for a variety of reasons, including looking for
+ // this flag.
+ void MarkDebuggerIsStepping(bool onOff)
+ {
+ WRAPPER_NO_CONTRACT;
+ if (onOff)
+ SetThreadStateNC(Thread::TSNC_DebuggerIsStepping);
+ else
+ ResetThreadStateNC(Thread::TSNC_DebuggerIsStepping);
+ }
+
+#ifdef _TARGET_ARM_
+ // ARM doesn't currently support any reliable hardware mechanism for single-stepping. Instead we emulate
+ // this in software. This support is used only by the debugger.
+private:
+ ArmSingleStepper m_singleStepper;
+public:
+#ifndef DACCESS_COMPILE
+ // Given the context with which this thread shall be resumed and the first WORD of the instruction that
+ // should be executed next (this is not always the WORD under PC since the debugger uses this mechanism to
+ // skip breakpoints written into the code), set the thread up to execute one instruction and then throw an
+ // EXCEPTION_SINGLE_STEP. (In fact an EXCEPTION_BREAKPOINT will be thrown, but this is fixed up in our
+ // first chance exception handler, see IsDebuggerFault in excep.cpp).
+ void EnableSingleStep()
+ {
+ m_singleStepper.Enable();
+ }
+
+ void BypassWithSingleStep(DWORD ip, WORD opcode1, WORD opcode2)
+ {
+ m_singleStepper.Bypass(ip, opcode1, opcode2);
+ }
+
+ void DisableSingleStep()
+ {
+ m_singleStepper.Disable();
+ }
+
+ void ApplySingleStep(CONTEXT *pCtx)
+ {
+ m_singleStepper.Apply(pCtx);
+ }
+
+ bool IsSingleStepEnabled() const
+ {
+ return m_singleStepper.IsEnabled();
+ }
+
+ // Fixup code called by our vectored exception handler to complete the emulation of single stepping
+ // initiated by EnableSingleStep above. Returns true if the exception was indeed encountered during
+ // stepping.
+ bool HandleSingleStep(CONTEXT *pCtx, DWORD dwExceptionCode)
+ {
+ return m_singleStepper.Fixup(pCtx, dwExceptionCode);
+ }
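+
+ // Editorial sketch (not in the original header): the flow described above
+ // is roughly the following; ctx and dwExceptionCode are hypothetical locals
+ // owned by the debugger-side code.
+ //
+ //     pThread->EnableSingleStep();        // arm the software stepper
+ //     pThread->ApplySingleStep(&ctx);     // fix up the resume context
+ //     // ... resume the thread; later, in the vectored exception handler:
+ //     if (pThread->HandleSingleStep(&ctx, dwExceptionCode))
+ //     {
+ //         // the exception was our emulated single step
+ //     }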
+#endif // !DACCESS_COMPILE
+#endif // _TARGET_ARM_
+
+ private:
+
+ PendingTypeLoadHolder* m_pPendingTypeLoad;
+
+ public:
+
+#ifndef DACCESS_COMPILE
+ PendingTypeLoadHolder* GetPendingTypeLoad()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pPendingTypeLoad;
+ }
+
+ void SetPendingTypeLoad(PendingTypeLoadHolder* pPendingTypeLoad)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pPendingTypeLoad = pPendingTypeLoad;
+ }
+#endif
+
+#ifdef FEATURE_PREJIT
+
+ private:
+
+ ThreadLocalIBCInfo* m_pIBCInfo;
+
+ public:
+
+#ifndef DACCESS_COMPILE
+
+ ThreadLocalIBCInfo* GetIBCInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(g_IBCLogger.InstrEnabled());
+ return m_pIBCInfo;
+ }
+
+ void SetIBCInfo(ThreadLocalIBCInfo* pInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(g_IBCLogger.InstrEnabled());
+ m_pIBCInfo = pInfo;
+ }
+
+ void FlushIBCInfo()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (m_pIBCInfo != NULL)
+ m_pIBCInfo->FlushDelayedCallbacks();
+ }
+
+#endif // #ifndef DACCESS_COMPILE
+
+#endif // #ifdef FEATURE_PREJIT
+
+ // Indicate whether this thread should run in the background. Background threads
+ // don't interfere with the EE shutting down, whereas a running non-background
+ // thread prevents us from shutting down (except through System.Exit(), of course).
+ // WARNING: only GC calls this with bRequiresTSL set to FALSE.
+ void SetBackground(BOOL isBack, BOOL bRequiresTSL=TRUE);
+
+ // When the thread starts running, make sure it is running in the correct apartment
+ // and context.
+ BOOL PrepareApartmentAndContext();
+
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ // Retrieve the apartment state of the current thread. There are three possible
+ // states: thread hosts an STA, thread is part of the MTA or thread state is
+ // undecided. The last state may indicate that the apartment has not been set at
+ // all (nobody has called CoInitializeEx) or that the EE does not know the
+ // current state (EE has not called CoInitializeEx).
+ enum ApartmentState { AS_InSTA, AS_InMTA, AS_Unknown };
+ ApartmentState GetApartment();
+ ApartmentState GetApartmentRare(Thread::ApartmentState as);
+ ApartmentState GetExplicitApartment();
+
+ // Sets the apartment state if it has not already been set and
+ // returns the state.
+ ApartmentState GetFinalApartment();
+
+ // Attempt to set current thread's apartment state. The actual apartment state
+ // achieved is returned and may differ from the input state if someone managed to
+ // call CoInitializeEx on this thread first (note that calls to SetApartment made
+ // before the thread has started are guaranteed to succeed).
+ // The fFireMDAOnMismatch indicates if we should fire the apartment state probe
+ // on an apartment state mismatch.
+ ApartmentState SetApartment(ApartmentState state, BOOL fFireMDAOnMismatch);
+
+ // When we get an apartment tear-down notification,
+ // we want to reset the apartment state we cache on the thread.
+ VOID ResetApartment();
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
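+
+ // Editorial sketch (not in the original header): typical interop code asks
+ // for the apartment first and only tries to set it while it is still
+ // undecided (pThread is hypothetical):
+ //
+ //     Thread::ApartmentState as = pThread->GetApartment();
+ //     if (as == Thread::AS_Unknown)
+ //         as = pThread->SetApartment(Thread::AS_InMTA, FALSE /* no MDA */);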
+
+ // Either perform WaitForSingleObject or MsgWaitForSingleObject as appropriate.
+ DWORD DoAppropriateWait(int countHandles, HANDLE *handles, BOOL waitAll,
+ DWORD millis, WaitMode mode,
+ PendingSync *syncInfo = 0);
+
+ DWORD DoAppropriateWait(AppropriateWaitFunc func, void *args, DWORD millis,
+ WaitMode mode, PendingSync *syncInfo = 0);
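+
+ // Editorial sketch (not in the original header): an alertable wait on a
+ // single handle might look like this, assuming the WaitMode_Alertable flag
+ // declared earlier in this header (h and pThread are hypothetical):
+ //
+ //     DWORD ret = pThread->DoAppropriateWait(1, &h, TRUE /* waitAll */,
+ //                                            INFINITE, WaitMode_Alertable);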
+#ifndef FEATURE_CORECLR
+ DWORD DoSignalAndWait(HANDLE *handles, DWORD millis, BOOL alertable,
+ PendingSync *syncState = 0);
+#endif
+private:
+ void DoAppropriateWaitWorkerAlertableHelper(WaitMode mode);
+ DWORD DoAppropriateWaitWorker(int countHandles, HANDLE *handles, BOOL waitAll,
+ DWORD millis, WaitMode mode);
+ DWORD DoAppropriateWaitWorker(AppropriateWaitFunc func, void *args,
+ DWORD millis, WaitMode mode);
+#ifndef FEATURE_CORECLR
+ DWORD DoSignalAndWaitWorker(HANDLE* pHandles, DWORD millis,BOOL alertable);
+#endif
+ DWORD DoAppropriateAptStateWait(int numWaiters, HANDLE* pHandles, BOOL bWaitAll, DWORD timeout, WaitMode mode);
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+ DWORD DoSyncContextWait(OBJECTREF *pSyncCtxObj, int countHandles, HANDLE *handles, BOOL waitAll, DWORD millis);
+#endif // #ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+public:
+
+ //************************************************************************
+ // Enumerate all frames.
+ //************************************************************************
+
+ /* Flags used for StackWalkFramesEx */
+
+ // FUNCTIONSONLY excludes all functionless frames and all funclets
+ #define FUNCTIONSONLY 0x0001
+
+ // SKIPFUNCLETS includes functionless frames but excludes all funclets and everything between funclets and their parent methods
+ #define SKIPFUNCLETS 0x0002
+
+ #define POPFRAMES 0x0004
+
+ /* use the following flag only if you REALLY know what you are doing !!! */
+ #define QUICKUNWIND 0x0008 // do not restore all registers during unwind
+
+ #define HANDLESKIPPEDFRAMES 0x0010 // temporary to handle skipped frames for appdomain unload
+ // stack crawl. Eventually need to always do this but it
+ // breaks the debugger right now.
+
+ #define LIGHTUNWIND 0x0020 // allow using cache schema (see StackwalkCache class)
+
+ #define NOTIFY_ON_U2M_TRANSITIONS 0x0040 // Provide a callback for native transitions.
+ // This is only useful to a debugger trying to find native code
+ // in the stack.
+
+ #define DISABLE_MISSING_FRAME_DETECTION 0x0080 // disable detection of missing TransitionFrames
+
+ // One thread may be walking the stack of another thread.
+ // If you need to use this, you may also need to put a call to CrawlFrame::CheckGSCookies
+ // in your callback routine if it does any potentially time-consuming activity.
+ #define ALLOW_ASYNC_STACK_WALK 0x0100
+
+ #define THREAD_IS_SUSPENDED 0x0200 // Be careful not to cause deadlocks, this thread is suspended
+
+ // Stackwalk tries to verify some objects, but it could be called in the relocate phase of GC,
+ // where objects could be in an invalid state. This flag tells stackwalk to skip the validation.
+ #define ALLOW_INVALID_OBJECTS 0x0400
+
+ // Caller has verified that the thread to be walked is in the middle of executing
+ // JITd or NGENd code, according to the thread's current context (or seeded
+ // context if one was provided). The caller ensures this when the stackwalk
+ // is initiated by a profiler.
+ #define THREAD_EXECUTING_MANAGED_CODE 0x0800
+
+ // This stackwalk is due to the DoStackSnapshot profiler API
+ #define PROFILER_DO_STACK_SNAPSHOT 0x1000
+
+ // When this flag is set, the stackwalker does not automatically advance to the
+ // faulting managed stack frame when it encounters an ExInfo. This should only be
+ // necessary for native debuggers doing mixed-mode stackwalking.
+ #define NOTIFY_ON_NO_FRAME_TRANSITIONS 0x2000
+
+ // Normally, the stackwalker does not stop at the initial CONTEXT if the IP is in native code.
+ // This flag changes the stackwalker behaviour. Currently this is only used in the debugger stackwalking
+ // API.
+ #define NOTIFY_ON_INITIAL_NATIVE_CONTEXT 0x4000
+
+ // Indicates that we are enumerating GC references and should follow appropriate
+ // callback rules for parent methods vs funclets. Only supported on X64 and Evanesco.
+ //
+ // Refer to StackFrameIterator::Filter for detailed comments on this flag.
+ #define GC_FUNCLET_REFERENCE_REPORTING 0x8000
+
+ StackWalkAction StackWalkFramesEx(
+ PREGDISPLAY pRD, // virtual register set at crawl start
+ PSTACKWALKFRAMESCALLBACK pCallback,
+ VOID *pData,
+ unsigned flags,
+ PTR_Frame pStartFrame = PTR_NULL);
+
+private:
+ // private helpers used by StackWalkFramesEx and StackFrameIterator
+ StackWalkAction MakeStackwalkerCallback(CrawlFrame* pCF, PSTACKWALKFRAMESCALLBACK pCallback, VOID* pData DEBUG_ARG(UINT32 uLoopIteration));
+
+#ifdef _DEBUG
+ void DebugLogStackWalkInfo(CrawlFrame* pCF, __in_z LPCSTR pszTag, UINT32 uLoopIteration);
+#endif // _DEBUG
+
+public:
+
+ StackWalkAction StackWalkFrames(
+ PSTACKWALKFRAMESCALLBACK pCallback,
+ VOID *pData,
+ unsigned flags = 0,
+ PTR_Frame pStartFrame = PTR_NULL);
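+
+ // Editorial sketch (not in the original header): a callback-driven walk over
+ // managed frames only, using the FUNCTIONSONLY flag defined above
+ // (MyCallback and myData are hypothetical):
+ //
+ //     StackWalkAction MyCallback(CrawlFrame* pCF, VOID* pData)
+ //     {
+ //         // ... inspect pCF, e.g. pCF->GetFunction() ...
+ //         return SWA_CONTINUE;    // keep walking
+ //     }
+ //
+ //     pThread->StackWalkFrames(MyCallback, &myData, FUNCTIONSONLY);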
+
+ bool InitRegDisplay(const PREGDISPLAY, const PT_CONTEXT, bool validContext);
+ void FillRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx);
+
+#ifdef WIN64EXCEPTIONS
+ static PCODE VirtualUnwindCallFrame(T_CONTEXT* pContext, T_KNONVOLATILE_CONTEXT_POINTERS* pContextPointers = NULL,
+ EECodeInfo * pCodeInfo = NULL);
+ static UINT_PTR VirtualUnwindCallFrame(PREGDISPLAY pRD, EECodeInfo * pCodeInfo = NULL);
+ static PCODE VirtualUnwindLeafCallFrame(T_CONTEXT* pContext);
+ static PCODE VirtualUnwindNonLeafCallFrame(T_CONTEXT* pContext, T_KNONVOLATILE_CONTEXT_POINTERS* pContextPointers = NULL,
+ PRUNTIME_FUNCTION pFunctionEntry = NULL, UINT_PTR uImageBase = NULL);
+ static UINT_PTR VirtualUnwindToFirstManagedCallFrame(T_CONTEXT* pContext);
+#endif // WIN64EXCEPTIONS
+
+ // During a <clinit>, this thread must not be asynchronously
+ // stopped or interrupted. That would leave the class unavailable
+ // and is therefore a security hole.
+ static void IncPreventAsync()
+ {
+ WRAPPER_NO_CONTRACT;
+ Thread *pThread = GetThread();
+ FastInterlockIncrement((LONG*)&pThread->m_PreventAsync);
+ }
+ static void DecPreventAsync()
+ {
+ WRAPPER_NO_CONTRACT;
+ Thread *pThread = GetThread();
+ FastInterlockDecrement((LONG*)&pThread->m_PreventAsync);
+ }
+
+ bool IsAsyncPrevented()
+ {
+ return m_PreventAsync != 0;
+ }
+
+ typedef StateHolder<Thread::IncPreventAsync, Thread::DecPreventAsync> ThreadPreventAsyncHolder;
+
+ // During a <clinit>, this thread must not be asynchronously
+ // stopped or interrupted. That would leave the class unavailable
+ // and is therefore a security hole.
+ static void IncPreventAbort()
+ {
+ WRAPPER_NO_CONTRACT;
+ Thread *pThread = GetThread();
+ FastInterlockIncrement((LONG*)&pThread->m_PreventAbort);
+ }
+ static void DecPreventAbort()
+ {
+ WRAPPER_NO_CONTRACT;
+ Thread *pThread = GetThread();
+ FastInterlockDecrement((LONG*)&pThread->m_PreventAbort);
+ }
+
+ BOOL IsAbortPrevented()
+ {
+ return m_PreventAbort != 0;
+ }
+
+ typedef StateHolder<Thread::IncPreventAbort, Thread::DecPreventAbort> ThreadPreventAbortHolder;
+
+ // The ThreadStore manages a list of all the threads in the system. I
+ // can't figure out how to expand the ThreadList template type without
+ // making m_Link public.
+ SLink m_Link;
+
+ // For N/Direct calls with the "setLastError" bit, this field stores
+ // the errorcode from that call.
+ DWORD m_dwLastError;
+
+#ifdef FEATURE_INTERPRETER
+ // When we're interpreting IL stubs for N/Direct calls with the "setLastError" bit,
+ // the interpretation will trash the last error before we get to the call to "SetLastError".
+ // Therefore, we record it here immediately after the calli, and treat "SetLastError" as an
+ // intrinsic that transfers the value stored here into the field above.
+ DWORD m_dwLastErrorInterp;
+#endif
+
+ // Debugger per-thread flag for enabling notification on "manual"
+ // method calls, for stepping logic
+ void IncrementTraceCallCount();
+ void DecrementTraceCallCount();
+
+ FORCEINLINE int IsTraceCall()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_TraceCallCount;
+ }
+
+ // Functions to get culture information for thread.
+ int GetParentCultureName(__out_ecount(length) LPWSTR szBuffer, int length, BOOL bUICulture);
+ int GetCultureName(__out_ecount(length) LPWSTR szBuffer, int length, BOOL bUICulture);
+ LCID GetCultureId(BOOL bUICulture);
+ OBJECTREF GetCulture(BOOL bUICulture);
+
+ // Release user cultures that can't survive appdomain unload
+#ifdef FEATURE_LEAK_CULTURE_INFO
+ void ResetCultureForDomain(ADID id);
+#endif // FEATURE_LEAK_CULTURE_INFO
+
+ // Functions to set the culture on the thread.
+ void SetCultureId(LCID lcid, BOOL bUICulture);
+ void SetCulture(OBJECTREF *CultureObj, BOOL bUICulture);
+
+private:
+
+ // Used by the culture accessors.
+ ARG_SLOT CallPropertyGet(BinderMethodID id, OBJECTREF pObject);
+ ARG_SLOT CallPropertySet(BinderMethodID id, OBJECTREF pObject, OBJECTREF pValue);
+
+#ifdef FEATURE_HIJACK
+ // Used in suspension code to redirect a thread at a HandledJITCase
+ BOOL RedirectThreadAtHandledJITCase(PFN_REDIRECTTARGET pTgt);
+ BOOL RedirectCurrentThreadAtHandledJITCase(PFN_REDIRECTTARGET pTgt, T_CONTEXT *pCurrentThreadCtx);
+
+ // Will Redirect the thread using RedirectThreadAtHandledJITCase if necessary
+ BOOL CheckForAndDoRedirect(PFN_REDIRECTTARGET pRedirectTarget);
+ BOOL CheckForAndDoRedirectForDbg();
+ BOOL CheckForAndDoRedirectForGC();
+ BOOL CheckForAndDoRedirectForUserSuspend();
+ BOOL CheckForAndDoRedirectForYieldTask();
+
+ // Exception handling must be very aware of redirection, so we provide a helper
+ // to identify redirection targets
+ static BOOL IsAddrOfRedirectFunc(void * pFuncAddr);
+
+#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS)
+public:
+
+ BOOL CheckForAndDoRedirectForGCStress (T_CONTEXT *pCurrentThreadCtx);
+
+private:
+
+ bool m_fPreemptiveGCDisabledForGCStress;
+#endif // HAVE_GCCOVER && USE_REDIRECT_FOR_GCSTRESS
+#endif // FEATURE_HIJACK
+
+public:
+
+#ifndef DACCESS_COMPILE
+ // These re-calculate the proper value on each call for the currently executing thread. Use GetCachedStackLimit
+ // and GetCachedStackBase for the cached values on this Thread.
+ static void * GetStackLowerBound();
+ static void * GetStackUpperBound();
+#endif
+
+ enum SetStackLimitScope { fAll, fAllowableOnly };
+ BOOL SetStackLimits(SetStackLimitScope scope);
+
+ // These access the stack base and limit values for this thread. (They are cached during InitThread.) The
+ // "stack base" is the "upper bound", i.e., where the stack starts growing from. (Main's call frame is at the
+ // upper bound.) The "stack limit" is the "lower bound", i.e., how far the stack can grow down to.
+ // The "stack sufficient execution limit" is used by EnsureSufficientExecutionStack() to limit how much stack
+ // should remain to execute the average Framework method.
+ PTR_VOID GetCachedStackBase() {LIMITED_METHOD_DAC_CONTRACT; return m_CacheStackBase; }
+ PTR_VOID GetCachedStackLimit() {LIMITED_METHOD_DAC_CONTRACT; return m_CacheStackLimit;}
+ UINT_PTR GetCachedStackSufficientExecutionLimit() {LIMITED_METHOD_DAC_CONTRACT; return m_CacheStackSufficientExecutionLimit;}
+
+private:
+ // Access the base and limit of the stack. (I.e. the memory ranges that the thread has reserved for its stack).
+ //
+ // Note that the base is at a higher address than the limit, since the stack grows downwards.
+ //
+ // Note that we generally access the stack of the thread we are crawling, which is cached in the ScanContext.
+ PTR_VOID m_CacheStackBase;
+ PTR_VOID m_CacheStackLimit;
+ UINT_PTR m_CacheStackSufficientExecutionLimit;
+
+private:
+ static HRESULT CLRSetThreadStackGuarantee(SetThreadStackGuaranteeScope fScope = STSGuarantee_OnlyIfEnabled);
+
+ // try to turn a page into a guard page
+ static BOOL MarkPageAsGuard(UINT_PTR uGuardPageBase);
+
+ // scan a region for a guard page
+ static BOOL DoesRegionContainGuardPage(UINT_PTR uLowAddress, UINT_PTR uHighAddress);
+
+ // Every stack has a single reserved page at its limit that we call the 'hard guard page'. This page is never
+ // committed, and access to it after a stack overflow will terminate the thread.
+#define HARD_GUARD_REGION_SIZE OS_PAGE_SIZE
+#define SIZEOF_DEFAULT_STACK_GUARANTEE 1 * OS_PAGE_SIZE
+
+public:
+ // This will return the last stack address that one could write to before a stack overflow.
+ static UINT_PTR GetLastNormalStackAddress(UINT_PTR stackBase);
+ UINT_PTR GetLastNormalStackAddress();
+
+ UINT_PTR GetLastAllowableStackAddress()
+ {
+ return m_LastAllowableStackAddress;
+ }
+
+ UINT_PTR GetProbeLimit()
+ {
+ return m_ProbeLimit;
+ }
+
+ void ResetStackLimits()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ if (!IsSetThreadStackGuaranteeInUse())
+ {
+ return;
+ }
+ SetStackLimits(fAllowableOnly);
+ }
+
+ BOOL IsSPBeyondLimit();
+
+ INDEBUG(static void DebugLogStackMBIs());
+
+#if defined(_DEBUG_IMPL) && !defined(DACCESS_COMPILE)
+ // Verify that the cached stack base is for the current thread.
+ BOOL HasRightCacheStackBase()
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_CacheStackBase == GetStackUpperBound();
+ }
+#endif
+
+public:
+ static BOOL UniqueStack(void* startLoc = 0);
+
+ BOOL IsAddressInStack (PTR_VOID addr) const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(m_CacheStackBase != NULL);
+ _ASSERTE(m_CacheStackLimit != NULL);
+ _ASSERTE(m_CacheStackLimit < m_CacheStackBase);
+ return m_CacheStackLimit < addr && addr <= m_CacheStackBase;
+ }
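+
+ // Editorial sketch (not in the original header): since the stack grows down,
+ // a live local's address must fall in (limit, base]; e.g. on the current
+ // thread (hypothetical check):
+ //
+ //     int local;
+ //     _ASSERTE(GetThread()->IsAddressInStack(&local));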
+
+ // DetermineIfGuardPagePresent returns TRUE if the thread's stack contains a proper guard page. This function
+ // makes a physical check of the stack, rather than relying on whether or not the CLR is currently processing a
+ // stack overflow exception.
+ BOOL DetermineIfGuardPagePresent();
+
+#ifdef FEATURE_STACK_PROBE
+ // CanResetStackTo will return TRUE if the given stack pointer is far enough away from
+ // the guard page to properly restore it with RestoreGuardPage.
+ BOOL CanResetStackTo(LPCVOID stackPointer);
+#endif
+
+ // IsStackSpaceAvailable will return true if the given number of stack pages is available on the stack.
+ BOOL IsStackSpaceAvailable(float numPages);
+
+ // Returns the amount of stack available after an SO but before the OS rips the process.
+ static UINT_PTR GetStackGuarantee();
+
+ // RestoreGuardPage will replace the guard page on this thread's stack. The assumption is that it was removed
+ // by the OS due to a stack overflow exception. This function requires that you know that you have enough stack
+ // space to restore the guard page, so make sure you know what you're doing when you decide to call this.
+ VOID RestoreGuardPage();
+
+ // Commit the thread's entire stack. Note: this works on managed or unmanaged threads,
+ // and the Thread* argument is optional (may be NULL).
+ static BOOL CommitThreadStack(Thread* pThreadOptional);
+
+#ifdef FEATURE_HIJACK
+private:
+ // Redirecting of threads in managed code at suspension
+
+ enum RedirectReason {
+ RedirectReason_GCSuspension,
+ RedirectReason_DebugSuspension,
+ RedirectReason_UserSuspension,
+ RedirectReason_YieldTask,
+#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) // GCCOVER
+ RedirectReason_GCStress,
+#endif // HAVE_GCCOVER && USE_REDIRECT_FOR_GCSTRESS
+ };
+ static void __stdcall RedirectedHandledJITCase(RedirectReason reason);
+ static void __stdcall RedirectedHandledJITCaseForDbgThreadControl();
+ static void __stdcall RedirectedHandledJITCaseForGCThreadControl();
+ static void __stdcall RedirectedHandledJITCaseForUserSuspend();
+ static void __stdcall RedirectedHandledJITCaseForYieldTask();
+#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) // GCCOVER
+ static void __stdcall RedirectedHandledJITCaseForGCStress();
+#endif // defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS)
+
+ friend void CPFH_AdjustContextForThreadSuspensionRace(T_CONTEXT *pContext, Thread *pThread);
+#endif // FEATURE_HIJACK
+
+private:
+ //-------------------------------------------------------------
+ // Waiting & Synchronization
+ //-------------------------------------------------------------
+
+ // For suspends. The thread waits on this event. A client sets the event to cause
+ // the thread to resume.
+ void WaitSuspendEvents(BOOL fDoWait = TRUE);
+ BOOL WaitSuspendEventsHelper(void);
+
+ // Helpers to ensure that the bits for suspension and the number of active
+ // traps remain coordinated.
+ void MarkForSuspension(ULONG bit);
+ void UnmarkForSuspension(ULONG bit);
+
+ void SetupForSuspension(ULONG bit)
+ {
+ WRAPPER_NO_CONTRACT;
+ if (bit & TS_UserSuspendPending) {
+ m_UserSuspendEvent.Reset();
+ }
+ if (bit & TS_DebugSuspendPending) {
+ m_DebugSuspendEvent.Reset();
+ }
+ }
+
+ void ReleaseFromSuspension(ULONG bit)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ UnmarkForSuspension(~bit);
+
+ //
+ // If the thread is set free, mark it as not-suspended now
+ //
+ ThreadState oldState = m_State;
+
+ while ((oldState & (TS_UserSuspendPending | TS_DebugSuspendPending)) == 0)
+ {
+ //
+ // Construct the destination state we desire - all suspension bits turned off.
+ //
+ ThreadState newState = (ThreadState)(oldState & ~(TS_UserSuspendPending |
+ TS_DebugSuspendPending |
+ TS_SyncSuspended));
+
+ if (FastInterlockCompareExchange((LONG *)&m_State, newState, oldState) == (LONG)oldState)
+ {
+ break;
+ }
+
+ //
+ // The state changed underneath us, refresh it and try again.
+ //
+ oldState = m_State;
+ }
+
+ if (bit & TS_UserSuspendPending) {
+ m_UserSuspendEvent.Set();
+ }
+
+ if (bit & TS_DebugSuspendPending) {
+ m_DebugSuspendEvent.Set();
+ }
+
+ }
+
+ // For getting a thread to a safe point. A client waits on the event, which is
+ // set by the thread when it reaches a safe spot.
+#ifndef FEATURE_CORECLR
+ void FinishSuspendingThread();
+#endif // FEATURE_CORECLR
+ void SetSafeEvent();
+
+public:
+ FORCEINLINE void UnhijackThreadNoAlloc()
+ {
+#if defined(FEATURE_HIJACK) && !defined(DACCESS_COMPILE)
+ if (m_State & TS_Hijacked)
+ {
+ *m_ppvHJRetAddrPtr = m_pvHJRetAddr;
+ FastInterlockAnd((ULONG *) &m_State, ~TS_Hijacked);
+ }
+#endif
+ }
+
+ void UnhijackThread();
+
+ // Flags that may be passed to GetSafelyRedirectableThreadContext, to customize
+ // which checks it should perform. This allows a subset of the context verification
+ // logic used by HandledJITCase to be shared with other callers, such as profiler
+ // stackwalking
+ enum GetSafelyRedirectableThreadContextOptions
+ {
+ // Perform the default thread context checks
+ kDefaultChecks = 0x00000000,
+
+ // Compares the thread context's IP against m_LastRedirectIP, and potentially
+ // updates m_LastRedirectIP, when determining the safeness of the thread's
+ // context. HandledJITCase will always set this flag.
+ // This flag is ignored on non-x86 platforms, and also on x86 if the OS supports
+ // trap frame reporting.
+ kPerfomLastRedirectIPCheck = 0x00000001,
+
+ // Use g_pDebugInterface->IsThreadContextInvalid() to see if breakpoints might
+ // confuse the stack walker. HandledJITCase will always set this flag.
+ kCheckDebuggerBreakpoints = 0x00000002,
+ };
+
+ // Helper used by HandledJITCase and others who need an absolutely reliable
+ // register context.
+ BOOL GetSafelyRedirectableThreadContext(DWORD dwOptions, T_CONTEXT * pCtx, REGDISPLAY * pRD);
+
+private:
+#ifdef FEATURE_HIJACK
+ // Add and remove hijacks for JITted calls.
+ void HijackThread(VOID *pvHijackAddr, ExecutionState *esb);
+ BOOL HandledJITCase(BOOL ForTaskSwitchIn = FALSE);
+
+ VOID *m_pvHJRetAddr; // original return address (before hijack)
+ VOID **m_ppvHJRetAddrPtr; // place we bashed a new return address
+ MethodDesc *m_HijackedFunction; // remember what we hijacked
+
+#ifdef _TARGET_X86_
+ PCODE m_LastRedirectIP;
+ ULONG m_SpinCount;
+#endif // _TARGET_X86_
+#endif // FEATURE_HIJACK
+
+
+ DWORD m_Win32FaultAddress;
+ DWORD m_Win32FaultCode;
+
+
+ // Support for Wait/Notify
+ BOOL Block(INT32 timeOut, PendingSync *syncInfo);
+ void Wake(SyncBlock *psb);
+ DWORD Wait(HANDLE *objs, int cntObjs, INT32 timeOut, PendingSync *syncInfo);
+ DWORD Wait(CLREvent* pEvent, INT32 timeOut, PendingSync *syncInfo);
+
+ // support for Thread.Interrupt() which breaks out of Waits, Sleeps, Joins
+ LONG m_UserInterrupt;
+ DWORD IsUserInterrupted()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_UserInterrupt;
+ }
+ void ResetUserInterrupted()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockExchange(&m_UserInterrupt, 0);
+ }
+
+ void HandleThreadInterrupt(BOOL fWaitForADUnload);
+
+public:
+ static void __stdcall UserInterruptAPC(ULONG_PTR ignore);
+
+#if defined(_DEBUG) && defined(TRACK_SYNC)
+
+// Each thread has a stack that tracks all enter and leave requests
+public:
+ Dbg_TrackSync *m_pTrackSync;
+
+#endif // TRACK_SYNC
+
+private:
+#ifdef ENABLE_CONTRACTS_DATA
+ struct ClrDebugState *m_pClrDebugState; // Pointer to ClrDebugState for quick access
+
+ ULONG m_ulEnablePreemptiveGCCount;
+#endif // ENABLE_CONTRACTS_DATA
+
+#ifdef ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+
+private:
+ // Set once on initialization, single-threaded, inside friend code:InitThreadManager,
+ // based on whether the user has set COMPLUS_EnforceEEThreadNotRequiredContracts.
+ // This is then later accessed via public
+ // code:Thread::ShouldEnforceEEThreadNotRequiredContracts. See
+ // code:GetThreadGenericFullCheck for details.
+ static BOOL s_fEnforceEEThreadNotRequiredContracts;
+
+public:
+ static BOOL ShouldEnforceEEThreadNotRequiredContracts();
+
+#endif // ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+
+
+
+private:
+ // For suspends:
+ CLREvent m_SafeEvent;
+ CLREvent m_UserSuspendEvent;
+ CLREvent m_DebugSuspendEvent;
+
+ // For Object::Wait, Notify and NotifyAll, we use an Event inside the
+ // thread and we queue the threads onto the SyncBlock of the object they
+ // are waiting for.
+ CLREvent m_EventWait;
+ WaitEventLink m_WaitEventLink;
+ WaitEventLink* WaitEventLinkForSyncBlock (SyncBlock *psb)
+ {
+ LIMITED_METHOD_CONTRACT;
+ WaitEventLink *walk = &m_WaitEventLink;
+ while (walk->m_Next) {
+ _ASSERTE (walk->m_Next->m_Thread == this);
+ if ((SyncBlock*)(((DWORD_PTR)walk->m_Next->m_WaitSB) & ~1)== psb) {
+ break;
+ }
+ walk = walk->m_Next;
+ }
+ return walk;
+ }
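+
+ // Editorial note (an inference from the mask above, not stated in the
+ // original header): the low bit of m_WaitSB appears to serve as a tag bit,
+ // which is why the walk strips it with "& ~1" before comparing the
+ // remaining bits against the SyncBlock pointer.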
+
+ // Access to thread handle and ThreadId.
+ HANDLE GetThreadHandle()
+ {
+ LIMITED_METHOD_CONTRACT;
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ {
+ CounterHolder handleHolder(&m_dwThreadHandleBeingUsed);
+ HANDLE handle = m_ThreadHandle;
+ _ASSERTE ( handle == INVALID_HANDLE_VALUE
+ || handle == SWITCHOUT_HANDLE_VALUE
+ || m_OSThreadId == 0
+ || m_OSThreadId == 0xbaadf00d
+ || ::MatchThreadHandleToOsId(handle, m_OSThreadId) );
+ }
+#endif
+
+ DACCOP_IGNORE(FieldAccess, "Treated as raw address, no marshaling is necessary");
+ return m_ThreadHandle;
+ }
+
+ void SetThreadHandle(HANDLE h)
+ {
+ LIMITED_METHOD_CONTRACT;
+#if defined(_DEBUG)
+ _ASSERTE ( h == INVALID_HANDLE_VALUE
+ || h == SWITCHOUT_HANDLE_VALUE
+ || m_OSThreadId == 0
+ || m_OSThreadId == 0xbaadf00d
+ || ::MatchThreadHandleToOsId(h, m_OSThreadId) );
+#endif
+ FastInterlockExchangePointer(&m_ThreadHandle, h);
+ }
+
+ // We maintain a correspondence between this object, the ThreadId and ThreadHandle
+ // in Win32, and the exposed Thread object.
+ HANDLE m_ThreadHandle;
+
+ // <TODO> It would be nice to remove m_ThreadHandleForClose to simplify Thread.Join,
+ // but at the moment that isn't possible without extensive work.
+ // This handle is used by SwitchOut to store the old handle which may need to be closed
+ // if we are the owner. The handle can't be closed before checking the external count
+ // which we can't do in SwitchOut since that may require locking or switching threads.</TODO>
+ HANDLE m_ThreadHandleForClose;
+ HANDLE m_ThreadHandleForResume;
+ BOOL m_WeOwnThreadHandle;
+ DWORD m_OSThreadId;
+
+ BOOL CreateNewOSThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args);
+ BOOL CreateNewHostTask(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args);
+
+ OBJECTHANDLE m_ExposedObject;
+ OBJECTHANDLE m_StrongHndToExposedObject;
+
+ DWORD m_Priority; // initialized to INVALID_THREAD_PRIORITY, set to actual priority when a
+ // thread does a busy wait for GC, reset to INVALID_THREAD_PRIORITY after wait is over
+ friend class NDirect; // Quick access to thread stub creation
+
+#ifdef HAVE_GCCOVER
+ friend void DoGcStress (PT_CONTEXT regs, MethodDesc *pMD); // Needs to call UnhijackThread
+#endif // HAVE_GCCOVER
+
+ ULONG m_ExternalRefCount;
+
+ ULONG m_UnmanagedRefCount;
+
+ LONG m_TraceCallCount;
+
+ //-----------------------------------------------------------
+ // Bytes promoted on this thread since the last GC?
+ //-----------------------------------------------------------
+ DWORD m_fPromoted;
+public:
+ void SetHasPromotedBytes ();
+ DWORD GetHasPromotedBytes ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fPromoted;
+ }
+
+private:
+ //-----------------------------------------------------------
+ // Last exception to be thrown.
+ //-----------------------------------------------------------
+ friend class EEDbgInterfaceImpl;
+
+private:
+ // Stores the most recently thrown exception. We need to have a handle in case a GC occurs before
+ // we catch so we don't lose the object. Having a static allows others to catch outside of COM+ w/o leaking
+ // a handler and allows rethrow outside of COM+ too.
+ // Differs from m_pThrowable in that it doesn't stack on nested exceptions.
+ OBJECTHANDLE m_LastThrownObjectHandle; // Unsafe to use directly. Use accessors instead.
+
+ // Indicates that the throwable in m_LastThrownObjectHandle should be treated as
+ // unhandled. This occurs during fatal error and a few other early error conditions
+ // before EH is fully set up.
+ BOOL m_ltoIsUnhandled;
+
+ friend void DECLSPEC_NORETURN EEPolicy::HandleFatalStackOverflow(EXCEPTION_POINTERS *pExceptionInfo, BOOL fSkipDebugger);
+
+public:
+
+ BOOL IsLastThrownObjectNull() { WRAPPER_NO_CONTRACT; return (m_LastThrownObjectHandle == NULL); }
+
+ OBJECTREF LastThrownObject()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (m_LastThrownObjectHandle == NULL)
+ {
+ return NULL;
+ }
+ else
+ {
+ // We only have a handle if we have an object to keep in it.
+ _ASSERTE(ObjectFromHandle(m_LastThrownObjectHandle) != NULL);
+ return ObjectFromHandle(m_LastThrownObjectHandle);
+ }
+ }
+
+ OBJECTHANDLE LastThrownObjectHandle()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_LastThrownObjectHandle;
+ }
+
+ void SetLastThrownObject(OBJECTREF throwable, BOOL isUnhandled = FALSE);
+ void SetSOForLastThrownObject();
+ OBJECTREF SafeSetLastThrownObject(OBJECTREF throwable);
+
+ // Indicates that the last thrown object is now treated as unhandled
+ void MarkLastThrownObjectUnhandled()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_ltoIsUnhandled = TRUE;
+ }
+
+ // TRUE if the throwable in LTO should be treated as unhandled
+ BOOL IsLastThrownObjectUnhandled()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_ltoIsUnhandled;
+ }
+
+ void SafeUpdateLastThrownObject(void);
+ OBJECTREF SafeSetThrowables(OBJECTREF pThrowable
+ DEBUG_ARG(ThreadExceptionState::SetThrowableErrorChecking stecFlags = ThreadExceptionState::STEC_All),
+ BOOL isUnhandled = FALSE);
+
+ bool IsLastThrownObjectStackOverflowException()
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(NULL != g_pPreallocatedStackOverflowException);
+
+ return (m_LastThrownObjectHandle == g_pPreallocatedStackOverflowException);
+ }
+
+ void SetKickOffDomainId(ADID ad);
+ ADID GetKickOffDomainId();
+
+ // get the current notification (if any) from this thread
+ OBJECTHANDLE GetThreadCurrNotification();
+
+ // set the current notification on this thread
+ void SetThreadCurrNotification(OBJECTHANDLE handle);
+
+ // clear the current notification (if any) from this thread
+ void ClearThreadCurrNotification();
+
+private:
+ void SetLastThrownObjectHandle(OBJECTHANDLE h);
+
+ ADID m_pKickOffDomainId;
+
+ ThreadExceptionState m_ExceptionState;
+
+ //-----------------------------------------------------------
+ // For stack probing. These are the last allowable addresses that a thread
+ // can touch. Going beyond is a stack overflow. The ProbeLimit will be
+ // set based on whether SO probing is enabled. The LastAllowableAddress
+ // will always represent the true stack limit.
+ //-----------------------------------------------------------
+ UINT_PTR m_ProbeLimit;
+
+ UINT_PTR m_LastAllowableStackAddress;
+
+private:
+
+ // Save the domain when a task is switched out, and restore it when
+ // the task is switched in.
+ PTR_AppDomain m_pDomainAtTaskSwitch;
+
+ //---------------------------------------------------------------
+ // m_debuggerFilterContext holds the thread's "filter context" for the
+ // debugger. This filter context is used by the debugger to seed
+ // stack walks on the thread.
+ //---------------------------------------------------------------
+ PTR_CONTEXT m_debuggerFilterContext;
+
+ //---------------------------------------------------------------
+ // m_profilerFilterContext holds an additional context for the
+ // case when a (sampling) profiler wishes to hijack the thread
+ // and do a stack walk on the same thread.
+ //---------------------------------------------------------------
+ T_CONTEXT *m_pProfilerFilterContext;
+
+ //---------------------------------------------------------------
+ // m_hijackLock holds a BOOL that is used for mutual exclusion
+ // between profiler stack walks and thread hijacks (bashing
+ // return addresses on the stack)
+ //---------------------------------------------------------------
+ Volatile<LONG> m_hijackLock;
+ //---------------------------------------------------------------
+ // m_debuggerCantStop holds a count of entries into "can't stop"
+ // areas that the Interop Debugging Services must know about.
+ //---------------------------------------------------------------
+ DWORD m_debuggerCantStop;
+
+ //---------------------------------------------------------------
+ // A word reserved for use by the CLR Debugging Services during
+ // managed/unmanaged debugging.
+ //---------------------------------------------------------------
+ VOID* m_debuggerWord;
+
+ //---------------------------------------------------------------
+ // The current custom notification data object (or NULL if none
+ // pending)
+ //---------------------------------------------------------------
+ OBJECTHANDLE m_hCurrNotification;
+
+ //---------------------------------------------------------------
+ // For Interop-Debugging; track if a thread is hijacked.
+ //---------------------------------------------------------------
+ BOOL m_fInteropDebuggingHijacked;
+
+ //---------------------------------------------------------------
+ // Bitmask to remember per-thread state useful for the profiler API. See
+ // COR_PRF_CALLBACKSTATE_* flags in clr\src\inc\ProfilePriv.h for bit values.
+ //---------------------------------------------------------------
+ DWORD m_profilerCallbackState;
+
+#if defined(FEATURE_PROFAPI_ATTACH_DETACH) || defined(DATA_PROFAPI_ATTACH_DETACH)
+ //---------------------------------------------------------------
+ // m_dwProfilerEvacuationCounter keeps track of how many profiler
+ // callback calls remain on the stack
+ //---------------------------------------------------------------
+ // Why volatile?
+ // See code:ProfilingAPIUtility::InitializeProfiling#LoadUnloadCallbackSynchronization.
+ Volatile<DWORD> m_dwProfilerEvacuationCounter;
+#endif // defined(FEATURE_PROFAPI_ATTACH_DETACH) || defined(DATA_PROFAPI_ATTACH_DETACH)
+
+private:
+ Volatile<LONG> m_threadPoolCompletionCount;
+ static Volatile<LONG> s_threadPoolCompletionCountOverflow; // counts completions for threads that have been destroyed.
+
+public:
+ static void IncrementThreadPoolCompletionCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ Thread* pThread = GetThread();
+ if (pThread)
+ pThread->m_threadPoolCompletionCount++;
+ else
+ FastInterlockIncrement(&s_threadPoolCompletionCountOverflow);
+ }
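+
+ // Editorial note (an assumption; the actual definition lives in the .cpp):
+ // the aggregate accessor declared below presumably sums the live threads'
+ // m_threadPoolCompletionCount values and adds the overflow counter, so
+ // completions recorded by threads that have since died are not lost:
+ //
+ //     LONG total = s_threadPoolCompletionCountOverflow.Load();
+ //     // ... plus each live thread's m_threadPoolCompletionCount,
+ //     //     gathered while holding the ThreadStore lock.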
+
+ static LONG GetTotalThreadPoolCompletionCount();
+
+private:
+
+ //-------------------------------------------------------------------------
+ // AppDomains on the current call stack
+ //-------------------------------------------------------------------------
+ AppDomainStack m_ADStack;
+
+ //-------------------------------------------------------------------------
+ // Support creation of assemblies in DllMain (see ceemain.cpp)
+ //-------------------------------------------------------------------------
+ DomainFile* m_pLoadingFile;
+
+
+ // The ThreadAbort reason (Get/Set/ClearExceptionStateInfo on the managed thread) is
+ // held here as an OBJECTHANDLE and the ADID of the AppDomain in which it is valid.
+ // Atomic updates of this state use the Thread's Crst.
+
+ OBJECTHANDLE m_AbortReason;
+ ADID m_AbortReasonDomainID;
+
+ void ClearAbortReason(BOOL pNoLock = FALSE);
+
+public:
+
+ void SetInteropDebuggingHijacked(BOOL f)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_fInteropDebuggingHijacked = f;
+ }
+ BOOL GetInteropDebuggingHijacked()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fInteropDebuggingHijacked;
+ }
+
+ inline DWORD IncrementOverridesCount();
+ inline DWORD DecrementOverridesCount();
+ inline DWORD GetOverridesCount();
+ inline DWORD IncrementAssertCount();
+ inline DWORD DecrementAssertCount();
+ inline DWORD GetAssertCount();
+ inline void PushDomain(ADID pDomain);
+ inline ADID PopDomain();
+ inline DWORD GetNumAppDomainsOnThread();
+ inline BOOL CheckThreadWideSpecialFlag(DWORD flags);
+ inline void InitDomainIteration(DWORD *pIndex);
+ inline ADID GetNextDomainOnStack(DWORD *pIndex, DWORD *pOverrides, DWORD *pAsserts);
+ inline void UpdateDomainOnStack(DWORD pIndex, DWORD asserts, DWORD overrides);
+
+ BOOL IsDefaultSecurityInfo(void)
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_ADStack.IsDefaultSecurityInfo();
+ }
+
+ BOOL AllDomainsHomogeneousWithNoStackModifiers(void)
+ {
+ WRAPPER_NO_CONTRACT;
+ return m_ADStack.AllDomainsHomogeneousWithNoStackModifiers();
+ }
+
+ const AppDomainStack& GetAppDomainStack(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ADStack;
+ }
+ AppDomainStack* GetAppDomainStackPointer(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_ADStack;
+ }
+
+ void SetAppDomainStack(const AppDomainStack& appDomainStack)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_ADStack = appDomainStack; // this is a function call, massive operator=
+ }
+
+ void ResetSecurityInfo( void )
+ {
+ WRAPPER_NO_CONTRACT;
+ m_ADStack.ClearDomainStack();
+ }
+
+ void SetFilterContext(T_CONTEXT *pContext);
+ T_CONTEXT *GetFilterContext(void);
+
+ void SetProfilerFilterContext(T_CONTEXT *pContext)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_pProfilerFilterContext = pContext;
+ }
+
+ // Used by the profiler API to find which flags have been set on the Thread object,
+ // in order to authorize a profiler's call into ICorProfilerInfo(2).
+ DWORD GetProfilerCallbackFullState()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(GetThread() == this);
+ return m_profilerCallbackState;
+ }
+
+ // Used by profiler API to set at once all callback flag bits stored on the Thread object.
+ // Used to reinstate the previous state that had been modified by a previous call to
+ // SetProfilerCallbackStateFlags
+ void SetProfilerCallbackFullState(DWORD dwFullState)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(GetThread() == this);
+ m_profilerCallbackState = dwFullState;
+ }
+
+ // Used by profiler API to set individual callback flags on the Thread object.
+ // Returns the previous state of all flags.
+ DWORD SetProfilerCallbackStateFlags(DWORD dwFlags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(GetThread() == this);
+
+ DWORD dwRet = m_profilerCallbackState;
+ m_profilerCallbackState |= dwFlags;
+ return dwRet;
+ }
+
+ T_CONTEXT *GetProfilerFilterContext(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pProfilerFilterContext;
+ }
+
+#ifdef FEATURE_PROFAPI_ATTACH_DETACH
+
+ FORCEINLINE DWORD GetProfilerEvacuationCounter(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwProfilerEvacuationCounter;
+ }
+
+ FORCEINLINE void IncProfilerEvacuationCounter(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_dwProfilerEvacuationCounter++;
+ _ASSERTE(m_dwProfilerEvacuationCounter != 0U);
+ }
+
+ FORCEINLINE void DecProfilerEvacuationCounter(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_dwProfilerEvacuationCounter != 0U);
+ m_dwProfilerEvacuationCounter--;
+ }
+
+#endif // FEATURE_PROFAPI_ATTACH_DETACH
+
+ //-------------------------------------------------------------------------
+ // The hijack lock enforces that a thread on which a profiler is currently
+ // performing a stack walk cannot be hijacked.
+ //
+ // Note that the hijack lock cannot be managed by the host (i.e., this
+ // cannot be a Crst), because this could lead to a deadlock: YieldTask,
+ // which is called by the host, may need to hijack, for which it would
+ // need to take this lock - but since the host need not be reentrant,
+ // taking the lock must not cause a call back into the host.
+ //-------------------------------------------------------------------------
+ static BOOL EnterHijackLock(Thread *pThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return ::InterlockedCompareExchange(&(pThread->m_hijackLock), TRUE, FALSE) == FALSE;
+ }
+
+ static void LeaveHijackLock(Thread *pThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ pThread->m_hijackLock = FALSE;
+ }
+
+ typedef ConditionalStateHolder<Thread *, Thread::EnterHijackLock, Thread::LeaveHijackLock> HijackLockHolder;
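+
+ // Editorial sketch (not in the original header): the holder is conditional,
+ // so stack-walking code must check whether the lock was actually taken,
+ // assuming ConditionalStateHolder exposes an Acquired() query:
+ //
+ //     HijackLockHolder lock(pThread);
+ //     if (lock.Acquired())
+ //     {
+ //         // safe to walk pThread's stack: hijacking is excluded
+ //     }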
+ //-------------------------------------------------------------------------
+
+ static bool ThreadsAtUnsafePlaces(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_threadsAtUnsafePlaces != (LONG)0);
+ }
+
+ static void IncThreadsAtUnsafePlaces(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ InterlockedIncrement(&m_threadsAtUnsafePlaces);
+ }
+
+ static void DecThreadsAtUnsafePlaces(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ InterlockedDecrement(&m_threadsAtUnsafePlaces);
+ }
+
+ void PrepareForEERestart(BOOL SuspendSucceeded)
+ {
+ WRAPPER_NO_CONTRACT;
+
+#ifdef FEATURE_HIJACK
+ // Only unhijack the thread if the suspend succeeded. If it failed,
+ // the target thread may currently be using the original stack
+ // location of the return address for something else.
+ if (SuspendSucceeded)
+ UnhijackThread();
+#endif // FEATURE_HIJACK
+
+ ResetThreadState(TS_GCSuspendPending);
+ }
+
+ void SetDebugCantStop(bool fCantStop);
+ bool GetDebugCantStop(void);
+
+ static LPVOID GetStaticFieldAddress(FieldDesc *pFD);
+ TADDR GetStaticFieldAddrNoCreate(FieldDesc *pFD, PTR_AppDomain pDomain);
+
+ void SetLoadingFile(DomainFile *pFile)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(m_pLoadingFile == NULL);
+ m_pLoadingFile = pFile;
+ }
+
+ void ClearLoadingFile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pLoadingFile = NULL;
+ }
+
+ DomainFile *GetLoadingFile()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pLoadingFile;
+ }
+
+ private:
+
+ static void LoadingFileRelease(Thread *pThread)
+ {
+ WRAPPER_NO_CONTRACT;
+ pThread->ClearLoadingFile();
+ }
+
+ public:
+
+ typedef Holder<Thread *, DoNothing, Thread::LoadingFileRelease> LoadingFileHolder;
+#ifndef FEATURE_LEAK_CULTURE_INFO
+ void InitCultureAccessors();
+ FieldDesc *managedThreadCurrentCulture;
+ FieldDesc *managedThreadCurrentUICulture;
+#endif
+private:
+ // Don't allow a thread to be asynchronously stopped or interrupted (e.g. because
+ // it is performing a <clinit>)
+ int m_PreventAsync;
+ int m_PreventAbort;
+ int m_nNestedMarshalingExceptions;
+ BOOL IsMarshalingException()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_nNestedMarshalingExceptions != 0);
+ }
+ int StartedMarshalingException()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_nNestedMarshalingExceptions++;
+ }
+ void FinishedMarshalingException()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_nNestedMarshalingExceptions > 0);
+ m_nNestedMarshalingExceptions--;
+ }
+
+ static LONG m_DebugWillSyncCount;
+
+ // IP cache used by QueueCleanupIP.
+ #define CLEANUP_IPS_PER_CHUNK 4
+ struct CleanupIPs {
+ IUnknown *m_Slots[CLEANUP_IPS_PER_CHUNK];
+ CleanupIPs *m_Next;
+ CleanupIPs() {LIMITED_METHOD_CONTRACT; memset(this, 0, sizeof(*this)); }
+ };
+ CleanupIPs m_CleanupIPs;
+
+#define BEGIN_FORBID_TYPELOAD() _ASSERTE_IMPL((GetThreadNULLOk() == 0) || ++GetThreadNULLOk()->m_ulForbidTypeLoad)
+#define END_FORBID_TYPELOAD() _ASSERTE_IMPL((GetThreadNULLOk() == 0) || GetThreadNULLOk()->m_ulForbidTypeLoad--)
+#define TRIGGERS_TYPELOAD() _ASSERTE_IMPL((GetThreadNULLOk() == 0) || !GetThreadNULLOk()->m_ulForbidTypeLoad)
+
+#ifdef _DEBUG
+public:
+ DWORD m_GCOnTransitionsOK;
+ ULONG m_ulForbidTypeLoad;
+
+
+/****************************************************************************/
+/* The code below is an attempt to catch people who don't protect GC pointers that
+ they should be protecting. Basically, OBJECTREF's constructor adds the slot
+ to a table. When we protect a slot, we remove it from the table. When a GC
+ could happen, all entries in the table are marked as bad. When access to
+ an OBJECTREF happens (the -> operator) we assert that the slot is not bad. To make
+ this fast, the table is not perfect (there can be collisions), but this should
+ not cause false positives; it may, however, allow errors to go undetected. */
+
+#ifdef _WIN64
+#define OBJREF_HASH_SHIFT_AMOUNT 3
+#else // _WIN64
+#define OBJREF_HASH_SHIFT_AMOUNT 2
+#endif // _WIN64
+
+ // For debugging, you may want to make this number very large (8K),
+ // which should basically ensure that no collisions happen
+#define OBJREF_TABSIZE 256
+ DWORD_PTR dangerousObjRefs[OBJREF_TABSIZE]; // Really objectRefs with lower bit stolen
+ // m_allObjRefEntriesBad is TRUE iff all entries in dangerousObjRefs are marked as "GC happened".
+ // It's purely a perf optimization for debug builds that helps in the case where we make 2 successive
+ // calls to Thread::TriggersGC. In that case, the entire array doesn't need to be walked and marked,
+ // since we just did that.
+ BOOL m_allObjRefEntriesBad;
+
+ static DWORD_PTR OBJREF_HASH;
+ // Remembers that this object ref pointer is 'alive' and unprotected (Bad if GC happens)
+ static void ObjectRefNew(const OBJECTREF* ref) {
+ WRAPPER_NO_CONTRACT;
+ Thread * curThread = GetThreadNULLOk();
+ if (curThread == 0) return;
+
+ curThread->dangerousObjRefs[((size_t)ref >> OBJREF_HASH_SHIFT_AMOUNT) % OBJREF_HASH] = (size_t)ref;
+ curThread->m_allObjRefEntriesBad = FALSE;
+ }
+
+ static void ObjectRefAssign(const OBJECTREF* ref) {
+ WRAPPER_NO_CONTRACT;
+ Thread * curThread = GetThreadNULLOk();
+ if (curThread == 0) return;
+
+ curThread->m_allObjRefEntriesBad = FALSE;
+ DWORD_PTR* slot = &curThread->dangerousObjRefs[((DWORD_PTR) ref >> OBJREF_HASH_SHIFT_AMOUNT) % OBJREF_HASH];
+ if ((*slot & ~3) == (size_t) ref)
+ *slot = *slot & ~1; // Don't care about GC's that have happened
+ }
+
+ // If an object is protected, it can be removed from the 'dangerous table'
+ static void ObjectRefProtected(const OBJECTREF* ref) {
+#ifdef USE_CHECKED_OBJECTREFS
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsObjRefValid(ref));
+ Thread * curThread = GetThreadNULLOk();
+ if (curThread == 0) return;
+
+ curThread->m_allObjRefEntriesBad = FALSE;
+ DWORD_PTR* slot = &curThread->dangerousObjRefs[((DWORD_PTR) ref >> OBJREF_HASH_SHIFT_AMOUNT) % OBJREF_HASH];
+ if ((*slot & ~3) == (DWORD_PTR) ref)
+ *slot = (size_t) ref | 2; // mark as being protected
+#else
+ LIMITED_METHOD_CONTRACT;
+#endif
+ }
+
+ static bool IsObjRefValid(const OBJECTREF* ref) {
+ WRAPPER_NO_CONTRACT;
+ Thread * curThread = GetThreadNULLOk();
+ if (curThread == 0) return(true);
+
+ // If the object ref is NULL, we'll let it pass.
+ if (*((DWORD_PTR*) ref) == 0)
+ return(true);
+
+ DWORD_PTR val = curThread->dangerousObjRefs[((DWORD_PTR) ref >> OBJREF_HASH_SHIFT_AMOUNT) % OBJREF_HASH];
+ // if not in the table, or not the case that it was unprotected and GC happened, return true.
+ if((val & ~3) != (size_t) ref || (val & 3) != 1)
+ return(true);
+ // If the pointer lives in the GC heap, then it is protected, and thus valid.
+ if (dac_cast<TADDR>(g_lowest_address) <= val && val < dac_cast<TADDR>(g_highest_address))
+ return(true);
+ return(false);
+ }
+
+ // Clears the table. Useful to do when crossing the managed-code / EE boundary,
+ // as you usually only care about OBJECTREFs that have been created after that.
+ static void STDCALL ObjectRefFlush(Thread* thread);
+
+
+#ifdef ENABLE_CONTRACTS_IMPL
+ // Marks all Objrefs in the table as bad (since they are unprotected)
+ static void TriggersGC(Thread* thread) {
+ WRAPPER_NO_CONTRACT;
+ if ((GCViolation|BadDebugState) & (UINT_PTR)(GetViolationMask()))
+ {
+ return;
+ }
+ if (!thread->m_allObjRefEntriesBad)
+ {
+ thread->m_allObjRefEntriesBad = TRUE;
+ for(unsigned i = 0; i < OBJREF_TABSIZE; i++)
+ thread->dangerousObjRefs[i] |= 1; // mark all slots as GC happened
+ }
+ }
+#endif // ENABLE_CONTRACTS_IMPL
+
+#endif // _DEBUG
+
+private:
+ PTR_CONTEXT m_pSavedRedirectContext;
+
+ BOOL IsContextSafeToRedirect(T_CONTEXT* pContext);
+
+public:
+ PT_CONTEXT GetSavedRedirectContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_pSavedRedirectContext);
+ }
+
+#ifndef DACCESS_COMPILE
+ void SetSavedRedirectContext(PT_CONTEXT pCtx)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pSavedRedirectContext = pCtx;
+ }
+#endif
+
+ void EnsurePreallocatedContext();
+
+ // m_pThreadLocalBlock points to the ThreadLocalBlock that corresponds to the
+ // AppDomain that the Thread is currently in
+
+ // m_pTLBTable points to this Thread's table of ThreadLocalBlocks, indexed by
+ // ADIndex. It's important to note that ADIndexes get recycled when AppDomains are
+ // torn down. m_TLBTableSize holds the current size of this Thread's TLB table.
+ // See "ThreadStatics.h" for more information.
+
+ PTR_ThreadLocalBlock m_pThreadLocalBlock;
+ PTR_PTR_ThreadLocalBlock m_pTLBTable;
+ SIZE_T m_TLBTableSize;
+
+ // This getter is used by SOS; if m_pThreadLocalBlock is NULL, it's
+ // important that we look in the TLB table as well
+ /*
+ PTR_ThreadLocalBlock GetThreadLocalBlock()
+ {
+ // If the current TLB pointer is NULL, search the TLB table
+ if (m_pThreadLocalBlock != NULL)
+ return m_pThreadLocalBlock;
+
+ ADIndex index = GetDomain()->GetIndex();
+
+ // Check to see if we have a ThreadLocalBlock for the current AppDomain,
+ if (index.m_dwIndex < m_TLBTableSize)
+ {
+ // Update the current ThreadLocalBlock pointer,
+ // but only on non-DAC builds
+#ifndef DACCESS_COMPILE
+ m_pThreadLocalBlock = m_pTLBTable[index.m_dwIndex];
+#endif
+ return m_pTLBTable[index.m_dwIndex];
+ }
+
+ return NULL;
+ }
+ */
+
+protected:
+
+ // Called during AD teardown to clean up any references this
+ // thread may have to the AppDomain
+ void DeleteThreadStaticData(AppDomain *pDomain);
+
+private:
+
+ // Called during Thread death to clean up all structures
+ // associated with thread statics
+ void DeleteThreadStaticData();
+
+#ifdef _DEBUG
+private:
+ // When we create an object, or create an OBJECTREF, or create an Interior Pointer, or enter EE from managed
+ // code, we will set this flag.
+ // Inside GCHeap::StressHeap, we only do GC if this flag is TRUE. Then we reset it to zero.
+ BOOL m_fStressHeapCount;
+public:
+ void EnableStressHeap()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_fStressHeapCount = TRUE;
+ }
+ void DisableStressHeap()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_fStressHeapCount = FALSE;
+ }
+ BOOL StressHeapIsEnabled()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fStressHeapCount;
+ }
+
+ size_t *m_pCleanedStackBase;
+#endif
+
+#ifdef STRESS_THREAD
+public:
+ LONG m_stressThreadCount;
+#endif
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+public:
+ IHostTask *m_pHostTask;
+#endif
+
+private:
+ PVOID m_pFiberData;
+
+ TASKID m_TaskId;
+ CONNID m_dwConnectionId;
+
+public:
+ void SetupFiberData();
+
+#ifdef _DEBUG
+public:
+ void AddFiberInfo(DWORD type);
+ enum {
+ ThreadTrackInfo_Lifetime=0x1, // creation, destruction, ref-count
+ ThreadTrackInfo_Schedule=0x2, // switch in/out
+ ThreadTrackInfo_UM_M=0x4, // Unmanaged <-> managed transition
+ ThreadTrackInfo_Abort=0x8, // Thread abort
+ ThreadTrackInfo_Affinity=0x10, // Thread's affinity
+ ThreadTrackInfo_GCMode=0x20,
+ ThreadTrackInfo_Escalation=0x40,// escalation point
+ ThreadTrackInfo_SO=0x80,
+ ThreadTrackInfo_Max=8
+ };
+private:
+ static int MaxThreadRecord;
+ static int MaxStackDepth;
+ static const int MaxThreadTrackInfo;
+ struct FiberSwitchInfo
+ {
+ unsigned __int64 timeStamp;
+ DWORD threadID;
+ size_t callStack[1];
+ };
+ FiberSwitchInfo *m_pFiberInfo[ThreadTrackInfo_Max];
+ DWORD m_FiberInfoIndex[ThreadTrackInfo_Max];
+#endif
+
+#ifdef DACCESS_COMPILE
+public:
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ void EnumMemoryRegionsWorker(CLRDataEnumMemoryFlags flags);
+#endif
+
+private:
+ // Head of a linked list of opaque records that record if and how the thread is currently preparing a
+ // graph of methods for CER usage. This is used to determine if a re-entrant preparation request should
+ // complete immediately as a no-op (because it would lead to an infinite recursion) or should proceed
+ // recursively.
+ MethodCallGraphPreparer * m_pCerPreparationState;
+
+public:
+ MethodCallGraphPreparer * GetCerPreparationState()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCerPreparationState;
+ }
+
+ void SetCerPreparationState(MethodCallGraphPreparer * pCerPreparationState)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pCerPreparationState = pCerPreparationState;
+ }
+
+ // Is the current thread currently executing within a constrained execution region?
+ static BOOL IsExecutingWithinCer();
+
+ // Determine whether the method at the given frame in the thread's execution stack is executing within a CER.
+ BOOL IsWithinCer(CrawlFrame *pCf);
+
+private:
+ // used to pad stack on thread creation to avoid aliasing penalty in P4 HyperThread scenarios
+
+ static DWORD __stdcall intermediateThreadProc(PVOID arg);
+ static int m_offset_counter;
+ static const int offset_multiplier = 128;
+
+ typedef struct {
+ LPTHREAD_START_ROUTINE lpThreadFunction;
+ PVOID lpArg;
+ } intermediateThreadParam;
+
+#ifdef _DEBUG
+// When the thread is doing a stress GC, some Crst violations can be ignored; this is an inelegant solution.
+private:
+ BOOL m_bGCStressing; // the flag to indicate if the thread is doing a stressing GC
+ BOOL m_bUniqueStacking; // the flag to indicate if the thread is doing a UniqueStack
+public:
+ BOOL GetGCStressing ()
+ {
+ return m_bGCStressing;
+ }
+ BOOL GetUniqueStacking ()
+ {
+ return m_bUniqueStacking;
+ }
+#endif
+
+private:
+ //-----------------------------------------------------------------------------
+ // AVInRuntimeImplOkay : it's okay to have an AV in the Runtime implementation
+ // while this holder is in effect.
+ //
+ // {
+ // AVInRuntimeImplOkayHolder foo;
+ // } // AVs in the Runtime become illegal again when the holder goes out of scope.
+ //-----------------------------------------------------------------------------
+ DWORD m_dwAVInRuntimeImplOkayCount;
+
+ static void AVInRuntimeImplOkayAcquire(Thread * pThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (pThread)
+ {
+ _ASSERTE(pThread->m_dwAVInRuntimeImplOkayCount != (DWORD)-1);
+ pThread->m_dwAVInRuntimeImplOkayCount++;
+ }
+ }
+
+ static void AVInRuntimeImplOkayRelease(Thread * pThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (pThread)
+ {
+ _ASSERTE(pThread->m_dwAVInRuntimeImplOkayCount > 0);
+ pThread->m_dwAVInRuntimeImplOkayCount--;
+ }
+ }
+
+public:
+ static BOOL AVInRuntimeImplOkay(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ Thread * pThread = GetThreadNULLOk();
+
+ if (pThread)
+ {
+ return (pThread->m_dwAVInRuntimeImplOkayCount > 0);
+ }
+ else
+ {
+ return FALSE;
+ }
+ }
+
+ class AVInRuntimeImplOkayHolder
+ {
+ Thread * const m_pThread;
+ public:
+ AVInRuntimeImplOkayHolder() :
+ m_pThread(GetThread())
+ {
+ LIMITED_METHOD_CONTRACT;
+ AVInRuntimeImplOkayAcquire(m_pThread);
+ }
+ AVInRuntimeImplOkayHolder(Thread * pThread) :
+ m_pThread(pThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+ AVInRuntimeImplOkayAcquire(m_pThread);
+ }
+ ~AVInRuntimeImplOkayHolder()
+ {
+ LIMITED_METHOD_CONTRACT;
+ AVInRuntimeImplOkayRelease(m_pThread);
+ }
+ };
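+ // Usage sketch (illustrative; the function and the risky access are hypothetical):
+ //
+ //     void ProbeRuntimeMemory()
+ //     {
+ //         AVInRuntimeImplOkayHolder okay;   // AVs in the Runtime tolerated from here
+ //         ... access that may legitimately fault ...
+ //     }                                     // re-armed when the holder destructs
+ //
+ // A fault filter can then consult Thread::AVInRuntimeImplOkay() to decide whether
+ // the faulting thread is inside such a scope.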
+
+#ifdef _DEBUG
+private:
+ DWORD m_dwUnbreakableLockCount;
+public:
+ void IncUnbreakableLockCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE (m_dwUnbreakableLockCount != (DWORD)-1);
+ m_dwUnbreakableLockCount ++;
+ }
+ void DecUnbreakableLockCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE (m_dwUnbreakableLockCount > 0);
+ m_dwUnbreakableLockCount --;
+ }
+ BOOL HasUnbreakableLock() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwUnbreakableLockCount != 0;
+ }
+ DWORD GetUnbreakableLockCount() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwUnbreakableLockCount;
+ }
+#endif // _DEBUG
+
+#ifdef _DEBUG
+private:
+ friend class FCallTransitionState;
+ friend class PermitHelperMethodFrameState;
+ friend class CompletedFCallTransitionState;
+ HelperMethodFrameCallerList *m_pHelperMethodFrameCallerList;
+#endif // _DEBUG
+
+private:
+ LONG m_dwHostTaskRefCount;
+
+private:
+ // If HasStarted fails, we cache the exception here, and rethrow on the thread which
+ // calls Thread.Start.
+ Exception* m_pExceptionDuringStartup;
+
+public:
+ void HandleThreadStartupFailure();
+
+#ifdef HAVE_GCCOVER
+private:
+ BYTE* m_pbDestCode;
+ BYTE* m_pbSrcCode;
+#ifdef _TARGET_X86_
+ LPVOID m_pLastAVAddress;
+#endif // _TARGET_X86_
+
+public:
+ void CommitGCStressInstructionUpdate();
+ void PostGCStressInstructionUpdate(BYTE* pbDestCode, BYTE* pbSrcCode)
+ {
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(!HasPendingGCStressInstructionUpdate());
+
+ m_pbDestCode = pbDestCode;
+ m_pbSrcCode = pbSrcCode;
+ }
+ bool HasPendingGCStressInstructionUpdate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK((NULL == m_pbDestCode) == (NULL == m_pbSrcCode));
+ return m_pbDestCode != NULL;
+ }
+ void ClearGCStressInstructionUpdate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(HasPendingGCStressInstructionUpdate());
+
+ m_pbDestCode = NULL;
+ m_pbSrcCode = NULL;
+ }
+#ifdef _TARGET_X86_
+ void SetLastAVAddress(LPVOID address)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pLastAVAddress = address;
+ }
+ LPVOID GetLastAVAddress()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pLastAVAddress;
+ }
+#endif // _TARGET_X86_
+#endif // HAVE_GCCOVER
+
+#if defined(_DEBUG) && defined(FEATURE_STACK_PROBE)
+ class ::BaseStackGuard;
+private:
+ // This field is used for debugging purposes to allow easy access to the stack guard
+ // chain and also in SO-tolerance checking to quickly determine if a guard is in place.
+ BaseStackGuard *m_pCurrentStackGuard;
+
+public:
+ BaseStackGuard *GetCurrentStackGuard()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCurrentStackGuard;
+ }
+
+ void SetCurrentStackGuard(BaseStackGuard *pGuard)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pCurrentStackGuard = pGuard;
+ }
+#endif
+
+private:
+ BOOL m_fCompletionPortDrained;
+public:
+ void MarkCompletionPortDrained()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockExchange ((LONG*)&m_fCompletionPortDrained, TRUE);
+ }
+ void UnmarkCompletionPortDrained()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockExchange ((LONG*)&m_fCompletionPortDrained, FALSE);
+ }
+ BOOL IsCompletionPortDrained()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fCompletionPortDrained;
+ }
+
+ // --------------------------------
+ // Store the maxReservedStackSize
+ // This is passed in from managed code in the thread constructor
+ // ---------------------------------
+private:
+ SIZE_T m_RequestedStackSize;
+
+public:
+
+ // Get the MaxStackSize
+ SIZE_T RequestedThreadStackSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_RequestedStackSize);
+ }
+
+ // Set the MaxStackSize
+ void RequestedThreadStackSize(SIZE_T requestedStackSize)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_RequestedStackSize = requestedStackSize;
+ }
+
+ static BOOL CheckThreadStackSize(SIZE_T *SizeToCommitOrReserve,
+ BOOL isSizeToReserve // When TRUE, the previous argument is the stack size to reserve.
+ // Otherwise, it is the size to commit.
+ );
+
+ static BOOL GetProcessDefaultStackSize(SIZE_T* reserveSize, SIZE_T* commitSize);
+
+private:
+ // YieldTask, ThreadAbort, and GC all change thread context. ThreadAbort and GC use the ThreadStore lock
+ // to synchronize, but YieldTask cannot block. We use a counter to allow only one thread at a time to change thread context.
+
+ Volatile<PVOID> m_WorkingOnThreadContext;
+
+ // Although this is a pointer, it is used as a flag to indicate the current context is unsafe
+ // to inspect. When NULL the context is safe to use, otherwise it points to the active patch skipper
+ // and the context is unsafe to use. When running a patch skipper we could be in one of two
+ // debug-only situations that the context inspecting/modifying code isn't generally prepared
+ // to deal with.
+ // a) We have set the IP to point somewhere in the patch skip table but have not yet run the
+ // instruction
+ // b) We executed the instruction in the patch skip table and now the IP could be anywhere
+ // The debugger may need to fix up the IP to compensate for the instruction being run
+ // from a different address.
+ VolatilePtr<DebuggerPatchSkip> m_debuggerActivePatchSkipper;
+
+public:
+ VOID BeginDebuggerPatchSkip(DebuggerPatchSkip* patchSkipper)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!m_debuggerActivePatchSkipper.Load());
+ FastInterlockExchangePointer(m_debuggerActivePatchSkipper.GetPointer(), patchSkipper);
+ _ASSERTE(m_debuggerActivePatchSkipper.Load());
+ }
+
+ VOID EndDebuggerPatchSkip()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_debuggerActivePatchSkipper.Load());
+ FastInterlockExchangePointer(m_debuggerActivePatchSkipper.GetPointer(), NULL);
+ _ASSERTE(!m_debuggerActivePatchSkipper.Load());
+ }
+
+private:
+
+ static BOOL EnterWorkingOnThreadContext(Thread *pThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if(pThread->m_debuggerActivePatchSkipper.Load() != NULL)
+ {
+ return FALSE;
+ }
+ if (CLRTaskHosted())
+ {
+ PVOID myID = ClrTeb::GetFiberPtrId();
+ PVOID id = FastInterlockCompareExchangePointer(pThread->m_WorkingOnThreadContext.GetPointer(), myID, NULL);
+ return id == NULL || id == myID;
+ }
+ else
+ {
+ return TRUE;
+ }
+ }
+
+ static void LeaveWorkingOnThreadContext(Thread *pThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (pThread->m_WorkingOnThreadContext == ClrTeb::GetFiberPtrId())
+ {
+ pThread->m_WorkingOnThreadContext = NULL;
+ }
+ }
+
+ typedef ConditionalStateHolder<Thread *, Thread::EnterWorkingOnThreadContext, Thread::LeaveWorkingOnThreadContext> WorkingOnThreadContextHolder;
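+ // Usage sketch (illustrative): the conditional holder only acquires when
+ // EnterWorkingOnThreadContext succeeds, so callers are expected to test it
+ // (via the holder's Acquired() accessor, assuming the ConditionalStateHolder
+ // contract) before touching the target thread's context:
+ //
+ //     WorkingOnThreadContextHolder workingOnCtx(pTargetThread);
+ //     if (workingOnCtx.Acquired())
+ //     {
+ //         ... safe to inspect or modify pTargetThread's context ...
+ //     }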
+
+ BOOL WorkingOnThreadContext()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return !CLRTaskHosted() || m_WorkingOnThreadContext == ClrTeb::GetFiberPtrId();
+ }
+
+public:
+ void PrepareThreadForSOWork()
+ {
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_HIJACK
+ UnhijackThread();
+#endif // FEATURE_HIJACK
+ ResetThrowControlForThread();
+
+ // Since this Thread has taken an SO, there may be state left over after we
+ // short-circuited exception or other error handling, so we don't want
+ // to risk recycling it.
+ SetThreadStateNC(TSNC_CannotRecycle);
+ }
+
+ void SetSOWorkNeeded()
+ {
+ SetThreadStateNC(TSNC_SOWorkNeeded);
+ }
+
+ BOOL IsSOWorkNeeded()
+ {
+ return HasThreadStateNC(TSNC_SOWorkNeeded);
+ }
+
+ void FinishSOWork();
+
+ void ClearExceptionStateAfterSO(void* pStackFrameSP)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // Clear any stale exception state.
+ m_ExceptionState.ClearExceptionStateAfterSO(pStackFrameSP);
+ }
+
+private:
+ BOOL m_fAllowProfilerCallbacks;
+
+public:
+ //
+ // These two methods are for profiler support. The profiler clears the allowed
+ // value once it has delivered a ThreadDestroyed callback, so that it does not
+ // deliver any notifications to the profiler afterwards which reference this
+ // thread. Callbacks on this thread which do not reference this thread are
+ // allowable.
+ //
+ BOOL ProfilerCallbacksAllowed(void)
+ {
+ return m_fAllowProfilerCallbacks;
+ }
+
+ void SetProfilerCallbacksAllowed(BOOL fValue)
+ {
+ m_fAllowProfilerCallbacks = fValue;
+ }
+
+private:
+ //
+ // This context is used for optimizations on I/O thread pool threads. If the
+ // overlapped structure comes from a different appdomain, it is stored here
+ // so it can be processed correctly later by entering the right domain.
+ PVOID m_pIOCompletionContext;
+ BOOL AllocateIOCompletionContext();
+ VOID FreeIOCompletionContext();
+public:
+ inline PVOID GetIOCompletionContext()
+ {
+ return m_pIOCompletionContext;
+ }
+
+private:
+ // Inside a host, we don't own a thread handle, and we avoid the DuplicateHandle call.
+ // If a thread is dying after we obtain the thread handle, our SuspendThread may fail
+ // because the handle may be closed and reused for a completely different type of handle.
+ // To solve this problem, we have a counter m_dwThreadHandleBeingUsed. Before we grab
+ // the thread handle, we increment the counter. Before we return a thread back to SQL
+ // in Reset and ExitTask, we wait until the counter drops to 0.
+ Volatile<LONG> m_dwThreadHandleBeingUsed;
+
+
+private:
+ static BOOL s_fCleanFinalizedThread;
+
+public:
+#ifndef DACCESS_COMPILE
+ static void SetCleanupNeededForFinalizedThread()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE (IsFinalizerThread());
+ s_fCleanFinalizedThread = TRUE;
+ }
+#endif //!DACCESS_COMPILE
+
+ static BOOL CleanupNeededForFinalizedThread()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return s_fCleanFinalizedThread;
+ }
+
+private:
+ // When we create a throwable for an exception, we need to run managed code.
+ // If the same type of exception is thrown while creating the managed object (e.g. InvalidProgramException),
+ // we could recurse infinitely.
+ Exception *m_pCreatingThrowableForException;
+ friend OBJECTREF CLRException::GetThrowable();
+
+#ifdef _DEBUG
+private:
+ int m_dwDisableAbortCheckCount; // Disable check before calling managed code.
+ // !!! Use this very carefully. If managed code runs user code
+ // !!! or blocks on locks, the thread may not be aborted.
+public:
+ static void DisableAbortCheck()
+ {
+ WRAPPER_NO_CONTRACT;
+ Thread *pThread = GetThread();
+ FastInterlockIncrement((LONG*)&pThread->m_dwDisableAbortCheckCount);
+ }
+ static void EnableAbortCheck()
+ {
+ WRAPPER_NO_CONTRACT;
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread->m_dwDisableAbortCheckCount > 0);
+ FastInterlockDecrement((LONG*)&pThread->m_dwDisableAbortCheckCount);
+ }
+
+ BOOL IsAbortCheckDisabled()
+ {
+ return m_dwDisableAbortCheckCount > 0;
+ }
+
+ typedef StateHolder<Thread::DisableAbortCheck, Thread::EnableAbortCheck> DisableAbortCheckHolder;
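+ // Usage sketch (illustrative, debug-only): suppress the abort check around a
+ // short call into managed code that is known not to run user code or block:
+ //
+ //     {
+ //         Thread::DisableAbortCheckHolder suppressCheck;
+ //         ... brief call into managed code ...
+ //     }   // EnableAbortCheck runs on scope exit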
+#endif
+
+private:
+ // At the end of a catch, we may raise a ThreadAbortException. If the catch clause set the IP to resume in the
+ // corresponding try block, our exception system would execute the same catch clause again and again.
+ // So we save a reference to the clause after which the ThreadAbort was re-raised; ExceptionTracker::ProcessManagedCallFrame
+ // uses it to make the ThreadAbort proceed instead of looping.
+ // This problem only happens on Win64 due to JIT64. The common scenario is VB's "On Error Resume Next".
+#ifdef WIN64EXCEPTIONS
+ DWORD m_dwIndexClauseForCatch;
+ StackFrame m_sfEstablisherOfActualHandlerFrame;
+#endif // WIN64EXCEPTIONS
+
+public:
+ // Holds per-thread information the debugger uses to expose locking information
+ // See ThreadDebugBlockingInfo.h for more details
+ ThreadDebugBlockingInfo DebugBlockingInfo;
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ // For the purposes of tracking resource usage we implement a simple cpu resource usage counter on each
+ // thread. Every time QueryThreadProcessorUsage() is invoked it returns the amount of cpu time (a
+ // combination of user and kernel mode time) used since the last call to QueryThreadProcessorUsage(). The
+ // result is in 100 nanosecond units.
+ ULONGLONG QueryThreadProcessorUsage();
+
+private:
+ // The amount of processor time (both user and kernel) in 100ns units used by this thread at the time of
+ // the last call to QueryThreadProcessorUsage().
+ ULONGLONG m_ullProcessorUsageBaseline;
+#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+ // Disables pumping and thread join in RCW creation
+ bool m_fDisableComObjectEagerCleanup;
+
+private:
+ CLRRandom m_random;
+
+public:
+ CLRRandom* GetRandom() {return &m_random;}
+
+#ifdef FEATURE_COMINTEROP
+private:
+ // Cookie returned from CoRegisterInitializeSpy
+ ULARGE_INTEGER m_uliInitializeSpyCookie;
+
+ // True if m_uliInitializeSpyCookie is valid
+ bool m_fInitializeSpyRegistered;
+
+ // The last STA COM context we saw - used to speed up RCW creation
+ LPVOID m_pLastSTACtxCookie;
+
+public:
+ inline void RevokeApartmentSpy();
+ inline LPVOID GetLastSTACtxCookie(BOOL *pfNAContext);
+ inline void SetLastSTACtxCookie(LPVOID pCtxCookie, BOOL fNAContext);
+#endif // FEATURE_COMINTEROP
+
+private:
+ // This duplicates the ThreadType_GC bit stored in TLS (TlsIdx_ThreadType). It exists
+ // so that any thread can query whether any other thread is a "GC Special" thread.
+ // (In contrast, ::IsGCSpecialThread() only gives this info about the currently
+ // executing thread.) The Profiling API uses this to determine whether it should
+ // "hide" the thread from profilers. GC Special threads (in particular the bgc
+ // thread) need to be hidden from profilers because the bgc thread creation path
+ // occurs while the EE is suspended, and while the thread that's suspending the
+ // runtime is waiting for the bgc thread to signal an event. The bgc thread cannot
+ // switch to preemptive mode and call into a profiler at this time, or else a
+ // deadlock will result when toggling back to cooperative mode (bgc thread toggling
+ // to coop will block due to the suspension, and the thread suspending the runtime
+ // continues to block waiting for the bgc thread to signal its creation events).
+ // Furthermore, profilers have no need to be aware of GC special threads anyway,
+ // since managed code never runs on them.
+ bool m_fGCSpecial;
+
+public:
+ // Profiling API uses this to determine whether it should hide this thread from the
+ // profiler.
+ bool IsGCSpecial();
+
+ // GC calls this when creating special threads that also happen to have an EE Thread
+ // object associated with them (e.g., the bgc thread).
+ void SetGCSpecial(bool fGCSpecial);
+
+#if !defined(FEATURE_CORECLR)
+private:
+ WORD m_wCPUGroup;
+ DWORD_PTR m_pAffinityMask;
+#endif
+
+public:
+ void ChooseThreadCPUGroupAffinity();
+ void ClearThreadCPUGroupAffinity();
+
+private:
+ // Per thread table used to implement allocation sampling.
+ AllLoggedTypes * m_pAllLoggedTypes;
+
+public:
+ AllLoggedTypes * GetAllocationSamplingTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pAllLoggedTypes;
+ }
+
+ void SetAllocationSamplingTable(AllLoggedTypes * pAllLoggedTypes)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Assert if we try to set m_pAllLoggedTypes to a non-NULL value when it is already non-NULL,
+ // as that would imply a memory leak.
+ _ASSERTE(pAllLoggedTypes != NULL ? m_pAllLoggedTypes == NULL : TRUE);
+ m_pAllLoggedTypes = pAllLoggedTypes;
+ }
+};
+
+// End of class Thread
+
+
+LCID GetThreadCultureIdNoThrow(Thread *pThread, BOOL bUICulture);
+
+// Request/Remove Thread Affinity for the current thread
+typedef StateHolder<Thread::BeginThreadAffinityAndCriticalRegion, Thread::EndThreadAffinityAndCriticalRegion> ThreadAffinityAndCriticalRegionHolder;
+typedef StateHolder<Thread::BeginThreadAffinity, Thread::EndThreadAffinity> ThreadAffinityHolder;
+typedef Thread::ForbidSuspendThreadHolder ForbidSuspendThreadHolder;
+typedef Thread::ThreadPreventAsyncHolder ThreadPreventAsyncHolder;
+typedef Thread::ThreadPreventAbortHolder ThreadPreventAbortHolder;
+typedef StateHolder<Thread::ReverseEnterRuntime, Thread::ReverseLeaveRuntime> ReverseEnterRuntimeHolder;
+
+// Combines ForbidSuspendThreadHolder and CrstHolder into one.
+class ForbidSuspendThreadCrstHolder
+{
+public:
+ // Note: member initialization is intentionally ordered.
+ ForbidSuspendThreadCrstHolder(CrstBase * pCrst)
+ : m_forbid_suspend_holder()
+ , m_lock_holder(pCrst)
+ { WRAPPER_NO_CONTRACT; }
+
+private:
+ ForbidSuspendThreadHolder m_forbid_suspend_holder;
+ CrstHolder m_lock_holder;
+};
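+// Usage sketch (illustrative; g_pSomeCrst is a hypothetical lock). Entering the
+// forbid-suspend region before taking the lock means the suspending thread cannot
+// stop us while we hold it:
+//
+//     {
+//         ForbidSuspendThreadCrstHolder lock(g_pSomeCrst);
+//         ... short critical section ...
+//     }   // lock released first, then the forbid-suspend region is exited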
+
+// Non-throwing flavor of ReverseEnterRuntimeHolder that requires explicit call to AcquireNoThrow to acquire
+class ReverseEnterRuntimeHolderNoThrow : StateHolder<DoNothing, Thread::ReverseLeaveRuntime>
+{
+public:
+ ReverseEnterRuntimeHolderNoThrow()
+ : StateHolder<DoNothing, Thread::ReverseLeaveRuntime>(FALSE)
+ {
+ }
+
+ HRESULT AcquireNoThrow()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ HRESULT hr = Thread::ReverseEnterRuntimeNoThrow();
+ if (SUCCEEDED(hr))
+ Acquire();
+ return hr;
+ }
+};
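+// Usage sketch (illustrative):
+//
+//     ReverseEnterRuntimeHolderNoThrow reh;
+//     HRESULT hr = reh.AcquireNoThrow();
+//     if (FAILED(hr))
+//         return hr;            // nothing to release; Acquire() never ran
+//     ... work inside the runtime ...
+//                               // ReverseLeaveRuntime runs on scope exit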
+
+ETaskType GetCurrentTaskType();
+
+class LeaveRuntimeHolder
+{
+public:
+ template <typename T>
+ LeaveRuntimeHolder(T target)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ if (!CLRTaskHosted())
+ return;
+
+ Thread::LeaveRuntime((size_t)target);
+ }
+
+ ~LeaveRuntimeHolder()
+ {
+ STATIC_CONTRACT_WRAPPER;
+ if (!CLRTaskHosted())
+ return;
+
+ Thread::EnterRuntime();
+ }
+};
+
+class LeaveRuntimeHolderNoThrow
+{
+public:
+ template <typename T>
+ LeaveRuntimeHolderNoThrow(T target)
+ {
+ STATIC_CONTRACT_WRAPPER;
+ if (!CLRTaskHosted())
+ {
+ hr = S_OK;
+ return;
+ }
+
+ hr = Thread::LeaveRuntimeNoThrow((size_t)target);
+ }
+
+ ~LeaveRuntimeHolderNoThrow()
+ {
+ STATIC_CONTRACT_WRAPPER;
+ if (!CLRTaskHosted())
+ {
+ hr = S_OK;
+ return;
+ }
+
+ hr = Thread::EnterRuntimeNoThrow();
+ }
+
+ HRESULT GetHR() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return hr;
+ }
+
+private:
+ HRESULT hr;
+};
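+// Usage sketch (illustrative): notify the host before a blocking unmanaged call
+// and check the HRESULT instead of relying on an exception:
+//
+//     {
+//         LeaveRuntimeHolderNoThrow holder((size_t)::SleepEx);
+//         if (FAILED(holder.GetHR()))
+//             return holder.GetHR();
+//         ::SleepEx(dwTimeout, FALSE);   // blocking call outside the runtime
+//     }   // destructor calls EnterRuntimeNoThrow on the way back in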
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+IHostTask *GetCurrentHostTask();
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+
+typedef Thread::AVInRuntimeImplOkayHolder AVInRuntimeImplOkayHolder;
+
+BOOL RevertIfImpersonated(BOOL *bReverted, HANDLE *phToken, ThreadAffinityHolder *pTAHolder);
+void UndoRevert(BOOL bReverted, HANDLE hToken);
+
+// ---------------------------------------------------------------------------
+//
+// The ThreadStore manages all the threads in the system.
+//
+// There is one ThreadStore in the system, available through
+// ThreadStore::s_pThreadStore.
+// ---------------------------------------------------------------------------
+
+typedef SList<Thread, false, PTR_Thread> ThreadList;
+
+
+// The ThreadStore is a singleton class
+#define CHECK_ONE_STORE() _ASSERTE(this == ThreadStore::s_pThreadStore);
+
+typedef DPTR(class ThreadStore) PTR_ThreadStore;
+typedef DPTR(class ExceptionTracker) PTR_ExceptionTracker;
+
+class ThreadStore
+{
+ friend class Thread;
+ friend class ThreadSuspend;
+ friend Thread* SetupThread(BOOL);
+ friend class AppDomain;
+#ifdef DACCESS_COMPILE
+ friend class ClrDataAccess;
+ friend Thread* __stdcall DacGetThread(ULONG32 osThreadID);
+#endif
+
+public:
+
+ ThreadStore();
+
+ static void InitThreadStore();
+ static void LockThreadStore();
+ static void UnlockThreadStore();
+
+ // Add a Thread to the ThreadStore
+ // WARNING : only GC calls this with bRequiresTSL set to FALSE.
+ static void AddThread(Thread *newThread, BOOL bRequiresTSL=TRUE);
+
+ // RemoveThread finds the thread in the ThreadStore and discards it.
+ static BOOL RemoveThread(Thread *target);
+
+ // Transfer a thread from the unstarted to the started list.
+ // WARNING : only GC calls this with bRequiresTSL set to FALSE.
+ static void TransferStartedThread(Thread *target, BOOL bRequiresTSL=TRUE);
+
+ // Before using the thread list, be sure to take the critical section. Otherwise
+ // it can change underneath you, perhaps leading to an exception after Remove.
+ // Prev==NULL to get the first entry in the list.
+ static Thread *GetAllThreadList(Thread *Prev, ULONG mask, ULONG bits);
+ static Thread *GetThreadList(Thread *Prev);
+
+ // Every EE process can lazily create a GUID that uniquely identifies it (for
+ // purposes of remoting).
+ const GUID &GetUniqueEEId();
+
+ // We shut down the EE when the last non-background thread terminates. This event
+ // is used to signal the main thread when this condition occurs.
+ void WaitForOtherThreads();
+ static void CheckForEEShutdown();
+ CLREvent m_TerminationEvent;
+
+ // Have all the foreground threads completed? In other words, can we release
+ // the main thread?
+ BOOL OtherThreadsComplete()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_ThreadCount - m_UnstartedThreadCount - m_DeadThreadCount - Thread::m_ActiveDetachCount + m_PendingThreadCount >= m_BackgroundThreadCount);
+
+ return (m_ThreadCount - m_UnstartedThreadCount - m_DeadThreadCount
+ - Thread::m_ActiveDetachCount + m_PendingThreadCount
+ == m_BackgroundThreadCount);
+ }
+
+ // If you want to trap threads re-entering the EE (be this for GC, or debugging,
+ // or Thread.Suspend() or whatever), you need to call TrapReturningThreads(TRUE). When
+ // you are finished snagging threads, call TrapReturningThreads(FALSE). This
+ // counts internally.
+ //
+ // Of course, you must also fix RareDisablePreemptiveGC to do the right thing
+ // when the trap occurs.
+ static void TrapReturningThreads(BOOL yes);
+
+private:
+
+ // Enter and leave the critical section around the thread store. Clients should
+ // use LockThreadStore and UnlockThreadStore.
+ void Enter();
+ void Leave();
+
+ // Critical section for adding and removing threads to the store
+ Crst m_Crst;
+
+ // List of all the threads known to the ThreadStore (started & unstarted).
+ ThreadList m_ThreadList;
+
+ // m_ThreadCount is the count of all threads in m_ThreadList. This includes
+ // background threads / unstarted threads / whatever.
+ //
+ // m_UnstartedThreadCount is the subset of m_ThreadCount that have not yet been
+ // started.
+ //
+ // m_BackgroundThreadCount is the subset of m_ThreadCount that have been started
+ // but which are running in the background. So this is a misnomer in the sense
+ // that unstarted background threads are not reflected in this count.
+ //
+ // m_PendingThreadCount is used to solve a race condition. The main thread could
+ // start another thread running and then exit. The main thread might then start
+ // tearing down the EE before the new thread moves itself out of
+ // m_UnstartedThreadCount in TransferStartedThread. This count is atomically bumped
+ // in CreateNewThread, and atomically reduced within a locked thread store.
+ //
+ // m_DeadThreadCount is the subset of m_ThreadCount which have died. The Win32
+ // thread has disappeared, but something (like the exposed object) has kept the
+ // refcount non-zero so we can't destruct yet.
+ //
+ // m_MaxThreadCount is the maximum value of m_ThreadCount, i.e. the largest number
+ // of simultaneously active threads.
+
+protected:
+ LONG m_ThreadCount;
+ LONG m_MaxThreadCount;
+public:
+ LONG ThreadCountInEE ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_ThreadCount;
+ }
+#if defined(_DEBUG) || defined(DACCESS_COMPILE)
+ LONG MaxThreadCountInEE ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_MaxThreadCount;
+ }
+#endif
+private:
+ LONG m_UnstartedThreadCount;
+ LONG m_BackgroundThreadCount;
+ LONG m_PendingThreadCount;
+
+ LONG m_DeadThreadCount;
+
+private:
+ // Space for the lazily-created GUID.
+ GUID m_EEGuid;
+ BOOL m_GuidCreated;
+
+ // Even in the release product, we need to know what thread holds the lock on
+ // the ThreadStore. This is so we never deadlock when the GC thread halts a
+ // thread that holds this lock.
+ Thread *m_HoldingThread;
+ EEThreadId m_holderthreadid; // current holder (or NULL)
+
+public:
+
+ static BOOL HoldingThreadStore()
+ {
+ WRAPPER_NO_CONTRACT;
+ // Note that GetThread() may be 0 if it is the debugger thread
+ // or perhaps a concurrent GC thread.
+ return HoldingThreadStore(GetThread());
+ }
+
+ static BOOL HoldingThreadStore(Thread *pThread);
+
+#ifdef DACCESS_COMPILE
+ static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ SPTR_DECL(ThreadStore, s_pThreadStore);
+
+#ifdef _DEBUG
+public:
+ BOOL DbgFindThread(Thread *target);
+ LONG DbgBackgroundThreadCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_BackgroundThreadCount;
+ }
+
+ BOOL IsCrstForThreadStore (const CrstBase* const pCrstBase)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (void *)pCrstBase == (void*)&m_Crst;
+ }
+
+#endif
+private:
+ static CONTEXT *s_pOSContext;
+public:
+ // We cannot do any memory allocation after we suspend a thread, in order to
+ // avoid a deadlock situation.
+ static void AllocateOSContext();
+ static CONTEXT *GrabOSContext();
+
+private:
+ // Thread abort needs to walk the stack to decide if the abort can proceed.
+ // It is unsafe to crawl the stack of a thread while that thread is OS-suspended, which is what we do during
+ // thread abort. For example, Thread T1 aborts thread T2, so T2 is suspended by T1. Inside SQL
+ // this means that no thread sharing the same scheduler with T2 can run. If T1 needs a lock which
+ // is owned by a thread on that scheduler, T1 will wait forever.
+ // Our solution is to move T2 to a safe point, resume it, and then do the stack crawl.
+ static CLREvent *s_pWaitForStackCrawlEvent;
+public:
+ static void WaitForStackCrawlEvent()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+ s_pWaitForStackCrawlEvent->Wait(INFINITE,FALSE);
+ }
+ static void SetStackCrawlEvent()
+ {
+ LIMITED_METHOD_CONTRACT;
+ s_pWaitForStackCrawlEvent->Set();
+ }
+ static void ResetStackCrawlEvent()
+ {
+ LIMITED_METHOD_CONTRACT;
+ s_pWaitForStackCrawlEvent->Reset();
+ }
+};
+
+struct TSSuspendHelper {
+ static void SetTrap() { ThreadStore::TrapReturningThreads(TRUE); }
+ static void UnsetTrap() { ThreadStore::TrapReturningThreads(FALSE); }
+};
+typedef StateHolder<TSSuspendHelper::SetTrap, TSSuspendHelper::UnsetTrap> TSSuspendHolder;
+
+typedef StateHolder<ThreadStore::LockThreadStore,ThreadStore::UnlockThreadStore> ThreadStoreLockHolder;
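+// Usage sketch (illustrative): hold the ThreadStore lock for the duration of a
+// thread-list walk:
+//
+//     {
+//         ThreadStoreLockHolder tsl;
+//         Thread *pThread = NULL;
+//         while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+//         {
+//             ... inspect pThread ...
+//         }
+//     }   // UnlockThreadStore runs on scope exit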
+
+#endif
+
+// This class dispenses small thread ids for the thin lock mechanism.
+// Recently we started using this class to dispense domain neutral module IDs as well.
+class IdDispenser
+{
+private:
+ DWORD m_highestId; // highest id given out so far
+ SIZE_T m_recycleBin; // link list to chain all ids returning to us
+ Crst m_Crst; // lock to protect our data structures
+ DPTR(PTR_Thread) m_idToThread; // map thread ids to threads
+ DWORD m_idToThreadCapacity; // capacity of the map
+
+ void GrowIdToThread()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ DWORD newCapacity = m_idToThreadCapacity == 0 ? 16 : m_idToThreadCapacity*2;
+ Thread **newIdToThread = new Thread*[newCapacity];
+
+ newIdToThread[0] = NULL;
+
+ for (DWORD i = 1; i < m_idToThreadCapacity; i++)
+ {
+ newIdToThread[i] = m_idToThread[i];
+ }
+ for (DWORD j = m_idToThreadCapacity; j < newCapacity; j++)
+ {
+ newIdToThread[j] = NULL;
+ }
+ delete[] m_idToThread;
+ m_idToThread = newIdToThread;
+ m_idToThreadCapacity = newCapacity;
+#else
+ DacNotImpl();
+#endif // !DACCESS_COMPILE
+
+ }
+
+public:
+ IdDispenser() :
+ // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
+ // If you remove this flag, we will switch to preemptive mode when entering
+ // m_Crst, which means all functions that enter it will become
+ // GC_TRIGGERS. (This includes all uses of CrstHolder.) So be sure
+ // to update the contracts if you remove this flag.
+ m_Crst(CrstThreadIdDispenser, CRST_UNSAFE_ANYMODE)
+ {
+ WRAPPER_NO_CONTRACT;
+ m_highestId = 0;
+ m_recycleBin = 0;
+ m_idToThreadCapacity = 0;
+ m_idToThread = NULL;
+ }
+
+ ~IdDispenser()
+ {
+ LIMITED_METHOD_CONTRACT;
+ delete[] m_idToThread;
+ }
+
+ bool IsValidId(DWORD id)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (id > 0) && (id <= m_highestId);
+ }
+
+ void NewId(Thread *pThread, DWORD & newId)
+ {
+#ifndef DACCESS_COMPILE
+ WRAPPER_NO_CONTRACT;
+ DWORD result;
+ CrstHolder ch(&m_Crst);
+
+ if (m_recycleBin != 0)
+ {
+ _ASSERTE(FitsIn<DWORD>(m_recycleBin));
+ result = static_cast<DWORD>(m_recycleBin);
+ m_recycleBin = reinterpret_cast<SIZE_T>(m_idToThread[m_recycleBin]);
+ }
+ else
+ {
+ // we make sure ids don't wrap around - before they would, we keep returning the highest
+ // possible one and rely on our caller to detect this situation
+ if (m_highestId + 1 > m_highestId)
+ m_highestId = m_highestId + 1;
+ result = m_highestId;
+ if (result >= m_idToThreadCapacity)
+ GrowIdToThread();
+ }
+
+ _ASSERTE(result < m_idToThreadCapacity);
+ newId = result;
+ if (result < m_idToThreadCapacity)
+ m_idToThread[result] = pThread;
+
+#else
+ DacNotImpl();
+#endif // !DACCESS_COMPILE
+ }
+
+ void DisposeId(DWORD id)
+ {
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+ CrstHolder ch(&m_Crst);
+
+ _ASSERTE(IsValidId(id));
+ if (id == m_highestId)
+ {
+ m_highestId--;
+ }
+ else
+ {
+ m_idToThread[id] = reinterpret_cast<PTR_Thread>(m_recycleBin);
+ m_recycleBin = id;
+#ifdef _DEBUG
+ size_t index = (size_t)m_idToThread[id];
+ while (index != 0)
+ {
+ _ASSERTE(index != id);
+ index = (size_t)m_idToThread[index];
+ }
+#endif
+ }
+#else
+ DacNotImpl();
+#endif // !DACCESS_COMPILE
+ }
+
+ Thread *IdToThread(DWORD id)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CrstHolder ch(&m_Crst);
+
+ Thread *result = NULL;
+ if (id <= m_highestId)
+ result = m_idToThread[id];
+ // m_idToThread may have Thread*, or the next free slot
+ _ASSERTE ((size_t)result > m_idToThreadCapacity);
+
+ return result;
+ }
+
+ Thread *IdToThreadWithValidation(DWORD id)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ CrstHolder ch(&m_Crst);
+
+ Thread *result = NULL;
+ if (id <= m_highestId)
+ result = m_idToThread[id];
+ // m_idToThread may have Thread*, or the next free slot
+ if ((size_t)result <= m_idToThreadCapacity)
+ result = NULL;
+ _ASSERTE(result == NULL || ((size_t)result & 0x3) == 0 || ((Thread*)result)->GetThreadId() == id);
+ return result;
+ }
+};
+typedef DPTR(IdDispenser) PTR_IdDispenser;
+
+#ifndef CROSSGEN_COMPILE
+
+// Dispenser of small thread ids for thin lock mechanism
+GPTR_DECL(IdDispenser,g_pThinLockThreadIdDispenser);
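+// Usage sketch (illustrative; error handling elided). Ids are handed out under the
+// dispenser's internal Crst and recycled through DisposeId:
+//
+//     DWORD id;
+//     g_pThinLockThreadIdDispenser->NewId(pThread, id);
+//     _ASSERTE(g_pThinLockThreadIdDispenser->IsValidId(id));
+//     ...
+//     g_pThinLockThreadIdDispenser->DisposeId(id);   // returns id to the recycle bin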
+
+// forward declaration
+DWORD MsgWaitHelper(int numWaiters, HANDLE* phEvent, BOOL bWaitAll, DWORD millis, BOOL alertable = FALSE);
+
+// When a thread is being created after a debug suspension has started, it sends an event up to the
+// debugger. Afterwards, with the Debugger Lock still held, it will check to see if we had already asked to suspend the
+// Runtime. If we have, then it will turn around and call this to set the debug suspend pending flag on the newly
+// created thread, since it was missed by SysStartSuspendForDebug as it didn't exist when that function was run.
+//
+inline void Thread::MarkForDebugSuspend(void)
+{
+ WRAPPER_NO_CONTRACT;
+ if (!(m_State & TS_DebugSuspendPending))
+ {
+ FastInterlockOr((ULONG *) &m_State, TS_DebugSuspendPending);
+ ThreadStore::TrapReturningThreads(TRUE);
+ }
+}
+
+// Debugger per-thread flag for enabling notification on "manual"
+// method calls, for stepping logic.
+
+inline void Thread::IncrementTraceCallCount()
+{
+ WRAPPER_NO_CONTRACT;
+ FastInterlockIncrement(&m_TraceCallCount);
+ ThreadStore::TrapReturningThreads(TRUE);
+}
+
+inline void Thread::DecrementTraceCallCount()
+{
+ WRAPPER_NO_CONTRACT;
+ ThreadStore::TrapReturningThreads(FALSE);
+ FastInterlockDecrement(&m_TraceCallCount);
+}
+
+// When we enter an Object.Wait() we are logically inside the synchronized
+// region of that object. Of course, we've actually completely left the region,
+// or else nobody could Notify us. But if we throw ThreadInterruptedException to
+// break out of the Wait, all the catchers are going to expect the synchronized
+// state to be correct. So we carry it around in case we need to restore it.
+struct PendingSync
+{
+ LONG m_EnterCount;
+ WaitEventLink *m_WaitEventLink;
+#ifdef _DEBUG
+ Thread *m_OwnerThread;
+#endif
+
+ PendingSync(WaitEventLink *s) : m_WaitEventLink(s)
+ {
+ WRAPPER_NO_CONTRACT;
+#ifdef _DEBUG
+ m_OwnerThread = GetThread();
+#endif
+ }
+ void Restore(BOOL bRemoveFromSB);
+};
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+
+//
+// Tracking of unmanaged locks has very low value. It is only
+// exposed via SQL hosting interfaces. The hosts cannot really
+// do anything interesting with it because the unmanaged locks
+// are always taken with holders in the VM, and hosts can keep
+// track of the unmanaged locks taken via the hosting API. We should
+// consider getting rid of it in the next SxS version.
+//
+
+#define INCTHREADLOCKCOUNT() \
+{ \
+ /* IncLockCount() asserts GetThread() == this */ \
+ BEGIN_GETTHREAD_ALLOWED_IN_NO_THROW_REGION; \
+ Thread *thread = GetThread(); \
+ if (thread) \
+ thread->IncLockCount(); \
+ END_GETTHREAD_ALLOWED_IN_NO_THROW_REGION; \
+}
+
+#define INCTHREADLOCKCOUNTTHREAD(thread) \
+{ \
+ /* IncLockCount() asserts GetThread() == this */ \
+ if (thread) \
+ (thread)->IncLockCount(); \
+}
+
+#define DECTHREADLOCKCOUNT( ) \
+{ \
+ /* IncLockCount() asserts GetThread() == this */ \
+ BEGIN_GETTHREAD_ALLOWED_IN_NO_THROW_REGION; \
+ Thread *thread = GetThread(); \
+ if (thread) \
+ thread->DecLockCount(); \
+ END_GETTHREAD_ALLOWED_IN_NO_THROW_REGION; \
+}
+
+#define DECTHREADLOCKCOUNTTHREAD(thread) \
+{ \
+ /* IncLockCount() asserts GetThread() == this */ \
+ if (thread) \
+ (thread)->DecLockCount(); \
+}
+
+#else
+
+#define INCTHREADLOCKCOUNT() { }
+#define DECTHREADLOCKCOUNT() { }
+#define INCTHREADLOCKCOUNTTHREAD(thread) { }
+#define DECTHREADLOCKCOUNTTHREAD(thread) { }
+
+#endif
+
+// --------------------------------------------------------------------------------
+// GCHolder is used to implement the normal GCX_ macros.
+//
+// GCHolder is normally used indirectly through GCX_ convenience macros, but can be used
+// directly if needed (e.g. due to multiple holders in one scope, or to use
+// in class definitions).
+//
+// GCHolder (or derived types) should only be instantiated as automatic variables
+// --------------------------------------------------------------------------------
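+// For example (illustrative): direct use, for the rare cases where the GCX_
+// convenience macros cannot be used, looks like this:
+//
+//     void DoWork()
+//     {
+//         GCCoop gcCoop;        // switch this thread to cooperative mode
+//         ... manipulate OBJECTREFs ...
+//     }                         // destructor restores the original GC mode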
+
+#ifdef ENABLE_CONTRACTS_IMPL
+#define GCHOLDER_CONTRACT_ARGS_NoDtor , false, szConstruct, szFunction, szFile, lineNum
+#define GCHOLDER_CONTRACT_ARGS_HasDtor , true, szConstruct, szFunction, szFile, lineNum
+#define GCHOLDER_DECLARE_CONTRACT_ARGS_BARE \
+ const char * szConstruct = "Unknown" \
+ , const char * szFunction = "Unknown" \
+ , const char * szFile = "Unknown" \
+ , int lineNum = 0
+#define GCHOLDER_DECLARE_CONTRACT_ARGS , GCHOLDER_DECLARE_CONTRACT_ARGS_BARE
+#define GCHOLDER_DECLARE_CONTRACT_ARGS_INTERNAL , bool fPushStackRecord = true, GCHOLDER_DECLARE_CONTRACT_ARGS_BARE
+
+#define GCHOLDER_SETUP_CONTRACT_STACK_RECORD(mode) \
+ m_fPushedRecord = false; \
+ \
+ if (fPushStackRecord && conditional) \
+ { \
+ m_pClrDebugState = GetClrDebugState(); \
+ m_oldClrDebugState = *m_pClrDebugState; \
+ \
+ m_pClrDebugState->ViolationMaskReset( ModeViolation ); \
+ \
+ m_ContractStackRecord.m_szFunction = szFunction; \
+ m_ContractStackRecord.m_szFile = szFile; \
+ m_ContractStackRecord.m_lineNum = lineNum; \
+ m_ContractStackRecord.m_testmask = \
+ (Contract::ALL_Disabled & ~((UINT)(Contract::MODE_Mask))) \
+ | (mode); \
+ m_ContractStackRecord.m_construct = szConstruct; \
+ m_pClrDebugState->LinkContractStackTrace( &m_ContractStackRecord ); \
+ m_fPushedRecord = true; \
+ }
+#define GCHOLDER_CHECK_FOR_PREEMP_IN_NOTRIGGER(pThread) \
+ if (pThread->GCNoTrigger()) \
+ { \
+ CONTRACT_ASSERT("Coop->preemp->coop switch attempted in a GC_NOTRIGGER scope", \
+ Contract::GC_NoTrigger, \
+ Contract::GC_Mask, \
+ szFunction, \
+ szFile, \
+ lineNum \
+ ); \
+ }
+#else
+#define GCHOLDER_CONTRACT_ARGS_NoDtor
+#define GCHOLDER_CONTRACT_ARGS_HasDtor
+#define GCHOLDER_DECLARE_CONTRACT_ARGS_BARE
+#define GCHOLDER_DECLARE_CONTRACT_ARGS
+#define GCHOLDER_DECLARE_CONTRACT_ARGS_INTERNAL
+#define GCHOLDER_SETUP_CONTRACT_STACK_RECORD(mode)
+#define GCHOLDER_CHECK_FOR_PREEMP_IN_NOTRIGGER(pThread)
+#endif // ENABLE_CONTRACTS_IMPL
+
+#ifndef DACCESS_COMPILE
+class GCHolderBase
+{
+protected:
+ // NOTE: This method is FORCEINLINE'ed into its callers, but the callers are just the
+ // corresponding methods in the derived types, not all sites that use GC holders. This
+ // is done so that the #pragma optimize will take effect, since the optimize settings
+ // are taken from the template instantiation site, not the template definition site.
+ template <BOOL THREAD_EXISTS>
+ FORCEINLINE_NONDEBUG
+ void PopInternal()
+ {
+ SCAN_SCOPE_END;
+ WRAPPER_NO_CONTRACT;
+
+#ifdef ENABLE_CONTRACTS_IMPL
+ if (m_fPushedRecord)
+ {
+ *m_pClrDebugState = m_oldClrDebugState;
+ }
+ // Make sure that we're using the version of this template that matches the
+ // invariant setup in EnterInternal{Coop|Preemp}{_HackNoThread}
+ _ASSERTE(!!THREAD_EXISTS == m_fThreadMustExist);
+#endif
+
+ if (m_WasCoop)
+ {
+ // m_WasCoop is only TRUE if we've already verified there's an EE thread.
+ BEGIN_GETTHREAD_ALLOWED;
+
+ _ASSERTE(m_Thread != NULL); // Cannot switch to cooperative with no thread
+ if (!m_Thread->PreemptiveGCDisabled())
+ m_Thread->DisablePreemptiveGC();
+
+ END_GETTHREAD_ALLOWED;
+ }
+ else
+ {
+ // Either we initialized m_Thread explicitly with GetThread() in the
+ // constructor, or our caller (instantiator of GCHolder) called our constructor
+ // with GetThread() (which we already asserted in the constructor)
+ // (i.e., m_Thread == GetThread()). Also, note that if THREAD_EXISTS,
+ // then m_Thread must be non-null (as it's == GetThread()). So the
+ // "if" below looks a little hokey since we're checking for either condition.
+ // But the template param THREAD_EXISTS allows us to statically early-out
+ // when it's TRUE, so we check it for perf.
+ if (THREAD_EXISTS || m_Thread != NULL)
+ {
+ BEGIN_GETTHREAD_ALLOWED;
+ if (m_Thread->PreemptiveGCDisabled())
+ m_Thread->EnablePreemptiveGC();
+ END_GETTHREAD_ALLOWED;
+ }
+ }
+
+ // If we have a thread then we assert that we ended up in the same state
+ // which we started in.
+ if (THREAD_EXISTS || m_Thread != NULL)
+ {
+ _ASSERTE(!!m_WasCoop == !!(m_Thread->PreemptiveGCDisabled()));
+ }
+ }
+
+ // NOTE: The rest of these methods are all FORCEINLINE so that the uses where 'conditional==true'
+ // can have the if-checks removed by the compiler. The callers are just the corresponding methods
+ // in the derived types, not all sites that use GC holders.
+
+
+ // This is broken - there is a potential race with the GC thread. It is currently
+ // used for a few cases where (a) we potentially haven't started up the EE yet, or
+ // (b) we are on a "special thread". We need a real solution here though.
+ FORCEINLINE_NONDEBUG
+ void EnterInternalCoop_HackNoThread(bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS_INTERNAL)
+ {
+ GCHOLDER_SETUP_CONTRACT_STACK_RECORD(Contract::MODE_Coop);
+
+ m_Thread = GetThreadNULLOk();
+
+#ifdef ENABLE_CONTRACTS_IMPL
+ m_fThreadMustExist = false;
+#endif // ENABLE_CONTRACTS_IMPL
+
+ if (m_Thread != NULL)
+ {
+ BEGIN_GETTHREAD_ALLOWED;
+ m_WasCoop = m_Thread->PreemptiveGCDisabled();
+
+ if (conditional && !m_WasCoop)
+ {
+ m_Thread->DisablePreemptiveGC();
+ _ASSERTE(m_Thread->PreemptiveGCDisabled());
+ }
+ END_GETTHREAD_ALLOWED;
+ }
+ else
+ {
+ m_WasCoop = FALSE;
+ }
+ }
+
+ FORCEINLINE_NONDEBUG
+ void EnterInternalPreemp(bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS_INTERNAL)
+ {
+ GCHOLDER_SETUP_CONTRACT_STACK_RECORD(Contract::MODE_Preempt);
+
+ m_Thread = GetThreadNULLOk();
+
+#ifdef ENABLE_CONTRACTS_IMPL
+ m_fThreadMustExist = false;
+ if (m_Thread != NULL && conditional)
+ {
+ BEGIN_GETTHREAD_ALLOWED;
+ GCHOLDER_CHECK_FOR_PREEMP_IN_NOTRIGGER(m_Thread);
+ END_GETTHREAD_ALLOWED;
+ }
+#endif // ENABLE_CONTRACTS_IMPL
+
+ if (m_Thread != NULL)
+ {
+ BEGIN_GETTHREAD_ALLOWED;
+ m_WasCoop = m_Thread->PreemptiveGCDisabled();
+
+ if (conditional && m_WasCoop)
+ {
+ m_Thread->EnablePreemptiveGC();
+ _ASSERTE(!m_Thread->PreemptiveGCDisabled());
+ }
+ END_GETTHREAD_ALLOWED;
+ }
+ else
+ {
+ m_WasCoop = FALSE;
+ }
+ }
+
+ FORCEINLINE_NONDEBUG
+ void EnterInternalCoop(Thread *pThread, bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS_INTERNAL)
+ {
+ // This is the perf version. So we deliberately restrict the calls
+ // to already setup threads to avoid the null checks and GetThread call
+ _ASSERTE(pThread && (pThread == GetThread()));
+#ifdef ENABLE_CONTRACTS_IMPL
+ m_fThreadMustExist = true;
+#endif // ENABLE_CONTRACTS_IMPL
+
+ GCHOLDER_SETUP_CONTRACT_STACK_RECORD(Contract::MODE_Coop);
+
+ m_Thread = pThread;
+ m_WasCoop = m_Thread->PreemptiveGCDisabled();
+ if (conditional && !m_WasCoop)
+ {
+ m_Thread->DisablePreemptiveGC();
+ _ASSERTE(m_Thread->PreemptiveGCDisabled());
+ }
+ }
+
+ template <BOOL THREAD_EXISTS>
+ FORCEINLINE_NONDEBUG
+ void EnterInternalPreemp(Thread *pThread, bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS_INTERNAL)
+ {
+ // This is the perf version. So we deliberately restrict the calls
+ // to already setup threads to avoid the null checks and GetThread call
+ _ASSERTE(!THREAD_EXISTS || (pThread && (pThread == GetThread())));
+#ifdef ENABLE_CONTRACTS_IMPL
+ m_fThreadMustExist = !!THREAD_EXISTS;
+#endif // ENABLE_CONTRACTS_IMPL
+
+ GCHOLDER_SETUP_CONTRACT_STACK_RECORD(Contract::MODE_Preempt);
+
+ m_Thread = pThread;
+
+ if (THREAD_EXISTS || (m_Thread != NULL))
+ {
+ GCHOLDER_CHECK_FOR_PREEMP_IN_NOTRIGGER(m_Thread);
+ m_WasCoop = m_Thread->PreemptiveGCDisabled();
+ if (conditional && m_WasCoop)
+ {
+ m_Thread->EnablePreemptiveGC();
+ _ASSERTE(!m_Thread->PreemptiveGCDisabled());
+ }
+ }
+ else
+ {
+ m_WasCoop = FALSE;
+ }
+ }
+
+private:
+ Thread * m_Thread;
+ BOOL m_WasCoop; // This is BOOL and not 'bool' because PreemptiveGCDisabled returns BOOL,
+ // so the codegen is better if we don't have to convert to 'bool'.
+#ifdef ENABLE_CONTRACTS_IMPL
+ bool m_fThreadMustExist; // used to validate that the proper Pop<THREAD_EXISTS> method is used
+ bool m_fPushedRecord;
+ ClrDebugState m_oldClrDebugState;
+ ClrDebugState *m_pClrDebugState;
+ ContractStackRecord m_ContractStackRecord;
+#endif
+};
+
+class GCCoopNoDtor : public GCHolderBase
+{
+public:
+ DEBUG_NOINLINE
+ void Enter(bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
+ {
+ WRAPPER_NO_CONTRACT;
+ SCAN_SCOPE_BEGIN;
+ if (conditional)
+ {
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ }
+ // The thread must be non-null to enter MODE_COOP
+ this->EnterInternalCoop(GetThread(), conditional GCHOLDER_CONTRACT_ARGS_NoDtor);
+ }
+
+ DEBUG_NOINLINE
+ void Leave()
+ {
+ WRAPPER_NO_CONTRACT;
+ SCAN_SCOPE_BEGIN;
+ this->PopInternal<TRUE>(); // Thread must be non-NULL
+ }
+};
+
+class GCPreempNoDtor : public GCHolderBase
+{
+public:
+ DEBUG_NOINLINE
+ void Enter(bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
+ {
+ SCAN_SCOPE_BEGIN;
+ if (conditional)
+ {
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+ }
+
+ this->EnterInternalPreemp(conditional GCHOLDER_CONTRACT_ARGS_NoDtor);
+ }
+
+ DEBUG_NOINLINE
+ void Enter(Thread * pThreadNullOk, bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
+ {
+ SCAN_SCOPE_BEGIN;
+ if (conditional)
+ {
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+ }
+
+ this->EnterInternalPreemp<FALSE>( // Thread may be NULL
+ pThreadNullOk, conditional GCHOLDER_CONTRACT_ARGS_NoDtor);
+ }
+
+ DEBUG_NOINLINE
+ void Leave()
+ {
+ SCAN_SCOPE_END;
+ this->PopInternal<FALSE>(); // Thread may be NULL
+ }
+};
+
+class GCCoop : public GCHolderBase
+{
+public:
+ DEBUG_NOINLINE
+ GCCoop(GCHOLDER_DECLARE_CONTRACT_ARGS_BARE)
+ {
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ // The thread must be non-null to enter MODE_COOP
+ this->EnterInternalCoop(GetThread(), true GCHOLDER_CONTRACT_ARGS_HasDtor);
+ }
+
+ DEBUG_NOINLINE
+ GCCoop(bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
+ {
+ SCAN_SCOPE_BEGIN;
+ if (conditional)
+ {
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ }
+
+ // The thread must be non-null to enter MODE_COOP
+ this->EnterInternalCoop(GetThread(), conditional GCHOLDER_CONTRACT_ARGS_HasDtor);
+ }
+
+ DEBUG_NOINLINE
+ ~GCCoop()
+ {
+ SCAN_SCOPE_END;
+ this->PopInternal<TRUE>(); // Thread must be non-NULL
+ }
+};
+
+// This is broken - there is a potential race with the GC thread. It is currently
+// used for a few cases where (a) we potentially haven't started up the EE yet, or
+// (b) we are on a "special thread". We need a real solution here though.
+class GCCoopHackNoThread : public GCHolderBase
+{
+public:
+ DEBUG_NOINLINE
+ GCCoopHackNoThread(GCHOLDER_DECLARE_CONTRACT_ARGS_BARE)
+ {
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ this->EnterInternalCoop_HackNoThread(true GCHOLDER_CONTRACT_ARGS_HasDtor);
+ }
+
+ DEBUG_NOINLINE
+ GCCoopHackNoThread(bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
+ {
+ SCAN_SCOPE_BEGIN;
+ if (conditional)
+ {
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ }
+
+ this->EnterInternalCoop_HackNoThread(conditional GCHOLDER_CONTRACT_ARGS_HasDtor);
+ }
+
+ DEBUG_NOINLINE
+ ~GCCoopHackNoThread()
+ {
+ SCAN_SCOPE_END;
+ this->PopInternal<FALSE>(); // Thread might be NULL
+ }
+};
+
+class GCCoopThreadExists : public GCHolderBase
+{
+public:
+ DEBUG_NOINLINE
+ GCCoopThreadExists(Thread * pThread GCHOLDER_DECLARE_CONTRACT_ARGS)
+ {
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ this->EnterInternalCoop(pThread, true GCHOLDER_CONTRACT_ARGS_HasDtor);
+ }
+
+ DEBUG_NOINLINE
+ GCCoopThreadExists(Thread * pThread, bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
+ {
+ SCAN_SCOPE_BEGIN;
+ if (conditional)
+ {
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ }
+
+ this->EnterInternalCoop(pThread, conditional GCHOLDER_CONTRACT_ARGS_HasDtor);
+ }
+
+ DEBUG_NOINLINE
+ ~GCCoopThreadExists()
+ {
+ SCAN_SCOPE_END;
+ this->PopInternal<TRUE>(); // Thread must be non-NULL
+ }
+};
+
+class GCPreemp : public GCHolderBase
+{
+public:
+ DEBUG_NOINLINE
+ GCPreemp(GCHOLDER_DECLARE_CONTRACT_ARGS_BARE)
+ {
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+
+ this->EnterInternalPreemp(true GCHOLDER_CONTRACT_ARGS_HasDtor);
+ }
+
+ DEBUG_NOINLINE
+ GCPreemp(bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
+ {
+ SCAN_SCOPE_BEGIN;
+ if (conditional)
+ {
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+ }
+
+ this->EnterInternalPreemp(conditional GCHOLDER_CONTRACT_ARGS_HasDtor);
+ }
+
+ DEBUG_NOINLINE
+ ~GCPreemp()
+ {
+ SCAN_SCOPE_END;
+ this->PopInternal<FALSE>(); // Thread may be NULL
+ }
+};
+
+class GCPreempThreadExists : public GCHolderBase
+{
+public:
+ DEBUG_NOINLINE
+ GCPreempThreadExists(Thread * pThread GCHOLDER_DECLARE_CONTRACT_ARGS)
+ {
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+
+ this->EnterInternalPreemp<TRUE>( // Thread must be non-NULL
+ pThread, true GCHOLDER_CONTRACT_ARGS_HasDtor);
+ }
+
+ DEBUG_NOINLINE
+ GCPreempThreadExists(Thread * pThread, bool conditional GCHOLDER_DECLARE_CONTRACT_ARGS)
+ {
+ SCAN_SCOPE_BEGIN;
+ if (conditional)
+ {
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+ }
+
+ this->EnterInternalPreemp<TRUE>( // Thread must be non-NULL
+ pThread, conditional GCHOLDER_CONTRACT_ARGS_HasDtor);
+ }
+
+ DEBUG_NOINLINE
+ ~GCPreempThreadExists()
+ {
+ SCAN_SCOPE_END;
+ this->PopInternal<TRUE>(); // Thread must be non-NULL
+ }
+};
+#endif // DACCESS_COMPILE
+
+
+// --------------------------------------------------------------------------------
+// GCAssert is used to implement the assert GCX_ macros. Usage is similar to GCHolder.
+//
+// Asserting preemptive mode automatically passes on unmanaged threads (those with no Thread object).
+//
+// Note that the assert is "2 sided"; it happens on entering and on leaving scope, to
+// help ensure mode integrity.
+//
+// GCAssert is a no-op in a free build.
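+//
+// Illustrative usage, via the GCX_ASSERT_* macros these classes implement (a
+// sketch only; see util.hpp for the exact macro definitions):
+//
+//      void DoCooperativeWork()
+//      {
+//          GCX_ASSERT_COOP();   // asserts cooperative mode on entry and on exit
+//          // ... work that requires cooperative mode ...
+//      }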
+// --------------------------------------------------------------------------------
+
+template<BOOL COOPERATIVE>
+class GCAssert
+{
+ public:
+ DEBUG_NOINLINE void BeginGCAssert();
+ DEBUG_NOINLINE void EndGCAssert()
+ {
+ SCAN_SCOPE_END;
+ }
+};
+
+template<BOOL COOPERATIVE>
+class AutoCleanupGCAssert
+{
+#ifdef _DEBUG_IMPL
+public:
+ DEBUG_NOINLINE AutoCleanupGCAssert();
+
+ DEBUG_NOINLINE ~AutoCleanupGCAssert()
+ {
+ SCAN_SCOPE_END;
+ WRAPPER_NO_CONTRACT;
+        // This is currently disabled; a lot of code doesn't back out the GC mode
+        // properly (relying instead on the EX_TRY macros).
+ //
+ // @todo enable this when we remove raw GC mode switching.
+#if 0
+ DoCheck();
+#endif
+ }
+
+ private:
+ FORCEINLINE void DoCheck()
+ {
+ WRAPPER_NO_CONTRACT;
+ Thread *pThread = GetThread();
+ if (COOPERATIVE)
+ {
+ _ASSERTE(pThread != NULL);
+ _ASSERTE(pThread->PreemptiveGCDisabled());
+ }
+ else
+ {
+ _ASSERTE(pThread == NULL || !(pThread->PreemptiveGCDisabled()));
+ }
+ }
+#endif
+};
+
+
+// --------------------------------------------------------------------------------
+// GCForbid is used to add ForbidGC semantics to the current GC mode. Note that
+// it requires the thread to be in cooperative mode already.
+//
+// GCForbid is a no-op in a free build.
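+//
+// Illustrative usage, via the GCX_FORBID macro this class implements (a sketch
+// only; see util.hpp for the exact definition):
+//
+//      {
+//          GCX_FORBID();    // no GC may occur in this scope
+//          // ... manipulate raw object pointers without risk of them moving ...
+//      }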
+// --------------------------------------------------------------------------------
+#ifndef DACCESS_COMPILE
+class GCForbid : AutoCleanupGCAssert<TRUE>
+{
+#ifdef ENABLE_CONTRACTS_IMPL
+ public:
+ DEBUG_NOINLINE GCForbid(BOOL fConditional, const char *szFunction, const char *szFile, int lineNum)
+ {
+ SCAN_SCOPE_BEGIN;
+ if (fConditional)
+ {
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ }
+
+ m_fConditional = fConditional;
+ if (m_fConditional)
+ {
+ Thread *pThread = GetThread();
+ m_pClrDebugState = pThread ? pThread->GetClrDebugState() : ::GetClrDebugState();
+ m_oldClrDebugState = *m_pClrDebugState;
+
+ m_pClrDebugState->ViolationMaskReset( GCViolation );
+
+            pThread->BeginForbidGC(szFile, lineNum);
+
+ m_ContractStackRecord.m_szFunction = szFunction;
+ m_ContractStackRecord.m_szFile = (char*)szFile;
+ m_ContractStackRecord.m_lineNum = lineNum;
+ m_ContractStackRecord.m_testmask = (Contract::ALL_Disabled & ~((UINT)(Contract::GC_Mask))) | Contract::GC_NoTrigger;
+ m_ContractStackRecord.m_construct = "GCX_FORBID";
+ m_pClrDebugState->LinkContractStackTrace( &m_ContractStackRecord );
+ }
+ }
+
+ DEBUG_NOINLINE GCForbid(const char *szFunction, const char *szFile, int lineNum)
+ {
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ m_fConditional = TRUE;
+
+ Thread *pThread = GetThread();
+ m_pClrDebugState = pThread ? pThread->GetClrDebugState() : ::GetClrDebugState();
+ m_oldClrDebugState = *m_pClrDebugState;
+
+ m_pClrDebugState->ViolationMaskReset( GCViolation );
+
+        pThread->BeginForbidGC(szFile, lineNum);
+
+ m_ContractStackRecord.m_szFunction = szFunction;
+ m_ContractStackRecord.m_szFile = (char*)szFile;
+ m_ContractStackRecord.m_lineNum = lineNum;
+ m_ContractStackRecord.m_testmask = (Contract::ALL_Disabled & ~((UINT)(Contract::GC_Mask))) | Contract::GC_NoTrigger;
+ m_ContractStackRecord.m_construct = "GCX_FORBID";
+ m_pClrDebugState->LinkContractStackTrace( &m_ContractStackRecord );
+ }
+
+ DEBUG_NOINLINE ~GCForbid()
+ {
+ SCAN_SCOPE_END;
+
+ if (m_fConditional)
+ {
+ GetThread()->EndForbidGC();
+ *m_pClrDebugState = m_oldClrDebugState;
+ }
+ }
+
+ private:
+ BOOL m_fConditional;
+ ClrDebugState *m_pClrDebugState;
+ ClrDebugState m_oldClrDebugState;
+ ContractStackRecord m_ContractStackRecord;
+#endif // ENABLE_CONTRACTS_IMPL
+};
+#endif // !DACCESS_COMPILE
+
+// --------------------------------------------------------------------------------
+// GCNoTrigger is used to add NoTriggerGC semantics to the current GC mode. Unlike
+// GCForbid, it does not require a thread to be in cooperative mode.
+//
+// GCNoTrigger is a no-op in a free build.
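+//
+// Illustrative usage, via the GCX_NOTRIGGER macro this class implements (a
+// sketch only; see util.hpp for the exact definition):
+//
+//      {
+//          GCX_NOTRIGGER();    // this scope must not trigger a GC
+//          // ... code that must not allocate or otherwise trigger GC ...
+//      }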
+// --------------------------------------------------------------------------------
+#ifndef DACCESS_COMPILE
+class GCNoTrigger
+{
+#ifdef ENABLE_CONTRACTS_IMPL
+ public:
+ DEBUG_NOINLINE GCNoTrigger(BOOL fConditional, const char *szFunction, const char *szFile, int lineNum)
+ {
+ SCAN_SCOPE_BEGIN;
+ if (fConditional)
+ {
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ }
+
+ m_fConditional = fConditional;
+
+ if (m_fConditional)
+ {
+ Thread * pThread = GetThreadNULLOk();
+ m_pClrDebugState = pThread ? pThread->GetClrDebugState() : ::GetClrDebugState();
+ m_oldClrDebugState = *m_pClrDebugState;
+
+ m_pClrDebugState->ViolationMaskReset( GCViolation );
+
+ if (pThread != NULL)
+ {
+ pThread->BeginNoTriggerGC(szFile, lineNum);
+ }
+
+ m_ContractStackRecord.m_szFunction = szFunction;
+ m_ContractStackRecord.m_szFile = (char*)szFile;
+ m_ContractStackRecord.m_lineNum = lineNum;
+ m_ContractStackRecord.m_testmask = (Contract::ALL_Disabled & ~((UINT)(Contract::GC_Mask))) | Contract::GC_NoTrigger;
+ m_ContractStackRecord.m_construct = "GCX_NOTRIGGER";
+ m_pClrDebugState->LinkContractStackTrace( &m_ContractStackRecord );
+ }
+ }
+
+ DEBUG_NOINLINE GCNoTrigger(const char *szFunction, const char *szFile, int lineNum)
+ {
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ m_fConditional = TRUE;
+
+ Thread * pThread = GetThreadNULLOk();
+ m_pClrDebugState = pThread ? pThread->GetClrDebugState() : ::GetClrDebugState();
+ m_oldClrDebugState = *m_pClrDebugState;
+
+ m_pClrDebugState->ViolationMaskReset( GCViolation );
+
+ if (pThread != NULL)
+ {
+ pThread->BeginNoTriggerGC(szFile, lineNum);
+ }
+
+ m_ContractStackRecord.m_szFunction = szFunction;
+ m_ContractStackRecord.m_szFile = (char*)szFile;
+ m_ContractStackRecord.m_lineNum = lineNum;
+ m_ContractStackRecord.m_testmask = (Contract::ALL_Disabled & ~((UINT)(Contract::GC_Mask))) | Contract::GC_NoTrigger;
+ m_ContractStackRecord.m_construct = "GCX_NOTRIGGER";
+ m_pClrDebugState->LinkContractStackTrace( &m_ContractStackRecord );
+ }
+
+ DEBUG_NOINLINE ~GCNoTrigger()
+ {
+ SCAN_SCOPE_END;
+
+ if (m_fConditional)
+ {
+ Thread * pThread = GetThreadNULLOk();
+ if (pThread)
+ {
+ pThread->EndNoTriggerGC();
+ }
+ *m_pClrDebugState = m_oldClrDebugState;
+ }
+ }
+
+ private:
+ BOOL m_fConditional;
+ ClrDebugState *m_pClrDebugState;
+ ClrDebugState m_oldClrDebugState;
+ ContractStackRecord m_ContractStackRecord;
+#endif // ENABLE_CONTRACTS_IMPL
+};
+#endif //!DACCESS_COMPILE
+
+class CoopTransitionHolder
+{
+ Frame * m_pFrame;
+
+public:
+ CoopTransitionHolder(Thread * pThread)
+ : m_pFrame(pThread->m_pFrame)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ ~CoopTransitionHolder()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (m_pFrame != NULL)
+ COMPlusCooperativeTransitionHandler(m_pFrame);
+ }
+
+ void SuppressRelease()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // FRAME_TOP and NULL must be distinct values.
+ // static_assert_no_msg(FRAME_TOP_VALUE != NULL);
+ m_pFrame = NULL;
+ }
+};
+
+// --------------------------------------------------------------------------------
+// GCX macros - see util.hpp
+// --------------------------------------------------------------------------------
+
+#ifdef _DEBUG_IMPL
+
+// Normally, any thread we operate on has a Thread block in its TLS. But there are
+// a few special threads we don't normally execute managed code on.
+BOOL dbgOnly_IsSpecialEEThread();
+void dbgOnly_IdentifySpecialEEThread();
+
+#ifdef USE_CHECKED_OBJECTREFS
+#define ASSERT_PROTECTED(objRef) Thread::ObjectRefProtected(objRef)
+#else
+#define ASSERT_PROTECTED(objRef)
+#endif
+
+#else
+
+#define ASSERT_PROTECTED(objRef)
+
+#endif
+
+
+#ifdef ENABLE_CONTRACTS_IMPL
+
+#define BEGINFORBIDGC() {if (GetThreadNULLOk() != NULL) GetThreadNULLOk()->BeginForbidGC(__FILE__, __LINE__);}
+#define ENDFORBIDGC() {if (GetThreadNULLOk() != NULL) GetThreadNULLOk()->EndForbidGC();}
+
+class FCallGCCanTrigger
+{
+public:
+ static DEBUG_NOINLINE void Enter()
+ {
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ Thread * pThread = GetThreadNULLOk();
+ if (pThread != NULL)
+ {
+ Enter(pThread);
+ }
+ }
+
+ static DEBUG_NOINLINE void Enter(Thread* pThread)
+ {
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ pThread->EndForbidGC();
+ }
+
+ static DEBUG_NOINLINE void Leave(const char *szFunction, const char *szFile, int lineNum)
+ {
+ SCAN_SCOPE_END;
+ Thread * pThread = GetThreadNULLOk();
+ if (pThread != NULL)
+ {
+ Leave(pThread, szFunction, szFile, lineNum);
+ }
+ }
+
+ static DEBUG_NOINLINE void Leave(Thread* pThread, const char *szFunction, const char *szFile, int lineNum)
+ {
+ SCAN_SCOPE_END;
+ pThread->BeginForbidGC(szFile, lineNum);
+ }
+};
+
+#define TRIGGERSGC_NOSTOMP() do { \
+ ANNOTATION_GC_TRIGGERS; \
+ Thread* curThread = GetThread(); \
+ if(curThread->GCNoTrigger()) \
+ { \
+ CONTRACT_ASSERT("TRIGGERSGC found in a GC_NOTRIGGER region.", Contract::GC_NoTrigger, Contract::GC_Mask, __FUNCTION__, __FILE__, __LINE__); \
+ } \
+ } while(0)
+
+
+#define TRIGGERSGC() do { \
+ TRIGGERSGC_NOSTOMP(); \
+ Thread::TriggersGC(GetThread()); \
+ } while(0)
+
+
+inline BOOL GC_ON_TRANSITIONS(BOOL val) {
+ WRAPPER_NO_CONTRACT;
+ Thread* thread = GetThread();
+    if (thread == NULL)
+ return(FALSE);
+ BOOL ret = thread->m_GCOnTransitionsOK;
+ thread->m_GCOnTransitionsOK = val;
+ return(ret);
+}
+
+#else // _DEBUG_IMPL
+
+#define BEGINFORBIDGC()
+#define ENDFORBIDGC()
+#define TRIGGERSGC_NOSTOMP() ANNOTATION_GC_TRIGGERS
+#define TRIGGERSGC() ANNOTATION_GC_TRIGGERS
+
+inline BOOL GC_ON_TRANSITIONS(BOOL val) {
+ return FALSE;
+}
+
+#endif // _DEBUG_IMPL
+
+#ifdef _DEBUG
+inline void ENABLESTRESSHEAP() {
+ WRAPPER_NO_CONTRACT;
+ Thread * thread = GetThreadNULLOk();
+ if (thread) {
+ thread->EnableStressHeap();
+ }
+}
+
+void CleanStackForFastGCStress ();
+#define CLEANSTACKFORFASTGCSTRESS() \
+if (g_pConfig->GetGCStressLevel() && g_pConfig->FastGCStressLevel() > 1) { \
+ CleanStackForFastGCStress (); \
+}
+
+#else // _DEBUG
+#define CLEANSTACKFORFASTGCSTRESS()
+
+#endif // _DEBUG
+
+
+
+
+inline void DoReleaseCheckpoint(void *checkPointMarker)
+{
+ WRAPPER_NO_CONTRACT;
+ GetThread()->m_MarshalAlloc.Collapse(checkPointMarker);
+}
+
+
+// CheckPointHolder : Back out to a checkpoint on the thread allocator.
+typedef Holder<void*, DoNothing, DoReleaseCheckpoint> CheckPointHolder;
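+
+// Illustrative usage (a sketch; obtaining the marker via GetCheckpoint() on the
+// thread's m_MarshalAlloc is an assumption about the allocator's API):
+//
+//      {
+//          CheckPointHolder cph(GetThread()->m_MarshalAlloc.GetCheckpoint());
+//          // ... transient allocations from m_MarshalAlloc ...
+//      }   // destructor collapses the allocator back to the checkpoint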
+
+
+#ifdef _DEBUG_IMPL
+// Holder for incrementing the ForbidGCLoaderUse counter.
+class GCForbidLoaderUseHolder
+{
+ public:
+ GCForbidLoaderUseHolder()
+ {
+ WRAPPER_NO_CONTRACT;
+ ClrFlsIncrementValue(TlsIdx_ForbidGCLoaderUseCount, 1);
+ }
+
+ ~GCForbidLoaderUseHolder()
+ {
+ WRAPPER_NO_CONTRACT;
+ ClrFlsIncrementValue(TlsIdx_ForbidGCLoaderUseCount, -1);
+ }
+};
+
+#endif
+
+// Declaring this macro turns off the GC_TRIGGERS/THROWS/INJECT_FAULT contract in LoadTypeHandle.
+// If you do this, you must restrict your use of the loader only to retrieve TypeHandles
+// for types that have already been loaded and resolved. If you fail to observe this restriction, you will
+// reach a GC_TRIGGERS point somewhere in the loader and assert. If you're lucky, that is.
+// (If you're not lucky, you will introduce a GC hole.)
+//
+// The main user of this workaround is the GC stack crawl. It must parse signatures and retrieve
+// type handles for valuetypes in method parameters. Some other uses have crept into the codebase -
+// some justified, others not.
+//
+// ENABLE_FORBID_GC_LOADER is *not* the same as using tokenNotToLoad to suppress loading.
+// You should use tokenNotToLoad in preference to ENABLE_FORBID. ENABLE_FORBID is a fragile
+// workaround and places enormous responsibilities on the caller. The only reason it exists at all
+// is that the GC stack crawl simply cannot tolerate exceptions or new GCs - that's an immovable
+// rock we're faced with.
+//
+// The key differences are:
+//
+// ENABLE_FORBID tokenNotToLoad
+// -------------------------------------------- ------------------------------------------------------
+// caller must guarantee the type is already caller does not have to guarantee the type
+// loaded - otherwise, we will crash badly. is already loaded.
+//
+// loader will not throw, trigger gc or OOM loader may throw, trigger GC or OOM.
+//
+//
+//
+#ifdef ENABLE_CONTRACTS_IMPL
+#define ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE() GCForbidLoaderUseHolder __gcfluh; \
+ CANNOTTHROWCOMPLUSEXCEPTION(); \
+ GCX_NOTRIGGER(); \
+ FAULT_FORBID();
+#else // ENABLE_CONTRACTS_IMPL
+#define ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE() ;
+#endif // ENABLE_CONTRACTS_IMPL
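+
+// Illustrative usage (a sketch; the surrounding lookup code is hypothetical):
+//
+//      {
+//          // Only types that are already loaded may be looked up in this scope.
+//          ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+//          TypeHandle th = LoadTypeHandle(...);  // must already be loaded/resolved
+//      }
+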
+// This macro lets us define a conditional CONTRACT for the GC_TRIGGERS behavior.
+// This is for the benefit of a select group of callers that use the loader
+// in ForbidGC mode strictly to retrieve existing TypeHandles. The reason
+// we use a threadstate rather than an extra parameter is that these annoying
+// callers call the loader through intermediaries (MetaSig) and it proved to be too
+// cumbersome to pass this state down through all those callers.
+//
+// Don't make GC_TRIGGERS conditional just because your function ends up calling
+// LoadTypeHandle indirectly. We don't want to proliferate conditional contracts more
+// than necessary so declare such functions as GC_TRIGGERS until the need
+// for the conditional contract is actually proven through code inspection or
+// coverage.
+#if defined(DACCESS_COMPILE)
+
+// Disable warning 6286: "(<non-zero constant> || <expression>) is always a non-zero
+// constant, so <expression> is never evaluated and might have side effects".
+// FORBIDGC_LOADER_USE_ENABLED is deliberately used in that pattern, and the rule
+// has little value here anyway.
+#ifdef _PREFAST_
+#pragma warning(disable:6286)
+#endif
+#define FORBIDGC_LOADER_USE_ENABLED() true
+
+#else // DACCESS_COMPILE
+#if defined (_DEBUG_IMPL) || defined(_PREFAST_)
+#ifndef DACCESS_COMPILE
+#define FORBIDGC_LOADER_USE_ENABLED() (ClrFlsGetValue(TlsIdx_ForbidGCLoaderUseCount))
+#else
+#define FORBIDGC_LOADER_USE_ENABLED() TRUE
+#endif
+#else // _DEBUG_IMPL
+
+// If you got an error about FORBIDGC_LOADER_USE_ENABLED being undefined, it's because you tried
+// to use this predicate in a free build outside of a CONTRACT or ASSERT.
+//
+#define FORBIDGC_LOADER_USE_ENABLED() (sizeof(YouCannotUseThisHere) != 0)
+#endif // _DEBUG_IMPL
+#endif // DACCESS_COMPILE
+
+#ifdef FEATURE_STACK_PROBE
+#ifdef _DEBUG_IMPL
+inline void NO_FORBIDGC_LOADER_USE_ThrowSO()
+{
+ WRAPPER_NO_CONTRACT;
+ if (FORBIDGC_LOADER_USE_ENABLED())
+ {
+        // If you are hitting this assert, maybe a failure was injected at a place
+        // where it cannot occur in a real-world scenario (see VSW 397871),
+        // or maybe it's a bug at the place where FORBIDGC_LOADER_USE_ENABLED was set.
+ _ASSERTE(!"Unexpected SO, please read the comment");
+ }
+ else
+ COMPlusThrowSO();
+}
+#else
+inline void NO_FORBIDGC_LOADER_USE_ThrowSO()
+{
+ COMPlusThrowSO();
+}
+#endif
+#endif
+
+// There is an MDA which can detect illegal reentrancy into the CLR. For instance, if you call managed
+// code from a native vectored exception handler, this might cause a reverse PInvoke to occur. But if the
+// exception was triggered from code that was executing in cooperative GC mode, we now have GC holes and
+// general corruption.
+BOOL HasIllegalReentrancy();
+
+
+// This class can be used to "schedule" a culture setting,
+// kicking in when leaving scope or during exception unwinding.
+// Note: during destruction, this can throw. You have been warned.
+class ReturnCultureHolder
+{
+public:
+ ReturnCultureHolder(Thread* pThread, OBJECTREF* culture, BOOL bUICulture)
+ {
+ CONTRACTL
+ {
+ WRAPPER(NOTHROW);
+ WRAPPER(GC_NOTRIGGER);
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pThread));
+ }
+ CONTRACTL_END;
+
+ m_pThread = pThread;
+ m_culture = culture;
+ m_bUICulture = bUICulture;
+ m_acquired = TRUE;
+ }
+
+ FORCEINLINE void SuppressRelease()
+ {
+ m_acquired = FALSE;
+ }
+
+ ~ReturnCultureHolder()
+ {
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (m_acquired)
+ m_pThread->SetCulture(m_culture, m_bUICulture);
+ }
+
+private:
+ ReturnCultureHolder()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ Thread* m_pThread;
+ OBJECTREF* m_culture;
+ BOOL m_bUICulture;
+ BOOL m_acquired;
+};
+
+
+//
+// _pThread: (Thread*) current Thread
+// _pCurrDomain: (AppDomain*) current AppDomain
+// _pDestDomain: (AppDomain*) AppDomain to transition to
+// _predicate_expr: (bool) Expression to predicate the transition. If this is true, we transition,
+// otherwise we don't. WARNING : if you change this macro, be sure you
+// guarantee that this macro argument is only evaluated once.
+//
+
+//
+// @TODO: can't we take the transition with a holder?
+//
+#define ENTER_DOMAIN_SETUPVARS(_pThread, _predicate_expr) \
+{ \
+ DEBUG_ASSURE_NO_RETURN_BEGIN(DOMAIN) \
+ \
+ Thread* _ctx_trans_pThread = (_pThread); \
+ bool _ctx_trans_fTransitioned = false; \
+ bool _ctx_trans_fPredicate = (_predicate_expr); \
+ bool _ctx_trans_fRaiseNeeded = false; \
+ Exception* _ctx_trans_pTargetDomainException=NULL; \
+ ADID _ctx_trans_pDestDomainId=ADID(0); \
+ FrameWithCookie<ContextTransitionFrame> _ctx_trans_Frame; \
+ ContextTransitionFrame* _ctx_trans_pFrame = &_ctx_trans_Frame; \
+
+#define ENTER_DOMAIN_SWITCH_CTX_BY_ADID(_pCurrDomainPtr,_pDestDomainId,_bUnsafePoint) \
+ AppDomain* _ctx_trans_pCurrDomain=_pCurrDomainPtr; \
+ _ctx_trans_pDestDomainId=(ADID)_pDestDomainId; \
+ BOOL _ctx_trans_bUnsafePoint=_bUnsafePoint; \
+ if (_ctx_trans_fPredicate && \
+ (_ctx_trans_pCurrDomain==NULL || \
+ (_ctx_trans_pCurrDomain->GetId() != _ctx_trans_pDestDomainId))) \
+ { \
+ AppDomainFromIDHolder _ctx_trans_ad(_ctx_trans_pDestDomainId,_ctx_trans_bUnsafePoint); \
+ _ctx_trans_ad.ThrowIfUnloaded(); \
+ \
+ _ctx_trans_ad->EnterContext(_ctx_trans_pThread, \
+ _ctx_trans_ad->GetDefaultContext(), \
+ _ctx_trans_pFrame); \
+ \
+ _ctx_trans_ad.Release(); \
+ _ctx_trans_fTransitioned = true; \
+ }
+
+#define ENTER_DOMAIN_SWITCH_CTX_BY_ADPTR(_pCurrDomain,_pDestDomain) \
+ AppDomain* _ctx_trans_pCurrDomain=_pCurrDomain; \
+ AppDomain* _ctx_trans_pDestDomain=_pDestDomain; \
+ _ctx_trans_pDestDomainId=_ctx_trans_pDestDomain->GetId(); \
+ \
+ if (_ctx_trans_fPredicate && (_ctx_trans_pCurrDomain != _ctx_trans_pDestDomain)) \
+ { \
+ TESTHOOKCALL(AppDomainCanBeUnloaded(_ctx_trans_pDestDomain->GetId().m_dwId,FALSE)); \
+ GCX_FORBID(); \
+ if (!_ctx_trans_pDestDomain->CanThreadEnter(_ctx_trans_pThread)) \
+ COMPlusThrow(kAppDomainUnloadedException); \
+ \
+ _ctx_trans_pThread->EnterContextRestricted( \
+ _ctx_trans_pDestDomain->GetDefaultContext(), \
+ _ctx_trans_pFrame); \
+ \
+ _ctx_trans_fTransitioned = true; \
+ }
+
+
+
+#define ENTER_DOMAIN_SETUP_EH \
+ /* work around unreachable code warning */ \
+ SCAN_BLOCKMARKER_N(DOMAIN); \
+ if (true) EX_TRY \
+ { \
+ SCAN_BLOCKMARKER_MARK_N(DOMAIN); \
+ LOG((LF_APPDOMAIN, LL_INFO1000, "ENTER_DOMAIN(%s, %s, %d): %s\n", \
+ __FUNCTION__, __FILE__, __LINE__, \
+ _ctx_trans_fTransitioned ? "ENTERED" : "NOP"));
+
+// Note: we go to preemptive mode before the EX_RETHROW. Going preemptive here is safe,
+// since many other paths in this macro toggle the GC mode, too.
+#define END_DOMAIN_TRANSITION \
+ TESTHOOKCALL(LeavingAppDomain(::GetAppDomain()->GetId().m_dwId)); \
+ } \
+ EX_CATCH \
+ { \
+ SCAN_BLOCKMARKER_USE_N(DOMAIN); \
+ LOG((LF_EH|LF_APPDOMAIN, LL_INFO1000, "ENTER_DOMAIN(%s, %s, %d): exception in flight\n", \
+ __FUNCTION__, __FILE__, __LINE__)); \
+ \
+ if (!_ctx_trans_fTransitioned) \
+ { \
+ if (_ctx_trans_pThread->PreemptiveGCDisabled()) \
+ { \
+ _ctx_trans_pThread->EnablePreemptiveGC(); \
+ } \
+ \
+ EX_RETHROW; \
+ } \
+ \
+ \
+ _ctx_trans_pTargetDomainException=EXTRACT_EXCEPTION(); \
+ \
+ /* Save Watson buckets before the exception object is changed */ \
+ CAPTURE_BUCKETS_AT_TRANSITION(_ctx_trans_pThread, GET_THROWABLE()); \
+ \
+ _ctx_trans_fRaiseNeeded = true; \
+ SCAN_BLOCKMARKER_END_USE_N(DOMAIN); \
+ } \
+ /* SwallowAllExceptions is fine because we don't get to this point */ \
+ /* unless fRaiseNeeded = true or no exception was thrown */ \
+ EX_END_CATCH(SwallowAllExceptions); \
+ \
+ if (_ctx_trans_fRaiseNeeded) \
+ { \
+ SCAN_BLOCKMARKER_USE_N(DOMAIN); \
+ LOG((LF_EH, LL_INFO1000, "RaiseCrossContextException(%s, %s, %d)\n", \
+ __FUNCTION__, __FILE__, __LINE__)); \
+ _ctx_trans_pThread->RaiseCrossContextException(_ctx_trans_pTargetDomainException, _ctx_trans_pFrame); \
+ } \
+ \
+ LOG((LF_APPDOMAIN, LL_INFO1000, "LEAVE_DOMAIN(%s, %s, %d)\n", \
+ __FUNCTION__, __FILE__, __LINE__)); \
+ \
+ if (_ctx_trans_fTransitioned) \
+ { \
+ GCX_FORBID(); \
+ _ctx_trans_pThread->ReturnToContext(_ctx_trans_pFrame); \
+ } \
+ TESTHOOKCALL(LeftAppDomain(_ctx_trans_pDestDomainId.m_dwId)); \
+ DEBUG_ASSURE_NO_RETURN_END(DOMAIN) \
+}
+
+// current AD, always safe
+#define ADV_CURRENTAD   0
+// default AD, never unloaded
+#define ADV_DEFAULTAD   1
+// held by an iterator; the iterator holds a ref
+#define ADV_ITERATOR    2
+// the appdomain is on the stack
+#define ADV_RUNNINGIN   4
+// we're in the process of creating the appdomain; refcount guaranteed to be >0
+#define ADV_CREATING    8
+// compilation domain - NGen guarantees it won't be unloaded until everyone has left
+#define ADV_COMPILATION  0x10
+// finalizer thread - synchronized with AD unload
+#define ADV_FINALIZER     0x40
+// AD unload thread - cannot race with itself
+#define ADV_ADUTHREAD   0x80
+// held by an AppDomainRefTaker
+#define ADV_REFTAKER    0x100
+
+#ifdef _DEBUG
+void CheckADValidity(AppDomain* pDomain, DWORD ADValidityKind);
+#else
+#define CheckADValidity(pDomain,ADValidityKind)
+#endif
+
+// Please keep these macros in sync with the NO_EH_AT_TRANSITION macros below.
+#define ENTER_DOMAIN_ID_PREDICATED(_pDestDomain,_predicate_expr) \
+ TESTHOOKCALL(EnteringAppDomain(_pDestDomain.m_dwId)) ; \
+ ENTER_DOMAIN_SETUPVARS(GetThread(), _predicate_expr) \
+ ENTER_DOMAIN_SWITCH_CTX_BY_ADID(_ctx_trans_pThread->GetDomain(), _pDestDomain, FALSE) \
+ ENTER_DOMAIN_SETUP_EH \
+ TESTHOOKCALL(EnteredAppDomain(_pDestDomain.m_dwId));
+
+#define ENTER_DOMAIN_PTR_PREDICATED(_pDestDomain,ADValidityKind,_predicate_expr) \
+ TESTHOOKCALL(EnteringAppDomain((_pDestDomain)->GetId().m_dwId)); \
+ ENTER_DOMAIN_SETUPVARS(GetThread(), _predicate_expr) \
+ CheckADValidity(_ctx_trans_fPredicate?(_pDestDomain):GetAppDomain(),ADValidityKind); \
+ ENTER_DOMAIN_SWITCH_CTX_BY_ADPTR(_ctx_trans_pThread->GetDomain(), _pDestDomain) \
+ ENTER_DOMAIN_SETUP_EH \
+ TESTHOOKCALL(EnteredAppDomain((_pDestDomain)->GetId().m_dwId));
+
+
+#define ENTER_DOMAIN_PTR(_pDestDomain,ADValidityKind) \
+ TESTHOOKCALL(EnteringAppDomain((_pDestDomain)->GetId().m_dwId)); \
+ CheckADValidity(_pDestDomain,ADValidityKind); \
+ ENTER_DOMAIN_SETUPVARS(GetThread(), true) \
+ ENTER_DOMAIN_SWITCH_CTX_BY_ADPTR(_ctx_trans_pThread->GetDomain(), _pDestDomain) \
+ ENTER_DOMAIN_SETUP_EH \
+ TESTHOOKCALL(EnteredAppDomain((_pDestDomain)->GetId().m_dwId));
+
+#define ENTER_DOMAIN_ID(_pDestDomain) \
+ ENTER_DOMAIN_ID_PREDICATED(_pDestDomain,true)
+
+// <EnableADTransitionWithoutEH>
+// The following macros support the AD transition *without* using EH at transition boundary.
+// Please keep them in sync with the macros above.
+#define ENTER_DOMAIN_PTR_NO_EH_AT_TRANSITION(_pDestDomain,ADValidityKind) \
+ TESTHOOKCALL(EnteringAppDomain((_pDestDomain)->GetId().m_dwId)); \
+ CheckADValidity(_pDestDomain,ADValidityKind); \
+ ENTER_DOMAIN_SETUPVARS(GetThread(), true) \
+ ENTER_DOMAIN_SWITCH_CTX_BY_ADPTR(_ctx_trans_pThread->GetDomain(), _pDestDomain) \
+ TESTHOOKCALL(EnteredAppDomain((_pDestDomain)->GetId().m_dwId)); \
+ ReturnToPreviousAppDomainHolder __returnToPreviousAppDomainHolder;
+
+#define ENTER_DOMAIN_ID_NO_EH_AT_TRANSITION_PREDICATED(_pDestDomain,_predicate_expr) \
+ TESTHOOKCALL(EnteringAppDomain(_pDestDomain.m_dwId)) ; \
+ ENTER_DOMAIN_SETUPVARS(GetThread(), _predicate_expr) \
+ ENTER_DOMAIN_SWITCH_CTX_BY_ADID(_ctx_trans_pThread->GetDomain(), _pDestDomain, FALSE) \
+ TESTHOOKCALL(EnteredAppDomain(_pDestDomain.m_dwId)); \
+ ReturnToPreviousAppDomainHolder __returnToPreviousAppDomainHolder;
+
+#define ENTER_DOMAIN_ID_NO_EH_AT_TRANSITION(_pDestDomain) \
+ ENTER_DOMAIN_ID_NO_EH_AT_TRANSITION_PREDICATED(_pDestDomain,true)
+
+#define END_DOMAIN_TRANSITION_NO_EH_AT_TRANSITION \
+ TESTHOOKCALL(LeavingAppDomain(::GetAppDomain()->GetId().m_dwId)); \
+ LOG((LF_APPDOMAIN, LL_INFO1000, "LEAVE_DOMAIN(%s, %s, %d)\n", \
+ __FUNCTION__, __FILE__, __LINE__)); \
+ \
+ if (_ctx_trans_fTransitioned) \
+ { \
+ GCX_FORBID(); \
+ _ctx_trans_pThread->ReturnToContext(_ctx_trans_pFrame); \
+ } \
+ __returnToPreviousAppDomainHolder.SuppressRelease(); \
+ TESTHOOKCALL(LeftAppDomain(_ctx_trans_pDestDomainId.m_dwId)); \
+ DEBUG_ASSURE_NO_RETURN_END(DOMAIN) \
+ } // Close scope setup by ENTER_DOMAIN_SETUPVARS
+
+// </EnableADTransitionWithoutEH>
+
+#define GET_CTX_TRANSITION_FRAME() \
+ (_ctx_trans_pFrame)
+
+//-----------------------------------------------------------------------------
+// System to make Cross-Appdomain calls.
+//
+// Cross-AppDomain calls are made via a callback + args. This gives us the flexibility
+// to check if a transition is needed, and take fast vs. slow paths for the debugger.
+//
+// Example usage:
+// struct FooArgs : public CtxTransitionBaseArgs { ... } args (...); // load up args
+// MakeCallWithPossibleAppDomainTransition(pNewDomain, MyFooFunc, &args);
+//
+// MyFooFunc is always executed in pNewDomain.
+// If we're already in pNewDomain, then that just becomes MyFooFunc(&args);
+// else we'll switch ADs, and do the proper Try/Catch/Rethrow.
+//-----------------------------------------------------------------------------
+
+// All Arg structs should derive from this. This makes certain standard args
+// are available (such as the context-transition frame).
+// The ADCallback helpers will fill in these base args.
+struct CtxTransitionBaseArgs;
+
+// Pointer type for the AppDomain callback function.
+typedef void (*FPAPPDOMAINCALLBACK)(
+ CtxTransitionBaseArgs* pData // Caller's private data
+);
+
+
+//-----------------------------------------------------------------------------
+// Call with a wrapper.
+// We've already transitioned AppDomains here. This just places a first-pass exception
+// filter that sniffs for catch-handler-found callbacks on behalf of the debugger.
+//-----------------------------------------------------------------------------
+void MakeADCallDebuggerWrapper(
+ FPAPPDOMAINCALLBACK fpCallback,
+ CtxTransitionBaseArgs * args,
+ ContextTransitionFrame* pFrame);
+
+// Invoke a callback in another appdomain.
+// Caller should have checked that we're actually transitioning domains here.
+void MakeCallWithAppDomainTransition(
+ ADID pTargetDomain,
+ FPAPPDOMAINCALLBACK fpCallback,
+ CtxTransitionBaseArgs * args);
+
+// Invoke the callback in the AppDomain.
+// Ensure that the predicate only gets evaluated once!
+#define MakePredicatedCallWithPossibleAppDomainTransition(pTargetDomain, fPredicate, fpCallback, args) \
+{ \
+ Thread* _ctx_trans_pThread = GetThread(); \
+ _ASSERTE(_ctx_trans_pThread != NULL); \
+ ADID _ctx_trans_pCurrDomain = _ctx_trans_pThread->GetDomain()->GetId(); \
+ ADID _ctx_trans_pDestDomain = (pTargetDomain); \
+ \
+ if (fPredicate && (_ctx_trans_pCurrDomain != _ctx_trans_pDestDomain)) \
+ { \
+ /* Transition domains and make the call */ \
+ MakeCallWithAppDomainTransition(pTargetDomain, (FPAPPDOMAINCALLBACK) fpCallback, args); \
+ } \
+ else \
+ { \
+ /* No transition needed. Just call directly. */ \
+ (fpCallback)(args); \
+ }\
+}
+
+// Invoke the callback in the AppDomain.
+#define MakeCallWithPossibleAppDomainTransition(pTargetDomain, fpCallback, args) \
+ MakePredicatedCallWithPossibleAppDomainTransition(pTargetDomain, true, fpCallback, args)
+
+
+struct CtxTransitionBaseArgs
+{
+ // This function fills out the private base args.
+ friend void MakeCallWithAppDomainTransition(
+ ADID pTargetDomain,
+ FPAPPDOMAINCALLBACK fpCallback,
+ CtxTransitionBaseArgs * args);
+
+public:
+ CtxTransitionBaseArgs() { pCtxFrame = NULL; }
+ // This will be NULL if we didn't actually transition.
+ ContextTransitionFrame* GetCtxTransitionFrame() { return pCtxFrame; }
+private:
+ ContextTransitionFrame* pCtxFrame;
+};
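+
+// Fleshing out the FooArgs sketch from the comment above (the names FooArgs,
+// MyFooFunc, m_input and m_result are hypothetical; only CtxTransitionBaseArgs
+// and the FPAPPDOMAINCALLBACK signature come from this header):
+//
+//      struct FooArgs : public CtxTransitionBaseArgs
+//      {
+//          int m_input;    // caller-supplied data
+//          int m_result;   // filled in by the callback
+//      };
+//
+//      static void MyFooFunc(CtxTransitionBaseArgs * pData)
+//      {
+//          FooArgs * pArgs = static_cast<FooArgs *>(pData);
+//          pArgs->m_result = pArgs->m_input * 2;   // runs in the target domain
+//      }
+//
+//      // FooArgs args; args.m_input = 21;
+//      // MakeCallWithPossibleAppDomainTransition(adidTarget, MyFooFunc, &args);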
+
+
+// We have numerous places where we start up a managed thread. This includes several places in the
+// ThreadPool, the 'new Thread(...).Start()' case, and the Finalizer. Try to factor the code so our
+// base exception handling behavior is consistent across those places. The resulting code is convoluted,
+// but it's better than the prior situation of each thread being on a different plan.
+
+// If you add a new kind of managed thread (i.e. thread proc) to the system, you must:
+//
+// 1) Call HasStarted() before calling any ManagedThreadBase_* routine.
+// 2) Define a ManagedThreadBase_* routine for your scenario and declare it below.
+// 3) Always perform any AD transitions through the ManagedThreadBase_* mechanism.
+// 4) Allow the ManagedThreadBase_* mechanism to perform all your exception handling, including
+// dispatching of unhandled exception events, deciding what to swallow, etc.
+// 5) If you must separate your base thread proc behavior from your AD transitioning behavior,
+// define a second ManagedThreadADCall_* helper and declare it below.
+// 6) Never decide this is too much work and that you will roll your own thread proc code.
+
+// intentionally opaque.
+struct ManagedThreadCallState;
+
+struct ManagedThreadBase
+{
+ // The 'new Thread(...).Start()' case from COMSynchronizable kickoff thread worker
+ static void KickOff(ADID pAppDomain,
+ Context::ADCallBackFcnType pTarget,
+ LPVOID args);
+
+ // The IOCompletion, QueueUserWorkItem, AddTimer, RegisterWaitForSingleObject cases in
+ // the ThreadPool
+ static void ThreadPool(ADID pAppDomain, Context::ADCallBackFcnType pTarget, LPVOID args);
+
+    // The Finalizer thread separates the tasks of establishing exception handling at its
+    // base and transitioning into AppDomains. For such cases, where the base transition and
+    // the AppDomain transition are separated, the opaque ManagedThreadCallState structure
+    // ties the two calls together.
+
+ static void FinalizerBase(Context::ADCallBackFcnType pTarget);
+ static void FinalizerAppDomain(AppDomain* pAppDomain,
+ Context::ADCallBackFcnType pTarget,
+ LPVOID args,
+ ManagedThreadCallState *pTurnAround);
+};
+
+
+// DeadlockAwareLock is a base for building deadlock-aware locks.
+// Note that DeadlockAwareLock only works if ALL locks involved in the deadlock are deadlock aware.
+
+class DeadlockAwareLock
+{
+ private:
+ VolatilePtr<Thread> m_pHoldingThread;
+#ifdef _DEBUG
+ const char *m_description;
+#endif
+
+ public:
+ DeadlockAwareLock(const char *description = NULL);
+ ~DeadlockAwareLock();
+
+ // Test for deadlock
+ BOOL CanEnterLock();
+
+ // Call BeginEnterLock before attempting to acquire the lock
+ BOOL TryBeginEnterLock(); // returns FALSE if deadlock
+ void BeginEnterLock(); // Asserts if deadlock
+
+ // Call EndEnterLock after acquiring the lock
+ void EndEnterLock();
+
+ // Call LeaveLock after releasing the lock
+ void LeaveLock();
+
+ const char *GetDescription();
+
+ private:
+ CHECK CheckDeadlock(Thread *pThread);
+
+ static void ReleaseBlockingLock()
+ {
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+ pThread->m_pBlockingLock = NULL;
+ }
+public:
+ typedef StateHolder<DoNothing,DeadlockAwareLock::ReleaseBlockingLock> BlockingLockHolder;
+};
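+
+// Illustrative call sequence (a sketch; YourLock and its Acquire/Release
+// helpers are hypothetical):
+//
+//      void YourLock::Enter()
+//      {
+//          m_deadlockAwareLock.BeginEnterLock();   // asserts if this would deadlock
+//          AcquireUnderlyingLock();
+//          m_deadlockAwareLock.EndEnterLock();
+//      }
+//
+//      void YourLock::Leave()
+//      {
+//          m_deadlockAwareLock.LeaveLock();
+//          ReleaseUnderlyingLock();
+//      }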
+
+inline Context* GetCurrentContext()
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return GetThread()->GetContext();
+}
+
+inline void SetTypeHandleOnThreadForAlloc(TypeHandle th)
+{
+    // We do this unconditionally even though th is only used by ETW events in the GC. Even when
+    // the ETW event is not enabled here, we still need to set it: the event may become enabled
+    // by the time the GC checks the value, and we don't want the GC to read a stale value from
+    // an earlier allocation in that case.
+ GetThread()->SetTHAllocContextObj(th);
+}
+
+#endif // CROSSGEN_COMPILE
+
+#ifdef FEATURE_IMPLICIT_TLS
+class Compiler;
+// Users of the OFFSETOF__TLS__tls_CurrentThread macro expect the offsets of these variables relative
+// to _tls_start to be stable. Defining each of the following thread-local variables separately, without
+// the struct, causes the offsets to change across build flavors. E.g., in a chk build the offset of
+// m_pThread is 0x4, while in a ret build it becomes 0x8 because 0x4 is occupied by m_pAppDomain.
+// Packing all the thread-local variables into a struct and making the struct instance thread-local
+// ensures that the offsets are stable in all build flavors.
+struct ThreadLocalInfo
+{
+ Thread* m_pThread;
+ AppDomain* m_pAppDomain;
+ void** m_EETlsData; // ClrTlsInfo::data
+#ifdef FEATURE_MERGE_JIT_AND_ENGINE
+ Compiler* m_pCompiler;
+#endif
+};
+#endif // FEATURE_IMPLICIT_TLS
+
+class ThreadStateHolder
+{
+public:
+ ThreadStateHolder (BOOL fNeed, DWORD state)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE (GetThread());
+ m_fNeed = fNeed;
+ m_state = state;
+ }
+ ~ThreadStateHolder ()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_fNeed)
+ {
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+ FastInterlockAnd((ULONG *) &pThread->m_State, ~m_state);
+ }
+ }
+private:
+ BOOL m_fNeed;
+ DWORD m_state;
+};
+
+// Sets an NC threadstate if not already set, and restores the old state
+// of that bit upon destruction
+
+// fNeed > 0, make sure state is set, restored in destructor
+// fNeed = 0, no change
+// fNeed < 0, make sure state is reset, restored in destructor
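+//
+// Illustrative usage (a sketch; TSNC_UnbalancedLocks is just an example bit):
+//
+//      {
+//          ThreadStateNCStackHolder holder(TRUE, Thread::TSNC_UnbalancedLocks);
+//          // ... the bit is set here; its prior value is restored on scope exit ...
+//      }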
+
+class ThreadStateNCStackHolder
+{
+ public:
+ ThreadStateNCStackHolder (BOOL fNeed, Thread::ThreadStateNoConcurrency state)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE (GetThread());
+ m_fNeed = fNeed;
+ m_state = state;
+
+ if (fNeed)
+ {
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+
+ if (fNeed < 0)
+ {
+ // if the state is set, reset it
+ if (pThread->HasThreadStateNC(state))
+ {
+ pThread->ResetThreadStateNC(m_state);
+ }
+ else
+ {
+ m_fNeed = FALSE;
+ }
+ }
+ else
+ {
+ // if the state is already set then no change is
+ // necessary during the back out
+ if(pThread->HasThreadStateNC(state))
+ {
+ m_fNeed = FALSE;
+ }
+ else
+ {
+ pThread->SetThreadStateNC(state);
+ }
+ }
+ }
+ }
+
+ ~ThreadStateNCStackHolder()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_fNeed)
+ {
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+
+ if (m_fNeed < 0)
+ {
+ pThread->SetThreadStateNC(m_state); // set it
+ }
+ else
+ {
+ pThread->ResetThreadStateNC(m_state);
+ }
+ }
+ }
+
+private:
+ BOOL m_fNeed;
+ Thread::ThreadStateNoConcurrency m_state;
+};
+
+BOOL Debug_IsLockedViaThreadSuspension();
+
+#endif //__threads_h__
diff --git a/src/vm/threads.inl b/src/vm/threads.inl
new file mode 100644
index 0000000000..d3d34057ca
--- /dev/null
+++ b/src/vm/threads.inl
@@ -0,0 +1,297 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+
+
+//
+//
+/*============================================================
+**
+** Header: Threads.inl
+**
+** Purpose: Implements Thread inline functions
+**
+**
+===========================================================*/
+#ifndef _THREADS_INL
+#define _THREADS_INL
+
+#include "threads.h"
+#include "appdomain.hpp"
+#include "frames.h"
+
+#ifdef ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+// See code:GetThreadGenericFullCheck
+inline /* static */ BOOL Thread::ShouldEnforceEEThreadNotRequiredContracts()
+{
+ LIMITED_METHOD_CONTRACT;
+ return s_fEnforceEEThreadNotRequiredContracts;
+}
+#endif // ENABLE_GET_THREAD_GENERIC_FULL_CHECK
+
+inline void Thread::IncLockCount()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(GetThread() == this);
+ m_dwLockCount++;
+ _ASSERTE(m_dwLockCount != 0 || HasThreadStateNC(TSNC_UnbalancedLocks) || GetDomain()->OkToIgnoreOrphanedLocks());
+}
+
+inline void Thread::DecLockCount()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(GetThread() == this);
+ _ASSERTE(m_dwLockCount > 0 || HasThreadStateNC(TSNC_UnbalancedLocks) || GetDomain()->OkToIgnoreOrphanedLocks());
+ m_dwLockCount--;
+}
+
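+// Walk this thread's Frame chain and return the first Frame whose address is at
+// or above the given stack pointer (explicit frames are stack-allocated, so
+// address order matches stack order).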
+inline
+Frame* Thread::FindFrame(SIZE_T StackPointer)
+{
+ Frame* pFrame = GetFrame();
+
+ while ((SIZE_T)pFrame < StackPointer)
+ {
+ pFrame = pFrame->Next();
+ }
+
+ return pFrame;
+}
+
+inline void Thread::SetThrowable(OBJECTREF pThrowable DEBUG_ARG(ThreadExceptionState::SetThrowableErrorChecking stecFlags))
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_ExceptionState.SetThrowable(pThrowable DEBUG_ARG(stecFlags));
+}
+
+inline void Thread::SetKickOffDomainId(ADID ad)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ m_pKickOffDomainId = ad;
+}
+
+
+inline ADID Thread::GetKickOffDomainId()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pKickOffDomainId.m_dwId != 0);
+ return m_pKickOffDomainId;
+}
+
+// get the current notification (if any) from this thread
+inline OBJECTHANDLE Thread::GetThreadCurrNotification()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ return m_hCurrNotification;
+}
+
+// set the current notification (if any) from this thread
+inline void Thread::SetThreadCurrNotification(OBJECTHANDLE handle)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ m_hCurrNotification = handle;
+}
+
+// clear the current notification (if any) from this thread
+inline void Thread::ClearThreadCurrNotification()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ m_hCurrNotification = NULL;
+}
+
+
+inline OBJECTREF Thread::GetExposedObjectRaw()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ return ObjectFromHandle(m_ExposedObject);
+}
+
+inline void Thread::FinishSOWork()
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_STACK_PROBE
+ if (HasThreadStateNC(TSNC_SOWorkNeeded))
+ {
+ ResetThreadStateNC(TSNC_SOWorkNeeded);
+        // Wake up the AD unload thread to finish SO work that was delayed due to limited stack
+ AppDomain::EnableADUnloadWorkerForThreadAbort();
+ }
+#else
+ _ASSERTE(!HasThreadStateNC(TSNC_SOWorkNeeded));
+#endif
+}
+
+inline DWORD Thread::IncrementOverridesCount()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_ADStack.IncrementOverridesCount();
+}
+
+inline DWORD Thread::DecrementOverridesCount()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_ADStack.DecrementOverridesCount();
+}
+
+inline DWORD Thread::GetOverridesCount()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_ADStack.GetOverridesCount();
+}
+
+inline DWORD Thread::IncrementAssertCount()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_ADStack.IncrementAssertCount();
+}
+
+inline DWORD Thread::DecrementAssertCount()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_ADStack.DecrementAssertCount();
+}
+
+inline DWORD Thread::GetAssertCount()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_ADStack.GetAssertCount();
+}
+
+#ifndef DACCESS_COMPILE
+inline void Thread::PushDomain(ADID pDomain)
+{
+ WRAPPER_NO_CONTRACT;
+ m_ADStack.PushDomain(pDomain);
+}
+
+inline ADID Thread::PopDomain()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_ADStack.PopDomain();
+}
+#endif // DACCESS_COMPILE
+
+inline DWORD Thread::GetNumAppDomainsOnThread()
+{
+ WRAPPER_NO_CONTRACT;
+ return m_ADStack.GetNumDomains();
+}
+
+inline BOOL Thread::CheckThreadWideSpecialFlag(DWORD flags)
+{
+ WRAPPER_NO_CONTRACT;
+ return m_ADStack.GetThreadWideSpecialFlag() & flags;
+}
+
+inline void Thread::InitDomainIteration(DWORD *pIndex)
+{
+ WRAPPER_NO_CONTRACT;
+ m_ADStack.InitDomainIteration(pIndex);
+}
+
+inline ADID Thread::GetNextDomainOnStack(DWORD *pIndex, DWORD *pOverrides, DWORD *pAsserts)
+{
+ WRAPPER_NO_CONTRACT;
+ return m_ADStack.GetNextDomainOnStack(pIndex, pOverrides, pAsserts);
+}
+
+inline void Thread::UpdateDomainOnStack(DWORD pIndex, DWORD asserts, DWORD overrides)
+{
+ WRAPPER_NO_CONTRACT;
+ return m_ADStack.UpdateDomainOnStack(pIndex, asserts, overrides);
+}
+
+#ifdef FEATURE_COMINTEROP
+inline void Thread::RevokeApartmentSpy()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_fInitializeSpyRegistered)
+ {
+ VERIFY(SUCCEEDED(CoRevokeInitializeSpy(m_uliInitializeSpyCookie)));
+ m_fInitializeSpyRegistered = false;
+ }
+}
+
+inline LPVOID Thread::GetLastSTACtxCookie(BOOL *pfNAContext)
+{
+ LIMITED_METHOD_CONTRACT;
+ *pfNAContext = ((UINT_PTR)m_pLastSTACtxCookie & 1);
+ return (LPVOID)((UINT_PTR)m_pLastSTACtxCookie & ~1);
+}
+
+inline void Thread::SetLastSTACtxCookie(LPVOID pCtxCookie, BOOL fNAContext)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (fNAContext)
+ {
+ // The ctx cookie is an interface pointer so we can steal the lowest bit
+ // to mark whether the context is known to be Neutral Apartment or not.
+ m_pLastSTACtxCookie = (LPVOID)((UINT_PTR)pCtxCookie | 1);
+ }
+ else
+ {
+ m_pLastSTACtxCookie = pCtxCookie;
+ }
+}
+#endif // FEATURE_COMINTEROP
+
+#include "appdomainstack.inl"
+
+inline bool Thread::IsGCSpecial()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fGCSpecial;
+}
+
+inline void Thread::SetGCSpecial(bool fGCSpecial)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_fGCSpecial = fGCSpecial;
+}
+
+#endif
diff --git a/src/vm/threadstatics.cpp b/src/vm/threadstatics.cpp
new file mode 100644
index 0000000000..a07c637995
--- /dev/null
+++ b/src/vm/threadstatics.cpp
@@ -0,0 +1,710 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// ThreadStatics.cpp
+//
+
+//
+//
+
+
+#include "common.h"
+
+#include "threadstatics.h"
+#include "field.h"
+
+
+#ifndef DACCESS_COMPILE
+
+void ThreadLocalBlock::FreeTLM(SIZE_T i)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ _ASSERTE(m_pTLMTable != NULL);
+
+ PTR_ThreadLocalModule pThreadLocalModule = m_pTLMTable[i].pTLM;
+ m_pTLMTable[i].pTLM = NULL;
+
+ if (pThreadLocalModule != NULL)
+ {
+ if (pThreadLocalModule->m_pDynamicClassTable != NULL)
+ {
+ for (DWORD k = 0; k < pThreadLocalModule->m_aDynamicEntries; ++k)
+ {
+ if (pThreadLocalModule->m_pDynamicClassTable[k].m_pDynamicEntry != NULL)
+ {
+ delete pThreadLocalModule->m_pDynamicClassTable[k].m_pDynamicEntry;
+ pThreadLocalModule->m_pDynamicClassTable[k].m_pDynamicEntry = NULL;
+ }
+ }
+ delete pThreadLocalModule->m_pDynamicClassTable;
+ pThreadLocalModule->m_pDynamicClassTable = NULL;
+ }
+
+ delete pThreadLocalModule;
+ }
+}
+
+void ThreadLocalBlock::FreeTable()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+ // Free the TLM table
+ if (m_pTLMTable != NULL)
+ {
+ // Iterate over the table and free each TLM
+ for (SIZE_T i = 0; i < m_TLMTableSize; ++i)
+ {
+ if (m_pTLMTable[i].pTLM != NULL)
+ {
+ FreeTLM(i);
+ }
+ }
+
+ // Free the table itself
+ delete m_pTLMTable;
+ m_pTLMTable = NULL;
+ }
+
+ // Set table size to zero
+ m_TLMTableSize = 0;
+
+ // Free the ThreadStaticHandleTable
+ if (m_pThreadStaticHandleTable != NULL)
+ {
+ delete m_pThreadStaticHandleTable;
+ m_pThreadStaticHandleTable = NULL;
+ }
+
+ // Free any pinning handles we may have created
+ FreePinningHandles();
+}
+
+void ThreadLocalBlock::EnsureModuleIndex(ModuleIndex index)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ if (m_TLMTableSize > index.m_dwIndex)
+ {
+ _ASSERTE(m_pTLMTable != NULL);
+ return;
+ }
+
+ SIZE_T aModuleIndices = max(16, m_TLMTableSize);
+ while (aModuleIndices <= index.m_dwIndex)
+ {
+ aModuleIndices *= 2;
+ }
+
+ // If this allocation fails, we will throw. If it succeeds,
+ // then we are good to go
+ PTR_TLMTableEntry pNewModuleSlots = (PTR_TLMTableEntry) (void*) new BYTE[sizeof(TLMTableEntry) * aModuleIndices];
+
+ // Zero out the new TLM table
+ memset(pNewModuleSlots, 0 , sizeof(TLMTableEntry) * aModuleIndices);
+
+ if (m_pTLMTable != NULL)
+ {
+ memcpy(pNewModuleSlots, m_pTLMTable, sizeof(TLMTableEntry) * m_TLMTableSize);
+ }
+ else
+ {
+ _ASSERTE(m_TLMTableSize == 0);
+ }
+
+ PTR_TLMTableEntry pOldModuleSlots = m_pTLMTable;
+
+ m_pTLMTable = pNewModuleSlots;
+ m_TLMTableSize = aModuleIndices;
+
+ if (pOldModuleSlots != NULL)
+ delete pOldModuleSlots;
+}
+
+#endif
+
+void ThreadLocalBlock::SetModuleSlot(ModuleIndex index, PTR_ThreadLocalModule pLocalModule)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ // This method will not grow the table. You need to grow
+ // the table explicitly before calling SetModuleSlot()
+
+ _ASSERTE(index.m_dwIndex < m_TLMTableSize);
+
+ m_pTLMTable[index.m_dwIndex].pTLM = pLocalModule;
+}
+
+#ifdef DACCESS_COMPILE
+
+void
+ThreadLocalModule::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ // Enumerate the ThreadLocalModule itself. TLMs are allocated to be larger than
+ // sizeof(ThreadLocalModule) to make room for ClassInit flags and non-GC statics.
+ // "DAC_ENUM_DTHIS()" probably does not account for this, so we might not enumerate
+ // all of the ClassInit flags and non-GC statics.
+ DAC_ENUM_DTHIS();
+
+ if (m_pDynamicClassTable != NULL)
+ {
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_pDynamicClassTable),
+ m_aDynamicEntries * sizeof(DynamicClassInfo));
+
+ for (SIZE_T i = 0; i < m_aDynamicEntries; i++)
+ {
+ PTR_DynamicEntry entry = dac_cast<PTR_DynamicEntry>(m_pDynamicClassTable[i].m_pDynamicEntry);
+ if (entry.IsValid())
+ {
+ entry.EnumMem();
+ }
+ }
+ }
+}
+
+void
+ThreadLocalBlock::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ // Enumerate the ThreadLocalBlock itself
+ DAC_ENUM_DTHIS();
+
+ if (m_pTLMTable.IsValid())
+ {
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_pTLMTable),
+ m_TLMTableSize * sizeof(TADDR));
+
+ for (SIZE_T i = 0; i < m_TLMTableSize; i++)
+ {
+ PTR_ThreadLocalModule domMod = m_pTLMTable[i].pTLM;
+ if (domMod.IsValid())
+ {
+ domMod->EnumMemoryRegions(flags);
+ }
+ }
+ }
+}
+
+#endif
+
+DWORD ThreadLocalModule::GetClassFlags(MethodTable* pMT, DWORD iClassIndex) // iClassIndex defaults to (DWORD)-1
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ if (pMT->IsDynamicStatics())
+ {
+ DWORD dynamicClassID = pMT->GetModuleDynamicEntryID();
+ if(m_aDynamicEntries <= dynamicClassID)
+ return FALSE;
+ return (m_pDynamicClassTable[dynamicClassID].m_dwFlags);
+ }
+ else
+ {
+ if (iClassIndex == (DWORD)-1)
+ iClassIndex = pMT->GetClassIndex();
+ return GetPrecomputedStaticsClassData()[iClassIndex];
+ }
+}
+
+#ifndef DACCESS_COMPILE
+
+void ThreadLocalModule::SetClassFlags(MethodTable* pMT, DWORD dwFlags)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ if (pMT->IsDynamicStatics())
+ {
+ DWORD dwID = pMT->GetModuleDynamicEntryID();
+ EnsureDynamicClassIndex(dwID);
+ m_pDynamicClassTable[dwID].m_dwFlags |= dwFlags;
+ }
+ else
+ {
+ GetPrecomputedStaticsClassData()[pMT->GetClassIndex()] |= dwFlags;
+ }
+}
+
+void ThreadLocalBlock::AddPinningHandleToList(OBJECTHANDLE oh)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ ObjectHandleList::NodeType* pNewNode = new ObjectHandleList::NodeType(oh);
+ m_PinningHandleList.LinkHead(pNewNode);
+}
+
+void ThreadLocalBlock::FreePinningHandles()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ // Destroy all pinning handles in the list, and free the nodes
+ ObjectHandleList::NodeType* pHandleNode;
+ while ((pHandleNode = m_PinningHandleList.UnlinkHead()) != NULL)
+ {
+ DestroyPinningHandle(pHandleNode->data);
+ delete pHandleNode;
+ }
+}
+
+void ThreadLocalBlock::AllocateThreadStaticHandles(Module * pModule, PTR_ThreadLocalModule pThreadLocalModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pThreadLocalModule->GetPrecomputedGCStaticsBaseHandleAddress() != NULL);
+ _ASSERTE(pThreadLocalModule->GetPrecomputedGCStaticsBaseHandle() == NULL);
+
+ if (pModule->GetNumGCThreadStaticHandles() > 0)
+ {
+ AllocateStaticFieldObjRefPtrs(pModule->GetNumGCThreadStaticHandles(),
+ pThreadLocalModule->GetPrecomputedGCStaticsBaseHandleAddress());
+
+ // We should throw if we fail to allocate and never hit this assert
+ _ASSERTE(pThreadLocalModule->GetPrecomputedGCStaticsBaseHandle() != NULL);
+ _ASSERTE(pThreadLocalModule->GetPrecomputedGCStaticsBasePointer() != NULL);
+ }
+}
+
+OBJECTHANDLE ThreadLocalBlock::AllocateStaticFieldObjRefPtrs(int nRequested, OBJECTHANDLE * ppLazyAllocate)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION((nRequested > 0));
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (ppLazyAllocate && *ppLazyAllocate)
+ {
+ // Allocation already happened
+ return *ppLazyAllocate;
+ }
+
+ // Make sure the large heap handle table is initialized.
+ if (!m_pThreadStaticHandleTable)
+ InitThreadStaticHandleTable();
+
+ // Allocate the handles.
+ OBJECTHANDLE result = m_pThreadStaticHandleTable->AllocateHandles(nRequested);
+
+ if (ppLazyAllocate)
+ {
+ *ppLazyAllocate = result;
+ }
+
+ return result;
+}
+
+void ThreadLocalBlock::InitThreadStaticHandleTable()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_pThreadStaticHandleTable==NULL);
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ // If the allocation fails this will throw; callers need
+ // to account for this possibility
+ m_pThreadStaticHandleTable = new ThreadStaticHandleTable(GetAppDomain());
+}
+
+void ThreadLocalBlock::AllocateThreadStaticBoxes(MethodTable * pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(pMT->GetNumBoxedThreadStatics() > 0);
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ FieldDesc *pField = pMT->HasGenericsStaticsInfo() ?
+ pMT->GetGenericsStaticFieldDescs() : (pMT->GetApproxFieldDescListRaw() + pMT->GetNumIntroducedInstanceFields());
+
+ // Move pField to point to the list of thread statics
+ pField += pMT->GetNumStaticFields() - pMT->GetNumThreadStaticFields();
+
+ FieldDesc *pFieldEnd = pField + pMT->GetNumThreadStaticFields();
+
+ while (pField < pFieldEnd)
+ {
+ _ASSERTE(pField->IsThreadStatic());
+
+ // We only care about thread statics which are value types
+ if (pField->IsByValue())
+ {
+ TypeHandle th = pField->GetFieldTypeHandleThrowing();
+ MethodTable* pFieldMT = th.GetMethodTable();
+
+ // AllocateStaticBox will pin this object if this class is FixedAddressVTStatics.
+ // We save this pinning handle in a list attached to the ThreadLocalBlock. When
+ // the thread dies, we release all the pinning handles in the list.
+
+ OBJECTHANDLE handle;
+ OBJECTREF obj = MethodTable::AllocateStaticBox(pFieldMT, pMT->HasFixedAddressVTStatics(), &handle);
+
+ PTR_BYTE pStaticBase = pMT->GetGCThreadStaticsBasePointer();
+ _ASSERTE(pStaticBase != NULL);
+
+ SetObjectReference( (OBJECTREF*)(pStaticBase + pField->GetOffset()), obj, GetAppDomain() );
+
+ // If we created a pinning handle, save it to the list
+ if (handle != NULL)
+ AddPinningHandleToList(handle);
+ }
+
+ pField++;
+ }
+}
+
+#endif
+
+#ifndef DACCESS_COMPILE
+
+void ThreadLocalModule::EnsureDynamicClassIndex(DWORD dwID)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (dwID < m_aDynamicEntries)
+ {
+ _ASSERTE(m_pDynamicClassTable != NULL);
+ return;
+ }
+
+ SIZE_T aDynamicEntries = max(16, m_aDynamicEntries);
+ while (aDynamicEntries <= dwID)
+ {
+ aDynamicEntries *= 2;
+ }
+
+ DynamicClassInfo* pNewDynamicClassTable;
+
+ // If this allocation fails, we throw. If it succeeds,
+ // then we are good to go
+ pNewDynamicClassTable = (DynamicClassInfo*)(void*)new BYTE[sizeof(DynamicClassInfo) * aDynamicEntries];
+
+ // Zero out the dynamic class table
+ memset(pNewDynamicClassTable, 0, sizeof(DynamicClassInfo) * aDynamicEntries);
+
+    // This should always be non-NULL, but check just to be safe
+ if (m_pDynamicClassTable != NULL)
+ {
+ memcpy(pNewDynamicClassTable, m_pDynamicClassTable, sizeof(DynamicClassInfo) * m_aDynamicEntries);
+ }
+ else
+ {
+ _ASSERTE(m_aDynamicEntries == 0);
+ }
+
+ _ASSERTE(m_aDynamicEntries%2 == 0);
+
+ DynamicClassInfo* pOldDynamicClassTable = m_pDynamicClassTable;
+
+ m_pDynamicClassTable = pNewDynamicClassTable;
+ m_aDynamicEntries = aDynamicEntries;
+
+ if (pOldDynamicClassTable != NULL)
+ delete pOldDynamicClassTable;
+}
+
+void ThreadLocalModule::AllocateDynamicClass(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!pMT->IsSharedByGenericInstantiations());
+ _ASSERTE(pMT->IsDynamicStatics());
+
+ DWORD dwID = pMT->GetModuleDynamicEntryID();
+
+ EnsureDynamicClassIndex(dwID);
+
+ _ASSERTE(m_aDynamicEntries > dwID);
+
+ EEClass *pClass = pMT->GetClass();
+ DWORD dwStaticBytes = pClass->GetNonGCThreadStaticFieldBytes();
+ DWORD dwNumHandleStatics = pClass->GetNumHandleThreadStatics();
+
+ _ASSERTE(!IsClassAllocated(pMT));
+ _ASSERTE(!IsClassInitialized(pMT));
+ _ASSERTE(!IsClassInitError(pMT));
+
+ DynamicEntry *pDynamicStatics = m_pDynamicClassTable[dwID].m_pDynamicEntry;
+
+    // We need this check because a class may have a cctor but no statics
+ if (dwStaticBytes > 0 || dwNumHandleStatics > 0)
+ {
+ // Collectible types do not support static fields yet
+ if (pMT->Collectible())
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleNotYet"));
+
+ if (pDynamicStatics == NULL)
+ {
+ // If this allocation fails, we will throw
+ pDynamicStatics = (DynamicEntry*)new BYTE[sizeof(DynamicEntry) + dwStaticBytes];
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+            // The memory block has to be aligned at MAX_PRIMITIVE_FIELD_SIZE to guarantee alignment of statics
+ static_assert_no_msg(sizeof(DynamicEntry) % MAX_PRIMITIVE_FIELD_SIZE == 0);
+ _ASSERTE(IS_ALIGNED(pDynamicStatics, MAX_PRIMITIVE_FIELD_SIZE));
+#endif
+
+ // Zero out the new DynamicEntry
+ memset((BYTE*)pDynamicStatics, 0, sizeof(DynamicEntry) + dwStaticBytes);
+
+ // Save the DynamicEntry in the DynamicClassTable
+ m_pDynamicClassTable[dwID].m_pDynamicEntry = pDynamicStatics;
+ }
+
+ if (dwNumHandleStatics > 0)
+ {
+ PTR_ThreadLocalBlock pThreadLocalBlock = GetThread()->m_pThreadLocalBlock;
+ _ASSERTE(pThreadLocalBlock != NULL);
+ pThreadLocalBlock->AllocateStaticFieldObjRefPtrs(dwNumHandleStatics,
+ &pDynamicStatics->m_pGCStatics);
+ }
+ }
+}
+
+void ThreadLocalModule::PopulateClass(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(this != NULL);
+ _ASSERTE(pMT != NULL);
+ _ASSERTE(!IsClassAllocated(pMT));
+
+ // If this is a dynamic class then we need to allocate
+ // an entry in our dynamic class table
+ if (pMT->IsDynamicStatics())
+ AllocateDynamicClass(pMT);
+
+    // We need to allocate boxes for any value-type statics that are not
+ // primitives or enums, because these statics may contain references
+ // to objects on the GC heap
+ if (pMT->GetNumBoxedThreadStatics() > 0)
+ {
+ PTR_ThreadLocalBlock pThreadLocalBlock = ThreadStatics::GetCurrentTLB();
+ _ASSERTE(pThreadLocalBlock != NULL);
+ pThreadLocalBlock->AllocateThreadStaticBoxes(pMT);
+ }
+
+ // Mark the class as allocated
+ SetClassAllocated(pMT);
+}
+
+PTR_ThreadLocalModule ThreadStatics::AllocateAndInitTLM(ModuleIndex index, PTR_ThreadLocalBlock pThreadLocalBlock, Module * pModule) //static
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+    _ASSERTE(pThreadLocalBlock != NULL);
+    _ASSERTE(pModule != NULL);
+
+    pThreadLocalBlock->EnsureModuleIndex(index);
+
+ NewHolder<ThreadLocalModule> pThreadLocalModule = AllocateTLM(pModule);
+
+ pThreadLocalBlock->AllocateThreadStaticHandles(pModule, pThreadLocalModule);
+
+ pThreadLocalBlock->SetModuleSlot(index, pThreadLocalModule);
+ pThreadLocalModule.SuppressRelease();
+
+ return pThreadLocalModule;
+}
+
+
+PTR_ThreadLocalModule ThreadStatics::GetTLM(ModuleIndex index, Module * pModule) //static
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // Get the TLM if it already exists
+ PTR_ThreadLocalModule pThreadLocalModule = ThreadStatics::GetTLMIfExists(index);
+
+ // If the TLM does not exist, create it now
+ if (pThreadLocalModule == NULL)
+ {
+ // Get the current ThreadLocalBlock
+ PTR_ThreadLocalBlock pThreadLocalBlock = ThreadStatics::GetCurrentTLB();
+ _ASSERTE(pThreadLocalBlock != NULL);
+
+ // Allocate and initialize the TLM, and add it to the TLB's table
+ pThreadLocalModule = AllocateAndInitTLM(index, pThreadLocalBlock, pModule);
+ }
+
+ return pThreadLocalModule;
+}
+
+PTR_ThreadLocalModule ThreadStatics::GetTLM(MethodTable * pMT) //static
+{
+ Module * pModule = pMT->GetModuleForStatics();
+ return GetTLM(pModule->GetModuleIndex(), pModule);
+}
+
+PTR_ThreadLocalBlock ThreadStatics::AllocateTLB(PTR_Thread pThread, ADIndex index) //static
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ _ASSERTE(pThread->m_pThreadLocalBlock == NULL);
+
+    // Grow the TLB table so that it has room to store the newly allocated
+    // ThreadLocalBlock. If growing the table fails, we cannot proceed.
+ ThreadStatics::EnsureADIndex(pThread, index);
+
+ // Allocate a new TLB and update this Thread's pointer to the current
+ // ThreadLocalBlock. Constructor zeroes out everything for us.
+ pThread->m_pThreadLocalBlock = new ThreadLocalBlock();
+
+ // Store the newly allocated ThreadLocalBlock in the TLB table
+ if (pThread->m_pThreadLocalBlock != NULL)
+ {
+ // We grew the TLB table earlier, so it should have room
+ _ASSERTE(index.m_dwIndex >= 0 && index.m_dwIndex < pThread->m_TLBTableSize);
+ pThread->m_pTLBTable[index.m_dwIndex] = pThread->m_pThreadLocalBlock;
+ }
+
+ return pThread->m_pThreadLocalBlock;
+}
+
+PTR_ThreadLocalModule ThreadStatics::AllocateTLM(Module * pModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ SIZE_T size = pModule->GetThreadLocalModuleSize();
+
+ _ASSERTE(size >= ThreadLocalModule::OffsetOfDataBlob());
+
+ PTR_ThreadLocalModule pThreadLocalModule = (ThreadLocalModule*)new BYTE[size];
+
+ // We guarantee alignment for 64-bit regular thread statics on 32-bit platforms even without FEATURE_64BIT_ALIGNMENT for performance reasons.
+
+ // The memory block has to be aligned at MAX_PRIMITIVE_FIELD_SIZE to guarantee alignment of statics
+ _ASSERTE(IS_ALIGNED(pThreadLocalModule, MAX_PRIMITIVE_FIELD_SIZE));
+
+ // Zero out the part of memory where the TLM resides
+ memset(pThreadLocalModule, 0, size);
+
+ return pThreadLocalModule;
+}
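+
+// Editorial sketch (not part of the original commit): the IS_ALIGNED assertion
+// above reduces to a mask test against a power-of-two alignment:
+//
+//     static bool IsAligned(const void* p, size_t alignment) // power of two
+//     {
+//         return (reinterpret_cast<uintptr_t>(p) & (alignment - 1)) == 0;
+//     }
+//
+// e.g. IsAligned(pThreadLocalModule, MAX_PRIMITIVE_FIELD_SIZE) mirrors the
+// _ASSERTE in AllocateTLM.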
+
+#endif
+
+PTR_ThreadLocalBlock ThreadStatics::GetTLBIfExists(PTR_Thread pThread, ADIndex index) //static
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+    // Check to see if we have a ThreadLocalBlock for this AppDomain
+ if (index.m_dwIndex < pThread->m_TLBTableSize)
+ {
+ return pThread->m_pTLBTable[index.m_dwIndex];
+ }
+
+ return NULL;
+}
diff --git a/src/vm/threadstatics.h b/src/vm/threadstatics.h
new file mode 100644
index 0000000000..5c24c7458e
--- /dev/null
+++ b/src/vm/threadstatics.h
@@ -0,0 +1,689 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ThreadStatics.h
+//
+
+//
+//
+// Classes can contain instance fields and static fields. In addition to regular statics, .NET offers
+// several types of special statics. In IL, thread static fields are marked with the ThreadStaticAttribute,
+// distinguishing them from regular statics and other types of special statics. A thread static field is
+// not shared between threads. Each executing thread has a separate instance of the field, and independently
+// sets and gets values for that field.
+//
+// This implementation of thread statics closely parallels the implementation for regular statics. Regular
+// statics use the DomainLocalBlock and DomainLocalModule structures to allocate space for statics each time
+// a module is loaded in an AppDomain.
+//
+
+//
+
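+// Editorial note (not part of the original commit): conceptually a thread
+// static behaves like C++ thread_local storage -- each thread sees its own
+// copy and never races with another thread on it:
+//
+//     thread_local int t_counter = 0;  // one instance per thread
+//     void Bump() { ++t_counter; }     // touches only this thread's copy
+//
+// The machinery below exists because managed thread statics are laid out per
+// (thread, module) pair at runtime rather than by the compiler.
+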
+#ifndef __threadstatics_h__
+#define __threadstatics_h__
+
+#ifndef BINDER
+#include "vars.hpp"
+#include "util.hpp"
+
+#include "appdomain.hpp"
+#include "field.h"
+#include "methodtable.h"
+#include "threads.h"
+#endif
+
+// Defines the ObjectHandleList type
+#include "specialstatics.h"
+
+
+typedef DPTR(struct ThreadLocalModule) PTR_ThreadLocalModule;
+
+struct ThreadLocalModule
+{
+ friend class ClrDataAccess;
+ friend class CheckAsmOffsets;
+ friend struct ThreadLocalBlock;
+
+ struct DynamicEntry
+ {
+ OBJECTHANDLE m_pGCStatics;
+#ifdef FEATURE_64BIT_ALIGNMENT
+ // Padding to make m_pDataBlob aligned at MAX_PRIMITIVE_FIELD_SIZE.
+ // code:MethodTableBuilder::PlaceThreadStaticFields assumes that the start of the data blob is aligned
+ SIZE_T m_padding;
+#endif
+ BYTE m_pDataBlob[0];
+#ifndef BINDER
+ inline PTR_BYTE GetGCStaticsBasePointer()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pGCStatics != NULL);
+
+ return dac_cast<PTR_BYTE>((PTR_OBJECTREF)((PTRARRAYREF)ObjectFromHandle(m_pGCStatics))->GetDataPtr());
+ }
+#endif
+ inline PTR_BYTE GetGCStaticsBaseHandle()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return dac_cast<PTR_BYTE>(m_pGCStatics);
+ }
+ inline PTR_BYTE GetNonGCStaticsBasePointer()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return dac_cast<PTR_BYTE>(this);
+ }
+ static DWORD GetOffsetOfDataBlob()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(DynamicEntry, m_pDataBlob);
+ }
+ };
+ typedef DPTR(DynamicEntry) PTR_DynamicEntry;
+
+ struct DynamicClassInfo
+ {
+ PTR_DynamicEntry m_pDynamicEntry;
+ DWORD m_dwFlags;
+ };
+ typedef DPTR(DynamicClassInfo) PTR_DynamicClassInfo;
+
+ // Note the difference between:
+ //
+ // GetPrecomputedNonGCStaticsBasePointer() and
+ // GetPrecomputedStaticsClassData()
+ //
+ // GetPrecomputedNonGCStaticsBasePointer returns the pointer that should be added to field offsets to retrieve statics
+ // GetPrecomputedStaticsClassData returns a pointer to the first byte of the precomputed statics block
+ inline TADDR GetPrecomputedNonGCStaticsBasePointer()
+ {
+ LIMITED_METHOD_CONTRACT
+ return dac_cast<TADDR>(this);
+ }
+
+ static SIZE_T GetOffsetOfDataBlob() { return offsetof(ThreadLocalModule, m_pDataBlob); }
+ static SIZE_T GetOffsetOfGCStaticHandle() { return offsetof(ThreadLocalModule, m_pGCStatics); }
+
+#ifndef BINDER
+ inline PTR_OBJECTREF GetPrecomputedGCStaticsBasePointer()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_pGCStatics != NULL);
+
+ return (PTR_OBJECTREF)((PTRARRAYREF)ObjectFromHandle(m_pGCStatics))->GetDataPtr();
+ }
+#endif
+
+ inline OBJECTHANDLE GetPrecomputedGCStaticsBaseHandle()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return m_pGCStatics;
+ }
+
+ inline OBJECTHANDLE * GetPrecomputedGCStaticsBaseHandleAddress()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return &m_pGCStatics;
+ }
+
+#ifndef BINDER
+ // Returns bytes so we can add offsets
+ inline PTR_BYTE GetGCStaticsBasePointer(MethodTable * pMT)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (pMT->IsDynamicStatics())
+ {
+ return GetDynamicEntryGCStaticsBasePointer(pMT->GetModuleDynamicEntryID());
+ }
+ else
+ {
+ return dac_cast<PTR_BYTE>(GetPrecomputedGCStaticsBasePointer());
+ }
+ }
+#endif
+
+ inline PTR_BYTE GetNonGCStaticsBasePointer(MethodTable * pMT)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (pMT->IsDynamicStatics())
+ {
+ return GetDynamicEntryNonGCStaticsBasePointer(pMT->GetModuleDynamicEntryID());
+ }
+ else
+ {
+ return dac_cast<PTR_BYTE>(this);
+ }
+ }
+
+ inline DynamicEntry* GetDynamicEntry(DWORD n)
+ {
+ LIMITED_METHOD_CONTRACT
+ SUPPORTS_DAC;
+ _ASSERTE(m_pDynamicClassTable && m_aDynamicEntries > n);
+ DynamicEntry* pEntry = m_pDynamicClassTable[n].m_pDynamicEntry;
+
+ return pEntry;
+ }
+
+#ifndef BINDER
+ // These helpers can now return null, as the debugger may do queries on a type
+ // before the calls to PopulateClass happen
+ inline PTR_BYTE GetDynamicEntryGCStaticsBasePointer(DWORD n)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (n >= m_aDynamicEntries)
+ {
+ return NULL;
+ }
+
+ DynamicEntry* pEntry = GetDynamicEntry(n);
+ if (!pEntry)
+ {
+ return NULL;
+ }
+
+ return pEntry->GetGCStaticsBasePointer();
+ }
+#endif
+
+ inline PTR_BYTE GetDynamicEntryNonGCStaticsBasePointer(DWORD n)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (n >= m_aDynamicEntries)
+ {
+ return NULL;
+ }
+
+ DynamicEntry* pEntry = GetDynamicEntry(n);
+ if (!pEntry)
+ {
+ return NULL;
+ }
+
+ return pEntry->GetNonGCStaticsBasePointer();
+ }
+#ifndef BINDER
+ FORCEINLINE PTR_DynamicClassInfo GetDynamicClassInfoIfInitialized(DWORD n)
+ {
+ WRAPPER_NO_CONTRACT;
+
+        // m_aDynamicEntries is set last, so it must be checked first
+ if (n >= m_aDynamicEntries)
+ {
+ return NULL;
+ }
+
+ _ASSERTE(m_pDynamicClassTable != NULL);
+ PTR_DynamicClassInfo pDynamicClassInfo = (PTR_DynamicClassInfo)(m_pDynamicClassTable + n);
+
+        // ClassInitFlags::INITIALIZED_FLAG is set last, so it must be checked first
+ if ((pDynamicClassInfo->m_dwFlags & ClassInitFlags::INITIALIZED_FLAG) == 0)
+ {
+ return NULL;
+ }
+
+ PREFIX_ASSUME(pDynamicClassInfo != NULL);
+ return pDynamicClassInfo;
+ }
+
+ // iClassIndex is slightly expensive to compute, so if we already know
+ // it, we can use this helper
+
+ inline BOOL IsClassInitialized(MethodTable* pMT, DWORD iClassIndex = (DWORD)-1)
+ {
+ WRAPPER_NO_CONTRACT;
+ return (GetClassFlags(pMT, iClassIndex) & ClassInitFlags::INITIALIZED_FLAG) != 0;
+ }
+
+ inline BOOL IsClassAllocated(MethodTable* pMT, DWORD iClassIndex = (DWORD)-1)
+ {
+ WRAPPER_NO_CONTRACT;
+ return (GetClassFlags(pMT, iClassIndex) & ClassInitFlags::ALLOCATECLASS_FLAG) != 0;
+ }
+
+ BOOL IsClassInitError(MethodTable* pMT, DWORD iClassIndex = (DWORD)-1)
+ {
+ WRAPPER_NO_CONTRACT;
+ return (GetClassFlags(pMT, iClassIndex) & ClassInitFlags::ERROR_FLAG) != 0;
+ }
+
+ void SetClassInitialized(MethodTable* pMT)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!IsClassInitialized(pMT));
+ _ASSERTE(!IsClassInitError(pMT));
+
+ SetClassFlags(pMT, ClassInitFlags::INITIALIZED_FLAG);
+ }
+
+ void SetClassAllocatedAndInitialized(MethodTable* pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(!IsClassInitialized(pMT));
+ _ASSERTE(!IsClassInitError(pMT));
+
+ SetClassFlags(pMT, ClassInitFlags::ALLOCATECLASS_FLAG | ClassInitFlags::INITIALIZED_FLAG);
+ }
+
+ void SetClassAllocated(MethodTable* pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ SetClassFlags(pMT, ClassInitFlags::ALLOCATECLASS_FLAG);
+ }
+
+ void SetClassInitError(MethodTable* pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ SetClassFlags(pMT, ClassInitFlags::ERROR_FLAG);
+ }
+
+#ifndef DACCESS_COMPILE
+
+ void EnsureDynamicClassIndex(DWORD dwID);
+
+ void AllocateDynamicClass(MethodTable *pMT);
+
+ void PopulateClass(MethodTable *pMT);
+
+#endif
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+#endif
+ static DWORD OffsetOfDataBlob()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(ThreadLocalModule, m_pDataBlob);
+ }
+
+private:
+
+ void SetClassFlags(MethodTable* pMT, DWORD dwFlags);
+
+ DWORD GetClassFlags(MethodTable* pMT, DWORD iClassIndex);
+
+
+ PTR_DynamicClassInfo m_pDynamicClassTable; // used for generics and reflection.emit in memory
+ SIZE_T m_aDynamicEntries; // number of entries in dynamic table
+ OBJECTHANDLE m_pGCStatics; // Handle to GC statics of the module
+
+ // Note that the static offset calculation in code:Module::BuildStaticsOffsets takes the offset m_pDataBlob
+ // into consideration so we do not need any padding to ensure that the start of the data blob is aligned
+
+ BYTE m_pDataBlob[0]; // First byte of the statics blob
+
+ // Layout of m_pDataBlob is:
+ // ClassInit bytes (hold flags for cctor run, cctor error, etc)
+ // Non GC Statics
+
+public:
+#ifndef BINDER
+ inline PTR_BYTE GetPrecomputedStaticsClassData()
+ {
+ LIMITED_METHOD_CONTRACT
+ return dac_cast<PTR_BYTE>(this) + offsetof(ThreadLocalModule, m_pDataBlob);
+ }
+
+ inline BOOL IsPrecomputedClassInitialized(DWORD classID)
+ {
+ return GetPrecomputedStaticsClassData()[classID] & ClassInitFlags::INITIALIZED_FLAG;
+ }
+
+#ifndef DACCESS_COMPILE
+
+ FORCEINLINE void EnsureClassAllocated(MethodTable * pMT)
+ {
+ _ASSERTE(this != NULL);
+
+ // Check if the class needs to be allocated
+ if (!IsClassAllocated(pMT))
+ PopulateClass(pMT);
+
+ // If PopulateClass() does not throw, then we are guaranteed
+ // that the class has been allocated
+ _ASSERTE(IsClassAllocated(pMT));
+ }
+
+ FORCEINLINE void CheckRunClassInitThrowing(MethodTable * pMT)
+ {
+ _ASSERTE(this != NULL);
+
+ // Check if the class has been marked as inited in the ThreadLocalModule
+ if (!IsClassInitialized(pMT))
+ {
+ // Ensure that the class has been allocated
+ EnsureClassAllocated(pMT);
+
+            // Check if the class has been marked as inited in the DomainLocalModule;
+            // if not, we must call CheckRunClassInitThrowing()
+ if (!pMT->IsClassInited())
+ pMT->CheckRunClassInitThrowing();
+
+ // We cannot mark the class as inited in the TLM until it has been marked
+ // as inited in the DLM. MethodTable::CheckRunClassInitThrowing() can return
+ // before the class constructor has finished running (because of recursion),
+ // so we actually need to check if the class has been marked as inited in the
+ // DLM before marking it as inited in the TLM.
+ if (pMT->IsClassInited())
+ SetClassInitialized(pMT);
+ }
+ }
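+
+    // Editorial sketch (not part of the original commit): the gate above caches
+    // a per-thread "inited" bit behind the domain-wide one, and only after
+    // re-checking, because the domain-level init can return early on recursion.
+    // Abstractly (tlmInited/dlmInited/RunCctor are hypothetical stand-ins):
+    //
+    //     if (!tlmInited)            // per-thread fast path
+    //     {
+    //         if (!dlmInited)
+    //             RunCctor();        // may return early if we recursed
+    //         if (dlmInited)         // re-check before caching per-thread
+    //             tlmInited = true;
+    //     }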
+
+#endif
+#endif
+}; // struct ThreadLocalModule
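+
+// Editorial sketch (not part of the original commit): GetDynamicClassInfoIfInitialized
+// above depends on publication ordering -- the table slot and its flags are
+// written before m_aDynamicEntries grows, so a reader that checks the count
+// first never observes a half-built entry. The same discipline in miniature
+// with std::atomic (an analogy; the runtime code relies on its own ordering
+// guarantees rather than the standard library):
+//
+//     struct Info;
+//     std::atomic<size_t> g_count{0};
+//     Info*               g_table[64];
+//
+//     void Publish(Info* p)                    // writer
+//     {
+//         size_t n = g_count.load(std::memory_order_relaxed);
+//         g_table[n] = p;                      // 1) fill the slot first
+//         g_count.store(n + 1, std::memory_order_release); // 2) then grow
+//     }
+//
+//     Info* Lookup(size_t i)                   // reader
+//     {
+//         if (i >= g_count.load(std::memory_order_acquire)) // count first
+//             return nullptr;
+//         return g_table[i];                   // slot is fully published
+//     }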
+
+
+
+#ifndef BINDER
+
+typedef DPTR(struct TLMTableEntry) PTR_TLMTableEntry;
+
+struct TLMTableEntry
+{
+ PTR_ThreadLocalModule pTLM;
+};
+
+
+typedef DPTR(struct ThreadLocalBlock) PTR_ThreadLocalBlock;
+typedef DPTR(PTR_ThreadLocalBlock) PTR_PTR_ThreadLocalBlock;
+
+struct ThreadLocalBlock
+{
+ friend class ClrDataAccess;
+
+private:
+ PTR_TLMTableEntry m_pTLMTable; // Table of ThreadLocalModules
+ SIZE_T m_TLMTableSize; // Current size of table
+
+ // Each ThreadLocalBlock has its own ThreadStaticHandleTable. The ThreadStaticHandleTable works
+ // by allocating Object arrays on the GC heap and keeping them alive with pinning handles.
+ //
+    // We use the ThreadStaticHandleTable to allocate space for GC thread statics. A GC thread
+    // static is a thread static that is either a reference type or a value type whose layout
+    // contains a pointer to a reference type.
+
+ ThreadStaticHandleTable * m_pThreadStaticHandleTable;
+
+ // Need to keep a list of the pinning handles we've created
+ // so they can be cleaned up when the thread dies
+ ObjectHandleList m_PinningHandleList;
+
+public:
+
+#ifndef DACCESS_COMPILE
+ void AddPinningHandleToList(OBJECTHANDLE oh);
+ void FreePinningHandles();
+ void AllocateThreadStaticHandles(Module * pModule, ThreadLocalModule * pThreadLocalModule);
+ OBJECTHANDLE AllocateStaticFieldObjRefPtrs(int nRequested, OBJECTHANDLE* ppLazyAllocate = NULL);
+ void InitThreadStaticHandleTable();
+
+ void AllocateThreadStaticBoxes(MethodTable* pMT);
+#endif
+
+public: // used by code generators
+ static SIZE_T GetOffsetOfModuleSlotsPointer() { return offsetof(ThreadLocalBlock, m_pTLMTable); }
+
+public:
+
+#ifndef DACCESS_COMPILE
+ ThreadLocalBlock()
+ : m_pTLMTable(NULL), m_TLMTableSize(0), m_pThreadStaticHandleTable(NULL) {}
+
+ void FreeTLM(SIZE_T i);
+
+ void FreeTable();
+
+ void EnsureModuleIndex(ModuleIndex index);
+
+#endif
+
+ void SetModuleSlot(ModuleIndex index, PTR_ThreadLocalModule pLocalModule);
+
+ FORCEINLINE PTR_ThreadLocalModule GetTLMIfExists(ModuleIndex index)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ if (index.m_dwIndex >= m_TLMTableSize)
+ return NULL;
+
+ return m_pTLMTable[index.m_dwIndex].pTLM;
+ }
+
+ FORCEINLINE PTR_ThreadLocalModule GetTLMIfExists(MethodTable* pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ ModuleIndex index = pMT->GetModuleForStatics()->GetModuleIndex();
+ return GetTLMIfExists(index);
+ }
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+};
+
+
+
+
+class ThreadStatics
+{
+ public:
+
+#ifndef DACCESS_COMPILE
+ static PTR_ThreadLocalBlock AllocateTLB(PTR_Thread pThread, ADIndex index);
+ static PTR_ThreadLocalModule AllocateTLM(Module * pModule);
+ static PTR_ThreadLocalModule AllocateAndInitTLM(ModuleIndex index, PTR_ThreadLocalBlock pThreadLocalBlock, Module * pModule);
+
+ static PTR_ThreadLocalModule GetTLM(ModuleIndex index, Module * pModule);
+ static PTR_ThreadLocalModule GetTLM(MethodTable * pMT);
+#endif
+ static PTR_ThreadLocalBlock GetTLBIfExists(PTR_Thread pThread, ADIndex index);
+
+#ifndef DACCESS_COMPILE
+ // Grows the TLB table
+ inline static void EnsureADIndex(PTR_Thread pThread, ADIndex index)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ SIZE_T size = max(16, pThread->m_TLBTableSize);
+ while (size <= index.m_dwIndex)
+ {
+ size *= 2;
+ }
+
+ // If this allocation fails, we will throw. If it succeeds,
+ // then we are good to go
+ PTR_ThreadLocalBlock * pNewTLBTable = (PTR_ThreadLocalBlock *)(void*)new PTR_ThreadLocalBlock [size];
+
+ // Zero out the new TLB table
+ memset(pNewTLBTable, 0, sizeof(PTR_ThreadLocalBlock) * size);
+
+ if (pThread->m_pTLBTable != NULL)
+ {
+ memcpy(pNewTLBTable, pThread->m_pTLBTable, sizeof(PTR_ThreadLocalBlock) * pThread->m_TLBTableSize);
+ }
+
+ PTR_ThreadLocalBlock * pOldTLBTable = pThread->m_pTLBTable;
+
+ pThread->m_pTLBTable = pNewTLBTable;
+ pThread->m_TLBTableSize = size;
+
+        delete [] pOldTLBTable;    // allocated with new[], so release with delete[]
+ }
+
+ FORCEINLINE static PTR_ThreadLocalBlock GetCurrentTLBIfExists()
+ {
+ // Get the current thread
+ PTR_Thread pThread = GetThread();
+
+ // If the current TLB pointer is NULL, search the TLB table
+ if (pThread->m_pThreadLocalBlock == NULL)
+ {
+ ADIndex index = pThread->GetDomain()->GetIndex();
+ pThread->m_pThreadLocalBlock = ThreadStatics::GetTLBIfExists(pThread, index);
+ }
+
+ return pThread->m_pThreadLocalBlock;
+ }
+#endif
+
+ FORCEINLINE static PTR_ThreadLocalBlock GetCurrentTLBIfExists(PTR_Thread pThread, PTR_AppDomain pDomain)
+ {
+ SUPPORTS_DAC;
+
+ // If the current TLB pointer is NULL, search the TLB table
+ PTR_ThreadLocalBlock pTLB = pThread->m_pThreadLocalBlock;
+ if (pTLB == NULL)
+ {
+ if (pDomain == NULL)
+ {
+ pDomain = pThread->GetDomain();
+ }
+
+ pTLB = ThreadStatics::GetTLBIfExists(pThread, pDomain->GetIndex());
+
+ // Update the ThreadLocalBlock pointer,
+ // but only on non-DAC builds
+#ifndef DACCESS_COMPILE
+ pThread->m_pThreadLocalBlock = pTLB;
+#endif
+ }
+
+ return pTLB;
+ }
+
+#ifndef DACCESS_COMPILE
+ FORCEINLINE static PTR_ThreadLocalBlock GetCurrentTLB()
+ {
+ // Get the current thread
+ Thread * pThread = GetThread();
+
+ // If the current TLB pointer is NULL, search the TLB table
+ if (pThread->m_pThreadLocalBlock == NULL)
+ {
+ AppDomain * pDomain = pThread->GetDomain();
+ pThread->m_pThreadLocalBlock = ThreadStatics::GetTLBIfExists(pThread, pDomain->GetIndex());
+ if (pThread->m_pThreadLocalBlock == NULL)
+ {
+ // Allocate the new ThreadLocalBlock.
+ // If the allocation fails this will throw.
+ return ThreadStatics::AllocateTLB(pThread, pDomain->GetIndex());
+ }
+ }
+
+ return pThread->m_pThreadLocalBlock;
+ }
+
+ FORCEINLINE static PTR_ThreadLocalModule GetTLMIfExists(ModuleIndex index)
+ {
+ // Get the current ThreadLocalBlock
+ PTR_ThreadLocalBlock pThreadLocalBlock = GetCurrentTLBIfExists();
+ if (pThreadLocalBlock == NULL)
+ return NULL;
+
+ // Get the TLM from the ThreadLocalBlock's table
+ return pThreadLocalBlock->GetTLMIfExists(index);
+ }
+
+ FORCEINLINE static PTR_ThreadLocalModule GetTLMIfExists(MethodTable * pMT)
+ {
+ // Get the current ThreadLocalBlock
+ PTR_ThreadLocalBlock pThreadLocalBlock = GetCurrentTLBIfExists();
+ if (pThreadLocalBlock == NULL)
+ return NULL;
+
+ // Get the TLM from the ThreadLocalBlock's table
+ return pThreadLocalBlock->GetTLMIfExists(pMT);
+ }
+#endif
+
+};
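+
+// Editorial sketch (not part of the original commit): EnsureADIndex above grows
+// the TLB table geometrically -- start at 16 slots and double until the
+// requested index fits -- so the number of reallocations stays logarithmic in
+// the largest index ever used. The sizing rule in isolation:
+//
+//     static size_t GrowToCover(size_t currentSize, size_t index)
+//     {
+//         size_t size = (currentSize < 16) ? 16 : currentSize;
+//         while (size <= index)
+//             size *= 2;
+//         return size;       // e.g. GrowToCover(0, 40) == 64
+//     }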
+#endif
+
+
+#endif
diff --git a/src/vm/threadsuspend.cpp b/src/vm/threadsuspend.cpp
new file mode 100644
index 0000000000..46e9e65c5e
--- /dev/null
+++ b/src/vm/threadsuspend.cpp
@@ -0,0 +1,8507 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// threadsuspend.cpp
+//
+// This file contains the implementation of thread suspension. The implementation of thread suspension
+// used to be spread across multiple places. That is why many methods still live in their own homes
+// (class Thread, class ThreadStore, etc.). They should eventually be refactored into class ThreadSuspend.
+//
+
+#include "common.h"
+
+#include "threadsuspend.h"
+
+#include "finalizerthread.h"
+#include "dbginterface.h"
+
+#include "mdaassistants.h"
+
+// from ntstatus.h
+#define STATUS_SUSPEND_COUNT_EXCEEDED ((NTSTATUS)0xC000004AL)
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
+#define HIJACK_NONINTERRUPTIBLE_THREADS
+#endif
+
+bool ThreadSuspend::s_fSuspendRuntimeInProgress = false;
+
+CLREvent* ThreadSuspend::g_pGCSuspendEvent = NULL;
+
+ThreadSuspend::SUSPEND_REASON ThreadSuspend::m_suspendReason;
+Thread* ThreadSuspend::m_pThreadAttemptingSuspendForGC;
+
+CLREventBase * ThreadSuspend::s_hAbortEvt = NULL;
+CLREventBase * ThreadSuspend::s_hAbortEvtCache = NULL;
+
+
+// If you add any thread redirection function, make sure the debugger can 1) recognize the redirection
+// function, and 2) retrieve the original CONTEXT. See code:Debugger.InitializeHijackFunctionAddress and
+// code:DacDbiInterfaceImpl.RetrieveHijackedContext.
+extern "C" void RedirectedHandledJITCaseForGCThreadControl_Stub(void);
+extern "C" void RedirectedHandledJITCaseForDbgThreadControl_Stub(void);
+extern "C" void RedirectedHandledJITCaseForUserSuspend_Stub(void);
+extern "C" void RedirectedHandledJITCaseForYieldTask_Stub(void);
+
+#define GetRedirectHandlerForGCThreadControl() \
+ ((PFN_REDIRECTTARGET) GetEEFuncEntryPoint(RedirectedHandledJITCaseForGCThreadControl_Stub))
+#define GetRedirectHandlerForDbgThreadControl() \
+ ((PFN_REDIRECTTARGET) GetEEFuncEntryPoint(RedirectedHandledJITCaseForDbgThreadControl_Stub))
+#define GetRedirectHandlerForUserSuspend() \
+ ((PFN_REDIRECTTARGET) GetEEFuncEntryPoint(RedirectedHandledJITCaseForUserSuspend_Stub))
+#define GetRedirectHandlerForYieldTask() \
+ ((PFN_REDIRECTTARGET) GetEEFuncEntryPoint(RedirectedHandledJITCaseForYieldTask_Stub))
+
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) // GCCOVER
+extern "C" void RedirectedHandledJITCaseForGCStress_Stub(void);
+#define GetRedirectHandlerForGCStress() \
+ ((PFN_REDIRECTTARGET) GetEEFuncEntryPoint(RedirectedHandledJITCaseForGCStress_Stub))
+#endif // HAVE_GCCOVER && USE_REDIRECT_FOR_GCSTRESS
+#endif // _TARGET_AMD64_ || _TARGET_ARM_ || _TARGET_ARM64_
+
+
+// Every PING_JIT_TIMEOUT ms, check to see if a thread in JITted code has wandered
+// into some fully interruptible code (or should have a different hijack to improve
+// our chances of snagging it at a safe spot).
+#define PING_JIT_TIMEOUT 10
+
+// When we find a thread in a spot that's not safe to abort -- how long to wait before
+// we try again.
+#define ABORT_POLL_TIMEOUT 10
+#ifdef _DEBUG
+#define ABORT_FAIL_TIMEOUT 40000
+#endif // _DEBUG
+
+//
+// CANNOT USE IsBad*Ptr() methods here. They are *banned* APIs because of various
+// reasons (see http://winweb/wincet/bannedapis.htm).
+//
+#define IS_VALID_WRITE_PTR(addr, size) _ASSERTE(addr != NULL)
+#define IS_VALID_CODE_PTR(addr) _ASSERTE(addr != NULL)
+
+
+void ThreadSuspend::SetSuspendRuntimeInProgress()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(ThreadStore::HoldingThreadStore() || IsAtProcessExit());
+ _ASSERTE(!s_fSuspendRuntimeInProgress || IsAtProcessExit());
+ s_fSuspendRuntimeInProgress = true;
+}
+
+void ThreadSuspend::ResetSuspendRuntimeInProgress()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(ThreadStore::HoldingThreadStore() || IsAtProcessExit());
+ _ASSERTE(s_fSuspendRuntimeInProgress || IsAtProcessExit());
+ s_fSuspendRuntimeInProgress = false;
+}
+
+
+// When SuspendThread returns, the target thread may still be executing user code.
+// We cannot access data changed by the target thread, e.g. m_fPreemptiveGCDisabled.
+// But our code depends on reading this data. To make the operation safe, we
+// call GetThreadContext, which returns only after the target thread is no longer
+// executing any user code.
+
+// Message from David Cutler
+/*
+ After SuspendThread returns, can the suspended thread continue to execute code in user mode?
+
+ [David Cutler] The suspended thread cannot execute any more user code, but it might be currently "running"
+ on a logical processor whose other logical processor is currently actually executing another thread.
+ In this case the target thread will not suspend until the hardware switches back to executing instructions
+ on its logical processor. In this case even the memory barrier would not necessarily work - a better solution
+ would be to use interlocked operations on the variable itself.
+
+ After SuspendThread returns, does the store buffer of the CPU for the suspended thread still need to drain?
+
+ Historically, we've assumed that the answer to both questions is No. But on one 4/8 hyper-threaded machine
+ running Win2K3 SP1 build 1421, we've seen two stress failures where SuspendThread returns while writes seem to still be in flight.
+
+ Usually after we suspend a thread, we then call GetThreadContext. This seems to guarantee consistency.
+ But there are places we would like to avoid GetThreadContext, if it's safe and legal.
+
+ [David Cutler] Get context delivers a APC to the target thread and waits on an event that will be set
+ when the target thread has delivered its context.
+
+ Chris.
+*/
+
+// Message from Neill Clift
+/*
+ What SuspendThread does is insert an APC block into a target thread and request an inter-processor interrupt to
+ do the APC interrupt. It doesn't wait till the thread actually enters some state or the interrupt has been serviced.
+
+ I took a quick look at the APIC spec in the Intel manuals this morning. Writing to the APIC posts a message on a bus.
+ Processors accept messages and presumably queue the s/w interrupts at this time. We don't wait for this acceptance
+ when we send the IPI so at least on APIC machines when you suspend a thread it continues to execute code for some short time
+ after the routine returns. We use other mechanisms for IPI and so it could work differently on different h/w.
+
+*/
+BOOL EnsureThreadIsSuspended (HANDLE hThread, Thread* pThread)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ WRAPPER_NO_CONTRACT;
+
+ CONTEXT ctx;
+ ctx.ContextFlags = CONTEXT_INTEGER;
+
+ BOOL ret;
+ ret = ::GetThreadContext(hThread, &ctx);
+ return ret;
+}
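+
+// Editorial sketch (not part of the original commit): the suspend/verify pairing
+// above in isolation, using only documented Win32 APIs. Memory written by the
+// target thread is safe to read only after GetThreadContext has round-tripped:
+//
+//     bool SafeSuspend(HANDLE hThread)
+//     {
+//         if (::SuspendThread(hThread) == (DWORD)-1)
+//             return false;                // the suspend request itself failed
+//         CONTEXT ctx;
+//         ctx.ContextFlags = CONTEXT_INTEGER;
+//         if (!::GetThreadContext(hThread, &ctx))
+//         {
+//             ::ResumeThread(hThread);     // could not confirm the suspension
+//             return false;
+//         }
+//         return true;                     // target has truly stopped running
+//     }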
+
+FORCEINLINE VOID MyEnterLogLock()
+{
+ EnterLogLock();
+}
+FORCEINLINE VOID MyLeaveLogLock()
+{
+ LeaveLogLock();
+}
+
+// On non-Windows CoreCLR platforms, Thread::SuspendThread support is removed
+#ifndef DISABLE_THREADSUSPEND
+// SuspendThread
+// Attempts to OS-suspend the thread, whichever GC mode it is in.
+// Arguments:
+// fOneTryOnly - If TRUE, report failure if the thread has its
+// m_dwForbidSuspendThread flag set. If FALSE, retry.
+// pdwSuspendCount - If non-NULL, will contain the return code
+// of the underlying OS SuspendThread call on success,
+// undefined on any kind of failure.
+// Return value:
+// A SuspendThreadResult value indicating success or failure.
+Thread::SuspendThreadResult Thread::SuspendThread(BOOL fOneTryOnly, DWORD *pdwSuspendCount)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef STRESS_LOG
+ if (StressLog::StressLogOn((unsigned int)-1, 0))
+ {
+ // Make sure to create the stress log for the current thread
+ // (if needed) before we suspend the target thread. The target
+ // thread may be holding the stress log lock when we suspend it,
+ // which could cause a deadlock.
+ if (StressLog::CreateThreadStressLog() == NULL)
+ {
+ return STR_NoStressLog;
+ }
+ }
+#endif
+
+ Volatile<HANDLE> hThread;
+ SuspendThreadResult str = (SuspendThreadResult) -1;
+ DWORD dwSuspendCount = 0;
+ DWORD tries = 1;
+#if defined(_DEBUG)
+ int nCnt = 0;
+ bool bDiagSuspend = g_pConfig->GetDiagnosticSuspend();
+ ULONGLONG i64TimestampStart = CLRGetTickCount64();
+ ULONGLONG i64TimestampCur = i64TimestampStart;
+ ULONGLONG i64TimestampPrev = i64TimestampStart;
+
+ // This is the max allowed timestamp ticks to transpire from beginning of
+ // our attempt to suspend the thread, before we'll assert (implying we believe
+ // there might be a deadlock) - (default = 2000).
+ ULONGLONG i64TimestampTicksMax = g_pConfig->SuspendThreadDeadlockTimeoutMs();
+#endif // _DEBUG
+
+#if defined(_DEBUG)
+ // Stop the stress log from allocating any new memory while in this function
+ // as that can lead to deadlocks
+ CantAllocHolder hldrCantAlloc;
+#endif
+
+ DWORD dwSwitchCount = 0;
+
+ while (TRUE) {
+ StateHolder<MyEnterLogLock, MyLeaveLogLock> LogLockHolder(FALSE);
+
+ CounterHolder handleHolder(&m_dwThreadHandleBeingUsed);
+
+ // Whether or not "goto retry" should YieldProcessor and __SwitchToThread
+ BOOL doSwitchToThread = TRUE;
+
+ hThread = GetThreadHandle();
+ if (hThread == INVALID_HANDLE_VALUE) {
+ str = STR_UnstartedOrDead;
+ break;
+ }
+ else if (hThread == SWITCHOUT_HANDLE_VALUE) {
+ str = STR_SwitchedOut;
+ break;
+ }
+
+ {
+ // We do not want to suspend the target thread while it is holding the log lock.
+ // By acquiring the lock ourselves, we know that this is not the case.
+ LogLockHolder.Acquire();
+
+ // It is important to avoid two threads suspending each other.
+ // Before a thread suspends another, it increments its own m_dwForbidSuspendThread count first,
+ // then it checks the target thread's m_dwForbidSuspendThread.
+ ForbidSuspendThreadHolder forbidSuspend;
+ if ((m_dwForbidSuspendThread != 0))
+ {
+#if defined(_DEBUG)
+ // Enable the diagnostic ::SuspendThread() if the
+ // DiagnosticSuspend config setting is set.
+ // This will interfere with the mutual suspend race but it's
+ // here only for diagnostic purposes anyway
+ if (!bDiagSuspend)
+#endif // _DEBUG
+ goto retry;
+ }
+
+ dwSuspendCount = ::SuspendThread(hThread);
+
+ //
+ // Since SuspendThread is asynchronous, we now must wait for the thread to
+ // actually be suspended before decrementing our own m_dwForbidSuspendThread count.
+ // Otherwise there would still be a chance for the "suspended" thread to suspend us
+ // before it really stops running.
+ //
+ if ((int)dwSuspendCount >= 0)
+ {
+ if (!EnsureThreadIsSuspended(hThread, this))
+ {
+ ::ResumeThread(hThread);
+ str = STR_Failure;
+ break;
+ }
+ }
+ }
+ if ((int)dwSuspendCount >= 0)
+ {
+ if (hThread == GetThreadHandle())
+ {
+ if (m_dwForbidSuspendThread != 0)
+ {
+#if defined(_DEBUG)
+ // Log diagnostic below 8 times during the i64TimestampTicksMax period
+ if (i64TimestampCur-i64TimestampStart >= nCnt*(i64TimestampTicksMax>>3) )
+ {
+ CONTEXT ctx;
+ SetIP(&ctx, -1);
+ ctx.ContextFlags = CONTEXT_CONTROL;
+ this->GetThreadContext(&ctx);
+ STRESS_LOG7(LF_SYNC, LL_INFO1000,
+ "Thread::SuspendThread[%p]: EIP=%p. nCnt=%d. result=%d.\n"
+ "\t\t\t\t\t\t\t\t\t forbidSuspend=%d. coop=%d. state=%x.\n",
+ this, GetIP(&ctx), nCnt, dwSuspendCount,
+ (LONG)this->m_dwForbidSuspendThread, (ULONG)this->m_fPreemptiveGCDisabled, this->GetSnapshotState());
+
+ // Enable a preemptive assert in diagnostic mode: before we
+ // resume the target thread to get its current state in the debugger
+ if (bDiagSuspend)
+ {
+ // triggered after 6 * 250msec
+ _ASSERTE(nCnt < 6 && "Timing out in Thread::SuspendThread");
+ }
+
+ ++nCnt;
+ }
+#endif // _DEBUG
+ ::ResumeThread(hThread);
+
+#if defined(_DEBUG)
+ // If the suspend diagnostics are enabled we need to spin here in order to avoid
+ // the case where we Suspend/Resume the target thread without giving it a chance to run.
+ if ((!fOneTryOnly) && bDiagSuspend)
+ {
+ while ( m_dwForbidSuspendThread != 0 &&
+ CLRGetTickCount64()-i64TimestampStart < nCnt*(i64TimestampTicksMax>>3) )
+ {
+ if (g_SystemInfo.dwNumberOfProcessors > 1)
+ {
+ if ((tries++) % 20 != 0)
+ {
+ YieldProcessor(); // play nice on hyperthreaded CPUs
+ } else {
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+ }
+ else
+ {
+ __SwitchToThread(0, ++dwSwitchCount); // don't spin on uniproc machines
+ }
+ }
+ }
+#endif // _DEBUG
+ goto retry;
+ }
+            // We suspended the right thread
+#ifdef _DEBUG
+ Thread * pCurThread = GetThread();
+ if (pCurThread != NULL)
+ {
+ pCurThread->dbg_m_cSuspendedThreads ++;
+ _ASSERTE(pCurThread->dbg_m_cSuspendedThreads > 0);
+ }
+#endif
+ IncCantAllocCount();
+
+ m_ThreadHandleForResume = hThread;
+ str = STR_Success;
+ break;
+ }
+ else
+ {
+            // The thread was switched out and then back in again.
+            // We suspended the wrong thread.
+ ::ResumeThread(hThread);
+ doSwitchToThread = FALSE;
+ goto retry;
+ }
+ }
+ else {
+        // We can get here either because SuspendThread failed,
+        // or because the fiber thread died after this fiber was switched out.
+
+ if ((int)dwSuspendCount != -1) {
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, "In Thread::SuspendThread ::SuspendThread returned %x\n", dwSuspendCount);
+ }
+ if (GetThreadHandle() == SWITCHOUT_HANDLE_VALUE) {
+ str = STR_SwitchedOut;
+ break;
+ }
+ else {
+ // Our callers generally expect that STR_Failure means that
+ // the thread has exited.
+#ifndef FEATURE_PAL
+ _ASSERTE(NtCurrentTeb()->LastStatusValue != STATUS_SUSPEND_COUNT_EXCEEDED);
+#endif // !FEATURE_PAL
+ str = STR_Failure;
+ break;
+ }
+ }
+
+retry:
+ handleHolder.Release();
+ LogLockHolder.Release();
+
+ if (fOneTryOnly)
+ {
+ str = STR_Forbidden;
+ break;
+ }
+
+#if defined(_DEBUG)
+ i64TimestampPrev = i64TimestampCur;
+ i64TimestampCur = CLRGetTickCount64();
+ // CLRGetTickCount64() is global per machine (not per CPU, like getTimeStamp()).
+        // The next ASSERT states that CLRGetTickCount64() is increasing, or has wrapped.
+        // If it wrapped, the last iteration should have executed faster than 0.5 seconds.
+ _ASSERTE(i64TimestampCur >= i64TimestampPrev || i64TimestampCur <= 500);
+
+ if (i64TimestampCur - i64TimestampStart >= i64TimestampTicksMax)
+ {
+ dwSuspendCount = ::SuspendThread(hThread);
+ _ASSERTE(!"It takes too long to suspend a thread");
+ if ((int)dwSuspendCount >= 0)
+ ::ResumeThread(hThread);
+ }
+#endif // _DEBUG
+
+ if (doSwitchToThread)
+ {
+            // When looking for deadlocks we need to allow the target thread to run in order to make some progress.
+            // On multiprocessor machines we saw the suspending thread resuming immediately after the __SwitchToThread()
+            // because it had other processors available. As a consequence the target thread was being Resumed and
+            // Suspended right away, without a real chance to make any progress.
+ if (g_SystemInfo.dwNumberOfProcessors > 1)
+ {
+ if ((tries++) % 20 != 0) {
+ YieldProcessor(); // play nice on hyperthreaded CPUs
+ } else {
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+ }
+ else
+ {
+ __SwitchToThread(0, ++dwSwitchCount); // don't spin on uniproc machines
+ }
+ }
+
+ }
+
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackSuspends());
+ if (str == STR_Success)
+ {
+ g_profControlBlock.pProfInterface->RuntimeThreadSuspended((ThreadID)this);
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ if (pdwSuspendCount != NULL)
+ {
+ *pdwSuspendCount = dwSuspendCount;
+ }
+ _ASSERTE(str != (SuspendThreadResult) -1);
+ return str;
+
+}
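+
+// Editorial sketch (not part of the original commit): the backoff used by the
+// retry loop above, in isolation. On a multiprocessor machine we mostly issue a
+// cheap pause and yield the quantum only every 20th try; on a uniprocessor
+// machine spinning cannot help, so we always yield:
+//
+//     void Backoff(DWORD& tries, DWORD& dwSwitchCount, DWORD numberOfProcessors)
+//     {
+//         if (numberOfProcessors > 1 && (tries++ % 20) != 0)
+//             YieldProcessor();                     // pause; be kind to the sibling HT
+//         else
+//             __SwitchToThread(0, ++dwSwitchCount); // give up the time slice
+//     }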
+#endif // DISABLE_THREADSUSPEND
+
+// On non-Windows CoreCLR platforms, Thread::ResumeThread support is removed
+#ifndef DISABLE_THREADSUSPEND
+DWORD Thread::ResumeThread()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (m_ThreadHandleForResume != INVALID_HANDLE_VALUE);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ _ASSERTE (m_pHostTask == 0 || GetThreadHandle() != SWITCHOUT_HANDLE_VALUE);
+#else // !FEATURE_INCLUDE_ALL_INTERFACES
+ _ASSERTE (GetThreadHandle() != SWITCHOUT_HANDLE_VALUE);
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ //DWORD res = ::ResumeThread(GetThreadHandle());
+ DWORD res = ::ResumeThread(m_ThreadHandleForResume);
+ _ASSERTE (res != 0 && "Thread is not previously suspended");
+#ifdef _DEBUG_IMPL
+ _ASSERTE (!m_Creater.IsSameThread());
+ if ((res != (DWORD)-1) && (res != 0))
+ {
+ Thread * pCurThread = GetThread();
+ if (pCurThread != NULL)
+ {
+ _ASSERTE(pCurThread->dbg_m_cSuspendedThreads > 0);
+ pCurThread->dbg_m_cSuspendedThreads --;
+ _ASSERTE(pCurThread->dbg_m_cSuspendedThreadsWithoutOSLock <= pCurThread->dbg_m_cSuspendedThreads);
+ }
+ }
+#endif
+ if (res != (DWORD) -1 && res != 0)
+ {
+ DecCantAllocCount();
+ }
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackSuspends());
+ if ((res != 0) && (res != (DWORD)-1))
+ {
+ g_profControlBlock.pProfInterface->RuntimeThreadResumed((ThreadID)this);
+ }
+ END_PIN_PROFILER();
+ }
+#endif
+ return res;
+
+}
+#endif // DISABLE_THREADSUSPEND
+
+#ifdef _DEBUG
+void* forceStackA;
+
+// CheckSuspended
+// Checks whether the given thread is currently suspended.
+// Note that if we cannot determine the true suspension status
+// of the thread, we succeed. Intended to be used in asserts
+// in operations that require the target thread to be suspended.
+// Arguments:
+// pThread - The thread to examine.
+// Return value:
+// FALSE, if the thread is definitely not suspended.
+// TRUE, otherwise.
+static inline BOOL CheckSuspended(Thread *pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(GetThread() != pThread);
+ _ASSERTE(CheckPointer(pThread));
+
+#ifndef DISABLE_THREADSUSPEND
+ // Only perform this test if we're allowed to call back into the host.
+ // Thread::SuspendThread contains several potential calls into the host.
+ if (CanThisThreadCallIntoHost())
+ {
+ DWORD dwSuspendCount;
+ Thread::SuspendThreadResult str = pThread->SuspendThread(FALSE, &dwSuspendCount);
+ forceStackA = &dwSuspendCount;
+ if (str == Thread::STR_Success)
+ {
+ pThread->ResumeThread();
+ return dwSuspendCount >= 1;
+ }
+ }
+#endif // !DISABLE_THREADSUSPEND
+ return TRUE;
+}
+#endif //_DEBUG
+
+BOOL EEGetThreadContext(Thread *pThread, CONTEXT *pContext)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(CheckSuspended(pThread));
+
+ BOOL ret = pThread->GetThreadContext(pContext);
+
+ STRESS_LOG6(LF_SYNC, LL_INFO1000, "Got thread context ret = %d EIP = %p ESP = %p EBP = %p, pThread = %p, ContextFlags = 0x%x\n",
+ ret, GetIP(pContext), GetSP(pContext), GetFP(pContext), pThread, pContext->ContextFlags);
+
+ return ret;
+
+}
+
+BOOL EESetThreadContext(Thread *pThread, const CONTEXT *pContext)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef _TARGET_X86_
+ _ASSERTE(CheckSuspended(pThread));
+#endif
+
+ BOOL ret = pThread->SetThreadContext(pContext);
+
+ STRESS_LOG6(LF_SYNC, LL_INFO1000, "Set thread context ret = %d EIP = %p ESP = %p EBP = %p, pThread = %p, ContextFlags = 0x%x\n",
+ ret, GetIP((CONTEXT*)pContext), GetSP((CONTEXT*)pContext), GetFP((CONTEXT*)pContext), pThread, pContext->ContextFlags);
+
+ return ret;
+}
+
+// The AbortReason must be cleared at the following times:
+//
+// 1. When the application performs a ResetAbort.
+//
+// 2. When the physical thread stops running. That's because we must eliminate any
+// cycles that would otherwise be uncollectible, between the Reason and the Thread.
+// Nobody can retrieve the Reason after the thread stops running anyway.
+//
+// We don't have to do any work when the AppDomain containing the Reason object is unloaded.
+// That's because the HANDLE is released as part of the tear-down. The 'adid' prevents us
+// from ever using the trash handle value thereafter.
+
+void Thread::ClearAbortReason(BOOL pNoLock)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ OBJECTHANDLE oh;
+ ADID adid;
+
+ if (pNoLock){
+ // Stash the fields so we can destroy the OBJECTHANDLE if appropriate.
+ oh = m_AbortReason;
+ adid = m_AbortReasonDomainID;
+
+ // Clear the fields.
+ m_AbortReason = 0;
+ m_AbortReasonDomainID = ADID(INVALID_APPDOMAIN_ID);
+ }
+ else
+ // Scope the lock to stashing and clearing the two fields on the Thread object.
+ {
+ // Atomically get the OBJECTHANDLE and ADID of the object, and then
+ // clear them.
+
+ // NOTE: get the lock on this thread object, not on the executing thread.
+ Thread::AbortRequestLockHolder lock(this);
+
+ // Stash the fields so we can destroy the OBJECTHANDLE if appropriate.
+ oh = m_AbortReason;
+ adid = m_AbortReasonDomainID;
+
+ // Clear the fields.
+ m_AbortReason = 0;
+ m_AbortReasonDomainID = ADID(INVALID_APPDOMAIN_ID);
+ }
+
+ // If there is an OBJECTHANDLE, try to clear it.
+ if (oh != 0 && adid.m_dwId != 0)
+ { // See if the domain is still valid; if so, destroy the ObjectHandle
+ AppDomainFromIDHolder ad(adid, TRUE);
+ if (!ad.IsUnloaded())
+ { // Still a valid domain, so destroy the handle.
+ DestroyHandle(oh);
+ }
+ }
+}
+
+
+// Context passed down through a stack crawl (see code below).
+struct StackCrawlContext
+{
+ enum SCCType
+ {
+ SCC_CheckWithinEH = 0x00000001,
+ SCC_CheckWithinCer = 0x00000002,
+ };
+ Thread* pAbortee;
+ int eType;
+ BOOL fUnprotectedCode;
+ BOOL fWithinEHClause;
+ BOOL fWithinCer;
+ BOOL fHasManagedCodeOnStack;
+ BOOL fWriteToStressLog;
+
+ BOOL fHaveLatchedCF;
+ CrawlFrame LatchedCF;
+};
+
+
+#if _TARGET_AMD64_
+// Returns the length in bytes of the NOP instruction at ip, or 0 if ip is not at a NOP.
+// If you want to skip ahead of a NOP padding sequence, you'll need to call this
+// repeatedly, advancing ip by the returned length, until it returns 0.
+
+#define NOP_REX_PREFIX ((char)0x66)
+#define XCHG_EAX_EAX ((char)0x90)
+
+int isAtNop(const void *ip)
+{
+ const char *NextByte = (const char *)ip;
+ STRESS_LOG1(LF_EH, LL_INFO100, "AMD64 - isAtNop ip 0x%p\n", ip);
+
+ int lpfxCount = 0;
+ //
+ // Read in length prefixes - no effect
+ //
+ for (lpfxCount = 0; lpfxCount < 14; lpfxCount++)
+ {
+ if (NextByte[lpfxCount] != NOP_REX_PREFIX)
+ break;
+ }
+
+ //
+ // xchg eax, eax
+ //
+ if (NextByte[lpfxCount] == XCHG_EAX_EAX)
+ return (lpfxCount + 1);
+ return 0;
+}
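+
+// Editorial sketch (not part of the original commit): stepping over a padding
+// sequence with the helper above -- advance by the returned length until it
+// reports 0, i.e. until ip no longer points at a NOP:
+//
+//     const char* p = static_cast<const char*>(ip);
+//     int len;
+//     while ((len = isAtNop(p)) != 0)
+//         p += len;          // p now points at the first real instruction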
+#endif // _TARGET_AMD64_
+
+// Crawl the stack looking for Thread Abort related information (whether we're executing inside a CER
+// or an error handling clause of some sort).
+static StackWalkAction TAStackCrawlCallBackWorker(CrawlFrame* pCf, StackCrawlContext *pData)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pData->eType & (StackCrawlContext::SCC_CheckWithinCer | StackCrawlContext::SCC_CheckWithinEH));
+
+ if(pCf->IsFrameless())
+ {
+ IJitManager* pJitManager = pCf->GetJitManager();
+ _ASSERTE(pJitManager);
+ if (pJitManager && !pData->fHasManagedCodeOnStack)
+ {
+ pData->fHasManagedCodeOnStack = TRUE;
+ }
+ }
+
+ // Get the method for this frame if it exists (might not be a managed method, so check the explicit frame if that's what we're
+ // looking at).
+ MethodDesc *pMD = pCf->GetFunction();
+ Frame *pFrame = pCf->GetFrame();
+ if (pMD == NULL && pFrame != NULL)
+ pMD = pFrame->GetFunction();
+
+ // Non-method frames don't interest us.
+ if (pMD == NULL)
+ return SWA_CONTINUE;
+
+ #if defined(_DEBUG)
+ #define METHODNAME(pFunc) (pFunc?pFunc->m_pszDebugMethodName:"<n/a>")
+ #else
+ #define METHODNAME(pFunc) "<n/a>"
+ #endif
+ if (pData->fWriteToStressLog)
+ {
+ STRESS_LOG5(LF_EH, LL_INFO100, "TAStackCrawlCallBack: STACKCRAWL method:%pM ('%s'), offset %x, Frame:%p, FrameVtable = %pV\n",
+ pMD, METHODNAME(pMD), pCf->IsFrameless()?pCf->GetRelOffset():0, pFrame, pCf->IsFrameless()?0:(*(void**)pFrame));
+ }
+ #undef METHODNAME
+
+ // If we're asking about CERs and we don't yet have a definite answer either way then take a closer look at the current method.
+ if (pData->eType & StackCrawlContext::SCC_CheckWithinCer && !pData->fUnprotectedCode && !pData->fWithinCer)
+ {
+ // Check for CER root methods (these are never inlined). If we've found one of these at the root of a bunch of potential CER
+ // methods (i.e. those with a compatible reliability contract) then we're executing in a CER.
+ if (IsCerRootMethod(pMD))
+ pData->fWithinCer = true;
+
+ // Only need to look deeper if we couldn't decide if we're in a CER yet.
+ if (!pData->fWithinCer)
+ {
+ // IL stubs are transparent to CERs.
+ if (!pMD->IsILStub())
+ // Check for reliability contracts on the method (and class and assembly). If it's high enough level to be included
+ // in a CER then we can continue (hopefully finding a CER root method further down the stack). Otherwise we've got
+ // at least one method that's not part of a CER on the top of the stack so we're definitely not executing within a
+ // CER.
+ if (CheckForReliabilityContract(pMD) < RCL_BASIC_CONTRACT)
+ pData->fUnprotectedCode = true;
+ }
+ }
+
+ // If we weren't asked about EH clauses then we can return now (stop the stack trace if we have a definitive answer on the CER
+ // question, move to the next frame otherwise).
+ if ((pData->eType & StackCrawlContext::SCC_CheckWithinEH) == 0)
+ return ((pData->fWithinCer || pData->fUnprotectedCode) && pData->fHasManagedCodeOnStack) ? SWA_ABORT : SWA_CONTINUE;
+
+ // If we already discovered we're within an EH clause but are still processing (presumably to determine whether we're within a
+ // CER), then we can just skip to the next frame straight away. Also terminate here if the current frame is not frameless since
+ // there isn't any useful EH information for non-managed frames.
+ if (pData->fWithinEHClause || !pCf->IsFrameless())
+ return SWA_CONTINUE;
+
+ IJitManager* pJitManager = pCf->GetJitManager();
+ _ASSERTE(pJitManager);
+
+ EH_CLAUSE_ENUMERATOR pEnumState;
+ unsigned EHCount = pJitManager->InitializeEHEnumeration(pCf->GetMethodToken(), &pEnumState);
+ if (EHCount == 0)
+        // There are no EH clauses in this method.
+ return SWA_CONTINUE;
+
+ DWORD offs = (DWORD)pCf->GetRelOffset();
+
+ if (!pCf->IsActiveFrame())
+ {
+        // If we aren't the topmost method, then our IP is a return address and
+        // we can't use it to directly compare against the EH ranges because we
+        // may be in a cloned finally which has a call as the last instruction.
+
+ offs--;
+ }
+
+ if (pData->fWriteToStressLog)
+ {
+ STRESS_LOG1(LF_EH, LL_INFO100, "TAStackCrawlCallBack: STACKCRAWL Offset 0x%x V\n", offs);
+ }
+ EE_ILEXCEPTION_CLAUSE EHClause;
+
+ StackWalkAction action = SWA_CONTINUE;
+#ifdef _TARGET_X86_
+    // On X86, the EH encoding for catch clauses is a complete mess.
+    // If a catch clause is in its own basic block, the end of the catch includes everything in that basic block.
+    // For nested catches, the end of a catch may include several jmp instructions after the JIT_EndCatch call.
+    // To better decide whether we are inside a nested catch, we check if offs-1 is in more than one catch clause.
+ DWORD countInCatch = 0;
+ BOOL fAtJitEndCatch = FALSE;
+ if (pData->pAbortee == GetThread() &&
+ pData->pAbortee->ThrewControlForThread() == Thread::InducedThreadRedirectAtEndOfCatch &&
+ GetControlPC(pCf->GetRegisterSet()) == (PCODE)GetIP(pData->pAbortee->GetAbortContext()))
+ {
+ fAtJitEndCatch = TRUE;
+ offs -= 1;
+ }
+#endif // _TARGET_X86_
+
+#if _TARGET_AMD64_
+ STRESS_LOG1(LF_EH, LL_INFO10, "AMD64 - in TAStackCrawlCallBack 0x%x offset\n", offs);
+ STRESS_LOG1(LF_EH, LL_INFO10, "AMD64 - in TAStackCrawlCallBack frame IP is 0x%p\n", (void *)GetControlPC(pCf->GetRegisterSet()));
+ STRESS_LOG3(LF_EH, LL_INFO100, "AMD64 - in TAStackCrawlCallBack: STACKCRAWL method:%pM (), Frame:%p, FrameVtable = %pV\n",
+ pMD, pFrame, pCf->IsFrameless()?0:(*(void**)pFrame));
+
+ DWORD OffsSkipNop = offs;
+
+ if ( pCf->IsFrameless() && pCf->IsActiveFrame())
+ {
+ // If the frame is the top most frame and is managed,
+ // get the ip
+ void *oldIP = (void *)GetControlPC(pCf->GetRegisterSet());
+
+ // skip over the nop to get newIP
+ int bNop = isAtNop(oldIP);
+
+ // Skip over the nop if any
+ OffsSkipNop += bNop;
+
+ if (bNop != 0)
+ {
+ STRESS_LOG1(LF_EH, LL_INFO100, "AMD64 - TAStackCrawlCallBack: STACKCRAWL new Offset 0x%x V\n", OffsSkipNop );
+ }
+ }
+#endif // _TARGET_AMD64_
+
+
+ for(ULONG i=0; i < EHCount; i++)
+ {
+ pJitManager->GetNextEHClause(&pEnumState, &EHClause);
+ _ASSERTE(IsValidClause(&EHClause));
+
+#if _TARGET_AMD64_
+ if (offs != OffsSkipNop && OffsSkipNop == EHClause.TryStartPC)
+ {
+            // Ensure that the new offset isn't landing us in a cloned finally
+            // that "appears" to have a try block which starts with a non-NOP
+            // instruction when it is actually a cloned finally.
+ if (!IsClonedFinally(&EHClause))
+ {
+                // If we are at the nop instruction injected by the JIT right before a try/catch,
+                // we don't want to async abort. This is a workaround for an issue with the C# lock
+                // statement, where we could otherwise end up leaking a lock.
+ //
+ STRESS_LOG1(LF_EH, LL_INFO100, "AMD64 - TAStackCrawlCallBack: STACKCRAWL AMD64 TA at beginning of a Try catch 0x%x \n", OffsSkipNop );
+ pData->fWithinEHClause = true;
+ return SWA_ABORT;
+ }
+ }
+#endif // _TARGET_AMD64_
+
+ // !!! If this function is called on Aborter thread, we should check for finally only.
+        // !!! Catch and filter clauses are skipped. In UserAbort, the first thing after ReadyForAbort
+        // !!! is to check if the target thread is processing an exception.
+        // !!! If an exception is in flight, we don't induce ThreadAbort. Instead, at the end of Jit_EndCatch
+        // !!! we will handle the abort.
+ if (pData->pAbortee != GetThread() && !IsFaultOrFinally(&EHClause))
+ {
+ continue;
+ }
+ if (offs >= EHClause.HandlerStartPC &&
+ offs < EHClause.HandlerEndPC)
+ {
+#ifdef _TARGET_X86_
+ if (fAtJitEndCatch)
+ {
+ // On X86, JIT's EH info may include the instruction after JIT_EndCatch inside the same catch
+ // clause if it is in the same basic block.
+ // So for this case, the offs is in at least one catch handler, but since we are at the end of
+ // catch, this one should not be counted.
+ countInCatch ++;
+ if (countInCatch == 1)
+ {
+ continue;
+ }
+ }
+#endif
+ pData->fWithinEHClause = true;
+ // We're within an EH clause. If we're asking about CERs too then stop the stack walk if we've reached a conclusive
+ // result or continue looking otherwise. Else we can stop the stackwalk now.
+ if (pData->eType & StackCrawlContext::SCC_CheckWithinCer)
+ {
+ action = (pData->fWithinCer || pData->fUnprotectedCode) ? SWA_ABORT : SWA_CONTINUE;
+ }
+ else
+ {
+ action = SWA_ABORT;
+ }
+ break;
+ }
+ }
+
+#ifdef _TARGET_X86_
+#ifdef _DEBUG
+ if (fAtJitEndCatch)
+ {
+ _ASSERTE (countInCatch > 0);
+ }
+#endif // _DEBUG
+#endif // _TARGET_X86_
+ return action;
+}
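+
+// Editorial sketch (not part of the original commit): the offs-- adjustment in
+// the worker above, as a standalone predicate. For a non-leaf frame the
+// recorded IP is the return address -- the first byte after the call -- which
+// can already lie outside the EH range that contains the call itself:
+//
+//     static bool OffsetInClause(DWORD offs, DWORD start, DWORD end, bool leafFrame)
+//     {
+//         if (!leafFrame)
+//             offs--;        // map the return address back onto the call site
+//         return offs >= start && offs < end;
+//     }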
+
+// Wrapper around code:TAStackCrawlCallBackWorker that abstracts away the differences between the reporting order
+// of x86 and 64-bit stackwalker implementations, and also deals with interop calls that have an implicit reliability
+// contract. If a P/Invoke or CLR->COM call returns SafeHandle or CriticalHandle, the IL stub could be aborted
+// before having a chance to store the native handle into the Safe/CriticalHandle object. Therefore such calls are
+// treated as unbreakable by convention.
+StackWalkAction TAStackCrawlCallBack(CrawlFrame* pCf, void* data)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ StackCrawlContext *pData = (StackCrawlContext *)data;
+
+ // We have the current frame in pCf and possibly one latched frame in pData->LatchedCF. This enumeration
+ // describes which of these should be passed to code:TAStackCrawlCallBackWorker and in what order.
+ enum LatchedFrameAction
+ {
+ DiscardLatchedFrame, // forget the latched frame, report the current one
+ DiscardCurrentFrame, // ignore the current frame, report the latched one
+ ProcessLatchedInOrder, // report the latched frame, then report the current frame
+ ProcessLatchedReversed, // report the current frame, then report the latched frame
+ LatchCurrentFrame // latch the current frame, don't report anything
+ }
+ frameAction = DiscardLatchedFrame;
+
+#ifdef _TARGET_X86_
+ // On X86 the IL stub method is reported to us before the frame with the actual interop method. We need to
+ // swap the order because if the worker saw the IL stub - which is a CER root - first, it would terminate the
+ // stack walk and wouldn't allow the thread to be aborted, regardless of how the interop method is annotated.
+ if (pData->fHaveLatchedCF)
+ {
+ // Does the current and latched frame represent the same call?
+ if (pCf->pFrame == pData->LatchedCF.pFrame)
+ {
+ if (pData->LatchedCF.GetFunction()->AsDynamicMethodDesc()->IsUnbreakable())
+ {
+ // Report only the latched IL stub frame which is a CER root.
+ frameAction = DiscardCurrentFrame;
+ }
+ else
+ {
+ // Report the interop method (current frame) which may be annotated, then the IL stub.
+ frameAction = ProcessLatchedReversed;
+ }
+ }
+ else
+ {
+ // The two frames are unrelated - process them in order.
+ frameAction = ProcessLatchedInOrder;
+ }
+ pData->fHaveLatchedCF = FALSE;
+ }
+ else
+ {
+ MethodDesc *pMD = pCf->GetFunction();
+ if (pMD != NULL && pMD->IsILStub() && InlinedCallFrame::FrameHasActiveCall(pCf->pFrame))
+ {
+ // This may be IL stub for an interesting interop call - latch it.
+ frameAction = LatchCurrentFrame;
+ }
+ }
+#else // _TARGET_X86_
+ // On 64-bit the IL stub method is reported after the actual interop method so we don't have to swap them.
+ // However, we still want to discard the interop method frame if the call is unbreakable by convention.
+ if (pData->fHaveLatchedCF)
+ {
+ MethodDesc *pMD = pCf->GetFunction();
+ if (pMD != NULL && pMD->IsILStub() &&
+ pData->LatchedCF.GetFrame()->GetReturnAddress() == GetControlPC(pCf->GetRegisterSet()) &&
+ pMD->AsDynamicMethodDesc()->IsUnbreakable())
+ {
+ // The current and latched frame represent the same call and the IL stub is marked as unbreakable.
+ // We will discard the interop method and report only the IL stub which is a CER root.
+ frameAction = DiscardLatchedFrame;
+ }
+ else
+ {
+ // Otherwise process the two frames in order.
+ frameAction = ProcessLatchedInOrder;
+ }
+ pData->fHaveLatchedCF = FALSE;
+ }
+ else
+ {
+ MethodDesc *pMD = pCf->GetFunction();
+ if (pCf->GetFrame() != NULL && pMD != NULL && (pMD->IsNDirect() || pMD->IsComPlusCall()))
+ {
+ // This may be interop method of an interesting interop call - latch it.
+ frameAction = LatchCurrentFrame;
+ }
+ }
+#endif // _TARGET_X86_
+
+ // Execute the "frame action".
+ StackWalkAction action;
+ switch (frameAction)
+ {
+ case DiscardLatchedFrame:
+ action = TAStackCrawlCallBackWorker(pCf, pData);
+ break;
+
+ case DiscardCurrentFrame:
+ action = TAStackCrawlCallBackWorker(&pData->LatchedCF, pData);
+ break;
+
+ case ProcessLatchedInOrder:
+ action = TAStackCrawlCallBackWorker(&pData->LatchedCF, pData);
+ if (action == SWA_CONTINUE)
+ action = TAStackCrawlCallBackWorker(pCf, pData);
+ break;
+
+ case ProcessLatchedReversed:
+ action = TAStackCrawlCallBackWorker(pCf, pData);
+ if (action == SWA_CONTINUE)
+ action = TAStackCrawlCallBackWorker(&pData->LatchedCF, pData);
+ break;
+
+ case LatchCurrentFrame:
+ pData->LatchedCF = *pCf;
+ pData->fHaveLatchedCF = TRUE;
+ action = SWA_CONTINUE;
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ return action;
+}
+
+// Is the current thread currently executing within a constrained execution region?
+BOOL Thread::IsExecutingWithinCer()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (!g_fEEStarted)
+ return FALSE;
+
+ Thread *pThread = GetThread();
+ _ASSERTE (pThread);
+ StackCrawlContext sContext = { pThread,
+ StackCrawlContext::SCC_CheckWithinCer,
+ FALSE,
+ FALSE,
+ FALSE,
+ FALSE,
+ FALSE,
+ FALSE};
+
+ pThread->StackWalkFrames(TAStackCrawlCallBack, &sContext);
+
+#ifdef STRESS_LOG
+ if (sContext.fWithinCer && StressLog::StressLogOn(~0ul, 0))
+ {
+ // If stress log is on, write info to stress log
+ StackCrawlContext sContext1 = { pThread,
+ StackCrawlContext::SCC_CheckWithinCer,
+ FALSE,
+ FALSE,
+ FALSE,
+ FALSE,
+ TRUE,
+ FALSE};
+
+ pThread->StackWalkFrames(TAStackCrawlCallBack, &sContext1);
+ }
+#endif
+
+ return sContext.fWithinCer;
+}
+
+
+// Context structure used during stack walks to determine whether a given method is executing within a CER.
+struct CerStackCrawlContext
+{
+ MethodDesc *m_pStartMethod; // First method we crawl (here for debug purposes)
+ bool m_fFirstFrame; // True for first callback only
+ bool m_fWithinCer; // The result
+};
+
+// Callback used on the stack crawl described above.
+StackWalkAction CerStackCrawlCallBack(CrawlFrame *pCf, void *pData)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ CerStackCrawlContext *pCtx = (CerStackCrawlContext *)pData;
+
+ // Skip initial frame which should be our target.
+ if (pCtx->m_fFirstFrame)
+ {
+ _ASSERTE(pCtx->m_pStartMethod == pCf->GetFunction());
+ pCtx->m_fFirstFrame = false;
+ return SWA_CONTINUE;
+ }
+
+ // If we get this far we've located the target method and are scanning the calling tree to see if we have a chain of methods
+ // marked with strong reliability contracts terminated by a CER root method.
+ MethodDesc *pMD = pCf->GetFunction();
+ _ASSERTE(pMD != NULL);
+
+ // If the current method is the root of a CER then we can say the target method was executing in a CER and terminate the stack
+ // walk.
+ // @TODO: Need to be more specific than this: only certain areas of the root method are actually in the CER.
+ if (IsCerRootMethod(pMD))
+ {
+ pCtx->m_fWithinCer = true;
+ return SWA_ABORT;
+ }
+
+ // Now look at reliability contracts on the current method. If they're missing or very weak then the chain is broken and the
+ // target method cannot possibly be in a CER.
+ if (CheckForReliabilityContract(pMD) < RCL_BASIC_CONTRACT)
+ return SWA_ABORT;
+
+ // Otherwise everything looks OK so far and we need to investigate the next frame.
+ return SWA_CONTINUE;
+}
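+
+// A minimal, self-contained model of the walk above (illustrative sketch
+// only, guarded out of the build). Starting from the frame that called the
+// target method, keep walking while each caller carries at least a basic
+// reliability contract, and succeed as soon as a CER root is reached.
+// 'FrameInfo' and its fields are hypothetical stand-ins for the real
+// CrawlFrame/MethodDesc queries made by the callback.
+#if 0
+#include <vector>
+
+struct FrameInfo
+{
+    bool isCerRoot;     // models IsCerRootMethod(pMD)
+    int  contract;      // models CheckForReliabilityContract(pMD)
+};
+
+static bool IsWithinCerModel(const std::vector<FrameInfo> &callers, int basicContract)
+{
+    for (const FrameInfo &f : callers)
+    {
+        if (f.isCerRoot)
+            return true;            // chain terminated by a CER root
+        if (f.contract < basicContract)
+            return false;           // chain broken by a weak contract
+    }
+    return false;                   // ran out of callers without finding a root
+}
+#endif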
+
+// Determine whether the method at the given depth in the thread's execution stack is executing within a CER.
+BOOL Thread::IsWithinCer(CrawlFrame *pCf)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // There had better be a method associated with this frame.
+ MethodDesc *pMD = pCf->GetFunction();
+ _ASSERTE(pMD != NULL);
+
+ // Try the cheap checks first (before resorting to an actual stackwalk).
+
+ // Handle IL stubs specially. We get called for these guys and they always appear to have a strong reliability contract (due to
+ // the System.StubHelpers class they're placed in) but the stack walking logic we have below will skip them (messing up our
+ // accounting). For simplicity and speed we'll just always say these guys are in a CER (we trust the code and it won't block
+ // indefinitely so it's a safe guess).
+ if (pMD->IsILStub())
+ return TRUE;
+
+ // If the method is itself the root of a CER then we say yes immediately.
+ // @TODO: Need to be more specific than this: only certain areas of the root method are actually in the CER.
+ if (IsCerRootMethod(pMD))
+ return TRUE;
+
+ // Now look at reliability contracts on the method. If they're missing or very weak then this method cannot possibly be in a
+ // CER.
+ if (CheckForReliabilityContract(pMD) < RCL_BASIC_CONTRACT)
+ return FALSE;
+
+ // No way around it: this method has a good reliability contract but is not the root of a CER. We'll have to walk the
+ // stack to determine whether it was called from a good root.
+
+ // Now things get really tricky. We want to perform a recursive stackwalk (we're called as part of an ongoing stackwalk and we
+ // wish to recursively look at one or more of the callers of the current frame).
+ //
+ // On x86 this is relatively straightforward -- we make a copy of the current crawl frame context (since walking the stack
+ // updates the context) and build a new regdisplay around it. We can then start a new crawl from that context (ignoring the
+ // first frame of course, because that's this frame).
+ //
+ // 64-bit is trickier because the context provided by the OS might not be (from our point of view) a valid current context. In
+ // particular IA64 provides a mostly valid context except that the SP is from the caller. AMD64 on the other hand will always
+ // provide a consistent context, but it may belong to either the current or caller frame. As noted above though, we're really
+ // not all that interested in the current context, so as long as we can get to a consistent caller context we're happy.
+ //
+ // So for AMD64 we'll either have a complete current context and we'll use the x86 algorithm, or we'll have a complete caller
+ // context and we can use more or less the x86 algorithm except we don't need to skip the first frame on the stackwalk callback.
+ //
+ // IA64 is trickier since it doesn't always give us a consistent context (current or caller). Here we'll have to bite the bullet
+ // and perform a full stackwalk to build the context we're after. We'll use a combination of the caller SP and the current BSP
+ // as a discriminator (to determine when the full stackwalk has synchronized with this frame and the real walk can begin, it's
+ // the same discriminator the OS uses).
+ //
+ // <REVISIT_TODO> We will want to try and cache the context we eventually arrive at from this stack walk, since we're likely to see
+ // further calls to IsWithinCer further down the stack and we can use the end context as a much faster way to sync to a valid
+ // context in those cases. The chief technical difficulty there is cache management since the OS is handling the actual
+ // exception walk (so we're not sure when to invalidate our cached data, which presumably we'd store on the Thread). Look into
+ // hooking into the ExceptionTracker mechanism for this.</REVISIT_TODO>
+
+ REGDISPLAY *pCurrentRd = pCf->GetRegisterSet();
+ REGDISPLAY rd;
+ CONTEXT ctx;
+ CerStackCrawlContext sContext = { pMD, true, false };
+
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
+ // This check is similar to the one in ExceptionTracker::InitializeCrawlFrame.
+ //
+ // However, on ARM, we can easily check if we have the caller context (or not) by
+ // checking the IsCallerContextValid field of RegDisplay, which is set during
+ // the first pass of exception dispatch since the OS always passes us the caller
+ // context in that scenario (refer to ExceptionTracker::InitializeCrawlFrame
+ // implementation for details).
+ if (ARM_ONLY(pCurrentRd->IsCallerContextValid) NOT_ARM(GetControlPC(pCurrentRd) != GetIP(pCurrentRd->pCurrentContext)))
+ {
+ // This is the case on AMD64 or ARM, where the OS has handed us the caller context. Build a regdisplay around that (pretending that
+ // it's the current context) and reset our first frame flag so the stack walk we're about to do thinks we've already
+ // processed the current frame.
+ ctx = *pCurrentRd->pCallerContext;
+ FillRegDisplay(&rd, &ctx);
+ sContext.m_fFirstFrame = false;
+ }
+ else
+#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
+ {
+ // On x86, ARM or AMD64, where the OS gave us the current context, we just copy that into a new regdisplay (our stackwalking
+ // callback will skip the first (current) frame for us).
+ CopyRegDisplay(pCurrentRd, &rd, &ctx);
+ }
+
+ // The stackwalker requires a starting frame as input. If we're currently inspecting an explicit frame then it's easy -- we just
+ // pass that. Otherwise (we're on some frameless managed method) we look at all of the frames for the current thread and choose
+ // the one that would synchronize us for walking to the next frame.
+ Frame *pFrame;
+ if (pCf->IsFrameless())
+ {
+#if defined(_TARGET_X86_)
+ TADDR limitSP = GetRegdisplaySP(&rd);
+#else
+ TADDR limitSP = (TADDR)( EECodeManager::GetCallerSp(&rd) );
+#endif
+ pFrame = GetFrame();
+ while (pFrame && (TADDR)(pFrame) < limitSP)
+ pFrame = pFrame->Next();
+ }
+ else
+ {
+ pFrame = pCf->GetFrame();
+
+#ifdef _TARGET_X86_
+ if (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())
+ {
+ // If we walk from an ICF, the function will not be reported again because the x86 stack walker handles the managed stack
+ // frame before the explicit frames contained in it.
+ sContext.m_fFirstFrame = false;
+ }
+#endif // _TARGET_X86_
+ }
+
+ StackWalkFramesEx(&rd, CerStackCrawlCallBack, &sContext, QUICKUNWIND | FUNCTIONSONLY, pFrame);
+
+ _ASSERTE(!sContext.m_fFirstFrame);
+
+ return sContext.m_fWithinCer;
+}
+
+
+#if defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK)
+BOOL Thread::IsSafeToInjectThreadAbort(PTR_CONTEXT pContextToCheck)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pContextToCheck != NULL);
+ }
+ CONTRACTL_END;
+
+ EECodeInfo codeInfo(GetIP(pContextToCheck));
+ _ASSERTE(codeInfo.IsValid());
+
+ // Check if the method uses a frame register. If it does not, then RSP will be used by the OS as the frame register
+ // and returned as the EstablisherFrame. This is fine at any instruction in the method (including the epilog) since there is
+ // always a difference of one stack slot between the caller SP and the callee SP, due to the return address having been pushed on the stack.
+ if (!codeInfo.HasFrameRegister())
+ {
+ return TRUE;
+ }
+
+ BOOL fSafeToInjectThreadAbort = TRUE;
+
+ if (IsIPInEpilog(pContextToCheck, &codeInfo, &fSafeToInjectThreadAbort))
+ {
+ return fSafeToInjectThreadAbort;
+ }
+ else
+ {
+ return TRUE;
+ }
+}
+#endif // defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK)
+
+
+#ifdef _TARGET_AMD64_
+// CONTEXT_CONTROL does not include any nonvolatile registers that might be the frame pointer.
+#define CONTEXT_MIN_STACKWALK (CONTEXT_CONTROL | CONTEXT_INTEGER)
+#else
+#define CONTEXT_MIN_STACKWALK (CONTEXT_CONTROL)
+#endif
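+
+// A sketch of how a minimal-context capture might look (assumes the Win32
+// CONTEXT/GetThreadContext API; 'hThread' is a hypothetical handle to a
+// suspended thread):
+#if 0
+CONTEXT ctx;
+ctx.ContextFlags = CONTEXT_MIN_STACKWALK;   // control registers, plus the
+                                            // integer registers on AMD64
+if (GetThreadContext(hThread, &ctx))
+{
+    // ctx now carries enough state (IP, SP and a possible frame register)
+    // to seed a REGDISPLAY for a stack walk.
+}
+#endif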
+
+
+BOOL Thread::ReadyForAsyncException(ThreadInterruptMode mode)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (((mode & TI_Abort) != 0 && !IsAbortRequested()) ||
+ ((mode & TI_Interrupt) != 0 && (m_UserInterrupt & TI_Interrupt) == 0))
+ {
+ return FALSE;
+ }
+
+ if (IsAbortRequested() && HasThreadStateNC(TSNC_SOWorkNeeded))
+ {
+ return TRUE;
+ }
+
+ // This needs the probe with GenerateHardSO
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ if (GetThread() == this && HasThreadStateNC (TSNC_PreparingAbort) && !IsRudeAbort() )
+ {
+ STRESS_LOG0(LF_APPDOMAIN, LL_INFO10, "in Thread::ReadyForAbort PreparingAbort\n");
+ // Avoid recursive call
+ return FALSE;
+ }
+
+ if (IsAbortPrevented())
+ {
+ //
+ // If the thread is marked to have a FuncEval abort request, then allow that to go through
+ // since we don't want to block funcEval aborts. Such requests are initiated by the
+ // right-side when the thread is doing funcEval and the exception would be caught in the
+ // left-side's funcEval implementation that will then clear the funcEval-abort-state from the thread.
+ //
+ // If another thread also marked this one for a non-FuncEval abort, then the left-side will
+ // proceed to [re]throw that exception post funcEval abort. When we come here next, we would follow
+ // the usual rules to raise the exception and if raised, to prevent the abort if applicable.
+ //
+ if (!IsFuncEvalAbort())
+ {
+ STRESS_LOG0(LF_APPDOMAIN, LL_INFO10, "in Thread::ReadyForAbort prevent abort\n");
+ return FALSE;
+ }
+ }
+
+ // The thread requests not to be aborted. Honor this for safe abort.
+ if (!IsRudeAbort() && IsAsyncPrevented())
+ {
+ STRESS_LOG0(LF_APPDOMAIN, LL_INFO10, "in Thread::ReadyForAbort AsyncPrevented\n");
+ return FALSE;
+ }
+
+ // If we are doing safe abort, we can not abort a thread if it has locks.
+ if (m_AbortType == EEPolicy::TA_Safe && HasLockInCurrentDomain()) {
+ STRESS_LOG0(LF_APPDOMAIN, LL_INFO10, "in Thread::ReadyForAbort HasLock\n");
+ return FALSE;
+ }
+
+ REGDISPLAY rd;
+
+ Frame *pStartFrame = NULL;
+ if (ThrewControlForThread() == Thread::InducedThreadRedirect ||
+ ThrewControlForThread() == Thread::InducedThreadRedirectAtEndOfCatch)
+ {
+ _ASSERTE(GetThread() == this);
+ _ASSERTE(ExecutionManager::IsManagedCode(GetIP(m_OSContext)));
+ FillRegDisplay(&rd, m_OSContext);
+
+ if (ThrewControlForThread() == Thread::InducedThreadRedirectAtEndOfCatch)
+ {
+ // On 64-bit, this function may be called from COMPlusCheckForAbort when
+ // the stack has not yet unwound, but m_OSContext points to the state after the unwind.
+ //
+ TADDR sp = GetSP(m_OSContext);
+ Frame *pFrameAddr = m_pFrame;
+ while (pFrameAddr < (LPVOID)sp)
+ {
+ pFrameAddr = pFrameAddr->Next();
+ }
+ if (pFrameAddr != m_pFrame)
+ {
+ pStartFrame = pFrameAddr;
+ }
+ }
+#if defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK)
+ else if (ThrewControlForThread() == Thread::InducedThreadRedirect)
+ {
+ if (!IsSafeToInjectThreadAbort(m_OSContext))
+ {
+ STRESS_LOG0(LF_EH, LL_INFO10, "Thread::ReadyForAbort: Not injecting abort since we are at an unsafe instruction.\n");
+ return FALSE;
+ }
+ }
+#endif // defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK)
+ }
+ else
+ {
+ if (GetFilterContext())
+ {
+ FillRegDisplay(&rd, GetFilterContext());
+ }
+ else
+ {
+ CONTEXT ctx;
+ SetIP(&ctx, 0);
+ SetSP(&ctx, 0);
+ FillRegDisplay(&rd, &ctx);
+ }
+ }
+
+#ifdef STRESS_LOG
+ REGDISPLAY rd1;
+ if (StressLog::StressLogOn(~0ul, 0))
+ {
+ CONTEXT ctx1;
+ CopyRegDisplay(&rd, &rd1, &ctx1);
+ }
+#endif
+
+ // Walk the stack to determine if we are running in a Constrained Execution Region or a finally EH clause (in the non-rude abort
+ // case). We cannot initiate an abort in these circumstances.
+ StackCrawlContext TAContext =
+ {
+ this,
+ StackCrawlContext::SCC_CheckWithinCer | (IsRudeAbort() ? 0 : StackCrawlContext::SCC_CheckWithinEH),
+ FALSE,
+ FALSE,
+ FALSE,
+ FALSE,
+ FALSE
+ };
+
+ StackWalkFramesEx(&rd, TAStackCrawlCallBack, &TAContext, QUICKUNWIND, pStartFrame);
+
+ if (!TAContext.fHasManagedCodeOnStack && IsAbortInitiated() && GetThread() == this)
+ {
+ EEResetAbort(TAR_Thread);
+ return FALSE;
+ }
+
+ if (TAContext.fWithinCer)
+ {
+ STRESS_LOG0(LF_APPDOMAIN, LL_INFO10, "in Thread::ReadyForAbort RunningCer\n");
+ return FALSE;
+ }
+
+#ifdef STRESS_LOG
+ if (StressLog::StressLogOn(~0ul, 0) &&
+ (IsRudeAbort() || !TAContext.fWithinEHClause))
+ {
+ // Save into stress log.
+ StackCrawlContext TAContext1 =
+ {
+ this,
+ StackCrawlContext::SCC_CheckWithinCer | (IsRudeAbort() ? 0 : StackCrawlContext::SCC_CheckWithinEH),
+ FALSE,
+ FALSE,
+ FALSE,
+ FALSE,
+ TRUE
+ };
+
+ StackWalkFramesEx(&rd1, TAStackCrawlCallBack, &TAContext1, QUICKUNWIND, pStartFrame);
+ }
+#endif
+
+ if (IsRudeAbort()) {
+ // If it is a rude abort, there is no additional restriction on the abort.
+ STRESS_LOG0(LF_APPDOMAIN, LL_INFO10, "in Thread::ReadyForAbort RudeAbort\n");
+ return TRUE;
+ }
+
+ if (TAContext.fWithinEHClause)
+ {
+ STRESS_LOG0(LF_APPDOMAIN, LL_INFO10, "in Thread::ReadyForAbort RunningEHClause\n");
+ }
+
+ //if (m_AbortType == EEPolicy::TA_V1Compatible) {
+ // return TRUE;
+ //}
+
+ // If we are running a finally clause, we cannot proceed with a safe abort.
+ return !TAContext.fWithinEHClause;
+}
+
+BOOL Thread::IsRudeAbort()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ return (IsAbortRequested() && (m_AbortType == EEPolicy::TA_Rude));
+}
+
+BOOL Thread::IsRudeAbortOnlyForADUnload()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return (IsAbortRequested() &&
+ (m_AbortInfo & TAI_ADUnloadRudeAbort) &&
+ !(m_AbortInfo & (TAI_ThreadRudeAbort | TAI_FuncEvalRudeAbort))
+ );
+}
+
+BOOL Thread::IsRudeUnload()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return (IsAbortRequested() && (m_AbortInfo & TAI_ADUnloadRudeAbort));
+}
+
+BOOL Thread::IsFuncEvalAbort()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return (IsAbortRequested() && (m_AbortInfo & TAI_AnyFuncEvalAbort));
+}
+
+//
+// If the OS is down in kernel mode when we do a GetThreadContext, any
+// updates we make to the context will not take effect if we try to do
+// a SetThreadContext. As a result, newer OSes expose the idea of
+// "trap frame reporting" which will tell us if it is unsafe to modify
+// the context and pass it along to SetThreadContext.
+//
+// On OSes that support trap frame reporting, we will return FALSE if
+// we can determine that the OS is not in user mode. Otherwise, we
+// return TRUE.
+//
+BOOL Thread::IsContextSafeToRedirect(CONTEXT* pContext)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+
+#if !defined(_TARGET_X86_)
+ CONSISTENCY_CHECK_MSG(pContext->ContextFlags & CONTEXT_EXCEPTION_REPORTING, "ERROR: you didn't pass CONTEXT_EXCEPTION_REQUEST into GetThreadContext\n");
+#endif // !defined(_TARGET_X86_)
+
+ if (pContext->ContextFlags & CONTEXT_EXCEPTION_REPORTING)
+ {
+ if (pContext->ContextFlags & (CONTEXT_SERVICE_ACTIVE|CONTEXT_EXCEPTION_ACTIVE))
+ {
+ // cannot process exception
+ LOG((LF_ALWAYS, LL_WARNING, "thread [os id=0x%08x id=0x%08x] redirect failed due to ContextFlags of 0x%08x\n", m_OSThreadId, m_ThreadId, pContext->ContextFlags));
+ return FALSE;
+ }
+ }
+
+#endif // !FEATURE_PAL
+
+ return TRUE;
+}
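+
+// A sketch of the caller-side handshake the check above depends on (assumes
+// the Win32 trap-frame-reporting flags; 'hThread' is a hypothetical handle
+// to a suspended thread):
+#if 0
+CONTEXT ctx;
+ctx.ContextFlags = CONTEXT_FULL | CONTEXT_EXCEPTION_REQUEST;
+if (GetThreadContext(hThread, &ctx) &&
+    (ctx.ContextFlags & CONTEXT_EXCEPTION_REPORTING) != 0 &&
+    (ctx.ContextFlags & (CONTEXT_SERVICE_ACTIVE | CONTEXT_EXCEPTION_ACTIVE)) == 0)
+{
+    // The OS reported the thread as being in user mode, so a subsequent
+    // SetThreadContext with a modified context should take effect.
+}
+#endif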
+
+void Thread::SetAbortEndTime(ULONGLONG endTime, BOOL fRudeAbort)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ {
+ AbortRequestLockHolder lh(this);
+ if (fRudeAbort)
+ {
+ if (endTime < m_RudeAbortEndTime)
+ {
+ m_RudeAbortEndTime = endTime;
+ }
+ }
+ else
+ {
+ if (endTime < m_AbortEndTime)
+ {
+ m_AbortEndTime = endTime;
+ }
+ }
+ }
+
+}
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+HRESULT
+Thread::UserAbort(ThreadAbortRequester requester,
+ EEPolicy::ThreadAbortTypes abortType,
+ DWORD timeout,
+ UserAbort_Client client
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ STRESS_LOG2(LF_SYNC | LF_APPDOMAIN, LL_INFO100, "UserAbort Thread %p Thread Id = %x\n", this, GetThreadId());
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_Abort);
+#endif
+
+ BOOL fHoldingThreadStoreLock = ThreadStore::HoldingThreadStore();
+
+ // For a SafeAbort from a FuncEval abort, we do not apply escalation policy. The debugger
+ // tries SafeAbort first with a short timeout, and the thread returns to the debugger.
+ // After a short break, the thread performs a RudeAbort if the abort has not finished.
+ EClrOperation operation;
+ if (abortType == EEPolicy::TA_Rude)
+ {
+ if (HasLockInCurrentDomain())
+ {
+ operation = OPR_ThreadRudeAbortInCriticalRegion;
+ }
+ else
+ {
+ operation = OPR_ThreadRudeAbortInNonCriticalRegion;
+ }
+ }
+ else
+ {
+ operation = OPR_ThreadAbort;
+ }
+
+ // Debugger func-eval aborts (both rude + normal) don't have any escalation policy. They are invoked
+ // by the debugger and the debugger handles the consequences.
+ // Furthermore, in interop-debugging, threads will be hard-suspended in preemptive mode while we try to abort them.
+ // So any abort strategy that relies on a timeout and the target thread slipping is dangerous. Escalation policy would let a
+ // host circumvent the timeout and thus we may wait forever for the target thread to slip. We'd deadlock here. Since the escalation
+ // policy doesn't let the host break this deadlock (and certainly doesn't let the debugger break the deadlock), it's unsafe
+ // to have an escalation policy for func-eval aborts at all.
+ BOOL fEscalation = (requester != TAR_FuncEval);
+ if (fEscalation)
+ {
+ EPolicyAction action = GetEEPolicy()->GetDefaultAction(operation, this);
+ switch (action)
+ {
+ case eAbortThread:
+ GetEEPolicy()->NotifyHostOnDefaultAction(operation,action);
+ break;
+ case eRudeAbortThread:
+ if (abortType != EEPolicy::TA_Rude)
+ {
+ abortType = EEPolicy::TA_Rude;
+ }
+ GetEEPolicy()->NotifyHostOnDefaultAction(operation,action);
+ break;
+ case eUnloadAppDomain:
+ {
+ AppDomain *pDomain = GetDomain();
+ if (!pDomain->IsDefaultDomain())
+ {
+ GetEEPolicy()->NotifyHostOnDefaultAction(operation,action);
+ pDomain->EnableADUnloadWorker(EEPolicy::ADU_Safe);
+ }
+ }
+ // AD unload does not abort finalizer thread.
+ if (this != FinalizerThread::GetFinalizerThread())
+ {
+ if (this == GetThread())
+ {
+ Join(INFINITE,TRUE);
+ }
+ return S_OK;
+ }
+ break;
+ case eRudeUnloadAppDomain:
+ {
+ AppDomain *pDomain = GetDomain();
+ if (!pDomain->IsDefaultDomain())
+ {
+ GetEEPolicy()->NotifyHostOnDefaultAction(operation,action);
+ pDomain->EnableADUnloadWorker(EEPolicy::ADU_Rude);
+ }
+ }
+ // AD unload does not abort finalizer thread.
+ if (this != FinalizerThread::GetFinalizerThread())
+ {
+ if (this == GetThread())
+ {
+ Join(INFINITE,TRUE);
+ }
+ return S_OK;
+ }
+ break;
+ case eExitProcess:
+ case eFastExitProcess:
+ case eRudeExitProcess:
+ case eDisableRuntime:
+ GetEEPolicy()->NotifyHostOnDefaultAction(operation,action);
+ EEPolicy::HandleExitProcessFromEscalation(action, HOST_E_EXITPROCESS_THREADABORT);
+ _ASSERTE (!"Should not reach here");
+ break;
+ default:
+ _ASSERTE (!"unknown policy for thread abort");
+ }
+
+ DWORD timeoutFromPolicy;
+ if (abortType != EEPolicy::TA_Rude)
+ {
+ timeoutFromPolicy = GetEEPolicy()->GetTimeout(OPR_ThreadAbort);
+ }
+ else if (!HasLockInCurrentDomain())
+ {
+ timeoutFromPolicy = GetEEPolicy()->GetTimeout(OPR_ThreadRudeAbortInNonCriticalRegion);
+ }
+ else
+ {
+ timeoutFromPolicy = GetEEPolicy()->GetTimeout(OPR_ThreadRudeAbortInCriticalRegion);
+ }
+ if (timeout > timeoutFromPolicy)
+ {
+ timeout = timeoutFromPolicy;
+ }
+ }
+
+ AbortControlHolder AbortController(this);
+
+ // Swap in timeout
+ if (timeout != INFINITE)
+ {
+ ULONG64 curTime = CLRGetTickCount64();
+ ULONG64 newEndTime = curTime + timeout;
+
+ SetAbortEndTime(newEndTime, abortType == EEPolicy::TA_Rude);
+ }
+
+ // If the abort comes from the thread abort watchdog, proceed with the abort only
+ // if the abort is still requested. This handles the race between the watchdog and UnmarkThreadForAbort.
+ BOOL fTentative = (requester == Thread::TAR_Thread) && (client == UAC_WatchDog);
+ MarkThreadForAbort(requester, abortType, fTentative);
+
+ Thread *pCurThread = GetThread();
+
+ // If aborting self
+ if (this == pCurThread)
+ {
+ SetAbortInitiated();
+#ifdef _DEBUG
+ m_dwAbortPoint = 1;
+#endif
+
+ if (CLRHosted() && GetAbortEndTime() != MAXULONGLONG)
+ {
+ // ToDo: Skip debugger funcval
+ // Use our helper thread to watch abort.
+ AppDomain::EnableADUnloadWorkerForThreadAbort();
+ }
+
+ GCX_COOP();
+
+ OBJECTREF exceptObj;
+
+ if (IsRudeAbort())
+ {
+ exceptObj = CLRException::GetPreallocatedRudeThreadAbortException();
+ }
+ else
+ {
+ EEException eeExcept(kThreadAbortException);
+ exceptObj = CLRException::GetThrowableFromException(&eeExcept);
+ }
+
+ RaiseTheExceptionInternalOnly(exceptObj, FALSE);
+ }
+
+#ifdef MDA_SUPPORTED
+ if (requester != TAR_FuncEval)
+ {
+ // FuncEval abort is always aborting another thread. No need to trigger MDA.
+ MDA_TRIGGER_ASSISTANT(AsynchronousThreadAbort, ReportViolation(GetThread(), this));
+ }
+#endif
+
+ _ASSERTE(this != pCurThread); // Aborting another thread.
+
+ if (client == UAC_Host)
+ {
+ // A host may call ICLRTask::Abort on a critical thread. We don't want to
+ // block this thread.
+ AppDomain::EnableADUnloadWorkerForThreadAbort();
+ return S_OK;
+ }
+
+#ifdef _DEBUG
+ DWORD elapsed_time = 0;
+#endif
+
+ ThreadAffinityHolder affinity;
+ // We do not want this thread to be alerted.
+ ThreadPreventAsyncHolder preventAsync(pCurThread != NULL);
+
+#ifdef _DEBUG
+ // If UserAbort times out, put up msgbox once.
+ BOOL fAlreadyAssert = FALSE;
+#endif
+
+ BOOL fOneTryOnly = (client == UAC_WatchDog) || (client == UAC_FinalizerTimeout);
+ BOOL fFirstRun = TRUE;
+ BOOL fNeedEscalation;
+
+#if !defined(DISABLE_THREADSUSPEND)
+ DWORD dwSwitchCount = 0;
+#endif // !defined(DISABLE_THREADSUSPEND)
+
+LRetry:
+ fNeedEscalation = FALSE;
+ for (;;)
+ {
+ if (fOneTryOnly)
+ {
+ if (!fFirstRun)
+ {
+ return S_OK;
+ }
+ fFirstRun = FALSE;
+ }
+ // Lock the thread store
+ LOG((LF_SYNC, INFO3, "UserAbort obtain lock\n"));
+
+ ULONGLONG abortEndTime = GetAbortEndTime();
+ if (abortEndTime != MAXULONGLONG)
+ {
+ ULONGLONG now_time = CLRGetTickCount64();
+
+ if (now_time >= abortEndTime)
+ {
+ EPolicyAction action1 = eNoAction;
+ DWORD timeout1 = INFINITE;
+ if (fEscalation)
+ {
+ if (!IsRudeAbort())
+ {
+ action1 = GetEEPolicy()->GetActionOnTimeout(OPR_ThreadAbort, this);
+ timeout1 = GetEEPolicy()->GetTimeout(OPR_ThreadAbort);
+ }
+ else if (HasLockInCurrentDomain())
+ {
+ action1 = GetEEPolicy()->GetActionOnTimeout(OPR_ThreadRudeAbortInCriticalRegion, this);
+ timeout1 = GetEEPolicy()->GetTimeout(OPR_ThreadRudeAbortInCriticalRegion);
+ }
+ else
+ {
+ action1 = GetEEPolicy()->GetActionOnTimeout(OPR_ThreadRudeAbortInNonCriticalRegion, this);
+ timeout1 = GetEEPolicy()->GetTimeout(OPR_ThreadRudeAbortInNonCriticalRegion);
+ }
+ }
+ if (action1 == eNoAction)
+ {
+ // timeout, but no action on timeout.
+ // The debugger can call this function to abort a func-eval with a timeout.
+ return HRESULT_FROM_WIN32(ERROR_TIMEOUT);
+ }
+ if (timeout1 != INFINITE)
+ {
+ // There is an escalation policy.
+ fNeedEscalation = TRUE;
+ break;
+ }
+ }
+ }
+
+ // Thread abort needs to walk the stack to decide if the abort can proceed.
+ // It is unsafe to crawl the stack of a thread while it is OS-suspended, which we do during
+ // thread abort. For example, thread T1 aborts thread T2, and T2 is suspended by T1. Inside SQL
+ // this means that no thread sharing the same scheduler with T2 can run. If T1 needs a lock which
+ // is owned by one thread on the scheduler, T1 will wait forever.
+ // Our solution is to move T2 to a safe point, resume it, and then do the stack crawl.
+
+ // We need to make sure that ThreadStoreLock is released after CheckForAbort. This makes sure
+ // that ThreadAbort does not race against GC.
+ class CheckForAbort
+ {
+ private:
+ Thread *m_pThread;
+ BOOL m_fHoldingThreadStoreLock;
+ BOOL m_NeedRelease;
+ public:
+ CheckForAbort(Thread *pThread, BOOL fHoldingThreadStoreLock)
+ : m_pThread(pThread),
+ m_fHoldingThreadStoreLock(fHoldingThreadStoreLock),
+ m_NeedRelease(TRUE)
+ {
+ if (!fHoldingThreadStoreLock)
+ {
+ ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
+ }
+ ThreadStore::ResetStackCrawlEvent();
+
+ // The thread being aborted may clear the TS_AbortRequested bit and the matching increment
+ // of g_TrapReturningThreads behind our back. Increment g_TrapReturningThreads here
+ // to ensure that we stop for the stack crawl even if the TS_AbortRequested bit is cleared.
+ ThreadStore::TrapReturningThreads(TRUE);
+ }
+ void NeedStackCrawl()
+ {
+ m_pThread->SetThreadState(Thread::TS_StackCrawlNeeded);
+ }
+ ~CheckForAbort()
+ {
+ Release();
+ }
+ void Release()
+ {
+ if (m_NeedRelease)
+ {
+ m_NeedRelease = FALSE;
+ ThreadStore::TrapReturningThreads(FALSE);
+ ThreadStore::SetStackCrawlEvent();
+ m_pThread->ResetThreadState(TS_StackCrawlNeeded);
+ if (!m_fHoldingThreadStoreLock)
+ {
+ ThreadSuspend::UnlockThreadStore();
+ }
+ }
+ }
+ };
+ CheckForAbort checkForAbort(this, fHoldingThreadStoreLock);
+
+ // We own the TS lock, so the state of the Thread cannot change.
+ if (m_State & TS_Unstarted)
+ {
+ // This thread is not yet started.
+#ifdef _DEBUG
+ m_dwAbortPoint = 2;
+#endif
+ if(requester == Thread::TAR_Thread)
+ SetAborted();
+ return S_OK;
+ }
+
+ if (GetThreadHandle() == INVALID_HANDLE_VALUE &&
+ (m_State & TS_Unstarted) == 0)
+ {
+ // The thread is going to die or is already dead.
+ UnmarkThreadForAbort(Thread::TAR_ALL);
+#ifdef _DEBUG
+ m_dwAbortPoint = 3;
+#endif
+ if(requester == Thread::TAR_Thread)
+ SetAborted();
+ return S_OK;
+ }
+
+ // What if someone else has this thread suspended already? It'll depend on where the
+ // thread got suspended.
+ //
+ // User Suspend:
+ // We'll just set the abort bit and hope for the best on the resume.
+ //
+ // GC Suspend:
+ // If it's suspended in jitted code, we'll hijack the IP.
+ // <REVISIT_TODO> Consider race w/ GC suspension</REVISIT_TODO>
+ // If it's suspended but not in jitted code, we'll get suspended for GC, the GC
+ // will complete, and then we'll abort the target thread.
+ //
+
+ // It's possible that the thread has completed the abort already.
+ //
+ if (!(m_State & TS_AbortRequested))
+ {
+#ifdef _DEBUG
+ m_dwAbortPoint = 4;
+#endif
+ if(requester == Thread::TAR_Thread)
+ SetAborted();
+ return S_OK;
+ }
+
+ // If a thread is Dead or Detached, abort is a NOP.
+ //
+ if (m_State & (TS_Dead | TS_Detached | TS_TaskReset))
+ {
+ UnmarkThreadForAbort(Thread::TAR_ALL);
+ if(requester == Thread::TAR_Thread)
+ SetAborted();
+#ifdef _DEBUG
+ m_dwAbortPoint = 5;
+#endif
+ return S_OK;
+ }
+
+ // It's possible that some stub notices the AbortRequested bit -- even though we
+ // haven't done any real magic yet. If the thread has already started its abort, we're
+ // done.
+ //
+ // Two more cases can be folded in here as well. If the thread is unstarted, it'll
+ // abort when we start it.
+ //
+ // If the thread is user suspended (SyncSuspended) -- we're out of luck. Set the bit and
+ // hope for the best on resume.
+ //
+ if ((m_State & TS_AbortInitiated) && !IsRudeAbort())
+ {
+#ifdef _DEBUG
+ m_dwAbortPoint = 6;
+#endif
+ break;
+ }
+
+ BOOL fOutOfRuntime = FALSE;
+ BOOL fNeedStackCrawl = FALSE;
+
+#ifdef DISABLE_THREADSUSPEND
+ // On platforms that do not support safe thread suspension we have to
+ // rely on the GCPOLL mechanism; the mechanism is activated above by
+ // TrapReturningThreads. However when reading shared state we need
+ // to erect appropriate memory barriers. So the interlocked operation
+ // below ensures that any future reads on this thread will happen after
+ // any earlier writes on a different thread have taken effect.
+ FastInterlockOr((DWORD*)&m_State, 0);
+
+#else // DISABLE_THREADSUSPEND
+
+ // Win32 suspend the thread, so it isn't moving under us.
+ SuspendThreadResult str = SuspendThread();
+ switch (str)
+ {
+ case STR_Success:
+ break;
+
+ case STR_Failure:
+ case STR_UnstartedOrDead:
+ case STR_NoStressLog:
+ checkForAbort.Release();
+ __SwitchToThread(0, ++dwSwitchCount);
+ continue;
+
+ case STR_SwitchedOut:
+ // If the thread is in preemptive GC mode, we can erect a barrier to block the
+ // thread from returning to cooperative mode. Then we can do the stack crawl and make a decision.
+ if (!m_fPreemptiveGCDisabled)
+ {
+ checkForAbort.NeedStackCrawl();
+ if (GetThreadHandle() != SWITCHOUT_HANDLE_VALUE || m_fPreemptiveGCDisabled)
+ {
+ checkForAbort.Release();
+ __SwitchToThread(0, ++dwSwitchCount);
+ continue;
+ }
+ else
+ {
+ goto LStackCrawl;
+ }
+ }
+ else
+ {
+ goto LPrepareRetry;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+
+ _ASSERTE(str == STR_Success);
+
+#endif // DISABLE_THREADSUSPEND
+
+ // It's possible that the thread has completed the abort already.
+ //
+ if (!(m_State & TS_AbortRequested))
+ {
+#ifndef DISABLE_THREADSUSPEND
+ ResumeThread();
+#endif
+ if(requester == Thread::TAR_Thread)
+ SetAborted();
+#ifdef _DEBUG
+ m_dwAbortPoint = 63;
+#endif
+ return S_OK;
+ }
+
+ // Check whether some stub noticed the AbortRequested bit in-between our test above
+ // and us suspending the thread.
+ if ((m_State & TS_AbortInitiated) && !IsRudeAbort())
+ {
+#ifndef DISABLE_THREADSUSPEND
+ ResumeThread();
+#endif
+#ifdef _DEBUG
+ m_dwAbortPoint = 65;
+#endif
+ break;
+ }
+
+ // If the thread is stopped under a managed debugger, it will have both
+ // TS_DebugSuspendPending and TS_SyncSuspended, regardless of whether
+ // the thread is actually suspended or not.
+ // If it's suspended w/o the debugger (e.g., via Thread.Suspend), it will
+ // also have TS_UserSuspendPending set.
+ if (m_State & TS_SyncSuspended)
+ {
+#ifndef DISABLE_THREADSUSPEND
+ ResumeThread();
+#endif
+ checkForAbort.Release();
+#ifdef _DEBUG
+ m_dwAbortPoint = 7;
+#endif
+
+ //
+ // If it's stopped by the debugger, we don't want to throw an exception.
+ // Debugger suspension should have no effect on the runtime's behaviour.
+ //
+ if (m_State & TS_DebugSuspendPending)
+ {
+ return S_OK;
+ }
+
+ COMPlusThrow(kThreadStateException, IDS_EE_THREAD_ABORT_WHILE_SUSPEND);
+ }
+
+ // If the thread has no managed code on its call stack, abort is a NOP. We're about
+ // to touch the unmanaged thread's stack -- for this to be safe, we can't be
+ // Dead/Detached/Unstarted.
+ //
+ _ASSERTE(!(m_State & ( TS_Dead
+ | TS_Detached
+ | TS_Unstarted)));
+
+#ifdef _TARGET_X86_
+ // TODO WIN64: consider this if there is a way to detect managed code on the stack.
+ if ((m_pFrame == FRAME_TOP)
+ && (GetFirstCOMPlusSEHRecord(this) == EXCEPTION_CHAIN_END)
+ )
+ {
+#ifndef DISABLE_THREADSUSPEND
+ ResumeThread();
+#endif
+#ifdef _DEBUG
+ m_dwAbortPoint = 8;
+#endif
+
+ if(requester == Thread::TAR_Thread)
+ SetAborted();
+ return S_OK;
+ }
+#endif // _TARGET_X86_
+
+
+ if (!m_fPreemptiveGCDisabled)
+ {
+ if ((m_pFrame != FRAME_TOP) && m_pFrame->IsTransitionToNativeFrame()
+#ifdef _TARGET_X86_
+ && ((size_t) GetFirstCOMPlusSEHRecord(this) > ((size_t) m_pFrame) - 20)
+#endif // _TARGET_X86_
+ )
+ {
+ fOutOfRuntime = TRUE;
+ }
+ }
+
+ checkForAbort.NeedStackCrawl();
+ if (!m_fPreemptiveGCDisabled)
+ {
+ fNeedStackCrawl = TRUE;
+ }
+#ifdef FEATURE_HIJACK
+ else
+ {
+ HandleJITCaseForAbort();
+ }
+#endif // FEATURE_HIJACK
+
+#ifndef DISABLE_THREADSUSPEND
+ // The thread is not suspended now.
+ ResumeThread();
+#endif
+
+ if (!fNeedStackCrawl)
+ {
+ goto LPrepareRetry;
+ }
+
+#ifndef DISABLE_THREADSUSPEND
+LStackCrawl:
+#endif // DISABLE_THREADSUSPEND
+
+ if (!ReadyForAbort()) {
+ goto LPrepareRetry;
+ }
+
+ // !!! The check for an exception in flight should happen before an induced thread abort.
+ // !!! ReadyForAbort skips catch and filter clauses.
+
+ // If an exception is currently being thrown, one of two things will happen. Either we'll
+ // catch, and notice the abort request in our end-catch, or we'll not catch (in which case
+ // we're leaving managed code anyway). The top-most handler is responsible for resetting
+ // the bit.
+ //
+ if (HasException() &&
+ // For a rude abort, we will initiate the abort
+ !IsRudeAbort())
+ {
+#ifdef _DEBUG
+ m_dwAbortPoint = 9;
+#endif
+ break;
+ }
+
+ // If the thread is in a sleep, wait, or join, interrupt it
+ // However, we do NOT want to interrupt if the thread is already processing an exception
+ if (m_State & TS_Interruptible)
+ {
+ UserInterrupt(TI_Abort); // if the user wakes up because of this, it will read the
+ // abort requested bit and initiate the abort
+#ifdef _DEBUG
+ m_dwAbortPoint = 10;
+#endif
+ goto LPrepareRetry;
+ }
+
+ if (fOutOfRuntime)
+ {
+ // If the thread is running outside the EE, and is behind a stub that's going
+ // to catch...
+#ifdef _DEBUG
+ m_dwAbortPoint = 11;
+#endif
+ break;
+ }
+
+ // Ok. It's not in managed code, nor safely out behind a stub that's going to catch
+ // it on the way in. We have to poll.
+
+LPrepareRetry:
+
+ checkForAbort.Release();
+
+ if (fOneTryOnly)
+ {
+ break;
+ }
+
+ // Don't do a Sleep. It's possible that the thread we are trying to abort is
+ // stuck in unmanaged code trying to get into the apartment that we are supposed
+ // to be pumping! Instead, ping the current thread's handle. Obviously this
+ // will time out, but it will pump if we need it to.
+ if (pCurThread)
+ {
+ pCurThread->Join(ABORT_POLL_TIMEOUT, TRUE);
+ }
+ else
+ {
+ ClrSleepEx(ABORT_POLL_TIMEOUT, FALSE);
+ }
+
+
+#ifdef _DEBUG
+ elapsed_time += ABORT_POLL_TIMEOUT;
+ if (g_pConfig->GetGCStressLevel() == 0 && !fAlreadyAssert)
+ {
+ _ASSERTE(elapsed_time < ABORT_FAIL_TIMEOUT);
+ fAlreadyAssert = TRUE;
+ }
+#endif
+
+ } // for(;;)
+
+ if (fOneTryOnly && !fNeedEscalation)
+ {
+ return S_OK;
+ }
+
+ if ((GetAbortEndTime() != MAXULONGLONG) && IsAbortRequested())
+ {
+ while (TRUE)
+ {
+ if (!IsAbortRequested())
+ {
+ return S_OK;
+ }
+ ULONGLONG curTime = CLRGetTickCount64();
+ if (curTime >= GetAbortEndTime())
+ {
+ break;
+ }
+
+ if (pCurThread)
+ {
+ pCurThread->Join(100, TRUE);
+ }
+ else
+ {
+ ClrSleepEx(100, FALSE);
+ }
+
+ }
+
+ if (IsAbortRequested() && fEscalation)
+ {
+ EPolicyAction action1;
+ EClrOperation operation1;
+ if (!IsRudeAbort())
+ {
+ operation1 = OPR_ThreadAbort;
+ }
+ else if (HasLockInCurrentDomain())
+ {
+ operation1 = OPR_ThreadRudeAbortInCriticalRegion;
+ }
+ else
+ {
+ operation1 = OPR_ThreadRudeAbortInNonCriticalRegion;
+ }
+ action1 = GetEEPolicy()->GetActionOnTimeout(operation1, this);
+ switch (action1)
+ {
+ case eRudeAbortThread:
+ GetEEPolicy()->NotifyHostOnTimeout(operation1, action1);
+ MarkThreadForAbort(requester, EEPolicy::TA_Rude);
+ SetRudeAbortEndTimeFromEEPolicy();
+ goto LRetry;
+ case eUnloadAppDomain:
+ {
+ AppDomain *pDomain = GetDomain();
+ if (!pDomain->IsDefaultDomain())
+ {
+ GetEEPolicy()->NotifyHostOnTimeout(operation1, action1);
+ pDomain->EnableADUnloadWorker(EEPolicy::ADU_Safe);
+ }
+ }
+ // AD unload does not abort finalizer thread.
+ if (this == FinalizerThread::GetFinalizerThread())
+ {
+ GetEEPolicy()->NotifyHostOnTimeout(operation1, action1);
+ MarkThreadForAbort(requester, EEPolicy::TA_Rude);
+ SetRudeAbortEndTimeFromEEPolicy();
+ goto LRetry;
+ }
+ else
+ {
+ if (this == GetThread())
+ {
+ Join(INFINITE,TRUE);
+ }
+ return S_OK;
+ }
+ break;
+ case eRudeUnloadAppDomain:
+ {
+ AppDomain *pDomain = GetDomain();
+ if (!pDomain->IsDefaultDomain())
+ {
+ GetEEPolicy()->NotifyHostOnTimeout(operation1, action1);
+ pDomain->EnableADUnloadWorker(EEPolicy::ADU_Rude);
+ }
+ }
+ // AD unload does not abort finalizer thread.
+ if (this == FinalizerThread::GetFinalizerThread())
+ {
+ MarkThreadForAbort(requester, EEPolicy::TA_Rude);
+ SetRudeAbortEndTimeFromEEPolicy();
+ goto LRetry;
+ }
+ else
+ {
+ if (this == GetThread())
+ {
+ Join(INFINITE,TRUE);
+ }
+ return S_OK;
+ }
+ break;
+ case eExitProcess:
+ case eFastExitProcess:
+ case eRudeExitProcess:
+ case eDisableRuntime:
+ GetEEPolicy()->NotifyHostOnTimeout(operation1, action1);
+ EEPolicy::HandleExitProcessFromEscalation(action1, HOST_E_EXITPROCESS_TIMEOUT);
+ _ASSERTE (!"Should not reach here");
+ break;
+ default:
+ break;
+ }
+ }
+
+ return HRESULT_FROM_WIN32(ERROR_TIMEOUT);
+ }
+
+ if(requester == Thread::TAR_Thread)
+ SetAborted();
+ return S_OK;
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+void Thread::SetRudeAbortEndTimeFromEEPolicy()
+{
+ LIMITED_METHOD_CONTRACT;
+ DWORD timeout;
+ if (HasLockInCurrentDomain())
+ {
+ timeout = GetEEPolicy()->GetTimeout(OPR_ThreadRudeAbortInCriticalRegion);
+ }
+ else
+ {
+ timeout = GetEEPolicy()->GetTimeout(OPR_ThreadRudeAbortInNonCriticalRegion);
+ }
+ ULONGLONG newEndTime;
+ if (timeout == INFINITE)
+ {
+ newEndTime = MAXULONGLONG;
+ }
+ else
+ {
+ newEndTime = CLRGetTickCount64() + timeout;
+ }
+
+ SetAbortEndTime(newEndTime, TRUE);
+}
+
+ULONGLONG Thread::s_NextSelfAbortEndTime = MAXULONGLONG;
+
+void Thread::ThreadAbortWatchDogAbort(Thread *pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ EEPolicy::ThreadAbortTypes abortType = EEPolicy::TA_Safe;
+ if (pThread->m_AbortInfo & TAI_ThreadRudeAbort)
+ {
+ abortType = EEPolicy::TA_Rude;
+ }
+ else if (pThread->m_AbortInfo & TAI_ThreadV1Abort)
+ {
+ abortType = EEPolicy::TA_V1Compatible;
+ }
+ else if (pThread->m_AbortInfo & TAI_ThreadAbort)
+ {
+ abortType = EEPolicy::TA_Safe;
+ }
+ else
+ {
+ return;
+ }
+
+ EX_TRY
+ {
+ pThread->UserAbort(Thread::TAR_Thread, abortType, INFINITE, Thread::UAC_WatchDog);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+void Thread::ThreadAbortWatchDogEscalate(Thread *pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ EPolicyAction action = eNoAction;
+ EClrOperation operation = OPR_ThreadRudeAbortInNonCriticalRegion;
+ if (!pThread->IsRudeAbort())
+ {
+ operation = OPR_ThreadAbort;
+ }
+ else if (pThread->HasLockInCurrentDomain())
+ {
+ operation = OPR_ThreadRudeAbortInCriticalRegion;
+ }
+ else
+ {
+ operation = OPR_ThreadRudeAbortInNonCriticalRegion;
+ }
+ action = GetEEPolicy()->GetActionOnTimeout(operation, pThread);
+ // We only support escalation to rude abort
+
+ EX_TRY {
+ switch (action)
+ {
+ case eRudeAbortThread:
+ GetEEPolicy()->NotifyHostOnTimeout(operation,action);
+ pThread->UserAbort(Thread::TAR_Thread, EEPolicy::TA_Rude, INFINITE, Thread::UAC_WatchDog);
+ break;
+ case eUnloadAppDomain:
+ {
+ AppDomain *pDomain = pThread->GetDomain();
+ if (!pDomain->IsDefaultDomain())
+ {
+ GetEEPolicy()->NotifyHostOnTimeout(operation,action);
+ pDomain->EnableADUnloadWorker(EEPolicy::ADU_Safe);
+ }
+ }
+ break;
+ case eRudeUnloadAppDomain:
+ {
+ AppDomain *pDomain = pThread->GetDomain();
+ if (!pDomain->IsDefaultDomain())
+ {
+ GetEEPolicy()->NotifyHostOnTimeout(operation,action);
+ pDomain->EnableADUnloadWorker(EEPolicy::ADU_Rude);
+ }
+ }
+ break;
+ case eExitProcess:
+ case eFastExitProcess:
+ case eRudeExitProcess:
+ case eDisableRuntime:
+ // HandleExitProcessFromEscalation will try to grab ThreadStore again.
+ _ASSERTE (ThreadStore::HoldingThreadStore());
+ ThreadStore::UnlockThreadStore();
+ GetEEPolicy()->NotifyHostOnTimeout(operation,action);
+ EEPolicy::HandleExitProcessFromEscalation(action, HOST_E_EXITPROCESS_THREADABORT);
+ _ASSERTE (!"Should not reach here");
+ break;
+ case eNoAction:
+ break;
+ default:
+ _ASSERTE (!"unknown policy for thread abort");
+ }
+ }
+ EX_CATCH {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+// If a thread is self-aborted and has a timeout, we need to watch the thread
+void Thread::ThreadAbortWatchDog()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ if (CLRHosted())
+ {
+ ThreadStoreLockHolder tsLock;
+
+ ULONGLONG curTime = CLRGetTickCount64();
+
+ s_NextSelfAbortEndTime = MAXULONGLONG;
+
+ Thread *thread = NULL;
+ while ((thread = ThreadStore::GetThreadList(thread)) != NULL)
+ {
+ if (!thread->IsAbortRequested())
+ {
+ continue;
+ }
+
+ if (thread == FinalizerThread::GetFinalizerThread() && !g_FinalizerIsRunning)
+ {
+ // if finalizer method is not running, don't try to abort the finalizer thread
+ continue;
+ }
+
+ BOOL fNeedsToInitiateAbort = !thread->IsAbortInitiated() || thread->IsRudeAbort();
+ ULONGLONG endTime = thread->GetAbortEndTime();
+ if (fNeedsToInitiateAbort)
+ {
+ s_NextSelfAbortEndTime = 0;
+ }
+ else if (endTime < s_NextSelfAbortEndTime)
+ {
+ s_NextSelfAbortEndTime = endTime;
+ }
+
+ if (thread->m_AbortController == 0)
+ {
+ STRESS_LOG3(LF_ALWAYS, LL_ALWAYS, "ThreadAbortWatchDog for Thread %p Thread Id = %x with timeout %x\n",
+ thread, thread->GetThreadId(), endTime);
+
+ if (endTime != MAXULONGLONG && curTime >= endTime)
+ {
+ ThreadAbortWatchDogEscalate(thread);
+ }
+ else if (fNeedsToInitiateAbort)
+ {
+ ThreadAbortWatchDogAbort(thread);
+ }
+ }
+ }
+ }
+}
+
+void Thread::LockAbortRequest(Thread* pThread)
+{
+ WRAPPER_NO_CONTRACT;
+
+ DWORD dwSwitchCount = 0;
+
+ while (TRUE) {
+ for (unsigned i = 0; i < 10000; i ++) {
+ if (VolatileLoad(&(pThread->m_AbortRequestLock)) == 0) {
+ break;
+ }
+ YieldProcessor(); // indicate to the processor that we are spinning
+ }
+ if (FastInterlockCompareExchange(&(pThread->m_AbortRequestLock),1,0) == 0) {
+ return;
+ }
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+}
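+
+// The same acquire pattern in portable form (illustrative sketch using
+// std::atomic; not how the runtime is actually built):
+#if 0
+#include <atomic>
+#include <thread>
+
+static std::atomic<long> g_abortLock{0};
+
+static void AcquireAbortLock()
+{
+    for (;;)
+    {
+        // Spin politely for a while before attempting the interlocked exchange.
+        for (unsigned i = 0; i < 10000 && g_abortLock.load(std::memory_order_relaxed) != 0; i++)
+        {
+        }
+        long expected = 0;
+        if (g_abortLock.compare_exchange_strong(expected, 1))
+            return;                    // lock acquired
+        std::this_thread::yield();     // give up the slice, like __SwitchToThread
+    }
+}
+#endif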
+
+void Thread::UnlockAbortRequest(Thread *pThread)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE (pThread->m_AbortRequestLock == 1);
+ FastInterlockExchange(&pThread->m_AbortRequestLock, 0);
+}
+
+void Thread::MarkThreadForAbort(ThreadAbortRequester requester, EEPolicy::ThreadAbortTypes abortType, BOOL fTentative /*=FALSE*/)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE ((requester & TAR_StackOverflow) == 0 || (requester & TAR_Thread) == TAR_Thread);
+
+ AbortRequestLockHolder lh(this);
+
+ if (fTentative)
+ {
+ if (!IsAbortRequested())
+ {
+ STRESS_LOG0(LF_SYNC, LL_INFO1000, "Tentative thread abort abandoned\n");
+ return;
+ }
+ }
+
+#ifdef _DEBUG
+ if (abortType == EEPolicy::TA_Rude)
+ {
+ m_fRudeAborted = TRUE;
+ }
+#endif
+
+ DWORD abortInfo = 0;
+
+ if (requester & TAR_Thread)
+ {
+ if (abortType == EEPolicy::TA_Safe)
+ {
+ abortInfo |= TAI_ThreadAbort;
+ }
+ else if (abortType == EEPolicy::TA_Rude)
+ {
+ abortInfo |= TAI_ThreadRudeAbort;
+ }
+ else if (abortType == EEPolicy::TA_V1Compatible)
+ {
+ abortInfo |= TAI_ThreadV1Abort;
+ }
+ }
+
+ if (requester & TAR_ADUnload)
+ {
+ if (abortType == EEPolicy::TA_Safe)
+ {
+ abortInfo |= TAI_ADUnloadAbort;
+ }
+ else if (abortType == EEPolicy::TA_Rude)
+ {
+ abortInfo |= TAI_ADUnloadRudeAbort;
+ }
+ else if (abortType == EEPolicy::TA_V1Compatible)
+ {
+ abortInfo |= TAI_ADUnloadV1Abort;
+ }
+ if (IsADUnloadHelperThread())
+ {
+ abortInfo |= TAI_ForADUnloadThread;
+ }
+ }
+
+ if (requester & TAR_FuncEval)
+ {
+ if (abortType == EEPolicy::TA_Safe)
+ {
+ abortInfo |= TAI_FuncEvalAbort;
+ }
+ else if (abortType == EEPolicy::TA_Rude)
+ {
+ abortInfo |= TAI_FuncEvalRudeAbort;
+ }
+ else if (abortType == EEPolicy::TA_V1Compatible)
+ {
+ abortInfo |= TAI_FuncEvalV1Abort;
+ }
+ }
+
+ if (abortInfo == 0)
+ {
+ ASSERT(!"Invalid abort information");
+ return;
+ }
+
+ if (requester == TAR_Thread)
+ {
+ DWORD timeoutFromPolicy;
+ if (abortType != EEPolicy::TA_Rude)
+ {
+ timeoutFromPolicy = GetEEPolicy()->GetTimeout(OPR_ThreadAbort);
+ }
+ else if (!HasLockInCurrentDomain())
+ {
+ timeoutFromPolicy = GetEEPolicy()->GetTimeout(OPR_ThreadRudeAbortInNonCriticalRegion);
+ }
+ else
+ {
+ timeoutFromPolicy = GetEEPolicy()->GetTimeout(OPR_ThreadRudeAbortInCriticalRegion);
+ }
+ if (timeoutFromPolicy != INFINITE)
+ {
+ ULONGLONG endTime = CLRGetTickCount64() + timeoutFromPolicy;
+ if (abortType != EEPolicy::TA_Rude)
+ {
+ if (endTime < m_AbortEndTime)
+ {
+ m_AbortEndTime = endTime;
+ }
+ }
+ else if (endTime < m_RudeAbortEndTime)
+ {
+ m_RudeAbortEndTime = endTime;
+ }
+ // We cannot call into the host if we are in the middle of a stack overflow,
+ // and we don't need to wake up our watchdog if there is no timeout.
+ if (GetThread() == this && (requester & TAR_StackOverflow) == 0)
+ {
+ AppDomain::EnableADUnloadWorkerForThreadAbort();
+ }
+ }
+ }
+
+ if (abortInfo == (m_AbortInfo & abortInfo))
+ {
+ //
+ // We are already doing this kind of abort.
+ //
+ return;
+ }
+
+ m_AbortInfo |= abortInfo;
+
+ if (m_AbortType >= (DWORD)abortType)
+ {
+ // another thread is aborting at a higher level
+ return;
+ }
+
+ m_AbortType = abortType;
+
+ if (!IsAbortRequested())
+ {
+ // We must set this before we start flipping thread bits to avoid races where
+ // the trap-returning-threads count is already high due to other reasons.
+
+ // This is the first time the thread has been asked to abort.
+ SetAbortRequestBit();
+
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_Abort);
+#endif
+ }
+ STRESS_LOG4(LF_APPDOMAIN, LL_ALWAYS, "Mark Thread %p Thread Id = %x for abort from requester %d (type %d)\n", this, GetThreadId(), requester, abortType);
+}
+
+void Thread::SetAbortRequestBit()
+{
+ WRAPPER_NO_CONTRACT;
+ while (TRUE)
+ {
+ Volatile<LONG> curValue = (LONG)m_State;
+ if ((curValue & TS_AbortRequested) != 0)
+ {
+ break;
+ }
+ if (FastInterlockCompareExchange((LONG*)&m_State, curValue|TS_AbortRequested, curValue) == curValue)
+ {
+ ThreadStore::TrapReturningThreads(TRUE);
+
+ break;
+ }
+ }
+}
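+
+// The loop above uses a compare-exchange rather than a plain interlocked OR
+// so that the side effect (bumping the trap-returning-threads count) runs
+// exactly once, on the 0 -> 1 transition of the bit. A portable sketch of
+// the idiom:
+#if 0
+#include <atomic>
+#include <cstdint>
+
+// Returns true only for the caller that actually flipped the bit.
+static bool SetBitOnce(std::atomic<uint32_t> &state, uint32_t bit)
+{
+    uint32_t cur = state.load();
+    while ((cur & bit) == 0)
+    {
+        if (state.compare_exchange_weak(cur, cur | bit))
+            return true;    // we made the 0 -> 1 transition; run the side effect
+        // compare_exchange_weak refreshed 'cur' on failure; retry
+    }
+    return false;           // someone else already set the bit
+}
+#endif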
+
+void Thread::RemoveAbortRequestBit()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+#ifdef _DEBUG
+ // There's a race between removing the TS_AbortRequested bit and decrementing g_TrapReturningThreads
+ // We may remove the bit, but before we have a chance to call ThreadStore::TrapReturningThreads(FALSE)
+ // DbgFindThread() may execute, and find too few threads with the bit set.
+ // To ensure the assert in DbgFindThread does not fire under such a race, we set the ChgInFlight beforehand.
+ CounterHolder trtHolder(&g_trtChgInFlight);
+#endif
+ while (TRUE)
+ {
+ Volatile<LONG> curValue = (LONG)m_State;
+ if ((curValue & TS_AbortRequested) == 0)
+ {
+ break;
+ }
+ if (FastInterlockCompareExchange((LONG*)&m_State, curValue&(~TS_AbortRequested), curValue) == curValue)
+ {
+ ThreadStore::TrapReturningThreads(FALSE);
+
+ break;
+ }
+ }
+}
+
+// Make sure that when AbortRequest bit is cleared, we also dec TrapReturningThreads count.
+void Thread::UnmarkThreadForAbort(ThreadAbortRequester requester, BOOL fForce)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Switch to COOP (for ClearAbortReason) before acquiring AbortRequestLock
+ GCX_COOP();
+
+ AbortRequestLockHolder lh(this);
+
+ //
+ // Unmark the bits that are being turned off
+ //
+ if (requester & TAR_Thread)
+ {
+ if ((m_AbortInfo != TAI_ThreadRudeAbort) || fForce)
+ {
+ m_AbortInfo &= ~(TAI_ThreadAbort |
+ TAI_ThreadV1Abort |
+ TAI_ThreadRudeAbort );
+ }
+
+ if (m_AbortReason)
+ {
+ ClearAbortReason(TRUE);
+ }
+ }
+
+ if (requester & TAR_ADUnload)
+ {
+ m_AbortInfo &= ~(TAI_ADUnloadAbort |
+ TAI_ADUnloadV1Abort |
+ TAI_ADUnloadRudeAbort);
+ }
+
+ if (requester & TAR_FuncEval)
+ {
+ m_AbortInfo &= ~(TAI_FuncEvalAbort |
+ TAI_FuncEvalV1Abort |
+ TAI_FuncEvalRudeAbort);
+ }
+
+ //
+ // Decide which type of abort to do based on the new bit field.
+ //
+ if (m_AbortInfo & TAI_AnyRudeAbort)
+ {
+ m_AbortType = EEPolicy::TA_Rude;
+ }
+ else if (m_AbortInfo & TAI_AnyV1Abort)
+ {
+ m_AbortType = EEPolicy::TA_V1Compatible;
+ }
+ else if (m_AbortInfo & TAI_AnySafeAbort)
+ {
+ m_AbortType = EEPolicy::TA_Safe;
+ }
+ else
+ {
+ m_AbortType = EEPolicy::TA_None;
+ }
+
+ //
+ // If still aborting, do nothing
+ //
+ if (m_AbortType != EEPolicy::TA_None)
+ {
+ return;
+ }
+
+ m_AbortEndTime = MAXULONGLONG;
+ m_RudeAbortEndTime = MAXULONGLONG;
+
+ if (IsAbortRequested())
+ {
+ RemoveAbortRequestBit();
+ FastInterlockAnd((DWORD*)&m_State,~(TS_AbortInitiated));
+ m_fRudeAbortInitiated = FALSE;
+ ResetUserInterrupted();
+
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_Abort);
+#endif
+ }
+
+ STRESS_LOG3(LF_APPDOMAIN, LL_ALWAYS, "Unmark Thread %p Thread Id = %x for abort from requester %d\n", this, GetThreadId(), requester);
+}
+
+// Clear the flag that marks this thread's abort as having been initiated for AD unload.
+void Thread::ResetBeginAbortedForADUnload()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ AbortRequestLockHolder lh(this);
+
+ m_AbortInfo &= ~TAI_ForADUnloadThread;
+}
+
+void Thread::InternalResetAbort(ThreadAbortRequester requester, BOOL fResetRudeAbort)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(this == GetThread());
+ _ASSERTE(!IsDead());
+
+ // managed code cannot reset a rude thread abort
+ UnmarkThreadForAbort(requester, fResetRudeAbort);
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_Abort);
+#endif
+}
+
+
+// Throw a thread abort request when a suspended thread is resumed. Make sure you know what you
+// are doing when you call this routine.
+void Thread::SetAbortRequest(EEPolicy::ThreadAbortTypes abortType)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ MarkThreadForAbort(TAR_ADUnload, abortType);
+
+ if (m_State & TS_Interruptible)
+ {
+ UserInterrupt(TI_Abort);
+ }
+}
+
+
+void ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_REASON reason)
+{
+ CONTRACTL {
+ NOTHROW;
+ if ((GetThread() != NULL) && GetThread()->PreemptiveGCDisabled()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ // There's a nasty problem here. Once we start shutting down because of a
+ // process detach notification, threads are disappearing from under us. There
+ // are a surprising number of cases where the dying thread holds the ThreadStore
+ // lock. For example, the finalizer thread holds this during startup in about
+ // 10 of our COM BVTs.
+ if (!IsAtProcessExit())
+ {
+ BOOL gcOnTransitions;
+
+ Thread *pCurThread = GetThread();
+
+ gcOnTransitions = GC_ON_TRANSITIONS(FALSE); // don't do a GC for GCStress 3
+
+ BOOL toggleGC = ( pCurThread != NULL
+ && pCurThread->PreemptiveGCDisabled()
+ && reason != ThreadSuspend::SUSPEND_FOR_GC);
+
+ // Note: there is logic in gc.cpp surrounding suspending all
+ // runtime threads for a GC that depends on the fact that we
+ // do an EnablePreemptiveGC and a DisablePreemptiveGC around
+ // taking this lock.
+ if (toggleGC)
+ pCurThread->EnablePreemptiveGC();
+
+ LOG((LF_SYNC, INFO3, "Locking thread store\n"));
+
+ // Any thread that holds the thread store lock cannot be stopped by unmanaged breakpoints and exceptions when
+ // we're doing managed/unmanaged debugging. Calling SetDebugCantStop(true) on the current thread helps us
+ // remember that.
+ if (pCurThread)
+ pCurThread->SetDebugCantStop(true);
+
+ // This is used to avoid thread starvation if non-GC threads are competing for
+ // the thread store lock when there is a real GC-thread waiting to get in.
+ // This is initialized lazily when the first non-GC thread backs out because of
+ // a waiting GC thread.
+ if (s_hAbortEvt != NULL &&
+ !(reason == ThreadSuspend::SUSPEND_FOR_GC ||
+ reason == ThreadSuspend::SUSPEND_FOR_GC_PREP ||
+ reason == ThreadSuspend::SUSPEND_FOR_DEBUGGER_SWEEP) &&
+ m_pThreadAttemptingSuspendForGC != NULL &&
+ m_pThreadAttemptingSuspendForGC != pCurThread)
+ {
+ CLREventBase * hAbortEvt = s_hAbortEvt;
+
+ if (hAbortEvt != NULL)
+ {
+ LOG((LF_SYNC, INFO3, "Performing suspend abort wait.\n"));
+ hAbortEvt->Wait(INFINITE, FALSE);
+ LOG((LF_SYNC, INFO3, "Release from suspend abort wait.\n"));
+ }
+ }
+
+ // ThreadStoreLock is a critical lock used by GC, ThreadAbort, AD unload, Yield.
+ // If a task is switched out while it owns ThreadStoreLock, it may not be able to
+ // release it because the scheduler may be running managed code without yielding.
+ Thread::BeginThreadAffinity();
+
+ // This is shutdown aware. If we're in shutdown, and not helper/finalizer/shutdown
+ // then this will not take the lock and just block forever.
+ ThreadStore::s_pThreadStore->Enter();
+
+
+ _ASSERTE(ThreadStore::s_pThreadStore->m_holderthreadid.IsUnknown());
+ ThreadStore::s_pThreadStore->m_holderthreadid.SetThreadId();
+
+ LOG((LF_SYNC, INFO3, "Locked thread store\n"));
+
+ // Established after we obtain the lock, so only useful for synchronous tests.
+ // A thread attempting to suspend us asynchronously already holds this lock.
+ ThreadStore::s_pThreadStore->m_HoldingThread = pCurThread;
+
+#ifndef _PREFAST_
+ if (toggleGC)
+ pCurThread->DisablePreemptiveGC();
+#endif
+
+ GC_ON_TRANSITIONS(gcOnTransitions);
+ }
+#ifdef _DEBUG
+ else
+ LOG((LF_SYNC, INFO3, "Locking thread store skipped upon detach\n"));
+#endif
+}
+
+void ThreadSuspend::UnlockThreadStore(BOOL bThreadDestroyed, ThreadSuspend::SUSPEND_REASON reason)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // There's a nasty problem here. Once we start shutting down because of a
+ // process detach notification, threads are disappearing from under us. There
+ // are a surprising number of cases where the dying thread holds the ThreadStore
+ // lock. For example, the finalizer thread holds this during startup in about
+ // 10 of our COM BVTs.
+ if (!IsAtProcessExit())
+ {
+ Thread *pCurThread = GetThread();
+
+ LOG((LF_SYNC, INFO3, "Unlocking thread store\n"));
+ _ASSERTE(GetThread() == NULL || ThreadStore::s_pThreadStore->m_HoldingThread == GetThread());
+
+#ifdef _DEBUG
+ // If Thread object has been destroyed, we need to reset the ownership info in Crst.
+ _ASSERTE(!bThreadDestroyed || GetThread() == NULL);
+ if (bThreadDestroyed) {
+ ThreadStore::s_pThreadStore->m_Crst.m_holderthreadid.SetThreadId();
+ }
+#endif
+
+ ThreadStore::s_pThreadStore->m_HoldingThread = NULL;
+ ThreadStore::s_pThreadStore->m_holderthreadid.ResetThreadId();
+ ThreadStore::s_pThreadStore->Leave();
+
+ Thread::EndThreadAffinity();
+
+ // We're out of the critical area for managed/unmanaged debugging.
+ if (!bThreadDestroyed && pCurThread)
+ pCurThread->SetDebugCantStop(false);
+ }
+#ifdef _DEBUG
+ else
+ LOG((LF_SYNC, INFO3, "Unlocking thread store skipped upon detach\n"));
+#endif
+}
+
+
+void ThreadStore::AllocateOSContext()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HoldingThreadStore());
+ if (s_pOSContext == NULL
+#ifdef _DEBUG
+ || s_pOSContext == (CONTEXT*)0x1
+#endif
+ )
+ {
+ s_pOSContext = new (nothrow) CONTEXT();
+ }
+#ifdef _DEBUG
+ if (s_pOSContext == NULL)
+ {
+ s_pOSContext = (CONTEXT*)0x1;
+ }
+#endif
+}
+
+CONTEXT *ThreadStore::GrabOSContext()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HoldingThreadStore());
+ CONTEXT *pContext = s_pOSContext;
+ s_pOSContext = NULL;
+#ifdef _DEBUG
+ if (pContext == (CONTEXT*)0x1)
+ {
+ pContext = NULL;
+ }
+#endif
+ return pContext;
+}
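+
+// AllocateOSContext/GrabOSContext implement a simple "preallocate while
+// allocation is still safe, consume later when it is not" hand-off; in debug
+// builds (CONTEXT*)0x1 apparently serves as an "allocation failed" marker so
+// the slot is retried rather than treated as filled. A generic sketch of the
+// idiom (names are hypothetical):
+#if 0
+#include <new>
+
+template <typename T>
+struct PreallocSlot
+{
+    T *m_p = nullptr;
+
+    void Refill()           // call while allocation is still allowed
+    {
+        if (m_p == nullptr)
+            m_p = new (std::nothrow) T();
+    }
+
+    T *Take()               // call later, when allocation is forbidden
+    {
+        T *p = m_p;
+        m_p = nullptr;
+        return p;           // may be nullptr if Refill never succeeded
+    }
+};
+#endif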
+
+extern void WaitForEndOfShutdown();
+
+//----------------------------------------------------------------------------
+//
+// Suspending threads, rendezvousing with threads that reach safe places, etc.
+//
+//----------------------------------------------------------------------------
+
+// A note on SUSPENSIONS.
+//
+// We must not suspend a thread while it is holding the ThreadStore lock, or
+// the lock on the thread. Why? Because we need those locks to resume the
+// thread (and to perform a GC, use the debugger, spawn or kill threads, etc.)
+//
+// There are two types of suspension we must consider to enforce the above
+// rule. Synchronous suspensions are where we persuade the thread to suspend
+// itself. This is CommonTripThread and its cousins. In other words, the
+// thread toggles the GC mode, or it hits a hijack, or certain opcodes in the
+// interpreter, etc. In these cases, the thread can simply check whether it
+// is holding these locks before it suspends itself.
+//
+// The other style is an asynchronous suspension. This is where another
+// thread looks to see where we are. If we are in a fully interruptible region
+// of JIT code, we will be left suspended. In this case, the thread performing
+// the suspension must hold the locks on the thread and the threadstore. This
+// ensures that we aren't suspended while we are holding these locks.
+//
+// Note that in the asynchronous case it's not enough to just inspect the thread
+// to see if it's holding these locks. Since the thread must be in preemptive
+// mode to block to acquire these locks, and since there will be a few
+// instructions between acquiring the lock and noting in our state that we've
+// acquired it, there would be a window where we would seem eligible for
+// suspension -- but in fact would not be.
+
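+// A minimal sketch of the window described above, using hypothetical names
+// (g_tsOsLock and g_pRecordedHolder exist only for this illustration):
+#if 0 // sketch, not compiled
+CRITICAL_SECTION g_tsOsLock;         // hypothetical lock protecting the store
+Thread *g_pRecordedHolder = NULL;    // hypothetical ownership record
+
+void AcquireThreadStoreLockForExample(Thread *pSelf)
+{
+    // (1) Block in preemptive mode until the OS lock is ours; from this
+    //     instant the lock is genuinely held...
+    EnterCriticalSection(&g_tsOsLock);
+
+    // <-- window: a suspender that only inspects g_pRecordedHolder still
+    //     believes this thread holds nothing and could suspend it right here.
+
+    // (2) ...but ownership is not recorded until a few instructions later.
+    g_pRecordedHolder = pSelf;
+}
+#endif
+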
+//----------------------------------------------------------------------------
+
+// We can't leave preemptive mode and enter cooperative mode if a GC is
+// currently in progress. This is the situation when returning into
+// the EE from outside. See the comments in DisablePreemptiveGC() to understand
+// why we enable preemptive GC here!
+void Thread::RareDisablePreemptiveGC()
+{
+ BEGIN_PRESERVE_LAST_ERROR;
+
+ CONTRACTL {
+ NOTHROW;
+ SO_TOLERANT;
+ DISABLED(GC_TRIGGERS); // I think this is actually wrong: prevents a p->c->p mode switch inside a NOTRIGGER region.
+ }
+ CONTRACTL_END;
+
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ if (IsAtProcessExit())
+ {
+ goto Exit;
+ }
+
+
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_GCMode);
+#endif
+
+ // This should NEVER be called if the TSNC_UnsafeSkipEnterCooperative bit is set!
+ _ASSERTE(!(m_StateNC & TSNC_UnsafeSkipEnterCooperative) && "DisablePreemptiveGC called while the TSNC_UnsafeSkipEnterCooperative bit is set");
+
+ // Holding a spin lock in preemptive mode and switching to cooperative mode
+ // could cause other threads to spin while waiting for the GC
+ _ASSERTE ((m_StateNC & Thread::TSNC_OwnsSpinLock) == 0);
+
+ // If this thread is asked to yield
+ if (m_State & TS_YieldRequested)
+ {
+ __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ }
+
+ if (!GCHeap::IsGCHeapInitialized())
+ {
+ goto Exit;
+ }
+
+ // Note that IsGCInProgress is also true for, say, a Pause (anywhere SuspendEE happens), and GCThread is the
+ // thread that did the Pause. While in a Pause, if another thread attempts a Rev/Pinvoke it should get inside
+ // the following block and wait until resume
+ if (((GCHeap::IsGCInProgress() && (this != ThreadSuspend::GetSuspensionThread())) ||
+ (m_State & (TS_UserSuspendPending | TS_DebugSuspendPending | TS_StackCrawlNeeded))) &&
+ (!g_fSuspendOnShutdown || IsFinalizerThread() || IsShutdownSpecialThread()))
+ {
+ if (!ThreadStore::HoldingThreadStore(this))
+ {
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, "RareDisablePreemptiveGC: entering. Thread state = %x\n", m_State.Load());
+
+ DWORD dwSwitchCount = 0;
+
+ do
+ {
+ EnablePreemptiveGC();
+
+ // Cannot use GCX_PREEMP_NO_DTOR here because we're inside of the thread
+ // PREEMP->COOP switch mechanism and GCX_PREEMP's asserts will fire.
+ // Instead we use BEGIN_GCX_ASSERT_PREEMP to inform Scan of the mode
+ // change here.
+ BEGIN_GCX_ASSERT_PREEMP;
+
+ // just wait until the GC is over.
+ if (this != ThreadSuspend::GetSuspensionThread())
+ {
+#ifdef PROFILING_SUPPORTED
+ // If the profiler desires GC events, notify it that this thread is waiting until the GC is over
+ // Do not send suspend notifications for debugger suspensions
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackSuspends());
+ if (!(m_State & TS_DebugSuspendPending))
+ {
+ g_profControlBlock.pProfInterface->RuntimeThreadSuspended((ThreadID)this);
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+
+#if !defined(FEATURE_CORECLR) // simple hosting
+ // First, check to see if there's an IDbgThreadControl interface that needs
+ // notification of the suspension
+ if (m_State & TS_DebugSuspendPending)
+ {
+ IDebuggerThreadControl *pDbgThreadControl = CorHost::GetDebuggerThreadControl();
+
+ if (pDbgThreadControl)
+ pDbgThreadControl->ThreadIsBlockingForDebugger();
+
+ }
+
+ if (CLRGCHosted())
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ CorHost2::GetHostGCManager()->ThreadIsBlockingForSuspension();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+
+ // If not, check to see if there's an IGCThreadControl interface that needs
+ // notification of the suspension
+ IGCThreadControl *pGCThreadControl = CorHost::GetGCThreadControl();
+
+ if (pGCThreadControl)
+ pGCThreadControl->ThreadIsBlockingForSuspension();
+#endif // !defined(FEATURE_CORECLR)
+
+ DWORD status = S_OK;
+ SetThreadStateNC(TSNC_WaitUntilGCFinished);
+ status = GCHeap::GetGCHeap()->WaitUntilGCComplete();
+ ResetThreadStateNC(TSNC_WaitUntilGCFinished);
+
+ if (status == (DWORD)COR_E_STACKOVERFLOW)
+ {
+ // One of two things can happen here:
+ // 1. GC is suspending the process. GC needs to wait.
+ // 2. GC is proceeding after suspension. The current thread needs to spin.
+ SetThreadState(TS_BlockGCForSO);
+ while (GCHeap::IsGCInProgress() && m_fPreemptiveGCDisabled.Load() == 0)
+ {
+#undef Sleep
+ // We cannot go to a host for a blocking operation due to lack of stack.
+ // Instead we will spin here until
+ // 1. the GC is finished; or
+ // 2. the GC lets this thread run and will wait for it
+ Sleep(10);
+#define Sleep(a) Dont_Use_Sleep(a)
+ }
+ ResetThreadState(TS_BlockGCForSO);
+ if (m_fPreemptiveGCDisabled.Load() == 1)
+ {
+ // GC suspension has allowed this thread to switch back to cooperative mode.
+ break;
+ }
+ }
+ if (!GCHeap::IsGCInProgress())
+ {
+ if (HasThreadState(TS_StackCrawlNeeded))
+ {
+ SetThreadStateNC(TSNC_WaitUntilGCFinished);
+ ThreadStore::WaitForStackCrawlEvent();
+ ResetThreadStateNC(TSNC_WaitUntilGCFinished);
+ }
+ else
+ {
+ __SwitchToThread(0, ++dwSwitchCount);
+ }
+ }
+
+#ifdef PROFILING_SUPPORTED
+ // Let the profiler know that this thread is resuming
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackSuspends());
+ g_profControlBlock.pProfInterface->RuntimeThreadResumed((ThreadID)this);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+ }
+
+ END_GCX_ASSERT_PREEMP;
+
+ // disable preemptive gc.
+ FastInterlockOr(&m_fPreemptiveGCDisabled, 1);
+
+ // The fact that we check whether 'this' is the GC thread may seem
+ // strange. After all, we determined this before entering the method.
+ // However, it is possible for the current thread to become the GC
+ // thread while in this loop. This happens if you use the COM+
+ // debugger to suspend this thread and then release it.
+
+ } while ((GCHeap::IsGCInProgress() && (this != ThreadSuspend::GetSuspensionThread())) ||
+ (m_State & (TS_UserSuspendPending | TS_DebugSuspendPending | TS_StackCrawlNeeded)));
+ }
+ STRESS_LOG0(LF_SYNC, LL_INFO1000, "RareDisablePreemptiveGC: leaving\n");
+ }
+
+ // Block all threads except finalizer and shutdown thread during shutdown.
+ // If g_fSuspendFinalizerOnShutdown is set, block the finalizer too.
+ if ((g_fSuspendOnShutdown && !IsFinalizerThread() && !IsShutdownSpecialThread()) ||
+ (g_fSuspendFinalizerOnShutdown && IsFinalizerThread()))
+ {
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, "RareDisablePreemptiveGC: entering. Thread state = %x\n", m_State.Load());
+
+ EnablePreemptiveGC();
+
+ // Cannot use GCX_PREEMP_NO_DTOR here because we're inside of the thread
+ // PREEMP->COOP switch mechanism and GCX_PREEMP's asserts will fire.
+ // Instead we use BEGIN_GCX_ASSERT_PREEMP to inform Scan of the mode
+ // change here.
+ BEGIN_GCX_ASSERT_PREEMP;
+
+#ifdef PROFILING_SUPPORTED
+ // If the profiler desires GC events, notify it that this thread is waiting until the GC is over
+ // Do not send suspend notifications for debugger suspensions
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackSuspends());
+ if (!(m_State & TS_DebugSuspendPending))
+ {
+ g_profControlBlock.pProfInterface->RuntimeThreadSuspended((ThreadID)this);
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+
+#if !defined(FEATURE_CORECLR) // simple hosting
+ // First, check to see if there's an IDbgThreadControl interface that needs
+ // notification of the suspension
+ if (m_State & TS_DebugSuspendPending)
+ {
+ IDebuggerThreadControl *pDbgThreadControl = CorHost::GetDebuggerThreadControl();
+
+ if (pDbgThreadControl)
+ pDbgThreadControl->ThreadIsBlockingForDebugger();
+
+ }
+
+ if (CLRGCHosted())
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ CorHost2::GetHostGCManager()->ThreadIsBlockingForSuspension();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+
+ // If not, check to see if there's an IGCThreadControl interface that needs
+ // notification of the suspension
+ IGCThreadControl *pGCThreadControl = CorHost::GetGCThreadControl();
+
+ if (pGCThreadControl)
+ pGCThreadControl->ThreadIsBlockingForSuspension();
+#endif // !defined(FEATURE_CORECLR)
+
+ // The thread is blocked for shutdown. We are not concerned about GC violations here.
+ CONTRACT_VIOLATION(GCViolation);
+
+ WaitForEndOfShutdown();
+
+ END_GCX_ASSERT_PREEMP;
+
+ __SwitchToThread(INFINITE, CALLER_LIMITS_SPINNING);
+ _ASSERTE(!"Cannot reach here");
+ }
+
+Exit: ;
+ END_PRESERVE_LAST_ERROR;
+}
+
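+// For context: the fast path that funnels into the Rare routine above lives in
+// the Thread inline headers and looks roughly like this (a sketch, not the
+// verbatim code):
+//
+//     m_fPreemptiveGCDisabled.StoreWithoutBarrier(1);   // optimistically enter COOP
+//     if (g_TrapReturningThreads.LoadWithoutBarrier())  // set by TrapReturningThreads
+//         RareDisablePreemptiveGC();                    // rendezvous with the suspension
+//
+// so the expensive wait above only runs when a suspension is actually pending.
+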
+void Thread::HandleThreadAbortTimeout()
+{
+ WRAPPER_NO_CONTRACT;
+
+ EPolicyAction action = eNoAction;
+ EClrOperation operation = OPR_ThreadRudeAbortInNonCriticalRegion;
+
+ if (IsFuncEvalAbort())
+ {
+ // There is no escalation policy for a FuncEvalAbort timeout.
+ // The debugger should retain control of the policy. For example, if a RudeAbort times out, it's
+ // probably because the debugger had some other thread frozen. When the thread is thawed, things might
+ // be fine, so we don't want to escalate the FuncEvalRudeAbort (which will be swallowed by FuncEvalHijackWorker)
+ // into a user RudeThreadAbort (which will at least rip the entire thread).
+ return;
+ }
+
+ if (!IsRudeAbort())
+ {
+ operation = OPR_ThreadAbort;
+ }
+ else if (HasLockInCurrentDomain())
+ {
+ operation = OPR_ThreadRudeAbortInCriticalRegion;
+ }
+ else
+ {
+ operation = OPR_ThreadRudeAbortInNonCriticalRegion;
+ }
+ action = GetEEPolicy()->GetActionOnTimeout(operation, this);
+ // We only support escalation to rude abort
+
+ EX_TRY {
+ switch (action)
+ {
+ case eRudeAbortThread:
+ GetEEPolicy()->NotifyHostOnTimeout(operation,action);
+ MarkThreadForAbort(TAR_Thread, EEPolicy::TA_Rude);
+ break;
+ case eUnloadAppDomain:
+ {
+ AppDomain *pDomain = GetDomain();
+ if (!pDomain->IsDefaultDomain())
+ {
+ GetEEPolicy()->NotifyHostOnTimeout(operation,action);
+ pDomain->EnableADUnloadWorker(EEPolicy::ADU_Safe);
+ }
+ }
+ break;
+ case eRudeUnloadAppDomain:
+ {
+ AppDomain *pDomain = GetDomain();
+ if (!pDomain->IsDefaultDomain())
+ {
+ GetEEPolicy()->NotifyHostOnTimeout(operation,action);
+ pDomain->EnableADUnloadWorker(EEPolicy::ADU_Rude);
+ }
+ }
+ break;
+ case eExitProcess:
+ case eFastExitProcess:
+ case eRudeExitProcess:
+ case eDisableRuntime:
+ GetEEPolicy()->NotifyHostOnTimeout(operation,action);
+ EEPolicy::HandleExitProcessFromEscalation(action, HOST_E_EXITPROCESS_THREADABORT);
+ _ASSERTE (!"Should not reach here");
+ break;
+ case eNoAction:
+ break;
+ default:
+ _ASSERTE (!"unknown policy for thread abort");
+ }
+ }
+ EX_CATCH {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+void Thread::HandleThreadAbort (BOOL fForce)
+{
+ BEGIN_PRESERVE_LAST_ERROR;
+
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ BEGIN_SO_INTOLERANT_CODE(this);
+ TESTHOOKCALL(AppDomainCanBeUnloaded(GetDomain()->GetId().m_dwId,FALSE));
+
+ // It's possible we could go through here if we hit a hard SO and MC++ has called back
+ // into the runtime on this thread
+
+ FinishSOWork();
+
+ if (IsAbortRequested() && GetAbortEndTime() < CLRGetTickCount64())
+ {
+ HandleThreadAbortTimeout();
+ }
+
+ // @TODO: we should consider treating this function as an FCALL or HCALL and use FCThrow instead of COMPlusThrow
+
+ // Sometimes we call this without any CLR SEH in place. An example is UMThunkStubRareDisableWorker.
+ // That's okay since COMPlusThrow will eventually erect SEH around the RaiseException. It prevents
+ // us from stating CONTRACT here.
+
+ if (fForce || ReadyForAbort())
+ {
+ ResetThreadState ((ThreadState)(TS_Interrupted | TS_Interruptible));
+ // We are going to abort. Abort satisfies Thread.Interrupt requirement.
+ FastInterlockExchange (&m_UserInterrupt, 0);
+
+ // Generate either a regular ThreadAbort or a rude ThreadAbort exception.
+ STRESS_LOG1(LF_APPDOMAIN, LL_INFO100, "Thread::HandleThreadAbort throwing abort for %x\n", GetThreadId());
+
+ GCX_COOP_NO_DTOR();
+
+ // Cannot use a holder. GCX_COOP forces the thread back to the original state during
+ // exception unwinding, which may put the thread back to cooperative mode.
+ // GCX_COOP();
+
+ if (!IsAbortInitiated() ||
+ (IsRudeAbort() && !IsRudeAbortInitiated()))
+ {
+ PreWorkForThreadAbort();
+ }
+
+ PreparingAbortHolder paHolder;
+
+ OBJECTREF exceptObj;
+
+ if (IsRudeAbort())
+ {
+ exceptObj = CLRException::GetPreallocatedRudeThreadAbortException();
+ }
+ else
+ {
+ EEException eeExcept(kThreadAbortException);
+ exceptObj = CLRException::GetThrowableFromException(&eeExcept);
+ }
+
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_Abort);
+#endif
+ RaiseTheExceptionInternalOnly(exceptObj, FALSE);
+ }
+ END_SO_INTOLERANT_CODE;
+
+ END_PRESERVE_LAST_ERROR;
+}
+
+void Thread::PreWorkForThreadAbort()
+{
+ WRAPPER_NO_CONTRACT;
+
+ SetAbortInitiated();
+ // if an abort and interrupt happen at the same time (e.g. on a sleeping thread),
+ // the abort is favored. But we do need to reset the interrupt bits.
+ FastInterlockAnd((ULONG *) &m_State, ~(TS_Interruptible | TS_Interrupted));
+ ResetUserInterrupted();
+
+ if (IsRudeAbort() && !(m_AbortInfo & (TAI_ADUnloadAbort |
+ TAI_ADUnloadRudeAbort |
+ TAI_ADUnloadV1Abort)
+ )) {
+ if (HasLockInCurrentDomain()) {
+ AppDomain *pDomain = GetAppDomain();
+ // Cannot enable the following assertion.
+ // We may take the lock, but the lock will be released during exception backout.
+ //_ASSERTE(!pDomain->IsDefaultDomain());
+ EPolicyAction action = GetEEPolicy()->GetDefaultAction(OPR_ThreadRudeAbortInCriticalRegion, this);
+ switch (action)
+ {
+ case eRudeUnloadAppDomain:
+ if (!pDomain->IsDefaultDomain())
+ {
+ GetEEPolicy()->NotifyHostOnDefaultAction(OPR_ThreadRudeAbortInCriticalRegion,action);
+ pDomain->EnableADUnloadWorker(EEPolicy::ADU_Rude);
+ }
+ break;
+ case eExitProcess:
+ case eFastExitProcess:
+ case eRudeExitProcess:
+ case eDisableRuntime:
+ {
+ // We're about to exit the process; if we take an SO here we'll just exit faster, right?
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ GetEEPolicy()->NotifyHostOnDefaultAction(OPR_ThreadRudeAbortInCriticalRegion,action);
+ GetEEPolicy()->HandleExitProcessFromEscalation(action,HOST_E_EXITPROCESS_ADUNLOAD);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+}
+
+#if defined(STRESS_HEAP) && defined(_DEBUG)
+
+// This function is for GC stress testing. Before we enable preemptive GC, do a GC,
+// because a GC may happen at any time while the thread is in preemptive mode.
+void Thread::PerformPreemptiveGC()
+{
+ CONTRACTL {
+ NOTHROW;
+ DISABLED(GC_TRIGGERS); // I think this is actually wrong: prevents a p->c->p mode switch inside a NOTRIGGER region.
+ DEBUG_ONLY;
+ }
+ CONTRACTL_END;
+
+ if (IsAtProcessExit())
+ return;
+
+ if (!GCStressPolicy::IsEnabled() || !GCStress<cfg_transition>::IsEnabled())
+ return;
+
+ if (!GCHeap::IsGCHeapInitialized())
+ return;
+
+ if (!m_GCOnTransitionsOK
+#ifdef ENABLE_CONTRACTS
+ || RawGCNoTrigger()
+#endif
+ || g_fEEShutDown
+ || GCHeap::IsGCInProgress(TRUE)
+ || GCHeap::GetGCHeap()->GetGcCount() == 0 // Need something that works for isolated heap.
+ || ThreadStore::HoldingThreadStore())
+ return;
+
+ if (Thread::ThreadsAtUnsafePlaces())
+ return;
+
+#ifdef DEBUGGING_SUPPORTED
+ // Don't collect if the debugger is attach and either 1) there
+ // are any threads held at unsafe places or 2) this thread is
+ // under the control of the debugger's dispatch logic (as
+ // evidenced by having a non-NULL filter context.)
+ if ((CORDebuggerAttached() &&
+ (g_pDebugInterface->ThreadsAtUnsafePlaces() ||
+ (GetFilterContext() != NULL))))
+ return;
+#endif // DEBUGGING_SUPPORTED
+
+ _ASSERTE(m_fPreemptiveGCDisabled.Load() == 0); // we are in preemptive mode when we call this
+
+ m_GCOnTransitionsOK = FALSE;
+ {
+ GCX_COOP();
+ m_bGCStressing = TRUE;
+ GCHeap::GetGCHeap()->StressHeap();
+ m_bGCStressing = FALSE;
+ }
+ m_GCOnTransitionsOK = TRUE;
+}
+#endif // STRESS_HEAP && DEBUG
+
+// To leave cooperative mode and enter preemptive mode, if a GC is in progress, we
+// no longer care to suspend this thread. But if we are trying to suspend the thread
+// for other reasons (e.g. Thread.Suspend()), now is a good time.
+//
+// Note that it is possible for an N/Direct call to leave the EE without explicitly
+// enabling preemptive GC.
+void Thread::RareEnablePreemptiveGC()
+{
+ CONTRACTL {
+ NOTHROW;
+ DISABLED(GC_TRIGGERS); // I think this is actually wrong: prevents a p->c->p mode switch inside a NOTRIGGER region.
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ // @todo - Needs a hard SO probe
+ CONTRACT_VIOLATION(GCViolation|FaultViolation|SOToleranceViolation);
+
+ // If we have already received our PROCESS_DETACH during shutdown, there is only one thread in the
+ // process and no coordination is necessary.
+ if (IsAtProcessExit())
+ return;
+
+#ifdef _DEBUG
+ AddFiberInfo(ThreadTrackInfo_GCMode);
+#endif
+
+ // EnablePreemptiveGC already set us to preemptive mode before triggering the Rare path.
+ // Force other threads to see this update, since the Rare path implies that someone else
+ // is observing us (e.g. SuspendRuntime).
+
+ _ASSERTE (!m_fPreemptiveGCDisabled);
+
+ // Holding a spin lock in cooperative mode and transitioning to preemptive mode can deadlock the GC
+ _ASSERTE ((m_StateNC & Thread::TSNC_OwnsSpinLock) == 0);
+
+ FastInterlockOr (&m_fPreemptiveGCDisabled, 0);
+
+#if defined(STRESS_HEAP) && defined(_DEBUG)
+ if (!IsDetached())
+ PerformPreemptiveGC();
+#endif
+
+ STRESS_LOG1(LF_SYNC, LL_INFO100000, "RareEnablePreemptiveGC: entering. Thread state = %x\n", m_State.Load());
+ if (!ThreadStore::HoldingThreadStore(this))
+ {
+#ifdef FEATURE_HIJACK
+ // Remove any hijacks we might have.
+ UnhijackThread();
+#endif // FEATURE_HIJACK
+
+ // wake up any threads waiting to suspend us, like the GC thread.
+ SetSafeEvent();
+ ThreadSuspend::g_pGCSuspendEvent->Set();
+
+ // for GC, the fact that we are leaving the EE means that it no longer needs to
+ // suspend us. But if we are doing a non-GC suspend, we need to block now.
+ // Give the debugger precedence over user suspensions:
+ while (m_State & (TS_DebugSuspendPending | TS_UserSuspendPending))
+ {
+#ifdef DEBUGGING_SUPPORTED
+ // We don't notify the debugger that this thread is now suspended. We'll just
+ // let the debugger's helper thread sweep and pick it up.
+ // We also never take the TSL in here either.
+ // Life's much simpler this way...
+
+
+#if !defined(FEATURE_CORECLR) // simple hosting
+ // Check to see if there's an IDbgThreadControl interface that needs
+ // notification of the suspension
+ if (m_State & TS_DebugSuspendPending)
+ {
+ IDebuggerThreadControl *pDbgThreadControl = CorHost::GetDebuggerThreadControl();
+
+ if (pDbgThreadControl)
+ pDbgThreadControl->ThreadIsBlockingForDebugger();
+
+ }
+#endif // !defined(FEATURE_CORECLR)
+#endif // DEBUGGING_SUPPORTED
+
+#ifdef LOGGING
+#if !defined(FEATURE_CORECLR) // simple hosting
+ if (!CorHost::IsDebuggerSpecialThread(GetThreadId()))
+#endif // !defined(FEATURE_CORECLR)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "[0x%x] SUSPEND: suspended while enabling gc.\n", GetThreadId()));
+ }
+#if !defined(FEATURE_CORECLR) // simple hosting
+ else
+ {
+ LOG((LF_CORDB, LL_INFO1000,
+ "[0x%x] ALERT: debugger special thread did not suspend while enabling gc.\n", GetThreadId()));
+ }
+#endif // !defined(FEATURE_CORECLR)
+#endif
+
+ WaitSuspendEvents(); // sets bits, too
+
+ }
+ }
+ STRESS_LOG0(LF_SYNC, LL_INFO100000, " RareEnablePreemptiveGC: leaving.\n");
+}
+
+
+// Called out of CommonTripThread, we are passing through a Safe spot. Do the right
+// thing with this thread. This may involve waiting for the GC to complete, or
+// performing a pending suspension.
+void Thread::PulseGCMode()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(this == GetThread());
+
+ if (PreemptiveGCDisabled() && CatchAtSafePoint())
+ {
+ EnablePreemptiveGC();
+ DisablePreemptiveGC();
+ }
+}
+
+
+// Indicate whether threads should be trapped when returning to the EE (i.e. disabling
+// preemptive GC mode)
+Volatile<LONG> g_fTrapReturningThreadsLock;
+void ThreadStore::TrapReturningThreads(BOOL yes)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ // Make sure that a thread doesn't get suspended while holding g_fTrapReturningThreadsLock:
+ // if a suspended thread held this lock and the suspending thread then called in
+ // here (which it does), the suspending thread would deadlock, causing the suspension
+ // as a whole to deadlock
+ ForbidSuspendThreadHolder suspend;
+
+ DWORD dwSwitchCount = 0;
+ while (1 == FastInterlockExchange(&g_fTrapReturningThreadsLock, 1))
+ {
+ // We can't forbid suspension while we are sleeping and don't hold the lock.
+ // This will trigger an assert on SQLCLR, but it is a general issue.
+ suspend.Release();
+ __SwitchToThread(0, ++dwSwitchCount);
+ suspend.Acquire();
+ }
+
+ if (yes)
+ {
+#ifdef _DEBUG
+ CounterHolder trtHolder(&g_trtChgInFlight);
+ FastInterlockIncrement(&g_trtChgStamp);
+#endif
+
+ FastInterlockIncrement (&g_TrapReturningThreads);
+#ifdef ENABLE_FAST_GCPOLL_HELPER
+ EnableJitGCPoll();
+#endif
+ _ASSERTE(g_TrapReturningThreads > 0);
+
+#ifdef _DEBUG
+ trtHolder.Release();
+#endif
+ }
+ else
+ {
+ FastInterlockDecrement (&g_TrapReturningThreads);
+#ifdef ENABLE_FAST_GCPOLL_HELPER
+ if (0 == g_TrapReturningThreads)
+ DisableJitGCPoll();
+#endif
+ _ASSERTE(g_TrapReturningThreads >= 0);
+ }
+#ifdef ENABLE_FAST_GCPOLL_HELPER
+ //Ensure that we flush the cache line containing the GC Poll Helper.
+ MemoryBarrier();
+#endif //ENABLE_FAST_GCPOLL_HELPER
+ g_fTrapReturningThreadsLock = 0;
+
+}
+
+#ifdef FEATURE_HIJACK
+
+#ifdef _TARGET_X86_
+//****************************************************************************************
+// This will check who caused the exception. If it was caused by the redirect function,
+// the intent is to resume the thread back at the point where it was originally
+// redirected. If the exception was not caused by that function, then it was caused by the call
+// out to the I[GC|Debugger]ThreadControl client and we need to determine if it's an
+// exception that we can just eat and let the runtime resume the thread, or if it's an
+// uncatchable exception that we need to pass on to the runtime.
+//
+int RedirectedHandledJITCaseExceptionFilter(
+ PEXCEPTION_POINTERS pExcepPtrs, // Exception data
+ RedirectedThreadFrame *pFrame, // Frame on stack
+ BOOL fDone, // Whether redirect completed without exception
+ CONTEXT *pCtx) // Saved context
+{
+ // !!! Do not use a non-static contract here.
+ // !!! Contract may insert an exception handling record.
+ // !!! This function assumes that GetCurrentSEHRecord() returns the exception record set up in
+ // !!! Thread::RedirectedHandledJITCase
+ //
+ // !!! Do not use an object with dtor, since it injects a fs:0 entry.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+
+ if (pExcepPtrs->ExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW)
+ {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ // Get the thread handle
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+
+
+ STRESS_LOG2(LF_SYNC, LL_INFO100, "In RedirectedHandledJITCaseExceptionFilter fDone = %d pFrame = %p\n", fDone, pFrame);
+
+ // If we get here via COM+ exception, gc-mode is unknown. We need it to
+ // be cooperative for this function.
+ GCX_COOP_NO_DTOR();
+
+ // If the exception was due to the called client, then we need to figure out if it
+ // is an exception that can be eaten or if it needs to be handled elsewhere.
+ if (!fDone)
+ {
+ if (pExcepPtrs->ExceptionRecord->ExceptionFlags & EXCEPTION_NONCONTINUABLE)
+ {
+ return (EXCEPTION_CONTINUE_SEARCH);
+ }
+
+ // Get the latest thrown object
+ OBJECTREF throwable = CLRException::GetThrowableFromExceptionRecord(pExcepPtrs->ExceptionRecord);
+
+ // If this is an uncatchable exception, then let the exception be handled elsewhere
+ if (IsUncatchable(&throwable))
+ {
+ pThread->EnablePreemptiveGC();
+ return (EXCEPTION_CONTINUE_SEARCH);
+ }
+ }
+#ifdef _DEBUG
+ else
+ {
+ _ASSERTE(pExcepPtrs->ExceptionRecord->ExceptionCode == EXCEPTION_HIJACK);
+ }
+#endif
+
+ // Unlink the frame in preparation for resuming in managed code
+ pFrame->Pop();
+
+ // Copy the saved context record into the EH context;
+ ReplaceExceptionContextRecord(pExcepPtrs->ContextRecord, pCtx);
+
+ if (pThread->GetSavedRedirectContext())
+ {
+ delete pCtx;
+ }
+ else
+ {
+ // Save it for future use to avoid repeatedly new'ing
+ pThread->SetSavedRedirectContext(pCtx);
+ }
+
+ /////////////////////////////////////////////////////////////////////////////
+ // NOTE: Ugly, ugly workaround.
+ // We need to resume the thread into the managed code where it was redirected,
+ // and the corresponding ESP is below the current one. But C++ expects that
+ // on an EXCEPTION_CONTINUE_EXECUTION that the ESP will be above where it has
+ // installed the SEH handler. To solve this, we need to remove all handlers
+ // that reside above the resumed ESP, but we must leave the OS-installed
+ // handler at the top, so we grab the top SEH handler, call
+ // PopSEHRecords which will remove all SEH handlers above the target ESP and
+ // then link the OS handler back in with SetCurrentSEHRecord.
+
+ // Get the special OS handler and save it until PopSEHRecords is done
+ EXCEPTION_REGISTRATION_RECORD *pCurSEH = GetCurrentSEHRecord();
+
+ // Unlink all records above the target resume ESP
+ PopSEHRecords((LPVOID)(size_t)pCtx->Esp);
+
+ // Link the special OS handler back in to the top
+ pCurSEH->Next = GetCurrentSEHRecord();
+
+ // Register the special OS handler as the top handler with the OS
+ SetCurrentSEHRecord(pCurSEH);
+
+ // Resume execution at point where thread was originally redirected
+ return (EXCEPTION_CONTINUE_EXECUTION);
+}
+#endif // _TARGET_X86_
+
+void RedirectedThreadFrame::ExceptionUnwind()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, "In RedirectedThreadFrame::ExceptionUnwind pFrame = %p\n", this);
+
+ Thread* pThread = GetThread();
+
+ if (pThread->GetSavedRedirectContext())
+ {
+ delete m_Regs;
+ }
+ else
+ {
+ // Save it for future use to avoid repeatedly new'ing
+ pThread->SetSavedRedirectContext(m_Regs);
+ }
+
+ m_Regs = NULL;
+}
+
+void NotifyHostOnGCSuspension()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ CorHost2::GetHostGCManager()->ThreadIsBlockingForSuspension();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+}
+
+// This function is called from the assembly functions used to redirect a thread. It must not cause
+// an exception (except SO).
+extern "C" PCONTEXT __stdcall GetCurrentSavedRedirectContext()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD dwLastError = GetLastError();
+ PCONTEXT pContext = GetThread()->GetSavedRedirectContext();
+ SetLastError(dwLastError);
+
+ return pContext;
+}
+
+void __stdcall Thread::RedirectedHandledJITCase(RedirectReason reason)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ // We must preserve this in case we've interrupted an IL pinvoke stub before it
+ // was able to save the error.
+ DWORD dwLastError = GetLastError();
+
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+
+#ifdef FEATURE_STACK_PROBE
+ if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
+ {
+ RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread);
+ }
+#endif
+
+ BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
+
+ // Get the saved context
+ CONTEXT *pCtx = pThread->GetSavedRedirectContext();
+ _ASSERTE(pCtx);
+
+ INDEBUG(Thread::ObjectRefFlush(pThread));
+
+ // Create a frame on the stack
+ FrameWithCookie<RedirectedThreadFrame> frame(pCtx);
+
+ STRESS_LOG5(LF_SYNC, LL_INFO1000, "In RedirectedHandledJITcase reason 0x%x pFrame = %p pc = %p sp = %p fp = %p", reason, &frame, GetIP(pCtx), GetSP(pCtx), GetFP(pCtx));
+
+#ifdef _TARGET_X86_
+ // This will indicate to the exception filter whether or not the exception is caused
+ // by us or the client.
+ BOOL fDone = FALSE;
+ int filter_count = 0; // A counter to avoid a nasty case where an
+ // up-stack filter throws another exception
+ // causing our filter to be run again for
+ // some unrelated exception.
+
+ __try
+#endif // _TARGET_X86_
+ {
+ // Make sure this thread doesn't reuse the context memory in re-entrancy cases
+ _ASSERTE(pThread->GetSavedRedirectContext() != NULL);
+ pThread->SetSavedRedirectContext(NULL);
+
+ // Link in the frame
+ frame.Push();
+
+#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) // GCCOVER
+ if (reason == RedirectReason_GCStress)
+ {
+ _ASSERTE(pThread->PreemptiveGCDisabledOther());
+ DoGcStress(frame.GetContext(), NULL);
+ }
+ else
+#endif // HAVE_GCCOVER && USE_REDIRECT_FOR_GCSTRESS
+ {
+ // Enable PGC before calling out to the client to allow runtime suspend to finish
+ GCX_PREEMP_NO_DTOR();
+
+ // <REVISIT_TODO>@TODO: Is this necessary? Does debugger wait on the events, or does it just
+ // poll every so often?</REVISIT_TODO>
+ // Notify the thread that is performing the suspension that this thread
+ // is now in PGC mode and that it can remove this thread from the list of
+ // threads it needs to wait for.
+ pThread->SetSafeEvent();
+
+ // Notify the interface of the pending suspension
+ switch (reason) {
+ case RedirectReason_GCSuspension:
+#if !defined(FEATURE_CORECLR) // simple hosting
+ if (CorHost::GetGCThreadControl())
+ CorHost::GetGCThreadControl()->ThreadIsBlockingForSuspension();
+ if (CLRGCHosted())
+ {
+ NotifyHostOnGCSuspension();
+ }
+#endif // !defined(FEATURE_CORECLR)
+ break;
+ case RedirectReason_DebugSuspension:
+#if !defined(FEATURE_CORECLR) // simple hosting
+ if (CorHost::GetDebuggerThreadControl() && CorHost::IsDebuggerSpecialThread(pThread->GetThreadId()))
+ CorHost::GetDebuggerThreadControl()->ThreadIsBlockingForDebugger();
+#endif // !defined(FEATURE_CORECLR)
+ break;
+ case RedirectReason_UserSuspension:
+ // Do nothing;
+ break;
+ case RedirectReason_YieldTask:
+ if (pThread->IsYieldRequested())
+ {
+ __SwitchToThread(0, CALLER_LIMITS_SPINNING);
+ }
+ break;
+ default:
+ _ASSERTE(!"Invalid redirect reason");
+ break;
+ }
+
+ // Disable preemptive GC so we can unlink the frame
+ GCX_PREEMP_NO_DTOR_END();
+ }
+
+#ifdef _TARGET_X86_
+ pThread->HandleThreadAbort(); // Might throw an exception.
+
+ // Indicate that the call to the service went without an exception, and that
+ // we're raising our own exception to resume the thread to where it was
+ // redirected from
+ fDone = TRUE;
+
+ // Save the instruction pointer where we redirected last. This does not race with the check
+ // against this variable in HandledJitCase because the GC will not attempt to redirect the
+ // thread until the instruction pointer of this thread is back in managed code.
+ pThread->m_LastRedirectIP = GetIP(pCtx);
+ pThread->m_SpinCount = 0;
+
+ RaiseException(EXCEPTION_HIJACK, 0, 0, NULL);
+
+#else // _TARGET_X86_
+
+#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) // GCCOVER
+ //
+ // If GCStress interrupts an IL stub or inlined p/invoke while it's running in preemptive mode, it switches the mode to
+ // cooperative - but we will resume to preemptive below. We should not trigger an abort in that case, as it will fail
+ // due to the GC mode.
+ //
+ if (!pThread->m_fPreemptiveGCDisabledForGCStress)
+#endif
+ {
+
+ UINT_PTR uAbortAddr;
+ UINT_PTR uResumePC = (UINT_PTR)GetIP(pCtx);
+ CopyOSContext(pThread->m_OSContext, pCtx);
+ uAbortAddr = (UINT_PTR)COMPlusCheckForAbort();
+ if (uAbortAddr)
+ {
+ LOG((LF_EH, LL_INFO100, "thread abort in progress, resuming thread under control... (handled jit case)\n"));
+
+ CONSISTENCY_CHECK(CheckPointer(pCtx));
+
+ STRESS_LOG1(LF_EH, LL_INFO10, "resume under control: ip: %p (handled jit case)\n", uResumePC);
+
+ SetIP(pThread->m_OSContext, uResumePC);
+
+#if defined(_TARGET_ARM_)
+ // Save the original resume PC in Lr
+ pCtx->Lr = uResumePC;
+
+ // Since we have set a new IP, we have to clear conditional execution flags too.
+ ClearITState(pThread->m_OSContext);
+#endif // _TARGET_ARM_
+
+ SetIP(pCtx, uAbortAddr);
+ }
+ }
+
+ // Unlink the frame in preparation for resuming in managed code
+ frame.Pop();
+
+ {
+ // Free the context struct if we already have one cached
+ if (pThread->GetSavedRedirectContext())
+ {
+ CONTEXT* pCtxTemp = (CONTEXT*)_alloca(sizeof(CONTEXT));
+ memcpy(pCtxTemp, pCtx, sizeof(CONTEXT));
+ delete pCtx;
+ pCtx = pCtxTemp;
+ }
+ else
+ {
+ // Save it for future use to avoid repeatedly new'ing
+ pThread->SetSavedRedirectContext(pCtx);
+ }
+
+#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) // GCCOVER
+ if (pThread->m_fPreemptiveGCDisabledForGCStress)
+ {
+ pThread->EnablePreemptiveGC();
+ pThread->m_fPreemptiveGCDisabledForGCStress = false;
+ }
+#endif
+
+ LOG((LF_SYNC, LL_INFO1000, "Resuming execution with RtlRestoreContext\n"));
+
+ SetLastError(dwLastError);
+
+ RtlRestoreContext(pCtx, NULL);
+ }
+#endif // _TARGET_X86_
+ }
+#ifdef _TARGET_X86_
+ __except (++filter_count == 1
+ ? RedirectedHandledJITCaseExceptionFilter(GetExceptionInformation(), &frame, fDone, pCtx)
+ : EXCEPTION_CONTINUE_SEARCH)
+ {
+ _ASSERTE(!"Reached body of __except in Thread::RedirectedHandledJITCase");
+ }
+
+#endif // _TARGET_X86_
+
+ END_CONTRACT_VIOLATION;
+
+}
+//****************************************************************************************
+// This helper is called when a thread is suspended in managed code at a sequence point while
+// the runtime is being suspended and there is a client interested in re-assigning the thread to
+// do interesting work while the runtime is suspended. This will call into the client,
+// notifying it that the thread will be suspended for a runtime suspension.
+//
+void __stdcall Thread::RedirectedHandledJITCaseForDbgThreadControl()
+{
+ WRAPPER_NO_CONTRACT;
+ RedirectedHandledJITCase(RedirectReason_DebugSuspension);
+}
+
+
+//****************************************************************************************
+// This helper is called when a thread is suspended in managed code at a sequence point while
+// suspending the runtime.
+//
+// We do this because the obvious code sequence:
+//
+// SuspendThread(t1);
+// GetContext(t1, &ctx);
+// ctx.Ecx = <some new value>;
+// SetContext(t1, &ctx);
+// ResumeThread(t1);
+//
+// simply does not work due to a nasty race with exception handling in the OS. If the
+// thread that is suspended has just faulted, then the update can disappear without ever
+// modifying the real thread ... and there is no way to tell.
+//
+// Updating the EIP may not work ... but when it doesn't, we're OK ... an exception ends
+// up getting dispatched anyway.
+//
+// If the host is interested in getting control, then we give control to the host. If the
+// host is not interested in getting control, then we simply notify the host. After that,
+// we raise an exception and will end up waiting for the GC to finish inside the filter.
+//
+void __stdcall Thread::RedirectedHandledJITCaseForGCThreadControl()
+{
+ WRAPPER_NO_CONTRACT;
+ RedirectedHandledJITCase(RedirectReason_GCSuspension);
+}
+
+//***********************
+// Like the above, but called for a UserSuspend.
+//
+void __stdcall Thread::RedirectedHandledJITCaseForUserSuspend()
+{
+ WRAPPER_NO_CONTRACT;
+ RedirectedHandledJITCase(RedirectReason_UserSuspension);
+}
+
+//***********************
+// Like the above, but called for YieldTask.
+//
+void __stdcall Thread::RedirectedHandledJITCaseForYieldTask()
+{
+ WRAPPER_NO_CONTRACT;
+ RedirectedHandledJITCase(RedirectReason_YieldTask);
+}
+
+#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) // GCCOVER
+
+//***********************
+// Like the above, but called for GC stress.
+//
+void __stdcall Thread::RedirectedHandledJITCaseForGCStress()
+{
+ WRAPPER_NO_CONTRACT;
+ RedirectedHandledJITCase(RedirectReason_GCStress);
+}
+
+#endif // HAVE_GCCOVER && USE_REDIRECT_FOR_GCSTRESS
+
+//****************************************************************************************
+// This will take a thread that's been suspended in managed code at a sequence point and
+// will Redirect the thread. It will save all register information, build a frame on the
+// thread's stack, put a pointer to the frame at the top of the stack and set the IP of
+// the thread to pTgt. pTgt is then responsible for unlinking the thread.
+//
+// NOTE: Cannot play with a suspended thread's stack memory, since the OS will use the
+// top of the stack to store information. The thread must be resumed to play with its
+// own stack.
+//
+
+#ifdef _TARGET_X86_
+#define CONTEXT_COMPLETE (CONTEXT_FULL | CONTEXT_FLOATING_POINT | \
+ CONTEXT_DEBUG_REGISTERS | CONTEXT_EXTENDED_REGISTERS | CONTEXT_EXCEPTION_REQUEST)
+#else
+#define CONTEXT_COMPLETE (CONTEXT_FULL | CONTEXT_EXCEPTION_REQUEST)
+#endif
+
+BOOL Thread::RedirectThreadAtHandledJITCase(PFN_REDIRECTTARGET pTgt)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(HandledJITCase());
+ _ASSERTE(GetThread() != this);
+
+ ////////////////////////////////////////////////////////////////
+ // Acquire a context structure to save the thread state into
+
+ // We need to distinguish between two types of callers:
+ // - Most callers, including GC, operate while holding the ThreadStore
+ // lock. This means that we can pre-allocate a context structure
+ // globally in the ThreadStore and use it in this function.
+ // - Some callers (currently only YieldTask) cannot take the ThreadStore
+ // lock. Therefore we always allocate a SavedRedirectContext in the
+ // Thread constructor. (Since YieldTask currently is the only caller
+ // that does not hold the ThreadStore lock, we only do this when
+ // we're hosted.)
+
+ // Check whether we have a SavedRedirectContext we can reuse:
+ CONTEXT *pCtx = GetSavedRedirectContext();
+
+ // If we've never allocated a context for this thread, do so now
+ if (!pCtx)
+ {
+ // If our caller took the ThreadStore lock, then it pre-allocated
+ // a context in the ThreadStore:
+ if (ThreadStore::HoldingThreadStore())
+ {
+ pCtx = ThreadStore::GrabOSContext();
+ }
+
+ if (!pCtx)
+ {
+ // Even when our caller is YieldTask, we can find a NULL
+ // SavedRedirectContext in this function: Consider the scenario
+ // where GC is in progress and has already redirected a thread.
+ // That thread will set its SavedRedirectContext to NULL to enable
+ // reentrancy. Now assume that the host calls YieldTask for the
+ // redirected thread. In this case, this function will simply
+ // fail, but that is fine: The redirected thread will check,
+ // before it resumes execution, whether it should yield.
+ return (FALSE);
+ }
+
+ // Save the pointer for the redirect function
+ _ASSERTE(GetSavedRedirectContext() == NULL);
+ SetSavedRedirectContext(pCtx);
+ }
+
+ //////////////////////////////////////
+ // Get and save the thread's context
+
+ // Always get complete context
+ pCtx->ContextFlags = CONTEXT_COMPLETE;
+ BOOL bRes = EEGetThreadContext(this, pCtx);
+ _ASSERTE(bRes && "Failed to GetThreadContext in RedirectThreadAtHandledJITCase - aborting redirect.");
+
+ if (!bRes)
+ return (FALSE);
+
+ if (!IsContextSafeToRedirect(pCtx))
+ return (FALSE);
+
+ if (CLRTaskHosted())
+ {
+ PCODE dwOrigEip = GetIP(pCtx);
+ if (!ExecutionManager::IsManagedCode(dwOrigEip))
+ return FALSE;
+ }
+
+ ////////////////////////////////////////////////////
+ // Now redirect the thread to the helper function
+
+ // Temporarily set the IP of the context to the target for SetThreadContext
+ PCODE dwOrigEip = GetIP(pCtx);
+#ifdef _TARGET_ARM_
+ // Redirection can be required when in IT Block.
+ // In that case must reset the IT state before redirection.
+ DWORD dwOrigCpsr = pCtx->Cpsr;
+ ClearITState(pCtx);
+#endif
+ _ASSERTE(ExecutionManager::IsManagedCode(dwOrigEip));
+ SetIP(pCtx, (PCODE)pTgt);
+
+
+ STRESS_LOG4(LF_SYNC, LL_INFO10000, "Redirecting thread %p(tid=%x) from address 0x%08x to address 0x%p\n",
+ this, this->GetThreadId(), dwOrigEip, pTgt);
+
+ bRes = EESetThreadContext(this, pCtx);
+ _ASSERTE(bRes && "Failed to SetThreadContext in RedirectThreadAtHandledJITCase - aborting redirect.");
+
+ // Restore original IP
+ SetIP(pCtx, dwOrigEip);
+#ifdef _TARGET_ARM_
+ // restore IT State in the context
+ pCtx->Cpsr = dwOrigCpsr;
+#endif
+
+
+ //////////////////////////////////////////////////
+ // Indicate whether or not the redirect succeeded
+
+ return (bRes);
+}
+
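+// Condensed sketch (illustrative only) of how SuspendRuntime, later in this
+// file, drives the routine above for each thread found in cooperative mode:
+#if 0 // sketch, not compiled
+ThreadStore::AllocateOSContext();                  // never allocate after suspending
+Thread::SuspendThreadResult str = thread->SuspendThread();
+if (str == Thread::STR_Success && thread->m_fPreemptiveGCDisabled)
+{
+    if (thread->HandledJITCase())                  // may hijack the return address instead
+        thread->CheckForAndDoRedirectForGC();      // points the IP at a redirect stub
+    thread->ResumeThread();                        // the stub then runs on the target thread
+}
+#endif
+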
+BOOL Thread::CheckForAndDoRedirect(PFN_REDIRECTTARGET pRedirectTarget)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(this != GetThread());
+ _ASSERTE(PreemptiveGCDisabledOther());
+ _ASSERTE(IsAddrOfRedirectFunc(pRedirectTarget));
+
+ BOOL fRes = FALSE;
+ fRes = RedirectThreadAtHandledJITCase(pRedirectTarget);
+ LOG((LF_GC, LL_INFO1000, "RedirectThreadAtHandledJITCase %s.\n", fRes ? "SUCCEEDED" : "FAILED"));
+
+ return (fRes);
+}
+
+
+BOOL Thread::RedirectCurrentThreadAtHandledJITCase(PFN_REDIRECTTARGET pTgt, CONTEXT *pCurrentThreadCtx)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // REVISIT_TODO need equivalent of this for the current thread
+ //_ASSERTE(HandledJITCase());
+
+ _ASSERTE(GetThread() == this);
+ _ASSERTE(PreemptiveGCDisabledOther());
+ _ASSERTE(IsAddrOfRedirectFunc(pTgt));
+ _ASSERTE(pCurrentThreadCtx);
+ _ASSERTE((pCurrentThreadCtx->ContextFlags & (CONTEXT_COMPLETE - CONTEXT_EXCEPTION_REQUEST))
+ == (CONTEXT_COMPLETE - CONTEXT_EXCEPTION_REQUEST));
+ _ASSERTE(ExecutionManager::IsManagedCode(GetIP(pCurrentThreadCtx)));
+
+ ////////////////////////////////////////////////////////////////
+ // Allocate a context structure to save the thread state into
+
+ // Check to see if we've already got memory allocated for this purpose.
+ CONTEXT *pCtx = GetSavedRedirectContext();
+
+ // If we've never allocated a context for this thread, do so now
+ if (!pCtx)
+ {
+ pCtx = new (nothrow) CONTEXT();
+
+ if (!pCtx)
+ return (FALSE);
+
+ // Save the pointer for the redirect function
+ _ASSERTE(GetSavedRedirectContext() == NULL);
+ SetSavedRedirectContext(pCtx);
+ }
+
+ //////////////////////////////////////
+ // Get and save the thread's context
+
+ CopyMemory(pCtx, pCurrentThreadCtx, sizeof(CONTEXT));
+
+ // Clear any new bits we don't understand (like XSAVE) in case we pass
+ // this context to RtlRestoreContext (like for gcstress)
+ pCtx->ContextFlags &= CONTEXT_ALL;
+
+ // Ensure that this flag is set for the next time through the normal path,
+ // RedirectThreadAtHandledJITCase.
+ pCtx->ContextFlags |= CONTEXT_EXCEPTION_REQUEST;
+
+ ////////////////////////////////////////////////////
+ // Now redirect the thread to the helper function
+
+ SetIP(pCurrentThreadCtx, (PCODE)pTgt);
+
+#ifdef _TARGET_ARM_
+ // Redirection can be required when in IT Block
+ // Clear the IT State before redirecting
+ ClearITState(pCurrentThreadCtx);
+#endif
+
+ //////////////////////////////////////////////////
+ // Indicate whether or not the redirect succeeded
+
+ return TRUE;
+}
+
+
+//************************************************************************
+// Exception handling needs to special case the redirection. So provide
+// a helper to identify redirection targets and keep the exception
+// checks in sync with the redirection here.
+// See CPFH_AdjustContextForThreadSuspensionRace for details.
+BOOL Thread::IsAddrOfRedirectFunc(void * pFuncAddr)
+{
+ WRAPPER_NO_CONTRACT;
+
+#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) // GCCOVER
+ if (pFuncAddr == GetRedirectHandlerForGCStress())
+ return TRUE;
+#endif // HAVE_GCCOVER && USE_REDIRECT_FOR_GCSTRESS
+
+ return
+ (pFuncAddr == GetRedirectHandlerForGCThreadControl()) ||
+ (pFuncAddr == GetRedirectHandlerForDbgThreadControl()) ||
+ (pFuncAddr == GetRedirectHandlerForUserSuspend()) ||
+ (pFuncAddr == GetRedirectHandlerForYieldTask());
+}
+
+//************************************************************************
+// Redirect thread at a GC suspension.
+BOOL Thread::CheckForAndDoRedirectForGC()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_GC, LL_INFO1000, "Redirecting thread %08x for GCThreadSuspension", GetThreadId()));
+ return CheckForAndDoRedirect(GetRedirectHandlerForGCThreadControl());
+}
+
+//************************************************************************
+// Redirect thread at a debug suspension.
+BOOL Thread::CheckForAndDoRedirectForDbg()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO1000, "Redirecting thread %08x for DebugSuspension", GetThreadId()));
+ return CheckForAndDoRedirect(GetRedirectHandlerForDbgThreadControl());
+}
+
+//*************************************************************************
+// Redirect thread at a user suspend.
+BOOL Thread::CheckForAndDoRedirectForUserSuspend()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_SYNC, LL_INFO1000, "Redirecting thread %08x for UserSuspension", GetThreadId()));
+ return CheckForAndDoRedirect(GetRedirectHandlerForUserSuspend());
+}
+
+//*************************************************************************
+// Redirect thread to make task yield.
+BOOL Thread::CheckForAndDoRedirectForYieldTask()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_SYNC, LL_INFO1000, "Redirecting thread %08x for YieldTask", GetThreadId()));
+ return CheckForAndDoRedirect(GetRedirectHandlerForYieldTask());
+}
+
+#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) // GCCOVER
+//*************************************************************************
+// Redirect thread at a GC stress point.
+BOOL Thread::CheckForAndDoRedirectForGCStress (CONTEXT *pCurrentThreadCtx)
+{
+ WRAPPER_NO_CONTRACT;
+
+ LOG((LF_CORDB, LL_INFO1000, "Redirecting thread %08x for GCStress", GetThreadId()));
+
+ m_fPreemptiveGCDisabledForGCStress = !PreemptiveGCDisabled();
+ GCX_COOP_NO_DTOR();
+
+ BOOL fSuccess = RedirectCurrentThreadAtHandledJITCase(GetRedirectHandlerForGCStress(), pCurrentThreadCtx);
+
+ if (!fSuccess)
+ {
+ GCX_COOP_NO_DTOR_END();
+ m_fPreemptiveGCDisabledForGCStress = false;
+ }
+
+ return fSuccess;
+}
+#endif // HAVE_GCCOVER && USE_REDIRECT_FOR_GCSTRESS
+
+#endif // FEATURE_HIJACK
+
+
+#ifdef PROFILING_SUPPORTED
+// Simple helper to convert the GC's SUSPEND_REASON enum to the profiling API's public
+// COR_PRF_SUSPEND_REASON enum. Used in code:Thread::SuspendRuntime to help with
+// sending the suspension event to the profiler.
+COR_PRF_SUSPEND_REASON GCSuspendReasonToProfSuspendReason(ThreadSuspend::SUSPEND_REASON gcReason)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch(gcReason)
+ {
+ default:
+ return COR_PRF_SUSPEND_OTHER;
+ case ThreadSuspend::SUSPEND_FOR_GC:
+ return COR_PRF_SUSPEND_FOR_GC;
+ case ThreadSuspend::SUSPEND_FOR_APPDOMAIN_SHUTDOWN:
+ return COR_PRF_SUSPEND_FOR_APPDOMAIN_SHUTDOWN;
+ case ThreadSuspend::SUSPEND_FOR_REJIT:
+ return COR_PRF_SUSPEND_FOR_REJIT;
+ case ThreadSuspend::SUSPEND_FOR_SHUTDOWN:
+ return COR_PRF_SUSPEND_FOR_SHUTDOWN;
+ case ThreadSuspend::SUSPEND_FOR_DEBUGGER:
+ return COR_PRF_SUSPEND_FOR_INPROC_DEBUGGER;
+ case ThreadSuspend::SUSPEND_FOR_GC_PREP:
+ return COR_PRF_SUSPEND_FOR_GC_PREP;
+ }
+}
+#endif // PROFILING_SUPPORTED
+
+//************************************************************************************
+// To support fast application switch (FAS), one requirement is that the CPU
+// consumption during the time the CLR is paused should be 0. Given that the process
+// will be suspended anyway, this should have been a no-op for the CLR. However, in Mango
+// we ensured that no handle timed out and no other such context switch happened
+// during the pause time. To match that, and also to ensure that between the
+// pause and the time the process is suspended (~60 sec) no context switch happens due to
+// CLR handles (like waits/sleeps due to calls from the BCL), we queue an APC on these
+// threads and make them wait on the resume handle
+void __stdcall PauseAPC(__in ULONG_PTR dwParam)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(g_IsPaused && (GetThread()->m_State & Thread::TS_Interruptible))
+ {
+ _ASSERTE(g_ClrResumeEvent.IsValid());
+ EX_TRY {
+ g_ClrResumeEvent.Wait(INFINITE, FALSE);
+ }
+ EX_CATCH {
+ // Assert on debug builds
+ _ASSERTE(FALSE);
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+}
+
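+// Sketch of the queuing side (an assumption -- the code that queues this APC is
+// not in this file): the pause path presumably walks the thread store and queues
+// PauseAPC onto each interruptible thread via the Win32 APC mechanism:
+#if 0 // sketch, not compiled
+Thread *pThread = NULL;
+while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+{
+    if (pThread->m_State & Thread::TS_Interruptible)
+        QueueUserAPC(PauseAPC, pThread->GetThreadHandle(), 0);
+}
+#endif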
+
+//************************************************************************************
+//
+// SuspendRuntime is responsible for ensuring that all managed threads reach a
+// "safe point." It returns when all threads are known to be in "preemptive" mode.
+// This is *only* called by ThreadSuspend::SuspendEE; these two methods should really
+// be refactored into a separate "managed execution lock."
+//
+// Note that we use this method for more than just GC suspension. We also suspend
+// for debugging, etc.
+//
+// The basic algorithm is this:
+//
+// while there are threads in cooperative mode:
+// for each thread in cooperative mode:
+// suspend the native thread.
+// if it's still in cooperative mode, and it's running JIT'd code:
+// Redirect/hijack the thread
+//
+// Redirection vs. Hijacking:
+//
+// JIT'd code does not generally poll to see if a GC wants to run. Instead, the JIT
+// records "GC info" describing where the "safe points" are in the code. While we
+// have a native thread suspended in JIT'd code, we can see which instruction it
+// is currently executing. If that instruction is a safe point, then the GC may proceed.
+// Returning from a managed method is *always* a safe point, so if the thread is not
+// currently at a safe point we can "hijack" its return address. Once that is done,
+// if/when the method tries to return, the thread will be sent to a hijack routine
+// that will leave cooperative mode and wait for the GC to complete.
+//
+// If the thread is already at a safe point, you might think we could simply leave it
+// suspended and proceed with the GC. In principle, this should be what we do.
+// However, various historical OS bugs prevent this from working. The problem is that
+// we are not guaranteed to capture an accurate CONTEXT (register state) for a suspended
+// thread. So instead, we "redirect" the thread, by overwriting its instruction pointer.
+// We then resume the thread, and it immediately starts executing our "redirect" routine,
+// which leaves cooperative mode and waits for the GC to complete.
+//
+// See code:Thread#SuspendingTheRuntime for more
+HRESULT ThreadSuspend::SuspendRuntime(ThreadSuspend::SUSPEND_REASON reason)
+{
+ CONTRACTL {
+ NOTHROW;
+ if (GetThread())
+ {
+ GC_TRIGGERS; // CLREvent::Wait is GC_TRIGGERS
+ }
+ else
+ {
+ DISABLED(GC_TRIGGERS);
+ }
+ }
+ CONTRACTL_END;
+
+ // This thread
+ Thread *pCurThread = GetThread();
+
+ // The thread we're working on (suspending, etc.) right now.
+ Thread *thread = NULL;
+
+ // The number of threads we found in COOP mode.
+ LONG countThreads = 0;
+
+ DWORD res;
+
+ // Caller is expected to be holding the ThreadStore lock. Also, the caller must
+ // have set GcInProgress before coming here, or things will break.
+ _ASSERTE(ThreadStore::HoldingThreadStore() || IsAtProcessExit());
+ _ASSERTE(GCHeap::IsGCInProgress() );
+
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, "Thread::SuspendRuntime(reason=0x%x)\n", reason);
+
+#if !defined(FEATURE_CORECLR) // simple hosting
+ // Alert the host that a GC is starting, in case the host is scheduling threads
+ // for non-runtime tasks during GC.
+ IGCThreadControl *pGCThreadControl = CorHost::GetGCThreadControl();
+
+ if (pGCThreadControl)
+ pGCThreadControl->SuspensionStarting();
+
+ if (CLRGCHosted())
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ CorHost2::GetHostGCManager()->SuspensionStarting();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // !defined(FEATURE_CORECLR)
+
+#ifdef PROFILING_SUPPORTED
+ // If the profiler desires information about GCs, then let it know that one
+ // is starting.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackSuspends());
+ _ASSERTE(reason != ThreadSuspend::SUSPEND_FOR_DEBUGGER);
+ _ASSERTE(reason != ThreadSuspend::SUSPEND_FOR_DEBUGGER_SWEEP);
+
+ {
+ g_profControlBlock.pProfInterface->RuntimeSuspendStarted(
+ GCSuspendReasonToProfSuspendReason(reason));
+ }
+ if (pCurThread)
+ {
+ // Notify the profiler that the thread that is actually doing the GC is 'suspended',
+ // meaning that it is doing stuff other than run the managed code it was before the
+ // GC started.
+ g_profControlBlock.pProfInterface->RuntimeThreadSuspended((ThreadID)pCurThread);
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ //
+ // If this thread is running at low priority, boost its priority. We remember the old
+ // priority so that we can restore it in ResumeRuntime.
+ //
+ if (pCurThread) // concurrent GC occurs on threads we don't know about
+ {
+ _ASSERTE(pCurThread->m_Priority == INVALID_THREAD_PRIORITY);
+ int priority = pCurThread->GetThreadPriority();
+ if (priority < THREAD_PRIORITY_NORMAL)
+ {
+ pCurThread->m_Priority = priority;
+ pCurThread->SetThreadPriority(THREAD_PRIORITY_NORMAL);
+ }
+ }
+
+#ifdef FEATURE_STACK_PROBE
+ // If the CLR is hosted with an IHostTaskManager and an escalation policy for StackOverflow,
+ // we need to make sure a thread is never blocked with a small stack, because the thread cannot
+ // be moved off the scheduler in the host, and the scheduler may hold some resource needed by the
+ // suspending thread.
+
+ // If we need to handle SO, the GC will wait until a target thread has finished its LeaveRuntime call. At
+ // this point, the thread is off the scheduler. If it hits an SO, we will kill the process. If the thread hits an
+ // SO while calling LeaveRuntime, we treat this as an SO in managed code, and unload the domain instead.
+ BOOL fConsiderSO = (CLRTaskHosted() &&
+ GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain);
+#endif
+
+ // From this point until the end of the function, consider all active thread
+ // suspension to be in progress. This is mainly to give the profiler API a hint
+ // that trying to suspend a thread (in order to walk its stack) could delay the
+ // overall EE suspension. So the profiler API would early-abort the stackwalk
+ // in such a case.
+ SuspendRuntimeInProgressHolder hldSuspendRuntimeInProgress;
+
+
+ // Flush the store buffers on all CPUs, to ensure two things:
+ // - we get a reliable reading of the threads' m_fPreemptiveGCDisabled state
+ // - other threads see that g_TrapReturningThreads is set
+ // See VSW 475315 and 488918 for details.
+
+ ::FlushProcessWriteBuffers();
+
+ //
+ // Make a pass through all threads. We do a couple of things here:
+ // 1) we count the number of threads that are observed to be in cooperative mode.
+ // 2) for threads currently running managed code, we try to redirect/hijack them.
+ //
+ // Later we will make more passes where we do roughly the same thing. We should combine the two loops.
+ //
+
+ while ((thread = ThreadStore::GetThreadList(thread)) != NULL)
+ {
+ if (thread->HasThreadState(Thread::TS_GCSuspendPending))
+ {
+ thread->ResetThreadState(Thread::TS_GCSuspendPending);
+ }
+
+ if (thread == pCurThread)
+ continue;
+
+ STRESS_LOG3(LF_SYNC, LL_INFO10000, " Inspecting thread 0x%x ID 0x%x coop mode = %d\n",
+ thread, thread->GetThreadId(), thread->m_fPreemptiveGCDisabled.Load());
+
+ // Nothing confusing left over from last time.
+ _ASSERTE((thread->m_State & Thread::TS_GCSuspendPending) == 0);
+
+ // Threads can be in Preemptive or Cooperative GC mode. Threads cannot switch
+ // to Cooperative mode without special treatment when a GC is happening.
+ if (thread->m_fPreemptiveGCDisabled)
+ {
+ // Check a little more carefully. Threads might sneak out without telling
+ // us, because of inlined PInvoke which doesn't go through RareEnablePreemptiveGC.
+
+#ifdef DISABLE_THREADSUSPEND
+ // On platforms that do not support safe thread suspension we have
+ // to rely on the GCPOLL mechanism.
+
+ // When we do not suspend the target thread we rely on the GCPOLL
+ // mechanism enabled by TrapReturningThreads. However when reading
+ // shared state we need to erect appropriate memory barriers. So
+ // the interlocked operation below ensures that any future reads on
+ // this thread will happen after any earlier writes on a different
+ // thread.
+ //
+ // <TODO> Need more careful review of this </TODO>
+ //
+ FastInterlockOr(&thread->m_fPreemptiveGCDisabled, 0);
+
+ if (thread->m_fPreemptiveGCDisabled)
+ {
+ FastInterlockOr((ULONG *) &thread->m_State, Thread::TS_GCSuspendPending);
+ countThreads++;
+ }
+#else // DISABLE_THREADSUSPEND
+
+#ifdef FEATURE_HIJACK
+ DWORD dwSwitchCount = 0;
+ RetrySuspension:
+#endif
+
+ // We cannot allocate memory after we suspend a thread.
+ // Otherwise, we may deadlock the process, because the thread we just suspended
+ // might hold locks we would need to acquire while allocating.
+ ThreadStore::AllocateOSContext();
+
+#ifdef TIME_SUSPEND
+ DWORD startSuspend = g_SuspendStatistics.GetTime();
+#endif
+
+ //
+ // Suspend the native thread.
+ //
+ Thread::SuspendThreadResult str = thread->SuspendThread();
+
+ // We should just always build with this TIME_SUSPEND stuff, and report the results via ETW.
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.osSuspend.Accumulate(
+ SuspendStatistics::GetElapsed(startSuspend,
+ g_SuspendStatistics.GetTime()));
+
+ if (str == Thread::STR_Success)
+ g_SuspendStatistics.cntOSSuspendResume++;
+ else
+ g_SuspendStatistics.cntFailedSuspends++;
+#endif
+
+ if (str == Thread::STR_NoStressLog)
+ {
+ STRESS_LOG2(LF_SYNC, LL_ERROR, " ERROR: Could not suspend thread 0x%x, result = %d\n", thread, str);
+ }
+ else
+ if (thread->m_fPreemptiveGCDisabled)
+ {
+ // We now know for sure that the thread is still in cooperative mode. If it's in JIT'd code, here
+ // is where we try to hijack/redirect the thread. If it's in VM code, we have to just let the VM
+ // finish what it's doing.
+
+#ifdef FEATURE_HIJACK
+ // Only check for HandledJITCase if we actually suspended the thread.
+ if (str == Thread::STR_Success)
+ {
+ Thread::WorkingOnThreadContextHolder workingOnThreadContext(thread);
+
+ //
+ // Note that thread->HandledJITCase is not a simple predicate - it actually will hijack the thread if that's possible.
+ // So HandledJITCase can do one of these:
+ //
+ // - Return TRUE, in which case it's our responsibility to redirect the thread
+ // - Return FALSE after hijacking the thread - we shouldn't try to redirect
+ // - Return FALSE but not hijack the thread - there's nothing we can do either
+ //
+ // Here is another great opportunity for refactoring :)
+ //
+ if (workingOnThreadContext.Acquired() && thread->HandledJITCase())
+ {
+ // Redirect thread so we can capture a good thread context
+ // (GetThreadContext is not sufficient, due to an OS bug).
+ if (!thread->CheckForAndDoRedirectForGC())
+ {
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.cntFailedRedirections++;
+#endif
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, "Failed to CheckForAndDoRedirectForGC(). Retry suspension for thread %p\n", thread);
+ thread->ResumeThread();
+ __SwitchToThread(0, ++dwSwitchCount);
+ goto RetrySuspension;
+ }
+#ifdef TIME_SUSPEND
+ else
+ g_SuspendStatistics.cntRedirections++;
+#endif
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, "Thread::SuspendRuntime() - Thread %p redirected().\n", thread);
+ }
+ }
+#endif // FEATURE_HIJACK
+
+ FastInterlockOr((ULONG *) &thread->m_State, Thread::TS_GCSuspendPending);
+
+ countThreads++;
+
+ // Only resume if we actually suspended the thread above.
+ if (str == Thread::STR_Success)
+ thread->ResumeThread();
+
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, " Thread 0x%x is in cooperative mode, needs to rendezvous\n", thread);
+ }
+ else
+ if (str == Thread::STR_Success)
+ {
+ STRESS_LOG1(LF_SYNC, LL_WARNING, " Inspecting thread 0x%x was in cooperative, but now is not\n", thread);
+ // Oops.
+ thread->ResumeThread();
+ }
+ else
+ if (str == Thread::STR_SwitchedOut) {
+ STRESS_LOG1(LF_SYNC, LL_WARNING, " Inspecting thread 0x%x was in cooperative, but now is switched out\n", thread);
+ }
+ else {
+ _ASSERTE(str == Thread::STR_Failure || str == Thread::STR_UnstartedOrDead);
+ STRESS_LOG3(LF_SYNC, LL_ERROR, " ERROR: Could not suspend thread 0x%x, result = %d, lastError = 0x%x\n", thread, str, GetLastError());
+ }
+
+#endif // DISABLE_THREADSUSPEND
+
+ }
+ else
+ {
+ // To ensure 0 CPU utilization for FAS (see implementation of PauseAPC)
+ // we queue the APC to all interruptible threads.
+ if(g_IsPaused && (thread->m_State & Thread::TS_Interruptible))
+ {
+ HANDLE handle = thread->GetThreadHandle();
+ QueueUserAPC((PAPCFUNC)PauseAPC, handle, APC_Code);
+ }
+ }
+
+#ifdef FEATURE_STACK_PROBE
+ if (thread->m_fPreemptiveGCDisabled.Load() == 0 && fConsiderSO)
+ {
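+ // If the thread's newest explicit frame is within the backout probe distance
+ // of its last allowable stack address, it may be too low on stack to block
+ // safely, so make the GC wait for this thread as well.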
+ if ((UINT_PTR)thread->m_pFrame - thread->GetLastAllowableStackAddress() <
+ ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT) * OS_PAGE_SIZE)
+ {
+ if (!thread->HasThreadState(Thread::TS_GCSuspendPending))
+ {
+ thread->SetThreadState(Thread::TS_GCSuspendPending);
+ countThreads++;
+ STRESS_LOG2(LF_SYNC, LL_INFO1000, " Setting thread 0x%x ID 0x%x for GC (SO)\n",
+ thread, thread->GetThreadId());
+ }
+ }
+ }
+#endif
+ }
+
+#ifdef _DEBUG
+
+ {
+ int countCheck = 0;
+ Thread *InnerThread = NULL;
+
+ while ((InnerThread = ThreadStore::GetThreadList(InnerThread)) != NULL)
+ {
+ if (InnerThread != pCurThread &&
+ (InnerThread->m_State & Thread::TS_GCSuspendPending) != 0)
+ {
+ countCheck++;
+ }
+ }
+ _ASSERTE(countCheck == countThreads);
+ }
+
+#endif
+
+ //
+ // Now we keep retrying until we find that no threads are in cooperative mode. This should be merged into
+ // the first loop.
+ //
+ while (countThreads)
+ {
+ _ASSERTE (thread == NULL);
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, " A total of %d threads need to rendezvous\n", countThreads);
+ while ((thread = ThreadStore::GetThreadList(thread)) != NULL)
+ {
+ if (thread == pCurThread)
+ continue;
+
+ if (thread->HasThreadState(Thread::TS_BlockGCForSO))
+ {
+ // The thread is trying to block for GC, but it doesn't have enough stack
+ // for the operation.
+ // We will let the thread switch back to cooperative mode and continue running.
+ if (thread->m_fPreemptiveGCDisabled.Load() == 0)
+ {
+ if (!thread->HasThreadState(Thread::TS_GCSuspendPending))
+ {
+ thread->SetThreadState(Thread::TS_GCSuspendPending);
+ countThreads ++;
+ }
+ thread->ResetThreadState(Thread::TS_BlockGCForSO);
+ FastInterlockOr (&thread->m_fPreemptiveGCDisabled, 1);
+ }
+ continue;
+ }
+ if ((thread->m_State & Thread::TS_GCSuspendPending) == 0)
+ continue;
+
+ if (!thread->m_fPreemptiveGCDisabled)
+ {
+#ifdef FEATURE_STACK_PROBE
+ if (fConsiderSO && (UINT_PTR)thread->m_pFrame - thread->GetLastAllowableStackAddress() <
+ ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT) * OS_PAGE_SIZE)
+ {
+ // The thread is not ready for GC.
+ }
+ else
+#endif
+ {
+ // Inlined N/Direct can sneak out to preemptive without actually checking.
+ // If we find one, we can consider it suspended (since it can't get back in).
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, " Thread %x went preemptive; it is at a GC safe point\n", thread);
+ countThreads--;
+ thread->ResetThreadState(Thread::TS_GCSuspendPending);
+
+ // To ensure 0 CPU utilization for FAS (see implementation of PauseAPC)
+ // we queue the APC to all interruptible threads.
+ if(g_IsPaused && (thread->m_State & Thread::TS_Interruptible))
+ {
+ HANDLE handle = thread->GetThreadHandle();
+ QueueUserAPC((PAPCFUNC)PauseAPC, handle, APC_Code);
+ }
+ }
+ }
+ }
+
+ if (countThreads == 0)
+ {
+ break;
+ }
+
+#ifdef _DEBUG
+ DWORD dbgStartTimeout = GetTickCount();
+#endif
+
+ // If another thread is trying to do a GC, there is a chance of deadlock
+ // because this thread holds the threadstore lock and the GC thread is stuck
+ // trying to get it, so this thread must bail and do a retry after the GC completes.
+ //
+ // <REVISIT> Shouldn't we do this only if *this* thread isn't attempting a GC? We're mostly
+ // done suspending the EE at this point - why give up just because another thread wants
+ // to do exactly the same thing? Note that GetGCThreadAttemptingSuspend will never (AFAIK)
+ // return the current thread here, because we NULL it out after obtaining the thread store lock. </REVISIT>
+ //
+ if (m_pThreadAttemptingSuspendForGC != NULL && m_pThreadAttemptingSuspendForGC != pCurThread)
+ {
+#ifdef PROFILING_SUPPORTED
+ // Must let the profiler know that this thread is aborting its attempt at suspending
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackSuspends());
+ g_profControlBlock.pProfInterface->RuntimeSuspendAborted();
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ STRESS_LOG0(LF_SYNC, LL_ALWAYS, "Thread::SuspendRuntime() - Timing out.\n");
+ return (ERROR_TIMEOUT);
+ }
+
+#ifdef TIME_SUSPEND
+ DWORD startWait = g_SuspendStatistics.GetTime();
+#endif
+
+ //
+ // Wait for at least one thread to tell us it's left cooperative mode.
+ // We do this by waiting on g_pGCSuspendEvent. We cannot simply wait forever, because we
+ // might have done return-address hijacking on a thread, and that thread might not
+ // return from the method we hijacked (maybe it calls into some other managed code that
+ // executes a long loop, for example). So we wait with a timeout, and retry hijacking/redirection.
+ //
+ // This is unfortunate, because it means that in some cases we wait for PING_JIT_TIMEOUT
+ // milliseconds, causing long GC pause times.
+ //
+ // We should fix this, by calling SwitchToThread/Sleep(0) a few times before waiting on the event.
+ // This will not fix it 100% of the time (we may still have to wait on the event), but
+ // the event is needed to work around limitations of SwitchToThread/Sleep(0).
+ //
+ // For now, we simply wait.
+ //
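+ // A minimal sketch of that fix (hypothetical; kSpinYields is an assumed
+ // tuning constant, not an existing define):
+ //
+ //     for (DWORD i = 0; i < kSpinYields; i++)
+ //         __SwitchToThread(0, i);     // brief yields before blocking
+ //     res = g_pGCSuspendEvent->Wait(PING_JIT_TIMEOUT, FALSE);
+ //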
+
+ res = g_pGCSuspendEvent->Wait(PING_JIT_TIMEOUT, FALSE);
+
+
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.wait.Accumulate(
+ SuspendStatistics::GetElapsed(startWait,
+ g_SuspendStatistics.GetTime()));
+
+ g_SuspendStatistics.cntWaits++;
+ if (res == WAIT_TIMEOUT)
+ g_SuspendStatistics.cntWaitTimeouts++;
+#endif
+
+ if (res == WAIT_TIMEOUT || res == WAIT_IO_COMPLETION)
+ {
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, " Timed out waiting for rendezvous event %d threads remaining\n", countThreads);
+#ifdef _DEBUG
+ DWORD dbgEndTimeout = GetTickCount();
+
+ if ((dbgEndTimeout > dbgStartTimeout) &&
+ (dbgEndTimeout - dbgStartTimeout > g_pConfig->SuspendDeadlockTimeout()))
+ {
+ // Do not change this to _ASSERTE.
+ // We want to catch the state of the machine at the
+ // time when we cannot suspend some threads.
+ // It takes too long for _ASSERTE to stop the process.
+ DebugBreak();
+ _ASSERTE(!"Timed out trying to suspend EE due to thread");
+ char message[256];
+ _ASSERTE (thread == NULL);
+ while ((thread = ThreadStore::GetThreadList(thread)) != NULL)
+ {
+ if (thread == pCurThread)
+ continue;
+
+ if ((thread->m_State & Thread::TS_GCSuspendPending) == 0)
+ continue;
+
+ if (thread->m_fPreemptiveGCDisabled)
+ {
+ DWORD id = thread->m_OSThreadId;
+ if (id == 0xbaadf00d)
+ {
+ sprintf_s (message, COUNTOF(message), "Thread CLR ID=%x cannot be suspended",
+ thread->GetThreadId());
+ }
+ else
+ {
+ sprintf_s (message, COUNTOF(message), "Thread OS ID=%x cannot be suspended",
+ id);
+ }
+ DbgAssertDialog(__FILE__, __LINE__, message);
+ }
+ }
+ // if we continue from the assert we'll reset the time
+ dbgStartTimeout = GetTickCount();
+ }
+#endif
+
+#ifndef DISABLE_THREADSUSPEND
+ // all these threads should be in cooperative mode unless they have
+ // set their SafeEvent on the way out. But there's a race between
+ // when we time out and when they toggle their mode, so sometimes
+ // we will suspend a thread that has just left.
+ _ASSERTE (thread == NULL);
+ while ((thread = ThreadStore::GetThreadList(thread)) != NULL)
+ {
+ if (thread == pCurThread)
+ continue;
+
+ if ((thread->m_State & Thread::TS_GCSuspendPending) == 0)
+ continue;
+
+ if (!thread->m_fPreemptiveGCDisabled)
+ {
+ continue;
+ }
+
+#ifdef FEATURE_HIJACK
+ RetrySuspension2:
+#endif // FEATURE_HIJACK
+ // We cannot allocate memory after we suspend a thread.
+ // Otherwise, we may deadlock the process when CLR is hosted.
+ ThreadStore::AllocateOSContext();
+
+#ifdef TIME_SUSPEND
+ DWORD startSuspend = g_SuspendStatistics.GetTime();
+#endif
+
+ Thread::SuspendThreadResult str = thread->SuspendThread();
+
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.osSuspend.Accumulate(
+ SuspendStatistics::GetElapsed(startSuspend,
+ g_SuspendStatistics.GetTime()));
+
+ if (str == Thread::STR_Success)
+ g_SuspendStatistics.cntOSSuspendResume++;
+ else
+ g_SuspendStatistics.cntFailedSuspends++;
+#endif
+
+#ifdef FEATURE_HIJACK
+ // Only check HandledJITCase if we actually suspended the thread, and
+ // the thread is in cooperative mode.
+ // See comment at the previous invocation of HandledJITCase - it does
+ // more than you think!
+ if (str == Thread::STR_Success && thread->m_fPreemptiveGCDisabled)
+ {
+ Thread::WorkingOnThreadContextHolder workingOnThreadContext(thread);
+ if (workingOnThreadContext.Acquired() && thread->HandledJITCase())
+ {
+ // Redirect thread so we can capture a good thread context
+ // (GetThreadContext is not sufficient, due to an OS bug).
+ if (!thread->CheckForAndDoRedirectForGC())
+ {
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.cntFailedRedirections++;
+#endif
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, "Failed to CheckForAndDoRedirectForGC(). Retry suspension 2 for thread %p\n", thread);
+ thread->ResumeThread();
+ goto RetrySuspension2;
+ }
+#ifdef TIME_SUSPEND
+ else
+ g_SuspendStatistics.cntRedirections++;
+#endif
+ }
+ }
+#endif // FEATURE_HIJACK
+
+ if (str == Thread::STR_Success)
+ thread->ResumeThread();
+ }
+#endif // DISABLE_THREADSUSPEND
+ }
+ else
+ if (res == WAIT_OBJECT_0)
+ {
+ g_pGCSuspendEvent->Reset();
+ continue;
+ }
+ else
+ {
+ // No WAIT_FAILED, WAIT_ABANDONED, etc.
+ _ASSERTE(!"unexpected wait termination during gc suspension");
+ }
+ }
+
+#ifdef PROFILING_SUPPORTED
+ // If a profiler is keeping track of GC events, notify it
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackSuspends());
+ g_profControlBlock.pProfInterface->RuntimeSuspendFinished();
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+#ifdef _DEBUG
+ if (reason == ThreadSuspend::SUSPEND_FOR_GC) {
+ thread = NULL;
+ while ((thread = ThreadStore::GetThreadList(thread)) != NULL)
+ {
+ thread->DisableStressHeap();
+ _ASSERTE (!thread->HasThreadState(Thread::TS_GCSuspendPending));
+ }
+ }
+#endif
+
+ // We know all threads are in preemptive mode, so go ahead and reset the event.
+ g_pGCSuspendEvent->Reset();
+
+#ifdef HAVE_GCCOVER
+ //
+ // Now that the EE has been suspended, let's see if any outstanding
+ // gcstress instruction updates need to occur. Each thread can
+ // have only one pending at a time.
+ //
+ thread = NULL;
+ while ((thread = ThreadStore::GetThreadList(thread)) != NULL)
+ {
+ thread->CommitGCStressInstructionUpdate();
+ }
+#endif // HAVE_GCCOVER
+
+ STRESS_LOG0(LF_SYNC, LL_INFO1000, "Thread::SuspendRuntime() - Success\n");
+ return S_OK;
+}
+
+#ifdef HAVE_GCCOVER
+
+void Thread::CommitGCStressInstructionUpdate()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (HasPendingGCStressInstructionUpdate())
+ {
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
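+ // A single-byte store suffices here and is atomic, so the original opcode is
+ // restored in place.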
+ *m_pbDestCode = *m_pbSrcCode;
+
+#elif defined(_TARGET_ARM_)
+
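+ // Thumb-2 instructions are either 2 or 4 bytes wide; copy exactly the width
+ // of the instruction being restored so the neighboring code is untouched.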
+ if (GetARMInstructionLength(m_pbDestCode) == 2)
+ *(WORD*)m_pbDestCode = *(WORD*)m_pbSrcCode;
+ else
+ *(DWORD*)m_pbDestCode = *(DWORD*)m_pbSrcCode;
+
+#elif defined(_TARGET_ARM64_)
+
+ *(DWORD*)m_pbDestCode = *(DWORD*)m_pbSrcCode;
+
+#else
+
+ *m_pbDestCode = *m_pbSrcCode;
+
+#endif
+
+ ClearGCStressInstructionUpdate();
+ }
+}
+
+#endif // HAVE_GCCOVER
+
+
+#ifdef _DEBUG
+void EnableStressHeapHelper()
+{
+ WRAPPER_NO_CONTRACT;
+ ENABLESTRESSHEAP();
+}
+#endif
+
+// We're done with our GC. Let all the threads run again.
+// By this point we've already unblocked most threads. This just releases the ThreadStore lock.
+void ThreadSuspend::ResumeRuntime(BOOL bFinishedGC, BOOL SuspendSucceded)
+{
+ CONTRACTL {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ Thread *pCurThread = GetThread();
+
+ // Caller is expected to be holding the ThreadStore lock. But they must have
+ // reset GcInProgress, or threads will continue to suspend themselves and won't
+ // be resumed until the next GC.
+ _ASSERTE(IsGCSpecialThread() || ThreadStore::HoldingThreadStore());
+ _ASSERTE(!GCHeap::IsGCInProgress() );
+
+ STRESS_LOG2(LF_SYNC, LL_INFO1000, "Thread::ResumeRuntime(finishedGC=%d, SuspendSucceeded=%d) - Start\n", bFinishedGC, SuspendSucceded);
+
+ //
+ // Notify everyone who cares, that this suspension is over, and this thread is going to go do other things.
+ //
+
+#if !defined(FEATURE_CORECLR) // simple hosting
+ // Alert the host that a GC is ending, in case the host is scheduling threads
+ // for non-runtime tasks during GC.
+ IGCThreadControl *pGCThreadControl = CorHost::GetGCThreadControl();
+
+ if (pGCThreadControl)
+ {
+ // If the suspension was for a GC, tell the host which generation was collected.
+ DWORD Generation = (bFinishedGC
+ ? GCHeap::GetGCHeap()->GetCondemnedGeneration()
+ : ~0U);
+
+ pGCThreadControl->SuspensionEnding(Generation);
+ }
+
+ if (CLRGCHosted())
+ {
+ // If the suspension was for a GC, tell the host which generation was collected.
+ DWORD Generation = (bFinishedGC
+ ? GCHeap::GetGCHeap()->GetCondemnedGeneration()
+ : ~0U);
+
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ CorHost2::GetHostGCManager()->SuspensionEnding(Generation);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // !defined(FEATURE_CORECLR)
+
+#ifdef PROFILING_SUPPORTED
+ // Need to give resume event for the GC thread
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackSuspends());
+ if (pCurThread)
+ {
+ g_profControlBlock.pProfInterface->RuntimeThreadResumed((ThreadID)pCurThread);
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+#ifdef TIME_SUSPEND
+ DWORD startRelease = g_SuspendStatistics.GetTime();
+#endif
+
+ //
+ // Unlock the thread store. At this point, all threads should be allowed to run.
+ //
+ ThreadSuspend::UnlockThreadStore();
+
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.releaseTSL.Accumulate(SuspendStatistics::GetElapsed(startRelease,
+ g_SuspendStatistics.GetTime()));
+#endif
+
+#ifdef PROFILING_SUPPORTED
+ //
+ // This thread is logically "resuming" from a GC now. Tell the profiler.
+ //
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackSuspends());
+ GCX_PREEMP();
+ g_profControlBlock.pProfInterface->RuntimeResumeFinished();
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ //
+ // If we raised this thread's priority in SuspendRuntime, we restore it here.
+ //
+ if (pCurThread)
+ {
+ if (pCurThread->m_Priority != INVALID_THREAD_PRIORITY)
+ {
+ pCurThread->SetThreadPriority(pCurThread->m_Priority);
+ pCurThread->m_Priority = INVALID_THREAD_PRIORITY;
+ }
+ }
+
+ STRESS_LOG0(LF_SYNC, LL_INFO1000, "Thread::ResumeRuntime() - End\n");
+}
+
+#ifndef FEATURE_PAL
+#ifdef _TARGET_X86_
+//****************************************************************************************
+// This will resume the thread at the location of redirection.
+//
+int RedirectedThrowControlExceptionFilter(
+ PEXCEPTION_POINTERS pExcepPtrs // Exception data
+ )
+{
+ // !!! Do not use a non-static contract here.
+ // !!! Contract may insert an exception handling record.
+ // !!! This function assumes that GetCurrentSEHRecord() returns the exception record set up in
+ // !!! ThrowControlForThread
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+
+ if (pExcepPtrs->ExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW)
+ {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ // Get the thread handle
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+
+
+ STRESS_LOG0(LF_SYNC, LL_INFO100, "In RedirectedThrowControlExceptionFilter\n");
+
+ // If we get here via COM+ exception, gc-mode is unknown. We need it to
+ // be cooperative for this function.
+ _ASSERTE (pThread->PreemptiveGCDisabled());
+
+ _ASSERTE(pExcepPtrs->ExceptionRecord->ExceptionCode == BOOTUP_EXCEPTION_COMPLUS);
+
+ // Copy the saved context record into the EH context;
+ CONTEXT *pCtx = pThread->m_OSContext;
+ ReplaceExceptionContextRecord(pExcepPtrs->ContextRecord, pCtx);
+
+ /////////////////////////////////////////////////////////////////////////////
+ // NOTE: Ugly, ugly workaround.
+ // We need to resume the thread into the managed code where it was redirected,
+ // and the corresponding ESP is below the current one. But C++ expects that
+ // on an EXCEPTION_CONTINUE_EXECUTION that the ESP will be above where it has
+ // installed the SEH handler. To solve this, we need to remove all handlers
+ // that reside above the resumed ESP, but we must leave the OS-installed
+ // handler at the top, so we grab the top SEH handler, call
+ // PopSEHRecords which will remove all SEH handlers above the target ESP and
+ // then link the OS handler back in with SetCurrentSEHRecord.
+
+ // Get the special OS handler and save it until PopSEHRecords is done
+ EXCEPTION_REGISTRATION_RECORD *pCurSEH = GetCurrentSEHRecord();
+
+ // Unlink all records above the target resume ESP
+ PopSEHRecords((LPVOID)(size_t)pCtx->Esp);
+
+ // Link the special OS handler back in to the top
+ pCurSEH->Next = GetCurrentSEHRecord();
+
+ // Register the special OS handler as the top handler with the OS
+ SetCurrentSEHRecord(pCurSEH);
+
+ // Resume execution at point where thread was originally redirected
+ return (EXCEPTION_CONTINUE_EXECUTION);
+}
+#endif
+
+// Resume a thread at this location, to persuade it to throw a ThreadStop. The
+// exception handler needs a reasonable idea of how large this method is, so don't
+// add lots of arbitrary code here.
+void
+ThrowControlForThread(
+#ifdef WIN64EXCEPTIONS
+ FaultingExceptionFrame *pfef
+#endif // WIN64EXCEPTIONS
+ )
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread);
+ _ASSERTE(pThread->m_OSContext);
+
+ _ASSERTE(pThread->PreemptiveGCDisabled());
+
+#ifdef FEATURE_STACK_PROBE
+ if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
+ {
+ RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread);
+ }
+#endif
+
+ // Check if we can start abort
+ // We use InducedThreadRedirect as a marker to tell the stackwalker that a thread has been redirected from JIT code.
+ // This distinguishes a thread that is in preemptive mode from one that is in JIT code.
+ // After the stackcrawl, we change it to InducedThreadStop.
+ if (pThread->ThrewControlForThread() == Thread::InducedThreadRedirect ||
+ pThread->ThrewControlForThread() == Thread::InducedThreadRedirectAtEndOfCatch)
+ {
+ _ASSERTE((pThread->m_OSContext->ContextFlags & CONTEXT_ALL) == CONTEXT_ALL);
+ if (!pThread->ReadyForAbort())
+ {
+ STRESS_LOG0(LF_SYNC, LL_INFO100, "ThrowControlForThread resume\n");
+ pThread->ResetThrowControlForThread();
+ // Thread abort is not allowed at this point
+#ifndef WIN64EXCEPTIONS
+ __try{
+ RaiseException(BOOTUP_EXCEPTION_COMPLUS,0,0,NULL);
+ }
+ __except(RedirectedThrowControlExceptionFilter(GetExceptionInformation()))
+ {
+ _ASSERTE(!"Should not reach here");
+ }
+#else // WIN64EXCEPTIONS
+ RtlRestoreContext(pThread->m_OSContext, NULL);
+#endif // !WIN64EXCEPTIONS
+ _ASSERTE(!"Should not reach here");
+ }
+ pThread->SetThrowControlForThread(Thread::InducedThreadStop);
+ }
+
+#if defined(WIN64EXCEPTIONS)
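+ // The FaultingExceptionFrame was allocated by our caller; install its vtable
+ // pointer and GS cookie by hand before InitAndLink puts it on the frame chain.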
+ *(TADDR*)pfef = FaultingExceptionFrame::GetMethodFrameVPtr();
+ *pfef->GetGSCookiePtr() = GetProcessGSCookie();
+#else // WIN64EXCEPTIONS
+ FrameWithCookie<FaultingExceptionFrame> fef;
+ FaultingExceptionFrame *pfef = &fef;
+#endif // WIN64EXCEPTIONS
+ pfef->InitAndLink(pThread->m_OSContext);
+
+ // !!! Can not assert here. Sometimes our EHInfo for catch clause extends beyond
+ // !!! Jit_EndCatch. Not sure if we have guarantee on catch clause.
+ //_ASSERTE (pThread->ReadyForAbort());
+
+ STRESS_LOG0(LF_SYNC, LL_INFO100, "ThrowControlForThread Aborting\n");
+
+ // Here we raise an exception.
+ RaiseComPlusException();
+}
+#endif // !FEATURE_PAL
+
+#ifdef FEATURE_HIJACK
+// This function is called by UserAbort and StopEEAndUnwindThreads.
+// It forces a thread to abort if allowed and the thread is running managed code.
+BOOL Thread::HandleJITCaseForAbort()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(ThreadStore::HoldingThreadStore());
+
+ WorkingOnThreadContextHolder workingOnThreadContext(this);
+ if (!workingOnThreadContext.Acquired())
+ {
+ return FALSE;
+ }
+
+ _ASSERTE (m_fPreemptiveGCDisabled);
+
+ CONTEXT ctx;
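+ // CONTEXT_CONTROL gives us enough registers to find the IP;
+ // CONTEXT_EXCEPTION_REQUEST asks the OS to also report whether the thread is
+ // in an exception dispatch or a system call, so that IsContextSafeToRedirect
+ // can reject contexts that are unsafe to redirect.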
+ ctx.ContextFlags = CONTEXT_CONTROL | CONTEXT_EXCEPTION_REQUEST;
+ BOOL success = EEGetThreadContext(this, &ctx);
+ _ASSERTE(success && "Thread::HandleJITCaseForAbort : Failed to get thread context");
+
+ if (success)
+ {
+ success = IsContextSafeToRedirect(&ctx);
+ }
+
+ if (success)
+ {
+ PCODE curIP = GetIP(&ctx);
+
+ // check if this is code managed by the code manager (ie. in the code heap)
+ if (ExecutionManager::IsManagedCode(curIP))
+ {
+ return ResumeUnderControl(&ctx);
+ }
+ }
+
+ return FALSE;
+}
+
+// Threads suspended by the Win32 ::SuspendThread() are resumed in two ways. If we
+// suspended them in error, they are resumed via the Win32 ::ResumeThread(). But if
+// this is the HandledJIT() case and the thread is in fully interruptible code, we
+// can resume them under special control. ResumeRuntime and UserResume are cases
+// of this.
+//
+// The suspension has done its work (e.g. GC or user thread suspension). But during
+// the resumption we may have more that we want to do with this thread. For example,
+// there may be a pending ThreadAbort request. Instead of resuming the thread at its
+// current EIP, we tweak its resumption point via the thread context. Then it starts
+// executing at a new spot where we can have our way with it.
+
+BOOL Thread::ResumeUnderControl(CONTEXT *pCtx)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ BOOL fSuccess = FALSE;
+
+ LOG((LF_APPDOMAIN, LL_INFO100, "ResumeUnderControl %x\n", GetThreadId()));
+
+ BOOL fSucceeded;
+
+ m_OSContext->ContextFlags = CONTEXT_ALL | CONTEXT_EXCEPTION_REQUEST;
+ fSucceeded = EEGetThreadContext(this, m_OSContext);
+
+ if (fSucceeded)
+ {
+ if (GetIP(pCtx) != GetIP(m_OSContext))
+ {
+ return FALSE;
+ }
+ fSucceeded = IsContextSafeToRedirect(m_OSContext);
+ }
+
+ if (fSucceeded)
+ {
+ PCODE resumePC = GetIP(m_OSContext);
+ SetIP(m_OSContext, GetEEFuncEntryPoint(THROW_CONTROL_FOR_THREAD_FUNCTION));
+ SetThrowControlForThread(InducedThreadRedirect);
+ STRESS_LOG1(LF_SYNC, LL_INFO100, "ResumeUnderControl for Thread %p\n", this);
+
+#ifdef _TARGET_AMD64_
+ // We need to establish the return value on the stack in the redirection stub, to
+ // achieve crawlability. We use 'rcx' as the way to communicate the return value.
+ // However, we are going to crawl in ReadyForAbort and we are going to resume in
+ // ThrowControlForThread using m_OSContext. It's vital that the original correct
+ // Rcx is present at those times, or we will have corrupted Rcx at the point of
+ // resumption.
+ UINT_PTR keepRcx = m_OSContext->Rcx;
+
+ m_OSContext->Rcx = (UINT_PTR)resumePC;
+#endif // _TARGET_AMD64_
+
+#if defined(_TARGET_ARM_)
+ // We save the original ControlPC in LR on ARM.
+ UINT_PTR originalLR = m_OSContext->Lr;
+ m_OSContext->Lr = (UINT_PTR)resumePC;
+
+ // Since we have set a new IP, we have to clear conditional execution flags too.
+ UINT_PTR originalCpsr = m_OSContext->Cpsr;
+ ClearITState(m_OSContext);
+#endif // _TARGET_ARM_
+
+ EESetThreadContext(this, m_OSContext);
+
+#ifdef _TARGET_ARM_
+ // Restore the original LR now that the OS context has been updated to resume @ redirection function.
+ m_OSContext->Lr = originalLR;
+ m_OSContext->Cpsr = originalCpsr;
+#endif // _TARGET_ARM_
+
+#ifdef _TARGET_AMD64_
+ // and restore.
+ m_OSContext->Rcx = keepRcx;
+#endif // _TARGET_AMD64_
+
+ SetIP(m_OSContext, resumePC);
+
+ fSuccess = TRUE;
+ }
+#if _DEBUG
+ else
+ _ASSERTE(!"Couldn't obtain thread context -- StopRequest delayed");
+#endif
+ return fSuccess;
+}
+#endif // FEATURE_HIJACK
+
+
+PCONTEXT Thread::GetAbortContext ()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ LOG((LF_EH, LL_INFO100, "Returning abort context: %p\n", m_OSContext));
+ return m_OSContext;
+}
+
+
+//****************************************************************************
+// Return true if we've Suspended the runtime,
+// False if we still need to sweep.
+//****************************************************************************
+bool Thread::SysStartSuspendForDebug(AppDomain *pAppDomain)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Thread *pCurThread = GetThread();
+ Thread *thread = NULL;
+
+ if (IsAtProcessExit())
+ {
+ LOG((LF_CORDB, LL_INFO1000,
+ "SUSPEND: skipping suspend due to process detach.\n"));
+ return true;
+ }
+
+ LOG((LF_CORDB, LL_INFO1000, "[0x%x] SUSPEND: starting suspend. Trap count: %d\n",
+ pCurThread ? pCurThread->GetThreadId() : (DWORD) -1, g_TrapReturningThreads.Load()));
+
+ // Caller is expected to be holding the ThreadStore lock
+ _ASSERTE(ThreadStore::HoldingThreadStore() || IsAtProcessExit());
+
+#if !defined(FEATURE_CORECLR) // simple hosting
+ // If there is a debugging thread control object, tell it we're suspending the Runtime.
+ IDebuggerThreadControl *pDbgThreadControl = CorHost::GetDebuggerThreadControl();
+
+ if (pDbgThreadControl)
+ pDbgThreadControl->StartBlockingForDebugger(0);
+#endif // !defined(FEATURE_CORECLR)
+
+ // NOTE::NOTE::NOTE::NOTE::NOTE
+ // This function has parallel logic in SuspendRuntime. Please make
+ // sure to make appropriate changes there as well.
+
+ _ASSERTE(m_DebugWillSyncCount == -1);
+ m_DebugWillSyncCount++;
+
+ // From this point until the end of the function, consider all active thread
+ // suspension to be in progress. This is mainly to give the profiler API a hint
+ // that trying to suspend a thread (in order to walk its stack) could delay the
+ // overall EE suspension. So the profiler API would early-abort the stackwalk
+ // in such a case.
+ ThreadSuspend::SuspendRuntimeInProgressHolder hldSuspendRuntimeInProgress;
+
+ while ((thread = ThreadStore::GetThreadList(thread)) != NULL)
+ {
+#if 0
+//<REVISIT_TODO> @todo APPD This needs to be finished, replaced, or yanked --MiPanitz</REVISIT_TODO>
+ if (m_DebugAppDomainTarget != NULL &&
+ thread->GetDomain() != m_DebugAppDomainTarget)
+ {
+ continue;
+ }
+#endif
+
+ // Don't try to suspend threads that you've left suspended.
+ if (thread->m_StateNC & TSNC_DebuggerUserSuspend)
+ continue;
+
+ if (thread == pCurThread)
+ {
+ LOG((LF_CORDB, LL_INFO1000,
+ "[0x%x] SUSPEND: marking current thread.\n",
+ thread->GetThreadId()));
+
+ _ASSERTE(!thread->m_fPreemptiveGCDisabled);
+
+ // Mark this thread so it trips when it tries to re-enter
+ // after completing this call.
+ thread->SetupForSuspension(TS_DebugSuspendPending);
+ thread->MarkForSuspension(TS_DebugSuspendPending);
+ continue;
+ }
+
+ thread->SetupForSuspension(TS_DebugSuspendPending);
+
+ // Threads can be in Preemptive or Cooperative GC mode.
+ // Threads cannot switch to Cooperative mode without special
+ // treatment when a GC is happening. But they can certainly
+ // switch back and forth during a debug suspension -- until we
+ // can get their Pending bit set.
+
+#ifdef FEATURE_HIJACK
+ DWORD dwSwitchCount = 0;
+ RetrySuspension:
+#endif // FEATURE_HIJACK
+
+ // We cannot allocate memory after we suspend a thread.
+ // Otherwise, we may deadlock the process when CLR is hosted.
+ ThreadStore::AllocateOSContext();
+
+#ifdef DISABLE_THREADSUSPEND
+ // On platforms that do not support safe thread suspension we have
+ // to rely on the GCPOLL mechanism.
+
+ // When we do not suspend the target thread we rely on the GCPOLL
+ // mechanism enabled by TrapReturningThreads. However when reading
+ // shared state we need to erect appropriate memory barriers. So
+ // the interlocked operation below ensures that any future reads on
+ // this thread will happen after any earlier writes on a different
+ // thread.
+ SuspendThreadResult str = STR_Success;
+ FastInterlockOr(&thread->m_fPreemptiveGCDisabled, 0);
+#else
+ SuspendThreadResult str = thread->SuspendThread();
+#endif // DISABLE_THREADSUSPEND
+
+ if (thread->m_fPreemptiveGCDisabled && str == STR_Success)
+ {
+
+#ifdef FEATURE_HIJACK
+ WorkingOnThreadContextHolder workingOnThreadContext(thread);
+ if (workingOnThreadContext.Acquired() && thread->HandledJITCase())
+ {
+ // Redirect thread so we can capture a good thread context
+ // (GetThreadContext is not sufficient, due to an OS bug).
+ // If we don't succeed (should only happen on Win9X, due to
+ // a different OS bug), we must resume the thread and try
+ // again.
+ if (!thread->CheckForAndDoRedirectForDbg())
+ {
+ thread->ResumeThread();
+ __SwitchToThread(0, ++dwSwitchCount);
+ goto RetrySuspension;
+ }
+ }
+#endif // FEATURE_HIJACK
+
+ // Remember that this thread will be running to a safe point
+ FastInterlockIncrement(&m_DebugWillSyncCount);
+
+ // When the thread reaches a safe place, it will wait
+ // on the DebugSuspendEvent which clients can set when they
+ // want to release us.
+ thread->MarkForSuspension(TS_DebugSuspendPending |
+ TS_DebugWillSync
+ );
+
+#ifdef DISABLE_THREADSUSPEND
+ // There's a race above between the moment we first check m_fPreemptiveGCDisabled
+ // and the moment we enable TrapReturningThreads in MarkForSuspension. However,
+ // nothing bad happens if the thread has transitioned to preemptive before marking
+ // the thread for suspension; the thread will later be identified as Synced in
+ // SysSweepThreadsForDebug.
+#else // DISABLE_THREADSUSPEND
+ // Resume the thread and let it run to a safe point
+ thread->ResumeThread();
+#endif // DISABLE_THREADSUSPEND
+
+ LOG((LF_CORDB, LL_INFO1000,
+ "[0x%x] SUSPEND: gc disabled - will sync.\n",
+ thread->GetThreadId()));
+ }
+ else if (!thread->m_fPreemptiveGCDisabled)
+ {
+ // Mark threads that are outside the Runtime so that if
+ // they attempt to re-enter they will trip.
+ thread->MarkForSuspension(TS_DebugSuspendPending);
+
+#ifdef DISABLE_THREADSUSPEND
+ // There's a race above between the moment we first check m_fPreemptiveGCDisabled
+ // and the moment we enable TrapReturningThreads in MarkForSuspension. To account
+ // for that, we check whether the thread has moved into cooperative mode, and if it
+ // has, we mark it as a DebugWillSync thread, to be handled later in
+ // SysSweepThreadsForDebug.
+ if (thread->m_fPreemptiveGCDisabled)
+ {
+ // Remember that this thread will be running to a safe point
+ FastInterlockIncrement(&m_DebugWillSyncCount);
+ thread->SetThreadState(TS_DebugWillSync);
+ }
+#else // DISABLE_THREADSUSPEND
+ if (str == STR_Success) {
+ thread->ResumeThread();
+ }
+#endif // DISABLE_THREADSUSPEND
+
+ LOG((LF_CORDB, LL_INFO1000,
+ "[0x%x] SUSPEND: gc enabled.\n", thread->GetThreadId()));
+ }
+ }
+
+ //
+ // Return true if all threads are synchronized now; otherwise the
+ // debugger must wait for the SuspendComplete, called from the last
+ // thread to sync.
+ //
+
+ if (FastInterlockDecrement(&m_DebugWillSyncCount) < 0)
+ {
+ LOG((LF_CORDB, LL_INFO1000,
+ "SUSPEND: all threads sync before return.\n"));
+ return true;
+ }
+ else
+ return false;
+}
+
+//
+// This method is called by the debugger helper thread when it times out waiting for a set of threads to
+ // synchronize. It's used to chase down threads that are not synchronizing quickly. It returns true if all the threads are
+// now synchronized. This also means that we own the thread store lock.
+//
+// This can be safely called if we're already suspended.
+bool Thread::SysSweepThreadsForDebug(bool forceSync)
+{
+ CONTRACT(bool) {
+ NOTHROW;
+ DISABLED(GC_TRIGGERS); // WaitUntilConcurrentGCComplete toggles GC mode; disabled because this is called by an unmanaged thread
+
+ // We assume that only the "real" helper thread ever calls this (not somebody doing helper thread duty).
+ PRECONDITION(IsDbgHelperSpecialThread());
+ PRECONDITION(GetThread() == NULL);
+
+ // Iff we return true, then we have the TSL (or the aux lock used in workarounds).
+ POSTCONDITION(RETVAL == !!ThreadStore::HoldingThreadStore());
+ }
+ CONTRACT_END;
+
+ _ASSERTE(!forceSync); // deprecated parameter
+
+ Thread *thread = NULL;
+
+ // NOTE::NOTE::NOTE::NOTE::NOTE
+ // This function has parallel logic in SuspendRuntime. Please make
+ // sure to make appropriate changes there as well.
+
+ // We use ThreadSuspend::SUSPEND_FOR_DEBUGGER_SWEEP here to avoid a deadlock which
+ // can occur due to the s_hAbortEvt event. This event causes any thread trying
+ // to take the ThreadStore lock to wait for a GC to complete. If a thread is
+ // in SuspendEE for a GC and suspends for the debugger, then this thread will
+ // deadlock if we do not pass in SUSPEND_FOR_DEBUGGER_SWEEP here.
+ ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_FOR_DEBUGGER_SWEEP);
+
+ // From this point until the end of the function, consider all active thread
+ // suspension to be in progress. This is mainly to give the profiler API a hint
+ // that trying to suspend a thread (in order to walk its stack) could delay the
+ // overall EE suspension. So the profiler API would early-abort the stackwalk
+ // in such a case.
+ ThreadSuspend::SuspendRuntimeInProgressHolder hldSuspendRuntimeInProgress;
+
+ // Loop over the threads...
+ while (((thread = ThreadStore::GetThreadList(thread)) != NULL) && (m_DebugWillSyncCount >= 0))
+ {
+ // Skip threads that we aren't waiting for to sync.
+ if ((thread->m_State & TS_DebugWillSync) == 0)
+ continue;
+
+#ifdef DISABLE_THREADSUSPEND
+
+ // On platforms that do not support safe thread suspension we have
+ // to rely on the GCPOLL mechanism.
+
+ // When we do not suspend the target thread we rely on the GCPOLL
+ // mechanism enabled by TrapReturningThreads. However when reading
+ // shared state we need to erect appropriate memory barriers. So
+ // the interlocked operation below ensures that any future reads on
+ // this thread will happen after any earlier writes on a different
+ // thread.
+ FastInterlockOr(&thread->m_fPreemptiveGCDisabled, 0);
+ if (!thread->m_fPreemptiveGCDisabled)
+ {
+ // If the thread toggled to preemptive mode, then it's synced.
+ goto Label_MarkThreadAsSynced;
+ }
+ else
+ {
+ continue;
+ }
+
+#else // DISABLE_THREADSUSPEND
+ // Suspend the thread
+
+#ifdef FEATURE_HIJACK
+ DWORD dwSwitchCount = 0;
+#endif
+
+RetrySuspension:
+ // We cannot allocate memory after we suspend a thread.
+ // Otherwise, we may deadlock the process when CLR is hosted.
+ ThreadStore::AllocateOSContext();
+
+ SuspendThreadResult str = thread->SuspendThread();
+
+ if (str == STR_Failure || str == STR_UnstartedOrDead)
+ {
+ // The thread cannot actually be unstarted - if it was, we would not
+ // have marked it with TS_DebugWillSync in the first phase.
+ _ASSERTE(!(thread->m_State & TS_Unstarted));
+
+ // If the thread has gone, we can't wait on it.
+ goto Label_MarkThreadAsSynced;
+ }
+ else if (str == STR_SwitchedOut)
+ {
+ // The thread was switched out because of fiber-mode scheduling.
+ if (!thread->m_fPreemptiveGCDisabled)
+ {
+ goto Label_MarkThreadAsSynced;
+ }
+ else
+ {
+ goto RetrySuspension;
+ }
+ }
+ else if (str == STR_NoStressLog)
+ {
+ goto RetrySuspension;
+ }
+ else if (!thread->m_fPreemptiveGCDisabled)
+ {
+ // If the thread toggled to preemptive mode, then it's synced.
+
+ // We can safely resume the thread here b/c it's in PreemptiveMode and the
+ // EE will trap anybody trying to re-enter cooperative. So letting it run free
+ // won't hurt the runtime.
+ _ASSERTE(str == STR_Success);
+ thread->ResumeThread();
+
+ goto Label_MarkThreadAsSynced;
+ }
+#ifdef FEATURE_HIJACK
+ // If the thread is in jitted code, HandledJITCase will try to hijack it, and the
+ // hijack will toggle the GC mode.
+ else
+ {
+ _ASSERTE(str == STR_Success);
+ WorkingOnThreadContextHolder workingOnThreadContext(thread);
+ if (workingOnThreadContext.Acquired() && thread->HandledJITCase())
+ {
+ // Redirect thread so we can capture a good thread context
+ // (GetThreadContext is not sufficient, due to an OS bug).
+ // If we don't succeed (should only happen on Win9X, due to
+ // a different OS bug), we must resume the thread and try
+ // again.
+ if (!thread->CheckForAndDoRedirectForDbg())
+ {
+ thread->ResumeThread();
+ __SwitchToThread(0, ++dwSwitchCount);
+ goto RetrySuspension;
+ }
+
+ // The hijack will toggle our GC mode, and thus we could wait for the next sweep,
+ // and the GC-mode check above would catch and sync us. But there's no reason to wait:
+ // if the thread is hijacked, it's as good as synced, so mark it now.
+ thread->ResumeThread();
+ goto Label_MarkThreadAsSynced;
+ }
+ }
+#endif // FEATURE_HIJACK
+
+ // If we didn't take the thread out of the set, then resume it and give it another chance to reach a safe
+ // point.
+ thread->ResumeThread();
+ continue;
+
+#endif // DISABLE_THREADSUSPEND
+
+ // The thread is synced. Remove the sync bits and dec the sync count.
+Label_MarkThreadAsSynced:
+ FastInterlockAnd((ULONG *) &thread->m_State, ~TS_DebugWillSync);
+ if (FastInterlockDecrement(&m_DebugWillSyncCount) < 0)
+ {
+ // If that was the last thread, then the CLR is synced.
+ // We return while owning the thread store lock; returning true indicates this to the caller.
+ RETURN true;
+ }
+ continue;
+
+ } // end looping through Thread Store
+
+ if (m_DebugWillSyncCount < 0)
+ {
+ RETURN true;
+ }
+
+ // The CLR is not yet synced. We release the threadstore lock and return false.
+ hldSuspendRuntimeInProgress.Release();
+ ThreadSuspend::UnlockThreadStore();
+
+ RETURN false;
+}
+
+void Thread::SysResumeFromDebug(AppDomain *pAppDomain)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Thread *thread = NULL;
+
+ if (IsAtProcessExit())
+ {
+ LOG((LF_CORDB, LL_INFO1000,
+ "RESUME: skipping resume due to process detach.\n"));
+ return;
+ }
+
+ LOG((LF_CORDB, LL_INFO1000, "RESUME: starting resume AD:0x%x.\n", pAppDomain));
+
+#if !defined(FEATURE_CORECLR) // simple hosting
+ // Notify the client that it should release any threads that it had doing work
+ // while the runtime was debugger-suspended.
+ IDebuggerThreadControl *pIDTC = CorHost::GetDebuggerThreadControl();
+ if (pIDTC)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "RESUME: notifying IDebuggerThreadControl client.\n"));
+ pIDTC->ReleaseAllRuntimeThreads();
+ }
+#endif // !defined(FEATURE_CORECLR)
+
+ // Make sure we completed the previous sync
+ _ASSERTE(m_DebugWillSyncCount == -1);
+
+ // Caller is expected to be holding the ThreadStore lock
+ _ASSERTE(ThreadStore::HoldingThreadStore() || IsAtProcessExit());
+
+ while ((thread = ThreadStore::GetThreadList(thread)) != NULL)
+ {
+ // Only consider resuming threads if they're in the correct appdomain
+ if (pAppDomain != NULL && thread->GetDomain() != pAppDomain)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "RESUME: Not resuming thread 0x%x, since it's "
+ "in appdomain 0x%x.\n", thread, pAppDomain));
+ continue;
+ }
+
+ // If the user wants to keep the thread suspended, then
+ // don't release the thread.
+ if (!(thread->m_StateNC & TSNC_DebuggerUserSuspend))
+ {
+ // If we are still trying to suspend this thread, forget about it.
+ if (thread->m_State & TS_DebugSuspendPending)
+ {
+ LOG((LF_CORDB, LL_INFO1000,
+ "[0x%x] RESUME: TS_DebugSuspendPending was set, but will be removed\n",
+ thread->GetThreadId()));
+
+#ifdef _TARGET_ARM_
+ if (thread->IsSingleStepEnabled())
+ {
+ if (ISREDIRECTEDTHREAD(thread))
+ thread->ApplySingleStep(GETREDIRECTEDCONTEXT(thread));
+ }
+#endif
+ // Note: we unmark for suspension _then_ set the suspend event.
+ thread->ReleaseFromSuspension(TS_DebugSuspendPending);
+ }
+
+ }
+ else
+ {
+ // Thread will remain suspended due to a request from the debugger.
+
+ LOG((LF_CORDB,LL_INFO10000,"Didn't unsuspend thread 0x%x"
+ "(ID:0x%x)\n", thread, thread->GetThreadId()));
+ LOG((LF_CORDB,LL_INFO10000,"Suspending:0x%x\n",
+ thread->m_State & TS_DebugSuspendPending));
+ _ASSERTE((thread->m_State & TS_DebugWillSync) == 0);
+
+ }
+ }
+
+ LOG((LF_CORDB, LL_INFO1000, "RESUME: resume complete. Trap count: %d\n", g_TrapReturningThreads.Load()));
+}
+
+#ifndef FEATURE_CORECLR
+
+// Suspend a thread at the system level. We distinguish between user suspensions,
+// and system suspensions so that a VB program cannot resume a thread we have
+// suspended for GC.
+//
+// This service won't return until the suspension is complete. This deserves some
+// explanation. The thread is considered to be suspended if it can make no further
+// progress within the EE. For example, a thread that has exited the EE via
+// COM Interop or N/Direct is considered suspended -- if we've arranged it so that
+// the thread cannot return back to the EE without blocking.
+void Thread::UserSuspendThread()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ // Read the general comments on thread suspension earlier, to understand why we
+ // take these locks.
+
+ // GC can occur in here:
+ STRESS_LOG0(LF_SYNC, LL_INFO100, "UserSuspendThread obtain lock\n");
+ ThreadStoreLockHolder tsl;
+
+ // User suspensions (e.g. from VB and C#) are distinguished from internal
+ // suspensions so a poorly behaved program cannot resume a thread that the system
+ // has suspended for GC.
+ if (m_State & TS_UserSuspendPending)
+ {
+ // This thread is already experiencing a user suspension, so ignore the
+ // new request.
+ _ASSERTE(!ThreadStore::HoldingThreadStore(this));
+ }
+ else
+ if (this != GetThread())
+ {
+ // First suspension of a thread other than the current one.
+ if (m_State & TS_Unstarted)
+ {
+ // There is an important window in here. T1 can call T2.Start() and then
+ // T2.Suspend(). Suspend is disallowed on an unstarted thread. But from T1's
+ // point of view, T2 is started. In reality, T2 hasn't been scheduled by the
+ // OS, so it is still an unstarted thread. We don't want to perform a normal
+ // suspension on it in this case, because it is currently contributing to the
+ // PendingThreadCount. We want to get it fully started before we suspend it.
+ // This is particularly important if its background status is changing
+ // underneath us because otherwise we might not detect that the process should
+ // be exited at the right time.
+ //
+ // It turns out that this is a simple situation to implement. We are holding
+ // the ThreadStoreLock. TransferStartedThread will likewise acquire that
+ // lock. So if we detect it, we simply set a bit telling the thread to
+ // suspend itself. This is NOT the normal suspension request because we don't
+ // want the thread to suspend until it has fully started.
+ FastInterlockOr((ULONG *) &m_State, TS_SuspendUnstarted);
+ }
+ else if (m_State & (TS_Detached | TS_Dead))
+ {
+ return;
+ }
+ else
+ {
+ // We just want to trap this thread if it comes back into cooperative mode
+ SetupForSuspension(TS_UserSuspendPending);
+ m_SafeEvent.Reset();
+
+ // Pause it so we can operate on it without it squirming under us.
+RetrySuspension:
+ // We cannot allocate memory after we suspend a thread.
+ // Otherwise, we may deadlock the process when CLR is hosted.
+ ThreadStore::AllocateOSContext();
+
+ SuspendThreadResult str = SuspendThread();
+
+ // The only safe place to suspend a thread asynchronously is if it is in
+ // fully interruptible cooperative JIT code. Preemptive mode can hold all
+ // kinds of locks that make it unsafe to suspend. All other cases are
+ // handled somewhat synchronously (e.g. through hijacks, GC mode toggles, etc.)
+ //
+ // For example, on a SMP if the thread is blocked waiting for the ThreadStore
+ // lock, it can cause a deadlock if we suspend it (even though it is in
+ // preemptive mode).
+ //
+ // If a thread is in preemptive mode (including the tricky optimized N/Direct
+ // case), we can just mark it for suspension. It will make no further progress
+ // in the EE.
+ if (str == STR_NoStressLog)
+ {
+ // We cannot assume anything about the thread's current state.
+ goto RetrySuspension;
+ }
+ else if (!m_fPreemptiveGCDisabled)
+ {
+ MarkForSuspension(TS_UserSuspendPending);
+
+ // Let the thread run until it reaches a safe spot.
+ if (str == STR_Success)
+ {
+ ResumeThread();
+ }
+ }
+ else if (str == STR_Failure || str == STR_UnstartedOrDead)
+ {
+ // The thread cannot be unstarted, as we have already
+ // checked for that above.
+ _ASSERTE(!(m_State & TS_Unstarted));
+
+ // Nothing to do if the thread has already terminated.
+ }
+ else if (str == STR_SwitchedOut)
+ {
+ goto RetrySuspension;
+ }
+ else
+ {
+ _ASSERTE(str == STR_Success);
+#ifdef FEATURE_HIJACK
+ WorkingOnThreadContextHolder workingOnThreadContext(this);
+ if (workingOnThreadContext.Acquired() && HandledJITCase())
+ {
+ _ASSERTE(m_fPreemptiveGCDisabled);
+ // Redirect thread so we can capture a good thread context
+ // (GetThreadContext is not sufficient, due to an OS bug).
+ // If we don't succeed (should only happen on Win9X, due to
+ // a different OS bug), we must resume the thread and try
+ // again.
+ if (!CheckForAndDoRedirectForUserSuspend())
+ {
+ ResumeThread();
+ goto RetrySuspension;
+ }
+ }
+#endif // FEATURE_HIJACK
+
+ // Thread is executing in cooperative mode. We're going to have to
+ // move it to a safe spot.
+ MarkForSuspension(TS_UserSuspendPending);
+
+ // Let the thread run until it reaches a safe spot.
+ ResumeThread();
+
+ // wait until it leaves cooperative GC mode or is JIT suspended
+ FinishSuspendingThread();
+ }
+ }
+ }
+ else
+ {
+ GCX_PREEMP();
+ SetupForSuspension(TS_UserSuspendPending);
+ MarkForSuspension(TS_UserSuspendPending);
+
+ // prepare to block ourselves
+ tsl.Release();
+ _ASSERTE(!ThreadStore::HoldingThreadStore(this));
+
+ WaitSuspendEvents();
+ }
+}
+
+
+// if the only suspension of this thread is user imposed, resume it. But don't
+// resume from any system suspensions (like GC).
+BOOL Thread::UserResumeThread()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ // If we are attempting to resume when we aren't in a user suspension,
+ // it's an error.
+ BOOL res = FALSE;
+
+ // Note that suspensions are not counted (they do not nest). In other words, you can
+ // call Thread.Suspend() five times and Thread.Resume() once; the result is that the
+ // thread resumes.
+
+ STRESS_LOG0(LF_SYNC, INFO3, "UserResumeThread obtain lock\n");
+ ThreadStoreLockHolder TSLockHolder;
+
+ // If we have marked a thread for suspension, while that thread is still starting
+ // up, simply remove the bit to resume it.
+ if (m_State & TS_SuspendUnstarted)
+ {
+ _ASSERTE((m_State & TS_UserSuspendPending) == 0);
+ FastInterlockAnd((ULONG *) &m_State, ~TS_SuspendUnstarted);
+ res = TRUE;
+ }
+
+ // If we are still trying to suspend the thread, forget about it.
+ if (m_State & TS_UserSuspendPending)
+ {
+ ReleaseFromSuspension(TS_UserSuspendPending);
+ SetSafeEvent();
+ res = TRUE;
+ }
+
+ return res;
+}
+
+
+// We are asynchronously trying to suspend this thread. Stay here until we achieve
+// that goal (in fully interruptible JIT code), or the thread dies, or it leaves
+// the EE (in which case the Pending flag will cause it to synchronously suspend
+// itself later), or the thread tells us it is going to synchronously suspend
+// itself because of hijack activity, etc.
+void Thread::FinishSuspendingThread()
+{
+ CONTRACTL {
+ NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ DWORD res;
+
+ // There are two threads of interest -- the current thread and the thread we are
+ // going to wait for. Since the current thread is about to wait, it's important
+ // that it be in preemptive mode at this time.
+
+#if _DEBUG
+ DWORD dbgTotalTimeout = 0;
+#endif
+
+ // Wait for us to enter the ping period, then check if we are in interruptible
+ // JIT code.
+ while (TRUE)
+ {
+ ThreadSuspend::UnlockThreadStore();
+ res = m_SafeEvent.Wait(PING_JIT_TIMEOUT,FALSE);
+ STRESS_LOG0(LF_SYNC, INFO3, "FinishSuspendingThread obtain lock\n");
+ ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
+
+ if (res == WAIT_TIMEOUT)
+ {
+#ifdef _DEBUG
+ if ((dbgTotalTimeout += PING_JIT_TIMEOUT) >= g_pConfig->SuspendDeadlockTimeout())
+ {
+ _ASSERTE(!"Timeout detected trying to synchronously suspend a thread");
+ dbgTotalTimeout = 0;
+ }
+#endif
+ // Suspend the thread and see if we are in interruptible code (placing
+ // a hijack if warranted).
+#ifdef FEATURE_HIJACK
+ RetrySuspension:
+#endif // FEATURE_HIJACK
+ // The thread is detached or dead, so Suspend is a no-op.
+ if (m_State & (TS_Detached | TS_Dead))
+ {
+ return;
+ }
+
+ // We cannot allocate memory after we suspend a thread.
+ // Otherwise, we may deadlock the process when CLR is hosted.
+ ThreadStore::AllocateOSContext();
+
+ SuspendThreadResult str = SuspendThread();
+
+ if (m_fPreemptiveGCDisabled && str == STR_Success)
+ {
+#ifdef FEATURE_HIJACK
+ WorkingOnThreadContextHolder workingOnThreadContext(this);
+ if (workingOnThreadContext.Acquired() && HandledJITCase())
+ {
+ _ASSERTE(m_State & TS_UserSuspendPending);
+ // Redirect thread so we can capture a good thread context
+ // (GetThreadContext is not sufficient, due to an OS bug).
+ // If we don't succeed (should only happen on Win9X, due to
+ // a different OS bug), we must resume the thread and try
+ // again.
+ if (!CheckForAndDoRedirectForUserSuspend())
+ {
+ ResumeThread();
+ goto RetrySuspension;
+ }
+ }
+#endif // FEATURE_HIJACK
+ // Keep trying...
+ ResumeThread();
+ }
+ else if (!m_fPreemptiveGCDisabled)
+ {
+ // The thread has transitioned out of the EE. It can't get back in
+ // without synchronously suspending itself. We can now return to our
+ // caller since this thread cannot make further progress within the
+ // EE.
+ if (str == STR_Success)
+ {
+ ResumeThread();
+ }
+ break;
+ }
+ else if (str == STR_SwitchedOut)
+ {
+ // The task has been switched out while in Cooperative GC mode.
+ // We will wait for the thread again.
+ }
+ }
+ else
+ {
+ // SafeEvent has been set so we don't need to actually suspend. Either
+ // the thread died, or it will enter a synchronous suspension based on
+ // the UserSuspendPending bit.
+ _ASSERTE(res == WAIT_OBJECT_0);
+ _ASSERTE(!ThreadStore::HoldingThreadStore(this));
+ break;
+ }
+ }
+}
+
+#endif // FEATURE_CORECLR
+
+
+void Thread::SetSafeEvent()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ m_SafeEvent.Set();
+}
+
+
+/*
+ *
+ * WaitSuspendEventsHelper
+ *
+ * This function is a simple helper function for WaitSuspendEvents. It is needed
+ * because of the EX_TRY macro. This macro does an alloca(), which allocates space
+ * off the stack, not free'ing it. Thus, doing a EX_TRY in a loop can easily result
+ * in a stack overflow error. By factoring out the EX_TRY into a separate function,
+ * we recover that stack space.
+ *
+ * Parameters:
+ * None.
+ *
+ * Return:
+ * true if meant to continue, else false.
+ *
+ */
+BOOL Thread::WaitSuspendEventsHelper(void)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ DWORD result = WAIT_FAILED;
+
+ EX_TRY {
+
+ if (m_State & TS_UserSuspendPending) {
+
+ ThreadState oldState = m_State;
+
+ while (oldState & TS_UserSuspendPending) {
+
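+ // Atomically set TS_SyncSuspended only while the user-suspend-pending bit is
+ // still set; if another thread changes m_State first, the compare-exchange
+ // fails, we re-read the state, and retry.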
+ ThreadState newState = (ThreadState)(oldState | TS_SyncSuspended);
+ if (FastInterlockCompareExchange((LONG *)&m_State, newState, oldState) == (LONG)oldState)
+ {
+ result = m_UserSuspendEvent.Wait(INFINITE,FALSE);
+#if _DEBUG
+ newState = m_State;
+ _ASSERTE(!(newState & TS_SyncSuspended) || (newState & TS_DebugSuspendPending));
+#endif
+ break;
+ }
+
+ oldState = m_State;
+ }
+
+
+ } else if (m_State & TS_DebugSuspendPending) {
+
+ ThreadState oldState = m_State;
+
+ while (oldState & TS_DebugSuspendPending) {
+
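+ // Same atomic set-and-wait pattern as the user-suspend case above, this time
+ // keyed off TS_DebugSuspendPending.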
+ ThreadState newState = (ThreadState)(oldState | TS_SyncSuspended);
+ if (FastInterlockCompareExchange((LONG *)&m_State, newState, oldState) == (LONG)oldState)
+ {
+ result = m_DebugSuspendEvent.Wait(INFINITE,FALSE);
+#if _DEBUG
+ newState = m_State;
+ _ASSERTE(!(newState & TS_SyncSuspended) || (newState & TS_UserSuspendPending));
+#endif
+ break;
+ }
+
+ oldState = m_State;
+ }
+ }
+ }
+ EX_CATCH {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ return result != WAIT_OBJECT_0;
+}
+
+
+// There's a bit of a workaround here
+void Thread::WaitSuspendEvents(BOOL fDoWait)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ _ASSERTE(!PreemptiveGCDisabled());
+ _ASSERTE((m_State & TS_SyncSuspended) == 0);
+
+ // Let us do some useful work before suspending ourselves.
+
+ // If we're required to perform a wait, do so. Typically, this is
+ // skipped if this thread is a Debugger Special Thread.
+ if (fDoWait)
+ {
+ while (TRUE)
+ {
+ WaitSuspendEventsHelper();
+
+ ThreadState oldState = m_State;
+
+ //
+ // If all reasons to suspend are off, we think we can exit
+ // this loop, but we need to check atomically.
+ //
+ if ((oldState & (TS_UserSuspendPending | TS_DebugSuspendPending)) == 0)
+ {
+ //
+ // Construct the destination state we desire - all suspension bits turned off.
+ //
+ ThreadState newState = (ThreadState)(oldState & ~(TS_UserSuspendPending |
+ TS_DebugSuspendPending |
+ TS_SyncSuspended));
+
+ if (FastInterlockCompareExchange((LONG *)&m_State, newState, oldState) == (LONG)oldState)
+ {
+ //
+ // We are done.
+ //
+ break;
+ }
+ }
+ }
+ }
+}
+
+#ifdef FEATURE_HIJACK
+// For the PAL, we poll so we don't need to hijack
+
+// Hijacking JITted calls
+// ======================
+
+// State of execution when we suspend a thread
+struct ExecutionState
+{
+ BOOL m_FirstPass;
+ BOOL m_IsJIT; // are we executing JITted code?
+ MethodDesc *m_pFD; // current function/method we're executing
+ VOID **m_ppvRetAddrPtr; // pointer to return address in frame
+ DWORD m_RelOffset; // relative offset at which we're currently executing in this fcn
+ IJitManager *m_pJitManager;
+ METHODTOKEN m_MethodToken;
+ BOOL m_IsInterruptible; // is this code interruptible?
+
+ ExecutionState() : m_FirstPass(TRUE) {LIMITED_METHOD_CONTRACT; }
+};
+
+// Client is responsible for suspending the thread before calling
+void Thread::HijackThread(VOID *pvHijackAddr, ExecutionState *esb)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ static int EnterCount = 0;
+ _ASSERTE(EnterCount++ == 0);
+#endif
+
+    // Don't hijack if we are in the first level of running a filter/finally/catch.
+    // This is because they share ebp with their containing function further down the
+    // stack, and we would hijack their containing function incorrectly.
+ if (IsInFirstFrameOfHandler(this, esb->m_pJitManager, esb->m_MethodToken, esb->m_RelOffset))
+ {
+ _ASSERTE(--EnterCount == 0);
+ STRESS_LOG3(LF_SYNC, LL_INFO100, "Thread::HijackThread(%p to %p): Early out - IsInFirstFrameOfHandler. State=%x.\n", this, pvHijackAddr, (ThreadState)m_State);
+ return;
+ }
+
+ // Don't hijack if a profiler stackwalk is in progress
+ HijackLockHolder hijackLockHolder(this);
+ if (!hijackLockHolder.Acquired())
+ {
+ _ASSERTE(--EnterCount == 0);
+ STRESS_LOG3(LF_SYNC, LL_INFO100, "Thread::HijackThread(%p to %p): Early out - !hijackLockHolder.Acquired. State=%x.\n", this, pvHijackAddr, (ThreadState)m_State);
+ return;
+ }
+
+ IS_VALID_CODE_PTR((FARPROC) pvHijackAddr);
+
+ if (m_State & TS_Hijacked)
+ UnhijackThread();
+
+ // Make sure that the location of the return address is on the stack
+ _ASSERTE(IsAddressInStack(esb->m_ppvRetAddrPtr));
+
+ // Obtain the location of the return address in the currently executing stack frame
+ m_ppvHJRetAddrPtr = esb->m_ppvRetAddrPtr;
+
+ // Remember the place that the return would have gone
+ m_pvHJRetAddr = *esb->m_ppvRetAddrPtr;
+
+ IS_VALID_CODE_PTR((FARPROC) (TADDR)m_pvHJRetAddr);
+ // TODO [DAVBR]: For the full fix for VsWhidbey 450273, the below
+ // may be uncommented once isLegalManagedCodeCaller works properly
+ // with non-return address inputs, and with non-DEBUG builds
+ //_ASSERTE(isLegalManagedCodeCaller((TADDR)m_pvHJRetAddr));
+ STRESS_LOG2(LF_SYNC, LL_INFO100, "Hijacking return address 0x%p for thread %p\n", m_pvHJRetAddr, this);
+
+ // Remember the method we're executing
+ m_HijackedFunction = esb->m_pFD;
+
+ // Bash the stack to return to one of our stubs
+ *esb->m_ppvRetAddrPtr = pvHijackAddr;
+ FastInterlockOr((ULONG *) &m_State, TS_Hijacked);
+
+#ifdef _DEBUG
+ _ASSERTE(--EnterCount == 0);
+#endif
+}
+
+// If we are unhijacking another thread (not the current thread), then the caller is responsible for
+// suspending that thread.
+// It's legal to unhijack the current thread without special treatment.
+void Thread::UnhijackThread()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ if (m_State & TS_Hijacked)
+ {
+ IS_VALID_WRITE_PTR(m_ppvHJRetAddrPtr, sizeof(void*));
+ IS_VALID_CODE_PTR((FARPROC) m_pvHJRetAddr);
+
+        // Can't make the following assertion, because sometimes we unhijack after
+        // the hijack has tripped (i.e. in the case where we actually got some value
+        // from it).
+// _ASSERTE(*m_ppvHJRetAddrPtr == OnHijackObjectTripThread ||
+// *m_ppvHJRetAddrPtr == OnHijackScalarTripThread);
+
+ STRESS_LOG2(LF_SYNC, LL_INFO100, "Unhijacking return address 0x%p for thread %p\n", m_pvHJRetAddr, this);
+ // restore the return address and clear the flag
+ *m_ppvHJRetAddrPtr = m_pvHJRetAddr;
+ FastInterlockAnd((ULONG *) &m_State, ~TS_Hijacked);
+
+ // But don't touch m_pvHJRetAddr. We may need that to resume a thread that
+ // is currently hijacked!
+ }
+}
+
+//
+// The function below, ThreadCaughtInKernelModeExceptionHandling, exists to detect and work around a very subtle
+// race that we have when we suspend a thread while that thread is in the kernel handling an exception.
+//
+// When a user-mode thread takes an exception, the OS must get involved to handle that exception before user-mode
+// exception handling takes place. The exception causes the thread to enter kernel-mode. To handle the exception,
+// the kernel does the following: 1) pushes a CONTEXT, then an EXCEPTION_RECORD, and finally an EXCEPTION_POINTERS
+// struct onto the thread's user-mode stack. 2) the Esp value in the thread's user-mode context is updated to
+// reflect the fact that these structures have just been pushed. 3) some segment registers in the user-mode context
+// are modified. 4) the Eip value in the user-mode context is changed to point to the user-mode exception dispatch
+// routine. 5) the kernel resumes user-mode execution with the altered context.
+//
+// Note that during this entire process: 1) the thread can be suspended by another user-mode thread, and 2)
+// Get/SetThreadContext both operate on the user-mode context.
+//
+// There are two important races to consider here: a race when attempting to hijack the thread in HandledJITCase,
+// and a race when attempting to trace the thread's stack in HandledJITCase.
+//
+//
+// Race #1: failure to hijack a thread in HandledJITCase.
+//
+// In HandledJITCase, if we see that a thread's Eip is in managed code at an interruptible point, we will attempt
+// to move the thread to a hijack in order to stop its execution for a variety of reasons (GC, debugger, user-mode
+// suspension, etc.)  We do this by suspending the thread, inspecting Eip, changing Eip to the address of the hijack
+// routine, and resuming the thread.
+//
+// The problem here is that in step #4 above, the kernel is going to change Eip in the thread's context to point to
+// the user-mode exception dispatch routine. If we suspend a thread when it has taken an exception in managed code,
+// we may see Eip pointing to managed code and attempt to hijack the thread. When we resume the thread, step #4
+// will eventually execute and the thread will go to the user-mode exception dispatch routine instead of to our
+// hijack.
+//
+// We tolerate this by recognizing that this has happened when we arrive in our exception handler
+// (COMPlusFrameHandler), and we fix up the IP in the context passed to the handler.
+//
+//
+// Race #2: inability to trace a managed call stack
+//
+// If we suspend a thread after step #2 above, but before step #4, then we will see an Eip pointing to managed
+// code, but an Esp that points to the newly pushed exception structures. If we are in a managed function that does
+// not have an Ebp frame, the return address will be relative to Esp and we will not be able to resolve the return
+// address properly. Since we will attempt to place a return address hijack (as part of our heroic efforts to trap
+// the thread quickly), we may end up writing over random memory with our hijack. This is obviously extremely
+// bad. Realistically, any attempt to trace a thread's stack in this case is suspect, even if the managed function
+// has an EBP frame.
+//
+// The solution is to attempt to detect this race and abandon the hijack attempt. We have developed the following
+// heuristic to detect this case. Basically, we look to see if Esp points to an EXCEPTION_POINTERS structure, and
+// that this structure points to valid EXCEPTION_RECORD and CONTEXT structures. They must be ordered on the stack,
+// and the faulting address in the EXCEPTION_RECORD should be the thread's current Eip, and the Eip in the CONTEXT
+// should be the thread's current Eip.
+//
+// This is the heuristic codified. Given Eip and Esp from the thread's current context:
+//
+// 1. if Eip points to a managed function, and...
+// 2. the pointer at Esp is equal to Esp + sizeof(EXCEPTION_POINTERS), and...
+// 3. the faulting address in the EXCEPTION_RECORD at that location is equal to the current Eip, and...
+// 4. the NumberParameters field in the EXCEPTION_RECORD is valid (between 0 and EXCEPTION_MAXIMUM_PARAMETERS), and...
+// 5. the pointer at Esp + 4 is equal to Esp + sizeof(EXCEPTION_POINTERS) + the dynamic size of the EXCEPTION_RECORD, and...
+// 6. the Eip value of the CONTEXT at that location is equal to the current Eip, then we have recognized the race.
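+//
+// Pictorially (an illustrative x86 stack layout; addresses grow upward):
+//
+//    Esp + 0                                        -> pointer to the EXCEPTION_RECORD
+//    Esp + 4                                        -> pointer to the CONTEXT
+//    Esp + sizeof(EXCEPTION_POINTERS)               -> EXCEPTION_RECORD  (ExceptionAddress == current Eip)
+//    Esp + sizeof(EXCEPTION_POINTERS) + record size -> CONTEXT           (its Eip == current Eip)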
+//
+// The validation of Eip in both places, combined with ensuring that the pointer values are on the thread's stack
+// makes this a safe heuristic to evaluate. Even if one could end up in a function with the stack looking exactly
+// like this, and even if we are trying to suspend such a thread and we catch it at the Eip that matches the values
+// at the proper places on the stack, then the worst that will happen is we won't attempt to hijack the thread at
+// that point. We'll resume it and try again later. There will be at least one other instruction in the function
+// that is not at the Eip value on the stack, and we'll be able to trace the thread's stack from that instruction
+// and place the return address hijack.
+//
+// As races go, race #1 above is very, very easy to hit. We hit it in the wild before we shipped V1, and a simple
+// test program with one thread constantly AV'ing and another thread attempting to suspend the first thread every
+// half second hits the race almost instantly.
+//
+// Race #2 is extremely rare in comparison. The same program properly instrumented only hits the race about 5 times
+// every 2000 attempts or so. We did not hit this even in very stressful exception tests and
+// it's never been seen in the wild.
+//
+// Note: a new feature has been added in recent OS's that allows us to detect both of these races with a simple
+// call to GetThreadContext. This feature exists on all Win64 platforms, so this change is only for 32-bit
+// platforms. We've asked for this fix to be applied to future 32-bit OS's, so we can remove this on those
+// platforms when that happens. Furthermore, once we stop supporting the older 32-bit OS versions that don't have
+// the new feature, we can remove these workarounds altogether.
+//
+// WARNING: Interrupts (int 3) immediately increment the IP whereas traps (AVs) do not.
+// So this heuristic only works for traps, not for interrupts. As a result, the race
+// is still a problem for interrupts. This means that the race can cause a process crash
+// if the managed debugger puts an "int 3" in order to do a stepping operation,
+// and GC or a sampling profiler tries to suspend the thread. This can be handled
+// by modifying the code below to additionally check if the instruction just before
+// the IP is an "int 3".
+//
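+// A sketch of such a check (illustrative only, not part of this code): on x86 the
+// "int 3" opcode is the single byte 0xCC, so one could inspect the byte preceding
+// the reported IP:
+//
+//     BYTE *ip = (BYTE *)ctx->Eip;
+//     if (ip[-1] == 0xCC)
+//     {
+//         // IP was already incremented past a breakpoint; adjust the heuristic.
+//     }
+//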
+
+#ifdef _TARGET_X86_
+
+#ifndef FEATURE_PAL
+#define WORKAROUND_RACES_WITH_KERNEL_MODE_EXCEPTION_HANDLING
+#endif // !FEATURE_PAL
+
+#ifdef WORKAROUND_RACES_WITH_KERNEL_MODE_EXCEPTION_HANDLING
+BOOL ThreadCaughtInKernelModeExceptionHandling(Thread *pThread, CONTEXT *ctx)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pThread != NULL);
+ PRECONDITION(ctx != NULL);
+ }
+ CONTRACTL_END;
+
+ // Validate that Esp plus all of our maximum structure sizes is on the thread's stack. We use the cached bounds
+ // on the Thread object. If we're that close to the top of the thread's stack, then we can't possibly have hit
+ // the race. If we pass this test, we can assume all memory accesses below are legal, since they're all on the
+ // thread's stack.
+ if ((ctx->Esp + sizeof(EXCEPTION_POINTERS) + sizeof(EXCEPTION_RECORD) + sizeof(CONTEXT)) >=
+ (UINT_PTR)pThread->GetCachedStackBase())
+ {
+ return FALSE;
+ }
+
+ // The calculations below assume that a DWORD is the same size as a pointer. Since this is only needed on
+ // 32-bit platforms, this should be fine.
+ _ASSERTE(sizeof(DWORD) == sizeof(void*));
+
+    // There are cases where ESP is just decremented but the page is not touched, so the page is not committed or
+    // still has its guard bit set. We can't hit the race in such a case, so we just leave. Besides, we can't safely
+    // access memory that is guarded or not committed.
+ MEMORY_BASIC_INFORMATION mbi;
+#undef VirtualQuery
+ // This code can run below YieldTask, which means that it must not call back into the host.
+    // The reason is that YieldTask is invoked by the host, and the host need not be reentrant.
+ if (VirtualQuery((LPCVOID)(UINT_PTR)ctx->Esp, &mbi, sizeof(mbi)) == sizeof(mbi))
+ {
+ if (!(mbi.State & MEM_COMMIT) || (mbi.Protect & PAGE_GUARD))
+ return FALSE;
+ }
+ else
+ STRESS_LOG0 (LF_SYNC, ERROR, "VirtualQuery failed!");
+#define VirtualQuery(lpAddress, lpBuffer, dwLength) Dont_Use_VirtualQuery(lpAddress, lpBuffer, dwLength)
+
+ // The first two values on the stack should be a pointer to the EXCEPTION_RECORD and a pointer to the CONTEXT.
+ UINT_PTR Esp = (UINT_PTR)ctx->Esp;
+ UINT_PTR ER = *((UINT_PTR*)Esp);
+ UINT_PTR CTX = *((UINT_PTR*)(Esp + sizeof(EXCEPTION_RECORD*)));
+
+ // The EXCEPTION_RECORD should be at Esp + sizeof(EXCEPTION_POINTERS)... if it's not, then we haven't hit the race.
+ if (ER != (Esp + sizeof(EXCEPTION_POINTERS)))
+ {
+ return FALSE;
+ }
+
+ // Assume we have an EXCEPTION_RECORD at Esp + sizeof(EXCEPTION_POINTERS) and look at values within that.
+ EXCEPTION_RECORD *pER = (EXCEPTION_RECORD*)ER;
+
+ // Make sure the faulting address in the EXCEPTION_RECORD matches the thread's current Eip.
+ if ((UINT_PTR)pER->ExceptionAddress != ctx->Eip)
+ {
+ return FALSE;
+ }
+
+ // Validate the number of exception parameters.
+ if ((pER->NumberParameters > EXCEPTION_MAXIMUM_PARAMETERS))
+ {
+ return FALSE;
+ }
+
+    // We have a plausible number of exception parameters, so compute the exact size of this exception
+ // record. Remember, an EXCEPTION_RECORD has a variable sized array of optional information at the end called
+ // the ExceptionInformation. It's an array of pointers up to EXCEPTION_MAXIMUM_PARAMETERS in length.
+ DWORD exceptionRecordSize = sizeof(EXCEPTION_RECORD) -
+ ((EXCEPTION_MAXIMUM_PARAMETERS - pER->NumberParameters) * sizeof(pER->ExceptionInformation[0]));
+
+ // On Vista WOW on X64, the OS pushes the maximum number of parameters onto the stack.
+ DWORD exceptionRecordMaxSize = sizeof(EXCEPTION_RECORD);
+
+ // The CONTEXT pointer should be pointing right after the EXCEPTION_RECORD.
+ if ((CTX != (ER + exceptionRecordSize)) &&
+ (CTX != (ER + exceptionRecordMaxSize)))
+ {
+ return FALSE;
+ }
+
+ // Assume we have a CONTEXT at Esp + 8 + exceptionRecordSize and look at values within that.
+ CONTEXT *pCTX = (CONTEXT*)CTX;
+
+ // Make sure the Eip in the CONTEXT on the stack matches the current Eip value.
+ if (pCTX->Eip != ctx->Eip)
+ {
+ return FALSE;
+ }
+
+    // If we made it past all the checks above without returning, then we've hit race #2 described in the text before this function.
+ STRESS_LOG3(LF_SYNC, LL_INFO100,
+ "ThreadCaughtInKernelModeExceptionHandling returning TRUE. Eip=%p, Esp=%p, ExceptionCode=%p\n",
+ ctx->Eip, ctx->Esp, pER->ExceptionCode);
+
+ return TRUE;
+}
+#endif //WORKAROUND_RACES_WITH_KERNEL_MODE_EXCEPTION_HANDLING
+#endif //_TARGET_X86_
+
+// Get the ExecutionState for the specified *SUSPENDED* thread. Note that this is
+// a 'StackWalk' callback (PSTACKWALKFRAMESCALLBACK).
+StackWalkAction SWCB_GetExecutionState(CrawlFrame *pCF, VOID *pData)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ ExecutionState *pES = (ExecutionState *) pData;
+ StackWalkAction action = SWA_ABORT;
+
+ if (pES->m_FirstPass)
+ {
+ // This will help factor out some repeated code.
+ bool notJittedCase = false;
+
+ // If we're jitted code at the top of the stack, grab everything
+ if (pCF->IsFrameless() && pCF->IsActiveFunc())
+ {
+ pES->m_IsJIT = TRUE;
+ pES->m_pFD = pCF->GetFunction();
+ pES->m_MethodToken = pCF->GetMethodToken();
+ pES->m_ppvRetAddrPtr = 0;
+ pES->m_IsInterruptible = pCF->IsGcSafe();
+ pES->m_RelOffset = pCF->GetRelOffset();
+ pES->m_pJitManager = pCF->GetJitManager();
+
+ STRESS_LOG3(LF_SYNC, LL_INFO1000, "Stopped in Jitted code at pc = %p sp = %p fullyInt=%d\n",
+ GetControlPC(pCF->GetRegisterSet()), GetRegdisplaySP(pCF->GetRegisterSet()), pES->m_IsInterruptible);
+
+#if defined(FEATURE_CONSERVATIVE_GC) && !defined(USE_GC_INFO_DECODER)
+ if (g_pConfig->GetGCConservative())
+ {
+ // Conservative GC enabled; behave as if HIJACK_NONINTERRUPTIBLE_THREADS had not been
+ // set above:
+ //
+ notJittedCase = true;
+ }
+ else
+#endif // FEATURE_CONSERVATIVE_GC
+ {
+#ifndef HIJACK_NONINTERRUPTIBLE_THREADS
+ if (!pES->m_IsInterruptible)
+ {
+ notJittedCase = true;
+ }
+#else // HIJACK_NONINTERRUPTIBLE_THREADS
+ // if we're not interruptible right here, we need to determine the
+ // return address for hijacking.
+ if (!pES->m_IsInterruptible)
+ {
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
+ PREGDISPLAY pRDT = pCF->GetRegisterSet();
+ _ASSERTE(pRDT != NULL);
+
+ // For simplicity, don't hijack in funclets
+ bool fIsFunclet = pCF->IsFunclet();
+ if (fIsFunclet)
+ {
+ notJittedCase = true;
+ }
+ else
+ {
+ // We already have the caller context available at this point
+ _ASSERTE(pRDT->IsCallerContextValid);
+#ifdef _TARGET_ARM_
+
+ // Why do we use CallerContextPointers below?
+ //
+ // Assume the following callstack, growing from left->right:
+ //
+ // C -> B -> A
+ //
+ // Assuming A is non-interruptible function and pushes LR on stack,
+ // when we get the stackwalk callback for A, the CallerContext would
+ // contain non-volatile register state for B and CallerContextPtrs would
+                    // contain the location where the caller's (B's) non-volatiles were restored
+ // from. This would be the stack location in A where they were pushed. Thus,
+ // CallerContextPtrs->Lr would contain the stack location in A where LR (representing an address in B)
+ // was pushed and thus, contains the return address in B.
+
+ // Note that the JIT always pushes LR even for leaf methods to make hijacking
+ // work for them. See comment in code:Compiler::genPushCalleeSavedRegisters.
+
+ if(pRDT->pCallerContextPointers->Lr == &pRDT->pContext->Lr)
+ {
+ // This is the case when we are either:
+ //
+ // 1) In a leaf method that does not push LR on stack, OR
+ // 2) In the prolog/epilog of a non-leaf method that has not yet pushed LR on stack
+ // or has LR already popped off.
+ //
+                        // The remaining non-leaf case is that of the IP being in the body of the
+                        // function. In such a case, LR would have been pushed on the stack and thus
+                        // we wouldn't be here but in the "else" clause below.
+ //
+                        // For (1), CallerContext->ControlPC could be used as the return address,
+                        // since we know that leaf frames return directly to their caller.
+                        // Doing so may require JIT support.
+ notJittedCase = true;
+ }
+ else
+ {
+ // This is the case of IP being inside the method body and LR is
+ // pushed on the stack. We get it to determine the return address
+ // in the caller of the current non-interruptible frame.
+ pES->m_ppvRetAddrPtr = (void **) pRDT->pCallerContextPointers->Lr;
+ }
+#else
+ pES->m_ppvRetAddrPtr = (void **) ((size_t)GetSP(pRDT->pCallerContext) - sizeof(void*));
+#endif
+ }
+#else
+ // peel off the next frame to expose the return address on the stack
+ pES->m_FirstPass = FALSE;
+ action = SWA_CONTINUE;
+#endif // _TARGET_AMD64_ || _TARGET_ARM_
+ }
+#endif // HIJACK_NONINTERRUPTIBLE_THREADS
+ }
+ // else we are successfully out of here with SWA_ABORT
+ }
+ else
+ {
+#ifdef _TARGET_X86_
+ STRESS_LOG2(LF_SYNC, LL_INFO1000, "Not in Jitted code at EIP = %p, &EIP = %p\n", GetControlPC(pCF->GetRegisterSet()), pCF->GetRegisterSet()->PCTAddr);
+#else
+ STRESS_LOG1(LF_SYNC, LL_INFO1000, "Not in Jitted code at pc = %p\n", GetControlPC(pCF->GetRegisterSet()));
+#endif
+ notJittedCase = true;
+ }
+
+        // Cases above may have set "notJittedCase", which we handle as follows:
+ if (notJittedCase)
+ {
+ pES->m_IsJIT = FALSE;
+#ifdef _DEBUG
+ pES->m_pFD = (MethodDesc *)POISONC;
+ pES->m_ppvRetAddrPtr = (void **)POISONC;
+ pES->m_IsInterruptible = FALSE;
+#endif
+ }
+ }
+ else
+ {
+#ifdef _TARGET_X86_
+ // Second pass, looking for the address of the return address so we can
+ // hijack:
+
+ PREGDISPLAY pRDT = pCF->GetRegisterSet();
+
+ if (pRDT != NULL)
+ {
+ // pPC points to the return address sitting on the stack, as our
+ // current EIP for the penultimate stack frame.
+ pES->m_ppvRetAddrPtr = (void **) pRDT->PCTAddr;
+
+ STRESS_LOG2(LF_SYNC, LL_INFO1000, "Partially Int case hijack address = 0x%x val = 0x%x\n", pES->m_ppvRetAddrPtr, *pES->m_ppvRetAddrPtr);
+ }
+#else
+ PORTABILITY_ASSERT("Platform NYI");
+#endif
+ }
+
+ return action;
+}
+
+// Get the ExecutionState for the specified SwitchIn thread. Note that this is
+// a 'StackWalk' callback (PSTACKWALKFRAMESCALLBACK).
+StackWalkAction SWCB_GetExecutionStateForSwitchIn(CrawlFrame *pCF, VOID *pData)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ ExecutionState *pES = (ExecutionState *) pData;
+ StackWalkAction action = SWA_CONTINUE;
+
+ if (pES->m_FirstPass) {
+ if (pCF->IsFrameless()) {
+#ifdef _TARGET_X86_
+ pES->m_FirstPass = FALSE;
+#else
+ _ASSERTE(!"Platform NYI");
+#endif
+
+ pES->m_IsJIT = TRUE;
+ pES->m_pFD = pCF->GetFunction();
+ pES->m_MethodToken = pCF->GetMethodToken();
+ // We do not care if the code is interruptible
+ pES->m_IsInterruptible = FALSE;
+ pES->m_RelOffset = pCF->GetRelOffset();
+ pES->m_pJitManager = pCF->GetJitManager();
+ }
+ }
+ else {
+#ifdef _TARGET_X86_
+ if (pCF->IsFrameless()) {
+ PREGDISPLAY pRDT = pCF->GetRegisterSet();
+ if (pRDT) {
+ // pPC points to the return address sitting on the stack, as our
+ // current EIP for the penultimate stack frame.
+ pES->m_ppvRetAddrPtr = (void **) pRDT->PCTAddr;
+ action = SWA_ABORT;
+ }
+ }
+#else
+ _ASSERTE(!"Platform NYI");
+#endif
+ }
+ return action;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Helper used by HandledJITCase and others (like the profiling API) that need an
+// absolutely reliable register context.
+//
+// Arguments:
+// * dwOptions - [in] Combination of flags from enum
+// GetSafelyRedirectableThreadContextOptions to customize the checks performed by
+// this function.
+// * pCtx - [out] This Thread's current context. Callers may rely on this only if nonzero
+// is returned
+// * pRD - [out] Matching REGDISPLAY filled from the pCtx found by this function.
+// Callers may rely on this only if nonzero is returned
+//
+// Return Value:
+// Nonzero iff all requested checks have succeeded, which would imply that it is
+// a reliable time to use this Thread's context.
+//
+BOOL Thread::GetSafelyRedirectableThreadContext(DWORD dwOptions, CONTEXT * pCtx, REGDISPLAY * pRD)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pCtx != NULL);
+ _ASSERTE(pRD != NULL);
+
+    // We are never in interruptible code if there is a filter context put in place by the debugger.
+ if (GetFilterContext() != NULL)
+ return FALSE;
+
+#ifdef DEBUGGING_SUPPORTED
+ if ((dwOptions & kCheckDebuggerBreakpoints) != 0)
+ {
+ // If we are running under the control of a managed debugger that may have placed breakpoints in the code stream,
+ // then there is a special case that we need to check. See the comments in debugger.cpp for more information.
+ if (CORDebuggerAttached() && (g_pDebugInterface->IsThreadContextInvalid(this)))
+ return FALSE;
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ // Make sure we specify CONTEXT_EXCEPTION_REQUEST to detect "trap frame reporting".
+ _ASSERTE(GetFilterContext() == NULL);
+
+ ZeroMemory(pCtx, sizeof(*pCtx));
+ pCtx->ContextFlags = CONTEXT_FULL | CONTEXT_EXCEPTION_REQUEST;
+ if (!EEGetThreadContext(this, pCtx))
+ {
+ return FALSE;
+ }
+
+ //
+    // Work around WOW64 problems.  Only do this workaround if a) this is x86, and b) the OS does not support trap frame reporting.
+    // If the OS *does* support trap frame reporting, then the call to IsContextSafeToRedirect below will return FALSE if we run
+ // into this race.
+ //
+#ifdef _TARGET_X86_
+ if (!(pCtx->ContextFlags & CONTEXT_EXCEPTION_REPORTING) &&
+ ((dwOptions & kPerfomLastRedirectIPCheck) != 0))
+ {
+ // This code fixes a race between GetThreadContext and NtContinue. If we redirect managed code
+ // at the same place twice in a row, we run the risk of reading a bogus CONTEXT when we redirect
+ // the second time. This leads to access violations on x86 machines. To fix the problem, we
+ // never redirect at the same instruction pointer that we redirected at on the previous GC.
+ if (GetIP(pCtx) == m_LastRedirectIP)
+ {
+ // We need to test for an infinite loop in assembly, as this will break the heuristic we
+ // are using.
+ const BYTE short_jmp = 0xeb; // Machine code for a short jump.
+ const BYTE self = 0xfe; // -2. Short jumps are calculated as [ip]+2+[second_byte].
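+            // Worked example: the two-byte sequence EB FE decodes as
+            // jmp (ip + 2 + (-2)) == jmp ip - an instruction that jumps
+            // to itself, i.e. the tightest possible infinite loop.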
+
+ // If we find that we are in an infinite loop, we'll set the last redirected IP to 0 so that we will
+            // redirect the next time we attempt it. Delaying one iteration allows us to narrow the window of
+ // the race we are working around in this corner case.
+ BYTE *ip = (BYTE *)m_LastRedirectIP;
+ if (ip[0] == short_jmp && ip[1] == self)
+ m_LastRedirectIP = 0;
+
+ // We set a hard limit of 5 times we will spin on this to avoid any tricky race which we have not
+ // accounted for.
+ m_SpinCount++;
+ if (m_SpinCount >= 5)
+ m_LastRedirectIP = 0;
+
+ STRESS_LOG0(LF_GC, LL_INFO10000, "GetSafelyRedirectableThreadContext() - Cannot redirect at the same IP as the last redirection.\n");
+ return FALSE;
+ }
+ }
+#endif
+
+ if (!IsContextSafeToRedirect(pCtx))
+ {
+ STRESS_LOG0(LF_GC, LL_INFO10000, "GetSafelyRedirectableThreadContext() - trap frame reporting an invalid CONTEXT\n");
+ return FALSE;
+ }
+
+ ZeroMemory(pRD, sizeof(*pRD));
+ if (!InitRegDisplay(pRD, pCtx, true))
+ return FALSE;
+
+ return TRUE;
+}
+
+// Called while the thread is suspended. If we aren't in JITted code, this isn't
+// a JITCase and we return FALSE. If it is a JIT case and we are in interruptible
+// code, then we are handled. Our caller has found a good spot and can keep us
+// suspended. If we aren't in interruptible code, then we aren't handled. So we
+// pick a spot to hijack the return address and our caller will wait to get us
+// somewhere safe.
+BOOL Thread::HandledJITCase(BOOL ForTaskSwitchIn)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ BOOL ret = FALSE;
+ ExecutionState esb;
+ StackWalkAction action;
+
+ _ASSERTE(WorkingOnThreadContext());
+
+ CONTEXT ctx;
+ REGDISPLAY rd;
+ if (!GetSafelyRedirectableThreadContext(
+ kPerfomLastRedirectIPCheck | kCheckDebuggerBreakpoints,
+ &ctx,
+ &rd))
+ {
+ STRESS_LOG0(LF_GC, LL_INFO10000, "HandledJITCase() - GetSafelyRedirectableThreadContext() returned FALSE\n");
+ return FALSE;
+ }
+
+ if (!ExecutionManager::IsManagedCode(GetIP(&ctx)))
+ {
+ return FALSE;
+ }
+
+#ifdef WORKAROUND_RACES_WITH_KERNEL_MODE_EXCEPTION_HANDLING
+ if (ThreadCaughtInKernelModeExceptionHandling(this, &ctx))
+ {
+ return FALSE;
+ }
+#endif //WORKAROUND_RACES_WITH_KERNEL_MODE_EXCEPTION_HANDLING
+
+#ifdef _DEBUG
+ // We know IP is in managed code, mark current thread as safe for calls into host
+ Thread * pCurThread = GetThread();
+ if (pCurThread != NULL)
+ {
+ pCurThread->dbg_m_cSuspendedThreadsWithoutOSLock ++;
+ _ASSERTE(pCurThread->dbg_m_cSuspendedThreadsWithoutOSLock <= pCurThread->dbg_m_cSuspendedThreads);
+ }
+#endif //_DEBUG
+
+ // Walk one or two frames of the stack...
+ if (ForTaskSwitchIn) {
+ action = StackWalkFramesEx(&rd,SWCB_GetExecutionStateForSwitchIn, &esb, QUICKUNWIND | DISABLE_MISSING_FRAME_DETECTION | THREAD_IS_SUSPENDED | ALLOW_ASYNC_STACK_WALK, NULL);
+ }
+ else {
+#ifdef TIME_SUSPEND
+ DWORD startCrawl = g_SuspendStatistics.GetTime();
+#endif
+ action = StackWalkFramesEx(&rd,SWCB_GetExecutionState, &esb,
+ QUICKUNWIND | DISABLE_MISSING_FRAME_DETECTION |
+ THREAD_IS_SUSPENDED | ALLOW_ASYNC_STACK_WALK, NULL);
+
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.crawl.Accumulate(
+ SuspendStatistics::GetElapsed(startCrawl,
+ g_SuspendStatistics.GetTime()));
+
+ g_SuspendStatistics.cntHijackCrawl++;
+#endif
+ }
+
+ //
+ // action should either be SWA_ABORT, in which case we properly walked
+ // the stack frame and found out whether this is a JIT case, or
+ // SWA_FAILED, in which case the walk couldn't even be started because
+ // there are no stack frames, which also isn't a JIT case.
+ //
+ if (action == SWA_ABORT && esb.m_IsJIT)
+ {
+ // If we are interruptible and we are in cooperative mode, our caller can
+ // just leave us suspended.
+ if (esb.m_IsInterruptible && m_fPreemptiveGCDisabled)
+ {
+ _ASSERTE(!ThreadStore::HoldingThreadStore(this));
+ ret = TRUE;
+ }
+ else
+ if (esb.m_ppvRetAddrPtr)
+ {
+ // we need to hijack the return address. Base this on whether or not
+ // the method returns an object reference, so we know whether to protect
+ // it or not.
+ VOID *pvHijackAddr = OnHijackScalarTripThread;
+ if (esb.m_pFD)
+ {
+#ifdef _WIN64
+ // For simplicity, we don't hijack in funclets, but if you ever change that,
+ // be sure to choose the OnHijack... callback type to match that of the FUNCLET
+ // not the main method (it would probably be Scalar).
+#endif // _WIN64
+
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+ // Mark that we are performing a stackwalker like operation on the current thread.
+ // This is necessary to allow the signature parsing functions to work without triggering any loads
+ ClrFlsValueSwitch _threadStackWalking(TlsIdx_StackWalkerWalkingThread, this);
+
+#ifdef _TARGET_X86_
+ MetaSig msig(esb.m_pFD);
+ if (msig.HasFPReturn())
+ {
+ // Figuring out whether the function returns FP or not is hard to do
+ // on-the-fly, so we use a different callback helper on x86 where this
+ // piece of information is needed in order to perform the right save &
+ // restore of the return value around the call to OnHijackScalarWorker.
+ pvHijackAddr = OnHijackFloatingPointTripThread;
+ }
+ else
+#endif // _TARGET_X86_
+ {
+ MetaSig::RETURNTYPE type = esb.m_pFD->ReturnsObject();
+ if (type == MetaSig::RETOBJ)
+ pvHijackAddr = OnHijackObjectTripThread;
+ else if (type == MetaSig::RETBYREF)
+ pvHijackAddr = OnHijackInteriorPointerTripThread;
+ }
+ }
+
+#ifdef FEATURE_ENABLE_GCPOLL
+ // On platforms that support both hijacking and GC polling
+ // decide whether to hijack based on a configuration value.
+ // COMPLUS_GCPollType = 1 is the setting that enables hijacking
+ // in GCPOLL enabled builds.
+ EEConfig::GCPollType pollType = g_pConfig->GetGCPollType();
+ if (EEConfig::GCPOLL_TYPE_HIJACK == pollType || EEConfig::GCPOLL_TYPE_DEFAULT == pollType)
+#endif // FEATURE_ENABLE_GCPOLL
+ {
+ HijackThread(pvHijackAddr, &esb);
+ }
+
+ }
+ }
+ // else it's not even a JIT case
+
+#ifdef _DEBUG
+ // Restore back the number of threads without OS lock
+ if (pCurThread != NULL)
+ {
+ pCurThread->dbg_m_cSuspendedThreadsWithoutOSLock --;
+ }
+#endif //_DEBUG
+
+ STRESS_LOG1(LF_SYNC, LL_INFO10000, " HandledJitCase returning %d\n", ret);
+ return ret;
+}
+
+
+HijackFrame::HijackFrame(LPVOID returnAddress, Thread *thread, HijackArgs *args)
+ : m_ReturnAddress((TADDR)returnAddress),
+ m_Thread(thread),
+ m_Args(args)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_Thread == GetThread());
+
+ m_Next = m_Thread->GetFrame();
+ m_Thread->SetFrame(this);
+}
+
+
+// A hijacked method is returning an ObjectRef to its caller. Note that we bash the
+// return address in HijackArgs.
+void STDCALL OnHijackObjectWorker(HijackArgs * pArgs)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Thread *thread = GetThread();
+
+#ifdef FEATURE_STACK_PROBE
+ if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
+ {
+ RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), thread);
+ }
+#endif
+
+ CONTRACT_VIOLATION(SOToleranceViolation);
+#ifdef HIJACK_NONINTERRUPTIBLE_THREADS
+ OBJECTREF oref(ObjectToOBJECTREF(*(Object **) &pArgs->ReturnValue));
+
+ FastInterlockAnd((ULONG *) &thread->m_State, ~Thread::TS_Hijacked);
+
+ // Fix up our caller's stack, so it can resume from the hijack correctly
+ pArgs->ReturnAddress = (size_t)thread->m_pvHJRetAddr;
+
+ // Build a frame so that stack crawling can proceed from here back to where
+ // we will resume execution.
+ FrameWithCookie<HijackFrame> frame((void *)pArgs->ReturnAddress, thread, pArgs);
+
+ GCPROTECT_BEGIN(oref)
+ {
+#ifdef _DEBUG
+ BOOL GCOnTransition = FALSE;
+ if (g_pConfig->FastGCStressLevel()) {
+ GCOnTransition = GC_ON_TRANSITIONS (FALSE);
+ }
+#endif
+
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.cntHijackTrap++;
+#endif
+
+ CommonTripThread();
+#ifdef _DEBUG
+ if (g_pConfig->FastGCStressLevel()) {
+ GC_ON_TRANSITIONS (GCOnTransition);
+ }
+#endif
+ *((OBJECTREF *) &pArgs->ReturnValue) = oref;
+ }
+ GCPROTECT_END(); // trashes oref here!
+
+ frame.Pop();
+#else
+ PORTABILITY_ASSERT("OnHijackObjectWorker not implemented on this platform.");
+#endif
+}
+
+
+// A hijacked method is returning an interior pointer to its caller. Note that we bash
+// the return address in HijackArgs.
+void STDCALL OnHijackInteriorPointerWorker(HijackArgs * pArgs)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+#ifdef HIJACK_NONINTERRUPTIBLE_THREADS
+ Thread *thread = GetThread();
+ void* ptr = (void*)(pArgs->ReturnValue);
+
+#ifdef FEATURE_STACK_PROBE
+ if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
+ {
+ RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), thread);
+ }
+#endif
+
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ FastInterlockAnd((ULONG *) &thread->m_State, ~Thread::TS_Hijacked);
+
+ // Fix up our caller's stack, so it can resume from the hijack correctly
+ pArgs->ReturnAddress = (size_t)thread->m_pvHJRetAddr;
+
+ // Build a frame so that stack crawling can proceed from here back to where
+ // we will resume execution.
+ FrameWithCookie<HijackFrame> frame((void *)pArgs->ReturnAddress, thread, pArgs);
+
+ GCPROTECT_BEGININTERIOR(ptr)
+ {
+#ifdef _DEBUG
+ BOOL GCOnTransition = FALSE;
+ if (g_pConfig->FastGCStressLevel()) {
+ GCOnTransition = GC_ON_TRANSITIONS (FALSE);
+ }
+#endif
+
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.cntHijackTrap++;
+#endif
+
+ CommonTripThread();
+#ifdef _DEBUG
+ if (g_pConfig->FastGCStressLevel()) {
+ GC_ON_TRANSITIONS (GCOnTransition);
+ }
+#endif
+ pArgs->ReturnValue = (size_t)ptr;
+ }
+    GCPROTECT_END();        // trashes ptr here!
+
+ frame.Pop();
+#else
+ PORTABILITY_ASSERT("OnHijackInteriorPointerWorker not implemented on this platform.");
+#endif
+}
+
+
+// A hijacked method is returning a scalar to its caller. Note that we bash the
+// return address as an int on the stack. Since this is cdecl, our caller gets the
+// bashed value. This is not intuitive for C programmers!
+void STDCALL OnHijackScalarWorker(HijackArgs * pArgs)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+#ifdef HIJACK_NONINTERRUPTIBLE_THREADS
+ Thread *thread = GetThread();
+
+#ifdef FEATURE_STACK_PROBE
+ if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
+ {
+ // Make sure default domain does not see SO.
+ // probe for our entry point amount and throw if not enough stack
+ RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), thread);
+ }
+#endif
+
+ CONTRACT_VIOLATION(SOToleranceViolation);
+ FastInterlockAnd((ULONG *) &thread->m_State, ~Thread::TS_Hijacked);
+
+ // Fix up our caller's stack, so it can resume from the hijack correctly
+ pArgs->ReturnAddress = (size_t)thread->m_pvHJRetAddr;
+
+ // Build a frame so that stack crawling can proceed from here back to where
+ // we will resume execution.
+ FrameWithCookie<HijackFrame> frame((void *)pArgs->ReturnAddress, thread, pArgs);
+
+#ifdef _DEBUG
+ BOOL GCOnTransition = FALSE;
+ if (g_pConfig->FastGCStressLevel()) {
+ GCOnTransition = GC_ON_TRANSITIONS (FALSE);
+ }
+#endif
+
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.cntHijackTrap++;
+#endif
+
+ CommonTripThread();
+#ifdef _DEBUG
+ if (g_pConfig->FastGCStressLevel()) {
+ GC_ON_TRANSITIONS (GCOnTransition);
+ }
+#endif
+
+ frame.Pop();
+#else
+ PORTABILITY_ASSERT("OnHijackScalarWorker not implemented on this platform.");
+#endif
+}
+
+#endif // FEATURE_HIJACK
+
+// Some simple helpers to keep track of the threads we are waiting for
+void Thread::MarkForSuspension(ULONG bit)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(bit == TS_DebugSuspendPending ||
+ bit == (TS_DebugSuspendPending | TS_DebugWillSync) ||
+ bit == TS_UserSuspendPending);
+
+ _ASSERTE(IsAtProcessExit() || ThreadStore::HoldingThreadStore());
+
+ _ASSERTE((m_State & bit) == 0);
+
+ FastInterlockOr((ULONG *) &m_State, bit);
+ ThreadStore::TrapReturningThreads(TRUE);
+}
+
+void Thread::UnmarkForSuspension(ULONG mask)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(mask == ~TS_DebugSuspendPending ||
+ mask == ~TS_UserSuspendPending);
+
+ _ASSERTE(IsAtProcessExit() || ThreadStore::HoldingThreadStore());
+
+ _ASSERTE((m_State & ~mask) != 0);
+
+ // we decrement the global first to be able to satisfy the assert from DbgFindThread
+ ThreadStore::TrapReturningThreads(FALSE);
+ FastInterlockAnd((ULONG *) &m_State, mask);
+}
+
+//----------------------------------------------------------------------------
+
+void ThreadSuspend::RestartEE(BOOL bFinishedGC, BOOL SuspendSucceded)
+{
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.StartRestart();
+#endif //TIME_SUSPEND
+
+ FireEtwGCRestartEEBegin_V1(GetClrInstanceId());
+
+ //
+ // SyncClean holds a list of things to be cleaned up when it's possible.
+ // SyncClean uses the GC mode to synchronize access to this list. Threads must be
+ // in COOP mode to add things to the list, and the list can only be cleaned up
+ // while no threads are adding things.
+ // Since we know that no threads are in COOP mode at this point (because the EE is
+ // suspended), we clean up the list here.
+ //
+ SyncClean::CleanUp();
+
+#ifdef PROFILING_SUPPORTED
+    // If a profiler is keeping track of suspend events, notify it.  This notification
+    // must happen before we set TrapReturningThreads to FALSE because as soon as
+    // we remove the return trap, threads can start "running" managed code again as
+ // they return from unmanaged. (Whidbey Bug #7505)
+ // Also must notify before setting GcInProgress = FALSE.
+ //
+ // It's very odd that we do this here, in ThreadSuspend::RestartEE, while the
+ // corresponding call to RuntimeSuspendStarted is done at a lower architectural layer,
+ // in ThreadSuspend::SuspendRuntime.
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackSuspends());
+ g_profControlBlock.pProfInterface->RuntimeResumeStarted();
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ //
+ // Unhijack all threads, and reset their "suspend pending" flags. Why isn't this in
+ // Thread::ResumeRuntime?
+ //
+ Thread *thread = NULL;
+ while ((thread = ThreadStore::GetThreadList(thread)) != NULL)
+ {
+ thread->PrepareForEERestart(SuspendSucceded);
+ }
+
+ //
+ // Revert to being a normal thread
+ //
+ ClrFlsClearThreadType (ThreadType_DynamicSuspendEE);
+ GCHeap::GetGCHeap()->SetGCInProgress(FALSE);
+
+ //
+ // Allow threads to enter COOP mode (though we still need to wake the ones
+ // that we hijacked).
+ //
+ // Note: this is the last barrier that keeps managed threads
+ // from entering cooperative mode. If the sequence changes,
+ // you may have to change routine GCHeap::SafeToRestartManagedThreads
+ // as well.
+ //
+ ThreadStore::TrapReturningThreads(FALSE);
+ g_pSuspensionThread = 0;
+
+ //
+ // Any threads that are waiting in WaitUntilGCComplete will continue now.
+ //
+ GCHeap::GetGCHeap()->GetWaitForGCEvent()->Set();
+ _ASSERTE(IsGCSpecialThread() || ThreadStore::HoldingThreadStore());
+
+ ResumeRuntime(bFinishedGC, SuspendSucceded);
+
+ FireEtwGCRestartEEEnd_V1(GetClrInstanceId());
+
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.EndRestart();
+#endif //TIME_SUSPEND
+}
+
+// The contract between GC and the EE, for starting and finishing a GC is as follows:
+//
+// SuspendEE:
+// LockThreadStore
+// SetGCInProgress
+// SuspendRuntime
+//
+// ... perform the GC ...
+//
+// RestartEE:
+// SetGCDone
+// ResumeRuntime
+// calls UnlockThreadStore
+//
+// Note that this is intentionally *not* symmetrical. The EE will assert that the
+// GC does most of this stuff in the correct sequence.
+
+//
+// This is the only way to call ThreadSuspend::SuspendRuntime, and that method is
+// so tightly coupled to this one, with intermingled responsibilities, that we don't
+// understand why we have a separation at all. At some point we should refactor all of
+// the suspension code into a separate abstraction, which we would like to call the
+// "managed execution lock." The current "layering" of this stuff has it mixed
+// randomly into the Thread and GC code, and split into almost completely arbitrary
+// layers.
+//
+void ThreadSuspend::SuspendEE(SUSPEND_REASON reason)
+{
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.StartSuspend();
+#endif //TIME_SUSPEND
+
+ BOOL gcOnTransitions;
+
+ ETW::GCLog::ETW_GC_INFO Info;
+ Info.SuspendEE.Reason = reason;
+ Info.SuspendEE.GcCount = (((reason == SUSPEND_FOR_GC) || (reason == SUSPEND_FOR_GC_PREP)) ?
+ (ULONG)GCHeap::GetGCHeap()->GetGcCount() : (ULONG)-1);
+
+ FireEtwGCSuspendEEBegin_V1(Info.SuspendEE.Reason, Info.SuspendEE.GcCount, GetClrInstanceId());
+
+ LOG((LF_SYNC, INFO3, "Suspending the runtime for reason %d\n", reason));
+
+ gcOnTransitions = GC_ON_TRANSITIONS(FALSE); // dont do GC for GCStress 3
+
+ Thread* pCurThread = GetThread();
+
+ DWORD dwSwitchCount = 0;
+
+ // Note: we need to make sure to re-set m_pThreadAttemptingSuspendForGC when we retry
+ // due to the debugger case below!
+retry_for_debugger:
+
+ //
+    // Set variable to indicate that this thread is performing a true GC
+ // This gives this thread priority over other threads that are trying to acquire the ThreadStore Lock
+ // for other reasons.
+ //
+ if (reason == ThreadSuspend::SUSPEND_FOR_GC || reason == ThreadSuspend::SUSPEND_FOR_GC_PREP)
+ {
+ m_pThreadAttemptingSuspendForGC = pCurThread;
+
+ //
+ // also unblock any thread waiting around for this thread to suspend. This prevents us from completely
+ // starving other suspension clients, such as the debugger, which we otherwise would do because of
+ // the priority we just established.
+ //
+ g_pGCSuspendEvent->Set();
+ }
+
+#ifdef TIME_SUSPEND
+ DWORD startAcquire = g_SuspendStatistics.GetTime();
+#endif
+
+ //
+    // Acquire the TSL.  We will hold this until we restart the EE.
+ //
+ ThreadSuspend::LockThreadStore(reason);
+
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.acquireTSL.Accumulate(SuspendStatistics::GetElapsed(startAcquire,
+ g_SuspendStatistics.GetTime()));
+#endif
+
+ //
+ // If we've blocked other threads that are waiting for the ThreadStore lock, unblock them now
+ // (since we already got it). This allows them to get the TSL after we release it.
+ //
+ if ( s_hAbortEvtCache != NULL &&
+ (reason == ThreadSuspend::SUSPEND_FOR_GC || reason == ThreadSuspend::SUSPEND_FOR_GC_PREP))
+ {
+ LOG((LF_SYNC, INFO3, "GC thread is backing out the suspend abort event.\n"));
+ s_hAbortEvt = NULL;
+
+ LOG((LF_SYNC, INFO3, "GC thread is signalling the suspend abort event.\n"));
+ s_hAbortEvtCache->Set();
+ }
+
+ //
+ // Also null-out m_pThreadAttemptingSuspendForGC since it should only matter if s_hAbortEvt is
+ // in play.
+ //
+ if (reason == ThreadSuspend::SUSPEND_FOR_GC || reason == ThreadSuspend::SUSPEND_FOR_GC_PREP)
+ {
+ m_pThreadAttemptingSuspendForGC = NULL;
+ }
+
+ {
+ //
+ // Now we're going to acquire an exclusive lock on managed code execution (including
+        // "manually managed" code in GCX_COOP regions).
+ //
+ // First, we reset the event that we're about to tell other threads to wait for.
+ //
+ GCHeap::GetGCHeap()->GetWaitForGCEvent()->Reset();
+
+ //
+ // Remember that we're the one doing the GC. Actually, maybe we're not doing a GC -
+ // what this really indicates is that we are trying to acquire the "managed execution lock."
+ //
+ {
+ g_pSuspensionThread = pCurThread;
+
+ //
+ // Tell all threads, globally, to wait for WaitForGCEvent.
+ //
+ ThreadStore::TrapReturningThreads(TRUE);
+
+ //
+ // Remember why we're doing this.
+ //
+ m_suspendReason = reason;
+
+ //
+ // There's a GC in progress. (again, not necessarily - we suspend the EE for other reasons.
+ // I wonder how much confusion this has caused....)
+ // It seems like much of the above is redundant. We should investigate reducing the number
+ // of mechanisms we use to indicate that a suspension is in progress.
+ //
+ GCHeap::GetGCHeap()->SetGCInProgress(TRUE);
+
+ //
+ // Gratuitous memory barrier. (may be needed - but I'm not sure why.)
+ //
+ MemoryBarrier();
+
+ ClrFlsSetThreadType (ThreadType_DynamicSuspendEE);
+ }
+
+ HRESULT hr;
+ {
+ _ASSERTE(ThreadStore::HoldingThreadStore() || g_fProcessDetach);
+
+ //
+ // Now that we've instructed all threads to please stop,
+ // go interrupt the ones that are running managed code and force them to stop.
+ // This does not return successfully until all threads have acknowledged that they
+ // will not run managed code.
+ //
+ hr = SuspendRuntime(reason);
+ ASSERT( hr == S_OK || hr == ERROR_TIMEOUT);
+
+#ifdef TIME_SUSPEND
+ if (hr == ERROR_TIMEOUT)
+ g_SuspendStatistics.cntCollideRetry++;
+#endif
+ }
+
+ if (hr == ERROR_TIMEOUT)
+            STRESS_LOG0(LF_SYNC, LL_INFO1000, "SysSuspension collision");
+
+        // If the debugging services are attached, then it's possible
+ // that there is a thread which appears to be stopped at a gc
+ // safe point, but which really is not. If that is the case,
+ // back off and try again.
+
+ // If this is not the GC thread and another thread has triggered
+ // a GC, then we may have bailed out of SuspendRuntime, so we
+ // must resume all of the threads and tell the GC that we are
+ // at a safepoint - since this is the exact same behaviour
+        // that the debugger needs, just use its code.
+ if ((hr == ERROR_TIMEOUT)
+ || Thread::ThreadsAtUnsafePlaces()
+#ifdef DEBUGGING_SUPPORTED // seriously? When would we want to disable debugging support? :)
+ || (CORDebuggerAttached() &&
+ g_pDebugInterface->ThreadsAtUnsafePlaces())
+#endif // DEBUGGING_SUPPORTED
+ )
+ {
+ // In this case, the debugger has stopped at least one
+ // thread at an unsafe place. The debugger will usually
+ // have already requested that we stop. If not, it will
+ // usually either do so shortly, or resume the thread that is
+ // at the unsafe place. Either way, we have to wait for the
+ // debugger to decide what it wants to do.
+ //
+ // In some rare cases, the end-user debugger may have frozen
+ // a thread at a gc-unsafe place, and so we'll loop forever
+ // here and never resolve the deadlock. Unfortunately we can't
+ // easily abort a GC
+ // and so for now we just wait for the debugger to timeout and
+ // hopefully thaw that thread. Maybe instead we should try to
+ // detect this situation sooner (when thread abort is possible)
+ // and notify the debugger with NotifyOfCrossThreadDependency, giving
+ // it the chance to thaw other threads or abort us before getting
+ // wedged in the GC.
+ //
+ // Note: we've still got the ThreadStore lock held.
+ //
+ // <REVISIT>The below manipulation of two static variables (s_hAbortEvtCache and s_hAbortEvt)
+ // is protected by the ThreadStore lock, which we are still holding. But we access these
+ // in ThreadSuspend::LockThreadStore, prior to obtaining the lock. </REVISIT>
+ //
+ LOG((LF_GCROOTS | LF_GC | LF_CORDB,
+ LL_INFO10,
+ "***** Giving up on current GC suspension due "
+ "to debugger or timeout *****\n"));
+
+ if (s_hAbortEvtCache == NULL)
+ {
+ LOG((LF_SYNC, INFO3, "Creating suspend abort event.\n"));
+
+ CLREvent * pEvent = NULL;
+
+ EX_TRY
+ {
+ pEvent = new CLREvent();
+ pEvent->CreateManualEvent(FALSE);
+ s_hAbortEvtCache = pEvent;
+ }
+ EX_CATCH
+ {
+                // Bummer... couldn't init the abort event. It's a shame, but not fatal. We'll simply not use it
+ // on this iteration and try again next time.
+ if (pEvent) {
+ _ASSERTE(!pEvent->IsValid());
+ pEvent->CloseEvent();
+ delete pEvent;
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ }
+
+ if (s_hAbortEvtCache != NULL)
+ {
+ LOG((LF_SYNC, INFO3, "Using suspend abort event.\n"));
+ s_hAbortEvt = s_hAbortEvtCache;
+ s_hAbortEvt->Reset();
+ }
+
+ // Mark that we're done with the gc, so that the debugger can proceed.
+ RestartEE(FALSE, FALSE);
+
+ LOG((LF_GCROOTS | LF_GC | LF_CORDB,
+ LL_INFO10, "The EE is free now...\n"));
+
+            // If someone's trying to suspend *this* thread, this is a good opportunity.
+            // <REVISIT>This call to CatchAtSafePoint is redundant - PulseGCMode already checks this.</REVISIT>
+ if (pCurThread && pCurThread->CatchAtSafePoint())
+ {
+ // <REVISIT> This assert is fired on BGC thread 'cause we
+ // got timeout.</REVISIT>
+ //_ASSERTE((pCurThread->PreemptiveGCDisabled()) || IsGCSpecialThread());
+ pCurThread->PulseGCMode(); // Go suspend myself.
+ }
+ else
+ {
+ // otherwise, just yield so the debugger can finish what it's doing.
+ __SwitchToThread (0, ++dwSwitchCount);
+ }
+
+ goto retry_for_debugger;
+ }
+ }
+ GC_ON_TRANSITIONS(gcOnTransitions);
+
+ FireEtwGCSuspendEEEnd_V1(GetClrInstanceId());
+
+#ifdef TIME_SUSPEND
+ g_SuspendStatistics.EndSuspend(reason == SUSPEND_FOR_GC || reason == SUSPEND_FOR_GC_PREP);
+#endif //TIME_SUSPEND
+}
+
+#ifdef _DEBUG
+BOOL Debug_IsLockedViaThreadSuspension()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GCHeap::IsGCInProgress() &&
+ (dbgOnly_IsSpecialEEThread() ||
+ IsGCSpecialThread() ||
+ GetThread() == ThreadSuspend::GetSuspensionThread());
+}
+#endif
+
+#if defined(TIME_SUSPEND) || defined(GC_STATS)
+
+DWORD StatisticsBase::secondsToDisplay = 0;
+
+DWORD StatisticsBase::GetTime()
+{
+ LIMITED_METHOD_CONTRACT;
+ LARGE_INTEGER large;
+
+ if (divisor == 0)
+ {
+ if (QueryPerformanceFrequency(&large) && (large.QuadPart != 0))
+ divisor = (DWORD)(large.QuadPart / (1000 * 1000)); // microseconds
+ else
+ divisor = 1;
+ }
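+    // For example (illustrative): with a 10 MHz performance counter,
+    // QuadPart / (1000 * 1000) yields divisor == 10, so the division
+    // below converts raw counts into microseconds.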
+
+ if (QueryPerformanceCounter(&large))
+ return (DWORD) (large.QuadPart / divisor);
+ else
+ return 0;
+}
+
+DWORD StatisticsBase::GetElapsed(DWORD start, DWORD stop)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (stop > start)
+ return stop - start;
+
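+    // Worked example (illustrative): if the 32-bit counter wrapped, e.g.
+    // start == 0xFFFFFFF0 and stop == 0x00000010, then
+    // bigStop == 0x100000010 - 0xFFFFFFF0 == 0x20 microseconds elapsed.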
+ INT64 bigStop = stop;
+ bigStop += 0x100000000ULL;
+ bigStop -= start;
+
+ // The assert below was seen firing in stress, so comment it out for now
+ //_ASSERTE(((INT64)(DWORD)bigStop) == bigStop);
+
+ if (((INT64)(DWORD)bigStop) == bigStop)
+ return (DWORD) bigStop;
+ else
+ return 0;
+}
+
+void StatisticsBase::RollOverIfNeeded()
+{
+ LIMITED_METHOD_CONTRACT;
+
+    // Our counters are 32 bits, so in microseconds they overflow at about 4 billion usec,
+    // i.e. roughly 4,000 seconds.  Reset when we get close to overflowing.
+ const DWORD RolloverInterval = 3900;
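+    // (2^32 microseconds is roughly 4295 seconds, so resetting at 3900
+    // seconds leaves a comfortable safety margin.)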
+
+ // every so often, print a summary of our statistics
+ DWORD ticksNow = GetTickCount();
+
+ if (secondsToDisplay == 0)
+ {
+ secondsToDisplay = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_StatsUpdatePeriod);
+ if (secondsToDisplay == 0)
+ secondsToDisplay = 1;
+ else if (secondsToDisplay > RolloverInterval)
+ secondsToDisplay = RolloverInterval;
+ }
+
+ if (ticksNow - startTick > secondsToDisplay * 1000)
+ {
+ DisplayAndUpdate();
+
+ startTick = GetTickCount();
+
+        // Our 32-bit counters overflow at about 4 billion microseconds, i.e. roughly
+        // 4,000 seconds.  Reset when we get close to overflowing.
+ if (++cntDisplay >= (int)(RolloverInterval / secondsToDisplay))
+ Initialize();
+ }
+}
+
+#endif // defined(TIME_SUSPEND) || defined(GC_STATS)
+
+
+#ifdef TIME_SUSPEND
+
+// There is a current and a prior copy of the statistics. This allows us to display deltas per reporting
+// interval, as well as running totals. The 'min' and 'max' values require special treatment. They are
+// Reset (zeroed) in the current statistics when we begin a new interval and they are updated via a
+// comparison with the global min/max.
+SuspendStatistics g_SuspendStatistics;
+SuspendStatistics g_LastSuspendStatistics;
+
+WCHAR* SuspendStatistics::logFileName = NULL;
+
+// Called whenever our timers start to overflow
+void SuspendStatistics::Initialize()
+{
+ LIMITED_METHOD_CONTRACT;
+    // For efficiency's sake we're taking a dependency on the layout of a C++ object
+    // with a vtable.  Protect against violations of our premise:
+ static_assert(offsetof(SuspendStatistics, cntDisplay) == sizeof(void*),
+ "The first field of SuspendStatistics follows the pointer sized vtable");
+
+ int podOffs = offsetof(SuspendStatistics, cntDisplay); // offset of the first POD field
+ memset((BYTE*)(&g_SuspendStatistics)+podOffs, 0, sizeof(g_SuspendStatistics)-podOffs);
+ memset((BYTE*)(&g_LastSuspendStatistics)+podOffs, 0, sizeof(g_LastSuspendStatistics)-podOffs);
+}
+
+// Top of SuspendEE
+void SuspendStatistics::StartSuspend()
+{
+ LIMITED_METHOD_CONTRACT;
+ startSuspend = GetTime();
+}
+
+// Bottom of SuspendEE
+void SuspendStatistics::EndSuspend(BOOL bForGC)
+{
+ LIMITED_METHOD_CONTRACT;
+ DWORD time = GetElapsed(startSuspend, GetTime());
+
+ suspend.Accumulate(time);
+ cntSuspends++;
+ // details on suspends...
+ if (!bForGC)
+ cntNonGCSuspends++;
+ if (GCHeap::GetGCHeap()->IsConcurrentGCInProgress())
+ {
+ cntSuspendsInBGC++;
+ if (!bForGC)
+ cntNonGCSuspendsInBGC++;
+ }
+}
+
+// Time spent in the current suspend (for pro-active debugging)
+DWORD SuspendStatistics::CurrentSuspend()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetElapsed(startSuspend, GetTime());
+}
+
+// Top of RestartEE
+void SuspendStatistics::StartRestart()
+{
+ LIMITED_METHOD_CONTRACT;
+ startRestart = GetTime();
+}
+
+// Bottom of RestartEE
+void SuspendStatistics::EndRestart()
+{
+ LIMITED_METHOD_CONTRACT;
+ DWORD timeNow = GetTime();
+
+ restart.Accumulate(GetElapsed(startRestart, timeNow));
+ cntRestarts++;
+
+ paused.Accumulate(SuspendStatistics::GetElapsed(startSuspend, timeNow));
+
+ RollOverIfNeeded();
+}
+
+// Time spent in the current restart
+DWORD SuspendStatistics::CurrentRestart()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetElapsed(startRestart, GetTime());
+}
+
+void SuspendStatistics::DisplayAndUpdate()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // TODO: this fires at times...
+ // _ASSERTE(cntSuspends == cntRestarts);
+
+ if (logFileName == NULL)
+ {
+ logFileName = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_SuspendTimeLog);
+ }
+
+ FILE* logFile;
+
+ if (logFileName != NULL && (logFile = _wfopen((LPCWSTR)logFileName, W("a"))) != NULL)
+ {
+ if (cntDisplay == 0)
+ fprintf(logFile, "\nSUSP **** Initialize *****\n\n");
+
+ fprintf(logFile, "SUSP **** Summary ***** %d\n", cntDisplay);
+
+ paused.DisplayAndUpdate (logFile, "Paused ", &g_LastSuspendStatistics.paused, cntSuspends, g_LastSuspendStatistics.cntSuspends);
+ suspend.DisplayAndUpdate (logFile, "Suspend", &g_LastSuspendStatistics.suspend, cntSuspends, g_LastSuspendStatistics.cntSuspends);
+ restart.DisplayAndUpdate (logFile, "Restart", &g_LastSuspendStatistics.restart, cntRestarts, g_LastSuspendStatistics.cntSuspends);
+ acquireTSL.DisplayAndUpdate(logFile, "LockTSL", &g_LastSuspendStatistics.acquireTSL, cntSuspends, g_LastSuspendStatistics.cntSuspends);
+ releaseTSL.DisplayAndUpdate(logFile, "Unlock ", &g_LastSuspendStatistics.releaseTSL, cntSuspends, g_LastSuspendStatistics.cntSuspends);
+ osSuspend.DisplayAndUpdate (logFile, "OS Susp", &g_LastSuspendStatistics.osSuspend, cntOSSuspendResume, g_LastSuspendStatistics.cntOSSuspendResume);
+ crawl.DisplayAndUpdate (logFile, "Crawl", &g_LastSuspendStatistics.crawl, cntHijackCrawl, g_LastSuspendStatistics.cntHijackCrawl);
+ wait.DisplayAndUpdate (logFile, "Wait", &g_LastSuspendStatistics.wait, cntWaits, g_LastSuspendStatistics.cntWaits);
+
+ fprintf(logFile, "OS Suspend Failures %d (%d), Wait Timeouts %d (%d), Hijack traps %d (%d)\n",
+ cntFailedSuspends - g_LastSuspendStatistics.cntFailedSuspends, cntFailedSuspends,
+ cntWaitTimeouts - g_LastSuspendStatistics.cntWaitTimeouts, cntWaitTimeouts,
+ cntHijackTrap - g_LastSuspendStatistics.cntHijackTrap, cntHijackTrap);
+
+ fprintf(logFile, "Redirected EIP Failures %d (%d), Collided GC/Debugger/ADUnload %d (%d)\n",
+ cntFailedRedirections - g_LastSuspendStatistics.cntFailedRedirections, cntFailedRedirections,
+ cntCollideRetry - g_LastSuspendStatistics.cntCollideRetry, cntCollideRetry);
+
+ fprintf(logFile, "Suspend: All %d (%d). NonGC: %d (%d). InBGC: %d (%d). NonGCInBGC: %d (%d)\n\n",
+ cntSuspends - g_LastSuspendStatistics.cntSuspends, cntSuspends,
+ cntNonGCSuspends - g_LastSuspendStatistics.cntNonGCSuspends, cntNonGCSuspends,
+ cntSuspendsInBGC - g_LastSuspendStatistics.cntSuspendsInBGC, cntSuspendsInBGC,
+ cntNonGCSuspendsInBGC - g_LastSuspendStatistics.cntNonGCSuspendsInBGC, cntNonGCSuspendsInBGC);
+
+ // close the log file...
+ fclose(logFile);
+ }
+
+ memcpy(&g_LastSuspendStatistics, this, sizeof(g_LastSuspendStatistics));
+
+ suspend.Reset();
+ restart.Reset();
+ paused.Reset();
+ acquireTSL.Reset();
+ releaseTSL.Reset();
+ osSuspend.Reset();
+ crawl.Reset();
+ wait.Reset();
+}
+
+#endif // TIME_SUSPEND
+
+#if defined(TIME_SUSPEND) || defined(GC_STATS)
+
+const char* const str_timeUnit[] = { "usec", "msec", "sec" };
+const int timeUnitFactor[] = { 1, 1000, 1000000 };
+
+void MinMaxTot::DisplayAndUpdate(FILE* logFile, __in_z char *pName, MinMaxTot *pLastOne, int fullCount, int priorCount, timeUnit unit /* = usec */)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ int tuf = timeUnitFactor[unit];
+ int delta = fullCount - priorCount;
+
+ fprintf(logFile, "%s %u (%u) times for %u (%u) %s. Min %u (%u), Max %u (%u), Avg %u (%u)\n",
+ pName,
+ delta, fullCount,
+ (totVal - pLastOne->totVal) / tuf, totVal / tuf,
+ str_timeUnit[(int)unit],
+ minVal / tuf, pLastOne->minVal / tuf,
+ maxVal / tuf, pLastOne->maxVal / tuf,
+ (delta == 0 ? 0 : (totVal - pLastOne->totVal) / delta) / tuf,
+ (fullCount == 0 ? 0 : totVal / fullCount) / tuf);
+
+ if (minVal > pLastOne->minVal && pLastOne->minVal != 0)
+ minVal = pLastOne->minVal;
+
+ if (maxVal < pLastOne->maxVal)
+ maxVal = pLastOne->maxVal;
+}
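+
+// For reference, a line produced by the format string above might look like this (all
+// values invented purely to illustrate the layout):
+//
+//   Suspend 12 (340) times for 5301 (150320) usec. Min 103 (95), Max 1210 (2204), Avg 441 (442)
+//
+// i.e. figures for the current interval come first, with cumulative (or prior-interval)
+// figures in parentheses.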
+
+#endif // defined(TIME_SUSPEND) || defined(GC_STATS)
diff --git a/src/vm/threadsuspend.h b/src/vm/threadsuspend.h
new file mode 100644
index 0000000000..572584104c
--- /dev/null
+++ b/src/vm/threadsuspend.h
@@ -0,0 +1,265 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// threadsuspend.h
+
+#ifndef _THREAD_SUSPEND_H_
+#define _THREAD_SUSPEND_H_
+
+#if defined(TIME_SUSPEND) || defined(GC_STATS)
+
+enum timeUnit { usec, msec, sec };
+
+// running aggregations
+struct MinMaxTot
+{
+ DWORD minVal, maxVal, totVal;
+
+ void Accumulate(DWORD time)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (time < minVal || minVal == 0)
+ minVal = time;
+
+ if (time > maxVal)
+ maxVal = time;
+
+ // We are supposed to anticipate overflow and clear our totals.
+ // However, we still see this assert, so for now let's ignore it...
+ // _ASSERTE(((DWORD) (totVal + time)) > ((DWORD) totVal));
+ if (((DWORD) (totVal + time)) > ((DWORD) totVal))
+ totVal += time;
+ }
+
+ void Reset()
+ {
+ LIMITED_METHOD_CONTRACT;
+ minVal = maxVal = 0;
+ }
+
+ void DisplayAndUpdate(FILE* logFile, __in_z char *pName, MinMaxTot *pLastOne, int fullCount, int priorCount, timeUnit=usec);
+};
+
+// A note about timings. We use QueryPerformanceCounter to measure all timings in raw
+// QueryPerformanceCounter units. During initialization, we compute a divisor to convert
+// those timings into microseconds. This means that we can accumulate about 4,000 seconds
+// (over one hour) of GC time into 32-bit quantities before we must reinitialize.
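+//
+// A minimal sketch of that conversion (an assumed shape for illustration; the real code
+// lives in Initialize() and GetTime(), and the frequency below is invented):
+//
+//   LARGE_INTEGER freq;
+//   QueryPerformanceFrequency(&freq);            // e.g. 3,000,000 counts per second
+//   divisor = (DWORD)(freq.QuadPart / 1000000);  // counts per microsecond
+//
+// GetTime() can then return (DWORD)(counter.QuadPart / divisor); a 32-bit count of
+// microseconds wraps after 2^32 usec (~4,295 sec), hence "about 4,000 seconds" above.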
+
+// A note about layout: derived classes have taken a dependency on cntDisplay being the first
+// field of this class, following the vtable*. When this is violated, a compile-time assert will fire.
+struct StatisticsBase
+{
+ // display the statistics every so many seconds.
+ static DWORD secondsToDisplay;
+
+ // we must re-initialize after an hour of GC time, to avoid overflow. It's more convenient
+ // to re-initialize after an hour of wall-clock time instead.
+ int cntDisplay;
+
+ // convert all timings into microseconds
+ DWORD divisor;
+ DWORD GetTime();
+ static DWORD GetElapsed(DWORD start, DWORD stop);
+
+ // we want to print statistics every 10 seconds - this is to remember the start of the 10 sec interval.
+ DWORD startTick;
+
+ // derived classes must call this regularly (from a logical "end of a cycle")
+ void RollOverIfNeeded();
+
+ virtual void Initialize() = 0;
+ virtual void DisplayAndUpdate() = 0;
+};
+
+#endif // defined(TIME_SUSPEND) || defined(GC_STATS)
+
+#ifdef TIME_SUSPEND
+
+struct SuspendStatistics
+ : public StatisticsBase
+{
+ static WCHAR* logFileName;
+
+ // number of times we call SuspendEE, RestartEE
+ int cntSuspends, cntRestarts;
+
+ int cntSuspendsInBGC, cntNonGCSuspends, cntNonGCSuspendsInBGC;
+
+ // Times for current suspension & restart
+ DWORD startSuspend, startRestart;
+
+ // min, max and total time spent performing a Suspend, a Restart, or Paused from the start of
+ // a Suspend to the end of a Restart. We can compute 'avg' using 'cnt' and 'tot' values.
+ MinMaxTot suspend, restart, paused;
+
+ // We know there can be contention on acquiring the ThreadStoreLock, or yield points when hosted (like
+ // BeginThreadAffinity on the leading edge and EndThreadAffinity on the trailing edge).
+ MinMaxTot acquireTSL, releaseTSL;
+
+ // And if we OS-suspend a thread that is blocking, or perhaps throwing an exception, and is
+ // therefore stuck in the kernel, it could take approximately a second. So track the time
+ // taken for OS suspends.
+ MinMaxTot osSuspend;
+
+ // And if we place a hijack, we need to crawl a stack to do so.
+ MinMaxTot crawl;
+
+ // And waiting can be a significant part of the total suspension time.
+ MinMaxTot wait;
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////
+ // There are some interesting events that are worth counting, because they show where the time is going:
+
+ // number of times we waited on g_pGCSuspendEvent while trying to suspend the EE
+ int cntWaits;
+
+ // and the number of times those Waits timed out rather than being signalled by a cooperating thread
+ int cntWaitTimeouts;
+
+ // number of times we did an OS (or hosted) suspend or resume on a thread
+ int cntOSSuspendResume;
+
+ // number of times we crawled a stack for a hijack
+ int cntHijackCrawl;
+
+ // and the number of times the hijack actually trapped a thread for us
+ int cntHijackTrap;
+
+ // the number of times we redirected a thread in fully interruptible code, by rewriting its EIP
+ // so it will throw to a blocking point
+ int cntRedirections;
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////
+ // And there are some "failure" cases that should never or almost never occur.
+
+ // number of times we have a collision between e.g. Debugger suspension & GC suspension.
+ // In these cases, everyone yields to the GC but at some cost.
+ int cntCollideRetry;
+
+ // number of times the OS or Host was unable to ::SuspendThread a thread for us. This count should be
+ // approximately 0.
+ int cntFailedSuspends;
+
+ // number of times we were unable to redirect a thread by rewriting its register state in a
+ // suspended context. This count should be approximately 0.
+ int cntFailedRedirections;
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////
+ // Internal mechanism:
+
+ virtual void Initialize();
+ virtual void DisplayAndUpdate();
+
+ // Public API
+
+ void StartSuspend();
+ void EndSuspend(BOOL bForGC);
+ DWORD CurrentSuspend();
+
+ void StartRestart();
+ void EndRestart();
+ DWORD CurrentRestart();
+};
+
+extern SuspendStatistics g_SuspendStatistics;
+extern SuspendStatistics g_LastSuspendStatistics;
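+
+// An illustrative (assumed) instrumentation pattern for the Start/End pairs above; the
+// real call sites live in the suspension code in threadsuspend.cpp:
+//
+//   g_SuspendStatistics.StartSuspend();
+//   // ... suspend the EE ...
+//   g_SuspendStatistics.EndSuspend(bForGC);
+//   // ...
+//   g_SuspendStatistics.StartRestart();
+//   // ... restart the EE ...
+//   g_SuspendStatistics.EndRestart();   // also accumulates the total paused time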
+
+#endif // TIME_SUSPEND
+
+BOOL EEGetThreadContext(Thread *pThread, CONTEXT *pContext);
+BOOL EnsureThreadIsSuspended(HANDLE hThread, Thread* pThread);
+
+class ThreadSuspend
+{
+ friend class Thread;
+ friend class ThreadStore;
+
+public:
+ typedef enum
+ {
+ SUSPEND_OTHER = 0,
+ SUSPEND_FOR_GC = 1,
+ SUSPEND_FOR_APPDOMAIN_SHUTDOWN = 2,
+ SUSPEND_FOR_REJIT = 3,
+ SUSPEND_FOR_SHUTDOWN = 4,
+ SUSPEND_FOR_DEBUGGER = 5,
+ SUSPEND_FOR_GC_PREP = 6,
+ SUSPEND_FOR_DEBUGGER_SWEEP = 7 // This must only be used in Thread::SysSweepThreadsForDebug
+ } SUSPEND_REASON;
+
+private:
+ static SUSPEND_REASON m_suspendReason; // This contains the reason
+ // that the runtime was suspended
+ static Thread* m_pThreadAttemptingSuspendForGC;
+
+public:
+ static HRESULT SuspendRuntime(ThreadSuspend::SUSPEND_REASON reason);
+ static void ResumeRuntime(BOOL bFinishedGC, BOOL SuspendSucceded);
+
+private:
+ static CLREvent * g_pGCSuspendEvent;
+
+ // This is true iff we're currently in the process of suspending threads. Once the
+ // threads have been suspended, this is false. This is set via an instance of
+ // SuspendRuntimeInProgressHolder placed in SuspendRuntime, SysStartSuspendForDebug,
+ // and SysSweepThreadsForDebug. Code outside Thread reads this via
+ // Thread::SysIsSuspendInProgress.
+ //
+ // *** THERE IS NO SYNCHRONIZATION AROUND SETTING OR READING THIS ***
+ // This value is only useful for code that can be more efficient if it has a good guess
+ // as to whether we're suspending the runtime. This is NOT to be used by code that
+ // *requires* this knowledge with 100% accuracy in order to behave correctly, unless
+ // you add synchronization yourself. An example of where Thread::SysIsSuspendInProgress
+ // is used is by the profiler API, in ProfToEEInterfaceImpl::DoStackSnapshot. The profiler
+ // API needs to suspend the target thread whose stack it is about to walk. But the profiler
+ // API should avoid this if the runtime is being suspended. Otherwise, the thread trying to
+ // suspend the runtime (thread A) might get stuck when it tries to suspend the thread
+ // executing ProfToEEInterfaceImpl::DoStackSnapshot (thread B), since thread B will be
+ // busy trying to suspend the target of the stack walk (thread C). Bad luck with timing
+ // could cause A to try to suspend B over and over again while B is busy suspending C, and
+ // then suspending D, etc., assuming the profiler does a lot of stack walks. This, in turn,
+ // could cause the deadlock detection assert in Thread::SuspendThread to fire. So the
+ // moral here is that, if B realizes the runtime is being suspended, it can just fail the stackwalk
+ // immediately without trying to do the suspend. But if B occasionally gets false positives or
+ // false negatives from calling Thread::SysIsSuspendInProgress, the worst is we might
+ // delay the EE suspension a little bit, or we might too eagerly fail from ProfToEEInterfaceImpl::DoStackSnapshot.
+ // But there won't be any corruption or AV. More details on the profiler API scenario in VsWhidbey bug 454936.
+ static bool s_fSuspendRuntimeInProgress;
+
+ static void SetSuspendRuntimeInProgress();
+ static void ResetSuspendRuntimeInProgress();
+
+ typedef StateHolder<ThreadSuspend::SetSuspendRuntimeInProgress, ThreadSuspend::ResetSuspendRuntimeInProgress> SuspendRuntimeInProgressHolder;
+
+public:
+ static bool SysIsSuspendInProgress() { return s_fSuspendRuntimeInProgress; }
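+
+ // An illustrative (assumed) use of this hint, modeled on the profiler stack-snapshot
+ // scenario described above; the names below are for exposition only:
+ //
+ //   if (ThreadSuspend::SysIsSuspendInProgress())
+ //       return E_FAIL;                  // fail the stackwalk rather than risk the A/B/C chain
+ //   SuspendTargetThreadForStackWalk();  // hypothetical helper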
+
+public:
+ //suspend all threads
+ static void SuspendEE(SUSPEND_REASON reason);
+ static void RestartEE(BOOL bFinishedGC, BOOL SuspendSucceded); //resume threads.
+
+ static void LockThreadStore(ThreadSuspend::SUSPEND_REASON reason);
+ static void UnlockThreadStore(BOOL bThreadDestroyed = FALSE,
+ ThreadSuspend::SUSPEND_REASON reason = ThreadSuspend::SUSPEND_OTHER);
+
+ static Thread * GetSuspensionThread()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return g_pSuspensionThread;
+ }
+
+private:
+ // This is used to avoid thread starvation if non-GC threads are competing for
+ // the thread store lock when there is a real GC-thread waiting to get in.
+ // This is initialized lazily when the first non-GC thread backs out because of
+ // a waiting GC thread. The s_hAbortEvtCache is used to store the handle when
+ // it is not being used.
+ static CLREventBase *s_hAbortEvt;
+ static CLREventBase *s_hAbortEvtCache;
+
+ static LONG m_DebugWillSyncCount;
+};
+
+#endif // _THREAD_SUSPEND_H_
diff --git a/src/vm/tlbexport.cpp b/src/vm/tlbexport.cpp
new file mode 100644
index 0000000000..cb21fac29a
--- /dev/null
+++ b/src/vm/tlbexport.cpp
@@ -0,0 +1,6342 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//===========================================================================
+// File: TlbExport.CPP
+//
+
+//
+// Notes: Create a TypeLib from COM+ metadata.
+//---------------------------------------------------------------------------
+
+#include "common.h"
+
+#include "comcallablewrapper.h"
+#include "field.h"
+#include "dllimport.h"
+#include "fieldmarshaler.h"
+#include "eeconfig.h"
+#include "comdelegate.h"
+#include <nsutilpriv.h>
+#include <tlbimpexp.h>
+#include <mlang.h>
+#include "tlbexport.h"
+#include "commtmemberinfomap.h"
+#include <corerror.h>
+#include "posterror.h"
+#include "typeparse.h"
+
+#if defined(VALUE_MASK)
+#undef VALUE_MASK
+#endif
+
+#include <guidfromname.h>
+#include <stgpool.h>
+#include <siginfo.hpp>
+#include <typestring.h>
+#include "perfcounters.h"
+#include "comtypelibconverter.h"
+#include "caparser.h"
+
+// Define to export an empty dispinterface for an AutoDispatch IClassX
+#define EMPTY_DISPINTERFACE_ICLASSX
+#ifndef S_USEIUNKNOWN
+#define S_USEIUNKNOWN (HRESULT)2
+#endif
+
+#if defined(_DEBUG) && defined(_TRACE)
+#define TRACE printf
+#else
+#define TRACE NullFn
+inline void NullFn(const char *pf,...) {}
+#endif
+
+#if defined(_DEBUG)
+#define IfFailReport(expr) \
+ do { if(FAILED(hr = (expr))) { DebBreakHr(hr); ReportError(hr); } } while (0)
+#else
+#define IfFailReport(expr) \
+ do { if(FAILED(hr = (expr))) { ReportError(hr); } } while (0)
+#endif
+
+//-----------------------------------------------------------------------------
+//-----------------------------------------------------------------------------
+// This value determines whether, by default, we add the TYPEFLAG_FPROXY bit
+// to exported interfaces. If the value is true, Automation proxy is the
+// default, and we do not set the bit. If the value is false, no Automation
+// proxy is the default and we DO set the bit.
+#define DEFAULT_AUTOMATION_PROXY_VALUE TRUE
+//-----------------------------------------------------------------------------
+
+//*****************************************************************************
+// Constants.
+//*****************************************************************************
+static const WCHAR szRetVal[] = W("pRetVal");
+static const WCHAR szTypeLibExt[] = W(".TLB");
+
+static const WCHAR szTypeLibKeyName[] = W("TypeLib");
+static const WCHAR szClsidKeyName[] = W("CLSID");
+
+static const WCHAR szIClassX[] = W("_%ls");
+static const int cbIClassX = 1;
+static const WCHAR cIClassX = W('_');
+
+static const WCHAR szAlias[] = W("_MIDL_COMPAT_%ls");
+static const int cbAlias = lengthof(szAlias) - 1;
+static const WCHAR szParamName[] = W("p%d");
+
+static const WCHAR szGuidName[] = W("GUID");
+
+static const CHAR szObjectClass[] = "Object";
+static const CHAR szArrayClass[] = "Array";
+static const CHAR szDateTimeClass[] = "DateTime";
+static const CHAR szDecimalClass[] = "Decimal";
+static const CHAR szGuidClass[] = "Guid";
+static const CHAR szStringClass[] = g_StringName;
+static const CHAR szStringBufferClass[] = g_StringBufferName;
+static const CHAR szIEnumeratorClass[] = "IEnumerator";
+static const CHAR szColor[] = "Color";
+
+static const char szRuntime[] = {"System."};
+static const size_t cbRuntime = (lengthof(szRuntime)-1);
+
+static const char szText[] = {"System.Text."};
+static const size_t cbText = (lengthof(szText)-1);
+
+static const char szCollections[] = {"System.Collections."};
+static const size_t cbCollections = (lengthof(szCollections)-1);
+
+static const char szDrawing[] = {"System.Drawing."};
+static const size_t cbDrawing = (lengthof(szDrawing)-1);
+
+// The length of the following string(w/o the terminator): "HKEY_CLASSES_ROOT\\CLSID\\{00000000-0000-0000-0000-000000000000}".
+static const int cCOMCLSIDRegKeyLength = 62;
+
+// The length of the following string(w/o the terminator): "{00000000-0000-0000-0000-000000000000}".
+static const int cCLSIDStrLength = 38;
+
+// {17093CC8-9BD2-11cf-AA4F-304BF89C0001}
+static const GUID GUID_TRANS_SUPPORTED = {0x17093CC8,0x9BD2,0x11cf,{0xAA,0x4F,0x30,0x4B,0xF8,0x9C,0x00,0x01}};
+
+// {00020430-0000-0000-C000-000000000046}
+static const GUID LIBID_STDOLE2 = { 0x00020430, 0x0000, 0x0000, { 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46 } };
+
+// {66504301-BE0F-101A-8BBB-00AA00300CAB}
+static const GUID GUID_OleColor = { 0x66504301, 0xBE0F, 0x101A, { 0x8B, 0xBB, 0x00, 0xAA, 0x00, 0x30, 0x0C, 0xAB } };
+
+// LIBID mscoree
+static const GUID LIBID_MSCOREE = {0x5477469e,0x83b1,0x11d2,{0x8b,0x49,0x00,0xa0,0xc9,0xb7,0xc9,0xc4}};
+
+static const char XXX_DESCRIPTION_TYPE[] = {"System.ComponentModel.DescriptionAttribute"};
+static const char XXX_ASSEMBLY_DESCRIPTION_TYPE[] = {"System.Reflection.AssemblyDescriptionAttribute"};
+
+//*****************************************************************************
+// Table to map COM+ calling conventions to TypeLib calling conventions.
+//*****************************************************************************
+CALLCONV Clr2TlbCallConv[] =
+{
+ CC_STDCALL, // IMAGE_CEE_CS_CALLCONV_DEFAULT = 0x0,
+ CC_CDECL, // IMAGE_CEE_CS_CALLCONV_C = 0x1,
+ CC_STDCALL, // IMAGE_CEE_CS_CALLCONV_STDCALL = 0x2,
+ CC_STDCALL, // IMAGE_CEE_CS_CALLCONV_THISCALL = 0x3,
+ CC_FASTCALL, // IMAGE_CEE_CS_CALLCONV_FASTCALL = 0x4,
+ CC_CDECL, // IMAGE_CEE_CS_CALLCONV_VARARG = 0x5,
+ CC_MAX // IMAGE_CEE_CS_CALLCONV_FIELD = 0x6,
+ // IMAGE_CEE_CS_CALLCONV_MAX = 0x7
+};
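+
+// An illustrative (assumed) lookup against this table; callConvByte is expected to be one
+// of the IMAGE_CEE_CS_CALLCONV_* values commented above (0x0 through 0x6):
+//
+//   CALLCONV cc = Clr2TlbCallConv[callConvByte];
+//   _ASSERTE(cc != CC_MAX);   // field signatures have no TypeLib calling convention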
+
+
+
+// Forward declarations.
+extern HRESULT _FillVariant(MDDefaultValue *pMDDefaultValue, VARIANT *pvar);
+extern HRESULT _FillMDDefaultValue(BYTE bType, void const *pValue, MDDefaultValue *pMDDefaultValue);
+
+//*****************************************************************************
+// Stolen from classlib.
+//*****************************************************************************
+double _TicksToDoubleDate(const __int64 ticks)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ const INT64 MillisPerSecond = 1000;
+ const INT64 MillisPerDay = MillisPerSecond * 60 * 60 * 24;
+ const INT64 TicksPerMillisecond = 10000;
+ const INT64 TicksPerSecond = TicksPerMillisecond * 1000;
+ const INT64 TicksPerMinute = TicksPerSecond * 60;
+ const INT64 TicksPerHour = TicksPerMinute * 60;
+ const INT64 TicksPerDay = TicksPerHour * 24;
+ const int DaysPer4Years = 365 * 4 + 1;
+ const int DaysPer100Years = DaysPer4Years * 25 - 1;
+ const int DaysPer400Years = DaysPer100Years * 4 + 1;
+ const int DaysTo1899 = DaysPer400Years * 4 + DaysPer100Years * 3 - 367;
+ const INT64 DoubleDateOffset = DaysTo1899 * TicksPerDay;
+ const int DaysTo10000 = DaysPer400Years * 25 - 366;
+ const INT64 MaxMillis = DaysTo10000 * MillisPerDay;
+ const int DaysPerYear = 365; // non-leap year
+ const INT64 OADateMinAsTicks = (DaysPer100Years - DaysPerYear) * TicksPerDay;
+
+ // Returns OleAut's zero'ed date ticks.
+ if (ticks == 0)
+ return 0.0;
+
+ if (ticks < OADateMinAsTicks)
+ return 0.0;
+
+ // Currently, our max date == OA's max date (12/31/9999), so we don't
+ // need an overflow check in that direction.
+ __int64 millis = (ticks - DoubleDateOffset) / TicksPerMillisecond;
+ if (millis < 0)
+ {
+ __int64 frac = millis % MillisPerDay;
+ if (frac != 0) millis -= (MillisPerDay + frac) * 2;
+ }
+
+ return (double)millis / MillisPerDay;
+} // double _TicksToDoubleDate()
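+
+// A quick sanity check of the constants above (these values follow from the arithmetic,
+// not from measurement): DaysTo1899 counts the days from year 1 to 1899-12-30, the OLE
+// Automation epoch, so _TicksToDoubleDate(DoubleDateOffset) == 0.0, and ticks for exactly
+// one day later yield 1.0.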
+
+
+//*****************************************************************************
+// Get the name of a typelib or typeinfo, add it to error text.
+//*****************************************************************************
+void PostTypeLibError(
+ IUnknown *pUnk, // An interface on the typeinfo.
+ HRESULT hrT, // The TypeInfo error.
+ HRESULT hrX) // The Exporter error.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pUnk));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr; // A result.
+ WCHAR rcErr[1024]; // Buffer for error message.
+
+ SafeComHolder<ITypeInfo> pITI=0; // The ITypeInfo * on the typeinfo.
+ SafeComHolder<ITypeLib> pITLB=0; // The ITypeLib *.
+ BSTRHolder name=0; // The name of the TypeInfo.
+
+ // Try to get a name.
+ hr = SafeQueryInterface(pUnk, IID_ITypeInfo, (IUnknown**)&pITI);
+ if (SUCCEEDED(hr))
+ {
+ IfFailThrow(pITI->GetDocumentation(MEMBERID_NIL, &name, 0,0,0));
+ }
+ else
+ {
+ hr = SafeQueryInterface(pUnk, IID_ITypeLib, (IUnknown**)&pITLB);
+ if (SUCCEEDED(hr))
+ IfFailThrow(pITLB->GetDocumentation(MEMBERID_NIL, &name, 0,0,0));
+ }
+
+ if (name == NULL)
+ {
+ name = SysAllocString(W("???"));
+ if (name == NULL)
+ COMPlusThrowHR(E_OUTOFMEMORY);
+ }
+
+ // Format the typelib error.
+ FormatRuntimeError(rcErr, lengthof(rcErr), hrT);
+
+ SString strHRHex;
+ strHRHex.Printf("%.8x", hrX);
+
+ COMPlusThrowHR(hrX, hrX, strHRHex, name, rcErr);
+} // void PostTypeLibError()
+
+
+
+
+void ExportTypeLibFromLoadedAssembly(
+ Assembly *pAssembly, // The assembly.
+ LPCWSTR szTlb, // The typelib name.
+ ITypeLib **ppTlb, // If not null, also return ITypeLib here.
+ ITypeLibExporterNotifySink *pINotify,// Notification callback.
+ int flags) // Export flags.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(CheckPointer(szTlb, NULL_OK));
+ PRECONDITION(CheckPointer(ppTlb));
+ PRECONDITION(CheckPointer(pINotify, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ TypeLibExporter exporter; // Exporter object.
+ LPCWSTR szModule=0; // Module filename.
+ WCHAR rcDrive[_MAX_DRIVE];
+ WCHAR rcDir[_MAX_DIR];
+ WCHAR rcFile[_MAX_FNAME];
+ WCHAR rcTlb[_MAX_PATH+5]; // Buffer for the tlb filename.
+ int bDynamic=0; // If true, dynamic module.
+ Module *pModule; // The Assembly's SecurityModule.
+
+ pModule = pAssembly->GetManifestModule();
+ _ASSERTE(pModule);
+
+ // Retrieve the module filename.
+ szModule = pModule->GetPath();
+ PREFIX_ASSUME(szModule != NULL);
+
+ // Make sure the assembly has not been imported from COM.
+ if (pAssembly->IsImportedFromTypeLib())
+ COMPlusThrowHR(TLBX_E_CIRCULAR_EXPORT, (UINT)TLBX_E_CIRCULAR_EXPORT, W(""), szModule, NULL);
+
+ // If the module is dynamic then it will not have a file name. We
+ // assign a dummy name for the typelib name (if the scope does not have
+ // a name), but won't create a typelib on disk.
+ if (*szModule == 0)
+ {
+ bDynamic = TRUE;
+ szModule = W("Dynamic");
+ }
+
+ // Create the typelib name, if none provided. Don't create one for Dynamic modules.
+ if (!szTlb || !*szTlb)
+ {
+ if (bDynamic)
+ szTlb = W("");
+ else
+ {
+ SplitPath(szModule, rcDrive, _MAX_DRIVE, rcDir, _MAX_DIR, rcFile, _MAX_FNAME, 0, 0);
+ MakePath(rcTlb, rcDrive, rcDir, rcFile, szTypeLibExt);
+ szTlb = rcTlb;
+ }
+ }
+
+ // Do the conversion.
+ exporter.Convert(pAssembly, szTlb, pINotify, flags);
+
+ // Get a copy of the ITypeLib*
+ IfFailThrow(exporter.GetTypeLib(IID_ITypeLib, (IUnknown**)ppTlb));
+} // void ExportTypeLibFromLoadedAssembly()
+
+
+HRESULT ExportTypeLibFromLoadedAssemblyNoThrow(
+ Assembly *pAssembly, // The assembly.
+ LPCWSTR szTlb, // The typelib name.
+ ITypeLib **ppTlb, // If not null, also return ITypeLib here.
+ ITypeLibExporterNotifySink *pINotify,// Notification callback.
+ int flags) // Export flags.
+{
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ ExportTypeLibFromLoadedAssembly(pAssembly,
+ szTlb,
+ ppTlb,
+ pINotify,
+ flags);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
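+
+// An illustrative (assumed) call site for the no-throw wrapper; a NULL typelib name means
+// "derive <module>.TLB from the module path", and a NULL sink selects the default notifier:
+//
+//   ITypeLib *pTlb = NULL;
+//   HRESULT hr = ExportTypeLibFromLoadedAssemblyNoThrow(pAssembly, NULL, &pTlb, NULL, 0);
+//   if (SUCCEEDED(hr))
+//   {
+//       // ... use pTlb ...
+//       pTlb->Release();
+//   }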
+
+//*****************************************************************************
+// Default notification class.
+//*****************************************************************************
+class CDefaultNotify : public ITypeLibExporterNotifySink
+{
+public:
+ virtual HRESULT __stdcall ReportEvent(
+ ImporterEventKind EventKind, // Type of event.
+ long EventCode, // HR of event.
+ BSTR EventMsg) // Text message for event.
+ {
+ LIMITED_METHOD_CONTRACT;
+ return S_OK;
+ } // virtual HRESULT __stdcall ReportEvent()
+
+ //-------------------------------------------------------------------------
+ virtual HRESULT __stdcall ResolveRef(
+ IUnknown *Asm,
+ IUnknown **pRetVal)
+ {
+ CONTRACTL
+ {
+ DISABLED(NOTHROW);
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(Asm));
+ PRECONDITION(CheckPointer(pRetVal));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ Assembly *pAssembly=0; // The referenced Assembly.
+ ITypeLib *pTLB=0; // The created TypeLib.
+ MethodTable *pAssemblyClass = NULL; //@todo -- get this.
+ LPVOID RetObj = NULL; // The object to return.
+
+ BEGIN_EXTERNAL_ENTRYPOINT(&hr)
+ {
+ {
+ GCX_COOP_THREAD_EXISTS(GET_THREAD());
+ // Get the Referenced Assembly from the IUnknown.
+ ASSEMBLYREF asmRef = NULL;
+ GCPROTECT_BEGIN(asmRef);
+ GetObjectRefFromComIP((OBJECTREF*)&asmRef, Asm, pAssemblyClass);
+ pAssembly = asmRef->GetAssembly();
+ GCPROTECT_END();
+ }
+
+ // Default resolution provides no notification, flags are 0.
+ ExportTypeLibFromLoadedAssembly(pAssembly, 0, &pTLB, 0 /*pINotify*/, 0 /* flags*/);
+ }
+ END_EXTERNAL_ENTRYPOINT;
+
+ *pRetVal = pTLB;
+
+ return hr;
+ } // virtual HRESULT __stdcall ResolveRef()
+
+ //-------------------------------------------------------------------------
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(// S_OK or E_NOINTERFACE
+ REFIID riid, // Desired interface.
+ void **ppvObject) // Put interface pointer here.
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(ppvObject));
+ }
+ CONTRACTL_END;
+
+ *ppvObject = 0;
+ if (riid == IID_IUnknown || riid == IID_ITypeLibExporterNotifySink)
+ {
+ *ppvObject = this;
+ return S_OK;
+ }
+ return E_NOINTERFACE;
+ } // virtual HRESULT QueryInterface()
+
+ //-------------------------------------------------------------------------
+ virtual ULONG STDMETHODCALLTYPE AddRef(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ } // virtual ULONG STDMETHODCALLTYPE AddRef()
+
+ //-------------------------------------------------------------------------
+ virtual ULONG STDMETHODCALLTYPE Release(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return 1;
+ } // virtual ULONG STDMETHODCALLTYPE Release()
+};
+
+static CDefaultNotify g_Notify;
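+
+// Note: g_Notify has process lifetime, which is why the AddRef/Release implementations
+// above are no-ops that return a constant reference count.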
+
+//*****************************************************************************
+// CTOR/DTOR.
+//*****************************************************************************
+TypeLibExporter::TypeLibExporter()
+ : m_pICreateTLB(0),
+ m_pIUnknown(0),
+ m_pIDispatch(0),
+ m_pGuid(0),
+ m_hIUnknown(-1)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#if defined(_DEBUG)
+ static int i;
+ ++i; // So a breakpoint can be set.
+#endif
+} // TypeLibExporter::TypeLibExporter()
+
+TypeLibExporter::~TypeLibExporter()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ ReleaseResources();
+} // TypeLibExporter::~TypeLibExporter()
+
+//*****************************************************************************
+// Get an interface pointer from the ICreateTypeLib interface.
+//*****************************************************************************
+HRESULT TypeLibExporter::GetTypeLib(
+ REFGUID iid,
+ IUnknown **ppITypeLib)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ppITypeLib));
+ }
+ CONTRACTL_END;
+
+ return SafeQueryInterface(m_pICreateTLB, iid, (IUnknown**)ppITypeLib);
+} // HRESULT TypeLibExporter::GetTypeLib()
+
+//*****************************************************************************
+// LayOut a TypeLib. Call LayOut on all ICreateTypeInfo2s first.
+//*****************************************************************************
+void TypeLibExporter::LayOut() // S_OK or error.
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK; // A result.
+ int cTypes; // Count of exported types.
+ int ix; // Loop control.
+ CExportedTypesInfo *pData; // For iterating the entries.
+
+ cTypes = m_Exports.Count();
+
+ // Call LayOut on all ICreateTypeInfo2*s.
+ for (ix=0; ix<cTypes; ++ix)
+ {
+ pData = m_Exports[ix];
+ if (pData->pCTI && FAILED(hr = pData->pCTI->LayOut()))
+ PostTypeLibError(pData->pCTI, hr, TLBX_E_LAYOUT_ERROR);
+ }
+
+ for (ix=0; ix<cTypes; ++ix)
+ {
+ pData = m_Exports[ix];
+ if (pData->pCTIClassItf && FAILED(hr = pData->pCTIClassItf->LayOut()))
+ PostTypeLibError(pData->pCTIClassItf, hr, TLBX_E_LAYOUT_ERROR);
+ }
+
+ // Repeat for injected types.
+ cTypes = m_InjectedExports.Count();
+ for (ix=0; ix<cTypes; ++ix)
+ {
+ pData = m_InjectedExports[ix];
+ if (pData->pCTI && FAILED(hr = pData->pCTI->LayOut()))
+ PostTypeLibError(pData->pCTI, hr, TLBX_E_LAYOUT_ERROR);
+ }
+
+ for (ix=0; ix<cTypes; ++ix)
+ {
+ pData = m_InjectedExports[ix];
+ if (pData->pCTIClassItf && FAILED(hr = pData->pCTIClassItf->LayOut()))
+ PostTypeLibError(pData->pCTIClassItf, hr, TLBX_E_LAYOUT_ERROR);
+ }
+} // void TypeLibExporter::LayOut()
+
+//*****************************************************************************
+// Release all pointers.
+//*****************************************************************************
+void TypeLibExporter::ReleaseResources()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Release the ITypeInfo* pointers.
+ m_Exports.Clear();
+ m_InjectedExports.Clear();
+
+ // Clean up the created TLB.
+ SafeRelease(m_pICreateTLB);
+ m_pICreateTLB = 0;
+
+ // Clean up the ITypeInfo*s for well-known interfaces.
+ SafeRelease(m_pIUnknown);
+ m_pIUnknown = 0;
+
+ SafeRelease(m_pIDispatch);
+ m_pIDispatch = 0;
+
+ SafeRelease(m_pGuid);
+ m_pGuid = 0;
+} // void TypeLibExporter::ReleaseResources()
+
+//*****************************************************************************
+// Enumerate the Types in a Module, add to the list.
+//*****************************************************************************
+void TypeLibExporter::AddModuleTypes(
+ Module *pModule) // The module to convert.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ ULONG cTD; // Count of typedefs.
+ mdTypeDef td; // A TypeDef.
+ MethodTable *pClass; // A MethodTable for a TypeDef.
+ ULONG ix; // Loop control.
+ CExportedTypesInfo *pExported; // For adding classes to the exported types cache.
+ CExportedTypesInfo sExported; // For adding classes to the exported types cache.
+
+
+ // Convert all the types visible to COM.
+ // Get an enumerator on TypeDefs in the scope.
+ HENUMInternalHolder eTD(pModule->GetMDImport());
+ eTD.EnumTypeDefInit();
+ cTD = pModule->GetMDImport()->EnumTypeDefGetCount(&eTD);
+
+ // Add all the classes to the hash.
+ for (ix=0; ix<cTD; ++ix)
+ {
+ ZeroHolder zhType = &m_ErrorContext.m_pScope; // Clear error reporting info.
+
+ // Get the TypeDef.
+ if (!pModule->GetMDImport()->EnumTypeDefNext(&eTD, &td))
+ IfFailReport(E_UNEXPECTED);
+
+ IMDInternalImport* pInternalImport = pModule->GetMDImport();
+
+ // Error reporting info.
+ m_ErrorContext.m_tkType = td;
+ m_ErrorContext.m_pScope = pModule->GetMDImport();
+
+ // Get the class, perform the step.
+ pClass = LoadClass(pModule, td);
+
+ // Enumerate the formal type parameters
+ HENUMInternal hEnumGenericPars;
+ hr = pInternalImport->EnumInit(mdtGenericParam, td, &hEnumGenericPars);
+ if (SUCCEEDED(hr))
+ {
+ DWORD numGenericArgs = pInternalImport->EnumGetCount(&hEnumGenericPars);
+ // skip generic classes
+ if( numGenericArgs > 0 )
+ {
+ // We'll only warn if the type is marked ComVisible.
+ if (SpecialIsGenericTypeVisibleFromCom(TypeHandle(pClass)))
+ ReportWarning(TLBX_I_GENERIC_TYPE, TLBX_I_GENERIC_TYPE);
+
+ continue;
+ }
+ }
+
+ // If the TlbExporter_OldNames flag is set, keep even non-COM-visible types in the list
+ // of exported types (for name decoration) by skipping the visibility check below.
+ if ((m_flags & TlbExporter_OldNames) == 0)
+ {
+ // If the type isn't visible from COM, don't add it to the list of exports.
+ if (!IsTypeVisibleFromCom(TypeHandle(pClass)))
+ continue;
+ }
+
+ // See if this class is already in the list.
+ sExported.pClass = pClass;
+ pExported = m_Exports.Find(&sExported);
+ if (pExported != 0)
+ continue;
+
+ // New class, add to list.
+ pExported = m_Exports.Add(&sExported);
+ if (!pExported)
+ IfFailReport(E_OUTOFMEMORY);
+
+ // Prefix can't tell that IfFailReport will actually throw an exception if pExported is NULL so
+ // let's tell it explicitly that if we reach this point pExported will not be NULL.
+ PREFIX_ASSUME(pExported != NULL);
+ pExported->pClass = pClass;
+ pExported->pCTI = 0;
+ pExported->pCTIClassItf = 0;
+ }
+} // void TypeLibExporter::AddModuleTypes()
+
+//*****************************************************************************
+// Enumerate the Modules in an assembly, add the types to the list.
+//*****************************************************************************
+void TypeLibExporter::AddAssemblyTypes(
+ Assembly *pAssembly) // The assembly to convert.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pAssembly));
+ }
+ CONTRACTL_END;
+
+ Module *pManifestModule; // A module in the assembly.
+ mdFile mf; // A file token.
+
+ if (pAssembly->GetManifestImport())
+ {
+ // Enumerator over the modules of the assembly.
+ HENUMInternalHolder phEnum(pAssembly->GetManifestImport());
+ phEnum.EnumInit(mdtFile, mdTokenNil);
+
+ // Get the module for the assembly.
+ pManifestModule = pAssembly->GetManifestModule();
+ AddModuleTypes(pManifestModule);
+
+ while (pAssembly->GetManifestImport()->EnumNext(&phEnum, &mf))
+ {
+ DomainFile *pDomainFile = pAssembly->GetManifestModule()->LoadModule(GetAppDomain(), mf, FALSE);
+
+ if (pDomainFile != NULL && !pDomainFile->GetFile()->IsResource())
+ AddModuleTypes(pDomainFile->GetModule());
+ }
+ }
+} // void TypeLibExporter::AddAssemblyTypes()
+
+//*****************************************************************************
+// Convert COM+ metadata to a typelib.
+//*****************************************************************************
+void TypeLibExporter::Convert(
+ Assembly *pAssembly, // The Assembly to convert
+ LPCWSTR szTlbName, // Name of resulting TLB
+ ITypeLibExporterNotifySink *pNotify,// Notification callback.
+ int flags) // Conversion flags
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pAssembly));
+ PRECONDITION(CheckPointer(szTlbName));
+ PRECONDITION(CheckPointer(pNotify, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ ULONG i; // Loop control.
+ SString sName; // Library name.
+ GUID guid; // Library guid.
+ VARIANT vt = {0}; // Variant for ExportedFromComPlus.
+ CQuickArray<WCHAR> qLocale; // Wide string for locale.
+ LCID lcid; // LCID for typelib, default 0.
+
+ // Set PerfCounters
+ COUNTER_ONLY(GetPerfCounters().m_Interop.cTLBExports++);
+
+ SafeComHolder<IMultiLanguage> pIML=0; // For locale->lcid conversion.
+ SafeComHolder<ITypeLib> pITLB=0; // TypeLib for IUnknown, IDispatch.
+ BSTRHolder szTIName=0; // Name of a TypeInfo.
+ BSTRHolder szDescription=0; // Assembly Description.
+
+ // Error reporting information.
+ m_ErrorContext.m_szAssembly = pAssembly->GetSimpleName();
+
+ m_flags = flags;
+
+ // Set the callback.
+ m_pNotify = pNotify ? pNotify : &g_Notify;
+
+ // If we haven't set 32-bit or 64-bit export yet, set it now with defaults.
+ UpdateBitness(pAssembly);
+
+ // Check the bitness of the assembly against our output bitness
+ IfFailReport(CheckBitness(pAssembly));
+
+ // Get some well known TypeInfos.
+ GCX_PREEMP();
+
+ BSTR wzPath; // Path of the registered stdole2 typelib.
+ IfFailReport(QueryPathOfRegTypeLib(LIBID_STDOLE2, -1, -1, 0, &wzPath));
+
+ if (IsExportingAs64Bit())
+ {
+ hr = LoadTypeLibEx(wzPath, (REGKIND)(REGKIND_NONE | LOAD_TLB_AS_64BIT), &pITLB);
+ }
+ else
+ {
+ hr = LoadTypeLibEx(wzPath, (REGKIND)(REGKIND_NONE | LOAD_TLB_AS_32BIT), &pITLB);
+ }
+
+ // If we failed to load StdOle2.tlb, then we're probably on a downlevel platform (< XP)
+ // so we'll just load whatever we have...note that at this point, cross-compile is not an option.
+ if (FAILED(hr))
+ {
+ IfFailReport(LoadRegTypeLib(LIBID_STDOLE2, -1, -1, 0, &pITLB));
+ }
+
+ IfFailReport(pITLB->GetTypeInfoOfGuid(IID_IUnknown, &m_pIUnknown));
+ IfFailReport(pITLB->GetTypeInfoOfGuid(IID_IDispatch, &m_pIDispatch));
+
+ // Look for GUID (which unfortunately has no GUID).
+ for (i=0; i<pITLB->GetTypeInfoCount() && !m_pGuid; ++i)
+ {
+ IfFailReport(pITLB->GetDocumentation(i, &szTIName, 0, 0, 0));
+ if (SString::_wcsicmp(szTIName, szGuidName) == 0)
+ IfFailReport(pITLB->GetTypeInfo(i, &m_pGuid));
+ }
+
+ // Create the output typelib.
+
+ // Win2K: passing in too long a filename triggers a nasty buffer overrun bug
+ // when the SaveAll() method is called. We'll avoid triggering this here.
+ //
+ if (wcslen(szTlbName) > MAX_PATH)
+ IfFailReport(HRESULT_FROM_WIN32(ERROR_FILENAME_EXCED_RANGE));
+
+ // Reverting to old behavior here until we can fix up the vtable offsets as well.
+ // Set the SYSKIND based on the 64bit/32bit switches
+ if (IsExportingAs64Bit())
+ {
+ IfFailReport(CreateTypeLib2(SYS_WIN64, szTlbName, &m_pICreateTLB));
+ }
+ else
+ {
+ IfFailReport(CreateTypeLib2(SYS_WIN32, szTlbName, &m_pICreateTLB));
+ }
+
+ // Set the typelib GUID.
+ IfFailReport(GetTypeLibGuidForAssembly(pAssembly, &guid));
+ IfFailReport(m_pICreateTLB->SetGuid(guid));
+
+ // Retrieve the type library's version number.
+ USHORT usMaj, usMin;
+ IfFailReport(GetTypeLibVersionForAssembly(pAssembly, &usMaj, &usMin));
+
+ // Set the TLB's version number.
+ IfFailReport(m_pICreateTLB->SetVersion(usMaj, usMin));
+
+ // Set the LCID. If no locale, set to 0, otherwise typelib defaults to 409.
+ lcid = 0;
+ LPCUTF8 pLocale = pAssembly->GetLocale();
+ if (pLocale && *pLocale)
+ {
+ // Have to build a BSTR, not just a unicode string (i.e. allocate a
+ // DWORD of length information at a negative offset from the string
+ // start).
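+ // Illustrative layout (assumed): [DWORD character count][WCHAR data...], so
+ // &qLocale.Ptr()[2] is the BSTR payload and the count sits at a negative offset from it.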
+ _ASSERTE((sizeof(WCHAR) * 2) == sizeof(DWORD));
+ hr = qLocale.ReSizeNoThrow(sizeof(DWORD));
+ if (SUCCEEDED(hr))
+ hr = Utf2Quick(pLocale, qLocale, 2);
+ if (SUCCEEDED(hr))
+ {
+ *(DWORD*)qLocale.Ptr() = (DWORD)wcslen(&qLocale.Ptr()[2]);
+ hr = ::CoCreateInstance(CLSID_CMultiLanguage, NULL, CLSCTX_INPROC_SERVER, IID_IMultiLanguage, (void**)&pIML);
+ }
+ if (SUCCEEDED(hr))
+ pIML->GetLcidFromRfc1766(&lcid, (BSTR)&qLocale.Ptr()[2]);
+ }
+ HRESULT hr2 = m_pICreateTLB->SetLcid(lcid);
+ if (hr2 == TYPE_E_UNKNOWNLCID)
+ {
+ ReportWarning(TYPE_E_UNKNOWNLCID, TYPE_E_UNKNOWNLCID);
+ hr2 = m_pICreateTLB->SetLcid(0);
+ }
+ IfFailReport(hr2);
+
+ // Get the list of types in the assembly.
+ AddAssemblyTypes(pAssembly);
+ m_Exports.InitArray();
+
+ // Get the assembly value for AutomationProxy.
+ m_bAutomationProxy = DEFAULT_AUTOMATION_PROXY_VALUE;
+ GetAutomationProxyAttribute(pAssembly->GetManifestImport(), TokenFromRid(1, mdtAssembly), &m_bAutomationProxy);
+
+ // Pre load any caller-specified names into the typelib namespace.
+ PreLoadNames();
+
+ // Convert all the types.
+ ConvertAllTypeDefs();
+
+ // Set library level properties.
+ sName.AppendUTF8(pAssembly->GetSimpleName());
+
+ // Make it a legal typelib name.
+ SString replaceChar = SL(W("_"));
+
+ SString::Iterator iter = sName.Begin();
+ while (sName.Find(iter, W(".")))
+ sName.Replace(iter, 1, replaceChar);
+
+ iter = sName.Begin();
+ while (sName.Find(iter, W(" ")))
+ sName.Replace(iter, 1, replaceChar);
+
+ IfFailReport(m_pICreateTLB->SetName((LPOLESTR)sName.GetUnicode()));
+
+ // If the assembly has a description CA, set that as the library Doc string.
+ if (GetStringCustomAttribute(pAssembly->GetManifestImport(), XXX_ASSEMBLY_DESCRIPTION_TYPE, TokenFromRid(1, mdtAssembly), (BSTR &)szDescription))
+ m_pICreateTLB->SetDocString((LPWSTR)szDescription);
+
+ // Mark this typelib as exported.
+ LPCWSTR pszFullName;
+ {
+ //@todo: exceptions?
+ StackSString name;
+ pAssembly->GetDisplayName(name);
+ pszFullName = name.GetUnicode();
+
+ vt.vt = VT_BSTR;
+ vt.bstrVal = SysAllocString(pszFullName);
+ if (vt.bstrVal == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+ }
+
+ IfFailReport(m_pICreateTLB->SetCustData(GUID_ExportedFromComPlus, &vt));
+
+ // Lay out the TypeInfos.
+ LayOut();
+
+ if(vt.bstrVal)
+ {
+ SysFreeString(vt.bstrVal);
+ vt.bstrVal = NULL;
+ }
+
+} // void TypeLibExporter::Convert()
+
+
+void TypeLibExporter::UpdateBitness(Assembly* pAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // If one has already been set, just return.
+ if ((TlbExportAs64Bit(m_flags)) || (TlbExportAs32Bit(m_flags)))
+ return;
+
+ // If we are exporting a dynamic assembly, just go with the machine type we're running on.
+ if (pAssembly->IsDynamic())
+ {
+#ifdef _WIN64
+ m_flags |= TlbExporter_ExportAs64Bit;
+#else
+ m_flags |= TlbExporter_ExportAs32Bit;
+#endif
+ return;
+ }
+
+ // Get the assembly info
+ PEFile* pPEFile = pAssembly->GetDomainAssembly()->GetFile();
+ _ASSERTE(pPEFile);
+
+ DWORD PEKind, MachineKind;
+ pPEFile->GetPEKindAndMachine(&PEKind, &MachineKind);
+
+ // Based on the assembly flags, determine a bitness to export with.
+ // Algorithm base copied from ComputeProcArchFlags() in bcl\system\reflection\assembly.cs
+ if ((PEKind & pe32Plus) == pe32Plus)
+ {
+ switch (MachineKind)
+ {
+ case IMAGE_FILE_MACHINE_IA64:
+ case IMAGE_FILE_MACHINE_AMD64:
+ m_flags |= TlbExporter_ExportAs64Bit;
+ break;
+
+ case IMAGE_FILE_MACHINE_I386:
+ if ((PEKind & peILonly) == peILonly)
+ {
+#ifdef _WIN64
+ m_flags |= TlbExporter_ExportAs64Bit;
+#else
+ m_flags |= TlbExporter_ExportAs32Bit;
+#endif
+ }
+ else
+ {
+ _ASSERTE(!"Invalid MachineKind / PEKind pair on the assembly!");
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Unknown MachineKind!");
+ }
+ }
+ else if (MachineKind == IMAGE_FILE_MACHINE_I386)
+ {
+ if ((PEKind & pe32BitRequired) == pe32BitRequired)
+ {
+ m_flags |= TlbExporter_ExportAs32Bit;
+ }
+ else if ((PEKind & peILonly) == peILonly)
+ {
+#ifdef _WIN64
+ m_flags |= TlbExporter_ExportAs64Bit;
+#else
+ m_flags |= TlbExporter_ExportAs32Bit;
+#endif
+ }
+ else
+ {
+ m_flags |= TlbExporter_ExportAs32Bit;
+ }
+ }
+ else if (MachineKind == IMAGE_FILE_MACHINE_ARMNT)
+ {
+ m_flags |= TlbExporter_ExportAs32Bit;
+ }
+ else
+ {
+#ifdef _WIN64
+ m_flags |= TlbExporter_ExportAs64Bit;
+#else
+ m_flags |= TlbExporter_ExportAs32Bit;
+#endif
+ }
+}
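+
+// In summary (restating the logic above): PE32+ images export as 64-bit; x86 images that
+// are 32-bit-required, or neither IL-only nor 32-bit-required, export as 32-bit; and
+// architecture-neutral IL-only images follow the bitness of the running process.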
+
+
+// Find out if our assembly / bitness combination is valid.
+HRESULT TypeLibExporter::CheckBitness(Assembly* pAssembly)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (pAssembly->IsDynamic())
+ return S_OK;
+
+ PEFile* pPEFile = pAssembly->GetDomainAssembly()->GetFile();
+ if (pPEFile == NULL)
+ return TLBX_E_BITNESS_MISMATCH;
+
+ DWORD PEKind, MachineKind;
+ pPEFile->GetPEKindAndMachine(&PEKind, &MachineKind);
+
+ // Neutral assembly?
+ if ((PEKind & peILonly) == peILonly)
+ return S_OK;
+
+ if (IsExportingAs64Bit())
+ {
+ if ((MachineKind == IMAGE_FILE_MACHINE_IA64) || (MachineKind == IMAGE_FILE_MACHINE_AMD64))
+ return S_OK;
+ }
+ else
+ {
+ if ((MachineKind == IMAGE_FILE_MACHINE_I386) || (MachineKind == IMAGE_FILE_MACHINE_ARMNT))
+ return S_OK;
+ }
+
+ return TLBX_E_BITNESS_MISMATCH;
+}
+
+
+//*****************************************************************************
+//*****************************************************************************
+void TypeLibExporter::PreLoadNames()
+{
+ STANDARD_VM_CONTRACT;
+
+ SafeComHolder<ITypeLibExporterNameProvider> pINames = 0;
+ HRESULT hr = S_OK; // A result.
+ SafeArrayHolder pNames = 0; // Names provided by caller.
+ VARTYPE vt; // Type of data.
+ int lBound, uBound, ix; // Loop control.
+ BSTR name;
+
+ // Look for names provider, but don't require it.
+ hr = SafeQueryInterface(m_pNotify, IID_ITypeLibExporterNameProvider, (IUnknown**)&pINames);
+ if (FAILED(hr))
+ return;
+
+ // There is a provider, so get the list of names.
+ IfFailReport(pINames->GetNames(&pNames));
+
+ // Better have a single dimension array of strings.
+ if (pNames == 0)
+ IfFailReport(TLBX_E_BAD_NAMES);
+
+ if (SafeArrayGetDim(pNames) != 1)
+ IfFailReport(TLBX_E_BAD_NAMES);
+
+ IfFailReport(SafeArrayGetVartype(pNames, &vt));
+ if (vt != VT_BSTR)
+ IfFailReport(TLBX_E_BAD_NAMES);
+
+ // Get names bounds.
+ IfFailReport(SafeArrayGetLBound(pNames, 1, (LONG*)&lBound));
+ IfFailReport(SafeArrayGetUBound(pNames, 1, (LONG*)&uBound));
+
+ // Enumerate the names.
+ for (ix=lBound; ix<=uBound; ++ix)
+ {
+ IfFailReport(SafeArrayGetElement(pNames, (LONG*)&ix, (void*)&name));
+ m_pICreateTLB->SetName(name);
+ }
+}
+
+//*****************************************************************************
+//*****************************************************************************
+void TypeLibExporter::FormatErrorContextString(
+ CErrorContext *pContext, // The context to format.
+ SString *pOut) // Buffer to format into.
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pContext));
+ PRECONDITION(CheckPointer(pOut));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ SString *pBuf;
+ SString ssInternal;
+
+ // Nested contexts?
+ if (pContext->m_prev == 0)
+ { // No, just convert into caller's buffer.
+ pBuf = pOut;
+ }
+ else
+ { // Yes, convert locally, then concatenate.
+ pBuf = &ssInternal;
+ }
+
+ // More?
+ if (pContext->m_pScope)
+ {
+ // Check whether type is nested (which requires more formatting).
+ DWORD dwFlags;
+ IfFailReport(pContext->m_pScope->GetTypeDefProps(pContext->m_tkType, &dwFlags, 0));
+
+ if (IsTdNested(dwFlags))
+ {
+ TypeNameBuilder tnb(pBuf, TypeNameBuilder::ParseStateNAME);
+ TypeString::AppendNestedTypeDef(tnb, pContext->m_pScope, pContext->m_tkType);
+ }
+ else
+ TypeString::AppendTypeDef(*pBuf, pContext->m_pScope, pContext->m_tkType);
+
+ // Member?
+ if (pContext->m_szMember)
+ {
+ pBuf->Append(NAMESPACE_SEPARATOR_WSTR);
+
+ pBuf->AppendUTF8(pContext->m_szMember);
+
+ // Param?
+ if (pContext->m_szParam)
+ {
+ pBuf->Append(W("("));
+ pBuf->AppendUTF8(pContext->m_szParam);
+ pBuf->Append(W(")"));
+ }
+ else if (pContext->m_ixParam > -1)
+ {
+ pBuf->AppendPrintf(W("(#%d)"), pContext->m_ixParam);
+ }
+ } // member
+
+ pBuf->Append(ASSEMBLY_SEPARATOR_WSTR);
+ } // Type name
+
+ pBuf->AppendUTF8(pContext->m_szAssembly);
+
+ // If there is a nested context, put it all together.
+ if (pContext->m_prev)
+ {
+ // Format the context this one was nested inside.
+ SString ssOuter;
+ FormatErrorContextString(pContext->m_prev, &ssOuter);
+
+ // Put them together with text.
+ LPWSTR pUnicodeBuffer = pOut->OpenUnicodeBuffer(1024);
+ FormatRuntimeError(pUnicodeBuffer, 1024, TLBX_E_CTX_NESTED, pBuf->GetUnicode(), ssOuter.GetUnicode());
+ pOut->CloseBuffer((COUNT_T)wcslen(pUnicodeBuffer));
+ }
+} // void TypeLibExporter::FormatErrorContextString()
+
+//*****************************************************************************
+//*****************************************************************************
+void TypeLibExporter::FormatErrorContextString(
+ SString *pBuf) // Buffer to format into.
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pBuf));
+ }
+ CONTRACTL_END;
+
+ FormatErrorContextString(&m_ErrorContext, pBuf);
+} // void TypeLibExporter::FormatErrorContextString()
+
+//*****************************************************************************
+// Event reporting helper.
+//*****************************************************************************
+void TypeLibExporter::ReportError(HRESULT hrRpt)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ WCHAR rcErr[1024];
+ SString ssName;
+ SafeComHolder<IErrorInfo> pErrorInfo;
+ BSTRHolder bstrDescription = NULL;
+
+ // Try to retrieve rich error info for the failure.
+ if (SafeGetErrorInfo(&pErrorInfo) != S_OK)
+ pErrorInfo = NULL;
+
+ // If we retrieved an IErrorInfo, then retrieve the description.
+ if (pErrorInfo)
+ {
+ if (FAILED(pErrorInfo->GetDescription(&bstrDescription)))
+ bstrDescription = NULL;
+ }
+
+ if (bstrDescription)
+ {
+ // Use the description as the error message.
+ wcsncpy_s(rcErr, COUNTOF(rcErr), bstrDescription, _TRUNCATE);
+ }
+ else
+ {
+ // Format the error message.
+ FormatRuntimeError(rcErr, lengthof(rcErr), hrRpt);
+ }
+
+ // Format the context.
+ FormatErrorContextString(&ssName);
+
+ // Post the error to the errorinfo object.
+ VMPostError(TLBX_E_ERROR_MESSAGE, ssName.GetUnicode(), rcErr);
+
+ // Throw the exception, including context info.
+ COMPlusThrowHR(TLBX_E_ERROR_MESSAGE, kGetErrorInfo);
+} // void TypeLibExporter::ReportError()
+
+//*****************************************************************************
+// Event reporting helper.
+//*****************************************************************************
+void TypeLibExporter::ReportEvent( // Reports an event through the notification sink.
+ int ev, // The event kind.
+ int hr, // HR.
+ ...) // Variable args.
+{
+ STANDARD_VM_CONTRACT;
+
+ WCHAR rcMsg[1024]; // Buffer for message.
+ va_list marker; // User text.
+ BSTRHolder bstrMsg=0; // BSTR for message.
+
+ // Format the message.
+ va_start(marker, hr);
+ hr = FormatRuntimeErrorVa(rcMsg, lengthof(rcMsg), hr, marker);
+ va_end(marker);
+
+ // Convert to a BSTR.
+ bstrMsg = SysAllocString(rcMsg);
+
+ // Display it, and clean up.
+ if (bstrMsg != NULL)
+ m_pNotify->ReportEvent(static_cast<ImporterEventKind>(ev), hr, bstrMsg);
+
+} // void TypeLibExporter::ReportEvent()
+
+//*****************************************************************************
+// Warning reporting helper.
+//*****************************************************************************
+void TypeLibExporter::ReportWarning( // Reports a warning through the notification sink.
+ HRESULT hrReturn, // HR to return.
+ HRESULT hrRpt, // Error code.
+ ...) // Args to message.
+{
+ STANDARD_VM_CONTRACT;
+
+ WCHAR rcErr[1024]; // Buffer for error message.
+ SString ssName; // Buffer for context.
+ va_list marker; // User text.
+ BSTRHolder bstrMsg=0; // BSTR for message.
+ BSTRHolder bstrBuf=0; // Buffer for message.
+ UINT iLen; // Length of allocated buffer.
+
+ // Format the message.
+ va_start(marker, hrRpt);
+ FormatRuntimeErrorVa(rcErr, lengthof(rcErr), hrRpt, marker);
+ va_end(marker);
+
+ // Format the context.
+ FormatErrorContextString(&ssName);
+
+ // Put them together.
+ iLen = (UINT)(wcslen(rcErr) + ssName.GetCount() + 200);
+ bstrBuf = SysAllocStringLen(0, iLen);
+
+ if (bstrBuf != NULL)
+ {
+ FormatRuntimeError(bstrBuf, iLen, TLBX_W_WARNING_MESSAGE, ssName.GetUnicode(), rcErr);
+
+ // Have to copy to another BSTR, because the runtime would otherwise also print the
+ // garbage beyond the terminating nul.
+ bstrMsg = SysAllocString(bstrBuf);
+
+ if (bstrMsg != NULL)
+ m_pNotify->ReportEvent(NOTIF_CONVERTWARNING, hrRpt, bstrMsg);
+ }
+
+} // void TypeLibExporter::ReportWarning()
+
+// Throws exceptions encountered during type exportation.
+// Wrapped with ThrowHRWithContext.
+void TypeLibExporter::InternalThrowHRWithContext(HRESULT hrRpt, ...)
+{
+ STANDARD_VM_CONTRACT;
+
+ WCHAR rcErr[2048];
+ SString ssName;
+ va_list marker;
+
+ // Format the error message.
+ va_start(marker, hrRpt);
+ FormatRuntimeErrorVa(rcErr, lengthof(rcErr), hrRpt, marker);
+ va_end(marker);
+
+ // Format the context.
+ FormatErrorContextString(&ssName);
+
+ // Post the error to the errorinfo object.
+ VMPostError(TLBX_E_ERROR_MESSAGE, ssName.GetUnicode(), rcErr);
+
+ // Throw the exception, including context info.
+ COMPlusThrowHR(TLBX_E_ERROR_MESSAGE, kGetErrorInfo);
+} // void TypeLibExporter::InternalThrowHRWithContext()
+
+//*****************************************************************************
+// Post a class load error on failure.
+//*****************************************************************************
+void TypeLibExporter::PostClassLoadError(
+ LPCUTF8 pszName, // Name of the class.
+ SString& message) // Exception message of class load failure.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pszName));
+ }
+ CONTRACTL_END;
+
+ // See if we got anything back.
+ if (!message.IsEmpty())
+ InternalThrowHRWithContext(TLBX_E_CLASS_LOAD_EXCEPTION, pszName, message.GetUnicode());
+ else
+ InternalThrowHRWithContext(TLBX_E_CANT_LOAD_CLASS, pszName);
+} // void TypeLibExporter::PostClassLoadError()
+
+//*****************************************************************************
+// Determine the type, if any, of auto-interface for a class.
+// May be none, dispatch, or dual.
+//*****************************************************************************
+void TypeLibExporter::ClassHasIClassX(
+ MethodTable *pClass, // The class.
+ CorClassIfaceAttr *pClassItfType) // None, dual, dispatch
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pClass));
+ PRECONDITION(!pClass->IsInterface());
+ PRECONDITION(CheckPointer(pClassItfType));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ ComMethodTable *pClassComMT = NULL;
+
+ *pClassItfType = clsIfNone;
+
+ // If the class is a COM import or if it isn't COM visible, then from the
+ // exporter's perspective, it doesn't have an IClassX.
+ if (!pClass->IsComImport())
+ {
+ ComCallWrapperTemplate *pTemplate = ComCallWrapperTemplate::GetTemplate(pClass);
+ if (pTemplate->SupportsIClassX())
+ {
+ pClassComMT = ComCallWrapperTemplate::SetupComMethodTableForClass(pClass, FALSE);
+ _ASSERTE(pClassComMT);
+
+ if (pClassComMT->IsComVisible())
+ *pClassItfType = pClassComMT->GetClassInterfaceType();
+ }
+ }
+} // void TypeLibExporter::ClassHasIClassX()
+
+//*****************************************************************************
+// Load a class by token, post an error on failure.
+//*****************************************************************************
+MethodTable * TypeLibExporter::LoadClass(
+ Module *pModule, // Module with Loader to use to load the class.
+ mdToken tk) // The token to load.
+{
+ CONTRACT(MethodTable *)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pModule));
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ // Get the MethodTable for the token.
+ TypeHandle th;
+ SString exceptionMessage;
+
+ EX_TRY
+ {
+ th = ClassLoader::LoadTypeDefOrRefThrowing(pModule, tk,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ }
+ EX_CATCH
+ {
+ GET_EXCEPTION()->GetMessage(exceptionMessage);
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (th.IsNull())
+ {
+ // Format a hopefully useful error message.
+ LPCUTF8 pNS, pName;
+ SString sName;
+
+ if (TypeFromToken(tk) == mdtTypeDef)
+ {
+ if (FAILED(pModule->GetMDImport()->GetNameOfTypeDef(tk, &pName, &pNS)))
+ {
+ pName = pNS = "Invalid TypeDef record";
+ }
+ }
+ else
+ {
+ _ASSERTE(TypeFromToken(tk) == mdtTypeRef);
+ if (FAILED(pModule->GetMDImport()->GetNameOfTypeRef(tk, &pNS, &pName)))
+ {
+ pNS = pName = "Invalid TypeRef record";
+ }
+ }
+
+ if (pNS && *pNS)
+ {
+ sName.AppendUTF8(pNS);
+ sName.AppendUTF8(NAMESPACE_SEPARATOR_STR);
+ }
+
+ sName.AppendUTF8(pName);
+
+ StackScratchBuffer scratch;
+ PostClassLoadError(sName.GetUTF8(scratch), exceptionMessage);
+ }
+
+ RETURN (th.AsMethodTable());
+
+} // MethodTable * TypeLibExporter::LoadClass()
+
+//*****************************************************************************
+// Load a class by name, post an error on failure.
+//*****************************************************************************
+TypeHandle TypeLibExporter::LoadClass(
+ Module *pModule, // Module with Loader to use to load the class.
+ LPCUTF8 pszName) // Name of class to load.
+{
+ CONTRACT(TypeHandle)
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(pszName));
+ POSTCONDITION(!RETVAL.IsNull());
+ }
+ CONTRACT_END;
+
+ TypeHandle th;
+ SString exceptionMessage;
+
+ EX_TRY
+ {
+ th = TypeName::GetTypeUsingCASearchRules(pszName, pModule->GetAssembly());
+ _ASSERTE(!th.IsNull());
+ }
+ EX_CATCH
+ {
+ GET_EXCEPTION()->GetMessage(exceptionMessage);
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (th.IsNull())
+ {
+ PostClassLoadError(pszName, exceptionMessage);
+ }
+
+ RETURN th;
+
+} // TypeHandle TypeLibExporter::LoadClass()
+
+
+//*****************************************************************************
+// Enumerate the TypeDefs and convert them to TypeInfos.
+//*****************************************************************************
+void TypeLibExporter::ConvertAllTypeDefs()
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK; // A result.
+ CExportedTypesInfo *pData; // For iterating the entries.
+ int cTypes; // Count of types.
+ int ix; // Loop control.
+
+ LPCSTR pName1, pNS1; // Names of a type.
+ LPCSTR pName2, pNS2; // Names of another type.
+ MethodTable *pc1; // A Type.
+ MethodTable *pc2; // Another type.
+ CQuickArray<BYTE> bNamespace; // Array of flags for namespace decoration.
+
+ cTypes = m_Exports.Count();
+
+ // If there are no types in the assembly, then we are done.
+ if (cTypes <= 0)
+ return;
+
+ // Order by name, then look for duplicates.
+ m_Exports.SortByName();
+
+ // Resize the array for namespace flags now, but temporarily record duplicates in the
+ // ICreateTypeInfo* field, so that the flags get re-sorted along with the entries.
+ bNamespace.ReSizeThrows(cTypes);
+
+ // Get names of first type.
+ pc1 = m_Exports[0]->pClass;
+ IfFailReport(pc1->GetMDImport()->GetNameOfTypeDef(pc1->GetCl(), &pName1, &pNS1));
+
+ // Iterate through the types, looking for duplicate type names.
+ for (ix=0; ix<cTypes-1; ++ix)
+ {
+ // Get the Type pointers and the types' names.
+ pc2 = m_Exports[ix+1]->pClass;
+ IfFailReport(pc2->GetMDImport()->GetNameOfTypeDef(pc2->GetCl(), &pName2, &pNS2));
+
+ // If the types match (case-insensitively), mark both types for namespace
+ // decoration.
+ if (stricmpUTF8(pName1, pName2) == 0)
+ {
+ m_Exports[ix]->pCTI = reinterpret_cast<ICreateTypeInfo2*>(1);
+ m_Exports[ix+1]->pCTI = reinterpret_cast<ICreateTypeInfo2*>(1);
+ }
+ else
+ { // Didn't match, so advance "class 1" pointer.
+ pc1 = pc2;
+ pName1 = pName2;
+ pNS1 = pNS2;
+ }
+ }
+
+ // Put into token order for actual creation.
+ m_Exports.SortByToken();
+
+ // Fill the flag array, from the ICreateTypeInfo* pointers.
+ memset(bNamespace.Ptr(), 0, bNamespace.Size()*sizeof(BYTE));
+ for (ix=0; ix<cTypes; ++ix)
+ {
+ if (m_Exports[ix]->pCTI)
+ {
+ bNamespace[ix] = 1;
+ m_Exports[ix]->pCTI = 0;
+ }
+ }
+
+ // Pass 1. Create the TypeInfos.
+ // There are four steps in the process:
+ // a) Creates the TypeInfos for the types themselves. When a duplicate
+ // is encountered, skip the type until later, so that we don't create
+ // a decorated name that will conflict with a subsequent non-decorated
+ // name. We want to preserve a type's given name as much as possible.
+ // b) Create the TypeInfos for the types that were duplicates in step a.
+ // Perform decoration of the names as necessary to eliminate duplicates.
+ // c) Create the TypeInfos for the IClassXs. When there is a duplicate,
+ // skip, as in step a.
+ // d) Create the remaining TypeInfos for IClassXs. Perform decoration of
+ // the names as necessary to eliminate duplicates.
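+ //
+ // Illustrative example (hypothetical names, not from the metadata): if
+ // namespaces A and B each define a type named Widget, both entries are
+ // flagged above and export as A_Widget and B_Widget instead of a bare,
+ // conflicting Widget. If decorated names still collide, a numeric suffix
+ // (_2, _3, ...) is appended by CreateITypeInfo below.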
+
+ // Step a, Create the TypeInfos for the TypeDefs, no decoration.
+ for (ix=0; ix<cTypes; ++ix)
+ {
+ int bAutoProxy = m_bAutomationProxy;
+ pData = m_Exports[ix];
+ pData->tkind = TKindFromClass(pData->pClass);
+ GetAutomationProxyAttribute(pData->pClass->GetMDImport(), pData->pClass->GetCl(), &bAutoProxy);
+ pData->bAutoProxy = (bAutoProxy != 0);
+
+ CreateITypeInfo(pData, (bNamespace[ix]!=0), false);
+ }
+ // Step b, Create the TypeInfos for the TypeDefs, decoration as needed.
+ for (ix=0; ix<cTypes; ++ix)
+ {
+ pData = m_Exports[ix];
+ if (pData->pCTI == 0)
+ CreateITypeInfo(pData, (bNamespace[ix]!=0), true);
+ }
+
+ // Step c, Create the TypeInfos for the IClassX interfaces. No decoration.
+ for (ix=0; ix<cTypes; ++ix)
+ {
+ pData = m_Exports[ix];
+ CreateIClassXITypeInfo(pData, (bNamespace[ix]!=0), false);
+ }
+ // Step d, Create the TypeInfos for the IClassX interfaces. Decoration as required.
+ for (ix=0; ix<cTypes; ++ix)
+ {
+ pData = m_Exports[ix];
+ if (pData->pCTIClassItf == 0)
+ CreateIClassXITypeInfo(pData, (bNamespace[ix]!=0), true);
+ }
+
+ // Pass 2, add the ImplTypes to the CoClasses.
+ for (ix=0; ix<cTypes; ++ix)
+ {
+ pData = m_Exports[ix];
+ ConvertImplTypes(pData);
+ }
+
+ // Pass 3, fill in the TypeInfo details...
+ for (ix=0; ix<cTypes; ++ix)
+ {
+ pData = m_Exports[ix];
+ ConvertDetails(pData);
+ }
+
+ hr = S_OK;
+} // void TypeLibExporter::ConvertAllTypeDefs()
+
+//*****************************************************************************
+// Convert one TypeDef. Useful for one-off TypeDefs in other scopes where
+// that other scope's typelib doesn't contain a TypeInfo. This happens
+// for the event information with imported typelibs.
+//*****************************************************************************
+HRESULT TypeLibExporter::ConvertOneTypeDef(
+ MethodTable *pClass) // The one class to convert.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pClass));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ ICreateTypeInfo2 *pCTI=0; // The TypeInfo to create.
+ ICreateTypeInfo2 *pDefault=0; // A possible IClassX TypeInfo.
+ CErrorContext SavedContext; // Previous error context.
+ CExportedTypesInfo *pExported; // For adding classes to the exported types cache.
+ CExportedTypesInfo sExported; // For adding classes to the exported types cache.
+
+ // Save error reporting context.
+ SavedContext = m_ErrorContext;
+ m_ErrorContext.m_szAssembly = pClass->GetAssembly()->GetSimpleName();
+ m_ErrorContext.m_tkType = mdTypeDefNil;
+ m_ErrorContext.m_pScope = 0;
+ m_ErrorContext.m_szMember = 0;
+ m_ErrorContext.m_szParam = 0;
+ m_ErrorContext.m_ixParam = -1;
+ m_ErrorContext.m_prev = &SavedContext;
+
+ // See if this class is already in the list.
+ sExported.pClass = pClass;
+ pExported = m_InjectedExports.Find(&sExported);
+ if (pExported == 0)
+ {
+ // Get the AutoProxy value for an isolated class.
+ int bAutoProxy = DEFAULT_AUTOMATION_PROXY_VALUE;
+ if (FALSE == GetAutomationProxyAttribute(pClass->GetMDImport(), pClass->GetCl(), &bAutoProxy))
+ GetAutomationProxyAttribute(pClass->GetAssembly()->GetManifestImport(), TokenFromRid(1, mdtAssembly), &bAutoProxy);
+
+ // New class, add to list.
+ if (NULL == (pExported = m_InjectedExports.Add(&sExported)))
+ IfFailReport(E_OUTOFMEMORY);
+ m_InjectedExports.UpdateArray();
+
+ // Prefix can't tell that IfFailReport will actually throw an exception if pExported is NULL so
+ // let's tell it explicitly that if we reach this point pExported will not be NULL.
+ PREFIX_ASSUME(pExported != NULL);
+ pExported->pClass = pClass;
+ pExported->pCTI = 0;
+ pExported->pCTIClassItf = 0;
+ pExported->tkind = TKindFromClass(pClass);
+ pExported->bAutoProxy = (bAutoProxy != 0);
+
+ // Step 1, Create the TypeInfos for the TypeDefs.
+ CreateITypeInfo(pExported);
+
+ // Step 1a, Create the TypeInfos for the IClassX interfaces.
+ CreateIClassXITypeInfo(pExported);
+
+ // Step 2, add the ImplTypes to the CoClasses.
+ ConvertImplTypes(pExported);
+
+ // Step 3, fill in the TypeInfo details...
+ ConvertDetails(pExported);
+ }
+
+ // Restore error reporting context.
+ m_ErrorContext = SavedContext;
+
+ return (hr);
+} // HRESULT TypeLibExporter::ConvertOneTypeDef()
+
+
+//*****************************************************************************
+// Create the ITypeInfo for a type. Well, sort of. This function will create
+// the first of possibly two typeinfos for the type. If the type is a class
+// we will create a COCLASS typeinfo now, and an INTERFACE typeinfo later,
+// which typeinfo will be the default interface for the coclass. If this
+// typeinfo needs to be aliased, we will create the ALIAS now (with the
+// real name) and the aliased typeinfo later, with the real attributes, but
+// with a mangled name.
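+ //
+ // Illustrative sketch (hypothetical type, not from the sources): a managed
+ // class Acme.Widget typically yields a coclass typeinfo named "Widget" here,
+ // plus a separate interface typeinfo created later (the IClassX,
+ // conventionally an underscore-prefixed name such as "_Widget") that becomes
+ // the coclass's [default] interface.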
+//*****************************************************************************
+void TypeLibExporter::CreateITypeInfo(
+ CExportedTypesInfo *pData, // Conversion data.
+ bool bNamespace, // If true, use namespace + name
+ bool bResolveDup) // If true, decorate name to resolve dups.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pData));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ LPCUTF8 pName; // Name in UTF8.
+ LPCUTF8 pNS; // Namespace in UTF8.
+ SString sName; // Name of the TypeDef.
+ TYPEKIND tkind; // The TYPEKIND of a TypeDef.
+ GUID clsid; // A TypeDef's clsid.
+ DWORD dwFlags; // A TypeDef's flags.
+ int iSuffix = 0; // Counter for suffix.
+ mdTypeDef td; // Token for the class.
+
+ VariantHolder vt; // For defining custom attribute.
+ SafeComHolder<ICreateTypeInfo> pCTITemp=0; // For creating a typeinfo.
+ SafeComHolder<ICreateTypeInfo2> pCTI2=0; // For creating the typeinfo.
+ SafeComHolder<ITypeLib> pITLB=0; // For dup IID reporting.
+ SafeComHolder<ITypeInfo> pITIDup=0; // For dup IID reporting.
+ BSTRHolder bstrDup=0; // For dup IID reporting.
+ BSTRHolder bstrDescr=0; // For description.
+ ZeroHolder zhType = &m_ErrorContext.m_pScope; // Clear error reporting info.
+ CorClassIfaceAttr classItfType = clsIfNone; // For class interface type.
+ TypeHandle thClass = TypeHandle(pData->pClass); // TypeHandle representing the class.
+
+ DefineFullyQualifiedNameForClassW();
+
+ // Get the TypeDef and some info about it.
+ td = pData->pClass->GetCl();
+ IfFailReport(pData->pClass->GetMDImport()->GetTypeDefProps(td, &dwFlags, 0));
+ tkind = pData->tkind;
+
+ // Error reporting info.
+ m_ErrorContext.m_tkType = td;
+ m_ErrorContext.m_pScope = pData->pClass->GetMDImport();
+
+ pData->pCTI = 0;
+ pData->pCTIClassItf = 0;
+
+ // If it is ComImport or WindowsRuntimeImport, do not export it.
+ if (IsTdImport(dwFlags) || pData->pClass->IsProjectedFromWinRT())
+ return;
+
+ // Check to see if the type is supposed to be visible from COM. If it
+ // is not then we go to the next type.
+ if (!IsTypeVisibleFromCom(TypeHandle(pData->pClass)))
+ return;
+
+ // Get the GUID for the class. Will generate from name if no defined GUID,
+ // will also use signatures if interface.
+ pData->pClass->GetGuid(&clsid, TRUE);
+
+ // Get the name.
+ IfFailReport(pData->pClass->GetMDImport()->GetNameOfTypeDef(td, &pName, &pNS));
+
+ // Warn about exporting AutoLayout valueclasses
+ if ( (pData->pClass->IsValueType()) && (!pData->pClass->IsEnum()) && (IsTdAutoLayout(pData->pClass->GetAttrClass())))
+ ReportWarning(TLBX_W_EXPORTING_AUTO_LAYOUT, TLBX_W_EXPORTING_AUTO_LAYOUT, pName);
+
+ // Warn about exporting generic classes.
+ if (pData->pClass->GetNumGenericArgs() != 0)
+ ReportWarning(TLBX_I_GENERIC_TYPE, TLBX_I_GENERIC_TYPE);
+
+ // Classes that derive from generic classes can be COM visible, however we don't
+ // expose a class interface for them. Give a warning to the user about this.
+ if (pData->pClass->HasGenericClassInstantiationInHierarchy())
+ {
+ if (!pData->pClass->IsComImport() && IsTypeVisibleFromCom(thClass))
+ {
+ // Note that we can't call ClassHasIClassX here since it would return
+ // clsIfNone if the type has generic parents in its hierarchy.
+ if (ReadClassInterfaceTypeCustomAttribute(thClass) != clsIfNone)
+ ReportWarning(TLBX_I_GENERIC_BASE_TYPE, TLBX_I_GENERIC_BASE_TYPE);
+ }
+ }
+
+ // Warn about exporting reference types as structs.
+ if ((pData->tkind == TKIND_RECORD || pData->tkind == TKIND_UNION) && !pData->pClass->IsValueType())
+ ReportWarning(TLBX_I_REF_TYPE_AS_STRUCT, TLBX_I_REF_TYPE_AS_STRUCT);
+
+ // workaround for microsoft.wfc.interop.dll -- skip their IDispatch.
+ if (clsid == IID_IDispatch || clsid == IID_IUnknown)
+ {
+ ReportEvent(NOTIF_CONVERTWARNING, TLBX_S_NOSTDINTERFACE, pName);
+ return;
+ }
+
+ if (bNamespace)
+ {
+ sName.MakeFullNamespacePath(SString(SString::Utf8, pNS), SString(SString::Utf8, pName));
+
+ SString replaceChar = SL(W("_"));
+
+ SString::Iterator iter = sName.Begin();
+ while (sName.Find(iter, W(".")))
+ sName.Replace(iter, 1, replaceChar);
+ }
+ else
+ { // Convert name to wide chars.
+ sName.AppendUTF8(pName);
+ }
+
+ // Create the typeinfo for this typedef.
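+ // Illustrative decoration sequence (hypothetical name): "Widget" conflicts,
+ // so try "Widget_2"; if that also conflicts, strip the old suffix and try
+ // "Widget_3", and so on until CreateTypeInfo stops returning
+ // TYPE_E_NAMECONFLICT.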
+ for (;;)
+ {
+ // Attempt to create the TypeDef.
+ hr = m_pICreateTLB->CreateTypeInfo((LPOLESTR)sName.GetUnicode(), tkind, &pCTITemp);
+
+ // If a name conflict, decorate, otherwise, done.
+ if (hr != TYPE_E_NAMECONFLICT)
+ break;
+
+ if (!bResolveDup)
+ {
+ hr = S_FALSE;
+ return;
+ }
+
+ if (iSuffix == 0)
+ {
+ iSuffix = 2;
+ }
+ else
+ {
+ sName.Delete(sName.End()-=2, 2);
+ }
+
+ SString sDup;
+ sDup.Printf(szDuplicateDecoration, iSuffix++);
+
+ sName.Append(sDup);
+ }
+
+ IfFailReport(hr);
+ IfFailReport(SafeQueryInterface(pCTITemp, IID_ICreateTypeInfo2, (IUnknown**)&pCTI2));
+
+ // Set the guid.
+ _ASSERTE(clsid != GUID_NULL);
+ hr = pCTI2->SetGuid(clsid);
+ if (FAILED(hr))
+ {
+ if (hr == TYPE_E_DUPLICATEID)
+ {
+ IfFailReport(SafeQueryInterface(m_pICreateTLB, IID_ITypeLib, (IUnknown**)&pITLB));
+ IfFailReport(pITLB->GetTypeInfoOfGuid(clsid, &pITIDup));
+ IfFailReport(pITIDup->GetDocumentation(MEMBERID_NIL, &bstrDup, 0,0,0));
+ InternalThrowHRWithContext(TLBX_E_DUPLICATE_IID, sName.GetUnicode(), (BSTR)bstrDup);
+ }
+ return;
+ }
+ TRACE("TypeInfo %x: %ls, {%08x-%04x-%04x-%04x-%02x%02x%02x%02x}\n", pCTI2, sName,
+ clsid.Data1, clsid.Data2, clsid.Data3, clsid.Data4[0]<<8|clsid.Data4[1], clsid.Data4[2], clsid.Data4[3], clsid.Data4[4], clsid.Data4[5]);
+
+ IfFailReport(pCTI2->SetVersion(1, 0));
+
+ // Record the fully qualified type name in a custom attribute.
+ // If the TypelibImportClassAttribute exists, use that instead.
+ SString sName2;
+ hr = GetTypeLibImportClassName(pData->pClass, sName2);
+ if (hr == S_OK)
+ {
+ V_BSTR(&vt) = ::SysAllocString(sName2.GetUnicode());
+ if (V_BSTR(&vt) == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ V_VT(&vt) = VT_BSTR;
+ }
+ else
+ {
+ // Default to the real name.
+ LPCWSTR pszName = GetFullyQualifiedNameForClassNestedAwareW(pData->pClass);
+
+ V_BSTR(&vt) = ::SysAllocString(pszName);
+ if (V_BSTR(&vt) == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ V_VT(&vt) = VT_BSTR;
+ }
+
+ IfFailReport(pCTI2->SetCustData(GUID_ManagedName, &vt));
+
+ // If the class is decorated with a description, apply it to the typelib.
+ if (GetDescriptionString(pData->pClass, td, (BSTR &)bstrDescr))
+ IfFailReport(pCTI2->SetDocString(bstrDescr));
+
+ // Transfer ownership of the pointer.
+ pData->pCTI = pCTI2;
+ pCTI2.SuppressRelease();
+ pCTI2 = 0;
+} // void TypeLibExporter::CreateITypeInfo()
+
+HRESULT TypeLibExporter::GetTypeLibImportClassName(
+ MethodTable*pClass,
+ SString& szName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ _ASSERTE(NULL != pClass);
+
+ HRESULT hr = S_OK;
+
+ // Check for the presence of the TypelibImportClassAttribute.
+ const char* pvData; // Pointer to a custom attribute data.
+ ULONG cbData; // Size of custom attribute data.
+
+ hr = pClass->GetMDImport()->GetCustomAttributeByName(pClass->GetCl(),
+ INTEROP_TYPELIBIMPORTCLASS_TYPE,
+ reinterpret_cast<const void**>(&pvData),
+ &cbData);
+
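+ // The checks below hand-validate the custom attribute blob: pvData[0..1]
+ // must be the little-endian prolog 0x0001, and cbData > 5 leaves room for
+ // the prolog, a non-empty length-prefixed string argument, and the 2-byte
+ // named-argument count.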
+ if (hr == S_OK && cbData > 5 && pvData[0] == 1 && pvData[1] == 0)
+ {
+ CustomAttributeParser cap(pvData, cbData);
+ VERIFY(SUCCEEDED(cap.ValidateProlog())); // Validated above, just ensure consistency.
+
+ LPCUTF8 szString;
+ ULONG cbString;
+ if (SUCCEEDED(cap.GetNonNullString(&szString, &cbString)))
+ {
+ // Set the string and null terminate it.
+ szName.SetUTF8(szString, cbString);
+ szName.AppendASCII("\0");
+
+ // We successfully retrieved the string.
+ return S_OK;
+ }
+ }
+
+ return S_FALSE;
+}
+
+
+
+//*****************************************************************************
+// See if an object has a Description, and get it as a BSTR.
+//*****************************************************************************
+BOOL TypeLibExporter::GetDescriptionString(
+ MethodTable *pClass, // Class containing the token.
+ mdToken tk, // Token of the object.
+ BSTR &bstrDescr) // Put description here.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pClass));
+ }
+ CONTRACTL_END;
+
+ // Check for a description custom attribute.
+ return GetStringCustomAttribute(pClass->GetMDImport(), XXX_DESCRIPTION_TYPE, tk, bstrDescr);
+
+} // BOOL TypeLibExporter::GetDescriptionString()
+
+//*****************************************************************************
+// See if an object has a custom attribute, and get it as a BSTR.
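+// The value is parsed from the standard custom attribute blob layout: a 2-byte
+// prolog (0x0001), a length-prefixed UTF-8 string (the single fixed argument),
+// then a 2-byte count of named arguments.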
+//*****************************************************************************
+BOOL TypeLibExporter::GetStringCustomAttribute(
+ IMDInternalImport *pImport,
+ LPCSTR szName,
+ mdToken tk,
+ BSTR &bstrDescr)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pImport));
+ PRECONDITION(CheckPointer(szName));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr; // A result.
+ const void *pvData; // Pointer to a custom attribute data.
+ ULONG cbData; // Size of custom attribute data.
+
+ // Look for the desired custom attribute.
+ IfFailReport(pImport->GetCustomAttributeByName(tk, szName, &pvData,&cbData));
+ if (hr == S_OK && cbData > 2)
+ {
+ CustomAttributeParser cap(pvData, cbData);
+ IfFailReport(cap.SkipProlog());
+
+ LPCUTF8 szString;
+ ULONG cbString;
+ IfFailReport(cap.GetString(&szString, &cbString));
+
+ bstrDescr = SysAllocStringLen(0, cbString); // allocates cbString+1 characters (appends '\0')
+ if (bstrDescr == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ if (cbString > 0)
+ {
+ ULONG cch = WszMultiByteToWideChar(CP_UTF8, 0, szString, cbString, bstrDescr, cbString);
+ bstrDescr[cch] = W('\0');
+ }
+
+ return TRUE;
+ }
+
+ return FALSE;
+} // BOOL TypeLibExporter::GetStringCustomAttribute()
+
+//*****************************************************************************
+// Get the value for AutomationProxy for an object. Return the default
+// if there is no attribute.
+//*****************************************************************************
+BOOL TypeLibExporter::GetAutomationProxyAttribute(
+ IMDInternalImport *pImport,
+ mdToken tk,
+ int *bValue)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pImport));
+ PRECONDITION(CheckPointer(bValue));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr; // A result.
+ const void *pvData; // Pointer to a custom attribute data.
+ ULONG cbData; // Size of custom attribute data.
+
+ IfFailReport(pImport->GetCustomAttributeByName(tk, INTEROP_AUTOPROXY_TYPE, &pvData, &cbData));
+ if (hr == S_OK && cbData > 2)
+ {
+ CustomAttributeParser cap(pvData, cbData);
+ if (FAILED(cap.SkipProlog()))
+ return FALSE;
+
+ UINT8 u1;
+ if (FAILED(cap.GetU1(&u1)))
+ return FALSE;
+
+ *bValue = u1 != 0;
+ }
+
+ if (hr == S_OK)
+ return TRUE;
+
+ return FALSE;
+} // BOOL TypeLibExporter::GetAutomationProxyAttribute()
+
+//*****************************************************************************
+// Create the IClassX ITypeInfo.
+//*****************************************************************************
+void TypeLibExporter::CreateIClassXITypeInfo(
+ CExportedTypesInfo *pData, // Conversion data.
+ bool bNamespace, // If true, use namespace + name
+ bool bResolveDup) // If true, decorate name to resolve dups.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pData));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ LPCUTF8 pName; // Name in UTF8.
+ LPCUTF8 pNS; // Namespace in UTF8.
+ SString sName; // Name of the TypeDef.
+ SString sNameTypeInfo; // Name of the IClassX.
+ TYPEKIND tkind; // The TYPEKIND of a TypeDef.
+ GUID clsid; // A TypeDef's clsid.
+ DWORD dwFlags; // A TypeDef's flags.
+ LPWSTR pSuffix; // Pointer into the name.
+ int iSuffix = 0; // Counter for suffix.
+ GUID guid = {0}; // A default interface's IID.
+ HREFTYPE href; // href of base interface of IClassX.
+ mdTypeDef td; // Token for the class.
+ CorClassIfaceAttr classItfType = clsIfNone; // For class interface type.
+
+ VariantHolder vt; // For defining custom attribute.
+ SafeComHolder<ICreateTypeInfo> pCTITemp=0; // For creating a typeinfo.
+ SafeComHolder<ITypeInfo> pITemp=0; // An ITypeInfo to get a name.
+ SafeComHolder<ITypeLib> pITLB=0; // For dup IID reporting.
+ SafeComHolder<ITypeInfo> pITIDup=0; // For dup IID reporting.
+ SafeComHolder<ICreateTypeInfo2> pCTI2=0; // For creating the typeinfo.
+ BSTRHolder bstrName=0; // An ITypeInfo's name.
+ BSTRHolder bstrDescr=0; // For description.
+ BSTRHolder bstrDup=0; // For dup IID reporting.
+ ZeroHolder zhType = &m_ErrorContext.m_pScope; // Clear error reporting info.
+
+ MethodTable* pClassOuter = pData->pClass;
+
+ DefineFullyQualifiedNameForClassW();
+
+ // Get the TypeDef and some info about it.
+ td = pData->pClass->GetCl();
+ IfFailReport(pData->pClass->GetMDImport()->GetTypeDefProps(td, &dwFlags, 0));
+ tkind = pData->tkind;
+
+ // Error reporting info.
+ m_ErrorContext.m_tkType = td;
+ m_ErrorContext.m_pScope = pData->pClass->GetMDImport();
+
+ // A CoClass needs an IClassX, and an alias kind needs an alias.
+ if (tkind != TKIND_COCLASS)
+ return;
+
+ // Check to see if the type is supposed to be visible from COM. If it
+ // is not then we go to the next type.
+ if (!IsTypeVisibleFromCom(TypeHandle(pClassOuter)))
+ return;
+
+ // Imported types don't need an IClassX.
+ if (IsTdImport(dwFlags))
+ return;
+
+ // Check to see if we need to set up an IClassX for the class.
+ ClassHasIClassX(pData->pClass, &classItfType);
+ if (classItfType == clsIfNone)
+ return;
+
+ // Get full name from metadata.
+ IfFailReport(pData->pClass->GetMDImport()->GetNameOfTypeDef(td, &pName, &pNS));
+
+ // Get the GUID for the class. Used to generate IClassX guid.
+ pData->pClass->GetGuid(&clsid, TRUE);
+
+ // Get the name of the class. Use the ITypeInfo if there is one, except don't
+ // use the typeinfo for types which are Aliased.
+ if (pData->pCTI)
+ {
+ IfFailReport(SafeQueryInterface(pData->pCTI, IID_ITypeInfo, (IUnknown**)&pITemp));
+ IfFailReport(pITemp->GetDocumentation(MEMBERID_NIL, &bstrName, 0,0,0));
+ sName.Append(bstrName);
+ }
+ else
+ {
+ sName.AppendUTF8(pName);
+ }
+
+ // Create the typeinfo name for the IClassX
+ sNameTypeInfo.Set(cIClassX);
+ sNameTypeInfo.Append(sName);
+
+ tkind = TKIND_INTERFACE;
+ pSuffix = 0;
+ for (;;)
+ {
+ // Try to create the TypeInfo.
+ hr = m_pICreateTLB->CreateTypeInfo((LPOLESTR)sNameTypeInfo.GetUnicode(), tkind, &pCTITemp);
+
+ // If a name conflict, decorate, otherwise, done.
+ if (hr != TYPE_E_NAMECONFLICT)
+ break;
+
+ if (!bResolveDup)
+ {
+ hr = S_FALSE;
+ return;
+ }
+
+ if (iSuffix == 0)
+ {
+ iSuffix = 2;
+ }
+ else
+ {
+ sNameTypeInfo.Delete(sNameTypeInfo.End()-=2, 2);
+ }
+
+ SString sDup;
+ sDup.Printf(szDuplicateDecoration, iSuffix++);
+
+ sNameTypeInfo.Append(sDup);
+ }
+
+ IfFailReport(hr);
+ IfFailReport(SafeQueryInterface(pCTITemp, IID_ICreateTypeInfo2, (IUnknown**)&pCTI2));
+
+ // Generate the "IClassX" UUID and set it.
+ GenerateClassItfGuid(TypeHandle(pData->pClass), &guid);
+ hr = pCTI2->SetGuid(guid);
+ if (FAILED(hr))
+ {
+ if (hr == TYPE_E_DUPLICATEID)
+ {
+ IfFailReport(SafeQueryInterface(m_pICreateTLB, IID_ITypeLib, (IUnknown**)&pITLB));
+ IfFailReport(pITLB->GetTypeInfoOfGuid(guid, &pITIDup));
+ IfFailReport(pITIDup->GetDocumentation(MEMBERID_NIL, &bstrDup, 0,0,0));
+ InternalThrowHRWithContext(TLBX_E_DUPLICATE_IID, sNameTypeInfo.GetUnicode(), (BSTR)bstrDup);
+ }
+ return;
+ }
+
+ // Adding methods may cause an href to this typeinfo, which will cause it to be laid out.
+ // Set the inheritance, so that nesting will be correct when that layout happens.
+ // Add IDispatch as impltype 0.
+ GetRefTypeInfo(pCTI2, m_pIDispatch, &href);
+ IfFailReport(pCTI2->AddImplType(0, href));
+
+ // Record the fully qualified type name in a custom attribute.
+ LPCWSTR szName = GetFullyQualifiedNameForClassNestedAwareW(pData->pClass);
+ V_VT(&vt) = VT_BSTR;
+ V_BSTR(&vt) = SysAllocString(szName);
+ if (V_BSTR(&vt) == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ IfFailReport(pCTI2->SetCustData(GUID_ManagedName, &vt));
+
+ TRACE("IClassX %x: %ls, {%08x-%04x-%04x-%04x-%02x%02x%02x%02x}\n", pCTI2, sName,
+ guid.Data1, guid.Data2, guid.Data3, guid.Data4[0]<<8|guid.Data4[1], guid.Data4[2], guid.Data4[3], guid.Data4[4], guid.Data4[5]);
+
+ // If the class is decorated with a description, apply it to the typelib.
+ if(GetDescriptionString(pData->pClass, td, (BSTR &)bstrDescr))
+ IfFailReport(pCTI2->SetDocString(bstrDescr));
+
+ // Transfer ownership of the pointer.
+ _ASSERTE(pData->pCTIClassItf == 0);
+ pData->pCTIClassItf = pCTI2;
+ pCTI2.SuppressRelease();
+ pCTI2 = 0;
+} // void TypeLibExporter::CreateIClassXITypeInfo()
+
+//*****************************************************************************
+// Add the impltypes to an ITypeInfo.
+//*****************************************************************************
+void TypeLibExporter::ConvertImplTypes(
+ CExportedTypesInfo *pData) // Conversion data.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pData));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ DWORD dwFlags; // A TypeDef's flags.
+ mdTypeDef td; // Token for the class.
+ ZeroHolder zhType = &m_ErrorContext.m_pScope; // Clear error reporting info.
+
+ // Get the TypeDef and some info about it.
+ td = pData->pClass->GetCl();
+ IfFailReport(pData->pClass->GetMDImport()->GetTypeDefProps(td, &dwFlags, 0));
+
+ // Error reporting info.
+ m_ErrorContext.m_tkType = td;
+ m_ErrorContext.m_pScope = pData->pClass->GetMDImport();
+
+ // If there is no ITypeInfo, skip it.
+ if (pData->pCTI == 0)
+ return;
+
+ // Check to see if the type is supposed to be visible from COM. If it
+ // is not then we go to the next type.
+ if (!IsTypeVisibleFromCom(TypeHandle(pData->pClass)))
+ return;
+
+ // Add the ImplTypes to the CoClass.
+ switch (pData->tkind)
+ {
+ case TKIND_INTERFACE:
+ case TKIND_DISPATCH:
+ // Add the base type to the interface.
+ ConvertInterfaceImplTypes(pData->pCTI, pData->pClass);
+ break;
+
+ case TKIND_RECORD:
+ case TKIND_UNION:
+ case TKIND_ENUM:
+ // Nothing to do at this step.
+ break;
+
+ case TKIND_COCLASS:
+ // Add the ImplTypes to the CoClass.
+ ConvertClassImplTypes(pData->pCTI, pData->pCTIClassItf, pData->pClass);
+ break;
+
+ default:
+ _ASSERTE(!"Unknown TYPEKIND");
+ IfFailReport(E_INVALIDARG);
+ break;
+ }
+} // void TypeLibExporter::ConvertImplTypes()
+
+//*****************************************************************************
+// Convert the details (members) of an ITypeInfo.
+//*****************************************************************************
+void TypeLibExporter::ConvertDetails(
+ CExportedTypesInfo *pData) // Conversion data.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pData));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ DWORD dwFlags; // A TypeDef's flags.
+ mdTypeDef td; // Token for the class.
+ ZeroHolder zhType = &m_ErrorContext.m_pScope; // Clear error reporting info.
+
+ // Get the TypeDef and some info about it.
+ td = pData->pClass->GetCl();
+ IfFailReport(pData->pClass->GetMDImport()->GetTypeDefProps(td, &dwFlags, 0));
+
+ // Error reporting info.
+ m_ErrorContext.m_tkType = td;
+ m_ErrorContext.m_pScope = pData->pClass->GetMDImport();
+
+ // If there is no TypeInfo, skip it, but for CoClass need to populate IClassX.
+ if (pData->pCTI == 0 && pData->tkind != TKIND_COCLASS)
+ return;
+
+ // Check to see if the type is supposed to be visible from COM. If it
+ // is not then we go to the next type.
+ if (!IsTypeVisibleFromCom(TypeHandle(pData->pClass)))
+ return;
+
+ // Fill in the rest of the typeinfo for this typedef.
+ switch (pData->tkind)
+ {
+ case TKIND_INTERFACE:
+ case TKIND_DISPATCH:
+ ConvertInterfaceDetails(pData->pCTI, pData->pClass, pData->bAutoProxy);
+ break;
+
+ case TKIND_RECORD:
+ case TKIND_UNION:
+ ConvertRecord(pData);
+ break;
+
+ case TKIND_ENUM:
+ ConvertEnum(pData->pCTI, pData->pClass);
+ break;
+
+ case TKIND_COCLASS:
+ // Populate the methods on the IClassX interface.
+ ConvertClassDetails(pData->pCTI, pData->pCTIClassItf, pData->pClass, pData->bAutoProxy);
+ break;
+
+ default:
+ _ASSERTE(!"Unknown TYPEKIND");
+ IfFailReport(E_INVALIDARG);
+ break;
+ } // Switch (tkind)
+
+ // Report that this type has been converted.
+ SString ssType;
+ if (IsTdNested(dwFlags))
+ {
+ TypeNameBuilder tnb(&ssType, TypeNameBuilder::ParseStateNAME);
+ TypeString::AppendNestedTypeDef(tnb, m_ErrorContext.m_pScope, m_ErrorContext.m_tkType);
+ }
+ else
+ TypeString::AppendTypeDef(ssType, m_ErrorContext.m_pScope, m_ErrorContext.m_tkType);
+ ReportEvent(NOTIF_TYPECONVERTED, TLBX_I_TYPE_EXPORTED, ssType.GetUnicode());
+} // void TypeLibExporter::ConvertDetails()
+
+//*****************************************************************************
+// Add the ImplTypes to the TypeInfo.
+//*****************************************************************************
+void TypeLibExporter::ConvertInterfaceImplTypes(
+ ICreateTypeInfo2 *pThisTypeInfo, // The typeinfo being created.
+ MethodTable *pClass) // MethodTable for the TypeInfo.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pThisTypeInfo));
+ PRECONDITION(CheckPointer(pClass));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ ULONG ulIface; // Is this interface [dual]?
+ HREFTYPE href; // href of base interface.
+
+ // IDispatch or IUnknown derived?
+ IfFailReport(pClass->GetMDImport()->GetIfaceTypeOfTypeDef(pClass->GetCl(), &ulIface));
+
+ // Parent interface.
+ if (IsDispatchBasedItf((CorIfaceAttr)ulIface))
+ {
+ // Get the HREFTYPE for IDispatch.
+ GetRefTypeInfo(pThisTypeInfo, m_pIDispatch, &href);
+ }
+ else
+ {
+ // Get the HREFTYPE for IUnknown.
+ GetRefTypeInfo(pThisTypeInfo, m_pIUnknown, &href);
+ }
+
+ // Add the HREF as an interface.
+ IfFailReport(pThisTypeInfo->AddImplType(0, href));
+} // void TypeLibExporter::ConvertInterfaceImplTypes()
+
+
+//*****************************************************************************
+// Create the TypeInfo for an interface by iterating over functions.
+//*****************************************************************************
+void TypeLibExporter::ConvertInterfaceDetails (
+ ICreateTypeInfo2 *pThisTypeInfo, // The typeinfo being created.
+ MethodTable *pMT, // MethodTable for the TypeInfo.
+ int bAutoProxy) // If true, oleaut32 is the interface's marshaller.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pThisTypeInfo));
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ ULONG ulIface; // Is this interface [dual]?
+ DWORD dwTIFlags=0; // TypeLib flags.
+ int cVisibleMembers = 0; // The count of methods that are visible to COM.
+
+ // Retrieve the map of members.
+ ComMTMemberInfoMap MemberMap(pMT);
+
+ // IDispatch or IUnknown derived?
+ IfFailReport(pMT->GetMDImport()->GetIfaceTypeOfTypeDef(pMT->GetCl(), &ulIface));
+
+ if (IsDispatchBasedItf((CorIfaceAttr)ulIface))
+ {
+ // IDispatch derived.
+ dwTIFlags |= TYPEFLAG_FDISPATCHABLE;
+
+ if (ulIface == ifDual)
+ dwTIFlags |= TYPEFLAG_FDUAL | TYPEFLAG_FOLEAUTOMATION;
+ else
+ _ASSERTE(ulIface == ifDispatch);
+ }
+ else
+ {
+ // IUnknown derived.
+ dwTIFlags |= TYPEFLAG_FOLEAUTOMATION;
+ }
+
+ if (!bAutoProxy)
+ dwTIFlags |= TYPEFLAG_FPROXY;
+
+ // Set appropriate flags.
+ IfFailReport(pThisTypeInfo->SetTypeFlags(dwTIFlags));
+
+ // Retrieve the method properties.
+ size_t sizeOfPtr = IsExportingAs64Bit() ? 8 : 4;
+
+ MemberMap.Init(sizeOfPtr);
+ if (MemberMap.HadDuplicateDispIds())
+ ReportWarning(TLBX_I_DUPLICATE_DISPID, TLBX_I_DUPLICATE_DISPID);
+
+ // We need a scope to bypass the "initialization skipped by goto ErrExit"
+ // compiler error.
+ {
+ CQuickArray<ComMTMethodProps> &rProps = MemberMap.GetMethods();
+
+ // Now add the methods to the TypeInfo.
+ MethodTable::MethodIterator it(pMT);
+ for (; it.IsValid(); it.Next())
+ {
+ if (it.IsVirtual())
+ {
+ // Only convert the method if it is visible from COM.
+ if (rProps[it.GetSlotNumber()].bMemberVisible)
+ {
+ if (ConvertMethod(pThisTypeInfo, &rProps[it.GetSlotNumber()], cVisibleMembers, ulIface))
+ cVisibleMembers++;
+ }
+ }
+ }
+ }
+} // void TypeLibExporter::ConvertInterfaceDetails()
+
+//*****************************************************************************
+// Export a Record to a TypeLib.
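+// Base-class fields are flattened into the derived record with a "Base_" name
+// prefix. Illustrative example (hypothetical types): exporting Derived whose
+// base Base declares a field x emits a member named Base_x ahead of Derived's
+// own fields.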
+//*****************************************************************************
+void TypeLibExporter::ConvertRecordBaseClass(
+ CExportedTypesInfo *pData, // Conversion data.
+ MethodTable *pSubMT, // The base class.
+ ULONG &ixVar) // Variable index in the typelib.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pData));
+ PRECONDITION(CheckPointer(pSubMT));
+ }
+ CONTRACTL_END;
+
+ // The typeinfo being created.
+ ICreateTypeInfo2 *pThisTypeInfo = pData->pCTI;
+
+ HRESULT hr = S_OK; // A result.
+ mdFieldDef fd; // A Field def.
+ ULONG iFD; // Loop control.
+ ULONG cFD; // Count of total MemberDefs.
+ DWORD dwFlags; // Field flags.
+ LPCUTF8 szName; // Name in UTF8.
+ LPCUTF8 szNamespace; // A Namespace in UTF8.
+ SString sName; // Name
+
+ // To enum fields.
+ HENUMInternalHolder eFDi(pSubMT->GetMDImport());
+
+ // If there is no class here, or if the class is Object, don't add members.
+ if (pSubMT == 0 ||
+ pSubMT == g_pObjectClass)
+ return;
+
+ // If this class has a base class, export those members first.
+ ConvertRecordBaseClass(pData, pSubMT->GetParentMethodTable(), ixVar);
+
+ // Build the member name prefix.
+ IfFailReport(pSubMT->GetMDImport()->GetNameOfTypeDef(pSubMT->GetCl(), &szName, &szNamespace));
+
+ sName.SetUTF8(szName);
+ sName.Append(W("_"));
+
+ // Get an enumerator for the MemberDefs in the TypeDef.
+ eFDi.EnumInit(mdtFieldDef, pSubMT->GetCl());
+ cFD = pSubMT->GetMDImport()->EnumGetCount(&eFDi);
+
+ SString sNameMember;
+ // For each MemberDef...
+ for (iFD=0; iFD<cFD; ++iFD)
+ {
+ // Get the next field.
+ if (!pSubMT->GetMDImport()->EnumNext(&eFDi, &fd))
+ {
+ IfFailReport(E_UNEXPECTED);
+ }
+
+ IfFailReport(pSubMT->GetMDImport()->GetFieldDefProps(fd, &dwFlags));
+
+ // Only non-static fields.
+ if (!IsFdStatic(dwFlags))
+ {
+ IfFailReport(pSubMT->GetMDImport()->GetNameOfFieldDef(fd, &szName));
+
+ sNameMember.Set(sName);
+ sNameMember.AppendUTF8(szName);
+ if (ConvertVariable(pThisTypeInfo, pSubMT, fd, sNameMember, ixVar))
+ ixVar++;
+ }
+ }
+} // void TypeLibExporter::ConvertRecordBaseClass()
+
+void TypeLibExporter::ConvertRecord(
+ CExportedTypesInfo *pData) // Conversion data.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pData));
+ }
+ CONTRACTL_END;
+
+ ICreateTypeInfo2 *pThisTypeInfo=pData->pCTI; // The typeinfo being created.
+ MethodTable *pMT=pData->pClass; // MethodTable for the TypeInfo.
+
+ HRESULT hr = S_OK; // A result.
+ mdFieldDef fd; // A Field def.
+ ULONG iFD; // Loop control.
+ ULONG ixVar=0; // Index of current var converted.
+ ULONG cFD; // Count of total MemberDefs.
+ DWORD dwFlags; // Field flags.
+ DWORD dwPack; // Class pack size.
+ mdToken tkExtends; // A class's parent.
+ LPCUTF8 szName; // Name in UTF8.
+ SString sName; // Name.
+
+ // To enum fields.
+ HENUMInternalHolder eFDi(pMT->GetMDImport());
+
+ // If the type is a struct, but it has explicit layout, don't export the members,
+ // because we can't export them accurately (unless they're really sequential).
+ if (pData->tkind == TKIND_RECORD)
+ {
+ IfFailReport(pMT->GetMDImport()->GetTypeDefProps(pMT->GetCl(), &dwFlags, &tkExtends));
+
+ if (IsTdExplicitLayout(dwFlags))
+ {
+ ReportWarning(S_OK, TLBX_I_NONSEQUENTIALSTRUCT);
+ return;
+ }
+ }
+
+ // Set the packing size, if there is one.
+ dwPack = 0;
+ if (FAILED(pMT->GetMDImport()->GetClassPackSize(pMT->GetCl(), &dwPack)))
+ {
+ dwPack = 0;
+ }
+ if (dwPack == 0)
+ {
+ dwPack = DEFAULT_PACKING_SIZE;
+ }
+
+ IfFailReport(pThisTypeInfo->SetAlignment((USHORT)dwPack));
+
+ // Haven't seen any non-public members yet.
+ m_bWarnedOfNonPublic = FALSE;
+
+ // If this class has a base class, export those members first.
+ ConvertRecordBaseClass(pData, pMT->GetParentMethodTable(), ixVar);
+
+ // Get an enumerator for the MemberDefs in the TypeDef.
+ eFDi.EnumInit(mdtFieldDef, pMT->GetCl());
+ cFD = pMT->GetMDImport()->EnumGetCount(&eFDi);
+
+ // For each MemberDef...
+ for (iFD=0; iFD<cFD; ++iFD)
+ {
+ // Get the next field.
+ if (!pMT->GetMDImport()->EnumNext(&eFDi, &fd))
+ {
+ IfFailReport(E_UNEXPECTED);
+ }
+
+ IfFailReport(pMT->GetMDImport()->GetFieldDefProps(fd, &dwFlags));
+
+ // Skip static fields.
+ if (IsFdStatic(dwFlags) == 0)
+ {
+ IfFailReport(pMT->GetMDImport()->GetNameOfFieldDef(fd, &szName));
+
+ sName.SetUTF8(szName);
+ if (ConvertVariable(pThisTypeInfo, pMT, fd, sName, ixVar))
+ ixVar++;
+ }
+ }
+} // void TypeLibExporter::ConvertRecord()
+
+//*****************************************************************************
+// Export an Enum to a typelib.
+//*****************************************************************************
+void TypeLibExporter::ConvertEnum(
+ ICreateTypeInfo2 *pThisTypeInfo, // The typeinfo being created.
+ MethodTable *pMT) // MethodTable for the TypeInfo.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pThisTypeInfo));
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ mdFieldDef fd; // A Field def.
+ DWORD dwTIFlags=0; // TypeLib flags.
+ ULONG dwFlags; // A field's flags.
+ ULONG iFD; // Loop control.
+ ULONG cFD; // Count of total MemberDefs.
+ ULONG iVar=0; // Count of vars actually converted.
+ LPCUTF8 szName; // Name in UTF8.
+ SString sName; // Name.
+ SafeComHolder<ITypeInfo> pThisTI=0; // TypeInfo for this ICreateITypeInfo.
+ BSTRHolder szThisTypeInfo=0; // Name of this ITypeInfo.
+
+ IMDInternalImport* pImport = pMT->GetMDImport();
+
+ // To enum fields.
+ HENUMInternalHolder eFDi(pImport);
+
+ // Explicitly set the flags.
+ IfFailReport(pThisTypeInfo->SetTypeFlags(dwTIFlags));
+
+ // Get an enumerator for the MemberDefs in the TypeDef.
+ eFDi.EnumInit(mdtFieldDef, pMT->GetCl());
+ cFD = pImport->EnumGetCount(&eFDi);
+
+ // Build the member name prefix from this typeinfo's actual (possibly decorated) name.
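+ // Illustrative example (hypothetical enum): the members of Color { Red, Green }
+ // export as Color_Red and Color_Green, since enum constants share the type
+ // library's flat namespace.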
+ IfFailReport(SafeQueryInterface(pThisTypeInfo, IID_ITypeInfo, (IUnknown**)&pThisTI));
+ IfFailReport(pThisTI->GetDocumentation(MEMBERID_NIL, &szThisTypeInfo, 0,0,0));
+
+ sName.Set(szThisTypeInfo);
+ sName.Append(W("_"));
+
+ SString sNameMember;
+ // For each MemberDef...
+ for (iFD=0; iFD<cFD; ++iFD)
+ {
+ // Get the next field.
+ if (!pImport->EnumNext(&eFDi, &fd))
+ {
+ IfFailReport(E_UNEXPECTED);
+ }
+
+ // Only convert static fields.
+ IfFailReport(pImport->GetFieldDefProps(fd, &dwFlags));
+
+ if (IsFdStatic(dwFlags) == 0)
+ {
+ continue;
+ }
+
+ // Skip ComVisible(false) members
+ if (!IsMemberVisibleFromCom(pMT, fd, mdTokenNil))
+ {
+ continue;
+ }
+
+ sNameMember.Set(sName);
+ IfFailReport(pImport->GetNameOfFieldDef(fd, &szName));
+
+ sNameMember.AppendUTF8(szName);
+
+ if (ConvertEnumMember(pThisTypeInfo, pMT, fd, sNameMember, iVar))
+ {
+ iVar++;
+ }
+ }
+} // void TypeLibExporter::ConvertEnum()
+
+//*****************************************************************************
+// Does a class have a default ctor?
+//*****************************************************************************
+BOOL TypeLibExporter::HasDefaultCtor(
+ MethodTable *pMT) // The class in question.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr; // A result.
+ mdMethodDef md; // A method of the type.
+ DWORD dwFlags; // Method's flags.
+ ULONG cMD; // Count of returned tokens.
+ ULONG iMD; // Loop control.
+ PCCOR_SIGNATURE pSig; // The signature.
+ ULONG ixSig; // Index into signature.
+ ULONG cbSig; // Size of the signature.
+ ULONG callconv; // Method's calling convention.
+ ULONG cParams; // Method's count of parameters.
+ BOOL rslt=FALSE; // Was one found?
+ LPCUTF8 pName; // Method name.
+
+ IMDInternalImport* pImport = pMT->GetMDImport();
+
+ // To enum methods.
+ HENUMInternalHolder eMDi(pImport);
+
+ // Get an enumerator for the MemberDefs in the TypeDef.
+ eMDi.EnumInit(mdtMethodDef, pMT->GetCl());
+ cMD = pImport->EnumGetCount(&eMDi);
+
+ // For each MemberDef...
+ for (iMD=0; iMD<cMD; ++iMD)
+ {
+ // Get the next field.
+ if (!pImport->EnumNext(&eMDi, &md))
+ {
+ IfFailReport(E_UNEXPECTED);
+ }
+
+ // Is the name special? Is the method public?
+ IfFailReport(pImport->GetMethodDefProps(md, &dwFlags));
+
+ if (!IsMdRTSpecialName(dwFlags) || !IsMdPublic(dwFlags))
+ continue;
+
+ // Yes, is the name a ctor?
+ IfFailReport(pImport->GetNameOfMethodDef(md, &pName));
+
+ if (!IsMdInstanceInitializer(dwFlags, pName))
+ continue;
+
+ // It is a ctor. Is it a default ctor?
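+ // Illustrative signature blob for a parameterless instance ctor: 0x20
+ // (HASTHIS calling convention), 0x00 (zero parameters), 0x01
+ // (ELEMENT_TYPE_VOID return type).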
+ IfFailReport(pImport->GetSigOfMethodDef(md, &cbSig, &pSig));
+
+ // Skip the calling convention, and get the param count.
+ ixSig = CorSigUncompressData(pSig, &callconv);
+ CorSigUncompressData(&pSig[ixSig], &cParams);
+
+ // Default ctor has zero params.
+ if (cParams == 0)
+ {
+ rslt = TRUE;
+ break;
+ }
+ }
+
+ return rslt;
+} // BOOL TypeLibExporter::HasDefaultCtor()
+
+//*****************************************************************************
+// Export a class to a TypeLib.
+//*****************************************************************************
+void TypeLibExporter::ConvertClassImplTypes(
+ ICreateTypeInfo2 *pThisTypeInfo, // The typeinfo being created.
+ ICreateTypeInfo2 *pClassItfTypeInfo,// The IClassX for the TypeInfo.
+ MethodTable *pMT) // MethodTable for the TypeInfo.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pThisTypeInfo, NULL_OK));
+ PRECONDITION(CheckPointer(pClassItfTypeInfo, NULL_OK));
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ HREFTYPE href; // HREF to a TypeInfo.
+ DWORD dwFlags; // Metadata flags.
+ int flags=0; // Flags for the interface impl or CoClass.
+ UINT iImpl=0; // Current Impl index.
+ MethodTable *pIDefault = 0; // Default interface, if any.
+ MethodTable *pDefItfMT = 0; // Default interface method table, if any.
+ CQuickArray<MethodTable *> SrcItfList; // List of event sources.
+ CorClassIfaceAttr classItfType = clsIfNone; // For class interface type.
+ DefaultInterfaceType DefItfType;
+ TypeHandle hndDefItfClass;
+
+ SafeComHolder<ITypeInfo> pTI=0; // TypeInfo for default dispinterface.
+ SafeComHolder<ICreateTypeInfo2> pCTI2 = NULL; // The ICreateTypeInfo2 interface used to define custom data.
+
+ // We should never be converting the class impl types of COM imported CoClasses.
+ _ASSERTE(!pMT->IsComImport());
+
+ if (pThisTypeInfo)
+ {
+ IfFailReport(pMT->GetMDImport()->GetTypeDefProps(pMT->GetCl(), &dwFlags, 0));
+
+ // If abstract class, or no default ctor, don't make it creatable.
+ if (!IsTdAbstract(dwFlags) && HasDefaultCtor(pMT))
+ flags |= TYPEFLAG_FCANCREATE;
+
+ // Set the type flags computed above.
+ IfFailReport(pThisTypeInfo->SetTypeFlags(flags));
+ }
+
+ // Retrieve the MethodTable that represents the default interface.
+ DefItfType = GetDefaultInterfaceForClassWrapper(TypeHandle(pMT), &hndDefItfClass);
+
+ // Remember the MethodTable of the default interface.
+ pIDefault = hndDefItfClass.GetMethodTable();
+
+ // For some classes we synthesize an IClassX. We don't do that for
+ // configured classes, classes imported from COM,
+ // or for classes with an explicit default interface.
+ if (pClassItfTypeInfo)
+ {
+ // Set the interface as the default for the class.
+ IfFailReport(SafeQueryInterface(pClassItfTypeInfo, IID_ITypeInfo, (IUnknown**)&pTI));
+ GetRefTypeInfo(pThisTypeInfo, pTI, &href);
+ IfFailReport(pThisTypeInfo->AddImplType(iImpl, href));
+
+ // If the class interface is the default interface, mark it as such.
+ if (pMT == pIDefault)
+ IfFailReport(pThisTypeInfo->SetImplTypeFlags(iImpl, IMPLTYPEFLAG_FDEFAULT));
+
+ // Increment the impl count.
+ ++iImpl;
+ }
+
+ // Go up the class hierarchy and add the IClassX's of the parent classes
+ // as interfaces implemented by the COM component.
+ MethodTable *pParentClass = pMT->GetComPlusParentMethodTable();
+ while (pParentClass)
+ {
+ // If the parent class has an IClassX interface then add it.
+ ClassHasIClassX(pParentClass, &classItfType);
+ if (classItfType == clsIfAutoDual)
+ {
+ hr = EEClassToHref(pThisTypeInfo, pParentClass, FALSE, &href);
+
+ // If not IUnknown, add the HREF as an interface.
+ if (hr != S_USEIUNKNOWN)
+ {
+ IfFailReport(pThisTypeInfo->AddImplType(iImpl, href));
+ if (pParentClass == pIDefault)
+ IfFailReport(pThisTypeInfo->SetImplTypeFlags(iImpl, IMPLTYPEFLAG_FDEFAULT));
+
+ ++iImpl;
+ }
+ }
+
+ // Process the next class up the hierarchy.
+ pParentClass = pParentClass->GetComPlusParentMethodTable();
+ }
+
+ ComCallWrapperTemplate *pClassTemplate = ComCallWrapperTemplate::GetTemplate(TypeHandle(pMT));
+ MethodTable::InterfaceMapIterator it = pMT->IterateInterfaceMap();
+ while (it.Next())
+ {
+ flags = 0;
+
+ // Get the MethodTable for an implemented interface.
+ MethodTable *pIClass = it.GetInterface();
+
+ // Retrieve the ComMethodTable for the interface.
+ ComMethodTable *pItfComMT = pClassTemplate->GetComMTForItf(pIClass);
+
+ // If the interface is visible from COM, add it.
+ if (IsTypeVisibleFromCom(TypeHandle(pIClass)) && !pItfComMT->IsComClassItf())
+ {
+#if defined(_DEBUG)
+ TRACE("Class %s implements %s\n", pMT->GetDebugClassName(), pIClass->GetDebugClassName());
+#endif
+ // Get an href for the managed class.
+ hr = EEClassToHref(pThisTypeInfo, pIClass, FALSE, &href);
+
+ // If not IUnknown, add the HREF as an interface.
+ if (hr != S_USEIUNKNOWN)
+ {
+ if (pIClass == pIDefault)
+ flags |= IMPLTYPEFLAG_FDEFAULT;
+
+ IfFailReport(pThisTypeInfo->AddImplType(iImpl, href));
+ IfFailReport(pThisTypeInfo->SetImplTypeFlags(iImpl, flags));
+ ++iImpl;
+ }
+ }
+ else if (!IsTypeVisibleFromCom(TypeHandle(pIClass)) && (pIClass == pIDefault))
+ {
+ // Report a warning if the default interface is not COM visible
+ ReportWarning(TLBX_W_DEFAULT_INTF_NOT_VISIBLE, TLBX_W_DEFAULT_INTF_NOT_VISIBLE);
+ }
+ }
+
+ // Retrieve the list of COM source interfaces for the managed class.
+ GetComSourceInterfacesForClass(pMT, SrcItfList);
+
+ // Add all the source interfaces to the CoClass.
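+ // The first source interface added also gets IMPLTYPEFLAG_FDEFAULT, marking
+ // it the [default, source] event interface; the flag is dropped after the
+ // first successful add below.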
+ flags = IMPLTYPEFLAG_FSOURCE | IMPLTYPEFLAG_FDEFAULT;
+ for (UINT i = 0; i < SrcItfList.Size(); i++)
+ {
+ hr = EEClassToHref(pThisTypeInfo, SrcItfList[i], FALSE, &href);
+
+ // If not IUnknown, add the HREF as an interface.
+ if (hr != S_USEIUNKNOWN)
+ {
+ IfFailReport(pThisTypeInfo->AddImplType(iImpl, href));
+ IfFailReport(pThisTypeInfo->SetImplTypeFlags(iImpl, flags));
+ ++iImpl;
+ flags = IMPLTYPEFLAG_FSOURCE;
+ }
+ }
+} // void TypeLibExporter::ConvertClassImplTypes()
+
+//*****************************************************************************
+// Export a class to a TypeLib.
+//*****************************************************************************
+void TypeLibExporter::ConvertClassDetails(
+ ICreateTypeInfo2 *pThisTypeInfo, // The typeinfo being created.
+ ICreateTypeInfo2 *pDefaultTypeInfo, // The IClassX for the TypeInfo.
+ MethodTable *pMT, // MethodTable for the TypeInfo.
+ int bAutoProxy) // If true, oleaut32 is the proxy.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pThisTypeInfo, NULL_OK));
+ PRECONDITION(CheckPointer(pDefaultTypeInfo, NULL_OK));
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ CorClassIfaceAttr classItfType = clsIfNone;
+
+ ClassHasIClassX(pMT, &classItfType);
+ if (classItfType == clsIfAutoDual)
+ {
+ // Set up the IClassX interface.
+ ConvertIClassX(pDefaultTypeInfo, pMT, bAutoProxy);
+ }
+ else if (pDefaultTypeInfo)
+ {
+ DWORD dwTIFlags = TYPEFLAG_FDUAL | TYPEFLAG_FOLEAUTOMATION | TYPEFLAG_FDISPATCHABLE | TYPEFLAG_FHIDDEN;
+ if (!bAutoProxy)
+ dwTIFlags |= TYPEFLAG_FPROXY;
+ IfFailReport(pDefaultTypeInfo->SetTypeFlags(dwTIFlags));
+ }
+} // void TypeLibExporter::ConvertClassDetails()
+
+//*****************************************************************************
+// Create the DispInterface for the vtable that describes an entire class.
+//*****************************************************************************
+void TypeLibExporter::ConvertIClassX(
+ ICreateTypeInfo2 *pThisTypeInfo, // The TypeInfo for the IClassX.
+ MethodTable *pMT, // The MethodTable object for the class.
+ int bAutoProxy) // If true, oleaut32 is the proxy.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pThisTypeInfo));
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ DWORD dwTIFlags=0; // TypeLib flags.
+ DWORD nSlots; // Number of vtable slots.
+ UINT i; // Loop control.
+ int cVisibleMembers = 0; // The count of methods that are visible to COM.
+ ComMTMemberInfoMap MemberMap(pMT); // The map of members.
+
+ // Should be an actual class.
+ _ASSERTE(!pMT->IsInterface());
+
+ // Retrieve the method properties.
+ size_t sizeOfPtr = IsExportingAs64Bit() ? 8 : 4;
+
+ MemberMap.Init(sizeOfPtr);
+ if (MemberMap.HadDuplicateDispIds())
+ ReportWarning(TLBX_I_DUPLICATE_DISPID, TLBX_I_DUPLICATE_DISPID);
+
+ // We need a scope to bypass the "initialization skipped by goto ErrExit"
+ // compiler error.
+ {
+ CQuickArray<ComMTMethodProps> &rProps = MemberMap.GetMethods();
+ nSlots = (DWORD)rProps.Size();
+
+ dwTIFlags |= TYPEFLAG_FDUAL | TYPEFLAG_FOLEAUTOMATION | TYPEFLAG_FDISPATCHABLE | TYPEFLAG_FHIDDEN | TYPEFLAG_FNONEXTENSIBLE;
+ if (!bAutoProxy)
+ dwTIFlags |= TYPEFLAG_FPROXY;
+ IfFailReport(pThisTypeInfo->SetTypeFlags(dwTIFlags));
+
+ // Assign slot numbers.
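+ // Managed members begin at vtable slot 7: slots 0-2 are IUnknown
+ // (QueryInterface/AddRef/Release) and slots 3-6 are IDispatch
+ // (GetTypeInfoCount/GetTypeInfo/GetIDsOfNames/Invoke), since IClassX is dual.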
+ for (i=0; i<nSlots; ++i)
+ rProps[i].oVft = (short)((7 + i) * sizeOfPtr);
+
+ // Now add the methods to the TypeInfo.
+ for (i=0; i<nSlots; ++i)
+ {
+ TRACE("[%d] %10ls pMeth:%08x, prop:%d, semantic:%d, dispid:0x%x, oVft:%d\n", i, rProps[i].pName, rProps[i].pMeth,
+ rProps[i].property, rProps[i].semantic, rProps[i].dispid, rProps[i].oVft);
+ if (rProps[i].bMemberVisible)
+ {
+ if (rProps[i].semantic < FieldSemanticOffset)
+ {
+ if (ConvertMethod(pThisTypeInfo, &rProps[i], cVisibleMembers, ifDual))
+ cVisibleMembers++;
+ }
+ else
+ {
+ if (ConvertFieldAsMethod(pThisTypeInfo, &rProps[i], cVisibleMembers))
+ cVisibleMembers++;
+ }
+ }
+ }
+ }
+} // void TypeLibExporter::ConvertIClassX()
+
+
+//*****************************************************************************
+// Export a Method's metadata to a typelib.
+//*****************************************************************************
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+BOOL TypeLibExporter::ConvertMethod(
+ ICreateTypeInfo2 *pCTI, // ICreateTypeInfo2 to get the method.
+ ComMTMethodProps *pProps, // Some properties of the method.
+ ULONG iMD, // Index of the member
+ ULONG ulIface) // Is this interface : IUnknown, [dual], or DISPINTERFACE?
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pCTI));
+ PRECONDITION(CheckPointer(pProps));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ HRESULT hrSignature = S_OK; // A failure HR;
+ LPCUTF8 pszName; // Name in UTF8.
+ SString sName; // Holds name.
+ ULONG dwImplFlags; // The function's impl flags.
+ PCCOR_SIGNATURE pbSig; // Pointer to Cor signature.
+ ULONG cbSig; // Size of Cor signature.
+ ULONG ixSig; // Index into signature.
+ ULONG cbElem; // Size of an element in the signature.
+ ULONG callconv; // A member's calling convention.
+ ULONG ret; // The return type.
+ ULONG elem; // A signature element.
+ TYPEDESC *pRetVal=0; // Return type's TYPEDESC.
+ ULONG cSrcParams; // Count of source params.
+ ULONG cDestParams = 0; // Count of dest parameters.
+ USHORT iSrcParam; // Loop control, over params.
+ USHORT iDestParam; // Loop control, over params.
+ USHORT iLCIDParam; // The index of the LCID param.
+ ULONG dwParamFlags; // A parameter's flags.
+ CDescPool sPool; // Pool of memory in which to build funcdesc.
+ CDescPool sVariants; // Pool of variants for default values.
+ PARAMDESCEX *pParamDesc; // Pointer to one param default value.
+ int bHrMunge=true; // Munge return type to HRESULT?
+ CQuickArray<BSTR> rNames; // Array of names to function and parameters.
+ ULONG cNames=0; // Count of function and parameter names.
+ FUNCDESC *pfunc = NULL; // A funcdesc.
+ MethodDesc *pMeth; // A MethodDesc.
+ IMDInternalImport *pInternalImport; // Internal interface containing the method.
+ MDDefaultValue defaultValue; // place holder for default value
+ PCCOR_SIGNATURE pvNativeType; // native parameter type
+ ULONG cbNativeType = 0; // native parameter type length
+ MethodTable *pMT; // Class containing the method.
+ int bHasOptorDefault=false; // If true, the method has optional params or default values -- no vararg
+ const void *pvData; // Pointer to a custom attribute.
+ ULONG cbData; // Size of custom attribute.
+ BOOL bByRef; // Is a parameter byref?
+ BSTRHolder bstrDescr=0; // Description of the method.
+ VariantHolder vtManagedName; // Variant used to set the managed name of the member.
+
+ ZeroHolder zhParam = &m_ErrorContext.m_szParam; // Clear error reporting info.
+ ZeroHolder zhMember = &m_ErrorContext.m_szMember; // Clear error reporting info.
+
+ // Get info about the method.
+ pMeth = pProps->pMeth;
+ pMeth->GetSig(&pbSig, &cbSig);
+ pInternalImport = pMeth->GetMDImport();
+ pMT = pMeth->GetMethodTable();
+ IfFailReport(pInternalImport->GetMethodImplProps(pMeth->GetMemberDef(), 0, &dwImplFlags));
+
+ // Error reporting info.
+ IfFailReport(pInternalImport->GetNameOfMethodDef(pMeth->GetMemberDef(), &m_ErrorContext.m_szMember));
+
+ // Allocate one variant.
+ pParamDesc = reinterpret_cast<PARAMDESCEX*>(sVariants.AllocZero(sizeof(PARAMDESCEX)));
+ if(NULL == pParamDesc)
+ IfFailReport(E_OUTOFMEMORY);
+
+ // Prepare to parse signature and build the FUNCDESC.
+ pfunc = reinterpret_cast<FUNCDESC*>(sPool.AllocZero(sizeof(FUNCDESC)));
+ if (pfunc == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ ixSig = 0;
+
+ // Get the calling convention.
+ ixSig += CorSigUncompressData(&pbSig[ixSig], &callconv);
+ _ASSERTE((callconv & IMAGE_CEE_CS_CALLCONV_MASK) != IMAGE_CEE_CS_CALLCONV_FIELD);
+ pfunc->callconv = Clr2TlbCallConv[callconv & IMAGE_CEE_CS_CALLCONV_MASK];
+
+ // vtable offset.
+ pfunc->oVft = pProps->oVft;
+
+ // Get the argument count. Allow for an extra in case of [retval].
+ ixSig += CorSigUncompressData(&pbSig[ixSig], &cSrcParams);
+ cDestParams = cSrcParams;
+ rNames.ReSizeThrows(cDestParams+3);
+ memset(rNames.Ptr(), 0, (cDestParams+3) * sizeof(BSTR));
+
+ // Set some method properties.
+ pfunc->memid = pProps->dispid;
+ if (pfunc->memid == -11111) //@todo: fix for msvbalib.dll
+ pfunc->memid = -1;
+ pfunc->funckind = FUNC_PUREVIRTUAL;
+
+ // Set the invkind based on whether the function is an accessor.
+ if (pProps->semantic == 0)
+ pfunc->invkind = INVOKE_FUNC;
+ else if (pProps->semantic == msGetter)
+ pfunc->invkind = INVOKE_PROPERTYGET;
+ else if (pProps->semantic == msSetter)
+ pfunc->invkind = INVOKE_PROPERTYPUTREF;
+ else if (pProps->semantic == msOther)
+ pfunc->invkind = INVOKE_PROPERTYPUT;
+ else
+ pfunc->invkind = INVOKE_FUNC; // non-accessor property function.
+
+ rNames[0] = pProps->pName;
+ cNames = 1;
+
+ // Convert return type to elemdesc. If we are doing HRESULT munging, we need to
+ // examine the return type, and if it is not VOID, create an additional final
+ // parameter as a pointer to the type.
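+ //
+ // Illustrative example (hypothetical method, not from the sources): absent
+ // PreserveSig, a managed "int Add(int a, int b)" exports roughly as
+ // "HRESULT Add([in] long a, [in] long b, [out, retval] long* pRetVal)".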
+
+ // Get the return type.
+ cbElem = CorSigUncompressData(&pbSig[ixSig], &ret);
+
+ // Error reporting info.
+ m_ErrorContext.m_ixParam = 0;
+
+ // Get native type of return if available
+ mdParamDef pdParam;
+ pvNativeType = NULL;
+ hr = pInternalImport->FindParamOfMethod(pMeth->GetMemberDef(), 0, &pdParam);
+ if (hr == S_OK)
+ {
+ hr = pInternalImport->GetFieldMarshal(pdParam, &pvNativeType, &cbNativeType);
+ if (hr != CLDB_E_RECORD_NOTFOUND)
+ {
+ IfFailReport(hr);
+ }
+ }
+
+ // Determine if we need to do HRESULT munging.
+ bHrMunge = !IsMiPreserveSig(dwImplFlags);
+
+ // Reset some properties for DISPINTERFACES.
+ if (ulIface == ifDispatch)
+ {
+ pfunc->callconv = CC_STDCALL;
+ pfunc->funckind = FUNC_DISPATCH;
+
+ // Never munge a dispinterface.
+ bHrMunge = false;
+ }
+
+ if (bHrMunge)
+ {
+ // Munge the return type into a new last param, set return type to HRESULT.
+ pfunc->elemdescFunc.tdesc.vt = VT_HRESULT;
+
+ // Does the function actually return anything?
+ if (ret == ELEMENT_TYPE_VOID)
+ {
+ // Skip over the return value, no [retval].
+ pRetVal = 0;
+ ixSig += cbElem;
+ }
+ else
+ {
+ // Allocate a TYPEDESC to be pointed to, convert type into it.
+ pRetVal = reinterpret_cast<TYPEDESC*>(sPool.AllocZero(sizeof(TYPEDESC)));
+ if (pRetVal == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ hr = CorSigToTypeDesc(pCTI, pMT, &pbSig[ixSig], pvNativeType, cbNativeType, &cbElem, pRetVal, &sPool, TRUE);
+ if (FAILED(hr))
+ return FALSE;
+
+ ixSig += cbElem;
+
+ ++cDestParams;
+ // It is pretty weird for a property putter to return something, but apparently legal.
+ //_ASSERTE(pfunc->invkind != INVOKE_PROPERTYPUT && pfunc->invkind != INVOKE_PROPERTYPUTREF);
+
+ // Todo: When the C compiler tries to import a typelib with a C
+ // array return type (even if it's a retval),
+ // it generates a wrapper method with a signature like "int [] foo()",
+ // which isn't valid C, so it barfs. So, we'll change the return type
+ // to a pointer by hand.
+ if (pRetVal->vt == VT_CARRAY)
+ {
+ pRetVal->vt = VT_PTR;
+ pRetVal->lptdesc = &pRetVal->lpadesc->tdescElem;
+ }
+ }
+ }
+ else
+ {
+ // No munging, convert return type.
+ pRetVal = 0;
+ hr = CorSigToTypeDesc(pCTI, pMT, &pbSig[ixSig], pvNativeType, cbNativeType, &cbElem, &pfunc->elemdescFunc.tdesc, &sPool, TRUE);
+ if (FAILED(hr))
+ return FALSE;
+
+ ixSig += cbElem;
+ }
+
+ // Error reporting info.
+ m_ErrorContext.m_ixParam = -1;
+
+ // Check to see if there is an LCIDConversion attribute on the method.
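+ // Illustrative example (hypothetical method): with LCIDConversion(1) on
+ // "void M(string a, string b)", an [in, lcid] long is inserted between the
+ // two parameters: roughly "HRESULT M([in] BSTR a, [in] long lcid, [in] BSTR b)".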
+ iLCIDParam = (USHORT)GetLCIDParameterIndex(pMeth);
+ if (iLCIDParam != (USHORT)-1)
+ {
+ BOOL bValidLCID = TRUE;
+
+ // Make sure the parameter index is valid.
+ if (iLCIDParam > cSrcParams)
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_INVALIDLCIDPARAM);
+ bValidLCID = FALSE;
+ }
+
+ // LCID's are not allowed on pure dispatch interfaces.
+ if (ulIface == ifDispatch)
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_LCIDONDISPONLYITF);
+ bValidLCID = FALSE;
+ }
+
+ if (bValidLCID)
+ {
+ // Take the LCID parameter into account in the exported method.
+ ++cDestParams;
+ }
+ else
+ {
+ // The LCID is invalid so we will ignore it.
+ iLCIDParam = -1;
+ }
+ }
+
+ // for each parameter
+ pfunc->lprgelemdescParam = reinterpret_cast<ELEMDESC*>(sPool.AllocZero(cDestParams * sizeof(ELEMDESC)));
+ if (pfunc->lprgelemdescParam == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ // Holds the allocated strings so we can deallocate on function exit.
+ // Only need +1 as we don't clean up the first and last names (function name and retval)
+ NewArrayHolder<BSTRHolder> namesHolder = new BSTRHolder[cDestParams+1];
+
+ // Variant array used to hold default value data
+ NewArrayHolder<VariantPtrHolder> vtDefaultValues = new VariantPtrHolder[cDestParams];
+
+ pfunc->cParams = static_cast<short>(cDestParams);
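+    // Walk the source (managed) parameters, which are 1-based (slot 0 is the
+    // return type), filling in the destination (exported) parameters, which may
+    // also include an injected LCID parameter and a trailing [retval].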
+ for (iSrcParam=1, iDestParam=0; iDestParam<cDestParams; ++iSrcParam, ++iDestParam)
+ {
+ // Check to see if we need to insert the LCID param before the current param.
+ if (iLCIDParam == iDestParam)
+ {
+ // Set the flags and the type of the parameter.
+ pfunc->lprgelemdescParam[iDestParam].paramdesc.wParamFlags = PARAMFLAG_FIN | PARAMFLAG_FLCID;
+ pfunc->lprgelemdescParam[iDestParam].tdesc.vt = VT_I4;
+
+ // Generate a parameter name.
+ sName.Printf(szParamName, iDestParam + 1);
+
+ rNames[iDestParam + 1] = SysAllocString(sName.GetUnicode());
+ if (rNames[iDestParam + 1] == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ namesHolder[iDestParam+1] = rNames[iDestParam + 1];
+
+ ++cNames;
+
+ // Increment the current destination parameter.
+ ++iDestParam;
+ }
+
+ // If we are past the end of the source parameters then we are done.
+ if (iSrcParam > cSrcParams)
+ break;
+
+ // Get additional parameter metadata.
+ dwParamFlags = 0;
+ sName.Clear();
+
+ // Error reporting info.
+ m_ErrorContext.m_ixParam = iSrcParam;
+
+ // See if there is a ParamDef for this param.
+ hr = pInternalImport->FindParamOfMethod(pMeth->GetMemberDef(), iSrcParam, &pdParam);
+
+ pvNativeType = NULL;
+ if (hr == S_OK)
+ {
+ // Get info about the param.
+ IfFailReport(pInternalImport->GetParamDefProps(pdParam, &iSrcParam, &dwParamFlags, &pszName));
+
+ // Error reporting info.
+ m_ErrorContext.m_szParam = pszName;
+
+ // Turn off reserved (internal use) bits.
+ dwParamFlags &= ~pdReservedMask;
+
+ // Convert name from UTF8 to unicode.
+ sName.SetUTF8(pszName);
+
+ // Param default value, if any.
+ IfFailReport(pInternalImport->GetDefaultValue(pdParam, &defaultValue));
+ IfFailReport(_FillVariant(&defaultValue, &pParamDesc->varDefaultValue));
+
+ // If no default value, check for decimal custom attribute.
+ if (pParamDesc->varDefaultValue.vt == VT_EMPTY)
+ {
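+                // The blob begins with the standard 2-byte custom attribute prolog,
+                // followed by the DecimalValue constructor args: scale (1 byte),
+                // sign (1 byte), then hi/mid/lo (4 bytes each).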
+ IfFailReport(pMT->GetMDImport()->GetCustomAttributeByName(pdParam, INTEROP_DECIMALVALUE_TYPE, &pvData,&cbData));
+ if (hr == S_OK && cbData >= (2 + sizeof(BYTE)+sizeof(BYTE)+sizeof(UINT)+sizeof(UINT)+sizeof(UINT)))
+ {
+ const BYTE *pbData = (const BYTE *)pvData;
+ pParamDesc->varDefaultValue.vt = VT_DECIMAL;
+ pParamDesc->varDefaultValue.decVal.scale = *(BYTE*)(pbData+2);
+ pParamDesc->varDefaultValue.decVal.sign= *(BYTE*)(pbData+3);
+ pParamDesc->varDefaultValue.decVal.Hi32= GET_UNALIGNED_32(pbData+4);
+ pParamDesc->varDefaultValue.decVal.Mid32= GET_UNALIGNED_32(pbData+8);
+ pParamDesc->varDefaultValue.decVal.Lo32= GET_UNALIGNED_32(pbData+12);
+ }
+ }
+ // If still no default value, check for date time custom attribute.
+ if (pParamDesc->varDefaultValue.vt == VT_EMPTY)
+ {
+ IfFailReport(pMT->GetMDImport()->GetCustomAttributeByName(pdParam, INTEROP_DATETIMEVALUE_TYPE, &pvData,&cbData));
+ if (hr == S_OK && cbData >= (2 + sizeof(__int64)))
+ {
+ const BYTE *pbData = (const BYTE *)pvData;
+ pParamDesc->varDefaultValue.vt = VT_DATE;
+ pParamDesc->varDefaultValue.date = _TicksToDoubleDate(GET_UNALIGNED_64(pbData+2));
+ }
+ }
+ // If still no default value, check for IDispatch custom attribute.
+ if (pParamDesc->varDefaultValue.vt == VT_EMPTY)
+ {
+ IfFailReport(pMT->GetMDImport()->GetCustomAttributeByName(pdParam, INTEROP_IDISPATCHVALUE_TYPE, &pvData,&cbData));
+ if (hr == S_OK)
+ {
+ pParamDesc->varDefaultValue.vt = VT_DISPATCH;
+ pParamDesc->varDefaultValue.pdispVal = 0;
+ }
+ }
+ // If still no default value, check for IUnknown custom attribute.
+ if (pParamDesc->varDefaultValue.vt == VT_EMPTY)
+ {
+ IfFailReport(pMT->GetMDImport()->GetCustomAttributeByName(pdParam, INTEROP_IUNKNOWNVALUE_TYPE, &pvData,&cbData));
+ if (hr == S_OK)
+ {
+ pParamDesc->varDefaultValue.vt = VT_UNKNOWN;
+ pParamDesc->varDefaultValue.punkVal = 0;
+ }
+ }
+
+ if (pParamDesc->varDefaultValue.vt != VT_EMPTY)
+ {
+ // Copy the variant into the holder object so we release on function exit.
+ vtDefaultValues[iDestParam] = (VARIANT*)&pParamDesc->varDefaultValue;
+
+ pfunc->lprgelemdescParam[iDestParam].paramdesc.pparamdescex = pParamDesc;
+ dwParamFlags |= PARAMFLAG_FHASDEFAULT;
+
+ // Allocate another paramdesc.
+ pParamDesc = reinterpret_cast<PARAMDESCEX*>(sVariants.AllocZero(sizeof(PARAMDESCEX)));
+ if (pParamDesc == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ bHasOptorDefault = true;
+ }
+
+ // native marshal type, if any.
+ hr = pInternalImport->GetFieldMarshal(pdParam, &pvNativeType, &cbNativeType);
+ if (hr != CLDB_E_RECORD_NOTFOUND)
+ {
+ IfFailReport(hr);
+ }
+
+ // Remember if there are optional params.
+ if (dwParamFlags & PARAMFLAG_FOPT)
+ bHasOptorDefault = true;
+ }
+ else
+ {
+ pdParam = 0, m_ErrorContext.m_szParam = 0;
+ }
+
+        // Do we need a name for this parameter? Propput/propputref accessors
+        // leave their final parameter (the value being set) unnamed.
+ if ((pfunc->invkind & (INVOKE_PROPERTYPUT | INVOKE_PROPERTYPUTREF)) == 0 ||
+ iSrcParam < cSrcParams)
+ {
+ // Yes, so make one up if we don't have one.
+ if (sName.GetCount() == 0)
+ {
+ sName.Printf(szParamName, iDestParam + 1);
+ }
+
+ rNames[iDestParam + 1] = SysAllocString(sName.GetUnicode());
+ if (rNames[iDestParam + 1] == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ namesHolder[iDestParam+1] = rNames[iDestParam + 1];
+
+ ++cNames;
+ }
+
+ // Save the element type.
+ CorSigUncompressData(&pbSig[ixSig], &elem);
+
+ // Convert the param info to elemdesc.
+ bByRef = FALSE;
+ hr = CorSigToTypeDesc(pCTI, pMT, &pbSig[ixSig], pvNativeType, cbNativeType, &cbElem,
+ &pfunc->lprgelemdescParam[iDestParam].tdesc, &sPool, TRUE, &bByRef);
+ if (FAILED(hr))
+ return FALSE;
+
+ ixSig += cbElem;
+
+ // If there is no [in,out], set one, based on the parameter.
+ if ((dwParamFlags & (PARAMFLAG_FOUT | PARAMFLAG_FIN)) == 0)
+ {
+ // If param is by reference, make in/out
+ if (bByRef)
+ dwParamFlags |= PARAMFLAG_FIN | PARAMFLAG_FOUT;
+ else
+ dwParamFlags |= PARAMFLAG_FIN;
+ }
+
+        // If this is the last param, and it is an array of objects, and it has a
+        // ParamArrayAttribute, the function is varargs.
+ if ((iSrcParam == cSrcParams) && !IsNilToken(pdParam) && !bHasOptorDefault)
+ {
+ if (pfunc->lprgelemdescParam[iDestParam].tdesc.vt == VT_SAFEARRAY &&
+ pfunc->lprgelemdescParam[iDestParam].tdesc.lpadesc->tdescElem.vt == VT_VARIANT)
+ {
+ if (pInternalImport->GetCustomAttributeByName(pdParam, INTEROP_PARAMARRAY_TYPE, 0,0) == S_OK)
+ pfunc->cParamsOpt = -1;
+ }
+ }
+
+ pfunc->lprgelemdescParam[iDestParam].paramdesc.wParamFlags = static_cast<USHORT>(dwParamFlags);
+ }
+
+ // Is there a [retval]?
+ if (pRetVal)
+ {
+ // Error reporting info.
+ m_ErrorContext.m_ixParam = 0;
+ m_ErrorContext.m_szParam = 0;
+
+ _ASSERTE(bHrMunge);
+ _ASSERTE(cDestParams > cSrcParams);
+ pfunc->lprgelemdescParam[cDestParams-1].tdesc.vt = VT_PTR;
+ pfunc->lprgelemdescParam[cDestParams-1].tdesc.lptdesc = pRetVal;
+ pfunc->lprgelemdescParam[cDestParams-1].paramdesc.wParamFlags = PARAMFLAG_FOUT | PARAMFLAG_FRETVAL;
+
+        // No need to allocate a new string for this; rather, use the constant szRetVal.
+ rNames[cDestParams] = (LPWSTR)szRetVal;
+
+ ++cNames;
+ }
+
+ // Error reporting info.
+ m_ErrorContext.m_ixParam = -1;
+
+ // Was there a signature error? If so, exit now that all sigs have been reported.
+ IfFailReport(hrSignature);
+
+ IfFailReport(pCTI->AddFuncDesc(iMD, pfunc));
+
+ IfFailReport(pCTI->SetFuncAndParamNames(iMD, rNames.Ptr(), cNames));
+
+ if (pProps->bFunction2Getter)
+ {
+ VARIANT vtOne;
+ vtOne.vt = VT_I4;
+ vtOne.lVal = 1;
+ IfFailReport(pCTI->SetFuncCustData(iMD, GUID_Function2Getter, &vtOne));
+ }
+
+ // If the managed name of the method is different from the unmanaged name, then
+ // we need to capture the managed name in a custom value. We only apply this
+ // attribute for methods since properties cannot be overloaded.
+ if (pProps->semantic == 0)
+ {
+ sName.SetUTF8(pMeth->GetName());
+ if (sName.Compare(SString(pProps->pName)) != 0)
+ {
+ V_VT(&vtManagedName) = VT_BSTR;
+
+ if (NULL == (V_BSTR(&vtManagedName) = SysAllocString(sName.GetUnicode())))
+ IfFailReport(E_OUTOFMEMORY);
+
+ IfFailReport(pCTI->SetFuncCustData(iMD, GUID_ManagedName, &vtManagedName));
+ }
+ }
+
+ // Check for a description.
+ if(GetDescriptionString(pMT, pMeth->GetMemberDef(), (BSTR &)bstrDescr))
+ IfFailReport(pCTI->SetFuncDocString(iMD, bstrDescr));
+
+
+ // Error reporting info.
+ m_ErrorContext.m_szMember = 0;
+ m_ErrorContext.m_szParam = 0;
+ m_ErrorContext.m_ixParam = -1;
+
+ return TRUE;
+} // BOOL TypeLibExporter::ConvertMethod()
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+//*****************************************************************************
+// Export a field as getter/setter methods to a typelib.
+//*****************************************************************************
+BOOL TypeLibExporter::ConvertFieldAsMethod(
+ ICreateTypeInfo2 *pCTI, // ICreateTypeInfo2 to get the method.
+ ComMTMethodProps *pProps, // Some properties of the method.
+ ULONG iMD) // Index of the member
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pCTI));
+ PRECONDITION(CheckPointer(pProps));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ PCCOR_SIGNATURE pbSig; // Pointer to Cor signature.
+ ULONG cbSig; // Size of Cor signature.
+ ULONG ixSig; // Index into signature.
+ ULONG cbElem; // Size of an element in the signature.
+
+ ULONG callconv; // A member's calling convention.
+ TYPEDESC *pType; // TYPEDESC for the field type.
+ CDescPool sPool; // Pool of memory in which to build funcdesc.
+ BSTR rNames[2]; // Array of names to function and parameters.
+ ULONG cNames; // Count of function and parameter names.
+ FUNCDESC *pfunc; // A funcdesc.
+ ComCallMethodDesc *pFieldMeth; // A MethodDesc for a field call.
+ FieldDesc *pField; // A FieldDesc.
+ IMDInternalImport *pInternalImport; // Internal interface containing the field.
+ PCCOR_SIGNATURE pvNativeType; // native field type
+ ULONG cbNativeType; // native field type length
+ MethodTable *pMT; // Class containing the field.
+ BSTRHolder bstrDescr=0; // Description of the method.
+
+ // Get info about the method.
+ pFieldMeth = reinterpret_cast<ComCallMethodDesc*>(pProps->pMeth);
+ pField = pFieldMeth->GetFieldDesc();
+ pField->GetSig(&pbSig, &cbSig);
+ pInternalImport = pField->GetMDImport();
+ pMT = pField->GetEnclosingMethodTable();
+
+ // Error reporting info.
+ IfFailReport(pMT->GetMDImport()->GetNameOfFieldDef(pField->GetMemberDef(), &m_ErrorContext.m_szMember));
+
+ // Prepare to parse signature and build the FUNCDESC.
+ pfunc = reinterpret_cast<FUNCDESC*>(sPool.AllocZero(sizeof(FUNCDESC)));
+ if (NULL == pfunc)
+ IfFailReport(E_OUTOFMEMORY);
+ ixSig = 0;
+
+ // Get the calling convention.
+ ixSig += CorSigUncompressData(&pbSig[ixSig], &callconv);
+ _ASSERTE(callconv == IMAGE_CEE_CS_CALLCONV_FIELD);
+ pfunc->callconv = CC_STDCALL;
+
+ // vtable offset.
+ pfunc->oVft = pProps->oVft;
+
+ // Set some method properties.
+ pfunc->memid = pProps->dispid;
+ pfunc->funckind = FUNC_PUREVIRTUAL;
+
+ // Set the invkind based on whether the function is an accessor.
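+    // Field accessor semantics are biased by FieldSemanticOffset to distinguish
+    // them from genuine property accessors.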
+ if ((pProps->semantic - FieldSemanticOffset) == msGetter)
+ pfunc->invkind = INVOKE_PROPERTYGET;
+ else if ((pProps->semantic - FieldSemanticOffset) == msSetter)
+ {
+ if (IsVbRefType(&pbSig[ixSig], pInternalImport))
+ pfunc->invkind = INVOKE_PROPERTYPUTREF;
+ else
+ pfunc->invkind = INVOKE_PROPERTYPUT;
+ }
+ else
+ _ASSERTE(!"Incorrect semantic in ConvertFieldAsMethod");
+
+ // Name of the function.
+ rNames[0] = pProps->pName;
+ cNames = 1;
+
+ // Return type is HRESULT.
+ pfunc->elemdescFunc.tdesc.vt = VT_HRESULT;
+
+ // Set up the one and only parameter.
+ pfunc->lprgelemdescParam = reinterpret_cast<ELEMDESC*>(sPool.AllocZero(sizeof(ELEMDESC)));
+ if (NULL == pfunc->lprgelemdescParam)
+ IfFailReport(E_OUTOFMEMORY);
+ pfunc->cParams = 1;
+
+ // Do we need a name for the parameter? If PROPERTYGET, we do.
+ if (pfunc->invkind == INVOKE_PROPERTYGET)
+ {
+ // Yes, so make one up.
+ rNames[1] = (WCHAR*)szRetVal;
+ ++cNames;
+ }
+
+ // If Getter, convert param as ptr, otherwise convert directly.
+ if (pfunc->invkind == INVOKE_PROPERTYGET)
+ {
+ pType = reinterpret_cast<TYPEDESC*>(sPool.AllocZero(sizeof(TYPEDESC)));
+ if (NULL == pType)
+ IfFailReport(E_OUTOFMEMORY);
+
+ pfunc->lprgelemdescParam[0].tdesc.vt = VT_PTR;
+ pfunc->lprgelemdescParam[0].tdesc.lptdesc = pType;
+ pfunc->lprgelemdescParam[0].paramdesc.wParamFlags = PARAMFLAG_FOUT | PARAMFLAG_FRETVAL;
+ }
+ else
+ {
+ pType = &pfunc->lprgelemdescParam[0].tdesc;
+ pfunc->lprgelemdescParam[0].paramdesc.wParamFlags = PARAMFLAG_FIN;
+ }
+
+ // Get native field type
+ pvNativeType = NULL;
+ hr = pInternalImport->GetFieldMarshal(
+ pField->GetMemberDef(),
+ &pvNativeType,
+ &cbNativeType);
+ if (hr != CLDB_E_RECORD_NOTFOUND)
+ {
+ IfFailReport(hr);
+ }
+
+ // Convert the field type to elemdesc.
+ hr = CorSigToTypeDesc(pCTI, pMT, &pbSig[ixSig], pvNativeType, cbNativeType, &cbElem, pType, &sPool, TRUE);
+ if (FAILED(hr))
+ return FALSE;
+
+ ixSig += cbElem;
+
+    // It is unfortunate that we cannot handle this better. Fortunately,
+    // this should be very rare.
+ // This is a weird case - if we're getting a CARRAY, we cannot add
+ // a VT_PTR in the sig, as it will cause the C getter to return an
+ // array, which is bad. So we omit the extra pointer, which at least
+ // makes the compiler happy.
+ if (pfunc->invkind == INVOKE_PROPERTYGET
+ && pType->vt == VT_CARRAY)
+ {
+ pfunc->lprgelemdescParam[0].tdesc.vt = pType->vt;
+ pfunc->lprgelemdescParam[0].tdesc.lptdesc = pType->lptdesc;
+ }
+
+ // A property put of an object should be a propertyputref
+ if (pfunc->invkind == INVOKE_PROPERTYPUT &&
+ (pType->vt == VT_UNKNOWN || pType->vt == VT_DISPATCH))
+ {
+ pfunc->invkind = INVOKE_PROPERTYPUTREF;
+ }
+
+ IfFailReport(pCTI->AddFuncDesc(iMD, pfunc));
+
+ IfFailReport(pCTI->SetFuncAndParamNames(iMD, rNames, cNames));
+
+ // Check for a description.
+ if(GetDescriptionString(pMT, pField->GetMemberDef(), (BSTR &)bstrDescr))
+ IfFailReport(pCTI->SetFuncDocString(iMD, bstrDescr));
+
+ // Error reporting info.
+ m_ErrorContext.m_szMember = 0;
+
+ return TRUE;
+} // BOOL TypeLibExporter::ConvertFieldAsMethod()
+
+//*****************************************************************************
+// Export a variable's metadata to a typelib.
+//*****************************************************************************
+BOOL TypeLibExporter::ConvertVariable(
+ ICreateTypeInfo2 *pCTI, // ICreateTypeInfo2 to get the variable.
+ MethodTable *pMT, // The class containing the variable.
+ mdFieldDef md, // The member definition.
+ SString& sName, // Name of the member.
+ ULONG iMD) // Index of the member
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pCTI));
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ PCCOR_SIGNATURE pbSig; // Pointer to Cor signature.
+ ULONG cbSig; // Size of Cor signature.
+ ULONG ixSig; // Index into signature.
+ ULONG cbElem; // Size of an element in the signature.
+ DWORD dwFlags; // A member's flags.
+ ULONG callconv; // A member's calling convention.
+ MDDefaultValue defaultValue; // default value
+ ULONG dispid=DISPID_UNKNOWN; // The variable's dispid.
+ CDescPool sPool; // Pool of memory in which to build vardesc.
+ VARDESC *pvar; // A vardesc.
+ PCCOR_SIGNATURE pvNativeType; // native field type
+ ULONG cbNativeType; // native field type length
+ const void *pvData; // Pointer to a custom attribute.
+ ULONG cbData; // Size of custom attribute.
+ LPWSTR pSuffix; // Pointer into the name.
+ int iSuffix = 0; // Counter for suffix.
+ BSTRHolder bstrDescr=0; // Description of the method.
+
+ VARIANT vtTemp;
+ VariantPtrHolder vtVariant = &vtTemp;
+
+ SafeVariantInit(vtVariant);
+
+ // Error reporting info.
+ IfFailReport(pMT->GetMDImport()->GetNameOfFieldDef(md, &m_ErrorContext.m_szMember));
+
+ // Get info about the field.
+ IfFailReport(pMT->GetMDImport()->GetDispIdOfMemberDef(md, &dispid));
+ IfFailReport(pMT->GetMDImport()->GetFieldDefProps(md, &dwFlags));
+ if (IsFdHasDefault(dwFlags))
+ {
+ IfFailReport(pMT->GetMDImport()->GetDefaultValue(md, &defaultValue));
+ IfFailReport( _FillVariant(&defaultValue, vtVariant) );
+ }
+
+ // If exporting a non-public member of a struct, warn the user.
+ if (!IsFdPublic(dwFlags) && !m_bWarnedOfNonPublic)
+ {
+ m_bWarnedOfNonPublic = TRUE;
+ ReportWarning(TLBX_E_NONPUBLIC_FIELD, TLBX_E_NONPUBLIC_FIELD);
+ }
+
+ IfFailReport(pMT->GetMDImport()->GetSigOfFieldDef(md, &cbSig, &pbSig));
+
+ // Prepare to parse signature and build the VARDESC.
+ pvar = reinterpret_cast<VARDESC*>(sPool.AllocZero(sizeof(VARDESC)));
+ if(pvar == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+ ixSig = 0;
+
+ // Get the calling convention.
+ ixSig += CorSigUncompressData(&pbSig[ixSig], &callconv);
+ _ASSERTE(callconv == IMAGE_CEE_CS_CALLCONV_FIELD);
+
+ // Get native field type
+ pvNativeType = NULL;
+ hr = pMT->GetMDImport()->GetFieldMarshal(md, &pvNativeType, &cbNativeType);
+ if (hr != CLDB_E_RECORD_NOTFOUND)
+ {
+ IfFailReport(hr);
+ }
+
+ // Convert the type to elemdesc.
+ hr = CorSigToTypeDesc(pCTI, pMT, &pbSig[ixSig], pvNativeType, cbNativeType, &cbElem, &pvar->elemdescVar.tdesc, &sPool, FALSE);
+ if (FAILED(hr))
+ return FALSE;
+
+ ixSig += cbElem;
+
+ pvar->wVarFlags = 0;
+ pvar->varkind = VAR_PERINSTANCE;
+ pvar->memid = dispid;
+
+ // Constant value.
+ if (vtVariant->vt != VT_EMPTY)
+ pvar->lpvarValue = vtVariant;
+ else
+ {
+ IfFailReport(pMT->GetMDImport()->GetCustomAttributeByName(md, INTEROP_DECIMALVALUE_TYPE, &pvData,&cbData));
+ if (hr == S_OK && cbData >= (2 + sizeof(BYTE)+sizeof(BYTE)+sizeof(UINT)+sizeof(UINT)+sizeof(UINT)))
+ {
+ const BYTE *pbData = (const BYTE *)pvData;
+ vtVariant->vt = VT_DECIMAL;
+ vtVariant->decVal.scale = *(BYTE*)(pbData+2);
+ vtVariant->decVal.sign= *(BYTE*)(pbData+3);
+ vtVariant->decVal.Hi32= GET_UNALIGNED_32(pbData+4);
+ vtVariant->decVal.Mid32= GET_UNALIGNED_32(pbData+8);
+ vtVariant->decVal.Lo32= GET_UNALIGNED_32(pbData+12);
+ pvar->lpvarValue = vtVariant;
+ }
+ // If still no default value, check for date time custom attribute.
+ if (vtVariant->vt == VT_EMPTY)
+ {
+ IfFailReport(pMT->GetMDImport()->GetCustomAttributeByName(md, INTEROP_DATETIMEVALUE_TYPE, &pvData,&cbData));
+ if (hr == S_OK && cbData >= (2 + sizeof(__int64)))
+ {
+ const BYTE *pbData = (const BYTE *)pvData;
+ vtVariant->vt = VT_DATE;
+ vtVariant->date = _TicksToDoubleDate(GET_UNALIGNED_64(pbData+2));
+ }
+ }
+ // If still no default value, check for IDispatch custom attribute.
+ if (vtVariant->vt == VT_EMPTY)
+ {
+ IfFailReport(pMT->GetMDImport()->GetCustomAttributeByName(md, INTEROP_IDISPATCHVALUE_TYPE, &pvData,&cbData));
+ if (hr == S_OK)
+ {
+ vtVariant->vt = VT_DISPATCH;
+ vtVariant->pdispVal = 0;
+ }
+ }
+ // If still no default value, check for IUnknown custom attribute.
+ if (vtVariant->vt == VT_EMPTY)
+ {
+ IfFailReport(pMT->GetMDImport()->GetCustomAttributeByName(md, INTEROP_IUNKNOWNVALUE_TYPE, &pvData,&cbData));
+ if (hr == S_OK)
+ {
+ vtVariant->vt = VT_UNKNOWN;
+ vtVariant->punkVal = 0;
+ }
+ }
+ }
+
+ IfFailReport(pCTI->AddVarDesc(iMD, pvar));
+
+ // Set the name for the member; decorate if necessary.
+ pSuffix = 0;
+ for (;;)
+ {
+ // Attempt to set the name.
+ hr = pCTI->SetVarName(iMD, (LPOLESTR)sName.GetUnicode());
+
+ // If a name conflict, decorate, otherwise, done.
+ if (hr != TYPE_E_AMBIGUOUSNAME)
+ break;
+
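+        // Decorate the name to resolve the conflict: the first retry appends a
+        // numbered suffix (szDuplicateDecoration); later retries strip the previous
+        // two-character suffix before appending the next one.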
+ if (iSuffix == 0)
+ {
+ iSuffix = 2;
+ }
+ else
+ {
+ sName.Delete(sName.End()-=2, 2);
+ }
+
+ SString sDup;
+ sDup.Printf(szDuplicateDecoration, iSuffix++);
+
+ sName.Append(sDup);
+ }
+ IfFailReport(hr);
+
+ // Check for a description.
+ if(GetDescriptionString(pMT, md, (BSTR &)bstrDescr))
+ IfFailReport(pCTI->SetVarDocString(iMD, bstrDescr));
+
+ // Error reporting info.
+ m_ErrorContext.m_szMember = 0;
+
+ return TRUE;
+} // BOOL TypeLibExporter::ConvertVariable()
+
+//*****************************************************************************
+// Export an enum member's metadata to a typelib.
+//*****************************************************************************
+BOOL TypeLibExporter::ConvertEnumMember(
+ ICreateTypeInfo2 *pCTI, // ICreateTypeInfo2 to get the variable.
+ MethodTable *pMT, // The Class containing the member.
+ mdFieldDef md, // The member definition.
+ SString& sName, // Name of the member.
+ ULONG iMD) // Index of the member
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pCTI));
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ LPCUTF8 pName, pNS; // To format name.
+ DWORD dwFlags; // A member's flags.
+ VARIANT vtVariant; // A Variant.
+ MDDefaultValue defaultValue; // default value
+ ULONG dispid=DISPID_UNKNOWN; // The variable's dispid.
+ CDescPool sPool; // Pool of memory in which to build vardesc.
+ VARDESC *pvar; // A vardesc.
+ BSTRHolder bstrDescr=0; // Description of the method.
+
+ vtVariant.vt = VT_EMPTY;
+
+ // Error reporting info.
+ IfFailReport(pMT->GetMDImport()->GetNameOfFieldDef(md, &m_ErrorContext.m_szMember));
+
+ // Get info about the field.
+ IfFailReport(pMT->GetMDImport()->GetDispIdOfMemberDef(md, &dispid));
+ IfFailReport(pMT->GetMDImport()->GetFieldDefProps(md, &dwFlags));
+
+    // We do not need to handle decimals here since enums can only be integral types.
+ IfFailReport(pMT->GetMDImport()->GetDefaultValue(md, &defaultValue));
+
+ // Prepare to parse signature and build the VARDESC.
+ pvar = reinterpret_cast<VARDESC*>(sPool.AllocZero(sizeof(VARDESC)));
+ if (NULL == pvar)
+ IfFailReport(E_OUTOFMEMORY);
+
+ IfFailReport( _FillVariant(&defaultValue, &vtVariant) );
+
+ // Don't care what the metadata says the type is -- the type is I4 in the typelib.
+ pvar->elemdescVar.tdesc.vt = VT_I4;
+
+ pvar->wVarFlags = 0;
+ pvar->varkind = VAR_CONST;
+ pvar->memid = dispid;
+
+ // Constant value.
+ if (vtVariant.vt != VT_EMPTY)
+ {
+ pvar->lpvarValue = &vtVariant;
+
+ // If this is an I8 or UI8, do the conversion manually, because some
+ // systems' oleaut32 don't support 64-bit integers.
+ if (vtVariant.vt == VT_I8)
+ {
+            // If within range of a 32-bit signed number, OK.
+ if (vtVariant.llVal <= LONG_MAX && vtVariant.llVal >= LONG_MIN)
+ vtVariant.vt = VT_I4, hr = S_OK;
+ else
+ hr = E_FAIL;
+ }
+ else if (vtVariant.vt == VT_UI8)
+ {
+            // If within range of a 32-bit unsigned number, OK.
+ if (vtVariant.ullVal <= ULONG_MAX)
+ vtVariant.vt = VT_UI4, hr = S_OK;
+ else
+ hr = E_FAIL;
+ }
+ else
+ {
+ hr = SafeVariantChangeTypeEx(&vtVariant, &vtVariant, 0, 0, VT_I4);
+ }
+
+ if (FAILED(hr))
+ {
+ if (FAILED(pMT->GetMDImport()->GetNameOfTypeDef(pMT->GetCl(), &pName, &pNS)))
+ {
+ pName = pNS = "Invalid TypeDef record";
+ }
+ ReportWarning(TLBX_W_ENUM_VALUE_TOOBIG, TLBX_W_ENUM_VALUE_TOOBIG, pName, sName.GetUnicode());
+ return FALSE;
+ }
+ }
+ else
+ { // No value assigned, use 0.
+ pvar->lpvarValue = &vtVariant;
+ vtVariant.vt = VT_I4;
+ vtVariant.lVal = 0;
+ }
+
+ IfFailReport(pCTI->AddVarDesc(iMD, pvar));
+ IfFailReport(pCTI->SetVarName(iMD, (LPOLESTR)sName.GetUnicode()));
+
+ // Check for a description.
+ if(GetDescriptionString(pMT, md, (BSTR &)bstrDescr))
+ IfFailReport(pCTI->SetVarDocString(iMD, bstrDescr));
+
+ // Error reporting info.
+ m_ErrorContext.m_szMember = 0;
+
+ return TRUE;
+} // BOOL TypeLibExporter::ConvertEnumMember()
+
+//*****************************************************************************
+// Given a COM+ signature of a field or property, determine if it should
+// be a PROPERTYPUT or PROPERTYPUTREF.
+//*****************************************************************************
+BOOL TypeLibExporter::IsVbRefType(
+ PCCOR_SIGNATURE pbSig,
+ IMDInternalImport *pInternalImport)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pInternalImport));
+ }
+ CONTRACTL_END;
+
+ ULONG elem=0; // An element from a COM+ signature.
+ ULONG cbElem=0;
+
+ cbElem = CorSigUncompressData(pbSig, &elem);
+ if (elem == ELEMENT_TYPE_PTR || elem == ELEMENT_TYPE_BYREF)
+ {
+ return IsVbRefType(&pbSig[cbElem], pInternalImport);
+ }
+ else
+ {
+ switch (elem)
+ {
+ // For documentation -- arrays are NOT ref types here.
+ //case ELEMENT_TYPE_SDARRAY:
+ //case ELEMENT_TYPE_ARRAY:
+ //case ELEMENT_TYPE_SZARRAY:
+ // Look for variant.
+ case ELEMENT_TYPE_VALUETYPE:
+ return FALSE;
+
+ case ELEMENT_TYPE_CLASS:
+ return TRUE;
+
+ case ELEMENT_TYPE_OBJECT:
+ return FALSE;
+
+ default:
+ break;
+ }
+ }
+
+ return FALSE;
+} // BOOL TypeLibExporter::IsVbRefType()
+
+BOOL TypeLibExporter::IsExportingAs64Bit()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (TlbExportAs64Bit(m_flags))
+ {
+ return TRUE;
+ }
+ else if (TlbExportAs32Bit(m_flags))
+ {
+ return FALSE;
+ }
+ else
+ {
+#ifdef _WIN64
+ return TRUE;
+#else
+ return FALSE;
+#endif
+ }
+} // BOOL TypeLibExporter::IsExportingAs64Bit()
+
+void TypeLibExporter::ArrayToTypeDesc(ICreateTypeInfo2 *pCTI, CDescPool *ppool, ArrayMarshalInfo *pArrayMarshalInfo, TYPEDESC *ptdesc)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pCTI));
+ PRECONDITION(CheckPointer(ppool));
+ PRECONDITION(CheckPointer(pArrayMarshalInfo));
+ PRECONDITION(CheckPointer(ptdesc));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+ VARTYPE vtElement = pArrayMarshalInfo->GetElementVT();
+ TypeHandle thElement = pArrayMarshalInfo->GetElementTypeHandle();
+
+ if (vtElement == VT_RECORD)
+ {
+ // We are dealing with an array of embedded structures.
+ ptdesc->vt = VT_USERDEFINED;
+ EEClassToHref(pCTI, thElement.GetMethodTable(), FALSE, &ptdesc->hreftype);
+ }
+ else if ((vtElement == VT_UNKNOWN || vtElement == VT_DISPATCH) && !thElement.IsObjectType())
+ {
+ if (!thElement.IsValueType() && !pArrayMarshalInfo->IsSafeArraySubTypeExplicitlySpecified())
+ {
+ // We are dealing with an array of user defined interfaces.
+ ptdesc->vt = VT_PTR;
+ ptdesc->lptdesc = reinterpret_cast<TYPEDESC*>(ppool->AllocZero(sizeof(TYPEDESC)));
+ if (ptdesc->lptdesc == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ ptdesc->lptdesc->vt = VT_USERDEFINED;
+ EEClassToHref(pCTI, thElement.GetMethodTable(), FALSE, &ptdesc->lptdesc->hreftype);
+ }
+ else
+ {
+ // The user specified that the array of value classes be converted to an
+ // array of IUnknown or IDispatch pointers.
+ ptdesc->vt = vtElement;
+ }
+ }
+ else if (pArrayMarshalInfo->IsPtr())
+ {
+ ptdesc->vt = VT_PTR;
+ ptdesc->lptdesc = reinterpret_cast<TYPEDESC*>(ppool->AllocZero(sizeof(TYPEDESC)));
+ if (ptdesc->lptdesc == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ ptdesc->lptdesc->vt = vtElement;
+ }
+ else
+ {
+ // We are dealing with an array of primitive types.
+ ptdesc->vt = vtElement;
+ }
+} // void TypeLibExporter::ArrayToTypeDesc()
+
+VARTYPE TypeLibExporter::GetVtForIntPtr()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return static_cast<VARTYPE>(IsExportingAs64Bit() ? VT_I8 : VT_I4);
+} // VARTYPE TypeLibExporter::GetVtForIntPtr()
+
+VARTYPE TypeLibExporter::GetVtForUIntPtr()
+{
+ WRAPPER_NO_CONTRACT;
+
+ return static_cast<VARTYPE>(IsExportingAs64Bit() ? VT_UI8 : VT_UI4);
+} // VARTYPE TypeLibExporter::GetVtForUIntPtr()
+
+/*
+BOOL TypeLibExporter::ValidateSafeArrayElemVT(VARTYPE vt)
+{
+ switch(vt)
+ {
+ case VT_I2:
+ case VT_I4:
+ case VT_R4:
+ case VT_R8:
+ case VT_CY:
+ case VT_DATE:
+ case VT_BSTR:
+ case VT_DISPATCH:
+ case VT_ERROR:
+ case VT_BOOL:
+ case VT_VARIANT:
+ case VT_UNKNOWN:
+ case VT_DECIMAL:
+ case VT_RECORD:
+ case VT_I1:
+ case VT_UI1:
+ case VT_UI2:
+ case VT_UI4:
+ case VT_INT:
+ case VT_UINT:
+ return TRUE;
+
+ default:
+ return FALSE;
+ }
+}
+*/
+
+//*****************************************************************************
+// Read a COM+ signature element and create a TYPEDESC that corresponds
+// to it.
+//*****************************************************************************
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+HRESULT TypeLibExporter::CorSigToTypeDesc(
+ ICreateTypeInfo2 *pCTI, // Typeinfo being created.
+ MethodTable *pMT, // MethodTable with the token.
+ PCCOR_SIGNATURE pbSig, // Pointer to the Cor Signature.
+ PCCOR_SIGNATURE pbNativeSig, // Pointer to the native sig, if any
+ ULONG cbNativeSig, // Count of bytes in native sig.
+ ULONG *pcbElem, // Put # bytes consumed here.
+ TYPEDESC *ptdesc, // Build the typedesc here.
+ CDescPool *ppool, // Pool for additional storage as required.
+ BOOL bMethodSig, // TRUE if the sig is for a method, FALSE for a field.
+ BOOL *pbByRef) // If not null, and the type is byref, set to true.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pCTI));
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(pcbElem));
+ PRECONDITION(CheckPointer(ptdesc));
+ PRECONDITION(CheckPointer(ppool));
+ PRECONDITION(CheckPointer(pbByRef, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr=S_OK;
+ ULONG elem = 0; // The element type.
+ ULONG cbElem = 0; // Bytes in the element.
+ ULONG cb; // Bytes in a sub-element.
+ ULONG cbNativeElem = 0; // # of bytes parsed off of native type.
+ ULONG nativeElem = 0; // The native element type
+ ULONG nativeCount; // The native element size
+ mdToken tkTypeRef; // Token for a TypeRef/TypeDef
+ SString sName; // Buffer to build a name from NS/Name.
+ LPCUTF8 pclsname; // Class name for ELEMENT_TYPE_CLASS.
+ HREFTYPE hRef = 0; // HREF to some type.
+ IMDInternalImport *pInternalImport; // Internal interface containing the signature.
+ Module* pModule = NULL; // Module containing the signature.
+ int i; // Loop control.
+ SigTypeContext emptyTypeContext; // an empty type context is sufficient: all methods should be non-generic
+ ULONG dwTypeFlags = 0; // The type flags.
+ BOOL fAnsi = FALSE; // Is the structure marked as CharSet=Ansi.
+ BOOL fIsStringBuilder = FALSE;
+ LPCUTF8 pNS;
+
+
+ pInternalImport = pMT->GetMDImport();
+ pModule = pMT->GetModule();
+
+ // Just be sure the count is zero if the pointer is.
+ if (pbNativeSig == NULL)
+ cbNativeSig = 0;
+
+ // Grab the native marshaling type.
+ if (cbNativeSig > 0)
+ {
+ cbNativeElem = CorSigUncompressData(pbNativeSig, &nativeElem);
+ pbNativeSig += cbNativeElem;
+ cbNativeSig -= cbNativeElem;
+
+ // AsAny makes no sense for COM Interop. Ignore it.
+ if (nativeElem == NATIVE_TYPE_ASANY)
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_ASANY);
+ nativeElem = 0;
+ }
+ }
+
+ // If we are dealing with a struct, determine if it is marked as CharSet=Ansi.
+ if (!bMethodSig)
+ {
+        if (FAILED(pInternalImport->GetTypeDefProps(pMT->GetCl(), &dwTypeFlags, NULL)))
+        {
+            ReportWarning(TLBX_E_BAD_SIGNATURE, E_FAIL);
+            hr = TLBX_E_BAD_SIGNATURE;
+            goto ExitFunc;
+        }
+
+        // Make sure one of Auto, Ansi or Unicode is specified.
+        if (!IsTdAnsiClass(dwTypeFlags) && !IsTdAutoClass(dwTypeFlags) && !IsTdUnicodeClass(dwTypeFlags))
+        {
+            _ASSERTE(!"Bad stringformat value in wrapper class.");
+            ReportWarning(TLBX_E_BAD_SIGNATURE, E_FAIL); // bad metadata
+            hr = TLBX_E_BAD_SIGNATURE;
+            goto ExitFunc;
+        }
+        fAnsi = IsTdAnsiClass(dwTypeFlags);
+ }
+
+ // Get the element type.
+TryAgain:
+ cbElem += CorSigUncompressData(pbSig+cbElem, &elem);
+
+ // Handle the custom marshaler native type separately.
+ if (elem != ELEMENT_TYPE_BYREF && nativeElem == NATIVE_TYPE_CUSTOMMARSHALER)
+ {
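+        // The exporter does not know what unmanaged type a custom marshaler will
+        // produce, so reference types are exported as IUnknown and strings/arrays
+        // as a pointer-sized integer.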
+ switch(elem)
+ {
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_OBJECT:
+ // @TODO(DM): Ask the custom marshaler for the ITypeInfo to use for the unmanaged type.
+ ptdesc->vt = VT_UNKNOWN;
+ break;
+
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_ARRAY:
+ ptdesc->vt = GetVtForIntPtr();
+ break;
+
+ default:
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ return(TLBX_E_BAD_SIGNATURE);
+ break;
+ }
+
+ // Eat the rest of the signature. The extra -1's are to account
+ // for the byte parsed off above.
+ SigPointer p(&pbSig[cbElem-1]);
+ IfFailThrow(p.SkipExactlyOne());
+ cbElem += (ULONG)(p.GetPtr() - &pbSig[cbElem]); // Note I didn't use -1 here.
+ goto ExitFunc;
+ }
+
+// This label is used to try again with a new element type, but without consuming more signature.
+// Usage: set 'elem' to a new value, then goto this label.
+TryWithElemType:
+ switch (elem)
+ {
+ case ELEMENT_TYPE_END: // 0x0,
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_UNKNOWN_SIGNATURE);
+ return(TLBX_E_BAD_SIGNATURE);
+ break;
+
+ case ELEMENT_TYPE_VOID: // 0x1,
+ ptdesc->vt = VT_VOID;
+ break;
+
+ case ELEMENT_TYPE_BOOLEAN: // 0x2,
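+        // With no MarshalAs info, booleans in method signatures export as
+        // VARIANT_BOOL (VT_BOOL), but boolean fields in structures export as
+        // 4-byte Win32 BOOL (VT_I4), matching the default marshaling behavior.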
+ switch (nativeElem)
+ {
+ case 0:
+ ptdesc->vt = static_cast<VARTYPE>(bMethodSig ? VT_BOOL : VT_I4);
+ break;
+
+ case NATIVE_TYPE_VARIANTBOOL:
+ ptdesc->vt = VT_BOOL;
+ break;
+
+ case NATIVE_TYPE_BOOLEAN:
+ ptdesc->vt = VT_I4;
+ break;
+
+ case NATIVE_TYPE_U1:
+ case NATIVE_TYPE_I1:
+ ptdesc->vt = VT_UI1;
+ break;
+
+ default:
+ DEBUG_STMT(DbgWriteEx(W("Bad Native COM attribute specified!\n")));
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ return(TLBX_E_BAD_SIGNATURE);
+ }
+ break;
+
+ case ELEMENT_TYPE_CHAR: // 0x3,
+ if (nativeElem == 0)
+ {
+ if (!bMethodSig && IsTdAutoClass(dwTypeFlags))
+ {
+ // Types with a char set of auto and that would be represented differently
+ // on different platforms are not allowed to be exported to COM.
+ DefineFullyQualifiedNameForClassW();
+ LPCWSTR szName = GetFullyQualifiedNameForClassW(pMT);
+ _ASSERTE(szName);
+
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_AUTO_CS_NOT_ALLOWED, szName);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ ptdesc->vt = static_cast<VARTYPE>(fAnsi ? VT_UI1 : VT_UI2);
+ }
+ else
+ {
+ switch (nativeElem)
+ {
+ case 0:
+ case NATIVE_TYPE_U2:
+ case NATIVE_TYPE_I2:
+ ptdesc->vt = VT_UI2;
+ break;
+
+ case NATIVE_TYPE_U1:
+ case NATIVE_TYPE_I1:
+ ptdesc->vt = VT_UI1;
+ break;
+
+ default:
+ DEBUG_STMT(DbgWriteEx(W("Bad Native COM attribute specified!\n")));
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_I1: // 0x4,
+ ptdesc->vt = VT_I1;
+ break;
+
+ case ELEMENT_TYPE_U1: // 0x5,
+ ptdesc->vt = VT_UI1;
+ break;
+
+ case ELEMENT_TYPE_I2: // 0x6,
+ ptdesc->vt = VT_I2;
+ break;
+
+ case ELEMENT_TYPE_U2: // 0x7,
+ ptdesc->vt = VT_UI2;
+ break;
+
+ case ELEMENT_TYPE_I4: // 0x8,
+ switch (nativeElem)
+ {
+ case 0:
+ case NATIVE_TYPE_I4:
+ case NATIVE_TYPE_U4: case NATIVE_TYPE_INTF: //@todo: Fix Microsoft.Win32.Interop.dll and remove this line.
+ ptdesc->vt = VT_I4;
+ break;
+
+ case NATIVE_TYPE_ERROR:
+ ptdesc->vt = VT_HRESULT;
+ break;
+
+ default:
+ DEBUG_STMT(DbgWriteEx(W("Bad Native COM attribute specified!\n")));
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+ break;
+
+ case ELEMENT_TYPE_U4: // 0x9,
+ switch (nativeElem)
+ {
+ case 0:
+ case NATIVE_TYPE_U4:
+ ptdesc->vt = VT_UI4;
+ break;
+
+ case NATIVE_TYPE_ERROR:
+ ptdesc->vt = VT_HRESULT;
+ break;
+
+ default:
+ DEBUG_STMT(DbgWriteEx(W("Bad Native COM attribute specified!\n")));
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+ break;
+
+ case ELEMENT_TYPE_I8: // 0xa,
+ ptdesc->vt = VT_I8;
+ break;
+
+ case ELEMENT_TYPE_U8: // 0xb,
+ ptdesc->vt = VT_UI8;
+ break;
+
+ case ELEMENT_TYPE_R4: // 0xc,
+ ptdesc->vt = VT_R4;
+ break;
+
+ case ELEMENT_TYPE_R8: // 0xd,
+ ptdesc->vt = VT_R8;
+ break;
+
+ case ELEMENT_TYPE_OBJECT:
+ goto IsObject;
+
+ case ELEMENT_TYPE_STRING: // 0xe,
+ IsString:
+ if (nativeElem == 0)
+ {
+ if (bMethodSig)
+ {
+ ptdesc->vt = VT_BSTR;
+ }
+ else
+ {
+ if (IsTdAutoClass(dwTypeFlags))
+ {
+ // Types with a char set of auto and that would be represented differently
+ // on different platforms are not allowed to be exported to COM.
+ DefineFullyQualifiedNameForClassW();
+ LPCWSTR szName = GetFullyQualifiedNameForClassW(pMT);
+ _ASSERTE(szName);
+
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_AUTO_CS_NOT_ALLOWED, szName);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ ptdesc->vt = static_cast<VARTYPE>(fAnsi ? VT_LPSTR : VT_LPWSTR);
+ }
+ }
+ else
+ {
+ switch (nativeElem)
+ {
+ case NATIVE_TYPE_BSTR:
+ if (fIsStringBuilder)
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+ ptdesc->vt = VT_BSTR;
+ break;
+
+ case NATIVE_TYPE_LPSTR:
+ ptdesc->vt = VT_LPSTR;
+ break;
+
+ case NATIVE_TYPE_LPWSTR:
+ ptdesc->vt = VT_LPWSTR;
+ break;
+
+ case NATIVE_TYPE_LPTSTR:
+ {
+ // NATIVE_TYPE_LPTSTR is not allowed to be exported to COM.
+ DefineFullyQualifiedNameForClassW();
+ LPCWSTR szName = GetFullyQualifiedNameForClassW(pMT);
+ _ASSERTE(szName);
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_LPTSTR_NOT_ALLOWED, szName);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+ case NATIVE_TYPE_FIXEDSYSSTRING:
+ // NATIVE_TYPE_FIXEDSYSSTRING is only allowed on fields.
+ if (bMethodSig)
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ // Retrieve the count of characters.
+ if (cbNativeSig != 0)
+ {
+ cb = CorSigUncompressData(pbNativeSig, &nativeCount);
+ pbNativeSig += cb;
+ cbNativeSig -= cb;
+ }
+ else
+ {
+ nativeCount = 0;
+ }
+
+                // Fixed strings become embedded arrays of characters.
+ ptdesc->vt = VT_CARRAY;
+ ptdesc->lpadesc = reinterpret_cast<ARRAYDESC*>(ppool->AllocZero(sizeof(ARRAYDESC)));
+ if (ptdesc->lpadesc == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ // Set the count of characters.
+ ptdesc->lpadesc->cDims = 1;
+ ptdesc->lpadesc->rgbounds[0].cElements = nativeCount;
+ ptdesc->lpadesc->rgbounds[0].lLbound = 0;
+
+ if (IsTdAutoClass(dwTypeFlags))
+ {
+ // Types with a char set of auto and that would be represented differently
+ // on different platforms are not allowed to be exported to COM.
+ DefineFullyQualifiedNameForClassW();
+ LPCWSTR szName = GetFullyQualifiedNameForClassW(pMT);
+ _ASSERTE(szName);
+
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_AUTO_CS_NOT_ALLOWED, szName);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ ptdesc->lpadesc->tdescElem.vt = static_cast<VARTYPE>(fAnsi ? VT_UI1 : VT_UI2);
+ break;
+
+ default:
+ DEBUG_STMT(DbgWriteEx(W("Bad Native COM attribute specified!\n")));
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+ }
+ break;
+
+    // Every element type preceding PTR is a simple type.
+ case ELEMENT_TYPE_PTR: // 0xf,
+ case ELEMENT_TYPE_BYREF: // 0x10,
+ // TYPEDESC is a pointer.
+ ptdesc->vt = VT_PTR;
+ if (pbByRef)
+ *pbByRef = TRUE;
+
+ // Pointer to what?
+ ptdesc->lptdesc = reinterpret_cast<TYPEDESC*>(ppool->AllocZero(sizeof(TYPEDESC)));
+ if (ptdesc->lptdesc == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
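+        // Recurse on the pointed-to type. The native signature pointer is rewound
+        // by the bytes already consumed so that the pointed-to type sees the same
+        // MarshalAs information.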
+ hr = CorSigToTypeDesc(pCTI, pMT, &pbSig[cbElem], pbNativeSig-cbNativeElem,
+ cbNativeSig+cbNativeElem, &cb, ptdesc->lptdesc, ppool, bMethodSig);
+ cbElem += cb;
+
+ if (FAILED(hr))
+ goto ExitFunc;
+
+ break;
+
+ case ELEMENT_TYPE_CLASS: // 0x12,
+ case ELEMENT_TYPE_VALUETYPE:
+ // Get the TD/TR.
+ cb = CorSigUncompressToken(&pbSig[cbElem], &tkTypeRef);
+ cbElem += cb;
+
+ if (TypeFromToken(tkTypeRef) == mdtTypeDef)
+ {
+ // Get the name of the TypeDef.
+ if (FAILED(pInternalImport->GetNameOfTypeDef(tkTypeRef, &pclsname, &pNS)))
+ {
+ IfFailReport(COR_E_BADIMAGEFORMAT);
+ }
+ }
+ else
+ {
+ // Get the name of the TypeRef.
+ _ASSERTE(TypeFromToken(tkTypeRef) == mdtTypeRef);
+ IfFailReport(pInternalImport->GetNameOfTypeRef(tkTypeRef, &pNS, &pclsname));
+ }
+
+ if (pNS)
+ {
+ sName.MakeFullNamespacePath(SString(SString::Utf8, pNS), SString(SString::Utf8, pclsname));
+ StackScratchBuffer scratch;
+ pclsname = sName.GetUTF8(scratch);
+ }
+
+ _ASSERTE(strlen(szRuntime) == cbRuntime); // If you rename System, fix this invariant.
+ _ASSERTE(strlen(szText) == cbText); // If you rename System.Text, fix this invariant.
+
+ // Is it System.something?
+ if (SString::_strnicmp(pclsname, szRuntime, cbRuntime) == 0)
+ {
+ // Which one?
+ LPCUTF8 pcls; pcls = pclsname + cbRuntime;
+ if (stricmpUTF8(pcls, szStringClass) == 0)
+ {
+ goto IsString;
+ }
+ else if (stricmpUTF8(pcls, szDateTimeClass) == 0)
+ {
+ ptdesc->vt = VT_DATE;
+ goto ExitFunc;
+ }
+ else if (stricmpUTF8(pcls, szDecimalClass) == 0)
+ {
+ switch (nativeElem)
+ {
+ case NATIVE_TYPE_CURRENCY:
+ // Make this a currency.
+ ptdesc->vt = VT_CY;
+ break;
+
+ case 0:
+ // Make this a decimal
+ ptdesc->vt = VT_DECIMAL;
+ break;
+
+ default:
+ DEBUG_STMT(DbgWriteEx(W("Bad Native COM attribute specified!\n")));
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+ goto ExitFunc;
+ }
+ else if (stricmpUTF8(pcls, szGuidClass) == 0)
+ {
+ switch (nativeElem)
+ {
+ case NATIVE_TYPE_LPSTRUCT:
+ // Make this a pointer to . . .
+ ptdesc->vt = VT_PTR;
+ if (pbByRef)
+ *pbByRef = TRUE;
+
+ ptdesc->lptdesc = reinterpret_cast<TYPEDESC*>(ppool->AllocZero(sizeof(TYPEDESC)));
+ if (ptdesc->lptdesc == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ // . . . a user defined type for GUID
+ ptdesc->lptdesc->vt = VT_USERDEFINED;
+ GetRefTypeInfo(pCTI, m_pGuid, &ptdesc->lptdesc->hreftype);
+ break;
+
+ case 0:
+ case NATIVE_TYPE_STRUCT:
+ // a user defined type for GUID
+ ptdesc->vt = VT_USERDEFINED;
+ GetRefTypeInfo(pCTI, m_pGuid, &ptdesc->hreftype);
+ break;
+
+ default:
+ DEBUG_STMT(DbgWriteEx(W("Bad Native COM attribute specified!\n")));
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+ goto ExitFunc;
+ }
+ else if (stricmpUTF8(pcls, szArrayClass) == 0)
+ {
+                // If no native type is specified, then assume it is NATIVE_TYPE_INTF.
+ if (nativeElem == 0)
+ nativeElem = NATIVE_TYPE_INTF;
+
+ if (nativeElem == NATIVE_TYPE_SAFEARRAY)
+ {
+                // Compat: If no safe array user defined subtype was specified, we map it to a SAFEARRAY of VARIANTs.
+ ULONG vtElement = VT_VARIANT;
+ TypeHandle thElement = TypeHandle(g_pObjectClass);
+
+ if (cbNativeSig > 0)
+ {
+ // Retrieve the safe array sub type.
+ cb = CorSigUncompressData(pbNativeSig, &vtElement);
+ pbNativeSig += cb;
+ cbNativeSig -= cb;
+
+ // Get the type name if specified.
+ if (cbNativeSig > 0)
+ {
+ ULONG cbClass = 0;
+
+ cb = CorSigUncompressData(pbNativeSig, &cbClass);
+ pbNativeSig += cb;
+ cbNativeSig -= cb;
+
+ if (cbClass > 0)
+ {
+ // Load the type. Use an SString for the string since we need to NULL terminate the string
+ // that comes from the metadata.
+ StackScratchBuffer utf8Name;
+ SString safeArrayUserDefTypeName(SString::Utf8, (LPUTF8)pbNativeSig, cbClass);
+ thElement = LoadClass(pMT->GetModule(), safeArrayUserDefTypeName.GetUTF8(utf8Name));
+ }
+ }
+ }
+ else
+ {
+ if (!bMethodSig)
+ {
+                        // The field marshaler converts these to SAFEARRAYs of the type specified
+                        // at runtime by the array. This isn't expressible in a type library,
+                        // so provide a warning.
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_W_BAD_SAFEARRAYFIELD_NO_ELEMENTVT);
+ }
+ }
+
+ ArrayMarshalInfo arrayMarshalInfo(IsExportingAs64Bit() ? amiExport64Bit : amiExport32Bit);
+ MarshalInfo::MarshalScenario ms = bMethodSig ? MarshalInfo::MARSHAL_SCENARIO_COMINTEROP : MarshalInfo::MARSHAL_SCENARIO_FIELD;
+ arrayMarshalInfo.InitForSafeArray(ms, thElement, (VARTYPE)vtElement, fAnsi);
+
+ if (!arrayMarshalInfo.IsValid())
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, arrayMarshalInfo.GetErrorResourceId());
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ // TYPEDESC is an array.
+ ptdesc->vt = VT_SAFEARRAY;
+ ptdesc->lptdesc = reinterpret_cast<TYPEDESC*>(ppool->AllocZero(sizeof(TYPEDESC)));
+ if (ptdesc->lptdesc == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ ArrayToTypeDesc(pCTI, ppool, &arrayMarshalInfo, ptdesc->lptdesc);
+
+ goto ExitFunc;
+ }
+ else if (nativeElem == NATIVE_TYPE_FIXEDARRAY)
+ {
+ // NATIVE_TYPE_FIXEDARRAY is only allowed on fields.
+ if (bMethodSig)
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ // Retrieve the size of the fixed array. This is required.
+ if (cbNativeSig == 0)
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, IDS_EE_BADMARSHALFIELD_FIXEDARRAY_NOSIZE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ cb = CorSigUncompressData(pbNativeSig, &nativeCount);
+ pbNativeSig += cb;
+ cbNativeSig -= cb;
+
+ // A size const of 0 isn't supported.
+ if (nativeCount == 0)
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, IDS_EE_BADMARSHALFIELD_FIXEDARRAY_ZEROSIZE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ // Since these always export to arrays of BSTRs, we don't need to fetch the native type.
+
+ // Set the data
+ ptdesc->vt = VT_CARRAY;
+ ptdesc->lpadesc = NULL;
+ ptdesc->lpadesc = reinterpret_cast<ARRAYDESC*>(ppool->AllocZero(sizeof(ARRAYDESC)));
+ if (ptdesc->lpadesc == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ // Compat: FixedArrays of System.Arrays map to fixed arrays of BSTRs.
+ ptdesc->lpadesc->tdescElem.vt = VT_BSTR;
+ ptdesc->lpadesc->cDims = 1;
+ ptdesc->lpadesc->rgbounds->cElements = nativeCount;
+ ptdesc->lpadesc->rgbounds->lLbound = 0;
+
+ goto ExitFunc;
+ }
+ else if (nativeElem != NATIVE_TYPE_INTF)
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ // If the native type is NATIVE_TYPE_INTF then we fall through and convert
+ // System.Array to its IClassX interface.
+ }
+ else if (stricmpUTF8(pcls, szObjectClass) == 0)
+ {
+ IsObject:
+ // This next statement is to work around a "feature" that marshals an object inside
+            // a struct as an interface, instead of as a variant. FieldMarshal metadata
+            // can override that.
+ if (nativeElem == 0 && !bMethodSig)
+ nativeElem = NATIVE_TYPE_IUNKNOWN;
+
+ switch (nativeElem)
+ {
+ case NATIVE_TYPE_INTF:
+ case NATIVE_TYPE_IUNKNOWN:
+ // an IUnknown based interface.
+ ptdesc->vt = VT_UNKNOWN;
+ break;
+
+ case NATIVE_TYPE_IDISPATCH:
+ // an IDispatch based interface.
+ ptdesc->vt = VT_DISPATCH;
+ break;
+
+ case 0:
+ case NATIVE_TYPE_STRUCT:
+ // a VARIANT
+ ptdesc->vt = VT_VARIANT;
+ break;
+
+ default:
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+ goto ExitFunc;
+ }
+ } // System
+
+ if (SString::_strnicmp(pclsname, szText, cbText) == 0)
+ {
+ LPCUTF8 pcls; pcls = pclsname + cbText;
+ if (stricmpUTF8(pcls, szStringBufferClass) == 0)
+ {
+ fIsStringBuilder = TRUE;
+
+            // If there is no FieldMarshal information, marshal as an LPWSTR.
+ if (nativeElem == 0)
+ nativeElem = NATIVE_TYPE_LPWSTR;
+
+            // The marshaler treats StringBuilders as [in, out] by default.
+ if (pbByRef)
+ *pbByRef = TRUE;
+
+ goto IsString;
+ }
+ } // System.Text
+
+ if (SString::_strnicmp(pclsname, szCollections, cbCollections) == 0)
+ {
+ LPCUTF8 pcls; pcls = pclsname + cbCollections;
+ if (stricmpUTF8(pcls, szIEnumeratorClass) == 0)
+ {
+ StdOleTypeToHRef(pCTI, IID_IEnumVARIANT, &hRef);
+ ptdesc->vt = VT_PTR;
+ ptdesc->lptdesc = reinterpret_cast<TYPEDESC*>(ppool->AllocZero(sizeof(TYPEDESC)));
+ if (ptdesc->lptdesc == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ ptdesc->lptdesc->vt = VT_USERDEFINED;
+ ptdesc->lptdesc->hreftype = hRef;
+ goto ExitFunc;
+ }
+ } // System.Collections
+
+ if (SString::_strnicmp(pclsname, szDrawing, cbDrawing) == 0)
+ {
+ LPCUTF8 pcls; pcls = pclsname + cbDrawing;
+ if (stricmpUTF8(pcls, szColor) == 0)
+ {
+ StdOleTypeToHRef(pCTI, GUID_OleColor, &hRef);
+ ptdesc->vt = VT_USERDEFINED;
+ ptdesc->hreftype = hRef;
+ goto ExitFunc;
+ }
+ } // System.Drawing
+
+ // It is not a built-in VT type, so build the typedesc.
+
+ // Determine whether the type is a reference type (IUnknown derived) or a struct type.
+ // Get the MethodTable for the referenced class.
+ MethodTable *pRefdClass; // MethodTable object for referenced TypeDef.
+ pRefdClass = LoadClass(pMT->GetModule(), tkTypeRef);
+
+    // Is the type a ref type or a struct type? Note that a ref type that has layout
+ // is exported as a TKIND_RECORD but is referenced as a **Foo, whereas a
+ // value type is also exported as a TKIND_RECORD but is referenced as a *Foo.
+ if (elem == ELEMENT_TYPE_CLASS)
+ {
+ // Check if it is a delegate (which can be marshaled as a function pointer).
+ if (COMDelegate::IsDelegate(pRefdClass))
+ {
+ if (nativeElem == NATIVE_TYPE_FUNC)
+ {
+ ptdesc->vt = GetVtForIntPtr();
+ goto ExitFunc;
+ }
+ else if (nativeElem != 0 && nativeElem != NATIVE_TYPE_INTF)
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+ }
+ else if (TypeHandle(pRefdClass).CanCastTo(TypeHandle(MscorlibBinder::GetClass(CLASS__SAFE_HANDLE))))
+ {
+ ptdesc->vt = GetVtForIntPtr();
+ goto ExitFunc;
+ }
+ else if (TypeHandle(pRefdClass).CanCastTo(TypeHandle(MscorlibBinder::GetClass(CLASS__CRITICAL_HANDLE))))
+ {
+ ptdesc->vt = GetVtForIntPtr();
+ goto ExitFunc;
+ }
+
+ if (pRefdClass->HasLayout())
+ {
+ if (nativeElem == NATIVE_TYPE_INTF)
+ {
+                    // Classes with layout are exported as structs. Because of this, we can't export fields or
+ // parameters of these types marked with [MarshalAs(UnmanagedType.Interface)] as interface
+ // pointers of the actual type. The best we can do is make them IUnknown pointers and
+ // provide a warning.
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_W_LAYOUTCLASS_AS_INTERFACE);
+ ptdesc->vt = VT_UNKNOWN;
+ goto ExitFunc;
+ }
+ else if (!bMethodSig)
+ {
+ // Classes with layout inside structures must be either marked with [MarshalAs(UnmanagedType.Interface)],
+ // [MarshalAs(UnmanagedType.Struct)] or not have any MarshalAs information.
+ if ((nativeElem != 0) && (nativeElem != NATIVE_TYPE_STRUCT))
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ // These types are embedded structures so we can treat them as value classes.
+ goto IsStructWithLayout;
+ }
+ else
+ {
+ // Classes with layout as parameters must be either marked with [MarshalAs(UnmanagedType.Interface)]
+ // [MarshalAs(UnmanagedType.LPStruct)] or not have any MarshalAs information.
+ if ((nativeElem != 0) && (nativeElem != NATIVE_TYPE_LPSTRUCT))
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+ }
+ }
+
+        // A reference to some non-system-defined/non-delegate-derived type. Get the reference to the
+ // type, unless it is an imported COM type, in which case, we'll just use
+ // IUnknown.
+ // If the type is not visible from COM then we return S_USEIUNKNOWN.
+ if (!IsTypeVisibleFromCom(TypeHandle(pRefdClass)))
+ hr = S_USEIUNKNOWN;
+ else
+ hr = EEClassToHref(pCTI, pRefdClass, TRUE, &hRef);
+
+ if (hr == S_USEIUNKNOWN)
+ {
+ // Not a known type, so use IUnknown
+ ptdesc->vt = VT_UNKNOWN;
+ goto ExitFunc;
+ }
+
+ // Not a known class, so make this a pointer to . . .
+ ptdesc->vt = VT_PTR;
+ ptdesc->lptdesc = reinterpret_cast<TYPEDESC*>(ppool->AllocZero(sizeof(TYPEDESC)));
+ if (ptdesc->lptdesc == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ // . . . a user defined type . . .
+ ptdesc->lptdesc->vt = VT_USERDEFINED;
+ // . . . based on the token.
+ ptdesc->lptdesc->hreftype = hRef;
+ }
+ else // It's a value type.
+ {
+IsStructWithLayout:
+ // If it is an enum, check the underlying type. All COM enums are 32 bits,
+ // so if the .Net enum is not a 32 bit enum, convert to the underlying type
+ // instead of the enum type.
+ if (pRefdClass->IsEnum())
+ {
+ // Get the element type of the underlying type.
+ CorElementType et = pRefdClass->GetInternalCorElementType();
+ // If it is not a 32-bit type or MarshalAs is specified, convert as the
+ // underlying type.
+ if ((et != ELEMENT_TYPE_I4 && et != ELEMENT_TYPE_U4) ||
+ (nativeElem != 0))
+ {
+ elem = et;
+ goto TryWithElemType;
+ }
+ // Fall through to convert as the enum type.
+ }
+ else
+ {
+ // Value classes must be either marked with [MarshalAs(UnmanagedType.Struct)]
+ // or not have any MarshalAs information.
+ if ((nativeElem != 0) && (nativeElem != NATIVE_TYPE_STRUCT))
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+ }
+
+ // A reference to some non-system-defined type. Get the reference to the
+ // type. Since this is a value class we must get a valid href. Otherwise
+ // we fail the conversion.
+ hr = TokenToHref(pCTI, pMT, tkTypeRef, FALSE, &hRef);
+ if (hr == S_USEIUNKNOWN)
+ {
+ SString sClsName;
+ sClsName.SetUTF8(pclsname);
+
+ LPCWSTR szVCName = sClsName.GetUnicode();
+ if (NAMESPACE_SEPARATOR_WCHAR == *szVCName)
+ szVCName++;
+
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_NONVISIBLEVALUECLASS, szVCName);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+        // A value class is like other UserDefined types, except that it is passed
+        // by value (i.e., on the stack) instead of by pointer.
+ // . . . a user defined type . . .
+ ptdesc->vt = VT_USERDEFINED;
+ // . . . based on the token.
+ ptdesc->hreftype = hRef;
+ }
+ break;
+
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_ARRAY:
+ {
+ SigPointer sig(&pbSig[cbElem]);
+
+ // Retrieve the type handle for the array elements.
+ TypeHandle thElement = sig.GetTypeHandleThrowing(pModule, &emptyTypeContext);
+ _ASSERTE(!thElement.IsNull());
+
+ // Update the index into the managed signature array.
+ IfFailThrow(sig.SkipExactlyOne());
+ cbElem += static_cast<ULONG>(sig.GetPtr() - &pbSig[cbElem]);
+
+ switch (nativeElem)
+ {
+ case 0:
+ case NATIVE_TYPE_SAFEARRAY:
+ {
+ ULONG vtElement = VT_EMPTY;
+
+ // Retrieve the safe array element type.
+ if (cbNativeSig != 0)
+ {
+ cb = CorSigUncompressData(pbNativeSig, &vtElement);
+ pbNativeSig += cb;
+ cbNativeSig -= cb;
+ }
+
+ ArrayMarshalInfo arrayMarshalInfo(IsExportingAs64Bit() ? amiExport64Bit : amiExport32Bit);
+ MarshalInfo::MarshalScenario ms = bMethodSig ? MarshalInfo::MARSHAL_SCENARIO_COMINTEROP : MarshalInfo::MARSHAL_SCENARIO_FIELD;
+ arrayMarshalInfo.InitForSafeArray(ms, thElement, (VARTYPE)vtElement, fAnsi);
+
+ if (!arrayMarshalInfo.IsValid())
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, arrayMarshalInfo.GetErrorResourceId());
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ // TYPEDESC is an array.
+ ptdesc->vt = VT_SAFEARRAY;
+ ptdesc->lptdesc = reinterpret_cast<TYPEDESC*>(ppool->AllocZero(sizeof(TYPEDESC)));
+ if (ptdesc->lptdesc == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ ArrayToTypeDesc(pCTI, ppool, &arrayMarshalInfo, ptdesc->lptdesc);
+ }
+ break;
+
+ case NATIVE_TYPE_FIXEDARRAY:
+ {
+ ULONG ntElement = NATIVE_TYPE_DEFAULT;
+
+ // NATIVE_TYPE_FIXEDARRAY is only allowed on fields.
+ if (bMethodSig)
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ // Retrieve the size of the fixed array. This is required.
+ if (cbNativeSig == 0)
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, IDS_EE_BADMARSHALFIELD_FIXEDARRAY_NOSIZE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ cb = CorSigUncompressData(pbNativeSig, &nativeCount);
+ pbNativeSig += cb;
+ cbNativeSig -= cb;
+
+ // A size const of 0 isn't supported.
+ if (nativeCount == 0)
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, IDS_EE_BADMARSHALFIELD_FIXEDARRAY_ZEROSIZE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ // Read the optional array sub type if specified.
+ if (cbNativeSig != 0)
+ {
+ cb = CorSigUncompressData(pbNativeSig, &ntElement);
+ pbNativeSig += cb;
+ cbNativeSig -= cb;
+ }
+
+ ArrayMarshalInfo arrayMarshalInfo(IsExportingAs64Bit() ? amiExport64Bit : amiExport32Bit);
+ arrayMarshalInfo.InitForFixedArray(thElement, (CorNativeType)ntElement, fAnsi);
+
+ if (!arrayMarshalInfo.IsValid())
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, arrayMarshalInfo.GetErrorResourceId());
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ // Set the data
+ ptdesc->vt = VT_CARRAY;
+ ptdesc->lpadesc = reinterpret_cast<ARRAYDESC*>(ppool->AllocZero(sizeof(ARRAYDESC)));
+ if (ptdesc->lpadesc == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ ArrayToTypeDesc(pCTI, ppool, &arrayMarshalInfo, &ptdesc->lpadesc->tdescElem);
+
+ ptdesc->lpadesc->cDims = 1;
+ ptdesc->lpadesc->rgbounds->cElements = nativeCount;
+ ptdesc->lpadesc->rgbounds->lLbound = 0;
+ }
+ break;
+
+ case NATIVE_TYPE_ARRAY:
+ {
+ ULONG ntElement = NATIVE_TYPE_DEFAULT;
+
+ // NATIVE_TYPE_ARRAY is not allowed on fields.
+ if (!bMethodSig)
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_ARRAY_NEEDS_NT_FIXED);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ // Read the optional array sub type if specified.
+ if (cbNativeSig != 0)
+ {
+ cb = CorSigUncompressData(pbNativeSig, &ntElement);
+ pbNativeSig += cb;
+ cbNativeSig -= cb;
+ }
+
+ ArrayMarshalInfo arrayMarshalInfo(IsExportingAs64Bit() ? amiExport64Bit : amiExport32Bit);
+ arrayMarshalInfo.InitForNativeArray(MarshalInfo::MARSHAL_SCENARIO_COMINTEROP, thElement, (CorNativeType)ntElement, fAnsi);
+
+ if (!arrayMarshalInfo.IsValid())
+ {
+ ReportWarning(TLBX_E_BAD_SIGNATURE, arrayMarshalInfo.GetErrorResourceId());
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ ptdesc->vt = VT_PTR;
+ ptdesc->lptdesc = reinterpret_cast<TYPEDESC*>(ppool->AllocZero(sizeof(TYPEDESC)));
+ if(ptdesc->lptdesc == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ ArrayToTypeDesc(pCTI, ppool, &arrayMarshalInfo, ptdesc->lptdesc);
+ }
+ break;
+
+ default:
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_BAD_NATIVETYPE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ }
+
+ // If we are dealing with an ELEMENT_TYPE_ARRAY, we need to eat the array description.
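+        // Per ECMA-335 (II.23.2.13) the shape that follows the element type is:
+        //     rank, numSizes, size*, numLoBounds, loBound*
+        // For example, a rank-2 array with no explicit sizes or bounds encodes as
+        // rank=2, numSizes=0, numLoBounds=0; the loops below consume exactly that.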
+ if (elem == ELEMENT_TYPE_ARRAY)
+ {
+ // Eat the rank.
+ cbElem += CorSigUncompressData(pbSig+cbElem, &elem);
+
+ // Count of ubounds, ubounds.
+ cbElem += CorSigUncompressData(pbSig+cbElem, &elem);
+ for (i=elem; i>0; --i)
+ cbElem += CorSigUncompressData(pbSig+cbElem, &elem);
+
+ // Count of lbounds, lbounds.
+ cbElem += CorSigUncompressData(pbSig+cbElem, &elem);
+ for (i=elem; i>0; --i)
+ cbElem += CorSigUncompressData(pbSig+cbElem, &elem);
+ }
+
+ break;
+ }
+
+ case ELEMENT_TYPE_TYPEDBYREF: // 0x16
+ ptdesc->vt = VT_VARIANT;
+ break;
+
+ //------------------------------------------
+ // This really should be the commented out
+ // block following.
+ case ELEMENT_TYPE_I: // 0x18,
+ ptdesc->vt = GetVtForIntPtr();
+ break;
+
+ case ELEMENT_TYPE_U: // 0x19,
+ ptdesc->vt = GetVtForUIntPtr();
+ break;
+
+ case ELEMENT_TYPE_CMOD_REQD: // 0x1F // required C modifier : E_T_CMOD_REQD <mdTypeRef/mdTypeDef>
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_UNKNOWN_SIGNATURE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+
+ case ELEMENT_TYPE_SENTINEL:
+ goto TryAgain;
+
+ case ELEMENT_TYPE_CMOD_OPT: // 0x20 // optional C modifier : E_T_CMOD_OPT <mdTypeRef/mdTypeDef>
+ cb = CorSigUncompressToken(&pbSig[cbElem], &tkTypeRef);
+ cbElem += cb;
+ goto TryAgain;
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+ ptdesc->vt = GetVtForIntPtr();
+
+ // Eat the rest of the signature.
+ SigPointer p(&pbSig[cbElem-1]);
+ IfFailThrow(p.SkipExactlyOne());
+ cbElem += (ULONG)(p.GetPtr() - &pbSig[cbElem]); // Note I didn't use -1 here.
+ break;
+ }
+
+ case ELEMENT_TYPE_GENERICINST:
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_GENERICINST_SIGNATURE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ break;
+
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_GENERICPAR_SIGNATURE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ break;
+
+ default:
+ ReportWarning(TLBX_E_BAD_SIGNATURE, TLBX_E_UNKNOWN_SIGNATURE);
+ hr = TLBX_E_BAD_SIGNATURE;
+ goto ExitFunc;
+ break;
+ }
+
+ExitFunc:
+ *pcbElem = cbElem;
+
+ if (hr == S_USEIUNKNOWN)
+ hr = S_OK;
+
+ return hr;
+} // TypeLibExporter::CorSigToTypeDesc
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+//*****************************************************************************
+// Get an HREFTYPE for an ITypeInfo, in the context of an ICreateTypeInfo2.
+//*****************************************************************************
+HRESULT TypeLibExporter::TokenToHref(
+ ICreateTypeInfo2 *pCTI, // Typeinfo being created.
+ MethodTable *pMT, // MethodTable with the token.
+ mdToken tk, // The TypeRef to resolve.
+ BOOL bWarnOnUsingIUnknown, // A flag indicating if we should warn on substituting IUnknown.
+ HREFTYPE *pHref) // Put HREFTYPE here.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pCTI));
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(pHref));
+ }
+ CONTRACTL_END;
+
+ MethodTable *pRefdClass; // MethodTable object for referenced TypeDef.
+
+ // Get the MethodTable for the referenced class, and see if it is being converted.
+ pRefdClass = LoadClass(pMT->GetModule(), tk);
+
+ // If the type is not visible from COM then we return S_USEIUNKNOWN.
+ if (!IsTypeVisibleFromCom(TypeHandle(pRefdClass)))
+ return S_USEIUNKNOWN;
+
+ return EEClassToHref(pCTI, pRefdClass, bWarnOnUsingIUnknown, pHref);
+} // HRESULT TypeLibExporter::TokenToHref()
+
+//*****************************************************************************
+// Call the resolver to export the typelib for an assembly.
+//*****************************************************************************
+void TypeLibExporter::ExportReferencedAssembly(
+ Assembly *pAssembly)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pAssembly));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK; // A result.
+ ITypeLib *pTLB = 0; // Exported typelib.
+
+ // Assembly as IP.
+ SafeComHolder<IUnknown> pIAssembly = 0;
+
+ {
+ // Switch to cooperative to get an object ref.
+ GCX_COOP();
+
+ // Invoke the callback to resolve the reference.
+ OBJECTREF orAssembly=0;
+ GCPROTECT_BEGIN(orAssembly)
+ {
+ orAssembly = pAssembly->GetExposedObject();
+
+ pIAssembly = GetComIPFromObjectRef(&orAssembly, MscorlibBinder::GetClass(CLASS__IASSEMBLY));
+ }
+ GCPROTECT_END();
+ }
+
+ IfFailReport(m_pNotify->ResolveRef((IUnknown*)pIAssembly, (IUnknown**)&pTLB));
+
+ // If we got a typelib, store it on the assembly.
+ if (pTLB)
+ pAssembly->SetTypeLib(pTLB);
+} // void TypeLibExporter::ExportReferencedAssembly()
+
+//*****************************************************************************
+// Determine if a class represents a well-known interface, and return that
+// interface (from its real typelib) if it does.
+//*****************************************************************************
+void TypeLibExporter::GetWellKnownInterface(
+ MethodTable *pMT, // MethodTable to check.
+ ITypeInfo **ppTI) // Put ITypeInfo here, if found.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(ppTI));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr; // A result.
+ GUID guid; // The MethodTable guid.
+ WCHAR wzGuid[40]; // Guid in string format.
+ LONG cbGuid; // Size of guid buffer.
+ GUID guidTlb; // The typelib guid.
+    DWORD dwError;              // Note: the HRESULT_FROM_WIN32 macro evaluates its argument three times.
+
+
+ HKEYHolder hInterface; // Registry key HKCR/Interface
+ HKEYHolder hGuid; // Registry key of .../{xxx...xxx}
+ HKEYHolder hTlb; // Registry key of .../TypeLib
+
+ // The ITypeLib.
+ SafeComHolder<ITypeLib> pTLB=0;
+
+    // Get the GUID for the class. It will be generated from the name if no GUID
+    // is defined; for interfaces, method signatures are also used.
+ pMT->GetGuid(&guid, TRUE);
+
+ GuidToLPWSTR(guid, wzGuid, lengthof(wzGuid));
+
+ // Look up that interface in the registry.
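+    // The probe below walks the standard COM registration layout, i.e.
+    //     HKCR\Interface\{iid}\TypeLib    (default value = the typelib's GUID string)
+    // and then loads that typelib to locate the interface's ITypeInfo.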
+ dwError = WszRegOpenKeyEx(HKEY_CLASSES_ROOT, W("Interface"),0,KEY_READ, &hInterface);
+ hr = HRESULT_FROM_WIN32(dwError);
+ if (FAILED(hr))
+ return;
+
+ dwError = WszRegOpenKeyEx((HKEY)hInterface, wzGuid, 0, KEY_READ, &hGuid);
+ hr = HRESULT_FROM_WIN32(dwError);
+ if (FAILED(hr))
+ return;
+
+ dwError = WszRegOpenKeyEx((HKEY)hGuid, W("TypeLib"), 0, KEY_READ, &hTlb);
+ hr = HRESULT_FROM_WIN32(dwError);
+ if (FAILED(hr))
+ return;
+
+ cbGuid = sizeof(wzGuid);
+ dwError = WszRegQueryValue((HKEY)hTlb, W(""), wzGuid, &cbGuid);
+ hr = HRESULT_FROM_WIN32(dwError);
+ if (FAILED(hr))
+ return;
+
+ CLSIDFromString(wzGuid, &guidTlb);
+
+    // Retrieve the major and minor version numbers.
+ USHORT wMajor;
+ USHORT wMinor;
+ Assembly *pAssembly = pMT->GetAssembly();
+
+ hr = GetTypeLibVersionForAssembly(pAssembly,&wMajor, &wMinor);
+ if (SUCCEEDED(hr))
+ {
+ hr = LoadRegTypeLib(guidTlb, wMajor, wMinor, 0, &pTLB);
+ }
+ if (FAILED(hr))
+ {
+ pAssembly->GetVersion(&wMajor, &wMinor, NULL, NULL);
+
+ hr = LoadRegTypeLib(guidTlb, wMajor, wMinor, 0, &pTLB);
+ if (FAILED(hr))
+ {
+ hr = LoadRegTypeLib(guidTlb, -1, -1, 0, &pTLB);
+ if (FAILED(hr))
+ {
+ return;
+ }
+ }
+ }
+
+
+ hr = pTLB->GetTypeInfoOfGuid(guid, ppTI);
+} // void TypeLibExporter::GetWellKnownInterface()
+
+//*****************************************************************************
+// Get an HREFTYPE for an ITypeInfo, in the context of an ICreateTypeInfo2.
+//*****************************************************************************
+HRESULT TypeLibExporter::EEClassToHref( // S_OK or error.
+ ICreateTypeInfo2 *pCTI, // Typeinfo being created.
+ MethodTable *pClass, // The MethodTable * to resolve.
+ BOOL bWarnOnUsingIUnknown, // A flag indicating if we should warn on substituting IUnknown.
+ HREFTYPE *pHref) // Put HREFTYPE here.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pCTI));
+ PRECONDITION(CheckPointer(pClass));
+ PRECONDITION(CheckPointer(pHref));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr=S_OK; // A result.
+ int bUseIUnknown=false; // Use IUnknown (if so, don't release pTI)?
+ int bUseIUnknownWarned=false; // If true, used IUnknown, but already issued a more specific warning.
+ CExportedTypesInfo sExported; // Cached ICreateTypeInfo pointers.
+ CExportedTypesInfo *pExported; // Pointer to found or new cached pointers.
+ CHrefOfClassHashKey sLookup; // Hash structure to lookup.
+ CHrefOfClassHashKey *pFound; // Found structure.
+ bool bImportedAssembly; // The assembly containing pClass is imported.
+ bool bForceResolveCallback; // Type library resolution should always be handled by caller first.
+
+ // A different typeinfo; default for pTI.
+ SafeComHolder<ITypeInfo> pTIDef=0;
+
+ // A TypeInfo; maybe for TypeDef, maybe for TypeRef.
+ SafeComHolder<ITypeInfo> pTI=0;
+
+
+    // See if we already know this MethodTable's href.
+ sLookup.pClass = pClass;
+ if ((pFound=m_HrefOfClassHash.Find(&sLookup)) != NULL)
+ {
+ *pHref = pFound->href;
+ if (*pHref == m_hIUnknown)
+ return S_USEIUNKNOWN;
+ return S_OK;
+ }
+
+ // See if the class is in the export list.
+ sExported.pClass = pClass;
+ pExported = m_Exports.Find(&sExported);
+
+ // If not in the exported assembly, possibly it was injected?
+ if (pExported == 0)
+ {
+ pExported = m_InjectedExports.Find(&sExported);
+ }
+
+ // Is there an export for this class?
+ if (pExported)
+ {
+        // Yes. For interfaces and value types (and enums), just use the typeinfo.
+ if (pClass->IsValueType() || pClass->IsEnum() || pClass->HasLayout())
+ {
+ // No default interface, so use the class itself.
+ if (pExported->pCTI)
+ IfFailReport(SafeQueryInterface(pExported->pCTI, IID_ITypeInfo, (IUnknown**)&pTI));
+ }
+        else if (!pClass->IsInterface())
+ {
+ // If there is an explicit default interface, get the class for it.
+ TypeHandle hndDefItfClass;
+ DefaultInterfaceType DefItfType;
+ DefItfType = GetDefaultInterfaceForClassWrapper(TypeHandle(pClass), &hndDefItfClass);
+ switch (DefItfType)
+ {
+ case DefaultInterfaceType_Explicit:
+ {
+ _ASSERTE(!hndDefItfClass.IsNull());
+
+ // Recurse to get the href for the default interface class.
+ hr = EEClassToHref(pCTI, hndDefItfClass.GetMethodTable(), bWarnOnUsingIUnknown, pHref);
+ // Done. Note that the previous call will have cached the href for
+ // the default interface class. As this function exits, it will
+ // also cache the SAME href for this class.
+ goto ErrExit;
+ }
+
+ case DefaultInterfaceType_AutoDispatch:
+ case DefaultInterfaceType_AutoDual:
+ {
+ _ASSERTE(!hndDefItfClass.IsNull());
+
+ if (hndDefItfClass.GetMethodTable() != pClass)
+ {
+ // Recurse to get the href for the default interface class.
+ hr = EEClassToHref(pCTI, hndDefItfClass.GetMethodTable(), bWarnOnUsingIUnknown, pHref);
+ // Done. Note that the previous call will have cached the href for
+ // the default interface class. As this function exits, it will
+ // also cache the SAME href for this class.
+ goto ErrExit;
+ }
+
+ // Return the class interface.
+ _ASSERTE(pExported->pCTIClassItf);
+ IfFailReport(SafeQueryInterface(pExported->pCTIClassItf, IID_ITypeInfo, (IUnknown**)&pTI));
+ break;
+ }
+
+ case DefaultInterfaceType_IUnknown:
+ case DefaultInterfaceType_BaseComClass:
+ {
+ pTI = m_pIUnknown;
+ bUseIUnknown=true;
+ SafeAddRef(pTI);
+ break;
+ }
+
+ default:
+ {
+ _ASSERTE(!"Invalid default interface type!");
+ hr = E_FAIL;
+ break;
+ }
+ }
+ }
+ else
+ { // This is an interface, so use the typeinfo for the interface, if there is one.
+ if (pExported->pCTI)
+ IfFailReport(SafeQueryInterface(pExported->pCTI, IID_ITypeInfo, (IUnknown**)&pTI));
+ }
+
+ if ((IUnknown*)pTI == 0)
+ {
+ // This is a class from the module/assembly, yet it is not being exported.
+
+ // Whatever happens, the result is OK.
+ hr = S_OK;
+
+ if (pClass->IsComImport())
+ {
+ // If it is an imported type, get an href to it.
+ GetWellKnownInterface(pClass, &pTI);
+ }
+
+ // If still didn't get a TypeInfo, use IUnknown.
+ if ((IUnknown*)pTI == 0)
+ {
+ pTI = m_pIUnknown;
+ bUseIUnknown=true;
+ SafeAddRef(pTI);
+ }
+ }
+ }
+ else
+ { // Not local. Try to get from the class' module's typelib.
+ // If the caller wants to get a chance to resolve type library references themselves (before we go probing the assembly),
+ // we'll skip the next step and go directly to the notify sink callback.
+ bForceResolveCallback = (m_flags & TlbExporter_CallerResolvedReferences) != 0;
+ if (!bForceResolveCallback)
+ hr = GetITypeInfoForEEClass(pClass, &pTI, false/* interface, not coclass */, false/* do not create */, m_flags);
+
+ // If getting the typeinfo from the class itself failed, there are
+ // several possibilities:
+ // - typelib didn't exist, and couldn't be created.
+ // - typelib did exist, but didn't contain the typeinfo.
+ // We can create a local (to the exported typelib) copy of the
+ // typeinfo, and get a reference to that.
+ // However, we don't want to export the whole tree into this typelib,
+ // so we only create the typeinfo if the typelib existed but the
+ // typeinfo wasn't found and the assembly is not an imported assembly.
+ bImportedAssembly = pClass->GetAssembly()->IsImportedFromTypeLib();
+
+ if (bForceResolveCallback || (FAILED(hr) && hr != TYPE_E_ELEMENTNOTFOUND && !bImportedAssembly))
+ {
+ // Invoke the callback to resolve the reference.
+
+ Assembly *pAssembly = pClass->GetAssembly();
+
+ ExportReferencedAssembly(pAssembly);
+
+ hr = GetITypeInfoForEEClass(pClass, &pTI, false/* interface, not coclass */, false/* do not create */, m_flags);
+ }
+
+ if (hr == TYPE_E_ELEMENTNOTFOUND)
+ {
+ if (pClass->IsComImport())
+ {
+ // If it is an imported type, get an href to it.
+
+ // Whatever happens, the result is OK.
+ hr = S_OK;
+
+ GetWellKnownInterface(pClass, &pTI);
+
+ // If still didn't get a TypeInfo, use IUnknown.
+ if ((IUnknown*)pTI == 0)
+ {
+ pTI = m_pIUnknown;
+ bUseIUnknown=true;
+ SafeAddRef(pTI);
+ }
+ }
+ else
+ {
+ // Convert the single typedef from the other scope.
+ ConvertOneTypeDef(pClass);
+
+ // Now that the type has been injected, recurse to let the default-interface code run.
+ hr = EEClassToHref(pCTI, pClass, bWarnOnUsingIUnknown, pHref);
+
+ // This class should already have been cached by the recursive call. Don't want to add
+ // it again.
+ goto ErrExit2;
+ }
+ }
+ else if (FAILED(hr))
+ {
+ DefineFullyQualifiedNameForClassWOnStack();
+ LPCWSTR szName = GetFullyQualifiedNameForClassNestedAwareW(pClass);
+ if (hr == TLBX_W_LIBNOTREGISTERED)
+ {
+ // The imported typelib is not registered on this machine. Give a warning, and substitute IUnknown.
+ ReportEvent(NOTIF_CONVERTWARNING, hr, szName, (LPCWSTR) pClass->GetAssembly()->GetManifestModule()->GetPath());
+ hr = S_OK;
+ pTI = m_pIUnknown;
+ bUseIUnknown = true;
+ SafeAddRef(pTI);
+ bUseIUnknownWarned = true;
+ }
+ else if (hr == TLBX_E_CANTLOADLIBRARY)
+ {
+ // The imported typelib is registered, but can't be loaded. Corrupt? Missing?
+ InternalThrowHRWithContext(TLBX_E_CANTLOADLIBRARY, szName, (LPCWSTR) pClass->GetAssembly()->GetManifestModule()->GetPath());
+ }
+ IfFailReport(hr);
+ }
+ }
+
+ // Make sure we could resolve the typeinfo.
+ if (!(IUnknown*)pTI)
+ IfFailReport(TYPE_E_ELEMENTNOTFOUND);
+
+ // Assert that the containing typelib for pContainer is the typelib being created.
+#if defined(_DEBUG)
+ {
+ SafeComHolder<ITypeInfo> pTI=0;
+ SafeComHolder<ITypeLib> pTL=0;
+ SafeComHolder<ITypeLib> pTLMe=0;
+ UINT ix;
+ SafeQueryInterface(pCTI, IID_ITypeInfo, (IUnknown**)&pTI);
+ SafeQueryInterface(m_pICreateTLB, IID_ITypeLib, (IUnknown**)&pTLMe);
+ pTI->GetContainingTypeLib(&pTL, &ix);
+ _ASSERTE(pTL == pTLMe);
+ }
+#endif
+
+ // If there is an ITypeInfo, convert to HREFTYPE.
+ if ((IUnknown*)pTI)
+ {
+ if ((IUnknown*)pTI != m_pIUnknown)
+ {
+ // Resolve to default.
+ if (pTIDef)
+ hr = S_OK; // Already have default.
+ else
+ {
+                    // The TypeLib API has an issue (sort of by design):
+                    // before a type (and its dependencies) is completely created (all members added),
+                    // calling Layout(), or anything that leads to Layout() such as ITypeInfo::GetTypeAttr(),
+                    // will give the type an incorrect size. Ideally the TypeLib API would fail in this case.
+                    // Either way, we only need to avoid calling Layout() directly or indirectly until we
+                    // have completely created all types.
+                    // In this case, we call ITypeInfo::GetTypeAttr() in the function below, which is only
+                    // needed for coclasses. Fortunately, a coclass doesn't have the size problem, as it
+                    // doesn't have any members, so we skip calling GetDefaultInterfaceForCoclass unless
+                    // the class is a coclass.
+ if (TKindFromClass(pClass) == TKIND_COCLASS)
+ IfFailReport(GetDefaultInterfaceForCoclass(pTI, &pTIDef));
+ else
+ hr = S_FALSE;
+ }
+
+ if (hr == S_OK)
+ hr = pCTI->AddRefTypeInfo(pTIDef, pHref);
+ else
+ hr = pCTI->AddRefTypeInfo(pTI, pHref);
+ }
+ else
+ { // pTI == m_pIUnknown
+ if (m_hIUnknown == -1)
+ hr = pCTI->AddRefTypeInfo(pTI, &m_hIUnknown);
+ *pHref = m_hIUnknown;
+ }
+ }
+
+ErrExit:
+ // If we got the href...
+ if (hr == S_OK)
+ {
+ // Save for later use.
+ if ( NULL == (pFound=m_HrefOfClassHash.Add(&sLookup)))
+ IfFailReport(E_OUTOFMEMORY);
+
+ pFound->pClass = pClass;
+ pFound->href = *pHref;
+ }
+
+ // If substituting IUnknown, give a warning.
+ if (hr == S_OK && bUseIUnknown && bWarnOnUsingIUnknown && !bUseIUnknownWarned)
+ {
+ DefineFullyQualifiedNameForClassWOnStack();
+ LPCWSTR szName = GetFullyQualifiedNameForClassNestedAwareW(pClass);
+ ReportWarning(S_OK, TLBX_I_USEIUNKNOWN, szName);
+ }
+
+ErrExit2:
+ if (hr == S_OK && bUseIUnknown)
+ hr = S_USEIUNKNOWN;
+
+ return hr;
+} // HRESULT TypeLibExporter::EEClassToHref()
+
+//*****************************************************************************
+// Retrieve an HREFTYPE for a type defined in StdOle.
+//*****************************************************************************
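+// (Illustrative, hypothetical call: StdOleTypeToHRef(pCTI, IID_IDispatch, &href)
+// would make the exported typelib reference stdole2's IDispatch definition.)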
+void TypeLibExporter::StdOleTypeToHRef(ICreateTypeInfo2 *pCTI, REFGUID rGuid, HREFTYPE *pHref)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pCTI));
+ PRECONDITION(CheckPointer(pHref));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ SafeComHolder<ITypeLib> pITLB = NULL;
+ SafeComHolder<ITypeInfo> pITI = NULL;
+ MEMBERID MemID = 0;
+ USHORT cFound = 0;
+
+ IfFailReport(LoadRegTypeLib(LIBID_STDOLE2, -1, -1, 0, &pITLB));
+ IfFailReport(pITLB->GetTypeInfoOfGuid(rGuid, &pITI));
+ IfFailReport(pCTI->AddRefTypeInfo(pITI, pHref));
+} // void TypeLibExporter::StdOleTypeToHRef()
+
+//*****************************************************************************
+// Given a TypeDef's flags, determine the proper TYPEKIND.
+//*****************************************************************************
+TYPEKIND TypeLibExporter::TKindFromClass(
+ MethodTable *pClass) // MethodTable.
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pClass));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ ULONG ulIface = ifDual; // Is this interface [dual], IUnknown, or DISPINTERFACE.
+
+ if (pClass->IsInterface())
+ {
+ // IDispatch or IUnknown derived?
+ IfFailReport(pClass->GetMDImport()->GetIfaceTypeOfTypeDef(pClass->GetCl(), &ulIface));
+ if (ulIface == ifDispatch)
+ return TKIND_DISPATCH;
+
+ return TKIND_INTERFACE;
+ }
+
+ if (pClass->IsEnum())
+ return TKIND_ENUM;
+
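+    // Value types with explicit layout where every field sits at offset 0 (for
+    // example, a struct marked LayoutKind.Explicit with all fields at
+    // FieldOffset(0)) export as TKIND_UNION; any other layout yields TKIND_RECORD.
+    // The loop below performs the actual test.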
+ if (pClass->IsValueType() || pClass->HasLayout())
+ {
+ TYPEKIND tkResult=TKIND_RECORD; // The resulting typekind.
+ mdFieldDef fd; // A Field def.
+ ULONG cFD; // Count of fields.
+ ULONG iFD=0; // Loop control.
+ ULONG ulOffset; // Field offset.
+ bool bNonZero=false; // Found any non-zero?
+ MD_CLASS_LAYOUT sLayout; // For enumerating layouts.
+
+ // To enum fields.
+ HENUMInternalHolder eFDi(pClass->GetMDImport());
+ eFDi.EnumInit(mdtFieldDef, pClass->GetCl());
+
+ // Get an enumerator for the FieldDefs in the TypeDef. Only need the counts.
+ cFD = pClass->GetMDImport()->EnumGetCount(&eFDi);
+
+ // Get an enumerator for the class layout.
+ IfFailReport(pClass->GetMDImport()->GetClassLayoutInit(pClass->GetCl(), &sLayout));
+
+ // Enumerate the layout.
+ while (pClass->GetMDImport()->GetClassLayoutNext(&sLayout, &fd, &ulOffset) == S_OK)
+ {
+ if (ulOffset != 0)
+ {
+ bNonZero = true;
+ break;
+ }
+ ++iFD;
+ }
+
+ // If there were fields, all had layout, and all layouts are zero, call it a union.
+ if (cFD > 0 && iFD == cFD && !bNonZero)
+ tkResult = TKIND_UNION;
+
+ return tkResult;
+ }
+
+ return TKIND_COCLASS;
+} // TYPEKIND TypeLibExporter::TKindFromClass()
+
+//*****************************************************************************
+// Generate a HREFTYPE in the output TypeLib for a TypeInfo.
+//*****************************************************************************
+void TypeLibExporter::GetRefTypeInfo(
+ ICreateTypeInfo2 *pContainer,
+ ITypeInfo *pReferenced,
+ HREFTYPE *pHref)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pContainer));
+ PRECONDITION(CheckPointer(pReferenced));
+ PRECONDITION(CheckPointer(pHref));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr; // A result.
+ CHrefOfTIHashKey sLookup; // Hash structure to lookup.
+ CHrefOfTIHashKey *pFound; // Found structure.
+
+ // See if we already know this TypeInfo.
+ sLookup.pITI = pReferenced;
+ if ((pFound=m_HrefHash.Find(&sLookup)) != NULL)
+ {
+ *pHref = pFound->href;
+ return;
+ }
+
+ // Assert that the containing typelib for pContainer is the typelib being created.
+#if defined(_DEBUG)
+ {
+ SafeComHolder<ITypeInfo> pTI=0;
+ SafeComHolder<ITypeLib> pTL=0;
+ SafeComHolder<ITypeLib> pTLMe=0;
+ UINT ix;
+
+ SafeQueryInterface(pContainer, IID_ITypeInfo, (IUnknown**)&pTI);
+ SafeQueryInterface(m_pICreateTLB, IID_ITypeLib, (IUnknown**)&pTLMe);
+ pTI->GetContainingTypeLib(&pTL, &ix);
+ _ASSERTE(pTL == pTLMe);
+ }
+#endif
+
+ // Haven't seen it -- add the href.
+ // NOTE: This code assumes that hreftypes are per-typelib.
+ IfFailReport(pContainer->AddRefTypeInfo(pReferenced, pHref));
+
+ // Save for later use.
+ pFound=m_HrefHash.Add(&sLookup);
+ if (pFound == NULL)
+ IfFailReport(E_OUTOFMEMORY);
+
+ // Prefix can't tell that IfFailReport will actually throw an exception if pFound is NULL so
+ // let's tell it explicitly that if we reach this point pFound will not be NULL.
+ PREFIX_ASSUME(pFound != NULL);
+ pFound->pITI = pReferenced;
+ pFound->href = *pHref;
+ pReferenced->AddRef();
+} // void TypeLibExporter::GetRefTypeInfo()
+
+//*****************************************************************************
+// Implementation of a hashed ITypeInfo to HREFTYPE association.
+//*****************************************************************************
+void TypeLibExporter::CHrefOfTIHash::Clear()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ CHrefOfTIHashKey *p;
+ for (p=GetFirst(); p; p=GetNext(p))
+ {
+ SafeRelease(p->pITI);
+ }
+
+ CClosedHash<class CHrefOfTIHashKey>::Clear();
+} // void TypeLibExporter::CHrefOfTIHash::Clear()
+
+unsigned int TypeLibExporter::CHrefOfTIHash::Hash(const CHrefOfTIHashKey *pData)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifndef _WIN64
+ // The pointers are at least 4-byte aligned, so ignore bottom two bits.
+ return (unsigned int) (((size_t)(pData->pITI))>>2);
+#else
+ // @TODO IA64: Is this a good hashing mechanism on IA64?
+ return (unsigned int) (((size_t)(pData->pITI))>>3);
+#endif
+} // unsigned int TypeLibExporter::CHrefOfTIHash::Hash()
+
+unsigned int TypeLibExporter::CHrefOfTIHash::Compare(const CHrefOfTIHashKey *p1, CHrefOfTIHashKey *p2)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (p1->pITI == p2->pITI)
+ return (0);
+ return (1);
+} // unsigned int TypeLibExporter::CHrefOfTIHash::Compare()
+
+TypeLibExporter::CHrefOfTIHash::ELEMENTSTATUS TypeLibExporter::CHrefOfTIHash::Status(CHrefOfTIHashKey *p)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (p->pITI == reinterpret_cast<ITypeInfo*>(FREE))
+ return (FREE);
+ if (p->pITI == reinterpret_cast<ITypeInfo*>(DELETED))
+ return (DELETED);
+ return (USED);
+} // TypeLibExporter::CHrefOfTIHash::ELEMENTSTATUS TypeLibExporter::CHrefOfTIHash::Status()
+
+void TypeLibExporter::CHrefOfTIHash::SetStatus(CHrefOfTIHashKey *p, ELEMENTSTATUS s)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ p->pITI = reinterpret_cast<ITypeInfo*>(s);
+} // void TypeLibExporter::CHrefOfTIHash::SetStatus()
+
+void *TypeLibExporter::CHrefOfTIHash::GetKey(CHrefOfTIHashKey *p)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return &p->pITI;
+} // void *TypeLibExporter::CHrefOfTIHash::GetKey()
+
+
+//*****************************************************************************
+// Implementation of a hashed MethodTable* to HREFTYPE association.
+//*****************************************************************************
+void TypeLibExporter::CHrefOfClassHash::Clear()
+{
+ WRAPPER_NO_CONTRACT;
+ CClosedHash<class CHrefOfClassHashKey>::Clear();
+} // void TypeLibExporter::CHrefOfClassHash::Clear()
+
+unsigned int TypeLibExporter::CHrefOfClassHash::Hash(const CHrefOfClassHashKey *pData)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifndef _WIN64
+    // The pointers are at least 4-byte aligned, so ignore bottom two bits.
+ return (unsigned int) (((size_t)(pData->pClass))>>2);
+#else
+ // @TODO IA64: Is this a good hashing mechanism on IA64?
+ return (unsigned int) (((size_t)(pData->pClass))>>3);
+#endif
+} // unsigned int TypeLibExporter::CHrefOfClassHash::Hash()
+
+unsigned int TypeLibExporter::CHrefOfClassHash::Compare(const CHrefOfClassHashKey *p1, CHrefOfClassHashKey *p2)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (p1->pClass == p2->pClass)
+ return (0);
+ return (1);
+} // unsigned int TypeLibExporter::CHrefOfClassHash::Compare()
+
+TypeLibExporter::CHrefOfClassHash::ELEMENTSTATUS TypeLibExporter::CHrefOfClassHash::Status(CHrefOfClassHashKey *p)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (p->pClass == reinterpret_cast<MethodTable*>(FREE))
+ return (FREE);
+ if (p->pClass == reinterpret_cast<MethodTable*>(DELETED))
+ return (DELETED);
+ return (USED);
+} // TypeLibExporter::CHrefOfClassHash::ELEMENTSTATUS TypeLibExporter::CHrefOfClassHash::Status()
+
+void TypeLibExporter::CHrefOfClassHash::SetStatus(CHrefOfClassHashKey *p, ELEMENTSTATUS s)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ p->pClass = reinterpret_cast<MethodTable*>(s);
+} // void TypeLibExporter::CHrefOfClassHash::SetStatus()
+
+void *TypeLibExporter::CHrefOfClassHash::GetKey(CHrefOfClassHashKey *p)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return &p->pClass;
+} // void *TypeLibExporter::CHrefOfClassHash::GetKey()
+
+
+//*****************************************************************************
+// Implementation of a hashed MethodTable* to conversion information association.
+//*****************************************************************************
+void TypeLibExporter::CExportedTypesHash::Clear()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Iterate over entries and free pointers.
+ CExportedTypesInfo *pData;
+ pData = GetFirst();
+ while (pData)
+ {
+ SetStatus(pData, DELETED);
+ pData = GetNext(pData);
+ }
+
+ CClosedHash<class CExportedTypesInfo>::Clear();
+} // void TypeLibExporter::CExportedTypesHash::Clear()
+
+unsigned int TypeLibExporter::CExportedTypesHash::Hash(const CExportedTypesInfo *pData)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifndef _WIN64
+    // The pointers are at least 4-byte aligned, so ignore bottom two bits.
+ return (unsigned int) (((size_t)(pData->pClass))>>2);
+#else
+ // @TODO IA64: Is this a good hashing mechanism on IA64?
+ return (unsigned int) (((size_t)(pData->pClass))>>3);
+#endif
+} // unsigned int TypeLibExporter::CExportedTypesHash::Hash()
+
+unsigned int TypeLibExporter::CExportedTypesHash::Compare(const CExportedTypesInfo *p1, CExportedTypesInfo *p2)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (p1->pClass == p2->pClass)
+ return (0);
+ return (1);
+} // unsigned int TypeLibExporter::CExportedTypesHash::Compare()
+
+TypeLibExporter::CExportedTypesHash::ELEMENTSTATUS TypeLibExporter::CExportedTypesHash::Status(CExportedTypesInfo *p)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (p->pClass == reinterpret_cast<MethodTable*>(FREE))
+ return (FREE);
+ if (p->pClass == reinterpret_cast<MethodTable*>(DELETED))
+ return (DELETED);
+ return (USED);
+} // TypeLibExporter::CExportedTypesHash::ELEMENTSTATUS TypeLibExporter::CExportedTypesHash::Status()
+
+void TypeLibExporter::CExportedTypesHash::SetStatus(CExportedTypesInfo *p, ELEMENTSTATUS s)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // If deleting a used entry, free the pointers.
+ if (s == DELETED && Status(p) == USED)
+ {
+        if (p->pCTI)         { p->pCTI->Release();         p->pCTI = 0; }
+        if (p->pCTIClassItf) { p->pCTIClassItf->Release(); p->pCTIClassItf = 0; }
+ }
+ p->pClass = reinterpret_cast<MethodTable*>(s);
+} // void TypeLibExporter::CExportedTypesHash::SetStatus()
+
+void *TypeLibExporter::CExportedTypesHash::GetKey(CExportedTypesInfo *p)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return &p->pClass;
+} // void *TypeLibExporter::CExportedTypesHash::GetKey()
+
+void TypeLibExporter::CExportedTypesHash::InitArray()
+{
+ STANDARD_VM_CONTRACT;
+
+ // For iterating the entries.
+ CExportedTypesInfo *pData = 0;
+
+ // Make room for the data.
+ m_iCount = 0;
+ m_Array = new CExportedTypesInfo*[Base::Count()];
+
+ // Fill the array.
+ pData = GetFirst();
+ while (pData)
+ {
+ m_Array[m_iCount++] = pData;
+ pData = GetNext(pData);
+ }
+} // void TypeLibExporter::CExportedTypesHash::InitArray()
+
+void TypeLibExporter::CExportedTypesHash::UpdateArray()
+{
+ STANDARD_VM_CONTRACT;
+
+ // For iterating the entries.
+ CExportedTypesInfo *pData = 0;
+
+ // Clear the old data.
+ if (m_Array)
+ delete[] m_Array;
+
+ // Make room for the data.
+ m_iCount = 0;
+ m_Array = new CExportedTypesInfo*[Base::Count()];
+
+ // Fill the array.
+ pData = GetFirst();
+ while (pData)
+ {
+ m_Array[m_iCount++] = pData;
+ pData = GetNext(pData);
+ }
+} // void TypeLibExporter::CExportedTypesHash::UpdateArray()
+
+void TypeLibExporter::CExportedTypesHash::SortByName()
+{
+ WRAPPER_NO_CONTRACT;
+
+ CSortByName sorter(m_Array, (int)m_iCount);
+ sorter.Sort();
+} // void TypeLibExporter::CExportedTypesHash::SortByName()
+
+void TypeLibExporter::CExportedTypesHash::SortByToken()
+{
+ WRAPPER_NO_CONTRACT;
+
+ CSortByToken sorter(m_Array, (int)m_iCount);
+ sorter.Sort();
+} // void TypeLibExporter::CExportedTypesHash::SortByToken()
+
+int TypeLibExporter::CExportedTypesHash::CSortByToken::Compare(
+ CExportedTypesInfo **p1,
+ CExportedTypesInfo **p2)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ MethodTable *pC1 = (*p1)->pClass;
+ MethodTable *pC2 = (*p2)->pClass;
+ // Compare scopes.
+ if (pC1->GetMDImport() < pC2->GetMDImport())
+ return -1;
+ if (pC1->GetMDImport() > pC2->GetMDImport())
+ return 1;
+ // Same scopes, compare tokens.
+ if (pC1->GetTypeDefRid() < pC2->GetTypeDefRid())
+ return -1;
+ if (pC1->GetTypeDefRid() > pC2->GetTypeDefRid())
+ return 1;
+ // Hmmm. Same class.
+ return 0;
+} // int TypeLibExporter::CExportedTypesHash::CSortByToken::Compare()
+
+int TypeLibExporter::CExportedTypesHash::CSortByName::Compare(
+ CExportedTypesInfo **p1,
+ CExportedTypesInfo **p2)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(p1));
+ PRECONDITION(CheckPointer(p2));
+ PRECONDITION(CheckPointer(*p1));
+ PRECONDITION(CheckPointer(*p2));
+ }
+ CONTRACTL_END;
+
+ int iRslt; // A compare result.
+
+ MethodTable *pC1 = (*p1)->pClass;
+ MethodTable *pC2 = (*p2)->pClass;
+
+    // Ignore scopes; we need to see name collisions across scopes, so compare
+    // the names directly.
+ LPCSTR pName1, pNS1;
+ LPCSTR pName2, pNS2;
+ IfFailThrow(pC1->GetMDImport()->GetNameOfTypeDef(pC1->GetCl(), &pName1, &pNS1));
+ IfFailThrow(pC2->GetMDImport()->GetNameOfTypeDef(pC2->GetCl(), &pName2, &pNS2));
+
+ // Compare case-insensitive, because we want different capitalizations to sort together.
+ SString sName1(SString::Utf8, pName1);
+ SString sName2(SString::Utf8, pName2);
+
+ iRslt = sName1.CompareCaseInsensitive(sName2);
+ if (iRslt)
+ return iRslt;
+
+ // If names are spelled the same, ignoring capitalization, sort by namespace.
+ // We will attempt to use namespace for disambiguation.
+ SString sNS1(SString::Utf8, pNS1);
+ SString sNS2(SString::Utf8, pNS2);
+
+ iRslt = sNS1.CompareCaseInsensitive(sNS2);
+ return iRslt;
+} // int TypeLibExporter::CExportedTypesHash::CSortByName::Compare()
+
diff --git a/src/vm/tlbexport.h b/src/vm/tlbexport.h
new file mode 100644
index 0000000000..7cfc19e377
--- /dev/null
+++ b/src/vm/tlbexport.h
@@ -0,0 +1,486 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//===========================================================================
+// File: TlbExport.h
+//
+
+//
+// Notes: Create a TypeLib from COM+ metadata.
+//---------------------------------------------------------------------------
+
+
+#ifndef FEATURE_COMINTEROP
+#error FEATURE_COMINTEROP is required for this file
+#endif // FEATURE_COMINTEROP
+
+class ITypeCreateTypeLib2;
+struct ICreateTypeInfo2;
+struct ITypeInfo;
+struct ITypeLibExporterNotifySink;
+
+class CDescPool;
+struct ComMTMethodProps;
+class ComMTMemberInfoMap;
+
+static LPCSTR szVariantClassFullQual = g_VariantClassName;
+
+//*************************************************************************
+// Helper functions.
+//*************************************************************************
+HRESULT Utf2Quick(
+ LPCUTF8 pStr, // The string to convert.
+ CQuickArray<WCHAR> &rStr, // The QuickArray<WCHAR> to convert it into.
+    int iCurLen = 0);                     // Initial characters in the array to leave (default 0).
+
+//*****************************************************************************
+// Signature utilities.
+//*****************************************************************************
+class MetaSigExport : public MetaSig
+{
+public:
+ MetaSigExport(MethodDesc *pMD) :
+ MetaSig(pMD)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ BOOL IsVbRefType()
+ {
+ CONTRACT (BOOL)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACT_END;
+
+ // Get the arg, and skip decorations.
+ SigPointer pt = GetArgProps();
+ CorElementType mt;
+ if (FAILED(pt.PeekElemType(&mt)))
+            RETURN FALSE;
+
+ while (mt == ELEMENT_TYPE_BYREF || mt == ELEMENT_TYPE_PTR)
+ {
+ // Eat the one just examined, and peek at the next one.
+ if (FAILED(pt.GetElemType(NULL)) || FAILED(pt.PeekElemType(&mt)))
+                RETURN FALSE;
+ }
+
+ // Is it just Object?
+ if (mt == ELEMENT_TYPE_OBJECT)
+ RETURN TRUE;
+
+ // A particular class?
+ if (mt == ELEMENT_TYPE_CLASS)
+ {
+ // Exclude "string".
+ if (pt.IsStringType(m_pModule, GetSigTypeContext()))
+ RETURN FALSE;
+ RETURN TRUE;
+ }
+
+ // A particular valuetype?
+ if (mt == ELEMENT_TYPE_VALUETYPE)
+ {
+ // Include "variant".
+ if (pt.IsClass(m_pModule, szVariantClassFullQual, GetSigTypeContext()))
+ RETURN TRUE;
+ RETURN FALSE;
+ }
+
+ // An array, a string, or POD.
+ RETURN FALSE;
+ }
+}; // class MetaSigExport : public MetaSig
+
+
+//*************************************************************************
+// Class to convert COM+ metadata to a TypeLib.
+//*************************************************************************
+class TypeLibExporter
+{
+private:
+ class CExportedTypesInfo
+ {
+ public:
+ MethodTable* pClass; // The class being exported.
+ ICreateTypeInfo2* pCTI; // The ICreateTypeInfo2 for the EE class.
+ ICreateTypeInfo2* pCTIClassItf; // The ICreateTypeInfo2 for the IClassX.
+ TYPEKIND tkind; // Typekind of the exported class.
+ bool bAutoProxy; // If true, oleaut32 is the interface's proxy.
+ };
+
+ class CExportedTypesHash : public CClosedHashEx<CExportedTypesInfo, CExportedTypesHash>
+ {
+ protected:
+ friend class CSortByToken;
+ friend class CSortByName;
+
+ class CSortByToken : public CQuickSort<CExportedTypesInfo*>
+ {
+ public:
+ CSortByToken(CExportedTypesInfo **pBase, int iCount) :
+ CQuickSort<CExportedTypesInfo*>(pBase, iCount)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ virtual int Compare(CExportedTypesInfo **ps1, CExportedTypesInfo **ps2);
+ };
+
+ class CSortByName : public CQuickSort<CExportedTypesInfo*>
+ {
+ public:
+ CSortByName(CExportedTypesInfo **pBase, int iCount) :
+ CQuickSort<CExportedTypesInfo*>(pBase, iCount)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ virtual int Compare(CExportedTypesInfo **ps1, CExportedTypesInfo **ps2);
+ };
+
+ public:
+ typedef CClosedHashEx<CExportedTypesInfo, CExportedTypesHash> Base;
+ typedef CExportedTypesInfo T;
+
+ CExportedTypesHash() :
+ Base(1009),
+ m_iCount(0),
+ m_Array(NULL)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+ ~CExportedTypesHash()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ Clear();
+ delete[] m_Array;
+ }
+
+ virtual void Clear();
+
+ unsigned int Hash(const T *pData);
+ unsigned int Compare(const T *p1, T *p2);
+ ELEMENTSTATUS Status(T *p);
+ void SetStatus(T *p, ELEMENTSTATUS s);
+ void* GetKey(T *p);
+
+ //@todo: move to CClosedHashEx
+ T* GetFirst()
+ {
+ WRAPPER_NO_CONTRACT;
+ return (T*)CClosedHashBase::GetFirst();
+ }
+ T* GetNext(T*prev)
+ {
+ WRAPPER_NO_CONTRACT;
+ return (T*)CClosedHashBase::GetNext((BYTE*)prev);
+ }
+
+ void InitArray();
+ void UpdateArray();
+
+ T* operator[](ULONG ix)
+ {
+ CONTRACT (T*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(ix < m_iCount);
+ POSTCONDITION(CheckPointer(RETVAL));
+ }
+ CONTRACT_END;
+
+ RETURN m_Array[ix];
+ }
+ int Count()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_iCount;
+ }
+
+ void SortByName();
+ void SortByToken();
+
+ protected:
+ CExportedTypesInfo** m_Array;
+ ULONG m_iCount;
+ };
+
+protected:
+ struct CErrorContext;
+
+ class CHrefOfTIHashKey
+ {
+ public:
+ ITypeInfo* pITI;
+ HREFTYPE href;
+ };
+
+ class CHrefOfTIHash : public CClosedHash<class CHrefOfTIHashKey>
+ {
+ public:
+ typedef CHrefOfTIHashKey T;
+
+ CHrefOfTIHash() :
+ CClosedHash<class CHrefOfTIHashKey>(101)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ ~CHrefOfTIHash()
+ {
+ CONTRACTL { NOTHROW; SO_TOLERANT; } CONTRACTL_END;
+ Clear();
+ }
+
+ virtual void Clear();
+
+ unsigned int Hash(const T *pData);
+ unsigned int Hash(const void *pData)
+ {
+ WRAPPER_NO_CONTRACT;
+ return Hash((const T*)pData);
+ }
+
+ unsigned int Compare(const T *p1, T *p2);
+ unsigned int Compare(const void *p1, BYTE *p2)
+ {
+ WRAPPER_NO_CONTRACT;
+ return Compare((const T*)p1, (T*)p2);
+ }
+
+ ELEMENTSTATUS Status(T *p);
+ ELEMENTSTATUS Status(BYTE *p)
+ {
+ WRAPPER_NO_CONTRACT;
+ return Status((T*)p);
+ }
+
+ void SetStatus(T *p, ELEMENTSTATUS s);
+ void SetStatus(BYTE *p, ELEMENTSTATUS s)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetStatus((T*)p, s);
+ }
+
+ void *GetKey(T *p);
+ void* GetKey(BYTE *p)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetKey((T*)p);
+ }
+ };
+
+ class CHrefOfClassHashKey
+ {
+ public:
+ MethodTable* pClass;
+ HREFTYPE href;
+ };
+
+ class CHrefOfClassHash : public CClosedHash<class CHrefOfClassHashKey>
+ {
+ public:
+ typedef CHrefOfClassHashKey T;
+
+ CHrefOfClassHash() :
+ CClosedHash<class CHrefOfClassHashKey>(101)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+ ~CHrefOfClassHash()
+ {
+ WRAPPER_NO_CONTRACT;
+ Clear();
+ }
+
+ virtual void Clear();
+
+ unsigned int Hash(const T *pData);
+ unsigned int Hash(const void *pData)
+ {
+ WRAPPER_NO_CONTRACT;
+ return Hash((const T*)pData);
+ }
+
+
+ unsigned int Compare(const T *p1, T *p2);
+ unsigned int Compare(const void *p1, BYTE *p2)
+ {
+ WRAPPER_NO_CONTRACT;
+ return Compare((const T*)p1, (T*)p2);
+ }
+
+
+ ELEMENTSTATUS Status(T *p);
+ ELEMENTSTATUS Status(BYTE *p)
+ {
+ WRAPPER_NO_CONTRACT;
+ return Status((T*)p);
+ }
+
+
+ void SetStatus(T *p, ELEMENTSTATUS s);
+ void SetStatus(BYTE *p, ELEMENTSTATUS s)
+ {
+ WRAPPER_NO_CONTRACT;
+ SetStatus((T*)p, s);
+ }
+
+
+ void *GetKey(T *p);
+ void* GetKey(BYTE *p)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetKey((T*)p);
+ }
+ };
+
+ struct CErrorContext
+ {
+ CErrorContext() :
+ m_prev(0),
+ m_szAssembly(0),
+ m_tkType(mdTypeDefNil),
+ m_pScope(0),
+ m_szMember(0),
+ m_szParam(0),
+ m_ixParam(-1)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ // The following variables hold context info for error reporting.
+ CErrorContext* m_prev; // A previous context.
+ LPCUTF8 m_szAssembly; // Current assembly name.
+ mdToken m_tkType; // Current type's metadata token.
+ IMDInternalImport *m_pScope; // Current type's scope.
+ LPCUTF8 m_szMember; // Current member's name.
+ LPCUTF8 m_szParam; // Current param's name.
+ int m_ixParam; // Current param index.
+ };
+
+public:
+ TypeLibExporter();
+ ~TypeLibExporter();
+
+ void Convert(Assembly *pAssembly, LPCWSTR szTlbName, ITypeLibExporterNotifySink *pNotify=0, int flags=0);
+ void LayOut();
+ HRESULT GetTypeLib(REFGUID iid, IUnknown **ppTlb);
+ void ReleaseResources();
+
+protected:
+ void PreLoadNames();
+
+ void UpdateBitness(Assembly* pAssembly);
+ HRESULT CheckBitness(Assembly* pAssembly);
+
+ // TypeLib emit functions.
+ HRESULT TokenToHref(ICreateTypeInfo2 *pTI, MethodTable *pClass, mdToken tk, BOOL bWarnOnUsingIUnknown, HREFTYPE *pHref);
+ void GetWellKnownInterface(MethodTable *pClass, ITypeInfo **ppTI);
+ HRESULT EEClassToHref(ICreateTypeInfo2 *pTI, MethodTable *pClass, BOOL bWarnOnUsingIUnknown, HREFTYPE *pHref);
+ void StdOleTypeToHRef(ICreateTypeInfo2 *pCTI, REFGUID rGuid, HREFTYPE *pHref);
+ void ExportReferencedAssembly(Assembly *pAssembly);
+
+ // Metadata import functions.
+ void AddModuleTypes(Module *pModule);
+ void AddAssemblyTypes(Assembly *pAssembly);
+
+ void ConvertAllTypeDefs();
+ HRESULT ConvertOneTypeDef(MethodTable *pClass);
+
+ HRESULT GetTypeLibImportClassName(MethodTable *pClass, SString& pszName);
+
+ void CreateITypeInfo(CExportedTypesInfo *pData, bool bNamespace=false, bool bResolveDup=true);
+ void CreateIClassXITypeInfo(CExportedTypesInfo *pData, bool bNamespace=false, bool bResolveDup=true);
+ void ConvertImplTypes(CExportedTypesInfo *pData);
+ void ConvertDetails(CExportedTypesInfo *pData);
+
+ void ConvertInterfaceImplTypes(ICreateTypeInfo2 *pICTI, MethodTable *pClass);
+ void ConvertInterfaceDetails(ICreateTypeInfo2 *pICTI, MethodTable *pClass, int bAutoProxy);
+ void ConvertRecord(CExportedTypesInfo *pData);
+ void ConvertRecordBaseClass(CExportedTypesInfo *pData, MethodTable *pSubClass, ULONG &ixVar);
+ void ConvertEnum(ICreateTypeInfo2 *pICTI, MethodTable *pClass);
+ void ConvertClassImplTypes(ICreateTypeInfo2 *pICTI, ICreateTypeInfo2 *pIDefault, MethodTable *pClass);
+ void ConvertClassDetails(ICreateTypeInfo2 *pICTI, ICreateTypeInfo2 *pIDefault, MethodTable *pClass, int bAutoProxy);
+
+ BOOL HasDefaultCtor(MethodTable *pMT);
+
+ void ConvertIClassX(ICreateTypeInfo2 *pICTI, MethodTable *pClass, int bAutoProxy);
+ BOOL ConvertMethod(ICreateTypeInfo2 *pTI, ComMTMethodProps *pProps, ULONG iMD, ULONG ulIface);
+ BOOL ConvertFieldAsMethod(ICreateTypeInfo2 *pTI, ComMTMethodProps *pProps, ULONG iMD);
+ BOOL ConvertVariable(ICreateTypeInfo2 *pTI, MethodTable *pClass, mdFieldDef md, SString& sName, ULONG iMD);
+ BOOL ConvertEnumMember(ICreateTypeInfo2 *pTI, MethodTable *pClass, mdFieldDef md, SString& sName, ULONG iMD);
+
+ // Error/status functions.
+ void InternalThrowHRWithContext(HRESULT hr, ...);
+ void FormatErrorContextString(CErrorContext *pContext, SString *pOut);
+ void FormatErrorContextString(SString *pOut);
+ void ReportError(HRESULT hr);
+ void ReportEvent(int ev, int hr, ...);
+ void ReportWarning(HRESULT hrReturn, HRESULT hrRpt, ...);
+ void PostClassLoadError(LPCUTF8 pszName, SString& message);
+
+ // Utility functions.
+ void ClassHasIClassX(MethodTable *pClass, CorClassIfaceAttr *pRslt);
+ MethodTable * LoadClass(Module *pModule, mdToken tk);
+ TypeHandle LoadClass(Module *pModule, LPCUTF8 pszName);
+ HRESULT CorSigToTypeDesc(ICreateTypeInfo2 *pTI, MethodTable *pClass, PCCOR_SIGNATURE pbSig, PCCOR_SIGNATURE pbNativeSig, ULONG cbNativeSig,
+ ULONG *cbElem, TYPEDESC *ptdesc, CDescPool *ppool, BOOL bMethodSig, BOOL *pbByRef=0);
+ BOOL IsVbRefType(PCCOR_SIGNATURE pbSig, IMDInternalImport *pInternalImport);
+
+ BOOL IsExportingAs64Bit();
+
+ void ArrayToTypeDesc(ICreateTypeInfo2 *pCTI, CDescPool *ppool, ArrayMarshalInfo *pArrayMarshalInfo, TYPEDESC *pElementTypeDesc);
+
+ VARTYPE GetVtForIntPtr();
+ VARTYPE GetVtForUIntPtr();
+
+ //BOOL ValidateSafeArrayElemVT(VARTYPE vt);
+
+ BOOL GetDescriptionString(MethodTable *pClass, mdToken tk, BSTR &bstrDescr);
+ BOOL GetStringCustomAttribute(IMDInternalImport *pImport, LPCSTR szName, mdToken tk, BSTR &bstrDescr);
+
+ BOOL GetAutomationProxyAttribute(IMDInternalImport *pImport, mdToken tk, int *bValue);
+
+ TYPEKIND TKindFromClass(MethodTable *pClass);
+
+protected:
+ void GetRefTypeInfo(ICreateTypeInfo2 *pContainer, ITypeInfo *pReferenced, HREFTYPE *pHref);
+
+ CHrefOfTIHash m_HrefHash; // Hashed table of HREFTYPEs of ITypeInfos
+    CHrefOfClassHash    m_HrefOfClassHash;     // Hashed table of HREFTYPEs keyed by MethodTable
+ CErrorContext m_ErrorContext;
+
+private:
+    ClassLoader*        m_pLoader;          // Loader for the Module being converted
+ ITypeInfo* m_pIUnknown; // TypeInfo for IUnknown.
+ HREFTYPE m_hIUnknown; // href for IUnknown.
+ ITypeInfo* m_pIDispatch; // TypeInfo for IDispatch.
+ ITypeInfo* m_pGuid; // TypeInfo for GUID.
+
+ ITypeLibExporterNotifySink* m_pNotify; // Notification callback.
+
+ ICreateTypeLib2* m_pICreateTLB; // The created typelib.
+
+ int m_flags; // Conversion flags.
+ int m_bAutomationProxy; // Should interfaces be marked such that oleaut32 is the proxy?
+ int m_bWarnedOfNonPublic;
+
+ CExportedTypesHash m_Exports;
+ CExportedTypesHash m_InjectedExports;
+};
+
+
+// eof ------------------------------------------------------------------------
diff --git a/src/vm/typectxt.cpp b/src/vm/typectxt.cpp
new file mode 100644
index 0000000000..3cebea05b6
--- /dev/null
+++ b/src/vm/typectxt.cpp
@@ -0,0 +1,336 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// typectxt.cpp
+//
+
+//
+// Simple struct to record the data necessary to interpret ELEMENT_TYPE_VAR
+// and ELEMENT_TYPE_MVAR within pieces of metadata, in particular within
+// signatures parsed by MetaSig and SigPointer.
+//
+
+
+
+#include "common.h"
+#include "method.hpp"
+#include "typehandle.h"
+#include "field.h"
+
+
+
+void SigTypeContext::InitTypeContext(MethodDesc *md, Instantiation exactClassInst, Instantiation exactMethodInst, SigTypeContext *pRes)
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ MethodTable *pMT = md->GetMethodTable();
+
+ if (pMT->IsArray())
+ {
+ pRes->m_classInst = exactClassInst.IsEmpty() ? pMT->GetClassOrArrayInstantiation() : exactClassInst;
+ }
+ else
+ {
+ pRes->m_classInst = exactClassInst;
+ }
+ pRes->m_methodInst = exactMethodInst;
+}
+
+void SigTypeContext::InitTypeContext(MethodDesc *md, SigTypeContext *pRes)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+
+ PRECONDITION(CheckPointer(md));
+ } CONTRACTL_END;
+
+ MethodTable *pMT = md->GetMethodTable();
+ if (pMT->IsArray())
+ {
+ pRes->m_classInst = pMT->GetClassOrArrayInstantiation();
+ }
+ else
+ {
+ pRes->m_classInst = pMT->GetInstantiation();
+ }
+ pRes->m_methodInst = md->GetMethodInstantiation();
+}
+
+void SigTypeContext::InitTypeContext(MethodDesc *md, TypeHandle declaringType, SigTypeContext *pRes)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+
+ PRECONDITION(CheckPointer(md));
+ } CONTRACTL_END;
+
+ if (declaringType.IsNull())
+ {
+ SigTypeContext::InitTypeContext(md, pRes);
+ }
+ else
+ {
+ MethodTable *pMDMT = md->GetMethodTable();
+ if (pMDMT->IsArray())
+ {
+ pRes->m_classInst = declaringType.GetClassOrArrayInstantiation();
+ }
+ else
+ {
+ pRes->m_classInst = declaringType.GetInstantiationOfParentClass(pMDMT);
+ }
+ pRes->m_methodInst = md->GetMethodInstantiation();
+ }
+}
+
+#ifndef DACCESS_COMPILE
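+// Illustration (not part of this diff): for a method void M<T>(T t) where T : Stream,
+// reflection can surface Stream's methods through the type variable T; walking T's
+// constraint hierarchy recovers Stream (or one of its bases) as the declaring
+// method table.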
+TypeHandle GetDeclaringMethodTableFromTypeVarTypeDesc(TypeVarTypeDesc *pTypeVar, MethodDesc *pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // This currently should only happen in cases where we've already loaded the constraints.
+    // Currently, the only known case where we use this code is reflection over methods exposed on a type variable.
+ _ASSERTE(pTypeVar->ConstraintsLoaded());
+
+ if (pTypeVar->ConstraintsLoaded())
+ {
+ DWORD cConstraints;
+ TypeHandle *pTypeHandles = pTypeVar->GetCachedConstraints(&cConstraints);
+ for (DWORD iConstraint = 0; iConstraint < cConstraints; iConstraint++)
+ {
+ if (pTypeHandles[iConstraint].IsGenericVariable())
+ {
+ TypeHandle th = GetDeclaringMethodTableFromTypeVarTypeDesc(pTypeHandles[iConstraint].AsGenericVariable(), pMD);
+ if (!th.IsNull())
+ return th;
+ }
+ else
+ {
+ MethodTable *pMT = pTypeHandles[iConstraint].GetMethodTable();
+ while (pMT != NULL)
+ {
+ if (pMT == pMD->GetMethodTable())
+ {
+ return TypeHandle(pMT);
+ }
+
+ pMT = pMT->GetParentMethodTable();
+ }
+ }
+ }
+ }
+ return TypeHandle();
+}
+
+void SigTypeContext::InitTypeContext(MethodDesc *md, TypeHandle declaringType, Instantiation exactMethodInst, SigTypeContext *pRes)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+
+ PRECONDITION(CheckPointer(md));
+ } CONTRACTL_END;
+
+ if (declaringType.IsNull())
+ {
+ SigTypeContext::InitTypeContext(md, pRes);
+ }
+ else
+ {
+ // <TODO> factor this with the work above </TODO>
+ if (declaringType.IsGenericVariable())
+ {
+ declaringType = GetDeclaringMethodTableFromTypeVarTypeDesc(declaringType.AsGenericVariable(), md);
+ }
+
+ if (declaringType.IsNull())
+ {
+ SigTypeContext::InitTypeContext(md, pRes);
+ }
+ else
+ {
+ MethodTable *pMDMT = md->GetMethodTable();
+ if (pMDMT->IsArray())
+ {
+ pRes->m_classInst = declaringType.GetClassOrArrayInstantiation();
+ }
+ else
+ {
+ pRes->m_classInst = declaringType.GetInstantiationOfParentClass(pMDMT);
+ }
+ }
+ }
+ pRes->m_methodInst = !exactMethodInst.IsEmpty() ? exactMethodInst : md->GetMethodInstantiation();
+}
+#endif // !DACCESS_COMPILE
+
+void SigTypeContext::InitTypeContext(FieldDesc *pFD, TypeHandle declaringType, SigTypeContext *pRes)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+
+ PRECONDITION(CheckPointer(declaringType, NULL_OK));
+ PRECONDITION(CheckPointer(pFD));
+ } CONTRACTL_END;
+    InitTypeContext(pFD->GetExactClassInstantiation(declaringType), Instantiation(), pRes);
+}
+
+
+void SigTypeContext::InitTypeContext(TypeHandle th, SigTypeContext *pRes)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ if (th.IsNull())
+ {
+ InitTypeContext(pRes);
+ }
+ else if (th.GetMethodTable()->IsArray())
+ {
+ InitTypeContext(th.GetMethodTable()->GetClassOrArrayInstantiation(), Instantiation(), pRes);
+ }
+ else
+ {
+ InitTypeContext(th.GetInstantiation(), Instantiation(), pRes);
+ }
+}
+
+
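+// Illustrative use (hypothetical caller):
+//     SigTypeContext ctx;    // default-constructed, i.e. empty
+//     const SigTypeContext *pCtx = SigTypeContext::GetOptionalTypeContext(pMD, TypeHandle(), &ctx);
+//     // pCtx is NULL for non-generic methods and &ctx otherwise; both forms are
+//     // valid to pass on to signature-walking code.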
+const SigTypeContext * SigTypeContext::GetOptionalTypeContext(MethodDesc *md, TypeHandle declaringType, SigTypeContext *pRes)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(md);
+ if (md->HasClassOrMethodInstantiation() || md->GetMethodTable()->IsArray())
+ {
+ SigTypeContext::InitTypeContext(md, declaringType,pRes);
+ return pRes;
+ }
+ else
+ {
+ _ASSERTE(pRes->m_classInst.IsEmpty());
+ _ASSERTE(pRes->m_methodInst.IsEmpty());
+ return NULL;
+ }
+}
+
+const SigTypeContext * SigTypeContext::GetOptionalTypeContext(TypeHandle th, SigTypeContext *pRes)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (!th.IsNull());
+ if (th.HasInstantiation() || th.GetMethodTable()->IsArray())
+ {
+ SigTypeContext::InitTypeContext(th,pRes);
+ return pRes;
+ }
+ else
+ {
+ // It should already have been null-initialized when allocated on the stack.
+ _ASSERTE(pRes->m_classInst.IsEmpty());
+ _ASSERTE(pRes->m_methodInst.IsEmpty());
+ return NULL;
+ }
+}
+
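+// Illustration: for class C<T> with method M<U>(), the typical context is
+// ({T}, {U}). A valid type-only instantiation fixes the class arguments while
+// keeping the typical method arguments, e.g. ({String}, {U}); ({String}, {Int32})
+// is not type-only because the method argument is no longer the generic variable.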
+BOOL SigTypeContext::IsValidTypeOnlyInstantiationOf(const SigTypeContext *pCtxTypicalMethodInstantiation, const SigTypeContext *pCtxTypeOnlyInstantiation)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Compare class inst counts
+ if (pCtxTypicalMethodInstantiation->m_classInst.GetNumArgs() != pCtxTypeOnlyInstantiation->m_classInst.GetNumArgs())
+ return FALSE;
+
+ // Compare method inst counts
+ if (pCtxTypicalMethodInstantiation->m_methodInst.GetNumArgs() != pCtxTypeOnlyInstantiation->m_methodInst.GetNumArgs())
+ return FALSE;
+
+ DWORD i;
+
+ // Ensure that no type variables are part of the instantiation of the generic type
+ for (i = 0; i < pCtxTypicalMethodInstantiation->m_classInst.GetNumArgs(); i++) {
+ if (pCtxTypeOnlyInstantiation->m_classInst[i].IsGenericVariable())
+ return FALSE;
+ }
+
+ // Compare method inst values to ensure they represent the same generic method parameters
+ for (i = 0; i < pCtxTypicalMethodInstantiation->m_methodInst.GetNumArgs(); i++) {
+ _ASSERTE(pCtxTypicalMethodInstantiation->m_methodInst[i].IsGenericVariable());
+
+ if (pCtxTypicalMethodInstantiation->m_methodInst[i] != pCtxTypeOnlyInstantiation->m_methodInst[i])
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+BOOL SigTypeContext::Equal(const SigTypeContext *pCtx1, const SigTypeContext *pCtx2)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Compare class inst counts
+ if (pCtx1->m_classInst.GetNumArgs() != pCtx2->m_classInst.GetNumArgs())
+ return FALSE;
+
+ // Compare method inst counts
+ if (pCtx1->m_methodInst.GetNumArgs() != pCtx2->m_methodInst.GetNumArgs())
+ return FALSE;
+
+ DWORD i;
+
+ // Compare class inst values
+ for (i = 0; i < pCtx1->m_classInst.GetNumArgs(); i++) {
+ if (pCtx1->m_classInst[i] != pCtx2->m_classInst[i])
+ return FALSE;
+ }
+
+ // Compare method inst values
+ for (i = 0; i < pCtx1->m_methodInst.GetNumArgs(); i++) {
+ if (pCtx1->m_methodInst[i] != pCtx2->m_methodInst[i])
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
diff --git a/src/vm/typectxt.h b/src/vm/typectxt.h
new file mode 100644
index 0000000000..eef652aca7
--- /dev/null
+++ b/src/vm/typectxt.h
@@ -0,0 +1,192 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// typectxt.h
+//
+
+//
+
+#ifndef _H_TYPECTXT
+#define _H_TYPECTXT
+
+//------------------------------------------------------------------------
+// A signature type context gives the information necessary to interpret
+// the ELEMENT_TYPE_VAR and ELEMENT_TYPE_MVAR elements of a regular
+// metadata signature. These are usually stack allocated at appropriate
+// points where the SigPointer objects are created, or are allocated
+// inside a MetaSig (which are themselves normally stack allocated)
+//
+// They are normally passed as "const SigTypeContext *".
+//------------------------------------------------------------------------
+
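+// Illustration: within the signature of a method M<U>(T t, U u) on a generic
+// class C<T>, ELEMENT_TYPE_VAR 0 resolves through m_classInst[0] (the argument
+// supplied for T) and ELEMENT_TYPE_MVAR 0 through m_methodInst[0] (the argument
+// supplied for U).
+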
+class SigTypeContext
+{
+public:
+ // Store pointers first and DWORDs second to ensure good packing on 64-bit
+ Instantiation m_classInst;
+ Instantiation m_methodInst;
+
+ // Default constructor for non-generic code
+ inline SigTypeContext()
+ { WRAPPER_NO_CONTRACT; InitTypeContext(this); }
+
+
+ // Initialize a type context given instantiations.
+ inline SigTypeContext(Instantiation classInst, Instantiation methodInst)
+ { WRAPPER_NO_CONTRACT; InitTypeContext(classInst, methodInst, this); }
+
+
+ // Initialize a type context from a MethodDesc. If this is a MethodDesc that gets
+ // shared between generic instantiations (e.g. one being jitted by a code-sharing JIT)
+ // and a null declaring Type is passed then the type context will
+ // be a representative context, not an exact one.
+ // This is sufficient for most purposes, e.g. GC and field layout, because
+ // these operations are "parametric", i.e. behave the same for all shared types.
+ //
+ // If declaringType is non-null, then the MethodDesc is assumed to be
+ // shared between generic classes, and the type handle is used to give the
+ // exact type context. The method should be one of the methods supported by the
+ // given type handle.
+ //
+ // If the method is a method in an array type then the type context will
+ // contain one item in the class instantiation corresponding to the
+ // element type of the array.
+ //
+ // Finally, exactMethodInst should be specified if md might represent a generic method definition,
+ // as type parameters are not always available off the method desc for generic method definitions without
+ // forcing a load. Typically the caller will use MethodDesc::LoadMethodInstantiation.
+ inline SigTypeContext(MethodDesc *md)
+ { WRAPPER_NO_CONTRACT; InitTypeContext(md,this); }
+
+ inline SigTypeContext(MethodDesc *md, TypeHandle declaringType)
+ { WRAPPER_NO_CONTRACT; InitTypeContext(md,declaringType,this); }
+
+ inline SigTypeContext(MethodDesc *md, TypeHandle declaringType, Instantiation exactMethodInst)
+ { WRAPPER_NO_CONTRACT; InitTypeContext(md,declaringType,exactMethodInst,this); }
+
+ // This is similar to the one above except that exact
+ // instantiations are provided explicitly.
+ // This will only normally be used when the code is shared
+ // between generic instantiations and after fetching the
+ // exact instantiations from the stack.
+ //
+ inline SigTypeContext(MethodDesc *md, Instantiation exactClassInst, Instantiation exactMethodInst)
+ { WRAPPER_NO_CONTRACT; InitTypeContext(md,exactClassInst,exactMethodInst,this); }
+
+ // Initialize a type context from a type handle. This is used when
+    // generating the type context for any of the metadata in the class
+    // covered by the type handle, apart from the metadata for any generic
+    // methods in the class.
+ // If the type handle satisfies th.IsNull() then the created type context
+ // will be empty.
+ inline SigTypeContext(TypeHandle th)
+ { WRAPPER_NO_CONTRACT; InitTypeContext(th,this); }
+
+ inline SigTypeContext(FieldDesc *pFD, TypeHandle declaringType = TypeHandle())
+ { WRAPPER_NO_CONTRACT; InitTypeContext(pFD,declaringType,this); }
+
+    // Copy constructor - try not to use this. The C++ compiler does not do a good
+    // job with copy-constructor based code, and we've had perf regressions when
+    // using this too much for these simple objects. Use an explicit call to
+    // InitTypeContext instead, or use GetOptionalTypeContext.
+ inline SigTypeContext(const SigTypeContext &c)
+ { WRAPPER_NO_CONTRACT; InitTypeContext(&c,this); }
+
+    // Copy constructor from a possibly-NULL pointer.
+ inline SigTypeContext(const SigTypeContext *c)
+ { WRAPPER_NO_CONTRACT; InitTypeContext(c,this); }
+
+ static void InitTypeContext(MethodDesc *md, SigTypeContext *pRes);
+ static void InitTypeContext(MethodDesc *md, TypeHandle declaringType, SigTypeContext *pRes);
+ static void InitTypeContext(MethodDesc *md, TypeHandle declaringType, Instantiation exactMethodInst, SigTypeContext *pRes);
+ static void InitTypeContext(MethodDesc *md, Instantiation exactClassInst, Instantiation exactMethodInst, SigTypeContext *pRes);
+ static void InitTypeContext(TypeHandle th, SigTypeContext *pRes);
+ static void InitTypeContext(FieldDesc *pFD, TypeHandle declaringType, SigTypeContext *pRes);
+ inline static void InitTypeContext(Instantiation classInst, Instantiation methodInst, SigTypeContext *pRes);
+ inline static void InitTypeContext(SigTypeContext *);
+ inline static void InitTypeContext(const SigTypeContext *c, SigTypeContext *pRes);
+
+ // These are allowed to return NULL if an empty type context is generated. The NULL value
+ // can then be passed around to represent the empty type context.
+ // pRes must be non-null.
+ // pRes must initially be zero-initialized, e.g. by the default SigTypeContext constructor.
+ static const SigTypeContext * GetOptionalTypeContext(MethodDesc *md, TypeHandle declaringType, SigTypeContext *pRes);
+ static const SigTypeContext * GetOptionalTypeContext(TypeHandle th, SigTypeContext *pRes);
+
+    // SigTypeContexts are used as part of keys for various data structures indexed by instantiation
+ static BOOL Equal(const SigTypeContext *pCtx1, const SigTypeContext *pCtx2);
+ static BOOL IsValidTypeOnlyInstantiationOf(const SigTypeContext *pCtxTypicalMethodInstantiation, const SigTypeContext *pCtxTypeOnlyInstantiation);
+ BOOL IsEmpty() const { LIMITED_METHOD_CONTRACT; return m_classInst.IsEmpty() && m_methodInst.IsEmpty(); }
+
+};
+
+inline void SigTypeContext::InitTypeContext(SigTypeContext *pRes)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+}
+
+inline void SigTypeContext::InitTypeContext(Instantiation classInst,
+ Instantiation methodInst,
+ SigTypeContext *pRes)
+{
+ LIMITED_METHOD_CONTRACT;
+ pRes->m_classInst = classInst;
+ pRes->m_methodInst = methodInst;
+}
+
+
+// Copy constructor from a possibly-NULL pointer.
+inline void SigTypeContext::InitTypeContext(const SigTypeContext *c,SigTypeContext *pRes)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (c)
+ {
+ pRes->m_classInst = c->m_classInst;
+ pRes->m_methodInst = c->m_methodInst;
+ }
+ else
+ {
+ pRes->m_classInst = Instantiation();
+ pRes->m_methodInst = Instantiation();
+ }
+}
+
+//------------------------------------------------------------------------
+// Encapsulates pointers to code:SigTypeContext and code:Substitution chain
+// that have been used to instantiate a generic type. The context is passed
+// as "const InstantiationContext *" from code:SigPointer.GetTypeHandleThrowing
+// down to code:TypeVarTypeDesc.SatisfiesConstraints where it is needed for
+// instantiating constraints attached to type arguments.
+//
+// The reason why we need to pass these pointers down to the code that verifies
+// that constraints are satisfied is the case when another type variable is
+// substituted for a type variable and this argument is constrained by a generic
+// type. In order to verify that constraints of the argument satisfy constraints
+// of the parameter, the argument constraints must be instantiated in the same
+// "instantiation context" as the original signature - and unfortunately this
+// context cannot be extracted from the rest of the information that we have
+// about the type that is being loaded.
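+//
+// As a rough illustration (hypothetical C# shapes, not code in this file):
+//
+//     class List<T> where T : IComparable<T> { }
+//     class Wrapper<U> where U : IComparable<U> { List<U> m_list; }
+//
+// Verifying List<U> substitutes the variable U for the variable T, and the
+// constraint IComparable<T> must be instantiated in Wrapper<U>'s context to
+// check it against U's own constraints.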
+//
+// See code:TypeVarTypeDesc.SatisfiesConstraints for more details and an
+// example scenario in which we are unable to verify constraints without this
+// context.
+//------------------------------------------------------------------------
+
+class InstantiationContext
+{
+public:
+ const SigTypeContext *m_pArgContext;
+ const Substitution *m_pSubstChain;
+
+ inline InstantiationContext(const SigTypeContext *pArgContext = NULL, const Substitution *pSubstChain = NULL)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_pArgContext = pArgContext;
+ m_pSubstChain = pSubstChain;
+ }
+};
+
+#endif
diff --git a/src/vm/typedesc.cpp b/src/vm/typedesc.cpp
new file mode 100644
index 0000000000..550332778f
--- /dev/null
+++ b/src/vm/typedesc.cpp
@@ -0,0 +1,2459 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: typedesc.cpp
+//
+
+
+//
+// This file contains definitions for methods in the code:TypeDesc class and its
+// subclasses
+// code:ParamTypeDesc,
+// code:ArrayTypeDesc,
+//      code:TypeVarTypeDesc,
+// code:FnPtrTypeDesc
+//
+
+//
+// ============================================================================
+
+#include "common.h"
+#include "typedesc.h"
+#include "typestring.h"
+#if defined(FEATURE_PREJIT)
+#include "compile.h"
+#endif
+#include "array.h"
+#include "stackprobe.h"
+
+
+#ifndef DACCESS_COMPILE
+#ifdef _DEBUG
+
+BOOL ParamTypeDesc::Verify() {
+
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+ STATIC_CONTRACT_DEBUG_ONLY;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+ _ASSERTE(m_TemplateMT.IsNull() || m_TemplateMT.GetValue()->SanityCheck());
+ _ASSERTE(!GetTypeParam().IsNull());
+ BAD_FORMAT_NOTHROW_ASSERT(GetTypeParam().IsTypeDesc() || !GetTypeParam().AsMethodTable()->IsArray());
+ BAD_FORMAT_NOTHROW_ASSERT(CorTypeInfo::IsModifier_NoThrow(GetInternalCorElementType()) ||
+ GetInternalCorElementType() == ELEMENT_TYPE_VALUETYPE);
+ GetTypeParam().Verify();
+ return(true);
+}
+
+BOOL ArrayTypeDesc::Verify() {
+
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+ STATIC_CONTRACT_DEBUG_ONLY;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+    // m_TemplateMT may be null when building types involving TypeVarTypeDescs
+ BAD_FORMAT_NOTHROW_ASSERT(m_TemplateMT.IsNull() || m_TemplateMT.GetValue()->IsArray());
+ BAD_FORMAT_NOTHROW_ASSERT(CorTypeInfo::IsArray_NoThrow(GetInternalCorElementType()));
+ ParamTypeDesc::Verify();
+ return(true);
+}
+
+#endif
+
+#endif // #ifndef DACCESS_COMPILE
+
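+// Strips all modifier TypeDescs to reach the innermost non-modified type:
+// e.g. (illustrative) for an array of pointers "int*[]" the chain unwinds
+// [] -> * -> int, so the TypeHandle for int is returned.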
+TypeHandle TypeDesc::GetBaseTypeParam()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(HasTypeParam());
+
+ TypeHandle th = dac_cast<PTR_ParamTypeDesc>(this)->GetTypeParam();
+ while (th.HasTypeParam())
+ {
+ th = dac_cast<PTR_ParamTypeDesc>(th.AsTypeDesc())->GetTypeParam();
+ }
+ _ASSERTE(!th.IsNull());
+
+ return th;
+}
+
+PTR_Module TypeDesc::GetLoaderModule()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ SUPPORTS_DAC;
+
+ if (HasTypeParam())
+ {
+ return GetBaseTypeParam().GetLoaderModule();
+ }
+ else if (IsGenericVariable())
+ {
+ return dac_cast<PTR_TypeVarTypeDesc>(this)->GetModule();
+ }
+ else
+ {
+ PTR_Module retVal = NULL;
+ BOOL fFail = FALSE;
+
+ _ASSERTE(GetInternalCorElementType() == ELEMENT_TYPE_FNPTR);
+ PTR_FnPtrTypeDesc asFnPtr = dac_cast<PTR_FnPtrTypeDesc>(this);
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), fFail = TRUE );
+ if (!fFail)
+ {
+ retVal = ClassLoader::ComputeLoaderModuleForFunctionPointer(asFnPtr->GetRetAndArgTypesPointer(), asFnPtr->GetNumArgs()+1);
+ }
+ END_SO_INTOLERANT_CODE;
+ return retVal;
+ }
+}
+
+
+PTR_Module TypeDesc::GetZapModule()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return ExecutionManager::FindZapModule(dac_cast<TADDR>(this));
+}
+
+PTR_BaseDomain TypeDesc::GetDomain()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ Module *pZapModule = GetZapModule();
+ if (pZapModule != NULL)
+ {
+ return pZapModule->GetDomain();
+ }
+
+ if (HasTypeParam())
+ {
+ return GetBaseTypeParam().GetDomain();
+ }
+ if (IsGenericVariable())
+ {
+ PTR_TypeVarTypeDesc asVar = dac_cast<PTR_TypeVarTypeDesc>(this);
+ return asVar->GetModule()->GetDomain();
+ }
+ _ASSERTE(GetInternalCorElementType() == ELEMENT_TYPE_FNPTR);
+ PTR_FnPtrTypeDesc asFnPtr = dac_cast<PTR_FnPtrTypeDesc>(this);
+ return BaseDomain::ComputeBaseDomain(asFnPtr->GetRetAndArgTypesPointer()[0].GetDomain(),
+ Instantiation(asFnPtr->GetRetAndArgTypesPointer(), asFnPtr->GetNumArgs()+1));
+}
+
+PTR_Module TypeDesc::GetModule() {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ // Function pointer types belong to no module
+ //PRECONDITION(GetInternalCorElementType() != ELEMENT_TYPE_FNPTR);
+ }
+ CONTRACTL_END
+
+ // Note here we are making the assumption that a typeDesc lives in
+ // the classloader of its element type.
+
+ if (HasTypeParam())
+ {
+ return GetBaseTypeParam().GetModule();
+ }
+
+ if (IsGenericVariable())
+ {
+ PTR_TypeVarTypeDesc asVar = dac_cast<PTR_TypeVarTypeDesc>(this);
+ return asVar->GetModule();
+ }
+
+ _ASSERTE(GetInternalCorElementType() == ELEMENT_TYPE_FNPTR);
+
+ return GetLoaderModule();
+}
+
+BOOL TypeDesc::IsDomainNeutral()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ return GetDomain()->IsSharedDomain();
+}
+
+BOOL ParamTypeDesc::OwnsTemplateMethodTable()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ CorElementType kind = GetInternalCorElementType();
+
+ // The m_TemplateMT for pointer types is UIntPtr
+ if (!CorTypeInfo::IsArray_NoThrow(kind))
+ {
+ return FALSE;
+ }
+
+ CorElementType elemType = m_Arg.GetSignatureCorElementType();
+
+    // This check matches precisely the one in Module::CreateArrayMethodTable
+    //
+    // It indicates whether an array TypeDesc is non-canonical (in much the same
+    // way as a generic method table can be non-canonical), i.e. it is not the
+    // primary owner of the m_TemplateMT (the primary owner is the TypeDesc for
+    // object[])
+
+ if (CorTypeInfo::IsGenericVariable_NoThrow(elemType))
+ {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+Assembly* TypeDesc::GetAssembly() {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ Module *pModule = GetModule();
+ PREFIX_ASSUME(pModule!=NULL);
+ return pModule->GetAssembly();
+}
+
+void TypeDesc::GetName(SString &ssBuf)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ CorElementType kind = GetInternalCorElementType();
+ TypeHandle th;
+ int rank;
+
+ if (CorTypeInfo::IsModifier(kind))
+ th = GetTypeParam();
+ else
+ th = TypeHandle(this);
+
+ if (kind == ELEMENT_TYPE_ARRAY)
+ rank = ((ArrayTypeDesc*) this)->GetRank();
+ else if (CorTypeInfo::IsGenericVariable(kind))
+ rank = ((TypeVarTypeDesc*) this)->GetIndex();
+ else
+ rank = 0;
+
+ ConstructName(kind, th, rank, ssBuf);
+}
+
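+// Illustrative outputs (derived from the cases below): an SZARRAY of
+// System.Int32 yields "System.Int32[]"; an ARRAY of rank 2 yields "[,]"
+// (rank 1 yields "[*]"); BYREF yields "&"; PTR yields "*"; VAR index 0
+// yields "!0"; MVAR index 0 yields "!!0"; FNPTR yields "FNPTR".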
+void TypeDesc::ConstructName(CorElementType kind,
+ TypeHandle param,
+ int rank,
+ SString &ssBuff)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM()); // SString operations can allocate.
+ }
+ CONTRACTL_END
+
+ if (CorTypeInfo::IsModifier(kind))
+ {
+ param.GetName(ssBuff);
+ }
+
+ switch(kind)
+ {
+ case ELEMENT_TYPE_BYREF:
+ ssBuff.Append(W('&'));
+ break;
+
+ case ELEMENT_TYPE_PTR:
+ ssBuff.Append(W('*'));
+ break;
+
+ case ELEMENT_TYPE_SZARRAY:
+ ssBuff.Append(W("[]"));
+ break;
+
+ case ELEMENT_TYPE_ARRAY:
+ ssBuff.Append(W('['));
+
+ if (rank == 1)
+ {
+ ssBuff.Append(W('*'));
+ }
+ else
+ {
+ while(--rank > 0)
+ {
+ ssBuff.Append(W(','));
+ }
+ }
+
+ ssBuff.Append(W(']'));
+ break;
+
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ if (kind == ELEMENT_TYPE_VAR)
+ {
+ ssBuff.Printf(W("!%d"), rank);
+ }
+ else
+ {
+ ssBuff.Printf(W("!!%d"), rank);
+ }
+ break;
+
+ case ELEMENT_TYPE_FNPTR:
+ ssBuff.Printf(W("FNPTR"));
+ break;
+
+ default:
+ LPCUTF8 namesp = CorTypeInfo::GetNamespace(kind);
+ if(namesp && *namesp) {
+ ssBuff.AppendUTF8(namesp);
+ ssBuff.Append(W('.'));
+ }
+
+ LPCUTF8 name = CorTypeInfo::GetName(kind);
+ BAD_FORMAT_NOTHROW_ASSERT(name);
+ if (name && *name) {
+ ssBuff.AppendUTF8(name);
+ }
+ }
+}
+
+BOOL TypeDesc::IsArray()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return CorTypeInfo::IsArray_NoThrow(GetInternalCorElementType());
+}
+
+BOOL TypeDesc::IsGenericVariable()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return CorTypeInfo::IsGenericVariable_NoThrow(GetInternalCorElementType());
+}
+
+BOOL TypeDesc::IsFnPtr()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (GetInternalCorElementType() == ELEMENT_TYPE_FNPTR);
+}
+
+BOOL TypeDesc::IsNativeValueType()
+{
+ WRAPPER_NO_CONTRACT;
+ return (GetInternalCorElementType() == ELEMENT_TYPE_VALUETYPE);
+}
+
+BOOL TypeDesc::HasTypeParam()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return CorTypeInfo::IsModifier_NoThrow(GetInternalCorElementType()) ||
+ GetInternalCorElementType() == ELEMENT_TYPE_VALUETYPE;
+}
+
+#ifndef DACCESS_COMPILE
+
+BOOL TypeDesc::CanCastTo(TypeHandle toType, TypeHandlePairList *pVisited)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ if (TypeHandle(this) == toType)
+ return TRUE;
+
+    // A boxed variable type can be cast to any of its constraints, or object, if none are specified
+ if (IsGenericVariable())
+ {
+ TypeVarTypeDesc *tyvar = (TypeVarTypeDesc*) this;
+
+ DWORD numConstraints;
+ TypeHandle *constraints = tyvar->GetConstraints(&numConstraints, CLASS_DEPENDENCIES_LOADED);
+
+ if (toType == g_pObjectClass)
+ return TRUE;
+
+ if (toType == g_pValueTypeClass)
+ {
+ mdGenericParam genericParamToken = tyvar->GetToken();
+ DWORD flags;
+ if (FAILED(tyvar->GetModule()->GetMDImport()->GetGenericParamProps(genericParamToken, NULL, &flags, NULL, NULL, NULL)))
+ {
+ return FALSE;
+ }
+ DWORD specialConstraints = flags & gpSpecialConstraintMask;
+ if ((specialConstraints & gpNotNullableValueTypeConstraint) != 0)
+ return TRUE;
+ }
+
+ if (constraints == NULL)
+ return FALSE;
+
+ for (DWORD i = 0; i < numConstraints; i++)
+ {
+ if (constraints[i].CanCastTo(toType, pVisited))
+ return TRUE;
+ }
+ return FALSE;
+ }
+
+ // If we're not casting to a TypeDesc (i.e. not to a reference array type, variable type etc.)
+ // then we must be trying to cast to a class or interface type.
+ if (!toType.IsTypeDesc())
+ {
+ MethodTable *pMT = GetMethodTable();
+ if (pMT == 0) {
+ // I don't have an underlying method table, therefore I'm
+ // a variable type, pointer type, function pointer type
+ // etc. I am not an object or value type. Therefore
+ // I can't be cast to an object or value type.
+ return FALSE;
+ }
+
+        // This does the right thing if 'type' == System.Array or System.Object, System.ICloneable ...
+ if (pMT->CanCastToClassOrInterface(toType.AsMethodTable(), pVisited) != 0)
+ {
+ return TRUE;
+ }
+
+ if (IsArray() && toType.AsMethodTable()->IsInterface())
+ {
+ if (ArraySupportsBizarreInterface((ArrayTypeDesc*)this, toType.AsMethodTable()))
+ {
+ return TRUE;
+ }
+
+ }
+
+ return FALSE;
+ }
+
+ TypeDesc* toTypeDesc = toType.AsTypeDesc();
+
+ CorElementType toKind = toTypeDesc->GetInternalCorElementType();
+ CorElementType fromKind = GetInternalCorElementType();
+
+    // The element kinds must match; the only exception is that SZARRAY matches a one-dimensional ARRAY
+ if (!(toKind == fromKind || (toKind == ELEMENT_TYPE_ARRAY && fromKind == ELEMENT_TYPE_SZARRAY)))
+ return FALSE;
+
+ switch (toKind)
+ {
+ case ELEMENT_TYPE_ARRAY:
+ if (dac_cast<PTR_ArrayTypeDesc>(this)->GetRank() != dac_cast<PTR_ArrayTypeDesc>(toTypeDesc)->GetRank())
+ return FALSE;
+ // fall through
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_PTR:
+ return TypeDesc::CanCastParam(dac_cast<PTR_ParamTypeDesc>(this)->GetTypeParam(), dac_cast<PTR_ParamTypeDesc>(toTypeDesc)->GetTypeParam(), pVisited);
+
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ case ELEMENT_TYPE_FNPTR:
+ return FALSE;
+
+ default:
+ BAD_FORMAT_NOTHROW_ASSERT(toKind == ELEMENT_TYPE_TYPEDBYREF || CorTypeInfo::IsPrimitiveType(toKind));
+ return TRUE;
+ }
+}
+
+BOOL TypeDesc::CanCastParam(TypeHandle fromParam, TypeHandle toParam, TypeHandlePairList *pVisited)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+    // While boxed value classes inherit from object, their
+    // unboxed versions do not. Parameterized types have the
+    // unboxed version; thus, if the "from" type parameter is a value
+    // class, then only an exact match/equivalence works.
+ if (fromParam.IsEquivalentTo(toParam))
+ return TRUE;
+
+    // Object parameters don't need an exact match, only inheritance; check for that
+ CorElementType fromParamCorType = fromParam.GetVerifierCorElementType();
+ if (CorTypeInfo::IsObjRef(fromParamCorType))
+ {
+ return fromParam.CanCastTo(toParam, pVisited);
+ }
+ else if (CorTypeInfo::IsGenericVariable(fromParamCorType))
+ {
+ TypeVarTypeDesc* varFromParam = fromParam.AsGenericVariable();
+
+ if (!varFromParam->ConstraintsLoaded())
+ varFromParam->LoadConstraints(CLASS_DEPENDENCIES_LOADED);
+
+ if (!varFromParam->ConstrainedAsObjRef())
+ return FALSE;
+
+ return fromParam.CanCastTo(toParam, pVisited);
+ }
+ else if(CorTypeInfo::IsPrimitiveType(fromParamCorType))
+ {
+ CorElementType toParamCorType = toParam.GetVerifierCorElementType();
+ if(CorTypeInfo::IsPrimitiveType(toParamCorType))
+ {
+ if (toParamCorType == fromParamCorType)
+ return TRUE;
+
+            // Primitive types such as E_T_I4 and E_T_U4 are interchangeable.
+            // Enums with interchangeable underlying types are interchangeable.
+            // BOOL is NOT interchangeable with I1/U1, nor is CHAR with I2/U2.
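+            // For example (illustrative): I4 -> U4 succeeds (same size, both
+            // integral), I4 -> R4 fails (IsFloat differs), and BOOLEAN -> I1
+            // fails because BOOLEAN is excluded above.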
+ if((toParamCorType != ELEMENT_TYPE_BOOLEAN)
+ &&(fromParamCorType != ELEMENT_TYPE_BOOLEAN)
+ &&(toParamCorType != ELEMENT_TYPE_CHAR)
+ &&(fromParamCorType != ELEMENT_TYPE_CHAR))
+ {
+ if ((CorTypeInfo::Size(toParamCorType) == CorTypeInfo::Size(fromParamCorType))
+ && (CorTypeInfo::IsFloat(toParamCorType) == CorTypeInfo::IsFloat(fromParamCorType)))
+ {
+ return TRUE;
+ }
+ }
+ } // end if(CorTypeInfo::IsPrimitiveType(toParamCorType))
+ } // end if(CorTypeInfo::IsPrimitiveType(fromParamCorType))
+
+ // Anything else is not a match.
+ return FALSE;
+}
+
+TypeHandle::CastResult TypeDesc::CanCastToNoGC(TypeHandle toType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+ if (TypeHandle(this) == toType)
+ return TypeHandle::CanCast;
+
+    // A boxed variable type can be cast to any of its constraints, or object, if none are specified
+ if (IsGenericVariable())
+ {
+ TypeVarTypeDesc *tyvar = (TypeVarTypeDesc*) this;
+
+ if (!tyvar->ConstraintsLoaded())
+ return TypeHandle::MaybeCast;
+
+ DWORD numConstraints;
+ TypeHandle *constraints = tyvar->GetCachedConstraints(&numConstraints);
+
+ if (toType == g_pObjectClass)
+ return TypeHandle::CanCast;
+
+ if (toType == g_pValueTypeClass)
+ return TypeHandle::MaybeCast;
+
+ if (constraints == NULL)
+ return TypeHandle::CannotCast;
+
+ for (DWORD i = 0; i < numConstraints; i++)
+ {
+ if (constraints[i].CanCastToNoGC(toType) == TypeHandle::CanCast)
+ return TypeHandle::CanCast;
+ }
+ return TypeHandle::MaybeCast;
+ }
+
+ // If we're not casting to a TypeDesc (i.e. not to a reference array type, variable type etc.)
+ // then we must be trying to cast to a class or interface type.
+ if (!toType.IsTypeDesc())
+ {
+ MethodTable *pMT = GetMethodTable();
+ if (pMT == 0) {
+ // I don't have an underlying method table, therefore I'm
+ // a variable type, pointer type, function pointer type
+ // etc. I am not an object or value type. Therefore
+ // I can't be cast to an object or value type.
+ return TypeHandle::CannotCast;
+ }
+
+        // This does the right thing if 'type' == System.Array or System.Object, System.ICloneable ...
+ return pMT->CanCastToClassOrInterfaceNoGC(toType.AsMethodTable());
+ }
+
+ TypeDesc* toTypeDesc = toType.AsTypeDesc();
+
+ CorElementType toKind = toTypeDesc->GetInternalCorElementType();
+ CorElementType fromKind = GetInternalCorElementType();
+
+    // The element kinds must match; the only exception is that SZARRAY matches a one-dimensional ARRAY
+ if (!(toKind == fromKind || (toKind == ELEMENT_TYPE_ARRAY && fromKind == ELEMENT_TYPE_SZARRAY)))
+ return TypeHandle::CannotCast;
+
+ switch (toKind)
+ {
+ case ELEMENT_TYPE_ARRAY:
+ if (dac_cast<PTR_ArrayTypeDesc>(this)->GetRank() != dac_cast<PTR_ArrayTypeDesc>(toTypeDesc)->GetRank())
+ return TypeHandle::CannotCast;
+ // fall through
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_PTR:
+ return TypeDesc::CanCastParamNoGC(dac_cast<PTR_ParamTypeDesc>(this)->GetTypeParam(), dac_cast<PTR_ParamTypeDesc>(toTypeDesc)->GetTypeParam());
+
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ case ELEMENT_TYPE_FNPTR:
+ return TypeHandle::CannotCast;
+
+ default:
+ BAD_FORMAT_NOTHROW_ASSERT(toKind == ELEMENT_TYPE_TYPEDBYREF || CorTypeInfo::IsPrimitiveType_NoThrow(toKind));
+ return TypeHandle::CanCast;
+ }
+}
+
+TypeHandle::CastResult TypeDesc::CanCastParamNoGC(TypeHandle fromParam, TypeHandle toParam)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END
+
+    // While boxed value classes inherit from object, their
+    // unboxed versions do not. Parameterized types have the
+    // unboxed version; thus, if the "from" type parameter is a value
+    // class, then only an exact match works.
+ if (fromParam == toParam)
+ return TypeHandle::CanCast;
+
+    // Object parameters don't need an exact match, only inheritance; check for that
+ CorElementType fromParamCorType = fromParam.GetVerifierCorElementType();
+ if (CorTypeInfo::IsObjRef_NoThrow(fromParamCorType))
+ {
+ return fromParam.CanCastToNoGC(toParam);
+ }
+ else if (CorTypeInfo::IsGenericVariable_NoThrow(fromParamCorType))
+ {
+ TypeVarTypeDesc* varFromParam = fromParam.AsGenericVariable();
+
+ if (!varFromParam->ConstraintsLoaded())
+ return TypeHandle::MaybeCast;
+
+ if (!varFromParam->ConstrainedAsObjRef())
+ return TypeHandle::CannotCast;
+
+ return fromParam.CanCastToNoGC(toParam);
+ }
+ else if (CorTypeInfo::IsPrimitiveType_NoThrow(fromParamCorType))
+ {
+ CorElementType toParamCorType = toParam.GetVerifierCorElementType();
+ if(CorTypeInfo::IsPrimitiveType_NoThrow(toParamCorType))
+ {
+ if (toParamCorType == fromParamCorType)
+ return TypeHandle::CanCast;
+
+            // Primitive types such as E_T_I4 and E_T_U4 are interchangeable.
+            // Enums with interchangeable underlying types are interchangeable.
+            // BOOL is NOT interchangeable with I1/U1, nor is CHAR with I2/U2.
+ if((toParamCorType != ELEMENT_TYPE_BOOLEAN)
+ &&(fromParamCorType != ELEMENT_TYPE_BOOLEAN)
+ &&(toParamCorType != ELEMENT_TYPE_CHAR)
+ &&(fromParamCorType != ELEMENT_TYPE_CHAR))
+ {
+ if ((CorTypeInfo::Size_NoThrow(toParamCorType) == CorTypeInfo::Size_NoThrow(fromParamCorType))
+ && (CorTypeInfo::IsFloat_NoThrow(toParamCorType) == CorTypeInfo::IsFloat_NoThrow(fromParamCorType)))
+ {
+ return TypeHandle::CanCast;
+ }
+ }
+ } // end if(CorTypeInfo::IsPrimitiveType(toParamCorType))
+ } // end if(CorTypeInfo::IsPrimitiveType(fromParamCorType))
+ else
+ {
+ // Types with equivalence need the slow path
+ MethodTable * pFromMT = fromParam.GetMethodTable();
+ if (pFromMT != NULL && pFromMT->HasTypeEquivalence())
+ return TypeHandle::MaybeCast;
+ MethodTable * pToMT = toParam.GetMethodTable();
+ if (pToMT != NULL && pToMT->HasTypeEquivalence())
+ return TypeHandle::MaybeCast;
+ }
+
+ // Anything else is not a match.
+ return TypeHandle::CannotCast;
+}
+
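+// Note: type equivalence (COM interop's "embedded interop types" feature) can
+// make two distinct TypeDescs equivalent, e.g. (illustrative) A[] and B[]
+// where A and B are equivalent TypeIdentifier-tagged structs defined in two
+// different assemblies.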
+BOOL TypeDesc::IsEquivalentTo(TypeHandle type COMMA_INDEBUG(TypeHandlePairList *pVisited))
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (TypeHandle(this) == type)
+ return TRUE;
+
+ if (!type.IsTypeDesc())
+ return FALSE;
+
+ TypeDesc *pOther = type.AsTypeDesc();
+
+ // bail early for normal types
+ if (!HasTypeEquivalence() || !pOther->HasTypeEquivalence())
+ return FALSE;
+
+ // if the TypeDesc types are different, then they are not equivalent
+ if (GetInternalCorElementType() != pOther->GetInternalCorElementType())
+ return FALSE;
+
+ if (HasTypeParam())
+ {
+ // pointer, byref, array
+
+ // Arrays must have the same rank.
+ if (IsArray())
+ {
+ ArrayTypeDesc *pThisArray = (ArrayTypeDesc *)this;
+ ArrayTypeDesc *pOtherArray = (ArrayTypeDesc *)pOther;
+ if (pThisArray->GetRank() != pOtherArray->GetRank())
+ return FALSE;
+ }
+
+ return GetTypeParam().IsEquivalentTo(pOther->GetTypeParam() COMMA_INDEBUG(pVisited));
+ }
+
+ // var, mvar, fnptr
+ return FALSE;
+}
+#endif // #ifndef DACCESS_COMPILE
+
+
+
+TypeHandle TypeDesc::GetParent() {
+
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ CorElementType kind = GetInternalCorElementType();
+ if (CorTypeInfo::IsArray_NoThrow(kind)) {
+ _ASSERTE(IsArray());
+ BAD_FORMAT_NOTHROW_ASSERT(kind == ELEMENT_TYPE_SZARRAY || kind == ELEMENT_TYPE_ARRAY);
+ return ((ArrayTypeDesc*)this)->GetParent();
+ }
+ if (CorTypeInfo::IsPrimitiveType_NoThrow(kind))
+ return (MethodTable*)g_pObjectClass;
+ return TypeHandle();
+}
+
+#ifndef DACCESS_COMPILE
+
+#ifndef CROSSGEN_COMPILE
+OBJECTREF ParamTypeDesc::GetManagedClassObject()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+
+ INJECT_FAULT(COMPlusThrowOM());
+
+ PRECONDITION(GetInternalCorElementType() == ELEMENT_TYPE_ARRAY ||
+ GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY ||
+ GetInternalCorElementType() == ELEMENT_TYPE_BYREF ||
+ GetInternalCorElementType() == ELEMENT_TYPE_PTR);
+ }
+ CONTRACTL_END;
+
+ if (m_hExposedClassObject == NULL) {
+ REFLECTCLASSBASEREF refClass = NULL;
+ GCPROTECT_BEGIN(refClass);
+ if (GetAssembly()->IsIntrospectionOnly())
+ refClass = (REFLECTCLASSBASEREF) AllocateObject(MscorlibBinder::GetClass(CLASS__CLASS_INTROSPECTION_ONLY));
+ else
+ refClass = (REFLECTCLASSBASEREF) AllocateObject(g_pRuntimeTypeClass);
+
+ LoaderAllocator *pLoaderAllocator = GetLoaderAllocator();
+ TypeHandle th = TypeHandle(this);
+ ((ReflectClassBaseObject*)OBJECTREFToObject(refClass))->SetType(th);
+ ((ReflectClassBaseObject*)OBJECTREFToObject(refClass))->SetKeepAlive(pLoaderAllocator->GetExposedObject());
+
+ // Let all threads fight over who wins using InterlockedCompareExchange.
+ // Only the winner can set m_hExposedClassObject from NULL.
+ LOADERHANDLE hExposedClassObject = pLoaderAllocator->AllocateHandle(refClass);
+
+ EnsureWritablePages(this);
+ if (FastInterlockCompareExchangePointer(&m_hExposedClassObject, hExposedClassObject, static_cast<LOADERHANDLE>(NULL)))
+ {
+ pLoaderAllocator->ClearHandle(hExposedClassObject);
+ }
+
+        // Log the ParamTypeDesc access
+ g_IBCLogger.LogTypeMethodTableWriteableAccess(&th);
+
+ GCPROTECT_END();
+ }
+ return GetManagedClassObjectIfExists();
+}
+#endif // CROSSGEN_COMPILE
+
+#endif // #ifndef DACCESS_COMPILE
+
+BOOL TypeDesc::IsRestored()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+ SUPPORTS_DAC;
+
+ TypeHandle th = TypeHandle(this);
+ g_IBCLogger.LogTypeMethodTableAccess(&th);
+ return IsRestored_NoLogging();
+}
+
+BOOL TypeDesc::IsRestored_NoLogging()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+ SUPPORTS_DAC;
+
+ return (m_typeAndFlags & TypeDesc::enum_flag_Unrestored) == 0;
+}
+
+ClassLoadLevel TypeDesc::GetLoadLevel()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ SUPPORTS_DAC;
+
+ if (m_typeAndFlags & TypeDesc::enum_flag_UnrestoredTypeKey)
+ {
+ return CLASS_LOAD_UNRESTOREDTYPEKEY;
+ }
+ else if (m_typeAndFlags & TypeDesc::enum_flag_Unrestored)
+ {
+ return CLASS_LOAD_UNRESTORED;
+ }
+ else if (m_typeAndFlags & TypeDesc::enum_flag_IsNotFullyLoaded)
+ {
+ if (m_typeAndFlags & TypeDesc::enum_flag_DependenciesLoaded)
+ {
+ return CLASS_DEPENDENCIES_LOADED;
+ }
+ else
+ {
+ return CLASS_LOAD_EXACTPARENTS;
+ }
+ }
+
+ return CLASS_LOADED;
+}
+
+
+// Recursive worker that pumps the transitive closure of a type's dependencies to the specified target level.
+// Dependencies include:
+//
+// - parent
+// - interfaces
+// - canonical type, for non-canonical instantiations
+// - typical type, for non-typical instantiations
+//
+// Parameters:
+//
+// pVisited - used to prevent endless recursion in the case of cyclic dependencies
+//
+// level - target level to pump to - must be CLASS_DEPENDENCIES_LOADED or CLASS_LOADED
+//
+// if CLASS_DEPENDENCIES_LOADED, all transitive dependencies are resolved to their
+// exact types.
+//
+// if CLASS_LOADED, all type-safety checks are done on the type and all its transitive
+// dependencies. Note that for the CLASS_LOADED case, some types may be left
+// on the pending list rather than pushed to CLASS_LOADED in the case of cyclic
+// dependencies - the root caller must handle this.
+//
+//
+// pfBailed - if we or one of our dependencies bails early due to cyclic dependencies, we
+// must set *pfBailed to TRUE. Otherwise, we must *leave it unchanged* (thus, the
+// boolean acts as a cumulative OR.)
+//
+// pPending - if one of our dependencies bailed, the type cannot yet be promoted to CLASS_LOADED
+// as the dependencies will be checked later and may fail a security check then.
+// Instead, DoFullyLoad() will add the type to the pending list - the root caller
+// is responsible for promoting the type after the full transitive closure has been
+// walked. Note that it would be just as correct to always defer to the pending list -
+// however, that is a little less performant.
+//
+// pInstContext - instantiation context created in code:SigPointer.GetTypeHandleThrowing and ultimately
+// passed down to code:TypeVarTypeDesc.SatisfiesConstraints.
+//
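+// As a (hypothetical) illustration of a cyclic dependency:
+//
+//     class A<T> : I<B<T>> { }
+//     class B<T> : I<A<T>> { }
+//
+// Fully loading A<int> visits B<int>, which visits A<int> again; the inner
+// visit finds the type in pVisited, sets *pfBailed, and the root caller later
+// promotes the types parked on pPending.
+//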
+void TypeDesc::DoFullyLoad(Generics::RecursionGraph *pVisited, ClassLoadLevel level,
+ DFLPendingList *pPending, BOOL *pfBailed, const InstantiationContext *pInstContext)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(level == CLASS_LOADED || level == CLASS_DEPENDENCIES_LOADED);
+ _ASSERTE(pfBailed != NULL);
+ _ASSERTE(!(level == CLASS_LOADED && pPending == NULL));
+
+
+#ifndef DACCESS_COMPILE
+
+ if (Generics::RecursionGraph::HasSeenType(pVisited, TypeHandle(this)))
+ {
+ *pfBailed = TRUE;
+ return;
+ }
+
+ if (GetLoadLevel() >= level)
+ {
+ return;
+ }
+
+ if (level == CLASS_LOADED)
+ {
+ UINT numTH = pPending->Count();
+ TypeHandle *pTypeHndPending = pPending->Table();
+ for (UINT idxPending = 0; idxPending < numTH; idxPending++)
+ {
+ if (pTypeHndPending[idxPending].IsTypeDesc() && pTypeHndPending[idxPending].AsTypeDesc() == this)
+ {
+ *pfBailed = TRUE;
+ return;
+ }
+ }
+
+ }
+
+
+ BOOL fBailed = FALSE;
+
+ // First ensure that we're loaded to just below CLASS_LOADED
+ ClassLoader::EnsureLoaded(TypeHandle(this), (ClassLoadLevel) (level-1));
+
+ Generics::RecursionGraph newVisited(pVisited, TypeHandle(this));
+
+ if (HasTypeParam())
+ {
+ // Fully load the type parameter
+ GetTypeParam().DoFullyLoad(&newVisited, level, pPending, &fBailed, pInstContext);
+
+ ParamTypeDesc* pPTD = (ParamTypeDesc*) this;
+
+ // Fully load the template method table
+ if (!pPTD->m_TemplateMT.IsNull())
+ {
+ pPTD->m_TemplateMT.GetValue()->DoFullyLoad(&newVisited, level, pPending, &fBailed, pInstContext);
+ }
+ }
+
+ switch (level)
+ {
+ case CLASS_DEPENDENCIES_LOADED:
+ FastInterlockOr(&m_typeAndFlags, TypeDesc::enum_flag_DependenciesLoaded);
+ break;
+
+ case CLASS_LOADED:
+ if (fBailed)
+ {
+            // We couldn't complete security checks on some dependency because it is already being processed by one of our callers.
+            // Do not mark this type fully loaded yet. Put it on the pending list and it will be marked fully loaded when
+            // everything unwinds.
+
+ *pfBailed = TRUE;
+
+ TypeHandle* pthPending = pPending->AppendThrowing();
+ *pthPending = TypeHandle(this);
+ }
+ else
+ {
+            // Finally, mark this type as fully loaded
+ SetIsFullyLoaded();
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Can't get here.");
+ break;
+ }
+#endif
+}
+
+
+#ifdef FEATURE_PREJIT
+void TypeDesc::DoRestoreTypeKey()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+#ifndef DACCESS_COMPILE
+ if (HasTypeParam())
+ {
+ ParamTypeDesc* pPTD = (ParamTypeDesc*) this;
+ EnsureWritablePages(pPTD);
+
+ // Must have the same loader module, so not encoded
+ CONSISTENCY_CHECK(!pPTD->m_Arg.IsEncodedFixup());
+ ClassLoader::EnsureLoaded(pPTD->m_Arg, CLASS_LOAD_UNRESTORED);
+
+ // Might live somewhere else e.g. Object[] is shared across all ref array types
+ Module::RestoreMethodTablePointer(&(pPTD->m_TemplateMT), NULL, CLASS_LOAD_UNRESTORED);
+ }
+ else
+ {
+ EnsureWritablePages(this);
+ }
+
+ FastInterlockAnd(&m_typeAndFlags, ~TypeDesc::enum_flag_UnrestoredTypeKey);
+#endif
+}
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+// This just performs a shallow save
+void TypeDesc::Save(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ ClassLoader::EnsureLoaded(TypeHandle(this));
+
+ if (LoggingOn(LF_ZAP, LL_INFO10000))
+ {
+ StackSString name;
+ TypeString::AppendType(name, TypeHandle(this));
+ LOG((LF_ZAP, LL_INFO10000, "TypeDesc::Save %S\n", name.GetUnicode()));
+ }
+
+ if (IsGenericVariable())
+ {
+ ((TypeVarTypeDesc*)this)->Save(image);
+ }
+ else if (GetInternalCorElementType() == ELEMENT_TYPE_FNPTR)
+ {
+ ((FnPtrTypeDesc *)this)->Save(image);
+ }
+ else
+ {
+ _ASSERTE(HasTypeParam());
+ ((ParamTypeDesc*)this)->Save(image);
+ }
+
+}
+
+void TypeDesc::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsGenericVariable())
+ {
+ TypeVarTypeDesc* tyvar = (TypeVarTypeDesc*) this;
+ tyvar->Fixup(image);
+ }
+ else if (GetInternalCorElementType() == ELEMENT_TYPE_FNPTR)
+ {
+ ((FnPtrTypeDesc*)this)->Fixup(image);
+ }
+ else
+ {
+ // Works for array and PTR/BYREF types, but not function pointers
+ _ASSERTE(HasTypeParam());
+
+ if (IsArray())
+ {
+ ((ArrayTypeDesc*) this)->Fixup(image);
+ }
+ else
+ {
+ ((ParamTypeDesc*) this)->Fixup(image);
+ }
+ }
+
+ if (NeedsRestore(image))
+ {
+ TypeDesc *pTD = (TypeDesc*) image->GetImagePointer(this);
+ _ASSERTE(pTD != NULL);
+ pTD->m_typeAndFlags |= TypeDesc::enum_flag_Unrestored | TypeDesc::enum_flag_UnrestoredTypeKey | TypeDesc::enum_flag_IsNotFullyLoaded;
+ }
+
+}
+
+BOOL TypeDesc::ComputeNeedsRestore(DataImage *image, TypeHandleList *pVisited)
+{
+ STATIC_STANDARD_VM_CONTRACT;
+
+ _ASSERTE(GetAppDomain()->IsCompilationDomain());
+
+ if (HasTypeParam())
+ {
+ return dac_cast<PTR_ParamTypeDesc>(this)->ComputeNeedsRestore(image, pVisited);
+ }
+ else
+ return FALSE;
+}
+
+
+
+void ParamTypeDesc::Save(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsArray())
+ {
+ image->StoreStructure(this, sizeof(ArrayTypeDesc), DataImage::ITEM_ARRAY_TYPEDESC);
+ }
+ else
+ {
+ image->StoreStructure(this, sizeof(ParamTypeDesc), DataImage::ITEM_PARAM_TYPEDESC);
+ }
+
+ // This set of checks matches precisely those in Module::CreateArrayMethodTable
+ // and ParamTypeDesc::ComputeNeedsRestore
+ //
+    // They indicate whether an array TypeDesc is non-canonical (in much the same
+    // way as a generic method table can be non-canonical), i.e. it is not the primary
+ // owner of the m_TemplateMT (the primary owner is the TypeDesc for object[])
+ //
+ if (OwnsTemplateMethodTable())
+ {
+ // This TypeDesc should be the only one saving this MT
+ _ASSERTE(!image->IsStored(m_TemplateMT.GetValue()));
+ Module::SaveMethodTable(image, m_TemplateMT.GetValue(), 0);
+ }
+
+}
+
+
+void ParamTypeDesc::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(image->GetModule()->GetAssembly() ==
+ GetAppDomain()->ToCompilationDomain()->GetTargetAssembly());
+
+ if (LoggingOn(LF_ZAP, LL_INFO10000))
+ {
+ StackSString name;
+ TypeString::AppendType(name, TypeHandle(this));
+ LOG((LF_ZAP, LL_INFO10000, "ParamTypeDesc::Fixup %S\n", name.GetUnicode()));
+ }
+
+ if (!m_TemplateMT.IsNull())
+ {
+ if (OwnsTemplateMethodTable())
+ {
+            // In this case the type desc "owns" the m_TemplateMT
+            // and it is always stored in the same module as the TypeDesc (i.e. the
+            // TypeDesc and the MT are "tightly-knit"). In other words, if one is present in
+            // an NGEN image then the other will be, and if one is "used" at runtime then
+            // the other will be too.
+ image->FixupPointerField(this, offsetof(ParamTypeDesc, m_TemplateMT));
+ m_TemplateMT.GetValue()->Fixup(image);
+ }
+ else
+ {
+ // Fixup the pointer to the possibly-shared m_TemplateMT. This might be in a different module.
+ image->FixupMethodTablePointer(this, &m_TemplateMT);
+ }
+ }
+
+ // Fixup the pointer to the element type.
+ image->HardBindTypeHandlePointer(this, offsetof(ParamTypeDesc, m_Arg));
+
+ // The managed object will get regenerated on demand
+ image->ZeroField(this, offsetof(ParamTypeDesc, m_hExposedClassObject), sizeof(m_hExposedClassObject));
+}
+
+void ArrayTypeDesc::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ ParamTypeDesc::Fixup(image);
+
+#ifdef FEATURE_COMINTEROP
+ // We don't save CCW templates into ngen images
+ image->ZeroField(this, offsetof(ArrayTypeDesc, m_pCCWTemplate), sizeof(m_pCCWTemplate));
+#endif // FEATURE_COMINTEROP
+}
+
+BOOL ParamTypeDesc::ComputeNeedsRestore(DataImage *image, TypeHandleList *pVisited)
+{
+ STATIC_STANDARD_VM_CONTRACT;
+
+ _ASSERTE(GetAppDomain()->IsCompilationDomain());
+
+ if (m_typeAndFlags & TypeDesc::enum_flag_NeedsRestore)
+ {
+ return TRUE;
+ }
+ if (m_typeAndFlags & TypeDesc::enum_flag_PreRestored)
+ {
+ return FALSE;
+ }
+
+ BOOL res = FALSE;
+ if (!image->CanPrerestoreEagerBindToTypeHandle(m_Arg, pVisited))
+ {
+ res = TRUE;
+ }
+
+ // This set of checks matches precisely those in Module::CreateArrayMethodTable and ParamTypeDesc::Fixup
+ //
+ if (!m_TemplateMT.IsNull())
+ {
+ if (OwnsTemplateMethodTable())
+ {
+ if (m_TemplateMT.GetValue()->ComputeNeedsRestore(image, pVisited))
+ {
+ res = TRUE;
+ }
+ }
+ else
+ {
+ if (!image->CanPrerestoreEagerBindToMethodTable(m_TemplateMT.GetValue(), pVisited))
+ {
+ res = TRUE;
+ }
+ }
+ }
+
+ // Cache the results of running the algorithm.
+ // We can only cache the result if we have not speculatively assumed
+ // that any types are not NeedsRestore, i.e. the visited list is empty
+ if (pVisited == NULL)
+ {
+ if (LoggingOn(LF_ZAP, LL_INFO10000))
+ {
+ StackSString name;
+ TypeString::AppendType(name, TypeHandle(this));
+ LOG((LF_ZAP, LL_INFO10000, "ParamTypeDesc::ComputeNeedsRestore=%d for %S\n", res, name.GetUnicode()));
+ }
+ m_typeAndFlags |= (res ? TypeDesc::enum_flag_NeedsRestore : TypeDesc::enum_flag_PreRestored);
+ }
+ return res;
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+void TypeDesc::SetIsRestored()
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+
+ TypeHandle th = TypeHandle(this);
+ FastInterlockAnd(EnsureWritablePages(&m_typeAndFlags), ~TypeDesc::enum_flag_Unrestored);
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+void TypeDesc::Restore()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ CONSISTENCY_CHECK(!HasUnrestoredTypeKey());
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ if (HasTypeParam())
+ {
+ ParamTypeDesc *pPTD = dac_cast<PTR_ParamTypeDesc>(this);
+
+ OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOAD_EXACTPARENTS);
+
+ // Must have the same loader module
+ ClassLoader::EnsureLoaded(pPTD->m_Arg, CLASS_LOAD_EXACTPARENTS);
+
+ // Method-table pointer must have been restored by DoRestoreTypeKey
+ Module::RestoreMethodTablePointer(&pPTD->m_TemplateMT, NULL, CLASS_LOAD_EXACTPARENTS);
+ }
+
+ SetIsRestored();
+#else
+ DacNotImpl();
+#endif // #ifndef DACCESS_COMPILE
+}
+
+#endif // FEATURE_PREJIT
+
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+void TypeVarTypeDesc::Save(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ // We don't persist the constraints: instead, load them back on demand
+ m_numConstraints = (DWORD) -1;
+
+ LOG((LF_ZAP, LL_INFO10000, " TypeVarTypeDesc::Save %x (%p)\n", GetToken(), this));
+ image->StoreStructure(this, sizeof(TypeVarTypeDesc),
+ DataImage::ITEM_TYVAR_TYPEDESC);
+}
+
+void TypeVarTypeDesc::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ LOG((LF_ZAP, LL_INFO10000, " TypeVarTypeDesc::Fixup %x (%p)\n", GetToken(), this));
+ image->FixupPointerField(this, offsetof(TypeVarTypeDesc, m_pModule));
+ image->ZeroField(this, offsetof(TypeVarTypeDesc, m_hExposedClassObject), sizeof(m_hExposedClassObject));
+
+ // We don't persist the constraints: instead, load them back on demand
+ image->ZeroPointerField(this, offsetof(TypeVarTypeDesc, m_constraints));
+
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+MethodDesc * TypeVarTypeDesc::LoadOwnerMethod()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ PRECONDITION(TypeFromToken(m_typeOrMethodDef) == mdtMethodDef);
+ }
+ CONTRACTL_END;
+
+ MethodDesc *pMD = m_pModule->LookupMethodDef(m_typeOrMethodDef);
+ if (pMD == NULL)
+ {
+ pMD = MemberLoader::GetMethodDescFromMethodDef(m_pModule, m_typeOrMethodDef, FALSE);
+ }
+ return pMD;
+}
+
+TypeHandle TypeVarTypeDesc::LoadOwnerType()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ PRECONDITION(TypeFromToken(m_typeOrMethodDef) == mdtTypeDef);
+ }
+ CONTRACTL_END;
+
+ TypeHandle genericType = m_pModule->LookupTypeDef(m_typeOrMethodDef);
+ if (genericType.IsNull())
+ {
+ genericType = ClassLoader::LoadTypeDefThrowing(m_pModule, m_typeOrMethodDef,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ }
+ return genericType;
+}
+
+TypeHandle* TypeVarTypeDesc::GetCachedConstraints(DWORD *pNumConstraints)
+{
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(CheckPointer(pNumConstraints));
+ PRECONDITION(m_numConstraints != (DWORD) -1);
+
+ *pNumConstraints = m_numConstraints;
+ return m_constraints;
+}
+
+
+
+
+TypeHandle* TypeVarTypeDesc::GetConstraints(DWORD *pNumConstraints, ClassLoadLevel level /* = CLASS_LOADED */)
+{
+ WRAPPER_NO_CONTRACT;
+ PRECONDITION(CheckPointer(pNumConstraints));
+ PRECONDITION(level == CLASS_DEPENDENCIES_LOADED || level == CLASS_LOADED);
+
+ if (m_numConstraints == (DWORD) -1)
+ LoadConstraints(level);
+
+ *pNumConstraints = m_numConstraints;
+ return m_constraints;
+}
+
+
+void TypeVarTypeDesc::LoadConstraints(ClassLoadLevel level /* = CLASS_LOADED */)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ INJECT_FAULT(COMPlusThrowOM());
+
+ PRECONDITION(level == CLASS_DEPENDENCIES_LOADED || level == CLASS_LOADED);
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(((INT_PTR)&m_constraints) % sizeof(m_constraints) == 0);
+ _ASSERTE(((INT_PTR)&m_numConstraints) % sizeof(m_numConstraints) == 0);
+
+ DWORD numConstraints = m_numConstraints;
+
+ if (numConstraints == (DWORD) -1)
+ {
+ EnsureWritablePages(this);
+
+ IMDInternalImport* pInternalImport = GetModule()->GetMDImport();
+
+ HENUMInternalHolder hEnum(pInternalImport);
+ mdGenericParamConstraint tkConstraint;
+
+ SigTypeContext typeContext;
+ mdToken defToken = GetTypeOrMethodDef();
+
+ MethodTable *pMT = NULL;
+ if (TypeFromToken(defToken) == mdtMethodDef)
+ {
+ MethodDesc *pMD = LoadOwnerMethod();
+ _ASSERTE(pMD->IsGenericMethodDefinition());
+
+ SigTypeContext::InitTypeContext(pMD,&typeContext);
+
+ _ASSERTE(!typeContext.m_methodInst.IsEmpty());
+ pMT = pMD->GetMethodTable();
+ }
+ else
+ {
+ _ASSERTE(TypeFromToken(defToken) == mdtTypeDef);
+ TypeHandle genericType = LoadOwnerType();
+ _ASSERTE(genericType.IsGenericTypeDefinition());
+
+ SigTypeContext::InitTypeContext(genericType,&typeContext);
+ }
+
+ hEnum.EnumInit(mdtGenericParamConstraint, GetToken());
+ numConstraints = pInternalImport->EnumGetCount(&hEnum);
+ if (numConstraints != 0)
+ {
+ LoaderAllocator* pAllocator=m_pModule->GetLoaderAllocator();
+            // If there is a single class constraint we put it in element 0 of the array
+ AllocMemHolder<TypeHandle> constraints
+ (pAllocator->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(numConstraints) * S_SIZE_T(sizeof(TypeHandle))));
+
+ DWORD i = 0;
+ while (pInternalImport->EnumNext(&hEnum, &tkConstraint))
+ {
+ _ASSERTE(i <= numConstraints);
+ mdToken tkConstraintType, tkParam;
+ if (FAILED(pInternalImport->GetGenericParamConstraintProps(tkConstraint, &tkParam, &tkConstraintType)))
+ {
+ GetModule()->GetAssembly()->ThrowTypeLoadException(pInternalImport, pMT->GetCl(), IDS_CLASSLOAD_BADFORMAT);
+ }
+ _ASSERTE(tkParam == GetToken());
+ TypeHandle thConstraint = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(GetModule(), tkConstraintType,
+ &typeContext,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef,
+ ClassLoader::LoadTypes,
+ level);
+
+ constraints[i++] = thConstraint;
+
+ // Method type constraints behave contravariantly
+ // (cf Bounded polymorphism e.g. see
+ // Cardelli & Wegner, On understanding types, data abstraction and polymorphism, Computing Surveys 17(4), Dec 1985)
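+                // E.g. (illustrative C#): "interface I<in T> { void M<U>() where U : T; }"
+                // is accepted, while the same constraint with "out T" would fail the
+                // variance check below.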
+ if (pMT != NULL && pMT->HasVariance() && TypeFromToken(tkConstraintType) == mdtTypeSpec)
+ {
+ ULONG cSig;
+ PCCOR_SIGNATURE pSig;
+ if (FAILED(pInternalImport->GetTypeSpecFromToken(tkConstraintType, &pSig, &cSig)))
+ {
+ GetModule()->GetAssembly()->ThrowTypeLoadException(pInternalImport, pMT->GetCl(), IDS_CLASSLOAD_BADFORMAT);
+ }
+ if (!EEClass::CheckVarianceInSig(pMT->GetNumGenericArgs(),
+ pMT->GetClass()->GetVarianceInfo(),
+ pMT->GetModule(),
+ SigPointer(pSig, cSig),
+ gpContravariant))
+ {
+ GetModule()->GetAssembly()->ThrowTypeLoadException(pInternalImport, pMT->GetCl(), IDS_CLASSLOAD_VARIANCE_IN_CONSTRAINT);
+ }
+ }
+ }
+
+ if (InterlockedCompareExchangeT(&m_constraints, constraints.operator->(), NULL) == NULL)
+ {
+ constraints.SuppressRelease();
+ }
+ }
+
+ m_numConstraints = numConstraints;
+ }
+
+ for (DWORD i = 0; i < numConstraints; i++)
+ {
+ ClassLoader::EnsureLoaded(m_constraints[i], level);
+ }
+}
+
+BOOL TypeVarTypeDesc::ConstrainedAsObjRef()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(ConstraintsLoaded());
+ }
+ CONTRACTL_END;
+
+ IMDInternalImport* pInternalImport = GetModule()->GetMDImport();
+ mdGenericParam genericParamToken = GetToken();
+ DWORD flags;
+ if (FAILED(pInternalImport->GetGenericParamProps(genericParamToken, NULL, &flags, NULL, NULL, NULL)))
+ {
+ return FALSE;
+ }
+ DWORD specialConstraints = flags & gpSpecialConstraintMask;
+
+ if ((specialConstraints & gpReferenceTypeConstraint) != 0)
+ return TRUE;
+
+ return ConstrainedAsObjRefHelper();
+}
+
+// A recursive helper that helps determine whether this variable is constrained as ObjRef.
+// Please note that we do not check the gpReferenceTypeConstraint special constraint here
+// because this property does not propagate up the constraining hierarchy.
+// (e.g. "class A<S, T> where S : T where T : class" does not guarantee that S is ObjRef)
+BOOL TypeVarTypeDesc::ConstrainedAsObjRefHelper()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DWORD dwNumConstraints = 0;
+ TypeHandle* constraints = GetCachedConstraints(&dwNumConstraints);
+
+ for (DWORD i = 0; i < dwNumConstraints; i++)
+ {
+ TypeHandle constraint = constraints[i];
+
+ if (constraint.IsGenericVariable() && constraint.AsGenericVariable()->ConstrainedAsObjRefHelper())
+ return TRUE;
+
+ if (!constraint.IsInterface() && CorTypeInfo::IsObjRef_NoThrow(constraint.GetInternalCorElementType()))
+ {
+ // Object, ValueType, and Enum are ObjRefs but they do not constrain the var to ObjRef!
+ MethodTable *mt = constraint.GetMethodTable();
+
+ if (mt != g_pObjectClass &&
+ mt != g_pValueTypeClass &&
+ mt != g_pEnumClass)
+ {
+ return TRUE;
+ }
+ }
+ }
+
+ return FALSE;
+}
+
+BOOL TypeVarTypeDesc::ConstrainedAsValueType()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(ConstraintsLoaded());
+ }
+ CONTRACTL_END;
+
+ IMDInternalImport* pInternalImport = GetModule()->GetMDImport();
+ mdGenericParam genericParamToken = GetToken();
+ DWORD flags;
+ if (FAILED(pInternalImport->GetGenericParamProps(genericParamToken, NULL, &flags, NULL, NULL, NULL)))
+ {
+ return FALSE;
+ }
+ DWORD specialConstraints = flags & gpSpecialConstraintMask;
+
+ if ((specialConstraints & gpNotNullableValueTypeConstraint) != 0)
+ return TRUE;
+
+ DWORD dwNumConstraints = 0;
+ TypeHandle* constraints = GetCachedConstraints(&dwNumConstraints);
+
+ for (DWORD i = 0; i < dwNumConstraints; i++)
+ {
+ TypeHandle constraint = constraints[i];
+
+ if (constraint.IsGenericVariable())
+ {
+ if (constraint.AsGenericVariable()->ConstrainedAsValueType())
+ return TRUE;
+ }
+ else
+ {
+ // the following condition will also disqualify interfaces
+ if (!CorTypeInfo::IsObjRef_NoThrow(constraint.GetInternalCorElementType()))
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+//---------------------------------------------------------------------------------------------------------------------
+// Loads the type of a constraint given the constraint token and instantiation context. If pInstContext is
+// not NULL and the constraint's type is a typespec, pInstContext will be used to instantiate the typespec.
+// Otherwise typical instantiation is returned if the constraint type is generic.
+//---------------------------------------------------------------------------------------------------------------------
+static
+TypeHandle LoadTypeVarConstraint(TypeVarTypeDesc *pTypeVar, mdGenericParamConstraint tkConstraint,
+ const InstantiationContext *pInstContext)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTypeVar));
+ }
+ CONTRACTL_END;
+
+ Module *pTyModule = pTypeVar->GetModule();
+ IMDInternalImport* pInternalImport = pTyModule->GetMDImport();
+
+ mdToken tkConstraintType, tkParam;
+ IfFailThrow(pInternalImport->GetGenericParamConstraintProps(tkConstraint, &tkParam, &tkConstraintType));
+ _ASSERTE(tkParam == pTypeVar->GetToken());
+ mdToken tkOwnerToken = pTypeVar->GetTypeOrMethodDef();
+
+ if (TypeFromToken(tkConstraintType) == mdtTypeSpec && pInstContext != NULL)
+ {
+ if(pInstContext->m_pSubstChain == NULL)
+ {
+ // The substitution chain will be null in situations
+ // where we are instantiating types that are open, and therefore
+ // we should be using the fully open TypeVar constraint instantiation code
+            // below. However, in the case of an open method on a closed generic class
+            // we will also have a null substitution chain. In this case, if we can ensure
+            // that the instantiation type parameters are non-type-var types, it is valid
+            // to use the passed-in instantiation when instantiating the type var constraint.
+ BOOL fContextContainsValidGenericTypeParams = FALSE;
+
+ if (TypeFromToken(tkOwnerToken) == mdtMethodDef)
+ {
+ SigTypeContext sigTypeContext;
+
+ MethodDesc *pMD = pTypeVar->LoadOwnerMethod();
+
+ SigTypeContext::InitTypeContext(pMD, &sigTypeContext);
+ fContextContainsValidGenericTypeParams = SigTypeContext::IsValidTypeOnlyInstantiationOf(&sigTypeContext, pInstContext->m_pArgContext);
+ }
+
+ if (!fContextContainsValidGenericTypeParams)
+ goto LoadConstraintOnOpenType;
+ }
+
+ // obtain the constraint type's signature if it's a typespec
+ ULONG cbSig;
+ PCCOR_SIGNATURE ptr;
+
+ IfFailThrow(pInternalImport->GetSigFromToken(tkConstraintType, &cbSig, &ptr));
+
+ SigPointer pSig(ptr, cbSig);
+
+ // instantiate the signature using the current InstantiationContext
+ return pSig.GetTypeHandleThrowing(pTyModule,
+ pInstContext->m_pArgContext,
+ ClassLoader::LoadTypes, CLASS_DEPENDENCIES_LOADED, FALSE,
+ pInstContext->m_pSubstChain);
+ }
+ else
+ {
+LoadConstraintOnOpenType:
+
+ SigTypeContext sigTypeContext;
+
+ switch (TypeFromToken(tkOwnerToken))
+ {
+ case mdtTypeDef:
+ {
+ // the type variable is declared by a type - load the handle of the type
+ TypeHandle thOwner = pTyModule->GetClassLoader()->LoadTypeDefThrowing(pTyModule,
+ tkOwnerToken,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef,
+ tdNoTypes,
+ CLASS_LOAD_APPROXPARENTS
+ );
+
+ SigTypeContext::InitTypeContext(thOwner, &sigTypeContext);
+ break;
+ }
+
+ case mdtMethodDef:
+ {
+ // the type variable is declared by a method - load its method desc
+ MethodDesc *pMD = pTyModule->LookupMethodDef(tkOwnerToken);
+
+ SigTypeContext::InitTypeContext(pMD, &sigTypeContext);
+ break;
+ }
+
+ default:
+ {
+ COMPlusThrow(kBadImageFormatException);
+ }
+ }
+
+ // load the (typical instantiation of) constraint type
+ return ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pTyModule,
+ tkConstraintType,
+ &sigTypeContext,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef,
+ ClassLoader::LoadTypes,
+ CLASS_DEPENDENCIES_LOADED);
+ }
+}
+
+//---------------------------------------------------------------------------------------------------------------------
+// We come here only if a type parameter with a special constraint is instantiated by an argument that is itself
+// a type parameter. In this case, we'll need to examine *its* constraints to see if the range of types that would satisfy its
+// constraints is a subset of the range of types that would satisfy the special constraint.
+//
+// This routine will return TRUE if it can prove that argument "pTyArg" has a constraint that will satisfy the special constraint.
+//
+// (NOTE: It does not check against anything other than one specific specialConstraint (it doesn't even know what they are.) This is
+// just one step in the checking of constraints.)
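+//
+// For example (illustrative): for "void F<T>() where T : struct" invoked as
+// F<U> from "class C<U> where U : struct", U's own gpNotNullableValueTypeConstraint
+// matches, so the switch below returns TRUE without examining type constraints.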
+//---------------------------------------------------------------------------------------------------------------------
+static
+BOOL SatisfiesSpecialConstraintRecursive(TypeVarTypeDesc *pTyArg, DWORD specialConstraint, TypeHandleList *pVisitedVars = NULL)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTyArg));
+ }
+ CONTRACTL_END;
+
+ // The caller must invoke for all special constraints that apply - this fcn can only reliably test against one
+ // constraint at a time.
+ _ASSERTE(specialConstraint == gpNotNullableValueTypeConstraint
+ || specialConstraint == gpReferenceTypeConstraint
+ || specialConstraint == gpDefaultConstructorConstraint);
+
+ IMDInternalImport* pInternalImport = pTyArg->GetModule()->GetMDImport();
+
+ // Get the argument type's own special constraints
+ DWORD argFlags;
+ IfFailThrow(pTyArg->GetModule()->GetMDImport()->GetGenericParamProps(pTyArg->GetToken(), NULL, &argFlags, NULL, NULL, NULL));
+
+ DWORD argSpecialConstraints = argFlags & gpSpecialConstraintMask;
+
+ // First, if the argument's own special constraints match the parameter's special constraints,
+ // we can safely conclude the constraint is satisfied.
+ switch (specialConstraint)
+ {
+ case gpNotNullableValueTypeConstraint:
+ {
+ if ((argSpecialConstraints & gpNotNullableValueTypeConstraint) != 0)
+ {
+ return TRUE;
+ }
+ break;
+ }
+
+ case gpReferenceTypeConstraint:
+ {
+ // gpReferenceTypeConstraint is not "inherited" so ignore it if pTyArg is a variable
+ // constraining the argument rather than the argument itself.
+
+ if (pVisitedVars == NULL && (argSpecialConstraints & gpReferenceTypeConstraint) != 0)
+ {
+ return TRUE;
+ }
+ break;
+ }
+
+ case gpDefaultConstructorConstraint:
+ {
+ // gpDefaultConstructorConstraint is not "inherited" so ignore it if pTyArg is a variable
+ // constraining the argument rather than the argument itself.
+
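+        // Note that gpNotNullableValueTypeConstraint is accepted below even when recursing:
+        // an argument declared "where U : struct" always satisfies a "new()" parameter
+        // because every non-Nullable value type has a public parameterless constructor.
+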
+ if ((pVisitedVars == NULL && (argSpecialConstraints & gpDefaultConstructorConstraint) != 0) ||
+ (argSpecialConstraints & gpNotNullableValueTypeConstraint) != 0)
+ {
+ return TRUE;
+ }
+ break;
+ }
+ }
+
+ // The special constraints did not match. However, we may find a primary type constraint
+ // that would always satisfy the special constraint.
+ HENUMInternalHolder hEnum(pInternalImport);
+ hEnum.EnumInit(mdtGenericParamConstraint, pTyArg->GetToken());
+
+ mdGenericParamConstraint tkConstraint;
+ while (pInternalImport->EnumNext(&hEnum, &tkConstraint))
+ {
+ // We pass NULL instantiation context here because when checking for special constraints, it makes
+ // no difference whether we load a typical (e.g. A<T>) or concrete (e.g. A<string>) instantiation.
+ TypeHandle thConstraint = LoadTypeVarConstraint(pTyArg, tkConstraint, NULL);
+
+ if (thConstraint.IsGenericVariable())
+ {
+ // The variable is constrained by another variable, which we need to check recursively. An
+ // example of why this is necessary follows:
+ //
+ // class A<T> where T : class
+ // { }
+ // class B<S, R> : A<S> where S : R where R : EventArgs
+ // { }
+ //
+ if (!TypeHandleList::Exists(pVisitedVars, thConstraint))
+ {
+ TypeHandleList newVisitedVars(thConstraint, pVisitedVars);
+ if (SatisfiesSpecialConstraintRecursive(thConstraint.AsGenericVariable(),
+ specialConstraint,
+ &newVisitedVars))
+ {
+ return TRUE;
+ }
+ }
+ }
+ else if (thConstraint.IsInterface())
+ {
+            // This is a secondary (interface) constraint - it tells us nothing useful here
+            // about the eventual instantiation.
+ }
+ else
+ {
+ // This is a type constraint. Remember that the eventual instantiation is only guaranteed to be
+ // something *derived* from this type, not the actual type itself. To emphasize, we rename the local.
+
+ TypeHandle thAncestorOfType = thConstraint;
+
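+            // A value type used as a type constraint effectively pins the instantiation:
+            // value types are sealed, so the only type "derived" from such a constraint
+            // is the constraint type itself.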
+ if (specialConstraint == gpNotNullableValueTypeConstraint)
+ {
+ if (thAncestorOfType.IsValueType() && !(thAncestorOfType.AsMethodTable()->IsNullable()))
+ {
+ return TRUE;
+ }
+ }
+
+ if (specialConstraint == gpReferenceTypeConstraint)
+ {
+ if (!thAncestorOfType.IsTypeDesc())
+ {
+ MethodTable *pAncestorMT = thAncestorOfType.AsMethodTable();
+
+ if ((!(pAncestorMT->IsValueType())) && pAncestorMT != g_pObjectClass && pAncestorMT != g_pValueTypeClass)
+ {
+ // ValueTypes are sealed except when they aren't (cough, cough, System.Enum...). Sigh.
+ // Don't put all our trust in IsValueType() here - check the ancestry chain as well.
+ BOOL fIsValueTypeAnAncestor = FALSE;
+ MethodTable *pParentMT = pAncestorMT->GetParentMethodTable();
+ while (pParentMT)
+ {
+ if (pParentMT == g_pValueTypeClass)
+ {
+ fIsValueTypeAnAncestor = TRUE;
+ break;
+ }
+ pParentMT = pParentMT->GetParentMethodTable();
+ }
+
+ if (!fIsValueTypeAnAncestor)
+ {
+ return TRUE;
+ }
+ }
+ }
+ }
+
+ if (specialConstraint == gpDefaultConstructorConstraint)
+ {
+                    // If it is a value type, just check that it has a public default ctor (explicit or implicit).
+                    // If it is not a value type, there is not much we can conclude knowing just an ancestor class.
+ if (thAncestorOfType.IsValueType() && thAncestorOfType.GetMethodTable()->HasExplicitOrImplicitPublicDefaultConstructor())
+ {
+ return TRUE;
+ }
+ }
+
+ }
+ }
+
+ // If we got here, we found no evidence that the argument's constraints are strict enough to satisfy the parameter's constraints.
+ return FALSE;
+}
+
+//---------------------------------------------------------------------------------------------------------------------
+// Walks the "constraining chain" of a type variable and appends all concrete constraints as well as type vars
+// to the provided ArrayList. Upon leaving the function, the list contains all types that the type variable is
+// known to be assignable to.
+//
+// E.g.
+// class A<S, T> where S : T, IComparable where T : EventArgs
+// {
+// void f<U>(U u) where U : S, IDisposable { }
+// }
+// This would add five types to U's list: S, T, IDisposable, IComparable, and EventArgs.
+//---------------------------------------------------------------------------------------------------------------------
+static
+void GatherConstraintsRecursive(TypeVarTypeDesc *pTyArg, ArrayList *pArgList, const InstantiationContext *pInstContext,
+ TypeHandleList *pVisitedVars = NULL)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTyArg));
+ PRECONDITION(CheckPointer(pArgList));
+ }
+ CONTRACTL_END;
+
+ IMDInternalImport* pInternalImport = pTyArg->GetModule()->GetMDImport();
+
+ // enumerate constraints of the pTyArg
+ HENUMInternalHolder hEnum(pInternalImport);
+ hEnum.EnumInit(mdtGenericParamConstraint, pTyArg->GetToken());
+
+ mdGenericParamConstraint tkConstraint;
+ while (pInternalImport->EnumNext(&hEnum, &tkConstraint))
+ {
+ TypeHandle thConstraint = LoadTypeVarConstraint(pTyArg, tkConstraint, pInstContext);
+
+ if (thConstraint.IsGenericVariable())
+ {
+ // see if it's safe to recursively call ourselves
+ if (!TypeHandleList::Exists(pVisitedVars, thConstraint))
+ {
+ pArgList->Append(thConstraint.AsPtr());
+
+ TypeHandleList newVisitedVars(thConstraint, pVisitedVars);
+ GatherConstraintsRecursive(thConstraint.AsGenericVariable(), pArgList, pInstContext, &newVisitedVars);
+ }
+
+ // Note: circular type parameter constraints will be detected and reported later in
+ // MethodTable::DoFullyLoad, we just have to avoid SO here.
+ }
+ else
+ {
+ pArgList->Append(thConstraint.AsPtr());
+ }
+ }
+}
+
+// pTypeContextOfConstraintDeclarer = type context of the generic type that declares the constraint
+// This is needed to load the "X" type when the constraint is of the form
+// "where T : X".
+// Caution: Do NOT use it to load types or constraints attached to "thArg".
+//
+// thArg = typehandle of the type being substituted for the type parameter.
+//
+// pInstContext = the instantiation context (type context + substitution chain) to be
+// used when loading constraints attached to "thArg".
+//
+BOOL TypeVarTypeDesc::SatisfiesConstraints(SigTypeContext *pTypeContextOfConstraintDeclarer, TypeHandle thArg,
+ const InstantiationContext *pInstContext/*=NULL*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ PRECONDITION(!thArg.IsNull());
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ IMDInternalImport* pInternalImport = GetModule()->GetMDImport();
+ mdGenericParamConstraint tkConstraint;
+
+ INDEBUG(mdToken defToken = GetTypeOrMethodDef());
+ _ASSERTE(TypeFromToken(defToken) == mdtMethodDef || TypeFromToken(defToken) == mdtTypeDef);
+
+ // prepare for the enumeration of this variable's general constraints
+ mdGenericParam genericParamToken = GetToken();
+
+ HENUMInternalHolder hEnum(pInternalImport);
+ hEnum.EnumInit(mdtGenericParamConstraint, genericParamToken);
+
+ ArrayList argList;
+
+ // First check special constraints (must-be-reference-type, must-be-value-type, and must-have-default-constructor)
+ DWORD flags;
+ IfFailThrow(pInternalImport->GetGenericParamProps(genericParamToken, NULL, &flags, NULL, NULL, NULL));
+
+ DWORD specialConstraints = flags & gpSpecialConstraintMask;
+
+ if (thArg.IsGenericVariable())
+ {
+ TypeVarTypeDesc *pTyVar = thArg.AsGenericVariable();
+
+ if ((specialConstraints & gpNotNullableValueTypeConstraint) != 0)
+ {
+ if (!SatisfiesSpecialConstraintRecursive(pTyVar, gpNotNullableValueTypeConstraint))
+ {
+ return FALSE;
+ }
+ }
+
+ if ((specialConstraints & gpReferenceTypeConstraint) != 0)
+ {
+ if (!SatisfiesSpecialConstraintRecursive(pTyVar, gpReferenceTypeConstraint))
+ {
+ return FALSE;
+ }
+ }
+
+ if ((specialConstraints & gpDefaultConstructorConstraint) != 0)
+ {
+ if (!SatisfiesSpecialConstraintRecursive(pTyVar, gpDefaultConstructorConstraint))
+ {
+ return FALSE;
+ }
+ }
+
+ if (hEnum.EnumGetCount() == 0)
+ {
+ // return immediately if there are no general constraints to satisfy (fast path)
+ return TRUE;
+ }
+
+ // Now walk the "constraining chain" of type variables and gather all constraint types.
+ //
+ // This work should not be left to code:TypeHandle.CanCastTo because we need typespec constraints
+ // to be instantiated in pInstContext. If we just do thArg.CanCastTo(thConstraint), it would load
+ // typical instantiations of the constraints and the can-cast-to check may fail. In addition,
+ // code:TypeHandle.CanCastTo will SO if the constraints are circular.
+ //
+ // Consider:
+ //
+ // class A<T>
+ // {
+ // void f<U>(B<U, T> b) where U : A<T> { }
+ // }
+ // class B<S, R> where S : A<R> { }
+ //
+ // If we load the signature of, say, A<int>.f<U> (concrete class but typical method), and end up
+ // here verifying that S : A<R> is satisfied by U : A<T>, we must instantiate the constraint type
+ // A<T> using pInstContext so that it becomes A<int>. Otherwise the constraint check fails.
+ //
+ GatherConstraintsRecursive(pTyVar, &argList, pInstContext);
+ }
+ else
+ {
+ if ((specialConstraints & gpNotNullableValueTypeConstraint) != 0)
+ {
+ if (!thArg.IsValueType())
+ return FALSE;
+ else
+ {
+ // the type argument is a value type, however if it is any kind of Nullable we want to fail
+ // as the constraint accepts any value type except Nullable types (Nullable itself is a value type)
+ if (thArg.AsMethodTable()->IsNullable())
+ return FALSE;
+ }
+ }
+
+ if ((specialConstraints & gpReferenceTypeConstraint) != 0)
+ {
+ if (thArg.IsValueType())
+ return FALSE;
+ }
+
+ if ((specialConstraints & gpDefaultConstructorConstraint) != 0)
+ {
+ if (thArg.IsTypeDesc() || (!thArg.AsMethodTable()->HasExplicitOrImplicitPublicDefaultConstructor()))
+ return FALSE;
+ }
+ }
+
+    // Complete the list by adding thArg itself. If thArg is not a generic variable this will be the only
+    // item in the list. If it is a generic variable, we need it in the list in addition to all the
+    // constraints gathered by GatherConstraintsRecursive, because e.g. class A<S, T> where S : T
+    // can be instantiated using A<U, U>.
+ argList.Append(thArg.AsPtr());
+
+ // At this point argList contains all types that thArg is known to be assignable to. The list may
+ // contain duplicates and it consists of zero or more type variables, zero or more possibly generic
+ // interfaces, and at most one possibly generic class.
+
+ // Now check general subtype constraints
+ while (pInternalImport->EnumNext(&hEnum, &tkConstraint))
+ {
+ mdToken tkConstraintType, tkParam;
+ IfFailThrow(pInternalImport->GetGenericParamConstraintProps(tkConstraint, &tkParam, &tkConstraintType));
+
+ _ASSERTE(tkParam == GetToken());
+ TypeHandle thConstraint = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(GetModule(),
+ tkConstraintType,
+ pTypeContextOfConstraintDeclarer,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::FailIfUninstDefOrRef,
+ ClassLoader::LoadTypes,
+ CLASS_DEPENDENCIES_LOADED);
+
+ // System.Object constraint will be always satisfied - even if argList is empty
+ if (!thConstraint.IsObjectType())
+ {
+ BOOL fCanCast = FALSE;
+
+ // loop over all types that we know the arg will be assignable to
+ ArrayList::Iterator iter = argList.Iterate();
+ while (iter.Next())
+ {
+ TypeHandle thElem = TypeHandle::FromPtr(iter.GetElement());
+
+ if (thElem.IsGenericVariable())
+ {
+                    // if a generic variable equals the constraint, then this constraint will be satisfied
+ if (thElem == thConstraint)
+ {
+ fCanCast = TRUE;
+ break;
+ }
+
+ // and any variable with the gpNotNullableValueTypeConstraint special constraint
+ // satisfies the "derived from System.ValueType" general subtype constraint
+ if (thConstraint == g_pValueTypeClass)
+ {
+ TypeVarTypeDesc *pTyElem = thElem.AsGenericVariable();
+ IfFailThrow(pTyElem->GetModule()->GetMDImport()->GetGenericParamProps(
+ pTyElem->GetToken(),
+ NULL,
+ &flags,
+ NULL,
+ NULL,
+ NULL));
+
+ if ((flags & gpNotNullableValueTypeConstraint) != 0)
+ {
+ fCanCast = TRUE;
+ break;
+ }
+ }
+ }
+ else
+ {
+                    // if a concrete type can be cast to the constraint, then this constraint will be satisfied
+ if (thElem.CanCastTo(thConstraint))
+ {
+ fCanCast = TRUE;
+ break;
+ }
+ }
+ }
+
+ if (!fCanCast)
+ return FALSE;
+ }
+ }
+ return TRUE;
+}
+
+
+#ifndef CROSSGEN_COMPILE
+OBJECTREF TypeVarTypeDesc::GetManagedClassObject()
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+
+ INJECT_FAULT(COMPlusThrowOM());
+
+ PRECONDITION(IsGenericVariable());
+ }
+ CONTRACTL_END;
+
+ if (m_hExposedClassObject == NULL) {
+ REFLECTCLASSBASEREF refClass = NULL;
+ GCPROTECT_BEGIN(refClass);
+ if (GetAssembly()->IsIntrospectionOnly())
+ refClass = (REFLECTCLASSBASEREF) AllocateObject(MscorlibBinder::GetClass(CLASS__CLASS_INTROSPECTION_ONLY));
+ else
+ refClass = (REFLECTCLASSBASEREF) AllocateObject(g_pRuntimeTypeClass);
+
+ LoaderAllocator *pLoaderAllocator = GetLoaderAllocator();
+ TypeHandle th = TypeHandle(this);
+ ((ReflectClassBaseObject*)OBJECTREFToObject(refClass))->SetType(th);
+ ((ReflectClassBaseObject*)OBJECTREFToObject(refClass))->SetKeepAlive(pLoaderAllocator->GetExposedObject());
+
+ // Let all threads fight over who wins using InterlockedCompareExchange.
+ // Only the winner can set m_hExposedClassObject from NULL.
+ LOADERHANDLE hExposedClassObject = pLoaderAllocator->AllocateHandle(refClass);
+
+ if (FastInterlockCompareExchangePointer(EnsureWritablePages(&m_hExposedClassObject), hExposedClassObject, static_cast<LOADERHANDLE>(NULL)))
+ {
+ pLoaderAllocator->ClearHandle(hExposedClassObject);
+ }
+
+ GCPROTECT_END();
+ }
+ return GetManagedClassObjectIfExists();
+}
+#endif // CROSSGEN_COMPILE
+
+#endif //!DACCESS_COMPILE
+
+TypeHandle *
+FnPtrTypeDesc::GetRetAndArgTypes()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Decode encoded type handles on demand
+#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
+ for (DWORD i = 0; i <= m_NumArgs; i++)
+ {
+ Module::RestoreTypeHandlePointerRaw(&m_RetAndArgTypes[i]);
+ }
+#endif //defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
+
+ return m_RetAndArgTypes;
+} // FnPtrTypeDesc::GetRetAndArgTypes
+
+#ifndef DACCESS_COMPILE
+
+// Returns TRUE if all return and argument types are externally visible.
+BOOL
+FnPtrTypeDesc::IsExternallyVisible() const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ const TypeHandle * rgRetAndArgTypes = GetRetAndArgTypes();
+ for (DWORD i = 0; i <= m_NumArgs; i++)
+ {
+ if (!rgRetAndArgTypes[i].IsExternallyVisible())
+ {
+ return FALSE;
+ }
+ }
+    // All return/argument types are externally visible
+ return TRUE;
+} // FnPtrTypeDesc::IsExternallyVisible
+
+// Returns TRUE if any of the return or argument types is part of an assembly loaded for introspection.
+BOOL
+FnPtrTypeDesc::IsIntrospectionOnly() const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ const TypeHandle * rgRetAndArgTypes = GetRetAndArgTypes();
+ for (DWORD i = 0; i <= m_NumArgs; i++)
+ {
+ if (rgRetAndArgTypes[i].IsIntrospectionOnly())
+ {
+ return TRUE;
+ }
+ }
+    // None of the return/argument types was loaded for introspection
+ return FALSE;
+} // FnPtrTypeDesc::IsIntrospectionOnly
+
+// Returns TRUE if any of the return or argument types is part of an assembly loaded for introspection.
+// Instantiations of generic types are also recursively checked.
+BOOL
+FnPtrTypeDesc::ContainsIntrospectionOnlyTypes() const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ const TypeHandle * rgRetAndArgTypes = GetRetAndArgTypes();
+ for (DWORD i = 0; i <= m_NumArgs; i++)
+ {
+ if (rgRetAndArgTypes[i].ContainsIntrospectionOnlyTypes())
+ {
+ return TRUE;
+ }
+ }
+    // None of the return/argument types contains types loaded for introspection
+ return FALSE;
+} // FnPtrTypeDesc::ContainsIntrospectionOnlyTypes
+
+#endif //DACCESS_COMPILE
+
+#if defined(FEATURE_NATIVE_IMAGE_GENERATION) && !defined(DACCESS_COMPILE)
+
+void FnPtrTypeDesc::Save(DataImage * image)
+{
+ STANDARD_VM_CONTRACT;
+
+ image->StoreStructure(
+ this,
+ sizeof(FnPtrTypeDesc) + (m_NumArgs * sizeof(TypeHandle)),
+ DataImage::ITEM_FPTR_TYPEDESC);
+}
+
+void FnPtrTypeDesc::Fixup(DataImage * image)
+{
+ STANDARD_VM_CONTRACT;
+
+ for (DWORD i = 0; i <= m_NumArgs; i++)
+ {
+ image->FixupTypeHandlePointerInPlace(
+ this,
+ (BYTE *)&m_RetAndArgTypes[i] - (BYTE *)this);
+ }
+}
+
+#endif //defined(FEATURE_NATIVE_IMAGE_GENERATION) && !defined(DACCESS_COMPILE)
+
+#ifdef DACCESS_COMPILE
+
+void
+ParamTypeDesc::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ DAC_ENUM_DTHIS();
+
+ PTR_MethodTable pTemplateMT = m_TemplateMT.GetValue();
+ if (pTemplateMT.IsValid())
+ {
+ pTemplateMT->EnumMemoryRegions(flags);
+ }
+
+ m_Arg.EnumMemoryRegions(flags);
+}
+
+void
+TypeVarTypeDesc::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ DAC_ENUM_DTHIS();
+
+ if (m_pModule.IsValid())
+ {
+ m_pModule->EnumMemoryRegions(flags, true);
+ }
+
+ if (m_numConstraints != (DWORD)-1)
+ {
+ PTR_TypeHandle constraint = m_constraints;
+ for (DWORD i = 0; i < m_numConstraints; i++)
+ {
+ if (constraint.IsValid())
+ {
+ constraint->EnumMemoryRegions(flags);
+ }
+ constraint++;
+ }
+ }
+} // TypeVarTypeDesc::EnumMemoryRegions
+
+void
+FnPtrTypeDesc::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ DAC_ENUM_DTHIS();
+
+    // Note: m_RetAndArgTypes holds the return type plus m_NumArgs argument types.
+    for (DWORD i = 0; i <= m_NumArgs; i++)
+ {
+ m_RetAndArgTypes[i].EnumMemoryRegions(flags);
+ }
+}
+
+#endif //DACCESS_COMPILE
diff --git a/src/vm/typedesc.h b/src/vm/typedesc.h
new file mode 100644
index 0000000000..7c95f59df6
--- /dev/null
+++ b/src/vm/typedesc.h
@@ -0,0 +1,708 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: typedesc.h
+//
+
+
+//
+
+//
+// ============================================================================
+
+
+#ifndef TYPEDESC_H
+#define TYPEDESC_H
+#include <specstrings.h>
+
+class TypeHandleList;
+
+/*************************************************************************/
+/* TypeDesc is a discriminated union of all types that cannot be directly
+   represented by a simple MethodTable*. The discriminator of the union is
+   currently the CorElementType enumeration. The subclasses of TypeDesc are
+   the possible variants of the union.
+
+
+   ParamTypeDescs only include byref, array and pointer types. They do NOT
+   include instantiations of generic types, which are represented by MethodTables.
+*/
+
+
+typedef DPTR(class TypeDesc) PTR_TypeDesc;
+
+class TypeDesc
+{
+public:
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+#ifndef DACCESS_COMPILE
+ TypeDesc(CorElementType type) {
+ LIMITED_METHOD_CONTRACT;
+
+ m_typeAndFlags = type;
+ }
+#endif
+
+    // This is the ELEMENT_TYPE* that would be used in the type sig for this type.
+    // For enums this is the underlying type.
+ inline CorElementType GetInternalCorElementType() {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (CorElementType) (m_typeAndFlags & 0xff);
+ }
+
+ // Get the exact parent (superclass) of this type
+ TypeHandle GetParent();
+
+    // Constructs the name of the type (for the given element kind, parameter
+    // type and rank) into ssBuff.
+ static void ConstructName(CorElementType kind,
+ TypeHandle param,
+ int rank,
+ SString &ssBuff);
+
+ void GetName(SString &ssBuf);
+
+ //-------------------------------------------------------------------
+ // CASTING
+ //
+ // There are two variants of the "CanCastTo" method:
+ //
+ // CanCastTo
+ // - restore encoded pointers on demand
+ // - might throw, might trigger GC
+ // - return type is boolean (FALSE = cannot cast, TRUE = can cast)
+ //
+ // CanCastToNoGC
+ // - do not restore encoded pointers on demand
+ // - does not throw, does not trigger GC
+ // - return type is three-valued (CanCast, CannotCast, MaybeCast)
+ // - MaybeCast indicates that the test tripped on an encoded pointer
+ // so the caller should now call CanCastTo if it cares
+ //
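+    // A typical caller on a no-GC path (illustrative sketch, hypothetical locals):
+    //
+    //     TypeHandle::CastResult res = pTD->CanCastToNoGC(th);
+    //     BOOL fCanCast = (res == TypeHandle::MaybeCast)
+    //         ? pTD->CanCastTo(th, NULL)      // may throw / trigger GC
+    //         : (res == TypeHandle::CanCast);
+    //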
+
+ BOOL CanCastTo(TypeHandle type, TypeHandlePairList *pVisited);
+ TypeHandle::CastResult CanCastToNoGC(TypeHandle type);
+
+ static BOOL CanCastParam(TypeHandle fromParam, TypeHandle toParam, TypeHandlePairList *pVisited);
+ static TypeHandle::CastResult CanCastParamNoGC(TypeHandle fromParam, TypeHandle toParam);
+
+#ifndef DACCESS_COMPILE
+ BOOL IsEquivalentTo(TypeHandle type COMMA_INDEBUG(TypeHandlePairList *pVisited));
+#endif
+
+ // BYREF
+ BOOL IsByRef() { // BYREFS are often treated specially
+ WRAPPER_NO_CONTRACT;
+
+ return(GetInternalCorElementType() == ELEMENT_TYPE_BYREF);
+ }
+
+ // PTR
+ BOOL IsPointer() {
+ WRAPPER_NO_CONTRACT;
+
+ return(GetInternalCorElementType() == ELEMENT_TYPE_PTR);
+ }
+
+ // ARRAY, SZARRAY
+ BOOL IsArray();
+
+ // VAR, MVAR
+ BOOL IsGenericVariable();
+
+ // ELEMENT_TYPE_FNPTR
+ BOOL IsFnPtr();
+
+ // VALUETYPE
+ BOOL IsNativeValueType();
+
+ // Is actually ParamTypeDesc (ARRAY, SZARRAY, BYREF, PTR)
+ BOOL HasTypeParam();
+
+#ifdef FEATURE_PREJIT
+ void Save(DataImage *image);
+ void Fixup(DataImage *image);
+
+ BOOL NeedsRestore(DataImage *image)
+ {
+ WRAPPER_NO_CONTRACT;
+ return ComputeNeedsRestore(image, NULL);
+ }
+
+ BOOL ComputeNeedsRestore(DataImage *image, TypeHandleList *pVisited);
+#endif
+
+ void DoRestoreTypeKey();
+ void Restore();
+ BOOL IsRestored();
+ BOOL IsRestored_NoLogging();
+ void SetIsRestored();
+
+ inline BOOL HasUnrestoredTypeKey() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return (m_typeAndFlags & TypeDesc::enum_flag_UnrestoredTypeKey) != 0;
+ }
+
+ BOOL HasTypeEquivalence() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_typeAndFlags & TypeDesc::enum_flag_HasTypeEquivalence) != 0;
+ }
+
+ BOOL IsFullyLoaded() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_typeAndFlags & TypeDesc::enum_flag_IsNotFullyLoaded) == 0;
+ }
+
+ VOID SetIsFullyLoaded()
+ {
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockAnd(&m_typeAndFlags, ~TypeDesc::enum_flag_IsNotFullyLoaded);
+ }
+
+ ClassLoadLevel GetLoadLevel();
+
+ void DoFullyLoad(Generics::RecursionGraph *pVisited, ClassLoadLevel level,
+ DFLPendingList *pPending, BOOL *pfBailed, const InstantiationContext *pInstContext);
+
+ // The module that defined the underlying type
+ PTR_Module GetModule();
+
+ // The ngen'ed module where this type-desc lives
+ PTR_Module GetZapModule();
+
+ // The module where this type lives for the purposes of loading and prejitting
+ // See ComputeLoaderModule for more information
+ PTR_Module GetLoaderModule();
+
+ // The assembly that defined this type (== GetModule()->GetAssembly())
+ Assembly* GetAssembly();
+
+ PTR_MethodTable GetMethodTable(); // only meaningful for ParamTypeDesc
+ TypeHandle GetTypeParam(); // only meaningful for ParamTypeDesc
+ Instantiation GetClassOrArrayInstantiation(); // only meaningful for ParamTypeDesc; see above
+
+ TypeHandle GetBaseTypeParam(); // only allowed for ParamTypeDesc, helper method used to avoid recursion
+
+ // Note that if the TypeDesc, e.g. a function pointer type, involves parts that may
+ // come from either a SharedDomain or an AppDomain then special rules apply to GetDomain.
+ // It returns the SharedDomain if all the
+ // constituent parts of the type are SharedDomain (i.e. domain-neutral),
+ // and returns an AppDomain if any of the parts are from an AppDomain,
+ // i.e. are domain-bound. If any of the parts are domain-bound
+ // then they will all belong to the same domain.
+ PTR_BaseDomain GetDomain();
+ BOOL IsDomainNeutral();
+
+#ifndef BINDER
+ PTR_LoaderAllocator GetLoaderAllocator()
+ {
+ SUPPORTS_DAC;
+
+ return GetLoaderModule()->GetLoaderAllocator();
+ }
+
+ protected:
+#endif // !BINDER
+ // See methodtable.h for details of the flags with the same name there
+ enum
+ {
+ enum_flag_NeedsRestore = 0x00000100, // Only used during ngen
+ enum_flag_PreRestored = 0x00000200, // Only used during ngen
+ enum_flag_Unrestored = 0x00000400,
+ enum_flag_UnrestoredTypeKey = 0x00000800,
+ enum_flag_IsNotFullyLoaded = 0x00001000,
+ enum_flag_DependenciesLoaded = 0x00002000,
+ enum_flag_HasTypeEquivalence = 0x00004000
+ };
+ //
+ // Low-order 8 bits of this flag are used to store the CorElementType, which
+ // discriminates what kind of TypeDesc we are
+ //
+ // The remaining bits are available for flags
+ //
+ DWORD m_typeAndFlags;
+};
+
+
+/*************************************************************************/
+// This variant is used for parameterized types that have exactly one argument
+// type. This includes arrays, byrefs, pointers.
+
+typedef DPTR(class ParamTypeDesc) PTR_ParamTypeDesc;
+
+
+class ParamTypeDesc : public TypeDesc {
+ friend class TypeDesc;
+ friend class JIT_TrialAlloc;
+ friend class CheckAsmOffsets;
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+#ifdef BINDER
+ friend class MdilModule;
+#endif
+
+public:
+#ifndef DACCESS_COMPILE
+ ParamTypeDesc(CorElementType type, MethodTable* pMT, TypeHandle arg)
+ : TypeDesc(type), m_Arg(arg), m_hExposedClassObject(0) {
+
+ LIMITED_METHOD_CONTRACT;
+
+ m_TemplateMT.SetValue(pMT);
+
+ // ParamTypeDescs start out life not fully loaded
+ m_typeAndFlags |= TypeDesc::enum_flag_IsNotFullyLoaded;
+
+ // Param type descs can only be equivalent if their constituent bits are equivalent.
+ if (arg.HasTypeEquivalence())
+ {
+ m_typeAndFlags |= TypeDesc::enum_flag_HasTypeEquivalence;
+ }
+
+ INDEBUGIMPL(Verify());
+ }
+#endif
+
+ INDEBUGIMPL(BOOL Verify();)
+
+ OBJECTREF GetManagedClassObject();
+#ifndef BINDER
+ OBJECTREF GetManagedClassObjectIfExists()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF objRet = NULL;
+ GET_LOADERHANDLE_VALUE_FAST(GetLoaderAllocator(), m_hExposedClassObject, &objRet);
+ return objRet;
+ }
+ OBJECTREF GetManagedClassObjectFast()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ OBJECTREF objRet = NULL;
+ LoaderAllocator::GetHandleValueFast(m_hExposedClassObject, &objRet);
+ return objRet;
+ }
+#endif
+
+ TypeHandle GetModifiedType()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_Arg;
+ }
+
+ TypeHandle GetTypeParam();
+
+#ifdef FEATURE_PREJIT
+ void Save(DataImage *image);
+ void Fixup(DataImage *image);
+ BOOL ComputeNeedsRestore(DataImage *image, TypeHandleList *pVisited);
+#endif
+
+ BOOL OwnsTemplateMethodTable();
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ friend class StubLinkerCPU;
+
+#ifdef FEATURE_ARRAYSTUB_AS_IL
+ friend class ArrayOpLinker;
+#endif
+protected:
+ // the m_typeAndFlags field in TypeDesc tell what kind of parameterized type we have
+ FixupPointer<PTR_MethodTable> m_TemplateMT; // The shared method table, some variants do not use this field (it is null)
+ TypeHandle m_Arg; // The type that is being modified
+ LOADERHANDLE m_hExposedClassObject; // handle back to the internal reflection Type object
+};
+
+
+/*************************************************************************/
+/* An ArrayTypeDesc represents an array of some element type. */
+
+class ArrayTypeDesc : public ParamTypeDesc
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+public:
+#ifndef DACCESS_COMPILE
+ ArrayTypeDesc(MethodTable* arrayMT, TypeHandle elementType) :
+ ParamTypeDesc(arrayMT->IsMultiDimArray() ? ELEMENT_TYPE_ARRAY : ELEMENT_TYPE_SZARRAY, arrayMT, elementType)
+#ifdef FEATURE_COMINTEROP
+ , m_pCCWTemplate(NULL)
+#endif // FEATURE_COMINTEROP
+ {
+ STATIC_CONTRACT_SO_TOLERANT;
+ WRAPPER_NO_CONTRACT;
+#ifndef BINDER
+ INDEBUG(Verify());
+#endif
+ }
+
+//private: TypeHandle m_Arg; // The type that is being modified
+
+
+ // placement new operator
+ void* operator new(size_t size, void* spot) { return (spot); }
+
+#endif
+
+ TypeHandle GetArrayElementTypeHandle() {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ return GetTypeParam();
+ }
+
+#ifndef CLR_STANDALONE_BINDER
+ unsigned GetRank() {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ if (GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY)
+ return 1;
+ else
+ return dac_cast<PTR_ArrayClass>(GetMethodTable()->GetClass())->GetRank();
+ }
+#else
+ unsigned GetRank();
+#endif
+
+ MethodTable* GetParent()
+ {
+#ifndef BINDER
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(!m_TemplateMT.IsNull());
+ _ASSERTE(m_TemplateMT.GetValue()->IsArray());
+ _ASSERTE(m_TemplateMT.GetValue()->ParentEquals(g_pArrayClass));
+
+ return g_pArrayClass;
+#else
+        _ASSERTE(0);
+        return NULL;    // unreachable in practice; keeps all paths returning a value
+#endif
+ }
+
+#ifdef FEATURE_COMINTEROP
+ ComCallWrapperTemplate *GetComCallWrapperTemplate()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pCCWTemplate;
+ }
+
+ BOOL SetComCallWrapperTemplate(ComCallWrapperTemplate *pTemplate)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ TypeHandle th(this);
+ g_IBCLogger.LogTypeMethodTableWriteableAccess(&th);
+
+ return (InterlockedCompareExchangeT(EnsureWritablePages(&m_pCCWTemplate), pTemplate, NULL) == NULL);
+ }
+#endif // FEATURE_COMINTEROP
+
+ INDEBUG(BOOL Verify();)
+
+#ifdef FEATURE_PREJIT
+ void Fixup(DataImage *image);
+#endif
+
+ MethodTable * GetTemplateMethodTable() {
+ WRAPPER_NO_CONTRACT;
+ MethodTable * pTemplateMT = m_TemplateMT.GetValue();
+ _ASSERTE(pTemplateMT->IsArray());
+ return pTemplateMT;
+ }
+
+ TADDR GetTemplateMethodTableMaybeTagged() {
+ WRAPPER_NO_CONTRACT;
+ return m_TemplateMT.GetValueMaybeTagged();
+ }
+
+#ifdef FEATURE_COMINTEROP
+ ComCallWrapperTemplate *m_pCCWTemplate;
+#endif // FEATURE_COMINTEROP
+};
+
+/*************************************************************************/
+// These are for verification of generic code and reflection over generic code.
+// Each TypeVarTypeDesc represents a class or method type variable, as specified by a GenericParam entry.
+// The type variables are tied back to the class or method that *defines* them.
+// This is done through typedef or methoddef tokens.
+
+class TypeVarTypeDesc : public TypeDesc
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+#ifdef BINDER
+ friend class MdilModule;
+#endif
+public:
+
+#ifndef DACCESS_COMPILE
+
+ TypeVarTypeDesc(PTR_Module pModule, mdToken typeOrMethodDef, unsigned int index, mdGenericParam token) :
+ TypeDesc(TypeFromToken(typeOrMethodDef) == mdtTypeDef ? ELEMENT_TYPE_VAR : ELEMENT_TYPE_MVAR)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(TypeFromToken(typeOrMethodDef) == mdtTypeDef || TypeFromToken(typeOrMethodDef) == mdtMethodDef);
+ PRECONDITION(index >= 0);
+ PRECONDITION(TypeFromToken(token) == mdtGenericParam);
+ }
+ CONTRACTL_END;
+
+ m_pModule = pModule;
+ m_typeOrMethodDef = typeOrMethodDef;
+ m_token = token;
+ m_index = index;
+ m_hExposedClassObject = 0;
+ m_constraints = NULL;
+ m_numConstraints = (DWORD)-1;
+ }
+#endif // #ifndef DACCESS_COMPILE
+
+ // placement new operator
+ void* operator new(size_t size, void* spot) { LIMITED_METHOD_CONTRACT; return (spot); }
+
+ PTR_Module GetModule()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_pModule;
+ }
+
+ unsigned int GetIndex()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_index;
+ }
+
+ mdGenericParam GetToken()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_token;
+ }
+
+ mdToken GetTypeOrMethodDef()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_typeOrMethodDef;
+ }
+
+ OBJECTREF GetManagedClassObject();
+#ifndef BINDER
+ OBJECTREF GetManagedClassObjectIfExists()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF objRet = NULL;
+ GET_LOADERHANDLE_VALUE_FAST(GetLoaderAllocator(), m_hExposedClassObject, &objRet);
+ return objRet;
+ }
+ OBJECTREF GetManagedClassObjectFast()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ OBJECTREF objRet = NULL;
+ LoaderAllocator::GetHandleValueFast(m_hExposedClassObject, &objRet);
+ return objRet;
+ }
+#endif
+
+    // Load the owning type. Note that the result is not guaranteed to be fully loaded
+ MethodDesc * LoadOwnerMethod();
+ TypeHandle LoadOwnerType();
+
+ BOOL ConstraintsLoaded() { LIMITED_METHOD_CONTRACT; return m_numConstraints != (DWORD)-1; }
+
+ // Return NULL if no constraints are specified
+ // Return an array of type handles if constraints are specified,
+ // with the number of constraints returned in pNumConstraints
+ TypeHandle* GetCachedConstraints(DWORD *pNumConstraints);
+ TypeHandle* GetConstraints(DWORD *pNumConstraints, ClassLoadLevel level = CLASS_LOADED);
+
+ // Load the constraints if not already loaded
+ void LoadConstraints(ClassLoadLevel level = CLASS_LOADED);
+
+ // Check the constraints on this type parameter hold in the supplied context for the supplied type
+ BOOL SatisfiesConstraints(SigTypeContext *pTypeContext, TypeHandle thArg,
+ const InstantiationContext *pInstContext = NULL);
+
+ // Check whether the constraints on this type force it to be a reference type (i.e. it is impossible
+ // to instantiate it with a value type).
+ BOOL ConstrainedAsObjRef();
+
+ // Check whether the constraints on this type force it to be a value type (i.e. it is impossible to
+ // instantiate it with a reference type).
+ BOOL ConstrainedAsValueType();
+
+#ifdef FEATURE_PREJIT
+ void Save(DataImage *image);
+ void Fixup(DataImage *image);
+#endif // FEATURE_PREJIT
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+protected:
+ BOOL ConstrainedAsObjRefHelper();
+
+ // Module containing the generic definition, also the loader module for this type desc
+ PTR_Module m_pModule;
+
+ // Declaring type or method
+ mdToken m_typeOrMethodDef;
+
+ // Constraints, determined on first call to GetConstraints
+ Volatile<DWORD> m_numConstraints; // -1 until number has been determined
+ PTR_TypeHandle m_constraints;
+
+ // slot index back to the internal reflection Type object
+ LOADERHANDLE m_hExposedClassObject;
+
+ // token for GenericParam entry
+ mdGenericParam m_token;
+
+ // index within declaring type or method, numbered from zero
+ unsigned int m_index;
+};
+
+/*************************************************************************/
+/* represents a function type. */
+
+typedef SPTR(class FnPtrTypeDesc) PTR_FnPtrTypeDesc;
+
+class FnPtrTypeDesc : public TypeDesc
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+public:
+#ifndef DACCESS_COMPILE
+ FnPtrTypeDesc(BYTE callConv, DWORD numArgs, TypeHandle * retAndArgTypes)
+ : TypeDesc(ELEMENT_TYPE_FNPTR), m_NumArgs(numArgs), m_CallConv(callConv)
+ {
+ LIMITED_METHOD_CONTRACT;
+ for (DWORD i = 0; i <= numArgs; i++)
+ {
+ m_RetAndArgTypes[i] = retAndArgTypes[i];
+ }
+ }
+#endif //!DACCESS_COMPILE
+
+ DWORD GetNumArgs()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_NumArgs;
+ }
+
+ BYTE GetCallConv()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ _ASSERTE(FitsIn<BYTE>(m_CallConv));
+ return static_cast<BYTE>(m_CallConv);
+ }
+
+ // Return a pointer to the types of the signature, return type followed by argument types
+ // The type handles are guaranteed to be fixed up
+ TypeHandle * GetRetAndArgTypes();
+ // As above, but const version
+ const TypeHandle * GetRetAndArgTypes() const
+ {
+ WRAPPER_NO_CONTRACT;
+ return const_cast<FnPtrTypeDesc *>(this)->GetRetAndArgTypes();
+ }
+
+ // As above, but the type handles might be zap-encodings that need fixing up explicitly
+ PTR_TypeHandle GetRetAndArgTypesPointer()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return PTR_TypeHandle(m_RetAndArgTypes);
+ }
+
+#ifndef DACCESS_COMPILE
+
+ // Returns TRUE if all return and argument types are externally visible.
+ BOOL IsExternallyVisible() const;
+    // Returns TRUE if any of the return or argument types is part of an assembly loaded for introspection.
+ BOOL IsIntrospectionOnly() const;
+    // Returns TRUE if any of the return or argument types is part of an assembly loaded for introspection.
+ // Instantiations of generic types are also recursively checked.
+ BOOL ContainsIntrospectionOnlyTypes() const;
+
+#endif //DACCESS_COMPILE
+
+#ifdef FEATURE_PREJIT
+ void Save(DataImage *image);
+ void Fixup(DataImage *image);
+#endif //FEATURE_PREJIT
+
+#ifdef DACCESS_COMPILE
+    static ULONG32 DacSize(TADDR addr)
+    {
+        DWORD numArgs = *PTR_DWORD(addr + offsetof(FnPtrTypeDesc, m_NumArgs));
+        // m_RetAndArgTypes holds the return type plus numArgs argument types,
+        // i.e. numArgs + 1 entries in total.
+        return (offsetof(FnPtrTypeDesc, m_RetAndArgTypes) +
+            ((numArgs + 1) * sizeof(TypeHandle)));
+    }
+
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif //DACCESS_COMPILE
+
+protected:
+ // Number of arguments
+ DWORD m_NumArgs;
+
+ // Calling convention (actually just a single byte)
+ DWORD m_CallConv;
+
+ // Return type first, then argument types
+ TypeHandle m_RetAndArgTypes[1];
+}; // class FnPtrTypeDesc
+
+#endif // TYPEDESC_H
diff --git a/src/vm/typedesc.inl b/src/vm/typedesc.inl
new file mode 100644
index 0000000000..371922fcbd
--- /dev/null
+++ b/src/vm/typedesc.inl
@@ -0,0 +1,71 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: typedesc.inl
+//
+
+
+//
+
+//
+// ============================================================================
+
+
+#ifndef _TYPEDESC_INL_
+#define _TYPEDESC_INL_
+
+inline PTR_MethodTable TypeDesc::GetMethodTable() {
+
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsGenericVariable())
+ return NULL;
+
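+    // Function pointer types have no MethodTable of their own; they are represented
+    // by the MethodTable for the native unsigned integer type (ELEMENT_TYPE_U).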
+ if (GetInternalCorElementType() == ELEMENT_TYPE_FNPTR)
+ return MscorlibBinder::GetElementType(ELEMENT_TYPE_U);
+
+ _ASSERTE(HasTypeParam());
+ ParamTypeDesc* asParam = dac_cast<PTR_ParamTypeDesc>(this);
+
+ if (GetInternalCorElementType() == ELEMENT_TYPE_VALUETYPE)
+ return dac_cast<PTR_MethodTable>(asParam->m_Arg.AsMethodTable());
+ else
+ return(asParam->m_TemplateMT.GetValue());
+}
+
+inline TypeHandle TypeDesc::GetTypeParam() {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsGenericVariable() || IsFnPtr())
+ return TypeHandle();
+
+ _ASSERTE(HasTypeParam());
+ ParamTypeDesc* asParam = dac_cast<PTR_ParamTypeDesc>(this);
+ return(asParam->m_Arg);
+}
+
+inline TypeHandle ParamTypeDesc::GetTypeParam() {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return(this->m_Arg);
+}
+
+inline Instantiation TypeDesc::GetClassOrArrayInstantiation() {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (GetInternalCorElementType() != ELEMENT_TYPE_FNPTR)
+ {
+ ParamTypeDesc* asParam = dac_cast<PTR_ParamTypeDesc>(this);
+ return Instantiation(&asParam->m_Arg, 1);
+ }
+ else
+ return Instantiation();
+}
+
+
+#endif // _TYPEDESC_INL_
+
+
+
diff --git a/src/vm/typeequivalencehash.cpp b/src/vm/typeequivalencehash.cpp
new file mode 100644
index 0000000000..b00fa98331
--- /dev/null
+++ b/src/vm/typeequivalencehash.cpp
@@ -0,0 +1,101 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+// Hash table that caches the results of type equivalence checks between pairs of types
+// (see FEATURE_TYPEEQUIVALENCE).
+//
+
+#include "common.h"
+#include "typeequivalencehash.hpp"
+#include "ngenhash.inl"
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+TypeEquivalenceHashTable::EquivalenceMatch TypeEquivalenceHashTable::CheckEquivalence(TypeHandle thA, TypeHandle thB)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LookupContext lookupContext;
+ NgenHashValue hash = TypeEquivalenceEntry::HashTypeHandles(thA, thB);
+
+ PTR_TypeEquivalenceEntry search = BaseFindFirstEntryByHash(hash, &lookupContext);
+ while (search != NULL)
+ {
+ if (search->Match(thA, thB))
+ {
+ return search->GetEquivalence() ? Match : NoMatch;
+ }
+
+ search = BaseFindNextEntryByHash(&lookupContext);
+ }
+ return MatchUnknown;
+}
+
+#ifndef DACCESS_COMPILE
+/*static*/
+TypeEquivalenceHashTable *TypeEquivalenceHashTable::Create(AppDomain *pAppDomain, DWORD dwNumBuckets, CrstExplicitInit *pCrst)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ AllocMemTracker amt;
+ LoaderHeap *pHeap = pAppDomain->GetLowFrequencyHeap();
+ TypeEquivalenceHashTable *pThis = (TypeEquivalenceHashTable*)amt.Track(pHeap->AllocMem((S_SIZE_T)sizeof(TypeEquivalenceHashTable)));
+
+    // The base class gets initialized through constructor chaining. We allocated the hash instance on the
+    // loader heap instead of via operator new, so use placement new to run the constructors now.
+ new (pThis) TypeEquivalenceHashTable(pHeap, dwNumBuckets, pCrst);
+ amt.SuppressRelease();
+
+ return pThis;
+}
+
+void TypeEquivalenceHashTable::RecordEquivalence(TypeHandle thA, TypeHandle thB, TypeEquivalenceHashTable::EquivalenceMatch match)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(match != TypeEquivalenceHashTable::MatchUnknown);
+ }
+ CONTRACTL_END;
+
+ CrstHolder ch(m_pHashTableCrst);
+
+    // Did another thread win a race and record the equivalence for this pair first?
+    // If so, the cached answer must agree with ours; just return.
+ EquivalenceMatch checkedMatch = CheckEquivalence(thA, thB);
+ if (checkedMatch != TypeEquivalenceHashTable::MatchUnknown)
+ {
+ _ASSERTE(checkedMatch == match);
+ return;
+ }
+
+ AllocMemTracker amt;
+ PTR_TypeEquivalenceEntry pNewEntry = BaseAllocateEntry(&amt);
+ amt.SuppressRelease();
+
+ pNewEntry->SetData(thA, thB, match == TypeEquivalenceHashTable::Match ? true : false);
+ NgenHashValue hash = TypeEquivalenceEntry::HashTypeHandles(thA, thB);
+
+ BaseInsertEntry(hash, pNewEntry);
+}
+#endif // !DACCESS_COMPILE
+#endif // FEATURE_TYPEEQUIVALENCE
+
diff --git a/src/vm/typeequivalencehash.hpp b/src/vm/typeequivalencehash.hpp
new file mode 100644
index 0000000000..73912ba114
--- /dev/null
+++ b/src/vm/typeequivalencehash.hpp
@@ -0,0 +1,117 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+// Hash table that caches the results of type equivalence checks between pairs of types
+// (see FEATURE_TYPEEQUIVALENCE).
+//
+
+#ifndef __TYPEEQUIVALENCE_HASH_INCLUDED
+#define __TYPEEQUIVALENCE_HASH_INCLUDED
+
+#include "ngenhash.h"
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+
+// The type of each entry in the hash.
+typedef DPTR(struct TypeEquivalenceEntry) PTR_TypeEquivalenceEntry;
+typedef struct TypeEquivalenceEntry
+{
+ static NgenHashValue HashTypeHandles(TypeHandle thA, TypeHandle thB)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ UINT_PTR aPtr = thA.AsTAddr();
+ UINT_PTR bPtr = thB.AsTAddr();
+ DWORD hash = (DWORD)((aPtr + bPtr) >> 3);
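+        // Summing the two addresses makes the hash symmetric in (thA, thB), so a
+        // cached pair is found regardless of argument order; Match() below likewise
+        // accepts both orderings.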
+
+ return hash;
+ }
+
+ bool Match(TypeHandle thA, TypeHandle thB)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (((thA == m_thA) && (thB == m_thB)) ||
+ ((thB == m_thA) && (thA == m_thB)));
+ }
+
+ void SetData(TypeHandle thA, TypeHandle thB, bool fEquivalent)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_thA = thA;
+ m_thB = thB;
+ m_fEquivalent = fEquivalent;
+ }
+
+ bool GetEquivalence()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fEquivalent;
+ }
+
+private:
+
+ TypeHandle m_thA;
+ TypeHandle m_thB;
+ bool m_fEquivalent;
+} TypeEquivalenceEntry_t;
+
+// The hash type itself. All common logic is provided by the NgenHashTable templated base class. See
+// NgenHash.h for details.
+typedef DPTR(class TypeEquivalenceHashTable) PTR_TypeEquivalenceHashTable;
+class TypeEquivalenceHashTable : public NgenHashTable<TypeEquivalenceHashTable, TypeEquivalenceEntry, 4>
+{
+ friend class NgenHashTable<TypeEquivalenceHashTable, TypeEquivalenceEntry, 4>;
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+public:
+    enum EquivalenceMatch
+ {
+ MatchUnknown,
+ Match,
+ NoMatch
+ };
+
+ // The LookupContext type we export to track GetValue/FindNextNestedClass enumerations is simply a rename
+    // of the base class's hash value enumerator.
+ typedef NgenHashTable<TypeEquivalenceHashTable, TypeEquivalenceEntry, 4>::LookupContext LookupContext;
+
+#ifndef DACCESS_COMPILE
+ static TypeEquivalenceHashTable *Create(AppDomain *pDomain, DWORD dwNumBuckets, CrstExplicitInit *pCrst);
+ void RecordEquivalence(TypeHandle thA, TypeHandle thB, EquivalenceMatch match);
+#endif
+ EquivalenceMatch CheckEquivalence(TypeHandle thA, TypeHandle thB);
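+
+    // Expected usage (a sketch of the intended pattern): consult CheckEquivalence first;
+    // on MatchUnknown compute the answer and publish it via RecordEquivalence so that
+    // later queries hit the cache.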
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegionsForEntry(TypeEquivalenceEntry_t *pEntry, CLRDataEnumMemoryFlags flags) { return; }
+#endif
+
+#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
+private:
+
+ bool ShouldSave(DataImage *pImage, TypeEquivalenceEntry_t *pEntry) { return false; }
+ bool IsHotEntry(TypeEquivalenceEntry_t *pEntry, CorProfileData *pProfileData) { return false; }
+ bool SaveEntry(DataImage *pImage, CorProfileData *pProfileData, TypeEquivalenceEntry_t *pOldEntry, TypeEquivalenceEntry_t *pNewEntry, EntryMappingTable *pMap) { return true; }
+ void FixupEntry(DataImage *pImage, TypeEquivalenceEntry_t *pEntry, void *pFixupBase, DWORD cbFixupOffset) { return; }
+#endif // FEATURE_PREJIT && !DACCESS_COMPILE
+
+private:
+#ifndef DACCESS_COMPILE
+ TypeEquivalenceHashTable(LoaderHeap *pHeap, DWORD cInitialBuckets, CrstExplicitInit *pCrst) :
+ NgenHashTable<TypeEquivalenceHashTable, TypeEquivalenceEntry, 4>(NULL, pHeap, cInitialBuckets),
+ m_pHashTableCrst(pCrst)
+ {
+ }
+#endif
+ CrstExplicitInit* m_pHashTableCrst;
+};
+
+#endif // FEATURE_TYPEEQUIVALENCE
+
+#endif // !__TYPEEQUIVALENCE_HASH_INCLUDED
diff --git a/src/vm/typehandle.cpp b/src/vm/typehandle.cpp
new file mode 100644
index 0000000000..e8bcbde4b6
--- /dev/null
+++ b/src/vm/typehandle.cpp
@@ -0,0 +1,2086 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: typehandle.cpp
+//
+
+
+//
+
+//
+// ============================================================================
+
+
+#include "common.h"
+#include "class.h"
+#include "typehandle.h"
+#include "eeconfig.h"
+#include "generics.h"
+#include "typedesc.h"
+#include "typekey.h"
+#include "typestring.h"
+#include "classloadlevel.h"
+#include "array.h"
+#ifdef FEATURE_PREJIT
+#include "zapsig.h"
+#endif
+
+// This method is not called by all the constructors of TypeHandle for the
+// following reason: SystemDomain::LoadBaseSystemClasses() loads
+// TYPE__OBJECT_ARRAY, which causes the following issues:
+//
+// If mscorlib is JIT-compiled, Module::CreateArrayMethodTable calls
+// TypeString::AppendName() with a TypeHandle that wraps the MethodTable
+// being created.
+// If mscorlib is ngenned, Module::RestoreMethodTablePointer() needs
+// a TypeHandle to call ClassLoader::EnsureLoaded().
+//
+
+#if 0
+
+void TypeHandle::NormalizeUnsharedArrayMT()
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT; // @TODO: This is probably incorrect
+
+ if (IsNull() || IsTypeDesc())
+ return;
+
+ if (!AsMethodTable()->IsArray())
+ return;
+
+ // This is an array type with a unique unshared MethodTable.
+ // We know that there must exist an ArrayTypeDesc for it, and it
+ // must have been restored.
+ // Let's look it up and use it.
+
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ TypeHandle elemType = AsMethodTable()->GetApproxArrayElementTypeHandle();
+ CorElementType kind = AsMethodTable()->GetInternalCorElementType();
+ unsigned rank = AsMethodTable()->GetRank();
+
+ // @todo This should be turned into a probe with a hard SO when we have one
+ CONTRACT_VIOLATION(SOToleranceViolation);
+ // == FailIfNotLoadedOrNotRestored
+ TypeHandle arrayType = ClassLoader::LoadArrayTypeThrowing( elemType,
+ kind,
+ rank,
+ ClassLoader::DontLoadTypes);
+ CONSISTENCY_CHECK(!arrayType.IsNull() && arrayType.IsArray());
+
+ //
+ // Update the current TypeHandle to use the ArrayTypeDesc
+ //
+ m_asPtr = arrayType.AsPtr();
+
+ INDEBUGIMPL(Verify());
+}
+
+#endif
+
+#ifdef _DEBUG_IMPL
+
+BOOL TypeHandle::Verify()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+ STATIC_CONTRACT_DEBUG_ONLY;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+ if (IsNull())
+ return(TRUE);
+
+ // If you try to do IBC logging of a type being created, the type
+ // will look inconsistent. IBC logging knows to filter out such types.
+ if (g_IBCLogger.InstrEnabled())
+ return TRUE;
+
+ if (!IsRestored_NoLogging())
+ return TRUE;
+
+ if (!IsTypeDesc())
+ {
+ _ASSERTE(AsMethodTable()->SanityCheck()); // Sane method table
+
+ // @TODO: See TypeHandle::IsArrayType() for an explanation
+ // of why this assert is commented out.
+ //
+ // _ASSERTE(!AsMethodTable()->IsArray());
+ }
+ else
+ {
+ if (IsArray())
+ AsArray()->Verify();
+ }
+ return(TRUE);
+}
+
+#endif // _DEBUG_IMPL
+
+unsigned TypeHandle::GetSize() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ CorElementType type = GetInternalCorElementType();
+
+ if (type == ELEMENT_TYPE_VALUETYPE)
+ {
+ if (IsTypeDesc())
+ return(AsNativeValueType()->GetNativeSize());
+ else
+ return(AsMethodTable()->GetNumInstanceFieldBytes());
+ }
+
+ return(GetSizeForCorElementType(type));
+}
+
+PTR_Module TypeHandle::GetModule() const {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc())
+ return AsTypeDesc()->GetModule();
+ return(AsMethodTable()->GetModule());
+}
+
+Assembly* TypeHandle::GetAssembly() const {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc())
+ return AsTypeDesc()->GetAssembly();
+ return(AsMethodTable()->GetAssembly());
+}
+
+BOOL TypeHandle::IsArray() const {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return(IsTypeDesc() && AsTypeDesc()->IsArray());
+}
+
+BOOL TypeHandle::IsArrayType() const {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc())
+ {
+ return AsTypeDesc()->IsArray();
+ }
+ else
+ {
+ return AsMethodTable()->IsArray();
+ }
+}
+
+BOOL TypeHandle::IsGenericVariable() const {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return(IsTypeDesc() && CorTypeInfo::IsGenericVariable_NoThrow(AsTypeDesc()->GetInternalCorElementType()));
+}
+
+BOOL TypeHandle::HasTypeParam() const {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (!IsTypeDesc()) return FALSE;
+
+ CorElementType etype = AsTypeDesc()->GetInternalCorElementType();
+ return(CorTypeInfo::IsModifier_NoThrow(etype) || etype == ELEMENT_TYPE_VALUETYPE);
+}
+
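+// For an open type, returns the module that defines one of the generic variables
+// the type is built from; returns NULL for fully closed types.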
+Module *TypeHandle::GetDefiningModuleForOpenType() const
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ Module* returnValue = NULL;
+
+ INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(goto Exit;);
+
+ if (IsGenericVariable())
+ {
+ PTR_TypeVarTypeDesc pTyVar = dac_cast<PTR_TypeVarTypeDesc>(AsTypeDesc());
+ returnValue = pTyVar->GetModule();
+ goto Exit;
+ }
+
+ if (HasTypeParam())
+ {
+ returnValue = GetTypeParam().GetDefiningModuleForOpenType();
+ }
+ else if (HasInstantiation())
+ {
+ returnValue = GetMethodTable()->GetDefiningModuleForOpenType();
+ }
+Exit:
+ ;
+ END_INTERIOR_STACK_PROBE;
+
+ return returnValue;
+}
+
+BOOL TypeHandle::ContainsGenericVariables(BOOL methodOnly /*=FALSE*/) const
+{
+ STATIC_CONTRACT_SO_TOLERANT;
+ STATIC_CONTRACT_NOTHROW;
+ SUPPORTS_DAC;
+
+ if (IsTypeDesc())
+ {
+ if (IsGenericVariable())
+ {
+ if (!methodOnly)
+ return TRUE;
+
+ PTR_TypeVarTypeDesc pTyVar = dac_cast<PTR_TypeVarTypeDesc>(AsTypeDesc());
+ return TypeFromToken(pTyVar->GetTypeOrMethodDef()) == mdtMethodDef;
+ }
+
+ if (HasTypeParam())
+ {
+ return GetTypeParam().ContainsGenericVariables(methodOnly);
+ }
+ }
+ else if (HasInstantiation())
+ {
+ if (GetMethodTable()->ContainsGenericVariables(methodOnly))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+//@GENERICS:
+// Return the number of type parameters in the instantiation of an instantiated type
+// or the number of type parameters to a generic type
+// Return 0 otherwise.
+DWORD TypeHandle::GetNumGenericArgs() const {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc())
+ return 0;
+ else
+ return GetMethodTable()->GetNumGenericArgs();
+}
+
+BOOL TypeHandle::IsGenericTypeDefinition() const {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (!IsTypeDesc())
+ return AsMethodTable()->IsGenericTypeDefinition();
+ else
+ return FALSE;
+}
+
+PTR_MethodTable TypeHandle::GetCanonicalMethodTable() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc())
+ {
+ PTR_MethodTable pMT = AsTypeDesc()->GetMethodTable();
+ if (pMT != NULL)
+ pMT = pMT->GetCanonicalMethodTable();
+ return pMT;
+ }
+ else
+ {
+ return AsMethodTable()->GetCanonicalMethodTable();
+ }
+}
+
+// Obtain instantiation from an instantiated type or a pointer to the
+// element type of an array or pointer type
+Instantiation TypeHandle::GetClassOrArrayInstantiation() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc())
+ {
+ return AsTypeDesc()->GetClassOrArrayInstantiation();
+ }
+ else if (IsArrayType())
+ {
+ return AsMethodTable()->GetArrayInstantiation();
+ }
+ else
+ {
+ return GetInstantiation();
+ }
+}
+
+Instantiation TypeHandle::GetInstantiationOfParentClass(MethodTable *pWhichParent) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return GetMethodTable()->GetInstantiationOfParentClass(pWhichParent);
+}
+
+// Obtain element type from an array or pointer type
+TypeHandle TypeHandle::GetTypeParam() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc())
+ return AsTypeDesc()->GetTypeParam();
+ else
+ return TypeHandle();
+}
+
+#ifndef DACCESS_COMPILE
+TypeHandle TypeHandle::Instantiate(Instantiation inst) const
+{
+ STATIC_CONTRACT_WRAPPER;
+ return ClassLoader::LoadGenericInstantiationThrowing(GetModule(), GetCl(), inst);
+}
+
+TypeHandle TypeHandle::MakePointer() const
+{
+ STATIC_CONTRACT_WRAPPER;
+ return ClassLoader::LoadPointerOrByrefTypeThrowing(ELEMENT_TYPE_PTR, *this);
+}
+
+TypeHandle TypeHandle::MakeByRef() const
+{
+ STATIC_CONTRACT_WRAPPER;
+ return ClassLoader::LoadPointerOrByrefTypeThrowing(ELEMENT_TYPE_BYREF, *this);
+}
+
+TypeHandle TypeHandle::MakeSZArray() const
+{
+ STATIC_CONTRACT_WRAPPER;
+ return ClassLoader::LoadArrayTypeThrowing(*this);
+}
+
+TypeHandle TypeHandle::MakeArray(int rank) const
+{
+ STATIC_CONTRACT_WRAPPER;
+ return ClassLoader::LoadArrayTypeThrowing(*this, ELEMENT_TYPE_ARRAY, rank);
+}
+
+// The returned TypeHandle is a ParamTypeDesc that acts like a facade for the original valuetype. It makes the
+// valuetype look like its unmanaged view, i.e. GetSize() returns GetNativeSize(), IsBlittable() returns TRUE,
+// and JIT interface special-cases it when reporting GC pointers to the JIT.
+TypeHandle TypeHandle::MakeNativeValueType() const
+{
+ STATIC_CONTRACT_WRAPPER;
+ return ClassLoader::LoadNativeValueTypeThrowing(*this);
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+PTR_Module TypeHandle::GetLoaderModule() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc())
+ return AsTypeDesc()->GetLoaderModule();
+ else
+ return AsMethodTable()->GetLoaderModule();
+}
+
+PTR_Module TypeHandle::GetZapModule() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc())
+ return AsTypeDesc()->GetZapModule();
+ else
+ return AsMethodTable()->GetZapModule();
+}
+
+PTR_BaseDomain TypeHandle::GetDomain() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc())
+ return AsTypeDesc()->GetDomain();
+ else
+ return AsMethodTable()->GetDomain();
+
+}
+
+PTR_LoaderAllocator TypeHandle::GetLoaderAllocator() const
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SO_INTOLERANT;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+ if (IsTypeDesc())
+ {
+ return AsTypeDesc()->GetLoaderAllocator();
+ }
+ else
+ {
+ return AsMethodTable()->GetLoaderAllocator();
+ }
+}
+
+BOOL TypeHandle::IsSharedByGenericInstantiations() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc())
+ {
+        // Arrays are the only TypeDescs that appear in valid generic instantiations (see code:Generics::CheckInstantiation)
+
+ if (HasTypeParam())
+ {
+ return GetTypeParam().IsCanonicalSubtype();
+ }
+ return FALSE;
+ }
+ else
+ return AsMethodTable()->IsSharedByGenericInstantiations();
+}
+
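+// Returns TRUE for the canonical type (__Canon) itself and for any type whose
+// instantiation involves __Canon.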
+BOOL TypeHandle::IsCanonicalSubtype() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (*this == TypeHandle(g_pCanonMethodTableClass)) || IsSharedByGenericInstantiations();
+}
+
+/* static */ BOOL TypeHandle::IsCanonicalSubtypeInstantiation(Instantiation inst)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ if (inst[i].IsCanonicalSubtype())
+ return TRUE;
+ }
+ return FALSE;
+}
+
+// Obtain instantiation from an instantiated type.
+// Return NULL if it's not one.
+Instantiation TypeHandle::GetInstantiation() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (!IsTypeDesc()) return AsMethodTable()->GetInstantiation();
+ else return Instantiation();
+}
+
+
+BOOL TypeHandle::IsValueType() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (!IsTypeDesc()) return AsMethodTable()->IsValueType();
+ else return AsTypeDesc()->IsNativeValueType();
+}
+
+BOOL TypeHandle::IsInterface() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return !IsTypeDesc() && AsMethodTable()->IsInterface();
+}
+
+BOOL TypeHandle::IsAbstract() const
+{
+ WRAPPER_NO_CONTRACT;
+ PREFIX_ASSUME(GetMethodTable() != NULL);
+ return GetMethodTable()->IsAbstract();
+}
+
+DWORD TypeHandle::IsTransparentProxy() const
+{
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_REMOTING
+ return !IsTypeDesc() && AsMethodTable()->IsTransparentProxy();
+#else
+ return FALSE;
+#endif
+}
+
+#ifdef FEATURE_HFA
+bool TypeHandle::IsHFA() const
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!IsTypeDesc())
+ return AsMethodTable()->IsHFA();
+
+ if (AsTypeDesc()->IsNativeValueType())
+ return AsNativeValueType()->IsNativeHFA();
+
+ return false;
+}
+
+CorElementType TypeHandle::GetHFAType() const
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!IsTypeDesc())
+ return AsMethodTable()->GetHFAType();
+
+ if (AsTypeDesc()->IsNativeValueType())
+ return AsNativeValueType()->GetNativeHFAType();
+
+ return ELEMENT_TYPE_END;
+}
+#endif // FEATURE_HFA
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+bool TypeHandle::RequiresAlign8() const
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (IsNativeValueType())
+ return AsNativeValueType()->NativeRequiresAlign8();
+
+ return GetMethodTable()->RequiresAlign8();
+}
+#endif // FEATURE_64BIT_ALIGNMENT
+
+#ifndef DACCESS_COMPILE
+
+BOOL TypeHandle::IsBlittable() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!IsTypeDesc())
+ {
+ // This is a simple type (not an array, ptr or byref), so we
+ // simply check to see if the type is blittable.
+ return AsMethodTable()->IsBlittable();
+ }
+
+ if (AsTypeDesc()->IsArray())
+ {
+ // Single-dimensional arrays of blittable types are also blittable.
+ if (AsArray()->GetRank() == 1)
+ {
+ if (AsArray()->GetArrayElementTypeHandle().IsBlittable())
+ return TRUE;
+ }
+ }
+ else if (AsTypeDesc()->IsNativeValueType())
+ {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+BOOL TypeHandle::HasLayout() const
+{
+ WRAPPER_NO_CONTRACT;
+ MethodTable *pMT = GetMethodTable();
+ return pMT ? pMT->HasLayout() : FALSE;
+}
+
+#ifdef FEATURE_COMINTEROP
+
+TypeHandle TypeHandle::GetCoClassForInterface() const
+{
+ WRAPPER_NO_CONTRACT;
+ PREFIX_ASSUME(GetMethodTable() != NULL);
+ return GetMethodTable()->GetCoClassForInterface();
+}
+
+DWORD TypeHandle::IsComClassInterface() const
+{
+ WRAPPER_NO_CONTRACT;
+ PREFIX_ASSUME(GetMethodTable() != NULL);
+ return GetMethodTable()->IsComClassInterface();
+}
+
+BOOL TypeHandle::IsComObjectType() const
+{
+ WRAPPER_NO_CONTRACT;
+ PREFIX_ASSUME(GetMethodTable() != NULL);
+ return GetMethodTable()->IsComObjectType();
+}
+
+BOOL TypeHandle::IsComEventItfType() const
+{
+ WRAPPER_NO_CONTRACT;
+ PREFIX_ASSUME(GetMethodTable() != NULL);
+ return GetMethodTable()->IsComEventItfType();
+}
+
+CorIfaceAttr TypeHandle::GetComInterfaceType() const
+{
+ WRAPPER_NO_CONTRACT;
+ PREFIX_ASSUME(GetMethodTable() != NULL);
+ return GetMethodTable()->GetComInterfaceType();
+}
+
+TypeHandle TypeHandle::GetDefItfForComClassItf() const
+{
+ WRAPPER_NO_CONTRACT;
+ PREFIX_ASSUME(GetMethodTable() != NULL);
+ return GetMethodTable()->GetDefItfForComClassItf();
+}
+
+BOOL TypeHandle::IsProjectedFromWinRT() const
+{
+ LIMITED_METHOD_CONTRACT;
+ PREFIX_ASSUME(GetMethodTable() != NULL);
+ return GetMethodTable()->IsProjectedFromWinRT();
+}
+
+BOOL TypeHandle::IsExportedToWinRT() const
+{
+ LIMITED_METHOD_CONTRACT;
+ PREFIX_ASSUME(GetMethodTable() != NULL);
+ return GetMethodTable()->IsExportedToWinRT();
+}
+
+ComCallWrapperTemplate *TypeHandle::GetComCallWrapperTemplate() const
+{
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(IsArray() || !IsTypeDesc());
+
+ if (IsTypeDesc())
+ {
+ return AsArray()->GetComCallWrapperTemplate();
+ }
+ return AsMethodTable()->GetComCallWrapperTemplate();
+}
+
+BOOL TypeHandle::SetComCallWrapperTemplate(ComCallWrapperTemplate *pTemplate)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PRECONDITION(IsArray() || !IsTypeDesc());
+
+ if (IsTypeDesc())
+ {
+ return AsArray()->SetComCallWrapperTemplate(pTemplate);
+ }
+ return AsMethodTable()->SetComCallWrapperTemplate(pTemplate);
+}
+
+#endif // FEATURE_COMINTEROP
+
+//--------------------------------------------------------------------------------------
+// CanCastTo is necessary but not sufficient, as it assumes that any valuetype
+// involved is in its boxed form.
+
+BOOL TypeHandle::IsBoxedAndCanCastTo(TypeHandle type, TypeHandlePairList *pPairList) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+
+ LOADS_TYPE(CLASS_DEPENDENCIES_LOADED);
+
+ // The caller should check for an exact match.
+ // That will cover the cast of a (unboxed) valuetype to itself.
+ PRECONDITION(*this != type);
+ }
+ CONTRACTL_END
+
+
+ CorElementType fromParamCorType = GetVerifierCorElementType();
+
+ if (CorTypeInfo::IsObjRef(fromParamCorType))
+ {
+ // fromParamCorType is a reference type. We can just use CanCastTo
+ return CanCastTo(type, pPairList);
+ }
+ else if (CorTypeInfo::IsGenericVariable(fromParamCorType))
+ {
+ TypeVarTypeDesc* varFromParam = AsGenericVariable();
+
+ if (!varFromParam->ConstraintsLoaded())
+ varFromParam->LoadConstraints(CLASS_DEPENDENCIES_LOADED);
+
+ // A generic type parameter cannot be compatible with another type
+ // as it could be substituted with a valuetype. However, if it is
+ // constrained to a reference type, then we can use CanCastTo.
+ if (varFromParam->ConstrainedAsObjRef())
+ return CanCastTo(type, pPairList);
+ }
+
+ return FALSE;
+}
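+
+// Illustrative sketch (hypothetical caller, not part of this file): a client holding a
+// possibly-unboxed source type should handle the exact match itself, per the
+// precondition above, and only then defer to IsBoxedAndCanCastTo.
+//
+//   static BOOL CanStoreInto(TypeHandle thSrc, TypeHandle thDst)
+//   {
+//       if (thSrc == thDst)
+//           return TRUE;   // exact match also covers an unboxed valuetype
+//       return thSrc.IsBoxedAndCanCastTo(thDst, NULL);
+//   }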
+
+//--------------------------------------------------------------------------------------
+// CanCastTo is necessary but not sufficient, as it assumes that any valuetype
+// involved is in its boxed form. See IsBoxedAndCanCastTo() if the valuetype
+// is not guaranteed to be in its boxed form.
+
+BOOL TypeHandle::CanCastTo(TypeHandle type, TypeHandlePairList *pVisited) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+
+ LOADS_TYPE(CLASS_DEPENDENCIES_LOADED);
+ }
+ CONTRACTL_END
+
+ if (*this == type)
+ return(true);
+
+ if (IsTypeDesc())
+ return AsTypeDesc()->CanCastTo(type, pVisited);
+
+ if (type.IsTypeDesc())
+ return(false);
+
+ if (AsMethodTable()->IsTransparentProxy())
+ return (false);
+
+ return AsMethodTable()->CanCastToClassOrInterface(type.AsMethodTable(), pVisited);
+}
+
+#include <optsmallperfcritical.h>
+TypeHandle::CastResult TypeHandle::CanCastToNoGC(TypeHandle type) const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (*this == type)
+ return(CanCast);
+
+ if (IsTypeDesc())
+ return AsTypeDesc()->CanCastToNoGC(type);
+
+ if (type.IsTypeDesc())
+ return(CannotCast);
+
+ if (AsMethodTable()->IsTransparentProxy())
+ return (CannotCast);
+
+ return AsMethodTable()->CanCastToClassOrInterfaceNoGC(type.AsMethodTable());
+}
+#include <optdefault.h>
+
+#endif // #ifndef DACCESS_COMPILE
+
+void TypeHandle::GetName(SString &result) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(goto Exit;);
+ {
+
+ if (IsTypeDesc())
+ {
+ AsTypeDesc()->GetName(result);
+ goto Exit;
+ }
+
+ AsMethodTable()->_GetFullyQualifiedNameForClass(result);
+
+ // Tack the instantiation on the end
+ Instantiation inst = GetInstantiation();
+ if (!inst.IsEmpty())
+ TypeString::AppendInst(result, inst);
+ }
+Exit:
+ ;
+ END_INTERIOR_STACK_PROBE;
+}
+
+TypeHandle TypeHandle::GetParent() const
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ if (IsTypeDesc())
+ return(AsTypeDesc()->GetParent());
+ else
+ return TypeHandle(AsMethodTable()->GetParentMethodTable());
+}
+#ifndef DACCESS_COMPILE
+
+/* static */
+TypeHandle TypeHandle::MergeClassWithInterface(TypeHandle tClass, TypeHandle tInterface)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ MethodTable *pMTClass = tClass.AsMethodTable();
+
+ // Check if the class implements the interface
+ if (pMTClass->ImplementsEquivalentInterface(tInterface.AsMethodTable()))
+ {
+ // The class implements the interface or its equivalent, so our merged state should be the interface
+ return tInterface;
+ }
+
+ // Check if the class and the interface implement a common interface
+ MethodTable *pMTInterface = tInterface.AsMethodTable();
+ MethodTable::InterfaceMapIterator intIt = pMTInterface->IterateInterfaceMap();
+ while (intIt.Next())
+ {
+ MethodTable *pMT = intIt.GetInterface();
+ if (pMTClass->ImplementsEquivalentInterface(pMT))
+ {
+ // Found a common interface. If there are multiple common interfaces, then
+ // the problem is ambiguous so we'll just take the first one--it's the best
+ // we can do. If an ensuing code path relies on another common interface,
+ // the verifier will think the code is unverifiable, but it would require a
+ // major redesign of the verifier to fix that.
+ return TypeHandle(pMT);
+ }
+ }
+
+ // No compatible merge found - using Object
+ return TypeHandle(g_pObjectClass);
+}
+
+/* static */
+TypeHandle TypeHandle::MergeTypeHandlesToCommonParent(TypeHandle ta, TypeHandle tb)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ _ASSERTE(!ta.IsNull() && !tb.IsNull());
+
+ if (ta == tb)
+ return ta;
+
+ // Handle the array case
+ if (ta.IsArray())
+ {
+ if (tb.IsArray())
+ return MergeArrayTypeHandlesToCommonParent(ta, tb);
+ else if (tb.IsInterface())
+ {
+ //Check to see if we can merge the array to a common interface (such as Derived[] and IList<Base>)
+ if (ArraySupportsBizarreInterface(ta.AsArray(), tb.AsMethodTable()))
+ return tb;
+ }
+ ta = TypeHandle(g_pArrayClass); // keep merging from here.
+ }
+ else if (tb.IsArray())
+ {
+ if (ta.IsInterface() && ArraySupportsBizarreInterface(tb.AsArray(), ta.AsMethodTable()))
+ return ta;
+
+ tb = TypeHandle(g_pArrayClass);
+ }
+
+
+ // If either is a (by assumption boxed) type variable
+ // return the supertype, if they are related, or object if they are incomparable.
+ if (ta.IsGenericVariable() || tb.IsGenericVariable())
+ {
+ if (ta.CanCastTo(tb))
+ return tb;
+ if (tb.CanCastTo(ta))
+ return ta;
+ return TypeHandle(g_pObjectClass);
+ }
+
+
+ _ASSERTE(!ta.IsTypeDesc() && !tb.IsTypeDesc());
+
+
+ MethodTable *pMTa = ta.AsMethodTable();
+ MethodTable *pMTb = tb.AsMethodTable();
+
+ if (pMTb->IsInterface())
+ {
+
+ if (pMTa->IsInterface())
+ {
+ //
+ // Both types are interfaces. Check whether one
+ // interface extends the other.
+ //
+ // Does tb extend ta ?
+ //
+
+ if (pMTb->ImplementsEquivalentInterface(pMTa))
+ {
+ // tb extends ta, so our merged state should be ta
+ return ta;
+ }
+
+ //
+ // Does ta extend tb ?
+ //
+ if (pMTa->ImplementsEquivalentInterface(pMTb))
+ {
+ // ta extends tb, so our merged state should be tb
+ return tb;
+ }
+
+ // No compatible merge found - using Object
+ return TypeHandle(g_pObjectClass);
+ }
+ else
+ return MergeClassWithInterface(ta, tb);
+ }
+ else if (pMTa->IsInterface())
+ return MergeClassWithInterface(tb, ta);
+
+ DWORD aDepth = 0;
+ DWORD bDepth = 0;
+ TypeHandle tSearch;
+
+ // find the depth in the class hierarchy for each class
+ for (tSearch = ta; (!tSearch.IsNull()); tSearch = tSearch.GetParent())
+ aDepth++;
+
+ for (tSearch = tb; (!tSearch.IsNull()); tSearch = tSearch.GetParent())
+ bDepth++;
+
+ // for whichever class is lower down in the hierarchy, walk up the superclass chain
+ // to the same level as the other class
+ while (aDepth > bDepth)
+ {
+ ta = ta.GetParent();
+ aDepth--;
+ }
+
+ while (bDepth > aDepth)
+ {
+ tb = tb.GetParent();
+ bDepth--;
+ }
+
+ while (!ta.IsEquivalentTo(tb))
+ {
+ ta = ta.GetParent();
+ tb = tb.GetParent();
+ }
+
+ // If no compatible merge is found, we end up using Object
+
+ _ASSERTE(!ta.IsNull());
+
+ return ta;
+}
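+
+// Illustrative example of the depth-equalizing walk above (hypothetical classes):
+//
+//   class A {}   class B : A {}   class C : B {}   class D : A {}
+//
+//   MergeTypeHandlesToCommonParent(C, D):
+//     depth(C) = 4 (C, B, A, Object); depth(D) = 3 (D, A, Object)
+//     walk C up one level to B, then walk both chains in lockstep: B/D -> A/A
+//     result: A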
+
+/* static */
+TypeHandle TypeHandle::MergeArrayTypeHandlesToCommonParent(TypeHandle ta, TypeHandle tb)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ CorElementType taKind = ta.GetInternalCorElementType();
+ CorElementType tbKind = tb.GetInternalCorElementType();
+ _ASSERTE(CorTypeInfo::IsArray(taKind) && CorTypeInfo::IsArray(tbKind));
+
+ TypeHandle taElem;
+ TypeHandle tMergeElem;
+
+ // If they match we are good to go.
+ if (ta == tb)
+ return ta;
+
+ if (ta == TypeHandle(g_pArrayClass))
+ return ta;
+ else if (tb == TypeHandle(g_pArrayClass))
+ return tb;
+
+ // Get the rank and kind of the first array
+ DWORD rank = ta.AsArray()->GetRank();
+ CorElementType mergeKind = taKind;
+
+ // if no match on the rank the common ancestor is System.Array
+ if (rank != tb.AsArray()->GetRank())
+ return TypeHandle(g_pArrayClass);
+
+ if (tbKind != taKind)
+ {
+ if (CorTypeInfo::IsArray(tbKind) &&
+ CorTypeInfo::IsArray(taKind) && rank == 1)
+ mergeKind = ELEMENT_TYPE_ARRAY;
+ else
+ return TypeHandle(g_pArrayClass);
+ }
+
+ // If both are arrays of reference types, return an array of the common
+ // ancestor.
+ taElem = ta.AsArray()->GetArrayElementTypeHandle();
+ if (taElem.IsEquivalentTo(tb.AsArray()->GetArrayElementTypeHandle()))
+ {
+ // The element types match/are equivalent, so we are good to go.
+ tMergeElem = taElem;
+ }
+ else if (taElem.IsArray() && tb.AsArray()->GetArrayElementTypeHandle().IsArray())
+ {
+ // Arrays - Find the common ancestor of the element types.
+ tMergeElem = MergeArrayTypeHandlesToCommonParent(taElem, tb.AsArray()->GetArrayElementTypeHandle());
+ }
+ else if (CorTypeInfo::IsObjRef(taElem.GetSignatureCorElementType()) &&
+ CorTypeInfo::IsObjRef(tb.AsArray()->GetArrayElementTypeHandle().GetSignatureCorElementType()))
+ {
+ // Find the common ancestor of the element types.
+ tMergeElem = MergeTypeHandlesToCommonParent(taElem, tb.AsArray()->GetArrayElementTypeHandle());
+ }
+ else
+ {
+ // The element types have nothing in common.
+ return TypeHandle(g_pArrayClass);
+ }
+
+
+ {
+ // This should just result in resolving an already loaded type.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+ // == FailIfNotLoadedOrNotRestored
+ TypeHandle result = ClassLoader::LoadArrayTypeThrowing(tMergeElem, mergeKind, rank, ClassLoader::DontLoadTypes);
+ _ASSERTE(!result.IsNull());
+
+ // <TODO> should be able to assert IsRestored here </TODO>
+ return result;
+ }
+}
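+
+// Illustrative examples (hypothetical inputs): merging String[] with Object[] recurses
+// on the element types and yields Object[]; merging int[] with String[] finds no common
+// element ancestor and falls back to System.Array, as does a rank mismatch such as
+// int[] vs int[,].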
+
+#endif // #ifndef DACCESS_COMPILE
+
+BOOL TypeHandle::IsEnum() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (!IsTypeDesc() && AsMethodTable()->IsEnum());
+}
+
+BOOL TypeHandle::IsFnPtrType() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (IsTypeDesc() &&
+ (GetSignatureCorElementType() == ELEMENT_TYPE_FNPTR));
+}
+
+// Is this type part of an assembly loaded for introspection?
+BOOL
+TypeHandle::IsIntrospectionOnly() const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+#ifndef DACCESS_COMPILE
+ if (IsFnPtrType())
+ {
+ return AsFnPtrType()->IsIntrospectionOnly();
+ }
+ else if (HasTypeParam())
+ {
+ return GetTypeParam().IsIntrospectionOnly();
+ }
+ else
+ {
+ return GetModule()->IsIntrospectionOnly();
+ }
+#else //DACCESS_COMPILE
+ return FALSE;
+#endif //DACCESS_COMPILE
+} // TypeHandle::IsIntrospectionOnly
+
+// Checks this type and its components for "IsIntrospectionOnly"
+BOOL
+TypeHandle::ContainsIntrospectionOnlyTypes() const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+#ifndef DACCESS_COMPILE
+ if (IsFnPtrType())
+ {
+ return AsFnPtrType()->ContainsIntrospectionOnlyTypes();
+ }
+ else if (HasTypeParam())
+ {
+ return GetTypeParam().ContainsIntrospectionOnlyTypes();
+ }
+ else if (IsTypeDesc())
+ {
+ return GetModule()->IsIntrospectionOnly();
+ }
+ else
+ {
+ return AsMethodTable()->ContainsIntrospectionOnlyTypes();
+ }
+#else //DACCESS_COMPILE
+ return FALSE;
+#endif //DACCESS_COMPILE
+} // TypeHandle::ContainsIntrospectionOnlyTypes
+
+// Is this type part of an assembly loaded for introspection?
+BOOL
+TypeKey::IsIntrospectionOnly()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+#ifndef DACCESS_COMPILE
+ switch (m_kind)
+ {
+ case ELEMENT_TYPE_CLASS:
+ return u.asClass.m_pModule->IsIntrospectionOnly();
+
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_BYREF:
+ return TypeHandle::FromTAddr(u.asParamType.m_paramType).IsIntrospectionOnly();
+
+ case ELEMENT_TYPE_FNPTR:
+ // Return TRUE if any return/argument type was loaded for introspection only
+ for (DWORD i = 0; i <= u.asFnPtr.m_numArgs; i++)
+ {
+ if (u.asFnPtr.m_pRetAndArgTypes[i].IsIntrospectionOnly())
+ {
+ return TRUE;
+ }
+ }
+ // None of the return/argument types was loaded for introspection only
+ return FALSE;
+
+ default:
+ UNREACHABLE_MSG("Corrupted typekey");
+ }
+#else //DACCESS_COMPILE
+ return FALSE;
+#endif //DACCESS_COMPILE
+} // TypeKey::IsIntrospectionOnly
+
+BOOL TypeHandle::IsRestored_NoLogging() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!IsTypeDesc())
+ {
+ return GetMethodTable()->IsRestored_NoLogging();
+ }
+ else
+ {
+ return AsTypeDesc()->IsRestored_NoLogging();
+ }
+}
+
+BOOL TypeHandle::IsRestored() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (!IsTypeDesc())
+ {
+ return GetMethodTable()->IsRestored();
+ }
+ else
+ {
+ return AsTypeDesc()->IsRestored();
+ }
+}
+
+BOOL TypeHandle::IsEncodedFixup() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return CORCOMPILE_IS_POINTER_TAGGED(m_asTAddr);
+}
+
+BOOL TypeHandle::HasUnrestoredTypeKey() const
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ if (IsTypeDesc())
+ return AsTypeDesc()->HasUnrestoredTypeKey();
+ else
+ return AsMethodTable()->HasUnrestoredTypeKey();
+}
+
+#ifdef FEATURE_PREJIT
+void TypeHandle::DoRestoreTypeKey()
+{
+ CONTRACT_VOID
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(!IsEncodedFixup());
+ }
+ CONTRACT_END
+
+#ifndef DACCESS_COMPILE
+ if (IsTypeDesc())
+ {
+ AsTypeDesc()->DoRestoreTypeKey();
+ }
+
+ if (!IsTypeDesc() || IsArray())
+ {
+ MethodTable* pMT = GetMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+ pMT->DoRestoreTypeKey();
+ }
+#endif
+
+#ifdef _DEBUG
+#ifndef DACCESS_COMPILE
+ if (LoggingOn(LF_CLASSLOADER, LL_INFO10000))
+ {
+ StackSString name;
+ TypeString::AppendTypeDebug(name, *this);
+ LOG((LF_CLASSLOADER, LL_INFO10000, "GENERICS:RestoreTypeKey: type %S at %p\n", name.GetUnicode(), AsPtr()));
+ }
+#endif
+#endif
+
+
+ RETURN;
+}
+#endif
+
+void TypeHandle::CheckRestore() const
+{
+ CONTRACTL
+ {
+ if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
+ if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
+ PRECONDITION(!IsEncodedFixup());
+ }
+ CONTRACTL_END
+
+ if (!IsFullyLoaded())
+ {
+ ClassLoader::EnsureLoaded(*this);
+ _ASSERTE(IsFullyLoaded());
+ }
+
+ g_IBCLogger.LogTypeMethodTableAccess(this);
+}
+
+#ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+BOOL TypeHandle::ComputeNeedsRestore(DataImage *image, TypeHandleList *pVisited) const
+{
+ STATIC_STANDARD_VM_CONTRACT;
+
+ _ASSERTE(GetAppDomain()->IsCompilationDomain());
+
+ if (!IsTypeDesc())
+ return GetMethodTable()->ComputeNeedsRestore(image, pVisited);
+ else
+ return AsTypeDesc()->ComputeNeedsRestore(image, pVisited);
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+BOOL
+TypeHandle::IsExternallyVisible() const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ if (!IsTypeDesc())
+ {
+ return AsMethodTable()->IsExternallyVisible();
+ }
+
+ if (IsGenericVariable())
+ { // VAR, MVAR
+ return TRUE;
+ }
+
+ if (IsFnPtrType())
+ { // FNPTR
+ // A function pointer has to check all of its argument types
+ return AsFnPtrType()->IsExternallyVisible();
+ }
+ // ARRAY, SZARRAY, PTR, BYREF
+ _ASSERTE(HasTypeParam());
+
+ TypeHandle paramType = AsTypeDesc()->GetTypeParam();
+ _ASSERTE(!paramType.IsNull());
+
+ return paramType.IsExternallyVisible();
+} // TypeHandle::IsExternallyVisible
+
+#ifndef CROSSGEN_COMPILE
+OBJECTREF TypeHandle::GetManagedClassObject() const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ // Force a GC here because GetManagedClassObject could trigger GC nondeterministically
+ GCStress<cfg_any, PulseGcTriggerPolicy>::MaybeTrigger();
+#endif // _DEBUG
+
+ if (!IsTypeDesc())
+ {
+ _ASSERT(AsMethodTable()->IsTransparentProxy() == false);
+ return AsMethodTable()->GetManagedClassObject();
+ }
+ else
+ {
+ switch(GetInternalCorElementType())
+ {
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_PTR:
+ return ((ParamTypeDesc*)AsTypeDesc())->GetManagedClassObject();
+
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ return ((TypeVarTypeDesc*)AsTypeDesc())->GetManagedClassObject();
+
+ // For this release a function pointer is mapped to an IntPtr. This results in a loss of information. Fix in the next release.
+ case ELEMENT_TYPE_FNPTR:
+ return MscorlibBinder::GetElementType(ELEMENT_TYPE_I)->GetManagedClassObject();
+
+ default:
+ _ASSERTE(!"Bad Element Type");
+ return NULL;
+ }
+ }
+}
+
+
+OBJECTREF TypeHandle::GetManagedClassObjectFast() const
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+
+ FORBID_FAULT;
+ }
+ CONTRACTL_END;
+
+ OBJECTREF o = NULL;
+
+ if (!IsTypeDesc()) {
+ o = AsMethodTable()->GetManagedClassObjectIfExists();
+ }
+ else
+ {
+ switch(GetInternalCorElementType())
+ {
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_PTR:
+ o = ((ParamTypeDesc*)AsTypeDesc())->GetManagedClassObjectFast();
+ break;
+
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ o = ((TypeVarTypeDesc*)AsTypeDesc())->GetManagedClassObjectFast();
+ break;
+
+ // For this release a function pointer is mapped to an IntPtr. This results in a loss of information. Fix in the next release.
+ case ELEMENT_TYPE_FNPTR:
+ // Because TheFnPtrClass() can throw, we return NULL for now. That is not a major deal because it just means we will
+ // not take advantage of this optimization, but the case is rather rare.
+ //o = TheFnPtrClass()->GetManagedClassObjectFast();
+ break;
+
+ default:
+ _ASSERTE(!"Bad Element Type");
+ return NULL;
+ }
+ }
+ return o;
+}
+#endif // CROSSGEN_COMPILE
+
+#endif // #ifndef DACCESS_COMPILE
+
+#if defined(CHECK_APP_DOMAIN_LEAKS) || defined(_DEBUG)
+
+BOOL TypeHandle::IsAppDomainAgile() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!IsTypeDesc())
+ {
+ MethodTable *pMT = AsMethodTable();
+ return pMT->GetClass()->IsAppDomainAgile();
+ }
+ else if (IsArray())
+ {
+ TypeHandle th = AsArray()->GetArrayElementTypeHandle();
+ return th.IsArrayOfElementsAppDomainAgile();
+ }
+ else
+ {
+ // <TODO>@todo: consider other types of type handles agile?</TODO>
+ return FALSE;
+ }
+}
+
+BOOL TypeHandle::IsCheckAppDomainAgile() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!IsTypeDesc())
+ {
+ MethodTable *pMT = AsMethodTable();
+ return pMT->GetClass()->IsCheckAppDomainAgile();
+ }
+ else if (IsArray())
+ {
+ TypeHandle th = AsArray()->GetArrayElementTypeHandle();
+ return th.IsArrayOfElementsCheckAppDomainAgile();
+ }
+ else
+ {
+ // <TODO>@todo: consider other types of type handles agile?</TODO>
+ return FALSE;
+ }
+}
+
+BOOL TypeHandle::IsArrayOfElementsAppDomainAgile() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!IsTypeDesc())
+ {
+ MethodTable *pMT = AsMethodTable();
+ return (pMT->GetClass()->IsSealed()) && pMT->GetClass()->IsAppDomainAgile();
+ }
+ else
+ if (IsArray())
+ {
+ return AsArray()->GetArrayElementTypeHandle().IsArrayOfElementsAppDomainAgile();
+ }
+ else
+ {
+ // I'm not sure how to prove a typedesc is sealed, so
+ // just bail and return FALSE here rather than recursing.
+
+ return FALSE;
+ }
+}
+
+BOOL TypeHandle::IsArrayOfElementsCheckAppDomainAgile() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!IsTypeDesc())
+ {
+ MethodTable *pMT = AsMethodTable();
+ return (pMT->GetClass()->IsAppDomainAgile()
+ && (pMT->GetClass()->IsSealed()) == 0)
+ || pMT->GetClass()->IsCheckAppDomainAgile();
+ }
+ else
+ if (IsArray())
+ {
+ return AsArray()->GetArrayElementTypeHandle().IsArrayOfElementsCheckAppDomainAgile();
+ }
+ else
+ {
+ // I'm not sure how to prove a typedesc is sealed, so
+ // just bail and return FALSE here rather than recursing.
+
+ return FALSE;
+ }
+}
+
+#endif // defined(CHECK_APP_DOMAIN_LEAKS) || defined(_DEBUG)
+
+
+BOOL TypeHandle::IsByRef() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return(IsTypeDesc() && AsTypeDesc()->IsByRef());
+
+}
+
+BOOL TypeHandle::IsPointer() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return(IsTypeDesc() && AsTypeDesc()->IsPointer());
+
+}
+
+//
+// The internal type is the type that most of the runtime cares about. This type has had two normalizations
+// applied to it:
+//
+// * Enumerated types have been normalized to the primitive type that underlies them (typically int)
+// * Value types that look like ints (which include RuntimeTypeHandles, etc.) have been morphed to be
+//   their underlying type (much like enumerated types)
+// * See code:MethodTable#KindsOfElementTypes for more
+// * This value is set by code:EEClass::ComputeInternalCorElementTypeForValueType
+CorElementType TypeHandle::GetInternalCorElementType() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc())
+ return AsTypeDesc()->GetInternalCorElementType();
+ else
+ return AsMethodTable()->GetInternalCorElementType();
+}
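+
+// Illustrative example: for a hypothetical "enum Color : int", GetInternalCorElementType()
+// returns ELEMENT_TYPE_I4, while GetSignatureCorElementType() below still reports
+// ELEMENT_TYPE_VALUETYPE, since signatures do not normalize enums.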
+
+BOOL TypeHandle::IsDomainNeutral() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (IsTypeDesc())
+ return AsTypeDesc()->IsDomainNeutral();
+ else
+ return AsMethodTable()->IsDomainNeutral();
+}
+
+BOOL TypeHandle::HasInstantiation() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc()) return false;
+ if (IsNull()) return false;
+ return AsMethodTable()->HasInstantiation();
+}
+
+ClassLoadLevel TypeHandle::GetLoadLevel() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc())
+ {
+ return AsTypeDesc()->GetLoadLevel();
+ }
+ else
+ {
+ return AsMethodTable()->GetLoadLevel();
+ }
+}
+
+BOOL TypeHandle::IsFullyLoaded() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (IsTypeDesc())
+ {
+ return AsTypeDesc()->IsFullyLoaded();
+ }
+ else
+ {
+ return AsMethodTable()->IsFullyLoaded();
+ }
+}
+
+void TypeHandle::DoFullyLoad(Generics::RecursionGraph *pVisited, ClassLoadLevel level,
+ DFLPendingList *pPending, BOOL *pfBailed, const InstantiationContext *pInstContext)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(level == CLASS_LOADED || level == CLASS_DEPENDENCIES_LOADED);
+ _ASSERTE(pfBailed != NULL);
+ _ASSERTE(!(level == CLASS_LOADED && pPending == NULL));
+
+
+ if (IsTypeDesc())
+ {
+ return AsTypeDesc()->DoFullyLoad(pVisited, level, pPending, pfBailed, pInstContext);
+ }
+ else
+ {
+ return AsMethodTable()->DoFullyLoad(pVisited, level, pPending, pfBailed, pInstContext);
+ }
+}
+
+// As its name suggests, this returns the type as it is in the meta-data signature. No morphing to deal
+// with verification or with value types that are treated as primitives is done.
+// see code:MethodTable#KindsOfElementTypes for more
+CorElementType TypeHandle::GetSignatureCorElementType() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // This gets used by
+ // MethodTable::DoRestoreTypeKey() -->
+ // Module::RestoreMethodTablePointer() -->
+ // ZapSig::DecodeType() -->
+ // SigPointer::GetTypeHandleThrowing -->
+ // TypeHandle::GetSignatureCorElementType
+ // early on during the process of restoring, i.e. after the EEClass for the
+ // MT is restored but not the parent method table. Thus we cannot
+ // assume that the parent method table is even yet a valid pointer.
+ // However both MethodTable::GetClass and MethodTable::IsValueType work
+ // even if the parent method table pointer has not been restored.
+
+ if (IsTypeDesc())
+ {
+ return AsTypeDesc()->GetInternalCorElementType();
+ }
+ else
+ {
+ return AsMethodTable()->GetSignatureCorElementType();
+ }
+}
+
+// As its name suggests, this returns the type used by the IL verifier. The basic difference between this
+// type and the type in the meta-data is that enumerations have been normalized to their underlying
+// primitive type. See code:MethodTable#KindsOfElementTypes for more.
+CorElementType TypeHandle::GetVerifierCorElementType() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (IsTypeDesc())
+ {
+ return AsTypeDesc()->GetInternalCorElementType();
+ }
+ else
+ {
+ return AsMethodTable()->GetVerifierCorElementType();
+ }
+}
+
+
+#ifdef DACCESS_COMPILE
+
+void
+TypeHandle::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ if (!m_asTAddr)
+ {
+ return;
+ }
+
+ CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED
+ (
+ if (IsArray())
+ {
+ AsArray()->EnumMemoryRegions(flags);
+ }
+ else if (IsGenericVariable())
+ {
+ AsGenericVariable()->EnumMemoryRegions(flags);
+ }
+ else if (IsFnPtrType())
+ {
+ AsFnPtrType()->EnumMemoryRegions(flags);
+ }
+ else if (IsTypeDesc())
+ {
+ DacEnumMemoryRegion(dac_cast<TADDR>(AsTypeDesc()), sizeof(TypeDesc));
+ }
+ else
+ {
+ GetMethodTable()->EnumMemoryRegions(flags);
+ }
+ );
+}
+
+#endif // DACCESS_COMPILE
+
+
+
+//--------------------------------------------------------------------------------------
+// For generic instantiations, check that it satisfies constraints.
+//
+// Because this is really a part of DoFullyLoad() that is broken out for readability reasons,
+// it takes both the typehandle and its template typehandle as parameters (DoFullyLoad
+// already has the latter typehandle, so this way we avoid a second call to the loader.)
+//
+// Return value:
+//
+// Returns TRUE if constraints are satisfied.
+//
+// Returns FALSE if constraints are violated and the type is a canonical instantiation. (We
+// have to let these load as these form the basis of every instantiation. The canonical
+// methodtable is not available to users.)
+//
+// THROWS if constraints are violated
+//
+//
+//--------------------------------------------------------------------------------------
+BOOL SatisfiesClassConstraints(TypeHandle instanceTypeHnd, TypeHandle typicalTypeHnd,
+ const InstantiationContext *pInstContext)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(!instanceTypeHnd.IsCanonicalSubtype());
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+
+ Instantiation formalInst = typicalTypeHnd.GetInstantiation();
+ Instantiation actualInst = instanceTypeHnd.GetInstantiation();
+ _ASSERTE(formalInst.GetNumArgs() == actualInst.GetNumArgs());
+
+ for (DWORD i = 0; i < actualInst.GetNumArgs(); i++)
+ {
+ TypeHandle thActualArg = actualInst[i];
+
+ SigTypeContext typeContext;
+ SigTypeContext::InitTypeContext(instanceTypeHnd, &typeContext);
+
+ // Log the TypeVarTypeDesc access
+ g_IBCLogger.LogTypeMethodTableWriteableAccess(&thActualArg);
+
+ BOOL bSatisfiesConstraints =
+ formalInst[i].AsGenericVariable()->SatisfiesConstraints(&typeContext, thActualArg, pInstContext);
+
+ if (!bSatisfiesConstraints)
+ {
+ SString argNum;
+ argNum.Printf("%d", i);
+
+ SString typicalTypeHndName;
+ TypeString::AppendType(typicalTypeHndName, typicalTypeHnd);
+
+ SString actualParamName;
+ TypeString::AppendType(actualParamName, actualInst[i]);
+
+ SString formalParamName;
+ TypeString::AppendType(formalParamName, formalInst[i]);
+
+ COMPlusThrow(kTypeLoadException,
+ IDS_EE_CLASS_CONSTRAINTS_VIOLATION,
+ argNum,
+ actualParamName,
+ typicalTypeHndName,
+ formalParamName
+ );
+ }
+ }
+
+ return TRUE;
+
+#else
+ return TRUE;
+#endif
+}
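+
+// Illustrative example (C# surface syntax, hypothetical types): for
+//
+//   class Cache<T> where T : IDisposable {}
+//
+// loading Cache<String> reaches the COMPlusThrow above with argNum "0", the actual
+// argument "System.String" and the formal parameter "T", because String does not
+// implement IDisposable.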
+
+
+
+
+#ifndef DACCESS_COMPILE
+BOOL TypeHandle::SatisfiesClassConstraints() const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ BOOL returnValue = FALSE;
+ Instantiation classInst;
+ TypeHandle thCanonical;
+ Instantiation typicalInst;
+ SigTypeContext typeContext;
+ TypeHandle thParent;
+
+ INTERIOR_STACK_PROBE_CHECK_THREAD;
+
+ //TODO: cache (positive?) result in methodtable using, say, enum_flag2_UNUSEDxxx
+
+ //TODO: reconsider this check
+ thParent = GetParent();
+
+ if (!thParent.IsNull() && !thParent.SatisfiesClassConstraints())
+ {
+ returnValue = FALSE;
+ goto Exit;
+ }
+
+ if (!HasInstantiation())
+ {
+ returnValue = TRUE;
+ goto Exit;
+ }
+
+ classInst = GetInstantiation();
+ thCanonical = ClassLoader::LoadTypeDefThrowing(
+ GetModule(),
+ GetCl(),
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ typicalInst = thCanonical.GetInstantiation();
+
+ SigTypeContext::InitTypeContext(*this, &typeContext);
+
+ for (DWORD i = 0; i < classInst.GetNumArgs(); i++)
+ {
+ TypeHandle thArg = classInst[i];
+ _ASSERTE(!thArg.IsNull());
+
+ TypeVarTypeDesc* tyvar = typicalInst[i].AsGenericVariable();
+ _ASSERTE(tyvar != NULL);
+ _ASSERTE(TypeFromToken(tyvar->GetTypeOrMethodDef()) == mdtTypeDef);
+
+ tyvar->LoadConstraints(); //TODO: is this necessary for anything but the typical class?
+
+ if (!tyvar->SatisfiesConstraints(&typeContext, thArg))
+ {
+ returnValue = FALSE;
+ goto Exit;
+ }
+
+ }
+ returnValue = TRUE;
+Exit:
+ ;
+ END_INTERIOR_STACK_PROBE;
+
+ return returnValue;
+}
+
+TypeKey TypeHandle::GetTypeKey() const
+{
+ LIMITED_METHOD_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ PRECONDITION(!IsGenericVariable());
+
+ if (IsTypeDesc())
+ {
+ TypeDesc *pTD = AsTypeDesc();
+ CorElementType etype = pTD->GetInternalCorElementType();
+ if (CorTypeInfo::IsArray_NoThrow(etype))
+ {
+ TypeKey tk(etype, pTD->GetTypeParam(), FALSE, pTD->GetMethodTable()->GetRank());
+ return tk;
+ }
+ else if (CorTypeInfo::IsModifier_NoThrow(etype) || etype == ELEMENT_TYPE_VALUETYPE)
+ {
+ TypeKey tk(etype, pTD->GetTypeParam());
+ return tk;
+ }
+ else
+ {
+ CONSISTENCY_CHECK(etype == ELEMENT_TYPE_FNPTR);
+ FnPtrTypeDesc* pFTD = (FnPtrTypeDesc*) pTD;
+ TypeKey tk(pFTD->GetCallConv(), pFTD->GetNumArgs(), pFTD->GetRetAndArgTypesPointer());
+ return tk;
+ }
+ }
+ else
+ {
+ MethodTable *pMT = AsMethodTable();
+ if (pMT->IsArray())
+ {
+ TypeKey tk(pMT->GetInternalCorElementType(), pMT->GetApproxArrayElementTypeHandle(), TRUE, pMT->GetRank());
+ return tk;
+ }
+ else if (pMT->IsTypicalTypeDefinition())
+ {
+ TypeKey tk(pMT->GetModule(), pMT->GetCl());
+ return tk;
+ }
+ else
+ {
+ TypeKey tk(pMT->GetModule(), pMT->GetCl(), pMT->GetInstantiation());
+ return tk;
+ }
+ }
+}
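+
+// Illustrative examples: the key for String[] is (ELEMENT_TYPE_SZARRAY, String, rank 1);
+// the key for an instantiation such as List<int> is (module, typedef token, instantiation);
+// the key for the typical definition List<T> is just (module, typedef token).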
+
+
+#ifdef _DEBUG
+// Check that a type handle matches the key provided
+CHECK TypeHandle::CheckMatchesKey(TypeKey *pKey) const
+{
+ WRAPPER_NO_CONTRACT;
+ CONTRACT_VIOLATION(TakesLockViolation); // this is debug-only code
+ CONSISTENCY_CHECK(!IsGenericVariable());
+
+ // Check first to avoid creating debug name
+ if (!GetTypeKey().Equals(pKey))
+ {
+ StackSString typeKeyString;
+ CONTRACT_VIOLATION(GCViolation|ThrowsViolation);
+ TypeString::AppendTypeKeyDebug(typeKeyString, pKey);
+ if (!IsTypeDesc() && AsMethodTable()->IsArray())
+ {
+ MethodTable *pMT = AsMethodTable();
+ CHECK_MSGF(pMT->GetInternalCorElementType() == pKey->GetKind(),
+ ("CorElementType %d of Array MethodTable does not match key %S", pMT->GetArrayElementType(), typeKeyString.GetUnicode()));
+
+ CHECK_MSGF(pMT->GetApproxArrayElementTypeHandle() == pKey->GetElementType(),
+ ("Element type of Array MethodTable does not match key %S",typeKeyString.GetUnicode()));
+
+ CHECK_MSGF(pMT->GetRank() == pKey->GetRank(),
+ ("Rank %d of Array MethodTable does not match key %S", pMT->GetRank(), typeKeyString.GetUnicode()));
+ }
+ else
+ if (IsTypeDesc())
+ {
+ TypeDesc *pTD = AsTypeDesc();
+ CHECK_MSGF(pTD->GetInternalCorElementType() == pKey->GetKind(),
+ ("CorElementType %d of TypeDesc does not match key %S", pTD->GetInternalCorElementType(), typeKeyString.GetUnicode()));
+
+ if (CorTypeInfo::IsModifier(pKey->GetKind()))
+ {
+ CHECK_MSGF(pTD->GetTypeParam() == pKey->GetElementType(),
+ ("Element type of TypeDesc does not match key %S",typeKeyString.GetUnicode()));
+ }
+ if (CorTypeInfo::IsArray(pKey->GetKind()))
+ {
+ CHECK_MSGF(pTD->GetMethodTable()->GetRank() == pKey->GetRank(),
+ ("Rank %d of array TypeDesc does not match key %S", pTD->GetMethodTable()->GetRank(), typeKeyString.GetUnicode()));
+ }
+ }
+ else
+ {
+ MethodTable *pMT = AsMethodTable();
+ CHECK_MSGF(pMT->GetModule() == pKey->GetModule(), ("Module of MethodTable does not match key %S", typeKeyString.GetUnicode()));
+ CHECK_MSGF(pMT->GetCl() == pKey->GetTypeToken(),
+ ("TypeDef %x of Methodtable does not match TypeDef %x of key %S", pMT->GetCl(), pKey->GetTypeToken(),
+ typeKeyString.GetUnicode()));
+
+ if (pMT->IsTypicalTypeDefinition())
+ {
+ CHECK_MSGF(pKey->GetNumGenericArgs() == 0 && !pKey->HasInstantiation(),
+ ("Key %S for Typical MethodTable has non-zero number of generic arguments", typeKeyString.GetUnicode()));
+ }
+ else
+ {
+ CHECK_MSGF(pMT->GetNumGenericArgs() == pKey->GetNumGenericArgs(),
+ ("Number of generic params %d in MethodTable does not match key %S", pMT->GetNumGenericArgs(), typeKeyString.GetUnicode()));
+ if (pKey->HasInstantiation())
+ {
+ for (DWORD i = 0; i < pMT->GetNumGenericArgs(); i++)
+ {
+#ifdef FEATURE_PREJIT
+ CHECK_MSGF(ZapSig::CompareTypeHandleFieldToTypeHandle(pMT->GetInstantiation().GetRawArgs()[i].GetValuePtr(), pKey->GetInstantiation()[i]),
+ ("Generic argument %d in MethodTable does not match key %S", i, typeKeyString.GetUnicode()));
+#else
+ CHECK_MSGF(pMT->GetInstantiation()[i] == pKey->GetInstantiation()[i],
+ ("Generic argument %d in MethodTable does not match key %S", i, typeKeyString.GetUnicode()));
+#endif
+ }
+ }
+ }
+ }
+ }
+ CHECK_OK;
+}
+
+const char * const classLoadLevelName[] =
+{
+ "BEGIN",
+ "UNRESTOREDTYPEKEY",
+ "UNRESTORED",
+ "APPROXPARENTS",
+ "EXACTPARENTS",
+ "DEPENDENCIES_LOADED",
+ "LOADED",
+};
+
+// Check that this type is loaded up to the level indicated
+// Also check that it is non-null
+CHECK TypeHandle::CheckLoadLevel(ClassLoadLevel requiredLevel)
+{
+ CHECK(!IsNull());
+ // CHECK_MSGF(!IsNull(), ("Type is null, required load level is %s", classLoadLevelName[requiredLevel]));
+ static_assert_no_msg(NumItems(classLoadLevelName) == (1 + CLASS_LOAD_LEVEL_FINAL));
+
+ // Quick check to avoid creating debug string
+ ClassLoadLevel actualLevel = GetLoadLevel();
+ if (actualLevel < requiredLevel)
+ {
+ // SString debugTypeName;
+ // TypeString::AppendTypeDebug(debugTypeName, *this);
+ CHECK(actualLevel >= requiredLevel);
+ // CHECK_MSGF(actualLevel >= requiredLevel,
+ // ("Type has not been sufficiently loaded (actual level is %d, required level is %d)",
+ // /* debugTypeName.GetUnicode(), */ actualLevel, requiredLevel /* classLoadLevelName[actualLevel], classLoadLevelName[requiredLevel] */));
+ }
+ CONSISTENCY_CHECK((actualLevel > CLASS_LOAD_UNRESTORED) == IsRestored());
+ CONSISTENCY_CHECK((actualLevel == CLASS_LOAD_UNRESTOREDTYPEKEY) == HasUnrestoredTypeKey());
+ CHECK_OK;
+}
+
+// Check that this type is fully loaded (i.e. to level CLASS_LOADED)
+CHECK TypeHandle::CheckFullyLoaded()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ if (IsGenericVariable())
+ {
+ CHECK_OK;
+ }
+ CheckLoadLevel(CLASS_LOADED);
+ CHECK_OK;
+}
+
+#endif //DEBUG
+
+#endif //DACCESS_COMPILE
diff --git a/src/vm/typehandle.h b/src/vm/typehandle.h
new file mode 100644
index 0000000000..d95d58aa8d
--- /dev/null
+++ b/src/vm/typehandle.h
@@ -0,0 +1,838 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: typehandle.h
+//
+
+
+//
+
+//
+// ============================================================================
+
+
+#ifndef TYPEHANDLE_H
+#define TYPEHANDLE_H
+
+#include "check.h"
+#include "classloadlevel.h"
+#include "fixuppointer.h"
+
+class TypeDesc;
+class TypeHandle;
+class Instantiation;
+class ArrayTypeDesc;
+class FnPtrTypeDesc;
+class ParamTypeDesc;
+class TypeVarTypeDesc;
+class MethodTable;
+class EEClass;
+class Module;
+class Assembly;
+class BaseDomain;
+class MethodDesc;
+class TypeKey;
+class TypeHandleList;
+class InstantiationContext;
+class DataImage;
+namespace Generics { class RecursionGraph; }
+struct CORINFO_CLASS_STRUCT_;
+
+typedef DPTR(class TypeVarTypeDesc) PTR_TypeVarTypeDesc;
+typedef SPTR(class FnPtrTypeDesc) PTR_FnPtrTypeDesc;
+typedef DPTR(class ParamTypeDesc) PTR_ParamTypeDesc;
+typedef DPTR(class ArrayTypeDesc) PTR_ArrayTypeDesc;
+typedef DPTR(class TypeDesc) PTR_TypeDesc;
+typedef DPTR(class TypeHandle) PTR_TypeHandle;
+
+
+typedef CUnorderedArray<TypeHandle, 40> DFLPendingList;
+
+class TypeHandlePairList;
+
+#ifdef FEATURE_COMINTEROP
+class ComCallWrapperTemplate;
+#endif // FEATURE_COMINTEROP
+
+/*************************************************************************/
+// A TypeHandle is the FUNDAMENTAL concept of type identity in the CLR.
+// That is, two types are equal if and only if their type handles
+// are equal. A TypeHandle is a pointer-sized structure that encodes
+// everything you need to know to figure out what kind of type you are
+// actually dealing with.
+
+// At the present time a TypeHandle can point at two possible things
+//
+// 1) A MethodTable (Intrinsics, Classes, Value Types and their instantiations)
+// 2) A TypeDesc (all other cases: arrays, byrefs, pointer types, function pointers, generic type variables)
+//
+// or with IL stubs, a third thing:
+//
+// 3) A MethodTable for a native value type.
+//
+// Array MTs are not valid TypeHandles: for example no allocated object will
+// ever return such a type handle from Object::GetTypeHandle(), and
+// these type handles should not be passed across the JIT Interface
+// as CORINFO_CLASS_HANDLEs. However some code in the EE does create
+// temporary TypeHandles out of these MTs, so we can't yet assert
+// !pMT->IsArray() in the TypeHandle constructor.
+//
+// Wherever possible, you should be using TypeHandles or MethodTables.
+// Code that is known to work over Class/ValueClass types (including their
+// instantiations) is currently written to use MethodTables.
+//
+// TypeDescs in turn break down into several variants and are
+// for special cases around the edges
+// - array types whose method tables get shared
+// - types for function pointers for verification and reflection
+// - types for generic parameters for verification and reflection
+//
+// Generic type instantiations (in C# syntax: C<ty_1,...,ty_n>) are represented by
+// MethodTables, i.e. a new MethodTable gets allocated for each such instantiation.
+// The entries in these tables (i.e. the code) are, however, often shared.
+// Clients of TypeHandle don't need to know any of this detail; just use the
+// GetInstantiation and HasInstantiation methods.
+
+class TypeHandle
+{
+public:
+ TypeHandle() {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_asTAddr = 0;
+ }
+
+ static TypeHandle FromPtr(PTR_VOID aPtr)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return TypeHandle(dac_cast<TADDR>(aPtr));
+ }
+ // Create a TypeHandle from the target address of a MethodTable
+ static TypeHandle FromTAddr(TADDR data)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return TypeHandle(data);
+ }
+
+ // Used when you ask for a class in the JIT interface and all you have
+ // is the MethodDesc of an array method...
+ // Converts from a JIT interface handle to an internal EE TypeHandle.
+ explicit TypeHandle(struct CORINFO_CLASS_STRUCT_*aPtr)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_asTAddr = dac_cast<TADDR>(aPtr);
+ // NormalizeUnsharedArrayMT();
+ INDEBUGIMPL(Verify());
+ }
+
+ TypeHandle(MethodTable const * aMT) {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_asTAddr = dac_cast<TADDR>(aMT);
+ // NormalizeUnsharedArrayMT();
+ INDEBUGIMPL(Verify());
+ }
+
+ explicit TypeHandle(TypeDesc *aType) {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(aType);
+
+ m_asTAddr = (dac_cast<TADDR>(aType) | 2);
+ INDEBUGIMPL(Verify());
+ }
+
+ inline BOOL IsNativeValueType() const;
+ inline MethodTable *AsNativeValueType() const;
+
+private:
+ // This constructor has been made private. You must use the explicit static functions
+ // TypeHandle::FromPtr and TypeHandle::FromTAddr instead of these constructors.
+ // Allowing a public constructor that takes a "void *" or a "TADDR" is error-prone.
+ explicit TypeHandle(TADDR aTAddr)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ m_asTAddr = aTAddr;
+ // NormalizeUnsharedArrayMT();
+ INDEBUGIMPL(Verify());
+ }
+
+
+public:
+ FORCEINLINE int operator==(const TypeHandle& typeHnd) const {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return(m_asTAddr == typeHnd.m_asTAddr);
+ }
+
+ FORCEINLINE int operator!=(const TypeHandle& typeHnd) const {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return(m_asTAddr != typeHnd.m_asTAddr);
+ }
+
+ // Methods for probing exactly what kind of a type handle we have
+ FORCEINLINE BOOL IsNull() const {
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifdef _PREFIX_
+ if (m_asTAddr == 0) {
+#ifndef DACCESS_COMPILE
+ PREFIX_ASSUME(m_asPtr == NULL);
+#endif
+ return true;
+ }
+ else {
+#ifndef DACCESS_COMPILE
+ PREFIX_ASSUME(m_asPtr != NULL);
+#endif
+ return false;
+ }
+#else
+ return(m_asTAddr == 0);
+#endif
+ }
+
+ // Note that this returns a denormalized BOOL to help the compiler with optimizations
+ FORCEINLINE BOOL IsTypeDesc() const {
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifdef _PREFIX_
+ if (m_asTAddr & 2) {
+ PREFIX_ASSUME(m_asTAddr != NULL);
+#ifndef DACCESS_COMPILE
+ PREFIX_ASSUME(m_asPtr != NULL);
+#endif
+ return true;
+ }
+ else {
+ return false;
+ }
+#else
+ return(m_asTAddr & 2);
+#endif
+ }
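+
+ // Illustrative note: the constructors above establish the tagging discipline that
+ // IsTypeDesc() tests. A minimal sketch, assuming the low two bits of any MethodTable
+ // or TypeDesc pointer are clear:
+ //
+ //   TypeHandle(pMT)   => m_asTAddr == (TADDR)pMT           // bit 2 clear
+ //   TypeHandle(pDesc) => m_asTAddr == ((TADDR)pDesc | 2)   // bit 2 set
+ //
+ // so AsTypeDesc() must strip the tag bit before dereferencing.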
+
+ BOOL IsEnum() const;
+
+ BOOL IsFnPtrType() const;
+
+ inline PTR_MethodTable AsMethodTable() const;
+
+ inline PTR_TypeDesc AsTypeDesc() const;
+
+ // To the extent possible, you should try to use methods like the ones
+ // below that treat all types uniformly.
+
+ // Gets the size that this type would take up embedded in another object;
+ // thus reference types all return sizeof(void*).
+ unsigned GetSize() const;
+
+ // Returns the type name, including the generic instantiation if possible.
+ // See the TypeString class for better control over name formatting.
+ void GetName(SString &result) const;
+
+ // Returns the ELEMENT_TYPE_* that you would use in a signature
+ // The only normalization that happens is that for type handles
+ // for instantiated types (e.g. class List<String> or
+ // value type Pair<int,int>) this returns either ELEMENT_TYPE_CLASS
+ // or ELEMENT_TYPE_VALUE, _not_ ELEMENT_TYPE_WITH.
+ CorElementType GetSignatureCorElementType() const;
+
+ // This helper:
+ // - Will return an enum's underlying type
+ // - Will return underlying primitive for System.Int32 etc...
+ // - Will return underlying primitive as will be used in the calling convention
+ // For example
+ // struct t
+ // {
+ // public int i;
+ // }
+ // will return ELEMENT_TYPE_I4 on x86 instead of ELEMENT_TYPE_VALUETYPE. We
+ // call this type of value type a primitive value type
+ //
+ // The internal representation is used, among other things, for the calling convention
+ // (the JIT's handling of primitive value types) and for optimizing marshalling.
+ //
+ // This will NOT convert E_T_ARRAY, E_T_SZARRAY etc. to E_T_CLASS (though it probably
+ // should). Use CorTypeInfo::IsObjRef for that.
+ CorElementType GetInternalCorElementType() const;
+
+ // This helper will return the same as GetSignatureCorElementType except:
+ // - Will return an enum's underlying type
+ CorElementType GetVerifierCorElementType() const;
+
+ //-------------------------------------------------------------------
+ // CASTING
+ //
+ // There are two variants of the "CanCastTo" method:
+ //
+ // CanCastTo
+ // - restore encoded pointers on demand
+ // - might throw, might trigger GC
+ // - return type is boolean (FALSE = cannot cast, TRUE = can cast)
+ //
+ // CanCastToNoGC
+ // - do not restore encoded pointers on demand
+ // - does not throw, does not trigger GC
+ // - return type is three-valued (CanCast, CannotCast, MaybeCast)
+ // - MaybeCast indicates that the test tripped on an encoded pointer
+ // so the caller should now call CanCastTo if it cares
+ //
+ // Note that if the TypeHandle is a valuetype, the caller is responsible
+ // for checking that the valuetype is in its boxed form before calling
+ // CanCastTo. Otherwise, the caller should be using IsBoxedAndCanCastTo()
+ typedef enum { CannotCast, CanCast, MaybeCast } CastResult;
+
+ BOOL CanCastTo(TypeHandle type, TypeHandlePairList *pVisited = NULL) const;
+ BOOL IsBoxedAndCanCastTo(TypeHandle type, TypeHandlePairList *pVisited) const;
+ CastResult CanCastToNoGC(TypeHandle type) const;
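+
+ // Illustrative caller pattern (hypothetical, not part of this header): try the NoGC
+ // variant first and fall back to the full check only on MaybeCast.
+ //
+ //   TypeHandle::CastResult r = thSrc.CanCastToNoGC(thDst);
+ //   BOOL canCast = (r == TypeHandle::MaybeCast)
+ //                      ? thSrc.CanCastTo(thDst)    // may throw / trigger GC
+ //                      : (r == TypeHandle::CanCast);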
+
+#ifndef DACCESS_COMPILE
+ // Type equivalence based on Guid and TypeIdentifier attributes
+ inline BOOL IsEquivalentTo(TypeHandle type COMMA_INDEBUG(TypeHandlePairList *pVisited = NULL)) const;
+#endif
+
+ // Get the parent, known to be decoded
+ TypeHandle GetParent() const;
+
+ // Obtain element type for an array or pointer, returning NULL otherwise
+ TypeHandle GetTypeParam() const;
+
+ // Obtain instantiation from an instantiated type
+ // Returns an empty Instantiation if not instantiated
+ Instantiation GetInstantiation() const;
+
+ // Does this type satisfy its class constraints, recursively up the hierarchy
+ BOOL SatisfiesClassConstraints() const;
+
+ TypeHandle Instantiate(Instantiation inst) const;
+ TypeHandle MakePointer() const;
+ TypeHandle MakeByRef() const;
+ TypeHandle MakeSZArray() const;
+ TypeHandle MakeArray(int rank) const;
+ TypeHandle MakeNativeValueType() const;
+
+ // Obtain instantiation from an instantiated type *or* a pointer to the element type for an array
+ Instantiation GetClassOrArrayInstantiation() const;
+
+ // Is this type instantiated?
+ BOOL HasInstantiation() const;
+
+ // Is this a generic type whose type arguments are its formal type parameters?
+ BOOL IsGenericTypeDefinition() const;
+
+ // Is this either a non-generic type (e.g. a non-generic class type or an array type or a pointer type etc.)
+ // or a generic type whose type arguments are its formal type parameters?
+ // Equivalent to (!HasInstantiation() || IsGenericTypeDefinition());
+ inline BOOL IsTypicalTypeDefinition() const;
+
+ enum InteropKind
+ {
+ Interop_ManagedToNative, // use for RCW-related queries
+ Interop_NativeToManaged, // use for CCW-related queries
+ };
+
+ inline BOOL SupportsGenericInterop(InteropKind interopKind) const;
+
+ BOOL IsSharedByGenericInstantiations() const;
+
+ // Recursively search the type arguments and if
+ // one of the type arguments is Canon then return TRUE
+ //
+ // A<__Canon> is the canonical TypeHandle (aka "representative" generic MT)
+ // A<B<__Canon>> is a subtype that contains a Canonical type
+ //
+ BOOL IsCanonicalSubtype() const;
+
+ // Similar to IsCanonicalSubtype, but applied to a vector.
+ static BOOL IsCanonicalSubtypeInstantiation(Instantiation inst);
+
+ // For an uninstantiated generic type, return the number of type parameters required for instantiation
+ // For an instantiated type, return the number of type parameters in the instantiation
+ // Otherwise return 0
+ DWORD GetNumGenericArgs() const;
+
+ BOOL IsValueType() const;
+ BOOL IsInterface() const;
+ BOOL IsAbstract() const;
+
+ inline DWORD IsObjectType() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return *this == TypeHandle(g_pObjectClass);
+ }
+
+ DWORD IsTransparentProxy() const;
+
+ // Retrieve the key corresponding to this handle
+ TypeKey GetTypeKey() const;
+
+ // To what level has this type been loaded?
+ ClassLoadLevel GetLoadLevel() const;
+
+ // Equivalent to GetLoadLevel() == CLASS_LOADED
+ BOOL IsFullyLoaded() const;
+
+ void DoFullyLoad(Generics::RecursionGraph *pVisited, ClassLoadLevel level, DFLPendingList *pPending, BOOL *pfBailed,
+ const InstantiationContext *pInstContext);
+
+ inline void SetIsFullyLoaded();
+
+
+#ifdef _DEBUG
+ // Check that this type matches the key given
+ // i.e. that all aspects (element type, module/token, rank for arrays, instantiation for generic types) match up
+ CHECK CheckMatchesKey(TypeKey *pKey) const;
+
+ // Check that this type is loaded up to the level indicated
+ // Also check that it is non-null
+ CHECK CheckLoadLevel(ClassLoadLevel level);
+
+ // Equivalent to CheckLoadLevel(CLASS_LOADED)
+ CHECK CheckFullyLoaded();
+#endif
+
+#ifdef FEATURE_HFA
+ bool IsHFA() const;
+ CorElementType GetHFAType() const;
+#endif // FEATURE_HFA
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+ bool RequiresAlign8() const;
+#endif // FEATURE_64BIT_ALIGNMENT
+
+#ifndef DACCESS_COMPILE
+
+ BOOL IsBlittable() const;
+ BOOL HasLayout() const;
+
+#ifdef FEATURE_COMINTEROP
+ TypeHandle GetCoClassForInterface() const;
+ DWORD IsComClassInterface() const;
+ BOOL IsComObjectType() const;
+ BOOL IsComEventItfType() const;
+ CorIfaceAttr GetComInterfaceType() const;
+ TypeHandle GetDefItfForComClassItf() const;
+
+ BOOL IsProjectedFromWinRT() const;
+ BOOL IsExportedToWinRT() const;
+
+ ComCallWrapperTemplate *GetComCallWrapperTemplate() const;
+ BOOL SetComCallWrapperTemplate(ComCallWrapperTemplate *pTemplate);
+#endif // FEATURE_COMINTEROP
+
+#endif
+
+ // Unlike AsMethodTable, GetMethodTable will get the method table
+ // of the type, regardless of whether it is an array etc. Note, however,
+ // that this method table may be shared, and some types (like TypeByRef) have
+ // no method table (and this function returns NULL for them).
+ inline PTR_MethodTable GetMethodTable() const;
+
+ // Returns the method table which should be used for visibility checking.
+ // Like GetMethodTable except for TypeDescs returns the root ElementType.
+ // So for Foo[] instead of returning Array returns Foo.
+ inline MethodTable* GetMethodTableOfElementType() const;
+
+ // Returns the MethodTable for the SZARRAY or ARRAY type
+ inline MethodTable * GetPossiblySharedArrayMethodTable() const;
+
+ // As above but returns a TypeHandle (so it will return a non-null result
+ // for generic type variables, for instance).
+ inline TypeHandle GetElementType() const;
+
+ // Return the canonical representative MT amongst the set of MT's that share
+ // code with the MT for the given TypeHandle because of generics.
+ PTR_MethodTable GetCanonicalMethodTable() const;
+
+ // The module that defined the underlying type
+ // (First strip off array/ptr qualifiers and generic type arguments)
+ PTR_Module GetModule() const;
+
+ // The ngen'ed module where this type lives
+ PTR_Module GetZapModule() const;
+
+ // Does this immediate item live in an NGEN module?
+ BOOL IsZapped() const;
+
+ // The module where this type lives for the purposes of loading and prejitting
+ // Note: NGen time result might differ from runtime result for parametrized types (generics, arrays, etc.)
+ // See code:ClassLoader::ComputeLoaderModule or file:clsload.hpp#LoaderModule for more information
+#ifndef BINDER
+ PTR_Module GetLoaderModule() const;
+#else
+ MdilModule* GetLoaderModule() const;
+#endif
+
+ // The assembly that defined this type (== GetModule()->GetAssembly())
+ Assembly * GetAssembly() const;
+
+ // GetDomain on an instantiated type, e.g. C<ty1,ty2> returns the SharedDomain if all the
+ // constituent parts of the type are SharedDomain (i.e. domain-neutral),
+ // and returns an AppDomain if any of the parts are from an AppDomain,
+ // i.e. are domain-bound. If any of the parts are domain-bound
+ // then they will all belong to the same domain.
+ PTR_BaseDomain GetDomain() const;
+
+ PTR_LoaderAllocator GetLoaderAllocator() const;
+
+ BOOL IsDomainNeutral() const;
+
+ // Get the class token, assuming the type handle represents a named type,
+ // i.e. a class, a value type, a generic instantiation etc.
+ inline mdTypeDef GetCl() const;
+
+ // Shortcuts
+
+ // ARRAY or SZARRAY TypeDesc (arrays with a shared MethodTable)
+ // If this is TRUE, it is OK to call AsArray()
+ // Also see IsArrayType()
+ BOOL IsArray() const;
+
+ // See comment of IsArrayType() for the explanation of this method
+#if 0
+ void NormalizeUnsharedArrayMT();
+#endif
+
+ // ARRAY or SZARRAY
+ // Note that this does not imply that it is OK to call AsArray(). See IsArray()
+ //
+ // All arrays, even those with a unique unshared MethodTable, have an ArrayTypeDesc
+ // which is used for type identity. However, over time, people have started
+ // wrapping the MethodTables directly in a TypeHandle. Note that such
+ // TypeHandles cannot be used for type identity. However, IsArrayType() lets
+ // you check even for such cases where IsArray() returns FALSE, but the type
+ // still is an array type.
+ //
+ // @TODO: Change all the constructors of TypeHandle which take a MethodTable
+ // to call NormalizeUnsharedArrayMT(). TypeHandle::Verify() can then enforce
+ // that IsArray() is fully correct.
+ BOOL IsArrayType() const;
+
+ // VAR or MVAR
+ BOOL IsGenericVariable() const;
+
+ // BYREF
+ BOOL IsByRef() const;
+
+ // PTR
+ BOOL IsPointer() const;
+
+ // True if this type *is* a formal generic type parameter or any component of it is a formal generic type parameter
+ BOOL ContainsGenericVariables(BOOL methodOnly=FALSE) const;
+
+ Module* GetDefiningModuleForOpenType() const;
+
+ // Is actually ParamTypeDesc (ARRAY, SZARRAY, BYREF, PTR)
+ BOOL HasTypeParam() const;
+
+ BOOL IsRestored_NoLogging() const;
+ BOOL IsRestored() const;
+
+ // Does this type have zap-encoded components (generic arguments, etc)?
+ BOOL HasUnrestoredTypeKey() const;
+
+ // True if this type handle is a zap-encoded fixup
+ BOOL IsEncodedFixup() const;
+
+ // Only used at NGEN-time
+ BOOL ComputeNeedsRestore(DataImage *image, TypeHandleList *pVisited) const;
+
+ void DoRestoreTypeKey();
+
+ void CheckRestore() const;
+ BOOL IsExternallyVisible() const;
+
+ // Is this type part of an assembly loaded for introspection?
+ BOOL IsIntrospectionOnly() const;
+
+ // Checks this type and its components for "IsIntrospectionOnly"
+ BOOL ContainsIntrospectionOnlyTypes() const;
+
+ // Does this type participate in type equivalence?
+ inline BOOL HasTypeEquivalence() const;
+
+ // Not clear we should have this.
+ inline PTR_ArrayTypeDesc AsArray() const;
+
+ FnPtrTypeDesc* AsFnPtrType() const;
+
+ TypeVarTypeDesc* AsGenericVariable() const;
+
+ Instantiation GetInstantiationOfParentClass(MethodTable *pWhichParent) const;
+
+ PTR_VOID AsPtr() const { // Please don't use this if you can avoid it
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return(PTR_VOID(m_asTAddr));
+ }
+
+ TADDR AsTAddr() const {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return m_asTAddr;
+ }
+
+ INDEBUGIMPL(BOOL Verify();) // DEBUGGING Make certain this is a valid type handle
+
+#if defined(CHECK_APP_DOMAIN_LEAKS) || defined(_DEBUG)
+ BOOL IsAppDomainAgile() const;
+ BOOL IsCheckAppDomainAgile() const;
+
+ BOOL IsArrayOfElementsAppDomainAgile() const;
+ BOOL IsArrayOfElementsCheckAppDomainAgile() const;
+#endif
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ OBJECTREF GetManagedClassObject() const;
+ OBJECTREF GetManagedClassObjectFast() const;
+
+ static TypeHandle MergeArrayTypeHandlesToCommonParent(
+ TypeHandle ta, TypeHandle tb);
+
+ static TypeHandle MergeTypeHandlesToCommonParent(
+ TypeHandle ta, TypeHandle tb);
+
+
+ BOOL NotifyDebuggerLoad(AppDomain *domain, BOOL attaching) const;
+ void NotifyDebuggerUnload(AppDomain *domain) const;
+
+ // Execute the callback functor for each MethodTable that makes up the given type handle. This method
+ // does not invoke the functor for generic variables
+ template<class T>
+ inline void ForEachComponentMethodTable(T &callback) const;
+
+private:
+ static TypeHandle MergeClassWithInterface(
+ TypeHandle tClass, TypeHandle tInterface);
+
+ union
+ {
+ TADDR m_asTAddr; // we look at the low order bits
+#ifndef DACCESS_COMPILE
+ void * m_asPtr;
+ PTR_MethodTable m_asMT;
+ PTR_TypeDesc m_asTypeDesc;
+ PTR_ArrayTypeDesc m_asArrayTypeDesc;
+ PTR_ParamTypeDesc m_asParamTypeDesc;
+ PTR_TypeVarTypeDesc m_asTypeVarTypeDesc;
+ PTR_FnPtrTypeDesc m_asFnPtrTypeDesc;
+#endif
+ };
+};
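+
+// Illustrative sketch (not part of the original commit): the union above works
+// because a TypeHandle is a tagged pointer. The AsTypeDesc()/AsArray() bodies in
+// typehandle.inl subtract 2 before forming the pointer, which suggests bit 1 of
+// m_asTAddr distinguishes a TypeDesc from a MethodTable. A hypothetical decoder:
+#if 0
+inline bool IsTypeDescEncoding(TADDR addr)
+{
+    return (addr & 2) != 0;          // assumption: bit 1 set <=> TypeDesc
+}
+
+inline TADDR DecodeToRawPointer(TADDR addr)
+{
+    // Strip the tag to recover the raw MethodTable* / TypeDesc* address.
+    return IsTypeDescEncoding(addr) ? (addr - 2) : addr;
+}
+#endif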
+
+class TypeHandleList
+{
+ TypeHandle m_typeHandle;
+ TypeHandleList* m_pNext;
+ bool m_fBrokenCycle;
+ public:
+ TypeHandleList(TypeHandle t, TypeHandleList* pNext) : m_typeHandle(t),m_pNext(pNext),m_fBrokenCycle(false) { };
+ static BOOL Exists(TypeHandleList* pList, TypeHandle t)
+ {
+ LIMITED_METHOD_CONTRACT;
+ while (pList != NULL) { if (pList->m_typeHandle == t) return TRUE; pList = pList->m_pNext; }
+ return FALSE;
+ }
+
+ // Supports enumeration of the list.
+ static BOOL GetNext(TypeHandleList** ppList, TypeHandle* pHandle)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (*ppList != NULL)
+ {
+ *pHandle = (*ppList)->m_typeHandle;
+ (*ppList) = (*ppList)->m_pNext;
+ return TRUE;
+ }
+ return FALSE;
+ }
+
+ void MarkBrokenCycle(TypeHandle th)
+ {
+ LIMITED_METHOD_CONTRACT;
+ TypeHandleList* pList = this;
+ while (pList->m_typeHandle != th) { pList->m_fBrokenCycle = true; pList = pList->m_pNext; }
+ }
+ bool HasBrokenCycleMark()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_fBrokenCycle;
+ }
+};
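+
+// Illustrative sketch (not part of the original commit): TypeHandleList is meant
+// to be threaded through recursive walks as a stack-allocated visited list, as in
+// the TypeHandleList* parameter of ComputeNeedsRestore above:
+#if 0
+void WalkComponents(TypeHandle th, TypeHandleList *pVisited)
+{
+    if (TypeHandleList::Exists(pVisited, th))
+        return;                          // th is already on the current path
+
+    TypeHandleList link(th, pVisited);   // push onto the path (no heap allocation)
+    // ... recurse into th's components, passing &link as the visited list ...
+}
+#endif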
+
+class TypeHandlePairList // TODO: Template for TypeHandleList, TypeHandlePairList, TokenPairList?
+{
+ TypeHandle m_typeHandle1;
+ TypeHandle m_typeHandle2;
+ TypeHandlePairList *m_pNext;
+public:
+ TypeHandlePairList(TypeHandle t1, TypeHandle t2, TypeHandlePairList *pNext) : m_typeHandle1(t1), m_typeHandle2(t2), m_pNext(pNext) { };
+ static BOOL Exists(TypeHandlePairList *pList, TypeHandle t1, TypeHandle t2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ while (pList != NULL)
+ {
+ if (pList->m_typeHandle1 == t1 && pList->m_typeHandle2 == t2)
+ return TRUE;
+ if (pList->m_typeHandle1 == t2 && pList->m_typeHandle2 == t1)
+ return TRUE;
+
+ pList = pList->m_pNext;
+ }
+ return FALSE;
+ }
+};
+
+#if CHECK_INVARIANTS
+inline CHECK CheckPointer(TypeHandle th, IsNullOK ok = NULL_NOT_OK)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ SUPPORTS_DAC;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+
+ if (th.IsNull())
+ {
+ CHECK_MSG(ok, "Illegal null TypeHandle");
+ }
+ else
+ {
+ __if_exists(TypeHandle::Check)
+ {
+ CHECK(th.Check());
+ }
+#if 0
+ CHECK(CheckInvariant(o));
+#endif
+ }
+
+ CHECK_OK;
+}
+
+#endif // CHECK_INVARIANTS
+
+/*************************************************************************/
+// dac_casts for TypeHandle make FixupPointer<TypeHandle> work.
+//
+// TypeHandle is a wrapper around a pointer to a MethodTable or a TypeDesc. Even
+// though it may feel counterintuitive, it is possible to treat it like a pointer
+// and use the regular FixupPointer to implement TypeHandle indirection cells.
+// The lowest bit of TypeHandle (when wrapped inside FixupPointer) is
+// used to mark optional indirection.
+//
+template<>
+inline TADDR dac_cast(TypeHandle src)
+{
+ SUPPORTS_DAC;
+ return src.AsTAddr();
+}
+
+template<>
+inline TypeHandle dac_cast(TADDR src)
+{
+ SUPPORTS_DAC;
+ return TypeHandle::FromTAddr(src);
+}
+
+/*************************************************************************/
+// Instantiation is the representation of a generic instantiation.
+// It is a simple read-only array of TypeHandles. In NGen images, the type
+// handles may be encoded using indirections; that is one reason why it is
+// convenient to have a wrapper class that performs the decoding.
+class Instantiation
+{
+public:
+ // Construct empty instantiation
+ Instantiation()
+ : m_pArgs(NULL), m_nArgs(0)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ }
+
+ // Copy construct
+ Instantiation(const Instantiation & inst)
+ : m_pArgs(inst.m_pArgs), m_nArgs(inst.m_nArgs)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(m_nArgs == 0 || m_pArgs != NULL);
+ }
+
+ // Construct instantiation from array of FixupPointers
+ Instantiation(FixupPointer<TypeHandle> * pArgs, DWORD nArgs)
+ : m_pArgs(pArgs), m_nArgs(nArgs)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(m_nArgs == 0 || m_pArgs != NULL);
+ }
+
+ // Construct instantiation from array of TypeHandles
+ Instantiation(TypeHandle * pArgs, DWORD nArgs)
+ : m_nArgs(nArgs)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ DACCOP_IGNORE(CastOfMarshalledType, "Dual mode DAC problem, but since the size is the same, the cast is safe");
+ m_pArgs = (FixupPointer<TypeHandle> *)pArgs;
+ _ASSERTE(m_nArgs == 0 || m_pArgs != NULL);
+ }
+
+#ifdef DACCESS_COMPILE
+ // Construct instantiation from target array of FixupPointers in DAC.
+ // This method will create local copy of the instantiation arguments.
+ Instantiation(DPTR(FixupPointer<TypeHandle>) pArgs, DWORD nArgs)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Create a local copy of the instantiation under DAC
+ PVOID pLocalArgs = PTR_READ(dac_cast<TADDR>(pArgs), nArgs * sizeof(TypeHandle));
+ m_pArgs = (FixupPointer<TypeHandle> *)pLocalArgs;
+
+ m_nArgs = nArgs;
+
+ _ASSERTE(m_nArgs == 0 || m_pArgs != NULL);
+ }
+#endif
+
+ // Return i-th instantiation argument
+ TypeHandle operator[](DWORD iArg) const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(iArg < m_nArgs);
+ return m_pArgs[iArg].GetValue();
+ }
+
+ DWORD GetNumArgs() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_nArgs;
+ }
+
+ BOOL IsEmpty() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_nArgs == 0;
+ }
+
+ // Unsafe access to the instantiation. Do not use unless absolutely necessary!!!
+ FixupPointer<TypeHandle> * GetRawArgs() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pArgs;
+ }
+
+private:
+ // Note that for DAC builds, m_pArgs may be a host-allocated buffer, not a copy of an object marshalled by DAC.
+ FixupPointer<TypeHandle> * m_pArgs;
+ DWORD m_nArgs;
+};
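+
+// Illustrative sketch (not part of the original commit): a typical read-only
+// traversal of an Instantiation; operator[] transparently decodes any NGen
+// indirection via FixupPointer::GetValue():
+#if 0
+void VisitArgs(Instantiation inst)
+{
+    for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+    {
+        TypeHandle thArg = inst[i];      // already decoded
+        // ... use thArg ...
+    }
+}
+#endif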
+
+#endif // TYPEHANDLE_H
diff --git a/src/vm/typehandle.inl b/src/vm/typehandle.inl
new file mode 100644
index 0000000000..f18ffff79f
--- /dev/null
+++ b/src/vm/typehandle.inl
@@ -0,0 +1,286 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: typehandle.inl
+//
+
+// ============================================================================
+
+
+#ifndef _TYPEHANDLE_INL_
+#define _TYPEHANDLE_INL_
+
+#include "typehandle.h"
+
+inline mdTypeDef TypeHandle::GetCl() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ PREFIX_ASSUME(GetMethodTable() != NULL);
+ return GetMethodTable()->GetCl();
+}
+
+inline PTR_MethodTable TypeHandle::GetMethodTable() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (IsTypeDesc())
+ return(AsTypeDesc()->GetMethodTable());
+ else
+ return AsMethodTable();
+}
+
+inline void TypeHandle::SetIsFullyLoaded()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (IsTypeDesc())
+ return AsTypeDesc()->SetIsFullyLoaded();
+ else
+ return AsMethodTable()->SetIsFullyLoaded();
+}
+
+inline MethodTable* TypeHandle::GetMethodTableOfElementType() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (IsTypeDesc())
+ {
+ TypeHandle elementType = AsTypeDesc()->GetTypeParam();
+ return elementType.GetMethodTableOfElementType();
+ }
+ else
+ {
+ return AsMethodTable();
+ }
+}
+
+inline MethodTable * TypeHandle::GetPossiblySharedArrayMethodTable() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsArrayType());
+
+ if (IsArray())
+ return AsArray()->GetTemplateMethodTable();
+ else
+ return AsMethodTable();
+}
+
+inline TypeHandle TypeHandle::GetElementType() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (IsTypeDesc())
+ {
+ if (IsGenericVariable())
+ return *this;
+
+ return AsTypeDesc()->GetTypeParam().GetElementType();
+ }
+ else
+ {
+ return *this;
+ }
+}
+
+inline BOOL TypeHandle::IsZapped() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return (GetZapModule() != NULL);
+}
+
+inline PTR_ArrayTypeDesc TypeHandle::AsArray() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(IsArray());
+
+ PTR_ArrayTypeDesc result = PTR_ArrayTypeDesc(m_asTAddr - 2);
+ PREFIX_ASSUME(result != NULL);
+ return result;
+}
+
+// Methods to get at the two possible representations
+inline PTR_MethodTable TypeHandle::AsMethodTable() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(!IsTypeDesc());
+
+ return PTR_MethodTable(m_asTAddr);
+}
+
+inline PTR_TypeDesc TypeHandle::AsTypeDesc() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(IsTypeDesc());
+
+ PTR_TypeDesc result = PTR_TypeDesc(m_asTAddr - 2);
+ PREFIX_ASSUME(result != NULL);
+ return result;
+}
+
+inline FnPtrTypeDesc* TypeHandle::AsFnPtrType() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(IsFnPtrType());
+
+ FnPtrTypeDesc* result = PTR_FnPtrTypeDesc(m_asTAddr - 2);
+ PREFIX_ASSUME(result != NULL);
+ return result;
+}
+
+inline TypeVarTypeDesc* TypeHandle::AsGenericVariable() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(IsGenericVariable());
+
+ TypeVarTypeDesc* result = PTR_TypeVarTypeDesc(m_asTAddr - 2);
+ PREFIX_ASSUME(result != NULL);
+ return result;
+}
+
+inline BOOL TypeHandle::IsNativeValueType() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (IsTypeDesc() && AsTypeDesc()->IsNativeValueType());
+}
+
+inline MethodTable *TypeHandle::AsNativeValueType() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(IsNativeValueType());
+ return AsTypeDesc()->GetMethodTable();
+}
+
+inline BOOL TypeHandle::IsTypicalTypeDefinition() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return !HasInstantiation() || IsGenericTypeDefinition();
+}
+
+inline BOOL TypeHandle::SupportsGenericInterop(InteropKind interopKind) const
+{
+ LIMITED_METHOD_CONTRACT;
+ return (!IsTypeDesc() && AsMethodTable()->SupportsGenericInterop(interopKind));
+}
+
+inline BOOL TypeHandle::HasTypeEquivalence() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (IsTypeDesc())
+ return AsTypeDesc()->HasTypeEquivalence();
+ else
+ return AsMethodTable()->HasTypeEquivalence();
+}
+
+
+//--------------------------------------------------------------------------------------
+// IsEquivalentTo is based on Guid and TypeIdentifier attributes to support the "no-PIA"
+// feature. The idea is that compilers pull types from the PIA into different assemblies
+// and these types - represented by separate MTs/TypeHandles - are considered equivalent
+// for certain operations.
+
+
+#ifndef DACCESS_COMPILE
+inline BOOL TypeHandle::IsEquivalentTo(TypeHandle type COMMA_INDEBUG(TypeHandlePairList *pVisited /*= NULL*/)) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (*this == type)
+ return TRUE;
+
+#ifdef FEATURE_TYPEEQUIVALENCE
+ // bail early for normal types
+ if (!HasTypeEquivalence() || !type.HasTypeEquivalence())
+ return FALSE;
+
+ if (IsTypeDesc())
+ return AsTypeDesc()->IsEquivalentTo(type COMMA_INDEBUG(pVisited));
+
+ if (type.IsTypeDesc())
+ return FALSE;
+
+ return AsMethodTable()->IsEquivalentTo_Worker(type.AsMethodTable() COMMA_INDEBUG(pVisited));
+#else
+ return FALSE;
+#endif
+}
+#endif
+
+// Execute the callback functor for each MethodTable that makes up the given type handle. This method
+// does not invoke the functor for generic variables
+template<class T>
+inline void TypeHandle::ForEachComponentMethodTable(T &callback) const
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (IsTypeDesc() && AsTypeDesc()->HasTypeParam())
+ {
+ // If we have a type parameter, then we just need to invoke ourselves on that parameter
+ AsTypeDesc()->GetTypeParam().ForEachComponentMethodTable(callback);
+ }
+ else if (IsFnPtrType())
+ {
+ // If we are a function pointer, then we need to invoke the callback method on the function
+ // pointer's return type as well as each of its argument types
+ FnPtrTypeDesc *pFnPtr = AsFnPtrType();
+ for (DWORD iArg = 0; iArg < pFnPtr->GetNumArgs() + 1; ++iArg)
+ {
+ pFnPtr->GetRetAndArgTypesPointer()[iArg].ForEachComponentMethodTable(callback);
+ }
+ }
+ else if (HasInstantiation())
+ {
+ // If we have a generic instantiation, we need to invoke the callback on each of the generic
+ // parameters as well as the root method table.
+ callback(GetMethodTable());
+
+ Instantiation instantiation = GetInstantiation();
+ for (DWORD iGenericArg = 0; iGenericArg < instantiation.GetNumArgs(); ++iGenericArg)
+ {
+ instantiation[iGenericArg].ForEachComponentMethodTable(callback);
+ }
+ }
+ else if (IsGenericVariable())
+ {
+ // We don't invoke the callback on generic variables since they don't have method tables
+ return;
+ }
+ else
+ {
+ // Otherwise, we must be a simple type, so just do the callback directly on the method table
+ callback(GetMethodTable());
+ }
+}
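+
+// Illustrative sketch (not part of the original commit): the callback can be any
+// functor invocable with a MethodTable*, e.g. a simple counter:
+#if 0
+struct CountMethodTables
+{
+    DWORD m_count;
+    CountMethodTables() : m_count(0) {}
+    void operator()(MethodTable *pMT) { m_count++; }
+};
+
+DWORD CountComponents(TypeHandle th)
+{
+    CountMethodTables counter;
+    th.ForEachComponentMethodTable(counter);
+    return counter.m_count;
+}
+#endif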
+
+
+#endif // _TYPEHANDLE_INL_
+
diff --git a/src/vm/typehash.cpp b/src/vm/typehash.cpp
new file mode 100644
index 0000000000..4c3b953dbe
--- /dev/null
+++ b/src/vm/typehash.cpp
@@ -0,0 +1,874 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: typehash.cpp
+//
+
+#include "common.h"
+#include "excep.h"
+#include "typehash.h"
+#include "eeconfig.h"
+#include "generics.h"
+#include "typestring.h"
+#include "typedesc.h"
+#include "typekey.h"
+#ifdef FEATURE_PREJIT
+#include "zapsig.h"
+#include "compile.h"
+#endif
+#include "ngenhash.inl"
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4244)
+#endif // _MSC_VER
+
+#ifndef DACCESS_COMPILE
+
+// ============================================================================
+// Class hash table methods
+// ============================================================================
+/* static */
+EETypeHashTable *EETypeHashTable::Create(LoaderAllocator* pAllocator, Module *pModule, DWORD dwNumBuckets, AllocMemTracker *pamTracker)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ LoaderHeap *pHeap = pAllocator->GetLowFrequencyHeap();
+ EETypeHashTable *pThis = (EETypeHashTable*)pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(EETypeHashTable)));
+
+ new (pThis) EETypeHashTable(pModule, pHeap, dwNumBuckets);
+
+#ifdef _DEBUG
+ pThis->InitUnseal();
+#endif
+
+ pThis->m_pAllocator = pAllocator;
+
+ return pThis;
+}
+
+LoaderAllocator *EETypeHashTable::GetLoaderAllocator()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pAllocator)
+ {
+ return m_pAllocator;
+ }
+ else
+ {
+ _ASSERTE(m_pModule != NULL);
+ return m_pModule->GetLoaderAllocator();
+ }
+}
+
+#endif // #ifdef DACCESS_COMPILE
+
+void EETypeHashTable::Iterator::Reset()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pTable)
+ {
+#ifdef _DEBUG
+ m_pTable->Unseal();
+#endif
+ m_pTable = NULL;
+ }
+
+ Init();
+}
+
+void EETypeHashTable::Iterator::Init()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef _DEBUG
+ if (m_pTable)
+ m_pTable->Seal(); // The table cannot be changing while it is being iterated
+#endif
+
+ m_fIterating = false;
+}
+
+EETypeHashTable::Iterator::Iterator()
+{
+ WRAPPER_NO_CONTRACT;
+ m_pTable = NULL;
+ Init();
+}
+
+EETypeHashTable::Iterator::Iterator(EETypeHashTable * pTable)
+{
+ WRAPPER_NO_CONTRACT;
+ m_pTable = pTable;
+ Init();
+}
+
+EETypeHashTable::Iterator::~Iterator()
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef _DEBUG
+ if (m_pTable)
+ m_pTable->Unseal(); // Done with the iterator so we unseal
+#endif
+}
+
+BOOL EETypeHashTable::FindNext(Iterator *it, EETypeHashEntry **ppEntry)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (!it->m_fIterating)
+ {
+ BaseInitIterator(&it->m_sIterator);
+ it->m_fIterating = true;
+ }
+
+ *ppEntry = it->m_sIterator.Next();
+ return *ppEntry ? TRUE : FALSE;
+}
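+
+// Illustrative sketch (not part of the original commit): the intended iteration
+// idiom pairs an Iterator with FindNext until it returns FALSE:
+#if 0
+void VisitAllEntries(EETypeHashTable *pTable)
+{
+    EETypeHashTable::Iterator it(pTable);   // seals the table in debug builds
+    EETypeHashEntry_t *pEntry;
+    while (pTable->FindNext(&it, &pEntry))
+    {
+        TypeHandle th = pEntry->GetTypeHandle();
+        // ... use th ...
+    }
+}
+#endif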
+
+DWORD EETypeHashTable::GetCount()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return BaseGetElementCount();
+}
+
+static DWORD HashTypeHandle(DWORD level, TypeHandle t);
+
+// Calculate hash value for a type def or instantiated type def
+static DWORD HashPossiblyInstantiatedType(DWORD level, mdTypeDef token, Instantiation inst)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(TypeFromToken(token) == mdtTypeDef);
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ INT_PTR dwHash = 5381;
+
+ dwHash = ((dwHash << 5) + dwHash) ^ token;
+ if (!inst.IsEmpty())
+ {
+ dwHash = ((dwHash << 5) + dwHash) ^ inst.GetNumArgs();
+
+ // Hash two levels of the hierarchy. A simple nesting of generic instantiations is
+ // pretty common in generic collections, e.g.: ICollection<KeyValuePair<TKey, TValue>>
+ if (level < 2)
+ {
+ // Hash n type parameters
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ dwHash = ((dwHash << 5) + dwHash) ^ HashTypeHandle(level+1, inst[i]);
+ }
+ }
+ }
+
+ return dwHash;
+}
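+
+// The combining step above is the classic djb2 pattern: hash = hash * 33 ^ value,
+// where ((hash << 5) + hash) is the multiplication by 33. A minimal standalone
+// sketch (illustrative, not part of the original commit):
+#if 0
+static INT_PTR Djb2Combine(INT_PTR dwHash, INT_PTR value)
+{
+    return ((dwHash << 5) + dwHash) ^ value;   // dwHash * 33 ^ value
+}
+#endif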
+
+// Calculate hash value for a function pointer type
+static DWORD HashFnPtrType(DWORD level, BYTE callConv, DWORD numArgs, TypeHandle *retAndArgTypes)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ INT_PTR dwHash = 5381;
+
+ dwHash = ((dwHash << 5) + dwHash) ^ ELEMENT_TYPE_FNPTR;
+ dwHash = ((dwHash << 5) + dwHash) ^ callConv;
+ dwHash = ((dwHash << 5) + dwHash) ^ numArgs;
+ if (level < 1)
+ {
+ for (DWORD i = 0; i <= numArgs; i++)
+ {
+ dwHash = ((dwHash << 5) + dwHash) ^ HashTypeHandle(level+1, retAndArgTypes[i]);
+ }
+ }
+
+ return dwHash;
+}
+
+// Calculate hash value for an array/pointer/byref type
+static DWORD HashParamType(DWORD level, CorElementType kind, TypeHandle typeParam)
+{
+ WRAPPER_NO_CONTRACT;
+ INT_PTR dwHash = 5381;
+
+ dwHash = ((dwHash << 5) + dwHash) ^ kind;
+ dwHash = ((dwHash << 5) + dwHash) ^ HashTypeHandle(level, typeParam);
+
+ return dwHash;
+}
+
+// Calculate hash value from type handle
+static DWORD HashTypeHandle(DWORD level, TypeHandle t)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(t));
+ PRECONDITION(!t.IsEncodedFixup());
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ DWORD retVal = 0;
+
+ INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(goto Exit;);
+
+ if (t.HasTypeParam())
+ {
+ retVal = HashParamType(level, t.GetInternalCorElementType(), t.GetTypeParam());
+ }
+ else if (t.IsGenericVariable())
+ {
+ retVal = (dac_cast<PTR_TypeVarTypeDesc>(t.AsTypeDesc())->GetToken());
+ }
+ else if (t.HasInstantiation())
+ {
+ retVal = HashPossiblyInstantiatedType(level, t.GetCl(), t.GetInstantiation());
+ }
+ else if (t.IsFnPtrType())
+ {
+ FnPtrTypeDesc* pTD = t.AsFnPtrType();
+ retVal = HashFnPtrType(level, pTD->GetCallConv(), pTD->GetNumArgs(), pTD->GetRetAndArgTypesPointer());
+ }
+ else
+ retVal = HashPossiblyInstantiatedType(level, t.GetCl(), Instantiation());
+
+#if defined(FEATURE_STACK_PROBE) && !defined(DACCESS_COMPILE)
+Exit:
+ ;
+#endif
+ END_INTERIOR_STACK_PROBE;
+
+ return retVal;
+}
+
+// Calculate hash value from key
+static DWORD HashTypeKey(TypeKey* pKey)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pKey));
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ if (pKey->GetKind() == ELEMENT_TYPE_CLASS)
+ {
+ return HashPossiblyInstantiatedType(0, pKey->GetTypeToken(), pKey->GetInstantiation());
+ }
+ else if (pKey->GetKind() == ELEMENT_TYPE_FNPTR)
+ {
+ return HashFnPtrType(0, pKey->GetCallConv(), pKey->GetNumArgs(), pKey->GetRetAndArgTypes());
+ }
+ else
+ {
+ return HashParamType(0, pKey->GetKind(), pKey->GetElementType());
+ }
+}
+
+// Look up a value in the hash table
+//
+// The logic is subtle: type handles in the hash table may not be
+// restored, but we need to compare components of the types (rank and
+// element type for arrays, generic type and instantiation for
+// instantiated types) against pKey
+//
+// We avoid restoring types during search by cracking the signature
+// encoding used by the zapper for out-of-module types, e.g. in the
+// instantiation of an instantiated type.
+EETypeHashEntry_t *EETypeHashTable::FindItem(TypeKey* pKey)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pKey));
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ EETypeHashEntry_t * result = NULL;
+
+ DWORD dwHash = HashTypeKey(pKey);
+ EETypeHashEntry_t * pSearch;
+ CorElementType kind = pKey->GetKind();
+ LookupContext sContext;
+
+ if (kind == ELEMENT_TYPE_CLASS)
+ {
+ pSearch = BaseFindFirstEntryByHash(dwHash, &sContext);
+ while (pSearch)
+ {
+ if (CompareInstantiatedType(pSearch->GetTypeHandle(), pKey->GetModule(), pKey->GetTypeToken(), pKey->GetInstantiation()))
+ {
+ result = pSearch;
+ break;
+ }
+
+ pSearch = BaseFindNextEntryByHash(&sContext);
+ }
+ }
+ else if (kind == ELEMENT_TYPE_FNPTR)
+ {
+ BYTE callConv = pKey->GetCallConv();
+ DWORD numArgs = pKey->GetNumArgs();
+ TypeHandle *retAndArgTypes = pKey->GetRetAndArgTypes();
+
+ pSearch = BaseFindFirstEntryByHash(dwHash, &sContext);
+ while (pSearch)
+ {
+ if (CompareFnPtrType(pSearch->GetTypeHandle(), callConv, numArgs, retAndArgTypes))
+ {
+ result = pSearch;
+ break;
+ }
+
+ pSearch = BaseFindNextEntryByHash(&sContext);
+ }
+ }
+ else
+ {
+ // Type parameters for array and pointer types are necessarily in the same loader module
+ // as the constructed type itself, so we can just do handle comparisons
+ // Unfortunately the rank of the array might live elsewhere
+
+ for (pSearch = BaseFindFirstEntryByHash(dwHash, &sContext);
+ pSearch != NULL;
+ pSearch = BaseFindNextEntryByHash(&sContext))
+ {
+ if (!pSearch->GetTypeHandle().IsRestored())
+ {
+ // workaround: If we encounter an unrestored MethodTable, then it
+ // isn't the type for which we are looking (plus, it will crash
+ // in GetSignatureCorElementType). However TypeDescs can be
+ // accessed when unrestored. Also they are accessed in that
+ // manner at startup when we're loading the global types
+ // (i.e. System.Object).
+
+ if (!pSearch->GetTypeHandle().IsTypeDesc())
+ {
+ // Not a match
+ continue;
+ }
+ else
+ {
+ // We have an unrestored TypeDesc
+ }
+ }
+
+ if (pSearch->GetTypeHandle().GetSignatureCorElementType() != kind)
+ continue;
+
+ if (pSearch->GetTypeHandle().GetTypeParam() != pKey->GetElementType())
+ continue;
+
+ if (pSearch->GetTypeHandle().IsTypeDesc() == pKey->IsTemplateMethodTable())
+ continue;
+
+ if (kind == ELEMENT_TYPE_ARRAY)
+ {
+ if (pKey->IsTemplateMethodTable())
+ {
+ if (pSearch->GetTypeHandle().AsMethodTable()->GetRank() != pKey->GetRank())
+ continue;
+ }
+ else
+ {
+ ArrayTypeDesc *pATD = pSearch->GetTypeHandle().AsArray();
+#ifdef FEATURE_PREJIT
+ // This ensures that GetAssemblyIfLoaded operations that may be triggered by signature walks will succeed if at all possible.
+ ClrFlsThreadTypeSwitch genericInstantionCompareHolder(ThreadType_GenericInstantiationCompare);
+
+ TADDR fixup = pATD->GetTemplateMethodTableMaybeTagged();
+ if (!CORCOMPILE_IS_POINTER_TAGGED(fixup))
+ {
+ TADDR canonFixup = pATD->GetTemplateMethodTable()->GetCanonicalMethodTableFixup();
+ if (CORCOMPILE_IS_POINTER_TAGGED(canonFixup))
+ fixup = canonFixup;
+ }
+
+ if (CORCOMPILE_IS_POINTER_TAGGED(fixup))
+ {
+ Module *pDefiningModule;
+ PCCOR_SIGNATURE pSig = m_pModule->GetEncodedSigIfLoaded(CORCOMPILE_UNTAG_TOKEN(fixup), &pDefiningModule);
+ if (pDefiningModule == NULL)
+ break;
+
+ _ASSERTE(*pSig == ELEMENT_TYPE_NATIVE_ARRAY_TEMPLATE_ZAPSIG);
+ pSig++;
+ _ASSERTE(*pSig == ELEMENT_TYPE_ARRAY);
+ pSig++;
+ SigPointer sp(pSig);
+ if (FAILED(sp.SkipExactlyOne()))
+ break; // return NULL;
+
+ ULONG data;
+ if (FAILED(sp.GetData(&data)))
+ break; // return NULL;
+
+ if (data != pKey->GetRank())
+ continue;
+ }
+ else
+#endif //FEATURE_PREJIT
+ {
+ if (pATD->GetRank() != pKey->GetRank())
+ continue;
+ }
+ }
+ }
+
+ result = pSearch;
+ break;
+ }
+ }
+
+ return result;
+}
+
+BOOL EETypeHashTable::CompareInstantiatedType(TypeHandle t, Module *pModule, mdTypeDef token, Instantiation inst)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(t));
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(!inst.IsEmpty());
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ if (t.IsTypeDesc())
+ return FALSE;
+
+ // Even the EEClass pointer might be encoded
+ MethodTable * pMT = t.AsMethodTable();
+
+ if (pMT->GetNumGenericArgs() != inst.GetNumArgs())
+ return FALSE;
+
+#ifdef FEATURE_PREJIT
+ // This ensures that GetAssemblyIfLoaded operations that may be triggered by signature walks will succeed if at all possible.
+ ClrFlsThreadTypeSwitch genericInstantionCompareHolder(ThreadType_GenericInstantiationCompare);
+
+ TADDR fixup = pMT->GetCanonicalMethodTableFixup();
+
+ // The EEClass pointer is actually an encoding.
+ if (CORCOMPILE_IS_POINTER_TAGGED(fixup))
+ {
+ Module *pDefiningModule;
+ PCCOR_SIGNATURE pSig = m_pModule->GetEncodedSigIfLoaded(CORCOMPILE_UNTAG_TOKEN(fixup), &pDefiningModule);
+
+ // First check that the modules for the generic type defs match
+ if (dac_cast<TADDR>(pDefiningModule) !=
+ dac_cast<TADDR>(pModule))
+ return FALSE;
+
+ // Now crack the signature encoding, expected to be an instantiated type
+ _ASSERTE(*pSig == ELEMENT_TYPE_GENERICINST);
+ pSig++;
+ _ASSERTE(*pSig == ELEMENT_TYPE_CLASS || *pSig == ELEMENT_TYPE_VALUETYPE);
+ pSig++;
+
+ // Check that the tokens of the generic type def match
+ if (CorSigUncompressToken(pSig) != token)
+ return FALSE;
+ }
+
+ // The EEClass pointer is a real pointer
+ else
+#endif //FEATURE_PREJIT
+ {
+ // First check that the typedef tokens match
+ if (pMT->GetCl() != token)
+ return FALSE;
+
+ // The class might not be restored, and its metadata module pointer might be encoded.
+ // This will return NULL if the module for the corresponding generic class
+ // is not loaded.
+ Module *pGenericModuleIfLoaded = pMT->GetModuleIfLoaded();
+
+ // Now check that the modules match
+ if (!pGenericModuleIfLoaded ||
+ dac_cast<TADDR>(pGenericModuleIfLoaded) !=
+ dac_cast<TADDR>(pModule))
+ return FALSE;
+
+ }
+
+ Instantiation candidateInst = t.GetInstantiation();
+
+ // Now check the instantiations. Some type arguments might be encoded.
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ // Fetch the type handle as a TADDR. It may be an encoded fixup - TypeHandle debug-only validation
+ // asserts on encoded fixups.
+ DACCOP_IGNORE(CastOfMarshalledType, "Dual mode DAC problem, but since the size is the same, the cast is safe");
+ TADDR candidateArg = ((FixupPointer<TADDR> *)candidateInst.GetRawArgs())[i].GetValue();
+
+ if (!ZapSig::CompareTaggedPointerToTypeHandle(m_pModule, candidateArg, inst[i]))
+ {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+BOOL EETypeHashTable::CompareFnPtrType(TypeHandle t, BYTE callConv, DWORD numArgs, TypeHandle *retAndArgTypes)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(t));
+ PRECONDITION(CheckPointer(retAndArgTypes));
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END
+
+ if (!t.IsFnPtrType())
+ return FALSE;
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_PREJIT
+ // This ensures that GetAssemblyIfLoaded operations that may be triggered by signature walks will succeed if at all possible.
+ ClrFlsThreadTypeSwitch genericInstantionCompareHolder(ThreadType_GenericInstantiationCompare);
+#endif
+
+ FnPtrTypeDesc* pTD = t.AsFnPtrType();
+
+ if (pTD->GetNumArgs() != numArgs || pTD->GetCallConv() != callConv)
+ return FALSE;
+
+ // Now check the return and argument types. Some type arguments might be encoded.
+ TypeHandle *retAndArgTypes2 = pTD->GetRetAndArgTypesPointer();
+ for (DWORD i = 0; i <= numArgs; i++)
+ {
+ TADDR candidateArg = retAndArgTypes2[i].AsTAddr();
+ if (!ZapSig::CompareTaggedPointerToTypeHandle(m_pModule, candidateArg, retAndArgTypes[i]))
+ {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+
+#else
+ DacNotImpl();
+ return FALSE;
+#endif // #ifndef DACCESS_COMPILE
+}
+
+TypeHandle EETypeHashTable::GetValue(TypeKey *pKey)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ EETypeHashEntry_t *pItem = FindItem(pKey);
+
+ if (pItem)
+ {
+ TypeHandle th = pItem->GetTypeHandle();
+ g_IBCLogger.LogTypeHashTableAccess(&th);
+ return pItem->GetTypeHandle();
+ }
+ else
+ return TypeHandle();
+}
+
+#ifndef DACCESS_COMPILE
+
+BOOL EETypeHashTable::ContainsValue(TypeHandle th)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ TypeKey typeKey = th.GetTypeKey();
+ return !GetValue(&typeKey).IsNull();
+}
+
+// Insert a value not already in the hash table
+VOID EETypeHashTable::InsertValue(TypeHandle data)
+{
+ CONTRACTL
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(IsUnsealed()); // If we are sealed then we should not be adding to this hashtable
+ PRECONDITION(CheckPointer(data));
+ PRECONDITION(!data.IsEncodedFixup());
+ PRECONDITION(!data.IsGenericTypeDefinition()); // Generic type defs live in typedef table (availableClasses)
+ PRECONDITION(data.HasInstantiation() || data.HasTypeParam() || data.IsFnPtrType()); // It's an instantiated type or an array/ptr/byref type
+ PRECONDITION(!m_pModule || m_pModule->IsTenured()); // Destruct won't destruct m_pAvailableParamTypes for non-tenured modules - so make sure no one tries to insert one before the Module has been tenured
+ }
+ CONTRACTL_END
+
+ EETypeHashEntry_t * pNewEntry = (EETypeHashEntry_t*)BaseAllocateEntry(NULL);
+
+ pNewEntry->SetTypeHandle(data);
+
+ BaseInsertEntry(HashTypeHandle(0, data), pNewEntry);
+}
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+
+#ifdef _DEBUG
+void EETypeHashTableSeal(EETypeHashTable * pTable) { WRAPPER_NO_CONTRACT; pTable->Seal(); }
+void EETypeHashTableUnseal(EETypeHashTable * pTable) { WRAPPER_NO_CONTRACT; pTable->Unseal(); }
+typedef Wrapper<EETypeHashTable *, EETypeHashTableSeal, EETypeHashTableUnseal> EETypeHashTableSealHolder;
+#endif
+
+// Save the hash table and any type descriptors referenced by it
+// Method tables must be saved separately
+void EETypeHashTable::Save(DataImage *image, Module *module, CorProfileData *profileData)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(image->GetModule() == m_pModule);
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ // The table should not change while we are walking the buckets
+ EETypeHashTableSealHolder h(this);
+#endif
+
+ // The base class will call us back for every entry to see if it's considered hot. To determine this we
+ // have to walk through the profiling data. It's very inefficient for us to do this every time. Instead
+ // we'll walk the data once just now and mark each hot entry as we find it.
+ CORBBTPROF_TOKEN_INFO * pTypeProfilingData = profileData->GetTokenFlagsData(TypeProfilingData);
+ DWORD cTypeProfilingData = profileData->GetTokenFlagsCount(TypeProfilingData);
+
+ for (unsigned int i = 0; i < cTypeProfilingData; i++)
+ {
+ CORBBTPROF_TOKEN_INFO *entry = &pTypeProfilingData[i];
+ mdToken token = entry->token;
+ DWORD flags = entry->flags;
+
+ if (TypeFromToken(token) != ibcTypeSpec)
+ continue;
+
+ if ((flags & (1 << ReadTypeHashTable)) == 0)
+ continue;
+
+ CORBBTPROF_BLOB_ENTRY *pBlobEntry = profileData->GetBlobStream();
+ if (pBlobEntry)
+ {
+ while (pBlobEntry->TypeIsValid())
+ {
+ if (TypeFromToken(pBlobEntry->token) == ibcTypeSpec)
+ {
+ _ASSERTE(pBlobEntry->type == ParamTypeSpec);
+
+ CORBBTPROF_BLOB_PARAM_SIG_ENTRY *pBlobSigEntry = (CORBBTPROF_BLOB_PARAM_SIG_ENTRY *) pBlobEntry;
+
+ if (pBlobEntry->token == token)
+ {
+ if (flags & (1<<ReadTypeHashTable))
+ {
+ TypeHandle th = m_pModule->LoadIBCTypeHelper(pBlobSigEntry);
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ g_pConfig->DebugCheckAndForceIBCFailure(EEConfig::CallSite_8);
+#endif
+ if (!th.IsNull())
+ {
+ // Found a hot type. See if we have it in our table.
+ DWORD dwHash = HashTypeHandle(0, th);
+ LookupContext sContext;
+ EETypeHashEntry_t *pSearch = BaseFindFirstEntryByHash(dwHash, &sContext);
+ while (pSearch)
+ {
+ if (pSearch->GetTypeHandle() == th)
+ {
+ // Found the corresponding entry in the table. Mark it as hot.
+ pSearch->MarkAsHot();
+ break;
+ }
+
+ pSearch = BaseFindNextEntryByHash(&sContext);
+ }
+ }
+ }
+ }
+ }
+ pBlobEntry = pBlobEntry->GetNextEntry();
+ }
+ }
+ }
+
+ BaseSave(image, profileData);
+}
+
+bool EETypeHashTable::ShouldSave(DataImage *pImage, EETypeHashEntry_t *pEntry)
+{
+ STANDARD_VM_CONTRACT;
+
+ return !!pImage->GetPreloader()->IsTypeInTransitiveClosureOfInstantiations(CORINFO_CLASS_HANDLE(pEntry->GetTypeHandle().AsPtr()));
+}
+
+bool EETypeHashTable::IsHotEntry(EETypeHashEntry_t *pEntry, CorProfileData *pProfileData)
+{
+ STANDARD_VM_CONTRACT;
+
+ // EETypeHashTable::Save() will have marked the entry as hot if the profile data indicated this.
+ return pEntry->IsHot();
+}
+
+bool EETypeHashTable::SaveEntry(DataImage *pImage, CorProfileData *pProfileData, EETypeHashEntry_t *pOldEntry, EETypeHashEntry_t *pNewEntry, EntryMappingTable *pMap)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return false;
+}
+
+void EETypeHashTable::Fixup(DataImage *image)
+{
+ STANDARD_VM_CONTRACT;
+
+ BaseFixup(image);
+
+ image->ZeroPointerField(this, offsetof(EETypeHashTable, m_pAllocator));
+
+#ifdef _DEBUG
+ // The persisted table should be unsealed.
+ EETypeHashTable *pNewTable = (EETypeHashTable*) image->GetImagePointer(this);
+ pNewTable->InitUnseal();
+#endif
+}
+
+void EETypeHashTable::FixupEntry(DataImage *pImage, EETypeHashEntry_t *pEntry, void *pFixupBase, DWORD cbFixupOffset)
+{
+ STANDARD_VM_CONTRACT;
+
+ TypeHandle pType = pEntry->GetTypeHandle();
+ _ASSERTE(!pType.IsNull());
+
+ // Clear any hot entry marking in the data, it's not needed after the Save phase.
+ pEntry->SetTypeHandle(pType);
+
+ if (pType.IsTypeDesc())
+ {
+ pImage->FixupField(pFixupBase, cbFixupOffset + offsetof(EETypeHashEntry_t, m_data),
+ pType.AsTypeDesc(), 2);
+
+ pType.AsTypeDesc()->Fixup(pImage);
+ }
+ else
+ {
+ pImage->FixupField(pFixupBase, cbFixupOffset + offsetof(EETypeHashEntry_t, m_data),
+ pType.AsMethodTable());
+
+ pType.AsMethodTable()->Fixup(pImage);
+ }
+}
+#endif // FEATURE_NATIVE_IMAGE_GENERATION
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void
+EETypeHashTable::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ BaseEnumMemoryRegions(flags);
+}
+
+void EETypeHashTable::EnumMemoryRegionsForEntry(EETypeHashEntry_t *pEntry, CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ pEntry->GetTypeHandle().EnumMemoryRegions(flags);
+}
+
+#endif // #ifdef DACCESS_COMPILE
+
+TypeHandle EETypeHashEntry::GetTypeHandle()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Remove any hot entry indicator bit that may have been set as the result of Ngen saving.
+ return TypeHandle::FromTAddr(m_data & ~0x1);
+}
+
+void EETypeHashEntry::SetTypeHandle(TypeHandle handle)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // We plan to steal the low-order bit of the handle for ngen purposes.
+ _ASSERTE((handle.AsTAddr() & 0x1) == 0);
+ m_data = handle.AsTAddr();
+}
+
+#ifdef FEATURE_PREJIT
+bool EETypeHashEntry::IsHot()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Low order bit of data field indicates a hot entry.
+ return (m_data & 1) != 0;
+}
+
+void EETypeHashEntry::MarkAsHot()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Low order bit of data field indicates a hot entry.
+ m_data |= 0x1;
+}
+#endif // FEATURE_PREJIT
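+
+// Illustrative note (not part of the original commit): this bit-stealing works
+// because TypeHandle values are aligned (SetTypeHandle asserts bit 0 is clear),
+// leaving bit 0 free to carry the transient "hot" mark during NGen save:
+#if 0
+// m_data = th.AsTAddr();                        // bit 0 clear by alignment
+// m_data |= 0x1;                                // MarkAsHot
+// th == TypeHandle::FromTAddr(m_data & ~0x1);   // GetTypeHandle recovers th
+#endif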
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif // _MSC_VER: warning C4244
diff --git a/src/vm/typehash.h b/src/vm/typehash.h
new file mode 100644
index 0000000000..b9fad552e0
--- /dev/null
+++ b/src/vm/typehash.h
@@ -0,0 +1,161 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: typehash.h
+//
+
+#ifndef _TYPE_HASH_H
+#define _TYPE_HASH_H
+
+#include "ngenhash.h"
+
+//========================================================================================
+// This hash table is used by class loaders to look up constructed types:
+// arrays, pointers and instantiations of user-defined generic types.
+//
+// Each persisted module structure has an EETypeHashTable used for constructed types that
+// were ngen'ed into that module. See ceeload.hpp for more information about ngen modules.
+//
+// Types created at runtime are placed in an EETypeHashTable in BaseDomain.
+//
+// Keys are derivable from the data stored in the table (TypeHandle)
+// - for an instantiated type, the typedef module, typedef token, and instantiation
+// - for an array/pointer type, the CorElementType, rank, and type parameter
+//
+//========================================================================================
+
+// One of these is present for each element in the table
+// It simply chains together (hash,data) pairs
+typedef DPTR(struct EETypeHashEntry) PTR_EETypeHashEntry;
+typedef struct EETypeHashEntry
+{
+ TypeHandle GetTypeHandle();
+ void SetTypeHandle(TypeHandle handle);
+
+#ifdef FEATURE_PREJIT
+ // To make ngen saving much more efficient we support marking individual entries as hot (as determined by
+ // profile data).
+ bool IsHot();
+ void MarkAsHot();
+#endif // FEATURE_PREJIT
+
+private:
+ friend class EETypeHashTable;
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+ TADDR m_data;
+} EETypeHashEntry_t;
+
+
+// The type hash table itself
+typedef DPTR(class EETypeHashTable) PTR_EETypeHashTable;
+class EETypeHashTable : public NgenHashTable<EETypeHashTable, EETypeHashEntry, 2>
+{
+#ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+#endif
+
+public:
+ // This is the domain in which the hash table is allocated
+ PTR_LoaderAllocator m_pAllocator;
+
+#ifdef _DEBUG
+private:
+ Volatile<LONG> m_dwSealCount; // Can more types be added to the table?
+
+public:
+ void InitUnseal() { LIMITED_METHOD_CONTRACT; m_dwSealCount = 0; }
+ bool IsUnsealed() { LIMITED_METHOD_CONTRACT; return (m_dwSealCount == 0); }
+ void Seal() { LIMITED_METHOD_CONTRACT; FastInterlockIncrement(&m_dwSealCount); }
+ void Unseal() { LIMITED_METHOD_CONTRACT; FastInterlockDecrement(&m_dwSealCount); }
+#endif // _DEBUG
+
+private:
+#ifndef DACCESS_COMPILE
+ EETypeHashTable();
+ ~EETypeHashTable();
+#endif
+public:
+ static EETypeHashTable *Create(LoaderAllocator *pAllocator, Module *pModule, DWORD dwNumBuckets, AllocMemTracker *pamTracker);
+
+private:
+ friend class NgenHashTable<EETypeHashTable, EETypeHashEntry, 2>;
+
+#ifndef DACCESS_COMPILE
+ EETypeHashTable(Module *pModule, LoaderHeap *pHeap, DWORD cInitialBuckets) :
+ NgenHashTable<EETypeHashTable, EETypeHashEntry, 2>(pModule, pHeap, cInitialBuckets) {}
+#endif
+ void operator delete(void *p);
+
+public:
+ // Insert a value in the hash table, key implicit in data
+ // Value must not be present in the table already
+ VOID InsertValue(TypeHandle data);
+
+ // Look up a value in the hash table, key explicit in pKey
+ // Return a null type handle if not found
+ TypeHandle GetValue(TypeKey* pKey);
+
+ BOOL ContainsValue(TypeHandle th);
+
+ // An iterator for the table
+ class Iterator
+ {
+ public:
+ // This iterator can be reused for walking different tables
+ void Reset();
+ Iterator();
+
+ Iterator(EETypeHashTable * pTable);
+ ~Iterator();
+
+ private:
+ friend class EETypeHashTable;
+
+ void Init();
+
+ EETypeHashTable *m_pTable;
+ BaseIterator m_sIterator;
+ bool m_fIterating;
+ };
+
+ BOOL FindNext(Iterator *it, EETypeHashEntry **ppEntry);
+
+ DWORD GetCount();
+
+#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
+ // Save the hash table and any type descriptors referenced by it
+ // Template method tables (for arrays) must be saved separately
+ void Save(DataImage *image, Module *module, CorProfileData *profileData);
+
+ // Record fixups required on the hash table
+ // Recurse into type descriptors and template method tables referenced by it
+ void Fixup(DataImage *image);
+
+ bool ShouldSave(DataImage *pImage, EETypeHashEntry_t *pEntry);
+ bool IsHotEntry(EETypeHashEntry_t *pEntry, CorProfileData *pProfileData);
+ bool SaveEntry(DataImage *pImage, CorProfileData *pProfileData, EETypeHashEntry_t *pOldEntry, EETypeHashEntry_t *pNewEntry, EntryMappingTable *pMap);
+ void FixupEntry(DataImage *pImage, EETypeHashEntry_t *pEntry, void *pFixupBase, DWORD cbFixupOffset);
+#endif // FEATURE_PREJIT && !DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ void EnumMemoryRegionsForEntry(EETypeHashEntry_t *pEntry, CLRDataEnumMemoryFlags flags);
+#endif
+
+private:
+ EETypeHashEntry_t * FindItem(TypeKey* pKey);
+ BOOL CompareInstantiatedType(TypeHandle t, Module *pModule, mdTypeDef token, Instantiation inst);
+ BOOL CompareFnPtrType(TypeHandle t, BYTE callConv, DWORD numArgs, TypeHandle *retAndArgTypes);
+ BOOL GrowHashTable();
+ LoaderAllocator* GetLoaderAllocator();
+};
+
+#endif /* _TYPE_HASH_H */
+
diff --git a/src/vm/typekey.h b/src/vm/typekey.h
new file mode 100644
index 0000000000..6560344288
--- /dev/null
+++ b/src/vm/typekey.h
@@ -0,0 +1,308 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// typekey.h
+//
+
+// ---------------------------------------------------------------------------
+//
+// Support for type lookups based on components of the type (as opposed to string)
+// Used in
+// * Table of constructed types (Module::m_pAvailableParamTypes)
+// * Types currently being loaded (ClassLoader::m_pUnresolvedClassHash)
+//
+// Type handles are in one-to-one correspondence with TypeKeys
+// In particular, note that tokens in the key are resolved TypeDefs
+//
+// ---------------------------------------------------------------------------
+
+
+#ifndef _H_TYPEKEY
+#define _H_TYPEKEY
+
+class TypeKey
+{
+ // ELEMENT_TYPE_CLASS for ordinary classes and generic instantiations (including value types)
+ // ELEMENT_TYPE_ARRAY and ELEMENT_TYPE_SZARRAY for array types
+ // ELEMENT_TYPE_PTR and ELEMENT_TYPE_BYREF for pointer types
+ // ELEMENT_TYPE_FNPTR for function pointer types
+ // ELEMENT_TYPE_VALUETYPE for native value types (used in IL stubs)
+ CorElementType m_kind;
+
+ union
+ {
+ // m_kind = CLASS
+ struct
+ {
+ Module * m_pModule;
+ mdToken m_typeDef;
+ DWORD m_numGenericArgs; // 0 for non-generic types
+ FixupPointer<TypeHandle> * m_pGenericArgs; // NULL for non-generic types
+ // Note that for DAC builds, m_pGenericArgs is a host-allocated buffer (e.g. by SigPointer::GetTypeHandleThrowing),
+ // not a copy of an object marshalled by DAC.
+ } asClass;
+
+ // m_kind = ARRAY, SZARRAY, PTR or BYREF
+ struct
+ {
+ TADDR m_paramType; // The element type (actually a TypeHandle, but we don't want its constructor
+ // run on a C++ union)
+ DWORD m_rank; // Non-zero for ARRAY, 1 for SZARRAY, 0 for PTR or BYREF
+ BOOL m_isTemplateMethodTable; // TRUE if this key indexes the template method table for an array, rather than a type-desc
+ } asParamType;
+
+ // m_kind = FNPTR
+ struct
+ {
+ BYTE m_callConv;
+ DWORD m_numArgs;
+ TypeHandle* m_pRetAndArgTypes;
+ } asFnPtr;
+ } u;
+
+public:
+
+ // Constructor for BYREF/PTR/ARRAY/SZARRAY types
+ TypeKey(CorElementType etype, TypeHandle paramType, BOOL isTemplateMethodTable = FALSE, DWORD rank = 0)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ PRECONDITION((rank > 0 && etype == ELEMENT_TYPE_ARRAY) ||
+ (rank == 1 && etype == ELEMENT_TYPE_SZARRAY) ||
+ (rank == 0 && (etype == ELEMENT_TYPE_PTR || etype == ELEMENT_TYPE_BYREF || etype == ELEMENT_TYPE_VALUETYPE)));
+ PRECONDITION(CheckPointer(paramType));
+ m_kind = etype;
+ u.asParamType.m_paramType = paramType.AsTAddr();
+ u.asParamType.m_rank = rank;
+ u.asParamType.m_isTemplateMethodTable = isTemplateMethodTable;
+ }
+
+ // Constructor for instantiated types
+ TypeKey(Module *pModule, mdTypeDef token, Instantiation inst = Instantiation())
+ {
+ WRAPPER_NO_CONTRACT;
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(TypeFromToken(token) == mdtTypeDef);
+ PRECONDITION(!IsNilToken(token));
+ m_kind = ELEMENT_TYPE_CLASS;
+ u.asClass.m_pModule = pModule;
+ u.asClass.m_typeDef = token;
+ u.asClass.m_numGenericArgs = inst.GetNumArgs();
+ u.asClass.m_pGenericArgs = inst.GetRawArgs();
+ }
+
+ // Constructor for function pointer type
+ TypeKey(BYTE callConv, DWORD numArgs, TypeHandle* retAndArgTypes)
+ {
+ WRAPPER_NO_CONTRACT;
+ PRECONDITION(CheckPointer(retAndArgTypes));
+ m_kind = ELEMENT_TYPE_FNPTR;
+ u.asFnPtr.m_callConv = callConv;
+ u.asFnPtr.m_numArgs = numArgs;
+ u.asFnPtr.m_pRetAndArgTypes = retAndArgTypes;
+ }
+
+ CorElementType GetKind() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return m_kind;
+ }
+
+ // Accessors on array/pointer types
+ DWORD GetRank() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ PRECONDITION(CorTypeInfo::IsArray_NoThrow(m_kind));
+ return u.asParamType.m_rank;
+ }
+
+ BOOL IsTemplateMethodTable() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return u.asParamType.m_isTemplateMethodTable;
+ }
+
+ TypeHandle GetElementType() const
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ PRECONDITION(CorTypeInfo::IsModifier_NoThrow(m_kind) || m_kind == ELEMENT_TYPE_VALUETYPE);
+ return TypeHandle::FromTAddr(u.asParamType.m_paramType);
+ }
+
+ BOOL IsConstructed() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ return !(m_kind == ELEMENT_TYPE_CLASS && u.asClass.m_numGenericArgs == 0);
+ }
+
+ // Accessors on instantiated types
+ PTR_Module GetModule() const
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+ if (m_kind == ELEMENT_TYPE_CLASS)
+ return PTR_Module(u.asClass.m_pModule);
+ else if (CorTypeInfo::IsModifier_NoThrow(m_kind) || m_kind == ELEMENT_TYPE_VALUETYPE)
+ return GetElementType().GetModule();
+ else
+ return NULL;
+ }
+
+ mdTypeDef GetTypeToken() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ PRECONDITION(m_kind == ELEMENT_TYPE_CLASS);
+ return u.asClass.m_typeDef;
+ }
+
+ // Get the type parameters for this CLASS type.
+ // This is an array (host-allocated in DAC builds) of length GetNumGenericArgs().
+ Instantiation GetInstantiation() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ PRECONDITION(m_kind == ELEMENT_TYPE_CLASS);
+ return Instantiation(u.asClass.m_pGenericArgs, u.asClass.m_numGenericArgs);
+ }
+
+ DWORD GetNumGenericArgs() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ PRECONDITION(m_kind == ELEMENT_TYPE_CLASS);
+ return u.asClass.m_numGenericArgs;
+ }
+
+ BOOL HasInstantiation() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_kind == ELEMENT_TYPE_CLASS && u.asClass.m_numGenericArgs != 0;
+ }
+
+ // Accessors on function pointer types
+ DWORD GetNumArgs() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ PRECONDITION(m_kind == ELEMENT_TYPE_FNPTR);
+ return u.asFnPtr.m_numArgs;
+ }
+
+ BYTE GetCallConv() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ PRECONDITION(m_kind == ELEMENT_TYPE_FNPTR);
+ return u.asFnPtr.m_callConv;
+ }
+
+ TypeHandle* GetRetAndArgTypes() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ PRECONDITION(m_kind == ELEMENT_TYPE_FNPTR);
+ return u.asFnPtr.m_pRetAndArgTypes;
+ }
+
+ BOOL Equals(TypeKey *pKey) const
+ {
+ WRAPPER_NO_CONTRACT;
+ return TypeKey::Equals(this, pKey);
+ }
+
+ // Comparison and hashing
+ static BOOL Equals(const TypeKey *pKey1, const TypeKey *pKey2)
+ {
+ WRAPPER_NO_CONTRACT;
+ if (pKey1->m_kind != pKey2->m_kind)
+ {
+ return FALSE;
+ }
+ if (pKey1->m_kind == ELEMENT_TYPE_CLASS)
+ {
+ if (pKey1->u.asClass.m_typeDef != pKey2->u.asClass.m_typeDef ||
+ pKey1->u.asClass.m_pModule != pKey2->u.asClass.m_pModule ||
+ pKey1->u.asClass.m_numGenericArgs != pKey2->u.asClass.m_numGenericArgs)
+ {
+ return FALSE;
+ }
+ for (DWORD i = 0; i < pKey1->u.asClass.m_numGenericArgs; i++)
+ {
+ if (pKey1->u.asClass.m_pGenericArgs[i].GetValue() != pKey2->u.asClass.m_pGenericArgs[i].GetValue())
+ return FALSE;
+ }
+ return TRUE;
+ }
+ else if (CorTypeInfo::IsModifier_NoThrow(pKey1->m_kind) || pKey1->m_kind == ELEMENT_TYPE_VALUETYPE)
+ {
+ return pKey1->u.asParamType.m_paramType == pKey2->u.asParamType.m_paramType
+ && pKey1->u.asParamType.m_rank == pKey2->u.asParamType.m_rank
+ && pKey1->u.asParamType.m_isTemplateMethodTable == pKey2->u.asParamType.m_isTemplateMethodTable;
+ }
+ else
+ {
+ _ASSERTE(pKey1->m_kind == ELEMENT_TYPE_FNPTR);
+ if (pKey1->u.asFnPtr.m_callConv != pKey2->u.asFnPtr.m_callConv ||
+ pKey1->u.asFnPtr.m_numArgs != pKey2->u.asFnPtr.m_numArgs)
+ return FALSE;
+
+ // Includes return type
+ for (DWORD i = 0; i <= pKey1->u.asFnPtr.m_numArgs; i++)
+ {
+ if (pKey1->u.asFnPtr.m_pRetAndArgTypes[i] != pKey2->u.asFnPtr.m_pRetAndArgTypes[i])
+ return FALSE;
+ }
+ return TRUE;
+ }
+ }
+
+ DWORD ComputeHash() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ DWORD_PTR hashLarge;
+
+ if (m_kind == ELEMENT_TYPE_CLASS)
+ {
+ hashLarge = ((DWORD_PTR)u.asClass.m_pModule ^ (DWORD_PTR)u.asClass.m_numGenericArgs ^ (DWORD_PTR)u.asClass.m_typeDef);
+ }
+ else if (CorTypeInfo::IsModifier_NoThrow(m_kind) || m_kind == ELEMENT_TYPE_VALUETYPE)
+ {
+ hashLarge = (u.asParamType.m_paramType ^ (DWORD_PTR) u.asParamType.m_rank);
+ }
+ else hashLarge = 0;
+
+#if POINTER_BITS == 32
+ return hashLarge;
+#else
+ DWORD hash = *(DWORD *)&hashLarge;
+ for (unsigned i = 1; i < POINTER_BITS / 32; i++)
+ {
+ hash ^= ((DWORD *)&hashLarge)[i];
+ }
+ return hash;
+#endif
+ }
+
+ // Is this type part of an assembly loaded for introspection?
+ BOOL IsIntrospectionOnly();
+
+
+};
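+
+// Illustrative sketch (not part of the original commit): constructing keys for the
+// three shapes the overloaded constructors cover (the rank argument must satisfy
+// the preconditions above, e.g. 1 for SZARRAY):
+#if 0
+void ExampleKeys(Module *pModule, mdTypeDef tkGenericDef, Instantiation inst,
+                 TypeHandle thElem)
+{
+    TypeKey instKey(pModule, tkGenericDef, inst);               // instantiated type
+    TypeKey szArrayKey(ELEMENT_TYPE_SZARRAY, thElem, FALSE, 1); // thElem[]
+    TypeKey mdArrayKey(ELEMENT_TYPE_ARRAY, thElem, FALSE, 2);   // thElem[,]
+}
+#endif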
+
+
+#endif /* _H_TYPEKEY */
diff --git a/src/vm/typeparse.cpp b/src/vm/typeparse.cpp
new file mode 100644
index 0000000000..6adb8743f4
--- /dev/null
+++ b/src/vm/typeparse.cpp
@@ -0,0 +1,1974 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ---------------------------------------------------------------------------
+// typeparse.cpp
+// ---------------------------------------------------------------------------
+//
+
+#include "common.h"
+#include "class.h"
+#include "typehandle.h"
+#include "sstring.h"
+#include "typeparse.h"
+#include "typestring.h"
+#include "assemblynative.hpp"
+#include "stackprobe.h"
+#include "fstring.h"
+
+//
+// TypeNameFactory
+//
+HRESULT __stdcall TypeNameFactory::QueryInterface(REFIID riid, void **ppUnk)
+{
+ WRAPPER_NO_CONTRACT;
+
+ *ppUnk = 0;
+
+ if (riid == IID_IUnknown)
+ *ppUnk = (IUnknown *)this;
+ else if (riid == IID_ITypeNameFactory)
+ *ppUnk = (ITypeNameFactory*)this;
+ else
+ return (E_NOINTERFACE);
+
+ AddRef();
+ return S_OK;
+}
+
+HRESULT TypeNameFactoryCreateObject(REFIID riid, void **ppUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ TypeNameFactory *pTypeNameFactory = new (nothrow) TypeNameFactory();
+
+ if (!pTypeNameFactory)
+ return (E_OUTOFMEMORY);
+
+ hr = pTypeNameFactory->QueryInterface(riid, ppUnk);
+
+ if (FAILED(hr))
+ delete pTypeNameFactory;
+
+ return hr;
+}
+
+
+HRESULT __stdcall TypeNameFactory::ParseTypeName(LPCWSTR szTypeName, DWORD* pError, ITypeName** ppTypeName)
+{
+ CONTRACTL
+ {
+ SO_TOLERANT;
+ WRAPPER(THROWS);
+ }CONTRACTL_END;
+
+ if (!ppTypeName || !pError)
+ return E_INVALIDARG;
+
+ HRESULT hr = S_OK;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+
+ *ppTypeName = NULL;
+ *pError = (DWORD)-1;
+
+ ITypeName* pTypeName = new (nothrow) TypeName(szTypeName, pError);
+
+ if (! pTypeName)
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ else
+ {
+ pTypeName->AddRef();
+
+ if (*pError != (DWORD)-1)
+ {
+ pTypeName->Release();
+ hr = S_FALSE;
+ }
+ else
+ {
+ *ppTypeName = pTypeName;
+ }
+ }
+
+ END_SO_INTOLERANT_CODE;
+
+ return hr;
+}
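+
+// Illustrative sketch (not part of the original commit): on a parse failure
+// ParseTypeName returns S_FALSE and *pError holds the character index of the
+// failure, so a caller can report the offending position:
+#if 0
+HRESULT TryParse(ITypeNameFactory *pFactory, LPCWSTR wszName)
+{
+    DWORD error = (DWORD)-1;
+    ITypeName *pTypeName = NULL;
+    HRESULT hr = pFactory->ParseTypeName(wszName, &error, &pTypeName);
+    if (hr == S_FALSE)
+        return hr;                   // parse failed at character index 'error'
+    if (SUCCEEDED(hr) && pTypeName != NULL)
+        pTypeName->Release();        // done with the parsed name
+    return hr;
+}
+#endif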
+
+HRESULT __stdcall TypeNameFactory::GetTypeNameBuilder(ITypeNameBuilder** ppTypeNameBuilder)
+{
+ CONTRACTL
+ {
+ THROWS; // operator new has EX_TRY/EX_CATCH or other contract transition(s)
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!ppTypeNameBuilder)
+ return E_INVALIDARG;
+
+ *ppTypeNameBuilder = NULL;
+
+ HRESULT hr = S_OK;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+
+ ITypeNameBuilder* pTypeNameBuilder = new (nothrow) TypeNameBuilderWrapper();
+
+ if (pTypeNameBuilder)
+ {
+ pTypeNameBuilder->AddRef();
+
+ *ppTypeNameBuilder = pTypeNameBuilder;
+ }
+ else
+ {
+ hr = E_OUTOFMEMORY;
+ }
+
+ END_SO_INTOLERANT_CODE;
+
+ return hr;
+}
+
+//
+// TypeName
+//
+SString* TypeName::ToString(SString* pBuf, BOOL bAssemblySpec, BOOL bSignature, BOOL bGenericArguments)
+{
+ WRAPPER_NO_CONTRACT;
+
+ PRECONDITION(!bGenericArguments && !bSignature && !bAssemblySpec);
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return pBuf);
+ {
+ TypeNameBuilder tnb(pBuf);
+
+ for (COUNT_T i = 0; i < m_names.GetCount(); i ++)
+ tnb.AddName(m_names[i]->GetUnicode());
+ }
+ END_SO_INTOLERANT_CODE;
+
+ return pBuf;
+}
+
+
+DWORD __stdcall TypeName::AddRef()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_count++;
+
+ return m_count;
+}
+
+DWORD __stdcall TypeName::Release()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+ VALIDATE_BACKOUT_STACK_CONSUMPTION;
+
+ m_count--;
+
+ DWORD dwCount = m_count;
+ if (dwCount == 0)
+ delete this;
+
+ return dwCount;
+}
+
+TypeName::~TypeName()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+ VALIDATE_BACKOUT_STACK_CONSUMPTION;
+
+ for(COUNT_T i = 0; i < m_genericArguments.GetCount(); i ++)
+ m_genericArguments[i]->Release();
+}
+
+HRESULT __stdcall TypeName::QueryInterface(REFIID riid, void **ppUnk)
+{
+ WRAPPER_NO_CONTRACT;
+
+ *ppUnk = 0;
+
+ if (riid == IID_IUnknown)
+ *ppUnk = (IUnknown *)this;
+ else if (riid == IID_ITypeName)
+ *ppUnk = (ITypeName*)this;
+ else
+ return (E_NOINTERFACE);
+
+ AddRef();
+ return S_OK;
+}
+
+HRESULT __stdcall TypeName::GetNameCount(DWORD* pCount)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!pCount)
+ return E_INVALIDARG;
+
+ *pCount = m_names.GetCount();
+
+ return S_OK;
+}
+
+HRESULT __stdcall TypeName::GetNames(DWORD count, BSTR* bszName, DWORD* pFetched)
+{
+ CONTRACTL
+ {
+ SO_TOLERANT;
+ WRAPPER(THROWS);
+ }CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (!pFetched)
+ return E_INVALIDARG;
+
+ *pFetched = m_names.GetCount();
+
+ if (m_names.GetCount() > count)
+ return S_FALSE;
+
+ if (!bszName)
+ return E_INVALIDARG;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ {
+ for (COUNT_T i = 0; i < m_names.GetCount(); i ++)
+ bszName[i] = SysAllocString(m_names[i]->GetUnicode());
+ }
+ END_SO_INTOLERANT_CODE;
+
+ return hr;
+}
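+
+// Illustrative sketch (not part of the original commit): GetNames follows the
+// usual COM two-call pattern - query the count first, then fetch; if the buffer
+// is too small the call returns S_FALSE with *pFetched set to the required count:
+#if 0
+HRESULT FetchNames(ITypeName *pTypeName)
+{
+    DWORD count = 0;
+    HRESULT hr = pTypeName->GetNameCount(&count);
+    if (FAILED(hr))
+        return hr;
+
+    NewArrayHolder<BSTR> names = new (nothrow) BSTR[count];
+    if (names == NULL)
+        return E_OUTOFMEMORY;
+
+    DWORD fetched = 0;
+    hr = pTypeName->GetNames(count, names, &fetched);
+    // ... use names[0..fetched-1]; SysFreeString each before returning ...
+    return hr;
+}
+#endif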
+
+HRESULT __stdcall TypeName::GetTypeArgumentCount(DWORD* pCount)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!pCount)
+ return E_INVALIDARG;
+
+ *pCount = m_genericArguments.GetCount();
+
+ return S_OK;
+}
+
+HRESULT __stdcall TypeName::GetTypeArguments(DWORD count, ITypeName** ppArguments, DWORD* pFetched)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!pFetched)
+ return E_INVALIDARG;
+
+ *pFetched = m_genericArguments.GetCount();
+
+ if (m_genericArguments.GetCount() > count)
+ return S_FALSE;
+
+ if (!ppArguments)
+ return E_INVALIDARG;
+
+ for (COUNT_T i = 0; i < m_genericArguments.GetCount(); i ++)
+ {
+ ppArguments[i] = m_genericArguments[i];
+ m_genericArguments[i]->AddRef();
+ }
+
+ return S_OK;
+}
+
+HRESULT __stdcall TypeName::GetModifierLength(DWORD* pCount)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (pCount == NULL)
+ return E_INVALIDARG;
+
+ *pCount = m_signature.GetCount();
+
+ return S_OK;
+}
+
+HRESULT __stdcall TypeName::GetModifiers(DWORD count, DWORD* pModifiers, DWORD* pFetched)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!pFetched)
+ return E_INVALIDARG;
+
+ *pFetched = m_signature.GetCount();
+
+ if (m_signature.GetCount() > count)
+ return S_FALSE;
+
+ if (!pModifiers)
+ return E_INVALIDARG;
+
+ for (COUNT_T i = 0; i < m_signature.GetCount(); i ++)
+ pModifiers[i] = m_signature[i];
+
+ return S_OK;
+}
+
+HRESULT __stdcall TypeName::GetAssemblyName(BSTR* pszAssemblyName)
+{
+ CONTRACTL
+ {
+ SO_TOLERANT;
+ WRAPPER(THROWS);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (pszAssemblyName == NULL)
+ return E_INVALIDARG;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ {
+ *pszAssemblyName = SysAllocString(m_assembly.GetUnicode());
+ }
+ END_SO_INTOLERANT_CODE;
+
+ if (*pszAssemblyName == NULL)
+ hr = E_OUTOFMEMORY;
+
+ return hr;
+}
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+SAFEHANDLE TypeName::GetSafeHandle()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ SAFEHANDLE objSafeHandle = NULL;
+
+ GCPROTECT_BEGIN(objSafeHandle);
+
+ objSafeHandle = (SAFEHANDLE)AllocateObject(MscorlibBinder::GetClass(CLASS__SAFE_TYPENAMEPARSER_HANDLE));
+ CallDefaultConstructor(objSafeHandle);
+
+ this->AddRef();
+ objSafeHandle->SetHandle(this);
+
+ GCPROTECT_END();
+
+ return objSafeHandle;
+}
+
+/*static*/
+void QCALLTYPE TypeName::QCreateTypeNameParser(LPCWSTR wszTypeName, QCall::ObjectHandleOnStack pHandle, BOOL throwOnError)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ DWORD error = (DWORD)-1;
+ ReleaseHolder<TypeName> pTypeName = new TypeName(wszTypeName, &error);
+ pTypeName->AddRef();
+
+ if (error == (DWORD)-1)
+ {
+ GCX_COOP();
+ pHandle.Set(pTypeName->GetSafeHandle());
+ }
+ else
+ {
+ if (throwOnError)
+ {
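+ // Encode the zero-based index of the offending character in the exception's
+ // parameter name, e.g. "typeName@17".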
+ StackSString buf;
+ StackSString msg(W("typeName@"));
+ COUNT_T size = buf.GetUnicodeAllocation();
+ _itow_s(error, buf.OpenUnicodeBuffer(size), size, /*radix*/10);
+ buf.CloseBuffer();
+ msg.Append(buf);
+ COMPlusThrowArgumentException(msg.GetUnicode(), NULL);
+ }
+ }
+
+ END_QCALL;
+}
+
+/*static*/
+void QCALLTYPE TypeName::QReleaseTypeNameParser(TypeName * pTypeName)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pTypeName));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ pTypeName->Release();
+
+ END_QCALL;
+}
+
+/*static*/
+void QCALLTYPE TypeName::QGetNames(TypeName * pTypeName, QCall::ObjectHandleOnStack pNames)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pTypeName));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ SArray<SString*> names = pTypeName->GetNames();
+ COUNT_T count = names.GetCount();
+
+ GCX_COOP();
+
+ if (count > 0)
+ {
+ PTRARRAYREF pReturnNames = NULL;
+
+ GCPROTECT_BEGIN(pReturnNames);
+
+ pReturnNames = (PTRARRAYREF)AllocateObjectArray(count, g_pStringClass);
+
+ for (COUNT_T i = 0; i < count; i++)
+ {
+ STRINGREF str = StringObject::NewString(names[i]->GetUnicode());
+ pReturnNames->SetAt(i, str);
+ }
+
+ pNames.Set(pReturnNames);
+
+ GCPROTECT_END();
+ }
+ else
+ {
+ pNames.Set(NULL);
+ }
+
+ END_QCALL;
+}
+
+/*static*/
+void QCALLTYPE TypeName::QGetTypeArguments(TypeName * pTypeName, QCall::ObjectHandleOnStack pTypeArguments)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pTypeName));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ SArray<TypeName*> arguments = pTypeName->GetGenericArguments();
+ COUNT_T count = arguments.GetCount();
+
+ GCX_COOP();
+
+ if (count > 0)
+ {
+ PTRARRAYREF pReturnArguments = NULL;
+
+ GCPROTECT_BEGIN(pReturnArguments);
+
+ pReturnArguments = (PTRARRAYREF)AllocateObjectArray(count, MscorlibBinder::GetClass(CLASS__SAFE_TYPENAMEPARSER_HANDLE));
+
+ for (COUNT_T i = 0; i < count; i++)
+ {
+ SAFEHANDLE handle = arguments[i]->GetSafeHandle();
+ _ASSERTE(handle != NULL);
+
+ pReturnArguments->SetAt(i, handle);
+ }
+
+ pTypeArguments.Set(pReturnArguments);
+
+ GCPROTECT_END();
+ }
+ else
+ {
+ pTypeArguments.Set(NULL);
+ }
+
+ END_QCALL;
+}
+
+/*static*/
+void QCALLTYPE TypeName::QGetModifiers(TypeName * pTypeName, QCall::ObjectHandleOnStack pModifiers)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pTypeName));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ SArray<DWORD> modifiers = pTypeName->GetSignature();
+ COUNT_T count = modifiers.GetCount();
+
+ GCX_COOP();
+
+ if (count > 0)
+ {
+ I4ARRAYREF pReturnModifiers = NULL;
+
+ GCPROTECT_BEGIN(pReturnModifiers);
+
+ // Copy the recorded modifier signature into a managed I4 array.
+ pReturnModifiers = (I4ARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_I4, count);
+ INT32 *pToArray = pReturnModifiers->GetDirectPointerToNonObjectElements();
+
+ for (COUNT_T i = 0; i < count; i++)
+ {
+ pToArray[i] = modifiers[i];
+ }
+
+ pModifiers.Set(pReturnModifiers);
+
+ GCPROTECT_END();
+ }
+ else
+ {
+ pModifiers.Set(NULL);
+ }
+
+ END_QCALL;
+}
+
+/*static*/
+void QCALLTYPE TypeName::QGetAssemblyName(TypeName * pTypeName, QCall::StringHandleOnStack pAssemblyName)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ PRECONDITION(CheckPointer(pTypeName));
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ pAssemblyName.Set(*(pTypeName->GetAssembly()));
+
+ END_QCALL;
+}
+#endif //!FEATURE_CORECLR && !CROSSGEN_COMPILE
+
+//
+// TypeName::TypeNameParser
+//
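+// The parser productions below return FALSE on the first failed match, so
+// IfFailGo is redefined from its usual HRESULT form to a simple boolean check.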
+#undef IfFailGo
+#define IfFailGo(P) if (!(P)) return FALSE;
+
+TypeName* TypeName::AddGenericArgument()
+{
+ WRAPPER_NO_CONTRACT;
+
+ TypeName* pGenArg = new TypeName();
+ pGenArg->AddRef();
+
+ pGenArg->m_bIsGenericArgument = TRUE;
+ return m_genericArguments.AppendEx(pGenArg);
+}
+
+TypeName::TypeNameParser::TypeNameTokens TypeName::TypeNameParser::LexAToken(BOOL ignorePlus)
+{
+ LIMITED_METHOD_CONTRACT;
+
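+ // Identifiers are consumed lazily by GetIdentifier. Until that happens the scan
+ // position still sits on the identifier, so report TypeNamePostIdentifier rather
+ // than lexing the same identifier again.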
+ if (m_nextToken == TypeNameIdentifier)
+ return TypeNamePostIdentifier;
+
+ if (m_nextToken == TypeNameEnd)
+ return TypeNameEnd;
+
+ if (*m_itr == W('\0'))
+ return TypeNameEnd;
+
+ if (COMCharacter::nativeIsWhiteSpace(*m_itr))
+ {
+ m_itr++;
+ return LexAToken(ignorePlus); // preserve the flag across the whitespace skip
+ }
+
+ WCHAR c = *m_itr;
+ m_itr++;
+ switch(c)
+ {
+ case W(','): return TypeNameComma;
+ case W('['): return TypeNameOpenSqBracket;
+ case W(']'): return TypeNameCloseSqBracket;
+ case W('&'): return TypeNameAmperstand;
+ case W('*'): return TypeNameAstrix;
+ case W('+'): if (!ignorePlus) return TypeNamePlus;
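+ // intentional fall-through: when ignorePlus is set, '+' begins an identifier,
+ // just as '\\' does below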
+ case W('\\'):
+ m_itr--;
+ return TypeNameIdentifier;
+ }
+
+ ASSERT(!IsTypeNameReservedChar(c));
+
+ m_itr--;
+ return TypeNameIdentifier;
+}
+
+BOOL TypeName::TypeNameParser::GetIdentifier(SString* sszId, TypeName::TypeNameParser::TypeNameIdentifiers identifierType)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PRECONDITION(m_currentToken == TypeNameIdentifier && m_nextToken == TypeNamePostIdentifier);
+
+ sszId->Clear();
+
+ LPCWSTR start = m_currentItr;
+ InlineSArray<LPCWSTR, 32> m_escape;
+
+ if (identifierType == TypeNameId)
+ {
+ do
+ {
+ switch (*m_currentItr++)
+ {
+ case W(','):
+ case W('['):
+ case W(']'):
+ case W('&'):
+ case W('*'):
+ case W('+'):
+ case W('\0'):
+ goto done;
+
+ case W('\\'):
+ m_escape.Append(m_currentItr - 1);
+
+ if (!IsTypeNameReservedChar(*m_currentItr) || *m_currentItr == '\0')
+ return FALSE;
+
+ m_currentItr++;
+ break;
+
+ default:
+ break;
+ }
+ }
+ while(true);
+
+done:
+ m_currentItr--;
+ }
+ else if (identifierType == TypeNameFusionName)
+ {
+ while(*m_currentItr != W('\0'))
+ m_currentItr++;
+ }
+ else if (identifierType == TypeNameEmbeddedFusionName)
+ {
+ for (; (*m_currentItr != W('\0')) && (*m_currentItr != W(']')); m_currentItr++)
+ {
+ if (*m_currentItr == W('\\'))
+ {
+ if (*(m_currentItr + 1) == W(']'))
+ {
+ m_escape.Append(m_currentItr);
+ m_currentItr ++;
+ continue;
+ }
+ }
+
+ if (*m_currentItr == '\0')
+ return FALSE;
+ }
+ if (*m_currentItr == W('\0'))
+ {
+ return FALSE;
+ }
+ }
+ else
+ return FALSE;
+
+ sszId->Set(start, (COUNT_T)(m_currentItr - start));
+
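+ // Strip the escaping backslashes, walking backwards so earlier offsets stay valid.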
+ for (SCOUNT_T i = m_escape.GetCount() - 1; i >= 0; i--)
+ sszId->Delete(sszId->Begin() + (SCOUNT_T)(m_escape[i] - start), 1);
+
+ m_itr = m_currentItr;
+ m_nextToken = LexAToken();
+ return TRUE;
+}
+
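+// Recursive-descent entry point. The grammar, assembled from the production
+// comments below:
+//
+//   AQN       := /* empty */ | FULLNAME [',' ASSEMSPEC]
+//   FULLNAME  := NAME GENPARAMS QUALIFIER
+//   NAME      := id ['+' NESTNAME]
+//   GENPARAMS := /* empty */ | '[' GENARGS ']'
+//   GENARGS   := GENARG [',' GENARGS]
+//   GENARG    := '[' EAQN ']' | FULLNAME
+//   QUALIFIER := /* empty */ | '&' | '*' QUALIFIER | ARRAY QUALIFIER
+//   ARRAY     := '[' RANK ']' | '[' '*' ']'
+//
+// The two NextToken() calls prime the one-token lookahead
+// (m_currentToken / m_nextToken).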
+BOOL TypeName::TypeNameParser::START()
+{
+ WRAPPER_NO_CONTRACT;
+
+ NextToken();
+ NextToken();
+ return AQN();
+}
+
+// FULLNAME ',' ASSEMSPEC
+// FULLNAME
+// /* empty */
+BOOL TypeName::TypeNameParser::AQN()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ IfFailGo(TokenIs(TypeNameAQN));
+
+ if (TokenIs(TypeNameEnd))
+ return TRUE;
+
+ IfFailGo(FULLNAME());
+
+ if (TokenIs(TypeNameComma))
+ {
+ NextToken();
+ IfFailGo(ASSEMSPEC());
+ }
+
+ IfFailGo(TokenIs(TypeNameEnd));
+
+ return TRUE;
+}
+
+// fusionName
+BOOL TypeName::TypeNameParser::ASSEMSPEC()
+{
+ WRAPPER_NO_CONTRACT;
+ IfFailGo(TokenIs(TypeNameASSEMSPEC));
+
+ GetIdentifier(m_pTypeName->GetAssembly(), TypeNameFusionName);
+
+ NextToken();
+
+ return TRUE;
+}
+
+// NAME GENPARAMS QUALIFIER
+BOOL TypeName::TypeNameParser::FULLNAME()
+{
+ WRAPPER_NO_CONTRACT;
+ IfFailGo(TokenIs(TypeNameFULLNAME));
+ IfFailGo(NAME());
+
+ IfFailGo(GENPARAMS());
+
+ IfFailGo(QUALIFIER());
+
+ return TRUE;
+}
+
+// *empty*
+// '[' GENARGS ']'
+BOOL TypeName::TypeNameParser::GENPARAMS()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!TokenIs(TypeNameGENPARAM))
+ return TRUE;
+
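+ // A '[' here may instead open an array qualifier; if the lookahead cannot start
+ // GENARGS, leave the bracket for QUALIFIER/ARRAY to consume.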
+ if (!NextTokenIs(TypeNameGENARGS))
+ return TRUE;
+
+ NextToken();
+ IfFailGo(GENARGS());
+
+ IfFailGo(TokenIs(TypeNameCloseSqBracket));
+ NextToken();
+
+ return TRUE;
+}
+
+// GENARG
+// GENARG ',' GENARGS
+BOOL TypeName::TypeNameParser::GENARGS()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ IfFailGo(TokenIs(TypeNameGENARGS));
+
+ IfFailGo(GENARG());
+
+ if (TokenIs(TypeNameComma))
+ {
+ NextToken();
+ IfFailGo(GENARGS());
+ }
+
+ return TRUE;
+}
+
+// '[' EAQN ']'
+// FULLNAME
+BOOL TypeName::TypeNameParser::GENARG()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ IfFailGo(TokenIs(TypeNameGENARG));
+
+ TypeName* pEnclosingTypeName = m_pTypeName;
+ m_pTypeName = m_pTypeName->AddGenericArgument();
+ {
+ if (TokenIs(TypeNameOpenSqBracket))
+ {
+ NextToken();
+ IfFailGo(EAQN());
+
+ IfFailGo(TokenIs(TypeNameCloseSqBracket));
+ NextToken();
+ }
+ else
+ {
+ IfFailGo(FULLNAME());
+ }
+ }
+ m_pTypeName = pEnclosingTypeName;
+
+ return TRUE;
+}
+
+// FULLNAME ',' EASSEMSPEC
+// FULLNAME
+BOOL TypeName::TypeNameParser::EAQN()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ IfFailGo(TokenIs(TypeNameEAQN));
+
+ IfFailGo(FULLNAME());
+
+ if (TokenIs(TypeNameComma))
+ {
+ NextToken();
+ IfFailGo(EASSEMSPEC());
+ }
+
+ return TRUE;
+}
+
+// embeddedFusionName
+BOOL TypeName::TypeNameParser::EASSEMSPEC()
+{
+ WRAPPER_NO_CONTRACT;
+ IfFailGo(TokenIs(TypeNameEASSEMSPEC));
+
+ GetIdentifier(m_pTypeName->GetAssembly(), TypeNameEmbeddedFusionName);
+
+ NextToken();
+
+ return TRUE;
+}
+
+// *empty*
+// '&'
+// '*' QUALIFIER
+// ARRAY QUALIFIER
+BOOL TypeName::TypeNameParser::QUALIFIER()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!TokenIs(TypeNameQUALIFIER))
+ return TRUE;
+
+ if (TokenIs(TypeNameAmperstand))
+ {
+ m_pTypeName->SetByRef();
+
+ NextToken();
+ }
+ else if (TokenIs(TypeNameAstrix))
+ {
+ m_pTypeName->SetPointer();
+
+ NextToken();
+ IfFailGo(QUALIFIER());
+ }
+ else
+ {
+ IfFailGo(ARRAY());
+ IfFailGo(QUALIFIER());
+ }
+
+ return TRUE;
+}
+
+// '[' RANK ']'
+// '[' '*' ']'
+BOOL TypeName::TypeNameParser::ARRAY()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ IfFailGo(TokenIs(TypeNameARRAY));
+
+ NextToken();
+
+ if (TokenIs(TypeNameAstrix))
+ {
+ m_pTypeName->SetArray(1);
+
+ NextToken();
+ }
+ else
+ {
+ DWORD dwRank = 1;
+ IfFailGo(RANK(&dwRank));
+
+ if (dwRank == 1)
+ m_pTypeName->SetSzArray();
+ else
+ m_pTypeName->SetArray(dwRank);
+ }
+
+ IfFailGo(TokenIs(TypeNameCloseSqBracket));
+ NextToken();
+
+ return TRUE;
+}
+
+// *empty*
+// ',' RANK
+BOOL TypeName::TypeNameParser::RANK(DWORD* pdwRank)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!TokenIs(TypeNameRANK))
+ return TRUE;
+
+ NextToken();
+ *pdwRank = *pdwRank + 1;
+ IfFailGo(RANK(pdwRank));
+
+ return TRUE;
+}
+
+// id
+// id '+' NESTNAME
+BOOL TypeName::TypeNameParser::NAME()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ IfFailGo(TokenIs(TypeNameNAME));
+
+ GetIdentifier(m_pTypeName->AddName(), TypeNameId);
+
+ BOOL legacy = FALSE;
+#ifdef FEATURE_LEGACYNETCF
+ legacy = GetAppDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8;
+#endif
+ if (legacy && (m_nextToken == TypeNameComma))
+ NextTokenLegacyAssemSpec();
+ else
+ NextToken();
+
+ if (TokenIs(TypeNamePlus))
+ {
+ NextToken();
+ IfFailGo(NESTNAME());
+ }
+
+ return TRUE;
+}
+
+// id
+// id '+' NESTNAME
+BOOL TypeName::TypeNameParser::NESTNAME()
+{
+ WRAPPER_NO_CONTRACT;
+ IfFailGo(TokenIs(TypeNameNESTNAME));
+
+ GetIdentifier(m_pTypeName->AddName(), TypeNameId);
+
+ NextToken();
+ if (TokenIs(TypeNamePlus))
+ {
+ NextToken();
+ IfFailGo(NESTNAME());
+ }
+
+ return TRUE;
+}
+
+//--------------------------------------------------------------------------------------------------------------
+// This version is used for resolving types named in custom attributes such as those used
+// for interop. Thus, it follows a well-known multistage set of rules for determining which
+// assembly the type is in. It will also enforce that the requesting assembly has access
+// rights to the type being loaded.
+//
+// The search logic is:
+//
+// if szTypeName is ASM-qualified, only that assembly will be searched.
+// if szTypeName is not ASM-qualified, we will search for the types in the following order:
+// - in pRequestingAssembly (if not NULL). pRequestingAssembly is the assembly that contained
+// the custom attribute from which the typename was derived.
+// - in mscorlib.dll
+// - raise an AssemblyResolveEvent() in the current appdomain
+//
+// pRequestingAssembly may be NULL. In that case, the "visibility" check will simply check that
+// the loaded type has public access.
+//--------------------------------------------------------------------------------------------------------------
+/* public static */
+TypeHandle TypeName::GetTypeUsingCASearchRules(LPCUTF8 szTypeName, Assembly *pRequestingAssembly, BOOL *pfNameIsAsmQualified/* = NULL*/, BOOL bDoVisibilityChecks/* = TRUE*/)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+
+ StackSString sszAssemblyQualifiedName(SString::Utf8, szTypeName);
+ return GetTypeUsingCASearchRules(sszAssemblyQualifiedName.GetUnicode(), pRequestingAssembly, pfNameIsAsmQualified, bDoVisibilityChecks);
+}
+
+TypeHandle TypeName::GetTypeUsingCASearchRules(LPCWSTR szTypeName, Assembly *pRequestingAssembly, BOOL *pfNameIsAsmQualified/* = NULL*/, BOOL bDoVisibilityChecks/* = TRUE*/)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+
+ BOOL bIntrospectionOnly = pRequestingAssembly ? pRequestingAssembly->IsIntrospectionOnly() : FALSE; // classfactory loads are always for execution
+
+ DWORD error = (DWORD)-1;
+
+ GCX_COOP();
+ OBJECTREF keepAlive = NULL;
+ TypeHandle th = TypeHandle();
+
+ GCPROTECT_BEGIN(keepAlive);
+
+#ifdef __GNUC__
+ // When compiling under GCC we have to use the -fstack-check option to ensure we always spot stack
+ // overflow. But this option is intolerant of locals growing too large, so we have to cut back a bit
+ // on what we can allocate inline here. Leave the Windows versions alone to retain the perf benefits
+ // since we don't have the same constraints.
+ NewHolder<TypeName> pTypeName = new TypeName(szTypeName, &error);
+#else // __GNUC__
+ TypeName typeName(szTypeName, &error);
+ TypeName *pTypeName = &typeName;
+#endif // __GNUC__
+
+ if (error != (DWORD)-1)
+ {
+ StackSString buf;
+ StackSString msg(W("typeName@"));
+ COUNT_T size = buf.GetUnicodeAllocation();
+ _itow_s(error, buf.OpenUnicodeBuffer(size), size, /*radix*/10);
+ buf.CloseBuffer();
+ msg.Append(buf);
+ COMPlusThrowArgumentException(msg.GetUnicode(), NULL);
+ }
+
+ if (pfNameIsAsmQualified)
+ {
+ *pfNameIsAsmQualified = TRUE;
+ if (pTypeName->GetAssembly()->IsEmpty())
+ *pfNameIsAsmQualified = FALSE;
+ }
+
+ th = pTypeName->GetTypeWorker(
+ /*bThrowIfNotFound = */ TRUE,
+ /*bIgnoreCase = */ FALSE,
+ bIntrospectionOnly,
+ /*pAssemblyGetType =*/ NULL,
+ /*fEnableCASearchRules = */ TRUE,
+ /*fProhibitAsmQualifiedName = */ FALSE,
+ NULL,
+ pRequestingAssembly,
+#ifdef FEATURE_HOSTED_BINDER
+ nullptr,
+#endif
+ FALSE,
+ &keepAlive);
+
+ ASSERT(!th.IsNull());
+ LoaderAllocator *pLoaderAllocator = th.GetLoaderAllocator();
+
+ if (pLoaderAllocator->IsCollectible())
+ {
+ if ((pRequestingAssembly == NULL) || !pRequestingAssembly->GetLoaderAllocator()->IsCollectible())
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleBoundNonCollectible"));
+ }
+ else
+ {
+ pRequestingAssembly->GetLoaderAllocator()->EnsureReference(pLoaderAllocator);
+ }
+ }
+
+ GCPROTECT_END();
+ return th;
+}
+
+
+//--------------------------------------------------------------------------------------------------------------
+// This everything-but-the-kitchen-sink version is what used to be called "GetType()". It exposes all the
+// funky knobs needed for implementing the specific requirements of the managed Type.GetType() apis and friends.
+//--------------------------------------------------------------------------------------------------------------
+/*public static */ TypeHandle TypeName::GetTypeManaged(
+ LPCWSTR szTypeName,
+ DomainAssembly* pAssemblyGetType,
+ BOOL bThrowIfNotFound,
+ BOOL bIgnoreCase,
+ BOOL bIntrospectionOnly,
+ BOOL bProhibitAsmQualifiedName,
+ StackCrawlMark* pStackMark,
+ BOOL bLoadTypeFromPartialNameHack,
+ OBJECTREF *pKeepAlive
+#ifdef FEATURE_HOSTED_BINDER
+ , ICLRPrivBinder * pPrivHostBinder
+#endif
+ )
+{
+ STANDARD_VM_CONTRACT;
+
+ if (!*szTypeName)
+ COMPlusThrow(kArgumentException, W("Format_StringZeroLength"));
+
+ DWORD error = (DWORD)-1;
+
+ /* Partial name workaround loading must not load a collectible type */
+ if (bLoadTypeFromPartialNameHack)
+ pKeepAlive = NULL;
+
+#ifdef __GNUC__
+ // When compiling under GCC we have to use the -fstack-check option to ensure we always spot stack
+ // overflow. But this option is intolerant of locals growing too large, so we have to cut back a bit
+ // on what we can allocate inline here. Leave the Windows versions alone to retain the perf benefits
+ // since we don't have the same constraints.
+ NewHolder<TypeName> pTypeName = new TypeName(szTypeName, &error);
+#else // __GNUC__
+ TypeName typeName(szTypeName, &error);
+ TypeName *pTypeName = &typeName;
+#endif // __GNUC__
+
+ if (error != (DWORD)-1)
+ {
+ if (!bThrowIfNotFound)
+ return TypeHandle();
+
+ StackSString buf;
+ StackSString msg(W("typeName@"));
+ COUNT_T size = buf.GetUnicodeAllocation();
+ _itow_s(error, buf.OpenUnicodeBuffer(size), size, /*radix*/10);
+ buf.CloseBuffer();
+ msg.Append(buf);
+ COMPlusThrowArgumentException(msg.GetUnicode(), NULL);
+ }
+
+ BOOL bPeriodPrefix = szTypeName[0] == W('.');
+
+ TypeHandle result = pTypeName->GetTypeWorker(
+ bPeriodPrefix ? FALSE : bThrowIfNotFound,
+ bIgnoreCase,
+ bIntrospectionOnly,
+ pAssemblyGetType ? pAssemblyGetType->GetAssembly() : NULL,
+ /*fEnableCASearchRules = */TRUE,
+ bProhibitAsmQualifiedName,
+ pStackMark,
+ NULL,
+#ifdef FEATURE_HOSTED_BINDER
+ pPrivHostBinder,
+#endif
+ bLoadTypeFromPartialNameHack,
+ pKeepAlive);
+
+ if (bPeriodPrefix && result.IsNull())
+ {
+ new (pTypeName) TypeName(szTypeName + 1, &error);
+
+ if (error != (DWORD)-1)
+ {
+ if (!bThrowIfNotFound)
+ return TypeHandle();
+
+ StackSString buf;
+ StackSString msg(W("typeName@"));
+ COUNT_T size = buf.GetUnicodeAllocation();
+ // The parse ran over szTypeName + 1, so shift the offset back to the original string.
+ _itow_s(error + 1, buf.OpenUnicodeBuffer(size), size, /*radix*/10);
+ buf.CloseBuffer();
+ msg.Append(buf);
+ COMPlusThrowArgumentException(msg.GetUnicode(), NULL);
+ }
+
+ result = pTypeName->GetTypeWorker(
+ bThrowIfNotFound,
+ bIgnoreCase,
+ bIntrospectionOnly,
+ pAssemblyGetType ? pAssemblyGetType->GetAssembly() : NULL,
+ /*fEnableCASearchRules = */TRUE,
+ bProhibitAsmQualifiedName,
+ pStackMark,
+ NULL,
+#ifdef FEATURE_HOSTED_BINDER
+ pPrivHostBinder,
+#endif
+ bLoadTypeFromPartialNameHack,
+ pKeepAlive);
+ }
+
+ return result;
+}
+
+//-------------------------------------------------------------------------------------------
+// Retrieves a type from an assembly. It requires the caller to know which assembly
+// the type is in.
+//-------------------------------------------------------------------------------------------
+/* public static */ TypeHandle TypeName::GetTypeFromAssembly(LPCWSTR szTypeName, Assembly *pAssembly, BOOL bThrowIfNotFound /*= TRUE*/)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+
+ _ASSERTE(szTypeName != NULL);
+ _ASSERTE(pAssembly != NULL);
+
+ if (!*szTypeName)
+ COMPlusThrow(kArgumentException, W("Format_StringZeroLength"));
+
+ DWORD error = (DWORD)-1;
+
+#ifdef __GNUC__
+ // When compiling under GCC we have to use the -fstack-check option to ensure we always spot stack
+ // overflow. But this option is intolerant of locals growing too large, so we have to cut back a bit
+ // on what we can allocate inline here. Leave the Windows versions alone to retain the perf benefits
+ // since we don't have the same constraints.
+ NewHolder<TypeName> pTypeName = new TypeName(szTypeName, &error);
+#else // __GNUC__
+ TypeName typeName(szTypeName, &error);
+ TypeName *pTypeName = &typeName;
+#endif // __GNUC__
+
+ if (error != (DWORD)-1)
+ {
+ StackSString buf;
+ StackSString msg(W("typeName@"));
+ COUNT_T size = buf.GetUnicodeAllocation();
+ _itow_s(error, buf.OpenUnicodeBuffer(size), size, /*radix*/10);
+ buf.CloseBuffer();
+ msg.Append(buf);
+ COMPlusThrowArgumentException(msg.GetUnicode(), NULL);
+ }
+
+ // Because the typename can come from untrusted input, we will throw an exception rather than assert.
+ // (This also assures that the shipping build does the right thing.)
+ if (!(pTypeName->GetAssembly()->IsEmpty()))
+ {
+ COMPlusThrow(kArgumentException, IDS_EE_CANNOT_HAVE_ASSEMBLY_SPEC);
+ }
+
+ return pTypeName->GetTypeWorker(bThrowIfNotFound, /*bIgnoreCase = */FALSE, pAssembly->IsIntrospectionOnly(), pAssembly, /*fEnableCASearchRules = */FALSE, FALSE, NULL, NULL,
+#ifdef FEATURE_HOSTED_BINDER
+ nullptr, // pPrivHostBinder
+#endif
+ FALSE, NULL /* cannot find a collectible type unless it is in this assembly */);
+}
+
+//-------------------------------------------------------------------------------------------
+// Retrieves a type. Will assert if the name is not fully qualified.
+//-------------------------------------------------------------------------------------------
+/* public static */ TypeHandle TypeName::GetTypeFromAsmQualifiedName(LPCWSTR szFullyQualifiedName, BOOL bForIntrospection)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+
+ _ASSERTE(szFullyQualifiedName != NULL);
+
+ if (!*szFullyQualifiedName)
+ COMPlusThrow(kArgumentException, W("Format_StringZeroLength"));
+
+ DWORD error = (DWORD)-1;
+
+#ifdef __GNUC__
+ // When compiling under GCC we have to use the -fstack-check option to ensure we always spot stack
+ // overflow. But this option is intolerant of locals growing too large, so we have to cut back a bit
+ // on what we can allocate inline here. Leave the Windows versions alone to retain the perf benefits
+ // since we don't have the same constraints.
+ NewHolder<TypeName> pTypeName = new TypeName(szFullyQualifiedName, &error);
+#else // __GNUC__
+ TypeName typeName(szFullyQualifiedName, &error);
+ TypeName *pTypeName = &typeName;
+#endif // __GNUC__
+
+ if (error != (DWORD)-1)
+ {
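+ // The SafeHandle owns one reference on this TypeName; QReleaseTypeNameParser
+ // (below) balances it when the managed handle is released.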
+ StackSString buf;
+ StackSString msg(W("typeName@"));
+ COUNT_T size = buf.GetUnicodeAllocation();
+ _itow_s(error, buf.OpenUnicodeBuffer(size), size, /*radix*/10);
+ buf.CloseBuffer();
+ msg.Append(buf);
+ COMPlusThrowArgumentException(msg.GetUnicode(), NULL);
+ }
+
+ return pTypeName->GetTypeFromAsm(bForIntrospection);
+}
+
+
+TypeHandle TypeName::GetTypeFromAsm(BOOL bForIntrospection)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+
+ // Because the typename can come from untrusted input, we will throw an exception rather than assert.
+ // (This also assures that the shipping build does the right thing.)
+ if (this->GetAssembly()->IsEmpty())
+ {
+ COMPlusThrow(kArgumentException, IDS_EE_NEEDS_ASSEMBLY_SPEC);
+ }
+
+ return this->GetTypeWorker(
+ /*bThrowIfNotFound =*/TRUE,
+ /*bIgnoreCase = */FALSE,
+ bForIntrospection,
+ NULL,
+ /*fEnableCASearchRules = */FALSE,
+ FALSE,
+ NULL,
+ NULL,
+#ifdef FEATURE_HOSTED_BINDER
+ nullptr, // pPrivHostBinder
+#endif
+ FALSE,
+ NULL /* cannot find a collectible type */);
+}
+
+
+
+// -------------------------------------------------------------------------------------------------------------
+// This is the "uber" GetType() that all public GetType() funnels through. It's main job is to figure out which
+// Assembly to load the type from and then invoke GetTypeHaveAssembly.
+//
+// It's got a highly baroque interface partly for historical reasons and partly because it's the uber-function
+// for all of the possible GetTypes.
+// -------------------------------------------------------------------------------------------------------------
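+// Example input (an assembly-qualified generic name):
+//   "System.Collections.Generic.List`1[[System.Int32, mscorlib]], mscorlib"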
+/* private instance */ TypeHandle TypeName::GetTypeWorker(
+ BOOL bThrowIfNotFound,
+ BOOL bIgnoreCase,
+ BOOL bIntrospectionOnly,
+ Assembly* pAssemblyGetType,
+
+ BOOL fEnableCASearchRules,
+ BOOL bProhibitAsmQualifiedName,
+ StackCrawlMark* pStackMark,
+ Assembly* pRequestingAssembly,
+#ifdef FEATURE_HOSTED_BINDER
+ ICLRPrivBinder * pPrivHostBinder,
+#endif
+ BOOL bLoadTypeFromPartialNameHack,
+ OBJECTREF *pKeepAlive)
+{
+ CONTRACT(TypeHandle)
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(!(RETVAL.IsNull() && bThrowIfNotFound));
+ }
+ CONTRACT_END
+
+ GCX_COOP();
+
+ ASSEMBLYREF asmRef = NULL;
+ TypeHandle th = TypeHandle();
+ GCPROTECT_BEGIN(asmRef);
+
+ // We don't ever want to get anything related to a collectible type in here if we are not able to return one.
+ ASSEMBLYREF *pAsmRef = &asmRef;
+ if (pKeepAlive == NULL)
+ pAsmRef = NULL;
+
+ //requires a lot of space
+ DECLARE_INTERIOR_STACK_PROBE;
+ // This function is recursive, so it must have an interior probe
+ if (bThrowIfNotFound)
+ {
+ DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(12);
+ }
+ else
+ {
+ DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(12, goto Exit;);
+ }
+
+ // An explicit assembly has been specified so look for the type there
+ if (!GetAssembly()->IsEmpty())
+ {
+
+ if (bProhibitAsmQualifiedName && !m_bIsGenericArgument)
+ {
+ if (bThrowIfNotFound)
+ {
+ COMPlusThrow(kArgumentException, IDS_EE_ASSEMBLY_GETTYPE_CANNONT_HAVE_ASSEMBLY_SPEC);
+ }
+ else
+ {
+ th = TypeHandle();
+ goto Exit;
+ }
+ }
+
+ if (!pRequestingAssembly && pStackMark)
+ pRequestingAssembly = SystemDomain::GetCallersAssembly(pStackMark);
+
+ SString * pssOuterTypeName = NULL;
+ if (GetNames().GetCount() > 0)
+ {
+ pssOuterTypeName = GetNames()[0];
+ }
+
+ // We want to catch the exception if we're going to later try a partial bind.
+ if (bLoadTypeFromPartialNameHack)
+ {
+ EX_TRY
+ {
+ DomainAssembly *pDomainAssembly = LoadDomainAssembly(GetAssembly(), pRequestingAssembly,
+#ifdef FEATURE_HOSTED_BINDER
+ pPrivHostBinder,
+#endif
+ bThrowIfNotFound, bIntrospectionOnly, pssOuterTypeName);
+ if (pDomainAssembly)
+ {
+ th = GetTypeHaveAssembly(pDomainAssembly->GetAssembly(), bThrowIfNotFound, bIgnoreCase, pKeepAlive);
+ }
+ }
+ EX_CATCH
+ {
+ th = TypeHandle();
+ }
+ EX_END_CATCH(RethrowTransientExceptions);
+ }
+ else
+ {
+ DomainAssembly *pDomainAssembly = LoadDomainAssembly(GetAssembly(), pRequestingAssembly,
+#ifdef FEATURE_HOSTED_BINDER
+ pPrivHostBinder,
+#endif
+ bThrowIfNotFound, bIntrospectionOnly, pssOuterTypeName);
+ if (pDomainAssembly)
+ {
+ th = GetTypeHaveAssembly(pDomainAssembly->GetAssembly(), bThrowIfNotFound, bIgnoreCase, pKeepAlive);
+ }
+ }
+ }
+
+ // There's no explicit assembly so look in the assembly specified by the original caller (Assembly.GetType)
+ else if (pAssemblyGetType)
+ {
+ th = GetTypeHaveAssembly(pAssemblyGetType, bThrowIfNotFound, bIgnoreCase, pKeepAlive);
+
+#ifdef FEATURE_LEGACYNETCF
+ // 443770 - [AppCompat]: Glow Artisan - CLR_EXCEPTION_NOSOS_e0434352_agcoredllCCoreServicesSetFrameDirty
+ // NetCF searches mscorlib even if GetType() is explicitly called on another assembly.
+ if (th.IsNull() &&
+ GetAppDomain()->GetAppDomainCompatMode() == BaseDomain::APPDOMAINCOMPAT_APP_EARLIER_THAN_WP8 &&
+ pAssemblyGetType != SystemDomain::SystemAssembly())
+ {
+ th = GetTypeHaveAssembly(SystemDomain::SystemAssembly(), bThrowIfNotFound, bIgnoreCase, pKeepAlive);
+ }
+#endif
+ }
+
+ // Otherwise look in the caller's assembly then the system assembly
+ else if (fEnableCASearchRules)
+ {
+ if (bIntrospectionOnly)
+ {
+ if (pStackMark != NULL) // This is our test to see if we're being called because of a managed API or because we're parsing a CA.
+ {
+ COMPlusThrow(kArgumentException, IDS_EE_REFLECTIONONLYGETTYPE_NOASSEMBLY);
+ }
+ }
+
+ if (!pRequestingAssembly && pStackMark)
+ pRequestingAssembly = SystemDomain::GetCallersAssembly(pStackMark);
+
+ // Look for type in caller's assembly
+ if (pRequestingAssembly)
+ th = GetTypeHaveAssembly(pRequestingAssembly, bThrowIfNotFound, bIgnoreCase, pKeepAlive);
+
+ // Look for type in system assembly
+ if (th.IsNull())
+ {
+ if (pRequestingAssembly != SystemDomain::SystemAssembly())
+ th = GetTypeHaveAssembly(SystemDomain::SystemAssembly(), bThrowIfNotFound, bIgnoreCase, pKeepAlive);
+ }
+
+ // Raise AssemblyResolveEvent to try to resolve assembly
+ if (th.IsNull() && !bIntrospectionOnly)
+ {
+ AppDomain *pDomain = (AppDomain *)SystemDomain::GetCurrentDomain();
+
+ if ((BaseDomain*)pDomain != SystemDomain::System())
+ {
+ TypeNameBuilder tnb;
+ for (COUNT_T i = 0; i < GetNames().GetCount(); i ++)
+ tnb.AddName(GetNames()[i]->GetUnicode());
+
+ StackScratchBuffer bufFullName;
+ DomainAssembly* pDomainAssembly = pDomain->RaiseTypeResolveEventThrowing(pRequestingAssembly?pRequestingAssembly->GetDomainAssembly():NULL,tnb.GetString()->GetANSI(bufFullName), pAsmRef);
+ if (pDomainAssembly)
+ th = GetTypeHaveAssembly(pDomainAssembly->GetAssembly(), bThrowIfNotFound, bIgnoreCase, pKeepAlive);
+ }
+ }
+ }
+ else
+ {
+ _ASSERTE(!"You must pass either a asm-qualified typename or an actual Assembly.");
+ }
+
+#ifdef FEATURE_FUSION
+ if (th.IsNull() && bLoadTypeFromPartialNameHack && GetAssembly() && !GetAssembly()->IsEmpty())
+ {
+ DomainAssembly* pPartialBindAssemblyHack = LoadAssemblyFromPartialNameHack(GetAssembly());
+
+ if (pPartialBindAssemblyHack)
+ th = GetTypeHaveAssembly(pPartialBindAssemblyHack->GetAssembly(), bThrowIfNotFound, bIgnoreCase, NULL);
+ }
+#endif // FEATURE_FUSION
+
+ if (!th.IsNull() && (!m_genericArguments.IsEmpty() || !m_signature.IsEmpty()))
+ {
+#ifdef CROSSGEN_COMPILE
+ // This method is used to parse type names in custom attributes. We do not support
+ // that these custom attributes will contain composed types.
+ CrossGenNotSupported("GetTypeWorker");
+#else
+ struct _gc
+ {
+ PTRARRAYREF refGenericArguments;
+ OBJECTREF keepAlive;
+ REFLECTCLASSBASEREF refGenericArg;
+ } gc;
+
+ gc.refGenericArguments = NULL;
+ gc.keepAlive = NULL;
+ gc.refGenericArg = NULL;
+
+ BOOL abortCall = FALSE;
+
+ GCPROTECT_BEGIN(gc);
+ INT32 cGenericArgs = m_genericArguments.GetCount();
+
+ if (cGenericArgs > 0)
+ {
+ TypeHandle arrayHandle = ClassLoader::LoadArrayTypeThrowing(TypeHandle(g_pRuntimeTypeClass), ELEMENT_TYPE_SZARRAY);
+ gc.refGenericArguments = (PTRARRAYREF)AllocateArrayEx(arrayHandle, &cGenericArgs, 1);
+ }
+ // Instantiate generic arguments
+ for (INT32 i = 0; i < cGenericArgs; i++)
+ {
+ TypeHandle thGenericArg = m_genericArguments[i]->GetTypeWorker(
+ bThrowIfNotFound, bIgnoreCase, bIntrospectionOnly,
+ pAssemblyGetType, fEnableCASearchRules, bProhibitAsmQualifiedName, pStackMark, pRequestingAssembly,
+#ifdef FEATURE_HOSTED_BINDER
+ pPrivHostBinder,
+#endif
+ bLoadTypeFromPartialNameHack,
+ (pKeepAlive != NULL) ? &gc.keepAlive : NULL /* Only pass a keepalive parameter if we were passed a keepalive parameter */);
+
+ if (thGenericArg.IsNull())
+ {
+ abortCall = TRUE;
+ break;
+ }
+
+ gc.refGenericArg = (REFLECTCLASSBASEREF)thGenericArg.GetManagedClassObject();
+
+ gc.refGenericArguments->SetAt(i, gc.refGenericArg);
+ }
+
+ MethodDescCallSite getTypeHelper(METHOD__RT_TYPE_HANDLE__GET_TYPE_HELPER);
+
+ ARG_SLOT args[5] = {
+ (ARG_SLOT)OBJECTREFToObject(th.GetManagedClassObject()),
+ (ARG_SLOT)OBJECTREFToObject(gc.refGenericArguments),
+ (ARG_SLOT)(SIZE_T)m_signature.OpenRawBuffer(),
+ m_signature.GetCount(),
+ };
+
+ REFLECTCLASSBASEREF refType = NULL;
+
+ if (!abortCall)
+ refType = (REFLECTCLASSBASEREF)getTypeHelper.Call_RetOBJECTREF(args);
+
+ if (refType != NULL)
+ {
+ th = refType->GetType();
+ if (pKeepAlive)
+ *pKeepAlive = refType;
+ }
+ else
+ {
+ th = TypeHandle();
+ }
+ GCPROTECT_END();
+#endif // CROSSGEN_COMPILE
+ }
+
+ if (th.IsNull() && bThrowIfNotFound)
+ {
+ StackSString buf;
+ LPCWSTR wszName = ToString(&buf)->GetUnicode();
+ MAKE_UTF8PTR_FROMWIDE(szName, wszName);
+
+ if (GetAssembly() && !GetAssembly()->IsEmpty())
+ {
+ ThrowTypeLoadException(NULL, szName, GetAssembly()->GetUnicode(), NULL, IDS_CLASSLOAD_GENERAL);
+ }
+ else if (pAssemblyGetType)
+ {
+ pAssemblyGetType->ThrowTypeLoadException(NULL, szName, IDS_CLASSLOAD_GENERAL);
+ }
+ else if (pRequestingAssembly)
+ {
+ pRequestingAssembly->ThrowTypeLoadException(NULL, szName, IDS_CLASSLOAD_GENERAL);
+ }
+ else
+ {
+ ThrowTypeLoadException(NULL, szName, NULL, NULL, IDS_CLASSLOAD_GENERAL);
+ }
+ }
+
+Exit:
+ ;
+ END_INTERIOR_STACK_PROBE;
+
+ GCPROTECT_END();
+
+ RETURN th;
+}
+
+//----------------------------------------------------------------------------------------------------------------
+// This is the one that actually loads the type once we've pinned down the Assembly it's in.
+//----------------------------------------------------------------------------------------------------------------
+/* private */
+TypeHandle
+TypeName::GetTypeHaveAssemblyHelper(
+ Assembly * pAssembly,
+ BOOL bThrowIfNotFound,
+ BOOL bIgnoreCase,
+ OBJECTREF * pKeepAlive,
+ BOOL bRecurse)
+{
+ WRAPPER_NO_CONTRACT;
+
+ TypeHandle th = TypeHandle();
+ SArray<SString *> & names = GetNames();
+ Module * pManifestModule = pAssembly->GetManifestModule();
+ Module * pLookOnlyInModule = NULL;
+ ClassLoader * pClassLoader = pAssembly->GetLoader();
+
+ NameHandle typeName(pManifestModule, mdtBaseType);
+
+#ifndef CROSSGEN_COMPILE
+ if (pAssembly->IsCollectible())
+ {
+ if (pKeepAlive == NULL)
+ {
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleResolveFailure"));
+ }
+ *pKeepAlive = pAssembly->GetLoaderAllocator()->GetExposedObject();
+ }
+#endif
+
+ // Set up the name handle
+ if (bIgnoreCase)
+ typeName.SetCaseInsensitive();
+
+ EX_TRY
+ {
+ for (COUNT_T i = 0; i < names.GetCount(); i ++)
+ {
+ // each extra name represents one more level of nesting
+ LPCWSTR wname = names[i]->GetUnicode();
+
+ MAKE_UTF8PTR_FROMWIDE(name, wname);
+ typeName.SetName(name);
+
+ // typeName.m_pBucket gets set here if the type is found
+ // it will be used in the next iteration to look up the nested type
+ th = pClassLoader->LoadTypeHandleThrowing(&typeName, CLASS_LOADED, pLookOnlyInModule);
+
+ // DDB 117395: if we didn't find a type, don't bother looking for its nested type
+ if (th.IsNull())
+ break;
+
+ if (th.GetAssembly() != pAssembly)
+ { // It is a forwarded type
+
+ // Use the found assembly class loader for potential nested types search
+ // The nested type has to be in the same module as the nesting type, so it doesn't make
+ // sense to follow the same chain of type forwarders again for the nested type
+ pClassLoader = th.GetAssembly()->GetLoader();
+ }
+
+ // Nested types must live in the module of the nesting type
+ if ((i == 0) && (names.GetCount() > 1) && (pLookOnlyInModule == NULL))
+ {
+ Module * pFoundModule = th.GetModule();
+
+ // Ensure that the bucket in the NameHandle is set to a valid bucket for all cases.
+
+ // If the type is in the manifest module, it will always be set correctly,
+ // or if the type is forwarded always lookup via the standard logic
+ if ((pFoundModule == pManifestModule) || (pFoundModule->GetAssembly() != pAssembly))
+ continue;
+
+ pLookOnlyInModule = pFoundModule;
+
+ // If the type is not in the manifest module, and the nesting type is in the exported
+ // types table of the manifest module but the nested type is not, then the
+ // LoadTypeHandleThrowing logic will fail unless the bucket comes from the actual
+ // defining module.
+ // To fix this, we must force the loader to record the bucket that refers to the nesting type
+ // from within the defining module's available class table.
+
+ // Re-run the LoadTypeHandleThrowing, but force it to only look in the class table for the module which
+ // defines the type. This should cause typeName.m_pBucket to be set to the bucket
+ // which corresponds to the type in the defining module, instead of potentially in the manifest module.
+ i = -1;
+ typeName.SetBucket(NULL);
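+ // i is unsigned; the loop's i++ wraps it back to 0, restarting the name walk.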
+ }
+ }
+
+ if (th.IsNull() && bRecurse)
+ {
+ IMDInternalImport * pManifestImport = pManifestModule->GetMDImport();
+ HENUMInternalHolder phEnum(pManifestImport);
+ phEnum.EnumInit(mdtFile, mdTokenNil);
+ mdToken mdFile;
+
+ while (pManifestImport->EnumNext(&phEnum, &mdFile))
+ {
+ if (pManifestModule->LookupFile(mdFile))
+ continue;
+
+ pManifestModule->LoadModule(GetAppDomain(), mdFile, FALSE);
+
+ th = GetTypeHaveAssemblyHelper(pAssembly, bThrowIfNotFound, bIgnoreCase, NULL, FALSE);
+
+ if (!th.IsNull())
+ break;
+ }
+ }
+ }
+ EX_CATCH
+ {
+ if (bThrowIfNotFound)
+ EX_RETHROW;
+
+ Exception * ex = GET_EXCEPTION();
+
+ // Let non-File-not-found exceptions propagate
+ if (EEFileLoadException::GetFileLoadKind(ex->GetHR()) != kFileNotFoundException)
+ EX_RETHROW;
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ return th;
+} // TypeName::GetTypeHaveAssemblyHelper
+
+#ifdef FEATURE_FUSION
+DomainAssembly* LoadAssemblyFromPartialNameHack(SString* psszAssemblySpec, BOOL fCropPublicKey)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ MethodDescCallSite loadWithPartialNameHack(METHOD__ASSEMBLY__LOAD_WITH_PARTIAL_NAME_HACK);
+ ARG_SLOT args[2];
+ STRINGREF mszAssembly = NULL;
+ DomainAssembly* pPartialBindAssemblyHack = NULL;
+ GCPROTECT_BEGIN(mszAssembly);
+ {
+ mszAssembly = StringObject::NewString(psszAssemblySpec->GetUnicode());
+ args[0] = ObjToArgSlot(mszAssembly);
+ args[1] = BoolToArgSlot(fCropPublicKey);
+
+ ASSEMBLYREF assembly = (ASSEMBLYREF)loadWithPartialNameHack.Call_RetOBJECTREF(args);
+
+ if (assembly != NULL)
+ {
+ pPartialBindAssemblyHack = (DomainAssembly*) assembly->GetDomainAssembly();
+
+ if (pPartialBindAssemblyHack->GetAssembly()->IsCollectible())
+ {
+ // Should not be possible to reach
+ COMPlusThrow(kNotSupportedException, W("NotSupported_CollectibleAssemblyResolve"));
+ }
+ }
+ }
+ GCPROTECT_END();
+
+ return pPartialBindAssemblyHack;
+}
+#endif // FEATURE_FUSION
+
+DomainAssembly * LoadDomainAssembly(
+ SString * psszAssemblySpec,
+ Assembly * pRequestingAssembly,
+#ifdef FEATURE_HOSTED_BINDER
+ ICLRPrivBinder * pPrivHostBinder,
+#endif
+ BOOL bThrowIfNotFound,
+ BOOL bIntrospectionOnly,
+ SString * pssOuterTypeName)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+ AssemblySpec spec;
+ DomainAssembly *pDomainAssembly = NULL;
+
+ if (bIntrospectionOnly)
+ spec.SetIntrospectionOnly(TRUE);
+
+ StackScratchBuffer buffer;
+ LPCUTF8 szAssemblySpec = psszAssemblySpec->GetUTF8(buffer);
+ IfFailThrow(spec.Init(szAssemblySpec));
+
+ if (spec.IsContentType_WindowsRuntime())
+ {
+ _ASSERTE(pssOuterTypeName != NULL);
+ spec.SetWindowsRuntimeType(*pssOuterTypeName);
+ }
+
+#ifdef FEATURE_HOSTED_BINDER
+ if (pPrivHostBinder)
+ {
+ spec.SetHostBinder(pPrivHostBinder);
+ }
+ else
+#endif
+ if (pRequestingAssembly && (!pRequestingAssembly->IsDomainNeutral()) && (!pRequestingAssembly->IsCollectible()))
+ {
+ GCX_PREEMP();
+ spec.SetParentAssembly(pRequestingAssembly->GetDomainAssembly());
+ }
+
+ if (bThrowIfNotFound)
+ {
+ pDomainAssembly = spec.LoadDomainAssembly(FILE_LOADED);
+ }
+ else
+ {
+ EX_TRY
+ {
+ pDomainAssembly = spec.LoadDomainAssembly(FILE_LOADED, NULL, bThrowIfNotFound);
+ }
+ EX_CATCH
+ {
+ Exception *ex = GET_EXCEPTION();
+
+ // Let non-File-not-found exceptions propagate
+ if (EEFileLoadException::GetFileLoadKind(ex->GetHR()) != kFileNotFoundException)
+ EX_RETHROW;
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+ }
+
+ return pDomainAssembly;
+}
+
+
diff --git a/src/vm/typeparse.h b/src/vm/typeparse.h
new file mode 100644
index 0000000000..d56b8b0c3f
--- /dev/null
+++ b/src/vm/typeparse.h
@@ -0,0 +1,476 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ---------------------------------------------------------------------------
+// typeparse.h
+// ---------------------------------------------------------------------------
+//
+
+//
+
+
+#ifndef TYPEPARSE_H
+#define TYPEPARSE_H
+
+#include "common.h"
+#include "class.h"
+#include "typehandle.h"
+
+// To work around a warning about redefining "TypeName" include the file
+// that defines Windows.UI.Xaml.Interop.TypeName now.
+#ifdef FEATURE_COMINTEROP
+#include <windows.ui.xaml.interop.h>
+#endif
+
+//#define TYPE_NAME_RESERVED_CHAR W(",[]&*+\\")
+
+bool inline IsTypeNameReservedChar(WCHAR ch)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ switch (ch)
+ {
+ case W(','):
+ case W('['):
+ case W(']'):
+ case W('&'):
+ case W('*'):
+ case W('+'):
+ case W('\\'):
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+#ifdef FEATURE_FUSION
+DomainAssembly* LoadAssemblyFromPartialNameHack(SString* psszAssemblySpec, BOOL fCropPublicKey = FALSE);
+#endif // FEATURE_FUSION
+
+DomainAssembly * LoadDomainAssembly(
+ SString * psszAssemblySpec,
+ Assembly * pRequestingAssembly,
+#ifdef FEATURE_HOSTED_BINDER
+ ICLRPrivBinder * pPrivHostBinder,
+#endif
+ BOOL bThrowIfNotFound,
+ BOOL bIntrospectionOnly,
+ SString * pssOuterTypeName);
+
+class TypeNameFactory : public ITypeNameFactory
+{
+public:
+ static HRESULT CreateObject(REFIID riid, void **ppUnk);
+
+public:
+ virtual HRESULT __stdcall QueryInterface(REFIID riid, void **ppUnk);
+ virtual ULONG __stdcall AddRef() { LIMITED_METHOD_CONTRACT; m_count++; return m_count; }
+ virtual ULONG __stdcall Release() { LIMITED_METHOD_CONTRACT; SUPPORTS_DAC_HOST_ONLY; m_count--; ULONG count = m_count; if (count == 0) delete this; return count; }
+
+public:
+ virtual HRESULT __stdcall ParseTypeName(LPCWSTR szName, DWORD* pError, ITypeName** ppTypeName);
+ virtual HRESULT __stdcall GetTypeNameBuilder(ITypeNameBuilder** ppTypeBuilder);
+
+public:
+ TypeNameFactory() : m_count(0)
+ {
+ WRAPPER_NO_CONTRACT;
+ SString::Startup();
+ }
+
+private:
+ DWORD m_count;
+};
+
+class TypeName : public ITypeName
+{
+private:
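+ // Bump allocator for nested-name strings: hands out up to MAX_PRODUCT embedded
+ // PRODUCT instances per block, chaining a new block on overflow.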
+ template<typename PRODUCT>
+ class Factory
+ {
+ public:
+ const static DWORD MAX_PRODUCT = 4;
+
+ public:
+ Factory() : m_cProduct(0), m_next(NULL) { LIMITED_METHOD_CONTRACT; }
+ ~Factory()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+ VALIDATE_BACKOUT_STACK_CONSUMPTION;
+
+ if (m_next)
+ delete m_next;
+ }
+
+ PRODUCT* Create()
+ { WRAPPER_NO_CONTRACT; if (m_cProduct == (INT32)MAX_PRODUCT) return GetNext()->Create(); return &m_product[m_cProduct++]; }
+
+ private:
+ Factory* GetNext() { if (!m_next) m_next = new Factory<PRODUCT>(); return m_next; }
+
+ private:
+ PRODUCT m_product[MAX_PRODUCT];
+ INT32 m_cProduct;
+ Factory* m_next;
+ };
+ friend class TypeName::Factory<TypeName>;
+ friend class TypeNameBuilder;
+
+private:
+ class TypeNameParser
+ {
+ TypeNameParser(LPCWSTR szTypeName, TypeName* pTypeName, DWORD* pError)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (szTypeName == NULL)
+ {
+ szTypeName = W("");
+ }
+
+ m_currentToken = TypeNameEmpty;
+ m_nextToken = TypeNameEmpty;
+
+ *pError = (DWORD)-1;
+ m_pTypeName = pTypeName;
+ m_sszTypeName = szTypeName;
+ m_currentItr = m_itr = m_sszTypeName;
+
+ if (!START())
+ *pError = (DWORD)(m_currentItr - m_sszTypeName) - 1;
+ }
+
+ private:
+ friend class TypeName;
+
+ private:
+ typedef enum {
+ //
+ // TOKENS
+ //
+ TypeNameEmpty = 0x8000,
+ TypeNameIdentifier = 0x0001,
+ TypeNamePostIdentifier = 0x0002,
+ TypeNameOpenSqBracket = 0x0004,
+ TypeNameCloseSqBracket = 0x0008,
+ TypeNameComma = 0x0010,
+ TypeNamePlus = 0x0020,
+ TypeNameAstrix = 0x0040,
+ TypeNameAmperstand = 0x0080,
+ TypeNameBackSlash = 0x0100,
+ TypeNameEnd = 0x4000,
+
+ //
+ // 1 TOKEN LOOK AHEAD
+ //
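+ // Each production's value below is the union of the token bits that may begin it
+ // (its FIRST set), so TokenIs(production) is a single bitwise AND.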
+ TypeNameNAME = TypeNameIdentifier,
+ TypeNameNESTNAME = TypeNameIdentifier,
+ TypeNameASSEMSPEC = TypeNameIdentifier,
+ TypeNameGENPARAM = TypeNameOpenSqBracket | TypeNameEmpty,
+ TypeNameFULLNAME = TypeNameNAME,
+ TypeNameAQN = TypeNameFULLNAME | TypeNameEnd,
+ TypeNameASSEMBLYSPEC = TypeNameIdentifier,
+ TypeNameGENARG = TypeNameOpenSqBracket | TypeNameFULLNAME,
+ TypeNameGENARGS = TypeNameGENARG,
+ TypeNameEAQN = TypeNameIdentifier,
+ TypeNameEASSEMSPEC = TypeNameIdentifier,
+ TypeNameARRAY = TypeNameOpenSqBracket,
+ TypeNameQUALIFIER = TypeNameAmperstand | TypeNameAstrix | TypeNameARRAY | TypeNameEmpty,
+ TypeNameRANK = TypeNameComma | TypeNameEmpty,
+ } TypeNameTokens;
+
+ typedef enum {
+ TypeNameNone = 0x00,
+ TypeNameId = 0x01,
+ TypeNameFusionName = 0x02,
+ TypeNameEmbeddedFusionName = 0x03,
+ } TypeNameIdentifiers;
+
+ //
+ // LEXIFIER
+ //
+ private:
+ TypeNameTokens LexAToken(BOOL ignorePlus = FALSE);
+ BOOL GetIdentifier(SString* sszId, TypeNameIdentifiers identiferType);
+ void NextToken() { WRAPPER_NO_CONTRACT; m_currentToken = m_nextToken; m_currentItr = m_itr; m_nextToken = LexAToken(); }
+ void NextTokenLegacyAssemSpec() { WRAPPER_NO_CONTRACT; m_currentToken = m_nextToken; m_currentItr = m_itr; m_nextToken = LexAToken(true); }
+ BOOL NextTokenIs(TypeNameTokens token) { LIMITED_METHOD_CONTRACT; return !!(m_nextToken & token); }
+ BOOL TokenIs(TypeNameTokens token) { LIMITED_METHOD_CONTRACT; return !!(m_currentToken & token); }
+ BOOL TokenIs(int token) { LIMITED_METHOD_CONTRACT; return TokenIs((TypeNameTokens)token); }
+
+ //
+ // PRODUCTIONS
+ //
+ private:
+ BOOL START();
+
+ BOOL AQN();
+ // /* empty */
+ // FULLNAME ',' ASSEMSPEC
+ // FULLNAME
+
+ BOOL ASSEMSPEC();
+ // fusionName
+
+ BOOL FULLNAME();
+ // NAME GENPARAMS QUALIFIER
+
+ BOOL GENPARAMS();
+ // *empty*
+ // '[' GENARGS ']'
+
+ BOOL GENARGS();
+ // GENARG
+ // GENARG ',' GENARGS
+
+ BOOL GENARG();
+ // '[' EAQN ']'
+ // FULLNAME
+
+ BOOL EAQN();
+ // FULLNAME ',' EASSEMSPEC
+ // FULLNAME
+
+ BOOL EASSEMSPEC();
+ // embeddedFusionName
+
+ BOOL QUALIFIER();
+ // *empty*
+ // '&'
+ // '*' QUALIFIER
+ // ARRAY QUALIFIER
+
+ BOOL ARRAY();
+ // '[' RANK ']'
+ // '[' '*' ']'
+
+ BOOL RANK(DWORD* pdwRank);
+ // *empty*
+ // ',' RANK
+
+ BOOL NAME();
+ // id
+ // id '+' NESTNAME
+
+ BOOL NESTNAME();
+ // id
+ // id '+' NESTNAME
+
+ public:
+ void MakeRotorHappy() { WRAPPER_NO_CONTRACT; }
+
+ private:
+ TypeName* m_pTypeName;
+ LPCWSTR m_sszTypeName;
+ LPCWSTR m_itr;
+ LPCWSTR m_currentItr;
+ TypeNameTokens m_currentToken;
+ TypeNameTokens m_nextToken;
+ };
+ friend class TypeName::TypeNameParser;
+
+public:
+ virtual HRESULT __stdcall QueryInterface(REFIID riid, void **ppUnk);
+ virtual ULONG __stdcall AddRef();
+ virtual ULONG __stdcall Release();
+
+public:
+ virtual HRESULT __stdcall GetNameCount(DWORD* pCount);
+ virtual HRESULT __stdcall GetNames(DWORD count, BSTR* rgbszNames, DWORD* pFetched);
+ virtual HRESULT __stdcall GetTypeArgumentCount(DWORD* pCount);
+ virtual HRESULT __stdcall GetTypeArguments(DWORD count, ITypeName** rgpArguments, DWORD* pFetched);
+ virtual HRESULT __stdcall GetModifierLength(DWORD* pCount);
+ virtual HRESULT __stdcall GetModifiers(DWORD count, DWORD* rgModifiers, DWORD* pFetched);
+ virtual HRESULT __stdcall GetAssemblyName(BSTR* rgbszAssemblyNames);
+
+public:
+ TypeName(LPCWSTR szTypeName, DWORD* pError) : m_bIsGenericArgument(FALSE), m_count(0)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ TypeNameParser parser(szTypeName, this, pError);
+ parser.MakeRotorHappy();
+ }
+
+ ~TypeName();
+
+public:
+#ifndef FEATURE_CORECLR
+ static void QCALLTYPE QCreateTypeNameParser (LPCWSTR wszTypeName, QCall::ObjectHandleOnStack pNames, BOOL throwOnError);
+ static void QCALLTYPE QReleaseTypeNameParser(TypeName * pTypeName);
+ static void QCALLTYPE QGetNames (TypeName * pTypeName, QCall::ObjectHandleOnStack pNames);
+ static void QCALLTYPE QGetTypeArguments (TypeName * pTypeName, QCall::ObjectHandleOnStack pTypeArguments);
+ static void QCALLTYPE QGetModifiers (TypeName * pTypeName, QCall::ObjectHandleOnStack pModifiers);
+ static void QCALLTYPE QGetAssemblyName (TypeName * pTypeName, QCall::StringHandleOnStack pAssemblyName);
+#endif //!FEATURE_CORECLR
+
+ //-------------------------------------------------------------------------------------------
+ // Retrieves a type from an assembly. It requires the caller to know which assembly
+ // the type is in.
+ //-------------------------------------------------------------------------------------------
+ static TypeHandle GetTypeFromAssembly(LPCWSTR szTypeName, Assembly *pAssembly, BOOL bThrowIfNotFound = TRUE);
+
+ TypeHandle GetTypeFromAsm(BOOL bForIntrospection);
+
+ //-------------------------------------------------------------------------------------------
+ // Retrieves a type. Will assert if the name is not fully qualified.
+ //-------------------------------------------------------------------------------------------
+ static TypeHandle GetTypeFromAsmQualifiedName(LPCWSTR szFullyQualifiedName, BOOL bForIntrospection);
+
+
+ //-------------------------------------------------------------------------------------------
+ // This version is used for resolving types named in custom attributes such as those used
+ // for interop. Thus, it follows a well-known multistage set of rules for determining which
+ // assembly the type is in. It will also enforce that the requesting assembly has access
+ // rights to the type being loaded.
+ //
+ // The search logic is:
+ //
+ // if szTypeName is ASM-qualified, only that assembly will be searched.
+ // if szTypeName is not ASM-qualified, we will search for the types in the following order:
+ // - in pRequestingAssembly (if not NULL). pRequestingAssembly is the assembly that contained
+ // the custom attribute from which the typename was derived.
+ // - in mscorlib.dll
+ // - raise an AssemblyResolveEvent() in the current appdomain
+ //
+ // pRequestingAssembly may be NULL. In that case, the "visibility" check will simply check that
+ // the loaded type has public access.
+ //
+ //--------------------------------------------------------------------------------------------
+ static TypeHandle GetTypeUsingCASearchRules(LPCUTF8 szTypeName, Assembly *pRequestingAssembly, BOOL *pfTypeNameWasQualified = NULL, BOOL bDoVisibilityChecks = TRUE);
+ static TypeHandle GetTypeUsingCASearchRules(LPCWSTR szTypeName, Assembly *pRequestingAssembly, BOOL *pfTypeNameWasQualified = NULL, BOOL bDoVisibilityChecks = TRUE);
+
+
+ //--------------------------------------------------------------------------------------------------------------
+ // This everything-but-the-kitchen-sink version is what used to be called "GetType()". It exposes all the
+ // funky knobs needed for implementing the specific requirements of the managed Type.GetType() apis and friends.
+// Really that knowledge shouldn't even be embedded in the TypeParse class at all, but for now we'll
+// settle for giving this entrypoint a really ugly name so that only the two FCALLs that really need it
+// will call it.
+ //--------------------------------------------------------------------------------------------------------------
+ static TypeHandle GetTypeManaged(
+ LPCWSTR szTypeName,
+ DomainAssembly* pAssemblyGetType,
+ BOOL bThrowIfNotFound,
+ BOOL bIgnoreCase,
+ BOOL bIntrospectionOnly,
+ BOOL bProhibitAssemblyQualifiedName,
+ StackCrawlMark* pStackMark,
+ BOOL bLoadTypeFromPartialNameHack,
+ OBJECTREF *pKeepAlive
+#ifdef FEATURE_HOSTED_BINDER
+ , ICLRPrivBinder * pPrivHostBinder = nullptr
+#endif
+ );
+
+
+public:
+ SString* GetAssembly() { WRAPPER_NO_CONTRACT; return &m_assembly; }
+
+private:
+ TypeName() : m_bIsGenericArgument(FALSE), m_count(0) { LIMITED_METHOD_CONTRACT; }
+ TypeName* AddGenericArgument();
+
+ SString* AddName()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return m_names.AppendEx(m_nestNameFactory.Create());
+ }
+
+ SArray<SString*>& GetNames() { WRAPPER_NO_CONTRACT; return m_names; }
+ SArray<TypeName*>& GetGenericArguments() { WRAPPER_NO_CONTRACT; return m_genericArguments; }
+ SArray<DWORD>& GetSignature() { WRAPPER_NO_CONTRACT; return m_signature; }
+ void SetByRef() { WRAPPER_NO_CONTRACT; m_signature.Append(ELEMENT_TYPE_BYREF); }
+ void SetPointer() { WRAPPER_NO_CONTRACT; m_signature.Append(ELEMENT_TYPE_PTR); }
+ void SetSzArray() { WRAPPER_NO_CONTRACT; m_signature.Append(ELEMENT_TYPE_SZARRAY); }
+
+ void SetArray(DWORD rank)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ m_signature.Append(ELEMENT_TYPE_ARRAY);
+ m_signature.Append(rank);
+ }
+
+ SString* ToString(SString* pBuf, BOOL bAssemblySpec = FALSE, BOOL bSignature = FALSE, BOOL bGenericArguments = FALSE);
+
+private:
+ //----------------------------------------------------------------------------------------------------------------
+ // This is the "uber" GetType() that all public GetType() funnels through. It's main job is to figure out which
+ // Assembly to load the type from and then invoke GetTypeHaveAssembly.
+ //
+ // It's got a highly baroque interface partly for historical reasons and partly because it's the uber-function
+ // for all of the possible GetTypes.
+ //----------------------------------------------------------------------------------------------------------------
+ TypeHandle GetTypeWorker(
+ BOOL bThrowIfNotFound,
+ BOOL bIgnoreCase,
+ BOOL bIntrospectionOnly,
+ Assembly* pAssemblyGetType,
+
+ BOOL fEnableCASearchRules,
+
+ BOOL bProhibitAssemblyQualifiedName,
+
+ StackCrawlMark* pStackMark,
+ Assembly* pRequestingAssembly,
+#ifdef FEATURE_HOSTED_BINDER
+ ICLRPrivBinder * pPrivHostBinder,
+#endif
+ BOOL bLoadTypeFromPartialNameHack,
+ OBJECTREF *pKeepAlive);
+
+ //----------------------------------------------------------------------------------------------------------------
+ // These functions are the ones that actually loads the type once we've pinned down the Assembly it's in.
+ //----------------------------------------------------------------------------------------------------------------
+ TypeHandle GetTypeHaveAssembly(Assembly* pAssembly, BOOL bThrowIfNotFound, BOOL bIgnoreCase, OBJECTREF *pKeepAlive)
+ {
+ return GetTypeHaveAssemblyHelper(pAssembly, bThrowIfNotFound, bIgnoreCase, pKeepAlive, TRUE);
+ }
+ TypeHandle GetTypeHaveAssemblyHelper(Assembly* pAssembly, BOOL bThrowIfNotFound, BOOL bIgnoreCase, OBJECTREF *pKeepAlive, BOOL bRecurse);
+
+#ifndef FEATURE_CORECLR
+ SAFEHANDLE GetSafeHandle();
+#endif //!FEATURE_CORECLR
+
+private:
+ BOOL m_bIsGenericArgument;
+ DWORD m_count;
+ InlineSArray<DWORD, 128> m_signature;
+ InlineSArray<TypeName*, 16> m_genericArguments;
+ InlineSArray<SString*, 16> m_names;
+ InlineSString<128> m_assembly;
+ Factory<InlineSString<128> > m_nestNameFactory;
+};
+
+#endif
diff --git a/src/vm/typestring.cpp b/src/vm/typestring.cpp
new file mode 100644
index 0000000000..f13b84ff7a
--- /dev/null
+++ b/src/vm/typestring.cpp
@@ -0,0 +1,1674 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ---------------------------------------------------------------------------
+// typestring.cpp
+// ---------------------------------------------------------------------------
+//
+
+//
+// This module contains a helper function used to produce string
+// representations of types, with options to control the appearance of
+// namespace and assembly information. Its primary use is in
+// reflection (Type.Name, Type.FullName, Type.ToString, etc) but over
+// time it could replace the use of TypeHandle.GetName etc for
+// diagnostic messages.
+//
+// See the header file for more details
+// ---------------------------------------------------------------------------
+
+
+#include "common.h"
+#include "class.h"
+#include "typehandle.h"
+#include "sstring.h"
+#include "sigformat.h"
+#include "typeparse.h"
+#include "typestring.h"
+#include "ex.h"
+#include "typedesc.h"
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
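+// Thin QCALL wrappers: each forwards a single operation to the native
+// TypeNameBuilder on behalf of managed callers.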
+TypeNameBuilder * QCALLTYPE TypeNameBuilder::_CreateTypeNameBuilder()
+{
+ QCALL_CONTRACT;
+
+ TypeNameBuilder * retVal = NULL;
+ BEGIN_QCALL;
+ retVal = new TypeNameBuilder();
+ END_QCALL;
+
+ return retVal;
+}
+
+void QCALLTYPE TypeNameBuilder::_ReleaseTypeNameBuilder(TypeNameBuilder * pTnb)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ delete pTnb;
+ END_QCALL;
+}
+
+void QCALLTYPE TypeNameBuilder::_ToString(TypeNameBuilder * pTnb, QCall::StringHandleOnStack retString)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ retString.Set(*pTnb->GetString());
+ END_QCALL;
+}
+
+void QCALLTYPE TypeNameBuilder::_AddName(TypeNameBuilder * pTnb, LPCWSTR wszName)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ pTnb->AddName(wszName);
+ END_QCALL;
+}
+
+void QCALLTYPE TypeNameBuilder::_AddAssemblySpec(TypeNameBuilder * pTnb, LPCWSTR wszAssemblySpec)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ pTnb->AddAssemblySpec(wszAssemblySpec);
+ END_QCALL;
+}
+
+void QCALLTYPE TypeNameBuilder::_OpenGenericArguments(TypeNameBuilder * pTnb)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ pTnb->OpenGenericArguments();
+ END_QCALL;
+}
+
+void QCALLTYPE TypeNameBuilder::_CloseGenericArguments(TypeNameBuilder * pTnb)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ pTnb->CloseGenericArguments();
+ END_QCALL;
+}
+
+void QCALLTYPE TypeNameBuilder::_OpenGenericArgument(TypeNameBuilder * pTnb)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ pTnb->OpenGenericArgument();
+ END_QCALL;
+}
+
+void QCALLTYPE TypeNameBuilder::_CloseGenericArgument(TypeNameBuilder * pTnb)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ pTnb->CloseGenericArgument();
+ END_QCALL;
+}
+
+void QCALLTYPE TypeNameBuilder::_AddPointer(TypeNameBuilder * pTnb)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ pTnb->AddPointer();
+ END_QCALL;
+}
+
+void QCALLTYPE TypeNameBuilder::_AddByRef(TypeNameBuilder * pTnb)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ pTnb->AddByRef();
+ END_QCALL;
+}
+
+void QCALLTYPE TypeNameBuilder::_AddSzArray(TypeNameBuilder * pTnb)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ pTnb->AddSzArray();
+ END_QCALL;
+}
+
+void QCALLTYPE TypeNameBuilder::_AddArray(TypeNameBuilder * pTnb, DWORD dwRank)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ pTnb->AddArray(dwRank);
+ END_QCALL;
+}
+
+void QCALLTYPE TypeNameBuilder::_Clear(TypeNameBuilder * pTnb)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ pTnb->Clear();
+ END_QCALL;
+}
+
+#endif
+
+//
+// TypeNameBuilder
+//
+TypeNameBuilder::TypeNameBuilder(SString* pStr, ParseState parseState /*= ParseStateSTART*/) :
+ m_pStr(NULL)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ Clear();
+ m_pStr = pStr;
+ m_parseState = parseState;
+}
+
+void TypeNameBuilder::PushOpenGenericArgument()
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_stack.Push(m_pStr->GetCount());
+}
+
+void TypeNameBuilder::PopOpenGenericArgument()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ COUNT_T index = m_stack.Pop();
+
+ if (!m_bHasAssemblySpec)
+ m_pStr->Delete(m_pStr->Begin() + index - 1, 1);
+
+ m_bHasAssemblySpec = FALSE;
+}
+
+/* This method escapes szName and appends it to this TypeNameBuilder */
+void TypeNameBuilder::EscapeName(LPCWSTR szName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (TypeString::ContainsReservedChar(szName))
+ {
+ while (* szName)
+ {
+ WCHAR c = * szName ++;
+
+ if (IsTypeNameReservedChar(c))
+ Append(W('\\'));
+
+ Append(c);
+ }
+ }
+ else
+ {
+ Append(szName);
+ }
+}
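+
+// For example, assuming IsTypeNameReservedChar treats the usual CLR type-name
+// metacharacters (',', '+', '&', '*', '[', ']', '\\') as reserved, a single
+// identifier "A+B,C" would be appended as "A\+B\,C" so that the '+' and ','
+// cannot be misread as a nesting or generic-argument separator.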
+
+void TypeNameBuilder::EscapeAssemblyName(LPCWSTR szName)
+{
+ WRAPPER_NO_CONTRACT;
+
+ Append(szName);
+}
+
+void TypeNameBuilder::EscapeEmbeddedAssemblyName(LPCWSTR szName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LPCWSTR itr = szName;
+ bool bContainsReservedChar = false;
+
+ while (*itr)
+ {
+ if (W(']') == *itr++)
+ {
+ bContainsReservedChar = true;
+ break;
+ }
+ }
+
+ if (bContainsReservedChar)
+ {
+ itr = szName;
+ while (*itr)
+ {
+ WCHAR c = *itr++;
+ if (c == ']')
+ Append(W('\\'));
+
+ Append(c);
+ }
+ }
+ else
+ {
+ Append(szName);
+ }
+}
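+
+// For example, an embedded assembly display name that happens to contain a
+// ']' (pathological but possible), such as "mylib, Custom=ab]cd", would be
+// appended as "mylib, Custom=ab\]cd" so the ']' cannot prematurely close the
+// enclosing generic-argument bracket. Unlike EscapeName, no other character
+// is escaped here.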
+
+HRESULT TypeNameBuilder::OpenGenericArgument()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!CheckParseState(ParseStateSTART))
+ return Fail();
+
+ if (m_instNesting == 0)
+ return Fail();
+
+ HRESULT hr = S_OK;
+
+ m_parseState = ParseStateSTART;
+ m_bNestedName = FALSE;
+
+ if (!m_bFirstInstArg)
+ Append(W(','));
+
+ m_bFirstInstArg = FALSE;
+
+ if (m_bUseAngleBracketsForGenerics)
+ Append(W('<'));
+ else
+ Append(W('['));
+ PushOpenGenericArgument();
+
+ return hr;
+}
+
+HRESULT TypeNameBuilder::AddName(LPCWSTR szName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!szName)
+ return Fail();
+
+ if (!CheckParseState(ParseStateSTART | ParseStateNAME))
+ return Fail();
+
+ HRESULT hr = S_OK;
+
+ m_parseState = ParseStateNAME;
+
+ if (m_bNestedName)
+ Append(W('+'));
+
+ m_bNestedName = TRUE;
+
+ EscapeName(szName);
+
+ return hr;
+}
+
+HRESULT TypeNameBuilder::AddName(LPCWSTR szName, LPCWSTR szNamespace)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!szName)
+ return Fail();
+
+ if (!CheckParseState(ParseStateSTART | ParseStateNAME))
+ return Fail();
+
+ HRESULT hr = S_OK;
+
+ m_parseState = ParseStateNAME;
+
+ if (m_bNestedName)
+ Append(W('+'));
+
+ m_bNestedName = TRUE;
+
+ if (szNamespace && *szNamespace)
+ {
+ EscapeName(szNamespace);
+ Append(W('.'));
+ }
+
+ EscapeName(szName);
+
+ return hr;
+}
+
+HRESULT TypeNameBuilder::OpenGenericArguments()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!CheckParseState(ParseStateNAME))
+ return Fail();
+
+ HRESULT hr = S_OK;
+
+ m_parseState = ParseStateSTART;
+ m_instNesting ++;
+ m_bFirstInstArg = TRUE;
+
+ if (m_bUseAngleBracketsForGenerics)
+ Append(W('<'));
+ else
+ Append(W('['));
+
+ return hr;
+}
+
+HRESULT TypeNameBuilder::CloseGenericArguments()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!m_instNesting)
+ return Fail();
+ if (!CheckParseState(ParseStateSTART))
+ return Fail();
+
+ HRESULT hr = S_OK;
+
+ m_parseState = ParseStateGENARGS;
+
+ m_instNesting --;
+
+ if (m_bFirstInstArg)
+ {
+ m_pStr->Truncate(m_pStr->End() - 1);
+ }
+ else
+ {
+ if (m_bUseAngleBracketsForGenerics)
+ Append(W('>'));
+ else
+ Append(W(']'));
+ }
+
+ return hr;
+}
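+
+// Note: an OpenGenericArguments/CloseGenericArguments pair with no arguments
+// in between truncates the opening bracket again, so "List`1" is produced
+// rather than "List`1[".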
+
+HRESULT TypeNameBuilder::AddPointer()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!CheckParseState(ParseStateNAME | ParseStateGENARGS | ParseStatePTRARR))
+ return Fail();
+
+ m_parseState = ParseStatePTRARR;
+
+ Append(W('*'));
+
+ return S_OK;
+}
+
+HRESULT TypeNameBuilder::AddByRef()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!CheckParseState(ParseStateNAME | ParseStateGENARGS | ParseStatePTRARR))
+ return Fail();
+
+ m_parseState = ParseStateBYREF;
+
+ Append(W('&'));
+
+ return S_OK;
+}
+
+HRESULT TypeNameBuilder::AddSzArray()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!CheckParseState(ParseStateNAME | ParseStateGENARGS | ParseStatePTRARR))
+ return Fail();
+
+ m_parseState = ParseStatePTRARR;
+
+ Append(W("[]"));
+
+ return S_OK;
+}
+
+HRESULT TypeNameBuilder::AddArray(DWORD rank)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!CheckParseState(ParseStateNAME | ParseStateGENARGS | ParseStatePTRARR))
+ return Fail();
+
+ m_parseState = ParseStatePTRARR;
+
+ if (rank == 0) // rank is unsigned, so zero is the only invalid value here
+ return E_INVALIDARG;
+
+ if (rank == 1)
+ Append(W("[*]"));
+ else if (rank > 64)
+ {
+ // Only taken on an error path; the runtime will not load arrays of more than 32 dimensions
+ WCHAR wzDim[128];
+ _snwprintf_s(wzDim, 128, _TRUNCATE, W("[%d]"), rank);
+ Append(wzDim);
+ }
+ else
+ {
+ WCHAR* wzDim = new (nothrow) WCHAR[rank+3];
+
+ if(wzDim == NULL) // allocation failed, do it the long way (each Append -> memory realloc)
+ {
+ Append(W('['));
+ for(COUNT_T i = 1; i < rank; i ++)
+ Append(W(','));
+ Append(W(']'));
+ }
+ else // allocation OK, do it the fast way
+ {
+ WCHAR* pwz = wzDim+1;
+ *wzDim = '[';
+ for(COUNT_T i = 1; i < rank; i++, pwz++) *pwz=',';
+ *pwz = ']';
+ *(++pwz) = 0;
+ Append(wzDim);
+ delete [] wzDim;
+ }
+ }
+
+ return S_OK;
+}
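+
+// For reference, AddArray emits "[*]" for rank 1, "[,]" for rank 2, "[,,]"
+// for rank 3, and so on (rank - 1 commas between the brackets), matching the
+// reflection notation for general (non-SZARRAY) arrays.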
+
+HRESULT TypeNameBuilder::CloseGenericArgument()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!CheckParseState(ParseStateNAME | ParseStateGENARGS | ParseStatePTRARR | ParseStateBYREF | ParseStateASSEMSPEC))
+ return Fail();
+
+ if (m_instNesting == 0)
+ return Fail();
+
+ m_parseState = ParseStateSTART;
+
+ if (m_bHasAssemblySpec)
+ {
+ if (m_bUseAngleBracketsForGenerics)
+ Append(W('>'));
+ else
+ Append(W(']'));
+ }
+
+ PopOpenGenericArgument();
+
+ return S_OK;
+}
+
+HRESULT TypeNameBuilder::AddAssemblySpec(LPCWSTR szAssemblySpec)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!CheckParseState(ParseStateNAME | ParseStateGENARGS | ParseStatePTRARR | ParseStateBYREF))
+ return Fail();
+
+ HRESULT hr = S_OK;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+
+ m_parseState = ParseStateASSEMSPEC;
+
+ if (szAssemblySpec && *szAssemblySpec)
+ {
+ Append(W(", "));
+
+ if (m_instNesting > 0)
+ {
+ EscapeEmbeddedAssemblyName(szAssemblySpec);
+ }
+ else
+ {
+ EscapeAssemblyName(szAssemblySpec);
+ }
+
+ m_bHasAssemblySpec = TRUE;
+ hr = S_OK;
+ }
+
+ END_SO_INTOLERANT_CODE;
+
+ return hr;
+}
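+
+// A minimal usage sketch (illustrative only; the buffer name is made up).
+// It shows how CloseGenericArgument keeps the doubled brackets once an
+// assembly spec has been added, and drops the inner '[' otherwise:
+//
+//     InlineSString<128> buf;
+//     TypeNameBuilder tnb(&buf);
+//     tnb.AddName(W("List`1"), W("System.Collections.Generic"));
+//     tnb.OpenGenericArguments();             // ...List`1[
+//     tnb.OpenGenericArgument();              // ...List`1[[
+//     tnb.AddName(W("Int32"), W("System"));   // ...List`1[[System.Int32
+//     tnb.AddAssemblySpec(W("mscorlib"));     // ...List`1[[System.Int32, mscorlib
+//     tnb.CloseGenericArgument();             // ...List`1[[System.Int32, mscorlib]
+//     tnb.CloseGenericArguments();            // ...List`1[[System.Int32, mscorlib]]
+//
+// Without the AddAssemblySpec call the result would instead be
+// "System.Collections.Generic.List`1[System.Int32]".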
+
+HRESULT TypeNameBuilder::ToString(BSTR* pszStringRepresentation)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!CheckParseState(ParseStateNAME | ParseStateGENARGS | ParseStatePTRARR | ParseStateBYREF | ParseStateASSEMSPEC))
+ return Fail();
+
+ if (m_instNesting)
+ return Fail();
+
+ *pszStringRepresentation = SysAllocString(m_pStr->GetUnicode());
+
+ return S_OK;
+}
+
+HRESULT TypeNameBuilder::Clear()
+{
+ CONTRACTL
+ {
+ THROWS; // TypeNameBuilder::Stack::Clear might throw.
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CONTRACT_VIOLATION(SOToleranceViolation);
+
+ if (m_pStr)
+ {
+ m_pStr->Clear();
+ }
+ m_bNestedName = FALSE;
+ m_instNesting = 0;
+ m_bFirstInstArg = FALSE;
+ m_parseState = ParseStateSTART;
+ m_bHasAssemblySpec = FALSE;
+ m_bUseAngleBracketsForGenerics = FALSE;
+ m_stack.Clear();
+
+ return S_OK;
+}
+
+
+
+// Append the name of the type td to the string
+// The following flags in the FormatFlags argument are significant: FormatNamespace
+void TypeString::AppendTypeDef(SString& ss, IMDInternalImport *pImport, mdTypeDef td, DWORD format)
+{
+ CONTRACT_VOID
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ THROWS;
+ }
+ CONTRACT_END
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
+ {
+ TypeNameBuilder tnb(&ss, TypeNameBuilder::ParseStateNAME);
+ AppendTypeDef(tnb, pImport, td, format);
+ }
+ END_SO_INTOLERANT_CODE;
+
+ RETURN;
+}
+
+
+void TypeString::AppendTypeDef(TypeNameBuilder& tnb, IMDInternalImport *pImport, mdTypeDef td, DWORD format)
+{
+ CONTRACT_VOID
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ THROWS;
+ PRECONDITION(CheckPointer(pImport));
+ PRECONDITION(TypeFromToken(td) == mdtTypeDef);
+ }
+ CONTRACT_END
+
+ LPCUTF8 szName;
+ LPCUTF8 szNameSpace;
+ IfFailThrow(pImport->GetNameOfTypeDef(td, &szName, &szNameSpace));
+
+ const WCHAR *wszNameSpace = NULL;
+
+ InlineSString<128> ssName(SString::Utf8, szName);
+ InlineSString<128> ssNameSpace;
+
+ if (format & FormatNamespace)
+ {
+ ssNameSpace.SetUTF8(szNameSpace);
+ wszNameSpace = ssNameSpace.GetUnicode();
+ }
+
+ tnb.AddName(ssName.GetUnicode(), wszNameSpace);
+
+ RETURN;
+}
+
+void TypeString::AppendNestedTypeDef(TypeNameBuilder& tnb, IMDInternalImport *pImport, mdTypeDef td, DWORD format)
+{
+ CONTRACT_VOID
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ THROWS;
+ PRECONDITION(CheckPointer(pImport));
+ PRECONDITION(TypeFromToken(td) == mdtTypeDef);
+ }
+ CONTRACT_END
+
+ DWORD dwAttr;
+ IfFailThrow(pImport->GetTypeDefProps(td, &dwAttr, NULL));
+
+ StackSArray<mdTypeDef> arNames;
+ arNames.Append(td);
+ if (format & FormatNamespace && IsTdNested(dwAttr))
+ {
+ while (SUCCEEDED(pImport->GetNestedClassProps(td, &td)))
+ arNames.Append(td);
+ }
+
+ for(SCOUNT_T i = arNames.GetCount() - 1; i >= 0; i --)
+ AppendTypeDef(tnb, pImport, arNames[i], format);
+
+ RETURN;
+}
+
+// Append a square-bracket-enclosed, comma-separated list of n type parameters in inst to the string s
+// and enclose each parameter in square brackets to disambiguate the commas
+// The following flags in the FormatFlags argument are significant: FormatNamespace FormatFullInst FormatAssembly FormatNoVersion
+void TypeString::AppendInst(SString& ss, Instantiation inst, DWORD format)
+{
+ CONTRACT_VOID
+ {
+ MODE_ANY;
+ if (format & (FormatAssembly|FormatFullInst)) GC_TRIGGERS; else GC_NOTRIGGER;
+ THROWS;
+ }
+ CONTRACT_END
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
+ {
+ TypeNameBuilder tnb(&ss, TypeNameBuilder::ParseStateNAME);
+ if ((format & FormatAngleBrackets) != 0)
+ tnb.SetUseAngleBracketsForGenerics(TRUE);
+ AppendInst(tnb, inst, format);
+ }
+ END_SO_INTOLERANT_CODE;
+
+ RETURN;
+}
+
+void TypeString::AppendInst(TypeNameBuilder& tnb, Instantiation inst, DWORD format)
+{
+ CONTRACT_VOID
+ {
+ MODE_ANY;
+ THROWS;
+ if (format & (FormatAssembly|FormatFullInst)) GC_TRIGGERS; else GC_NOTRIGGER;
+ PRECONDITION(!inst.IsEmpty());
+ }
+ CONTRACT_END
+
+ tnb.OpenGenericArguments();
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ tnb.OpenGenericArgument();
+
+ TypeHandle thArg = inst[i];
+
+ if ((format & FormatFullInst) != 0 && !thArg.IsGenericVariable())
+ {
+ AppendType(tnb, thArg, Instantiation(), format | FormatNamespace | FormatAssembly);
+ }
+ else
+ {
+ AppendType(tnb, thArg, Instantiation(), format & (FormatNamespace | FormatAngleBrackets
+#ifdef _DEBUG
+ | FormatDebug
+#endif
+ ));
+ }
+
+ tnb.CloseGenericArgument();
+ }
+
+ tnb.CloseGenericArguments();
+
+ RETURN;
+}
+
+void TypeString::AppendParamTypeQualifier(TypeNameBuilder& tnb, CorElementType kind, DWORD rank)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CorTypeInfo::IsModifier(kind));
+ }
+ CONTRACTL_END
+
+ switch (kind)
+ {
+ case ELEMENT_TYPE_BYREF :
+ tnb.AddByRef();
+ break;
+ case ELEMENT_TYPE_PTR :
+ tnb.AddPointer();
+ break;
+ case ELEMENT_TYPE_SZARRAY :
+ tnb.AddSzArray();
+ break;
+ case ELEMENT_TYPE_ARRAY :
+ tnb.AddArray(rank);
+ break;
+ default :
+ break;
+ }
+}
+
+// Append a representation of the type t to the string s
+// The following flags in the FormatFlags argument are significant: FormatNamespace FormatFullInst FormatAssembly FormatNoVersion
+
+void TypeString::AppendType(SString& ss, TypeHandle ty, DWORD format)
+{
+ CONTRACT_VOID
+ {
+ MODE_ANY;
+ if (format & (FormatAssembly|FormatFullInst)) GC_TRIGGERS; else GC_NOTRIGGER;
+ THROWS;
+ }
+ CONTRACT_END
+
+ AppendType(ss, ty, Instantiation(), format);
+
+ RETURN;
+}
+
+void TypeString::AppendType(SString& ss, TypeHandle ty, Instantiation typeInstantiation, DWORD format)
+{
+ CONTRACT_VOID
+ {
+ MODE_ANY;
+ if (format & (FormatAssembly|FormatFullInst)) GC_TRIGGERS; else GC_NOTRIGGER;
+ THROWS;
+ }
+ CONTRACT_END
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
+ {
+ TypeNameBuilder tnb(&ss);
+ if ((format & FormatAngleBrackets) != 0)
+ tnb.SetUseAngleBracketsForGenerics(TRUE);
+ AppendType(tnb, ty, typeInstantiation, format);
+ }
+ END_SO_INTOLERANT_CODE;
+
+ RETURN;
+}
+
+void TypeString::AppendType(TypeNameBuilder& tnb, TypeHandle ty, Instantiation typeInstantiation, DWORD format)
+{
+ CONTRACT_VOID
+ {
+ MODE_ANY;
+
+ /* This method calls Assembly::GetDisplayName. Since that function
+ uses Fusion which takes some Crsts in some places, it is GC_TRIGGERS.
+ It could be made GC_NOTRIGGER by factoring out Assembly::GetDisplayName.
+ However, it's better to leave stuff as GC_TRIGGERS unless really needed,
+ as GC_NOTRIGGER ties your hands up. */
+ if (format & (FormatAssembly|FormatFullInst)) GC_TRIGGERS; else GC_NOTRIGGER;
+ THROWS;
+ }
+ CONTRACT_END
+
+ INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(10);
+
+ BOOL bToString = (format & (FormatNamespace|FormatFullInst|FormatAssembly)) == FormatNamespace;
+
+ // It's null!
+ if (ty.IsNull())
+ {
+ tnb.AddName(W("(null)"));
+ }
+ else
+ // It's not restored yet!
+ if (ty.IsEncodedFixup())
+ {
+ tnb.AddName(W("(fixup)"));
+ }
+ else
+
+ // It's an array, with format
+ // element_ty[] (1-d, SZARRAY)
+ // element_ty[*] (1-d, ARRAY)
+ // element_ty[,] (2-d, ARRAY) etc
+ // or a pointer (*) or byref (&)
+ if (ty.HasTypeParam() || (!ty.IsTypeDesc() && ty.AsMethodTable()->IsArray()))
+ {
+ if (ty.GetSignatureCorElementType() != ELEMENT_TYPE_VALUETYPE)
+ {
+ DWORD rank;
+ TypeHandle elemType;
+ if (ty.HasTypeParam())
+ {
+ rank = ty.IsArray() ? ty.AsArray()->GetRank() : 0;
+ elemType = ty.GetTypeParam();
+ }
+ else
+ {
+ MethodTable *pMT = ty.GetMethodTable();
+ PREFIX_ASSUME(pMT != NULL);
+ rank = pMT->GetRank();
+ elemType = pMT->GetApproxArrayElementTypeHandle();
+ }
+
+ _ASSERTE(!elemType.IsNull());
+ AppendType(tnb, elemType, Instantiation(), format & ~FormatAssembly);
+ AppendParamTypeQualifier(tnb, ty.GetSignatureCorElementType(), rank);
+ }
+ else
+ {
+ tnb.Append(W("VALUETYPE"));
+ TypeHandle elemType = ty.GetTypeParam();
+ AppendType(tnb, elemType, Instantiation(), format & ~FormatAssembly);
+ }
+ }
+
+ // ...or type parameter
+ else if (ty.IsGenericVariable())
+ {
+ PTR_TypeVarTypeDesc tyvar = dac_cast<PTR_TypeVarTypeDesc>(ty.AsTypeDesc());
+
+ mdGenericParam token = tyvar->GetToken();
+
+ LPCSTR szName = NULL;
+ mdToken mdOwner;
+
+ IfFailThrow(ty.GetModule()->GetMDImport()->GetGenericParamProps(token, NULL, NULL, &mdOwner, NULL, &szName));
+
+ _ASSERTE(TypeFromToken(mdOwner) == mdtTypeDef || TypeFromToken(mdOwner) == mdtMethodDef);
+
+ LPCSTR szPrefix;
+ if (!(format & FormatGenericParam))
+ szPrefix = "";
+ else if (TypeFromToken(mdOwner) == mdtTypeDef)
+ szPrefix = "!";
+ else
+ szPrefix = "!!";
+
+ SmallStackSString pName(SString::Utf8, szPrefix);
+ pName.AppendUTF8(szName);
+ tnb.AddName(pName.GetUnicode());
+
+ format &= ~FormatAssembly;
+ }
+
+ // ...or function pointer
+ else if (ty.IsFnPtrType())
+ {
+ // Don't attempt to format this currently; it may trigger a GC due to fixups.
+ tnb.AddName(W("(fnptr)"));
+ }
+
+ // ...otherwise it's just a plain type def or an instantiated type
+ else
+ {
+ // Get the TypeDef token and attributes
+ IMDInternalImport *pImport = ty.GetMethodTable()->GetMDImport();
+ mdTypeDef td = ty.GetCl();
+ _ASSERTE(!IsNilToken(td));
+
+#ifdef _DEBUG
+ if (format & FormatDebug)
+ {
+ WCHAR wzAddress[128];
+ _snwprintf_s(wzAddress, 128, _TRUNCATE, W("(%p)"), dac_cast<TADDR>(ty.AsPtr()));
+ tnb.AddName(wzAddress);
+ }
+#endif
+ AppendNestedTypeDef(tnb, pImport, td, format);
+
+ // Append the instantiation
+ if ((format & (FormatNamespace|FormatAssembly)) && ty.HasInstantiation() && (!ty.IsGenericTypeDefinition() || bToString))
+ {
+ if (typeInstantiation.IsEmpty())
+ AppendInst(tnb, ty.GetInstantiation(), format);
+ else
+ AppendInst(tnb, typeInstantiation, format);
+ }
+ }
+
+ // Now append the assembly
+ if (format & FormatAssembly)
+ {
+ Assembly* pAssembly = ty.GetAssembly();
+ _ASSERTE(pAssembly != NULL);
+
+ StackSString pAssemblyName;
+#ifdef DACCESS_COMPILE
+ pAssemblyName.SetUTF8(pAssembly->GetSimpleName());
+#else
+ pAssembly->GetDisplayName(pAssemblyName,
+ ASM_DISPLAYF_PUBLIC_KEY_TOKEN | ASM_DISPLAYF_CONTENT_TYPE |
+ (format & FormatNoVersion ? 0 : ASM_DISPLAYF_VERSION | ASM_DISPLAYF_CULTURE));
+#endif
+
+ tnb.AddAssemblySpec(pAssemblyName.GetUnicode());
+
+ }
+
+ END_INTERIOR_STACK_PROBE;
+
+
+ RETURN;
+}
+
+void TypeString::AppendMethod(SString& s, MethodDesc *pMD, Instantiation typeInstantiation, const DWORD format)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ THROWS;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(pMD->IsRestored_NoLogging());
+ PRECONDITION(s.Check());
+ }
+ CONTRACTL_END
+
+ AppendMethodImpl(s, pMD, typeInstantiation, format);
+}
+
+void TypeString::AppendMethodInternal(SString& s, MethodDesc *pMD, const DWORD format)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ SUPPORTS_DAC;
+ THROWS;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(pMD->IsRestored_NoLogging());
+ PRECONDITION(s.Check());
+ }
+ CONTRACTL_END
+
+ AppendMethodImpl(s, pMD, Instantiation(), format);
+}
+
+void TypeString::AppendMethodImpl(SString& ss, MethodDesc *pMD, Instantiation typeInstantiation, const DWORD format)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ SUPPORTS_DAC;
+ THROWS;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(pMD->IsRestored_NoLogging());
+ PRECONDITION(ss.Check());
+ }
+ CONTRACTL_END
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
+ {
+ TypeHandle th;
+
+ if (pMD->IsDynamicMethod())
+ {
+ if (pMD->IsLCGMethod())
+ {
+ SString sss(SString::Literal, "DynamicClass");
+ ss += sss;
+ }
+ else if (pMD->IsILStub())
+ {
+ SString sss(SString::Literal, ILStubResolver::GetStubClassName(pMD));
+ ss += sss;
+ }
+ }
+ else
+ {
+ th = TypeHandle(pMD->GetMethodTable());
+ AppendType(ss, th, typeInstantiation, format);
+ }
+
+ SString sss1(SString::Literal, NAMESPACE_SEPARATOR_STR);
+ ss += sss1;
+ SString sss2(SString::Utf8, pMD->GetName());
+ ss += sss2;
+
+ if (pMD->HasMethodInstantiation() && !pMD->IsGenericMethodDefinition())
+ {
+ AppendInst(ss, pMD->GetMethodInstantiation(), format);
+ }
+
+ if (format & FormatSignature)
+ {
+ // @TODO: The argument list should be formatted nicely using AppendType()
+
+ SigFormat sigFormatter(pMD, th);
+ const char* sigStr = sigFormatter.GetCStringParmsOnly();
+ SString sss(SString::Utf8, sigStr);
+ ss += sss;
+ }
+
+ if (format & FormatStubInfo) {
+ if (pMD->IsInstantiatingStub())
+ {
+ SString sss(SString::Literal, "{inst-stub}");
+ ss += sss;
+ }
+ if (pMD->IsUnboxingStub())
+ {
+ SString sss(SString::Literal, "{unbox-stub}");
+ ss += sss;
+ }
+ if (pMD->IsSharedByGenericMethodInstantiations())
+ {
+ SString sss(SString::Literal, "{method-shared}");
+ ss += sss;
+ }
+ else if (pMD->IsSharedByGenericInstantiations())
+ {
+ SString sss(SString::Literal, "{shared}");
+ ss += sss;
+ }
+ if (pMD->RequiresInstMethodTableArg())
+ {
+ SString sss(SString::Literal, "{requires-mt-arg}");
+ ss += sss;
+ }
+ if (pMD->RequiresInstMethodDescArg())
+ {
+ SString sss(SString::Literal, "{requires-mdesc-arg}");
+ ss += sss;
+ }
+ }
+ }
+ END_SO_INTOLERANT_CODE;
+}
+
+void TypeString::AppendField(SString& s, FieldDesc *pFD, Instantiation typeInstantiation, const DWORD format /* = FormatNamespace */)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ THROWS;
+ PRECONDITION(CheckPointer(pFD));
+ PRECONDITION(s.Check());
+ }
+ CONTRACTL_END;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
+ {
+ TypeHandle th(pFD->GetApproxEnclosingMethodTable());
+ AppendType(s, th, typeInstantiation, format);
+
+ s.AppendUTF8(NAMESPACE_SEPARATOR_STR);
+ s.AppendUTF8(pFD->GetName());
+ }
+ END_SO_INTOLERANT_CODE;
+}
+
+#ifdef _DEBUG
+void TypeString::AppendMethodDebug(SString& ss, MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_TRIGGERS;
+ NOTHROW;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(pMD->IsRestored_NoLogging());
+ PRECONDITION(ss.Check());
+ }
+ CONTRACTL_END
+
+#ifndef DACCESS_COMPILE
+ EX_TRY
+ {
+ AppendMethodInternal(ss, pMD, FormatSignature | FormatNamespace);
+ }
+ EX_CATCH
+ {
+ // This function is only used as diagnostic aid in debug builds.
+ // If we run out of memory or hit some other problem,
+ // tough luck for the debugger.
+
+ // Should we set ss to Empty?
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+#endif
+}
+
+void TypeString::AppendTypeDebug(SString& ss, TypeHandle t)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ PRECONDITION(CheckPointer(t));
+ PRECONDITION(ss.Check());
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END
+
+#ifndef DACCESS_COMPILE
+ {
+ EX_TRY
+ {
+ AppendType(ss, t, FormatNamespace | FormatDebug);
+ }
+ EX_CATCH
+ {
+ // This function is only used as diagnostic aid in debug builds.
+ // If we run out of memory or hit some other problem,
+ // tough luck for the debugger.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+#endif
+}
+
+void TypeString::AppendTypeKeyDebug(SString& ss, TypeKey *pTypeKey)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ PRECONDITION(CheckPointer(pTypeKey));
+ PRECONDITION(ss.Check());
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END
+
+#ifndef DACCESS_COMPILE
+ {
+ EX_TRY
+ {
+ AppendTypeKey(ss, pTypeKey, FormatNamespace | FormatDebug);
+ }
+ EX_CATCH
+ {
+ // This function is only used as diagnostic aid in debug builds.
+ // If we run out of memory or hit some other problem,
+ // tough luck for the debugger.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+#endif
+}
+
+#endif // _DEBUG
+
+
+void TypeString::AppendTypeKey(TypeNameBuilder& tnb, TypeKey *pTypeKey, DWORD format)
+{
+ CONTRACT_VOID
+ {
+ MODE_ANY;
+ THROWS;
+ if (format & (FormatAssembly|FormatFullInst)) GC_TRIGGERS; else GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pTypeKey));
+ SO_INTOLERANT;
+ }
+ CONTRACT_END
+
+ Module *pModule = NULL;
+
+ // It's an array, with format
+ // element_ty[] (1-d, SZARRAY)
+ // element_ty[*] (1-d, ARRAY)
+ // element_ty[,] (2-d, ARRAY) etc
+ // or a pointer (*) or byref (&)
+ CorElementType kind = pTypeKey->GetKind();
+ if (CorTypeInfo::IsModifier(kind))
+ {
+ DWORD rank = 0;
+ TypeHandle elemType = pTypeKey->GetElementType();
+ if (CorTypeInfo::IsArray(kind))
+ {
+ rank = pTypeKey->GetRank();
+ }
+
+ AppendType(tnb, elemType, Instantiation(), format);
+ AppendParamTypeQualifier(tnb, kind, rank);
+ pModule = elemType.GetModule();
+ }
+ else if (kind == ELEMENT_TYPE_VALUETYPE)
+ {
+ tnb.Append(W("VALUETYPE"));
+ TypeHandle elemType = pTypeKey->GetElementType();
+ AppendType(tnb, elemType, Instantiation(), format);
+ pModule = elemType.GetModule();
+ }
+ else if (kind == ELEMENT_TYPE_FNPTR)
+ {
+ RETURN;
+ }
+
+ // ...otherwise it's just a plain type def or an instantiated type
+ else
+ {
+ // Get the TypeDef token and attributes
+ pModule = pTypeKey->GetModule();
+ if (pModule != NULL)
+ {
+ IMDInternalImport *pImport = pModule->GetMDImport();
+ mdTypeDef td = pTypeKey->GetTypeToken();
+ _ASSERTE(!IsNilToken(td));
+
+ AppendNestedTypeDef(tnb, pImport, td, format);
+
+ // Append the instantiation
+ if ((format & (FormatNamespace|FormatAssembly)) && pTypeKey->HasInstantiation())
+ AppendInst(tnb, pTypeKey->GetInstantiation(), format);
+ }
+
+ }
+
+ // Now append the assembly
+ if (pModule != NULL && (format & FormatAssembly))
+ {
+ Assembly* pAssembly = pModule->GetAssembly();
+ _ASSERTE(pAssembly != NULL);
+
+ StackSString pAssemblyName;
+#ifdef DACCESS_COMPILE
+ pAssemblyName.SetUTF8(pAssembly->GetSimpleName());
+#else
+ pAssembly->GetDisplayName(pAssemblyName,
+ ASM_DISPLAYF_PUBLIC_KEY_TOKEN | ASM_DISPLAYF_CONTENT_TYPE |
+ (format & FormatNoVersion ? 0 : ASM_DISPLAYF_VERSION | ASM_DISPLAYF_CULTURE));
+#endif
+ tnb.AddAssemblySpec(pAssemblyName.GetUnicode());
+ }
+
+ RETURN;
+}
+
+void TypeString::AppendTypeKey(SString& ss, TypeKey *pTypeKey, DWORD format)
+{
+ CONTRACT_VOID
+ {
+ MODE_ANY;
+ if (format & (FormatAssembly|FormatFullInst)) GC_TRIGGERS; else GC_NOTRIGGER;
+ THROWS;
+ PRECONDITION(CheckPointer(pTypeKey));
+ }
+ CONTRACT_END
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
+ {
+ TypeNameBuilder tnb(&ss);
+ AppendTypeKey(tnb, pTypeKey, format);
+ }
+ END_SO_INTOLERANT_CODE;
+
+ RETURN;
+}
+
+/*static*/
+void TypeString::EscapeSimpleTypeName(SString* ssTypeName, SString* ssEscapedTypeName)
+{
+ SString::Iterator itr = ssTypeName->Begin();
+ WCHAR c;
+ while ((c = *itr++) != W('\0'))
+ {
+ if (IsTypeNameReservedChar(c))
+ ssEscapedTypeName->Append(W("\\"));
+
+ ssEscapedTypeName->Append(c);
+ }
+}
+
+/*static*/
+bool TypeString::ContainsReservedChar(LPCWSTR pTypeName)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ WCHAR c;
+ while ((c = * pTypeName++) != W('\0'))
+ {
+ if (IsTypeNameReservedChar(c))
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+HRESULT __stdcall TypeNameBuilderWrapper::QueryInterface(REFIID riid, void **ppUnk)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ *ppUnk = 0;
+
+ if (riid == IID_IUnknown)
+ *ppUnk = (IUnknown *)this;
+ else if (riid == IID_ITypeNameBuilder)
+ *ppUnk = (ITypeNameBuilder*)this;
+ else
+ return (E_NOINTERFACE);
+
+ AddRef();
+ return S_OK;
+}
+
+ULONG __stdcall TypeNameBuilderWrapper::AddRef()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ return InterlockedIncrement(&m_ref);
+}
+
+ULONG __stdcall TypeNameBuilderWrapper::Release()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ SUPPORTS_DAC_HOST_ONLY;
+ }
+ CONTRACTL_END;
+
+ LONG ref = 0;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+
+ ref = InterlockedDecrement(&m_ref);
+ if (ref == 0)
+ delete this;
+
+ END_SO_INTOLERANT_CODE;
+
+ return ref;
+}
+
+
+HRESULT __stdcall TypeNameBuilderWrapper::OpenGenericArguments()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ hr = m_tnb.OpenGenericArguments();
+ END_SO_INTOLERANT_CODE;
+ return hr;
+}
+
+HRESULT __stdcall TypeNameBuilderWrapper::CloseGenericArguments()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ hr = m_tnb.CloseGenericArguments();
+ END_SO_INTOLERANT_CODE;
+ return hr;
+}
+
+HRESULT __stdcall TypeNameBuilderWrapper::OpenGenericArgument()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ hr = m_tnb.OpenGenericArgument();
+ END_SO_INTOLERANT_CODE;
+ return hr;
+}
+
+HRESULT __stdcall TypeNameBuilderWrapper::CloseGenericArgument()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ hr = m_tnb.CloseGenericArgument();
+ END_SO_INTOLERANT_CODE;
+ return hr;
+}
+
+HRESULT __stdcall TypeNameBuilderWrapper::AddName(LPCWSTR szName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ hr = m_tnb.AddName(szName);
+ END_SO_INTOLERANT_CODE;
+ return hr;
+}
+
+HRESULT __stdcall TypeNameBuilderWrapper::AddPointer()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ hr = m_tnb.AddPointer();
+ END_SO_INTOLERANT_CODE;
+ return hr;
+}
+
+HRESULT __stdcall TypeNameBuilderWrapper::AddByRef()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ hr = m_tnb.AddByRef();
+ END_SO_INTOLERANT_CODE;
+ return hr;
+}
+
+HRESULT __stdcall TypeNameBuilderWrapper::AddSzArray()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ hr = m_tnb.AddSzArray();
+ END_SO_INTOLERANT_CODE;
+ return hr;
+}
+
+HRESULT __stdcall TypeNameBuilderWrapper::AddArray(DWORD rank)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ hr = m_tnb.AddArray(rank);
+ END_SO_INTOLERANT_CODE;
+ return hr;
+}
+
+HRESULT __stdcall TypeNameBuilderWrapper::AddAssemblySpec(LPCWSTR szAssemblySpec)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ hr = m_tnb.AddAssemblySpec(szAssemblySpec);
+ END_SO_INTOLERANT_CODE;
+ return hr;
+}
+
+HRESULT __stdcall TypeNameBuilderWrapper::ToString(BSTR* pszStringRepresentation)
+{
+ WRAPPER_NO_CONTRACT;
+
+ HRESULT hr;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ hr = m_tnb.ToString(pszStringRepresentation);
+ END_SO_INTOLERANT_CODE;
+ return hr;
+}
+
+HRESULT __stdcall TypeNameBuilderWrapper::Clear()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
+ hr = m_tnb.Clear();
+ END_SO_INTOLERANT_CODE;
+ return hr;
+}
diff --git a/src/vm/typestring.h b/src/vm/typestring.h
new file mode 100644
index 0000000000..0987fbfa44
--- /dev/null
+++ b/src/vm/typestring.h
@@ -0,0 +1,268 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ---------------------------------------------------------------------------
+// typestring.h
+// ---------------------------------------------------------------------------
+//
+
+//
+// This module contains all helper functions required to produce
+// string representations of types, with options to control the
+// appearance of namespace and assembly information. Its primary use
+// is in reflection (Type.Name, Type.FullName, Type.ToString, etc) but
+// over time it could replace the use of TypeHandle.GetName etc for
+// diagnostic messages.
+//
+// ---------------------------------------------------------------------------
+
+
+#ifndef TYPESTRING_H
+#define TYPESTRING_H
+
+#include "common.h"
+#include "class.h"
+#include "typehandle.h"
+#include "sstring.h"
+#include "typekey.h"
+#include "typeparse.h"
+#include "field.h"
+
+class TypeLibExporter;
+class TypeString;
+
+class TypeNameBuilder
+{
+ friend class TypeNameBuilderWrapper;
+
+public:
+ static void QCALLTYPE _ReleaseTypeNameBuilder(TypeNameBuilder * pTnb);
+ static TypeNameBuilder * QCALLTYPE _CreateTypeNameBuilder();
+ static void QCALLTYPE _OpenGenericArguments(TypeNameBuilder * pTnb);
+ static void QCALLTYPE _CloseGenericArguments(TypeNameBuilder *pTnb);
+ static void QCALLTYPE _OpenGenericArgument(TypeNameBuilder * pTnb);
+ static void QCALLTYPE _CloseGenericArgument(TypeNameBuilder * pTnb);
+ static void QCALLTYPE _AddName(TypeNameBuilder * pTnb, LPCWSTR wszName);
+ static void QCALLTYPE _AddPointer(TypeNameBuilder * pTnb);
+ static void QCALLTYPE _AddByRef(TypeNameBuilder * pTnb);
+ static void QCALLTYPE _AddSzArray(TypeNameBuilder * pTnb);
+ static void QCALLTYPE _AddArray(TypeNameBuilder * pTnb, DWORD dwRank);
+ static void QCALLTYPE _AddAssemblySpec(TypeNameBuilder * pTnb, LPCWSTR wszAssemblySpec);
+ static void QCALLTYPE _ToString(TypeNameBuilder * pTnb, QCall::StringHandleOnStack retString);
+ static void QCALLTYPE _Clear(TypeNameBuilder * pTnb);
+
+private:
+ friend class TypeString;
+ friend SString* TypeName::ToString(SString*, BOOL, BOOL, BOOL);
+ friend TypeHandle TypeName::GetTypeWorker(BOOL, BOOL, BOOL, Assembly*, BOOL, BOOL, StackCrawlMark*, Assembly*,
+#ifdef FEATURE_HOSTED_BINDER
+ ICLRPrivBinder * pPrivHostBinder,
+#endif
+ BOOL, OBJECTREF *);
+ HRESULT OpenGenericArguments();
+ HRESULT CloseGenericArguments();
+ HRESULT OpenGenericArgument();
+ HRESULT CloseGenericArgument();
+ HRESULT AddName(LPCWSTR szName);
+ HRESULT AddName(LPCWSTR szName, LPCWSTR szNamespace);
+ HRESULT AddPointer();
+ HRESULT AddByRef();
+ HRESULT AddSzArray();
+ HRESULT AddArray(DWORD rank);
+ HRESULT AddAssemblySpec(LPCWSTR szAssemblySpec);
+ HRESULT ToString(BSTR* pszStringRepresentation);
+ HRESULT Clear();
+
+private:
+ class Stack
+ {
+ public:
+ Stack() : m_depth(0) { LIMITED_METHOD_CONTRACT; }
+
+ public:
+ COUNT_T Push(COUNT_T element) { WRAPPER_NO_CONTRACT; *m_stack.Append() = element; m_depth++; return Tos(); }
+ COUNT_T Pop()
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(GetDepth() > 0);
+ }
+ CONTRACTL_END;
+
+ COUNT_T tos = Tos();
+ m_stack.Delete(m_stack.End() - 1);
+ m_depth--;
+ return tos;
+ }
+ COUNT_T Tos() { WRAPPER_NO_CONTRACT; return m_stack.End()[-1]; }
+ void Clear() { WRAPPER_NO_CONTRACT; while(GetDepth()) Pop(); }
+ COUNT_T GetDepth() { WRAPPER_NO_CONTRACT; return m_depth; }
+
+ private:
+ INT32 m_depth;
+ InlineSArray<COUNT_T, 16> m_stack;
+ };
+
+
+public:
+ typedef enum
+ {
+ ParseStateSTART = 0x0001,
+ ParseStateNAME = 0x0004,
+ ParseStateGENARGS = 0x0008,
+ ParseStatePTRARR = 0x0010,
+ ParseStateBYREF = 0x0020,
+ ParseStateASSEMSPEC = 0x0080,
+ ParseStateERROR = 0x0100,
+ }
+ ParseState;
+
+public:
+ TypeNameBuilder(SString* pStr, ParseState parseState = ParseStateSTART);
+ TypeNameBuilder() { WRAPPER_NO_CONTRACT; m_pStr = &m_str; Clear(); }
+ void SetUseAngleBracketsForGenerics(BOOL value) { m_bUseAngleBracketsForGenerics = value; }
+ void Append(LPCWSTR pStr) { WRAPPER_NO_CONTRACT; m_pStr->Append(pStr); }
+ void Append(WCHAR c) { WRAPPER_NO_CONTRACT; m_pStr->Append(c); }
+ SString* GetString() { WRAPPER_NO_CONTRACT; return m_pStr; }
+
+private:
+ void EscapeName(LPCWSTR szName);
+ void EscapeAssemblyName(LPCWSTR szName);
+ void EscapeEmbeddedAssemblyName(LPCWSTR szName);
+ BOOL CheckParseState(int validState) { WRAPPER_NO_CONTRACT; return ((int)m_parseState & validState) != 0; }
+ //BOOL CheckParseState(int validState) { WRAPPER_NO_CONTRACT; ASSERT(((int)m_parseState & validState) != 0); return TRUE; }
+ HRESULT Fail() { WRAPPER_NO_CONTRACT; m_parseState = ParseStateERROR; return E_FAIL; }
+ void PushOpenGenericArgument();
+ void PopOpenGenericArgument();
+
+private:
+ ParseState m_parseState;
+ SString* m_pStr;
+ InlineSString<256> m_str;
+ DWORD m_instNesting;
+ BOOL m_bFirstInstArg;
+ BOOL m_bNestedName;
+ BOOL m_bHasAssemblySpec;
+ BOOL m_bUseAngleBracketsForGenerics;
+ Stack m_stack;
+};
+
+// Class that's exposed to COM and wraps TypeNameBuilder (so that it can thunk
+// all the entry points in order to perform stack probes).
+class TypeNameBuilderWrapper : public ITypeNameBuilder
+{
+public:
+ virtual HRESULT __stdcall QueryInterface(REFIID riid, void **ppUnk);
+ virtual ULONG __stdcall AddRef();
+ virtual ULONG __stdcall Release();
+
+ virtual HRESULT __stdcall OpenGenericArguments();
+ virtual HRESULT __stdcall CloseGenericArguments();
+ virtual HRESULT __stdcall OpenGenericArgument();
+ virtual HRESULT __stdcall CloseGenericArgument();
+ virtual HRESULT __stdcall AddName(LPCWSTR szName);
+ virtual HRESULT __stdcall AddPointer();
+ virtual HRESULT __stdcall AddByRef();
+ virtual HRESULT __stdcall AddSzArray();
+ virtual HRESULT __stdcall AddArray(DWORD rank);
+ virtual HRESULT __stdcall AddAssemblySpec(LPCWSTR szAssemblySpec);
+ virtual HRESULT __stdcall ToString(BSTR* pszStringRepresentation);
+ virtual HRESULT __stdcall Clear();
+
+ TypeNameBuilderWrapper() : m_ref(0) { WRAPPER_NO_CONTRACT; }
+
+private:
+ LONG m_ref;
+ TypeNameBuilder m_tnb;
+};
+
+// --------------------------------------------------------------------------
+// This type can generate names for types. It is used by reflection methods
+// like System.RuntimeType.RuntimeTypeCache.ConstructName
+//
+
+class TypeString
+{
+ // -----------------------------------------------------------------------
+ // WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
+ // -----------------------------------------------------------------------
+ // Do not change the formatting of these strings, as they are used by
+ // serialization, and it would break serialization backwards-compatibility.
+
+public:
+
+ typedef enum
+ {
+ FormatBasic = 0x00000000, // Not a bitmask, simply the tersest flag settings possible
+ FormatNamespace = 0x00000001, // Include namespace and/or enclosing class names in type names
+ FormatFullInst = 0x00000002, // Include namespace and assembly in generic types (regardless of other flag settings)
+ FormatAssembly = 0x00000004, // Include assembly display name in type names
+ FormatSignature = 0x00000008, // Include signature in method names
+ FormatNoVersion = 0x00000010, // Suppress version and culture information in all assembly names
+#ifdef _DEBUG
+ FormatDebug = 0x00000020, // For debug printing of types only
+#endif
+ FormatAngleBrackets = 0x00000040, // Whether generic types are C<T> or C[T]
+ FormatStubInfo = 0x00000080, // Include stub info like {unbox-stub}
+ FormatGenericParam = 0x00000100, // Use !name and !!name for generic type and method parameters
+ }
+ FormatFlags;
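+
+ // Rough effect of common flag combinations on an instantiated List<int>
+ // (illustrative; the exact assembly text depends on the runtime and version):
+ //   FormatBasic                      "List`1"
+ //   FormatNamespace                  "System.Collections.Generic.List`1[System.Int32]"
+ //   FormatNamespace|FormatAssembly   "System.Collections.Generic.List`1[System.Int32], mscorlib, ..."
+ //   FormatNamespace|FormatFullInst   "System.Collections.Generic.List`1[[System.Int32, mscorlib, ...]]"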
+
+public:
+ // Append the name of the type td to the string
+ // The following flags in the FormatFlags argument are significant: FormatNamespace
+ static void AppendTypeDef(SString& tnb, IMDInternalImport *pImport, mdTypeDef td, DWORD format = FormatNamespace);
+
+ // Append a square-bracket-enclosed, comma-separated list of n type parameters in inst to the string s
+ // and enclose each parameter in square brackets to disambiguate the commas
+ // The following flags in the FormatFlags argument are significant: FormatNamespace FormatFullInst FormatAssembly FormatNoVersion
+ static void AppendInst(SString& s, Instantiation inst, DWORD format = FormatNamespace);
+
+ // Append a representation of the type t to the string s
+ // The following flags in the FormatFlags argument are significant: FormatNamespace FormatFullInst FormatAssembly FormatNoVersion
+ static void AppendType(SString& s, TypeHandle t, DWORD format = FormatNamespace);
+
+ // Append a representation of the type t to the string s, using the generic
+ // instantiation info provided, instead of the instantiation in the TypeHandle.
+ static void AppendType(SString& s, TypeHandle t, Instantiation typeInstantiation, DWORD format = FormatNamespace);
+
+ static void AppendTypeKey(SString& s, TypeKey *pTypeKey, DWORD format = FormatNamespace);
+
+ // Appends the method name and generic instantiation info. This might
+ // look like "Namespace.ClassName[T].Foo[U, V]()"
+ static void AppendMethod(SString& s, MethodDesc *pMD, Instantiation typeInstantiation, const DWORD format = FormatNamespace|FormatSignature);
+
+ // Append a representation of the method m to the string s
+ // The following flags in the FormatFlags argument are significant: FormatNamespace FormatFullInst FormatAssembly FormatSignature FormatNoVersion
+ static void AppendMethodInternal(SString& s, MethodDesc *pMD, const DWORD format = FormatNamespace|FormatSignature|FormatStubInfo);
+
+ // Append the field name and generic instantiation info.
+ static void AppendField(SString& s, FieldDesc *pFD, Instantiation typeInstantiation, const DWORD format = FormatNamespace);
+#ifdef _DEBUG
+ // These versions are NOTHROW. They are meant for diagnostic purposes only
+ // as they may leave "s" in a bad state if there are any problems/exceptions.
+ static void AppendMethodDebug(SString& s, MethodDesc *pMD);
+ static void AppendTypeDebug(SString& s, TypeHandle t);
+ static void AppendTypeKeyDebug(SString& s, TypeKey* pTypeKey);
+#endif
+
+private:
+ friend class TypeLibExporter;
+ friend class TypeNameBuilder;
+ static void AppendMethodImpl(SString& s, MethodDesc *pMD, Instantiation typeInstantiation, const DWORD format);
+ static void AppendTypeDef(TypeNameBuilder& tnb, IMDInternalImport *pImport, mdTypeDef td, DWORD format = FormatNamespace);
+ static void AppendNestedTypeDef(TypeNameBuilder& tnb, IMDInternalImport *pImport, mdTypeDef td, DWORD format = FormatNamespace);
+ static void AppendInst(TypeNameBuilder& tnb, Instantiation inst, DWORD format = FormatNamespace);
+ static void AppendType(TypeNameBuilder& tnb, TypeHandle t, Instantiation typeInstantiation, DWORD format = FormatNamespace); // ????
+ static void AppendTypeKey(TypeNameBuilder& tnb, TypeKey *pTypeKey, DWORD format = FormatNamespace);
+ static void AppendParamTypeQualifier(TypeNameBuilder& tnb, CorElementType kind, DWORD rank);
+ static void EscapeSimpleTypeName(SString* ssTypeName, SString* ssEscapedTypeName);
+ static bool ContainsReservedChar(LPCWSTR pTypeName);
+};
+
+#endif
+
diff --git a/src/vm/umthunkhash.cpp b/src/vm/umthunkhash.cpp
new file mode 100644
index 0000000000..84637b0290
--- /dev/null
+++ b/src/vm/umthunkhash.cpp
@@ -0,0 +1,172 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: umthunkhash.cpp
+//
+
+//
+
+
+#include "common.h"
+#include "umthunkhash.h"
+
+#ifdef FEATURE_MIXEDMODE
+
+UMThunkHash::UMThunkHash(Module *pModule, AppDomain *pDomain) :
+ CClosedHashBase(
+#ifdef _DEBUG
+ 3,
+#else
+ 17, // CClosedHashTable will grow as necessary
+#endif
+
+ sizeof(UTHEntry),
+ FALSE
+ ),
+ m_crst(CrstUMThunkHash)
+
+{
+ WRAPPER_NO_CONTRACT;
+ m_pModule = pModule;
+ m_dwAppDomainId = pDomain->GetId();
+}
+
+UMThunkHash::~UMThunkHash()
+{
+ CONTRACT_VOID
+ {
+ NOTHROW;
+ DESTRUCTOR_CHECK;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ MODE_ANY;
+ }
+ CONTRACT_END
+
+#ifndef DACCESS_COMPILE
+ UTHEntry *phe = (UTHEntry*)GetFirst();
+ while (phe) {
+ DeleteExecutable(phe->m_pUMEntryThunk);
+ phe->m_UMThunkMarshInfo.~UMThunkMarshInfo();
+ phe = (UTHEntry*)GetNext((BYTE*)phe);
+ }
+#endif
+ RETURN;
+}
+
+LPVOID UMThunkHash::GetUMThunk(LPVOID pTarget, PCCOR_SIGNATURE pSig, DWORD cSig)
+{
+ CONTRACT (LPVOID)
+ {
+ INSTANCE_CHECK;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END
+
+ UTHEntry *phe;
+ CrstHolder ch(&m_crst);
+
+ UTHKey key;
+ key.m_pTarget = pTarget;
+ key.m_pSig = pSig;
+ key.m_cSig = cSig;
+
+ phe =(UTHEntry *)Find((LPVOID)&key);
+#ifndef DACCESS_COMPILE
+ if (phe == NULL)
+ {
+ NewExecutableHolder<UMEntryThunk> uET= new (executable) UMEntryThunk();
+
+ bool bNew = FALSE;
+ phe = (UTHEntry *)FindOrAdd((LPVOID)&key,bNew);
+ if (phe != NULL)
+ {
+ _ASSERTE(bNew); // we are under lock
+
+ phe->m_pUMEntryThunk=uET.Extract();
+
+ //nothrow
+ phe->m_UMThunkMarshInfo.LoadTimeInit(Signature(pSig, cSig), m_pModule);
+ phe->m_pUMEntryThunk->LoadTimeInit((PCODE)pTarget, NULL, &(phe->m_UMThunkMarshInfo),
+ MethodTable::GetMethodDescForSlotAddress((PCODE)pTarget),
+ m_dwAppDomainId);
+
+ phe->m_key = key;
+ phe->m_status = USED;
+ }
+ }
+#endif // DACCESS_COMPILE
+ if (phe)
+ RETURN (LPVOID)(phe->m_pUMEntryThunk->GetCode());
+ else
+ RETURN NULL;
+}
+
+
+unsigned int UMThunkHash::Hash(void const *pData)
+{
+ LIMITED_METHOD_CONTRACT;
+ UTHKey *pKey = (UTHKey*)pData;
+ return (ULONG)(size_t)(pKey->m_pTarget);
+}
+
+inline unsigned int UMThunkHash::Compare( // 0, -1, or 1.
+ void const *pData, // Raw key data on lookup.
+ BYTE *pElement) // The element to compare data against.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ UTHKey *pkey1 = (UTHKey*)pData;
+ UTHKey *pkey2 = &( ((UTHEntry*)pElement)->m_key );
+
+ if (pkey1->m_pTarget != pkey2->m_pTarget)
+ return 1;
+
+ if (S_OK != MetaSig::CompareMethodSigsNT(pkey1->m_pSig, pkey1->m_cSig, m_pModule, NULL, pkey2->m_pSig, pkey2->m_cSig, m_pModule, NULL))
+ return 1;
+
+ return 0;
+}
+
+
+
+CClosedHashBase::ELEMENTSTATUS UMThunkHash::Status( // The status of the entry.
+ BYTE *pElement) // The element to check.
+{
+ LIMITED_METHOD_CONTRACT;
+ return ((UTHEntry*)pElement)->m_status;
+}
+
+//*****************************************************************************
+// Sets the status of the given element.
+//*****************************************************************************
+void UMThunkHash::SetStatus(
+ BYTE *pElement, // The element to set status for.
+ ELEMENTSTATUS eStatus) // New status.
+{
+ LIMITED_METHOD_CONTRACT;
+ ((UTHEntry*)pElement)->m_status = eStatus;
+}
+
+//*****************************************************************************
+// Returns the internal key value for an element.
+//*****************************************************************************
+void *UMThunkHash::GetKey( // The data to hash on.
+ BYTE *pElement) // The element to return data ptr for.
+{
+ LIMITED_METHOD_CONTRACT;
+ return (BYTE*) &(((UTHEntry*)pElement)->m_key);
+}
+
+#endif // FEATURE_MIXEDMODE
diff --git a/src/vm/umthunkhash.h b/src/vm/umthunkhash.h
new file mode 100644
index 0000000000..e9f3909fd2
--- /dev/null
+++ b/src/vm/umthunkhash.h
@@ -0,0 +1,88 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: umthunkhash.h
+//
+
+//
+
+
+#ifndef UMTHUNKHASH_H_
+#define UMTHUNKHASH_H_
+#include "dllimportcallback.h"
+
+#ifdef FEATURE_MIXEDMODE // IJW
+//
+// A hashtable for u->m thunks not represented in the fixup tables.
+//
+class UMThunkHash : public CClosedHashBase {
+ private:
+ //----------------------------------------------------
+ // Hash key for CClosedHashBase
+ //----------------------------------------------------
+ struct UTHKey {
+ LPVOID m_pTarget;
+ PCCOR_SIGNATURE m_pSig;
+ DWORD m_cSig;
+ };
+
+ //----------------------------------------------------
+ // Hash entry for CClosedHashBase
+ //----------------------------------------------------
+ struct UTHEntry {
+ UTHKey m_key;
+ ELEMENTSTATUS m_status;
+ UMEntryThunk *m_pUMEntryThunk;
+ UMThunkMarshInfo m_UMThunkMarshInfo;
+ };
+
+ public:
+ UMThunkHash(Module *pModule, AppDomain *pDomain);
+ ~UMThunkHash();
+ LPVOID GetUMThunk(LPVOID pTarget, PCCOR_SIGNATURE pSig, DWORD cSig);
+ // *** OVERRIDES FOR CClosedHashBase ***
+
+ //*****************************************************************************
+ // Hash is called with a pointer to an element in the table. You must override
+ // this method and provide a hash algorithm for your element type.
+ //*****************************************************************************
+ virtual unsigned int Hash( // The key value.
+ void const *pData); // Raw data to hash.
+
+ //*****************************************************************************
+ // Compare is used in the typical memcmp way: 0 is equality, -1/1 indicate
+ // direction of miscompare. In this system everything is always equal or not.
+ //*****************************************************************************
+ inline unsigned int Compare( // 0, -1, or 1.
+ void const *pData, // Raw key data on lookup.
+ BYTE *pElement); // The element to compare data against.
+
+ //*****************************************************************************
+ // Return true if the element is free to be used.
+ //*****************************************************************************
+ virtual ELEMENTSTATUS Status( // The status of the entry.
+ BYTE *pElement); // The element to check.
+
+ //*****************************************************************************
+ // Sets the status of the given element.
+ //*****************************************************************************
+ virtual void SetStatus(
+ BYTE *pElement, // The element to set status for.
+ ELEMENTSTATUS eStatus); // New status.
+
+ //*****************************************************************************
+ // Returns the internal key value for an element.
+ //*****************************************************************************
+ virtual void *GetKey( // The data to hash on.
+ BYTE *pElement); // The element to return data ptr for.
+
+protected:
+ Module *m_pModule;
+ ADID m_dwAppDomainId;
+ Crst m_crst;
+};
+
+#endif
+#endif
diff --git a/src/vm/util.cpp b/src/vm/util.cpp
new file mode 100644
index 0000000000..6fd3cc2b34
--- /dev/null
+++ b/src/vm/util.cpp
@@ -0,0 +1,4000 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: UTIL.CPP
+//
+
+// ===========================================================================
+
+
+#include "common.h"
+#include "excep.h"
+#include "corhost.h"
+#include "eventtrace.h"
+#include "posterror.h"
+#include "eemessagebox.h"
+#include "newapis.h"
+
+#include <shlobj.h>
+
+#include "dlwrap.h"
+
+#ifndef DACCESS_COMPILE
+
+// Helper function that encapsulates the parsing rules.
+//
+// Called first with *pdstout == NULL to figure out how many args there are
+// and the size of the required destination buffer.
+//
+// Called again with a nonnull *pdstout to fill in the actual buffer.
+//
+// Returns the # of arguments.
+static UINT ParseCommandLine(LPCWSTR psrc, __out LPWSTR *pdstout)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ UINT argcount = 1; // discovery of arg0 is unconditional, below
+ LPWSTR pdst = *pdstout;
+ BOOL fDoWrite = (pdst != NULL);
+
+ BOOL fInQuotes;
+ int iSlash;
+
+ /* A quoted program name is handled here. The handling is much
+ simpler than for other arguments. Basically, whatever lies
+ between the leading double-quote and next one, or a terminal null
+ character is simply accepted. Fancier handling is not required
+ because the program name must be a legal NTFS/HPFS file name.
+ Note that the double-quote characters are not copied, nor do they
+ contribute to the output character count.
+
+ This "simplification" is necessary for compatibility reasons even
+ though it leads to mishandling of certain cases. For example,
+ "c:\tests\"test.exe will result in an arg0 of c:\tests\ and an
+ arg1 of test.exe. In any rational world this is incorrect, but
+ we need to preserve compatibility.
+ */
+
+ LPCWSTR pStart = psrc;
+ BOOL skipQuote = FALSE;
+
+ if (*psrc == W('\"'))
+ {
+ // scan from just past the first double-quote through the next
+ // double-quote, or up to a null, whichever comes first
+ while ((*(++psrc) != W('\"')) && (*psrc != W('\0')))
+ continue;
+
+ skipQuote = TRUE;
+ }
+ else
+ {
+ /* Not a quoted program name */
+
+ while (!ISWWHITE(*psrc) && *psrc != W('\0'))
+ psrc++;
+ }
+
+ // We have now identified arg0 as pStart (or pStart+1 if we have a leading
+ // quote) through psrc-1 inclusive
+ if (skipQuote)
+ pStart++;
+ while (pStart < psrc)
+ {
+ if (fDoWrite)
+ *pdst = *pStart;
+
+ pStart++;
+ pdst++;
+ }
+
+ // And terminate it.
+ if (fDoWrite)
+ *pdst = W('\0');
+
+ pdst++;
+
+ // if we stopped on a double-quote when arg0 is quoted, skip over it
+ if (skipQuote && *psrc == W('\"'))
+ psrc++;
+
+ while ( *psrc != W('\0'))
+ {
+LEADINGWHITE:
+
+ // The outofarg state.
+ while (ISWWHITE(*psrc))
+ psrc++;
+
+ if (*psrc == W('\0'))
+ break;
+ else
+ if (*psrc == W('#'))
+ {
+ while (*psrc != W('\0') && *psrc != W('\n'))
+ psrc++; // skip to end of line
+
+ goto LEADINGWHITE;
+ }
+
+ argcount++;
+ fInQuotes = FALSE;
+
+ while ((!ISWWHITE(*psrc) || fInQuotes) && *psrc != W('\0'))
+ {
+ switch (*psrc)
+ {
+ case W('\\'):
+ iSlash = 0;
+ while (*psrc == W('\\'))
+ {
+ iSlash++;
+ psrc++;
+ }
+
+ if (*psrc == W('\"'))
+ {
+ for ( ; iSlash >= 2; iSlash -= 2)
+ {
+ if (fDoWrite)
+ *pdst = W('\\');
+
+ pdst++;
+ }
+
+ if (iSlash & 1)
+ {
+ if (fDoWrite)
+ *pdst = *psrc;
+
+ psrc++;
+ pdst++;
+ }
+ else
+ {
+ fInQuotes = !fInQuotes;
+ psrc++;
+ }
+ }
+ else
+ for ( ; iSlash > 0; iSlash--)
+ {
+ if (fDoWrite)
+ *pdst = W('\\');
+
+ pdst++;
+ }
+
+ break;
+
+ case W('\"'):
+ fInQuotes = !fInQuotes;
+ psrc++;
+ break;
+
+ default:
+ if (fDoWrite)
+ *pdst = *psrc;
+
+ psrc++;
+ pdst++;
+ }
+ }
+
+ if (fDoWrite)
+ *pdst = W('\0');
+
+ pdst++;
+ }
+
+
+ _ASSERTE(*psrc == W('\0'));
+ *pdstout = pdst;
+ return argcount;
+}
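+
+// Examples of the backslash/quote rules implemented above (the usual CRT argv
+// conventions):
+//   ab\\cd    ->  ab\\cd   backslashes not followed by a quote are literal
+//   a\\\"b    ->  a\"b     2n+1 backslashes + quote: n backslashes + literal quote
+//   "a b"\\"c ->  a b\c    2n backslashes + quote: n backslashes, and the quote
+//                          toggles the in-quotes state instead of being copied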
+
+
+// Function to parse apart a command line and return the
+// arguments just like argv and argc
+// This function is a little funky because of the pointer work
+// but it is neat because it allows the recipient of the char**
+// to only have to do a single delete []
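+// The single allocation returned is laid out as
+//
+//     [ LPWSTR argv[0 .. argc-1] | arg0\0 arg1\0 ... argN\0 ]
+//
+// i.e. an array of argc pointers immediately followed by the packed,
+// NUL-terminated argument strings those pointers point into.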
+LPWSTR* CommandLineToArgvW(__in LPWSTR lpCmdLine, DWORD *pNumArgs)
+{
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return NULL;);
+ }
+ CONTRACTL_END
+
+ DWORD argcount = 0;
+ LPWSTR retval = NULL;
+ LPWSTR *pslot;
+ // First we need to find out how many strings there are in the command line
+ _ASSERTE(lpCmdLine);
+ _ASSERTE(pNumArgs);
+
+ LPWSTR pdst = NULL;
+ argcount = ParseCommandLine(lpCmdLine, &pdst);
+
+ // This check is because on WinCE the Application Name is not passed in as an argument to the app!
+ if (argcount == 0)
+ {
+ *pNumArgs = 0;
+ return NULL;
+ }
+
+ // Now we need to allocate a buffer the size of the command line plus the number of strings * sizeof(LPWSTR)
+ retval = new (nothrow) WCHAR[(argcount*sizeof(WCHAR*))/sizeof(WCHAR) + (pdst - (LPWSTR)NULL)];
+ if(!retval)
+ return NULL;
+
+ pdst = (LPWSTR)( argcount*sizeof(LPWSTR*) + (BYTE*)retval );
+ ParseCommandLine(lpCmdLine, &pdst);
+ pdst = (LPWSTR)( argcount*sizeof(LPWSTR*) + (BYTE*)retval );
+ pslot = (LPWSTR*)retval;
+ for (DWORD i = 0; i < argcount; i++)
+ {
+ *(pslot++) = pdst;
+ while (*pdst != W('\0'))
+ {
+ pdst++;
+ }
+ pdst++;
+ }
+
+
+
+ *pNumArgs = argcount;
+ return (LPWSTR*)retval;
+
+}
+
+
+
+
+//************************************************************************
+// CQuickHeap
+//
+// A fast heap for short-term use. Destroying the heap frees all blocks
+// allocated from it; blocks cannot be freed individually.
+//
+// The heap uses COM+ exceptions to report errors.
+//
+// The heap performs no internal synchronization, so it is not
+// multithread-safe.
+//************************************************************************
+CQuickHeap::CQuickHeap()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pFirstQuickBlock = NULL;
+ m_pFirstBigQuickBlock = NULL;
+ m_pNextFree = NULL;
+}
+
+CQuickHeap::~CQuickHeap()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ QuickBlock *pQuickBlock = m_pFirstQuickBlock;
+ while (pQuickBlock) {
+ QuickBlock *ptmp = pQuickBlock;
+ pQuickBlock = pQuickBlock->m_next;
+ delete [] (BYTE*)ptmp;
+ }
+
+ pQuickBlock = m_pFirstBigQuickBlock;
+ while (pQuickBlock) {
+ QuickBlock *ptmp = pQuickBlock;
+ pQuickBlock = pQuickBlock->m_next;
+ delete [] (BYTE*)ptmp;
+ }
+}
+
+LPVOID CQuickHeap::Alloc(UINT sz)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+        SO_TOLERANT; // So long as we clean up the heap when we're done, all the memory goes with it
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ sz = (sz+7) & ~7;
+
+ if ( sz > kBlockSize ) {
+
+ QuickBlock *pQuickBigBlock = (QuickBlock*) new BYTE[sz + sizeof(QuickBlock) - 1];
+ pQuickBigBlock->m_next = m_pFirstBigQuickBlock;
+ m_pFirstBigQuickBlock = pQuickBigBlock;
+
+ return pQuickBigBlock->m_bytes;
+
+
+ } else {
+ if (m_pNextFree == NULL || sz > (UINT)( &(m_pFirstQuickBlock->m_bytes[kBlockSize]) - m_pNextFree )) {
+ QuickBlock *pQuickBlock = (QuickBlock*) new BYTE[kBlockSize + sizeof(QuickBlock) - 1];
+ pQuickBlock->m_next = m_pFirstQuickBlock;
+ m_pFirstQuickBlock = pQuickBlock;
+ m_pNextFree = pQuickBlock->m_bytes;
+ }
+ LPVOID pv = m_pNextFree;
+ m_pNextFree += sz;
+ return pv;
+ }
+}
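+
+// Usage sketch (illustrative): allocations are never freed individually;
+// the destructor releases them all at once:
+//
+//     {
+//         CQuickHeap heap;
+//         void *p1 = heap.Alloc(32);   // served from the current block
+//         void *p2 = heap.Alloc(sz);   // if sz > kBlockSize, gets a dedicated big block
+//     }   // ~CQuickHeap() frees p1 and p2 together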
+
+//----------------------------------------------------------------------------
+// Output functions that avoid the crt's.
+//----------------------------------------------------------------------------
+
+static
+void NPrintToHandleA(HANDLE Handle, const char *pszString, size_t BytesToWrite)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ if (Handle == INVALID_HANDLE_VALUE || Handle == NULL)
+ return;
+
+ BOOL success;
+ DWORD dwBytesWritten;
+    const size_t maxWriteFileSize = 32767; // This is a somewhat arbitrary limit; 2**16-1 doesn't work
+
+ while (BytesToWrite > 0) {
+ DWORD dwChunkToWrite = (DWORD) min(BytesToWrite, maxWriteFileSize);
+        // No CharNextExA on CoreSystem; we just assume no multi-byte characters (this path shouldn't
+        // be hit in production for currently supported CoreSystem-based products anyway).
+#ifndef FEATURE_CORESYSTEM
+ if (dwChunkToWrite < BytesToWrite) {
+ break;
+ // must go by char to find biggest string that will fit, taking DBCS chars into account
+ //dwChunkToWrite = 0;
+ //const char *charNext = pszString;
+ //while (dwChunkToWrite < maxWriteFileSize-2 && charNext) {
+ // charNext = CharNextExA(0, pszString+dwChunkToWrite, 0);
+ // dwChunkToWrite = (DWORD)(charNext - pszString);
+ //}
+ //if (dwChunkToWrite == 0)
+ // break;
+ }
+#endif // !FEATURE_CORESYSTEM
+
+        // Try to write to the handle. If this is not a CUI app, this is probably
+        // not going to work unless the dev took special pains to set their own console
+        // handle during CreateProcess. So try it, but don't yell if it doesn't work in
+        // that case. Also, if stdout is redirected to a pipe and the pipe breaks (e.g., we
+        // write to something like the UNIX head command), don't complain.
+ success = WriteFile(Handle, pszString, dwChunkToWrite, &dwBytesWritten, NULL);
+ if (!success)
+ {
+#if defined(_DEBUG)
+ // This can happen if stdout is a closed pipe. This might not help
+ // much, but we'll have half a chance of seeing this.
+ OutputDebugStringA("CLR: Writing out an unhandled exception to stdout failed!\n");
+ OutputDebugStringA(pszString);
+#endif //_DEBUG
+
+ break;
+ }
+ else {
+ _ASSERTE(dwBytesWritten == dwChunkToWrite);
+ }
+ pszString = pszString + dwChunkToWrite;
+ BytesToWrite -= dwChunkToWrite;
+ }
+
+}
+
+static
+void PrintToHandleA(HANDLE Handle, const char *pszString)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ size_t len = strlen(pszString);
+ NPrintToHandleA(Handle, pszString, len);
+}
+
+void PrintToStdOutA(const char *pszString) {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ HANDLE Handle = GetStdHandle(STD_OUTPUT_HANDLE);
+ PrintToHandleA(Handle, pszString);
+}
+
+
+void PrintToStdOutW(const WCHAR *pwzString)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ MAKE_MULTIBYTE_FROMWIDE_BESTFIT(pStr, pwzString, GetConsoleOutputCP());
+
+ PrintToStdOutA(pStr);
+}
+
+void PrintToStdErrA(const char *pszString) {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ HANDLE Handle = GetStdHandle(STD_ERROR_HANDLE);
+ PrintToHandleA(Handle, pszString);
+}
+
+
+void PrintToStdErrW(const WCHAR *pwzString)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ MAKE_MULTIBYTE_FROMWIDE_BESTFIT(pStr, pwzString, GetConsoleOutputCP());
+
+ PrintToStdErrA(pStr);
+}
+
+
+
+void NPrintToStdOutA(const char *pszString, size_t nbytes) {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ HANDLE Handle = GetStdHandle(STD_OUTPUT_HANDLE);
+ NPrintToHandleA(Handle, pszString, nbytes);
+}
+
+
+void NPrintToStdOutW(const WCHAR *pwzString, size_t nchars)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ LPSTR pStr;
+ MAKE_MULTIBYTE_FROMWIDEN_BESTFIT(pStr, pwzString, (int)nchars, nbytes, GetConsoleOutputCP());
+
+ NPrintToStdOutA(pStr, nbytes);
+}
+
+void NPrintToStdErrA(const char *pszString, size_t nbytes) {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ HANDLE Handle = GetStdHandle(STD_ERROR_HANDLE);
+ NPrintToHandleA(Handle, pszString, nbytes);
+}
+
+
+void NPrintToStdErrW(const WCHAR *pwzString, size_t nchars)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ LPSTR pStr;
+
+ MAKE_MULTIBYTE_FROMWIDEN_BESTFIT(pStr, pwzString, (int)nchars, nbytes, GetConsoleOutputCP());
+
+ NPrintToStdErrA(pStr, nbytes);
+}
+//----------------------------------------------------------------------------
+
+
+
+
+
+//+--------------------------------------------------------------------------
+//
+// Function: VMDebugOutputA( . . . . )
+// VMDebugOutputW( . . . . )
+//
+// Synopsis: Output a message formatted in printf fashion to the debugger.
+// ANSI and wide character versions are both provided. Only
+// present in debug builds (i.e. when _DEBUG is defined).
+//
+// Arguments: [format] --- ANSI or Wide character format string
+// in printf/OutputDebugString-style format.
+//
+// [ ... ] --- Variable length argument list compatible
+// with the format string.
+//
+// Returns: Nothing.
+//
+//  Notes:      Uses an internal fixed-size character buffer whose
+//              size is given by the preprocessor constant DEBUGOUT_BUFSIZE.
+//
+//---------------------------------------------------------------------------
+#ifdef _DEBUG
+
+#define DEBUGOUT_BUFSIZE 1024
+
+void __cdecl VMDebugOutputA(__in LPSTR format, ...)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ va_list argPtr;
+ va_start(argPtr, format);
+
+ char szBuffer[DEBUGOUT_BUFSIZE];
+
+ if(vsprintf_s(szBuffer, DEBUGOUT_BUFSIZE-1, format, argPtr) > 0)
+ OutputDebugStringA(szBuffer);
+ va_end(argPtr);
+}
+
+void __cdecl VMDebugOutputW(__in LPWSTR format, ...)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_DEBUG_ONLY;
+
+ va_list argPtr;
+ va_start(argPtr, format);
+
+ WCHAR wszBuffer[DEBUGOUT_BUFSIZE];
+
+ if(vswprintf_s(wszBuffer, DEBUGOUT_BUFSIZE-2, format, argPtr) > 0)
+ WszOutputDebugString(wszBuffer);
+ va_end(argPtr);
+}
+
+#endif // _DEBUG
+
+//*****************************************************************************
+// Compare VarLoc's
+//*****************************************************************************
+
+bool operator ==(const ICorDebugInfo::VarLoc &varLoc1,
+ const ICorDebugInfo::VarLoc &varLoc2)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ if (varLoc1.vlType != varLoc2.vlType)
+ return false;
+
+ switch(varLoc1.vlType)
+ {
+ case ICorDebugInfo::VLT_REG:
+ case ICorDebugInfo::VLT_REG_BYREF:
+ return varLoc1.vlReg.vlrReg == varLoc2.vlReg.vlrReg;
+
+ case ICorDebugInfo::VLT_STK:
+ case ICorDebugInfo::VLT_STK_BYREF:
+ return varLoc1.vlStk.vlsBaseReg == varLoc2.vlStk.vlsBaseReg &&
+ varLoc1.vlStk.vlsOffset == varLoc2.vlStk.vlsOffset;
+
+ case ICorDebugInfo::VLT_REG_REG:
+ return varLoc1.vlRegReg.vlrrReg1 == varLoc2.vlRegReg.vlrrReg1 &&
+ varLoc1.vlRegReg.vlrrReg2 == varLoc2.vlRegReg.vlrrReg2;
+
+ case ICorDebugInfo::VLT_REG_STK:
+ return varLoc1.vlRegStk.vlrsReg == varLoc2.vlRegStk.vlrsReg &&
+ varLoc1.vlRegStk.vlrsStk.vlrssBaseReg == varLoc2.vlRegStk.vlrsStk.vlrssBaseReg &&
+ varLoc1.vlRegStk.vlrsStk.vlrssOffset == varLoc2.vlRegStk.vlrsStk.vlrssOffset;
+
+    case ICorDebugInfo::VLT_STK_REG:
+        return varLoc1.vlStkReg.vlsrStk.vlsrsBaseReg == varLoc2.vlStkReg.vlsrStk.vlsrsBaseReg &&
+               varLoc1.vlStkReg.vlsrStk.vlsrsOffset == varLoc2.vlStkReg.vlsrStk.vlsrsOffset &&
+               varLoc1.vlStkReg.vlsrReg == varLoc2.vlStkReg.vlsrReg;
+
+    case ICorDebugInfo::VLT_STK2:
+        // <TODO>@TODO : VLT_STK(2) is overloaded to also indicate valueclasses
+        // which should be VLT_STK_n. Just have a parameterized VLT_STK_n.</TODO>
+        return varLoc1.vlStk2.vls2BaseReg == varLoc2.vlStk2.vls2BaseReg &&
+               varLoc1.vlStk2.vls2Offset == varLoc2.vlStk2.vls2Offset;
+
+    case ICorDebugInfo::VLT_FPSTK:
+        return varLoc1.vlFPstk.vlfReg == varLoc2.vlFPstk.vlfReg;
+
+ default:
+ _ASSERTE(!"Bad vlType"); return false;
+ }
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+//*****************************************************************************
+// The following are used to read and write data given NativeVarInfo
+// for primitive types. For ValueClasses, FALSE will be returned.
+//*****************************************************************************
+
+SIZE_T GetRegOffsInCONTEXT(ICorDebugInfo::RegNum regNum)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+#ifdef _TARGET_X86_
+ switch(regNum)
+ {
+ case ICorDebugInfo::REGNUM_EAX: return offsetof(T_CONTEXT,Eax);
+ case ICorDebugInfo::REGNUM_ECX: return offsetof(T_CONTEXT,Ecx);
+ case ICorDebugInfo::REGNUM_EDX: return offsetof(T_CONTEXT,Edx);
+ case ICorDebugInfo::REGNUM_EBX: return offsetof(T_CONTEXT,Ebx);
+ // TODO: Fix AMBIENT_SP handling.
+ // AMBIENT_SP It isn't necessarily the same value as ESP. We probably shouldn't try
+ // and handle REGNUM_AMBIENT_SP here, and instead update our callers (eg.
+ // GetNativeVarVal) to handle this case explicitly. This logic should also be
+ // merged with the parallel (but correct in this case) logic in mscordbi.
+ case ICorDebugInfo::REGNUM_ESP:
+ case ICorDebugInfo::REGNUM_AMBIENT_SP:
+ return offsetof(T_CONTEXT,Esp);
+ case ICorDebugInfo::REGNUM_EBP: return offsetof(T_CONTEXT,Ebp);
+ case ICorDebugInfo::REGNUM_ESI: return offsetof(T_CONTEXT,Esi);
+ case ICorDebugInfo::REGNUM_EDI: return offsetof(T_CONTEXT,Edi);
+ default: _ASSERTE(!"Bad regNum"); return (SIZE_T) -1;
+ }
+#elif defined(_TARGET_AMD64_)
+ switch(regNum)
+ {
+ case ICorDebugInfo::REGNUM_RAX: return offsetof(CONTEXT, Rax);
+ case ICorDebugInfo::REGNUM_RCX: return offsetof(CONTEXT, Rcx);
+ case ICorDebugInfo::REGNUM_RDX: return offsetof(CONTEXT, Rdx);
+ case ICorDebugInfo::REGNUM_RBX: return offsetof(CONTEXT, Rbx);
+ case ICorDebugInfo::REGNUM_RSP: return offsetof(CONTEXT, Rsp);
+ case ICorDebugInfo::REGNUM_RBP: return offsetof(CONTEXT, Rbp);
+ case ICorDebugInfo::REGNUM_RSI: return offsetof(CONTEXT, Rsi);
+ case ICorDebugInfo::REGNUM_RDI: return offsetof(CONTEXT, Rdi);
+ case ICorDebugInfo::REGNUM_R8: return offsetof(CONTEXT, R8);
+ case ICorDebugInfo::REGNUM_R9: return offsetof(CONTEXT, R9);
+ case ICorDebugInfo::REGNUM_R10: return offsetof(CONTEXT, R10);
+ case ICorDebugInfo::REGNUM_R11: return offsetof(CONTEXT, R11);
+ case ICorDebugInfo::REGNUM_R12: return offsetof(CONTEXT, R12);
+ case ICorDebugInfo::REGNUM_R13: return offsetof(CONTEXT, R13);
+ case ICorDebugInfo::REGNUM_R14: return offsetof(CONTEXT, R14);
+ case ICorDebugInfo::REGNUM_R15: return offsetof(CONTEXT, R15);
+ default: _ASSERTE(!"Bad regNum"); return (SIZE_T)(-1);
+ }
+#elif defined(_TARGET_ARM_)
+
+ switch(regNum)
+ {
+ case ICorDebugInfo::REGNUM_R0: return offsetof(T_CONTEXT, R0);
+ case ICorDebugInfo::REGNUM_R1: return offsetof(T_CONTEXT, R1);
+ case ICorDebugInfo::REGNUM_R2: return offsetof(T_CONTEXT, R2);
+ case ICorDebugInfo::REGNUM_R3: return offsetof(T_CONTEXT, R3);
+ case ICorDebugInfo::REGNUM_R4: return offsetof(T_CONTEXT, R4);
+ case ICorDebugInfo::REGNUM_R5: return offsetof(T_CONTEXT, R5);
+ case ICorDebugInfo::REGNUM_R6: return offsetof(T_CONTEXT, R6);
+ case ICorDebugInfo::REGNUM_R7: return offsetof(T_CONTEXT, R7);
+ case ICorDebugInfo::REGNUM_R8: return offsetof(T_CONTEXT, R8);
+ case ICorDebugInfo::REGNUM_R9: return offsetof(T_CONTEXT, R9);
+ case ICorDebugInfo::REGNUM_R10: return offsetof(T_CONTEXT, R10);
+ case ICorDebugInfo::REGNUM_R11: return offsetof(T_CONTEXT, R11);
+ case ICorDebugInfo::REGNUM_R12: return offsetof(T_CONTEXT, R12);
+ case ICorDebugInfo::REGNUM_SP: return offsetof(T_CONTEXT, Sp);
+ case ICorDebugInfo::REGNUM_PC: return offsetof(T_CONTEXT, Pc);
+ case ICorDebugInfo::REGNUM_LR: return offsetof(T_CONTEXT, Lr);
+ case ICorDebugInfo::REGNUM_AMBIENT_SP: return offsetof(T_CONTEXT, Sp);
+ default: _ASSERTE(!"Bad regNum"); return (SIZE_T)(-1);
+ }
+#else
+ PORTABILITY_ASSERT("GetRegOffsInCONTEXT is not implemented on this platform.");
+ return (SIZE_T) -1;
+#endif // _TARGET_X86_
+}
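+
+// Example (illustrative sketch, mirroring the helpers below): the returned
+// value is a byte offset into the CONTEXT, so a register slot is read as:
+//
+//     SIZE_T regOffs = GetRegOffsInCONTEXT(regNum);
+//     SIZE_T value   = *(SIZE_T *)(regOffs + (BYTE*)pCtx);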
+
+SIZE_T DereferenceByRefVar(SIZE_T addr)
+{
+ STATIC_CONTRACT_WRAPPER;
+
+    SIZE_T result = 0;
+
+#if defined(DACCESS_COMPILE)
+ HRESULT hr = DacReadAll(addr, &result, sizeof(result), false);
+ if (FAILED(hr))
+ {
+        result = 0;
+ }
+
+#else // !DACCESS_COMPILE
+ EX_TRY
+ {
+ AVInRuntimeImplOkayHolder AVOkay;
+
+ result = *(SIZE_T*)addr;
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+#endif // !DACCESS_COMPILE
+
+ return result;
+}
+
+// How are errors communicated to the caller?
+ULONG NativeVarLocations(const ICorDebugInfo::VarLoc & varLoc,
+ PT_CONTEXT pCtx,
+ ULONG numLocs,
+ NativeVarLocation* locs)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ _ASSERTE(numLocs >= MAX_NATIVE_VAR_LOCS);
+
+ bool fByRef = false;
+ switch(varLoc.vlType)
+ {
+ SIZE_T regOffs;
+ TADDR baseReg;
+
+ case ICorDebugInfo::VLT_REG_BYREF:
+ fByRef = true; // fall through
+ case ICorDebugInfo::VLT_REG:
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlReg.vlrReg);
+ locs->addr = (ULONG64)(ULONG_PTR)pCtx + regOffs;
+ if (fByRef)
+ {
+ locs->addr = (ULONG64)DereferenceByRefVar((SIZE_T)locs->addr);
+ }
+        locs->size = sizeof(SIZE_T);
+        locs->contextReg = true;
+        return 1;
+
+ case ICorDebugInfo::VLT_STK_BYREF:
+ fByRef = true; // fall through
+ case ICorDebugInfo::VLT_STK:
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlStk.vlsBaseReg);
+ baseReg = *(TADDR *)(regOffs + (BYTE*)pCtx);
+ locs->addr = baseReg + varLoc.vlStk.vlsOffset;
+ if (fByRef)
+ {
+ locs->addr = (ULONG64)DereferenceByRefVar((SIZE_T)locs->addr);
+ }
+ locs->size = sizeof(SIZE_T);
+ locs->contextReg = false;
+ return 1;
+
+ case ICorDebugInfo::VLT_REG_REG:
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlRegReg.vlrrReg1);
+ locs->addr = (ULONG64)(ULONG_PTR)pCtx + regOffs;
+ locs->size = sizeof(SIZE_T);
+ locs->contextReg = true;
+ locs++;
+
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlRegReg.vlrrReg2);
+ locs->addr = (ULONG64)(ULONG_PTR)pCtx + regOffs;
+ locs->size = sizeof(SIZE_T);
+ locs->contextReg = true;
+ return 2;
+
+ case ICorDebugInfo::VLT_REG_STK:
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlRegStk.vlrsReg);
+ locs->addr = (ULONG64)(ULONG_PTR)pCtx + regOffs;
+ locs->size = sizeof(SIZE_T);
+ locs->contextReg = true;
+ locs++;
+
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlRegStk.vlrsStk.vlrssBaseReg);
+ baseReg = *(TADDR *)(regOffs + (BYTE*)pCtx);
+ locs->addr = baseReg + varLoc.vlRegStk.vlrsStk.vlrssOffset;
+ locs->size = sizeof(SIZE_T);
+ locs->contextReg = false;
+ return 2;
+
+ case ICorDebugInfo::VLT_STK_REG:
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlStkReg.vlsrStk.vlsrsBaseReg);
+ baseReg = *(TADDR *)(regOffs + (BYTE*)pCtx);
+ locs->addr = baseReg + varLoc.vlStkReg.vlsrStk.vlsrsOffset;
+ locs->size = sizeof(SIZE_T);
+ locs->contextReg = false;
+ locs++;
+
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlStkReg.vlsrReg);
+ locs->addr = (ULONG64)(ULONG_PTR)pCtx + regOffs;
+ locs->size = sizeof(SIZE_T);
+ locs->contextReg = true;
+ return 2;
+
+ case ICorDebugInfo::VLT_STK2:
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlStk2.vls2BaseReg);
+ baseReg = *(TADDR *)(regOffs + (BYTE*)pCtx);
+ locs->addr = baseReg + varLoc.vlStk2.vls2Offset;
+ locs->size = 2 * sizeof(SIZE_T);
+ locs->contextReg = false;
+ return 1;
+
+ case ICorDebugInfo::VLT_FPSTK:
+ _ASSERTE(!"NYI");
+ return 0;
+
+ default:
+ _ASSERTE(!"Bad locType");
+ return 0;
+ }
+}
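+
+// Caller sketch (illustrative): locs must have room for at least
+// MAX_NATIVE_VAR_LOCS entries; the return value says how many were filled in:
+//
+//     NativeVarLocation locs[MAX_NATIVE_VAR_LOCS];
+//     ULONG cLocs = NativeVarLocations(varLoc, pCtx, MAX_NATIVE_VAR_LOCS, locs);
+//     for (ULONG i = 0; i < cLocs; i++)
+//         /* use locs[i].addr, locs[i].size, locs[i].contextReg */;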
+
+
+BOOL CompareFiles(HANDLE hFile1,HANDLE hFile2)
+{
+
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ BY_HANDLE_FILE_INFORMATION fileinfo1;
+ BY_HANDLE_FILE_INFORMATION fileinfo2;
+ if (!GetFileInformationByHandle(hFile1,&fileinfo1) ||
+ !GetFileInformationByHandle(hFile2,&fileinfo2))
+ ThrowLastError();
+ return fileinfo1.nFileIndexLow == fileinfo2.nFileIndexLow &&
+ fileinfo1.nFileIndexHigh == fileinfo2.nFileIndexHigh &&
+ fileinfo1.dwVolumeSerialNumber==fileinfo2.dwVolumeSerialNumber;
+}
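+
+// Note: this compares file identity (volume serial number plus file index),
+// so two handles opened through different paths to the same underlying file
+// (e.g. via a hard link) should compare equal. Sketch:
+//
+//     BOOL fSameFile = CompareFiles(hFile1, hFile2);   // throws if either query fails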
+
+
+#ifndef DACCESS_COMPILE
+
+// Returns the address at which the variable begins. For register vars
+// (including VLT_REG), this is the address of the register's slot within
+// the CONTEXT; for a reg-stack split, it returns the addr of the stack part.
+// Returns NULL for unsupported location types.
+SIZE_T *NativeVarStackAddr(const ICorDebugInfo::VarLoc & varLoc,
+ PCONTEXT pCtx)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ SIZE_T *dwAddr = NULL;
+
+ bool fByRef = false;
+ switch(varLoc.vlType)
+ {
+ SIZE_T regOffs;
+ const BYTE * baseReg;
+
+ case ICorDebugInfo::VLT_REG_BYREF:
+ fByRef = true; // fall through
+ case ICorDebugInfo::VLT_REG:
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlReg.vlrReg);
+ dwAddr = (SIZE_T *)(regOffs + (BYTE*)pCtx);
+ if (fByRef)
+ {
+ dwAddr = (SIZE_T*)(*dwAddr);
+ }
+ LOG((LF_CORDB, LL_INFO100, "NVSA: VLT_REG @ 0x%x (by ref = %d)\n", dwAddr, fByRef));
+ break;
+
+ case ICorDebugInfo::VLT_STK_BYREF:
+ fByRef = true; // fall through
+ case ICorDebugInfo::VLT_STK:
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlStk.vlsBaseReg);
+ baseReg = (const BYTE *)*(SIZE_T *)(regOffs + (BYTE*)pCtx);
+ dwAddr = (SIZE_T *)(baseReg + varLoc.vlStk.vlsOffset);
+ if (fByRef)
+ {
+ dwAddr = (SIZE_T*)(*dwAddr);
+ }
+ LOG((LF_CORDB, LL_INFO100, "NVSA: VLT_STK @ 0x%x (by ref = %d)\n", dwAddr, fByRef));
+ break;
+
+ case ICorDebugInfo::VLT_STK2:
+ // <TODO>@TODO : VLT_STK2 is overloaded to also mean VLT_STK_n.
+ // return FALSE if n > 2;</TODO>
+
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlStk2.vls2BaseReg);
+ baseReg = (const BYTE *)*(SIZE_T *)(regOffs + (BYTE*)pCtx);
+ dwAddr = (SIZE_T *)(baseReg + varLoc.vlStk2.vls2Offset);
+ LOG((LF_CORDB, LL_INFO100, "NVSA: VLT_STK_2 @ 0x%x\n",dwAddr));
+ break;
+
+ case ICorDebugInfo::VLT_REG_STK:
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlRegStk.vlrsStk.vlrssBaseReg);
+ baseReg = (const BYTE *)*(SIZE_T *)(regOffs + (BYTE*)pCtx);
+ dwAddr = (SIZE_T *)(baseReg + varLoc.vlRegStk.vlrsStk.vlrssOffset);
+ LOG((LF_CORDB, LL_INFO100, "NVSA: REG_STK @ 0x%x\n",dwAddr));
+ break;
+
+ case ICorDebugInfo::VLT_STK_REG:
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlStkReg.vlsrStk.vlsrsBaseReg);
+ baseReg = (const BYTE *)*(SIZE_T *)(regOffs + (BYTE*)pCtx);
+ dwAddr = (SIZE_T *)(baseReg + varLoc.vlStkReg.vlsrStk.vlsrsOffset);
+ LOG((LF_CORDB, LL_INFO100, "NVSA: STK_REG @ 0x%x\n",dwAddr));
+ break;
+
+ case ICorDebugInfo::VLT_REG_REG:
+ case ICorDebugInfo::VLT_FPSTK:
+ _ASSERTE(!"NYI"); break;
+
+ default:
+ _ASSERTE(!"Bad locType"); break;
+ }
+
+ return dwAddr;
+
+}
+
+
+#if defined(_WIN64)
+void GetNativeVarValHelper(SIZE_T* dstAddrLow, SIZE_T* dstAddrHigh, SIZE_T* srcAddr, SIZE_T size)
+{
+ if (size == 1)
+ *(BYTE*)dstAddrLow = *(BYTE*)srcAddr;
+ else if (size == 2)
+ *(USHORT*)dstAddrLow = *(USHORT*)srcAddr;
+ else if (size == 4)
+ *(ULONG*)dstAddrLow = *(ULONG*)srcAddr;
+ else if (size == 8)
+ *dstAddrLow = *srcAddr;
+ else if (size == 16)
+ {
+ *dstAddrLow = *srcAddr;
+ *dstAddrHigh = *(srcAddr+1);
+ }
+ else
+ {
+ _ASSERTE(!"util.cpp - unreachable code.\n");
+ UNREACHABLE();
+ }
+}
+#endif // _WIN64
+
+
+bool GetNativeVarVal(const ICorDebugInfo::VarLoc & varLoc,
+ PCONTEXT pCtx,
+ SIZE_T * pVal1,
+ SIZE_T * pVal2
+ WIN64_ARG(SIZE_T cbSize))
+{
+
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ switch(varLoc.vlType)
+ {
+#if !defined(_WIN64)
+ SIZE_T regOffs;
+
+ case ICorDebugInfo::VLT_REG:
+ *pVal1 = *NativeVarStackAddr(varLoc,pCtx);
+ break;
+
+ case ICorDebugInfo::VLT_STK:
+ *pVal1 = *NativeVarStackAddr(varLoc,pCtx);
+ break;
+
+ case ICorDebugInfo::VLT_STK2:
+ *pVal1 = *NativeVarStackAddr(varLoc,pCtx);
+ *pVal2 = *(NativeVarStackAddr(varLoc,pCtx)+ 1);
+ break;
+
+ case ICorDebugInfo::VLT_REG_REG:
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlRegReg.vlrrReg1);
+ *pVal1 = *(SIZE_T *)(regOffs + (BYTE*)pCtx);
+ LOG((LF_CORDB, LL_INFO100, "GNVV: STK_REG_REG 1 @ 0x%x\n",
+ (SIZE_T *)(regOffs + (BYTE*)pCtx)));
+
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlRegReg.vlrrReg2);
+ *pVal2 = *(SIZE_T *)(regOffs + (BYTE*)pCtx);
+ LOG((LF_CORDB, LL_INFO100, "GNVV: STK_REG_REG 2 @ 0x%x\n",
+ (SIZE_T *)(regOffs + (BYTE*)pCtx)));
+ break;
+
+ case ICorDebugInfo::VLT_REG_STK:
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlRegStk.vlrsReg);
+ *pVal1 = *(SIZE_T *)(regOffs + (BYTE*)pCtx);
+ LOG((LF_CORDB, LL_INFO100, "GNVV: STK_REG_STK reg @ 0x%x\n",
+ (SIZE_T *)(regOffs + (BYTE*)pCtx)));
+ *pVal2 = *NativeVarStackAddr(varLoc,pCtx);
+ break;
+
+ case ICorDebugInfo::VLT_STK_REG:
+ *pVal1 = *NativeVarStackAddr(varLoc,pCtx);
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlStkReg.vlsrReg);
+ *pVal2 = *(SIZE_T *)(regOffs + (BYTE*)pCtx);
+ LOG((LF_CORDB, LL_INFO100, "GNVV: STK_STK_REG reg @ 0x%x\n",
+ (SIZE_T *)(regOffs + (BYTE*)pCtx)));
+ break;
+
+ case ICorDebugInfo::VLT_FPSTK:
+ _ASSERTE(!"NYI"); break;
+
+#else // _WIN64
+ case ICorDebugInfo::VLT_REG:
+ case ICorDebugInfo::VLT_REG_FP:
+ case ICorDebugInfo::VLT_STK:
+ GetNativeVarValHelper(pVal1, pVal2, NativeVarStackAddr(varLoc, pCtx), cbSize);
+ break;
+
+ case ICorDebugInfo::VLT_REG_BYREF: // fall through
+ case ICorDebugInfo::VLT_STK_BYREF:
+ _ASSERTE(!"GNVV: This function should not be called for value types");
+ break;
+
+#endif // _WIN64
+
+ default:
+ _ASSERTE(!"Bad locType"); break;
+ }
+
+ return true;
+}
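+
+// Caller sketch (illustrative; the WIN64_ARG usage shown is an assumption
+// about the call site): primitive values land in *pVal1 (and *pVal2 for
+// two-part locations); on _WIN64 the extra argument gives the value size:
+//
+//     SIZE_T val1 = 0, val2 = 0;
+//     GetNativeVarVal(varLoc, pCtx, &val1, &val2 WIN64_ARG(sizeof(SIZE_T)));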
+
+
+#if defined(_WIN64)
+void SetNativeVarValHelper(SIZE_T* dstAddr, SIZE_T valueLow, SIZE_T valueHigh, SIZE_T size)
+{
+ if (size == 1)
+ *(BYTE*)dstAddr = (BYTE)valueLow;
+ else if (size == 2)
+ *(USHORT*)dstAddr = (USHORT)valueLow;
+ else if (size == 4)
+ *(ULONG*)dstAddr = (ULONG)valueLow;
+ else if (size == 8)
+ *dstAddr = valueLow;
+ else if (size == 16)
+ {
+ *dstAddr = valueLow;
+ *(dstAddr+1) = valueHigh;
+ }
+ else
+ {
+ _ASSERTE(!"util.cpp - unreachable code.\n");
+ UNREACHABLE();
+ }
+}
+#endif // _WIN64
+
+
+bool SetNativeVarVal(const ICorDebugInfo::VarLoc & varLoc,
+ PCONTEXT pCtx,
+ SIZE_T val1,
+ SIZE_T val2
+ WIN64_ARG(SIZE_T cbSize))
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+ switch(varLoc.vlType)
+ {
+#if !defined(_WIN64)
+ SIZE_T regOffs;
+
+ case ICorDebugInfo::VLT_REG:
+ *NativeVarStackAddr(varLoc,pCtx) = val1;
+ break;
+
+ case ICorDebugInfo::VLT_STK:
+ *NativeVarStackAddr(varLoc,pCtx) = val1;
+ break;
+
+ case ICorDebugInfo::VLT_STK2:
+ *NativeVarStackAddr(varLoc,pCtx) = val1;
+ *(NativeVarStackAddr(varLoc,pCtx)+ 1) = val2;
+ break;
+
+ case ICorDebugInfo::VLT_REG_REG:
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlRegReg.vlrrReg1);
+ *(SIZE_T *)(regOffs + (BYTE*)pCtx) = val1;
+ LOG((LF_CORDB, LL_INFO100, "SNVV: STK_REG_REG 1 @ 0x%x\n",
+ (SIZE_T *)(regOffs + (BYTE*)pCtx)));
+
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlRegReg.vlrrReg2);
+ *(SIZE_T *)(regOffs + (BYTE*)pCtx) = val2;
+ LOG((LF_CORDB, LL_INFO100, "SNVV: STK_REG_REG 2 @ 0x%x\n",
+ (SIZE_T *)(regOffs + (BYTE*)pCtx)));
+ break;
+
+ case ICorDebugInfo::VLT_REG_STK:
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlRegStk.vlrsReg);
+ *(SIZE_T *)(regOffs + (BYTE*)pCtx) = val1;
+ LOG((LF_CORDB, LL_INFO100, "SNVV: STK_REG_STK reg @ 0x%x\n",
+ (SIZE_T *)(regOffs + (BYTE*)pCtx)));
+ *NativeVarStackAddr(varLoc,pCtx) = val2;
+ break;
+
+ case ICorDebugInfo::VLT_STK_REG:
+ *NativeVarStackAddr(varLoc,pCtx) = val1;
+ regOffs = GetRegOffsInCONTEXT(varLoc.vlStkReg.vlsrReg);
+ *(SIZE_T *)(regOffs + (BYTE*)pCtx) = val2;
+ LOG((LF_CORDB, LL_INFO100, "SNVV: STK_STK_REG reg @ 0x%x\n",
+ (SIZE_T *)(regOffs + (BYTE*)pCtx)));
+ break;
+
+ case ICorDebugInfo::VLT_FPSTK:
+ _ASSERTE(!"NYI"); break;
+
+#else // _WIN64
+ case ICorDebugInfo::VLT_REG:
+ case ICorDebugInfo::VLT_REG_FP:
+ case ICorDebugInfo::VLT_STK:
+ SetNativeVarValHelper(NativeVarStackAddr(varLoc, pCtx), val1, val2, cbSize);
+ break;
+
+ case ICorDebugInfo::VLT_REG_BYREF: // fall through
+ case ICorDebugInfo::VLT_STK_BYREF:
+ _ASSERTE(!"GNVV: This function should not be called for value types");
+ break;
+
+#endif // _WIN64
+
+ default:
+ _ASSERTE(!"Bad locType"); break;
+ }
+
+ return true;
+}
+
+HRESULT VMPostError( // Returned error.
+ HRESULT hrRpt, // Reported error.
+ ...) // Error arguments.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ va_list marker; // User text.
+ va_start(marker, hrRpt);
+ hrRpt = PostErrorVA(hrRpt, marker);
+ va_end(marker);
+
+ return hrRpt;
+}
+
+#ifndef CROSSGEN_COMPILE
+void VMDumpCOMErrors(HRESULT hrErr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(FAILED(hrErr));
+ }
+ CONTRACTL_END;
+
+ SafeComHolderPreemp<IErrorInfo> pIErr(NULL);// Error interface.
+ BSTRHolder bstrDesc(NULL); // Description text.
+
+ // Try to get an error info object and display the message.
+ if (SafeGetErrorInfo(&pIErr) == S_OK && pIErr->GetDescription(&bstrDesc) == S_OK)
+ {
+ EEMessageBoxCatastrophic(IDS_EE_GENERIC, IDS_FATAL_ERROR, (BSTR)bstrDesc);
+ }
+ else
+ {
+ // Just give out the failed hr return code.
+ EEMessageBoxCatastrophic(IDS_COMPLUS_ERROR, IDS_FATAL_ERROR, hrErr);
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Helper method to load mscorsn.dll. It is used when an app requests a legacy
+// mode where mscorsn.dll is to be loaded during startup.
+//-----------------------------------------------------------------------------
+const WCHAR g_pwzOldStrongNameLibrary[] = W("mscorsn.dll");
+#define cchOldStrongNameLibrary ( \
+ (sizeof(g_pwzOldStrongNameLibrary)/sizeof(WCHAR)))
+
+HRESULT LoadMscorsn()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+        INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ DWORD size = 0;
+ HRESULT hr = GetInternalSystemDirectory(NULL, &size);
+ if (hr != HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ return hr;
+
+ DWORD dwLength = size + cchOldStrongNameLibrary;
+ if (dwLength < size)
+ return HRESULT_FROM_WIN32(ERROR_ARITHMETIC_OVERFLOW);
+ NewArrayHolder<WCHAR> wszPath(new (nothrow) WCHAR[dwLength]);
+ if (!wszPath)
+ return E_OUTOFMEMORY;
+
+ hr = GetInternalSystemDirectory(wszPath, &size);
+ if (FAILED(hr))
+ return hr;
+
+ wcscat_s(wszPath, dwLength, g_pwzOldStrongNameLibrary);
+ CLRLoadLibrary(wszPath);
+ return S_OK;
+}
+
+#ifndef FEATURE_PAL
+
+//-----------------------------------------------------------------------------
+// WszSHGetFolderPath
+//
+// @func takes the CSIDL of a folder and returns the path name
+//
+// @rdesc Result Handle
+//-----------------------------------------------------------------------------------
+HRESULT WszSHGetFolderPath(
+ HWND hwndOwner,
+ int nFolder,
+ HANDLE hToken,
+ DWORD dwFlags,
+ size_t cchPathMax,
+ __out_ecount(MAX_PATH) LPWSTR pwszPath)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ // SHGetFolderPath requirement: path buffer >= MAX_PATH chars
+ _ASSERTE(cchPathMax >= MAX_PATH);
+
+ HRESULT hr;
+ ULONG maxLength = MAX_PATH;
+ HMODULE _hmodShell32 = 0;
+ HMODULE _hmodSHFolder = 0;
+
+ ETWOnStartup (LdLibShFolder_V1, LdLibShFolderEnd_V1);
+
+ typedef HRESULT (*PFNSHGETFOLDERPATH_W) (HWND hwndOwner, int nFolder, HANDLE hToken, DWORD dwFlags, LPWSTR pszPath);
+ static PFNSHGETFOLDERPATH_W pfnW = NULL;
+ if (NULL == pfnW)
+ {
+ _hmodShell32 = CLRLoadLibrary(W("shell32.dll"));
+
+ if (_hmodShell32)
+ pfnW = (PFNSHGETFOLDERPATH_W)GetProcAddress(_hmodShell32, "SHGetFolderPathW");
+
+ if (NULL == pfnW)
+ {
+ if (NULL == _hmodSHFolder)
+ _hmodSHFolder = CLRLoadLibrary(W("shfolder.dll"));
+
+ if (_hmodSHFolder)
+ pfnW = (PFNSHGETFOLDERPATH_W)GetProcAddress(_hmodSHFolder, "SHGetFolderPathW");
+ }
+ }
+
+ if (pfnW)
+ hr = pfnW(hwndOwner, nFolder, hToken, dwFlags, pwszPath);
+ else
+ hr = HRESULT_FROM_WIN32(GetLastError());
+
+ // NOTE: We leak the module handles and let the OS gather them at process shutdown.
+
+ return hr;
+}
+
+//-----------------------------------------------------------------------------
+// WszShellExecute
+//
+// @func calls ShellExecute with the provided parameters
+//
+// @rdesc Result
+//-----------------------------------------------------------------------------------
+HRESULT WszShellExecute(
+ HWND hwnd,
+ LPCTSTR lpOperation,
+ LPCTSTR lpFile,
+ LPCTSTR lpParameters,
+ LPCTSTR lpDirectory,
+ INT nShowCmd)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ HMODULE _hmodShell32 = 0;
+
+ typedef HINSTANCE (*PFNSHELLEXECUTE_W) (HWND hwnd, LPCTSTR lpOperation, LPCTSTR lpFile, LPCTSTR lpParameters, LPCTSTR lpDirectory, INT nShowCmd);
+ static PFNSHELLEXECUTE_W pfnW = NULL;
+ if (NULL == pfnW)
+ {
+ _hmodShell32 = CLRLoadLibrary(W("shell32.dll"));
+
+ if (_hmodShell32)
+ pfnW = (PFNSHELLEXECUTE_W)GetProcAddress(_hmodShell32, "ShellExecuteW");
+ }
+
+ if (pfnW)
+ {
+ HINSTANCE hSE = pfnW(hwnd, lpOperation, lpFile, lpParameters, lpDirectory, nShowCmd);
+
+ if ((int) hSE <= 32)
+ {
+ hr = HRESULT_FROM_WIN32((int) hSE);
+ }
+ }
+ else
+ {
+ hr = HRESULT_FROM_WIN32(GetLastError());
+ }
+
+ // NOTE: We leak the module handles and let the OS gather them at process shutdown.
+
+ return hr;
+}
+
+#ifndef DACCESS_COMPILE
+//-----------------------------------------------------------------------------
+
+//-----------------------------------------------------------------------------
+// WszShellExecuteEx
+//
+// @func calls ShellExecuteEx with the provided parameters
+//
+// @rdesc Result
+//-----------------------------------------------------------------------------------
+HRESULT WszShellExecuteEx(
+ LPSHELLEXECUTEINFO lpExecInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(return E_OUTOFMEMORY;);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ HMODULE _hmodShell32 = 0;
+
+ typedef BOOL (*PFNSHELLEXECUTEEX_W) (LPSHELLEXECUTEINFO lpExecInfo);
+ static PFNSHELLEXECUTEEX_W pfnW = NULL;
+ if (NULL == pfnW)
+ {
+ _hmodShell32 = CLRLoadLibrary(W("shell32.dll"));
+
+ if (_hmodShell32)
+ pfnW = (PFNSHELLEXECUTEEX_W)GetProcAddress(_hmodShell32, "ShellExecuteExW");
+ }
+
+ if (pfnW)
+ {
+ BOOL bSE = pfnW(lpExecInfo);
+
+        if (!bSE)
+ {
+ hr = HRESULT_FROM_WIN32(GetLastError());
+ }
+ }
+ else
+ {
+ hr = HRESULT_FROM_WIN32(GetLastError());
+ }
+
+ // NOTE: We leak the module handles and let the OS gather them at process shutdown.
+
+ return hr;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+BOOL IsUsingValidAppDataPath(__in_z WCHAR *userPath)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ WCHAR defaultPath[MAX_PATH];
+ HRESULT hr;
+ HANDLE hToken;
+
+ hToken = (HANDLE)(-1);
+
+ hr = WszSHGetFolderPath(NULL, CSIDL_APPDATA, hToken, SHGFP_TYPE_CURRENT, MAX_PATH, defaultPath);
+ if (FAILED(hr))
+ {
+ hr = WszSHGetFolderPath(NULL, CSIDL_APPDATA, hToken, SHGFP_TYPE_DEFAULT, MAX_PATH, defaultPath);
+ }
+ if (FAILED(hr))
+ return FALSE;
+
+ int result = wcscmp(defaultPath, userPath);
+
+ return result != 0;
+}
+
+#define FOLDER_LOCAL_SETTINGS_W W("Local Settings")
+#define FOLDER_APP_DATA_W W("\\Application Data")
+#define FOLDER_APP_DATA "\\Application Data"
+
+// Gets the location for roaming and local AppData
+BOOL GetUserDir(__out_ecount(bufferCount) WCHAR * buffer, size_t bufferCount, BOOL fRoaming)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ INJECT_FAULT(return FALSE;);
+ }
+ CONTRACTL_END;
+
+ // SHGetFolderPath will return the default user profile if the context is that of a user
+ // without a user profile. Since we never want to end up writing files into the default profile
+ // which is used as a template for future user profiles, we first try to find out if the user
+ // profile is not loaded; and if that's the case we return an error.
+
+ if (!IsUserProfileLoaded())
+ return FALSE;
+
+ HRESULT hr;
+
+ // In Windows ME, there is currently a bug that makes local appdata and roaming appdata
+ // point to the same location, so we've decided to "do our own thing" and add \Local Settings before \Application Data
+ if (!fRoaming) {
+ WCHAR appdatafolder[MAX_PATH];
+ hr = WszSHGetFolderPath(NULL, CSIDL_APPDATA|CSIDL_FLAG_CREATE, NULL, SHGFP_TYPE_CURRENT, MAX_PATH, appdatafolder);
+ if (FAILED(hr))
+ {
+ hr = WszSHGetFolderPath(NULL, CSIDL_APPDATA|CSIDL_FLAG_CREATE, NULL, SHGFP_TYPE_DEFAULT, MAX_PATH, appdatafolder);
+ }
+ if (FAILED(hr))
+ return FALSE;
+ hr = WszSHGetFolderPath(NULL, CSIDL_LOCAL_APPDATA|CSIDL_FLAG_CREATE, NULL, SHGFP_TYPE_CURRENT, bufferCount, buffer);
+ if (FAILED(hr))
+ {
+ hr = WszSHGetFolderPath(NULL, CSIDL_LOCAL_APPDATA|CSIDL_FLAG_CREATE, NULL, SHGFP_TYPE_DEFAULT, bufferCount, buffer);
+ }
+ if (FAILED(hr))
+ return FALSE;
+
+        // The roaming and local folders are the same, so apply the "Local Settings" workaround described above
+
+ if (!wcscmp(appdatafolder, buffer))
+ {
+ WCHAR tempPartialPath[MAX_PATH];
+ ULONG slen = (ULONG)wcslen(buffer);
+
+ if (buffer[slen - 1] == W('\\'))
+ {
+ --slen;
+ }
+
+ // Search for the parent directory.
+
+ WCHAR* parentDirectoryEnd = &buffer[slen - 1];
+ tempPartialPath[0] = W('\0');
+
+ for (ULONG index = slen - 1; index > 0; --index)
+ {
+ if (buffer[index] == W('\\'))
+ {
+ if (wcslen(&buffer[index]) >= NumItems(tempPartialPath))
+ {
+ _ASSERTE(!"Buffer not large enough");
+ return FALSE;
+ }
+
+ wcscpy_s( tempPartialPath, COUNTOF(tempPartialPath), &buffer[index] );
+ parentDirectoryEnd = &buffer[index+1];
+ break;
+ }
+ }
+
+ // Create the intermediate directory if it is not present
+ if ((parentDirectoryEnd + wcslen(FOLDER_LOCAL_SETTINGS_W)) >= (buffer + bufferCount))
+ {
+ _ASSERTE(!"Buffer not large enough");
+ return FALSE;
+ }
+
+ SIZE_T cchSafe;
+ // Prefast overflow sanity check the subtraction.
+ if (!ClrSafeInt<SIZE_T>::subtraction(bufferCount, (parentDirectoryEnd - buffer), cchSafe))
+ {
+ _ASSERTE(!"ClrSafeInt: Buffer is not large enough");
+ return FALSE;
+ }
+
+ wcscpy_s(parentDirectoryEnd, cchSafe, FOLDER_LOCAL_SETTINGS_W);
+
+ LONG lresult;
+
+ {
+ // Check if the directory is already present
+ lresult = WszGetFileAttributes(buffer);
+
+ if (lresult == -1)
+ {
+ if (!WszCreateDirectory(buffer, NULL) &&
+ !(WszGetFileAttributes(buffer) & FILE_ATTRIBUTE_DIRECTORY))
+ return FALSE;
+ }
+ else if ((lresult & FILE_ATTRIBUTE_DIRECTORY) == 0)
+ {
+ return FALSE;
+ }
+ }
+ if ((bufferCount - wcslen(buffer)) <= wcslen(tempPartialPath))
+ {
+ _ASSERTE(!"Buffer not large enough");
+ return FALSE;
+ }
+
+ wcscat_s(buffer, bufferCount, tempPartialPath);
+
+ // Check if the directory is already present
+ lresult = WszGetFileAttributes(buffer);
+
+ if (lresult == -1)
+ {
+ if (!WszCreateDirectory(buffer, NULL) &&
+ !(WszGetFileAttributes(buffer) & FILE_ATTRIBUTE_DIRECTORY))
+ return FALSE;
+ }
+ else if ((lresult & FILE_ATTRIBUTE_DIRECTORY) == 0)
+ {
+ return FALSE;
+ }
+ }
+ }
+ else {
+ hr = WszSHGetFolderPath(NULL, CSIDL_APPDATA|CSIDL_FLAG_CREATE, NULL, SHGFP_TYPE_CURRENT, bufferCount, buffer);
+ if (FAILED(hr))
+ {
+ hr = WszSHGetFolderPath(NULL, CSIDL_APPDATA|CSIDL_FLAG_CREATE, NULL, SHGFP_TYPE_DEFAULT, bufferCount, buffer);
+ }
+ if (FAILED(hr))
+ return FALSE;
+
+ if (!IsUsingValidAppDataPath(buffer))
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+const WCHAR PROFILE_LIST_PATH[] = W("Software\\Microsoft\\Windows NT\\CurrentVersion\\ProfileList\\");
+#define nProfileListPathKeyLen ( \
+ sizeof(PROFILE_LIST_PATH)/sizeof(WCHAR))
+
+HRESULT GetUserSidString (HANDLE hToken, __deref_out LPWSTR *pwszSid) {
+ DWORD dwSize = 0;
+ GetTokenInformation(hToken, TokenUser, NULL, 0, &dwSize);
+ NewArrayHolder<BYTE> pb(new (nothrow) BYTE[dwSize]);
+ if (pb == NULL)
+ return E_OUTOFMEMORY;
+ if (!GetTokenInformation(hToken, TokenUser, pb, dwSize, &dwSize))
+ return HRESULT_FROM_GetLastError();
+
+ PTOKEN_USER pUser = (PTOKEN_USER) pb.GetValue();
+
+ typedef BOOL (*CONVERTSIDTOSTRINGSID_W) (PSID Sid, LPWSTR* StringSid);
+ static CONVERTSIDTOSTRINGSID_W pfnW = NULL;
+ if (NULL == pfnW) {
+ HMODULE hModAdvapi32 = CLRLoadLibrary(W("advapi32.dll"));
+ if (hModAdvapi32)
+ pfnW = (CONVERTSIDTOSTRINGSID_W) GetProcAddress(hModAdvapi32, "ConvertSidToStringSidW");
+ }
+
+ if (!pfnW)
+ return E_NOTIMPL;
+ if (!pfnW(pUser->User.Sid, pwszSid))
+ return HRESULT_FROM_GetLastError();
+ return S_OK;
+}
+
+BOOL IsUserProfileLoaded() {
+ HandleHolder hToken;
+ if (!OpenThreadToken(GetCurrentThread(), TOKEN_QUERY, TRUE, &hToken))
+ if (!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &hToken))
+ return FALSE;
+
+ // Get the SID string
+ LPWSTR wszSid = NULL;
+ if (FAILED(GetUserSidString(hToken, &wszSid)))
+ return FALSE;
+
+ // Concatenate the Sid string with the profile list path
+ size_t cchProfileRegPath = nProfileListPathKeyLen + wcslen(wszSid) + 1;
+ NewArrayHolder<WCHAR> wszProfileRegPath(new (nothrow) WCHAR[cchProfileRegPath]);
+ if (wszProfileRegPath == NULL) {
+#undef LocalFree
+ LocalFree(wszSid);
+#define LocalFree(hMem) Dont_Use_LocalFree(hMem)
+ return FALSE;
+ }
+ wcscpy_s(wszProfileRegPath, cchProfileRegPath, PROFILE_LIST_PATH);
+ wcscat_s(wszProfileRegPath, cchProfileRegPath, wszSid);
+
+#undef LocalFree
+ LocalFree(wszSid);
+#define LocalFree(hMem) Dont_Use_LocalFree(hMem)
+
+ // Open the user profile registry key
+ HKEYHolder hKey;
+ return (WszRegOpenKeyEx(HKEY_LOCAL_MACHINE, wszProfileRegPath, 0, KEY_READ, &hKey) == ERROR_SUCCESS);
+}
+
+BOOL GetInternetCacheDir(__out_ecount(bufferCount) WCHAR * buffer, size_t bufferCount)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ INJECT_FAULT(return FALSE;);
+ }
+ CONTRACTL_END;
+
+ _ASSERTE( bufferCount == MAX_PATH && "You should pass in a buffer of size MAX_PATH" );
+
+ HRESULT hr = WszSHGetFolderPath( NULL, CSIDL_INTERNET_CACHE, NULL, SHGFP_TYPE_CURRENT, bufferCount, buffer );
+ if (FAILED(hr))
+ hr = WszSHGetFolderPath( NULL, CSIDL_INTERNET_CACHE, NULL, SHGFP_TYPE_DEFAULT, bufferCount, buffer );
+
+ return SUCCEEDED(hr);
+}
+
+//-----------------------------------------------------------------------------
+// Wrap registry functions to use CQuickWSTR to allocate space. This does it
+// in a stack friendly manner.
+//-----------------------------------------------------------------------------
+LONG UtilRegEnumKey(HKEY hKey, // handle to key to query
+ DWORD dwIndex, // index of subkey to query
+ CQuickWSTR* lpName) // buffer for subkey name
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return ERROR_NOT_ENOUGH_MEMORY;);
+ }
+ CONTRACTL_END;
+
+ DWORD size = (DWORD)lpName->MaxSize();
+ LONG result = WszRegEnumKeyEx(hKey,
+ dwIndex,
+ lpName->Ptr(),
+ &size,
+ NULL,
+ NULL,
+ NULL,
+ NULL);
+
+ if (result == ERROR_SUCCESS || result == ERROR_MORE_DATA) {
+
+ // Grow or shrink buffer to correct size
+ if (lpName->ReSizeNoThrow(size+1) != NOERROR)
+ result = ERROR_NOT_ENOUGH_MEMORY;
+
+ if (result == ERROR_MORE_DATA) {
+ size = (DWORD)lpName->MaxSize();
+ result = WszRegEnumKeyEx(hKey,
+ dwIndex,
+ lpName->Ptr(),
+ &size,
+ NULL,
+ NULL,
+ NULL,
+ NULL);
+ }
+ }
+
+ return result;
+}
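+
+// Typical use (sketch): the CQuickWSTR is resized as needed, so subkeys can
+// be enumerated without sizing a buffer up front:
+//
+//     CQuickWSTR name;
+//     for (DWORD i = 0; UtilRegEnumKey(hKey, i, &name) == ERROR_SUCCESS; i++)
+//     {
+//         /* use name.Ptr() */
+//     }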
+
+LONG UtilRegQueryStringValueEx(HKEY hKey, // handle to key to query
+ LPCWSTR lpValueName, // address of name of value to query
+ LPDWORD lpReserved, // reserved
+ LPDWORD lpType, // address of buffer for value type
+ CQuickWSTR* lpData)// data buffer
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ INJECT_FAULT(return ERROR_NOT_ENOUGH_MEMORY;);
+ }
+ CONTRACTL_END;
+
+ DWORD size = (DWORD)lpData->MaxSize();
+ LONG result = WszRegQueryValueEx(hKey,
+ lpValueName,
+ lpReserved,
+ lpType,
+ (LPBYTE) lpData->Ptr(),
+ &size);
+
+ if (result == ERROR_SUCCESS || result == ERROR_MORE_DATA) {
+
+ // Grow or shrink buffer to correct size
+ if (lpData->ReSizeNoThrow(size+1) != NOERROR)
+ result = ERROR_NOT_ENOUGH_MEMORY;
+
+ if (result == ERROR_MORE_DATA) {
+ size = (DWORD)lpData->MaxSize();
+ result = WszRegQueryValueEx(hKey,
+ lpValueName,
+ lpReserved,
+ lpType,
+ (LPBYTE) lpData->Ptr(),
+ &size);
+ }
+ }
+
+ return result;
+}
+
+BOOL ReportEventCLR(
+ WORD wType,
+ WORD wCategory,
+ DWORD dwEventID,
+ PSID lpUserSid,
+ SString * message)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ GCX_PREEMP();
+
+ SString buff;
+ buff.Printf(W(".NET Runtime version %s - %s"), VER_FILEVERSION_STR_L, message->GetUnicode());
+
+ DWORD dwRetVal = ClrReportEvent(W(".NET Runtime"),
+ wType, // event type
+ wCategory, // category
+ dwEventID, // event identifier
+ lpUserSid, // user security identifier
+ buff.GetUnicode()); // one substitution string
+
+ // Return BOOLEAN based upon return code
+ return (dwRetVal == ERROR_SUCCESS)?TRUE:FALSE;
+}
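+
+// Caller sketch (illustrative; the event id and message are placeholders):
+//
+//     SString msg;
+//     msg.Printf(W("Fatal error 0x%08x"), hr);
+//     ReportEventCLR(EVENTLOG_ERROR_TYPE, 0, 1023, NULL, &msg);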
+
+// This function checks whether the GetLogicalProcessorInformation API is supported.
+// On success, it allocates an SLPI array, sets nEntries to the number of elements
+// in the array, and returns a pointer to the array after filling it with information.
+//
+// Note: If successful, IsGLPISupported allocates memory for the SLPI array and expects the caller to
+// free the memory once the caller is done using the information in the SLPI array.
+//
+// If the API is not supported, or on any failure, this returns NULL.
+//
+SYSTEM_LOGICAL_PROCESSOR_INFORMATION *IsGLPISupported( PDWORD nEntries )
+{
+ DWORD cbslpi = 0;
+ DWORD dwNumElements = 0;
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION *pslpi = NULL;
+
+    // We set up the first call to GetLogicalProcessorInformation to fail so that we can obtain
+    // the size of the buffer required to allocate for the SLPI array that is returned
+
+ if (!GetLogicalProcessorInformation(pslpi, &cbslpi) &&
+ GetLastError() != ERROR_INSUFFICIENT_BUFFER)
+ {
+ // If we fail with anything other than an ERROR_INSUFFICIENT_BUFFER here, we punt with failure.
+ return NULL;
+ }
+
+ _ASSERTE(cbslpi);
+
+ // compute the number of SLPI entries required to hold the information returned from GLPI
+
+ dwNumElements = cbslpi / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
+
+    // allocate a buffer on the heap to hold an array of SLPI entries from GLPI; the number of elements in the array is dwNumElements
+
+ pslpi = new (nothrow) SYSTEM_LOGICAL_PROCESSOR_INFORMATION[ dwNumElements ];
+
+ if(pslpi == NULL)
+ {
+ // the memory allocation failed
+ return NULL;
+ }
+
+ // Make call to GetLogicalProcessorInformation. Returns array of SLPI structures
+
+ if (!GetLogicalProcessorInformation(pslpi, &cbslpi))
+ {
+ // GetLogicalProcessorInformation failed
+        delete[] pslpi; // the allocation was fine, but the API call itself failed, so release the memory before returning NULL
+ return NULL ;
+ }
+
+ // GetLogicalProcessorInformation successful, set nEntries to number of entries in the SLPI array
+ *nEntries = dwNumElements;
+
+ return pslpi; // return pointer to SLPI array
+
+}//IsGLPISupported
+
+// This function returns the size of highest level cache on the physical chip. If it cannot
+// determine the cachesize this function returns 0.
+size_t GetLogicalProcessorCacheSizeFromOS()
+{
+ size_t cache_size = 0;
+ DWORD nEntries = 0;
+
+ // Try to use GetLogicalProcessorInformation API and get a valid pointer to the SLPI array if successful. Returns NULL
+ // if API not present or on failure.
+
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION *pslpi = IsGLPISupported(&nEntries) ;
+
+ if (pslpi == NULL)
+ {
+ // GetLogicalProcessorInformation not supported or failed.
+ goto Exit;
+ }
+
+ // Crack the information. Iterate through all the SLPI array entries for all processors in system.
+ // Will return the greatest of all the processor cache sizes or zero
+
+ size_t last_cache_size = 0;
+
+ for (DWORD i=0; i < nEntries; i++)
+ {
+ if (pslpi[i].Relationship == RelationCache)
+ {
+ last_cache_size = max(last_cache_size, pslpi[i].Cache.Size);
+ }
+ }
+ cache_size = last_cache_size;
+Exit:
+
+ if(pslpi)
+ delete[] pslpi; // release the memory allocated for the SLPI array.
+
+ return cache_size;
+}
+
+#endif // !FEATURE_PAL
+
+// This function returns the number of logical processors on a given physical chip. If it cannot
+// determine the number of logical cpus, or the machine is not populated uniformly with the same
+// type of processors, this function returns 0.
+
+DWORD GetLogicalCpuCountFromOS()
+{
+ // No CONTRACT possible because GetLogicalCpuCount uses SEH
+
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ static DWORD val = 0;
+ DWORD retVal = 0;
+
+// UNIXTODO: Implement the functionality in PAL?
+#ifndef FEATURE_PAL
+
+ DWORD nEntries = 0;
+
+ // Try to use GetLogicalProcessorInformation API and get a valid pointer to the SLPI array if successful. Returns NULL
+ // if API not present or on failure.
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION *pslpi = IsGLPISupported(&nEntries) ;
+
+ if (pslpi == NULL)
+ {
+        // GetLogicalProcessorInformation not supported or failed
+ goto lDone;
+ }
+
+ DWORD prevcount = 0;
+ DWORD count = 1;
+
+ for (DWORD j = 0; j < nEntries; j++)
+ {
+ if (pslpi[j].Relationship == RelationProcessorCore)
+ {
+ // LTP_PC_SMT indicates HT or SMT
+ if (pslpi[j].ProcessorCore.Flags == LTP_PC_SMT)
+ {
+ SIZE_T pmask = pslpi[j].ProcessorMask;
+
+ // Count the processors in the mask
+ //
+ // These are not the fastest bit counters. There may be processor intrinsics
+ // (which would be best), but there are variants faster than these:
+ // See http://en.wikipedia.org/wiki/Hamming_weight.
+ // This is the naive implementation.
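+                    // (A population-count intrinsic, e.g. MSVC's __popcnt /
+                    // __popcnt64 from <intrin.h>, could replace the shift-and-add
+                    // sequence below on CPUs with POPCNT support; the portable
+                    // version is kept here.)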
+#if !_WIN64
+ count = (pmask & 0x55555555) + ((pmask >> 1) & 0x55555555);
+ count = (count & 0x33333333) + ((count >> 2) & 0x33333333);
+ count = (count & 0x0F0F0F0F) + ((count >> 4) & 0x0F0F0F0F);
+ count = (count & 0x00FF00FF) + ((count >> 8) & 0x00FF00FF);
+ count = (count & 0x0000FFFF) + ((count >> 16)& 0x0000FFFF);
+#else
+ pmask = (pmask & 0x5555555555555555ull) + ((pmask >> 1) & 0x5555555555555555ull);
+ pmask = (pmask & 0x3333333333333333ull) + ((pmask >> 2) & 0x3333333333333333ull);
+ pmask = (pmask & 0x0f0f0f0f0f0f0f0full) + ((pmask >> 4) & 0x0f0f0f0f0f0f0f0full);
+ pmask = (pmask & 0x00ff00ff00ff00ffull) + ((pmask >> 8) & 0x00ff00ff00ff00ffull);
+ pmask = (pmask & 0x0000ffff0000ffffull) + ((pmask >> 16) & 0x0000ffff0000ffffull);
+ pmask = (pmask & 0x00000000ffffffffull) + ((pmask >> 32) & 0x00000000ffffffffull);
+ count = static_cast<DWORD>(pmask);
+#endif // !_WIN64 else
+ assert (count > 0);
+
+ if (prevcount)
+ {
+ if (count != prevcount)
+ {
+ retVal = 1; // masks are not symmetric
+ goto lDone;
+ }
+ }
+
+ prevcount = count;
+ }
+ }
+ }
+
+ retVal = count;
+
+lDone:
+
+ if(pslpi)
+ {
+ delete[] pslpi; // release the memory allocated for the SLPI array
+ }
+
+#endif // FEATURE_PAL
+
+ return retVal;
+}
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
+#define CACHE_WAY_BITS 0xFFC00000 // number of cache WAYS-Associativity is returned in EBX[31:22] (10 bits) using cpuid function 4
+#define CACHE_PARTITION_BITS 0x003FF000 // number of cache Physical Partitions is returned in EBX[21:12] (10 bits) using cpuid function 4
+#define CACHE_LINESIZE_BITS 0x00000FFF // Linesize returned in EBX[11:0] (12 bits) using cpuid function 4
+
+#if defined(_TARGET_X86_)
+ // these are defined in cgenx86.cpp
+ extern DWORD getcpuid(DWORD arg1, unsigned char result[16]);
+ extern DWORD getextcpuid(DWORD arg1, DWORD arg2, unsigned char result[16]);
+#elif defined(_TARGET_AMD64_)
+ // these are defined in src\VM\AMD64\asmhelpers.asm
+ extern "C" DWORD __stdcall getcpuid(DWORD arg1, unsigned char result[16]);
+ extern "C" DWORD __stdcall getextcpuid(DWORD arg1, DWORD arg2, unsigned char result[16]);
+#endif
+
+
+// The following function uses a deterministic mechanism for enumerating/calculating the details of the cache hierarchy at runtime
+// by using deterministic cache parameter leaves on Prescott and higher processors.
+// If successful, this function returns the cache size in bytes of the highest level on-die cache. Returns 0 on failure.
+
+size_t GetIntelDeterministicCacheEnum()
+{
+ LIMITED_METHOD_CONTRACT;
+ size_t retVal = 0;
+ unsigned char buffer[16];
+
+ DWORD maxCpuid = getextcpuid(0,0,buffer);
+
+ DWORD* dwBuffer = (DWORD*)buffer;
+
+ if( (maxCpuid > 3) && (maxCpuid < 0x80000000) ) // Deterministic Cache Enum is Supported
+ {
+ DWORD dwCacheWays, dwCachePartitions, dwLineSize, dwSets;
+ DWORD retEAX = 0;
+ DWORD loopECX = 0;
+ size_t maxSize = 0;
+ size_t curSize = 0;
+
+        // Make the first call to getextcpuid with loopECX=0. loopECX provides an index indicating which level to return information about.
+ // The second parameter is input EAX=4, to specify we want deterministic cache parameter leaf information.
+ // getextcpuid with EAX=4 should be executed with loopECX = 0,1, ... until retEAX [4:0] contains 00000b, indicating no more
+ // cache levels are supported.
+
+ getextcpuid(loopECX, 4, buffer);
+ retEAX = dwBuffer[0]; // get EAX
+
+ int i = 0;
+ while(retEAX & 0x1f) // Crack cache enums and loop while EAX > 0
+ {
+
+ dwCacheWays = (dwBuffer[1] & CACHE_WAY_BITS) >> 22;
+ dwCachePartitions = (dwBuffer[1] & CACHE_PARTITION_BITS) >> 12;
+ dwLineSize = dwBuffer[1] & CACHE_LINESIZE_BITS;
+ dwSets = dwBuffer[2]; // ECX
+
+ curSize = (dwCacheWays+1)*(dwCachePartitions+1)*(dwLineSize+1)*(dwSets+1);
+
+ if (maxSize < curSize)
+ maxSize = curSize;
+
+ loopECX++;
+ getextcpuid(loopECX, 4, buffer);
+ retEAX = dwBuffer[0] ; // get EAX[4:0];
+ i++;
+ if (i > 16) // prevent infinite looping
+ return 0;
+ }
+ retVal = maxSize;
+ }
+
+ return retVal ;
+}
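+
+// Worked example (illustrative): a level reporting ways=7, partitions=0,
+// line size=63 and sets=4095 yields
+// (7+1)*(0+1)*(63+1)*(4095+1) = 2,097,152 bytes (a 2MB cache).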
+
+// The following function uses CPUID function 2 with descriptor values to determine the cache size. This requires a-priori
+// knowledge of the descriptor values. This works on gallatin and prior processors (already released processors).
+// If successful, this function returns the cache size in bytes of the highest level on-die cache. Returns 0 on failure.
+
+size_t GetIntelDescriptorValuesCache()
+{
+ LIMITED_METHOD_CONTRACT;
+ size_t size = 0;
+ size_t maxSize = 0;
+ unsigned char buffer[16];
+
+ getextcpuid(0,2, buffer); // call CPUID with EAX function 2H to obtain cache descriptor values
+
+ for (int i = buffer[0]; --i >= 0; )
+ {
+ int j;
+ for (j = 3; j < 16; j += 4)
+ {
+ // if the information in a register is marked invalid, set to null descriptors
+ if (buffer[j] & 0x80)
+ {
+ buffer[j-3] = 0;
+ buffer[j-2] = 0;
+ buffer[j-1] = 0;
+ buffer[j-0] = 0;
+ }
+ }
+
+ for (j = 1; j < 16; j++)
+ {
+ switch (buffer[j]) // need to add descriptor values for 8M and 12M when they become known
+ {
+ case 0x41:
+ case 0x79:
+ size = 128*1024;
+ break;
+
+ case 0x42:
+ case 0x7A:
+ case 0x82:
+ size = 256*1024;
+ break;
+
+ case 0x22:
+ case 0x43:
+ case 0x7B:
+ case 0x83:
+ case 0x86:
+ size = 512*1024;
+ break;
+
+ case 0x23:
+ case 0x44:
+ case 0x7C:
+ case 0x84:
+ case 0x87:
+ size = 1024*1024;
+ break;
+
+ case 0x25:
+ case 0x45:
+ case 0x85:
+ size = 2*1024*1024;
+ break;
+
+ case 0x29:
+ size = 4*1024*1024;
+ break;
+ }
+ if (maxSize < size)
+ maxSize = size;
+ }
+
+ if (i > 0)
+ getextcpuid(0,2, buffer);
+ }
+ return maxSize;
+}
+
+
+
+#define NUM_LOGICAL_BITS 0x00FF0000 // EBX[23:16] Bit 16-23 in ebx contains the number of logical
+ // processors per physical processor (using cpuid function 1)
+#define INITIAL_APIC_ID_BITS 0xFF000000 // EBX[31:24] Bits 24-31 (8 bits) return the 8-bit unique
+ // initial APIC ID for the processor this code is running on.
+ // Default value = 0xff if HT is not supported
+
+// This function uses CPUID function 1 to return the number of logical
+// processors on a given physical chip.
+
+DWORD GetLogicalCpuCountFallback()
+{
+ BYTE LogicalNum = 0;
+ BYTE PhysicalNum = 0;
+ DWORD lProcCounter = 0;
+ unsigned char buffer[16];
+
+ DWORD* dwBuffer = (DWORD*)buffer;
+ DWORD retVal = 1;
+
+ getextcpuid(0,1, buffer); //call CPUID with EAX=1
+
+ if (dwBuffer[3] & (1<<28)) // edx:bit 28 is HT bit
+ {
+ PhysicalNum = (BYTE) g_SystemInfo.dwNumberOfProcessors ; // total # of processors
+ LogicalNum = (BYTE) ((dwBuffer[1] & NUM_LOGICAL_BITS) >> 16); // # of logical per physical
+
+ if(LogicalNum > 1)
+ {
+#ifdef FEATURE_CORESYSTEM
+            // CoreSystem doesn't expose GetProcessAffinityMask or SetProcessAffinityMask or anything
+            // functionally equivalent. Just assume a 1:1 mapping if we get here (in reality we shouldn't, since
+            // all CoreSystems support GetLogicalProcessorInformation, so GetLogicalCpuCountFromOS should have
+            // taken care of everything).
+ goto fDone;
+#else // FEATURE_CORESYSTEM
+ HANDLE hCurrentProcessHandle;
+ DWORD_PTR dwProcessAffinity;
+ DWORD_PTR dwSystemAffinity;
+ DWORD_PTR dwAffinityMask;
+
+ // Calculate the appropriate shifts and mask based on the
+ // number of logical processors.
+
+ BYTE i = 1, PHY_ID_MASK = 0xFF, PHY_ID_SHIFT = 0;
+ while (i < LogicalNum)
+ {
+ i *= 2;
+ PHY_ID_MASK <<= 1;
+ PHY_ID_SHIFT++;
+ }
+ hCurrentProcessHandle = GetCurrentProcess();
+
+ GetProcessAffinityMask(hCurrentProcessHandle, &dwProcessAffinity, &dwSystemAffinity);
+
+ // Check if available process affinity mask is equal to the available system affinity mask
+ // If the masks are equal, then all the processors the OS utilizes are available to the
+ // application.
+
+ if (dwProcessAffinity != dwSystemAffinity)
+ {
+ retVal = 0;
+ goto fDone;
+ }
+
+ dwAffinityMask = 1;
+
+ // loop over all processors, running APIC ID retrieval code starting
+ // with the first one by setting process affinity.
+ while (dwAffinityMask != 0 && dwAffinityMask <= dwProcessAffinity)
+ {
+ // Check if this CPU is available
+ if (dwAffinityMask & dwProcessAffinity)
+ {
+ if (SetProcessAffinityMask(hCurrentProcessHandle, dwAffinityMask))
+ {
+ BYTE APIC_ID, LOG_ID, PHY_ID;
+ __SwitchToThread(0, CALLER_LIMITS_SPINNING); // Give OS time to switch CPU
+
+ getextcpuid(0,1, buffer); //call cpuid with EAX=1
+
+ APIC_ID = (dwBuffer[1] & INITIAL_APIC_ID_BITS) >> 24;
+ LOG_ID = APIC_ID & ~PHY_ID_MASK;
+ PHY_ID = APIC_ID >> PHY_ID_SHIFT;
+ if (LOG_ID != 0)
+ lProcCounter++;
+ }
+ }
+ dwAffinityMask = dwAffinityMask << 1;
+ }
+ // Reset the processor affinity
+
+ SetProcessAffinityMask(hCurrentProcessHandle, dwProcessAffinity);
+
+ // Check if HT is enabled on all the processors
+ if(lProcCounter > 0 && (lProcCounter == (DWORD)(PhysicalNum / LogicalNum)))
+ {
+ retVal = lProcCounter;
+ goto fDone;
+ }
+#endif // FEATURE_CORESYSTEM
+ }
+ }
+fDone:
+
+ return retVal;
+}
+
+#endif // _TARGET_X86_ || _TARGET_AMD64_
+
+size_t GetLargestOnDieCacheSize(BOOL bTrueSize)
+{
+ // No CONTRACT possible because GetLargestOnDieCacheSize uses SEH
+
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+#if defined(_TARGET_AMD64_) || defined (_TARGET_X86_)
+
+ static size_t maxSize;
+ static size_t maxTrueSize;
+
+ if (maxSize)
+ {
+ // maxSize and maxTrueSize cached
+ if (bTrueSize)
+ {
+ return maxTrueSize;
+ }
+ else
+ {
+ return maxSize;
+ }
+ }
+
+ DefaultCatchFilterParam param;
+ param.pv = COMPLUS_EXCEPTION_EXECUTE_HANDLER;
+
+ PAL_TRY(DefaultCatchFilterParam *, pParam, &param)
+ {
+ unsigned char buffer[16];
+ DWORD* dwBuffer = (DWORD*)buffer;
+
+ DWORD maxCpuId = getcpuid(0, buffer);
+
+ if (dwBuffer[1] == 'uneG')
+ {
+ if (dwBuffer[3] == 'Ieni')
+ {
+ if (dwBuffer[2] == 'letn')
+ {
+ /*
+ //The following lines are commented because the OS API on Windows 2003 SP1 is not returning the Cache Relation information on x86.
+ //Once the OS API (LH and above) is updated with this information, we should start using the OS API to get the cache enumeration by
+ //uncommenting the lines below.
+
+ tempSize = GetLogicalProcessorCacheSizeFromOS(); //use OS API for cache enumeration on LH and above
+ */
+ size_t tempSize = 0;
+ if (maxCpuId >= 2) // cpuid support for cache size determination is available
+ {
+ tempSize = GetIntelDeterministicCacheEnum(); // try to use deterministic cache size enumeration
+ if (!tempSize)
+ { // deterministic enumeration failed, fallback to legacy enumeration using descriptor values
+ tempSize = GetIntelDescriptorValuesCache();
+ }
+ }
+
+ // update maxSize once with final value
+ maxTrueSize = tempSize;
+
+#ifdef _WIN64
+ if (maxCpuId >= 2)
+ {
+ // If we're running on a Prescott or greater core, EM64T tests
+ // show that starting with a gen0 larger than LLC improves performance.
+ // Thus, start with a gen0 size that is larger than the cache. The value of
+ // 3 is a reasonable tradeoff between workingset and performance.
+ maxSize = maxTrueSize * 3;
+ }
+ else
+#endif
+ {
+ maxSize = maxTrueSize;
+ }
+ }
+ }
+ }
+
+ if (dwBuffer[1] == 'htuA') {
+ if (dwBuffer[3] == 'itne') {
+ if (dwBuffer[2] == 'DMAc') {
+
+ if (getcpuid(0x80000000, buffer) >= 0x80000006)
+ {
+ getcpuid(0x80000006, buffer);
+
+ DWORD dwL2CacheBits = dwBuffer[2];
+ DWORD dwL3CacheBits = dwBuffer[3];
+
+ maxTrueSize = (size_t)((dwL2CacheBits >> 16) * 1024); // L2 cache size in ECX bits 31-16
+
+ getcpuid(0x1, buffer);
+ DWORD dwBaseFamily = (dwBuffer[0] & (0xF << 8)) >> 8;
+ DWORD dwExtFamily = (dwBuffer[0] & (0xFF << 20)) >> 20;
+ DWORD dwFamily = dwBaseFamily >= 0xF ? dwBaseFamily + dwExtFamily : dwBaseFamily;
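+ // Worked example (hypothetical EAX value 0x00100F42): base family = 0xF,
+ // extended family = 0x01, so dwFamily = 0xF + 0x01 = 0x10 (Family 10h).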
+
+ if (dwFamily >= 0x10)
+ {
+ BOOL bSkipAMDL3 = FALSE;
+
+ if (dwFamily == 0x10) // are we running on a Barcelona (Family 10h) processor?
+ {
+ // check model
+ DWORD dwBaseModel = (dwBuffer[0] & (0xF << 4)) >> 4 ;
+ DWORD dwExtModel = (dwBuffer[0] & (0xF << 16)) >> 16;
+ DWORD dwModel = dwBaseFamily >= 0xF ? (dwExtModel << 4) | dwBaseModel : dwBaseModel;
+
+ switch (dwModel)
+ {
+ case 0x2:
+ // 65nm parts do not benefit from larger Gen0
+ bSkipAMDL3 = TRUE;
+ break;
+
+ case 0x4:
+ default:
+ bSkipAMDL3 = FALSE;
+ }
+ }
+
+ if (!bSkipAMDL3)
+ {
+ // 45nm Greyhound parts (and future parts based on newer northbridge) benefit
+ // from increased gen0 size, taking L3 into account
+ getcpuid(0x80000008, buffer);
+ DWORD dwNumberOfCores = (dwBuffer[2] & (0xFF)) + 1; // NC is in ECX bits 7-0
+
+ DWORD dwL3CacheSize = (DWORD)((dwL3CacheBits >> 18) * 512 * 1024); // L3 size in EDX bits 31-18 * 512KB
+ // L3 is shared between cores
+ dwL3CacheSize = dwL3CacheSize / dwNumberOfCores;
+ maxTrueSize += dwL3CacheSize; // due to exclusive caches, add L3 size (possibly zero) to L2
+ // L1 is too small to worry about, so ignore it
+ }
+ }
+
+
+ maxSize = maxTrueSize;
+ }
+ }
+ }
+ }
+ }
+ PAL_EXCEPT_FILTER(DefaultCatchFilter)
+ {
+ }
+ PAL_ENDTRY
+
+ // printf("GetLargestOnDieCacheSize returns %d, adjusted size %d\n", maxSize, maxTrueSize);
+ if (bTrueSize)
+ return maxTrueSize;
+ else
+ return maxSize;
+
+#else
+
+ size_t cache_size = GetLogicalProcessorCacheSizeFromOS(); // Returns the size of the highest-level processor cache
+ return cache_size;
+
+#endif
+}
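+
+// Illustrative use (the variable names are hypothetical); per the comments above,
+// the non-true size may be scaled up for gen0 sizing on some processors:
+//
+//     size_t trueSize = GetLargestOnDieCacheSize(TRUE);   // raw cache size
+//     size_t gen0Seed = GetLargestOnDieCacheSize(FALSE);  // size adjusted for gen0 sizing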
+
+//---------------------------------------------------------------------
+
+#ifndef FEATURE_PAL
+ThreadLocaleHolder::~ThreadLocaleHolder()
+{
+#ifdef FEATURE_USE_LCID
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostTaskManager *pManager = CorHost2::GetHostTaskManager();
+ if (pManager)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ pManager->SetLocale(m_locale);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+#endif // FEATURE_USE_LCID
+ {
+ SetThreadLocale(m_locale);
+ }
+}
+
+HMODULE CLRGetModuleHandle(LPCWSTR lpModuleFileName)
+{
+ // Don't use dynamic contract: will override GetLastError value
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ ThreadAffinityHolder affinity;
+
+ HMODULE hMod = WszGetModuleHandle(lpModuleFileName);
+ return hMod;
+}
+
+
+HMODULE CLRGetCurrentModuleHandle()
+{
+ // Don't use dynamic contract: will override GetLastError value
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ ThreadAffinityHolder affinity;
+
+ HMODULE hMod = WszGetModuleHandle(NULL);
+ return hMod;
+}
+
+#ifndef FEATURE_CORECLR
+static ICLRRuntimeInfo *GetCLRRuntime()
+{
+ LIMITED_METHOD_CONTRACT;
+ return g_pCLRRuntime;
+}
+#endif // !FEATURE_CORECLR
+
+#endif // !FEATURE_PAL
+
+extern LPVOID EEHeapAllocInProcessHeap(DWORD dwFlags, SIZE_T dwBytes);
+extern BOOL EEHeapFreeInProcessHeap(DWORD dwFlags, LPVOID lpMem);
+extern void ShutdownRuntimeWithoutExiting(int exitCode);
+extern BOOL IsRuntimeStarted(DWORD *pdwStartupFlags);
+
+void * GetCLRFunction(LPCSTR FunctionName)
+{
+
+ void* func = NULL;
+ BEGIN_ENTRYPOINT_VOIDRET;
+
+ LIMITED_METHOD_CONTRACT;
+
+ if (strcmp(FunctionName, "EEHeapAllocInProcessHeap") == 0)
+ {
+ func = (void*)EEHeapAllocInProcessHeap;
+ }
+ else if (strcmp(FunctionName, "EEHeapFreeInProcessHeap") == 0)
+ {
+ func = (void*)EEHeapFreeInProcessHeap;
+ }
+#ifndef FEATURE_CORECLR
+ else if (strcmp(FunctionName, "GetCLRRuntime") == 0)
+ {
+ func = (void*)GetCLRRuntime;
+ }
+#endif // !FEATURE_CORECLR
+ else if (strcmp(FunctionName, "ShutdownRuntimeWithoutExiting") == 0)
+ {
+ func = (void*)ShutdownRuntimeWithoutExiting;
+ }
+ else if (strcmp(FunctionName, "IsRuntimeStarted") == 0)
+ {
+ func = (void*)IsRuntimeStarted;
+ }
+ else {
+ _ASSERTE(!"Unknown function name");
+ func = NULL;
+ }
+ END_ENTRYPOINT_VOIDRET;
+
+ return func;
+}
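+
+// Illustrative use of GetCLRFunction (the local names are hypothetical); the
+// pointer type matches the EEHeapAllocInProcessHeap declaration above:
+//
+//     typedef LPVOID (*PFN_EEHeapAlloc)(DWORD dwFlags, SIZE_T dwBytes);
+//     PFN_EEHeapAlloc pfnAlloc = (PFN_EEHeapAlloc)GetCLRFunction("EEHeapAllocInProcessHeap");
+//     if (pfnAlloc != NULL)
+//     {
+//         LPVOID pMem = pfnAlloc(0, 128);  // allocate 128 bytes from the process heap
+//     }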
+
+#endif // CROSSGEN_COMPILE
+
+LPVOID
+CLRMapViewOfFileEx(
+ IN HANDLE hFileMappingObject,
+ IN DWORD dwDesiredAccess,
+ IN DWORD dwFileOffsetHigh,
+ IN DWORD dwFileOffsetLow,
+ IN SIZE_T dwNumberOfBytesToMap,
+ IN LPVOID lpBaseAddress
+ )
+{
+#ifdef _DEBUG
+#ifdef _TARGET_X86_
+
+ char *tmp = new (nothrow) char;
+ if (!tmp)
+ {
+ SetLastError(ERROR_OUTOFMEMORY);
+ return NULL;
+ }
+ delete tmp;
+
+#endif // _TARGET_X86_
+#endif // _DEBUG
+
+ LPVOID pv = MapViewOfFileEx(hFileMappingObject,dwDesiredAccess,dwFileOffsetHigh,dwFileOffsetLow,dwNumberOfBytesToMap,lpBaseAddress);
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostMemoryManager *memoryManager = CorHost2::GetHostMemoryManager();
+ if (pv == NULL && memoryManager)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ if (SUCCEEDED(memoryManager->NeedsVirtualAddressSpace(lpBaseAddress, dwNumberOfBytesToMap)))
+ {
+ // after host releases VA, let us try again.
+ pv = MapViewOfFileEx(hFileMappingObject,dwDesiredAccess,dwFileOffsetHigh,dwFileOffsetLow,dwNumberOfBytesToMap,lpBaseAddress);
+ }
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ if (!pv)
+ {
+ if(GetLastError()==ERROR_SUCCESS)
+ SetLastError(ERROR_OUTOFMEMORY);
+ return NULL;
+ }
+
+#ifdef _DEBUG
+#ifdef _TARGET_X86_
+ if (pv && g_pConfig && g_pConfig->ShouldInjectFault(INJECTFAULT_MAPVIEWOFFILE))
+ {
+ MEMORY_BASIC_INFORMATION mbi;
+ memset(&mbi, 0, sizeof(mbi));
+ if (!ClrVirtualQuery(pv, &mbi, sizeof(mbi)))
+ {
+ if(GetLastError()==ERROR_SUCCESS)
+ SetLastError(ERROR_OUTOFMEMORY);
+ return NULL;
+ }
+ UnmapViewOfFile(pv);
+ pv = ClrVirtualAlloc(lpBaseAddress, mbi.RegionSize, MEM_RESERVE, PAGE_NOACCESS);
+ }
+ else
+#endif // _TARGET_X86_
+#endif // _DEBUG
+ {
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (memoryManager)
+ {
+ SIZE_T dwNumberOfBytesMapped = 0;
+ // Find out the size of the whole region.
+ LPVOID lpAddr = pv;
+ MEMORY_BASIC_INFORMATION mbi;
+ while (TRUE)
+ {
+ memset(&mbi, 0, sizeof(mbi));
+#undef VirtualQuery
+ if (!::VirtualQuery(lpAddr, &mbi, sizeof(mbi)))
+ {
+ break;
+ }
+#define VirtualQuery(lpAddress, lpBuffer, dwLength) \
+ Dont_Use_VirtualQuery(lpAddress, lpBuffer, dwLength)
+ if (mbi.AllocationBase != pv)
+ {
+ break;
+ }
+ dwNumberOfBytesMapped += mbi.RegionSize;
+ lpAddr = (LPVOID)((BYTE*)lpAddr + mbi.RegionSize);
+ }
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ memoryManager->AcquiredVirtualAddressSpace(pv, dwNumberOfBytesMapped);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ }
+
+ if (!pv && GetLastError()==ERROR_SUCCESS)
+ SetLastError(ERROR_OUTOFMEMORY);
+
+ return pv;
+}
+
+LPVOID
+CLRMapViewOfFile(
+ IN HANDLE hFileMappingObject,
+ IN DWORD dwDesiredAccess,
+ IN DWORD dwFileOffsetHigh,
+ IN DWORD dwFileOffsetLow,
+ IN SIZE_T dwNumberOfBytesToMap
+ )
+{
+ WRAPPER_NO_CONTRACT;
+ return CLRMapViewOfFileEx(hFileMappingObject,dwDesiredAccess,dwFileOffsetHigh,dwFileOffsetLow,dwNumberOfBytesToMap,NULL);
+}
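+
+// Illustrative use (hFileMapping is a hypothetical handle obtained from CreateFileMapping):
+//
+//     LPVOID pView = CLRMapViewOfFile(hFileMapping, FILE_MAP_READ, 0, 0, 0);
+//     if (pView != NULL)
+//     {
+//         // ... read the mapped data ...
+//         CLRUnmapViewOfFile(pView);
+//     }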
+
+
+BOOL
+CLRUnmapViewOfFile(
+ IN LPVOID lpBaseAddress
+ )
+{
+ STATIC_CONTRACT_ENTRY_POINT;
+
+#ifdef _DEBUG
+#ifdef _TARGET_X86_
+ if (g_pConfig && g_pConfig->ShouldInjectFault(INJECTFAULT_MAPVIEWOFFILE))
+ {
+ return ClrVirtualFree((LPVOID)lpBaseAddress, 0, MEM_RELEASE);
+ }
+ else
+#endif // _TARGET_X86_
+#endif // _DEBUG
+ {
+ BOOL result = UnmapViewOfFile(lpBaseAddress);
+ if (result)
+ {
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostMemoryManager *memoryManager = CorHost2::GetHostMemoryManager();
+ if (memoryManager)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ memoryManager->ReleasedVirtualAddressSpace(lpBaseAddress);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ }
+ return result;
+ }
+}
+
+
+#ifndef CROSSGEN_COMPILE
+
+static HMODULE CLRLoadLibraryWorker(LPCWSTR lpLibFileName, DWORD *pLastError)
+{
+ // Don't use dynamic contract: will override GetLastError value
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ ThreadAffinityHolder affinity;
+ HMODULE hMod;
+ UINT last = SetErrorMode(SEM_NOOPENFILEERRORBOX|SEM_FAILCRITICALERRORS);
+ {
+ INDEBUG(PEDecoder::ForceRelocForDLL(lpLibFileName));
+ hMod = WszLoadLibrary(lpLibFileName);
+ *pLastError = GetLastError();
+ }
+ SetErrorMode(last);
+ return hMod;
+}
+
+HMODULE CLRLoadLibrary(LPCWSTR lpLibFileName)
+{
+ // Don't use dynamic contract: will override GetLastError value
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+
+ DWORD dwLastError = 0;
+ HMODULE hmod = 0;
+
+ // This method should be marked "throws" due to the probe here.
+ STATIC_CONTRACT_VIOLATION(ThrowsViolation);
+
+ BEGIN_SO_TOLERANT_CODE(GetThread());
+ hmod = CLRLoadLibraryWorker(lpLibFileName, &dwLastError);
+ END_SO_TOLERANT_CODE;
+
+ SetLastError(dwLastError);
+ return hmod;
+}
+
+#ifndef FEATURE_PAL
+
+static HMODULE CLRLoadLibraryExWorker(LPCWSTR lpLibFileName, HANDLE hFile, DWORD dwFlags, DWORD *pLastError)
+
+{
+ // Don't use dynamic contract: will override GetLastError value
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ ThreadAffinityHolder affinity;
+ HMODULE hMod;
+ UINT last = SetErrorMode(SEM_NOOPENFILEERRORBOX|SEM_FAILCRITICALERRORS);
+ {
+ INDEBUG(PEDecoder::ForceRelocForDLL(lpLibFileName));
+ hMod = WszLoadLibraryEx(lpLibFileName, hFile, dwFlags);
+ *pLastError = GetLastError();
+ }
+ SetErrorMode(last);
+ return hMod;
+}
+
+HMODULE CLRLoadLibraryEx(LPCWSTR lpLibFileName, HANDLE hFile, DWORD dwFlags)
+{
+ // Don't use dynamic contract: will override GetLastError value
+
+ // This will throw in the case of SO
+ //STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FAULT;
+
+ DWORD lastError = ERROR_SUCCESS;
+ HMODULE hmod = NULL;
+
+ BEGIN_SO_TOLERANT_CODE(GetThread());
+ hmod = CLRLoadLibraryExWorker(lpLibFileName, hFile, dwFlags, &lastError);
+ END_SO_TOLERANT_CODE;
+
+ SetLastError(lastError);
+ return hmod;
+}
+
+#endif // !FEATURE_PAL
+
+BOOL CLRFreeLibrary(HMODULE hModule)
+{
+ // Don't use dynamic contract: will override GetLastError value
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ ThreadAffinityHolder affinity;
+ return FreeLibrary(hModule);
+}
+
+VOID CLRFreeLibraryAndExitThread(HMODULE hModule,DWORD dwExitCode)
+{
+ // Don't use dynamic contract: will override GetLastError value
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_FORBID_FAULT;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ ThreadAffinityHolder affinity;
+
+ // This is no-return
+ FreeLibraryAndExitThread(hModule,dwExitCode);
+}
+
+#endif // CROSSGEN_COMPILE
+
+#endif // #ifndef DACCESS_COMPILE
+
+GPTR_IMPL(JITNotification, g_pNotificationTable);
+GVAL_IMPL(ULONG32, g_dacNotificationFlags);
+
+BOOL IsValidMethodCodeNotification(USHORT Notification)
+{
+ // If any bit is on other than that given by a valid combination of flags, no good.
+ if (Notification & ~(
+ CLRDATA_METHNOTIFY_NONE |
+ CLRDATA_METHNOTIFY_GENERATED |
+ CLRDATA_METHNOTIFY_DISCARDED))
+ {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+JITNotifications::JITNotifications(JITNotification *jitTable)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (jitTable)
+ {
+ // Bookkeeping info is held in the first slot
+ m_jitTable = jitTable + 1;
+ }
+ else
+ {
+ m_jitTable = NULL;
+ }
+}
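+
+// Sketch of the underlying table layout (see InitializeNotificationTable):
+//
+//     slot 0:      bookkeeping -- methodToken holds the current length,
+//                  clrModule holds the total table size
+//     slots 1..N:  JITNotification entries; m_jitTable points at slot 1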
+
+BOOL JITNotifications::FindItem(TADDR clrModule, mdToken token, UINT *indexOut)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_jitTable == NULL)
+ {
+ return FALSE;
+ }
+
+ if (indexOut == NULL)
+ {
+ return FALSE;
+ }
+
+ UINT Length = GetLength();
+ for(UINT i=0; i < Length; i++)
+ {
+ JITNotification *pCurrent = m_jitTable + i;
+ if (!pCurrent->IsFree() &&
+ pCurrent->clrModule == clrModule &&
+ pCurrent->methodToken == token)
+ {
+ *indexOut = i;
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+// if clrModule is NULL, all active notifications are changed to NType
+BOOL JITNotifications::SetAllNotifications(TADDR clrModule,USHORT NType,BOOL *changedOut)
+{
+ if (m_jitTable == NULL)
+ {
+ return FALSE;
+ }
+
+ if (changedOut == NULL)
+ {
+ return FALSE;
+ }
+
+ *changedOut = FALSE;
+
+ UINT Length = GetLength();
+ for(UINT i=0; i < Length; i++)
+ {
+ JITNotification *pCurrent = m_jitTable + i;
+ if (!pCurrent->IsFree() &&
+ ((clrModule == NULL) || (pCurrent->clrModule == clrModule))&&
+ pCurrent->state != NType)
+ {
+ pCurrent->state = NType;
+ *changedOut = TRUE;
+ }
+ }
+
+ if (*changedOut && NType == CLRDATA_METHNOTIFY_NONE)
+ {
+ // Need to recompute length if we removed notifications
+ for (UINT iCurrent=Length; iCurrent > 0; iCurrent--)
+ {
+ JITNotification *pCurrent = m_jitTable + (iCurrent - 1);
+ if (pCurrent->IsFree())
+ {
+ DecrementLength();
+ }
+ }
+ }
+ return TRUE;
+}
+
+BOOL JITNotifications::SetNotification(TADDR clrModule, mdToken token, USHORT NType)
+{
+ UINT iIndex;
+
+ if (!IsActive())
+ {
+ return FALSE;
+ }
+
+ if (clrModule == NULL)
+ {
+ return FALSE;
+ }
+
+ if (NType == CLRDATA_METHNOTIFY_NONE)
+ {
+ // Remove an item if it exists
+ if (FindItem(clrModule, token, &iIndex))
+ {
+ JITNotification *pItem = m_jitTable + iIndex;
+ pItem->SetFree();
+ _ASSERTE(iIndex < GetLength());
+ // If this was the highest in-use slot, shrink the length.
+ if (iIndex == (GetLength()-1))
+ {
+ DecrementLength();
+ }
+ }
+ return TRUE;
+ }
+
+ if (FindItem(clrModule, token, &iIndex))
+ {
+ JITNotification *pItem = m_jitTable + iIndex;
+ _ASSERTE(pItem->IsFree() == FALSE);
+ pItem->state = NType;
+ return TRUE;
+ }
+
+ // Find first free item
+ UINT iFirstFree = GetLength();
+ for (UINT i = 0; i < iFirstFree; i++)
+ {
+ JITNotification *pCurrent = m_jitTable + i;
+ if (pCurrent->state == CLRDATA_METHNOTIFY_NONE)
+ {
+ iFirstFree = i;
+ break;
+ }
+ }
+
+ if (iFirstFree == GetLength() &&
+ iFirstFree == GetTableSize())
+ {
+ // No more room
+ return FALSE;
+ }
+
+ JITNotification *pCurrent = m_jitTable + iFirstFree;
+ pCurrent->SetState(clrModule, token, NType);
+ if (iFirstFree == GetLength())
+ {
+ IncrementLength();
+ }
+
+ return TRUE;
+}
+
+UINT JITNotifications::GetLength()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(IsActive());
+
+ if (!IsActive())
+ {
+ return 0;
+ }
+
+ return (UINT) (m_jitTable - 1)->methodToken;
+}
+
+void JITNotifications::IncrementLength()
+{
+ _ASSERTE(IsActive());
+
+ if (!IsActive())
+ {
+ return;
+ }
+
+ UINT *pShort = (UINT *) &((m_jitTable - 1)->methodToken);
+ (*pShort)++;
+}
+
+void JITNotifications::DecrementLength()
+{
+ _ASSERTE(IsActive());
+
+ if (!IsActive())
+ {
+ return;
+ }
+
+ UINT *pShort = (UINT *) &((m_jitTable - 1)->methodToken);
+ (*pShort)--;
+}
+
+UINT JITNotifications::GetTableSize()
+{
+ _ASSERTE(IsActive());
+
+ if (!IsActive())
+ {
+ return 0;
+ }
+
+ return ((UINT) (m_jitTable - 1)->clrModule);
+}
+
+USHORT JITNotifications::Requested(TADDR clrModule, mdToken token)
+{
+ LIMITED_METHOD_CONTRACT;
+ UINT iIndex;
+ if (FindItem(clrModule, token, &iIndex))
+ {
+ JITNotification *pItem = m_jitTable + iIndex;
+ _ASSERTE(pItem->IsFree() == FALSE);
+ return pItem->state;
+ }
+
+ return CLRDATA_METHNOTIFY_NONE;
+}
+
+#ifdef DACCESS_COMPILE
+
+JITNotification *JITNotifications::InitializeNotificationTable(UINT TableSize)
+{
+ // We use the first entry in the table for recordkeeping info.
+
+ JITNotification *retTable = new (nothrow) JITNotification[TableSize+1];
+ if (retTable)
+ {
+ // Set the length
+ UINT *pUint = (UINT *) &(retTable->methodToken);
+ *pUint = 0;
+ // Set the table size
+ pUint = (UINT *) &(retTable->clrModule);
+ *pUint = TableSize;
+ }
+ return retTable;
+}
+
+template <class NotificationClass>
+BOOL UpdateOutOfProcTable(__GlobalPtr<NotificationClass*, DPTR(NotificationClass)> pHostTable, NotificationClass* copyFrom, UINT tableSize)
+{
+
+ ClrSafeInt<ULONG32> allocSize = S_SIZE_T(sizeof(NotificationClass)) * ClrSafeInt<UINT>(tableSize);
+ if (allocSize.IsOverflow())
+ {
+ return FALSE;
+ }
+
+ if (dac_cast<TADDR>(pHostTable) == NULL)
+ {
+ // The table has not been initialized in the target. Allocate space for it and update the pointer
+ // in the target so that we'll use this allocated memory from now on. Note that we never free this
+ // memory, but this isn't a big deal because it's only a single allocation.
+ TADDR Location;
+
+ if (DacAllocVirtual(0, allocSize.Value(),
+ MEM_COMMIT, PAGE_READWRITE, false,
+ &Location) != S_OK)
+ {
+ return FALSE;
+ }
+
+ DPTR(DPTR(NotificationClass)) ppTable = &pHostTable;
+ *ppTable = DPTR(NotificationClass)(Location);
+ if (DacWriteHostInstance(ppTable,false) != S_OK)
+ {
+ return FALSE;
+ }
+ }
+
+ // We store recordkeeping info right before the m_jitTable pointer, so that info must be written as well.
+ if (DacWriteAll(dac_cast<TADDR>(pHostTable), copyFrom,
+ allocSize.Value(), false) != S_OK)
+ {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+BOOL JITNotifications::UpdateOutOfProcTable()
+{
+ return ::UpdateOutOfProcTable<JITNotification>(g_pNotificationTable, m_jitTable - 1, GetTableSize() + 1);
+}
+#endif // DACCESS_COMPILE
+
+GPTR_IMPL(GcNotification, g_pGcNotificationTable);
+
+GcNotifications::GcNotifications(GcNotification *gcTable)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (gcTable)
+ {
+ // Bookkeeping info is held in the first slot
+ m_gcTable = gcTable + 1;
+ }
+ else
+ {
+ m_gcTable = NULL;
+ }
+}
+
+BOOL GcNotifications::FindItem(GcEvtArgs ev_, UINT *indexOut)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_gcTable == NULL)
+ {
+ return FALSE;
+ }
+
+ if (indexOut == NULL)
+ {
+ return FALSE;
+ }
+
+ UINT length = Length();
+ for (UINT i = 0; i < length; i++)
+ {
+ if (m_gcTable[i].IsMatch(ev_))
+ {
+ *indexOut = i;
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+
+BOOL GcNotifications::SetNotification(GcEvtArgs ev)
+{
+ if (!IsActive())
+ {
+ return FALSE;
+ }
+
+ if (ev.typ < 0 || ev.typ >= GC_EVENT_TYPE_MAX)
+ {
+ return FALSE;
+ }
+
+ // build the "match" event
+ GcEvtArgs evStar = { ev.typ };
+ switch (ev.typ)
+ {
+ case GC_MARK_END:
+ // specify mark event matching all generations
+ evStar.condemnedGeneration = -1;
+ break;
+ default:
+ break;
+ }
+
+ // look for the entry that matches the evStar argument
+ UINT idx;
+ if (!FindItem(evStar, &idx))
+ {
+ // Find first free item
+ UINT iFirstFree = Length();
+ for (UINT i = 0; i < iFirstFree; i++)
+ {
+ GcNotification *pCurrent = m_gcTable + i;
+ if (pCurrent->IsFree())
+ {
+ iFirstFree = i;
+ break;
+ }
+ }
+
+ if (iFirstFree == Length() &&
+ iFirstFree == GetTableSize())
+ {
+ // No more room
+ return FALSE;
+ }
+
+ // guarantee the free cell is zeroed out
+ m_gcTable[iFirstFree].SetFree();
+ idx = iFirstFree;
+ }
+
+ // Now update the state
+ m_gcTable[idx].ev.typ = ev.typ;
+ switch (ev.typ)
+ {
+ case GC_MARK_END:
+ if (ev.condemnedGeneration == 0)
+ {
+ m_gcTable[idx].SetFree();
+ }
+ else
+ {
+ m_gcTable[idx].ev.condemnedGeneration |= ev.condemnedGeneration;
+ }
+ break;
+ default:
+ break;
+ }
+
+ // and if needed, update the array's length
+ if (idx == Length())
+ {
+ IncrementLength();
+ }
+
+ return TRUE;
+}
+
+#ifdef DACCESS_COMPILE
+
+GcNotification *GcNotifications::InitializeNotificationTable(UINT TableSize)
+{
+ // We use the first entry in the table for recordkeeping info.
+
+ GcNotification *retTable = new (nothrow) GcNotification[TableSize+1];
+ if (retTable)
+ {
+ // Set the length
+ UINT *pUint = (UINT *) &(retTable[0].ev.typ);
+ *pUint = 0;
+ // Set the table size
+ ++pUint;
+ *pUint = TableSize;
+ }
+ return retTable;
+}
+
+BOOL GcNotifications::UpdateOutOfProcTable()
+{
+ return ::UpdateOutOfProcTable<GcNotification>(g_pGcNotificationTable, m_gcTable - 1, GetTableSize() + 1);
+}
+#endif // DACCESS_COMPILE
+
+
+void DACNotifyExceptionHelper(TADDR *args,UINT argCount)
+{
+ struct Param
+ {
+ TADDR *args;
+ UINT argCount;
+ } param;
+ param.args = args;
+ param.argCount = argCount;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ if (IsDebuggerPresent() && !CORDebuggerAttached())
+ {
+ RaiseException(CLRDATA_NOTIFY_EXCEPTION, 0, pParam->argCount, (ULONG_PTR *) pParam->args);
+ }
+ }
+ PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
+ {
+ }
+ PAL_ENDTRY
+}
+
+// <TODO> FIX IN BETA 2
+//
+// g_dacNotificationFlags is only modified by the DAC, and therefore the
+// optimizer can assume that it will always be its default value; it has
+// been seen to eliminate the code in DoModuleLoadNotification,
+// etc., such that DAC notifications are no longer sent.
+//
+// TODO: fix this in Beta 2
+// The RIGHT fix is to make g_dacNotificationFlags volatile, but currently
+// we don't have DAC macros to do that. Additionally, there are a number
+// of other DAC definitions we should examine to determine whether they
+// should also be declared volatile.
+//
+// for now we just turn off optimization for these guys
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4748)
+#pragma optimize("", off)
+#endif // _MSC_VER
+ // called from the runtime
+void DACNotify::DoJITNotification(MethodDesc *MethodDescPtr)
+{
+ WRAPPER_NO_CONTRACT;
+ TADDR Args[2] = { JIT_NOTIFICATION, (TADDR) MethodDescPtr };
+ DACNotifyExceptionHelper(Args,2);
+}
+
+void DACNotify::DoJITDiscardNotification(MethodDesc *MethodDescPtr)
+{
+ TADDR Args[2] = { JIT_DISCARD_NOTIFICATION, (TADDR) MethodDescPtr };
+ DACNotifyExceptionHelper(Args,2);
+}
+
+void DACNotify::DoModuleLoadNotification(Module *ModulePtr)
+{
+ WRAPPER_NO_CONTRACT;
+ if ((g_dacNotificationFlags & CLRDATA_NOTIFY_ON_MODULE_LOAD) != 0)
+ {
+ TADDR Args[2] = { MODULE_LOAD_NOTIFICATION, (TADDR) ModulePtr};
+ DACNotifyExceptionHelper(Args, 2);
+ }
+}
+
+void DACNotify::DoModuleUnloadNotification(Module *ModulePtr)
+{
+ WRAPPER_NO_CONTRACT;
+ if ((g_dacNotificationFlags & CLRDATA_NOTIFY_ON_MODULE_UNLOAD) != 0)
+ {
+ TADDR Args[2] = { MODULE_UNLOAD_NOTIFICATION, (TADDR) ModulePtr};
+ DACNotifyExceptionHelper(Args, 2);
+ }
+}
+
+void DACNotify::DoExceptionNotification(Thread* ThreadPtr)
+{
+ if ((g_dacNotificationFlags & CLRDATA_NOTIFY_ON_EXCEPTION) != 0)
+ {
+ TADDR Args[2] = { EXCEPTION_NOTIFICATION, (TADDR) ThreadPtr};
+ DACNotifyExceptionHelper(Args, 2);
+ }
+}
+
+void DACNotify::DoGCNotification(const GcEvtArgs& args)
+{
+ WRAPPER_NO_CONTRACT;
+ if (args.typ == GC_MARK_END)
+ {
+ TADDR Args[3] = { GC_NOTIFICATION, (TADDR) args.typ, args.condemnedGeneration };
+ DACNotifyExceptionHelper(Args, 3);
+ }
+}
+
+void DACNotify::DoExceptionCatcherEnterNotification(MethodDesc *MethodDescPtr, DWORD nativeOffset)
+{
+ WRAPPER_NO_CONTRACT;
+ if ((g_dacNotificationFlags & CLRDATA_NOTIFY_ON_EXCEPTION_CATCH_ENTER) != 0)
+ {
+ TADDR Args[3] = { CATCH_ENTER_NOTIFICATION, (TADDR) MethodDescPtr, (TADDR)nativeOffset };
+ DACNotifyExceptionHelper(Args, 3);
+ }
+}
+
+#ifdef _MSC_VER
+#pragma optimize("", on)
+#pragma warning(pop)
+#endif // _MSC_VER
+// </TODO>
+
+ // called from the DAC
+int DACNotify::GetType(TADDR Args[])
+{
+ // Type is an enum, and will thus fit into an int.
+ return static_cast<int>(Args[0]);
+}
+
+BOOL DACNotify::ParseJITNotification(TADDR Args[], TADDR& MethodDescPtr)
+{
+ _ASSERTE(Args[0] == JIT_NOTIFICATION);
+ if (Args[0] != JIT_NOTIFICATION)
+ {
+ return FALSE;
+ }
+
+ MethodDescPtr = Args[1];
+
+ return TRUE;
+}
+
+BOOL DACNotify::ParseJITDiscardNotification(TADDR Args[], TADDR& MethodDescPtr)
+{
+ _ASSERTE(Args[0] == JIT_DISCARD_NOTIFICATION);
+ if (Args[0] != JIT_DISCARD_NOTIFICATION)
+ {
+ return FALSE;
+ }
+
+ MethodDescPtr = Args[1];
+
+ return TRUE;
+}
+
+BOOL DACNotify::ParseModuleLoadNotification(TADDR Args[], TADDR& Module)
+{
+ _ASSERTE(Args[0] == MODULE_LOAD_NOTIFICATION);
+ if (Args[0] != MODULE_LOAD_NOTIFICATION)
+ {
+ return FALSE;
+ }
+
+ Module = Args[1];
+
+ return TRUE;
+}
+
+BOOL DACNotify::ParseModuleUnloadNotification(TADDR Args[], TADDR& Module)
+{
+ _ASSERTE(Args[0] == MODULE_UNLOAD_NOTIFICATION);
+ if (Args[0] != MODULE_UNLOAD_NOTIFICATION)
+ {
+ return FALSE;
+ }
+
+ Module = Args[1];
+
+ return TRUE;
+}
+
+BOOL DACNotify::ParseExceptionNotification(TADDR Args[], TADDR& ThreadPtr)
+{
+ _ASSERTE(Args[0] == EXCEPTION_NOTIFICATION);
+ if (Args[0] != EXCEPTION_NOTIFICATION)
+ {
+ return FALSE;
+ }
+
+ ThreadPtr = Args[1];
+
+ return TRUE;
+}
+
+
+BOOL DACNotify::ParseGCNotification(TADDR Args[], GcEvtArgs& args)
+{
+ _ASSERTE(Args[0] == GC_NOTIFICATION);
+ if (Args[0] != GC_NOTIFICATION)
+ {
+ return FALSE;
+ }
+
+ BOOL bRet = FALSE;
+
+ args.typ = (GcEvt_t) Args[1];
+ switch (args.typ)
+ {
+ case GC_MARK_END:
+ {
+ // The condemnedGeneration is an int.
+ args.condemnedGeneration = static_cast<int>(Args[2]);
+ bRet = TRUE;
+ break;
+ }
+ default:
+ bRet = FALSE;
+ break;
+ }
+
+ return bRet;
+}
+
+BOOL DACNotify::ParseExceptionCatcherEnterNotification(TADDR Args[], TADDR& MethodDescPtr, DWORD& nativeOffset)
+{
+ _ASSERTE(Args[0] == CATCH_ENTER_NOTIFICATION);
+ if (Args[0] != CATCH_ENTER_NOTIFICATION)
+ {
+ return FALSE;
+ }
+
+ MethodDescPtr = Args[1];
+ nativeOffset = (DWORD) Args[2];
+ return TRUE;
+}
+
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+
+
+#if defined(_DEBUG) && !defined(FEATURE_PAL)
+
+typedef USHORT
+(__stdcall *PFNRtlCaptureStackBackTrace)(
+ IN ULONG FramesToSkip,
+ IN ULONG FramesToCapture,
+ OUT PVOID * BackTrace,
+ OUT PULONG BackTraceHash);
+
+static PFNRtlCaptureStackBackTrace s_RtlCaptureStackBackTrace = NULL;
+
+WORD UtilCaptureStackBackTrace(
+ ULONG FramesToSkip,
+ ULONG FramesToCapture,
+ PVOID * BackTrace,
+ OUT PULONG BackTraceHash)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifdef _DEBUG
+ Thread* t = GetThread();
+ if (t != NULL) {
+ // the thread should not have a hijack set up or we can't walk the stack.
+ _ASSERTE(!(t->m_State & Thread::TS_Hijacked));
+ }
+#endif
+
+ if(!s_RtlCaptureStackBackTrace)
+ {
+ // Don't need to worry about race conditions here since every racing thread will compute the same value
+ HMODULE hModNtdll = GetModuleHandleA("ntdll.dll");
+ s_RtlCaptureStackBackTrace = reinterpret_cast<PFNRtlCaptureStackBackTrace>(
+ GetProcAddress(hModNtdll, "RtlCaptureStackBackTrace"));
+ }
+ if (!s_RtlCaptureStackBackTrace) {
+ return 0;
+ }
+ ULONG hash;
+ if (BackTraceHash == NULL) {
+ BackTraceHash = &hash;
+ }
+ return s_RtlCaptureStackBackTrace(FramesToSkip, FramesToCapture, BackTrace, BackTraceHash);
+}
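+
+// Illustrative use (the frame count is arbitrary): skip one frame and capture up
+// to 16 callers.
+//
+//     PVOID rgFrames[16];
+//     WORD cFrames = UtilCaptureStackBackTrace(1, 16, rgFrames, NULL);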
+
+#endif // #if _DEBUG && !FEATURE_PAL
+
+
+#ifdef _DEBUG
+DisableDelayLoadCheckForOleaut32::DisableDelayLoadCheckForOleaut32()
+{
+ GetThread()->SetThreadStateNC(Thread::TSNC_DisableOleaut32Check);
+}
+
+DisableDelayLoadCheckForOleaut32::~DisableDelayLoadCheckForOleaut32()
+{
+ GetThread()->ResetThreadStateNC(Thread::TSNC_DisableOleaut32Check);
+}
+
+BOOL DelayLoadOleaut32CheckDisabled()
+{
+ Thread *pThread = GetThread();
+ if (pThread && pThread->HasThreadStateNC(Thread::TSNC_DisableOleaut32Check))
+ {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+#endif
+
+BOOL EnableARM()
+{
+#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ CONTRACTL
+ {
+ NOTHROW;
+ // TODO: this should really be GC_TRIGGERS so we wouldn't need the
+ // CONTRACT_VIOLATION below but the hosting API that calls this
+ // can be called on a COOP thread and it has a GC_NOTRIGGER contract.
+ // We should use the AD unload thread to call this function on.
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ BOOL fARMEnabled = g_fEnableARM;
+
+ if (!fARMEnabled)
+ {
+ if (ThreadStore::s_pThreadStore)
+ {
+ // We need to establish the baselines for the CPU usage counting.
+ Thread *pThread = NULL;
+ CONTRACT_VIOLATION(GCViolation);
+
+ // I am returning TRUE here so the caller will NOT enable
+ // ARM - if we can't take the thread store lock, something
+ // is already kind of messed up so no need to proceed with
+ // enabling ARM.
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return TRUE);
+ // Take the thread store lock while we enumerate threads.
+ ThreadStoreLockHolder tsl ;
+
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ {
+ if (pThread->IsUnstarted() || pThread->IsDead())
+ continue;
+ pThread->QueryThreadProcessorUsage();
+ }
+
+ END_SO_INTOLERANT_CODE;
+ }
+ g_fEnableARM = TRUE;
+ }
+
+ return fARMEnabled;
+#else // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+ return FALSE;
+#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+}
+
+#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
+
+
+static BOOL TrustMeIAmSafe(void *pLock)
+{
+ LIMITED_METHOD_CONTRACT;
+ return TRUE;
+}
+
+LockOwner g_lockTrustMeIAmThreadSafe = { NULL, TrustMeIAmSafe };
+
+
+DangerousNonHostedSpinLock g_randomLock;
+CLRRandom g_random;
+
+
+int GetRandomInt(int maxVal)
+{
+#ifndef CROSSGEN_COMPILE
+ // Use the thread-local Random instance if possible
+ Thread* pThread = GetThread();
+ if (pThread)
+ return pThread->GetRandom()->Next(maxVal);
+#endif
+
+ // No Thread object - need to fall back to the global generator.
+ // In DAC builds we don't need the lock (DAC is single-threaded) and can't get it anyway (DNHSL isn't supported)
+#ifndef DACCESS_COMPILE
+ DangerousNonHostedSpinLockHolder lh(&g_randomLock);
+#endif
+ if (!g_random.IsInitialized())
+ g_random.Init();
+ return g_random.Next(maxVal);
+}
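+
+// Example: GetRandomInt(100) yields a value in [0, 100) -- the upper bound is
+// exclusive, as with System.Random.Next.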
+
+// These wrap the SString::CompareCaseInsensitive function in a way that makes it
+// easy to fix code that uses _stricmp. _stricmp should be avoided as it uses the current
+// C-runtime locale rather than the invariant culture.
+//
+// Note that unlike the real _stricmp, these functions unavoidably have a throws/gc_triggers/inject_fault
+// contract. So if you need a case-insensitive comparison in a place where you can't tolerate this contract,
+// you've got a problem.
+int __cdecl stricmpUTF8(const char* szStr1, const char* szStr2)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ SString sStr1 (SString::Utf8, szStr1);
+ SString sStr2 (SString::Utf8, szStr2);
+ return sStr1.CompareCaseInsensitive(sStr2);
+
+}
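+
+// Example: stricmpUTF8("Mscorlib", "MSCORLIB") returns 0, since the comparison is
+// case-insensitive under the invariant culture.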
+
+#ifndef DACCESS_COMPILE
+//
+// Casing Table Helpers for use in the EE.
+//
+
+// Convert szIn to lower case in the Invariant locale.
+INT32 InternalCasingHelper::InvariantToLower(__out_bcount_opt(cMaxBytes) LPUTF8 szOut, int cMaxBytes, __in_z LPCUTF8 szIn)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ } CONTRACTL_END
+
+ return InvariantToLowerHelper(szOut, cMaxBytes, szIn, TRUE /*fAllowThrow*/);
+}
+
+// Convert szIn to lower case in the Invariant locale.
+INT32 InternalCasingHelper::InvariantToLowerNoThrow(__out_bcount_opt(cMaxBytes) LPUTF8 szOut, int cMaxBytes, __in_z LPCUTF8 szIn)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(return 0;);
+ } CONTRACTL_END
+
+
+ return InvariantToLowerHelper(szOut, cMaxBytes, szIn, FALSE /*fAllowThrow*/);
+}
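+
+// Illustrative use (szName is a hypothetical UTF-8 string): lower-case it into a
+// fixed-size buffer without throwing.
+//
+//     char szLower[64];
+//     INT32 cb = InternalCasingHelper::InvariantToLowerNoThrow(szLower, sizeof(szLower), szName);
+//     // cb is 0 on failure; otherwise szLower holds the lower-cased copy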
+
+// Convert szIn to lower case in the Invariant locale.
+INT32 InternalCasingHelper::InvariantToLowerHelper(__out_bcount_opt(cMaxBytes) LPUTF8 szOut, int cMaxBytes, __in_z LPCUTF8 szIn, BOOL fAllowThrow)
+{
+
+ CONTRACTL {
+ // This fcn can trigger a lazy load of the TextInfo class.
+ if (fAllowThrow) THROWS; else NOTHROW;
+ if (fAllowThrow) GC_TRIGGERS; else GC_NOTRIGGER;
+ if (fAllowThrow) {INJECT_FAULT(COMPlusThrowOM());} else {INJECT_FAULT(return 0);}
+ MODE_ANY;
+
+ PRECONDITION((cMaxBytes == 0) || CheckPointer(szOut));
+ PRECONDITION(CheckPointer(szIn));
+ } CONTRACTL_END
+
+ int inLength = (int)(strlen(szIn)+1);
+ INT32 result = 0;
+
+ LPCUTF8 szInSave = szIn;
+ LPUTF8 szOutSave = szOut;
+ BOOL bFoundHighChars=FALSE;
+ //Compute our end point.
+ LPCUTF8 szEnd;
+ INT32 wideCopyLen;
+
+ CQuickBytes qbOut;
+ LPWSTR szWideOut;
+
+ if (cMaxBytes != 0 && szOut == NULL) {
+ if (fAllowThrow) {
+ COMPlusThrowHR(ERROR_INVALID_PARAMETER);
+ }
+ SetLastError(ERROR_INVALID_PARAMETER);
+ result = 0;
+ goto Exit;
+ }
+
+ if (cMaxBytes) {
+ szEnd = szOut + min(inLength, cMaxBytes);
+ //Walk the string copying the characters. Change the case on
+ //any character between A-Z.
+ for (; szOut<szEnd; szOut++, szIn++) {
+ if (*szIn>='A' && *szIn<='Z') {
+ *szOut = *szIn | 0x20;
+ }
+ else {
+ if (((UINT32)(*szIn))>((UINT32)0x80)) {
+ bFoundHighChars = TRUE;
+ break;
+ }
+ *szOut = *szIn;
+ }
+ }
+
+ if (!bFoundHighChars) {
+ //If we copied everything, tell them how many bytes we copied,
+ //and arrange it so that the original position of the string + the returned
+ //length gives us the position of the null (useful if we're appending).
+ if (--inLength > cMaxBytes) {
+ if (fAllowThrow) {
+ COMPlusThrowHR(HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER));
+ }
+ SetLastError(ERROR_INSUFFICIENT_BUFFER);
+ result = 0;
+ goto Exit;
+ }
+
+ result = inLength;
+ goto Exit;
+ }
+ }
+ else {
+ szEnd = szIn + inLength;
+ for (; szIn<szEnd; szIn++) {
+ if (((UINT32)(*szIn))>((UINT32)0x80)) {
+ bFoundHighChars = TRUE;
+ break;
+ }
+ }
+
+ if (!bFoundHighChars) {
+ result = inLength;
+ goto Exit;
+ }
+ }
+
+ szOut = szOutSave;
+
+#ifndef FEATURE_PAL
+
+ //convert the UTF8 to Unicode
+ //MAKE_WIDEPTR_FROMUTF8(szInWide, szInSave);
+
+ int __lszInWide;
+ LPWSTR szInWide;
+ __lszInWide = WszMultiByteToWideChar(CP_UTF8, 0, szInSave, -1, 0, 0);
+ if (__lszInWide > MAKE_MAX_LENGTH)
+ RaiseException(EXCEPTION_INT_OVERFLOW, EXCEPTION_NONCONTINUABLE, 0, 0);
+ szInWide = (LPWSTR) alloca(__lszInWide*sizeof(WCHAR));
+ if (szInWide == NULL) {
+ if (fAllowThrow) {
+ COMPlusThrowOM();
+ } else {
+ SetLastError(ERROR_NOT_ENOUGH_MEMORY);
+ result = 0;
+ goto Exit;
+ }
+ }
+ if (0==WszMultiByteToWideChar(CP_UTF8, 0, szInSave, -1, szInWide, __lszInWide)) {
+ RaiseException(ERROR_NO_UNICODE_TRANSLATION, EXCEPTION_NONCONTINUABLE, 0, 0);
+ }
+
+
+ wideCopyLen = (INT32)wcslen(szInWide)+1;
+ if (fAllowThrow) {
+ szWideOut = (LPWSTR)qbOut.AllocThrows(wideCopyLen * sizeof(WCHAR));
+ }
+ else {
+ szWideOut = (LPWSTR)qbOut.AllocNoThrow(wideCopyLen * sizeof(WCHAR));
+ if (!szWideOut) {
+ SetLastError(ERROR_NOT_ENOUGH_MEMORY);
+ result = 0;
+ goto Exit;
+ }
+ }
+
+ //Do the casing operation
+ NewApis::LCMapStringEx(W(""), LCMAP_LOWERCASE, szInWide, wideCopyLen, szWideOut, wideCopyLen, NULL, NULL, 0);
+
+ //Convert the Unicode back to UTF8
+ result = WszWideCharToMultiByte(CP_UTF8, 0, szWideOut, wideCopyLen, szOut, cMaxBytes, NULL, NULL);
+
+ if ((result == 0) && fAllowThrow) {
+ COMPlusThrowWin32();
+ }
+
+#endif // !FEATURE_PAL
+
+Exit:
+ return result;
+}
+
+//
+//
+// COMCharacter and Helper functions
+//
+//
+
+#ifndef FEATURE_PAL
+/*============================GetCharacterInfoHelper============================
+**Determines character type info (digit, whitespace, etc) for the given char.
+**Args: c is the character on which to operate.
+** CharInfoType is one of CT_CTYPE1, CT_CTYPE2, CT_CTYPE3 and specifies the type
+** of information being requested.
+**Returns: The bitmask returned by GetStringTypeEx. The caller needs to know
+** how to interpret this.
+**Exceptions: ArgumentException if GetStringTypeEx fails.
+==============================================================================*/
+INT32 GetCharacterInfoHelper(WCHAR c, INT32 CharInfoType)
+{
+ WRAPPER_NO_CONTRACT;
+
+ unsigned short result=0;
+ if (!GetStringTypeEx(LOCALE_USER_DEFAULT, CharInfoType, &(c), 1, &result)) {
+ _ASSERTE(!"This should not happen, verify the arguments passed to GetStringTypeEx()");
+ }
+ return(INT32)result;
+}
+#endif // !FEATURE_PAL
+
+/*==============================nativeIsWhiteSpace==============================
+**The locally available version of IsWhiteSpace. Designed to be called by other
+**native methods. The work is mostly done by GetCharacterInfoHelper
+**Args: c -- the character to check.
+**Returns: true if c is whitespace, false otherwise.
+**Exceptions: Only those thrown by GetCharacterInfoHelper.
+==============================================================================*/
+BOOL COMCharacter::nativeIsWhiteSpace(WCHAR c)
+{
+ WRAPPER_NO_CONTRACT;
+
+#ifndef FEATURE_PAL
+ if (c <= (WCHAR) 0x7F) // common case
+ {
+ BOOL result = (c == ' ') || (c == '\r') || (c == '\n') || (c == '\t') || (c == '\f') || (c == (WCHAR) 0x0B);
+
+ ASSERT(result == ((GetCharacterInfoHelper(c, CT_CTYPE1) & C1_SPACE)!=0));
+
+ return result;
+ }
+
+ // GetCharacterInfoHelper costs around 160 instructions
+ return((GetCharacterInfoHelper(c, CT_CTYPE1) & C1_SPACE)!=0);
+#else // !FEATURE_PAL
+ return iswspace(c);
+#endif // !FEATURE_PAL
+}
+
+/*================================nativeIsDigit=================================
+**The locally available version of IsDigit. Designed to be called by other
+**native methods. The work is mostly done by GetCharacterInfoHelper
+**Args: c -- the character to check.
+**Returns: true if c is a digit, false otherwise.
+**Exceptions: Only those thrown by GetCharacterInfoHelper.
+==============================================================================*/
+BOOL COMCharacter::nativeIsDigit(WCHAR c)
+{
+ WRAPPER_NO_CONTRACT;
+#ifndef FEATURE_PAL
+ return((GetCharacterInfoHelper(c, CT_CTYPE1) & C1_DIGIT)!=0);
+#else // !FEATURE_PAL
+ return iswdigit(c);
+#endif // !FEATURE_PAL
+}
+
+BOOL RuntimeFileNotFound(HRESULT hr)
+{
+ LIMITED_METHOD_CONTRACT;
+ return Assembly::FileNotFound(hr);
+}
+
+#ifndef FEATURE_PAL
+HRESULT GetFileVersion( // S_OK or error
+ LPCWSTR wszFilePath, // Path to the executable.
+ ULARGE_INTEGER* pFileVersion) // Put file version here.
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ //
+ // Note that this code is equivalent to FusionGetFileVersionInfo, found in fusion\asmcache\asmcache.cpp
+ //
+
+ // Avoid confusion.
+ pFileVersion->QuadPart = 0;
+
+ DWORD ret;
+
+ DWORD dwHandle = 0;
+ DWORD bufSize = GetFileVersionInfoSizeW(wszFilePath, &dwHandle);
+ if (!bufSize)
+ {
+ return HRESULT_FROM_GetLastErrorNA();
+ }
+
+ // Allocate the buffer for the version info structure
+ // _alloca() can't return NULL -- raises STATUS_STACK_OVERFLOW.
+ BYTE* pVersionInfoBuffer = reinterpret_cast< BYTE* >(_alloca(bufSize));
+
+ ret = GetFileVersionInfoW(wszFilePath, dwHandle, bufSize, pVersionInfoBuffer);
+ if (!ret)
+ {
+ return HRESULT_FROM_GetLastErrorNA();
+ }
+
+ // Extract the actual File Version number that we care about.
+ UINT versionInfoSize = 0;
+ VS_FIXEDFILEINFO* pVSFileInfo;
+ ret = VerQueryValueW(pVersionInfoBuffer, W("\\"),
+ reinterpret_cast< void **>(&pVSFileInfo), &versionInfoSize);
+ if (!ret || versionInfoSize == 0)
+ {
+ return HRESULT_FROM_GetLastErrorNA();
+ }
+
+ pFileVersion->HighPart = pVSFileInfo->dwFileVersionMS;
+ pFileVersion->LowPart = pVSFileInfo->dwFileVersionLS;
+
+ return S_OK;
+}
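+
+// Illustrative use (the path is hypothetical): unpack the four version fields from
+// the ULARGE_INTEGER filled in above.
+//
+//     ULARGE_INTEGER fileVersion;
+//     if (SUCCEEDED(GetFileVersion(W("c:\\windows\\clr.dll"), &fileVersion)))
+//     {
+//         WORD major    = HIWORD(fileVersion.HighPart);
+//         WORD minor    = LOWORD(fileVersion.HighPart);
+//         WORD build    = HIWORD(fileVersion.LowPart);
+//         WORD revision = LOWORD(fileVersion.LowPart);
+//     }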
+#endif // !FEATURE_PAL
+
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/util.hpp b/src/vm/util.hpp
new file mode 100644
index 0000000000..b6f3adb131
--- /dev/null
+++ b/src/vm/util.hpp
@@ -0,0 +1,1372 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// util.hpp
+//
+
+//
+// Miscellaneous useful functions
+//
+
+#ifndef _H_UTIL
+#define _H_UTIL
+
+#define MAX_UINT32_HEX_CHAR_LEN 8 // max number of chars representing an unsigned int32, not including terminating null char.
+#define MAX_INT32_DECIMAL_CHAR_LEN 11 // max number of chars representing an int32, including sign, not including terminating null char.
+
+#include "utilcode.h"
+#include "metadata.h"
+#include "holderinst.h"
+#include "clrdata.h"
+#include "xclrdata.h"
+#include "posterror.h"
+#include "clr_std/type_traits"
+
+
+// Prevent the use of UtilMessageBox and WszMessageBox from inside the EE.
+#ifndef CLR_STANDALONE_BINDER
+#undef UtilMessageBoxCatastrophic
+#undef UtilMessageBoxCatastrophicNonLocalized
+#undef UtilMessageBoxCatastrophicVA
+#undef UtilMessageBoxCatastrophicNonLocalizedVA
+#undef UtilMessageBox
+#undef UtilMessageBoxNonLocalized
+#undef UtilMessageBoxVA
+#undef UtilMessageBoxNonLocalizedVA
+#undef WszMessageBox
+#define UtilMessageBoxCatastrophic __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
+#define UtilMessageBoxCatastrophicNonLocalized __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
+#define UtilMessageBoxCatastrophicVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
+#define UtilMessageBoxCatastrophicNonLocalizedVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
+#define UtilMessageBox __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
+#define UtilMessageBoxNonLocalized __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
+#define UtilMessageBoxVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
+#define UtilMessageBoxNonLocalizedVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
+#define WszMessageBox __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
+#endif
+
+//========================================================================
+// More convenient names for integer types of a guaranteed size.
+//========================================================================
+
+typedef __int8 I1;
+typedef ArrayDPTR(I1) PTR_I1;
+typedef unsigned __int8 U1;
+typedef __int16 I2;
+typedef unsigned __int16 U2;
+typedef __int32 I4;
+typedef unsigned __int32 U4;
+typedef __int64 I8;
+typedef unsigned __int64 U8;
+typedef float R4;
+typedef double R8;
+
+//
+// Forward the FastInterlock methods to the matching Win32 APIs. They are implemented
+// using compiler intrinsics so they are as fast as they can possibly be.
+//
+
+//
+// these don't have corresponding compiler intrinsics
+//
+
+#ifdef FEATURE_SINGLE_THREADED
+#define FastInterlockIncrement(a) (++(*a))
+#define FastInterlockDecrement(a) (--(*a))
+#define FastInterlockOr(a, b) (*a |= (DWORD)b)
+#define FastInterlockAnd(a, b) (*a &= (DWORD)b)
+#define FastInterlockIncrementLong(a) (++(*a))
+#define FastInterlockDecrementLong(a) (--(*a))
+#define FastInterlockOrLong(a, b) (*a |= (UINT64)b)
+#define FastInterlockAndLong(a, b) (*a &= (UINT64)b)
+#define FastInterlockCompareExchange InterlockedCompareExchange
+#define FastInterlockCompareExchangePointer InterlockedCompareExchangeT
+
+#else
+
+//
+// these DO have corresponding compiler intrinsics
+//
+#define FastInterlockIncrement InterlockedIncrement
+#define FastInterlockDecrement InterlockedDecrement
+#define FastInterlockExchange InterlockedExchange
+#define FastInterlockCompareExchange InterlockedCompareExchange
+#define FastInterlockExchangeAdd InterlockedExchangeAdd
+#define FastInterlockExchangeLong InterlockedExchange64
+#define FastInterlockCompareExchangeLong InterlockedCompareExchange64
+#define FastInterlockExchangeAddLong InterlockedExchangeAdd64
+
+//
+// Forward FastInterlock[Compare]ExchangePointer to the
+// Utilcode Interlocked[Compare]ExchangeT.
+//
+#define FastInterlockExchangePointer InterlockedExchangeT
+#define FastInterlockCompareExchangePointer InterlockedCompareExchangeT
+
+FORCEINLINE void FastInterlockOr(DWORD RAW_KEYWORD(volatile) *p, const int msk)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ InterlockedOr((LONG *)p, msk);
+}
+
+FORCEINLINE void FastInterlockAnd(DWORD RAW_KEYWORD(volatile) *p, const int msk)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ InterlockedAnd((LONG *)p, msk);
+}
+
+#endif
+
+#ifndef FEATURE_PAL
+// Copied from malloc.h: don't want to bring in the whole header file.
+void * __cdecl _alloca(size_t);
+#endif // !FEATURE_PAL
+
+#ifdef _PREFAST_
+// Suppress prefast warning #6255: alloca indicates failure by raising a stack overflow exception
+#pragma warning(disable:6255)
+#endif // _PREFAST_
+
+// Function to parse apart a command line and return the
+// arguments just like argv and argc
+LPWSTR* CommandLineToArgvW(__in LPWSTR lpCmdLine, DWORD *pNumArgs);
+#define ISWWHITE(x) ((x)==W(' ') || (x)==W('\t') || (x)==W('\n') || (x)==W('\r') )
+
+BOOL inline FitsInI1(__int64 val)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return val == (__int64)(__int8)val;
+}
+
+BOOL inline FitsInI2(__int64 val)
+{
+ LIMITED_METHOD_CONTRACT;
+ return val == (__int64)(__int16)val;
+}
+
+BOOL inline FitsInI4(__int64 val)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return val == (__int64)(__int32)val;
+}
+
+BOOL inline FitsInU1(unsigned __int64 val)
+{
+ LIMITED_METHOD_CONTRACT;
+ return val == (unsigned __int64)(unsigned __int8)val;
+}
+
+BOOL inline FitsInU2(unsigned __int64 val)
+{
+ LIMITED_METHOD_CONTRACT;
+ return val == (unsigned __int64)(unsigned __int16)val;
+}
+
+BOOL inline FitsInU4(unsigned __int64 val)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return val == (unsigned __int64)(unsigned __int32)val;
+}
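+
+// Examples: FitsInI1(127) is TRUE but FitsInI1(128) is FALSE (128 truncates to -128
+// as an __int8); FitsInU2(0xFFFF) is TRUE but FitsInU2(0x10000) is FALSE.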
+
+// returns FALSE if (*pa) + b overflows 15 bits; otherwise, (*pa) is incremented by b
+BOOL inline SafeAddUINT15(UINT16 *pa, ULONG b)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ UINT16 a = *pa;
+ // first check if overflows 16 bits
+ if ( ((UINT16)b) != b )
+ {
+ return FALSE;
+ }
+ // now make sure that doesn't overflow 15 bits
+ if (((ULONG)a + b) > 0x00007FFF)
+ {
+ return FALSE;
+ }
+ (*pa) += (UINT16)b;
+ return TRUE;
+}
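+
+// Worked example: *pa == 0x7FFE, b == 1 gives 0x7FFF, which still fits in 15 bits,
+// so it returns TRUE; *pa == 0x7FFF, b == 1 would give 0x8000, so it returns FALSE
+// and leaves *pa unchanged.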
+
+
+// returns FALSE if (*pa) + b overflows 16 bits; otherwise, (*pa) is incremented by b
+BOOL inline SafeAddUINT16(UINT16 *pa, ULONG b)
+{
+ UINT16 a = *pa;
+ if ( ((UINT16)b) != b )
+ {
+ return FALSE;
+ }
+ // now make sure that doesn't overflow 16 bits
+ if (((ULONG)a + b) > 0x0000FFFF)
+ {
+ return FALSE;
+ }
+ (*pa) += (UINT16)b;
+ return TRUE;
+}
+
+
+// returns FALSE on overflow; otherwise, (*pa) is incremented by b
+BOOL inline SafeAddUINT32(UINT32 *pa, UINT32 b)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ UINT32 a = *pa;
+ if ( ((UINT32)(a + b)) < a)
+ {
+ return FALSE;
+ }
+ (*pa) += b;
+ return TRUE;
+}
+
+// returns FALSE on overflow; otherwise, (*pa) is incremented by b
+BOOL inline SafeAddULONG(ULONG *pa, ULONG b)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ULONG a = *pa;
+ if ( ((ULONG)(a + b)) < a)
+ {
+ return FALSE;
+ }
+ (*pa) += b;
+ return TRUE;
+}
+
+// returns FALSE on overflow; otherwise, (*pa) is multiplied by b
+BOOL inline SafeMulSIZE_T(SIZE_T *pa, SIZE_T b)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef _DEBUG_IMPL
+ {
+ //Make sure SIZE_T is unsigned
+ SIZE_T m = ((SIZE_T)(-1));
+ SIZE_T z = 0;
+ _ASSERTE(m > z);
+ }
+#endif
+
+
+ SIZE_T a = *pa;
+ const SIZE_T m = ((SIZE_T)(-1));
+ if ( (m / b) < a )
+ {
+ return FALSE;
+ }
+ (*pa) *= b;
+ return TRUE;
+}
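+
+// Typical use (ELEMENT and cElems are hypothetical): guard an allocation-size
+// computation against overflow before allocating.
+//
+//     SIZE_T cb = cElems;
+//     if (!SafeMulSIZE_T(&cb, sizeof(ELEMENT)))
+//         return E_OUTOFMEMORY;        // cElems * sizeof(ELEMENT) would overflow
+//     BYTE *pBuf = new (nothrow) BYTE[cb];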
+
+
+
+//************************************************************************
+// CQuickHeap
+//
+// A fast non-multithread-safe heap for short term use.
+// Destroying the heap frees all blocks allocated from the heap.
+// Blocks cannot be freed individually.
+//
+// The heap uses COM+ exceptions to report errors.
+//
+// The heap does not use any internal synchronization, so it is not
+// thread-safe.
+//************************************************************************
+class CQuickHeap
+{
+ public:
+ CQuickHeap();
+ ~CQuickHeap();
+
+ //---------------------------------------------------------------
+ // Allocates a block of "sz" bytes. If there's not enough
+ // memory, throws an OutOfMemoryError.
+ //---------------------------------------------------------------
+ LPVOID Alloc(UINT sz);
+
+
+ private:
+ enum {
+#ifdef _DEBUG
+ kBlockSize = 24
+#else
+ kBlockSize = 1024
+#endif
+ };
+
+ // The QuickHeap allocates QuickBlocks as needed and chains
+ // them in a singly-linked list. Most QuickBlocks have a size
+ // of kBlockSize bytes (not counting m_next), and individual
+ // allocation requests are suballocated from them.
+ // Allocation requests of greater than kBlockSize are satisfied
+ // by allocating a special big QuickBlock of the right size.
+ struct QuickBlock
+ {
+ QuickBlock *m_next;
+ BYTE m_bytes[1];
+ };
+
+
+ // Linked list of QuickBlocks.
+ QuickBlock *m_pFirstQuickBlock;
+
+ // Offset to next available byte in m_pFirstQuickBlock.
+ LPBYTE m_pNextFree;
+
+ // Linked list of big QuickBlocks.
+ QuickBlock *m_pFirstBigQuickBlock;
+
+};
+
+//======================================================================
+// String Helpers
+//
+//
+//
+ULONG StringHashValueW(__in LPWSTR wzString);
+ULONG StringHashValueA(LPCSTR szString);
+
+void PrintToStdOutA(const char *pszString);
+void PrintToStdOutW(const WCHAR *pwzString);
+void PrintToStdErrA(const char *pszString);
+void PrintToStdErrW(const WCHAR *pwzString);
+void NPrintToStdOutA(const char *pszString, size_t nbytes);
+void NPrintToStdOutW(const WCHAR *pwzString, size_t nchars);
+void NPrintToStdErrA(const char *pszString, size_t nbytes);
+void NPrintToStdErrW(const WCHAR *pwzString, size_t nchars);
+
+
+//=====================================================================
+// Function for formatted text output to the debugger
+//
+//
+void __cdecl VMDebugOutputA(__in LPSTR format, ...);
+void __cdecl VMDebugOutputW(__in LPWSTR format, ...);
+
+//=====================================================================
+// VM-safe wrapper for PostError.
+//
+HRESULT VMPostError( // Returned error.
+ HRESULT hrRpt, // Reported error.
+ ...); // Error arguments.
+
+//=====================================================================
+// Displays the message box or logs the message corresponding to the last COM+ error that occurred
+void VMDumpCOMErrors(HRESULT hrErr);
+HRESULT LoadMscorsn();
+
+#include "nativevaraccessors.h"
+
+#ifndef FEATURE_PAL
+
+HRESULT WszSHGetFolderPath(HWND hwndOwner, int nFolder, HANDLE hToken, DWORD dwFlags, size_t cchPath, __out_ecount(MAX_PATH) LPWSTR pszwPath);
+HRESULT WszShellExecute(HWND hwnd, LPCTSTR lpOperation, LPCTSTR lpFile, LPCTSTR lpParameters, LPCTSTR lpDirectory, INT nShowCmd);
+
+#ifndef DACCESS_COMPILE
+#include "shellapi.h"
+HRESULT WszShellExecuteEx(LPSHELLEXECUTEINFO lpExecInfo);
+#endif // #ifndef DACCESS_COMPILE
+
+#endif // !FEATURE_PAL
+
+BOOL GetUserDir(__out_ecount(bufferCount) WCHAR * buffer, size_t bufferCount, BOOL fRoaming);
+BOOL GetInternetCacheDir(__out_ecount(bufferCount) WCHAR * buffer, size_t bufferCount );
+
+HRESULT GetUserSidString (HANDLE hToken, __deref_out LPWSTR *wszSid);
+BOOL IsUserProfileLoaded();
+
+//======================================================================
+// Stack friendly registry helpers
+//
+LONG UtilRegEnumKey(HKEY hKey, // handle to key to query
+ DWORD dwIndex, // index of subkey to query
+ CQuickWSTR* lpName);// buffer for subkey name
+
+LONG UtilRegQueryStringValueEx(HKEY hKey, // handle to key to query
+ LPCWSTR lpValueName, // address of name of value to query
+ LPDWORD lpReserved, // reserved
+ LPDWORD lpType, // address of buffer for value type
+ CQuickWSTR* lpData);// data buffer
+
+//======================================================================
+// Event logging
+
+BOOL ReportEventCLR (
+ IN WORD wType, // Event type - warning, error, success, etc
+ IN WORD wCategory, // Event category
+ IN DWORD dwEventID, // Event identifier (defined in shimr\msg.mc)
+ IN PSID lpUserSid, // user's security identifier
+ IN SString * message // message to log
+ );
+
+// --------------------------------------------------------------------------------
+// GCX macros
+//
+// These are the normal way to change or assert the GC mode of a thread. They handle
+// the required stack discipline in mode switches with an autodestructor which
+// automatically triggers on leaving the current scope.
+//
+// Usage:
+// GCX_COOP(); Switch to cooperative mode, assume thread is setup
+// GCX_PREEMP(); Switch to preemptive mode, NOP if no thread setup
+// GCX_COOP_THREAD_EXISTS(Thread*); Fast switch to cooperative mode, must pass non-null Thread
+// GCX_PREEMP_THREAD_EXISTS(Thread*); Fast switch to preemptive mode, must pass non-null Thread
+//
+// (There is an intentional asymmetry between GCX_COOP and GCX_PREEMP. GCX_COOP
+// asserts if you call it without having a Thread setup. GCX_PREEMP becomes a NOP.
+// This is because all unmanaged threads are effectively preemp.)
+//
+// (There is actually one more case here - an "EE worker thread" such as the debugger
+// thread or GC thread, which we don't want to call SetupThread() on, but which is
+// effectively in cooperative mode due to explicit cooperation with the collector.
+// This case is not handled by these macros; the current working assumption is that
+// such threads never use them. But at some point we may have to consider
+// this case if there is utility code which is called from those threads.)
+//
+// GCX_MAYBE_*(BOOL); Same as above, but only do the switch if BOOL is TRUE.
+//
+// GCX_ASSERT_*(); Same as above, but assert mode rather than switch to mode.
+// Note that assert is applied during backout as well.
+// No overhead in a free build.
+//
+// GCX_FORBID(); Add "ForbidGC" semantics to a cooperative mode situation.
+// Asserts that the thread will not trigger a GC or
+// reach a GC-safe point, or call anything that might
+// do one of these things.
+//
+// GCX_NOTRIGGER(); "ForbidGC" without the automatic assertion for coop mode.
+//
+// --------------------------------------------------------------------------------
+
+template<BOOL COOPERATIVE>
+class AutoCleanupGCAssert;
+
+template<BOOL COOPERATIVE>
+class GCAssert;
+
+
+typedef AutoCleanupGCAssert<TRUE> AutoCleanupGCAssertCoop;
+typedef AutoCleanupGCAssert<FALSE> AutoCleanupGCAssertPreemp;
+
+typedef GCAssert<TRUE> GCAssertCoop;
+typedef GCAssert<FALSE> GCAssertPreemp;
+
+#if !defined(CROSSGEN_COMPILE) && !defined(DACCESS_COMPILE)
+
+#ifdef ENABLE_CONTRACTS_IMPL
+#define GCX_COOP() GCCoop __gcHolder("GCX_COOP", __FUNCTION__, __FILE__, __LINE__)
+#define GCX_COOP_NO_DTOR() GCCoopNoDtor __gcHolder; __gcHolder.Enter(TRUE, "GCX_COOP_NO_DTOR", __FUNCTION__, __FILE__, __LINE__)
+#define GCX_COOP_NO_DTOR_END() __gcHolder.Leave();
+#else
+#define GCX_COOP() GCCoop __gcHolder
+#define GCX_COOP_NO_DTOR() GCCoopNoDtor __gcHolder; __gcHolder.Enter(TRUE)
+#define GCX_COOP_NO_DTOR_END() __gcHolder.Leave();
+#endif
+
+#ifdef ENABLE_CONTRACTS_IMPL
+#define GCX_PREEMP() GCPreemp __gcHolder("GCX_PREEMP", __FUNCTION__, __FILE__, __LINE__)
+#define GCX_PREEMP_NO_DTOR() GCPreempNoDtor __gcHolder; __gcHolder.Enter(TRUE, "GCX_PREEMP_NO_DTOR", __FUNCTION__, __FILE__, __LINE__)
+#define GCX_PREEMP_NO_DTOR_HAVE_THREAD(curThreadNullOk) GCPreempNoDtor __gcHolder; __gcHolder.Enter(curThreadNullOk, TRUE, "GCX_PREEMP_NO_DTOR_HAVE_THREAD", __FUNCTION__, __FILE__, __LINE__)
+#define GCX_PREEMP_NO_DTOR_END() __gcHolder.Leave();
+#else
+#define GCX_PREEMP() GCPreemp __gcHolder
+#define GCX_PREEMP_NO_DTOR_HAVE_THREAD(curThreadNullOk) GCPreempNoDtor __gcHolder; __gcHolder.Enter(curThreadNullOk, TRUE)
+#define GCX_PREEMP_NO_DTOR() GCPreempNoDtor __gcHolder; __gcHolder.Enter(TRUE)
+#define GCX_PREEMP_NO_DTOR_END() __gcHolder.Leave()
+#endif
+
+#ifdef ENABLE_CONTRACTS_IMPL
+#define GCX_COOP_THREAD_EXISTS(curThread) GCCoopThreadExists __gcHolder((curThread), "GCX_COOP_THREAD_EXISTS", __FUNCTION__, __FILE__, __LINE__)
+#else
+#define GCX_COOP_THREAD_EXISTS(curThread) GCCoopThreadExists __gcHolder((curThread))
+#endif
+
+#ifdef ENABLE_CONTRACTS_IMPL
+#define GCX_PREEMP_THREAD_EXISTS(curThread) GCPreempThreadExists __gcHolder((curThread), "GCX_PREEMP_THREAD_EXISTS", __FUNCTION__, __FILE__, __LINE__)
+#else
+#define GCX_PREEMP_THREAD_EXISTS(curThread) GCPreempThreadExists __gcHolder((curThread))
+#endif
+
+#ifdef ENABLE_CONTRACTS_IMPL
+#define GCX_MAYBE_COOP(_cond) GCCoop __gcHolder(_cond, "GCX_MAYBE_COOP", __FUNCTION__, __FILE__, __LINE__)
+#define GCX_MAYBE_COOP_NO_DTOR(_cond) GCCoopNoDtor __gcHolder; __gcHolder.Enter(_cond, "GCX_MAYBE_COOP_NO_DTOR", __FUNCTION__, __FILE__, __LINE__)
+#define GCX_MAYBE_COOP_NO_DTOR_END() __gcHolder.Leave();
+#else
+#define GCX_MAYBE_COOP(_cond) GCCoop __gcHolder(_cond)
+#define GCX_MAYBE_COOP_NO_DTOR(_cond) GCCoopNoDtor __gcHolder; __gcHolder.Enter(_cond)
+#define GCX_MAYBE_COOP_NO_DTOR_END() __gcHolder.Leave();
+#endif
+
+#ifdef ENABLE_CONTRACTS_IMPL
+#define GCX_MAYBE_PREEMP(_cond) GCPreemp __gcHolder(_cond, "GCX_MAYBE_PREEMP", __FUNCTION__, __FILE__, __LINE__)
+#define GCX_MAYBE_PREEMP_NO_DTOR(_cond) GCPreempNoDtor __gcHolder; __gcHolder.Enter(_cond, "GCX_MAYBE_PREEMP_NO_DTOR", __FUNCTION__, __FILE__, __LINE__)
+#define GCX_MAYBE_PREEMP_NO_DTOR_END() __gcHolder.Leave();
+#else
+#define GCX_MAYBE_PREEMP(_cond) GCPreemp __gcHolder(_cond)
+#define GCX_MAYBE_PREEMP_NO_DTOR(_cond) GCPreempNoDtor __gcHolder; __gcHolder.Enter(_cond)
+#define GCX_MAYBE_PREEMP_NO_DTOR_END() __gcHolder.Leave()
+#endif
+
+#ifdef ENABLE_CONTRACTS_IMPL
+#define GCX_MAYBE_COOP_THREAD_EXISTS(curThread, _cond) GCCoopThreadExists __gcHolder((curThread), (_cond), "GCX_MAYBE_COOP_THREAD_EXISTS", __FUNCTION__, __FILE__, __LINE__)
+#else
+#define GCX_MAYBE_COOP_THREAD_EXISTS(curThread, _cond) GCCoopThreadExists __gcHolder((curThread), (_cond))
+#endif
+
+#ifdef ENABLE_CONTRACTS_IMPL
+#define GCX_MAYBE_PREEMP_THREAD_EXISTS(curThread, _cond) GCPreempThreadExists __gcHolder((curThread), (_cond), "GCX_MAYBE_PREEMP_THREAD_EXISTS", __FUNCTION__, __FILE__, __LINE__)
+#else
+#define GCX_MAYBE_PREEMP_THREAD_EXISTS(curThread, _cond) GCPreempThreadExists __gcHolder((curThread), (_cond))
+#endif
+
+// This has a potential race with the GC thread. It is currently
+// used for a few cases where (a) we potentially haven't started up the EE yet, or
+// (b) we are on a "special thread".
+#ifdef ENABLE_CONTRACTS_IMPL
+#define GCX_COOP_NO_THREAD_BROKEN() GCCoopHackNoThread __gcHolder("GCX_COOP_NO_THREAD_BROKEN", __FUNCTION__, __FILE__, __LINE__)
+#else
+#define GCX_COOP_NO_THREAD_BROKEN() GCCoopHackNoThread __gcHolder
+#endif
+
+
+#ifdef ENABLE_CONTRACTS_IMPL
+#define GCX_MAYBE_COOP_NO_THREAD_BROKEN(_cond) GCCoopHackNoThread __gcHolder(_cond, "GCX_MAYBE_COOP_NO_THREAD_BROKEN", __FUNCTION__, __FILE__, __LINE__)
+#else
+#define GCX_MAYBE_COOP_NO_THREAD_BROKEN(_cond) GCCoopHackNoThread __gcHolder(_cond)
+#endif
+
+#else // !defined(CROSSGEN_COMPILE) && !defined(DACCESS_COMPILE)
+
+#define GCX_COOP()
+#define GCX_COOP_NO_DTOR()
+#define GCX_COOP_NO_DTOR_END()
+
+#define GCX_PREEMP()
+#define GCX_PREEMP_NO_DTOR()
+#define GCX_PREEMP_NO_DTOR_HAVE_THREAD(curThreadNullOk)
+#define GCX_PREEMP_NO_DTOR_END()
+
+#define GCX_MAYBE_PREEMP(_cond)
+
+#define GCX_COOP_NO_THREAD_BROKEN()
+#define GCX_MAYBE_COOP_NO_THREAD_BROKEN(_cond)
+
+#define GCX_PREEMP_THREAD_EXISTS(curThread)
+#define GCX_COOP_THREAD_EXISTS(curThread)
+
+#define GCX_POP()
+
+#endif // !defined(CROSSGEN_COMPILE) && !defined(DACCESS_COMPILE)
+
+#if defined(_DEBUG_IMPL) && !defined(CROSSGEN_COMPILE)
+
+#define GCX_ASSERT_PREEMP() ::AutoCleanupGCAssertPreemp __gcHolder
+#define GCX_ASSERT_COOP() ::AutoCleanupGCAssertCoop __gcHolder
+
+#define BEGIN_GCX_ASSERT_COOP \
+ { \
+ GCAssertCoop __gcHolder; \
+ __gcHolder.BeginGCAssert()
+
+#define END_GCX_ASSERT_COOP \
+ __gcHolder.EndGCAssert(); \
+ }
+
+#define BEGIN_GCX_ASSERT_PREEMP \
+ { \
+ GCAssertPreemp __gcHolder; \
+ __gcHolder.BeginGCAssert()
+
+#define END_GCX_ASSERT_PREEMP \
+ __gcHolder.EndGCAssert(); \
+ }
+
+
+#else
+
+#define GCX_ASSERT_PREEMP()
+#define GCX_ASSERT_COOP()
+
+#define BEGIN_GCX_ASSERT_COOP \
+ {
+#define END_GCX_ASSERT_COOP \
+ }
+#define BEGIN_GCX_ASSERT_PREEMP \
+ {
+#define END_GCX_ASSERT_PREEMP \
+ }
+
+#endif
+
+#ifdef ENABLE_CONTRACTS_IMPL
+
+#define GCX_FORBID() ::GCForbid __gcForbidHolder(__FUNCTION__, __FILE__, __LINE__)
+#define GCX_NOTRIGGER() ::GCNoTrigger __gcNoTriggerHolder(__FUNCTION__, __FILE__, __LINE__)
+
+#define GCX_MAYBE_FORBID(fConditional) ::GCForbid __gcForbidHolder(fConditional, __FUNCTION__, __FILE__, __LINE__)
+#define GCX_MAYBE_NOTRIGGER(fConditional) ::GCNoTrigger __gcNoTriggerHolder(fConditional, __FUNCTION__, __FILE__, __LINE__)
+
+#else
+
+
+#define GCX_FORBID()
+#define GCX_NOTRIGGER()
+
+#define GCX_MAYBE_FORBID(fConditional)
+#define GCX_MAYBE_NOTRIGGER(fConditional)
+
+#endif
+
+typedef BOOL (*FnLockOwner)(LPVOID);
+struct LockOwner
+{
+ LPVOID lock;
+ FnLockOwner lockOwnerFunc;
+};
+
+// This is the standard LockOwner for things that require a lock owner but which really
+// don't need any validation due to their simple/safe semantics;
+// the classic example is a hash table that is initialized and then never grows.
+extern LockOwner g_lockTrustMeIAmThreadSafe;
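+//
+// A minimal sketch of the general case (assumed usage; the names are hypothetical):
+// callers that do want ownership validation supply their own lock pointer and predicate.
+//
+//     static BOOL IsOwnedByMeHypothetical(LPVOID pLock)
+//     { return /* check that the current thread owns pLock */ FALSE; }
+//
+//     LockOwner lo = { &g_myLockHypothetical, IsOwnedByMeHypothetical };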
+
+// The OS ThreadId is not a stable ID for a thread when a host uses fibers instead of threads.
+// For each managed Thread, we have a stable and unique id in the Thread object. For other threads,
+// e.g. server GC or concurrent GC threads and the debugger helper thread, we do not have a Thread
+// object, and we use the OS ThreadId to identify them since they are not managed by a host.
+class EEThreadId
+{
+private:
+ void *m_FiberPtrId;
+public:
+#ifdef _DEBUG
+ EEThreadId()
+ : m_FiberPtrId(NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+#endif
+
+ void SetThreadId()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ m_FiberPtrId = ClrTeb::GetFiberPtrId();
+ }
+
+ BOOL IsSameThread() const
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return (m_FiberPtrId == ClrTeb::GetFiberPtrId());
+ }
+
+
+#ifdef _DEBUG
+ BOOL IsUnknown() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_FiberPtrId == NULL;
+ }
+#endif
+ void ResetThreadId()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_FiberPtrId = NULL;
+ }
+};
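+
+// Illustrative sketch of the intended usage pattern (assumed, not prescribed by this header):
+//
+//     EEThreadId ownerId;
+//     ownerId.SetThreadId();               // record the current fiber/thread as owner
+//     // ... later, possibly on another code path ...
+//     _ASSERTE(ownerId.IsSameThread());    // assert we are still on the owning fiber/thread
+//     ownerId.ResetThreadId();             // release the association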
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+#define CLRMEMORYHOSTED 0x1
+#define CLRTASKHOSTED 0x2
+#define CLRSYNCHOSTED 0x4
+#define CLRTHREADPOOLHOSTED 0x8
+#define CLRIOCOMPLETIONHOSTED 0x10
+#define CLRASSEMBLYHOSTED 0x20
+#define CLRGCHOSTED 0x40
+#define CLRSECURITYHOSTED 0x80
+#endif
+#define CLRHOSTED 0x80000000
+
+GVAL_DECL(DWORD, g_fHostConfig);
+
+
+inline BOOL CLRHosted()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return g_fHostConfig;
+}
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+inline BOOL CLRMemoryHosted()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return g_fHostConfig&CLRMEMORYHOSTED;
+}
+
+inline BOOL CLRTaskHosted()
+{
+    // !!! Cannot use a contract here.
+    // !!! This function is called by Thread::DetachThread after we free TLS memory.
+    // !!! A contract would recreate the TLS memory.
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return g_fHostConfig&CLRTASKHOSTED;
+}
+
+inline BOOL CLRSyncHosted()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return g_fHostConfig&CLRSYNCHOSTED;
+}
+inline BOOL CLRThreadpoolHosted()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return g_fHostConfig&CLRTHREADPOOLHOSTED;
+}
+inline BOOL CLRIoCompletionHosted()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return g_fHostConfig&CLRIOCOMPLETIONHOSTED;
+}
+inline BOOL CLRAssemblyHosted()
+{
+ return g_fHostConfig&CLRASSEMBLYHOSTED;
+}
+
+inline BOOL CLRGCHosted()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return g_fHostConfig&CLRGCHOSTED;
+}
+
+inline BOOL CLRSecurityHosted()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return g_fHostConfig&CLRSECURITYHOSTED;
+}
+#else // FEATURE_INCLUDE_ALL_INTERFACES
+inline BOOL CLRMemoryHosted()
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+
+inline BOOL CLRTaskHosted()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return FALSE;
+}
+
+inline BOOL CLRSyncHosted()
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+
+inline BOOL CLRThreadpoolHosted()
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+
+inline BOOL CLRIoCompletionHosted()
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+
+inline BOOL CLRAssemblyHosted()
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+
+inline BOOL CLRGCHosted()
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+
+inline BOOL CLRSecurityHosted()
+{
+ LIMITED_METHOD_CONTRACT;
+ return FALSE;
+}
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+#ifndef FEATURE_PAL
+HMODULE CLRGetModuleHandle(LPCWSTR lpModuleFileName);
+
+// Equivalent to CLRGetModuleHandle(NULL) but doesn't have the INJECT_FAULT contract associated
+// with CLRGetModuleHandle.
+HMODULE CLRGetCurrentModuleHandle();
+
+HMODULE CLRLoadLibraryEx(LPCWSTR lpLibFileName, HANDLE hFile, DWORD dwFlags);
+#endif // !FEATURE_PAL
+
+HMODULE CLRLoadLibrary(LPCWSTR lpLibFileName);
+
+BOOL CLRFreeLibrary(HMODULE hModule);
+VOID CLRFreeLibraryAndExitThread(HMODULE hModule, DWORD dwExitCode);
+
+LPVOID
+CLRMapViewOfFileEx(
+ IN HANDLE hFileMappingObject,
+ IN DWORD dwDesiredAccess,
+ IN DWORD dwFileOffsetHigh,
+ IN DWORD dwFileOffsetLow,
+ IN SIZE_T dwNumberOfBytesToMap,
+ IN LPVOID lpBaseAddress
+ );
+
+LPVOID
+CLRMapViewOfFile(
+ IN HANDLE hFileMappingObject,
+ IN DWORD dwDesiredAccess,
+ IN DWORD dwFileOffsetHigh,
+ IN DWORD dwFileOffsetLow,
+ IN SIZE_T dwNumberOfBytesToMap
+ );
+
+
+BOOL
+CLRUnmapViewOfFile(
+ IN LPVOID lpBaseAddress
+ );
+
+BOOL CompareFiles(HANDLE hFile1,HANDLE hFile2);
+
+
+#ifndef DACCESS_COMPILE
+FORCEINLINE void VoidCLRUnmapViewOfFile(void *ptr) { CLRUnmapViewOfFile(ptr); }
+typedef Wrapper<void *, DoNothing, VoidCLRUnmapViewOfFile> CLRMapViewHolder;
+#else
+typedef Wrapper<void *, DoNothing, DoNothing> CLRMapViewHolder;
+#endif
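+
+// Illustrative sketch (assumed usage): the holder unmaps the view automatically when it
+// goes out of scope, including on exception paths:
+//
+//     CLRMapViewHolder pView(CLRMapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0));
+//     if (pView == NULL)
+//         return;   // mapping failed
+//     // ... read through pView; CLRUnmapViewOfFile runs on scope exit ...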
+
+void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX);
+
+void ProcessEventForHost(EClrEvent event, void *data);
+void ProcessSOEventForHost(EXCEPTION_POINTERS *pExceptionInfo, BOOL fInSoTolerant);
+BOOL IsHostRegisteredForEvent(EClrEvent event);
+
+#define SetupThreadForComCall(OOMRetVal) \
+ MAKE_CURRENT_THREAD_AVAILABLE_EX(GetThreadNULLOk()); \
+ if (CURRENT_THREAD == NULL) \
+ { \
+ CURRENT_THREAD = SetupThreadNoThrow(); \
+ if (CURRENT_THREAD == NULL) \
+ return OOMRetVal; \
+ } \
+
+
+#define InternalSetupForComCall(CannotEnterRetVal, OOMRetVal, SORetVal, CheckCanRunManagedCode) \
+SetupThreadForComCall(OOMRetVal); \
+if (CheckCanRunManagedCode && !CanRunManagedCode()) \
+ return CannotEnterRetVal; \
+SO_INTOLERANT_CODE_NOTHROW(CURRENT_THREAD, return SORetVal) \
+
+#define ComCallHostNotificationHR() \
+ReverseEnterRuntimeHolderNoThrow REHolder; \
+if (CLRTaskHosted()) \
+{ \
+ HRESULT hr = REHolder.AcquireNoThrow(); \
+ if (FAILED(hr)) \
+ { \
+ return hr; \
+ } \
+}
+
+#define SetupForComCallHRNoHostNotif() InternalSetupForComCall(HOST_E_CLRNOTAVAILABLE, E_OUTOFMEMORY, COR_E_STACKOVERFLOW, true)
+#define SetupForComCallHRNoHostNotifNoCheckCanRunManagedCode() InternalSetupForComCall(HOST_E_CLRNOTAVAILABLE, E_OUTOFMEMORY, COR_E_STACKOVERFLOW, false)
+#define SetupForComCallDWORDNoHostNotif() InternalSetupForComCall(-1, -1, -1, true)
+
+#define SetupForComCallHR() \
+InternalSetupForComCall(HOST_E_CLRNOTAVAILABLE, E_OUTOFMEMORY, COR_E_STACKOVERFLOW, true) \
+ComCallHostNotificationHR()
+
+#define SetupForComCallHRNoCheckCanRunManagedCode() \
+InternalSetupForComCall(HOST_E_CLRNOTAVAILABLE, E_OUTOFMEMORY, COR_E_STACKOVERFLOW, false) \
+ComCallHostNotificationHR()
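+
+// Sketch of how a COM entry point is expected to use these macros (DoWorkHypothetical is a
+// made-up method; the real call sites live in the interop layer):
+//
+//     HRESULT STDMETHODCALLTYPE CSomeServer::DoWorkHypothetical()
+//     {
+//         HRESULT hr = S_OK;
+//         SetupForComCallHR();   // thread setup, SO transition, host notification;
+//                                // may return an error HRESULT before the body runs
+//         // ... body may now safely run managed code ...
+//         return hr;
+//     }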
+
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+
+// Since Corrupting exceptions can escape COM interop boundaries,
+// these macros will be used to setup the initial SO-Intolerant transition.
+#define InternalSetupForComCallWithEscapingCorruptingExceptions(CannotEnterRetVal, OOMRetVal, SORetVal, CheckCanRunManagedCode) \
+if (CheckCanRunManagedCode && !CanRunManagedCode()) \
+ return CannotEnterRetVal; \
+SetupThreadForComCall(OOMRetVal); \
+BEGIN_SO_INTOLERANT_CODE_NOTHROW(CURRENT_THREAD, SORetVal) \
+
+#define BeginSetupForComCallHRWithEscapingCorruptingExceptions() \
+HRESULT __hr = S_OK; \
+InternalSetupForComCallWithEscapingCorruptingExceptions(HOST_E_CLRNOTAVAILABLE, E_OUTOFMEMORY, COR_E_STACKOVERFLOW, true) \
+ReverseEnterRuntimeHolderNoThrow REHolder; \
+if (CLRTaskHosted()) \
+{ \
+ __hr = REHolder.AcquireNoThrow(); \
+} \
+ \
+if (SUCCEEDED(__hr)) \
+{ \
+
+#define EndSetupForComCallHRWithEscapingCorruptingExceptions() \
+} \
+END_SO_INTOLERANT_CODE; \
+ \
+if (FAILED(__hr)) \
+{ \
+ return __hr; \
+} \
+
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+
+#define SetupForComCallDWORD() \
+InternalSetupForComCall(-1, -1, -1, true) \
+ReverseEnterRuntimeHolderNoThrow REHolder; \
+if (CLRTaskHosted()) \
+{ \
+ if (FAILED(REHolder.AcquireNoThrow())) \
+ { \
+ return -1; \
+ } \
+}
+
+// Special version of SetupForComCallDWORD that doesn't call
+// CanRunManagedCode() to avoid firing LoaderLock MDA
+#define SetupForComCallDWORDNoCheckCanRunManagedCode() \
+InternalSetupForComCall(-1, -1, -1, false) \
+ReverseEnterRuntimeHolderNoThrow REHolder; \
+if (CLRTaskHosted()) \
+{ \
+ if (FAILED(REHolder.AcquireNoThrow())) \
+ { \
+ return -1; \
+ } \
+}
+
+#include "unsafe.h"
+
+inline void UnsafeTlsFreeForHolder(DWORD* addr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (addr && *addr != TLS_OUT_OF_INDEXES) {
+ UnsafeTlsFree(*addr);
+ *addr = TLS_OUT_OF_INDEXES;
+ }
+}
+
+// A holder to make sure the TLS slot is released and the variable holding the allocated
+// index is reset to TLS_OUT_OF_INDEXES
+typedef Holder<DWORD*, DoNothing<DWORD*>, UnsafeTlsFreeForHolder> TlsHolder;
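+
+// Illustrative sketch (assumed usage): the holder frees the slot if initialization fails
+// part-way, and is dismissed once the slot is safely published:
+//
+//     DWORD tlsIndex = UnsafeTlsAlloc();
+//     TlsHolder holder(&tlsIndex);
+//     // ... initialization that may throw or return early ...
+//     holder.SuppressRelease();  // keep the slot on success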
+
+// A holder for HMODULE.
+FORCEINLINE void VoidFreeLibrary(HMODULE h) { WRAPPER_NO_CONTRACT; CLRFreeLibrary(h); }
+
+typedef Wrapper<HMODULE, DoNothing<HMODULE>, VoidFreeLibrary, NULL> ModuleHandleHolder;
+
+#ifndef FEATURE_PAL
+
+// A holder for memory blocks allocated by Windows. This holder (and any OS APIs you call
+// that allocate objects on your behalf) should not be used when the CLR is memory-hosted.
+
+FORCEINLINE void VoidFreeWinAllocatedBlock(LPVOID pv)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!CLRMemoryHosted());
+
+#pragma push_macro("GetProcessHeap")
+#pragma push_macro("HeapFree")
+#undef GetProcessHeap
+#undef HeapFree
+ // 0: no special flags
+ ::HeapFree(::GetProcessHeap(), 0, pv);
+#pragma pop_macro("HeapFree")
+#pragma pop_macro("GetProcessHeap")
+}
+
+typedef Wrapper<LPVOID, DoNothing<LPVOID>, VoidFreeWinAllocatedBlock, NULL> WinAllocatedBlockHolder;
+
+#endif // !FEATURE_PAL
+
+#ifndef CLR_STANDALONE_BINDER
+// For debugging, we can track arbitrary Can't-Stop regions.
+// In V1.0, this was on the Thread object, but we need to track this for threads w/o a Thread object.
+FORCEINLINE void IncCantStopCount()
+{
+ ClrFlsIncrementValue(TlsIdx_CantStopCount, 1);
+}
+
+FORCEINLINE void DecCantStopCount()
+{
+ ClrFlsIncrementValue(TlsIdx_CantStopCount, -1);
+}
+
+typedef StateHolder<IncCantStopCount, DecCantStopCount> CantStopHolder;
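+
+// Illustrative sketch: scoping a region where the debugger must not stop this thread:
+//
+//     {
+//         CantStopHolder hCantStop;   // IncCantStopCount now, DecCantStopCount on scope exit
+//         // ... can't-stop work ...
+//     }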
+
+#ifdef _DEBUG
+// Debug-only: this can be used with a holder to ensure that we keep our Can't-Stop count
+// balanced. We should never use this for control flow.
+inline int GetCantStopCount()
+{
+ return (int) (size_t) ClrFlsGetValue(TlsIdx_CantStopCount);
+}
+
+// At places where we know we're calling out to native code, we can assert that we're NOT
+// in a Can't-Stop region. This is debug-only since we only use it for asserts, not for
+// real control flow in a retail build.
+inline bool IsInCantStopRegion()
+{
+ return (GetCantStopCount() > 0);
+}
+#endif // _DEBUG
+#endif // !CLR_STANDALONE_BINDER
+
+
+// The PAL does not support per-thread locales, so the holder is a no-op when FEATURE_PAL is defined.
+class ThreadLocaleHolder
+{
+#ifndef FEATURE_PAL
+public:
+
+ ThreadLocaleHolder()
+ {
+ m_locale = GetThreadLocale();
+ }
+
+ ~ThreadLocaleHolder();
+
+private:
+ LCID m_locale;
+#endif // !FEATURE_PAL
+};
+
+
+
+BOOL IsValidMethodCodeNotification(USHORT Notification);
+
+typedef DPTR(struct JITNotification) PTR_JITNotification;
+struct JITNotification
+{
+ USHORT state; // values from CLRDataMethodCodeNotification
+ TADDR clrModule;
+ mdToken methodToken;
+
+ JITNotification() { SetFree(); }
+ BOOL IsFree() { return state == CLRDATA_METHNOTIFY_NONE; }
+ void SetFree() { state = CLRDATA_METHNOTIFY_NONE; clrModule = NULL; methodToken = 0; }
+ void SetState(TADDR moduleIn, mdToken tokenIn, USHORT NType)
+ {
+ _ASSERTE(IsValidMethodCodeNotification(NType));
+ clrModule = moduleIn;
+ methodToken = tokenIn;
+ state = NType;
+ }
+};
+
+GPTR_DECL(JITNotification,g_pNotificationTable);
+GVAL_DECL(ULONG32, g_dacNotificationFlags);
+
+class JITNotifications
+{
+public:
+ JITNotifications(JITNotification *jitTable);
+ BOOL SetNotification(TADDR clrModule, mdToken token, USHORT NType);
+ USHORT Requested(TADDR clrModule, mdToken token);
+
+ // if clrModule is NULL, all active notifications are changed to NType
+ BOOL SetAllNotifications(TADDR clrModule,USHORT NType,BOOL *changedOut);
+ inline BOOL IsActive() { LIMITED_METHOD_CONTRACT; return m_jitTable!=NULL; }
+
+ UINT GetTableSize();
+#ifdef DACCESS_COMPILE
+ static JITNotification *InitializeNotificationTable(UINT TableSize);
+ // Updates target table from host copy
+ BOOL UpdateOutOfProcTable();
+#endif
+
+private:
+ UINT GetLength();
+ void IncrementLength();
+ void DecrementLength();
+
+ BOOL FindItem(TADDR clrModule, mdToken token, UINT *indexOut);
+
+ JITNotification *m_jitTable;
+};
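+
+// Illustrative sketch (assumed usage; addrModule and tkMethod are placeholders) of how a
+// debugger-side caller might register and then query a JIT notification:
+//
+//     JITNotifications jn(g_pNotificationTable);
+//     if (jn.IsActive())
+//     {
+//         jn.SetNotification(addrModule, tkMethod, CLRDATA_METHNOTIFY_GENERATED);
+//         USHORT state = jn.Requested(addrModule, tkMethod);
+//     }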
+
+typedef DPTR(struct GcNotification) PTR_GcNotification;
+
+inline
+BOOL IsValidGcNotification(GcEvt_t evType)
+{ return (evType < GC_EVENT_TYPE_MAX); }
+
+#define CLRDATA_GC_NONE 0
+
+struct GcNotification
+{
+ GcEvtArgs ev;
+
+ GcNotification() { SetFree(); }
+ BOOL IsFree() { return ev.typ == CLRDATA_GC_NONE; }
+ void SetFree() { memset(this, 0, sizeof(*this)); ev.typ = (GcEvt_t) CLRDATA_GC_NONE; }
+ void Set(GcEvtArgs ev_)
+ {
+ _ASSERTE(IsValidGcNotification(ev_.typ));
+ ev = ev_;
+ }
+ BOOL IsMatch(GcEvtArgs ev_)
+ {
+ LIMITED_METHOD_CONTRACT;
+ if (ev.typ != ev_.typ)
+ {
+ return FALSE;
+ }
+ switch (ev.typ)
+ {
+ case GC_MARK_END:
+ if (ev_.condemnedGeneration == 0 ||
+ (ev.condemnedGeneration & ev_.condemnedGeneration) != 0)
+ {
+ return TRUE;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return FALSE;
+ }
+};
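+
+// Worked example of the GC_MARK_END matching rule above: condemnedGeneration is treated
+// as a generation bitmask. An incoming event with condemnedGeneration == 0 matches any
+// registered notification; otherwise the masks must intersect, e.g. a stored mask of 0x2
+// (gen 1) matches an incoming 0x6 (gens 1 and 2) but not an incoming 0x4 (gen 2 only).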
+
+GPTR_DECL(GcNotification, g_pGcNotificationTable);
+
+class GcNotifications
+{
+public:
+ GcNotifications(GcNotification *gcTable);
+ BOOL SetNotification(GcEvtArgs ev);
+ GcEvtArgs* GetNotification(GcEvtArgs ev)
+ {
+ LIMITED_METHOD_CONTRACT;
+ UINT idx;
+ if (FindItem(ev, &idx))
+ {
+ return &m_gcTable[idx].ev;
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+
+ inline BOOL IsActive()
+ { return m_gcTable != NULL; }
+
+ UINT GetTableSize()
+ { return Size(); }
+
+#ifdef DACCESS_COMPILE
+ static GcNotification *InitializeNotificationTable(UINT TableSize);
+ // Updates target table from host copy
+ BOOL UpdateOutOfProcTable();
+#endif
+
+private:
+ UINT& Length()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(IsActive());
+ UINT *pLen = (UINT *) &(m_gcTable[-1].ev.typ);
+ return *pLen;
+ }
+ UINT& Size()
+ {
+ _ASSERTE(IsActive());
+ UINT *pLen = (UINT *) &(m_gcTable[-1].ev.typ);
+ return *(pLen+1);
+ }
+ void IncrementLength()
+ { ++Length(); }
+ void DecrementLength()
+ { --Length(); }
+
+ BOOL FindItem(GcEvtArgs ev, UINT *indexOut);
+
+ GcNotification *m_gcTable;
+};
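+
+// Note on the layout assumed by Length() and Size() above: the table is allocated with one
+// extra GcNotification entry in front, and the first two UINTs of that header entry
+// (overlaying m_gcTable[-1].ev.typ) hold the current length and the total capacity:
+//
+//     [ header: {length, size, ...} ][ entry 0 ][ entry 1 ] ... [ entry size-1 ]
+//                                      ^-- m_gcTable points here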
+
+
+class MethodDesc;
+class Module;
+
+class DACNotify
+{
+public:
+ // types
+ enum {
+ MODULE_LOAD_NOTIFICATION=1,
+ MODULE_UNLOAD_NOTIFICATION=2,
+ JIT_NOTIFICATION=3,
+ JIT_DISCARD_NOTIFICATION=4,
+ EXCEPTION_NOTIFICATION=5,
+ GC_NOTIFICATION= 6,
+ CATCH_ENTER_NOTIFICATION = 7,
+ };
+
+ // called from the runtime
+ static void DoJITNotification(MethodDesc *MethodDescPtr);
+ static void DoJITDiscardNotification(MethodDesc *MethodDescPtr);
+ static void DoModuleLoadNotification(Module *Module);
+ static void DoModuleUnloadNotification(Module *Module);
+ static void DoExceptionNotification(class Thread* ThreadPtr);
+ static void DoGCNotification(const GcEvtArgs& evtargs);
+ static void DoExceptionCatcherEnterNotification(MethodDesc *MethodDescPtr, DWORD nativeOffset);
+
+ // called from the DAC
+ static int GetType(TADDR Args[]);
+ static BOOL ParseJITNotification(TADDR Args[], TADDR& MethodDescPtr);
+ static BOOL ParseJITDiscardNotification(TADDR Args[], TADDR& MethodDescPtr);
+ static BOOL ParseModuleLoadNotification(TADDR Args[], TADDR& ModulePtr);
+ static BOOL ParseModuleUnloadNotification(TADDR Args[], TADDR& ModulePtr);
+ static BOOL ParseExceptionNotification(TADDR Args[], TADDR& ThreadPtr);
+ static BOOL ParseGCNotification(TADDR Args[], GcEvtArgs& evtargs);
+ static BOOL ParseExceptionCatcherEnterNotification(TADDR Args[], TADDR& MethodDescPtr, DWORD& nativeOffset);
+};
+
+void DACNotifyCompilationFinished(MethodDesc *pMethodDesc);
+
+#ifdef _DEBUG
+#ifndef FEATURE_PAL
+// NOTE: The Windows Vista RTM SDK defines CaptureStackBackTrace as RtlCaptureStackBackTrace (in winbase.h).
+// We renamed CaptureStackBackTrace to UtilCaptureStackBackTrace in order to avoid conflicts with the Windows definition.
+USHORT UtilCaptureStackBackTrace(
+ ULONG FramesToSkip,
+ ULONG FramesToCapture,
+ PVOID * BackTrace,
+ OUT PULONG BackTraceHash);
+#endif // !FEATURE_PAL
+#endif //_DEBUG
+
+
+// These wrap the SString::CompareCaseInsensitive function in a way that makes it
+// easy to fix code that uses _stricmp. _stricmp should be avoided as it uses the current
+// C-runtime locale rather than the invariant culture.
+//
+// Note that unlike the real _stricmp, these functions unavoidably have a throws/gc_triggers/inject_fault
+// contract. So if you need a case-insensitive comparison in a place where you can't tolerate this contract,
+// you've got a problem.
+int __cdecl stricmpUTF8(const char* szStr1, const char* szStr2);
+
+#ifdef _DEBUG
+class DisableDelayLoadCheckForOleaut32
+{
+public:
+ DisableDelayLoadCheckForOleaut32();
+ ~DisableDelayLoadCheckForOleaut32();
+};
+#endif
+
+extern LONG g_OLEAUT32_Loaded;
+
+#ifndef FEATURE_CORECLR
+#define ENSURE_OLEAUT32_LOADED() \
+{ \
+ /* Should only be used in FCALL */ \
+ _ASSERTE (__me != 0); \
+ if (g_OLEAUT32_Loaded == 0) \
+ { \
+ /* CLRLoadLibrary/CLRFreeLibrary claim they trigger, but this */ \
+ /* isn't really true in this case because we're loading oleaut32 */ \
+ /* which we know doesn't contain any managed code in its DLLMain */ \
+ CONTRACT_VIOLATION(GCViolation|SOToleranceViolation); \
+ HMODULE hMod = CLRLoadLibrary(W("oleaut32")); \
+ if (hMod == NULL) \
+ { \
+ __FCThrow(__me, kOutOfMemoryException, 0, 0, 0, 0); \
+ } \
+ else \
+ { \
+ if (FastInterlockExchange(&g_OLEAUT32_Loaded, 1) == 1) \
+ { \
+ CLRFreeLibrary(hMod); \
+ } \
+ } \
+ } \
+} \
+INDEBUG(DisableDelayLoadCheckForOleaut32 _disableOleaut32Check);
+#else // !FEATURE_CORECLR
+#define ENSURE_OLEAUT32_LOADED()
+#endif // !FEATURE_CORECLR
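+
+// The interlocked exchange in ENSURE_OLEAUT32_LOADED implements a load-once idiom: every
+// racing thread loads oleaut32, but only the first to flip g_OLEAUT32_Loaded from 0 to 1
+// keeps its module reference; losers free their redundant handle. A stripped-down sketch:
+//
+//     HMODULE hMod = CLRLoadLibrary(W("oleaut32"));
+//     if (FastInterlockExchange(&g_OLEAUT32_Loaded, 1) == 1)
+//         CLRFreeLibrary(hMod);   // another thread won the race; drop the extra ref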
+
+BOOL DbgIsExecutable(LPVOID lpMem, SIZE_T length);
+
+#ifndef DACCESS_COMPILE
+// returns if ARM was already enabled or not.
+BOOL EnableARM();
+#endif // !DACCESS_COMPILE
+
+int GetRandomInt(int maxVal);
+
+class InternalCasingHelper {
+
+ private:
+    // Convert szIn to lower case in the Invariant locale.
+    // (TODO: NLS Arrowhead - called by the two ToLower variants below.)
+ static INT32 InvariantToLowerHelper(__out_bcount_opt(cMaxBytes) LPUTF8 szOut, int cMaxBytes, __in_z LPCUTF8 szIn, BOOL fAllowThrow);
+
+ public:
+ //
+ // Native helper functions to do correct casing operations in
+ // runtime native code.
+ //
+
+ // Convert szIn to lower case in the Invariant locale. (WARNING: May throw.)
+ static INT32 InvariantToLower(__out_bcount_opt(cMaxBytes) LPUTF8 szOut, int cMaxBytes, __in_z LPCUTF8 szIn);
+
+    // Convert szIn to lower case in the Invariant locale. (WARNING: This version
+    // won't throw, but it uses stack space as an intermediary, so don't
+    // use it for ridiculously long strings.)
+ static INT32 InvariantToLowerNoThrow(__out_bcount_opt(cMaxBytes) LPUTF8 szOut, int cMaxBytes, __in_z LPCUTF8 szIn);
+};
+
+//
+//
+// COMCHARACTER
+//
+//
+class COMCharacter {
+public:
+ //These are here for support from native code. They are never called from our managed classes.
+ static BOOL nativeIsWhiteSpace(WCHAR c);
+ static BOOL nativeIsDigit(WCHAR c);
+};
+
+#ifdef _DEBUG
+#define FORCEINLINE_NONDEBUG
+#else
+#define FORCEINLINE_NONDEBUG FORCEINLINE
+#endif
+
+#ifndef FEATURE_PAL
+// Extract the file version from an executable.
+HRESULT GetFileVersion(LPCWSTR wszFilePath, ULARGE_INTEGER* pFileVersion);
+#endif // !FEATURE_PAL
+
+#endif /* _H_UTIL */
+
diff --git a/src/vm/validator.cpp b/src/vm/validator.cpp
new file mode 100644
index 0000000000..517c74ccf0
--- /dev/null
+++ b/src/vm/validator.cpp
@@ -0,0 +1,947 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*
+ *
+ * Purpose: Provide the IValidator implementation.
+ * IValidator is used to validate the PE stub, metadata, and IL.
+ *
+ */
+
+#include "common.h"
+
+#include "corerror.h"
+#include "vererror.h"
+#include "ivalidator.h"
+#include "securityattributes.h"
+#include "corhost.h"
+#include "verifier.hpp"
+#include "pedecoder.h"
+#include "comcallablewrapper.h"
+#include "../dlls/mscorrc/resource.h"
+#include "posterror.h"
+#include "comcallablewrapper.h"
+#include "eeconfig.h"
+#include "corhost.h"
+#include "security.h"
+#include "appdomain.inl"
+
+typedef void (*VerifyErrorHandler)(void* pThis, HRESULT hrError, struct VerErrorStruct* pError);
+
+// Declare global variables
+#define DECLARE_DATA
+#include "veropcodes.hpp"
+#undef DECLARE_DATA
+
+class CValidator
+{
+public:
+ CValidator(IVEHandler *veh) : m_veh(veh)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+ HRESULT VerifyAllMethodsForClass(Module *pModule, mdTypeDef cl, ValidateWorkerArgs* pArgs);
+ HRESULT VerifyAllGlobalFunctions(Module *pModule, ValidateWorkerArgs* pArgs);
+ HRESULT VerifyAssembly(Assembly *pAssembly, ValidateWorkerArgs* pArgs);
+ HRESULT VerifyModule(Module* pModule, ValidateWorkerArgs* pArgs);
+ HRESULT ReportError(HRESULT hr, ValidateWorkerArgs* pArgs, mdToken tok=0);
+ HRESULT VerifyMethod(COR_ILMETHOD_DECODER* pILHeader, IVEHandler* pVEHandler, WORD wFlags, ValidateWorkerArgs* pArgs);
+ HRESULT VerifyExportedType(
+ Module * pModule,
+ mdToken tkExportedType,
+ ValidateWorkerArgs * pArgs);
+ void HandleError(HRESULT hrError, struct VerErrorStruct* pError);
+
+private:
+ IVEHandler *m_veh;
+ ValidateWorkerArgs* m_pArgs;
+}; // class CValidator
+
+HRESULT CValidator::ReportError(HRESULT hr, ValidateWorkerArgs* pArgs, mdToken tok /* = 0 */)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ if (m_veh == NULL)
+ return hr;
+
+ HRESULT hr2 = E_FAIL;
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return COR_E_STACKOVERFLOW);
+ VEContext vec;
+
+ memset(&vec, 0, sizeof(VEContext));
+
+ if (tok != 0)
+ {
+ vec.flags = VER_ERR_TOKEN;
+ vec.Token = tok;
+ }
+
+ hr2 = Verifier::ReportError(m_veh, hr, &vec, pArgs);
+ END_SO_INTOLERANT_CODE;
+ return hr2;
+} // CValidator::ReportError
+
+// Separate method since EX_TRY uses _alloca and is in a loop below.
+COR_ILMETHOD* GetILHeader(MethodDesc *pMD)
+{
+ STANDARD_VM_CONTRACT;
+
+ COR_ILMETHOD *pILHeader = NULL;
+
+ EX_TRY
+ {
+ pILHeader = pMD->GetILHeader();
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return pILHeader;
+}
+
+HRESULT CValidator::VerifyAllMethodsForClass(Module *pModule, mdTypeDef cl, ValidateWorkerArgs* pArgs)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+ MethodTable *pMT = NULL;
+
+ // In the case of COR_GLOBAL_PARENT_TOKEN (i.e. global functions), it is guaranteed
+ // that the module has a method table or our caller will have skipped this step.
+ TypeHandle th;
+ {
+        // <REVISIT>
+        // Although there's no assert to disable here, we need to improve OOM reliability in
+        // this code path: we are ignoring the HRESULT from the loader, which could cause an
+        // OOM failure to be disguised as something else. OOMs need to be handled or
+        // propagated up to the caller.
+        // </REVISIT>
+ CONTRACT_VIOLATION(0);
+
+ EX_TRY {
+ th = ClassLoader::LoadTypeDefOrRefThrowing(pModule, cl,
+ ClassLoader::ReturnNullIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (FAILED(hr)) {
+ if ((hr==COR_E_TYPELOAD) || (hr==VER_E_TYPELOAD)) {
+ hr = ReportError(hr, pArgs,cl);
+ } else {
+ hr = ReportError(hr, pArgs);
+ }
+ goto Exit;
+ }
+ }
+
+ pMT = th.GetMethodTable();
+ if (pMT == NULL)
+ {
+ hr = ReportError(VER_E_TYPELOAD, pArgs, cl);
+ goto Exit;
+ }
+
+ g_fVerifierOff = false;
+
+ {
+ // Verify all methods in class - excluding inherited methods
+ MethodTable::MethodIterator it(pMT);
+ for (; it.IsValid(); it.Next())
+ {
+ pArgs->pMethodDesc = it.GetMethodDesc();
+
+ bool fVerifyTransparentMethod = true;
+ if (pArgs->fTransparentMethodsOnly)
+ {
+ MethodSecurityDescriptor msd(pArgs->pMethodDesc);
+ fVerifyTransparentMethod = !msd.IsCritical();
+ }
+
+ if (pArgs->pMethodDesc &&
+ pArgs->pMethodDesc->GetMethodTable() == pMT &&
+ pArgs->pMethodDesc->IsIL() &&
+ !pArgs->pMethodDesc->IsAbstract() &&
+ !pArgs->pMethodDesc->IsUnboxingStub() &&
+ fVerifyTransparentMethod)
+ {
+ COR_ILMETHOD* pILHeader = GetILHeader(pArgs->pMethodDesc);
+
+ if (pILHeader != NULL)
+ {
+ COR_ILMETHOD_DECODER::DecoderStatus status;
+ COR_ILMETHOD_DECODER ILHeader(pILHeader,
+ pArgs->pMethodDesc->GetMDImport(), &status);
+
+ if (status == COR_ILMETHOD_DECODER::SUCCESS)
+ {
+ hr = VerifyMethod(&ILHeader, m_veh, VER_FORCE_VERIFY, pArgs);
+ if (hr == VER_E_INTERNAL) // this probably means peverify.dll was missing
+ {
+ goto Exit;
+ }
+ }
+ else if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR)
+ {
+ hr = COR_E_VERIFICATION;
+ }
+ else if (status == COR_ILMETHOD_DECODER::FORMAT_ERROR)
+ {
+ hr = COR_E_BADIMAGEFORMAT;
+ }
+ else
+ {
+ _ASSERTE(!"Unhandled status from COR_ILMETHOD_DECODER");
+ }
+ }
+ else
+ {
+ hr = COR_E_BADIMAGEFORMAT;
+ }
+
+ if (FAILED(hr))
+ hr = ReportError(hr, pArgs);
+
+ if (FAILED(hr))
+ goto Exit;
+ }
+ // We should ideally have an API to yield to the host,
+ // but this is not critical for Whidbey.
+ if (CLRTaskHosted())
+ ClrSleepEx(0, FALSE);
+ }
+ }
+
+Exit:
+ pArgs->pMethodDesc = NULL;
+ return hr;
+} // CValidator::VerifyAllMethodsForClass
+
+//---------------------------------------------------------------------------------------
+//
+void
+MethodDescAndCorILMethodDecoderToCorInfoMethodInfo(
+ MethodDesc * ftn,
+ COR_ILMETHOD_DECODER * ILHeader,
+ CORINFO_METHOD_INFO * pMethodInfo)
+{
+ STANDARD_VM_CONTRACT;
+
+ pMethodInfo->ftn = CORINFO_METHOD_HANDLE(ftn);
+ pMethodInfo->scope = CORINFO_MODULE_HANDLE(ftn->GetModule());
+ pMethodInfo->ILCode = const_cast<BYTE*>(ILHeader->Code);
+ pMethodInfo->ILCodeSize = ILHeader->GetCodeSize();
+ pMethodInfo->maxStack = ILHeader->GetMaxStack();
+ pMethodInfo->EHcount = ILHeader->EHCount();
+ pMethodInfo->options =
+ (CorInfoOptions)
+ (((ILHeader->GetFlags() & CorILMethod_InitLocals) ? CORINFO_OPT_INIT_LOCALS : 0) |
+ (ftn->AcquiresInstMethodTableFromThis() ? CORINFO_GENERICS_CTXT_FROM_THIS : 0) |
+ (ftn->RequiresInstMethodTableArg() ? CORINFO_GENERICS_CTXT_FROM_METHODTABLE : 0) |
+ (ftn->RequiresInstMethodDescArg() ? CORINFO_GENERICS_CTXT_FROM_METHODDESC : 0));
+
+ PCCOR_SIGNATURE pSigToConvert;
+ DWORD cbSigToConvert;
+ ftn->GetSig(&pSigToConvert, &cbSigToConvert);
+ CONSISTENCY_CHECK(NULL != pSigToConvert);
+ // fetch the method signature
+ CEEInfo::ConvToJitSig(
+ pSigToConvert,
+ cbSigToConvert,
+ pMethodInfo->scope,
+ mdTokenNil,
+ &pMethodInfo->args,
+ ftn,
+ false);
+
+ //@GENERICS:
+ // Shared generic methods and shared methods on generic structs take an extra argument representing their instantiation
+ if (ftn->RequiresInstArg())
+ pMethodInfo->args.callConv = (CorInfoCallConv) (pMethodInfo->args.callConv | CORINFO_CALLCONV_PARAMTYPE);
+
+    // The method attributes and the signature must be consistent.
+ _ASSERTE(!!ftn->IsStatic() == ((pMethodInfo->args.callConv & CORINFO_CALLCONV_HASTHIS) == 0));
+
+ // And its local variables
+ CEEInfo::ConvToJitSig(
+ ILHeader->LocalVarSig,
+ ILHeader->cbLocalVarSig,
+ pMethodInfo->scope,
+ mdTokenNil,
+ &pMethodInfo->locals,
+ ftn,
+ true);
+} // MethodDescAndCorILMethodDecoderToCorInfoMethodInfo
+
+//---------------------------------------------------------------------------------------
+//
+void PEVerifyErrorHandler(void* pThis, HRESULT hrError, struct VerErrorStruct* pError)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ ((CValidator*)pThis)->HandleError(hrError, pError);
+}
+
+void CValidator::HandleError(HRESULT hrError, struct VerErrorStruct* pError)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+ _ASSERTE(sizeof(VEContext) == sizeof(struct VerErrorStruct));
+ Verifier::ReportError(m_veh, hrError, (VEContext*)pError, m_pArgs);
+ END_SO_INTOLERANT_CODE;
+}
+typedef void (__stdcall* VerifyFunc)(ICorJitInfo* pJitInfo, CORINFO_METHOD_INFO* pMethodInfo, VerifyErrorHandler pErrorHandler, void* pThis);
+static void VerifyMethodHelper(VerifyFunc pVerFunc, CEEJitInfo* pJI, CORINFO_METHOD_INFO* pMethodInfo, void* pThis)
+{
+ // Helper method to allow us to use SO_TOLERANT_CODE macro
+ STATIC_CONTRACT_SO_INTOLERANT;
+ WRAPPER_NO_CONTRACT;
+
+ BEGIN_SO_TOLERANT_CODE(GetThread());
+ // Verify the method
+ pVerFunc(pJI, pMethodInfo, PEVerifyErrorHandler, pThis);
+ END_SO_TOLERANT_CODE;
+
+}
+
+static Volatile<VerifyFunc> g_pVerFunc = NULL;
+
+HRESULT CValidator::VerifyMethod(COR_ILMETHOD_DECODER* pILHeader, IVEHandler* pVEHandler, WORD wFlags, ValidateWorkerArgs* pArgs)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ // Find the DLL entrypoint
+ m_pArgs = pArgs;
+ if (g_pVerFunc.Load() == NULL)
+ {
+ HINSTANCE hJit64 = NULL;
+ if (SUCCEEDED(g_pCLRRuntime->LoadLibrary(W("peverify.dll"), &hJit64)))
+ {
+ typedef void (__stdcall* psxsPeVerifyStartup) (CoreClrCallbacks);
+ psxsPeVerifyStartup sxsPeVerifyStartup = (psxsPeVerifyStartup) GetProcAddress(hJit64, "sxsPeVerifyStartup");
+
+ if(sxsPeVerifyStartup)
+ {
+ CoreClrCallbacks cccallbacks = GetClrCallbacks();
+ (*sxsPeVerifyStartup) (cccallbacks);
+ g_pVerFunc = (VerifyFunc)GetProcAddress(hJit64, "VerifyMethod");
+ }
+ }
+ }
+
+ if(!g_pVerFunc)
+ {
+ _ASSERTE(!"Failed to load peverify.dll or find VerifyMethod proc address");
+ hr = VER_E_INTERNAL;
+ }
+ else
+ {
+ Thread *pThread = GetThread();
+ if (pThread->IsAbortRequested())
+ {
+ pThread->HandleThreadAbort();
+ }
+ // Prepare the args
+ MethodDesc* ftn = pArgs->pMethodDesc;
+ CEEJitInfo ji(pArgs->pMethodDesc, pILHeader, NULL, true /* verify only */);
+ CORINFO_METHOD_INFO methodInfo;
+ MethodDescAndCorILMethodDecoderToCorInfoMethodInfo(ftn, pILHeader, &methodInfo);
+
+ // Verify the method
+ VerifyMethodHelper(g_pVerFunc, &ji, &methodInfo, this);
+ }
+ }
+ EX_CATCH
+ {
+ // Catch and report any errors that peverify.dll lets fall through (ideally that should never happen)
+ hr = GET_EXCEPTION()->GetHR();
+ hr = ReportError(hr, pArgs);
+ }
+ EX_END_CATCH(RethrowTerminalExceptions)
+
+ return hr;
+} // CValidator::VerifyMethod
+
+// Helper function to verify the global functions
+HRESULT CValidator::VerifyAllGlobalFunctions(Module *pModule, ValidateWorkerArgs* pArgs)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+ // Is there anything worth verifying?
+ if (pModule->GetGlobalMethodTable())
+ hr = VerifyAllMethodsForClass(pModule, COR_GLOBAL_PARENT_TOKEN, pArgs);
+ return hr;
+} // CValidator::VerifyAllGlobalFunctions
+
+HRESULT CValidator::VerifyModule(Module* pModule, ValidateWorkerArgs* pArgs)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Get a count of all the classdefs and enumerate them.
+ HRESULT hr = S_OK;
+ IMDInternalImport * pMDI = NULL;
+
+ if (pModule == NULL)
+ {
+ IfFailGo(VER_E_BAD_MD);
+ }
+
+ pMDI = pModule->GetMDImport();
+ if (pMDI == NULL)
+ {
+ IfFailGo(VER_E_BAD_MD);
+ }
+
+ // First verify all global functions - if there are any
+ IfFailGoto(
+ VerifyAllGlobalFunctions(pModule, pArgs),
+ ErrExit_SkipReportError);
+
+ {
+ HENUMTypeDefInternalHolder hTypeDefEnum(pMDI);
+
+ IfFailGo(hTypeDefEnum.EnumTypeDefInitNoThrow());
+
+ // Verify all TypeDefs
+ mdTypeDef tkTypeDef;
+ while (pMDI->EnumTypeDefNext(&hTypeDefEnum, &tkTypeDef))
+ {
+ IfFailGoto(
+ VerifyAllMethodsForClass(pModule, tkTypeDef, pArgs),
+ ErrExit_SkipReportError);
+ }
+ }
+
+ {
+ HENUMInternalHolder hExportedTypeEnum(pMDI);
+
+ IfFailGo(hExportedTypeEnum.EnumInitNoThrow(
+ mdtExportedType,
+ mdTokenNil));
+
+ // Verify all ExportedTypes
+ mdToken tkExportedType;
+ while (pMDI->EnumNext(&hExportedTypeEnum, &tkExportedType))
+ {
+ IfFailGoto(
+ VerifyExportedType(pModule, tkExportedType, pArgs),
+ ErrExit_SkipReportError);
+ }
+ }
+
+ErrExit:
+ if (FAILED(hr))
+ {
+ hr = ReportError(hr, pArgs);
+ }
+
+ErrExit_SkipReportError:
+ return hr;
+} // CValidator::VerifyModule
+
+HRESULT CValidator::VerifyAssembly(Assembly *pAssembly, ValidateWorkerArgs* pArgs)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr;
+
+ _ASSERTE(pAssembly->GetManifestImport());
+
+    // Verify the module containing the manifest. There is no FileReference for it,
+    // so it will not show up in the file enumeration below.
+ hr = VerifyModule(pAssembly->GetManifestModule(), pArgs);
+ if (FAILED(hr))
+ goto Exit;
+
+ {
+ IMDInternalImport* pManifestImport = pAssembly->GetManifestImport();
+
+ HENUMInternalHolder hEnum(pManifestImport);
+
+ mdToken mdFile;
+ hr = hEnum.EnumInitNoThrow(mdtFile, mdTokenNil);
+ if (FAILED(hr))
+ {
+ hr = ReportError(hr, pArgs);
+ goto Exit;
+ }
+
+ while(pManifestImport->EnumNext(&hEnum, &mdFile))
+ {
+ DomainFile* pModule = pAssembly->GetManifestModule()->LoadModule(GetAppDomain(), mdFile, FALSE);
+
+ if (pModule != NULL)
+ {
+ hr = VerifyModule(pModule->GetModule(), pArgs);
+ if (FAILED(hr))
+ goto Exit;
+ }
+ }
+ }
+
+Exit:
+ return hr;
+} // CValidator::VerifyAssembly
+
+HRESULT
+CValidator::VerifyExportedType(
+ Module * pModule,
+ mdToken tkExportedType,
+ ValidateWorkerArgs * pArgs)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr;
+ TypeHandle th;
+ NameHandle nameHandle(pModule, tkExportedType);
+
+ LPCSTR szNamespace;
+ LPCSTR szName;
+ IfFailGo(pModule->GetMDImport()->GetExportedTypeProps(
+ tkExportedType,
+ &szNamespace,
+ &szName,
+ NULL, // tkImplementation
+ NULL, // tkTypeDefId
+ NULL)); // dwExportedTypeFlags
+
+ nameHandle.SetName(szNamespace, szName);
+
+ EX_TRY
+ {
+ th = pModule->GetClassLoader()->LoadTypeHandleThrowing(
+ &nameHandle,
+ CLASS_LOADED,
+ pModule);
+ hr = S_OK;
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ IfFailGo(hr);
+ if (th.GetMethodTable() == NULL)
+ {
+ IfFailGo(VER_E_TYPELOAD);
+ }
+
+ErrExit:
+ if (FAILED(hr))
+ {
+ hr = ReportError(hr, pArgs, tkExportedType);
+ }
+
+ return hr;
+} // CValidator::VerifyExportedType
+
+static void ValidateWorker(LPVOID /* ValidateWorker_Args */ ptr)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ ValidateWorkerArgs *args = (ValidateWorkerArgs *) ptr;
+ AppDomain *pDomain = GetThread()->GetDomain();
+
+ StackSString ssFile(args->wszFileName);
+ StackSString ssFileDir;
+ StackSString ssDirectory;
+
+    // Fill ssDirectory with just the drive of the file (e.g. 'C:')
+ SplitPath(ssFile, &ssDirectory, &ssFileDir, NULL, NULL);
+    // Now append the directory from the file name (incl. leading and trailing '/' or '\')
+ ssDirectory.Append(ssFileDir);
+
+ {
+ // Set up the domain to resolve all dependency assemblies for introspection
+ struct _gc {
+ OBJECTREF orAppDomain;
+ STRINGREF refDirectory;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+
+ GCPROTECT_BEGIN(gc);
+
+ gc.orAppDomain = pDomain->GetExposedObject();
+ if (!ssDirectory.IsEmpty())
+ {
+ gc.refDirectory = StringObject::NewString(ssDirectory);
+ }
+
+ MethodDescCallSite meth(METHOD__APP_DOMAIN__ENABLE_RESOLVE_ASSEMBLIES_FOR_INTROSPECTION, &gc.orAppDomain);
+ ARG_SLOT args[2] =
+ {
+ ObjToArgSlot(gc.orAppDomain),
+ ObjToArgSlot(gc.refDirectory)
+ };
+ meth.Call(args);
+
+ GCPROTECT_END();
+ }
+
+ GCX_PREEMP();
+
+ Assembly *pAssembly;
+ if (args->wszFileName)
+ {
+ // Load the primary assembly for introspection
+ AssemblySpec spec;
+ spec.SetCodeBase(args->wszFileName);
+ spec.SetIntrospectionOnly(TRUE);
+ pAssembly = spec.LoadAssembly(FILE_LOADED);
+ }
+ else
+ {
+        // TODO: This is a workaround to get SQLCLR running.
+        // Our loader requires that a parent assembly is specified in order to load an
+        // assembly from a byte array, but here we do not know the parent.
+ PEAssemblyHolder pFile(PEAssembly::OpenMemory(SystemDomain::System()->SystemFile(),
+ args->pe, args->size, TRUE));
+ pAssembly = pDomain->LoadAssembly(NULL, pFile, FILE_LOADED);
+ }
+
+ // Verify the assembly
+ args->hr = args->val->VerifyAssembly(pAssembly, args);
+}
+
+
+static HRESULT ValidateHelper(
+ IVEHandler *veh,
+ IUnknown *pAppDomain,
+ DWORD ulAppDomainId,
+ BOOL UseId,
+ unsigned long ulFlags,
+ unsigned long ulMaxError,
+ unsigned long token,
+ __in_z LPWSTR fileName,
+ BYTE *pe,
+ unsigned long ulSize)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ Thread *pThread = GetThread();
+
+ if (pe == NULL)
+ return E_POINTER;
+
+ HRESULT hr = S_OK;
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, return COR_E_STACKOVERFLOW);
+ ADID pDomain;
+ ValidateWorkerArgs args;
+ CValidator val(veh);
+ AppDomainFromIDHolder ad;
+
+ BOOL Chk = FALSE;
+ BOOL UnloadDomain = FALSE;
+
+ GCX_COOP();
+
+ EX_TRY {
+ PEDecoder pev(pe, (COUNT_T)ulSize);
+
+ args.wszFileName = fileName;
+ args.fVerbose = (ulFlags & VALIDATOR_EXTRA_VERBOSE) ? true : false;
+ args.fShowSourceLines = (ulFlags & VALIDATOR_SHOW_SOURCE_LINES) ? true : false;
+ args.fTransparentMethodsOnly = (ulFlags & VALIDATOR_TRANSPARENT_ONLY) ? true : false;
+ args.val = &val;
+ args.pe = pe;
+ args.size = ulSize;
+
+ if((ulFlags & VALIDATOR_NOCHECK_PEFORMAT) == 0)
+ {
+ // Verify the PE header / native stubs first
+ // <REVISIT> This validation is not performed on non-manifest modules. </REVISIT>
+ Chk = ((ulFlags & VALIDATOR_CHECK_ILONLY) != 0) ? (BOOL) pev.CheckILOnlyFormat() :
+ (BOOL) pev.CheckILFormat();
+ if (!Chk)
+ {
+ hr = val.ReportError(VER_E_BAD_PE, &args);
+
+ if (FAILED(hr))
+ goto End;
+ }
+ }
+ if((ulFlags & VALIDATOR_CHECK_PEFORMAT_ONLY) != 0)
+ goto End;
+
+ if (fileName)
+ {
+ AppDomain* pAD = AppDomain::CreateDomainContext(fileName);
+ UnloadDomain = TRUE;
+ pAD->SetPassiveDomain();
+ pDomain=pAD->GetId();
+ }
+ else if (UseId)
+ {
+ pDomain = (ADID)ulAppDomainId;
+ }
+ else
+ {
+ SystemDomain::LockHolder lh;
+ ComCallWrapper* pWrap = GetCCWFromIUnknown(pAppDomain, FALSE);
+ if (pWrap == NULL)
+ {
+ hr = COR_E_APPDOMAINUNLOADED;
+ goto End;
+ }
+ pDomain = pWrap->GetDomainID();
+ }
+
+ if (FAILED(hr))
+ {
+ hr = val.ReportError(hr, &args);
+ goto End;
+ }
+
+ ad.Assign(pDomain, TRUE);
+ if (ad.IsUnloaded())
+ COMPlusThrow(kAppDomainUnloadedException);
+ if (ad->IsIllegalVerificationDomain())
+ COMPlusThrow(kFileLoadException, IDS_LOADINTROSPECTION_DISALLOWED);
+ ad->SetVerificationDomain();
+ ad.Release();
+
+ args.val = &val;
+
+ // We need a file path here. This is to do a fusion bind, and also
+ // to make sure we can find any modules in the assembly. We assume
+ // that the path points to the same place the bytes came from, which is true
+ // with PEVerify, but perhaps not with other clients.
+
+ if (pDomain != pThread->GetDomain()->GetId())
+ {
+ pThread->DoADCallBack(
+ pDomain, ValidateWorker, &args);
+ }
+ else
+ {
+ ValidateWorker(&args);
+ }
+
+ if (FAILED(args.hr))
+ hr = val.ReportError(args.hr, &args);
+
+ // Only Unload the domain if we created it.
+ if (UnloadDomain)
+ AppDomain::UnloadById(pDomain,TRUE);
+End:;
+
+ }
+ EX_CATCH
+ {
+ hr = GET_EXCEPTION()->GetHR();
+ hr = val.ReportError(hr, &args);
+ }
+ EX_END_CATCH(RethrowSOExceptions)
+
+ END_SO_INTOLERANT_CODE;
+ return hr;
+}
+
+void GetFormattingErrorMsg(__out_ecount(ulMaxLength) __out_z LPWSTR msg, unsigned int ulMaxLength)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(ulMaxLength >= 30);
+ } CONTRACTL_END;
+
+ EX_TRY
+ {
+ SString s;
+ s.LoadResource(CCompRC::Debugging, IDS_VER_E_FORMATTING);
+ wcsncpy_s(msg, ulMaxLength, s.GetUnicode(), _TRUNCATE);
+ }
+ EX_CATCH
+ {
+ wcscpy_s(msg, ulMaxLength, W("Error loading resource string"));
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+static HRESULT FormatEventInfoHelper(
+ HRESULT hVECode,
+ VEContext Context,
+ __out_ecount(ulMaxLength) __out_z LPWSTR msg,
+ unsigned int ulMaxLength,
+ SAFEARRAY *psa)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(ulMaxLength >= 30);
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+
+ VerError err;
+ memcpy(&err, &Context, sizeof(VerError));
+
+ ValidateWorkerArgs argsDefault;
+ ValidateWorkerArgs* pArgs = &argsDefault;
+
+    // We passed a pointer to the ValidateWorkerArgs object through
+    // the SAFEARRAY, cast as a UINT, because there was no room left in the
+    // interface to pass the information through directly.
+ {
+ UINT dim;
+ LONG l;
+#ifdef _WIN64
+ VARTYPE vt;
+#endif // _WIN64
+ VARIANT var;
+
+ if(!psa) {
+ goto lDone;
+ }
+
+ dim = SafeArrayGetDim(psa);
+ if (dim != 1) {
+ _ASSERTE(!"There should be one element in the SafeArray");
+ goto lDone;
+ }
+
+ if (FAILED(SafeArrayGetLBound(psa, 1, &l))) {
+ _ASSERTE(false);
+ goto lDone;
+ }
+ if (l != 0) {
+ _ASSERTE(!"expected the lower bound to be zero");
+ goto lDone;
+ }
+
+ if (FAILED(SafeArrayGetUBound(psa, 1, &l))) {
+ _ASSERTE(false);
+ goto lDone;
+ }
+ if (l != 0) {
+ _ASSERTE(!"expected the upper bound to be zero");
+ goto lDone;
+ }
+#ifdef _WIN64
+ // This check fails on Win2K when it should pass
+ SafeArrayGetVartype(psa, &vt);
+ if(vt != VT_VARIANT) {
+ _ASSERTE(!"expected the ElementType to be a VT_VARIANT");
+ goto lDone;
+ }
+#endif // _WIN64
+ l = 0;
+ SafeArrayGetElement(psa, &l, &var);
+
+#ifdef _WIN64
+ if (V_VT(&var) != VT_UI8) { // We expect the VarType to be a VT_UI8 (VT_UI8 is not supported on Windows 2000)
+ _ASSERTE(false);
+ goto lDone;
+ }
+
+ pArgs = (ValidateWorkerArgs*)(size_t)V_UI8(&var);
+#else
+ // We don't check that the type is V_UINT here because that check fails on Win2K when it should pass
+ pArgs = (ValidateWorkerArgs*)(size_t)V_UINT(&var);
+#endif
+
+ }
+lDone: ;
+
+ EX_TRY
+ {
+ Verifier::GetErrorMsg(hVECode, err, msg, ulMaxLength, pArgs);
+ }
+ EX_CATCH
+ {
+ GetFormattingErrorMsg(msg, ulMaxLength);
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ END_SO_INTOLERANT_CODE;
+ return S_OK;
+}
+
+HRESULT CorValidator::Validate(
+ IVEHandler *veh,
+ IUnknown *pAppDomain,
+ unsigned long ulFlags,
+ unsigned long ulMaxError,
+ unsigned long token,
+ __in_z LPWSTR fileName,
+ BYTE *pe,
+ unsigned long ulSize)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return ValidateHelper(veh, pAppDomain, 0, FALSE, ulFlags, ulMaxError,
+ token, fileName, pe, ulSize);
+}
+
+HRESULT CLRValidator::Validate(
+ IVEHandler *veh,
+ unsigned long ulAppDomainId,
+ unsigned long ulFlags,
+ unsigned long ulMaxError,
+ unsigned long token,
+ __in_z LPWSTR fileName,
+ BYTE *pe,
+ unsigned long ulSize)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return ValidateHelper(veh, NULL, ulAppDomainId, TRUE, ulFlags, ulMaxError,
+ token, fileName, pe, ulSize);
+}
+
+HRESULT CorValidator::FormatEventInfo(
+ HRESULT hVECode,
+ VEContext Context,
+ __out_ecount(ulMaxLength) LPWSTR msg,
+ unsigned long ulMaxLength,
+ SAFEARRAY *psa)
+{
+ WRAPPER_NO_CONTRACT;
+ return FormatEventInfoHelper(hVECode, Context, msg, ulMaxLength, psa);
+}
+
+HRESULT CLRValidator::FormatEventInfo(
+ HRESULT hVECode,
+ VEContext Context,
+ __out_ecount(ulMaxLength) LPWSTR msg,
+ unsigned long ulMaxLength,
+ SAFEARRAY *psa)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_TOLERANT;
+ return FormatEventInfoHelper(hVECode, Context, msg, ulMaxLength, psa);
+}
+
+
diff --git a/src/vm/vars.cpp b/src/vm/vars.cpp
new file mode 100644
index 0000000000..5275a92e16
--- /dev/null
+++ b/src/vm/vars.cpp
@@ -0,0 +1,363 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// vars.cpp - Global Var definitions
+//
+
+
+
+#include "common.h"
+#include "vars.hpp"
+#include "cordbpriv.h"
+#include "eeprofinterfaces.h"
+#include "bbsweep.h"
+
+#ifndef DACCESS_COMPILE
+//
+// Allow use of native images?
+//
+bool g_fAllowNativeImages = true;
+
+//
+// Default install library
+//
+const WCHAR g_pwBaseLibrary[] = W("mscorlib.dll");
+const WCHAR g_pwBaseLibraryName[] = W("mscorlib");
+const char g_psBaseLibrary[] = "mscorlib.dll";
+const char g_psBaseLibraryName[] = "mscorlib";
+const char g_psBaseLibrarySatelliteAssemblyName[] = "mscorlib.resources";
+
+#ifdef FEATURE_COMINTEROP
+const WCHAR g_pwBaseLibraryTLB[] = W("mscorlib.tlb");
+const char g_psBaseLibraryTLB[] = "mscorlib.tlb";
+#endif // FEATURE_COMINTEROP
+
+Volatile<LONG> g_TrapReturningThreads;
+
+HINSTANCE g_pMSCorEE;
+BBSweep g_BBSweep;
+
+#ifdef _DEBUG
+// The next two variables are used to enforce an ASSERT in Thread::DbgFindThread
+// that does not allow g_TrapReturningThreads to creep up unchecked.
+Volatile<LONG> g_trtChgStamp = 0;
+Volatile<LONG> g_trtChgInFlight = 0;
+
+char * g_ExceptionFile; // Source of the last thrown exception (COMPLUSThrow())
+DWORD g_ExceptionLine; // ... ditto ...
+void * g_ExceptionEIP; // Managed EIP of the last guy to call JITThrow.
+#endif // _DEBUG
+void * g_LastAccessViolationEIP; // The EIP of the place we last threw an AV. Used to diagnose stress issues.
+
+#endif // #ifndef DACCESS_COMPILE
+GPTR_IMPL(IdDispenser, g_pThinLockThreadIdDispenser);
+
+GPTR_IMPL(IdDispenser, g_pModuleIndexDispenser);
+
+IBCLogger g_IBCLogger;
+
+// For [<I1, etc. up to and including [Object
+GARY_IMPL(PTR_ArrayTypeDesc, g_pPredefinedArrayTypes, ELEMENT_TYPE_MAX);
+
+GPTR_IMPL(EEConfig, g_pConfig); // configuration data (from the registry)
+
+GPTR_IMPL(MethodTable, g_pObjectClass);
+GPTR_IMPL(MethodTable, g_pRuntimeTypeClass);
+GPTR_IMPL(MethodTable, g_pCanonMethodTableClass); // System.__Canon
+GPTR_IMPL(MethodTable, g_pStringClass);
+GPTR_IMPL(MethodTable, g_pArrayClass);
+GPTR_IMPL(MethodTable, g_pSZArrayHelperClass);
+GPTR_IMPL(MethodTable, g_pNullableClass);
+GPTR_IMPL(MethodTable, g_pExceptionClass);
+GPTR_IMPL(MethodTable, g_pThreadAbortExceptionClass);
+GPTR_IMPL(MethodTable, g_pOutOfMemoryExceptionClass);
+GPTR_IMPL(MethodTable, g_pStackOverflowExceptionClass);
+GPTR_IMPL(MethodTable, g_pExecutionEngineExceptionClass);
+GPTR_IMPL(MethodTable, g_pDelegateClass);
+GPTR_IMPL(MethodTable, g_pMulticastDelegateClass);
+GPTR_IMPL(MethodTable, g_pValueTypeClass);
+GPTR_IMPL(MethodTable, g_pEnumClass);
+GPTR_IMPL(MethodTable, g_pThreadClass);
+GPTR_IMPL(MethodTable, g_pCriticalFinalizerObjectClass);
+GPTR_IMPL(MethodTable, g_pAsyncFileStream_AsyncResultClass);
+GPTR_IMPL(MethodTable, g_pFreeObjectMethodTable);
+GPTR_IMPL(MethodTable, g_pOverlappedDataClass);
+
+GPTR_IMPL(MethodTable, g_ArgumentHandleMT);
+GPTR_IMPL(MethodTable, g_ArgIteratorMT);
+GPTR_IMPL(MethodTable, g_TypedReferenceMT);
+
+#ifdef FEATURE_COMINTEROP
+GPTR_IMPL(MethodTable, g_pBaseCOMObject);
+GPTR_IMPL(MethodTable, g_pBaseRuntimeClass);
+#endif
+
+GPTR_IMPL(MethodDesc, g_pPrepareConstrainedRegionsMethod);
+GPTR_IMPL(MethodDesc, g_pExecuteBackoutCodeHelperMethod);
+
+GPTR_IMPL(MethodDesc, g_pObjectCtorMD);
+GPTR_IMPL(MethodDesc, g_pObjectFinalizerMD);
+
+GPTR_IMPL(Thread,g_pFinalizerThread);
+GPTR_IMPL(Thread,g_pSuspensionThread);
+
+// Global SyncBlock cache
+GPTR_IMPL(SyncTableEntry,g_pSyncTable);
+
+#ifdef STRESS_LOG
+GPTR_IMPL_INIT(StressLog, g_pStressLog, &StressLog::theLog);
+#endif
+
+#ifdef FEATURE_COMINTEROP
+// Global RCW cleanup list
+GPTR_IMPL(RCWCleanupList,g_pRCWCleanupList);
+#endif // FEATURE_COMINTEROP
+
+
+#ifndef DACCESS_COMPILE
+
+// <TODO> @TODO Remove eventually </TODO> - determines whether the verifier throws an exception when something fails
+bool g_fVerifierOff;
+
+#ifndef FEATURE_CORECLR
+IAssemblyUsageLog *g_pIAssemblyUsageLogGac;
+#endif
+
+// <TODO> @TODO - PROMOTE. </TODO>
+OBJECTHANDLE g_pPreallocatedOutOfMemoryException;
+OBJECTHANDLE g_pPreallocatedStackOverflowException;
+OBJECTHANDLE g_pPreallocatedExecutionEngineException;
+OBJECTHANDLE g_pPreallocatedRudeThreadAbortException;
+OBJECTHANDLE g_pPreallocatedThreadAbortException;
+OBJECTHANDLE g_pPreallocatedSentinelObject;
+OBJECTHANDLE g_pPreallocatedBaseException;
+
+#ifdef FEATURE_CAS_POLICY
+CertificateCache *g_pCertificateCache = NULL;
+#endif
+
+//
+//
+// Global System Info
+//
+SYSTEM_INFO g_SystemInfo;
+
+// Configurable constants used across our spin locks
+// Initialization here is necessary so that we have meaningful values before the runtime is started
+// These initial values were selected to match the defaults, but anything reasonable is close enough
+SpinConstants g_SpinConstants = {
+ 50, // dwInitialDuration
+ 40000, // dwMaximumDuration - ideally (20000 * max(2, numProc))
+ 3, // dwBackoffFactor
+ 10 // dwRepetitions
+};
+
+// support for Event Tracing for Windows (ETW)
+ETW::CEtwTracer * g_pEtwTracer = NULL;
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef FEATURE_IPCMAN
+// support for IPCManager
+GPTR_IMPL(IPCWriterInterface, g_pIPCManagerInterface);
+#endif // FEATURE_IPCMAN
+
+//
+// Support for the COM+ Debugger.
+//
+GPTR_IMPL(DebugInterface, g_pDebugInterface);
+// A managed debugger may set this flag from out of process.
+GVAL_IMPL_INIT(DWORD, g_CORDebuggerControlFlags, DBCF_NORMAL_OPERATION);
+
+#ifdef DEBUGGING_SUPPORTED
+GPTR_IMPL(EEDbgInterfaceImpl, g_pEEDbgInterfaceImpl);
+#endif // DEBUGGING_SUPPORTED
+
+#if defined(PROFILING_SUPPORTED_DATA) || defined(PROFILING_SUPPORTED)
+// Profiling support
+HINSTANCE g_pDebuggerDll = NULL;
+
+GVAL_IMPL(ProfControlBlock, g_profControlBlock);
+#endif // defined(PROFILING_SUPPORTED_DATA) || defined(PROFILING_SUPPORTED)
+
+#ifndef DACCESS_COMPILE
+
+// Global default for concurrent GC. The default value is 1
+int g_IGCconcurrent = 1;
+
+int g_IGCHoardVM = 0;
+
+#ifdef GCTRIMCOMMIT
+
+int g_IGCTrimCommit = 0;
+
+#endif
+
+BOOL g_fEnableETW = FALSE;
+
+BOOL g_fEnableARM = FALSE;
+
+//
+// Global state variable indicating if the EE is in its init phase.
+//
+bool g_fEEInit = false;
+
+//
+// Global state variables indicating which stage of shutdown we are in
+//
+
+#endif // #ifndef DACCESS_COMPILE
+
+// See comments at code:EEShutDown for details on how and why this gets set. Use
+// code:IsAtProcessExit to read this.
+GVAL_IMPL(bool, g_fProcessDetach);
+
+GVAL_IMPL_INIT(DWORD, g_fEEShutDown, 0);
+
+#ifndef FEATURE_PAL
+GVAL_IMPL(SIZE_T, g_runtimeLoadedBaseAddress);
+GVAL_IMPL(SIZE_T, g_runtimeVirtualSize);
+#endif // !FEATURE_PAL
+
+#ifndef DACCESS_COMPILE
+
+Volatile<LONG> g_fForbidEnterEE = false;
+bool g_fFinalizerRunOnShutDown = false;
+bool g_fManagedAttach = false;
+bool g_fNoExceptions = false;
+#ifdef FEATURE_COMINTEROP
+bool g_fShutDownCOM = false;
+#endif //FEATURE_COMINTEROP
+
+DWORD g_FinalizerWaiterStatus = 0;
+
+const WCHAR g_pwzClickOnceEnv_FullName[] = W("__COR_COMMAND_LINE_APP_FULL_NAME__");
+const WCHAR g_pwzClickOnceEnv_Manifest[] = W("__COR_COMMAND_LINE_MANIFEST__");
+const WCHAR g_pwzClickOnceEnv_Parameter[] = W("__COR_COMMAND_LINE_PARAMETER__");
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+DWORD g_dwGlobalSharePolicy = AppDomain::SHARE_POLICY_UNSPECIFIED;
+#endif
+
+//
+// Do we own the lifetime of the process, ie. is it an EXE?
+//
+bool g_fWeControlLifetime = false;
+
+#ifdef _DEBUG
+// The following should only be used for assertions. (Famous last words).
+bool dbg_fDrasticShutdown = false;
+#endif
+bool g_fInControlC = false;
+
+//
+// Cached command line file provided by the host.
+//
+LPWSTR g_pCachedCommandLine = NULL;
+LPWSTR g_pCachedModuleFileName = 0;
+
+// host configuration file. If set, it is added to every AppDomain (fusion context)
+LPCWSTR g_pszHostConfigFile = NULL;
+SIZE_T g_dwHostConfigFile = 0;
+
+// AppDomainManager assembly and type names provided as environment variables.
+LPWSTR g_wszAppDomainManagerAsm = NULL;
+LPWSTR g_wszAppDomainManagerType = NULL;
+bool g_fDomainManagerInitialized = false;
+
+//
+// IJW needs the shim HINSTANCE
+//
+HINSTANCE g_hInstShim = NULL;
+
+char g_Version[] = VER_PRODUCTVERSION_STR;
+
+#endif // #ifndef DACCESS_COMPILE
+
+#ifdef DACCESS_COMPILE
+
+void OBJECTHANDLE_EnumMemoryRegions(OBJECTHANDLE handle)
+{
+ SUPPORTS_DAC;
+ PTR_TADDR ref = PTR_TADDR(handle);
+ if (ref.IsValid())
+ {
+ ref.EnumMem();
+
+ PTR_Object obj = PTR_Object(*ref);
+ if (obj.IsValid())
+ {
+ obj->EnumMemoryRegions();
+ }
+ }
+}
+
+void OBJECTREF_EnumMemoryRegions(OBJECTREF ref)
+{
+ if (ref.IsValid())
+ {
+ ref->EnumMemoryRegions();
+ }
+}
+
+#endif // #ifdef DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+//
+// We need the following to be the compiler's notion of volatile.
+//
+extern "C" RAW_KEYWORD(volatile) const GSCookie s_gsCookie = 0;
+
+#else
+__GlobalVal< GSCookie > s_gsCookie(&g_dacGlobals.dac__s_gsCookie);
+#endif //!DACCESS_COMPILE
+
+BOOL IsCompilationProcess()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+#if defined(FEATURE_NATIVE_IMAGE_GENERATION) && !defined(DACCESS_COMPILE)
+ return g_pCEECompileInfo != NULL;
+#else
+ return FALSE;
+#endif
+}
+
+//==============================================================================
+
+enum NingenState
+{
+ kNotInitialized = 0,
+ kNingenEnabled = 1,
+ kNingenDisabled = 2,
+};
+
+extern int g_ningenState;
+int g_ningenState = kNotInitialized;
+
+// Removes all execution of managed or third-party code in the ngen compilation process.
+BOOL NingenEnabled()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef CROSSGEN_COMPILE
+ // Always enable ningen for cross-compile
+ return TRUE;
+#else // CROSSGEN_COMPILE
+
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ // Note that ningen is enabled by default to get byte-to-byte identical NGen images between native compile and cross-compile
+ if (g_ningenState == kNotInitialized)
+ {
+ // This code must be idempotent as we don't have a lock to prevent a race to initialize g_ningenState.
+ g_ningenState = (IsCompilationProcess() && (0 != CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Ningen))) ? kNingenEnabled : kNingenDisabled;
+ }
+
+ return g_ningenState == kNingenEnabled;
+#else
+ return FALSE;
+#endif
+
+#endif // CROSSGEN_COMPILE
+}
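+
+// Editorial note (not part of the original change): the initialization in
+// NingenEnabled above is a benign-race lazy init. Every racing thread
+// computes the same value from the same inputs, so a stale read of
+// kNotInitialized merely repeats the idempotent computation; that is why
+// no lock is needed.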
diff --git a/src/vm/vars.hpp b/src/vm/vars.hpp
new file mode 100644
index 0000000000..c2128859c2
--- /dev/null
+++ b/src/vm/vars.hpp
@@ -0,0 +1,923 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// vars.hpp
+//
+// Global variables
+//
+
+
+#ifndef _VARS_HPP
+#define _VARS_HPP
+
+// This will need ifdefs for non-x86 processors (on IA64, a SLOT is a pointer to 128-bit instruction bundles)!
+#define SLOT PBYTE
+typedef DPTR(SLOT) PTR_SLOT;
+
+typedef LPVOID DictionaryEntry;
+
+/* Define the implementation dependent size types */
+
+#ifndef _INTPTR_T_DEFINED
+#ifdef _WIN64
+typedef __int64 intptr_t;
+#else
+typedef int intptr_t;
+#endif
+#define _INTPTR_T_DEFINED
+#endif
+
+#ifndef _UINTPTR_T_DEFINED
+#ifdef _WIN64
+typedef unsigned __int64 uintptr_t;
+#else
+typedef unsigned int uintptr_t;
+#endif
+#define _UINTPTR_T_DEFINED
+#endif
+
+#ifndef _PTRDIFF_T_DEFINED
+#ifdef _WIN64
+typedef __int64 ptrdiff_t;
+#else
+typedef int ptrdiff_t;
+#endif
+#define _PTRDIFF_T_DEFINED
+#endif
+
+
+#ifndef _SIZE_T_DEFINED
+#ifdef _WIN64
+typedef unsigned __int64 size_t;
+#else
+typedef unsigned int size_t;
+#endif
+#define _SIZE_T_DEFINED
+#endif
+
+
+#ifndef _WCHAR_T_DEFINED
+typedef unsigned short wchar_t;
+#define _WCHAR_T_DEFINED
+#endif
+
+#ifndef CLR_STANDALONE_BINDER
+#include "util.hpp"
+#include <corpriv.h>
+#include <cordbpriv.h>
+
+#ifndef FEATURE_CORECLR
+#include <metahost.h>
+#endif // !FEATURE_CORECLR
+
+#include "eeprofinterfaces.h"
+#include "eehash.h"
+
+#ifdef FEATURE_CAS_POLICY
+#include "certificatecache.h"
+#endif
+
+#endif //CLR_STANDALONE_BINDER
+
+#include "profilepriv.h"
+
+class ClassLoader;
+class LoaderHeap;
+class GCHeap;
+class Object;
+class StringObject;
+class TransparentProxyObject;
+class ArrayClass;
+class MethodTable;
+class MethodDesc;
+class SyncBlockCache;
+class SyncTableEntry;
+class ThreadStore;
+class IPCWriterInterface;
+namespace ETW { class CEtwTracer; };
+class DebugInterface;
+class DebugInfoManager;
+class EEDbgInterfaceImpl;
+class EECodeManager;
+class Crst;
+#ifdef FEATURE_COMINTEROP
+class RCWCleanupList;
+#endif // FEATURE_COMINTEROP
+class BBSweep;
+struct IAssemblyUsageLog;
+
+//
+// object handles are opaque types that track object pointers
+//
+#ifndef DACCESS_COMPILE
+
+struct OBJECTHANDLE__
+{
+ void* unused;
+};
+typedef struct OBJECTHANDLE__* OBJECTHANDLE;
+
+#else
+
+typedef TADDR OBJECTHANDLE;
+
+#endif
+
+//
+// loader handles are opaque types that track object pointers that have a lifetime
+// that matches that of a loader allocator
+//
+struct LOADERHANDLE__
+{
+ void* unused;
+};
+typedef TADDR LOADERHANDLE;
+
+
+#ifdef DACCESS_COMPILE
+void OBJECTHANDLE_EnumMemoryRegions(OBJECTHANDLE handle);
+void OBJECTREF_EnumMemoryRegions(OBJECTREF ref);
+#endif
+
+
+#ifdef USE_CHECKED_OBJECTREFS
+
+
+//=========================================================================
+// In the retail build, OBJECTREF is typedef'd to "Object*".
+// In the debug build, we use operator overloading to detect
+// common programming mistakes that create GC holes. The critical
+// rules are:
+//
+// 1. Your thread must have disabled preemptive GC before
+// reading or writing any OBJECTREF. When preemptive GC is enabled,
+// any other thread can suspend you at any time and
+// move or discard objects.
+// 2. You must guard your OBJECTREF's using a root pointer across
+// any code that might trigger a GC.
+//
+// Each of the overloads validate that:
+//
+// 1. Preemptive GC is currently disabled
+// 2. The object looks consistent (checked by comparing the
+// object's methodtable pointer with that of the class.)
+//
+// Limitations:
+// - Can't say
+//
+// if (or) {}
+//
+// must say
+//
+// if (or != NULL) {}
+//
+//
+//=========================================================================
+class OBJECTREF {
+ private:
+ // Holds the real object pointer.
+ // The union gives us better debugger pretty printing
+ union {
+ Object *m_asObj;
+ class StringObject* m_asString;
+ class ArrayBase* m_asArray;
+ class PtrArray* m_asPtrArray;
+ class DelegateObject* m_asDelegate;
+ class TransparentProxyObject* m_asTP;
+
+ class ReflectClassBaseObject* m_asReflectClass;
+#ifdef FEATURE_COMPRESSEDSTACK
+ class CompressedStackObject* m_asCompressedStack;
+#endif // #ifdef FEATURE_COMPRESSEDSTACK
+#if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+ class SecurityContextObject* m_asSecurityContext;
+#endif // #if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
+ class ExecutionContextObject* m_asExecutionContext;
+ class AppDomainBaseObject* m_asAppDomainBase;
+ class PermissionSetObject* m_asPermissionSetObject;
+ };
+
+ public:
+ //-------------------------------------------------------------
+ // Default constructor, for non-initializing declarations:
+ //
+ // OBJECTREF or;
+ //-------------------------------------------------------------
+ OBJECTREF();
+
+ //-------------------------------------------------------------
+ // Copy constructor, for passing OBJECTREF's as function arguments.
+ //-------------------------------------------------------------
+ OBJECTREF(const OBJECTREF & objref);
+
+ //-------------------------------------------------------------
+ // To allow NULL to be used as an OBJECTREF.
+ //-------------------------------------------------------------
+ OBJECTREF(TADDR nul);
+
+ //-------------------------------------------------------------
+ // Test against NULL.
+ //-------------------------------------------------------------
+ int operator!() const;
+
+ //-------------------------------------------------------------
+ // Compare two OBJECTREF's.
+ //-------------------------------------------------------------
+ int operator==(const OBJECTREF &objref) const;
+
+ //-------------------------------------------------------------
+ // Compare two OBJECTREF's.
+ //-------------------------------------------------------------
+ int operator!=(const OBJECTREF &objref) const;
+
+ //-------------------------------------------------------------
+ // Forward method calls.
+ //-------------------------------------------------------------
+ Object* operator->();
+ const Object* operator->() const;
+
+ //-------------------------------------------------------------
+ // Assignment. We don't validate the destination so as not
+ // to break the sequence:
+ //
+ // OBJECTREF or;
+ // or = ...;
+ //-------------------------------------------------------------
+ OBJECTREF& operator=(const OBJECTREF &objref);
+ OBJECTREF& operator=(TADDR nul);
+
+    // allow explicit casts
+ explicit OBJECTREF(Object *pObject);
+
+ void Validate(BOOL bDeep = TRUE, BOOL bVerifyNextHeader = TRUE, BOOL bVerifySyncBlock = TRUE);
+
+};
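+
+//-------------------------------------------------------------
+// Editorial sketch (not part of the original change): how VM code
+// typically honors the two rules above. GCPROTECT_BEGIN/GCPROTECT_END
+// and AllocateObject are assumed here to come from the VM's holder and
+// GC helper headers; this is illustrative, not a verbatim call site.
+//
+//   {
+//       OBJECTREF oref = NULL;                   // compare against NULL explicitly
+//       GCPROTECT_BEGIN(oref);                   // rule 2: root the ref across GC points
+//       oref = AllocateObject(g_pObjectClass);   // may trigger a GC
+//       if (oref != NULL)                        // "if (oref)" will not compile here
+//       {
+//           // use oref only while in cooperative GC mode (rule 1)
+//       }
+//       GCPROTECT_END();
+//   }
+//-------------------------------------------------------------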
+
+//-------------------------------------------------------------
+// template class REF for different types of REF class to be used
+// in the debug mode
+// Template type should be a class that extends Object
+//-------------------------------------------------------------
+
+
+
+template <class T>
+class REF : public OBJECTREF
+{
+ public:
+
+ //-------------------------------------------------------------
+ // Default constructor, for non-initializing declarations:
+ //
+ // OBJECTREF or;
+ //-------------------------------------------------------------
+ REF() :OBJECTREF ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ // no op
+ }
+
+ //-------------------------------------------------------------
+ // Copy constructor, for passing OBJECTREF's as function arguments.
+ //-------------------------------------------------------------
+ explicit REF(const OBJECTREF& objref) : OBJECTREF(objref)
+ {
+ LIMITED_METHOD_CONTRACT;
+ //no op
+ }
+
+
+ //-------------------------------------------------------------
+ // To allow NULL to be used as an OBJECTREF.
+ //-------------------------------------------------------------
+ REF(TADDR nul) : OBJECTREF (nul)
+ {
+ LIMITED_METHOD_CONTRACT;
+ // no op
+ }
+
+ explicit REF(T* pObject) : OBJECTREF(pObject)
+ {
+ LIMITED_METHOD_CONTRACT;
+ // no op
+ }
+
+ //-------------------------------------------------------------
+ // Forward method calls.
+ //-------------------------------------------------------------
+ T* operator->()
+ {
+ // What kind of statement can we make about member methods on Object
+ // except that we need to be in COOPERATIVE when touching them?
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ return (T *)OBJECTREF::operator->();
+ }
+
+ const T* operator->() const
+ {
+ // What kind of statement can we make about member methods on Object
+ // except that we need to be in COOPERATIVE when touching them?
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ return (const T *)OBJECTREF::operator->();
+ }
+
+ //-------------------------------------------------------------
+ // Assignment. We don't validate the destination so as not
+ // to break the sequence:
+ //
+ // OBJECTREF or;
+ // or = ...;
+ //-------------------------------------------------------------
+ REF<T> &operator=(OBJECTREF &objref)
+ {
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+ return (REF<T>&)OBJECTREF::operator=(objref);
+ }
+
+};
+
+#define VALIDATEOBJECTREF(objref) ((objref).Validate())
+#define VALIDATEOBJECT(obj) obj->Validate()
+
+#define ObjectToOBJECTREF(obj) (OBJECTREF(obj))
+#define OBJECTREFToObject(objref) ((objref).operator-> ())
+#define ObjectToSTRINGREF(obj) (STRINGREF(obj))
+#define STRINGREFToObject(objref) (*( (StringObject**) &(objref) ))
+#define ObjectToSTRINGBUFFERREF(obj) (STRINGBUFFERREF(obj))
+#define STRINGBUFFERREFToObject(objref) (*( (StringBufferObject**) &(objref) ))
+
+#else // _DEBUG_IMPL
+
+#define VALIDATEOBJECTREF(objref)
+#define VALIDATEOBJECT(obj)
+
+#define ObjectToOBJECTREF(obj) ((PTR_Object) (obj))
+#define OBJECTREFToObject(objref) ((PTR_Object) (objref))
+#define ObjectToSTRINGREF(obj) ((PTR_StringObject) (obj))
+#define STRINGREFToObject(objref) ((PTR_StringObject) (objref))
+#define ObjectToSTRINGBUFFERREF(obj) ((Ptr_StringBufferObject) (obj))
+#define STRINGBUFFERREFToObject(objref) ((Ptr_StringBufferObject) (objref))
+
+#endif // _DEBUG_IMPL
+
+
+// <TODO> Get rid of these! Don't use them any more!</TODO>
+#define MAX_CLASSNAME_LENGTH 1024
+#define MAX_NAMESPACE_LENGTH 1024
+
+class EEConfig;
+class ClassLoaderList;
+class Module;
+class ArrayTypeDesc;
+
+#ifndef BINDER
+
+#define EXTERN extern
+
+// For [<I1, etc. up to and including [Object
+GARY_DECL(PTR_ArrayTypeDesc, g_pPredefinedArrayTypes, ELEMENT_TYPE_MAX);
+
+extern "C" Volatile<LONG> g_TrapReturningThreads;
+
+EXTERN HINSTANCE g_pMSCorEE;
+EXTERN BBSweep g_BBSweep;
+EXTERN IBCLogger g_IBCLogger;
+
+#ifdef _DEBUG
+// The next two variables are used to enforce an ASSERT in Thread::DbgFindThread
+// that does not allow g_TrapReturningThreads to creep up unchecked.
+EXTERN Volatile<LONG> g_trtChgStamp;
+EXTERN Volatile<LONG> g_trtChgInFlight;
+EXTERN char * g_ExceptionFile;
+EXTERN DWORD g_ExceptionLine;
+EXTERN void * g_ExceptionEIP;
+#endif
+EXTERN void * g_LastAccessViolationEIP;
+
+GPTR_DECL(EEConfig, g_pConfig); // configuration data (from the registry)
+GPTR_DECL(MethodTable, g_pObjectClass);
+GPTR_DECL(MethodTable, g_pRuntimeTypeClass);
+GPTR_DECL(MethodTable, g_pCanonMethodTableClass); // System.__Canon
+GPTR_DECL(MethodTable, g_pStringClass);
+GPTR_DECL(MethodTable, g_pArrayClass);
+GPTR_DECL(MethodTable, g_pSZArrayHelperClass);
+GPTR_DECL(MethodTable, g_pNullableClass);
+GPTR_DECL(MethodTable, g_pExceptionClass);
+GPTR_DECL(MethodTable, g_pThreadAbortExceptionClass);
+GPTR_DECL(MethodTable, g_pOutOfMemoryExceptionClass);
+GPTR_DECL(MethodTable, g_pStackOverflowExceptionClass);
+GPTR_DECL(MethodTable, g_pExecutionEngineExceptionClass);
+GPTR_DECL(MethodTable, g_pDelegateClass);
+GPTR_DECL(MethodTable, g_pMulticastDelegateClass);
+GPTR_DECL(MethodTable, g_pFreeObjectMethodTable);
+GPTR_DECL(MethodTable, g_pValueTypeClass);
+GPTR_DECL(MethodTable, g_pEnumClass);
+GPTR_DECL(MethodTable, g_pThreadClass);
+GPTR_DECL(MethodTable, g_pCriticalFinalizerObjectClass);
+GPTR_DECL(MethodTable, g_pAsyncFileStream_AsyncResultClass);
+GPTR_DECL(MethodTable, g_pOverlappedDataClass);
+
+GPTR_DECL(MethodTable, g_ArgumentHandleMT);
+GPTR_DECL(MethodTable, g_ArgIteratorMT);
+GPTR_DECL(MethodTable, g_TypedReferenceMT);
+
+#ifdef FEATURE_COMINTEROP
+GPTR_DECL(MethodTable, g_pBaseCOMObject);
+GPTR_DECL(MethodTable, g_pBaseRuntimeClass);
+#endif
+
+GPTR_DECL(MethodDesc, g_pPrepareConstrainedRegionsMethod);
+GPTR_DECL(MethodDesc, g_pExecuteBackoutCodeHelperMethod);
+
+GPTR_DECL(MethodDesc, g_pObjectCtorMD);
+GPTR_DECL(MethodDesc, g_pObjectFinalizerMD);
+
+//<TODO> @TODO Remove eventually - determines whether the verifier throws an exception when something fails</TODO>
+EXTERN bool g_fVerifierOff;
+
+#ifndef FEATURE_CORECLR
+EXTERN IAssemblyUsageLog *g_pIAssemblyUsageLogGac;
+#endif
+
+// Global System Information
+extern SYSTEM_INFO g_SystemInfo;
+
+// <TODO>@TODO - PROMOTE.</TODO>
+// <TODO>@TODO - I'd like to make these private members of CLRException some day.</TODO>
+EXTERN OBJECTHANDLE g_pPreallocatedOutOfMemoryException;
+EXTERN OBJECTHANDLE g_pPreallocatedStackOverflowException;
+EXTERN OBJECTHANDLE g_pPreallocatedExecutionEngineException;
+EXTERN OBJECTHANDLE g_pPreallocatedRudeThreadAbortException;
+
+// We may not be able to create a normal thread abort exception under OOM or stack overflow.
+// When this happens, we will use our pre-allocated thread abort exception.
+EXTERN OBJECTHANDLE g_pPreallocatedThreadAbortException;
+
+// We use this as a dummy object to indicate free space in the handle tables -- this object is never visible to the world.
+EXTERN OBJECTHANDLE g_pPreallocatedSentinelObject;
+
+// We use this object to return a preallocated System.Exception instance when we have nothing
+// better to return.
+EXTERN OBJECTHANDLE g_pPreallocatedBaseException;
+#endif // !BINDER
+
+GPTR_DECL(Thread,g_pFinalizerThread);
+GPTR_DECL(Thread,g_pSuspensionThread);
+
+// Global SyncBlock cache
+typedef DPTR(SyncTableEntry) PTR_SyncTableEntry;
+GPTR_DECL(SyncTableEntry, g_pSyncTable);
+
+#if !defined(BINDER)
+
+#ifdef FEATURE_COMINTEROP
+// Global RCW cleanup list
+typedef DPTR(RCWCleanupList) PTR_RCWCleanupList;
+GPTR_DECL(RCWCleanupList,g_pRCWCleanupList);
+#endif // FEATURE_COMINTEROP
+
+#ifdef FEATURE_CAS_POLICY
+EXTERN CertificateCache *g_pCertificateCache;
+#endif
+
+#ifdef FEATURE_IPCMAN
+// support for IPCManager
+typedef DPTR(IPCWriterInterface) PTR_IPCWriterInterface;
+GPTR_DECL(IPCWriterInterface, g_pIPCManagerInterface);
+#endif // FEATURE_IPCMAN
+
+// support for Event Tracing for Windows (ETW)
+EXTERN ETW::CEtwTracer* g_pEtwTracer;
+
+#ifdef STRESS_LOG
+class StressLog;
+typedef DPTR(StressLog) PTR_StressLog;
+GPTR_DECL(StressLog, g_pStressLog);
+#endif
+
+
+//
+// Support for the COM+ Debugger.
+//
+GPTR_DECL(DebugInterface, g_pDebugInterface);
+GVAL_DECL(DWORD, g_CORDebuggerControlFlags);
+#ifdef DEBUGGING_SUPPORTED
+GPTR_DECL(EEDbgInterfaceImpl, g_pEEDbgInterfaceImpl);
+#endif // DEBUGGING_SUPPORTED
+
+#ifdef PROFILING_SUPPORTED
+EXTERN HINSTANCE g_pDebuggerDll;
+#endif
+
+// Global default for Concurrent GC. The default is on (value 1)
+EXTERN int g_IGCconcurrent;
+extern int g_IGCHoardVM;
+
+#ifdef GCTRIMCOMMIT
+extern int g_IGCTrimCommit;
+#endif
+
+extern BOOL g_fEnableETW;
+extern BOOL g_fEnableARM;
+
+// Returns a BOOL indicating whether the runtime is active.
+BOOL IsRuntimeActive();
+
+//
+// Can we run managed code?
+//
+struct LoaderLockCheck
+{
+ enum kind
+ {
+ ForMDA,
+ ForCorrectness,
+ None,
+ };
+};
+BOOL CanRunManagedCode(LoaderLockCheck::kind checkKind, HINSTANCE hInst = 0);
+inline BOOL CanRunManagedCode(HINSTANCE hInst = 0)
+{
+ return CanRunManagedCode(LoaderLockCheck::ForMDA, hInst);
+}
+
+//
+// Global state variable indicating if the EE is in its init phase.
+//
+EXTERN bool g_fEEInit;
+
+//
+// Global state variable indicating if the EE has been started up.
+//
+EXTERN Volatile<BOOL> g_fEEStarted;
+
+#ifdef FEATURE_COMINTEROP
+//
+// Global state variable indicating if COM has been started up.
+//
+EXTERN BOOL g_fComStarted;
+#endif
+
+#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+//
+// Pointer to the activated CLR interface provided by the shim.
+//
+EXTERN ICLRRuntimeInfo *g_pCLRRuntime;
+#endif
+
+//
+// Global state variables indicating which stage of shutdown we are in
+//
+GVAL_DECL(DWORD, g_fEEShutDown);
+EXTERN DWORD g_fFastExitProcess;
+#ifndef DACCESS_COMPILE
+EXTERN BOOL g_fSuspendOnShutdown;
+EXTERN BOOL g_fSuspendFinalizerOnShutdown;
+#endif // DACCESS_COMPILE
+EXTERN Volatile<LONG> g_fForbidEnterEE;
+EXTERN bool g_fFinalizerRunOnShutDown;
+GVAL_DECL(bool, g_fProcessDetach);
+EXTERN bool g_fManagedAttach;
+EXTERN bool g_fNoExceptions;
+#ifdef FEATURE_COMINTEROP
+EXTERN bool g_fShutDownCOM;
+#endif // FEATURE_COMINTEROP
+
+// Indicates whether we're executing shut down as a result of DllMain
+// (DLL_PROCESS_DETACH). See comments at code:EEShutDown for details.
+inline BOOL IsAtProcessExit()
+{
+ SUPPORTS_DAC;
+ return g_fProcessDetach;
+}
+
+enum FWStatus
+{
+ FWS_WaitInterrupt = 0x00000001,
+};
+
+EXTERN DWORD g_FinalizerWaiterStatus;
+extern ULONGLONG g_ObjFinalizeStartTime;
+extern Volatile<BOOL> g_FinalizerIsRunning;
+extern Volatile<ULONG> g_FinalizerLoopCount;
+
+extern LONG GetProcessedExitProcessEventCount();
+
+#ifndef DACCESS_COMPILE
+//
+// Allow use of native images?
+//
+extern bool g_fAllowNativeImages;
+
+//
+// Default install library
+//
+EXTERN const WCHAR g_pwBaseLibrary[];
+EXTERN const WCHAR g_pwBaseLibraryName[];
+EXTERN const char g_psBaseLibrary[];
+EXTERN const char g_psBaseLibraryName[];
+EXTERN const char g_psBaseLibrarySatelliteAssemblyName[];
+
+#ifdef FEATURE_COMINTEROP
+EXTERN const WCHAR g_pwBaseLibraryTLB[];
+EXTERN const char g_psBaseLibraryTLB[];
+#endif // FEATURE_COMINTEROP
+#endif // DACCESS_COMPILE
+
+EXTERN const WCHAR g_pwzClickOnceEnv_FullName[];
+EXTERN const WCHAR g_pwzClickOnceEnv_Manifest[];
+EXTERN const WCHAR g_pwzClickOnceEnv_Parameter[];
+
+#ifdef FEATURE_LOADER_OPTIMIZATION
+EXTERN DWORD g_dwGlobalSharePolicy;
+#endif
+
+//
+// Do we own the lifetime of the process, i.e. is it an EXE?
+//
+EXTERN bool g_fWeControlLifetime;
+
+#ifdef _DEBUG
+// The following should only be used for assertions. (Famous last words).
+EXTERN bool dbg_fDrasticShutdown;
+#endif
+EXTERN bool g_fInControlC;
+
+// There is a global table of prime numbers that's available for e.g. hashing
+extern const DWORD g_rgPrimes[71];
+
+//
+// Cached command line file provided by the host.
+//
+extern LPWSTR g_pCachedCommandLine;
+extern LPWSTR g_pCachedModuleFileName;
+
+//
+// Host configuration file. One per process.
+//
+extern LPCWSTR g_pszHostConfigFile;
+extern SIZE_T g_dwHostConfigFile;
+
+// AppDomainManager type
+extern LPWSTR g_wszAppDomainManagerAsm;
+extern LPWSTR g_wszAppDomainManagerType;
+extern bool g_fDomainManagerInitialized;
+
+//
+// Macros to check debugger and profiler settings.
+//
+inline bool CORDebuggerPendingAttach()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ // If we're in rude shutdown, then pretend the debugger is detached.
+ // We want shutdown to be as simple as possible, so this avoids
+ // us trying to do elaborate operations while exiting.
+ return (g_CORDebuggerControlFlags & DBCF_PENDING_ATTACH) && !IsAtProcessExit();
+}
+
+inline bool CORDebuggerAttached()
+{
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+ // If we're in rude shutdown, then pretend the debugger is detached.
+ // We want shutdown to be as simple as possible, so this avoids
+ // us trying to do elaborate operations while exiting.
+ return (g_CORDebuggerControlFlags & DBCF_ATTACHED) && !IsAtProcessExit();
+}
+
+#define CORDebuggerAllowJITOpts(dwDebuggerBits) \
+ (((dwDebuggerBits) & DACF_ALLOW_JIT_OPTS) \
+ || \
+ ((g_CORDebuggerControlFlags & DBCF_ALLOW_JIT_OPT) && \
+ !((dwDebuggerBits) & DACF_USER_OVERRIDE)))
+
+#define CORDebuggerEnCMode(dwDebuggerBits) \
+ ((dwDebuggerBits) & DACF_ENC_ENABLED)
+
+#define CORDebuggerTraceCall() \
+ (CORDebuggerAttached() && GetThread()->IsTraceCall())
+
+
+
+//
+// Define stuff for precedence between profiling and debugging
+// flags that can both be set.
+//
+
+#if defined(PROFILING_SUPPORTED) || defined(PROFILING_SUPPORTED_DATA)
+
+#ifdef DEBUGGING_SUPPORTED
+
+#define CORDisableJITOptimizations(dwDebuggerBits) \
+ (CORProfilerDisableOptimizations() || \
+ !CORDebuggerAllowJITOpts(dwDebuggerBits))
+
+#else // !DEBUGGING_SUPPORTED
+
+#define CORDisableJITOptimizations(dwDebuggerBits) \
+ CORProfilerDisableOptimizations()
+
+#endif// DEBUGGING_SUPPORTED
+
+#else // !defined(PROFILING_SUPPORTED) && !defined(PROFILING_SUPPORTED_DATA)
+
+#ifdef DEBUGGING_SUPPORTED
+
+#define CORDisableJITOptimizations(dwDebuggerBits) \
+ !CORDebuggerAllowJITOpts(dwDebuggerBits)
+
+#else // DEBUGGING_SUPPORTED
+
+#define CORDisableJITOptimizations(dwDebuggerBits) FALSE
+
+#endif// DEBUGGING_SUPPORTED
+
+#endif// defined(PROFILING_SUPPORTED) || defined(PROFILING_SUPPORTED_DATA)
+
+
+
+
+//
+// IJW needs the shim HINSTANCE
+//
+EXTERN HINSTANCE g_hInstShim;
+
+#ifndef FEATURE_PAL
+GVAL_DECL(SIZE_T, g_runtimeLoadedBaseAddress);
+GVAL_DECL(SIZE_T, g_runtimeVirtualSize);
+#endif // !FEATURE_PAL
+
+#endif /* !BINDER */
+
+#ifndef MAXULONG
+#define MAXULONG 0xffffffff
+#endif
+
+#ifndef MAXULONGLONG
+#define MAXULONGLONG UI64(0xffffffffffffffff)
+#endif
+
+// #ADID_vs_ADIndex
+// code:ADID is an ID for an appdomain that is sparse and remains unique for the lifetime of the process.
+// Remoting and (I believe) the thread pool use ADID as a way of safely referring to appdomains outside of their normal lifetime.
+// Interop also uses ADID to handle issues involving unloaded domains.
+//
+// code:ADIndex is an ID for an appdomain that's dense and may be reused once the appdomain is unloaded.
+// This is useful for fast array based lookup from a number to an appdomain property.
+struct ADIndex
+{
+ DWORD m_dwIndex;
+ ADIndex ()
+ : m_dwIndex(0)
+ {}
+ explicit ADIndex (DWORD id)
+ : m_dwIndex(id)
+ {
+ SUPPORTS_DAC;
+ }
+ BOOL operator==(const ADIndex& ad) const
+ {
+ return m_dwIndex == ad.m_dwIndex;
+ }
+ BOOL operator!=(const ADIndex& ad) const
+ {
+ return m_dwIndex != ad.m_dwIndex;
+ }
+};
+
+// An ADID is a number that represents an appdomain. They are allocated with code:SystemDomain::GetNewAppDomainId
+// ADIDs are NOT reused today, so they are unique even after the appdomain dies.
+//
+// see also code:BaseDomain::m_dwId
+// see also code:ADIndex
+// see also code:ADIndex#ADID_vs_ADIndex
+struct ADID
+{
+ DWORD m_dwId;
+ ADID ()
+ : m_dwId(0)
+ {LIMITED_METHOD_CONTRACT;}
+ explicit ADID (DWORD id)
+ : m_dwId(id)
+ {LIMITED_METHOD_CONTRACT;}
+ BOOL operator==(const ADID& ad) const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_dwId == ad.m_dwId;
+ }
+ BOOL operator!=(const ADID& ad) const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_dwId != ad.m_dwId;
+ }
+};
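+
+// Editorial sketch (not part of the original change): the division of labor
+// described in #ADID_vs_ADIndex. The accessors and table below are assumed
+// for illustration; only the two ID types themselves are defined here.
+//
+//   ADID    savedId = pDomain->GetId();      // sparse, never reused: safe to stash
+//   ADIndex idx     = pDomain->GetIndex();   // dense, reusable: good for array lookup
+//   g_perDomainData[idx.m_dwIndex] = data;   // fast number-to-property mapping
+//   ...
+//   if (savedId == laterId) { /* comparison stays meaningful after unload */ }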
+
+struct TPIndex
+{
+ DWORD m_dwIndex;
+ TPIndex ()
+ : m_dwIndex(0)
+ {}
+ explicit TPIndex (DWORD id)
+ : m_dwIndex(id)
+ {}
+ BOOL operator==(const TPIndex& tpindex) const
+ {
+ return m_dwIndex == tpindex.m_dwIndex;
+ }
+ BOOL operator!=(const TPIndex& tpindex) const
+ {
+ return m_dwIndex != tpindex.m_dwIndex;
+ }
+};
+
+// Every Module is assigned a ModuleIndex, regardless of whether the Module is domain
+// neutral or domain specific. When a domain specific Module is unloaded, its ModuleIndex
+// can be reused.
+
+// ModuleIndexes are not the same as ModuleIDs. The main purpose of a ModuleIndex is
+// to have a compact way to refer to any Module (domain neutral or domain specific).
+// The main purpose of a ModuleID is to facilitate looking up the DomainLocalModule
+// that corresponds to a given Module in a given AppDomain.
+
+struct ModuleIndex
+{
+ SIZE_T m_dwIndex;
+ ModuleIndex ()
+ : m_dwIndex(0)
+ {}
+ explicit ModuleIndex (SIZE_T id)
+ : m_dwIndex(id)
+ { LIMITED_METHOD_DAC_CONTRACT; }
+ BOOL operator==(const ModuleIndex& ad) const
+ {
+ return m_dwIndex == ad.m_dwIndex;
+ }
+ BOOL operator!=(const ModuleIndex& ad) const
+ {
+ return m_dwIndex != ad.m_dwIndex;
+ }
+};
+
+//-----------------------------------------------------------------------------
+// GSCookies (guard-stack cookies) for detecting buffer overruns
+//-----------------------------------------------------------------------------
+
+typedef DPTR(GSCookie) PTR_GSCookie;
+
+#ifndef CLR_STANDALONE_BINDER
+#ifndef DACCESS_COMPILE
+// const is so that it gets placed in the .text section (which is read-only)
+// volatile is so that accesses to it do not get optimized away because of the const
+//
+
+extern "C" RAW_KEYWORD(volatile) const GSCookie s_gsCookie;
+
+inline
+GSCookie * GetProcessGSCookiePtr() { return const_cast<GSCookie *>(&s_gsCookie); }
+
+#else
+
+extern __GlobalVal< GSCookie > s_gsCookie;
+
+inline
+PTR_GSCookie GetProcessGSCookiePtr() { return PTR_GSCookie(&s_gsCookie); }
+
+#endif //!DACCESS_COMPILE
+
+inline
+GSCookie GetProcessGSCookie() { return *(RAW_KEYWORD(volatile) GSCookie *)(&s_gsCookie); }
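+
+// Editorial sketch (not part of the original change): the guard-stack-cookie
+// protocol the accessors above support. The failure routine named here is
+// hypothetical.
+//
+//   GSCookie saved = GetProcessGSCookie();   // copy placed next to a stack buffer
+//   ...                                      // code that might overrun the buffer
+//   if (saved != GetProcessGSCookie())       // an overrun clobbered the copy
+//       FailFastOnCookieMismatch();          // hypothetical failure path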
+
+class CEECompileInfo;
+extern CEECompileInfo *g_pCEECompileInfo;
+
+#ifdef FEATURE_READYTORUN_COMPILER
+extern bool g_fReadyToRunCompilation;
+#endif
+
+// Returns true if this is an NGen compilation process.
+// This is a superset of CompilationDomain::IsCompilationDomain(), as ngen hosts more
+// than one AppDomain (it also has the DefaultDomain).
+BOOL IsCompilationProcess();
+
+// Flag for cross-platform ngen: Removes all execution of managed or third-party code in the ngen compilation process.
+BOOL NingenEnabled();
+
+// Passed to JitManager APIs to determine whether to avoid calling into the host.
+// The profiling API stackwalking uses this to ensure to avoid re-entering the host
+// (particularly SQL) from a hijacked thread.
+enum HostCallPreference
+{
+ AllowHostCalls,
+ NoHostCalls,
+};
+
+#endif /* _VARS_HPP */
+#endif /* !CLR_STANDALONE_BINDER */
diff --git a/src/vm/verifier.cpp b/src/vm/verifier.cpp
new file mode 100644
index 0000000000..0e8f07ef98
--- /dev/null
+++ b/src/vm/verifier.cpp
@@ -0,0 +1,470 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// verifier.cpp
+//
+
+//
+//
+//
+// Registry / Environment settings :
+//
+// Create registry entries in CURRENT_USER\Software\Microsoft\.NETFramework
+// or set environment variables COMPlus_* with the names given below.
+// Environment settings override registry settings.
+//
+// For breaking into the debugger / Skipping verification :
+// (available only in the debug build).
+//
+// VerBreakOnError [STRING] Break into the debugger on error. Set to 1
+// VerSkip [STRING] method names (case sensitive)
+// VerBreak [STRING] method names (case sensitive)
+// VerOffset [STRING] Offset in the method in hex
+// VerPass [STRING] 1 / 2 ==> First pass, second pass
+// VerMsgMethodInfoOff [STRING] Print method / module info on error
+//
+// NOTE : If there is more than one method in the list and an offset
+// is specified, the offset applies to all methods in the list.
+//
+// NOTE : Verifier should be enabled for this to work.
+//
+// To switch the verifier off (default is on):
+// (available on all builds).
+//
+// VerifierOff [STRING] 1 ==> Verifier is Off, 0 ==> Verifier is On
+//
+// [See EEConfig.h / EEConfig.cpp]
+//
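+// Example (editorial, not in the original comment): to break into the
+// debugger when verification of a particular method fails in a debug build,
+// one could set, before launching the process:
+//
+//     set COMPlus_VerBreakOnError=1
+//     set COMPlus_VerBreak=MyClass::MyMethod
+//
+// The exact method-name format expected by VerBreak is an assumption here;
+// the setting names are the ones documented above.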
+//
+// Meaning of code marked with @XXX
+//
+// @VER_ASSERT : Already verified.
+// @VER_IMPL : Verification rules implemented here.
+// @DEBUG : To be removed/commented before checkin.
+//
+
+
+#include "common.h"
+
+#include "verifier.hpp"
+#include "ceeload.h"
+#include "clsload.hpp"
+#include "method.hpp"
+#include "vars.hpp"
+#include "object.h"
+#include "field.h"
+#include "comdelegate.h"
+#include "security.h"
+#include "dbginterface.h"
+#include "securityattributes.h"
+#include "eeconfig.h"
+#include "sourceline.h"
+#include "typedesc.h"
+#include "typestring.h"
+#include "../dlls/mscorrc/resource.h"
+
+
+#define VER_NAME_INFO_SIZE 128
+#define VER_SMALL_BUF_LEN 256
+#define VER_FAILED_TO_LOAD_RESOURCE_STRING "(Failed to load resource string)"
+
+#define VER_LD_RES(e, fld) \
+ { \
+ if ((sRes.LoadResource(CCompRC::Error, e ))) \
+ { \
+ sPrint.Printf(sRes.GetUnicode(), err.fld); \
+ sMessage += sPrint; \
+ } \
+ else \
+ { \
+ SString s(SString::Ascii, VER_FAILED_TO_LOAD_RESOURCE_STRING); \
+ sMessage += s; \
+ } \
+ }
+
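+// Editorial note (not in the original source): each VER_LD_RES(e, fld) use
+// below expands to a block that loads resource string 'e', formats it with
+// err.fld via SString::Printf, and appends the result to sMessage; if the
+// resource fails to load, the fixed "(Failed to load resource string)"
+// marker is appended instead. For example, VER_LD_RES(VER_E_OFFSET, dwOffset)
+// appends the VER_E_OFFSET text formatted with err.dwOffset.
+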
+// Formats the error message and copies it into the caller-supplied WCHAR buffer
+WCHAR* Verifier::GetErrorMsg(
+ HRESULT hrError,
+ VerError err,
+ __inout_ecount(len) WCHAR *wszMsg,
+ int len,
+ ValidateWorkerArgs* pArgs)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ SString sMessage; // to debug, watch "(WCHAR*)sMessage.m_buffer"
+ SString sPrint;
+ LPCSTR szMethodName;
+
+ NewHolder<SourceLine> pSL(NULL);
+
+ if (pArgs->pMethodDesc)
+ {
+ // source lines
+ if (pArgs->fShowSourceLines && pArgs->wszFileName)
+ {
+ pSL = new SourceLine(pArgs->wszFileName);
+ if(pSL->IsInitialized())
+ {
+ DWORD dwFunctionToken = pArgs->pMethodDesc->GetMemberDef();
+ WCHAR wcBuffer[VER_SMALL_BUF_LEN];
+ wcBuffer[0] = 0;
+ DWORD dwLineNumber;
+ HRESULT hr;
+ hr = pSL->GetSourceLine( dwFunctionToken, err.dwOffset, wcBuffer, VER_SMALL_BUF_LEN, &dwLineNumber );
+ sPrint.Printf(W("%s(%d) : "), wcBuffer, dwLineNumber);
+ sMessage += sPrint;
+ }
+ SString sRes;
+ sRes.LoadResource(CCompRC::Debugging, IDS_VER_E_ILERROR);
+ sMessage += sRes;
+ }
+
+ // module
+ sMessage += W("[");
+ sMessage += pArgs->pMethodDesc->GetModule()->GetPath();
+
+ // class
+ sMessage += W(" : ");
+ if (pArgs->pMethodDesc->GetMethodTable() != NULL)
+ {
+ // DefineFullyQualifiedNameForClass();
+ // GetFullyQualifiedNameForClassNestedAware(pClass);
+ // sMessage += FilterAscii(_szclsname_, szTemp, VER_NAME_INFO_SIZE);
+ SString clsname;
+ TypeString::AppendType(clsname,TypeHandle(pArgs->pMethodDesc->GetMethodTable()));
+ sMessage += clsname;
+ }
+ else
+ {
+ SString sRes;
+ sRes.LoadResource(CCompRC::Debugging, IDS_VER_E_GLOBAL);
+ sMessage += sRes;
+ }
+
+ // method
+ sMessage += W("::");
+ if (FAILED(pArgs->pMethodDesc->GetModule()->GetMDImport()->GetNameOfMethodDef(pArgs->pMethodDesc->GetMemberDef(), &szMethodName)))
+ {
+ szMethodName = "Invalid MethodDef record";
+ }
+ SString sNameOfMethod(SString::Utf8, szMethodName);
+ sMessage += sNameOfMethod;
+
+ if (pArgs->pMethodDesc->IsGenericMethodDefinition())
+ {
+ SString inst;
+ TypeString::AppendInst(inst,pArgs->pMethodDesc->GetMethodInstantiation(),TypeString::FormatBasic);
+ sMessage += inst;
+ }
+
+ sMessage += W("]");
+
+ // MD token
+ if(pArgs->fVerbose)
+ {
+ SString sRes;
+ sRes.LoadResource(CCompRC::Debugging, IDS_VER_E_MDTOKEN);
+ DWORD dwMDToken = pArgs->pMethodDesc->GetMemberDef();
+ sPrint.Printf(sRes.GetUnicode(), dwMDToken);
+ sMessage += sPrint;
+ }
+ }
+
+ // Fill In the details
+ SString sRes;
+
+ // Create the generic error fields
+
+ if (err.dwFlags & VER_ERR_OFFSET)
+ VER_LD_RES(VER_E_OFFSET, dwOffset);
+
+ if (err.dwFlags & VER_ERR_OPCODE)
+ {
+ if (sRes.LoadResource(CCompRC::Error, VER_E_OPCODE))
+ {
+ sPrint.Printf(sRes, ppOpcodeNameList[err.opcode]);
+ sMessage += W(" ");
+ sMessage += sPrint;
+ }
+ }
+
+ if (err.dwFlags & VER_ERR_OPERAND)
+ VER_LD_RES(VER_E_OPERAND, dwOperand);
+
+ if (err.dwFlags & VER_ERR_TOKEN)
+ VER_LD_RES(VER_E_TOKEN, token);
+
+ if (err.dwFlags & VER_ERR_EXCEP_NUM_1)
+ VER_LD_RES(VER_E_EXCEPT, dwException1);
+
+ if (err.dwFlags & VER_ERR_EXCEP_NUM_2)
+ VER_LD_RES(VER_E_EXCEPT, dwException2);
+
+ if (err.dwFlags & VER_ERR_STACK_SLOT)
+ VER_LD_RES(VER_E_STACK_SLOT, dwStackSlot);
+
+ if ((err.dwFlags & VER_ERR_SIG_MASK) == VER_ERR_LOCAL_SIG)
+ {
+ if (err.dwVarNumber != VER_ERR_NO_LOC)
+ {
+ if(pArgs->fShowSourceLines && pSL && pSL->IsInitialized() && pArgs->pMethodDesc)
+ {
+ if ((sRes.LoadResource(CCompRC::Error, VER_E_LOC_BYNAME)))
+ {
+ DWORD dwFunctionToken = pArgs->pMethodDesc->GetMemberDef();
+ WCHAR wcBuffer[VER_SMALL_BUF_LEN];
+ wcBuffer[0] = 0;
+ HRESULT hr;
+ hr = pSL->GetLocalName(dwFunctionToken, err.dwVarNumber, wcBuffer, VER_SMALL_BUF_LEN);
+ sPrint.Printf(sRes.GetUnicode(), wcBuffer);
+ }
+ else
+ {
+ SString s(SString::Ascii, VER_FAILED_TO_LOAD_RESOURCE_STRING);
+ sPrint = s;
+ }
+ }
+ else
+ {
+ if ((sRes.LoadResource(CCompRC::Error, VER_E_LOC)))
+ sPrint.Printf(sRes.GetUnicode(), err.dwVarNumber);
+ else
+ {
+ SString s(SString::Ascii, VER_FAILED_TO_LOAD_RESOURCE_STRING);
+ sPrint = s;
+ }
+ }
+ sMessage += sPrint;
+ }
+ }
+
+ if ((err.dwFlags & VER_ERR_SIG_MASK) == VER_ERR_FIELD_SIG)
+ {
+ if (sRes.LoadResource(CCompRC::Error, VER_E_FIELD_SIG))
+ {
+ sMessage += W(" ");
+ sMessage += sRes;
+ }
+ }
+
+ if (((err.dwFlags & VER_ERR_SIG_MASK) == VER_ERR_METHOD_SIG) ||
+ ((err.dwFlags & VER_ERR_SIG_MASK) == VER_ERR_CALL_SIG))
+ {
+ if (err.dwArgNumber != VER_ERR_NO_ARG)
+ {
+ if (err.dwArgNumber != VER_ERR_ARG_RET)
+ {
+ VER_LD_RES(VER_E_ARG, dwArgNumber);
+ }
+ else if (sRes.LoadResource(CCompRC::Error, VER_E_RET_SIG))
+ {
+ sMessage += W(" ");
+ sMessage += sRes;
+ }
+ }
+ }
+
+ if (err.dwFlags & VER_ERR_TYPE_1)
+ sMessage += err.wszType1;
+
+ if (err.dwFlags & VER_ERR_TYPE_2)
+ sMessage += err.wszType2;
+
+ if (err.dwFlags & VER_ERR_ADDL_MSG)
+ sMessage += err.wszAdditionalMessage;
+
+ if (err.dwFlags & VER_ERR_TYPE_F)
+ {
+ if (sRes.LoadResource(CCompRC::Error, VER_E_FOUND))
+ {
+ sPrint.Printf(sRes, err.wszTypeFound);
+ sMessage += sPrint;
+ }
+ }
+
+ if (err.dwFlags & VER_ERR_TYPE_E)
+ {
+ if (sRes.LoadResource(CCompRC::Error, VER_E_EXPECTED))
+ {
+ sPrint.Printf(sRes, err.wszTypeExpected);
+ sMessage += sPrint;
+ }
+ }
+
+ // Handle the special cases
+ switch (hrError)
+ {
+ case VER_E_UNKNOWN_OPCODE:
+ VER_LD_RES(VER_E_UNKNOWN_OPCODE, opcode);
+ break;
+
+ case VER_E_SIG_CALLCONV:
+ VER_LD_RES(VER_E_SIG_CALLCONV, bCallConv);
+ break;
+
+ case VER_E_SIG_ELEMTYPE:
+ VER_LD_RES(VER_E_SIG_ELEMTYPE, elem);
+ break;
+
+ case COR_E_ASSEMBLYEXPECTED:
+ Verifier::GetAssemblyName(hrError,sMessage, sRes, sPrint, pArgs);
+ break;
+
+ case SECURITY_E_UNVERIFIABLE:
+ Verifier::GetAssemblyName(hrError,sMessage, sRes, sPrint, pArgs);
+ break;
+
+ case CORSEC_E_MIN_GRANT_FAIL:
+ Verifier::GetAssemblyName(hrError,sMessage, sRes, sPrint, pArgs);
+ break;
+
+ case __HRESULT_FROM_WIN32(ERROR_BAD_FORMAT):
+ // fall through
+
+ default:
+ Verifier::GetDefaultMessage(hrError,sMessage, sRes, sPrint);
+ }
+
+ wcsncpy_s(wszMsg, len, sMessage.GetUnicode(), _TRUNCATE);
+ return wszMsg;
+}
+
+/*static*/ VOID Verifier::GetDefaultMessage(HRESULT hrError, SString& sMessage, SString& sRes, SString& sPrint)
+{
+ if (sMessage.GetCount() > 0)
+ sMessage += W(" ");
+
+ if (HRESULT_FACILITY(hrError) == FACILITY_URT && sRes.LoadResource(CCompRC::Error, MSG_FOR_URT_HR(hrError)))
+ sMessage += sRes;
+ else
+ {
+ WCHAR win32Msg[VER_SMALL_BUF_LEN];
+ BOOL useWin32Msg = WszFormatMessage( FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ hrError,
+#if FEATURE_USE_LCID
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language
+#else
+ 0,
+#endif
+ (LPTSTR) win32Msg,
+ VER_SMALL_BUF_LEN - 1,
+ NULL );
+
+ if (sRes.LoadResource(CCompRC::Error, VER_E_HRESULT))
+ {
+ sPrint.Printf(sRes, hrError);
+
+ if (useWin32Msg)
+ {
+ sPrint += W(" - ");
+ sPrint += win32Msg;
+ }
+
+ sMessage += W(" ");
+ sMessage += sPrint;
+ }
+ else
+ {
+ SString s(SString::Ascii, VER_FAILED_TO_LOAD_RESOURCE_STRING);
+ sMessage += s;
+ }
+ }
+}
+
+/*static*/ HRESULT Verifier::ReportError(IVEHandler *pVeh, HRESULT hrError, VEContext* pVec, ValidateWorkerArgs* pArgs)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ // Filter out error messages that require parameters
+ switch(hrError)
+ {
+ case COR_E_TYPELOAD: hrError = VER_E_TYPELOAD; break;
+ }
+
+ HRESULT hr = E_FAIL;
+ EX_TRY
+ {
+ GCX_PREEMP();
+
+ // There is no room for expansion in the VEHandler interface, so we're
+ // stuffing our extra data into the SafeArray that was originally
+ // designed to be used only by the MDValidator.
+
+ // Note: VT_VARIANT is the only supported safe array type on Rotor
+ SAFEARRAY* pSafeArray = SafeArrayCreateVector(VT_VARIANT, 0, 1);
+ _ASSERTE(pSafeArray);
+ if (pSafeArray)
+ {
+ VARIANT var;
+#ifdef _WIN64
+ V_VT(&var) = VT_UI8; // machine sized int. (VT_UI8 not supported on Windows 2000)
+ V_UINT_PTR(&var) = (UINT64)(size_t)(pArgs);
+#else
+ V_VT(&var) = VT_UINT; // machine sized int
+ V_UINT_PTR(&var) = (ULONG_PTR)(pArgs);
+#endif
+ LONG i = 0;
+ HRESULT hrPutElement;
+ hrPutElement = SafeArrayPutElement(pSafeArray, &i, &var);
+ _ASSERTE(hrPutElement == S_OK);
+ }
+
+ // Call the handler
+ hr = pVeh->VEHandler(hrError, *pVec, pSafeArray);
+
+ // Clean up the SafeArray we allocated
+ HRESULT hrDestroy;
+ hrDestroy = SafeArrayDestroy(pSafeArray);
+ _ASSERTE(hrDestroy == S_OK);
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return hr;
+}
+
+/*static*/ VOID Verifier::GetAssemblyName(HRESULT hrError, SString& sMessage, SString& sRes, SString& sPrint, ValidateWorkerArgs* pArgs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ if(sRes.LoadResource(CCompRC::Error, hrError))
+ {
+ // find the '%1'
+ SString::Iterator i = sRes.Begin();
+ if (sRes.Find(i, W("'%1'")))
+ {
+ // replace the '%1' with the module name
+ if(pArgs->wszFileName)
+ {
+ sPrint = pArgs->wszFileName;
+ sRes.Replace(i + 1, 2, sPrint);
+ }
+ else
+ {
+ sPrint = W("");
+ sRes.Replace(i, 4, sPrint);
+ }
+ sMessage += sRes;
+ }
+ }
+ else
+ {
+ SString s(SString::Ascii, VER_FAILED_TO_LOAD_RESOURCE_STRING);
+ sMessage += s;
+ }
+}
diff --git a/src/vm/verifier.hpp b/src/vm/verifier.hpp
new file mode 100644
index 0000000000..90f27e89b1
--- /dev/null
+++ b/src/vm/verifier.hpp
@@ -0,0 +1,112 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// verifier.hpp
+//
+
+//
+//
+//
+// Dead code verification is for supporting FJIT. If FJIT gets fixed so that it
+// can handle dead code, remove the code #ifdef'ed under _VER_VERIFY_DEAD_CODE.
+//
+
+
+#ifndef _VERIFIER_HPP
+#define _VERIFIER_HPP
+
+#define _VER_VERIFY_DEAD_CODE 1 // Verifies dead code
+
+#include "ivehandler.h"
+#include "vererror.h"
+
+class Verifier;
+class CValidator;
+class ValidateWorkerArgs;
+
+#define VER_FORCE_VERIFY 0x0001 // Fail even for fully trusted code
+#define VER_STOP_ON_FIRST_ERROR 0x0002 // Tools can handle multiple errors
+
+// Extensions to ELEMENT_TYPE_* enumeration in cor.h
+
+// Any objref
+#define VER_ELEMENT_TYPE_OBJREF (ELEMENT_TYPE_MAX)
+
+// Any value class
+#define VER_ELEMENT_TYPE_VALUE_CLASS (ELEMENT_TYPE_MAX+1)
+
+// A by-ref anything
+#define VER_ELEMENT_TYPE_BYREF (ELEMENT_TYPE_MAX+2)
+
+// Unknown/invalid type
+#define VER_ELEMENT_TYPE_UNKNOWN (ELEMENT_TYPE_MAX+3)
+
+// Sentinel value (stored at slots -1 and -2 of the stack to catch stack overflow)
+#define VER_ELEMENT_TYPE_SENTINEL (ELEMENT_TYPE_MAX+4)
+
+#define VER_LAST_BASIC_TYPE (ELEMENT_TYPE_MAX+4)
+
+#define VER_ARG_RET VER_ERR_ARG_RET
+#define VER_NO_ARG VER_ERR_NO_ARG
+
+
+
+#include "cor.h"
+#include "veropcodes.hpp"
+#include "util.hpp"
+
+
+#define MAX_SIGMSG_LENGTH 100
+#define MAX_FAILMSG_LENGTH (384 + MAX_SIGMSG_LENGTH)
+
+
+struct VerExceptionInfo;
+struct VerExceptionBlock;
+class Verifier;
+
+
+
+class Verifier
+{
+ friend class VerSig;
+ friend class Item;
+
+public:
+ static WCHAR* GetErrorMsg(HRESULT hError, VerError err, __inout_ecount(len) WCHAR *wszMsg, int len, ValidateWorkerArgs* pArgs);
+ static HRESULT ReportError(IVEHandler *pVeh, HRESULT hrError, VEContext* pVec, ValidateWorkerArgs* pArgs);
+
+private:
+ static VOID GetDefaultMessage(HRESULT hrError, SString& sMessage, SString& sRes, SString& sPrint);
+ static VOID GetAssemblyName(HRESULT hrError, SString& sMessage, SString& sRes, SString& sPrint, ValidateWorkerArgs* pArgs);
+};
+
+
+class ValidateWorkerArgs
+{
+public:
+ CValidator *val;
+ HRESULT hr;
+ bool fDeletePEFile;
+ MethodDesc* pMethodDesc;
+ LPWSTR wszFileName;
+ BYTE *pe;
+ unsigned int size;
+ bool fVerbose;
+ bool fShowSourceLines;
+ bool fTransparentMethodsOnly;
+
+ ValidateWorkerArgs()
+ : val(NULL),
+ hr(S_OK),
+ fDeletePEFile(true),
+ pMethodDesc(NULL),
+ wszFileName(NULL),
+ fVerbose(false),
+ fShowSourceLines(false),
+ fTransparentMethodsOnly(false)
+ {LIMITED_METHOD_CONTRACT; }
+};
+
+#endif /* _VERIFIER_HPP */
diff --git a/src/vm/veropcodes.hpp b/src/vm/veropcodes.hpp
new file mode 100644
index 0000000000..a4a8907433
--- /dev/null
+++ b/src/vm/veropcodes.hpp
@@ -0,0 +1,31 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// veropcodes.hpp
+//
+
+//
+// Declares the enumeration of the opcodes and the decoding tables.
+//
+
+#include "openum.h"
+
+#define HackInlineAnnData 0x7F
+
+#ifdef DECLARE_DATA
+#define OPDEF(c,s,pop,push,args,type,l,s1,s2,ctrl) L##s,
+
+const WCHAR * const ppOpcodeNameList[] =
+{
+#include "../inc/opcode.def"
+};
+
+#undef OPDEF
+
+#else /* !DECLARE_DATA */
+
+extern const WCHAR * const ppOpcodeNameList[];
+
+#endif /* DECLARE_DATA */
diff --git a/src/vm/vertable.h b/src/vm/vertable.h
new file mode 100644
index 0000000000..f66d58fdc1
--- /dev/null
+++ b/src/vm/vertable.h
@@ -0,0 +1,381 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*
+ * Types for pop stack/push stack
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * 1 I1/U1
+ * 2 I2/U2
+ * 4 I4/U4
+ * 8 I8/U8
+ * r R4
+ * d R8
+ * o objref (can be an array or null)
+ * [ single dimensional array of (prefix)
+ * & byref/managed ptr (prefix)
+ *
+ * Pop only
+ * ~~~~~~~~
+ * R real number
+ * N number -any integer or real number
+ * Q number or unmanaged pointer
+ * X number, unmanaged pointer, managed pointer, or objref [Obsolete]
+ * Y integer (I1..I4), unmanaged pointer, managed pointer, or objref
+ * I Integral type (1, 2, 4, or 8 byte, or platform-independent integer type)
+ * A Anything
+ *
+ * CE "ceq" semantics - pop 2 arguments, do type checking as if for "ceq" instruction:
+ * Integer Real ManagedPtr UnmanagedPtr Objref
+ * Integer y
+ * Real y
+ * ManagedPtr y y
+ * UnmanagedPtr y
+ * Objref y y
+ *
+ * CG "cgt" semantics - pop 2 arguments, do type checking as if for "cgt" instruction:
+ * Integer Real ManagedPtr UnmanagedPtr Objref
+ * Integer y
+ * Real y
+ * ManagedPtr y
+ * UnmanagedPtr
+ * Objref y
+ *
+ * = Pop another item off the stack, and it must be the same type (int,real,objref,etc.) as the
+ * last item popped (note, System.Int32 <-> I4 etc. are allowed). Other value
+ * classes are NOT allowed.
+ *
+ * i (deprecated) Platform independent size value, but NOT an objref (I4/R4/ptr on 32-bit, I8/R8/ptr on 64-bit)
+ * p (deprecated) Platform independent size value OR objref
+ * * (deprecated) anything
+
+ * Push only
+ * ~~~~~~~~~
+ * n null objref (valid for push only)
+ * - Rewind the stack to undo the last pop (you cannot have trashed that location, though)
+ *
+ * Usage: <pop stack> : <operand checks> <push stack> <branches> <!>
+ *
+ * Order is important! Operand checks come after pop stack and before push stack.
+ * For example, to check the operand being a valid local variable number (only), do ":L"
+ *
+ * If there is a "!" at the end, it means the instruction is invalid or not supported, or that
+ * there is a case statement to handle the instruction. If no case statement exists, the verifier
+ * will fail verification of the method.
+ *
+ * ! can be used to perform some operand checks and/or stack pops/pushes, while still allowing specific
+ * behavior; e.g. verifying that the inline operand is a valid local variable number.
+ *
+ * <operand checks>
+ * ~~~~~~~~~~~~~~~~
+ * #d Overwrite inline operand with digit "d" (must be in 0...9 range)
+ * L Check that the operand is a valid local variable number.
+ * A Check that the operand is a valid argument number.
+ *
+ * <branches>
+ * ~~~~~~~~~~
+ * b1 - one byte conditional branch
+ * b4 - four byte conditional branch
+ * u1 - one byte unconditional branch
+ * u4 - four byte unconditional branch
+ * l1 - one byte leave
+ * l4 - four byte leave
+ *
+ */
+
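+/*
+ * Worked example (editorial, not part of the original table): the entry
+ *
+ *     VEROPCODE(CEE_STELEM_I2, "44[2:")
+ *
+ * is read left to right on the pop side, topmost stack item first: pop an
+ * I4 (the value; small integers live on the stack as I4), pop an I4 (the
+ * index), then pop a one-dimensional array of I2 ("[2"). Nothing follows
+ * the ':', so the instruction pushes nothing.
+ */
+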
+VEROPCODE(CEE_NOP, ":")
+VEROPCODE(CEE_BREAK, ":")
+VEROPCODE(CEE_LDARG_0, ":#0A!")
+VEROPCODE(CEE_LDARG_1, ":#1A!")
+VEROPCODE(CEE_LDARG_2, ":#2A!")
+VEROPCODE(CEE_LDARG_3, ":#3A!")
+VEROPCODE(CEE_LDLOC_0, ":#0L!")
+VEROPCODE(CEE_LDLOC_1, ":#1L!")
+VEROPCODE(CEE_LDLOC_2, ":#2L!")
+VEROPCODE(CEE_LDLOC_3, ":#3L!")
+VEROPCODE(CEE_STLOC_0, ":#0L!")
+VEROPCODE(CEE_STLOC_1, ":#1L!")
+VEROPCODE(CEE_STLOC_2, ":#2L!")
+VEROPCODE(CEE_STLOC_3, ":#3L!")
+VEROPCODE(CEE_LDARG_S, ":A!")
+VEROPCODE(CEE_LDARGA_S, ":A!")
+VEROPCODE(CEE_STARG_S, ":A!")
+VEROPCODE(CEE_LDLOC_S, ":L!")
+VEROPCODE(CEE_LDLOCA_S, ":L!")
+VEROPCODE(CEE_STLOC_S, ":L!")
+VEROPCODE(CEE_LDNULL, ":n")
+VEROPCODE(CEE_LDC_I4_M1, ":4")
+VEROPCODE(CEE_LDC_I4_0, ":4")
+VEROPCODE(CEE_LDC_I4_1, ":4")
+VEROPCODE(CEE_LDC_I4_2, ":4")
+VEROPCODE(CEE_LDC_I4_3, ":4")
+VEROPCODE(CEE_LDC_I4_4, ":4")
+VEROPCODE(CEE_LDC_I4_5, ":4")
+VEROPCODE(CEE_LDC_I4_6, ":4")
+VEROPCODE(CEE_LDC_I4_7, ":4")
+VEROPCODE(CEE_LDC_I4_8, ":4")
+VEROPCODE(CEE_LDC_I4_S, ":4")
+VEROPCODE(CEE_LDC_I4, ":4")
+VEROPCODE(CEE_LDC_I8, ":8")
+VEROPCODE(CEE_LDC_R4, ":r")
+VEROPCODE(CEE_LDC_R8, ":d")
+VEROPCODE(CEE_UNUSED49, "!")
+VEROPCODE(CEE_DUP, "!")
+VEROPCODE(CEE_POP, "A:")
+VEROPCODE(CEE_JMP, "!") // Unverifiable !
+VEROPCODE(CEE_CALL, "!")
+VEROPCODE(CEE_CALLI, "!")
+VEROPCODE(CEE_RET, "!")
+VEROPCODE(CEE_BR_S, ":u1")
+VEROPCODE(CEE_BRFALSE_S, "Y:b1")
+VEROPCODE(CEE_BRTRUE_S, "Y:b1")
+VEROPCODE(CEE_BEQ_S, "CE:b1")
+VEROPCODE(CEE_BGE_S, "CG:b1")
+VEROPCODE(CEE_BGT_S, "CG:b1")
+VEROPCODE(CEE_BLE_S, "CG:b1")
+VEROPCODE(CEE_BLT_S, "CG:b1")
+VEROPCODE(CEE_BNE_UN_S, "CE:b1")
+VEROPCODE(CEE_BGE_UN_S, "CG:b1")
+VEROPCODE(CEE_BGT_UN_S, "CG:b1")
+VEROPCODE(CEE_BLE_UN_S, "CG:b1")
+VEROPCODE(CEE_BLT_UN_S, "CG:b1")
+VEROPCODE(CEE_BR, ":u4")
+VEROPCODE(CEE_BRFALSE, "Y:b4")
+VEROPCODE(CEE_BRTRUE, "Y:b4")
+VEROPCODE(CEE_BEQ, "CE:b4")
+VEROPCODE(CEE_BGE, "CG:b4")
+VEROPCODE(CEE_BGT, "CG:b4")
+VEROPCODE(CEE_BLE, "CG:b4")
+VEROPCODE(CEE_BLT, "CG:b4")
+VEROPCODE(CEE_BNE_UN, "CE:b4")
+VEROPCODE(CEE_BGE_UN, "CG:b4")
+VEROPCODE(CEE_BGT_UN, "CG:b4")
+VEROPCODE(CEE_BLE_UN, "CG:b4")
+VEROPCODE(CEE_BLT_UN, "CG:b4")
+VEROPCODE(CEE_SWITCH, "!")
+VEROPCODE(CEE_LDIND_I1, "&1:4")
+VEROPCODE(CEE_LDIND_U1, "&1:4")
+VEROPCODE(CEE_LDIND_I2, "&2:4")
+VEROPCODE(CEE_LDIND_U2, "&2:4")
+VEROPCODE(CEE_LDIND_I4, "&4:4")
+VEROPCODE(CEE_LDIND_U4, "&4:4")
+VEROPCODE(CEE_LDIND_I8, "&8:8")
+VEROPCODE(CEE_LDIND_I, "&i:i") // <TODO> not correct on 64 bit</TODO>
+VEROPCODE(CEE_LDIND_R4, "&r:r")
+VEROPCODE(CEE_LDIND_R8, "&d:d")
+VEROPCODE(CEE_LDIND_REF, "!")
+VEROPCODE(CEE_STIND_REF, "!")
+VEROPCODE(CEE_STIND_I1, "4&1:")
+VEROPCODE(CEE_STIND_I2, "4&2:")
+VEROPCODE(CEE_STIND_I4, "4&4:")
+VEROPCODE(CEE_STIND_I8, "8&8:")
+VEROPCODE(CEE_STIND_R4, "r&r:")
+VEROPCODE(CEE_STIND_R8, "d&d:")
+VEROPCODE(CEE_ADD, "N=:-")
+VEROPCODE(CEE_SUB, "N=:-")
+VEROPCODE(CEE_MUL, "N=:-")
+VEROPCODE(CEE_DIV, "N=:-")
+VEROPCODE(CEE_DIV_UN, "I=:-")
+VEROPCODE(CEE_REM, "N=:-")
+VEROPCODE(CEE_REM_UN, "I=:-")
+VEROPCODE(CEE_AND, "I=:-")
+VEROPCODE(CEE_OR, "I=:-")
+VEROPCODE(CEE_XOR, "I=:-")
+VEROPCODE(CEE_SHL, "4I:-")
+VEROPCODE(CEE_SHR, "4I:-")
+VEROPCODE(CEE_SHR_UN, "4I:-")
+VEROPCODE(CEE_NEG, "N:-")
+VEROPCODE(CEE_NOT, "I:-")
+VEROPCODE(CEE_CONV_I1, "Q:4")
+VEROPCODE(CEE_CONV_I2, "Q:4")
+VEROPCODE(CEE_CONV_I4, "Q:4")
+VEROPCODE(CEE_CONV_I8, "Q:8")
+VEROPCODE(CEE_CONV_R4, "N:r")
+VEROPCODE(CEE_CONV_R8, "N:d")
+VEROPCODE(CEE_CONV_U4, "Q:4")
+VEROPCODE(CEE_CONV_U8, "Q:8")
+VEROPCODE(CEE_CALLVIRT, "!")
+VEROPCODE(CEE_CPOBJ, "!")
+VEROPCODE(CEE_LDOBJ, "!")
+VEROPCODE(CEE_LDSTR, "!")
+VEROPCODE(CEE_NEWOBJ, "!")
+VEROPCODE(CEE_CASTCLASS, "!")
+VEROPCODE(CEE_ISINST, "!")
+VEROPCODE(CEE_CONV_R_UN, "Q:r")
+VEROPCODE(CEE_UNUSED58, "!")
+VEROPCODE(CEE_UNUSED1, "!")
+VEROPCODE(CEE_UNBOX, "!")
+VEROPCODE(CEE_THROW, "!")
+VEROPCODE(CEE_LDFLD, "!")
+VEROPCODE(CEE_LDFLDA, "!")
+VEROPCODE(CEE_STFLD, "!")
+VEROPCODE(CEE_LDSFLD, "!")
+VEROPCODE(CEE_LDSFLDA, "!")
+VEROPCODE(CEE_STSFLD, "!")
+VEROPCODE(CEE_STOBJ, "!")
+VEROPCODE(CEE_CONV_OVF_I1_UN, "Q:4")
+VEROPCODE(CEE_CONV_OVF_I2_UN, "Q:4")
+VEROPCODE(CEE_CONV_OVF_I4_UN, "Q:4")
+VEROPCODE(CEE_CONV_OVF_I8_UN, "Q:8")
+VEROPCODE(CEE_CONV_OVF_U1_UN, "Q:4")
+VEROPCODE(CEE_CONV_OVF_U2_UN, "Q:4")
+VEROPCODE(CEE_CONV_OVF_U4_UN, "Q:4")
+VEROPCODE(CEE_CONV_OVF_U8_UN, "Q:8")
+VEROPCODE(CEE_CONV_OVF_I_UN, "Q:i")
+VEROPCODE(CEE_CONV_OVF_U_UN, "Q:i")
+VEROPCODE(CEE_BOX, "!")
+VEROPCODE(CEE_NEWARR, "!")
+VEROPCODE(CEE_LDLEN, "[*:4")
+VEROPCODE(CEE_LDELEMA, "!")
+VEROPCODE(CEE_LDELEM_I1, "4[1:4")
+VEROPCODE(CEE_LDELEM_U1, "4[1:4")
+VEROPCODE(CEE_LDELEM_I2, "4[2:4")
+VEROPCODE(CEE_LDELEM_U2, "4[2:4")
+VEROPCODE(CEE_LDELEM_I4, "4[4:4")
+VEROPCODE(CEE_LDELEM_U4, "4[4:4")
+VEROPCODE(CEE_LDELEM_I8, "4[8:8")
+VEROPCODE(CEE_LDELEM_I, "4[i:i")
+VEROPCODE(CEE_LDELEM_R4, "4[r:r")
+VEROPCODE(CEE_LDELEM_R8, "4[d:d")
+VEROPCODE(CEE_LDELEM_REF, "!")
+VEROPCODE(CEE_STELEM_I, "i4[i:")
+VEROPCODE(CEE_STELEM_I1, "44[1:")
+VEROPCODE(CEE_STELEM_I2, "44[2:")
+VEROPCODE(CEE_STELEM_I4, "44[4:")
+VEROPCODE(CEE_STELEM_I8, "84[8:")
+VEROPCODE(CEE_STELEM_R4, "r4[r:")
+VEROPCODE(CEE_STELEM_R8, "d4[d:")
+VEROPCODE(CEE_STELEM_REF, "!")
+VEROPCODE(CEE_LDELEM, "!")
+VEROPCODE(CEE_STELEM, "!")
+VEROPCODE(CEE_UNBOX_ANY, "!")
+VEROPCODE(CEE_UNUSED5, "!")
+VEROPCODE(CEE_UNUSED6, "!")
+VEROPCODE(CEE_UNUSED7, "!")
+VEROPCODE(CEE_UNUSED8, "!")
+VEROPCODE(CEE_UNUSED9, "!")
+VEROPCODE(CEE_UNUSED10, "!")
+VEROPCODE(CEE_UNUSED11, "!")
+VEROPCODE(CEE_UNUSED12, "!")
+VEROPCODE(CEE_UNUSED13, "!")
+VEROPCODE(CEE_UNUSED14, "!")
+VEROPCODE(CEE_UNUSED15, "!")
+VEROPCODE(CEE_UNUSED16, "!")
+VEROPCODE(CEE_UNUSED17, "!")
+VEROPCODE(CEE_CONV_OVF_I1, "Q:4")
+VEROPCODE(CEE_CONV_OVF_U1, "Q:4")
+VEROPCODE(CEE_CONV_OVF_I2, "Q:4")
+VEROPCODE(CEE_CONV_OVF_U2, "Q:4")
+VEROPCODE(CEE_CONV_OVF_I4, "Q:4")
+VEROPCODE(CEE_CONV_OVF_U4, "Q:4")
+VEROPCODE(CEE_CONV_OVF_I8, "Q:8")
+VEROPCODE(CEE_CONV_OVF_U8, "Q:8")
+VEROPCODE(CEE_UNUSED50, "!")
+VEROPCODE(CEE_UNUSED18, "!")
+VEROPCODE(CEE_UNUSED19, "!")
+VEROPCODE(CEE_UNUSED20, "!")
+VEROPCODE(CEE_UNUSED21, "!")
+VEROPCODE(CEE_UNUSED22, "!")
+VEROPCODE(CEE_UNUSED23, "!")
+VEROPCODE(CEE_REFANYVAL, "!")
+VEROPCODE(CEE_CKFINITE, "R:-")
+VEROPCODE(CEE_UNUSED24, "!")
+VEROPCODE(CEE_UNUSED25, "!")
+VEROPCODE(CEE_MKREFANY, "!")
+VEROPCODE(CEE_UNUSED59, "!")
+VEROPCODE(CEE_UNUSED60, "!")
+VEROPCODE(CEE_UNUSED61, "!")
+VEROPCODE(CEE_UNUSED62, "!")
+VEROPCODE(CEE_UNUSED63, "!")
+VEROPCODE(CEE_UNUSED64, "!")
+VEROPCODE(CEE_UNUSED65, "!")
+VEROPCODE(CEE_UNUSED66, "!")
+VEROPCODE(CEE_UNUSED67, "!")
+VEROPCODE(CEE_LDTOKEN, "!")
+VEROPCODE(CEE_CONV_U2, "Q:4")
+VEROPCODE(CEE_CONV_U1, "Q:4")
+VEROPCODE(CEE_CONV_I, "Q:i")
+VEROPCODE(CEE_CONV_OVF_I, "Q:i")
+VEROPCODE(CEE_CONV_OVF_U, "Q:i")
+VEROPCODE(CEE_ADD_OVF, "I=:-")
+VEROPCODE(CEE_ADD_OVF_UN, "I=:-")
+VEROPCODE(CEE_MUL_OVF, "I=:-")
+VEROPCODE(CEE_MUL_OVF_UN, "I=:-")
+VEROPCODE(CEE_SUB_OVF, "I=:-")
+VEROPCODE(CEE_SUB_OVF_UN, "I=:-")
+VEROPCODE(CEE_ENDFINALLY, "!")
+VEROPCODE(CEE_LEAVE, ":l4")
+VEROPCODE(CEE_LEAVE_S, ":l1")
+VEROPCODE(CEE_STIND_I, "i&i:") // <TODO> : 64 bit</TODO>
+VEROPCODE(CEE_CONV_U, "Q:i")
+VEROPCODE(CEE_UNUSED26, "!")
+VEROPCODE(CEE_UNUSED27, "!")
+VEROPCODE(CEE_UNUSED28, "!")
+VEROPCODE(CEE_UNUSED29, "!")
+VEROPCODE(CEE_UNUSED30, "!")
+VEROPCODE(CEE_UNUSED31, "!")
+VEROPCODE(CEE_UNUSED32, "!")
+VEROPCODE(CEE_UNUSED33, "!")
+VEROPCODE(CEE_UNUSED34, "!")
+VEROPCODE(CEE_UNUSED35, "!")
+VEROPCODE(CEE_UNUSED36, "!")
+VEROPCODE(CEE_UNUSED37, "!")
+VEROPCODE(CEE_UNUSED38, "!")
+VEROPCODE(CEE_UNUSED39, "!")
+VEROPCODE(CEE_UNUSED40, "!")
+VEROPCODE(CEE_UNUSED41, "!")
+VEROPCODE(CEE_UNUSED42, "!")
+VEROPCODE(CEE_UNUSED43, "!")
+VEROPCODE(CEE_UNUSED44, "!")
+VEROPCODE(CEE_UNUSED45, "!")
+VEROPCODE(CEE_UNUSED46, "!")
+VEROPCODE(CEE_UNUSED47, "!")
+VEROPCODE(CEE_UNUSED48, "!")
+VEROPCODE(CEE_PREFIX7, "!")
+VEROPCODE(CEE_PREFIX6, "!")
+VEROPCODE(CEE_PREFIX5, "!")
+VEROPCODE(CEE_PREFIX4, "!")
+VEROPCODE(CEE_PREFIX3, "!")
+VEROPCODE(CEE_PREFIX2, "!")
+VEROPCODE(CEE_PREFIX1, "!")
+VEROPCODE(CEE_PREFIXREF, "!")
+VEROPCODE(CEE_ARGLIST, "!")
+VEROPCODE(CEE_CEQ, "CE:4")
+VEROPCODE(CEE_CGT, "CG:4")
+VEROPCODE(CEE_CGT_UN, "CE:4")
+VEROPCODE(CEE_CLT, "CG:4")
+VEROPCODE(CEE_CLT_UN, "CG:4")
+VEROPCODE(CEE_LDFTN, "!")
+VEROPCODE(CEE_LDVIRTFTN, "!")
+VEROPCODE(CEE_UNUSED56, "!")
+VEROPCODE(CEE_LDARG, ":A!")
+VEROPCODE(CEE_LDARGA, ":A!")
+VEROPCODE(CEE_STARG, ":A!")
+VEROPCODE(CEE_LDLOC, ":L!")
+VEROPCODE(CEE_LDLOCA, ":L!")
+VEROPCODE(CEE_STLOC, ":L!")
+VEROPCODE(CEE_LOCALLOC, "i:i!") // Unverifiable !
+VEROPCODE(CEE_UNUSED57, "!")
+VEROPCODE(CEE_ENDFILTER, "4:!")
+VEROPCODE(CEE_UNALIGNED, ":")
+VEROPCODE(CEE_VOLATILE, ":")
+VEROPCODE(CEE_TAILCALL, ":")
+VEROPCODE(CEE_INITOBJ, "!")
+VEROPCODE(CEE_CONSTRAINED, ":")
+VEROPCODE(CEE_CPBLK, "ii4:!") // Unverifiable !
+VEROPCODE(CEE_INITBLK, "i44:!") // Unverifiable !
+VEROPCODE(CEE_UNUSED69, "!")
+VEROPCODE(CEE_RETHROW, "!")
+VEROPCODE(CEE_UNUSED51, "!")
+VEROPCODE(CEE_SIZEOF, "!")
+VEROPCODE(CEE_REFANYTYPE, "!")
+VEROPCODE(CEE_READONLY, ":")
+VEROPCODE(CEE_UNUSED53, "!")
+VEROPCODE(CEE_UNUSED54, "!")
+VEROPCODE(CEE_UNUSED55, "!")
+VEROPCODE(CEE_UNUSED70, "!")
+VEROPCODE(CEE_ILLEGAL, "!")
+VEROPCODE(CEE_MACRO_END, "!")
+VEROPCODE(CEE_COUNT, "!")
+
diff --git a/src/vm/virtualcallstub.cpp b/src/vm/virtualcallstub.cpp
new file mode 100644
index 0000000000..d1ad6ec16d
--- /dev/null
+++ b/src/vm/virtualcallstub.cpp
@@ -0,0 +1,4198 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: VirtualCallStub.CPP
+//
+// This file contains the virtual call stub manager and caches
+//
+
+
+
+//
+
+//
+// ============================================================================
+
+#include "common.h"
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+#include "array.h"
+#ifdef FEATURE_PREJIT
+#include "compile.h"
+#endif
+
+#ifndef DACCESS_COMPILE
+
+//@TODO: make these conditional on whether logs are being produced
+//instrumentation counters
+UINT32 g_site_counter = 0; //# of call sites
+UINT32 g_site_write = 0; //# of call site backpatch writes
+UINT32 g_site_write_poly = 0; //# of call site backpatch writes to point to resolve stubs
+UINT32 g_site_write_mono = 0; //# of call site backpatch writes to point to dispatch stubs
+
+UINT32 g_stub_lookup_counter = 0; //# of lookup stubs
+UINT32 g_stub_mono_counter = 0; //# of dispatch stubs
+UINT32 g_stub_poly_counter = 0; //# of resolve stubs
+UINT32 g_stub_space = 0; //# of bytes of stubs
+
+UINT32 g_reclaim_counter = 0; //# of times a ReclaimAll was performed
+
+UINT32 g_worker_call = 0; //# of calls into ResolveWorker
+UINT32 g_worker_call_no_patch = 0;
+UINT32 g_worker_collide_to_mono = 0; //# of times we converted a poly stub to a mono stub instead of writing the cache entry
+
+UINT32 g_external_call = 0; //# of calls into GetTarget(token, pMT)
+UINT32 g_external_call_no_patch = 0;
+
+UINT32 g_insert_cache_external = 0; //# of times Insert was called for IK_EXTERNAL
+UINT32 g_insert_cache_shared = 0; //# of times Insert was called for IK_SHARED
+UINT32 g_insert_cache_dispatch = 0; //# of times Insert was called for IK_DISPATCH
+UINT32 g_insert_cache_resolve = 0; //# of times Insert was called for IK_RESOLVE
+UINT32 g_insert_cache_hit = 0; //# of times Insert found an empty cache entry
+UINT32 g_insert_cache_miss = 0; //# of times Insert already had a matching cache entry
+UINT32 g_insert_cache_collide = 0; //# of times Insert found a used cache entry
+UINT32 g_insert_cache_write = 0; //# of times Insert wrote a cache entry
+
+UINT32 g_cache_entry_counter = 0; //# of cache structs
+UINT32 g_cache_entry_space = 0; //# of bytes used by cache lookup structs
+
+UINT32 g_call_lookup_counter = 0; //# of times lookup stubs entered
+
+UINT32 g_mono_call_counter = 0; //# of times dispatch stubs entered
+UINT32 g_mono_miss_counter = 0; //# of times expected MT did not match actual MT (dispatch stubs)
+
+UINT32 g_poly_call_counter = 0; //# of times resolve stubs entered
+UINT32 g_poly_miss_counter = 0; //# of times cache missed (resolve stub)
+
+UINT32 g_chained_lookup_call_counter = 0; //# of hits in a chained lookup
+UINT32 g_chained_lookup_miss_counter = 0; //# of misses in a chained lookup
+
+UINT32 g_chained_lookup_external_call_counter = 0; //# of hits in an external chained lookup
+UINT32 g_chained_lookup_external_miss_counter = 0; //# of misses in an external chained lookup
+
+UINT32 g_chained_entry_promoted = 0; //# of times a cache entry is promoted to the start of the chain
+
+UINT32 g_bucket_space = 0; //# of bytes in caches and tables, not including the stubs themselves
+UINT32 g_bucket_space_dead = 0; //# of bytes of abandoned buckets not yet recycled
+
+#endif // !DACCESS_COMPILE
+
+// This is the number of times a successful chain lookup will occur before the
+// entry is promoted to the front of the chain. This is declared as extern because
+// the default value (CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT) is defined in the header.
+extern size_t g_dispatch_cache_chain_success_counter;
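+
+// Illustrative sketch (not code from this file): a chained-lookup hit path
+// would consume this counter roughly as follows, where OnChainHit is a
+// hypothetical helper name:
+//
+//   void OnChainHit(ResolveCacheElem *pElem)
+//   {
+//       if (--g_dispatch_cache_chain_success_counter == 0)
+//       {
+//           g_resolveCache->PromoteChainEntry(pElem);  // move entry to the front of its chain
+//           g_dispatch_cache_chain_success_counter = CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT;
+//       }
+//   }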
+
+#define DECLARE_DATA
+#include "virtualcallstub.h"
+#undef DECLARE_DATA
+#include "profilepriv.h"
+#include "contractimpl.h"
+
+SPTR_IMPL_INIT(VirtualCallStubManagerManager, VirtualCallStubManagerManager, g_pManager, NULL);
+
+#ifndef DACCESS_COMPILE
+
+#ifdef STUB_LOGGING
+UINT32 STUB_MISS_COUNT_VALUE = 100;
+UINT32 STUB_COLLIDE_WRITE_PCT = 100;
+UINT32 STUB_COLLIDE_MONO_PCT = 0;
+#endif // STUB_LOGGING
+
+FastTable* BucketTable::dead = NULL; //linked list of the abandoned buckets
+
+DispatchCache *g_resolveCache = NULL; //cache of dispatch stubs for inline lookup by resolve stubs.
+
+size_t g_dispatch_cache_chain_success_counter = CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT;
+
+#ifdef STUB_LOGGING
+UINT32 g_resetCacheCounter;
+UINT32 g_resetCacheIncr;
+UINT32 g_dumpLogCounter;
+UINT32 g_dumpLogIncr;
+#endif // STUB_LOGGING
+
+//@TODO: use the existing logging mechanisms. For now we write to a file.
+HANDLE g_hStubLogFile;
+
+void VirtualCallStubManager::StartupLogging()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ GCX_PREEMP();
+
+ EX_TRY
+ {
+        FAULT_NOT_FATAL(); // We handle file creation problems locally
+ SString str;
+ str.Printf(W("StubLog_%d.log"), GetCurrentProcessId());
+ g_hStubLogFile = WszCreateFile (str.GetUnicode(),
+ GENERIC_WRITE,
+ 0,
+ 0,
+ CREATE_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL,
+ 0);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+
+ if (g_hStubLogFile == INVALID_HANDLE_VALUE) {
+ g_hStubLogFile = NULL;
+ }
+}
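+
+// Note: stub logging is turned on from InitStatic via
+// CLRConfig::EXTERNAL_VirtualCallStubLogging (see below). EXTERNAL config
+// values are conventionally surfaced as COMPlus_-prefixed environment
+// variables, so (assuming that mapping) something like
+//
+//   set COMPlus_VirtualCallStubLogging=1
+//
+// should produce the StubLog_<pid>.log file written above.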
+
+#define OUTPUT_FORMAT_INT "\t%-30s %d\r\n"
+#define OUTPUT_FORMAT_PCT "\t%-30s %#5.2f%%\r\n"
+#define OUTPUT_FORMAT_INT_PCT "\t%-30s %5d (%#5.2f%%)\r\n"
+
+void VirtualCallStubManager::LoggingDump()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ VirtualCallStubManagerIterator it =
+ VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();
+
+ while (it.Next())
+ {
+ it.Current()->LogStats();
+ }
+
+ g_resolveCache->LogStats();
+
+ // Temp space to use for formatting the output.
+ static const int FMT_STR_SIZE = 160;
+ char szPrintStr[FMT_STR_SIZE];
+ DWORD dwWriteByte;
+
+ if(g_hStubLogFile)
+ {
+#ifdef STUB_LOGGING
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nstub tuning parameters\r\n");
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\t%-30s %3d (0x%02x)\r\n", "STUB_MISS_COUNT_VALUE",
+ STUB_MISS_COUNT_VALUE, STUB_MISS_COUNT_VALUE);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "STUB_COLLIDE_WRITE_PCT",
+ STUB_COLLIDE_WRITE_PCT, STUB_COLLIDE_WRITE_PCT);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "STUB_COLLIDE_MONO_PCT",
+ STUB_COLLIDE_MONO_PCT, STUB_COLLIDE_MONO_PCT);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "DumpLogCounter",
+ g_dumpLogCounter, g_dumpLogCounter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+        sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "DumpLogIncr",
+                g_dumpLogIncr, g_dumpLogIncr);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "ResetCacheCounter",
+ g_resetCacheCounter, g_resetCacheCounter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+        sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "ResetCacheIncr",
+                g_resetCacheIncr, g_resetCacheIncr);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+#endif // STUB_LOGGING
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nsite data\r\n");
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ //output counters
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_counter", g_site_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_write", g_site_write);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_write_mono", g_site_write_mono);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_write_poly", g_site_write_poly);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n%-30s %d\r\n", "reclaim_counter", g_reclaim_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nstub data\r\n");
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_lookup_counter", g_stub_lookup_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_mono_counter", g_stub_mono_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_poly_counter", g_stub_poly_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_space", g_stub_space);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+#ifdef STUB_LOGGING
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nlookup stub data\r\n");
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ UINT32 total_calls = g_mono_call_counter + g_poly_call_counter;
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "lookup_call_counter", g_call_lookup_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n%-30s %d\r\n", "total stub dispatch calls", total_calls);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n%-30s %#5.2f%%\r\n", "mono stub data",
+ 100.0 * double(g_mono_call_counter)/double(total_calls));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "mono_call_counter", g_mono_call_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "mono_miss_counter", g_mono_miss_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
+ 100.0 * double(g_mono_miss_counter)/double(g_mono_call_counter));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n%-30s %#5.2f%%\r\n", "poly stub data",
+ 100.0 * double(g_poly_call_counter)/double(total_calls));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "poly_call_counter", g_poly_call_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "poly_miss_counter", g_poly_miss_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
+ 100.0 * double(g_poly_miss_counter)/double(g_poly_call_counter));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+#endif // STUB_LOGGING
+
+#ifdef CHAIN_LOOKUP
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nchain lookup data\r\n");
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+#ifdef STUB_LOGGING
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_call_counter", g_chained_lookup_call_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_miss_counter", g_chained_lookup_miss_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
+ 100.0 * double(g_chained_lookup_miss_counter)/double(g_chained_lookup_call_counter));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_external_call_counter", g_chained_lookup_external_call_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_external_miss_counter", g_chained_lookup_external_miss_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
+ 100.0 * double(g_chained_lookup_external_miss_counter)/double(g_chained_lookup_external_call_counter));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+#endif // STUB_LOGGING
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "chained_entry_promoted", g_chained_entry_promoted);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+#endif // CHAIN_LOOKUP
+
+#ifdef STUB_LOGGING
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n%-30s %#5.2f%%\r\n", "worker (slow resolver) data",
+ 100.0 * double(g_worker_call)/double(total_calls));
+#else // !STUB_LOGGING
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nworker (slow resolver) data\r\n");
+#endif // !STUB_LOGGING
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "worker_call", g_worker_call);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "worker_call_no_patch", g_worker_call_no_patch);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "external_call", g_external_call);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "external_call_no_patch", g_external_call_no_patch);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "worker_collide_to_mono", g_worker_collide_to_mono);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ UINT32 total_inserts = g_insert_cache_external
+ + g_insert_cache_shared
+ + g_insert_cache_dispatch
+ + g_insert_cache_resolve;
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n%-30s %d\r\n", "insert cache data", total_inserts);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_external", g_insert_cache_external,
+ 100.0 * double(g_insert_cache_external)/double(total_inserts));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_shared", g_insert_cache_shared,
+ 100.0 * double(g_insert_cache_shared)/double(total_inserts));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_dispatch", g_insert_cache_dispatch,
+ 100.0 * double(g_insert_cache_dispatch)/double(total_inserts));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_resolve", g_insert_cache_resolve,
+ 100.0 * double(g_insert_cache_resolve)/double(total_inserts));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_hit", g_insert_cache_hit,
+ 100.0 * double(g_insert_cache_hit)/double(total_inserts));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_miss", g_insert_cache_miss,
+ 100.0 * double(g_insert_cache_miss)/double(total_inserts));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_collide", g_insert_cache_collide,
+ 100.0 * double(g_insert_cache_collide)/double(total_inserts));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_write", g_insert_cache_write,
+ 100.0 * double(g_insert_cache_write)/double(total_inserts));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\ncache data\r\n");
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ size_t total, used;
+ g_resolveCache->GetLoadFactor(&total, &used);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_used", used);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_counter", g_cache_entry_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_space", g_cache_entry_space);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nstub hash table data\r\n");
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "bucket_space", g_bucket_space);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "bucket_space_dead", g_bucket_space_dead);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\ncache_load:\t%d used, %d total, utilization %#5.2f%%\r\n",
+ used, total, 100.0 * double(used) / double(total));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+#ifdef STUB_LOGGING
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\ncache entry write counts\r\n");
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ DispatchCache::CacheEntryData *rgCacheData = g_resolveCache->cacheData;
+ for (UINT16 i = 0; i < CALL_STUB_CACHE_SIZE; i++)
+ {
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), " %4d", rgCacheData[i]);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ if (i % 16 == 15)
+ {
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n");
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ }
+ }
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n");
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+#endif // STUB_LOGGING
+
+#if 0
+ for (unsigned i = 0; i < ContractImplMap::max_delta_count; i++)
+ {
+ if (ContractImplMap::deltasDescs[i] != 0)
+ {
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "deltasDescs[%d]\t%d\r\n", i, ContractImplMap::deltasDescs[i]);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ }
+ }
+ for (unsigned i = 0; i < ContractImplMap::max_delta_count; i++)
+ {
+ if (ContractImplMap::deltasSlots[i] != 0)
+ {
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "deltasSlots[%d]\t%d\r\n", i, ContractImplMap::deltasSlots[i]);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ }
+ }
+    sprintf_s(szPrintStr, COUNTOF(szPrintStr), "count of maps:\t%d\r\n", ContractImplMap::countMaps);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "count of interfaces:\t%d\r\n", ContractImplMap::countInterfaces);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "count of deltas:\t%d\r\n", ContractImplMap::countDelta);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "total delta for descs:\t%d\r\n", ContractImplMap::totalDeltaDescs);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "total delta for slots:\t%d\r\n", ContractImplMap::totalDeltaSlots);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+#endif // 0
+ }
+}
+
+void VirtualCallStubManager::FinishLogging()
+{
+ LoggingDump();
+
+ if(g_hStubLogFile)
+ {
+ CloseHandle(g_hStubLogFile);
+ }
+ g_hStubLogFile = NULL;
+}
+
+void VirtualCallStubManager::ResetCache()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ g_resolveCache->LogStats();
+
+ g_insert_cache_external = 0;
+ g_insert_cache_shared = 0;
+ g_insert_cache_dispatch = 0;
+ g_insert_cache_resolve = 0;
+ g_insert_cache_hit = 0;
+ g_insert_cache_miss = 0;
+ g_insert_cache_collide = 0;
+ g_insert_cache_write = 0;
+
+    // Go through the dispatch cache and unlink every entry,
+    // resetting each cache slot back to empty.
+ DispatchCache::Iterator it(g_resolveCache);
+ while (it.IsValid())
+ {
+ it.UnlinkEntry();
+ }
+
+}
+
+void VirtualCallStubManager::Init(BaseDomain *pDomain, LoaderAllocator *pLoaderAllocator)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pDomain));
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ // Record the parent domain
+ parentDomain = pDomain;
+ isCollectible = !!pLoaderAllocator->IsCollectible();
+
+ //
+ // Init critical sections
+ //
+
+ m_indCellLock.Init(CrstVSDIndirectionCellLock, CRST_UNSAFE_ANYMODE);
+
+ //
+ // Now allocate all BucketTables
+ //
+
+ NewHolder<BucketTable> resolvers_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
+ NewHolder<BucketTable> dispatchers_holder(new BucketTable(CALL_STUB_MIN_BUCKETS*2));
+ NewHolder<BucketTable> lookups_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
+ NewHolder<BucketTable> cache_entries_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
+
+ //
+ // Now allocate our LoaderHeaps
+ //
+
+ //
+    // First do some calculations to determine how many pages we
+    // will need to commit and reserve for each of our loader heaps
+ //
+ DWORD indcell_heap_reserve_size;
+ DWORD indcell_heap_commit_size;
+ DWORD cache_entry_heap_reserve_size;
+ DWORD cache_entry_heap_commit_size;
+ DWORD lookup_heap_reserve_size;
+ DWORD lookup_heap_commit_size;
+ DWORD dispatch_heap_reserve_size;
+ DWORD dispatch_heap_commit_size;
+ DWORD resolve_heap_reserve_size;
+ DWORD resolve_heap_commit_size;
+
+ //
+ // Setup an expected number of items to commit and reserve
+ //
+    // The commit number is not that important, as we always commit at least one page worth of items.
+    // The reserve number should be high enough to cover a typical large application,
+    // in order to minimize the fragmentation of our rangelists.
+ //
+
+ if (parentDomain->IsDefaultDomain())
+ {
+ indcell_heap_commit_size = 16; indcell_heap_reserve_size = 2000;
+ cache_entry_heap_commit_size = 16; cache_entry_heap_reserve_size = 800;
+
+ lookup_heap_commit_size = 24; lookup_heap_reserve_size = 250;
+ dispatch_heap_commit_size = 24; dispatch_heap_reserve_size = 600;
+ resolve_heap_commit_size = 24; resolve_heap_reserve_size = 300;
+ }
+ else if (parentDomain->IsSharedDomain())
+ {
+ indcell_heap_commit_size = 16; indcell_heap_reserve_size = 100;
+#ifdef _WIN64
+ indcell_heap_reserve_size = 2000;
+#endif
+ cache_entry_heap_commit_size = 16; cache_entry_heap_reserve_size = 500;
+
+ lookup_heap_commit_size = 24; lookup_heap_reserve_size = 200;
+ dispatch_heap_commit_size = 24; dispatch_heap_reserve_size = 450;
+ resolve_heap_commit_size = 24; resolve_heap_reserve_size = 200;
+ }
+ else
+ {
+ indcell_heap_commit_size = 8; indcell_heap_reserve_size = 8;
+ cache_entry_heap_commit_size = 8; cache_entry_heap_reserve_size = 8;
+
+ lookup_heap_commit_size = 8; lookup_heap_reserve_size = 8;
+ dispatch_heap_commit_size = 8; dispatch_heap_reserve_size = 8;
+ resolve_heap_commit_size = 8; resolve_heap_reserve_size = 8;
+ }
+
+#ifdef _WIN64
+ // If we're on 64-bit, there's a ton of address space, so reserve more space to
+ // try to avoid getting into the situation where the resolve heap is more than
+ // a rel32 jump away from the dispatch heap, since this will cause us to produce
+ // larger dispatch stubs on AMD64.
+ dispatch_heap_reserve_size *= 10;
+ resolve_heap_reserve_size *= 10;
+#endif
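+
+    // For context: a rel32 displacement is a signed 32-bit offset, so the
+    // short jump encoding only reaches targets within roughly +/-2GB of the
+    // next instruction. A sketch of the trade-off being avoided here:
+    //
+    //   jmp rel32          ; short form, requires |target - rip| < 2^31
+    //   mov rax, target    ; long form: 64-bit immediate plus indirect jump,
+    //   jmp rax            ; needed once the heaps drift out of rel32 range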
+
+ //
+    // Convert the number of items into a size in bytes to commit and reserve
+ //
+ indcell_heap_reserve_size *= sizeof(void *);
+ indcell_heap_commit_size *= sizeof(void *);
+
+ cache_entry_heap_reserve_size *= sizeof(ResolveCacheElem);
+ cache_entry_heap_commit_size *= sizeof(ResolveCacheElem);
+
+ lookup_heap_reserve_size *= sizeof(LookupHolder);
+ lookup_heap_commit_size *= sizeof(LookupHolder);
+
+ DWORD dispatchHolderSize = sizeof(DispatchHolder);
+#ifdef _TARGET_AMD64_
+ dispatchHolderSize = static_cast<DWORD>(DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_SHORT));
+#endif
+
+ dispatch_heap_reserve_size *= dispatchHolderSize;
+ dispatch_heap_commit_size *= dispatchHolderSize;
+
+ resolve_heap_reserve_size *= sizeof(ResolveHolder);
+ resolve_heap_commit_size *= sizeof(ResolveHolder);
+
+ //
+ // Align up all of the commit and reserve sizes
+ //
+ indcell_heap_reserve_size = (DWORD) ALIGN_UP(indcell_heap_reserve_size, PAGE_SIZE);
+ indcell_heap_commit_size = (DWORD) ALIGN_UP(indcell_heap_commit_size, PAGE_SIZE);
+
+ cache_entry_heap_reserve_size = (DWORD) ALIGN_UP(cache_entry_heap_reserve_size, PAGE_SIZE);
+ cache_entry_heap_commit_size = (DWORD) ALIGN_UP(cache_entry_heap_commit_size, PAGE_SIZE);
+
+ lookup_heap_reserve_size = (DWORD) ALIGN_UP(lookup_heap_reserve_size, PAGE_SIZE);
+ lookup_heap_commit_size = (DWORD) ALIGN_UP(lookup_heap_commit_size, PAGE_SIZE);
+
+ dispatch_heap_reserve_size = (DWORD) ALIGN_UP(dispatch_heap_reserve_size, PAGE_SIZE);
+ dispatch_heap_commit_size = (DWORD) ALIGN_UP(dispatch_heap_commit_size, PAGE_SIZE);
+
+ resolve_heap_reserve_size = (DWORD) ALIGN_UP(resolve_heap_reserve_size, PAGE_SIZE);
+ resolve_heap_commit_size = (DWORD) ALIGN_UP(resolve_heap_commit_size, PAGE_SIZE);
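+
+    // For example, with a 4KB PAGE_SIZE, ALIGN_UP(0x1234, PAGE_SIZE) yields
+    // 0x2000, while an already page-aligned size such as 0x2000 is unchanged.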
+
+ BYTE * initReservedMem = NULL;
+
+ if (!isCollectible)
+ {
+ DWORD dwTotalReserveMemSizeCalc = indcell_heap_reserve_size +
+ cache_entry_heap_reserve_size +
+ lookup_heap_reserve_size +
+ dispatch_heap_reserve_size +
+ resolve_heap_reserve_size;
+
+ DWORD dwTotalReserveMemSize = (DWORD) ALIGN_UP(dwTotalReserveMemSizeCalc, VIRTUAL_ALLOC_RESERVE_GRANULARITY);
+
+ // If there's wasted reserved memory, we hand this out to the heaps to avoid waste.
+ {
+ DWORD dwWastedReserveMemSize = dwTotalReserveMemSize - dwTotalReserveMemSizeCalc;
+ if (dwWastedReserveMemSize != 0)
+ {
+ DWORD cWastedPages = dwWastedReserveMemSize / PAGE_SIZE;
+ DWORD cPagesPerHeap = cWastedPages / 5;
+ DWORD cPagesRemainder = cWastedPages % 5; // We'll throw this at the resolve heap
+
+ indcell_heap_reserve_size += cPagesPerHeap * PAGE_SIZE;
+ cache_entry_heap_reserve_size += cPagesPerHeap * PAGE_SIZE;
+ lookup_heap_reserve_size += cPagesPerHeap * PAGE_SIZE;
+ dispatch_heap_reserve_size += cPagesPerHeap * PAGE_SIZE;
+ resolve_heap_reserve_size += cPagesPerHeap * PAGE_SIZE;
+ resolve_heap_reserve_size += cPagesRemainder * PAGE_SIZE;
+ }
+
+ CONSISTENCY_CHECK((indcell_heap_reserve_size +
+ cache_entry_heap_reserve_size +
+ lookup_heap_reserve_size +
+ dispatch_heap_reserve_size +
+ resolve_heap_reserve_size) ==
+ dwTotalReserveMemSize);
+ }
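+
+        // Worked example (hypothetical numbers): if rounding up to
+        // VIRTUAL_ALLOC_RESERVE_GRANULARITY leaves 13 wasted pages, each of
+        // the five heaps gains 13/5 = 2 extra pages and the 13%5 = 3
+        // remaining pages all go to the resolve heap.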
+
+ initReservedMem = ClrVirtualAllocExecutable (dwTotalReserveMemSize, MEM_RESERVE, PAGE_NOACCESS);
+
+ m_initialReservedMemForHeaps = (BYTE *) initReservedMem;
+
+ if (initReservedMem == NULL)
+ COMPlusThrowOM();
+ }
+ else
+ {
+ indcell_heap_reserve_size = PAGE_SIZE;
+ indcell_heap_commit_size = PAGE_SIZE;
+
+ cache_entry_heap_reserve_size = PAGE_SIZE;
+ cache_entry_heap_commit_size = PAGE_SIZE;
+
+ lookup_heap_reserve_size = PAGE_SIZE;
+ lookup_heap_commit_size = PAGE_SIZE;
+
+ dispatch_heap_reserve_size = PAGE_SIZE;
+ dispatch_heap_commit_size = PAGE_SIZE;
+
+ resolve_heap_reserve_size = PAGE_SIZE;
+ resolve_heap_commit_size = PAGE_SIZE;
+
+#ifdef _DEBUG
+ DWORD dwTotalReserveMemSizeCalc = indcell_heap_reserve_size +
+ cache_entry_heap_reserve_size +
+ lookup_heap_reserve_size +
+ dispatch_heap_reserve_size +
+ resolve_heap_reserve_size;
+#endif
+
+ DWORD dwActualVSDSize = 0;
+
+ initReservedMem = pLoaderAllocator->GetVSDHeapInitialBlock(&dwActualVSDSize);
+ _ASSERTE(dwActualVSDSize == dwTotalReserveMemSizeCalc);
+
+ m_initialReservedMemForHeaps = (BYTE *) initReservedMem;
+
+ if (initReservedMem == NULL)
+ COMPlusThrowOM();
+ }
+
+ // Hot memory, Writable, No-Execute, infrequent writes
+ NewHolder<LoaderHeap> indcell_heap_holder(
+ new LoaderHeap(indcell_heap_reserve_size, indcell_heap_commit_size,
+ initReservedMem, indcell_heap_reserve_size,
+#ifdef ENABLE_PERF_COUNTERS
+ &(GetPerfCounters().m_Loading.cbLoaderHeapSize),
+#else
+ NULL,
+#endif
+ NULL, FALSE));
+
+ initReservedMem += indcell_heap_reserve_size;
+
+ // Hot memory, Writable, No-Execute, infrequent writes
+ NewHolder<LoaderHeap> cache_entry_heap_holder(
+ new LoaderHeap(cache_entry_heap_reserve_size, cache_entry_heap_commit_size,
+ initReservedMem, cache_entry_heap_reserve_size,
+#ifdef ENABLE_PERF_COUNTERS
+ &(GetPerfCounters().m_Loading.cbLoaderHeapSize),
+#else
+ NULL,
+#endif
+ &cache_entry_rangeList, FALSE));
+
+ initReservedMem += cache_entry_heap_reserve_size;
+
+ // Warm memory, Writable, Execute, write exactly once
+ NewHolder<LoaderHeap> lookup_heap_holder(
+ new LoaderHeap(lookup_heap_reserve_size, lookup_heap_commit_size,
+ initReservedMem, lookup_heap_reserve_size,
+#ifdef ENABLE_PERF_COUNTERS
+ &(GetPerfCounters().m_Loading.cbLoaderHeapSize),
+#else
+ NULL,
+#endif
+ &lookup_rangeList, TRUE));
+
+ initReservedMem += lookup_heap_reserve_size;
+
+ // Hot memory, Writable, Execute, write exactly once
+ NewHolder<LoaderHeap> dispatch_heap_holder(
+ new LoaderHeap(dispatch_heap_reserve_size, dispatch_heap_commit_size,
+ initReservedMem, dispatch_heap_reserve_size,
+#ifdef ENABLE_PERF_COUNTERS
+ &(GetPerfCounters().m_Loading.cbLoaderHeapSize),
+#else
+ NULL,
+#endif
+ &dispatch_rangeList, TRUE));
+
+ initReservedMem += dispatch_heap_reserve_size;
+
+ // Hot memory, Writable, Execute, write exactly once
+ NewHolder<LoaderHeap> resolve_heap_holder(
+ new LoaderHeap(resolve_heap_reserve_size, resolve_heap_commit_size,
+ initReservedMem, resolve_heap_reserve_size,
+#ifdef ENABLE_PERF_COUNTERS
+ &(GetPerfCounters().m_Loading.cbLoaderHeapSize),
+#else
+ NULL,
+#endif
+ &resolve_rangeList, TRUE));
+
+ initReservedMem += resolve_heap_reserve_size;
+
+ // Allocate the initial counter block
+ NewHolder<counter_block> m_counters_holder(new counter_block);
+
+ //
+ // On success of every allocation, assign the objects and suppress the release
+ //
+
+ indcell_heap = indcell_heap_holder; indcell_heap_holder.SuppressRelease();
+ lookup_heap = lookup_heap_holder; lookup_heap_holder.SuppressRelease();
+ dispatch_heap = dispatch_heap_holder; dispatch_heap_holder.SuppressRelease();
+ resolve_heap = resolve_heap_holder; resolve_heap_holder.SuppressRelease();
+ cache_entry_heap = cache_entry_heap_holder; cache_entry_heap_holder.SuppressRelease();
+
+ resolvers = resolvers_holder; resolvers_holder.SuppressRelease();
+ dispatchers = dispatchers_holder; dispatchers_holder.SuppressRelease();
+ lookups = lookups_holder; lookups_holder.SuppressRelease();
+
+ cache_entries = cache_entries_holder; cache_entries_holder.SuppressRelease();
+
+ m_counters = m_counters_holder; m_counters_holder.SuppressRelease();
+
+ // Create the initial failure counter block
+ m_counters->next = NULL;
+ m_counters->used = 0;
+ m_cur_counter_block = m_counters;
+
+ m_cur_counter_block_for_reclaim = m_counters;
+ m_cur_counter_block_for_reclaim_index = 0;
+
+ // Keep track of all of our managers
+ VirtualCallStubManagerManager::GlobalManager()->AddStubManager(this);
+}
+
+void VirtualCallStubManager::Uninit()
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (isCollectible)
+ {
+ parentDomain->GetCollectibleVSDRanges()->RemoveRanges(this);
+ }
+
+ // Keep track of all our managers
+ VirtualCallStubManagerManager::GlobalManager()->RemoveStubManager(this);
+}
+
+VirtualCallStubManager::~VirtualCallStubManager()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ } CONTRACTL_END;
+
+ LogStats();
+
+ // Go through each cache entry and if the cache element there is in
+ // the cache entry heap of the manager being deleted, then we just
+ // set the cache entry to empty.
+ DispatchCache::Iterator it(g_resolveCache);
+ while (it.IsValid())
+ {
+ // Using UnlinkEntry performs an implicit call to Next (see comment for UnlinkEntry).
+ // Thus, we need to avoid calling Next when we delete an entry so
+ // that we don't accidentally skip entries.
+ while (it.IsValid() && cache_entry_rangeList.IsInRange((TADDR)it.Entry()))
+ {
+ it.UnlinkEntry();
+ }
+ it.Next();
+ }
+
+ if (indcell_heap) { delete indcell_heap; indcell_heap = NULL;}
+ if (lookup_heap) { delete lookup_heap; lookup_heap = NULL;}
+ if (dispatch_heap) { delete dispatch_heap; dispatch_heap = NULL;}
+ if (resolve_heap) { delete resolve_heap; resolve_heap = NULL;}
+ if (cache_entry_heap) { delete cache_entry_heap; cache_entry_heap = NULL;}
+
+ if (resolvers) { delete resolvers; resolvers = NULL;}
+ if (dispatchers) { delete dispatchers; dispatchers = NULL;}
+ if (lookups) { delete lookups; lookups = NULL;}
+ if (cache_entries) { delete cache_entries; cache_entries = NULL;}
+
+ // Now get rid of the memory taken by the counter_blocks
+ while (m_counters != NULL)
+ {
+ counter_block *del = m_counters;
+ m_counters = m_counters->next;
+ delete del;
+ }
+
+ // This was the block reserved by Init for the heaps.
+ // For the collectible case, the VSD logic does not allocate the memory.
+ if (m_initialReservedMemForHeaps && !isCollectible)
+ ClrVirtualFree (m_initialReservedMemForHeaps, 0, MEM_RELEASE);
+
+ // Free critical section
+ m_indCellLock.Destroy();
+}
+
+// Initialize static structures, and start up logging if necessary
+void VirtualCallStubManager::InitStatic()
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef STUB_LOGGING
+ // Note if you change these values using environment variables then you must use hex values :-(
+ STUB_MISS_COUNT_VALUE = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubMissCount);
+ STUB_COLLIDE_WRITE_PCT = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubCollideWritePct);
+ STUB_COLLIDE_MONO_PCT = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubCollideMonoPct);
+ g_dumpLogCounter = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubDumpLogCounter);
+ g_dumpLogIncr = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubDumpLogIncr);
+ g_resetCacheCounter = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubResetCacheCounter);
+ g_resetCacheIncr = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubResetCacheIncr);
+#endif // STUB_LOGGING
+
+#ifndef STUB_DISPATCH_PORTABLE
+ DispatchHolder::InitializeStatic();
+ ResolveHolder::InitializeStatic();
+#endif // !STUB_DISPATCH_PORTABLE
+ LookupHolder::InitializeStatic();
+
+ g_resolveCache = new DispatchCache();
+
+ if(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_VirtualCallStubLogging))
+ StartupLogging();
+
+ VirtualCallStubManagerManager::InitStatic();
+}
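+
+// Per the note above, the STUB_LOGGING tuning knobs read from the environment
+// are interpreted as hex. Assuming the usual COMPlus_ mapping for CLRConfig
+// values, a hypothetical example:
+//
+//   set COMPlus_VirtualCallStubDumpLogCounter=2710    (hex 0x2710 = 10000 calls)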
+
+// Static shutdown code.
+// At the moment, this doesn't do anything more than log statistics.
+void VirtualCallStubManager::UninitStatic()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ if (g_hStubLogFile != NULL)
+ {
+ VirtualCallStubManagerIterator it =
+ VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();
+ while (it.Next())
+ {
+ it.Current()->LogStats();
+ }
+
+ g_resolveCache->LogStats();
+
+ FinishLogging();
+ }
+}
+
+/* Reclaim/rearrange any structures that can only be done during a GC sync point,
+i.e. that need to be serialized and non-concurrent. */
+void VirtualCallStubManager::ReclaimAll()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+    /* @todo: if/when app domain unloading is supported,
+    and when we have app domain specific stub heaps, we can complete the unloading
+    of an app domain stub heap at this point, and make any patches to existing stubs that are
+    not being unloaded so that they no longer refer to any of the unloaded app domain's code or types
+    */
+
+ //reclaim space of abandoned buckets
+ BucketTable::Reclaim();
+
+ VirtualCallStubManagerIterator it =
+ VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();
+ while (it.Next())
+ {
+ it.Current()->Reclaim();
+ }
+
+ g_reclaim_counter++;
+}
+
+/* Reclaim/rearrange any structures that can only be done during a GC sync point,
+i.e. that need to be serialized and non-concurrent. */
+void VirtualCallStubManager::Reclaim()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ UINT32 limit = min(counter_block::MAX_COUNTER_ENTRIES,
+ m_cur_counter_block_for_reclaim->used);
+ limit = min(m_cur_counter_block_for_reclaim_index + 16, limit);
+
+ for (UINT32 i = m_cur_counter_block_for_reclaim_index; i < limit; i++)
+ {
+ m_cur_counter_block_for_reclaim->block[i] += (STUB_MISS_COUNT_VALUE/10)+1;
+ }
+
+ // Increment the index by the number we processed
+ m_cur_counter_block_for_reclaim_index = limit;
+
+ // If we ran to the end of the block, go to the next
+    if (m_cur_counter_block_for_reclaim_index == m_cur_counter_block_for_reclaim->used)
+ {
+ m_cur_counter_block_for_reclaim = m_cur_counter_block_for_reclaim->next;
+ m_cur_counter_block_for_reclaim_index = 0;
+
+ // If this was the last block in the chain, go back to the beginning
+ if (m_cur_counter_block_for_reclaim == NULL)
+ m_cur_counter_block_for_reclaim = m_counters;
+ }
+}
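+
+// Rough arithmetic for the decay above (a sketch, assuming the default
+// STUB_MISS_COUNT_VALUE of 100): each Reclaim pass adds (100/10)+1 = 11 back
+// to at most 16 counters per manager, so a dispatch stub is demoted to a
+// resolve stub only when its call site keeps missing faster than GC-time
+// reclaims can refill its failure counter.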
+
+#endif // !DACCESS_COMPILE
+
+//----------------------------------------------------------------------------
+/* static */
+VirtualCallStubManager *VirtualCallStubManager::FindStubManager(PCODE stubAddress, StubKind* wbStubKind)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ } CONTRACTL_END
+
+#ifndef DACCESS_COMPILE
+ VirtualCallStubManager *pCur;
+ StubKind kind;
+
+ //
+ // See if we are managed by the current domain
+ //
+ AppDomain *pDomain = GetThread()->GetDomain();
+ pCur = pDomain->GetLoaderAllocator()->GetVirtualCallStubManager();
+ // For the following call stack:
+ // SimpleRWLock::TryEnterRead
+ // SimpleRWLock::EnterRead
+ // LockedRangeList::IsInRangeWorker
+ // VirtualCallStubManager::isDispatchingStub
+ //
+ CONTRACT_VIOLATION(SOToleranceViolation);
+ kind = pCur->getStubKind(stubAddress);
+ if (kind != SK_UNKNOWN)
+ {
+ if (wbStubKind)
+ *wbStubKind = kind;
+ return pCur;
+ }
+
+ //
+ // See if we are managed by the shared domain
+ //
+ pCur = SharedDomain::GetDomain()->GetLoaderAllocator()->GetVirtualCallStubManager();
+ kind = pCur->getStubKind(stubAddress);
+ if (kind != SK_UNKNOWN)
+ {
+ if (wbStubKind)
+ *wbStubKind = kind;
+ return pCur;
+ }
+
+ //
+ // See if we are managed by a collectible loader allocator
+ //
+ if (pDomain->GetCollectibleVSDRanges()->IsInRange(stubAddress, reinterpret_cast<TADDR *>(&pCur)))
+ {
+ _ASSERTE(pCur != NULL);
+
+ kind = pCur->getStubKind(stubAddress);
+ if (kind != SK_UNKNOWN)
+ {
+ if (wbStubKind)
+ *wbStubKind = kind;
+ return pCur;
+ }
+ }
+
+ if (wbStubKind)
+ *wbStubKind = SK_UNKNOWN;
+
+#else // DACCESS_COMPILE
+ _ASSERTE(!"DACCESS Not implemented.");
+#endif // DACCESS_COMPILE
+
+ return NULL;
+}
+
+/* for use by debugger.
+*/
+BOOL VirtualCallStubManager::CheckIsStub_Internal(PCODE stubStartAddress)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+ SUPPORTS_DAC;
+
+ BOOL fIsOwner = isStub(stubStartAddress);
+
+#if defined(_TARGET_X86_) && defined(FEATURE_PREJIT)
+ if (!fIsOwner && parentDomain->IsDefaultDomain())
+ {
+ fIsOwner = (stubStartAddress == GetEEFuncEntryPoint(StubDispatchFixupStub));
+ }
+#endif // defined(_TARGET_X86_) && defined(FEATURE_PREJIT)
+
+ return fIsOwner;
+}
+
+/* for use by debugger.
+*/
+
+extern "C" void STDCALL StubDispatchFixupPatchLabel();
+
+BOOL VirtualCallStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(CheckIsStub_Internal(stubStartAddress));
+
+#ifdef FEATURE_PREJIT
+ if (stubStartAddress == GetEEFuncEntryPoint(StubDispatchFixupStub))
+ {
+ trace->InitForManagerPush(GetEEFuncEntryPoint(StubDispatchFixupPatchLabel), this);
+ return TRUE;
+ }
+#endif
+
+ // @workaround: Well, we really need the context to figure out where we're going, so
+ // we'll do a TRACE_MGR_PUSH so that TraceManager gets called and we can use
+ // the provided context to figure out where we're going.
+ trace->InitForManagerPush(stubStartAddress, this);
+ return TRUE;
+}
+
+//----------------------------------------------------------------------------
+BOOL VirtualCallStubManager::TraceManager(Thread *thread,
+ TraceDestination *trace,
+ T_CONTEXT *pContext,
+ BYTE **pRetAddr)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+#ifdef FEATURE_PREJIT
+ // This is the case for the lazy slot fixup
+ if (GetIP(pContext) == GFN_TADDR(StubDispatchFixupPatchLabel)) {
+
+ *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext);
+
+ // The destination for the virtual invocation
+ return StubManager::TraceStub(StubManagerHelpers::GetTailCallTarget(pContext), trace);
+ }
+#endif // FEATURE_PREJIT
+
+ TADDR pStub = GetIP(pContext);
+
+ // The return address should be on the top of the stack
+ *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext);
+
+ // Get the token from the stub
+ CONSISTENCY_CHECK(isStub(pStub));
+ size_t token = GetTokenFromStub(pStub);
+
+ // Get the this object from ECX
+ Object *pObj = StubManagerHelpers::GetThisPtr(pContext);
+
+ // Call common trace code.
+ return (TraceResolver(pObj, token, trace));
+}
+
+#ifndef DACCESS_COMPILE
+
+PCODE VirtualCallStubManager::GetCallStub(TypeHandle ownerType, MethodDesc *pMD)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(!pMD->IsInterface() || ownerType.GetMethodTable()->HasSameTypeDefAs(pMD->GetMethodTable()));
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ return GetCallStub(ownerType, pMD->GetSlot());
+}
+
+//find or create a stub
+PCODE VirtualCallStubManager::GetCallStub(TypeHandle ownerType, DWORD slot)
+{
+ CONTRACT (PCODE) {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ POSTCONDITION(RETVAL != NULL);
+ } CONTRACT_END;
+
+ GCX_COOP(); // This is necessary for BucketTable synchronization
+
+ MethodTable * pMT = ownerType.GetMethodTable();
+
+ DispatchToken token;
+ if (pMT->IsInterface())
+ token = pMT->GetLoaderAllocator()->GetDispatchToken(pMT->GetTypeID(), slot);
+ else
+ token = DispatchToken::CreateDispatchToken(slot);
+
+    //get a stub from lookups; make one if necessary
+ PCODE stub = CALL_STUB_EMPTY_ENTRY;
+ PCODE addrOfResolver = GetEEFuncEntryPoint(ResolveWorkerAsmStub);
+
+ LookupEntry entryL;
+ Prober probeL(&entryL);
+ if (lookups->SetUpProber(token.To_SIZE_T(), 0, &probeL))
+ {
+ if ((stub = (PCODE)(lookups->Find(&probeL))) == CALL_STUB_EMPTY_ENTRY)
+ {
+ LookupHolder *pLookupHolder = GenerateLookupStub(addrOfResolver, token.To_SIZE_T());
+ stub = (PCODE) (lookups->Add((size_t)(pLookupHolder->stub()->entryPoint()), &probeL));
+ }
+ }
+
+ _ASSERTE(stub != CALL_STUB_EMPTY_ENTRY);
+ stats.site_counter++;
+
+ RETURN (stub);
+}
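+
+// Typical use (sketch): callers obtain the stub-dispatch entry point to burn
+// into a call site from the manager that owns the caller's loader allocator,
+// e.g. (accessor chain assumed, following the patterns used elsewhere in
+// this file):
+//
+//   VirtualCallStubManager *pMgr =
+//       pModule->GetLoaderAllocator()->GetVirtualCallStubManager();
+//   PCODE entry = pMgr->GetCallStub(ownerType, pMD);
+//
+// The returned stub starts life as a lookup stub; ResolveWorker later
+// backpatches the site to a dispatch or resolve stub as it is exercised.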
+
+#ifdef FEATURE_PREJIT
+extern "C" PCODE STDCALL StubDispatchFixupWorker(TransitionBlock * pTransitionBlock,
+ TADDR siteAddrForRegisterIndirect,
+ DWORD sectionIndex,
+ Module * pModule)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ ENTRY_POINT;
+ } CONTRACTL_END;
+
+ PCODE pTarget = NULL;
+
+ MAKE_CURRENT_THREAD_AVAILABLE();
+
+ FrameWithCookie<StubDispatchFrame> frame(pTransitionBlock);
+ StubDispatchFrame * pSDFrame = &frame;
+
+ PCODE returnAddress = pSDFrame->GetUnadjustedReturnAddress();
+
+ StubCallSite callSite(siteAddrForRegisterIndirect, returnAddress);
+
+ TADDR pIndirectCell = (TADDR)callSite.GetIndirectCell();
+
+ // FUTURE: Consider always passing in module and section index to avoid the lookups
+ if (pModule == NULL)
+ {
+ pModule = ExecutionManager::FindZapModule(pIndirectCell);
+ sectionIndex = (DWORD)-1;
+ }
+ _ASSERTE(pModule != NULL);
+
+ pSDFrame->SetCallSite(pModule, pIndirectCell);
+
+ pSDFrame->Push(CURRENT_THREAD);
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+ PEImageLayout *pNativeImage = pModule->GetNativeOrReadyToRunImage();
+
+ DWORD rva = pNativeImage->GetDataRva(pIndirectCell);
+
+ PTR_CORCOMPILE_IMPORT_SECTION pImportSection;
+ if (sectionIndex != (DWORD) -1)
+ {
+ pImportSection = pModule->GetImportSectionFromIndex(sectionIndex);
+ _ASSERTE(pImportSection == pModule->GetImportSectionForRVA(rva));
+ }
+ else
+ {
+ pImportSection = pModule->GetImportSectionForRVA(rva);
+ }
+ _ASSERTE(pImportSection != NULL);
+
+ _ASSERTE(pImportSection->EntrySize == sizeof(TADDR));
+
+ COUNT_T index = (rva - VAL32(pImportSection->Section.VirtualAddress)) / sizeof(TADDR);
+
+ // Get the stub manager for this module
+ VirtualCallStubManager *pMgr = pModule->GetLoaderAllocator()->GetVirtualCallStubManager();
+
+ // Force a GC on every jit if the stress level is high enough
+ GCStress<cfg_any>::MaybeTrigger();
+
+ // Get the data section
+ PTR_DWORD pSignatures = dac_cast<PTR_DWORD>(pNativeImage->GetRvaData(pImportSection->Signatures));
+
+ PCCOR_SIGNATURE pBlob = (BYTE *)pNativeImage->GetRvaData(pSignatures[index]);
+
+ BYTE kind = *pBlob++;
+
+ Module * pInfoModule = pModule;
+ if (kind & ENCODE_MODULE_OVERRIDE)
+ {
+ DWORD moduleIndex = CorSigUncompressData(pBlob);
+ pInfoModule = pModule->GetModuleFromIndex(moduleIndex);
+ kind &= ~ENCODE_MODULE_OVERRIDE;
+ }
+ _ASSERTE(kind == ENCODE_VIRTUAL_ENTRY_SLOT);
+
+ DWORD slot = CorSigUncompressData(pBlob);
+
+ TypeHandle ownerType = ZapSig::DecodeType(pModule, pInfoModule, pBlob);
+
+ MethodTable * pMT = ownerType.GetMethodTable();
+
+ DispatchToken token;
+ if (pMT->IsInterface())
+ token = pMT->GetLoaderAllocator()->GetDispatchToken(pMT->GetTypeID(), slot);
+ else
+ token = DispatchToken::CreateDispatchToken(slot);
+
+ OBJECTREF pObj = pSDFrame->GetThis();
+ if (pObj == NULL) {
+ COMPlusThrow(kNullReferenceException);
+ }
+
+ pTarget = pMgr->ResolveWorker(&callSite, pObj, token, VirtualCallStubManager::SK_LOOKUP);
+ _ASSERTE(pTarget != NULL);
+
+ // Ready to return
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ pSDFrame->Pop(CURRENT_THREAD);
+
+ return pTarget;
+}
+#endif // FEATURE_PREJIT
+
+//+----------------------------------------------------------------------------
+//
+// Method: VirtualCallStubManager::GenerateStubIndirection
+//
+// Synopsis: This method allocates an indirection cell for use by the virtual stub dispatch (currently
+// only implemented for interface calls).
+// For normal methods: the indirection cell allocated will never be freed until app domain unload
+// For dynamic methods: we recycle the indirection cells when a dynamic method is collected. To
+// do that we keep all the recycled indirection cells in a linked list: m_RecycledIndCellList. When
+// the dynamic method needs an indirection cell it allocates one from m_RecycledIndCellList. Each
+// dynamic method keeps track of all the indirection cells it uses and add them back to
+// m_RecycledIndCellList when it is finalized.
+//
+//+----------------------------------------------------------------------------
+BYTE *VirtualCallStubManager::GenerateStubIndirection(PCODE target, BOOL fUseRecycledCell /* = FALSE*/ )
+{
+ CONTRACT (BYTE*) {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(target != NULL);
+ POSTCONDITION(CheckPointer(RETVAL));
+ } CONTRACT_END;
+
+ _ASSERTE(isStub(target));
+
+ CrstHolder lh(&m_indCellLock);
+
+ // The indirection cell to hold the pointer to the stub
+ BYTE * ret = NULL;
+ UINT32 cellsPerBlock = INDCELLS_PER_BLOCK;
+
+ // First try the recycled indirection cell list for Dynamic methods
+ if (fUseRecycledCell)
+ ret = GetOneRecycledIndCell();
+
+ // Try the free indirection cell list
+ if (!ret)
+ ret = GetOneFreeIndCell();
+
+ // Allocate from loader heap
+ if (!ret)
+ {
+ // Free list is empty, allocate a block of indcells from indcell_heap and insert it into the free list.
+ BYTE ** pBlock = (BYTE **) (void *) indcell_heap->AllocMem(S_SIZE_T(cellsPerBlock) * S_SIZE_T(sizeof(BYTE *)));
+
+ // return the first cell in the block and add the rest to the free list
+ ret = (BYTE *)pBlock;
+
+ // link all the cells together
+ // we don't need to null terminate the linked list, InsertIntoFreeIndCellList will do it.
+ for (UINT32 i = 1; i < cellsPerBlock - 1; ++i)
+ {
+ pBlock[i] = (BYTE *)&(pBlock[i+1]);
+ }
+
+ // insert the list into the free indcell list.
+ InsertIntoFreeIndCellList((BYTE *)&pBlock[1], (BYTE*)&pBlock[cellsPerBlock - 1]);
+ }
+
+ *((PCODE *)ret) = target;
+ RETURN ret;
+}
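+
+// Shape of the block allocated above (sketch): cell 0 is handed back to the
+// caller, while cells 1..N-1 are chained through their own storage and
+// donated to the free list:
+//
+//   pBlock[0] -> target                           (the returned cell)
+//   pBlock[1] -> pBlock[2] -> ... -> pBlock[N-1]  (free list; the tail is
+//                                                  null-terminated by
+//                                                  InsertIntoFreeIndCellList)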
+
+ResolveCacheElem *VirtualCallStubManager::GetResolveCacheElem(void *pMT,
+ size_t token,
+ void *target)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+    //get a cache entry elem, or make one if necessary
+ ResolveCacheElem* elem = NULL;
+ ResolveCacheEntry entryRC;
+ Prober probeRC(&entryRC);
+ if (cache_entries->SetUpProber(token, (size_t) pMT, &probeRC))
+ {
+ elem = (ResolveCacheElem*) (cache_entries->Find(&probeRC));
+ if (elem == CALL_STUB_EMPTY_ENTRY)
+ {
+ elem = GenerateResolveCacheElem(target, pMT, token);
+ elem = (ResolveCacheElem*) (cache_entries->Add((size_t) elem, &probeRC));
+ }
+ }
+ _ASSERTE(elem && (elem != CALL_STUB_EMPTY_ENTRY));
+ return elem;
+}
+
+#endif // !DACCESS_COMPILE
+
+size_t VirtualCallStubManager::GetTokenFromStub(PCODE stub)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(stub != NULL);
+ StubKind stubKind = SK_UNKNOWN;
+ VirtualCallStubManager * pMgr = FindStubManager(stub, &stubKind);
+
+ return GetTokenFromStubQuick(pMgr, stub, stubKind);
+}
+
+size_t VirtualCallStubManager::GetTokenFromStubQuick(VirtualCallStubManager * pMgr, PCODE stub, StubKind kind)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ _ASSERTE(pMgr != NULL);
+ _ASSERTE(stub != NULL);
+ _ASSERTE(kind != SK_UNKNOWN);
+
+#ifndef DACCESS_COMPILE
+
+ if (kind == SK_DISPATCH)
+ {
+ _ASSERTE(pMgr->isDispatchingStub(stub));
+ DispatchStub * dispatchStub = (DispatchStub *) PCODEToPINSTR(stub);
+ ResolveHolder * resolveHolder = ResolveHolder::FromFailEntry(dispatchStub->failTarget());
+ _ASSERTE(pMgr->isResolvingStub(resolveHolder->stub()->resolveEntryPoint()));
+ return resolveHolder->stub()->token();
+ }
+ else if (kind == SK_RESOLVE)
+ {
+ _ASSERTE(pMgr->isResolvingStub(stub));
+ ResolveHolder * resolveHolder = ResolveHolder::FromResolveEntry(stub);
+ return resolveHolder->stub()->token();
+ }
+ else if (kind == SK_LOOKUP)
+ {
+ _ASSERTE(pMgr->isLookupStub(stub));
+ LookupHolder * lookupHolder = LookupHolder::FromLookupEntry(stub);
+ return lookupHolder->stub()->token();
+ }
+
+ _ASSERTE(!"Should not get here.");
+
+#else // DACCESS_COMPILE
+
+ DacNotImpl();
+
+#endif // DACCESS_COMPILE
+
+ return 0;
+}
+
+#ifndef DACCESS_COMPILE
+
+#ifdef CHAIN_LOOKUP
+ResolveCacheElem* __fastcall VirtualCallStubManager::PromoteChainEntry(ResolveCacheElem *pElem)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pElem));
+ } CONTRACTL_END;
+
+ // @todo - Remove this when have a probe that generates a hard SO.
+ CONTRACT_VIOLATION(SOToleranceViolation);
+ g_resolveCache->PromoteChainEntry(pElem);
+ return pElem;
+}
+#endif // CHAIN_LOOKUP
+
+/* Resolve to a method and return its address or NULL if there is none.
+ Our return value is the target address that control should continue to. Our caller will
+ enter the target address as if a direct call with the original stack frame had been made from
+ the actual call site. Hence our strategy is to either return a target address
+ of the actual method implementation, or the prestub if we cannot find the actual implementation.
+ If we are returning a real method address, we may patch the original call site to point to a
+ dispatching stub before returning. Note that if we encounter a method that hasn't been jitted
+ yet, we will return the prestub, which should cause it to be jitted, and we will
+ be able to build the dispatching stub on a later call through the call site. If we encounter
+ any other kind of problem, rather than throwing an exception, we will also return the
+ prestub, unless we are unable to find the method at all, in which case we return NULL.
+ */
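+/* In outline, the possible outcomes described above (a sketch):
+
+     real method address  -> the call site may also be backpatched to a
+                             dispatch or resolve stub on the way back out
+     prestub              -> the method isn't jitted yet (or we hit a benign
+                             failure); a later call through the site will
+                             build the real stub
+     NULL                 -> the method could not be found at all
+*/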
+PCODE VSD_ResolveWorker(TransitionBlock * pTransitionBlock,
+ TADDR siteAddrForRegisterIndirect,
+ size_t token
+#ifndef _TARGET_X86_
+ , UINT_PTR flags
+#endif
+ )
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pTransitionBlock));
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ MAKE_CURRENT_THREAD_AVAILABLE();
+
+ FrameWithCookie<StubDispatchFrame> frame(pTransitionBlock);
+ StubDispatchFrame * pSDFrame = &frame;
+
+ PCODE returnAddress = pSDFrame->GetUnadjustedReturnAddress();
+
+ StubCallSite callSite(siteAddrForRegisterIndirect, returnAddress);
+
+ OBJECTREF pObj = pSDFrame->GetThis();
+
+ PCODE target = NULL;
+
+ if (pObj == NULL) {
+ pSDFrame->SetForNullReferenceException();
+ pSDFrame->Push(CURRENT_THREAD);
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ COMPlusThrow(kNullReferenceException);
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ _ASSERTE(!"Throw returned");
+ }
+
+#ifndef _TARGET_X86_
+ if (flags & SDF_ResolvePromoteChain)
+ {
+ BEGIN_SO_INTOLERANT_CODE(CURRENT_THREAD);
+
+ ResolveCacheElem * pElem = (ResolveCacheElem *)token;
+ g_resolveCache->PromoteChainEntry(pElem);
+ target = (PCODE) pElem->target;
+
+ // Have we failed the dispatch stub too many times?
+ if (flags & SDF_ResolveBackPatch)
+ {
+ PCODE stubAddr = callSite.GetSiteTarget();
+ VirtualCallStubManager * pMgr = VirtualCallStubManager::FindStubManager(stubAddr);
+ pMgr->BackPatchWorker(&callSite);
+ }
+
+ END_SO_INTOLERANT_CODE;
+
+ return target;
+ }
+#endif
+
+ pSDFrame->SetCallSite(NULL, (TADDR)callSite.GetIndirectCell());
+
+ DispatchToken representativeToken(token);
+ MethodTable * pRepresentativeMT = pObj->GetTrueMethodTable();
+ if (representativeToken.IsTypedToken())
+ {
+ pRepresentativeMT = CURRENT_THREAD->GetDomain()->LookupType(representativeToken.GetTypeID());
+ CONSISTENCY_CHECK(CheckPointer(pRepresentativeMT));
+ }
+
+ pSDFrame->SetRepresentativeSlot(pRepresentativeMT, representativeToken.GetSlotNumber());
+ pSDFrame->Push(CURRENT_THREAD);
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER;
+
+    // For virtual delegates, m_siteAddr is a field of a managed object.
+    // Thus we have to report it as an interior pointer,
+    // so that it is updated during a GC.
+ GCPROTECT_BEGININTERIOR( *(callSite.GetIndirectCellAddress()) );
+
+ GCStress<vsd_on_resolve>::MaybeTriggerAndProtect(pObj);
+
+ PCODE callSiteTarget = callSite.GetSiteTarget();
+ CONSISTENCY_CHECK(callSiteTarget != NULL);
+
+ VirtualCallStubManager::StubKind stubKind = VirtualCallStubManager::SK_UNKNOWN;
+ VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(callSiteTarget, &stubKind);
+ PREFIX_ASSUME(pMgr != NULL);
+
+#ifndef _TARGET_X86_
+ // Have we failed the dispatch stub too many times?
+ if (flags & SDF_ResolveBackPatch)
+ {
+ pMgr->BackPatchWorker(&callSite);
+ }
+#endif
+
+ target = pMgr->ResolveWorker(&callSite, pObj, token, stubKind);
+
+ GCPROTECT_END();
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
+ pSDFrame->Pop(CURRENT_THREAD);
+
+ return target;
+}
+
+void VirtualCallStubManager::BackPatchWorkerStatic(PCODE returnAddress, TADDR siteAddrForRegisterIndirect)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ ENTRY_POINT;
+ PRECONDITION(returnAddress != NULL);
+ } CONTRACTL_END
+
+ BEGIN_ENTRYPOINT_VOIDRET;
+
+ StubCallSite callSite(siteAddrForRegisterIndirect, returnAddress);
+
+ PCODE callSiteTarget = callSite.GetSiteTarget();
+ CONSISTENCY_CHECK(callSiteTarget != NULL);
+
+ VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(callSiteTarget);
+ PREFIX_ASSUME(pMgr != NULL);
+
+ pMgr->BackPatchWorker(&callSite);
+
+ END_ENTRYPOINT_VOIDRET;
+}
+
+PCODE VirtualCallStubManager::ResolveWorker(StubCallSite* pCallSite,
+ OBJECTREF pObj,
+ DispatchToken token,
+ StubKind stubKind)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(pObj != NULL);
+ } CONTRACTL_END;
+
+ MethodTable* objectType = pObj->GetMethodTable();
+ CONSISTENCY_CHECK(CheckPointer(objectType));
+
+ // pObj is not protected. Clear it in debug builds to avoid accidental use.
+ INDEBUG(pObj = NULL);
+
+#ifdef STUB_LOGGING
+ if (g_dumpLogCounter != 0)
+ {
+ UINT32 total_calls = g_mono_call_counter + g_poly_call_counter;
+
+ if (total_calls > g_dumpLogCounter)
+ {
+ VirtualCallStubManager::LoggingDump();
+ if (g_dumpLogIncr == 0)
+ g_dumpLogCounter = 0;
+ else
+ g_dumpLogCounter += g_dumpLogIncr;
+ }
+ }
+
+ if (g_resetCacheCounter != 0)
+ {
+ UINT32 total_calls = g_mono_call_counter + g_poly_call_counter;
+
+ if (total_calls > g_resetCacheCounter)
+ {
+ VirtualCallStubManager::ResetCache();
+ if (g_resetCacheIncr == 0)
+ g_resetCacheCounter = 0;
+ else
+ g_resetCacheCounter += g_resetCacheIncr;
+ }
+ }
+#endif // STUB_LOGGING
+
+ //////////////////////////////////////////////////////////////
+ // Get the managers associated with the callee
+
+ VirtualCallStubManager *pCalleeMgr = NULL; // Only set if the caller is shared, NULL otherwise
+
+ BOOL bCallToShorterLivedTarget = FALSE;
+
+ // We care about the following cases:
+ // Call from shared domain -> domain-specific target (collectible or not)
+ // Call from any site -> collectible target
+ if (parentDomain->IsSharedDomain())
+ {
+ // The callee's manager
+ pCalleeMgr = objectType->GetLoaderAllocator()->GetVirtualCallStubManager();
+ // We already know that we are the shared manager, so we can just see if the callee has the same manager
+ bCallToShorterLivedTarget = (pCalleeMgr != this);
+ }
+ else if (objectType->GetLoaderAllocator()->IsCollectible())
+ {
+ // The callee's manager
+ pCalleeMgr = objectType->GetLoaderAllocator()->GetVirtualCallStubManager();
+ if (pCalleeMgr != this)
+ {
+ bCallToShorterLivedTarget = TRUE;
+ }
+ else
+ {
+ pCalleeMgr = NULL;
+ }
+ }
+
+ stats.worker_call++;
+
+ LOG((LF_STUBS, LL_INFO100000, "ResolveWorker from %sStub, token" FMT_ADDR "object's MT" FMT_ADDR "ind-cell" FMT_ADDR "call-site" FMT_ADDR "%s\n",
+ (stubKind == SK_DISPATCH) ? "Dispatch" : (stubKind == SK_RESOLVE) ? "Resolve" : (stubKind == SK_LOOKUP) ? "Lookup" : "Unknown",
+ DBG_ADDR(token.To_SIZE_T()), DBG_ADDR(objectType), DBG_ADDR(pCallSite->GetIndirectCell()), DBG_ADDR(pCallSite->GetReturnAddress()),
+ bCallToShorterLivedTarget ? "bCallToShorterLivedTarget" : "" ));
+
+ PCODE stub = CALL_STUB_EMPTY_ENTRY;
+ PCODE target = NULL;
+ BOOL patch = FALSE;
+
+ // This code can throw an OOM, but we do not want to fail in this case because
+ // we must always successfully determine the target of a virtual call so that
+ // CERs can work (there are a couple of exceptions to this involving generics).
+ // Since the code below is just trying to see if a stub representing the current
+    // type and token exists, it is not strictly necessary for determining the target.
+ // We will treat the case of an OOM the same as the case of not finding an entry
+ // in the hash tables and will continue on to the slow resolve case, which is
+ // guaranteed not to fail outside of a couple of generics-specific cases.
+ EX_TRY
+ {
+ /////////////////////////////////////////////////////////////////////////////
+ // First see if we can find a dispatcher stub for this token and type. If a
+ // match is found, use the target stored in the entry.
+ {
+ DispatchEntry entryD;
+ Prober probeD(&entryD);
+ if (dispatchers->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeD))
+ {
+ stub = (PCODE) dispatchers->Find(&probeD);
+ if (stub != CALL_STUB_EMPTY_ENTRY)
+ {
+ target = (PCODE)entryD.Target();
+ patch = TRUE;
+ }
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////////////////
+ // Second see if we can find a ResolveCacheElem for this token and type.
+ // If a match is found, use the target stored in the entry.
+ if (target == NULL)
+ {
+ ResolveCacheElem * elem = NULL;
+ ResolveCacheEntry entryRC;
+ Prober probeRC(&entryRC);
+ if (cache_entries->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeRC))
+ {
+ elem = (ResolveCacheElem *)(cache_entries->Find(&probeRC));
+ if (elem != CALL_STUB_EMPTY_ENTRY)
+ {
+ target = (PCODE)entryRC.Target();
+ patch = TRUE;
+ }
+ }
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH (SwallowAllExceptions);
+
+ /////////////////////////////////////////////////////////////////////////////////////
+ // If we failed to find a target in either the resolver or cache entry hash tables,
+ // we need to perform a full resolution of the token and type.
+ //@TODO: Would be nice to add assertion code to ensure we only ever call Resolver once per <token,type>.
+ if (target == NULL)
+ {
+ CONSISTENCY_CHECK(stub == CALL_STUB_EMPTY_ENTRY);
+ patch = Resolver(objectType, token, &target);
+
+#if defined(_DEBUG)
+ if (!objectType->IsTransparentProxy() &&
+ !objectType->IsComObjectType())
+ {
+ CONSISTENCY_CHECK(!MethodTable::GetMethodDescForSlotAddress(target)->IsGenericMethodDefinition());
+ }
+#endif // _DEBUG
+ }
+
+ CONSISTENCY_CHECK(target != NULL);
+
+ // Now that we've successfully determined the target, we will wrap the remaining logic in a giant
+ // TRY/CATCH statement because it is there purely to emit stubs and cache entries. In the event
+ // that emitting stub or cache entries throws an exception (for example, because of OOM), we should
+ // not fail to perform the required dispatch. This is all because the basic assumption of
+ // Constrained Execution Regions (CERs) is that all virtual method calls can be made without
+ // failure.
+ //
+ // NOTE: The THROWS contract for this method does not change, because there are still a few special
+ // cases involving generics that can throw when trying to determine the target method. These cases
+ // are exceptional and will be documented as unsupported for CERs.
+ //
+ // NOTE: We do not try to keep track of the memory that has been allocated throughout this process
+ // just so we can revert the memory should things fail. This is because we add the elements to the
+    // hash tables and they can be reused later on. Additionally, the hash tables are unlocked so we could
+ // never remove the elements anyway.
+ EX_TRY
+ {
+ // If we're the shared domain, we can't burn a dispatch stub to the target
+ // if that target is outside the shared domain (through virtuals
+ // originating in the shared domain but overridden by a non-shared type and
+ // called on a collection, like HashTable would call GetHashCode on an
+        // arbitrary object in its collection). Dispatch stubs would be hard to clean,
+ // but resolve stubs are easy to clean because we just clean the cache.
+ //@TODO: Figure out how to track these indirection cells so that in the
+ //@TODO: future we can create dispatch stubs for this case.
+ BOOL bCreateDispatchStub = !bCallToShorterLivedTarget;
+
+ DispatchCache::InsertKind insertKind = DispatchCache::IK_NONE;
+
+ if (target != NULL)
+ {
+ if (patch)
+ {
+ // NOTE: This means that we are sharing dispatch stubs among callsites. If we decide we don't want
+ // to do this in the future, just remove this condition
+ if (stub == CALL_STUB_EMPTY_ENTRY)
+ {
+                    //we have a target but not the dispatcher stub, let's build it
+ //First we need a failure target (the resolver stub)
+ ResolveHolder *pResolveHolder = NULL;
+ ResolveEntry entryR;
+ Prober probeR(&entryR);
+ PCODE pBackPatchFcn;
+ PCODE pResolverFcn;
+
+#ifdef _TARGET_X86_
+ // Only X86 implementation needs a BackPatch function
+ pBackPatchFcn = (PCODE) GetEEFuncEntryPoint(BackPatchWorkerAsmStub);
+#else // !_TARGET_X86_
+ pBackPatchFcn = NULL;
+#endif // !_TARGET_X86_
+
+#ifdef CHAIN_LOOKUP
+ pResolverFcn = (PCODE) GetEEFuncEntryPoint(ResolveWorkerChainLookupAsmStub);
+#else // CHAIN_LOOKUP
+                    // Use the slow resolver
+ pResolverFcn = (PCODE) GetEEFuncEntryPoint(ResolveWorkerAsmStub);
+#endif
+
+ // First see if we've already created a resolve stub for this token
+ if (resolvers->SetUpProber(token.To_SIZE_T(), 0, &probeR))
+ {
+ // Find the right resolver, make it if necessary
+ PCODE addrOfResolver = (PCODE)(resolvers->Find(&probeR));
+ if (addrOfResolver == CALL_STUB_EMPTY_ENTRY)
+ {
+ pResolveHolder = GenerateResolveStub(pResolverFcn,
+ pBackPatchFcn,
+ token.To_SIZE_T());
+
+ // Add the resolve entrypoint into the cache.
+ //@TODO: Can we store a pointer to the holder rather than the entrypoint?
+ resolvers->Add((size_t)(pResolveHolder->stub()->resolveEntryPoint()), &probeR);
+ }
+ else
+ {
+ pResolveHolder = ResolveHolder::FromResolveEntry(addrOfResolver);
+ }
+ CONSISTENCY_CHECK(CheckPointer(pResolveHolder));
+ stub = pResolveHolder->stub()->resolveEntryPoint();
+ CONSISTENCY_CHECK(stub != NULL);
+ }
+
+ // Only create a dispatch stub if:
+ // 1. We successfully created or found a resolve stub.
+ // 2. We are not blocked from creating a dispatch stub.
+ // 3. The call site is currently wired to a lookup stub. If the call site is wired
+ // to anything else, then we're never going to use the dispatch stub so there's
+ // no use in creating it.
+ if (pResolveHolder != NULL && stubKind == SK_LOOKUP)
+ {
+ DispatchEntry entryD;
+ Prober probeD(&entryD);
+ if (bCreateDispatchStub &&
+ dispatchers->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeD))
+ {
+                        // We are allowed to create a reusable dispatch stub for all assemblies;
+                        // this allows us to optimize the call interception case the same way
+ DispatchHolder *pDispatchHolder = NULL;
+ PCODE addrOfDispatch = (PCODE)(dispatchers->Find(&probeD));
+ if (addrOfDispatch == CALL_STUB_EMPTY_ENTRY)
+ {
+ PCODE addrOfFail = pResolveHolder->stub()->failEntryPoint();
+ pDispatchHolder = GenerateDispatchStub(
+ target, addrOfFail, objectType, token.To_SIZE_T());
+ dispatchers->Add((size_t)(pDispatchHolder->stub()->entryPoint()), &probeD);
+ }
+ else
+ {
+ pDispatchHolder = DispatchHolder::FromDispatchEntry(addrOfDispatch);
+ }
+
+ // Now assign the entrypoint to stub
+ CONSISTENCY_CHECK(CheckPointer(pDispatchHolder));
+ stub = pDispatchHolder->stub()->entryPoint();
+ CONSISTENCY_CHECK(stub != NULL);
+ }
+ else
+ {
+ insertKind = DispatchCache::IK_SHARED;
+ }
+ }
+ }
+ }
+ else
+ {
+ stats.worker_call_no_patch++;
+ }
+ }
+
+    // When we get here, target is where the call should go,
+    // and patch is TRUE, telling us that we may have to back-patch the call site with the stub
+ if (stub != CALL_STUB_EMPTY_ENTRY)
+ {
+ _ASSERTE(patch);
+
+        // If we get here and have a dispatching stub in hand, it probably means
+ // that the cache used by the resolve stubs (g_resolveCache) does not have this stub,
+ // so insert it.
+ //
+ // We only insert into the cache if we have a ResolveStub or we have a DispatchStub
+ // that missed, since we want to keep the resolve cache empty of unused entries.
+ // If later the dispatch stub fails (because of another type at the call site),
+ // we'll insert the new value into the cache for the next time.
+        // Note that if we decide to skip creating a DispatchStub because we are calling
+        // from a shared to an unshared domain, then we will also insert into the cache.
+
+ if (insertKind == DispatchCache::IK_NONE)
+ {
+ if (stubKind == SK_DISPATCH)
+ {
+ insertKind = DispatchCache::IK_DISPATCH;
+ }
+ else if (stubKind == SK_RESOLVE)
+ {
+ insertKind = DispatchCache::IK_RESOLVE;
+ }
+ }
+
+ if (insertKind != DispatchCache::IK_NONE)
+ {
+ // Because the TransparentProxy MT is process-global, we cannot cache targets for
+            // unshared interfaces because there is the possibility of caching a
+ // <token, TPMT, target> entry where target is in AD1, and then matching against
+ // this entry from AD2 which happens to be using the same token, perhaps for a
+ // completely different interface.
+#ifdef FEATURE_REMOTING
+ if (token.IsTypedToken() && objectType->IsTransparentProxy())
+ {
+ MethodTable * pItfMT = GetTypeFromToken(token);
+ if (pItfMT->GetDomain() != SharedDomain::GetDomain())
+ {
+ insertKind = DispatchCache::IK_NONE;
+ }
+ }
+#endif
+ }
+
+ if (insertKind != DispatchCache::IK_NONE)
+ {
+ VirtualCallStubManager * pMgrForCacheElem = this;
+
+ // If we're calling from shared to unshared, make sure the cache element is
+ // allocated in the unshared manager so that when the unshared code unloads
+ // the cache element is unloaded.
+ if (bCallToShorterLivedTarget)
+ {
+ _ASSERTE(pCalleeMgr != NULL);
+ pMgrForCacheElem = pCalleeMgr;
+ }
+
+ // Find or create a new ResolveCacheElem
+ ResolveCacheElem *e = pMgrForCacheElem->GetResolveCacheElem(objectType, token.To_SIZE_T(), (void *)target);
+
+ // Try to insert this entry into the resolver cache table
+ // When we get a collision we may decide not to insert this element
+ // and Insert will return FALSE if we decided not to add the entry
+#ifdef STUB_LOGGING
+ BOOL didInsert =
+#endif
+ g_resolveCache->Insert(e, insertKind);
+
+#ifdef STUB_LOGGING
+ if ((STUB_COLLIDE_MONO_PCT > 0) && !didInsert && (stubKind == SK_RESOLVE))
+ {
+ // If we decided not to perform the insert and we came in with a resolve stub
+                // then we currently have a polymorphic callsite, so we flip a coin to decide
+ // whether to convert this callsite back into a dispatch stub (monomorphic callsite)
+
+ if (!bCallToShorterLivedTarget && bCreateDispatchStub)
+ {
+                    // We are allowed to create a reusable dispatch stub for all assemblies;
+                    // this allows us to optimize the call interception case the same way
+
+ UINT32 coin = UINT32(GetRandomInt(100));
+
+ if (coin < STUB_COLLIDE_MONO_PCT)
+ {
+ DispatchEntry entryD;
+ Prober probeD(&entryD);
+ if (dispatchers->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeD))
+ {
+ DispatchHolder *pDispatchHolder = NULL;
+ PCODE addrOfDispatch = (PCODE)(dispatchers->Find(&probeD));
+ if (addrOfDispatch == CALL_STUB_EMPTY_ENTRY)
+ {
+ // It is possible that we never created this monomorphic dispatch stub
+ // so we may have to create it now
+ ResolveHolder* pResolveHolder = ResolveHolder::FromResolveEntry(pCallSite->GetSiteTarget());
+ PCODE addrOfFail = pResolveHolder->stub()->failEntryPoint();
+                                    pDispatchHolder = GenerateDispatchStub(
+                                        target, addrOfFail, objectType, token.To_SIZE_T());
+ dispatchers->Add((size_t)(pDispatchHolder->stub()->entryPoint()), &probeD);
+ }
+ else
+ {
+ pDispatchHolder = DispatchHolder::FromDispatchEntry(addrOfDispatch);
+ }
+
+                                // increment the number of times we changed a cache collision into a mono stub
+ stats.worker_collide_to_mono++;
+
+ // Now assign the entrypoint to stub
+ CONSISTENCY_CHECK(pDispatchHolder != NULL);
+ stub = pDispatchHolder->stub()->entryPoint();
+ CONSISTENCY_CHECK(stub != NULL);
+ }
+ }
+ }
+ }
+#endif // STUB_LOGGING
+ }
+
+ if (stubKind == SK_LOOKUP)
+ {
+ BackPatchSite(pCallSite, (PCODE)stub);
+ }
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH (SwallowAllExceptions);
+
+ // Target can be NULL only if we can't resolve to an address
+ _ASSERTE(target != NULL);
+
+ return target;
+}
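+
+// For orientation, an illustrative sketch (the names below are descriptive placeholders,
+// not product APIs) of the call-site lifecycle that ResolveWorker and BackPatchSite drive.
+// An indirection cell only ever moves forward through these states, except for the
+// STUB_LOGGING coin flip above that can re-create a monomorphic dispatch stub:
+//
+//   enum SiteState { SITE_LOOKUP, SITE_DISPATCH, SITE_RESOLVE };
+//
+//   SiteState NextState(SiteState s)
+//   {
+//       switch (s)
+//       {
+//       case SITE_LOOKUP:   return SITE_DISPATCH; // or SITE_RESOLVE if we must not burn
+//                                                 // a dispatch stub (shorter-lived target)
+//       case SITE_DISPATCH: return SITE_RESOLVE;  // after STUB_MISS_COUNT_VALUE misses
+//       default:            return SITE_RESOLVE;  // polymorphic sites normally stay put
+//       }
+//   }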
+
+/*
+Resolve the token in the context of the method table, and set the target to point to
+the address that we should jump to in order to reach the implementation. Return a boolean indicating
+whether or not this is a permanent choice or a temporary choice. For example, if the code has
+not been jitted yet, return FALSE and set the target to the prestub. If the target is set to NULL,
+it means that the token is not resolvable.
+*/
+BOOL
+VirtualCallStubManager::Resolver(
+ MethodTable * pMT,
+ DispatchToken token,
+ PCODE * ppTarget)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(TypeHandle(pMT).CheckFullyLoaded());
+ } CONTRACTL_END;
+
+#ifdef _DEBUG
+ MethodTable * dbg_pTokenMT = pMT;
+ MethodDesc * dbg_pTokenMD = NULL;
+ if (token.IsTypedToken())
+ {
+ dbg_pTokenMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
+ dbg_pTokenMD = dbg_pTokenMT->FindDispatchSlot(token.GetSlotNumber()).GetMethodDesc();
+ }
+#endif // _DEBUG
+
+ g_IBCLogger.LogMethodTableAccess(pMT);
+
+ // NOTE: CERs are not hardened against transparent proxy types,
+ // so no need to worry about throwing an exception from here.
+#ifdef FEATURE_REMOTING
+ if (pMT->IsTransparentProxy())
+ {
+ if (IsInterfaceToken(token))
+ {
+ MethodTable * pItfMT = GetTypeFromToken(token);
+ DispatchSlot ds(pItfMT->FindDispatchSlot(token.GetSlotNumber()));
+ if (pItfMT->HasInstantiation())
+ {
+ MethodDesc * pTargetMD = ds.GetMethodDesc();
+ if (!pTargetMD->HasMethodInstantiation())
+ {
+ MethodDesc * pInstMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pTargetMD,
+ pItfMT,
+ FALSE, // forceBoxedEntryPoint
+ Instantiation(), // methodInst
+ FALSE, // allowInstParam
+ TRUE); // forceRemotableMethod
+ *ppTarget = CRemotingServices::GetStubForInterfaceMethod(pInstMD);
+ return TRUE;
+ }
+ }
+ *ppTarget = ds.GetTarget();
+ }
+ else
+ {
+ CONSISTENCY_CHECK(IsClassToken(token));
+ // All we do here is call the TP thunk stub.
+ DispatchSlot thunkSlot(CTPMethodTable::GetMethodTable()->FindDispatchSlot(token.GetTypeID(), token.GetSlotNumber()));
+ CONSISTENCY_CHECK(!thunkSlot.IsNull());
+ *ppTarget = thunkSlot.GetTarget();
+ }
+ return TRUE;
+ }
+
+ CONSISTENCY_CHECK(!pMT->IsTransparentProxy());
+#endif // FEATURE_REMOTING
+
+ LOG((LF_LOADER, LL_INFO10000, "SD: VCSM::Resolver: (start) looking up %s method in %s\n",
+ token.IsThisToken() ? "this" : "interface",
+ pMT->GetClass()->GetDebugClassName()));
+
+ MethodDesc * pMD = NULL;
+ BOOL fShouldPatch = FALSE;
+ DispatchSlot implSlot(pMT->FindDispatchSlot(token));
+
+ // If we found a target, then just figure out if we're allowed to create a stub around
+ // this target and backpatch the callsite.
+ if (!implSlot.IsNull())
+ {
+ g_IBCLogger.LogDispatchTableSlotAccess(&implSlot);
+#if defined(LOGGING) || defined(_DEBUG)
+ {
+ pMD = implSlot.GetMethodDesc();
+ if (pMD != NULL)
+ {
+ // Make sure we aren't crossing app domain boundaries
+ CONSISTENCY_CHECK(GetAppDomain()->CheckValidModule(pMD->GetModule()));
+#ifdef LOGGING
+ WORD slot = pMD->GetSlot();
+ BOOL fIsOverriddenMethod =
+ (pMT->GetNumParentVirtuals() <= slot && slot < pMT->GetNumVirtuals());
+ LOG((LF_LOADER, LL_INFO10000, "SD: VCSM::Resolver: (end) looked up %s %s method %s::%s\n",
+ fIsOverriddenMethod ? "overridden" : "newslot",
+ token.IsThisToken() ? "this" : "interface",
+ pMT->GetClass()->GetDebugClassName(),
+ pMD->GetName()));
+#endif // LOGGING
+ }
+ }
+#endif // defined(LOGGING) || defined(_DEBUG)
+
+ BOOL fSlotCallsPrestub = DoesSlotCallPrestub(implSlot.GetTarget());
+ if (!fSlotCallsPrestub)
+ {
+ // Skip fixup precode jump for better perf
+ PCODE pDirectTarget = Precode::TryToSkipFixupPrecode(implSlot.GetTarget());
+ if (pDirectTarget != NULL)
+ implSlot = DispatchSlot(pDirectTarget);
+
+ // Only patch to a target if it's not going to call the prestub.
+ fShouldPatch = TRUE;
+ }
+ else
+ {
+ // Getting the MethodDesc is very expensive,
+ // so only call this when we are calling the prestub
+ pMD = implSlot.GetMethodDesc();
+
+ if (pMD == NULL)
+ {
+ // pMD can be NULL when another thread raced in and patched the Method Entry Point
+ // so that it no longer points at the prestub
+ // In such a case DoesSlotCallPrestub will now return FALSE
+ CONSISTENCY_CHECK(!DoesSlotCallPrestub(implSlot.GetTarget()));
+ fSlotCallsPrestub = FALSE;
+ }
+
+ if (!fSlotCallsPrestub)
+ {
+ // Only patch to a target if it's not going to call the prestub.
+ fShouldPatch = TRUE;
+ }
+ else
+ {
+ CONSISTENCY_CHECK(CheckPointer(pMD));
+ if (pMD->IsGenericMethodDefinition())
+ {
+ //@GENERICS: Currently, generic virtual methods are called only through JIT_VirtualFunctionPointer
+ // and so we could never have a virtual call stub at a call site for a generic virtual.
+ // As such, we're assuming the only callers to Resolver are calls to GetTarget caused
+                // indirectly by JIT_VirtualFunctionPointer. So, we return TRUE for patching so that
+ // we can cache the result in GetTarget and we don't have to perform the full resolve
+ // every time. If the way we call generic virtual methods changes, this will also need
+ // to change.
+ fShouldPatch = TRUE;
+ }
+ else
+ {
+ g_IBCLogger.LogMethodDescAccess(pMD);
+ }
+ }
+ }
+ }
+#ifdef FEATURE_COMINTEROP
+ else if (IsInterfaceToken(token))
+ {
+ if (pMT->IsComObjectType())
+ {
+ MethodTable * pItfMT = GetTypeFromToken(token);
+ implSlot = pItfMT->FindDispatchSlot(token.GetSlotNumber());
+
+ if (pItfMT->HasInstantiation())
+ {
+ DispatchSlot ds(implSlot);
+ MethodDesc * pTargetMD = ds.GetMethodDesc();
+ if (!pTargetMD->HasMethodInstantiation())
+ {
+ _ASSERTE(pItfMT->IsProjectedFromWinRT() || pItfMT->IsWinRTRedirectedInterface(TypeHandle::Interop_ManagedToNative));
+
+ MethodDesc *pInstMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
+ pTargetMD,
+ pItfMT,
+ FALSE, // forceBoxedEntryPoint
+ Instantiation(), // methodInst
+ FALSE, // allowInstParam
+ TRUE); // forceRemotableMethod
+
+ _ASSERTE(pInstMD->IsComPlusCall() || pInstMD->IsGenericComPlusCall());
+
+ *ppTarget = pInstMD->GetStableEntryPoint();
+ return TRUE;
+ }
+ }
+
+ fShouldPatch = TRUE;
+ }
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (implSlot.IsNull())
+ {
+ MethodTable * pTokenMT = NULL;
+ MethodDesc * pTokenMD = NULL;
+ if (token.IsTypedToken())
+ {
+ pTokenMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
+ pTokenMD = pTokenMT->FindDispatchSlot(token.GetSlotNumber()).GetMethodDesc();
+ }
+
+#ifdef FEATURE_COMINTEROP
+ if ((pTokenMT != NULL) && (pTokenMT->GetClass()->IsEquivalentType()))
+ {
+ SString methodName;
+ DefineFullyQualifiedNameForClassW();
+ pTokenMD->GetFullMethodInfo(methodName);
+
+ COMPlusThrowHR(COR_E_MISSINGMETHOD, COR_E_MISSINGMETHOD, GetFullyQualifiedNameForClassNestedAwareW(pMT), methodName.GetUnicode());
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ // Method not found, and this should never happen for anything but equivalent types
+ CONSISTENCY_CHECK(!implSlot.IsNull() && "Valid method implementation was not found.");
+ COMPlusThrow(kEntryPointNotFoundException);
+ }
+ }
+
+ *ppTarget = implSlot.GetTarget();
+
+ return fShouldPatch;
+} // VirtualCallStubManager::Resolver
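+
+// A note on Resolver's contract, with an illustrative (hypothetical) caller: the return
+// value is a patchability flag, not a success flag. TRUE means the <token, type> -> target
+// mapping is stable enough to burn into a stub or cache entry; FALSE (typically with
+// target == prestub) means "dispatch to it, but ask again next time". GetTarget below
+// follows exactly this shape:
+//
+//   PCODE target;
+//   BOOL fPatch = Resolver(pMT, token, &target);
+//   if (fPatch)
+//       { /* create or patch a stub, or insert into g_resolveCache */ }
+//   return target;   // always dispatch, patched or not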
+
+#endif // !DACCESS_COMPILE
+
+//----------------------------------------------------------------------------
+// Given a contract, return true if the contract represents a slot on the target.
+BOOL VirtualCallStubManager::IsClassToken(DispatchToken token)
+{
+ CONTRACT (BOOL) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACT_END;
+ RETURN (token.IsThisToken());
+}
+
+//----------------------------------------------------------------------------
+// Given a contract, return true if the contract represents an interface, false if just a slot.
+BOOL VirtualCallStubManager::IsInterfaceToken(DispatchToken token)
+{
+ CONTRACT (BOOL) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACT_END;
+ BOOL ret = token.IsTypedToken();
+ // For now, only interfaces have typed dispatch tokens.
+ CONSISTENCY_CHECK(!ret || CheckPointer(GetThread()->GetDomain()->LookupType(token.GetTypeID())));
+ CONSISTENCY_CHECK(!ret || GetThread()->GetDomain()->LookupType(token.GetTypeID())->IsInterface());
+ RETURN (ret);
+}
+
+#ifndef DACCESS_COMPILE
+
+//----------------------------------------------------------------------------
+MethodDesc *
+VirtualCallStubManager::GetRepresentativeMethodDescFromToken(
+ DispatchToken token,
+ MethodTable * pMT)
+{
+ CONTRACT (MethodDesc *) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pMT));
+ POSTCONDITION(CheckPointer(RETVAL));
+ SO_TOLERANT;
+ } CONTRACT_END;
+
+ // This is called when trying to create a HelperMethodFrame, which means there are
+ // potentially managed references on the stack that are not yet protected.
+ GCX_FORBID();
+
+ if (token.IsTypedToken())
+ {
+ pMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
+ CONSISTENCY_CHECK(CheckPointer(pMT));
+ token = DispatchToken::CreateDispatchToken(token.GetSlotNumber());
+ }
+ CONSISTENCY_CHECK(token.IsThisToken());
+ RETURN (pMT->GetMethodDescForSlot(token.GetSlotNumber()));
+}
+
+//----------------------------------------------------------------------------
+MethodTable *VirtualCallStubManager::GetTypeFromToken(DispatchToken token)
+{
+ CONTRACTL {
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS);
+ } CONTRACTL_END;
+ MethodTable *pMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
+ _ASSERTE(pMT != NULL);
+ _ASSERTE(pMT->LookupTypeID() == token.GetTypeID());
+ return pMT;
+}
+
+#endif // !DACCESS_COMPILE
+
+//----------------------------------------------------------------------------
+MethodDesc *VirtualCallStubManager::GetInterfaceMethodDescFromToken(DispatchToken token)
+{
+ CONTRACTL {
+ NOTHROW;
+ WRAPPER(GC_TRIGGERS);
+ PRECONDITION(IsInterfaceToken(token));
+ } CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+
+ MethodTable * pMT = GetTypeFromToken(token);
+ PREFIX_ASSUME(pMT != NULL);
+ CONSISTENCY_CHECK(CheckPointer(pMT));
+ return pMT->GetMethodDescForSlot(token.GetSlotNumber());
+
+#else // DACCESS_COMPILE
+
+ DacNotImpl();
+ return NULL;
+
+#endif // DACCESS_COMPILE
+}
+
+#ifndef DACCESS_COMPILE
+
+//----------------------------------------------------------------------------
+// This will check to see if a match is in the cache.
+// Returns the target on success, otherwise NULL.
+PCODE VirtualCallStubManager::CacheLookup(size_t token, UINT16 tokenHash, MethodTable *pMT)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pMT));
+ } CONTRACTL_END
+
+ // Now look in the cache for a match
+ ResolveCacheElem *pElem = g_resolveCache->Lookup(token, tokenHash, pMT);
+
+ // If the element matches, return the target - we're done!
+ return (PCODE)(pElem != NULL ? pElem->target : NULL);
+}
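+
+// Note: callers that have no precomputed hash (e.g. GetTarget below) pass
+// DispatchCache::INVALID_HASH, in which case the cache is expected to compute
+// HashToken(token) itself; the resolve stubs, by contrast, carry the hash value
+// that GenerateResolveStub burned into them.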
+
+#ifdef FEATURE_REMOTING
+//----------------------------------------------------------------------------
+// This is used by TransparentProxyWorkerStub to take a stub address (token)
+// and a MethodTable, and return the target. This is the fast version that only
+// checks the cache and returns NULL if a target is not found.
+//
+PCODE VSD_GetTargetForTPWorkerQuick(TransparentProxyObject * orTP, size_t token)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(orTP));
+ PRECONDITION(orTP->IsTransparentProxy());
+ } CONTRACTL_END
+
+ GCX_FORBID();
+
+ DispatchToken tok;
+
+ // If we have a 16-bit number in the token then we have a slot number
+ if (((UINT16)token) == token)
+ {
+ tok = DispatchToken::CreateDispatchToken((UINT32)token);
+ }
+ // Otherwise, we have a MethodDesc
+ else
+ {
+ UINT32 typeID = 0;
+ MethodDesc * pMD = (MethodDesc *) token;
+
+ if (pMD->IsInterface())
+ {
+ typeID = pMD->GetMethodTable()->LookupTypeID();
+ // If this type has never had a TypeID assigned, then it couldn't possibly
+ // be in the cache. We do this instead of calling GetTypeID because that can
+ // throw, and this method is not protected from that.
+ if (typeID == TypeIDProvider::INVALID_TYPE_ID)
+ {
+ return NULL;
+ }
+
+#ifdef FAT_DISPATCH_TOKENS
+ if (DispatchToken::RequiresDispatchTokenFat(typeID, pMD->GetSlot()))
+ {
+ tok = pMD->GetMethodTable()->GetLoaderAllocator()->TryLookupDispatchToken(typeID, pMD->GetSlot());
+ if (!tok.IsValid())
+ {
+ return NULL;
+ }
+ }
+ else
+#endif
+ {
+ tok = DispatchToken::CreateDispatchToken(typeID, pMD->GetSlot());
+ }
+ }
+ else
+ {
+ // On AMD64 a non-virtual call on an in context transparent proxy
+ // results in us reaching here with (pMD->IsInterface == FALSE)
+ return pMD->GetSingleCallableAddrOfCode();
+ }
+ }
+
+ return VirtualCallStubManager::CacheLookup(tok.To_SIZE_T(), DispatchCache::INVALID_HASH, orTP->GetMethodTableBeingProxied());
+}
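+
+// The ((UINT16)token) == token test above is a tagging trick: slot numbers fit in 16 bits,
+// and it relies on the (safe) assumption that no MethodDesc is ever allocated in the first
+// 64KB of address space, so the two encodings cannot collide.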
+
+//----------------------------------------------------------------------------
+// This is used by TransparentProxyWorkerStub to take a stub address (token)
+// and a MethodTable, and return the target. This is the slow version that can throw.
+// On x86 we construct a HelperMethodFrame, while on the 64-bit platforms we are
+// called by ResolveWorkerStatic, which has already constructed a frame.
+//
+PCODE VSD_GetTargetForTPWorker(TransitionBlock * pTransitionBlock, size_t token)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pTransitionBlock));
+ } CONTRACTL_END
+
+ MAKE_CURRENT_THREAD_AVAILABLE();
+
+ DispatchToken tok;
+ MethodDesc *pRepresentativeMD = NULL;
+ PCODE pRet = NULL;
+
+ BEGIN_SO_INTOLERANT_CODE(CURRENT_THREAD);
+
+ FrameWithCookie<StubDispatchFrame> frame(pTransitionBlock);
+ StubDispatchFrame * pSDFrame = &frame;
+
+ MethodTable * pMT = CTPMethodTable::GetMethodTableBeingProxied(pSDFrame->GetThis());
+
+ // If we have a 16-bit number in the token then we have a slot number
+ if (((UINT16)token) == token) {
+ tok = DispatchToken::CreateDispatchToken((UINT32)token);
+ pRepresentativeMD = VirtualCallStubManager::GetRepresentativeMethodDescFromToken(tok.To_SIZE_T(), pMT);
+ }
+ // Otherwise, we have a MethodDesc
+ else {
+ // The token will be calculated after we erect a GC frame.
+ pRepresentativeMD = (MethodDesc *)token;
+ }
+ PREFIX_ASSUME(pRepresentativeMD != NULL);
+
+ // Get the current appdomain
+ AppDomain *pAD = (AppDomain *) CURRENT_THREAD->GetDomain();
+
+ // Get the virtual stub manager for this AD. We pick the current
+ // AD because when the AD is unloaded the cache entry will be cleared.
+ // If we happen to be calling from shared to shared, it's no big
+ // deal because we'll just come through here again and add a new
+ // cache entry. We can't choose the manager based on the return
+ // address because this could be tail-called or called indirectly
+ // via helper and so the return address won't be recognized.
+ VirtualCallStubManager *pMgr = pAD->GetLoaderAllocator()->GetVirtualCallStubManager();
+ CONSISTENCY_CHECK(CheckPointer(pMgr));
+
+ pSDFrame->SetFunction(pRepresentativeMD);
+ pSDFrame->Push(CURRENT_THREAD);
+ INSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE;
+
+ // If we didn't properly create a token above, it's because we needed to wait until
+ // the helper frame was created (GetTypeID is a throwing operation).
+ if (!tok.IsValid()) {
+ tok = pAD->GetLoaderAllocator()->GetDispatchToken(pRepresentativeMD->GetMethodTable()->GetTypeID(),
+ pRepresentativeMD->GetSlot());
+ }
+ CONSISTENCY_CHECK(tok.IsValid());
+
+ pRet = pMgr->GetTarget(tok.To_SIZE_T(), pMT);
+ CONSISTENCY_CHECK(pRet != NULL);
+
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE;
+ pSDFrame->Pop(CURRENT_THREAD);
+
+ END_SO_INTOLERANT_CODE;
+
+ return pRet;
+}
+#endif // FEATURE_REMOTING
+
+//----------------------------------------------------------------------------
+/* static */
+PCODE
+VirtualCallStubManager::GetTarget(
+ DispatchToken token,
+ MethodTable * pMT)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(CheckPointer(pMT));
+ } CONTRACTL_END
+
+ // ToDo: add this after checking that reflection
+ // doesn't send us a TransparentProxy
+ //
+ // CONSISTENCY_CHECK(!pMT->IsTransparentProxy());
+
+ g_external_call++;
+
+ if (token.IsThisToken())
+ {
+ return pMT->GetRestoredSlot(token.GetSlotNumber());
+ }
+
+ GCX_COOP(); // This is necessary for BucketTable synchronization
+
+ PCODE target = NULL;
+
+#ifndef STUB_DISPATCH_PORTABLE
+ target = CacheLookup(token.To_SIZE_T(), DispatchCache::INVALID_HASH, pMT);
+ if (target != NULL)
+ return target;
+#endif // !STUB_DISPATCH_PORTABLE
+
+ // No match, now do full resolve
+ BOOL fPatch;
+ fPatch = Resolver(pMT, token, &target);
+ _ASSERTE(target != NULL);
+
+#ifndef STUB_DISPATCH_PORTABLE
+ if (fPatch)
+ {
+ ResolveCacheElem *pCacheElem = pMT->GetLoaderAllocator()->GetVirtualCallStubManager()->
+ GetResolveCacheElem(pMT, token.To_SIZE_T(), (BYTE *)target);
+
+ if (pCacheElem)
+ {
+ if (!g_resolveCache->Insert(pCacheElem, DispatchCache::IK_EXTERNAL))
+ {
+ // We decided not to perform the insert
+ }
+ }
+ }
+ else
+ {
+ g_external_call_no_patch++;
+ }
+#endif // !STUB_DISPATCH_PORTABLE
+
+ return target;
+}
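+
+// GetTarget is the stub-free resolution path: 'this' tokens are answered straight from
+// the vtable, everything else consults the global resolve cache and only then falls back
+// to the full Resolver, inserting the answer with IK_EXTERNAL so that later external
+// (non-stub) calls for the same <token, type> hit the cache.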
+
+#endif // !DACCESS_COMPILE
+
+//----------------------------------------------------------------------------
+/*
+Trace the token in the context of the object's method table: resolve it to the address
+that implements it and set up a trace to that target. Returns FALSE when the call cannot
+be traced, for example when the object is null and the dispatch will simply end up
+throwing a NullReferenceException.
+*/
+BOOL
+VirtualCallStubManager::TraceResolver(
+ Object * pObj,
+ DispatchToken token,
+ TraceDestination * trace)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(CheckPointer(pObj, NULL_OK));
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END
+
+ // If someone is trying to step into a stub dispatch call on a null object,
+ // just say that we can't trace this call and we'll just end up throwing
+ // a null ref exception.
+ if (pObj == NULL)
+ {
+ return FALSE;
+ }
+
+ MethodTable *pMT = pObj->GetMethodTable();
+ CONSISTENCY_CHECK(CheckPointer(pMT));
+
+#ifdef FEATURE_REMOTING
+ if (pMT->IsTransparentProxy())
+ {
+#ifdef DACCESS_COMPILE
+ DacNotImpl();
+#else
+ trace->InitForFramePush(GetEEFuncEntryPoint(TransparentProxyStubPatchLabel));
+#endif
+ return TRUE;
+ }
+#endif
+
+ DispatchSlot slot(pMT->FindDispatchSlot(token));
+
+ if (slot.IsNull() && IsInterfaceToken(token) && pMT->IsComObjectType())
+ {
+ MethodDesc * pItfMD = GetInterfaceMethodDescFromToken(token);
+ CONSISTENCY_CHECK(pItfMD->GetMethodTable()->GetSlot(pItfMD->GetSlot()) == pItfMD->GetMethodEntryPoint());
+ slot = pItfMD->GetMethodTable()->FindDispatchSlot(pItfMD->GetSlot());
+ }
+
+ return (StubManager::TraceStub(slot.GetTarget(), trace));
+}
+
+#ifndef DACCESS_COMPILE
+
+//----------------------------------------------------------------------------
+/* Change the call site. It is failing the expected MT test in the dispatcher stub
+too often.
+*/
+void VirtualCallStubManager::BackPatchWorker(StubCallSite* pCallSite)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ } CONTRACTL_END
+
+ PCODE callSiteTarget = pCallSite->GetSiteTarget();
+
+ if (isDispatchingStub(callSiteTarget))
+ {
+ DispatchHolder * dispatchHolder = DispatchHolder::FromDispatchEntry(callSiteTarget);
+ DispatchStub * dispatchStub = dispatchHolder->stub();
+
+ //yes, patch it to point to the resolve stub
+        //We can ignore the races now, since we know that the call site does go through our
+        //stub mechanisms; hence no matter who wins the race, we are correct.
+ //We find the correct resolve stub by following the failure path in the dispatcher stub itself
+ PCODE failEntry = dispatchStub->failTarget();
+ ResolveStub* resolveStub = ResolveHolder::FromFailEntry(failEntry)->stub();
+ PCODE resolveEntry = resolveStub->resolveEntryPoint();
+ BackPatchSite(pCallSite, resolveEntry);
+
+ LOG((LF_STUBS, LL_INFO10000, "BackPatchWorker call-site" FMT_ADDR "dispatchStub" FMT_ADDR "\n",
+ DBG_ADDR(pCallSite->GetReturnAddress()), DBG_ADDR(dispatchHolder->stub())));
+
+ //Add back the default miss count to the counter being used by this resolve stub
+        //Since resolve stubs are shared among many dispatch stubs, each dispatch stub
+        //that fails decrements the shared counter, and the dispatch stub that trips the
+        //counter gets converted into a polymorphic site
+ INT32* counter = resolveStub->pCounter();
+ *counter += STUB_MISS_COUNT_VALUE;
+ }
+}
+
+//----------------------------------------------------------------------------
+/* consider changing the call site to point to stub, if appropriate do it
+*/
+void VirtualCallStubManager::BackPatchSite(StubCallSite* pCallSite, PCODE stub)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ PRECONDITION(stub != NULL);
+ PRECONDITION(CheckPointer(pCallSite));
+ PRECONDITION(pCallSite->GetSiteTarget() != NULL);
+ } CONTRACTL_END
+
+ PCODE patch = stub;
+
+ // This will take care of the prejit case and find the actual patch site
+ PCODE prior = pCallSite->GetSiteTarget();
+
+ //is this really going to change anything, if not don't do it.
+ if (prior == patch)
+ return;
+
+ //we only want to do the following transitions for right now:
+ // prior new
+ // lookup dispatching or resolving
+ // dispatching resolving
+ if (isResolvingStub(prior))
+ return;
+
+ if(isDispatchingStub(stub))
+ {
+ if(isDispatchingStub(prior))
+ {
+ return;
+ }
+ else
+ {
+ stats.site_write_mono++;
+ }
+ }
+ else
+ {
+ stats.site_write_poly++;
+ }
+
+ //patch the call site
+ pCallSite->SetSiteTarget(patch);
+
+ stats.site_write++;
+}
+
+//----------------------------------------------------------------------------
+void StubCallSite::SetSiteTarget(PCODE newTarget)
+{
+ WRAPPER_NO_CONTRACT;
+ PTR_PCODE pCell = GetIndirectCell();
+ if (EnsureWritablePagesNoThrow(pCell, sizeof(PCODE)))
+ *pCell = newTarget;
+}
+
+//----------------------------------------------------------------------------
+/* Generate a dispatcher stub, pMTExpected is the method table to burn in the stub, and the two addrOf's
+are the addresses the stub is to transfer to depending on the test with pMTExpected
+*/
+DispatchHolder *VirtualCallStubManager::GenerateDispatchStub(PCODE addrOfCode,
+ PCODE addrOfFail,
+ void * pMTExpected,
+ size_t dispatchToken)
+{
+ CONTRACT (DispatchHolder*) {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(addrOfCode != NULL);
+ PRECONDITION(addrOfFail != NULL);
+ PRECONDITION(CheckPointer(pMTExpected));
+ POSTCONDITION(CheckPointer(RETVAL));
+ } CONTRACT_END;
+
+ size_t dispatchHolderSize = sizeof(DispatchHolder);
+
+#ifdef _TARGET_AMD64_
+ // See comment around m_fShouldAllocateLongJumpDispatchStubs for explanation.
+ if (m_fShouldAllocateLongJumpDispatchStubs
+ INDEBUG(|| g_pConfig->ShouldGenerateLongJumpDispatchStub()))
+ {
+ RETURN GenerateDispatchStubLong(addrOfCode,
+ addrOfFail,
+ pMTExpected,
+ dispatchToken);
+ }
+
+ dispatchHolderSize = DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_SHORT);
+#endif
+
+ //allocate from the requisite heap and copy the template over it.
+ DispatchHolder * holder = (DispatchHolder*) (void*)
+ dispatch_heap->AllocAlignedMem(dispatchHolderSize, CODE_SIZE_ALIGN);
+
+#ifdef _TARGET_AMD64_
+ if (!DispatchHolder::CanShortJumpDispatchStubReachFailTarget(addrOfFail, (LPCBYTE)holder))
+ {
+ m_fShouldAllocateLongJumpDispatchStubs = TRUE;
+ RETURN GenerateDispatchStub(addrOfCode, addrOfFail, pMTExpected, dispatchToken);
+ }
+#endif
+
+ holder->Initialize(addrOfCode,
+ addrOfFail,
+ (size_t)pMTExpected
+#ifdef _TARGET_AMD64_
+ , DispatchStub::e_TYPE_SHORT
+#endif
+ );
+
+ ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
+
+ AddToCollectibleVSDRangeList(holder);
+
+ //incr our counters
+ stats.stub_mono_counter++;
+ stats.stub_space += (UINT32)dispatchHolderSize;
+ LOG((LF_STUBS, LL_INFO10000, "GenerateDispatchStub for token" FMT_ADDR "and pMT" FMT_ADDR "at" FMT_ADDR "\n",
+ DBG_ADDR(dispatchToken), DBG_ADDR(pMTExpected), DBG_ADDR(holder->stub())));
+
+ RETURN (holder);
+}
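+
+// Note the self-correcting allocation pattern above on AMD64: we optimistically allocate
+// a short-jump holder and, only if the fail target turns out to be unreachable by rel32
+// from where the holder landed, set m_fShouldAllocateLongJumpDispatchStubs and recurse
+// into the long form. The abandoned short holder stays in the loader heap (these heaps
+// do not support freeing), a bounded one-time cost per manager.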
+
+#ifdef _TARGET_AMD64_
+//----------------------------------------------------------------------------
+/* Generate a dispatcher stub, pMTExpected is the method table to burn in the stub, and the two addrOf's
+are the addresses the stub is to transfer to depending on the test with pMTExpected
+*/
+DispatchHolder *VirtualCallStubManager::GenerateDispatchStubLong(PCODE addrOfCode,
+ PCODE addrOfFail,
+ void * pMTExpected,
+ size_t dispatchToken)
+{
+ CONTRACT (DispatchHolder*) {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(addrOfCode != NULL);
+ PRECONDITION(addrOfFail != NULL);
+ PRECONDITION(CheckPointer(pMTExpected));
+ POSTCONDITION(CheckPointer(RETVAL));
+ } CONTRACT_END;
+
+ //allocate from the requisite heap and copy the template over it.
+ DispatchHolder * holder = (DispatchHolder*) (void*)
+ dispatch_heap->AllocAlignedMem(DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_LONG), CODE_SIZE_ALIGN);
+
+ holder->Initialize(addrOfCode,
+ addrOfFail,
+ (size_t)pMTExpected,
+ DispatchStub::e_TYPE_LONG);
+
+ ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
+
+ AddToCollectibleVSDRangeList(holder);
+
+ //incr our counters
+ stats.stub_mono_counter++;
+ stats.stub_space += static_cast<UINT32>(DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_LONG));
+ LOG((LF_STUBS, LL_INFO10000, "GenerateDispatchStub for token" FMT_ADDR "and pMT" FMT_ADDR "at" FMT_ADDR "\n",
+ DBG_ADDR(dispatchToken), DBG_ADDR(pMTExpected), DBG_ADDR(holder->stub())));
+
+ RETURN (holder);
+}
+#endif
+
+//----------------------------------------------------------------------------
+/* Generate a resolve stub for the given dispatchToken.
+addrOfResolver is where to go if the inline cache check misses
+addrOfPatcher is who to call if the fail piece is being called too often by dispatcher stubs
+*/
+ResolveHolder *VirtualCallStubManager::GenerateResolveStub(PCODE addrOfResolver,
+ PCODE addrOfPatcher,
+ size_t dispatchToken)
+{
+ CONTRACT (ResolveHolder*) {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(addrOfResolver != NULL);
+#if defined(_TARGET_X86_)
+ PRECONDITION(addrOfPatcher != NULL);
+#endif
+ POSTCONDITION(CheckPointer(RETVAL));
+ } CONTRACT_END;
+
+ _ASSERTE(addrOfResolver);
+
+ //get a counter for the fail piece
+
+ UINT32 counter_index = counter_block::MAX_COUNTER_ENTRIES;
+ counter_block *cur_block = NULL;
+
+ while (true)
+ {
+ cur_block = VolatileLoad(&m_cur_counter_block);
+
+ if ((cur_block != NULL) && (cur_block->used < counter_block::MAX_COUNTER_ENTRIES))
+ {
+ counter_index = FastInterlockIncrement((LONG*)&cur_block->used) - 1;
+ if (counter_index < counter_block::MAX_COUNTER_ENTRIES)
+ {
+ // Typical case we allocate the next free counter in the block
+ break;
+ }
+ }
+
+ // Otherwise we have to create a new counter_block to serve as the head of m_cur_counter_block list
+
+ // Create the new block in the main heap
+ counter_block *pNew = new counter_block;
+
+ // Initialize the new block
+ pNew->next = cur_block;
+ pNew->used = 0;
+
+ // Try to link in the new block
+ if (InterlockedCompareExchangeT(&m_cur_counter_block, pNew, cur_block) != cur_block)
+ {
+ // Lost a race to add pNew as new head
+ delete pNew;
+ }
+ }
+
+ CONSISTENCY_CHECK(counter_index < counter_block::MAX_COUNTER_ENTRIES);
+ CONSISTENCY_CHECK(CheckPointer(cur_block));
+
+ // Initialize the default miss counter for this resolve stub
+ INT32* counterAddr = &(cur_block->block[counter_index]);
+ *counterAddr = STUB_MISS_COUNT_VALUE;
+
+ //allocate from the requisite heap and copy the templates for each piece over it.
+ ResolveHolder * holder = (ResolveHolder*) (void*)
+ resolve_heap->AllocAlignedMem(sizeof(ResolveHolder), CODE_SIZE_ALIGN);
+
+ holder->Initialize(addrOfResolver, addrOfPatcher,
+ dispatchToken, DispatchCache::HashToken(dispatchToken),
+ g_resolveCache->GetCacheBaseAddr(), counterAddr);
+ ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
+
+ AddToCollectibleVSDRangeList(holder);
+
+ //incr our counters
+ stats.stub_poly_counter++;
+ stats.stub_space += sizeof(ResolveHolder)+sizeof(size_t);
+ LOG((LF_STUBS, LL_INFO10000, "GenerateResolveStub for token" FMT_ADDR "at" FMT_ADDR "\n",
+ DBG_ADDR(dispatchToken), DBG_ADDR(holder->stub())));
+
+ RETURN (holder);
+}
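+
+// The counter allocation loop above is a lock-free bump allocator. A minimal model of the
+// same pattern (illustrative only, reusing the names from the loop above):
+//
+//   LONG slot = FastInterlockIncrement((LONG*)&cur_block->used) - 1;  // claim a slot
+//   if (slot < counter_block::MAX_COUNTER_ENTRIES)
+//       return &cur_block->block[slot];                               // won the race
+//   // otherwise CAS a freshly allocated counter_block onto the list head and retry;
+//   // a losing CAS just deletes its block, so nothing is leaked.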
+
+//----------------------------------------------------------------------------
+/* Generate a lookup stub for the given dispatchToken. addrOfResolver is where the stub always transfers control
+*/
+LookupHolder *VirtualCallStubManager::GenerateLookupStub(PCODE addrOfResolver, size_t dispatchToken)
+{
+ CONTRACT (LookupHolder*) {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ PRECONDITION(addrOfResolver != NULL);
+ POSTCONDITION(CheckPointer(RETVAL));
+ } CONTRACT_END;
+
+ //allocate from the requisite heap and copy the template over it.
+ LookupHolder * holder = (LookupHolder*) (void*) lookup_heap->AllocAlignedMem(sizeof(LookupHolder), CODE_SIZE_ALIGN);
+
+ holder->Initialize(addrOfResolver, dispatchToken);
+ ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
+
+ AddToCollectibleVSDRangeList(holder);
+
+ //incr our counters
+ stats.stub_lookup_counter++;
+ stats.stub_space += sizeof(LookupHolder);
+ LOG((LF_STUBS, LL_INFO10000, "GenerateLookupStub for token" FMT_ADDR "at" FMT_ADDR "\n",
+ DBG_ADDR(dispatchToken), DBG_ADDR(holder->stub())));
+
+ RETURN (holder);
+}
+
+//----------------------------------------------------------------------------
+/* Generate a cache entry
+*/
+ResolveCacheElem *VirtualCallStubManager::GenerateResolveCacheElem(void *addrOfCode,
+ void *pMTExpected,
+ size_t token)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ CONSISTENCY_CHECK(CheckPointer(pMTExpected));
+
+ //allocate from the requisite heap and set the appropriate fields
+ ResolveCacheElem *e = (ResolveCacheElem*) (void*)
+ cache_entry_heap->AllocAlignedMem(sizeof(ResolveCacheElem), CODE_SIZE_ALIGN);
+
+ e->pMT = pMTExpected;
+ e->token = token;
+ e->target = addrOfCode;
+
+ e->pNext = NULL;
+
+ //incr our counters
+ stats.cache_entry_counter++;
+ stats.cache_entry_space += sizeof(ResolveCacheElem);
+
+ return e;
+}
+
+//------------------------------------------------------------------
+// Adds the stub manager to our linked list of virtual stub managers
+// and adds to the global list.
+//------------------------------------------------------------------
+void VirtualCallStubManagerManager::AddStubManager(VirtualCallStubManager *pMgr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ SimpleWriteLockHolder lh(&m_RWLock);
+
+ pMgr->m_pNext = m_pManagers;
+ m_pManagers = pMgr;
+
+ STRESS_LOG2(LF_CORDB | LF_CLASSLOADER, LL_INFO100,
+ "VirtualCallStubManagerManager::AddStubManager - 0x%p (vptr 0x%p)\n", pMgr, (*(PVOID*)pMgr));
+}
+
+//------------------------------------------------------------------
+// Removes the stub manager from our linked list of virtual stub
+// managers and from the global list.
+//------------------------------------------------------------------
+void VirtualCallStubManagerManager::RemoveStubManager(VirtualCallStubManager *pMgr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ SimpleWriteLockHolder lh(&m_RWLock);
+
+ // Remove this manager from our list.
+ for (VirtualCallStubManager **pCur = &m_pManagers;
+ *pCur != NULL;
+ pCur = &((*pCur)->m_pNext))
+ {
+ if (*pCur == pMgr)
+ *pCur = (*pCur)->m_pNext;
+ }
+
+ // Make sure we don't have a residual pointer left over.
+ m_pCacheElem = NULL;
+
+ STRESS_LOG1(LF_CORDB | LF_CLASSLOADER, LL_INFO100,
+ "VirtualCallStubManagerManager::RemoveStubManager - 0x%p\n", pMgr);
+}
+
+//------------------------------------------------------------------
+// Logs stub usage statistics
+//------------------------------------------------------------------
+void VirtualCallStubManager::LogStats()
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_FORBID_FAULT;
+
+    // Our Init routine assigns all fields atomically, so testing one field should suffice to
+    // test whether the Init succeeded.
+ if (!resolvers)
+ {
+ return;
+ }
+
+ BOOL isShared = parentDomain->IsSharedDomain();
+ BOOL isDefault = parentDomain->IsDefaultDomain();
+
+ // Temp space to use for formatting the output.
+ static const int FMT_STR_SIZE = 160;
+ char szPrintStr[FMT_STR_SIZE];
+ DWORD dwWriteByte;
+
+ if (g_hStubLogFile && (stats.site_write != 0))
+ {
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nStats for %s Manager\r\n", isShared ? "the Shared" :
+ isDefault ? "the Default" : "an Unshared");
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ //output counters
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_counter", stats.site_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_write", stats.site_write);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_write_mono", stats.site_write_mono);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_write_poly", stats.site_write_poly);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nstub data\r\n");
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_lookup_counter", stats.stub_lookup_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_mono_counter", stats.stub_mono_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_poly_counter", stats.stub_poly_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_space", stats.stub_space);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ size_t total, used;
+ g_resolveCache->GetLoadFactor(&total, &used);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_used", used);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_counter", stats.cache_entry_counter);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_space", stats.cache_entry_space);
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+
+ sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\ncache_load:\t%d used, %d total, utilization %#5.2f%%\r\n",
+ used, total, 100.0 * double(used) / double(total));
+ WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
+ }
+
+ resolvers->LogStats();
+ dispatchers->LogStats();
+ lookups->LogStats();
+ cache_entries->LogStats();
+
+ g_site_counter += stats.site_counter;
+ g_stub_lookup_counter += stats.stub_lookup_counter;
+ g_stub_poly_counter += stats.stub_poly_counter;
+ g_stub_mono_counter += stats.stub_mono_counter;
+ g_site_write += stats.site_write;
+ g_site_write_poly += stats.site_write_poly;
+ g_site_write_mono += stats.site_write_mono;
+ g_worker_call += stats.worker_call;
+ g_worker_call_no_patch += stats.worker_call_no_patch;
+ g_worker_collide_to_mono += stats.worker_collide_to_mono;
+ g_stub_space += stats.stub_space;
+ g_cache_entry_counter += stats.cache_entry_counter;
+ g_cache_entry_space += stats.cache_entry_space;
+
+ stats.site_counter = 0;
+ stats.stub_lookup_counter = 0;
+ stats.stub_poly_counter = 0;
+ stats.stub_mono_counter = 0;
+ stats.site_write = 0;
+ stats.site_write_poly = 0;
+ stats.site_write_mono = 0;
+ stats.worker_call = 0;
+ stats.worker_call_no_patch = 0;
+ stats.worker_collide_to_mono = 0;
+ stats.stub_space = 0;
+ stats.cache_entry_counter = 0;
+ stats.cache_entry_space = 0;
+}
+
+void Prober::InitProber(size_t key1, size_t key2, size_t* table)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ } CONTRACTL_END
+
+ _ASSERTE(table);
+
+ keyA = key1;
+ keyB = key2;
+ base = &table[CALL_STUB_FIRST_INDEX];
+ mask = table[CALL_STUB_MASK_INDEX];
+ FormHash();
+}
+
+size_t Prober::Find()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ } CONTRACTL_END
+
+ size_t entry;
+ //if this prober has already visited every slot, there is nothing more to look at.
+ //note, this means that if a prober is going to be reused, the FormHash() function
+ //needs to be called to reset it.
+ if (NoMore())
+ return CALL_STUB_EMPTY_ENTRY;
+ do
+ {
+ entry = Read();
+
+ //if we hit an empty entry, it means it cannot be in the table
+ if(entry==CALL_STUB_EMPTY_ENTRY)
+ {
+ return CALL_STUB_EMPTY_ENTRY;
+ }
+
+ //we have a real entry, see if it is the one we want using our comparer
+ comparer->SetContents(entry);
+ if (comparer->Equals(keyA, keyB))
+ {
+ return entry;
+ }
+ } while(Next()); //Next() returns false when we have visited every slot
+ return CALL_STUB_EMPTY_ENTRY;
+}
+
+size_t Prober::Add(size_t newEntry)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ } CONTRACTL_END
+
+ size_t entry;
+ //if we have visited every slot then there is no room in the table to add this new entry
+ if (NoMore())
+ return CALL_STUB_EMPTY_ENTRY;
+
+ do
+ {
+ entry = Read();
+ if (entry==CALL_STUB_EMPTY_ENTRY)
+ {
+ //it's not in the table and we have the correct empty slot in hand
+ //in which to add it.
+ //try and grab it, if we succeed we break out to add the entry
+            //if we fail, it means a racer swooped in and wrote to
+ //this slot, so we will just keep looking
+ if (GrabEntry(newEntry))
+ {
+ break;
+ }
+
+ // We didn't grab this entry, so keep trying.
+ continue;
+ }
+ //check if this entry is already in the table, if so we are done
+ comparer->SetContents(entry);
+ if (comparer->Equals(keyA, keyB))
+ {
+ return entry;
+ }
+ } while(Next()); //Next() returns false when we have visited every slot
+
+ //if we have visited every slot then there is no room in the table to add this new entry
+ if (NoMore())
+ return CALL_STUB_EMPTY_ENTRY;
+
+ CONSISTENCY_CHECK(Read() == newEntry);
+ return newEntry;
+}
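+
+// Note the contract: Add returns whichever entry ended up in the table for this key,
+// which equals newEntry only if this prober performed the insert. FastTable::Add below
+// relies on exactly that (result == entry) to keep its count accurate under races.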
+
+/*Atomically grab an entry, if it is empty, so we can write in it.
+@TODO: It is not clear if this routine is actually necessary and/or if the
+interlocked compare exchange is necessary as opposed to just a read write with racing allowed.
+If we didn't have it, all that would happen is potentially more duplicates or
+dropped entries, and we are supposed to run correctly even if they
+happen. So in a sense this is a perf optimization, whose value has
+not been measured, i.e. it might be faster without it.
+*/
+BOOL Prober::GrabEntry(size_t entryValue)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return FastInterlockCompareExchangePointer(&base[index],
+ entryValue, static_cast<size_t>(CALL_STUB_EMPTY_ENTRY)) == CALL_STUB_EMPTY_ENTRY;
+}
+
+inline void FastTable::IncrementCount()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // This MUST be an interlocked increment, since BucketTable::GetMoreSpace relies on
+ // the return value of FastTable::isFull to tell it whether or not to continue with
+ // trying to allocate a new FastTable. If two threads race and try to increment this
+ // at the same time and one increment is lost, then the size will be inaccurate and
+ // BucketTable::GetMoreSpace will never succeed, resulting in an infinite loop trying
+ // to add a new entry.
+ FastInterlockIncrement((LONG *)&contents[CALL_STUB_COUNT_INDEX]);
+}
+
+size_t FastTable::Add(size_t entry, Prober* probe)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ } CONTRACTL_END
+
+ size_t result = probe->Add(entry);
+ if (result == entry) IncrementCount();
+ return result;
+}
+
+size_t FastTable::Find(Prober* probe)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return probe->Find();
+}
+
+/*Increase the size of the bucket referenced by the prober p and copy the existing members into it.
+Since duplicates and lost entries are okay, we can build the larger table
+and then try to swap it in. If it turns out that somebody else is racing us,
+the worst that will happen is we drop a few entries on the floor, which is okay.
+If by chance we swap out a table that somebody else is inserting an entry into, that
+is okay too, just another dropped entry. If we detect dups, we just drop them on
+the floor. */
+BOOL BucketTable::GetMoreSpace(const Prober* p)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // This is necessary for synchronization with BucketTable::Reclaim
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ //get ahold of the current bucket
+ Prober probe(p->comparer);
+ size_t index = ComputeBucketIndex(p->keyA, p->keyB);
+
+ FastTable* oldBucket = (FastTable*) Read(index);
+
+ if (!oldBucket->isFull())
+ {
+ return TRUE;
+ }
+ //make a larger bucket
+ size_t numEntries;
+ if (oldBucket->tableSize() == CALL_STUB_MIN_ENTRIES)
+ {
+ numEntries = CALL_STUB_SECONDARY_ENTRIES;
+ }
+ else
+ {
+ numEntries = oldBucket->tableSize()*CALL_STUB_GROWTH_FACTOR;
+ }
+
+ FastTable* newBucket = FastTable::MakeTable(numEntries);
+
+ //copy via insertion from the old to the new bucket
+ size_t* limit = &oldBucket->contents[(oldBucket->tableSize())+CALL_STUB_FIRST_INDEX];
+ size_t* e;
+ for (e = &oldBucket->contents[CALL_STUB_FIRST_INDEX]; e<limit; e++)
+ {
+ size_t moved = *e;
+ if (moved == CALL_STUB_EMPTY_ENTRY)
+ {
+ continue;
+ }
+ probe.comparer->SetContents(moved);
+ probe.InitProber(probe.comparer->KeyA(), probe.comparer->KeyB(), &newBucket->contents[0]);
+ //if the new bucket fills up, give up (this should never happen I think)
+ if (newBucket->Add(moved, &probe) == CALL_STUB_EMPTY_ENTRY)
+ {
+ _ASSERTE(!"This should never happen");
+ return FALSE;
+ }
+ }
+
+ // Doing an interlocked exchange here ensures that if someone has raced and beaten us to
+ // replacing the entry, then we will just put the new bucket we just created in the
+ // dead list instead of risking a race condition which would put a duplicate of the old
+ // bucket in the dead list (and even possibly cause a cyclic list).
+ if (FastInterlockCompareExchangePointer(reinterpret_cast<FastTable * volatile *>(&buckets[index]), newBucket, oldBucket) != oldBucket)
+ oldBucket = newBucket;
+
+ // Link the old onto the "to be reclaimed" list.
+ // Use the dead link field of the abandoned buckets to form the list
+ FastTable* list;
+ do {
+ list = VolatileLoad(&dead);
+ oldBucket->contents[CALL_STUB_DEAD_LINK] = (size_t) list;
+ } while (FastInterlockCompareExchangePointer(&dead, oldBucket, list) != list);
+
+#ifdef _DEBUG
+ {
+ // Validate correctness of the list
+ FastTable *curr = oldBucket;
+ while (curr)
+ {
+ FastTable *next = (FastTable *) curr->contents[CALL_STUB_DEAD_LINK];
+ size_t i = 0;
+ while (next)
+ {
+ next = (FastTable *) next->contents[CALL_STUB_DEAD_LINK];
+ _ASSERTE(curr != next); // Make sure we don't have duplicates
+ _ASSERTE(i++ < SIZE_T_MAX/4); // This just makes sure we don't have a cycle
+ }
+ curr = next;
+ }
+ }
+#endif // _DEBUG
+
+ //update our counters
+ stats.bucket_space_dead += UINT32((oldBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
+ stats.bucket_space -= UINT32((oldBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
+ stats.bucket_space += UINT32((newBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
+ return TRUE;
+}
+
+void BucketTable::Reclaim()
+{
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ }
+ CONTRACTL_END
+
+ //reclaim the dead (abandoned) buckets on the dead list
+ // The key issue is to not reclaim the list if any thread is in a stub or
+ // if any thread is accessing (read or write) the cache tables. So we will declare
+ // those points to be non-gc safe points, and reclaim when the gc syncs the threads
+ //@TODO: add an assert to ensure we are at a gc safe point
+ FastTable* list = dead;
+
+ //see if there is anything to do.
+ //We ignore the race, since we will just pick them up on the next go around
+ if (list == NULL) return;
+
+ //Try to grab the list exclusively; if we fail, it means that either somebody
+ //else grabbed it, or something got added. In either case we just give up and assume
+ //we will catch it on the next go around.
+ //we use an interlock here in case we are called during shutdown not at a gc safe point
+ //in which case the race is between several threads wanting to reclaim.
+ //We are assuming that actually having to do anything is rare,
+ //so that the interlocked overhead is acceptable. If this is not true, then
+ //we need to examine exactly how and when we may be called during shutdown.
+ if (FastInterlockCompareExchangePointer(&dead, NULL, list) != list)
+ return;
+
+#ifdef _DEBUG
+ // Validate correctness of the list
+ FastTable *curr = list;
+ while (curr)
+ {
+ FastTable *next = (FastTable *) curr->contents[CALL_STUB_DEAD_LINK];
+ size_t i = 0;
+ while (next)
+ {
+ next = (FastTable *) next->contents[CALL_STUB_DEAD_LINK];
+ _ASSERTE(curr != next); // Make sure we don't have duplicates
+ _ASSERTE(i++ < SIZE_T_MAX/4); // This just makes sure we don't have a cycle
+ }
+ curr = next;
+ }
+#endif // _DEBUG
+
+ //we now have the list to ourselves, so we can just walk it and clean up
+ while (list)
+ {
+ size_t next = list->contents[CALL_STUB_DEAD_LINK];
+ delete [] (size_t*)list;
+ list = (FastTable*) next;
+ }
+}
+
+//
+// When using SetUpProber the proper values to use for keyA, keyB are:
+//
+// KeyA KeyB
+//-------------------------------------------------------
+// lookups token the stub calling convention
+// dispatchers token the expected MT
+// resolver token the stub calling convention
+// cache_entries token the expected method table
+//
+BOOL BucketTable::SetUpProber(size_t keyA, size_t keyB, Prober *prober)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // This is necessary for synchronization with BucketTable::Reclaim
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ // The buckets[index] table starts off initialized to all CALL_STUB_EMPTY_ENTRY
+ // and we should write each buckets[index] exactly once. However in a multi-proc
+ // scenario each processor could see old memory values that would cause us to
+ // leak memory.
+ //
+ // Since this is a fairly hot code path and it is very rare for buckets[index]
+ // to be CALL_STUB_EMPTY_ENTRY, we can first try a non-volatile read and then
+ // if it looks like we need to create a new FastTable we double check by doing
+ // a volatile read.
+ //
+ // Note that BucketTable::GetMoreSpace also updates buckets[index] when the FastTable
+ // grows to 90% full. (CALL_STUB_LOAD_FACTOR is 90%)
+
+ size_t index = ComputeBucketIndex(keyA, keyB);
+ size_t bucket = buckets[index]; // non-volatile read
+ if (bucket==CALL_STUB_EMPTY_ENTRY)
+ {
+ bucket = Read(index); // volatile read
+ }
+
+ if (bucket==CALL_STUB_EMPTY_ENTRY)
+ {
+ FastTable* newBucket = FastTable::MakeTable(CALL_STUB_MIN_ENTRIES);
+
+ // Doing an interlocked exchange here ensures that if someone has raced and beaten us to
+ // replacing the entry, then we will free the new bucket we just created.
+ bucket = FastInterlockCompareExchangePointer(&buckets[index], reinterpret_cast<size_t>(newBucket), static_cast<size_t>(CALL_STUB_EMPTY_ENTRY));
+ if (bucket == CALL_STUB_EMPTY_ENTRY)
+ {
+ // We successfully wrote newBucket into buckets[index], overwriting the CALL_STUB_EMPTY_ENTRY value
+ stats.bucket_space += UINT32((newBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
+ bucket = (size_t) newBucket;
+ }
+ else
+ {
+ // Someone else wrote buckets[index] before us
+ // and bucket contains the value that they wrote
+ // We must free the memory that we allocated
+ // and we will use the value that someone else wrote
+ delete newBucket;
+ newBucket = (FastTable*) bucket;
+ }
+ }
+
+ return ((FastTable*)(bucket))->SetUpProber(keyA, keyB, prober);
+}
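+
+// For illustration only -- a minimal sketch of the probe/find/add protocol that
+// SetUpProber (above) and Add (below) expose. The names pComparer and newEntry
+// are assumed; a Prober is stack-allocated by the caller:
+//
+//     Prober probe(pComparer);                      // pComparer: an Entry subclass
+//     if (table->SetUpProber(keyA, keyB, &probe))   // hash the keys, pick a bucket
+//     {
+//         size_t value = probe.Find();              // look for an existing entry
+//         if (value == CALL_STUB_EMPTY_ENTRY)
+//             value = table->Add(newEntry, &probe); // may return a racing winner's
+//     }                                             // entry instead of newEntry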
+
+size_t BucketTable::Add(size_t entry, Prober* probe)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // This is necessary for synchronization with BucketTable::Reclaim
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END
+
+ FastTable* table = (FastTable*)(probe->items());
+ size_t result = table->Add(entry,probe);
+ if (result != CALL_STUB_EMPTY_ENTRY)
+ {
+ return result;
+ }
+ //we must have missed count(s) and the table is now full, so let's
+ //grow and retry (this should be rare)
+ if (!GetMoreSpace(probe)) return CALL_STUB_EMPTY_ENTRY;
+ if (!SetUpProber(probe->keyA, probe->keyB, probe)) return CALL_STUB_EMPTY_ENTRY;
+ return Add(entry, probe); //recurse in for the retry to write the entry
+}
+
+void BucketTable::LogStats()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Update stats
+ g_bucket_space += stats.bucket_space;
+ g_bucket_space_dead += stats.bucket_space_dead;
+
+ stats.bucket_space = 0;
+ stats.bucket_space_dead = 0;
+}
+
+DispatchCache::DispatchCache()
+#ifdef CHAIN_LOOKUP
+ : m_writeLock(CrstStubDispatchCache, CRST_UNSAFE_ANYMODE)
+#endif
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END
+
+ //initialize the cache to be empty, i.e. all slots point to the empty entry
+ ResolveCacheElem* e = new ResolveCacheElem();
+ e->pMT = (void *) (-1); //force all method tables to be misses
+ e->pNext = NULL; // null terminate the chain for the empty entry
+ empty = e;
+ for (int i = 0;i<CALL_STUB_CACHE_SIZE;i++)
+ ClearCacheEntry(i);
+
+ // Initialize statistics
+ memset(&stats, 0, sizeof(stats));
+#ifdef STUB_LOGGING
+ memset(&cacheData, 0, sizeof(cacheData));
+#endif
+}
+
+ResolveCacheElem* DispatchCache::Lookup(size_t token, UINT16 tokenHash, void* mt)
+{
+ WRAPPER_NO_CONTRACT;
+ if (tokenHash == INVALID_HASH)
+ tokenHash = HashToken(token);
+ UINT16 idx = HashMT(tokenHash, mt);
+ ResolveCacheElem *pCurElem = GetCacheEntry(idx);
+
+#if defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
+ BOOL chainedLookup = FALSE;
+#endif
+ // No need to conditionalize on CHAIN_LOOKUP, since this loop
+ // will only run once when CHAIN_LOOKUP is undefined, since
+ // there will only ever be one element in a bucket (chain of 1).
+ while (pCurElem != empty) {
+ if (pCurElem->Equals(token, mt)) {
+ return pCurElem;
+ }
+#if defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
+ // Only want to inc the counter once per chain search.
+ if (pCurElem == GetCacheEntry(idx)) {
+ chainedLookup = TRUE;
+ g_chained_lookup_external_call_counter++;
+ }
+#endif // defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
+ pCurElem = pCurElem->Next();
+ }
+#if defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
+ if (chainedLookup) {
+ g_chained_lookup_external_miss_counter++;
+ }
+#endif // defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
+ return NULL; /* with chain lookup disabled this returns NULL */
+}
+
+// returns true if we wrote the resolver cache entry with the new elem
+// also returns true if the cache entry already contained elem (the miss case)
+//
+BOOL DispatchCache::Insert(ResolveCacheElem* elem, InsertKind insertKind)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ FORBID_FAULT;
+ PRECONDITION(insertKind != IK_NONE);
+ } CONTRACTL_END;
+
+#ifdef CHAIN_LOOKUP
+ CrstHolder lh(&m_writeLock);
+#endif
+
+ // Figure out what bucket this element belongs in
+ UINT16 tokHash = HashToken(elem->token);
+ UINT16 hash = HashMT(tokHash, elem->pMT);
+ UINT16 idx = hash;
+ BOOL write = FALSE;
+ BOOL miss = FALSE;
+ BOOL hit = FALSE;
+ BOOL collide = FALSE;
+
+#ifdef _DEBUG
+ elem->debug_hash = tokHash;
+ elem->debug_index = idx;
+#endif // _DEBUG
+
+ ResolveCacheElem* cell = GetCacheEntry(idx);
+
+#ifdef CHAIN_LOOKUP
+ // There is the possibility of a race where two threads will
+ // try to generate a ResolveCacheElem for the same tuple. The
+ // first thread will get the lock and insert the element; the
+ // second thread coming in should detect this and not re-add
+ // the element, since it is likely already at the start of the
+ // list, and re-adding it would make the element loop to itself.
+ if (Lookup(elem->token, tokHash, elem->pMT))
+#else // !CHAIN_LOOKUP
+ if (cell == elem)
+#endif // !CHAIN_LOOKUP
+ {
+ miss = TRUE;
+ write = FALSE;
+ }
+ else
+ {
+ if (cell == empty)
+ {
+ hit = TRUE;
+ write = TRUE;
+ }
+ }
+ CONSISTENCY_CHECK(!(hit && miss));
+
+ // If we didn't have a miss or a hit then we had a collision with
+ // a non-empty entry in our resolver cache
+ if (!hit && !miss)
+ {
+ collide = TRUE;
+
+#ifdef CHAIN_LOOKUP
+ // Always insert the entry into the chain
+ write = TRUE;
+#else // !CHAIN_LOOKUP
+
+ if (STUB_COLLIDE_WRITE_PCT < 100)
+ {
+ UINT32 coin = UINT32(GetRandomInt(100));
+
+ write = (coin < STUB_COLLIDE_WRITE_PCT);
+ }
+ else
+ {
+ write = TRUE;
+ }
+
+#endif // !CHAIN_LOOKUP
+ }
+
+ if (write)
+ {
+#ifdef CHAIN_LOOKUP
+ // We create a list with the last pNext pointing at empty
+ elem->pNext = cell;
+#else // !CHAIN_LOOKUP
+ elem->pNext = empty;
+#endif // !CHAIN_LOOKUP
+ SetCacheEntry(idx, elem);
+ stats.insert_cache_write++;
+ }
+
+ LOG((LF_STUBS, LL_INFO1000, "%8s Insert(token" FMT_ADDR "MethodTable" FMT_ADDR ") at [%03x] %7s %5s \n",
+ (insertKind == IK_DISPATCH) ? "Dispatch" : (insertKind == IK_RESOLVE) ? "Resolve" : "External",
+ DBG_ADDR(elem->token), DBG_ADDR(elem->pMT), hash,
+ hit ? "HIT" : miss ? "MISS" : "COLLIDE", write ? "WRITE" : "KEEP"));
+
+ if (insertKind == IK_DISPATCH)
+ stats.insert_cache_dispatch++;
+ else if (insertKind == IK_RESOLVE)
+ stats.insert_cache_resolve++;
+ else if (insertKind == IK_SHARED)
+ stats.insert_cache_shared++;
+ else if (insertKind == IK_EXTERNAL)
+ stats.insert_cache_external++;
+
+ if (hit)
+ stats.insert_cache_hit++;
+ else if (miss)
+ stats.insert_cache_miss++;
+ else if (collide)
+ stats.insert_cache_collide++;
+
+ return write || miss;
+}
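+
+// For illustration, a summary of how Insert classifies its outcome above:
+//
+//   already present (cell == elem, or found in the chain) -> miss (no write)
+//   cell == empty                                         -> hit (write)
+//   anything else                                         -> collide (write always
+//     when chaining, otherwise with probability STUB_COLLIDE_WRITE_PCT)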
+
+#ifdef CHAIN_LOOKUP
+void DispatchCache::PromoteChainEntry(ResolveCacheElem* elem)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ } CONTRACTL_END;
+
+ CrstHolder lh(&m_writeLock);
+ g_chained_entry_promoted++;
+
+ // Figure out what bucket this element belongs in
+ UINT16 tokHash = HashToken(elem->token);
+ UINT16 hash = HashMT(tokHash, elem->pMT);
+ UINT16 idx = hash;
+
+ ResolveCacheElem *curElem = GetCacheEntry(idx);
+
+ // If someone raced in and promoted this element before us,
+ // then we can just return. Furthermore, it would be an
+ // error if we performed the below code, since we'd end up
+ // with a self-referential element and an infinite loop.
+ if (curElem == elem)
+ {
+ return;
+ }
+
+ // Now loop through the chain to find the element that points
+ // to the element we're promoting, so we can remove
+ // it from the chain.
+ while (curElem->Next() != elem)
+ {
+ curElem = curElem->pNext;
+ CONSISTENCY_CHECK(curElem != NULL);
+ }
+
+ // Remove the element from the chain
+ CONSISTENCY_CHECK(curElem->pNext == elem);
+ curElem->pNext = elem->pNext;
+
+ // Set the promoted entry to the head of the list.
+ elem->pNext = GetCacheEntry(idx);
+ SetCacheEntry(idx, elem);
+}
+#endif // CHAIN_LOOKUP
+
+void DispatchCache::LogStats()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ g_insert_cache_external += stats.insert_cache_external;
+ g_insert_cache_shared += stats.insert_cache_shared;
+ g_insert_cache_dispatch += stats.insert_cache_dispatch;
+ g_insert_cache_resolve += stats.insert_cache_resolve;
+ g_insert_cache_hit += stats.insert_cache_hit;
+ g_insert_cache_miss += stats.insert_cache_miss;
+ g_insert_cache_collide += stats.insert_cache_collide;
+ g_insert_cache_write += stats.insert_cache_write;
+
+ stats.insert_cache_external = 0;
+ stats.insert_cache_shared = 0;
+ stats.insert_cache_dispatch = 0;
+ stats.insert_cache_resolve = 0;
+ stats.insert_cache_hit = 0;
+ stats.insert_cache_miss = 0;
+ stats.insert_cache_collide = 0;
+ stats.insert_cache_write = 0;
+}
+
+/* The following table has entries with the following properties:
+ 1. Each entry is 12 bits wide, with 5, 6 or 7 one bits and 5, 6 or 7 zero bits.
+ 2. For each bit position, roughly half of the entries have a one bit and half have a zero bit.
+ 3. Adjacent entries, when xor-ed, differ in 5, 6 or 7 bit positions.
+*/
+#ifdef _WIN64
+static const UINT16 tokenHashBits[64] =
+#else // !_WIN64
+static const UINT16 tokenHashBits[32] =
+#endif // !_WIN64
+{
+ 0xcd5, 0x8b9, 0x875, 0x439,
+ 0xbf0, 0x38d, 0xa5b, 0x6a7,
+ 0x78a, 0x9c8, 0xee2, 0x3d3,
+ 0xd94, 0x54e, 0x698, 0xa6a,
+ 0x753, 0x932, 0x4b7, 0x155,
+ 0x3a7, 0x9c8, 0x4e9, 0xe0b,
+ 0xf05, 0x994, 0x472, 0x626,
+ 0x15c, 0x3a8, 0x56e, 0xe2d,
+
+#ifdef _WIN64
+ 0xe3c, 0xbe2, 0x58e, 0x0f3,
+ 0x54d, 0x70f, 0xf88, 0xe2b,
+ 0x353, 0x153, 0x4a5, 0x943,
+ 0xaf2, 0x88f, 0x72e, 0x978,
+ 0xa13, 0xa0b, 0xc3c, 0xb72,
+ 0x0f7, 0x49a, 0xdd0, 0x366,
+ 0xd84, 0xba5, 0x4c5, 0x6bc,
+ 0x8ec, 0x0b9, 0x617, 0x85c,
+#endif // _WIN64
+};
+
+/*static*/ UINT16 DispatchCache::HashToken(size_t token)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ UINT16 hash = 0;
+ int index = 0;
+
+ // Note if you change the number of bits in CALL_STUB_CACHE_NUM_BITS
+ // then we have to recompute the hash function
+ // Though making the number of bits smaller should still be OK
+ static_assert_no_msg(CALL_STUB_CACHE_NUM_BITS <= 12);
+
+ while (token)
+ {
+ if (token & 1)
+ hash ^= tokenHashBits[index];
+
+ index++;
+ token >>= 1;
+ }
+ _ASSERTE((hash & ~CALL_STUB_CACHE_MASK) == 0);
+ return hash;
+}
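+
+// A worked example of the xor-folding above: for token = 5 (binary 101, bits 0
+// and 2 set) the loop computes
+//
+//     hash = tokenHashBits[0] ^ tokenHashBits[2] = 0xcd5 ^ 0x875 = 0x4a0
+//
+// which already fits within CALL_STUB_CACHE_MASK, as the assert checks.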
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+DispatchCache::Iterator::Iterator(DispatchCache *pCache) : m_pCache(pCache), m_curBucket(-1)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pCache));
+ } CONTRACTL_END;
+
+ // Move to the first valid entry
+ NextValidBucket();
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+void DispatchCache::Iterator::Next()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ if (!IsValid()) {
+ return;
+ }
+
+ // Move to the next element in the chain
+ m_ppCurElem = &((*m_ppCurElem)->pNext);
+
+ // If the next element was the empty sentinel entry, move to the next valid bucket.
+ if (*m_ppCurElem == m_pCache->empty) {
+ NextValidBucket();
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+// This doesn't actually delete the entry, it just unlinks it from the chain.
+// Returns the unlinked entry.
+ResolveCacheElem *DispatchCache::Iterator::UnlinkEntry()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CONSISTENCY_CHECK(IsValid());
+ } CONTRACTL_END;
+ ResolveCacheElem *pUnlinkedEntry = *m_ppCurElem;
+ *m_ppCurElem = (*m_ppCurElem)->pNext;
+ pUnlinkedEntry->pNext = m_pCache->empty;
+ // If unlinking this entry took us to the end of this bucket, we need to move to the next one.
+ if (*m_ppCurElem == m_pCache->empty) {
+ NextValidBucket();
+ }
+ return pUnlinkedEntry;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+void DispatchCache::Iterator::NextValidBucket()
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CONSISTENCY_CHECK(IsValid());
+ } CONTRACTL_END;
+
+ // Move to the next bucket that contains a cache entry
+ do {
+ NextBucket();
+ } while (IsValid() && *m_ppCurElem == m_pCache->empty);
+}
+
+#endif // !DACCESS_COMPILE
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+VirtualCallStubManager *VirtualCallStubManagerManager::FindVirtualCallStubManager(PCODE stubAddress)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ SUPPORTS_DAC;
+
+#ifndef DACCESS_COMPILE
+ // Check the cached element
+ {
+ VirtualCallStubManager *pMgr = m_pCacheElem;
+ if (pMgr != NULL && pMgr->CheckIsStub_Internal(stubAddress))
+ {
+ return pMgr;
+ }
+ }
+
+ // Check the current and shared domains.
+ {
+ Thread *pThread = GetThread();
+
+ if (pThread != NULL)
+ {
+ // Check the current domain
+ {
+ BaseDomain *pDom = pThread->GetDomain();
+ VirtualCallStubManager *pMgr = pDom->GetLoaderAllocator()->GetVirtualCallStubManager();
+ if (pMgr->CheckIsStub_Internal(stubAddress))
+ {
+ m_pCacheElem = pMgr;
+ return pMgr;
+ }
+ }
+ // Check the shared domain
+ {
+ BaseDomain *pDom = SharedDomain::GetDomain();
+ VirtualCallStubManager *pMgr = pDom->GetLoaderAllocator()->GetVirtualCallStubManager();
+ if (pMgr->CheckIsStub_Internal(stubAddress))
+ {
+ m_pCacheElem = pMgr;
+ return pMgr;
+ }
+ }
+ }
+ }
+#endif
+
+ // If both previous attempts fail, run through the list. This is likely
+ // because the thread is a debugger thread running outside of the domain
+ // that owns the target stub.
+ {
+ VirtualCallStubManagerIterator it =
+ VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();
+
+ while (it.Next())
+ {
+ if (it.Current()->CheckIsStub_Internal(stubAddress))
+ {
+#ifndef DACCESS_COMPILE
+ m_pCacheElem = it.Current();
+#endif
+ return it.Current();
+ }
+ }
+ }
+
+ // No VirtualCallStubManager owns this address.
+ return NULL;
+}
+
+static VirtualCallStubManager * const IT_START = (VirtualCallStubManager *)(-1);
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+// Move to the next element. Iterators are created at
+// start-1, so must call Next before using Current
+BOOL VirtualCallStubManagerIterator::Next()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (m_fIsStart)
+ {
+ m_fIsStart = FALSE;
+ }
+ else if (m_pCurMgr != NULL)
+ {
+ m_pCurMgr = m_pCurMgr->m_pNext;
+ }
+
+ return (m_pCurMgr != NULL);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+// Get the current contents of the iterator
+VirtualCallStubManager *VirtualCallStubManagerIterator::Current()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ CONSISTENCY_CHECK(!m_fIsStart);
+ CONSISTENCY_CHECK(CheckPointer(m_pCurMgr));
+
+ return m_pCurMgr;
+}
+
+#ifndef DACCESS_COMPILE
+/////////////////////////////////////////////////////////////////////////////////////////////
+VirtualCallStubManagerManager::VirtualCallStubManagerManager()
+ : m_pManagers(NULL),
+ m_pCacheElem(NULL),
+ m_RWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+/* static */
+void VirtualCallStubManagerManager::InitStatic()
+{
+ STANDARD_VM_CONTRACT;
+
+ CONSISTENCY_CHECK(g_pManager == NULL);
+ g_pManager = new VirtualCallStubManagerManager();
+}
+#endif
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+VirtualCallStubManagerIterator VirtualCallStubManagerManager::IterateVirtualCallStubManagers()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ VirtualCallStubManagerIterator it(VirtualCallStubManagerManager::GlobalManager());
+ return it;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+BOOL VirtualCallStubManagerManager::CheckIsStub_Internal(
+ PCODE stubStartAddress)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ VirtualCallStubManager *pMgr = FindVirtualCallStubManager(stubStartAddress);
+ return (pMgr != NULL);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+BOOL VirtualCallStubManagerManager::DoTraceStub(
+ PCODE stubStartAddress,
+ TraceDestination *trace)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Find the owning manager. We should succeed, since presumably someone already
+ // called CheckIsStub on us to find out that we own the address, and already
+ // called TraceManager to initiate a trace.
+ VirtualCallStubManager *pMgr = FindVirtualCallStubManager(stubStartAddress);
+ CONSISTENCY_CHECK(CheckPointer(pMgr));
+
+ return pMgr->DoTraceStub(stubStartAddress, trace);
+}
+
+#ifndef DACCESS_COMPILE
+/////////////////////////////////////////////////////////////////////////////////////////////
+MethodDesc *VirtualCallStubManagerManager::Entry2MethodDesc(
+ PCODE stubStartAddress,
+ MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END
+
+ if (pMT == NULL)
+ return NULL;
+
+ VirtualCallStubManager::StubKind sk;
+
+ // Find the owning manager.
+ VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(stubStartAddress, &sk);
+ if (pMgr == NULL)
+ return NULL;
+
+ // Do the full resolve
+ size_t token = VirtualCallStubManager::GetTokenFromStubQuick(pMgr, stubStartAddress, sk);
+
+ CONSISTENCY_CHECK(!pMT->IsTransparentProxy());
+
+ PCODE target = NULL;
+ VirtualCallStubManager::Resolver(pMT, token, &target);
+
+ return pMT->GetMethodDescForSlotAddress(target);
+}
+#endif
+
+#ifdef DACCESS_COMPILE
+void VirtualCallStubManagerManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+ WRAPPER_NO_CONTRACT;
+ VirtualCallStubManagerIterator it = IterateVirtualCallStubManagers();
+ while (it.Next())
+ {
+ it.Current()->DoEnumMemoryRegions(flags);
+ }
+}
+#endif
+
+//----------------------------------------------------------------------------
+BOOL VirtualCallStubManagerManager::TraceManager(
+ Thread *thread, TraceDestination *trace,
+ T_CONTEXT *pContext, BYTE **pRetAddr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Find the owning manager. We should succeed, since presumably someone already
+ // called CheckIsStub on us to find out that we own the address.
+ VirtualCallStubManager *pMgr = FindVirtualCallStubManager(GetIP(pContext));
+ CONSISTENCY_CHECK(CheckPointer(pMgr));
+
+ // Forward the call to the appropriate manager.
+ return pMgr->TraceManager(thread, trace, pContext, pRetAddr);
+}
diff --git a/src/vm/virtualcallstub.h b/src/vm/virtualcallstub.h
new file mode 100644
index 0000000000..cc68d2fe01
--- /dev/null
+++ b/src/vm/virtualcallstub.h
@@ -0,0 +1,1618 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: VirtualCallStub.h
+//
+
+
+
+//
+
+// See code:VirtualCallStubManager for details
+//
+// ============================================================================
+
+#ifndef _VIRTUAL_CALL_STUB_H
+#define _VIRTUAL_CALL_STUB_H
+
+#define CHAIN_LOOKUP
+
+#if defined(_TARGET_X86_)
+// If this is uncommented, leaves a file "StubLog_<pid>.log" with statistics on the behavior
+// of stub-based interface dispatch.
+//#define STUB_LOGGING
+#endif
+
+#include "stubmgr.h"
+
+/////////////////////////////////////////////////////////////////////////////////////
+// Forward class declarations
+class FastTable;
+class BucketTable;
+class Entry;
+class Prober;
+class VirtualCallStubManager;
+class VirtualCallStubManagerManager;
+struct LookupHolder;
+struct DispatchHolder;
+struct ResolveHolder;
+
+/////////////////////////////////////////////////////////////////////////////////////
+// Forward function declarations
+extern "C" void InContextTPQuickDispatchAsmStub();
+
+extern "C" PCODE STDCALL StubDispatchFixupWorker(TransitionBlock * pTransitionBlock,
+ TADDR siteAddrForRegisterIndirect,
+ DWORD sectionIndex,
+ Module * pModule);
+
+extern "C" PCODE STDCALL VSD_ResolveWorker(TransitionBlock * pTransitionBlock,
+ TADDR siteAddrForRegisterIndirect,
+ size_t token
+#ifndef _TARGET_X86_
+ , UINT_PTR flags
+#endif
+ );
+
+#ifdef FEATURE_REMOTING
+// This is used by TransparentProxyWorkerStub to take a stub address (token) and a
+// MethodTable and return the target. It will look in the cache first, and if not found
+// will call the resolver and then put the result into the cache.
+extern "C" PCODE STDCALL VSD_GetTargetForTPWorkerQuick(TransparentProxyObject * orTP, size_t token);
+extern "C" PCODE STDCALL VSD_GetTargetForTPWorker(TransitionBlock * pTransitionBlock, size_t token);
+#endif
+
+/////////////////////////////////////////////////////////////////////////////////////
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+typedef INT32 DISPL;
+#endif
+
+/////////////////////////////////////////////////////////////////////////////////////
+// Represents the struct that is added to the resolve cache
+// NOTE: If you change the layout of this struct, you'll need to update various
+// ASM helpers in VirtualCallStubCpu that rely on offsets of members.
+//
+struct ResolveCacheElem
+{
+ void *pMT;
+ size_t token; // DispatchToken
+ void *target;
+
+ // These are used for chaining
+ ResolveCacheElem *pNext;
+ ResolveCacheElem *Next()
+ { LIMITED_METHOD_CONTRACT; return VolatileLoad(&pNext); }
+
+#ifdef _DEBUG
+ UINT16 debug_hash;
+ UINT16 debug_index;
+#endif // _DEBUG
+
+ BOOL Equals(size_t token, void *pMT)
+ { LIMITED_METHOD_CONTRACT; return (this->pMT == pMT && this->token == token); }
+
+ BOOL Equals(ResolveCacheElem *pElem)
+ { WRAPPER_NO_CONTRACT; return Equals(pElem->token, pElem->pMT); }
+
+};
+
+enum
+{
+ e_resolveCacheElem_sizeof_mt = sizeof(void *),
+ e_resolveCacheElem_sizeof_token = sizeof(size_t),
+ e_resolveCacheElem_sizeof_target = sizeof(void *),
+ e_resolveCacheElem_sizeof_next = sizeof(ResolveCacheElem *),
+
+ e_resolveCacheElem_offset_mt = 0,
+ e_resolveCacheElem_offset_token = e_resolveCacheElem_offset_mt + e_resolveCacheElem_sizeof_mt,
+ e_resolveCacheElem_offset_target = e_resolveCacheElem_offset_token + e_resolveCacheElem_sizeof_token,
+ e_resolveCacheElem_offset_next = e_resolveCacheElem_offset_target + e_resolveCacheElem_sizeof_target,
+};
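+
+// For illustration, the enum above could be cross-checked against the struct
+// layout along these lines (a sketch only; the ASM helpers consume the enum
+// values directly):
+//
+//     static_assert_no_msg(offsetof(ResolveCacheElem, token) ==
+//                          e_resolveCacheElem_offset_token);
+//     static_assert_no_msg(offsetof(ResolveCacheElem, pNext) ==
+//                          e_resolveCacheElem_offset_next);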
+
+/////////////////////////////////////////////////////////////////////////////////////
+// A utility class to help manipulate a call site
+struct StubCallSite
+{
+ friend class VirtualCallStubManager;
+
+private:
+
+ // On x86 there are four possible kinds of callsites when you take into account all features
+ // Relative: direct call, e.g. "call addr". Not used currently.
+ // RelativeIndirect (JmpRel): indirect call through a relative address, e.g. "call [addr]"
+ // RegisterIndirect: indirect call through a register, e.g. "call [eax]"
+ // DelegateCallSite: anything else, tail called through a register by shuffle thunk, e.g. "jmp [eax]"
+ //
+ // On all other platforms we always use an indirect call through an indirection cell
+ // In these cases all calls are made by the platform equivalent of "call [addr]".
+ //
+ // DelegateCallSites are peculiar in that they can come in a variety of forms:
+ // a direct delegate call has a sequence defined by the jit, but multicast and secure delegates
+ // are defined in a stub and have a different shape
+ //
+ PTR_PCODE m_siteAddr; // Stores the address of an indirection cell
+ PCODE m_returnAddr;
+
+public:
+
+#if defined(_TARGET_X86_)
+ StubCallSite(TADDR siteAddrForRegisterIndirect, PCODE returnAddr);
+
+ PCODE GetCallerAddress();
+#else // !defined(_TARGET_X86_)
+ // On platforms where we always use an indirection cell things
+ // are much simpler - the siteAddr always stores a pointer to a
+ // value that in turn points to the indirection cell.
+
+ StubCallSite(TADDR siteAddr, PCODE returnAddr)
+ { LIMITED_METHOD_CONTRACT; m_siteAddr = dac_cast<PTR_PCODE>(siteAddr); m_returnAddr = returnAddr; }
+
+ PCODE GetCallerAddress() { LIMITED_METHOD_CONTRACT; return m_returnAddr; }
+#endif // !defined(_TARGET_X86_)
+
+ PCODE GetSiteTarget() { WRAPPER_NO_CONTRACT; return *(GetIndirectCell()); }
+ void SetSiteTarget(PCODE newTarget);
+ PTR_PCODE GetIndirectCell() { LIMITED_METHOD_CONTRACT; return dac_cast<PTR_PCODE>(m_siteAddr); }
+ PTR_PCODE * GetIndirectCellAddress() { LIMITED_METHOD_CONTRACT; return &m_siteAddr; }
+
+ PCODE GetReturnAddress() { LIMITED_METHOD_CONTRACT; return m_returnAddr; }
+};
+
+#ifdef FEATURE_PREJIT
+extern "C" void StubDispatchFixupStub(); // for lazy fixup of ngen call sites
+#endif
+
+// These are the assembly language entry points that the stubs use when they want to go into the EE
+
+extern "C" void ResolveWorkerAsmStub(); // resolve a token and transfer control to that method
+extern "C" void ResolveWorkerChainLookupAsmStub(); // for chaining of entries in the cache
+
+#ifdef _TARGET_X86_
+extern "C" void BackPatchWorkerAsmStub(); // backpatch a call site to point to a different stub
+#endif // _TARGET_X86_
+
+
+typedef VPTR(class VirtualCallStubManager) PTR_VirtualCallStubManager;
+
+// VirtualCallStubManager is the heart of the stub dispatch logic. See the book of the runtime entry
+//
+// file:../../doc/BookOfTheRuntime/ClassLoader/VirtualStubDispatchDesign.doc
+//
+// The basic idea is that a call to an interface (it could also be used for virtual calls in general, but we
+// do not do this) is simply the code
+//
+// call [DispatchCell]
+//
+// Where we make sure 'DispatchCell' points at stubs that will do the right thing. DispatchCell is writable
+// so we can update the code over time. There are three basic types of stubs that the dispatch cell can point
+// to.
+// * Lookup: The initial stub that has no 'fast path' and simply pushes an ID for the interface being called
+// and calls into the runtime at code:VirtualCallStubManager.ResolveWorkerStatic.
+// * Dispatch: Lookup stubs are patched to this stub which has a fast path that checks for a particular
+// Method Table and if that fails jumps to code that
+// * Decrements a 'missCount' (which starts out as code:STUB_MISS_COUNT_VALUE). If this count goes to zero,
+// code:VirtualCallStubManager.BackPatchWorkerStatic is called and morphs the call site into a resolve stub.
+// (However, since this decrementing logic is SHARED among all dispatch stubs, it may take
+// multiples of code:STUB_MISS_COUNT_VALUE if multiple call sites are actively polymorphic; this
+// seems unlikely.)
+// * Calls a resolve stub. (Whenever a dispatch stub is created, it always has a corresponding resolve
+// stub, but the resolve stubs are shared among many dispatch stubs.)
+// * Resolve: see code:ResolveStub. This looks up the Method table in a process wide cache (see
+// code:ResolveCacheElem), and if found, jumps to it. This code path is about 17 instructions long (so
+// pretty fast, but certainly much slower than a normal call). If the method table is not found in
+// the cache, it calls into the runtime code:VirtualCallStubManager.ResolveWorkerStatic, which
+// populates it.
+// So the general progression is that call sites' cells
+// * start out life pointing to a lookup stub
+// * On the first call they get updated to point at a dispatch stub. When this misses, it calls a resolve stub,
+// which populates the resolve stub's cache, but does not update the call site's cell (thus it is still
+// pointing at the dispatch stub).
+// * After code:STUB_MISS_COUNT_VALUE misses, we update the call site's cell to point directly at the
+// resolve stub (thus avoiding the overhead of the quick check that always seems to be failing and
+// the miss count update).
+//
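+// In outline (an illustrative summary of the progression above, not additional
+// behavior):
+//
+//     Lookup --(first call)--> Dispatch --(STUB_MISS_COUNT_VALUE misses)--> Resolve
+//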
+// QUESTION: What are the lifetimes of the various stubs and hash table entries?
+//
+// QUESTION: There does not seem to be any logic that will change a call site's cell once it becomes a
+// Resolve stub. Thus once a particular call site becomes a Resolve stub we live with the Resolve stub's
+// (in)efficiency forever.
+//
+// see code:#StubDispatchNotes for more
+class VirtualCallStubManager : public StubManager
+{
+ friend class ClrDataAccess;
+ friend class VirtualCallStubManagerManager;
+ friend class VirtualCallStubManagerIterator;
+
+ VPTR_VTABLE_CLASS(VirtualCallStubManager, StubManager)
+
+public:
+#ifdef _DEBUG
+ virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "VirtualCallStubManager"; }
+#endif
+
+ // The reason for our existence, return a callstub for type id and slot number
+ // where type id = 0 for the class contract (i.e. a virtual call), and type id > 0 for an
+ // interface invoke where the id indicates which interface it is.
+ //
+ // The function is idempotent, i.e.
+ // you'll get the same callstub twice if you call it with identical inputs.
+ PCODE GetCallStub(TypeHandle ownerType, MethodDesc *pMD);
+ PCODE GetCallStub(TypeHandle ownerType, DWORD slot);
+
+ // Generate a fresh indirection cell.
+ BYTE* GenerateStubIndirection(PCODE stub, BOOL fUseRecycledCell = FALSE);
+
+ // Set up static data structures - called during EEStartup
+ static void InitStatic();
+ static void UninitStatic();
+
+ // Per instance initialization - called during AppDomain::Init and ::Uninit and for collectible loader allocators
+ void Init(BaseDomain* pDomain, LoaderAllocator *pLoaderAllocator);
+ void Uninit();
+
+ //@TODO: the logging should be tied into the VM's normal logging mechanisms,
+ //@TODO: for now we just always write a short log file called "StubLog_<pid>.log"
+ static void StartupLogging();
+ static void LoggingDump();
+ static void FinishLogging();
+
+ static void ResetCache();
+
+ // Reclaim/rearrange any structures that can only be done during a gc sync point.
+ // This is the mechanism we are using to avoid synchronization of a lot of our
+ // cache and hash table accesses. We are requiring that during a gc sync point we are not
+ // executing any stub code at all, hence at this time we are serialized on a single thread (gc)
+ // and no other thread is accessing the data structures.
+ static void ReclaimAll();
+ void Reclaim();
+
+#ifndef DACCESS_COMPILE
+ VirtualCallStubManager()
+ : StubManager(),
+ lookup_rangeList(),
+ resolve_rangeList(),
+ dispatch_rangeList(),
+ cache_entry_rangeList(),
+ parentDomain(NULL),
+ isCollectible(false),
+ m_initialReservedMemForHeaps(NULL),
+ m_FreeIndCellList(NULL),
+ m_RecycledIndCellList(NULL),
+ indcell_heap(NULL),
+ cache_entry_heap(NULL),
+ lookup_heap(NULL),
+ dispatch_heap(NULL),
+ resolve_heap(NULL),
+#ifdef _TARGET_AMD64_
+ m_fShouldAllocateLongJumpDispatchStubs(FALSE),
+#endif
+ lookups(NULL),
+ cache_entries(NULL),
+ dispatchers(NULL),
+ resolvers(NULL),
+ m_counters(NULL),
+ m_cur_counter_block(NULL),
+ m_cur_counter_block_for_reclaim(NULL),
+ m_cur_counter_block_for_reclaim_index(NULL),
+ m_pNext(NULL)
+ {
+ LIMITED_METHOD_CONTRACT;
+ ZeroMemory(&stats, sizeof(stats));
+ }
+
+ ~VirtualCallStubManager();
+#endif // !DACCESS_COMPILE
+
+
+ enum StubKind {
+ SK_UNKNOWN,
+ SK_LOOKUP, // Lookup Stubs are SLOW stubs that simply call into the runtime to do all work.
+ SK_DISPATCH, // Dispatch Stubs have a fast check for one type otherwise jumps to runtime. Works for monomorphic sites
+ SK_RESOLVE, // Resolve Stubs do a hash lookup before falling back to the runtime. Works for polymorphic sites.
+ SK_BREAKPOINT
+ };
+
+ // peek at the assembly code and predict which kind of a stub we have
+ StubKind predictStubKind(PCODE stubStartAddress);
+
+ /* know thine own stubs. It is possible that when multiple
+ virtualcallstub managers are built that these may need to become
+ non-static, and the callers modified accordingly */
+ StubKind getStubKind(PCODE stubStartAddress)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ // Rather than calling IsInRange(stubStartAddress) for each possible stub kind
+ // we can peek at the assembly code and predict which kind of a stub we have
+ StubKind predictedKind = predictStubKind(stubStartAddress);
+
+ if (predictedKind == SK_DISPATCH)
+ {
+ if (isDispatchingStub(stubStartAddress))
+ return SK_DISPATCH;
+ }
+ else if (predictedKind == SK_LOOKUP)
+ {
+ if (isLookupStub(stubStartAddress))
+ return SK_LOOKUP;
+ }
+ else if (predictedKind == SK_RESOLVE)
+ {
+ if (isResolvingStub(stubStartAddress))
+ return SK_RESOLVE;
+ }
+
+ // This is the slow case. If the prediction returned SK_UNKNOWN or SK_BREAKPOINT,
+ // or the prediction was found to be incorrect when checked against the RangeLists
+ // (isXXXStub), then we'll check each stub heap in sequence.
+ if (isDispatchingStub(stubStartAddress))
+ return SK_DISPATCH;
+ else if (isLookupStub(stubStartAddress))
+ return SK_LOOKUP;
+ else if (isResolvingStub(stubStartAddress))
+ return SK_RESOLVE;
+
+ return SK_UNKNOWN;
+ }
+
+ inline BOOL isStub(PCODE stubStartAddress)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return (getStubKind(stubStartAddress) != SK_UNKNOWN);
+ }
+
+ BOOL isDispatchingStub(PCODE stubStartAddress)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return GetDispatchRangeList()->IsInRange(stubStartAddress);
+ }
+
+ BOOL isResolvingStub(PCODE stubStartAddress)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return GetResolveRangeList()->IsInRange(stubStartAddress);
+ }
+
+ BOOL isLookupStub(PCODE stubStartAddress)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ return GetLookupRangeList()->IsInRange(stubStartAddress);
+ }
+
+ static BOOL isDispatchingStubStatic(PCODE addr)
+ {
+ WRAPPER_NO_CONTRACT;
+ StubKind stubKind;
+ FindStubManager(addr, &stubKind);
+ return stubKind == SK_DISPATCH;
+ }
+
+ static BOOL isResolvingStubStatic(PCODE addr)
+ {
+ WRAPPER_NO_CONTRACT;
+ StubKind stubKind;
+ FindStubManager(addr, &stubKind);
+ return stubKind == SK_RESOLVE;
+ }
+
+ static BOOL isLookupStubStatic(PCODE addr)
+ {
+ WRAPPER_NO_CONTRACT;
+ StubKind stubKind;
+ FindStubManager(addr, &stubKind);
+ return stubKind == SK_LOOKUP;
+ }
+
+ //use range lists to track the chunks of memory that are part of each heap
+ LockedRangeList lookup_rangeList;
+ LockedRangeList resolve_rangeList;
+ LockedRangeList dispatch_rangeList;
+ LockedRangeList cache_entry_rangeList;
+
+ // Get dac-ized pointers to rangelist.
+ RangeList* GetLookupRangeList()
+ {
+ SUPPORTS_DAC;
+
+ TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, lookup_rangeList);
+ return PTR_RangeList(addr);
+ }
+ RangeList* GetResolveRangeList()
+ {
+ SUPPORTS_DAC;
+
+ TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, resolve_rangeList);
+ return PTR_RangeList(addr);
+ }
+ RangeList* GetDispatchRangeList()
+ {
+ SUPPORTS_DAC;
+
+ TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, dispatch_rangeList);
+ return PTR_RangeList(addr);
+ }
+ RangeList* GetCacheEntryRangeList()
+ {
+ SUPPORTS_DAC;
+ TADDR addr = PTR_HOST_MEMBER_TADDR(VirtualCallStubManager, this, cache_entry_rangeList);
+ return PTR_RangeList(addr);
+ }
+
+private:
+
+ //allocate and initialize a stub of the desired kind
+ DispatchHolder *GenerateDispatchStub(PCODE addrOfCode,
+ PCODE addrOfFail,
+ void *pMTExpected,
+ size_t dispatchToken);
+
+#ifdef _TARGET_AMD64_
+ // Used to allocate a long jump dispatch stub. See comment around
+ // m_fShouldAllocateLongJumpDispatchStubs for explanation.
+ DispatchHolder *GenerateDispatchStubLong(PCODE addrOfCode,
+ PCODE addrOfFail,
+ void *pMTExpected,
+ size_t dispatchToken);
+#endif
+
+ ResolveHolder *GenerateResolveStub(PCODE addrOfResolver,
+ PCODE addrOfPatcher,
+ size_t dispatchToken);
+
+ LookupHolder *GenerateLookupStub(PCODE addrOfResolver,
+ size_t dispatchToken);
+
+ template <typename STUB_HOLDER>
+ void AddToCollectibleVSDRangeList(STUB_HOLDER *holder)
+ {
+ if (isCollectible)
+ {
+ parentDomain->GetCollectibleVSDRanges()->AddRange(reinterpret_cast<BYTE *>(holder->stub()),
+ reinterpret_cast<BYTE *>(holder->stub()) + holder->stub()->size(),
+ this);
+ }
+ }
+
+ // The resolve cache is static across all AppDomains
+ ResolveCacheElem *GenerateResolveCacheElem(void *addrOfCode,
+ void *pMTExpected,
+ size_t token);
+
+ ResolveCacheElem *GetResolveCacheElem(void *pMT,
+ size_t token,
+ void *target);
+
+ //Given a dispatch token and a method table, determine the
+ //target address to go to. The return value (BOOL) states whether this address
+ //is cacheable or not.
+ static BOOL Resolver(MethodTable * pMT,
+ DispatchToken token,
+ PCODE * ppTarget);
+
+ // This can be used to find a target without needing the ability to throw
+ static BOOL TraceResolver(Object *pObj, DispatchToken token, TraceDestination *trace);
+
+public:
+ // Return the MethodDesc corresponding to this token.
+ static MethodDesc *GetRepresentativeMethodDescFromToken(DispatchToken token, MethodTable *pMT);
+ static MethodDesc *GetInterfaceMethodDescFromToken(DispatchToken token);
+ static MethodTable *GetTypeFromToken(DispatchToken token);
+
+ //This is used to get the token out of a stub
+ static size_t GetTokenFromStub(PCODE stub);
+
+ //This is used to get the token out of a stub and we know the stub manager and stub kind
+ static size_t GetTokenFromStubQuick(VirtualCallStubManager * pMgr, PCODE stub, StubKind kind);
+
+ // General utility functions
+ // Quick lookup in the cache. NOTHROW, GC_NOTRIGGER
+ static PCODE CacheLookup(size_t token, UINT16 tokenHash, MethodTable *pMT);
+
+ // Full exhaustive lookup. THROWS, GC_TRIGGERS
+ static PCODE GetTarget(DispatchToken token, MethodTable *pMT);
+
+private:
+ // Given a dispatch token, return true if the token represents an interface, false if just a slot.
+ static BOOL IsInterfaceToken(DispatchToken token);
+
+ // Given a dispatch token, return true if the token represents a slot on the target.
+ static BOOL IsClassToken(DispatchToken token);
+
+#ifdef CHAIN_LOOKUP
+ static ResolveCacheElem* __fastcall PromoteChainEntry(ResolveCacheElem *pElem);
+#endif
+
+ // Flags used by the non-x86 versions of VSD_ResolveWorker
+
+#define SDF_ResolveBackPatch (0x01)
+#define SDF_ResolvePromoteChain (0x02)
+#define SDF_ResolveFlags (0x03)
+
+ // This method needs to call the instance methods.
+ friend PCODE VSD_ResolveWorker(TransitionBlock * pTransitionBlock,
+ TADDR siteAddrForRegisterIndirect,
+ size_t token
+#ifndef _TARGET_X86_
+ , UINT_PTR flags
+#endif
+ );
+
+ //These are the entrypoints that the stubs actually end up calling via the
+ // xxxAsmStub methods above
+ static void STDCALL BackPatchWorkerStatic(PCODE returnAddr, TADDR siteAddrForRegisterIndirect);
+
+public:
+ PCODE ResolveWorker(StubCallSite* pCallSite, OBJECTREF pObj, DispatchToken token, StubKind stubKind);
+ void BackPatchWorker(StubCallSite* pCallSite);
+
+ //Change the callsite to point to stub
+ void BackPatchSite(StubCallSite* pCallSite, PCODE stub);
+
+public:
+ /* the following two public functions are to support tracing or stepping thru
+ stubs via the debugger. */
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
+ virtual BOOL TraceManager(Thread *thread,
+ TraceDestination *trace,
+ T_CONTEXT *pContext,
+ BYTE **pRetAddr);
+ size_t GetSize()
+ {
+ LIMITED_METHOD_CONTRACT;
+ size_t retval=0;
+ if(indcell_heap)
+ retval+=indcell_heap->GetSize();
+ if(cache_entry_heap)
+ retval+=cache_entry_heap->GetSize();
+ if(lookup_heap)
+ retval+=lookup_heap->GetSize();
+ if(dispatch_heap)
+ retval+=dispatch_heap->GetSize();
+ if(resolve_heap)
+ retval+=resolve_heap->GetSize();
+ return retval;
+ };
+
+private:
+ /* the following two private functions are to support tracing or stepping thru
+ stubs via the debugger. */
+ virtual BOOL DoTraceStub(PCODE stubStartAddress,
+ TraceDestination *trace);
+
+private:
+ // The parent domain of this manager
+ PTR_BaseDomain parentDomain;
+ bool isCollectible;
+
+ BYTE * m_initialReservedMemForHeaps;
+
+ static const UINT32 INDCELLS_PER_BLOCK = 32; // 32 indirection cells per block.
+
+ CrstExplicitInit m_indCellLock;
+
+ // List of free indirection cells. The cells were directly allocated from the loader heap
+ // (code:VirtualCallStubManager::GenerateStubIndirection)
+ BYTE * m_FreeIndCellList;
+
+ // List of recycled indirection cells. The cells were recycled from finalized dynamic methods
+ // (code:LCGMethodResolver::RecycleIndCells).
+ BYTE * m_RecycledIndCellList;
+
+#ifndef DACCESS_COMPILE
+ // This method returns a free cell from m_FreeIndCellList. It returns NULL if the list is empty.
+ BYTE * GetOneFreeIndCell()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return GetOneIndCell(&m_FreeIndCellList);
+ }
+
+ // This method returns a recycled cell from m_RecycledIndCellList. It returns NULL if the list is empty.
+ BYTE * GetOneRecycledIndCell()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return GetOneIndCell(&m_RecycledIndCellList);
+ }
+
+ // This method returns a cell from ppList. It returns NULL if the list is empty.
+ BYTE * GetOneIndCell(BYTE ** ppList)
+ {
+ CONTRACT (BYTE*) {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ppList));
+ PRECONDITION(m_indCellLock.OwnedByCurrentThread());
+ } CONTRACT_END;
+
+ BYTE * temp = *ppList;
+
+ if (temp)
+ {
+ BYTE * pNext = *((BYTE **)temp);
+ *ppList = pNext;
+ RETURN temp;
+ }
+
+ RETURN NULL;
+ }
+
+ // insert a linked list of indirection cells at the beginning of m_FreeIndCellList
+ void InsertIntoFreeIndCellList(BYTE * head, BYTE * tail)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ InsertIntoIndCellList(&m_FreeIndCellList, head, tail);
+ }
+
+ // insert a linked list of indirection cells at the beginning of ppList
+ void InsertIntoIndCellList(BYTE ** ppList, BYTE * head, BYTE * tail)
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(ppList));
+ PRECONDITION(CheckPointer(head));
+ PRECONDITION(CheckPointer(tail));
+ PRECONDITION(m_indCellLock.OwnedByCurrentThread());
+ } CONTRACTL_END;
+
+ BYTE * temphead = *ppList;
+ *((BYTE**)tail) = temphead;
+ *ppList = head;
+ }
+#endif // !DACCESS_COMPILE
+
+ PTR_LoaderHeap indcell_heap; // indirection cells go here
+ PTR_LoaderHeap cache_entry_heap; // resolve cache elem entries go here
+ PTR_LoaderHeap lookup_heap; // lookup stubs go here
+ PTR_LoaderHeap dispatch_heap; // dispatch stubs go here
+ PTR_LoaderHeap resolve_heap; // resolve stubs go here
+
+#ifdef _TARGET_AMD64_
+ // When we layout the stub heaps, we put them close together in a sequential order
+ // so that we maximize performance with respect to branch predictions. On AMD64,
+ // dispatch stubs use a rel32 jump on failure to the resolve stub. This works for
+ // a while because of the ordering, but as soon as we have to start allocating more
+ // memory for either the dispatch or resolve heaps we have a chance that we'll be
+ // further away than a rel32 jump can reach, because we're in a 64-bit address
+ // space. As such, this flag will indicate when we allocate the first dispatch stub
+ // that cannot reach a resolve stub, and when this happens we'll switch over to
+ // allocating the larger version of the dispatch stub which contains an abs64 jump.
+ //@TODO: This is a bit of a workaround, but the limitations of LoaderHeap require that we
+ //@TODO: take this approach. Hopefully in Orcas we'll have a chance to rewrite LoaderHeap.
+ BOOL m_fShouldAllocateLongJumpDispatchStubs; // Defaults to FALSE.
+#endif
+
+ BucketTable * lookups; // hash table of lookups keyed by tokens
+ BucketTable * cache_entries; // hash table of dispatch token/target structs for dispatch cache
+ BucketTable * dispatchers; // hash table of dispatching stubs keyed by tokens/actualtype
+ BucketTable * resolvers; // hash table of resolvers keyed by tokens/resolverstub
+
+ // This structure is used to keep track of the fail counters.
+ // We only need one fail counter per ResolveStub,
+ // and most programs use fewer than 250 ResolveStubs.
+ // We allocate these on the main heap using "new counter_block".
+ struct counter_block
+ {
+ static const UINT32 MAX_COUNTER_ENTRIES = 256-2; // 254 counters should be enough for most cases.
+
+ counter_block * next; // the next block
+ UINT32 used; // the index of the next free entry
+ INT32 block[MAX_COUNTER_ENTRIES]; // the counters
+ };
+
+ counter_block *m_counters; // linked list of counter blocks of failure counters
+ counter_block *m_cur_counter_block; // current block for updating counts
+ counter_block *m_cur_counter_block_for_reclaim; // current block for updating
+ UINT32 m_cur_counter_block_for_reclaim_index; // index into the current block for updating
+
+ // Used to keep track of all the VCSManager objects in the system.
+ PTR_VirtualCallStubManager m_pNext; // Linked list pointer
+
+public:
+ // Given a stub address, find the VCSManager that owns it.
+ static VirtualCallStubManager *FindStubManager(PCODE addr,
+ StubKind* wbStubKind = NULL);
+
+#ifndef DACCESS_COMPILE
+ // insert a linked list of indirection cells at the beginning of m_RecycledIndCellList
+ void InsertIntoRecycledIndCellList_Locked(BYTE * head, BYTE * tail)
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ } CONTRACTL_END;
+
+ CrstHolder lh(&m_indCellLock);
+
+ InsertIntoIndCellList(&m_RecycledIndCellList, head, tail);
+ }
+#endif // !DACCESS_COMPILE
+
+ // These are the counters for keeping statistics
+ struct
+ {
+ UINT32 site_counter; //# of call sites
+ UINT32 stub_lookup_counter; //# of lookup stubs
+ UINT32 stub_poly_counter; //# of resolve stubs
+ UINT32 stub_mono_counter; //# of dispatch stubs
+ UINT32 site_write; //# of call site backpatch writes
+ UINT32 site_write_poly; //# of call site backpatch writes to point to resolve stubs
+ UINT32 site_write_mono; //# of call site backpatch writes to point to dispatch stubs
+ UINT32 worker_call; //# of calls into ResolveWorker
+ UINT32 worker_call_no_patch; //# of times call_worker resulted in no patch
+ UINT32 worker_collide_to_mono; //# of times we converted a poly stub to a mono stub instead of writing the cache entry
+ UINT32 stub_space; //# of bytes of stubs
+ UINT32 cache_entry_counter; //# of cache structs
+ UINT32 cache_entry_space; //# of bytes used by cache lookup structs
+ } stats;
+
+ void LogStats();
+
+#ifdef DACCESS_COMPILE
+protected:
+ virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ virtual LPCWSTR GetStubManagerName(PCODE addr)
+ {
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(isStub(addr));
+
+ if (isLookupStub(addr))
+ {
+ return W("VSD_LookupStub");
+ }
+ else if (isDispatchingStub(addr))
+ {
+ return W("VSD_DispatchStub");
+ }
+ else
+ {
+ CONSISTENCY_CHECK(isResolvingStub(addr));
+ return W("VSD_ResolveStub");
+ }
+ }
+#endif
+};
+
+/********************************************************************************************************
+********************************************************************************************************/
+typedef VPTR(class VirtualCallStubManagerManager) PTR_VirtualCallStubManagerManager;
+
+class VirtualCallStubManagerIterator;
+class VirtualCallStubManagerManager : public StubManager
+{
+ VPTR_VTABLE_CLASS(VirtualCallStubManagerManager, StubManager)
+
+ friend class StubManager;
+ friend class VirtualCallStubManager;
+ friend class VirtualCallStubManagerIterator;
+ friend class StubManagerIterator;
+
+ public:
+ virtual BOOL TraceManager(Thread *thread, TraceDestination *trace,
+ T_CONTEXT *pContext, BYTE **pRetAddr);
+
+ virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress);
+
+ virtual BOOL DoTraceStub(PCODE stubStartAddress, TraceDestination *trace);
+
+ static MethodDesc *Entry2MethodDesc(PCODE stubStartAddress, MethodTable *pMT);
+
+#ifdef DACCESS_COMPILE
+ virtual void DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ virtual LPCWSTR GetStubManagerName(PCODE addr)
+ { WRAPPER_NO_CONTRACT; return FindVirtualCallStubManager(addr)->GetStubManagerName(addr); }
+#endif
+
+ private:
+ // Used to keep track of all the VCSManager objects in the system.
+ PTR_VirtualCallStubManager m_pManagers; // Head of the linked list
+
+#ifndef DACCESS_COMPILE
+ // Ctor. This is only used by StaticInit.
+ VirtualCallStubManagerManager();
+#endif
+
+ // A cache element to quickly check the last matched manager.
+ Volatile<VirtualCallStubManager*> m_pCacheElem;
+
+ // RW lock for reading entries and removing them.
+ SimpleRWLock m_RWLock;
+
+ // This will look through all the managers in an intelligent fashion to
+ // find the manager that owns the address.
+ VirtualCallStubManager *FindVirtualCallStubManager(PCODE stubAddress);
+
+ protected:
+ // Add a VCSManager to the linked list.
+ void AddStubManager(VirtualCallStubManager *pMgr);
+
+ // Remove a VCSManager from the linked list.
+ void RemoveStubManager(VirtualCallStubManager *pMgr);
+
+ VirtualCallStubManager *FirstManager()
+ { WRAPPER_NO_CONTRACT; return m_pManagers; }
+
+#ifndef DACCESS_COMPILE
+ static void InitStatic();
+#endif
+
+ public:
+ SPTR_DECL(VirtualCallStubManagerManager, g_pManager);
+
+ static VirtualCallStubManagerManager *GlobalManager()
+ { LIMITED_METHOD_DAC_CONTRACT; CONSISTENCY_CHECK(CheckPointer(g_pManager)); return g_pManager; }
+
+ VirtualCallStubManagerIterator IterateVirtualCallStubManagers();
+
+#ifdef _DEBUG
+ // Debug helper to help identify stub-managers.
+ virtual const char * DbgGetName() { LIMITED_METHOD_CONTRACT; return "VirtualCallStubManagerManager"; }
+#endif
+};
+
+/********************************************************************************************************
+********************************************************************************************************/
+class VirtualCallStubManagerIterator
+{
+ friend class VirtualCallStubManagerManager;
+
+ public:
+ BOOL Next();
+ VirtualCallStubManager *Current();
+
+ // Copy ctor
+ inline VirtualCallStubManagerIterator(const VirtualCallStubManagerIterator &it);
+
+ protected:
+ inline VirtualCallStubManagerIterator(VirtualCallStubManagerManager *pMgr);
+
+ BOOL m_fIsStart;
+ VirtualCallStubManager *m_pCurMgr;
+};
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+// Ctor
+inline VirtualCallStubManagerIterator::VirtualCallStubManagerIterator(VirtualCallStubManagerManager *pMgr)
+ : m_fIsStart(TRUE), m_pCurMgr(pMgr->m_pManagers)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ CONSISTENCY_CHECK(CheckPointer(pMgr));
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+// Copy ctor
+inline VirtualCallStubManagerIterator::VirtualCallStubManagerIterator(const VirtualCallStubManagerIterator &it)
+ : m_fIsStart(it.m_fIsStart), m_pCurMgr(it.m_pCurMgr)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+}
+
+/********************************************************************************************************
+#StubDispatchNotes
+
+A note on approach. The cache and hash tables used by the stub and lookup mechanism
+are designed with an eye to minimizing interlocking and/or syncing and/or locking operations.
+They are intended to run in a highly concurrent environment. Since there is no magic,
+some tradeoffs and some implementation constraints are required. The basic notion
+is that if all reads and writes are atomic and if all functions and operations operate
+correctly in the face of commutative reorderings of the visibility of all reads and writes
+across threads, then we don't have to interlock, sync, or serialize. Our approximation of
+this is:
+
+1. All reads and all writes to tables must be atomic. This effectively limits the actual entry
+size in a table to be a pointer or a pointer sized thing.
+
+2. All functions, like comparisons for equality or computation of hash values, must function
+correctly in the face of concurrent updating of the underlying table. This is accomplished
+by making the underlying structures/entries effectively immutable, if concurrency is in any way possible.
+By effectively immutable, we mean that the stub or token structure is either immutable or that,
+if it is ever written, all possibly concurrent writes are attempting to write the same value (atomically),
+or that the competing (atomic) values do not affect correctness, and that the function operates correctly whether
+or not any of the writes have taken place (is visible yet). The constraint we maintain is that all competing
+updates (and their visibility or lack thereof) do not alter the correctness of the program.
+
+3. All tables are inexact. The counts they hold (e.g. number of contained entries) may be inaccurate,
+but that inaccuracy cannot affect their correctness. Table modifications, such as insertion of
+a new entry, may not succeed, but such failures cannot affect correctness. This implies that just
+because a stub/entry is not present in a table, e.g. has been removed, that does not mean that
+it is not in use. It also implies that internal table structures, such as discarded hash table buckets,
+cannot be freely recycled since another concurrent thread may still be walking through them.
+
+4. Occasionally it is necessary to pick up the pieces that have been dropped on the floor,
+so to speak, e.g. actually recycle hash buckets that aren't in use. Since we have a natural
+sync point already in the GC, we use that to provide cleanup points. We need to make sure that code that
+is walking our structures is never at a GC safe point. Hence if the GC calls back into us inside the GC
+sync point, we know that nobody is inside our structures and we can safely rearrange and recycle things.
+********************************************************************************************************/
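+
+// Illustrative sketch (not part of the build) of the discipline that notes 1-3 above imply
+// for a table slot; it assumes the CLR's VolatileLoad and FastInterlockCompareExchangePointer
+// helpers, and the function names are hypothetical. Slots are pointer sized, so reads and
+// writes are atomic; a writer publishes with a compare-exchange, and losing that race is just
+// a failed insert, which note 3 already tolerates.
+#if 0
+size_t ReadSlotSketch(size_t* slot)
+{
+    // Atomic pointer-sized read; it may observe a stale value, which note 3 tolerates.
+    return VolatileLoad(slot);
+}
+BOOL TryPublishSlotSketch(PVOID volatile * slot, PVOID newEntry)
+{
+    // Claim only an empty slot (CALL_STUB_EMPTY_ENTRY == 0, defined below); if another thread
+    // wins the race, this insert simply "did not succeed" and correctness is unaffected.
+    return FastInterlockCompareExchangePointer(slot, newEntry, (PVOID)0) == (PVOID)0;
+}
+#endif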
+
+//initial and increment value for fail stub counters
+#ifdef STUB_LOGGING
+extern UINT32 STUB_MISS_COUNT_VALUE;
+extern UINT32 STUB_COLLIDE_WRITE_PCT;
+extern UINT32 STUB_COLLIDE_MONO_PCT;
+#else // !STUB_LOGGING
+#define STUB_MISS_COUNT_VALUE 100
+#define STUB_COLLIDE_WRITE_PCT 100
+#define STUB_COLLIDE_MONO_PCT 0
+#endif // !STUB_LOGGING
+
+//size and mask of the cache used by resolve stubs
+// CALL_STUB_CACHE_SIZE must be equal to 2^CALL_STUB_CACHE_NUM_BITS
+#define CALL_STUB_CACHE_NUM_BITS 12 //10
+#define CALL_STUB_CACHE_SIZE 4096 //1024
+#define CALL_STUB_CACHE_MASK (CALL_STUB_CACHE_SIZE-1)
+#define CALL_STUB_CACHE_PROBES 5
+//min sizes for BucketTable and buckets and the growth and hashing constants
+#define CALL_STUB_MIN_BUCKETS 32
+#define CALL_STUB_MIN_ENTRIES 4
+//this is so that the very first growth will jump from 4 to 32 entries, then double from there.
+#define CALL_STUB_SECONDARY_ENTRIES 8
+#define CALL_STUB_GROWTH_FACTOR 2
+#define CALL_STUB_LOAD_FACTOR 90
+#define CALL_STUB_HASH_CONST1 1327
+#define CALL_STUB_HASH_CONST2 43627
+#define LARGE_PRIME 7199369
+//internal layout of buckets=size-1,count,entries....
+#define CALL_STUB_MASK_INDEX 0
+#define CALL_STUB_COUNT_INDEX 1
+#define CALL_STUB_DEAD_LINK 2
+#define CALL_STUB_FIRST_INDEX 3
+//marker entries in cache and hash tables
+#define CALL_STUB_EMPTY_ENTRY 0
+// number of successes for a chained element before it gets moved to the front
+#define CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT (0x100)
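+
+// Illustrative sketch (not part of the build): the relationships the comments above assert
+// can be stated as compile-time checks; static_assert_no_msg is assumed to be available
+// from the CLR utility headers.
+#if 0
+static_assert_no_msg(CALL_STUB_CACHE_SIZE == (1 << CALL_STUB_CACHE_NUM_BITS));
+static_assert_no_msg(CALL_STUB_CACHE_MASK == CALL_STUB_CACHE_SIZE - 1);
+#endif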
+
+/*******************************************************************************************************
+Entry is an abstract class. We will make specific subclasses for each kind of
+entry. Entries hold references to stubs or tokens. The principal thing they provide
+is a virtual Equals function that is used by the caching and hashing tables within which
+the stubs and tokens are stored. Entries are typically stack allocated by the routines
+that call into the hash and caching functions, and the functions stuff stubs into the entry
+to do the comparisons. Essentially, specific entry subclasses supply a vtable to a stub
+as and when needed. This means we don't have to have vtables attached to stubs.
+
+Summarizing so far, there is a struct for each kind of stub or token, of the form XXXXStub.
+They provide the actual storage layouts.
+There is a struct of the form XXXXHolder in which each stub that has code is contained.
+They provide alignment and ancillary storage for the stub code.
+There is a subclass of Entry for each kind of stub or token, of the form XXXXEntry.
+They provide the specific implementations of the virtual functions declared in Entry.
+(A usage sketch follows the class definition below.) */
+class Entry
+{
+public:
+ //access and compare the keys of the entry
+ virtual BOOL Equals(size_t keyA, size_t keyB)=0;
+ virtual size_t KeyA()=0;
+ virtual size_t KeyB()=0;
+
+ //contents is the struct or token that the entry exposes
+ virtual void SetContents(size_t contents)=0;
+};
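+
+// Illustrative sketch (not part of the build) of the stack-allocation pattern described
+// above: a concrete entry supplies the vtable, and a Prober (declared later in this file)
+// carries it into a BucketTable. The function and variable names are hypothetical.
+#if 0
+void FindOrAddResolveStubSketch(BucketTable* resolvers, size_t token, size_t stubAddr)
+{
+    ResolveEntry entry;             // stack allocated; supplies Equals/KeyA/KeyB for resolve stubs
+    Prober probe(&entry);           // compares table slots through the entry's vtable
+    if (resolvers->SetUpProber(token, 0, &probe))   // resolve stubs are keyed by (token, 0)
+    {
+        size_t found = resolvers->Find(&probe);     // CALL_STUB_EMPTY_ENTRY if absent
+        if (found == CALL_STUB_EMPTY_ENTRY)
+            found = resolvers->Add(stubAddr, &probe);   // may return an entry another thread won with
+    }
+}
+#endif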
+
+/* define the platform specific Stubs and stub holders */
+
+#include <virtualcallstubcpu.hpp>
+
+#if USES_LOOKUP_STUBS
+/**********************************************************************************************
+LookupEntry wraps LookupStubs and provides the concrete implementation of the abstract class Entry.
+Virtual and interface call sites, when they are first jitted, point to LookupStubs. The hash table
+that contains lookup stubs is keyed by token, hence the Equals function uses the embedded token in
+the stub for comparison purposes. Since we are willing to allow duplicates in the hash table (as
+long as they are relatively rare) we do use direct comparison of the tokens rather than extracting
+the fields from within the tokens, for perf reasons. */
+class LookupEntry : public Entry
+{
+public:
+ //Creates an entry that wraps lookup stub s
+ LookupEntry(size_t s)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(VirtualCallStubManager::isLookupStubStatic((PCODE)s));
+ stub = (LookupStub*) s;
+ }
+
+    //default constructor to allow stack and inline allocation of lookup entries
+ LookupEntry() {LIMITED_METHOD_CONTRACT; stub = NULL;}
+
+ //implementations of abstract class Entry
+ BOOL Equals(size_t keyA, size_t keyB)
+ { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); }
+
+ size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); }
+ size_t KeyB() { WRAPPER_NO_CONTRACT; return (size_t)0; }
+
+ void SetContents(size_t contents)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(VirtualCallStubManager::isLookupStubStatic((PCODE)contents));
+ stub = LookupHolder::FromLookupEntry((PCODE)contents)->stub();
+ }
+
+ //extract the token of the underlying lookup stub
+
+ inline size_t Token() { LIMITED_METHOD_CONTRACT; return stub ? stub->token() : 0; }
+
+private:
+    LookupStub* stub;   //the stub the entry is wrapping
+};
+#endif // USES_LOOKUP_STUBS
+
+/**********************************************************************************************
+ResolveCacheEntry wraps a ResolveCacheElem and provides lookup functionality for entries that
+have been created and that may be added to the ResolveCache
+*/
+class ResolveCacheEntry : public Entry
+{
+public:
+ ResolveCacheEntry(size_t elem)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(elem != 0);
+ pElem = (ResolveCacheElem*) elem;
+ }
+
+    //default constructor to allow stack and inline allocation of resolve cache entries
+ ResolveCacheEntry() { LIMITED_METHOD_CONTRACT; pElem = NULL; }
+
+ //access and compare the keys of the entry
+ virtual BOOL Equals(size_t keyA, size_t keyB)
+ { WRAPPER_NO_CONTRACT; return pElem && (keyA == KeyA()) && (keyB == KeyB()); }
+ virtual size_t KeyA()
+ { LIMITED_METHOD_CONTRACT; return pElem != NULL ? pElem->token : 0; }
+ virtual size_t KeyB()
+ { LIMITED_METHOD_CONTRACT; return pElem != NULL ? (size_t) pElem->pMT : 0; }
+
+ //contents is the struct or token that the entry exposes
+ virtual void SetContents(size_t contents)
+ {
+ LIMITED_METHOD_CONTRACT;
+ pElem = (ResolveCacheElem*) contents;
+ }
+
+ inline const BYTE *Target()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return pElem != NULL ? (const BYTE *)pElem->target : NULL;
+ }
+
+private:
+ ResolveCacheElem *pElem;
+};
+
+/**********************************************************************************************
+ResolveEntry wraps ResolveStubs and provides the concrete implementation of the abstract class Entry.
+Polymorphic call sites and monomorphic calls that fail end up in a ResolveStub. Resolve stubs
+are stored in hash tables keyed by token, hence the Equals function uses the embedded token in
+the stub for comparison purposes. Since we are willing to allow duplicates in the hash table (as
+long as they are relatively rare) we do use direct comparison of the tokens rather than extracting
+the fields from within the tokens, for perf reasons. */
+class ResolveEntry : public Entry
+{
+public:
+ //Creates an entry that wraps resolve stub s
+ ResolveEntry (size_t s)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(VirtualCallStubManager::isResolvingStubStatic((PCODE)s));
+ stub = (ResolveStub*) s;
+ }
+    //default constructor to allow stack and inline allocation of resolver entries
+ ResolveEntry() { LIMITED_METHOD_CONTRACT; stub = CALL_STUB_EMPTY_ENTRY; }
+
+ //implementations of abstract class Entry
+ inline BOOL Equals(size_t keyA, size_t keyB)
+ { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); }
+ inline size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); }
+ inline size_t KeyB() { WRAPPER_NO_CONTRACT; return (size_t)0; }
+
+ void SetContents(size_t contents)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(VirtualCallStubManager::isResolvingStubStatic((PCODE)contents));
+ stub = ResolveHolder::FromResolveEntry((PCODE)contents)->stub();
+ }
+ //extract the token of the underlying resolve stub
+ inline size_t Token() { WRAPPER_NO_CONTRACT; return stub ? (size_t)(stub->token()) : 0; }
+private:
+ ResolveStub* stub; //the stub the entry is wrapping
+};
+
+/**********************************************************************************************
+DispatchEntry wraps DispatchStubs and provides the concrete implementation of the abstract class Entry.
+Monomorphic and mostly monomorphic call sites eventually point to DispatchStubs. Dispatch stubs
+are placed in hash and cache tables keyed by the expected Method Table and token they are built for.
+Since we are willing to allow duplicates in the hash table (as long as they are relatively rare)
+we do use direct comparison of the tokens rather than extracting the fields from within the tokens,
+for perf reasons.*/
+class DispatchEntry : public Entry
+{
+public:
+ //Creates an entry that wraps dispatch stub s
+ DispatchEntry (size_t s)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(VirtualCallStubManager::isDispatchingStubStatic((PCODE)s));
+ stub = (DispatchStub*) s;
+ }
+    //default constructor to allow stack and inline allocation of dispatch entries
+ DispatchEntry() { LIMITED_METHOD_CONTRACT; stub = CALL_STUB_EMPTY_ENTRY; }
+
+ //implementations of abstract class Entry
+ inline BOOL Equals(size_t keyA, size_t keyB)
+ { WRAPPER_NO_CONTRACT; return stub && (keyA == KeyA()) && (keyB == KeyB()); }
+ inline size_t KeyA() { WRAPPER_NO_CONTRACT; return Token(); }
+ inline size_t KeyB() { WRAPPER_NO_CONTRACT; return ExpectedMT();}
+
+ void SetContents(size_t contents)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(VirtualCallStubManager::isDispatchingStubStatic((PCODE)contents));
+ stub = DispatchHolder::FromDispatchEntry((PCODE)contents)->stub();
+ }
+
+ //extract the fields of the underlying dispatch stub
+ inline size_t ExpectedMT()
+ { WRAPPER_NO_CONTRACT; return stub ? (size_t)(stub->expectedMT()) : 0; }
+
+ size_t Token()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (stub)
+ {
+ ResolveHolder * resolveHolder = ResolveHolder::FromFailEntry(stub->failTarget());
+ size_t token = resolveHolder->stub()->token();
+ _ASSERTE(token == VirtualCallStubManager::GetTokenFromStub((PCODE)stub));
+ return token;
+ }
+ else
+ {
+ return 0;
+ }
+ }
+
+ inline PCODE Target()
+ { WRAPPER_NO_CONTRACT; return stub ? stub->implTarget() : 0; }
+
+private:
+ DispatchStub* stub;
+};
+
+/*************************************************************************************************
+DispatchCache is the cache table that the resolve stubs use for inline polymorphic resolution
+of a call. The cache entry is logically a triplet of (method table, token, impl address) where method table
+is the type of the calling frame's <this>, token identifies the method being invoked,
+i.e. is a (type id,slot #) pair, and impl address is the address of the method implementation.
+(An illustrative lookup sketch follows the class below.)
+*/
+class DispatchCache
+{
+public:
+ static const UINT16 INVALID_HASH = (UINT16)(-1);
+
+ DispatchCache();
+
+ //read and write the cache keyed by (method table,token) pair.
+ inline ResolveCacheElem* Lookup(size_t token, void* mt)
+ { WRAPPER_NO_CONTRACT; return Lookup(token, INVALID_HASH, mt);}
+
+ ResolveCacheElem* Lookup(size_t token, UINT16 tokenHash, void* mt);
+
+ enum InsertKind {IK_NONE, IK_DISPATCH, IK_RESOLVE, IK_SHARED, IK_EXTERNAL};
+
+ BOOL Insert(ResolveCacheElem* elem, InsertKind insertKind);
+#ifdef CHAIN_LOOKUP
+ void PromoteChainEntry(ResolveCacheElem* elem);
+#endif
+
+ // This is the heavyweight hashing algorithm. Use sparingly.
+ static UINT16 HashToken(size_t token);
+
+ inline void GetLoadFactor(size_t *total, size_t *used)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ *total = CALL_STUB_CACHE_SIZE;
+ size_t count = 0;
+ for (size_t i = 0; i < CALL_STUB_CACHE_SIZE; i++)
+ if (cache[i] != empty)
+ count++;
+ *used = count;
+ }
+
+ inline void *GetCacheBaseAddr()
+ { LIMITED_METHOD_CONTRACT; return &cache[0]; }
+ inline size_t GetCacheCount()
+ { LIMITED_METHOD_CONTRACT; return CALL_STUB_CACHE_SIZE; }
+ inline ResolveCacheElem *GetCacheEntry(size_t idx)
+ { LIMITED_METHOD_CONTRACT; return VolatileLoad(&cache[idx]); }
+ inline BOOL IsCacheEntryEmpty(size_t idx)
+ { LIMITED_METHOD_CONTRACT; return cache[idx] == empty; }
+
+ inline void SetCacheEntry(size_t idx, ResolveCacheElem *elem)
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef STUB_LOGGING
+ cacheData[idx].numWrites++;
+#endif
+#ifdef CHAIN_LOOKUP
+ CONSISTENCY_CHECK(m_writeLock.OwnedByCurrentThread());
+#endif
+ cache[idx] = elem;
+ }
+
+ inline void ClearCacheEntry(size_t idx)
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef STUB_LOGGING
+ cacheData[idx].numClears++;
+#endif
+ cache[idx] = empty;
+ }
+
+ struct
+ {
+ UINT32 insert_cache_external; //# of times Insert was called for IK_EXTERNAL
+ UINT32 insert_cache_shared; //# of times Insert was called for IK_SHARED
+ UINT32 insert_cache_dispatch; //# of times Insert was called for IK_DISPATCH
+ UINT32 insert_cache_resolve; //# of times Insert was called for IK_RESOLVE
+ UINT32 insert_cache_hit; //# of times Insert found an empty cache entry
+ UINT32 insert_cache_miss; //# of times Insert already had a matching cache entry
+ UINT32 insert_cache_collide; //# of times Insert found a used cache entry
+ UINT32 insert_cache_write; //# of times Insert wrote a cache entry
+ } stats;
+
+ void LogStats();
+
+ // Unlocked iterator of entries. Use only when read/write access to the cache
+ // is safe. This would typically be at GC sync points, currently needed during
+ // appdomain unloading.
+ class Iterator
+ {
+ public:
+ Iterator(DispatchCache *pCache);
+ inline BOOL IsValid()
+ { WRAPPER_NO_CONTRACT; return (m_curBucket < (INT32)m_pCache->GetCacheCount()); }
+ void Next();
+ // Unlink the current entry.
+        // **NOTE** Using this method implicitly performs a call to Next to move
+        //          past the unlinked entry. Thus, you could accidentally skip
+        //          entries unless you take this into account.
+ ResolveCacheElem *UnlinkEntry();
+ inline ResolveCacheElem *Entry()
+ { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(IsValid()); return *m_ppCurElem; }
+
+ private:
+ void NextValidBucket();
+ inline void NextBucket()
+ { LIMITED_METHOD_CONTRACT; m_curBucket++; m_ppCurElem = &m_pCache->cache[m_curBucket]; }
+
+ DispatchCache *m_pCache;
+ INT32 m_curBucket;
+ ResolveCacheElem **m_ppCurElem;
+ };
+
+private:
+#ifdef CHAIN_LOOKUP
+ Crst m_writeLock;
+#endif
+
+ //the following hash computation is also inlined in the resolve stub in asm (SO NO TOUCHIE)
+ inline static UINT16 HashMT(UINT16 tokenHash, void* mt)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ UINT16 hash;
+
+ size_t mtHash = (size_t) mt;
+ mtHash = (((mtHash >> CALL_STUB_CACHE_NUM_BITS) + mtHash) >> LOG2_PTRSIZE) & CALL_STUB_CACHE_MASK;
+ hash = (UINT16) mtHash;
+
+ hash ^= (tokenHash & CALL_STUB_CACHE_MASK);
+
+ return hash;
+ }
+
+ ResolveCacheElem* cache[CALL_STUB_CACHE_SIZE]; //must be first
+ ResolveCacheElem* empty; //empty entry, initialized to fail all comparisons
+#ifdef STUB_LOGGING
+public:
+ struct CacheEntryData {
+ UINT32 numWrites;
+ UINT16 numClears;
+ };
+ CacheEntryData cacheData[CALL_STUB_CACHE_SIZE];
+#endif // STUB_LOGGING
+};
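+
+// Illustrative sketch (not part of the build) of the lookup flow described above, using only
+// the public surface of DispatchCache; the function name is hypothetical.
+#if 0
+PCODE ResolveViaCacheSketch(DispatchCache* pCache, size_t token, MethodTable* pMT)
+{
+    // The resolve stubs perform the equivalent of this probe inline in assembly (see HashMT).
+    ResolveCacheElem* pElem = pCache->Lookup(token, (void*)pMT);
+    if (pElem != NULL)
+        return (PCODE)pElem->target;    // hit: jump straight to the cached implementation
+    return NULL;                        // miss: fall back to the slow-path resolver
+}
+#endif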
+
+/**************************************************************************************************
+The hash tables are accessed via instances of the Prober. A Prober is a probe into a bucket
+of the hash table, and therefore has an index which is the current probe position.
+It includes a count of the number of probes done in that bucket so far and a stride
+to step through the bucket with. To do comparisons, it has a reference to an entry with which
+it can do comparisons (Equals(...)) of the entries (stubs) inside the hash table. It also has
+the key pair (keyA, keyB) that it is looking for.
+
+Typically, an entry of the appropriate type is created on the stack and then the prober is created passing
+in a reference to the entry. The prober is used for a complete operation, such as look for and find an
+entry (stub), creating and inserting it as necessary.
+
+The initial index and the stride are orthogonal hashes of the key pair, i.e. we are doing a variant of
+double hashing. When we initialize the prober (see FormHash below) we set the initial probe based on
+one hash. The stride (used as a modulo addition of the probe position) is based on a different hash and
+is such that it will visit every location in the bucket before repeating. Hence it is imperative that
+the bucket size and the stride be relatively prime with respect to each other. We have chosen to make
+bucket sizes a power of 2, so we force stride to be odd. (A worked probe-loop sketch follows the class below.)
+
+Note -- it must be assumed that multiple probers are walking the same tables and buckets at the same time.
+Additionally, the counts may not be accurate, and there may be duplicates in the tables. Since the tables
+do not allow concurrent deletion, some of the concurrency issues are ameliorated.
+*/
+class Prober
+{
+ friend class FastTable;
+ friend class BucketTable;
+public:
+ Prober(Entry* e) {LIMITED_METHOD_CONTRACT; comparer = e;}
+ //find the requested entry, if not there return CALL_STUB_EMPTY_ENTRY
+ size_t Find();
+ //add the entry into the bucket, if it is not already in the bucket.
+ //return the entry actually in the bucket (existing or added)
+ size_t Add(size_t entry);
+private:
+ //return the bucket (FastTable*) that the prober is currently walking
+ inline size_t* items() {LIMITED_METHOD_CONTRACT; return &base[-CALL_STUB_FIRST_INDEX];}
+ //are there more probes possible, or have we probed everything in the bucket
+ inline BOOL NoMore() {LIMITED_METHOD_CONTRACT; return probes>mask;} //both probes and mask are (-1)
+ //advance the probe to a new place in the bucket
+ inline BOOL Next()
+ {
+ WRAPPER_NO_CONTRACT;
+ index = (index + stride) & mask;
+ probes++;
+ return !NoMore();
+ }
+ inline size_t Read()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(base);
+ return VolatileLoad(&base[index]);
+ }
+ //initialize a prober across a bucket (table) for the specified keys.
+ void InitProber(size_t key1, size_t key2, size_t* table);
+ //set up the initial index and stride and probe count
+ inline void FormHash()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ probes = 0;
+ //these two hash functions have not been formally measured for effectiveness
+ //but they are at least orthogonal
+
+ size_t a = ((keyA>>16) + keyA);
+ size_t b = ((keyB>>16) ^ keyB);
+ index = (((a*CALL_STUB_HASH_CONST1)>>4)+((b*CALL_STUB_HASH_CONST2)>>4)+CALL_STUB_HASH_CONST1) & mask;
+ stride = ((a+(b*CALL_STUB_HASH_CONST1)+CALL_STUB_HASH_CONST2) | 1) & mask;
+ }
+ //atomically grab an empty slot so we can insert a new entry into the bucket
+ BOOL GrabEntry(size_t entryValue);
+ size_t keyA; //key pair we are looking for
+ size_t keyB;
+ size_t* base; //we have our own pointer to the bucket, so races don't matter.
+ // We won't care if we do the lookup in an
+ // outdated bucket (has grown out from under us).
+ // All that will happen is possibly dropping an entry
+ // on the floor or adding a duplicate.
+ size_t index; //current probe point in the bucket
+ size_t stride; //amount to step on each successive probe, must be relatively prime wrt the bucket size
+ size_t mask; //size of bucket - 1
+    size_t probes; //number of probes - 1
+ Entry* comparer;//used to compare an entry against the sought after key pair
+};
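+
+// Illustrative sketch (not part of the build): a worked example of the claim above that an
+// odd stride is relatively prime to a power-of-2 bucket size, so the probe sequence visits
+// every slot before repeating.
+#if 0
+void OddStrideVisitsEverySlotSketch()
+{
+    const size_t size = 8, mask = size - 1;   // power-of-2 bucket, as in FastTable
+    size_t index = 5, stride = 3;             // stride forced odd, as in Prober::FormHash
+    for (size_t probes = 0; probes <= mask; probes++)
+    {
+        // visits 5, 0, 3, 6, 1, 4, 7, 2 -- each of the 8 slots exactly once
+        index = (index + stride) & mask;
+    }
+}
+#endif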
+
+/********************************************************************************************************
+FastTable is used to implement the buckets of a BucketTable, a bucketized hash table. A FastTable is
+an array of entries (contents). The first two slots of contents store the size-1 and count of entries
+actually in the FastTable. Note that the count may be inaccurate and there may be duplicates. Careful
+attention must be paid to eliminate the need for interlocked or serialized or locked operations in the face
+of concurrency. (A layout sketch follows the class below.)
+*/
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4200) // disable zero-sized array warning
+#endif // _MSC_VER
+class FastTable
+{
+ friend class BucketTable;
+public:
+private:
+ FastTable() { LIMITED_METHOD_CONTRACT; }
+ ~FastTable() { LIMITED_METHOD_CONTRACT; }
+
+ //initialize a prober for the specified keys.
+ inline BOOL SetUpProber(size_t keyA, size_t keyB, Prober* probe)
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ } CONTRACTL_END;
+
+ _ASSERTE(probe);
+ _ASSERTE(contents);
+ probe->InitProber(keyA, keyB, &contents[0]);
+ return TRUE;
+ }
+ //find the requested entry (keys of prober), if not there return CALL_STUB_EMPTY_ENTRY
+ size_t Find(Prober* probe);
+ //add the entry, if it is not already there. Probe is used to search.
+    //Return the entry actually contained (existing or added)
+ size_t Add(size_t entry, Prober* probe);
+ void IncrementCount();
+
+    // Create a FastTable with space for numberOfEntries. Per the contract below, this
+    // method may throw on OOM, so callers cannot rely on a NULL return to detect failure.
+ static FastTable* MakeTable(size_t numberOfEntries)
+ {
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ INJECT_FAULT(COMPlusThrowOM(););
+ } CONTRACTL_END;
+
+ size_t size = CALL_STUB_MIN_ENTRIES;
+ while (size < numberOfEntries) {size = size<<1;}
+// if (size == CALL_STUB_MIN_ENTRIES)
+// size += 3;
+ size_t* bucket = new size_t[(sizeof(FastTable)/sizeof(size_t))+size+CALL_STUB_FIRST_INDEX];
+ FastTable* table = new (bucket) FastTable();
+ table->InitializeContents(size);
+ return table;
+ }
+ //Initialize as empty
+ void InitializeContents(size_t size)
+ {
+ LIMITED_METHOD_CONTRACT;
+ memset(&contents[0], CALL_STUB_EMPTY_ENTRY, (size+CALL_STUB_FIRST_INDEX)*sizeof(BYTE*));
+ contents[CALL_STUB_MASK_INDEX] = size-1;
+ }
+ inline size_t tableMask() {LIMITED_METHOD_CONTRACT; return (size_t) (contents[CALL_STUB_MASK_INDEX]);}
+ inline size_t tableSize() {LIMITED_METHOD_CONTRACT; return tableMask()+1;}
+ inline size_t tableCount() {LIMITED_METHOD_CONTRACT; return (size_t) (contents[CALL_STUB_COUNT_INDEX]);}
+ inline BOOL isFull()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (tableCount()+1) * 100 / CALL_STUB_LOAD_FACTOR >= tableSize();
+ }
+ //we store (size-1) in bucket[CALL_STUB_MASK_INDEX==0],
+ //we store the used count in bucket[CALL_STUB_COUNT_INDEX==1],
+ //we have an unused cell to use as a temp at bucket[CALL_STUB_DEAD_LINK==2],
+ //and the table starts at bucket[CALL_STUB_FIRST_INDEX==3],
+ size_t contents[];
+};
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
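+
+// Illustrative note (not from the original source): for a FastTable made by MakeTable with
+// size == 4, the contents[] layout described above looks like:
+//
+//   contents[0]     mask (size-1 == 3)      CALL_STUB_MASK_INDEX
+//   contents[1]     count of used entries   CALL_STUB_COUNT_INDEX
+//   contents[2]     dead-link temp cell     CALL_STUB_DEAD_LINK
+//   contents[3..6]  the four entry slots    CALL_STUB_FIRST_INDEX onward
+//
+// Prober::items() recovers &contents[0] by backing up CALL_STUB_FIRST_INDEX slots from its
+// base pointer, which implies base points at the first entry slot.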
+
+/******************************************************************************************************
+BucketTable is a bucketized hash table. It uses FastTables for its buckets. The hash tables
+used by the VirtualCallStubManager are BucketTables. The number of buckets is fixed at the time
+the table is created. The actual buckets are allocated as needed, and grow as necessary. The reason
+for using buckets is primarily to reduce the cost of growing, since only a single bucket is actually
+grown at any given time. Since the hash tables are accessed infrequently, the load factor that
+controls growth is quite high (90%). Since we use hashing to pick the bucket, and we use hashing to
+lookup inside the bucket, it is important that the hashing function used here is orthogonal to the ones
+used in the buckets themselves (see Prober::FormHash).
+(A cleanup sketch follows the class below.)
+*/
+class BucketTable
+{
+public:
+ BucketTable(size_t numberOfBuckets)
+ {
+ WRAPPER_NO_CONTRACT;
+ size_t size = CALL_STUB_MIN_BUCKETS;
+ while (size < numberOfBuckets) {size = size<<1;}
+ buckets = AllocateBuckets(size);
+ // Initialize statistics counters
+ memset(&stats, 0, sizeof(stats));
+ }
+
+ ~BucketTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ if(buckets != NULL)
+ {
+ size_t size = bucketCount()+CALL_STUB_FIRST_INDEX;
+ for(size_t ix = CALL_STUB_FIRST_INDEX; ix < size; ix++) delete (FastTable*)(buckets[ix]);
+            delete[] buckets;
+ }
+ }
+
+ //initialize a prober for the specified keys.
+ BOOL SetUpProber(size_t keyA, size_t keyB, Prober *prober);
+ //find the requested entry (keys of prober), if not there return CALL_STUB_EMPTY_ENTRY
+ inline size_t Find(Prober* probe) {WRAPPER_NO_CONTRACT; return probe->Find();}
+ //add the entry, if it is not already there. Probe is used to search.
+ size_t Add(size_t entry, Prober* probe);
+    //reclaim abandoned buckets. Buckets are abandoned when they need to grow.
+ //needs to be called inside a gc sync point.
+ static void Reclaim();
+
+ struct
+ {
+ UINT32 bucket_space; //# of bytes in caches and tables, not including the stubs themselves
+ UINT32 bucket_space_dead; //# of bytes of abandoned buckets not yet recycled.
+ } stats;
+
+ void LogStats();
+
+private:
+ inline size_t bucketMask() {LIMITED_METHOD_CONTRACT; return (size_t) (buckets[CALL_STUB_MASK_INDEX]);}
+ inline size_t bucketCount() {LIMITED_METHOD_CONTRACT; return bucketMask()+1;}
+ inline size_t ComputeBucketIndex(size_t keyA, size_t keyB)
+ {
+ LIMITED_METHOD_CONTRACT;
+ size_t a = ((keyA>>16) + keyA);
+ size_t b = ((keyB>>16) ^ keyB);
+ return CALL_STUB_FIRST_INDEX+(((((a*CALL_STUB_HASH_CONST2)>>5)^((b*CALL_STUB_HASH_CONST1)>>5))+CALL_STUB_HASH_CONST2) & bucketMask());
+ }
+ //grows the bucket referenced by probe.
+ BOOL GetMoreSpace(const Prober* probe);
+ //creates storage in which to store references to the buckets
+ static size_t* AllocateBuckets(size_t size)
+ {
+ LIMITED_METHOD_CONTRACT;
+ size_t* buckets = new size_t[size+CALL_STUB_FIRST_INDEX];
+ if (buckets != NULL)
+ {
+ memset(&buckets[0], CALL_STUB_EMPTY_ENTRY, (size+CALL_STUB_FIRST_INDEX)*sizeof(void*));
+ buckets[CALL_STUB_MASK_INDEX] = size-1;
+ }
+ return buckets;
+ }
+ inline size_t Read(size_t index)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(index <= bucketMask()+CALL_STUB_FIRST_INDEX);
+ return VolatileLoad(&buckets[index]);
+ }
+
+#ifdef _MSC_VER
+#pragma warning(disable: 4267) //work-around for the compiler
+#endif
+ inline void Write(size_t index, size_t value)
+ {
+ LIMITED_METHOD_CONTRACT;
+ CONSISTENCY_CHECK(index <= bucketMask()+CALL_STUB_FIRST_INDEX);
+ VolatileStore(&buckets[index], value);
+ }
+#ifdef _MSC_VER
+#pragma warning(default: 4267)
+#endif
+
+ // We store (#buckets-1) in bucket[CALL_STUB_MASK_INDEX ==0]
+ // We have two unused cells at bucket[CALL_STUB_COUNT_INDEX ==1]
+ // and bucket[CALL_STUB_DEAD_LINK ==2]
+ // and the table starts at bucket[CALL_STUB_FIRST_INDEX ==3]
+ // the number of elements is bucket[CALL_STUB_MASK_INDEX]+CALL_STUB_FIRST_INDEX
+ size_t* buckets;
+ static FastTable* dead; //linked list head of to be deleted (abandoned) buckets
+};
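+
+// Illustrative sketch (not part of the build): how abandoned buckets tie back to note 4 of
+// #StubDispatchNotes. When a bucket grows, the old FastTable cannot be freed immediately --
+// a concurrent reader may still be probing it -- so it is parked on the 'dead' list and
+// reclaimed later; the function name here is hypothetical.
+#if 0
+void GcSyncPointCleanupSketch()
+{
+    // Called only inside a GC sync point, when no thread can be walking the tables.
+    BucketTable::Reclaim();   // frees every FastTable parked on the 'dead' list
+}
+#endif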
+
+#endif // !_VIRTUAL_CALL_STUB_H
+
diff --git a/src/vm/vm.settings b/src/vm/vm.settings
new file mode 100644
index 0000000000..5e7ac72cbe
--- /dev/null
+++ b/src/vm/vm.settings
@@ -0,0 +1,71 @@
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+
+ <!--Import the settings-->
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\clr.props" />
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\src\debug\SetDebugTargetLocal.props" />
+
+ <PropertyGroup>
+
+ <ClWarningLevel Condition="'$(BuildArchitecture)'=='i386'">4</ClWarningLevel>
+
+ <UserIncludes>
+ $(ClrSrcDirectory)\vm;
+ $(ClrSrcDirectory)\vm\$(TargetCpu);
+ $(IntermediateOutputDirectory);
+ $(ClrSrcDirectory)\ceegen\inc;
+ $(ClrSrcDirectory)\classlibnative\inc;
+ $(ClrSrcDirectory)\md\inc;
+ $(ClrSrcDirectory)\xmlparser;
+ $(ClrSrcDirectory)\fusion\inc;
+ $(ClrSrcDirectory)\classlibnative\bcltype;
+ $(ClrSrcDirectory)\classlibnative\cryptography;
+ $(ClrSrcDirectory)\classlibnative\remoting;
+ $(VCToolsIncPath);
+ $(UserIncludes);
+ $(ClrSrcDirectory)\debug\inc\$(TargetCpu);
+ $(ClrSrcDirectory)\debug\inc\dump;
+ $(ClrSrcDirectory)\zap;
+ $(ClrSrcDirectory)\strongname\inc;
+ $(ClrSrcDirectory)\gc
+ </UserIncludes>
+
+ <UserAssemble386IncludePath>$(UserIncludes)</UserAssemble386IncludePath>
+ <UserAssembleAmd64IncludePath>$(UserIncludes)</UserAssembleAmd64IncludePath>
+ <UserAssembleArmIncludePath>$(UserIncludes)</UserAssembleArmIncludePath>
+
+ <OutputPath>$(ClrLibDest)</OutputPath>
+ <TargetType>LIBRARY</TargetType>
+
+ <ClDefines>$(ClDefines);UNICODE;_UNICODE</ClDefines>
+
+ <ClDefines Condition="'$(DebugBuild)' == 'true'">$(ClDefines);WRITE_BARRIER_CHECK=1;</ClDefines>
+ <ClDefines>$(ClDefines);FEATURE_LEAVE_RUNTIME_HOLDER=1;</ClDefines>
+
+ <AsmDefines Condition="'$(DebugBuild)' == 'true'">WRITE_BARRIER_CHECK=1</AsmDefines>
+
+ <ClAdditionalOptions Condition="'$(LibLinkTimeCodeGeneration)' == 'true'">$(ClAdditionalOptions) -wd4702</ClAdditionalOptions>
+ <ClWarningsDisabledUnderPogoAndLtcg>$(ClWarningsDisabledUnderPogoAndLtcg);4702</ClWarningsDisabledUnderPogoAndLtcg>
+ <LinkWarningsDisabledUnderPogo>$(LinkWarningsDisabledUnderPogo);4702</LinkWarningsDisabledUnderPogo>
+
+ <ClDefines Condition="'$(PerfcountersSupportedBuild)' == 'true'">$(ClDefines);ENABLE_PERF_COUNTERS;</ClDefines>
+ <PCHHeader>common.h</PCHHeader>
+ <EnableCxxPCHHeaders>true</EnableCxxPCHHeaders>
+ <PCHCompile>$(ClrSrcDirectory)\vm\common.cpp</PCHCompile>
+
+ <VmSourcesDir>$(ClrSrcDirectory)\vm</VmSourcesDir>
+ <Amd64SourcesDir>$(ClrSrcDirectory)\vm\amd64</Amd64SourcesDir>
+ <I386SourcesDir>$(ClrSrcDirectory)\vm\i386</I386SourcesDir>
+ <ArmSourcesDir>$(ClrSrcDirectory)\vm\arm</ArmSourcesDir>
+ <Arm64SourcesDir>$(ClrSrcDirectory)\vm\arm64</Arm64SourcesDir>
+
+ <AsmDefines>$(AsmDefines);$(CDefines)</AsmDefines>
+ <Assemble386MasmCompatible>true</Assemble386MasmCompatible>
+ </PropertyGroup>
+
+
+ <ItemGroup>
+ <ProjectReference Include="$(ClrSrcDirectory)inc\corguids.nativeproj" />
+ <ProjectReference Include="$(ClrSrcDirectory)dlls\shim\mscoreei.nativeproj" Condition="'$(FeatureCoreclr)' != 'true'"/> <!-- For eventmsg.h -->
+ </ItemGroup>
+
+</Project>
diff --git a/src/vm/vm.targets b/src/vm/vm.targets
new file mode 100644
index 0000000000..ccef8079a7
--- /dev/null
+++ b/src/vm/vm.targets
@@ -0,0 +1,59 @@
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+
+ <Import Project="$(Clrbase)\clr.targets" />
+
+ <PropertyGroup>
+ <Assemble386AssemblerDependsOn>
+ $(Assemble386AssemblerDependsOn);
+ Generate__asmconstants_inc
+ </Assemble386AssemblerDependsOn>
+ <AssembleAmd64AssemblerDependsOn>
+ $(AssembleAmd64AssemblerDependsOn);
+ Generate__asmconstants_inc
+ </AssembleAmd64AssemblerDependsOn>
+ <AssembleArmAssemblerDependsOn>
+ $(AssembleArmAssemblerDependsOn);
+ Generate__asmconstants_inc
+ </AssembleArmAssemblerDependsOn>
+ </PropertyGroup>
+
+ <ItemGroup>
+ <Clean Condition="'$(AsmSuffix)' == 'asm'" Include="$(IntermediateOutputDirectory)\asmconstants.inc" />
+ </ItemGroup>
+
+
+ <Target Name="Generate__asmconstants_inc" Condition="'$(AsmSuffix)' == 'asm'" Inputs="$(Clrbase)\src\vm\$(BuildArchitecture)\asmconstants.h" Outputs="$(IntermediateOutputDirectory)\asmconstants.inc">
+ <Exec Command="$(PerlCommand) $(Clrbase)\src\vm\h2inc.pl $(Clrbase)\src\vm\$(BuildArchitecture)\asmconstants.h &gt; $(IntermediateOutputDirectory)\asmconstants.tmp" StandardOutputImportance="Normal" />
+ <CL
+ TrackerLogDirectory="$(IntermediateOutputDirectory)"
+ PreprocessorDefinitions="$(CDefines);$(AsmDefines);$(TargetDefines)"
+ PreprocessSuppressLineNumbers="true"
+ PreprocessToFile="true"
+ PreprocessOutputPath="$(IntermediateOutputDirectory)\"
+ Sources="$(IntermediateOutputDirectory)\asmconstants.tmp"
+ TLogReadFiles="@(ClTLogReadFile)"
+ TLogWriteFiles="@(ClTLogWriteFile)"
+ ToolExe="$(ClToolExe)"
+ ToolPath="$(ClToolPath)"
+ TrackFileAccess="$(TrackFileAccess)"
+ >
+ </CL>
+
+ <ItemGroup>
+ <Internal_AsmConstantsGenerated Include="$(IntermediateOutputDirectory)\asmconstants.tmp" Condition="Exists('$(IntermediateOutputDirectory)\asmconstants.tmp')"/>
+ <AsmConstantsGenerated Include="$(IntermediateOutputDirectory)\asmconstants.i" />
+ </ItemGroup>
+
+ <Copy
+ SourceFiles="@(AsmConstantsGenerated)"
+ DestinationFiles="@(AsmConstantsGenerated->'$(IntermediateOutputDirectory)\%(Filename).inc')"
+ OverwriteReadOnlyFiles="$(OverwriteReadOnlyFiles)"
+ Retries="$(CopyRetryCount)"
+ RetryDelayMilliseconds="$(CopyRetryDelayMilliseconds)"
+ Condition="'@(Internal_AsmConstantsGenerated)' != ''"
+ />
+
+ </Target>
+
+</Project>
+
diff --git a/src/vm/vmholder.h b/src/vm/vmholder.h
new file mode 100644
index 0000000000..59f94c8fb1
--- /dev/null
+++ b/src/vm/vmholder.h
@@ -0,0 +1,27 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+
+#ifndef __VMHOLDER_H_
+#define __VMHOLDER_H_
+
+#include "holder.h"
+
+template <typename TYPE>
+inline void DoTheReleaseHost(TYPE *value)
+{
+ if (value)
+ {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ value->Release();
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+
+ }
+}
+
+NEW_WRAPPER_TEMPLATE1(HostComHolder, DoTheReleaseHost<_TYPE>);
+
+#endif
diff --git a/src/vm/weakreferencenative.cpp b/src/vm/weakreferencenative.cpp
new file mode 100644
index 0000000000..1d6a5ba808
--- /dev/null
+++ b/src/vm/weakreferencenative.cpp
@@ -0,0 +1,982 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Implementation: WeakReferenceNative.cpp
+**
+**
+===========================================================*/
+
+#include "common.h"
+
+#include "weakreferencenative.h"
+#include "handletablepriv.h"
+
+//************************************************************************
+
+// We use several special values of the handle to track extra state without increasing the instance size.
+const LPVOID specialWeakReferenceHandles[3] = { 0, 0, 0 };
+
+// SPECIAL_HANDLE_SPINLOCK is used to implement a spinlock that protects against races between setting the target and finalization
+#define SPECIAL_HANDLE_SPINLOCK ((OBJECTHANDLE)(&specialWeakReferenceHandles[0]))
+
+// SPECIAL_HANDLE_FINALIZED is used to track the original type of the handle so that IsTrackResurrection keeps working on finalized
+// objects for backward compatibility.
+#define SPECIAL_HANDLE_FINALIZED_SHORT ((OBJECTHANDLE)(&specialWeakReferenceHandles[1]))
+#define SPECIAL_HANDLE_FINALIZED_LONG ((OBJECTHANDLE)(&specialWeakReferenceHandles[2]))
+
+#define IS_SPECIAL_HANDLE(h) ((size_t)(h) - (size_t)(&specialWeakReferenceHandles) < sizeof(specialWeakReferenceHandles))
+
+//
+// A WeakReference instance can hold one of three types of handles - short or long weak handles,
+// or a WinRT weak reference handle. The WinRT weak reference handle has the extra capability
+// of recreating an RCW for a COM object which is still alive even though the previous RCW had
+// been collected. In order to differentiate this type of handle from the standard weak handles,
+// the bottom bit is stolen.
+//
+// Note that the bit is stolen only in the local copy of the object handle, held in the m_handle
+// field of the weak reference object. The handle in the handle table itself does not have its
+// bottom bit stolen, and requires using HandleFetchType to determine what type it is. The bit
+// is strictly a performance optimization for the weak reference implementation, which is
+// responsible for setting up the bit as it needs and for ensuring that it is cleared whenever an
+// object handle leaves the weak reference code, for instance to interact with the handle table
+// or diagnostics tools.
+//
+// The following functions are to set, test, and unset that bit before the handle is used.
+//
+
+// Determine if an object handle is a WinRT weak reference handle
+bool IsWinRTWeakReferenceHandle(OBJECTHANDLE handle)
+{
+ STATIC_CONTRACT_LEAF;
+ return (reinterpret_cast<UINT_PTR>(handle) & 0x1) != 0x0;
+}
+
+// Mark an object handle as being a WinRT weak reference handle
+OBJECTHANDLE SetWinRTWeakReferenceHandle(OBJECTHANDLE handle)
+{
+ STATIC_CONTRACT_LEAF;
+
+ _ASSERTE(!IsWinRTWeakReferenceHandle(handle));
+ return reinterpret_cast<OBJECTHANDLE>(reinterpret_cast<UINT_PTR>(handle) | 0x1);
+}
+
+// Get the object handle value even if the object is a WinRT weak reference
+OBJECTHANDLE GetHandleValue(OBJECTHANDLE handle)
+{
+ STATIC_CONTRACT_LEAF;
+ UINT_PTR mask = ~(static_cast<UINT_PTR>(0x1));
+ return reinterpret_cast<OBJECTHANDLE>(reinterpret_cast<UINT_PTR>(handle) & mask);
+}
+
+FORCEINLINE OBJECTHANDLE AcquireWeakHandleSpinLock(WEAKREFERENCEREF pThis);
+FORCEINLINE void ReleaseWeakHandleSpinLock(WEAKREFERENCEREF pThis, OBJECTHANDLE newHandle);
+
+struct WeakHandleSpinLockHolder
+{
+ OBJECTHANDLE RawHandle;
+ OBJECTHANDLE Handle;
+ WEAKREFERENCEREF* pWeakReference;
+
+ WeakHandleSpinLockHolder(OBJECTHANDLE rawHandle, WEAKREFERENCEREF* weakReference)
+ : RawHandle(rawHandle), Handle(GetHandleValue(rawHandle)), pWeakReference(weakReference)
+ {
+ STATIC_CONTRACT_LEAF;
+ }
+
+ ~WeakHandleSpinLockHolder()
+ {
+ WRAPPER_NO_CONTRACT;
+ ReleaseWeakHandleSpinLock(*pWeakReference, RawHandle);
+ }
+
+private:
+ WeakHandleSpinLockHolder(const WeakHandleSpinLockHolder& other);
+ WeakHandleSpinLockHolder& operator=(const WeakHandleSpinLockHolder& other);
+};
+
+#ifdef FEATURE_COMINTEROP
+
+// Get a WinRT weak reference for the object underlying an RCW if applicable. If the incoming object cannot
+// use a WinRT weak reference, nullptr is returned. Otherwise, an AddRef-ed IWeakReference* for the COM
+// object underlying the RCW is returned.
+//
+// In order to qualify to be used with a HNDTYPE_WEAK_WINRT, the incoming object must:
+// * be an RCW
+// * respond to a QI for IWeakReferenceSource
+// * succeed when asked for an IWeakReference*
+//
+// Note that *pObject should be GC protected on the way into this method
+IWeakReference* GetWinRTWeakReference(OBJECTREF* pObject)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ PRECONDITION(CheckPointer(pObject));
+ }
+ CONTRACTL_END;
+
+ if (*pObject == NULL)
+ {
+ return nullptr;
+ }
+
+ ASSERT_PROTECTED(pObject);
+
+ MethodTable* pMT = (*pObject)->GetMethodTable();
+
+ // If the object is not an RCW, then we do not want to use a WinRT weak reference to it
+ if (!pMT->IsComObjectType())
+ {
+ return nullptr;
+ }
+
+ // If the object is a managed type deriving from a COM type, then we also do not want to use a WinRT
+ // weak reference to it. (Otherwise, we'll wind up resolving IWeakReference-s back into the CLR
+ // when we don't want to have reentrancy).
+ if (pMT != g_pBaseCOMObject && pMT->IsExtensibleRCW())
+ {
+ return nullptr;
+ }
+
+ SafeComHolder<IWeakReferenceSource> pWeakReferenceSource(reinterpret_cast<IWeakReferenceSource*>(GetComIPFromObjectRef(pObject, IID_IWeakReferenceSource, false /* throwIfNoComIP */)));
+ if (pWeakReferenceSource == nullptr)
+ {
+ return nullptr;
+ }
+
+ GCX_PREEMP();
+ SafeComHolderPreemp<IWeakReference> pWeakReference;
+ if (FAILED(pWeakReferenceSource->GetWeakReference(&pWeakReference)))
+ {
+ return nullptr;
+ }
+
+ return pWeakReference.Extract();
+}
+
+// Given an object handle that stores a WinRT weak reference, attempt to create an RCW
+// and store it back in the handle, returning the RCW. If the underlying WinRT object
+// is not alive, then the result is NULL.
+//
+// In order to create a new RCW, we must:
+// * Have an m_handle of HNDTYPE_WEAK_WINRT (ie the bottom bit of m_handle is set)
+// * Have stored an IWeakReference* in the handle extra info when setting up the handle
+// (see GetWinRTWeakReference)
+// * The IWeakReference* must respond to a Resolve request for IID_IInspectable
+NOINLINE Object* LoadWinRTWeakReferenceTarget(WEAKREFERENCEREF weakReference, TypeHandle targetType, LPVOID __me)
+{
+ CONTRACTL
+ {
+ FCALL_CHECK;
+ PRECONDITION(weakReference != NULL);
+ }
+ CONTRACTL_END;
+
+ struct
+ {
+ WEAKREFERENCEREF weakReference;
+ OBJECTREF rcw;
+ OBJECTREF target;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ gc.weakReference = weakReference;
+
+ FC_INNER_PROLOG_NO_ME_SETUP();
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_PROTECT(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, gc);
+
+ // Acquire the spin lock to get the IWeakReference* associated with the weak reference. We will then need to
+ // release the lock while resolving the IWeakReference* since we need to enter preemptive mode while calling out
+ // to COM to resolve the object and we don't want to do that while holding the lock. If we wind up being able
+    // to generate a new RCW, we'll reacquire the lock to save the RCW in the handle.
+ //
+ // Since we're acquiring and releasing the lock multiple times, we need to check the handle state each time we
+ // reacquire the lock to make sure that another thread hasn't reassigned the target of the handle or finalized it
+ SafeComHolder<IWeakReference> pWinRTWeakReference = nullptr;
+ {
+ WeakHandleSpinLockHolder handle(AcquireWeakHandleSpinLock(gc.weakReference), &gc.weakReference);
+ GCX_NOTRIGGER();
+
+ // Make sure that while we were not holding the spin lock, another thread did not change the target of
+ // this weak reference. Only fetch the IWeakReference* if we still have a valid handle holding a NULL object
+ // and the handle is still a HNDTYPE_WEAK_WINRT type handle.
+ if ((handle.Handle != NULL) && !IS_SPECIAL_HANDLE(handle.Handle))
+ {
+ if (*(Object **)(handle.Handle) != NULL)
+ {
+ // While we released the spin lock, another thread already set a new target for the weak reference.
+ // We don't want to replace it with an RCW that we fetch, so save it to return as the object the
+ // weak reference is targeting.
+ gc.target = ObjectToOBJECTREF(*(Object **)(handle.Handle));
+ }
+ else if(IsWinRTWeakReferenceHandle(handle.RawHandle))
+ {
+ _ASSERTE(HandleFetchType(handle.Handle) == HNDTYPE_WEAK_WINRT);
+
+ // Retrieve the associated IWeakReference* for this weak reference. Add a reference to it while we release
+ // the spin lock so that another thread doesn't release it out from underneath us.
+ //
+ // Setting pWinRTWeakReference will claim that it triggers a GC, however that's not true in this case because
+ // it's always set to NULL here and there's nothing for it to release.
+ _ASSERTE(pWinRTWeakReference.IsNull());
+ CONTRACT_VIOLATION(GCViolation);
+ pWinRTWeakReference = reinterpret_cast<IWeakReference*>(HndGetHandleExtraInfo(handle.Handle));
+ if (!pWinRTWeakReference.IsNull())
+ {
+ pWinRTWeakReference->AddRef();
+ }
+ }
+ }
+ }
+
+    // If the weak reference was in a state where it had an IWeakReference* for us to use, then we need to find the IUnknown
+ // identity of the underlying COM object (assuming that object is still alive). This work is done without holding the
+ // spin lock since it will call out to arbitrary code and as such we need to switch to preemptive mode.
+ SafeComHolder<IUnknown> pTargetIdentity = nullptr;
+ if (pWinRTWeakReference != nullptr)
+ {
+ _ASSERTE(gc.target == NULL);
+
+ GCX_PREEMP();
+
+ // Using the IWeakReference*, get ahold of the target WinRT object's IInspectable*. If this resolve fails, then we
+ // assume that the underlying WinRT object is no longer alive, and thus we cannot create a new RCW for it.
+ SafeComHolderPreemp<IInspectable> pTarget = nullptr;
+ if (SUCCEEDED(pWinRTWeakReference->Resolve(IID_IInspectable, &pTarget)))
+ {
+ if (!pTarget.IsNull())
+ {
+ // Get the IUnknown identity for the underlying object
+ SafeQueryInterfacePreemp(pTarget, IID_IUnknown, &pTargetIdentity);
+ }
+ }
+ }
+
+    // If we were able to get an IUnknown identity for the object, then we can find or create an associated RCW for it.
+ if (!pTargetIdentity.IsNull())
+ {
+ GetObjectRefFromComIP(&gc.rcw, pTargetIdentity);
+ }
+
+ // If we were able to get an RCW, then we need to reacquire the spin lock and store the RCW in the handle. Note that
+ // it's possible that another thread has acquired the spin lock and set the target of the weak reference while we were
+    // building the RCW. In that case, we will defer to the handle that the other thread set, and let the RCW die.
+ if (gc.rcw != NULL)
+ {
+ // Make sure the type we got back from the WinRT object is compatible with the type the managed
+ // weak reference expects. (For instance, in the WeakReference<T> case, the returned type
+ // had better be compatible with T).
+ TypeHandle rcwType(gc.rcw->GetMethodTable());
+ if (!rcwType.CanCastTo(targetType))
+ {
+ SString weakReferenceTypeName;
+ TypeString::AppendType(weakReferenceTypeName, targetType, TypeString::FormatNamespace | TypeString::FormatFullInst | TypeString::FormatAssembly);
+
+ SString resolvedTypeName;
+ TypeString::AppendType(resolvedTypeName, rcwType, TypeString::FormatNamespace | TypeString::FormatFullInst | TypeString::FormatAssembly);
+
+ COMPlusThrow(kInvalidCastException, IDS_EE_WINRT_WEAKREF_BAD_TYPE, weakReferenceTypeName.GetUnicode(), resolvedTypeName.GetUnicode());
+ }
+
+ WeakHandleSpinLockHolder handle(AcquireWeakHandleSpinLock(gc.weakReference), &gc.weakReference);
+ GCX_NOTRIGGER();
+
+
+ // Now that we've reacquired the lock, see if the handle is still empty. If so, then save the RCW as the new target of the handle.
+ if ((handle.Handle != NULL) && !IS_SPECIAL_HANDLE(handle.Handle))
+ {
+ _ASSERTE(gc.target == NULL);
+ gc.target = ObjectToOBJECTREF(*(Object **)(handle.Handle));
+
+ if (gc.target == NULL)
+ {
+ StoreObjectInHandle(handle.Handle, gc.rcw);
+ gc.target = gc.rcw;
+ }
+ }
+ }
+
+ HELPER_METHOD_FRAME_END();
+ FC_INNER_EPILOG();
+
+ return OBJECTREFToObject(gc.target);
+}
+
+#endif // FEATURE_COMINTEROP
+
+//************************************************************************
+
+//
+// Spinlock, implemented by overloading the WeakReference::m_Handle field, that protects against races
+// between setting the target and finalization. (An illustrative usage sketch follows the helpers below.)
+//
+
+NOINLINE OBJECTHANDLE AcquireWeakHandleSpinLockSpin(WEAKREFERENCEREF pThis)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ DWORD dwSwitchCount = 0;
+
+ //
+ // Boilerplate spinning logic stolen from other locks
+ //
+ for (;;)
+ {
+ if (g_SystemInfo.dwNumberOfProcessors > 1)
+ {
+ DWORD spincount = g_SpinConstants.dwInitialDuration;
+
+ for (;;)
+ {
+ for (DWORD i = 0; i < spincount; i++)
+ {
+ YieldProcessor();
+ }
+
+ OBJECTHANDLE handle = InterlockedExchangeT(&pThis->m_Handle, SPECIAL_HANDLE_SPINLOCK);
+ if (handle != SPECIAL_HANDLE_SPINLOCK)
+ return handle;
+
+ spincount *= g_SpinConstants.dwBackoffFactor;
+ if (spincount > g_SpinConstants.dwMaximumDuration)
+ {
+ break;
+ }
+ }
+ }
+
+ __SwitchToThread(0, ++dwSwitchCount);
+
+ OBJECTHANDLE handle = InterlockedExchangeT(&pThis->m_Handle, SPECIAL_HANDLE_SPINLOCK);
+ if (handle != SPECIAL_HANDLE_SPINLOCK)
+ return handle;
+ }
+}
+
+FORCEINLINE OBJECTHANDLE AcquireWeakHandleSpinLock(WEAKREFERENCEREF pThis)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ OBJECTHANDLE handle = InterlockedExchangeT(&pThis->m_Handle, SPECIAL_HANDLE_SPINLOCK);
+ if (handle != SPECIAL_HANDLE_SPINLOCK)
+ return handle;
+ return AcquireWeakHandleSpinLockSpin(pThis);
+}
+
+FORCEINLINE void ReleaseWeakHandleSpinLock(WEAKREFERENCEREF pThis, OBJECTHANDLE newHandle)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(newHandle != SPECIAL_HANDLE_SPINLOCK);
+ pThis->m_Handle = newHandle;
+}
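+
+// Illustrative sketch (not part of the build) of the lock protocol above: acquire atomically
+// swaps the sentinel into m_Handle and hands back the real value, the holder must re-check
+// that value (another thread may have retargeted or finalized the reference), and release
+// publishes a non-sentinel value back. The function name is hypothetical.
+#if 0
+void InspectTargetSketch(WEAKREFERENCEREF pThis)
+{
+    OBJECTHANDLE rawHandle = AcquireWeakHandleSpinLock(pThis);   // m_Handle now holds the sentinel
+    if ((rawHandle != NULL) && !IS_SPECIAL_HANDLE(rawHandle))
+    {
+        // ... safe to use GetHandleValue(rawHandle) here; finalization is held off ...
+    }
+    ReleaseWeakHandleSpinLock(pThis, rawHandle);                 // publish the handle back
+}
+#endif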
+
+//************************************************************************
+
+MethodTable *pWeakReferenceMT = NULL;
+MethodTable *pWeakReferenceOfTCanonMT = NULL;
+
+//************************************************************************
+
+FCIMPL3(void, WeakReferenceNative::Create, WeakReferenceObject * pThisUNSAFE, Object * pTargetUNSAFE, CLR_BOOL trackResurrection)
+{
+ FCALL_CONTRACT;
+
+ struct
+ {
+ WEAKREFERENCEREF pThis;
+ OBJECTREF pTarget;
+ } gc;
+
+ gc.pThis = WEAKREFERENCEREF(pThisUNSAFE);
+ gc.pTarget = OBJECTREF(pTargetUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ if (gc.pThis == NULL)
+ COMPlusThrow(kNullReferenceException);
+
+ if (pWeakReferenceMT == NULL)
+ pWeakReferenceMT = MscorlibBinder::GetClass(CLASS__WEAKREFERENCE);
+
+ _ASSERTE(gc.pThis->GetMethodTable()->CanCastToClass(pWeakReferenceMT));
+
+ // Create the handle.
+#ifdef FEATURE_COMINTEROP
+ IWeakReference* pRawWinRTWeakReference = GetWinRTWeakReference(&gc.pTarget);
+ if (pRawWinRTWeakReference != nullptr)
+ {
+ SafeComHolder<IWeakReference> pWinRTWeakReferenceHolder(pRawWinRTWeakReference);
+ gc.pThis->m_Handle = SetWinRTWeakReferenceHandle(GetAppDomain()->CreateWinRTWeakHandle(gc.pTarget, pWinRTWeakReferenceHolder));
+ pWinRTWeakReferenceHolder.SuppressRelease();
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ gc.pThis->m_Handle = GetAppDomain()->CreateTypedHandle(gc.pTarget,
+ trackResurrection ? HNDTYPE_WEAK_LONG : HNDTYPE_WEAK_SHORT);
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+FCIMPL3(void, WeakReferenceOfTNative::Create, WeakReferenceObject * pThisUNSAFE, Object * pTargetUNSAFE, CLR_BOOL trackResurrection)
+{
+ FCALL_CONTRACT;
+
+ struct
+ {
+ WEAKREFERENCEREF pThis;
+ OBJECTREF pTarget;
+ } gc;
+
+ gc.pThis = WEAKREFERENCEREF(pThisUNSAFE);
+ gc.pTarget = OBJECTREF(pTargetUNSAFE);
+
+ HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
+
+ if (gc.pThis == NULL)
+ COMPlusThrow(kNullReferenceException);
+
+ if (pWeakReferenceOfTCanonMT == NULL)
+ pWeakReferenceOfTCanonMT = gc.pThis->GetMethodTable()->GetCanonicalMethodTable();
+
+ _ASSERTE(gc.pThis->GetMethodTable()->GetCanonicalMethodTable() == pWeakReferenceOfTCanonMT);
+
+ // Create the handle.
+#ifdef FEATURE_COMINTEROP
+ IWeakReference* pRawWinRTWeakReference = GetWinRTWeakReference(&gc.pTarget);
+ if (pRawWinRTWeakReference != nullptr)
+ {
+ SafeComHolder<IWeakReference> pWinRTWeakReferenceHolder(pRawWinRTWeakReference);
+ gc.pThis->m_Handle = SetWinRTWeakReferenceHandle(GetAppDomain()->CreateWinRTWeakHandle(gc.pTarget, pWinRTWeakReferenceHolder));
+ pWinRTWeakReferenceHolder.SuppressRelease();
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ gc.pThis->m_Handle = GetAppDomain()->CreateTypedHandle(gc.pTarget,
+ trackResurrection ? HNDTYPE_WEAK_LONG : HNDTYPE_WEAK_SHORT);
+ }
+
+ HELPER_METHOD_FRAME_END();
+}
+FCIMPLEND
+
+//************************************************************************
+
+// This entrypoint is also used for direct finalization by the GC. Note that we cannot depend on the runtime
+// being suspended when this is called, because background GC will call this method while user managed code
+// is running.
+void FinalizeWeakReference(Object * obj)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ WEAKREFERENCEREF pThis((WeakReferenceObject *)(obj));
+
+ OBJECTHANDLE handle = AcquireWeakHandleSpinLock(pThis);
+ OBJECTHANDLE handleToDestroy = NULL;
+ bool isWeakWinRTHandle = false;
+
+ // Check for not yet constructed or already finalized handle
+ if ((handle != NULL) && !IS_SPECIAL_HANDLE(handle))
+ {
+ handleToDestroy = GetHandleValue(handle);
+
+ // Cache the old handle value
+ UINT handleType = HandleFetchType(handleToDestroy);
+#ifdef FEATURE_COMINTEROP
+ _ASSERTE(handleType == HNDTYPE_WEAK_LONG || handleType == HNDTYPE_WEAK_SHORT || handleType == HNDTYPE_WEAK_WINRT);
+ isWeakWinRTHandle = handleType == HNDTYPE_WEAK_WINRT;
+#else // !FEATURE_COMINTEROP
+ _ASSERTE(handleType == HNDTYPE_WEAK_LONG || handleType == HNDTYPE_WEAK_SHORT);
+#endif // FEATURE_COMINTEROP
+
+ handle = (handleType == HNDTYPE_WEAK_LONG) ?
+ SPECIAL_HANDLE_FINALIZED_LONG : SPECIAL_HANDLE_FINALIZED_SHORT;
+ }
+
+ // Release the spin lock
+ ReleaseWeakHandleSpinLock(pThis, handle);
+
+ if (handleToDestroy != NULL)
+ {
+#ifdef FEATURE_COMINTEROP
+ if (isWeakWinRTHandle)
+ {
+ DestroyWinRTWeakHandle(handleToDestroy);
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ DestroyTypedHandle(handleToDestroy);
+ }
+ }
+}
+
+FCIMPL1(void, WeakReferenceNative::Finalize, WeakReferenceObject * pThis)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ if (pThis == NULL)
+ {
+ FCUnique(0x1);
+ COMPlusThrow(kNullReferenceException);
+ }
+
+ FinalizeWeakReference(pThis);
+
+ HELPER_METHOD_FRAME_END_POLL();
+}
+FCIMPLEND
+
+FCIMPL1(void, WeakReferenceOfTNative::Finalize, WeakReferenceObject * pThis)
+{
+ FCALL_CONTRACT;
+
+ HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+
+ if (pThis == NULL)
+ COMPlusThrow(kNullReferenceException);
+
+ FinalizeWeakReference(pThis);
+
+ HELPER_METHOD_FRAME_END_POLL();
+}
+FCIMPLEND
+
+//************************************************************************
+
+#include <optsmallperfcritical.h>
+
+static FORCEINLINE OBJECTREF GetWeakReferenceTarget(WEAKREFERENCEREF pThis)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ OBJECTHANDLE rawHandle = pThis->m_Handle.LoadWithoutBarrier();
+ OBJECTHANDLE handle = GetHandleValue(rawHandle);
+
+ if (handle == NULL)
+ return NULL;
+
+ // Try a speculative lock-free read first
+ if (rawHandle != SPECIAL_HANDLE_SPINLOCK)
+ {
+ //
+        // There is a theoretical chance that the speculative lock-free read may AV while reading the value
+        // of a freed handle if the handle table decides to release the memory that the handle lives in.
+        // It is not an exploitable security issue because we will fail fast on the AV; it is denial of
+        // service only. Non-malicious user code will never hit it.
+        //
+        // We have had this theoretical bug in there since forever. Fixing it by always taking the lock would
+        // degrade the performance-critical weak handle getter several times over. The right fix may be
+        // to ensure that handle table memory is released only while the runtime is suspended.
+ //
+ Object * pSpeculativeTarget = VolatileLoad((Object **)(handle));
+
+ //
+ // We want to ensure that the handle was still alive when we fetched the target,
+        // so we double check m_handle here. Note that the read of the handle
+        // value has to take a memory barrier for this to work, but the read of m_handle does not.
+ //
+ if (rawHandle == pThis->m_Handle.LoadWithoutBarrier())
+ {
+ return OBJECTREF(pSpeculativeTarget);
+ }
+ }
+
+
+ rawHandle = AcquireWeakHandleSpinLock(pThis);
+ GCX_NOTRIGGER();
+
+ handle = GetHandleValue(rawHandle);
+ OBJECTREF pTarget = OBJECTREF(*(Object **)(handle));
+
+ ReleaseWeakHandleSpinLock(pThis, rawHandle);
+
+ return pTarget;
+}
+
+FCIMPL1(Object *, WeakReferenceNative::GetTarget, WeakReferenceObject * pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ WEAKREFERENCEREF pThis(pThisUNSAFE);
+ if (pThis == NULL)
+ {
+ FCUnique(0x1);
+ FCThrow(kNullReferenceException);
+ }
+
+ OBJECTREF pTarget = GetWeakReferenceTarget(pThis);
+
+#ifdef FEATURE_COMINTEROP
+    // If we found an object, or we're not a WinRT weak reference, then we're done. Otherwise
+    // we can try to create a new RCW to the underlying WinRT object if it's still alive.
+ if (pTarget != NULL || !IsWinRTWeakReferenceHandle(pThis->m_Handle))
+ {
+ FC_GC_POLL_AND_RETURN_OBJREF(pTarget);
+ }
+
+ FC_INNER_RETURN(Object*, LoadWinRTWeakReferenceTarget(pThis, g_pObjectClass, GetEEFuncEntryPointMacro(WeakReferenceNative::GetTarget)));
+#else // !FEATURE_COMINTEROP
+ FC_GC_POLL_AND_RETURN_OBJREF(pTarget);
+#endif // FEATURE_COMINTEROP
+}
+FCIMPLEND
+
+FCIMPL1(Object *, WeakReferenceOfTNative::GetTarget, WeakReferenceObject * pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ WEAKREFERENCEREF pThis(pThisUNSAFE);
+ if (pThis == NULL)
+ {
+ FCThrow(kNullReferenceException);
+ }
+
+ OBJECTREF pTarget = GetWeakReferenceTarget(pThis);
+
+
+#ifdef FEATURE_COMINTEROP
+    // If we found an object, or we're not a WinRT weak reference, then we're done. Otherwise
+    // we can try to create a new RCW to the underlying WinRT object if it's still alive.
+ if (pTarget != NULL || !IsWinRTWeakReferenceHandle(pThis->m_Handle))
+ {
+ FC_GC_POLL_AND_RETURN_OBJREF(pTarget);
+ }
+
+ FC_INNER_RETURN(Object*, LoadWinRTWeakReferenceTarget(pThis, pThis->GetMethodTable()->GetInstantiation()[0], GetEEFuncEntryPointMacro(WeakReferenceOfTNative::GetTarget)));
+#else // !FEATURE_COMINTEROP
+ FC_GC_POLL_AND_RETURN_OBJREF(pTarget);
+#endif // FEATURE_COMINTEROP
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, WeakReferenceNative::IsAlive, WeakReferenceObject * pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ WEAKREFERENCEREF pThis(pThisUNSAFE);
+ if (pThis == NULL)
+ {
+ FCThrow(kNullReferenceException);
+ }
+
+ BOOL fRet = GetWeakReferenceTarget(pThis) != NULL;
+
+ FC_GC_POLL_RET();
+
+ FC_RETURN_BOOL(fRet);
+}
+FCIMPLEND
+
+#include <optdefault.h>
+
+//************************************************************************
+
+#include <optsmallperfcritical.h>
+
+// Slow path helper for setting the target of a weak reference. This code is used if a WinRT weak reference might
+// be required.
+NOINLINE void SetWeakReferenceTarget(WEAKREFERENCEREF weakReference, OBJECTREF target, LPVOID __me)
+{
+ FCALL_CONTRACT;
+
+ FC_INNER_PROLOG_NO_ME_SETUP();
+ HELPER_METHOD_FRAME_BEGIN_ATTRIB_2(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, target, weakReference);
+
+#ifdef FEATURE_COMINTEROP
+ SafeComHolder<IWeakReference> pTargetWeakReference(GetWinRTWeakReference(&target));
+#endif // FEATURE_COMINTEROP
+
+
+ WeakHandleSpinLockHolder handle(AcquireWeakHandleSpinLock(weakReference), &weakReference);
+ GCX_NOTRIGGER();
+
+#ifdef FEATURE_COMINTEROP
+ //
+ // We have four combinations to handle here
+ //
+ // Existing target is a GC object, new target is a GC object:
+ // * Just store the new object in the handle
+ //
+ // Existing target is WinRT, new target is WinRT:
+ // * Release the existing IWeakReference*
+ // * Store the new IWeakReference*
+ // * Store the new object in the handle
+ //
+ // Existing target is WinRT, new target is GC:
+ // * Release the existing IWeakReference*
+ // * Store null to the IWeakReference* field
+ // * Store the new object in the handle
+ //
+ // Existing target is GC, new target is WinRT:
+ // * Destroy the existing handle
+ // * Allocate a new WinRT weak handle for the new target
+ //
+
+ if (IsWinRTWeakReferenceHandle(handle.RawHandle))
+ {
+ // If the existing reference is a WinRT weak reference, we need to release its IWeakReference pointer
+ // and update it with the new weak reference pointer. If the incoming object is not an RCW that can
+ // use IWeakReference, then pTargetWeakReference will be null. Therefore, no matter what the incoming
+ // object type is, we can unconditionally store pTargetWeakReference to the object handle's extra data.
+ IWeakReference* pExistingWeakReference = reinterpret_cast<IWeakReference*>(HndGetHandleExtraInfo(handle.Handle));
+ HndSetHandleExtraInfo(handle.Handle, HNDTYPE_WEAK_WINRT, reinterpret_cast<LPARAM>(pTargetWeakReference.GetValue()));
+ StoreObjectInHandle(handle.Handle, target);
+
+ if (pExistingWeakReference != nullptr)
+ {
+ pExistingWeakReference->Release();
+ }
+ }
+ else if (pTargetWeakReference != nullptr)
+ {
+ // The existing handle is not a WinRT weak reference, but we need to store the new object in
+ // a WinRT weak reference. Therefore we need to destroy the old handle and create a new WinRT
+ // handle. The new handle needs to be allocated first to prevent the weak reference from holding
+ // a destroyed handle if we fail to allocate the new one.
+ _ASSERTE(!IsWinRTWeakReferenceHandle(handle.RawHandle));
+ OBJECTHANDLE previousHandle = handle.RawHandle;
+
+ handle.Handle = GetAppDomain()->CreateWinRTWeakHandle(target, pTargetWeakReference);
+ handle.RawHandle = SetWinRTWeakReferenceHandle(handle.Handle);
+
+ DestroyTypedHandle(previousHandle);
+ }
+ else
+#endif // FEATURE_COMINTEROP
+ {
+ StoreObjectInHandle(handle.Handle, target);
+ }
+
+#ifdef FEATURE_COMINTEROP
+ pTargetWeakReference.SuppressRelease();
+#endif // FEATURE_COMINTEROP
+
+ HELPER_METHOD_FRAME_END();
+ FC_INNER_EPILOG();
+}
+
+FCIMPL2(void, WeakReferenceNative::SetTarget, WeakReferenceObject * pThisUNSAFE, Object * pTargetUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ WEAKREFERENCEREF pThis(pThisUNSAFE);
+ OBJECTREF pTarget(pTargetUNSAFE);
+
+ if (pThis == NULL)
+ {
+ FCUnique(0x1);
+ FCThrowVoid(kNullReferenceException);
+ }
+
+ bool storedObject = false;
+
+ OBJECTHANDLE handle = AcquireWeakHandleSpinLock(pThis);
+ {
+ if (handle == NULL || IS_SPECIAL_HANDLE(handle))
+ {
+ ReleaseWeakHandleSpinLock(pThis, handle);
+ FCThrowResVoid(kInvalidOperationException, W("InvalidOperation_HandleIsNotInitialized"));
+ }
+
+ // Switch to no-trigger after the handle has been validated. FCThrow triggers.
+ GCX_NOTRIGGER();
+
+ // If the existing handle is a GC weak handle and the new target is not an RCW, then
+ // we can avoid setting up a helper method frame and just reset the handle directly.
+ if (!IsWinRTWeakReferenceHandle(handle))
+ {
+ if (pTarget == NULL || !pTarget->GetMethodTable()->IsComObjectType())
+ {
+ StoreObjectInHandle(handle, pTarget);
+ storedObject = true;
+ }
+ }
+
+ // SetWeakReferenceTarget will reacquire the spinlock after setting up a helper method frame. This allows
+ // the frame setup to throw without worrying about leaking the spinlock, and allows the epilog to be cleanly
+ // walked by the epilog decoder.
+ ReleaseWeakHandleSpinLock(pThis, handle);
+ }
+
+ // If we reset the handle directly, then early out before setting up a helper method frame
+ if (storedObject)
+ {
+ FC_GC_POLL();
+ return;
+ }
+
+ FC_INNER_RETURN_VOID(SetWeakReferenceTarget(pThis, pTarget, GetEEFuncEntryPointMacro(WeakReferenceNative::SetTarget)));
+}
+FCIMPLEND
+
+FCIMPL2(void, WeakReferenceOfTNative::SetTarget, WeakReferenceObject * pThisUNSAFE, Object * pTargetUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ WEAKREFERENCEREF pThis(pThisUNSAFE);
+ OBJECTREF pTarget(pTargetUNSAFE);
+
+ if (pThis == NULL)
+ {
+ FCThrowVoid(kNullReferenceException);
+ }
+
+ bool storedObject = false;
+
+ OBJECTHANDLE handle = AcquireWeakHandleSpinLock(pThis);
+ {
+ if (handle == NULL || IS_SPECIAL_HANDLE(handle))
+ {
+ ReleaseWeakHandleSpinLock(pThis, handle);
+ FCThrowResVoid(kInvalidOperationException, W("InvalidOperation_HandleIsNotInitialized"));
+ }
+
+ // Switch to no-trigger after the handle has been validated. FCThrow triggers.
+ GCX_NOTRIGGER();
+
+ // If the existing handle is a GC weak handle and the new target is not an RCW, then
+ // we can avoid setting up a helper method frame and just reset the handle directly.
+ if (!IsWinRTWeakReferenceHandle(handle))
+ {
+ if (pTarget == NULL || !pTarget->GetMethodTable()->IsComObjectType())
+ {
+ StoreObjectInHandle(handle, pTarget);
+ storedObject = true;
+ }
+ }
+
+ // SetWeakReferenceTarget will reacquire the spinlock after setting up a helper method frame. This allows
+ // the frame setup to throw without worrying about leaking the spinlock, and allows the epilog to be cleanly
+ // walked by the epilog decoder.
+ ReleaseWeakHandleSpinLock(pThis, handle);
+ }
+
+ // If we reset the handle directly, then early out before setting up a helper method frame
+ if (storedObject)
+ {
+ FC_GC_POLL();
+ return;
+ }
+
+ FC_INNER_RETURN_VOID(SetWeakReferenceTarget(pThis, pTarget, GetEEFuncEntryPointMacro(WeakReferenceOfTNative::SetTarget)));
+}
+FCIMPLEND
+
+#include <optdefault.h>
+
+//************************************************************************
+
+FCIMPL1(FC_BOOL_RET, WeakReferenceNative::IsTrackResurrection, WeakReferenceObject * pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ WEAKREFERENCEREF pThis(pThisUNSAFE);
+
+ if (pThis == NULL)
+ {
+ FCUnique(0x1);
+ FCThrow(kNullReferenceException);
+ }
+
+ BOOL trackResurrection = FALSE;
+ OBJECTHANDLE handle = AcquireWeakHandleSpinLock(pThis);
+ {
+ GCX_NOTRIGGER();
+
+ if (handle == NULL)
+ {
+ trackResurrection = FALSE;
+ }
+ else if (IS_SPECIAL_HANDLE(handle))
+ {
+ trackResurrection = (handle == SPECIAL_HANDLE_FINALIZED_LONG);
+ }
+ else
+ {
+ trackResurrection = HandleFetchType(GetHandleValue(handle)) == HNDTYPE_WEAK_LONG;
+ }
+
+ ReleaseWeakHandleSpinLock(pThis, handle);
+ }
+
+ FC_GC_POLL_RET();
+ FC_RETURN_BOOL(trackResurrection);
+}
+FCIMPLEND
+
+FCIMPL1(FC_BOOL_RET, WeakReferenceOfTNative::IsTrackResurrection, WeakReferenceObject * pThisUNSAFE)
+{
+ FCALL_CONTRACT;
+
+ WEAKREFERENCEREF pThis(pThisUNSAFE);
+
+ if (pThis == NULL)
+ {
+ FCThrow(kNullReferenceException);
+ }
+
+ BOOL trackResurrection = FALSE;
+ OBJECTHANDLE handle = AcquireWeakHandleSpinLock(pThis);
+ {
+ GCX_NOTRIGGER();
+
+ if (handle == NULL)
+ {
+ trackResurrection = FALSE;
+ }
+ else if (IS_SPECIAL_HANDLE(handle))
+ {
+ trackResurrection = (handle == SPECIAL_HANDLE_FINALIZED_LONG);
+ }
+ else
+ {
+ trackResurrection = HandleFetchType(GetHandleValue(handle)) == HNDTYPE_WEAK_LONG;
+ }
+
+ ReleaseWeakHandleSpinLock(pThis, handle);
+ }
+
+ FC_GC_POLL_RET();
+ FC_RETURN_BOOL(trackResurrection);
+}
+FCIMPLEND
+
+//************************************************************************
diff --git a/src/vm/weakreferencenative.h b/src/vm/weakreferencenative.h
new file mode 100644
index 0000000000..626f63540e
--- /dev/null
+++ b/src/vm/weakreferencenative.h
@@ -0,0 +1,43 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+/*============================================================
+**
+** Header: WeakReferenceNative.h
+**
+**
+===========================================================*/
+
+#ifndef _WEAKREFERENCENATIVE_H
+#define _WEAKREFERENCENATIVE_H
+
+//
+// The implementations of WeakReferenceNative and WeakReferenceOfTNative are identical, but the managed signatures
+// are different. WeakReferenceOfTNative has strongly typed signatures. This is necessary for correct security transparency
+// annotations without compromising inlining (security-critical code cannot be inlined into security-neutral code).
+//
+
+class WeakReferenceNative
+{
+public:
+ static FCDECL3(void, Create, WeakReferenceObject * pThis, Object * pTarget, CLR_BOOL trackResurrection);
+ static FCDECL1(void, Finalize, WeakReferenceObject * pThis);
+ static FCDECL1(Object *, GetTarget, WeakReferenceObject * pThis);
+ static FCDECL2(void, SetTarget, WeakReferenceObject * pThis, Object * pTarget);
+ static FCDECL1(FC_BOOL_RET, IsTrackResurrection, WeakReferenceObject * pThis);
+ static FCDECL1(FC_BOOL_RET, IsAlive, WeakReferenceObject * pThis);
+};
+
+class WeakReferenceOfTNative
+{
+public:
+ static FCDECL3(void, Create, WeakReferenceObject * pThis, Object * pTarget, CLR_BOOL trackResurrection);
+ static FCDECL1(void, Finalize, WeakReferenceObject * pThis);
+ static FCDECL1(Object *, GetTarget, WeakReferenceObject * pThis);
+ static FCDECL2(void, SetTarget, WeakReferenceObject * pThis, Object * pTarget);
+ static FCDECL1(FC_BOOL_RET, IsTrackResurrection, WeakReferenceObject * pThis);
+};
+
+#endif // _WEAKREFERENCENATIVE_H
diff --git a/src/vm/win32threadpool.cpp b/src/vm/win32threadpool.cpp
new file mode 100644
index 0000000000..3e80eb1e67
--- /dev/null
+++ b/src/vm/win32threadpool.cpp
@@ -0,0 +1,5597 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*++
+
+Module Name:
+
+ Win32ThreadPool.cpp
+
+Abstract:
+
+ This module implements Threadpool support using Win32 APIs
+
+
+Revision History:
+ December 1999 - Created
+
+--*/
+
+#include "common.h"
+#include "log.h"
+#include "threadpoolrequest.h"
+#include "win32threadpool.h"
+#include "delegateinfo.h"
+#include "eeconfig.h"
+#include "dbginterface.h"
+#include "corhost.h"
+#include "eventtrace.h"
+#include "threads.h"
+#include "appdomain.inl"
+#include "nativeoverlapped.h"
+#include "hillclimbing.h"
+
+
+#ifndef FEATURE_PAL
+#ifndef DACCESS_COMPILE
+
+// APIs that must be accessed through dynamic linking.
+typedef int (WINAPI *NtQueryInformationThreadProc) (
+ HANDLE ThreadHandle,
+ THREADINFOCLASS ThreadInformationClass,
+ PVOID ThreadInformation,
+ ULONG ThreadInformationLength,
+ PULONG ReturnLength);
+NtQueryInformationThreadProc g_pufnNtQueryInformationThread = NULL;
+
+typedef int (WINAPI *NtQuerySystemInformationProc) (
+ SYSTEM_INFORMATION_CLASS SystemInformationClass,
+ PVOID SystemInformation,
+ ULONG SystemInformationLength,
+ PULONG ReturnLength OPTIONAL);
+NtQuerySystemInformationProc g_pufnNtQuerySystemInformation = NULL;
+
+typedef HANDLE (WINAPI * CreateWaitableTimerExProc) (
+ LPSECURITY_ATTRIBUTES lpTimerAttributes,
+ LPCTSTR lpTimerName,
+ DWORD dwFlags,
+ DWORD dwDesiredAccess);
+CreateWaitableTimerExProc g_pufnCreateWaitableTimerEx = NULL;
+
+typedef BOOL (WINAPI * SetWaitableTimerExProc) (
+ HANDLE hTimer,
+ const LARGE_INTEGER *lpDueTime,
+ LONG lPeriod,
+ PTIMERAPCROUTINE pfnCompletionRoutine,
+ LPVOID lpArgToCompletionRoutine,
+ void* WakeContext, //should be PREASON_CONTEXT, but it's not defined for us (and we don't use it)
+ ULONG TolerableDelay);
+SetWaitableTimerExProc g_pufnSetWaitableTimerEx = NULL;
+
+#endif // !DACCESS_COMPILE
+#endif // !FEATURE_PAL
+
+BOOL ThreadpoolMgr::InitCompletionPortThreadpool = FALSE;
+HANDLE ThreadpoolMgr::GlobalCompletionPort; // used for binding io completions on file handles
+
+SVAL_IMPL(ThreadpoolMgr::ThreadCounter,ThreadpoolMgr,CPThreadCounter);
+
+SVAL_IMPL_INIT(LONG,ThreadpoolMgr,MaxLimitTotalCPThreads,1000); // = MaxLimitCPThreadsPerCPU * number of CPUs
+SVAL_IMPL(LONG,ThreadpoolMgr,MinLimitTotalCPThreads);
+SVAL_IMPL(LONG,ThreadpoolMgr,MaxFreeCPThreads); // = MaxFreeCPThreadsPerCPU * number of CPUs
+
+Volatile<LONG> ThreadpoolMgr::NumCPInfrastructureThreads = 0; // number of threads currently busy handling draining cycle
+
+SVAL_IMPL(ThreadpoolMgr::ThreadCounter, ThreadpoolMgr, WorkerCounter);
+
+SVAL_IMPL(LONG,ThreadpoolMgr,MinLimitTotalWorkerThreads); // defaults to the number of CPUs
+SVAL_IMPL(LONG,ThreadpoolMgr,MaxLimitTotalWorkerThreads); // = GetDefaultMaxLimitWorkerThreads(MinLimitTotalWorkerThreads) unless forced
+
+SVAL_IMPL(LONG,ThreadpoolMgr,cpuUtilization);
+LONG ThreadpoolMgr::cpuUtilizationAverage = 0;
+
+HillClimbing ThreadpoolMgr::HillClimbingInstance;
+
+Volatile<LONG> ThreadpoolMgr::PriorCompletedWorkRequests = 0;
+Volatile<DWORD> ThreadpoolMgr::PriorCompletedWorkRequestsTime;
+Volatile<DWORD> ThreadpoolMgr::NextCompletedWorkRequestsTime;
+LARGE_INTEGER ThreadpoolMgr::CurrentSampleStartTime;
+
+int ThreadpoolMgr::ThreadAdjustmentInterval;
+
+#define INVALID_HANDLE ((HANDLE) -1)
+#define NEW_THREAD_THRESHOLD 7 // Number of requests outstanding before we start a new thread
+#define CP_THREAD_PENDINGIO_WAIT 5000 // polling interval when thread is retired but has a pending io
+#define GATE_THREAD_DELAY 500 /*milliseconds*/
+#define GATE_THREAD_DELAY_TOLERANCE 50 /*milliseconds*/
+#define DELAY_BETWEEN_SUSPENDS 5000 + GATE_THREAD_DELAY // time to delay between suspensions
+#define SUSPEND_TIME GATE_THREAD_DELAY+100 // milliseconds to suspend during SuspendProcessing
+
+LONG ThreadpoolMgr::Initialization=0; // indicator of whether the threadpool is initialized.
+Volatile<unsigned int> ThreadpoolMgr::LastDequeueTime; // used to determine if work items are getting thread starved
+int ThreadpoolMgr::offset_counter = 0;
+
+SPTR_IMPL(WorkRequest,ThreadpoolMgr,WorkRequestHead); // Head of work request queue
+SPTR_IMPL(WorkRequest,ThreadpoolMgr,WorkRequestTail); // Tail of work request queue
+
+SVAL_IMPL(ThreadpoolMgr::LIST_ENTRY,ThreadpoolMgr,TimerQueue); // queue of timers
+
+//unsigned int ThreadpoolMgr::LastCpuSamplingTime=0; // last time cpu utilization was sampled by gate thread
+unsigned int ThreadpoolMgr::LastCPThreadCreation=0; // last time a completion port thread was created
+unsigned int ThreadpoolMgr::NumberOfProcessors; // number of processors used to compute the thread pool limits
+
+
+CrstStatic ThreadpoolMgr::WorkerCriticalSection;
+CLREvent * ThreadpoolMgr::RetiredCPWakeupEvent; // wakeup event for completion port threads
+CrstStatic ThreadpoolMgr::WaitThreadsCriticalSection;
+ThreadpoolMgr::LIST_ENTRY ThreadpoolMgr::WaitThreadsHead;
+
+ThreadpoolMgr::UnfairSemaphore* ThreadpoolMgr::WorkerSemaphore;
+CLRSemaphore* ThreadpoolMgr::RetiredWorkerSemaphore;
+
+CrstStatic ThreadpoolMgr::TimerQueueCriticalSection;
+HANDLE ThreadpoolMgr::TimerThread=NULL;
+Thread *ThreadpoolMgr::pTimerThread=NULL;
+DWORD ThreadpoolMgr::LastTickCount;
+
+#ifdef _DEBUG
+DWORD ThreadpoolMgr::TickCountAdjustment=0;
+#endif
+
+LONG ThreadpoolMgr::GateThreadStatus=GATE_THREAD_STATUS_NOT_RUNNING;
+
+ThreadpoolMgr::RecycledListsWrapper ThreadpoolMgr::RecycledLists;
+
+ThreadpoolMgr::TimerInfo *ThreadpoolMgr::TimerInfosToBeRecycled = NULL;
+
+BOOL ThreadpoolMgr::IsApcPendingOnWaitThread = FALSE;
+
+#ifndef DACCESS_COMPILE
+
+// Macros for inserting/deleting from doubly linked list
+
+#define InitializeListHead(ListHead) (\
+ (ListHead)->Flink = (ListHead)->Blink = (ListHead))
+
+//
+// these are named the same as slightly different macros in the NT headers
+//
+#undef RemoveHeadList
+#undef RemoveEntryList
+#undef InsertTailList
+#undef InsertHeadList
+
+#define RemoveHeadList(ListHead,FirstEntry) \
+ {\
+ FirstEntry = (LIST_ENTRY*) (ListHead)->Flink;\
+ ((LIST_ENTRY*)FirstEntry->Flink)->Blink = (ListHead);\
+ (ListHead)->Flink = FirstEntry->Flink;\
+ }
+
+#define RemoveEntryList(Entry) {\
+ LIST_ENTRY* _EX_Entry;\
+ _EX_Entry = (Entry);\
+ ((LIST_ENTRY*) _EX_Entry->Blink)->Flink = _EX_Entry->Flink;\
+ ((LIST_ENTRY*) _EX_Entry->Flink)->Blink = _EX_Entry->Blink;\
+ }
+
+#define InsertTailList(ListHead,Entry) \
+ (Entry)->Flink = (ListHead);\
+ (Entry)->Blink = (ListHead)->Blink;\
+ ((LIST_ENTRY*)(ListHead)->Blink)->Flink = (Entry);\
+ (ListHead)->Blink = (Entry);
+
+#define InsertHeadList(ListHead,Entry) {\
+ LIST_ENTRY* _EX_Flink;\
+ LIST_ENTRY* _EX_ListHead;\
+ _EX_ListHead = (LIST_ENTRY*)(ListHead);\
+ _EX_Flink = (LIST_ENTRY*) _EX_ListHead->Flink;\
+ (Entry)->Flink = _EX_Flink;\
+ (Entry)->Blink = _EX_ListHead;\
+ _EX_Flink->Blink = (Entry);\
+ _EX_ListHead->Flink = (Entry);\
+ }
+
+#define IsListEmpty(ListHead) \
+ ((ListHead)->Flink == (ListHead))
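+
+// Illustrative usage of the list macros above (hypothetical variables, for
+// documentation only - not part of the thread pool logic):
+//
+//   LIST_ENTRY head;
+//   InitializeListHead(&head);        // empty list: head points at itself
+//   _ASSERTE(IsListEmpty(&head));
+//
+//   LIST_ENTRY node;                  // normally embedded in a larger struct
+//   InsertTailList(&head, &node);     // FIFO insert at the tail
+//
+//   LIST_ENTRY* first;
+//   RemoveHeadList(&head, first);     // first == &node; the list is empty again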
+
+#define SetLastHRError(hr) \
+ if (HRESULT_FACILITY(hr) == FACILITY_WIN32)\
+ SetLastError(HRESULT_CODE(hr));\
+ else \
+ SetLastError(ERROR_INVALID_DATA);\
+
+/************************************************************************/
+
+void ThreadpoolMgr::RecycledListsWrapper::Initialize( unsigned int numProcs )
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ pRecycledListPerProcessor = new RecycledListInfo[numProcs][MEMTYPE_COUNT];
+}
+
+//--//
+
+void ThreadpoolMgr::EnsureInitialized()
+{
+ CONTRACTL
+ {
+ THROWS; // Initialize can throw
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (IsInitialized())
+ return;
+
+ DWORD dwSwitchCount = 0;
+
+retry:
+ if (InterlockedCompareExchange(&Initialization, 1, 0) == 0)
+ {
+ if (Initialize())
+ Initialization = -1;
+ else
+ {
+ Initialization = 0;
+ COMPlusThrowOM();
+ }
+ }
+ else // someone has already begun initializing.
+ {
+ // wait until it finishes
+ while (Initialization != -1)
+ {
+ __SwitchToThread(0, ++dwSwitchCount);
+ goto retry;
+ }
+ }
+}
+
+DWORD GetDefaultMaxLimitWorkerThreads(DWORD minLimit)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ GC_NOTRIGGER;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+ //
+ // We determine the max limit for worker threads as follows:
+ //
+ // 1) It must be at least MinLimitTotalWorkerThreads
+ // 2) It must be no greater than (half the virtual address space)/(thread stack size)
+ // 3) It must be <= MaxPossibleWorkerThreads
+ //
+ // TODO: what about CP threads? Can they follow a similar plan? How do we allocate
+ // thread counts between the two kinds of threads?
+ //
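+ // Illustrative arithmetic (assumed numbers, for documentation only): in a
+ // 32-bit process with ~2GB of user-mode address space and a 1MB default stack
+ // reserve, rule (2) yields (2GB / 2) / 1MB = ~1024 threads, which the code
+ // below then clamps to at least minLimit and at most ThreadCounter::MaxPossibleCount.
+ //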
+ SIZE_T stackReserveSize = 0;
+ Thread::GetProcessDefaultStackSize(&stackReserveSize, NULL);
+
+ ULONGLONG halfVirtualAddressSpace;
+
+ MEMORYSTATUSEX memStats;
+ memStats.dwLength = sizeof(memStats);
+ if (GlobalMemoryStatusEx(&memStats))
+ {
+ halfVirtualAddressSpace = memStats.ullTotalVirtual / 2;
+ }
+ else
+ {
+ //assume the normal Win32 32-bit virtual address space
+ halfVirtualAddressSpace = 0x000000007FFE0000ull / 2;
+ }
+
+ ULONGLONG limit = halfVirtualAddressSpace / stackReserveSize;
+ limit = max(limit, (ULONGLONG)minLimit);
+ limit = min(limit, (ULONGLONG)ThreadpoolMgr::ThreadCounter::MaxPossibleCount);
+
+ _ASSERTE(FitsIn<DWORD>(limit));
+ return (DWORD)limit;
+}
+
+BOOL ThreadpoolMgr::Initialize()
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ BOOL bRet = FALSE;
+ BOOL bExceptionCaught = FALSE;
+
+ UnManagedPerAppDomainTPCount* pADTPCount;
+ pADTPCount = PerAppDomainTPCountList::GetUnmanagedTPCount();
+
+ //ThreadPool_CPUGroup
+ CPUGroupInfo::EnsureInitialized();
+ if (CPUGroupInfo::CanEnableGCCPUGroups() && CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
+ NumberOfProcessors = CPUGroupInfo::GetNumActiveProcessors();
+ else
+ NumberOfProcessors = GetCurrentProcessCpuCount();
+ InitPlatformVariables();
+
+ EX_TRY
+ {
+ ThreadAdjustmentInterval = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_HillClimbing_SampleIntervalLow);
+
+ pADTPCount->InitResources();
+ WorkerCriticalSection.Init(CrstThreadpoolWorker);
+ WaitThreadsCriticalSection.Init(CrstThreadpoolWaitThreads);
+ TimerQueueCriticalSection.Init(CrstThreadpoolTimerQueue);
+
+ // initialize WaitThreadsHead
+ InitializeListHead(&WaitThreadsHead);
+
+ // initialize TimerQueue
+ InitializeListHead(&TimerQueue);
+
+ RetiredCPWakeupEvent = new CLREvent();
+ RetiredCPWakeupEvent->CreateAutoEvent(FALSE);
+ _ASSERTE(RetiredCPWakeupEvent->IsValid());
+
+ WorkerSemaphore = new UnfairSemaphore(ThreadCounter::MaxPossibleCount);
+
+ RetiredWorkerSemaphore = new CLRSemaphore();
+ RetiredWorkerSemaphore->Create(0, ThreadCounter::MaxPossibleCount);
+
+ //ThreadPool_CPUGroup
+ if (CPUGroupInfo::CanEnableGCCPUGroups() && CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
+ RecycledLists.Initialize( CPUGroupInfo::GetNumActiveProcessors() );
+ else
+ RecycledLists.Initialize( g_SystemInfo.dwNumberOfProcessors );
+ /*
+ {
+ SYSTEM_INFO sysInfo;
+
+ ::GetSystemInfo( &sysInfo );
+
+ RecycledLists.Initialize( sysInfo.dwNumberOfProcessors );
+ }
+ */
+ }
+ EX_CATCH
+ {
+ pADTPCount->CleanupResources();
+
+ if (RetiredCPWakeupEvent)
+ {
+ delete RetiredCPWakeupEvent;
+ RetiredCPWakeupEvent = NULL;
+ }
+
+ // Note: It is fine to call Destroy on uninitialized critical sections
+ WorkerCriticalSection.Destroy();
+ WaitThreadsCriticalSection.Destroy();
+ TimerQueueCriticalSection.Destroy();
+
+ bExceptionCaught = TRUE;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (bExceptionCaught)
+ {
+ goto end;
+ }
+
+ // initialize Worker and CP thread settings
+ DWORD forceMin;
+ forceMin = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_ForceMinWorkerThreads);
+ MinLimitTotalWorkerThreads = forceMin > 0 ? (LONG)forceMin : (LONG)NumberOfProcessors;
+
+ DWORD forceMax;
+ forceMax = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_ForceMaxWorkerThreads);
+ MaxLimitTotalWorkerThreads = forceMax > 0 ? (LONG)forceMax : (LONG)GetDefaultMaxLimitWorkerThreads(MinLimitTotalWorkerThreads);
+
+ ThreadCounter::Counts counts;
+ counts.NumActive = 0;
+ counts.NumWorking = 0;
+ counts.NumRetired = 0;
+ counts.MaxWorking = MinLimitTotalWorkerThreads;
+ WorkerCounter.counts.AsLongLong = counts.AsLongLong;
+
+#ifdef _DEBUG
+ TickCountAdjustment = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadpoolTickCountAdjustment);
+#endif
+
+ // initialize CP thread settings
+ MinLimitTotalCPThreads = NumberOfProcessors;
+
+ MaxFreeCPThreads = NumberOfProcessors*MaxFreeCPThreadsPerCPU;
+
+ counts.NumActive = 0;
+ counts.NumWorking = 0;
+ counts.NumRetired = 0;
+ counts.MaxWorking = MinLimitTotalCPThreads;
+ CPThreadCounter.counts.AsLongLong = counts.AsLongLong;
+
+#ifndef FEATURE_PAL
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ if (CLRIoCompletionHosted())
+ {
+ HANDLE hPort;
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = CorHost2::GetHostIoCompletionManager()->CreateIoCompletionPort(&hPort);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ goto end;
+
+ GlobalCompletionPort = hPort;
+ }
+ else
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+ {
+ GlobalCompletionPort = CreateIoCompletionPort(INVALID_HANDLE_VALUE,
+ NULL,
+ 0, /*ignored for invalid handle value*/
+ NumberOfProcessors);
+ }
+#endif // !FEATURE_PAL
+
+ HillClimbingInstance.Initialize();
+
+ bRet = TRUE;
+end:
+ return bRet;
+}
+
+void ThreadpoolMgr::InitPlatformVariables()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+ HINSTANCE hNtDll;
+ HINSTANCE hCoreSynch;
+ {
+ CONTRACT_VIOLATION(GCViolation|FaultViolation);
+ hNtDll = CLRLoadLibrary(W("ntdll.dll"));
+ _ASSERTE(hNtDll);
+#ifdef FEATURE_CORESYSTEM
+ hCoreSynch = CLRLoadLibrary(W("api-ms-win-core-synch-l1-1-0.dll"));
+#else
+ hCoreSynch = CLRLoadLibrary(W("kernel32.dll"));
+#endif
+ _ASSERTE(hCoreSynch);
+ }
+
+ // These APIs must be accessed via dynamic binding since they may be removed in future
+ // OS versions.
+ g_pufnNtQueryInformationThread = (NtQueryInformationThreadProc)GetProcAddress(hNtDll,"NtQueryInformationThread");
+ g_pufnNtQuerySystemInformation = (NtQuerySystemInformationProc)GetProcAddress(hNtDll,"NtQuerySystemInformation");
+
+
+ // These APIs are only supported on newer Windows versions
+ g_pufnCreateWaitableTimerEx = (CreateWaitableTimerExProc)GetProcAddress(hCoreSynch, "CreateWaitableTimerExW");
+ g_pufnSetWaitableTimerEx = (SetWaitableTimerExProc)GetProcAddress(hCoreSynch, "SetWaitableTimerEx");
+#endif
+}
+
+BOOL ThreadpoolMgr::SetMaxThreadsHelper(DWORD MaxWorkerThreads,
+ DWORD MaxIOCompletionThreads)
+{
+ CONTRACTL
+ {
+ THROWS; // Crst can throw and toggle GC mode
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ BOOL result = FALSE;
+
+ // This doesn't need to be the worker CS, but we use it to avoid a race between setting the min and the max, without creating a new CS.
+ CrstHolder csh(&WorkerCriticalSection);
+
+ if (MaxWorkerThreads >= (DWORD)MinLimitTotalWorkerThreads &&
+ MaxIOCompletionThreads >= (DWORD)MinLimitTotalCPThreads)
+ {
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_ForceMaxWorkerThreads) == 0)
+ {
+ MaxLimitTotalWorkerThreads = min(MaxWorkerThreads, (DWORD)ThreadCounter::MaxPossibleCount);
+
+ ThreadCounter::Counts counts = WorkerCounter.GetCleanCounts();
+ while (counts.MaxWorking > MaxLimitTotalWorkerThreads)
+ {
+ ThreadCounter::Counts newCounts = counts;
+ newCounts.MaxWorking = MaxLimitTotalWorkerThreads;
+
+ ThreadCounter::Counts oldCounts = WorkerCounter.CompareExchangeCounts(newCounts, counts);
+ if (oldCounts == counts)
+ counts = newCounts;
+ else
+ counts = oldCounts;
+ }
+ }
+
+ END_SO_INTOLERANT_CODE;
+
+ MaxLimitTotalCPThreads = min(MaxIOCompletionThreads, (DWORD)ThreadCounter::MaxPossibleCount);
+
+ result = TRUE;
+ }
+
+ return result;
+ }
+
+/************************************************************************/
+BOOL ThreadpoolMgr::SetMaxThreads(DWORD MaxWorkerThreads,
+ DWORD MaxIOCompletionThreads)
+{
+ CONTRACTL
+ {
+ THROWS; // SetMaxThreadsHelper can throw and toggle GC mode
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostThreadpoolManager *threadpoolProvider = CorHost2::GetHostThreadpoolManager();
+ if (threadpoolProvider) {
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = threadpoolProvider->SetMaxThreads(MaxWorkerThreads);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ {
+ SetLastHRError(hr);
+ return FALSE;
+ }
+ }
+
+ IHostIoCompletionManager *ioCompletionProvider = CorHost2::GetHostIoCompletionManager();
+ if (ioCompletionProvider) {
+ HRESULT hr;
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = ioCompletionProvider->SetMaxThreads(MaxIOCompletionThreads);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ {
+ SetLastHRError(hr);
+ return FALSE;
+ }
+ }
+
+ if (threadpoolProvider && ioCompletionProvider) {
+ return TRUE;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ if (IsInitialized())
+ {
+ return SetMaxThreadsHelper(MaxWorkerThreads, MaxIOCompletionThreads);
+ }
+
+ if (InterlockedCompareExchange(&Initialization, 1, 0) == 0)
+ {
+ Initialize();
+
+ BOOL helper_result = FALSE;
+ helper_result = SetMaxThreadsHelper(MaxWorkerThreads, MaxIOCompletionThreads);
+
+ Initialization = -1;
+ return helper_result;
+ }
+ else // someone else is initializing. Too late, return false
+ {
+ return FALSE;
+ }
+
+}
+
+BOOL ThreadpoolMgr::GetMaxThreads(DWORD* MaxWorkerThreads,
+ DWORD* MaxIOCompletionThreads)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HRESULT hr = S_OK;
+
+ IHostThreadpoolManager *threadpoolProvider = CorHost2::GetHostThreadpoolManager();
+ if (threadpoolProvider) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = threadpoolProvider->GetMaxThreads(MaxWorkerThreads);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ {
+ SetLastHRError(hr);
+ return FALSE;
+ }
+ }
+
+ IHostIoCompletionManager *ioCompletionProvider = CorHost2::GetHostIoCompletionManager();
+ if (ioCompletionProvider) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = ioCompletionProvider->GetMaxThreads(MaxIOCompletionThreads);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ {
+ SetLastHRError(hr);
+ return FALSE;
+ }
+ }
+
+ if (threadpoolProvider && ioCompletionProvider) {
+ return TRUE;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ if (!MaxWorkerThreads || !MaxIOCompletionThreads)
+ {
+ SetLastHRError(ERROR_INVALID_DATA);
+ return FALSE;
+ }
+
+ if (IsInitialized())
+ {
+ *MaxWorkerThreads = (DWORD)MaxLimitTotalWorkerThreads;
+ *MaxIOCompletionThreads = MaxLimitTotalCPThreads;
+ }
+ else
+ {
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), *MaxWorkerThreads = 1024);
+
+ //ThreadPool_CPUGroup
+ CPUGroupInfo::EnsureInitialized();
+ if (CPUGroupInfo::CanEnableGCCPUGroups() && CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
+ NumberOfProcessors = CPUGroupInfo::GetNumActiveProcessors();
+ else
+ NumberOfProcessors = GetCurrentProcessCpuCount();
+ DWORD min = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_ForceMinWorkerThreads);
+ if (min == 0)
+ min = NumberOfProcessors;
+
+ DWORD forceMax = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_ForceMaxWorkerThreads);
+ if (forceMax > 0)
+ {
+ *MaxWorkerThreads = forceMax;
+ }
+ else
+ {
+ *MaxWorkerThreads = GetDefaultMaxLimitWorkerThreads(min);
+ }
+
+ END_SO_INTOLERANT_CODE;
+
+ *MaxIOCompletionThreads = MaxLimitTotalCPThreads;
+ }
+ return TRUE;
+}
+
+BOOL ThreadpoolMgr::SetMinThreads(DWORD MinWorkerThreads,
+ DWORD MinIOCompletionThreads)
+{
+ CONTRACTL
+ {
+ THROWS; // Crst can throw and toggle GC mode
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HRESULT hr = S_OK;
+
+ IHostThreadpoolManager *threadpoolProvider = CorHost2::GetHostThreadpoolManager();
+ if (threadpoolProvider) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = threadpoolProvider->SetMinThreads(MinWorkerThreads);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ {
+ SetLastHRError(hr);
+ return FALSE;
+ }
+ }
+
+ IHostIoCompletionManager *ioCompletionProvider = CorHost2::GetHostIoCompletionManager();
+ if (ioCompletionProvider) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = ioCompletionProvider->SetMinThreads(MinIOCompletionThreads);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ {
+ SetLastHRError(hr);
+ return FALSE;
+ }
+ }
+ if (threadpoolProvider && ioCompletionProvider) {
+ return TRUE;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ if (!IsInitialized())
+ {
+ if (InterlockedCompareExchange(&Initialization, 1, 0) == 0)
+ {
+ Initialize();
+ Initialization = -1;
+ }
+ }
+
+ if (IsInitialized())
+ {
+ // This doesn't need to be the worker CS, but we use it to avoid a race between setting the min and the max, without creating a new CS.
+ CrstHolder csh(&WorkerCriticalSection);
+
+ BOOL init_result = false;
+
+ if (MinWorkerThreads >= 0 && MinIOCompletionThreads >= 0 &&
+ MinWorkerThreads <= (DWORD) MaxLimitTotalWorkerThreads &&
+ MinIOCompletionThreads <= (DWORD) MaxLimitTotalCPThreads)
+ {
+ BEGIN_SO_INTOLERANT_CODE(GetThread());
+
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_ForceMinWorkerThreads) == 0)
+ {
+ MinLimitTotalWorkerThreads = min(MinWorkerThreads, (DWORD)ThreadCounter::MaxPossibleCount);
+
+ ThreadCounter::Counts counts = WorkerCounter.GetCleanCounts();
+ while (counts.MaxWorking < MinLimitTotalWorkerThreads)
+ {
+ ThreadCounter::Counts newCounts = counts;
+ newCounts.MaxWorking = MinLimitTotalWorkerThreads;
+
+ ThreadCounter::Counts oldCounts = WorkerCounter.CompareExchangeCounts(newCounts, counts);
+ if (oldCounts == counts)
+ {
+ counts = newCounts;
+
+ // if we increased the limit, and there are pending workitems, we need
+ // to dispatch a thread to process the work.
+ if (newCounts.MaxWorking > oldCounts.MaxWorking &&
+ PerAppDomainTPCountList::AreRequestsPendingInAnyAppDomains())
+ {
+ MaybeAddWorkingWorker();
+ }
+ }
+ else
+ {
+ counts = oldCounts;
+ }
+ }
+ }
+
+ END_SO_INTOLERANT_CODE;
+
+ MinLimitTotalCPThreads = min(MinIOCompletionThreads, (DWORD)ThreadCounter::MaxPossibleCount);
+
+ init_result = TRUE;
+ }
+
+ return init_result;
+ }
+ // someone else is initializing. Too late, return false
+ return FALSE;
+
+}
+
+BOOL ThreadpoolMgr::GetMinThreads(DWORD* MinWorkerThreads,
+ DWORD* MinIOCompletionThreads)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HRESULT hr = S_OK;
+
+ IHostThreadpoolManager *threadpoolProvider = CorHost2::GetHostThreadpoolManager();
+ if (threadpoolProvider) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = threadpoolProvider->GetMinThreads(MinWorkerThreads);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ {
+ SetLastHRError(hr);
+ return FALSE;
+ }
+ }
+
+ IHostIoCompletionManager *ioCompletionProvider = CorHost2::GetHostIoCompletionManager();
+ if (ioCompletionProvider) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = ioCompletionProvider->GetMinThreads(MinIOCompletionThreads);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ {
+ SetLastHRError(hr);
+ return FALSE;
+ }
+ }
+
+ if (threadpoolProvider && ioCompletionProvider) {
+ return TRUE;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ if (!MinWorkerThreads || !MinIOCompletionThreads)
+ {
+ SetLastHRError(ERROR_INVALID_DATA);
+ return FALSE;
+ }
+
+ if (IsInitialized())
+ {
+ *MinWorkerThreads = (DWORD)MinLimitTotalWorkerThreads;
+ *MinIOCompletionThreads = MinLimitTotalCPThreads;
+ }
+ else
+ {
+ CPUGroupInfo::EnsureInitialized();
+ if (CPUGroupInfo::CanEnableGCCPUGroups() && CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
+ NumberOfProcessors = CPUGroupInfo::GetNumActiveProcessors();
+ else
+ NumberOfProcessors = GetCurrentProcessCpuCount();
+ DWORD forceMin;
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), forceMin=0);
+ forceMin = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_ForceMinWorkerThreads);
+ END_SO_INTOLERANT_CODE;
+ *MinWorkerThreads = forceMin > 0 ? forceMin : NumberOfProcessors;
+ *MinIOCompletionThreads = NumberOfProcessors;
+ }
+ return TRUE;
+}
+
+BOOL ThreadpoolMgr::GetAvailableThreads(DWORD* AvailableWorkerThreads,
+ DWORD* AvailableIOCompletionThreads)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (IsInitialized())
+ {
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HRESULT hr = S_OK;
+
+ IHostThreadpoolManager *threadpoolProvider = CorHost2::GetHostThreadpoolManager();
+ if (threadpoolProvider) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = threadpoolProvider->GetAvailableThreads(AvailableWorkerThreads);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ {
+ SetLastHRError(hr);
+ return FALSE;
+ }
+ }
+
+ IHostIoCompletionManager *ioCompletionProvider = CorHost2::GetHostIoCompletionManager();
+ if (ioCompletionProvider) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ hr = ioCompletionProvider->GetAvailableThreads(AvailableIOCompletionThreads);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ {
+ SetLastHRError(hr);
+ return FALSE;
+ }
+ }
+
+ if (threadpoolProvider && ioCompletionProvider) {
+ return TRUE;
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ if (!AvailableWorkerThreads || !AvailableIOCompletionThreads)
+ {
+ SetLastHRError(ERROR_INVALID_DATA);
+ return FALSE;
+ }
+
+ ThreadCounter::Counts counts = WorkerCounter.GetCleanCounts();
+
+ if (MaxLimitTotalWorkerThreads < counts.NumActive)
+ *AvailableWorkerThreads = 0;
+ else
+ *AvailableWorkerThreads = MaxLimitTotalWorkerThreads - counts.NumWorking;
+
+ counts = CPThreadCounter.GetCleanCounts();
+ if (MaxLimitTotalCPThreads < counts.NumActive)
+ *AvailableIOCompletionThreads = counts.NumActive - counts.NumWorking;
+ else
+ *AvailableIOCompletionThreads = MaxLimitTotalCPThreads - counts.NumWorking;
+ }
+ else
+ {
+ GetMaxThreads(AvailableWorkerThreads,AvailableIOCompletionThreads);
+ }
+ return TRUE;
+}
+
+void QueueUserWorkItemHelp(LPTHREAD_START_ROUTINE Function, PVOID Context)
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_ANY;
+ /* Cannot use contract here because of SEH
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;*/
+
+ Function(Context);
+
+ Thread *pThread = GetThread();
+ if (pThread) {
+ if (pThread->IsAbortRequested())
+ pThread->EEResetAbort(Thread::TAR_ALL);
+ pThread->InternalReset(FALSE);
+ }
+}
+
+//
+// WorkingThreadCounts tracks the number of worker threads currently doing user work, and the maximum number of such threads
+// since the last time TakeMaxWorkingThreadCount was called. This information is for diagnostic purposes only,
+// and is tracked only if the CLR config value INTERNAL_ThreadPool_EnableWorkerTracking is non-zero (this feature is off
+// by default).
+//
+union WorkingThreadCounts
+{
+ struct
+ {
+ int currentWorking : 16;
+ int maxWorking : 16;
+ };
+
+ LONG asLong;
+};
+
+WorkingThreadCounts g_workingThreadCounts;
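+
+// Illustrative layout note (assumes a little-endian bit-field packing, for
+// documentation only): both counters live in one LONG so that a single
+// InterlockedCompareExchange can update them together atomically; e.g.
+// currentWorking=3, maxWorking=5 would be stored as asLong == (5 << 16) | 3.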
+
+//
+// If worker tracking is enabled (see above) then this is called immediately before and after a worker thread executes
+// each work item.
+//
+void ThreadpoolMgr::ReportThreadStatus(bool isWorking)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ _ASSERTE(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_EnableWorkerTracking));
+ while (true)
+ {
+ WorkingThreadCounts currentCounts, newCounts;
+ currentCounts.asLong = VolatileLoad(&g_workingThreadCounts.asLong);
+
+ newCounts = currentCounts;
+
+ if (isWorking)
+ newCounts.currentWorking++;
+
+ if (newCounts.currentWorking > newCounts.maxWorking)
+ newCounts.maxWorking = newCounts.currentWorking;
+
+ if (!isWorking)
+ newCounts.currentWorking--;
+
+ if (currentCounts.asLong == InterlockedCompareExchange(&g_workingThreadCounts.asLong, newCounts.asLong, currentCounts.asLong))
+ break;
+ }
+}
+
+//
+// Returns the max working count since the previous call to TakeMaxWorkingThreadCount, and resets WorkingThreadCounts.maxWorking.
+//
+int TakeMaxWorkingThreadCount()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ _ASSERTE(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_EnableWorkerTracking));
+ while (true)
+ {
+ WorkingThreadCounts currentCounts, newCounts;
+ currentCounts.asLong = VolatileLoad(&g_workingThreadCounts.asLong);
+
+ newCounts = currentCounts;
+ newCounts.maxWorking = 0;
+
+ if (currentCounts.asLong == InterlockedCompareExchange(&g_workingThreadCounts.asLong, newCounts.asLong, currentCounts.asLong))
+ {
+ // If we haven't updated the counts since the last call to TakeMaxWorkingThreadCount, then we never updated maxWorking.
+ // In that case, the number of working threads for the whole period since the last TakeMaxWorkingThreadCount is the
+ // current number of working threads.
+ return currentCounts.maxWorking == 0 ? currentCounts.currentWorking : currentCounts.maxWorking;
+ }
+ }
+}
+
+
+/************************************************************************/
+
+BOOL ThreadpoolMgr::QueueUserWorkItem(LPTHREAD_START_ROUTINE Function,
+ PVOID Context,
+ DWORD Flags,
+ BOOL UnmanagedTPRequest)
+{
+ CONTRACTL
+ {
+ THROWS; // EnsureInitialized, EnqueueWorkRequest can throw OOM
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EnsureInitialized();
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ HRESULT hr = S_OK;
+
+ IHostThreadpoolManager *provider = CorHost2::GetHostThreadpoolManager();
+ if (provider) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+
+ if(UnmanagedTPRequest)
+ {
+ hr = provider->QueueUserWorkItem(Function, Context, Flags);
+ }
+ else
+ {
+ hr = provider->QueueUserWorkItem(ExecuteHostRequest, Context, Flags);
+ }
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(hr))
+ {
+ SetLastHRError(hr);
+ return FALSE;
+ }
+ else
+ {
+ return TRUE;
+ }
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ if (Flags == CALL_OR_QUEUE)
+ {
+ // we've been asked to call this directly if the thread pressure is not too high
+
+ int MinimumAvailableCPThreads = (NumberOfProcessors < 3) ? 3 : NumberOfProcessors;
+
+ ThreadCounter::Counts counts = CPThreadCounter.GetCleanCounts();
+ if ((MaxLimitTotalCPThreads - counts.NumActive) >= MinimumAvailableCPThreads )
+ {
+ ThreadLocaleHolder localeHolder;
+
+ QueueUserWorkItemHelp(Function, Context);
+ return TRUE;
+ }
+
+ }
+
+ if (UnmanagedTPRequest)
+ {
+ UnManagedPerAppDomainTPCount* pADTPCount;
+ pADTPCount = PerAppDomainTPCountList::GetUnmanagedTPCount();
+ pADTPCount->QueueUnmanagedWorkRequest(Function, Context);
+ }
+ else
+ {
+ // caller has already registered its TPCount; this call is just to adjust the thread count
+ }
+
+ return TRUE;
+}
+
+
+bool ThreadpoolMgr::ShouldWorkerKeepRunning()
+{
+ WRAPPER_NO_CONTRACT;
+ if (CLRThreadpoolHosted())
+ return true;
+
+ //
+ // Maybe this thread should retire now. Let's see.
+ //
+ bool shouldThisThreadKeepRunning = true;
+
+ // Dirty read is OK here; the worst that can happen is that we won't retire this time. In the
+ // case where we might retire, we have to succeed at a CompareExchange, which will have the effect
+ // of validating this read.
+ ThreadCounter::Counts counts = WorkerCounter.DangerousGetDirtyCounts();
+ while (true)
+ {
+ if (counts.NumActive <= counts.MaxWorking)
+ {
+ shouldThisThreadKeepRunning = true;
+ break;
+ }
+
+ ThreadCounter::Counts newCounts = counts;
+ newCounts.NumWorking--;
+ newCounts.NumActive--;
+ newCounts.NumRetired++;
+
+ ThreadCounter::Counts oldCounts = WorkerCounter.CompareExchangeCounts(newCounts, counts);
+
+ if (oldCounts == counts)
+ {
+ shouldThisThreadKeepRunning = false;
+ break;
+ }
+
+ counts = oldCounts;
+ }
+
+ return shouldThisThreadKeepRunning;
+}
+
+DangerousNonHostedSpinLock ThreadpoolMgr::ThreadAdjustmentLock;
+
+
+//
+// This method must only be called if ShouldAdjustMaxWorkersActive has returned true, *and*
+// ThreadAdjustmentLock is held.
+//
+void ThreadpoolMgr::AdjustMaxWorkersActive()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) { GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!CLRThreadpoolHosted());
+ _ASSERTE(ThreadAdjustmentLock.IsHeld());
+
+ DWORD currentTicks = GetTickCount();
+ LONG totalNumCompletions = Thread::GetTotalThreadPoolCompletionCount();
+ LONG numCompletions = totalNumCompletions - PriorCompletedWorkRequests;
+
+ LARGE_INTEGER startTime = CurrentSampleStartTime;
+ LARGE_INTEGER endTime;
+ QueryPerformanceCounter(&endTime);
+
+ static LARGE_INTEGER freq;
+ if (freq.QuadPart == 0)
+ QueryPerformanceFrequency(&freq);
+
+ double elapsed = (double)(endTime.QuadPart - startTime.QuadPart) / freq.QuadPart;
+
+ //
+ // It's possible for the current sample to be reset while we're holding
+ // ThreadAdjustmentLock. This will result in a very short sample, possibly
+ // with completely bogus counts. We'll try to detect this by checking the sample
+ // interval; if it's very short, then we try again later.
+ //
+ if (elapsed*1000.0 >= (ThreadAdjustmentInterval/2))
+ {
+ ThreadCounter::Counts currentCounts = WorkerCounter.GetCleanCounts();
+
+ int newMax = HillClimbingInstance.Update(
+ currentCounts.MaxWorking,
+ elapsed,
+ numCompletions,
+ &ThreadAdjustmentInterval);
+
+ while (newMax != currentCounts.MaxWorking)
+ {
+ ThreadCounter::Counts newCounts = currentCounts;
+ newCounts.MaxWorking = newMax;
+
+ ThreadCounter::Counts oldCounts = WorkerCounter.CompareExchangeCounts(newCounts, currentCounts);
+ if (oldCounts == currentCounts)
+ {
+ //
+ // If we're increasing the max, inject a thread. If that thread finds work, it will inject
+ // another thread, etc., until nobody finds work or we reach the new maximum.
+ //
+ // If we're reducing the max, whichever threads notice this first will retire themselves.
+ //
+ if (newMax > oldCounts.MaxWorking)
+ MaybeAddWorkingWorker();
+
+ break;
+ }
+ else
+ {
+ // we failed - maybe try again
+ if (oldCounts.MaxWorking > currentCounts.MaxWorking &&
+ oldCounts.MaxWorking >= newMax)
+ {
+ // someone (probably the gate thread) increased the thread count more than
+ // we are about to do. Don't interfere.
+ break;
+ }
+
+ currentCounts = oldCounts;
+ }
+ }
+
+ PriorCompletedWorkRequests = totalNumCompletions;
+ PriorCompletedWorkRequestsTime = currentTicks;
+ NextCompletedWorkRequestsTime = PriorCompletedWorkRequestsTime + ThreadAdjustmentInterval;
+ CurrentSampleStartTime = endTime;
+ }
+}
+
+
+void ThreadpoolMgr::MaybeAddWorkingWorker()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) { GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!CLRThreadpoolHosted());
+
+ ThreadCounter::Counts counts = WorkerCounter.GetCleanCounts();
+ ThreadCounter::Counts newCounts;
+ while (true)
+ {
+ newCounts = counts;
+ newCounts.NumWorking = max(counts.NumWorking, min(counts.NumWorking + 1, counts.MaxWorking));
+ newCounts.NumActive = max(counts.NumActive, newCounts.NumWorking);
+ newCounts.NumRetired = max(0, counts.NumRetired - (newCounts.NumActive - counts.NumActive));
+
+ if (newCounts == counts)
+ return;
+
+ ThreadCounter::Counts oldCounts = WorkerCounter.CompareExchangeCounts(newCounts, counts);
+
+ if (oldCounts == counts)
+ break;
+
+ counts = oldCounts;
+ }
+
+ int toUnretire = counts.NumRetired - newCounts.NumRetired;
+ int toCreate = (newCounts.NumActive - counts.NumActive) - toUnretire;
+ int toRelease = (newCounts.NumWorking - counts.NumWorking) - (toUnretire + toCreate);
+
+ _ASSERTE(toUnretire >= 0);
+ _ASSERTE(toCreate >= 0);
+ _ASSERTE(toRelease >= 0);
+ _ASSERTE(toUnretire + toCreate + toRelease <= 1);
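+
+ // Illustrative transitions (assumed counts, for documentation only) showing how
+ // the deltas above decompose the single step taken by the loop:
+ // * NumWorking 3->4, NumActive stays 4, NumRetired unchanged:
+ //   toUnretire=0, toCreate=0, toRelease=1 - wake an idle active worker via WorkerSemaphore.
+ // * NumWorking 4->5, NumActive 4->5, NumRetired 1->0:
+ //   toUnretire=1, toCreate=0, toRelease=0 - un-retire a worker instead of creating one.
+ // * NumWorking 4->5, NumActive 4->5, NumRetired stays 0:
+ //   toUnretire=0, toCreate=1, toRelease=0 - a brand new worker thread must be created.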
+
+ if (toUnretire > 0)
+ {
+ LONG previousCount;
+ INDEBUG(BOOL success =) RetiredWorkerSemaphore->Release((LONG)toUnretire, &previousCount);
+ _ASSERTE(success);
+ }
+
+ if (toRelease > 0)
+ WorkerSemaphore->Release(toRelease);
+
+ while (toCreate > 0)
+ {
+ if (CreateWorkerThread())
+ {
+ toCreate--;
+ }
+ else
+ {
+ //
+ // Uh-oh, we promised to create a new thread, but the creation failed. We have to renege on our
+ // promise. This may possibly result in no work getting done for a while, but the gate thread will
+ // eventually notice that no completions are happening and force the creation of a new thread.
+ // Of course, there's no guarantee *that* will work - but hopefully enough time will have passed
+ // to allow whoever's using all the memory right now to release some.
+ //
+ counts = WorkerCounter.GetCleanCounts();
+ while (true)
+ {
+ //
+ // If we said we would create a thread, we also said it would be working. So we need to
+ // decrement both NumWorking and NumActive by the number of threads we will no longer be creating.
+ //
+ newCounts = counts;
+ newCounts.NumWorking -= toCreate;
+ newCounts.NumActive -= toCreate;
+
+ ThreadCounter::Counts oldCounts = WorkerCounter.CompareExchangeCounts(newCounts, counts);
+
+ if (oldCounts == counts)
+ break;
+
+ counts = oldCounts;
+ }
+
+ toCreate = 0;
+ }
+ }
+}
+
+BOOL ThreadpoolMgr::PostQueuedCompletionStatus(LPOVERLAPPED lpOverlapped,
+ LPOVERLAPPED_COMPLETION_ROUTINE Function)
+{
+ CONTRACTL
+ {
+ THROWS; // EnsureInitialized can throw OOM
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+ EnsureInitialized();
+
+ // if hosted then we need to queue to a worker thread, since the hosting API doesn't include this method
+ if (CLRIoCompletionHosted())
+ {
+ PostRequestHolder postRequest = MakePostRequest(Function, lpOverlapped);
+ if (postRequest)
+ {
+ // Will execute in the Default AppDomain
+ if (FALSE == QueueUserWorkItem(QUWIPostCompletion, postRequest, QUEUE_ONLY))
+ {
+ return FALSE;
+ }
+ else
+ {
+ postRequest.SuppressRelease();
+ return TRUE;
+ }
+ }
+ else
+ return FALSE;
+ }
+
+ _ASSERTE(GlobalCompletionPort != NULL);
+
+ if (!InitCompletionPortThreadpool)
+ InitCompletionPortThreadpool = TRUE;
+
+ GrowCompletionPortThreadpoolIfNeeded();
+
+ // In order to allow external ETW listeners to correlate activities that use our IO completion port
+ // as a dispatch mechanism, we have to ensure the runtime's calls to ::PostQueuedCompletionStatus
+ // and ::GetQueuedCompletionStatus are "annotated" with ETW events representing the operations
+ // performed.
+ // There are currently 4 codepaths that post to the GlobalCompletionPort:
+ // 1. and 2. - the Overlapped drainage events. Those are uninteresting to ETW listeners and
+ // currently call the global ::PostQueuedCompletionStatus directly.
+ // 3. the managed API ThreadPool.UnsafeQueueNativeOverlapped(), calling CorPostQueuedCompletionStatus()
+ // which already fires the ETW event as needed
+ // 4. the managed API ThreadPool.RegisterWaitForSingleObject which needs to fire the ETW event
+ // at the time the managed API is called (on the original user thread), and not when the ::PQCS
+ // is called (from the dedicated wait thread).
+ // If additional codepaths appear they need to either fire the ETW event before calling this or ensure
+ // we do not fire an unmatched "dequeue" event in ThreadpoolMgr::CompletionPortThreadStart
+ // The current possible values for Function:
+ // - CallbackForInitiateDrainageOfCompletionPortQueue and
+ // CallbackForContinueDrainageOfCompletionPortQueue for Drainage
+ // - BindIoCompletionCallbackStub for ThreadPool.UnsafeQueueNativeOverlapped
+ // - WaitIOCompletionCallback for ThreadPool.RegisterWaitForSingleObject
+
+ return ::PostQueuedCompletionStatus(GlobalCompletionPort,
+ 0,
+ (ULONG_PTR) Function,
+ lpOverlapped);
+#else
+ SetLastError(ERROR_CALL_NOT_IMPLEMENTED);
+ return FALSE;
+#endif // !FEATURE_PAL
+}
+
+
+void ThreadpoolMgr::WaitIOCompletionCallback(
+ DWORD dwErrorCode,
+ DWORD numBytesTransferred,
+ LPOVERLAPPED lpOverlapped)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (dwErrorCode == ERROR_SUCCESS)
+ DWORD ret = AsyncCallbackCompletion((PVOID)lpOverlapped);
+}
+
+#ifndef FEATURE_PAL
+// We need to make sure that the next job picked up by a completion port thread
+// is inserted into the queue after we start cleanup. The cleanup starts when a completion
+// port thread processes a special overlapped (overlappedForInitiateCleanup).
+// To do this, we loop through all completion port threads.
+// 1. If a thread is in cooperative mode, it is processing a job now, and the next job
+//    it picks up will be after we start cleanup.
+// 2. A completion port thread may be waiting for a job, or may be about to dispatch one.
+//    We cannot distinguish these two cases, so we queue a dummy job to the queue after the
+//    starting job.
+OVERLAPPED overlappedForInitiateCleanup;
+OVERLAPPED overlappedForContinueCleanup;
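+
+// Illustrative timeline of the drain protocol (summary of the code below, for
+// documentation only):
+//   DrainCompletionPortQueue() posts overlappedForInitiateCleanup
+//   -> a CP thread runs CallbackForInitiateDrainageOfCompletionPortQueue, which sets
+//      g_fCompletionPortDrainNeeded and posts overlappedForContinueCleanup
+//   -> other CP threads pick up the dummy packet in
+//      CallbackForContinueDrainageOfCompletionPortQueue, mark themselves drained,
+//      and repost it until every CP thread is accounted for (or we give up and retry later).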
+#endif // !FEATURE_PAL
+
+Volatile<ULONG> g_fCompletionPortDrainNeeded = FALSE;
+
+VOID ThreadpoolMgr::CallbackForContinueDrainageOfCompletionPortQueue(
+ DWORD dwErrorCode,
+ DWORD dwNumberOfBytesTransfered,
+ LPOVERLAPPED lpOverlapped
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+ CounterHolder hldNumCPIT(&NumCPInfrastructureThreads);
+
+ // It is OK if this overlapped is from a previous round.
+ // We have started a new round. The next job picked by this thread is
+ // going to be after the marker.
+ Thread* pThread = GetThread();
+ if (pThread && !pThread->IsCompletionPortDrained())
+ {
+ pThread->MarkCompletionPortDrained();
+ }
+ if (g_fCompletionPortDrainNeeded)
+ {
+ ::PostQueuedCompletionStatus(GlobalCompletionPort,
+ 0,
+ (ULONG_PTR)CallbackForContinueDrainageOfCompletionPortQueue,
+ &overlappedForContinueCleanup);
+ // The IO completion port dispatches threads in LIFO order; we want our special packet to be picked up by a different thread.
+ while (g_fCompletionPortDrainNeeded && pThread->IsCompletionPortDrained())
+ {
+ __SwitchToThread(100, CALLER_LIMITS_SPINNING);
+ }
+ }
+#endif // !FEATURE_PAL
+}
+
+
+VOID
+ThreadpoolMgr::CallbackForInitiateDrainageOfCompletionPortQueue(
+ DWORD dwErrorCode,
+ DWORD dwNumberOfBytesTransfered,
+ LPOVERLAPPED lpOverlapped
+ )
+{
+ #ifndef FEATURE_PAL
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ CounterHolder hldNumCPIT(&NumCPInfrastructureThreads);
+ {
+ ThreadStoreLockHolder tsl;
+ Thread *pThread = NULL;
+ while ((pThread = ThreadStore::GetAllThreadList(pThread, Thread::TS_CompletionPortThread, Thread::TS_CompletionPortThread)) != NULL)
+ {
+ pThread->UnmarkCompletionPortDrained();
+ }
+ }
+
+ FastInterlockOr(&g_fCompletionPortDrainNeeded, 1);
+
+ // Wake up retiring CP threads so they can mark their status.
+ ThreadCounter::Counts counts = CPThreadCounter.GetCleanCounts();
+ if (counts.NumRetired > 0)
+ RetiredCPWakeupEvent->Set();
+
+ DWORD nTry = 0;
+ BOOL fTryNextTime = FALSE;
+ BOOL fMore = TRUE;
+ BOOL fFirstTime = TRUE;
+ while (fMore)
+ {
+ fMore = FALSE;
+ Thread *pCurThread = GetThread();
+ Thread *pThread = NULL;
+ {
+
+ ThreadStoreLockHolder tsl;
+
+ ::FlushProcessWriteBuffers();
+
+ while ((pThread = ThreadStore::GetAllThreadList(pThread, Thread::TS_CompletionPortThread, Thread::TS_CompletionPortThread)) != NULL)
+ {
+ if (pThread == pCurThread || pThread->IsDead() || pThread->IsCompletionPortDrained())
+ {
+ continue;
+ }
+
+ if (pThread->PreemptiveGCDisabledOther() || pThread->GetFrame() != FRAME_TOP)
+ {
+ // The thread is processing an IO job now. When it picks up its next job, that job
+ // will be after the marker.
+ pThread->MarkCompletionPortDrained();
+ }
+ else
+ {
+ if (fFirstTime)
+ {
+ ::PostQueuedCompletionStatus(GlobalCompletionPort,
+ 0,
+ (ULONG_PTR)CallbackForContinueDrainageOfCompletionPortQueue,
+ &overlappedForContinueCleanup);
+ }
+ fMore = TRUE;
+ }
+ }
+ }
+ if (fMore)
+ {
+ __SwitchToThread(10, CALLER_LIMITS_SPINNING);
+ nTry ++;
+ if (nTry > 1000)
+ {
+ fTryNextTime = TRUE;
+ break;
+ }
+ }
+ fFirstTime = FALSE;
+ }
+
+ FastInterlockAnd(&g_fCompletionPortDrainNeeded, 0);
+ OverlappedDataObject::FinishCleanup(!fTryNextTime);
+#endif // !FEATURE_PAL
+}
+
+extern void WINAPI BindIoCompletionCallbackStub(DWORD ErrorCode,
+ DWORD numBytesTransferred,
+ LPOVERLAPPED lpOverlapped);
+
+void HostIOCompletionCallback(
+ DWORD ErrorCode,
+ DWORD numBytesTransferred,
+ LPOVERLAPPED lpOverlapped)
+{
+#ifndef FEATURE_PAL
+ if (lpOverlapped == &overlappedForInitiateCleanup)
+ {
+ ThreadpoolMgr::CallbackForInitiateDrainageOfCompletionPortQueue (
+ ErrorCode,
+ numBytesTransferred,
+ lpOverlapped);
+ }
+ else if (lpOverlapped == &overlappedForContinueCleanup)
+ {
+ ThreadpoolMgr::CallbackForContinueDrainageOfCompletionPortQueue(
+ ErrorCode,
+ numBytesTransferred,
+ lpOverlapped);
+ }
+ else
+ {
+ BindIoCompletionCallbackStub (
+ ErrorCode,
+ numBytesTransferred,
+ lpOverlapped);
+ }
+#endif // !FEATURE_PAL
+}
+
+BOOL ThreadpoolMgr::DrainCompletionPortQueue()
+{
+#ifndef FEATURE_PAL
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (GlobalCompletionPort == 0)
+ {
+ return FALSE;
+ }
+
+ return ::PostQueuedCompletionStatus(GlobalCompletionPort,
+ 0,
+ (ULONG_PTR)CallbackForInitiateDrainageOfCompletionPortQueue,
+ &overlappedForInitiateCleanup);
+#else
+ return FALSE;
+#endif // !FEATURE_PAL
+}
+
+
+DWORD __stdcall ThreadpoolMgr::QUWIPostCompletion(PVOID pArgs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ PostRequest* postRequest = (PostRequest*) pArgs;
+
+ EX_TRY
+ {
+ (postRequest->Function)(postRequest->errorCode, postRequest->numBytesTransferred, postRequest->lpOverlapped);
+ }
+ EX_CATCH
+ {
+ RecycleMemory( postRequest, MEMTYPE_PostRequest );
+ if (!SwallowUnhandledExceptions())
+ EX_RETHROW;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ return ERROR_SUCCESS;
+
+}
+
+
+// This is either made by a worker thread or a CP thread
+// indicated by threadTypeStatus
+void ThreadpoolMgr::EnsureGateThreadRunning()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // The gate thread is only needed if the CLR is providing part of the ThreadPool implementation.
+ _ASSERTE(!CLRThreadpoolHosted() || !CLRIoCompletionHosted());
+
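+ //
+ // Gate thread status transitions (illustrative summary of this method and
+ // ShouldGateThreadKeepRunning, for documentation only):
+ //   NOT_RUNNING --CreateGateThread()--> REQUESTED
+ //   REQUESTED --gate thread polls--> WAITING_FOR_REQUEST
+ //   WAITING_FOR_REQUEST --new request here--> REQUESTED
+ //   WAITING_FOR_REQUEST --no recent requests--> NOT_RUNNING (gate thread exits)
+ //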
+ while (true)
+ {
+ switch (GateThreadStatus)
+ {
+ case GATE_THREAD_STATUS_REQUESTED:
+ //
+ // No action needed; the gate thread is running, and someone else has already registered a request
+ // for it to stay.
+ //
+ return;
+
+ case GATE_THREAD_STATUS_WAITING_FOR_REQUEST:
+ //
+ // Prevent the gate thread from exiting, if it hasn't already done so. If it has, we'll create it on the next iteration of
+ // this loop.
+ //
+ FastInterlockCompareExchange(&GateThreadStatus, GATE_THREAD_STATUS_REQUESTED, GATE_THREAD_STATUS_WAITING_FOR_REQUEST);
+ break;
+
+ case GATE_THREAD_STATUS_NOT_RUNNING:
+ //
+ // We need to create a new gate thread
+ //
+ if (FastInterlockCompareExchange(&GateThreadStatus, GATE_THREAD_STATUS_REQUESTED, GATE_THREAD_STATUS_NOT_RUNNING) == GATE_THREAD_STATUS_NOT_RUNNING)
+ {
+ if (!CreateGateThread())
+ {
+ //
+ // If we failed to create the gate thread, someone else will need to try again later.
+ //
+ GateThreadStatus = GATE_THREAD_STATUS_NOT_RUNNING;
+ }
+ return;
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Invalid value of ThreadpoolMgr::GateThreadStatus");
+ }
+ }
+
+ return;
+}
+
+
+bool ThreadpoolMgr::ShouldGateThreadKeepRunning()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // The gate thread is only needed if the CLR is providing part of the ThreadPool implementation.
+ _ASSERTE(!CLRThreadpoolHosted() || !CLRIoCompletionHosted());
+
+ _ASSERTE(GateThreadStatus == GATE_THREAD_STATUS_WAITING_FOR_REQUEST ||
+ GateThreadStatus == GATE_THREAD_STATUS_REQUESTED);
+
+ //
+ // Switch to WAITING_FOR_REQUEST, and see if we had a request since the last check.
+ //
+ LONG previousStatus = FastInterlockExchange(&GateThreadStatus, GATE_THREAD_STATUS_WAITING_FOR_REQUEST);
+
+ if (previousStatus == GATE_THREAD_STATUS_WAITING_FOR_REQUEST)
+ {
+ //
+ // No recent requests for the gate thread. Check to see if we're still needed.
+ //
+
+ //
+ // Are there any free threads in the I/O completion pool? If there are, we don't need a gate thread.
+ // This implies that whenever we decrement NumFreeCPThreads to 0, we need to call EnsureGateThreadRunning().
+ //
+ ThreadCounter::Counts counts = CPThreadCounter.GetCleanCounts();
+ bool needGateThreadForCompletionPort =
+ InitCompletionPortThreadpool &&
+ (counts.NumActive - counts.NumWorking) <= 0;
+
+ //
+ // Are there any work requests in any worker queue? If so, we need a gate thread.
+        // This implies that whenever a work queue goes from empty to non-empty, we need to call EnsureGateThreadRunning().
+ //
+ bool needGateThreadForWorkerThreads =
+ !CLRThreadpoolHosted() &&
+ PerAppDomainTPCountList::AreRequestsPendingInAnyAppDomains();
+
+ //
+ // If worker tracking is enabled, we need to fire periodic ETW events with active worker counts. This is
+ // done by the gate thread.
+ // We don't have to do anything special with EnsureGateThreadRunning() here, because this is only needed
+ // once work has been added to the queue for the first time (which is covered above).
+ //
+ bool needGateThreadForWorkerTracking =
+ 0 != CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_EnableWorkerTracking);
+
+ if (!(needGateThreadForCompletionPort ||
+ needGateThreadForWorkerThreads ||
+ needGateThreadForWorkerTracking))
+ {
+ //
+ // It looks like we shouldn't be running. But another thread may now tell us to run. If so, they will set GateThreadStatus
+ // back to GATE_THREAD_STATUS_REQUESTED.
+ //
+ previousStatus = FastInterlockCompareExchange(&GateThreadStatus, GATE_THREAD_STATUS_NOT_RUNNING, GATE_THREAD_STATUS_WAITING_FOR_REQUEST);
+ if (previousStatus == GATE_THREAD_STATUS_WAITING_FOR_REQUEST)
+ return false;
+ }
+ }
+
+
+ _ASSERTE(GateThreadStatus == GATE_THREAD_STATUS_WAITING_FOR_REQUEST ||
+ GateThreadStatus == GATE_THREAD_STATUS_REQUESTED);
+ return true;
+}
+
+
+
+//************************************************************************
+void ThreadpoolMgr::EnqueueWorkRequest(WorkRequest* workRequest)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ AppendWorkRequest(workRequest);
+}
+
+WorkRequest* ThreadpoolMgr::DequeueWorkRequest()
+{
+ WorkRequest* entry = NULL;
+ CONTRACT(WorkRequest*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+
+ POSTCONDITION(CheckPointer(entry, NULL_OK));
+ } CONTRACT_END;
+
+ entry = RemoveWorkRequest();
+
+ RETURN entry;
+}
+
+DWORD WINAPI ThreadpoolMgr::ExecuteHostRequest(PVOID pArg)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ThreadLocaleHolder localeHolder;
+
+ bool foundWork, wasNotRecalled;
+ ExecuteWorkRequest(&foundWork, &wasNotRecalled);
+ return ERROR_SUCCESS;
+}
+
+void ThreadpoolMgr::ExecuteWorkRequest(bool* foundWork, bool* wasNotRecalled)
+{
+ CONTRACTL
+ {
+ THROWS; // QueueUserWorkItem can throw
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ IPerAppDomainTPCount* pAdCount;
+
+ LONG index = PerAppDomainTPCountList::GetAppDomainIndexForThreadpoolDispatch();
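+    // Index semantics: 0 means no work is available, -1 selects the unmanaged (native) thread
+    // pool queue, and any other value is the TPIndex of the appdomain whose queue should be
+    // dispatched next.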
+
+ if (index == 0)
+ {
+ *foundWork = false;
+ *wasNotRecalled = true;
+ return;
+ }
+
+ if(IsThreadPoolHosted())
+ {
+        //Only managed callbacks go this route under hosts.
+        //Also, since we got here, at least one managed request was
+        //created, and that means at least one app domain exists.
+
+ if (index == -1)
+ {
+ index = 1;
+ }
+ }
+
+ if (index == -1)
+ {
+ pAdCount = PerAppDomainTPCountList::GetUnmanagedTPCount();
+ }
+ else
+ {
+
+ pAdCount = PerAppDomainTPCountList::GetPerAppdomainCount(TPIndex((DWORD)index));
+ _ASSERTE(pAdCount);
+ }
+
+ pAdCount->DispatchWorkItem(foundWork, wasNotRecalled);
+}
+
+//--------------------------------------------------------------------------
+//This function informs the thread scheduler that the first request has been
+//queued on an appdomain, or it's the first unmanaged TP request.
+//Arguments:
+// UnmanagedTP: Indicates that the request arises from the unmanaged
+//part of Thread Pool.
+//Assumptions:
+//  This function must be called under a per-appdomain lock or the
+//appropriate lock for the unmanaged TP queue.
+//
+BOOL ThreadpoolMgr::SetAppDomainRequestsActive(BOOL UnmanagedTP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ BOOL fShouldSignalEvent = FALSE;
+
+ IPerAppDomainTPCount* pAdCount;
+
+ if(UnmanagedTP)
+ {
+ pAdCount = PerAppDomainTPCountList::GetUnmanagedTPCount();
+ _ASSERTE(pAdCount);
+ }
+ else
+ {
+ Thread* pCurThread = GetThread();
+ _ASSERTE( pCurThread);
+
+ AppDomain* pAppDomain = pCurThread->GetDomain();
+ _ASSERTE(pAppDomain);
+
+ TPIndex tpindex = pAppDomain->GetTPIndex();
+ pAdCount = PerAppDomainTPCountList::GetPerAppdomainCount(tpindex);
+
+ _ASSERTE(pAdCount);
+ }
+
+ pAdCount->SetAppDomainRequestsActive();
+
+ return fShouldSignalEvent;
+}
+
+void ThreadpoolMgr::ClearAppDomainRequestsActive(BOOL UnmanagedTP, BOOL AdUnloading, LONG id)
+//--------------------------------------------------------------------------
+//This function informs the thread scheduler that the last request has been
+//dequeued on an appdomain, or it's the last unmanaged TP request.
+//Arguments:
+// UnmanagedTP: Indicates that the request arises from the unmanaged
+//part of Thread Pool.
+// id: Indicates the id of the appdomain. The id is needed as this
+//function can be called (indirectly) from the appdomain unload thread from
+//unmanaged code to clear per-appdomain state during rude unload.
+//Assumptions:
+//  This function must be called under a per-appdomain lock or the
+//appropriate lock for the unmanaged TP queue.
+//
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ IPerAppDomainTPCount* pAdCount;
+
+ if(UnmanagedTP)
+ {
+ pAdCount = PerAppDomainTPCountList::GetUnmanagedTPCount();
+ _ASSERTE(pAdCount);
+ }
+ else
+ {
+ if (AdUnloading)
+ {
+ pAdCount = PerAppDomainTPCountList::GetPerAppdomainCount(TPIndex(id));
+ }
+ else
+ {
+ Thread* pCurThread = GetThread();
+ _ASSERTE( pCurThread);
+
+ AppDomain* pAppDomain = pCurThread->GetDomain();
+ _ASSERTE(pAppDomain);
+
+ TPIndex tpindex = pAppDomain->GetTPIndex();
+
+ pAdCount = PerAppDomainTPCountList::GetPerAppdomainCount(tpindex);
+ }
+
+ _ASSERTE(pAdCount);
+ }
+
+ pAdCount->ClearAppDomainRequestsActive();
+}
+
+
+// Remove a block from the appropriate recycleList and return.
+// If recycleList is empty, fall back to new.
+LPVOID ThreadpoolMgr::GetRecycledMemory(enum MemType memType)
+{
+ LPVOID result = NULL;
+ CONTRACT(LPVOID)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ POSTCONDITION(CheckPointer(result));
+ } CONTRACT_END;
+
+ if(RecycledLists.IsInitialized())
+ {
+ RecycledListInfo& list = RecycledLists.GetRecycleMemoryInfo( memType );
+
+ result = list.Remove();
+ }
+
+ if(result == NULL)
+ {
+ switch (memType)
+ {
+ case MEMTYPE_DelegateInfo:
+ result = new DelegateInfo;
+ break;
+ case MEMTYPE_AsyncCallback:
+ result = new AsyncCallback;
+ break;
+ case MEMTYPE_WorkRequest:
+ result = new WorkRequest;
+ break;
+ case MEMTYPE_PostRequest:
+ result = new PostRequest;
+ break;
+ default:
+ _ASSERTE(!"Unknown Memtype");
+ result = NULL;
+ break;
+ }
+ }
+
+ RETURN result;
+}
+
+// Insert freed block in recycle list. If list is full, return to system heap
+void ThreadpoolMgr::RecycleMemory(LPVOID mem, enum MemType memType)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(RecycledLists.IsInitialized())
+ {
+ RecycledListInfo& list = RecycledLists.GetRecycleMemoryInfo( memType );
+
+ if(list.CanInsert())
+ {
+ list.Insert( mem );
+ return;
+ }
+ }
+
+ switch (memType)
+ {
+ case MEMTYPE_DelegateInfo:
+ delete (DelegateInfo*) mem;
+ break;
+ case MEMTYPE_AsyncCallback:
+ delete (AsyncCallback*) mem;
+ break;
+ case MEMTYPE_WorkRequest:
+ delete (WorkRequest*) mem;
+ break;
+ case MEMTYPE_PostRequest:
+ delete (PostRequest*) mem;
+ break;
+ default:
+ _ASSERTE(!"Unknown Memtype");
+
+ }
+}
+
+#define THROTTLE_RATE 0.10 /* rate by which we increase the delay as number of threads increase */
+
+// This is to avoid the 64KB/1MB aliasing problem present on Pentium 4 processors,
+// which can significantly impact performance with HyperThreading enabled
+DWORD __stdcall ThreadpoolMgr::intermediateThreadProc(PVOID arg)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ offset_counter++;
+ if (offset_counter * offset_multiplier > PAGE_SIZE)
+ offset_counter = 0;
+
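+    // Consume a per-thread staggered amount of stack so that the hot stack frames of different
+    // threadpool threads land at different cache-aliasing offsets.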
+ (void)_alloca(offset_counter * offset_multiplier);
+
+ intermediateThreadParam* param = (intermediateThreadParam*)arg;
+
+ LPTHREAD_START_ROUTINE ThreadFcnPtr = param->lpThreadFunction;
+ PVOID args = param->lpArg;
+ delete param;
+
+ return ThreadFcnPtr(args);
+}
+
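+// Creates a new thread with any thread-token impersonation reverted. Once the EE is started this
+// returns an unstarted Thread*; before that it returns the raw OS thread HANDLE cast to Thread*.
+// *pIsCLRThread tells the caller which of the two it received, and therefore how to start and
+// release it.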
+Thread* ThreadpoolMgr::CreateUnimpersonatedThread(LPTHREAD_START_ROUTINE lpStartAddress, LPVOID lpArgs, BOOL *pIsCLRThread)
+{
+ STATIC_CONTRACT_NOTHROW;
+ if (GetThread()) { STATIC_CONTRACT_GC_TRIGGERS;} else {DISABLED(STATIC_CONTRACT_GC_NOTRIGGER);}
+ STATIC_CONTRACT_MODE_ANY;
+ /* cannot use contract because of SEH
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;*/
+
+ Thread* pThread = NULL;
+
+ if (g_fEEStarted) {
+ *pIsCLRThread = TRUE;
+ }
+ else
+ *pIsCLRThread = FALSE;
+ if (*pIsCLRThread) {
+ EX_TRY
+ {
+ pThread = SetupUnstartedThread();
+ }
+ EX_CATCH
+ {
+ pThread = NULL;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ if (pThread == NULL) {
+ return NULL;
+ }
+ }
+ DWORD threadId;
+ BOOL bOK = FALSE;
+ HANDLE threadHandle = NULL;
+
+ if (*pIsCLRThread) {
+        // CreateNewThread takes care of reverting any impersonation - so don't do anything here.
+ bOK = pThread->CreateNewThread(0, // default stack size
+ lpStartAddress,
+ lpArgs //arguments
+ );
+ }
+ else {
+#ifndef FEATURE_PAL
+ ThreadAffinityHolder affinityHolder(FALSE);
+ HandleHolder token;
+ BOOL bReverted = FALSE;
+ bOK = RevertIfImpersonated(&bReverted, &token, &affinityHolder);
+ if (bOK != TRUE)
+ return NULL;
+#endif // !FEATURE_PAL
+ NewHolder<intermediateThreadParam> lpThreadArgs(new (nothrow) intermediateThreadParam);
+ if (lpThreadArgs != NULL)
+ {
+ lpThreadArgs->lpThreadFunction = lpStartAddress;
+ lpThreadArgs->lpArg = lpArgs;
+ threadHandle = CreateThread(NULL, // security descriptor
+ 0, // default stack size
+ intermediateThreadProc,
+ lpThreadArgs, // arguments
+ CREATE_SUSPENDED,
+ &threadId);
+ if (threadHandle != NULL)
+ lpThreadArgs.SuppressRelease();
+ }
+#ifndef FEATURE_PAL
+ UndoRevert(bReverted, token);
+#endif // !FEATURE_PAL
+ }
+
+ if (*pIsCLRThread && !bOK)
+ {
+ pThread->DecExternalCount(FALSE);
+ pThread = NULL;
+ }
+
+ if (*pIsCLRThread) {
+ return pThread;
+ }
+ else
+ return (Thread*)threadHandle;
+}
+
+
+BOOL ThreadpoolMgr::CreateWorkerThread()
+{
+ CONTRACTL
+ {
+ if (GetThread()) { GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ NOTHROW;
+        MODE_ANY;              // We may try to add a worker thread while queuing a work item through an FCall
+ }
+ CONTRACTL_END;
+
+ Thread *pThread;
+ BOOL fIsCLRThread;
+ if ((pThread = CreateUnimpersonatedThread(WorkerThreadStart, NULL, &fIsCLRThread)) != NULL)
+ {
+ if (fIsCLRThread) {
+ pThread->ChooseThreadCPUGroupAffinity();
+ pThread->StartThread();
+ }
+ else {
+ DWORD status;
+ status = ResumeThread((HANDLE)pThread);
+ _ASSERTE(status != (DWORD) (-1));
+ CloseHandle((HANDLE)pThread); // we don't need this anymore
+ }
+
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+DWORD __stdcall ThreadpoolMgr::WorkerThreadStart(LPVOID lpArgs)
+{
+ ClrFlsSetThreadType (ThreadType_Threadpool_Worker);
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!CLRThreadpoolHosted());
+
+ Thread *pThread = NULL;
+ DWORD dwSwitchCount = 0;
+ BOOL fThreadInit = FALSE;
+
+ ThreadCounter::Counts counts, oldCounts, newCounts;
+ bool foundWork = true, wasNotRecalled = true;
+
+ counts = WorkerCounter.GetCleanCounts();
+ FireEtwThreadPoolWorkerThreadStart(counts.NumActive, counts.NumRetired, GetClrInstanceId());
+
+#ifdef FEATURE_COMINTEROP
+ BOOL fCoInited = FALSE;
+ // Threadpool threads should be initialized as MTA. If we are unable to do so,
+ // return failure.
+ {
+ fCoInited = SUCCEEDED(::CoInitializeEx(NULL, COINIT_MULTITHREADED));
+ if (!fCoInited)
+ {
+ goto Exit;
+ }
+ }
+#endif // FEATURE_COMINTEROP
+Work:
+
+ if (!fThreadInit) {
+ if (g_fEEStarted) {
+ pThread = SetupThreadNoThrow();
+ if (pThread == NULL) {
+ __SwitchToThread(0, ++dwSwitchCount);
+ goto Work;
+ }
+
+ // converted to CLRThread and added to ThreadStore, pick an group affinity for this thread
+ pThread->ChooseThreadCPUGroupAffinity();
+
+ #ifdef FEATURE_COMINTEROP
+ if (pThread->SetApartment(Thread::AS_InMTA, TRUE) != Thread::AS_InMTA)
+ {
+ counts = WorkerCounter.GetCleanCounts();
+ while (true)
+ {
+ newCounts = counts;
+ newCounts.NumActive--;
+ newCounts.NumWorking--;
+ oldCounts = WorkerCounter.CompareExchangeCounts(newCounts, counts);
+ if (oldCounts == counts)
+ break;
+ counts = oldCounts;
+ }
+ goto Exit;
+ }
+ #endif // FEATURE_COMINTEROP
+
+ pThread->SetBackground(TRUE);
+ fThreadInit = TRUE;
+ }
+ }
+
+ GCX_PREEMP_NO_DTOR();
+ _ASSERTE(pThread == NULL || !pThread->PreemptiveGCDisabled());
+
+ // make sure there's really work. If not, go back to sleep
+
+ counts = WorkerCounter.GetCleanCounts();
+ while (true)
+ {
+ _ASSERTE(counts.NumActive > 0);
+ _ASSERTE(counts.NumWorking > 0);
+
+ newCounts = counts;
+
+ bool retired;
+
+ if (counts.NumActive > counts.MaxWorking)
+ {
+ newCounts.NumActive--;
+ newCounts.NumRetired++;
+ retired = true;
+ }
+ else
+ {
+ retired = false;
+
+ if (foundWork)
+ break;
+ }
+
+ newCounts.NumWorking--;
+
+ oldCounts = WorkerCounter.CompareExchangeCounts(newCounts, counts);
+
+ if (oldCounts == counts)
+ {
+ if (retired)
+ goto Retire;
+ else
+ goto WaitForWork;
+ }
+
+ counts = oldCounts;
+ }
+
+ if (GCHeap::IsGCInProgress(TRUE))
+ {
+ // GC is imminent, so wait until GC is complete before executing next request.
+ // this reduces in-flight objects allocated right before GC, easing the GC's work
+ GCHeap::WaitForGCCompletion(TRUE);
+ }
+
+ {
+ ThreadLocaleHolder localeHolder;
+
+ ThreadpoolMgr::UpdateLastDequeueTime();
+ ThreadpoolMgr::ExecuteWorkRequest(&foundWork, &wasNotRecalled);
+ }
+
+ if (foundWork)
+ {
+ // Reset TLS etc. for next WorkRequest.
+ if (pThread == NULL)
+ pThread = GetThread();
+
+ if (pThread)
+ {
+ if (pThread->IsAbortRequested())
+ pThread->EEResetAbort(Thread::TAR_ALL);
+ pThread->InternalReset(FALSE);
+ }
+ }
+
+ if (wasNotRecalled)
+ goto Work;
+
+Retire:
+
+ counts = WorkerCounter.GetCleanCounts();
+ FireEtwThreadPoolWorkerThreadRetirementStart(counts.NumActive, counts.NumRetired, GetClrInstanceId());
+
+ // It's possible that some work came in just before we decremented the active thread count, in which
+ // case whoever queued that work may be expecting us to pick it up - so they would not have signalled
+ // the worker semaphore. If there are other threads waiting, they will never be woken up, because
+ // whoever queued the work expects that it's already been picked up. The solution is to signal the semaphore
+ // if there's any work available.
+ if (PerAppDomainTPCountList::AreRequestsPendingInAnyAppDomains())
+ MaybeAddWorkingWorker();
+
+ while (true)
+ {
+RetryRetire:
+ DWORD result = RetiredWorkerSemaphore->Wait(AppX::IsAppXProcess() ? WorkerTimeoutAppX : WorkerTimeout, FALSE);
+ _ASSERTE(WAIT_OBJECT_0 == result || WAIT_TIMEOUT == result);
+
+ if (WAIT_OBJECT_0 == result)
+ {
+ foundWork = true;
+ counts = WorkerCounter.GetCleanCounts();
+ FireEtwThreadPoolWorkerThreadRetirementStop(counts.NumActive, counts.NumRetired, GetClrInstanceId());
+ goto Work;
+ }
+
+ if (!IsIoPending())
+ {
+ //
+ // We're going to exit. There's a nasty race here. We're about to decrement NumRetired,
+ // since we're going to exit. Once we've done that, nobody will expect this thread
+ // to be waiting for RetiredWorkerSemaphore. But between now and then, other threads still
+ // think we're waiting on the semaphore, and they will happily do the following to try to
+ // wake us up:
+ //
+ // 1) Decrement NumRetired
+ // 2) Increment NumActive
+ // 3) Increment NumWorking
+ // 4) Signal RetiredWorkerSemaphore
+ //
+ // We will not receive that signal. If we don't do something special here,
+ // we will decrement NumRetired an extra time, and leave the world thinking there
+ // are fewer retired threads, and more working threads than reality.
+ //
+ // What can we do about this? First, we *need* to decrement NumRetired. If someone did it before us,
+ // it might go negative. This is the easiest way to tell that we've encountered this race. In that case,
+ // we will simply not commit the decrement, swallow the signal that was sent, and proceed as if we
+ // got WAIT_OBJECT_0 in the wait above.
+ //
+ // If we don't hit zero while decrementing NumRetired, we still may have encountered this race. But
+ // if we don't hit zero, then there's another retired thread that will pick up this signal. So it's ok
+ // to exit.
+ //
+ counts = WorkerCounter.GetCleanCounts();
+ while (true)
+ {
+ if (counts.NumRetired == 0)
+ goto RetryRetire;
+
+ newCounts = counts;
+ newCounts.NumRetired--;
+
+ oldCounts = WorkerCounter.CompareExchangeCounts(newCounts, counts);
+ if (oldCounts == counts)
+ {
+ counts = newCounts;
+ break;
+ }
+ counts = oldCounts;
+ }
+
+ FireEtwThreadPoolWorkerThreadRetirementStop(counts.NumActive, counts.NumRetired, GetClrInstanceId());
+ goto Exit;
+ }
+ }
+
+WaitForWork:
+
+ // It's possible that we decided we had no work just before some work came in,
+ // but reduced the worker count *after* the work came in. In this case, we might
+ // miss the notification of available work. So we make a sweep through the ADs here,
+ // and wake up a thread (maybe this one!) if there is work to do.
+ if (PerAppDomainTPCountList::AreRequestsPendingInAnyAppDomains())
+ {
+ foundWork = true;
+ MaybeAddWorkingWorker();
+ }
+
+ FireEtwThreadPoolWorkerThreadWait(counts.NumActive, counts.NumRetired, GetClrInstanceId());
+
+RetryWaitForWork:
+ if (!WorkerSemaphore->Wait(AppX::IsAppXProcess() ? WorkerTimeoutAppX : WorkerTimeout))
+ {
+ if (!IsIoPending())
+ {
+ //
+ // We timed out, and are about to exit. This puts us in a very similar situation to the
+ // retirement case above - someone may think we're still waiting, and go ahead and:
+ //
+ // 1) Increment NumWorking
+ // 2) Signal WorkerSemaphore
+ //
+ // The solution is much like retirement; when we're decrementing NumActive, we need to make
+ // sure it doesn't drop below NumWorking. If it would, then we need to go back and wait
+ // again.
+ //
+
+ DangerousNonHostedSpinLockHolder tal(&ThreadAdjustmentLock);
+
+ counts = WorkerCounter.GetCleanCounts();
+ while (true)
+ {
+ if (counts.NumActive == counts.NumWorking)
+ {
+ goto RetryWaitForWork;
+ }
+
+ newCounts = counts;
+ newCounts.NumActive--;
+
+ // if we timed out while active, then Hill Climbing needs to be told that we need fewer threads
+ newCounts.MaxWorking = max(MinLimitTotalWorkerThreads, min(newCounts.NumActive, newCounts.MaxWorking));
+
+ oldCounts = WorkerCounter.CompareExchangeCounts(newCounts, counts);
+
+ if (oldCounts == counts)
+ {
+ HillClimbingInstance.ForceChange(newCounts.MaxWorking, ThreadTimedOut);
+ goto Exit;
+ }
+
+ counts = oldCounts;
+ }
+ }
+ else
+ {
+ goto RetryWaitForWork;
+ }
+ }
+ else
+ {
+ foundWork = true;
+ goto Work;
+ }
+
+Exit:
+
+#ifdef FEATURE_COMINTEROP
+ if (pThread) {
+ pThread->SetApartment(Thread::AS_Unknown, TRUE);
+ pThread->CoUninitialize();
+ }
+
+    // CoUninitialize the worker thread
+ if (fCoInited)
+ {
+ CoUninitialize();
+ }
+#endif
+
+ if (pThread) {
+ pThread->ClearThreadCPUGroupAffinity();
+
+ DestroyThread(pThread);
+ }
+
+ _ASSERTE(!IsIoPending());
+
+ counts = WorkerCounter.GetCleanCounts();
+ FireEtwThreadPoolWorkerThreadStop(counts.NumActive, counts.NumRetired, GetClrInstanceId());
+
+ return ERROR_SUCCESS;
+}
+
+
+#ifdef _MSC_VER
+#pragma warning(default:4702)
+#endif
+BOOL ThreadpoolMgr::SuspendProcessing()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
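+    // Sleep for SUSPEND_TIME once and retire unless CPU utilization dipped by 4% or more while
+    // we slept. (As written, the loop body runs exactly once, since shouldRetire is TRUE == 1.)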
+ BOOL shouldRetire = TRUE;
+ DWORD sleepInterval = SUSPEND_TIME;
+ int oldCpuUtilization = cpuUtilization;
+ for (int i = 0; i < shouldRetire; i++)
+ {
+ __SwitchToThread(sleepInterval, CALLER_LIMITS_SPINNING);
+ if ((cpuUtilization <= (oldCpuUtilization - 4)))
+ { // if cpu util. dips by 4% or more, then put it back in circulation
+ shouldRetire = FALSE;
+ break;
+ }
+ }
+
+ return shouldRetire;
+}
+
+
+// This should only be called by an unmanaged thread (i.e. there should be no managed
+// caller on the stack) since we are swallowing terminal exceptions
+DWORD ThreadpoolMgr::SafeWait(CLREvent * ev, DWORD sleepTime, BOOL alertable)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+ /* cannot use contract because of SEH
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;*/
+
+ DWORD status = WAIT_TIMEOUT;
+ EX_TRY
+ {
+ status = ev->Wait(sleepTime,FALSE);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ return status;
+}
+
+/************************************************************************/
+
+BOOL ThreadpoolMgr::RegisterWaitForSingleObject(PHANDLE phNewWaitObject,
+ HANDLE hWaitObject,
+ WAITORTIMERCALLBACK Callback,
+ PVOID Context,
+ ULONG timeout,
+ DWORD dwFlag )
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ if (GetThread()) { GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+ EnsureInitialized();
+
+ ThreadCB* threadCB;
+ {
+ CrstHolder csh(&WaitThreadsCriticalSection);
+
+ threadCB = FindWaitThread();
+ }
+
+ *phNewWaitObject = NULL;
+
+ if (threadCB)
+ {
+ WaitInfo* waitInfo = new (nothrow) WaitInfo;
+
+ if (waitInfo == NULL)
+ return FALSE;
+
+ waitInfo->waitHandle = hWaitObject;
+ waitInfo->Callback = Callback;
+ waitInfo->Context = Context;
+ waitInfo->timeout = timeout;
+ waitInfo->flag = dwFlag;
+ waitInfo->threadCB = threadCB;
+ waitInfo->state = 0;
+ waitInfo->refCount = 1; // safe to do this since no wait has yet been queued, so no other thread could be modifying this
+ waitInfo->ExternalCompletionEvent = INVALID_HANDLE;
+ waitInfo->ExternalEventSafeHandle = NULL;
+ waitInfo->handleOwningAD = (ADID) 0;
+
+ waitInfo->timer.startTime = GetTickCount();
+ waitInfo->timer.remainingTime = timeout;
+
+ *phNewWaitObject = waitInfo;
+
+ // We fire the "enqueue" ETW event here, to "mark" the thread that had called the API, rather than the
+ // thread that will PostQueuedCompletionStatus (the dedicated WaitThread).
+ // This event correlates with ThreadPoolIODequeue in ThreadpoolMgr::AsyncCallbackCompletion
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadPoolIOEnqueue))
+ FireEtwThreadPoolIOEnqueue((LPOVERLAPPED)waitInfo, Callback, (dwFlag & WAIT_SINGLE_EXECUTION) == 0, GetClrInstanceId());
+
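+        // Hand the new wait to the dedicated wait thread via APC. InsertNewWaitForSelf runs on
+        // that thread, which is the only thread allowed to mutate its wait lists.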
+ BOOL status = QueueUserAPC((PAPCFUNC)InsertNewWaitForSelf, threadCB->threadHandle, (size_t) waitInfo);
+
+ if (status == FALSE)
+ {
+ *phNewWaitObject = NULL;
+ delete waitInfo;
+ }
+
+ return status;
+ }
+
+ return FALSE;
+}
+
+
+// Returns a wait thread that can accommodate another wait request. The
+// caller is responsible for synchronizing access to the WaitThreadsHead.
+ThreadpoolMgr::ThreadCB* ThreadpoolMgr::FindWaitThread()
+{
+ CONTRACTL
+ {
+ THROWS; // CreateWaitThread can throw
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+ do
+ {
+ for (LIST_ENTRY* Node = (LIST_ENTRY*) WaitThreadsHead.Flink ;
+ Node != &WaitThreadsHead ;
+ Node = (LIST_ENTRY*)Node->Flink)
+ {
+ _ASSERTE(offsetof(WaitThreadInfo,link) == 0);
+
+ ThreadCB* threadCB = ((WaitThreadInfo*) Node)->threadCB;
+
+            if (threadCB->NumWaitHandles < MAX_WAITHANDLES)    // this test and following ...
+            {
+ InterlockedIncrement(&threadCB->NumWaitHandles); // ... increment are protected by WaitThreadsCriticalSection.
+ // but there might be a concurrent decrement in DeactivateWait
+ // or InsertNewWaitForSelf, hence the interlock
+ return threadCB;
+ }
+ }
+
+ // if reached here, there are no wait threads available, so need to create a new one
+ if (!CreateWaitThread())
+ return NULL;
+
+
+ // Now loop back
+ } while (TRUE);
+
+}
+
+BOOL ThreadpoolMgr::CreateWaitThread()
+{
+ CONTRACTL
+ {
+ THROWS; // CLREvent::CreateAutoEvent can throw OOM
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+ DWORD threadId;
+
+ if (g_fEEShutDown & ShutDown_Finalize2){
+ // The process is shutting down. Shutdown thread has ThreadStore lock,
+ // wait thread is blocked on the lock.
+ return FALSE;
+ }
+
+ NewHolder<WaitThreadInfo> waitThreadInfo(new (nothrow) WaitThreadInfo);
+ if (waitThreadInfo == NULL)
+ return FALSE;
+
+ NewHolder<ThreadCB> threadCB(new (nothrow) ThreadCB);
+
+ if (threadCB == NULL)
+ {
+ return FALSE;
+ }
+
+ threadCB->startEvent.CreateAutoEvent(FALSE);
+ HANDLE threadHandle = Thread::CreateUtilityThread(Thread::StackSize_Small, WaitThreadStart, (LPVOID)threadCB, CREATE_SUSPENDED, &threadId);
+
+ if (threadHandle == NULL)
+ {
+ threadCB->startEvent.CloseEvent();
+ return FALSE;
+ }
+
+ waitThreadInfo.SuppressRelease();
+ threadCB.SuppressRelease();
+ threadCB->threadHandle = threadHandle;
+ threadCB->threadId = threadId; // may be useful for debugging otherwise not used
+ threadCB->NumWaitHandles = 0;
+ threadCB->NumActiveWaits = 0;
+ for (int i=0; i< MAX_WAITHANDLES; i++)
+ {
+ InitializeListHead(&(threadCB->waitPointer[i]));
+ }
+
+ waitThreadInfo->threadCB = threadCB;
+
+ DWORD status = ResumeThread(threadHandle);
+
+ {
+ // We will QueueUserAPC on the newly created thread.
+ // Let us wait until the thread starts running.
+ GCX_PREEMP();
+ DWORD timeout=500;
+ while (TRUE) {
+ if (g_fEEShutDown & ShutDown_Finalize2){
+ // The process is shutting down. Shutdown thread has ThreadStore lock,
+ // wait thread is blocked on the lock.
+ return FALSE;
+ }
+ DWORD wait_status = threadCB->startEvent.Wait(timeout, FALSE);
+ if (wait_status == WAIT_OBJECT_0) {
+ break;
+ }
+ }
+ }
+ threadCB->startEvent.CloseEvent();
+
+ // check to see if setup succeeded
+ if (threadCB->threadHandle == NULL)
+ return FALSE;
+
+ InsertHeadList(&WaitThreadsHead,&waitThreadInfo->link);
+
+ _ASSERTE(status != (DWORD) (-1));
+
+ return (status != (DWORD) (-1));
+
+}
+
+// Executed as an APC on a WaitThread. Add the wait specified in pArg to the list of objects it is waiting on
+void ThreadpoolMgr::InsertNewWaitForSelf(WaitInfo* pArgs)
+{
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ WaitInfo* waitInfo = pArgs;
+
+ // the following is safe since only this thread is allowed to change the state
+ if (!(waitInfo->state & WAIT_DELETE))
+ {
+ waitInfo->state = (WAIT_REGISTERED | WAIT_ACTIVE);
+ }
+ else
+ {
+ // some thread unregistered the wait
+ DeleteWait(waitInfo);
+ return;
+ }
+
+
+ ThreadCB* threadCB = waitInfo->threadCB;
+
+ _ASSERTE(threadCB->NumActiveWaits <= threadCB->NumWaitHandles);
+
+ int index = FindWaitIndex(threadCB, waitInfo->waitHandle);
+ _ASSERTE(index >= 0 && index <= threadCB->NumActiveWaits);
+
+ if (index == threadCB->NumActiveWaits)
+ {
+ threadCB->waitHandle[threadCB->NumActiveWaits] = waitInfo->waitHandle;
+ threadCB->NumActiveWaits++;
+ }
+ else
+ {
+        // this is a duplicate wait handle, so the increment in FindWaitThread
+        // wasn't strictly necessary; decrementing it here avoids unnecessary thread creation.
+ InterlockedDecrement(&threadCB->NumWaitHandles);
+ }
+
+ _ASSERTE(offsetof(WaitInfo, link) == 0);
+ InsertTailList(&(threadCB->waitPointer[index]), (&waitInfo->link));
+
+ return;
+}
+
+// returns the index of the entry that matches waitHandle or next free entry if not found
+int ThreadpoolMgr::FindWaitIndex(const ThreadCB* threadCB, const HANDLE waitHandle)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ for (int i=0;i<threadCB->NumActiveWaits; i++)
+ if (threadCB->waitHandle[i] == waitHandle)
+ return i;
+
+ // else not found
+ return threadCB->NumActiveWaits;
+}
+
+
+// If no wraparound occurred, the timer has expired if dueTime lies between the last observed time and the current time.
+// If wraparound occurred, the timer has expired if dueTime was greater than the last time, or is less than or equal to the current time.
+#define TimeExpired(last,now,duetime) (last <= now ? \
+ (duetime <= now && duetime >= last): \
+ (duetime >= last || duetime <= now))
+
+#define TimeInterval(end,start) ( end > start ? (end - start) : ((0xffffffff - start) + end + 1) )
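+// Worked example with hypothetical tick values: take last = 0xFFFFFF00 and now = 0x00000010
+// (GetTickCount wrapped around). TimeExpired reports dueTime 0xFFFFFF80 as expired (dueTime >= last)
+// but dueTime 0x00000020 as still pending, and TimeInterval(0x00000010, 0xFFFFFF00) yields 0x110 ticks.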
+
+// Returns the minimum of the remaining time to reach a timeout among all the waits
+DWORD ThreadpoolMgr::MinimumRemainingWait(LIST_ENTRY* waitInfo, unsigned int numWaits)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ unsigned int min = (unsigned int) -1;
+ DWORD currentTime = GetTickCount();
+
+ for (unsigned i=0; i < numWaits ; i++)
+ {
+ WaitInfo* waitInfoPtr = (WaitInfo*) (waitInfo[i].Flink);
+ PVOID waitInfoHead = &(waitInfo[i]);
+ do
+ {
+ if (waitInfoPtr->timeout != INFINITE)
+ {
+ // compute remaining time
+ DWORD elapsedTime = TimeInterval(currentTime,waitInfoPtr->timer.startTime );
+
+ __int64 remainingTime = (__int64) (waitInfoPtr->timeout) - (__int64) elapsedTime;
+
+ // update remaining time
+ waitInfoPtr->timer.remainingTime = remainingTime > 0 ? (int) remainingTime : 0;
+
+ // ... and min
+ if (waitInfoPtr->timer.remainingTime < min)
+ min = waitInfoPtr->timer.remainingTime;
+ }
+
+ waitInfoPtr = (WaitInfo*) (waitInfoPtr->link.Flink);
+
+ } while ((PVOID) waitInfoPtr != waitInfoHead);
+
+ }
+ return min;
+}
+
+#ifdef _MSC_VER
+#ifdef _WIN64
+#pragma warning (disable : 4716)
+#else
+#pragma warning (disable : 4715)
+#endif
+#endif
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:22008) // "Prefast integer overflow check on (0 + lval) is bogus. Tried local disable without luck, doing whole method."
+#endif
+
+DWORD __stdcall ThreadpoolMgr::WaitThreadStart(LPVOID lpArgs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ ClrFlsSetThreadType (ThreadType_Wait);
+
+ ThreadCB* threadCB = (ThreadCB*) lpArgs;
+ Thread* pThread = SetupThreadNoThrow();
+
+ if (pThread == NULL)
+ {
+ _ASSERTE(threadCB->threadHandle != NULL);
+ threadCB->threadHandle = NULL;
+ }
+
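+    // Signal startEvent even if SetupThreadNoThrow failed: CreateWaitThread is blocked on this
+    // event, and a NULL threadHandle tells it that setup did not succeed.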
+ threadCB->startEvent.Set();
+
+ if (pThread == NULL)
+ {
+ return 0;
+ }
+
+ BEGIN_SO_INTOLERANT_CODE(pThread); // we probe at the top of the thread so we can safely call anything below here.
+ {
+ // wait threads never die. (Why?)
+ for (;;)
+ {
+ DWORD status;
+ DWORD timeout = 0;
+
+ if (threadCB->NumActiveWaits == 0)
+ {
+
+#undef SleepEx
+ // <TODO>@TODO Consider doing a sleep for an idle period and terminating the thread if no activity</TODO>
+            //We use SleepEx instead of CLRSleepEx because CLRSleepEx calls into SQL (or other hosts) in hosted
+            //scenarios. SQL does not deliver APCs, and the wait thread's wait insertion/deletion logic depends on
+            //APCs being delivered.
+ status = SleepEx(INFINITE,TRUE);
+#define SleepEx(a,b) Dont_Use_SleepEx(a,b)
+
+ _ASSERTE(status == WAIT_IO_COMPLETION);
+ }
+ else if (IsWaitThreadAPCPending())
+ {
+            //Do a sleep if an APC is pending. This solves the corner case where the wait is signaled
+            //but the APC to deregister the wait never fires; that scenario leads to an infinite loop. This check
+            //allows the thread to enter an alertable wait and thus causes the APC to fire.
+
+ ResetWaitThreadAPCPending();
+
+                //We use SleepEx instead of CLRSleepEx because CLRSleepEx calls into SQL (or other hosts) in hosted
+                //scenarios. SQL does not deliver APCs, and the wait thread's wait insertion/deletion logic depends on
+                //APCs being delivered.
+
+ #undef SleepEx
+ status = SleepEx(0,TRUE);
+ #define SleepEx(a,b) Dont_Use_SleepEx(a,b)
+
+ continue;
+ }
+ else
+ {
+ // compute minimum timeout. this call also updates the remainingTime field for each wait
+ timeout = MinimumRemainingWait(threadCB->waitPointer,threadCB->NumActiveWaits);
+
+ status = WaitForMultipleObjectsEx( threadCB->NumActiveWaits,
+ threadCB->waitHandle,
+ FALSE, // waitall
+ timeout,
+ TRUE ); // alertable
+
+ _ASSERTE( (status == WAIT_TIMEOUT) ||
+ (status == WAIT_IO_COMPLETION) ||
+ //It could be that there are no waiters at this point,
+ //as the APC to deregister the wait may have run.
+ (status == WAIT_OBJECT_0) ||
+ (status >= WAIT_OBJECT_0 && status < (DWORD)(WAIT_OBJECT_0 + threadCB->NumActiveWaits)) ||
+ (status == WAIT_FAILED));
+
+ //It could be that the last waiter also got deregistered.
+ if (threadCB->NumActiveWaits == 0)
+ {
+ continue;
+ }
+ }
+
+ if (status == WAIT_IO_COMPLETION)
+ continue;
+
+ if (status == WAIT_TIMEOUT)
+ {
+ for (int i=0; i< threadCB->NumActiveWaits; i++)
+ {
+ WaitInfo* waitInfo = (WaitInfo*) (threadCB->waitPointer[i]).Flink;
+ PVOID waitInfoHead = &(threadCB->waitPointer[i]);
+
+ do
+ {
+ _ASSERTE(waitInfo->timer.remainingTime >= timeout);
+
+ WaitInfo* wTemp = (WaitInfo*) waitInfo->link.Flink;
+
+ if (waitInfo->timer.remainingTime == timeout)
+ {
+ ProcessWaitCompletion(waitInfo,i,TRUE);
+ }
+
+ waitInfo = wTemp;
+
+ } while ((PVOID) waitInfo != waitInfoHead);
+ }
+ }
+ else if (status >= WAIT_OBJECT_0 && status < (DWORD)(WAIT_OBJECT_0 + threadCB->NumActiveWaits))
+ {
+ unsigned index = status - WAIT_OBJECT_0;
+ WaitInfo* waitInfo = (WaitInfo*) (threadCB->waitPointer[index]).Flink;
+ PVOID waitInfoHead = &(threadCB->waitPointer[index]);
+ BOOL isAutoReset;
+
+            // Setting to unconditional TRUE is inefficient since we will re-enter the wait and release
+            // the next waiter, but, short of using undocumented NT APIs, it is the only solution.
+ // Querying the state with a WaitForSingleObject is not an option as it will reset an
+ // auto reset event if it has been signalled since the previous wait.
+ isAutoReset = TRUE;
+
+ do
+ {
+ WaitInfo* wTemp = (WaitInfo*) waitInfo->link.Flink;
+ ProcessWaitCompletion(waitInfo,index,FALSE);
+
+ waitInfo = wTemp;
+
+ } while (((PVOID) waitInfo != waitInfoHead) && !isAutoReset);
+
+ // If an app registers a recurring wait for an event that is always signalled (!),
+            // then no APCs will be executed since the thread never enters the alertable state.
+ // This can be fixed by doing the following:
+ // SleepEx(0,TRUE);
+ // However, it causes an unnecessary context switch. It is not worth penalizing well
+ // behaved apps to protect poorly written apps.
+
+
+ }
+ else
+ {
+ _ASSERTE(status == WAIT_FAILED);
+ // wait failed: application error
+ // find out which wait handle caused the wait to fail
+ for (int i = 0; i < threadCB->NumActiveWaits; i++)
+ {
+ DWORD subRet = WaitForSingleObject(threadCB->waitHandle[i], 0);
+
+ if (subRet != WAIT_FAILED)
+ continue;
+
+ // remove all waits associated with this wait handle
+
+ WaitInfo* waitInfo = (WaitInfo*) (threadCB->waitPointer[i]).Flink;
+ PVOID waitInfoHead = &(threadCB->waitPointer[i]);
+
+ do
+ {
+ WaitInfo* temp = (WaitInfo*) waitInfo->link.Flink;
+
+ DeactivateNthWait(waitInfo,i);
+
+
+ // Note, we cannot cleanup here since there is no way to suppress finalization
+ // we will just leak, and rely on the finalizer to clean up the memory
+ //if (InterlockedDecrement(&waitInfo->refCount) == 0)
+ // DeleteWait(waitInfo);
+
+
+ waitInfo = temp;
+
+ } while ((PVOID) waitInfo != waitInfoHead);
+
+ break;
+ }
+ }
+ }
+ }
+ END_SO_INTOLERANT_CODE;
+
+ //This is unreachable...so no return required.
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+#ifdef _MSC_VER
+#ifdef _WIN64
+#pragma warning (default : 4716)
+#else
+#pragma warning (default : 4715)
+#endif
+#endif
+
+void ThreadpoolMgr::ProcessWaitCompletion(WaitInfo* waitInfo,
+ unsigned index,
+ BOOL waitTimedOut
+ )
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+ /* cannot use contract because of SEH
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;*/
+
+ AsyncCallback* asyncCallback = NULL;
+ EX_TRY{
+ if ( waitInfo->flag & WAIT_SINGLE_EXECUTION)
+ {
+ DeactivateNthWait (waitInfo,index) ;
+ }
+ else
+ { // reactivate wait by resetting timer
+ waitInfo->timer.startTime = GetTickCount();
+ }
+
+ asyncCallback = MakeAsyncCallback();
+ if (asyncCallback)
+ {
+ asyncCallback->wait = waitInfo;
+ asyncCallback->waitTimedOut = waitTimedOut;
+
+ InterlockedIncrement(&waitInfo->refCount);
+
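+            // The queued AsyncCallback owns this reference until AsyncCallbackCompletion runs;
+            // if queuing fails below, ReleaseAsyncCallback drops it again.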
+#ifndef FEATURE_PAL
+ if (FALSE == PostQueuedCompletionStatus((LPOVERLAPPED)asyncCallback, (LPOVERLAPPED_COMPLETION_ROUTINE)WaitIOCompletionCallback))
+#else // FEATURE_PAL
+ if (FALSE == QueueUserWorkItem(AsyncCallbackCompletion, asyncCallback, QUEUE_ONLY))
+#endif // !FEATURE_PAL
+ ReleaseAsyncCallback(asyncCallback);
+ }
+ }
+ EX_CATCH {
+ if (asyncCallback)
+ ReleaseAsyncCallback(asyncCallback);
+
+ if (SwallowUnhandledExceptions())
+ {
+ // Do nothing to swallow the exception
+ }
+ else
+ {
+ EX_RETHROW;
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+
+DWORD __stdcall ThreadpoolMgr::AsyncCallbackCompletion(PVOID pArgs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_PREEMPTIVE;
+ GC_TRIGGERS;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Thread * pThread = GetThread();
+
+ if (pThread == NULL)
+ {
+ HRESULT hr = ERROR_SUCCESS;
+
+ ClrFlsSetThreadType(ThreadType_Threadpool_Worker);
+ pThread = SetupThreadNoThrow(&hr);
+
+ if (pThread == NULL)
+ {
+ return hr;
+ }
+ }
+
+ BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, return ERROR_STACK_OVERFLOW);
+ {
+ AsyncCallback * asyncCallback = (AsyncCallback*) pArgs;
+
+ WaitInfo * waitInfo = asyncCallback->wait;
+
+ AsyncCallbackHolder asyncCBHolder;
+ asyncCBHolder.Assign(asyncCallback);
+
+ // We fire the "dequeue" ETW event here, before executing the user code, to enable correlation with
+ // the ThreadPoolIOEnqueue fired in ThreadpoolMgr::RegisterWaitForSingleObject
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadPoolIODequeue))
+ FireEtwThreadPoolIODequeue(waitInfo, waitInfo->Callback, GetClrInstanceId());
+
+ // the user callback can throw, the host must be prepared to handle it.
+ // SQL is ok, since they have a top-level SEH handler. However, there's
+ // no easy way to verify it
+
+ ((WAITORTIMERCALLBACKFUNC) waitInfo->Callback)
+ ( waitInfo->Context, asyncCallback->waitTimedOut != FALSE);
+ }
+ END_SO_INTOLERANT_CODE;
+
+ return ERROR_SUCCESS;
+}
+
+void ThreadpoolMgr::DeactivateWait(WaitInfo* waitInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ThreadCB* threadCB = waitInfo->threadCB;
+ DWORD endIndex = threadCB->NumActiveWaits-1;
+ DWORD index;
+
+ for (index = 0; index <= endIndex; index++)
+ {
+ LIST_ENTRY* head = &(threadCB->waitPointer[index]);
+ LIST_ENTRY* current = head;
+ do {
+ if (current->Flink == (PVOID) waitInfo)
+ goto FOUND;
+
+ current = (LIST_ENTRY*) current->Flink;
+
+ } while (current != head);
+ }
+
+FOUND:
+ _ASSERTE(index <= endIndex);
+
+ DeactivateNthWait(waitInfo, index);
+}
+
+
+void ThreadpoolMgr::DeactivateNthWait(WaitInfo* waitInfo, DWORD index)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ThreadCB* threadCB = waitInfo->threadCB;
+
+ if (waitInfo->link.Flink != waitInfo->link.Blink)
+ {
+ RemoveEntryList(&(waitInfo->link));
+ }
+ else
+ {
+
+ ULONG EndIndex = threadCB->NumActiveWaits -1;
+
+ // Move the remaining ActiveWaitArray left.
+
+ ShiftWaitArray( threadCB, index+1, index,EndIndex - index ) ;
+
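+        // ShiftWaitArray copied the LIST_ENTRY heads to new slots, but the nodes in each circular
+        // list still point at the old head addresses.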
+ // repair the blink and flink of the first and last elements in the list
+ for (unsigned int i = 0; i< EndIndex-index; i++)
+ {
+ WaitInfo* firstWaitInfo = (WaitInfo*) threadCB->waitPointer[index+i].Flink;
+ WaitInfo* lastWaitInfo = (WaitInfo*) threadCB->waitPointer[index+i].Blink;
+ firstWaitInfo->link.Blink = &(threadCB->waitPointer[index+i]);
+ lastWaitInfo->link.Flink = &(threadCB->waitPointer[index+i]);
+ }
+ // initialize the entry just freed
+ InitializeListHead(&(threadCB->waitPointer[EndIndex]));
+
+ threadCB->NumActiveWaits-- ;
+ InterlockedDecrement(&threadCB->NumWaitHandles);
+ }
+
+ waitInfo->state &= ~WAIT_ACTIVE ;
+
+}
+
+void ThreadpoolMgr::DeleteWait(WaitInfo* waitInfo)
+{
+ CONTRACTL
+ {
+ if (waitInfo->ExternalEventSafeHandle != NULL) { THROWS;} else { NOTHROW; }
+ MODE_ANY;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ }
+ CONTRACTL_END;
+
+ if(waitInfo->Context && (waitInfo->flag & WAIT_FREE_CONTEXT)) {
+ DelegateInfo* pDelegate = (DelegateInfo*) waitInfo->Context;
+
+ // Since the delegate release destroys a handle, we need to be in
+ // co-operative mode
+ {
+ GCX_COOP();
+ pDelegate->Release();
+ }
+
+ RecycleMemory( pDelegate, MEMTYPE_DelegateInfo );
+ }
+
+ if (waitInfo->flag & WAIT_INTERNAL_COMPLETION)
+ {
+ waitInfo->InternalCompletionEvent.Set();
+ return; // waitInfo will be deleted by the thread that's waiting on this event
+ }
+ else if (waitInfo->ExternalCompletionEvent != INVALID_HANDLE)
+ {
+ UnsafeSetEvent(waitInfo->ExternalCompletionEvent);
+ }
+ else if (waitInfo->ExternalEventSafeHandle != NULL)
+ {
+ // Release the safe handle and the GC handle holding it
+ ReleaseWaitInfo(waitInfo);
+ }
+
+ delete waitInfo;
+
+
+}
+
+
+
+/************************************************************************/
+BOOL ThreadpoolMgr::UnregisterWaitEx(HANDLE hWaitObject,HANDLE Event)
+{
+ CONTRACTL
+ {
+ THROWS; //NOTHROW;
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsInitialized()); // cannot call unregister before first registering
+
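+    // Event == (HANDLE)-1 requests a blocking unregister: we wait here until the wait is fully
+    // torn down. A real event handle selects the asynchronous path, with completion signaled
+    // through that event; NULL means asynchronous with no completion notification.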
+ const BOOL Blocking = (Event == (HANDLE) -1);
+ WaitInfo* waitInfo = (WaitInfo*) hWaitObject;
+
+ if (!hWaitObject)
+ {
+ return FALSE;
+ }
+
+ // we do not allow callbacks to run in the wait thread, hence the assert
+ _ASSERTE(GetCurrentThreadId() != waitInfo->threadCB->threadId);
+
+
+ if (Blocking)
+ {
+ waitInfo->InternalCompletionEvent.CreateAutoEvent(FALSE);
+ waitInfo->flag |= WAIT_INTERNAL_COMPLETION;
+
+ }
+ else
+ {
+ waitInfo->ExternalCompletionEvent = (Event ? Event : INVALID_HANDLE);
+ _ASSERTE((waitInfo->flag & WAIT_INTERNAL_COMPLETION) == 0);
+ // we still want to block until the wait has been deactivated
+ waitInfo->PartialCompletionEvent.CreateAutoEvent(FALSE);
+ }
+
+ BOOL status = QueueDeregisterWait(waitInfo->threadCB->threadHandle, waitInfo);
+
+
+ if (status == 0)
+ {
+ STRESS_LOG1(LF_THREADPOOL, LL_ERROR, "Queue APC failed in UnregisterWaitEx %x", status);
+
+ if (Blocking)
+ waitInfo->InternalCompletionEvent.CloseEvent();
+ else
+ waitInfo->PartialCompletionEvent.CloseEvent();
+ return FALSE;
+ }
+
+ if (!Blocking)
+ {
+ waitInfo->PartialCompletionEvent.Wait(INFINITE,TRUE);
+ waitInfo->PartialCompletionEvent.CloseEvent();
+ // we cannot do DeleteWait in DeregisterWait, since the DeleteWait could happen before
+ // we close the event. So, the code has been moved here.
+ if (InterlockedDecrement(&waitInfo->refCount) == 0)
+ {
+ DeleteWait(waitInfo);
+ }
+ }
+
+ else // i.e. blocking
+ {
+ _ASSERTE(waitInfo->flag & WAIT_INTERNAL_COMPLETION);
+ _ASSERTE(waitInfo->ExternalEventSafeHandle == NULL);
+
+ waitInfo->InternalCompletionEvent.Wait(INFINITE,TRUE);
+ waitInfo->InternalCompletionEvent.CloseEvent();
+ delete waitInfo; // if WAIT_INTERNAL_COMPLETION is not set, waitInfo will be deleted in DeleteWait
+ }
+ return TRUE;
+}
+
+
+void ThreadpoolMgr::DeregisterWait(WaitInfo* pArgs)
+{
+
+ WRAPPER_NO_CONTRACT;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ WaitInfo* waitInfo = pArgs;
+
+ if ( ! (waitInfo->state & WAIT_REGISTERED) )
+ {
+ // set state to deleted, so that it does not get registered
+ waitInfo->state |= WAIT_DELETE ;
+
+        // since the wait has not even been registered, we don't need an interlock to decrease the refCount
+ waitInfo->refCount--;
+
+ if (waitInfo->PartialCompletionEvent.IsValid())
+ {
+ waitInfo->PartialCompletionEvent.Set();
+ }
+ return;
+ }
+
+ if (waitInfo->state & WAIT_ACTIVE)
+ {
+ DeactivateWait(waitInfo);
+ }
+
+ if ( waitInfo->PartialCompletionEvent.IsValid())
+ {
+ waitInfo->PartialCompletionEvent.Set();
+ return; // we cannot delete the wait here since the PartialCompletionEvent
+ // may not have been closed yet. so, we return and rely on the waiter of PartialCompletionEvent
+ // to do the close
+ }
+
+ if (InterlockedDecrement(&waitInfo->refCount) == 0)
+ {
+        // After we suspend the EE during shutdown, a thread may be blocked in WaitForEndOfShutdown in an alertable state.
+        // We don't allow a thread to reenter the runtime while processing an APC or pumping messages.
+ if (!g_fSuspendOnShutdown )
+ {
+ DeleteWait(waitInfo);
+ }
+ }
+ return;
+}
+
+
+/* This gets called on the finalizer thread ONLY IF an app does not deregister
+   the wait. Note that just because the registeredWaitHandle is collected by GC
+ does not mean it is safe to delete the wait. The refcount tells us when it is
+ safe.
+*/
+void ThreadpoolMgr::WaitHandleCleanup(HANDLE hWaitObject)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ WaitInfo* waitInfo = (WaitInfo*) hWaitObject;
+ _ASSERTE(waitInfo->refCount > 0);
+
+ DWORD result = QueueDeregisterWait(waitInfo->threadCB->threadHandle, waitInfo);
+
+ if (result == 0)
+ STRESS_LOG1(LF_THREADPOOL, LL_ERROR, "Queue APC failed in WaitHandleCleanup %x", result);
+
+}
+
+BOOL ThreadpoolMgr::CreateGateThread()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // The gate thread is only needed if the CLR is providing part of the ThreadPool implementation.
+ _ASSERTE(!CLRThreadpoolHosted() || !CLRIoCompletionHosted());
+
+ HANDLE threadHandle = Thread::CreateUtilityThread(Thread::StackSize_Small, GateThreadStart, NULL);
+
+ if (threadHandle)
+ {
+ CloseHandle(threadHandle); //we don't need this anymore
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+
+/************************************************************************/
+
+BOOL ThreadpoolMgr::BindIoCompletionCallback(HANDLE FileHandle,
+ LPOVERLAPPED_COMPLETION_ROUTINE Function,
+ ULONG Flags,
+ DWORD& errCode)
+{
+
+ CONTRACTL
+ {
+ THROWS; // EnsureInitialized can throw
+ if (GetThread()) { GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+
+ errCode = S_OK;
+
+ EnsureInitialized();
+
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostIoCompletionManager *provider = CorHost2::GetHostIoCompletionManager();
+ if (provider) {
+ BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
+ errCode = provider->Bind(GlobalCompletionPort, FileHandle);
+ END_SO_TOLERANT_CODE_CALLING_HOST;
+ if (FAILED(errCode))
+ {
+ SetLastHRError(errCode);
+ return FALSE;
+ }
+ else
+ {
+ return TRUE;
+ }
+ }
+#endif // FEATURE_INCLUDE_ALL_INTERFACES
+
+ _ASSERTE(GlobalCompletionPort != NULL);
+
+ if (!InitCompletionPortThreadpool)
+ InitCompletionPortThreadpool = TRUE;
+
+ GrowCompletionPortThreadpoolIfNeeded();
+
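+    // The callback function pointer doubles as the IOCP completion key: CompletionPortThreadStart
+    // casts each dequeued key back to an LPOVERLAPPED_COMPLETION_ROUTINE and invokes it.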
+ HANDLE h = CreateIoCompletionPort(FileHandle,
+ GlobalCompletionPort,
+ (ULONG_PTR) Function,
+ NumberOfProcessors);
+ if (h == NULL)
+ {
+ errCode = GetLastError();
+ return FALSE;
+ }
+
+ _ASSERTE(h == GlobalCompletionPort);
+
+ return TRUE;
+#else // FEATURE_PAL
+ SetLastError(ERROR_CALL_NOT_IMPLEMENTED);
+ return FALSE;
+#endif // !FEATURE_PAL
+}
+
+#ifndef FEATURE_PAL
+BOOL ThreadpoolMgr::CreateCompletionPortThread(LPVOID lpArgs)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) { GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Thread *pThread;
+ BOOL fIsCLRThread;
+ if ((pThread = CreateUnimpersonatedThread(CompletionPortThreadStart, lpArgs, &fIsCLRThread)) != NULL)
+ {
+ LastCPThreadCreation = GetTickCount(); // record this for use by logic to spawn additional threads
+
+ if (fIsCLRThread) {
+ pThread->ChooseThreadCPUGroupAffinity();
+ pThread->StartThread();
+ }
+ else {
+ DWORD status;
+ status = ResumeThread((HANDLE)pThread);
+ _ASSERTE(status != (DWORD) (-1));
+ CloseHandle((HANDLE)pThread); // we don't need this anymore
+ }
+
+ ThreadCounter::Counts counts = CPThreadCounter.GetCleanCounts();
+ FireEtwIOThreadCreate_V1(counts.NumActive + counts.NumRetired, counts.NumRetired, GetClrInstanceId());
+
+ return TRUE;
+ }
+
+
+ return FALSE;
+}
+
+DWORD __stdcall ThreadpoolMgr::CompletionPortThreadStart(LPVOID lpArgs)
+{
+ ClrFlsSetThreadType (ThreadType_Threadpool_IOCompletion);
+
+ CONTRACTL
+ {
+ THROWS;
+ if (GetThread()) { MODE_PREEMPTIVE;} else { DISABLED(MODE_ANY);}
+ if (GetThread()) { GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE (!CLRIoCompletionHosted());
+
+ DWORD numBytes=0;
+ size_t key=0;
+
+ LPOVERLAPPED pOverlapped = NULL;
+ DWORD errorCode;
+ PIOCompletionContext context;
+ BOOL fIsCompletionContext;
+
+ const DWORD CP_THREAD_WAIT = AppX::IsAppXProcess() ? 5000 : 15000; /* milliseconds */
+
+ _ASSERTE(GlobalCompletionPort != NULL);
+
+ BOOL fThreadInit = FALSE;
+ Thread *pThread = NULL;
+
+ if (g_fEEStarted) {
+ pThread = SetupThreadNoThrow();
+ if (pThread == NULL) {
+ return 0;
+ }
+
+ // converted to CLRThread and added to ThreadStore, pick an group affinity for this thread
+ pThread->ChooseThreadCPUGroupAffinity();
+
+ fThreadInit = TRUE;
+ }
+
+#ifdef FEATURE_COMINTEROP
+ // Threadpool threads should be initialized as MTA. If we are unable to do so,
+ // return failure.
+ BOOL fCoInited = FALSE;
+ {
+ fCoInited = SUCCEEDED(::CoInitializeEx(NULL, COINIT_MULTITHREADED));
+ if (!fCoInited)
+ {
+ goto Exit;
+ }
+ }
+
+ if (pThread && pThread->SetApartment(Thread::AS_InMTA, TRUE) != Thread::AS_InMTA)
+ {
+ // @todo: should we log the failure
+ goto Exit;
+ }
+#endif // FEATURE_COMINTEROP
+
+ ThreadCounter::Counts oldCounts;
+ ThreadCounter::Counts newCounts;
+
+ DWORD cpThreadWait = CP_THREAD_WAIT;
+ for (;; )
+ {
+Top:
+ if (!fThreadInit) {
+ if (g_fEEStarted) {
+ pThread = SetupThreadNoThrow();
+ if (pThread == NULL) {
+ break;
+ }
+
+ // converted to CLRThread and added to ThreadStore, pick an group affinity for this thread
+ pThread->ChooseThreadCPUGroupAffinity();
+
+#ifdef FEATURE_COMINTEROP
+ if (pThread->SetApartment(Thread::AS_InMTA, TRUE) != Thread::AS_InMTA)
+ {
+ // @todo: should we log the failure
+ goto Exit;
+ }
+#endif // FEATURE_COMINTEROP
+
+ fThreadInit = TRUE;
+ }
+ }
+
+ GCX_PREEMP_NO_DTOR();
+
+ //
+ // We're about to wait on the IOCP; mark ourselves as no longer "working."
+ //
+ while (true)
+ {
+ ThreadCounter::Counts oldCounts = CPThreadCounter.DangerousGetDirtyCounts();
+ ThreadCounter::Counts newCounts = oldCounts;
+ newCounts.NumWorking--;
+
+ //
+ // If we've only got one thread left, it won't be allowed to exit, because we need to keep
+ // one thread listening for completions. So there's no point in having a timeout; it will
+ // only use power unnecessarily.
+ //
+ cpThreadWait = (newCounts.NumActive == 1) ? INFINITE : CP_THREAD_WAIT;
+
+ if (oldCounts == CPThreadCounter.CompareExchangeCounts(newCounts, oldCounts))
+ break;
+ }
+
+ errorCode = S_OK;
+
+ if (lpArgs == NULL)
+ {
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ if (g_fCompletionPortDrainNeeded && pThread)
+ {
+ // We have started draining completion port.
+ // The next job picked up by this thread is going to be after our special marker.
+ if (!pThread->IsCompletionPortDrained())
+ {
+ pThread->MarkCompletionPortDrained();
+ }
+ }
+
+ context = NULL;
+ fIsCompletionContext = FALSE;
+
+ if (pThread == NULL)
+ {
+ pThread = GetThread();
+ }
+
+ if (pThread)
+ {
+
+ context = (PIOCompletionContext) pThread->GetIOCompletionContext();
+
+ if (context->lpOverlapped != NULL)
+ {
+ errorCode = context->ErrorCode;
+ numBytes = context->numBytesTransferred;
+ pOverlapped = context->lpOverlapped;
+ key = context->key;
+
+ context->lpOverlapped = NULL;
+ fIsCompletionContext = TRUE;
+ }
+ }
+
+ if((context == NULL) || (!fIsCompletionContext))
+ {
+ _ASSERTE (context == NULL || context->lpOverlapped == NULL);
+
+ LeaveRuntimeHolder holder((size_t)GetQueuedCompletionStatus);
+
+ BOOL status = GetQueuedCompletionStatus(
+ GlobalCompletionPort,
+ &numBytes,
+ (PULONG_PTR)&key,
+ &pOverlapped,
+ cpThreadWait
+ );
+
+ if (status == 0)
+ errorCode = GetLastError();
+ }
+ }
+ else
+ {
+ QueuedStatus *CompletionStatus = (QueuedStatus*)lpArgs;
+ numBytes = CompletionStatus->numBytes;
+ key = (size_t)CompletionStatus->key;
+ pOverlapped = CompletionStatus->pOverlapped;
+ errorCode = CompletionStatus->errorCode;
+ delete CompletionStatus;
+ lpArgs = NULL; // one-time deal for initial CP packet
+ }
+
+ // We fire IODequeue events whether the IO completion was retrieved in the above call to
+ // GetQueuedCompletionStatus or during an earlier call (e.g. in GateThreadStart, and passed here in lpArgs,
+ // or in CompletionPortDispatchWorkWithinAppDomain, and passed here through StoreOverlappedInfoInThread)
+
+ // For the purposes of activity correlation we only fire ETW events here, if needed OR if not fired at a higher
+    // abstraction level (e.g. ThreadpoolMgr::RegisterWaitForSingleObject)
+ // Note: we still fire the event for managed async IO, despite the fact we don't have a paired IOEnqueue event
+ // for this case. We do this to "mark" the end of the previous workitem. When we provide full support at the higher
+ // abstraction level for managed IO we can remove the IODequeues fired here
+ if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadPoolIODequeue)
+ && !AreEtwIOQueueEventsSpeciallyHandled((LPOVERLAPPED_COMPLETION_ROUTINE)key) && pOverlapped != NULL)
+ FireEtwThreadPoolIODequeue(pOverlapped, (BYTE*)pOverlapped - offsetof(OverlappedDataObject, Internal), GetClrInstanceId());
+
+ bool enterRetirement;
+
+ while (true)
+ {
+ //
+ // When we reach this point, this thread is "active" but not "working." Depending on the result of the call to GetQueuedCompletionStatus,
+ // and the state of the rest of the IOCP threads, we need to figure out whether to de-activate (exit) this thread, retire this thread,
+ // or transition to "working."
+ //
+ oldCounts = CPThreadCounter.GetCleanCounts();
+ newCounts = oldCounts;
+ enterRetirement = false;
+
+ if (errorCode == WAIT_TIMEOUT)
+ {
+ //
+ // We timed out, and are going to try to exit or retire.
+ //
+ newCounts.NumActive--;
+
+ //
+ // We need at least one free thread, or we have no way of knowing if completions are being queued.
+ //
+ if (newCounts.NumWorking == newCounts.NumActive)
+ {
+ newCounts = oldCounts;
+                newCounts.NumWorking++; //not really working, but we'll decrement it at the top
+ if (oldCounts == CPThreadCounter.CompareExchangeCounts(newCounts, oldCounts))
+ goto Top;
+ else
+ continue;
+ }
+
+ //
+ // We can't exit a thread that has pending I/O - we'll "retire" it instead.
+ //
+ if (IsIoPending())
+ {
+ enterRetirement = true;
+ newCounts.NumRetired++;
+ }
+ }
+ else
+ {
+ //
+ // We have work to do
+ //
+ newCounts.NumWorking++;
+ }
+
+ if (oldCounts == CPThreadCounter.CompareExchangeCounts(newCounts, oldCounts))
+ break;
+ }
+
+ if (errorCode == WAIT_TIMEOUT)
+ {
+ if (!enterRetirement)
+ {
+ goto Exit;
+ }
+ else
+ {
+ // now in "retired mode" waiting for pending io to complete
+ FireEtwIOThreadRetire_V1(newCounts.NumActive + newCounts.NumRetired, newCounts.NumRetired, GetClrInstanceId());
+
+ for (;;)
+ {
+#ifndef FEATURE_PAL
+ if (g_fCompletionPortDrainNeeded && pThread)
+ {
+ // The thread is not going to process IO job now.
+ if (!pThread->IsCompletionPortDrained())
+ {
+ pThread->MarkCompletionPortDrained();
+ }
+ }
+#endif // !FEATURE_PAL
+
+ DWORD status = SafeWait(RetiredCPWakeupEvent,CP_THREAD_PENDINGIO_WAIT,FALSE);
+ _ASSERTE(status == WAIT_TIMEOUT || status == WAIT_OBJECT_0);
+
+ if (status == WAIT_TIMEOUT)
+ {
+ if (IsIoPending())
+ {
+ continue;
+ }
+ else
+ {
+ // We can now exit; decrement the retired count.
+ while (true)
+ {
+ oldCounts = CPThreadCounter.GetCleanCounts();
+ newCounts = oldCounts;
+ newCounts.NumRetired--;
+ if (oldCounts == CPThreadCounter.CompareExchangeCounts(newCounts, oldCounts))
+ break;
+ }
+ goto Exit;
+ }
+ }
+ else
+ {
+ // put back into rotation -- we need a thread
+ while (true)
+ {
+ oldCounts = CPThreadCounter.GetCleanCounts();
+ newCounts = oldCounts;
+ newCounts.NumRetired--;
+ newCounts.NumActive++;
+ newCounts.NumWorking++; //we're not really working, but we'll decrement this before waiting for work.
+ if (oldCounts == CPThreadCounter.CompareExchangeCounts(newCounts, oldCounts))
+ break;
+ }
+ FireEtwIOThreadUnretire_V1(newCounts.NumActive + newCounts.NumRetired, newCounts.NumRetired, GetClrInstanceId());
+ goto Top;
+ }
+ }
+ }
+ }
+
+ // we should not reach this point unless we have work to do
+ _ASSERTE(errorCode != WAIT_TIMEOUT && !enterRetirement);
+
+ // if we have no more free threads, start the gate thread
+ if (newCounts.NumWorking >= newCounts.NumActive)
+ EnsureGateThreadRunning();
+
+
+    // We cannot assert here. If the stdin/stdout/stderr of a child process are redirected based on
+    // async IO, GetQueuedCompletionStatus returns when the child process operates on its stdin/stdout/stderr.
+    // The parent process does not issue any ReadFile/WriteFile, and hence pOverlapped is going to be NULL.
+ //_ASSERTE(pOverlapped != NULL);
+
+ if (pOverlapped != NULL)
+ {
+ _ASSERTE(key != 0); // should be a valid function address
+
+ if (key != 0)
+ {
+ if (GCHeap::IsGCInProgress(TRUE))
+ {
+                    // Indicate that this thread is free and waiting on GC, not doing any user work.
+                    // This prevents extra threads from being injected when some threads have woken
+                    // up from the GC event and some have not.
+ while (true)
+ {
+ oldCounts = CPThreadCounter.GetCleanCounts();
+ newCounts = oldCounts;
+ newCounts.NumWorking--;
+ if (oldCounts == CPThreadCounter.CompareExchangeCounts(newCounts, oldCounts))
+ break;
+ }
+
+                    // GC is imminent, so wait until the GC is complete before executing the next request.
+                    // This reduces the number of in-flight objects allocated right before the GC, easing the GC's work.
+ GCHeap::WaitForGCCompletion(TRUE);
+
+ while (true)
+ {
+ oldCounts = CPThreadCounter.GetCleanCounts();
+ newCounts = oldCounts;
+ newCounts.NumWorking++;
+ if (oldCounts == CPThreadCounter.CompareExchangeCounts(newCounts, oldCounts))
+ break;
+ }
+
+ if (newCounts.NumWorking >= newCounts.NumActive)
+ EnsureGateThreadRunning();
+ }
+ else
+ {
+ GrowCompletionPortThreadpoolIfNeeded();
+ }
+
+ {
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ ThreadLocaleHolder localeHolder;
+
+ ((LPOVERLAPPED_COMPLETION_ROUTINE) key)(errorCode, numBytes, pOverlapped);
+ }
+
+ if (pThread == NULL) {
+ pThread = GetThread();
+ }
+ if (pThread) {
+ if (pThread->IsAbortRequested())
+ pThread->EEResetAbort(Thread::TAR_ALL);
+ pThread->InternalReset(FALSE);
+ }
+ }
+ else
+ {
+ // Application bug - can't do much, just ignore it
+ }
+
+ }
+
+ } // for (;;)
+
+Exit:
+
+ oldCounts = CPThreadCounter.GetCleanCounts();
+
+ // we should never destroy or retire all IOCP threads, because then we won't have any threads to notice incoming completions.
+ _ASSERTE(oldCounts.NumActive > 0);
+
+ FireEtwIOThreadTerminate_V1(oldCounts.NumActive + oldCounts.NumRetired, oldCounts.NumRetired, GetClrInstanceId());
+
+#ifdef FEATURE_COMINTEROP
+ if (pThread) {
+ pThread->SetApartment(Thread::AS_Unknown, TRUE);
+ pThread->CoUninitialize();
+ }
+    // CoUninitialize the worker thread
+ if (fCoInited)
+ {
+ CoUninitialize();
+ }
+#endif
+
+ if (pThread) {
+ pThread->ClearThreadCPUGroupAffinity();
+
+ DestroyThread(pThread);
+ }
+
+ return 0;
+}
+
+LPOVERLAPPED ThreadpoolMgr::CompletionPortDispatchWorkWithinAppDomain(
+ Thread* pThread,
+ DWORD* pErrorCode,
+ DWORD* pNumBytes,
+ size_t* pKey,
+ DWORD adid)
+//
+//This function is called just after dispatching the previous BindIO callback
+//to managed code. It is a perf optimization: do a quick call to
+//GetQueuedCompletionStatus with a timeout of 0 ms. If there is work in the
+//same appdomain, dispatch it back immediately. If not, stick it in a well-known
+//place and reenter the target domain. The timeout of zero is chosen so as to
+//not delay appdomain unloads.
+//
+{
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ LPOVERLAPPED lpOverlapped=NULL;
+
+ BOOL status=FALSE;
+ OVERLAPPEDDATAREF overlapped=NULL;
+ BOOL ManagedCallback=FALSE;
+
+ if (CLRIoCompletionHosted())
+ {
+ return NULL;
+ }
+
+ *pErrorCode = S_OK;
+
+ LeaveRuntimeHolder holder((size_t)GetQueuedCompletionStatus);
+
+
+    //Very important:
+    //Do not change the timeout for GetQueuedCompletionStatus to a non-zero value.
+    //A non-zero value can cause the thread to block, leading to expensive context switches.
+    //In real-life scenarios we have observed that a packet is often not available immediately,
+    //but arrives very shortly afterwards (within a few hundred instructions); falling back to
+    //the VM in that case is cheaper than taking a context switch. Changing the timeout to a
+    //non-zero value can lead to performance degradations that are very hard to diagnose.
+
+ status = ::GetQueuedCompletionStatus(
+ GlobalCompletionPort,
+ pNumBytes,
+ (PULONG_PTR)pKey,
+ &lpOverlapped,
+ 0);
+
+ DWORD lastError = GetLastError();
+
+ if (status == 0)
+ {
+ if (lpOverlapped != NULL)
+ {
+ *pErrorCode = lastError;
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+
+ if (((LPOVERLAPPED_COMPLETION_ROUTINE) *pKey) != BindIoCompletionCallbackStub)
+ {
+ //_ASSERTE(FALSE);
+ }
+ else
+ {
+ ManagedCallback = TRUE;
+ overlapped = ObjectToOVERLAPPEDDATAREF(OverlappedDataObject::GetOverlapped(lpOverlapped));
+ }
+
+ if (ManagedCallback && (overlapped->GetAppDomainId() == adid))
+ {
+ _ASSERTE(*pKey != 0); // should be a valid function address
+
+    if (*pKey == 0)
+ {
+ //Application Bug.
+ return NULL;
+ }
+
+ }
+ else
+ {
+        //Just returned from managed code; a Thread structure should exist.
+        _ASSERTE (pThread);
+
+        //Oops, this is an overlapped from a different appdomain. Stick it in
+        //the thread; we will process it later.
+
+ StoreOverlappedInfoInThread(pThread, *pErrorCode, *pNumBytes, *pKey, lpOverlapped);
+
+ lpOverlapped = NULL;
+ }
+
+#ifndef DACCESS_COMPILE
+ return lpOverlapped;
+#endif
+}
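+
+// Illustrative sketch (not compiled; not part of the original change): the
+// non-blocking poll pattern used above. With a timeout of zero,
+// GetQueuedCompletionStatus never blocks, and the three possible outcomes are
+// distinguished by the return value and the returned OVERLAPPED pointer.
+// PollCompletionPortOnce is a hypothetical helper name.
+#if 0
+void PollCompletionPortOnce(HANDLE hPort)
+{
+    DWORD        numBytes = 0;
+    ULONG_PTR    key      = 0;
+    LPOVERLAPPED pOv      = NULL;
+
+    if (::GetQueuedCompletionStatus(hPort, &numBytes, &key, &pOv, 0 /* no wait */))
+    {
+        // a completion packet for a successful I/O was dequeued
+    }
+    else if (pOv != NULL)
+    {
+        // a packet for a failed I/O was dequeued; GetLastError() has the error code
+    }
+    else
+    {
+        // nothing was queued (GetLastError() == WAIT_TIMEOUT); return to the caller
+    }
+}
+#endif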
+
+void ThreadpoolMgr::StoreOverlappedInfoInThread(Thread* pThread, DWORD dwErrorCode, DWORD dwNumBytes, size_t key, LPOVERLAPPED lpOverlapped)
+{
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_ANY;
+ STATIC_CONTRACT_SO_TOLERANT;
+
+ _ASSERTE(pThread);
+
+ PIOCompletionContext context;
+
+ context = (PIOCompletionContext) pThread->GetIOCompletionContext();
+
+ _ASSERTE(context);
+
+ context->ErrorCode = dwErrorCode;
+ context->numBytesTransferred = dwNumBytes;
+ context->lpOverlapped = lpOverlapped;
+ context->key = key;
+}
+
+BOOL ThreadpoolMgr::ShouldGrowCompletionPortThreadpool(ThreadCounter::Counts counts)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ if (counts.NumWorking >= counts.NumActive
+ && NumCPInfrastructureThreads == 0
+ && (counts.NumActive == 0 || !GCHeap::IsGCInProgress(TRUE))
+ )
+ {
+        // adjust limit if needed
+ if (counts.NumRetired == 0)
+ {
+ if (counts.NumActive + counts.NumRetired < MaxLimitTotalCPThreads &&
+ (counts.NumActive < MinLimitTotalCPThreads || cpuUtilization < CpuUtilizationLow))
+ {
+                // add one more check to make sure that we haven't fired off a new
+                // thread since the last time we checked the cpu utilization.
+                // However, don't bother if we haven't reached the MinLimit (2 * number of CPUs)
+ if ((counts.NumActive < MinLimitTotalCPThreads) ||
+ SufficientDelaySinceLastSample(LastCPThreadCreation,counts.NumActive))
+ {
+ return TRUE;
+ }
+ }
+ }
+
+ if (counts.NumRetired > 0)
+ return TRUE;
+ }
+ return FALSE;
+}
+
+void ThreadpoolMgr::GrowCompletionPortThreadpoolIfNeeded()
+{
+ CONTRACTL
+ {
+ if (GetThread()) { GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ThreadCounter::Counts oldCounts, newCounts;
+ while (true)
+ {
+ oldCounts = CPThreadCounter.GetCleanCounts();
+ newCounts = oldCounts;
+
+ if(!ShouldGrowCompletionPortThreadpool(oldCounts))
+ {
+ break;
+ }
+ else
+ {
+ if (oldCounts.NumRetired > 0)
+ {
+ // wakeup retired thread instead
+ RetiredCPWakeupEvent->Set();
+ return;
+ }
+ else
+ {
+ // create a new thread. New IOCP threads start as "active" and "working"
+ newCounts.NumActive++;
+ newCounts.NumWorking++;
+ if (oldCounts == CPThreadCounter.CompareExchangeCounts(newCounts, oldCounts))
+ {
+ if (!CreateCompletionPortThread(NULL))
+ {
+ // if thread creation failed, we have to adjust the counts back down.
+ while (true)
+ {
+ oldCounts = CPThreadCounter.GetCleanCounts();
+ newCounts = oldCounts;
+ newCounts.NumActive--;
+ newCounts.NumWorking--;
+ if (oldCounts == CPThreadCounter.CompareExchangeCounts(newCounts, oldCounts))
+ break;
+ }
+ }
+ return;
+ }
+ }
+ }
+ }
+}
+#endif // !FEATURE_PAL
+
+// Returns true if there is pending io on the thread.
+BOOL ThreadpoolMgr::IsIoPending()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+ int Status;
+ ULONG IsIoPending;
+
+ if (g_pufnNtQueryInformationThread)
+ {
+ Status =(int) (*g_pufnNtQueryInformationThread)(GetCurrentThread(),
+ ThreadIsIoPending,
+ &IsIoPending,
+ sizeof(IsIoPending),
+ NULL);
+
+
+ if ((Status < 0) || IsIoPending)
+ return TRUE;
+ else
+ return FALSE;
+ }
+ return TRUE;
+#else
+ return FALSE;
+#endif // !FEATURE_PAL
+}
+
+#ifndef FEATURE_PAL
+
+#ifdef _MSC_VER
+#ifdef _WIN64
+#pragma warning (disable : 4716)
+#else
+#pragma warning (disable : 4715)
+#endif
+#endif
+
+int ThreadpoolMgr::GetCPUBusyTime_NT(PROCESS_CPU_INFORMATION* pOldInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PROCESS_CPU_INFORMATION newUsage;
+ newUsage.idleTime.QuadPart = 0;
+ newUsage.kernelTime.QuadPart = 0;
+ newUsage.userTime.QuadPart = 0;
+
+ if (CPUGroupInfo::CanEnableGCCPUGroups() && CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
+ {
+#if !defined(FEATURE_CORECLR)
+ FILETIME newIdleTime, newKernelTime, newUserTime;
+
+ CPUGroupInfo::GetSystemTimes(&newIdleTime, &newKernelTime, &newUserTime);
+ newUsage.idleTime.u.LowPart = newIdleTime.dwLowDateTime;
+ newUsage.idleTime.u.HighPart = newIdleTime.dwHighDateTime;
+ newUsage.kernelTime.u.LowPart = newKernelTime.dwLowDateTime;
+ newUsage.kernelTime.u.HighPart = newKernelTime.dwHighDateTime;
+ newUsage.userTime.u.LowPart = newUserTime.dwLowDateTime;
+ newUsage.userTime.u.HighPart = newUserTime.dwHighDateTime;
+#endif
+ }
+ else
+ {
+ (*g_pufnNtQuerySystemInformation)(SystemProcessorPerformanceInformation,
+ pOldInfo->usageBuffer,
+ pOldInfo->usageBufferSize,
+ NULL);
+
+ SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION* pInfoArray = pOldInfo->usageBuffer;
+ DWORD_PTR pmask = pOldInfo->affinityMask;
+
+ int proc_no = 0;
+ while (pmask)
+ {
+ if (pmask & 1)
+            { //overflow is not a practical concern: ~28823 years with 1 CPU, 100+ years with 256 CPUs
+ newUsage.idleTime.QuadPart += pInfoArray[proc_no].IdleTime.QuadPart;
+ newUsage.kernelTime.QuadPart += pInfoArray[proc_no].KernelTime.QuadPart;
+ newUsage.userTime.QuadPart += pInfoArray[proc_no].UserTime.QuadPart;
+ }
+
+ pmask >>=1;
+ proc_no++;
+ }
+ }
+
+ __int64 cpuTotalTime, cpuBusyTime;
+
+ cpuTotalTime = (newUsage.userTime.QuadPart - pOldInfo->userTime.QuadPart) +
+ (newUsage.kernelTime.QuadPart - pOldInfo->kernelTime.QuadPart);
+ cpuBusyTime = cpuTotalTime -
+ (newUsage.idleTime.QuadPart - pOldInfo->idleTime.QuadPart);
+
+ // Preserve reading
+ pOldInfo->idleTime = newUsage.idleTime;
+ pOldInfo->kernelTime = newUsage.kernelTime;
+ pOldInfo->userTime = newUsage.userTime;
+
+ __int64 reading = 0;
+
+ if (cpuTotalTime > 0)
+ reading = ((cpuBusyTime * 100) / cpuTotalTime);
+
+ _ASSERTE(FitsIn<int>(reading));
+ return (int)reading;
+}
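+
+// Worked example (hypothetical numbers; not compiled) of the computation above. On NT
+// the per-processor kernel time includes idle time, so total = user + kernel and
+// busy = total - idle. With deltas of user = 600, kernel = 400 and idle = 300 (in
+// 100ns units), total = 1000 and busy = 700, so the function reports 70.
+#if 0
+static int BusyPercent(__int64 userDelta, __int64 kernelDelta, __int64 idleDelta)
+{
+    __int64 total = userDelta + kernelDelta;    // kernel time includes idle time
+    __int64 busy  = total - idleDelta;
+    return (total > 0) ? (int)((busy * 100) / total) : 0;
+}
+#endif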
+
+#else // !FEATURE_PAL
+
+int ThreadpoolMgr::GetCPUBusyTime_NT(PAL_IOCP_CPU_INFORMATION* pOldInfo)
+{
+ return PAL_GetCPUBusyTime(pOldInfo);
+}
+
+#endif // !FEATURE_PAL
+
+//
+// A timer that ticks every GATE_THREAD_DELAY milliseconds.
+// On platforms that support it, we use a coalescable waitable timer object.
+// For other platforms, we use Sleep, via __SwitchToThread.
+//
+class GateThreadTimer
+{
+#ifndef FEATURE_PAL
+ HANDLE m_hTimer;
+
+public:
+ GateThreadTimer()
+ : m_hTimer(NULL)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ if (g_pufnCreateWaitableTimerEx && g_pufnSetWaitableTimerEx)
+ {
+ m_hTimer = g_pufnCreateWaitableTimerEx(NULL, NULL, 0, TIMER_ALL_ACCESS);
+ if (m_hTimer)
+ {
+ //
+ // Set the timer to fire GATE_THREAD_DELAY milliseconds from now, then every GATE_THREAD_DELAY milliseconds thereafter.
+            // We also set the tolerance to GATE_THREAD_DELAY_TOLERANCE, allowing the OS to coalesce this timer.
+ //
+ LARGE_INTEGER dueTime;
+ dueTime.QuadPart = MILLI_TO_100NANO(-(LONGLONG)GATE_THREAD_DELAY); //negative value indicates relative time
+ if (!g_pufnSetWaitableTimerEx(m_hTimer, &dueTime, GATE_THREAD_DELAY, NULL, NULL, NULL, GATE_THREAD_DELAY_TOLERANCE))
+ {
+ CloseHandle(m_hTimer);
+ m_hTimer = NULL;
+ }
+ }
+ }
+ }
+
+ ~GateThreadTimer()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ if (m_hTimer)
+ {
+ CloseHandle(m_hTimer);
+ m_hTimer = NULL;
+ }
+ }
+
+#endif // !FEATURE_PAL
+
+public:
+ void Wait()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+ if (m_hTimer)
+ WaitForSingleObject(m_hTimer, INFINITE);
+ else
+#endif // !FEATURE_PAL
+ __SwitchToThread(GATE_THREAD_DELAY, CALLER_LIMITS_SPINNING);
+ }
+};
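+
+// Usage sketch (not compiled): GateThreadStart below constructs one GateThreadTimer
+// and calls Wait() once per loop iteration; each Wait() returns roughly
+// GATE_THREAD_DELAY milliseconds after the previous tick, with up to
+// GATE_THREAD_DELAY_TOLERANCE of coalescing slack when the waitable timer is available.
+#if 0
+static void GateLoopSketch()
+{
+    GateThreadTimer timer;
+    for (;;)
+    {
+        timer.Wait();   // ticks roughly every GATE_THREAD_DELAY milliseconds
+        // ... periodic gate-thread work ...
+    }
+}
+#endif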
+
+
+DWORD __stdcall ThreadpoolMgr::GateThreadStart(LPVOID lpArgs)
+{
+ ClrFlsSetThreadType (ThreadType_Gate);
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ // The gate thread is only needed if the CLR is providing part of the ThreadPool implementation.
+ _ASSERTE(!CLRThreadpoolHosted() || !CLRIoCompletionHosted());
+
+ _ASSERTE(GateThreadStatus == GATE_THREAD_STATUS_REQUESTED);
+
+ GateThreadTimer timer;
+
+ // TODO: do we need to do this?
+ timer.Wait(); // delay getting initial CPU reading
+
+#ifndef FEATURE_PAL
+ PROCESS_CPU_INFORMATION prevCPUInfo;
+
+ if (!g_pufnNtQuerySystemInformation)
+ {
+ _ASSERT(!"NtQuerySystemInformation API not available!");
+ return 0;
+ }
+
+ //GateThread can start before EESetup, so ensure CPU group information is initialized;
+ CPUGroupInfo::EnsureInitialized();
+
+ // initialize CPU usage information structure;
+ prevCPUInfo.idleTime.QuadPart = 0;
+ prevCPUInfo.kernelTime.QuadPart = 0;
+ prevCPUInfo.userTime.QuadPart = 0;
+
+ PREFIX_ASSUME(NumberOfProcessors < 65536);
+ prevCPUInfo.numberOfProcessors = NumberOfProcessors;
+
+    /* In the following cases, the affinity mask can be zero:
+     * 1. Hosted: the hosted process already uses multiple CPU groups;
+     *    thus, during CLR initialization, GetCurrentProcessCpuCount() returns 64, and GC threads
+     *    are created to fill up the initial CPU group. ==> use g_SystemInfo.dwNumberOfProcessors
+     * 2. GCCpuGroups=1: the CLR creates GC threads for all processors in all CPU groups;
+     *    thus, the threadpool thread would use a whole CPU group (if Thread_UseAllCpuGroups is not set).
+     *    ==> use g_SystemInfo.dwNumberOfProcessors.
+     * 3. !defined(FEATURE_PAL) but defined(FEATURE_CORESYSTEM): GetCurrentProcessCpuCount()
+     *    returns g_SystemInfo.dwNumberOfProcessors. ==> use g_SystemInfo.dwNumberOfProcessors
+     * Other cases:
+     * 1. Normal case: the mask is all or a subset of all processors in a CPU group;
+     * 2. GCCpuGroups=1 && Thread_UseAllCpuGroups=1: the mask is not used.
+     */
+ prevCPUInfo.affinityMask = GetCurrentProcessCpuMask();
+ if (prevCPUInfo.affinityMask == 0)
+ { // create a mask that has g_SystemInfo.dwNumberOfProcessors;
+ DWORD_PTR mask = 0, maskpos = 1;
+ for (unsigned int i=0; i < g_SystemInfo.dwNumberOfProcessors; i++)
+ {
+ mask |= maskpos;
+ maskpos <<= 1;
+ }
+ prevCPUInfo.affinityMask = mask;
+ }
+
+    // In some cases GetCurrentProcessCpuCount() returns a number larger than
+    // g_SystemInfo.dwNumberOfProcessors when there are CPU groups; use the larger
+    // of the two to size the buffer. The buffer must be cleared with zeros to get
+    // correct CPU usage statistics.
+ int elementsNeeded = NumberOfProcessors > g_SystemInfo.dwNumberOfProcessors ?
+ NumberOfProcessors : g_SystemInfo.dwNumberOfProcessors;
+ if (!ClrSafeInt<int>::multiply(elementsNeeded, sizeof(SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION),
+ prevCPUInfo.usageBufferSize))
+ return 0;
+
+ prevCPUInfo.usageBuffer = (SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION *)alloca(prevCPUInfo.usageBufferSize);
+ if (prevCPUInfo.usageBuffer == NULL)
+ return 0;
+
+ memset((void *)prevCPUInfo.usageBuffer, 0, prevCPUInfo.usageBufferSize); //must clear it with 0s
+
+ GetCPUBusyTime_NT(&prevCPUInfo);
+#else // !FEATURE_PAL
+ PAL_IOCP_CPU_INFORMATION prevCPUInfo;
+ GetCPUBusyTime_NT(&prevCPUInfo); // ignore return value the first time
+#endif // !FEATURE_PAL
+
+ BOOL IgnoreNextSample = FALSE;
+
+ do
+ {
+ timer.Wait();
+
+ if(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_EnableWorkerTracking))
+ FireEtwThreadPoolWorkingThreadCount(TakeMaxWorkingThreadCount(), GetClrInstanceId());
+
+#ifdef DEBUGGING_SUPPORTED
+ // if we are stopped at a debug breakpoint, go back to sleep
+ if (CORDebuggerAttached() && g_pDebugInterface->IsStopped())
+ continue;
+#endif // DEBUGGING_SUPPORTED
+
+ if(g_IsPaused)
+ {
+ _ASSERTE(g_ClrResumeEvent.IsValid());
+ EX_TRY {
+ g_ClrResumeEvent.Wait(INFINITE, TRUE);
+ }
+ EX_CATCH {
+ // Assert on debug builds
+ _ASSERTE(FALSE);
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ if (!GCHeap::IsGCInProgress(FALSE) )
+ {
+ if (IgnoreNextSample)
+ {
+ IgnoreNextSample = FALSE;
+ int cpuUtilizationTemp = GetCPUBusyTime_NT(&prevCPUInfo); // updates prevCPUInfo as side effect
+ // don't artificially drive down average if cpu is high
+ if (cpuUtilizationTemp <= CpuUtilizationLow)
+ cpuUtilization = CpuUtilizationLow + 1;
+ else
+ cpuUtilization = cpuUtilizationTemp;
+ }
+ else
+ {
+ cpuUtilization = GetCPUBusyTime_NT(&prevCPUInfo); // updates prevCPUInfo as side effect
+ }
+ }
+ else
+ {
+ int cpuUtilizationTemp = GetCPUBusyTime_NT(&prevCPUInfo); // updates prevCPUInfo as side effect
+ // don't artificially drive down average if cpu is high
+ if (cpuUtilizationTemp <= CpuUtilizationLow)
+ cpuUtilization = CpuUtilizationLow + 1;
+ else
+ cpuUtilization = cpuUtilizationTemp;
+ IgnoreNextSample = TRUE;
+ }
+
+#ifndef FEATURE_PAL
+ // don't mess with CP thread pool settings if not initialized yet
+ if (InitCompletionPortThreadpool)
+ {
+ _ASSERTE (!CLRIoCompletionHosted());
+
+ ThreadCounter::Counts oldCounts, newCounts;
+ oldCounts = CPThreadCounter.GetCleanCounts();
+
+ if (oldCounts.NumActive == oldCounts.NumWorking &&
+ oldCounts.NumRetired == 0 &&
+ oldCounts.NumActive < MaxLimitTotalCPThreads &&
+ !g_fCompletionPortDrainNeeded &&
+ NumCPInfrastructureThreads == 0 && // infrastructure threads count as "to be free as needed"
+ !GCHeap::IsGCInProgress(TRUE))
+
+ {
+ BOOL status;
+ DWORD numBytes;
+ size_t key;
+ LPOVERLAPPED pOverlapped;
+ DWORD errorCode;
+
+ errorCode = S_OK;
+
+ status = GetQueuedCompletionStatus(
+ GlobalCompletionPort,
+ &numBytes,
+ (PULONG_PTR)&key,
+ &pOverlapped,
+ 0 // immediate return
+ );
+
+ if (status == 0)
+ {
+ errorCode = GetLastError();
+ }
+
+ if(pOverlapped == &overlappedForContinueCleanup)
+ {
+ // if we picked up a "Continue Drainage" notification DO NOT create a new CP thread
+ }
+ else
+ if (errorCode != WAIT_TIMEOUT)
+ {
+ QueuedStatus *CompletionStatus = NULL;
+
+ // loop, retrying until memory is allocated. Under such conditions the gate
+ // thread is not useful anyway, so I feel comfortable with this behavior
+ do
+ {
+ // make sure to free mem later in thread
+ CompletionStatus = new (nothrow) QueuedStatus;
+ if (CompletionStatus == NULL)
+ {
+ __SwitchToThread(GATE_THREAD_DELAY, CALLER_LIMITS_SPINNING);
+ }
+ }
+ while (CompletionStatus == NULL);
+
+ CompletionStatus->numBytes = numBytes;
+ CompletionStatus->key = (PULONG_PTR)key;
+ CompletionStatus->pOverlapped = pOverlapped;
+ CompletionStatus->errorCode = errorCode;
+
+ // IOCP threads are created as "active" and "working"
+ while (true)
+ {
+ oldCounts = CPThreadCounter.GetCleanCounts();
+ newCounts = oldCounts;
+ newCounts.NumActive++;
+ newCounts.NumWorking++;
+ if (oldCounts == CPThreadCounter.CompareExchangeCounts(newCounts, oldCounts))
+ break;
+ }
+
+ // loop, retrying until thread is created.
+ while (!CreateCompletionPortThread((LPVOID)CompletionStatus))
+ {
+ __SwitchToThread(GATE_THREAD_DELAY, CALLER_LIMITS_SPINNING);
+ }
+ }
+ }
+ else if (cpuUtilization < CpuUtilizationLow)
+ {
+ // this could be an indication that threads might be getting blocked or there is no work
+ if (oldCounts.NumWorking == oldCounts.NumActive && // don't bump the limit if there are already free threads
+ oldCounts.NumRetired > 0)
+ {
+ RetiredCPWakeupEvent->Set();
+ }
+ }
+ }
+#endif // !FEATURE_PAL
+
+ if (!CLRThreadpoolHosted() &&
+ (0 == CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_DisableStarvationDetection)))
+ {
+ if (PerAppDomainTPCountList::AreRequestsPendingInAnyAppDomains() && SufficientDelaySinceLastDequeue())
+ {
+ DangerousNonHostedSpinLockHolder tal(&ThreadAdjustmentLock);
+
+ ThreadCounter::Counts counts = WorkerCounter.GetCleanCounts();
+ while (counts.NumActive < MaxLimitTotalWorkerThreads && //don't add a thread if we're at the max
+ counts.NumActive >= counts.MaxWorking) //don't add a thread if we're already in the process of adding threads
+ {
+ bool breakIntoDebugger = (0 != CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_DebugBreakOnWorkerStarvation));
+ if (breakIntoDebugger)
+ {
+ OutputDebugStringW(W("The CLR ThreadPool detected work queue starvation!"));
+ DebugBreak();
+ }
+
+ ThreadCounter::Counts newCounts = counts;
+ newCounts.MaxWorking = newCounts.NumActive + 1;
+
+ ThreadCounter::Counts oldCounts = WorkerCounter.CompareExchangeCounts(newCounts, counts);
+ if (oldCounts == counts)
+ {
+ HillClimbingInstance.ForceChange(newCounts.MaxWorking, Starvation);
+ MaybeAddWorkingWorker();
+ break;
+ }
+ else
+ {
+ counts = oldCounts;
+ }
+ }
+ }
+ }
+ }
+ while (ShouldGateThreadKeepRunning());
+
+ return 0;
+}
+
+// called by logic to spawn a new completion port thread.
+// return false if not enough time has elapsed since the last
+// time we sampled the cpu utilization.
+BOOL ThreadpoolMgr::SufficientDelaySinceLastSample(unsigned int LastThreadCreationTime,
+ unsigned NumThreads, // total number of threads of that type (worker or CP)
+ double throttleRate // the delay is increased by this percentage for each extra thread
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+
+ unsigned dwCurrentTickCount = GetTickCount();
+
+ unsigned delaySinceLastThreadCreation = dwCurrentTickCount - LastThreadCreationTime;
+
+ unsigned minWaitBetweenThreadCreation = GATE_THREAD_DELAY;
+
+ if (throttleRate > 0.0)
+ {
+ _ASSERTE(throttleRate <= 1.0);
+
+ unsigned adjustedThreadCount = NumThreads > NumberOfProcessors ? (NumThreads - NumberOfProcessors) : 0;
+
+ minWaitBetweenThreadCreation = (unsigned) (GATE_THREAD_DELAY * pow((1.0 + throttleRate),(double)adjustedThreadCount));
+ }
+    // the amount of time to wait should grow as the number of threads increases
+
+ return (delaySinceLastThreadCreation > minWaitBetweenThreadCreation);
+
+}
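+
+// Worked example (hypothetical numbers; not compiled) of the throttle curve above:
+// with GATE_THREAD_DELAY = 500ms, throttleRate = 0.1, NumberOfProcessors = 4 and
+// NumThreads = 10, adjustedThreadCount = 6 and the minimum wait becomes
+// 500 * 1.1^6, roughly 886ms; each thread beyond the processor count stretches
+// the required delay by another 10%.
+#if 0
+static unsigned MinWaitBetweenThreadCreationMs(unsigned numThreads, unsigned numProcs, double throttleRate)
+{
+    unsigned adjusted = (numThreads > numProcs) ? (numThreads - numProcs) : 0;
+    return (unsigned)(500 /* GATE_THREAD_DELAY, hypothetical value */ * pow(1.0 + throttleRate, (double)adjusted));
+}
+#endif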
+
+
+// called by logic to spawn new worker threads, return true if it's been too long
+// since the last dequeue operation - takes number of worker threads into account
+// in deciding "too long"
+BOOL ThreadpoolMgr::SufficientDelaySinceLastDequeue()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ #define DEQUEUE_DELAY_THRESHOLD (GATE_THREAD_DELAY * 2)
+
+ unsigned delay = GetTickCount() - LastDequeueTime;
+ unsigned tooLong;
+
+ if(cpuUtilization < CpuUtilizationLow)
+ {
+ tooLong = GATE_THREAD_DELAY;
+ }
+ else
+ {
+ ThreadCounter::Counts counts = WorkerCounter.GetCleanCounts();
+ unsigned numThreads = counts.MaxWorking;
+ tooLong = numThreads * DEQUEUE_DELAY_THRESHOLD;
+ }
+
+ return (delay > tooLong);
+
+}
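+
+// Worked example (hypothetical numbers): if GATE_THREAD_DELAY were 500ms and
+// counts.MaxWorking were 8, then with CPU utilization at or above CpuUtilizationLow
+// a dequeue gap longer than 8 * 2 * 500ms = 8 seconds would count as "too long".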
+
+
+#ifdef _MSC_VER
+#ifdef _WIN64
+#pragma warning (default : 4716)
+#else
+#pragma warning (default : 4715)
+#endif
+#endif
+
+/************************************************************************/
+
+struct CreateTimerThreadParams {
+ CLREvent event;
+ BOOL setupSucceeded;
+};
+
+BOOL ThreadpoolMgr::CreateTimerQueueTimer(PHANDLE phNewTimer,
+ WAITORTIMERCALLBACK Callback,
+ PVOID Parameter,
+ DWORD DueTime,
+ DWORD Period,
+ ULONG Flag)
+{
+ CONTRACTL
+ {
+ THROWS; // EnsureInitialized, CreateAutoEvent can throw
+ if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);} // There can be calls thru ICorThreadpool
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ EnsureInitialized();
+
+    // For now we use just one timer thread. Consider using multiple timer threads if
+    // the number of timers in the queue exceeds a certain threshold. The logic and code
+    // would be similar to the one for creating wait threads.
+ if (NULL == TimerThread)
+ {
+ CrstHolder csh(&TimerQueueCriticalSection);
+
+ // check again
+ if (NULL == TimerThread)
+ {
+ CreateTimerThreadParams params;
+ params.event.CreateAutoEvent(FALSE);
+ params.setupSucceeded = FALSE;
+
+ HANDLE TimerThreadHandle = Thread::CreateUtilityThread(Thread::StackSize_Small, TimerThreadStart, &params);
+
+ if (TimerThreadHandle == NULL)
+ {
+ params.event.CloseEvent();
+ ThrowOutOfMemory();
+ }
+
+ {
+ GCX_PREEMP();
+ for(;;)
+ {
+                    // if a host throws because it couldn't allocate another thread,
+                    // just retry the wait.
+ if (SafeWait(&params.event,INFINITE, FALSE) != WAIT_TIMEOUT)
+ break;
+ }
+ }
+ params.event.CloseEvent();
+
+ if (!params.setupSucceeded)
+ {
+ CloseHandle(TimerThreadHandle);
+ return FALSE;
+ }
+
+ TimerThread = TimerThreadHandle;
+ }
+
+ }
+
+
+ NewHolder<TimerInfo> timerInfoHolder;
+ TimerInfo * timerInfo = new (nothrow) TimerInfo;
+ *phNewTimer = (HANDLE) timerInfo;
+
+ if (NULL == timerInfo)
+ ThrowOutOfMemory();
+
+ timerInfoHolder.Assign(timerInfo);
+
+ timerInfo->FiringTime = DueTime;
+ timerInfo->Function = Callback;
+ timerInfo->Context = Parameter;
+ timerInfo->Period = Period;
+ timerInfo->state = 0;
+ timerInfo->flag = Flag;
+ timerInfo->ExternalCompletionEvent = INVALID_HANDLE;
+ timerInfo->ExternalEventSafeHandle = NULL;
+ timerInfo->handleOwningAD = (ADID) 0;
+
+ BOOL status = QueueUserAPC((PAPCFUNC)InsertNewTimer,TimerThread,(size_t)timerInfo);
+ if (FALSE == status)
+ {
+ return FALSE;
+ }
+
+ timerInfoHolder.SuppressRelease();
+ return TRUE;
+}
+
+#ifdef _MSC_VER
+#ifdef _WIN64
+#pragma warning (disable : 4716)
+#else
+#pragma warning (disable : 4715)
+#endif
+#endif
+DWORD __stdcall ThreadpoolMgr::TimerThreadStart(LPVOID p)
+{
+ ClrFlsSetThreadType (ThreadType_Timer);
+
+ STATIC_CONTRACT_THROWS;
+ STATIC_CONTRACT_GC_TRIGGERS; // due to SetApartment
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+ STATIC_CONTRACT_SO_INTOLERANT;
+ /* cannot use contract because of SEH
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;*/
+
+ CreateTimerThreadParams* params = (CreateTimerThreadParams*)p;
+
+ Thread* pThread = SetupThreadNoThrow();
+
+ params->setupSucceeded = (pThread == NULL) ? 0 : 1;
+ params->event.Set();
+
+ if (pThread == NULL)
+ return 0;
+
+ pTimerThread = pThread;
+ // Timer threads never die
+
+ LastTickCount = GetTickCount();
+
+#ifdef FEATURE_COMINTEROP
+ if (pThread->SetApartment(Thread::AS_InMTA, TRUE) != Thread::AS_InMTA)
+ {
+ // @todo: should we log the failure
+ goto Exit;
+ }
+#endif // FEATURE_COMINTEROP
+
+ for (;;)
+ {
+ // moved to its own function since EX_TRY consumes stack
+#ifdef _MSC_VER
+#pragma inline_depth (0) // the function containing EX_TRY can't be inlined here
+#endif
+ TimerThreadFire();
+#ifdef _MSC_VER
+#pragma inline_depth (20)
+#endif
+ }
+
+#ifdef FEATURE_COMINTEROP
+// unreachable code
+// if (pThread) {
+// pThread->SetApartment(Thread::AS_Unknown, TRUE);
+// }
+Exit:
+
+ // @todo: replace with host provided ExitThread
+ return 0;
+#endif
+}
+
+void ThreadpoolMgr::TimerThreadFire()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ EX_TRY {
+ DWORD timeout = FireTimers();
+
+#undef SleepEx
+ SleepEx(timeout, TRUE);
+#define SleepEx(a,b) Dont_Use_SleepEx(a,b)
+
+        // The thread could wake up either because an APC completed or because the sleep timed out.
+        // In both cases, we need to sweep the timer queue, firing timers and readjusting
+        // the next firing time.
+
+ }
+ EX_CATCH {
+ // Assert on debug builds since a dead timer thread is a fatal error
+ _ASSERTE(FALSE);
+ if (SwallowUnhandledExceptions())
+ {
+ // Do nothing to swallow the exception
+ }
+ else
+ {
+ EX_RETHROW;
+ }
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+#ifdef _MSC_VER
+#ifdef _WIN64
+#pragma warning (default : 4716)
+#else
+#pragma warning (default : 4715)
+#endif
+#endif
+
+// Executed as an APC in timer thread
+void ThreadpoolMgr::InsertNewTimer(TimerInfo* pArg)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ STATIC_CONTRACT_SO_INTOLERANT;
+
+ _ASSERTE(pArg);
+ TimerInfo * timerInfo = pArg;
+
+ if (timerInfo->state & TIMER_DELETE)
+ { // timer was deleted before it could be registered
+ DeleteTimer(timerInfo);
+ return;
+ }
+
+ // set the firing time = current time + due time (note initially firing time = due time)
+ DWORD currentTime = GetTickCount();
+ if (timerInfo->FiringTime == (ULONG) -1)
+ {
+ timerInfo->state = TIMER_REGISTERED;
+ timerInfo->refCount = 1;
+
+ }
+ else
+ {
+ timerInfo->FiringTime += currentTime;
+
+ timerInfo->state = (TIMER_REGISTERED | TIMER_ACTIVE);
+ timerInfo->refCount = 1;
+
+ // insert the timer in the queue
+ InsertTailList(&TimerQueue,(&timerInfo->link));
+ }
+
+ return;
+}
+
+
+// executed by the Timer thread
+// sweeps through the list of timers, readjusting the firing times, queueing work items for
+// those that have expired, and returns the next firing time interval
+DWORD ThreadpoolMgr::FireTimers()
+{
+ CONTRACTL
+ {
+ THROWS; // QueueUserWorkItem can throw
+ if (GetThread()) { GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ if (GetThread()) { MODE_PREEMPTIVE;} else { DISABLED(MODE_ANY);}
+ }
+ CONTRACTL_END;
+
+ DWORD currentTime = GetTickCount();
+ DWORD nextFiringInterval = (DWORD) -1;
+ TimerInfo* timerInfo = NULL;
+
+ EX_TRY
+ {
+ for (LIST_ENTRY* node = (LIST_ENTRY*) TimerQueue.Flink;
+ node != &TimerQueue;
+ )
+ {
+ timerInfo = (TimerInfo*) node;
+ node = (LIST_ENTRY*) node->Flink;
+
+ if (TimeExpired(LastTickCount, currentTime, timerInfo->FiringTime))
+ {
+ if (timerInfo->Period == 0 || timerInfo->Period == (ULONG) -1)
+ {
+ DeactivateTimer(timerInfo);
+ }
+
+ InterlockedIncrement(&timerInfo->refCount);
+
+ QueueUserWorkItem(AsyncTimerCallbackCompletion,
+ timerInfo,
+ QUEUE_ONLY /* TimerInfo take care of deleting*/);
+
+ timerInfo->FiringTime = currentTime+timerInfo->Period;
+
+ if ((timerInfo->Period != 0) && (timerInfo->Period != (ULONG) -1) && (nextFiringInterval > timerInfo->Period))
+ nextFiringInterval = timerInfo->Period;
+ }
+
+ else
+ {
+ DWORD firingInterval = TimeInterval(timerInfo->FiringTime,currentTime);
+ if (firingInterval < nextFiringInterval)
+ nextFiringInterval = firingInterval;
+ }
+ }
+ }
+ EX_CATCH
+ {
+        // If QueueUserWorkItem throws OOM, swallow the exception and retry on
+        // the next call to FireTimers(); otherwise rethrow.
+        Exception *ex = GET_EXCEPTION();
+        // undo the call to DeactivateTimer(): restore the refCount, the TIMER_ACTIVE bit, and the queue link
+        InterlockedDecrement(&timerInfo->refCount);
+        timerInfo->state = timerInfo->state | TIMER_ACTIVE;
+ InsertTailList(&TimerQueue, (&timerInfo->link));
+ if (ex->GetHR() != E_OUTOFMEMORY)
+ {
+ EX_RETHROW;
+ }
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ LastTickCount = currentTime;
+
+ return nextFiringInterval;
+}
+
+DWORD __stdcall ThreadpoolMgr::AsyncTimerCallbackCompletion(PVOID pArgs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ Thread* pThread = GetThread();
+
+ if (pThread == NULL)
+ {
+ HRESULT hr = ERROR_SUCCESS;
+
+ ClrFlsSetThreadType(ThreadType_Threadpool_Worker);
+ pThread = SetupThreadNoThrow(&hr);
+
+ if (pThread == NULL)
+ {
+ return hr;
+ }
+ }
+
+ BEGIN_SO_INTOLERANT_CODE(pThread);
+ {
+ TimerInfo* timerInfo = (TimerInfo*) pArgs;
+ ((WAITORTIMERCALLBACKFUNC) timerInfo->Function) (timerInfo->Context, TRUE) ;
+
+ if (InterlockedDecrement(&timerInfo->refCount) == 0)
+ {
+ DeleteTimer(timerInfo);
+ }
+ }
+ END_SO_INTOLERANT_CODE;
+
+ return ERROR_SUCCESS;
+}
+
+
+// removes the timer from the timer queue, thereby cancelling it
+// there may still be pending callbacks that haven't completed
+void ThreadpoolMgr::DeactivateTimer(TimerInfo* timerInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ RemoveEntryList((LIST_ENTRY*) timerInfo);
+
+ // This timer info could go into another linked list of timer infos
+ // waiting to be released. Reinitialize the list pointers
+ InitializeListHead(&timerInfo->link);
+ timerInfo->state = timerInfo->state & ~TIMER_ACTIVE;
+}
+
+DWORD __stdcall ThreadpoolMgr::AsyncDeleteTimer(PVOID pArgs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_PREEMPTIVE;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ Thread * pThread = GetThread();
+
+ if (pThread == NULL)
+ {
+ HRESULT hr = ERROR_SUCCESS;
+
+ ClrFlsSetThreadType(ThreadType_Threadpool_Worker);
+ pThread = SetupThreadNoThrow(&hr);
+
+ if (pThread == NULL)
+ {
+ return hr;
+ }
+ }
+
+ DeleteTimer((TimerInfo*) pArgs);
+
+ return ERROR_SUCCESS;
+}
+
+void ThreadpoolMgr::DeleteTimer(TimerInfo* timerInfo)
+{
+ CONTRACTL
+ {
+ if (GetThread() == pTimerThread) { NOTHROW; } else { THROWS; }
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE((timerInfo->state & TIMER_ACTIVE) == 0);
+
+ _ASSERTE(!(timerInfo->flag & WAIT_FREE_CONTEXT));
+
+ if (timerInfo->flag & WAIT_INTERNAL_COMPLETION)
+ {
+ timerInfo->InternalCompletionEvent.Set();
+ return; // the timerInfo will be deleted by the thread that's waiting on InternalCompletionEvent
+ }
+
+ // ExternalCompletionEvent comes from Host, ExternalEventSafeHandle from managed code.
+ // They are mutually exclusive.
+ _ASSERTE(!(timerInfo->ExternalCompletionEvent != INVALID_HANDLE &&
+ timerInfo->ExternalEventSafeHandle != NULL));
+
+ if (timerInfo->ExternalCompletionEvent != INVALID_HANDLE)
+ {
+ UnsafeSetEvent(timerInfo->ExternalCompletionEvent);
+ timerInfo->ExternalCompletionEvent = INVALID_HANDLE;
+ }
+
+ // We cannot block the timer thread, so some cleanup is deferred to other threads.
+ if (GetThread() == pTimerThread)
+ {
+        // Notify the ExternalEventSafeHandle with a user work item
+ if (timerInfo->ExternalEventSafeHandle != NULL)
+ {
+ BOOL success = FALSE;
+ EX_TRY
+ {
+ if (QueueUserWorkItem(AsyncDeleteTimer,
+ timerInfo,
+ QUEUE_ONLY) != FALSE)
+ {
+ success = TRUE;
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+            // If unable to queue a user work item, fall back to queueing the timer for release,
+            // which will happen *sometime* in the future.
+ if (success == FALSE)
+ {
+ QueueTimerInfoForRelease(timerInfo);
+ }
+
+ return;
+ }
+
+        // Releasing GC handles can block, so we won't do this on the timer thread.
+        // We'll put it in a list that will be processed by a worker thread.
+ if (timerInfo->Context != NULL)
+ {
+ QueueTimerInfoForRelease(timerInfo);
+ return;
+ }
+ }
+
+ // To get here we are either not the Timer thread or there is no blocking work to be done
+
+ if (timerInfo->Context != NULL)
+ {
+ GCX_COOP();
+ DelegateInfo *pDelInfo = (DelegateInfo *)timerInfo->Context;
+ pDelInfo->Release();
+ RecycleMemory( pDelInfo, MEMTYPE_DelegateInfo );
+ }
+
+ if (timerInfo->ExternalEventSafeHandle != NULL)
+ {
+ ReleaseTimerInfo(timerInfo);
+ }
+
+ delete timerInfo;
+
+}
+
+// We add TimerInfos from deleted timers into a linked list.
+// A worker thread will later release the handles held by the TimerInfo
+// and recycle them if possible (See DelegateInfo::MakeDelegateInfo)
+void ThreadpoolMgr::QueueTimerInfoForRelease(TimerInfo *pTimerInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+    // The synchronization in this method depends on the fact that
+    //  - There is only one timer thread
+    //  - The one and only timer thread is executing this method.
+    //  - This function won't go into an alertable state. That could trigger another APC.
+    // Otherwise two threads could be queueing timer infos, and a race could
+    // lead to leaked memory and handles.
+ _ASSERTE(GetThread());
+ _ASSERTE(pTimerThread == GetThread());
+ TimerInfo *pHead = NULL;
+
+ // Make sure this timer info has been deactivated and removed from any other lists
+ _ASSERTE((pTimerInfo->state & TIMER_ACTIVE) == 0);
+ //_ASSERTE(pTimerInfo->link.Blink == &(pTimerInfo->link) &&
+ // pTimerInfo->link.Flink == &(pTimerInfo->link));
+ // Make sure "link" is the first field in TimerInfo
+ _ASSERTE(pTimerInfo == (PVOID)&pTimerInfo->link);
+
+ // Grab any previously published list
+ if ((pHead = InterlockedExchangeT(&TimerInfosToBeRecycled, NULL)) != NULL)
+ {
+ // If there already is a list, just append
+ InsertTailList((LIST_ENTRY *)pHead, &pTimerInfo->link);
+ pTimerInfo = pHead;
+ }
+ else
+ // If this is the head, make its next and previous ptrs point to itself
+ InitializeListHead((LIST_ENTRY*)&pTimerInfo->link);
+
+ // Publish the list
+ (void) InterlockedExchangeT(&TimerInfosToBeRecycled, pTimerInfo);
+
+}
+
+void ThreadpoolMgr::FlushQueueOfTimerInfos()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ TimerInfo *pHeadTimerInfo = NULL, *pCurrTimerInfo = NULL;
+ LIST_ENTRY *pNextInfo = NULL;
+
+ if ((pHeadTimerInfo = InterlockedExchangeT(&TimerInfosToBeRecycled, NULL)) == NULL)
+ return;
+
+ do
+ {
+ RemoveHeadList((LIST_ENTRY *)pHeadTimerInfo, pNextInfo);
+ _ASSERTE(pNextInfo != NULL);
+
+ pCurrTimerInfo = (TimerInfo *) pNextInfo;
+
+ GCX_COOP();
+ if (pCurrTimerInfo->Context != NULL)
+ {
+ DelegateInfo *pCurrDelInfo = (DelegateInfo *) pCurrTimerInfo->Context;
+ pCurrDelInfo->Release();
+
+ RecycleMemory( pCurrDelInfo, MEMTYPE_DelegateInfo );
+ }
+
+ if (pCurrTimerInfo->ExternalEventSafeHandle != NULL)
+ {
+ ReleaseTimerInfo(pCurrTimerInfo);
+ }
+
+ delete pCurrTimerInfo;
+
+ }
+ while ((TimerInfo *)pNextInfo != pHeadTimerInfo);
+}
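+
+// Illustrative sketch (not compiled) of the detach-append-publish pattern used by
+// QueueTimerInfoForRelease and drained above: the single producer atomically swaps
+// the shared head out, appends the new entry, and publishes the result; the consumer
+// detaches the entire list with one interlocked exchange. pNewInfo is a hypothetical
+// name for the TimerInfo being queued.
+#if 0
+// producer (timer thread only):
+TimerInfo *pHead = InterlockedExchangeT(&TimerInfosToBeRecycled, NULL);    // grab any published list
+if (pHead != NULL)
+    InsertTailList((LIST_ENTRY *)pHead, &pNewInfo->link);                  // append to it
+else
+    InitializeListHead(&pNewInfo->link);                                   // start a fresh list
+InterlockedExchangeT(&TimerInfosToBeRecycled, pHead != NULL ? pHead : pNewInfo);  // publish
+
+// consumer (any worker thread):
+TimerInfo *pList = InterlockedExchangeT(&TimerInfosToBeRecycled, NULL);    // detach everything at once
+#endif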
+
+/************************************************************************/
+BOOL ThreadpoolMgr::ChangeTimerQueueTimer(
+ HANDLE Timer,
+ ULONG DueTime,
+ ULONG Period)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ INJECT_FAULT(COMPlusThrowOM());
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsInitialized());
+ _ASSERTE(Timer); // not possible to give invalid handle in managed code
+
+ NewHolder<TimerUpdateInfo> updateInfoHolder;
+ TimerUpdateInfo *updateInfo = new TimerUpdateInfo;
+ updateInfoHolder.Assign(updateInfo);
+
+ updateInfo->Timer = (TimerInfo*) Timer;
+ updateInfo->DueTime = DueTime;
+ updateInfo->Period = Period;
+
+ BOOL status = QueueUserAPC((PAPCFUNC)UpdateTimer,
+ TimerThread,
+ (size_t) updateInfo);
+
+ if (status)
+ updateInfoHolder.SuppressRelease();
+
+ return(status);
+}
+
+void ThreadpoolMgr::UpdateTimer(TimerUpdateInfo* pArgs)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ TimerUpdateInfo* updateInfo = (TimerUpdateInfo*) pArgs;
+ TimerInfo* timerInfo = updateInfo->Timer;
+
+ timerInfo->Period = updateInfo->Period;
+
+ if (updateInfo->DueTime == (ULONG) -1)
+ {
+ if (timerInfo->state & TIMER_ACTIVE)
+ {
+ DeactivateTimer(timerInfo);
+ }
+ // else, noop (the timer was already inactive)
+ _ASSERTE((timerInfo->state & TIMER_ACTIVE) == 0);
+
+ delete updateInfo;
+ return;
+ }
+
+ DWORD currentTime = GetTickCount();
+ timerInfo->FiringTime = currentTime + updateInfo->DueTime;
+
+ delete updateInfo;
+
+ if (! (timerInfo->state & TIMER_ACTIVE))
+ {
+        // timer not active (probably a one-shot timer that has expired), so activate it
+ timerInfo->state |= TIMER_ACTIVE;
+ _ASSERTE(timerInfo->refCount >= 1);
+ // insert the timer in the queue
+ InsertTailList(&TimerQueue,(&timerInfo->link));
+
+ }
+
+ return;
+}
+
+/************************************************************************/
+BOOL ThreadpoolMgr::DeleteTimerQueueTimer(
+ HANDLE Timer,
+ HANDLE Event)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(IsInitialized()); // cannot call delete before creating timer
+ _ASSERTE(Timer); // not possible to give invalid handle in managed code
+
+    // Make the pointer volatile to keep the compiler from reordering the check after the async call;
+    // otherwise, DeregisterTimer could delete timerInfo before the comparison.
+ VolatilePtr<TimerInfo> timerInfo = (TimerInfo*) Timer;
+
+ if (Event == (HANDLE) -1)
+ {
+ //CONTRACT_VIOLATION(ThrowsViolation);
+ timerInfo->InternalCompletionEvent.CreateAutoEvent(FALSE);
+ timerInfo->flag |= WAIT_INTERNAL_COMPLETION;
+ }
+ else if (Event)
+ {
+ timerInfo->ExternalCompletionEvent = Event;
+ }
+#ifdef _DEBUG
+ else /* Event == NULL */
+ {
+ _ASSERTE(timerInfo->ExternalCompletionEvent == INVALID_HANDLE);
+ }
+#endif
+
+ BOOL isBlocking = timerInfo->flag & WAIT_INTERNAL_COMPLETION;
+
+ BOOL status = QueueUserAPC((PAPCFUNC)DeregisterTimer,
+ TimerThread,
+ (size_t)(TimerInfo*)timerInfo);
+
+ if (FALSE == status)
+ {
+ if (isBlocking)
+ timerInfo->InternalCompletionEvent.CloseEvent();
+ return FALSE;
+ }
+
+ if (isBlocking)
+ {
+ _ASSERTE(timerInfo->ExternalEventSafeHandle == NULL);
+ _ASSERTE(timerInfo->ExternalCompletionEvent == INVALID_HANDLE);
+ _ASSERTE(GetThread() != pTimerThread);
+
+ timerInfo->InternalCompletionEvent.Wait(INFINITE,TRUE /*alertable*/);
+ timerInfo->InternalCompletionEvent.CloseEvent();
+ // Release handles and delete TimerInfo
+ _ASSERTE(timerInfo->refCount == 0);
+ // if WAIT_INTERNAL_COMPLETION flag is not set, timerInfo will be deleted in DeleteTimer.
+ timerInfo->flag &= ~WAIT_INTERNAL_COMPLETION;
+ DeleteTimer(timerInfo);
+ }
+ return status;
+}
+
+void ThreadpoolMgr::DeregisterTimer(TimerInfo* pArgs)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ TimerInfo* timerInfo = (TimerInfo*) pArgs;
+
+ if (! (timerInfo->state & TIMER_REGISTERED) )
+ {
+ // set state to deleted, so that it does not get registered
+ timerInfo->state |= TIMER_DELETE ;
+
+        // since the timer has not even been registered, we don't need an interlock to decrease the refCount
+ timerInfo->refCount--;
+
+ return;
+ }
+
+ if (timerInfo->state & TIMER_ACTIVE)
+ {
+ DeactivateTimer(timerInfo);
+ }
+
+ if (InterlockedDecrement(&timerInfo->refCount) == 0 )
+ {
+ DeleteTimer(timerInfo);
+ }
+ return;
+}
+
+#endif // !DACCESS_COMPILE
diff --git a/src/vm/win32threadpool.h b/src/vm/win32threadpool.h
new file mode 100644
index 0000000000..20d2412c18
--- /dev/null
+++ b/src/vm/win32threadpool.h
@@ -0,0 +1,1392 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+/*++
+
+Module Name:
+
+ Win32ThreadPool.h
+
+Abstract:
+
+ This module is the header file for thread pools using Win32 APIs.
+
+Revision History:
+
+
+--*/
+
+#ifndef _WIN32THREADPOOL_H
+#define _WIN32THREADPOOL_H
+
+#include "delegateinfo.h"
+#include "util.hpp"
+#include "nativeoverlapped.h"
+#include "hillclimbing.h"
+
+#define MAX_WAITHANDLES 64
+
+#define MAX_CACHED_EVENTS 40 // upper limit on number of wait events cached
+
+#define WAIT_REGISTERED 0x01
+#define WAIT_ACTIVE 0x02
+#define WAIT_DELETE 0x04
+
+#define TIMER_REGISTERED 0x01
+#define TIMER_ACTIVE 0x02
+#define TIMER_DELETE 0x04
+
+#define WAIT_SINGLE_EXECUTION 0x00000001
+#define WAIT_FREE_CONTEXT 0x00000002
+#define WAIT_INTERNAL_COMPLETION 0x00000004
+
+#define QUEUE_ONLY 0x00000000 // do not attempt to call on the thread
+#define CALL_OR_QUEUE 0x00000001 // call on the same thread if not too busy, else queue
+
+const int MaxLimitThreadsPerCPU=250; // upper limit on number of cp threads per CPU
+const int MaxFreeCPThreadsPerCPU=2; // upper limit on number of free cp threads per CPU
+
+const int CpuUtilizationHigh=95; // remove threads when above this
+const int CpuUtilizationLow =80; // inject more threads if below this
+
+#ifndef FEATURE_PAL
+extern HANDLE (WINAPI *g_pufnCreateIoCompletionPort)(HANDLE FileHandle,
+ HANDLE ExistingCompletionPort,
+ ULONG_PTR CompletionKey,
+ DWORD NumberOfConcurrentThreads);
+
+extern int (WINAPI *g_pufnNtQueryInformationThread) (HANDLE ThreadHandle,
+ THREADINFOCLASS ThreadInformationClass,
+ PVOID ThreadInformation,
+ ULONG ThreadInformationLength,
+ PULONG ReturnLength);
+
+extern int (WINAPI * g_pufnNtQuerySystemInformation) (SYSTEM_INFORMATION_CLASS SystemInformationClass,
+ PVOID SystemInformation,
+ ULONG SystemInformationLength,
+ PULONG ReturnLength OPTIONAL);
+#endif // !FEATURE_PAL
+
+#define FILETIME_TO_INT64(t) (*(__int64*)&(t))
+#define MILLI_TO_100NANO(x) ((x) * 10000)   // convert from milliseconds to 100-nanosecond units
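+
+// Worked example: SetWaitableTimerEx treats negative due times as relative, so
+// MILLI_TO_100NANO(-500) == -5000000 means "fire 500 milliseconds from now".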
+
+/**
+ * This type is supposed to be private to ThreadpoolMgr.
+ * It's at global scope because Strike needs to be able to access its
+ * definition.
+ */
+struct WorkRequest {
+ WorkRequest* next;
+ LPTHREAD_START_ROUTINE Function;
+ PVOID Context;
+
+};
+
+typedef struct _IOCompletionContext
+{
+ DWORD ErrorCode;
+ DWORD numBytesTransferred;
+ LPOVERLAPPED lpOverlapped;
+ size_t key;
+} IOCompletionContext, *PIOCompletionContext;
+
+typedef DPTR(WorkRequest) PTR_WorkRequest;
+class ThreadpoolMgr
+{
+ friend class ClrDataAccess;
+ friend struct DelegateInfo;
+ friend class ThreadPoolNative;
+ friend class TimerNative;
+ friend class UnManagedPerAppDomainTPCount;
+ friend class ManagedPerAppDomainTPCount;
+ friend class PerAppDomainTPCountList;
+ friend class HillClimbing;
+
+
+ //
+ // UnfairSemaphore is a more scalable semaphore than CLRSemaphore. It prefers to release threads that have more recently begun waiting,
+    // to preserve locality. Additionally, very recently-waiting threads can be released without an additional kernel transition to unblock
+ // them, which reduces latency.
+ //
+ // UnfairSemaphore is only appropriate in scenarios where the order of unblocking threads is not important, and where threads frequently
+ // need to be woken. This is true of the ThreadPool's "worker semaphore", but not, for example, of the "retired worker semaphore" which is
+ // only rarely signalled.
+ //
+ // A further optimization that could be done here would be to replace CLRSemaphore with a Win32 IO Completion Port. Completion ports
+ // unblock threads in LIFO order, unlike the roughly-FIFO ordering of ordinary semaphores, and that would help to keep the "warm" threads warm.
+ // We did not do this in CLR 4.0 because hosts currently have no way of intercepting calls to IO Completion Ports (other than THE completion port
+ // behind the I/O thread pool), and we did not have time to explore the implications of this. Also, completion ports are not available on the Mac,
+ // though Snow Leopard has something roughly similar (and a regular Semaphore would do on the Mac in a pinch).
+ //
+ class UnfairSemaphore
+ {
+ private:
+
+ // padding to ensure we get our own cache line
+ BYTE padding1[64];
+
+ //
+ // We track everything we care about in a single 64-bit struct to allow us to
+ // do CompareExchanges on this for atomic updates.
+ //
+ union Counts
+ {
+ struct
+ {
+ int spinners : 16; //how many threads are currently spin-waiting for this semaphore?
+                int countForSpinners : 16; //how much of the semaphore's count is available to spinners?
+ int waiters : 16; //how many threads are blocked in the OS waiting for this semaphore?
+ int countForWaiters : 16; //how much count is available to waiters?
+ };
+
+ LONGLONG asLongLong;
+
+ } m_counts;
+
+ private:
+ CLRSemaphore m_sem; //waiters wait on this
+
+ // padding to ensure we get our own cache line
+ BYTE padding2[64];
+
+ INDEBUG(int m_maxCount;)
+
+ bool UpdateCounts(Counts newCounts, Counts currentCounts)
+ {
+ LIMITED_METHOD_CONTRACT;
+ Counts oldCounts;
+ oldCounts.asLongLong = FastInterlockCompareExchangeLong(&m_counts.asLongLong, newCounts.asLongLong, currentCounts.asLongLong);
+ if (oldCounts.asLongLong == currentCounts.asLongLong)
+ {
+                // we successfully updated the counts. Now validate what we put in.
+                // Note: we can't validate these unless the CompareExchange succeeds, because
+                // on x86 a VolatileLoad of m_counts is not atomic; we could end up getting inconsistent
+                // values. It's not until we've successfully stored the new values that we know for sure
+                // that the old values were correct (because if they were not, the CompareExchange would
+                // have failed).
+ _ASSERTE(newCounts.spinners >= 0);
+ _ASSERTE(newCounts.countForSpinners >= 0);
+ _ASSERTE(newCounts.waiters >= 0);
+ _ASSERTE(newCounts.countForWaiters >= 0);
+ _ASSERTE(newCounts.countForSpinners + newCounts.countForWaiters <= m_maxCount);
+
+ return true;
+ }
+ else
+ {
+ // we lost a race with some other thread, and will need to try again.
+ return false;
+ }
+ }
+
+ public:
+
+ UnfairSemaphore(int maxCount)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ _ASSERTE(maxCount <= 0x7fff); //counts need to fit in signed 16-bit ints
+ INDEBUG(m_maxCount = maxCount;)
+
+ m_counts.asLongLong = 0;
+ m_sem.Create(0, maxCount);
+ }
+
+ //
+ // no destructor - CLRSemaphore will close itself in its own destructor.
+ //
+ //~UnfairSemaphore()
+ //{
+ //}
+
+
+ void Release(int countToRelease)
+ {
+ while (true)
+ {
+ Counts currentCounts, newCounts;
+ currentCounts.asLongLong = VolatileLoad(&m_counts.asLongLong);
+ newCounts = currentCounts;
+
+ int remainingCount = countToRelease;
+
+ // First, prefer to release existing spinners,
+ // because a) they're hot, and b) we don't need a kernel
+ // transition to release them.
+ int spinnersToRelease = max(0, min(remainingCount, currentCounts.spinners - currentCounts.countForSpinners));
+ newCounts.countForSpinners += spinnersToRelease;
+ remainingCount -= spinnersToRelease;
+
+ // Next, prefer to release existing waiters
+ int waitersToRelease = max(0, min(remainingCount, currentCounts.waiters - currentCounts.countForWaiters));
+ newCounts.countForWaiters += waitersToRelease;
+ remainingCount -= waitersToRelease;
+
+ // Finally, release any future spinners that might come our way
+ newCounts.countForSpinners += remainingCount;
+
+ // Try to commit the transaction
+ if (UpdateCounts(newCounts, currentCounts))
+ {
+ // Now we need to release the waiters we promised to release
+ if (waitersToRelease > 0)
+ {
+ LONG previousCount;
+ INDEBUG(BOOL success =) m_sem.Release((LONG)waitersToRelease, &previousCount);
+ _ASSERTE(success);
+ }
+ break;
+ }
+ }
+ }
+
+
+ bool Wait(DWORD timeout)
+ {
+ while (true)
+ {
+ Counts currentCounts, newCounts;
+ currentCounts.asLongLong = VolatileLoad(&m_counts.asLongLong);
+ newCounts = currentCounts;
+
+ // First, just try to grab some count.
+ if (currentCounts.countForSpinners > 0)
+ {
+ newCounts.countForSpinners--;
+ if (UpdateCounts(newCounts, currentCounts))
+ return true;
+ }
+ else
+ {
+ // No count available, become a spinner
+ newCounts.spinners++;
+ if (UpdateCounts(newCounts, currentCounts))
+ break;
+ }
+ }
+
+ //
+ // Now we're a spinner.
+ //
+ int numSpins = 0;
+ const int spinLimitPerProcessor = 50;
+ while (true)
+ {
+ Counts currentCounts, newCounts;
+
+ currentCounts.asLongLong = VolatileLoad(&m_counts.asLongLong);
+ newCounts = currentCounts;
+
+ if (currentCounts.countForSpinners > 0)
+ {
+ newCounts.countForSpinners--;
+ newCounts.spinners--;
+ if (UpdateCounts(newCounts, currentCounts))
+ return true;
+ }
+ else
+ {
+ double spinnersPerProcessor = (double)currentCounts.spinners / ThreadpoolMgr::NumberOfProcessors;
+ int spinLimit = (int)((spinLimitPerProcessor / spinnersPerProcessor) + 0.5);
+ if (numSpins >= spinLimit)
+ {
+ newCounts.spinners--;
+ newCounts.waiters++;
+ if (UpdateCounts(newCounts, currentCounts))
+ break;
+ }
+ else
+ {
+ //
+ // We yield to other threads using SleepEx rather than the more traditional SwitchToThread.
+ // This is because SwitchToThread does not yield to threads currently scheduled to run on other
+ // processors. On a 4-core machine, for example, this means that SwitchToThread is only ~25% likely
+ // to yield to the correct thread in some scenarios.
+ // SleepEx has the disadvantage of not yielding to lower-priority threads. However, this is ok because
+ // once we've called this a few times we'll become a "waiter" and wait on the CLRSemaphore, and that will
+ // yield to anything that is runnable.
+ //
+ ClrSleepEx(0, FALSE);
+ numSpins++;
+ }
+ }
+ }
+
+ //
+ // Now we're a waiter
+ //
+ DWORD result = m_sem.Wait(timeout, FALSE);
+ _ASSERTE(WAIT_OBJECT_0 == result || WAIT_TIMEOUT == result);
+
+ while (true)
+ {
+ Counts currentCounts, newCounts;
+
+ currentCounts.asLongLong = VolatileLoad(&m_counts.asLongLong);
+ newCounts = currentCounts;
+
+ newCounts.waiters--;
+
+ if (result == WAIT_OBJECT_0)
+ newCounts.countForWaiters--;
+
+ if (UpdateCounts(newCounts, currentCounts))
+ return (result == WAIT_OBJECT_0);
+ }
+ }
+ };
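+
+    // Usage sketch (not compiled): the intended producer/consumer pairing. A work
+    // producer calls Release(1) per queued item; a pool thread calls Wait() with an
+    // idle timeout and can retire when the wait expires unsignaled. The timeout value
+    // below is hypothetical.
+#if 0
+    UnfairSemaphore workerSemaphore(ThreadCounter::MaxPossibleCount);
+
+    // producer:
+    workerSemaphore.Release(1);
+
+    // pool thread:
+    if (workerSemaphore.Wait(20 * 1000))    // hypothetical 20-second idle timeout
+    {
+        // got a count: go look for work
+    }
+    else
+    {
+        // timed out without a signal: consider retiring this thread
+    }
+#endif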
+
+public:
+ struct ThreadCounter
+ {
+ static const int MaxPossibleCount = 0x7fff;
+
+ union Counts
+ {
+ struct
+ {
+ //
+ // Note: these are signed rather than unsigned to allow us to detect under/overflow.
+ //
+ int MaxWorking : 16; //Determined by HillClimbing; adjusted elsewhere for timeouts, etc.
+ int NumActive : 16; //Active means working or waiting on WorkerSemaphore. These are "warm/hot" threads.
+ int NumWorking : 16; //Trying to get work from various queues. Not waiting on either semaphore.
+ int NumRetired : 16; //Not trying to get work; waiting on RetiredWorkerSemaphore. These are "cold" threads.
+
+ // Note: the only reason we need "retired" threads at all is that it allows some threads to eventually time out
+ // even if other threads are getting work. If we ever make WorkerSemaphore a true LIFO semaphore, we will no longer
+ // need the concept of "retirement" - instead, the very "coldest" threads will naturally be the first to time out.
+ };
+
+ LONGLONG AsLongLong;
+
+ bool operator==(Counts other) {LIMITED_METHOD_CONTRACT; return AsLongLong == other.AsLongLong;}
+
+ } counts;
+
+ Counts GetCleanCounts()
+ {
+ LIMITED_METHOD_CONTRACT;
+ Counts result;
+#ifndef DACCESS_COMPILE
+ result.AsLongLong = FastInterlockCompareExchangeLong(&counts.AsLongLong, 0, 0);
+ ValidateCounts(result);
+#else
+ result.AsLongLong = 0; //prevents prefast warning for DAC builds
+#endif
+ return result;
+ }
+
+ //
+        // This does a non-atomic read of the counts. The returned value is suitable only
+        // for use inside of a read-compare-exchange loop, where the compare-exchange must succeed
+        // before any action is taken. Use GetCleanCounts for other needs, but keep in mind
+        // it's much slower.
+ //
+ Counts DangerousGetDirtyCounts()
+ {
+ LIMITED_METHOD_CONTRACT;
+ Counts result;
+#ifndef DACCESS_COMPILE
+ result.AsLongLong = VolatileLoad(&counts.AsLongLong);
+#else
+ result.AsLongLong = 0; //prevents prefast warning for DAC builds
+#endif
+ return result;
+ }
+
+
+ Counts CompareExchangeCounts(Counts newCounts, Counts oldCounts)
+ {
+ LIMITED_METHOD_CONTRACT;
+ Counts result;
+#ifndef DACCESS_COMPILE
+ result.AsLongLong = FastInterlockCompareExchangeLong(&counts.AsLongLong, newCounts.AsLongLong, oldCounts.AsLongLong);
+ if (result == oldCounts)
+ {
+ // can only do validation on success; if we failed, it may have been due to a previous
+ // dirty read, which may contain invalid values.
+ ValidateCounts(result);
+ ValidateCounts(newCounts);
+ }
+#else
+ result.AsLongLong = 0; //prevents prefast warning for DAC builds
+#endif
+ return result;
+ }
+
+ private:
+ static void ValidateCounts(Counts counts)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(counts.MaxWorking > 0);
+ _ASSERTE(counts.NumActive >= 0);
+ _ASSERTE(counts.NumWorking >= 0);
+ _ASSERTE(counts.NumRetired >= 0);
+ _ASSERTE(counts.NumWorking <= counts.NumActive);
+ }
+ };
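+
+    // Usage sketch (not compiled): the read-compare-exchange loop used throughout
+    // win32threadpool.cpp to update a ThreadCounter. GetCleanCounts() yields a
+    // validated snapshot; DangerousGetDirtyCounts() would also be safe here because
+    // CompareExchangeCounts only succeeds if the counts were not modified in between.
+#if 0
+    ThreadCounter::Counts oldCounts, newCounts;
+    while (true)
+    {
+        oldCounts = CPThreadCounter.GetCleanCounts();
+        newCounts = oldCounts;
+        newCounts.NumWorking++;     // the state transition being attempted
+        if (oldCounts == CPThreadCounter.CompareExchangeCounts(newCounts, oldCounts))
+            break;                  // committed atomically; otherwise retry
+    }
+#endif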
+
+public:
+
+ static void ReportThreadStatus(bool isWorking);
+
+ // enumeration of different kinds of memory blocks that are recycled
+ enum MemType
+ {
+ MEMTYPE_AsyncCallback = 0,
+ MEMTYPE_DelegateInfo = 1,
+ MEMTYPE_WorkRequest = 2,
+ MEMTYPE_PostRequest = 3,
+ MEMTYPE_COUNT = 4,
+ };
+
+ static BOOL Initialize();
+
+ static BOOL SetMaxThreadsHelper(DWORD MaxWorkerThreads,
+ DWORD MaxIOCompletionThreads);
+
+ static BOOL SetMaxThreads(DWORD MaxWorkerThreads,
+ DWORD MaxIOCompletionThreads);
+
+ static BOOL GetMaxThreads(DWORD* MaxWorkerThreads,
+ DWORD* MaxIOCompletionThreads);
+
+ static BOOL SetMinThreads(DWORD MinWorkerThreads,
+ DWORD MinIOCompletionThreads);
+
+ static BOOL GetMinThreads(DWORD* MinWorkerThreads,
+ DWORD* MinIOCompletionThreads);
+
+ static BOOL GetAvailableThreads(DWORD* AvailableWorkerThreads,
+ DWORD* AvailableIOCompletionThreads);
+
+ static BOOL QueueUserWorkItem(LPTHREAD_START_ROUTINE Function,
+ PVOID Context,
+ ULONG Flags,
+ BOOL UnmanagedTPRequest=TRUE);
+
+ static BOOL PostQueuedCompletionStatus(LPOVERLAPPED lpOverlapped,
+ LPOVERLAPPED_COMPLETION_ROUTINE Function);
+
+ inline static BOOL IsCompletionPortInitialized()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GlobalCompletionPort != NULL;
+ }
+
+ static BOOL DrainCompletionPortQueue();
+
+ static BOOL RegisterWaitForSingleObject(PHANDLE phNewWaitObject,
+ HANDLE hWaitObject,
+ WAITORTIMERCALLBACK Callback,
+ PVOID Context,
+ ULONG timeout,
+ DWORD dwFlag);
+
+ static BOOL UnregisterWaitEx(HANDLE hWaitObject,HANDLE CompletionEvent);
+ static void WaitHandleCleanup(HANDLE hWaitObject);
+
+ static BOOL BindIoCompletionCallback(HANDLE FileHandle,
+ LPOVERLAPPED_COMPLETION_ROUTINE Function,
+ ULONG Flags,
+ DWORD& errorCode);
+
+ static void WaitIOCompletionCallback(DWORD dwErrorCode,
+ DWORD numBytesTransferred,
+ LPOVERLAPPED lpOverlapped);
+
+ static VOID CallbackForInitiateDrainageOfCompletionPortQueue(
+ DWORD dwErrorCode,
+ DWORD dwNumberOfBytesTransfered,
+ LPOVERLAPPED lpOverlapped
+ );
+
+ static VOID CallbackForContinueDrainageOfCompletionPortQueue(
+ DWORD dwErrorCode,
+ DWORD dwNumberOfBytesTransfered,
+ LPOVERLAPPED lpOverlapped
+ );
+
+ static BOOL SetAppDomainRequestsActive(BOOL UnmanagedTP = FALSE);
+ static void ClearAppDomainRequestsActive(BOOL UnmanagedTP = FALSE, BOOL AdUnloading = FALSE, LONG index = -1);
+
+ static inline void UpdateLastDequeueTime()
+ {
+ LIMITED_METHOD_CONTRACT;
+ LastDequeueTime = GetTickCount();
+ }
+
+ static BOOL CreateTimerQueueTimer(PHANDLE phNewTimer,
+ WAITORTIMERCALLBACK Callback,
+ PVOID Parameter,
+ DWORD DueTime,
+ DWORD Period,
+ ULONG Flags);
+
+ static BOOL ChangeTimerQueueTimer(HANDLE Timer,
+ ULONG DueTime,
+ ULONG Period);
+ static BOOL DeleteTimerQueueTimer(HANDLE Timer,
+ HANDLE CompletionEvent);
+
+ static void RecycleMemory(LPVOID mem, enum MemType memType);
+
+ static void FlushQueueOfTimerInfos();
+
+ static BOOL HaveTimerInfosToFlush() { return TimerInfosToBeRecycled != NULL; }
+
+ inline static BOOL IsThreadPoolHosted()
+ {
+#ifdef FEATURE_INCLUDE_ALL_INTERFACES
+ IHostThreadpoolManager *provider = CorHost2::GetHostThreadpoolManager();
+ if (provider)
+ return TRUE;
+ else
+#endif
+ return FALSE;
+ }
+
+#ifndef FEATURE_PAL
+ static LPOVERLAPPED CompletionPortDispatchWorkWithinAppDomain(Thread* pThread, DWORD* pErrorCode, DWORD* pNumBytes, size_t* pKey, DWORD adid);
+ static void StoreOverlappedInfoInThread(Thread* pThread, DWORD dwErrorCode, DWORD dwNumBytes, size_t key, LPOVERLAPPED lpOverlapped);
+#endif // !FEATURE_PAL
+
+ // Enable filtering of correlation ETW events for cases handled at a higher abstraction level
+
+#ifndef DACCESS_COMPILE
+ static FORCEINLINE BOOL AreEtwQueueEventsSpeciallyHandled(LPTHREAD_START_ROUTINE Function)
+ {
+ // Timer events are handled at a higher abstraction level: in the managed Timer class
+ return (Function == ThreadpoolMgr::AsyncTimerCallbackCompletion);
+ }
+
+ static FORCEINLINE BOOL AreEtwIOQueueEventsSpeciallyHandled(LPOVERLAPPED_COMPLETION_ROUTINE Function)
+ {
+ // We ignore drainage events b/c they are uninteresting
+ // We handle registered waits at a higher abstraction level
+ return (Function == ThreadpoolMgr::CallbackForInitiateDrainageOfCompletionPortQueue
+ || Function == ThreadpoolMgr::CallbackForContinueDrainageOfCompletionPortQueue
+ || Function == ThreadpoolMgr::WaitIOCompletionCallback);
+ }
+#endif
+
+private:
+
+#ifndef DACCESS_COMPILE
+
+ inline static void FreeWorkRequest(WorkRequest* workRequest)
+ {
+ RecycleMemory( workRequest, MEMTYPE_WorkRequest ); //delete workRequest;
+ }
+
+ inline static WorkRequest* MakeWorkRequest(LPTHREAD_START_ROUTINE function, PVOID context)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ WorkRequest* wr = (WorkRequest*) GetRecycledMemory(MEMTYPE_WorkRequest);
+ _ASSERTE(wr);
+ if (NULL == wr)
+ return NULL;
+ wr->Function = function;
+ wr->Context = context;
+ wr->next = NULL;
+ return wr;
+ }
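+
+ // Illustrative sketch (an assumed caller pattern, not declared here): queueing presumably
+ // pairs these helpers along the lines of
+ //
+ //     WorkRequest* wr = MakeWorkRequest(Function, Context);
+ //     if (wr != NULL)
+ //     {
+ //         EnqueueWorkRequest(wr);     // append to the WorkRequestHead/WorkRequestTail queue
+ //         MaybeAddWorkingWorker();    // ensure a worker is awake to service it
+ //     }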
+
+ struct PostRequest {
+ LPOVERLAPPED_COMPLETION_ROUTINE Function;
+ DWORD errorCode;
+ DWORD numBytesTransferred;
+ LPOVERLAPPED lpOverlapped;
+ };
+
+
+ inline static PostRequest* MakePostRequest(LPOVERLAPPED_COMPLETION_ROUTINE function, LPOVERLAPPED overlapped)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PostRequest* pr = (PostRequest*) GetRecycledMemory(MEMTYPE_PostRequest);
+ _ASSERTE(pr);
+ if (NULL == pr)
+ return NULL;
+ pr->Function = function;
+ pr->errorCode = 0;
+ pr->numBytesTransferred = 0;
+ pr->lpOverlapped = overlapped;
+
+ return pr;
+ }
+
+ inline static void ReleasePostRequest(PostRequest *postRequest)
+ {
+ WRAPPER_NO_CONTRACT;
+ ThreadpoolMgr::RecycleMemory(postRequest, MEMTYPE_PostRequest);
+ }
+
+ typedef Wrapper< PostRequest *, DoNothing<PostRequest *>, ThreadpoolMgr::ReleasePostRequest > PostRequestHolder;
+
+#endif // #ifndef DACCESS_COMPILE
+
+ typedef struct {
+ DWORD numBytes;
+ ULONG_PTR *key;
+ LPOVERLAPPED pOverlapped;
+ DWORD errorCode;
+ } QueuedStatus;
+
+ typedef DPTR(struct _LIST_ENTRY) PTR_LIST_ENTRY;
+ typedef struct _LIST_ENTRY {
+ struct _LIST_ENTRY *Flink;
+ struct _LIST_ENTRY *Blink;
+ } LIST_ENTRY, *PLIST_ENTRY;
+
+ struct WaitInfo;
+
+ typedef struct {
+ HANDLE threadHandle;
+ DWORD threadId;
+ CLREvent startEvent;
+ LONG NumWaitHandles; // number of wait objects registered to the thread <=64
+ LONG NumActiveWaits; // number of objects the thread is actually waiting on (this may be less than
+ // NumWaitHandles since the thread may not have activated some waits)
+ HANDLE waitHandle[MAX_WAITHANDLES]; // array of wait handles (copied from waitInfo since
+ // we need them to be contiguous)
+ LIST_ENTRY waitPointer[MAX_WAITHANDLES]; // array of doubly linked list of corresponding waitinfo
+ } ThreadCB;
+
+
+ typedef struct {
+ ULONG startTime; // time at which wait was started
+ // endTime = startTime+timeout
+ ULONG remainingTime; // endTime - currentTime
+ } WaitTimerInfo;
+
+ struct WaitInfo {
+ LIST_ENTRY link; // Win9x does not allow duplicate waithandles, so we need to
+ // group all waits on a single waithandle using this linked list
+ HANDLE waitHandle;
+ WAITORTIMERCALLBACK Callback;
+ PVOID Context;
+ ULONG timeout;
+ WaitTimerInfo timer;
+ DWORD flag;
+ DWORD state;
+ ThreadCB* threadCB;
+ LONG refCount; // when this reaches 0, the waitInfo can be safely deleted
+ CLREvent PartialCompletionEvent; // used to synchronize deactivation of a wait
+ CLREvent InternalCompletionEvent; // only one of InternalCompletion or ExternalCompletion is used
+ // but I can't make a union since CLREvent has a non-default constructor
+ HANDLE ExternalCompletionEvent; // they are signalled when all callbacks have completed (refCount=0)
+ ADID handleOwningAD;
+ OBJECTHANDLE ExternalEventSafeHandle;
+
+ } ;
+
+ // structure used to maintain global information about wait threads. Protected by WaitThreadsCriticalSection
+ typedef struct WaitThreadTag {
+ LIST_ENTRY link;
+ ThreadCB* threadCB;
+ } WaitThreadInfo;
+
+
+ struct AsyncCallback{
+ WaitInfo* wait;
+ BOOL waitTimedOut;
+ } ;
+
+#ifndef DACCESS_COMPILE
+
+ static VOID
+ AcquireAsyncCallback(AsyncCallback *pAsyncCB)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ static VOID
+ ReleaseAsyncCallback(AsyncCallback *pAsyncCB)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ WaitInfo *waitInfo = pAsyncCB->wait;
+ ThreadpoolMgr::RecycleMemory((LPVOID*)pAsyncCB, ThreadpoolMgr::MEMTYPE_AsyncCallback);
+
+ // if this was a single execution, we now need to stop rooting registeredWaitHandle
+ // in a GC handle. This will cause the finalizer to pick it up and call the cleanup
+ // routine.
+ if ( (waitInfo->flag & WAIT_SINGLE_EXECUTION) && (waitInfo->flag & WAIT_FREE_CONTEXT))
+ {
+
+ DelegateInfo* pDelegate = (DelegateInfo*) waitInfo->Context;
+
+ _ASSERTE(pDelegate->m_registeredWaitHandle);
+
+ {
+ GCX_COOP();
+ AppDomainFromIDHolder ad(pDelegate->m_appDomainId, TRUE);
+ if (!ad.IsUnloaded())
+ // if no domain then handle already gone or about to go.
+ StoreObjectInHandle(pDelegate->m_registeredWaitHandle, NULL);
+ }
+ }
+
+ if (InterlockedDecrement(&waitInfo->refCount) == 0)
+ ThreadpoolMgr::DeleteWait(waitInfo);
+
+ }
+
+ typedef Holder<AsyncCallback *, ThreadpoolMgr::AcquireAsyncCallback, ThreadpoolMgr::ReleaseAsyncCallback> AsyncCallbackHolder;
+ inline static AsyncCallback* MakeAsyncCallback()
+ {
+ WRAPPER_NO_CONTRACT;
+ return (AsyncCallback*) GetRecycledMemory(MEMTYPE_AsyncCallback);
+ }
+
+ static VOID ReleaseInfo(OBJECTHANDLE& hndSafeHandle,
+ ADID& owningAD,
+ HANDLE hndNativeHandle)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+// Use of EX_TRY, GCPROTECT etc in the same function is causing prefast to complain about local variables with
+// same name masking each other (#246). The error could not be suppressed with "#pragma PREFAST_SUPPRESS"
+#ifndef _PREFAST_
+
+ if (hndSafeHandle != NULL)
+ {
+
+ SAFEHANDLEREF refSH = NULL;
+
+ GCX_COOP();
+ GCPROTECT_BEGIN(refSH);
+
+ {
+ EX_TRY
+ {
+ ENTER_DOMAIN_ID(owningAD);
+ {
+ // Read the GC handle
+ refSH = (SAFEHANDLEREF) ObjectToOBJECTREF(ObjectFromHandle(hndSafeHandle));
+
+ // Destroy the GC handle
+ DestroyHandle(hndSafeHandle);
+
+ if (refSH != NULL)
+ {
+ SafeHandleHolder h(&refSH);
+
+ HANDLE hEvent = refSH->GetHandle();
+ if (hEvent != INVALID_HANDLE_VALUE)
+ {
+ UnsafeSetEvent(hEvent);
+ }
+ }
+ }
+ END_DOMAIN_TRANSITION;
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ GCPROTECT_END();
+
+ hndSafeHandle = NULL;
+ owningAD = (ADID) 0;
+ }
+#endif
+ }
+
+#endif // #ifndef DACCESS_COMPILE
+
+ typedef struct {
+ LIST_ENTRY link;
+ HANDLE Handle;
+ } WaitEvent ;
+
+ // Timer
+ typedef struct {
+ LIST_ENTRY link; // doubly linked list of timers
+ ULONG FiringTime; // TickCount of when to fire next
+ WAITORTIMERCALLBACK Function; // Function to call when timer fires
+ PVOID Context; // Context to pass to function when timer fires
+ ULONG Period;
+ DWORD flag; // How do we deal with the context
+ DWORD state;
+ LONG refCount;
+ HANDLE ExternalCompletionEvent; // only one of these is used, but we can't make a union since CLREvent has a non-default constructor
+ CLREvent InternalCompletionEvent; // the flag field indicates which one is being used
+ OBJECTHANDLE ExternalEventSafeHandle;
+ ADID handleOwningAD;
+ } TimerInfo;
+
+ static VOID AcquireWaitInfo(WaitInfo *pInfo)
+ {
+ }
+ static VOID ReleaseWaitInfo(WaitInfo *pInfo)
+ {
+ WRAPPER_NO_CONTRACT;
+#ifndef DACCESS_COMPILE
+ ReleaseInfo(pInfo->ExternalEventSafeHandle,
+ pInfo->handleOwningAD,
+ pInfo->ExternalCompletionEvent);
+#endif
+ }
+ static VOID AcquireTimerInfo(TimerInfo *pInfo)
+ {
+ }
+ static VOID ReleaseTimerInfo(TimerInfo *pInfo)
+ {
+ WRAPPER_NO_CONTRACT;
+#ifndef DACCESS_COMPILE
+ ReleaseInfo(pInfo->ExternalEventSafeHandle,
+ pInfo->handleOwningAD,
+ pInfo->ExternalCompletionEvent);
+#endif
+ }
+
+ typedef Holder<WaitInfo *, ThreadpoolMgr::AcquireWaitInfo, ThreadpoolMgr::ReleaseWaitInfo> WaitInfoHolder;
+ typedef Holder<TimerInfo *, ThreadpoolMgr::AcquireTimerInfo, ThreadpoolMgr::ReleaseTimerInfo> TimerInfoHolder;
+
+ typedef struct {
+ TimerInfo* Timer; // timer to be updated
+ ULONG DueTime ; // new due time
+ ULONG Period ; // new period
+ } TimerUpdateInfo;
+
+ // Definitions and data structures to support recycling of high-frequency
+ // memory blocks. We use a spin-lock to access the list
+
+ class RecycledListInfo
+ {
+ static const unsigned int MaxCachedEntries = 40;
+
+ struct Entry
+ {
+ Entry* next;
+ };
+
+ Volatile<LONG> lock; // this is the spin lock
+ DWORD count; // count of number of elements in the list
+ Entry* root; // ptr to first element of recycled list
+#ifndef _WIN64
+ DWORD filler; // Pad the structure to a multiple of 16 bytes.
+#endif
+
+ //--//
+
+public:
+ RecycledListInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ lock = 0;
+ root = NULL;
+ count = 0;
+ }
+
+ FORCEINLINE bool CanInsert()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return count < MaxCachedEntries;
+ }
+
+ FORCEINLINE LPVOID Remove()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if(root == NULL) return NULL; // No need for acquiring the lock, there's nothing to remove.
+
+ AcquireLock();
+
+ Entry* ret = (Entry*)root;
+
+ if(ret)
+ {
+ root = ret->next;
+ count -= 1;
+ }
+
+ ReleaseLock();
+
+ return ret;
+ }
+
+ FORCEINLINE void Insert( LPVOID mem )
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ AcquireLock();
+
+ Entry* entry = (Entry*)mem;
+
+ entry->next = root;
+
+ root = entry;
+ count += 1;
+
+ ReleaseLock();
+ }
+
+ private:
+ FORCEINLINE void AcquireLock()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ unsigned int rounds = 0;
+
+ DWORD dwSwitchCount = 0;
+
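+ // Test-and-test-and-set: read the lock first so spinning waiters stay on a shared
+ // cache line and only attempt the interlocked write when it is likely to succeed.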
+ while(lock != 0 || FastInterlockExchange( &lock, 1 ) != 0)
+ {
+ YieldProcessor(); // indicate to the processor that we are spinning
+
+ rounds++;
+
+ if((rounds % 32) == 0)
+ {
+ __SwitchToThread( 0, ++dwSwitchCount );
+ }
+ }
+ }
+
+ FORCEINLINE void ReleaseLock()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ lock = 0;
+ }
+ };
+
+ //
+ // It's critical that we ensure these pointers are allocated by the linker away from
+ // variables that are modified a lot at runtime.
+ //
+ // The use of the CacheGuard is a temporary solution;
+ // the thread pool has to be refactored away from static variables and
+ // toward a single global structure, where we can control the locality of variables.
+ //
+ class RecycledListsWrapper
+ {
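+ // The guards below pad by 64 bytes (a typical cache-line size) on each side so the
+ // pointer does not share a cache line with frequently written variables.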
+ DWORD CacheGuardPre[64/sizeof(DWORD)];
+
+ RecycledListInfo (*pRecycledListPerProcessor)[MEMTYPE_COUNT]; // RecycledListInfo [numProc][MEMTYPE_COUNT]
+
+ DWORD CacheGuardPost[64/sizeof(DWORD)];
+
+ public:
+ void Initialize( unsigned int numProcs );
+
+ FORCEINLINE bool IsInitialized()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return pRecycledListPerProcessor != NULL;
+ }
+
+ FORCEINLINE RecycledListInfo& GetRecycleMemoryInfo( enum MemType memType )
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (CPUGroupInfo::CanEnableGCCPUGroups() && CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
+ return pRecycledListPerProcessor[CPUGroupInfo::CalculateCurrentProcessorNumber()][memType];
+ else
+ // Turns out GetCurrentProcessorNumber can return a value greater than the number of processors reported by
+ // GetSystemInfo, if we're running in WOW64 on a machine with >32 processors.
+ return pRecycledListPerProcessor[GetCurrentProcessorNumber()%NumberOfProcessors][memType];
+ }
+ };
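+
+ // Illustrative sketch (an assumed caller pattern, not declared here): RecycleMemory and
+ // GetRecycledMemory presumably route through the per-processor lists roughly as
+ //
+ //     RecycledListInfo& list = RecycledLists.GetRecycleMemoryInfo(memType);
+ //     if (list.CanInsert())
+ //         list.Insert(mem);           // cache the block for reuse on this processor
+ //     ...
+ //     LPVOID mem = list.Remove();     // returns NULL when the per-processor cache is empty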
+
+#define GATE_THREAD_STATUS_NOT_RUNNING 0 // There is no gate thread
+#define GATE_THREAD_STATUS_REQUESTED 1 // There is a gate thread, and someone has asked it to stick around recently
+#define GATE_THREAD_STATUS_WAITING_FOR_REQUEST 2 // There is a gate thread, but nobody has asked it to stay. It may die soon
+
+ // Private methods
+
+ static DWORD __stdcall intermediateThreadProc(PVOID arg);
+
+ typedef struct {
+ LPTHREAD_START_ROUTINE lpThreadFunction;
+ PVOID lpArg;
+ } intermediateThreadParam;
+
+ static Thread* CreateUnimpersonatedThread(LPTHREAD_START_ROUTINE lpStartAddress, LPVOID lpArgs, BOOL *pIsCLRThread);
+
+ static BOOL CreateWorkerThread();
+
+ static void EnqueueWorkRequest(WorkRequest* wr);
+
+ static WorkRequest* DequeueWorkRequest();
+
+ static void ExecuteWorkRequest(bool* foundWork, bool* wasNotRecalled);
+
+ static DWORD WINAPI ExecuteHostRequest(PVOID pArg);
+
+#ifndef DACCESS_COMPILE
+
+ inline static void AppendWorkRequest(WorkRequest* entry)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (WorkRequestTail)
+ {
+ _ASSERTE(WorkRequestHead != NULL);
+ WorkRequestTail->next = entry;
+ }
+ else
+ {
+ _ASSERTE(WorkRequestHead == NULL);
+ WorkRequestHead = entry;
+ }
+
+ WorkRequestTail = entry;
+ _ASSERTE(WorkRequestTail->next == NULL);
+ }
+
+ inline static WorkRequest* RemoveWorkRequest()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ WorkRequest* entry = NULL;
+ if (WorkRequestHead)
+ {
+ entry = WorkRequestHead;
+ WorkRequestHead = entry->next;
+ if (WorkRequestHead == NULL)
+ WorkRequestTail = NULL;
+ }
+ return entry;
+ }
+
+ static void EnsureInitialized();
+ static void InitPlatformVariables();
+
+ inline static BOOL IsInitialized()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return Initialization == -1;
+ }
+
+ static void MaybeAddWorkingWorker();
+
+ static void NotifyWorkItemCompleted()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (!CLRThreadpoolHosted())
+ {
+ Thread::IncrementThreadPoolCompletionCount();
+ UpdateLastDequeueTime();
+ }
+ }
+
+ static bool ShouldAdjustMaxWorkersActive()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (CLRThreadpoolHosted())
+ return false;
+
+ DWORD requiredInterval = NextCompletedWorkRequestsTime - PriorCompletedWorkRequestsTime;
+ DWORD elapsedInterval = GetTickCount() - PriorCompletedWorkRequestsTime;
+ if (elapsedInterval >= requiredInterval)
+ {
+ ThreadCounter::Counts counts = WorkerCounter.GetCleanCounts();
+ if (counts.NumActive <= counts.MaxWorking)
+ return true;
+ }
+
+ return false;
+ }
+
+ static void AdjustMaxWorkersActive();
+ static bool ShouldWorkerKeepRunning();
+
+ static BOOL SuspendProcessing();
+
+ static DWORD SafeWait(CLREvent * ev, DWORD sleepTime, BOOL alertable);
+
+ static DWORD __stdcall WorkerThreadStart(LPVOID lpArgs);
+
+ static BOOL AddWaitRequest(HANDLE waitHandle, WaitInfo* waitInfo);
+
+
+ static ThreadCB* FindWaitThread(); // returns a wait thread that can accommodate another wait request
+
+ static BOOL CreateWaitThread();
+
+ static void __stdcall InsertNewWaitForSelf(WaitInfo* pArg);
+
+ static int FindWaitIndex(const ThreadCB* threadCB, const HANDLE waitHandle);
+
+ static DWORD MinimumRemainingWait(LIST_ENTRY* waitInfo, unsigned int numWaits);
+
+ static void ProcessWaitCompletion( WaitInfo* waitInfo,
+ unsigned index, // array index
+ BOOL waitTimedOut);
+
+ static DWORD __stdcall WaitThreadStart(LPVOID lpArgs);
+
+ static DWORD __stdcall AsyncCallbackCompletion(PVOID pArgs);
+
+ static void QueueTimerInfoForRelease(TimerInfo *pTimerInfo);
+
+ static DWORD __stdcall QUWIPostCompletion(PVOID pArgs);
+
+ static void DeactivateWait(WaitInfo* waitInfo);
+ static void DeactivateNthWait(WaitInfo* waitInfo, DWORD index);
+
+ static void DeleteWait(WaitInfo* waitInfo);
+
+
+ inline static void ShiftWaitArray( ThreadCB* threadCB,
+ ULONG SrcIndex,
+ ULONG DestIndex,
+ ULONG count)
+ {
+ LIMITED_METHOD_CONTRACT;
+ memcpy(&threadCB->waitHandle[DestIndex],
+ &threadCB->waitHandle[SrcIndex],
+ count * sizeof(HANDLE));
+ memcpy(&threadCB->waitPointer[DestIndex],
+ &threadCB->waitPointer[SrcIndex],
+ count * sizeof(LIST_ENTRY));
+ }
+
+ static void __stdcall DeregisterWait(WaitInfo* pArgs);
+
+#ifndef FEATURE_PAL
+ // holds the aggregate of system cpu usage of all processors
+ typedef struct _PROCESS_CPU_INFORMATION
+ {
+ LARGE_INTEGER idleTime;
+ LARGE_INTEGER kernelTime;
+ LARGE_INTEGER userTime;
+ DWORD_PTR affinityMask;
+ int numberOfProcessors;
+ SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION* usageBuffer;
+ int usageBufferSize;
+ } PROCESS_CPU_INFORMATION;
+
+ static int GetCPUBusyTime_NT(PROCESS_CPU_INFORMATION* pOldInfo);
+ static BOOL CreateCompletionPortThread(LPVOID lpArgs);
+ static DWORD __stdcall CompletionPortThreadStart(LPVOID lpArgs);
+public:
+ inline static bool HaveNativeWork()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return WorkRequestHead != NULL;
+ }
+
+ static void GrowCompletionPortThreadpoolIfNeeded();
+ static BOOL ShouldGrowCompletionPortThreadpool(ThreadCounter::Counts counts);
+#else
+ static int GetCPUBusyTime_NT(PAL_IOCP_CPU_INFORMATION* pOldInfo);
+
+#endif // !FEATURE_PAL
+
+private:
+ static BOOL IsIoPending();
+
+ static BOOL CreateGateThread();
+ static void EnsureGateThreadRunning();
+ static bool ShouldGateThreadKeepRunning();
+ static DWORD __stdcall GateThreadStart(LPVOID lpArgs);
+ static BOOL SufficientDelaySinceLastSample(unsigned int LastThreadCreationTime,
+ unsigned NumThreads, // total number of threads of that type (worker or CP)
+ double throttleRate=0.0 // the delay is increased by this percentage for each extra thread
+ );
+ static BOOL SufficientDelaySinceLastDequeue();
+
+ static LPVOID GetRecycledMemory(enum MemType memType);
+
+ static DWORD __stdcall TimerThreadStart(LPVOID args);
+ static void TimerThreadFire(); // helper method used by TimerThreadStart
+ static void __stdcall InsertNewTimer(TimerInfo* pArg);
+ static DWORD FireTimers();
+ static DWORD __stdcall AsyncTimerCallbackCompletion(PVOID pArgs);
+ static void DeactivateTimer(TimerInfo* timerInfo);
+ static DWORD __stdcall AsyncDeleteTimer(PVOID pArgs);
+ static void DeleteTimer(TimerInfo* timerInfo);
+ static void __stdcall UpdateTimer(TimerUpdateInfo* pArgs);
+
+ static void __stdcall DeregisterTimer(TimerInfo* pArgs);
+
+ inline static DWORD QueueDeregisterWait(HANDLE waitThread, WaitInfo* waitInfo)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ DWORD result = QueueUserAPC(reinterpret_cast<PAPCFUNC>(DeregisterWait), waitThread, reinterpret_cast<ULONG_PTR>(waitInfo));
+ SetWaitThreadAPCPending();
+ return result;
+ }
+
+
+ inline static void SetWaitThreadAPCPending() {IsApcPendingOnWaitThread = TRUE;}
+ inline static void ResetWaitThreadAPCPending() {IsApcPendingOnWaitThread = FALSE;}
+ inline static BOOL IsWaitThreadAPCPending() {return IsApcPendingOnWaitThread;}
+
+#ifdef _DEBUG
+ inline static DWORD GetTickCount()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ::GetTickCount() + TickCountAdjustment;
+ }
+#endif
+
+#endif // #ifndef DACCESS_COMPILE
+ // Private variables
+
+ static LONG Initialization; // indicator of whether the threadpool is initialized.
+
+ SVAL_DECL(LONG,MinLimitTotalWorkerThreads); // same as MinLimitTotalCPThreads
+ SVAL_DECL(LONG,MaxLimitTotalWorkerThreads); // same as MaxLimitTotalCPThreads
+
+ static Volatile<unsigned int> LastDequeueTime; // used to determine if work items are getting thread starved
+
+ static HillClimbing HillClimbingInstance;
+
+ static Volatile<LONG> PriorCompletedWorkRequests;
+ static Volatile<DWORD> PriorCompletedWorkRequestsTime;
+ static Volatile<DWORD> NextCompletedWorkRequestsTime;
+
+ static LARGE_INTEGER CurrentSampleStartTime;
+
+ static int ThreadAdjustmentInterval;
+
+ private:
+
+ SPTR_DECL(WorkRequest,WorkRequestHead); // Head of work request queue
+ SPTR_DECL(WorkRequest,WorkRequestTail); // Tail of work request queue
+
+ static unsigned int LastCPThreadCreation; // last time a completion port thread was created
+ static unsigned int NumberOfProcessors; // = NumberOfWorkerThreads - no. of blocked threads
+
+ static BOOL IsApcPendingOnWaitThread; // Indicates if an APC is pending on the wait thread
+
+ // This needs to be non-hosted, because worker threads can run prior to EE startup.
+ static DangerousNonHostedSpinLock ThreadAdjustmentLock;
+
+public:
+ static CrstStatic WorkerCriticalSection;
+
+private:
+
+ static const DWORD WorkerTimeout = 20 * 1000;
+ static const DWORD WorkerTimeoutAppX = 5 * 1000; // shorter timeout to allow threads to exit prior to app suspension
+
+ SVAL_DECL(ThreadCounter,WorkerCounter);
+
+ //
+ // WorkerSemaphore is an UnfairSemaphore because:
+ // 1) Threads enter and exit this semaphore very frequently, and thus benefit greatly from the spinning done by UnfairSemaphore
+ // 2) There is no functional reason why any particular thread should be preferred when waking workers. This only impacts performance,
+ // and un-fairness helps performance in this case.
+ //
+ static UnfairSemaphore* WorkerSemaphore;
+
+ //
+ // RetiredWorkerSemaphore is a regular CLRSemaphore, not an UnfairSemaphore, because if a thread waits on this semaphore it is almost certainly
+ // NOT going to be released soon, so the spinning done in UnfairSemaphore only burns valuable CPU time. However, if UnfairSemaphore is ever
+ // implemented in terms of a Win32 IO Completion Port, we should reconsider this. The IOCP's LIFO unblocking behavior could help keep working set
+ // down, by constantly re-using the same small set of retired workers rather than round-robining between all of them as CLRSemaphore will do.
+ // If we go that route, we should add a "no-spin" option to UnfairSemaphore.Wait to avoid wasting CPU.
+ //
+ static CLRSemaphore* RetiredWorkerSemaphore;
+
+ static CLREvent * RetiredCPWakeupEvent;
+
+ static CrstStatic WaitThreadsCriticalSection;
+ static LIST_ENTRY WaitThreadsHead; // queue of wait threads, each thread can handle up to 64 waits
+
+ static TimerInfo *TimerInfosToBeRecycled; // list of delegate infos associated with deleted timers
+ static CrstStatic TimerQueueCriticalSection; // critical section to synchronize timer queue access
+ SVAL_DECL(LIST_ENTRY,TimerQueue); // queue of timers
+ static HANDLE TimerThread; // Currently we only have one timer thread
+ static Thread* pTimerThread;
+ static DWORD LastTickCount; // the count just before timer thread goes to sleep
+
+ static BOOL InitCompletionPortThreadpool; // flag indicating whether completion port threadpool has been initialized
+ static HANDLE GlobalCompletionPort; // used for binding io completions on file handles
+
+ public:
+ SVAL_DECL(ThreadCounter,CPThreadCounter);
+
+ private:
+ SVAL_DECL(LONG,MaxLimitTotalCPThreads); // = MaxLimitCPThreadsPerCPU * number of CPUS
+ SVAL_DECL(LONG,MinLimitTotalCPThreads);
+ SVAL_DECL(LONG,MaxFreeCPThreads); // = MaxFreeCPThreadsPerCPU * Number of CPUS
+
+ static LONG GateThreadStatus; // See GateThreadStatus enumeration
+
+ static Volatile<LONG> NumCPInfrastructureThreads; // number of threads currently busy handling draining cycle
+
+ SVAL_DECL(LONG,cpuUtilization);
+ static LONG cpuUtilizationAverage;
+
+ static RecycledListsWrapper RecycledLists;
+
+#ifdef _DEBUG
+ static DWORD TickCountAdjustment; // add this value to value returned by GetTickCount
+#endif
+
+ static int offset_counter;
+ static const int offset_multiplier = 128;
+};
+
+
+
+
+#endif // _WIN32THREADPOOL_H
diff --git a/src/vm/winrthelpers.cpp b/src/vm/winrthelpers.cpp
new file mode 100644
index 0000000000..a294a67ad0
--- /dev/null
+++ b/src/vm/winrthelpers.cpp
@@ -0,0 +1,165 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// winrthelpers.cpp
+//
+
+//
+// Helpers to fetch the first WinRT Type def from metadata import
+//
+// ======================================================================================
+
+#include "common.h"
+
+// --------------------------------------------------------------------------------------
+// Return the first public WinRT type's namespace and typename - the names have the lifetime of the MetaData scope.
+//
+//static
+HRESULT GetFirstWinRTTypeDef(
+ IMDInternalImport * pMDInternalImport,
+ LPCSTR * pszNameSpace, // Tied to the lifetime of pssFakeNameSpaceAllocationBuffer when the WinMD file is empty
+ LPCSTR * pszTypeName,
+ LPCWSTR wszAssemblyPath, // Used for creating fake binding type name in case the WinMD file is empty
+ SString * pssFakeNameSpaceAllocationBuffer) // Used as allocation buffer for fake namespace
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE((wszAssemblyPath == NULL) || (pssFakeNameSpaceAllocationBuffer != NULL));
+
+ static const char const_szWinRTPrefix[] = "<WinRT>";
+
+ HRESULT hr = S_OK;
+ HENUMInternalHolder hEnum(pMDInternalImport);
+ mdToken tk;
+
+ hEnum.EnumTypeDefInit();
+
+ while (pMDInternalImport->EnumTypeDefNext(&hEnum, &tk))
+ {
+ DWORD dwAttr;
+ IfFailRet(pMDInternalImport->GetTypeDefProps(tk, &dwAttr, NULL));
+ if (IsTdPublic(dwAttr) && IsTdWindowsRuntime(dwAttr))
+ {
+ IfFailRet(pMDInternalImport->GetNameOfTypeDef(tk, pszTypeName, pszNameSpace));
+ return hr;
+ }
+ }
+
+ // We didn't find any public Windows Runtime types. In the case of 1st-party WinMDs, this means
+ // the file isn't exporting anything, so we really cannot bind to it.
+ // For WinMDs built with WinMDExp, it's because the adapter has promoted the CLR implementation to
+ // public (without the WindowsRuntime flag, though) and made the WinRT copy private.
+ // So there should exist a public type (not nested, not an interface) which has a corresponding
+ // private type, with the same name prepended with <WinRT>, that is marked as Windows Runtime.
+ // This isn't very efficient (O(n^2)), but we expect all public types in WinMDs to have WinRT-visible
+ // versions too, so it should early-out in the first iteration in almost all cases.
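+ // For example, for a WinRT class N.Foo, WinMDExp leaves a public CLR type N.Foo alongside a
+ // private Windows Runtime type N.<WinRT>Foo; the nested loops below look for such a pair.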
+ HENUMInternalHolder hEnum2(pMDInternalImport);
+ hEnum2.EnumTypeDefInit();
+
+ while (pMDInternalImport->EnumTypeDefNext(&hEnum2, &tk))
+ {
+ DWORD dwAttr;
+ IfFailRet(pMDInternalImport->GetTypeDefProps(tk, &dwAttr, NULL));
+ if (IsTdPublic(dwAttr) && !IsTdInterface(dwAttr))
+ {
+ // Look for a matching private windows runtime type
+ mdToken tkPrivate;
+ HENUMInternalHolder hSubEnum(pMDInternalImport);
+
+ LPCSTR szNameSpace = NULL;
+ LPCSTR szName = NULL;
+ IfFailRet(pMDInternalImport->GetNameOfTypeDef(tk, &szName, &szNameSpace));
+
+ hSubEnum.EnumTypeDefInit();
+
+ while (pMDInternalImport->EnumTypeDefNext(&hSubEnum, &tkPrivate))
+ {
+ DWORD dwSubAttr;
+ IfFailRet(pMDInternalImport->GetTypeDefProps(tkPrivate, &dwSubAttr, NULL));
+ if (IsTdNotPublic(dwSubAttr) && IsTdWindowsRuntime(dwSubAttr))
+ {
+ LPCSTR szSubNameSpace = NULL;
+ LPCSTR szSubName = NULL;
+ IfFailRet(pMDInternalImport->GetNameOfTypeDef(tkPrivate, &szSubName, &szSubNameSpace));
+ if (!strncmp(szSubName, const_szWinRTPrefix, strlen(const_szWinRTPrefix)))
+ {
+ szSubName += strlen(const_szWinRTPrefix);
+ // Skip over the <WinRT> prefix. Now pointing at type name
+ if (!strcmp(szSubNameSpace, szNameSpace) &&
+ !strcmp(szSubName, szName))
+ {
+ *pszNameSpace = szNameSpace;
+ *pszTypeName = szName;
+ return S_OK;
+ }
+ }
+ }
+ }
+ }
+ }
+ // The .winmd file is empty - i.e. there is no type we can bind to
+
+ if ((wszAssemblyPath != NULL) && (*wszAssemblyPath != 0))
+ { // Create fake name for WinMD binding purposes (used when .winmd file is loaded by file path - ngen, NativeBinder, etc.)
+ // We will use WinMD file name as namespace and use fake hardcoded type name
+ SString ssAssemblyPath(wszAssemblyPath);
+ SString ssAssemblyName;
+ SplitPath(ssAssemblyPath,
+ NULL, // drive
+ NULL, // dir
+ &ssAssemblyName, // name
+ NULL); // ext
+ if (!ssAssemblyName.IsEmpty())
+ {
+ *pszTypeName = "FakeTypeNameForCLRBinding";
+ ssAssemblyName.ConvertToUTF8(*pssFakeNameSpaceAllocationBuffer);
+ *pszNameSpace = pssFakeNameSpaceAllocationBuffer->GetUTF8NoConvert();
+ return S_OK;
+ }
+ }
+
+ return CLR_E_BIND_TYPE_NOT_FOUND;
+} // GetFirstWinRTTypeDef
+
+// --------------------------------------------------------------------------------------
+//static
+HRESULT
+GetBindableWinRTName(
+ IMDInternalImport * pMDInternalImport,
+ IAssemblyName * pIAssemblyName)
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr = S_OK;
+
+ LPCSTR szNameSpace;
+ LPCSTR szTypeName;
+
+ // Note: This function is used only by the native binder, which does not support empty WinMDs - see code:CEECompileInfo::LoadAssemblyByPath.
+ // Therefore we do not have to use the file name to create a fake type name.
+ IfFailRet(GetFirstWinRTTypeDef(pMDInternalImport, &szNameSpace, &szTypeName, NULL, NULL));
+
+ DWORD dwSize = MAX_PATH;
+ WCHAR wzAsmName[MAX_PATH];
+
+ dwSize = MAX_PATH * sizeof(WCHAR);
+ IfFailRet(pIAssemblyName->GetProperty(ASM_NAME_NAME, wzAsmName, &dwSize));
+
+ StackSString sNamespaceAndType(wzAsmName);
+ sNamespaceAndType.Append(W("!"));
+ sNamespaceAndType.AppendUTF8(szNameSpace);
+ sNamespaceAndType.Append(W("."));
+ sNamespaceAndType.AppendUTF8(szTypeName);
+
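+ // The bindable name has the shape "<assemblyName>!<namespace>.<typeName>"; for a hypothetical
+ // assembly "Contoso" whose first WinRT type is Contoso.Widgets.Widget, this produces
+ // "Contoso!Contoso.Widgets.Widget".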
+ pIAssemblyName->SetProperty(ASM_NAME_NAME, sNamespaceAndType.GetUnicode(), (lstrlenW(sNamespaceAndType.GetUnicode()) + 1) * sizeof(WCHAR));
+
+ return hr;
+}
diff --git a/src/vm/winrthelpers.h b/src/vm/winrthelpers.h
new file mode 100644
index 0000000000..43581a9e73
--- /dev/null
+++ b/src/vm/winrthelpers.h
@@ -0,0 +1,31 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// WinRtHelpers.h
+//
+
+//
+// Helpers to fetch the first WinRT Type def from metadata import
+//
+// ======================================================================================
+
+#pragma once
+
+#ifdef FEATURE_COMINTEROP
+
+// --------------------------------------------------------------------------------------
+// Return the first public WinRT type's namespace and typename - the names have the lifetime of the MetaData scope.
+HRESULT GetFirstWinRTTypeDef(
+ IMDInternalImport * pMDInternalImport,
+ LPCSTR * pszNameSpace, // Tied to the lifetime of pssFakeNameSpaceAllocationBuffer when the WinMD file is empty
+ LPCSTR * pszTypeName,
+ LPCWSTR wszAssemblyPath, // Used for creating fake binding type name in case the WinMD file is empty
+ SString * pssFakeNameSpaceAllocationBuffer); // Used as allocation buffer for fake namespace
+
+HRESULT GetBindableWinRTName(
+ IMDInternalImport * pMDInternalImport,
+ IAssemblyName * pIAssemblyName);
+
+#endif //FEATURE_COMINTEROP
diff --git a/src/vm/winrtredirector.h b/src/vm/winrtredirector.h
new file mode 100644
index 0000000000..2ab807c88e
--- /dev/null
+++ b/src/vm/winrtredirector.h
@@ -0,0 +1,153 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: winrtredirector.h
+//
+
+//
+
+//
+// ============================================================================
+
+#ifndef WINRT_DELEGATE_REDIRECTOR_H
+#define WINRT_DELEGATE_REDIRECTOR_H
+
+#include "winrttypenameconverter.h"
+
+// Provides functionality related to redirecting WinRT interfaces.
+// @TODO: This should take advantage of the macros in WinRTProjectedTypes.h instead of hardcoding
+// the knowledge about redirected interfaces here.
+class WinRTInterfaceRedirector
+{
+public:
+#ifndef CLR_STANDALONE_BINDER
+ // Returns a MethodDesc to be used as an interop stub for the given redirected interface/slot/direction.
+ static MethodDesc *GetStubMethodForRedirectedInterface(
+ WinMDAdapter::RedirectedTypeIndex interfaceIndex, // redirected interface index
+ int slot, // slot number of the method for which a stub is needed
+ TypeHandle::InteropKind interopKind, // Interop_ManagedToNative (stub for RCW) or Interop_NativeToManaged (stub for CCW)
+ BOOL fICollectionStub, // need stub for ICollection`1 (only valid with Interop_ManagedToNative)
+ Instantiation methodInst = Instantiation()); // requested method instantiation if the stub method is generic
+
+ // Returns a MethodDesc to be used as an interop stub for the given method and direction.
+ static MethodDesc *GetStubMethodForRedirectedInterfaceMethod(MethodDesc *pMD, TypeHandle::InteropKind interopKind);
+
+ // Returns MethodTable (typical instantiation) of the Framework copy of the specified redirected WinRT interface.
+ static MethodTable *GetWinRTTypeForRedirectedInterfaceIndex(WinMDAdapter::RedirectedTypeIndex index);
+
+ // Loads a type from the given Framework assembly.
+ static MethodTable *LoadTypeFromRedirectedAssembly(WinMDAdapter::FrameworkAssemblyIndex index, LPCWSTR wzTypeName);
+
+ // Loads a method from the given Framework assembly.
+ static MethodDesc *LoadMethodFromRedirectedAssembly(WinMDAdapter::FrameworkAssemblyIndex index, LPCWSTR wzTypeName, LPCUTF8 szMethodName);
+
+ // Lists WinRT-legal types assignable from .NET reference types that are projected from WinRT structures/arrays/delegates.
+ enum WinRTLegalStructureBaseType
+ {
+ BaseType_None,
+ BaseType_Object, // System.Object (assignable from Type, string, Exception)
+ BaseType_IEnumerable, // System.Collections.IEnumerable (assignable from string)
+ BaseType_IEnumerableOfChar // System.Collections.Generic.IEnumerable<char> (assignable from string)
+ };
+
+ // Determines if the generic argument in the given instantiation is a WinRT-legal base type of a WinRT structure type.
+ static WinRTLegalStructureBaseType GetStructureBaseType(Instantiation inst)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(inst.GetNumArgs() == 1);
+
+ if (!inst[0].IsTypeDesc())
+ {
+ MethodTable *pInstArgMT = inst[0].AsMethodTable();
+
+ if (pInstArgMT == g_pObjectClass)
+ return BaseType_Object;
+
+ if (pInstArgMT == MscorlibBinder::GetExistingClass(CLASS__IENUMERABLE))
+ return BaseType_IEnumerable;
+
+ if (pInstArgMT->HasSameTypeDefAs(MscorlibBinder::GetExistingClass(CLASS__IENUMERABLEGENERIC)) &&
+ pInstArgMT->GetInstantiation()[0].GetSignatureCorElementType() == ELEMENT_TYPE_CHAR)
+ return BaseType_IEnumerableOfChar;
+ }
+ return BaseType_None;
+ }
+#endif // !CLR_STANDALONE_BINDER
+
+ // Returns the redirection index if the MethodTable* is a redirected interface.
+ static inline bool ResolveRedirectedInterface(MethodTable *pMT, WinMDAdapter::RedirectedTypeIndex * pIndex);
+
+#ifndef CLR_STANDALONE_BINDER
+
+#ifdef _DEBUG
+ static void VerifyRedirectedInterfaceStubs();
+#endif // _DEBUG
+
+private:
+ static inline int GetStubInfoIndex(WinMDAdapter::RedirectedTypeIndex index)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ switch (index)
+ {
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IEnumerable: return 0;
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IList: return 1;
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IDictionary: return 2;
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IReadOnlyList: return 3;
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_IReadOnlyDictionary: return 4;
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_IEnumerable: return 5;
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_IList: return 6;
+ case WinMDAdapter::RedirectedTypeIndex_System_Collections_Specialized_INotifyCollectionChanged: return 7;
+ case WinMDAdapter::RedirectedTypeIndex_System_ComponentModel_INotifyPropertyChanged: return 8;
+ case WinMDAdapter::RedirectedTypeIndex_System_Windows_Input_ICommand: return 9;
+ case WinMDAdapter::RedirectedTypeIndex_System_IDisposable: return 10;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ struct RedirectedInterfaceStubInfo
+ {
+ const BinderClassID m_WinRTInterface;
+ const int m_iCLRMethodCount;
+ const BinderMethodID *m_rCLRStubMethods;
+ const int m_iWinRTMethodCount;
+ const BinderMethodID *m_rWinRTStubMethods;
+ };
+
+ struct NonMscorlibRedirectedInterfaceInfo
+ {
+ const WinMDAdapter::FrameworkAssemblyIndex m_AssemblyIndex;
+ const LPCWSTR m_wzWinRTInterfaceTypeName;
+ const LPCWSTR m_wzCLRStubClassTypeName;
+ const LPCWSTR m_wzWinRTStubClassTypeName;
+ const LPCUTF8 *m_rszMethodNames;
+ };
+
+ enum
+ {
+ s_NumRedirectedInterfaces = 11
+ };
+
+ // Describes stubs used for marshaling of redirected interfaces.
+ const static RedirectedInterfaceStubInfo s_rInterfaceStubInfos[2 * s_NumRedirectedInterfaces];
+ const static NonMscorlibRedirectedInterfaceInfo s_rNonMscorlibInterfaceInfos[3];
+
+ const static int NON_MSCORLIB_MARKER = 0x80000000;
+#endif // !CLR_STANDALONE_BINDER
+};
+
+
+// Provides functionality related to redirecting WinRT delegates.
+class WinRTDelegateRedirector
+{
+public:
+ static MethodTable *GetWinRTTypeForRedirectedDelegateIndex(WinMDAdapter::RedirectedTypeIndex index);
+
+ static bool ResolveRedirectedDelegate(MethodTable *pMT, WinMDAdapter::RedirectedTypeIndex *pIndex);
+};
+
+#endif // WINRT_DELEGATE_REDIRECTOR_H
diff --git a/src/vm/winrtredirector.inl b/src/vm/winrtredirector.inl
new file mode 100644
index 0000000000..9e4210c2d9
--- /dev/null
+++ b/src/vm/winrtredirector.inl
@@ -0,0 +1,71 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: winrtredirector.inl
+//
+
+//
+
+//
+// ============================================================================
+
+#if !defined(WINRT_DELEGATE_REDIRECTOR_INL) && defined(WINRT_DELEGATE_REDIRECTOR_H)
+#define WINRT_DELEGATE_REDIRECTOR_INL
+
+#ifdef FEATURE_COMINTEROP
+
+/*static*/
+inline bool WinRTInterfaceRedirector::ResolveRedirectedInterface(MethodTable *pMT, WinMDAdapter::RedirectedTypeIndex * pIndex)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ WinMDAdapter::RedirectedTypeIndex index;
+ WinMDAdapter::WinMDTypeKind kind;
+
+ if (WinRTTypeNameConverter::ResolveRedirectedType(pMT, &index, &kind))
+ {
+ if ((kind == WinMDAdapter::WinMDTypeKind_Interface || kind == WinMDAdapter::WinMDTypeKind_PInterface) &&
+ // filter out KeyValuePair and Nullable which are structures projected from WinRT interfaces
+ index != WinMDAdapter::RedirectedTypeIndex_System_Collections_Generic_KeyValuePair &&
+ index != WinMDAdapter::RedirectedTypeIndex_System_Nullable)
+ {
+ if (pIndex != NULL)
+ {
+ *pIndex = index;
+ }
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*static */
+inline bool WinRTDelegateRedirector::ResolveRedirectedDelegate(MethodTable *pMT, WinMDAdapter::RedirectedTypeIndex *pIndex)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ WinMDAdapter::RedirectedTypeIndex index;
+ WinMDAdapter::WinMDTypeKind kind;
+
+ if (WinRTTypeNameConverter::ResolveRedirectedType(pMT, &index, &kind))
+ {
+ if (kind == WinMDAdapter::WinMDTypeKind_Delegate ||
+ kind == WinMDAdapter::WinMDTypeKind_PDelegate)
+ {
+ if (pIndex != NULL)
+ {
+ *pIndex = index;
+ }
+ return true;
+ }
+ }
+
+ return false;
+}
+
+#endif // FEATURE_COMINTEROP
+
+#endif // WINRT_DELEGATE_REDIRECTOR_INL
diff --git a/src/vm/winrttypenameconverter.cpp b/src/vm/winrttypenameconverter.cpp
new file mode 100644
index 0000000000..f3108a83ac
--- /dev/null
+++ b/src/vm/winrttypenameconverter.cpp
@@ -0,0 +1,940 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: WinRTTypeNameConverter.cpp
+//
+
+//
+
+//
+// ============================================================================
+
+#include "common.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "winrttypenameconverter.h"
+#include "typeresolution.h"
+
+
+struct RedirectedTypeNames
+{
+ LPCSTR szClrNamespace;
+ LPCSTR szClrName;
+ WinMDAdapter::FrameworkAssemblyIndex assembly;
+ WinMDAdapter::WinMDTypeKind kind;
+};
+
+#define DEFINE_PROJECTED_TYPE(szWinRTNS, szWinRTName, szClrNS, szClrName, nClrAsmIdx, ncontractAsmIndex, nWinRTIndex, nClrIndex, nWinMDTypeKind) \
+ { szClrNS, szClrName, WinMDAdapter::FrameworkAssembly_ ## nClrAsmIdx, WinMDAdapter::WinMDTypeKind_ ## nWinMDTypeKind },
+
+static const RedirectedTypeNames g_redirectedTypeNames[WinMDAdapter::RedirectedTypeIndex_Count] =
+{
+#include "winrtprojectedtypes.h"
+};
+
+#undef DEFINE_PROJECTED_TYPE
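+
+// For illustration: each DEFINE_PROJECTED_TYPE row in winrtprojectedtypes.h describes one
+// WinRT/CLR type pair, and the macro above keeps only the CLR-facing columns. A row mapping
+// Windows.Foundation.Collections.IIterable`1 to System.Collections.Generic.IEnumerable`1 would
+// expand here to roughly:
+//
+//     { "System.Collections.Generic", "IEnumerable`1",
+//       WinMDAdapter::FrameworkAssembly_Mscorlib, WinMDAdapter::WinMDTypeKind_PInterface },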
+
+
+//
+// Return the redirection index and type kind if the MethodTable* is a redirected type
+//
+bool WinRTTypeNameConverter::ResolveRedirectedType(MethodTable *pMT, WinMDAdapter::RedirectedTypeIndex * pIndex, WinMDAdapter::WinMDTypeKind * pKind /*=NULL*/)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ WinMDAdapter::RedirectedTypeIndex index = pMT->GetClass()->GetWinRTRedirectedTypeIndex();
+ if (index == WinMDAdapter::RedirectedTypeIndex_Invalid)
+ return false;
+
+ if (pIndex != NULL)
+ *pIndex = index;
+
+ if (pKind != NULL)
+ *pKind = g_redirectedTypeNames[index].kind;
+
+ return true;
+}
+
+#ifndef DACCESS_COMPILE
+
+class MethodTableListNode;
+
+// Information to help in generating a runtimeclass name for a managed type
+// implementing a generic WinRT interface
+struct WinRTTypeNameInfo
+{
+ MethodTableListNode* PreviouslyVisitedTypes;
+ CorGenericParamAttr CurrentTypeParameterVariance;
+
+ WinRTTypeNameInfo(MethodTableListNode* pPreviouslyVisitedTypes) :
+ PreviouslyVisitedTypes(pPreviouslyVisitedTypes),
+ CurrentTypeParameterVariance(gpNonVariant)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pPreviouslyVisitedTypes != nullptr);
+ }
+};
+
+// Helper data structure to build a stack allocated reverse linked list of MethodTables that we're examining
+// while building up WinRT runtimeclass name
+class MethodTableListNode
+{
+ MethodTable* m_pMT; // Type examined while building the runtimeclass name
+ MethodTableListNode* m_pPrevious; // Previous node in the list
+
+public:
+ MethodTableListNode(MethodTable* pMT, WinRTTypeNameInfo* pCurrent)
+ : m_pMT(pMT),
+ m_pPrevious(nullptr)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pMT != nullptr);
+
+ if (pCurrent != nullptr)
+ {
+ m_pPrevious = pCurrent->PreviouslyVisitedTypes;
+ }
+ }
+
+ bool Contains(MethodTable* pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (pMT == m_pMT)
+ {
+ return true;
+ }
+ else if (m_pPrevious == nullptr)
+ {
+ return false;
+ }
+ else
+ {
+ return m_pPrevious->Contains(pMT);
+ }
+ }
+};
+
+//
+// Append WinRT type name for the specified type handle
+//
+bool WinRTTypeNameConverter::AppendWinRTTypeNameForManagedType(
+ TypeHandle thManagedType,
+ SString &strWinRTTypeName,
+ bool bForGetRuntimeClassName,
+ bool *pbIsPrimitive)
+{
+ WRAPPER_NO_CONTRACT;
+ return AppendWinRTTypeNameForManagedType(thManagedType, strWinRTTypeName, bForGetRuntimeClassName, pbIsPrimitive, nullptr);
+}
+
+bool WinRTTypeNameConverter::AppendWinRTTypeNameForManagedType(
+ TypeHandle thManagedType,
+ SString &strWinRTTypeName,
+ bool bForGetRuntimeClassName,
+ bool *pbIsPrimitive,
+ WinRTTypeNameInfo *pCurrentTypeInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(!thManagedType.IsNull());
+ PRECONDITION(CheckPointer(pbIsPrimitive, NULL_OK));
+ PRECONDITION(CheckPointer(pCurrentTypeInfo, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (pbIsPrimitive)
+ *pbIsPrimitive = false;
+
+ MethodTable *pMT = thManagedType.GetMethodTable();
+ BOOL fIsIReference = FALSE, fIsIReferenceArray = FALSE;
+ if (pMT->GetNumGenericArgs() == 1)
+ {
+ fIsIReference = pMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__CLRIREFERENCEIMPL));
+ fIsIReferenceArray = pMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__CLRIREFERENCEARRAYIMPL));
+ }
+
+ WinMDAdapter::RedirectedTypeIndex index;
+ if (ResolveRedirectedType(pMT, &index))
+ {
+ // Redirected types
+ // Use the redirected WinRT name
+ strWinRTTypeName.Append(WinMDAdapter::GetRedirectedTypeFullWinRTName(index));
+ }
+ else if (fIsIReference || fIsIReferenceArray)
+ {
+ //
+ // Convert CLRIReferenceImpl<T>/CLRIReferenceArrayImpl<T> to a WinRT Type
+ //
+ // If GetRuntimeClassName = true, return IReference<T>/IReferenceArray<T>
+ // Otherwise, return T/IReferenceArray`1<T>
+ //
+ Instantiation inst = pMT->GetInstantiation();
+ _ASSERTE(inst.GetNumArgs() == 1);
+ TypeHandle th = inst[0];
+
+ // I'm sure there are ways to avoid duplication here but I prefer this way - it is easier to understand
+ if (fIsIReference)
+ {
+ if (bForGetRuntimeClassName)
+ {
+ //
+ // IReference<T>
+ //
+ strWinRTTypeName.Append(W("Windows.Foundation.IReference`1<"));
+
+ if (!AppendWinRTTypeNameForManagedType(
+ th,
+ strWinRTTypeName,
+ bForGetRuntimeClassName,
+ NULL
+ ))
+ return false;
+
+ strWinRTTypeName.Append(W('>'));
+
+ return true;
+ }
+ else
+ {
+ //
+ // T
+ //
+ return AppendWinRTTypeNameForManagedType(
+ th,
+ strWinRTTypeName,
+ bForGetRuntimeClassName,
+ pbIsPrimitive
+ );
+ }
+ }
+ else
+ {
+ //
+ // IReferenceArray<T>
+ //
+ strWinRTTypeName.Append(W("Windows.Foundation.IReferenceArray`1<"));
+
+ if (!AppendWinRTTypeNameForManagedType(
+ th,
+ strWinRTTypeName,
+ bForGetRuntimeClassName,
+ NULL))
+ return false;
+
+ strWinRTTypeName.Append(W('>'));
+
+ return true;
+ }
+ }
+ else if (pMT->IsProjectedFromWinRT() || pMT->IsExportedToWinRT())
+ {
+ //
+ // WinRT type
+ //
+ SString strTypeName;
+ pMT->_GetFullyQualifiedNameForClassNestedAware(strTypeName);
+ strWinRTTypeName.Append(strTypeName);
+ }
+ else if (AppendWinRTNameForPrimitiveType(pMT, strWinRTTypeName))
+ {
+ //
+ // WinRT primitive type, return immediately
+ //
+ if (pbIsPrimitive)
+ *pbIsPrimitive = true;
+ return true;
+ }
+ else if (pMT->IsArray())
+ {
+ if (bForGetRuntimeClassName)
+ {
+ //
+ // An array is not a valid WinRT type - it must be wrapped in IReferenceArray to be a valid
+ // WinRT type
+ //
+ return false;
+ }
+ else
+ {
+ //
+ // System.Type marshaling - convert array type into IReferenceArray<T>
+ //
+ strWinRTTypeName.Append(W("Windows.Foundation.IReferenceArray`1<"));
+
+ if (!AppendWinRTTypeNameForManagedType(thManagedType.AsArray()->GetArrayElementTypeHandle(), strWinRTTypeName, bForGetRuntimeClassName, NULL))
+ return false;
+
+ strWinRTTypeName.Append(W('>'));
+ }
+ }
+ else if (bForGetRuntimeClassName)
+ {
+ //
+ // Not a WinRT type or a WinRT Primitive type,
+ // but if it implements a WinRT interface we will return the interface name.
+ // Which interface should we return if it implements multiple WinRT interfaces?
+ // For now we return the top-most interface, and if there is more than one
+ // top-most interface, we return the first one we encounter during the interface enumeration.
+ //
+ //
+ // We also need to keep track of the types we've already considered, so we don't wind up in an
+ // infinite recursion processing generic interfaces.
+ // For example, in the case where we have:
+ //
+ // class ManagedType : IEnumerable<ManagedType>
+ //
+ // We do not want to keep recursing on the ManagedType type parameter. Instead, we should
+ // discover that we've already attempted to figure out what the best representation for
+ // ManagedType is, and bail out.
+ //
+ // This is a linear search, however that shouldn't generally be a problem, since generic
+ // nesting should not be very large in the common case.
+
+ if (pCurrentTypeInfo != nullptr && pCurrentTypeInfo->PreviouslyVisitedTypes->Contains(pMT))
+ {
+ // We should only be restricting this recursion on non-WinRT types that may have WinRT interfaces
+ _ASSERTE(!pMT->IsProjectedFromWinRT() && !pMT->IsExportedToWinRT() && !pMT->IsTruePrimitive());
+
+ // We have two choices. If this is a reference type and the interface parameter is covariant, we
+ // can use IInspectable as the closure. Otherwise, we need to simply fail out with no possible
+ // type name.
+ if (pCurrentTypeInfo->CurrentTypeParameterVariance == gpCovariant &&
+ thManagedType.IsBoxedAndCanCastTo(TypeHandle(g_pObjectClass), nullptr))
+ {
+ // Object is used in runtime class names for generics closed over IInspectable at the ABI
+ strWinRTTypeName.Append(W("Object"));
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ // This is the "top" most redirected interface implemented by pMT.
+ // E.g. if pMT implements both IList`1 and IEnumerable`1, we pick IList`1.
+
+ MethodTable* pTopIfaceMT = NULL;
+ WinMDAdapter::RedirectedTypeIndex idxTopIface = (WinMDAdapter::RedirectedTypeIndex)-1;
+
+ MethodTable::InterfaceMapIterator it = pMT->IterateInterfaceMap();
+ while (it.Next())
+ {
+ MethodTable* pIfaceMT = it.GetInterface();
+ if (ResolveRedirectedType(pIfaceMT, &index) ||
+ pIfaceMT->IsProjectedFromWinRT())
+ {
+ if (pTopIfaceMT == NULL || pIfaceMT->ImplementsInterface(pTopIfaceMT))
+ {
+ pTopIfaceMT = pIfaceMT;
+
+ // If pIfaceMT is not a redirected type, idxTopIface will contain garbage.
+ // But that is fine because we will only use idxTopIface if pTopIfaceMT
+ // is a redirected type.
+ idxTopIface = index;
+ }
+ }
+ }
+
+ if (pTopIfaceMT != NULL)
+ {
+ if (pTopIfaceMT->IsProjectedFromWinRT())
+ {
+ // Mscorlib contains copies of WinRT interfaces - don't return their names,
+ // instead return names of the corresponding interfaces in Windows.Foundation.winmd.
+
+ if (pTopIfaceMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__IKEYVALUEPAIR)))
+ strWinRTTypeName.Append(W("Windows.Foundation.Collections.IKeyValuePair`2"));
+ else if (pTopIfaceMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__IITERATOR)))
+ strWinRTTypeName.Append(W("Windows.Foundation.Collections.IIterator`1"));
+ else if (pTopIfaceMT->HasSameTypeDefAs(MscorlibBinder::GetClass(CLASS__IPROPERTYVALUE)))
+ strWinRTTypeName.Append(W("Windows.Foundation.IPropertyValue"));
+ else
+ {
+ SString strTypeName;
+ pTopIfaceMT->_GetFullyQualifiedNameForClassNestedAware(strTypeName);
+ strWinRTTypeName.Append(strTypeName);
+ }
+ }
+ else
+ strWinRTTypeName.Append(WinMDAdapter::GetRedirectedTypeFullWinRTName(idxTopIface));
+
+ // Since we are returning the typeName for the pTopIfaceMT we should use the same interfaceType
+ // to check for instantiation and creating the closed generic.
+ pMT = pTopIfaceMT;
+ }
+ else
+ return false;
+ }
+ else
+ {
+ //
+ // Non-WinRT type, Non-WinRT-Primitive type
+ //
+
+ return false;
+ }
+
+ // We allow typeName generation for only open types or completely instantiated types.
+ // In case it is a generic type definition like IList<T> we return the typeName as IVector'1 only
+ // and hence we do not need to visit the arguments.
+ if (pMT->HasInstantiation() && (!pMT->IsGenericTypeDefinition()))
+ {
+ // Add the current type we're trying to get a runtimeclass name for to the list of types
+ // we've already seen, so we can check for infinite recursion on the generic parameters.
+ MethodTableListNode examinedTypeList(thManagedType.GetMethodTable(), pCurrentTypeInfo);
+
+
+ strWinRTTypeName.Append(W('<'));
+
+ //
+ // Convert each argument
+ //
+ Instantiation inst = pMT->GetInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); ++i)
+ {
+ TypeHandle th = inst[i];
+
+ // We have a partial open type with us and hence we should throw.
+ if(th.ContainsGenericVariables())
+ COMPlusThrowArgumentException(W("th"), W("Argument_TypeNotValid"));
+
+ if (i > 0)
+ strWinRTTypeName.Append(W(','));
+
+ // In the recursive case, we can sometimes do a better job of getting a runtimeclass name if
+ // the actual instantiated type can be substituted for a different type due to variance on the
+ // generic type parameter. In order to allow that to occur when processing this parameter,
+ // make a note of the variance properties to pass along with the previously examined type list
+ WinRTTypeNameInfo currentParameterInfo(&examinedTypeList);
+ if (pMT->HasVariance())
+ {
+ currentParameterInfo.CurrentTypeParameterVariance = pMT->GetClass()->GetVarianceOfTypeParameter(i);
+ }
+
+ // Convert to WinRT type name
+ // If it is not a WinRT type, return immediately
+ if (!AppendWinRTTypeNameForManagedType(th, strWinRTTypeName, bForGetRuntimeClassName, NULL, &currentParameterInfo))
+ return false;
+ }
+
+ strWinRTTypeName.Append(W('>'));
+ }
+
+ return true;
+}
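+
+// For instance, assuming IList`1 redirects to Windows.Foundation.Collections.IVector`1, a managed
+// type whose best (top-most) redirected interface is IList<int> would come out of the routine
+// above as "Windows.Foundation.Collections.IVector`1<Int32>".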
+
+//
+// Lookup table : CorElementType -> WinRT primitive type name
+//
+LPCWSTR const s_wszCorElementTypeToWinRTNameMapping[] =
+{
+ NULL, // ELEMENT_TYPE_END = 0x0,
+ NULL, // ELEMENT_TYPE_VOID = 0x1,
+ W("Boolean"), // ELEMENT_TYPE_BOOLEAN = 0x2,
+ W("Char16"), // ELEMENT_TYPE_CHAR = 0x3,
+ NULL, // ELEMENT_TYPE_I1 = 0x4,
+ W("UInt8"), // ELEMENT_TYPE_U1 = 0x5,
+ W("Int16"), // ELEMENT_TYPE_I2 = 0x6,
+ W("UInt16"), // ELEMENT_TYPE_U2 = 0x7,
+ W("Int32"), // ELEMENT_TYPE_I4 = 0x8,
+ W("UInt32"), // ELEMENT_TYPE_U4 = 0x9,
+ W("Int64"), // ELEMENT_TYPE_I8 = 0xa,
+ W("UInt64"), // ELEMENT_TYPE_U8 = 0xb,
+ W("Single"), // ELEMENT_TYPE_R4 = 0xc,
+ W("Double"), // ELEMENT_TYPE_R8 = 0xd,
+ W("String"), // ELEMENT_TYPE_STRING = 0xe,
+ NULL, // ELEMENT_TYPE_PTR = 0xf,
+ NULL, // ELEMENT_TYPE_BYREF = 0x10,
+ NULL, // ELEMENT_TYPE_VALUETYPE = 0x11,
+ NULL, // ELEMENT_TYPE_CLASS = 0x12,
+ NULL, // ??? = 0x13,
+ NULL, // ELEMENT_TYPE_ARRAY = 0x14,
+ NULL, // ??? = 0x15,
+ NULL, // ELEMENT_TYPE_TYPEDBYREF = 0x16,
+ NULL, // ??? = 0x17,
+ NULL, // ELEMENT_TYPE_I = 0x18,
+ NULL, // ELEMENT_TYPE_U = 0x19,
+ NULL, // ??? = 0x1A,
+ NULL, // ELEMENT_TYPE_FNPTR = 0x1B,
+ W("Object"), // ELEMENT_TYPE_OBJECT = 0x1C,
+};
+
+//
+// Get predefined WinRT name for a primitive type
+//
+bool WinRTTypeNameConverter::GetWinRTNameForPrimitiveType(MethodTable *pMT, SString *pName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ PRECONDITION(CheckPointer(pName, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ CorElementType elemType = TypeHandle(pMT).GetSignatureCorElementType();
+
+ //
+ // Try to find it in a lookup table
+ //
+ if (elemType >= 0 && elemType < _countof(s_wszCorElementTypeToWinRTNameMapping))
+ {
+ LPCWSTR wszName = s_wszCorElementTypeToWinRTNameMapping[elemType];
+
+ if (wszName != NULL)
+ {
+ if (pName != NULL)
+ {
+ pName->SetLiteral(wszName);
+ }
+
+ return true;
+ }
+ }
+
+ if (elemType == ELEMENT_TYPE_VALUETYPE)
+ {
+ if (pMT->GetModule()->IsSystem() &&
+ IsTypeRefOrDef(g_GuidClassName, pMT->GetModule(), pMT->GetCl()))
+ {
+ if (pName != NULL)
+ {
+ pName->SetLiteral(W("Guid"));
+ }
+
+ return true;
+ }
+ }
+ else if (elemType == ELEMENT_TYPE_CLASS)
+ {
+ if (pMT == g_pObjectClass)
+ {
+ if (pName != NULL)
+ {
+ pName->SetLiteral(W("Object"));
+ }
+
+ return true;
+ }
+ if (pMT == g_pStringClass)
+ {
+ if (pName != NULL)
+ {
+ pName->SetLiteral(W("String"));
+ }
+
+ return true;
+ }
+ }
+
+ // it's not a primitive
+ return false;
+}
+
+//
+// Append the WinRT type name for the method table, if it is a WinRT primitive type
+//
+bool WinRTTypeNameConverter::AppendWinRTNameForPrimitiveType(MethodTable *pMT, SString &strName)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMT));
+ }
+ CONTRACTL_END;
+
+ SString strPrimitiveTypeName;
+ if (GetWinRTNameForPrimitiveType(pMT, &strPrimitiveTypeName))
+ {
+ strName.Append(strPrimitiveTypeName);
+ return true;
+ }
+
+ return false;
+}
+
+// static
+bool WinRTTypeNameConverter::IsRedirectedType(MethodTable *pMT, WinMDAdapter::WinMDTypeKind kind)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ WinMDAdapter::RedirectedTypeIndex index;
+ return (ResolveRedirectedType(pMT, &index) && (g_redirectedTypeNames[index].kind == kind));
+}
+
+//
+// Determine if the given type is redirected by doing only name comparisons. This is used to
+// calculate the redirected type index at EEClass creation time.
+//
+
+// static
+WinMDAdapter::RedirectedTypeIndex WinRTTypeNameConverter::GetRedirectedTypeIndexByName(
+ IMDInternalImport *pMDImport,
+ mdTypeDef token,
+ WinMDAdapter::FrameworkAssemblyIndex assemblyIndex)
+{
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+ PRECONDITION(CheckPointer(pMDImport));
+ }
+ CONTRACTL_END;
+
+ LPCSTR szName;
+ LPCSTR szNamespace;
+ IfFailThrow(pMDImport->GetNameOfTypeDef(token, &szName, &szNamespace));
+
+ // Check each of the redirected types to see if it is our type
+ for (int i = 0; i < COUNTOF(g_redirectedTypeNames); ++i)
+ {
+ // Do the fast checks first
+ if (g_redirectedTypeNames[i].assembly == assemblyIndex)
+ {
+ // This is in the right assembly, see if the name matches
+ if (strcmp(g_redirectedTypeNames[i].szClrName, szName) == 0 &&
+ strcmp(g_redirectedTypeNames[i].szClrNamespace, szNamespace) == 0)
+ {
+ return (WinMDAdapter::RedirectedTypeIndex)i;
+ }
+ }
+ }
+
+ return WinMDAdapter::RedirectedTypeIndex_Invalid;
+}
+
+struct WinRTPrimitiveTypeMapping
+{
+ BinderClassID binderID;
+ LPCWSTR wszWinRTName;
+};
+
+#define DEFINE_PRIMITIVE_TYPE_MAPPING(binderID, winrtTypeName) { binderID, L##winrtTypeName },
+
+//
+// Mapping : WinRT primitive type name -> BinderClassID, pre-sorted by name for the binary search below
+//
+const WinRTPrimitiveTypeMapping s_winRTPrimitiveTypeMapping[] =
+{
+ DEFINE_PRIMITIVE_TYPE_MAPPING(CLASS__BOOLEAN, "Boolean")
+ DEFINE_PRIMITIVE_TYPE_MAPPING(CLASS__CHAR, "Char16")
+ DEFINE_PRIMITIVE_TYPE_MAPPING(CLASS__DOUBLE, "Double")
+ DEFINE_PRIMITIVE_TYPE_MAPPING(CLASS__GUID, "Guid")
+ DEFINE_PRIMITIVE_TYPE_MAPPING(CLASS__INT16, "Int16")
+ DEFINE_PRIMITIVE_TYPE_MAPPING(CLASS__INT32, "Int32")
+ DEFINE_PRIMITIVE_TYPE_MAPPING(CLASS__INT64, "Int64")
+ DEFINE_PRIMITIVE_TYPE_MAPPING(CLASS__OBJECT, "Object")
+ DEFINE_PRIMITIVE_TYPE_MAPPING(CLASS__SINGLE, "Single")
+ DEFINE_PRIMITIVE_TYPE_MAPPING(CLASS__STRING, "String")
+ DEFINE_PRIMITIVE_TYPE_MAPPING(CLASS__UINT16, "UInt16")
+ DEFINE_PRIMITIVE_TYPE_MAPPING(CLASS__UINT32, "UInt32")
+ DEFINE_PRIMITIVE_TYPE_MAPPING(CLASS__UINT64, "UInt64")
+ DEFINE_PRIMITIVE_TYPE_MAPPING(CLASS__BYTE, "UInt8")
+};
+
+//
+// Return MethodTable* for the specified WinRT primitive type name
+//
+bool WinRTTypeNameConverter::GetMethodTableFromWinRTPrimitiveType(LPCWSTR wszTypeName, UINT32 uTypeNameLen, MethodTable **ppMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ppMT));
+ }
+ CONTRACTL_END;
+
+ if (uTypeNameLen >= 4 && uTypeNameLen <= 7)
+ {
+ //
+ // Binary search the lookup table
+ //
+ int begin = 0, end = _countof(s_winRTPrimitiveTypeMapping) - 1;
+ while (begin <= end)
+ {
+ _ASSERTE(begin >= 0 && begin <= _countof(s_winRTPrimitiveTypeMapping) - 1);
+ _ASSERTE(end >= 0 && end <= _countof(s_winRTPrimitiveTypeMapping) - 1);
+
+ int mid = (begin + end) / 2;
+ int ret = wcscmp(wszTypeName, s_winRTPrimitiveTypeMapping[mid].wszWinRTName);
+ if (ret == 0)
+ {
+ *ppMT = MscorlibBinder::GetClass(s_winRTPrimitiveTypeMapping[mid].binderID);
+ return true;
+ }
+ else if (ret > 0)
+ {
+ begin = mid + 1;
+ }
+ else
+ {
+ end = mid - 1;
+ }
+ }
+ }
+
+ // it's not a primitive
+ return false;
+}
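+
+// A minimal standalone sketch of the lookup pattern used above, with a hypothetical
+// table and names; only the search shape mirrors GetMethodTableFromWinRTPrimitiveType:
+//
+//   #include <cwchar>
+//
+//   struct Entry { const wchar_t* name; int id; };
+//
+//   // entries must stay sorted in wcscmp order, or the search silently misses
+//   static const Entry s_table[] = {
+//       { L"Boolean", 1 }, { L"Char16", 2 }, { L"Double", 3 }, { L"Int32", 4 },
+//   };
+//
+//   bool Lookup(const wchar_t* name, int* pId)
+//   {
+//       int begin = 0, end = (int)(sizeof(s_table) / sizeof(s_table[0])) - 1;
+//       while (begin <= end)
+//       {
+//           int mid = begin + (end - begin) / 2;  // overflow-safe midpoint
+//           int cmp = wcscmp(name, s_table[mid].name);
+//           if (cmp == 0) { *pId = s_table[mid].id; return true; }
+//           if (cmp > 0) begin = mid + 1; else end = mid - 1;
+//       }
+//       return false;  // not a primitive
+//   }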
+
+// Is the specified MethodTable a redirected WinRT type?
+bool WinRTTypeNameConverter::IsRedirectedWinRTSourceType(MethodTable *pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!pMT->IsProjectedFromWinRT())
+ return false;
+
+ // redirected types are hidden (made internal) by the adapter
+ if (IsTdPublic(pMT->GetClass()->GetProtection()))
+ return false;
+
+ DefineFullyQualifiedNameForClassW();
+ LPCWSTR pszName = GetFullyQualifiedNameForClassW_WinRT(pMT);
+
+ return !!WinMDAdapter::ConvertWellKnownFullTypeNameFromWinRTToClr(&pszName, NULL);
+}
+
+//
+// Get TypeHandle from a WinRT type name
+// Parse the WinRT type name in the form of WinRTType=TypeName[<WinRTType[, WinRTType, ...]>]
+//
+TypeHandle WinRTTypeNameConverter::GetManagedTypeFromWinRTTypeName(LPCWSTR wszWinRTTypeName, bool *pbIsPrimitive)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(wszWinRTTypeName));
+ PRECONDITION(CheckPointer(pbIsPrimitive, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ SString ssTypeName(SString::Literal, wszWinRTTypeName);
+
+ TypeHandle th = GetManagedTypeFromWinRTTypeNameInternal(&ssTypeName, pbIsPrimitive);
+ if (th.IsNull())
+ {
+ COMPlusThrowArgumentException(W("typeName"), NULL);
+ }
+
+ return th;
+}
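+
+// Hypothetical usage sketch (the type name argument is illustrative):
+//
+//   bool fIsPrimitive;
+//   TypeHandle th = WinRTTypeNameConverter::GetManagedTypeFromWinRTTypeName(
+//       W("Windows.Foundation.Collections.IVector`1<String>"), &fIsPrimitive);
+//   // throws an ArgumentException for "typeName" if the name cannot be resolved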
+
+// Helper used by code:GetWinRTType to compose a generic type from an array of components.
+// For example [IDictionary`2, int, IList`1, string] yields IDictionary`2<int, IList`1<string>>.
+static TypeHandle ComposeTypeRecursively(CQuickArray<TypeHandle> &rqPartTypes, DWORD *pIndex)
+{
+ CONTRACTL
+ {
+ MODE_ANY;
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(*pIndex < rqPartTypes.Size());
+ }
+ CONTRACTL_END;
+
+ DWORD index = (*pIndex)++;
+ TypeHandle th = rqPartTypes[index];
+
+ if (th.HasInstantiation())
+ {
+ DWORD dwArgCount = th.GetNumGenericArgs();
+ for (DWORD i = 0; i < dwArgCount; i++)
+ {
+ // we scan rqPartTypes linearly so we know that the elements can be reused
+ rqPartTypes[i + index] = ComposeTypeRecursively(rqPartTypes, pIndex);
+ }
+
+ Instantiation inst(rqPartTypes.Ptr() + index, dwArgCount);
+ th = th.Instantiate(inst);
+ }
+ else if (th == g_pArrayClass)
+ {
+ // Support for arrays
+ rqPartTypes[index] = ComposeTypeRecursively(rqPartTypes, pIndex);
+ th = ClassLoader::LoadArrayTypeThrowing(rqPartTypes[index], ELEMENT_TYPE_SZARRAY, 1);
+ }
+
+ return th;
+}
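+
+// Worked example of the recursion above, using the comment's own sample input:
+// with rqPartTypes = [IDictionary`2, int, IList`1, string] and *pIndex = 0, the
+// call saves IDictionary`2 and then fills its two argument slots in place:
+//   slot 0 <- compose(starting at index 1) = int
+//   slot 1 <- compose(starting at index 2) = IList`1<string>  (consumes index 3 too)
+// and finally instantiates IDictionary`2 over slots 0..1, returning
+// IDictionary`2<int, IList`1<string>> with *pIndex == 4 == rqPartTypes.Size().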
+
+#ifdef CROSSGEN_COMPILE
+//
+// In crossgen, we use a mockup of RoParseTypeName since we need to run on pre-Win8 machines.
+//
+extern "C" HRESULT WINAPI CrossgenRoParseTypeName(SString* typeName, DWORD *partsCount, SString **typeNameParts);
+#endif
+
+//
+// Return TypeHandle for the specified WinRT type name (supports generic types)
+// Parses the type name string and resolves each component recursively
+//
+TypeHandle WinRTTypeNameConverter::GetManagedTypeFromWinRTTypeNameInternal(SString *ssTypeName, bool *pbIsPrimitive)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ssTypeName));
+ PRECONDITION(CheckPointer(pbIsPrimitive, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (pbIsPrimitive)
+ *pbIsPrimitive = false;
+
+ if (ssTypeName->IsEmpty())
+ return TypeHandle();
+
+ TypeHandle typeHandle;
+
+ SString::Iterator it = ssTypeName->Begin();
+ if (ssTypeName->Find(it, W('<')))
+ {
+ // this is a generic type - use RoParseTypeName to break it down into components
+ CQuickArray<TypeHandle> rqPartTypes;
+
+#ifndef CROSSGEN_COMPILE
+
+ DWORD dwPartsCount = 0;
+ HSTRING *rhsPartNames;
+
+ CoTaskMemHSTRINGArrayHolder hsNamePartsHolder;
+ IfFailThrow(RoParseTypeName(WinRtStringRef(ssTypeName->GetUnicode(), ssTypeName->GetCount()), &dwPartsCount, &rhsPartNames));
+ hsNamePartsHolder.Init(rhsPartNames, dwPartsCount);
+
+ rqPartTypes.AllocThrows(dwPartsCount);
+
+ // load the components
+ for (DWORD i = 0; i < dwPartsCount; i++)
+ {
+ UINT32 cchPartLength;
+ PCWSTR wszPart = WindowsGetStringRawBuffer(rhsPartNames[i], &cchPartLength);
+
+ StackSString ssPartName(wszPart, cchPartLength);
+ rqPartTypes[i] = GetManagedTypeFromSimpleWinRTNameInternal(&ssPartName, NULL);
+ }
+
+#else //CROSSGEN_COMPILE
+
+ //
+ // In crossgen, we use a mockup of RoParseTypeName since we need to run on pre-Win8 machines.
+ //
+ DWORD dwPartsCount = 0;
+ SString *rhsPartNames;
+
+ IfFailThrow(CrossgenRoParseTypeName(ssTypeName, &dwPartsCount, &rhsPartNames));
+ _ASSERTE(rhsPartNames != nullptr);
+
+ rqPartTypes.AllocThrows(dwPartsCount);
+
+ // load the components
+ for (DWORD i = 0; i < dwPartsCount; i++)
+ {
+ rqPartTypes[i] = GetManagedTypeFromSimpleWinRTNameInternal(&rhsPartNames[i], NULL);
+ }
+
+ delete[] rhsPartNames;
+
+#endif //CROSSGEN_COMPILE
+
+ // and instantiate the generic type
+ DWORD dwIndex = 0;
+ typeHandle = ComposeTypeRecursively(rqPartTypes, &dwIndex);
+
+ _ASSERTE(dwIndex == rqPartTypes.Size());
+
+ return typeHandle;
+ }
+ else
+ {
+ return GetManagedTypeFromSimpleWinRTNameInternal(ssTypeName, pbIsPrimitive);
+ }
+}
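+
+// For illustration (the concrete name is an assumption): RoParseTypeName decomposes
+//   Windows.Foundation.Collections.IMap`2<String, Windows.Foundation.Collections.IVector`1<Int32>>
+// into the depth-first parts
+//   [IMap`2, String, IVector`1, Int32]   (each fully qualified in practice)
+// each of which is resolved by GetManagedTypeFromSimpleWinRTNameInternal before
+// ComposeTypeRecursively reassembles the instantiated managed type.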
+
+//
+// Return TypeHandle for the specified WinRT type name (non-generic types only)
+// Handles redirected types, WinRT primitives, the IReferenceArray`1 case, and regular WinRT types
+//
+TypeHandle WinRTTypeNameConverter::GetManagedTypeFromSimpleWinRTNameInternal(SString *ssTypeName, bool *pbIsPrimitive)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(ssTypeName));
+ PRECONDITION(CheckPointer(pbIsPrimitive, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (pbIsPrimitive)
+ *pbIsPrimitive = false;
+
+ if (ssTypeName->IsEmpty())
+ return TypeHandle();
+
+ //
+ // Redirection
+ //
+ LPCWSTR pwszTypeName = ssTypeName->GetUnicode();
+ WinMDAdapter::RedirectedTypeIndex uIndex;
+ MethodTable *pMT = NULL;
+
+ if (WinMDAdapter::ConvertWellKnownFullTypeNameFromWinRTToClr(&pwszTypeName, &uIndex))
+ {
+ //
+ // Well known redirected types
+ //
+ return TypeHandle(GetAppDomain()->GetRedirectedType(uIndex));
+ }
+ else if (GetMethodTableFromWinRTPrimitiveType(pwszTypeName, ssTypeName->GetCount(), &pMT))
+ {
+ //
+ // Primitive type
+ //
+ if (pbIsPrimitive)
+ *pbIsPrimitive = true;
+ return TypeHandle(pMT);
+ }
+ else if (wcscmp(pwszTypeName, W("Windows.Foundation.IReferenceArray`1")) == 0)
+ {
+ //
+ // Handle array case - return the array and we'll create the array later
+ //
+ return TypeHandle(g_pArrayClass);
+ }
+ else
+ {
+ //
+ // A regular WinRT type
+ //
+ return GetWinRTType(ssTypeName, TRUE);
+ }
+}
+
+#endif // !DACCESS_COMPILE
+#endif // FEATURE_COMINTEROP
diff --git a/src/vm/winrttypenameconverter.h b/src/vm/winrttypenameconverter.h
new file mode 100644
index 0000000000..53cb5ebfe0
--- /dev/null
+++ b/src/vm/winrttypenameconverter.h
@@ -0,0 +1,127 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//
+// File: WinRTTypeNameConverter.h
+//
+// ============================================================================
+
+#ifndef FEATURE_COMINTEROP
+#error This file should only be included when FEATURE_COMINTEROP is defined
+#endif
+
+#pragma once
+
+#include "..\md\winmd\inc\adapter.h"
+
+struct WinRTTypeNameInfo;
+
+//
+// Converts between a WinRT type name and TypeHandle
+//
+class WinRTTypeNameConverter
+{
+public :
+ //==============================================================================================
+ // Managed -> WinRT
+ //==============================================================================================
+
+ //
+ // Append WinRT type name for the specified type handle
+ //
+ static bool AppendWinRTTypeNameForManagedType(
+ TypeHandle thManagedType,
+ SString &strWinRTTypeName,
+ bool bForGetRuntimeClassName,
+ bool *pbIsPrimitive);
+
+ //
+ // Return the redirection index and type kind if the MethodTable* is a redirected type
+ //
+ static bool ResolveRedirectedType(MethodTable *pMT, WinMDAdapter::RedirectedTypeIndex * pIndex, WinMDAdapter::WinMDTypeKind * pKind = NULL);
+
+ //
+ // Append the WinRT type name for the method table, if it is a WinRT primitive type
+ //
+ static bool AppendWinRTNameForPrimitiveType(MethodTable *pMT, SString &strName);
+
+ //
+    // Is the specified MethodTable a WinRT primitive type?
+ //
+ static bool IsWinRTPrimitiveType(MethodTable *pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ return GetWinRTNameForPrimitiveType(pMT, NULL);
+ }
+
+ //
+ // Is the specified MethodTable a redirected CLR type?
+ //
+ static bool IsRedirectedType(MethodTable *pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+ return ResolveRedirectedType(pMT, NULL);
+ }
+
+ static bool IsRedirectedType(MethodTable *pMT, WinMDAdapter::WinMDTypeKind kind);
+
+ //
+    // Determine if the given type is redirected by doing only name comparisons. This is used to
+ // calculate the redirected type index at EEClass creation time.
+ //
+ static WinMDAdapter::RedirectedTypeIndex GetRedirectedTypeIndexByName(
+ IMDInternalImport *pMDImport,
+ mdTypeDef token,
+ WinMDAdapter::FrameworkAssemblyIndex assemblyIndex);
+public :
+ //==============================================================================================
+ // WinRT -> Managed
+ //==============================================================================================
+
+ //
+ // Is the specified MethodTable a redirected WinRT type?
+ //
+ static bool IsRedirectedWinRTSourceType(MethodTable *pMT);
+
+ //
+ // Get TypeHandle from a WinRT type name
+ // Parse the WinRT type name in the form of WinRTType=TypeName[<WinRTType[, WinRTType, ...]>]
+ //
+ static TypeHandle GetManagedTypeFromWinRTTypeName(LPCWSTR wszWinRTTypeName, bool *pbIsPrimitive);
+
+private :
+
+ //
+ // Get predefined WinRT name for a primitive type
+ //
+ static bool GetWinRTNameForPrimitiveType(MethodTable *pMT, SString *pName);
+
+ //
+ // Return MethodTable* for the specified WinRT primitive type name
+ //
+ static bool GetMethodTableFromWinRTPrimitiveType(LPCWSTR wszTypeName, UINT32 uTypeNameLen, MethodTable **ppMT);
+
+ //
+    // Return TypeHandle for the specified WinRT type name (supports generic types)
+    // Parses the type name string and resolves each component recursively
+ //
+ static TypeHandle GetManagedTypeFromWinRTTypeNameInternal(SString *ssTypeName, bool *pbIsPrimitive);
+
+ //
+    // Return TypeHandle for the specified WinRT type name (non-generic types only)
+    // Handles redirected types, WinRT primitives, the IReferenceArray`1 case, and regular WinRT types
+ //
+ static TypeHandle GetManagedTypeFromSimpleWinRTNameInternal(SString *ssTypeName, bool *pbIsPrimitive);
+
+ static bool AppendWinRTTypeNameForManagedType(
+ TypeHandle thManagedType,
+ SString &strWinRTTypeName,
+ bool bForGetRuntimeClassName,
+ bool *pbIsPrimitive,
+ WinRTTypeNameInfo *pCurrentTypeInfo);
+};
diff --git a/src/vm/wks/.gitmirror b/src/vm/wks/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/vm/wks/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror. \ No newline at end of file
diff --git a/src/vm/wks/CMakeLists.txt b/src/vm/wks/CMakeLists.txt
new file mode 100644
index 0000000000..2f7c16d69f
--- /dev/null
+++ b/src/vm/wks/CMakeLists.txt
@@ -0,0 +1,44 @@
+add_library(cee_wks ${VM_SOURCES_WKS} ${VM_SOURCES_WKS_AMD64_ASM})
+
+if (WIN32)
+
+# Get the current list of definitions
+get_compile_definitions(DEFINITIONS)
+
+get_directory_property(COMPILE_DEFINITIONS_LIST COMPILE_DEFINITIONS)
+
+# Extract the definitions for the ASM code. Since there is a bug in CMake that prevents us from
+# using generator expressions here, we split the definitions into per-configuration lists.
+foreach(DEFINITION ${COMPILE_DEFINITIONS_LIST})
+ if (${DEFINITION} MATCHES "^\\$<\\$<CONFIG:([^>]+)>:([^>]+)>$")
+ # The entry contains generator expression, so insert the definition into a definitions list
+ # corresponding to the config
+ string(TOUPPER ${CMAKE_MATCH_1} CONFIG)
+ set(ASM_DEFINITIONS_${CONFIG} ${ASM_DEFINITIONS_${CONFIG}} ${CMAKE_MATCH_2})
+ else()
+ set(ASM_DEFINITIONS ${ASM_DEFINITIONS} ${DEFINITION})
+ endif()
+endforeach()
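+
+# For example (illustrative values): a definition that arrives as
+#   $<$<CONFIG:Debug>:_DEBUG>
+# is routed into ASM_DEFINITIONS_DEBUG as _DEBUG, while a plain entry such as
+#   FEATURE_COMINTEROP
+# stays in the configuration-independent ASM_DEFINITIONS list.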
+
+# Add defines for the ASM. Unfortunately CMake ignores compile definitions set on the target
+# for ASM sources, so we have to set them on the sources directly.
+set_property(SOURCE ${VM_SOURCES_WKS_AMD64_ASM} PROPERTY COMPILE_DEFINITIONS ${ASM_DEFINITIONS})
+foreach(CONFIG ${CMAKE_CONFIGURATION_TYPES})
+ string(TOUPPER ${CONFIG} CONFIG)
+ set_property(SOURCE ${VM_SOURCES_WKS_AMD64_ASM} PROPERTY COMPILE_DEFINITIONS_${CONFIG} ${ASM_DEFINITIONS_${CONFIG}})
+endforeach()
+
+# Convert AsmConstants.h into AsmConstants.inc
+find_program(POWERSHELL powershell)
+if (POWERSHELL STREQUAL "POWERSHELL-NOTFOUND")
+ message(FATAL_ERROR "POWERSHELL not found")
+endif()
+
+add_custom_command(
+ # The AsmConstants.inc will be built in the pre-build phase of the cee_wks build
+ TARGET cee_wks PRE_BUILD
+ COMMAND ${POWERSHELL} -NoProfile -ExecutionPolicy Bypass ${VM_DIR}/h2inc.ps1 ${VM_DIR}/amd64/asmconstants.h >${CMAKE_CURRENT_BINARY_DIR}/AsmConstants.tmp
+ COMMAND ${CMAKE_CXX_COMPILER} ${DEFINITIONS} /EP ${CMAKE_CURRENT_BINARY_DIR}/AsmConstants.tmp >${CMAKE_CURRENT_BINARY_DIR}/AsmConstants.inc
+)
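+
+# A note on the pipeline above (assumed behavior of h2inc.ps1, not verified here):
+# it rewrites C preprocessor constants into assembler syntax, so a hypothetical
+#   #define OFFSETOF__Thread__m_State 0x10
+# in asmconstants.h would come out as something like
+#   OFFSETOF__Thread__m_State equ 10h
+# and the compiler's /EP pass then expands any remaining preprocessor conditionals.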
+
+endif (WIN32)
diff --git a/src/vm/wks/wks.nativeproj b/src/vm/wks/wks.nativeproj
new file mode 100644
index 0000000000..d91655dbd2
--- /dev/null
+++ b/src/vm/wks/wks.nativeproj
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003" ToolsVersion="dogfood">
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\src\vm\vm.settings" />
+ <PropertyGroup Label="Globals">
+ <SccProjectName>SAK</SccProjectName>
+ <SccAuxPath>SAK</SccAuxPath>
+ <SccLocalPath>SAK</SccLocalPath>
+ <SccProvider>SAK</SccProvider>
+ </PropertyGroup>
+ <PropertyGroup>
+ <BuildCoreBinaries>true</BuildCoreBinaries>
+ <BuildSysBinaries>true</BuildSysBinaries>
+ <OutputName>cee_wks</OutputName>
+ </PropertyGroup>
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\src\vm\wks\wks.targets" />
+ <ItemGroup>
+ <DataFile Include="$(VmSourcesDir)\mdaboilerplate.exe.mda.config" />
+ <DataFile Condition="'$(BuildArchitecture)'=='amd64'" Include="$(Amd64SourcesDir)\CLRErrorReporting.vrg" />
+ <DataFile Condition="'$(BuildArchitecture)'=='i386'" Include="$(I386SourcesDir)\CLRErrorReporting.vrg" />
+ </ItemGroup>
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\src\vm\vm.targets" />
+</Project>
diff --git a/src/vm/wks/wks.targets b/src/vm/wks/wks.targets
new file mode 100644
index 0000000000..55b404c0e7
--- /dev/null
+++ b/src/vm/wks/wks.targets
@@ -0,0 +1,386 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+
+ <PropertyGroup>
+ <Win32WinNt>0x0602</Win32WinNt>
+ </PropertyGroup>
+
+ <ItemGroup>
+ <CppCompile Include="$(VmSourcesDir)\class.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\AppDomain.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\AppDomainHelper.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\AppDomainNative.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\appdomainstack.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\AppXUtil.cpp" Condition="'$(FeatureAppX)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\aptca.cpp" Condition="'$(FeatureAptca)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\array.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Assembly.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\AssemblyName.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\AssemblyNative.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\AssemblySpec.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\AssemblySink.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\binder.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\cachelinealloc.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ceeload.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ceemain.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\certificatecache.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\classhash.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\clrex.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CLRPrivBinderUtil.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CLRPrivBinderAppX.cpp" Condition="'$(FeatureAppXBinder)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\CLRPrivBinderFusion.cpp" Condition="'$(FeatureFusion)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\CLRPrivBinderLoadFile.cpp" Condition="'$(FeatureCoreclr)' != 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\CLRPrivBinderReflectionOnlyWinRT.cpp" Condition="'$(FeatureCoreclr)' != 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\CLRPrivBinderWinRT.cpp" Condition="'$(FeatureCominterop)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\CLRPrivTypeCacheWinRT.cpp" Condition="'$(FeatureCominterop)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\CLRPrivTypeCacheReflectionOnlyWinRT.cpp" Condition="'$(FeatureCoreclr)' != 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\clsload.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CLRVarArgs.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\codeman.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\compile.cpp" Condition="'$(FeatureNativeImageGeneration)'=='true'" />
+ <CppCompile Include="$(VmSourcesDir)\ConfigHelper.cpp" Condition="'$(FeatureCoreclr)' != 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\ConstrainedExecutionRegion.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMDateTime.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMDelegate.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMDynamic.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMIsolatedStorage.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMMemoryFailPoint.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMMethodRental.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Domainfile.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\testhookmgr.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\BaseAssemblySpec.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\corebindresult.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\coreassemblyspec.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMDependentHandle.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMModule.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CompactLayoutWriter.cpp" Condition="'$(MDILGenerator)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\MarshalNative.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CompatibilitySwitch.cpp"/>
+ <CppCompile Include="$(VmSourcesDir)\COMSynchronizable.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMThreadPool.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMUtilNative.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMWaitHandle.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CorHost.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CustomMarshalerInfo.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CrossDomainCalls.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\callhelpers.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\crst.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\contexts.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CustomAttribute.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\dataimage.cpp" Condition="'$(FeatureNativeImageGeneration)'=='true'"/>
+ <CppCompile Include="$(VmSourcesDir)\debugHelp.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\decodeMD.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\DebugDebugger.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\DebugInfoStore.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\DynamicMethod.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ecall.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\eeconfig.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\AssemblyNamesConfigFactory.cpp" Condition="'$(FeatureCoreclr)' != 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\ngenoptout.cpp" Condition="'$(FeatureCoreclr)' != 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\eeconfigfactory.cpp" Condition="'$(FeatureCoreclr)' != 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\eecontract.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\eedbginterfaceimpl.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\eehash.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\EEMessageBox.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\EEPolicy.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\EEToProfInterfaceImpl.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\EETwain.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\eventreporter.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\EventStore.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\eventtrace.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\excep.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ExState.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\fcall.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Field.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\finalizerthread.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\formattype.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\fptrstubs.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\frames.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\FrameworkExceptionLoader.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ThreadPoolRequest.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\TypeEquivalenceHash.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\fusioninit.cpp" Condition="'$(FeatureFusion)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\FusionSink.cpp" Condition="'$(FeatureFusion)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\FusionBind.cpp" Condition="'$(FeatureFusion)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\GCDecode.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\gcenv.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\gchelpers.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\gchost.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\genericdict.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\generics.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\genmeth.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\hash.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\hillclimbing.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\hosting.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\HostExecutionContext.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\IBCLogger.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ILMarshalers.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ILStubCache.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ILStubResolver.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\instmethhash.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\InteropConverter.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\interoputil.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\interpreter.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\inlinetracking.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\WinRTTypeNameConverter.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\InvokeUtil.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\contractImpl.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\jithelpers.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\JITInterface.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\LoaderAllocator.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ListLock.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\marvin32.cpp" Condition="'$(FeatureRandomizedStringHashing)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\memberload.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ManagedMdImport.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\message.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Method.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\MethodIter.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\MethodImpl.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\MethodTable.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\methodtablebuilder.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\mixedmode.cpp" Condition="'$(FeatureMixedMode)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\mscorlib.cpp">
+ <DisablePrecompiledHeaders>true</DisablePrecompiledHeaders>
+ </CppCompile>
+ <CppCompile Include="$(VmSourcesDir)\stubcache.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\mlinfo.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\NativeOverlapped.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\NewCompressedStack.cpp" Condition="'$(FeatureCompressedstack)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\DllImport.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\DllImportCallback.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\FieldMarshaler.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\object.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ObjectClone.cpp" Condition="'$(FeatureRemoting)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\ObjectList.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\OleVariant.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\PEFile.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\PEFingerprint.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\PEImage.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\PEImageLayout.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\pendingload.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\PerfDefaults.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Prestub.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Precode.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ProfilerMetadataEmitValidator.cpp"/>
+ <CppCompile Include="$(VmSourcesDir)\MulticoreJIT.cpp" Condition="'$(FeatureMulticoreJIT)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\MulticoreJITPlayer.cpp" Condition="'$(FeatureMulticoreJIT)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\ProfilingEnumerators.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ProfilingHelper.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ProfToEEInterfaceImpl.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\QCall.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ReadyToRunInfo.cpp" Condition="'$(FeatureReadyToRun)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\ReflectClassWriter.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ReflectionInvocation.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ReJit.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\remoting.cpp" Condition="'$(FeatureRemoting)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\RtlFunctions.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\RuntimeHandles.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\rwlock.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SafeHandle.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\security.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityAttributes.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityConfig.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityDeclarative.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityDeclarativeCache.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityDescriptor.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityHostProtection.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityDescriptorAppdomain.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityDescriptorAssembly.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\securitymeta.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityImperative.cpp" Condition="'$(FeatureCoreclr)' != 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityPolicy.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityStackWalk.cpp" Condition="'$(FeatureCasPolicy)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityTransparentAssembly.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\siginfo.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SigFormat.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SimpleRWLock.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SourceLine.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\spinlock.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\StackingAllocator.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\stacksampler.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\stackwalk.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\StackBuilderSink.cpp" Condition="'$(FeatureRemoting)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\StackCompressor.cpp" Condition="'$(FeatureCompressedstack)' == 'true'" />
+ <CppCompile Include="$(VmSourcesDir)\stublink.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\StringLiteralMap.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\stubmgr.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\stubgen.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\stubhelpers.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\syncblk.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\synch.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SyncClean.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\synchronizationcontextnative.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ThreadDebugBlockingInfo.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\threads.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\threadsuspend.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\threadstatics.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\typectxt.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\typedesc.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\typehandle.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\typehash.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\typeparse.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\typestring.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\umthunkhash.cpp" Condition="'$(FeatureMixedMode)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\util.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\validator.cpp" Condition="'$(FeatureCoreclr)' != 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\vars.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\verifier.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\VirtualCallStub.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\WeakReferenceNative.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Win32Threadpool.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\zapsig.cpp" />
+ </ItemGroup>
+
+ <ItemGroup>
+ <CppCompile Include="$(ClrSrcDirectory)\gc\gccommon.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\gcEEsvr.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\gcEEwks.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\gcsvr.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\gcwks.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\gcscan.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\handletable.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\handletableCache.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\handletableCore.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\handletableScan.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\gc\objecthandle.cpp" />
+ </ItemGroup>
+
+ <!-- SOURCES_NONPAL -->
+ <ItemGroup>
+ <CppCompile Include="$(VmSourcesDir)\AssemblyNativeResource.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\coverage.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\DbgGcInfoDecoder.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\dwreport.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\EnCEE.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ExceptionHandling.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\gcCover.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\GcInfoDecoder.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\mda.cpp" Condition="'$(FeatureMdaSupported)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\mdaassistants.cpp" Condition="'$(FeatureMdaSupported)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\mdadac.cpp" Condition="'$(FeatureMdaSupported)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\ProfAttach.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ProfAttachClient.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ProfAttachServer.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ProfDetach.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\SecurityPrincipal.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\Crypto\SHA1.cpp" Condition="'$(FeatureCoreclr)' != 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\SHA1.cpp" Condition="'$(FeatureCoreclr)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\StackProbe.cpp" Condition="'$(FeatureStackProbe)' == 'true'" />
+ </ItemGroup>
+ <ItemGroup Condition="'$(FeatureCominterop)' == 'true'">
+ <CppCompile Include="$(VmSourcesDir)\classcompat.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ClassFactory.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ComCache.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMtoCLRCall.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ComCallableWrapper.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ComConnectionPoints.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMInterfaceMarshaler.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ComMTMemberInfoMap.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\CLRtoCOMCall.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\RuntimeCallableWrapper.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\COMTypeLibConverter.cpp" Condition="'$(FeatureCominteropTlbSupport)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\DispatchInfo.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\DispParamMarshaler.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ExtensibleClassFactory.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\mngstdinterfaces.cpp" />
+ <CCompile Include="$(VmSourcesDir)\Microsoft.ComServices_i.c" />
+ <CppCompile Include="$(VmSourcesDir)\NotifyExternals.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\OleContextHelpers.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\stdinterfaces.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\stdinterfaces_wrapper.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\tlbexport.cpp" Condition="'$(FeatureCominteropTlbSupport)' == 'true'"/>
+ <CppCompile Include="$(VmSourcesDir)\rcwwalker.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\rcwrefcache.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\WinRtHelpers.cpp" />
+ </ItemGroup>
+ <!-- AMD64_SOURCES -->
+ <ItemGroup Condition="'$(BuildArchitecture)' == 'amd64'">
+ <CppCompile Include="$(Amd64SourcesDir)\cGenAMD64.cpp" />
+ <CppCompile Include="$(Amd64SourcesDir)\ExcepAMD64.cpp" />
+ <CppCompile Include="$(Amd64SourcesDir)\gmsAMD64.cpp" />
+ <CppCompile Include="$(Amd64SourcesDir)\JitHelpersAMD64.cpp" />
+ <CppCompile Include="$(Amd64SourcesDir)\JITinterfaceAMD64.cpp" />
+ <CppCompile Include="$(Amd64SourcesDir)\Profiler.cpp" />
+ <CppCompile Include="$(Amd64SourcesDir)\RemotingAMD64.cpp" Condition="'$(FeatureRemoting)' == 'true'" />
+ <CppCompile Include="$(Amd64SourcesDir)\StubLinkerAMD64.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\JITinterfaceGen.cpp" />
+ </ItemGroup>
+ <ItemGroup Condition="'$(BuildArchitecture)' == 'amd64'">
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\AsmHelpers.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\CallDescrWorkerAMD64.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\ComCallPreStub.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\CrtHelpers.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\GenericComCallStubs.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\GenericComPlusCallStubs.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\getstate.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\InstantiatingStub.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\JitHelpers_Fast.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\JitHelpers_FastWriteBarriers.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\JitHelpers_InlineGetAppDomain.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\JitHelpers_InlineGetThread.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\JitHelpers_Slow.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\PInvokeStubs.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\RedirectedHandledJITCase.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\RemotingThunksAMD64.asm" Condition="'$(FeatureRemoting)' == 'true'" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\ThePreStubAMD64.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\ExternalMethodFixupThunk.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\TlsGetters.asm" Condition="'$(FeatureImplicitTls)' != 'true'" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\UMThunkStub.asm" />
+ <AssembleAmd64 Include="$(Amd64SourcesDir)\VirtualCallStubAMD64.asm" />
+ </ItemGroup>
+ <!-- I386_SOURCES -->
+ <ItemGroup Condition="'$(BuildArchitecture)' == 'i386'">
+ <CppCompile Include="$(I386SourcesDir)\cgenx86.cpp" />
+ <CppCompile Include="$(I386SourcesDir)\ExcepX86.cpp" />
+ <CppCompile Include="$(I386SourcesDir)\gmsx86.cpp" />
+ <CppCompile Include="$(I386SourcesDir)\JITinterfaceX86.cpp" />
+ <CppCompile Include="$(I386SourcesDir)\Profiler.cpp" />
+ <CppCompile Include="$(I386SourcesDir)\remotingx86.cpp" Condition="'$(FeatureRemoting)' == 'true'" />
+ <CppCompile Include="$(I386SourcesDir)\StubLinkerX86.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\ExInfo.cpp" />
+ </ItemGroup>
+ <ItemGroup Condition="'$(BuildArchitecture)' == 'i386'">
+ <Assemble386 Include="$(I386SourcesDir)\asmhelpers.asm" />
+ <Assemble386 Include="$(I386SourcesDir)\fptext.asm" />
+ <Assemble386 Include="$(I386SourcesDir)\gmsasm.asm" />
+ <Assemble386 Include="$(I386SourcesDir)\jithelp.asm" />
+ <Assemble386 Include="$(I386SourcesDir)\RedirectedHandledJITCase.asm" />
+ </ItemGroup>
+ <!-- ARM_SOURCES -->
+ <ItemGroup Condition="'$(BuildArchitecture)' == 'arm'">
+ <CppCompile Include="$(ArmSourcesDir)\stubs.cpp" />
+ <CppCompile Include="$(ArmSourcesDir)\ExcepArm.cpp" />
+ <CppCompile Include="$(ArmSourcesDir)\ArmSingleStepper.cpp" />
+ <CppCompile Include="$(ArmSourcesDir)\JitHelpersARM.cpp" />
+ <CppCompile Include="$(ArmSourcesDir)\profiler.cpp" />
+ </ItemGroup>
+ <ItemGroup Condition="'$(BuildArchitecture)' == 'arm'">
+ <PreprocessAssembleArm Include="$(ArmSourcesDir)\asmhelpers.asm" />
+ <PreprocessAssembleArm Include="$(ArmSourcesDir)\crthelpers.asm" />
+ <PreprocessAssembleArm Include="$(ArmSourcesDir)\ehhelpers.asm" />
+ <PreprocessAssembleArm Include="$(ArmSourcesDir)\pinvokestubs.asm" />
+ <PreprocessAssembleArm Include="$(ArmSourcesDir)\memcpy.asm" />
+ <PreprocessAssembleArm Include="$(ArmSourcesDir)\patchedcode.asm" />
+ <AssembleArm Include="$(IntermediateOutputDirectory)\asmhelpers.i" />
+ <AssembleArm Include="$(IntermediateOutputDirectory)\crthelpers.i" />
+ <AssembleArm Include="$(IntermediateOutputDirectory)\ehhelpers.i" />
+ <AssembleArm Include="$(IntermediateOutputDirectory)\pinvokestubs.i" />
+ <AssembleArm Include="$(IntermediateOutputDirectory)\memcpy.i" />
+ <AssembleArm Include="$(IntermediateOutputDirectory)\patchedcode.i" />
+ </ItemGroup>
+ <!-- ARM64_SOURCES -->
+ <ItemGroup Condition="'$(BuildArchitecture)' == 'arm64'">
+ <CppCompile Include="$(Arm64SourcesDir)\stubs.cpp" />
+ <CppCompile Include="$(Arm64SourcesDir)\cGenArm64.cpp" />
+ </ItemGroup>
+ <ItemGroup Condition="'$(BuildArchitecture)' == 'arm64'">
+ <PreprocessAssembleArm Include="$(Arm64SourcesDir)\CallDescrWorkerARM64.asm" />
+ <PreprocessAssembleArm Include="$(Arm64SourcesDir)\asmHelpers.asm" />
+ <PreprocessAssembleArm Include="$(Arm64SourcesDir)\PInvokeStubs.asm" />
+ <PreprocessAssembleArm Include="$(Arm64SourcesDir)\crtHelpers.asm" />
+ <AssembleArm64 Include="$(IntermediateOutputDirectory)\CallDescrWorkerARM64.i" />
+ <AssembleArm64 Include="$(IntermediateOutputDirectory)\asmHelpers.i" />
+ <AssembleArm64 Include="$(IntermediateOutputDirectory)\PInvokeStubs.i" />
+ <AssembleArm64 Include="$(IntermediateOutputDirectory)\crtHelpers.i" />
+ </ItemGroup>
+</Project>
diff --git a/src/vm/wrappers.h b/src/vm/wrappers.h
new file mode 100644
index 0000000000..1a0836b707
--- /dev/null
+++ b/src/vm/wrappers.h
@@ -0,0 +1,351 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef _WRAPPERS_H_
+#define _WRAPPERS_H_
+
+#include "metadata.h"
+#include "interoputil.h"
+#ifdef FEATURE_COMINTEROP
+#include "windowsstring.h"
+#endif
+
+class MDEnumHolder
+{
+public:
+ inline MDEnumHolder(IMDInternalImport* IMDII)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(IMDII));
+ }
+ CONTRACTL_END;
+
+ m_IMDII = IMDII;
+
+ }
+
+ inline ~MDEnumHolder()
+ {
+ WRAPPER_NO_CONTRACT;
+ m_IMDII->EnumClose(&m_HEnum);
+ }
+
+ inline operator HENUMInternal()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_HEnum;
+ }
+
+ inline HENUMInternal* operator&()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<HENUMInternal*>(&m_HEnum);
+ }
+
+private:
+ MDEnumHolder() {LIMITED_METHOD_CONTRACT;} // Must use parameterized constructor
+
+ HENUMInternal m_HEnum;
+ IMDInternalImport* m_IMDII;
+};
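+
+// Hypothetical usage sketch (the enum kind and parent token are illustrative):
+//
+//   {
+//       MDEnumHolder hEnum(pImport);
+//       IfFailThrow(pImport->EnumInit(mdtMethodDef, tkTypeDef, &hEnum));
+//       mdMethodDef md;
+//       while (pImport->EnumNext(&hEnum, &md)) { /* ... */ }
+//   }   // EnumClose runs automatically on scope exit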
+
+
+//--------------------------------------------------------------------------------
+// safe variant helper
+void SafeVariantClear(VARIANT* pVar);
+
+class VariantHolder
+{
+public:
+ inline VariantHolder()
+ {
+ LIMITED_METHOD_CONTRACT;
+ memset(&m_var, 0, sizeof(VARIANT));
+ }
+
+ inline ~VariantHolder()
+ {
+ WRAPPER_NO_CONTRACT;
+ SafeVariantClear(&m_var);
+ }
+
+ inline VARIANT* operator&()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return static_cast<VARIANT*>(&m_var);
+ }
+
+private:
+ VARIANT m_var;
+};
+
+
+template <typename TYPE>
+inline void SafeComRelease(TYPE *value)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ SafeRelease((IUnknown*)value);
+}
+template <typename TYPE>
+inline void SafeComReleasePreemp(TYPE *value)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ SafeReleasePreemp((IUnknown*)value);
+}
+
+NEW_WRAPPER_TEMPLATE1(SafeComHolder, SafeComRelease<_TYPE>);
+
+// Use this holder if you're already in preemptive mode for other reasons;
+// use SafeComHolder otherwise.
+NEW_WRAPPER_TEMPLATE1(SafeComHolderPreemp, SafeComReleasePreemp<_TYPE>);
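+
+// Hypothetical usage sketch for the holders above (pFactory is illustrative):
+//
+//   {
+//       SafeComHolder<IUnknown> pUnk = NULL;
+//       IfFailThrow(pFactory->QueryInterface(IID_IUnknown, (void**)&pUnk));
+//       // ... use pUnk ...
+//   }   // SafeComRelease runs here, releasing the interface exactly once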
+
+
+
+#ifdef FEATURE_COMINTEROP
+#ifdef CROSSGEN_COMPILE
+ namespace clr
+ {
+ namespace winrt
+ {
+ template <typename ItfT> inline
+ HRESULT GetActivationFactory(
+ __in WinRtStringRef const & wzActivatableClassId,
+ __deref_out ItfT** ppItf)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return GetActivationFactory(wzActivatableClassId, ppItf);
+ }
+
+ template <typename ItfT> inline
+ HRESULT GetActivationFactory(
+ __in WinRtStringRef const & wzActivatableClassId,
+ __out typename SafeComHolderPreemp<ItfT>* pItf)
+ {
+ STATIC_CONTRACT_WRAPPER;
+
+ if (pItf == nullptr)
+ return E_INVALIDARG;
+
+ return GetActivationFactory(wzActivatableClassId, (ItfT**)&(*pItf));
+ }
+ }
+ }
+#endif //CROSSGEN_COMPILE
+#endif //FEATURE_COMINTEROP
+
+//-----------------------------------------------------------------------------
+// NewPreempHolder : New'ed memory holder, deletes in preemp mode.
+//
+// {
+// NewPreempHolder<Foo> foo = new Foo ();
+// } // delete foo on out of scope in preemp mode.
+//-----------------------------------------------------------------------------
+
+template <typename TYPE>
+void DeletePreemp(TYPE *value)
+{
+ WRAPPER_NO_CONTRACT;
+
+ GCX_PREEMP();
+ delete value;
+}
+
+NEW_WRAPPER_TEMPLATE1(NewPreempHolder, DeletePreemp<_TYPE>);
+
+
+//-----------------------------------------------------------------------------
+// VariantPtrHolder : VARIANT* holder, calls SafeVariantClear on scope exit.
+//
+// {
+//      VariantPtrHolder foo = pVar;
+// } // calls SafeVariantClear on out of scope.
+//-----------------------------------------------------------------------------
+
+FORCEINLINE void VariantPtrRelease(VARIANT* value)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (value)
+ {
+ SafeVariantClear(value);
+ }
+}
+
+class VariantPtrHolder : public Wrapper<VARIANT*, VariantPtrDoNothing, VariantPtrRelease, NULL>
+{
+public:
+ VariantPtrHolder(VARIANT* p = NULL)
+ : Wrapper<VARIANT*, VariantPtrDoNothing, VariantPtrRelease, NULL>(p)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ FORCEINLINE void operator=(VARIANT* p)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ Wrapper<VARIANT*, VariantPtrDoNothing, VariantPtrRelease, NULL>::operator=(p);
+ }
+};
+
+//-----------------------------------------------------------------------------
+// SafeArrayPtrHolder : SafeArray holder, Calls SafeArrayDestroy on scope exit.
+// In cooperative mode this holder should be used instead of code:SafeArrayHolder.
+//
+// {
+// SafeArrayPtrHolder foo = pSafeArray
+// } // Call SafeArrayDestroy on out of scope.
+//-----------------------------------------------------------------------------
+
+FORCEINLINE void SafeArrayPtrRelease(SAFEARRAY* value)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (value)
+ {
+ // SafeArrayDestroy may block and may also call back to MODE_PREEMPTIVE
+        // runtime functions, e.g. code:Unknown_Release_Internal
+ GCX_PREEMP();
+
+ HRESULT hr; hr = SafeArrayDestroy(value);
+ _ASSERTE(SUCCEEDED(hr));
+ }
+}
+
+class SafeArrayPtrHolder : public Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayPtrRelease, NULL>
+{
+public:
+ SafeArrayPtrHolder(SAFEARRAY* p = NULL)
+ : Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayPtrRelease, NULL>(p)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ FORCEINLINE void operator=(SAFEARRAY* p)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ Wrapper<SAFEARRAY*, SafeArrayDoNothing, SafeArrayPtrRelease, NULL>::operator=(p);
+ }
+};
+
+//-----------------------------------------------------------------------------
+// ZeroHolder : Sets value to zero on context exit.
+//
+// {
+// ZeroHolder foo = &data;
+// } // set data to zero on context exit
+//-----------------------------------------------------------------------------
+
+FORCEINLINE void ZeroRelease(VOID* value)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (value)
+ {
+ (*(size_t*)value) = 0;
+ }
+}
+
+class ZeroHolder : public Wrapper<VOID*, ZeroDoNothing, ZeroRelease, NULL>
+{
+public:
+ ZeroHolder(VOID* p = NULL)
+ : Wrapper<VOID*, ZeroDoNothing, ZeroRelease, NULL>(p)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ FORCEINLINE void operator=(VOID* p)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ Wrapper<VOID*, ZeroDoNothing, ZeroRelease, NULL>::operator=(p);
+ }
+};
+
+#ifdef FEATURE_COMINTEROP
+class TYPEATTRHolder
+{
+public:
+ TYPEATTRHolder(ITypeInfo* pTypeInfo)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pTypeInfo, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ m_pTypeInfo = pTypeInfo;
+ m_TYPEATTR = NULL;
+ }
+
+ ~TYPEATTRHolder()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(m_TYPEATTR ? CheckPointer(m_pTypeInfo) : CheckPointer(m_pTypeInfo, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ if (m_TYPEATTR)
+ {
+ GCX_PREEMP();
+ m_pTypeInfo->ReleaseTypeAttr(m_TYPEATTR);
+ }
+ }
+
+ inline void operator=(TYPEATTR* value)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_TYPEATTR = value;
+ }
+
+ inline TYPEATTR** operator&()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return &m_TYPEATTR;
+ }
+
+ inline TYPEATTR* operator->()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_TYPEATTR;
+ }
+
+private:
+ TYPEATTRHolder ()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ ITypeInfo* m_pTypeInfo;
+ TYPEATTR* m_TYPEATTR;
+};
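+
+// Hypothetical usage sketch (pTypeInfo is illustrative):
+//
+//   {
+//       TYPEATTRHolder pAttr(pTypeInfo);
+//       IfFailThrow(pTypeInfo->GetTypeAttr(&pAttr));
+//       WORD cFuncs = pAttr->cFuncs;
+//       // ...
+//   }   // ReleaseTypeAttr runs in preemptive mode on scope exit
+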
+#endif // FEATURE_COMINTEROP
+
+#endif // _WRAPPERS_H_
diff --git a/src/vm/zapsig.cpp b/src/vm/zapsig.cpp
new file mode 100644
index 0000000000..7560958256
--- /dev/null
+++ b/src/vm/zapsig.cpp
@@ -0,0 +1,1431 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ===========================================================================
+// File: zapsig.cpp
+//
+// Signature encoding for zapper (ngen)
+//
+
+// ===========================================================================
+
+
+#include "common.h"
+#ifdef FEATURE_PREJIT
+#include "zapsig.h"
+#include "typedesc.h"
+#ifndef BINDER
+#include "compile.h"
+#else
+#include "mdilmodule.h"
+#endif
+#include "sigbuilder.h"
+
+#ifndef DACCESS_COMPILE
+
+BOOL ZapSig::GetSignatureForTypeDesc(TypeDesc * desc, SigBuilder * pSigBuilder)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END
+
+ CorElementType elemType = desc->GetInternalCorElementType();
+
+ if (elemType == ELEMENT_TYPE_VALUETYPE)
+ {
+ // convert to ELEMENT_TYPE_NATIVE_VALUETYPE_ZAPSIG so that the right
+ // thing will happen in code:SigPointer.GetTypeHandleThrowing
+ elemType = (CorElementType) ELEMENT_TYPE_NATIVE_VALUETYPE_ZAPSIG;
+ }
+ else if (elemType == ELEMENT_TYPE_VAR || elemType == ELEMENT_TYPE_MVAR)
+ {
+ // Enable encoding of type variables for NGen signature only. IBC toolchain is not aware of them yet.
+ if (context.externalTokens == ZapSig::NormalTokens)
+ elemType = (CorElementType) ELEMENT_TYPE_VAR_ZAPSIG;
+ }
+
+ pSigBuilder->AppendElementType(elemType);
+
+ if (desc->HasTypeParam())
+ {
+ if (!this->GetSignatureForTypeHandle(desc->GetTypeParam(), pSigBuilder))
+ return FALSE;
+
+ if (elemType == ELEMENT_TYPE_ARRAY)
+ {
+ ArrayTypeDesc *pArrayDesc = dac_cast<PTR_ArrayTypeDesc>(desc);
+ _ASSERTE(pArrayDesc->GetRank() != 0);
+ pSigBuilder->AppendData(pArrayDesc->GetRank());
+ pSigBuilder->AppendData(0);
+ pSigBuilder->AppendData(0);
+ }
+ }
+ else
+ {
+ switch (elemType)
+ {
+ case ELEMENT_TYPE_FNPTR:
+ {
+ FnPtrTypeDesc *pTD = dac_cast<PTR_FnPtrTypeDesc>(desc);
+
+ // Emit calling convention
+ pSigBuilder->AppendByte(pTD->GetCallConv());
+
+ // number of args
+ unsigned numArgs = pTD->GetNumArgs();
+ pSigBuilder->AppendData(numArgs);
+
+ // return type and args
+ TypeHandle *retAndArgTypes = pTD->GetRetAndArgTypesPointer();
+ for (DWORD i = 0; i <= numArgs; i++)
+ {
+ TypeHandle th = retAndArgTypes[i];
+ // This should be a consequence of the type key being restored
+ CONSISTENCY_CHECK(!th.IsNull() && !th.IsEncodedFixup());
+ if (!this->GetSignatureForTypeHandle(th, pSigBuilder))
+ return FALSE;
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_MVAR:
+ // _ASSERTE(!"Cannot encode ET_MVAR in a ZapSig");
+ return FALSE;
+
+ case ELEMENT_TYPE_VAR:
+ // _ASSERTE(!"Cannot encode ET_VAR in a ZapSig");
+ return FALSE;
+
+ case ELEMENT_TYPE_VAR_ZAPSIG:
+ {
+ TypeVarTypeDesc * pTypeVarDesc = dac_cast<PTR_TypeVarTypeDesc>(desc);
+#ifdef BINDER
+ MdilModule * pVarTypeModule = pTypeVarDesc->GetModule();
+#else
+ Module * pVarTypeModule = pTypeVarDesc->GetModule();
+#endif
+ if (pVarTypeModule != this->context.pInfoModule)
+ {
+ DWORD index = (*this->pfnEncodeModule)(this->context.pModuleContext, pVarTypeModule);
+
+ if (index == ENCODE_MODULE_FAILED)
+ return FALSE;
+
+ // emit the ET_MODULE_ZAPSIG escape
+ pSigBuilder->AppendElementType((CorElementType) ELEMENT_TYPE_MODULE_ZAPSIG);
+ // emit the module index
+ pSigBuilder->AppendData(index);
+ }
+ pSigBuilder->AppendData(RidFromToken(pTypeVarDesc->GetToken()));
+ break;
+ }
+
+ default:
+ _ASSERTE(!"Bad type");
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
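+
+// Illustrative output of the routine above (element types drawn from the branches;
+// rank and the two trailing counts follow the code): a multi-dimensional int[,]
+// TypeDesc is emitted as
+//   ELEMENT_TYPE_ARRAY ELEMENT_TYPE_I4 <rank=2> <0> <0>
+// while a byref int& becomes
+//   ELEMENT_TYPE_BYREF ELEMENT_TYPE_I4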
+
+
+// Create a signature for a typeHandle
+// It can be decoded using MetaSig::GetTypeHandleThrowing
+// The tokens are expressed relative to this->pInfoModule
+// When handle.GetModule() != this->pInfoModule, we escape the signature
+// with an ELEMENT_TYPE_MODULE_ZAPSIG <id-num> <token> to encode
+// a temporary change of module
+//
+// Returns TRUE if the signature was successfully appended to pSigBuilder,
+// and FALSE if the type could not be encoded
+//
+BOOL ZapSig::GetSignatureForTypeHandle(TypeHandle handle,
+ SigBuilder * pSigBuilder)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+
+ PRECONDITION(CheckPointer(handle));
+ PRECONDITION(CheckPointer(this->context.pInfoModule));
+ PRECONDITION(!handle.HasUnrestoredTypeKey());
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ if (handle.IsTypeDesc())
+ return GetSignatureForTypeDesc(handle.AsTypeDesc(), pSigBuilder);
+
+ MethodTable *pMT = handle.AsMethodTable();
+
+ // Can we encode the type using a short ET encoding?
+ //
+    CorElementType elemType = TryEncodeUsingShortcut(pMT);
+ if (elemType != ELEMENT_TYPE_END)
+ {
+ _ASSERTE(pMT->IsTypicalTypeDefinition());
+
+ // Check for an array type and encode that we are dealing with a MethodTable representation
+ if (elemType == ELEMENT_TYPE_SZARRAY || elemType == ELEMENT_TYPE_ARRAY)
+ {
+ pSigBuilder->AppendElementType((CorElementType)ELEMENT_TYPE_NATIVE_ARRAY_TEMPLATE_ZAPSIG);
+ pSigBuilder->AppendElementType(elemType);
+
+ TypeHandle elementType = pMT->GetApproxArrayElementTypeHandle();
+ if (!this->GetSignatureForTypeHandle(elementType, pSigBuilder))
+ return FALSE;
+
+ if (elemType == ELEMENT_TYPE_ARRAY)
+ {
+ pSigBuilder->AppendData(pMT->GetRank());
+ pSigBuilder->AppendData(0);
+ pSigBuilder->AppendData(0);
+ }
+ }
+ else
+ {
+ pSigBuilder->AppendElementType(elemType);
+ }
+
+ return TRUE;
+ }
+
+ // We could not encode the type using a short encoding
+ // and we have a handle that represents a Class or ValueType
+
+ // We may need to emit an out-of-module escape sequence
+ //
+#ifdef BINDER
+ MdilModule *pTypeHandleModule = pMT->GetModule();
+#else
+ Module *pTypeHandleModule = pMT->GetModule_NoLogging();
+#endif
+
+    // If the type handle's module is different from this->pInfoModule,
+    // we will need to add an out-of-module escape for the type
+ //
+ DWORD index = 0;
+ mdToken token = pMT->GetCl_NoLogging();
+#ifndef BINDER
+#ifdef FEATURE_NATIVE_IMAGE_GENERATION
+ if (pTypeHandleModule != this->context.pInfoModule && !pTypeHandleModule->IsInCurrentVersionBubble())
+ {
+ pTypeHandleModule = GetAppDomain()->ToCompilationDomain()->GetTargetModule();
+ token = pTypeHandleModule->LookupTypeRefByMethodTable(pMT);
+ }
+#endif
+#endif
+ if (pTypeHandleModule != this->context.pInfoModule)
+ {
+ // During IBC profiling this calls
+ // code:Module.EncodeModuleHelper
+ // During ngen this calls
+ // code:ZapImportTable.EncodeModuleHelper
+ //
+ index = (*this->pfnEncodeModule)(this->context.pModuleContext, pTypeHandleModule);
+
+ if (index == ENCODE_MODULE_FAILED)
+ return FALSE;
+
+ // emit the ET_MODULE_ZAPSIG escape
+ pSigBuilder->AppendElementType((CorElementType) ELEMENT_TYPE_MODULE_ZAPSIG);
+ // emit the module index
+ pSigBuilder->AppendData(index);
+ }
+
+ // Remember if we have an instantiated generic type
+ bool fNeedsInstantiation = pMT->HasInstantiation() && !pMT->IsGenericTypeDefinition();
+
+ // We possibly have an instantiated generic type
+ if (fNeedsInstantiation)
+ {
+ pSigBuilder->AppendElementType(ELEMENT_TYPE_GENERICINST);
+ }
+
+ // Beware of enums! Can't use GetInternalCorElementType() here.
+ pSigBuilder->AppendElementType(pMT->IsValueType() ? ELEMENT_TYPE_VALUETYPE : ELEMENT_TYPE_CLASS);
+
+ _ASSERTE(!IsNilToken(token));
+ if (IsNilToken(token))
+ return FALSE;
+
+ if ((index != 0) && (this->pfnTokenDefinition != NULL))
+ {
+ //
+ // We do not want to log the metadata lookups that we perform here
+ //
+ IBCLoggingDisabler disableLogging;
+
+ // During IBC profiling this calls
+ // code:Module::TokenDefinitionHelper
+ (*this->pfnTokenDefinition)(this->context.pModuleContext, pTypeHandleModule, index, &token);
+
+ // ibcExternalType tokens are actually encoded as mdtTypeDef tokens in the signature
+ _ASSERTE(TypeFromToken(token) == ibcExternalType);
+ token = TokenFromRid(RidFromToken(token), mdtTypeDef);
+ }
+
+ pSigBuilder->AppendToken(token);
+
+ if (fNeedsInstantiation)
+ {
+ pSigBuilder->AppendData(pMT->GetNumGenericArgs());
+ Instantiation inst = pMT->GetInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle t = inst[i];
+ CONSISTENCY_CHECK(!t.IsNull() && !t.IsEncodedFixup());
+ if (!this->GetSignatureForTypeHandle(t, pSigBuilder))
+ return FALSE;
+ }
+ }
+ return TRUE;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+//
+// Returns the element type when the typeHandle can be encoded
+// using a single CorElementType value
+// This includes using ELEMENT_TYPE_CANON_ZAPSIG for the System.__Canon type
+//
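+// For example (per the checks below): true primitives map to their own
+// element type (e.g. ELEMENT_TYPE_I4), System.Object to ELEMENT_TYPE_OBJECT,
+// System.String to ELEMENT_TYPE_STRING, and System.__Canon to the
+// ELEMENT_TYPE_CANON_ZAPSIG shortcut.
+//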
+/*static */ CorElementType ZapSig::TryEncodeUsingShortcut(/* in */ MethodTable * pMT)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ CorElementType elemType = ELEMENT_TYPE_END; // An illegal value that we check for later
+
+ // Set elemType to a shortcut encoding whenever possible
+ //
+ if (pMT->IsTruePrimitive())
+ elemType = pMT->GetInternalCorElementType();
+ else if (pMT == g_pObjectClass)
+ elemType = ELEMENT_TYPE_OBJECT;
+ else if (pMT == g_pStringClass)
+ elemType = ELEMENT_TYPE_STRING;
+ else if (pMT == g_pCanonMethodTableClass)
+ elemType = (CorElementType) ELEMENT_TYPE_CANON_ZAPSIG;
+ else if (pMT->IsArray())
+ elemType = pMT->GetInternalCorElementType(); // either ELEMENT_TYPE_SZARRAY or ELEMENT_TYPE_ARRAY
+
+ return elemType;
+}
+#ifndef BINDER
+//
+// Compare a metadata signature element with a type handle
+// The type handle must have a fully restored type key, which in turn means that modules for all of its
+// components are loaded (e.g. type arguments to an instantiated type).
+//
+// Hence we can do the signature comparison without incurring any loads or restores.
+//
+/*static*/ BOOL ZapSig::CompareSignatureToTypeHandle(PCCOR_SIGNATURE pSig,
+#ifdef BINDER
+ MdilModule* pModule,
+#else
+ Module* pModule,
+#endif
+ TypeHandle handle,
+ const ZapSig::Context * pZapSigContext)
+{
+ CONTRACT(BOOL)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(pZapSigContext));
+ PRECONDITION(CheckPointer(pZapSigContext->pModuleContext));
+ PRECONDITION(CheckPointer(pZapSigContext->pInfoModule));
+ PRECONDITION(CheckPointer(handle));
+ PRECONDITION(CheckPointer(pSig));
+ PRECONDITION(!handle.HasUnrestoredTypeKey());
+ }
+ CONTRACT_END
+
+ mdToken tk;
+
+ //
+ // pOrigModule is the original module that contained this ZapSig
+ //
+#ifdef BINDER
+ MdilModule * pOrigModule = pZapSigContext->pInfoModule;
+#else
+ Module * pOrigModule = pZapSigContext->pInfoModule;
+#endif
+ CorElementType sigType = CorSigUncompressElementType(pSig);
+ CorElementType handleType = handle.GetSignatureCorElementType();
+
+ switch (sigType)
+ {
+ default:
+ {
+ // Unknown type!
+ _ASSERTE(!"Unknown type in ZapSig::CompareSignatureToTypeHandle");
+ RETURN(FALSE);
+ }
+
+ case ELEMENT_TYPE_MODULE_ZAPSIG:
+ {
+ DWORD ix = CorSigUncompressData(pSig);
+ CONTRACT_VIOLATION(ThrowsViolation|GCViolation);
+ pModule = pZapSigContext->GetZapSigModule()->GetModuleFromIndexIfLoaded(ix);
+ if (pModule == NULL)
+ RETURN FALSE;
+ else
+ RETURN(CompareSignatureToTypeHandle(pSig, pModule, handle, pZapSigContext));
+ }
+
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_VOID:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_TYPEDBYREF:
+ RETURN(sigType == handleType);
+
+ case ELEMENT_TYPE_STRING:
+ RETURN(handle == TypeHandle(g_pStringClass));
+
+ case ELEMENT_TYPE_OBJECT:
+ RETURN(handle == TypeHandle(g_pObjectClass));
+
+ case ELEMENT_TYPE_CANON_ZAPSIG:
+ RETURN(handle == TypeHandle(g_pCanonMethodTableClass));
+
+ case ELEMENT_TYPE_VAR:
+ case ELEMENT_TYPE_MVAR:
+ {
+ if (sigType != handleType)
+ RETURN(FALSE);
+
+ unsigned varNum = CorSigUncompressData(pSig);
+ RETURN(varNum == (dac_cast<PTR_TypeVarTypeDesc>(handle.AsTypeDesc())->GetIndex()));
+ }
+
+ case ELEMENT_TYPE_VAR_ZAPSIG:
+ {
+ if (!handle.IsGenericVariable())
+ RETURN(FALSE);
+
+ TypeVarTypeDesc *pTypeVarTypeDesc = handle.AsGenericVariable();
+
+ unsigned rid = CorSigUncompressData(pSig);
+ RETURN(TokenFromRid(rid, mdtGenericParam) == pTypeVarTypeDesc->GetToken() && pModule == pTypeVarTypeDesc->GetModule());
+ }
+
+ // These take an additional argument, which is the element type
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_BYREF:
+ {
+ if (sigType != handleType)
+ RETURN(FALSE);
+
+ RETURN (CompareSignatureToTypeHandle(pSig, pModule, handle.GetTypeParam(), pZapSigContext));
+ }
+
+ case ELEMENT_TYPE_NATIVE_ARRAY_TEMPLATE_ZAPSIG:
+ {
+ if (handle.IsTypeDesc() || !handle.AsMethodTable()->IsArray())
+ RETURN(FALSE);
+
+ RETURN (CompareSignatureToTypeHandle(pSig, pModule, handle, pZapSigContext));
+ }
+
+ case ELEMENT_TYPE_NATIVE_VALUETYPE_ZAPSIG:
+ {
+ sigType = CorSigUncompressElementType(pSig);
+ _ASSERTE(sigType == ELEMENT_TYPE_VALUETYPE);
+
+ if (!handle.IsNativeValueType()) RETURN(FALSE);
+ } // fall-through
+
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_CLASS:
+ {
+ CorSigUncompressToken(pSig, &tk);
+ _ASSERTE(TypeFromToken(tk) == mdtTypeDef);
+ RETURN (sigType == handleType && !handle.HasInstantiation() && pModule == handle.GetModule() && handle.GetCl() == tk);
+ }
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+ if (sigType != handleType)
+ RETURN(FALSE);
+
+ FnPtrTypeDesc *pTD = handle.AsFnPtrType();
+ DWORD callConv = CorSigUncompressData(pSig);
+ if (callConv != pTD->GetCallConv())
+ RETURN(FALSE);
+
+ DWORD numArgs = CorSigUncompressData(pSig);
+ if (numArgs != pTD->GetNumArgs())
+ RETURN(FALSE);
+
+ {
+ CONTRACT_VIOLATION(ThrowsViolation|GCViolation);
+
+ for (DWORD i = 0; i <= numArgs; i++)
+ {
+ SigPointer sp(pSig);
+ if (!CompareSignatureToTypeHandle(pSig, pOrigModule, pTD->GetRetAndArgTypes()[i], pZapSigContext))
+ RETURN(FALSE);
+ if (FAILED(sp.SkipExactlyOne()))
+ {
+ RETURN(FALSE);
+ }
+ pSig = sp.GetPtr();
+ }
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ if (!handle.HasInstantiation())
+ RETURN(FALSE);
+
+ sigType = CorSigUncompressElementType(pSig);
+ if (sigType != handleType)
+ RETURN(FALSE);
+
+ pSig += CorSigUncompressToken(pSig, &tk);
+ _ASSERTE(TypeFromToken(tk) == mdtTypeDef);
+ if (pModule != handle.GetModule() || tk != handle.GetCl())
+ RETURN(FALSE);
+
+ DWORD numGenericArgs = CorSigUncompressData(pSig);
+
+ if (numGenericArgs != handle.GetNumGenericArgs())
+ RETURN(FALSE);
+
+ Instantiation inst = handle.GetInstantiation();
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ SigPointer sp(pSig);
+ if (!CompareSignatureToTypeHandle(pSig, pOrigModule, inst[i], pZapSigContext))
+ RETURN(FALSE);
+ if (FAILED(sp.SkipExactlyOne()))
+ {
+ RETURN(FALSE);
+ }
+ pSig = sp.GetPtr();
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_ARRAY:
+ {
+ if (sigType != handleType)
+ RETURN(FALSE);
+
+ if (!CompareSignatureToTypeHandle(pSig, pModule, handle.GetTypeParam(), pZapSigContext))
+ RETURN(FALSE);
+ SigPointer sp(pSig);
+ if (FAILED(sp.SkipExactlyOne()))
+ RETURN(FALSE);
+
+ DWORD rank;
+ if (FAILED(sp.GetData(&rank)))
+ RETURN(FALSE);
+
+ if (rank != handle.AsArray()->GetRank())
+ RETURN(FALSE);
+
+ break;
+ }
+ }
+
+ RETURN(TRUE);
+}
+
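+// Decode the encoded signature referenced by a tagged fixup pointer in
+// pModule and compare it against handle, without triggering any loads.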
+/*static*/
+BOOL ZapSig::CompareFixupToTypeHandle(Module * pModule, TADDR fixup, TypeHandle handle)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ PRECONDITION(CORCOMPILE_IS_POINTER_TAGGED(fixup));
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END
+
+ Module *pDefiningModule;
+ PCCOR_SIGNATURE pSig = pModule->GetEncodedSigIfLoaded(CORCOMPILE_UNTAG_TOKEN(fixup), &pDefiningModule);
+ if (pDefiningModule == NULL)
+ return FALSE;
+
+ ZapSig::Context zapSigContext(pDefiningModule, pModule);
+ return ZapSig::CompareSignatureToTypeHandle(pSig, pDefiningModule, handle, &zapSigContext);
+}
+
+/*static*/
+BOOL ZapSig::CompareTypeHandleFieldToTypeHandle(TypeHandle *pTypeHnd, TypeHandle typeHnd2)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ FORBID_FAULT;
+ PRECONDITION(CheckPointer(pTypeHnd));
+ PRECONDITION(CheckPointer(typeHnd2));
+ PRECONDITION(!CORCOMPILE_IS_POINTER_TAGGED((SIZE_T) typeHnd2.AsPtr()));
+ }
+ CONTRACTL_END
+
+ // Ensure that the compiler won't fetch the value twice
+ SIZE_T fixup = VolatileLoadWithoutBarrier((SIZE_T *)pTypeHnd);
+
+ if (CORCOMPILE_IS_POINTER_TAGGED(fixup))
+ {
+ Module *pContainingModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(pTypeHnd));
+ CONSISTENCY_CHECK(pContainingModule != NULL);
+
+ Module *pDefiningModule;
+ PCCOR_SIGNATURE pSig = pContainingModule->GetEncodedSigIfLoaded(CORCOMPILE_UNTAG_TOKEN(fixup), &pDefiningModule);
+ if (pDefiningModule == NULL)
+ return FALSE;
+ else
+ {
+ ZapSig::Context zapSigContext(pDefiningModule, pContainingModule);
+ ZapSig::Context * pZapSigContext = &zapSigContext;
+ return CompareSignatureToTypeHandle(pSig, pDefiningModule, typeHnd2, pZapSigContext);
+ }
+ }
+ else
+ return TypeHandle::FromTAddr(fixup) == typeHnd2;
+}
+
+#ifndef DACCESS_COMPILE
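+// Map an (assemblyIndex, moduleIndex) pair back to a Module.
+// Sketch of the index space, as inferred from the code below:
+//   assemblyIndex == 0                 -> fromModule's own assembly
+//   0 < assemblyIndex < AssemblyRefMax -> mdtAssemblyRef rid in fromModule
+//   assemblyIndex >= AssemblyRefMax    -> rid into the native metadata assembly refs
+//   moduleIndex == 0                   -> manifest module; otherwise an mdtFile rid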
+Module *ZapSig::DecodeModuleFromIndexes(Module *fromModule,
+ DWORD assemblyIndex,
+ DWORD moduleIndex)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ Assembly *pAssembly = NULL;
+
+ if (assemblyIndex == 0)
+ {
+ pAssembly = fromModule->GetAssembly();
+ }
+ else
+ {
+ if (assemblyIndex < fromModule->GetAssemblyRefMax())
+ {
+ pAssembly = fromModule->LoadAssembly(GetAppDomain(), RidToToken(assemblyIndex, mdtAssemblyRef))->GetAssembly();
+ }
+ else
+ {
+ assemblyIndex -= fromModule->GetAssemblyRefMax();
+
+ pAssembly = fromModule->GetNativeMetadataAssemblyRefFromCache(assemblyIndex);
+
+ if(pAssembly == NULL)
+ {
+ AssemblySpec spec;
+ spec.InitializeSpec(TokenFromRid(assemblyIndex, mdtAssemblyRef),
+ fromModule->GetNativeAssemblyImport(),
+ NULL,
+ FALSE);
+
+ pAssembly = spec.LoadAssembly(FILE_LOADED);
+
+ fromModule->SetNativeMetadataAssemblyRefInCache(assemblyIndex, pAssembly);
+ }
+ }
+ }
+
+ if (moduleIndex == 0)
+ return pAssembly->GetManifestModule();
+ else
+ return pAssembly->GetManifestModule()->LoadModule(GetAppDomain(), RidToToken(moduleIndex, mdtFile))->GetModule();
+}
+
+Module *ZapSig::DecodeModuleFromIndexesIfLoaded(Module *fromModule,
+ DWORD assemblyIndex,
+ DWORD moduleIndex)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ Assembly *pAssembly = NULL;
+ mdAssemblyRef tkAssemblyRef;
+
+ if (assemblyIndex == 0)
+ pAssembly = fromModule->GetAssembly();
+ else
+ {
+ if (assemblyIndex < fromModule->GetAssemblyRefMax())
+ {
+ tkAssemblyRef = RidToToken(assemblyIndex, mdtAssemblyRef);
+ pAssembly = fromModule->GetAssemblyIfLoaded(tkAssemblyRef);
+ }
+ else
+ {
+ assemblyIndex -= fromModule->GetAssemblyRefMax();
+ tkAssemblyRef = RidToToken(assemblyIndex, mdtAssemblyRef);
+ IMDInternalImport * pMDImportOverride = fromModule->GetNativeAssemblyImport(FALSE);
+ if (pMDImportOverride != NULL)
+ {
+ CHAR szFullName[MAX_CLASS_NAME + 1];
+ LPCSTR szWinRtNamespace = NULL;
+ LPCSTR szWinRtClassName = NULL;
+
+ BOOL fValidAssemblyRef = TRUE;
+ LPCSTR pAssemblyName;
+ DWORD dwFlags;
+ if (FAILED(pMDImportOverride->GetAssemblyRefProps(tkAssemblyRef,
+ NULL,
+ NULL,
+ &pAssemblyName,
+ NULL,
+ NULL,
+ NULL,
+ &dwFlags)))
+ { // Unexpected failure reading MetaData
+ fValidAssemblyRef = FALSE;
+ }
+
+ if (fValidAssemblyRef && IsAfContentType_WindowsRuntime(dwFlags))
+ {
+ // Find the encoded type name
+ LPCSTR pTypeName = NULL;
+ if (pAssemblyName != NULL)
+ pTypeName = strchr(pAssemblyName, '!');
+
+ if (pTypeName != NULL)
+ {
+ pTypeName++;
+ // pTypeName now contains the full type name (namespace + name)
+
+ strcpy_s(szFullName, _countof(szFullName), pTypeName);
+ LPSTR pszName = strrchr(szFullName, '.');
+
+ // WinRT types must have a namespace
+ if (pszName != NULL)
+ {
+ // Replace . between namespace and name with null terminator.
+ // This breaks the string into a namespace and name pair.
+ *pszName = '\0';
+ pszName++;
+
+ szWinRtNamespace = szFullName;
+ szWinRtClassName = pszName;
+ }
+ else
+ { // Namespace '.' separator not found - invalid type name (a namespace must always be present)
+ fValidAssemblyRef = FALSE;
+ }
+ }
+ else
+ { // Type name separator in assembly name '!' not found
+ fValidAssemblyRef = FALSE;
+ }
+ }
+
+ if (fValidAssemblyRef)
+ {
+ pAssembly = fromModule->GetAssemblyIfLoaded(
+ tkAssemblyRef,
+ szWinRtNamespace,
+ szWinRtClassName,
+ pMDImportOverride);
+ }
+ }
+ }
+ }
+
+ if (pAssembly == NULL)
+ return NULL;
+
+ if (moduleIndex == 0)
+ return pAssembly->GetManifestModule();
+ else
+ return pAssembly->GetManifestModule()->GetModuleIfLoaded(RidToToken(moduleIndex, mdtFile), TRUE, TRUE);
+}
+
+
+TypeHandle ZapSig::DecodeType(Module *pEncodeModuleContext,
+ Module *pInfoModule,
+ PCCOR_SIGNATURE pBuffer,
+ ClassLoadLevel level)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SigPointer p(pBuffer);
+
+ ZapSig::Context zapSigContext(pInfoModule, pEncodeModuleContext);
+ ZapSig::Context * pZapSigContext = &zapSigContext;
+
+ SigTypeContext typeContext; // empty context is OK: encoding should not contain type variables.
+
+ TypeHandle th = p.GetTypeHandleThrowing(pInfoModule,
+ &typeContext,
+ ClassLoader::LoadTypes,
+ level,
+ level < CLASS_LOADED, // For non-full loads, drop a level when loading generic arguments
+ NULL,
+ pZapSigContext);
+
+ return th;
+}
+
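+// Decode a method signature produced by ZapSig::EncodeMethod.
+// Illustrative layout, inferred from the decoding below:
+//   <flags> [owner type sig] (<method rid> | <slot>)
+//          [<nargs> <type sig>...] [constrained type sig]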
+MethodDesc *ZapSig::DecodeMethod(Module *pReferencingModule,
+ Module *pInfoModule,
+ PCCOR_SIGNATURE pBuffer,
+ TypeHandle * ppTH /*=NULL*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ MethodDesc *pMethod = NULL;
+
+ SigPointer sig(pBuffer);
+
+ ZapSig::Context zapSigContext(pInfoModule, (void *)pReferencingModule, ZapSig::NormalTokens);
+ ZapSig::Context * pZapSigContext = &zapSigContext;
+
+ SigTypeContext typeContext; // empty context is OK: encoding should not contain type variables.
+
+ // decode flags
+ DWORD methodFlags;
+ IfFailThrow(sig.GetData(&methodFlags));
+
+ TypeHandle thOwner = NULL;
+
+ if ( methodFlags & ENCODE_METHOD_SIG_OwnerType )
+ {
+ thOwner = sig.GetTypeHandleThrowing(pInfoModule,
+ &typeContext,
+ ClassLoader::LoadTypes,
+ CLASS_LOADED,
+ FALSE,
+ NULL,
+ pZapSigContext);
+
+ IfFailThrow(sig.SkipExactlyOne());
+ }
+
+ if ( methodFlags & ENCODE_METHOD_SIG_SlotInsteadOfToken )
+ {
+ // get the method desc using slot number
+ DWORD slot;
+ IfFailThrow(sig.GetData(&slot));
+
+ _ASSERTE(!thOwner.IsNull());
+
+ pMethod = thOwner.GetMethodTable()->GetMethodDescForSlot(slot);
+ }
+ else
+ {
+ //
+ // decode method token
+ //
+ RID rid;
+ IfFailThrow(sig.GetData(&rid));
+
+ if (methodFlags & ENCODE_METHOD_SIG_MemberRefToken)
+ {
+ if (thOwner.IsNull())
+ {
+ TypeHandle th;
+ MethodDesc * pMD = NULL;
+ FieldDesc * pFD = NULL;
+
+ MemberLoader::GetDescFromMemberRef(pInfoModule, TokenFromRid(rid, mdtMemberRef), &pMD, &pFD, NULL, FALSE, &th);
+ _ASSERTE(pMD != NULL);
+
+ thOwner = th;
+ pMethod = pMD;
+ }
+ else
+ {
+ pMethod = MemberLoader::GetMethodDescFromMemberRefAndType(pInfoModule, TokenFromRid(rid, mdtMemberRef), thOwner.GetMethodTable());
+ }
+ }
+ else
+ {
+ pMethod = MemberLoader::GetMethodDescFromMethodDef(pInfoModule, TokenFromRid(rid, mdtMethodDef), FALSE);
+ }
+ }
+
+ if (thOwner.IsNull())
+ thOwner = pMethod->GetMethodTable();
+
+ if (ppTH != NULL)
+ *ppTH = thOwner;
+
+ // Ensure that the MethodDesc's dependencies have been walked sufficiently for type forwarders to be resolved.
+ // This method is essentially a no-op for dependencies which are ngen'd, but does real work for jitted
+ // dependencies. (However, this shouldn't be meaningful work that wouldn't happen in any case very soon.)
+ pMethod->PrepareForUseAsADependencyOfANativeImage();
+
+ Instantiation inst;
+
+ // Instantiate the method if needed, or create a stub to a static method in a generic class.
+ if (methodFlags & ENCODE_METHOD_SIG_MethodInstantiation)
+ {
+ DWORD nargs;
+ IfFailThrow(sig.GetData(&nargs));
+ _ASSERTE(nargs > 0);
+
+ SIZE_T cbMem;
+
+ if (!ClrSafeInt<SIZE_T>::multiply(nargs, sizeof(TypeHandle), cbMem/* passed by ref */))
+ ThrowHR(COR_E_OVERFLOW);
+
+ TypeHandle * pInst = (TypeHandle*) _alloca(cbMem);
+
+ for (DWORD i = 0; i < nargs; i++)
+ {
+ pInst[i] = sig.GetTypeHandleThrowing(pInfoModule,
+ &typeContext,
+ ClassLoader::LoadTypes,
+ CLASS_LOADED,
+ FALSE,
+ NULL,
+ pZapSigContext);
+ IfFailThrow(sig.SkipExactlyOne());
+ }
+
+ inst = Instantiation(pInst, nargs);
+ }
+ else
+ {
+ inst = pMethod->GetMethodInstantiation();
+ }
+
+
+ // This must be called even if nargs == 0, in order to create an instantiating
+ // stub for static methods in generic classes if needed, and also for BoxedEntryPointStubs
+ // in non-generic structs.
+ BOOL isInstantiatingStub = (methodFlags & ENCODE_METHOD_SIG_InstantiatingStub);
+ BOOL isUnboxingStub = (methodFlags & ENCODE_METHOD_SIG_UnboxingStub);
+
+ pMethod = MethodDesc::FindOrCreateAssociatedMethodDesc(pMethod, thOwner.GetMethodTable(),
+ isUnboxingStub,
+ inst,
+ !(isInstantiatingStub || isUnboxingStub));
+
+ g_IBCLogger.LogMethodDescAccess(pMethod);
+
+ if (methodFlags & ENCODE_METHOD_SIG_Constrained)
+ {
+ TypeHandle constrainedType = sig.GetTypeHandleThrowing(pInfoModule,
+ &typeContext,
+ ClassLoader::LoadTypes,
+ CLASS_LOADED,
+ FALSE,
+ NULL,
+ pZapSigContext);
+
+ MethodDesc * directMethod = constrainedType.GetMethodTable()->TryResolveConstraintMethodApprox(thOwner.GetMethodTable(), pMethod);
+ if (directMethod == NULL)
+ {
+ // Method on value type was removed. Boxing stub would need to be generated to handle this case.
+ _ASSERTE(!"Constrained method resolution failed");
+
+ MemberLoader::ThrowMissingMethodException(constrainedType.GetMethodTable(), NULL, NULL, NULL, 0, NULL);
+ }
+
+ pMethod = directMethod;
+ }
+
+ return pMethod;
+}
+
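+// Decode a field signature produced by ZapSig::EncodeField.
+// Illustrative layout, inferred from the decoding below:
+//   <flags> [owner type sig] (<field rid> | <field index>)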
+FieldDesc * ZapSig::DecodeField(Module *pReferencingModule,
+ Module *pInfoModule,
+ PCCOR_SIGNATURE pBuffer,
+ TypeHandle * ppTH /*=NULL*/)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ FieldDesc *pField = NULL;
+
+ SigPointer sig(pBuffer);
+
+ DWORD fieldFlags;
+ IfFailThrow(sig.GetData(&fieldFlags));
+
+ MethodTable *pOwnerMT = NULL;
+
+ if (fieldFlags & ENCODE_FIELD_SIG_OwnerType)
+ {
+ ZapSig::Context zapSigContext(pInfoModule, pReferencingModule);
+ ZapSig::Context * pZapSigContext = &zapSigContext;
+
+ SigTypeContext typeContext; // empty context is OK: encoding should not contain type variables.
+
+ pOwnerMT = sig.GetTypeHandleThrowing(pInfoModule,
+ &typeContext,
+ ClassLoader::LoadTypes,
+ CLASS_LOADED,
+ FALSE,
+ NULL,
+ pZapSigContext).GetMethodTable();
+
+ IfFailThrow(sig.SkipExactlyOne());
+ }
+
+ if (fieldFlags & ENCODE_FIELD_SIG_IndexInsteadOfToken)
+ {
+ // get the field desc using index
+ DWORD fieldIndex;
+ IfFailThrow(sig.GetData(&fieldIndex));
+
+ _ASSERTE(pOwnerMT != NULL);
+
+ pField = pOwnerMT->GetFieldDescByIndex(fieldIndex);
+ _ASSERTE(pOwnerMT == pField->GetApproxEnclosingMethodTable());
+ }
+ else
+ {
+ RID rid;
+ IfFailThrow(sig.GetData(&rid));
+
+ if (fieldFlags & ENCODE_FIELD_SIG_MemberRefToken)
+ {
+ if (pOwnerMT == NULL)
+ {
+ TypeHandle th;
+ MethodDesc * pMD = NULL;
+ FieldDesc * pFD = NULL;
+
+ MemberLoader::GetDescFromMemberRef(pInfoModule, TokenFromRid(rid, mdtMemberRef), &pMD, &pFD, NULL, FALSE, &th);
+ _ASSERTE(pFD != NULL);
+
+ pField = pFD;
+ }
+ else
+ {
+ pField = MemberLoader::GetFieldDescFromMemberRefAndType(pInfoModule, TokenFromRid(rid, mdtMemberRef), pOwnerMT);
+ }
+ }
+ else
+ {
+ pField = MemberLoader::GetFieldDescFromFieldDef(pInfoModule, TokenFromRid(rid, mdtFieldDef), FALSE);
+ }
+ }
+
+ if (ppTH != NULL)
+ *ppTH = (pOwnerMT != NULL) ? pOwnerMT : pField->GetApproxEnclosingMethodTable();
+
+ return pField;
+}
+
+/* static */
+BOOL ZapSig::EncodeMethod(
+ MethodDesc * pMethod,
+ Module * pInfoModule,
+ SigBuilder * pSigBuilder,
+ LPVOID pEncodeModuleContext,
+ ENCODEMODULE_CALLBACK pfnEncodeModule,
+ DEFINETOKEN_CALLBACK pfnDefineToken,
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ TypeHandle ownerType;
+
+#ifdef FEATURE_READYTORUN_COMPILER
+ if (IsReadyToRunCompilation())
+ {
+ if (pResolvedToken == NULL)
+ {
+ _ASSERTE(!"CORINFO_RESOLVED_TOKEN required to encode method!");
+ ThrowHR(E_FAIL);
+ }
+
+ // Encode the referencing method type
+ ownerType = TypeHandle(pResolvedToken->hClass);
+ }
+ else
+#endif
+ {
+ ownerType = pMethod->GetMethodTable_NoLogging();
+ }
+
+ ZapSig::ExternalTokens externalTokens = ZapSig::NormalTokens;
+ if (pfnDefineToken != NULL)
+ {
+ externalTokens = ZapSig::IbcTokens;
+ }
+
+ ZapSig zapSig(pInfoModule, pEncodeModuleContext, externalTokens,
+ (EncodeModuleCallback) pfnEncodeModule,
+ (TokenDefinitionCallback) pfnDefineToken);
+
+ //
+ // output the sequence that represents the token for the method
+ //
+ mdMethodDef methodToken = pMethod->GetMemberDef_NoLogging();
+ DWORD methodFlags = 0;
+ BOOL fMethodNeedsInstantiation = pMethod->HasMethodInstantiation() && !pMethod->IsGenericMethodDefinition();
+
+ if (pMethod->IsUnboxingStub())
+ methodFlags |= ENCODE_METHOD_SIG_UnboxingStub;
+ if (pMethod->IsInstantiatingStub())
+ methodFlags |= ENCODE_METHOD_SIG_InstantiatingStub;
+ if (fMethodNeedsInstantiation)
+ methodFlags |= ENCODE_METHOD_SIG_MethodInstantiation;
+
+ //
+ // For backward compatibility, IBC tokens use a slightly different encoding:
+ // - The owning type is unconditionally encoded
+ // - The number of method instantiation arguments is not encoded
+ //
+ if (externalTokens == ZapSig::IbcTokens)
+ {
+ // The type is always encoded before flags for IBC
+ if (!zapSig.GetSignatureForTypeHandle(ownerType, pSigBuilder))
+ return FALSE;
+ }
+ else
+ {
+ // Assume that the owner type is going to be needed
+ methodFlags |= ENCODE_METHOD_SIG_OwnerType;
+ }
+
+#ifdef FEATURE_READYTORUN_COMPILER
+ if (IsReadyToRunCompilation())
+ {
+ if (pConstrainedResolvedToken != NULL)
+ {
+ methodFlags |= ENCODE_METHOD_SIG_Constrained;
+ }
+
+ Module * pReferencingModule = (Module *)pResolvedToken->tokenScope;
+
+ if (!pReferencingModule->IsInCurrentVersionBubble())
+ {
+ // FUTURE: Encoding of new cross-module references for ReadyToRun
+ // This warning is hit for recursive cross-module inlining. It is commented out to avoid noise.
+ // GetSvcLogger()->Printf(W("ReadyToRun: Method reference outside of current version bubble cannot be encoded\n"));
+ ThrowHR(E_FAIL);
+ }
+ _ASSERTE(pReferencingModule == GetAppDomain()->ToCompilationDomain()->GetTargetModule());
+
+ methodToken = pResolvedToken->token;
+
+ if (TypeFromToken(methodToken) == mdtMethodSpec)
+ {
+ IfFailThrow(pReferencingModule->GetMDImport()->GetMethodSpecProps(methodToken, &methodToken, NULL, NULL));
+ }
+
+ switch (TypeFromToken(methodToken))
+ {
+ case mdtMethodDef:
+ _ASSERTE(pResolvedToken->pTypeSpec == NULL);
+ methodFlags &= ~ENCODE_METHOD_SIG_OwnerType;
+ break;
+
+ case mdtMemberRef:
+ methodFlags |= ENCODE_METHOD_SIG_MemberRefToken;
+
+ if (pResolvedToken->pTypeSpec == NULL)
+ {
+ methodFlags &= ~ENCODE_METHOD_SIG_OwnerType;
+ }
+ else
+ if (!(methodFlags & ENCODE_METHOD_SIG_InstantiatingStub))
+ {
+ if (SigPointer(pResolvedToken->pTypeSpec, pResolvedToken->cbTypeSpec).IsPolyType(NULL) == hasNoVars)
+ methodFlags &= ~ENCODE_METHOD_SIG_OwnerType;
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Unexpected method token type!");
+ ThrowHR(E_NOTIMPL);
+ }
+ }
+ else
+#endif
+ if (IsNilToken(methodToken))
+ {
+ methodFlags |= ENCODE_METHOD_SIG_SlotInsteadOfToken;
+ }
+ else
+ if (!pMethod->GetModule()->IsInCurrentVersionBubble())
+ {
+ // Using a method defined in another version bubble. We can assume the slot number is stable only for real interface methods.
+ if (!ownerType.IsInterface() || pMethod->IsStatic() || pMethod->HasMethodInstantiation())
+ {
+ // FUTURE TODO: Version resilience
+ _ASSERTE(!"References to non-interface methods not yet supported in version resilient images");
+ IfFailThrow(E_FAIL);
+ }
+ methodFlags |= ENCODE_METHOD_SIG_SlotInsteadOfToken;
+ }
+ else
+ {
+ Module * pTypeHandleModule = pMethod->GetModule();
+
+ if (pTypeHandleModule != pInfoModule)
+ {
+ // During IBC profiling this calls
+ // code:Module.EncodeModuleHelper
+ // During ngen this calls
+ // code:ZapImportTable.EncodeModuleHelper
+ //
+ DWORD index = (*((EncodeModuleCallback) pfnEncodeModule))(pEncodeModuleContext, pTypeHandleModule);
+
+ if (index == ENCODE_MODULE_FAILED)
+ {
+ return FALSE;
+ }
+
+ // If the method handle's module is different from pInfoModule
+ // we need to call the TokenDefinitionCallback function
+ // to record the names for the external module tokens
+ //
+ if ((index != 0) && (pfnDefineToken != NULL))
+ {
+ //
+ // We do not want to log the metadata lookups that we perform here
+ //
+ IBCLoggingDisabler disableLogging;
+
+ // During IBC profiling this calls
+ // code:Module::TokenDefinitionHelper()
+ (*((TokenDefinitionCallback) pfnDefineToken))(pEncodeModuleContext, pTypeHandleModule, index, &methodToken);
+ }
+ }
+ else
+ {
+ _ASSERTE(pInfoModule == pMethod->GetModule());
+ }
+
+ if (!ownerType.HasInstantiation())
+ methodFlags &= ~ENCODE_METHOD_SIG_OwnerType;
+ }
+
+ //
+ // output the flags
+ //
+ pSigBuilder->AppendData(methodFlags);
+
+ if (methodFlags & ENCODE_METHOD_SIG_OwnerType)
+ {
+ if (!zapSig.GetSignatureForTypeHandle(ownerType, pSigBuilder))
+ return FALSE;
+ }
+
+ if ((methodFlags & ENCODE_METHOD_SIG_SlotInsteadOfToken) == 0)
+ {
+ // emit the rid
+ pSigBuilder->AppendData(RidFromToken(methodToken));
+ }
+ else
+ {
+ // have no token (e.g. it could be an array), encode slot number
+ pSigBuilder->AppendData(pMethod->GetSlot());
+ }
+
+ if ((methodFlags & ENCODE_METHOD_SIG_MethodInstantiation) != 0)
+ {
+ Instantiation inst = pMethod->GetMethodInstantiation();
+
+ // Number of method instantiation arguments is not encoded in IBC tokens - see comment above
+ if (externalTokens != ZapSig::IbcTokens)
+ pSigBuilder->AppendData(inst.GetNumArgs());
+
+ for (DWORD i = 0; i < inst.GetNumArgs(); i++)
+ {
+ TypeHandle t = inst[i];
+ _ASSERTE(!t.IsNull());
+
+ if (!zapSig.GetSignatureForTypeHandle(t, pSigBuilder))
+ return FALSE;
+ }
+ }
+
+#ifdef FEATURE_READYTORUN_COMPILER
+ if ((methodFlags & ENCODE_METHOD_SIG_Constrained) != 0)
+ {
+ if (!zapSig.GetSignatureForTypeHandle(TypeHandle(pConstrainedResolvedToken->hClass), pSigBuilder))
+ return FALSE;
+ }
+#endif
+
+ return TRUE;
+}
+
+void ZapSig::EncodeField(
+ FieldDesc *pField,
+ Module *pInfoModule,
+ SigBuilder *pSigBuilder,
+ LPVOID pEncodeModuleContext,
+ ENCODEMODULE_CALLBACK pfnEncodeModule,
+ CORINFO_RESOLVED_TOKEN * pResolvedToken)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodTable * pMT;
+
+ mdToken fieldToken = pField->GetMemberDef();
+ DWORD fieldFlags = ENCODE_FIELD_SIG_OwnerType;
+
+#ifdef FEATURE_READYTORUN_COMPILER
+ if (IsReadyToRunCompilation())
+ {
+ if (pResolvedToken == NULL)
+ {
+ _ASSERTE(!"CORINFO_RESOLVED_TOKEN required to encode field!");
+ ThrowHR(E_FAIL);
+ }
+
+ // Encode the referencing field type
+ pMT = (MethodTable *)(pResolvedToken->hClass);
+
+ Module * pReferencingModule = (Module *)pResolvedToken->tokenScope;
+
+ if (!pReferencingModule->IsInCurrentVersionBubble())
+ {
+ // FUTURE: Encoding of new cross-module references for ReadyToRun
+ // This warning is hit for recursive cross-module inlining. It is commented out to avoid noise.
+ // GetSvcLogger()->Printf(W("ReadyToRun: Field reference outside of current version bubble cannot be encoded\n"));
+ ThrowHR(E_FAIL);
+ }
+ _ASSERTE(pReferencingModule == GetAppDomain()->ToCompilationDomain()->GetTargetModule());
+
+ fieldToken = pResolvedToken->token;
+
+ switch (TypeFromToken(fieldToken))
+ {
+ case mdtFieldDef:
+ _ASSERTE(pResolvedToken->pTypeSpec == NULL);
+ fieldFlags &= ~ENCODE_FIELD_SIG_OwnerType;
+ break;
+
+ case mdtMemberRef:
+ fieldFlags |= ENCODE_FIELD_SIG_MemberRefToken;
+
+ if (pResolvedToken->pTypeSpec == NULL)
+ {
+ fieldFlags &= ~ENCODE_FIELD_SIG_OwnerType;
+ }
+ else
+ {
+ if (SigPointer(pResolvedToken->pTypeSpec, pResolvedToken->cbTypeSpec).IsPolyType(NULL) == hasNoVars)
+ fieldFlags &= ~ENCODE_FIELD_SIG_OwnerType;
+ }
+ break;
+
+ default:
+ _ASSERTE(!"Unexpected field token type!");
+ ThrowHR(E_NOTIMPL);
+ }
+ }
+ else
+#endif
+ {
+ pMT = pField->GetApproxEnclosingMethodTable();
+
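+ // Outside of ReadyToRun the field is identified by its index within
+ // the owner MethodTable (see GetIndexForFieldDesc below) rather than
+ // by a metadata token.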
+ fieldFlags |= ENCODE_FIELD_SIG_IndexInsteadOfToken;
+ }
+
+ //
+ // output the flags
+ //
+ pSigBuilder->AppendData(fieldFlags);
+
+ if (fieldFlags & ENCODE_FIELD_SIG_OwnerType)
+ {
+ ZapSig zapSig(pInfoModule, pEncodeModuleContext, ZapSig::NormalTokens,
+ (EncodeModuleCallback) pfnEncodeModule, NULL);
+
+ //
+ // Write class
+ //
+ BOOL fSuccess;
+ fSuccess = zapSig.GetSignatureForTypeHandle(pMT, pSigBuilder);
+ _ASSERTE(fSuccess);
+ }
+
+ if ((fieldFlags & ENCODE_FIELD_SIG_IndexInsteadOfToken) == 0)
+ {
+ // emit the rid
+ pSigBuilder->AppendData(RidFromToken(fieldToken));
+ }
+ else
+ {
+ //
+ // Write field index
+ //
+
+ DWORD fieldIndex = pMT->GetIndexForFieldDesc(pField);
+ _ASSERTE(fieldIndex < DWORD(pMT->GetNumStaticFields() + pMT->GetNumIntroducedInstanceFields()));
+
+ // have no token (e.g. it could be an array), encode the field index
+ pSigBuilder->AppendData(fieldIndex);
+ }
+}
+
+#endif // DACCESS_COMPILE
+
+#endif // !BINDER
+
+#endif // FEATURE_PREJIT
diff --git a/src/vm/zapsig.h b/src/vm/zapsig.h
new file mode 100644
index 0000000000..68f8f3c505
--- /dev/null
+++ b/src/vm/zapsig.h
@@ -0,0 +1,227 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// ---------------------------------------------------------------------------
+// zapsig.h
+//
+// Copyright (c) Microsoft 2002
+// ---------------------------------------------------------------------------
+//
+// This module contains helper functions used to encode and manipulate
+// signatures for the zapper (ngen).
+//
+
+// ---------------------------------------------------------------------------
+
+
+#ifndef ZAPSIG_H
+#define ZAPSIG_H
+
+#include "common.h"
+
+#ifdef BINDER
+#include "typehandle.h"
+#endif
+
+//define function pointer type: EncodeModuleCallback
+//
+#ifdef BINDER
+typedef DWORD (*EncodeModuleCallback)(void* pModuleContext, MdilModule *pReferencedModule);
+#else
+typedef DWORD (*EncodeModuleCallback)(void* pModuleContext, Module *pReferencedModule);
+#endif
+enum {
+ // return value when EncodeModule fails
+ ENCODE_MODULE_FAILED = 0xffffffff,
+};
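+// An EncodeModuleCallback returns a small per-image index for the referenced
+// module (by convention 0 refers to pInfoModule itself), or
+// ENCODE_MODULE_FAILED when the module cannot be encoded.
+// A minimal sketch of such a callback (hypothetical, not part of this header):
+//
+//   DWORD MyEncodeModule(void * pContext, Module * pReferencedModule)
+//   {
+//       // Look up or assign an index for pReferencedModule in pContext's
+//       // module table; return ENCODE_MODULE_FAILED if that is not possible.
+//       return ENCODE_MODULE_FAILED;
+//   }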
+
+//define function pointer type: TokenDefinitionCallback
+//
+#ifdef BINDER
+typedef void (*TokenDefinitionCallback)(void* pModuleContext, MdilModule *pReferencedModule, DWORD index, mdToken* refToken);
+#else
+typedef void (*TokenDefinitionCallback)(void* pModuleContext, Module *pReferencedModule, DWORD index, mdToken* refToken);
+#endif
+
+class ZapSig
+{
+public:
+ enum ExternalTokens
+ {
+ IllegalValue,
+ NormalTokens,
+ IbcTokens
+ };
+
+ struct Context
+ {
+#ifdef BINDER
+ MdilModule * pInfoModule; // The tokens in this ZapSig are expressed relative to context.pInfoModule
+#else
+ Module * pInfoModule; // The tokens in this ZapSig are expressed relative to context.pInfoModule
+#endif
+ void * pModuleContext; // This is a code:Module* when we are resolving Ngen fixups or doing an Ibc Profiling run
+ // and is a code:ZapImportTable* when we are running ngen
+ ExternalTokens externalTokens; // When we see a ELEMENT_TYPE_MODULE_ZAPSIG this tells us what type of token follows.
+
+ Module * GetZapSigModule() const { return (Module*) pModuleContext; }
+
+ Context(
+#ifdef BINDER
+ MdilModule* _pInfoModule,
+#else
+ Module* _pInfoModule,
+#endif
+ void* _pModuleContext, ExternalTokens _externalTokens)
+ : pInfoModule(_pInfoModule),
+ pModuleContext(_pModuleContext),
+ externalTokens(_externalTokens)
+ { LIMITED_METHOD_CONTRACT; _ASSERTE(externalTokens != IllegalValue); }
+
+ Context(
+#ifdef BINDER
+ MdilModule* _pInfoModule,
+#else
+ Module* _pInfoModule,
+#endif
+ Module* _pZapSigModule)
+ : pInfoModule(_pInfoModule),
+ pModuleContext((void*) _pZapSigModule),
+ externalTokens(NormalTokens)
+ { }
+ };
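+ // Typical construction (for illustration): resolving an ngen fixup in
+ // code:ZapSig::CompareFixupToTypeHandle builds a context from the
+ // defining module and the module holding the encoded signature:
+ // ZapSig::Context zapSigContext(pDefiningModule, pZapModule);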
+
+public:
+
+ ZapSig(
+#ifdef BINDER
+ MdilModule * _pInfoModule,
+#else
+ Module * _pInfoModule,
+#endif
+ void * _pModuleContext,
+ ExternalTokens _externalTokens,
+ EncodeModuleCallback _pfnEncodeModule,
+ TokenDefinitionCallback _pfnTokenDefinition)
+
+ : context(_pInfoModule, _pModuleContext, _externalTokens),
+ pfnEncodeModule(_pfnEncodeModule),
+ pfnTokenDefinition(_pfnTokenDefinition)
+ {}
+
+#ifdef FEATURE_PREJIT
+
+ // Instance methods
+
+ // Create a signature for a typeHandle
+ // It can be decoded using MetaSig::GetTypeHandleThrowing
+ // The tokens are expressed relative to this->pInfoModule
+ // When (handle.GetModule() != this->pInfoModule), we escape
+ // the signature with ELEMENT_TYPE_MODULE_ZAPSIG <id-num>
+ // followed by a <token> to encode a temporary change of module
+ // For Ibc Signatures the <token> is one of the ibc defined tokens
+ // For Ngen Fixup signatures the <token> is for the external module
+ //
+ BOOL GetSignatureForTypeHandle(TypeHandle typeHandle,
+ SigBuilder * pSigBuilder);
+
+ // Static methods
+
+ // Compare a type handle with a signature whose tokens are resolved with respect to pModule
+ // pZapSigContext is used to resolve ELEMENT_TYPE_MODULE_ZAPSIG encodings
+ static BOOL CompareSignatureToTypeHandle(PCCOR_SIGNATURE pSig,
+#ifdef BINDER
+ MdilModule* pModule,
+#else
+ Module* pModule,
+#endif
+ TypeHandle handle,
+ const ZapSig::Context * pZapSigContext);
+
+ // Compare a type handle with a tagged pointer. Ensure that the common path is inlined into the caller.
+ static FORCEINLINE BOOL CompareTaggedPointerToTypeHandle(Module * pModule, TADDR addr, TypeHandle handle)
+ {
+ WRAPPER_NO_CONTRACT;
+ if (handle.AsTAddr() == addr)
+ return TRUE;
+ if (!CORCOMPILE_IS_POINTER_TAGGED(addr))
+ return FALSE;
+ return CompareFixupToTypeHandle(pModule, addr, handle);
+ }
+
+ static BOOL CompareFixupToTypeHandle(Module * pModule, TADDR fixup, TypeHandle handle);
+
+ static BOOL CompareTypeHandleFieldToTypeHandle(TypeHandle *pTypeHnd, TypeHandle typeHnd2);
+
+private:
+ BOOL GetSignatureForTypeDesc(TypeDesc * desc, SigBuilder * pSigBuilder);
+
+ // Returns the element type when the typeHandle can be encoded
+ // using a single CorElementType value
+ // This includes using ELEMENT_TYPE_CANON_ZAPSIG for the System.__Canon type
+ //
+ static CorElementType TryEncodeUsingShortcut(/* in */ MethodTable * pMT);
+
+#endif // FEATURE_PREJIT
+
+private:
+
+ ZapSig::Context context;
+
+ EncodeModuleCallback pfnEncodeModule; // Function Pointer to the EncodeModuleHelper
+ TokenDefinitionCallback pfnTokenDefinition; // Function Pointer to the DefineTokenHelper
+
+public:
+ //--------------------------------------------------------------------
+ // Static helper encode/decode helper methods
+
+ static Module *DecodeModuleFromIndexes(Module *fromModule,
+ DWORD assemblyIndex,
+ DWORD moduleIndex);
+
+ static Module *DecodeModuleFromIndexesIfLoaded(Module *fromModule,
+ DWORD assemblyIndex,
+ DWORD moduleIndex);
+
+ // referencingModule is the module that references the type.
+ // fromModule is the module in which the type is defined.
+ // pBuffer contains the signature encoding for the type.
+ // level is the class load level (see classloadlevel.h) to which the type should be loaded
+ static TypeHandle DecodeType(Module *referencingModule,
+ Module *fromModule,
+ PCCOR_SIGNATURE pBuffer,
+ ClassLoadLevel level = CLASS_LOADED);
+
+ static MethodDesc *DecodeMethod(Module *referencingModule,
+ Module *fromModule,
+ PCCOR_SIGNATURE pBuffer,
+ TypeHandle * ppTH = NULL);
+
+ static FieldDesc *DecodeField(Module *referencingModule,
+ Module *fromModule,
+ PCCOR_SIGNATURE pBuffer,
+ TypeHandle * ppTH = NULL);
+
+ static BOOL EncodeMethod(
+ MethodDesc *pMethod,
+ Module *pInfoModule,
+ SigBuilder *pSigBuilder,
+ LPVOID pReferencingModule,
+ ENCODEMODULE_CALLBACK pfnEncodeModule,
+ DEFINETOKEN_CALLBACK pfnDefineToken,
+ CORINFO_RESOLVED_TOKEN * pResolvedToken = NULL,
+ CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken = NULL);
+
+ static void EncodeField(
+ FieldDesc *pField,
+ Module *pInfoModule,
+ SigBuilder *pSigBuilder,
+ LPVOID pReferencingModule,
+ ENCODEMODULE_CALLBACK pfnEncodeModule,
+ CORINFO_RESOLVED_TOKEN * pResolvedToken = NULL);
+
+};
+
+#endif // ZAPSIG_H